Diffstat (limited to 'third_party/libwebrtc/modules')
-rw-r--r--  third_party/libwebrtc/modules/BUILD.gn | 257
-rw-r--r--  third_party/libwebrtc/modules/async_audio_processing/BUILD.gn | 43
-rw-r--r--  third_party/libwebrtc/modules/async_audio_processing/async_audio_processing.cc | 61
-rw-r--r--  third_party/libwebrtc/modules/async_audio_processing/async_audio_processing.h | 76
-rw-r--r--  third_party/libwebrtc/modules/async_audio_processing/async_audio_processing_gn/moz.build | 205
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/BUILD.gn | 2189
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/DEPS | 7
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/OWNERS | 4
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/acm2/acm_receive_test.cc | 161
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/acm2/acm_receive_test.h | 98
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/acm2/acm_receiver.cc | 358
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/acm2/acm_receiver.h | 237
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/acm2/acm_receiver_unittest.cc | 464
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/acm2/acm_remixing.cc | 114
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/acm2/acm_remixing.h | 34
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/acm2/acm_remixing_unittest.cc | 191
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/acm2/acm_resampler.cc | 61
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/acm2/acm_resampler.h | 41
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/acm2/acm_send_test.cc | 174
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/acm2/acm_send_test.h | 91
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/acm2/audio_coding_module.cc | 643
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/acm2/audio_coding_module_unittest.cc | 1618
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/acm2/call_statistics.cc | 63
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/acm2/call_statistics.h | 64
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/acm2/call_statistics_unittest.cc | 57
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/audio_coding.gni | 30
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/audio_coding_gn/moz.build | 216
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/audio_coding_module_typedefs_gn/moz.build | 189
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/audio_coding_opus_common_gn/moz.build | 205
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/audio_encoder_cng_gn/moz.build | 212
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/audio_network_adaptor_config.cc | 34
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/audio_network_adaptor_impl.cc | 171
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/audio_network_adaptor_impl.h | 89
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/audio_network_adaptor_impl_unittest.cc | 306
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/bitrate_controller.cc | 73
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/bitrate_controller.h | 59
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/bitrate_controller_unittest.cc | 250
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/channel_controller.cc | 62
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/channel_controller.h | 58
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/channel_controller_unittest.cc | 101
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/config.proto | 196
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/controller.cc | 19
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/controller.h | 42
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/controller_manager.cc | 454
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/controller_manager.h | 124
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/controller_manager_unittest.cc | 486
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/debug_dump.proto | 42
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/debug_dump_writer.cc | 163
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/debug_dump_writer.h | 54
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/dtx_controller.cc | 51
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/dtx_controller.h | 52
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/dtx_controller_unittest.cc | 82
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/event_log_writer.cc | 78
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/event_log_writer.h | 44
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/event_log_writer_unittest.cc | 240
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/fec_controller_plr_based.cc | 113
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/fec_controller_plr_based.h | 74
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/fec_controller_plr_based_unittest.cc | 489
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/frame_length_controller.cc | 201
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/frame_length_controller.h | 93
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/frame_length_controller_unittest.cc | 444
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/frame_length_controller_v2.cc | 73
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/frame_length_controller_v2.h | 44
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/frame_length_controller_v2_unittest.cc | 121
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/include/audio_network_adaptor.h | 49
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/include/audio_network_adaptor_config.h | 51
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/mock/mock_audio_network_adaptor.h | 57
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/mock/mock_controller.h | 35
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/mock/mock_controller_manager.h | 34
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/mock/mock_debug_dump_writer.h | 44
-rwxr-xr-x  third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/parse_ana_dump.py | 149
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/util/threshold_curve.h | 118
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/util/threshold_curve_unittest.cc | 632
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/audio_network_adaptor_config_gn/moz.build | 197
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/audio_network_adaptor_gn/moz.build | 222
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/audio_decoder.h | 20
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/audio_encoder.h | 20
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/builtin_audio_decoder_factory_unittest.cc | 196
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/builtin_audio_encoder_factory_unittest.cc | 178
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/cng/audio_encoder_cng.cc | 322
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/cng/audio_encoder_cng.h | 49
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/cng/audio_encoder_cng_unittest.cc | 520
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/cng/cng_unittest.cc | 252
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/cng/webrtc_cng.cc | 436
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/cng/webrtc_cng.h | 99
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/g711/audio_decoder_pcm.cc | 102
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/g711/audio_decoder_pcm.h | 81
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/g711/audio_encoder_pcm.cc | 126
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/g711/audio_encoder_pcm.h | 128
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/g711/g711_interface.c | 59
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/g711/g711_interface.h | 135
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/g711/test/testG711.cc | 168
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/g722/audio_decoder_g722.cc | 178
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/g722/audio_decoder_g722.h | 86
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/g722/audio_encoder_g722.cc | 156
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/g722/audio_encoder_g722.h | 71
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/g722/g722_interface.c | 104
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/g722/g722_interface.h | 173
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/g722/test/testG722.cc | 155
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/abs_quant.c | 82
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/abs_quant.h | 42
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/abs_quant_loop.c | 89
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/abs_quant_loop.h | 36
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/audio_decoder_ilbc.cc | 110
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/audio_decoder_ilbc.h | 54
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/audio_encoder_ilbc.cc | 151
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/audio_encoder_ilbc.h | 61
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/augmented_cb_corr.c | 64
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/augmented_cb_corr.h | 42
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/bw_expand.c | 44
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/bw_expand.h | 37
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/cb_construct.c | 80
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/cb_construct.h | 44
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy.c | 81
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy.h | 37
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy_augmentation.c | 69
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy_augmentation.h | 34
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy_calc.c | 67
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy_calc.h | 36
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/cb_search.c | 405
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/cb_search.h | 40
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/cb_search_core.c | 115
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/cb_search_core.h | 41
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/cb_update_best_index.c | 89
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/cb_update_best_index.h | 39
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/chebyshev.c | 76
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/chebyshev.h | 38
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/comp_corr.c | 51
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/comp_corr.h | 39
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/complexityMeasures.m | 57
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/constants.c | 667
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/constants.h | 95
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/create_augmented_vec.c | 83
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/create_augmented_vec.h | 38
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/decode.c | 261
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/decode.h | 42
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/decode_residual.c | 185
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/decode_residual.h | 45
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/decoder_interpolate_lsf.c | 85
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/decoder_interpolate_lsf.h | 41
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/defines.h | 225
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/do_plc.c | 309
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/do_plc.h | 44
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/encode.c | 517
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/encode.h | 38
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/energy_inverse.c | 46
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/energy_inverse.h | 36
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/enh_upsample.c | 112
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/enh_upsample.h | 33
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/enhancer.c | 53
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/enhancer.h | 40
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/enhancer_interface.c | 382
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/enhancer_interface.h | 36
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/filtered_cb_vecs.c | 50
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/filtered_cb_vecs.h | 39
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/frame_classify.c | 90
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/frame_classify.h | 34
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/gain_dequant.c | 47
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/gain_dequant.h | 36
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/gain_quant.c | 105
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/gain_quant.h | 36
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/get_cd_vec.c | 126
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/get_cd_vec.h | 40
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/get_lsp_poly.c | 84
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/get_lsp_poly.h | 46
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/get_sync_seq.c | 111
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/get_sync_seq.h | 41
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/hp_input.c | 90
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/hp_input.h | 38
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/hp_output.c | 91
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/hp_output.h | 38
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/ilbc.c | 288
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/ilbc.h | 251
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/ilbc_unittest.cc | 140
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/index_conv_dec.c | 40
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/index_conv_dec.h | 27
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/index_conv_enc.c | 45
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/index_conv_enc.h | 31
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/init_decode.c | 98
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/init_decode.h | 38
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/init_encode.c | 73
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/init_encode.h | 36
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/interpolate.c | 48
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/interpolate.h | 35
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/interpolate_samples.c | 53
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/interpolate_samples.h | 35
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/lpc_encode.c | 62
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/lpc_encode.h | 42
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/lsf_check.c | 73
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/lsf_check.h | 32
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/lsf_interpolate_to_poly_dec.c | 44
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/lsf_interpolate_to_poly_dec.h | 37
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/lsf_interpolate_to_poly_enc.c | 48
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/lsf_interpolate_to_poly_enc.h | 38
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/lsf_to_lsp.c | 63
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/lsf_to_lsp.h | 34
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/lsf_to_poly.c | 88
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/lsf_to_poly.h | 33
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/lsp_to_lsf.c | 86
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/lsp_to_lsf.h | 35
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/my_corr.c | 56
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/my_corr.h | 36
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/nearest_neighbor.c | 35
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/nearest_neighbor.h | 37
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/pack_bits.c | 253
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/pack_bits.h | 34
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/poly_to_lsf.c | 32
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/poly_to_lsf.h | 32
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/poly_to_lsp.c | 159
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/poly_to_lsp.h | 36
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/refiner.c | 141
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/refiner.h | 44
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/simple_interpolate_lsf.c | 133
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/simple_interpolate_lsf.h | 48
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/simple_lpc_analysis.c | 96
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/simple_lpc_analysis.h | 37
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/simple_lsf_dequant.c | 62
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/simple_lsf_dequant.h | 34
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/simple_lsf_quant.c | 49
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/simple_lsf_quant.h | 37
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/smooth.c | 212
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/smooth.h | 35
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/smooth_out_data.c | 56
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/smooth_out_data.h | 33
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/sort_sq.c | 53
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/sort_sq.h | 36
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/split_vq.c | 63
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/split_vq.h | 38
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/state_construct.c | 116
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/state_construct.h | 38
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/state_search.c | 121
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/state_search.h | 41
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/swap_bytes.c | 35
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/swap_bytes.h | 35
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/test/empty.cc | 0
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/test/iLBC_test.c | 238
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/test/iLBC_testLib.c | 215
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/test/iLBC_testprogram.c | 343
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/unpack_bits.c | 241
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/unpack_bits.h | 39
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/vq3.c | 64
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/vq3.h | 36
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/vq4.c | 63
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/vq4.h | 36
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/window32_w32.c | 64
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/window32_w32.h | 35
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/xcorr_coef.c | 142
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/ilbc/xcorr_coef.h | 39
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/audio_decoder_isac_t.h | 54
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/audio_decoder_isac_t_impl.h | 84
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/audio_encoder_isac_t.h | 108
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/audio_encoder_isac_t_impl.h | 224
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/bandwidth_info.h | 24
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/empty.cc | 0
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/include/audio_decoder_isacfix.h | 22
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/include/audio_encoder_isacfix.h | 22
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/include/isacfix.h | 486
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/arith_routines.c | 122
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/arith_routines_hist.c | 401
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/arith_routines_logist.c | 413
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/arith_routins.h | 149
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/audio_decoder_isacfix.cc | 20
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/audio_encoder_isacfix.cc | 20
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/bandwidth_estimator.c | 1021
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/bandwidth_estimator.h | 128
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/codec.h | 212
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/decode.c | 221
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/decode_bwe.c | 69
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/decode_plc.c | 805
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/encode.c | 635
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/entropy_coding.c | 2056
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/entropy_coding.h | 177
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/entropy_coding_mips.c | 249
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/entropy_coding_neon.c | 217
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/fft.c | 415
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/fft.h | 39
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/filterbank_internal.h | 87
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/filterbank_tables.c | 62
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/filterbank_tables.h | 52
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/filterbanks.c | 297
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/filterbanks_mips.c | 242
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/filterbanks_neon.c | 278
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/filterbanks_unittest.cc | 103
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/filters.c | 112
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/filters_mips.c | 365
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/filters_neon.c | 114
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/filters_unittest.cc | 68
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/initialize.c | 173
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/isac_fix_type.h | 106
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/isacfix.c | 1230
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/lattice.c | 321
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/lattice_armv7.S | 77
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/lattice_c.c | 48
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/lattice_mips.c | 329
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/lattice_neon.c | 195
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/lpc_masking_model.c | 949
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/lpc_masking_model.h | 69
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/lpc_masking_model_mips.c | 237
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/lpc_masking_model_unittest.cc | 61
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/lpc_tables.c | 1281
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/lpc_tables.h | 97
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/pitch_estimator.c | 435
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/pitch_estimator.h | 67
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/pitch_estimator_c.c | 114
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/pitch_estimator_mips.c | 193
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/pitch_filter.c | 248
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/pitch_filter_armv6.S | 143
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/pitch_filter_c.c | 73
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/pitch_filter_mips.c | 133
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/pitch_gain_tables.c | 149
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/pitch_gain_tables.h | 46
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/pitch_lag_tables.c | 306
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/pitch_lag_tables.h | 99
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/settings.h | 211
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/spectrum_ar_model_tables.c | 193
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/spectrum_ar_model_tables.h | 97
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/structs.h | 345
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/transform.c | 214
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/transform_mips.c | 1294
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/transform_neon.c | 479
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/transform_tables.c | 110
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/transform_unittest.cc | 199
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/test/isac_speed_test.cc | 120
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/isac_webrtc_api_test.cc | 346
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/main/include/audio_decoder_isac.h | 22
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/main/include/audio_encoder_isac.h | 22
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/main/include/isac.h | 617
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/arith_routines.c | 60
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/arith_routines.h | 67
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/arith_routines_hist.c | 291
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/arith_routines_logist.c | 303
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/audio_decoder_isac.cc | 20
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/audio_encoder_isac.cc | 20
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/audio_encoder_isac_unittest.cc | 61
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/bandwidth_estimator.c | 1013
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/bandwidth_estimator.h | 165
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/codec.h | 223
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/crc.c | 111
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/crc.h | 41
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/decode.c | 303
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/decode_bwe.c | 89
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/encode.c | 1260
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/encode_lpc_swb.c | 706
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/encode_lpc_swb.h | 246
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/entropy_coding.c | 2066
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/entropy_coding.h | 347
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/filter_functions.c | 195
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/filter_functions.h | 23
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/filterbanks.c | 114
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/intialize.c | 72
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/isac.c | 2307
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/isac_float_type.h | 100
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/isac_unittest.cc | 111
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/isac_vad.c | 409
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/isac_vad.h | 45
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/lattice.c | 219
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/lpc_analysis.c | 496
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/lpc_analysis.h | 46
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/lpc_gain_swb_tables.c | 136
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/lpc_gain_swb_tables.h | 50
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/lpc_shape_swb12_tables.c | 158
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/lpc_shape_swb12_tables.h | 66
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/lpc_shape_swb16_tables.c | 247
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/lpc_shape_swb16_tables.h | 81
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/lpc_tables.c | 601
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/lpc_tables.h | 99
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/os_specific_inline.h | 42
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/pitch_estimator.c | 695
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/pitch_estimator.h | 32
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/pitch_filter.c | 388
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/pitch_filter.h | 42
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/pitch_gain_tables.c | 104
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/pitch_gain_tables.h | 48
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/pitch_lag_tables.c | 277
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/pitch_lag_tables.h | 116
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/settings.h | 196
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/spectrum_ar_model_tables.c | 139
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/spectrum_ar_model_tables.h | 79
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/structs.h | 448
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/transform.c | 126
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/main/test/ReleaseTest-API/ReleaseTest-API.cc | 942
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/main/test/SwitchingSampRate/SwitchingSampRate.cc | 425
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/main/test/simpleKenny.c | 461
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/main/util/utility.c | 179
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/isac/main/util/utility.h | 108
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/legacy_encoded_audio_frame.cc | 88
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/legacy_encoded_audio_frame.h | 53
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/legacy_encoded_audio_frame_unittest.cc | 179
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/opus/DEPS | 5
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/opus/audio_coder_opus_common.cc | 52
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/opus/audio_coder_opus_common.h | 89
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/opus/audio_decoder_multi_channel_opus_impl.cc | 182
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/opus/audio_decoder_multi_channel_opus_impl.h | 74
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/opus/audio_decoder_multi_channel_opus_unittest.cc | 148
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/opus/audio_decoder_opus.cc | 128
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/opus/audio_decoder_opus.h | 64
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/opus/audio_encoder_multi_channel_opus_impl.cc | 366
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/opus/audio_encoder_multi_channel_opus_impl.h | 92
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/opus/audio_encoder_multi_channel_opus_unittest.cc | 156
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/opus/audio_encoder_opus.cc | 827
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/opus/audio_encoder_opus.h | 185
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/opus/audio_encoder_opus_unittest.cc | 906
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/opus/opus_bandwidth_unittest.cc | 152
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/opus/opus_complexity_unittest.cc | 97
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/opus/opus_fec_test.cc | 248
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/opus/opus_inst.h | 48
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/opus/opus_interface.cc | 878
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/opus/opus_interface.h | 547
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/opus/opus_speed_test.cc | 147
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/opus/opus_unittest.cc | 978
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/opus/test/BUILD.gn | 55
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/opus/test/audio_ring_buffer.cc | 76
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/opus/test/audio_ring_buffer.h | 57
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/opus/test/audio_ring_buffer_unittest.cc | 111
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/opus/test/blocker.cc | 215
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/opus/test/blocker.h | 127
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/opus/test/blocker_unittest.cc | 293
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/opus/test/lapped_transform.cc | 100
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/opus/test/lapped_transform.h | 175
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/opus/test/lapped_transform_unittest.cc | 203
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/pcm16b/audio_decoder_pcm16b.cc | 70
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/pcm16b/audio_decoder_pcm16b.h | 52
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/pcm16b/audio_encoder_pcm16b.cc | 39
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/pcm16b/audio_encoder_pcm16b.h | 46
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/pcm16b/pcm16b.c | 32
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/pcm16b/pcm16b.h | 63
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/pcm16b/pcm16b_common.cc | 29
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/pcm16b/pcm16b_common.h | 22
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red.cc | 272
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red.h | 102
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red_unittest.cc | 641
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/tools/audio_codec_speed_test.cc | 126
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/codecs/tools/audio_codec_speed_test.h | 93
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/default_neteq_factory_gn/moz.build | 212
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/g3doc/index.md | 32
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/g711_c_gn/moz.build | 197
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/g711_gn/moz.build | 206
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/g722_c_gn/moz.build | 197
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/g722_gn/moz.build | 206
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/ilbc_c_gn/moz.build | 280
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/ilbc_gn/moz.build | 213
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/include/audio_coding_module.h | 257
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/include/audio_coding_module_typedefs.h | 137
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/isac_bwinfo_gn/moz.build | 189
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/isac_c_gn/moz.build | 213
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/isac_common_gn/moz.build | 197
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/isac_fix_c_arm_asm_gn/moz.build | 89
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/isac_fix_c_gn/moz.build | 118
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/isac_fix_common_gn/moz.build | 93
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/isac_fix_gn/moz.build | 93
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/isac_gn/moz.build | 192
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/isac_neon_gn/moz.build | 96
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/isac_vad_gn/moz.build | 200
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/legacy_encoded_audio_frame_gn/moz.build | 205
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/accelerate.cc | 106
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/accelerate.h | 79
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/audio_decoder_unittest.cc | 678
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/audio_multi_vector.cc | 222
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/audio_multi_vector.h | 138
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/audio_multi_vector_unittest.cc | 317
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/audio_vector.cc | 381
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/audio_vector.h | 172
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/audio_vector_unittest.cc | 384
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/background_noise.cc | 309
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/background_noise.h | 138
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/background_noise_unittest.cc | 26
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/buffer_level_filter.cc | 64
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/buffer_level_filter.h | 54
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/buffer_level_filter_unittest.cc | 116
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/comfort_noise.cc | 130
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/comfort_noise.h | 72
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/comfort_noise_unittest.cc | 31
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/cross_correlation.cc | 55
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/cross_correlation.h | 51
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/decision_logic.cc | 508
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/decision_logic.h | 201
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/decision_logic_unittest.cc | 204
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/decoder_database.cc | 285
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/decoder_database.h | 204
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/decoder_database_unittest.cc | 227
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/default_neteq_factory.cc | 31
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/default_neteq_factory.h | 41
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/delay_manager.cc | 207
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/delay_manager.h | 121
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/delay_manager_unittest.cc | 246
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/dsp_helper.cc | 373
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/dsp_helper.h | 161
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/dsp_helper_unittest.cc | 88
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/dtmf_buffer.cc | 246
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/dtmf_buffer.h | 104
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/dtmf_buffer_unittest.cc | 297
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/dtmf_tone_generator.cc | 215
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/dtmf_tone_generator.h | 57
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/dtmf_tone_generator_unittest.cc | 180
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/expand.cc | 888
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/expand.h | 154
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/expand_uma_logger.cc | 71
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/expand_uma_logger.h | 57
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/expand_unittest.cc | 203
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/g3doc/index.md | 102
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/histogram.cc | 149
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/histogram.h | 64
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/histogram_unittest.cc | 73
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/merge.cc | 391
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/merge.h | 101
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/merge_unittest.cc | 121
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/mock/mock_buffer_level_filter.h | 28
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/mock/mock_decoder_database.h | 50
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/mock/mock_delay_manager.h | 29
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/mock/mock_dtmf_buffer.h | 35
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/mock/mock_dtmf_tone_generator.h | 33
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/mock/mock_expand.h | 60
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/mock/mock_histogram.h | 30
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/mock/mock_neteq_controller.h | 62
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/mock/mock_packet_buffer.h | 82
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/mock/mock_red_payload_splitter.h | 30
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/mock/mock_statistics_calculator.h | 30
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/nack_tracker.cc | 267
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/nack_tracker.h | 211
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/nack_tracker_unittest.cc | 565
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/neteq_decoder_plc_unittest.cc | 313
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/neteq_impl.cc | 2141
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/neteq_impl.h | 404
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/neteq_impl_unittest.cc | 1871
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/neteq_network_stats_unittest.cc | 345
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/neteq_stereo_unittest.cc | 424
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/neteq_unittest.cc | 1013
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/neteq_unittest.proto | 31
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/normal.cc | 194
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/normal.h | 76
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/normal_unittest.cc | 147
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/packet.cc | 36
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/packet.h | 128
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/packet_arrival_history.cc | 107
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/packet_arrival_history.h | 82
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/packet_arrival_history_unittest.cc | 124
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/packet_buffer.cc | 405
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/packet_buffer.h | 181
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/packet_buffer_unittest.cc | 989
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/post_decode_vad.cc | 90
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/post_decode_vad.h | 71
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/post_decode_vad_unittest.cc | 25
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/preemptive_expand.cc | 117
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/preemptive_expand.h | 85
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/random_vector.cc | 63
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/random_vector.h | 46
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/random_vector_unittest.cc | 25
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/red_payload_splitter.cc | 190
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/red_payload_splitter.h | 51
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/red_payload_splitter_unittest.cc | 390
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/reorder_optimizer.cc | 75
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/reorder_optimizer.h | 43
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/reorder_optimizer_unittest.cc | 70
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/statistics_calculator.cc | 394
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/statistics_calculator.h | 210
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/statistics_calculator_unittest.cc | 206
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/sync_buffer.cc | 118
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/sync_buffer.h | 110
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/sync_buffer_unittest.cc | 174
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/test/delay_tool/parse_delay_file.m | 201
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/test/delay_tool/plot_neteq_delay.m | 197
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/test/neteq_decoding_test.cc | 423
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/test/neteq_decoding_test.h | 96
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/test/neteq_ilbc_quality_test.cc | 81
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/test/neteq_isac_quality_test.cc | 102
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/test/neteq_opus_quality_test.cc | 183
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/test/neteq_pcm16b_quality_test.cc | 81
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/test/neteq_pcmu_quality_test.cc | 80
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/test/neteq_performance_unittest.cc | 49
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/test/neteq_speed_test.cc | 58
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/test/result_sink.cc | 109
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/test/result_sink.h | 50
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/time_stretch.cc | 216
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/time_stretch.h | 113
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/time_stretch_unittest.cc | 124
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/timestamp_scaler.cc | 87
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/timestamp_scaler.h | 67
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/timestamp_scaler_unittest.cc | 324
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/tools/DEPS | 3
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/tools/README.md | 17
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/tools/audio_checksum.h | 64
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/tools/audio_loop.cc | 61
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/tools/audio_loop.h | 57
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/tools/audio_sink.cc | 26
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/tools/audio_sink.h | 70
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/tools/constant_pcm_packet_source.cc | 71
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/tools/constant_pcm_packet_source.h | 55
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/tools/encode_neteq_input.cc | 94
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/tools/encode_neteq_input.h | 69
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/tools/fake_decode_from_file.cc | 169
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/tools/fake_decode_from_file.h | 77
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/tools/initial_packet_inserter_neteq_input.cc | 79
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/tools/initial_packet_inserter_neteq_input.h | 46
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/tools/input_audio_file.cc | 96
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/tools/input_audio_file.h | 62
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/tools/input_audio_file_unittest.cc | 59
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/tools/neteq_delay_analyzer.cc | 304
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/tools/neteq_delay_analyzer.h | 76
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/tools/neteq_event_log_input.cc | 68
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/tools/neteq_event_log_input.h | 51
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/tools/neteq_input.cc | 93
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/tools/neteq_input.h | 107
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/tools/neteq_packet_source_input.cc | 90
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/tools/neteq_packet_source_input.h | 70
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/tools/neteq_performance_test.cc | 128
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/tools/neteq_performance_test.h | 32
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/tools/neteq_quality_test.cc | 475
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/tools/neteq_quality_test.h | 176
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/tools/neteq_replacement_input.cc | 116
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/tools/neteq_replacement_input.h | 51
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/tools/neteq_rtpplay.cc | 406
-rwxr-xr-x  third_party/libwebrtc/modules/audio_coding/neteq/tools/neteq_rtpplay_test.sh | 183
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/tools/neteq_stats_getter.cc | 139
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/tools/neteq_stats_getter.h | 106
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/tools/neteq_stats_plotter.cc | 111
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/tools/neteq_stats_plotter.h | 47
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/tools/neteq_test.cc | 349
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/tools/neteq_test.h | 129
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/tools/neteq_test_factory.cc | 342
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/tools/neteq_test_factory.h | 172
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/tools/output_audio_file.h | 51
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/tools/output_wav_file.h | 46
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/tools/packet.cc | 133
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/tools/packet.h | 104
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/tools/packet_source.cc | 25
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/tools/packet_source.h | 43
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/tools/packet_unittest.cc | 226
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/tools/resample_input_audio_file.cc | 48
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/tools/resample_input_audio_file.h | 55
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/tools/rtc_event_log_source.cc | 168
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/tools/rtc_event_log_source.h | 71
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/tools/rtp_analyze.cc | 166
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/tools/rtp_encode.cc | 359
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/tools/rtp_file_source.cc | 100
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/tools/rtp_file_source.h | 68
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/tools/rtp_generator.cc | 60
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/tools/rtp_generator.h | 83
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/tools/rtp_jitter.cc | 148
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/tools/rtpcat.cc | 45
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/underrun_optimizer.cc | 71
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/underrun_optimizer.h | 50
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq/underrun_optimizer_unittest.cc | 42
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/neteq_gn/moz.build | 247
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/pcm16b_c_gn/moz.build | 197
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/pcm16b_gn/moz.build | 207
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/red_gn/moz.build | 212
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/test/Channel.cc | 274
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/test/Channel.h | 117
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/test/EncodeDecodeTest.cc | 272
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/test/EncodeDecodeTest.h | 111
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/test/PCMFile.cc | 240
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/test/PCMFile.h | 77
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/test/PacketLossTest.cc | 167
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/test/PacketLossTest.h | 77
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/test/RTPFile.cc | 235
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/test/RTPFile.h | 133
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/test/TestAllCodecs.cc | 445
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/test/TestAllCodecs.h | 83
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/test/TestRedFec.cc | 233
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/test/TestRedFec.h | 56
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/test/TestStereo.cc | 599
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/test/TestStereo.h | 100
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/test/TestVADDTX.cc | 270
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/test/TestVADDTX.h | 115
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/test/Tester.cc | 122
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/test/TwoWayCommunication.cc | 191
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/test/TwoWayCommunication.h | 62
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/test/iSACTest.cc | 273
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/test/iSACTest.h | 68
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/test/opus_test.cc | 402
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/test/opus_test.h | 59
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/test/target_delay_unittest.cc | 161
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/webrtc_cng_gn/moz.build | 212
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/webrtc_multiopus_gn/moz.build | 231
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/webrtc_opus_gn/moz.build | 238
-rw-r--r--  third_party/libwebrtc/modules/audio_coding/webrtc_opus_wrapper_gn/moz.build | 230
-rw-r--r--  third_party/libwebrtc/modules/audio_device/BUILD.gn | 502
-rw-r--r--  third_party/libwebrtc/modules/audio_device/DEPS | 13
-rw-r--r--  third_party/libwebrtc/modules/audio_device/OWNERS | 2
-rw-r--r--  third_party/libwebrtc/modules/audio_device/android/aaudio_player.cc | 228
-rw-r--r--  third_party/libwebrtc/modules/audio_device/android/aaudio_player.h | 147
-rw-r--r--  third_party/libwebrtc/modules/audio_device/android/aaudio_recorder.cc | 220
-rw-r--r--  third_party/libwebrtc/modules/audio_device/android/aaudio_recorder.h | 129
-rw-r--r--  third_party/libwebrtc/modules/audio_device/android/aaudio_wrapper.cc | 499
-rw-r--r--  third_party/libwebrtc/modules/audio_device/android/aaudio_wrapper.h | 127
-rw-r--r--  third_party/libwebrtc/modules/audio_device/android/audio_common.h | 28
-rw-r--r--  third_party/libwebrtc/modules/audio_device/android/audio_device_template.h | 435
-rw-r--r--  third_party/libwebrtc/modules/audio_device/android/audio_device_unittest.cc | 1020
-rw-r--r--  third_party/libwebrtc/modules/audio_device/android/audio_manager.cc | 318
-rw-r--r--third_party/libwebrtc/modules/audio_device/android/audio_manager.h225
-rw-r--r--third_party/libwebrtc/modules/audio_device/android/audio_manager_unittest.cc239
-rw-r--r--third_party/libwebrtc/modules/audio_device/android/audio_record_jni.cc280
-rw-r--r--third_party/libwebrtc/modules/audio_device/android/audio_record_jni.h168
-rw-r--r--third_party/libwebrtc/modules/audio_device/android/audio_track_jni.cc296
-rw-r--r--third_party/libwebrtc/modules/audio_device/android/audio_track_jni.h161
-rw-r--r--third_party/libwebrtc/modules/audio_device/android/build_info.cc59
-rw-r--r--third_party/libwebrtc/modules/audio_device/android/build_info.h86
-rw-r--r--third_party/libwebrtc/modules/audio_device/android/ensure_initialized.cc42
-rw-r--r--third_party/libwebrtc/modules/audio_device/android/ensure_initialized.h17
-rw-r--r--third_party/libwebrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/BuildInfo.java51
-rw-r--r--third_party/libwebrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioEffects.java312
-rw-r--r--third_party/libwebrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioManager.java371
-rw-r--r--third_party/libwebrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioRecord.java409
-rw-r--r--third_party/libwebrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioTrack.java494
-rw-r--r--third_party/libwebrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioUtils.java382
-rw-r--r--third_party/libwebrtc/modules/audio_device/android/opensles_common.cc103
-rw-r--r--third_party/libwebrtc/modules/audio_device/android/opensles_common.h62
-rw-r--r--third_party/libwebrtc/modules/audio_device/android/opensles_player.cc434
-rw-r--r--third_party/libwebrtc/modules/audio_device/android/opensles_player.h195
-rw-r--r--third_party/libwebrtc/modules/audio_device/android/opensles_recorder.cc431
-rw-r--r--third_party/libwebrtc/modules/audio_device/android/opensles_recorder.h193
-rw-r--r--third_party/libwebrtc/modules/audio_device/audio_device_api_gn/moz.build193
-rw-r--r--third_party/libwebrtc/modules/audio_device/audio_device_buffer.cc518
-rw-r--r--third_party/libwebrtc/modules/audio_device/audio_device_buffer.h244
-rw-r--r--third_party/libwebrtc/modules/audio_device/audio_device_buffer_gn/moz.build213
-rw-r--r--third_party/libwebrtc/modules/audio_device/audio_device_config.h30
-rw-r--r--third_party/libwebrtc/modules/audio_device/audio_device_data_observer.cc372
-rw-r--r--third_party/libwebrtc/modules/audio_device/audio_device_default_gn/moz.build193
-rw-r--r--third_party/libwebrtc/modules/audio_device/audio_device_generic.cc66
-rw-r--r--third_party/libwebrtc/modules/audio_device/audio_device_generic.h145
-rw-r--r--third_party/libwebrtc/modules/audio_device/audio_device_generic_gn/moz.build212
-rw-r--r--third_party/libwebrtc/modules/audio_device/audio_device_gn/moz.build206
-rw-r--r--third_party/libwebrtc/modules/audio_device/audio_device_impl.cc951
-rw-r--r--third_party/libwebrtc/modules/audio_device/audio_device_impl.h180
-rw-r--r--third_party/libwebrtc/modules/audio_device/audio_device_impl_gn/moz.build217
-rw-r--r--third_party/libwebrtc/modules/audio_device/audio_device_name.cc27
-rw-r--r--third_party/libwebrtc/modules/audio_device/audio_device_name.h50
-rw-r--r--third_party/libwebrtc/modules/audio_device/audio_device_unittest.cc1243
-rw-r--r--third_party/libwebrtc/modules/audio_device/dummy/audio_device_dummy.cc226
-rw-r--r--third_party/libwebrtc/modules/audio_device/dummy/audio_device_dummy.h117
-rw-r--r--third_party/libwebrtc/modules/audio_device/dummy/file_audio_device.cc508
-rw-r--r--third_party/libwebrtc/modules/audio_device/dummy/file_audio_device.h163
-rw-r--r--third_party/libwebrtc/modules/audio_device/dummy/file_audio_device_factory.cc62
-rw-r--r--third_party/libwebrtc/modules/audio_device/dummy/file_audio_device_factory.h44
-rw-r--r--third_party/libwebrtc/modules/audio_device/fine_audio_buffer.cc130
-rw-r--r--third_party/libwebrtc/modules/audio_device/fine_audio_buffer.h94
-rw-r--r--third_party/libwebrtc/modules/audio_device/fine_audio_buffer_unittest.cc158
-rw-r--r--third_party/libwebrtc/modules/audio_device/g3doc/audio_device_module.md171
-rw-r--r--third_party/libwebrtc/modules/audio_device/include/audio_device.h179
-rw-r--r--third_party/libwebrtc/modules/audio_device/include/audio_device_data_observer.h72
-rw-r--r--third_party/libwebrtc/modules/audio_device/include/audio_device_default.h132
-rw-r--r--third_party/libwebrtc/modules/audio_device/include/audio_device_defines.h177
-rw-r--r--third_party/libwebrtc/modules/audio_device/include/audio_device_factory.cc53
-rw-r--r--third_party/libwebrtc/modules/audio_device/include/audio_device_factory.h59
-rw-r--r--third_party/libwebrtc/modules/audio_device/include/fake_audio_device.h33
-rw-r--r--third_party/libwebrtc/modules/audio_device/include/mock_audio_device.h156
-rw-r--r--third_party/libwebrtc/modules/audio_device/include/mock_audio_transport.h81
-rw-r--r--third_party/libwebrtc/modules/audio_device/include/test_audio_device.cc498
-rw-r--r--third_party/libwebrtc/modules/audio_device/include/test_audio_device.h150
-rw-r--r--third_party/libwebrtc/modules/audio_device/include/test_audio_device_unittest.cc192
-rw-r--r--third_party/libwebrtc/modules/audio_device/linux/alsasymboltable_linux.cc40
-rw-r--r--third_party/libwebrtc/modules/audio_device/linux/alsasymboltable_linux.h148
-rw-r--r--third_party/libwebrtc/modules/audio_device/linux/audio_device_alsa_linux.cc1637
-rw-r--r--third_party/libwebrtc/modules/audio_device/linux/audio_device_alsa_linux.h208
-rw-r--r--third_party/libwebrtc/modules/audio_device/linux/audio_device_pulse_linux.cc2286
-rw-r--r--third_party/libwebrtc/modules/audio_device/linux/audio_device_pulse_linux.h349
-rw-r--r--third_party/libwebrtc/modules/audio_device/linux/audio_mixer_manager_alsa_linux.cc979
-rw-r--r--third_party/libwebrtc/modules/audio_device/linux/audio_mixer_manager_alsa_linux.h71
-rw-r--r--third_party/libwebrtc/modules/audio_device/linux/audio_mixer_manager_pulse_linux.cc844
-rw-r--r--third_party/libwebrtc/modules/audio_device/linux/audio_mixer_manager_pulse_linux.h114
-rw-r--r--third_party/libwebrtc/modules/audio_device/linux/latebindingsymboltable_linux.cc106
-rw-r--r--third_party/libwebrtc/modules/audio_device/linux/latebindingsymboltable_linux.h168
-rw-r--r--third_party/libwebrtc/modules/audio_device/linux/pulseaudiosymboltable_linux.cc41
-rw-r--r--third_party/libwebrtc/modules/audio_device/linux/pulseaudiosymboltable_linux.h106
-rw-r--r--third_party/libwebrtc/modules/audio_device/mac/audio_device_mac.cc2500
-rw-r--r--third_party/libwebrtc/modules/audio_device/mac/audio_device_mac.h350
-rw-r--r--third_party/libwebrtc/modules/audio_device/mac/audio_mixer_manager_mac.cc924
-rw-r--r--third_party/libwebrtc/modules/audio_device/mac/audio_mixer_manager_mac.h73
-rw-r--r--third_party/libwebrtc/modules/audio_device/mock_audio_device_buffer.h35
-rw-r--r--third_party/libwebrtc/modules/audio_device/opensl/single_rw_fifo.cc123
-rw-r--r--third_party/libwebrtc/modules/audio_device/opensl/single_rw_fifo.h45
-rw-r--r--third_party/libwebrtc/modules/audio_device/win/audio_device_core_win.cc4178
-rw-r--r--third_party/libwebrtc/modules/audio_device/win/audio_device_core_win.h300
-rw-r--r--third_party/libwebrtc/modules/audio_device/win/audio_device_module_win.cc522
-rw-r--r--third_party/libwebrtc/modules/audio_device/win/audio_device_module_win.h87
-rw-r--r--third_party/libwebrtc/modules/audio_device/win/core_audio_base_win.cc948
-rw-r--r--third_party/libwebrtc/modules/audio_device/win/core_audio_base_win.h203
-rw-r--r--third_party/libwebrtc/modules/audio_device/win/core_audio_input_win.cc453
-rw-r--r--third_party/libwebrtc/modules/audio_device/win/core_audio_input_win.h73
-rw-r--r--third_party/libwebrtc/modules/audio_device/win/core_audio_output_win.cc422
-rw-r--r--third_party/libwebrtc/modules/audio_device/win/core_audio_output_win.h72
-rw-r--r--third_party/libwebrtc/modules/audio_device/win/core_audio_utility_win.cc1529
-rw-r--r--third_party/libwebrtc/modules/audio_device/win/core_audio_utility_win.h560
-rw-r--r--third_party/libwebrtc/modules/audio_device/win/core_audio_utility_win_unittest.cc876
-rw-r--r--third_party/libwebrtc/modules/audio_mixer/BUILD.gn143
-rw-r--r--third_party/libwebrtc/modules/audio_mixer/DEPS13
-rw-r--r--third_party/libwebrtc/modules/audio_mixer/OWNERS2
-rw-r--r--third_party/libwebrtc/modules/audio_mixer/audio_frame_manipulator.cc92
-rw-r--r--third_party/libwebrtc/modules/audio_mixer/audio_frame_manipulator.h33
-rw-r--r--third_party/libwebrtc/modules/audio_mixer/audio_frame_manipulator_gn/moz.build212
-rw-r--r--third_party/libwebrtc/modules/audio_mixer/audio_frame_manipulator_unittest.cc66
-rw-r--r--third_party/libwebrtc/modules/audio_mixer/audio_mixer_impl.cc264
-rw-r--r--third_party/libwebrtc/modules/audio_mixer/audio_mixer_impl.h100
-rw-r--r--third_party/libwebrtc/modules/audio_mixer/audio_mixer_impl_gn/moz.build215
-rw-r--r--third_party/libwebrtc/modules/audio_mixer/audio_mixer_impl_unittest.cc794
-rw-r--r--third_party/libwebrtc/modules/audio_mixer/audio_mixer_test.cc182
-rw-r--r--third_party/libwebrtc/modules/audio_mixer/default_output_rate_calculator.cc41
-rw-r--r--third_party/libwebrtc/modules/audio_mixer/default_output_rate_calculator.h36
-rw-r--r--third_party/libwebrtc/modules/audio_mixer/frame_combiner.cc245
-rw-r--r--third_party/libwebrtc/modules/audio_mixer/frame_combiner.h62
-rw-r--r--third_party/libwebrtc/modules/audio_mixer/frame_combiner_unittest.cc341
-rw-r--r--third_party/libwebrtc/modules/audio_mixer/g3doc/index.md54
-rw-r--r--third_party/libwebrtc/modules/audio_mixer/gain_change_calculator.cc63
-rw-r--r--third_party/libwebrtc/modules/audio_mixer/gain_change_calculator.h42
-rw-r--r--third_party/libwebrtc/modules/audio_mixer/output_rate_calculator.h32
-rw-r--r--third_party/libwebrtc/modules/audio_mixer/sine_wave_generator.cc35
-rw-r--r--third_party/libwebrtc/modules/audio_mixer/sine_wave_generator.h40
-rw-r--r--third_party/libwebrtc/modules/audio_processing/BUILD.gn664
-rw-r--r--third_party/libwebrtc/modules/audio_processing/DEPS14
-rw-r--r--third_party/libwebrtc/modules/audio_processing/OWNERS8
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/BUILD.gn383
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/adaptive_fir_filter.cc744
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/adaptive_fir_filter.h192
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/adaptive_fir_filter_avx2.cc188
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/adaptive_fir_filter_erl.cc102
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/adaptive_fir_filter_erl.h54
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/adaptive_fir_filter_erl_avx2.cc37
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/adaptive_fir_filter_erl_gn/moz.build193
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/adaptive_fir_filter_erl_unittest.cc106
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/adaptive_fir_filter_gn/moz.build204
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/adaptive_fir_filter_unittest.cc594
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/aec3_avx2_gn/moz.build178
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/aec3_common.cc58
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/aec3_common.h114
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/aec3_common_gn/moz.build189
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/aec3_fft.cc144
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/aec3_fft.h75
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/aec3_fft_gn/moz.build204
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/aec3_fft_unittest.cc213
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/aec3_gn/moz.build269
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/aec_state.cc481
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/aec_state.h300
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/aec_state_unittest.cc297
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/alignment_mixer.cc163
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/alignment_mixer.h57
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/alignment_mixer_unittest.cc196
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/api_call_jitter_metrics.cc121
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/api_call_jitter_metrics.h60
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/api_call_jitter_metrics_unittest.cc109
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/block.h91
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/block_buffer.cc23
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/block_buffer.h60
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/block_delay_buffer.cc69
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/block_delay_buffer.h43
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/block_delay_buffer_unittest.cc105
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/block_framer.cc83
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/block_framer.h49
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/block_framer_unittest.cc337
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/block_processor.cc292
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/block_processor.h81
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/block_processor_metrics.cc104
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/block_processor_metrics.h46
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/block_processor_metrics_unittest.cc34
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/block_processor_unittest.cc341
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/clockdrift_detector.cc61
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/clockdrift_detector.h40
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/clockdrift_detector_unittest.cc57
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/coarse_filter_update_gain.cc103
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/coarse_filter_update_gain.h74
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/coarse_filter_update_gain_unittest.cc268
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/comfort_noise_generator.cc186
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/comfort_noise_generator.h77
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/comfort_noise_generator_unittest.cc72
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/config_selector.cc71
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/config_selector.h41
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/config_selector_unittest.cc116
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/decimator.cc91
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/decimator.h41
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/decimator_unittest.cc135
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/delay_estimate.h31
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/dominant_nearend_detector.cc75
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/dominant_nearend_detector.h56
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/downsampled_render_buffer.cc25
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/downsampled_render_buffer.h58
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/echo_audibility.cc119
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/echo_audibility.h85
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/echo_canceller3.cc992
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/echo_canceller3.h230
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/echo_canceller3_unittest.cc1160
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/echo_path_delay_estimator.cc127
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/echo_path_delay_estimator.h80
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/echo_path_delay_estimator_unittest.cc185
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/echo_path_variability.cc22
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/echo_path_variability.h37
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/echo_path_variability_unittest.cc50
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/echo_remover.cc521
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/echo_remover.h62
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/echo_remover_metrics.cc157
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/echo_remover_metrics.h78
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/echo_remover_metrics_unittest.cc156
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/echo_remover_unittest.cc210
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/erl_estimator.cc146
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/erl_estimator.h58
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/erl_estimator_unittest.cc104
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/erle_estimator.cc89
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/erle_estimator.h112
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/erle_estimator_unittest.cc288
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/fft_buffer.cc27
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/fft_buffer.h60
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/fft_data.h104
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/fft_data_avx2.cc33
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/fft_data_gn/moz.build193
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/fft_data_unittest.cc186
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/filter_analyzer.cc289
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/filter_analyzer.h150
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/filter_analyzer_unittest.cc33
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/frame_blocker.cc80
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/frame_blocker.h51
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/frame_blocker_unittest.cc425
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/fullband_erle_estimator.cc191
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/fullband_erle_estimator.h118
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/matched_filter.cc807
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/matched_filter.h173
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/matched_filter_avx2.cc261
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/matched_filter_gn/moz.build193
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/matched_filter_lag_aggregator.cc161
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/matched_filter_lag_aggregator.h97
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/matched_filter_lag_aggregator_unittest.cc113
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/matched_filter_unittest.cc558
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/mock/mock_block_processor.cc20
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/mock/mock_block_processor.h53
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/mock/mock_echo_remover.cc20
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/mock/mock_echo_remover.h56
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/mock/mock_render_delay_buffer.cc36
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/mock/mock_render_delay_buffer.h67
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/mock/mock_render_delay_controller.cc20
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/mock/mock_render_delay_controller.h42
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/moving_average.cc60
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/moving_average.h45
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/moving_average_unittest.cc89
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/multi_channel_content_detector.cc148
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/multi_channel_content_detector.h96
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/multi_channel_content_detector_unittest.cc470
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/nearend_detector.h42
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/refined_filter_update_gain.cc173
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/refined_filter_update_gain.h91
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/refined_filter_update_gain_unittest.cc392
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/render_buffer.cc81
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/render_buffer.h115
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/render_buffer_gn/moz.build193
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/render_buffer_unittest.cc46
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/render_delay_buffer.cc519
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/render_delay_buffer.h86
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/render_delay_buffer_unittest.cc130
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/render_delay_controller.cc184
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/render_delay_controller.h51
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/render_delay_controller_metrics.cc145
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/render_delay_controller_metrics.h56
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/render_delay_controller_metrics_unittest.cc35
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/render_delay_controller_unittest.cc334
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/render_signal_analyzer.cc156
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/render_signal_analyzer.h62
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/render_signal_analyzer_unittest.cc171
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/residual_echo_estimator.cc379
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/residual_echo_estimator.h85
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/residual_echo_estimator_unittest.cc199
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/reverb_decay_estimator.cc410
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/reverb_decay_estimator.h120
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/reverb_frequency_response.cc108
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/reverb_frequency_response.h55
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/reverb_model.cc59
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/reverb_model.h58
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/reverb_model_estimator.cc57
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/reverb_model_estimator.h72
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/reverb_model_estimator_unittest.cc157
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/signal_dependent_erle_estimator.cc416
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/signal_dependent_erle_estimator.h104
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/signal_dependent_erle_estimator_unittest.cc208
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/spectrum_buffer.cc30
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/spectrum_buffer.h62
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/stationarity_estimator.cc241
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/stationarity_estimator.h123
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/subband_erle_estimator.cc251
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/subband_erle_estimator.h106
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/subband_nearend_detector.cc70
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/subband_nearend_detector.h52
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/subtractor.cc364
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/subtractor.h150
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/subtractor_output.cc58
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/subtractor_output.h52
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/subtractor_output_analyzer.cc64
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/subtractor_output_analyzer.h45
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/subtractor_unittest.cc320
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/suppression_filter.cc180
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/suppression_filter.h51
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/suppression_filter_unittest.cc257
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/suppression_gain.cc465
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/suppression_gain.h145
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/suppression_gain_unittest.cc149
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/transparent_mode.cc243
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/transparent_mode.h47
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/vector_math.h229
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/vector_math_avx2.cc82
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/vector_math_gn/moz.build193
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec3/vector_math_unittest.cc209
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec_dump/BUILD.gn112
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec_dump/aec_dump_factory.h48
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec_dump/aec_dump_gn/moz.build197
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec_dump/aec_dump_impl.cc281
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec_dump/aec_dump_impl.h85
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec_dump/aec_dump_integration_test.cc93
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec_dump/aec_dump_unittest.cc87
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec_dump/capture_stream_info.cc59
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec_dump/capture_stream_info.h66
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec_dump/mock_aec_dump.cc19
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec_dump/mock_aec_dump.h82
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec_dump/null_aec_dump_factory.cc34
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec_dump/null_aec_dump_factory_gn/moz.build205
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aec_dump_interface_gn/moz.build205
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aecm/BUILD.gn44
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aecm/aecm_core.cc1125
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aecm/aecm_core.h441
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aecm/aecm_core_c.cc671
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aecm/aecm_core_gn/moz.build222
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aecm/aecm_core_mips.cc1656
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aecm/aecm_core_neon.cc206
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aecm/aecm_defines.h87
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aecm/echo_control_mobile.cc599
-rw-r--r--third_party/libwebrtc/modules/audio_processing/aecm/echo_control_mobile.h209
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc/BUILD.gn193
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc/agc.cc98
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc/agc.h52
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc/agc_gn/moz.build213
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc/agc_manager_direct.cc720
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc/agc_manager_direct.h226
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc/agc_manager_direct_unittest.cc1452
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc/analog_gain_stats_reporter.cc130
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc/analog_gain_stats_reporter.h67
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc/analog_gain_stats_reporter_gn/moz.build205
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc/analog_gain_stats_reporter_unittest.cc169
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc/clipping_predictor.cc383
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc/clipping_predictor.h63
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc/clipping_predictor_evaluator.cc214
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc/clipping_predictor_evaluator.h122
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc/clipping_predictor_evaluator_gn/moz.build205
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc/clipping_predictor_evaluator_unittest.cc763
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc/clipping_predictor_gn/moz.build212
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc/clipping_predictor_level_buffer.cc77
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc/clipping_predictor_level_buffer.h71
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc/clipping_predictor_level_buffer_gn/moz.build205
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc/clipping_predictor_level_buffer_unittest.cc131
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc/clipping_predictor_unittest.cc491
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc/gain_control.h105
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc/gain_control_interface_gn/moz.build189
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc/gain_map_gn/moz.build189
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc/gain_map_internal.h40
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc/legacy/analog_agc.cc1238
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc/legacy/analog_agc.h118
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc/legacy/digital_agc.cc704
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc/legacy/digital_agc.h75
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc/legacy/gain_control.h253
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc/legacy_agc_gn/moz.build213
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc/level_estimation_gn/moz.build214
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc/loudness_histogram.cc229
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc/loudness_histogram.h90
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc/loudness_histogram_unittest.cc107
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc/mock_agc.h32
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc/utility.cc39
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc/utility.h27
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/BUILD.gn309
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/adaptive_digital_gain_applier.cc267
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/adaptive_digital_gain_applier.h71
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/adaptive_digital_gain_applier_unittest.cc373
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/adaptive_digital_gain_controller.cc108
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/adaptive_digital_gain_controller.h63
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/adaptive_digital_gn/moz.build217
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/adaptive_mode_level_estimator.cc164
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/adaptive_mode_level_estimator.h77
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/adaptive_mode_level_estimator_unittest.cc217
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/agc2_common.h57
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/agc2_testing_common.cc93
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/agc2_testing_common.h82
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/agc2_testing_common_unittest.cc27
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/biquad_filter.cc60
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/biquad_filter.h56
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/biquad_filter_gn/moz.build201
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/biquad_filter_unittest.cc175
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/common_gn/moz.build189
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/compute_interpolated_gain_curve.cc229
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/compute_interpolated_gain_curve.h48
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/cpu_features.cc62
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/cpu_features.h39
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/cpu_features_gn/moz.build212
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/fixed_digital_gn/moz.build215
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/fixed_digital_level_estimator.cc121
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/fixed_digital_level_estimator.h66
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/fixed_digital_level_estimator_unittest.cc159
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/gain_applier.cc103
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/gain_applier.h44
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/gain_applier_gn/moz.build201
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/gain_applier_unittest.cc93
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/interpolated_gain_curve.cc204
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/interpolated_gain_curve.h152
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/interpolated_gain_curve_unittest.cc203
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/limiter.cc155
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/limiter.h63
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/limiter_db_gain_curve.cc138
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/limiter_db_gain_curve.h76
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/limiter_db_gain_curve_unittest.cc60
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/limiter_unittest.cc60
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/noise_level_estimator.cc164
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/noise_level_estimator.h36
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/noise_level_estimator_gn/moz.build213
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/noise_level_estimator_unittest.cc98
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/BUILD.gn334
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/DEPS3
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/auto_correlation.cc91
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/auto_correlation.h49
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/auto_correlation_unittest.cc66
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/common.h77
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/features_extraction.cc90
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/features_extraction.h61
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/features_extraction_unittest.cc103
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/lp_residual.cc141
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/lp_residual.h41
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/lp_residual_unittest.cc80
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/pitch_search.cc70
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/pitch_search.h54
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/pitch_search_internal.cc513
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/pitch_search_internal.h114
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/pitch_search_internal_unittest.cc217
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/pitch_search_unittest.cc53
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/ring_buffer.h65
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/ring_buffer_unittest.cc112
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/rnn.cc91
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/rnn.h53
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/rnn_fc.cc103
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/rnn_fc.h72
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/rnn_fc_unittest.cc111
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/rnn_gru.cc198
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/rnn_gru.h70
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/rnn_gru_unittest.cc186
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/rnn_unittest.cc70
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/rnn_vad_auto_correlation_gn/moz.build212
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/rnn_vad_common_gn/moz.build204
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/rnn_vad_gn/moz.build213
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/rnn_vad_layers_gn/moz.build213
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/rnn_vad_lp_residual_gn/moz.build201
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/rnn_vad_pitch_gn/moz.build213
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/rnn_vad_ring_buffer_gn/moz.build193
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/rnn_vad_sequence_buffer_gn/moz.build193
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/rnn_vad_spectral_features_gn/moz.build213
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/rnn_vad_symmetric_matrix_buffer_gn/moz.build193
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/rnn_vad_tool.cc123
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/rnn_vad_unittest.cc185
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/sequence_buffer.h79
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/sequence_buffer_unittest.cc102
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/spectral_features.cc214
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/spectral_features.h79
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/spectral_features_internal.cc188
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/spectral_features_internal.h100
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/spectral_features_internal_unittest.cc160
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/spectral_features_unittest.cc160
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/symmetric_matrix_buffer.h95
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/symmetric_matrix_buffer_unittest.cc107
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/test_utils.cc143
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/test_utils.h130
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/vector_math.h114
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/vector_math_avx2.cc55
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/vector_math_avx2_gn/moz.build173
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/vector_math_gn/moz.build204
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/vector_math_unittest.cc71
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/saturation_protector.cc183
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/saturation_protector.h46
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/saturation_protector_buffer.cc77
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/saturation_protector_buffer.h59
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/saturation_protector_buffer_unittest.cc73
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/saturation_protector_unittest.cc140
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/vad_wrapper.cc106
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/vad_wrapper.h78
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/vad_wrapper_gn/moz.build212
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/vad_wrapper_unittest.cc181
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/vector_float_frame.cc39
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/vector_float_frame.h42
-rw-r--r--third_party/libwebrtc/modules/audio_processing/api_gn/moz.build205
-rw-r--r--third_party/libwebrtc/modules/audio_processing/apm_logging_gn/moz.build213
-rw-r--r--third_party/libwebrtc/modules/audio_processing/audio_buffer.cc396
-rw-r--r--third_party/libwebrtc/modules/audio_processing/audio_buffer.h172
-rw-r--r--third_party/libwebrtc/modules/audio_processing/audio_buffer_gn/moz.build215
-rw-r--r--third_party/libwebrtc/modules/audio_processing/audio_buffer_unittest.cc93
-rw-r--r--third_party/libwebrtc/modules/audio_processing/audio_frame_proxies_gn/moz.build205
-rw-r--r--third_party/libwebrtc/modules/audio_processing/audio_frame_view_gn/moz.build193
-rw-r--r--third_party/libwebrtc/modules/audio_processing/audio_frame_view_unittest.cc51
-rw-r--r--third_party/libwebrtc/modules/audio_processing/audio_processing_builder_impl.cc34
-rw-r--r--third_party/libwebrtc/modules/audio_processing/audio_processing_gn/moz.build219
-rw-r--r--third_party/libwebrtc/modules/audio_processing/audio_processing_impl.cc2193
-rw-r--r--third_party/libwebrtc/modules/audio_processing/audio_processing_impl.h549
-rw-r--r--third_party/libwebrtc/modules/audio_processing/audio_processing_impl_locking_unittest.cc1012
-rw-r--r--third_party/libwebrtc/modules/audio_processing/audio_processing_impl_unittest.cc814
-rw-r--r--third_party/libwebrtc/modules/audio_processing/audio_processing_performance_unittest.cc592
-rw-r--r--third_party/libwebrtc/modules/audio_processing/audio_processing_statistics_gn/moz.build197
-rw-r--r--third_party/libwebrtc/modules/audio_processing/audio_processing_unittest.cc3155
-rw-r--r--third_party/libwebrtc/modules/audio_processing/capture_levels_adjuster/BUILD.gn47
-rw-r--r--third_party/libwebrtc/modules/audio_processing/capture_levels_adjuster/audio_samples_scaler.cc92
-rw-r--r--third_party/libwebrtc/modules/audio_processing/capture_levels_adjuster/audio_samples_scaler.h46
-rw-r--r--third_party/libwebrtc/modules/audio_processing/capture_levels_adjuster/audio_samples_scaler_unittest.cc204
-rw-r--r--third_party/libwebrtc/modules/audio_processing/capture_levels_adjuster/capture_levels_adjuster.cc96
-rw-r--r--third_party/libwebrtc/modules/audio_processing/capture_levels_adjuster/capture_levels_adjuster.h88
-rw-r--r--third_party/libwebrtc/modules/audio_processing/capture_levels_adjuster/capture_levels_adjuster_gn/moz.build213
-rw-r--r--third_party/libwebrtc/modules/audio_processing/capture_levels_adjuster/capture_levels_adjuster_unittest.cc187
-rw-r--r--third_party/libwebrtc/modules/audio_processing/debug.proto115
-rw-r--r--third_party/libwebrtc/modules/audio_processing/echo_control_mobile_bit_exact_unittest.cc221
-rw-r--r--third_party/libwebrtc/modules/audio_processing/echo_control_mobile_impl.cc287
-rw-r--r--third_party/libwebrtc/modules/audio_processing/echo_control_mobile_impl.h86
-rw-r--r--third_party/libwebrtc/modules/audio_processing/echo_control_mobile_unittest.cc43
-rw-r--r--third_party/libwebrtc/modules/audio_processing/echo_detector/circular_buffer.cc49
-rw-r--r--third_party/libwebrtc/modules/audio_processing/echo_detector/circular_buffer.h44
-rw-r--r--third_party/libwebrtc/modules/audio_processing/echo_detector/circular_buffer_unittest.cc53
-rw-r--r--third_party/libwebrtc/modules/audio_processing/echo_detector/mean_variance_estimator.cc47
-rw-r--r--third_party/libwebrtc/modules/audio_processing/echo_detector/mean_variance_estimator.h33
-rw-r--r--third_party/libwebrtc/modules/audio_processing/echo_detector/mean_variance_estimator_unittest.cc65
-rw-r--r--third_party/libwebrtc/modules/audio_processing/echo_detector/moving_max.cc52
-rw-r--r--third_party/libwebrtc/modules/audio_processing/echo_detector/moving_max.h36
-rw-r--r--third_party/libwebrtc/modules/audio_processing/echo_detector/moving_max_unittest.cc68
-rw-r--r--third_party/libwebrtc/modules/audio_processing/echo_detector/normalized_covariance_estimator.cc43
-rw-r--r--third_party/libwebrtc/modules/audio_processing/echo_detector/normalized_covariance_estimator.h43
-rw-r--r--third_party/libwebrtc/modules/audio_processing/echo_detector/normalized_covariance_estimator_unittest.cc41
-rw-r--r--third_party/libwebrtc/modules/audio_processing/g3doc/audio_processing_module.md26
-rw-r--r--third_party/libwebrtc/modules/audio_processing/gain_control_impl.cc395
-rw-r--r--third_party/libwebrtc/modules/audio_processing/gain_control_impl.h92
-rw-r--r--third_party/libwebrtc/modules/audio_processing/gain_control_unittest.cc393
-rw-r--r--third_party/libwebrtc/modules/audio_processing/gain_controller2.cc180
-rw-r--r--third_party/libwebrtc/modules/audio_processing/gain_controller2.h77
-rw-r--r--third_party/libwebrtc/modules/audio_processing/gain_controller2_gn/moz.build213
-rw-r--r--third_party/libwebrtc/modules/audio_processing/gain_controller2_unittest.cc492
-rw-r--r--third_party/libwebrtc/modules/audio_processing/high_pass_filter.cc115
-rw-r--r--third_party/libwebrtc/modules/audio_processing/high_pass_filter.h45
-rw-r--r--third_party/libwebrtc/modules/audio_processing/high_pass_filter_gn/moz.build212
-rw-r--r--third_party/libwebrtc/modules/audio_processing/high_pass_filter_unittest.cc301
-rw-r--r--third_party/libwebrtc/modules/audio_processing/include/aec_dump.cc41
-rw-r--r--third_party/libwebrtc/modules/audio_processing/include/aec_dump.h115
-rw-r--r--third_party/libwebrtc/modules/audio_processing/include/audio_frame_proxies.cc66
-rw-r--r--third_party/libwebrtc/modules/audio_processing/include/audio_frame_proxies.h41
-rw-r--r--third_party/libwebrtc/modules/audio_processing/include/audio_frame_view.h68
-rw-r--r--third_party/libwebrtc/modules/audio_processing/include/audio_processing.cc211
-rw-r--r--third_party/libwebrtc/modules/audio_processing/include/audio_processing.h932
-rw-r--r--third_party/libwebrtc/modules/audio_processing/include/audio_processing_statistics.cc22
-rw-r--r--third_party/libwebrtc/modules/audio_processing/include/audio_processing_statistics.h67
-rw-r--r--third_party/libwebrtc/modules/audio_processing/include/mock_audio_processing.h178
-rw-r--r--third_party/libwebrtc/modules/audio_processing/logging/apm_data_dumper.cc100
-rw-r--r--third_party/libwebrtc/modules/audio_processing/logging/apm_data_dumper.h452
-rw-r--r--third_party/libwebrtc/modules/audio_processing/ns/BUILD.gn104
-rw-r--r--third_party/libwebrtc/modules/audio_processing/ns/fast_math.cc84
-rw-r--r--third_party/libwebrtc/modules/audio_processing/ns/fast_math.h38
-rw-r--r--third_party/libwebrtc/modules/audio_processing/ns/histograms.cc47
-rw-r--r--third_party/libwebrtc/modules/audio_processing/ns/histograms.h55
-rw-r--r--third_party/libwebrtc/modules/audio_processing/ns/noise_estimator.cc195
-rw-r--r--third_party/libwebrtc/modules/audio_processing/ns/noise_estimator.h77
-rw-r--r--third_party/libwebrtc/modules/audio_processing/ns/noise_suppressor.cc555
-rw-r--r--third_party/libwebrtc/modules/audio_processing/ns/noise_suppressor.h92
-rw-r--r--third_party/libwebrtc/modules/audio_processing/ns/noise_suppressor_unittest.cc102
-rw-r--r--third_party/libwebrtc/modules/audio_processing/ns/ns_common.h34
-rw-r--r--third_party/libwebrtc/modules/audio_processing/ns/ns_config.h24
-rw-r--r--third_party/libwebrtc/modules/audio_processing/ns/ns_fft.cc64
-rw-r--r--third_party/libwebrtc/modules/audio_processing/ns/ns_fft.h45
-rw-r--r--third_party/libwebrtc/modules/audio_processing/ns/ns_gn/moz.build225
-rw-r--r--third_party/libwebrtc/modules/audio_processing/ns/prior_signal_model.cc18
-rw-r--r--third_party/libwebrtc/modules/audio_processing/ns/prior_signal_model.h32
-rw-r--r--third_party/libwebrtc/modules/audio_processing/ns/prior_signal_model_estimator.cc170
-rw-r--r--third_party/libwebrtc/modules/audio_processing/ns/prior_signal_model_estimator.h39
-rw-r--r--third_party/libwebrtc/modules/audio_processing/ns/quantile_noise_estimator.cc88
-rw-r--r--third_party/libwebrtc/modules/audio_processing/ns/quantile_noise_estimator.h45
-rw-r--r--third_party/libwebrtc/modules/audio_processing/ns/signal_model.cc24
-rw-r--r--third_party/libwebrtc/modules/audio_processing/ns/signal_model.h34
-rw-r--r--third_party/libwebrtc/modules/audio_processing/ns/signal_model_estimator.cc175
-rw-r--r--third_party/libwebrtc/modules/audio_processing/ns/signal_model_estimator.h58
-rw-r--r--third_party/libwebrtc/modules/audio_processing/ns/speech_probability_estimator.cc103
-rw-r--r--third_party/libwebrtc/modules/audio_processing/ns/speech_probability_estimator.h51
-rw-r--r--third_party/libwebrtc/modules/audio_processing/ns/suppression_params.cc49
-rw-r--r--third_party/libwebrtc/modules/audio_processing/ns/suppression_params.h30
-rw-r--r--third_party/libwebrtc/modules/audio_processing/ns/wiener_filter.cc120
-rw-r--r--third_party/libwebrtc/modules/audio_processing/ns/wiener_filter.h57
-rw-r--r--third_party/libwebrtc/modules/audio_processing/optionally_built_submodule_creators.cc36
-rw-r--r--third_party/libwebrtc/modules/audio_processing/optionally_built_submodule_creators.h42
-rw-r--r--third_party/libwebrtc/modules/audio_processing/optionally_built_submodule_creators_gn/moz.build212
-rw-r--r--third_party/libwebrtc/modules/audio_processing/render_queue_item_verifier.h36
-rw-r--r--third_party/libwebrtc/modules/audio_processing/residual_echo_detector.cc205
-rw-r--r--third_party/libwebrtc/modules/audio_processing/residual_echo_detector.h91
-rw-r--r--third_party/libwebrtc/modules/audio_processing/residual_echo_detector_unittest.cc138
-rw-r--r--third_party/libwebrtc/modules/audio_processing/rms_level.cc138
-rw-r--r--third_party/libwebrtc/modules/audio_processing/rms_level.h77
-rw-r--r--third_party/libwebrtc/modules/audio_processing/rms_level_gn/moz.build201
-rw-r--r--third_party/libwebrtc/modules/audio_processing/rms_level_unittest.cc197
-rw-r--r--third_party/libwebrtc/modules/audio_processing/splitting_filter.cc144
-rw-r--r--third_party/libwebrtc/modules/audio_processing/splitting_filter.h72
-rw-r--r--third_party/libwebrtc/modules/audio_processing/splitting_filter_unittest.cc103
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/aec_dump_based_simulator.cc654
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/aec_dump_based_simulator.h82
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/android/apmtest/AndroidManifest.xml30
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/android/apmtest/default.properties11
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/android/apmtest/jni/main.c307
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/android/apmtest/res/values/strings.xml4
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/api_call_statistics.cc95
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/api_call_statistics.h47
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/apmtest.m365
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/audio_buffer_tools.cc68
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/audio_buffer_tools.h42
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/audio_processing_builder_for_testing.cc51
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/audio_processing_builder_for_testing.h95
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/audio_processing_simulator.cc609
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/audio_processing_simulator.h247
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/audioproc_float_impl.cc815
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/audioproc_float_impl.h51
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/bitexactness_tools.cc148
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/bitexactness_tools.h56
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/conversational_speech/BUILD.gn81
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/conversational_speech/OWNERS3
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/conversational_speech/README.md74
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/conversational_speech/config.cc31
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/conversational_speech/config.h43
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/conversational_speech/generator.cc89
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/conversational_speech/generator_unittest.cc675
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/conversational_speech/mock_wavreader.cc34
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/conversational_speech/mock_wavreader.h48
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/conversational_speech/mock_wavreader_factory.cc66
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/conversational_speech/mock_wavreader_factory.h59
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/conversational_speech/multiend_call.cc193
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/conversational_speech/multiend_call.h104
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/conversational_speech/simulator.cc235
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/conversational_speech/simulator.h44
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/conversational_speech/timing.cc73
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/conversational_speech/timing.h51
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/conversational_speech/wavreader_abstract_factory.h34
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/conversational_speech/wavreader_factory.cc65
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/conversational_speech/wavreader_factory.h36
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/conversational_speech/wavreader_interface.h40
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/debug_dump_replayer.cc248
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/debug_dump_replayer.h78
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/debug_dump_test.cc535
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/echo_canceller_test_tools.cc47
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/echo_canceller_test_tools.h47
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/echo_canceller_test_tools_unittest.cc82
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/echo_control_mock.h46
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/fake_recording_device.cc190
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/fake_recording_device.h74
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/fake_recording_device_unittest.cc231
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/performance_timer.cc75
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/performance_timer.h47
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/protobuf_utils.cc79
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/protobuf_utils.h40
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/BUILD.gn170
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/OWNERS5
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/README.md125
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/apm_configs/default.json1
-rwxr-xr-xthird_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/apm_quality_assessment.py217
-rwxr-xr-xthird_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/apm_quality_assessment.sh91
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/apm_quality_assessment_boxplot.py154
-rwxr-xr-xthird_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/apm_quality_assessment_export.py63
-rwxr-xr-xthird_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/apm_quality_assessment_gencfgs.py128
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/apm_quality_assessment_optimize.py189
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/apm_quality_assessment_unittest.py28
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/output/README.md1
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/__init__.py7
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/annotations.py296
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/annotations_unittest.py160
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/apm_configs/default.json1
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/apm_vad.cc96
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/audioproc_wrapper.py100
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/collect_data.py243
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/data_access.py154
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/echo_path_simulation.py136
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/echo_path_simulation_factory.py48
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/echo_path_simulation_unittest.py82
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/eval_scores.py427
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/eval_scores_factory.py55
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/eval_scores_unittest.py137
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/evaluation.py57
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/exceptions.py45
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/export.py426
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/export_unittest.py86
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/external_vad.py75
-rwxr-xr-xthird_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/fake_external_vad.py25
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/fake_polqa.cc56
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/input_mixer.py97
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/input_mixer_unittest.py140
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/input_signal_creator.py68
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/results.css32
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/results.js376
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/signal_processing.py359
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/signal_processing_unittest.py183
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/simulation.py446
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/simulation_unittest.py203
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/sound_level.cc127
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/test_data_generation.py526
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/test_data_generation_factory.py71
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/test_data_generation_unittest.py207
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/vad.cc103
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/runtime_setting_util.cc50
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/runtime_setting_util.h23
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/simulator_buffers.cc86
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/simulator_buffers.h66
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/test_utils.cc89
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/test_utils.h170
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/unittest.proto48
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/wav_based_simulator.cc202
-rw-r--r--third_party/libwebrtc/modules/audio_processing/test/wav_based_simulator.h63
-rw-r--r--third_party/libwebrtc/modules/audio_processing/three_band_filter_bank.cc278
-rw-r--r--third_party/libwebrtc/modules/audio_processing/three_band_filter_bank.h77
-rw-r--r--third_party/libwebrtc/modules/audio_processing/transient/BUILD.gn133
-rw-r--r--third_party/libwebrtc/modules/audio_processing/transient/click_annotate.cc107
-rw-r--r--third_party/libwebrtc/modules/audio_processing/transient/common.h27
-rw-r--r--third_party/libwebrtc/modules/audio_processing/transient/daubechies_8_wavelet_coeffs.h44
-rw-r--r--third_party/libwebrtc/modules/audio_processing/transient/dyadic_decimator.h68
-rw-r--r--third_party/libwebrtc/modules/audio_processing/transient/dyadic_decimator_unittest.cc111
-rw-r--r--third_party/libwebrtc/modules/audio_processing/transient/file_utils.cc257
-rw-r--r--third_party/libwebrtc/modules/audio_processing/transient/file_utils.h117
-rw-r--r--third_party/libwebrtc/modules/audio_processing/transient/file_utils_unittest.cc501
-rw-r--r--third_party/libwebrtc/modules/audio_processing/transient/moving_moments.cc50
-rw-r--r--third_party/libwebrtc/modules/audio_processing/transient/moving_moments.h53
-rw-r--r--third_party/libwebrtc/modules/audio_processing/transient/moving_moments_unittest.cc207
-rw-r--r--third_party/libwebrtc/modules/audio_processing/transient/test/plotDetection.m22
-rw-r--r--third_party/libwebrtc/modules/audio_processing/transient/test/readDetection.m26
-rw-r--r--third_party/libwebrtc/modules/audio_processing/transient/test/readPCM.m26
-rw-r--r--third_party/libwebrtc/modules/audio_processing/transient/transient_detector.cc176
-rw-r--r--third_party/libwebrtc/modules/audio_processing/transient/transient_detector.h89
-rw-r--r--third_party/libwebrtc/modules/audio_processing/transient/transient_detector_unittest.cc95
-rw-r--r--third_party/libwebrtc/modules/audio_processing/transient/transient_suppression_test.cc238
-rw-r--r--third_party/libwebrtc/modules/audio_processing/transient/transient_suppressor.h75
-rw-r--r--third_party/libwebrtc/modules/audio_processing/transient/transient_suppressor_api_gn/moz.build189
-rw-r--r--third_party/libwebrtc/modules/audio_processing/transient/transient_suppressor_impl.cc455
-rw-r--r--third_party/libwebrtc/modules/audio_processing/transient/transient_suppressor_impl.h115
-rw-r--r--third_party/libwebrtc/modules/audio_processing/transient/transient_suppressor_impl_gn/moz.build216
-rw-r--r--third_party/libwebrtc/modules/audio_processing/transient/transient_suppressor_unittest.cc175
-rw-r--r--third_party/libwebrtc/modules/audio_processing/transient/voice_probability_delay_unit.cc56
-rw-r--r--third_party/libwebrtc/modules/audio_processing/transient/voice_probability_delay_unit.h43
-rw-r--r--third_party/libwebrtc/modules/audio_processing/transient/voice_probability_delay_unit_gn/moz.build201
-rw-r--r--third_party/libwebrtc/modules/audio_processing/transient/voice_probability_delay_unit_unittest.cc108
-rw-r--r--third_party/libwebrtc/modules/audio_processing/transient/windows_private.h557
-rw-r--r--third_party/libwebrtc/modules/audio_processing/transient/wpd_node.cc72
-rw-r--r--third_party/libwebrtc/modules/audio_processing/transient/wpd_node.h45
-rw-r--r--third_party/libwebrtc/modules/audio_processing/transient/wpd_node_unittest.cc64
-rw-r--r--third_party/libwebrtc/modules/audio_processing/transient/wpd_tree.cc118
-rw-r--r--third_party/libwebrtc/modules/audio_processing/transient/wpd_tree.h92
-rw-r--r--third_party/libwebrtc/modules/audio_processing/transient/wpd_tree_unittest.cc177
-rw-r--r--third_party/libwebrtc/modules/audio_processing/utility/BUILD.gn79
-rw-r--r--third_party/libwebrtc/modules/audio_processing/utility/DEPS3
-rw-r--r--third_party/libwebrtc/modules/audio_processing/utility/cascaded_biquad_filter.cc126
-rw-r--r--third_party/libwebrtc/modules/audio_processing/utility/cascaded_biquad_filter.h80
-rw-r--r--third_party/libwebrtc/modules/audio_processing/utility/cascaded_biquad_filter_gn/moz.build201
-rw-r--r--third_party/libwebrtc/modules/audio_processing/utility/cascaded_biquad_filter_unittest.cc157
-rw-r--r--third_party/libwebrtc/modules/audio_processing/utility/delay_estimator.cc708
-rw-r--r--third_party/libwebrtc/modules/audio_processing/utility/delay_estimator.h257
-rw-r--r--third_party/libwebrtc/modules/audio_processing/utility/delay_estimator_internal.h51
-rw-r--r--third_party/libwebrtc/modules/audio_processing/utility/delay_estimator_unittest.cc621
-rw-r--r--third_party/libwebrtc/modules/audio_processing/utility/delay_estimator_wrapper.cc489
-rw-r--r--third_party/libwebrtc/modules/audio_processing/utility/delay_estimator_wrapper.h248
-rw-r--r--third_party/libwebrtc/modules/audio_processing/utility/legacy_delay_estimator_gn/moz.build202
-rw-r--r--third_party/libwebrtc/modules/audio_processing/utility/pffft_wrapper.cc135
-rw-r--r--third_party/libwebrtc/modules/audio_processing/utility/pffft_wrapper.h94
-rw-r--r--third_party/libwebrtc/modules/audio_processing/utility/pffft_wrapper_gn/moz.build201
-rw-r--r--third_party/libwebrtc/modules/audio_processing/utility/pffft_wrapper_unittest.cc182
-rw-r--r--third_party/libwebrtc/modules/audio_processing/vad/BUILD.gn69
-rw-r--r--third_party/libwebrtc/modules/audio_processing/vad/common.h29
-rw-r--r--third_party/libwebrtc/modules/audio_processing/vad/gmm.cc61
-rw-r--r--third_party/libwebrtc/modules/audio_processing/vad/gmm.h45
-rw-r--r--third_party/libwebrtc/modules/audio_processing/vad/gmm_unittest.cc65
-rw-r--r--third_party/libwebrtc/modules/audio_processing/vad/noise_gmm_tables.h82
-rw-r--r--third_party/libwebrtc/modules/audio_processing/vad/pitch_based_vad.cc120
-rw-r--r--third_party/libwebrtc/modules/audio_processing/vad/pitch_based_vad.h57
-rw-r--r--third_party/libwebrtc/modules/audio_processing/vad/pitch_based_vad_unittest.cc75
-rw-r--r--third_party/libwebrtc/modules/audio_processing/vad/pitch_internal.cc55
-rw-r--r--third_party/libwebrtc/modules/audio_processing/vad/pitch_internal.h30
-rw-r--r--third_party/libwebrtc/modules/audio_processing/vad/pitch_internal_unittest.cc54
-rw-r--r--third_party/libwebrtc/modules/audio_processing/vad/pole_zero_filter.cc107
-rw-r--r--third_party/libwebrtc/modules/audio_processing/vad/pole_zero_filter.h51
-rw-r--r--third_party/libwebrtc/modules/audio_processing/vad/pole_zero_filter_unittest.cc103
-rw-r--r--third_party/libwebrtc/modules/audio_processing/vad/standalone_vad.cc91
-rw-r--r--third_party/libwebrtc/modules/audio_processing/vad/standalone_vad.h69
-rw-r--r--third_party/libwebrtc/modules/audio_processing/vad/standalone_vad_unittest.cc107
-rw-r--r--third_party/libwebrtc/modules/audio_processing/vad/vad_audio_proc.cc275
-rw-r--r--third_party/libwebrtc/modules/audio_processing/vad/vad_audio_proc.h90
-rw-r--r--third_party/libwebrtc/modules/audio_processing/vad/vad_audio_proc_internal.h79
-rw-r--r--third_party/libwebrtc/modules/audio_processing/vad/vad_audio_proc_unittest.cc62
-rw-r--r--third_party/libwebrtc/modules/audio_processing/vad/vad_circular_buffer.cc135
-rw-r--r--third_party/libwebrtc/modules/audio_processing/vad/vad_circular_buffer.h69
-rw-r--r--third_party/libwebrtc/modules/audio_processing/vad/vad_circular_buffer_unittest.cc134
-rw-r--r--third_party/libwebrtc/modules/audio_processing/vad/vad_gn/moz.build219
-rw-r--r--third_party/libwebrtc/modules/audio_processing/vad/voice_activity_detector.cc85
-rw-r--r--third_party/libwebrtc/modules/audio_processing/vad/voice_activity_detector.h74
-rw-r--r--third_party/libwebrtc/modules/audio_processing/vad/voice_activity_detector_unittest.cc168
-rw-r--r--third_party/libwebrtc/modules/audio_processing/vad/voice_gmm_tables.h77
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/BUILD.gn73
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/DEPS5
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/OWNERS6
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/congestion_controller_gn/moz.build216
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/goog_cc/BUILD.gn367
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/goog_cc/acknowledged_bitrate_estimator.cc70
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/goog_cc/acknowledged_bitrate_estimator.h52
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/goog_cc/acknowledged_bitrate_estimator_interface.cc92
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/goog_cc/acknowledged_bitrate_estimator_interface.h85
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/goog_cc/acknowledged_bitrate_estimator_unittest.cc136
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/goog_cc/alr_detector.cc111
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/goog_cc/alr_detector.h76
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/goog_cc/alr_detector_gn/moz.build205
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/goog_cc/alr_detector_unittest.cc206
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/goog_cc/bitrate_estimator.cc166
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/goog_cc/bitrate_estimator.h62
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/goog_cc/congestion_window_pushback_controller.cc81
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/goog_cc/congestion_window_pushback_controller.h48
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/goog_cc/congestion_window_pushback_controller_unittest.cc105
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/goog_cc/delay_based_bwe.cc305
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/goog_cc/delay_based_bwe.h132
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/goog_cc/delay_based_bwe_gn/moz.build216
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/goog_cc/delay_based_bwe_unittest.cc309
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/goog_cc/delay_based_bwe_unittest_helper.cc529
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/goog_cc/delay_based_bwe_unittest_helper.h189
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/goog_cc/delay_increase_detector_interface.h43
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/goog_cc/estimators_gn/moz.build220
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/goog_cc/goog_cc_gn/moz.build215
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/goog_cc/goog_cc_network_control.cc720
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/goog_cc/goog_cc_network_control.h150
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/goog_cc/goog_cc_network_control_unittest.cc958
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/goog_cc/inter_arrival_delta.cc140
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/goog_cc/inter_arrival_delta.h90
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/goog_cc/link_capacity_estimator.cc77
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/goog_cc/link_capacity_estimator.h38
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/goog_cc/link_capacity_estimator_gn/moz.build201
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/goog_cc/loss_based_bandwidth_estimation.cc260
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/goog_cc/loss_based_bandwidth_estimation.h97
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/goog_cc/loss_based_bwe_v1_gn/moz.build206
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/goog_cc/loss_based_bwe_v2.cc962
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/goog_cc/loss_based_bwe_v2.h175
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/goog_cc/loss_based_bwe_v2_gn/moz.build214
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/goog_cc/loss_based_bwe_v2_test.cc921
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/goog_cc/probe_bitrate_estimator.cc201
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/goog_cc/probe_bitrate_estimator.h58
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/goog_cc/probe_bitrate_estimator_unittest.cc228
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/goog_cc/probe_controller.cc442
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/goog_cc/probe_controller.h156
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/goog_cc/probe_controller_gn/moz.build205
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/goog_cc/probe_controller_unittest.cc449
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/goog_cc/pushback_controller_gn/moz.build205
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/goog_cc/robust_throughput_estimator.cc189
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/goog_cc/robust_throughput_estimator.h50
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/goog_cc/robust_throughput_estimator_unittest.cc427
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/goog_cc/send_side_bandwidth_estimation.cc680
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/goog_cc/send_side_bandwidth_estimation.h206
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/goog_cc/send_side_bandwidth_estimation_unittest.cc206
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/goog_cc/send_side_bwe_gn/moz.build215
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/goog_cc/test/goog_cc_printer.cc200
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/goog_cc/test/goog_cc_printer.h75
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/goog_cc/trendline_estimator.cc332
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/goog_cc/trendline_estimator.h125
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/goog_cc/trendline_estimator_unittest.cc151
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/include/receive_side_congestion_controller.h93
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/pcc/BUILD.gn123
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/pcc/bitrate_controller.cc139
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/pcc/bitrate_controller.h74
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/pcc/bitrate_controller_unittest.cc303
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/pcc/monitor_interval.cc135
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/pcc/monitor_interval.h71
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/pcc/monitor_interval_unittest.cc190
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/pcc/pcc_factory.cc30
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/pcc/pcc_factory.h30
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/pcc/pcc_network_controller.cc391
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/pcc/pcc_network_controller.h125
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/pcc/pcc_network_controller_unittest.cc119
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/pcc/rtt_tracker.cc41
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/pcc/rtt_tracker.h39
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/pcc/rtt_tracker_unittest.cc71
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/pcc/utility_function.cc86
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/pcc/utility_function.h78
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/pcc/utility_function_unittest.cc113
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/receive_side_congestion_controller.cc135
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/receive_side_congestion_controller_unittest.cc126
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/remb_throttler.cc63
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/remb_throttler.h54
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/remb_throttler_unittest.cc100
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/rtp/BUILD.gn104
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/rtp/control_handler.cc89
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/rtp/control_handler.h54
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/rtp/control_handler_gn/moz.build214
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/rtp/transport_feedback_adapter.cc275
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/rtp/transport_feedback_adapter.h102
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/rtp/transport_feedback_adapter_unittest.cc407
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/rtp/transport_feedback_demuxer.cc94
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/rtp/transport_feedback_demuxer.h60
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/rtp/transport_feedback_demuxer_unittest.cc99
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/rtp/transport_feedback_gn/moz.build215
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/BUILD.gn752
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/DEPS19
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/OWNERS2
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/blank_detector_desktop_capturer_wrapper.cc139
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/blank_detector_desktop_capturer_wrapper.h83
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/blank_detector_desktop_capturer_wrapper_unittest.cc165
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/cropped_desktop_frame.cc66
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/cropped_desktop_frame.h33
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/cropped_desktop_frame_unittest.cc115
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/cropping_window_capturer.cc135
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/cropping_window_capturer.h84
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/cropping_window_capturer_win.cc327
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/desktop_and_cursor_composer.cc260
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/desktop_and_cursor_composer.h98
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/desktop_and_cursor_composer_unittest.cc479
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/desktop_capture_differ_sse2_gn/moz.build142
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/desktop_capture_gn/moz.build439
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/desktop_capture_metadata.h31
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/desktop_capture_metrics_helper.cc60
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/desktop_capture_metrics_helper.h22
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/desktop_capture_objc_gn/moz.build72
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/desktop_capture_options.cc61
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/desktop_capture_options.h244
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/desktop_capture_types.h77
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/desktop_capturer.cc122
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/desktop_capturer.h188
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/desktop_capturer_differ_wrapper.cc232
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/desktop_capturer_differ_wrapper.h72
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/desktop_capturer_differ_wrapper_unittest.cc291
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/desktop_capturer_wrapper.cc60
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/desktop_capturer_wrapper.h48
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/desktop_frame.cc209
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/desktop_frame.h227
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/desktop_frame_generator.cc184
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/desktop_frame_generator.h121
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/desktop_frame_rotation.cc117
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/desktop_frame_rotation.h52
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/desktop_frame_rotation_unittest.cc449
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/desktop_frame_unittest.cc336
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/desktop_frame_win.cc73
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/desktop_frame_win.h49
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/desktop_geometry.cc79
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/desktop_geometry.h169
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/desktop_geometry_unittest.cc106
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/desktop_region.cc567
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/desktop_region.h169
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/desktop_region_unittest.cc834
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/differ_block.cc71
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/differ_block.h42
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/differ_block_unittest.cc89
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/differ_vector_sse2.cc102
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/differ_vector_sse2.h31
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/fake_desktop_capturer.cc84
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/fake_desktop_capturer.h76
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/fallback_desktop_capturer_wrapper.cc183
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/fallback_desktop_capturer_wrapper.h64
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/fallback_desktop_capturer_wrapper_unittest.cc207
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/full_screen_application_handler.cc30
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/full_screen_application_handler.h50
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/full_screen_window_detector.cc84
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/full_screen_window_detector.h83
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/linux/wayland/base_capturer_pipewire.cc158
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/linux/wayland/base_capturer_pipewire.h74
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/linux/wayland/drm.sigs11
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/linux/wayland/egl_dmabuf.cc703
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/linux/wayland/egl_dmabuf.h68
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/linux/wayland/mouse_cursor_monitor_pipewire.cc59
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/linux/wayland/mouse_cursor_monitor_pipewire.h44
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/linux/wayland/moz_base_capturer_pipewire.cc1081
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/linux/wayland/moz_base_capturer_pipewire.h182
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/linux/wayland/pipewire.sigs50
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/linux/wayland/pipewire_stub_header.fragment9
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/linux/wayland/portal_request_response.h34
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/linux/wayland/restore_token_manager.cc33
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/linux/wayland/restore_token_manager.h41
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/linux/wayland/scoped_glib.cc57
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/linux/wayland/scoped_glib.h65
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/linux/wayland/screen_capture_portal_interface.cc127
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/linux/wayland/screen_capture_portal_interface.h72
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/linux/wayland/screencast_portal.cc455
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/linux/wayland/screencast_portal.h213
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/linux/wayland/screencast_stream_utils.cc132
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/linux/wayland/screencast_stream_utils.h62
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/linux/wayland/shared_screencast_stream.cc870
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/linux/wayland/shared_screencast_stream.h76
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/linux/wayland/xdg_desktop_portal_utils.cc195
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/linux/wayland/xdg_desktop_portal_utils.h111
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/linux/wayland/xdg_session_details.h33
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/linux/x11/mouse_cursor_monitor_x11.cc255
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/linux/x11/mouse_cursor_monitor_x11.h68
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/linux/x11/screen_capturer_x11.cc512
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/linux/x11/screen_capturer_x11.h147
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/linux/x11/shared_x_display.cc102
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/linux/x11/shared_x_display.h84
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/linux/x11/window_capturer_x11.cc255
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/linux/x11/window_capturer_x11.h78
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/linux/x11/window_finder_x11.cc52
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/linux/x11/window_finder_x11.h35
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/linux/x11/window_list_utils.cc198
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/linux/x11/window_list_utils.h56
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/linux/x11/x_atom_cache.cc51
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/linux/x11/x_atom_cache.h45
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/linux/x11/x_error_trap.cc70
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/linux/x11/x_error_trap.h51
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/linux/x11/x_server_pixel_buffer.cc379
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/linux/x11/x_server_pixel_buffer.h89
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/linux/x11/x_window_property.cc43
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/linux/x11/x_window_property.h63
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/mac/desktop_configuration.h96
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/mac/desktop_configuration.mm179
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/mac/desktop_configuration_monitor.cc73
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/mac/desktop_configuration_monitor.h55
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/mac/desktop_frame_cgimage.h58
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/mac/desktop_frame_cgimage.mm108
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/mac/desktop_frame_iosurface.h45
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/mac/desktop_frame_iosurface.mm61
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/mac/desktop_frame_provider.h59
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/mac/desktop_frame_provider.mm70
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/mac/full_screen_mac_application_handler.cc238
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/mac/full_screen_mac_application_handler.h24
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/mac/screen_capturer_mac.h128
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/mac/screen_capturer_mac.mm552
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/mac/window_list_utils.cc430
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/mac/window_list_utils.h117
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/mock_desktop_capturer_callback.cc23
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/mock_desktop_capturer_callback.h39
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/mouse_cursor.cc36
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/mouse_cursor.h49
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/mouse_cursor_monitor.h111
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/mouse_cursor_monitor_linux.cc65
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/mouse_cursor_monitor_mac.mm213
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/mouse_cursor_monitor_null.cc38
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/mouse_cursor_monitor_unittest.cc128
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/mouse_cursor_monitor_win.cc222
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/primitives_gn/moz.build185
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/resolution_tracker.cc34
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/resolution_tracker.h34
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/rgba_color.cc61
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/rgba_color.h59
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/rgba_color_unittest.cc45
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/screen_capture_frame_queue.h75
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/screen_capturer_darwin.mm33
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/screen_capturer_fuchsia.cc415
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/screen_capturer_fuchsia.h65
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/screen_capturer_helper.cc90
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/screen_capturer_helper.h91
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/screen_capturer_helper_unittest.cc193
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/screen_capturer_integration_test.cc380
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/screen_capturer_linux.cc50
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/screen_capturer_mac.mm766
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/screen_capturer_mac_unittest.cc101
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/screen_capturer_null.cc21
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/screen_capturer_unittest.cc224
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/screen_capturer_win.cc62
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/screen_drawer.cc30
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/screen_drawer.h79
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/screen_drawer_linux.cc185
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/screen_drawer_lock_posix.cc59
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/screen_drawer_lock_posix.h39
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/screen_drawer_mac.cc30
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/screen_drawer_unittest.cc160
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/screen_drawer_win.cc209
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/shared_desktop_frame.cc59
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/shared_desktop_frame.h65
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/shared_memory.cc24
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/shared_memory.h82
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/test_utils.cc50
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/test_utils.h27
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/test_utils_unittest.cc110
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/win/cursor.cc233
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/win/cursor.h25
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/win/cursor_test_data/1_24bpp.cur bin 0 -> 3262 bytes
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/win/cursor_test_data/1_32bpp.cur bin 0 -> 4286 bytes
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/win/cursor_test_data/1_8bpp.cur bin 0 -> 2238 bytes
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/win/cursor_test_data/2_1bpp.cur bin 0 -> 326 bytes
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/win/cursor_test_data/2_32bpp.cur bin 0 -> 4286 bytes
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/win/cursor_test_data/3_32bpp.cur bin 0 -> 4286 bytes
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/win/cursor_test_data/3_4bpp.cur bin 0 -> 766 bytes
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/win/cursor_unittest.cc91
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/win/cursor_unittest_resources.h24
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/win/cursor_unittest_resources.rc28
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/win/d3d_device.cc100
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/win/d3d_device.h59
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/win/desktop.cc111
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/win/desktop.h65
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/win/desktop_capture_utils.cc32
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/win/desktop_capture_utils.h29
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/win/display_configuration_monitor.cc37
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/win/display_configuration_monitor.h38
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/win/dxgi_adapter_duplicator.cc185
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/win/dxgi_adapter_duplicator.h92
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/win/dxgi_context.cc33
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/win/dxgi_context.h62
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/win/dxgi_duplicator_controller.cc514
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/win/dxgi_duplicator_controller.h253
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/win/dxgi_frame.cc77
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/win/dxgi_frame.h63
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/win/dxgi_output_duplicator.cc390
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/win/dxgi_output_duplicator.h149
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/win/dxgi_texture.cc81
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/win/dxgi_texture.h73
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/win/dxgi_texture_mapping.cc58
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/win/dxgi_texture_mapping.h47
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/win/dxgi_texture_staging.cc132
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/win/dxgi_texture_staging.h68
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/win/full_screen_win_application_handler.cc296
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/win/full_screen_win_application_handler.h24
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/win/scoped_gdi_object.h91
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/win/scoped_thread_desktop.cc54
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/win/scoped_thread_desktop.h55
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/win/screen_capture_utils.cc184
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/win/screen_capture_utils.h75
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/win/screen_capture_utils_unittest.cc81
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/win/screen_capturer_win_directx.cc230
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/win/screen_capturer_win_directx.h105
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/win/screen_capturer_win_directx_unittest.cc41
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/win/screen_capturer_win_gdi.cc243
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/win/screen_capturer_win_gdi.h85
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/win/screen_capturer_win_magnifier.cc398
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/win/screen_capturer_win_magnifier.h140
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/win/selected_window_context.cc59
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/win/selected_window_context.h45
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/win/test_support/test_window.cc104
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/win/test_support/test_window.h51
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/win/wgc_capture_session.cc423
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/win/wgc_capture_session.h137
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/win/wgc_capture_source.cc218
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/win/wgc_capture_source.h141
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/win/wgc_capture_source_unittest.cc148
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/win/wgc_capturer_win.cc363
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/win/wgc_capturer_win.h166
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/win/wgc_capturer_win_unittest.cc572
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/win/wgc_desktop_frame.cc25
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/win/wgc_desktop_frame.h46
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/win/window_capture_utils.cc486
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/win/window_capture_utils.h136
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/win/window_capture_utils_unittest.cc153
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/win/window_capturer_win_gdi.cc400
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/win/window_capturer_win_gdi.h78
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/window_capturer_linux.cc50
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/window_capturer_mac.mm211
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/window_capturer_null.cc70
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/window_capturer_unittest.cc105
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/window_capturer_win.cc48
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/window_finder.cc20
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/window_finder.h65
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/window_finder_mac.h37
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/window_finder_mac.mm52
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/window_finder_unittest.cc178
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/window_finder_win.cc46
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/window_finder_win.h30
-rw-r--r--third_party/libwebrtc/modules/include/module_common_types.h66
-rw-r--r--third_party/libwebrtc/modules/include/module_common_types_public.h111
-rw-r--r--third_party/libwebrtc/modules/include/module_fec_types.h34
-rw-r--r--third_party/libwebrtc/modules/module_api_gn/moz.build189
-rw-r--r--third_party/libwebrtc/modules/module_api_public_gn/moz.build189
-rw-r--r--third_party/libwebrtc/modules/module_common_types_unittest.cc224
-rw-r--r--third_party/libwebrtc/modules/module_fec_api_gn/moz.build189
-rw-r--r--third_party/libwebrtc/modules/pacing/BUILD.gn119
-rw-r--r--third_party/libwebrtc/modules/pacing/DEPS6
-rw-r--r--third_party/libwebrtc/modules/pacing/OWNERS6
-rw-r--r--third_party/libwebrtc/modules/pacing/bitrate_prober.cc204
-rw-r--r--third_party/libwebrtc/modules/pacing/bitrate_prober.h123
-rw-r--r--third_party/libwebrtc/modules/pacing/bitrate_prober_unittest.cc287
-rw-r--r--third_party/libwebrtc/modules/pacing/g3doc/index.md164
-rw-r--r--third_party/libwebrtc/modules/pacing/interval_budget.cc68
-rw-r--r--third_party/libwebrtc/modules/pacing/interval_budget.h44
-rw-r--r--third_party/libwebrtc/modules/pacing/interval_budget_gn/moz.build201
-rw-r--r--third_party/libwebrtc/modules/pacing/interval_budget_unittest.cc123
-rw-r--r--third_party/libwebrtc/modules/pacing/pacing_controller.cc681
-rw-r--r--third_party/libwebrtc/modules/pacing/pacing_controller.h273
-rw-r--r--third_party/libwebrtc/modules/pacing/pacing_controller_unittest.cc2065
-rw-r--r--third_party/libwebrtc/modules/pacing/pacing_gn/moz.build222
-rw-r--r--third_party/libwebrtc/modules/pacing/packet_router.cc335
-rw-r--r--third_party/libwebrtc/modules/pacing/packet_router.h114
-rw-r--r--third_party/libwebrtc/modules/pacing/packet_router_unittest.cc673
-rw-r--r--third_party/libwebrtc/modules/pacing/prioritized_packet_queue.cc267
-rw-r--r--third_party/libwebrtc/modules/pacing/prioritized_packet_queue.h122
-rw-r--r--third_party/libwebrtc/modules/pacing/prioritized_packet_queue_unittest.cc291
-rw-r--r--third_party/libwebrtc/modules/pacing/round_robin_packet_queue.cc403
-rw-r--r--third_party/libwebrtc/modules/pacing/round_robin_packet_queue.h172
-rw-r--r--third_party/libwebrtc/modules/pacing/round_robin_packet_queue_unittest.cc98
-rw-r--r--third_party/libwebrtc/modules/pacing/rtp_packet_pacer.h74
-rw-r--r--third_party/libwebrtc/modules/pacing/task_queue_paced_sender.cc356
-rw-r--r--third_party/libwebrtc/modules/pacing/task_queue_paced_sender.h196
-rw-r--r--third_party/libwebrtc/modules/pacing/task_queue_paced_sender_unittest.cc839
-rw-r--r--third_party/libwebrtc/modules/remote_bitrate_estimator/BUILD.gn144
-rw-r--r--third_party/libwebrtc/modules/remote_bitrate_estimator/DEPS6
-rw-r--r--third_party/libwebrtc/modules/remote_bitrate_estimator/OWNERS6
-rw-r--r--third_party/libwebrtc/modules/remote_bitrate_estimator/aimd_rate_control.cc440
-rw-r--r--third_party/libwebrtc/modules/remote_bitrate_estimator/aimd_rate_control.h126
-rw-r--r--third_party/libwebrtc/modules/remote_bitrate_estimator/aimd_rate_control_unittest.cc484
-rw-r--r--third_party/libwebrtc/modules/remote_bitrate_estimator/bwe_defines.cc24
-rw-r--r--third_party/libwebrtc/modules/remote_bitrate_estimator/include/bwe_defines.h46
-rw-r--r--third_party/libwebrtc/modules/remote_bitrate_estimator/include/remote_bitrate_estimator.h70
-rw-r--r--third_party/libwebrtc/modules/remote_bitrate_estimator/inter_arrival.cc163
-rw-r--r--third_party/libwebrtc/modules/remote_bitrate_estimator/inter_arrival.h95
-rw-r--r--third_party/libwebrtc/modules/remote_bitrate_estimator/inter_arrival_unittest.cc531
-rw-r--r--third_party/libwebrtc/modules/remote_bitrate_estimator/overuse_detector.cc160
-rw-r--r--third_party/libwebrtc/modules/remote_bitrate_estimator/overuse_detector.h62
-rw-r--r--third_party/libwebrtc/modules/remote_bitrate_estimator/overuse_detector_unittest.cc809
-rw-r--r--third_party/libwebrtc/modules/remote_bitrate_estimator/overuse_estimator.cc164
-rw-r--r--third_party/libwebrtc/modules/remote_bitrate_estimator/overuse_estimator.h83
-rw-r--r--third_party/libwebrtc/modules/remote_bitrate_estimator/packet_arrival_map.cc193
-rw-r--r--third_party/libwebrtc/modules/remote_bitrate_estimator/packet_arrival_map.h127
-rw-r--r--third_party/libwebrtc/modules/remote_bitrate_estimator/packet_arrival_map_test.cc251
-rw-r--r--third_party/libwebrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_abs_send_time.cc399
-rw-r--r--third_party/libwebrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_abs_send_time.h135
-rw-r--r--third_party/libwebrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_abs_send_time_unittest.cc296
-rw-r--r--third_party/libwebrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_gn/moz.build226
-rw-r--r--third_party/libwebrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_single_stream.cc252
-rw-r--r--third_party/libwebrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_single_stream.h87
-rw-r--r--third_party/libwebrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_single_stream_unittest.cc78
-rw-r--r--third_party/libwebrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_unittest_helper.cc594
-rw-r--r--third_party/libwebrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_unittest_helper.h225
-rw-r--r--third_party/libwebrtc/modules/remote_bitrate_estimator/remote_estimator_proxy.cc328
-rw-r--r--third_party/libwebrtc/modules/remote_bitrate_estimator/remote_estimator_proxy.h143
-rw-r--r--third_party/libwebrtc/modules/remote_bitrate_estimator/remote_estimator_proxy_unittest.cc649
-rw-r--r--third_party/libwebrtc/modules/remote_bitrate_estimator/test/bwe_test_logging.cc262
-rw-r--r--third_party/libwebrtc/modules/remote_bitrate_estimator/test/bwe_test_logging.h360
-rw-r--r--third_party/libwebrtc/modules/remote_bitrate_estimator/tools/bwe_rtp.cc109
-rw-r--r--third_party/libwebrtc/modules/remote_bitrate_estimator/tools/bwe_rtp.h25
-rw-r--r--third_party/libwebrtc/modules/remote_bitrate_estimator/tools/rtp_to_text.cc67
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/BUILD.gn673
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/DEPS8
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/OWNERS6
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/include/flexfec_receiver.h80
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/include/flexfec_sender.h103
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/include/receive_statistics.h83
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/include/remote_ntp_time_estimator.h74
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/include/report_block_data.cc44
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/include/report_block_data.h59
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/include/rtcp_statistics.h77
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/include/rtp_cvo.h56
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/include/rtp_header_extension_map.h75
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/include/rtp_packet_sender.h35
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/include/rtp_rtcp.h45
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/include/rtp_rtcp_defines.cc62
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/include/rtp_rtcp_defines.h494
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/mocks/mock_recovered_packet_receiver.h29
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/mocks/mock_rtcp_bandwidth_observer.h28
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/mocks/mock_rtcp_rtt_stats.h25
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/mocks/mock_rtp_rtcp.h189
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/rtp_rtcp_format_gn/moz.build256
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/rtp_rtcp_gn/moz.build266
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/rtp_video_header_gn/moz.build205
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/absolute_capture_time_interpolator.cc125
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/absolute_capture_time_interpolator.h86
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/absolute_capture_time_interpolator_unittest.cc353
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/absolute_capture_time_sender.cc124
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/absolute_capture_time_sender.h88
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/absolute_capture_time_sender_unittest.cc374
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/active_decode_targets_helper.cc124
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/active_decode_targets_helper.h63
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/active_decode_targets_helper_unittest.cc272
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/byte_io.h402
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/byte_io_unittest.cc270
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/capture_clock_offset_updater.cc33
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/capture_clock_offset_updater.h51
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/capture_clock_offset_updater_unittest.cc58
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/create_video_rtp_depacketizer.cc43
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/create_video_rtp_depacketizer.h26
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/deprecated/deprecated_rtp_sender_egress.cc481
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/deprecated/deprecated_rtp_sender_egress.h152
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/dtmf_queue.cc51
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/dtmf_queue.h43
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/fec_private_tables_bursty.cc660
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/fec_private_tables_bursty.h37
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/fec_private_tables_bursty_unittest.cc82
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/fec_private_tables_random.cc660
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/fec_private_tables_random.h27
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/fec_test_helper.cc230
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/fec_test_helper.h125
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/flexfec_header_reader_writer.cc320
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/flexfec_header_reader_writer.h88
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/flexfec_header_reader_writer_unittest.cc560
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/flexfec_receiver.cc190
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/flexfec_receiver_unittest.cc706
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/flexfec_sender.cc204
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/flexfec_sender_unittest.cc342
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/forward_error_correction.cc807
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/forward_error_correction.h421
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/forward_error_correction_internal.cc519
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/forward_error_correction_internal.h121
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/nack_rtx_unittest.cc293
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/packet_loss_stats.cc141
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/packet_loss_stats.h58
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/packet_loss_stats_unittest.cc198
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/packet_sequencer.cc156
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/packet_sequencer.h77
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/packet_sequencer_unittest.cc250
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/receive_statistics_impl.cc389
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/receive_statistics_impl.h246
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/receive_statistics_unittest.cc582
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/remote_ntp_time_estimator.cc109
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/remote_ntp_time_estimator_unittest.cc128
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_nack_stats.cc29
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_nack_stats.h40
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_nack_stats_unittest.cc64
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet.cc99
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet.h111
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/app.cc101
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/app.h67
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/app_unittest.cc110
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/bye.cc141
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/bye.h57
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/bye_unittest.cc147
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/common_header.cc89
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/common_header.h52
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/common_header_unittest.cc103
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/compound_packet.cc50
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/compound_packet.h47
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/compound_packet_unittest.cc155
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/dlrr.cc94
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/dlrr.h80
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/dlrr_unittest.cc92
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/extended_reports.cc195
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/extended_reports.h73
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/extended_reports_unittest.cc169
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/fir.cc113
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/fir.h62
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/fir_unittest.cc93
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/loss_notification.cc133
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/loss_notification.h82
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/loss_notification_unittest.cc136
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/nack.cc176
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/nack.h59
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/nack_unittest.cc171
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/pli.cc79
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/pli.h39
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/pli_unittest.cc58
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/psfb.cc47
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/psfb.h48
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/rapid_resync_request.cc68
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/rapid_resync_request.h40
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/rapid_resync_request_unittest.cc64
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/receiver_report.cc112
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/receiver_report.h60
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/receiver_report_unittest.cc161
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/remb.cc143
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/remb.h59
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/remb_unittest.cc128
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/remote_estimate.cc148
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/remote_estimate.h59
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/remote_estimate_unittest.cc56
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/report_block.cc100
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/report_block.h72
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/report_block_unittest.cc110
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/rrtr.cc49
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/rrtr.h59
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/rrtr_unittest.cc50
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/rtpfb.cc45
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/rtpfb.h47
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/sdes.cc199
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/sdes.h56
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/sdes_unittest.cc244
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/sender_report.cc141
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/sender_report.h81
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/sender_report_unittest.cc142
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/target_bitrate.cc127
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/target_bitrate.h63
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/target_bitrate_unittest.cc96
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/tmmb_item.cc71
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/tmmb_item.h52
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/tmmbn.cc109
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/tmmbn.h55
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/tmmbn_unittest.cc105
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/tmmbr.cc111
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/tmmbr.h54
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/tmmbr_unittest.cc89
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/transport_feedback.cc747
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/transport_feedback.h185
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/transport_feedback_unittest.cc664
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet_unittest.cc42
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_receiver.cc1292
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_receiver.h457
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_receiver_unittest.cc2012
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_sender.cc974
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_sender.h332
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_sender_unittest.cc844
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_transceiver.cc150
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_transceiver.h105
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_transceiver_config.cc80
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_transceiver_config.h188
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_transceiver_impl.cc857
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_transceiver_impl.h170
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_transceiver_impl_unittest.cc1744
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_transceiver_unittest.cc365
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtp_dependency_descriptor_extension.cc55
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtp_dependency_descriptor_extension.h61
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtp_dependency_descriptor_extension_unittest.cc137
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtp_dependency_descriptor_reader.cc239
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtp_dependency_descriptor_reader.h69
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtp_dependency_descriptor_writer.cc396
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtp_dependency_descriptor_writer.h89
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtp_descriptor_authentication.cc58
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtp_descriptor_authentication.h27
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtp_fec_unittest.cc1129
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtp_format.cc144
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtp_format.h61
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtp_format_h264.cc318
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtp_format_h264.h89
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtp_format_h264_unittest.cc502
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtp_format_unittest.cc283
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtp_format_video_generic.cc100
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtp_format_video_generic.h71
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtp_format_video_generic_unittest.cc172
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtp_format_vp8.cc169
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtp_format_vp8.h74
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtp_format_vp8_test_helper.cc174
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtp_format_vp8_test_helper.h56
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtp_format_vp8_unittest.cc115
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtp_format_vp9.cc451
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtp_format_vp9.h72
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtp_format_vp9_unittest.cc608
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtp_generic_frame_descriptor.cc100
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtp_generic_frame_descriptor.h79
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtp_generic_frame_descriptor_extension.cc173
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtp_generic_frame_descriptor_extension.h42
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtp_generic_frame_descriptor_extension_unittest.cc266
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtp_header_extension_map.cc169
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtp_header_extension_map_unittest.cc115
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtp_header_extension_size.cc48
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtp_header_extension_size.h32
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtp_header_extension_size_unittest.cc92
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtp_header_extensions.cc934
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtp_header_extensions.h386
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtp_packet.cc706
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtp_packet.h263
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtp_packet_history.cc428
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtp_packet_history.h196
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtp_packet_history_unittest.cc681
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtp_packet_received.cc81
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtp_packet_received.h77
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtp_packet_to_send.cc31
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtp_packet_to_send.h136
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtp_packet_unittest.cc1273
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtp_packetizer_av1.cc424
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtp_packetizer_av1.h72
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtp_packetizer_av1_test_helper.cc57
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtp_packetizer_av1_test_helper.h51
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtp_packetizer_av1_unittest.cc342
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtp_rtcp_config.h30
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtp_rtcp_impl.cc769
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtp_rtcp_impl.h327
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtp_rtcp_impl2.cc827
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtp_rtcp_impl2.h340
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtp_rtcp_impl2_unittest.cc1159
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtp_rtcp_impl_unittest.cc700
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtp_rtcp_interface.h469
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtp_sender.cc791
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtp_sender.h217
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtp_sender_audio.cc400
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtp_sender_audio.h122
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtp_sender_audio_unittest.cc264
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtp_sender_egress.cc645
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtp_sender_egress.h184
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtp_sender_egress_unittest.cc971
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtp_sender_unittest.cc1339
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtp_sender_video.cc918
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtp_sender_video.h254
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtp_sender_video_frame_transformer_delegate.cc190
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtp_sender_video_frame_transformer_delegate.h84
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtp_sender_video_unittest.cc1571
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtp_sequence_number_map.cc129
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtp_sequence_number_map.h85
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtp_sequence_number_map_unittest.cc502
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtp_util.cc63
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtp_util.h31
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtp_util_unittest.cc86
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtp_video_header.cc24
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtp_video_header.h94
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtp_video_layers_allocation_extension.cc416
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtp_video_layers_allocation_extension.h39
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtp_video_layers_allocation_extension_unittest.cc287
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/source_tracker.cc104
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/source_tracker.h130
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/source_tracker_unittest.cc469
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/time_util.cc54
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/time_util.h56
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/time_util_unittest.cc128
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/tmmbr_help.cc184
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/tmmbr_help.h35
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/ulpfec_generator.cc268
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/ulpfec_generator.h123
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/ulpfec_generator_unittest.cc273
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/ulpfec_header_reader_writer.cc141
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/ulpfec_header_reader_writer.h69
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/ulpfec_header_reader_writer_unittest.cc245
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/ulpfec_receiver.cc249
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/ulpfec_receiver.h82
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/ulpfec_receiver_unittest.cc545
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/video_fec_generator.h54
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/video_rtp_depacketizer.cc42
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/video_rtp_depacketizer.h41
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/video_rtp_depacketizer_av1.cc407
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/video_rtp_depacketizer_av1.h42
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/video_rtp_depacketizer_av1_unittest.cc392
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/video_rtp_depacketizer_generic.cc72
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/video_rtp_depacketizer_generic.h30
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/video_rtp_depacketizer_generic_unittest.cc71
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/video_rtp_depacketizer_h264.cc315
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/video_rtp_depacketizer_h264.h28
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/video_rtp_depacketizer_h264_unittest.cc430
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/video_rtp_depacketizer_raw.cc28
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/video_rtp_depacketizer_raw.h30
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/video_rtp_depacketizer_raw_unittest.cc51
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/video_rtp_depacketizer_vp8.cc201
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/video_rtp_depacketizer_vp8.h42
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/video_rtp_depacketizer_vp8_unittest.cc244
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/video_rtp_depacketizer_vp9.cc226
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/video_rtp_depacketizer_vp9.h42
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/video_rtp_depacketizer_vp9_unittest.cc373
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/test/testFec/average_residual_loss_xor_codes.h57
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/test/testFec/test_fec.cc474
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/test/testFec/test_packet_masks_metrics.cc1060
-rw-r--r--third_party/libwebrtc/modules/third_party/fft/BUILD.gn16
-rw-r--r--third_party/libwebrtc/modules/third_party/fft/LICENSE25
-rw-r--r--third_party/libwebrtc/modules/third_party/fft/README.chromium12
-rw-r--r--third_party/libwebrtc/modules/third_party/fft/fft.c942
-rw-r--r--third_party/libwebrtc/modules/third_party/fft/fft.h58
-rw-r--r--third_party/libwebrtc/modules/third_party/fft/fft_gn/moz.build197
-rw-r--r--third_party/libwebrtc/modules/third_party/g711/BUILD.gn17
-rw-r--r--third_party/libwebrtc/modules/third_party/g711/LICENSE14
-rw-r--r--third_party/libwebrtc/modules/third_party/g711/README.chromium11
-rw-r--r--third_party/libwebrtc/modules/third_party/g711/g711.c72
-rw-r--r--third_party/libwebrtc/modules/third_party/g711/g711.h350
-rw-r--r--third_party/libwebrtc/modules/third_party/g711/g711_3p_gn/moz.build197
-rw-r--r--third_party/libwebrtc/modules/third_party/g722/BUILD.gn18
-rw-r--r--third_party/libwebrtc/modules/third_party/g722/LICENSE20
-rw-r--r--third_party/libwebrtc/modules/third_party/g722/README.chromium11
-rw-r--r--third_party/libwebrtc/modules/third_party/g722/g722_3p_gn/moz.build201
-rw-r--r--third_party/libwebrtc/modules/third_party/g722/g722_decode.c399
-rw-r--r--third_party/libwebrtc/modules/third_party/g722/g722_enc_dec.h153
-rw-r--r--third_party/libwebrtc/modules/third_party/g722/g722_encode.c429
-rw-r--r--third_party/libwebrtc/modules/third_party/portaudio/BUILD.gn18
-rw-r--r--third_party/libwebrtc/modules/third_party/portaudio/LICENSE91
-rw-r--r--third_party/libwebrtc/modules/third_party/portaudio/README.chromium14
-rw-r--r--third_party/libwebrtc/modules/third_party/portaudio/pa_memorybarrier.h144
-rw-r--r--third_party/libwebrtc/modules/third_party/portaudio/pa_ringbuffer.c237
-rw-r--r--third_party/libwebrtc/modules/third_party/portaudio/pa_ringbuffer.h263
-rw-r--r--third_party/libwebrtc/modules/utility/BUILD.gn36
-rw-r--r--third_party/libwebrtc/modules/utility/DEPS5
-rw-r--r--third_party/libwebrtc/modules/utility/OWNERS1
-rw-r--r--third_party/libwebrtc/modules/utility/include/helpers_android.h80
-rw-r--r--third_party/libwebrtc/modules/utility/include/jvm_android.h193
-rw-r--r--third_party/libwebrtc/modules/utility/source/helpers_android.cc117
-rw-r--r--third_party/libwebrtc/modules/utility/source/jvm_android.cc292
-rw-r--r--third_party/libwebrtc/modules/utility/utility_gn/moz.build202
-rw-r--r--third_party/libwebrtc/modules/video_capture/BUILD.gn158
-rw-r--r--third_party/libwebrtc/modules/video_capture/DEPS6
-rw-r--r--third_party/libwebrtc/modules/video_capture/OWNERS4
-rw-r--r--third_party/libwebrtc/modules/video_capture/device_info_impl.cc223
-rw-r--r--third_party/libwebrtc/modules/video_capture/device_info_impl.h63
-rw-r--r--third_party/libwebrtc/modules/video_capture/linux/device_info_linux.cc42
-rw-r--r--third_party/libwebrtc/modules/video_capture/linux/device_info_v4l2.cc449
-rw-r--r--third_party/libwebrtc/modules/video_capture/linux/device_info_v4l2.h71
-rw-r--r--third_party/libwebrtc/modules/video_capture/linux/video_capture_linux.cc51
-rw-r--r--third_party/libwebrtc/modules/video_capture/linux/video_capture_v4l2.cc433
-rw-r--r--third_party/libwebrtc/modules/video_capture/linux/video_capture_v4l2.h65
-rw-r--r--third_party/libwebrtc/modules/video_capture/test/video_capture_unittest.cc343
-rw-r--r--third_party/libwebrtc/modules/video_capture/video_capture.h166
-rw-r--r--third_party/libwebrtc/modules/video_capture/video_capture_config.h33
-rw-r--r--third_party/libwebrtc/modules/video_capture/video_capture_defines.h71
-rw-r--r--third_party/libwebrtc/modules/video_capture/video_capture_factory.cc26
-rw-r--r--third_party/libwebrtc/modules/video_capture/video_capture_factory.h40
-rw-r--r--third_party/libwebrtc/modules/video_capture/video_capture_factory_null.cc27
-rw-r--r--third_party/libwebrtc/modules/video_capture/video_capture_impl.cc302
-rw-r--r--third_party/libwebrtc/modules/video_capture/video_capture_impl.h112
-rw-r--r--third_party/libwebrtc/modules/video_capture/video_capture_internal_impl_gn/moz.build239
-rw-r--r--third_party/libwebrtc/modules/video_capture/video_capture_module_gn/moz.build218
-rw-r--r--third_party/libwebrtc/modules/video_capture/windows/device_info_ds.cc705
-rw-r--r--third_party/libwebrtc/modules/video_capture/windows/device_info_ds.h107
-rw-r--r--third_party/libwebrtc/modules/video_capture/windows/help_functions_ds.cc155
-rw-r--r--third_party/libwebrtc/modules/video_capture/windows/help_functions_ds.h118
-rw-r--r--third_party/libwebrtc/modules/video_capture/windows/sink_filter_ds.cc959
-rw-r--r--third_party/libwebrtc/modules/video_capture/windows/sink_filter_ds.h162
-rw-r--r--third_party/libwebrtc/modules/video_capture/windows/video_capture_ds.cc323
-rw-r--r--third_party/libwebrtc/modules/video_capture/windows/video_capture_ds.h74
-rw-r--r--third_party/libwebrtc/modules/video_capture/windows/video_capture_factory_windows.cc37
-rw-r--r--third_party/libwebrtc/modules/video_coding/BUILD.gn1312
-rw-r--r--third_party/libwebrtc/modules/video_coding/DEPS23
-rw-r--r--third_party/libwebrtc/modules/video_coding/OWNERS7
-rw-r--r--third_party/libwebrtc/modules/video_coding/chain_diff_calculator.cc62
-rw-r--r--third_party/libwebrtc/modules/video_coding/chain_diff_calculator.h46
-rw-r--r--third_party/libwebrtc/modules/video_coding/chain_diff_calculator_gn/moz.build205
-rw-r--r--third_party/libwebrtc/modules/video_coding/chain_diff_calculator_unittest.cc126
-rw-r--r--third_party/libwebrtc/modules/video_coding/codec_globals_headers_gn/moz.build193
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/av1/BUILD.gn153
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/av1/DEPS4
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/av1/av1_svc_config.cc118
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/av1/av1_svc_config.h32
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/av1/av1_svc_config_gn/moz.build205
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/av1/av1_svc_config_unittest.cc171
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/av1/dav1d_decoder.cc202
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/av1/dav1d_decoder.h23
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/av1/dav1d_decoder_gn/moz.build218
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/av1/libaom_av1_decoder.cc200
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/av1/libaom_av1_decoder.h26
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/av1/libaom_av1_decoder_absent.cc24
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/av1/libaom_av1_decoder_gn/moz.build205
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/av1/libaom_av1_encoder.cc819
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/av1/libaom_av1_encoder.h31
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/av1/libaom_av1_encoder_unittest.cc239
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/av1/libaom_av1_unittest.cc365
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/h264/DEPS5
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/h264/OWNERS2
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/h264/h264.cc166
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/h264/h264_color_space.cc178
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/h264/h264_color_space.h38
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/h264/h264_decoder_impl.cc648
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/h264/h264_decoder_impl.h114
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/h264/h264_encoder_impl.cc653
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/h264/h264_encoder_impl.h117
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/h264/h264_encoder_impl_unittest.cc89
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/h264/h264_simulcast_unittest.cc103
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/h264/include/h264.h72
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/h264/include/h264_globals.h85
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/h264/test/h264_impl_unittest.cc99
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/interface/common_constants.h28
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/interface/libvpx_interface.cc373
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/interface/libvpx_interface.h128
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/interface/mock_libvpx_interface.h147
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/multiplex/augmented_video_frame_buffer.cc65
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/multiplex/include/augmented_video_frame_buffer.h62
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/multiplex/include/multiplex_decoder_adapter.h80
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/multiplex/include/multiplex_encoder_adapter.h91
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/multiplex/multiplex_decoder_adapter.cc266
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/multiplex/multiplex_encoded_image_packer.cc277
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/multiplex/multiplex_encoded_image_packer.h120
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/multiplex/multiplex_encoder_adapter.cc353
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/multiplex/test/multiplex_adapter_unittest.cc319
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/test/android_codec_factory_helper.cc78
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/test/android_codec_factory_helper.h30
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/test/batch/empty-runtime-deps1
-rwxr-xr-xthird_party/libwebrtc/modules/video_coding/codecs/test/batch/run-instantiation-tests.sh56
-rwxr-xr-xthird_party/libwebrtc/modules/video_coding/codecs/test/batch/run-videoprocessor-tests.sh70
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/test/encoded_video_frame_producer.cc77
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/test/encoded_video_frame_producer.h92
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/test/objc_codec_factory_helper.h28
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/test/objc_codec_factory_helper.mm30
-rwxr-xr-xthird_party/libwebrtc/modules/video_coding/codecs/test/plot_webrtc_test_logs.py438
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/test/video_codec_unittest.cc180
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/test/video_codec_unittest.h128
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/test/video_encoder_decoder_instantiation_tests.cc155
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/test/videocodec_test_av1.cc102
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/test/videocodec_test_fixture_config_unittest.cc63
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/test/videocodec_test_fixture_impl.cc831
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/test/videocodec_test_fixture_impl.h107
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/test/videocodec_test_libvpx.cc465
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/test/videocodec_test_mediacodec.cc241
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/test/videocodec_test_openh264.cc87
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/test/videocodec_test_stats_impl.cc405
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/test/videocodec_test_stats_impl.h84
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/test/videocodec_test_stats_impl_unittest.cc90
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/test/videocodec_test_videotoolbox.cc88
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/test/videoprocessor.cc700
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/test/videoprocessor.h261
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/test/videoprocessor_unittest.cc200
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/vp8/default_temporal_layers.cc884
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/vp8/default_temporal_layers.h168
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/vp8/default_temporal_layers_unittest.cc781
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/vp8/include/temporal_layers_checker.h63
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/vp8/include/vp8.h50
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/vp8/include/vp8_globals.h49
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/vp8/libvpx_vp8_decoder.cc405
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/vp8/libvpx_vp8_decoder.h77
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/vp8/libvpx_vp8_encoder.cc1428
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/vp8/libvpx_vp8_encoder.h158
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/vp8/libvpx_vp8_simulcast_test.cc108
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/vp8/screenshare_layers.cc624
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/vp8/screenshare_layers.h164
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/vp8/screenshare_layers_unittest.cc788
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/vp8/temporal_layers.h17
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/vp8/temporal_layers_checker.cc146
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/vp8/test/vp8_impl_unittest.cc913
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/vp8/vp8_scalability.cc24
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/vp8/vp8_scalability.h24
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/vp9/DEPS3
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/vp9/include/vp9.h54
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/vp9/include/vp9_globals.h226
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/vp9/libvpx_vp9_decoder.cc419
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/vp9/libvpx_vp9_decoder.h69
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/vp9/libvpx_vp9_encoder.cc2183
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/vp9/libvpx_vp9_encoder.h249
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/vp9/svc_config.cc235
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/vp9/svc_config.h39
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/vp9/svc_config_unittest.cc288
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/vp9/test/vp9_impl_unittest.cc2446
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/vp9/vp9.cc118
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/vp9/vp9_frame_buffer_pool.cc182
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/vp9/vp9_frame_buffer_pool.h134
-rw-r--r--third_party/libwebrtc/modules/video_coding/decoder_database.cc135
-rw-r--r--third_party/libwebrtc/modules/video_coding/decoder_database.h63
-rw-r--r--third_party/libwebrtc/modules/video_coding/decoding_state.cc368
-rw-r--r--third_party/libwebrtc/modules/video_coding/decoding_state.h89
-rw-r--r--third_party/libwebrtc/modules/video_coding/decoding_state_unittest.cc713
-rw-r--r--third_party/libwebrtc/modules/video_coding/encoded_frame.cc151
-rw-r--r--third_party/libwebrtc/modules/video_coding/encoded_frame.h127
-rw-r--r--third_party/libwebrtc/modules/video_coding/encoded_frame_gn/moz.build214
-rw-r--r--third_party/libwebrtc/modules/video_coding/event_wrapper.cc39
-rw-r--r--third_party/libwebrtc/modules/video_coding/event_wrapper.h46
-rw-r--r--third_party/libwebrtc/modules/video_coding/fec_controller_default.cc211
-rw-r--r--third_party/libwebrtc/modules/video_coding/fec_controller_default.h68
-rw-r--r--third_party/libwebrtc/modules/video_coding/fec_controller_unittest.cc114
-rw-r--r--third_party/libwebrtc/modules/video_coding/fec_rate_table.h461
-rw-r--r--third_party/libwebrtc/modules/video_coding/frame_buffer.cc265
-rw-r--r--third_party/libwebrtc/modules/video_coding/frame_buffer.h89
-rw-r--r--third_party/libwebrtc/modules/video_coding/frame_buffer2.cc622
-rw-r--r--third_party/libwebrtc/modules/video_coding/frame_buffer2.h193
-rw-r--r--third_party/libwebrtc/modules/video_coding/frame_buffer2_unittest.cc665
-rw-r--r--third_party/libwebrtc/modules/video_coding/frame_dependencies_calculator.cc75
-rw-r--r--third_party/libwebrtc/modules/video_coding/frame_dependencies_calculator.h49
-rw-r--r--third_party/libwebrtc/modules/video_coding/frame_dependencies_calculator_gn/moz.build205
-rw-r--r--third_party/libwebrtc/modules/video_coding/frame_dependencies_calculator_unittest.cc121
-rw-r--r--third_party/libwebrtc/modules/video_coding/frame_helpers.cc90
-rw-r--r--third_party/libwebrtc/modules/video_coding/frame_helpers.h30
-rw-r--r--third_party/libwebrtc/modules/video_coding/frame_helpers_gn/moz.build214
-rw-r--r--third_party/libwebrtc/modules/video_coding/frame_object.cc131
-rw-r--r--third_party/libwebrtc/modules/video_coding/frame_object.h68
-rw-r--r--third_party/libwebrtc/modules/video_coding/g3doc/index.md177
-rw-r--r--third_party/libwebrtc/modules/video_coding/generic_decoder.cc326
-rw-r--r--third_party/libwebrtc/modules/video_coding/generic_decoder.h124
-rw-r--r--third_party/libwebrtc/modules/video_coding/generic_decoder_unittest.cc190
-rw-r--r--third_party/libwebrtc/modules/video_coding/h264_packet_buffer.cc287
-rw-r--r--third_party/libwebrtc/modules/video_coding/h264_packet_buffer.h56
-rw-r--r--third_party/libwebrtc/modules/video_coding/h264_packet_buffer_unittest.cc778
-rw-r--r--third_party/libwebrtc/modules/video_coding/h264_sprop_parameter_sets.cc53
-rw-r--r--third_party/libwebrtc/modules/video_coding/h264_sprop_parameter_sets.h38
-rw-r--r--third_party/libwebrtc/modules/video_coding/h264_sprop_parameter_sets_unittest.cc45
-rw-r--r--third_party/libwebrtc/modules/video_coding/h264_sps_pps_tracker.cc271
-rw-r--r--third_party/libwebrtc/modules/video_coding/h264_sps_pps_tracker.h76
-rw-r--r--third_party/libwebrtc/modules/video_coding/h264_sps_pps_tracker_unittest.cc368
-rw-r--r--third_party/libwebrtc/modules/video_coding/histogram.cc61
-rw-r--r--third_party/libwebrtc/modules/video_coding/histogram.h46
-rw-r--r--third_party/libwebrtc/modules/video_coding/histogram_unittest.cc77
-rw-r--r--third_party/libwebrtc/modules/video_coding/include/video_codec_initializer.h45
-rw-r--r--third_party/libwebrtc/modules/video_coding/include/video_codec_interface.cc20
-rw-r--r--third_party/libwebrtc/modules/video_coding/include/video_codec_interface.h119
-rw-r--r--third_party/libwebrtc/modules/video_coding/include/video_coding.h150
-rw-r--r--third_party/libwebrtc/modules/video_coding/include/video_coding_defines.h119
-rw-r--r--third_party/libwebrtc/modules/video_coding/include/video_error_codes.h30
-rw-r--r--third_party/libwebrtc/modules/video_coding/internal_defines.h23
-rw-r--r--third_party/libwebrtc/modules/video_coding/jitter_buffer.cc892
-rw-r--r--third_party/libwebrtc/modules/video_coding/jitter_buffer.h276
-rw-r--r--third_party/libwebrtc/modules/video_coding/jitter_buffer_common.h60
-rw-r--r--third_party/libwebrtc/modules/video_coding/jitter_buffer_unittest.cc1849
-rw-r--r--third_party/libwebrtc/modules/video_coding/loss_notification_controller.cc173
-rw-r--r--third_party/libwebrtc/modules/video_coding/loss_notification_controller.h111
-rw-r--r--third_party/libwebrtc/modules/video_coding/loss_notification_controller_unittest.cc607
-rw-r--r--third_party/libwebrtc/modules/video_coding/media_opt_util.cc704
-rw-r--r--third_party/libwebrtc/modules/video_coding/media_opt_util.h350
-rw-r--r--third_party/libwebrtc/modules/video_coding/nack_requester.cc340
-rw-r--r--third_party/libwebrtc/modules/video_coding/nack_requester.h157
-rw-r--r--third_party/libwebrtc/modules/video_coding/nack_requester_gn/moz.build213
-rw-r--r--third_party/libwebrtc/modules/video_coding/nack_requester_unittest.cc402
-rw-r--r--third_party/libwebrtc/modules/video_coding/packet.cc69
-rw-r--r--third_party/libwebrtc/modules/video_coding/packet.h80
-rw-r--r--third_party/libwebrtc/modules/video_coding/packet_buffer.cc418
-rw-r--r--third_party/libwebrtc/modules/video_coding/packet_buffer.h133
-rw-r--r--third_party/libwebrtc/modules/video_coding/packet_buffer_gn/moz.build214
-rw-r--r--third_party/libwebrtc/modules/video_coding/packet_buffer_unittest.cc827
-rw-r--r--third_party/libwebrtc/modules/video_coding/receiver.cc191
-rw-r--r--third_party/libwebrtc/modules/video_coding/receiver.h69
-rw-r--r--third_party/libwebrtc/modules/video_coding/receiver_unittest.cc493
-rw-r--r--third_party/libwebrtc/modules/video_coding/rtp_frame_id_only_ref_finder.cc33
-rw-r--r--third_party/libwebrtc/modules/video_coding/rtp_frame_id_only_ref_finder.h38
-rw-r--r--third_party/libwebrtc/modules/video_coding/rtp_frame_reference_finder.cc189
-rw-r--r--third_party/libwebrtc/modules/video_coding/rtp_frame_reference_finder.h60
-rw-r--r--third_party/libwebrtc/modules/video_coding/rtp_frame_reference_finder_unittest.cc322
-rw-r--r--third_party/libwebrtc/modules/video_coding/rtp_generic_ref_finder.cc44
-rw-r--r--third_party/libwebrtc/modules/video_coding/rtp_generic_ref_finder.h32
-rw-r--r--third_party/libwebrtc/modules/video_coding/rtp_seq_num_only_ref_finder.cc186
-rw-r--r--third_party/libwebrtc/modules/video_coding/rtp_seq_num_only_ref_finder.h70
-rw-r--r--third_party/libwebrtc/modules/video_coding/rtp_vp8_ref_finder.cc254
-rw-r--r--third_party/libwebrtc/modules/video_coding/rtp_vp8_ref_finder.h83
-rw-r--r--third_party/libwebrtc/modules/video_coding/rtp_vp8_ref_finder_unittest.cc370
-rw-r--r--third_party/libwebrtc/modules/video_coding/rtp_vp9_ref_finder.cc367
-rw-r--r--third_party/libwebrtc/modules/video_coding/rtp_vp9_ref_finder.h105
-rw-r--r--third_party/libwebrtc/modules/video_coding/rtp_vp9_ref_finder_unittest.cc719
-rw-r--r--third_party/libwebrtc/modules/video_coding/session_info.cc537
-rw-r--r--third_party/libwebrtc/modules/video_coding/session_info.h122
-rw-r--r--third_party/libwebrtc/modules/video_coding/session_info_unittest.cc469
-rw-r--r--third_party/libwebrtc/modules/video_coding/svc/BUILD.gn135
-rw-r--r--third_party/libwebrtc/modules/video_coding/svc/create_scalability_structure.cc171
-rw-r--r--third_party/libwebrtc/modules/video_coding/svc/create_scalability_structure.h35
-rw-r--r--third_party/libwebrtc/modules/video_coding/svc/scalability_mode_util.cc215
-rw-r--r--third_party/libwebrtc/modules/video_coding/svc/scalability_mode_util.h33
-rw-r--r--third_party/libwebrtc/modules/video_coding/svc/scalability_mode_util_gn/moz.build205
-rw-r--r--third_party/libwebrtc/modules/video_coding/svc/scalability_mode_util_unittest.cc47
-rw-r--r--third_party/libwebrtc/modules/video_coding/svc/scalability_structure_full_svc.cc421
-rw-r--r--third_party/libwebrtc/modules/video_coding/svc/scalability_structure_full_svc.h180
-rw-r--r--third_party/libwebrtc/modules/video_coding/svc/scalability_structure_full_svc_unittest.cc123
-rw-r--r--third_party/libwebrtc/modules/video_coding/svc/scalability_structure_key_svc.cc378
-rw-r--r--third_party/libwebrtc/modules/video_coding/svc/scalability_structure_key_svc.h122
-rw-r--r--third_party/libwebrtc/modules/video_coding/svc/scalability_structure_key_svc_unittest.cc245
-rw-r--r--third_party/libwebrtc/modules/video_coding/svc/scalability_structure_l2t2_key_shift.cc177
-rw-r--r--third_party/libwebrtc/modules/video_coding/svc/scalability_structure_l2t2_key_shift.h64
-rw-r--r--third_party/libwebrtc/modules/video_coding/svc/scalability_structure_l2t2_key_shift_unittest.cc358
-rw-r--r--third_party/libwebrtc/modules/video_coding/svc/scalability_structure_simulcast.cc294
-rw-r--r--third_party/libwebrtc/modules/video_coding/svc/scalability_structure_simulcast.h109
-rw-r--r--third_party/libwebrtc/modules/video_coding/svc/scalability_structure_test_helpers.cc101
-rw-r--r--third_party/libwebrtc/modules/video_coding/svc/scalability_structure_test_helpers.h59
-rw-r--r--third_party/libwebrtc/modules/video_coding/svc/scalability_structure_unittest.cc389
-rw-r--r--third_party/libwebrtc/modules/video_coding/svc/scalability_structures_gn/moz.build212
-rw-r--r--third_party/libwebrtc/modules/video_coding/svc/scalable_video_controller.h139
-rw-r--r--third_party/libwebrtc/modules/video_coding/svc/scalable_video_controller_gn/moz.build201
-rw-r--r--third_party/libwebrtc/modules/video_coding/svc/scalable_video_controller_no_layering.cc88
-rw-r--r--third_party/libwebrtc/modules/video_coding/svc/scalable_video_controller_no_layering.h40
-rw-r--r--third_party/libwebrtc/modules/video_coding/svc/svc_rate_allocator.cc452
-rw-r--r--third_party/libwebrtc/modules/video_coding/svc/svc_rate_allocator.h69
-rw-r--r--third_party/libwebrtc/modules/video_coding/svc/svc_rate_allocator_gn/moz.build205
-rw-r--r--third_party/libwebrtc/modules/video_coding/svc/svc_rate_allocator_unittest.cc584
-rw-r--r--third_party/libwebrtc/modules/video_coding/test/stream_generator.cc128
-rw-r--r--third_party/libwebrtc/modules/video_coding/test/stream_generator.h74
-rw-r--r--third_party/libwebrtc/modules/video_coding/timing/BUILD.gn132
-rw-r--r--third_party/libwebrtc/modules/video_coding/timing/codec_timer.cc58
-rw-r--r--third_party/libwebrtc/modules/video_coding/timing/codec_timer.h50
-rw-r--r--third_party/libwebrtc/modules/video_coding/timing/codec_timer_gn/moz.build201
-rw-r--r--third_party/libwebrtc/modules/video_coding/timing/frame_delay_delta_kalman_filter.cc148
-rw-r--r--third_party/libwebrtc/modules/video_coding/timing/frame_delay_delta_kalman_filter.h102
-rw-r--r--third_party/libwebrtc/modules/video_coding/timing/frame_delay_delta_kalman_filter_gn/moz.build201
-rw-r--r--third_party/libwebrtc/modules/video_coding/timing/frame_delay_delta_kalman_filter_unittest.cc118
-rw-r--r--third_party/libwebrtc/modules/video_coding/timing/inter_frame_delay.cc71
-rw-r--r--third_party/libwebrtc/modules/video_coding/timing/inter_frame_delay.h46
-rw-r--r--third_party/libwebrtc/modules/video_coding/timing/inter_frame_delay_gn/moz.build201
-rw-r--r--third_party/libwebrtc/modules/video_coding/timing/inter_frame_delay_unittest.cc190
-rw-r--r--third_party/libwebrtc/modules/video_coding/timing/jitter_estimator.cc314
-rw-r--r--third_party/libwebrtc/modules/video_coding/timing/jitter_estimator.h129
-rw-r--r--third_party/libwebrtc/modules/video_coding/timing/jitter_estimator_gn/moz.build214
-rw-r--r--third_party/libwebrtc/modules/video_coding/timing/jitter_estimator_unittest.cc113
-rw-r--r--third_party/libwebrtc/modules/video_coding/timing/rtt_filter.cc161
-rw-r--r--third_party/libwebrtc/modules/video_coding/timing/rtt_filter.h69
-rw-r--r--third_party/libwebrtc/modules/video_coding/timing/rtt_filter_gn/moz.build201
-rw-r--r--third_party/libwebrtc/modules/video_coding/timing/rtt_filter_unittest.cc105
-rw-r--r--third_party/libwebrtc/modules/video_coding/timing/timing.cc297
-rw-r--r--third_party/libwebrtc/modules/video_coding/timing/timing.h161
-rw-r--r--third_party/libwebrtc/modules/video_coding/timing/timing_module_gn/moz.build212
-rw-r--r--third_party/libwebrtc/modules/video_coding/timing/timing_unittest.cc339
-rw-r--r--third_party/libwebrtc/modules/video_coding/utility/bandwidth_quality_scaler.cc148
-rw-r--r--third_party/libwebrtc/modules/video_coding/utility/bandwidth_quality_scaler.h93
-rw-r--r--third_party/libwebrtc/modules/video_coding/utility/bandwidth_quality_scaler_unittest.cc276
-rw-r--r--third_party/libwebrtc/modules/video_coding/utility/decoded_frames_history.cc92
-rw-r--r--third_party/libwebrtc/modules/video_coding/utility/decoded_frames_history.h52
-rw-r--r--third_party/libwebrtc/modules/video_coding/utility/decoded_frames_history_unittest.cc114
-rw-r--r--third_party/libwebrtc/modules/video_coding/utility/frame_dropper.cc268
-rw-r--r--third_party/libwebrtc/modules/video_coding/utility/frame_dropper.h94
-rw-r--r--third_party/libwebrtc/modules/video_coding/utility/frame_dropper_unittest.cc160
-rw-r--r--third_party/libwebrtc/modules/video_coding/utility/framerate_controller_deprecated.cc85
-rw-r--r--third_party/libwebrtc/modules/video_coding/utility/framerate_controller_deprecated.h47
-rw-r--r--third_party/libwebrtc/modules/video_coding/utility/framerate_controller_deprecated_unittest.cc90
-rw-r--r--third_party/libwebrtc/modules/video_coding/utility/ivf_defines.h23
-rw-r--r--third_party/libwebrtc/modules/video_coding/utility/ivf_file_reader.cc243
-rw-r--r--third_party/libwebrtc/modules/video_coding/utility/ivf_file_reader.h82
-rw-r--r--third_party/libwebrtc/modules/video_coding/utility/ivf_file_reader_unittest.cc188
-rw-r--r--third_party/libwebrtc/modules/video_coding/utility/ivf_file_writer.cc245
-rw-r--r--third_party/libwebrtc/modules/video_coding/utility/ivf_file_writer.h66
-rw-r--r--third_party/libwebrtc/modules/video_coding/utility/ivf_file_writer_unittest.cc311
-rw-r--r--third_party/libwebrtc/modules/video_coding/utility/qp_parser.cc53
-rw-r--r--third_party/libwebrtc/modules/video_coding/utility/qp_parser.h45
-rw-r--r--third_party/libwebrtc/modules/video_coding/utility/qp_parser_unittest.cc118
-rw-r--r--third_party/libwebrtc/modules/video_coding/utility/quality_scaler.cc340
-rw-r--r--third_party/libwebrtc/modules/video_coding/utility/quality_scaler.h120
-rw-r--r--third_party/libwebrtc/modules/video_coding/utility/quality_scaler_unittest.cc253
-rw-r--r--third_party/libwebrtc/modules/video_coding/utility/simulcast_rate_allocator.cc343
-rw-r--r--third_party/libwebrtc/modules/video_coding/utility/simulcast_rate_allocator.h70
-rw-r--r--third_party/libwebrtc/modules/video_coding/utility/simulcast_rate_allocator_unittest.cc824
-rw-r--r--third_party/libwebrtc/modules/video_coding/utility/simulcast_test_fixture_impl.cc918
-rw-r--r--third_party/libwebrtc/modules/video_coding/utility/simulcast_test_fixture_impl.h93
-rw-r--r--third_party/libwebrtc/modules/video_coding/utility/simulcast_utility.cc104
-rw-r--r--third_party/libwebrtc/modules/video_coding/utility/simulcast_utility.h33
-rw-r--r--third_party/libwebrtc/modules/video_coding/utility/vp8_header_parser.cc200
-rw-r--r--third_party/libwebrtc/modules/video_coding/utility/vp8_header_parser.h40
-rw-r--r--third_party/libwebrtc/modules/video_coding/utility/vp9_constants.h198
-rw-r--r--third_party/libwebrtc/modules/video_coding/utility/vp9_uncompressed_header_parser.cc533
-rw-r--r--third_party/libwebrtc/modules/video_coding/utility/vp9_uncompressed_header_parser.h155
-rw-r--r--third_party/libwebrtc/modules/video_coding/utility/vp9_uncompressed_header_parser_unittest.cc94
-rw-r--r--third_party/libwebrtc/modules/video_coding/video_codec_initializer.cc352
-rw-r--r--third_party/libwebrtc/modules/video_coding/video_codec_initializer_unittest.cc493
-rw-r--r--third_party/libwebrtc/modules/video_coding/video_codec_interface_gn/moz.build215
-rw-r--r--third_party/libwebrtc/modules/video_coding/video_coding_defines.cc20
-rw-r--r--third_party/libwebrtc/modules/video_coding/video_coding_gn/moz.build230
-rw-r--r--third_party/libwebrtc/modules/video_coding/video_coding_impl.cc123
-rw-r--r--third_party/libwebrtc/modules/video_coding/video_coding_impl.h135
-rw-r--r--third_party/libwebrtc/modules/video_coding/video_coding_utility_gn/moz.build225
-rw-r--r--third_party/libwebrtc/modules/video_coding/video_receiver.cc279
-rw-r--r--third_party/libwebrtc/modules/video_coding/video_receiver2.cc131
-rw-r--r--third_party/libwebrtc/modules/video_coding/video_receiver2.h79
-rw-r--r--third_party/libwebrtc/modules/video_coding/video_receiver_unittest.cc234
-rw-r--r--third_party/libwebrtc/modules/video_coding/webrtc_h264_gn/moz.build219
-rw-r--r--third_party/libwebrtc/modules/video_coding/webrtc_libvpx_interface_gn/moz.build201
-rw-r--r--third_party/libwebrtc/modules/video_coding/webrtc_multiplex_gn/moz.build217
-rw-r--r--third_party/libwebrtc/modules/video_coding/webrtc_vp8_gn/moz.build217
-rw-r--r--third_party/libwebrtc/modules/video_coding/webrtc_vp8_scalability_gn/moz.build201
-rw-r--r--third_party/libwebrtc/modules/video_coding/webrtc_vp8_temporal_layers_gn/moz.build219
-rw-r--r--third_party/libwebrtc/modules/video_coding/webrtc_vp9_gn/moz.build219
-rw-r--r--third_party/libwebrtc/modules/video_coding/webrtc_vp9_helpers_gn/moz.build214
-rw-r--r--third_party/libwebrtc/modules/video_processing/BUILD.gn112
-rw-r--r--third_party/libwebrtc/modules/video_processing/DEPS6
-rw-r--r--third_party/libwebrtc/modules/video_processing/OWNERS2
-rw-r--r--third_party/libwebrtc/modules/video_processing/denoiser_filter_gn/moz.build189
-rw-r--r--third_party/libwebrtc/modules/video_processing/test/denoiser_test.cc148
-rw-r--r--third_party/libwebrtc/modules/video_processing/util/denoiser_filter.cc65
-rw-r--r--third_party/libwebrtc/modules/video_processing/util/denoiser_filter.h50
-rw-r--r--third_party/libwebrtc/modules/video_processing/util/denoiser_filter_c.cc126
-rw-r--r--third_party/libwebrtc/modules/video_processing/util/denoiser_filter_c.h40
-rw-r--r--third_party/libwebrtc/modules/video_processing/util/denoiser_filter_neon.cc182
-rw-r--r--third_party/libwebrtc/modules/video_processing/util/denoiser_filter_neon.h38
-rw-r--r--third_party/libwebrtc/modules/video_processing/util/denoiser_filter_sse2.cc200
-rw-r--r--third_party/libwebrtc/modules/video_processing/util/denoiser_filter_sse2.h40
-rw-r--r--third_party/libwebrtc/modules/video_processing/util/noise_estimation.cc113
-rw-r--r--third_party/libwebrtc/modules/video_processing/util/noise_estimation.h63
-rw-r--r--third_party/libwebrtc/modules/video_processing/util/skin_detection.cc96
-rw-r--r--third_party/libwebrtc/modules/video_processing/util/skin_detection.h30
-rw-r--r--third_party/libwebrtc/modules/video_processing/video_denoiser.cc339
-rw-r--r--third_party/libwebrtc/modules/video_processing/video_denoiser.h86
-rw-r--r--third_party/libwebrtc/modules/video_processing/video_processing_gn/moz.build220
-rw-r--r--third_party/libwebrtc/modules/video_processing/video_processing_neon_gn/moz.build175
-rw-r--r--third_party/libwebrtc/modules/video_processing/video_processing_sse2_gn/moz.build184
2598 files changed, 501327 insertions, 0 deletions
diff --git a/third_party/libwebrtc/modules/BUILD.gn b/third_party/libwebrtc/modules/BUILD.gn
new file mode 100644
index 0000000000..abbb284bcc
--- /dev/null
+++ b/third_party/libwebrtc/modules/BUILD.gn
@@ -0,0 +1,257 @@
+# Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+import("../webrtc.gni")
+import("audio_coding/audio_coding.gni")
+
+group("modules") {
+ deps = [
+ "audio_coding",
+ "audio_device",
+ "audio_mixer",
+ "audio_processing",
+ "congestion_controller",
+ "pacing",
+ "remote_bitrate_estimator",
+ "rtp_rtcp",
+ "utility",
+ "video_coding",
+ "video_processing",
+ ]
+
+ if (rtc_desktop_capture_supported) {
+ deps += [ "desktop_capture" ]
+ }
+}
+
+rtc_source_set("module_api_public") {
+ sources = [ "include/module_common_types_public.h" ]
+ absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ]
+}
+
+rtc_source_set("module_api") {
+ visibility = [ "*" ]
+ sources = [ "include/module_common_types.h" ]
+}
+
+rtc_source_set("module_fec_api") {
+ visibility = [ "*" ]
+ sources = [ "include/module_fec_types.h" ]
+}
+
+if (rtc_include_tests && !build_with_chromium) {
+ modules_tests_resources = [
+ "../resources/audio_coding/testfile16kHz.pcm",
+ "../resources/audio_coding/testfile32kHz.pcm",
+ "../resources/audio_coding/teststereo32kHz.pcm",
+ "../resources/foreman_cif.yuv",
+ ]
+
+ if (is_ios) {
+ bundle_data("modules_tests_bundle_data") {
+ testonly = true
+ sources = modules_tests_resources
+ outputs = [ "{{bundle_resources_dir}}/{{source_file_part}}" ]
+ }
+ }
+
+ rtc_test("modules_tests") {
+ testonly = true
+
+ deps = [
+ "../test:test_main",
+ "../test:video_test_common",
+ "audio_coding:audio_coding_modules_tests",
+ "rtp_rtcp:rtp_rtcp_modules_tests",
+ "video_coding:video_coding_modules_tests",
+ "//testing/gtest",
+ ]
+
+ if (rtc_desktop_capture_supported) {
+ deps += [ "desktop_capture:desktop_capture_modules_tests" ]
+ }
+
+ data = modules_tests_resources
+
+ if (is_android) {
+ use_default_launcher = false
+ deps += [
+        # NOTE(brandtr): Including Java classes seems to be possible only
+        # from rtc_test targets. Therefore we include this target here,
+        # instead of in video_coding_modules_tests, where it is actually used.
+ "../sdk/android:libjingle_peerconnection_java",
+ "//sdk/android:native_test_jni_onload",
+ "//testing/android/native_test:native_test_support",
+ ]
+ shard_timeout = 900
+ }
+
+ if (is_ios) {
+ deps += [ ":modules_tests_bundle_data" ]
+ }
+ }
+
+ modules_unittests_resources = [
+ "../resources/audio_coding/neteq_opus.rtp",
+ "../resources/audio_coding/neteq_opus_dtx.rtp",
+ "../resources/audio_coding/neteq_universal_new.rtp",
+ "../resources/audio_coding/speech_4_channels_48k_one_second.wav",
+ "../resources/audio_coding/speech_mono_16kHz.pcm",
+ "../resources/audio_coding/speech_mono_32_48kHz.pcm",
+ "../resources/audio_coding/testfile16kHz.pcm",
+ "../resources/audio_coding/testfile32kHz.pcm",
+ "../resources/audio_coding/testfile_fake_stereo_32kHz.pcm",
+ "../resources/audio_coding/teststereo32kHz.pcm",
+ "../resources/audio_device/audio_short16.pcm",
+ "../resources/audio_device/audio_short44.pcm",
+ "../resources/audio_device/audio_short48.pcm",
+ "../resources/audio_processing/agc/agc_audio.pcm",
+ "../resources/audio_processing/agc/agc_no_circular_buffer.dat",
+ "../resources/audio_processing/agc/agc_pitch_gain.dat",
+ "../resources/audio_processing/agc/agc_pitch_lag.dat",
+ "../resources/audio_processing/agc/agc_spectral_peak.dat",
+ "../resources/audio_processing/agc/agc_vad.dat",
+ "../resources/audio_processing/agc/agc_voicing_prob.dat",
+ "../resources/audio_processing/agc/agc_with_circular_buffer.dat",
+ "../resources/audio_processing/output_data_fixed.pb",
+ "../resources/audio_processing/output_data_float.pb",
+ "../resources/audio_processing/output_data_float_avx2.pb",
+ "../resources/audio_processing/output_data_mac.pb",
+ "../resources/audio_processing/transient/ajm-macbook-1-spke16m.pcm",
+ "../resources/audio_processing/transient/audio16kHz.pcm",
+ "../resources/audio_processing/transient/audio32kHz.pcm",
+ "../resources/audio_processing/transient/audio48kHz.pcm",
+ "../resources/audio_processing/transient/audio8kHz.pcm",
+ "../resources/audio_processing/transient/detect16kHz.dat",
+ "../resources/audio_processing/transient/detect32kHz.dat",
+ "../resources/audio_processing/transient/detect48kHz.dat",
+ "../resources/audio_processing/transient/detect8kHz.dat",
+ "../resources/audio_processing/transient/double-utils.dat",
+ "../resources/audio_processing/transient/float-utils.dat",
+ "../resources/audio_processing/transient/suppressed16kHz.pcm",
+ "../resources/audio_processing/transient/suppressed32kHz.pcm",
+ "../resources/audio_processing/transient/suppressed8kHz.pcm",
+ "../resources/audio_processing/transient/wpd0.dat",
+ "../resources/audio_processing/transient/wpd1.dat",
+ "../resources/audio_processing/transient/wpd2.dat",
+ "../resources/audio_processing/transient/wpd3.dat",
+ "../resources/audio_processing/transient/wpd4.dat",
+ "../resources/audio_processing/transient/wpd5.dat",
+ "../resources/audio_processing/transient/wpd6.dat",
+ "../resources/audio_processing/transient/wpd7.dat",
+ "../resources/deflicker_before_cif_short.yuv",
+ "../resources/far16_stereo.pcm",
+ "../resources/far176_stereo.pcm",
+ "../resources/far192_stereo.pcm",
+ "../resources/far22_stereo.pcm",
+ "../resources/far32_stereo.pcm",
+ "../resources/far44_stereo.pcm",
+ "../resources/far48_stereo.pcm",
+ "../resources/far88_stereo.pcm",
+ "../resources/far8_stereo.pcm",
+ "../resources/far96_stereo.pcm",
+ "../resources/foremanColorEnhanced_cif_short.yuv",
+ "../resources/foreman_cif.yuv",
+ "../resources/foreman_cif_short.yuv",
+ "../resources/near16_stereo.pcm",
+ "../resources/near176_stereo.pcm",
+ "../resources/near192_stereo.pcm",
+ "../resources/near22_stereo.pcm",
+ "../resources/near32_stereo.pcm",
+ "../resources/near44_stereo.pcm",
+ "../resources/near48_stereo.pcm",
+ "../resources/near88_stereo.pcm",
+ "../resources/near8_stereo.pcm",
+ "../resources/near96_stereo.pcm",
+ "../resources/ref03.aecdump",
+ "../resources/remote_bitrate_estimator/VideoSendersTest_BweTest_IncreasingChoke1_0_AST.bin",
+ "../resources/remote_bitrate_estimator/VideoSendersTest_BweTest_IncreasingChoke1_0_TOF.bin",
+ "../resources/remote_bitrate_estimator/VideoSendersTest_BweTest_IncreasingChoke1_1_AST.bin",
+ "../resources/remote_bitrate_estimator/VideoSendersTest_BweTest_IncreasingChoke1_1_TOF.bin",
+ "../resources/remote_bitrate_estimator/VideoSendersTest_BweTest_IncreasingChoke2_0_AST.bin",
+ "../resources/remote_bitrate_estimator/VideoSendersTest_BweTest_IncreasingChoke2_0_TOF.bin",
+ "../resources/remote_bitrate_estimator/VideoSendersTest_BweTest_IncreasingChoke2_1_AST.bin",
+ "../resources/remote_bitrate_estimator/VideoSendersTest_BweTest_IncreasingChoke2_1_TOF.bin",
+ "../resources/remote_bitrate_estimator/VideoSendersTest_BweTest_IncreasingDelay1_0_AST.bin",
+ "../resources/remote_bitrate_estimator/VideoSendersTest_BweTest_IncreasingDelay1_0_TOF.bin",
+ "../resources/remote_bitrate_estimator/VideoSendersTest_BweTest_IncreasingLoss1_0_AST.bin",
+ "../resources/remote_bitrate_estimator/VideoSendersTest_BweTest_IncreasingLoss1_0_TOF.bin",
+ "../resources/remote_bitrate_estimator/VideoSendersTest_BweTest_Multi1_1_AST.bin",
+ "../resources/remote_bitrate_estimator/VideoSendersTest_BweTest_Multi1_1_TOF.bin",
+ "../resources/remote_bitrate_estimator/VideoSendersTest_BweTest_SteadyChoke_0_AST.bin",
+ "../resources/remote_bitrate_estimator/VideoSendersTest_BweTest_SteadyChoke_0_TOF.bin",
+ "../resources/remote_bitrate_estimator/VideoSendersTest_BweTest_SteadyChoke_1_AST.bin",
+ "../resources/remote_bitrate_estimator/VideoSendersTest_BweTest_SteadyChoke_1_TOF.bin",
+ "../resources/remote_bitrate_estimator/VideoSendersTest_BweTest_SteadyDelay_0_AST.bin",
+ "../resources/remote_bitrate_estimator/VideoSendersTest_BweTest_SteadyDelay_0_TOF.bin",
+ "../resources/remote_bitrate_estimator/VideoSendersTest_BweTest_SteadyLoss_0_AST.bin",
+ "../resources/remote_bitrate_estimator/VideoSendersTest_BweTest_SteadyLoss_0_TOF.bin",
+ "../resources/remote_bitrate_estimator/VideoSendersTest_BweTest_UnlimitedSpeed_0_AST.bin",
+ "../resources/remote_bitrate_estimator/VideoSendersTest_BweTest_UnlimitedSpeed_0_TOF.bin",
+ "../resources/short_mixed_mono_48.dat",
+ "../resources/short_mixed_mono_48.pcm",
+ "../resources/short_mixed_mono_48_arm.dat",
+ "../resources/short_mixed_stereo_48.dat",
+ "../resources/short_mixed_stereo_48.pcm",
+ "../resources/voice_engine/audio_tiny48.wav",
+ ]
+ if (is_ios) {
+ bundle_data("modules_unittests_bundle_data") {
+ testonly = true
+ sources = modules_unittests_resources
+ outputs = [ "{{bundle_resources_dir}}/{{source_file_part}}" ]
+ }
+ }
+
+ rtc_test("modules_unittests") {
+ testonly = true
+ defines = []
+ sources = [ "module_common_types_unittest.cc" ]
+
+ deps = [
+ ":module_api",
+ ":module_api_public",
+ "../test:test_main",
+ "../test:test_support",
+ "audio_coding:audio_coding_unittests",
+ "audio_device:audio_device_unittests",
+ "audio_mixer:audio_mixer_unittests",
+ "audio_processing:audio_processing_unittests",
+ "audio_processing/aec3:aec3_unittests",
+ "audio_processing/ns:ns_unittests",
+ "congestion_controller:congestion_controller_unittests",
+ "pacing:pacing_unittests",
+ "remote_bitrate_estimator:remote_bitrate_estimator_unittests",
+ "rtp_rtcp:rtp_rtcp_unittests",
+ "video_coding:video_coding_unittests",
+ "video_coding/timing:timing_unittests",
+ "video_processing:video_processing_unittests",
+ ]
+
+ if (rtc_desktop_capture_supported) {
+ deps += [ "desktop_capture:desktop_capture_unittests" ]
+ }
+
+ data = modules_unittests_resources
+
+ if (is_android) {
+ use_default_launcher = false
+ deps += [
+ "../sdk/android:libjingle_peerconnection_java",
+ "//testing/android/native_test:native_test_support",
+ ]
+ shard_timeout = 900
+ }
+ if (is_ios) {
+ info_plist = "../test/ios/Info.plist"
+ deps += [ ":modules_unittests_bundle_data" ]
+ configs += [ "..:common_objc" ]
+ ldflags = [ "-ObjC" ]
+ }
+ }
+}
diff --git a/third_party/libwebrtc/modules/async_audio_processing/BUILD.gn b/third_party/libwebrtc/modules/async_audio_processing/BUILD.gn
new file mode 100644
index 0000000000..7a7ca20df1
--- /dev/null
+++ b/third_party/libwebrtc/modules/async_audio_processing/BUILD.gn
@@ -0,0 +1,43 @@
+# Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+import("../../webrtc.gni")
+
+rtc_library("async_audio_processing") {
+ sources = [
+ "async_audio_processing.cc",
+ "async_audio_processing.h",
+ ]
+
+ public = [ "async_audio_processing.h" ]
+
+ deps = [
+ "../../api:scoped_refptr",
+ "../../api:sequence_checker",
+ "../../api/audio:audio_frame_api",
+ "../../api/audio:audio_frame_processor",
+ "../../api/task_queue:task_queue",
+ "../../rtc_base:checks",
+ "../../rtc_base:refcount",
+ "../../rtc_base:rtc_task_queue",
+ ]
+}
+
+if (rtc_include_tests) {
+ rtc_library("async_audio_processing_test") {
+ testonly = true
+
+ sources = []
+
+ deps = [
+ ":async_audio_processing",
+ "../../api/audio:audio_frame_api",
+ "../../rtc_base:checks",
+ ]
+ }
+}
diff --git a/third_party/libwebrtc/modules/async_audio_processing/async_audio_processing.cc b/third_party/libwebrtc/modules/async_audio_processing/async_audio_processing.cc
new file mode 100644
index 0000000000..9452f3bcf9
--- /dev/null
+++ b/third_party/libwebrtc/modules/async_audio_processing/async_audio_processing.cc
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/async_audio_processing/async_audio_processing.h"
+
+#include <utility>
+
+#include "api/audio/audio_frame.h"
+#include "api/task_queue/task_queue_factory.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+AsyncAudioProcessing::Factory::~Factory() = default;
+AsyncAudioProcessing::Factory::Factory(AudioFrameProcessor& frame_processor,
+ TaskQueueFactory& task_queue_factory)
+ : frame_processor_(frame_processor),
+ task_queue_factory_(task_queue_factory) {}
+
+std::unique_ptr<AsyncAudioProcessing>
+AsyncAudioProcessing::Factory::CreateAsyncAudioProcessing(
+ AudioFrameProcessor::OnAudioFrameCallback on_frame_processed_callback) {
+ return std::make_unique<AsyncAudioProcessing>(
+ frame_processor_, task_queue_factory_,
+ std::move(on_frame_processed_callback));
+}
+
+AsyncAudioProcessing::~AsyncAudioProcessing() {
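+  // Detach from the processor first, so no further frames are handed to the
+  // sink callback (and thus to `task_queue_`) while this object is torn down.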
+ frame_processor_.SetSink(nullptr);
+}
+
+AsyncAudioProcessing::AsyncAudioProcessing(
+ AudioFrameProcessor& frame_processor,
+ TaskQueueFactory& task_queue_factory,
+ AudioFrameProcessor::OnAudioFrameCallback on_frame_processed_callback)
+ : on_frame_processed_callback_(std::move(on_frame_processed_callback)),
+ frame_processor_(frame_processor),
+ task_queue_(task_queue_factory.CreateTaskQueue(
+ "AsyncAudioProcessing",
+ TaskQueueFactory::Priority::NORMAL)) {
+ frame_processor_.SetSink([this](std::unique_ptr<AudioFrame> frame) {
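+    // The processor may deliver processed frames on an arbitrary thread;
+    // re-post onto `task_queue_` so that `on_frame_processed_callback_` is
+    // always invoked there.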
+ task_queue_.PostTask([this, frame = std::move(frame)]() mutable {
+ on_frame_processed_callback_(std::move(frame));
+ });
+ });
+}
+
+void AsyncAudioProcessing::Process(std::unique_ptr<AudioFrame> frame) {
+ task_queue_.PostTask([this, frame = std::move(frame)]() mutable {
+ frame_processor_.Process(std::move(frame));
+ });
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/async_audio_processing/async_audio_processing.h b/third_party/libwebrtc/modules/async_audio_processing/async_audio_processing.h
new file mode 100644
index 0000000000..bbd0f69b1b
--- /dev/null
+++ b/third_party/libwebrtc/modules/async_audio_processing/async_audio_processing.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_ASYNC_AUDIO_PROCESSING_ASYNC_AUDIO_PROCESSING_H_
+#define MODULES_ASYNC_AUDIO_PROCESSING_ASYNC_AUDIO_PROCESSING_H_
+
+#include <memory>
+
+#include "api/audio/audio_frame_processor.h"
+#include "rtc_base/ref_count.h"
+#include "rtc_base/task_queue.h"
+
+namespace webrtc {
+
+class AudioFrame;
+class TaskQueueFactory;
+
+// Helper class that takes care of interactions with AudioFrameProcessor in
+// an asynchronous manner. Offloads AudioFrameProcessor::Process calls to a
+// dedicated task queue. Makes sure that it's always safe for
+// AudioFrameProcessor to pass processed frames back to its sink.
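+//
+// A minimal usage sketch (illustrative only; `MyFrameProcessor`, a concrete
+// AudioFrameProcessor implementation, and the incoming `frame` are assumed
+// and not part of this API):
+//
+//   MyFrameProcessor processor;
+//   auto task_queue_factory = CreateDefaultTaskQueueFactory();
+//   auto factory = rtc::make_ref_counted<AsyncAudioProcessing::Factory>(
+//       processor, *task_queue_factory);
+//   auto async_processing = factory->CreateAsyncAudioProcessing(
+//       [](std::unique_ptr<AudioFrame> frame) {
+//         // Consume the processed frame here.
+//       });
+//   async_processing->Process(std::move(frame));
+//
+// Note that `processor` must outlive `async_processing`, which keeps a
+// reference to it.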
+class AsyncAudioProcessing final {
+ public:
+  // Helper class that passes AudioFrameProcessor and TaskQueueFactory into
+  // the AsyncAudioProcessing constructor.
+ class Factory : public rtc::RefCountInterface {
+ public:
+ Factory(const Factory&) = delete;
+ Factory& operator=(const Factory&) = delete;
+
+ ~Factory();
+ Factory(AudioFrameProcessor& frame_processor,
+ TaskQueueFactory& task_queue_factory);
+
+ std::unique_ptr<AsyncAudioProcessing> CreateAsyncAudioProcessing(
+ AudioFrameProcessor::OnAudioFrameCallback on_frame_processed_callback);
+
+ private:
+ AudioFrameProcessor& frame_processor_;
+ TaskQueueFactory& task_queue_factory_;
+ };
+
+ AsyncAudioProcessing(const AsyncAudioProcessing&) = delete;
+ AsyncAudioProcessing& operator=(const AsyncAudioProcessing&) = delete;
+
+ ~AsyncAudioProcessing();
+
+ // Creates AsyncAudioProcessing which will pass audio frames to
+ // `frame_processor` on `task_queue_` and reply with processed frames passed
+ // into `on_frame_processed_callback`, which is posted back onto
+ // `task_queue_`. `task_queue_` is created using the provided
+ // `task_queue_factory`.
+ AsyncAudioProcessing(
+ AudioFrameProcessor& frame_processor,
+ TaskQueueFactory& task_queue_factory,
+ AudioFrameProcessor::OnAudioFrameCallback on_frame_processed_callback);
+
+ // Accepts `frame` for asynchronous processing. Thread-safe.
+ void Process(std::unique_ptr<AudioFrame> frame);
+
+ private:
+ AudioFrameProcessor::OnAudioFrameCallback on_frame_processed_callback_;
+ AudioFrameProcessor& frame_processor_;
+ rtc::TaskQueue task_queue_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_ASYNC_AUDIO_PROCESSING_ASYNC_AUDIO_PROCESSING_H_
diff --git a/third_party/libwebrtc/modules/async_audio_processing/async_audio_processing_gn/moz.build b/third_party/libwebrtc/modules/async_audio_processing/async_audio_processing_gn/moz.build
new file mode 100644
index 0000000000..24f8656db0
--- /dev/null
+++ b/third_party/libwebrtc/modules/async_audio_processing/async_audio_processing_gn/moz.build
@@ -0,0 +1,205 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/async_audio_processing/async_audio_processing.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("async_audio_processing_gn")
diff --git a/third_party/libwebrtc/modules/audio_coding/BUILD.gn b/third_party/libwebrtc/modules/audio_coding/BUILD.gn
new file mode 100644
index 0000000000..1a7e923fe2
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/BUILD.gn
@@ -0,0 +1,2189 @@
+# Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+import("../../webrtc.gni")
+import("audio_coding.gni")
+if (rtc_enable_protobuf) {
+ import("//third_party/protobuf/proto_library.gni")
+}
+
+visibility = [ ":*" ]
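+# (Targets in this file are, by default, visible only to other targets in
+# this file; individual targets widen this below via `visibility += ...`.)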
+
+rtc_source_set("audio_coding_module_typedefs") {
+ visibility += [ "*" ]
+ sources = [ "include/audio_coding_module_typedefs.h" ]
+}
+
+rtc_library("audio_coding") {
+ visibility += [ "*" ]
+ sources = [
+ "acm2/acm_receiver.cc",
+ "acm2/acm_receiver.h",
+ "acm2/acm_remixing.cc",
+ "acm2/acm_remixing.h",
+ "acm2/acm_resampler.cc",
+ "acm2/acm_resampler.h",
+ "acm2/audio_coding_module.cc",
+ "acm2/call_statistics.cc",
+ "acm2/call_statistics.h",
+ "include/audio_coding_module.h",
+ ]
+
+ defines = []
+
+ deps = [
+ ":audio_coding_module_typedefs",
+ ":default_neteq_factory",
+ ":neteq",
+ "..:module_api",
+ "..:module_api_public",
+ "../../api:array_view",
+ "../../api:function_view",
+ "../../api/audio:audio_frame_api",
+ "../../api/audio_codecs:audio_codecs_api",
+ "../../api/neteq:neteq_api",
+ "../../common_audio",
+ "../../common_audio:common_audio_c",
+ "../../rtc_base:audio_format_to_string",
+ "../../rtc_base:buffer",
+ "../../rtc_base:checks",
+ "../../rtc_base:logging",
+ "../../rtc_base:macromagic",
+ "../../rtc_base:safe_conversions",
+ "../../rtc_base/synchronization:mutex",
+ "../../system_wrappers",
+ "../../system_wrappers:metrics",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/strings",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+}
+
+rtc_library("legacy_encoded_audio_frame") {
+ sources = [
+ "codecs/legacy_encoded_audio_frame.cc",
+ "codecs/legacy_encoded_audio_frame.h",
+ ]
+ deps = [
+ "../../api:array_view",
+ "../../api/audio_codecs:audio_codecs_api",
+ "../../rtc_base:buffer",
+ "../../rtc_base:checks",
+ ]
+ absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ]
+}
+
+rtc_library("webrtc_cng") {
+ visibility += webrtc_default_visibility
+ sources = [
+ "codecs/cng/webrtc_cng.cc",
+ "codecs/cng/webrtc_cng.h",
+ ]
+
+ deps = [
+ "../../api:array_view",
+ "../../common_audio:common_audio_c",
+ "../../rtc_base:buffer",
+ "../../rtc_base:checks",
+ "../../rtc_base:safe_conversions",
+ ]
+}
+
+rtc_library("audio_encoder_cng") {
+ visibility += [ "*" ]
+ sources = [
+ "codecs/cng/audio_encoder_cng.cc",
+ "codecs/cng/audio_encoder_cng.h",
+ ]
+
+ deps = [
+ ":webrtc_cng",
+ "../../api/audio_codecs:audio_codecs_api",
+ "../../api/units:time_delta",
+ "../../common_audio",
+ "../../rtc_base:checks",
+ ]
+ absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ]
+}
+
+rtc_library("red") {
+ visibility += [ "*" ]
+ sources = [
+ "codecs/red/audio_encoder_copy_red.cc",
+ "codecs/red/audio_encoder_copy_red.h",
+ ]
+
+ deps = [
+ "../../api:array_view",
+ "../../api:field_trials_view",
+ "../../api/audio_codecs:audio_codecs_api",
+ "../../api/units:time_delta",
+ "../../common_audio",
+ "../../rtc_base:buffer",
+ "../../rtc_base:byte_order",
+ "../../rtc_base:checks",
+ "../../rtc_base:logging",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/strings",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+}
+
+rtc_library("g711") {
+ visibility += [ "*" ]
+ poisonous = [ "audio_codecs" ]
+ sources = [
+ "codecs/g711/audio_decoder_pcm.cc",
+ "codecs/g711/audio_decoder_pcm.h",
+ "codecs/g711/audio_encoder_pcm.cc",
+ "codecs/g711/audio_encoder_pcm.h",
+ ]
+
+ deps = [
+ ":legacy_encoded_audio_frame",
+ "../../api:array_view",
+ "../../api/audio_codecs:audio_codecs_api",
+ "../../api/units:time_delta",
+ "../../rtc_base:buffer",
+ "../../rtc_base:checks",
+ ]
+ absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ]
+ public_deps = [ ":g711_c" ] # no-presubmit-check TODO(webrtc:8603)
+}
+
+rtc_library("g711_c") {
+ poisonous = [ "audio_codecs" ]
+ sources = [
+ "codecs/g711/g711_interface.c",
+ "codecs/g711/g711_interface.h",
+ ]
+ deps = [ "../third_party/g711:g711_3p" ]
+}
+
+rtc_library("g722") {
+ visibility += [ "*" ]
+ poisonous = [ "audio_codecs" ]
+ sources = [
+ "codecs/g722/audio_decoder_g722.cc",
+ "codecs/g722/audio_decoder_g722.h",
+ "codecs/g722/audio_encoder_g722.cc",
+ "codecs/g722/audio_encoder_g722.h",
+ ]
+
+ deps = [
+ ":legacy_encoded_audio_frame",
+ "../../api:array_view",
+ "../../api/audio_codecs:audio_codecs_api",
+ "../../api/audio_codecs/g722:audio_encoder_g722_config",
+ "../../api/units:time_delta",
+ "../../rtc_base:buffer",
+ "../../rtc_base:checks",
+ "../../rtc_base:safe_conversions",
+ ]
+ absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ]
+ public_deps = [ ":g722_c" ] # no-presubmit-check TODO(webrtc:8603)
+}
+
+rtc_library("g722_c") {
+ poisonous = [ "audio_codecs" ]
+ sources = [
+ "codecs/g722/g722_interface.c",
+ "codecs/g722/g722_interface.h",
+ ]
+ deps = [ "../third_party/g722:g722_3p" ]
+}
+
+rtc_library("ilbc") {
+ visibility += webrtc_default_visibility
+ poisonous = [ "audio_codecs" ]
+ sources = [
+ "codecs/ilbc/audio_decoder_ilbc.cc",
+ "codecs/ilbc/audio_decoder_ilbc.h",
+ "codecs/ilbc/audio_encoder_ilbc.cc",
+ "codecs/ilbc/audio_encoder_ilbc.h",
+ ]
+
+ deps = [
+ ":legacy_encoded_audio_frame",
+ "../../api:array_view",
+ "../../api/audio_codecs:audio_codecs_api",
+ "../../api/audio_codecs/ilbc:audio_encoder_ilbc_config",
+ "../../api/units:time_delta",
+ "../../common_audio",
+ "../../rtc_base:buffer",
+ "../../rtc_base:checks",
+ "../../rtc_base:logging",
+ "../../rtc_base:safe_conversions",
+ ]
+ absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ]
+ public_deps = [ ":ilbc_c" ] # no-presubmit-check TODO(webrtc:8603)
+}
+
+rtc_library("ilbc_c") {
+ poisonous = [ "audio_codecs" ]
+ sources = [
+ "codecs/ilbc/abs_quant.c",
+ "codecs/ilbc/abs_quant.h",
+ "codecs/ilbc/abs_quant_loop.c",
+ "codecs/ilbc/abs_quant_loop.h",
+ "codecs/ilbc/augmented_cb_corr.c",
+ "codecs/ilbc/augmented_cb_corr.h",
+ "codecs/ilbc/bw_expand.c",
+ "codecs/ilbc/bw_expand.h",
+ "codecs/ilbc/cb_construct.c",
+ "codecs/ilbc/cb_construct.h",
+ "codecs/ilbc/cb_mem_energy.c",
+ "codecs/ilbc/cb_mem_energy.h",
+ "codecs/ilbc/cb_mem_energy_augmentation.c",
+ "codecs/ilbc/cb_mem_energy_augmentation.h",
+ "codecs/ilbc/cb_mem_energy_calc.c",
+ "codecs/ilbc/cb_mem_energy_calc.h",
+ "codecs/ilbc/cb_search.c",
+ "codecs/ilbc/cb_search.h",
+ "codecs/ilbc/cb_search_core.c",
+ "codecs/ilbc/cb_search_core.h",
+ "codecs/ilbc/cb_update_best_index.c",
+ "codecs/ilbc/cb_update_best_index.h",
+ "codecs/ilbc/chebyshev.c",
+ "codecs/ilbc/chebyshev.h",
+ "codecs/ilbc/comp_corr.c",
+ "codecs/ilbc/comp_corr.h",
+ "codecs/ilbc/constants.c",
+ "codecs/ilbc/constants.h",
+ "codecs/ilbc/create_augmented_vec.c",
+ "codecs/ilbc/create_augmented_vec.h",
+ "codecs/ilbc/decode.c",
+ "codecs/ilbc/decode.h",
+ "codecs/ilbc/decode_residual.c",
+ "codecs/ilbc/decode_residual.h",
+ "codecs/ilbc/decoder_interpolate_lsf.c",
+ "codecs/ilbc/decoder_interpolate_lsf.h",
+ "codecs/ilbc/defines.h",
+ "codecs/ilbc/do_plc.c",
+ "codecs/ilbc/do_plc.h",
+ "codecs/ilbc/encode.c",
+ "codecs/ilbc/encode.h",
+ "codecs/ilbc/energy_inverse.c",
+ "codecs/ilbc/energy_inverse.h",
+ "codecs/ilbc/enh_upsample.c",
+ "codecs/ilbc/enh_upsample.h",
+ "codecs/ilbc/enhancer.c",
+ "codecs/ilbc/enhancer.h",
+ "codecs/ilbc/enhancer_interface.c",
+ "codecs/ilbc/enhancer_interface.h",
+ "codecs/ilbc/filtered_cb_vecs.c",
+ "codecs/ilbc/filtered_cb_vecs.h",
+ "codecs/ilbc/frame_classify.c",
+ "codecs/ilbc/frame_classify.h",
+ "codecs/ilbc/gain_dequant.c",
+ "codecs/ilbc/gain_dequant.h",
+ "codecs/ilbc/gain_quant.c",
+ "codecs/ilbc/gain_quant.h",
+ "codecs/ilbc/get_cd_vec.c",
+ "codecs/ilbc/get_cd_vec.h",
+ "codecs/ilbc/get_lsp_poly.c",
+ "codecs/ilbc/get_lsp_poly.h",
+ "codecs/ilbc/get_sync_seq.c",
+ "codecs/ilbc/get_sync_seq.h",
+ "codecs/ilbc/hp_input.c",
+ "codecs/ilbc/hp_input.h",
+ "codecs/ilbc/hp_output.c",
+ "codecs/ilbc/hp_output.h",
+ "codecs/ilbc/ilbc.c",
+ "codecs/ilbc/ilbc.h",
+ "codecs/ilbc/index_conv_dec.c",
+ "codecs/ilbc/index_conv_dec.h",
+ "codecs/ilbc/index_conv_enc.c",
+ "codecs/ilbc/index_conv_enc.h",
+ "codecs/ilbc/init_decode.c",
+ "codecs/ilbc/init_decode.h",
+ "codecs/ilbc/init_encode.c",
+ "codecs/ilbc/init_encode.h",
+ "codecs/ilbc/interpolate.c",
+ "codecs/ilbc/interpolate.h",
+ "codecs/ilbc/interpolate_samples.c",
+ "codecs/ilbc/interpolate_samples.h",
+ "codecs/ilbc/lpc_encode.c",
+ "codecs/ilbc/lpc_encode.h",
+ "codecs/ilbc/lsf_check.c",
+ "codecs/ilbc/lsf_check.h",
+ "codecs/ilbc/lsf_interpolate_to_poly_dec.c",
+ "codecs/ilbc/lsf_interpolate_to_poly_dec.h",
+ "codecs/ilbc/lsf_interpolate_to_poly_enc.c",
+ "codecs/ilbc/lsf_interpolate_to_poly_enc.h",
+ "codecs/ilbc/lsf_to_lsp.c",
+ "codecs/ilbc/lsf_to_lsp.h",
+ "codecs/ilbc/lsf_to_poly.c",
+ "codecs/ilbc/lsf_to_poly.h",
+ "codecs/ilbc/lsp_to_lsf.c",
+ "codecs/ilbc/lsp_to_lsf.h",
+ "codecs/ilbc/my_corr.c",
+ "codecs/ilbc/my_corr.h",
+ "codecs/ilbc/nearest_neighbor.c",
+ "codecs/ilbc/nearest_neighbor.h",
+ "codecs/ilbc/pack_bits.c",
+ "codecs/ilbc/pack_bits.h",
+ "codecs/ilbc/poly_to_lsf.c",
+ "codecs/ilbc/poly_to_lsf.h",
+ "codecs/ilbc/poly_to_lsp.c",
+ "codecs/ilbc/poly_to_lsp.h",
+ "codecs/ilbc/refiner.c",
+ "codecs/ilbc/refiner.h",
+ "codecs/ilbc/simple_interpolate_lsf.c",
+ "codecs/ilbc/simple_interpolate_lsf.h",
+ "codecs/ilbc/simple_lpc_analysis.c",
+ "codecs/ilbc/simple_lpc_analysis.h",
+ "codecs/ilbc/simple_lsf_dequant.c",
+ "codecs/ilbc/simple_lsf_dequant.h",
+ "codecs/ilbc/simple_lsf_quant.c",
+ "codecs/ilbc/simple_lsf_quant.h",
+ "codecs/ilbc/smooth.c",
+ "codecs/ilbc/smooth.h",
+ "codecs/ilbc/smooth_out_data.c",
+ "codecs/ilbc/smooth_out_data.h",
+ "codecs/ilbc/sort_sq.c",
+ "codecs/ilbc/sort_sq.h",
+ "codecs/ilbc/split_vq.c",
+ "codecs/ilbc/split_vq.h",
+ "codecs/ilbc/state_construct.c",
+ "codecs/ilbc/state_construct.h",
+ "codecs/ilbc/state_search.c",
+ "codecs/ilbc/state_search.h",
+ "codecs/ilbc/swap_bytes.c",
+ "codecs/ilbc/swap_bytes.h",
+ "codecs/ilbc/unpack_bits.c",
+ "codecs/ilbc/unpack_bits.h",
+ "codecs/ilbc/vq3.c",
+ "codecs/ilbc/vq3.h",
+ "codecs/ilbc/vq4.c",
+ "codecs/ilbc/vq4.h",
+ "codecs/ilbc/window32_w32.c",
+ "codecs/ilbc/window32_w32.h",
+ "codecs/ilbc/xcorr_coef.c",
+ "codecs/ilbc/xcorr_coef.h",
+ ]
+
+ deps = [
+ "../../api/audio_codecs:audio_codecs_api",
+ "../../common_audio",
+ "../../common_audio:common_audio_c",
+ "../../rtc_base:checks",
+ "../../rtc_base:sanitizer",
+ "../../rtc_base/system:arch",
+ ]
+ absl_deps = [ "//third_party/abseil-cpp/absl/base:core_headers" ]
+}
+
+rtc_source_set("isac_common") {
+ poisonous = [ "audio_codecs" ]
+ sources = [
+ "codecs/isac/audio_decoder_isac_t.h",
+ "codecs/isac/audio_decoder_isac_t_impl.h",
+ "codecs/isac/audio_encoder_isac_t.h",
+ "codecs/isac/audio_encoder_isac_t_impl.h",
+ ]
+ deps = [
+ ":isac_bwinfo",
+ "../../api:scoped_refptr",
+ "../../api/audio_codecs:audio_codecs_api",
+ "../../api/units:time_delta",
+ "../../rtc_base:checks",
+ "../../rtc_base:safe_minmax",
+ "../../system_wrappers:field_trial",
+ ]
+ absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ]
+}
+
+rtc_library("isac") {
+ visibility += [ "*" ]
+ poisonous = [ "audio_codecs" ]
+ sources = [
+ "codecs/isac/main/include/audio_decoder_isac.h",
+ "codecs/isac/main/include/audio_encoder_isac.h",
+ "codecs/isac/main/source/audio_decoder_isac.cc",
+ "codecs/isac/main/source/audio_encoder_isac.cc",
+ ]
+
+ deps = [
+ ":isac_common",
+ "../../api/audio_codecs:audio_codecs_api",
+ ]
+ public_deps = [ ":isac_c" ] # no-presubmit-check TODO(webrtc:8603)
+}
+
+rtc_source_set("isac_bwinfo") {
+ sources = [ "codecs/isac/bandwidth_info.h" ]
+ deps = []
+}
+
+rtc_library("isac_vad") {
+ visibility += webrtc_default_visibility
+ sources = [
+ "codecs/isac/main/source/filter_functions.c",
+ "codecs/isac/main/source/filter_functions.h",
+ "codecs/isac/main/source/isac_vad.c",
+ "codecs/isac/main/source/isac_vad.h",
+ "codecs/isac/main/source/os_specific_inline.h",
+ "codecs/isac/main/source/pitch_estimator.c",
+ "codecs/isac/main/source/pitch_estimator.h",
+ "codecs/isac/main/source/pitch_filter.c",
+ "codecs/isac/main/source/pitch_filter.h",
+ "codecs/isac/main/source/settings.h",
+ "codecs/isac/main/source/structs.h",
+ ]
+ deps = [
+ ":isac_bwinfo",
+ "../../rtc_base:compile_assert_c",
+ "../../rtc_base/system:arch",
+ "../../rtc_base/system:ignore_warnings",
+ "../third_party/fft",
+ ]
+}
+
+rtc_library("isac_c") {
+ poisonous = [ "audio_codecs" ]
+ sources = [
+ "codecs/isac/main/include/isac.h",
+ "codecs/isac/main/source/arith_routines.c",
+ "codecs/isac/main/source/arith_routines.h",
+ "codecs/isac/main/source/arith_routines_hist.c",
+ "codecs/isac/main/source/arith_routines_logist.c",
+ "codecs/isac/main/source/bandwidth_estimator.c",
+ "codecs/isac/main/source/bandwidth_estimator.h",
+ "codecs/isac/main/source/codec.h",
+ "codecs/isac/main/source/crc.c",
+ "codecs/isac/main/source/crc.h",
+ "codecs/isac/main/source/decode.c",
+ "codecs/isac/main/source/decode_bwe.c",
+ "codecs/isac/main/source/encode.c",
+ "codecs/isac/main/source/encode_lpc_swb.c",
+ "codecs/isac/main/source/encode_lpc_swb.h",
+ "codecs/isac/main/source/entropy_coding.c",
+ "codecs/isac/main/source/entropy_coding.h",
+ "codecs/isac/main/source/filterbanks.c",
+ "codecs/isac/main/source/intialize.c",
+ "codecs/isac/main/source/isac.c",
+ "codecs/isac/main/source/isac_float_type.h",
+ "codecs/isac/main/source/lattice.c",
+ "codecs/isac/main/source/lpc_analysis.c",
+ "codecs/isac/main/source/lpc_analysis.h",
+ "codecs/isac/main/source/lpc_gain_swb_tables.c",
+ "codecs/isac/main/source/lpc_gain_swb_tables.h",
+ "codecs/isac/main/source/lpc_shape_swb12_tables.c",
+ "codecs/isac/main/source/lpc_shape_swb12_tables.h",
+ "codecs/isac/main/source/lpc_shape_swb16_tables.c",
+ "codecs/isac/main/source/lpc_shape_swb16_tables.h",
+ "codecs/isac/main/source/lpc_tables.c",
+ "codecs/isac/main/source/lpc_tables.h",
+ "codecs/isac/main/source/pitch_gain_tables.c",
+ "codecs/isac/main/source/pitch_gain_tables.h",
+ "codecs/isac/main/source/pitch_lag_tables.c",
+ "codecs/isac/main/source/pitch_lag_tables.h",
+ "codecs/isac/main/source/spectrum_ar_model_tables.c",
+ "codecs/isac/main/source/spectrum_ar_model_tables.h",
+ "codecs/isac/main/source/transform.c",
+ ]
+
+ if (is_linux || is_chromeos) {
+ libs = [ "m" ]
+ }
+
+ deps = [
+ ":isac_bwinfo",
+ ":isac_vad",
+ "../../common_audio",
+ "../../common_audio:common_audio_c",
+ "../../rtc_base:checks",
+ "../../rtc_base:compile_assert_c",
+ "../../rtc_base/system:arch",
+ "../third_party/fft",
+ ]
+}
+
+rtc_library("isac_fix") {
+ visibility += [ "*" ]
+ poisonous = [ "audio_codecs" ]
+ sources = [
+ "codecs/isac/fix/source/audio_decoder_isacfix.cc",
+ "codecs/isac/fix/source/audio_encoder_isacfix.cc",
+ ]
+
+ deps = [
+ ":isac_common",
+ "../../api/audio_codecs:audio_codecs_api",
+ "../../common_audio",
+ "../../system_wrappers",
+ ]
+ public_deps = [ ":isac_fix_c" ] # no-presubmit-check TODO(webrtc:8603)
+
+ if (rtc_build_with_neon) {
+ deps += [ ":isac_neon" ]
+ }
+}
+
+rtc_library("isac_fix_common") {
+ poisonous = [ "audio_codecs" ]
+ sources = [
+ "codecs/isac/fix/source/codec.h",
+ "codecs/isac/fix/source/entropy_coding.h",
+ "codecs/isac/fix/source/fft.c",
+ "codecs/isac/fix/source/fft.h",
+ "codecs/isac/fix/source/filterbank_internal.h",
+ "codecs/isac/fix/source/settings.h",
+ "codecs/isac/fix/source/structs.h",
+ "codecs/isac/fix/source/transform_tables.c",
+ ]
+ deps = [
+ ":isac_bwinfo",
+ "../../common_audio",
+ "../../common_audio:common_audio_c",
+ ]
+}
+
+rtc_source_set("isac_fix_c_arm_asm") {
+ poisonous = [ "audio_codecs" ]
+ sources = []
+ if (target_cpu == "arm" && arm_version >= 7) {
+ sources += [
+ "codecs/isac/fix/source/lattice_armv7.S",
+ "codecs/isac/fix/source/pitch_filter_armv6.S",
+ ]
+ deps = [
+ ":isac_fix_common",
+ "../../rtc_base/system:asm_defines",
+ ]
+ }
+}
+
+rtc_library("isac_fix_c") {
+ poisonous = [ "audio_codecs" ]
+ sources = [
+ "codecs/isac/fix/include/audio_decoder_isacfix.h",
+ "codecs/isac/fix/include/audio_encoder_isacfix.h",
+ "codecs/isac/fix/include/isacfix.h",
+ "codecs/isac/fix/source/arith_routines.c",
+ "codecs/isac/fix/source/arith_routines_hist.c",
+ "codecs/isac/fix/source/arith_routines_logist.c",
+ "codecs/isac/fix/source/arith_routins.h",
+ "codecs/isac/fix/source/bandwidth_estimator.c",
+ "codecs/isac/fix/source/bandwidth_estimator.h",
+ "codecs/isac/fix/source/decode.c",
+ "codecs/isac/fix/source/decode_bwe.c",
+ "codecs/isac/fix/source/decode_plc.c",
+ "codecs/isac/fix/source/encode.c",
+ "codecs/isac/fix/source/entropy_coding.c",
+ "codecs/isac/fix/source/filterbank_tables.c",
+ "codecs/isac/fix/source/filterbank_tables.h",
+ "codecs/isac/fix/source/filterbanks.c",
+ "codecs/isac/fix/source/filters.c",
+ "codecs/isac/fix/source/initialize.c",
+ "codecs/isac/fix/source/isac_fix_type.h",
+ "codecs/isac/fix/source/isacfix.c",
+ "codecs/isac/fix/source/lattice.c",
+ "codecs/isac/fix/source/lattice_c.c",
+ "codecs/isac/fix/source/lpc_masking_model.c",
+ "codecs/isac/fix/source/lpc_masking_model.h",
+ "codecs/isac/fix/source/lpc_tables.c",
+ "codecs/isac/fix/source/lpc_tables.h",
+ "codecs/isac/fix/source/pitch_estimator.c",
+ "codecs/isac/fix/source/pitch_estimator.h",
+ "codecs/isac/fix/source/pitch_estimator_c.c",
+ "codecs/isac/fix/source/pitch_filter.c",
+ "codecs/isac/fix/source/pitch_filter_c.c",
+ "codecs/isac/fix/source/pitch_gain_tables.c",
+ "codecs/isac/fix/source/pitch_gain_tables.h",
+ "codecs/isac/fix/source/pitch_lag_tables.c",
+ "codecs/isac/fix/source/pitch_lag_tables.h",
+ "codecs/isac/fix/source/spectrum_ar_model_tables.c",
+ "codecs/isac/fix/source/spectrum_ar_model_tables.h",
+ "codecs/isac/fix/source/transform.c",
+ ]
+
+ deps = [
+ ":isac_bwinfo",
+ ":isac_common",
+ ":isac_fix_common",
+ "../../api/audio_codecs:audio_codecs_api",
+ "../../common_audio",
+ "../../common_audio:common_audio_c",
+ "../../rtc_base:checks",
+ "../../rtc_base:compile_assert_c",
+ "../../rtc_base:sanitizer",
+ "../../system_wrappers",
+ "../third_party/fft",
+ ]
+
+ if (rtc_build_with_neon) {
+ deps += [ ":isac_neon" ]
+
+    # TODO(bugs.webrtc.org/9579): Consider moving the usage of NEON from
+    # pitch_estimator_c.c into the "isac_neon" target and deleting this flag:
+ if (target_cpu != "arm64") {
+ suppressed_configs += [ "//build/config/compiler:compiler_arm_fpu" ]
+ cflags = [ "-mfpu=neon" ]
+ }
+ }
+
+ if (target_cpu == "arm" && arm_version >= 7) {
+ sources -= [
+ "codecs/isac/fix/source/lattice_c.c",
+ "codecs/isac/fix/source/pitch_filter_c.c",
+ ]
+ deps += [ ":isac_fix_c_arm_asm" ]
+ }
+
+ if (target_cpu == "mipsel") {
+ sources += [
+ "codecs/isac/fix/source/entropy_coding_mips.c",
+ "codecs/isac/fix/source/filters_mips.c",
+ "codecs/isac/fix/source/lattice_mips.c",
+ "codecs/isac/fix/source/pitch_estimator_mips.c",
+ "codecs/isac/fix/source/transform_mips.c",
+ ]
+ sources -= [
+ "codecs/isac/fix/source/lattice_c.c",
+ "codecs/isac/fix/source/pitch_estimator_c.c",
+ ]
+ if (mips_dsp_rev > 0) {
+ sources += [ "codecs/isac/fix/source/filterbanks_mips.c" ]
+ }
+ if (mips_dsp_rev > 1) {
+ sources += [
+ "codecs/isac/fix/source/lpc_masking_model_mips.c",
+ "codecs/isac/fix/source/pitch_filter_mips.c",
+ ]
+ sources -= [ "codecs/isac/fix/source/pitch_filter_c.c" ]
+ }
+ }
+}
+
+if (rtc_build_with_neon) {
+ rtc_library("isac_neon") {
+ poisonous = [ "audio_codecs" ]
+ sources = [
+ "codecs/isac/fix/source/entropy_coding_neon.c",
+ "codecs/isac/fix/source/filterbanks_neon.c",
+ "codecs/isac/fix/source/filters_neon.c",
+ "codecs/isac/fix/source/lattice_neon.c",
+ "codecs/isac/fix/source/transform_neon.c",
+ ]
+
+ if (target_cpu != "arm64") {
+ # Enable compilation for the NEON instruction set.
+ suppressed_configs += [ "//build/config/compiler:compiler_arm_fpu" ]
+ cflags = [ "-mfpu=neon" ]
+ }
+
+ deps = [
+ ":isac_fix_common",
+ "../../common_audio",
+ "../../common_audio:common_audio_c",
+ "../../rtc_base:checks",
+ ]
+ }
+}
+
+rtc_library("pcm16b") {
+ visibility += [ "*" ]
+ poisonous = [ "audio_codecs" ]
+ sources = [
+ "codecs/pcm16b/audio_decoder_pcm16b.cc",
+ "codecs/pcm16b/audio_decoder_pcm16b.h",
+ "codecs/pcm16b/audio_encoder_pcm16b.cc",
+ "codecs/pcm16b/audio_encoder_pcm16b.h",
+ "codecs/pcm16b/pcm16b_common.cc",
+ "codecs/pcm16b/pcm16b_common.h",
+ ]
+
+ deps = [
+ ":g711",
+ ":legacy_encoded_audio_frame",
+ "../../api:array_view",
+ "../../api/audio_codecs:audio_codecs_api",
+ "../../rtc_base:buffer",
+ "../../rtc_base:checks",
+ ]
+ public_deps = [ ":pcm16b_c" ] # no-presubmit-check TODO(webrtc:8603)
+}
+
+rtc_library("pcm16b_c") {
+ poisonous = [ "audio_codecs" ]
+ sources = [
+ "codecs/pcm16b/pcm16b.c",
+ "codecs/pcm16b/pcm16b.h",
+ ]
+}
+
+rtc_library("audio_coding_opus_common") {
+ sources = [
+ "codecs/opus/audio_coder_opus_common.cc",
+ "codecs/opus/audio_coder_opus_common.h",
+ ]
+
+ deps = [
+ "../../api:array_view",
+ "../../api/audio_codecs:audio_codecs_api",
+ "../../rtc_base:checks",
+ "../../rtc_base:stringutils",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/strings",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+}
+
+rtc_library("webrtc_opus") {
+ visibility += webrtc_default_visibility
+ poisonous = [ "audio_codecs" ]
+ sources = [
+ "codecs/opus/audio_decoder_opus.cc",
+ "codecs/opus/audio_decoder_opus.h",
+ "codecs/opus/audio_encoder_opus.cc",
+ "codecs/opus/audio_encoder_opus.h",
+ ]
+
+ deps = [
+ ":audio_coding_opus_common",
+ ":audio_network_adaptor",
+ "../../api:array_view",
+ "../../api/audio_codecs:audio_codecs_api",
+ "../../api/audio_codecs/opus:audio_encoder_opus_config",
+ "../../common_audio",
+ "../../rtc_base:buffer",
+ "../../rtc_base:checks",
+ "../../rtc_base:logging",
+ "../../rtc_base:macromagic",
+ "../../rtc_base:protobuf_utils",
+ "../../rtc_base:rtc_numerics",
+ "../../rtc_base:safe_conversions",
+ "../../rtc_base:safe_minmax",
+ "../../rtc_base:stringutils",
+ "../../rtc_base:timeutils",
+ "../../system_wrappers:field_trial",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/strings",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+ public_deps = # no-presubmit-check TODO(webrtc:8603)
+ [ ":webrtc_opus_wrapper" ]
+
+ if (build_with_mozilla) {
+ include_dirs = [ "/media/libopus/include" ]
+ }
+
+ defines = audio_codec_defines
+}
+
+rtc_library("webrtc_multiopus") {
+ visibility += webrtc_default_visibility
+ poisonous = [ "audio_codecs" ]
+ sources = [
+ "codecs/opus/audio_decoder_multi_channel_opus_impl.cc",
+ "codecs/opus/audio_decoder_multi_channel_opus_impl.h",
+ "codecs/opus/audio_encoder_multi_channel_opus_impl.cc",
+ "codecs/opus/audio_encoder_multi_channel_opus_impl.h",
+ ]
+
+ deps = [
+ ":audio_coding_opus_common",
+ "../../api/audio_codecs:audio_codecs_api",
+ "../../api/audio_codecs/opus:audio_decoder_opus_config",
+ "../../api/audio_codecs/opus:audio_encoder_opus_config",
+ "../../api/units:time_delta",
+ "../../rtc_base:buffer",
+ "../../rtc_base:checks",
+ "../../rtc_base:logging",
+ "../../rtc_base:macromagic",
+ "../../rtc_base:safe_minmax",
+ "../../rtc_base:stringutils",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/memory",
+ "//third_party/abseil-cpp/absl/strings",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+ public_deps = # no-presubmit-check TODO(webrtc:8603)
+ [ ":webrtc_opus_wrapper" ]
+
+ if (build_with_mozilla) {
+ include_dirs = [ "/media/libopus/include" ]
+ }
+
+ defines = audio_codec_defines
+}
+
+rtc_library("webrtc_opus_wrapper") {
+ poisonous = [ "audio_codecs" ]
+ sources = [
+ "codecs/opus/opus_inst.h",
+ "codecs/opus/opus_interface.cc",
+ "codecs/opus/opus_interface.h",
+ ]
+
+ defines = audio_coding_defines
+
+ deps = [
+ "../../api:array_view",
+ "../../rtc_base:checks",
+ "../../rtc_base:ignore_wundef",
+ "../../system_wrappers:field_trial",
+ ]
+
+ if (rtc_build_opus) {
+ deps += [ rtc_opus_dir ]
+ public_configs = [ "//third_party/opus:opus_config" ]
+ } else if (build_with_mozilla) {
+ include_dirs = [ "/media/libopus/include" ]
+ }
+}
+
+if (rtc_enable_protobuf) {
+ proto_library("ana_debug_dump_proto") {
+ visibility += webrtc_default_visibility
+ sources = [ "audio_network_adaptor/debug_dump.proto" ]
+ link_deps = [ ":ana_config_proto" ]
+ proto_out_dir = "modules/audio_coding/audio_network_adaptor"
+ }
+ proto_library("ana_config_proto") {
+ visibility += [ "*" ]
+ sources = [ "audio_network_adaptor/config.proto" ]
+ proto_out_dir = "modules/audio_coding/audio_network_adaptor"
+ }
+}
+
+rtc_library("audio_network_adaptor_config") {
+ visibility += webrtc_default_visibility
+ sources = [
+ "audio_network_adaptor/audio_network_adaptor_config.cc",
+ "audio_network_adaptor/include/audio_network_adaptor_config.h",
+ ]
+ absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ]
+}
+
+rtc_library("audio_network_adaptor") {
+ visibility += webrtc_default_visibility
+ sources = [
+ "audio_network_adaptor/audio_network_adaptor_impl.cc",
+ "audio_network_adaptor/audio_network_adaptor_impl.h",
+ "audio_network_adaptor/bitrate_controller.cc",
+ "audio_network_adaptor/bitrate_controller.h",
+ "audio_network_adaptor/channel_controller.cc",
+ "audio_network_adaptor/channel_controller.h",
+ "audio_network_adaptor/controller.cc",
+ "audio_network_adaptor/controller.h",
+ "audio_network_adaptor/controller_manager.cc",
+ "audio_network_adaptor/controller_manager.h",
+ "audio_network_adaptor/debug_dump_writer.cc",
+ "audio_network_adaptor/debug_dump_writer.h",
+ "audio_network_adaptor/dtx_controller.cc",
+ "audio_network_adaptor/dtx_controller.h",
+ "audio_network_adaptor/event_log_writer.cc",
+ "audio_network_adaptor/event_log_writer.h",
+ "audio_network_adaptor/fec_controller_plr_based.cc",
+ "audio_network_adaptor/fec_controller_plr_based.h",
+ "audio_network_adaptor/frame_length_controller.cc",
+ "audio_network_adaptor/frame_length_controller.h",
+ "audio_network_adaptor/frame_length_controller_v2.cc",
+ "audio_network_adaptor/frame_length_controller_v2.h",
+ "audio_network_adaptor/include/audio_network_adaptor.h",
+ "audio_network_adaptor/util/threshold_curve.h",
+ ]
+
+ public_deps = # no-presubmit-check TODO(webrtc:8603)
+ [ ":audio_network_adaptor_config" ]
+
+ deps = [
+ "../../api/audio_codecs:audio_codecs_api",
+ "../../api/rtc_event_log",
+ "../../common_audio",
+ "../../logging:rtc_event_audio",
+ "../../rtc_base:checks",
+ "../../rtc_base:ignore_wundef",
+ "../../rtc_base:logging",
+ "../../rtc_base:protobuf_utils",
+ "../../rtc_base:safe_conversions",
+ "../../rtc_base:timeutils",
+ "../../rtc_base/system:file_wrapper",
+ "../../system_wrappers",
+ "../../system_wrappers:field_trial",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/algorithm:container",
+ "//third_party/abseil-cpp/absl/strings",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+
+ if (rtc_enable_protobuf) {
+ deps += [
+ ":ana_config_proto",
+ ":ana_debug_dump_proto",
+ ]
+ }
+}
+
+rtc_library("neteq") {
+ visibility += webrtc_default_visibility
+ sources = [
+ "neteq/accelerate.cc",
+ "neteq/accelerate.h",
+ "neteq/audio_multi_vector.cc",
+ "neteq/audio_multi_vector.h",
+ "neteq/audio_vector.cc",
+ "neteq/audio_vector.h",
+ "neteq/background_noise.cc",
+ "neteq/background_noise.h",
+ "neteq/buffer_level_filter.cc",
+ "neteq/buffer_level_filter.h",
+ "neteq/comfort_noise.cc",
+ "neteq/comfort_noise.h",
+ "neteq/cross_correlation.cc",
+ "neteq/cross_correlation.h",
+ "neteq/decision_logic.cc",
+ "neteq/decision_logic.h",
+ "neteq/decoder_database.cc",
+ "neteq/decoder_database.h",
+ "neteq/delay_manager.cc",
+ "neteq/delay_manager.h",
+ "neteq/dsp_helper.cc",
+ "neteq/dsp_helper.h",
+ "neteq/dtmf_buffer.cc",
+ "neteq/dtmf_buffer.h",
+ "neteq/dtmf_tone_generator.cc",
+ "neteq/dtmf_tone_generator.h",
+ "neteq/expand.cc",
+ "neteq/expand.h",
+ "neteq/expand_uma_logger.cc",
+ "neteq/expand_uma_logger.h",
+ "neteq/histogram.cc",
+ "neteq/histogram.h",
+ "neteq/merge.cc",
+ "neteq/merge.h",
+ "neteq/nack_tracker.cc",
+ "neteq/nack_tracker.h",
+ "neteq/neteq_impl.cc",
+ "neteq/neteq_impl.h",
+ "neteq/normal.cc",
+ "neteq/normal.h",
+ "neteq/packet.cc",
+ "neteq/packet.h",
+ "neteq/packet_arrival_history.cc",
+ "neteq/packet_arrival_history.h",
+ "neteq/packet_buffer.cc",
+ "neteq/packet_buffer.h",
+ "neteq/post_decode_vad.cc",
+ "neteq/post_decode_vad.h",
+ "neteq/preemptive_expand.cc",
+ "neteq/preemptive_expand.h",
+ "neteq/random_vector.cc",
+ "neteq/random_vector.h",
+ "neteq/red_payload_splitter.cc",
+ "neteq/red_payload_splitter.h",
+ "neteq/reorder_optimizer.cc",
+ "neteq/reorder_optimizer.h",
+ "neteq/statistics_calculator.cc",
+ "neteq/statistics_calculator.h",
+ "neteq/sync_buffer.cc",
+ "neteq/sync_buffer.h",
+ "neteq/time_stretch.cc",
+ "neteq/time_stretch.h",
+ "neteq/timestamp_scaler.cc",
+ "neteq/timestamp_scaler.h",
+ "neteq/underrun_optimizer.cc",
+ "neteq/underrun_optimizer.h",
+ ]
+
+ deps = [
+ ":audio_coding_module_typedefs",
+ ":webrtc_cng",
+ "..:module_api_public",
+ "../../api:array_view",
+ "../../api:rtp_headers",
+ "../../api:rtp_packet_info",
+ "../../api:scoped_refptr",
+ "../../api/audio:audio_frame_api",
+ "../../api/audio_codecs:audio_codecs_api",
+ "../../api/neteq:neteq_api",
+ "../../api/neteq:neteq_controller_api",
+ "../../api/neteq:tick_timer",
+ "../../common_audio",
+ "../../common_audio:common_audio_c",
+ "../../rtc_base:audio_format_to_string",
+ "../../rtc_base:buffer",
+ "../../rtc_base:checks",
+ "../../rtc_base:event_tracer",
+ "../../rtc_base:gtest_prod",
+ "../../rtc_base:logging",
+ "../../rtc_base:macromagic",
+ "../../rtc_base:safe_conversions",
+ "../../rtc_base:safe_minmax",
+ "../../rtc_base:sanitizer",
+ "../../rtc_base/experiments:field_trial_parser",
+ "../../rtc_base/synchronization:mutex",
+ "../../system_wrappers",
+ "../../system_wrappers:field_trial",
+ "../../system_wrappers:metrics",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/strings",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+}
+
+rtc_source_set("default_neteq_factory") {
+ visibility += webrtc_default_visibility
+ sources = [
+ "neteq/default_neteq_factory.cc",
+ "neteq/default_neteq_factory.h",
+ ]
+ deps = [
+ ":neteq",
+ "../../api:scoped_refptr",
+ "../../api/audio_codecs:audio_codecs_api",
+ "../../api/neteq:default_neteq_controller_factory",
+ "../../api/neteq:neteq_api",
+ "../../system_wrappers:system_wrappers",
+ ]
+}
+
+# Although it provides only test support, this target must be outside the
+# rtc_include_tests conditional, because it supports fuzzer tests that are
+# ultimately built and run as part of the Chromium ecosystem, which does not
+# set the rtc_include_tests flag.
+rtc_library("neteq_tools_minimal") {
+ visibility += webrtc_default_visibility
+ sources = [
+ "neteq/tools/audio_sink.cc",
+ "neteq/tools/audio_sink.h",
+ "neteq/tools/encode_neteq_input.cc",
+ "neteq/tools/encode_neteq_input.h",
+ "neteq/tools/neteq_input.cc",
+ "neteq/tools/neteq_input.h",
+ "neteq/tools/neteq_test.cc",
+ "neteq/tools/neteq_test.h",
+ "neteq/tools/packet.cc",
+ "neteq/tools/packet.h",
+ "neteq/tools/packet_source.cc",
+ "neteq/tools/packet_source.h",
+ ]
+
+ deps = [
+ ":default_neteq_factory",
+ ":neteq",
+ "../../api:array_view",
+ "../../api:neteq_simulator_api",
+ "../../api:rtp_headers",
+ "../../api/audio:audio_frame_api",
+ "../../api/audio_codecs:audio_codecs_api",
+ "../../api/neteq:custom_neteq_factory",
+ "../../api/neteq:default_neteq_controller_factory",
+ "../../api/neteq:neteq_api",
+ "../../rtc_base:buffer",
+ "../../rtc_base:checks",
+ "../../rtc_base:copy_on_write_buffer",
+ "../../rtc_base:safe_conversions",
+ "../../rtc_base:stringutils",
+ "../../system_wrappers",
+ "../rtp_rtcp:rtp_rtcp_format",
+ ]
+ absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ]
+ defines = audio_codec_defines
+}
+
+rtc_library("neteq_test_tools") {
+ visibility += webrtc_default_visibility
+ testonly = true
+ sources = [
+ "neteq/tools/audio_checksum.h",
+ "neteq/tools/audio_loop.cc",
+ "neteq/tools/audio_loop.h",
+ "neteq/tools/constant_pcm_packet_source.cc",
+ "neteq/tools/constant_pcm_packet_source.h",
+ "neteq/tools/initial_packet_inserter_neteq_input.cc",
+ "neteq/tools/initial_packet_inserter_neteq_input.h",
+ "neteq/tools/neteq_packet_source_input.cc",
+ "neteq/tools/neteq_packet_source_input.h",
+ "neteq/tools/output_audio_file.h",
+ "neteq/tools/output_wav_file.h",
+ "neteq/tools/rtp_file_source.cc",
+ "neteq/tools/rtp_file_source.h",
+ "neteq/tools/rtp_generator.cc",
+ "neteq/tools/rtp_generator.h",
+ ]
+
+ deps = [
+ ":neteq_tools",
+ ":neteq_tools_minimal",
+ ":pcm16b",
+ "../../api:array_view",
+ "../../api:rtp_headers",
+ "../../common_audio",
+ "../../rtc_base",
+ "../../rtc_base:buffer",
+ "../../rtc_base:checks",
+ "../../rtc_base:stringutils",
+ "../../rtc_base/system:arch",
+ "../../test:rtp_test_utils",
+ "../rtp_rtcp:rtp_rtcp_format",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/strings",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+
+ if (rtc_enable_protobuf) {
+ sources += [
+ "neteq/tools/neteq_event_log_input.cc",
+ "neteq/tools/neteq_event_log_input.h",
+ ]
+ deps += [ ":rtc_event_log_source" ]
+ }
+}
+
+rtc_library("neteq_tools") {
+ visibility += webrtc_default_visibility
+ sources = [
+ "neteq/tools/fake_decode_from_file.cc",
+ "neteq/tools/fake_decode_from_file.h",
+ "neteq/tools/neteq_delay_analyzer.cc",
+ "neteq/tools/neteq_delay_analyzer.h",
+ "neteq/tools/neteq_replacement_input.cc",
+ "neteq/tools/neteq_replacement_input.h",
+ "neteq/tools/neteq_stats_getter.cc",
+ "neteq/tools/neteq_stats_getter.h",
+ "neteq/tools/neteq_stats_plotter.cc",
+ "neteq/tools/neteq_stats_plotter.h",
+ ]
+
+ deps = [
+ ":neteq_input_audio_tools",
+ ":neteq_tools_minimal",
+ "..:module_api_public",
+ "../../api:array_view",
+ "../../api/audio_codecs:audio_codecs_api",
+ "../../rtc_base:checks",
+ "../../rtc_base:safe_conversions",
+ "../../rtc_base:stringutils",
+ "../../rtc_base:timeutils",
+ "../rtp_rtcp",
+ "../rtp_rtcp:rtp_rtcp_format",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/strings",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+}
+
+rtc_library("neteq_input_audio_tools") {
+ visibility += webrtc_default_visibility
+ sources = [
+ "neteq/tools/input_audio_file.cc",
+ "neteq/tools/input_audio_file.h",
+ "neteq/tools/resample_input_audio_file.cc",
+ "neteq/tools/resample_input_audio_file.h",
+ ]
+
+ deps = [
+ "../../common_audio",
+ "../../rtc_base:checks",
+ ]
+ absl_deps = [ "//third_party/abseil-cpp/absl/strings" ]
+}
+
+if (rtc_enable_protobuf) {
+ rtc_library("rtc_event_log_source") {
+ testonly = true
+
+ sources = [
+ "neteq/tools/rtc_event_log_source.cc",
+ "neteq/tools/rtc_event_log_source.h",
+ ]
+
+ deps = [
+ ":neteq_tools_minimal",
+ "../../logging:rtc_event_log_parser",
+ "../../rtc_base:checks",
+ "../rtp_rtcp",
+ "../rtp_rtcp:rtp_rtcp_format",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/strings",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+ public_deps = # no-presubmit-check TODO(webrtc:8603)
+ [ "../../logging:rtc_event_log_proto" ]
+ }
+
+ # Only used for test purposes. Since we want to use it from chromium
+ # (see audio_coding_modules_tests_shared below), we cannot guard it
+ # under rtc_include_tests.
+ proto_library("neteq_unittest_proto") {
+ testonly = true
+ sources = [ "neteq/neteq_unittest.proto" ]
+ proto_out_dir = "modules/audio_coding/neteq"
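+ # proto_out_dir places the generated neteq_unittest.pb.h under
+ # modules/audio_coding/neteq (see the matching include rule in DEPS).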
+ }
+}
+
+# Allows re-use of some test classes from chromium.
+if (rtc_include_tests) {
+rtc_library("audio_coding_modules_tests_shared") {
+ testonly = true
+ visibility = []
+ visibility = [ "*" ]
+
+ sources = [
+ "neteq/test/neteq_decoding_test.cc",
+ "neteq/test/neteq_decoding_test.h",
+ "neteq/test/result_sink.cc",
+ "neteq/test/result_sink.h",
+ "test/PCMFile.cc",
+ "test/PCMFile.h",
+ "test/TestStereo.cc",
+ "test/TestStereo.h",
+ "test/opus_test.cc",
+ "test/opus_test.h",
+ ]
+
+ deps = [
+ ":audio_coding",
+ ":audio_coding_module_typedefs",
+ ":default_neteq_factory",
+ ":neteq_test_tools",
+ ":neteq_tools_minimal",
+ ":webrtc_opus_wrapper",
+ "..:module_api",
+ "../../api:rtp_headers",
+ "../../api/audio:audio_frame_api",
+ "../../api/audio_codecs:builtin_audio_decoder_factory",
+ "../../api/audio_codecs:builtin_audio_encoder_factory",
+ "../../api/neteq:neteq_api",
+ "../../rtc_base",
+ "../../rtc_base:checks",
+ "../../rtc_base:ignore_wundef",
+ "../../rtc_base:stringutils",
+ "../../system_wrappers",
+ "../../test:fileutils",
+ "../../test:test_support",
+ "//testing/gtest",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/strings",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+ defines = audio_coding_defines
+
+ if (rtc_enable_protobuf) {
+ defines += [ "WEBRTC_NETEQ_UNITTEST_BITEXACT" ]
+ deps += [ ":neteq_unittest_proto" ]
+ }
+}
+}
+
+if (rtc_include_tests) {
+ audio_coding_deps = [
+ ":audio_encoder_cng",
+ ":g711",
+ ":g722",
+ ":pcm16b",
+ "../../common_audio",
+ "../../system_wrappers",
+ ]
+ if (rtc_include_ilbc) {
+ audio_coding_deps += [ ":ilbc" ]
+ }
+ if (rtc_include_opus) {
+ audio_coding_deps += [ ":webrtc_opus" ]
+ }
+ if (target_cpu == "arm") {
+ audio_coding_deps += [ ":isac_fix" ]
+ } else {
+ audio_coding_deps += [ ":isac" ]
+ }
+ if (!build_with_mozilla && !build_with_chromium) {
+ audio_coding_deps += [ ":red" ]
+ }
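+ # The audio_coding_deps list is appended to the test targets below via
+ # "deps += audio_coding_deps" (see e.g. "acm_receive_test").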
+
+ rtc_source_set("mocks") {
+ testonly = true
+ sources = [
+ "audio_network_adaptor/mock/mock_audio_network_adaptor.h",
+ "audio_network_adaptor/mock/mock_controller.h",
+ "audio_network_adaptor/mock/mock_controller_manager.h",
+ "audio_network_adaptor/mock/mock_debug_dump_writer.h",
+ ]
+ deps = [
+ ":audio_network_adaptor",
+ "../../test:test_support",
+ ]
+ }
+
+ if (!build_with_chromium) {
+ group("audio_coding_tests") {
+ visibility += webrtc_default_visibility
+ testonly = true
+ public_deps = [ # no-presubmit-check TODO(webrtc:8603)
+ ":acm_receive_test",
+ ":acm_send_test",
+ ":audio_codec_speed_tests",
+ ":audio_decoder_unittests",
+ ":audio_decoder_unittests",
+ ":g711_test",
+ ":g722_test",
+ ":ilbc_test",
+ ":isac_api_test",
+ ":isac_switch_samprate_test",
+ ":isac_test",
+ ":neteq_ilbc_quality_test",
+ ":neteq_isac_quality_test",
+ ":neteq_opus_quality_test",
+ ":neteq_pcm16b_quality_test",
+ ":neteq_pcmu_quality_test",
+ ":neteq_speed_test",
+ ":rtp_analyze",
+ ":rtp_encode",
+ ":rtp_jitter",
+ ":rtpcat",
+ ":webrtc_opus_fec_test",
+ ]
+ if (rtc_enable_protobuf) {
+ public_deps += # no-presubmit-check TODO(webrtc:8603)
+ [ ":neteq_rtpplay" ]
+ }
+ }
+ }
+
+ rtc_library("audio_coding_modules_tests") {
+ testonly = true
+ visibility += webrtc_default_visibility
+
+ sources = [
+ "test/Channel.cc",
+ "test/Channel.h",
+ "test/EncodeDecodeTest.cc",
+ "test/EncodeDecodeTest.h",
+ "test/PacketLossTest.cc",
+ "test/PacketLossTest.h",
+ "test/RTPFile.cc",
+ "test/RTPFile.h",
+ "test/TestAllCodecs.cc",
+ "test/TestAllCodecs.h",
+ "test/TestRedFec.cc",
+ "test/TestRedFec.h",
+ "test/TestVADDTX.cc",
+ "test/TestVADDTX.h",
+ "test/Tester.cc",
+ "test/TwoWayCommunication.cc",
+ "test/TwoWayCommunication.h",
+ "test/iSACTest.cc",
+ "test/iSACTest.h",
+ "test/target_delay_unittest.cc",
+ ]
+ deps = [
+ ":audio_coding",
+ ":audio_coding_module_typedefs",
+ ":audio_coding_modules_tests_shared",
+ ":audio_encoder_cng",
+ ":pcm16b_c",
+ ":red",
+ ":webrtc_opus_wrapper",
+ "..:module_api",
+ "../../api:rtp_headers",
+ "../../api/audio:audio_frame_api",
+ "../../api/audio_codecs:audio_codecs_api",
+ "../../api/audio_codecs:builtin_audio_decoder_factory",
+ "../../api/audio_codecs:builtin_audio_encoder_factory",
+ "../../api/audio_codecs/L16:audio_decoder_L16",
+ "../../api/audio_codecs/L16:audio_encoder_L16",
+ "../../api/audio_codecs/g711:audio_decoder_g711",
+ "../../api/audio_codecs/g711:audio_encoder_g711",
+ "../../api/audio_codecs/g722:audio_decoder_g722",
+ "../../api/audio_codecs/g722:audio_encoder_g722",
+ "../../api/audio_codecs/ilbc:audio_decoder_ilbc",
+ "../../api/audio_codecs/ilbc:audio_encoder_ilbc",
+ "../../api/audio_codecs/isac:audio_decoder_isac_float",
+ "../../api/audio_codecs/isac:audio_encoder_isac_float",
+ "../../api/audio_codecs/opus:audio_decoder_opus",
+ "../../api/audio_codecs/opus:audio_encoder_opus",
+ "../../common_audio",
+ "../../rtc_base:checks",
+ "../../rtc_base:logging",
+ "../../rtc_base:macromagic",
+ "../../rtc_base:stringutils",
+ "../../rtc_base:timeutils",
+ "../../rtc_base/synchronization:mutex",
+ "../../test:fileutils",
+ "../../test:scoped_key_value_config",
+ "../../test:test_support",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/strings",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+ defines = audio_coding_defines
+ }
+
+ rtc_library("audio_coding_perf_tests") {
+ testonly = true
+ visibility += webrtc_default_visibility
+
+ sources = [
+ "codecs/opus/opus_complexity_unittest.cc",
+ "neteq/test/neteq_performance_unittest.cc",
+ ]
+ deps = [
+ ":neteq_test_support",
+ ":neteq_test_tools",
+ "../../api/audio_codecs/opus:audio_encoder_opus",
+ "../../rtc_base:macromagic",
+ "../../rtc_base:timeutils",
+ "../../system_wrappers",
+ "../../system_wrappers:field_trial",
+ "../../test:fileutils",
+ "../../test:perf_test",
+ "../../test:test_support",
+ ]
+ }
+
+ rtc_library("acm_receive_test") {
+ testonly = true
+ sources = [
+ "acm2/acm_receive_test.cc",
+ "acm2/acm_receive_test.h",
+ ]
+
+ defines = audio_coding_defines
+
+ deps = [
+ ":audio_coding",
+ ":neteq_tools",
+ ":neteq_tools_minimal",
+ "../../api:scoped_refptr",
+ "../../api/audio_codecs:audio_codecs_api",
+ "../../api/audio_codecs:builtin_audio_decoder_factory",
+ "../../test:test_support",
+ "//testing/gtest",
+ ]
+
+ deps += audio_coding_deps
+ absl_deps = [ "//third_party/abseil-cpp/absl/strings" ]
+ }
+
+ rtc_library("acm_send_test") {
+ testonly = true
+ sources = [
+ "acm2/acm_send_test.cc",
+ "acm2/acm_send_test.h",
+ ]
+
+ defines = audio_coding_defines
+
+ deps = [
+ ":audio_coding",
+ ":neteq_input_audio_tools",
+ ":neteq_tools",
+ ":neteq_tools_minimal",
+ "../../api/audio:audio_frame_api",
+ "../../api/audio_codecs:audio_codecs_api",
+ "../../api/audio_codecs:builtin_audio_decoder_factory",
+ "../../api/audio_codecs:builtin_audio_encoder_factory",
+ "../../rtc_base:checks",
+ "../../rtc_base:stringutils",
+ "../../test:test_support",
+ "//testing/gtest",
+ ]
+ deps += audio_coding_deps
+ absl_deps = [ "//third_party/abseil-cpp/absl/strings" ]
+ }
+
+ if (!build_with_chromium) {
+ audio_decoder_unittests_resources =
+ [ "../../resources/audio_coding/testfile32kHz.pcm" ]
+
+ if (is_ios) {
+ bundle_data("audio_decoder_unittests_bundle_data") {
+ testonly = true
+ sources = audio_decoder_unittests_resources
+ outputs = [ "{{bundle_resources_dir}}/{{source_file_part}}" ]
+ }
+ }
+
+ rtc_test("audio_decoder_unittests") {
+ testonly = true
+ sources = [ "neteq/audio_decoder_unittest.cc" ]
+
+ defines = neteq_defines
+
+ deps = [
+ ":ilbc",
+ ":isac",
+ ":isac_fix",
+ ":neteq",
+ ":neteq_input_audio_tools",
+ ":neteq_tools",
+ "../../api/audio_codecs:audio_codecs_api",
+ "../../api/audio_codecs/opus:audio_encoder_opus",
+ "../../common_audio",
+ "../../rtc_base/system:arch",
+ "../../test:fileutils",
+ "../../test:test_main",
+ "../../test:test_support",
+ "//testing/gtest",
+ ] + audio_coding_deps
+
+ data = audio_decoder_unittests_resources
+
+ if (is_android) {
+ use_default_launcher = false
+ deps += [
+ "//build/android/gtest_apk:native_test_instrumentation_test_runner_java",
+ "//testing/android/native_test:native_test_java",
+ "//testing/android/native_test:native_test_support",
+ ]
+ shard_timeout = 900
+ }
+ if (is_ios) {
+ deps += [ ":audio_decoder_unittests_bundle_data" ]
+ }
+ }
+ }
+
+ if (rtc_enable_protobuf) {
+ rtc_library("neteq_test_factory") {
+ testonly = true
+ visibility += webrtc_default_visibility
+ defines = audio_codec_defines
+ deps = [
+ ":neteq_input_audio_tools",
+ ":neteq_tools",
+ ":neteq_tools_minimal",
+ "../../rtc_base:checks",
+ "../../rtc_base:refcount",
+ "../../test:fileutils",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/strings",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+ sources = [
+ "neteq/tools/neteq_test_factory.cc",
+ "neteq/tools/neteq_test_factory.h",
+ ]
+
+ deps += [
+ ":neteq",
+ ":neteq_test_tools",
+ "../../api/audio_codecs:builtin_audio_decoder_factory",
+ "../../api/neteq:neteq_api",
+ "../../test:audio_test_common",
+ "../../test:field_trial",
+ "../../test:test_support",
+ ]
+ }
+ }
+
+ if (rtc_enable_protobuf && !build_with_chromium) {
+ rtc_executable("neteq_rtpplay") {
+ testonly = true
+ visibility += [ "*" ]
+ defines = []
+ deps = [
+ ":neteq_test_factory",
+ ":neteq_test_tools",
+ ":neteq_tools_minimal",
+ "../../rtc_base:stringutils",
+ "../../system_wrappers:field_trial",
+ "../../test:field_trial",
+ "//third_party/abseil-cpp/absl/flags:flag",
+ "//third_party/abseil-cpp/absl/flags:parse",
+ "//third_party/abseil-cpp/absl/strings",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+ sources = [ "neteq/tools/neteq_rtpplay.cc" ]
+ }
+ }
+
+ if (!build_with_chromium) {
+ audio_codec_speed_tests_resources = [
+ "//resources/audio_coding/music_stereo_48kHz.pcm",
+ "//resources/audio_coding/speech_mono_16kHz.pcm",
+ "//resources/audio_coding/speech_mono_32_48kHz.pcm",
+ ]
+
+ if (is_ios) {
+ bundle_data("audio_codec_speed_tests_data") {
+ testonly = true
+ sources = audio_codec_speed_tests_resources
+ outputs = [ "{{bundle_resources_dir}}/{{source_file_part}}" ]
+ }
+ }
+
+ rtc_test("audio_codec_speed_tests") {
+ testonly = true
+ defines = []
+ deps = [
+ ":isac_fix_common",
+ "../../rtc_base:macromagic",
+ "../../test:fileutils",
+ ]
+ sources = [
+ "codecs/isac/fix/test/isac_speed_test.cc",
+ "codecs/opus/opus_speed_test.cc",
+ "codecs/tools/audio_codec_speed_test.cc",
+ "codecs/tools/audio_codec_speed_test.h",
+ ]
+
+ data = audio_codec_speed_tests_resources
+
+ if (is_android) {
+ use_default_launcher = false
+ deps += [
+ "//build/android/gtest_apk:native_test_instrumentation_test_runner_java",
+ "//testing/android/native_test:native_test_java",
+ "//testing/android/native_test:native_test_support",
+ ]
+ shard_timeout = 900
+ }
+
+ if (is_ios) {
+ deps += [ ":audio_codec_speed_tests_data" ]
+ }
+
+ deps += [
+ ":isac_fix",
+ ":webrtc_opus",
+ "../../rtc_base:checks",
+ "../../test:test_main",
+ "../../test:test_support",
+ "../audio_processing",
+ "//testing/gtest",
+ ]
+ }
+ }
+
+ rtc_library("neteq_test_support") {
+ testonly = true
+ sources = [
+ "neteq/tools/neteq_performance_test.cc",
+ "neteq/tools/neteq_performance_test.h",
+ ]
+
+ deps = [
+ ":default_neteq_factory",
+ ":neteq",
+ ":neteq_test_tools",
+ ":pcm16b",
+ "../../api/audio:audio_frame_api",
+ "../../api/audio_codecs:audio_codecs_api",
+ "../../api/audio_codecs:builtin_audio_decoder_factory",
+ "../../api/neteq:neteq_api",
+ "../../rtc_base:checks",
+ "../../system_wrappers",
+ "../../test:fileutils",
+ "../../test:test_support",
+ "//testing/gtest",
+ ]
+ }
+
+ if (!build_with_chromium) {
+ rtc_library("neteq_quality_test_support") {
+ testonly = true
+ sources = [
+ "neteq/tools/neteq_quality_test.cc",
+ "neteq/tools/neteq_quality_test.h",
+ ]
+
+ deps = [
+ ":default_neteq_factory",
+ ":neteq",
+ ":neteq_input_audio_tools",
+ ":neteq_test_tools",
+ ":neteq_tools_minimal",
+ "../../api/audio_codecs:builtin_audio_decoder_factory",
+ "../../api/neteq:neteq_api",
+ "../../rtc_base:checks",
+ "../../system_wrappers",
+ "../../test:fileutils",
+ "../../test:test_support",
+ "//testing/gtest",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/flags:flag",
+ "//third_party/abseil-cpp/absl/strings",
+ ]
+ }
+
+ rtc_executable("rtp_encode") {
+ testonly = true
+
+ deps = [
+ ":audio_coding",
+ ":audio_encoder_cng",
+ ":neteq_input_audio_tools",
+ "../../api/audio:audio_frame_api",
+ "../../api/audio_codecs/L16:audio_encoder_L16",
+ "../../api/audio_codecs/g711:audio_encoder_g711",
+ "../../api/audio_codecs/g722:audio_encoder_g722",
+ "../../api/audio_codecs/ilbc:audio_encoder_ilbc",
+ "../../api/audio_codecs/isac:audio_encoder_isac",
+ "../../api/audio_codecs/opus:audio_encoder_opus",
+ "../../rtc_base:safe_conversions",
+ "//third_party/abseil-cpp/absl/flags:flag",
+ "//third_party/abseil-cpp/absl/flags:parse",
+ "//third_party/abseil-cpp/absl/memory",
+ ]
+
+ deps += audio_coding_deps
+
+ sources = [ "neteq/tools/rtp_encode.cc" ]
+
+ defines = audio_coding_defines
+ }
+
+ rtc_executable("rtp_jitter") {
+ testonly = true
+
+ deps = [
+ "../../api:array_view",
+ "../../rtc_base:buffer",
+ "../rtp_rtcp:rtp_rtcp_format",
+ ]
+
+ deps += audio_coding_deps
+
+ sources = [ "neteq/tools/rtp_jitter.cc" ]
+
+ defines = audio_coding_defines
+ }
+
+ rtc_executable("rtpcat") {
+ testonly = true
+
+ sources = [ "neteq/tools/rtpcat.cc" ]
+
+ deps = [
+ "../../rtc_base:checks",
+ "../../test:rtp_test_utils",
+ "//testing/gtest",
+ ]
+ }
+
+ rtc_executable("rtp_analyze") {
+ testonly = true
+
+ sources = [ "neteq/tools/rtp_analyze.cc" ]
+
+ deps = [
+ ":neteq",
+ ":neteq_test_tools",
+ ":neteq_tools_minimal",
+ ":pcm16b",
+ "//testing/gtest",
+ "//third_party/abseil-cpp/absl/flags:flag",
+ "//third_party/abseil-cpp/absl/flags:parse",
+ ]
+ }
+
+ rtc_executable("neteq_opus_quality_test") {
+ testonly = true
+
+ sources = [ "neteq/test/neteq_opus_quality_test.cc" ]
+
+ deps = [
+ ":neteq",
+ ":neteq_quality_test_support",
+ ":neteq_tools",
+ ":webrtc_opus",
+ "../../test:test_main",
+ "//testing/gtest",
+ "//third_party/abseil-cpp/absl/flags:flag",
+ ]
+ }
+
+ rtc_executable("neteq_speed_test") {
+ testonly = true
+
+ sources = [ "neteq/test/neteq_speed_test.cc" ]
+
+ deps = [
+ ":neteq",
+ ":neteq_test_support",
+ "../../rtc_base:checks",
+ "../../test:test_support",
+ "//third_party/abseil-cpp/absl/flags:flag",
+ "//third_party/abseil-cpp/absl/flags:parse",
+ ]
+ }
+
+ rtc_executable("neteq_ilbc_quality_test") {
+ testonly = true
+
+ sources = [ "neteq/test/neteq_ilbc_quality_test.cc" ]
+
+ deps = [
+ ":ilbc",
+ ":neteq",
+ ":neteq_quality_test_support",
+ ":neteq_tools",
+ "../../rtc_base:checks",
+ "../../rtc_base:safe_conversions",
+ "../../test:fileutils",
+ "../../test:test_main",
+ "//testing/gtest",
+ "//third_party/abseil-cpp/absl/flags:flag",
+ ]
+ }
+
+ rtc_executable("neteq_isac_quality_test") {
+ testonly = true
+
+ sources = [ "neteq/test/neteq_isac_quality_test.cc" ]
+
+ deps = [
+ ":isac_fix",
+ ":neteq",
+ ":neteq_quality_test_support",
+ "../../test:test_main",
+ "//testing/gtest",
+ "//third_party/abseil-cpp/absl/flags:flag",
+ ]
+ }
+
+ rtc_executable("neteq_pcmu_quality_test") {
+ testonly = true
+
+ sources = [ "neteq/test/neteq_pcmu_quality_test.cc" ]
+
+ deps = [
+ ":g711",
+ ":neteq",
+ ":neteq_quality_test_support",
+ "../../rtc_base:checks",
+ "../../rtc_base:safe_conversions",
+ "../../test:fileutils",
+ "../../test:test_main",
+ "//testing/gtest",
+ "//third_party/abseil-cpp/absl/flags:flag",
+ ]
+ }
+
+ rtc_executable("neteq_pcm16b_quality_test") {
+ testonly = true
+
+ sources = [ "neteq/test/neteq_pcm16b_quality_test.cc" ]
+
+ deps = [
+ ":neteq",
+ ":neteq_quality_test_support",
+ ":pcm16b",
+ "../../rtc_base:checks",
+ "../../rtc_base:safe_conversions",
+ "../../test:fileutils",
+ "../../test:test_main",
+ "//testing/gtest",
+ "//third_party/abseil-cpp/absl/flags:flag",
+ ]
+ }
+ }
+
+ rtc_library("isac_test_util") {
+ testonly = true
+ sources = [
+ "codecs/isac/main/util/utility.c",
+ "codecs/isac/main/util/utility.h",
+ ]
+ }
+
+ if (!build_with_chromium) {
+ rtc_executable("isac_test") {
+ testonly = true
+
+ sources = [ "codecs/isac/main/test/simpleKenny.c" ]
+
+ deps = [
+ ":isac",
+ ":isac_test_util",
+ "../../rtc_base:macromagic",
+ ]
+ }
+ }
+
+ rtc_executable("g711_test") {
+ testonly = true
+
+ sources = [ "codecs/g711/test/testG711.cc" ]
+
+ deps = [ ":g711" ]
+ }
+
+ rtc_executable("g722_test") {
+ testonly = true
+
+ sources = [ "codecs/g722/test/testG722.cc" ]
+
+ deps = [ ":g722" ]
+ }
+
+ if (!build_with_chromium) {
+ rtc_executable("isac_api_test") {
+ testonly = true
+
+ sources = [ "codecs/isac/main/test/ReleaseTest-API/ReleaseTest-API.cc" ]
+
+ deps = [
+ ":isac",
+ ":isac_test_util",
+ "../../rtc_base:macromagic",
+ ]
+ }
+
+ rtc_executable("isac_switch_samprate_test") {
+ testonly = true
+
+ sources =
+ [ "codecs/isac/main/test/SwitchingSampRate/SwitchingSampRate.cc" ]
+
+ deps = [
+ ":isac",
+ ":isac_test_util",
+ "../../common_audio",
+ "../../common_audio:common_audio_c",
+ ]
+ }
+
+ rtc_executable("ilbc_test") {
+ testonly = true
+
+ sources = [ "codecs/ilbc/test/iLBC_test.c" ]
+
+ deps = [ ":ilbc" ]
+ }
+
+ rtc_executable("webrtc_opus_fec_test") {
+ testonly = true
+
+ sources = [ "codecs/opus/opus_fec_test.cc" ]
+
+ deps = [
+ ":webrtc_opus",
+ "../../common_audio",
+ "../../rtc_base:macromagic",
+ "../../test:fileutils",
+ "../../test:test_main",
+ "../../test:test_support",
+ "//testing/gtest",
+ ]
+ }
+
+ rtc_library("audio_coding_unittests") {
+ testonly = true
+ visibility += webrtc_default_visibility
+
+ sources = [
+ "acm2/acm_receiver_unittest.cc",
+ "acm2/acm_remixing_unittest.cc",
+ "acm2/audio_coding_module_unittest.cc",
+ "acm2/call_statistics_unittest.cc",
+ "audio_network_adaptor/audio_network_adaptor_impl_unittest.cc",
+ "audio_network_adaptor/bitrate_controller_unittest.cc",
+ "audio_network_adaptor/channel_controller_unittest.cc",
+ "audio_network_adaptor/controller_manager_unittest.cc",
+ "audio_network_adaptor/dtx_controller_unittest.cc",
+ "audio_network_adaptor/event_log_writer_unittest.cc",
+ "audio_network_adaptor/fec_controller_plr_based_unittest.cc",
+ "audio_network_adaptor/frame_length_controller_unittest.cc",
+ "audio_network_adaptor/frame_length_controller_v2_unittest.cc",
+ "audio_network_adaptor/util/threshold_curve_unittest.cc",
+ "codecs/builtin_audio_decoder_factory_unittest.cc",
+ "codecs/builtin_audio_encoder_factory_unittest.cc",
+ "codecs/cng/audio_encoder_cng_unittest.cc",
+ "codecs/cng/cng_unittest.cc",
+ "codecs/ilbc/ilbc_unittest.cc",
+ "codecs/isac/fix/source/filterbanks_unittest.cc",
+ "codecs/isac/fix/source/filters_unittest.cc",
+ "codecs/isac/fix/source/lpc_masking_model_unittest.cc",
+ "codecs/isac/fix/source/transform_unittest.cc",
+ "codecs/isac/isac_webrtc_api_test.cc",
+ "codecs/isac/main/source/audio_encoder_isac_unittest.cc",
+ "codecs/isac/main/source/isac_unittest.cc",
+ "codecs/legacy_encoded_audio_frame_unittest.cc",
+ "codecs/opus/audio_decoder_multi_channel_opus_unittest.cc",
+ "codecs/opus/audio_encoder_multi_channel_opus_unittest.cc",
+ "codecs/opus/audio_encoder_opus_unittest.cc",
+ "codecs/opus/opus_bandwidth_unittest.cc",
+ "codecs/opus/opus_unittest.cc",
+ "codecs/red/audio_encoder_copy_red_unittest.cc",
+ "neteq/audio_multi_vector_unittest.cc",
+ "neteq/audio_vector_unittest.cc",
+ "neteq/background_noise_unittest.cc",
+ "neteq/buffer_level_filter_unittest.cc",
+ "neteq/comfort_noise_unittest.cc",
+ "neteq/decision_logic_unittest.cc",
+ "neteq/decoder_database_unittest.cc",
+ "neteq/delay_manager_unittest.cc",
+ "neteq/dsp_helper_unittest.cc",
+ "neteq/dtmf_buffer_unittest.cc",
+ "neteq/dtmf_tone_generator_unittest.cc",
+ "neteq/expand_unittest.cc",
+ "neteq/histogram_unittest.cc",
+ "neteq/merge_unittest.cc",
+ "neteq/mock/mock_buffer_level_filter.h",
+ "neteq/mock/mock_decoder_database.h",
+ "neteq/mock/mock_delay_manager.h",
+ "neteq/mock/mock_dtmf_buffer.h",
+ "neteq/mock/mock_dtmf_tone_generator.h",
+ "neteq/mock/mock_expand.h",
+ "neteq/mock/mock_histogram.h",
+ "neteq/mock/mock_neteq_controller.h",
+ "neteq/mock/mock_packet_buffer.h",
+ "neteq/mock/mock_red_payload_splitter.h",
+ "neteq/mock/mock_statistics_calculator.h",
+ "neteq/nack_tracker_unittest.cc",
+ "neteq/neteq_decoder_plc_unittest.cc",
+ "neteq/neteq_impl_unittest.cc",
+ "neteq/neteq_network_stats_unittest.cc",
+ "neteq/neteq_stereo_unittest.cc",
+ "neteq/neteq_unittest.cc",
+ "neteq/normal_unittest.cc",
+ "neteq/packet_arrival_history_unittest.cc",
+ "neteq/packet_buffer_unittest.cc",
+ "neteq/post_decode_vad_unittest.cc",
+ "neteq/random_vector_unittest.cc",
+ "neteq/red_payload_splitter_unittest.cc",
+ "neteq/reorder_optimizer_unittest.cc",
+ "neteq/statistics_calculator_unittest.cc",
+ "neteq/sync_buffer_unittest.cc",
+ "neteq/time_stretch_unittest.cc",
+ "neteq/timestamp_scaler_unittest.cc",
+ "neteq/tools/input_audio_file_unittest.cc",
+ "neteq/tools/packet_unittest.cc",
+ "neteq/underrun_optimizer_unittest.cc",
+ ]
+
+ deps = [
+ ":acm_receive_test",
+ ":acm_send_test",
+ ":audio_coding",
+ ":audio_coding_module_typedefs",
+ ":audio_coding_modules_tests_shared",
+ ":audio_coding_opus_common",
+ ":audio_encoder_cng",
+ ":audio_network_adaptor",
+ ":default_neteq_factory",
+ ":g711",
+ ":ilbc",
+ ":isac",
+ ":isac_c",
+ ":isac_common",
+ ":isac_fix",
+ ":isac_fix_common",
+ ":legacy_encoded_audio_frame",
+ ":mocks",
+ ":neteq",
+ ":neteq_input_audio_tools",
+ ":neteq_test_support",
+ ":neteq_test_tools",
+ ":neteq_tools",
+ ":neteq_tools_minimal",
+ ":pcm16b",
+ ":red",
+ ":webrtc_cng",
+ ":webrtc_opus",
+ "..:module_api",
+ "..:module_api_public",
+ "../../api:array_view",
+ "../../api/audio:audio_frame_api",
+ "../../api/audio_codecs:audio_codecs_api",
+ "../../api/audio_codecs:builtin_audio_decoder_factory",
+ "../../api/audio_codecs:builtin_audio_encoder_factory",
+ "../../api/audio_codecs/isac:audio_decoder_isac_fix",
+ "../../api/audio_codecs/isac:audio_decoder_isac_float",
+ "../../api/audio_codecs/isac:audio_encoder_isac_fix",
+ "../../api/audio_codecs/isac:audio_encoder_isac_float",
+ "../../api/audio_codecs/opus:audio_decoder_multiopus",
+ "../../api/audio_codecs/opus:audio_decoder_opus",
+ "../../api/audio_codecs/opus:audio_encoder_multiopus",
+ "../../api/audio_codecs/opus:audio_encoder_opus",
+ "../../api/neteq:default_neteq_controller_factory",
+ "../../api/neteq:neteq_api",
+ "../../api/neteq:neteq_controller_api",
+ "../../api/neteq:tick_timer",
+ "../../api/neteq:tick_timer_unittest",
+ "../../api/rtc_event_log",
+ "../../common_audio",
+ "../../common_audio:common_audio_c",
+ "../../common_audio:mock_common_audio",
+ "../../logging:mocks",
+ "../../logging:rtc_event_audio",
+ "../../modules/rtp_rtcp:rtp_rtcp_format",
+ "../../rtc_base",
+ "../../rtc_base:checks",
+ "../../rtc_base:ignore_wundef",
+ "../../rtc_base:macromagic",
+ "../../rtc_base:platform_thread",
+ "../../rtc_base:refcount",
+ "../../rtc_base:rtc_base_tests_utils",
+ "../../rtc_base:rtc_event",
+ "../../rtc_base:safe_conversions",
+ "../../rtc_base:sanitizer",
+ "../../rtc_base:stringutils",
+ "../../rtc_base:timeutils",
+ "../../rtc_base/synchronization:mutex",
+ "../../rtc_base/system:arch",
+ "../../system_wrappers",
+ "../../test:audio_codec_mocks",
+ "../../test:audio_test_common",
+ "../../test:field_trial",
+ "../../test:fileutils",
+ "../../test:rtc_expect_death",
+ "../../test:rtp_test_utils",
+ "../../test:scoped_key_value_config",
+ "../../test:test_common",
+ "../../test:test_support",
+ "codecs/opus/test",
+ "codecs/opus/test:test_unittest",
+ "//testing/gtest",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/flags:flag",
+ "//third_party/abseil-cpp/absl/memory",
+ "//third_party/abseil-cpp/absl/strings",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+
+ defines = audio_coding_defines
+
+ if (rtc_enable_protobuf) {
+ defines += [ "WEBRTC_NETEQ_UNITTEST_BITEXACT" ]
+ deps += [
+ ":ana_config_proto",
+ ":neteq_unittest_proto",
+ ]
+ }
+ }
+ }
+}
+
+# For backwards compatibility only! Use
+# webrtc/api/audio_codecs:audio_codecs_api instead.
+# TODO(kwiberg): Remove this.
+rtc_source_set("audio_decoder_interface") {
+ visibility += [ "*" ]
+ sources = [ "codecs/audio_decoder.h" ]
+ deps = [ "../../api/audio_codecs:audio_codecs_api" ]
+}
+
+# For backwards compatibility only! Use
+# webrtc/api/audio_codecs:audio_codecs_api instead.
+# TODO(ossu): Remove this.
+rtc_source_set("audio_encoder_interface") {
+ visibility += [ "*" ]
+ sources = [ "codecs/audio_encoder.h" ]
+ deps = [ "../../api/audio_codecs:audio_codecs_api" ]
+}
diff --git a/third_party/libwebrtc/modules/audio_coding/DEPS b/third_party/libwebrtc/modules/audio_coding/DEPS
new file mode 100644
index 0000000000..3dc9624a4b
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/DEPS
@@ -0,0 +1,7 @@
+include_rules = [
+ "+call",
+ "+common_audio",
+ "+logging/rtc_event_log",
+ "+audio_coding/neteq/neteq_unittest.pb.h", # Different path.
+ "+system_wrappers",
+]
diff --git a/third_party/libwebrtc/modules/audio_coding/OWNERS b/third_party/libwebrtc/modules/audio_coding/OWNERS
new file mode 100644
index 0000000000..c27c2a8d2d
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/OWNERS
@@ -0,0 +1,4 @@
+henrik.lundin@webrtc.org
+minyue@webrtc.org
+ivoc@webrtc.org
+jakobi@webrtc.org
diff --git a/third_party/libwebrtc/modules/audio_coding/acm2/acm_receive_test.cc b/third_party/libwebrtc/modules/audio_coding/acm2/acm_receive_test.cc
new file mode 100644
index 0000000000..8bc76cd2af
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/acm2/acm_receive_test.cc
@@ -0,0 +1,161 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/acm2/acm_receive_test.h"
+
+#include <stdio.h>
+
+#include <memory>
+
+#include "api/audio_codecs/builtin_audio_decoder_factory.h"
+#include "modules/audio_coding/include/audio_coding_module.h"
+#include "modules/audio_coding/neteq/tools/audio_sink.h"
+#include "modules/audio_coding/neteq/tools/packet.h"
+#include "modules/audio_coding/neteq/tools/packet_source.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace test {
+
+namespace {
+AudioCodingModule::Config MakeAcmConfig(
+ Clock* clock,
+ rtc::scoped_refptr<AudioDecoderFactory> decoder_factory) {
+ AudioCodingModule::Config config;
+ config.clock = clock;
+ config.decoder_factory = std::move(decoder_factory);
+ return config;
+}
+} // namespace
+
+AcmReceiveTestOldApi::AcmReceiveTestOldApi(
+ PacketSource* packet_source,
+ AudioSink* audio_sink,
+ int output_freq_hz,
+ NumOutputChannels expected_output_channels,
+ rtc::scoped_refptr<AudioDecoderFactory> decoder_factory)
+ : clock_(0),
+ acm_(webrtc::AudioCodingModule::Create(
+ MakeAcmConfig(&clock_, std::move(decoder_factory)))),
+ packet_source_(packet_source),
+ audio_sink_(audio_sink),
+ output_freq_hz_(output_freq_hz),
+ expected_output_channels_(expected_output_channels) {}
+
+AcmReceiveTestOldApi::~AcmReceiveTestOldApi() = default;
+
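+// Each map entry below is {RTP payload type, {codec name, clock rate [Hz],
+// number of channels[, parameters]}}, i.e. an SdpAudioFormat keyed by
+// payload type.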
+void AcmReceiveTestOldApi::RegisterDefaultCodecs() {
+ acm_->SetReceiveCodecs({{103, {"ISAC", 16000, 1}},
+ {104, {"ISAC", 32000, 1}},
+ {107, {"L16", 8000, 1}},
+ {108, {"L16", 16000, 1}},
+ {109, {"L16", 32000, 1}},
+ {111, {"L16", 8000, 2}},
+ {112, {"L16", 16000, 2}},
+ {113, {"L16", 32000, 2}},
+ {0, {"PCMU", 8000, 1}},
+ {110, {"PCMU", 8000, 2}},
+ {8, {"PCMA", 8000, 1}},
+ {118, {"PCMA", 8000, 2}},
+ {102, {"ILBC", 8000, 1}},
+ {9, {"G722", 8000, 1}},
+ {119, {"G722", 8000, 2}},
+ {120, {"OPUS", 48000, 2, {{"stereo", "1"}}}},
+ {13, {"CN", 8000, 1}},
+ {98, {"CN", 16000, 1}},
+ {99, {"CN", 32000, 1}}});
+}
+
+// Remaps payload types from ACM's default to those used in the resource file
+// neteq_universal_new.rtp.
+void AcmReceiveTestOldApi::RegisterNetEqTestCodecs() {
+ acm_->SetReceiveCodecs({{103, {"ISAC", 16000, 1}},
+ {104, {"ISAC", 32000, 1}},
+ {93, {"L16", 8000, 1}},
+ {94, {"L16", 16000, 1}},
+ {95, {"L16", 32000, 1}},
+ {0, {"PCMU", 8000, 1}},
+ {8, {"PCMA", 8000, 1}},
+ {102, {"ILBC", 8000, 1}},
+ {9, {"G722", 8000, 1}},
+ {120, {"OPUS", 48000, 2}},
+ {13, {"CN", 8000, 1}},
+ {98, {"CN", 16000, 1}},
+ {99, {"CN", 32000, 1}}});
+}
+
+void AcmReceiveTestOldApi::Run() {
+ for (std::unique_ptr<Packet> packet(packet_source_->NextPacket()); packet;
+ packet = packet_source_->NextPacket()) {
+ // Pull audio until time to insert packet.
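+ // Each iteration renders one 10 ms block of audio and advances the
+ // simulated clock by 10 ms, so the loop exits once the packet's send
+ // time has been reached.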
+ while (clock_.TimeInMilliseconds() < packet->time_ms()) {
+ AudioFrame output_frame;
+ bool muted;
+ EXPECT_EQ(0,
+ acm_->PlayoutData10Ms(output_freq_hz_, &output_frame, &muted));
+ ASSERT_EQ(output_freq_hz_, output_frame.sample_rate_hz_);
+ ASSERT_FALSE(muted);
+ const size_t samples_per_block =
+ static_cast<size_t>(output_freq_hz_ * 10 / 1000);
+ EXPECT_EQ(samples_per_block, output_frame.samples_per_channel_);
+ if (expected_output_channels_ != kArbitraryChannels) {
+ if (output_frame.speech_type_ == webrtc::AudioFrame::kPLC) {
+ // Don't check number of channels for PLC output, since each test run
+ // usually starts with a short period of mono PLC before decoding the
+ // first packet.
+ } else {
+ EXPECT_EQ(expected_output_channels_, output_frame.num_channels_);
+ }
+ }
+ ASSERT_TRUE(audio_sink_->WriteAudioFrame(output_frame));
+ clock_.AdvanceTimeMilliseconds(10);
+ AfterGetAudio();
+ }
+
+ EXPECT_EQ(0, acm_->IncomingPacket(
+ packet->payload(),
+ static_cast<int32_t>(packet->payload_length_bytes()),
+ packet->header()))
+ << "Failure when inserting packet:" << std::endl
+ << " PT = " << static_cast<int>(packet->header().payloadType)
+ << std::endl
+ << " TS = " << packet->header().timestamp << std::endl
+ << " SN = " << packet->header().sequenceNumber;
+ }
+}
+
+AcmReceiveTestToggleOutputFreqOldApi::AcmReceiveTestToggleOutputFreqOldApi(
+ PacketSource* packet_source,
+ AudioSink* audio_sink,
+ int output_freq_hz_1,
+ int output_freq_hz_2,
+ int toggle_period_ms,
+ NumOutputChannels expected_output_channels)
+ : AcmReceiveTestOldApi(packet_source,
+ audio_sink,
+ output_freq_hz_1,
+ expected_output_channels,
+ CreateBuiltinAudioDecoderFactory()),
+ output_freq_hz_1_(output_freq_hz_1),
+ output_freq_hz_2_(output_freq_hz_2),
+ toggle_period_ms_(toggle_period_ms),
+ last_toggle_time_ms_(clock_.TimeInMilliseconds()) {}
+
+void AcmReceiveTestToggleOutputFreqOldApi::AfterGetAudio() {
+ if (clock_.TimeInMilliseconds() >= last_toggle_time_ms_ + toggle_period_ms_) {
+ output_freq_hz_ = (output_freq_hz_ == output_freq_hz_1_)
+ ? output_freq_hz_2_
+ : output_freq_hz_1_;
+ last_toggle_time_ms_ = clock_.TimeInMilliseconds();
+ }
+}
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/acm2/acm_receive_test.h b/third_party/libwebrtc/modules/audio_coding/acm2/acm_receive_test.h
new file mode 100644
index 0000000000..2095ef9025
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/acm2/acm_receive_test.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_ACM2_ACM_RECEIVE_TEST_H_
+#define MODULES_AUDIO_CODING_ACM2_ACM_RECEIVE_TEST_H_
+
+#include <stddef.h> // for size_t
+
+#include <memory>
+#include <string>
+
+#include "api/audio_codecs/audio_decoder_factory.h"
+#include "api/scoped_refptr.h"
+#include "system_wrappers/include/clock.h"
+
+namespace webrtc {
+class AudioCodingModule;
+class AudioDecoder;
+
+namespace test {
+class AudioSink;
+class PacketSource;
+
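+// Drives packets from a PacketSource through an AudioCodingModule and writes
+// the decoded audio to an AudioSink. Typical use: construct the test, call
+// RegisterDefaultCodecs() or RegisterNetEqTestCodecs(), then Run().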
+class AcmReceiveTestOldApi {
+ public:
+ enum NumOutputChannels : size_t {
+ kArbitraryChannels = 0,
+ kMonoOutput = 1,
+ kStereoOutput = 2,
+ kQuadOutput = 4
+ };
+
+ AcmReceiveTestOldApi(PacketSource* packet_source,
+ AudioSink* audio_sink,
+ int output_freq_hz,
+ NumOutputChannels expected_output_channels,
+ rtc::scoped_refptr<AudioDecoderFactory> decoder_factory);
+ virtual ~AcmReceiveTestOldApi();
+
+ AcmReceiveTestOldApi(const AcmReceiveTestOldApi&) = delete;
+ AcmReceiveTestOldApi& operator=(const AcmReceiveTestOldApi&) = delete;
+
+ // Registers the codecs with default parameters from ACM.
+ void RegisterDefaultCodecs();
+
+ // Registers codecs with payload types matching the pre-encoded NetEq test
+ // files.
+ void RegisterNetEqTestCodecs();
+
+ // Runs the test. Failures are reported through gtest assertions.
+ void Run();
+
+ AudioCodingModule* get_acm() { return acm_.get(); }
+
+ protected:
+ // Method is called after each block of output audio is received from ACM.
+ virtual void AfterGetAudio() {}
+
+ SimulatedClock clock_;
+ std::unique_ptr<AudioCodingModule> acm_;
+ PacketSource* packet_source_;
+ AudioSink* audio_sink_;
+ int output_freq_hz_;
+ NumOutputChannels expected_output_channels_;
+};
+
+// This test toggles the output frequency every `toggle_period_ms`. The test
+// starts with `output_freq_hz_1`. Except for the toggling, it does the same
+// thing as AcmReceiveTestOldApi.
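+// For example, with output_freq_hz_1 = 16000, output_freq_hz_2 = 32000 and
+// toggle_period_ms = 1000, the output alternates between 16 kHz and 32 kHz
+// once per second.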
+class AcmReceiveTestToggleOutputFreqOldApi : public AcmReceiveTestOldApi {
+ public:
+ AcmReceiveTestToggleOutputFreqOldApi(
+ PacketSource* packet_source,
+ AudioSink* audio_sink,
+ int output_freq_hz_1,
+ int output_freq_hz_2,
+ int toggle_period_ms,
+ NumOutputChannels expected_output_channels);
+
+ protected:
+ void AfterGetAudio() override;
+
+ const int output_freq_hz_1_;
+ const int output_freq_hz_2_;
+ const int toggle_period_ms_;
+ int64_t last_toggle_time_ms_;
+};
+
+} // namespace test
+} // namespace webrtc
+#endif // MODULES_AUDIO_CODING_ACM2_ACM_RECEIVE_TEST_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/acm2/acm_receiver.cc b/third_party/libwebrtc/modules/audio_coding/acm2/acm_receiver.cc
new file mode 100644
index 0000000000..c6c2f0cf50
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/acm2/acm_receiver.cc
@@ -0,0 +1,358 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/acm2/acm_receiver.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+#include <cstdint>
+#include <vector>
+
+#include "absl/strings/match.h"
+#include "api/audio/audio_frame.h"
+#include "api/audio_codecs/audio_decoder.h"
+#include "api/neteq/neteq.h"
+#include "modules/audio_coding/acm2/acm_resampler.h"
+#include "modules/audio_coding/acm2/call_statistics.h"
+#include "modules/audio_coding/neteq/default_neteq_factory.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "rtc_base/strings/audio_format_to_string.h"
+#include "system_wrappers/include/clock.h"
+
+namespace webrtc {
+
+namespace acm2 {
+
+namespace {
+
+std::unique_ptr<NetEq> CreateNetEq(
+ NetEqFactory* neteq_factory,
+ const NetEq::Config& config,
+ Clock* clock,
+ const rtc::scoped_refptr<AudioDecoderFactory>& decoder_factory) {
+ if (neteq_factory) {
+ return neteq_factory->CreateNetEq(config, decoder_factory, clock);
+ }
+ return DefaultNetEqFactory().CreateNetEq(config, decoder_factory, clock);
+}
+
+} // namespace
+
+AcmReceiver::AcmReceiver(const AudioCodingModule::Config& config)
+ : last_audio_buffer_(new int16_t[AudioFrame::kMaxDataSizeSamples]),
+ neteq_(CreateNetEq(config.neteq_factory,
+ config.neteq_config,
+ config.clock,
+ config.decoder_factory)),
+ clock_(config.clock),
+ resampled_last_output_frame_(true) {
+ RTC_DCHECK(clock_);
+ memset(last_audio_buffer_.get(), 0,
+ sizeof(int16_t) * AudioFrame::kMaxDataSizeSamples);
+}
+
+AcmReceiver::~AcmReceiver() = default;
+
+int AcmReceiver::SetMinimumDelay(int delay_ms) {
+ if (neteq_->SetMinimumDelay(delay_ms))
+ return 0;
+ RTC_LOG(LS_ERROR) << "AcmReceiver::SetExtraDelay " << delay_ms;
+ return -1;
+}
+
+int AcmReceiver::SetMaximumDelay(int delay_ms) {
+ if (neteq_->SetMaximumDelay(delay_ms))
+ return 0;
+ RTC_LOG(LS_ERROR) << "AcmReceiver::SetExtraDelay " << delay_ms;
+ return -1;
+}
+
+bool AcmReceiver::SetBaseMinimumDelayMs(int delay_ms) {
+ return neteq_->SetBaseMinimumDelayMs(delay_ms);
+}
+
+int AcmReceiver::GetBaseMinimumDelayMs() const {
+ return neteq_->GetBaseMinimumDelayMs();
+}
+
+absl::optional<int> AcmReceiver::last_packet_sample_rate_hz() const {
+ MutexLock lock(&mutex_);
+ if (!last_decoder_) {
+ return absl::nullopt;
+ }
+ return last_decoder_->sample_rate_hz;
+}
+
+int AcmReceiver::last_output_sample_rate_hz() const {
+ return neteq_->last_output_sample_rate_hz();
+}
+
+int AcmReceiver::InsertPacket(const RTPHeader& rtp_header,
+ rtc::ArrayView<const uint8_t> incoming_payload) {
+ if (incoming_payload.empty()) {
+ neteq_->InsertEmptyPacket(rtp_header);
+ return 0;
+ }
+
+ int payload_type = rtp_header.payloadType;
+ auto format = neteq_->GetDecoderFormat(payload_type);
+ if (format && absl::EqualsIgnoreCase(format->sdp_format.name, "red")) {
+ // This is a RED packet. Get the format of the audio codec.
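+ // Per RFC 2198, the first octet of a RED block header is the F bit
+ // followed by the 7-bit block payload type, so masking with 0x7f
+ // recovers the payload type of the encapsulated codec.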
+ payload_type = incoming_payload[0] & 0x7f;
+ format = neteq_->GetDecoderFormat(payload_type);
+ }
+ if (!format) {
+ RTC_LOG_F(LS_ERROR) << "Payload-type " << payload_type
+ << " is not registered.";
+ return -1;
+ }
+
+ {
+ MutexLock lock(&mutex_);
+ if (absl::EqualsIgnoreCase(format->sdp_format.name, "cn")) {
+ if (last_decoder_ && last_decoder_->num_channels > 1) {
+ // This is CNG and the audio codec is not mono, so skip pushing the
+ // packet into NetEq.
+ return 0;
+ }
+ } else {
+ last_decoder_ = DecoderInfo{/*payload_type=*/payload_type,
+ /*sample_rate_hz=*/format->sample_rate_hz,
+ /*num_channels=*/format->num_channels,
+ /*sdp_format=*/std::move(format->sdp_format)};
+ last_audio_format_clockrate_hz_ = format->sdp_format.clockrate_hz;
+ }
+ } // `mutex_` is released.
+
+ if (neteq_->InsertPacket(rtp_header, incoming_payload) < 0) {
+ RTC_LOG(LS_ERROR) << "AcmReceiver::InsertPacket "
+ << static_cast<int>(rtp_header.payloadType)
+ << " Failed to insert packet";
+ return -1;
+ }
+ return 0;
+}
+
+int AcmReceiver::GetAudio(int desired_freq_hz,
+ AudioFrame* audio_frame,
+ bool* muted) {
+ RTC_DCHECK(muted);
+
+ int current_sample_rate_hz = 0;
+ if (neteq_->GetAudio(audio_frame, muted, &current_sample_rate_hz) !=
+ NetEq::kOK) {
+ RTC_LOG(LS_ERROR) << "AcmReceiver::GetAudio - NetEq Failed.";
+ return -1;
+ }
+
+ RTC_DCHECK_NE(current_sample_rate_hz, 0);
+
+ // Update if resampling is required.
+ const bool need_resampling =
+ (desired_freq_hz != -1) && (current_sample_rate_hz != desired_freq_hz);
+
+ // Accessing members, take the lock.
+ MutexLock lock(&mutex_);
+ if (need_resampling && !resampled_last_output_frame_) {
+ // Prime the resampler with the last frame.
+ int16_t temp_output[AudioFrame::kMaxDataSizeSamples];
+ int samples_per_channel_int = resampler_.Resample10Msec(
+ last_audio_buffer_.get(), current_sample_rate_hz, desired_freq_hz,
+ audio_frame->num_channels_, AudioFrame::kMaxDataSizeSamples,
+ temp_output);
+ if (samples_per_channel_int < 0) {
+ RTC_LOG(LS_ERROR) << "AcmReceiver::GetAudio - "
+ "Resampling last_audio_buffer_ failed.";
+ return -1;
+ }
+ }
+
+ // TODO(bugs.webrtc.org/3923) Glitches in the output may appear if the output
+ // rate from NetEq changes.
+ if (need_resampling) {
+ // TODO(yujo): handle this more efficiently for muted frames.
+ int samples_per_channel_int = resampler_.Resample10Msec(
+ audio_frame->data(), current_sample_rate_hz, desired_freq_hz,
+ audio_frame->num_channels_, AudioFrame::kMaxDataSizeSamples,
+ audio_frame->mutable_data());
+ if (samples_per_channel_int < 0) {
+ RTC_LOG(LS_ERROR)
+ << "AcmReceiver::GetAudio - Resampling audio_buffer_ failed.";
+ return -1;
+ }
+ audio_frame->samples_per_channel_ =
+ static_cast<size_t>(samples_per_channel_int);
+ audio_frame->sample_rate_hz_ = desired_freq_hz;
+ RTC_DCHECK_EQ(
+ audio_frame->sample_rate_hz_,
+ rtc::dchecked_cast<int>(audio_frame->samples_per_channel_ * 100));
+ resampled_last_output_frame_ = true;
+ } else {
+ resampled_last_output_frame_ = false;
+ // We might end up here ONLY if the codec has changed.
+ }
+
+ // Store current audio in `last_audio_buffer_` for next time.
+ memcpy(last_audio_buffer_.get(), audio_frame->data(),
+ sizeof(int16_t) * audio_frame->samples_per_channel_ *
+ audio_frame->num_channels_);
+
+ call_stats_.DecodedByNetEq(audio_frame->speech_type_, *muted);
+
+ return 0;
+}
+
+void AcmReceiver::SetCodecs(const std::map<int, SdpAudioFormat>& codecs) {
+ neteq_->SetCodecs(codecs);
+}
+
+void AcmReceiver::FlushBuffers() {
+ neteq_->FlushBuffers();
+}
+
+void AcmReceiver::RemoveAllCodecs() {
+ MutexLock lock(&mutex_);
+ neteq_->RemoveAllPayloadTypes();
+ last_decoder_ = absl::nullopt;
+}
+
+absl::optional<uint32_t> AcmReceiver::GetPlayoutTimestamp() {
+ return neteq_->GetPlayoutTimestamp();
+}
+
+int AcmReceiver::FilteredCurrentDelayMs() const {
+ return neteq_->FilteredCurrentDelayMs();
+}
+
+int AcmReceiver::TargetDelayMs() const {
+ return neteq_->TargetDelayMs();
+}
+
+absl::optional<std::pair<int, SdpAudioFormat>> AcmReceiver::LastDecoder()
+ const {
+ MutexLock lock(&mutex_);
+ if (!last_decoder_) {
+ return absl::nullopt;
+ }
+ RTC_DCHECK_NE(-1, last_decoder_->payload_type);
+ return std::make_pair(last_decoder_->payload_type, last_decoder_->sdp_format);
+}
+
+void AcmReceiver::GetNetworkStatistics(
+ NetworkStatistics* acm_stat,
+ bool get_and_clear_legacy_stats /* = true */) const {
+ NetEqNetworkStatistics neteq_stat;
+ if (get_and_clear_legacy_stats) {
+ // NetEq function always returns zero, so we don't check the return value.
+ neteq_->NetworkStatistics(&neteq_stat);
+
+ acm_stat->currentExpandRate = neteq_stat.expand_rate;
+ acm_stat->currentSpeechExpandRate = neteq_stat.speech_expand_rate;
+ acm_stat->currentPreemptiveRate = neteq_stat.preemptive_rate;
+ acm_stat->currentAccelerateRate = neteq_stat.accelerate_rate;
+ acm_stat->currentSecondaryDecodedRate = neteq_stat.secondary_decoded_rate;
+ acm_stat->currentSecondaryDiscardedRate =
+ neteq_stat.secondary_discarded_rate;
+ acm_stat->meanWaitingTimeMs = neteq_stat.mean_waiting_time_ms;
+ acm_stat->maxWaitingTimeMs = neteq_stat.max_waiting_time_ms;
+ } else {
+ neteq_stat = neteq_->CurrentNetworkStatistics();
+ acm_stat->currentExpandRate = 0;
+ acm_stat->currentSpeechExpandRate = 0;
+ acm_stat->currentPreemptiveRate = 0;
+ acm_stat->currentAccelerateRate = 0;
+ acm_stat->currentSecondaryDecodedRate = 0;
+ acm_stat->currentSecondaryDiscardedRate = 0;
+ acm_stat->meanWaitingTimeMs = -1;
+ acm_stat->maxWaitingTimeMs = 1;
+ }
+ acm_stat->currentBufferSize = neteq_stat.current_buffer_size_ms;
+ acm_stat->preferredBufferSize = neteq_stat.preferred_buffer_size_ms;
+ acm_stat->jitterPeaksFound = neteq_stat.jitter_peaks_found ? true : false;
+
+ NetEqLifetimeStatistics neteq_lifetime_stat = neteq_->GetLifetimeStatistics();
+ acm_stat->totalSamplesReceived = neteq_lifetime_stat.total_samples_received;
+ acm_stat->concealedSamples = neteq_lifetime_stat.concealed_samples;
+ acm_stat->silentConcealedSamples =
+ neteq_lifetime_stat.silent_concealed_samples;
+ acm_stat->concealmentEvents = neteq_lifetime_stat.concealment_events;
+ acm_stat->jitterBufferDelayMs = neteq_lifetime_stat.jitter_buffer_delay_ms;
+ acm_stat->jitterBufferTargetDelayMs =
+ neteq_lifetime_stat.jitter_buffer_target_delay_ms;
+ acm_stat->jitterBufferMinimumDelayMs =
+ neteq_lifetime_stat.jitter_buffer_minimum_delay_ms;
+ acm_stat->jitterBufferEmittedCount =
+ neteq_lifetime_stat.jitter_buffer_emitted_count;
+ acm_stat->delayedPacketOutageSamples =
+ neteq_lifetime_stat.delayed_packet_outage_samples;
+ acm_stat->relativePacketArrivalDelayMs =
+ neteq_lifetime_stat.relative_packet_arrival_delay_ms;
+ acm_stat->interruptionCount = neteq_lifetime_stat.interruption_count;
+ acm_stat->totalInterruptionDurationMs =
+ neteq_lifetime_stat.total_interruption_duration_ms;
+ acm_stat->insertedSamplesForDeceleration =
+ neteq_lifetime_stat.inserted_samples_for_deceleration;
+ acm_stat->removedSamplesForAcceleration =
+ neteq_lifetime_stat.removed_samples_for_acceleration;
+ acm_stat->fecPacketsReceived = neteq_lifetime_stat.fec_packets_received;
+ acm_stat->fecPacketsDiscarded = neteq_lifetime_stat.fec_packets_discarded;
+ acm_stat->packetsDiscarded = neteq_lifetime_stat.packets_discarded;
+
+ NetEqOperationsAndState neteq_operations_and_state =
+ neteq_->GetOperationsAndState();
+ acm_stat->packetBufferFlushes =
+ neteq_operations_and_state.packet_buffer_flushes;
+}
+
+int AcmReceiver::LastAudioSampleRate() const {
+ return last_audio_format_clockrate_hz_;
+}
+
+int AcmReceiver::EnableNack(size_t max_nack_list_size) {
+ neteq_->EnableNack(max_nack_list_size);
+ return 0;
+}
+
+void AcmReceiver::DisableNack() {
+ neteq_->DisableNack();
+}
+
+std::vector<uint16_t> AcmReceiver::GetNackList(
+ int64_t round_trip_time_ms) const {
+ return neteq_->GetNackList(round_trip_time_ms);
+}
+
+void AcmReceiver::ResetInitialDelay() {
+ neteq_->SetMinimumDelay(0);
+ // TODO(turajs): Should NetEq Buffer be flushed?
+}
+
+uint32_t AcmReceiver::NowInTimestamp(int decoder_sampling_rate) const {
+ // Down-cast the time to (32-6) bits since we only care about
+ // the least significant bits. (32-6) bits cover 2^(32-6) = 67108864 ms.
+ // Masking out the 6 most significant bits of the 32-bit value ensures that
+ // the conversion from milliseconds to timestamp cannot overflow.
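+ // For example, with a 48000 Hz decoder and now_in_ms = 10000, the returned
+ // timestamp is (48000 / 1000) * 10000 = 480000.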
+ const uint32_t now_in_ms =
+ static_cast<uint32_t>(clock_->TimeInMilliseconds() & 0x03ffffff);
+ return static_cast<uint32_t>((decoder_sampling_rate / 1000) * now_in_ms);
+}
+
+void AcmReceiver::GetDecodingCallStatistics(
+ AudioDecodingCallStats* stats) const {
+ MutexLock lock(&mutex_);
+ *stats = call_stats_.GetDecodingStatistics();
+}
+
+} // namespace acm2
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/acm2/acm_receiver.h b/third_party/libwebrtc/modules/audio_coding/acm2/acm_receiver.h
new file mode 100644
index 0000000000..e0dfcd4d09
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/acm2/acm_receiver.h
@@ -0,0 +1,237 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_ACM2_ACM_RECEIVER_H_
+#define MODULES_AUDIO_CODING_ACM2_ACM_RECEIVER_H_
+
+#include <stdint.h>
+
+#include <map>
+#include <memory>
+#include <string>
+#include <utility>
+#include <vector>
+#include <atomic>
+
+#include "absl/types/optional.h"
+#include "api/array_view.h"
+#include "api/audio_codecs/audio_decoder.h"
+#include "api/audio_codecs/audio_format.h"
+#include "modules/audio_coding/acm2/acm_resampler.h"
+#include "modules/audio_coding/acm2/call_statistics.h"
+#include "modules/audio_coding/include/audio_coding_module.h"
+#include "rtc_base/synchronization/mutex.h"
+#include "rtc_base/thread_annotations.h"
+
+namespace webrtc {
+
+class Clock;
+class NetEq;
+struct RTPHeader;
+
+namespace acm2 {
+
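+// AcmReceiver wraps a NetEq instance: RTP packets go in through
+// InsertPacket() and 10 ms blocks of decoded audio come out through
+// GetAudio(). Minimal usage sketch (hypothetical calling code; assumes the
+// builtin decoder factory):
+//
+//   AudioCodingModule::Config config;
+//   config.clock = Clock::GetRealTimeClock();
+//   config.decoder_factory = CreateBuiltinAudioDecoderFactory();
+//   acm2::AcmReceiver receiver(config);
+//   receiver.SetCodecs({{0, SdpAudioFormat("PCMU", 8000, 1)}});
+//   // For each received packet:
+//   receiver.InsertPacket(rtp_header, payload);
+//   // Every 10 ms:
+//   AudioFrame frame;
+//   bool muted;
+//   receiver.GetAudio(/*desired_freq_hz=*/16000, &frame, &muted);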
+class AcmReceiver {
+ public:
+ // Constructor of the class
+ explicit AcmReceiver(const AudioCodingModule::Config& config);
+
+ // Destructor of the class.
+ ~AcmReceiver();
+
+ //
+ // Inserts a payload with its associated RTP-header into NetEq.
+ //
+ // Input:
+ // - rtp_header : RTP header for the incoming payload containing
+ // information about payload type, sequence number,
+ // timestamp, SSRC and marker bit.
+ // - incoming_payload : Incoming audio payload; its length in bytes
+ // is carried by the ArrayView.
+ //
+ // Return value : 0 if OK.
+ // <0 if NetEq returned an error.
+ //
+ int InsertPacket(const RTPHeader& rtp_header,
+ rtc::ArrayView<const uint8_t> incoming_payload);
+
+ //
+ // Asks NetEq for 10 milliseconds of decoded audio.
+ //
+ // Input:
+ // -desired_freq_hz : specifies the sampling rate [Hz] of the output
+ // audio. If set to -1, no resampling is
+ // performed and the audio is returned at the
+ // sampling rate of the decoder.
+ //
+ // Output:
+ // -audio_frame : an audio frame where output data and
+ // associated parameters are written to.
+ // -muted : if true, the sample data in audio_frame is not
+ // populated, and must be interpreted as all zero.
+ //
+ // Return value : 0 if OK.
+ // -1 if NetEq returned an error.
+ //
+ int GetAudio(int desired_freq_hz, AudioFrame* audio_frame, bool* muted);
+
+ // Replace the current set of decoders with the specified set.
+ void SetCodecs(const std::map<int, SdpAudioFormat>& codecs);
+
+ //
+ // Sets a minimum delay for packet buffer. The given delay is maintained,
+ // unless channel condition dictates a higher delay.
+ //
+ // Input:
+ // - delay_ms : minimum delay in milliseconds.
+ //
+ // Return value : 0 if OK.
+ // <0 if NetEq returned an error.
+ //
+ int SetMinimumDelay(int delay_ms);
+
+ //
+ // Sets a maximum delay [ms] for the packet buffer. The target delay does not
+ // exceed the given value, even if channel condition requires so.
+ //
+ // Input:
+ // - delay_ms : maximum delay in milliseconds.
+ //
+ // Return value : 0 if OK.
+ // <0 if NetEq returned an error.
+ //
+ int SetMaximumDelay(int delay_ms);
+
+ // Sets a base minimum delay in milliseconds for the packet buffer.
+ // The base minimum delay sets a lower bound on the minimum delay value
+ // that can be set via SetMinimumDelay.
+ //
+ // Returns true if the value was successfully set, false otherwise.
+ bool SetBaseMinimumDelayMs(int delay_ms);
+
+ // Returns current value of base minimum delay in milliseconds.
+ int GetBaseMinimumDelayMs() const;
+
+ //
+ // Resets the initial delay to zero.
+ //
+ void ResetInitialDelay();
+
+ // Returns the sample rate of the decoder associated with the last incoming
+ // packet. If no packet of a registered non-CNG codec has been received, the
+ // return value is empty. Also, if the decoder was unregistered since the last
+ // packet was inserted, the return value is empty.
+ absl::optional<int> last_packet_sample_rate_hz() const;
+
+ // Returns last_output_sample_rate_hz from the NetEq instance.
+ int last_output_sample_rate_hz() const;
+
+ //
+ // Get the current network statistics from NetEq.
+ //
+ // Output:
+ // - statistics : The current network statistics.
+ //
+ void GetNetworkStatistics(NetworkStatistics* statistics,
+ bool get_and_clear_legacy_stats = true) const;
+
+ //
+ // Flushes the NetEq packet and speech buffers.
+ //
+ void FlushBuffers();
+
+ //
+ // Remove all registered codecs.
+ //
+ void RemoveAllCodecs();
+
+ // Returns the RTP timestamp for the last sample delivered by GetAudio().
+ // The return value will be empty if no valid timestamp is available.
+ absl::optional<uint32_t> GetPlayoutTimestamp();
+
+ // Returns the current total delay from NetEq (packet buffer and sync buffer)
+ // in ms, with smoothing applied to even out short-time fluctuations due to
+ // jitter. The packet buffer part of the delay is not updated during DTX/CNG
+ // periods.
+ //
+ int FilteredCurrentDelayMs() const;
+
+ // Returns the current target delay for NetEq in ms.
+ //
+ int TargetDelayMs() const;
+
+ //
+ // Get payload type and format of the last non-CNG/non-DTMF received payload.
+ // If no non-CNG/non-DTMF packet is received absl::nullopt is returned.
+ //
+ absl::optional<std::pair<int, SdpAudioFormat>> LastDecoder() const;
+
+ int LastAudioSampleRate() const;
+
+ //
+ // Enable NACK and set the maximum size of the NACK list. If NACK is already
+ // enabled then the maximum NACK list size is modified accordingly.
+ //
+ // If the sequence number of the last received packet is N, the sequence
+ // numbers in the NACK list are in the range [N - `max_nack_list_size`, N).
+ //
+ // `max_nack_list_size` should be positive (non-zero) and less than or
+ // equal to `Nack::kNackListSizeLimit`. Otherwise, no change is applied and
+ // -1 is returned. 0 is returned on success.
+ //
+ int EnableNack(size_t max_nack_list_size);
+
+ // Disable NACK.
+ void DisableNack();
+
+ //
+ // Get a list of packets to be retransmitted. `round_trip_time_ms` is an
+ // estimate of the round-trip time (in milliseconds). Missing packets that
+ // will be played out sooner than the round-trip time (with respect to the
+ // time this API is called) will not be included in the list.
+ //
+ // A negative `round_trip_time_ms` results in an error message and an empty
+ // list is returned.
+ //
+ std::vector<uint16_t> GetNackList(int64_t round_trip_time_ms) const;
+
+  //
+  // Get statistics of calls to GetAudio().
+  //
+  void GetDecodingCallStatistics(AudioDecodingCallStats* stats) const;
+
+ private:
+ struct DecoderInfo {
+ int payload_type;
+ int sample_rate_hz;
+ int num_channels;
+ SdpAudioFormat sdp_format;
+ };
+
+ uint32_t NowInTimestamp(int decoder_sampling_rate) const;
+
+ mutable Mutex mutex_;
+ absl::optional<DecoderInfo> last_decoder_ RTC_GUARDED_BY(mutex_);
+ ACMResampler resampler_;
+
+ // After construction, this is only ever touched on the thread that calls
+ // AcmReceiver::GetAudio, and only modified in this method.
+ std::unique_ptr<int16_t[]> last_audio_buffer_;
+ CallStatistics call_stats_;
+ const std::unique_ptr<NetEq> neteq_; // NetEq is thread-safe; no lock needed.
+ Clock* const clock_;
+ std::atomic<bool> resampled_last_output_frame_;
+ std::atomic<int> last_audio_format_clockrate_hz_;
+};
+
+} // namespace acm2
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_CODING_ACM2_ACM_RECEIVER_H_
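To illustrate the NACK window described in the comments above, a minimal, hedged
usage sketch (the receiver construction, `config`, and packet insertion are
assumed to be set up elsewhere; the numbers are illustrative only):

    // Sketch: enable NACK with a list of up to 250 sequence numbers. With
    // last received sequence number N, reported misses fall in [N - 250, N).
    // Packets due for playout sooner than the round-trip time are excluded.
    acm2::AcmReceiver receiver(config);  // `config` is assumed to exist.
    receiver.EnableNack(250);            // Returns 0 on success.
    // ... InsertPacket() as packets arrive ...
    const std::vector<uint16_t> nack_list =
        receiver.GetNackList(/*round_trip_time_ms=*/80);
    for (const uint16_t seq : nack_list) {
      // Request retransmission of `seq` from the sender.
    }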
diff --git a/third_party/libwebrtc/modules/audio_coding/acm2/acm_receiver_unittest.cc b/third_party/libwebrtc/modules/audio_coding/acm2/acm_receiver_unittest.cc
new file mode 100644
index 0000000000..e73acc2338
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/acm2/acm_receiver_unittest.cc
@@ -0,0 +1,464 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/acm2/acm_receiver.h"
+
+#include <algorithm> // std::min
+#include <memory>
+
+#include "api/audio_codecs/builtin_audio_decoder_factory.h"
+#include "api/audio_codecs/builtin_audio_encoder_factory.h"
+#include "modules/audio_coding/codecs/cng/audio_encoder_cng.h"
+#include "modules/audio_coding/include/audio_coding_module.h"
+#include "modules/audio_coding/neteq/tools/rtp_generator.h"
+#include "modules/include/module_common_types.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "system_wrappers/include/clock.h"
+#include "test/gtest.h"
+#include "test/testsupport/file_utils.h"
+
+namespace webrtc {
+
+namespace acm2 {
+
+class AcmReceiverTestOldApi : public AudioPacketizationCallback,
+ public ::testing::Test {
+ protected:
+ AcmReceiverTestOldApi()
+ : timestamp_(0),
+ packet_sent_(false),
+ last_packet_send_timestamp_(timestamp_),
+ last_frame_type_(AudioFrameType::kEmptyFrame) {
+ config_.decoder_factory = decoder_factory_;
+ }
+
+ ~AcmReceiverTestOldApi() {}
+
+ void SetUp() override {
+ acm_.reset(AudioCodingModule::Create(config_));
+ receiver_.reset(new AcmReceiver(config_));
+    ASSERT_TRUE(receiver_.get() != nullptr);
+    ASSERT_TRUE(acm_.get() != nullptr);
+ acm_->InitializeReceiver();
+ acm_->RegisterTransportCallback(this);
+
+ rtp_header_.sequenceNumber = 0;
+ rtp_header_.timestamp = 0;
+ rtp_header_.markerBit = false;
+ rtp_header_.ssrc = 0x12345678; // Arbitrary.
+ rtp_header_.numCSRCs = 0;
+ rtp_header_.payloadType = 0;
+ }
+
+ void TearDown() override {}
+
+ AudioCodecInfo SetEncoder(int payload_type,
+ const SdpAudioFormat& format,
+ const std::map<int, int> cng_payload_types = {}) {
+ // Create the speech encoder.
+ AudioCodecInfo info = encoder_factory_->QueryAudioEncoder(format).value();
+ std::unique_ptr<AudioEncoder> enc =
+ encoder_factory_->MakeAudioEncoder(payload_type, format, absl::nullopt);
+
+ // If we have a compatible CN specification, stack a CNG on top.
+ auto it = cng_payload_types.find(info.sample_rate_hz);
+ if (it != cng_payload_types.end()) {
+ AudioEncoderCngConfig config;
+ config.speech_encoder = std::move(enc);
+ config.num_channels = 1;
+ config.payload_type = it->second;
+ config.vad_mode = Vad::kVadNormal;
+ enc = CreateComfortNoiseEncoder(std::move(config));
+ }
+
+ // Actually start using the new encoder.
+ acm_->SetEncoder(std::move(enc));
+ return info;
+ }
+
+ int InsertOnePacketOfSilence(const AudioCodecInfo& info) {
+ // Frame setup according to the codec.
+ AudioFrame frame;
+ frame.sample_rate_hz_ = info.sample_rate_hz;
+ frame.samples_per_channel_ = info.sample_rate_hz / 100; // 10 ms.
+ frame.num_channels_ = info.num_channels;
+ frame.Mute();
+ packet_sent_ = false;
+ last_packet_send_timestamp_ = timestamp_;
+ int num_10ms_frames = 0;
+ while (!packet_sent_) {
+ frame.timestamp_ = timestamp_;
+ timestamp_ += rtc::checked_cast<uint32_t>(frame.samples_per_channel_);
+ EXPECT_GE(acm_->Add10MsData(frame), 0);
+ ++num_10ms_frames;
+ }
+ return num_10ms_frames;
+ }
+
+ int SendData(AudioFrameType frame_type,
+ uint8_t payload_type,
+ uint32_t timestamp,
+ const uint8_t* payload_data,
+ size_t payload_len_bytes,
+ int64_t absolute_capture_timestamp_ms) override {
+ if (frame_type == AudioFrameType::kEmptyFrame)
+ return 0;
+
+ rtp_header_.payloadType = payload_type;
+ rtp_header_.timestamp = timestamp;
+
+ int ret_val = receiver_->InsertPacket(
+ rtp_header_,
+ rtc::ArrayView<const uint8_t>(payload_data, payload_len_bytes));
+ if (ret_val < 0) {
+ RTC_DCHECK_NOTREACHED();
+ return -1;
+ }
+ rtp_header_.sequenceNumber++;
+ packet_sent_ = true;
+ last_frame_type_ = frame_type;
+ return 0;
+ }
+
+ const rtc::scoped_refptr<AudioEncoderFactory> encoder_factory_ =
+ CreateBuiltinAudioEncoderFactory();
+ const rtc::scoped_refptr<AudioDecoderFactory> decoder_factory_ =
+ CreateBuiltinAudioDecoderFactory();
+ AudioCodingModule::Config config_;
+ std::unique_ptr<AcmReceiver> receiver_;
+ std::unique_ptr<AudioCodingModule> acm_;
+ RTPHeader rtp_header_;
+ uint32_t timestamp_;
+  // Set when SendData is called; reset when inserting audio.
+  bool packet_sent_;
+ uint32_t last_packet_send_timestamp_;
+ AudioFrameType last_frame_type_;
+};
+
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_SampleRate DISABLED_SampleRate
+#else
+#define MAYBE_SampleRate SampleRate
+#endif
+TEST_F(AcmReceiverTestOldApi, MAYBE_SampleRate) {
+ const std::map<int, SdpAudioFormat> codecs = {{0, {"ISAC", 16000, 1}},
+ {1, {"ISAC", 32000, 1}}};
+ receiver_->SetCodecs(codecs);
+
+  constexpr int kOutSampleRateHz = 8000;  // Different from the codec rates.
+ for (size_t i = 0; i < codecs.size(); ++i) {
+ const int payload_type = rtc::checked_cast<int>(i);
+ const int num_10ms_frames =
+ InsertOnePacketOfSilence(SetEncoder(payload_type, codecs.at(i)));
+ for (int k = 0; k < num_10ms_frames; ++k) {
+ AudioFrame frame;
+ bool muted;
+ EXPECT_EQ(0, receiver_->GetAudio(kOutSampleRateHz, &frame, &muted));
+ }
+ EXPECT_EQ(encoder_factory_->QueryAudioEncoder(codecs.at(i))->sample_rate_hz,
+ receiver_->last_output_sample_rate_hz());
+ }
+}
+
+class AcmReceiverTestFaxModeOldApi : public AcmReceiverTestOldApi {
+ protected:
+ AcmReceiverTestFaxModeOldApi() {
+ config_.neteq_config.for_test_no_time_stretching = true;
+ }
+
+ void RunVerifyAudioFrame(const SdpAudioFormat& codec) {
+    // Make sure "fax mode" is enabled. This avoids delay changes unless
+    // packet-loss concealment takes place. We do this in order to make the
+    // timestamp increments predictable; in normal mode, NetEq may decide to do
+    // accelerate or pre-emptive expand operations after some time, offsetting
+    // the timestamp.
+ EXPECT_TRUE(config_.neteq_config.for_test_no_time_stretching);
+
+ constexpr int payload_type = 17;
+ receiver_->SetCodecs({{payload_type, codec}});
+
+ const AudioCodecInfo info = SetEncoder(payload_type, codec);
+ const int output_sample_rate_hz = info.sample_rate_hz;
+ const size_t output_channels = info.num_channels;
+ const size_t samples_per_ms = rtc::checked_cast<size_t>(
+ rtc::CheckedDivExact(output_sample_rate_hz, 1000));
+ const AudioFrame::VADActivity expected_vad_activity =
+ output_sample_rate_hz > 16000 ? AudioFrame::kVadActive
+ : AudioFrame::kVadPassive;
+
+ // Expect the first output timestamp to be 5*fs/8000 samples before the
+ // first inserted timestamp (because of NetEq's look-ahead). (This value is
+ // defined in Expand::overlap_length_.)
+ uint32_t expected_output_ts =
+ last_packet_send_timestamp_ -
+ rtc::CheckedDivExact(5 * output_sample_rate_hz, 8000);
+
+ AudioFrame frame;
+ bool muted;
+ EXPECT_EQ(0, receiver_->GetAudio(output_sample_rate_hz, &frame, &muted));
+ // Expect timestamp = 0 before first packet is inserted.
+ EXPECT_EQ(0u, frame.timestamp_);
+ for (int i = 0; i < 5; ++i) {
+ const int num_10ms_frames = InsertOnePacketOfSilence(info);
+ for (int k = 0; k < num_10ms_frames; ++k) {
+ EXPECT_EQ(0,
+ receiver_->GetAudio(output_sample_rate_hz, &frame, &muted));
+ EXPECT_EQ(expected_output_ts, frame.timestamp_);
+ expected_output_ts += rtc::checked_cast<uint32_t>(10 * samples_per_ms);
+ EXPECT_EQ(10 * samples_per_ms, frame.samples_per_channel_);
+ EXPECT_EQ(output_sample_rate_hz, frame.sample_rate_hz_);
+ EXPECT_EQ(output_channels, frame.num_channels_);
+ EXPECT_EQ(AudioFrame::kNormalSpeech, frame.speech_type_);
+ EXPECT_EQ(expected_vad_activity, frame.vad_activity_);
+ EXPECT_FALSE(muted);
+ }
+ }
+ }
+};
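As a quick check of the look-ahead arithmetic in the comment above (assuming
the 5-samples-at-8-kHz overlap it cites from Expand::overlap_length_), the
expected offset scales with the sample rate:

    // At 16 kHz: 5 * 16000 / 8000 = 10 samples of look-ahead, so the first
    // output timestamp lands 10 ticks before the first inserted timestamp.
    // At 48 kHz the offset is 5 * 48000 / 8000 = 30 samples.
    constexpr int kOverlapSamplesAt8kHz = 5;
    constexpr int kLookAhead16kHz = kOverlapSamplesAt8kHz * 16000 / 8000;
    static_assert(kLookAhead16kHz == 10, "look-ahead at 16 kHz");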
+
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_VerifyAudioFramePCMU DISABLED_VerifyAudioFramePCMU
+#else
+#define MAYBE_VerifyAudioFramePCMU VerifyAudioFramePCMU
+#endif
+TEST_F(AcmReceiverTestFaxModeOldApi, MAYBE_VerifyAudioFramePCMU) {
+ RunVerifyAudioFrame({"PCMU", 8000, 1});
+}
+
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_VerifyAudioFrameISAC DISABLED_VerifyAudioFrameISAC
+#else
+#define MAYBE_VerifyAudioFrameISAC VerifyAudioFrameISAC
+#endif
+TEST_F(AcmReceiverTestFaxModeOldApi, MAYBE_VerifyAudioFrameISAC) {
+ RunVerifyAudioFrame({"ISAC", 16000, 1});
+}
+
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_VerifyAudioFrameOpus DISABLED_VerifyAudioFrameOpus
+#else
+#define MAYBE_VerifyAudioFrameOpus VerifyAudioFrameOpus
+#endif
+TEST_F(AcmReceiverTestFaxModeOldApi, MAYBE_VerifyAudioFrameOpus) {
+ RunVerifyAudioFrame({"opus", 48000, 2});
+}
+
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_PostdecodingVad DISABLED_PostdecodingVad
+#else
+#define MAYBE_PostdecodingVad PostdecodingVad
+#endif
+TEST_F(AcmReceiverTestOldApi, MAYBE_PostdecodingVad) {
+ EXPECT_TRUE(config_.neteq_config.enable_post_decode_vad);
+ constexpr int payload_type = 34;
+ const SdpAudioFormat codec = {"L16", 16000, 1};
+ const AudioCodecInfo info = SetEncoder(payload_type, codec);
+ receiver_->SetCodecs({{payload_type, codec}});
+ constexpr int kNumPackets = 5;
+ AudioFrame frame;
+ for (int n = 0; n < kNumPackets; ++n) {
+ const int num_10ms_frames = InsertOnePacketOfSilence(info);
+ for (int k = 0; k < num_10ms_frames; ++k) {
+ bool muted;
+ ASSERT_EQ(0, receiver_->GetAudio(info.sample_rate_hz, &frame, &muted));
+ }
+ }
+ EXPECT_EQ(AudioFrame::kVadPassive, frame.vad_activity_);
+}
+
+class AcmReceiverTestPostDecodeVadPassiveOldApi : public AcmReceiverTestOldApi {
+ protected:
+ AcmReceiverTestPostDecodeVadPassiveOldApi() {
+ config_.neteq_config.enable_post_decode_vad = false;
+ }
+};
+
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_PostdecodingVad DISABLED_PostdecodingVad
+#else
+#define MAYBE_PostdecodingVad PostdecodingVad
+#endif
+TEST_F(AcmReceiverTestPostDecodeVadPassiveOldApi, MAYBE_PostdecodingVad) {
+ EXPECT_FALSE(config_.neteq_config.enable_post_decode_vad);
+ constexpr int payload_type = 34;
+ const SdpAudioFormat codec = {"L16", 16000, 1};
+ const AudioCodecInfo info = SetEncoder(payload_type, codec);
+ auto const value = encoder_factory_->QueryAudioEncoder(codec);
+ ASSERT_TRUE(value.has_value());
+ receiver_->SetCodecs({{payload_type, codec}});
+ const int kNumPackets = 5;
+ AudioFrame frame;
+ for (int n = 0; n < kNumPackets; ++n) {
+ const int num_10ms_frames = InsertOnePacketOfSilence(info);
+ for (int k = 0; k < num_10ms_frames; ++k) {
+ bool muted;
+ ASSERT_EQ(0, receiver_->GetAudio(info.sample_rate_hz, &frame, &muted));
+ }
+ }
+ EXPECT_EQ(AudioFrame::kVadUnknown, frame.vad_activity_);
+}
+
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_LastAudioCodec DISABLED_LastAudioCodec
+#else
+#define MAYBE_LastAudioCodec LastAudioCodec
+#endif
+#if defined(WEBRTC_CODEC_ISAC)
+TEST_F(AcmReceiverTestOldApi, MAYBE_LastAudioCodec) {
+ const std::map<int, SdpAudioFormat> codecs = {{0, {"ISAC", 16000, 1}},
+ {1, {"PCMA", 8000, 1}},
+ {2, {"ISAC", 32000, 1}},
+ {3, {"L16", 32000, 1}}};
+ const std::map<int, int> cng_payload_types = {
+ {8000, 100}, {16000, 101}, {32000, 102}};
+ {
+ std::map<int, SdpAudioFormat> receive_codecs = codecs;
+ for (const auto& cng_type : cng_payload_types) {
+ receive_codecs.emplace(std::make_pair(
+ cng_type.second, SdpAudioFormat("CN", cng_type.first, 1)));
+ }
+ receiver_->SetCodecs(receive_codecs);
+ }
+
+ // No audio payload is received.
+ EXPECT_EQ(absl::nullopt, receiver_->LastDecoder());
+
+ // Start with sending DTX.
+ packet_sent_ = false;
+ InsertOnePacketOfSilence(
+ SetEncoder(0, codecs.at(0), cng_payload_types)); // Enough to test
+ // with one codec.
+ ASSERT_TRUE(packet_sent_);
+ EXPECT_EQ(AudioFrameType::kAudioFrameCN, last_frame_type_);
+
+  // Only DTX has been received, so the last audio codec is undefined.
+ EXPECT_EQ(absl::nullopt, receiver_->LastDecoder());
+ EXPECT_EQ(absl::nullopt, receiver_->last_packet_sample_rate_hz());
+
+ for (size_t i = 0; i < codecs.size(); ++i) {
+ // Set DTX off to send audio payload.
+ packet_sent_ = false;
+ const int payload_type = rtc::checked_cast<int>(i);
+ const AudioCodecInfo info_without_cng =
+ SetEncoder(payload_type, codecs.at(i));
+ InsertOnePacketOfSilence(info_without_cng);
+
+    // Sanity check that an audio payload was actually received, and that it
+    // is of type "speech."
+ ASSERT_TRUE(packet_sent_);
+ ASSERT_EQ(AudioFrameType::kAudioFrameSpeech, last_frame_type_);
+ EXPECT_EQ(info_without_cng.sample_rate_hz,
+ receiver_->last_packet_sample_rate_hz());
+
+ // Set VAD on to send DTX. Then check if the "Last Audio codec" returns
+ // the expected codec. Encode repeatedly until a DTX is sent.
+ const AudioCodecInfo info_with_cng =
+ SetEncoder(payload_type, codecs.at(i), cng_payload_types);
+ while (last_frame_type_ != AudioFrameType::kAudioFrameCN) {
+ packet_sent_ = false;
+ InsertOnePacketOfSilence(info_with_cng);
+ ASSERT_TRUE(packet_sent_);
+ }
+ EXPECT_EQ(info_with_cng.sample_rate_hz,
+ receiver_->last_packet_sample_rate_hz());
+ EXPECT_EQ(codecs.at(i), receiver_->LastDecoder()->second);
+ }
+}
+#endif
+
+// Checks that the statistics are initialized correctly: before any call to
+// ACM, all fields have to be zero.
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_InitializedToZero DISABLED_InitializedToZero
+#else
+#define MAYBE_InitializedToZero InitializedToZero
+#endif
+TEST_F(AcmReceiverTestOldApi, MAYBE_InitializedToZero) {
+ AudioDecodingCallStats stats;
+ receiver_->GetDecodingCallStatistics(&stats);
+ EXPECT_EQ(0, stats.calls_to_neteq);
+ EXPECT_EQ(0, stats.calls_to_silence_generator);
+ EXPECT_EQ(0, stats.decoded_normal);
+ EXPECT_EQ(0, stats.decoded_cng);
+ EXPECT_EQ(0, stats.decoded_neteq_plc);
+ EXPECT_EQ(0, stats.decoded_plc_cng);
+ EXPECT_EQ(0, stats.decoded_muted_output);
+}
+
+// Insert some packets and pull audio. Check statistics are valid. Then,
+// simulate packet loss and check if PLC and PLC-to-CNG statistics are
+// correctly updated.
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_NetEqCalls DISABLED_NetEqCalls
+#else
+#define MAYBE_NetEqCalls NetEqCalls
+#endif
+TEST_F(AcmReceiverTestOldApi, MAYBE_NetEqCalls) {
+ AudioDecodingCallStats stats;
+ const int kNumNormalCalls = 10;
+ const int kSampleRateHz = 16000;
+ const int kNumSamples10ms = kSampleRateHz / 100;
+ const int kFrameSizeMs = 10; // Multiple of 10.
+ const int kFrameSizeSamples = kFrameSizeMs / 10 * kNumSamples10ms;
+ const int kPayloadSizeBytes = kFrameSizeSamples * sizeof(int16_t);
+ const uint8_t kPayloadType = 111;
+ RTPHeader rtp_header;
+ AudioFrame audio_frame;
+ bool muted;
+
+ receiver_->SetCodecs(
+ {{kPayloadType, SdpAudioFormat("L16", kSampleRateHz, 1)}});
+ rtp_header.sequenceNumber = 0xABCD;
+ rtp_header.timestamp = 0xABCDEF01;
+ rtp_header.payloadType = kPayloadType;
+ rtp_header.markerBit = false;
+ rtp_header.ssrc = 0x1234;
+ rtp_header.numCSRCs = 0;
+ rtp_header.payload_type_frequency = kSampleRateHz;
+
+ for (int num_calls = 0; num_calls < kNumNormalCalls; ++num_calls) {
+ const uint8_t kPayload[kPayloadSizeBytes] = {0};
+ ASSERT_EQ(0, receiver_->InsertPacket(rtp_header, kPayload));
+ ++rtp_header.sequenceNumber;
+ rtp_header.timestamp += kFrameSizeSamples;
+ ASSERT_EQ(0, receiver_->GetAudio(-1, &audio_frame, &muted));
+ EXPECT_FALSE(muted);
+ }
+ receiver_->GetDecodingCallStatistics(&stats);
+ EXPECT_EQ(kNumNormalCalls, stats.calls_to_neteq);
+ EXPECT_EQ(0, stats.calls_to_silence_generator);
+ EXPECT_EQ(kNumNormalCalls, stats.decoded_normal);
+ EXPECT_EQ(0, stats.decoded_cng);
+ EXPECT_EQ(0, stats.decoded_neteq_plc);
+ EXPECT_EQ(0, stats.decoded_plc_cng);
+ EXPECT_EQ(0, stats.decoded_muted_output);
+
+ const int kNumPlc = 3;
+ const int kNumPlcCng = 5;
+
+  // Simulate packet loss. NetEq performs PLC first; the PLC then fades to CNG.
+ for (int n = 0; n < kNumPlc + kNumPlcCng; ++n) {
+ ASSERT_EQ(0, receiver_->GetAudio(-1, &audio_frame, &muted));
+ EXPECT_FALSE(muted);
+ }
+ receiver_->GetDecodingCallStatistics(&stats);
+ EXPECT_EQ(kNumNormalCalls + kNumPlc + kNumPlcCng, stats.calls_to_neteq);
+ EXPECT_EQ(0, stats.calls_to_silence_generator);
+ EXPECT_EQ(kNumNormalCalls, stats.decoded_normal);
+ EXPECT_EQ(0, stats.decoded_cng);
+ EXPECT_EQ(kNumPlc, stats.decoded_neteq_plc);
+ EXPECT_EQ(kNumPlcCng, stats.decoded_plc_cng);
+ EXPECT_EQ(0, stats.decoded_muted_output);
+ // TODO(henrik.lundin) Add a test with muted state enabled.
+}
+
+} // namespace acm2
+
+} // namespace webrtc
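The MAYBE_ macros used throughout the test file above follow the standard
GoogleTest convention that a test whose name starts with DISABLED_ is compiled
but not run by default. A minimal instance of the pattern, with hypothetical
suite and test names:

    #if defined(WEBRTC_ANDROID)
    #define MAYBE_SomeTest DISABLED_SomeTest
    #else
    #define MAYBE_SomeTest SomeTest
    #endif
    TEST(SomeSuite, MAYBE_SomeTest) {
      // Runs everywhere except Android, where it is compiled but disabled.
    }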
diff --git a/third_party/libwebrtc/modules/audio_coding/acm2/acm_remixing.cc b/third_party/libwebrtc/modules/audio_coding/acm2/acm_remixing.cc
new file mode 100644
index 0000000000..13709dbbee
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/acm2/acm_remixing.cc
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/acm2/acm_remixing.h"
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+void DownMixFrame(const AudioFrame& input, rtc::ArrayView<int16_t> output) {
+ RTC_DCHECK_EQ(input.num_channels_, 2);
+ RTC_DCHECK_EQ(output.size(), input.samples_per_channel_);
+
+ if (input.muted()) {
+ std::fill(output.begin(), output.begin() + input.samples_per_channel_, 0);
+ } else {
+ const int16_t* const input_data = input.data();
+ for (size_t n = 0; n < input.samples_per_channel_; ++n) {
+ output[n] = rtc::dchecked_cast<int16_t>(
+ (int32_t{input_data[2 * n]} + int32_t{input_data[2 * n + 1]}) >> 1);
+ }
+ }
+}
+
+void ReMixFrame(const AudioFrame& input,
+ size_t num_output_channels,
+ std::vector<int16_t>* output) {
+ const size_t output_size = num_output_channels * input.samples_per_channel_;
+ RTC_DCHECK(!(input.num_channels_ == 0 && num_output_channels > 0 &&
+ input.samples_per_channel_ > 0));
+
+ if (output->size() != output_size) {
+ output->resize(output_size);
+ }
+
+ // For muted frames, fill the frame with zeros.
+ if (input.muted()) {
+ std::fill(output->begin(), output->end(), 0);
+ return;
+ }
+
+ // Ensure that the special case of zero input channels is handled correctly
+ // (zero samples per channel is already handled correctly in the code below).
+ if (input.num_channels_ == 0) {
+ return;
+ }
+
+ const int16_t* const input_data = input.data();
+ size_t out_index = 0;
+
+  // When upmixing is needed and the input is mono, copy the mono channel into
+  // the left and right channels, and set any remaining channels to zero.
+ if (input.num_channels_ == 1 && input.num_channels_ < num_output_channels) {
+ for (size_t k = 0; k < input.samples_per_channel_; ++k) {
+ (*output)[out_index++] = input_data[k];
+ (*output)[out_index++] = input_data[k];
+ for (size_t j = 2; j < num_output_channels; ++j) {
+ (*output)[out_index++] = 0;
+ }
+ RTC_DCHECK_EQ(out_index, (k + 1) * num_output_channels);
+ }
+ RTC_DCHECK_EQ(out_index, input.samples_per_channel_ * num_output_channels);
+ return;
+ }
+
+ size_t in_index = 0;
+
+ // When upmixing is needed and the output is surround, copy the available
+ // channels directly, and set the remaining channels to zero.
+ if (input.num_channels_ < num_output_channels) {
+ for (size_t k = 0; k < input.samples_per_channel_; ++k) {
+ for (size_t j = 0; j < input.num_channels_; ++j) {
+ (*output)[out_index++] = input_data[in_index++];
+ }
+ for (size_t j = input.num_channels_; j < num_output_channels; ++j) {
+ (*output)[out_index++] = 0;
+ }
+ RTC_DCHECK_EQ(in_index, (k + 1) * input.num_channels_);
+ RTC_DCHECK_EQ(out_index, (k + 1) * num_output_channels);
+ }
+ RTC_DCHECK_EQ(in_index, input.samples_per_channel_ * input.num_channels_);
+ RTC_DCHECK_EQ(out_index, input.samples_per_channel_ * num_output_channels);
+
+ return;
+ }
+
+ // When downmixing is needed, and the input is stereo, average the channels.
+ if (input.num_channels_ == 2) {
+ for (size_t n = 0; n < input.samples_per_channel_; ++n) {
+ (*output)[n] = rtc::dchecked_cast<int16_t>(
+ (int32_t{input_data[2 * n]} + int32_t{input_data[2 * n + 1]}) >> 1);
+ }
+ return;
+ }
+
+ // When downmixing is needed, and the input is multichannel, drop the surplus
+ // channels.
+ const size_t num_channels_to_drop = input.num_channels_ - num_output_channels;
+ for (size_t k = 0; k < input.samples_per_channel_; ++k) {
+ for (size_t j = 0; j < num_output_channels; ++j) {
+ (*output)[out_index++] = input_data[in_index++];
+ }
+ in_index += num_channels_to_drop;
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/acm2/acm_remixing.h b/third_party/libwebrtc/modules/audio_coding/acm2/acm_remixing.h
new file mode 100644
index 0000000000..661569b033
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/acm2/acm_remixing.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_ACM2_ACM_REMIXING_H_
+#define MODULES_AUDIO_CODING_ACM2_ACM_REMIXING_H_
+
+#include <vector>
+
+#include "api/audio/audio_frame.h"
+
+namespace webrtc {
+
+// Stereo-to-mono downmixing. The length of the output must equal the number
+// of samples per channel in the input.
+void DownMixFrame(const AudioFrame& input, rtc::ArrayView<int16_t> output);
+
+// Remixes the interleaved input frame to an interleaved output data vector. The
+// remixed data replaces the data in the output vector which is resized if
+// needed. The remixing supports any combination of input and output channels,
+// as well as any number of samples per channel.
+void ReMixFrame(const AudioFrame& input,
+ size_t num_output_channels,
+ std::vector<int16_t>* output);
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_CODING_ACM2_ACM_REMIXING_H_
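A minimal usage sketch of the two helpers declared above; the frame contents
and sizes here are illustrative, not taken from any caller:

    #include "modules/audio_coding/acm2/acm_remixing.h"

    webrtc::AudioFrame frame;
    frame.num_channels_ = 2;
    frame.samples_per_channel_ = 480;  // 10 ms at 48 kHz.

    // Stereo to mono: the output must hold samples_per_channel_ samples, and
    // each output sample is the average (L + R) >> 1 of the input pair.
    std::vector<int16_t> mono(480);
    webrtc::DownMixFrame(frame, mono);

    // Stereo to 5.1: the vector is resized to 6 * 480 samples; the two input
    // channels are copied and the remaining four channels are zero-filled.
    std::vector<int16_t> surround;
    webrtc::ReMixFrame(frame, 6, &surround);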
diff --git a/third_party/libwebrtc/modules/audio_coding/acm2/acm_remixing_unittest.cc b/third_party/libwebrtc/modules/audio_coding/acm2/acm_remixing_unittest.cc
new file mode 100644
index 0000000000..a1a816f727
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/acm2/acm_remixing_unittest.cc
@@ -0,0 +1,191 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/acm2/acm_remixing.h"
+
+#include <vector>
+
+#include "api/audio/audio_frame.h"
+#include "system_wrappers/include/clock.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "test/testsupport/file_utils.h"
+
+using ::testing::AllOf;
+using ::testing::Each;
+using ::testing::ElementsAreArray;
+using ::testing::SizeIs;
+
+namespace webrtc {
+
+TEST(AcmRemixing, DownMixFrame) {
+ std::vector<int16_t> out(480, 0);
+ AudioFrame in;
+ in.num_channels_ = 2;
+ in.samples_per_channel_ = 480;
+
+ int16_t* const in_data = in.mutable_data();
+ for (size_t k = 0; k < in.samples_per_channel_; ++k) {
+ in_data[2 * k] = 2;
+ in_data[2 * k + 1] = 0;
+ }
+
+ DownMixFrame(in, out);
+
+ EXPECT_THAT(out, AllOf(SizeIs(480), Each(1)));
+}
+
+TEST(AcmRemixing, DownMixMutedFrame) {
+ std::vector<int16_t> out(480, 0);
+ AudioFrame in;
+ in.num_channels_ = 2;
+ in.samples_per_channel_ = 480;
+
+ int16_t* const in_data = in.mutable_data();
+ for (size_t k = 0; k < in.samples_per_channel_; ++k) {
+ in_data[2 * k] = 2;
+ in_data[2 * k + 1] = 0;
+ }
+
+ in.Mute();
+
+ DownMixFrame(in, out);
+
+ EXPECT_THAT(out, AllOf(SizeIs(480), Each(0)));
+}
+
+TEST(AcmRemixing, RemixMutedStereoFrameTo6Channels) {
+ std::vector<int16_t> out(480, 0);
+ AudioFrame in;
+ in.num_channels_ = 2;
+ in.samples_per_channel_ = 480;
+
+ int16_t* const in_data = in.mutable_data();
+ for (size_t k = 0; k < in.samples_per_channel_; ++k) {
+ in_data[2 * k] = 1;
+ in_data[2 * k + 1] = 2;
+ }
+ in.Mute();
+
+ ReMixFrame(in, 6, &out);
+ EXPECT_EQ(6 * 480u, out.size());
+
+ EXPECT_THAT(out, AllOf(SizeIs(in.samples_per_channel_ * 6), Each(0)));
+}
+
+TEST(AcmRemixing, RemixStereoFrameTo6Channels) {
+ std::vector<int16_t> out(480, 0);
+ AudioFrame in;
+ in.num_channels_ = 2;
+ in.samples_per_channel_ = 480;
+
+ int16_t* const in_data = in.mutable_data();
+ for (size_t k = 0; k < in.samples_per_channel_; ++k) {
+ in_data[2 * k] = 1;
+ in_data[2 * k + 1] = 2;
+ }
+
+ ReMixFrame(in, 6, &out);
+ EXPECT_EQ(6 * 480u, out.size());
+
+ std::vector<int16_t> expected_output(in.samples_per_channel_ * 6);
+ for (size_t k = 0; k < in.samples_per_channel_; ++k) {
+ expected_output[6 * k] = 1;
+ expected_output[6 * k + 1] = 2;
+ }
+
+ EXPECT_THAT(out, ElementsAreArray(expected_output));
+}
+
+TEST(AcmRemixing, RemixMonoFrameTo6Channels) {
+ std::vector<int16_t> out(480, 0);
+ AudioFrame in;
+ in.num_channels_ = 1;
+ in.samples_per_channel_ = 480;
+
+ int16_t* const in_data = in.mutable_data();
+ for (size_t k = 0; k < in.samples_per_channel_; ++k) {
+ in_data[k] = 1;
+ }
+
+ ReMixFrame(in, 6, &out);
+ EXPECT_EQ(6 * 480u, out.size());
+
+ std::vector<int16_t> expected_output(in.samples_per_channel_ * 6, 0);
+ for (size_t k = 0; k < in.samples_per_channel_; ++k) {
+ expected_output[6 * k] = 1;
+ expected_output[6 * k + 1] = 1;
+ }
+
+ EXPECT_THAT(out, ElementsAreArray(expected_output));
+}
+
+TEST(AcmRemixing, RemixStereoFrameToMono) {
+ std::vector<int16_t> out(480, 0);
+ AudioFrame in;
+ in.num_channels_ = 2;
+ in.samples_per_channel_ = 480;
+
+ int16_t* const in_data = in.mutable_data();
+ for (size_t k = 0; k < in.samples_per_channel_; ++k) {
+ in_data[2 * k] = 2;
+ in_data[2 * k + 1] = 0;
+ }
+
+ ReMixFrame(in, 1, &out);
+ EXPECT_EQ(480u, out.size());
+
+ EXPECT_THAT(out, AllOf(SizeIs(in.samples_per_channel_), Each(1)));
+}
+
+TEST(AcmRemixing, RemixMonoFrameToStereo) {
+ std::vector<int16_t> out(480, 0);
+ AudioFrame in;
+ in.num_channels_ = 1;
+ in.samples_per_channel_ = 480;
+
+ int16_t* const in_data = in.mutable_data();
+ for (size_t k = 0; k < in.samples_per_channel_; ++k) {
+ in_data[k] = 1;
+ }
+
+ ReMixFrame(in, 2, &out);
+ EXPECT_EQ(960u, out.size());
+
+ EXPECT_THAT(out, AllOf(SizeIs(2 * in.samples_per_channel_), Each(1)));
+}
+
+TEST(AcmRemixing, Remix3ChannelFrameToStereo) {
+ std::vector<int16_t> out(480, 0);
+ AudioFrame in;
+ in.num_channels_ = 3;
+ in.samples_per_channel_ = 480;
+
+ int16_t* const in_data = in.mutable_data();
+ for (size_t k = 0; k < in.samples_per_channel_; ++k) {
+ for (size_t j = 0; j < 3; ++j) {
+ in_data[3 * k + j] = j;
+ }
+ }
+
+ ReMixFrame(in, 2, &out);
+ EXPECT_EQ(2 * 480u, out.size());
+
+ std::vector<int16_t> expected_output(in.samples_per_channel_ * 2);
+ for (size_t k = 0; k < in.samples_per_channel_; ++k) {
+ for (size_t j = 0; j < 2; ++j) {
+ expected_output[2 * k + j] = static_cast<int>(j);
+ }
+ }
+
+ EXPECT_THAT(out, ElementsAreArray(expected_output));
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/acm2/acm_resampler.cc b/third_party/libwebrtc/modules/audio_coding/acm2/acm_resampler.cc
new file mode 100644
index 0000000000..e307c6ca57
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/acm2/acm_resampler.cc
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/acm2/acm_resampler.h"
+
+#include <string.h>
+
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+namespace acm2 {
+
+ACMResampler::ACMResampler() {}
+
+ACMResampler::~ACMResampler() {}
+
+int ACMResampler::Resample10Msec(const int16_t* in_audio,
+ int in_freq_hz,
+ int out_freq_hz,
+ size_t num_audio_channels,
+ size_t out_capacity_samples,
+ int16_t* out_audio) {
+ size_t in_length = in_freq_hz * num_audio_channels / 100;
+ if (in_freq_hz == out_freq_hz) {
+ if (out_capacity_samples < in_length) {
+ RTC_DCHECK_NOTREACHED();
+ return -1;
+ }
+ memcpy(out_audio, in_audio, in_length * sizeof(int16_t));
+ return static_cast<int>(in_length / num_audio_channels);
+ }
+
+ if (resampler_.InitializeIfNeeded(in_freq_hz, out_freq_hz,
+ num_audio_channels) != 0) {
+ RTC_LOG(LS_ERROR) << "InitializeIfNeeded(" << in_freq_hz << ", "
+ << out_freq_hz << ", " << num_audio_channels
+ << ") failed.";
+ return -1;
+ }
+
+ int out_length =
+ resampler_.Resample(in_audio, in_length, out_audio, out_capacity_samples);
+ if (out_length == -1) {
+ RTC_LOG(LS_ERROR) << "Resample(" << in_audio << ", " << in_length << ", "
+ << out_audio << ", " << out_capacity_samples
+ << ") failed.";
+ return -1;
+ }
+
+ return static_cast<int>(out_length / num_audio_channels);
+}
+
+} // namespace acm2
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/acm2/acm_resampler.h b/third_party/libwebrtc/modules/audio_coding/acm2/acm_resampler.h
new file mode 100644
index 0000000000..96ba93a762
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/acm2/acm_resampler.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_ACM2_ACM_RESAMPLER_H_
+#define MODULES_AUDIO_CODING_ACM2_ACM_RESAMPLER_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "common_audio/resampler/include/push_resampler.h"
+
+namespace webrtc {
+namespace acm2 {
+
+class ACMResampler {
+ public:
+ ACMResampler();
+ ~ACMResampler();
+
+ int Resample10Msec(const int16_t* in_audio,
+ int in_freq_hz,
+ int out_freq_hz,
+ size_t num_audio_channels,
+ size_t out_capacity_samples,
+ int16_t* out_audio);
+
+ private:
+ PushResampler<int16_t> resampler_;
+};
+
+} // namespace acm2
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_CODING_ACM2_ACM_RESAMPLER_H_
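A short, hedged usage sketch of Resample10Msec(). Note that the input length is
implicit (10 ms, i.e. in_freq_hz / 100 samples per channel) and the return
value is samples per channel, not total samples:

    // Resample 10 ms of 16 kHz mono audio (160 samples) to 48 kHz.
    webrtc::acm2::ACMResampler resampler;
    int16_t in[160] = {0};
    int16_t out[480];
    const int samples_per_channel = resampler.Resample10Msec(
        in, /*in_freq_hz=*/16000, /*out_freq_hz=*/48000,
        /*num_audio_channels=*/1, /*out_capacity_samples=*/480, out);
    // On success samples_per_channel == 480; on failure it is -1.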
diff --git a/third_party/libwebrtc/modules/audio_coding/acm2/acm_send_test.cc b/third_party/libwebrtc/modules/audio_coding/acm2/acm_send_test.cc
new file mode 100644
index 0000000000..3e65f94b0d
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/acm2/acm_send_test.cc
@@ -0,0 +1,174 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/acm2/acm_send_test.h"
+
+#include <stdio.h>
+#include <string.h>
+
+#include "absl/strings/match.h"
+#include "absl/strings/string_view.h"
+#include "api/audio_codecs/audio_encoder.h"
+#include "api/audio_codecs/builtin_audio_decoder_factory.h"
+#include "api/audio_codecs/builtin_audio_encoder_factory.h"
+#include "modules/audio_coding/include/audio_coding_module.h"
+#include "modules/audio_coding/neteq/tools/input_audio_file.h"
+#include "modules/audio_coding/neteq/tools/packet.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/string_encode.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace test {
+
+AcmSendTestOldApi::AcmSendTestOldApi(InputAudioFile* audio_source,
+ int source_rate_hz,
+ int test_duration_ms)
+ : clock_(0),
+ acm_(webrtc::AudioCodingModule::Create([this] {
+ AudioCodingModule::Config config;
+ config.clock = &clock_;
+ config.decoder_factory = CreateBuiltinAudioDecoderFactory();
+ return config;
+ }())),
+ audio_source_(audio_source),
+ source_rate_hz_(source_rate_hz),
+ input_block_size_samples_(
+ static_cast<size_t>(source_rate_hz_ * kBlockSizeMs / 1000)),
+ codec_registered_(false),
+ test_duration_ms_(test_duration_ms),
+ frame_type_(AudioFrameType::kAudioFrameSpeech),
+ payload_type_(0),
+ timestamp_(0),
+ sequence_number_(0) {
+ input_frame_.sample_rate_hz_ = source_rate_hz_;
+ input_frame_.num_channels_ = 1;
+ input_frame_.samples_per_channel_ = input_block_size_samples_;
+ RTC_DCHECK_LE(input_block_size_samples_ * input_frame_.num_channels_,
+ AudioFrame::kMaxDataSizeSamples);
+ acm_->RegisterTransportCallback(this);
+}
+
+AcmSendTestOldApi::~AcmSendTestOldApi() = default;
+
+bool AcmSendTestOldApi::RegisterCodec(absl::string_view payload_name,
+ int clockrate_hz,
+ int num_channels,
+ int payload_type,
+ int frame_size_samples) {
+ SdpAudioFormat format(payload_name, clockrate_hz, num_channels);
+ if (absl::EqualsIgnoreCase(payload_name, "g722")) {
+ RTC_CHECK_EQ(16000, clockrate_hz);
+ format.clockrate_hz = 8000;
+ } else if (absl::EqualsIgnoreCase(payload_name, "opus")) {
+ RTC_CHECK(num_channels == 1 || num_channels == 2);
+ if (num_channels == 2) {
+ format.parameters["stereo"] = "1";
+ }
+ format.num_channels = 2;
+ }
+ format.parameters["ptime"] = rtc::ToString(rtc::CheckedDivExact(
+ frame_size_samples, rtc::CheckedDivExact(clockrate_hz, 1000)));
+ auto factory = CreateBuiltinAudioEncoderFactory();
+ acm_->SetEncoder(
+ factory->MakeAudioEncoder(payload_type, format, absl::nullopt));
+ codec_registered_ = true;
+ input_frame_.num_channels_ = num_channels;
+ RTC_DCHECK_LE(input_block_size_samples_ * input_frame_.num_channels_,
+ AudioFrame::kMaxDataSizeSamples);
+ return codec_registered_;
+}
+
+void AcmSendTestOldApi::RegisterExternalCodec(
+ std::unique_ptr<AudioEncoder> external_speech_encoder) {
+ input_frame_.num_channels_ = external_speech_encoder->NumChannels();
+ acm_->SetEncoder(std::move(external_speech_encoder));
+ RTC_DCHECK_LE(input_block_size_samples_ * input_frame_.num_channels_,
+ AudioFrame::kMaxDataSizeSamples);
+ codec_registered_ = true;
+}
+
+std::unique_ptr<Packet> AcmSendTestOldApi::NextPacket() {
+ RTC_DCHECK(codec_registered_);
+ if (filter_.test(static_cast<size_t>(payload_type_))) {
+ // This payload type should be filtered out. Since the payload type is the
+ // same throughout the whole test run, no packet at all will be delivered.
+    // We can just as well signal that the test is over by returning nullptr.
+ return nullptr;
+ }
+ // Insert audio and process until one packet is produced.
+ while (clock_.TimeInMilliseconds() < test_duration_ms_) {
+ clock_.AdvanceTimeMilliseconds(kBlockSizeMs);
+ RTC_CHECK(audio_source_->Read(
+ input_block_size_samples_ * input_frame_.num_channels_,
+ input_frame_.mutable_data()));
+ data_to_send_ = false;
+ RTC_CHECK_GE(acm_->Add10MsData(input_frame_), 0);
+ input_frame_.timestamp_ += static_cast<uint32_t>(input_block_size_samples_);
+ if (data_to_send_) {
+ // Encoded packet received.
+ return CreatePacket();
+ }
+ }
+ // Test ended.
+ return nullptr;
+}
+
+// This method receives the callback from ACM when a new packet is produced.
+int32_t AcmSendTestOldApi::SendData(AudioFrameType frame_type,
+ uint8_t payload_type,
+ uint32_t timestamp,
+ const uint8_t* payload_data,
+ size_t payload_len_bytes,
+ int64_t absolute_capture_timestamp_ms) {
+ // Store the packet locally.
+ frame_type_ = frame_type;
+ payload_type_ = payload_type;
+ timestamp_ = timestamp;
+ last_payload_vec_.assign(payload_data, payload_data + payload_len_bytes);
+ RTC_DCHECK_EQ(last_payload_vec_.size(), payload_len_bytes);
+ data_to_send_ = true;
+ return 0;
+}
+
+std::unique_ptr<Packet> AcmSendTestOldApi::CreatePacket() {
+ const size_t kRtpHeaderSize = 12;
+ rtc::CopyOnWriteBuffer packet_buffer(last_payload_vec_.size() +
+ kRtpHeaderSize);
+ uint8_t* packet_memory = packet_buffer.MutableData();
+ // Populate the header bytes.
+ packet_memory[0] = 0x80;
+ packet_memory[1] = static_cast<uint8_t>(payload_type_);
+ packet_memory[2] = (sequence_number_ >> 8) & 0xFF;
+  packet_memory[3] = sequence_number_ & 0xFF;
+ packet_memory[4] = (timestamp_ >> 24) & 0xFF;
+ packet_memory[5] = (timestamp_ >> 16) & 0xFF;
+ packet_memory[6] = (timestamp_ >> 8) & 0xFF;
+ packet_memory[7] = timestamp_ & 0xFF;
+ // Set SSRC to 0x12345678.
+ packet_memory[8] = 0x12;
+ packet_memory[9] = 0x34;
+ packet_memory[10] = 0x56;
+ packet_memory[11] = 0x78;
+
+ ++sequence_number_;
+
+ // Copy the payload data.
+ memcpy(packet_memory + kRtpHeaderSize, &last_payload_vec_[0],
+ last_payload_vec_.size());
+ auto packet = std::make_unique<Packet>(std::move(packet_buffer),
+ clock_.TimeInMilliseconds());
+ RTC_DCHECK(packet);
+ RTC_DCHECK(packet->valid_header());
+ return packet;
+}
+
+} // namespace test
+} // namespace webrtc
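To make the byte packing in CreatePacket() concrete, a worked example with
arbitrary values:

    // For payload_type_ = 111 (0x6F), sequence_number_ = 0xABCD and
    // timestamp_ = 0x01020304, the 12-byte RTP header becomes:
    //
    //   0x80 0x6F 0xAB 0xCD   // V=2, no padding/extension/CSRC; PT; sequence
    //   0x01 0x02 0x03 0x04   // timestamp, big-endian
    //   0x12 0x34 0x56 0x78   // SSRC, fixed in this helper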
diff --git a/third_party/libwebrtc/modules/audio_coding/acm2/acm_send_test.h b/third_party/libwebrtc/modules/audio_coding/acm2/acm_send_test.h
new file mode 100644
index 0000000000..0bd24705fd
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/acm2/acm_send_test.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_ACM2_ACM_SEND_TEST_H_
+#define MODULES_AUDIO_CODING_ACM2_ACM_SEND_TEST_H_
+
+#include <memory>
+#include <vector>
+
+#include "absl/strings/string_view.h"
+#include "api/audio/audio_frame.h"
+#include "modules/audio_coding/include/audio_coding_module.h"
+#include "modules/audio_coding/neteq/tools/packet_source.h"
+#include "system_wrappers/include/clock.h"
+
+namespace webrtc {
+class AudioEncoder;
+
+namespace test {
+class InputAudioFile;
+class Packet;
+
+class AcmSendTestOldApi : public AudioPacketizationCallback,
+ public PacketSource {
+ public:
+ AcmSendTestOldApi(InputAudioFile* audio_source,
+ int source_rate_hz,
+ int test_duration_ms);
+ ~AcmSendTestOldApi() override;
+
+ AcmSendTestOldApi(const AcmSendTestOldApi&) = delete;
+ AcmSendTestOldApi& operator=(const AcmSendTestOldApi&) = delete;
+
+ // Registers the send codec. Returns true on success, false otherwise.
+ bool RegisterCodec(absl::string_view payload_name,
+ int sampling_freq_hz,
+ int channels,
+ int payload_type,
+ int frame_size_samples);
+
+ // Registers an external send codec.
+ void RegisterExternalCodec(
+ std::unique_ptr<AudioEncoder> external_speech_encoder);
+
+ // Inherited from PacketSource.
+ std::unique_ptr<Packet> NextPacket() override;
+
+ // Inherited from AudioPacketizationCallback.
+ int32_t SendData(AudioFrameType frame_type,
+ uint8_t payload_type,
+ uint32_t timestamp,
+ const uint8_t* payload_data,
+ size_t payload_len_bytes,
+ int64_t absolute_capture_timestamp_ms) override;
+
+ AudioCodingModule* acm() { return acm_.get(); }
+
+ private:
+ static const int kBlockSizeMs = 10;
+
+ // Creates a Packet object from the last packet produced by ACM (and received
+ // through the SendData method as a callback).
+ std::unique_ptr<Packet> CreatePacket();
+
+ SimulatedClock clock_;
+ std::unique_ptr<AudioCodingModule> acm_;
+ InputAudioFile* audio_source_;
+ int source_rate_hz_;
+ const size_t input_block_size_samples_;
+ AudioFrame input_frame_;
+ bool codec_registered_;
+ int test_duration_ms_;
+ // The following member variables are set whenever SendData() is called.
+ AudioFrameType frame_type_;
+ int payload_type_;
+ uint32_t timestamp_;
+ uint16_t sequence_number_;
+ std::vector<uint8_t> last_payload_vec_;
+ bool data_to_send_;
+};
+
+} // namespace test
+} // namespace webrtc
+#endif // MODULES_AUDIO_CODING_ACM2_ACM_SEND_TEST_H_
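A minimal sketch of how this helper is typically driven; the input file name
and codec parameters here are assumptions, not taken from any existing test:

    // Pull packets from the ACM until the test duration is exhausted.
    webrtc::test::InputAudioFile audio_source("speech_16kHz.pcm");
    webrtc::test::AcmSendTestOldApi send_test(&audio_source,
                                              /*source_rate_hz=*/16000,
                                              /*test_duration_ms=*/1000);
    RTC_CHECK(send_test.RegisterCodec("L16", 16000, 1, /*payload_type=*/107,
                                      /*frame_size_samples=*/160));
    while (std::unique_ptr<webrtc::test::Packet> packet =
               send_test.NextPacket()) {
      // Feed `packet` to a receiver under test or to an analyzer.
    }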
diff --git a/third_party/libwebrtc/modules/audio_coding/acm2/audio_coding_module.cc b/third_party/libwebrtc/modules/audio_coding/acm2/audio_coding_module.cc
new file mode 100644
index 0000000000..81d40e5e53
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/acm2/audio_coding_module.cc
@@ -0,0 +1,643 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/include/audio_coding_module.h"
+
+#include <algorithm>
+#include <cstdint>
+
+#include "absl/strings/match.h"
+#include "absl/strings/string_view.h"
+#include "api/array_view.h"
+#include "modules/audio_coding/acm2/acm_receiver.h"
+#include "modules/audio_coding/acm2/acm_remixing.h"
+#include "modules/audio_coding/acm2/acm_resampler.h"
+#include "modules/include/module_common_types.h"
+#include "modules/include/module_common_types_public.h"
+#include "rtc_base/buffer.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "rtc_base/synchronization/mutex.h"
+#include "rtc_base/thread_annotations.h"
+#include "system_wrappers/include/metrics.h"
+
+namespace webrtc {
+
+namespace {
+
+// Initial size for the buffer in InputBuffer. This matches 6 channels of 10 ms
+// 48 kHz data.
+constexpr size_t kInitialInputDataBufferSize = 6 * 480;
+
+constexpr int32_t kMaxInputSampleRateHz = 192000;
+
+class AudioCodingModuleImpl final : public AudioCodingModule {
+ public:
+ explicit AudioCodingModuleImpl(const AudioCodingModule::Config& config);
+ ~AudioCodingModuleImpl() override;
+
+ /////////////////////////////////////////
+ // Sender
+ //
+
+ void ModifyEncoder(rtc::FunctionView<void(std::unique_ptr<AudioEncoder>*)>
+ modifier) override;
+
+ // Register a transport callback which will be
+ // called to deliver the encoded buffers.
+ int RegisterTransportCallback(AudioPacketizationCallback* transport) override;
+
+ // Add 10 ms of raw (PCM) audio data to the encoder.
+ int Add10MsData(const AudioFrame& audio_frame) override;
+
+ /////////////////////////////////////////
+ // (FEC) Forward Error Correction (codec internal)
+ //
+
+ // Set target packet loss rate
+ int SetPacketLossRate(int loss_rate) override;
+
+ /////////////////////////////////////////
+ // Receiver
+ //
+
+ // Initialize receiver, resets codec database etc.
+ int InitializeReceiver() override;
+
+ void SetReceiveCodecs(const std::map<int, SdpAudioFormat>& codecs) override;
+
+ int ReceiveSampleRate() const override;
+
+ // Incoming packet from network parsed and ready for decode.
+ int IncomingPacket(const uint8_t* incoming_payload,
+ const size_t payload_length,
+ const RTPHeader& rtp_info) override;
+
+ // Get 10 milliseconds of raw audio data to play out, and
+ // automatic resample to the requested frequency if > 0.
+ int PlayoutData10Ms(int desired_freq_hz,
+ AudioFrame* audio_frame,
+ bool* muted) override;
+
+ /////////////////////////////////////////
+ // Statistics
+ //
+
+ int GetNetworkStatistics(NetworkStatistics* statistics) override;
+
+ ANAStats GetANAStats() const override;
+
+ int GetTargetBitrate() const override;
+
+ private:
+ struct InputData {
+ InputData() : buffer(kInitialInputDataBufferSize) {}
+ uint32_t input_timestamp;
+ const int16_t* audio;
+ size_t length_per_channel;
+ size_t audio_channel;
+ // If a re-mix is required (up or down), this buffer will store a re-mixed
+ // version of the input.
+ std::vector<int16_t> buffer;
+ };
+
+ InputData input_data_ RTC_GUARDED_BY(acm_mutex_);
+
+ // This member class writes values to the named UMA histogram, but only if
+ // the value has changed since the last time (and always for the first call).
+ class ChangeLogger {
+ public:
+ explicit ChangeLogger(absl::string_view histogram_name)
+ : histogram_name_(histogram_name) {}
+ // Logs the new value if it is different from the last logged value, or if
+ // this is the first call.
+ void MaybeLog(int value);
+
+ private:
+ int last_value_ = 0;
+    bool first_time_ = true;
+ const std::string histogram_name_;
+ };
+
+ int Add10MsDataInternal(const AudioFrame& audio_frame, InputData* input_data)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(acm_mutex_);
+
+ // TODO(bugs.webrtc.org/10739): change `absolute_capture_timestamp_ms` to
+ // int64_t when it always receives a valid value.
+ int Encode(const InputData& input_data,
+ absl::optional<int64_t> absolute_capture_timestamp_ms)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(acm_mutex_);
+
+ int InitializeReceiverSafe() RTC_EXCLUSIVE_LOCKS_REQUIRED(acm_mutex_);
+
+ bool HaveValidEncoder(absl::string_view caller_name) const
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(acm_mutex_);
+
+ // Preprocessing of input audio, including resampling and down-mixing if
+ // required, before pushing audio into encoder's buffer.
+ //
+ // in_frame: input audio-frame
+ // ptr_out: pointer to output audio_frame. If no preprocessing is required
+ // `ptr_out` will be pointing to `in_frame`, otherwise pointing to
+ // `preprocess_frame_`.
+ //
+ // Return value:
+ // -1: if encountering an error.
+ // 0: otherwise.
+ int PreprocessToAddData(const AudioFrame& in_frame,
+ const AudioFrame** ptr_out)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(acm_mutex_);
+
+ // Change required states after starting to receive the codec corresponding
+ // to `index`.
+ int UpdateUponReceivingCodec(int index);
+
+ mutable Mutex acm_mutex_;
+ rtc::Buffer encode_buffer_ RTC_GUARDED_BY(acm_mutex_);
+ uint32_t expected_codec_ts_ RTC_GUARDED_BY(acm_mutex_);
+ uint32_t expected_in_ts_ RTC_GUARDED_BY(acm_mutex_);
+ acm2::ACMResampler resampler_ RTC_GUARDED_BY(acm_mutex_);
+  acm2::AcmReceiver receiver_;  // AcmReceiver has its own internal lock.
+ ChangeLogger bitrate_logger_ RTC_GUARDED_BY(acm_mutex_);
+
+ // Current encoder stack, provided by a call to RegisterEncoder.
+ std::unique_ptr<AudioEncoder> encoder_stack_ RTC_GUARDED_BY(acm_mutex_);
+
+ // This is to keep track of CN instances where we can send DTMFs.
+ uint8_t previous_pltype_ RTC_GUARDED_BY(acm_mutex_);
+
+ bool receiver_initialized_ RTC_GUARDED_BY(acm_mutex_);
+
+ AudioFrame preprocess_frame_ RTC_GUARDED_BY(acm_mutex_);
+ bool first_10ms_data_ RTC_GUARDED_BY(acm_mutex_);
+
+ bool first_frame_ RTC_GUARDED_BY(acm_mutex_);
+ uint32_t last_timestamp_ RTC_GUARDED_BY(acm_mutex_);
+ uint32_t last_rtp_timestamp_ RTC_GUARDED_BY(acm_mutex_);
+
+ Mutex callback_mutex_;
+ AudioPacketizationCallback* packetization_callback_
+ RTC_GUARDED_BY(callback_mutex_);
+
+ int codec_histogram_bins_log_[static_cast<size_t>(
+ AudioEncoder::CodecType::kMaxLoggedAudioCodecTypes)];
+ int number_of_consecutive_empty_packets_;
+};
+
+// Adds a codec usage sample to the histogram.
+void UpdateCodecTypeHistogram(size_t codec_type) {
+ RTC_HISTOGRAM_ENUMERATION(
+ "WebRTC.Audio.Encoder.CodecType", static_cast<int>(codec_type),
+ static_cast<int>(
+ webrtc::AudioEncoder::CodecType::kMaxLoggedAudioCodecTypes));
+}
+
+void AudioCodingModuleImpl::ChangeLogger::MaybeLog(int value) {
+ if (value != last_value_ || first_time_) {
+ first_time_ = false;
+ last_value_ = value;
+ RTC_HISTOGRAM_COUNTS_SPARSE_100(histogram_name_, value);
+ }
+}
+
+AudioCodingModuleImpl::AudioCodingModuleImpl(
+ const AudioCodingModule::Config& config)
+ : expected_codec_ts_(0xD87F3F9F),
+ expected_in_ts_(0xD87F3F9F),
+ receiver_(config),
+ bitrate_logger_("WebRTC.Audio.TargetBitrateInKbps"),
+ encoder_stack_(nullptr),
+ previous_pltype_(255),
+ receiver_initialized_(false),
+ first_10ms_data_(false),
+ first_frame_(true),
+ packetization_callback_(NULL),
+ codec_histogram_bins_log_(),
+ number_of_consecutive_empty_packets_(0) {
+ if (InitializeReceiverSafe() < 0) {
+ RTC_LOG(LS_ERROR) << "Cannot initialize receiver";
+ }
+ RTC_LOG(LS_INFO) << "Created";
+}
+
+AudioCodingModuleImpl::~AudioCodingModuleImpl() = default;
+
+int AudioCodingModuleImpl::Encode(
+ const InputData& input_data,
+ absl::optional<int64_t> absolute_capture_timestamp_ms) {
+ // TODO(bugs.webrtc.org/10739): add dcheck that
+ // `audio_frame.absolute_capture_timestamp_ms()` always has a value.
+ AudioEncoder::EncodedInfo encoded_info;
+ uint8_t previous_pltype;
+
+  // Check that there is an encoder before proceeding.
+ if (!HaveValidEncoder("Process"))
+ return -1;
+
+ if (!first_frame_) {
+ RTC_DCHECK(IsNewerTimestamp(input_data.input_timestamp, last_timestamp_))
+ << "Time should not move backwards";
+ }
+
+ // Scale the timestamp to the codec's RTP timestamp rate.
+ uint32_t rtp_timestamp =
+ first_frame_
+ ? input_data.input_timestamp
+ : last_rtp_timestamp_ +
+ rtc::dchecked_cast<uint32_t>(rtc::CheckedDivExact(
+ int64_t{input_data.input_timestamp - last_timestamp_} *
+ encoder_stack_->RtpTimestampRateHz(),
+ int64_t{encoder_stack_->SampleRateHz()}));
+
+ last_timestamp_ = input_data.input_timestamp;
+ last_rtp_timestamp_ = rtp_timestamp;
+ first_frame_ = false;
+
+ // Clear the buffer before reuse - encoded data will get appended.
+ encode_buffer_.Clear();
+ encoded_info = encoder_stack_->Encode(
+ rtp_timestamp,
+ rtc::ArrayView<const int16_t>(
+ input_data.audio,
+ input_data.audio_channel * input_data.length_per_channel),
+ &encode_buffer_);
+
+ bitrate_logger_.MaybeLog(encoder_stack_->GetTargetBitrate() / 1000);
+ if (encode_buffer_.size() == 0 && !encoded_info.send_even_if_empty) {
+ // Not enough data.
+ return 0;
+ }
+  previous_pltype = previous_pltype_;  // Read it while we hold the lock.
+
+ // Log codec type to histogram once every 500 packets.
+ if (encoded_info.encoded_bytes == 0) {
+ ++number_of_consecutive_empty_packets_;
+ } else {
+ size_t codec_type = static_cast<size_t>(encoded_info.encoder_type);
+ codec_histogram_bins_log_[codec_type] +=
+ number_of_consecutive_empty_packets_ + 1;
+ number_of_consecutive_empty_packets_ = 0;
+ if (codec_histogram_bins_log_[codec_type] >= 500) {
+ codec_histogram_bins_log_[codec_type] -= 500;
+ UpdateCodecTypeHistogram(codec_type);
+ }
+ }
+
+ AudioFrameType frame_type;
+ if (encode_buffer_.size() == 0 && encoded_info.send_even_if_empty) {
+ frame_type = AudioFrameType::kEmptyFrame;
+ encoded_info.payload_type = previous_pltype;
+ } else {
+ RTC_DCHECK_GT(encode_buffer_.size(), 0);
+ frame_type = encoded_info.speech ? AudioFrameType::kAudioFrameSpeech
+ : AudioFrameType::kAudioFrameCN;
+ }
+
+ {
+ MutexLock lock(&callback_mutex_);
+ if (packetization_callback_) {
+ packetization_callback_->SendData(
+ frame_type, encoded_info.payload_type, encoded_info.encoded_timestamp,
+ encode_buffer_.data(), encode_buffer_.size(),
+ absolute_capture_timestamp_ms.value_or(-1));
+ }
+ }
+ previous_pltype_ = encoded_info.payload_type;
+ return static_cast<int32_t>(encode_buffer_.size());
+}
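The RTP timestamp rescaling above matters whenever a codec's RTP clock rate
differs from its sample rate, G.722 being the classic case (16 kHz audio with
an 8 kHz RTP clock). A small worked example under that assumption:

    // 10 ms at 16 kHz advances the input timestamp by 160 samples, but the
    // RTP timestamp must advance by only 160 * 8000 / 16000 = 80 ticks.
    const int64_t input_delta = 160;  // Samples since the last frame.
    const int64_t rtp_delta = input_delta * 8000 / 16000;  // == 80.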
+
+/////////////////////////////////////////
+// Sender
+//
+
+void AudioCodingModuleImpl::ModifyEncoder(
+ rtc::FunctionView<void(std::unique_ptr<AudioEncoder>*)> modifier) {
+ MutexLock lock(&acm_mutex_);
+ modifier(&encoder_stack_);
+}
+
+// Register a transport callback which will be called to deliver
+// the encoded buffers.
+int AudioCodingModuleImpl::RegisterTransportCallback(
+ AudioPacketizationCallback* transport) {
+ MutexLock lock(&callback_mutex_);
+ packetization_callback_ = transport;
+ return 0;
+}
+
+// Add 10 ms of raw (PCM) audio data to the encoder.
+int AudioCodingModuleImpl::Add10MsData(const AudioFrame& audio_frame) {
+ MutexLock lock(&acm_mutex_);
+ int r = Add10MsDataInternal(audio_frame, &input_data_);
+ // TODO(bugs.webrtc.org/10739): add dcheck that
+ // `audio_frame.absolute_capture_timestamp_ms()` always has a value.
+ return r < 0
+ ? r
+ : Encode(input_data_, audio_frame.absolute_capture_timestamp_ms());
+}
+
+int AudioCodingModuleImpl::Add10MsDataInternal(const AudioFrame& audio_frame,
+ InputData* input_data) {
+ if (audio_frame.samples_per_channel_ == 0) {
+ RTC_DCHECK_NOTREACHED();
+ RTC_LOG(LS_ERROR) << "Cannot Add 10 ms audio, payload length is zero";
+ return -1;
+ }
+
+ if (audio_frame.sample_rate_hz_ > kMaxInputSampleRateHz) {
+ RTC_DCHECK_NOTREACHED();
+ RTC_LOG(LS_ERROR) << "Cannot Add 10 ms audio, input frequency not valid";
+ return -1;
+ }
+
+  // Check that the length and frequency match. We currently only support raw
+  // PCM.
+  if (static_cast<size_t>(audio_frame.sample_rate_hz_ / 100) !=
+      audio_frame.samples_per_channel_) {
+    RTC_LOG(LS_ERROR)
+        << "Cannot Add 10 ms audio, input frequency and length don't match";
+ return -1;
+ }
+
+ if (audio_frame.num_channels_ != 1 && audio_frame.num_channels_ != 2 &&
+ audio_frame.num_channels_ != 4 && audio_frame.num_channels_ != 6 &&
+ audio_frame.num_channels_ != 8) {
+ RTC_LOG(LS_ERROR) << "Cannot Add 10 ms audio, invalid number of channels.";
+ return -1;
+ }
+
+ // Do we have a codec registered?
+ if (!HaveValidEncoder("Add10MsData")) {
+ return -1;
+ }
+
+ const AudioFrame* ptr_frame;
+  // Perform resampling, and also down-mixing if it is required and can be
+  // performed before resampling (a down-mix prior to resampling will take
+  // place if both the primary and secondary encoders are mono and the input
+  // is stereo).
+ if (PreprocessToAddData(audio_frame, &ptr_frame) < 0) {
+ return -1;
+ }
+
+  // Check whether we need an up-mix or down-mix.
+ const size_t current_num_channels = encoder_stack_->NumChannels();
+ const bool same_num_channels =
+ ptr_frame->num_channels_ == current_num_channels;
+
+ // TODO(yujo): Skip encode of muted frames.
+ input_data->input_timestamp = ptr_frame->timestamp_;
+ input_data->length_per_channel = ptr_frame->samples_per_channel_;
+ input_data->audio_channel = current_num_channels;
+
+    // Remix the input frame into the output data, resizing the output data in
+    // the process if needed.
+ // output data if needed.
+ ReMixFrame(*ptr_frame, current_num_channels, &input_data->buffer);
+
+    // For pushing data to the primary encoder, point `audio` at the correct
+    // buffer.
+ input_data->audio = input_data->buffer.data();
+ RTC_DCHECK_GE(input_data->buffer.size(),
+ input_data->length_per_channel * input_data->audio_channel);
+ } else {
+ // When adding data to encoders this pointer is pointing to an audio buffer
+ // with correct number of channels.
+ input_data->audio = ptr_frame->data();
+ }
+
+ return 0;
+}
+
+// Perform resampling and down-mixing if required. We down-mix only if the
+// encoder is mono and the input is stereo. In the case of dual-streaming,
+// both encoders have to be mono for the down-mix to take place.
+// |*ptr_out| will point to the pre-processed audio-frame. If no pre-processing
+// is required, |*ptr_out| points to `in_frame`.
+// TODO(yujo): Make this more efficient for muted frames.
+int AudioCodingModuleImpl::PreprocessToAddData(const AudioFrame& in_frame,
+ const AudioFrame** ptr_out) {
+ const bool resample =
+ in_frame.sample_rate_hz_ != encoder_stack_->SampleRateHz();
+
+  // This variable is true if the primary codec and the secondary codec (if
+  // it exists) are both mono and the input is stereo.
+ // TODO(henrik.lundin): This condition should probably be
+ // in_frame.num_channels_ > encoder_stack_->NumChannels()
+ const bool down_mix =
+ in_frame.num_channels_ == 2 && encoder_stack_->NumChannels() == 1;
+
+ if (!first_10ms_data_) {
+ expected_in_ts_ = in_frame.timestamp_;
+ expected_codec_ts_ = in_frame.timestamp_;
+ first_10ms_data_ = true;
+ } else if (in_frame.timestamp_ != expected_in_ts_) {
+ RTC_LOG(LS_WARNING) << "Unexpected input timestamp: " << in_frame.timestamp_
+ << ", expected: " << expected_in_ts_;
+ expected_codec_ts_ +=
+ (in_frame.timestamp_ - expected_in_ts_) *
+ static_cast<uint32_t>(
+ static_cast<double>(encoder_stack_->SampleRateHz()) /
+ static_cast<double>(in_frame.sample_rate_hz_));
+ expected_in_ts_ = in_frame.timestamp_;
+ }
+
+ if (!down_mix && !resample) {
+ // No pre-processing is required.
+ if (expected_in_ts_ == expected_codec_ts_) {
+      // If we've never resampled, we can use the input frame as-is.
+ *ptr_out = &in_frame;
+ } else {
+ // Otherwise we'll need to alter the timestamp. Since in_frame is const,
+ // we'll have to make a copy of it.
+ preprocess_frame_.CopyFrom(in_frame);
+ preprocess_frame_.timestamp_ = expected_codec_ts_;
+ *ptr_out = &preprocess_frame_;
+ }
+
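+    // Advance both timestamp series by one 10 ms block; the rates are equal
+    // here since no resampling takes place on this path.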
+ expected_in_ts_ += static_cast<uint32_t>(in_frame.samples_per_channel_);
+ expected_codec_ts_ += static_cast<uint32_t>(in_frame.samples_per_channel_);
+ return 0;
+ }
+
+ *ptr_out = &preprocess_frame_;
+ preprocess_frame_.num_channels_ = in_frame.num_channels_;
+ preprocess_frame_.samples_per_channel_ = in_frame.samples_per_channel_;
+ std::array<int16_t, AudioFrame::kMaxDataSizeSamples> audio;
+ const int16_t* src_ptr_audio;
+ if (down_mix) {
+    // If resampling is required, the output of the down-mix is written into
+    // a local buffer; otherwise it is written to the output frame.
+ int16_t* dest_ptr_audio =
+ resample ? audio.data() : preprocess_frame_.mutable_data();
+ RTC_DCHECK_GE(audio.size(), preprocess_frame_.samples_per_channel_);
+ RTC_DCHECK_GE(audio.size(), in_frame.samples_per_channel_);
+ DownMixFrame(in_frame,
+ rtc::ArrayView<int16_t>(
+ dest_ptr_audio, preprocess_frame_.samples_per_channel_));
+ preprocess_frame_.num_channels_ = 1;
+
+ // Set the input of the resampler to the down-mixed signal.
+ src_ptr_audio = audio.data();
+ } else {
+ // Set the input of the resampler to the original data.
+ src_ptr_audio = in_frame.data();
+ }
+
+ preprocess_frame_.timestamp_ = expected_codec_ts_;
+ preprocess_frame_.sample_rate_hz_ = in_frame.sample_rate_hz_;
+  // Resample if required.
+ if (resample) {
+    // The result of the resampler is written to the output frame.
+ int16_t* dest_ptr_audio = preprocess_frame_.mutable_data();
+
+ int samples_per_channel = resampler_.Resample10Msec(
+ src_ptr_audio, in_frame.sample_rate_hz_, encoder_stack_->SampleRateHz(),
+ preprocess_frame_.num_channels_, AudioFrame::kMaxDataSizeSamples,
+ dest_ptr_audio);
+
+ if (samples_per_channel < 0) {
+ RTC_LOG(LS_ERROR) << "Cannot add 10 ms audio, resampling failed";
+ return -1;
+ }
+ preprocess_frame_.samples_per_channel_ =
+ static_cast<size_t>(samples_per_channel);
+ preprocess_frame_.sample_rate_hz_ = encoder_stack_->SampleRateHz();
+ }
+
+ expected_codec_ts_ +=
+ static_cast<uint32_t>(preprocess_frame_.samples_per_channel_);
+ expected_in_ts_ += static_cast<uint32_t>(in_frame.samples_per_channel_);
+
+ return 0;
+}
+
+/////////////////////////////////////////
+// (FEC) Forward Error Correction (codec internal)
+//
+
+int AudioCodingModuleImpl::SetPacketLossRate(int loss_rate) {
+ MutexLock lock(&acm_mutex_);
+ if (HaveValidEncoder("SetPacketLossRate")) {
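+    // `loss_rate` is a percentage; the encoder API expects a fraction in
+    // [0, 1].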
+ encoder_stack_->OnReceivedUplinkPacketLossFraction(loss_rate / 100.0);
+ }
+ return 0;
+}
+
+/////////////////////////////////////////
+// Receiver
+//
+
+int AudioCodingModuleImpl::InitializeReceiver() {
+ MutexLock lock(&acm_mutex_);
+ return InitializeReceiverSafe();
+}
+
+// Initializes the receiver and resets the codec database, etc.
+int AudioCodingModuleImpl::InitializeReceiverSafe() {
+ // If the receiver is already initialized then we want to destroy any
+ // existing decoders. After a call to this function, we should have a clean
+ // start-up.
+ if (receiver_initialized_)
+ receiver_.RemoveAllCodecs();
+ receiver_.FlushBuffers();
+
+ receiver_initialized_ = true;
+ return 0;
+}
+
+void AudioCodingModuleImpl::SetReceiveCodecs(
+ const std::map<int, SdpAudioFormat>& codecs) {
+ MutexLock lock(&acm_mutex_);
+ receiver_.SetCodecs(codecs);
+}
+
+int AudioCodingModuleImpl::ReceiveSampleRate() const {
+ return receiver_.LastAudioSampleRate();
+}
+
+// Inserts an incoming packet from the network, parsed and ready to decode.
+int AudioCodingModuleImpl::IncomingPacket(const uint8_t* incoming_payload,
+ const size_t payload_length,
+ const RTPHeader& rtp_header) {
+ RTC_DCHECK_EQ(payload_length == 0, incoming_payload == nullptr);
+ return receiver_.InsertPacket(
+ rtp_header,
+ rtc::ArrayView<const uint8_t>(incoming_payload, payload_length));
+}
+
+// Get 10 milliseconds of raw audio data to play out. The audio is
+// automatically resampled to the requested frequency.
+int AudioCodingModuleImpl::PlayoutData10Ms(int desired_freq_hz,
+ AudioFrame* audio_frame,
+ bool* muted) {
+ // GetAudio always returns 10 ms, at the requested sample rate.
+ if (receiver_.GetAudio(desired_freq_hz, audio_frame, muted) != 0) {
+ RTC_LOG(LS_ERROR) << "PlayoutData failed, RecOut Failed";
+ return -1;
+ }
+ return 0;
+}
+
+/////////////////////////////////////////
+// Statistics
+//
+
+// TODO(turajs) change the return value to void. Also change the corresponding
+// NetEq function.
+int AudioCodingModuleImpl::GetNetworkStatistics(NetworkStatistics* statistics) {
+ receiver_.GetNetworkStatistics(statistics);
+ return 0;
+}
+
+bool AudioCodingModuleImpl::HaveValidEncoder(
+ absl::string_view caller_name) const {
+ if (!encoder_stack_) {
+ RTC_LOG(LS_ERROR) << caller_name << " failed: No send codec is registered.";
+ return false;
+ }
+ return true;
+}
+
+ANAStats AudioCodingModuleImpl::GetANAStats() const {
+ MutexLock lock(&acm_mutex_);
+ if (encoder_stack_)
+ return encoder_stack_->GetANAStats();
+ // If no encoder is set, return default stats.
+ return ANAStats();
+}
+
+int AudioCodingModuleImpl::GetTargetBitrate() const {
+ MutexLock lock(&acm_mutex_);
+ if (!encoder_stack_) {
+ return -1;
+ }
+ return encoder_stack_->GetTargetBitrate();
+}
+
+} // namespace
+
+AudioCodingModule::Config::Config(
+ rtc::scoped_refptr<AudioDecoderFactory> decoder_factory)
+ : neteq_config(),
+ clock(Clock::GetRealTimeClockRaw()),
+ decoder_factory(decoder_factory) {
+  // Post-decode VAD is disabled by default in NetEq; however, the Audio
+  // Conference Mixer relies on VAD decisions and fails without them.
+ neteq_config.enable_post_decode_vad = true;
+}
+
+AudioCodingModule::Config::Config(const Config&) = default;
+AudioCodingModule::Config::~Config() = default;
+
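+// Usage sketch (for illustration only; this mirrors how the unit tests
+// create an ACM instance with the built-in decoder factory):
+//   AudioCodingModule::Config config(CreateBuiltinAudioDecoderFactory());
+//   std::unique_ptr<AudioCodingModule> acm(AudioCodingModule::Create(config));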
+AudioCodingModule* AudioCodingModule::Create(const Config& config) {
+ return new AudioCodingModuleImpl(config);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/acm2/audio_coding_module_unittest.cc b/third_party/libwebrtc/modules/audio_coding/acm2/audio_coding_module_unittest.cc
new file mode 100644
index 0000000000..840f5662f6
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/acm2/audio_coding_module_unittest.cc
@@ -0,0 +1,1618 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/include/audio_coding_module.h"
+
+#include <stdio.h>
+#include <string.h>
+
+#include <atomic>
+#include <memory>
+#include <vector>
+
+#include "absl/strings/string_view.h"
+#include "api/audio_codecs/audio_encoder.h"
+#include "api/audio_codecs/builtin_audio_decoder_factory.h"
+#include "api/audio_codecs/builtin_audio_encoder_factory.h"
+#include "api/audio_codecs/opus/audio_decoder_multi_channel_opus.h"
+#include "api/audio_codecs/opus/audio_decoder_opus.h"
+#include "api/audio_codecs/opus/audio_encoder_multi_channel_opus.h"
+#include "api/audio_codecs/opus/audio_encoder_opus.h"
+#include "modules/audio_coding/acm2/acm_receive_test.h"
+#include "modules/audio_coding/acm2/acm_send_test.h"
+#include "modules/audio_coding/codecs/cng/audio_encoder_cng.h"
+#include "modules/audio_coding/codecs/g711/audio_decoder_pcm.h"
+#include "modules/audio_coding/codecs/g711/audio_encoder_pcm.h"
+#include "modules/audio_coding/codecs/isac/main/include/audio_encoder_isac.h"
+#include "modules/audio_coding/include/audio_coding_module_typedefs.h"
+#include "modules/audio_coding/neteq/tools/audio_checksum.h"
+#include "modules/audio_coding/neteq/tools/audio_loop.h"
+#include "modules/audio_coding/neteq/tools/constant_pcm_packet_source.h"
+#include "modules/audio_coding/neteq/tools/input_audio_file.h"
+#include "modules/audio_coding/neteq/tools/output_audio_file.h"
+#include "modules/audio_coding/neteq/tools/output_wav_file.h"
+#include "modules/audio_coding/neteq/tools/packet.h"
+#include "modules/audio_coding/neteq/tools/rtp_file_source.h"
+#include "rtc_base/event.h"
+#include "rtc_base/message_digest.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "rtc_base/platform_thread.h"
+#include "rtc_base/synchronization/mutex.h"
+#include "rtc_base/system/arch.h"
+#include "rtc_base/thread_annotations.h"
+#include "system_wrappers/include/clock.h"
+#include "system_wrappers/include/cpu_features_wrapper.h"
+#include "system_wrappers/include/sleep.h"
+#include "test/audio_decoder_proxy_factory.h"
+#include "test/gtest.h"
+#include "test/mock_audio_decoder.h"
+#include "test/mock_audio_encoder.h"
+#include "test/testsupport/file_utils.h"
+#include "test/testsupport/rtc_expect_death.h"
+
+using ::testing::_;
+using ::testing::AtLeast;
+using ::testing::Invoke;
+
+namespace webrtc {
+
+namespace {
+const int kSampleRateHz = 16000;
+const int kNumSamples10ms = kSampleRateHz / 100;
+const int kFrameSizeMs = 10; // Multiple of 10.
+const int kFrameSizeSamples = kFrameSizeMs / 10 * kNumSamples10ms;
+const int kPayloadSizeBytes = kFrameSizeSamples * sizeof(int16_t);
+const uint8_t kPayloadType = 111;
+} // namespace
+
+class RtpData {
+ public:
+ RtpData(int samples_per_packet, uint8_t payload_type)
+ : samples_per_packet_(samples_per_packet), payload_type_(payload_type) {}
+
+ virtual ~RtpData() {}
+
+ void Populate(RTPHeader* rtp_header) {
+ rtp_header->sequenceNumber = 0xABCD;
+ rtp_header->timestamp = 0xABCDEF01;
+ rtp_header->payloadType = payload_type_;
+ rtp_header->markerBit = false;
+ rtp_header->ssrc = 0x1234;
+ rtp_header->numCSRCs = 0;
+
+ rtp_header->payload_type_frequency = kSampleRateHz;
+ }
+
+ void Forward(RTPHeader* rtp_header) {
+ ++rtp_header->sequenceNumber;
+ rtp_header->timestamp += samples_per_packet_;
+ }
+
+ private:
+ int samples_per_packet_;
+ uint8_t payload_type_;
+};
+
+class PacketizationCallbackStubOldApi : public AudioPacketizationCallback {
+ public:
+ PacketizationCallbackStubOldApi()
+ : num_calls_(0),
+ last_frame_type_(AudioFrameType::kEmptyFrame),
+ last_payload_type_(-1),
+ last_timestamp_(0) {}
+
+ int32_t SendData(AudioFrameType frame_type,
+ uint8_t payload_type,
+ uint32_t timestamp,
+ const uint8_t* payload_data,
+ size_t payload_len_bytes,
+ int64_t absolute_capture_timestamp_ms) override {
+ MutexLock lock(&mutex_);
+ ++num_calls_;
+ last_frame_type_ = frame_type;
+ last_payload_type_ = payload_type;
+ last_timestamp_ = timestamp;
+ last_payload_vec_.assign(payload_data, payload_data + payload_len_bytes);
+ return 0;
+ }
+
+ int num_calls() const {
+ MutexLock lock(&mutex_);
+ return num_calls_;
+ }
+
+ int last_payload_len_bytes() const {
+ MutexLock lock(&mutex_);
+ return rtc::checked_cast<int>(last_payload_vec_.size());
+ }
+
+ AudioFrameType last_frame_type() const {
+ MutexLock lock(&mutex_);
+ return last_frame_type_;
+ }
+
+ int last_payload_type() const {
+ MutexLock lock(&mutex_);
+ return last_payload_type_;
+ }
+
+ uint32_t last_timestamp() const {
+ MutexLock lock(&mutex_);
+ return last_timestamp_;
+ }
+
+ void SwapBuffers(std::vector<uint8_t>* payload) {
+ MutexLock lock(&mutex_);
+ last_payload_vec_.swap(*payload);
+ }
+
+ private:
+ int num_calls_ RTC_GUARDED_BY(mutex_);
+ AudioFrameType last_frame_type_ RTC_GUARDED_BY(mutex_);
+ int last_payload_type_ RTC_GUARDED_BY(mutex_);
+ uint32_t last_timestamp_ RTC_GUARDED_BY(mutex_);
+ std::vector<uint8_t> last_payload_vec_ RTC_GUARDED_BY(mutex_);
+ mutable Mutex mutex_;
+};
+
+class AudioCodingModuleTestOldApi : public ::testing::Test {
+ protected:
+ AudioCodingModuleTestOldApi()
+ : rtp_utility_(new RtpData(kFrameSizeSamples, kPayloadType)),
+ clock_(Clock::GetRealTimeClock()) {}
+
+ ~AudioCodingModuleTestOldApi() {}
+
+ void TearDown() {}
+
+ void SetUp() {
+ acm_.reset(AudioCodingModule::Create([this] {
+ AudioCodingModule::Config config;
+ config.clock = clock_;
+ config.decoder_factory = CreateBuiltinAudioDecoderFactory();
+ return config;
+ }()));
+
+ rtp_utility_->Populate(&rtp_header_);
+
+ input_frame_.sample_rate_hz_ = kSampleRateHz;
+ input_frame_.num_channels_ = 1;
+ input_frame_.samples_per_channel_ = kSampleRateHz * 10 / 1000; // 10 ms.
+ static_assert(kSampleRateHz * 10 / 1000 <= AudioFrame::kMaxDataSizeSamples,
+ "audio frame too small");
+ input_frame_.Mute();
+
+ ASSERT_EQ(0, acm_->RegisterTransportCallback(&packet_cb_));
+
+ SetUpL16Codec();
+ }
+
+ // Set up L16 codec.
+ virtual void SetUpL16Codec() {
+ audio_format_ = SdpAudioFormat("L16", kSampleRateHz, 1);
+ pac_size_ = 160;
+ }
+
+ virtual void RegisterCodec() {
+ acm_->SetReceiveCodecs({{kPayloadType, *audio_format_}});
+ acm_->SetEncoder(CreateBuiltinAudioEncoderFactory()->MakeAudioEncoder(
+ kPayloadType, *audio_format_, absl::nullopt));
+ }
+
+ virtual void InsertPacketAndPullAudio() {
+ InsertPacket();
+ PullAudio();
+ }
+
+ virtual void InsertPacket() {
+ const uint8_t kPayload[kPayloadSizeBytes] = {0};
+ ASSERT_EQ(0,
+ acm_->IncomingPacket(kPayload, kPayloadSizeBytes, rtp_header_));
+ rtp_utility_->Forward(&rtp_header_);
+ }
+
+ virtual void PullAudio() {
+ AudioFrame audio_frame;
+ bool muted;
+ ASSERT_EQ(0, acm_->PlayoutData10Ms(-1, &audio_frame, &muted));
+ ASSERT_FALSE(muted);
+ }
+
+ virtual void InsertAudio() {
+ ASSERT_GE(acm_->Add10MsData(input_frame_), 0);
+ input_frame_.timestamp_ += kNumSamples10ms;
+ }
+
+ virtual void VerifyEncoding() {
+ int last_length = packet_cb_.last_payload_len_bytes();
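+    // L16 uses 2 bytes per sample, so a full packet is 2 * pac_size_ bytes;
+    // zero means that no packet was produced by this call.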
+ EXPECT_TRUE(last_length == 2 * pac_size_ || last_length == 0)
+ << "Last encoded packet was " << last_length << " bytes.";
+ }
+
+ virtual void InsertAudioAndVerifyEncoding() {
+ InsertAudio();
+ VerifyEncoding();
+ }
+
+ std::unique_ptr<RtpData> rtp_utility_;
+ std::unique_ptr<AudioCodingModule> acm_;
+ PacketizationCallbackStubOldApi packet_cb_;
+ RTPHeader rtp_header_;
+ AudioFrame input_frame_;
+
+ absl::optional<SdpAudioFormat> audio_format_;
+ int pac_size_ = -1;
+
+ Clock* clock_;
+};
+
+class AudioCodingModuleTestOldApiDeathTest
+ : public AudioCodingModuleTestOldApi {};
+
+TEST_F(AudioCodingModuleTestOldApi, VerifyOutputFrame) {
+ AudioFrame audio_frame;
+ const int kSampleRateHz = 32000;
+ bool muted;
+ EXPECT_EQ(0, acm_->PlayoutData10Ms(kSampleRateHz, &audio_frame, &muted));
+ ASSERT_FALSE(muted);
+ EXPECT_EQ(0u, audio_frame.timestamp_);
+ EXPECT_GT(audio_frame.num_channels_, 0u);
+ EXPECT_EQ(static_cast<size_t>(kSampleRateHz / 100),
+ audio_frame.samples_per_channel_);
+ EXPECT_EQ(kSampleRateHz, audio_frame.sample_rate_hz_);
+}
+
+// The test below is temporarily disabled on Windows due to problems with
+// clang debug builds.
+// TODO(tommi): Re-enable when we've figured out what the problem is.
+// http://crbug.com/615050
+#if !defined(WEBRTC_WIN) && defined(__clang__) && RTC_DCHECK_IS_ON && \
+ GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+TEST_F(AudioCodingModuleTestOldApiDeathTest, FailOnZeroDesiredFrequency) {
+ AudioFrame audio_frame;
+ bool muted;
+ RTC_EXPECT_DEATH(acm_->PlayoutData10Ms(0, &audio_frame, &muted),
+ "dst_sample_rate_hz");
+}
+#endif
+
+// Checks that the transport callback is invoked once for each speech packet.
+// Also checks that the frame type is kAudioFrameSpeech.
+TEST_F(AudioCodingModuleTestOldApi, TransportCallbackIsInvokedForEachPacket) {
+ const int k10MsBlocksPerPacket = 3;
+ pac_size_ = k10MsBlocksPerPacket * kSampleRateHz / 100;
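+  // The "ptime" parameter makes the encoder pack 30 ms (three 10 ms blocks)
+  // into each packet.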
+ audio_format_->parameters["ptime"] = "30";
+ RegisterCodec();
+ const int kLoops = 10;
+ for (int i = 0; i < kLoops; ++i) {
+ EXPECT_EQ(i / k10MsBlocksPerPacket, packet_cb_.num_calls());
+ if (packet_cb_.num_calls() > 0)
+ EXPECT_EQ(AudioFrameType::kAudioFrameSpeech,
+ packet_cb_.last_frame_type());
+ InsertAudioAndVerifyEncoding();
+ }
+ EXPECT_EQ(kLoops / k10MsBlocksPerPacket, packet_cb_.num_calls());
+ EXPECT_EQ(AudioFrameType::kAudioFrameSpeech, packet_cb_.last_frame_type());
+}
+
+#if defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX)
+// Verifies that the RTP timestamp series is not reset when the codec is
+// changed.
+TEST_F(AudioCodingModuleTestOldApi, TimestampSeriesContinuesWhenCodecChanges) {
+ RegisterCodec(); // This registers the default codec.
+ uint32_t expected_ts = input_frame_.timestamp_;
+ int blocks_per_packet = pac_size_ / (kSampleRateHz / 100);
+ // Encode 5 packets of the first codec type.
+ const int kNumPackets1 = 5;
+ for (int j = 0; j < kNumPackets1; ++j) {
+ for (int i = 0; i < blocks_per_packet; ++i) {
+ EXPECT_EQ(j, packet_cb_.num_calls());
+ InsertAudio();
+ }
+ EXPECT_EQ(j + 1, packet_cb_.num_calls());
+ EXPECT_EQ(expected_ts, packet_cb_.last_timestamp());
+ expected_ts += pac_size_;
+ }
+
+ // Change codec.
+ audio_format_ = SdpAudioFormat("ISAC", kSampleRateHz, 1);
+ pac_size_ = 480;
+ RegisterCodec();
+ blocks_per_packet = pac_size_ / (kSampleRateHz / 100);
+ // Encode another 5 packets.
+ const int kNumPackets2 = 5;
+ for (int j = 0; j < kNumPackets2; ++j) {
+ for (int i = 0; i < blocks_per_packet; ++i) {
+ EXPECT_EQ(kNumPackets1 + j, packet_cb_.num_calls());
+ InsertAudio();
+ }
+ EXPECT_EQ(kNumPackets1 + j + 1, packet_cb_.num_calls());
+ EXPECT_EQ(expected_ts, packet_cb_.last_timestamp());
+ expected_ts += pac_size_;
+ }
+}
+#endif
+
+// Introduce this class to set different expectations on the number of encoded
+// bytes. This class expects all encoded packets to be 9 bytes (matching one
+// CNG SID frame) or 0 bytes. This test depends on `input_frame_` containing
+// (near-)zero values. It also introduces a way to register comfort noise with
+// a custom payload type.
+class AudioCodingModuleTestWithComfortNoiseOldApi
+ : public AudioCodingModuleTestOldApi {
+ protected:
+ void RegisterCngCodec(int rtp_payload_type) {
+ acm_->SetReceiveCodecs({{kPayloadType, *audio_format_},
+ {rtp_payload_type, {"cn", kSampleRateHz, 1}}});
+ acm_->ModifyEncoder([&](std::unique_ptr<AudioEncoder>* enc) {
+ AudioEncoderCngConfig config;
+ config.speech_encoder = std::move(*enc);
+ config.num_channels = 1;
+ config.payload_type = rtp_payload_type;
+ config.vad_mode = Vad::kVadNormal;
+ *enc = CreateComfortNoiseEncoder(std::move(config));
+ });
+ }
+
+ void VerifyEncoding() override {
+ int last_length = packet_cb_.last_payload_len_bytes();
+ EXPECT_TRUE(last_length == 9 || last_length == 0)
+ << "Last encoded packet was " << last_length << " bytes.";
+ }
+
+ void DoTest(int blocks_per_packet, int cng_pt) {
+ const int kLoops = 40;
+    // This array defines the expected frame types and when they should
+    // arrive. We expect a frame to arrive each time the speech encoder would
+    // have produced a packet, and once every 100 ms the frame should be
+    // non-empty, that is, contain comfort noise.
+ const struct {
+ int ix;
+ AudioFrameType type;
+ } expectation[] = {{2, AudioFrameType::kAudioFrameCN},
+ {5, AudioFrameType::kEmptyFrame},
+ {8, AudioFrameType::kEmptyFrame},
+ {11, AudioFrameType::kAudioFrameCN},
+ {14, AudioFrameType::kEmptyFrame},
+ {17, AudioFrameType::kEmptyFrame},
+ {20, AudioFrameType::kAudioFrameCN},
+ {23, AudioFrameType::kEmptyFrame},
+ {26, AudioFrameType::kEmptyFrame},
+ {29, AudioFrameType::kEmptyFrame},
+ {32, AudioFrameType::kAudioFrameCN},
+ {35, AudioFrameType::kEmptyFrame},
+ {38, AudioFrameType::kEmptyFrame}};
+ for (int i = 0; i < kLoops; ++i) {
+ int num_calls_before = packet_cb_.num_calls();
+ EXPECT_EQ(i / blocks_per_packet, num_calls_before);
+ InsertAudioAndVerifyEncoding();
+ int num_calls = packet_cb_.num_calls();
+ if (num_calls == num_calls_before + 1) {
+ EXPECT_EQ(expectation[num_calls - 1].ix, i);
+ EXPECT_EQ(expectation[num_calls - 1].type, packet_cb_.last_frame_type())
+ << "Wrong frame type for lap " << i;
+ EXPECT_EQ(cng_pt, packet_cb_.last_payload_type());
+ } else {
+ EXPECT_EQ(num_calls, num_calls_before);
+ }
+ }
+ }
+};
+
+// Checks that the transport callback is invoked once per frame period of the
+// underlying speech encoder, even when comfort noise is produced.
+// Also checks that the frame type is kAudioFrameCN or kEmptyFrame.
+TEST_F(AudioCodingModuleTestWithComfortNoiseOldApi,
+ TransportCallbackTestForComfortNoiseRegisterCngLast) {
+ const int k10MsBlocksPerPacket = 3;
+ pac_size_ = k10MsBlocksPerPacket * kSampleRateHz / 100;
+ audio_format_->parameters["ptime"] = "30";
+ RegisterCodec();
+ const int kCngPayloadType = 105;
+ RegisterCngCodec(kCngPayloadType);
+ DoTest(k10MsBlocksPerPacket, kCngPayloadType);
+}
+
+// A multi-threaded test for ACM. This base class uses the PCM16b 16 kHz
+// codec, while the derived class AcmIsacMtTestOldApi uses iSAC.
+class AudioCodingModuleMtTestOldApi : public AudioCodingModuleTestOldApi {
+ protected:
+ static const int kNumPackets = 500;
+ static const int kNumPullCalls = 500;
+
+ AudioCodingModuleMtTestOldApi()
+ : AudioCodingModuleTestOldApi(),
+ send_count_(0),
+ insert_packet_count_(0),
+ pull_audio_count_(0),
+ next_insert_packet_time_ms_(0),
+ fake_clock_(new SimulatedClock(0)) {
+ clock_ = fake_clock_.get();
+ }
+
+ void SetUp() {
+ AudioCodingModuleTestOldApi::SetUp();
+ RegisterCodec(); // Must be called before the threads start below.
+ StartThreads();
+ }
+
+ void StartThreads() {
+ quit_.store(false);
+
+ const auto attributes =
+ rtc::ThreadAttributes().SetPriority(rtc::ThreadPriority::kRealtime);
+ send_thread_ = rtc::PlatformThread::SpawnJoinable(
+ [this] {
+ while (!quit_.load()) {
+ CbSendImpl();
+ }
+ },
+ "send", attributes);
+ insert_packet_thread_ = rtc::PlatformThread::SpawnJoinable(
+ [this] {
+ while (!quit_.load()) {
+ CbInsertPacketImpl();
+ }
+ },
+ "insert_packet", attributes);
+ pull_audio_thread_ = rtc::PlatformThread::SpawnJoinable(
+ [this] {
+ while (!quit_.load()) {
+ CbPullAudioImpl();
+ }
+ },
+ "pull_audio", attributes);
+ }
+
+ void TearDown() {
+ AudioCodingModuleTestOldApi::TearDown();
+ quit_.store(true);
+ pull_audio_thread_.Finalize();
+ send_thread_.Finalize();
+ insert_packet_thread_.Finalize();
+ }
+
+ bool RunTest() {
+ return test_complete_.Wait(10 * 60 * 1000); // 10 minutes' timeout.
+ }
+
+ virtual bool TestDone() {
+ if (packet_cb_.num_calls() > kNumPackets) {
+ MutexLock lock(&mutex_);
+ if (pull_audio_count_ > kNumPullCalls) {
+ // Both conditions for completion are met. End the test.
+ return true;
+ }
+ }
+ return false;
+ }
+
+ // The send thread doesn't have to care about the current simulated time,
+ // since only the AcmReceiver is using the clock.
+ void CbSendImpl() {
+ SleepMs(1);
+ if (HasFatalFailure()) {
+ // End the test early if a fatal failure (ASSERT_*) has occurred.
+ test_complete_.Set();
+ }
+ ++send_count_;
+ InsertAudioAndVerifyEncoding();
+ if (TestDone()) {
+ test_complete_.Set();
+ }
+ }
+
+ void CbInsertPacketImpl() {
+ SleepMs(1);
+ {
+ MutexLock lock(&mutex_);
+ if (clock_->TimeInMilliseconds() < next_insert_packet_time_ms_) {
+ return;
+ }
+ next_insert_packet_time_ms_ += 10;
+ }
+    // Now we're not holding the mutex when calling ACM.
+ ++insert_packet_count_;
+ InsertPacket();
+ }
+
+ void CbPullAudioImpl() {
+ SleepMs(1);
+ {
+ MutexLock lock(&mutex_);
+ // Don't let the insert thread fall behind.
+ if (next_insert_packet_time_ms_ < clock_->TimeInMilliseconds()) {
+ return;
+ }
+ ++pull_audio_count_;
+ }
+    // Now we're not holding the mutex when calling ACM.
+ PullAudio();
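+    // Advance simulated time by the 10 ms of audio just pulled.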
+ fake_clock_->AdvanceTimeMilliseconds(10);
+ }
+
+ rtc::PlatformThread send_thread_;
+ rtc::PlatformThread insert_packet_thread_;
+ rtc::PlatformThread pull_audio_thread_;
+ // Used to force worker threads to stop looping.
+ std::atomic<bool> quit_;
+
+ rtc::Event test_complete_;
+ int send_count_;
+ int insert_packet_count_;
+ int pull_audio_count_ RTC_GUARDED_BY(mutex_);
+ Mutex mutex_;
+ int64_t next_insert_packet_time_ms_ RTC_GUARDED_BY(mutex_);
+ std::unique_ptr<SimulatedClock> fake_clock_;
+};
+
+#if defined(WEBRTC_IOS)
+#define MAYBE_DoTest DISABLED_DoTest
+#else
+#define MAYBE_DoTest DoTest
+#endif
+TEST_F(AudioCodingModuleMtTestOldApi, MAYBE_DoTest) {
+ EXPECT_TRUE(RunTest());
+}
+
+// This is a multi-threaded ACM test using iSAC. The test encodes audio
+// from a PCM file. The most recent encoded frame is used as input to the
+// receiving part. Depending on timing, it may happen that the same RTP packet
+// is inserted into the receiver multiple times, but this is a valid use case
+// and simplifies the test code a lot.
+class AcmIsacMtTestOldApi : public AudioCodingModuleMtTestOldApi {
+ protected:
+ static const int kNumPackets = 500;
+ static const int kNumPullCalls = 500;
+
+ AcmIsacMtTestOldApi()
+ : AudioCodingModuleMtTestOldApi(), last_packet_number_(0) {}
+
+ ~AcmIsacMtTestOldApi() {}
+
+ void SetUp() override {
+ AudioCodingModuleTestOldApi::SetUp();
+ RegisterCodec(); // Must be called before the threads start below.
+
+ // Set up input audio source to read from specified file, loop after 5
+ // seconds, and deliver blocks of 10 ms.
+ const std::string input_file_name =
+ webrtc::test::ResourcePath("audio_coding/speech_mono_16kHz", "pcm");
+ audio_loop_.Init(input_file_name, 5 * kSampleRateHz, kNumSamples10ms);
+
+ // Generate one packet to have something to insert.
+ int loop_counter = 0;
+ while (packet_cb_.last_payload_len_bytes() == 0) {
+ InsertAudio();
+ ASSERT_LT(loop_counter++, 10);
+ }
+    // Set `last_packet_number_` to one less than `num_calls` so that the
+    // packet will be fetched in the next InsertPacket() call.
+ last_packet_number_ = packet_cb_.num_calls() - 1;
+
+ StartThreads();
+ }
+
+ void RegisterCodec() override {
+ static_assert(kSampleRateHz == 16000, "test designed for iSAC 16 kHz");
+ audio_format_ = SdpAudioFormat("isac", kSampleRateHz, 1);
+ pac_size_ = 480;
+
+    // Register the iSAC codec in ACM, effectively unregistering the PCM16B
+    // codec registered in AudioCodingModuleTestOldApi::SetUp().
+ acm_->SetReceiveCodecs({{kPayloadType, *audio_format_}});
+ acm_->SetEncoder(CreateBuiltinAudioEncoderFactory()->MakeAudioEncoder(
+ kPayloadType, *audio_format_, absl::nullopt));
+ }
+
+ void InsertPacket() override {
+ int num_calls = packet_cb_.num_calls(); // Store locally for thread safety.
+ if (num_calls > last_packet_number_) {
+ // Get the new payload out from the callback handler.
+ // Note that since we swap buffers here instead of directly inserting
+ // a pointer to the data in `packet_cb_`, we avoid locking the callback
+ // for the duration of the IncomingPacket() call.
+ packet_cb_.SwapBuffers(&last_payload_vec_);
+ ASSERT_GT(last_payload_vec_.size(), 0u);
+ rtp_utility_->Forward(&rtp_header_);
+ last_packet_number_ = num_calls;
+ }
+ ASSERT_GT(last_payload_vec_.size(), 0u);
+ ASSERT_EQ(0, acm_->IncomingPacket(&last_payload_vec_[0],
+ last_payload_vec_.size(), rtp_header_));
+ }
+
+ void InsertAudio() override {
+    // TODO(kwiberg): Use std::copy here. There might be complications
+    // because AFAICS this call confuses the number of samples with the
+    // number of bytes, and ends up copying only half of what it should.
+ memcpy(input_frame_.mutable_data(), audio_loop_.GetNextBlock().data(),
+ kNumSamples10ms);
+ AudioCodingModuleTestOldApi::InsertAudio();
+ }
+
+  // Override the verification function with a no-op, since iSAC produces
+  // variable payload sizes.
+ void VerifyEncoding() override {}
+
+ // This method is the same as AudioCodingModuleMtTestOldApi::TestDone(), but
+ // here it is using the constants defined in this class (i.e., shorter test
+ // run).
+ bool TestDone() override {
+ if (packet_cb_.num_calls() > kNumPackets) {
+ MutexLock lock(&mutex_);
+ if (pull_audio_count_ > kNumPullCalls) {
+ // Both conditions for completion are met. End the test.
+ return true;
+ }
+ }
+ return false;
+ }
+
+ int last_packet_number_;
+ std::vector<uint8_t> last_payload_vec_;
+ test::AudioLoop audio_loop_;
+};
+
+#if defined(WEBRTC_IOS)
+#define MAYBE_DoTest DISABLED_DoTest
+#else
+#define MAYBE_DoTest DoTest
+#endif
+#if defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX)
+TEST_F(AcmIsacMtTestOldApi, MAYBE_DoTest) {
+ EXPECT_TRUE(RunTest());
+}
+#endif
+
+class AcmReRegisterIsacMtTestOldApi : public AudioCodingModuleTestOldApi {
+ protected:
+ static const int kRegisterAfterNumPackets = 5;
+ static const int kNumPackets = 10;
+ static const int kPacketSizeMs = 30;
+ static const int kPacketSizeSamples = kPacketSizeMs * 16;
+
+ AcmReRegisterIsacMtTestOldApi()
+ : AudioCodingModuleTestOldApi(),
+ codec_registered_(false),
+ receive_packet_count_(0),
+ next_insert_packet_time_ms_(0),
+ fake_clock_(new SimulatedClock(0)) {
+ AudioEncoderIsacFloatImpl::Config config;
+ config.payload_type = kPayloadType;
+ isac_encoder_.reset(new AudioEncoderIsacFloatImpl(config));
+ clock_ = fake_clock_.get();
+ }
+
+ void SetUp() override {
+ AudioCodingModuleTestOldApi::SetUp();
+ // Set up input audio source to read from specified file, loop after 5
+ // seconds, and deliver blocks of 10 ms.
+ const std::string input_file_name =
+ webrtc::test::ResourcePath("audio_coding/speech_mono_16kHz", "pcm");
+ audio_loop_.Init(input_file_name, 5 * kSampleRateHz, kNumSamples10ms);
+ RegisterCodec(); // Must be called before the threads start below.
+ StartThreads();
+ }
+
+ void RegisterCodec() override {
+    // Register the iSAC codec in ACM, effectively unregistering the PCM16B
+    // codec registered in AudioCodingModuleTestOldApi::SetUp().
+    // Only register the decoder for now; the encoder is registered later.
+ static_assert(kSampleRateHz == 16000, "test designed for iSAC 16 kHz");
+ acm_->SetReceiveCodecs({{kPayloadType, {"ISAC", kSampleRateHz, 1}}});
+ }
+
+ void StartThreads() {
+ quit_.store(false);
+ const auto attributes =
+ rtc::ThreadAttributes().SetPriority(rtc::ThreadPriority::kRealtime);
+ receive_thread_ = rtc::PlatformThread::SpawnJoinable(
+ [this] {
+ while (!quit_.load() && CbReceiveImpl()) {
+ }
+ },
+ "receive", attributes);
+ codec_registration_thread_ = rtc::PlatformThread::SpawnJoinable(
+ [this] {
+ while (!quit_.load()) {
+ CbCodecRegistrationImpl();
+ }
+ },
+ "codec_registration", attributes);
+ }
+
+ void TearDown() override {
+ AudioCodingModuleTestOldApi::TearDown();
+ quit_.store(true);
+ receive_thread_.Finalize();
+ codec_registration_thread_.Finalize();
+ }
+
+ bool RunTest() {
+ return test_complete_.Wait(10 * 60 * 1000); // 10 minutes' timeout.
+ }
+
+ bool CbReceiveImpl() {
+ SleepMs(1);
+ rtc::Buffer encoded;
+ AudioEncoder::EncodedInfo info;
+ {
+ MutexLock lock(&mutex_);
+ if (clock_->TimeInMilliseconds() < next_insert_packet_time_ms_) {
+ return true;
+ }
+ next_insert_packet_time_ms_ += kPacketSizeMs;
+ ++receive_packet_count_;
+
+ // Encode new frame.
+ uint32_t input_timestamp = rtp_header_.timestamp;
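+      // Feed 10 ms blocks to the encoder until it emits a packet; with 30 ms
+      // packets this takes three blocks.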
+ while (info.encoded_bytes == 0) {
+ info = isac_encoder_->Encode(input_timestamp,
+ audio_loop_.GetNextBlock(), &encoded);
+ input_timestamp += 160; // 10 ms at 16 kHz.
+ }
+ EXPECT_EQ(rtp_header_.timestamp + kPacketSizeSamples, input_timestamp);
+ EXPECT_EQ(rtp_header_.timestamp, info.encoded_timestamp);
+ EXPECT_EQ(rtp_header_.payloadType, info.payload_type);
+ }
+  // Now we're not holding the mutex when calling ACM.
+
+ // Insert into ACM.
+ EXPECT_EQ(0, acm_->IncomingPacket(encoded.data(), info.encoded_bytes,
+ rtp_header_));
+
+ // Pull audio.
+ for (int i = 0; i < rtc::CheckedDivExact(kPacketSizeMs, 10); ++i) {
+ AudioFrame audio_frame;
+ bool muted;
+ EXPECT_EQ(0, acm_->PlayoutData10Ms(-1 /* default output frequency */,
+ &audio_frame, &muted));
+ if (muted) {
+ ADD_FAILURE();
+ return false;
+ }
+ fake_clock_->AdvanceTimeMilliseconds(10);
+ }
+ rtp_utility_->Forward(&rtp_header_);
+ return true;
+ }
+
+ void CbCodecRegistrationImpl() {
+ SleepMs(1);
+ if (HasFatalFailure()) {
+ // End the test early if a fatal failure (ASSERT_*) has occurred.
+ test_complete_.Set();
+ }
+ MutexLock lock(&mutex_);
+ if (!codec_registered_ &&
+ receive_packet_count_ > kRegisterAfterNumPackets) {
+ // Register the iSAC encoder.
+ acm_->SetEncoder(CreateBuiltinAudioEncoderFactory()->MakeAudioEncoder(
+ kPayloadType, *audio_format_, absl::nullopt));
+ codec_registered_ = true;
+ }
+ if (codec_registered_ && receive_packet_count_ > kNumPackets) {
+ test_complete_.Set();
+ }
+ }
+
+ rtc::PlatformThread receive_thread_;
+ rtc::PlatformThread codec_registration_thread_;
+ // Used to force worker threads to stop looping.
+ std::atomic<bool> quit_;
+
+ rtc::Event test_complete_;
+ Mutex mutex_;
+ bool codec_registered_ RTC_GUARDED_BY(mutex_);
+ int receive_packet_count_ RTC_GUARDED_BY(mutex_);
+ int64_t next_insert_packet_time_ms_ RTC_GUARDED_BY(mutex_);
+ std::unique_ptr<AudioEncoderIsacFloatImpl> isac_encoder_;
+ std::unique_ptr<SimulatedClock> fake_clock_;
+ test::AudioLoop audio_loop_;
+};
+
+#if defined(WEBRTC_IOS)
+#define MAYBE_DoTest DISABLED_DoTest
+#else
+#define MAYBE_DoTest DoTest
+#endif
+#if defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX)
+TEST_F(AcmReRegisterIsacMtTestOldApi, MAYBE_DoTest) {
+ EXPECT_TRUE(RunTest());
+}
+#endif
+
+// Disabling all of these tests on iOS until file support has been added.
+// See https://code.google.com/p/webrtc/issues/detail?id=4752 for details.
+#if !defined(WEBRTC_IOS)
+
+// This test verifies bit exactness for the send-side of ACM. The test setup is
+// a chain of three different test classes:
+//
+// test::AcmSendTest -> AcmSenderBitExactness -> test::AcmReceiveTest
+//
+// The receiver side is driving the test by requesting new packets from
+// AcmSenderBitExactness::NextPacket(). This method, in turn, asks for the
+// packet from test::AcmSendTest::NextPacket, which inserts audio from the
+// input file until one packet is produced. (The input file loops indefinitely.)
+// Before passing the packet to the receiver, this test class verifies the
+// packet header and updates a payload checksum with the new payload. The
+// decoded output from the receiver is also verified with a (separate) checksum.
+class AcmSenderBitExactnessOldApi : public ::testing::Test,
+ public test::PacketSource {
+ protected:
+ static const int kTestDurationMs = 1000;
+
+ AcmSenderBitExactnessOldApi()
+ : frame_size_rtp_timestamps_(0),
+ packet_count_(0),
+ payload_type_(0),
+ last_sequence_number_(0),
+ last_timestamp_(0),
+ payload_checksum_(rtc::MessageDigestFactory::Create(rtc::DIGEST_MD5)) {}
+
+ // Sets up the test::AcmSendTest object. Returns true on success, otherwise
+ // false.
+ bool SetUpSender(absl::string_view input_file_name, int source_rate) {
+ // Note that `audio_source_` will loop forever. The test duration is set
+ // explicitly by `kTestDurationMs`.
+ audio_source_.reset(new test::InputAudioFile(input_file_name));
+ send_test_.reset(new test::AcmSendTestOldApi(audio_source_.get(),
+ source_rate, kTestDurationMs));
+    return send_test_.get() != nullptr;
+ }
+
+ // Registers a send codec in the test::AcmSendTest object. Returns true on
+ // success, false on failure.
+ bool RegisterSendCodec(absl::string_view payload_name,
+ int sampling_freq_hz,
+ int channels,
+ int payload_type,
+ int frame_size_samples,
+ int frame_size_rtp_timestamps) {
+ payload_type_ = payload_type;
+ frame_size_rtp_timestamps_ = frame_size_rtp_timestamps;
+ return send_test_->RegisterCodec(payload_name, sampling_freq_hz, channels,
+ payload_type, frame_size_samples);
+ }
+
+ void RegisterExternalSendCodec(
+ std::unique_ptr<AudioEncoder> external_speech_encoder,
+ int payload_type) {
+ payload_type_ = payload_type;
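+    // Convert the packet length from 10 ms frames to RTP timestamp ticks:
+    // each 10 ms frame corresponds to RtpTimestampRateHz() / 100 ticks.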
+ frame_size_rtp_timestamps_ = rtc::checked_cast<uint32_t>(
+ external_speech_encoder->Num10MsFramesInNextPacket() *
+ external_speech_encoder->RtpTimestampRateHz() / 100);
+ send_test_->RegisterExternalCodec(std::move(external_speech_encoder));
+ }
+
+ // Runs the test. SetUpSender() and RegisterSendCodec() must have been called
+ // before calling this method.
+ void Run(absl::string_view audio_checksum_ref,
+ absl::string_view payload_checksum_ref,
+ int expected_packets,
+ test::AcmReceiveTestOldApi::NumOutputChannels expected_channels,
+ rtc::scoped_refptr<AudioDecoderFactory> decoder_factory = nullptr) {
+ if (!decoder_factory) {
+ decoder_factory = CreateBuiltinAudioDecoderFactory();
+ }
+ // Set up the receiver used to decode the packets and verify the decoded
+ // output.
+ test::AudioChecksum audio_checksum;
+ const std::string output_file_name =
+ webrtc::test::OutputPath() +
+ ::testing::UnitTest::GetInstance()
+ ->current_test_info()
+ ->test_case_name() +
+ "_" + ::testing::UnitTest::GetInstance()->current_test_info()->name() +
+ "_output.wav";
+ const int kOutputFreqHz = 8000;
+ test::OutputWavFile output_file(output_file_name, kOutputFreqHz,
+ expected_channels);
+ // Have the output audio sent both to file and to the checksum calculator.
+ test::AudioSinkFork output(&audio_checksum, &output_file);
+ test::AcmReceiveTestOldApi receive_test(this, &output, kOutputFreqHz,
+ expected_channels, decoder_factory);
+ ASSERT_NO_FATAL_FAILURE(receive_test.RegisterDefaultCodecs());
+
+ // This is where the actual test is executed.
+ receive_test.Run();
+
+ // Extract and verify the audio checksum.
+ std::string checksum_string = audio_checksum.Finish();
+ ExpectChecksumEq(audio_checksum_ref, checksum_string);
+
+ // Extract and verify the payload checksum.
+ rtc::Buffer checksum_result(payload_checksum_->Size());
+ payload_checksum_->Finish(checksum_result.data(), checksum_result.size());
+ checksum_string = rtc::hex_encode(checksum_result);
+ ExpectChecksumEq(payload_checksum_ref, checksum_string);
+
+ // Verify number of packets produced.
+ EXPECT_EQ(expected_packets, packet_count_);
+
+ // Delete the output file.
+ remove(output_file_name.c_str());
+ }
+
+  // Helper: `result` must be one of the "|"-separated checksums in `ref`.
+ void ExpectChecksumEq(absl::string_view ref, absl::string_view result) {
+ if (ref.size() == result.size()) {
+ // Only one checksum: clearer message.
+ EXPECT_EQ(ref, result);
+ } else {
+ EXPECT_NE(ref.find(result), absl::string_view::npos)
+ << result << " must be one of these:\n"
+ << ref;
+ }
+ }
+
+ // Inherited from test::PacketSource.
+ std::unique_ptr<test::Packet> NextPacket() override {
+ auto packet = send_test_->NextPacket();
+ if (!packet)
+      return nullptr;
+
+ VerifyPacket(packet.get());
+ // TODO(henrik.lundin) Save the packet to file as well.
+
+ // Pass it on to the caller. The caller becomes the owner of `packet`.
+ return packet;
+ }
+
+ // Verifies the packet.
+ void VerifyPacket(const test::Packet* packet) {
+ EXPECT_TRUE(packet->valid_header());
+ // (We can check the header fields even if valid_header() is false.)
+ EXPECT_EQ(payload_type_, packet->header().payloadType);
+ if (packet_count_ > 0) {
+ // This is not the first packet.
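+      // The uint16_t subtraction handles sequence number wrap-around.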
+ uint16_t sequence_number_diff =
+ packet->header().sequenceNumber - last_sequence_number_;
+ EXPECT_EQ(1, sequence_number_diff);
+ uint32_t timestamp_diff = packet->header().timestamp - last_timestamp_;
+ EXPECT_EQ(frame_size_rtp_timestamps_, timestamp_diff);
+ }
+ ++packet_count_;
+ last_sequence_number_ = packet->header().sequenceNumber;
+ last_timestamp_ = packet->header().timestamp;
+ // Update the checksum.
+ payload_checksum_->Update(packet->payload(),
+ packet->payload_length_bytes());
+ }
+
+ void SetUpTest(absl::string_view codec_name,
+ int codec_sample_rate_hz,
+ int channels,
+ int payload_type,
+ int codec_frame_size_samples,
+ int codec_frame_size_rtp_timestamps) {
+ ASSERT_TRUE(SetUpSender(
+ channels == 1 ? kTestFileMono32kHz : kTestFileFakeStereo32kHz, 32000));
+ ASSERT_TRUE(RegisterSendCodec(codec_name, codec_sample_rate_hz, channels,
+ payload_type, codec_frame_size_samples,
+ codec_frame_size_rtp_timestamps));
+ }
+
+ void SetUpTestExternalEncoder(
+ std::unique_ptr<AudioEncoder> external_speech_encoder,
+ int payload_type) {
+ ASSERT_TRUE(send_test_);
+ RegisterExternalSendCodec(std::move(external_speech_encoder), payload_type);
+ }
+
+ std::unique_ptr<test::AcmSendTestOldApi> send_test_;
+ std::unique_ptr<test::InputAudioFile> audio_source_;
+ uint32_t frame_size_rtp_timestamps_;
+ int packet_count_;
+ uint8_t payload_type_;
+ uint16_t last_sequence_number_;
+ uint32_t last_timestamp_;
+ std::unique_ptr<rtc::MessageDigest> payload_checksum_;
+ const std::string kTestFileMono32kHz =
+ webrtc::test::ResourcePath("audio_coding/testfile32kHz", "pcm");
+ const std::string kTestFileFakeStereo32kHz =
+ webrtc::test::ResourcePath("audio_coding/testfile_fake_stereo_32kHz",
+ "pcm");
+ const std::string kTestFileQuad48kHz = webrtc::test::ResourcePath(
+ "audio_coding/speech_4_channels_48k_one_second",
+ "wav");
+};
+
+class AcmSenderBitExactnessNewApi : public AcmSenderBitExactnessOldApi {};
+
+// Run bit exactness tests only for release builds.
+#if (defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX)) && \
+ defined(NDEBUG) && defined(WEBRTC_LINUX) && defined(WEBRTC_ARCH_X86_64)
+TEST_F(AcmSenderBitExactnessOldApi, IsacWb30ms) {
+ ASSERT_NO_FATAL_FAILURE(SetUpTest("ISAC", 16000, 1, 103, 480, 480));
+ Run(/*audio_checksum_ref=*/"a3077ac01b0137e8bbc237fb1f9816a5",
+ /*payload_checksum_ref=*/"3c79f16f34218271f3dca4e2b1dfe1bb",
+ /*expected_packets=*/33,
+ /*expected_channels=*/test::AcmReceiveTestOldApi::kMonoOutput);
+}
+
+TEST_F(AcmSenderBitExactnessOldApi, IsacWb60ms) {
+ ASSERT_NO_FATAL_FAILURE(SetUpTest("ISAC", 16000, 1, 103, 960, 960));
+ Run(/*audio_checksum_ref=*/"76da9b7514f986fc2bb32b1c3170e8d4",
+ /*payload_checksum_ref=*/"9e0a0ab743ad987b55b8e14802769c56",
+ /*expected_packets=*/16,
+ /*expected_channels=*/test::AcmReceiveTestOldApi::kMonoOutput);
+}
+#endif
+
+// Run bit exactness test only for release build.
+#if defined(WEBRTC_CODEC_ISAC) && defined(NDEBUG) && defined(WEBRTC_LINUX) && \
+ defined(WEBRTC_ARCH_X86_64)
+TEST_F(AcmSenderBitExactnessOldApi, IsacSwb30ms) {
+ ASSERT_NO_FATAL_FAILURE(SetUpTest("ISAC", 32000, 1, 104, 960, 960));
+ Run(/*audio_checksum_ref=*/"f4cf577f28a0dcbac33358b757518e0c",
+ /*payload_checksum_ref=*/"ce86106a93419aefb063097108ec94ab",
+ /*expected_packets=*/33,
+ /*expected_channels=*/test::AcmReceiveTestOldApi::kMonoOutput);
+}
+#endif
+
+TEST_F(AcmSenderBitExactnessOldApi, Pcm16_8000khz_10ms) {
+ ASSERT_NO_FATAL_FAILURE(SetUpTest("L16", 8000, 1, 107, 80, 80));
+ Run(/*audio_checksum_ref=*/"69118ed438ac76252d023e0463819471",
+ /*payload_checksum_ref=*/"c1edd36339ce0326cc4550041ad719a0",
+ /*expected_packets=*/100,
+ /*expected_channels=*/test::AcmReceiveTestOldApi::kMonoOutput);
+}
+
+TEST_F(AcmSenderBitExactnessOldApi, Pcm16_16000khz_10ms) {
+ ASSERT_NO_FATAL_FAILURE(SetUpTest("L16", 16000, 1, 108, 160, 160));
+ Run(/*audio_checksum_ref=*/"bc6ab94d12a464921763d7544fdbd07e",
+ /*payload_checksum_ref=*/"ad786526383178b08d80d6eee06e9bad",
+ /*expected_packets=*/100,
+ /*expected_channels=*/test::AcmReceiveTestOldApi::kMonoOutput);
+}
+
+TEST_F(AcmSenderBitExactnessOldApi, Pcm16_32000khz_10ms) {
+ ASSERT_NO_FATAL_FAILURE(SetUpTest("L16", 32000, 1, 109, 320, 320));
+ Run(/*audio_checksum_ref=*/"c50244419c5c3a2f04cc69a022c266a2",
+ /*payload_checksum_ref=*/"5ef82ea885e922263606c6fdbc49f651",
+ /*expected_packets=*/100,
+ /*expected_channels=*/test::AcmReceiveTestOldApi::kMonoOutput);
+}
+
+TEST_F(AcmSenderBitExactnessOldApi, Pcm16_stereo_8000khz_10ms) {
+ ASSERT_NO_FATAL_FAILURE(SetUpTest("L16", 8000, 2, 111, 80, 80));
+ Run(/*audio_checksum_ref=*/"4fccf4cc96f1e8e8de4b9fadf62ded9e",
+ /*payload_checksum_ref=*/"62ce5adb0d4965d0a52ec98ae7f98974",
+ /*expected_packets=*/100,
+ /*expected_channels=*/test::AcmReceiveTestOldApi::kStereoOutput);
+}
+
+TEST_F(AcmSenderBitExactnessOldApi, Pcm16_stereo_16000khz_10ms) {
+ ASSERT_NO_FATAL_FAILURE(SetUpTest("L16", 16000, 2, 112, 160, 160));
+ Run(/*audio_checksum_ref=*/"e15e388d9d4af8c02a59fe1552fedee3",
+ /*payload_checksum_ref=*/"41ca8edac4b8c71cd54fd9f25ec14870",
+ /*expected_packets=*/100,
+ /*expected_channels=*/test::AcmReceiveTestOldApi::kStereoOutput);
+}
+
+TEST_F(AcmSenderBitExactnessOldApi, Pcm16_stereo_32000khz_10ms) {
+ ASSERT_NO_FATAL_FAILURE(SetUpTest("L16", 32000, 2, 113, 320, 320));
+ Run(/*audio_checksum_ref=*/"b240520c0d05003fde7a174ae5957286",
+ /*payload_checksum_ref=*/"50e58502fb04421bf5b857dda4c96879",
+ /*expected_packets=*/100,
+ /*expected_channels=*/test::AcmReceiveTestOldApi::kStereoOutput);
+}
+
+TEST_F(AcmSenderBitExactnessOldApi, Pcmu_20ms) {
+ ASSERT_NO_FATAL_FAILURE(SetUpTest("PCMU", 8000, 1, 0, 160, 160));
+ Run(/*audio_checksum_ref=*/"c8d1fc677f33c2022ec5f83c7f302280",
+ /*payload_checksum_ref=*/"8f9b8750bd80fe26b6cbf6659b89f0f9",
+ /*expected_packets=*/50,
+ /*expected_channels=*/test::AcmReceiveTestOldApi::kMonoOutput);
+}
+
+TEST_F(AcmSenderBitExactnessOldApi, Pcma_20ms) {
+ ASSERT_NO_FATAL_FAILURE(SetUpTest("PCMA", 8000, 1, 8, 160, 160));
+ Run(/*audio_checksum_ref=*/"47eb60e855eb12d1b0e6da9c975754a4",
+ /*payload_checksum_ref=*/"6ad745e55aa48981bfc790d0eeef2dd1",
+ /*expected_packets=*/50,
+ /*expected_channels=*/test::AcmReceiveTestOldApi::kMonoOutput);
+}
+
+TEST_F(AcmSenderBitExactnessOldApi, Pcmu_stereo_20ms) {
+ ASSERT_NO_FATAL_FAILURE(SetUpTest("PCMU", 8000, 2, 110, 160, 160));
+ Run(/*audio_checksum_ref=*/"6ef2f57d4934714787fd0a834e3ea18e",
+ /*payload_checksum_ref=*/"60b6f25e8d1e74cb679cfe756dd9bca5",
+ /*expected_packets=*/50,
+ /*expected_channels=*/test::AcmReceiveTestOldApi::kStereoOutput);
+}
+
+TEST_F(AcmSenderBitExactnessOldApi, Pcma_stereo_20ms) {
+ ASSERT_NO_FATAL_FAILURE(SetUpTest("PCMA", 8000, 2, 118, 160, 160));
+ Run(/*audio_checksum_ref=*/"a84d75e098d87ab6b260687eb4b612a2",
+ /*payload_checksum_ref=*/"92b282c83efd20e7eeef52ba40842cf7",
+ /*expected_packets=*/50,
+ /*expected_channels=*/test::AcmReceiveTestOldApi::kStereoOutput);
+}
+
+#if defined(WEBRTC_CODEC_ILBC) && defined(WEBRTC_LINUX) && \
+ defined(WEBRTC_ARCH_X86_64)
+TEST_F(AcmSenderBitExactnessOldApi, Ilbc_30ms) {
+ ASSERT_NO_FATAL_FAILURE(SetUpTest("ILBC", 8000, 1, 102, 240, 240));
+ Run(/*audio_checksum_ref=*/"b14dba0de36efa5ec88a32c0b320b70f",
+ /*payload_checksum_ref=*/"cfae2e9f6aba96e145f2bcdd5050ce78",
+ /*expected_packets=*/33,
+ /*expected_channels=*/test::AcmReceiveTestOldApi::kMonoOutput);
+}
+#endif
+
+#if defined(WEBRTC_LINUX) && defined(WEBRTC_ARCH_X86_64)
+TEST_F(AcmSenderBitExactnessOldApi, G722_20ms) {
+ ASSERT_NO_FATAL_FAILURE(SetUpTest("G722", 16000, 1, 9, 320, 160));
+ Run(/*audio_checksum_ref=*/"a87a91ec0124510a64967f5d768554ff",
+ /*payload_checksum_ref=*/"fc68a87e1380614e658087cb35d5ca10",
+ /*expected_packets=*/50,
+ /*expected_channels=*/test::AcmReceiveTestOldApi::kMonoOutput);
+}
+#endif
+
+#if defined(WEBRTC_LINUX) && defined(WEBRTC_ARCH_X86_64)
+TEST_F(AcmSenderBitExactnessOldApi, G722_stereo_20ms) {
+ ASSERT_NO_FATAL_FAILURE(SetUpTest("G722", 16000, 2, 119, 320, 160));
+ Run(/*audio_checksum_ref=*/"be0b8528ff9db3a2219f55ddd36faf7f",
+ /*payload_checksum_ref=*/"66516152eeaa1e650ad94ff85f668dac",
+ /*expected_packets=*/50,
+ /*expected_channels=*/test::AcmReceiveTestOldApi::kStereoOutput);
+}
+#endif
+
+namespace {
+// Checksum depends on libopus being compiled with or without SSE.
+const std::string audio_checksum =
+ "6a76fe2ffba057c06eb63239b3c47abe"
+ "|0c4f9d33b4a7379a34ee0c0d5718afe6";
+const std::string payload_checksum =
+ "b43bdf7638b2bc2a5a6f30bdc640b9ed"
+ "|c30d463e7ed10bdd1da9045f80561f27";
+} // namespace
+
+#if defined(WEBRTC_LINUX) && defined(WEBRTC_ARCH_X86_64)
+TEST_F(AcmSenderBitExactnessOldApi, Opus_stereo_20ms) {
+ ASSERT_NO_FATAL_FAILURE(SetUpTest("opus", 48000, 2, 120, 960, 960));
+ Run(audio_checksum, payload_checksum, /*expected_packets=*/50,
+ /*expected_channels=*/test::AcmReceiveTestOldApi::kStereoOutput);
+}
+#endif
+
+#if defined(WEBRTC_LINUX) && defined(WEBRTC_ARCH_X86_64)
+TEST_F(AcmSenderBitExactnessNewApi, OpusFromFormat_stereo_20ms) {
+ const auto config = AudioEncoderOpus::SdpToConfig(
+ SdpAudioFormat("opus", 48000, 2, {{"stereo", "1"}}));
+ ASSERT_TRUE(SetUpSender(kTestFileFakeStereo32kHz, 32000));
+ ASSERT_NO_FATAL_FAILURE(SetUpTestExternalEncoder(
+ AudioEncoderOpus::MakeAudioEncoder(*config, 120), 120));
+ Run(audio_checksum, payload_checksum, /*expected_packets=*/50,
+ /*expected_channels=*/test::AcmReceiveTestOldApi::kStereoOutput);
+}
+#endif
+
+// TODO(webrtc:8649): Disabled until the Encoder counterpart of
+// https://webrtc-review.googlesource.com/c/src/+/129768 lands.
+#if defined(WEBRTC_LINUX) && defined(WEBRTC_ARCH_X86_64)
+TEST_F(AcmSenderBitExactnessNewApi, DISABLED_OpusManyChannels) {
+ constexpr int kNumChannels = 4;
+ constexpr int kOpusPayloadType = 120;
+
+ // Read a 4 channel file at 48kHz.
+ ASSERT_TRUE(SetUpSender(kTestFileQuad48kHz, 48000));
+
+ const auto sdp_format = SdpAudioFormat("multiopus", 48000, kNumChannels,
+ {{"channel_mapping", "0,1,2,3"},
+ {"coupled_streams", "2"},
+ {"num_streams", "2"}});
+ const auto encoder_config =
+ AudioEncoderMultiChannelOpus::SdpToConfig(sdp_format);
+
+ ASSERT_TRUE(encoder_config.has_value());
+
+ ASSERT_NO_FATAL_FAILURE(
+ SetUpTestExternalEncoder(AudioEncoderMultiChannelOpus::MakeAudioEncoder(
+ *encoder_config, kOpusPayloadType),
+ kOpusPayloadType));
+
+ const auto decoder_config =
+ AudioDecoderMultiChannelOpus::SdpToConfig(sdp_format);
+ const auto opus_decoder =
+ AudioDecoderMultiChannelOpus::MakeAudioDecoder(*decoder_config);
+
+ rtc::scoped_refptr<AudioDecoderFactory> decoder_factory =
+ rtc::make_ref_counted<test::AudioDecoderProxyFactory>(opus_decoder.get());
+
+ // Set up an EXTERNAL DECODER to parse 4 channels.
+ Run("audio checksum check downstream|8051617907766bec5f4e4a4f7c6d5291",
+ "payload checksum check downstream|b09c52e44b2bdd9a0809e3a5b1623a76",
+ /*expected_packets=*/50,
+ /*expected_channels=*/test::AcmReceiveTestOldApi::kQuadOutput,
+ decoder_factory);
+}
+#endif
+
+#if defined(WEBRTC_LINUX) && defined(WEBRTC_ARCH_X86_64)
+TEST_F(AcmSenderBitExactnessNewApi, OpusFromFormat_stereo_20ms_voip) {
+ auto config = AudioEncoderOpus::SdpToConfig(
+ SdpAudioFormat("opus", 48000, 2, {{"stereo", "1"}}));
+  // If not set, the default will be kAudio in the case of stereo.
+ config->application = AudioEncoderOpusConfig::ApplicationMode::kVoip;
+ ASSERT_TRUE(SetUpSender(kTestFileFakeStereo32kHz, 32000));
+ ASSERT_NO_FATAL_FAILURE(SetUpTestExternalEncoder(
+ AudioEncoderOpus::MakeAudioEncoder(*config, 120), 120));
+ const std::string audio_maybe_sse =
+ "1010e60ad34cee73c939edaf563d0593"
+ "|ca54661b220cc35239c6864ab858d29a";
+ const std::string payload_maybe_sse =
+ "ea48d94e43217793af9b7e15ece94e54"
+ "|eb0752ce1b6f2436fefc2e19bd084fb5";
+ Run(audio_maybe_sse, payload_maybe_sse, /*expected_packets=*/50,
+ /*expected_channels=*/test::AcmReceiveTestOldApi::kStereoOutput);
+}
+#endif
+
+// This test verifies the SetBitRate function. The bitrate is changed at the
+// beginning, and the number of generated bytes is checked.
+class AcmSetBitRateTest : public ::testing::Test {
+ protected:
+ static const int kTestDurationMs = 1000;
+
+ // Sets up the test::AcmSendTest object. Returns true on success, otherwise
+ // false.
+ bool SetUpSender() {
+ const std::string input_file_name =
+ webrtc::test::ResourcePath("audio_coding/testfile32kHz", "pcm");
+ // Note that `audio_source_` will loop forever. The test duration is set
+ // explicitly by `kTestDurationMs`.
+ audio_source_.reset(new test::InputAudioFile(input_file_name));
+ static const int kSourceRateHz = 32000;
+ send_test_.reset(new test::AcmSendTestOldApi(
+ audio_source_.get(), kSourceRateHz, kTestDurationMs));
+ return send_test_.get();
+ }
+
+ // Registers a send codec in the test::AcmSendTest object. Returns true on
+ // success, false on failure.
+ virtual bool RegisterSendCodec(absl::string_view payload_name,
+ int sampling_freq_hz,
+ int channels,
+ int payload_type,
+ int frame_size_samples,
+ int frame_size_rtp_timestamps) {
+ return send_test_->RegisterCodec(payload_name, sampling_freq_hz, channels,
+ payload_type, frame_size_samples);
+ }
+
+ void RegisterExternalSendCodec(
+ std::unique_ptr<AudioEncoder> external_speech_encoder,
+ int payload_type) {
+ send_test_->RegisterExternalCodec(std::move(external_speech_encoder));
+ }
+
+ void RunInner(int min_expected_total_bits, int max_expected_total_bits) {
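+    // Sum the payload sizes over the 1 second test run; the total number of
+    // bits then approximates the average bitrate in bits per second.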
+ int nr_bytes = 0;
+ while (std::unique_ptr<test::Packet> next_packet =
+ send_test_->NextPacket()) {
+ nr_bytes += rtc::checked_cast<int>(next_packet->payload_length_bytes());
+ }
+ EXPECT_LE(min_expected_total_bits, nr_bytes * 8);
+ EXPECT_GE(max_expected_total_bits, nr_bytes * 8);
+ }
+
+ void SetUpTest(absl::string_view codec_name,
+ int codec_sample_rate_hz,
+ int channels,
+ int payload_type,
+ int codec_frame_size_samples,
+ int codec_frame_size_rtp_timestamps) {
+ ASSERT_TRUE(SetUpSender());
+ ASSERT_TRUE(RegisterSendCodec(codec_name, codec_sample_rate_hz, channels,
+ payload_type, codec_frame_size_samples,
+ codec_frame_size_rtp_timestamps));
+ }
+
+ std::unique_ptr<test::AcmSendTestOldApi> send_test_;
+ std::unique_ptr<test::InputAudioFile> audio_source_;
+};
+
+class AcmSetBitRateNewApi : public AcmSetBitRateTest {
+ protected:
+ // Runs the test. SetUpSender() must have been called and a codec must be set
+ // up before calling this method.
+ void Run(int min_expected_total_bits, int max_expected_total_bits) {
+ RunInner(min_expected_total_bits, max_expected_total_bits);
+ }
+};
+
+TEST_F(AcmSetBitRateNewApi, OpusFromFormat_48khz_20ms_10kbps) {
+ const auto config = AudioEncoderOpus::SdpToConfig(
+ SdpAudioFormat("opus", 48000, 2, {{"maxaveragebitrate", "10000"}}));
+ ASSERT_TRUE(SetUpSender());
+ RegisterExternalSendCodec(AudioEncoderOpus::MakeAudioEncoder(*config, 107),
+ 107);
+ RunInner(7000, 12000);
+}
+
+TEST_F(AcmSetBitRateNewApi, OpusFromFormat_48khz_20ms_50kbps) {
+ const auto config = AudioEncoderOpus::SdpToConfig(
+ SdpAudioFormat("opus", 48000, 2, {{"maxaveragebitrate", "50000"}}));
+ ASSERT_TRUE(SetUpSender());
+ RegisterExternalSendCodec(AudioEncoderOpus::MakeAudioEncoder(*config, 107),
+ 107);
+ RunInner(40000, 60000);
+}
+
+// Verify that it works when the data to send is mono and the encoder is set to
+// send surround audio.
+TEST_F(AudioCodingModuleTestOldApi, SendingMultiChannelForMonoInput) {
+ constexpr int kSampleRateHz = 48000;
+ constexpr int kSamplesPerChannel = kSampleRateHz * 10 / 1000;
+
+ audio_format_ = SdpAudioFormat({"multiopus",
+ kSampleRateHz,
+ 6,
+ {{"minptime", "10"},
+ {"useinbandfec", "1"},
+ {"channel_mapping", "0,4,1,2,3,5"},
+ {"num_streams", "4"},
+ {"coupled_streams", "2"}}});
+
+ RegisterCodec();
+
+ input_frame_.sample_rate_hz_ = kSampleRateHz;
+ input_frame_.num_channels_ = 1;
+ input_frame_.samples_per_channel_ = kSamplesPerChannel;
+ for (size_t k = 0; k < 10; ++k) {
+ ASSERT_GE(acm_->Add10MsData(input_frame_), 0);
+ input_frame_.timestamp_ += kSamplesPerChannel;
+ }
+}
+
+// Verify that it works when the data to send is stereo and the encoder is set
+// to send surround audio.
+TEST_F(AudioCodingModuleTestOldApi, SendingMultiChannelForStereoInput) {
+ constexpr int kSampleRateHz = 48000;
+ constexpr int kSamplesPerChannel = (kSampleRateHz * 10) / 1000;
+
+ audio_format_ = SdpAudioFormat({"multiopus",
+ kSampleRateHz,
+ 6,
+ {{"minptime", "10"},
+ {"useinbandfec", "1"},
+ {"channel_mapping", "0,4,1,2,3,5"},
+ {"num_streams", "4"},
+ {"coupled_streams", "2"}}});
+
+ RegisterCodec();
+
+ input_frame_.sample_rate_hz_ = kSampleRateHz;
+ input_frame_.num_channels_ = 2;
+ input_frame_.samples_per_channel_ = kSamplesPerChannel;
+ for (size_t k = 0; k < 10; ++k) {
+ ASSERT_GE(acm_->Add10MsData(input_frame_), 0);
+ input_frame_.timestamp_ += kSamplesPerChannel;
+ }
+}
+
+// Verify that it works when the data to send is mono and the encoder is set to
+// send stereo audio.
+TEST_F(AudioCodingModuleTestOldApi, SendingStereoForMonoInput) {
+ constexpr int kSampleRateHz = 48000;
+ constexpr int kSamplesPerChannel = (kSampleRateHz * 10) / 1000;
+
+ audio_format_ = SdpAudioFormat("L16", kSampleRateHz, 2);
+
+ RegisterCodec();
+
+ input_frame_.sample_rate_hz_ = kSampleRateHz;
+ input_frame_.num_channels_ = 1;
+ input_frame_.samples_per_channel_ = kSamplesPerChannel;
+ for (size_t k = 0; k < 10; ++k) {
+ ASSERT_GE(acm_->Add10MsData(input_frame_), 0);
+ input_frame_.timestamp_ += kSamplesPerChannel;
+ }
+}
+
+// Verify that it works when the data to send is stereo and the encoder is set
+// to send mono audio.
+TEST_F(AudioCodingModuleTestOldApi, SendingMonoForStereoInput) {
+ constexpr int kSampleRateHz = 48000;
+ constexpr int kSamplesPerChannel = (kSampleRateHz * 10) / 1000;
+
+ audio_format_ = SdpAudioFormat("L16", kSampleRateHz, 1);
+
+ RegisterCodec();
+
+ input_frame_.sample_rate_hz_ = kSampleRateHz;
+ input_frame_.num_channels_ = 1;
+ input_frame_.samples_per_channel_ = kSamplesPerChannel;
+ for (size_t k = 0; k < 10; ++k) {
+ ASSERT_GE(acm_->Add10MsData(input_frame_), 0);
+ input_frame_.timestamp_ += kSamplesPerChannel;
+ }
+}
+
+// The result of this test is inconsistent across the Android platforms: on
+// the android_rel bot it differs from the android and android arm64 rel bots.
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_OpusFromFormat_48khz_20ms_100kbps \
+ DISABLED_OpusFromFormat_48khz_20ms_100kbps
+#else
+#define MAYBE_OpusFromFormat_48khz_20ms_100kbps \
+ OpusFromFormat_48khz_20ms_100kbps
+#endif
+TEST_F(AcmSetBitRateNewApi, MAYBE_OpusFromFormat_48khz_20ms_100kbps) {
+ const auto config = AudioEncoderOpus::SdpToConfig(
+ SdpAudioFormat("opus", 48000, 2, {{"maxaveragebitrate", "100000"}}));
+ ASSERT_TRUE(SetUpSender());
+ RegisterExternalSendCodec(AudioEncoderOpus::MakeAudioEncoder(*config, 107),
+ 107);
+ RunInner(80000, 120000);
+}
+
+TEST_F(AcmSenderBitExactnessOldApi, External_Pcmu_20ms) {
+ AudioEncoderPcmU::Config config;
+ config.frame_size_ms = 20;
+ config.num_channels = 1;
+ config.payload_type = 0;
+ AudioEncoderPcmU encoder(config);
+ auto mock_encoder = std::make_unique<MockAudioEncoder>();
+ // Set expectations on the mock encoder and also delegate the calls to the
+ // real encoder.
+ EXPECT_CALL(*mock_encoder, SampleRateHz())
+ .Times(AtLeast(1))
+ .WillRepeatedly(Invoke(&encoder, &AudioEncoderPcmU::SampleRateHz));
+ EXPECT_CALL(*mock_encoder, NumChannels())
+ .Times(AtLeast(1))
+ .WillRepeatedly(Invoke(&encoder, &AudioEncoderPcmU::NumChannels));
+ EXPECT_CALL(*mock_encoder, RtpTimestampRateHz())
+ .Times(AtLeast(1))
+ .WillRepeatedly(Invoke(&encoder, &AudioEncoderPcmU::RtpTimestampRateHz));
+ EXPECT_CALL(*mock_encoder, Num10MsFramesInNextPacket())
+ .Times(AtLeast(1))
+ .WillRepeatedly(
+ Invoke(&encoder, &AudioEncoderPcmU::Num10MsFramesInNextPacket));
+ EXPECT_CALL(*mock_encoder, GetTargetBitrate())
+ .Times(AtLeast(1))
+ .WillRepeatedly(Invoke(&encoder, &AudioEncoderPcmU::GetTargetBitrate));
+ EXPECT_CALL(*mock_encoder, EncodeImpl(_, _, _))
+ .Times(AtLeast(1))
+ .WillRepeatedly(Invoke(
+ &encoder, static_cast<AudioEncoder::EncodedInfo (AudioEncoder::*)(
+ uint32_t, rtc::ArrayView<const int16_t>, rtc::Buffer*)>(
+ &AudioEncoderPcmU::Encode)));
+ ASSERT_TRUE(SetUpSender(kTestFileMono32kHz, 32000));
+ ASSERT_NO_FATAL_FAILURE(
+ SetUpTestExternalEncoder(std::move(mock_encoder), config.payload_type));
+ Run("c8d1fc677f33c2022ec5f83c7f302280", "8f9b8750bd80fe26b6cbf6659b89f0f9",
+ 50, test::AcmReceiveTestOldApi::kMonoOutput);
+}
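
The expectations above follow the standard gMock delegation pattern: the mock verifies that each method is called while a concrete encoder supplies the real behavior. A self-contained sketch of the same pattern, reduced to one method on a hypothetical Codec interface (none of these names are WebRTC types):

    #include <gmock/gmock.h>
    #include <gtest/gtest.h>

    class Codec {  // Hypothetical interface, for illustration only.
     public:
      virtual ~Codec() = default;
      virtual int SampleRateHz() const = 0;
    };

    class RealCodec : public Codec {
     public:
      int SampleRateHz() const override { return 8000; }
    };

    class MockCodec : public Codec {
     public:
      MOCK_METHOD(int, SampleRateHz, (), (const, override));
    };

    TEST(DelegationSketch, MockForwardsToReal) {
      RealCodec real;
      MockCodec mock;
      // The mock counts the calls; RealCodec answers them.
      EXPECT_CALL(mock, SampleRateHz())
          .Times(::testing::AtLeast(1))
          .WillRepeatedly(::testing::Invoke(&real, &RealCodec::SampleRateHz));
      EXPECT_EQ(8000, mock.SampleRateHz());
    }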
+
+// This test fixture runs ACM while changing the desired output frequency
+// during the call. The input packets are simply PCM16b-wb encoded payloads
+// with a constant value of `kSampleValue`. The fixture itself acts as the
+// PacketSource between the receive test class and the constant-payload
+// packet source class. The output is both written to file and analyzed in
+// this fixture.
+class AcmSwitchingOutputFrequencyOldApi : public ::testing::Test,
+ public test::PacketSource,
+ public test::AudioSink {
+ protected:
+ static const size_t kTestNumPackets = 50;
+ static const int kEncodedSampleRateHz = 16000;
+ static const size_t kPayloadLenSamples = 30 * kEncodedSampleRateHz / 1000;
+ static const int kPayloadType = 108; // Default payload type for PCM16b-wb.
+
+ AcmSwitchingOutputFrequencyOldApi()
+ : first_output_(true),
+ num_packets_(0),
+ packet_source_(kPayloadLenSamples,
+ kSampleValue,
+ kEncodedSampleRateHz,
+ kPayloadType),
+ output_freq_2_(0),
+ has_toggled_(false) {}
+
+ void Run(int output_freq_1, int output_freq_2, int toggle_period_ms) {
+ // Set up the receiver used to decode the packets and verify the decoded
+ // output.
+ const std::string output_file_name =
+ webrtc::test::OutputPath() +
+ ::testing::UnitTest::GetInstance()
+ ->current_test_info()
+ ->test_case_name() +
+ "_" + ::testing::UnitTest::GetInstance()->current_test_info()->name() +
+ "_output.pcm";
+ test::OutputAudioFile output_file(output_file_name);
+ // Have the output audio sent both to file and to the WriteArray method in
+ // this class.
+ test::AudioSinkFork output(this, &output_file);
+ test::AcmReceiveTestToggleOutputFreqOldApi receive_test(
+ this, &output, output_freq_1, output_freq_2, toggle_period_ms,
+ test::AcmReceiveTestOldApi::kMonoOutput);
+ ASSERT_NO_FATAL_FAILURE(receive_test.RegisterDefaultCodecs());
+ output_freq_2_ = output_freq_2;
+
+ // This is where the actual test is executed.
+ receive_test.Run();
+
+ // Delete output file.
+ remove(output_file_name.c_str());
+ }
+
+ // Inherited from test::PacketSource.
+ std::unique_ptr<test::Packet> NextPacket() override {
+ // Check if it is time to terminate the test. The packet source is of type
+ // ConstantPcmPacketSource, which is infinite, so we must end the test
+ // "manually".
+ if (num_packets_++ > kTestNumPackets) {
+ EXPECT_TRUE(has_toggled_);
+ return NULL; // Test ended.
+ }
+
+ // Get the next packet from the source.
+ return packet_source_.NextPacket();
+ }
+
+ // Inherited from test::AudioSink.
+ bool WriteArray(const int16_t* audio, size_t num_samples) override {
+ // Skip checking the first output frame, since it has a number of zeros
+ // due to how NetEq is initialized.
+ if (first_output_) {
+ first_output_ = false;
+ return true;
+ }
+ for (size_t i = 0; i < num_samples; ++i) {
+ EXPECT_EQ(kSampleValue, audio[i]);
+ }
+ if (num_samples ==
+ static_cast<size_t>(output_freq_2_ / 100)) // Size of 10 ms frame.
+ has_toggled_ = true;
+    // The return value does not say whether the values matched the
+    // expectation, only that the method could process the samples.
+ return true;
+ }
+
+ const int16_t kSampleValue = 1000;
+ bool first_output_;
+ size_t num_packets_;
+ test::ConstantPcmPacketSource packet_source_;
+ int output_freq_2_;
+ bool has_toggled_;
+};
+
+TEST_F(AcmSwitchingOutputFrequencyOldApi, TestWithoutToggling) {
+ Run(16000, 16000, 1000);
+}
+
+TEST_F(AcmSwitchingOutputFrequencyOldApi, Toggle16KhzTo32Khz) {
+ Run(16000, 32000, 1000);
+}
+
+TEST_F(AcmSwitchingOutputFrequencyOldApi, Toggle32KhzTo16Khz) {
+ Run(32000, 16000, 1000);
+}
+
+TEST_F(AcmSwitchingOutputFrequencyOldApi, Toggle16KhzTo8Khz) {
+ Run(16000, 8000, 1000);
+}
+
+TEST_F(AcmSwitchingOutputFrequencyOldApi, Toggle8KhzTo16Khz) {
+ Run(8000, 16000, 1000);
+}
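
The toggle detection in WriteArray() above relies on a 10 ms frame at rate R holding R / 100 samples per channel. A minimal sketch of that arithmetic for the 32 kHz case exercised by these tests:

    constexpr int kOutputFreqHz = 32000;
    constexpr size_t kSamplesPer10MsFrame = kOutputFreqHz / 100;
    static_assert(kSamplesPer10MsFrame == 320,
                  "10 ms of audio at 32 kHz is 320 samples per channel");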
+
+#endif
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/acm2/call_statistics.cc b/third_party/libwebrtc/modules/audio_coding/acm2/call_statistics.cc
new file mode 100644
index 0000000000..9f3bdadc88
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/acm2/call_statistics.cc
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/acm2/call_statistics.h"
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+namespace acm2 {
+
+void CallStatistics::DecodedByNetEq(AudioFrame::SpeechType speech_type,
+ bool muted) {
+ ++decoding_stat_.calls_to_neteq;
+ if (muted) {
+ ++decoding_stat_.decoded_muted_output;
+ }
+ switch (speech_type) {
+ case AudioFrame::kNormalSpeech: {
+ ++decoding_stat_.decoded_normal;
+ break;
+ }
+ case AudioFrame::kPLC: {
+ ++decoding_stat_.decoded_neteq_plc;
+ break;
+ }
+ case AudioFrame::kCodecPLC: {
+ ++decoding_stat_.decoded_codec_plc;
+ break;
+ }
+ case AudioFrame::kCNG: {
+ ++decoding_stat_.decoded_cng;
+ break;
+ }
+ case AudioFrame::kPLCCNG: {
+ ++decoding_stat_.decoded_plc_cng;
+ break;
+ }
+ case AudioFrame::kUndefined: {
+ // If the audio is decoded by NetEq, `kUndefined` is not an option.
+ RTC_DCHECK_NOTREACHED();
+ }
+ }
+}
+
+void CallStatistics::DecodedBySilenceGenerator() {
+ ++decoding_stat_.calls_to_silence_generator;
+}
+
+const AudioDecodingCallStats& CallStatistics::GetDecodingStatistics() const {
+ return decoding_stat_;
+}
+
+} // namespace acm2
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/acm2/call_statistics.h b/third_party/libwebrtc/modules/audio_coding/acm2/call_statistics.h
new file mode 100644
index 0000000000..a2db2a29f4
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/acm2/call_statistics.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_ACM2_CALL_STATISTICS_H_
+#define MODULES_AUDIO_CODING_ACM2_CALL_STATISTICS_H_
+
+#include "api/audio/audio_frame.h"
+#include "modules/audio_coding/include/audio_coding_module_typedefs.h"
+
+//
+// This class is for bookkeeping of calls to ACM. It is not useful to log API
+// calls that are supposed to be made every 10 ms, e.g. PlayoutData10Ms();
+// however, it is useful to know the number of such calls in a given time
+// interval. The current implementation covers calls to PlayoutData10Ms() with
+// detailed accounting of the decoded speech type.
+//
+// Thread Safety
+// =============
+// Please note that this class is not thread safe. The class must be protected
+// externally if its APIs are called from different threads.
+//
+
+namespace webrtc {
+
+namespace acm2 {
+
+class CallStatistics {
+ public:
+ CallStatistics() {}
+ ~CallStatistics() {}
+
+  // Call this method to indicate that NetEq engaged in decoding. `speech_type`
+  // is the audio type according to NetEq, and `muted` indicates whether the
+  // decoded frame was produced in a muted state.
+  void DecodedByNetEq(AudioFrame::SpeechType speech_type, bool muted);
+
+  // Call this method to indicate that a decoding call resulted in generating
+  // silence, i.e. the call to NetEq was bypassed and the output audio is all
+  // zeros.
+  void DecodedBySilenceGenerator();
+
+  // Get statistics for decoding. The statistics include the number of calls to
+  // NetEq and to the silence generator, as well as the type of speech pulled
+  // off of NetEq; cf. the declaration of AudioDecodingCallStats for a detailed
+  // description.
+ const AudioDecodingCallStats& GetDecodingStatistics() const;
+
+ private:
+ // Reset the decoding statistics.
+ void ResetDecodingStatistics();
+
+ AudioDecodingCallStats decoding_stat_;
+};
+
+} // namespace acm2
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_CODING_ACM2_CALL_STATISTICS_H_
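
Because the header above documents CallStatistics as not thread safe, a caller that updates it from a decode thread and reads it from a stats thread needs external locking. A minimal sketch of one way to do that; LockedCallStatistics is a hypothetical wrapper, not a class in this tree:

    #include <mutex>

    #include "modules/audio_coding/acm2/call_statistics.h"

    class LockedCallStatistics {
     public:
      void DecodedByNetEq(webrtc::AudioFrame::SpeechType type, bool muted) {
        std::lock_guard<std::mutex> lock(mutex_);
        stats_.DecodedByNetEq(type, muted);
      }
      webrtc::AudioDecodingCallStats GetDecodingStatistics() const {
        std::lock_guard<std::mutex> lock(mutex_);
        return stats_.GetDecodingStatistics();  // Copied out under the lock.
      }

     private:
      mutable std::mutex mutex_;
      webrtc::acm2::CallStatistics stats_;
    };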
diff --git a/third_party/libwebrtc/modules/audio_coding/acm2/call_statistics_unittest.cc b/third_party/libwebrtc/modules/audio_coding/acm2/call_statistics_unittest.cc
new file mode 100644
index 0000000000..b96977b8e4
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/acm2/call_statistics_unittest.cc
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/acm2/call_statistics.h"
+
+#include "test/gtest.h"
+
+namespace webrtc {
+
+namespace acm2 {
+
+TEST(CallStatisticsTest, InitializedZero) {
+ CallStatistics call_stats;
+ AudioDecodingCallStats stats;
+
+ stats = call_stats.GetDecodingStatistics();
+ EXPECT_EQ(0, stats.calls_to_neteq);
+ EXPECT_EQ(0, stats.calls_to_silence_generator);
+ EXPECT_EQ(0, stats.decoded_normal);
+ EXPECT_EQ(0, stats.decoded_cng);
+  EXPECT_EQ(0, stats.decoded_neteq_plc);
+  EXPECT_EQ(0, stats.decoded_codec_plc);
+  EXPECT_EQ(0, stats.decoded_plc_cng);
+ EXPECT_EQ(0, stats.decoded_muted_output);
+}
+
+TEST(CallStatisticsTest, AllCalls) {
+ CallStatistics call_stats;
+ AudioDecodingCallStats stats;
+
+ call_stats.DecodedBySilenceGenerator();
+ call_stats.DecodedByNetEq(AudioFrame::kNormalSpeech, false);
+ call_stats.DecodedByNetEq(AudioFrame::kPLC, false);
+ call_stats.DecodedByNetEq(AudioFrame::kCodecPLC, false);
+ call_stats.DecodedByNetEq(AudioFrame::kPLCCNG, true); // Let this be muted.
+ call_stats.DecodedByNetEq(AudioFrame::kCNG, false);
+
+ stats = call_stats.GetDecodingStatistics();
+ EXPECT_EQ(5, stats.calls_to_neteq);
+ EXPECT_EQ(1, stats.calls_to_silence_generator);
+ EXPECT_EQ(1, stats.decoded_normal);
+ EXPECT_EQ(1, stats.decoded_cng);
+ EXPECT_EQ(1, stats.decoded_neteq_plc);
+ EXPECT_EQ(1, stats.decoded_codec_plc);
+ EXPECT_EQ(1, stats.decoded_plc_cng);
+ EXPECT_EQ(1, stats.decoded_muted_output);
+}
+
+} // namespace acm2
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/audio_coding.gni b/third_party/libwebrtc/modules/audio_coding/audio_coding.gni
new file mode 100644
index 0000000000..78460e6420
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/audio_coding.gni
@@ -0,0 +1,30 @@
+# Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+import("../../webrtc.gni")
+
+audio_codec_defines = []
+if (rtc_include_ilbc) {
+ audio_codec_defines += [ "WEBRTC_CODEC_ILBC" ]
+}
+if (rtc_include_opus) {
+ audio_codec_defines += [ "WEBRTC_CODEC_OPUS" ]
+}
+if (rtc_opus_support_120ms_ptime) {
+ audio_codec_defines += [ "WEBRTC_OPUS_SUPPORT_120MS_PTIME=1" ]
+} else {
+ audio_codec_defines += [ "WEBRTC_OPUS_SUPPORT_120MS_PTIME=0" ]
+}
+if (target_cpu == "arm") {
+ audio_codec_defines += [ "WEBRTC_CODEC_ISACFX" ]
+} else {
+ audio_codec_defines += [ "WEBRTC_CODEC_ISAC" ]
+}
+
+audio_coding_defines = audio_codec_defines
+neteq_defines = audio_codec_defines
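
Note that WEBRTC_OPUS_SUPPORT_120MS_PTIME is always defined, to either 0 or 1, so consumers test it with #if rather than #ifdef. A hedged sketch of what such a consumer might look like (the constant name is illustrative, not taken from the tree):

    // Illustrative only: pick the longest ptime the build supports.
    #if WEBRTC_OPUS_SUPPORT_120MS_PTIME
    constexpr int kMaxSupportedPtimeMs = 120;
    #else
    constexpr int kMaxSupportedPtimeMs = 60;
    #endif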
diff --git a/third_party/libwebrtc/modules/audio_coding/audio_coding_gn/moz.build b/third_party/libwebrtc/modules/audio_coding/audio_coding_gn/moz.build
new file mode 100644
index 0000000000..3d193a4a34
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/audio_coding_gn/moz.build
@@ -0,0 +1,216 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/audio_coding/acm2/acm_receiver.cc",
+ "/third_party/libwebrtc/modules/audio_coding/acm2/acm_remixing.cc",
+ "/third_party/libwebrtc/modules/audio_coding/acm2/acm_resampler.cc",
+ "/third_party/libwebrtc/modules/audio_coding/acm2/audio_coding_module.cc",
+ "/third_party/libwebrtc/modules/audio_coding/acm2/call_statistics.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("audio_coding_gn")
diff --git a/third_party/libwebrtc/modules/audio_coding/audio_coding_module_typedefs_gn/moz.build b/third_party/libwebrtc/modules/audio_coding/audio_coding_module_typedefs_gn/moz.build
new file mode 100644
index 0000000000..9f3c0d8abd
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/audio_coding_module_typedefs_gn/moz.build
@@ -0,0 +1,189 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("audio_coding_module_typedefs_gn")
diff --git a/third_party/libwebrtc/modules/audio_coding/audio_coding_opus_common_gn/moz.build b/third_party/libwebrtc/modules/audio_coding/audio_coding_opus_common_gn/moz.build
new file mode 100644
index 0000000000..132dbaad9b
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/audio_coding_opus_common_gn/moz.build
@@ -0,0 +1,205 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/audio_coding/codecs/opus/audio_coder_opus_common.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("audio_coding_opus_common_gn")
diff --git a/third_party/libwebrtc/modules/audio_coding/audio_encoder_cng_gn/moz.build b/third_party/libwebrtc/modules/audio_coding/audio_encoder_cng_gn/moz.build
new file mode 100644
index 0000000000..725d37c027
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/audio_encoder_cng_gn/moz.build
@@ -0,0 +1,212 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/audio_coding/codecs/cng/audio_encoder_cng.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("audio_encoder_cng_gn")
diff --git a/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/audio_network_adaptor_config.cc b/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/audio_network_adaptor_config.cc
new file mode 100644
index 0000000000..16fd2a1b9a
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/audio_network_adaptor_config.cc
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/audio_network_adaptor/include/audio_network_adaptor_config.h"
+
+namespace webrtc {
+
+AudioEncoderRuntimeConfig::AudioEncoderRuntimeConfig() = default;
+
+AudioEncoderRuntimeConfig::AudioEncoderRuntimeConfig(
+ const AudioEncoderRuntimeConfig& other) = default;
+
+AudioEncoderRuntimeConfig::~AudioEncoderRuntimeConfig() = default;
+
+AudioEncoderRuntimeConfig& AudioEncoderRuntimeConfig::operator=(
+ const AudioEncoderRuntimeConfig& other) = default;
+
+bool AudioEncoderRuntimeConfig::operator==(
+ const AudioEncoderRuntimeConfig& other) const {
+ return bitrate_bps == other.bitrate_bps &&
+ frame_length_ms == other.frame_length_ms &&
+ uplink_packet_loss_fraction == other.uplink_packet_loss_fraction &&
+ enable_fec == other.enable_fec && enable_dtx == other.enable_dtx &&
+ num_channels == other.num_channels;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/audio_network_adaptor_impl.cc b/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/audio_network_adaptor_impl.cc
new file mode 100644
index 0000000000..64163f9118
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/audio_network_adaptor_impl.cc
@@ -0,0 +1,171 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/audio_network_adaptor/audio_network_adaptor_impl.h"
+
+#include <stdint.h>
+
+#include <utility>
+#include <vector>
+
+#include "modules/audio_coding/audio_network_adaptor/controller_manager.h"
+#include "modules/audio_coding/audio_network_adaptor/debug_dump_writer.h"
+#include "modules/audio_coding/audio_network_adaptor/event_log_writer.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/time_utils.h"
+#include "system_wrappers/include/field_trial.h"
+
+namespace webrtc {
+
+namespace {
+constexpr int kEventLogMinBitrateChangeBps = 5000;
+constexpr float kEventLogMinBitrateChangeFraction = 0.25;
+constexpr float kEventLogMinPacketLossChangeFraction = 0.5;
+} // namespace
+
+AudioNetworkAdaptorImpl::Config::Config() : event_log(nullptr) {}
+
+AudioNetworkAdaptorImpl::Config::~Config() = default;
+
+AudioNetworkAdaptorImpl::AudioNetworkAdaptorImpl(
+ const Config& config,
+ std::unique_ptr<ControllerManager> controller_manager,
+ std::unique_ptr<DebugDumpWriter> debug_dump_writer)
+ : config_(config),
+ controller_manager_(std::move(controller_manager)),
+ debug_dump_writer_(std::move(debug_dump_writer)),
+ event_log_writer_(
+ config.event_log
+ ? new EventLogWriter(config.event_log,
+ kEventLogMinBitrateChangeBps,
+ kEventLogMinBitrateChangeFraction,
+ kEventLogMinPacketLossChangeFraction)
+ : nullptr) {
+ RTC_DCHECK(controller_manager_);
+}
+
+AudioNetworkAdaptorImpl::~AudioNetworkAdaptorImpl() = default;
+
+void AudioNetworkAdaptorImpl::SetUplinkBandwidth(int uplink_bandwidth_bps) {
+ last_metrics_.uplink_bandwidth_bps = uplink_bandwidth_bps;
+ DumpNetworkMetrics();
+
+ Controller::NetworkMetrics network_metrics;
+ network_metrics.uplink_bandwidth_bps = uplink_bandwidth_bps;
+ UpdateNetworkMetrics(network_metrics);
+}
+
+void AudioNetworkAdaptorImpl::SetUplinkPacketLossFraction(
+ float uplink_packet_loss_fraction) {
+ last_metrics_.uplink_packet_loss_fraction = uplink_packet_loss_fraction;
+ DumpNetworkMetrics();
+
+ Controller::NetworkMetrics network_metrics;
+ network_metrics.uplink_packet_loss_fraction = uplink_packet_loss_fraction;
+ UpdateNetworkMetrics(network_metrics);
+}
+
+void AudioNetworkAdaptorImpl::SetRtt(int rtt_ms) {
+ last_metrics_.rtt_ms = rtt_ms;
+ DumpNetworkMetrics();
+
+ Controller::NetworkMetrics network_metrics;
+ network_metrics.rtt_ms = rtt_ms;
+ UpdateNetworkMetrics(network_metrics);
+}
+
+void AudioNetworkAdaptorImpl::SetTargetAudioBitrate(
+ int target_audio_bitrate_bps) {
+ last_metrics_.target_audio_bitrate_bps = target_audio_bitrate_bps;
+ DumpNetworkMetrics();
+
+ Controller::NetworkMetrics network_metrics;
+ network_metrics.target_audio_bitrate_bps = target_audio_bitrate_bps;
+ UpdateNetworkMetrics(network_metrics);
+}
+
+void AudioNetworkAdaptorImpl::SetOverhead(size_t overhead_bytes_per_packet) {
+ last_metrics_.overhead_bytes_per_packet = overhead_bytes_per_packet;
+ DumpNetworkMetrics();
+
+ Controller::NetworkMetrics network_metrics;
+ network_metrics.overhead_bytes_per_packet = overhead_bytes_per_packet;
+ UpdateNetworkMetrics(network_metrics);
+}
+
+AudioEncoderRuntimeConfig AudioNetworkAdaptorImpl::GetEncoderRuntimeConfig() {
+ AudioEncoderRuntimeConfig config;
+ for (auto& controller :
+ controller_manager_->GetSortedControllers(last_metrics_))
+ controller->MakeDecision(&config);
+
+ // Update ANA stats.
+ auto increment_opt = [](absl::optional<uint32_t>& a) {
+ a = a.value_or(0) + 1;
+ };
+ if (prev_config_) {
+ if (config.bitrate_bps != prev_config_->bitrate_bps) {
+ increment_opt(stats_.bitrate_action_counter);
+ }
+ if (config.enable_dtx != prev_config_->enable_dtx) {
+ increment_opt(stats_.dtx_action_counter);
+ }
+ if (config.enable_fec != prev_config_->enable_fec) {
+ increment_opt(stats_.fec_action_counter);
+ }
+ if (config.frame_length_ms && prev_config_->frame_length_ms) {
+ if (*config.frame_length_ms > *prev_config_->frame_length_ms) {
+ increment_opt(stats_.frame_length_increase_counter);
+ } else if (*config.frame_length_ms < *prev_config_->frame_length_ms) {
+ increment_opt(stats_.frame_length_decrease_counter);
+ }
+ }
+ if (config.num_channels != prev_config_->num_channels) {
+ increment_opt(stats_.channel_action_counter);
+ }
+ if (config.uplink_packet_loss_fraction) {
+ stats_.uplink_packet_loss_fraction = *config.uplink_packet_loss_fraction;
+ }
+ }
+ prev_config_ = config;
+
+ if (debug_dump_writer_)
+ debug_dump_writer_->DumpEncoderRuntimeConfig(config, rtc::TimeMillis());
+
+ if (event_log_writer_)
+ event_log_writer_->MaybeLogEncoderConfig(config);
+
+ return config;
+}
+
+void AudioNetworkAdaptorImpl::StartDebugDump(FILE* file_handle) {
+ debug_dump_writer_ = DebugDumpWriter::Create(file_handle);
+}
+
+void AudioNetworkAdaptorImpl::StopDebugDump() {
+ debug_dump_writer_.reset(nullptr);
+}
+
+ANAStats AudioNetworkAdaptorImpl::GetStats() const {
+ return stats_;
+}
+
+void AudioNetworkAdaptorImpl::DumpNetworkMetrics() {
+ if (debug_dump_writer_)
+ debug_dump_writer_->DumpNetworkMetrics(last_metrics_, rtc::TimeMillis());
+}
+
+void AudioNetworkAdaptorImpl::UpdateNetworkMetrics(
+ const Controller::NetworkMetrics& network_metrics) {
+ for (auto& controller : controller_manager_->GetControllers())
+ controller->UpdateNetworkMetrics(network_metrics);
+}
+
+} // namespace webrtc
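
Putting the file together: each setter records its metric in last_metrics_, forwards it to every controller, and dumps it if a debug dump is active; GetEncoderRuntimeConfig() then folds the controllers' decisions into one config and bumps the ANAStats action counters for every field that changed. A minimal usage sketch against the AudioNetworkAdaptor interface; constructing the adaptor (which needs a ControllerManager) is elided:

    void OnNetworkUpdate(webrtc::AudioNetworkAdaptor& ana) {
      // Push the latest network metrics; each call also triggers a debug
      // dump if one is active.
      ana.SetUplinkBandwidth(32000);           // bps
      ana.SetUplinkPacketLossFraction(0.05f);  // fraction in [0, 1]
      ana.SetRtt(80);                          // ms
      // Ask the controllers for a combined decision. Changes relative to
      // the previous config increment the ANAStats action counters.
      webrtc::AudioEncoderRuntimeConfig cfg = ana.GetEncoderRuntimeConfig();
      (void)cfg;  // cfg.bitrate_bps, cfg.enable_fec, ... feed the encoder.
    }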
diff --git a/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/audio_network_adaptor_impl.h b/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/audio_network_adaptor_impl.h
new file mode 100644
index 0000000000..664e76bda5
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/audio_network_adaptor_impl.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_AUDIO_NETWORK_ADAPTOR_IMPL_H_
+#define MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_AUDIO_NETWORK_ADAPTOR_IMPL_H_
+
+#include <stdio.h>
+
+#include <memory>
+
+#include "absl/types/optional.h"
+#include "api/audio_codecs/audio_encoder.h"
+#include "modules/audio_coding/audio_network_adaptor/controller.h"
+#include "modules/audio_coding/audio_network_adaptor/debug_dump_writer.h"
+#include "modules/audio_coding/audio_network_adaptor/include/audio_network_adaptor.h"
+#include "modules/audio_coding/audio_network_adaptor/include/audio_network_adaptor_config.h"
+
+namespace webrtc {
+
+class ControllerManager;
+class EventLogWriter;
+class RtcEventLog;
+
+class AudioNetworkAdaptorImpl final : public AudioNetworkAdaptor {
+ public:
+ struct Config {
+ Config();
+ ~Config();
+ RtcEventLog* event_log;
+ };
+
+ AudioNetworkAdaptorImpl(
+ const Config& config,
+ std::unique_ptr<ControllerManager> controller_manager,
+ std::unique_ptr<DebugDumpWriter> debug_dump_writer = nullptr);
+
+ ~AudioNetworkAdaptorImpl() override;
+
+ AudioNetworkAdaptorImpl(const AudioNetworkAdaptorImpl&) = delete;
+ AudioNetworkAdaptorImpl& operator=(const AudioNetworkAdaptorImpl&) = delete;
+
+ void SetUplinkBandwidth(int uplink_bandwidth_bps) override;
+
+ void SetUplinkPacketLossFraction(float uplink_packet_loss_fraction) override;
+
+ void SetRtt(int rtt_ms) override;
+
+ void SetTargetAudioBitrate(int target_audio_bitrate_bps) override;
+
+ void SetOverhead(size_t overhead_bytes_per_packet) override;
+
+ AudioEncoderRuntimeConfig GetEncoderRuntimeConfig() override;
+
+ void StartDebugDump(FILE* file_handle) override;
+
+ void StopDebugDump() override;
+
+ ANAStats GetStats() const override;
+
+ private:
+ void DumpNetworkMetrics();
+
+ void UpdateNetworkMetrics(const Controller::NetworkMetrics& network_metrics);
+
+ const Config config_;
+
+ std::unique_ptr<ControllerManager> controller_manager_;
+
+ std::unique_ptr<DebugDumpWriter> debug_dump_writer_;
+
+ const std::unique_ptr<EventLogWriter> event_log_writer_;
+
+ Controller::NetworkMetrics last_metrics_;
+
+ absl::optional<AudioEncoderRuntimeConfig> prev_config_;
+
+ ANAStats stats_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_AUDIO_NETWORK_ADAPTOR_IMPL_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/audio_network_adaptor_impl_unittest.cc b/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/audio_network_adaptor_impl_unittest.cc
new file mode 100644
index 0000000000..70a50d6de7
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/audio_network_adaptor_impl_unittest.cc
@@ -0,0 +1,306 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/audio_network_adaptor/audio_network_adaptor_impl.h"
+
+#include <utility>
+#include <vector>
+
+#include "api/rtc_event_log/rtc_event.h"
+#include "logging/rtc_event_log/events/rtc_event_audio_network_adaptation.h"
+#include "logging/rtc_event_log/mock/mock_rtc_event_log.h"
+#include "modules/audio_coding/audio_network_adaptor/mock/mock_controller.h"
+#include "modules/audio_coding/audio_network_adaptor/mock/mock_controller_manager.h"
+#include "modules/audio_coding/audio_network_adaptor/mock/mock_debug_dump_writer.h"
+#include "rtc_base/fake_clock.h"
+#include "test/field_trial.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+using ::testing::_;
+using ::testing::NiceMock;
+using ::testing::Return;
+using ::testing::SetArgPointee;
+
+namespace {
+
+constexpr size_t kNumControllers = 2;
+
+constexpr int64_t kClockInitialTimeMs = 12345678;
+
+MATCHER_P(NetworkMetricsIs, metric, "") {
+ return arg.uplink_bandwidth_bps == metric.uplink_bandwidth_bps &&
+ arg.target_audio_bitrate_bps == metric.target_audio_bitrate_bps &&
+ arg.rtt_ms == metric.rtt_ms &&
+ arg.overhead_bytes_per_packet == metric.overhead_bytes_per_packet &&
+ arg.uplink_packet_loss_fraction == metric.uplink_packet_loss_fraction;
+}
+
+MATCHER_P(IsRtcEventAnaConfigEqualTo, config, "") {
+ if (arg->GetType() != RtcEvent::Type::AudioNetworkAdaptation) {
+ return false;
+ }
+ auto ana_event = static_cast<RtcEventAudioNetworkAdaptation*>(arg);
+ return ana_event->config() == config;
+}
+
+MATCHER_P(EncoderRuntimeConfigIs, config, "") {
+ return arg.bitrate_bps == config.bitrate_bps &&
+ arg.frame_length_ms == config.frame_length_ms &&
+ arg.uplink_packet_loss_fraction ==
+ config.uplink_packet_loss_fraction &&
+ arg.enable_fec == config.enable_fec &&
+ arg.enable_dtx == config.enable_dtx &&
+ arg.num_channels == config.num_channels;
+}
+
+struct AudioNetworkAdaptorStates {
+ std::unique_ptr<AudioNetworkAdaptorImpl> audio_network_adaptor;
+ std::vector<std::unique_ptr<MockController>> mock_controllers;
+ std::unique_ptr<MockRtcEventLog> event_log;
+ MockDebugDumpWriter* mock_debug_dump_writer;
+};
+
+AudioNetworkAdaptorStates CreateAudioNetworkAdaptor() {
+ AudioNetworkAdaptorStates states;
+ std::vector<Controller*> controllers;
+ for (size_t i = 0; i < kNumControllers; ++i) {
+ auto controller =
+ std::unique_ptr<MockController>(new NiceMock<MockController>());
+ EXPECT_CALL(*controller, Die());
+ controllers.push_back(controller.get());
+ states.mock_controllers.push_back(std::move(controller));
+ }
+
+ auto controller_manager = std::unique_ptr<MockControllerManager>(
+ new NiceMock<MockControllerManager>());
+
+ EXPECT_CALL(*controller_manager, Die());
+ EXPECT_CALL(*controller_manager, GetControllers())
+ .WillRepeatedly(Return(controllers));
+ EXPECT_CALL(*controller_manager, GetSortedControllers(_))
+ .WillRepeatedly(Return(controllers));
+
+ states.event_log.reset(new NiceMock<MockRtcEventLog>());
+
+ auto debug_dump_writer =
+ std::unique_ptr<MockDebugDumpWriter>(new NiceMock<MockDebugDumpWriter>());
+ EXPECT_CALL(*debug_dump_writer, Die());
+ states.mock_debug_dump_writer = debug_dump_writer.get();
+
+ AudioNetworkAdaptorImpl::Config config;
+ config.event_log = states.event_log.get();
+  // AudioNetworkAdaptorImpl governs the lifetime of the controller manager.
+ states.audio_network_adaptor.reset(new AudioNetworkAdaptorImpl(
+ config, std::move(controller_manager), std::move(debug_dump_writer)));
+
+ return states;
+}
+
+void SetExpectCallToUpdateNetworkMetrics(
+ const std::vector<std::unique_ptr<MockController>>& controllers,
+ const Controller::NetworkMetrics& check) {
+ for (auto& mock_controller : controllers) {
+ EXPECT_CALL(*mock_controller,
+ UpdateNetworkMetrics(NetworkMetricsIs(check)));
+ }
+}
+
+} // namespace
+
+TEST(AudioNetworkAdaptorImplTest,
+ UpdateNetworkMetricsIsCalledOnSetUplinkBandwidth) {
+ auto states = CreateAudioNetworkAdaptor();
+ constexpr int kBandwidth = 16000;
+ Controller::NetworkMetrics check;
+ check.uplink_bandwidth_bps = kBandwidth;
+ SetExpectCallToUpdateNetworkMetrics(states.mock_controllers, check);
+ states.audio_network_adaptor->SetUplinkBandwidth(kBandwidth);
+}
+
+TEST(AudioNetworkAdaptorImplTest,
+ UpdateNetworkMetricsIsCalledOnSetUplinkPacketLossFraction) {
+ auto states = CreateAudioNetworkAdaptor();
+ constexpr float kPacketLoss = 0.7f;
+ Controller::NetworkMetrics check;
+ check.uplink_packet_loss_fraction = kPacketLoss;
+ SetExpectCallToUpdateNetworkMetrics(states.mock_controllers, check);
+ states.audio_network_adaptor->SetUplinkPacketLossFraction(kPacketLoss);
+}
+
+TEST(AudioNetworkAdaptorImplTest, UpdateNetworkMetricsIsCalledOnSetRtt) {
+ auto states = CreateAudioNetworkAdaptor();
+ constexpr int kRtt = 100;
+ Controller::NetworkMetrics check;
+ check.rtt_ms = kRtt;
+ SetExpectCallToUpdateNetworkMetrics(states.mock_controllers, check);
+ states.audio_network_adaptor->SetRtt(kRtt);
+}
+
+TEST(AudioNetworkAdaptorImplTest,
+ UpdateNetworkMetricsIsCalledOnSetTargetAudioBitrate) {
+ auto states = CreateAudioNetworkAdaptor();
+ constexpr int kTargetAudioBitrate = 15000;
+ Controller::NetworkMetrics check;
+ check.target_audio_bitrate_bps = kTargetAudioBitrate;
+ SetExpectCallToUpdateNetworkMetrics(states.mock_controllers, check);
+ states.audio_network_adaptor->SetTargetAudioBitrate(kTargetAudioBitrate);
+}
+
+TEST(AudioNetworkAdaptorImplTest, UpdateNetworkMetricsIsCalledOnSetOverhead) {
+ auto states = CreateAudioNetworkAdaptor();
+ constexpr size_t kOverhead = 64;
+ Controller::NetworkMetrics check;
+ check.overhead_bytes_per_packet = kOverhead;
+ SetExpectCallToUpdateNetworkMetrics(states.mock_controllers, check);
+ states.audio_network_adaptor->SetOverhead(kOverhead);
+}
+
+TEST(AudioNetworkAdaptorImplTest,
+ MakeDecisionIsCalledOnGetEncoderRuntimeConfig) {
+ auto states = CreateAudioNetworkAdaptor();
+ for (auto& mock_controller : states.mock_controllers)
+ EXPECT_CALL(*mock_controller, MakeDecision(_));
+ states.audio_network_adaptor->GetEncoderRuntimeConfig();
+}
+
+TEST(AudioNetworkAdaptorImplTest,
+ DumpEncoderRuntimeConfigIsCalledOnGetEncoderRuntimeConfig) {
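+ // A field trial string is a concatenation of "TrialName/GroupName/" pairs;
+ // the one below enables the two audio adaptation trials this test relies on.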
+ test::ScopedFieldTrials override_field_trials(
+ "WebRTC-Audio-BitrateAdaptation/Enabled/WebRTC-Audio-FecAdaptation/"
+ "Enabled/");
+ rtc::ScopedFakeClock fake_clock;
+ fake_clock.AdvanceTime(TimeDelta::Millis(kClockInitialTimeMs));
+ auto states = CreateAudioNetworkAdaptor();
+ AudioEncoderRuntimeConfig config;
+ config.bitrate_bps = 32000;
+ config.enable_fec = true;
+
+ EXPECT_CALL(*states.mock_controllers[0], MakeDecision(_))
+ .WillOnce(SetArgPointee<0>(config));
+
+ EXPECT_CALL(*states.mock_debug_dump_writer,
+ DumpEncoderRuntimeConfig(EncoderRuntimeConfigIs(config),
+ kClockInitialTimeMs));
+ states.audio_network_adaptor->GetEncoderRuntimeConfig();
+}
+
+TEST(AudioNetworkAdaptorImplTest,
+ DumpNetworkMetricsIsCalledOnSetNetworkMetrics) {
+ rtc::ScopedFakeClock fake_clock;
+ fake_clock.AdvanceTime(TimeDelta::Millis(kClockInitialTimeMs));
+
+ auto states = CreateAudioNetworkAdaptor();
+
+ constexpr int kBandwidth = 16000;
+ constexpr float kPacketLoss = 0.7f;
+ constexpr int kRtt = 100;
+ constexpr int kTargetAudioBitrate = 15000;
+ constexpr size_t kOverhead = 64;
+
+ Controller::NetworkMetrics check;
+ check.uplink_bandwidth_bps = kBandwidth;
+ int64_t timestamp_check = kClockInitialTimeMs;
+
+ EXPECT_CALL(*states.mock_debug_dump_writer,
+ DumpNetworkMetrics(NetworkMetricsIs(check), timestamp_check));
+ states.audio_network_adaptor->SetUplinkBandwidth(kBandwidth);
+
+ fake_clock.AdvanceTime(TimeDelta::Millis(100));
+ timestamp_check += 100;
+ check.uplink_packet_loss_fraction = kPacketLoss;
+ EXPECT_CALL(*states.mock_debug_dump_writer,
+ DumpNetworkMetrics(NetworkMetricsIs(check), timestamp_check));
+ states.audio_network_adaptor->SetUplinkPacketLossFraction(kPacketLoss);
+
+ fake_clock.AdvanceTime(TimeDelta::Millis(50));
+ timestamp_check += 50;
+
+ fake_clock.AdvanceTime(TimeDelta::Millis(200));
+ timestamp_check += 200;
+ check.rtt_ms = kRtt;
+ EXPECT_CALL(*states.mock_debug_dump_writer,
+ DumpNetworkMetrics(NetworkMetricsIs(check), timestamp_check));
+ states.audio_network_adaptor->SetRtt(kRtt);
+
+ fake_clock.AdvanceTime(TimeDelta::Millis(150));
+ timestamp_check += 150;
+ check.target_audio_bitrate_bps = kTargetAudioBitrate;
+ EXPECT_CALL(*states.mock_debug_dump_writer,
+ DumpNetworkMetrics(NetworkMetricsIs(check), timestamp_check));
+ states.audio_network_adaptor->SetTargetAudioBitrate(kTargetAudioBitrate);
+
+ fake_clock.AdvanceTime(TimeDelta::Millis(50));
+ timestamp_check += 50;
+ check.overhead_bytes_per_packet = kOverhead;
+ EXPECT_CALL(*states.mock_debug_dump_writer,
+ DumpNetworkMetrics(NetworkMetricsIs(check), timestamp_check));
+ states.audio_network_adaptor->SetOverhead(kOverhead);
+}
+
+TEST(AudioNetworkAdaptorImplTest, LogRuntimeConfigOnGetEncoderRuntimeConfig) {
+ test::ScopedFieldTrials override_field_trials(
+ "WebRTC-Audio-BitrateAdaptation/Enabled/WebRTC-Audio-FecAdaptation/"
+ "Enabled/");
+ auto states = CreateAudioNetworkAdaptor();
+
+ AudioEncoderRuntimeConfig config;
+ config.bitrate_bps = 32000;
+ config.enable_fec = true;
+
+ EXPECT_CALL(*states.mock_controllers[0], MakeDecision(_))
+ .WillOnce(SetArgPointee<0>(config));
+
+ EXPECT_CALL(*states.event_log, LogProxy(IsRtcEventAnaConfigEqualTo(config)))
+ .Times(1);
+ states.audio_network_adaptor->GetEncoderRuntimeConfig();
+}
+
+TEST(AudioNetworkAdaptorImplTest, TestANAStats) {
+ auto states = CreateAudioNetworkAdaptor();
+
+ // Simulate some adaptation; otherwise the stats will not show anything.
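+ // The sequence below applies config1 -> config2 -> config1. Each switch
+ // changes bitrate, channel count, FEC and DTX, so those action counters are
+ // expected to end at 2; the frame length goes 120 -> 60 -> 120 ms, giving
+ // one decrease and one increase.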
+ AudioEncoderRuntimeConfig config1, config2;
+ config1.bitrate_bps = 32000;
+ config1.num_channels = 2;
+ config1.enable_fec = true;
+ config1.enable_dtx = true;
+ config1.frame_length_ms = 120;
+ config1.uplink_packet_loss_fraction = 0.1f;
+ config2.bitrate_bps = 16000;
+ config2.num_channels = 1;
+ config2.enable_fec = false;
+ config2.enable_dtx = false;
+ config2.frame_length_ms = 60;
+ config2.uplink_packet_loss_fraction = 0.1f;
+
+ EXPECT_CALL(*states.mock_controllers[0], MakeDecision(_))
+ .WillOnce(SetArgPointee<0>(config1));
+ states.audio_network_adaptor->GetEncoderRuntimeConfig();
+ EXPECT_CALL(*states.mock_controllers[0], MakeDecision(_))
+ .WillOnce(SetArgPointee<0>(config2));
+ states.audio_network_adaptor->GetEncoderRuntimeConfig();
+ EXPECT_CALL(*states.mock_controllers[0], MakeDecision(_))
+ .WillOnce(SetArgPointee<0>(config1));
+ states.audio_network_adaptor->GetEncoderRuntimeConfig();
+
+ auto ana_stats = states.audio_network_adaptor->GetStats();
+
+ EXPECT_EQ(ana_stats.bitrate_action_counter, 2u);
+ EXPECT_EQ(ana_stats.channel_action_counter, 2u);
+ EXPECT_EQ(ana_stats.dtx_action_counter, 2u);
+ EXPECT_EQ(ana_stats.fec_action_counter, 2u);
+ EXPECT_EQ(ana_stats.frame_length_increase_counter, 1u);
+ EXPECT_EQ(ana_stats.frame_length_decrease_counter, 1u);
+ EXPECT_EQ(ana_stats.uplink_packet_loss_fraction, 0.1f);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/bitrate_controller.cc b/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/bitrate_controller.cc
new file mode 100644
index 0000000000..88ca38d074
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/bitrate_controller.cc
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/audio_network_adaptor/bitrate_controller.h"
+
+#include <algorithm>
+
+#include "rtc_base/checks.h"
+#include "system_wrappers/include/field_trial.h"
+
+namespace webrtc {
+namespace audio_network_adaptor {
+
+BitrateController::Config::Config(int initial_bitrate_bps,
+ int initial_frame_length_ms,
+ int fl_increase_overhead_offset,
+ int fl_decrease_overhead_offset)
+ : initial_bitrate_bps(initial_bitrate_bps),
+ initial_frame_length_ms(initial_frame_length_ms),
+ fl_increase_overhead_offset(fl_increase_overhead_offset),
+ fl_decrease_overhead_offset(fl_decrease_overhead_offset) {}
+
+BitrateController::Config::~Config() = default;
+
+BitrateController::BitrateController(const Config& config)
+ : config_(config),
+ bitrate_bps_(config_.initial_bitrate_bps),
+ frame_length_ms_(config_.initial_frame_length_ms) {
+ RTC_DCHECK_GT(bitrate_bps_, 0);
+ RTC_DCHECK_GT(frame_length_ms_, 0);
+}
+
+BitrateController::~BitrateController() = default;
+
+void BitrateController::UpdateNetworkMetrics(
+ const NetworkMetrics& network_metrics) {
+ if (network_metrics.target_audio_bitrate_bps)
+ target_audio_bitrate_bps_ = network_metrics.target_audio_bitrate_bps;
+ if (network_metrics.overhead_bytes_per_packet) {
+ RTC_DCHECK_GT(*network_metrics.overhead_bytes_per_packet, 0);
+ overhead_bytes_per_packet_ = network_metrics.overhead_bytes_per_packet;
+ }
+}
+
+void BitrateController::MakeDecision(AudioEncoderRuntimeConfig* config) {
+ // Decision on `bitrate_bps` should not have been made.
+ RTC_DCHECK(!config->bitrate_bps);
+ if (target_audio_bitrate_bps_ && overhead_bytes_per_packet_) {
+ if (config->frame_length_ms)
+ frame_length_ms_ = *config->frame_length_ms;
+ int offset = config->last_fl_change_increase
+ ? config_.fl_increase_overhead_offset
+ : config_.fl_decrease_overhead_offset;
+ // Check that
+ // -(*overhead_bytes_per_packet_) <= offset <= (*overhead_bytes_per_packet_)
+ RTC_DCHECK_GE(*overhead_bytes_per_packet_, -offset);
+ RTC_DCHECK_LE(offset, *overhead_bytes_per_packet_);
+ int overhead_rate_bps = static_cast<int>(
+ (*overhead_bytes_per_packet_ + offset) * 8 * 1000 / frame_length_ms_);
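+ // Worked example (numbers as in the unit tests, offsets zero): 64 overhead
+ // bytes per packet and 20 ms frames give 64 * 8 * 1000 / 20 = 25600 bps of
+ // overhead, so a 48000 bps target leaves 22400 bps for the payload.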
+ bitrate_bps_ = std::max(0, *target_audio_bitrate_bps_ - overhead_rate_bps);
+ }
+ config->bitrate_bps = bitrate_bps_;
+}
+
+} // namespace audio_network_adaptor
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/bitrate_controller.h b/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/bitrate_controller.h
new file mode 100644
index 0000000000..c1032146cc
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/bitrate_controller.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_BITRATE_CONTROLLER_H_
+#define MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_BITRATE_CONTROLLER_H_
+
+#include <stddef.h>
+
+#include "absl/types/optional.h"
+#include "modules/audio_coding/audio_network_adaptor/controller.h"
+#include "modules/audio_coding/audio_network_adaptor/include/audio_network_adaptor_config.h"
+
+namespace webrtc {
+namespace audio_network_adaptor {
+
+class BitrateController final : public Controller {
+ public:
+ struct Config {
+ Config(int initial_bitrate_bps,
+ int initial_frame_length_ms,
+ int fl_increase_overhead_offset,
+ int fl_decrease_overhead_offset);
+ ~Config();
+ // Initial values, used until updated network metrics allow a decision.
+ int initial_bitrate_bps;
+ int initial_frame_length_ms;
+ // Offsets applied to the per-packet overhead depending on whether the last
+ // frame length change was an increase or a decrease (see config.proto).
+ int fl_increase_overhead_offset;
+ int fl_decrease_overhead_offset;
+ };
+
+ explicit BitrateController(const Config& config);
+
+ ~BitrateController() override;
+
+ BitrateController(const BitrateController&) = delete;
+ BitrateController& operator=(const BitrateController&) = delete;
+
+ void UpdateNetworkMetrics(const NetworkMetrics& network_metrics) override;
+
+ void MakeDecision(AudioEncoderRuntimeConfig* config) override;
+
+ private:
+ const Config config_;
+ int bitrate_bps_;
+ int frame_length_ms_;
+ absl::optional<int> target_audio_bitrate_bps_;
+ absl::optional<size_t> overhead_bytes_per_packet_;
+};
+
+} // namespace audio_network_adaptor
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_BITRATE_CONTROLLER_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/bitrate_controller_unittest.cc b/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/bitrate_controller_unittest.cc
new file mode 100644
index 0000000000..3155f198a4
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/bitrate_controller_unittest.cc
@@ -0,0 +1,250 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/audio_network_adaptor/bitrate_controller.h"
+
+#include "rtc_base/numerics/safe_conversions.h"
+#include "test/field_trial.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace audio_network_adaptor {
+
+namespace {
+
+void UpdateNetworkMetrics(
+ BitrateController* controller,
+ const absl::optional<int>& target_audio_bitrate_bps,
+ const absl::optional<size_t>& overhead_bytes_per_packet) {
+ // UpdateNetworkMetrics can accept multiple network metric updates at once.
+ // However, the most common use case currently is to update one metric at a
+ // time. To reflect this, we make a separate call for each metric.
+ if (target_audio_bitrate_bps) {
+ Controller::NetworkMetrics network_metrics;
+ network_metrics.target_audio_bitrate_bps = target_audio_bitrate_bps;
+ controller->UpdateNetworkMetrics(network_metrics);
+ }
+ if (overhead_bytes_per_packet) {
+ Controller::NetworkMetrics network_metrics;
+ network_metrics.overhead_bytes_per_packet = overhead_bytes_per_packet;
+ controller->UpdateNetworkMetrics(network_metrics);
+ }
+}
+
+void CheckDecision(BitrateController* controller,
+ const absl::optional<int>& frame_length_ms,
+ int expected_bitrate_bps) {
+ AudioEncoderRuntimeConfig config;
+ config.frame_length_ms = frame_length_ms;
+ controller->MakeDecision(&config);
+ EXPECT_EQ(expected_bitrate_bps, config.bitrate_bps);
+}
+
+} // namespace
+
+// These tests are named AnaBitrateControllerTest to distinguish from
+// BitrateControllerTest in
+// modules/bitrate_controller/bitrate_controller_unittest.cc.
+
+TEST(AnaBitrateControllerTest, OutputInitValueWhenTargetBitrateUnknown) {
+ constexpr int kInitialBitrateBps = 32000;
+ constexpr int kInitialFrameLengthMs = 20;
+ constexpr size_t kOverheadBytesPerPacket = 64;
+ BitrateController controller(BitrateController::Config(
+ kInitialBitrateBps, kInitialFrameLengthMs, 0, 0));
+ UpdateNetworkMetrics(&controller, absl::nullopt, kOverheadBytesPerPacket);
+ CheckDecision(&controller, kInitialFrameLengthMs * 2, kInitialBitrateBps);
+}
+
+TEST(AnaBitrateControllerTest, OutputInitValueWhenOverheadUnknown) {
+ constexpr int kInitialBitrateBps = 32000;
+ constexpr int kInitialFrameLengthMs = 20;
+ constexpr int kTargetBitrateBps = 48000;
+ BitrateController controller(BitrateController::Config(
+ kInitialBitrateBps, kInitialFrameLengthMs, 0, 0));
+ UpdateNetworkMetrics(&controller, kTargetBitrateBps, absl::nullopt);
+ CheckDecision(&controller, kInitialFrameLengthMs * 2, kInitialBitrateBps);
+}
+
+TEST(AnaBitrateControllerTest, ChangeBitrateOnTargetBitrateChanged) {
+ test::ScopedFieldTrials override_field_trials(
+ "WebRTC-SendSideBwe-WithOverhead/Enabled/");
+ constexpr int kInitialFrameLengthMs = 20;
+ BitrateController controller(
+ BitrateController::Config(32000, kInitialFrameLengthMs, 0, 0));
+ constexpr int kTargetBitrateBps = 48000;
+ constexpr size_t kOverheadBytesPerPacket = 64;
+ constexpr int kBitrateBps = kTargetBitrateBps - kOverheadBytesPerPacket * 8 *
+ 1000 /
+ kInitialFrameLengthMs;
+ // The frame length is unchanged; the bitrate changes in accordance with
+ // `metrics.target_audio_bitrate_bps` and `metrics.overhead_bytes_per_packet`.
+ UpdateNetworkMetrics(&controller, kTargetBitrateBps, kOverheadBytesPerPacket);
+ CheckDecision(&controller, kInitialFrameLengthMs, kBitrateBps);
+}
+
+TEST(AnaBitrateControllerTest, UpdateMultipleNetworkMetricsAtOnce) {
+ // This test is similar to ChangeBitrateOnTargetBitrateChanged, but instead
+ // of using ::UpdateNetworkMetrics(...), which calls
+ // BitrateController::UpdateNetworkMetrics(...) multiple times, we call it
+ // only once. This verifies that
+ // BitrateController::UpdateNetworkMetrics(...) can handle multiple network
+ // updates at once, although this is not a common use case in the current
+ // audio_network_adaptor_impl.cc.
+ test::ScopedFieldTrials override_field_trials(
+ "WebRTC-SendSideBwe-WithOverhead/Enabled/");
+ constexpr int kInitialFrameLengthMs = 20;
+ BitrateController controller(
+ BitrateController::Config(32000, kInitialFrameLengthMs, 0, 0));
+ constexpr int kTargetBitrateBps = 48000;
+ constexpr size_t kOverheadBytesPerPacket = 64;
+ constexpr int kBitrateBps = kTargetBitrateBps - kOverheadBytesPerPacket * 8 *
+ 1000 /
+ kInitialFrameLengthMs;
+ Controller::NetworkMetrics network_metrics;
+ network_metrics.target_audio_bitrate_bps = kTargetBitrateBps;
+ network_metrics.overhead_bytes_per_packet = kOverheadBytesPerPacket;
+ controller.UpdateNetworkMetrics(network_metrics);
+ CheckDecision(&controller, kInitialFrameLengthMs, kBitrateBps);
+}
+
+TEST(AnaBitrateControllerTest, TreatUnknownFrameLengthAsFrameLengthUnchanged) {
+ test::ScopedFieldTrials override_field_trials(
+ "WebRTC-SendSideBwe-WithOverhead/Enabled/");
+ constexpr int kInitialFrameLengthMs = 20;
+ BitrateController controller(
+ BitrateController::Config(32000, kInitialFrameLengthMs, 0, 0));
+ constexpr int kTargetBitrateBps = 48000;
+ constexpr size_t kOverheadBytesPerPacket = 64;
+ constexpr int kBitrateBps = kTargetBitrateBps - kOverheadBytesPerPacket * 8 *
+ 1000 /
+ kInitialFrameLengthMs;
+ UpdateNetworkMetrics(&controller, kTargetBitrateBps, kOverheadBytesPerPacket);
+ CheckDecision(&controller, absl::nullopt, kBitrateBps);
+}
+
+TEST(AnaBitrateControllerTest, IncreaseBitrateOnFrameLengthIncreased) {
+ test::ScopedFieldTrials override_field_trials(
+ "WebRTC-SendSideBwe-WithOverhead/Enabled/");
+ constexpr int kInitialFrameLengthMs = 20;
+ BitrateController controller(
+ BitrateController::Config(32000, kInitialFrameLengthMs, 0, 0));
+
+ constexpr int kTargetBitrateBps = 48000;
+ constexpr size_t kOverheadBytesPerPacket = 64;
+ constexpr int kBitrateBps = kTargetBitrateBps - kOverheadBytesPerPacket * 8 *
+ 1000 /
+ kInitialFrameLengthMs;
+ UpdateNetworkMetrics(&controller, kTargetBitrateBps, kOverheadBytesPerPacket);
+ CheckDecision(&controller, absl::nullopt, kBitrateBps);
+
+ constexpr int kFrameLengthMs = 60;
+ constexpr size_t kPacketOverheadRateDiff =
+ kOverheadBytesPerPacket * 8 * 1000 / 20 -
+ kOverheadBytesPerPacket * 8 * 1000 / 60;
+ UpdateNetworkMetrics(&controller, kTargetBitrateBps, kOverheadBytesPerPacket);
+ CheckDecision(&controller, kFrameLengthMs,
+ kBitrateBps + kPacketOverheadRateDiff);
+}
+
+TEST(AnaBitrateControllerTest, DecreaseBitrateOnFrameLengthDecreased) {
+ test::ScopedFieldTrials override_field_trials(
+ "WebRTC-SendSideBwe-WithOverhead/Enabled/");
+ constexpr int kInitialFrameLengthMs = 60;
+ BitrateController controller(
+ BitrateController::Config(32000, kInitialFrameLengthMs, 0, 0));
+
+ constexpr int kTargetBitrateBps = 48000;
+ constexpr size_t kOverheadBytesPerPacket = 64;
+ constexpr int kBitrateBps = kTargetBitrateBps - kOverheadBytesPerPacket * 8 *
+ 1000 /
+ kInitialFrameLengthMs;
+ UpdateNetworkMetrics(&controller, kTargetBitrateBps, kOverheadBytesPerPacket);
+ CheckDecision(&controller, absl::nullopt, kBitrateBps);
+
+ constexpr int kFrameLengthMs = 20;
+ constexpr size_t kPacketOverheadRateDiff =
+ kOverheadBytesPerPacket * 8 * 1000 / 20 -
+ kOverheadBytesPerPacket * 8 * 1000 / 60;
+ UpdateNetworkMetrics(&controller, kTargetBitrateBps, kOverheadBytesPerPacket);
+ CheckDecision(&controller, kFrameLengthMs,
+ kBitrateBps - kPacketOverheadRateDiff);
+}
+
+TEST(AnaBitrateControllerTest, BitrateNeverBecomesNegative) {
+ test::ScopedFieldTrials override_field_trials(
+ "WebRTC-SendSideBwe-WithOverhead/Enabled/");
+ BitrateController controller(BitrateController::Config(32000, 20, 0, 0));
+ constexpr size_t kOverheadBytesPerPacket = 64;
+ constexpr int kFrameLengthMs = 60;
+ // Set a target rate smaller than the overhead rate; the bitrate is then
+ // bounded below by 0.
+ constexpr int kTargetBitrateBps =
+ kOverheadBytesPerPacket * 8 * 1000 / kFrameLengthMs - 1;
+ UpdateNetworkMetrics(&controller, kTargetBitrateBps, kOverheadBytesPerPacket);
+ CheckDecision(&controller, kFrameLengthMs, 0);
+}
+
+TEST(AnaBitrateControllerTest, CheckBehaviorOnChangingCondition) {
+ test::ScopedFieldTrials override_field_trials(
+ "WebRTC-SendSideBwe-WithOverhead/Enabled/");
+ BitrateController controller(BitrateController::Config(32000, 20, 0, 0));
+
+ // Start from an arbitrary overall bitrate.
+ int overall_bitrate = 34567;
+ size_t overhead_bytes_per_packet = 64;
+ int frame_length_ms = 20;
+ int current_bitrate = rtc::checked_cast<int>(
+ overall_bitrate - overhead_bytes_per_packet * 8 * 1000 / frame_length_ms);
+
+ UpdateNetworkMetrics(&controller, overall_bitrate, overhead_bytes_per_packet);
+ CheckDecision(&controller, frame_length_ms, current_bitrate);
+
+ // Next: increase overall bitrate.
+ overall_bitrate += 100;
+ current_bitrate += 100;
+ UpdateNetworkMetrics(&controller, overall_bitrate, overhead_bytes_per_packet);
+ CheckDecision(&controller, frame_length_ms, current_bitrate);
+
+ // Next: change frame length.
+ frame_length_ms = 60;
+ current_bitrate +=
+ rtc::checked_cast<int>(overhead_bytes_per_packet * 8 * 1000 / 20 -
+ overhead_bytes_per_packet * 8 * 1000 / 60);
+ UpdateNetworkMetrics(&controller, overall_bitrate, overhead_bytes_per_packet);
+ CheckDecision(&controller, frame_length_ms, current_bitrate);
+
+ // Next: change overhead.
+ overhead_bytes_per_packet -= 30;
+ current_bitrate += 30 * 8 * 1000 / frame_length_ms;
+ UpdateNetworkMetrics(&controller, overall_bitrate, overhead_bytes_per_packet);
+ CheckDecision(&controller, frame_length_ms, current_bitrate);
+
+ // Next: change frame length.
+ frame_length_ms = 20;
+ current_bitrate -=
+ rtc::checked_cast<int>(overhead_bytes_per_packet * 8 * 1000 / 20 -
+ overhead_bytes_per_packet * 8 * 1000 / 60);
+ UpdateNetworkMetrics(&controller, overall_bitrate, overhead_bytes_per_packet);
+ CheckDecision(&controller, frame_length_ms, current_bitrate);
+
+ // Next: decrease overall bitrate and frame length.
+ overall_bitrate -= 100;
+ current_bitrate -= 100;
+ frame_length_ms = 60;
+ current_bitrate +=
+ rtc::checked_cast<int>(overhead_bytes_per_packet * 8 * 1000 / 20 -
+ overhead_bytes_per_packet * 8 * 1000 / 60);
+
+ UpdateNetworkMetrics(&controller, overall_bitrate, overhead_bytes_per_packet);
+ CheckDecision(&controller, frame_length_ms, current_bitrate);
+}
+
+} // namespace audio_network_adaptor
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/channel_controller.cc b/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/channel_controller.cc
new file mode 100644
index 0000000000..2ef2f4c4d8
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/channel_controller.cc
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/audio_network_adaptor/channel_controller.h"
+
+#include <algorithm>
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+ChannelController::Config::Config(size_t num_encoder_channels,
+ size_t intial_channels_to_encode,
+ int channel_1_to_2_bandwidth_bps,
+ int channel_2_to_1_bandwidth_bps)
+ : num_encoder_channels(num_encoder_channels),
+ intial_channels_to_encode(intial_channels_to_encode),
+ channel_1_to_2_bandwidth_bps(channel_1_to_2_bandwidth_bps),
+ channel_2_to_1_bandwidth_bps(channel_2_to_1_bandwidth_bps) {}
+
+ChannelController::ChannelController(const Config& config)
+ : config_(config), channels_to_encode_(config_.intial_channels_to_encode) {
+ RTC_DCHECK_GT(config_.intial_channels_to_encode, 0lu);
+ // Currently, we require `intial_channels_to_encode` to be <= 2.
+ RTC_DCHECK_LE(config_.intial_channels_to_encode, 2lu);
+ RTC_DCHECK_GE(config_.num_encoder_channels,
+ config_.intial_channels_to_encode);
+}
+
+ChannelController::~ChannelController() = default;
+
+void ChannelController::UpdateNetworkMetrics(
+ const NetworkMetrics& network_metrics) {
+ if (network_metrics.uplink_bandwidth_bps)
+ uplink_bandwidth_bps_ = network_metrics.uplink_bandwidth_bps;
+}
+
+void ChannelController::MakeDecision(AudioEncoderRuntimeConfig* config) {
+ // Decision on `num_channels` should not have been made.
+ RTC_DCHECK(!config->num_channels);
+
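+ // The two thresholds form a hysteresis band: with the switching points used
+ // in the unit tests (31000 bps for 1 -> 2, 29000 bps for 2 -> 1), any
+ // bandwidth strictly between them leaves the channel count unchanged.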
+ if (uplink_bandwidth_bps_) {
+ if (channels_to_encode_ == 2 &&
+ *uplink_bandwidth_bps_ <= config_.channel_2_to_1_bandwidth_bps) {
+ channels_to_encode_ = 1;
+ } else if (channels_to_encode_ == 1 &&
+ *uplink_bandwidth_bps_ >= config_.channel_1_to_2_bandwidth_bps) {
+ channels_to_encode_ =
+ std::min(static_cast<size_t>(2), config_.num_encoder_channels);
+ }
+ }
+ config->num_channels = channels_to_encode_;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/channel_controller.h b/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/channel_controller.h
new file mode 100644
index 0000000000..3cd4bb7dec
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/channel_controller.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_CHANNEL_CONTROLLER_H_
+#define MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_CHANNEL_CONTROLLER_H_
+
+#include <stddef.h>
+
+#include "absl/types/optional.h"
+#include "modules/audio_coding/audio_network_adaptor/controller.h"
+#include "modules/audio_coding/audio_network_adaptor/include/audio_network_adaptor_config.h"
+
+namespace webrtc {
+
+class ChannelController final : public Controller {
+ public:
+ struct Config {
+ Config(size_t num_encoder_channels,
+ size_t intial_channels_to_encode,
+ int channel_1_to_2_bandwidth_bps,
+ int channel_2_to_1_bandwidth_bps);
+ size_t num_encoder_channels;
+ size_t intial_channels_to_encode;
+ // Uplink bandwidth above which the number of encoded channels should switch
+ // from 1 to 2.
+ int channel_1_to_2_bandwidth_bps;
+ // Uplink bandwidth below which the number of encoded channels should switch
+ // from 2 to 1.
+ int channel_2_to_1_bandwidth_bps;
+ };
+
+ explicit ChannelController(const Config& config);
+
+ ~ChannelController() override;
+
+ ChannelController(const ChannelController&) = delete;
+ ChannelController& operator=(const ChannelController&) = delete;
+
+ void UpdateNetworkMetrics(const NetworkMetrics& network_metrics) override;
+
+ void MakeDecision(AudioEncoderRuntimeConfig* config) override;
+
+ private:
+ const Config config_;
+ size_t channels_to_encode_;
+ absl::optional<int> uplink_bandwidth_bps_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_CHANNEL_CONTROLLER_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/channel_controller_unittest.cc b/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/channel_controller_unittest.cc
new file mode 100644
index 0000000000..21504bcec0
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/channel_controller_unittest.cc
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/audio_network_adaptor/channel_controller.h"
+
+#include <memory>
+
+#include "test/gtest.h"
+
+namespace webrtc {
+
+namespace {
+
+constexpr int kNumChannels = 2;
+constexpr int kChannel1To2BandwidthBps = 31000;
+constexpr int kChannel2To1BandwidthBps = 29000;
+constexpr int kMediumBandwidthBps =
+ (kChannel1To2BandwidthBps + kChannel2To1BandwidthBps) / 2;
+
+std::unique_ptr<ChannelController> CreateChannelController(int init_channels) {
+ std::unique_ptr<ChannelController> controller(
+ new ChannelController(ChannelController::Config(
+ kNumChannels, init_channels, kChannel1To2BandwidthBps,
+ kChannel2To1BandwidthBps)));
+ return controller;
+}
+
+void CheckDecision(ChannelController* controller,
+ const absl::optional<int>& uplink_bandwidth_bps,
+ size_t expected_num_channels) {
+ if (uplink_bandwidth_bps) {
+ Controller::NetworkMetrics network_metrics;
+ network_metrics.uplink_bandwidth_bps = uplink_bandwidth_bps;
+ controller->UpdateNetworkMetrics(network_metrics);
+ }
+ AudioEncoderRuntimeConfig config;
+ controller->MakeDecision(&config);
+ EXPECT_EQ(expected_num_channels, config.num_channels);
+}
+
+} // namespace
+
+TEST(ChannelControllerTest, OutputInitValueWhenUplinkBandwidthUnknown) {
+ constexpr int kInitChannels = 2;
+ auto controller = CreateChannelController(kInitChannels);
+ CheckDecision(controller.get(), absl::nullopt, kInitChannels);
+}
+
+TEST(ChannelControllerTest, SwitchTo2ChannelsOnHighUplinkBandwidth) {
+ constexpr int kInitChannels = 1;
+ auto controller = CreateChannelController(kInitChannels);
+ // Use a high bandwidth to check that the output switches to 2.
+ CheckDecision(controller.get(), kChannel1To2BandwidthBps, 2);
+}
+
+TEST(ChannelControllerTest, SwitchTo1ChannelOnLowUplinkBandwidth) {
+ constexpr int kInitChannels = 2;
+ auto controller = CreateChannelController(kInitChannels);
+ // Use a low bandwidth to check that the output switches to 1.
+ CheckDecision(controller.get(), kChannel2To1BandwidthBps, 1);
+}
+
+TEST(ChannelControllerTest, Maintain1ChannelOnMediumUplinkBandwidth) {
+ constexpr int kInitChannels = 1;
+ auto controller = CreateChannelController(kInitChannels);
+ // Use a bandwidth between the thresholds to check that the output stays at 1.
+ CheckDecision(controller.get(), kMediumBandwidthBps, 1);
+}
+
+TEST(ChannelControllerTest, Maintain2ChannelsOnMediumUplinkBandwidth) {
+ constexpr int kInitChannels = 2;
+ auto controller = CreateChannelController(kInitChannels);
+ // Use a bandwidth between the thresholds to check that the output stays at 2.
+ CheckDecision(controller.get(), kMediumBandwidthBps, 2);
+}
+
+TEST(ChannelControllerTest, CheckBehaviorOnChangingUplinkBandwidth) {
+ constexpr int kInitChannels = 1;
+ auto controller = CreateChannelController(kInitChannels);
+
+ // Use a bandwidth between the thresholds to check that the output stays at 1.
+ CheckDecision(controller.get(), kMediumBandwidthBps, 1);
+
+ // Use a high bandwidth to check that the output switches to 2.
+ CheckDecision(controller.get(), kChannel1To2BandwidthBps, 2);
+
+ // Use a bandwidth between the thresholds to check that the output stays at 2.
+ CheckDecision(controller.get(), kMediumBandwidthBps, 2);
+
+ // Use a low bandwidth to check that the output switches to 1.
+ CheckDecision(controller.get(), kChannel2To1BandwidthBps, 1);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/config.proto b/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/config.proto
new file mode 100644
index 0000000000..a815451993
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/config.proto
@@ -0,0 +1,196 @@
+syntax = "proto2";
+
+package webrtc.audio_network_adaptor.config;
+
+option optimize_for = LITE_RUNTIME;
+option java_package = "org.webrtc.AudioNetworkAdaptor";
+option java_outer_classname = "Config";
+option objc_class_prefix = "WANA";
+
+message FecController {
+ message Threshold {
+ // Threshold defines a curve in the bandwidth/packet-loss domain. The
+ // curve is characterized by the two conjunction points: A and B.
+ //
+ // packet ^ |
+ // loss | A|
+ // | \ A: (low_bandwidth_bps, low_bandwidth_packet_loss)
+ // | \ B: (high_bandwidth_bps, high_bandwidth_packet_loss)
+ // | B\________
+ // |---------------> bandwidth
+ optional int32 low_bandwidth_bps = 1;
+ optional float low_bandwidth_packet_loss = 2;
+ optional int32 high_bandwidth_bps = 3;
+ optional float high_bandwidth_packet_loss = 4;
+ }
+
+ // `fec_enabling_threshold` defines a curve above which FEC should be
+ // enabled. `fec_disabling_threshold` defines a curve below which FEC
+ // should be disabled. See the diagram below.
+ //
+ // packet-loss ^ | |
+ // | | | FEC
+ // | \ \ ON
+ // | FEC \ \_______ fec_enabling_threshold
+ // | OFF \_________ fec_disabling_threshold
+ // |-----------------> bandwidth
+ optional Threshold fec_enabling_threshold = 1;
+ optional Threshold fec_disabling_threshold = 2;
+
+ // `time_constant_ms` is the time constant for an exponential filter, which
+ // is used for smoothing the packet loss fraction.
+ optional int32 time_constant_ms = 3;
+}
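+
+// A `FecController` entry might look as follows in proto text format
+// (illustrative values only, not recommended defaults):
+//
+//   fec_enabling_threshold {
+//     low_bandwidth_bps: 17000
+//     low_bandwidth_packet_loss: 0.1
+//     high_bandwidth_bps: 64000
+//     high_bandwidth_packet_loss: 0.05
+//   }
+//   fec_disabling_threshold {
+//     low_bandwidth_bps: 15000
+//     low_bandwidth_packet_loss: 0.08
+//     high_bandwidth_bps: 60000
+//     high_bandwidth_packet_loss: 0.03
+//   }
+//   time_constant_ms: 10000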
+
+message FecControllerRplrBased {
+ message Threshold {
+ // Threshold defines a curve in the bandwidth/recoverable-packet-loss
+ // domain.
+ // The curve is characterized by the two conjunction points: A and B.
+ //
+ // recoverable ^
+ // packet | |
+ // loss | A|
+ // | \ A: (low_bandwidth_bps,
+ // | \ low_bandwidth_recoverable_packet_loss)
+ // | \ B: (high_bandwidth_bps,
+ // | \ high_bandwidth_recoverable_packet_loss)
+ // | B\________
+ // |---------------> bandwidth
+ optional int32 low_bandwidth_bps = 1;
+ optional float low_bandwidth_recoverable_packet_loss = 2;
+ optional int32 high_bandwidth_bps = 3;
+ optional float high_bandwidth_recoverable_packet_loss = 4;
+ }
+
+ // `fec_enabling_threshold` defines a curve above which FEC should be
+ // enabled. `fec_disabling_threshold` defines a curve below which FEC
+ // should be disabled. See the diagram below.
+ //
+ // packet-loss ^ | |
+ // | | | FEC
+ // | \ \ ON
+ // | FEC \ \_______ fec_enabling_threshold
+ // | OFF \_________ fec_disabling_threshold
+ // |-----------------> bandwidth
+ optional Threshold fec_enabling_threshold = 1;
+ optional Threshold fec_disabling_threshold = 2;
+}
+
+message FrameLengthController {
+ // Uplink packet loss fraction below which frame length can increase.
+ optional float fl_increasing_packet_loss_fraction = 1;
+
+ // Uplink packet loss fraction above which frame length should decrease.
+ optional float fl_decreasing_packet_loss_fraction = 2;
+
+ // Uplink bandwidth below which frame length can switch from 20ms to 60ms.
+ optional int32 fl_20ms_to_60ms_bandwidth_bps = 3;
+
+ // Uplink bandwidth above which frame length should switch from 60ms to 20ms.
+ optional int32 fl_60ms_to_20ms_bandwidth_bps = 4;
+
+ // Uplink bandwidth below which frame length can switch from 60ms to 120ms.
+ optional int32 fl_60ms_to_120ms_bandwidth_bps = 5;
+
+ // Uplink bandwidth above which frame length should switch from 120ms to 60ms.
+ optional int32 fl_120ms_to_60ms_bandwidth_bps = 6;
+
+ // Offset to apply to the per-packet overhead when increasing frame length.
+ optional int32 fl_increase_overhead_offset = 7;
+
+ // Offset to apply to the per-packet overhead when decreasing frame length.
+ optional int32 fl_decrease_overhead_offset = 8;
+
+ // Uplink bandwidth below which frame length can switch from 20ms to 40ms.
+ // In the current implementation, defining this invalidates
+ // fl_20ms_to_60ms_bandwidth_bps.
+ optional int32 fl_20ms_to_40ms_bandwidth_bps = 9;
+
+ // Uplink bandwidth above which frame length should switch from 40ms to 20ms.
+ optional int32 fl_40ms_to_20ms_bandwidth_bps = 10;
+
+ // Uplink bandwidth below which frame length can switch from 40ms to 60ms.
+ optional int32 fl_40ms_to_60ms_bandwidth_bps = 11;
+
+ // Uplink bandwidth above which frame length should switch from 60ms to 40ms.
+ // In the current implementation, defining this invalidates
+ // fl_60ms_to_20ms_bandwidth_bps.
+ optional int32 fl_60ms_to_40ms_bandwidth_bps = 12;
+}
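+
+// An illustrative (non-default) `FrameLengthController` entry in proto text
+// format:
+//
+//   fl_increasing_packet_loss_fraction: 0.04
+//   fl_decreasing_packet_loss_fraction: 0.05
+//   fl_20ms_to_60ms_bandwidth_bps: 22000
+//   fl_60ms_to_20ms_bandwidth_bps: 30000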
+
+message FrameLengthControllerV2 {
+ // FrameLengthControllerV2 chooses the frame length by taking the target
+ // bitrate and subtracting the overhead bitrate to obtain the remaining
+ // bitrate for the payload. The chosen frame length is the shortest possible
+ // where the payload bitrate is more than `min_payload_bitrate_bps`.
+ optional int32 min_payload_bitrate_bps = 1;
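+ //
+ // Worked example (illustrative numbers): with a 30000 bps target and 50
+ // overhead bytes per packet, a 20 ms frame length costs
+ // 50 * 8 * 1000 / 20 = 20000 bps in overhead, leaving 10000 bps for the
+ // payload, while 60 ms costs ~6667 bps, leaving ~23333 bps. If the encoder
+ // offers 20 ms and 60 ms frames and `min_payload_bitrate_bps` is 16000,
+ // 60 ms is the shortest admissible frame length.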
+
+ // If true, uses the stable target bitrate to decide the frame length. This
+ // will result in less frame length toggling but spending more time at longer
+ // frame lengths compared to using the normal target bitrate.
+ optional bool use_slow_adaptation = 2;
+}
+
+message ChannelController {
+ // Uplink bandwidth above which the number of encoded channels should switch
+ // from 1 to 2.
+ optional int32 channel_1_to_2_bandwidth_bps = 1;
+
+ // Uplink bandwidth below which the number of encoded channels should switch
+ // from 2 to 1.
+ optional int32 channel_2_to_1_bandwidth_bps = 2;
+}
+
+message DtxController {
+ // Uplink bandwidth below which DTX should be switched on.
+ optional int32 dtx_enabling_bandwidth_bps = 1;
+
+ // Uplink bandwidth above which DTX should be switched off.
+ optional int32 dtx_disabling_bandwidth_bps = 2;
+}
+
+message BitrateController {
+ // Offset to apply to per-packet overhead when the frame length is increased.
+ optional int32 fl_increase_overhead_offset = 1;
+ // Offset to apply to per-packet overhead when the frame length is decreased.
+ optional int32 fl_decrease_overhead_offset = 2;
+}
+
+message Controller {
+ message ScoringPoint {
+ // `ScoringPoint` is a point in the space of network conditions. It is used
+ // for comparing the significance of controllers.
+ optional int32 uplink_bandwidth_bps = 1;
+ optional float uplink_packet_loss_fraction = 2;
+ }
+
+ // The distance from `scoring_point` to a given network condition defines
+ // the significance of this controller with respect to that network
+ // condition.
+ // Shorter distance means higher significance. The significances of
+ // controllers determine their order in the processing pipeline. Controllers
+ // without `scoring_point` follow their default order in
+ // `ControllerManager::controllers`.
+ optional ScoringPoint scoring_point = 1;
+
+ oneof controller {
+ FecController fec_controller = 21;
+ FrameLengthController frame_length_controller = 22;
+ ChannelController channel_controller = 23;
+ DtxController dtx_controller = 24;
+ BitrateController bitrate_controller = 25;
+ FecControllerRplrBased fec_controller_rplr_based = 26;
+ FrameLengthControllerV2 frame_length_controller_v2 = 27;
+ }
+}
+
+message ControllerManager {
+ repeated Controller controllers = 1;
+
+ // Minimum time since the last reordering before a new reordering can occur.
+ optional int32 min_reordering_time_ms = 2;
+
+ // Minimum squared distance from the last scoring point before a new
+ // reordering can occur.
+ optional float min_reordering_squared_distance = 3;
+}
diff --git a/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/controller.cc b/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/controller.cc
new file mode 100644
index 0000000000..5e2dc859bf
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/controller.cc
@@ -0,0 +1,19 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/audio_network_adaptor/controller.h"
+
+namespace webrtc {
+
+Controller::NetworkMetrics::NetworkMetrics() = default;
+
+Controller::NetworkMetrics::~NetworkMetrics() = default;
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/controller.h b/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/controller.h
new file mode 100644
index 0000000000..b70ada01a4
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/controller.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_CONTROLLER_H_
+#define MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_CONTROLLER_H_
+
+#include "absl/types/optional.h"
+#include "modules/audio_coding/audio_network_adaptor/include/audio_network_adaptor.h"
+
+namespace webrtc {
+
+class Controller {
+ public:
+ struct NetworkMetrics {
+ NetworkMetrics();
+ ~NetworkMetrics();
+ absl::optional<int> uplink_bandwidth_bps;
+ absl::optional<float> uplink_packet_loss_fraction;
+ absl::optional<int> target_audio_bitrate_bps;
+ absl::optional<int> rtt_ms;
+ absl::optional<size_t> overhead_bytes_per_packet;
+ };
+
+ virtual ~Controller() = default;
+
+ // Informs this controller of a network metrics update. Any non-empty field
+ // indicates an update of the corresponding network metric.
+ virtual void UpdateNetworkMetrics(const NetworkMetrics& network_metrics) = 0;
+
+ virtual void MakeDecision(AudioEncoderRuntimeConfig* config) = 0;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_CONTROLLER_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/controller_manager.cc b/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/controller_manager.cc
new file mode 100644
index 0000000000..42dd8a8786
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/controller_manager.cc
@@ -0,0 +1,454 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/audio_network_adaptor/controller_manager.h"
+
+#include <cmath>
+#include <memory>
+#include <string>
+#include <utility>
+
+#include "absl/strings/string_view.h"
+#include "modules/audio_coding/audio_network_adaptor/bitrate_controller.h"
+#include "modules/audio_coding/audio_network_adaptor/channel_controller.h"
+#include "modules/audio_coding/audio_network_adaptor/debug_dump_writer.h"
+#include "modules/audio_coding/audio_network_adaptor/dtx_controller.h"
+#include "modules/audio_coding/audio_network_adaptor/fec_controller_plr_based.h"
+#include "modules/audio_coding/audio_network_adaptor/frame_length_controller.h"
+#include "modules/audio_coding/audio_network_adaptor/frame_length_controller_v2.h"
+#include "modules/audio_coding/audio_network_adaptor/util/threshold_curve.h"
+#include "rtc_base/ignore_wundef.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/time_utils.h"
+
+#if WEBRTC_ENABLE_PROTOBUF
+RTC_PUSH_IGNORING_WUNDEF()
+#ifdef WEBRTC_ANDROID_PLATFORM_BUILD
+#include "external/webrtc/webrtc/modules/audio_coding/audio_network_adaptor/config.pb.h"
+#else
+#include "modules/audio_coding/audio_network_adaptor/config.pb.h"
+#endif
+RTC_POP_IGNORING_WUNDEF()
+#endif
+
+namespace webrtc {
+
+namespace {
+
+#if WEBRTC_ENABLE_PROTOBUF
+
+std::unique_ptr<FecControllerPlrBased> CreateFecControllerPlrBased(
+ const audio_network_adaptor::config::FecController& config,
+ bool initial_fec_enabled) {
+ RTC_CHECK(config.has_fec_enabling_threshold());
+ RTC_CHECK(config.has_fec_disabling_threshold());
+ RTC_CHECK(config.has_time_constant_ms());
+
+ auto& fec_enabling_threshold = config.fec_enabling_threshold();
+ RTC_CHECK(fec_enabling_threshold.has_low_bandwidth_bps());
+ RTC_CHECK(fec_enabling_threshold.has_low_bandwidth_packet_loss());
+ RTC_CHECK(fec_enabling_threshold.has_high_bandwidth_bps());
+ RTC_CHECK(fec_enabling_threshold.has_high_bandwidth_packet_loss());
+
+ auto& fec_disabling_threshold = config.fec_disabling_threshold();
+ RTC_CHECK(fec_disabling_threshold.has_low_bandwidth_bps());
+ RTC_CHECK(fec_disabling_threshold.has_low_bandwidth_packet_loss());
+ RTC_CHECK(fec_disabling_threshold.has_high_bandwidth_bps());
+ RTC_CHECK(fec_disabling_threshold.has_high_bandwidth_packet_loss());
+
+ return std::unique_ptr<FecControllerPlrBased>(
+ new FecControllerPlrBased(FecControllerPlrBased::Config(
+ initial_fec_enabled,
+ ThresholdCurve(fec_enabling_threshold.low_bandwidth_bps(),
+ fec_enabling_threshold.low_bandwidth_packet_loss(),
+ fec_enabling_threshold.high_bandwidth_bps(),
+ fec_enabling_threshold.high_bandwidth_packet_loss()),
+ ThresholdCurve(fec_disabling_threshold.low_bandwidth_bps(),
+ fec_disabling_threshold.low_bandwidth_packet_loss(),
+ fec_disabling_threshold.high_bandwidth_bps(),
+ fec_disabling_threshold.high_bandwidth_packet_loss()),
+ config.time_constant_ms())));
+}
+
+std::unique_ptr<FrameLengthController> CreateFrameLengthController(
+ const audio_network_adaptor::config::FrameLengthController& config,
+ rtc::ArrayView<const int> encoder_frame_lengths_ms,
+ int initial_frame_length_ms,
+ int min_encoder_bitrate_bps) {
+ RTC_CHECK(config.has_fl_increasing_packet_loss_fraction());
+ RTC_CHECK(config.has_fl_decreasing_packet_loss_fraction());
+
+ std::map<FrameLengthController::Config::FrameLengthChange, int>
+ fl_changing_bandwidths_bps;
+
+ if (config.has_fl_20ms_to_60ms_bandwidth_bps()) {
+ fl_changing_bandwidths_bps.insert(
+ std::make_pair(FrameLengthController::Config::FrameLengthChange(20, 60),
+ config.fl_20ms_to_60ms_bandwidth_bps()));
+ }
+
+ if (config.has_fl_60ms_to_20ms_bandwidth_bps()) {
+ fl_changing_bandwidths_bps.insert(
+ std::make_pair(FrameLengthController::Config::FrameLengthChange(60, 20),
+ config.fl_60ms_to_20ms_bandwidth_bps()));
+ }
+
+ if (config.has_fl_20ms_to_40ms_bandwidth_bps()) {
+ fl_changing_bandwidths_bps.insert(
+ std::make_pair(FrameLengthController::Config::FrameLengthChange(20, 40),
+ config.fl_20ms_to_40ms_bandwidth_bps()));
+ }
+
+ if (config.has_fl_40ms_to_20ms_bandwidth_bps()) {
+ fl_changing_bandwidths_bps.insert(
+ std::make_pair(FrameLengthController::Config::FrameLengthChange(40, 20),
+ config.fl_40ms_to_20ms_bandwidth_bps()));
+ }
+
+ if (config.has_fl_40ms_to_60ms_bandwidth_bps()) {
+ fl_changing_bandwidths_bps.insert(
+ std::make_pair(FrameLengthController::Config::FrameLengthChange(40, 60),
+ config.fl_40ms_to_60ms_bandwidth_bps()));
+ }
+
+ if (config.has_fl_60ms_to_40ms_bandwidth_bps()) {
+ fl_changing_bandwidths_bps.insert(
+ std::make_pair(FrameLengthController::Config::FrameLengthChange(60, 40),
+ config.fl_60ms_to_40ms_bandwidth_bps()));
+ }
+
+ if (config.has_fl_60ms_to_120ms_bandwidth_bps()) {
+ fl_changing_bandwidths_bps.insert(std::make_pair(
+ FrameLengthController::Config::FrameLengthChange(60, 120),
+ config.fl_60ms_to_120ms_bandwidth_bps()));
+ }
+
+ if (config.has_fl_120ms_to_60ms_bandwidth_bps()) {
+ fl_changing_bandwidths_bps.insert(std::make_pair(
+ FrameLengthController::Config::FrameLengthChange(120, 60),
+ config.fl_120ms_to_60ms_bandwidth_bps()));
+ }
+
+ int fl_increase_overhead_offset = 0;
+ if (config.has_fl_increase_overhead_offset()) {
+ fl_increase_overhead_offset = config.fl_increase_overhead_offset();
+ }
+ int fl_decrease_overhead_offset = 0;
+ if (config.has_fl_decrease_overhead_offset()) {
+ fl_decrease_overhead_offset = config.fl_decrease_overhead_offset();
+ }
+
+ FrameLengthController::Config ctor_config(
+ std::set<int>(), initial_frame_length_ms, min_encoder_bitrate_bps,
+ config.fl_increasing_packet_loss_fraction(),
+ config.fl_decreasing_packet_loss_fraction(), fl_increase_overhead_offset,
+ fl_decrease_overhead_offset, std::move(fl_changing_bandwidths_bps));
+
+ for (auto frame_length : encoder_frame_lengths_ms)
+ ctor_config.encoder_frame_lengths_ms.insert(frame_length);
+
+ return std::unique_ptr<FrameLengthController>(
+ new FrameLengthController(ctor_config));
+}
+
+std::unique_ptr<ChannelController> CreateChannelController(
+ const audio_network_adaptor::config::ChannelController& config,
+ size_t num_encoder_channels,
+ size_t intial_channels_to_encode) {
+ RTC_CHECK(config.has_channel_1_to_2_bandwidth_bps());
+ RTC_CHECK(config.has_channel_2_to_1_bandwidth_bps());
+
+ return std::unique_ptr<ChannelController>(new ChannelController(
+ ChannelController::Config(num_encoder_channels, intial_channels_to_encode,
+ config.channel_1_to_2_bandwidth_bps(),
+ config.channel_2_to_1_bandwidth_bps())));
+}
+
+std::unique_ptr<DtxController> CreateDtxController(
+ const audio_network_adaptor::config::DtxController& dtx_config,
+ bool initial_dtx_enabled) {
+ RTC_CHECK(dtx_config.has_dtx_enabling_bandwidth_bps());
+ RTC_CHECK(dtx_config.has_dtx_disabling_bandwidth_bps());
+
+ return std::unique_ptr<DtxController>(new DtxController(DtxController::Config(
+ initial_dtx_enabled, dtx_config.dtx_enabling_bandwidth_bps(),
+ dtx_config.dtx_disabling_bandwidth_bps())));
+}
+
+using audio_network_adaptor::BitrateController;
+std::unique_ptr<BitrateController> CreateBitrateController(
+ const audio_network_adaptor::config::BitrateController& bitrate_config,
+ int initial_bitrate_bps,
+ int initial_frame_length_ms) {
+ int fl_increase_overhead_offset = 0;
+ if (bitrate_config.has_fl_increase_overhead_offset()) {
+ fl_increase_overhead_offset = bitrate_config.fl_increase_overhead_offset();
+ }
+ int fl_decrease_overhead_offset = 0;
+ if (bitrate_config.has_fl_decrease_overhead_offset()) {
+ fl_decrease_overhead_offset = bitrate_config.fl_decrease_overhead_offset();
+ }
+ return std::unique_ptr<BitrateController>(
+ new BitrateController(BitrateController::Config(
+ initial_bitrate_bps, initial_frame_length_ms,
+ fl_increase_overhead_offset, fl_decrease_overhead_offset)));
+}
+
+std::unique_ptr<FrameLengthControllerV2> CreateFrameLengthControllerV2(
+ const audio_network_adaptor::config::FrameLengthControllerV2& config,
+ rtc::ArrayView<const int> encoder_frame_lengths_ms) {
+ return std::make_unique<FrameLengthControllerV2>(
+ encoder_frame_lengths_ms, config.min_payload_bitrate_bps(),
+ config.use_slow_adaptation());
+}
+#endif // WEBRTC_ENABLE_PROTOBUF
+
+} // namespace
+
+ControllerManagerImpl::Config::Config(int min_reordering_time_ms,
+ float min_reordering_squared_distance)
+ : min_reordering_time_ms(min_reordering_time_ms),
+ min_reordering_squared_distance(min_reordering_squared_distance) {}
+
+ControllerManagerImpl::Config::~Config() = default;
+
+std::unique_ptr<ControllerManager> ControllerManagerImpl::Create(
+ absl::string_view config_string,
+ size_t num_encoder_channels,
+ rtc::ArrayView<const int> encoder_frame_lengths_ms,
+ int min_encoder_bitrate_bps,
+ size_t intial_channels_to_encode,
+ int initial_frame_length_ms,
+ int initial_bitrate_bps,
+ bool initial_fec_enabled,
+ bool initial_dtx_enabled) {
+ return Create(config_string, num_encoder_channels, encoder_frame_lengths_ms,
+ min_encoder_bitrate_bps, intial_channels_to_encode,
+ initial_frame_length_ms, initial_bitrate_bps,
+ initial_fec_enabled, initial_dtx_enabled, nullptr);
+}
+
+std::unique_ptr<ControllerManager> ControllerManagerImpl::Create(
+ absl::string_view config_string,
+ size_t num_encoder_channels,
+ rtc::ArrayView<const int> encoder_frame_lengths_ms,
+ int min_encoder_bitrate_bps,
+ size_t intial_channels_to_encode,
+ int initial_frame_length_ms,
+ int initial_bitrate_bps,
+ bool initial_fec_enabled,
+ bool initial_dtx_enabled,
+ DebugDumpWriter* debug_dump_writer) {
+#if WEBRTC_ENABLE_PROTOBUF
+ audio_network_adaptor::config::ControllerManager controller_manager_config;
+ RTC_CHECK(
+ controller_manager_config.ParseFromString(std::string(config_string)));
+ if (debug_dump_writer)
+ debug_dump_writer->DumpControllerManagerConfig(controller_manager_config,
+ rtc::TimeMillis());
+
+ std::vector<std::unique_ptr<Controller>> controllers;
+ std::map<const Controller*, std::pair<int, float>> scoring_points;
+
+ for (int i = 0; i < controller_manager_config.controllers_size(); ++i) {
+ auto& controller_config = controller_manager_config.controllers(i);
+ std::unique_ptr<Controller> controller;
+ switch (controller_config.controller_case()) {
+ case audio_network_adaptor::config::Controller::kFecController:
+ controller = CreateFecControllerPlrBased(
+ controller_config.fec_controller(), initial_fec_enabled);
+ break;
+ case audio_network_adaptor::config::Controller::kFecControllerRplrBased:
+ // FecControllerRplrBased has been removed and can't be used anymore.
+ RTC_DCHECK_NOTREACHED();
+ continue;
+ case audio_network_adaptor::config::Controller::kFrameLengthController:
+ controller = CreateFrameLengthController(
+ controller_config.frame_length_controller(),
+ encoder_frame_lengths_ms, initial_frame_length_ms,
+ min_encoder_bitrate_bps);
+ break;
+ case audio_network_adaptor::config::Controller::kChannelController:
+ controller = CreateChannelController(
+ controller_config.channel_controller(), num_encoder_channels,
+ intial_channels_to_encode);
+ break;
+ case audio_network_adaptor::config::Controller::kDtxController:
+ controller = CreateDtxController(controller_config.dtx_controller(),
+ initial_dtx_enabled);
+ break;
+ case audio_network_adaptor::config::Controller::kBitrateController:
+ controller = CreateBitrateController(
+ controller_config.bitrate_controller(), initial_bitrate_bps,
+ initial_frame_length_ms);
+ break;
+ case audio_network_adaptor::config::Controller::kFrameLengthControllerV2:
+ controller = CreateFrameLengthControllerV2(
+ controller_config.frame_length_controller_v2(),
+ encoder_frame_lengths_ms);
+ break;
+ default:
+ RTC_DCHECK_NOTREACHED();
+ }
+ if (controller_config.has_scoring_point()) {
+ auto& scoring_point = controller_config.scoring_point();
+ RTC_CHECK(scoring_point.has_uplink_bandwidth_bps());
+ RTC_CHECK(scoring_point.has_uplink_packet_loss_fraction());
+ scoring_points[controller.get()] = std::make_pair<int, float>(
+ scoring_point.uplink_bandwidth_bps(),
+ scoring_point.uplink_packet_loss_fraction());
+ }
+ controllers.push_back(std::move(controller));
+ }
+
+ if (scoring_points.size() == 0) {
+ return std::unique_ptr<ControllerManagerImpl>(
+ new ControllerManagerImpl(ControllerManagerImpl::Config(0, 0),
+ std::move(controllers), scoring_points));
+ } else {
+ RTC_CHECK(controller_manager_config.has_min_reordering_time_ms());
+ RTC_CHECK(controller_manager_config.has_min_reordering_squared_distance());
+ return std::unique_ptr<ControllerManagerImpl>(new ControllerManagerImpl(
+ ControllerManagerImpl::Config(
+ controller_manager_config.min_reordering_time_ms(),
+ controller_manager_config.min_reordering_squared_distance()),
+ std::move(controllers), scoring_points));
+ }
+
+#else
+ RTC_DCHECK_NOTREACHED();
+ return nullptr;
+#endif // WEBRTC_ENABLE_PROTOBUF
+}
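+
+// Usage sketch (hypothetical caller, for illustration only): build a config
+// with the generated protobuf API, serialize it, and pass it to Create().
+//
+//   audio_network_adaptor::config::ControllerManager config;
+//   config.add_controllers()->mutable_bitrate_controller();
+//   std::string config_string;
+//   config.SerializeToString(&config_string);
+//   const std::vector<int> frame_lengths = {20, 60};
+//   auto manager = ControllerManagerImpl::Create(
+//       config_string, /*num_encoder_channels=*/1, frame_lengths,
+//       /*min_encoder_bitrate_bps=*/6000, /*intial_channels_to_encode=*/1,
+//       /*initial_frame_length_ms=*/20, /*initial_bitrate_bps=*/32000,
+//       /*initial_fec_enabled=*/false, /*initial_dtx_enabled=*/false);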
+
+ControllerManagerImpl::ControllerManagerImpl(const Config& config)
+ : ControllerManagerImpl(
+ config,
+ std::vector<std::unique_ptr<Controller>>(),
+ std::map<const Controller*, std::pair<int, float>>()) {}
+
+ControllerManagerImpl::ControllerManagerImpl(
+ const Config& config,
+ std::vector<std::unique_ptr<Controller>> controllers,
+ const std::map<const Controller*, std::pair<int, float>>& scoring_points)
+ : config_(config),
+ controllers_(std::move(controllers)),
+ last_reordering_time_ms_(absl::nullopt),
+ last_scoring_point_(0, 0.0) {
+ for (auto& controller : controllers_)
+ default_sorted_controllers_.push_back(controller.get());
+ sorted_controllers_ = default_sorted_controllers_;
+ for (auto& controller_point : scoring_points) {
+ controller_scoring_points_.insert(std::make_pair(
+ controller_point.first, ScoringPoint(controller_point.second.first,
+ controller_point.second.second)));
+ }
+}
+
+ControllerManagerImpl::~ControllerManagerImpl() = default;
+
+std::vector<Controller*> ControllerManagerImpl::GetSortedControllers(
+ const Controller::NetworkMetrics& metrics) {
+ if (controller_scoring_points_.size() == 0)
+ return default_sorted_controllers_;
+
+ if (!metrics.uplink_bandwidth_bps || !metrics.uplink_packet_loss_fraction)
+ return sorted_controllers_;
+
+ const int64_t now_ms = rtc::TimeMillis();
+ if (last_reordering_time_ms_ &&
+ now_ms - *last_reordering_time_ms_ < config_.min_reordering_time_ms)
+ return sorted_controllers_;
+
+ ScoringPoint scoring_point(*metrics.uplink_bandwidth_bps,
+ *metrics.uplink_packet_loss_fraction);
+
+ if (last_reordering_time_ms_ &&
+ last_scoring_point_.SquaredDistanceTo(scoring_point) <
+ config_.min_reordering_squared_distance)
+ return sorted_controllers_;
+
+ // Sort controllers by the distance from `scoring_point` to each
+ // controller's scoring point.
+ //
+ // Controllers that are not associated with any scoring point are treated
+ // as if
+ // 1) they are less important than any controller that has a scoring point,
+ // 2) they are equally important to any other controller without a scoring
+ // point, and their relative order follows `default_sorted_controllers_`.
+ std::vector<Controller*> sorted_controllers(default_sorted_controllers_);
+ std::stable_sort(
+ sorted_controllers.begin(), sorted_controllers.end(),
+ [this, &scoring_point](const Controller* lhs, const Controller* rhs) {
+ auto lhs_scoring_point = controller_scoring_points_.find(lhs);
+ auto rhs_scoring_point = controller_scoring_points_.find(rhs);
+
+ if (lhs_scoring_point == controller_scoring_points_.end())
+ return false;
+
+ if (rhs_scoring_point == controller_scoring_points_.end())
+ return true;
+
+ return lhs_scoring_point->second.SquaredDistanceTo(scoring_point) <
+ rhs_scoring_point->second.SquaredDistanceTo(scoring_point);
+ });
+
+ if (sorted_controllers_ != sorted_controllers) {
+ sorted_controllers_ = sorted_controllers;
+ last_reordering_time_ms_ = now_ms;
+ last_scoring_point_ = scoring_point;
+ }
+ return sorted_controllers_;
+}
+
+std::vector<Controller*> ControllerManagerImpl::GetControllers() const {
+ return default_sorted_controllers_;
+}
+
+ControllerManagerImpl::ScoringPoint::ScoringPoint(
+ int uplink_bandwidth_bps,
+ float uplink_packet_loss_fraction)
+ : uplink_bandwidth_bps(uplink_bandwidth_bps),
+ uplink_packet_loss_fraction(uplink_packet_loss_fraction) {}
+
+namespace {
+
+constexpr int kMinUplinkBandwidthBps = 0;
+constexpr int kMaxUplinkBandwidthBps = 120000;
+
+float NormalizeUplinkBandwidth(int uplink_bandwidth_bps) {
+ uplink_bandwidth_bps =
+ std::min(kMaxUplinkBandwidthBps,
+ std::max(kMinUplinkBandwidthBps, uplink_bandwidth_bps));
+ return static_cast<float>(uplink_bandwidth_bps - kMinUplinkBandwidthBps) /
+ (kMaxUplinkBandwidthBps - kMinUplinkBandwidthBps);
+}
+
+float NormalizePacketLossFraction(float uplink_packet_loss_fraction) {
+ // `uplink_packet_loss_fraction` is seldom larger than 0.3, so we scale it up
+ // by 3.3333f.
+ return std::min(uplink_packet_loss_fraction * 3.3333f, 1.0f);
+}
+
+} // namespace
+
+float ControllerManagerImpl::ScoringPoint::SquaredDistanceTo(
+ const ScoringPoint& scoring_point) const {
+ float diff_normalized_bitrate_bps =
+ NormalizeUplinkBandwidth(scoring_point.uplink_bandwidth_bps) -
+ NormalizeUplinkBandwidth(uplink_bandwidth_bps);
+ float diff_normalized_packet_loss =
+ NormalizePacketLossFraction(scoring_point.uplink_packet_loss_fraction) -
+ NormalizePacketLossFraction(uplink_packet_loss_fraction);
+ return std::pow(diff_normalized_bitrate_bps, 2) +
+ std::pow(diff_normalized_packet_loss, 2);
+}
+
+} // namespace webrtc
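To make the scoring math above concrete, here is a minimal standalone sketch (not part of these sources) that mirrors NormalizeUplinkBandwidth, NormalizePacketLossFraction, and ScoringPoint::SquaredDistanceTo; the sample metrics are invented, and the threshold matches kMinReorderingSquareDistance from the unit tests below.

#include <algorithm>
#include <cstdio>

namespace {

constexpr int kMinBps = 0;       // kMinUplinkBandwidthBps above.
constexpr int kMaxBps = 120000;  // kMaxUplinkBandwidthBps above.

float NormalizeBandwidth(int bps) {
  bps = std::min(kMaxBps, std::max(kMinBps, bps));
  return static_cast<float>(bps - kMinBps) / (kMaxBps - kMinBps);
}

float NormalizeLoss(float fraction) {
  return std::min(fraction * 3.3333f, 1.0f);
}

float SquaredDistance(int bps_a, float loss_a, int bps_b, float loss_b) {
  const float db = NormalizeBandwidth(bps_a) - NormalizeBandwidth(bps_b);
  const float dl = NormalizeLoss(loss_a) - NormalizeLoss(loss_b);
  return db * db + dl * dl;
}

}  // namespace

int main() {
  // The last reorder happened at (15000 bps, 20% loss); the metrics now read
  // (16200 bps, 20% loss). The bandwidth moved 1200/120000 = 1% of the
  // normalized axis, so the squared distance is about (0.01)^2 = 1e-4.
  const float d2 = SquaredDistance(16200, 0.2f, 15000, 0.2f);
  const float min_reordering_squared_distance = 1e-4f;
  std::printf("d^2 = %g -> far enough to reorder: %d\n", d2,
              d2 >= min_reordering_squared_distance);
  return 0;
}

Note that GetSortedControllers additionally requires min_reordering_time_ms to have elapsed since the previous reorder before it re-sorts, so both the time gate and the distance gate must pass.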
diff --git a/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/controller_manager.h b/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/controller_manager.h
new file mode 100644
index 0000000000..47e8e0f5a0
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/controller_manager.h
@@ -0,0 +1,124 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_CONTROLLER_MANAGER_H_
+#define MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_CONTROLLER_MANAGER_H_
+
+#include <map>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "absl/strings/string_view.h"
+#include "modules/audio_coding/audio_network_adaptor/controller.h"
+
+namespace webrtc {
+
+class DebugDumpWriter;
+
+class ControllerManager {
+ public:
+ virtual ~ControllerManager() = default;
+
+ // Sort controllers based on their significance.
+ virtual std::vector<Controller*> GetSortedControllers(
+ const Controller::NetworkMetrics& metrics) = 0;
+
+ virtual std::vector<Controller*> GetControllers() const = 0;
+};
+
+class ControllerManagerImpl final : public ControllerManager {
+ public:
+ struct Config {
+ Config(int min_reordering_time_ms, float min_reordering_squared_distance);
+ ~Config();
+ // Least time since last reordering for a new reordering to be made.
+ int min_reordering_time_ms;
+ // Least squared distance from last scoring point for a new reordering to be
+ // made.
+ float min_reordering_squared_distance;
+ };
+
+ static std::unique_ptr<ControllerManager> Create(
+ absl::string_view config_string,
+ size_t num_encoder_channels,
+ rtc::ArrayView<const int> encoder_frame_lengths_ms,
+ int min_encoder_bitrate_bps,
+ size_t initial_channels_to_encode,
+ int initial_frame_length_ms,
+ int initial_bitrate_bps,
+ bool initial_fec_enabled,
+ bool initial_dtx_enabled);
+
+ static std::unique_ptr<ControllerManager> Create(
+ absl::string_view config_string,
+ size_t num_encoder_channels,
+ rtc::ArrayView<const int> encoder_frame_lengths_ms,
+ int min_encoder_bitrate_bps,
+ size_t initial_channels_to_encode,
+ int initial_frame_length_ms,
+ int initial_bitrate_bps,
+ bool initial_fec_enabled,
+ bool initial_dtx_enabled,
+ DebugDumpWriter* debug_dump_writer);
+
+ explicit ControllerManagerImpl(const Config& config);
+
+ // Dependency injection for testing.
+ ControllerManagerImpl(
+ const Config& config,
+ std::vector<std::unique_ptr<Controller>> controllers,
+ const std::map<const Controller*, std::pair<int, float>>&
+ characteristic_points);
+
+ ~ControllerManagerImpl() override;
+
+ ControllerManagerImpl(const ControllerManagerImpl&) = delete;
+ ControllerManagerImpl& operator=(const ControllerManagerImpl&) = delete;
+
+ // Sort controllers based on their significance.
+ std::vector<Controller*> GetSortedControllers(
+ const Controller::NetworkMetrics& metrics) override;
+
+ std::vector<Controller*> GetControllers() const override;
+
+ private:
+ // A scoring point is the subset of NetworkMetrics used for comparing the
+ // significance of controllers.
+ struct ScoringPoint {
+ // TODO(eladalon): Do we want to experiment with RPLR-based scoring?
+ ScoringPoint(int uplink_bandwidth_bps, float uplink_packet_loss_fraction);
+
+ // Calculates the squared distance between two scoring points, with each
+ // coordinate first normalized to [0, 1].
+ float SquaredDistanceTo(const ScoringPoint& scoring_point) const;
+
+ int uplink_bandwidth_bps;
+ float uplink_packet_loss_fraction;
+ };
+
+ const Config config_;
+
+ std::vector<std::unique_ptr<Controller>> controllers_;
+
+ absl::optional<int64_t> last_reordering_time_ms_;
+ ScoringPoint last_scoring_point_;
+
+ std::vector<Controller*> default_sorted_controllers_;
+
+ std::vector<Controller*> sorted_controllers_;
+
+ // `controller_scoring_points_` maps each controller to its scoring point,
+ // if it has one.
+ std::map<const Controller*, ScoringPoint> controller_scoring_points_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_CONTROLLER_MANAGER_H_
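As a usage sketch of the dependency-injection constructor declared above (the stub controller and all values are hypothetical; it assumes the Controller interface consists of the UpdateNetworkMetrics/MakeDecision overrides seen in dtx_controller.h later in this patch):

#include <map>
#include <memory>
#include <utility>
#include <vector>

#include "modules/audio_coding/audio_network_adaptor/controller_manager.h"

namespace webrtc {

// Hypothetical controller that always requests a fixed bitrate.
class ConstantBitrateController : public Controller {
 public:
  explicit ConstantBitrateController(int bitrate_bps)
      : bitrate_bps_(bitrate_bps) {}
  void UpdateNetworkMetrics(const NetworkMetrics&) override {}
  void MakeDecision(AudioEncoderRuntimeConfig* config) override {
    config->bitrate_bps = bitrate_bps_;
  }

 private:
  const int bitrate_bps_;
};

std::unique_ptr<ControllerManager> BuildManagerForTest() {
  std::vector<std::unique_ptr<Controller>> controllers;
  controllers.push_back(std::make_unique<ConstantBitrateController>(24000));
  // Associate the controller with a scoring point at 15 kbps / 20% loss.
  std::map<const Controller*, std::pair<int, float>> scoring_points;
  scoring_points[controllers[0].get()] = std::make_pair(15000, 0.2f);
  return std::make_unique<ControllerManagerImpl>(
      ControllerManagerImpl::Config(/*min_reordering_time_ms=*/200,
                                    /*min_reordering_squared_distance=*/1e-4f),
      std::move(controllers), scoring_points);
}

}  // namespace webrtc

The mock-based unit tests that follow exercise the same constructor with four controllers and two scoring points.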
diff --git a/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/controller_manager_unittest.cc b/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/controller_manager_unittest.cc
new file mode 100644
index 0000000000..3e6ecf6def
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/controller_manager_unittest.cc
@@ -0,0 +1,486 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/audio_network_adaptor/controller_manager.h"
+
+#include <algorithm>
+#include <map>
+#include <string>
+#include <utility>
+
+#include "absl/strings/string_view.h"
+#include "modules/audio_coding/audio_network_adaptor/mock/mock_controller.h"
+#include "modules/audio_coding/audio_network_adaptor/mock/mock_debug_dump_writer.h"
+#include "rtc_base/fake_clock.h"
+#include "rtc_base/ignore_wundef.h"
+#include "test/gtest.h"
+
+#if WEBRTC_ENABLE_PROTOBUF
+RTC_PUSH_IGNORING_WUNDEF()
+#ifdef WEBRTC_ANDROID_PLATFORM_BUILD
+#include "external/webrtc/webrtc/modules/audio_coding/audio_network_adaptor/config.pb.h"
+#else
+#include "modules/audio_coding/audio_network_adaptor/config.pb.h"
+#endif
+RTC_POP_IGNORING_WUNDEF()
+#endif
+
+namespace webrtc {
+
+using ::testing::_;
+using ::testing::NiceMock;
+
+namespace {
+
+constexpr size_t kNumControllers = 4;
+constexpr int kCharacteristicBandwidthBps[2] = {15000, 0};
+constexpr float kCharacteristicPacketLossFraction[2] = {0.2f, 0.0f};
+constexpr int kMinReorderingTimeMs = 200;
+constexpr int kFactor = 100;
+constexpr float kMinReorderingSquareDistance = 1.0f / kFactor / kFactor;
+
+// `kMinUplinkBandwidthBps` and `kMaxUplinkBandwidthBps` are copied from
+// controller_manager.cc
+constexpr int kMinUplinkBandwidthBps = 0;
+constexpr int kMaxUplinkBandwidthBps = 120000;
+constexpr int kMinBandwidthChangeBps =
+ (kMaxUplinkBandwidthBps - kMinUplinkBandwidthBps) / kFactor;
+
+struct ControllerManagerStates {
+ std::unique_ptr<ControllerManager> controller_manager;
+ std::vector<MockController*> mock_controllers;
+};
+
+ControllerManagerStates CreateControllerManager() {
+ ControllerManagerStates states;
+ std::vector<std::unique_ptr<Controller>> controllers;
+ std::map<const Controller*, std::pair<int, float>> characteristic_points;
+ for (size_t i = 0; i < kNumControllers; ++i) {
+ auto controller =
+ std::unique_ptr<MockController>(new NiceMock<MockController>());
+ EXPECT_CALL(*controller, Die());
+ states.mock_controllers.push_back(controller.get());
+ controllers.push_back(std::move(controller));
+ }
+
+ // Assign characteristic points to the last two controllers.
+ characteristic_points[states.mock_controllers[kNumControllers - 2]] =
+ std::make_pair(kCharacteristicBandwidthBps[0],
+ kCharacteristicPacketLossFraction[0]);
+ characteristic_points[states.mock_controllers[kNumControllers - 1]] =
+ std::make_pair(kCharacteristicBandwidthBps[1],
+ kCharacteristicPacketLossFraction[1]);
+
+ states.controller_manager.reset(new ControllerManagerImpl(
+ ControllerManagerImpl::Config(kMinReorderingTimeMs,
+ kMinReorderingSquareDistance),
+ std::move(controllers), characteristic_points));
+ return states;
+}
+
+ // `expected_order` contains the expected indices of all controllers in the
+ // vector returned by GetSortedControllers(). A negative index means that we
+ // do not care about that controller's exact place, but we do check that it
+ // exists in the vector.
+void CheckControllersOrder(
+ ControllerManagerStates* states,
+ const absl::optional<int>& uplink_bandwidth_bps,
+ const absl::optional<float>& uplink_packet_loss_fraction,
+ const std::vector<int>& expected_order) {
+ RTC_DCHECK_EQ(kNumControllers, expected_order.size());
+ Controller::NetworkMetrics metrics;
+ metrics.uplink_bandwidth_bps = uplink_bandwidth_bps;
+ metrics.uplink_packet_loss_fraction = uplink_packet_loss_fraction;
+ auto check = states->controller_manager->GetSortedControllers(metrics);
+ EXPECT_EQ(states->mock_controllers.size(), check.size());
+ for (size_t i = 0; i < states->mock_controllers.size(); ++i) {
+ if (expected_order[i] >= 0) {
+ EXPECT_EQ(states->mock_controllers[i], check[expected_order[i]]);
+ } else {
+ EXPECT_NE(check.end(), std::find(check.begin(), check.end(),
+ states->mock_controllers[i]));
+ }
+ }
+}
+
+} // namespace
+
+TEST(ControllerManagerTest, GetControllersReturnAllControllers) {
+ auto states = CreateControllerManager();
+ auto check = states.controller_manager->GetControllers();
+ // Verify that controllers in `check` are one-to-one mapped to those in
+ // `mock_controllers_`.
+ EXPECT_EQ(states.mock_controllers.size(), check.size());
+ for (auto& controller : check)
+ EXPECT_NE(states.mock_controllers.end(),
+ std::find(states.mock_controllers.begin(),
+ states.mock_controllers.end(), controller));
+}
+
+TEST(ControllerManagerTest, ControllersInDefaultOrderOnEmptyNetworkMetrics) {
+ auto states = CreateControllerManager();
+ // The network metrics are empty, so the controllers are expected to follow
+ // the default order.
+ CheckControllersOrder(&states, absl::nullopt, absl::nullopt, {0, 1, 2, 3});
+}
+
+TEST(ControllerManagerTest, ControllersWithoutCharPointAtEndAndInDefaultOrder) {
+ auto states = CreateControllerManager();
+ CheckControllersOrder(&states, 0, 0.0,
+ {kNumControllers - 2, kNumControllers - 1, -1, -1});
+}
+
+TEST(ControllerManagerTest, ControllersWithCharPointDependOnNetworkMetrics) {
+ auto states = CreateControllerManager();
+ CheckControllersOrder(&states, kCharacteristicBandwidthBps[1],
+ kCharacteristicPacketLossFraction[1],
+ {kNumControllers - 2, kNumControllers - 1, 1, 0});
+}
+
+TEST(ControllerManagerTest, DoNotReorderBeforeMinReorderingTime) {
+ rtc::ScopedFakeClock fake_clock;
+ auto states = CreateControllerManager();
+ CheckControllersOrder(&states, kCharacteristicBandwidthBps[0],
+ kCharacteristicPacketLossFraction[0],
+ {kNumControllers - 2, kNumControllers - 1, 0, 1});
+ fake_clock.AdvanceTime(TimeDelta::Millis(kMinReorderingTimeMs - 1));
+ // Move uplink bandwidth and packet loss fraction to the other controller's
+ // characteristic point, which would cause controller manager to reorder the
+ // controllers if time had reached min reordering time.
+ CheckControllersOrder(&states, kCharacteristicBandwidthBps[1],
+ kCharacteristicPacketLossFraction[1],
+ {kNumControllers - 2, kNumControllers - 1, 0, 1});
+}
+
+TEST(ControllerManagerTest, ReorderBeyondMinReorderingTimeAndMinDistance) {
+ rtc::ScopedFakeClock fake_clock;
+ auto states = CreateControllerManager();
+ constexpr int kBandwidthBps =
+ (kCharacteristicBandwidthBps[0] + kCharacteristicBandwidthBps[1]) / 2;
+ constexpr float kPacketLossFraction = (kCharacteristicPacketLossFraction[0] +
+ kCharacteristicPacketLossFraction[1]) /
+ 2.0f;
+ // Set network metrics to be in the middle between the characteristic points
+ // of two controllers.
+ CheckControllersOrder(&states, kBandwidthBps, kPacketLossFraction,
+ {kNumControllers - 2, kNumControllers - 1, 0, 1});
+ fake_clock.AdvanceTime(TimeDelta::Millis(kMinReorderingTimeMs));
+ // Then let network metrics move a little towards the other controller.
+ CheckControllersOrder(&states, kBandwidthBps - kMinBandwidthChangeBps - 1,
+ kPacketLossFraction,
+ {kNumControllers - 2, kNumControllers - 1, 1, 0});
+}
+
+TEST(ControllerManagerTest, DoNotReorderIfNetworkMetricsChangeTooSmall) {
+ rtc::ScopedFakeClock fake_clock;
+ auto states = CreateControllerManager();
+ constexpr int kBandwidthBps =
+ (kCharacteristicBandwidthBps[0] + kCharacteristicBandwidthBps[1]) / 2;
+ constexpr float kPacketLossFraction = (kCharacteristicPacketLossFraction[0] +
+ kCharacteristicPacketLossFraction[1]) /
+ 2.0f;
+ // Set network metrics to be in the middle between the characteristic points
+ // of two controllers.
+ CheckControllersOrder(&states, kBandwidthBps, kPacketLossFraction,
+ {kNumControllers - 2, kNumControllers - 1, 0, 1});
+ fake_clock.AdvanceTime(TimeDelta::Millis(kMinReorderingTimeMs));
+ // Then let network metrics move a little towards the other controller.
+ CheckControllersOrder(&states, kBandwidthBps - kMinBandwidthChangeBps + 1,
+ kPacketLossFraction,
+ {kNumControllers - 2, kNumControllers - 1, 0, 1});
+}
+
+#if WEBRTC_ENABLE_PROTOBUF
+
+namespace {
+
+void AddBitrateControllerConfig(
+ audio_network_adaptor::config::ControllerManager* config) {
+ config->add_controllers()->mutable_bitrate_controller();
+}
+
+void AddChannelControllerConfig(
+ audio_network_adaptor::config::ControllerManager* config) {
+ auto controller_config =
+ config->add_controllers()->mutable_channel_controller();
+ controller_config->set_channel_1_to_2_bandwidth_bps(31000);
+ controller_config->set_channel_2_to_1_bandwidth_bps(29000);
+}
+
+void AddDtxControllerConfig(
+ audio_network_adaptor::config::ControllerManager* config) {
+ auto controller_config = config->add_controllers()->mutable_dtx_controller();
+ controller_config->set_dtx_enabling_bandwidth_bps(55000);
+ controller_config->set_dtx_disabling_bandwidth_bps(65000);
+}
+
+void AddFecControllerConfig(
+ audio_network_adaptor::config::ControllerManager* config) {
+ auto controller_config_ext = config->add_controllers();
+ auto controller_config = controller_config_ext->mutable_fec_controller();
+ auto fec_enabling_threshold =
+ controller_config->mutable_fec_enabling_threshold();
+ fec_enabling_threshold->set_low_bandwidth_bps(17000);
+ fec_enabling_threshold->set_low_bandwidth_packet_loss(0.1f);
+ fec_enabling_threshold->set_high_bandwidth_bps(64000);
+ fec_enabling_threshold->set_high_bandwidth_packet_loss(0.05f);
+ auto fec_disabling_threshold =
+ controller_config->mutable_fec_disabling_threshold();
+ fec_disabling_threshold->set_low_bandwidth_bps(15000);
+ fec_disabling_threshold->set_low_bandwidth_packet_loss(0.08f);
+ fec_disabling_threshold->set_high_bandwidth_bps(64000);
+ fec_disabling_threshold->set_high_bandwidth_packet_loss(0.01f);
+ controller_config->set_time_constant_ms(500);
+
+ auto scoring_point = controller_config_ext->mutable_scoring_point();
+ scoring_point->set_uplink_bandwidth_bps(kCharacteristicBandwidthBps[0]);
+ scoring_point->set_uplink_packet_loss_fraction(
+ kCharacteristicPacketLossFraction[0]);
+}
+
+void AddFrameLengthControllerConfig(
+ audio_network_adaptor::config::ControllerManager* config) {
+ auto controller_config_ext = config->add_controllers();
+ auto controller_config =
+ controller_config_ext->mutable_frame_length_controller();
+ controller_config->set_fl_decreasing_packet_loss_fraction(0.05f);
+ controller_config->set_fl_increasing_packet_loss_fraction(0.04f);
+ controller_config->set_fl_20ms_to_40ms_bandwidth_bps(80000);
+ controller_config->set_fl_40ms_to_20ms_bandwidth_bps(88000);
+ controller_config->set_fl_40ms_to_60ms_bandwidth_bps(72000);
+ controller_config->set_fl_60ms_to_40ms_bandwidth_bps(80000);
+
+ auto scoring_point = controller_config_ext->mutable_scoring_point();
+ scoring_point->set_uplink_bandwidth_bps(kCharacteristicBandwidthBps[1]);
+ scoring_point->set_uplink_packet_loss_fraction(
+ kCharacteristicPacketLossFraction[1]);
+}
+
+void AddFrameLengthControllerV2Config(
+ audio_network_adaptor::config::ControllerManager* config) {
+ auto controller =
+ config->add_controllers()->mutable_frame_length_controller_v2();
+ controller->set_min_payload_bitrate_bps(16000);
+ controller->set_use_slow_adaptation(true);
+}
+
+constexpr int kInitialBitrateBps = 24000;
+constexpr size_t kInitialChannelsToEncode = 1;
+constexpr bool kInitialDtxEnabled = true;
+constexpr bool kInitialFecEnabled = true;
+constexpr int kInitialFrameLengthMs = 60;
+constexpr int kMinBitrateBps = 6000;
+
+ControllerManagerStates CreateControllerManager(
+ absl::string_view config_string) {
+ ControllerManagerStates states;
+ constexpr size_t kNumEncoderChannels = 2;
+ const std::vector<int> encoder_frame_lengths_ms = {20, 60};
+ states.controller_manager = ControllerManagerImpl::Create(
+ config_string, kNumEncoderChannels, encoder_frame_lengths_ms,
+ kMinBitrateBps, kInitialChannelsToEncode, kInitialFrameLengthMs,
+ kInitialBitrateBps, kInitialFecEnabled, kInitialDtxEnabled);
+ return states;
+}
+
+enum class ControllerType : int8_t {
+ FEC,
+ CHANNEL,
+ DTX,
+ FRAME_LENGTH,
+ BIT_RATE
+};
+
+void CheckControllersOrder(const std::vector<Controller*>& controllers,
+ const std::vector<ControllerType>& expected_types) {
+ ASSERT_EQ(expected_types.size(), controllers.size());
+
+ // We also check that the controllers follow the initial settings.
+
+ for (size_t i = 0; i < controllers.size(); ++i) {
+ AudioEncoderRuntimeConfig encoder_config;
+ // We check the order of `controllers` by judging their decisions.
+ controllers[i]->MakeDecision(&encoder_config);
+
+ // Since controllers are not provided with network metrics, they give the
+ // initial values.
+ switch (expected_types[i]) {
+ case ControllerType::FEC:
+ EXPECT_EQ(kInitialFecEnabled, encoder_config.enable_fec);
+ break;
+ case ControllerType::CHANNEL:
+ EXPECT_EQ(kInitialChannelsToEncode, encoder_config.num_channels);
+ break;
+ case ControllerType::DTX:
+ EXPECT_EQ(kInitialDtxEnabled, encoder_config.enable_dtx);
+ break;
+ case ControllerType::FRAME_LENGTH:
+ EXPECT_EQ(kInitialFrameLengthMs, encoder_config.frame_length_ms);
+ break;
+ case ControllerType::BIT_RATE:
+ EXPECT_EQ(kInitialBitrateBps, encoder_config.bitrate_bps);
+ }
+ }
+}
+
+MATCHER_P(ControllerManagerEqual, value, "") {
+ std::string value_string;
+ std::string arg_string;
+ EXPECT_TRUE(arg.SerializeToString(&arg_string));
+ EXPECT_TRUE(value.SerializeToString(&value_string));
+ return arg_string == value_string;
+}
+
+} // namespace
+
+TEST(ControllerManagerTest, DebugDumpLoggedWhenCreateFromConfigString) {
+ audio_network_adaptor::config::ControllerManager config;
+ config.set_min_reordering_time_ms(kMinReorderingTimeMs);
+ config.set_min_reordering_squared_distance(kMinReorderingSquareDistance);
+
+ AddFecControllerConfig(&config);
+ AddChannelControllerConfig(&config);
+ AddDtxControllerConfig(&config);
+ AddFrameLengthControllerConfig(&config);
+ AddBitrateControllerConfig(&config);
+
+ std::string config_string;
+ config.SerializeToString(&config_string);
+
+ constexpr size_t kNumEncoderChannels = 2;
+ const std::vector<int> encoder_frame_lengths_ms = {20, 60};
+
+ constexpr int64_t kClockInitialTimeMs = 12345678;
+ rtc::ScopedFakeClock fake_clock;
+ fake_clock.AdvanceTime(TimeDelta::Millis(kClockInitialTimeMs));
+ auto debug_dump_writer =
+ std::unique_ptr<MockDebugDumpWriter>(new NiceMock<MockDebugDumpWriter>());
+ EXPECT_CALL(*debug_dump_writer, Die());
+ EXPECT_CALL(*debug_dump_writer,
+ DumpControllerManagerConfig(ControllerManagerEqual(config),
+ kClockInitialTimeMs));
+
+ ControllerManagerImpl::Create(config_string, kNumEncoderChannels,
+ encoder_frame_lengths_ms, kMinBitrateBps,
+ kInitialChannelsToEncode, kInitialFrameLengthMs,
+ kInitialBitrateBps, kInitialFecEnabled,
+ kInitialDtxEnabled, debug_dump_writer.get());
+}
+
+TEST(ControllerManagerTest, CreateFromConfigStringAndCheckDefaultOrder) {
+ audio_network_adaptor::config::ControllerManager config;
+ config.set_min_reordering_time_ms(kMinReorderingTimeMs);
+ config.set_min_reordering_squared_distance(kMinReorderingSquareDistance);
+
+ AddFecControllerConfig(&config);
+ AddChannelControllerConfig(&config);
+ AddDtxControllerConfig(&config);
+ AddFrameLengthControllerConfig(&config);
+ AddBitrateControllerConfig(&config);
+
+ std::string config_string;
+ config.SerializeToString(&config_string);
+
+ auto states = CreateControllerManager(config_string);
+ Controller::NetworkMetrics metrics;
+
+ auto controllers = states.controller_manager->GetSortedControllers(metrics);
+ CheckControllersOrder(
+ controllers,
+ std::vector<ControllerType>{
+ ControllerType::FEC, ControllerType::CHANNEL, ControllerType::DTX,
+ ControllerType::FRAME_LENGTH, ControllerType::BIT_RATE});
+}
+
+TEST(ControllerManagerTest, CreateCharPointFreeConfigAndCheckDefaultOrder) {
+ audio_network_adaptor::config::ControllerManager config;
+
+ // The following controllers have no characteristic points.
+ AddChannelControllerConfig(&config);
+ AddDtxControllerConfig(&config);
+ AddBitrateControllerConfig(&config);
+
+ std::string config_string;
+ config.SerializeToString(&config_string);
+
+ auto states = CreateControllerManager(config_string);
+ Controller::NetworkMetrics metrics;
+
+ auto controllers = states.controller_manager->GetSortedControllers(metrics);
+ CheckControllersOrder(
+ controllers,
+ std::vector<ControllerType>{ControllerType::CHANNEL, ControllerType::DTX,
+ ControllerType::BIT_RATE});
+}
+
+TEST(ControllerManagerTest, CreateFromConfigStringAndCheckReordering) {
+ rtc::ScopedFakeClock fake_clock;
+ audio_network_adaptor::config::ControllerManager config;
+ config.set_min_reordering_time_ms(kMinReorderingTimeMs);
+ config.set_min_reordering_squared_distance(kMinReorderingSquareDistance);
+
+ AddChannelControllerConfig(&config);
+
+ // Internally associated with characteristic point 0.
+ AddFecControllerConfig(&config);
+
+ AddDtxControllerConfig(&config);
+
+ // Internally associated with characteristic point 1.
+ AddFrameLengthControllerConfig(&config);
+
+ AddBitrateControllerConfig(&config);
+
+ std::string config_string;
+ config.SerializeToString(&config_string);
+
+ auto states = CreateControllerManager(config_string);
+
+ Controller::NetworkMetrics metrics;
+ metrics.uplink_bandwidth_bps = kCharacteristicBandwidthBps[0];
+ metrics.uplink_packet_loss_fraction = kCharacteristicPacketLossFraction[0];
+
+ auto controllers = states.controller_manager->GetSortedControllers(metrics);
+ CheckControllersOrder(controllers,
+ std::vector<ControllerType>{
+ ControllerType::FEC, ControllerType::FRAME_LENGTH,
+ ControllerType::CHANNEL, ControllerType::DTX,
+ ControllerType::BIT_RATE});
+
+ metrics.uplink_bandwidth_bps = kCharacteristicBandwidthBps[1];
+ metrics.uplink_packet_loss_fraction = kCharacteristicPacketLossFraction[1];
+ fake_clock.AdvanceTime(TimeDelta::Millis(kMinReorderingTimeMs - 1));
+ controllers = states.controller_manager->GetSortedControllers(metrics);
+ // Should not reorder since min reordering time is not met.
+ CheckControllersOrder(controllers,
+ std::vector<ControllerType>{
+ ControllerType::FEC, ControllerType::FRAME_LENGTH,
+ ControllerType::CHANNEL, ControllerType::DTX,
+ ControllerType::BIT_RATE});
+
+ fake_clock.AdvanceTime(TimeDelta::Millis(1));
+ controllers = states.controller_manager->GetSortedControllers(metrics);
+ // Reorder now.
+ CheckControllersOrder(controllers,
+ std::vector<ControllerType>{
+ ControllerType::FRAME_LENGTH, ControllerType::FEC,
+ ControllerType::CHANNEL, ControllerType::DTX,
+ ControllerType::BIT_RATE});
+}
+
+TEST(ControllerManagerTest, CreateFrameLengthControllerV2) {
+ audio_network_adaptor::config::ControllerManager config;
+ AddFrameLengthControllerV2Config(&config);
+ auto states = CreateControllerManager(config.SerializeAsString());
+ auto controllers = states.controller_manager->GetControllers();
+ EXPECT_EQ(1u, controllers.size());
+}
+#endif // WEBRTC_ENABLE_PROTOBUF
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/debug_dump.proto b/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/debug_dump.proto
new file mode 100644
index 0000000000..3aa6a504f3
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/debug_dump.proto
@@ -0,0 +1,42 @@
+syntax = "proto2";
+option optimize_for = LITE_RUNTIME;
+package webrtc.audio_network_adaptor.debug_dump;
+
+import "config.proto";
+
+message NetworkMetrics {
+ optional int32 uplink_bandwidth_bps = 1;
+ optional float uplink_packet_loss_fraction = 2;
+ optional int32 target_audio_bitrate_bps = 3;
+ optional int32 rtt_ms = 4;
+ optional int32 uplink_recoverable_packet_loss_fraction = 5;
+}
+
+message EncoderRuntimeConfig {
+ optional int32 bitrate_bps = 1;
+ optional int32 frame_length_ms = 2;
+ // Note: This is what we tell the encoder. It doesn't have to reflect
+ // the actual NetworkMetrics; it's subject to our decision.
+ optional float uplink_packet_loss_fraction = 3;
+ optional bool enable_fec = 4;
+ optional bool enable_dtx = 5;
+ // Some encoders can encode fewer channels than the actual input to make
+ // better use of the bandwidth. `num_channels` sets the number of channels
+ // to encode.
+ optional uint32 num_channels = 6;
+}
+
+message Event {
+ enum Type {
+ NETWORK_METRICS = 0;
+ ENCODER_RUNTIME_CONFIG = 1;
+ CONTROLLER_MANAGER_CONFIG = 2;
+ }
+ required Type type = 1;
+ required uint32 timestamp = 2;
+ optional NetworkMetrics network_metrics = 3;
+ optional EncoderRuntimeConfig encoder_runtime_config = 4;
+ optional webrtc.audio_network_adaptor.config.ControllerManager
+ controller_manager_config = 5;
+}
+
diff --git a/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/debug_dump_writer.cc b/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/debug_dump_writer.cc
new file mode 100644
index 0000000000..2616706ee5
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/debug_dump_writer.cc
@@ -0,0 +1,163 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/audio_network_adaptor/debug_dump_writer.h"
+
+#include <string>
+
+#include "absl/types/optional.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/ignore_wundef.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "rtc_base/system/file_wrapper.h"
+
+#if WEBRTC_ENABLE_PROTOBUF
+RTC_PUSH_IGNORING_WUNDEF()
+#ifdef WEBRTC_ANDROID_PLATFORM_BUILD
+#include "external/webrtc/webrtc/modules/audio_coding/audio_network_adaptor/debug_dump.pb.h"
+#else
+#include "modules/audio_coding/audio_network_adaptor/debug_dump.pb.h"
+#endif
+RTC_POP_IGNORING_WUNDEF()
+#endif
+
+namespace webrtc {
+
+#if WEBRTC_ENABLE_PROTOBUF
+namespace {
+
+using audio_network_adaptor::debug_dump::EncoderRuntimeConfig;
+using audio_network_adaptor::debug_dump::Event;
+using audio_network_adaptor::debug_dump::NetworkMetrics;
+
+void DumpEventToFile(const Event& event, FileWrapper* dump_file) {
+ RTC_CHECK(dump_file->is_open());
+ std::string dump_data;
+ event.SerializeToString(&dump_data);
+ int32_t size = rtc::checked_cast<int32_t>(event.ByteSizeLong());
+ dump_file->Write(&size, sizeof(size));
+ dump_file->Write(dump_data.data(), dump_data.length());
+}
+
+} // namespace
+#endif // WEBRTC_ENABLE_PROTOBUF
+
+class DebugDumpWriterImpl final : public DebugDumpWriter {
+ public:
+ explicit DebugDumpWriterImpl(FILE* file_handle);
+ ~DebugDumpWriterImpl() override = default;
+
+ void DumpEncoderRuntimeConfig(const AudioEncoderRuntimeConfig& config,
+ int64_t timestamp) override;
+
+ void DumpNetworkMetrics(const Controller::NetworkMetrics& metrics,
+ int64_t timestamp) override;
+
+#if WEBRTC_ENABLE_PROTOBUF
+ void DumpControllerManagerConfig(
+ const audio_network_adaptor::config::ControllerManager&
+ controller_manager_config,
+ int64_t timestamp) override;
+#endif
+
+ private:
+ FileWrapper dump_file_;
+};
+
+DebugDumpWriterImpl::DebugDumpWriterImpl(FILE* file_handle) {
+#if WEBRTC_ENABLE_PROTOBUF
+ dump_file_ = FileWrapper(file_handle);
+ RTC_CHECK(dump_file_.is_open());
+#else
+ RTC_DCHECK_NOTREACHED();
+#endif
+}
+
+void DebugDumpWriterImpl::DumpNetworkMetrics(
+ const Controller::NetworkMetrics& metrics,
+ int64_t timestamp) {
+#if WEBRTC_ENABLE_PROTOBUF
+ Event event;
+ event.set_timestamp(timestamp);
+ event.set_type(Event::NETWORK_METRICS);
+ auto dump_metrics = event.mutable_network_metrics();
+
+ if (metrics.uplink_bandwidth_bps)
+ dump_metrics->set_uplink_bandwidth_bps(*metrics.uplink_bandwidth_bps);
+
+ if (metrics.uplink_packet_loss_fraction) {
+ dump_metrics->set_uplink_packet_loss_fraction(
+ *metrics.uplink_packet_loss_fraction);
+ }
+
+ if (metrics.target_audio_bitrate_bps) {
+ dump_metrics->set_target_audio_bitrate_bps(
+ *metrics.target_audio_bitrate_bps);
+ }
+
+ if (metrics.rtt_ms)
+ dump_metrics->set_rtt_ms(*metrics.rtt_ms);
+
+ DumpEventToFile(event, &dump_file_);
+#endif // WEBRTC_ENABLE_PROTOBUF
+}
+
+void DebugDumpWriterImpl::DumpEncoderRuntimeConfig(
+ const AudioEncoderRuntimeConfig& config,
+ int64_t timestamp) {
+#if WEBRTC_ENABLE_PROTOBUF
+ Event event;
+ event.set_timestamp(timestamp);
+ event.set_type(Event::ENCODER_RUNTIME_CONFIG);
+ auto dump_config = event.mutable_encoder_runtime_config();
+
+ if (config.bitrate_bps)
+ dump_config->set_bitrate_bps(*config.bitrate_bps);
+
+ if (config.frame_length_ms)
+ dump_config->set_frame_length_ms(*config.frame_length_ms);
+
+ if (config.uplink_packet_loss_fraction) {
+ dump_config->set_uplink_packet_loss_fraction(
+ *config.uplink_packet_loss_fraction);
+ }
+
+ if (config.enable_fec)
+ dump_config->set_enable_fec(*config.enable_fec);
+
+ if (config.enable_dtx)
+ dump_config->set_enable_dtx(*config.enable_dtx);
+
+ if (config.num_channels)
+ dump_config->set_num_channels(*config.num_channels);
+
+ DumpEventToFile(event, &dump_file_);
+#endif // WEBRTC_ENABLE_PROTOBUF
+}
+
+#if WEBRTC_ENABLE_PROTOBUF
+void DebugDumpWriterImpl::DumpControllerManagerConfig(
+ const audio_network_adaptor::config::ControllerManager&
+ controller_manager_config,
+ int64_t timestamp) {
+ Event event;
+ event.set_timestamp(timestamp);
+ event.set_type(Event::CONTROLLER_MANAGER_CONFIG);
+ event.mutable_controller_manager_config()->CopyFrom(
+ controller_manager_config);
+ DumpEventToFile(event, &dump_file_);
+}
+#endif // WEBRTC_ENABLE_PROTOBUF
+
+std::unique_ptr<DebugDumpWriter> DebugDumpWriter::Create(FILE* file_handle) {
+ return std::unique_ptr<DebugDumpWriter>(new DebugDumpWriterImpl(file_handle));
+}
+
+} // namespace webrtc
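Since DumpEventToFile above frames each record as a raw int32 byte count followed by the serialized Event proto, a dump can be read back with a loop like the following (a hypothetical reader, not part of these sources; it assumes the dump was produced on a machine with the same endianness):

#include <cstdint>
#include <cstdio>
#include <string>
#include <vector>

#include "modules/audio_coding/audio_network_adaptor/debug_dump.pb.h"

std::vector<webrtc::audio_network_adaptor::debug_dump::Event> ReadDump(
    FILE* file) {
  std::vector<webrtc::audio_network_adaptor::debug_dump::Event> events;
  int32_t size = 0;
  // Each record is a raw int32 byte count followed by the serialized proto,
  // matching DumpEventToFile above.
  while (std::fread(&size, sizeof(size), 1, file) == 1 && size >= 0) {
    std::string payload(static_cast<size_t>(size), '\0');
    if (std::fread(&payload[0], 1, payload.size(), file) != payload.size())
      break;  // Truncated record.
    webrtc::audio_network_adaptor::debug_dump::Event event;
    if (!event.ParseFromString(payload))
      break;  // Corrupt record.
    events.push_back(event);
  }
  return events;
}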
diff --git a/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/debug_dump_writer.h b/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/debug_dump_writer.h
new file mode 100644
index 0000000000..8fdf2f7728
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/debug_dump_writer.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_DEBUG_DUMP_WRITER_H_
+#define MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_DEBUG_DUMP_WRITER_H_
+
+#include <memory>
+
+#include "modules/audio_coding/audio_network_adaptor/controller.h"
+#include "modules/audio_coding/audio_network_adaptor/include/audio_network_adaptor.h"
+#include "rtc_base/ignore_wundef.h"
+#include "rtc_base/system/file_wrapper.h"
+#if WEBRTC_ENABLE_PROTOBUF
+RTC_PUSH_IGNORING_WUNDEF()
+#ifdef WEBRTC_ANDROID_PLATFORM_BUILD
+#include "external/webrtc/webrtc/modules/audio_coding/audio_network_adaptor/config.pb.h"
+#else
+#include "modules/audio_coding/audio_network_adaptor/config.pb.h"
+#endif
+RTC_POP_IGNORING_WUNDEF()
+#endif
+
+namespace webrtc {
+
+class DebugDumpWriter {
+ public:
+ static std::unique_ptr<DebugDumpWriter> Create(FILE* file_handle);
+
+ virtual ~DebugDumpWriter() = default;
+
+ virtual void DumpEncoderRuntimeConfig(const AudioEncoderRuntimeConfig& config,
+ int64_t timestamp) = 0;
+
+ virtual void DumpNetworkMetrics(const Controller::NetworkMetrics& metrics,
+ int64_t timestamp) = 0;
+
+#if WEBRTC_ENABLE_PROTOBUF
+ virtual void DumpControllerManagerConfig(
+ const audio_network_adaptor::config::ControllerManager&
+ controller_manager_config,
+ int64_t timestamp) = 0;
+#endif
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_DEBUG_DUMP_WRITER_H_
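A minimal usage sketch of this interface (the file name, metric values, and the ownership comment are assumptions, not taken from these sources):

#include <cstdio>

#include "modules/audio_coding/audio_network_adaptor/debug_dump_writer.h"
#include "rtc_base/time_utils.h"

void DumpOneMetric() {
  FILE* file = std::fopen("ana_debug.dat", "wb");
  if (!file)
    return;
  // The writer wraps the handle in a FileWrapper; we assume it takes over
  // closing the file.
  auto writer = webrtc::DebugDumpWriter::Create(file);
  webrtc::Controller::NetworkMetrics metrics;
  metrics.uplink_bandwidth_bps = 32000;
  metrics.uplink_packet_loss_fraction = 0.05f;
  writer->DumpNetworkMetrics(metrics, rtc::TimeMillis());
}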
diff --git a/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/dtx_controller.cc b/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/dtx_controller.cc
new file mode 100644
index 0000000000..b0a7d5d59d
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/dtx_controller.cc
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/audio_network_adaptor/dtx_controller.h"
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+DtxController::Config::Config(bool initial_dtx_enabled,
+ int dtx_enabling_bandwidth_bps,
+ int dtx_disabling_bandwidth_bps)
+ : initial_dtx_enabled(initial_dtx_enabled),
+ dtx_enabling_bandwidth_bps(dtx_enabling_bandwidth_bps),
+ dtx_disabling_bandwidth_bps(dtx_disabling_bandwidth_bps) {}
+
+DtxController::DtxController(const Config& config)
+ : config_(config), dtx_enabled_(config_.initial_dtx_enabled) {}
+
+DtxController::~DtxController() = default;
+
+void DtxController::UpdateNetworkMetrics(
+ const NetworkMetrics& network_metrics) {
+ if (network_metrics.uplink_bandwidth_bps)
+ uplink_bandwidth_bps_ = network_metrics.uplink_bandwidth_bps;
+}
+
+void DtxController::MakeDecision(AudioEncoderRuntimeConfig* config) {
+ // Decision on `enable_dtx` should not have been made.
+ RTC_DCHECK(!config->enable_dtx);
+
+ if (uplink_bandwidth_bps_) {
+ if (dtx_enabled_ &&
+ *uplink_bandwidth_bps_ >= config_.dtx_disabling_bandwidth_bps) {
+ dtx_enabled_ = false;
+ } else if (!dtx_enabled_ &&
+ *uplink_bandwidth_bps_ <= config_.dtx_enabling_bandwidth_bps) {
+ dtx_enabled_ = true;
+ }
+ }
+ config->enable_dtx = dtx_enabled_;
+}
+
+} // namespace webrtc
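Note the deliberate gap between the two thresholds: with the values used in the tests below (DTX enabled at or below 55000 bps of uplink bandwidth, disabled at or above 65000 bps), any estimate inside the 55-65 kbps band leaves the current DTX state unchanged, so an estimate hovering around a single cut-off cannot make DTX flap on and off.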
diff --git a/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/dtx_controller.h b/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/dtx_controller.h
new file mode 100644
index 0000000000..b8a8e476e4
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/dtx_controller.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_DTX_CONTROLLER_H_
+#define MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_DTX_CONTROLLER_H_
+
+#include "absl/types/optional.h"
+#include "modules/audio_coding/audio_network_adaptor/controller.h"
+#include "modules/audio_coding/audio_network_adaptor/include/audio_network_adaptor_config.h"
+
+namespace webrtc {
+
+class DtxController final : public Controller {
+ public:
+ struct Config {
+ Config(bool initial_dtx_enabled,
+ int dtx_enabling_bandwidth_bps,
+ int dtx_disabling_bandwidth_bps);
+ bool initial_dtx_enabled;
+ // Uplink bandwidth below which DTX should be switched on.
+ int dtx_enabling_bandwidth_bps;
+ // Uplink bandwidth above which DTX should be switched off.
+ int dtx_disabling_bandwidth_bps;
+ };
+
+ explicit DtxController(const Config& config);
+
+ ~DtxController() override;
+
+ DtxController(const DtxController&) = delete;
+ DtxController& operator=(const DtxController&) = delete;
+
+ void UpdateNetworkMetrics(const NetworkMetrics& network_metrics) override;
+
+ void MakeDecision(AudioEncoderRuntimeConfig* config) override;
+
+ private:
+ const Config config_;
+ bool dtx_enabled_;
+ absl::optional<int> uplink_bandwidth_bps_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_DTX_CONTROLLER_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/dtx_controller_unittest.cc b/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/dtx_controller_unittest.cc
new file mode 100644
index 0000000000..567df6f76e
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/dtx_controller_unittest.cc
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/audio_network_adaptor/dtx_controller.h"
+
+#include <memory>
+
+#include "test/gtest.h"
+
+namespace webrtc {
+
+namespace {
+
+constexpr int kDtxEnablingBandwidthBps = 55000;
+constexpr int kDtxDisablingBandwidthBps = 65000;
+constexpr int kMediumBandwidthBps =
+ (kDtxEnablingBandwidthBps + kDtxDisablingBandwidthBps) / 2;
+
+std::unique_ptr<DtxController> CreateController(bool initial_dtx_enabled) {
+ std::unique_ptr<DtxController> controller(new DtxController(
+ DtxController::Config(initial_dtx_enabled, kDtxEnablingBandwidthBps,
+ kDtxDisablingBandwidthBps)));
+ return controller;
+}
+
+void CheckDecision(DtxController* controller,
+ const absl::optional<int>& uplink_bandwidth_bps,
+ bool expected_dtx_enabled) {
+ if (uplink_bandwidth_bps) {
+ Controller::NetworkMetrics network_metrics;
+ network_metrics.uplink_bandwidth_bps = uplink_bandwidth_bps;
+ controller->UpdateNetworkMetrics(network_metrics);
+ }
+ AudioEncoderRuntimeConfig config;
+ controller->MakeDecision(&config);
+ EXPECT_EQ(expected_dtx_enabled, config.enable_dtx);
+}
+
+} // namespace
+
+TEST(DtxControllerTest, OutputInitValueWhenUplinkBandwidthUnknown) {
+ constexpr bool kInitialDtxEnabled = true;
+ auto controller = CreateController(kInitialDtxEnabled);
+ CheckDecision(controller.get(), absl::nullopt, kInitialDtxEnabled);
+}
+
+TEST(DtxControllerTest, TurnOnDtxForLowUplinkBandwidth) {
+ auto controller = CreateController(false);
+ CheckDecision(controller.get(), kDtxEnablingBandwidthBps, true);
+}
+
+TEST(DtxControllerTest, TurnOffDtxForHighUplinkBandwidth) {
+ auto controller = CreateController(true);
+ CheckDecision(controller.get(), kDtxDisablingBandwidthBps, false);
+}
+
+TEST(DtxControllerTest, MaintainDtxOffForMediumUplinkBandwidth) {
+ auto controller = CreateController(false);
+ CheckDecision(controller.get(), kMediumBandwidthBps, false);
+}
+
+TEST(DtxControllerTest, MaintainDtxOnForMediumUplinkBandwidth) {
+ auto controller = CreateController(true);
+ CheckDecision(controller.get(), kMediumBandwidthBps, true);
+}
+
+TEST(DtxControllerTest, CheckBehaviorOnChangingUplinkBandwidth) {
+ auto controller = CreateController(false);
+ CheckDecision(controller.get(), kMediumBandwidthBps, false);
+ CheckDecision(controller.get(), kDtxEnablingBandwidthBps, true);
+ CheckDecision(controller.get(), kMediumBandwidthBps, true);
+ CheckDecision(controller.get(), kDtxDisablingBandwidthBps, false);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/event_log_writer.cc b/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/event_log_writer.cc
new file mode 100644
index 0000000000..0a79484a16
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/event_log_writer.cc
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/audio_network_adaptor/event_log_writer.h"
+
+#include <math.h>
+
+#include <algorithm>
+#include <cstdlib>
+#include <memory>
+#include <utility>
+
+#include "absl/types/optional.h"
+#include "api/rtc_event_log/rtc_event.h"
+#include "api/rtc_event_log/rtc_event_log.h"
+#include "logging/rtc_event_log/events/rtc_event_audio_network_adaptation.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+EventLogWriter::EventLogWriter(RtcEventLog* event_log,
+ int min_bitrate_change_bps,
+ float min_bitrate_change_fraction,
+ float min_packet_loss_change_fraction)
+ : event_log_(event_log),
+ min_bitrate_change_bps_(min_bitrate_change_bps),
+ min_bitrate_change_fraction_(min_bitrate_change_fraction),
+ min_packet_loss_change_fraction_(min_packet_loss_change_fraction) {
+ RTC_DCHECK(event_log_);
+}
+
+EventLogWriter::~EventLogWriter() = default;
+
+void EventLogWriter::MaybeLogEncoderConfig(
+ const AudioEncoderRuntimeConfig& config) {
+ if (last_logged_config_.num_channels != config.num_channels)
+ return LogEncoderConfig(config);
+ if (last_logged_config_.enable_dtx != config.enable_dtx)
+ return LogEncoderConfig(config);
+ if (last_logged_config_.enable_fec != config.enable_fec)
+ return LogEncoderConfig(config);
+ if (last_logged_config_.frame_length_ms != config.frame_length_ms)
+ return LogEncoderConfig(config);
+ if ((!last_logged_config_.bitrate_bps && config.bitrate_bps) ||
+ (last_logged_config_.bitrate_bps && config.bitrate_bps &&
+ std::abs(*last_logged_config_.bitrate_bps - *config.bitrate_bps) >=
+ std::min(static_cast<int>(*last_logged_config_.bitrate_bps *
+ min_bitrate_change_fraction_),
+ min_bitrate_change_bps_))) {
+ return LogEncoderConfig(config);
+ }
+ if ((!last_logged_config_.uplink_packet_loss_fraction &&
+ config.uplink_packet_loss_fraction) ||
+ (last_logged_config_.uplink_packet_loss_fraction &&
+ config.uplink_packet_loss_fraction &&
+ fabs(*last_logged_config_.uplink_packet_loss_fraction -
+ *config.uplink_packet_loss_fraction) >=
+ min_packet_loss_change_fraction_ *
+ *last_logged_config_.uplink_packet_loss_fraction)) {
+ return LogEncoderConfig(config);
+ }
+}
+
+void EventLogWriter::LogEncoderConfig(const AudioEncoderRuntimeConfig& config) {
+ auto config_copy = std::make_unique<AudioEncoderRuntimeConfig>(config);
+ event_log_->Log(
+ std::make_unique<RtcEventAudioNetworkAdaptation>(std::move(config_copy)));
+ last_logged_config_ = config;
+}
+
+} // namespace webrtc
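To see what the bitrate rule above amounts to numerically, here is a tiny standalone sketch (the constants are the ones used by the unit tests below; the program itself is illustrative and not part of these sources):

#include <algorithm>
#include <cstdio>

int main() {
  constexpr int kMinChangeBps = 5000;          // kMinBitrateChangeBps below.
  constexpr float kMinChangeFraction = 0.25f;  // kMinBitrateChangeFraction.
  for (int last_bps : {70000, 10000}) {
    // Mirrors MaybeLogEncoderConfig: log once the change reaches the smaller
    // of the absolute floor and the fraction of the last logged bitrate.
    const int threshold = std::min(
        static_cast<int>(last_bps * kMinChangeFraction), kMinChangeBps);
    // 70000 -> min(17500, 5000) = 5000; 10000 -> min(2500, 5000) = 2500.
    std::printf("last=%d bps: log once |change| >= %d bps\n", last_bps,
                threshold);
  }
  return 0;
}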
diff --git a/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/event_log_writer.h b/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/event_log_writer.h
new file mode 100644
index 0000000000..a147311fc7
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/event_log_writer.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_EVENT_LOG_WRITER_H_
+#define MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_EVENT_LOG_WRITER_H_
+
+#include "modules/audio_coding/audio_network_adaptor/include/audio_network_adaptor_config.h"
+
+namespace webrtc {
+class RtcEventLog;
+
+class EventLogWriter final {
+ public:
+ EventLogWriter(RtcEventLog* event_log,
+ int min_bitrate_change_bps,
+ float min_bitrate_change_fraction,
+ float min_packet_loss_change_fraction);
+ ~EventLogWriter();
+
+ EventLogWriter(const EventLogWriter&) = delete;
+ EventLogWriter& operator=(const EventLogWriter&) = delete;
+
+ void MaybeLogEncoderConfig(const AudioEncoderRuntimeConfig& config);
+
+ private:
+ void LogEncoderConfig(const AudioEncoderRuntimeConfig& config);
+
+ RtcEventLog* const event_log_;
+ const int min_bitrate_change_bps_;
+ const float min_bitrate_change_fraction_;
+ const float min_packet_loss_change_fraction_;
+ AudioEncoderRuntimeConfig last_logged_config_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_EVENT_LOG_WRITER_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/event_log_writer_unittest.cc b/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/event_log_writer_unittest.cc
new file mode 100644
index 0000000000..2c344534ea
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/event_log_writer_unittest.cc
@@ -0,0 +1,240 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/audio_network_adaptor/event_log_writer.h"
+
+#include <memory>
+
+#include "logging/rtc_event_log/events/rtc_event_audio_network_adaptation.h"
+#include "logging/rtc_event_log/mock/mock_rtc_event_log.h"
+#include "rtc_base/checks.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+namespace {
+
+constexpr int kMinBitrateChangeBps = 5000;
+constexpr float kMinPacketLossChangeFraction = 0.5;
+constexpr float kMinBitrateChangeFraction = 0.25;
+
+constexpr int kHighBitrateBps = 70000;
+constexpr int kLowBitrateBps = 10000;
+constexpr int kFrameLengthMs = 60;
+constexpr bool kEnableFec = true;
+constexpr bool kEnableDtx = true;
+constexpr float kPacketLossFraction = 0.05f;
+constexpr size_t kNumChannels = 1;
+
+MATCHER_P(IsRtcEventAnaConfigEqualTo, config, "") {
+ if (arg->GetType() != RtcEvent::Type::AudioNetworkAdaptation) {
+ return false;
+ }
+ auto ana_event = static_cast<RtcEventAudioNetworkAdaptation*>(arg);
+ return ana_event->config() == config;
+}
+
+struct EventLogWriterStates {
+ std::unique_ptr<EventLogWriter> event_log_writer;
+ std::unique_ptr<testing::StrictMock<MockRtcEventLog>> event_log;
+ AudioEncoderRuntimeConfig runtime_config;
+};
+
+EventLogWriterStates CreateEventLogWriter() {
+ EventLogWriterStates state;
+ state.event_log.reset(new ::testing::StrictMock<MockRtcEventLog>());
+ state.event_log_writer.reset(new EventLogWriter(
+ state.event_log.get(), kMinBitrateChangeBps, kMinBitrateChangeFraction,
+ kMinPacketLossChangeFraction));
+ state.runtime_config.bitrate_bps = kHighBitrateBps;
+ state.runtime_config.frame_length_ms = kFrameLengthMs;
+ state.runtime_config.uplink_packet_loss_fraction = kPacketLossFraction;
+ state.runtime_config.enable_fec = kEnableFec;
+ state.runtime_config.enable_dtx = kEnableDtx;
+ state.runtime_config.num_channels = kNumChannels;
+ return state;
+}
+} // namespace
+
+TEST(EventLogWriterTest, FirstConfigIsLogged) {
+ auto state = CreateEventLogWriter();
+ EXPECT_CALL(*state.event_log,
+ LogProxy(IsRtcEventAnaConfigEqualTo(state.runtime_config)))
+ .Times(1);
+ state.event_log_writer->MaybeLogEncoderConfig(state.runtime_config);
+}
+
+TEST(EventLogWriterTest, SameConfigIsNotLogged) {
+ auto state = CreateEventLogWriter();
+ EXPECT_CALL(*state.event_log,
+ LogProxy(IsRtcEventAnaConfigEqualTo(state.runtime_config)))
+ .Times(1);
+ state.event_log_writer->MaybeLogEncoderConfig(state.runtime_config);
+ state.event_log_writer->MaybeLogEncoderConfig(state.runtime_config);
+}
+
+TEST(EventLogWriterTest, LogFecStateChange) {
+ auto state = CreateEventLogWriter();
+ EXPECT_CALL(*state.event_log,
+ LogProxy(IsRtcEventAnaConfigEqualTo(state.runtime_config)))
+ .Times(1);
+ state.event_log_writer->MaybeLogEncoderConfig(state.runtime_config);
+
+ state.runtime_config.enable_fec = !kEnableFec;
+ EXPECT_CALL(*state.event_log,
+ LogProxy(IsRtcEventAnaConfigEqualTo(state.runtime_config)))
+ .Times(1);
+ state.event_log_writer->MaybeLogEncoderConfig(state.runtime_config);
+}
+
+TEST(EventLogWriterTest, LogDtxStateChange) {
+ auto state = CreateEventLogWriter();
+ EXPECT_CALL(*state.event_log,
+ LogProxy(IsRtcEventAnaConfigEqualTo(state.runtime_config)))
+ .Times(1);
+ state.event_log_writer->MaybeLogEncoderConfig(state.runtime_config);
+
+ state.runtime_config.enable_dtx = !kEnableDtx;
+ EXPECT_CALL(*state.event_log,
+ LogProxy(IsRtcEventAnaConfigEqualTo(state.runtime_config)))
+ .Times(1);
+ state.event_log_writer->MaybeLogEncoderConfig(state.runtime_config);
+}
+
+TEST(EventLogWriterTest, LogChannelChange) {
+ auto state = CreateEventLogWriter();
+ EXPECT_CALL(*state.event_log,
+ LogProxy(IsRtcEventAnaConfigEqualTo(state.runtime_config)))
+ .Times(1);
+ state.event_log_writer->MaybeLogEncoderConfig(state.runtime_config);
+
+ state.runtime_config.num_channels = kNumChannels + 1;
+ EXPECT_CALL(*state.event_log,
+ LogProxy(IsRtcEventAnaConfigEqualTo(state.runtime_config)))
+ .Times(1);
+ state.event_log_writer->MaybeLogEncoderConfig(state.runtime_config);
+}
+
+TEST(EventLogWriterTest, LogFrameLengthChange) {
+ auto state = CreateEventLogWriter();
+ EXPECT_CALL(*state.event_log,
+ LogProxy(IsRtcEventAnaConfigEqualTo(state.runtime_config)))
+ .Times(1);
+ state.event_log_writer->MaybeLogEncoderConfig(state.runtime_config);
+
+ state.runtime_config.frame_length_ms = 20;
+ EXPECT_CALL(*state.event_log,
+ LogProxy(IsRtcEventAnaConfigEqualTo(state.runtime_config)))
+ .Times(1);
+ state.event_log_writer->MaybeLogEncoderConfig(state.runtime_config);
+}
+
+TEST(EventLogWriterTest, DoNotLogSmallBitrateChange) {
+ auto state = CreateEventLogWriter();
+ EXPECT_CALL(*state.event_log,
+ LogProxy(IsRtcEventAnaConfigEqualTo(state.runtime_config)))
+ .Times(1);
+ state.event_log_writer->MaybeLogEncoderConfig(state.runtime_config);
+ state.runtime_config.bitrate_bps = kHighBitrateBps + kMinBitrateChangeBps - 1;
+ state.event_log_writer->MaybeLogEncoderConfig(state.runtime_config);
+}
+
+TEST(EventLogWriterTest, LogLargeBitrateChange) {
+ auto state = CreateEventLogWriter();
+ EXPECT_CALL(*state.event_log,
+ LogProxy(IsRtcEventAnaConfigEqualTo(state.runtime_config)))
+ .Times(1);
+ state.event_log_writer->MaybeLogEncoderConfig(state.runtime_config);
+ // At high bitrate, the min fraction rule requires a larger change than the
+ // min change rule. We make sure that the min change rule applies.
+ RTC_DCHECK_GT(kHighBitrateBps * kMinBitrateChangeFraction,
+ kMinBitrateChangeBps);
+ state.runtime_config.bitrate_bps = kHighBitrateBps + kMinBitrateChangeBps;
+ EXPECT_CALL(*state.event_log,
+ LogProxy(IsRtcEventAnaConfigEqualTo(state.runtime_config)))
+ .Times(1);
+ state.event_log_writer->MaybeLogEncoderConfig(state.runtime_config);
+}
+
+TEST(EventLogWriterTest, LogMinBitrateChangeFractionOnLowBitrateChange) {
+ auto state = CreateEventLogWriter();
+ state.runtime_config.bitrate_bps = kLowBitrateBps;
+ EXPECT_CALL(*state.event_log,
+ LogProxy(IsRtcEventAnaConfigEqualTo(state.runtime_config)))
+ .Times(1);
+ state.event_log_writer->MaybeLogEncoderConfig(state.runtime_config);
+ // At low bitrate, the min change rule requires a larger change than the min
+ // fraction rule. We make sure that the min fraction rule applies.
+ state.runtime_config.bitrate_bps =
+ kLowBitrateBps + kLowBitrateBps * kMinBitrateChangeFraction;
+ EXPECT_CALL(*state.event_log,
+ LogProxy(IsRtcEventAnaConfigEqualTo(state.runtime_config)))
+ .Times(1);
+ state.event_log_writer->MaybeLogEncoderConfig(state.runtime_config);
+}
+
+TEST(EventLogWriterTest, DoNotLogSmallPacketLossFractionChange) {
+ auto state = CreateEventLogWriter();
+ EXPECT_CALL(*state.event_log,
+ LogProxy(IsRtcEventAnaConfigEqualTo(state.runtime_config)))
+ .Times(1);
+ state.event_log_writer->MaybeLogEncoderConfig(state.runtime_config);
+ state.runtime_config.uplink_packet_loss_fraction =
+ kPacketLossFraction + kMinPacketLossChangeFraction * kPacketLossFraction -
+ 0.001f;
+ state.event_log_writer->MaybeLogEncoderConfig(state.runtime_config);
+}
+
+TEST(EventLogWriterTest, LogLargePacketLossFractionChange) {
+ auto state = CreateEventLogWriter();
+ EXPECT_CALL(*state.event_log,
+ LogProxy(IsRtcEventAnaConfigEqualTo(state.runtime_config)))
+ .Times(1);
+ state.event_log_writer->MaybeLogEncoderConfig(state.runtime_config);
+ state.runtime_config.uplink_packet_loss_fraction =
+ kPacketLossFraction + kMinPacketLossChangeFraction * kPacketLossFraction;
+ EXPECT_CALL(*state.event_log,
+ LogProxy(IsRtcEventAnaConfigEqualTo(state.runtime_config)))
+ .Times(1);
+ state.event_log_writer->MaybeLogEncoderConfig(state.runtime_config);
+}
+
+TEST(EventLogWriterTest, LogJustOnceOnMultipleChanges) {
+ auto state = CreateEventLogWriter();
+ EXPECT_CALL(*state.event_log,
+ LogProxy(IsRtcEventAnaConfigEqualTo(state.runtime_config)))
+ .Times(1);
+ state.event_log_writer->MaybeLogEncoderConfig(state.runtime_config);
+ state.runtime_config.uplink_packet_loss_fraction =
+ kPacketLossFraction + kMinPacketLossChangeFraction * kPacketLossFraction;
+ state.runtime_config.frame_length_ms = 20;
+ EXPECT_CALL(*state.event_log,
+ LogProxy(IsRtcEventAnaConfigEqualTo(state.runtime_config)))
+ .Times(1);
+ state.event_log_writer->MaybeLogEncoderConfig(state.runtime_config);
+}
+
+TEST(EventLogWriterTest, LogAfterGradualChange) {
+ auto state = CreateEventLogWriter();
+ EXPECT_CALL(*state.event_log,
+ LogProxy(IsRtcEventAnaConfigEqualTo(state.runtime_config)))
+ .Times(1);
+ state.event_log_writer->MaybeLogEncoderConfig(state.runtime_config);
+ state.runtime_config.bitrate_bps = kHighBitrateBps + kMinBitrateChangeBps;
+ EXPECT_CALL(*state.event_log,
+ LogProxy(IsRtcEventAnaConfigEqualTo(state.runtime_config)))
+ .Times(1);
+ for (int bitrate_bps = kHighBitrateBps;
+ bitrate_bps <= kHighBitrateBps + kMinBitrateChangeBps; bitrate_bps++) {
+ state.runtime_config.bitrate_bps = bitrate_bps;
+ state.event_log_writer->MaybeLogEncoderConfig(state.runtime_config);
+ }
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/fec_controller_plr_based.cc b/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/fec_controller_plr_based.cc
new file mode 100644
index 0000000000..c5e5fa76e3
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/fec_controller_plr_based.cc
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/audio_network_adaptor/fec_controller_plr_based.h"
+
+#include <string>
+#include <utility>
+
+#include "rtc_base/checks.h"
+#include "system_wrappers/include/field_trial.h"
+
+namespace webrtc {
+
+namespace {
+class NullSmoothingFilter final : public SmoothingFilter {
+ public:
+ void AddSample(float sample) override { last_sample_ = sample; }
+
+ absl::optional<float> GetAverage() override { return last_sample_; }
+
+ bool SetTimeConstantMs(int time_constant_ms) override {
+ RTC_DCHECK_NOTREACHED();
+ return false;
+ }
+
+ private:
+ absl::optional<float> last_sample_;
+};
+} // namespace
+
+FecControllerPlrBased::Config::Config(
+ bool initial_fec_enabled,
+ const ThresholdCurve& fec_enabling_threshold,
+ const ThresholdCurve& fec_disabling_threshold,
+ int time_constant_ms)
+ : initial_fec_enabled(initial_fec_enabled),
+ fec_enabling_threshold(fec_enabling_threshold),
+ fec_disabling_threshold(fec_disabling_threshold),
+ time_constant_ms(time_constant_ms) {}
+
+FecControllerPlrBased::FecControllerPlrBased(
+ const Config& config,
+ std::unique_ptr<SmoothingFilter> smoothing_filter)
+ : config_(config),
+ fec_enabled_(config.initial_fec_enabled),
+ packet_loss_smoother_(std::move(smoothing_filter)) {
+ RTC_DCHECK(config_.fec_disabling_threshold <= config_.fec_enabling_threshold);
+}
+
+FecControllerPlrBased::FecControllerPlrBased(const Config& config)
+ : FecControllerPlrBased(
+ config,
+ webrtc::field_trial::FindFullName("UseTwccPlrForAna") == "Enabled"
+ ? std::unique_ptr<NullSmoothingFilter>(new NullSmoothingFilter())
+ : std::unique_ptr<SmoothingFilter>(
+ new SmoothingFilterImpl(config.time_constant_ms))) {}
+
+FecControllerPlrBased::~FecControllerPlrBased() = default;
+
+void FecControllerPlrBased::UpdateNetworkMetrics(
+ const NetworkMetrics& network_metrics) {
+ if (network_metrics.uplink_bandwidth_bps)
+ uplink_bandwidth_bps_ = network_metrics.uplink_bandwidth_bps;
+ if (network_metrics.uplink_packet_loss_fraction) {
+ packet_loss_smoother_->AddSample(
+ *network_metrics.uplink_packet_loss_fraction);
+ }
+}
+
+void FecControllerPlrBased::MakeDecision(AudioEncoderRuntimeConfig* config) {
+ RTC_DCHECK(!config->enable_fec);
+ RTC_DCHECK(!config->uplink_packet_loss_fraction);
+
+ const auto& packet_loss = packet_loss_smoother_->GetAverage();
+
+ fec_enabled_ = fec_enabled_ ? !FecDisablingDecision(packet_loss)
+ : FecEnablingDecision(packet_loss);
+
+ config->enable_fec = fec_enabled_;
+
+ config->uplink_packet_loss_fraction = packet_loss ? *packet_loss : 0.0;
+}
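+
+// Note the hysteresis in MakeDecision() above: enabling requires the
+// (bandwidth, loss) point to be on or above the enabling curve, while
+// disabling requires it to be strictly below the disabling curve, so points
+// between the two curves keep the current FEC state. A minimal usage sketch
+// (values are hypothetical; see audio_network_adaptor_impl.cc for the real
+// call sites):
+//
+//   FecControllerPlrBased controller(config);
+//   Controller::NetworkMetrics metrics;
+//   metrics.uplink_bandwidth_bps = 32000;
+//   metrics.uplink_packet_loss_fraction = 0.07f;
+//   controller.UpdateNetworkMetrics(metrics);
+//   AudioEncoderRuntimeConfig runtime_config;
+//   controller.MakeDecision(&runtime_config);  // Sets enable_fec.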
+
+bool FecControllerPlrBased::FecEnablingDecision(
+ const absl::optional<float>& packet_loss) const {
+ if (!uplink_bandwidth_bps_ || !packet_loss) {
+ return false;
+ } else {
+ // Enable when above the curve or exactly on it.
+ return !config_.fec_enabling_threshold.IsBelowCurve(
+ {static_cast<float>(*uplink_bandwidth_bps_), *packet_loss});
+ }
+}
+
+bool FecControllerPlrBased::FecDisablingDecision(
+ const absl::optional<float>& packet_loss) const {
+ if (!uplink_bandwidth_bps_ || !packet_loss) {
+ return false;
+ } else {
+ // Disable when below the curve.
+ return config_.fec_disabling_threshold.IsBelowCurve(
+ {static_cast<float>(*uplink_bandwidth_bps_), *packet_loss});
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/fec_controller_plr_based.h b/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/fec_controller_plr_based.h
new file mode 100644
index 0000000000..0c57ad1d1e
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/fec_controller_plr_based.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_FEC_CONTROLLER_PLR_BASED_H_
+#define MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_FEC_CONTROLLER_PLR_BASED_H_
+
+#include <memory>
+
+#include "absl/types/optional.h"
+#include "common_audio/smoothing_filter.h"
+#include "modules/audio_coding/audio_network_adaptor/controller.h"
+#include "modules/audio_coding/audio_network_adaptor/include/audio_network_adaptor_config.h"
+#include "modules/audio_coding/audio_network_adaptor/util/threshold_curve.h"
+
+namespace webrtc {
+
+class FecControllerPlrBased final : public Controller {
+ public:
+ struct Config {
+ // `fec_enabling_threshold` defines a curve above which FEC should be
+ // enabled. `fec_disabling_threshold` defines a curve below which FEC
+ // should be disabled. See the diagram below.
+ //
+ // packet-loss ^ | |
+ // | | | FEC
+ // | \ \ ON
+ // | FEC \ \_______ fec_enabling_threshold
+ // | OFF \_________ fec_disabling_threshold
+ // |-----------------> bandwidth
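+ //
+ // Illustration only (curve points borrowed from the unit tests; the time
+ // constant is a made-up example):
+ //
+ //   ThresholdCurve enabling(17000, 0.1f, 64000, 0.05f);
+ //   ThresholdCurve disabling(15000, 0.08f, 64000, 0.01f);
+ //   Config config(false, enabling, disabling, 10000);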
+ Config(bool initial_fec_enabled,
+ const ThresholdCurve& fec_enabling_threshold,
+ const ThresholdCurve& fec_disabling_threshold,
+ int time_constant_ms);
+ bool initial_fec_enabled;
+ ThresholdCurve fec_enabling_threshold;
+ ThresholdCurve fec_disabling_threshold;
+ int time_constant_ms;
+ };
+
+ // Dependency injection for testing.
+ FecControllerPlrBased(const Config& config,
+ std::unique_ptr<SmoothingFilter> smoothing_filter);
+
+ explicit FecControllerPlrBased(const Config& config);
+
+ ~FecControllerPlrBased() override;
+
+ FecControllerPlrBased(const FecControllerPlrBased&) = delete;
+ FecControllerPlrBased& operator=(const FecControllerPlrBased&) = delete;
+
+ void UpdateNetworkMetrics(const NetworkMetrics& network_metrics) override;
+
+ void MakeDecision(AudioEncoderRuntimeConfig* config) override;
+
+ private:
+ bool FecEnablingDecision(const absl::optional<float>& packet_loss) const;
+ bool FecDisablingDecision(const absl::optional<float>& packet_loss) const;
+
+ const Config config_;
+ bool fec_enabled_;
+ absl::optional<int> uplink_bandwidth_bps_;
+ const std::unique_ptr<SmoothingFilter> packet_loss_smoother_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_FEC_CONTROLLER_PLR_BASED_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/fec_controller_plr_based_unittest.cc b/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/fec_controller_plr_based_unittest.cc
new file mode 100644
index 0000000000..743b087163
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/fec_controller_plr_based_unittest.cc
@@ -0,0 +1,489 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/audio_network_adaptor/fec_controller_plr_based.h"
+
+#include <utility>
+
+#include "common_audio/mocks/mock_smoothing_filter.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+using ::testing::_;
+using ::testing::NiceMock;
+using ::testing::Return;
+
+namespace {
+
+// The test uses the following settings:
+//
+// packet-loss ^ | |
+// | A| C| FEC
+// | \ \ ON
+// | FEC \ D\_______
+// | OFF B\_________
+// |-----------------> bandwidth
+//
+// A : (kDisablingBandwidthLow, kDisablingPacketLossAtLowBw)
+// B : (kDisablingBandwidthHigh, kDisablingPacketLossAtHighBw)
+// C : (kEnablingBandwidthLow, kEnablingPacketLossAtLowBw)
+// D : (kEnablingBandwidthHigh, kEnablingPacketLossAtHighBw)
+
+constexpr int kDisablingBandwidthLow = 15000;
+constexpr float kDisablingPacketLossAtLowBw = 0.08f;
+constexpr int kDisablingBandwidthHigh = 64000;
+constexpr float kDisablingPacketLossAtHighBw = 0.01f;
+constexpr int kEnablingBandwidthLow = 17000;
+constexpr float kEnablingPacketLossAtLowBw = 0.1f;
+constexpr int kEnablingBandwidthHigh = 64000;
+constexpr float kEnablingPacketLossAtHighBw = 0.05f;
+
+constexpr float kEpsilon = 1e-5f;
+
+struct FecControllerPlrBasedTestStates {
+ std::unique_ptr<FecControllerPlrBased> controller;
+ MockSmoothingFilter* packet_loss_smoother;
+};
+
+FecControllerPlrBasedTestStates CreateFecControllerPlrBased(
+ bool initial_fec_enabled,
+ const ThresholdCurve& enabling_curve,
+ const ThresholdCurve& disabling_curve) {
+ FecControllerPlrBasedTestStates states;
+ std::unique_ptr<MockSmoothingFilter> mock_smoothing_filter(
+ new NiceMock<MockSmoothingFilter>());
+ states.packet_loss_smoother = mock_smoothing_filter.get();
+ states.controller.reset(new FecControllerPlrBased(
+ FecControllerPlrBased::Config(initial_fec_enabled, enabling_curve,
+ disabling_curve, 0),
+ std::move(mock_smoothing_filter)));
+ return states;
+}
+
+FecControllerPlrBasedTestStates CreateFecControllerPlrBased(
+ bool initial_fec_enabled) {
+ return CreateFecControllerPlrBased(
+ initial_fec_enabled,
+ ThresholdCurve(kEnablingBandwidthLow, kEnablingPacketLossAtLowBw,
+ kEnablingBandwidthHigh, kEnablingPacketLossAtHighBw),
+ ThresholdCurve(kDisablingBandwidthLow, kDisablingPacketLossAtLowBw,
+ kDisablingBandwidthHigh, kDisablingPacketLossAtHighBw));
+}
+
+void UpdateNetworkMetrics(FecControllerPlrBasedTestStates* states,
+ const absl::optional<int>& uplink_bandwidth_bps,
+ const absl::optional<float>& uplink_packet_loss) {
+ // UpdateNetworkMetrics can accept multiple network metric updates at once.
+ // However, currently, the most used case is to update one metric at a time.
+ // To reflect this fact, we separate the calls.
+ if (uplink_bandwidth_bps) {
+ Controller::NetworkMetrics network_metrics;
+ network_metrics.uplink_bandwidth_bps = uplink_bandwidth_bps;
+ states->controller->UpdateNetworkMetrics(network_metrics);
+ }
+ if (uplink_packet_loss) {
+ Controller::NetworkMetrics network_metrics;
+ network_metrics.uplink_packet_loss_fraction = uplink_packet_loss;
+ EXPECT_CALL(*states->packet_loss_smoother, AddSample(*uplink_packet_loss));
+ states->controller->UpdateNetworkMetrics(network_metrics);
+ // This is called during CheckDecision().
+ EXPECT_CALL(*states->packet_loss_smoother, GetAverage())
+ .WillOnce(Return(*uplink_packet_loss));
+ }
+}
+
+// Checks that the FEC decision and `uplink_packet_loss_fraction` given by
+// `states->controller->MakeDecision` match `expected_enable_fec` and
+// `expected_uplink_packet_loss_fraction`, respectively.
+void CheckDecision(FecControllerPlrBasedTestStates* states,
+ bool expected_enable_fec,
+ float expected_uplink_packet_loss_fraction) {
+ AudioEncoderRuntimeConfig config;
+ states->controller->MakeDecision(&config);
+ EXPECT_EQ(expected_enable_fec, config.enable_fec);
+ EXPECT_EQ(expected_uplink_packet_loss_fraction,
+ config.uplink_packet_loss_fraction);
+}
+
+} // namespace
+
+TEST(FecControllerPlrBasedTest, OutputInitValueBeforeAnyInputsAreReceived) {
+ for (bool initial_fec_enabled : {false, true}) {
+ auto states = CreateFecControllerPlrBased(initial_fec_enabled);
+ CheckDecision(&states, initial_fec_enabled, 0);
+ }
+}
+
+TEST(FecControllerPlrBasedTest, OutputInitValueWhenUplinkBandwidthUnknown) {
+ // Regardless of the initial FEC state and the packet-loss rate,
+ // the initial FEC state is maintained as long as the BWE is unknown.
+ for (bool initial_fec_enabled : {false, true}) {
+ for (float packet_loss :
+ {kDisablingPacketLossAtLowBw - kEpsilon, kDisablingPacketLossAtLowBw,
+ kDisablingPacketLossAtLowBw + kEpsilon,
+ kEnablingPacketLossAtLowBw - kEpsilon, kEnablingPacketLossAtLowBw,
+ kEnablingPacketLossAtLowBw + kEpsilon}) {
+ auto states = CreateFecControllerPlrBased(initial_fec_enabled);
+ UpdateNetworkMetrics(&states, absl::nullopt, packet_loss);
+ CheckDecision(&states, initial_fec_enabled, packet_loss);
+ }
+ }
+}
+
+TEST(FecControllerPlrBasedTest,
+ OutputInitValueWhenUplinkPacketLossFractionUnknown) {
+ // Regardless of the initial FEC state and the BWE, the initial FEC state
+ // is maintained as long as the packet-loss rate is unknown.
+ for (bool initial_fec_enabled : {false, true}) {
+ for (int bandwidth : {kDisablingBandwidthLow - 1, kDisablingBandwidthLow,
+ kDisablingBandwidthLow + 1, kEnablingBandwidthLow - 1,
+ kEnablingBandwidthLow, kEnablingBandwidthLow + 1}) {
+ auto states = CreateFecControllerPlrBased(initial_fec_enabled);
+ UpdateNetworkMetrics(&states, bandwidth, absl::nullopt);
+ CheckDecision(&states, initial_fec_enabled, 0.0);
+ }
+ }
+}
+
+TEST(FecControllerPlrBasedTest, EnableFecForHighBandwidth) {
+ auto states = CreateFecControllerPlrBased(false);
+ UpdateNetworkMetrics(&states, kEnablingBandwidthHigh,
+ kEnablingPacketLossAtHighBw);
+ CheckDecision(&states, true, kEnablingPacketLossAtHighBw);
+}
+
+TEST(FecControllerPlrBasedTest, UpdateMultipleNetworkMetricsAtOnce) {
+ // This test is similar to EnableFecForHighBandwidth. But instead of
+ // using ::UpdateNetworkMetrics(...), which calls
+ // FecControllerPlrBased::UpdateNetworkMetrics(...) multiple times, we call
+ // it only once. This is to verify that
+ // FecControllerPlrBased::UpdateNetworkMetrics(...) can handle multiple
+ // network updates at once. This is, however, not a common use case in current
+ // audio_network_adaptor_impl.cc.
+ auto states = CreateFecControllerPlrBased(false);
+ Controller::NetworkMetrics network_metrics;
+ network_metrics.uplink_bandwidth_bps = kEnablingBandwidthHigh;
+ network_metrics.uplink_packet_loss_fraction = kEnablingPacketLossAtHighBw;
+ EXPECT_CALL(*states.packet_loss_smoother, GetAverage())
+ .WillOnce(Return(kEnablingPacketLossAtHighBw));
+ states.controller->UpdateNetworkMetrics(network_metrics);
+ CheckDecision(&states, true, kEnablingPacketLossAtHighBw);
+}
+
+TEST(FecControllerPlrBasedTest, MaintainFecOffForHighBandwidth) {
+ auto states = CreateFecControllerPlrBased(false);
+ constexpr float kPacketLoss = kEnablingPacketLossAtHighBw * 0.99f;
+ UpdateNetworkMetrics(&states, kEnablingBandwidthHigh, kPacketLoss);
+ CheckDecision(&states, false, kPacketLoss);
+}
+
+TEST(FecControllerPlrBasedTest, EnableFecForMediumBandwidth) {
+ auto states = CreateFecControllerPlrBased(false);
+ constexpr float kPacketLoss =
+ (kEnablingPacketLossAtLowBw + kEnablingPacketLossAtHighBw) / 2.0;
+ UpdateNetworkMetrics(&states,
+ (kEnablingBandwidthHigh + kEnablingBandwidthLow) / 2,
+ kPacketLoss);
+ CheckDecision(&states, true, kPacketLoss);
+}
+
+TEST(FecControllerPlrBasedTest, MaintainFecOffForMediumBandwidth) {
+ auto states = CreateFecControllerPlrBased(false);
+ constexpr float kPacketLoss =
+ kEnablingPacketLossAtLowBw * 0.49f + kEnablingPacketLossAtHighBw * 0.51f;
+ UpdateNetworkMetrics(&states,
+ (kEnablingBandwidthHigh + kEnablingBandwidthLow) / 2,
+ kPacketLoss);
+ CheckDecision(&states, false, kPacketLoss);
+}
+
+TEST(FecControllerPlrBasedTest, EnableFecForLowBandwidth) {
+ auto states = CreateFecControllerPlrBased(false);
+ UpdateNetworkMetrics(&states, kEnablingBandwidthLow,
+ kEnablingPacketLossAtLowBw);
+ CheckDecision(&states, true, kEnablingPacketLossAtLowBw);
+}
+
+TEST(FecControllerPlrBasedTest, MaintainFecOffForLowBandwidth) {
+ auto states = CreateFecControllerPlrBased(false);
+ constexpr float kPacketLoss = kEnablingPacketLossAtLowBw * 0.99f;
+ UpdateNetworkMetrics(&states, kEnablingBandwidthLow, kPacketLoss);
+ CheckDecision(&states, false, kPacketLoss);
+}
+
+TEST(FecControllerPlrBasedTest, MaintainFecOffForVeryLowBandwidth) {
+ auto states = CreateFecControllerPlrBased(false);
+ // Below `kEnablingBandwidthLow`, no packet loss fraction can cause FEC to
+ // turn on.
+ UpdateNetworkMetrics(&states, kEnablingBandwidthLow - 1, 1.0);
+ CheckDecision(&states, false, 1.0);
+}
+
+TEST(FecControllerPlrBasedTest, DisableFecForHighBandwidth) {
+ auto states = CreateFecControllerPlrBased(true);
+ constexpr float kPacketLoss = kDisablingPacketLossAtHighBw - kEpsilon;
+ UpdateNetworkMetrics(&states, kDisablingBandwidthHigh, kPacketLoss);
+ CheckDecision(&states, false, kPacketLoss);
+}
+
+TEST(FecControllerPlrBasedTest, MaintainFecOnForHighBandwidth) {
+ // Note: Disabling happens when the value is strictly below the threshold.
+ auto states = CreateFecControllerPlrBased(true);
+ UpdateNetworkMetrics(&states, kDisablingBandwidthHigh,
+ kDisablingPacketLossAtHighBw);
+ CheckDecision(&states, true, kDisablingPacketLossAtHighBw);
+}
+
+TEST(FecControllerPlrBasedTest, DisableFecOnMediumBandwidth) {
+ auto states = CreateFecControllerPlrBased(true);
+ constexpr float kPacketLoss =
+ (kDisablingPacketLossAtLowBw + kDisablingPacketLossAtHighBw) / 2.0f -
+ kEpsilon;
+ UpdateNetworkMetrics(&states,
+ (kDisablingBandwidthHigh + kDisablingBandwidthLow) / 2,
+ kPacketLoss);
+ CheckDecision(&states, false, kPacketLoss);
+}
+
+TEST(FecControllerPlrBasedTest, MaintainFecOnForMediumBandwidth) {
+ auto states = CreateFecControllerPlrBased(true);
+ constexpr float kPacketLoss = kDisablingPacketLossAtLowBw * 0.51f +
+ kDisablingPacketLossAtHighBw * 0.49f - kEpsilon;
+ UpdateNetworkMetrics(&states,
+ (kEnablingBandwidthHigh + kDisablingBandwidthLow) / 2,
+ kPacketLoss);
+ CheckDecision(&states, true, kPacketLoss);
+}
+
+TEST(FecControllerPlrBasedTest, DisableFecForLowBandwidth) {
+ auto states = CreateFecControllerPlrBased(true);
+ constexpr float kPacketLoss = kDisablingPacketLossAtLowBw - kEpsilon;
+ UpdateNetworkMetrics(&states, kDisablingBandwidthLow, kPacketLoss);
+ CheckDecision(&states, false, kPacketLoss);
+}
+
+TEST(FecControllerPlrBasedTest, DisableFecForVeryLowBandwidth) {
+ auto states = CreateFecControllerPlrBased(true);
+ // Below `kDisablingBandwidthLow`, any packet loss fraction can cause FEC to
+ // turn off.
+ UpdateNetworkMetrics(&states, kDisablingBandwidthLow - 1, 1.0);
+ CheckDecision(&states, false, 1.0);
+}
+
+TEST(FecControllerPlrBasedTest, CheckBehaviorOnChangingNetworkMetrics) {
+ // In this test, we let the network metrics traverse states 1 to 5.
+ // packet-loss ^ 1 | |
+ // | | 2|
+ // | \ \ 3
+ // | \4 \_______
+ // | \_________
+ // |---------5-------> bandwidth
+
+ auto states = CreateFecControllerPlrBased(true);
+ UpdateNetworkMetrics(&states, kDisablingBandwidthLow - 1, 1.0);
+ CheckDecision(&states, false, 1.0);
+
+ UpdateNetworkMetrics(&states, kEnablingBandwidthLow,
+ kEnablingPacketLossAtLowBw * 0.99f);
+ CheckDecision(&states, false, kEnablingPacketLossAtLowBw * 0.99f);
+
+ UpdateNetworkMetrics(&states, kEnablingBandwidthHigh,
+ kEnablingPacketLossAtHighBw);
+ CheckDecision(&states, true, kEnablingPacketLossAtHighBw);
+
+ UpdateNetworkMetrics(&states, kDisablingBandwidthHigh,
+ kDisablingPacketLossAtHighBw);
+ CheckDecision(&states, true, kDisablingPacketLossAtHighBw);
+
+ UpdateNetworkMetrics(&states, kDisablingBandwidthHigh + 1, 0.0);
+ CheckDecision(&states, false, 0.0);
+}
+
+TEST(FecControllerPlrBasedTest, CheckBehaviorOnSpecialCurves) {
+ // We test a special configuration, where the points that define the FEC
+ // enabling/disabling curves are placed as follows. Otherwise, the test
+ // is the same as CheckBehaviorOnChangingNetworkMetrics.
+ //
+ // packet-loss ^ | |
+ // | | C|
+ // | | |
+ // | | D|_______
+ // | A|___B______
+ // |-----------------> bandwidth
+
+ constexpr int kEnablingBandwidthHigh = kEnablingBandwidthLow;
+ constexpr float kDisablingPacketLossAtLowBw = kDisablingPacketLossAtHighBw;
+ FecControllerPlrBasedTestStates states;
+ std::unique_ptr<MockSmoothingFilter> mock_smoothing_filter(
+ new NiceMock<MockSmoothingFilter>());
+ states.packet_loss_smoother = mock_smoothing_filter.get();
+ states.controller.reset(new FecControllerPlrBased(
+ FecControllerPlrBased::Config(
+ true,
+ ThresholdCurve(kEnablingBandwidthLow, kEnablingPacketLossAtLowBw,
+ kEnablingBandwidthHigh, kEnablingPacketLossAtHighBw),
+ ThresholdCurve(kDisablingBandwidthLow, kDisablingPacketLossAtLowBw,
+ kDisablingBandwidthHigh, kDisablingPacketLossAtHighBw),
+ 0),
+ std::move(mock_smoothing_filter)));
+
+ UpdateNetworkMetrics(&states, kDisablingBandwidthLow - 1, 1.0);
+ CheckDecision(&states, false, 1.0);
+
+ UpdateNetworkMetrics(&states, kEnablingBandwidthLow,
+ kEnablingPacketLossAtHighBw * 0.99f);
+ CheckDecision(&states, false, kEnablingPacketLossAtHighBw * 0.99f);
+
+ UpdateNetworkMetrics(&states, kEnablingBandwidthHigh,
+ kEnablingPacketLossAtHighBw);
+ CheckDecision(&states, true, kEnablingPacketLossAtHighBw);
+
+ UpdateNetworkMetrics(&states, kDisablingBandwidthHigh,
+ kDisablingPacketLossAtHighBw);
+ CheckDecision(&states, true, kDisablingPacketLossAtHighBw);
+
+ UpdateNetworkMetrics(&states, kDisablingBandwidthHigh + 1, 0.0);
+ CheckDecision(&states, false, 0.0);
+}
+
+TEST(FecControllerPlrBasedTest, SingleThresholdCurveForEnablingAndDisabling) {
+ // Note: To avoid numerical errors, keep kPacketLossAtLowBw and
+ // kPacketLossAtHighBw as (negative) integer powers of 2.
+ // This is mostly relevant for the O3 case.
+ constexpr int kBandwidthLow = 10000;
+ constexpr float kPacketLossAtLowBw = 0.25f;
+ constexpr int kBandwidthHigh = 20000;
+ constexpr float kPacketLossAtHighBw = 0.125f;
+ auto curve = ThresholdCurve(kBandwidthLow, kPacketLossAtLowBw, kBandwidthHigh,
+ kPacketLossAtHighBw);
+
+ // B* stands for "below-curve", O* for "on-curve", and A* for "above-curve".
+ //
+ // //
+ // packet-loss ^ //
+ // | | //
+ // | B1 O1 //
+ // | | //
+ // | O2 //
+ // | \ A1 //
+ // | \ //
+ // | O3 A2 //
+ // | B2 \ //
+ // | \ //
+ // | O4--O5---- //
+ // | //
+ // | B3 //
+ // |-----------------> bandwidth //
+
+ struct NetworkState {
+ int bandwidth;
+ float packet_loss;
+ };
+
+ std::vector<NetworkState> below{
+ {kBandwidthLow - 1, kPacketLossAtLowBw + 0.1f}, // B1
+ {(kBandwidthLow + kBandwidthHigh) / 2,
+ (kPacketLossAtLowBw + kPacketLossAtHighBw) / 2 - kEpsilon}, // B2
+ {kBandwidthHigh + 1, kPacketLossAtHighBw - kEpsilon} // B3
+ };
+
+ std::vector<NetworkState> on{
+ {kBandwidthLow, kPacketLossAtLowBw + 0.1f}, // O1
+ {kBandwidthLow, kPacketLossAtLowBw}, // O2
+ {(kBandwidthLow + kBandwidthHigh) / 2,
+ (kPacketLossAtLowBw + kPacketLossAtHighBw) / 2}, // O3
+ {kBandwidthHigh, kPacketLossAtHighBw}, // O4
+ {kBandwidthHigh + 1, kPacketLossAtHighBw}, // O5
+ };
+
+ std::vector<NetworkState> above{
+ {(kBandwidthLow + kBandwidthHigh) / 2,
+ (kPacketLossAtLowBw + kPacketLossAtHighBw) / 2 + kEpsilon}, // A1
+ {kBandwidthHigh + 1, kPacketLossAtHighBw + kEpsilon}, // A2
+ };
+
+ // Test that FEC is turned off whenever we're below the curve, independent
+ // of the starting FEC state.
+ for (NetworkState net_state : below) {
+ for (bool initial_fec_enabled : {false, true}) {
+ auto states =
+ CreateFecControllerPlrBased(initial_fec_enabled, curve, curve);
+ UpdateNetworkMetrics(&states, net_state.bandwidth, net_state.packet_loss);
+ CheckDecision(&states, false, net_state.packet_loss);
+ }
+ }
+
+ // Test that FEC is turned on whenever we're on the curve or above it,
+ // independent of the starting FEC state.
+ for (const std::vector<NetworkState>& states_list : {on, above}) {
+ for (NetworkState net_state : states_list) {
+ for (bool initial_fec_enabled : {false, true}) {
+ auto states =
+ CreateFecControllerPlrBased(initial_fec_enabled, curve, curve);
+ UpdateNetworkMetrics(&states, net_state.bandwidth,
+ net_state.packet_loss);
+ CheckDecision(&states, true, net_state.packet_loss);
+ }
+ }
+ }
+}
+
+TEST(FecControllerPlrBasedTest, FecAlwaysOff) {
+ ThresholdCurve always_off_curve(0, 1.0f + kEpsilon, 0, 1.0f + kEpsilon);
+ for (bool initial_fec_enabled : {false, true}) {
+ for (int bandwidth : {0, 10000}) {
+ for (float packet_loss : {0.0f, 0.5f, 1.0f}) {
+ auto states = CreateFecControllerPlrBased(
+ initial_fec_enabled, always_off_curve, always_off_curve);
+ UpdateNetworkMetrics(&states, bandwidth, packet_loss);
+ CheckDecision(&states, false, packet_loss);
+ }
+ }
+ }
+}
+
+TEST(FecControllerPlrBasedTest, FecAlwaysOn) {
+ ThresholdCurve always_on_curve(0, 0.0f, 0, 0.0f);
+ for (bool initial_fec_enabled : {false, true}) {
+ for (int bandwidth : {0, 10000}) {
+ for (float packet_loss : {0.0f, 0.5f, 1.0f}) {
+ auto states = CreateFecControllerPlrBased(
+ initial_fec_enabled, always_on_curve, always_on_curve);
+ UpdateNetworkMetrics(&states, bandwidth, packet_loss);
+ CheckDecision(&states, true, packet_loss);
+ }
+ }
+ }
+}
+
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+TEST(FecControllerPlrBasedDeathTest, InvalidConfig) {
+ FecControllerPlrBasedTestStates states;
+ std::unique_ptr<MockSmoothingFilter> mock_smoothing_filter(
+ new NiceMock<MockSmoothingFilter>());
+ states.packet_loss_smoother = mock_smoothing_filter.get();
+ EXPECT_DEATH(
+ states.controller.reset(new FecControllerPlrBased(
+ FecControllerPlrBased::Config(
+ true,
+ ThresholdCurve(kDisablingBandwidthLow - 1,
+ kEnablingPacketLossAtLowBw, kEnablingBandwidthHigh,
+ kEnablingPacketLossAtHighBw),
+ ThresholdCurve(
+ kDisablingBandwidthLow, kDisablingPacketLossAtLowBw,
+ kDisablingBandwidthHigh, kDisablingPacketLossAtHighBw),
+ 0),
+ std::move(mock_smoothing_filter))),
+ "Check failed");
+}
+#endif
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/frame_length_controller.cc b/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/frame_length_controller.cc
new file mode 100644
index 0000000000..c47434f9aa
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/frame_length_controller.cc
@@ -0,0 +1,201 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/audio_network_adaptor/frame_length_controller.h"
+
+#include <algorithm>
+#include <iterator>
+#include <utility>
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+namespace {
+constexpr int kPreventOveruseMarginBps = 5000;
+
+int OverheadRateBps(size_t overhead_bytes_per_packet, int frame_length_ms) {
+ return static_cast<int>(overhead_bytes_per_packet * 8 * 1000 /
+ frame_length_ms);
+}
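+
+// For example, 20 bytes of overhead per packet at 20 ms frames means 50
+// packets per second, so OverheadRateBps(20, 20) = 20 * 8 * 1000 / 20 =
+// 8000 bps.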
+} // namespace
+
+FrameLengthController::Config::Config(
+ const std::set<int>& encoder_frame_lengths_ms,
+ int initial_frame_length_ms,
+ int min_encoder_bitrate_bps,
+ float fl_increasing_packet_loss_fraction,
+ float fl_decreasing_packet_loss_fraction,
+ int fl_increase_overhead_offset,
+ int fl_decrease_overhead_offset,
+ std::map<FrameLengthChange, int> fl_changing_bandwidths_bps)
+ : encoder_frame_lengths_ms(encoder_frame_lengths_ms),
+ initial_frame_length_ms(initial_frame_length_ms),
+ min_encoder_bitrate_bps(min_encoder_bitrate_bps),
+ fl_increasing_packet_loss_fraction(fl_increasing_packet_loss_fraction),
+ fl_decreasing_packet_loss_fraction(fl_decreasing_packet_loss_fraction),
+ fl_increase_overhead_offset(fl_increase_overhead_offset),
+ fl_decrease_overhead_offset(fl_decrease_overhead_offset),
+ fl_changing_bandwidths_bps(std::move(fl_changing_bandwidths_bps)) {}
+
+FrameLengthController::Config::Config(const Config& other) = default;
+
+FrameLengthController::Config::~Config() = default;
+
+FrameLengthController::FrameLengthController(const Config& config)
+ : config_(config) {
+ frame_length_ms_ = std::find(config_.encoder_frame_lengths_ms.begin(),
+ config_.encoder_frame_lengths_ms.end(),
+ config_.initial_frame_length_ms);
+ // `encoder_frame_lengths_ms` must contain `initial_frame_length_ms`.
+ RTC_DCHECK(frame_length_ms_ != config_.encoder_frame_lengths_ms.end());
+}
+
+FrameLengthController::~FrameLengthController() = default;
+
+void FrameLengthController::UpdateNetworkMetrics(
+ const NetworkMetrics& network_metrics) {
+ if (network_metrics.uplink_bandwidth_bps)
+ uplink_bandwidth_bps_ = network_metrics.uplink_bandwidth_bps;
+ if (network_metrics.uplink_packet_loss_fraction)
+ uplink_packet_loss_fraction_ = network_metrics.uplink_packet_loss_fraction;
+ if (network_metrics.overhead_bytes_per_packet)
+ overhead_bytes_per_packet_ = network_metrics.overhead_bytes_per_packet;
+}
+
+void FrameLengthController::MakeDecision(AudioEncoderRuntimeConfig* config) {
+ // Decision on `frame_length_ms` should not have been made.
+ RTC_DCHECK(!config->frame_length_ms);
+
+ if (FrameLengthIncreasingDecision(*config)) {
+ prev_decision_increase_ = true;
+ } else if (FrameLengthDecreasingDecision(*config)) {
+ prev_decision_increase_ = false;
+ }
+ config->last_fl_change_increase = prev_decision_increase_;
+ config->frame_length_ms = *frame_length_ms_;
+}
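+
+// Note on precedence: MakeDecision() above consults
+// FrameLengthIncreasingDecision() first, so in a step where both an increase
+// and a decrease would be admissible, the increase wins and the decreasing
+// decision is never evaluated.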
+
+FrameLengthController::Config::FrameLengthChange::FrameLengthChange(
+ int from_frame_length_ms,
+ int to_frame_length_ms)
+ : from_frame_length_ms(from_frame_length_ms),
+ to_frame_length_ms(to_frame_length_ms) {}
+
+bool FrameLengthController::Config::FrameLengthChange::operator<(
+ const FrameLengthChange& rhs) const {
+ return from_frame_length_ms < rhs.from_frame_length_ms ||
+ (from_frame_length_ms == rhs.from_frame_length_ms &&
+ to_frame_length_ms < rhs.to_frame_length_ms);
+}
+
+bool FrameLengthController::FrameLengthIncreasingDecision(
+ const AudioEncoderRuntimeConfig& config) {
+ // Increase frame length if
+ // 1. `uplink_bandwidth_bps` is known to be smaller than or equal to
+ // `min_encoder_bitrate_bps` plus `prevent_overuse_margin_bps` plus the
+ // current overhead rate OR all the following:
+ // 2. longer frame length is available AND
+ // 3. `uplink_bandwidth_bps` is known to be smaller than a threshold AND
+ // 4. `uplink_packet_loss_fraction` is known to be smaller than a threshold.
+
+ // Find next frame length to which a criterion is defined to shift from
+ // current frame length.
+ auto longer_frame_length_ms = std::next(frame_length_ms_);
+ auto increase_threshold = config_.fl_changing_bandwidths_bps.end();
+ while (longer_frame_length_ms != config_.encoder_frame_lengths_ms.end()) {
+ increase_threshold = config_.fl_changing_bandwidths_bps.find(
+ Config::FrameLengthChange(*frame_length_ms_, *longer_frame_length_ms));
+ if (increase_threshold != config_.fl_changing_bandwidths_bps.end())
+ break;
+ longer_frame_length_ms = std::next(longer_frame_length_ms);
+ }
+
+ if (increase_threshold == config_.fl_changing_bandwidths_bps.end())
+ return false;
+
+ // Check that
+ // -(*overhead_bytes_per_packet_) <= offset <= (*overhead_bytes_per_packet_)
+ RTC_DCHECK(
+ !overhead_bytes_per_packet_ ||
+ (overhead_bytes_per_packet_ &&
+ static_cast<size_t>(std::max(0, -config_.fl_increase_overhead_offset)) <=
+ *overhead_bytes_per_packet_ &&
+ static_cast<size_t>(std::max(0, config_.fl_increase_overhead_offset)) <=
+ *overhead_bytes_per_packet_));
+
+ if (uplink_bandwidth_bps_ && overhead_bytes_per_packet_ &&
+ *uplink_bandwidth_bps_ <=
+ config_.min_encoder_bitrate_bps + kPreventOveruseMarginBps +
+ OverheadRateBps(*overhead_bytes_per_packet_ +
+ config_.fl_increase_overhead_offset,
+ *frame_length_ms_)) {
+ frame_length_ms_ = longer_frame_length_ms;
+ return true;
+ }
+
+ if ((uplink_bandwidth_bps_ &&
+ *uplink_bandwidth_bps_ <= increase_threshold->second) &&
+ (uplink_packet_loss_fraction_ &&
+ *uplink_packet_loss_fraction_ <=
+ config_.fl_increasing_packet_loss_fraction)) {
+ frame_length_ms_ = longer_frame_length_ms;
+ return true;
+ }
+ return false;
+}
+
+bool FrameLengthController::FrameLengthDecreasingDecision(
+ const AudioEncoderRuntimeConfig& config) {
+ // Decrease frame length if
+ // 1. shorter frame length is available AND
+ // 2. `uplink_bandwidth_bps` is known to be bigger than
+ // `min_encoder_bitrate_bps` plus `prevent_overuse_margin_bps` plus the
+ // overhead which would be produced with the shorter frame length AND
+ // one or more of the following:
+ // 3. `uplink_bandwidth_bps` is known to be larger than a threshold,
+ // 4. `uplink_packet_loss_fraction` is known to be larger than a threshold.
+
+ // Find next frame length to which a criterion is defined to shift from
+ // current frame length.
+ auto shorter_frame_length_ms = frame_length_ms_;
+ auto decrease_threshold = config_.fl_changing_bandwidths_bps.end();
+ while (shorter_frame_length_ms != config_.encoder_frame_lengths_ms.begin()) {
+ shorter_frame_length_ms = std::prev(shorter_frame_length_ms);
+ decrease_threshold = config_.fl_changing_bandwidths_bps.find(
+ Config::FrameLengthChange(*frame_length_ms_, *shorter_frame_length_ms));
+ if (decrease_threshold != config_.fl_changing_bandwidths_bps.end())
+ break;
+ }
+
+ if (decrease_threshold == config_.fl_changing_bandwidths_bps.end())
+ return false;
+
+ if (uplink_bandwidth_bps_ && overhead_bytes_per_packet_ &&
+ *uplink_bandwidth_bps_ <=
+ config_.min_encoder_bitrate_bps + kPreventOveruseMarginBps +
+ OverheadRateBps(*overhead_bytes_per_packet_ +
+ config_.fl_decrease_overhead_offset,
+ *shorter_frame_length_ms)) {
+ return false;
+ }
+
+ if ((uplink_bandwidth_bps_ &&
+ *uplink_bandwidth_bps_ >= decrease_threshold->second) ||
+ (uplink_packet_loss_fraction_ &&
+ *uplink_packet_loss_fraction_ >=
+ config_.fl_decreasing_packet_loss_fraction)) {
+ frame_length_ms_ = shorter_frame_length_ms;
+ return true;
+ }
+ return false;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/frame_length_controller.h b/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/frame_length_controller.h
new file mode 100644
index 0000000000..04693f8db7
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/frame_length_controller.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_FRAME_LENGTH_CONTROLLER_H_
+#define MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_FRAME_LENGTH_CONTROLLER_H_
+
+#include <stddef.h>
+
+#include <map>
+#include <set>
+
+#include "absl/types/optional.h"
+#include "modules/audio_coding/audio_network_adaptor/controller.h"
+#include "modules/audio_coding/audio_network_adaptor/include/audio_network_adaptor_config.h"
+
+namespace webrtc {
+
+// Determines target frame length based on the network metrics and the decision
+// of FEC controller.
+class FrameLengthController final : public Controller {
+ public:
+ struct Config {
+ struct FrameLengthChange {
+ FrameLengthChange(int from_frame_length_ms, int to_frame_length_ms);
+ bool operator<(const FrameLengthChange& rhs) const;
+ int from_frame_length_ms;
+ int to_frame_length_ms;
+ };
+ Config(const std::set<int>& encoder_frame_lengths_ms,
+ int initial_frame_length_ms,
+ int min_encoder_bitrate_bps,
+ float fl_increasing_packet_loss_fraction,
+ float fl_decreasing_packet_loss_fraction,
+ int fl_increase_overhead_offset,
+ int fl_decrease_overhead_offset,
+ std::map<FrameLengthChange, int> fl_changing_bandwidths_bps);
+ Config(const Config& other);
+ ~Config();
+ std::set<int> encoder_frame_lengths_ms;
+ int initial_frame_length_ms;
+ int min_encoder_bitrate_bps;
+ // Uplink packet loss fraction below which frame length can increase.
+ float fl_increasing_packet_loss_fraction;
+ // Uplink packet loss fraction above which frame length should decrease.
+ float fl_decreasing_packet_loss_fraction;
+ // Offset to apply to overhead calculation when increasing frame length.
+ int fl_increase_overhead_offset;
+ // Offset to apply to overhead calculation when decreasing frame length.
+ int fl_decrease_overhead_offset;
+ std::map<FrameLengthChange, int> fl_changing_bandwidths_bps;
+ };
+
+ explicit FrameLengthController(const Config& config);
+
+ ~FrameLengthController() override;
+
+ FrameLengthController(const FrameLengthController&) = delete;
+ FrameLengthController& operator=(const FrameLengthController&) = delete;
+
+ void UpdateNetworkMetrics(const NetworkMetrics& network_metrics) override;
+
+ void MakeDecision(AudioEncoderRuntimeConfig* config) override;
+
+ private:
+ bool FrameLengthIncreasingDecision(const AudioEncoderRuntimeConfig& config);
+
+ bool FrameLengthDecreasingDecision(const AudioEncoderRuntimeConfig& config);
+
+ const Config config_;
+
+ std::set<int>::const_iterator frame_length_ms_;
+
+ absl::optional<int> uplink_bandwidth_bps_;
+
+ absl::optional<float> uplink_packet_loss_fraction_;
+
+ absl::optional<size_t> overhead_bytes_per_packet_;
+
+ // True if the previous frame length decision was an increase, otherwise
+ // false.
+ bool prev_decision_increase_ = false;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_FRAME_LENGTH_CONTROLLER_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/frame_length_controller_unittest.cc b/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/frame_length_controller_unittest.cc
new file mode 100644
index 0000000000..23123934dc
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/frame_length_controller_unittest.cc
@@ -0,0 +1,444 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/audio_network_adaptor/frame_length_controller.h"
+
+#include <memory>
+#include <utility>
+
+#include "test/gtest.h"
+
+namespace webrtc {
+
+namespace {
+
+constexpr float kFlIncreasingPacketLossFraction = 0.04f;
+constexpr float kFlDecreasingPacketLossFraction = 0.05f;
+constexpr int kFlIncreaseOverheadOffset = 0;
+constexpr int kFlDecreaseOverheadOffset = 0;
+constexpr int kMinEncoderBitrateBps = 6000;
+constexpr int kPreventOveruseMarginBps = 5000;
+constexpr size_t kOverheadBytesPerPacket = 20;
+constexpr int kFl20msTo60msBandwidthBps = 40000;
+constexpr int kFl60msTo20msBandwidthBps = 50000;
+constexpr int kFl60msTo120msBandwidthBps = 30000;
+constexpr int kFl120msTo60msBandwidthBps = 40000;
+constexpr int kFl20msTo40msBandwidthBps = 45000;
+constexpr int kFl40msTo20msBandwidthBps = 50000;
+constexpr int kFl40msTo60msBandwidthBps = 40000;
+constexpr int kFl60msTo40msBandwidthBps = 45000;
+
+constexpr int kMediumBandwidthBps =
+ (kFl40msTo20msBandwidthBps + kFl20msTo40msBandwidthBps) / 2;
+constexpr float kMediumPacketLossFraction =
+ (kFlDecreasingPacketLossFraction + kFlIncreasingPacketLossFraction) / 2;
+const std::set<int> kDefaultEncoderFrameLengthsMs = {20, 40, 60, 120};
+
+int VeryLowBitrate(int frame_length_ms) {
+ return kMinEncoderBitrateBps + kPreventOveruseMarginBps +
+ (kOverheadBytesPerPacket * 8 * 1000 / frame_length_ms);
+}
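+
+// With the constants above, VeryLowBitrate(20) = 6000 + 5000 +
+// 20 * 8 * 1000 / 20 = 19000 bps, i.e. the bandwidth at or below which the
+// controller's overuse-prevention rule applies for 20 ms frames.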
+
+std::unique_ptr<FrameLengthController> CreateController(
+ const std::map<FrameLengthController::Config::FrameLengthChange, int>&
+ frame_length_change_criteria,
+ const std::set<int>& encoder_frame_lengths_ms,
+ int initial_frame_length_ms) {
+ std::unique_ptr<FrameLengthController> controller(
+ new FrameLengthController(FrameLengthController::Config(
+ encoder_frame_lengths_ms, initial_frame_length_ms,
+ kMinEncoderBitrateBps, kFlIncreasingPacketLossFraction,
+ kFlDecreasingPacketLossFraction, kFlIncreaseOverheadOffset,
+ kFlDecreaseOverheadOffset, frame_length_change_criteria)));
+
+ return controller;
+}
+
+std::map<FrameLengthController::Config::FrameLengthChange, int>
+CreateChangeCriteriaFor20msAnd60ms() {
+ return std::map<FrameLengthController::Config::FrameLengthChange, int>{
+ {FrameLengthController::Config::FrameLengthChange(20, 60),
+ kFl20msTo60msBandwidthBps},
+ {FrameLengthController::Config::FrameLengthChange(60, 20),
+ kFl60msTo20msBandwidthBps}};
+}
+
+std::map<FrameLengthController::Config::FrameLengthChange, int>
+CreateChangeCriteriaFor20msAnd40ms() {
+ return std::map<FrameLengthController::Config::FrameLengthChange, int>{
+ {FrameLengthController::Config::FrameLengthChange(20, 40),
+ kFl20msTo40msBandwidthBps},
+ {FrameLengthController::Config::FrameLengthChange(40, 20),
+ kFl40msTo20msBandwidthBps}};
+}
+
+std::map<FrameLengthController::Config::FrameLengthChange, int>
+CreateChangeCriteriaFor20ms60msAnd120ms() {
+ return std::map<FrameLengthController::Config::FrameLengthChange, int>{
+ {FrameLengthController::Config::FrameLengthChange(20, 60),
+ kFl20msTo60msBandwidthBps},
+ {FrameLengthController::Config::FrameLengthChange(60, 20),
+ kFl60msTo20msBandwidthBps},
+ {FrameLengthController::Config::FrameLengthChange(60, 120),
+ kFl60msTo120msBandwidthBps},
+ {FrameLengthController::Config::FrameLengthChange(120, 60),
+ kFl120msTo60msBandwidthBps}};
+}
+
+std::map<FrameLengthController::Config::FrameLengthChange, int>
+CreateChangeCriteriaFor20ms40ms60msAnd120ms() {
+ return std::map<FrameLengthController::Config::FrameLengthChange, int>{
+ {FrameLengthController::Config::FrameLengthChange(20, 60),
+ kFl20msTo60msBandwidthBps},
+ {FrameLengthController::Config::FrameLengthChange(60, 20),
+ kFl60msTo20msBandwidthBps},
+ {FrameLengthController::Config::FrameLengthChange(20, 40),
+ kFl20msTo40msBandwidthBps},
+ {FrameLengthController::Config::FrameLengthChange(40, 20),
+ kFl40msTo20msBandwidthBps},
+ {FrameLengthController::Config::FrameLengthChange(40, 60),
+ kFl40msTo60msBandwidthBps},
+ {FrameLengthController::Config::FrameLengthChange(60, 40),
+ kFl60msTo40msBandwidthBps},
+ {FrameLengthController::Config::FrameLengthChange(60, 120),
+ kFl60msTo120msBandwidthBps},
+ {FrameLengthController::Config::FrameLengthChange(120, 60),
+ kFl120msTo60msBandwidthBps}};
+}
+
+std::map<FrameLengthController::Config::FrameLengthChange, int>
+CreateChangeCriteriaFor40msAnd60ms() {
+ return std::map<FrameLengthController::Config::FrameLengthChange, int>{
+ {FrameLengthController::Config::FrameLengthChange(40, 60),
+ kFl40msTo60msBandwidthBps},
+ {FrameLengthController::Config::FrameLengthChange(60, 40),
+ kFl60msTo40msBandwidthBps}};
+}
+
+void UpdateNetworkMetrics(
+ FrameLengthController* controller,
+ const absl::optional<int>& uplink_bandwidth_bps,
+ const absl::optional<float>& uplink_packet_loss_fraction,
+ const absl::optional<size_t>& overhead_bytes_per_packet) {
+ // UpdateNetworkMetrics can accept multiple network metric updates at once.
+ // However, currently, the most used case is to update one metric at a time.
+ // To reflect this fact, we separate the calls.
+ if (uplink_bandwidth_bps) {
+ Controller::NetworkMetrics network_metrics;
+ network_metrics.uplink_bandwidth_bps = uplink_bandwidth_bps;
+ controller->UpdateNetworkMetrics(network_metrics);
+ }
+ if (uplink_packet_loss_fraction) {
+ Controller::NetworkMetrics network_metrics;
+ network_metrics.uplink_packet_loss_fraction = uplink_packet_loss_fraction;
+ controller->UpdateNetworkMetrics(network_metrics);
+ }
+ if (overhead_bytes_per_packet) {
+ Controller::NetworkMetrics network_metrics;
+ network_metrics.overhead_bytes_per_packet = overhead_bytes_per_packet;
+ controller->UpdateNetworkMetrics(network_metrics);
+ }
+}
+
+void CheckDecision(FrameLengthController* controller,
+ int expected_frame_length_ms) {
+ AudioEncoderRuntimeConfig config;
+ controller->MakeDecision(&config);
+ EXPECT_EQ(expected_frame_length_ms, config.frame_length_ms);
+}
+
+} // namespace
+
+TEST(FrameLengthControllerTest, DecreaseTo20MsOnHighUplinkBandwidth) {
+ auto controller = CreateController(CreateChangeCriteriaFor20msAnd60ms(),
+ kDefaultEncoderFrameLengthsMs, 60);
+ UpdateNetworkMetrics(controller.get(), kFl60msTo20msBandwidthBps,
+ absl::nullopt, kOverheadBytesPerPacket);
+ CheckDecision(controller.get(), 20);
+}
+
+TEST(FrameLengthControllerTest, DecreaseTo20MsOnHighUplinkPacketLossFraction) {
+ auto controller = CreateController(CreateChangeCriteriaFor20msAnd60ms(),
+ kDefaultEncoderFrameLengthsMs, 60);
+ UpdateNetworkMetrics(controller.get(), absl::nullopt,
+ kFlDecreasingPacketLossFraction,
+ kOverheadBytesPerPacket);
+ CheckDecision(controller.get(), 20);
+}
+
+TEST(FrameLengthControllerTest,
+ Maintain60MsIf20MsNotInReceiverFrameLengthRange) {
+ auto controller =
+ CreateController(CreateChangeCriteriaFor20msAnd60ms(), {60}, 60);
+ // Setting FEC on would cause the frame length to decrease if the receiver
+ // frame length range included 20ms.
+ CheckDecision(controller.get(), 60);
+}
+
+TEST(FrameLengthControllerTest, IncreaseTo40MsOnMultipleConditions) {
+ // Increase to 40ms frame length if
+ // 1. `uplink_bandwidth_bps` is known to be smaller than a threshold AND
+ // 2. `uplink_packet_loss_fraction` is known to be smaller than a threshold
+ // AND
+ // 3. FEC is not decided or OFF.
+ auto controller = CreateController(CreateChangeCriteriaFor20msAnd40ms(),
+ kDefaultEncoderFrameLengthsMs, 20);
+ UpdateNetworkMetrics(controller.get(), kFl20msTo40msBandwidthBps,
+ kFlIncreasingPacketLossFraction,
+ kOverheadBytesPerPacket);
+ CheckDecision(controller.get(), 40);
+}
+
+TEST(FrameLengthControllerTest, DecreaseTo40MsOnHighUplinkBandwidth) {
+ auto controller = CreateController(CreateChangeCriteriaFor40msAnd60ms(),
+ kDefaultEncoderFrameLengthsMs, 40);
+ UpdateNetworkMetrics(controller.get(), kFl60msTo40msBandwidthBps,
+ absl::nullopt, kOverheadBytesPerPacket);
+ CheckDecision(controller.get(), 40);
+}
+
+TEST(FrameLengthControllerTest, Maintain60MsOnMultipleConditions) {
+ // Maintain 60ms frame length if
+ // 1. `uplink_bandwidth_bps` is at a medium level,
+ // 2. `uplink_packet_loss_fraction` is at a medium level,
+ // 3. FEC is not decided ON.
+ auto controller = CreateController(CreateChangeCriteriaFor20msAnd60ms(),
+ kDefaultEncoderFrameLengthsMs, 60);
+ UpdateNetworkMetrics(controller.get(), kMediumBandwidthBps,
+ kMediumPacketLossFraction, kOverheadBytesPerPacket);
+ CheckDecision(controller.get(), 60);
+}
+
+TEST(FrameLengthControllerTest, IncreaseTo60MsOnMultipleConditions) {
+ // Increase to 60ms frame length if
+ // 1. `uplink_bandwidth_bps` is known to be smaller than a threshold AND
+ // 2. `uplink_packet_loss_fraction` is known to be smaller than a threshold
+ // AND
+ // 3. FEC is not decided or OFF.
+ auto controller = CreateController(CreateChangeCriteriaFor20msAnd60ms(),
+ kDefaultEncoderFrameLengthsMs, 20);
+ UpdateNetworkMetrics(controller.get(), kFl20msTo60msBandwidthBps,
+ kFlIncreasingPacketLossFraction,
+ kOverheadBytesPerPacket);
+ CheckDecision(controller.get(), 60);
+}
+
+TEST(FrameLengthControllerTest, IncreaseTo60MsOnVeryLowUplinkBandwidth) {
+ auto controller = CreateController(CreateChangeCriteriaFor20msAnd60ms(),
+ kDefaultEncoderFrameLengthsMs, 20);
+ // We set packet loss fraction to kFlDecreasingPacketLossFraction, which
+ // should have prevented the frame length from increasing, if the uplink
+ // bandwidth was not this low.
+ UpdateNetworkMetrics(controller.get(), VeryLowBitrate(20),
+ kFlDecreasingPacketLossFraction,
+ kOverheadBytesPerPacket);
+ CheckDecision(controller.get(), 60);
+}
+
+TEST(FrameLengthControllerTest, Maintain60MsOnVeryLowUplinkBandwidth) {
+ auto controller = CreateController(CreateChangeCriteriaFor20msAnd60ms(),
+ kDefaultEncoderFrameLengthsMs, 60);
+ // We set the packet loss fraction to kFlDecreasingPacketLossFraction, which
+ // should have caused the frame length to decrease, if the uplink bandwidth
+ // was not this low.
+ UpdateNetworkMetrics(controller.get(), VeryLowBitrate(20),
+ kFlDecreasingPacketLossFraction,
+ kOverheadBytesPerPacket);
+ CheckDecision(controller.get(), 60);
+}
+
+TEST(FrameLengthControllerTest, UpdateMultipleNetworkMetricsAtOnce) {
+ // This test is similar to IncreaseTo60MsOnMultipleConditions. But instead of
+ // using ::UpdateNetworkMetrics(...), which calls
+ // FrameLengthController::UpdateNetworkMetrics(...) multiple times, we call
+ // it only once. This is to verify that
+ // FrameLengthController::UpdateNetworkMetrics(...) can handle multiple
+ // network updates at once. This is, however, not a common use case in current
+ // audio_network_adaptor_impl.cc.
+ auto controller = CreateController(CreateChangeCriteriaFor20msAnd60ms(),
+ kDefaultEncoderFrameLengthsMs, 20);
+ Controller::NetworkMetrics network_metrics;
+ network_metrics.uplink_bandwidth_bps = kFl20msTo60msBandwidthBps;
+ network_metrics.uplink_packet_loss_fraction = kFlIncreasingPacketLossFraction;
+ controller->UpdateNetworkMetrics(network_metrics);
+ CheckDecision(controller.get(), 60);
+}
+
+TEST(FrameLengthControllerTest,
+ Maintain20MsIf60MsNotInReceiverFrameLengthRange) {
+ auto controller =
+ CreateController(CreateChangeCriteriaFor20msAnd60ms(), {20}, 20);
+ // Use a low uplink bandwidth and a low uplink packet loss fraction that
+ // would cause frame length to increase if the receiver frame length range
+ // included 60ms.
+ UpdateNetworkMetrics(controller.get(), kFl20msTo60msBandwidthBps,
+ kFlIncreasingPacketLossFraction,
+ kOverheadBytesPerPacket);
+ CheckDecision(controller.get(), 20);
+}
+
+TEST(FrameLengthControllerTest, Maintain20MsOnMediumUplinkBandwidth) {
+ auto controller = CreateController(CreateChangeCriteriaFor20msAnd60ms(),
+ kDefaultEncoderFrameLengthsMs, 20);
+ UpdateNetworkMetrics(controller.get(), kMediumBandwidthBps,
+ kFlIncreasingPacketLossFraction,
+ kOverheadBytesPerPacket);
+ CheckDecision(controller.get(), 20);
+}
+
+TEST(FrameLengthControllerTest, Maintain20MsOnMediumUplinkPacketLossFraction) {
+ auto controller = CreateController(CreateChangeCriteriaFor20msAnd60ms(),
+ kDefaultEncoderFrameLengthsMs, 20);
+ // Use a low uplink bandwidth that would cause frame length to increase if
+ // uplink packet loss fraction was low.
+ UpdateNetworkMetrics(controller.get(), kFl20msTo60msBandwidthBps,
+ kMediumPacketLossFraction, kOverheadBytesPerPacket);
+ CheckDecision(controller.get(), 20);
+}
+
+TEST(FrameLengthControllerTest, Maintain60MsWhenNo120msCriteriaIsSet) {
+ auto controller = CreateController(CreateChangeCriteriaFor20msAnd60ms(),
+ kDefaultEncoderFrameLengthsMs, 60);
+ UpdateNetworkMetrics(controller.get(), kFl60msTo120msBandwidthBps,
+ kFlIncreasingPacketLossFraction,
+ kOverheadBytesPerPacket);
+ CheckDecision(controller.get(), 60);
+}
+
+TEST(FrameLengthControllerTest, From120MsTo20MsOnHighUplinkBandwidth) {
+ auto controller = CreateController(CreateChangeCriteriaFor20ms60msAnd120ms(),
+ kDefaultEncoderFrameLengthsMs, 120);
+ // It takes two steps for frame length to go from 120ms to 20ms.
+ UpdateNetworkMetrics(controller.get(), kFl60msTo20msBandwidthBps,
+ absl::nullopt, kOverheadBytesPerPacket);
+ CheckDecision(controller.get(), 60);
+
+ UpdateNetworkMetrics(controller.get(), kFl60msTo20msBandwidthBps,
+ absl::nullopt, kOverheadBytesPerPacket);
+ CheckDecision(controller.get(), 20);
+}
+
+TEST(FrameLengthControllerTest, From120MsTo20MsOnHighUplinkPacketLossFraction) {
+ auto controller = CreateController(CreateChangeCriteriaFor20ms60msAnd120ms(),
+ kDefaultEncoderFrameLengthsMs, 120);
+ // It takes two steps for frame length to go from 120ms to 20ms.
+ UpdateNetworkMetrics(controller.get(), absl::nullopt,
+ kFlDecreasingPacketLossFraction,
+ kOverheadBytesPerPacket);
+ CheckDecision(controller.get(), 60);
+
+ UpdateNetworkMetrics(controller.get(), absl::nullopt,
+ kFlDecreasingPacketLossFraction,
+ kOverheadBytesPerPacket);
+ CheckDecision(controller.get(), 20);
+}
+
+TEST(FrameLengthControllerTest, Maintain120MsOnVeryLowUplinkBandwidth) {
+ auto controller = CreateController(CreateChangeCriteriaFor20ms60msAnd120ms(),
+ kDefaultEncoderFrameLengthsMs, 120);
+ // We set the packet loss fraction to kFlDecreasingPacketLossFraction, which
+ // should have caused the frame length to decrease, if the uplink bandwidth
+ // was not this low.
+ UpdateNetworkMetrics(controller.get(), VeryLowBitrate(60),
+ kFlDecreasingPacketLossFraction,
+ kOverheadBytesPerPacket);
+ CheckDecision(controller.get(), 120);
+}
+
+TEST(FrameLengthControllerTest, From60MsTo120MsOnVeryLowUplinkBandwidth) {
+ auto controller = CreateController(CreateChangeCriteriaFor20ms60msAnd120ms(),
+ kDefaultEncoderFrameLengthsMs, 60);
+ // We set the packet loss fraction to kFlDecreasingPacketLossFraction, which
+ // should have prevented the frame length from increasing, if the uplink
+ // bandwidth was not this low.
+ UpdateNetworkMetrics(controller.get(), VeryLowBitrate(60),
+ kFlDecreasingPacketLossFraction,
+ kOverheadBytesPerPacket);
+ CheckDecision(controller.get(), 120);
+}
+
+TEST(FrameLengthControllerTest, From20MsTo120MsOnMultipleConditions) {
+ // Increase to 120ms frame length if
+ // 1. `uplink_bandwidth_bps` is known to be smaller than a threshold AND
+ // 2. `uplink_packet_loss_fraction` is known to be smaller than a threshold.
+ auto controller = CreateController(CreateChangeCriteriaFor20ms60msAnd120ms(),
+ kDefaultEncoderFrameLengthsMs, 20);
+ // It takes two steps for frame length to go from 20ms to 120ms.
+ UpdateNetworkMetrics(controller.get(), kFl60msTo120msBandwidthBps,
+ kFlIncreasingPacketLossFraction,
+ kOverheadBytesPerPacket);
+ CheckDecision(controller.get(), 60);
+ UpdateNetworkMetrics(controller.get(), kFl60msTo120msBandwidthBps,
+ kFlIncreasingPacketLossFraction,
+ kOverheadBytesPerPacket);
+ CheckDecision(controller.get(), 120);
+}
+
+TEST(FrameLengthControllerTest, Stall60MsIf120MsNotInReceiverFrameLengthRange) {
+ auto controller =
+ CreateController(CreateChangeCriteriaFor20ms60msAnd120ms(), {20, 60}, 20);
+ UpdateNetworkMetrics(controller.get(), kFl60msTo120msBandwidthBps,
+ kFlIncreasingPacketLossFraction,
+ kOverheadBytesPerPacket);
+ CheckDecision(controller.get(), 60);
+ UpdateNetworkMetrics(controller.get(), kFl60msTo120msBandwidthBps,
+ kFlIncreasingPacketLossFraction,
+ kOverheadBytesPerPacket);
+ CheckDecision(controller.get(), 60);
+}
+
+TEST(FrameLengthControllerTest, CheckBehaviorOnChangingNetworkMetrics) {
+ auto controller =
+ CreateController(CreateChangeCriteriaFor20ms40ms60msAnd120ms(),
+ kDefaultEncoderFrameLengthsMs, 20);
+ UpdateNetworkMetrics(controller.get(), kMediumBandwidthBps,
+ kFlIncreasingPacketLossFraction,
+ kOverheadBytesPerPacket);
+ CheckDecision(controller.get(), 20);
+
+ UpdateNetworkMetrics(controller.get(), kFl20msTo40msBandwidthBps,
+ kFlIncreasingPacketLossFraction,
+ kOverheadBytesPerPacket);
+ CheckDecision(controller.get(), 40);
+
+ UpdateNetworkMetrics(controller.get(), kFl60msTo40msBandwidthBps,
+ kMediumPacketLossFraction, kOverheadBytesPerPacket);
+ CheckDecision(controller.get(), 40);
+
+ UpdateNetworkMetrics(controller.get(), kFl20msTo60msBandwidthBps,
+ kFlIncreasingPacketLossFraction,
+ kOverheadBytesPerPacket);
+ CheckDecision(controller.get(), 60);
+
+ UpdateNetworkMetrics(controller.get(), kFl60msTo120msBandwidthBps,
+ kMediumPacketLossFraction, kOverheadBytesPerPacket);
+ CheckDecision(controller.get(), 60);
+
+ UpdateNetworkMetrics(controller.get(), kFl60msTo120msBandwidthBps,
+ kFlIncreasingPacketLossFraction,
+ kOverheadBytesPerPacket);
+ CheckDecision(controller.get(), 120);
+
+ UpdateNetworkMetrics(controller.get(), kFl120msTo60msBandwidthBps,
+ kFlIncreasingPacketLossFraction,
+ kOverheadBytesPerPacket);
+ CheckDecision(controller.get(), 60);
+
+ UpdateNetworkMetrics(controller.get(), kFl60msTo40msBandwidthBps,
+ kFlDecreasingPacketLossFraction,
+ kOverheadBytesPerPacket);
+ CheckDecision(controller.get(), 40);
+
+ UpdateNetworkMetrics(controller.get(), kMediumBandwidthBps,
+ kFlDecreasingPacketLossFraction,
+ kOverheadBytesPerPacket);
+ CheckDecision(controller.get(), 20);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/frame_length_controller_v2.cc b/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/frame_length_controller_v2.cc
new file mode 100644
index 0000000000..36fc10ba82
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/frame_length_controller_v2.cc
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/audio_network_adaptor/frame_length_controller_v2.h"
+
+#include <algorithm>
+
+#include "absl/algorithm/container.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace {
+
+int OverheadBps(int overhead_bytes_per_packet, int frame_length_ms) {
+ return overhead_bytes_per_packet * 8 * 1000 / frame_length_ms;
+}
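+// For example, 50 overhead bytes per packet at a 20ms frame length amounts
+// to OverheadBps(50, 20) == 50 * 8 * 1000 / 20 == 20000 bps.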
+
+} // namespace
+
+FrameLengthControllerV2::FrameLengthControllerV2(
+ rtc::ArrayView<const int> encoder_frame_lengths_ms,
+ int min_payload_bitrate_bps,
+ bool use_slow_adaptation)
+ : encoder_frame_lengths_ms_(encoder_frame_lengths_ms.begin(),
+ encoder_frame_lengths_ms.end()),
+ min_payload_bitrate_bps_(min_payload_bitrate_bps),
+ use_slow_adaptation_(use_slow_adaptation) {
+ RTC_CHECK(!encoder_frame_lengths_ms_.empty());
+ absl::c_sort(encoder_frame_lengths_ms_);
+}
+
+void FrameLengthControllerV2::UpdateNetworkMetrics(
+ const NetworkMetrics& network_metrics) {
+ if (network_metrics.target_audio_bitrate_bps) {
+ target_bitrate_bps_ = network_metrics.target_audio_bitrate_bps;
+ }
+ if (network_metrics.overhead_bytes_per_packet) {
+ overhead_bytes_per_packet_ = network_metrics.overhead_bytes_per_packet;
+ }
+ if (network_metrics.uplink_bandwidth_bps) {
+ uplink_bandwidth_bps_ = network_metrics.uplink_bandwidth_bps;
+ }
+}
+
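+// Chooses the shortest encoder frame length whose estimated payload bitrate
+// (the target bitrate, or the uplink bandwidth under slow adaptation, minus
+// the packetization overhead) exceeds min_payload_bitrate_bps_. For example,
+// with a 32000 bps target, 50 overhead bytes per packet and a 16000 bps
+// minimum, 20ms leaves 32000 - 20000 = 12000 bps, while 40ms leaves
+// 32000 - 10000 = 22000 bps, so 40ms is chosen.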
+void FrameLengthControllerV2::MakeDecision(AudioEncoderRuntimeConfig* config) {
+ if (!target_bitrate_bps_ || !overhead_bytes_per_packet_ ||
+ !uplink_bandwidth_bps_) {
+ return;
+ }
+
+ auto it =
+ absl::c_find_if(encoder_frame_lengths_ms_, [&](int frame_length_ms) {
+ int target = use_slow_adaptation_ ? *uplink_bandwidth_bps_
+ : *target_bitrate_bps_;
+ return target -
+ OverheadBps(*overhead_bytes_per_packet_, frame_length_ms) >
+ min_payload_bitrate_bps_;
+ });
+
+  // The longest frame length is chosen if none satisfies our criteria.
+ config->frame_length_ms = it != encoder_frame_lengths_ms_.end()
+ ? *it
+ : encoder_frame_lengths_ms_.back();
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/frame_length_controller_v2.h b/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/frame_length_controller_v2.h
new file mode 100644
index 0000000000..d7102b0b44
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/frame_length_controller_v2.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_FRAME_LENGTH_CONTROLLER_V2_H_
+#define MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_FRAME_LENGTH_CONTROLLER_V2_H_
+
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "modules/audio_coding/audio_network_adaptor/controller.h"
+#include "modules/audio_coding/audio_network_adaptor/include/audio_network_adaptor.h"
+
+namespace webrtc {
+
+class FrameLengthControllerV2 final : public Controller {
+ public:
+ FrameLengthControllerV2(rtc::ArrayView<const int> encoder_frame_lengths_ms,
+ int min_payload_bitrate_bps,
+ bool use_slow_adaptation);
+
+ void UpdateNetworkMetrics(const NetworkMetrics& network_metrics) override;
+
+ void MakeDecision(AudioEncoderRuntimeConfig* config) override;
+
+ private:
+ std::vector<int> encoder_frame_lengths_ms_;
+ const int min_payload_bitrate_bps_;
+ const bool use_slow_adaptation_;
+
+ absl::optional<int> uplink_bandwidth_bps_;
+ absl::optional<int> target_bitrate_bps_;
+ absl::optional<int> overhead_bytes_per_packet_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_FRAME_LENGTH_CONTROLLER_V2_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/frame_length_controller_v2_unittest.cc b/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/frame_length_controller_v2_unittest.cc
new file mode 100644
index 0000000000..1c88f47c58
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/frame_length_controller_v2_unittest.cc
@@ -0,0 +1,127 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/audio_network_adaptor/frame_length_controller_v2.h"
+
+#include <algorithm>
+#include <memory>
+
+#include "modules/audio_coding/audio_network_adaptor/controller.h"
+#include "modules/audio_coding/audio_network_adaptor/include/audio_network_adaptor_config.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+constexpr int kANASupportedFrameLengths[] = {20, 40, 60, 120};
+constexpr int kMinPayloadBitrateBps = 16000;
+
+} // namespace
+
+class FrameLengthControllerV2Test : public testing::Test {
+ protected:
+ AudioEncoderRuntimeConfig GetDecision() {
+ AudioEncoderRuntimeConfig config;
+ controller_->MakeDecision(&config);
+ return config;
+ }
+
+ void SetOverhead(int overhead_bytes_per_packet) {
+ overhead_bytes_per_packet_ = overhead_bytes_per_packet;
+ Controller::NetworkMetrics metrics;
+ metrics.overhead_bytes_per_packet = overhead_bytes_per_packet;
+ controller_->UpdateNetworkMetrics(metrics);
+ }
+
+ void SetTargetBitrate(int target_audio_bitrate_bps) {
+ target_audio_bitrate_bps_ = target_audio_bitrate_bps;
+ Controller::NetworkMetrics metrics;
+ metrics.target_audio_bitrate_bps = target_audio_bitrate_bps;
+ controller_->UpdateNetworkMetrics(metrics);
+ }
+
+ void SetUplinkBandwidth(int uplink_bandwidth_bps) {
+ Controller::NetworkMetrics metrics;
+ metrics.uplink_bandwidth_bps = uplink_bandwidth_bps;
+ controller_->UpdateNetworkMetrics(metrics);
+ }
+
+ void ExpectFrameLengthDecision(int expected_frame_length_ms) {
+ auto config = GetDecision();
+ EXPECT_EQ(*config.frame_length_ms, expected_frame_length_ms);
+ }
+
+ std::unique_ptr<FrameLengthControllerV2> controller_ =
+ std::make_unique<FrameLengthControllerV2>(kANASupportedFrameLengths,
+ kMinPayloadBitrateBps,
+ /*use_slow_adaptation=*/false);
+ absl::optional<int> target_audio_bitrate_bps_;
+ absl::optional<int> overhead_bytes_per_packet_;
+};
+
+// Don't return any decision if we haven't received all required network
+// metrics.
+TEST_F(FrameLengthControllerV2Test, RequireNetworkMetrics) {
+ auto config = GetDecision();
+ EXPECT_FALSE(config.bitrate_bps);
+ EXPECT_FALSE(config.frame_length_ms);
+
+ SetOverhead(30);
+ config = GetDecision();
+ EXPECT_FALSE(config.frame_length_ms);
+
+ SetTargetBitrate(32000);
+ config = GetDecision();
+ EXPECT_FALSE(config.frame_length_ms);
+
+ SetUplinkBandwidth(32000);
+ config = GetDecision();
+ EXPECT_TRUE(config.frame_length_ms);
+}
+
+TEST_F(FrameLengthControllerV2Test, UseFastAdaptation) {
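+  // With 50 overhead bytes per packet, the overhead bitrate is 20000, 10000,
+  // 6666 and 3333 bps at 20ms, 40ms, 60ms and 120ms respectively; the
+  // controller picks the shortest frame length whose remaining payload
+  // bitrate exceeds kMinPayloadBitrateBps (16000 bps).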
+ SetOverhead(50);
+ SetTargetBitrate(50000);
+ SetUplinkBandwidth(50000);
+ ExpectFrameLengthDecision(20);
+
+ SetTargetBitrate(20000);
+ ExpectFrameLengthDecision(120);
+
+ SetTargetBitrate(30000);
+ ExpectFrameLengthDecision(40);
+
+ SetTargetBitrate(25000);
+ ExpectFrameLengthDecision(60);
+}
+
+TEST_F(FrameLengthControllerV2Test, UseSlowAdaptation) {
+ controller_ = std::make_unique<FrameLengthControllerV2>(
+ kANASupportedFrameLengths, kMinPayloadBitrateBps,
+ /*use_slow_adaptation=*/true);
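+  // With slow adaptation, the same payload computation is keyed off the
+  // uplink bandwidth estimate instead of the target bitrate.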
+ SetOverhead(50);
+ SetTargetBitrate(50000);
+ SetUplinkBandwidth(20000);
+ ExpectFrameLengthDecision(120);
+
+ SetUplinkBandwidth(30000);
+ ExpectFrameLengthDecision(40);
+
+ SetUplinkBandwidth(40000);
+ ExpectFrameLengthDecision(20);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/include/audio_network_adaptor.h b/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/include/audio_network_adaptor.h
new file mode 100644
index 0000000000..346ed5db1a
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/include/audio_network_adaptor.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_INCLUDE_AUDIO_NETWORK_ADAPTOR_H_
+#define MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_INCLUDE_AUDIO_NETWORK_ADAPTOR_H_
+
+#include "absl/types/optional.h"
+#include "api/audio_codecs/audio_encoder.h"
+#include "modules/audio_coding/audio_network_adaptor/include/audio_network_adaptor_config.h"
+
+namespace webrtc {
+
+// An AudioNetworkAdaptor optimizes the audio experience by suggesting a
+// suitable runtime configuration (bit rate, frame length, FEC, etc.) to the
+// encoder based on network metrics.
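+//
+// A minimal usage sketch (assuming an already-constructed `adaptor`):
+//   adaptor->SetUplinkBandwidth(32000);
+//   adaptor->SetTargetAudioBitrate(32000);
+//   adaptor->SetOverhead(50);
+//   AudioEncoderRuntimeConfig config = adaptor->GetEncoderRuntimeConfig();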
+class AudioNetworkAdaptor {
+ public:
+ virtual ~AudioNetworkAdaptor() = default;
+
+ virtual void SetUplinkBandwidth(int uplink_bandwidth_bps) = 0;
+
+ virtual void SetUplinkPacketLossFraction(
+ float uplink_packet_loss_fraction) = 0;
+
+ virtual void SetRtt(int rtt_ms) = 0;
+
+ virtual void SetTargetAudioBitrate(int target_audio_bitrate_bps) = 0;
+
+ virtual void SetOverhead(size_t overhead_bytes_per_packet) = 0;
+
+ virtual AudioEncoderRuntimeConfig GetEncoderRuntimeConfig() = 0;
+
+ virtual void StartDebugDump(FILE* file_handle) = 0;
+
+ virtual void StopDebugDump() = 0;
+
+ virtual ANAStats GetStats() const = 0;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_INCLUDE_AUDIO_NETWORK_ADAPTOR_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/include/audio_network_adaptor_config.h b/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/include/audio_network_adaptor_config.h
new file mode 100644
index 0000000000..bd16292f7e
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/include/audio_network_adaptor_config.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_INCLUDE_AUDIO_NETWORK_ADAPTOR_CONFIG_H_
+#define MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_INCLUDE_AUDIO_NETWORK_ADAPTOR_CONFIG_H_
+
+#include <stddef.h>
+
+#include "absl/types/optional.h"
+
+namespace webrtc {
+
+struct AudioEncoderRuntimeConfig {
+ AudioEncoderRuntimeConfig();
+ AudioEncoderRuntimeConfig(const AudioEncoderRuntimeConfig& other);
+ ~AudioEncoderRuntimeConfig();
+ AudioEncoderRuntimeConfig& operator=(const AudioEncoderRuntimeConfig& other);
+ bool operator==(const AudioEncoderRuntimeConfig& other) const;
+ absl::optional<int> bitrate_bps;
+ absl::optional<int> frame_length_ms;
+ // Note: This is what we tell the encoder. It doesn't have to reflect
+ // the actual NetworkMetrics; it's subject to our decision.
+ absl::optional<float> uplink_packet_loss_fraction;
+ absl::optional<bool> enable_fec;
+ absl::optional<bool> enable_dtx;
+
+ // Some encoders can encode fewer channels than the actual input to make
+ // better use of the bandwidth. `num_channels` sets the number of channels
+ // to encode.
+ absl::optional<size_t> num_channels;
+
+  // This is true if the last frame length change was an increase, and false
+  // otherwise.
+ // The value of this boolean is used to apply a different offset to the
+ // per-packet overhead that is reported by the BWE. The exact offset value
+ // is most important right after a frame length change, because the frame
+ // length change affects the overhead. In the steady state, the exact value is
+ // not important because the BWE will compensate.
+ bool last_fl_change_increase = false;
+};
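+
+// A controller typically fills in only the fields it decides on, e.g. (an
+// illustrative sketch, not taken from any particular controller):
+//   AudioEncoderRuntimeConfig config;
+//   config.frame_length_ms = 60;
+//   config.enable_fec = true;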
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_INCLUDE_AUDIO_NETWORK_ADAPTOR_CONFIG_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/mock/mock_audio_network_adaptor.h b/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/mock/mock_audio_network_adaptor.h
new file mode 100644
index 0000000000..26a9061745
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/mock/mock_audio_network_adaptor.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_MOCK_MOCK_AUDIO_NETWORK_ADAPTOR_H_
+#define MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_MOCK_MOCK_AUDIO_NETWORK_ADAPTOR_H_
+
+#include "modules/audio_coding/audio_network_adaptor/include/audio_network_adaptor.h"
+#include "test/gmock.h"
+
+namespace webrtc {
+
+class MockAudioNetworkAdaptor : public AudioNetworkAdaptor {
+ public:
+ ~MockAudioNetworkAdaptor() override { Die(); }
+ MOCK_METHOD(void, Die, ());
+
+ MOCK_METHOD(void, SetUplinkBandwidth, (int uplink_bandwidth_bps), (override));
+
+ MOCK_METHOD(void,
+ SetUplinkPacketLossFraction,
+ (float uplink_packet_loss_fraction),
+ (override));
+
+ MOCK_METHOD(void, SetRtt, (int rtt_ms), (override));
+
+ MOCK_METHOD(void,
+ SetTargetAudioBitrate,
+ (int target_audio_bitrate_bps),
+ (override));
+
+ MOCK_METHOD(void,
+ SetOverhead,
+ (size_t overhead_bytes_per_packet),
+ (override));
+
+ MOCK_METHOD(AudioEncoderRuntimeConfig,
+ GetEncoderRuntimeConfig,
+ (),
+ (override));
+
+ MOCK_METHOD(void, StartDebugDump, (FILE * file_handle), (override));
+
+ MOCK_METHOD(void, StopDebugDump, (), (override));
+
+ MOCK_METHOD(ANAStats, GetStats, (), (const, override));
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_MOCK_MOCK_AUDIO_NETWORK_ADAPTOR_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/mock/mock_controller.h b/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/mock/mock_controller.h
new file mode 100644
index 0000000000..de554c0517
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/mock/mock_controller.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_MOCK_MOCK_CONTROLLER_H_
+#define MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_MOCK_MOCK_CONTROLLER_H_
+
+#include "modules/audio_coding/audio_network_adaptor/controller.h"
+#include "test/gmock.h"
+
+namespace webrtc {
+
+class MockController : public Controller {
+ public:
+ ~MockController() override { Die(); }
+ MOCK_METHOD(void, Die, ());
+ MOCK_METHOD(void,
+ UpdateNetworkMetrics,
+ (const NetworkMetrics& network_metrics),
+ (override));
+ MOCK_METHOD(void,
+ MakeDecision,
+ (AudioEncoderRuntimeConfig * config),
+ (override));
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_MOCK_MOCK_CONTROLLER_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/mock/mock_controller_manager.h b/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/mock/mock_controller_manager.h
new file mode 100644
index 0000000000..9e2fa466fc
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/mock/mock_controller_manager.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_MOCK_MOCK_CONTROLLER_MANAGER_H_
+#define MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_MOCK_MOCK_CONTROLLER_MANAGER_H_
+
+#include <vector>
+
+#include "modules/audio_coding/audio_network_adaptor/controller_manager.h"
+#include "test/gmock.h"
+
+namespace webrtc {
+
+class MockControllerManager : public ControllerManager {
+ public:
+ ~MockControllerManager() override { Die(); }
+ MOCK_METHOD(void, Die, ());
+ MOCK_METHOD(std::vector<Controller*>,
+ GetSortedControllers,
+ (const Controller::NetworkMetrics& metrics),
+ (override));
+ MOCK_METHOD(std::vector<Controller*>, GetControllers, (), (const, override));
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_MOCK_MOCK_CONTROLLER_MANAGER_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/mock/mock_debug_dump_writer.h b/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/mock/mock_debug_dump_writer.h
new file mode 100644
index 0000000000..0c6a9efe1d
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/mock/mock_debug_dump_writer.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_MOCK_MOCK_DEBUG_DUMP_WRITER_H_
+#define MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_MOCK_MOCK_DEBUG_DUMP_WRITER_H_
+
+#include "modules/audio_coding/audio_network_adaptor/debug_dump_writer.h"
+#include "test/gmock.h"
+
+namespace webrtc {
+
+class MockDebugDumpWriter : public DebugDumpWriter {
+ public:
+ ~MockDebugDumpWriter() override { Die(); }
+ MOCK_METHOD(void, Die, ());
+
+ MOCK_METHOD(void,
+ DumpEncoderRuntimeConfig,
+ (const AudioEncoderRuntimeConfig& config, int64_t timestamp),
+ (override));
+ MOCK_METHOD(void,
+ DumpNetworkMetrics,
+ (const Controller::NetworkMetrics& metrics, int64_t timestamp),
+ (override));
+#if WEBRTC_ENABLE_PROTOBUF
+ MOCK_METHOD(void,
+ DumpControllerManagerConfig,
+ (const audio_network_adaptor::config::ControllerManager&
+ controller_manager_config,
+ int64_t timestamp),
+ (override));
+#endif
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_MOCK_MOCK_DEBUG_DUMP_WRITER_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/parse_ana_dump.py b/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/parse_ana_dump.py
new file mode 100755
index 0000000000..9c07c18c84
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/parse_ana_dump.py
@@ -0,0 +1,152 @@
+#!/usr/bin/python2
+# Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+# To run this script, please copy "out/<build_name>/pyproto/webrtc/modules/
+# audio_coding/audio_network_adaptor/debug_dump_pb2.py" to this folder.
+# Then you can run this script with:
+# "python parse_ana_dump.py -m uplink_bandwidth_bps -f dump_file.dat"
+# You can add as many metrics or decisions to the plot as you like.
+# For more information, call:
+# "python parse_ana_dump.py --help"
+
+import struct
+from optparse import OptionParser
+
+import matplotlib.pyplot as plt
+
+import debug_dump_pb2
+
+
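+# The dump file is a sequence of length-prefixed records: a 4-byte
+# little-endian message size followed by a serialized debug_dump_pb2.Event
+# of that size.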
+def GetNextMessageSize(file_to_parse):
+ data = file_to_parse.read(4)
+    if len(data) < 4:
+ return 0
+ return struct.unpack('<I', data)[0]
+
+
+def GetNextMessageFromFile(file_to_parse):
+ message_size = GetNextMessageSize(file_to_parse)
+ if message_size == 0:
+ return None
+ try:
+ event = debug_dump_pb2.Event()
+ event.ParseFromString(file_to_parse.read(message_size))
+ except IOError:
+ print 'Invalid message in file'
+ return None
+ return event
+
+
+def InitMetrics():
+ metrics = {}
+ event = debug_dump_pb2.Event()
+ for metric in event.network_metrics.DESCRIPTOR.fields:
+ metrics[metric.name] = {'time': [], 'value': []}
+ return metrics
+
+
+def InitDecisions():
+ decisions = {}
+ event = debug_dump_pb2.Event()
+ for decision in event.encoder_runtime_config.DESCRIPTOR.fields:
+ decisions[decision.name] = {'time': [], 'value': []}
+ return decisions
+
+
+def ParseAnaDump(dump_file_to_parse):
+ with open(dump_file_to_parse, 'rb') as file_to_parse:
+ metrics = InitMetrics()
+ decisions = InitDecisions()
+ first_time_stamp = None
+ while True:
+ event = GetNextMessageFromFile(file_to_parse)
+ if event is None:
+ break
+ if first_time_stamp is None:
+ first_time_stamp = event.timestamp
+ if event.type == debug_dump_pb2.Event.ENCODER_RUNTIME_CONFIG:
+ for decision in event.encoder_runtime_config.DESCRIPTOR.fields:
+ if event.encoder_runtime_config.HasField(decision.name):
+ decisions[decision.name]['time'].append(
+ event.timestamp - first_time_stamp)
+ decisions[decision.name]['value'].append(
+ getattr(event.encoder_runtime_config,
+ decision.name))
+ if event.type == debug_dump_pb2.Event.NETWORK_METRICS:
+ for metric in event.network_metrics.DESCRIPTOR.fields:
+ if event.network_metrics.HasField(metric.name):
+ metrics[metric.name]['time'].append(event.timestamp -
+ first_time_stamp)
+ metrics[metric.name]['value'].append(
+ getattr(event.network_metrics, metric.name))
+ return (metrics, decisions)
+
+
+def main():
+ parser = OptionParser()
+ parser.add_option("-f",
+ "--dump_file",
+ dest="dump_file_to_parse",
+ help="dump file to parse")
+ parser.add_option('-m',
+ '--metric_plot',
+ default=[],
+ type=str,
+ help='metric key (name of the metric) to plot',
+ dest='metric_keys',
+ action='append')
+
+ parser.add_option('-d',
+ '--decision_plot',
+ default=[],
+ type=str,
+ help='decision key (name of the decision) to plot',
+ dest='decision_keys',
+ action='append')
+
+ options = parser.parse_args()[0]
+ if options.dump_file_to_parse is None:
+ print "No dump file to parse is set.\n"
+ parser.print_help()
+ exit()
+ (metrics, decisions) = ParseAnaDump(options.dump_file_to_parse)
+ metric_keys = options.metric_keys
+ decision_keys = options.decision_keys
+ plot_count = len(metric_keys) + len(decision_keys)
+ if plot_count == 0:
+ print "You have to set at least one metric or decision to plot.\n"
+ parser.print_help()
+ exit()
+ plots = []
+ if plot_count == 1:
+ f, mp_plot = plt.subplots()
+ plots.append(mp_plot)
+ else:
+ f, mp_plots = plt.subplots(plot_count, sharex=True)
+ plots.extend(mp_plots.tolist())
+
+ for key in metric_keys:
+ plot = plots.pop()
+ plot.grid(True)
+ plot.set_title(key + " (metric)")
+ plot.plot(metrics[key]['time'], metrics[key]['value'])
+ for key in decision_keys:
+ plot = plots.pop()
+ plot.grid(True)
+ plot.set_title(key + " (decision)")
+ plot.plot(decisions[key]['time'], decisions[key]['value'])
+ f.subplots_adjust(hspace=0.3)
+ plt.show()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/util/threshold_curve.h b/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/util/threshold_curve.h
new file mode 100644
index 0000000000..0375386e39
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/util/threshold_curve.h
@@ -0,0 +1,126 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_UTIL_THRESHOLD_CURVE_H_
+#define MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_UTIL_THRESHOLD_CURVE_H_
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+class ThresholdCurve {
+ public:
+ struct Point {
+ constexpr Point(float x, float y) : x(x), y(y) {}
+ float x;
+ float y;
+ };
+
+  // ThresholdCurve defines a curve. The curve is characterized by its two
+  // defining points, A and B, and segments the space into three domains -
+  // above the curve, on it, and below it.
+ //
+ // y-axis ^ |
+ // | A|
+ // | \ A: (a.x, a.y)
+ // | \ B: (b.x, b.y)
+ // | B\________
+ // |---------------> bandwidth
+ //
+ // If either a.x == b.x or a.y == b.y, the curve can be defined
+ // by a single point. (We merge the two points into one - either the lower or
+ // the leftmost one - for easier treatment.)
+ //
+ // y-axis ^ |
+ // | |
+ // | |
+ // | |
+ // | P|__________
+ // |---------------> bandwidth
+ ThresholdCurve(const Point& left, const Point& right)
+ : a(GetPoint(left, right, true)),
+ b(GetPoint(left, right, false)),
+ slope(b.x - a.x == 0.0f ? 0.0f : (b.y - a.y) / (b.x - a.x)),
+ offset(a.y - slope * a.x) {
+ // TODO(eladalon): We might want to introduce some numerical validations.
+ }
+
+ ThresholdCurve(float a_x, float a_y, float b_x, float b_y)
+ : ThresholdCurve(Point{a_x, a_y}, Point{b_x, b_y}) {}
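+
+  // For example, ThresholdCurve(1000, 2000, 2000, 1000) has slope == -1 and
+  // offset == 3000, so its segment passes through (1500, 1500); (1500, 1600)
+  // lies strictly above the curve and (1500, 1400) strictly below it.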
+
+ // Checks if a point is strictly below the curve.
+ bool IsBelowCurve(const Point& p) const {
+ if (p.x < a.x) {
+ return true;
+ } else if (p.x == a.x) {
+ // In principle, we could merge this into the next else, but to avoid
+ // numerical errors, we treat it separately.
+ return p.y < a.y;
+ } else if (a.x < p.x && p.x < b.x) {
+ return p.y < offset + slope * p.x;
+ } else { // if (b.x <= p.x)
+ return p.y < b.y;
+ }
+ }
+
+ // Checks if a point is strictly above the curve.
+ bool IsAboveCurve(const Point& p) const {
+ if (p.x <= a.x) {
+ return false;
+ } else if (a.x < p.x && p.x < b.x) {
+ return p.y > offset + slope * p.x;
+ } else { // if (b.x <= p.x)
+ return p.y > b.y;
+ }
+ }
+
+ bool operator<=(const ThresholdCurve& rhs) const {
+ // This curve is <= the rhs curve if no point from this curve is
+ // above a corresponding point from the rhs curve.
+ return !IsBelowCurve(rhs.a) && !IsBelowCurve(rhs.b) &&
+ !rhs.IsAboveCurve(a) && !rhs.IsAboveCurve(b);
+ }
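+
+  // For example, the curve through (5, 10) and (10, 5) is <= the curve
+  // through (5, 12) and (10, 7), since the latter lies entirely on or above
+  // the former.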
+
+ private:
+ static const Point& GetPoint(const Point& left,
+ const Point& right,
+ bool is_for_left) {
+ RTC_DCHECK_LE(left.x, right.x);
+ RTC_DCHECK_GE(left.y, right.y);
+
+    // If the two points share an X-value, they merge into the lower point;
+    // if they share a Y-value, they merge into the leftmost point.
+ if (left.x == right.x) {
+ return right;
+ } else if (left.y == right.y) {
+ return left;
+ }
+
+ // If unmerged, boolean flag determines which of the points is desired.
+ return is_for_left ? left : right;
+ }
+
+ const Point a;
+ const Point b;
+ const float slope;
+ const float offset;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_UTIL_THRESHOLD_CURVE_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/util/threshold_curve_unittest.cc b/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/util/threshold_curve_unittest.cc
new file mode 100644
index 0000000000..dc3aec0b18
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/util/threshold_curve_unittest.cc
@@ -0,0 +1,633 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/audio_network_adaptor/util/threshold_curve.h"
+
+#include <memory>
+
+#include "test/gtest.h"
+
+// A threshold curve divides 2D space into three domains - below, on and above
+// the threshold curve.
+// The curve is defined by two points. Those points, P1 and P2, are ordered so
+// that (P1.x <= P2.x && P1.y >= P2.y).
+// The part of the curve which is between the two points is hereon referred
+// to as the "segment".
+// A "ray" extends from P1 directly upwards into infinity; that's the "vertical
+// ray". Likewise, a "horizontal ray" extends from P2 directly rightwards.
+//
+// ^ | //
+// | | vertical ray //
+// | | //
+// | | //
+// | P1| //
+// | \ //
+// | \ segment //
+// | \ //
+// | \ horizontal ray //
+// | P2 ------------------ //
+// *---------------------------> //
+
+namespace webrtc {
+
+namespace {
+enum RelativePosition { kBelow, kOn, kAbove };
+
+void CheckRelativePosition(const ThresholdCurve& curve,
+ ThresholdCurve::Point point,
+ RelativePosition pos) {
+ RTC_CHECK(pos == kBelow || pos == kOn || pos == kAbove);
+
+ EXPECT_EQ(pos == kBelow, curve.IsBelowCurve(point));
+ EXPECT_EQ(pos == kAbove, curve.IsAboveCurve(point));
+}
+} // namespace
+
+// Test that the curve correctly reports the below/above position of points,
+// when the curve is a "normal" one - P1 and P2 are different in both their
+// X and Y values.
+TEST(ThresholdCurveTest, PointPositionToCommonCurve) {
+ // The points (P1-P2) define the curve. //
+ // All other points are above/below/on the curve. //
+ // //
+ // ^ //
+ // | | //
+ // | A F J R V //
+ // | | //
+ // | B P1 K S W //
+ // | \ //
+ // | \ //
+ // | \ L //
+ // | \ //
+ // | C G M T X //
+ // | \ //
+ // | N \ //
+ // | \ //
+ // | D H O P2--Y---------------- //
+ // | E I Q U Z //
+ // *----------------------------------> //
+ constexpr ThresholdCurve::Point p1{1000, 2000};
+ constexpr ThresholdCurve::Point p2{2000, 1000};
+
+ RTC_CHECK_GT((p1.x + p2.x) / 2, p1.x);
+ RTC_CHECK_LT((p1.x + p2.x) / 2, p2.x);
+ RTC_CHECK_LT((p1.y + p2.y) / 2, p1.y);
+ RTC_CHECK_GT((p1.y + p2.y) / 2, p2.y);
+
+ const ThresholdCurve curve(p1, p2);
+
+ {
+ // All cases where the point lies to the left of P1.
+ constexpr float x = p1.x - 1;
+ CheckRelativePosition(curve, {x, p1.y + 1}, kBelow); // A
+ CheckRelativePosition(curve, {x, p1.y + 0}, kBelow); // B
+ CheckRelativePosition(curve, {x, (p1.y + p2.y) / 2}, kBelow); // C
+ CheckRelativePosition(curve, {x, p2.y + 0}, kBelow); // D
+ CheckRelativePosition(curve, {x, p2.y - 1}, kBelow); // E
+ }
+
+ {
+ // All cases where the point has the same x-value as P1.
+ constexpr float x = p1.x;
+ CheckRelativePosition(curve, {x, p1.y + 1}, kOn); // F
+ CheckRelativePosition(curve, {x, p1.y + 0}, kOn); // P1
+ CheckRelativePosition(curve, {x, (p1.y + p2.y) / 2}, kBelow); // G
+ CheckRelativePosition(curve, {x, p2.y + 0}, kBelow); // H
+ CheckRelativePosition(curve, {x, p2.y - 1}, kBelow); // I
+ }
+
+ {
+ // To make sure we're really covering all of the cases, make sure that P1
+ // and P2 were chosen so that L would really be below K, and O would really
+ // be below N. (This would not hold if the Y values are too close together.)
+ RTC_CHECK_LT(((p1.y + p2.y) / 2) + 1, p1.y);
+ RTC_CHECK_LT(p2.y, ((p1.y + p2.y) / 2) - 1);
+
+ // All cases where the point's x-value is between P1 and P2.
+ constexpr float x = (p1.x + p2.x) / 2;
+ CheckRelativePosition(curve, {x, p1.y + 1}, kAbove); // J
+ CheckRelativePosition(curve, {x, p1.y + 0}, kAbove); // K
+ CheckRelativePosition(curve, {x, ((p1.y + p2.y) / 2) + 1}, kAbove); // L
+ CheckRelativePosition(curve, {x, (p1.y + p2.y) / 2}, kOn); // M
+ CheckRelativePosition(curve, {x, ((p1.y + p2.y) / 2) - 1}, kBelow); // N
+ CheckRelativePosition(curve, {x, p2.y + 0}, kBelow); // O
+ CheckRelativePosition(curve, {x, p2.y - 1}, kBelow); // Q
+ }
+
+ {
+ // All cases where the point has the same x-value as P2.
+ constexpr float x = p2.x;
+ CheckRelativePosition(curve, {x, p1.y + 1}, kAbove); // R
+ CheckRelativePosition(curve, {x, p1.y + 0}, kAbove); // S
+ CheckRelativePosition(curve, {x, (p1.y + p2.y) / 2}, kAbove); // T
+ CheckRelativePosition(curve, {x, p2.y + 0}, kOn); // P2
+ CheckRelativePosition(curve, {x, p2.y - 1}, kBelow); // U
+ }
+
+ {
+ // All cases where the point lies to the right of P2.
+ constexpr float x = p2.x + 1;
+ CheckRelativePosition(curve, {x, p1.y + 1}, kAbove); // V
+ CheckRelativePosition(curve, {x, p1.y + 0}, kAbove); // W
+ CheckRelativePosition(curve, {x, (p1.y + p2.y) / 2}, kAbove); // X
+ CheckRelativePosition(curve, {x, p2.y + 0}, kOn); // Y
+ CheckRelativePosition(curve, {x, p2.y - 1}, kBelow); // Z
+ }
+}
+
+// Test that the curve correctly reports the below/above position of points,
+// when the curve is defined by two points with the same Y value.
+TEST(ThresholdCurveTest, PointPositionToCurveWithHorizontalSegment) {
+ // The points (P1-P2) define the curve.
+ // All other points are above/below/on the curve.
+ //
+ // ^
+ // | |
+ // | |
+ // | A D F I K
+ // | |
+ // | |
+ // | B P1--G--P2-L--
+ // | C E H J M
+ // *------------------>
+
+ constexpr ThresholdCurve::Point p1{100, 200};
+ constexpr ThresholdCurve::Point p2{p1.x + 1, p1.y};
+
+ RTC_CHECK_GT((p1.x + p2.x) / 2, p1.x);
+ RTC_CHECK_LT((p1.x + p2.x) / 2, p2.x);
+
+ const ThresholdCurve curve(p1, p2);
+
+ {
+ // All cases where the point lies to the left of P1.
+ constexpr float x = p1.x - 1;
+ CheckRelativePosition(curve, {x, p1.y + 1}, kBelow); // A
+ CheckRelativePosition(curve, {x, p1.y + 0}, kBelow); // B
+ CheckRelativePosition(curve, {x, p1.y - 1}, kBelow); // C
+ }
+
+ {
+ // All cases where the point has the same x-value as P1.
+ constexpr float x = p1.x;
+ CheckRelativePosition(curve, {x, p1.y + 1}, kOn); // D
+ CheckRelativePosition(curve, {x, p1.y + 0}, kOn); // P1
+ CheckRelativePosition(curve, {x, p1.y - 1}, kBelow); // E
+ }
+
+ {
+ // All cases where the point's x-value is between P1 and P2.
+ constexpr float x = (p1.x + p2.x) / 2;
+ CheckRelativePosition(curve, {x, p1.y + 1}, kAbove); // F
+ CheckRelativePosition(curve, {x, p1.y + 0}, kOn); // G
+ CheckRelativePosition(curve, {x, p1.y - 1}, kBelow); // H
+ }
+
+ {
+ // All cases where the point has the same x-value as P2.
+ constexpr float x = p2.x;
+ CheckRelativePosition(curve, {x, p1.y + 1}, kAbove); // I
+ CheckRelativePosition(curve, {x, p1.y + 0}, kOn); // P2
+ CheckRelativePosition(curve, {x, p1.y - 1}, kBelow); // J
+ }
+
+ {
+ // All cases where the point lies to the right of P2.
+ constexpr float x = p2.x + 1;
+ CheckRelativePosition(curve, {x, p1.y + 1}, kAbove); // K
+ CheckRelativePosition(curve, {x, p1.y + 0}, kOn); // L
+ CheckRelativePosition(curve, {x, p1.y - 1}, kBelow); // M
+ }
+}
+
+// Test that the curve correctly reports the below/above position of points,
+// when the curve is defined by two points with the same X value.
+TEST(ThresholdCurveTest, PointPositionToCurveWithVerticalSegment) {
+ // The points (P1-P2) define the curve.
+ // All other points are above/below/on the curve.
+ //
+ // ^
+ // | |
+ // | A B C
+ // | |
+ // | D P1 E
+ // | |
+ // | F G H
+ // | |
+ // | I P2--J------
+ // | K L M
+ // *------------------>
+
+ constexpr ThresholdCurve::Point p1{100, 200};
+ constexpr ThresholdCurve::Point p2{p1.x, p1.y - 1};
+
+ constexpr float left = p1.x - 1;
+ constexpr float on = p1.x;
+ constexpr float right = p1.x + 1;
+
+ RTC_CHECK_LT((p1.y + p2.y) / 2, p1.y);
+ RTC_CHECK_GT((p1.y + p2.y) / 2, p2.y);
+
+ const ThresholdCurve curve(p1, p2);
+
+ {
+ // All cases where the point lies above P1.
+ constexpr float y = p1.y + 1;
+ CheckRelativePosition(curve, {left, y}, kBelow); // A
+ CheckRelativePosition(curve, {on, y}, kOn); // B
+ CheckRelativePosition(curve, {right, y}, kAbove); // C
+ }
+
+ {
+ // All cases where the point has the same y-value as P1.
+ constexpr float y = p1.y;
+ CheckRelativePosition(curve, {left, y}, kBelow); // D
+ CheckRelativePosition(curve, {on, y}, kOn); // P1
+ CheckRelativePosition(curve, {right, y}, kAbove); // E
+ }
+
+ {
+ // All cases where the point's y-value is between P1 and P2.
+ constexpr float y = (p1.y + p2.y) / 2;
+ CheckRelativePosition(curve, {left, y}, kBelow); // F
+ CheckRelativePosition(curve, {on, y}, kOn); // G
+ CheckRelativePosition(curve, {right, y}, kAbove); // H
+ }
+
+ {
+ // All cases where the point has the same y-value as P2.
+ constexpr float y = p2.y;
+ CheckRelativePosition(curve, {left, y}, kBelow); // I
+ CheckRelativePosition(curve, {on, y}, kOn); // P2
+ CheckRelativePosition(curve, {right, y}, kOn); // J
+ }
+
+ {
+ // All cases where the point lies below P2.
+ constexpr float y = p2.y - 1;
+ CheckRelativePosition(curve, {left, y}, kBelow); // K
+ CheckRelativePosition(curve, {on, y}, kBelow); // L
+ CheckRelativePosition(curve, {right, y}, kBelow); // M
+ }
+}
+
+// Test that the curve correctly reports the below/above position of points,
+// when the curve is defined by two points which are identical.
+TEST(ThresholdCurveTest, PointPositionToCurveWithNullSegment) {
+ // The points (P1-P2) define the curve.
+ // All other points are above/below/on the curve.
+ //
+ // ^
+ // | |
+ // | A D F
+ // | |
+ // | B P---G------
+ // | C E H
+ // *------------------>
+
+ constexpr ThresholdCurve::Point p{100, 200};
+
+ const ThresholdCurve curve(p, p);
+
+ {
+ // All cases where the point lies to the left of P.
+ constexpr float x = p.x - 1;
+ CheckRelativePosition(curve, {x, p.y + 1}, kBelow); // A
+ CheckRelativePosition(curve, {x, p.y + 0}, kBelow); // B
+ CheckRelativePosition(curve, {x, p.y - 1}, kBelow); // C
+ }
+
+ {
+ // All cases where the point has the same x-value as P.
+ constexpr float x = p.x + 0;
+ CheckRelativePosition(curve, {x, p.y + 1}, kOn); // D
+ CheckRelativePosition(curve, {x, p.y + 0}, kOn); // P
+ CheckRelativePosition(curve, {x, p.y - 1}, kBelow); // E
+ }
+
+ {
+ // All cases where the point lies to the right of P.
+ constexpr float x = p.x + 1;
+ CheckRelativePosition(curve, {x, p.y + 1}, kAbove); // F
+ CheckRelativePosition(curve, {x, p.y + 0}, kOn); // G
+ CheckRelativePosition(curve, {x, p.y - 1}, kBelow); // H
+ }
+}
+
+// Test that the relative position of two curves is computed correctly when
+// the two curves have the same projection on the X-axis.
+TEST(ThresholdCurveTest, TwoCurvesSegmentHasSameProjectionAxisX) {
+ // ^ //
+ // | C1 + C2 //
+ // | | //
+ // | |\ //
+ // | | \ //
+ // | \ \ //
+ // | \ \ //
+ // | \ \ //
+ // | \ -------- C2 //
+ // | --------- C1 //
+ // *---------------------> //
+
+ constexpr ThresholdCurve::Point c1_left{5, 10};
+ constexpr ThresholdCurve::Point c1_right{10, 5};
+ const ThresholdCurve c1_curve(c1_left, c1_right);
+
+ // Same x-values, but higher on Y. (Can be parallel, but doesn't have to be.)
+ constexpr ThresholdCurve::Point c2_left{c1_left.x, c1_left.y + 20};
+ constexpr ThresholdCurve::Point c2_right{c1_right.x, c1_right.y + 10};
+ const ThresholdCurve c2_curve(c2_left, c2_right);
+
+ EXPECT_TRUE(c1_curve <= c2_curve);
+ EXPECT_FALSE(c2_curve <= c1_curve);
+}
+
+// Test that the relative position of two curves is computed correctly when
+// the higher curve's projection on the X-axis is a strict subset of the
+// lower curve's projection on the X-axis (on both ends).
+TEST(ThresholdCurveTest, TwoCurvesSegmentOfHigherSubsetProjectionAxisX) {
+ // ^ //
+ // | C1 C2 //
+ // | | | //
+ // | | | //
+ // | \ | //
+ // | \ | //
+ // | \ \ //
+ // | \ \ //
+ // | \ --------- C2 //
+ // | \ //
+ // | \ //
+ // | ---------C1 //
+ // *---------------------> //
+
+ constexpr ThresholdCurve::Point c1_left{5, 10};
+ constexpr ThresholdCurve::Point c1_right{10, 5};
+ const ThresholdCurve c1_curve(c1_left, c1_right);
+
+ constexpr ThresholdCurve::Point c2_left{6, 11};
+ constexpr ThresholdCurve::Point c2_right{9, 7};
+ const ThresholdCurve c2_curve(c2_left, c2_right);
+
+ EXPECT_TRUE(c1_curve <= c2_curve);
+ EXPECT_FALSE(c2_curve <= c1_curve);
+}
+
+// Test that the relative position of two curves is computed correctly when
+// the higher curve's right point is above lower curve's horizontal ray (meaning
+// the higher curve's projection on the X-axis extends further right than
+// the lower curve's).
+TEST(ThresholdCurveTest,
+ TwoCurvesRightPointOfHigherCurveAboveHorizontalRayOfLower) {
+ // ^ //
+ // | C1 + C2 //
+ // | | //
+ // | |\ //
+ // | | \ //
+ // | | \ //
+ // | | \ //
+ // | | \ //
+ // | \ \ //
+ // | \ \ //
+ // | \ \ //
+ // | \ ----- C2 //
+ // | --------- C1 //
+ // *---------------------> //
+
+ constexpr ThresholdCurve::Point c1_left{5, 10};
+ constexpr ThresholdCurve::Point c1_right{10, 5};
+ const ThresholdCurve c1_curve(c1_left, c1_right);
+
+ constexpr ThresholdCurve::Point c2_left{c1_left.x, c1_left.y + 1};
+ constexpr ThresholdCurve::Point c2_right{c1_right.x + 1, c1_right.y + 1};
+ const ThresholdCurve c2_curve(c2_left, c2_right);
+
+ EXPECT_TRUE(c1_curve <= c2_curve);
+ EXPECT_FALSE(c2_curve <= c1_curve);
+}
+
+// Test that the relative position of two curves is computed correctly when
+// the higher curve's points are on the lower curve's rays (left point on the
+// vertical ray, right point on the horizontal ray).
+TEST(ThresholdCurveTest, TwoCurvesPointsOfHigherOnRaysOfLower) {
+ // ^
+ // | C1 + C2 //
+ // | | //
+ // | |\ //
+ // | | \ //
+ // | \ \ //
+ // | \ \ //
+ // | \ \ //
+ // | \ \ //
+ // | ----- C1 + C2 //
+ // *---------------------> //
+
+ constexpr ThresholdCurve::Point c1_left{5, 10};
+ constexpr ThresholdCurve::Point c1_right{10, 5};
+ const ThresholdCurve c1_curve(c1_left, c1_right);
+
+  // c2's left point lies on c1's vertical ray (same x, higher y); its right
+  // point lies on c1's horizontal ray (same y, further right).
+ constexpr ThresholdCurve::Point c2_left{c1_left.x, c1_left.y + 2};
+ constexpr ThresholdCurve::Point c2_right{c1_right.x + 3, c1_right.y};
+ const ThresholdCurve c2_curve(c2_left, c2_right);
+
+ EXPECT_TRUE(c1_curve <= c2_curve);
+ EXPECT_FALSE(c2_curve <= c1_curve);
+}
+
+// Test that the relative position of two curves is computed correctly when
+// the second curve's segment intersects the first curve's vertical ray.
+TEST(ThresholdCurveTest, SecondCurveCrossesVerticalRayOfFirstCurve) {
+ // ^ //
+ // | C2 C1 //
+ // | | | //
+ // | \| //
+ // | | //
+ // | |\ //
+ // | | \ //
+ // | \ \ //
+ // | \ \ //
+ // | \ \ //
+ // | \ ------- C2 //
+ // | -------- C1 //
+ // *---------------------> //
+
+ constexpr ThresholdCurve::Point c1_left{5, 10};
+ constexpr ThresholdCurve::Point c1_right{10, 5};
+ const ThresholdCurve c1_curve(c1_left, c1_right);
+
+ constexpr ThresholdCurve::Point c2_left{c1_left.x - 1, c1_left.y + 1};
+ constexpr ThresholdCurve::Point c2_right{c1_right.x, c1_right.y + 1};
+ const ThresholdCurve c2_curve(c2_left, c2_right);
+
+ EXPECT_FALSE(c1_curve <= c2_curve);
+ EXPECT_FALSE(c2_curve <= c1_curve);
+}
+
+// Test that the relative position of two curves is computed correctly when
+// the second curve's segment intersects the first curve's horizontal ray.
+TEST(ThresholdCurveTest, SecondCurveCrossesHorizontalRayOfFirstCurve) {
+ // ^ //
+ // | C1 + C2 //
+ // | | //
+ // | |\ //
+ // | \ \ //
+ // | \ \ //
+ // | \ \ //
+ // | \ \ //
+ // | ----------- C1 //
+ // | \ //
+ // | ------- C2 //
+ // *--------------------> //
+
+ constexpr ThresholdCurve::Point c1_left{5, 10};
+ constexpr ThresholdCurve::Point c1_right{10, 5};
+ const ThresholdCurve c1_curve(c1_left, c1_right);
+
+ constexpr ThresholdCurve::Point c2_left{c1_left.x, c1_left.y + 1};
+ constexpr ThresholdCurve::Point c2_right{c1_right.x + 2, c1_right.y - 1};
+ const ThresholdCurve c2_curve(c2_left, c2_right);
+
+ EXPECT_FALSE(c1_curve <= c2_curve);
+ EXPECT_FALSE(c2_curve <= c1_curve);
+}
+
+// Test that the relative position of two curves is computed correctly when
+// the second curve's segment intersects the first curve's segment.
+TEST(ThresholdCurveTest, TwoCurvesWithCrossingSegments) {
+ // ^ //
+ // | C2 C1 //
+ // | | | //
+ // | | | //
+ // | | \ //
+ // | | \ //
+ // | -_ \ //
+ // | -_ \ //
+ // | -_\ //
+ // | -_ //
+ // | \-_ //
+ // | \ ---------- C2 //
+ // | ----------- C1 //
+ // | //
+ // | //
+ // *-------------------------> //
+
+ constexpr ThresholdCurve::Point c1_left{5, 10};
+ constexpr ThresholdCurve::Point c1_right{10, 5};
+ const ThresholdCurve c1_curve(c1_left, c1_right);
+
+ constexpr ThresholdCurve::Point c2_left{4, 9};
+ constexpr ThresholdCurve::Point c2_right{10, 6};
+ const ThresholdCurve c2_curve(c2_left, c2_right);
+
+ // The test is structured so that the two curves intersect at (8, 7).
+ RTC_CHECK(!c1_curve.IsAboveCurve({8, 7}));
+ RTC_CHECK(!c1_curve.IsBelowCurve({8, 7}));
+ RTC_CHECK(!c2_curve.IsAboveCurve({8, 7}));
+ RTC_CHECK(!c2_curve.IsBelowCurve({8, 7}));
+
+ EXPECT_FALSE(c1_curve <= c2_curve);
+ EXPECT_FALSE(c2_curve <= c1_curve);
+}
+
+// Test that the relative position of two curves is computed correctly when
+// both curves are identical.
+TEST(ThresholdCurveTest, IdenticalCurves) {
+ // ^ //
+ // | C1 + C2 //
+ // | | //
+ // | | //
+ // | \ //
+ // | \ //
+ // | \ //
+ // | ------- C1 + C2 //
+ // *---------------------> //
+
+ constexpr ThresholdCurve::Point left{5, 10};
+ constexpr ThresholdCurve::Point right{10, 5};
+
+ const ThresholdCurve c1_curve(left, right);
+ const ThresholdCurve c2_curve(left, right);
+
+ EXPECT_TRUE(c1_curve <= c2_curve);
+ EXPECT_TRUE(c2_curve <= c1_curve);
+}
+
+// Test that the relative position of two curves is computed correctly when
+// they are "nearly identical" - the first curve's segment is contained within
+// the second curve's segment, but the second curve's segment extends further
+// to the left (which also produces separate vertical rays for the curves).
+TEST(ThresholdCurveTest, NearlyIdenticalCurvesSecondContinuesOnOtherLeftSide) {
+ // ^ //
+ // | C2 C1 //
+ // | | | //
+ // | | | //
+ // | \| //
+ // | | //
+ // | \ //
+ // | \ //
+ // | \ //
+ // | ----- C1 + C2 //
+ // *---------------------> //
+
+ constexpr ThresholdCurve::Point c1_left{5, 10};
+ constexpr ThresholdCurve::Point c1_right{10, 5};
+  const ThresholdCurve c1_curve(c1_left, c1_right);
+
+ constexpr ThresholdCurve::Point c2_left{c1_left.x - 1, c1_left.y + 1};
+ constexpr ThresholdCurve::Point c2_right = c1_right;
+ const ThresholdCurve c2_curve(c2_left, c2_right);
+
+ EXPECT_FALSE(c1_curve <= c2_curve);
+ EXPECT_TRUE(c2_curve <= c1_curve);
+}
+
+// Test that the relative position of two curves is computed correctly when
+// they are "nearly identical" - the first curve's segment is contained within
+// the second curve's segment, but the second curve's segment extends further
+// to the right (which also produces separate horizontal rays for the curves).
+TEST(ThresholdCurveTest, NearlyIdenticalCurvesSecondContinuesOnOtherRightSide) {
+ // ^ //
+ // | C1 + C2 //
+ // | | //
+ // | | //
+ // | \ //
+ // | \ //
+ // | \ //
+ // | \----------- C1 //
+ // | \ //
+ // | ---------- C2 //
+ // *---------------------> //
+
+ constexpr ThresholdCurve::Point c1_left{5, 10};
+ constexpr ThresholdCurve::Point c1_right{10, 5};
+  const ThresholdCurve c1_curve(c1_left, c1_right);
+
+ constexpr ThresholdCurve::Point c2_left = c1_left;
+ constexpr ThresholdCurve::Point c2_right{c1_right.x + 1, c1_right.y - 1};
+ const ThresholdCurve c2_curve(c2_left, c2_right);
+
+ EXPECT_FALSE(c1_curve <= c2_curve);
+ EXPECT_TRUE(c2_curve <= c1_curve);
+}
+
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+// The higher-left point must be given as the first point, and the lower-right
+// point must be given as the second.
+// This necessarily produces a non-positive slope.
+TEST(ThresholdCurveDeathTest, WrongOrderPoints) {
+ std::unique_ptr<ThresholdCurve> curve;
+ constexpr ThresholdCurve::Point left{5, 10};
+ constexpr ThresholdCurve::Point right{10, 5};
+ EXPECT_DEATH(curve.reset(new ThresholdCurve(right, left)), "");
+}
+#endif
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor_config_gn/moz.build b/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor_config_gn/moz.build
new file mode 100644
index 0000000000..7ef11d4892
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor_config_gn/moz.build
@@ -0,0 +1,197 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/audio_network_adaptor_config.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("audio_network_adaptor_config_gn")
diff --git a/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor_gn/moz.build b/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor_gn/moz.build
new file mode 100644
index 0000000000..b65fc49e52
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor_gn/moz.build
@@ -0,0 +1,222 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/audio_network_adaptor_impl.cc",
+ "/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/bitrate_controller.cc",
+ "/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/channel_controller.cc",
+ "/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/controller.cc",
+ "/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/controller_manager.cc",
+ "/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/debug_dump_writer.cc",
+ "/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/dtx_controller.cc",
+ "/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/event_log_writer.cc",
+ "/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/fec_controller_plr_based.cc",
+ "/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/frame_length_controller.cc",
+ "/third_party/libwebrtc/modules/audio_coding/audio_network_adaptor/frame_length_controller_v2.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("audio_network_adaptor_gn")
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/audio_decoder.h b/third_party/libwebrtc/modules/audio_coding/codecs/audio_decoder.h
new file mode 100644
index 0000000000..b7b15cdd6e
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/audio_decoder.h
@@ -0,0 +1,20 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+// This file is for backwards compatibility only! Use
+// webrtc/api/audio_codecs/audio_decoder.h instead!
+// TODO(kwiberg): Remove it.
+
+#ifndef MODULES_AUDIO_CODING_CODECS_AUDIO_DECODER_H_
+#define MODULES_AUDIO_CODING_CODECS_AUDIO_DECODER_H_
+
+#include "api/audio_codecs/audio_decoder.h"
+
+#endif // MODULES_AUDIO_CODING_CODECS_AUDIO_DECODER_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/audio_encoder.h b/third_party/libwebrtc/modules/audio_coding/codecs/audio_encoder.h
new file mode 100644
index 0000000000..010ae6705f
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/audio_encoder.h
@@ -0,0 +1,20 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+// This file is for backwards compatibility only! Use
+// webrtc/api/audio_codecs/audio_encoder.h instead!
+// TODO(ossu): Remove it.
+
+#ifndef MODULES_AUDIO_CODING_CODECS_AUDIO_ENCODER_H_
+#define MODULES_AUDIO_CODING_CODECS_AUDIO_ENCODER_H_
+
+#include "api/audio_codecs/audio_encoder.h"
+
+#endif // MODULES_AUDIO_CODING_CODECS_AUDIO_ENCODER_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/builtin_audio_decoder_factory_unittest.cc b/third_party/libwebrtc/modules/audio_coding/codecs/builtin_audio_decoder_factory_unittest.cc
new file mode 100644
index 0000000000..109da78eea
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/builtin_audio_decoder_factory_unittest.cc
@@ -0,0 +1,196 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "api/audio_codecs/builtin_audio_decoder_factory.h"
+
+#include <memory>
+
+#include "test/gtest.h"
+
+namespace webrtc {
+
+TEST(AudioDecoderFactoryTest, CreateUnknownDecoder) {
+ rtc::scoped_refptr<AudioDecoderFactory> adf =
+ CreateBuiltinAudioDecoderFactory();
+ ASSERT_TRUE(adf);
+ EXPECT_FALSE(
+ adf->MakeAudioDecoder(SdpAudioFormat("rey", 8000, 1), absl::nullopt));
+}
+
+TEST(AudioDecoderFactoryTest, CreatePcmu) {
+ rtc::scoped_refptr<AudioDecoderFactory> adf =
+ CreateBuiltinAudioDecoderFactory();
+ ASSERT_TRUE(adf);
+  // PCMu supports 8 kHz and any positive number of channels.
+ EXPECT_FALSE(
+ adf->MakeAudioDecoder(SdpAudioFormat("pcmu", 8000, 0), absl::nullopt));
+ EXPECT_TRUE(
+ adf->MakeAudioDecoder(SdpAudioFormat("pcmu", 8000, 1), absl::nullopt));
+ EXPECT_TRUE(
+ adf->MakeAudioDecoder(SdpAudioFormat("pcmu", 8000, 2), absl::nullopt));
+ EXPECT_TRUE(
+ adf->MakeAudioDecoder(SdpAudioFormat("pcmu", 8000, 3), absl::nullopt));
+ EXPECT_FALSE(
+ adf->MakeAudioDecoder(SdpAudioFormat("pcmu", 16000, 1), absl::nullopt));
+}
+
+TEST(AudioDecoderFactoryTest, CreatePcma) {
+ rtc::scoped_refptr<AudioDecoderFactory> adf =
+ CreateBuiltinAudioDecoderFactory();
+ ASSERT_TRUE(adf);
+  // PCMa supports 8 kHz and any positive number of channels.
+ EXPECT_FALSE(
+ adf->MakeAudioDecoder(SdpAudioFormat("pcma", 8000, 0), absl::nullopt));
+ EXPECT_TRUE(
+ adf->MakeAudioDecoder(SdpAudioFormat("pcma", 8000, 1), absl::nullopt));
+ EXPECT_TRUE(
+ adf->MakeAudioDecoder(SdpAudioFormat("pcma", 8000, 2), absl::nullopt));
+ EXPECT_TRUE(
+ adf->MakeAudioDecoder(SdpAudioFormat("pcma", 8000, 3), absl::nullopt));
+ EXPECT_FALSE(
+ adf->MakeAudioDecoder(SdpAudioFormat("pcma", 16000, 1), absl::nullopt));
+}
+
+TEST(AudioDecoderFactoryTest, CreateIlbc) {
+ rtc::scoped_refptr<AudioDecoderFactory> adf =
+ CreateBuiltinAudioDecoderFactory();
+ ASSERT_TRUE(adf);
+ // iLBC supports 8 kHz, 1 channel.
+ EXPECT_FALSE(
+ adf->MakeAudioDecoder(SdpAudioFormat("ilbc", 8000, 0), absl::nullopt));
+#ifdef WEBRTC_CODEC_ILBC
+ EXPECT_TRUE(
+ adf->MakeAudioDecoder(SdpAudioFormat("ilbc", 8000, 1), absl::nullopt));
+#endif
+ EXPECT_FALSE(
+ adf->MakeAudioDecoder(SdpAudioFormat("ilbc", 8000, 2), absl::nullopt));
+ EXPECT_FALSE(
+ adf->MakeAudioDecoder(SdpAudioFormat("ilbc", 16000, 1), absl::nullopt));
+}
+
+TEST(AudioDecoderFactoryTest, CreateIsac) {
+ rtc::scoped_refptr<AudioDecoderFactory> adf =
+ CreateBuiltinAudioDecoderFactory();
+ ASSERT_TRUE(adf);
+ // iSAC supports 16 kHz, 1 channel. The float implementation additionally
+ // supports 32 kHz, 1 channel.
+ EXPECT_FALSE(
+ adf->MakeAudioDecoder(SdpAudioFormat("isac", 16000, 0), absl::nullopt));
+ EXPECT_TRUE(
+ adf->MakeAudioDecoder(SdpAudioFormat("isac", 16000, 1), absl::nullopt));
+ EXPECT_FALSE(
+ adf->MakeAudioDecoder(SdpAudioFormat("isac", 16000, 2), absl::nullopt));
+ EXPECT_FALSE(
+ adf->MakeAudioDecoder(SdpAudioFormat("isac", 8000, 1), absl::nullopt));
+ EXPECT_FALSE(
+ adf->MakeAudioDecoder(SdpAudioFormat("isac", 48000, 1), absl::nullopt));
+#ifdef WEBRTC_ARCH_ARM
+ EXPECT_FALSE(
+ adf->MakeAudioDecoder(SdpAudioFormat("isac", 32000, 1), absl::nullopt));
+#else
+ EXPECT_TRUE(
+ adf->MakeAudioDecoder(SdpAudioFormat("isac", 32000, 1), absl::nullopt));
+#endif
+}
+
+TEST(AudioDecoderFactoryTest, CreateL16) {
+ rtc::scoped_refptr<AudioDecoderFactory> adf =
+ CreateBuiltinAudioDecoderFactory();
+ ASSERT_TRUE(adf);
+ // L16 supports any clock rate and any number of channels up to 24.
+ const int clockrates[] = {8000, 16000, 32000, 48000};
+ const int num_channels[] = {1, 2, 3, 24};
+ for (int clockrate : clockrates) {
+ EXPECT_FALSE(adf->MakeAudioDecoder(SdpAudioFormat("l16", clockrate, 0),
+ absl::nullopt));
+ for (int channels : num_channels) {
+ EXPECT_TRUE(adf->MakeAudioDecoder(
+ SdpAudioFormat("l16", clockrate, channels), absl::nullopt));
+ }
+ }
+}
+
+// Tests that using more channels than the maximum does not work.
+TEST(AudioDecoderFactoryTest, MaxNrOfChannels) {
+ rtc::scoped_refptr<AudioDecoderFactory> adf =
+ CreateBuiltinAudioDecoderFactory();
+ std::vector<std::string> codecs = {
+#ifdef WEBRTC_CODEC_OPUS
+ "opus",
+#endif
+#if defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX)
+ "isac",
+#endif
+#ifdef WEBRTC_CODEC_ILBC
+ "ilbc",
+#endif
+ "pcmu",
+ "pcma",
+ "l16",
+ "G722",
+ "G711",
+ };
+
+ for (auto codec : codecs) {
+ EXPECT_FALSE(adf->MakeAudioDecoder(
+ SdpAudioFormat(codec, 32000, AudioDecoder::kMaxNumberOfChannels + 1),
+ absl::nullopt));
+ }
+}
+
+TEST(AudioDecoderFactoryTest, CreateG722) {
+ rtc::scoped_refptr<AudioDecoderFactory> adf =
+ CreateBuiltinAudioDecoderFactory();
+ ASSERT_TRUE(adf);
+ // g722 supports 8 kHz, 1-2 channels.
+ EXPECT_FALSE(
+ adf->MakeAudioDecoder(SdpAudioFormat("g722", 8000, 0), absl::nullopt));
+ EXPECT_TRUE(
+ adf->MakeAudioDecoder(SdpAudioFormat("g722", 8000, 1), absl::nullopt));
+ EXPECT_TRUE(
+ adf->MakeAudioDecoder(SdpAudioFormat("g722", 8000, 2), absl::nullopt));
+ EXPECT_FALSE(
+ adf->MakeAudioDecoder(SdpAudioFormat("g722", 8000, 3), absl::nullopt));
+ EXPECT_FALSE(
+ adf->MakeAudioDecoder(SdpAudioFormat("g722", 16000, 1), absl::nullopt));
+ EXPECT_FALSE(
+ adf->MakeAudioDecoder(SdpAudioFormat("g722", 32000, 1), absl::nullopt));
+
+ // g722 actually uses a 16 kHz sample rate instead of the nominal 8 kHz.
+ std::unique_ptr<AudioDecoder> dec =
+ adf->MakeAudioDecoder(SdpAudioFormat("g722", 8000, 1), absl::nullopt);
+ EXPECT_EQ(16000, dec->SampleRateHz());
+}
+
+TEST(AudioDecoderFactoryTest, CreateOpus) {
+ rtc::scoped_refptr<AudioDecoderFactory> adf =
+ CreateBuiltinAudioDecoderFactory();
+ ASSERT_TRUE(adf);
+ // Opus supports 48 kHz, 2 channels, and wants a "stereo" parameter whose
+ // value is either "0" or "1".
+ for (int hz : {8000, 16000, 32000, 48000}) {
+ for (int channels : {0, 1, 2, 3}) {
+ for (std::string stereo : {"XX", "0", "1", "2"}) {
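+        // "XX" is a sentinel meaning that no "stereo" parameter is set.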
+ SdpAudioFormat::Parameters params;
+ if (stereo != "XX") {
+ params["stereo"] = stereo;
+ }
+ const bool good = (hz == 48000 && channels == 2 &&
+ (stereo == "XX" || stereo == "0" || stereo == "1"));
+ EXPECT_EQ(good,
+ static_cast<bool>(adf->MakeAudioDecoder(
+ SdpAudioFormat("opus", hz, channels, std::move(params)),
+ absl::nullopt)));
+ }
+ }
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/builtin_audio_encoder_factory_unittest.cc b/third_party/libwebrtc/modules/audio_coding/codecs/builtin_audio_encoder_factory_unittest.cc
new file mode 100644
index 0000000000..26ae1eda8a
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/builtin_audio_encoder_factory_unittest.cc
@@ -0,0 +1,178 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "api/audio_codecs/builtin_audio_encoder_factory.h"
+
+#include <limits>
+#include <memory>
+#include <vector>
+
+#include "rtc_base/numerics/safe_conversions.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+class AudioEncoderFactoryTest
+ : public ::testing::TestWithParam<rtc::scoped_refptr<AudioEncoderFactory>> {
+};
+
+TEST_P(AudioEncoderFactoryTest, SupportsAtLeastOneFormat) {
+ auto factory = GetParam();
+ auto supported_encoders = factory->GetSupportedEncoders();
+ EXPECT_FALSE(supported_encoders.empty());
+}
+
+TEST_P(AudioEncoderFactoryTest, CanQueryAllSupportedFormats) {
+ auto factory = GetParam();
+ auto supported_encoders = factory->GetSupportedEncoders();
+ for (const auto& spec : supported_encoders) {
+ auto info = factory->QueryAudioEncoder(spec.format);
+ EXPECT_TRUE(info);
+ }
+}
+
+TEST_P(AudioEncoderFactoryTest, CanConstructAllSupportedEncoders) {
+ auto factory = GetParam();
+ auto supported_encoders = factory->GetSupportedEncoders();
+ for (const auto& spec : supported_encoders) {
+ auto info = factory->QueryAudioEncoder(spec.format);
+ auto encoder = factory->MakeAudioEncoder(127, spec.format, absl::nullopt);
+ EXPECT_TRUE(encoder);
+ EXPECT_EQ(encoder->SampleRateHz(), info->sample_rate_hz);
+ EXPECT_EQ(encoder->NumChannels(), info->num_channels);
+ EXPECT_EQ(encoder->RtpTimestampRateHz(), spec.format.clockrate_hz);
+ }
+}
+
+TEST_P(AudioEncoderFactoryTest, CanRunAllSupportedEncoders) {
+ constexpr int kTestPayloadType = 127;
+ auto factory = GetParam();
+ auto supported_encoders = factory->GetSupportedEncoders();
+ for (const auto& spec : supported_encoders) {
+ auto encoder =
+ factory->MakeAudioEncoder(kTestPayloadType, spec.format, absl::nullopt);
+ EXPECT_TRUE(encoder);
+ encoder->Reset();
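+    // Prepare 10 ms (i.e. 1/100 of a second) of audio for all channels.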
+ const int num_samples = rtc::checked_cast<int>(
+ encoder->SampleRateHz() * encoder->NumChannels() / 100);
+ rtc::Buffer out;
+ rtc::BufferT<int16_t> audio;
+ audio.SetData(num_samples, [](rtc::ArrayView<int16_t> audio) {
+ for (size_t i = 0; i != audio.size(); ++i) {
+ // Just put some numbers in there, ensure they're within range.
+ audio[i] =
+ static_cast<int16_t>(i & std::numeric_limits<int16_t>::max());
+ }
+ return audio.size();
+ });
+    // This is here to stop the test from running forever if the encoder is
+    // broken.
+ constexpr int kMaxEncodeCalls = 100;
+ int blocks = 0;
+ for (; blocks < kMaxEncodeCalls; ++blocks) {
+ AudioEncoder::EncodedInfo info = encoder->Encode(
+ blocks * encoder->RtpTimestampRateHz() / 100, audio, &out);
+ EXPECT_EQ(info.encoded_bytes, out.size());
+ if (info.encoded_bytes > 0) {
+ EXPECT_EQ(0u, info.encoded_timestamp);
+ EXPECT_EQ(kTestPayloadType, info.payload_type);
+ break;
+ }
+ }
+ ASSERT_LT(blocks, kMaxEncodeCalls);
+ const unsigned int next_timestamp =
+ blocks * encoder->RtpTimestampRateHz() / 100;
+ out.Clear();
+ for (; blocks < kMaxEncodeCalls; ++blocks) {
+ AudioEncoder::EncodedInfo info = encoder->Encode(
+ blocks * encoder->RtpTimestampRateHz() / 100, audio, &out);
+ EXPECT_EQ(info.encoded_bytes, out.size());
+ if (info.encoded_bytes > 0) {
+ EXPECT_EQ(next_timestamp, info.encoded_timestamp);
+ EXPECT_EQ(kTestPayloadType, info.payload_type);
+ break;
+ }
+ }
+ ASSERT_LT(blocks, kMaxEncodeCalls);
+ }
+}
+
+INSTANTIATE_TEST_SUITE_P(BuiltinAudioEncoderFactoryTest,
+ AudioEncoderFactoryTest,
+ ::testing::Values(CreateBuiltinAudioEncoderFactory()));
+
+TEST(BuiltinAudioEncoderFactoryTest, SupportsTheExpectedFormats) {
+ using ::testing::ElementsAreArray;
+ // Check that we claim to support the formats we expect from build flags, and
+ // we've ordered them correctly.
+ auto factory = CreateBuiltinAudioEncoderFactory();
+ auto specs = factory->GetSupportedEncoders();
+
+ const std::vector<SdpAudioFormat> supported_formats = [&specs] {
+ std::vector<SdpAudioFormat> formats;
+ formats.reserve(specs.size());
+ for (const auto& spec : specs) {
+ formats.push_back(spec.format);
+ }
+ return formats;
+ }();
+
+ const std::vector<SdpAudioFormat> expected_formats = {
+#ifdef WEBRTC_CODEC_OPUS
+ {"opus", 48000, 2, {{"minptime", "10"}, {"useinbandfec", "1"}}},
+#endif
+#if defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX)
+ {"isac", 16000, 1},
+#endif
+#ifdef WEBRTC_CODEC_ISAC
+ {"isac", 32000, 1},
+#endif
+ {"G722", 8000, 1},
+#ifdef WEBRTC_CODEC_ILBC
+ {"ilbc", 8000, 1},
+#endif
+ {"pcmu", 8000, 1},
+ {"pcma", 8000, 1}
+ };
+
+ ASSERT_THAT(supported_formats, ElementsAreArray(expected_formats));
+}
+
+// Tests that using more channels than the maximum does not work.
+TEST(BuiltinAudioEncoderFactoryTest, MaxNrOfChannels) {
+ rtc::scoped_refptr<AudioEncoderFactory> aef =
+ CreateBuiltinAudioEncoderFactory();
+ std::vector<std::string> codecs = {
+#ifdef WEBRTC_CODEC_OPUS
+ "opus",
+#endif
+#if defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX)
+ "isac",
+#endif
+#ifdef WEBRTC_CODEC_ILBC
+ "ilbc",
+#endif
+ "pcmu",
+ "pcma",
+ "l16",
+ "G722",
+ "G711",
+ };
+
+ for (auto codec : codecs) {
+ EXPECT_FALSE(aef->MakeAudioEncoder(
+ /*payload_type=*/111,
+ /*format=*/
+ SdpAudioFormat(codec, 32000, AudioEncoder::kMaxNumberOfChannels + 1),
+ /*codec_pair_id=*/absl::nullopt));
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/cng/audio_encoder_cng.cc b/third_party/libwebrtc/modules/audio_coding/codecs/cng/audio_encoder_cng.cc
new file mode 100644
index 0000000000..7546ac178f
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/cng/audio_encoder_cng.cc
@@ -0,0 +1,322 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/cng/audio_encoder_cng.h"
+
+#include <cstdint>
+#include <memory>
+#include <utility>
+
+#include "absl/types/optional.h"
+#include "api/units/time_delta.h"
+#include "modules/audio_coding/codecs/cng/webrtc_cng.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+namespace {
+
+const int kMaxFrameSizeMs = 60;
+
+class AudioEncoderCng final : public AudioEncoder {
+ public:
+ explicit AudioEncoderCng(AudioEncoderCngConfig&& config);
+ ~AudioEncoderCng() override;
+
+ // Not copyable or moveable.
+ AudioEncoderCng(const AudioEncoderCng&) = delete;
+ AudioEncoderCng(AudioEncoderCng&&) = delete;
+ AudioEncoderCng& operator=(const AudioEncoderCng&) = delete;
+ AudioEncoderCng& operator=(AudioEncoderCng&&) = delete;
+
+ int SampleRateHz() const override;
+ size_t NumChannels() const override;
+ int RtpTimestampRateHz() const override;
+ size_t Num10MsFramesInNextPacket() const override;
+ size_t Max10MsFramesInAPacket() const override;
+ int GetTargetBitrate() const override;
+ EncodedInfo EncodeImpl(uint32_t rtp_timestamp,
+ rtc::ArrayView<const int16_t> audio,
+ rtc::Buffer* encoded) override;
+ void Reset() override;
+ bool SetFec(bool enable) override;
+ bool SetDtx(bool enable) override;
+ bool SetApplication(Application application) override;
+ void SetMaxPlaybackRate(int frequency_hz) override;
+ rtc::ArrayView<std::unique_ptr<AudioEncoder>> ReclaimContainedEncoders()
+ override;
+ void OnReceivedUplinkPacketLossFraction(
+ float uplink_packet_loss_fraction) override;
+ void OnReceivedUplinkBandwidth(
+ int target_audio_bitrate_bps,
+ absl::optional<int64_t> bwe_period_ms) override;
+ absl::optional<std::pair<TimeDelta, TimeDelta>> GetFrameLengthRange()
+ const override;
+
+ private:
+ EncodedInfo EncodePassive(size_t frames_to_encode, rtc::Buffer* encoded);
+ EncodedInfo EncodeActive(size_t frames_to_encode, rtc::Buffer* encoded);
+ size_t SamplesPer10msFrame() const;
+
+ std::unique_ptr<AudioEncoder> speech_encoder_;
+ const int cng_payload_type_;
+ const int num_cng_coefficients_;
+ const int sid_frame_interval_ms_;
+ std::vector<int16_t> speech_buffer_;
+ std::vector<uint32_t> rtp_timestamps_;
+ bool last_frame_active_;
+ std::unique_ptr<Vad> vad_;
+ std::unique_ptr<ComfortNoiseEncoder> cng_encoder_;
+};
+
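+// Note: the first initializer below uses a comma expression so that the
+// RTC_CHECK on `config` runs before `config.speech_encoder` is moved from;
+// the ComfortNoiseEncoder initializer further down already calls
+// SampleRateHz(), which needs a valid speech encoder.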
+AudioEncoderCng::AudioEncoderCng(AudioEncoderCngConfig&& config)
+ : speech_encoder_((static_cast<void>([&] {
+ RTC_CHECK(config.IsOk()) << "Invalid configuration.";
+ }()),
+ std::move(config.speech_encoder))),
+ cng_payload_type_(config.payload_type),
+ num_cng_coefficients_(config.num_cng_coefficients),
+ sid_frame_interval_ms_(config.sid_frame_interval_ms),
+ last_frame_active_(true),
+ vad_(config.vad ? std::unique_ptr<Vad>(config.vad)
+ : CreateVad(config.vad_mode)),
+ cng_encoder_(new ComfortNoiseEncoder(SampleRateHz(),
+ sid_frame_interval_ms_,
+ num_cng_coefficients_)) {}
+
+AudioEncoderCng::~AudioEncoderCng() = default;
+
+int AudioEncoderCng::SampleRateHz() const {
+ return speech_encoder_->SampleRateHz();
+}
+
+size_t AudioEncoderCng::NumChannels() const {
+ return 1;
+}
+
+int AudioEncoderCng::RtpTimestampRateHz() const {
+ return speech_encoder_->RtpTimestampRateHz();
+}
+
+size_t AudioEncoderCng::Num10MsFramesInNextPacket() const {
+ return speech_encoder_->Num10MsFramesInNextPacket();
+}
+
+size_t AudioEncoderCng::Max10MsFramesInAPacket() const {
+ return speech_encoder_->Max10MsFramesInAPacket();
+}
+
+int AudioEncoderCng::GetTargetBitrate() const {
+ return speech_encoder_->GetTargetBitrate();
+}
+
+AudioEncoder::EncodedInfo AudioEncoderCng::EncodeImpl(
+ uint32_t rtp_timestamp,
+ rtc::ArrayView<const int16_t> audio,
+ rtc::Buffer* encoded) {
+ const size_t samples_per_10ms_frame = SamplesPer10msFrame();
+ RTC_CHECK_EQ(speech_buffer_.size(),
+ rtp_timestamps_.size() * samples_per_10ms_frame);
+ rtp_timestamps_.push_back(rtp_timestamp);
+ RTC_DCHECK_EQ(samples_per_10ms_frame, audio.size());
+ speech_buffer_.insert(speech_buffer_.end(), audio.cbegin(), audio.cend());
+ const size_t frames_to_encode = speech_encoder_->Num10MsFramesInNextPacket();
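+  // Wait until a full packet's worth of 10 ms blocks has been buffered.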
+ if (rtp_timestamps_.size() < frames_to_encode) {
+ return EncodedInfo();
+ }
+ RTC_CHECK_LE(frames_to_encode * 10, kMaxFrameSizeMs)
+ << "Frame size cannot be larger than " << kMaxFrameSizeMs
+ << " ms when using VAD/CNG.";
+
+ // Group several 10 ms blocks per VAD call. Call VAD once or twice using the
+ // following split sizes:
+ // 10 ms = 10 + 0 ms; 20 ms = 20 + 0 ms; 30 ms = 30 + 0 ms;
+ // 40 ms = 20 + 20 ms; 50 ms = 30 + 20 ms; 60 ms = 30 + 30 ms.
+ size_t blocks_in_first_vad_call =
+ (frames_to_encode > 3 ? 3 : frames_to_encode);
+ if (frames_to_encode == 4)
+ blocks_in_first_vad_call = 2;
+ RTC_CHECK_GE(frames_to_encode, blocks_in_first_vad_call);
+ const size_t blocks_in_second_vad_call =
+ frames_to_encode - blocks_in_first_vad_call;
+
+  // Check whether the entire buffer is passive speech, starting with the
+  // first block.
+ Vad::Activity activity = vad_->VoiceActivity(
+ &speech_buffer_[0], samples_per_10ms_frame * blocks_in_first_vad_call,
+ SampleRateHz());
+ if (activity == Vad::kPassive && blocks_in_second_vad_call > 0) {
+ // Only check the second block if the first was passive.
+ activity = vad_->VoiceActivity(
+ &speech_buffer_[samples_per_10ms_frame * blocks_in_first_vad_call],
+ samples_per_10ms_frame * blocks_in_second_vad_call, SampleRateHz());
+ }
+
+ EncodedInfo info;
+ switch (activity) {
+ case Vad::kPassive: {
+ info = EncodePassive(frames_to_encode, encoded);
+ last_frame_active_ = false;
+ break;
+ }
+ case Vad::kActive: {
+ info = EncodeActive(frames_to_encode, encoded);
+ last_frame_active_ = true;
+ break;
+ }
+ default: {
+ RTC_CHECK_NOTREACHED();
+ }
+ }
+
+ speech_buffer_.erase(
+ speech_buffer_.begin(),
+ speech_buffer_.begin() + frames_to_encode * samples_per_10ms_frame);
+ rtp_timestamps_.erase(rtp_timestamps_.begin(),
+ rtp_timestamps_.begin() + frames_to_encode);
+ return info;
+}
+
+void AudioEncoderCng::Reset() {
+ speech_encoder_->Reset();
+ speech_buffer_.clear();
+ rtp_timestamps_.clear();
+ last_frame_active_ = true;
+ vad_->Reset();
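+  // Recreate the comfort noise encoder to clear its internal state.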
+ cng_encoder_.reset(new ComfortNoiseEncoder(
+ SampleRateHz(), sid_frame_interval_ms_, num_cng_coefficients_));
+}
+
+bool AudioEncoderCng::SetFec(bool enable) {
+ return speech_encoder_->SetFec(enable);
+}
+
+bool AudioEncoderCng::SetDtx(bool enable) {
+ return speech_encoder_->SetDtx(enable);
+}
+
+bool AudioEncoderCng::SetApplication(Application application) {
+ return speech_encoder_->SetApplication(application);
+}
+
+void AudioEncoderCng::SetMaxPlaybackRate(int frequency_hz) {
+ speech_encoder_->SetMaxPlaybackRate(frequency_hz);
+}
+
+rtc::ArrayView<std::unique_ptr<AudioEncoder>>
+AudioEncoderCng::ReclaimContainedEncoders() {
+ return rtc::ArrayView<std::unique_ptr<AudioEncoder>>(&speech_encoder_, 1);
+}
+
+void AudioEncoderCng::OnReceivedUplinkPacketLossFraction(
+ float uplink_packet_loss_fraction) {
+ speech_encoder_->OnReceivedUplinkPacketLossFraction(
+ uplink_packet_loss_fraction);
+}
+
+void AudioEncoderCng::OnReceivedUplinkBandwidth(
+ int target_audio_bitrate_bps,
+ absl::optional<int64_t> bwe_period_ms) {
+ speech_encoder_->OnReceivedUplinkBandwidth(target_audio_bitrate_bps,
+ bwe_period_ms);
+}
+
+absl::optional<std::pair<TimeDelta, TimeDelta>>
+AudioEncoderCng::GetFrameLengthRange() const {
+ return speech_encoder_->GetFrameLengthRange();
+}
+
+AudioEncoder::EncodedInfo AudioEncoderCng::EncodePassive(
+ size_t frames_to_encode,
+ rtc::Buffer* encoded) {
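+  // Force an immediate SID frame on the transition from active to passive
+  // speech; otherwise the regular SID interval decides when one is sent.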
+ bool force_sid = last_frame_active_;
+ bool output_produced = false;
+ const size_t samples_per_10ms_frame = SamplesPer10msFrame();
+ AudioEncoder::EncodedInfo info;
+
+ for (size_t i = 0; i < frames_to_encode; ++i) {
+    // It's important not to assign the result of Encode() directly to
+    // info.encoded_bytes, since later loop iterations may return zero, in
+    // which case we don't want to overwrite the value from an earlier
+    // iteration.
+ size_t encoded_bytes_tmp =
+ cng_encoder_->Encode(rtc::ArrayView<const int16_t>(
+ &speech_buffer_[i * samples_per_10ms_frame],
+ samples_per_10ms_frame),
+ force_sid, encoded);
+
+ if (encoded_bytes_tmp > 0) {
+ RTC_CHECK(!output_produced);
+ info.encoded_bytes = encoded_bytes_tmp;
+ output_produced = true;
+ force_sid = false;
+ }
+ }
+
+ info.encoded_timestamp = rtp_timestamps_.front();
+ info.payload_type = cng_payload_type_;
+ info.send_even_if_empty = true;
+ info.speech = false;
+ return info;
+}
+
+AudioEncoder::EncodedInfo AudioEncoderCng::EncodeActive(size_t frames_to_encode,
+ rtc::Buffer* encoded) {
+ const size_t samples_per_10ms_frame = SamplesPer10msFrame();
+ AudioEncoder::EncodedInfo info;
+ for (size_t i = 0; i < frames_to_encode; ++i) {
+ info =
+ speech_encoder_->Encode(rtp_timestamps_.front(),
+ rtc::ArrayView<const int16_t>(
+ &speech_buffer_[i * samples_per_10ms_frame],
+ samples_per_10ms_frame),
+ encoded);
+ if (i + 1 == frames_to_encode) {
+ RTC_CHECK_GT(info.encoded_bytes, 0) << "Encoder didn't deliver data.";
+ } else {
+ RTC_CHECK_EQ(info.encoded_bytes, 0)
+ << "Encoder delivered data too early.";
+ }
+ }
+ return info;
+}
+
+size_t AudioEncoderCng::SamplesPer10msFrame() const {
+ return rtc::CheckedDivExact(10 * SampleRateHz(), 1000);
+}
+
+} // namespace
+
+AudioEncoderCngConfig::AudioEncoderCngConfig() = default;
+AudioEncoderCngConfig::AudioEncoderCngConfig(AudioEncoderCngConfig&&) = default;
+AudioEncoderCngConfig::~AudioEncoderCngConfig() = default;
+
+bool AudioEncoderCngConfig::IsOk() const {
+ if (num_channels != 1)
+ return false;
+ if (!speech_encoder)
+ return false;
+ if (num_channels != speech_encoder->NumChannels())
+ return false;
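+  // The SID frame interval must cover at least one full speech packet.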
+ if (sid_frame_interval_ms <
+ static_cast<int>(speech_encoder->Max10MsFramesInAPacket() * 10))
+ return false;
+ if (num_cng_coefficients > WEBRTC_CNG_MAX_LPC_ORDER ||
+ num_cng_coefficients <= 0)
+ return false;
+ return true;
+}
+
+std::unique_ptr<AudioEncoder> CreateComfortNoiseEncoder(
+ AudioEncoderCngConfig&& config) {
+ return std::make_unique<AudioEncoderCng>(std::move(config));
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/cng/audio_encoder_cng.h b/third_party/libwebrtc/modules/audio_coding/codecs/cng/audio_encoder_cng.h
new file mode 100644
index 0000000000..8a1183489f
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/cng/audio_encoder_cng.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_CNG_AUDIO_ENCODER_CNG_H_
+#define MODULES_AUDIO_CODING_CODECS_CNG_AUDIO_ENCODER_CNG_H_
+
+#include <stddef.h>
+
+#include <memory>
+
+#include "api/audio_codecs/audio_encoder.h"
+#include "common_audio/vad/include/vad.h"
+
+namespace webrtc {
+
+struct AudioEncoderCngConfig {
+ // Moveable, not copyable.
+ AudioEncoderCngConfig();
+ AudioEncoderCngConfig(AudioEncoderCngConfig&&);
+ ~AudioEncoderCngConfig();
+
+ bool IsOk() const;
+
+ size_t num_channels = 1;
+ int payload_type = 13;
+ std::unique_ptr<AudioEncoder> speech_encoder;
+ Vad::Aggressiveness vad_mode = Vad::kVadNormal;
+ int sid_frame_interval_ms = 100;
+ int num_cng_coefficients = 8;
+ // The Vad pointer is mainly for testing. If a NULL pointer is passed, the
+ // AudioEncoderCng creates (and destroys) a Vad object internally. If an
+ // object is passed, the AudioEncoderCng assumes ownership of the Vad
+ // object.
+ Vad* vad = nullptr;
+};
+
+std::unique_ptr<AudioEncoder> CreateComfortNoiseEncoder(
+ AudioEncoderCngConfig&& config);
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_CODING_CODECS_CNG_AUDIO_ENCODER_CNG_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/cng/audio_encoder_cng_unittest.cc b/third_party/libwebrtc/modules/audio_coding/codecs/cng/audio_encoder_cng_unittest.cc
new file mode 100644
index 0000000000..c688004363
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/cng/audio_encoder_cng_unittest.cc
@@ -0,0 +1,520 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/cng/audio_encoder_cng.h"
+
+#include <memory>
+#include <vector>
+
+#include "common_audio/vad/mock/mock_vad.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "test/gtest.h"
+#include "test/mock_audio_encoder.h"
+#include "test/testsupport/rtc_expect_death.h"
+
+using ::testing::_;
+using ::testing::Eq;
+using ::testing::InSequence;
+using ::testing::Invoke;
+using ::testing::Not;
+using ::testing::Optional;
+using ::testing::Return;
+using ::testing::SetArgPointee;
+
+namespace webrtc {
+
+namespace {
+static const size_t kMaxNumSamples = 48 * 10 * 2; // 10 ms @ 48 kHz stereo.
+static const size_t kMockReturnEncodedBytes = 17;
+static const int kCngPayloadType = 18;
+} // namespace
+
+class AudioEncoderCngTest : public ::testing::Test {
+ protected:
+ AudioEncoderCngTest()
+ : mock_encoder_owner_(new MockAudioEncoder),
+ mock_encoder_(mock_encoder_owner_.get()),
+ mock_vad_(new MockVad),
+ timestamp_(4711),
+ num_audio_samples_10ms_(0),
+ sample_rate_hz_(8000) {
+ memset(audio_, 0, kMaxNumSamples * 2);
+ EXPECT_CALL(*mock_encoder_, NumChannels()).WillRepeatedly(Return(1));
+ }
+
+ AudioEncoderCngTest(const AudioEncoderCngTest&) = delete;
+ AudioEncoderCngTest& operator=(const AudioEncoderCngTest&) = delete;
+
+ void TearDown() override {
+ EXPECT_CALL(*mock_vad_, Die()).Times(1);
+ cng_.reset();
+ }
+
+ AudioEncoderCngConfig MakeCngConfig() {
+ AudioEncoderCngConfig config;
+ config.speech_encoder = std::move(mock_encoder_owner_);
+ EXPECT_TRUE(config.speech_encoder);
+
+ // Let the AudioEncoderCng object use a MockVad instead of its internally
+ // created Vad object.
+ config.vad = mock_vad_;
+ config.payload_type = kCngPayloadType;
+
+ return config;
+ }
+
+ void CreateCng(AudioEncoderCngConfig&& config) {
+ num_audio_samples_10ms_ = static_cast<size_t>(10 * sample_rate_hz_ / 1000);
+ ASSERT_LE(num_audio_samples_10ms_, kMaxNumSamples);
+ if (config.speech_encoder) {
+ EXPECT_CALL(*mock_encoder_, SampleRateHz())
+ .WillRepeatedly(Return(sample_rate_hz_));
+ // Max10MsFramesInAPacket() is just used to verify that the SID frame
+ // period is not too small. The return value does not matter that much,
+ // as long as it is smaller than 10.
+ EXPECT_CALL(*mock_encoder_, Max10MsFramesInAPacket())
+ .WillOnce(Return(1u));
+ }
+ cng_ = CreateComfortNoiseEncoder(std::move(config));
+ }
+
+ void Encode() {
+ ASSERT_TRUE(cng_) << "Must call CreateCng() first.";
+ encoded_info_ = cng_->Encode(
+ timestamp_,
+ rtc::ArrayView<const int16_t>(audio_, num_audio_samples_10ms_),
+ &encoded_);
+ timestamp_ += static_cast<uint32_t>(num_audio_samples_10ms_);
+ }
+
+ // Expect `num_calls` calls to the encoder, all successful. The last call
+ // claims to have encoded `kMockReturnEncodedBytes` bytes, and all the
+ // preceding ones 0 bytes.
+ void ExpectEncodeCalls(size_t num_calls) {
+ InSequence s;
+ AudioEncoder::EncodedInfo info;
+ for (size_t j = 0; j < num_calls - 1; ++j) {
+ EXPECT_CALL(*mock_encoder_, EncodeImpl(_, _, _)).WillOnce(Return(info));
+ }
+ info.encoded_bytes = kMockReturnEncodedBytes;
+ EXPECT_CALL(*mock_encoder_, EncodeImpl(_, _, _))
+ .WillOnce(
+ Invoke(MockAudioEncoder::FakeEncoding(kMockReturnEncodedBytes)));
+ }
+
+ // Verifies that the cng_ object waits until it has collected
+ // `blocks_per_frame` blocks of audio, and then dispatches all of them to
+ // the underlying codec (speech or cng).
+ void CheckBlockGrouping(size_t blocks_per_frame, bool active_speech) {
+ EXPECT_CALL(*mock_encoder_, Num10MsFramesInNextPacket())
+ .WillRepeatedly(Return(blocks_per_frame));
+ auto config = MakeCngConfig();
+ const int num_cng_coefficients = config.num_cng_coefficients;
+ CreateCng(std::move(config));
+ EXPECT_CALL(*mock_vad_, VoiceActivity(_, _, _))
+ .WillRepeatedly(Return(active_speech ? Vad::kActive : Vad::kPassive));
+
+ // Don't expect any calls to the encoder yet.
+ EXPECT_CALL(*mock_encoder_, EncodeImpl(_, _, _)).Times(0);
+ for (size_t i = 0; i < blocks_per_frame - 1; ++i) {
+ Encode();
+ EXPECT_EQ(0u, encoded_info_.encoded_bytes);
+ }
+ if (active_speech)
+ ExpectEncodeCalls(blocks_per_frame);
+ Encode();
+ if (active_speech) {
+ EXPECT_EQ(kMockReturnEncodedBytes, encoded_info_.encoded_bytes);
+ } else {
+ EXPECT_EQ(static_cast<size_t>(num_cng_coefficients + 1),
+ encoded_info_.encoded_bytes);
+ }
+ }
+
+ // Verifies that the audio is partitioned into larger blocks before calling
+ // the VAD.
+ void CheckVadInputSize(int input_frame_size_ms,
+ int expected_first_block_size_ms,
+ int expected_second_block_size_ms) {
+ const size_t blocks_per_frame =
+ static_cast<size_t>(input_frame_size_ms / 10);
+
+ EXPECT_CALL(*mock_encoder_, Num10MsFramesInNextPacket())
+ .WillRepeatedly(Return(blocks_per_frame));
+
+ // Expect nothing to happen before the last block is sent to cng_.
+ EXPECT_CALL(*mock_vad_, VoiceActivity(_, _, _)).Times(0);
+ for (size_t i = 0; i < blocks_per_frame - 1; ++i) {
+ Encode();
+ }
+
+ // Let the VAD decision be passive, since an active decision may lead to
+ // early termination of the decision loop.
+ InSequence s;
+ EXPECT_CALL(
+ *mock_vad_,
+ VoiceActivity(_, expected_first_block_size_ms * sample_rate_hz_ / 1000,
+ sample_rate_hz_))
+ .WillOnce(Return(Vad::kPassive));
+ if (expected_second_block_size_ms > 0) {
+ EXPECT_CALL(*mock_vad_,
+ VoiceActivity(
+ _, expected_second_block_size_ms * sample_rate_hz_ / 1000,
+ sample_rate_hz_))
+ .WillOnce(Return(Vad::kPassive));
+ }
+
+ // With this call to Encode(), `mock_vad_` should be called according to the
+ // above expectations.
+ Encode();
+ }
+
+ // Tests a frame with both active and passive speech. Returns true if the
+ // decision was active speech, false if it was passive.
+ bool CheckMixedActivePassive(Vad::Activity first_type,
+ Vad::Activity second_type) {
+ // Set the speech encoder frame size to 60 ms, to ensure that the VAD will
+ // be called twice.
+ const size_t blocks_per_frame = 6;
+ EXPECT_CALL(*mock_encoder_, Num10MsFramesInNextPacket())
+ .WillRepeatedly(Return(blocks_per_frame));
+ InSequence s;
+ EXPECT_CALL(*mock_vad_, VoiceActivity(_, _, _))
+ .WillOnce(Return(first_type));
+ if (first_type == Vad::kPassive) {
+ // Expect a second call to the VAD only if the first frame was passive.
+ EXPECT_CALL(*mock_vad_, VoiceActivity(_, _, _))
+ .WillOnce(Return(second_type));
+ }
+ encoded_info_.payload_type = 0;
+ for (size_t i = 0; i < blocks_per_frame; ++i) {
+ Encode();
+ }
+ return encoded_info_.payload_type != kCngPayloadType;
+ }
+
+ std::unique_ptr<AudioEncoder> cng_;
+ std::unique_ptr<MockAudioEncoder> mock_encoder_owner_;
+ MockAudioEncoder* mock_encoder_;
+ MockVad* mock_vad_; // Ownership is transferred to `cng_`.
+ uint32_t timestamp_;
+ int16_t audio_[kMaxNumSamples];
+ size_t num_audio_samples_10ms_;
+ rtc::Buffer encoded_;
+ AudioEncoder::EncodedInfo encoded_info_;
+ int sample_rate_hz_;
+};
+
+TEST_F(AudioEncoderCngTest, CreateAndDestroy) {
+ CreateCng(MakeCngConfig());
+}
+
+TEST_F(AudioEncoderCngTest, CheckFrameSizePropagation) {
+ CreateCng(MakeCngConfig());
+ EXPECT_CALL(*mock_encoder_, Num10MsFramesInNextPacket())
+ .WillOnce(Return(17U));
+ EXPECT_EQ(17U, cng_->Num10MsFramesInNextPacket());
+}
+
+TEST_F(AudioEncoderCngTest, CheckTargetAudioBitratePropagation) {
+ CreateCng(MakeCngConfig());
+ EXPECT_CALL(*mock_encoder_,
+ OnReceivedUplinkBandwidth(4711, absl::optional<int64_t>()));
+ cng_->OnReceivedUplinkBandwidth(4711, absl::nullopt);
+}
+
+TEST_F(AudioEncoderCngTest, CheckPacketLossFractionPropagation) {
+ CreateCng(MakeCngConfig());
+ EXPECT_CALL(*mock_encoder_, OnReceivedUplinkPacketLossFraction(0.5));
+ cng_->OnReceivedUplinkPacketLossFraction(0.5);
+}
+
+TEST_F(AudioEncoderCngTest, CheckGetFrameLengthRangePropagation) {
+ CreateCng(MakeCngConfig());
+ auto expected_range =
+ std::make_pair(TimeDelta::Millis(20), TimeDelta::Millis(20));
+ EXPECT_CALL(*mock_encoder_, GetFrameLengthRange())
+ .WillRepeatedly(Return(absl::make_optional(expected_range)));
+ EXPECT_THAT(cng_->GetFrameLengthRange(), Optional(Eq(expected_range)));
+}
+
+TEST_F(AudioEncoderCngTest, EncodeCallsVad) {
+ EXPECT_CALL(*mock_encoder_, Num10MsFramesInNextPacket())
+ .WillRepeatedly(Return(1U));
+ CreateCng(MakeCngConfig());
+ EXPECT_CALL(*mock_vad_, VoiceActivity(_, _, _))
+ .WillOnce(Return(Vad::kPassive));
+ Encode();
+}
+
+TEST_F(AudioEncoderCngTest, EncodeCollects1BlockPassiveSpeech) {
+ CheckBlockGrouping(1, false);
+}
+
+TEST_F(AudioEncoderCngTest, EncodeCollects2BlocksPassiveSpeech) {
+ CheckBlockGrouping(2, false);
+}
+
+TEST_F(AudioEncoderCngTest, EncodeCollects3BlocksPassiveSpeech) {
+ CheckBlockGrouping(3, false);
+}
+
+TEST_F(AudioEncoderCngTest, EncodeCollects1BlockActiveSpeech) {
+ CheckBlockGrouping(1, true);
+}
+
+TEST_F(AudioEncoderCngTest, EncodeCollects2BlocksActiveSpeech) {
+ CheckBlockGrouping(2, true);
+}
+
+TEST_F(AudioEncoderCngTest, EncodeCollects3BlocksActiveSpeech) {
+ CheckBlockGrouping(3, true);
+}
+
+TEST_F(AudioEncoderCngTest, EncodePassive) {
+ const size_t kBlocksPerFrame = 3;
+ EXPECT_CALL(*mock_encoder_, Num10MsFramesInNextPacket())
+ .WillRepeatedly(Return(kBlocksPerFrame));
+ auto config = MakeCngConfig();
+ const auto sid_frame_interval_ms = config.sid_frame_interval_ms;
+ const auto num_cng_coefficients = config.num_cng_coefficients;
+ CreateCng(std::move(config));
+ EXPECT_CALL(*mock_vad_, VoiceActivity(_, _, _))
+ .WillRepeatedly(Return(Vad::kPassive));
+ // Expect no calls at all to the speech encoder mock.
+ EXPECT_CALL(*mock_encoder_, EncodeImpl(_, _, _)).Times(0);
+ uint32_t expected_timestamp = timestamp_;
+ for (size_t i = 0; i < 100; ++i) {
+ Encode();
+ // Check if it was time to call the cng encoder. This is done once every
+ // `kBlocksPerFrame` calls.
+ if ((i + 1) % kBlocksPerFrame == 0) {
+ // Now check if a SID interval has elapsed.
+ if ((i % (sid_frame_interval_ms / 10)) < kBlocksPerFrame) {
+ // If so, verify that we got a CNG encoding.
+ EXPECT_EQ(kCngPayloadType, encoded_info_.payload_type);
+ EXPECT_FALSE(encoded_info_.speech);
+ EXPECT_EQ(static_cast<size_t>(num_cng_coefficients) + 1,
+ encoded_info_.encoded_bytes);
+ EXPECT_EQ(expected_timestamp, encoded_info_.encoded_timestamp);
+ }
+ expected_timestamp += rtc::checked_cast<uint32_t>(
+ kBlocksPerFrame * num_audio_samples_10ms_);
+ } else {
+ // Otherwise, expect no output.
+ EXPECT_EQ(0u, encoded_info_.encoded_bytes);
+ }
+ }
+}
+
+// Verifies that the correct action is taken for frames with both active and
+// passive speech.
+TEST_F(AudioEncoderCngTest, MixedActivePassive) {
+ CreateCng(MakeCngConfig());
+
+ // All of the frame is active speech.
+ ExpectEncodeCalls(6);
+ EXPECT_TRUE(CheckMixedActivePassive(Vad::kActive, Vad::kActive));
+ EXPECT_TRUE(encoded_info_.speech);
+
+ // First half of the frame is active speech.
+ ExpectEncodeCalls(6);
+ EXPECT_TRUE(CheckMixedActivePassive(Vad::kActive, Vad::kPassive));
+ EXPECT_TRUE(encoded_info_.speech);
+
+ // Second half of the frame is active speech.
+ ExpectEncodeCalls(6);
+ EXPECT_TRUE(CheckMixedActivePassive(Vad::kPassive, Vad::kActive));
+ EXPECT_TRUE(encoded_info_.speech);
+
+ // All of the frame is passive speech. Expect no calls to `mock_encoder_`.
+ EXPECT_FALSE(CheckMixedActivePassive(Vad::kPassive, Vad::kPassive));
+ EXPECT_FALSE(encoded_info_.speech);
+}
+
+// These tests verify that the audio is partitioned into larger blocks before
+// calling the VAD.
+// The parameters for CheckVadInputSize are:
+// CheckVadInputSize(frame_size, expected_first_block_size,
+// expected_second_block_size);
+TEST_F(AudioEncoderCngTest, VadInputSize10Ms) {
+ CreateCng(MakeCngConfig());
+ CheckVadInputSize(10, 10, 0);
+}
+TEST_F(AudioEncoderCngTest, VadInputSize20Ms) {
+ CreateCng(MakeCngConfig());
+ CheckVadInputSize(20, 20, 0);
+}
+TEST_F(AudioEncoderCngTest, VadInputSize30Ms) {
+ CreateCng(MakeCngConfig());
+ CheckVadInputSize(30, 30, 0);
+}
+TEST_F(AudioEncoderCngTest, VadInputSize40Ms) {
+ CreateCng(MakeCngConfig());
+ CheckVadInputSize(40, 20, 20);
+}
+TEST_F(AudioEncoderCngTest, VadInputSize50Ms) {
+ CreateCng(MakeCngConfig());
+ CheckVadInputSize(50, 30, 20);
+}
+TEST_F(AudioEncoderCngTest, VadInputSize60Ms) {
+ CreateCng(MakeCngConfig());
+ CheckVadInputSize(60, 30, 30);
+}
+
+// Verifies that the correct payload type is set when CNG is encoded.
+TEST_F(AudioEncoderCngTest, VerifyCngPayloadType) {
+ CreateCng(MakeCngConfig());
+ EXPECT_CALL(*mock_encoder_, EncodeImpl(_, _, _)).Times(0);
+ EXPECT_CALL(*mock_encoder_, Num10MsFramesInNextPacket()).WillOnce(Return(1U));
+ EXPECT_CALL(*mock_vad_, VoiceActivity(_, _, _))
+ .WillOnce(Return(Vad::kPassive));
+ encoded_info_.payload_type = 0;
+ Encode();
+ EXPECT_EQ(kCngPayloadType, encoded_info_.payload_type);
+}
+
+// Verifies that a SID frame is encoded immediately as the signal changes from
+// active speech to passive.
+TEST_F(AudioEncoderCngTest, VerifySidFrameAfterSpeech) {
+ auto config = MakeCngConfig();
+ const auto num_cng_coefficients = config.num_cng_coefficients;
+ CreateCng(std::move(config));
+ EXPECT_CALL(*mock_encoder_, Num10MsFramesInNextPacket())
+ .WillRepeatedly(Return(1U));
+ // Start with encoding noise.
+ EXPECT_CALL(*mock_vad_, VoiceActivity(_, _, _))
+ .Times(2)
+ .WillRepeatedly(Return(Vad::kPassive));
+ Encode();
+ EXPECT_EQ(kCngPayloadType, encoded_info_.payload_type);
+ EXPECT_EQ(static_cast<size_t>(num_cng_coefficients) + 1,
+ encoded_info_.encoded_bytes);
+ // Encode again, and make sure we got no frame at all (since the SID frame
+ // period is 100 ms by default).
+ Encode();
+ EXPECT_EQ(0u, encoded_info_.encoded_bytes);
+
+ // Now encode active speech.
+ encoded_info_.payload_type = 0;
+ EXPECT_CALL(*mock_vad_, VoiceActivity(_, _, _))
+ .WillOnce(Return(Vad::kActive));
+ EXPECT_CALL(*mock_encoder_, EncodeImpl(_, _, _))
+ .WillOnce(
+ Invoke(MockAudioEncoder::FakeEncoding(kMockReturnEncodedBytes)));
+ Encode();
+ EXPECT_EQ(kMockReturnEncodedBytes, encoded_info_.encoded_bytes);
+
+ // Go back to noise again, and verify that a SID frame is emitted.
+ EXPECT_CALL(*mock_vad_, VoiceActivity(_, _, _))
+ .WillOnce(Return(Vad::kPassive));
+ Encode();
+ EXPECT_EQ(kCngPayloadType, encoded_info_.payload_type);
+ EXPECT_EQ(static_cast<size_t>(num_cng_coefficients) + 1,
+ encoded_info_.encoded_bytes);
+}
+
+// Resetting the CNG should reset both the VAD and the encoder.
+TEST_F(AudioEncoderCngTest, Reset) {
+ CreateCng(MakeCngConfig());
+ EXPECT_CALL(*mock_encoder_, Reset()).Times(1);
+ EXPECT_CALL(*mock_vad_, Reset()).Times(1);
+ cng_->Reset();
+}
+
+#if GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+
+// This test fixture tests various error conditions that make the
+// AudioEncoderCng die via CHECKs.
+class AudioEncoderCngDeathTest : public AudioEncoderCngTest {
+ protected:
+ AudioEncoderCngDeathTest() : AudioEncoderCngTest() {
+ EXPECT_CALL(*mock_vad_, Die()).Times(1);
+ delete mock_vad_;
+ mock_vad_ = nullptr;
+ }
+
+ // Override AudioEncoderCngTest::TearDown, since that one expects a call to
+ // the destructor of `mock_vad_`. In this case, that object is already
+ // deleted.
+ void TearDown() override { cng_.reset(); }
+
+ AudioEncoderCngConfig MakeCngConfig() {
+ // Don't provide a Vad mock object, since it would leak when the test dies.
+ auto config = AudioEncoderCngTest::MakeCngConfig();
+ config.vad = nullptr;
+ return config;
+ }
+
+ void TryWrongNumCoefficients(int num) {
+ RTC_EXPECT_DEATH(
+ [&] {
+ auto config = MakeCngConfig();
+ config.num_cng_coefficients = num;
+ CreateCng(std::move(config));
+ }(),
+ "Invalid configuration");
+ }
+};
+
+TEST_F(AudioEncoderCngDeathTest, WrongFrameSize) {
+ CreateCng(MakeCngConfig());
+ num_audio_samples_10ms_ *= 2; // 20 ms frame.
+ RTC_EXPECT_DEATH(Encode(), "");
+ num_audio_samples_10ms_ = 0; // Zero samples.
+ RTC_EXPECT_DEATH(Encode(), "");
+}
+
+TEST_F(AudioEncoderCngDeathTest, WrongNumCoefficientsA) {
+ TryWrongNumCoefficients(-1);
+}
+
+TEST_F(AudioEncoderCngDeathTest, WrongNumCoefficientsB) {
+ TryWrongNumCoefficients(0);
+}
+
+TEST_F(AudioEncoderCngDeathTest, WrongNumCoefficientsC) {
+ TryWrongNumCoefficients(13);
+}
+
+TEST_F(AudioEncoderCngDeathTest, NullSpeechEncoder) {
+ auto config = MakeCngConfig();
+ config.speech_encoder = nullptr;
+ RTC_EXPECT_DEATH(CreateCng(std::move(config)), "");
+}
+
+TEST_F(AudioEncoderCngDeathTest, StereoEncoder) {
+ EXPECT_CALL(*mock_encoder_, NumChannels()).WillRepeatedly(Return(2));
+ RTC_EXPECT_DEATH(CreateCng(MakeCngConfig()), "Invalid configuration");
+}
+
+TEST_F(AudioEncoderCngDeathTest, StereoConfig) {
+ RTC_EXPECT_DEATH(
+ [&] {
+ auto config = MakeCngConfig();
+ config.num_channels = 2;
+ CreateCng(std::move(config));
+ }(),
+ "Invalid configuration");
+}
+
+TEST_F(AudioEncoderCngDeathTest, EncoderFrameSizeTooLarge) {
+ CreateCng(MakeCngConfig());
+ EXPECT_CALL(*mock_encoder_, Num10MsFramesInNextPacket())
+ .WillRepeatedly(Return(7U));
+ for (int i = 0; i < 6; ++i)
+ Encode();
+ RTC_EXPECT_DEATH(
+ Encode(), "Frame size cannot be larger than 60 ms when using VAD/CNG.");
+}
+
+#endif // GTEST_HAS_DEATH_TEST
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/cng/cng_unittest.cc b/third_party/libwebrtc/modules/audio_coding/codecs/cng/cng_unittest.cc
new file mode 100644
index 0000000000..0e6ab79394
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/cng/cng_unittest.cc
@@ -0,0 +1,252 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include <memory>
+#include <string>
+
+#include "modules/audio_coding/codecs/cng/webrtc_cng.h"
+#include "test/gtest.h"
+#include "test/testsupport/file_utils.h"
+
+namespace webrtc {
+
+enum {
+ kSidShortIntervalUpdate = 1,
+ kSidNormalIntervalUpdate = 100,
+ kSidLongIntervalUpdate = 10000
+};
+
+enum : size_t {
+ kCNGNumParamsLow = 0,
+ kCNGNumParamsNormal = 8,
+ kCNGNumParamsHigh = WEBRTC_CNG_MAX_LPC_ORDER,
+ kCNGNumParamsTooHigh = WEBRTC_CNG_MAX_LPC_ORDER + 1
+};
+
+enum { kNoSid, kForceSid };
+
+class CngTest : public ::testing::Test {
+ protected:
+ virtual void SetUp();
+
+ void TestCngEncode(int sample_rate_hz, int quality);
+
+ int16_t speech_data_[640]; // Max size of CNG internal buffers.
+};
+
+class CngDeathTest : public CngTest {};
+
+void CngTest::SetUp() {
+ FILE* input_file;
+ const std::string file_name =
+ webrtc::test::ResourcePath("audio_coding/testfile32kHz", "pcm");
+ input_file = fopen(file_name.c_str(), "rb");
+ ASSERT_TRUE(input_file != NULL);
+ ASSERT_EQ(640, static_cast<int32_t>(
+ fread(speech_data_, sizeof(int16_t), 640, input_file)));
+ fclose(input_file);
+ input_file = NULL;
+}
+
+void CngTest::TestCngEncode(int sample_rate_hz, int quality) {
+ const size_t num_samples_10ms = rtc::CheckedDivExact(sample_rate_hz, 100);
+ rtc::Buffer sid_data;
+
+ ComfortNoiseEncoder cng_encoder(sample_rate_hz, kSidNormalIntervalUpdate,
+ quality);
+ EXPECT_EQ(0U, cng_encoder.Encode(rtc::ArrayView<const int16_t>(
+ speech_data_, num_samples_10ms),
+ kNoSid, &sid_data));
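+  // A SID frame is one energy byte followed by `quality` reflection
+  // coefficients, hence quality + 1 bytes (cf. RFC 3389).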
+ EXPECT_EQ(static_cast<size_t>(quality + 1),
+ cng_encoder.Encode(
+ rtc::ArrayView<const int16_t>(speech_data_, num_samples_10ms),
+ kForceSid, &sid_data));
+}
+
+#if GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+// Constructing a CNG encoder with invalid quality values should die via a
+// CHECK.
+TEST_F(CngDeathTest, CngInitFail) {
+ // Call with too few parameters.
+ EXPECT_DEATH(
+ {
+ ComfortNoiseEncoder(8000, kSidNormalIntervalUpdate, kCNGNumParamsLow);
+ },
+ "");
+ // Call with too many parameters.
+ EXPECT_DEATH(
+ {
+ ComfortNoiseEncoder(8000, kSidNormalIntervalUpdate,
+ kCNGNumParamsTooHigh);
+ },
+ "");
+}
+
+// Encode CNG with a too-long input vector.
+TEST_F(CngDeathTest, CngEncodeTooLong) {
+ rtc::Buffer sid_data;
+
+ // Create encoder.
+ ComfortNoiseEncoder cng_encoder(8000, kSidNormalIntervalUpdate,
+ kCNGNumParamsNormal);
+ // Run encoder with too much data.
+ EXPECT_DEATH(
+ cng_encoder.Encode(rtc::ArrayView<const int16_t>(speech_data_, 641),
+ kNoSid, &sid_data),
+ "");
+}
+#endif // GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+
+TEST_F(CngTest, CngEncode8000) {
+ TestCngEncode(8000, kCNGNumParamsNormal);
+}
+
+TEST_F(CngTest, CngEncode16000) {
+ TestCngEncode(16000, kCNGNumParamsNormal);
+}
+
+TEST_F(CngTest, CngEncode32000) {
+ TestCngEncode(32000, kCNGNumParamsHigh);
+}
+
+TEST_F(CngTest, CngEncode48000) {
+ TestCngEncode(48000, kCNGNumParamsNormal);
+}
+
+TEST_F(CngTest, CngEncode64000) {
+ TestCngEncode(64000, kCNGNumParamsNormal);
+}
+
+// Update SID parameters, for both the normal and the maximum number of
+// parameters.
+TEST_F(CngTest, CngUpdateSid) {
+ rtc::Buffer sid_data;
+
+ // Create and initialize encoder and decoder.
+ ComfortNoiseEncoder cng_encoder(16000, kSidNormalIntervalUpdate,
+ kCNGNumParamsNormal);
+ ComfortNoiseDecoder cng_decoder;
+
+ // Run normal Encode and UpdateSid.
+ EXPECT_EQ(kCNGNumParamsNormal + 1,
+ cng_encoder.Encode(rtc::ArrayView<const int16_t>(speech_data_, 160),
+ kForceSid, &sid_data));
+ cng_decoder.UpdateSid(sid_data);
+
+ // Reinit with new length.
+ cng_encoder.Reset(16000, kSidNormalIntervalUpdate, kCNGNumParamsHigh);
+ cng_decoder.Reset();
+
+ // Expect 0 because of unstable parameters after switching length.
+ EXPECT_EQ(0U,
+ cng_encoder.Encode(rtc::ArrayView<const int16_t>(speech_data_, 160),
+ kForceSid, &sid_data));
+ EXPECT_EQ(
+ kCNGNumParamsHigh + 1,
+ cng_encoder.Encode(rtc::ArrayView<const int16_t>(speech_data_ + 160, 160),
+ kForceSid, &sid_data));
+ cng_decoder.UpdateSid(
+ rtc::ArrayView<const uint8_t>(sid_data.data(), kCNGNumParamsNormal + 1));
+}
+
+// Update SID parameters, with wrong parameters or without calling decode.
+TEST_F(CngTest, CngUpdateSidErroneous) {
+ rtc::Buffer sid_data;
+
+ // Encode.
+ ComfortNoiseEncoder cng_encoder(16000, kSidNormalIntervalUpdate,
+ kCNGNumParamsNormal);
+ ComfortNoiseDecoder cng_decoder;
+ EXPECT_EQ(kCNGNumParamsNormal + 1,
+ cng_encoder.Encode(rtc::ArrayView<const int16_t>(speech_data_, 160),
+ kForceSid, &sid_data));
+
+  // First run with valid parameters, then with too many CNG parameters.
+  // The decoder handles the latter correctly by reading only the maximum
+  // number of parameters and skipping the extra ones.
+ EXPECT_EQ(kCNGNumParamsNormal + 1, sid_data.size());
+ cng_decoder.UpdateSid(sid_data);
+
+  // Make sure the input buffer is large enough. Since Encode() appends data,
+  // the size must be set manually afterwards; otherwise the buffer would end
+  // up bigger than anticipated.
+ sid_data.SetSize(kCNGNumParamsTooHigh + 1);
+ cng_decoder.UpdateSid(sid_data);
+}
+
+// Test generation of CNG data by forcing SID, under both normal and faulty
+// conditions.
+TEST_F(CngTest, CngGenerate) {
+ rtc::Buffer sid_data;
+ int16_t out_data[640];
+
+ // Create and initialize encoder and decoder.
+ ComfortNoiseEncoder cng_encoder(16000, kSidNormalIntervalUpdate,
+ kCNGNumParamsNormal);
+ ComfortNoiseDecoder cng_decoder;
+
+ // Normal Encode.
+ EXPECT_EQ(kCNGNumParamsNormal + 1,
+ cng_encoder.Encode(rtc::ArrayView<const int16_t>(speech_data_, 160),
+ kForceSid, &sid_data));
+
+ // Normal UpdateSid.
+ cng_decoder.UpdateSid(sid_data);
+
+ // Two normal Generate, one with new_period.
+ EXPECT_TRUE(cng_decoder.Generate(rtc::ArrayView<int16_t>(out_data, 640), 1));
+ EXPECT_TRUE(cng_decoder.Generate(rtc::ArrayView<int16_t>(out_data, 640), 0));
+
+  // Call Generate with too much data.
+ EXPECT_FALSE(cng_decoder.Generate(rtc::ArrayView<int16_t>(out_data, 641), 0));
+}
+
+// Test automatic SID.
+TEST_F(CngTest, CngAutoSid) {
+ rtc::Buffer sid_data;
+
+ // Create and initialize encoder and decoder.
+ ComfortNoiseEncoder cng_encoder(16000, kSidNormalIntervalUpdate,
+ kCNGNumParamsNormal);
+ ComfortNoiseDecoder cng_decoder;
+
+ // Normal Encode, 100 msec, where no SID data should be generated.
+ for (int i = 0; i < 10; i++) {
+ EXPECT_EQ(
+ 0U, cng_encoder.Encode(rtc::ArrayView<const int16_t>(speech_data_, 160),
+ kNoSid, &sid_data));
+ }
+
+ // We have reached 100 msec, and SID data should be generated.
+ EXPECT_EQ(kCNGNumParamsNormal + 1,
+ cng_encoder.Encode(rtc::ArrayView<const int16_t>(speech_data_, 160),
+ kNoSid, &sid_data));
+}
+
+// Test automatic SID, with very short interval.
+TEST_F(CngTest, CngAutoSidShort) {
+ rtc::Buffer sid_data;
+
+ // Create and initialize encoder and decoder.
+ ComfortNoiseEncoder cng_encoder(16000, kSidShortIntervalUpdate,
+ kCNGNumParamsNormal);
+ ComfortNoiseDecoder cng_decoder;
+
+ // First call will never generate SID, unless forced to.
+ EXPECT_EQ(0U,
+ cng_encoder.Encode(rtc::ArrayView<const int16_t>(speech_data_, 160),
+ kNoSid, &sid_data));
+
+ // Normal Encode, 100 msec, SID data should be generated all the time.
+ for (int i = 0; i < 10; i++) {
+ EXPECT_EQ(
+ kCNGNumParamsNormal + 1,
+ cng_encoder.Encode(rtc::ArrayView<const int16_t>(speech_data_, 160),
+ kNoSid, &sid_data));
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/cng/webrtc_cng.cc b/third_party/libwebrtc/modules/audio_coding/codecs/cng/webrtc_cng.cc
new file mode 100644
index 0000000000..48f1b8c296
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/cng/webrtc_cng.cc
@@ -0,0 +1,436 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/cng/webrtc_cng.h"
+
+#include <algorithm>
+
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/numerics/safe_conversions.h"
+
+namespace webrtc {
+
+namespace {
+
+const size_t kCngMaxOutsizeOrder = 640;
+
+// TODO(ossu): Rename the left-over WebRtcCng according to style guide.
+void WebRtcCng_K2a16(int16_t* k, int useOrder, int16_t* a);
+
+const int32_t WebRtcCng_kDbov[94] = {
+ 1081109975, 858756178, 682134279, 541838517, 430397633, 341876992,
+ 271562548, 215709799, 171344384, 136103682, 108110997, 85875618,
+ 68213428, 54183852, 43039763, 34187699, 27156255, 21570980,
+ 17134438, 13610368, 10811100, 8587562, 6821343, 5418385,
+ 4303976, 3418770, 2715625, 2157098, 1713444, 1361037,
+ 1081110, 858756, 682134, 541839, 430398, 341877,
+ 271563, 215710, 171344, 136104, 108111, 85876,
+ 68213, 54184, 43040, 34188, 27156, 21571,
+ 17134, 13610, 10811, 8588, 6821, 5418,
+ 4304, 3419, 2716, 2157, 1713, 1361,
+ 1081, 859, 682, 542, 430, 342,
+ 272, 216, 171, 136, 108, 86,
+ 68, 54, 43, 34, 27, 22,
+ 17, 14, 11, 9, 7, 5,
+ 4, 3, 3, 2, 2, 1,
+ 1, 1, 1, 1};
+
+const int16_t WebRtcCng_kCorrWindow[WEBRTC_CNG_MAX_LPC_ORDER] = {
+ 32702, 32636, 32570, 32505, 32439, 32374,
+ 32309, 32244, 32179, 32114, 32049, 31985};
+
+} // namespace
+
+ComfortNoiseDecoder::ComfortNoiseDecoder() {
+ /* Needed to get the right function pointers in SPLIB. */
+ Reset();
+}
+
+void ComfortNoiseDecoder::Reset() {
+ dec_seed_ = 7777; /* For debugging only. */
+ dec_target_energy_ = 0;
+ dec_used_energy_ = 0;
+ for (auto& c : dec_target_reflCoefs_)
+ c = 0;
+ for (auto& c : dec_used_reflCoefs_)
+ c = 0;
+ for (auto& c : dec_filtstate_)
+ c = 0;
+ for (auto& c : dec_filtstateLow_)
+ c = 0;
+ dec_order_ = 5;
+ dec_target_scale_factor_ = 0;
+ dec_used_scale_factor_ = 0;
+}
+
+void ComfortNoiseDecoder::UpdateSid(rtc::ArrayView<const uint8_t> sid) {
+ int16_t refCs[WEBRTC_CNG_MAX_LPC_ORDER];
+ int32_t targetEnergy;
+ size_t length = sid.size();
+ /* Throw away reflection coefficients of higher order than we can handle. */
+ if (length > (WEBRTC_CNG_MAX_LPC_ORDER + 1))
+ length = WEBRTC_CNG_MAX_LPC_ORDER + 1;
+
+ dec_order_ = static_cast<uint16_t>(length - 1);
+
+ uint8_t sid0 = std::min<uint8_t>(sid[0], 93);
+ targetEnergy = WebRtcCng_kDbov[sid0];
+ /* Take down target energy to 75%. */
+ targetEnergy = targetEnergy >> 1;
+ targetEnergy += targetEnergy >> 2;
+
+ dec_target_energy_ = targetEnergy;
+
+ /* Reconstruct coeffs with tweak for WebRtc implementation of RFC3389. */
+ if (dec_order_ == WEBRTC_CNG_MAX_LPC_ORDER) {
+ for (size_t i = 0; i < (dec_order_); i++) {
+ refCs[i] = sid[i + 1] << 8; /* Q7 to Q15*/
+ dec_target_reflCoefs_[i] = refCs[i];
+ }
+ } else {
+ for (size_t i = 0; i < (dec_order_); i++) {
+ refCs[i] = (sid[i + 1] - 127) * (1 << 8); /* Q7 to Q15. */
+ dec_target_reflCoefs_[i] = refCs[i];
+ }
+ }
+
+ for (size_t i = (dec_order_); i < WEBRTC_CNG_MAX_LPC_ORDER; i++) {
+ refCs[i] = 0;
+ dec_target_reflCoefs_[i] = refCs[i];
+ }
+}
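
A worked example may make the SID layout clearer (an editorial sketch with
assumed values, not part of the patch):

  // Hypothetical SID payload for a quality-8 encoder: one energy-index byte
  // followed by 8 reflection coefficients quantized to Q7.
  const uint8_t sid[9] = {70, 130, 125, 128, 127, 129, 126, 131, 124};
  // UpdateSid() would set dec_order_ = 8, take the target energy as
  // WebRtcCng_kDbov[70] scaled down to 75%, and, because the order is below
  // WEBRTC_CNG_MAX_LPC_ORDER, map each coefficient back to Q15 as
  // (sid[i + 1] - 127) * (1 << 8).
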
+
+bool ComfortNoiseDecoder::Generate(rtc::ArrayView<int16_t> out_data,
+ bool new_period) {
+ int16_t excitation[kCngMaxOutsizeOrder];
+ int16_t low[kCngMaxOutsizeOrder];
+ int16_t lpPoly[WEBRTC_CNG_MAX_LPC_ORDER + 1];
+ int16_t ReflBetaStd = 26214; /* 0.8 in q15. */
+ int16_t ReflBetaCompStd = 6553; /* 0.2 in q15. */
+ int16_t ReflBetaNewP = 19661; /* 0.6 in q15. */
+ int16_t ReflBetaCompNewP = 13107; /* 0.4 in q15. */
+ int16_t Beta, BetaC; /* These are in Q15. */
+ int32_t targetEnergy;
+ int16_t En;
+ int16_t temp16;
+ const size_t num_samples = out_data.size();
+
+ if (num_samples > kCngMaxOutsizeOrder) {
+ return false;
+ }
+
+ if (new_period) {
+ dec_used_scale_factor_ = dec_target_scale_factor_;
+ Beta = ReflBetaNewP;
+ BetaC = ReflBetaCompNewP;
+ } else {
+ Beta = ReflBetaStd;
+ BetaC = ReflBetaCompStd;
+ }
+
+ /* Calculate new scale factor in Q13 */
+ dec_used_scale_factor_ = rtc::checked_cast<int16_t>(
+ WEBRTC_SPL_MUL_16_16_RSFT(dec_used_scale_factor_, Beta >> 2, 13) +
+ WEBRTC_SPL_MUL_16_16_RSFT(dec_target_scale_factor_, BetaC >> 2, 13));
+
+ dec_used_energy_ = dec_used_energy_ >> 1;
+ dec_used_energy_ += dec_target_energy_ >> 1;
+
+ /* Do the same for the reflection coeffs, albeit in Q15. */
+ for (size_t i = 0; i < WEBRTC_CNG_MAX_LPC_ORDER; i++) {
+ dec_used_reflCoefs_[i] =
+ (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(dec_used_reflCoefs_[i], Beta, 15);
+ dec_used_reflCoefs_[i] +=
+ (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(dec_target_reflCoefs_[i], BetaC, 15);
+ }
+
+ /* Compute the polynomial coefficients. */
+ WebRtcCng_K2a16(dec_used_reflCoefs_, WEBRTC_CNG_MAX_LPC_ORDER, lpPoly);
+
+ targetEnergy = dec_used_energy_;
+
+ /* Calculate scaling factor based on filter energy. */
+ En = 8192; /* 1.0 in Q13. */
+ for (size_t i = 0; i < (WEBRTC_CNG_MAX_LPC_ORDER); i++) {
+ /* Floating point value for reference.
+ E *= 1.0 - (dec_used_reflCoefs_[i] / 32768.0) *
+ (dec_used_reflCoefs_[i] / 32768.0);
+ */
+
+ /* Same in fixed point. */
+ /* K(i).^2 in Q15. */
+ temp16 = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(dec_used_reflCoefs_[i],
+ dec_used_reflCoefs_[i], 15);
+ /* 1 - K(i).^2 in Q15. */
+ temp16 = 0x7fff - temp16;
+ En = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(En, temp16, 15);
+ }
+
+ /* float scaling= sqrt(E * dec_target_energy_ / (1 << 24)); */
+
+ /* Calculate sqrt(En * target_energy / excitation energy) */
+ targetEnergy = WebRtcSpl_Sqrt(dec_used_energy_);
+
+ En = (int16_t)WebRtcSpl_Sqrt(En) << 6;
+ En = (En * 3) >> 1; /* 1.5 estimates sqrt(2). */
+ dec_used_scale_factor_ = (int16_t)((En * targetEnergy) >> 12);
+
+ /* Generate excitation. */
+ /* Excitation energy per sample is 2.^24 - Q13 N(0,1). */
+ for (size_t i = 0; i < num_samples; i++) {
+ excitation[i] = WebRtcSpl_RandN(&dec_seed_) >> 1;
+ }
+
+ /* Scale to correct energy. */
+ WebRtcSpl_ScaleVector(excitation, excitation, dec_used_scale_factor_,
+ num_samples, 13);
+
+ /* `lpPoly` - Coefficients in Q12.
+ * `excitation` - Speech samples.
+ * `nst->dec_filtstate` - State preservation.
+ * `out_data` - Filtered speech samples. */
+ WebRtcSpl_FilterAR(lpPoly, WEBRTC_CNG_MAX_LPC_ORDER + 1, excitation,
+ num_samples, dec_filtstate_, WEBRTC_CNG_MAX_LPC_ORDER,
+ dec_filtstateLow_, WEBRTC_CNG_MAX_LPC_ORDER,
+ out_data.data(), low, num_samples);
+
+ return true;
+}
+
+ComfortNoiseEncoder::ComfortNoiseEncoder(int fs, int interval, int quality)
+ : enc_nrOfCoefs_(quality),
+ enc_sampfreq_(fs),
+ enc_interval_(interval),
+ enc_msSinceSid_(0),
+ enc_Energy_(0),
+ enc_reflCoefs_{0},
+ enc_corrVector_{0},
+ enc_seed_(7777) /* For debugging only. */ {
+ RTC_CHECK_GT(quality, 0);
+ RTC_CHECK_LE(quality, WEBRTC_CNG_MAX_LPC_ORDER);
+}
+
+void ComfortNoiseEncoder::Reset(int fs, int interval, int quality) {
+ RTC_CHECK_GT(quality, 0);
+ RTC_CHECK_LE(quality, WEBRTC_CNG_MAX_LPC_ORDER);
+ enc_nrOfCoefs_ = quality;
+ enc_sampfreq_ = fs;
+ enc_interval_ = interval;
+ enc_msSinceSid_ = 0;
+ enc_Energy_ = 0;
+ for (auto& c : enc_reflCoefs_)
+ c = 0;
+ for (auto& c : enc_corrVector_)
+ c = 0;
+ enc_seed_ = 7777; /* For debugging only. */
+}
+
+size_t ComfortNoiseEncoder::Encode(rtc::ArrayView<const int16_t> speech,
+ bool force_sid,
+ rtc::Buffer* output) {
+ int16_t arCoefs[WEBRTC_CNG_MAX_LPC_ORDER + 1];
+ int32_t corrVector[WEBRTC_CNG_MAX_LPC_ORDER + 1];
+ int16_t refCs[WEBRTC_CNG_MAX_LPC_ORDER + 1];
+ int16_t hanningW[kCngMaxOutsizeOrder];
+ int16_t ReflBeta = 19661; /* 0.6 in q15. */
+ int16_t ReflBetaComp = 13107; /* 0.4 in q15. */
+ int32_t outEnergy;
+ int outShifts;
+ size_t i;
+ int stab;
+ int acorrScale;
+ size_t index;
+ size_t ind, factor;
+ int32_t* bptr;
+ int32_t blo, bhi;
+ int16_t negate;
+ const int16_t* aptr;
+ int16_t speechBuf[kCngMaxOutsizeOrder];
+
+ const size_t num_samples = speech.size();
+ RTC_CHECK_LE(num_samples, kCngMaxOutsizeOrder);
+
+ for (i = 0; i < num_samples; i++) {
+ speechBuf[i] = speech[i];
+ }
+
+ factor = num_samples;
+
+ /* Calculate energy and a coefficients. */
+ outEnergy = WebRtcSpl_Energy(speechBuf, num_samples, &outShifts);
+ while (outShifts > 0) {
+ /* We can only do 5 shifts without destroying accuracy in
+ * division factor. */
+ if (outShifts > 5) {
+ outEnergy <<= (outShifts - 5);
+ outShifts = 5;
+ } else {
+ factor /= 2;
+ outShifts--;
+ }
+ }
+ outEnergy = WebRtcSpl_DivW32W16(outEnergy, (int16_t)factor);
+
+ if (outEnergy > 1) {
+ /* Create Hanning Window. */
+ WebRtcSpl_GetHanningWindow(hanningW, num_samples / 2);
+ for (i = 0; i < (num_samples / 2); i++)
+ hanningW[num_samples - i - 1] = hanningW[i];
+
+ WebRtcSpl_ElementwiseVectorMult(speechBuf, hanningW, speechBuf, num_samples,
+ 14);
+
+ WebRtcSpl_AutoCorrelation(speechBuf, num_samples, enc_nrOfCoefs_,
+ corrVector, &acorrScale);
+
+ if (*corrVector == 0)
+ *corrVector = WEBRTC_SPL_WORD16_MAX;
+
+ /* Adds the bandwidth expansion. */
+ aptr = WebRtcCng_kCorrWindow;
+ bptr = corrVector;
+
+ /* (zzz) lpc16_1 = 17+1+820+2+2 = 842 (ordo2=700). */
+ for (ind = 0; ind < enc_nrOfCoefs_; ind++) {
+ /* The below code multiplies the 16 b corrWindow values (Q15) with
+ * the 32 b corrvector (Q0) and shifts the result down 15 steps. */
+ negate = *bptr < 0;
+ if (negate)
+ *bptr = -*bptr;
+
+ blo = (int32_t)*aptr * (*bptr & 0xffff);
+ bhi = ((blo >> 16) & 0xffff) +
+ ((int32_t)(*aptr++) * ((*bptr >> 16) & 0xffff));
+ blo = (blo & 0xffff) | ((bhi & 0xffff) << 16);
+
+ *bptr = (((bhi >> 16) & 0x7fff) << 17) | ((uint32_t)blo >> 15);
+ if (negate)
+ *bptr = -*bptr;
+ bptr++;
+ }
+ /* End of bandwidth expansion. */
+
+ stab = WebRtcSpl_LevinsonDurbin(corrVector, arCoefs, refCs, enc_nrOfCoefs_);
+
+ if (!stab) {
+ /* Disregard from this frame */
+ return 0;
+ }
+
+ } else {
+ for (i = 0; i < enc_nrOfCoefs_; i++)
+ refCs[i] = 0;
+ }
+
+ if (force_sid) {
+ /* Read instantaneous values instead of averaged. */
+ for (i = 0; i < enc_nrOfCoefs_; i++)
+ enc_reflCoefs_[i] = refCs[i];
+ enc_Energy_ = outEnergy;
+ } else {
+ /* Average history with new values. */
+ for (i = 0; i < enc_nrOfCoefs_; i++) {
+ enc_reflCoefs_[i] =
+ (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(enc_reflCoefs_[i], ReflBeta, 15);
+ enc_reflCoefs_[i] +=
+ (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(refCs[i], ReflBetaComp, 15);
+ }
+ enc_Energy_ = (outEnergy >> 2) + (enc_Energy_ >> 1) + (enc_Energy_ >> 2);
+ }
+
+ if (enc_Energy_ < 1) {
+ enc_Energy_ = 1;
+ }
+
+ if ((enc_msSinceSid_ > (enc_interval_ - 1)) || force_sid) {
+ /* Search for best dbov value. */
+ index = 0;
+ for (i = 1; i < 93; i++) {
+ /* Always round downwards. */
+ if ((enc_Energy_ - WebRtcCng_kDbov[i]) > 0) {
+ index = i;
+ break;
+ }
+ }
+ if ((i == 93) && (index == 0))
+ index = 94;
+
+ const size_t output_coefs = enc_nrOfCoefs_ + 1;
+ output->AppendData(output_coefs, [&](rtc::ArrayView<uint8_t> output) {
+ output[0] = (uint8_t)index;
+
+ /* Quantize coefficients with tweak for WebRtc implementation of
+ * RFC3389. */
+ if (enc_nrOfCoefs_ == WEBRTC_CNG_MAX_LPC_ORDER) {
+ for (i = 0; i < enc_nrOfCoefs_; i++) {
+ /* Q15 to Q7 with rounding. */
+ output[i + 1] = ((enc_reflCoefs_[i] + 128) >> 8);
+ }
+ } else {
+ for (i = 0; i < enc_nrOfCoefs_; i++) {
+ /* Q15 to Q7 with rounding. */
+ output[i + 1] = (127 + ((enc_reflCoefs_[i] + 128) >> 8));
+ }
+ }
+
+ return output_coefs;
+ });
+
+ enc_msSinceSid_ =
+ static_cast<int16_t>((1000 * num_samples) / enc_sampfreq_);
+ return output_coefs;
+ } else {
+ enc_msSinceSid_ +=
+ static_cast<int16_t>((1000 * num_samples) / enc_sampfreq_);
+ return 0;
+ }
+}
+
+namespace {
+/* Values in `k` are Q15, and `a` Q12. */
+void WebRtcCng_K2a16(int16_t* k, int useOrder, int16_t* a) {
+ int16_t any[WEBRTC_SPL_MAX_LPC_ORDER + 1];
+ int16_t* aptr;
+ int16_t* aptr2;
+ int16_t* anyptr;
+ const int16_t* kptr;
+ int m, i;
+
+ kptr = k;
+ *a = 4096; /* i.e., (Word16_MAX >> 3) + 1 */
+ *any = *a;
+ a[1] = (*k + 4) >> 3;
+ for (m = 1; m < useOrder; m++) {
+ kptr++;
+ aptr = a;
+ aptr++;
+ aptr2 = &a[m];
+ anyptr = any;
+ anyptr++;
+
+ any[m + 1] = (*kptr + 4) >> 3;
+ for (i = 0; i < m; i++) {
+ *anyptr++ =
+ (*aptr++) +
+ (int16_t)((((int32_t)(*aptr2--) * (int32_t)*kptr) + 16384) >> 15);
+ }
+
+ aptr = a;
+ anyptr = any;
+ for (i = 0; i < (m + 2); i++) {
+ *aptr++ = *anyptr++;
+ }
+ }
+}
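
For readability, here is the same step-up (K-to-A) recursion in floating
point. This is an editorial reference sketch, not part of the patch; the
Q15/Q12 fixed-point version above is what actually runs:

  #include <algorithm>
  #include <vector>

  // k holds `order` reflection coefficients in (-1, 1); a receives the
  // order + 1 LPC polynomial coefficients, with a[0] == 1.
  void K2aFloatSketch(const float* k, int order, float* a) {
    a[0] = 1.0f;
    for (int m = 0; m < order; ++m) {
      std::vector<float> next(a, a + m + 1);
      next.push_back(k[m]);                    // New highest coefficient.
      for (int i = 1; i <= m; ++i)
        next[i] = a[i] + k[m] * a[m + 1 - i];  // Mirror-and-add update.
      std::copy(next.begin(), next.end(), a);
    }
  }
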
+
+} // namespace
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/cng/webrtc_cng.h b/third_party/libwebrtc/modules/audio_coding/codecs/cng/webrtc_cng.h
new file mode 100644
index 0000000000..7afd243f81
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/cng/webrtc_cng.h
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_CNG_WEBRTC_CNG_H_
+#define MODULES_AUDIO_CODING_CODECS_CNG_WEBRTC_CNG_H_
+
+#include <stdint.h>
+
+#include <cstddef>
+
+#include "api/array_view.h"
+#include "rtc_base/buffer.h"
+
+#define WEBRTC_CNG_MAX_LPC_ORDER 12
+
+namespace webrtc {
+
+class ComfortNoiseDecoder {
+ public:
+ ComfortNoiseDecoder();
+ ~ComfortNoiseDecoder() = default;
+
+ ComfortNoiseDecoder(const ComfortNoiseDecoder&) = delete;
+ ComfortNoiseDecoder& operator=(const ComfortNoiseDecoder&) = delete;
+
+ void Reset();
+
+ // Updates the CN state when a new SID packet arrives.
+ // `sid` is a view of the SID packet without the headers.
+ void UpdateSid(rtc::ArrayView<const uint8_t> sid);
+
+ // Generates comfort noise.
+ // `out_data` will be filled with samples - its size determines the number of
+ // samples generated. When `new_period` is true, CNG history will be reset
+  // before any audio is generated. Returns `false` if `out_data` is too
+  // large - currently more than 640 samples (equal to 10 ms at 64 kHz).
+ // TODO(ossu): Specify better limits for the size of out_data. Either let it
+ // be unbounded or limit to 10ms in the current sample rate.
+ bool Generate(rtc::ArrayView<int16_t> out_data, bool new_period);
+
+ private:
+ uint32_t dec_seed_;
+ int32_t dec_target_energy_;
+ int32_t dec_used_energy_;
+ int16_t dec_target_reflCoefs_[WEBRTC_CNG_MAX_LPC_ORDER + 1];
+ int16_t dec_used_reflCoefs_[WEBRTC_CNG_MAX_LPC_ORDER + 1];
+ int16_t dec_filtstate_[WEBRTC_CNG_MAX_LPC_ORDER + 1];
+ int16_t dec_filtstateLow_[WEBRTC_CNG_MAX_LPC_ORDER + 1];
+ uint16_t dec_order_;
+ int16_t dec_target_scale_factor_; /* Q29 */
+ int16_t dec_used_scale_factor_; /* Q29 */
+};
+
+class ComfortNoiseEncoder {
+ public:
+ // Creates a comfort noise encoder.
+ // `fs` selects sample rate: 8000 for narrowband or 16000 for wideband.
+ // `interval` sets the interval at which to generate SID data (in ms).
+ // `quality` selects the number of refl. coeffs. Maximum allowed is 12.
+ ComfortNoiseEncoder(int fs, int interval, int quality);
+ ~ComfortNoiseEncoder() = default;
+
+ ComfortNoiseEncoder(const ComfortNoiseEncoder&) = delete;
+ ComfortNoiseEncoder& operator=(const ComfortNoiseEncoder&) = delete;
+
+ // Resets the comfort noise encoder to its initial state.
+ // Parameters are set as during construction.
+ void Reset(int fs, int interval, int quality);
+
+ // Analyzes background noise from `speech` and appends coefficients to
+  // `output`. Returns the number of bytes appended to `output`: zero if no
+  // SID frame was produced, otherwise one energy byte plus `quality`
+  // reflection coefficients. If `force_sid` is true, a SID frame is forced
+  // and the internal SID interval counter is reset.
+ // Will fail if the input size is too large (> 640 samples, see
+ // ComfortNoiseDecoder::Generate).
+ size_t Encode(rtc::ArrayView<const int16_t> speech,
+ bool force_sid,
+ rtc::Buffer* output);
+
+ private:
+ size_t enc_nrOfCoefs_;
+ int enc_sampfreq_;
+ int16_t enc_interval_;
+ int16_t enc_msSinceSid_;
+ int32_t enc_Energy_;
+ int16_t enc_reflCoefs_[WEBRTC_CNG_MAX_LPC_ORDER + 1];
+ int32_t enc_corrVector_[WEBRTC_CNG_MAX_LPC_ORDER + 1];
+ uint32_t enc_seed_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_CODING_CODECS_CNG_WEBRTC_CNG_H_
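
The unit tests earlier in this patch exercise this API in detail; as a quick
orientation, a hypothetical encode/decode round trip might look like the
sketch below (editorial, with rates and sizes assumed):

  #include "modules/audio_coding/codecs/cng/webrtc_cng.h"

  void CngRoundTripSketch(const int16_t* speech /* 160 samples = 10 ms */) {
    webrtc::ComfortNoiseEncoder encoder(/*fs=*/16000, /*interval=*/100,
                                        /*quality=*/8);
    webrtc::ComfortNoiseDecoder decoder;
    rtc::Buffer sid;
    // Forcing SID yields quality + 1 bytes: energy index plus coefficients.
    size_t bytes = encoder.Encode(rtc::ArrayView<const int16_t>(speech, 160),
                                  /*force_sid=*/true, &sid);
    if (bytes > 0)
      decoder.UpdateSid(sid);
    int16_t noise[160];
    // new_period = true restarts smoothing toward the latest SID targets.
    decoder.Generate(rtc::ArrayView<int16_t>(noise, 160), /*new_period=*/true);
  }
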
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/g711/audio_decoder_pcm.cc b/third_party/libwebrtc/modules/audio_coding/codecs/g711/audio_decoder_pcm.cc
new file mode 100644
index 0000000000..46ac671b30
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/g711/audio_decoder_pcm.cc
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/g711/audio_decoder_pcm.h"
+
+#include <utility>
+
+#include "modules/audio_coding/codecs/g711/g711_interface.h"
+#include "modules/audio_coding/codecs/legacy_encoded_audio_frame.h"
+
+namespace webrtc {
+
+void AudioDecoderPcmU::Reset() {}
+
+std::vector<AudioDecoder::ParseResult> AudioDecoderPcmU::ParsePayload(
+ rtc::Buffer&& payload,
+ uint32_t timestamp) {
+ return LegacyEncodedAudioFrame::SplitBySamples(
+ this, std::move(payload), timestamp, 8 * num_channels_, 8);
+}
+
+int AudioDecoderPcmU::SampleRateHz() const {
+ return 8000;
+}
+
+size_t AudioDecoderPcmU::Channels() const {
+ return num_channels_;
+}
+
+int AudioDecoderPcmU::DecodeInternal(const uint8_t* encoded,
+ size_t encoded_len,
+ int sample_rate_hz,
+ int16_t* decoded,
+ SpeechType* speech_type) {
+ RTC_DCHECK_EQ(SampleRateHz(), sample_rate_hz);
+ // Adjust the encoded length down to ensure the same number of samples in each
+ // channel.
+ const size_t encoded_len_adjusted =
+ PacketDuration(encoded, encoded_len) *
+ Channels(); // 1 byte per sample per channel
+ int16_t temp_type = 1; // Default is speech.
+ size_t ret =
+ WebRtcG711_DecodeU(encoded, encoded_len_adjusted, decoded, &temp_type);
+ *speech_type = ConvertSpeechType(temp_type);
+ return static_cast<int>(ret);
+}
+
+int AudioDecoderPcmU::PacketDuration(const uint8_t* encoded,
+ size_t encoded_len) const {
+ // One encoded byte per sample per channel.
+ return static_cast<int>(encoded_len / Channels());
+}
+
+void AudioDecoderPcmA::Reset() {}
+
+std::vector<AudioDecoder::ParseResult> AudioDecoderPcmA::ParsePayload(
+ rtc::Buffer&& payload,
+ uint32_t timestamp) {
+ return LegacyEncodedAudioFrame::SplitBySamples(
+ this, std::move(payload), timestamp, 8 * num_channels_, 8);
+}
+
+int AudioDecoderPcmA::SampleRateHz() const {
+ return 8000;
+}
+
+size_t AudioDecoderPcmA::Channels() const {
+ return num_channels_;
+}
+
+int AudioDecoderPcmA::DecodeInternal(const uint8_t* encoded,
+ size_t encoded_len,
+ int sample_rate_hz,
+ int16_t* decoded,
+ SpeechType* speech_type) {
+ RTC_DCHECK_EQ(SampleRateHz(), sample_rate_hz);
+ // Adjust the encoded length down to ensure the same number of samples in each
+ // channel.
+ const size_t encoded_len_adjusted =
+ PacketDuration(encoded, encoded_len) *
+ Channels(); // 1 byte per sample per channel
+ int16_t temp_type = 1; // Default is speech.
+ size_t ret =
+ WebRtcG711_DecodeA(encoded, encoded_len_adjusted, decoded, &temp_type);
+ *speech_type = ConvertSpeechType(temp_type);
+ return static_cast<int>(ret);
+}
+
+int AudioDecoderPcmA::PacketDuration(const uint8_t* encoded,
+ size_t encoded_len) const {
+ // One encoded byte per sample per channel.
+ return static_cast<int>(encoded_len / Channels());
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/g711/audio_decoder_pcm.h b/third_party/libwebrtc/modules/audio_coding/codecs/g711/audio_decoder_pcm.h
new file mode 100644
index 0000000000..3fa42cba30
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/g711/audio_decoder_pcm.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_G711_AUDIO_DECODER_PCM_H_
+#define MODULES_AUDIO_CODING_CODECS_G711_AUDIO_DECODER_PCM_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <vector>
+
+#include "api/audio_codecs/audio_decoder.h"
+#include "rtc_base/buffer.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+class AudioDecoderPcmU final : public AudioDecoder {
+ public:
+ explicit AudioDecoderPcmU(size_t num_channels) : num_channels_(num_channels) {
+ RTC_DCHECK_GE(num_channels, 1);
+ }
+
+ AudioDecoderPcmU(const AudioDecoderPcmU&) = delete;
+ AudioDecoderPcmU& operator=(const AudioDecoderPcmU&) = delete;
+
+ void Reset() override;
+ std::vector<ParseResult> ParsePayload(rtc::Buffer&& payload,
+ uint32_t timestamp) override;
+ int PacketDuration(const uint8_t* encoded, size_t encoded_len) const override;
+ int SampleRateHz() const override;
+ size_t Channels() const override;
+
+ protected:
+ int DecodeInternal(const uint8_t* encoded,
+ size_t encoded_len,
+ int sample_rate_hz,
+ int16_t* decoded,
+ SpeechType* speech_type) override;
+
+ private:
+ const size_t num_channels_;
+};
+
+class AudioDecoderPcmA final : public AudioDecoder {
+ public:
+ explicit AudioDecoderPcmA(size_t num_channels) : num_channels_(num_channels) {
+ RTC_DCHECK_GE(num_channels, 1);
+ }
+
+ AudioDecoderPcmA(const AudioDecoderPcmA&) = delete;
+ AudioDecoderPcmA& operator=(const AudioDecoderPcmA&) = delete;
+
+ void Reset() override;
+ std::vector<ParseResult> ParsePayload(rtc::Buffer&& payload,
+ uint32_t timestamp) override;
+ int PacketDuration(const uint8_t* encoded, size_t encoded_len) const override;
+ int SampleRateHz() const override;
+ size_t Channels() const override;
+
+ protected:
+ int DecodeInternal(const uint8_t* encoded,
+ size_t encoded_len,
+ int sample_rate_hz,
+ int16_t* decoded,
+ SpeechType* speech_type) override;
+
+ private:
+ const size_t num_channels_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_CODING_CODECS_G711_AUDIO_DECODER_PCM_H_
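
These decoders are normally driven by NetEq, but the public
AudioDecoder::Decode() wrapper can be called directly. A hypothetical sketch
(editorial, buffer sizes assumed):

  webrtc::AudioDecoderPcmU decoder(/*num_channels=*/1);
  uint8_t payload[160] = {0};  // 20 ms of U-law at 8 kHz, 1 byte per sample.
  int16_t pcm[160];
  webrtc::AudioDecoder::SpeechType type;
  int samples = decoder.Decode(payload, sizeof(payload),
                               /*sample_rate_hz=*/8000, sizeof(pcm), pcm,
                               &type);
  // samples == 160: one decoded sample per encoded byte per channel.
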
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/g711/audio_encoder_pcm.cc b/third_party/libwebrtc/modules/audio_coding/codecs/g711/audio_encoder_pcm.cc
new file mode 100644
index 0000000000..65e2da479d
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/g711/audio_encoder_pcm.cc
@@ -0,0 +1,126 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/g711/audio_encoder_pcm.h"
+
+#include <cstdint>
+
+#include "modules/audio_coding/codecs/g711/g711_interface.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+bool AudioEncoderPcm::Config::IsOk() const {
+ return (frame_size_ms % 10 == 0) && (num_channels >= 1);
+}
+
+AudioEncoderPcm::AudioEncoderPcm(const Config& config, int sample_rate_hz)
+ : sample_rate_hz_(sample_rate_hz),
+ num_channels_(config.num_channels),
+ payload_type_(config.payload_type),
+ num_10ms_frames_per_packet_(
+ static_cast<size_t>(config.frame_size_ms / 10)),
+ full_frame_samples_(config.num_channels * config.frame_size_ms *
+ sample_rate_hz / 1000),
+ first_timestamp_in_buffer_(0) {
+ RTC_CHECK_GT(sample_rate_hz, 0) << "Sample rate must be larger than 0 Hz";
+ RTC_CHECK_EQ(config.frame_size_ms % 10, 0)
+ << "Frame size must be an integer multiple of 10 ms.";
+ speech_buffer_.reserve(full_frame_samples_);
+}
+
+AudioEncoderPcm::~AudioEncoderPcm() = default;
+
+int AudioEncoderPcm::SampleRateHz() const {
+ return sample_rate_hz_;
+}
+
+size_t AudioEncoderPcm::NumChannels() const {
+ return num_channels_;
+}
+
+size_t AudioEncoderPcm::Num10MsFramesInNextPacket() const {
+ return num_10ms_frames_per_packet_;
+}
+
+size_t AudioEncoderPcm::Max10MsFramesInAPacket() const {
+ return num_10ms_frames_per_packet_;
+}
+
+int AudioEncoderPcm::GetTargetBitrate() const {
+ return static_cast<int>(8 * BytesPerSample() * SampleRateHz() *
+ NumChannels());
+}
+
+AudioEncoder::EncodedInfo AudioEncoderPcm::EncodeImpl(
+ uint32_t rtp_timestamp,
+ rtc::ArrayView<const int16_t> audio,
+ rtc::Buffer* encoded) {
+ if (speech_buffer_.empty()) {
+ first_timestamp_in_buffer_ = rtp_timestamp;
+ }
+ speech_buffer_.insert(speech_buffer_.end(), audio.begin(), audio.end());
+ if (speech_buffer_.size() < full_frame_samples_) {
+ return EncodedInfo();
+ }
+ RTC_CHECK_EQ(speech_buffer_.size(), full_frame_samples_);
+ EncodedInfo info;
+ info.encoded_timestamp = first_timestamp_in_buffer_;
+ info.payload_type = payload_type_;
+ info.encoded_bytes = encoded->AppendData(
+ full_frame_samples_ * BytesPerSample(),
+ [&](rtc::ArrayView<uint8_t> encoded) {
+ return EncodeCall(&speech_buffer_[0], full_frame_samples_,
+ encoded.data());
+ });
+ speech_buffer_.clear();
+ info.encoder_type = GetCodecType();
+ return info;
+}
+
+void AudioEncoderPcm::Reset() {
+ speech_buffer_.clear();
+}
+
+absl::optional<std::pair<TimeDelta, TimeDelta>>
+AudioEncoderPcm::GetFrameLengthRange() const {
+ return {{TimeDelta::Millis(num_10ms_frames_per_packet_ * 10),
+ TimeDelta::Millis(num_10ms_frames_per_packet_ * 10)}};
+}
+
+size_t AudioEncoderPcmA::EncodeCall(const int16_t* audio,
+ size_t input_len,
+ uint8_t* encoded) {
+ return WebRtcG711_EncodeA(audio, input_len, encoded);
+}
+
+size_t AudioEncoderPcmA::BytesPerSample() const {
+ return 1;
+}
+
+AudioEncoder::CodecType AudioEncoderPcmA::GetCodecType() const {
+ return AudioEncoder::CodecType::kPcmA;
+}
+
+size_t AudioEncoderPcmU::EncodeCall(const int16_t* audio,
+ size_t input_len,
+ uint8_t* encoded) {
+ return WebRtcG711_EncodeU(audio, input_len, encoded);
+}
+
+size_t AudioEncoderPcmU::BytesPerSample() const {
+ return 1;
+}
+
+AudioEncoder::CodecType AudioEncoderPcmU::GetCodecType() const {
+ return AudioEncoder::CodecType::kPcmU;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/g711/audio_encoder_pcm.h b/third_party/libwebrtc/modules/audio_coding/codecs/g711/audio_encoder_pcm.h
new file mode 100644
index 0000000000..d50be4b457
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/g711/audio_encoder_pcm.h
@@ -0,0 +1,128 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_G711_AUDIO_ENCODER_PCM_H_
+#define MODULES_AUDIO_CODING_CODECS_G711_AUDIO_ENCODER_PCM_H_
+
+#include <utility>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/audio_codecs/audio_encoder.h"
+#include "api/units/time_delta.h"
+
+namespace webrtc {
+
+class AudioEncoderPcm : public AudioEncoder {
+ public:
+ struct Config {
+ public:
+ bool IsOk() const;
+
+ int frame_size_ms;
+ size_t num_channels;
+ int payload_type;
+
+ protected:
+ explicit Config(int pt)
+ : frame_size_ms(20), num_channels(1), payload_type(pt) {}
+ };
+
+ ~AudioEncoderPcm() override;
+
+ int SampleRateHz() const override;
+ size_t NumChannels() const override;
+ size_t Num10MsFramesInNextPacket() const override;
+ size_t Max10MsFramesInAPacket() const override;
+ int GetTargetBitrate() const override;
+ void Reset() override;
+ absl::optional<std::pair<TimeDelta, TimeDelta>> GetFrameLengthRange()
+ const override;
+
+ protected:
+ AudioEncoderPcm(const Config& config, int sample_rate_hz);
+
+ EncodedInfo EncodeImpl(uint32_t rtp_timestamp,
+ rtc::ArrayView<const int16_t> audio,
+ rtc::Buffer* encoded) override;
+
+ virtual size_t EncodeCall(const int16_t* audio,
+ size_t input_len,
+ uint8_t* encoded) = 0;
+
+ virtual size_t BytesPerSample() const = 0;
+
+ // Used to set EncodedInfoLeaf::encoder_type in
+ // AudioEncoderPcm::EncodeImpl
+ virtual AudioEncoder::CodecType GetCodecType() const = 0;
+
+ private:
+ const int sample_rate_hz_;
+ const size_t num_channels_;
+ const int payload_type_;
+ const size_t num_10ms_frames_per_packet_;
+ const size_t full_frame_samples_;
+ std::vector<int16_t> speech_buffer_;
+ uint32_t first_timestamp_in_buffer_;
+};
+
+class AudioEncoderPcmA final : public AudioEncoderPcm {
+ public:
+ struct Config : public AudioEncoderPcm::Config {
+ Config() : AudioEncoderPcm::Config(8) {}
+ };
+
+ explicit AudioEncoderPcmA(const Config& config)
+ : AudioEncoderPcm(config, kSampleRateHz) {}
+
+ AudioEncoderPcmA(const AudioEncoderPcmA&) = delete;
+ AudioEncoderPcmA& operator=(const AudioEncoderPcmA&) = delete;
+
+ protected:
+ size_t EncodeCall(const int16_t* audio,
+ size_t input_len,
+ uint8_t* encoded) override;
+
+ size_t BytesPerSample() const override;
+
+ AudioEncoder::CodecType GetCodecType() const override;
+
+ private:
+ static const int kSampleRateHz = 8000;
+};
+
+class AudioEncoderPcmU final : public AudioEncoderPcm {
+ public:
+ struct Config : public AudioEncoderPcm::Config {
+ Config() : AudioEncoderPcm::Config(0) {}
+ };
+
+ explicit AudioEncoderPcmU(const Config& config)
+ : AudioEncoderPcm(config, kSampleRateHz) {}
+
+ AudioEncoderPcmU(const AudioEncoderPcmU&) = delete;
+ AudioEncoderPcmU& operator=(const AudioEncoderPcmU&) = delete;
+
+ protected:
+ size_t EncodeCall(const int16_t* audio,
+ size_t input_len,
+ uint8_t* encoded) override;
+
+ size_t BytesPerSample() const override;
+
+ AudioEncoder::CodecType GetCodecType() const override;
+
+ private:
+ static const int kSampleRateHz = 8000;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_CODING_CODECS_G711_AUDIO_ENCODER_PCM_H_
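
A hypothetical usage sketch for the U-law encoder above (editorial, not part
of the patch; the default Config gives payload type 0, 20 ms frames, mono,
and Encode() is the public buffering wrapper from the AudioEncoder base
class):

  webrtc::AudioEncoderPcmU::Config config;
  webrtc::AudioEncoderPcmU encoder(config);
  int16_t pcm[80] = {0};  // One 10 ms block at 8000 Hz.
  rtc::Buffer packet;
  uint32_t rtp_timestamp = 0;
  for (int i = 0; i < 2; ++i) {
    webrtc::AudioEncoder::EncodedInfo info = encoder.Encode(
        rtp_timestamp, rtc::ArrayView<const int16_t>(pcm, 80), &packet);
    rtp_timestamp += 80;
    // info.encoded_bytes is 0 after the first call (still buffering) and
    // 160 after the second, once a full 20 ms frame is available.
  }
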
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/g711/g711_interface.c b/third_party/libwebrtc/modules/audio_coding/codecs/g711/g711_interface.c
new file mode 100644
index 0000000000..5fe1692ccb
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/g711/g711_interface.c
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <string.h>
+
+#include "modules/third_party/g711/g711.h"
+#include "modules/audio_coding/codecs/g711/g711_interface.h"
+
+size_t WebRtcG711_EncodeA(const int16_t* speechIn,
+ size_t len,
+ uint8_t* encoded) {
+ size_t n;
+ for (n = 0; n < len; n++)
+ encoded[n] = linear_to_alaw(speechIn[n]);
+ return len;
+}
+
+size_t WebRtcG711_EncodeU(const int16_t* speechIn,
+ size_t len,
+ uint8_t* encoded) {
+ size_t n;
+ for (n = 0; n < len; n++)
+ encoded[n] = linear_to_ulaw(speechIn[n]);
+ return len;
+}
+
+size_t WebRtcG711_DecodeA(const uint8_t* encoded,
+ size_t len,
+ int16_t* decoded,
+ int16_t* speechType) {
+ size_t n;
+ for (n = 0; n < len; n++)
+ decoded[n] = alaw_to_linear(encoded[n]);
+ *speechType = 1;
+ return len;
+}
+
+size_t WebRtcG711_DecodeU(const uint8_t* encoded,
+ size_t len,
+ int16_t* decoded,
+ int16_t* speechType) {
+ size_t n;
+ for (n = 0; n < len; n++)
+ decoded[n] = ulaw_to_linear(encoded[n]);
+ *speechType = 1;
+ return len;
+}
+
+int16_t WebRtcG711_Version(char* version, int16_t lenBytes) {
+ strncpy(version, "2.0.0", lenBytes);
+ return 0;
+}
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/g711/g711_interface.h b/third_party/libwebrtc/modules/audio_coding/codecs/g711/g711_interface.h
new file mode 100644
index 0000000000..83f9d378ed
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/g711/g711_interface.h
@@ -0,0 +1,135 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_G711_G711_INTERFACE_H_
+#define MODULES_AUDIO_CODING_CODECS_G711_G711_INTERFACE_H_
+
+#include <stdint.h>
+
+// Comfort noise constants
+#define G711_WEBRTC_SPEECH 1
+#define G711_WEBRTC_CNG 2
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/****************************************************************************
+ * WebRtcG711_EncodeA(...)
+ *
+ * This function encodes a G711 A-law frame and inserts it into a packet.
+ * The input speech can be of any length.
+ *
+ * Input:
+ * - speechIn : Input speech vector
+ * - len : Samples in speechIn
+ *
+ * Output:
+ * - encoded : The encoded data vector
+ *
+ * Return value : Length (in bytes) of coded data.
+ * Always equal to len input parameter.
+ */
+
+size_t WebRtcG711_EncodeA(const int16_t* speechIn,
+ size_t len,
+ uint8_t* encoded);
+
+/****************************************************************************
+ * WebRtcG711_EncodeU(...)
+ *
+ * This function encodes a G711 U-law frame and inserts it into a packet.
+ * The input speech can be of any length.
+ *
+ * Input:
+ * - speechIn : Input speech vector
+ * - len : Samples in speechIn
+ *
+ * Output:
+ * - encoded : The encoded data vector
+ *
+ * Return value : Length (in bytes) of coded data.
+ * Always equal to len input parameter.
+ */
+
+size_t WebRtcG711_EncodeU(const int16_t* speechIn,
+ size_t len,
+ uint8_t* encoded);
+
+/****************************************************************************
+ * WebRtcG711_DecodeA(...)
+ *
+ * This function decodes a packet G711 A-law frame.
+ *
+ * Input:
+ * - encoded : Encoded data
+ * - len : Bytes in encoded vector
+ *
+ * Output:
+ * - decoded : The decoded vector
+ * - speechType : 1 normal, 2 CNG (for G711 it should
+ * always return 1 since G711 does not have a
+ * built-in DTX/CNG scheme)
+ *
+ * Return value : Samples in decoded vector. Always equal to
+ * the len input parameter, since each encoded
+ * byte decodes to exactly one sample.
+ */
+
+size_t WebRtcG711_DecodeA(const uint8_t* encoded,
+ size_t len,
+ int16_t* decoded,
+ int16_t* speechType);
+
+/****************************************************************************
+ * WebRtcG711_DecodeU(...)
+ *
+ * This function decodes a packet G711 U-law frame.
+ *
+ * Input:
+ * - encoded : Encoded data
+ * - len : Bytes in encoded vector
+ *
+ * Output:
+ * - decoded : The decoded vector
+ * - speechType : 1 normal, 2 CNG (for G711 it should
+ * always return 1 since G711 does not have a
+ * built-in DTX/CNG scheme)
+ *
+ * Return value : Samples in decoded vector. Always equal to
+ * the len input parameter, since each encoded
+ * byte decodes to exactly one sample.
+ */
+
+size_t WebRtcG711_DecodeU(const uint8_t* encoded,
+ size_t len,
+ int16_t* decoded,
+ int16_t* speechType);
+
+/**********************************************************************
+ * WebRtcG711_Version(...)
+ *
+ * This function gives the version string of the G.711 codec.
+ *
+ * Input:
+ * - lenBytes: the size of allocated space (in bytes) where
+ * the version number is written to (in string format).
+ *
+ * Output:
+ * - version: Pointer to a buffer where the version number is
+ * written to.
+ *
+ */
+
+int16_t WebRtcG711_Version(char* version, int16_t lenBytes);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // MODULES_AUDIO_CODING_CODECS_G711_G711_INTERFACE_H_
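
A hypothetical round trip through the interface above (an editorial sketch;
buffer sizes are assumed):

  #include "modules/audio_coding/codecs/g711/g711_interface.h"

  void G711RoundTripSketch(const int16_t* pcm, size_t len /* <= 160 */) {
    uint8_t coded[160];
    int16_t out[160];
    int16_t speech_type;
    size_t bytes = WebRtcG711_EncodeU(pcm, len, coded);  // 1 byte per sample.
    size_t samples = WebRtcG711_DecodeU(coded, bytes, out, &speech_type);
    // bytes == len and samples == len; speech_type is always set to 1,
    // since G.711 has no built-in DTX/CNG scheme.
  }
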
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/g711/test/testG711.cc b/third_party/libwebrtc/modules/audio_coding/codecs/g711/test/testG711.cc
new file mode 100644
index 0000000000..f3a42f5d79
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/g711/test/testG711.cc
@@ -0,0 +1,168 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * testG711.cpp : Defines the entry point for the console application.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+/* include API */
+#include "modules/audio_coding/codecs/g711/g711_interface.h"
+
+/* Runtime statistics */
+#include <time.h>
+#define CLOCKS_PER_SEC_G711 1000
+
+/* function for reading audio data from PCM file */
+bool readframe(int16_t* data, FILE* inp, size_t length) {
+ size_t rlen = fread(data, sizeof(int16_t), length, inp);
+ if (rlen >= length)
+ return false;
+ memset(data + rlen, 0, (length - rlen) * sizeof(int16_t));
+ return true;
+}
+
+int main(int argc, char* argv[]) {
+ char inname[80], outname[40], bitname[40];
+ FILE* inp;
+ FILE* outp;
+ FILE* bitp = NULL;
+ int framecnt;
+ bool endfile;
+
+ size_t framelength = 80;
+
+ /* Runtime statistics */
+ double starttime;
+ double runtime;
+ double length_file;
+
+ size_t stream_len = 0;
+ int16_t shortdata[480];
+ int16_t decoded[480];
+ uint8_t streamdata[1000];
+ int16_t speechType[1];
+ char law[2];
+ char versionNumber[40];
+
+ /* handling wrong input arguments in the command line */
+ if ((argc != 5) && (argc != 6)) {
+ printf("\n\nWrong number of arguments or flag values.\n\n");
+
+ printf("\n");
+ printf("\nG.711 test application\n\n");
+ printf("Usage:\n\n");
+ printf("./testG711.exe framelength law infile outfile \n\n");
+ printf("framelength: Framelength in samples.\n");
+ printf("law : Coding law, A och u.\n");
+ printf("infile : Normal speech input file\n");
+ printf("outfile : Speech output file\n\n");
+ printf("outbits : Output bitstream file [optional]\n\n");
+ exit(0);
+ }
+
+ /* Get version and print */
+ WebRtcG711_Version(versionNumber, 40);
+
+ printf("-----------------------------------\n");
+ printf("G.711 version: %s\n\n", versionNumber);
+ /* Get frame length */
+ int framelength_int = atoi(argv[1]);
+ if (framelength_int < 0) {
+ printf(" G.722: Invalid framelength %d.\n", framelength_int);
+ exit(1);
+ }
+ framelength = static_cast<size_t>(framelength_int);
+
+ /* Get compression law */
+ strcpy(law, argv[2]);
+
+ /* Get Input and Output files */
+ sscanf(argv[3], "%s", inname);
+ sscanf(argv[4], "%s", outname);
+ if (argc == 6) {
+ sscanf(argv[5], "%s", bitname);
+ if ((bitp = fopen(bitname, "wb")) == NULL) {
+ printf(" G.711: Cannot read file %s.\n", bitname);
+ exit(1);
+ }
+ }
+
+ if ((inp = fopen(inname, "rb")) == NULL) {
+ printf(" G.711: Cannot read file %s.\n", inname);
+ exit(1);
+ }
+ if ((outp = fopen(outname, "wb")) == NULL) {
+ printf(" G.711: Cannot write file %s.\n", outname);
+ exit(1);
+ }
+ printf("\nInput: %s\nOutput: %s\n", inname, outname);
+ if (argc == 6) {
+ printf("\nBitfile: %s\n", bitname);
+ }
+
+ starttime = clock() / (double)CLOCKS_PER_SEC_G711; /* Runtime statistics */
+
+ /* Initialize encoder and decoder */
+ framecnt = 0;
+ endfile = false;
+ while (!endfile) {
+ framecnt++;
+ /* Read speech block */
+ endfile = readframe(shortdata, inp, framelength);
+
+ /* G.711 encoding */
+ if (!strcmp(law, "A")) {
+ /* A-law encoding */
+ stream_len = WebRtcG711_EncodeA(shortdata, framelength, streamdata);
+ if (argc == 6) {
+ /* Write bits to file */
+ if (fwrite(streamdata, sizeof(unsigned char), stream_len, bitp) !=
+ stream_len) {
+ return -1;
+ }
+ }
+ WebRtcG711_DecodeA(streamdata, stream_len, decoded, speechType);
+ } else if (!strcmp(law, "u")) {
+ /* u-law encoding */
+ stream_len = WebRtcG711_EncodeU(shortdata, framelength, streamdata);
+ if (argc == 6) {
+ /* Write bits to file */
+ if (fwrite(streamdata, sizeof(unsigned char), stream_len, bitp) !=
+ stream_len) {
+ return -1;
+ }
+ }
+ WebRtcG711_DecodeU(streamdata, stream_len, decoded, speechType);
+ } else {
+ printf("Wrong law mode\n");
+ exit(1);
+ }
+ /* Write coded speech to file */
+ if (fwrite(decoded, sizeof(short), framelength, outp) != framelength) {
+ return -1;
+ }
+ }
+
+ runtime = (double)(clock() / (double)CLOCKS_PER_SEC_G711 - starttime);
+ length_file = ((double)framecnt * (double)framelength / 8000);
+ printf("\n\nLength of speech file: %.1f s\n", length_file);
+ printf("Time to run G.711: %.2f s (%.2f %% of realtime)\n\n", runtime,
+ (100 * runtime / length_file));
+ printf("---------------------END----------------------\n");
+
+ fclose(inp);
+ fclose(outp);
+
+ return 0;
+}
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/g722/audio_decoder_g722.cc b/third_party/libwebrtc/modules/audio_coding/codecs/g722/audio_decoder_g722.cc
new file mode 100644
index 0000000000..1ecc9bc3d1
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/g722/audio_decoder_g722.cc
@@ -0,0 +1,178 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/g722/audio_decoder_g722.h"
+
+#include <string.h>
+
+#include <utility>
+
+#include "modules/audio_coding/codecs/g722/g722_interface.h"
+#include "modules/audio_coding/codecs/legacy_encoded_audio_frame.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+AudioDecoderG722Impl::AudioDecoderG722Impl() {
+ WebRtcG722_CreateDecoder(&dec_state_);
+ WebRtcG722_DecoderInit(dec_state_);
+}
+
+AudioDecoderG722Impl::~AudioDecoderG722Impl() {
+ WebRtcG722_FreeDecoder(dec_state_);
+}
+
+bool AudioDecoderG722Impl::HasDecodePlc() const {
+ return false;
+}
+
+int AudioDecoderG722Impl::DecodeInternal(const uint8_t* encoded,
+ size_t encoded_len,
+ int sample_rate_hz,
+ int16_t* decoded,
+ SpeechType* speech_type) {
+ RTC_DCHECK_EQ(SampleRateHz(), sample_rate_hz);
+ int16_t temp_type = 1; // Default is speech.
+ size_t ret =
+ WebRtcG722_Decode(dec_state_, encoded, encoded_len, decoded, &temp_type);
+ *speech_type = ConvertSpeechType(temp_type);
+ return static_cast<int>(ret);
+}
+
+void AudioDecoderG722Impl::Reset() {
+ WebRtcG722_DecoderInit(dec_state_);
+}
+
+std::vector<AudioDecoder::ParseResult> AudioDecoderG722Impl::ParsePayload(
+ rtc::Buffer&& payload,
+ uint32_t timestamp) {
+ return LegacyEncodedAudioFrame::SplitBySamples(this, std::move(payload),
+ timestamp, 8, 16);
+}
+
+int AudioDecoderG722Impl::PacketDuration(const uint8_t* encoded,
+ size_t encoded_len) const {
+ // 1/2 encoded byte per sample per channel.
+ return static_cast<int>(2 * encoded_len / Channels());
+}
+
+int AudioDecoderG722Impl::SampleRateHz() const {
+ return 16000;
+}
+
+size_t AudioDecoderG722Impl::Channels() const {
+ return 1;
+}
+
+AudioDecoderG722StereoImpl::AudioDecoderG722StereoImpl() {
+ WebRtcG722_CreateDecoder(&dec_state_left_);
+ WebRtcG722_CreateDecoder(&dec_state_right_);
+ WebRtcG722_DecoderInit(dec_state_left_);
+ WebRtcG722_DecoderInit(dec_state_right_);
+}
+
+AudioDecoderG722StereoImpl::~AudioDecoderG722StereoImpl() {
+ WebRtcG722_FreeDecoder(dec_state_left_);
+ WebRtcG722_FreeDecoder(dec_state_right_);
+}
+
+int AudioDecoderG722StereoImpl::DecodeInternal(const uint8_t* encoded,
+ size_t encoded_len,
+ int sample_rate_hz,
+ int16_t* decoded,
+ SpeechType* speech_type) {
+ RTC_DCHECK_EQ(SampleRateHz(), sample_rate_hz);
+ // Adjust the encoded length down to ensure the same number of samples in each
+ // channel.
+ const size_t encoded_len_adjusted = PacketDuration(encoded, encoded_len) *
+ Channels() /
+ 2; // 1/2 byte per sample per channel
+ int16_t temp_type = 1; // Default is speech.
+ // De-interleave the bit-stream into two separate payloads.
+ uint8_t* encoded_deinterleaved = new uint8_t[encoded_len_adjusted];
+ SplitStereoPacket(encoded, encoded_len_adjusted, encoded_deinterleaved);
+ // Decode left and right.
+ size_t decoded_len =
+ WebRtcG722_Decode(dec_state_left_, encoded_deinterleaved,
+ encoded_len_adjusted / 2, decoded, &temp_type);
+ size_t ret = WebRtcG722_Decode(
+ dec_state_right_, &encoded_deinterleaved[encoded_len_adjusted / 2],
+ encoded_len_adjusted / 2, &decoded[decoded_len], &temp_type);
+ if (ret == decoded_len) {
+ ret += decoded_len; // Return total number of samples.
+ // Interleave output.
+ for (size_t k = ret / 2; k < ret; k++) {
+ int16_t temp = decoded[k];
+ memmove(&decoded[2 * k - ret + 2], &decoded[2 * k - ret + 1],
+ (ret - k - 1) * sizeof(int16_t));
+ decoded[2 * k - ret + 1] = temp;
+ }
+ }
+ *speech_type = ConvertSpeechType(temp_type);
+ delete[] encoded_deinterleaved;
+ return static_cast<int>(ret);
+}
+
+int AudioDecoderG722StereoImpl::PacketDuration(const uint8_t* encoded,
+ size_t encoded_len) const {
+ // 1/2 encoded byte per sample per channel. Make sure the length represents
+ // an equal number of bytes per channel. Otherwise, we cannot de-interleave
+ // the encoded data later.
+ return static_cast<int>(2 * (encoded_len / Channels()));
+}
+
+int AudioDecoderG722StereoImpl::SampleRateHz() const {
+ return 16000;
+}
+
+size_t AudioDecoderG722StereoImpl::Channels() const {
+ return 2;
+}
+
+void AudioDecoderG722StereoImpl::Reset() {
+ WebRtcG722_DecoderInit(dec_state_left_);
+ WebRtcG722_DecoderInit(dec_state_right_);
+}
+
+std::vector<AudioDecoder::ParseResult> AudioDecoderG722StereoImpl::ParsePayload(
+ rtc::Buffer&& payload,
+ uint32_t timestamp) {
+ return LegacyEncodedAudioFrame::SplitBySamples(this, std::move(payload),
+ timestamp, 2 * 8, 16);
+}
+
+// Split the stereo packet and place left and right channel after each other
+// in the output array.
+void AudioDecoderG722StereoImpl::SplitStereoPacket(
+ const uint8_t* encoded,
+ size_t encoded_len,
+ uint8_t* encoded_deinterleaved) {
+ // Regroup the 4 bits/sample so |l1 l2| |r1 r2| |l3 l4| |r3 r4| ...,
+ // where "lx" is 4 bits representing left sample number x, and "rx" right
+ // sample. Two samples fit in one byte, represented with |...|.
+ for (size_t i = 0; i + 1 < encoded_len; i += 2) {
+ uint8_t right_byte = ((encoded[i] & 0x0F) << 4) + (encoded[i + 1] & 0x0F);
+ encoded_deinterleaved[i] = (encoded[i] & 0xF0) + (encoded[i + 1] >> 4);
+ encoded_deinterleaved[i + 1] = right_byte;
+ }
+
+ // Move one byte representing right channel each loop, and place it at the
+ // end of the bytestream vector. After looping the data is reordered to:
+ // |l1 l2| |l3 l4| ... |l(N-1) lN| |r1 r2| |r3 r4| ... |r(N-1) r(N)|,
+ // where N is the total number of samples.
+ for (size_t i = 0; i < encoded_len / 2; i++) {
+ uint8_t right_byte = encoded_deinterleaved[i + 1];
+ memmove(&encoded_deinterleaved[i + 1], &encoded_deinterleaved[i + 2],
+ encoded_len - i - 2);
+ encoded_deinterleaved[encoded_len - 1] = right_byte;
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/g722/audio_decoder_g722.h b/third_party/libwebrtc/modules/audio_coding/codecs/g722/audio_decoder_g722.h
new file mode 100644
index 0000000000..5872fad5de
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/g722/audio_decoder_g722.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_G722_AUDIO_DECODER_G722_H_
+#define MODULES_AUDIO_CODING_CODECS_G722_AUDIO_DECODER_G722_H_
+
+#include "api/audio_codecs/audio_decoder.h"
+
+typedef struct WebRtcG722DecInst G722DecInst;
+
+namespace webrtc {
+
+class AudioDecoderG722Impl final : public AudioDecoder {
+ public:
+ AudioDecoderG722Impl();
+ ~AudioDecoderG722Impl() override;
+
+ AudioDecoderG722Impl(const AudioDecoderG722Impl&) = delete;
+ AudioDecoderG722Impl& operator=(const AudioDecoderG722Impl&) = delete;
+
+ bool HasDecodePlc() const override;
+ void Reset() override;
+ std::vector<ParseResult> ParsePayload(rtc::Buffer&& payload,
+ uint32_t timestamp) override;
+ int PacketDuration(const uint8_t* encoded, size_t encoded_len) const override;
+ int SampleRateHz() const override;
+ size_t Channels() const override;
+
+ protected:
+ int DecodeInternal(const uint8_t* encoded,
+ size_t encoded_len,
+ int sample_rate_hz,
+ int16_t* decoded,
+ SpeechType* speech_type) override;
+
+ private:
+ G722DecInst* dec_state_;
+};
+
+class AudioDecoderG722StereoImpl final : public AudioDecoder {
+ public:
+ AudioDecoderG722StereoImpl();
+ ~AudioDecoderG722StereoImpl() override;
+
+ AudioDecoderG722StereoImpl(const AudioDecoderG722StereoImpl&) = delete;
+ AudioDecoderG722StereoImpl& operator=(const AudioDecoderG722StereoImpl&) =
+ delete;
+
+ void Reset() override;
+ std::vector<ParseResult> ParsePayload(rtc::Buffer&& payload,
+ uint32_t timestamp) override;
+ int SampleRateHz() const override;
+ int PacketDuration(const uint8_t* encoded, size_t encoded_len) const override;
+ size_t Channels() const override;
+
+ protected:
+ int DecodeInternal(const uint8_t* encoded,
+ size_t encoded_len,
+ int sample_rate_hz,
+ int16_t* decoded,
+ SpeechType* speech_type) override;
+
+ private:
+ // Splits the stereo-interleaved payload in `encoded` into separate payloads
+ // for left and right channels. The separated payloads are written to
+ // `encoded_deinterleaved`, which must hold at least `encoded_len` samples.
+ // The left channel starts at offset 0, while the right channel starts at
+ // offset encoded_len / 2 into `encoded_deinterleaved`.
+ void SplitStereoPacket(const uint8_t* encoded,
+ size_t encoded_len,
+ uint8_t* encoded_deinterleaved);
+
+ G722DecInst* dec_state_left_;
+ G722DecInst* dec_state_right_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_CODING_CODECS_G722_AUDIO_DECODER_G722_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/g722/audio_encoder_g722.cc b/third_party/libwebrtc/modules/audio_coding/codecs/g722/audio_encoder_g722.cc
new file mode 100644
index 0000000000..b7d34ba581
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/g722/audio_encoder_g722.cc
@@ -0,0 +1,156 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/g722/audio_encoder_g722.h"
+
+#include <cstdint>
+
+#include "modules/audio_coding/codecs/g722/g722_interface.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/numerics/safe_conversions.h"
+
+namespace webrtc {
+
+namespace {
+
+const size_t kSampleRateHz = 16000;
+
+} // namespace
+
+AudioEncoderG722Impl::AudioEncoderG722Impl(const AudioEncoderG722Config& config,
+ int payload_type)
+ : num_channels_(config.num_channels),
+ payload_type_(payload_type),
+ num_10ms_frames_per_packet_(
+ static_cast<size_t>(config.frame_size_ms / 10)),
+ num_10ms_frames_buffered_(0),
+ first_timestamp_in_buffer_(0),
+ encoders_(new EncoderState[num_channels_]),
+ interleave_buffer_(2 * num_channels_) {
+ RTC_CHECK(config.IsOk());
+ const size_t samples_per_channel =
+ kSampleRateHz / 100 * num_10ms_frames_per_packet_;
+ for (size_t i = 0; i < num_channels_; ++i) {
+ encoders_[i].speech_buffer.reset(new int16_t[samples_per_channel]);
+ encoders_[i].encoded_buffer.SetSize(samples_per_channel / 2);
+ }
+ Reset();
+}
+
+AudioEncoderG722Impl::~AudioEncoderG722Impl() = default;
+
+int AudioEncoderG722Impl::SampleRateHz() const {
+ return kSampleRateHz;
+}
+
+size_t AudioEncoderG722Impl::NumChannels() const {
+ return num_channels_;
+}
+
+int AudioEncoderG722Impl::RtpTimestampRateHz() const {
+ // The RTP timestamp rate for G.722 is 8000 Hz, even though it is a 16 kHz
+ // codec.
+ return kSampleRateHz / 2;
+}
+
+size_t AudioEncoderG722Impl::Num10MsFramesInNextPacket() const {
+ return num_10ms_frames_per_packet_;
+}
+
+size_t AudioEncoderG722Impl::Max10MsFramesInAPacket() const {
+ return num_10ms_frames_per_packet_;
+}
+
+int AudioEncoderG722Impl::GetTargetBitrate() const {
+ // 4 bits/sample, 16000 samples/s/channel.
+ return static_cast<int>(64000 * NumChannels());
+}
+
+void AudioEncoderG722Impl::Reset() {
+ num_10ms_frames_buffered_ = 0;
+ for (size_t i = 0; i < num_channels_; ++i)
+ RTC_CHECK_EQ(0, WebRtcG722_EncoderInit(encoders_[i].encoder));
+}
+
+absl::optional<std::pair<TimeDelta, TimeDelta>>
+AudioEncoderG722Impl::GetFrameLengthRange() const {
+ return {{TimeDelta::Millis(num_10ms_frames_per_packet_ * 10),
+ TimeDelta::Millis(num_10ms_frames_per_packet_ * 10)}};
+}
+
+AudioEncoder::EncodedInfo AudioEncoderG722Impl::EncodeImpl(
+ uint32_t rtp_timestamp,
+ rtc::ArrayView<const int16_t> audio,
+ rtc::Buffer* encoded) {
+ if (num_10ms_frames_buffered_ == 0)
+ first_timestamp_in_buffer_ = rtp_timestamp;
+
+ // Deinterleave samples and save them in each channel's buffer.
+ const size_t start = kSampleRateHz / 100 * num_10ms_frames_buffered_;
+ for (size_t i = 0; i < kSampleRateHz / 100; ++i)
+ for (size_t j = 0; j < num_channels_; ++j)
+ encoders_[j].speech_buffer[start + i] = audio[i * num_channels_ + j];
+
+ // If we don't yet have enough samples for a packet, we're done for now.
+ if (++num_10ms_frames_buffered_ < num_10ms_frames_per_packet_) {
+ return EncodedInfo();
+ }
+
+ // Encode each channel separately.
+ RTC_CHECK_EQ(num_10ms_frames_buffered_, num_10ms_frames_per_packet_);
+ num_10ms_frames_buffered_ = 0;
+ const size_t samples_per_channel = SamplesPerChannel();
+ for (size_t i = 0; i < num_channels_; ++i) {
+ const size_t bytes_encoded = WebRtcG722_Encode(
+ encoders_[i].encoder, encoders_[i].speech_buffer.get(),
+ samples_per_channel, encoders_[i].encoded_buffer.data());
+ RTC_CHECK_EQ(bytes_encoded, samples_per_channel / 2);
+ }
+
+ const size_t bytes_to_encode = samples_per_channel / 2 * num_channels_;
+ EncodedInfo info;
+ info.encoded_bytes = encoded->AppendData(
+ bytes_to_encode, [&](rtc::ArrayView<uint8_t> encoded) {
+ // Interleave the encoded bytes of the different channels. Each separate
+ // channel and the interleaved stream encodes two samples per byte, most
+ // significant half first.
+ for (size_t i = 0; i < samples_per_channel / 2; ++i) {
+ for (size_t j = 0; j < num_channels_; ++j) {
+ uint8_t two_samples = encoders_[j].encoded_buffer.data()[i];
+ interleave_buffer_.data()[j] = two_samples >> 4;
+ interleave_buffer_.data()[num_channels_ + j] = two_samples & 0xf;
+ }
+ for (size_t j = 0; j < num_channels_; ++j)
+ encoded[i * num_channels_ + j] =
+ interleave_buffer_.data()[2 * j] << 4 |
+ interleave_buffer_.data()[2 * j + 1];
+ }
+
+ return bytes_to_encode;
+ });
+ info.encoded_timestamp = first_timestamp_in_buffer_;
+ info.payload_type = payload_type_;
+ info.encoder_type = CodecType::kG722;
+ return info;
+}
+
+AudioEncoderG722Impl::EncoderState::EncoderState() {
+ RTC_CHECK_EQ(0, WebRtcG722_CreateEncoder(&encoder));
+}
+
+AudioEncoderG722Impl::EncoderState::~EncoderState() {
+ RTC_CHECK_EQ(0, WebRtcG722_FreeEncoder(encoder));
+}
+
+size_t AudioEncoderG722Impl::SamplesPerChannel() const {
+ return kSampleRateHz / 100 * num_10ms_frames_per_packet_;
+}
+
+} // namespace webrtc
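To make the buffering contract in EncodeImpl() concrete, here is a standalone sketch of the packetization arithmetic: the encoder is fed 10 ms frames, and a payload of samples_per_channel / 2 bytes per channel (G.722 emits 4 bits per sample) appears once frame_size_ms worth of audio has been buffered. The constants mirror the code above; the concrete values are illustrative.

```cpp
#include <cstdio>

int main() {
  const unsigned kSampleRateHz = 16000;
  const unsigned frame_size_ms = 20;  // config.frame_size_ms
  const unsigned num_channels = 2;    // config.num_channels
  const unsigned frames_per_packet = frame_size_ms / 10;
  const unsigned samples_per_channel =
      kSampleRateHz / 100 * frames_per_packet;  // 160 samples per 10 ms
  const unsigned payload_bytes = samples_per_channel / 2 * num_channels;
  printf("%u samples/channel -> %u payload bytes\n",
         samples_per_channel, payload_bytes);  // 320 -> 320 for 20 ms stereo
  return 0;
}
```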
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/g722/audio_encoder_g722.h b/third_party/libwebrtc/modules/audio_coding/codecs/g722/audio_encoder_g722.h
new file mode 100644
index 0000000000..a932aa8b7d
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/g722/audio_encoder_g722.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_G722_AUDIO_ENCODER_G722_H_
+#define MODULES_AUDIO_CODING_CODECS_G722_AUDIO_ENCODER_G722_H_
+
+#include <memory>
+#include <utility>
+
+#include "absl/types/optional.h"
+#include "api/audio_codecs/audio_encoder.h"
+#include "api/audio_codecs/g722/audio_encoder_g722_config.h"
+#include "api/units/time_delta.h"
+#include "modules/audio_coding/codecs/g722/g722_interface.h"
+#include "rtc_base/buffer.h"
+
+namespace webrtc {
+
+class AudioEncoderG722Impl final : public AudioEncoder {
+ public:
+ AudioEncoderG722Impl(const AudioEncoderG722Config& config, int payload_type);
+ ~AudioEncoderG722Impl() override;
+
+ AudioEncoderG722Impl(const AudioEncoderG722Impl&) = delete;
+ AudioEncoderG722Impl& operator=(const AudioEncoderG722Impl&) = delete;
+
+ int SampleRateHz() const override;
+ size_t NumChannels() const override;
+ int RtpTimestampRateHz() const override;
+ size_t Num10MsFramesInNextPacket() const override;
+ size_t Max10MsFramesInAPacket() const override;
+ int GetTargetBitrate() const override;
+ void Reset() override;
+ absl::optional<std::pair<TimeDelta, TimeDelta>> GetFrameLengthRange()
+ const override;
+
+ protected:
+ EncodedInfo EncodeImpl(uint32_t rtp_timestamp,
+ rtc::ArrayView<const int16_t> audio,
+ rtc::Buffer* encoded) override;
+
+ private:
+ // The encoder state for one channel.
+ struct EncoderState {
+ G722EncInst* encoder;
+ std::unique_ptr<int16_t[]> speech_buffer; // Queued up for encoding.
+ rtc::Buffer encoded_buffer; // Already encoded.
+ EncoderState();
+ ~EncoderState();
+ };
+
+ size_t SamplesPerChannel() const;
+
+ const size_t num_channels_;
+ const int payload_type_;
+ const size_t num_10ms_frames_per_packet_;
+ size_t num_10ms_frames_buffered_;
+ uint32_t first_timestamp_in_buffer_;
+ const std::unique_ptr<EncoderState[]> encoders_;
+ rtc::Buffer interleave_buffer_;
+};
+
+} // namespace webrtc
+#endif // MODULES_AUDIO_CODING_CODECS_G722_AUDIO_ENCODER_G722_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/g722/g722_interface.c b/third_party/libwebrtc/modules/audio_coding/codecs/g722/g722_interface.c
new file mode 100644
index 0000000000..36ee6d92be
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/g722/g722_interface.c
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stdlib.h>
+#include <string.h>
+
+#include "modules/audio_coding/codecs/g722/g722_interface.h"
+#include "modules/third_party/g722/g722_enc_dec.h"
+
+int16_t WebRtcG722_CreateEncoder(G722EncInst **G722enc_inst)
+{
+ *G722enc_inst=(G722EncInst*)malloc(sizeof(G722EncoderState));
+ if (*G722enc_inst!=NULL) {
+ return(0);
+ } else {
+ return(-1);
+ }
+}
+
+int16_t WebRtcG722_EncoderInit(G722EncInst *G722enc_inst)
+{
+ // Create and/or reset the G.722 encoder
+ // Bitrate 64 kbps and wideband mode (2)
+ G722enc_inst = (G722EncInst *) WebRtc_g722_encode_init(
+ (G722EncoderState*) G722enc_inst, 64000, 2);
+ if (G722enc_inst == NULL) {
+ return -1;
+ } else {
+ return 0;
+ }
+}
+
+int WebRtcG722_FreeEncoder(G722EncInst *G722enc_inst)
+{
+ // Free encoder memory
+ return WebRtc_g722_encode_release((G722EncoderState*) G722enc_inst);
+}
+
+size_t WebRtcG722_Encode(G722EncInst *G722enc_inst,
+ const int16_t* speechIn,
+ size_t len,
+ uint8_t* encoded)
+{
+ unsigned char *codechar = (unsigned char*) encoded;
+ // Encode the input speech vector
+ return WebRtc_g722_encode((G722EncoderState*) G722enc_inst, codechar,
+ speechIn, len);
+}
+
+int16_t WebRtcG722_CreateDecoder(G722DecInst **G722dec_inst)
+{
+ *G722dec_inst=(G722DecInst*)malloc(sizeof(G722DecoderState));
+ if (*G722dec_inst!=NULL) {
+ return(0);
+ } else {
+ return(-1);
+ }
+}
+
+void WebRtcG722_DecoderInit(G722DecInst* inst) {
+ // Create and/or reset the G.722 decoder
+ // Bitrate 64 kbps and wideband mode (2)
+ WebRtc_g722_decode_init((G722DecoderState*)inst, 64000, 2);
+}
+
+int WebRtcG722_FreeDecoder(G722DecInst *G722dec_inst)
+{
+  // Free decoder memory
+ return WebRtc_g722_decode_release((G722DecoderState*) G722dec_inst);
+}
+
+size_t WebRtcG722_Decode(G722DecInst *G722dec_inst,
+ const uint8_t *encoded,
+ size_t len,
+ int16_t *decoded,
+ int16_t *speechType)
+{
+ // Decode the G.722 encoder stream
+ *speechType=G722_WEBRTC_SPEECH;
+ return WebRtc_g722_decode((G722DecoderState*) G722dec_inst, decoded,
+ encoded, len);
+}
+
+int16_t WebRtcG722_Version(char *versionStr, short len)
+{
+ // Get version string
+ char version[30] = "2.0.0\n";
+ if (strlen(version) < (unsigned int)len)
+ {
+ strcpy(versionStr, version);
+ return 0;
+ }
+ else
+ {
+ return -1;
+ }
+}
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/g722/g722_interface.h b/third_party/libwebrtc/modules/audio_coding/codecs/g722/g722_interface.h
new file mode 100644
index 0000000000..85c1cd02a0
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/g722/g722_interface.h
@@ -0,0 +1,173 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_G722_G722_INTERFACE_H_
+#define MODULES_AUDIO_CODING_CODECS_G722_G722_INTERFACE_H_
+
+#include <stdint.h>
+
+/*
+ * Solution to support multiple instances
+ */
+
+typedef struct WebRtcG722EncInst G722EncInst;
+typedef struct WebRtcG722DecInst G722DecInst;
+
+/*
+ * Comfort noise constants
+ */
+
+#define G722_WEBRTC_SPEECH 1
+#define G722_WEBRTC_CNG 2
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/****************************************************************************
+ * WebRtcG722_CreateEncoder(...)
+ *
+ * Create memory used for G722 encoder
+ *
+ * Input:
+ * - G722enc_inst : G722 instance for encoder
+ *
+ * Return value : 0 - Ok
+ * -1 - Error
+ */
+int16_t WebRtcG722_CreateEncoder(G722EncInst** G722enc_inst);
+
+/****************************************************************************
+ * WebRtcG722_EncoderInit(...)
+ *
+ * This function initializes a G722 instance
+ *
+ * Input:
+ * - G722enc_inst : G722 instance, i.e. the encoder instance that should
+ *                  be initialized
+ *
+ * Return value : 0 - Ok
+ * -1 - Error
+ */
+
+int16_t WebRtcG722_EncoderInit(G722EncInst* G722enc_inst);
+
+/****************************************************************************
+ * WebRtcG722_FreeEncoder(...)
+ *
+ * Free the memory used for G722 encoder
+ *
+ * Input:
+ * - G722enc_inst : G722 instance for encoder
+ *
+ * Return value : 0 - Ok
+ * -1 - Error
+ */
+int WebRtcG722_FreeEncoder(G722EncInst* G722enc_inst);
+
+/****************************************************************************
+ * WebRtcG722_Encode(...)
+ *
+ * This function encodes a block of input speech into G722 data.
+ *
+ * Input:
+ * - G722enc_inst : G722 instance, i.e. the encoder that should
+ *                  encode a packet
+ * - speechIn : Input speech vector
+ * - len : Samples in speechIn
+ *
+ * Output:
+ * - encoded : The encoded data vector
+ *
+ * Return value : Length (in bytes) of coded data
+ */
+
+size_t WebRtcG722_Encode(G722EncInst* G722enc_inst,
+ const int16_t* speechIn,
+ size_t len,
+ uint8_t* encoded);
+
+/****************************************************************************
+ * WebRtcG722_CreateDecoder(...)
+ *
+ * Create memory used for G722 decoder
+ *
+ * Input:
+ * - G722dec_inst : G722 instance for decoder
+ *
+ * Return value : 0 - Ok
+ * -1 - Error
+ */
+int16_t WebRtcG722_CreateDecoder(G722DecInst** G722dec_inst);
+
+/****************************************************************************
+ * WebRtcG722_DecoderInit(...)
+ *
+ * This function initializes a G722 instance
+ *
+ * Input:
+ * - inst : G722 instance
+ */
+
+void WebRtcG722_DecoderInit(G722DecInst* inst);
+
+/****************************************************************************
+ * WebRtcG722_FreeDecoder(...)
+ *
+ * Free the memory used for G722 decoder
+ *
+ * Input:
+ * - G722dec_inst : G722 instance for decoder
+ *
+ * Return value : 0 - Ok
+ * -1 - Error
+ */
+
+int WebRtcG722_FreeDecoder(G722DecInst* G722dec_inst);
+
+/****************************************************************************
+ * WebRtcG722_Decode(...)
+ *
+ * This function decodes a packet with G722 frame(s). Output speech length
+ * will be a multiple of 80 samples (80*frames/packet).
+ *
+ * Input:
+ * - G722dec_inst : G722 instance, i.e. the decoder that should
+ *                  decode a packet
+ * - encoded : Encoded G722 frame(s)
+ * - len : Bytes in encoded vector
+ *
+ * Output:
+ * - decoded : The decoded vector
+ * - speechType : 1 normal, 2 CNG (Since G722 does not have its own
+ * DTX/CNG scheme it should always return 1)
+ *
+ * Return value : Samples in decoded vector
+ */
+
+size_t WebRtcG722_Decode(G722DecInst* G722dec_inst,
+ const uint8_t* encoded,
+ size_t len,
+ int16_t* decoded,
+ int16_t* speechType);
+
+/****************************************************************************
+ * WebRtcG722_Version(...)
+ *
+ * Get a string with the current version of the codec
+ */
+
+int16_t WebRtcG722_Version(char* versionStr, short len);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* MODULES_AUDIO_CODING_CODECS_G722_G722_INTERFACE_H_ */
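A minimal mono round-trip using only the functions declared above: 160 input samples (10 ms at 16 kHz) encode to 80 bytes and decode back to 160 samples. Error handling is omitted for brevity.

```cpp
#include <cstdint>
#include <cstdio>

#include "modules/audio_coding/codecs/g722/g722_interface.h"

int main() {
  G722EncInst* enc;
  G722DecInst* dec;
  WebRtcG722_CreateEncoder(&enc);
  WebRtcG722_CreateDecoder(&dec);
  WebRtcG722_EncoderInit(enc);
  WebRtcG722_DecoderInit(dec);

  int16_t pcm[160] = {0};  // 10 ms of silence at 16 kHz
  uint8_t coded[80];
  int16_t out[160];
  int16_t speech_type;

  size_t bytes = WebRtcG722_Encode(enc, pcm, 160, coded);
  size_t samples = WebRtcG722_Decode(dec, coded, bytes, out, &speech_type);
  printf("encoded %zu bytes, decoded %zu samples\n", bytes, samples);

  WebRtcG722_FreeEncoder(enc);
  WebRtcG722_FreeDecoder(dec);
  return 0;
}
```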
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/g722/test/testG722.cc b/third_party/libwebrtc/modules/audio_coding/codecs/g722/test/testG722.cc
new file mode 100644
index 0000000000..9f2155d0f7
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/g722/test/testG722.cc
@@ -0,0 +1,155 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * testG722.cc : Defines the entry point for the console application.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+/* include API */
+#include "modules/audio_coding/codecs/g722/g722_interface.h"
+
+/* Runtime statistics */
+#include <time.h>
+#define CLOCKS_PER_SEC_G722 100000
+
+// Forward declaration
+typedef struct WebRtcG722EncInst G722EncInst;
+typedef struct WebRtcG722DecInst G722DecInst;
+
+/* function for reading audio data from PCM file */
+bool readframe(int16_t* data, FILE* inp, size_t length) {
+ size_t rlen = fread(data, sizeof(int16_t), length, inp);
+ if (rlen >= length)
+ return false;
+ memset(data + rlen, 0, (length - rlen) * sizeof(int16_t));
+ return true;
+}
+
+int main(int argc, char* argv[]) {
+ char inname[60], outbit[40], outname[40];
+ FILE *inp, *outbitp, *outp;
+
+ int framecnt;
+ bool endfile;
+ size_t framelength = 160;
+ G722EncInst* G722enc_inst;
+ G722DecInst* G722dec_inst;
+
+ /* Runtime statistics */
+ double starttime;
+ double runtime = 0;
+ double length_file;
+
+ size_t stream_len = 0;
+ int16_t shortdata[960];
+ int16_t decoded[960];
+ uint8_t streamdata[80 * 6];
+ int16_t speechType[1];
+
+ /* handling wrong input arguments in the command line */
+ if (argc != 5) {
+ printf("\n\nWrong number of arguments or flag values.\n\n");
+
+ printf("\n");
+ printf("Usage:\n\n");
+ printf("./testG722.exe framelength infile outbitfile outspeechfile \n\n");
+ printf("with:\n");
+ printf("framelength : Framelength in samples.\n\n");
+ printf("infile : Normal speech input file\n\n");
+ printf("outbitfile : Bitstream output file\n\n");
+ printf("outspeechfile: Speech output file\n\n");
+ exit(0);
+ }
+
+ /* Get frame length */
+ int framelength_int = atoi(argv[1]);
+ if (framelength_int < 0) {
+ printf(" G.722: Invalid framelength %d.\n", framelength_int);
+ exit(1);
+ }
+ framelength = static_cast<size_t>(framelength_int);
+
+ /* Get Input and Output files */
+ sscanf(argv[2], "%s", inname);
+ sscanf(argv[3], "%s", outbit);
+ sscanf(argv[4], "%s", outname);
+
+ if ((inp = fopen(inname, "rb")) == NULL) {
+ printf(" G.722: Cannot read file %s.\n", inname);
+ exit(1);
+ }
+ if ((outbitp = fopen(outbit, "wb")) == NULL) {
+ printf(" G.722: Cannot write file %s.\n", outbit);
+ exit(1);
+ }
+ if ((outp = fopen(outname, "wb")) == NULL) {
+ printf(" G.722: Cannot write file %s.\n", outname);
+ exit(1);
+ }
+ printf("\nInput:%s\nOutput bitstream:%s\nOutput:%s\n", inname, outbit,
+ outname);
+
+ /* Create and init */
+ WebRtcG722_CreateEncoder((G722EncInst**)&G722enc_inst);
+ WebRtcG722_CreateDecoder((G722DecInst**)&G722dec_inst);
+ WebRtcG722_EncoderInit((G722EncInst*)G722enc_inst);
+ WebRtcG722_DecoderInit((G722DecInst*)G722dec_inst);
+
+ /* Initialize encoder and decoder */
+ framecnt = 0;
+ endfile = false;
+ while (!endfile) {
+ framecnt++;
+
+ /* Read speech block */
+ endfile = readframe(shortdata, inp, framelength);
+
+ /* Start clock before call to encoder and decoder */
+ starttime = clock() / (double)CLOCKS_PER_SEC_G722;
+
+ /* G.722 encoding + decoding */
+ stream_len = WebRtcG722_Encode((G722EncInst*)G722enc_inst, shortdata,
+ framelength, streamdata);
+ WebRtcG722_Decode(G722dec_inst, streamdata, stream_len, decoded,
+ speechType);
+
+ /* Stop clock after call to encoder and decoder */
+ runtime += (double)((clock() / (double)CLOCKS_PER_SEC_G722) - starttime);
+
+ /* Write coded bits to file */
+ if (fwrite(streamdata, sizeof(short), stream_len / 2, outbitp) !=
+ stream_len / 2) {
+ return -1;
+ }
+ /* Write coded speech to file */
+ if (fwrite(decoded, sizeof(short), framelength, outp) != framelength) {
+ return -1;
+ }
+ }
+
+ WebRtcG722_FreeEncoder((G722EncInst*)G722enc_inst);
+ WebRtcG722_FreeDecoder((G722DecInst*)G722dec_inst);
+
+ length_file = ((double)framecnt * (double)framelength / 16000);
+ printf("\n\nLength of speech file: %.1f s\n", length_file);
+ printf("Time to run G.722: %.2f s (%.2f %% of realtime)\n\n", runtime,
+ (100 * runtime / length_file));
+ printf("---------------------END----------------------\n");
+
+ fclose(inp);
+ fclose(outbitp);
+ fclose(outp);
+
+ return 0;
+}
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/abs_quant.c b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/abs_quant.c
new file mode 100644
index 0000000000..77da78ba7f
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/abs_quant.c
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_AbsQuant.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/abs_quant.h"
+
+#include "modules/audio_coding/codecs/ilbc/abs_quant_loop.h"
+#include "modules/audio_coding/codecs/ilbc/constants.h"
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+
+/*----------------------------------------------------------------*
+ * predictive noise shaping encoding of scaled start state
+ * (subrutine for WebRtcIlbcfix_StateSearch)
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_AbsQuant(
+ IlbcEncoder *iLBCenc_inst,
+ /* (i) Encoder instance */
+ iLBC_bits *iLBC_encbits, /* (i/o) Encoded bits (outputs idxForMax
+ and idxVec, uses state_first as
+ input) */
+ int16_t *in, /* (i) vector to encode */
+ int16_t *weightDenum /* (i) denominator of synthesis filter */
+ ) {
+ int16_t *syntOut;
+ size_t quantLen[2];
+
+ /* Stack based */
+ int16_t syntOutBuf[LPC_FILTERORDER+STATE_SHORT_LEN_30MS];
+ int16_t in_weightedVec[STATE_SHORT_LEN_30MS+LPC_FILTERORDER];
+ int16_t *in_weighted = &in_weightedVec[LPC_FILTERORDER];
+
+ /* Initialize the buffers */
+ WebRtcSpl_MemSetW16(syntOutBuf, 0, LPC_FILTERORDER+STATE_SHORT_LEN_30MS);
+ syntOut = &syntOutBuf[LPC_FILTERORDER];
+ /* Start with zero state */
+ WebRtcSpl_MemSetW16(in_weightedVec, 0, LPC_FILTERORDER);
+
+ /* Perform the quantization loop in two sections of length quantLen[i],
+ where the perceptual weighting filter is updated at the subframe
+ border */
+
+ if (iLBC_encbits->state_first) {
+ quantLen[0]=SUBL;
+ quantLen[1]=iLBCenc_inst->state_short_len-SUBL;
+ } else {
+ quantLen[0]=iLBCenc_inst->state_short_len-SUBL;
+ quantLen[1]=SUBL;
+ }
+
+ /* Calculate the weighted residual, switch perceptual weighting
+ filter at the subframe border */
+ WebRtcSpl_FilterARFastQ12(
+ in, in_weighted,
+ weightDenum, LPC_FILTERORDER+1, quantLen[0]);
+ WebRtcSpl_FilterARFastQ12(
+ &in[quantLen[0]], &in_weighted[quantLen[0]],
+ &weightDenum[LPC_FILTERORDER+1], LPC_FILTERORDER+1, quantLen[1]);
+
+ WebRtcIlbcfix_AbsQuantLoop(
+ syntOut,
+ in_weighted,
+ weightDenum,
+ quantLen,
+ iLBC_encbits->idxVec);
+
+}
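The quantLen[] selection above is easier to see in isolation. Per RFC 3951, the start state covers 57 (20 ms mode) or 58 (30 ms mode) samples and straddles one subframe border (SUBL = 40 samples), so the vector is processed in two sections whose order depends on state_first. A sketch of just that selection, with the constants written out:

```cpp
#include <cstddef>

struct QuantSections {
  size_t len[2];
};

QuantSections SplitAtSubframeBorder(size_t state_short_len,  // 57 or 58
                                    bool state_first) {
  const size_t kSubL = 40;  // iLBC subframe length in samples
  QuantSections s;
  if (state_first) {
    s.len[0] = kSubL;                    // full subframe first, then the rest
    s.len[1] = state_short_len - kSubL;
  } else {
    s.len[0] = state_short_len - kSubL;  // remainder first
    s.len[1] = kSubL;
  }
  return s;
}
```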
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/abs_quant.h b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/abs_quant.h
new file mode 100644
index 0000000000..c72e29cf29
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/abs_quant.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_AbsQuant.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_ABS_QUANT_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_ABS_QUANT_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ * predictive noise shaping encoding of scaled start state
+ * (subroutine for WebRtcIlbcfix_StateSearch)
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_AbsQuant(
+ IlbcEncoder* iLBCenc_inst,
+ /* (i) Encoder instance */
+ iLBC_bits* iLBC_encbits, /* (i/o) Encoded bits (outputs idxForMax
+ and idxVec, uses state_first as
+ input) */
+ int16_t* in, /* (i) vector to encode */
+ int16_t* weightDenum /* (i) denominator of synthesis filter */
+ );
+
+#endif
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/abs_quant_loop.c b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/abs_quant_loop.c
new file mode 100644
index 0000000000..cf9266299d
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/abs_quant_loop.c
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_AbsQuantLoop.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/abs_quant_loop.h"
+
+#include "modules/audio_coding/codecs/ilbc/constants.h"
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+#include "modules/audio_coding/codecs/ilbc/sort_sq.h"
+
+void WebRtcIlbcfix_AbsQuantLoop(int16_t *syntOutIN, int16_t *in_weightedIN,
+ int16_t *weightDenumIN, size_t *quantLenIN,
+ int16_t *idxVecIN ) {
+ size_t k1, k2;
+ int16_t index;
+ int32_t toQW32;
+ int32_t toQ32;
+ int16_t tmp16a;
+ int16_t xq;
+
+ int16_t *syntOut = syntOutIN;
+ int16_t *in_weighted = in_weightedIN;
+ int16_t *weightDenum = weightDenumIN;
+ size_t *quantLen = quantLenIN;
+ int16_t *idxVec = idxVecIN;
+
+ for(k1=0;k1<2;k1++) {
+ for(k2=0;k2<quantLen[k1];k2++){
+
+ /* Filter to get the predicted value */
+ WebRtcSpl_FilterARFastQ12(
+ syntOut, syntOut,
+ weightDenum, LPC_FILTERORDER+1, 1);
+
+ /* the quantizer */
+ toQW32 = (int32_t)(*in_weighted) - (int32_t)(*syntOut);
+
+ toQ32 = (((int32_t)toQW32)<<2);
+
+ if (toQ32 > 32767) {
+ toQ32 = (int32_t) 32767;
+ } else if (toQ32 < -32768) {
+ toQ32 = (int32_t) -32768;
+ }
+
+ /* Quantize the state */
+ if (toQW32<(-7577)) {
+ /* To prevent negative overflow */
+ index=0;
+ } else if (toQW32>8151) {
+ /* To prevent positive overflow */
+ index=7;
+ } else {
+ /* Find the best quantization index
+ (state_sq3Tbl is in Q13 and toQ is in Q11)
+ */
+ WebRtcIlbcfix_SortSq(&xq, &index,
+ (int16_t)toQ32,
+ WebRtcIlbcfix_kStateSq3, 8);
+ }
+
+ /* Store selected index */
+ (*idxVec++) = index;
+
+ /* Compute decoded sample and update of the prediction filter */
+ tmp16a = ((WebRtcIlbcfix_kStateSq3[index] + 2 ) >> 2);
+
+ *syntOut = (int16_t) (tmp16a + (int32_t)(*in_weighted) - toQW32);
+
+ syntOut++; in_weighted++;
+ }
+ /* Update perceptual weighting filter at subframe border */
+ weightDenum += 11;
+ }
+}
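Structurally, the loop above is a predictive (noise-shaping) quantizer: filter to get a prediction, quantize the weighted residual against a small table, reconstruct, and feed the reconstruction back into the predictor state. A toy scalar version of that structure follows; the real code uses a 10th-order AR predictor (WebRtcSpl_FilterARFastQ12) and an 8-level Q13 table searched by WebRtcIlbcfix_SortSq, while here the predictor is simply the previous output and the levels are plain integers.

```cpp
#include <cstdint>
#include <cstdlib>

void PredictiveQuantizeToy(const int16_t* in, size_t n,
                           const int16_t* levels, int num_levels,
                           int16_t* index_out) {
  int16_t state = 0;  // zero start state, as in the loop above
  for (size_t i = 0; i < n; ++i) {
    const int32_t prediction = state;
    const int32_t residual = in[i] - prediction;
    // Nearest table level (stand-in for WebRtcIlbcfix_SortSq).
    int best = 0;
    for (int k = 1; k < num_levels; ++k) {
      if (std::abs(residual - levels[k]) < std::abs(residual - levels[best]))
        best = k;
    }
    index_out[i] = static_cast<int16_t>(best);
    // The decoded sample is fed back so the next prediction tracks the
    // quantized signal, not the original.
    state = static_cast<int16_t>(prediction + levels[best]);
  }
}
```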
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/abs_quant_loop.h b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/abs_quant_loop.h
new file mode 100644
index 0000000000..841d73b9fb
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/abs_quant_loop.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_AbsQuantLoop.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_ABS_QUANT_LOOP_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_ABS_QUANT_LOOP_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+/*----------------------------------------------------------------*
+ * predictive noise shaping encoding of scaled start state
+ * (subroutine for WebRtcIlbcfix_StateSearch)
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_AbsQuantLoop(int16_t* syntOutIN,
+ int16_t* in_weightedIN,
+ int16_t* weightDenumIN,
+ size_t* quantLenIN,
+ int16_t* idxVecIN);
+
+#endif
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/audio_decoder_ilbc.cc b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/audio_decoder_ilbc.cc
new file mode 100644
index 0000000000..57b5abbe23
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/audio_decoder_ilbc.cc
@@ -0,0 +1,110 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/ilbc/audio_decoder_ilbc.h"
+
+#include <memory>
+#include <utility>
+
+#include "modules/audio_coding/codecs/ilbc/ilbc.h"
+#include "modules/audio_coding/codecs/legacy_encoded_audio_frame.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+AudioDecoderIlbcImpl::AudioDecoderIlbcImpl() {
+ WebRtcIlbcfix_DecoderCreate(&dec_state_);
+ WebRtcIlbcfix_Decoderinit30Ms(dec_state_);
+}
+
+AudioDecoderIlbcImpl::~AudioDecoderIlbcImpl() {
+ WebRtcIlbcfix_DecoderFree(dec_state_);
+}
+
+bool AudioDecoderIlbcImpl::HasDecodePlc() const {
+ return true;
+}
+
+int AudioDecoderIlbcImpl::DecodeInternal(const uint8_t* encoded,
+ size_t encoded_len,
+ int sample_rate_hz,
+ int16_t* decoded,
+ SpeechType* speech_type) {
+ RTC_DCHECK_EQ(sample_rate_hz, 8000);
+ int16_t temp_type = 1; // Default is speech.
+ int ret = WebRtcIlbcfix_Decode(dec_state_, encoded, encoded_len, decoded,
+ &temp_type);
+ *speech_type = ConvertSpeechType(temp_type);
+ return ret;
+}
+
+size_t AudioDecoderIlbcImpl::DecodePlc(size_t num_frames, int16_t* decoded) {
+ return WebRtcIlbcfix_NetEqPlc(dec_state_, decoded, num_frames);
+}
+
+void AudioDecoderIlbcImpl::Reset() {
+ WebRtcIlbcfix_Decoderinit30Ms(dec_state_);
+}
+
+std::vector<AudioDecoder::ParseResult> AudioDecoderIlbcImpl::ParsePayload(
+ rtc::Buffer&& payload,
+ uint32_t timestamp) {
+ std::vector<ParseResult> results;
+ size_t bytes_per_frame;
+ int timestamps_per_frame;
+ if (payload.size() >= 950) {
+ RTC_LOG(LS_WARNING)
+ << "AudioDecoderIlbcImpl::ParsePayload: Payload too large";
+ return results;
+ }
+ if (payload.size() % 38 == 0) {
+ // 20 ms frames.
+ bytes_per_frame = 38;
+ timestamps_per_frame = 160;
+ } else if (payload.size() % 50 == 0) {
+ // 30 ms frames.
+ bytes_per_frame = 50;
+ timestamps_per_frame = 240;
+ } else {
+ RTC_LOG(LS_WARNING)
+ << "AudioDecoderIlbcImpl::ParsePayload: Invalid payload";
+ return results;
+ }
+
+ RTC_DCHECK_EQ(0, payload.size() % bytes_per_frame);
+ if (payload.size() == bytes_per_frame) {
+ std::unique_ptr<EncodedAudioFrame> frame(
+ new LegacyEncodedAudioFrame(this, std::move(payload)));
+ results.emplace_back(timestamp, 0, std::move(frame));
+ } else {
+ size_t byte_offset;
+ uint32_t timestamp_offset;
+ for (byte_offset = 0, timestamp_offset = 0; byte_offset < payload.size();
+ byte_offset += bytes_per_frame,
+ timestamp_offset += timestamps_per_frame) {
+ std::unique_ptr<EncodedAudioFrame> frame(new LegacyEncodedAudioFrame(
+ this, rtc::Buffer(payload.data() + byte_offset, bytes_per_frame)));
+ results.emplace_back(timestamp + timestamp_offset, 0, std::move(frame));
+ }
+ }
+
+ return results;
+}
+
+int AudioDecoderIlbcImpl::SampleRateHz() const {
+ return 8000;
+}
+
+size_t AudioDecoderIlbcImpl::Channels() const {
+ return 1;
+}
+
+} // namespace webrtc
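The frame classification in ParsePayload() is worth spelling out: an iLBC payload is a concatenation of equal-sized frames, 38 bytes (20 ms, 160 timestamps at 8 kHz) or 50 bytes (30 ms, 240 timestamps), and the frame size is inferred from divisibility. The >= 950 guard presumably exists because 950 = 25 * 38 = 19 * 50 is the smallest size divisible by both, where the inference would become ambiguous. A standalone sketch of the same logic:

```cpp
#include <cstddef>
#include <cstdio>

bool ClassifyIlbcPayload(size_t payload_bytes) {
  size_t bytes_per_frame;
  int timestamps_per_frame;
  if (payload_bytes == 0 || payload_bytes >= 950) return false;
  if (payload_bytes % 38 == 0) {
    bytes_per_frame = 38;  // 20 ms frames
    timestamps_per_frame = 160;
  } else if (payload_bytes % 50 == 0) {
    bytes_per_frame = 50;  // 30 ms frames
    timestamps_per_frame = 240;
  } else {
    return false;  // not a whole number of iLBC frames
  }
  printf("%zu bytes -> %zu frame(s) of %d timestamps each\n", payload_bytes,
         payload_bytes / bytes_per_frame, timestamps_per_frame);
  return true;
}
```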
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/audio_decoder_ilbc.h b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/audio_decoder_ilbc.h
new file mode 100644
index 0000000000..46ba755148
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/audio_decoder_ilbc.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_AUDIO_DECODER_ILBC_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_AUDIO_DECODER_ILBC_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <vector>
+
+#include "api/audio_codecs/audio_decoder.h"
+#include "rtc_base/buffer.h"
+
+typedef struct iLBC_decinst_t_ IlbcDecoderInstance;
+
+namespace webrtc {
+
+class AudioDecoderIlbcImpl final : public AudioDecoder {
+ public:
+ AudioDecoderIlbcImpl();
+ ~AudioDecoderIlbcImpl() override;
+
+ AudioDecoderIlbcImpl(const AudioDecoderIlbcImpl&) = delete;
+ AudioDecoderIlbcImpl& operator=(const AudioDecoderIlbcImpl&) = delete;
+
+ bool HasDecodePlc() const override;
+ size_t DecodePlc(size_t num_frames, int16_t* decoded) override;
+ void Reset() override;
+ std::vector<ParseResult> ParsePayload(rtc::Buffer&& payload,
+ uint32_t timestamp) override;
+ int SampleRateHz() const override;
+ size_t Channels() const override;
+
+ protected:
+ int DecodeInternal(const uint8_t* encoded,
+ size_t encoded_len,
+ int sample_rate_hz,
+ int16_t* decoded,
+ SpeechType* speech_type) override;
+
+ private:
+ IlbcDecoderInstance* dec_state_;
+};
+
+} // namespace webrtc
+#endif // MODULES_AUDIO_CODING_CODECS_ILBC_AUDIO_DECODER_ILBC_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/audio_encoder_ilbc.cc b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/audio_encoder_ilbc.cc
new file mode 100644
index 0000000000..9fbf42ceeb
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/audio_encoder_ilbc.cc
@@ -0,0 +1,151 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/ilbc/audio_encoder_ilbc.h"
+
+#include <algorithm>
+#include <cstdint>
+
+#include "modules/audio_coding/codecs/ilbc/ilbc.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/numerics/safe_conversions.h"
+
+namespace webrtc {
+
+namespace {
+
+const int kSampleRateHz = 8000;
+
+int GetIlbcBitrate(int ptime) {
+ switch (ptime) {
+ case 20:
+ case 40:
+ // 38 bytes per frame of 20 ms => 15200 bits/s.
+ return 15200;
+ case 30:
+ case 60:
+ // 50 bytes per frame of 30 ms => (approx) 13333 bits/s.
+ return 13333;
+ default:
+ RTC_CHECK_NOTREACHED();
+ }
+}
+
+} // namespace
+
+AudioEncoderIlbcImpl::AudioEncoderIlbcImpl(const AudioEncoderIlbcConfig& config,
+ int payload_type)
+ : frame_size_ms_(config.frame_size_ms),
+ payload_type_(payload_type),
+ num_10ms_frames_per_packet_(
+ static_cast<size_t>(config.frame_size_ms / 10)),
+ encoder_(nullptr) {
+ RTC_CHECK(config.IsOk());
+ Reset();
+}
+
+AudioEncoderIlbcImpl::~AudioEncoderIlbcImpl() {
+ RTC_CHECK_EQ(0, WebRtcIlbcfix_EncoderFree(encoder_));
+}
+
+int AudioEncoderIlbcImpl::SampleRateHz() const {
+ return kSampleRateHz;
+}
+
+size_t AudioEncoderIlbcImpl::NumChannels() const {
+ return 1;
+}
+
+size_t AudioEncoderIlbcImpl::Num10MsFramesInNextPacket() const {
+ return num_10ms_frames_per_packet_;
+}
+
+size_t AudioEncoderIlbcImpl::Max10MsFramesInAPacket() const {
+ return num_10ms_frames_per_packet_;
+}
+
+int AudioEncoderIlbcImpl::GetTargetBitrate() const {
+ return GetIlbcBitrate(rtc::dchecked_cast<int>(num_10ms_frames_per_packet_) *
+ 10);
+}
+
+AudioEncoder::EncodedInfo AudioEncoderIlbcImpl::EncodeImpl(
+ uint32_t rtp_timestamp,
+ rtc::ArrayView<const int16_t> audio,
+ rtc::Buffer* encoded) {
+ // Save timestamp if starting a new packet.
+ if (num_10ms_frames_buffered_ == 0)
+ first_timestamp_in_buffer_ = rtp_timestamp;
+
+ // Buffer input.
+ std::copy(audio.cbegin(), audio.cend(),
+ input_buffer_ + kSampleRateHz / 100 * num_10ms_frames_buffered_);
+
+ // If we don't yet have enough buffered input for a whole packet, we're done
+ // for now.
+ if (++num_10ms_frames_buffered_ < num_10ms_frames_per_packet_) {
+ return EncodedInfo();
+ }
+
+ // Encode buffered input.
+ RTC_DCHECK_EQ(num_10ms_frames_buffered_, num_10ms_frames_per_packet_);
+ num_10ms_frames_buffered_ = 0;
+ size_t encoded_bytes = encoded->AppendData(
+ RequiredOutputSizeBytes(), [&](rtc::ArrayView<uint8_t> encoded) {
+ const int r = WebRtcIlbcfix_Encode(
+ encoder_, input_buffer_,
+ kSampleRateHz / 100 * num_10ms_frames_per_packet_, encoded.data());
+ RTC_CHECK_GE(r, 0);
+
+ return static_cast<size_t>(r);
+ });
+
+ RTC_DCHECK_EQ(encoded_bytes, RequiredOutputSizeBytes());
+
+ EncodedInfo info;
+ info.encoded_bytes = encoded_bytes;
+ info.encoded_timestamp = first_timestamp_in_buffer_;
+ info.payload_type = payload_type_;
+ info.encoder_type = CodecType::kIlbc;
+ return info;
+}
+
+void AudioEncoderIlbcImpl::Reset() {
+ if (encoder_)
+ RTC_CHECK_EQ(0, WebRtcIlbcfix_EncoderFree(encoder_));
+ RTC_CHECK_EQ(0, WebRtcIlbcfix_EncoderCreate(&encoder_));
+ const int encoder_frame_size_ms =
+ frame_size_ms_ > 30 ? frame_size_ms_ / 2 : frame_size_ms_;
+ RTC_CHECK_EQ(0, WebRtcIlbcfix_EncoderInit(encoder_, encoder_frame_size_ms));
+ num_10ms_frames_buffered_ = 0;
+}
+
+absl::optional<std::pair<TimeDelta, TimeDelta>>
+AudioEncoderIlbcImpl::GetFrameLengthRange() const {
+ return {{TimeDelta::Millis(num_10ms_frames_per_packet_ * 10),
+ TimeDelta::Millis(num_10ms_frames_per_packet_ * 10)}};
+}
+
+size_t AudioEncoderIlbcImpl::RequiredOutputSizeBytes() const {
+ switch (num_10ms_frames_per_packet_) {
+ case 2:
+ return 38;
+ case 3:
+ return 50;
+ case 4:
+ return 2 * 38;
+ case 6:
+ return 2 * 50;
+ default:
+ RTC_CHECK_NOTREACHED();
+ }
+}
+
+} // namespace webrtc
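The bitrates in GetIlbcBitrate() follow directly from the frame sizes used in RequiredOutputSizeBytes(): 38 bytes per 20 ms and 50 bytes per 30 ms, with 40 ms and 60 ms packets carrying two such frames so the rate is unchanged. A compile-time check of that arithmetic:

```cpp
static_assert(38 * 8 * 1000 / 20 == 15200,
              "38 bytes per 20 ms frame is 15200 bits/s");
static_assert(50 * 8 * 1000 / 30 == 13333,
              "50 bytes per 30 ms frame is ~13333 bits/s (integer division)");
```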
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/audio_encoder_ilbc.h b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/audio_encoder_ilbc.h
new file mode 100644
index 0000000000..c8dfa2ca6d
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/audio_encoder_ilbc.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_AUDIO_ENCODER_ILBC_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_AUDIO_ENCODER_ILBC_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <utility>
+
+#include "absl/types/optional.h"
+#include "api/audio_codecs/audio_encoder.h"
+#include "api/audio_codecs/ilbc/audio_encoder_ilbc_config.h"
+#include "api/units/time_delta.h"
+#include "modules/audio_coding/codecs/ilbc/ilbc.h"
+
+namespace webrtc {
+
+class AudioEncoderIlbcImpl final : public AudioEncoder {
+ public:
+ AudioEncoderIlbcImpl(const AudioEncoderIlbcConfig& config, int payload_type);
+ ~AudioEncoderIlbcImpl() override;
+
+ AudioEncoderIlbcImpl(const AudioEncoderIlbcImpl&) = delete;
+ AudioEncoderIlbcImpl& operator=(const AudioEncoderIlbcImpl&) = delete;
+
+ int SampleRateHz() const override;
+ size_t NumChannels() const override;
+ size_t Num10MsFramesInNextPacket() const override;
+ size_t Max10MsFramesInAPacket() const override;
+ int GetTargetBitrate() const override;
+ EncodedInfo EncodeImpl(uint32_t rtp_timestamp,
+ rtc::ArrayView<const int16_t> audio,
+ rtc::Buffer* encoded) override;
+ void Reset() override;
+ absl::optional<std::pair<TimeDelta, TimeDelta>> GetFrameLengthRange()
+ const override;
+
+ private:
+ size_t RequiredOutputSizeBytes() const;
+
+ static constexpr size_t kMaxSamplesPerPacket = 480;
+ const int frame_size_ms_;
+ const int payload_type_;
+ const size_t num_10ms_frames_per_packet_;
+ size_t num_10ms_frames_buffered_;
+ uint32_t first_timestamp_in_buffer_;
+ int16_t input_buffer_[kMaxSamplesPerPacket];
+ IlbcEncoderInstance* encoder_;
+};
+
+} // namespace webrtc
+#endif // MODULES_AUDIO_CODING_CODECS_ILBC_AUDIO_ENCODER_ILBC_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/augmented_cb_corr.c b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/augmented_cb_corr.c
new file mode 100644
index 0000000000..c915a2f9f0
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/augmented_cb_corr.c
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_AugmentedCbCorr.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/augmented_cb_corr.h"
+
+#include "modules/audio_coding/codecs/ilbc/constants.h"
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+void WebRtcIlbcfix_AugmentedCbCorr(
+ int16_t *target, /* (i) Target vector */
+ int16_t *buffer, /* (i) Memory buffer */
+ int16_t *interpSamples, /* (i) buffer with
+ interpolated samples */
+ int32_t *crossDot, /* (o) The cross correlation between
+ the target and the Augmented
+ vector */
+ size_t low, /* (i) Lag to start from (typically
+ 20) */
+ size_t high, /* (i) Lag to end at (typically 39) */
+ int scale) /* (i) Scale factor to use for
+ the crossDot */
+{
+ size_t lagcount;
+ size_t ilow;
+ int16_t *targetPtr;
+ int32_t *crossDotPtr;
+ int16_t *iSPtr=interpSamples;
+
+ /* Calculate the correlation between the target and the
+ interpolated codebook. The correlation is calculated in
+ 3 sections with the interpolated part in the middle */
+ crossDotPtr=crossDot;
+ for (lagcount=low; lagcount<=high; lagcount++) {
+
+ ilow = lagcount - 4;
+
+ /* Compute dot product for the first (lagcount-4) samples */
+ (*crossDotPtr) = WebRtcSpl_DotProductWithScale(target, buffer-lagcount, ilow, scale);
+
+ /* Compute dot product on the interpolated samples */
+ (*crossDotPtr) += WebRtcSpl_DotProductWithScale(target+ilow, iSPtr, 4, scale);
+ targetPtr = target + lagcount;
+ iSPtr += lagcount-ilow;
+
+ /* Compute dot product for the remaining samples */
+ (*crossDotPtr) += WebRtcSpl_DotProductWithScale(targetPtr, buffer-lagcount, SUBL-lagcount, scale);
+ crossDotPtr++;
+ }
+}
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/augmented_cb_corr.h b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/augmented_cb_corr.h
new file mode 100644
index 0000000000..2e9612e51a
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/augmented_cb_corr.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_AugmentedCbCorr.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_AUGMENTED_CB_CORR_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_AUGMENTED_CB_CORR_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+/*----------------------------------------------------------------*
+ * Calculate correlation between target and Augmented codebooks
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_AugmentedCbCorr(
+ int16_t* target, /* (i) Target vector */
+ int16_t* buffer, /* (i) Memory buffer */
+ int16_t* interpSamples, /* (i) buffer with
+ interpolated samples */
+ int32_t* crossDot, /* (o) The cross correlation between
+ the target and the Augmented
+ vector */
+ size_t low, /* (i) Lag to start from (typically
+ 20) */
+    size_t high,            /* (i) Lag to end at (typically 39) */
+ int scale); /* (i) Scale factor to use for the crossDot */
+
+#endif
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/bw_expand.c b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/bw_expand.c
new file mode 100644
index 0000000000..1a9b882adf
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/bw_expand.c
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_BwExpand.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/bw_expand.h"
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ * lpc bandwidth expansion
+ *---------------------------------------------------------------*/
+
+/* The output is in the same domain as the input */
+void WebRtcIlbcfix_BwExpand(
+ int16_t *out, /* (o) the bandwidth expanded lpc coefficients */
+ int16_t *in, /* (i) the lpc coefficients before bandwidth
+ expansion */
+ int16_t *coef, /* (i) the bandwidth expansion factor Q15 */
+ int16_t length /* (i) the length of lpc coefficient vectors */
+ ) {
+ int i;
+
+ out[0] = in[0];
+ for (i = 1; i < length; i++) {
+ /* out[i] = coef[i] * in[i] with rounding.
+ in[] and out[] are in Q12 and coef[] is in Q15
+ */
+ out[i] = (int16_t)((coef[i] * in[i] + 16384) >> 15);
+ }
+}
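The expression (coef[i] * in[i] + 16384) >> 15 above is the standard rounding fixed-point multiply: coef is Q15 (32768 == 1.0), in and out are Q12, and adding 16384 (half of 2^15) before the shift rounds to nearest. A worked example:

```cpp
#include <cstdint>
#include <cstdio>

int main() {
  const int16_t in = 4096;     // 1.0 in Q12
  const int16_t coef = 29491;  // ~0.9 in Q15 (0.9 * 32768 ~= 29491)
  const int16_t out = static_cast<int16_t>((coef * in + 16384) >> 15);
  printf("%d\n", out);  // 3686, i.e. ~0.9 in Q12
  return 0;
}
```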
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/bw_expand.h b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/bw_expand.h
new file mode 100644
index 0000000000..ff9b0b302e
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/bw_expand.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_BwExpand.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_BW_EXPAND_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_BW_EXPAND_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+/*----------------------------------------------------------------*
+ * lpc bandwidth expansion
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_BwExpand(
+ int16_t* out, /* (o) the bandwidth expanded lpc coefficients */
+ int16_t* in, /* (i) the lpc coefficients before bandwidth
+ expansion */
+ int16_t* coef, /* (i) the bandwidth expansion factor Q15 */
+ int16_t length /* (i) the length of lpc coefficient vectors */
+ );
+
+#endif
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/cb_construct.c b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/cb_construct.c
new file mode 100644
index 0000000000..1e9a7040c7
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/cb_construct.c
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_CbConstruct.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/cb_construct.h"
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+#include "modules/audio_coding/codecs/ilbc/gain_dequant.h"
+#include "modules/audio_coding/codecs/ilbc/get_cd_vec.h"
+#include "rtc_base/sanitizer.h"
+
+// An arithmetic operation that is allowed to overflow. (It's still undefined
+// behavior, so not a good idea; this just makes UBSan ignore the violation, so
+// that our old code can continue to do what it's always been doing.)
+static inline int32_t RTC_NO_SANITIZE("signed-integer-overflow")
+ OverflowingAddS32S32ToS32(int32_t a, int32_t b) {
+ return a + b;
+}
+
+/*----------------------------------------------------------------*
+ * Construct decoded vector from codebook and gains.
+ *---------------------------------------------------------------*/
+
+bool WebRtcIlbcfix_CbConstruct(
+ int16_t* decvector, /* (o) Decoded vector */
+ const int16_t* index, /* (i) Codebook indices */
+ const int16_t* gain_index, /* (i) Gain quantization indices */
+ int16_t* mem, /* (i) Buffer for codevector construction */
+ size_t lMem, /* (i) Length of buffer */
+ size_t veclen) { /* (i) Length of vector */
+ size_t j;
+ int16_t gain[CB_NSTAGES];
+ /* Stack based */
+ int16_t cbvec0[SUBL];
+ int16_t cbvec1[SUBL];
+ int16_t cbvec2[SUBL];
+ int32_t a32;
+ int16_t *gainPtr;
+
+ /* gain de-quantization */
+
+ gain[0] = WebRtcIlbcfix_GainDequant(gain_index[0], 16384, 0);
+ gain[1] = WebRtcIlbcfix_GainDequant(gain_index[1], gain[0], 1);
+ gain[2] = WebRtcIlbcfix_GainDequant(gain_index[2], gain[1], 2);
+
+ /* codebook vector construction and construction of total vector */
+
+ /* Stack based */
+ if (!WebRtcIlbcfix_GetCbVec(cbvec0, mem, (size_t)index[0], lMem, veclen))
+ return false; // Failure.
+ if (!WebRtcIlbcfix_GetCbVec(cbvec1, mem, (size_t)index[1], lMem, veclen))
+ return false; // Failure.
+ if (!WebRtcIlbcfix_GetCbVec(cbvec2, mem, (size_t)index[2], lMem, veclen))
+ return false; // Failure.
+
+ gainPtr = &gain[0];
+ for (j=0;j<veclen;j++) {
+ a32 = (*gainPtr++) * cbvec0[j];
+ a32 += (*gainPtr++) * cbvec1[j];
+ a32 = OverflowingAddS32S32ToS32(a32, (*gainPtr) * cbvec2[j]);
+ gainPtr -= 2;
+ decvector[j] = (int16_t)((a32 + 8192) >> 14);
+ }
+
+ return true; // Success.
+}
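The reconstruction loop above is a plain gain-shape sum in Q14 (16384 == 1.0): each of the three codebook stages contributes gain * shape, and the accumulator is rounded back to Q0 samples with (a32 + 8192) >> 14. A simplified version with the gains taken as given; the real WebRtcIlbcfix_GainDequant additionally dequantizes each stage's gain relative to the previous stage's, with 16384 (1.0 in Q14) as the reference for stage 0.

```cpp
#include <cstddef>
#include <cstdint>

void GainShapeSum(const int16_t gain_q14[3], const int16_t* c0,
                  const int16_t* c1, const int16_t* c2, size_t veclen,
                  int16_t* out) {
  for (size_t j = 0; j < veclen; ++j) {
    int32_t acc = gain_q14[0] * c0[j];
    acc += gain_q14[1] * c1[j];
    acc += gain_q14[2] * c2[j];
    out[j] = static_cast<int16_t>((acc + 8192) >> 14);  // round Q14 -> Q0
  }
}
```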
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/cb_construct.h b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/cb_construct.h
new file mode 100644
index 0000000000..8f7c663164
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/cb_construct.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_CbConstruct.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_CB_CONSTRUCT_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_CB_CONSTRUCT_H_
+
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#include "absl/base/attributes.h"
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ * Construct decoded vector from codebook and gains.
+ *---------------------------------------------------------------*/
+
+// Returns true on success, false on failure.
+ABSL_MUST_USE_RESULT
+bool WebRtcIlbcfix_CbConstruct(
+ int16_t* decvector, /* (o) Decoded vector */
+ const int16_t* index, /* (i) Codebook indices */
+ const int16_t* gain_index, /* (i) Gain quantization indices */
+ int16_t* mem, /* (i) Buffer for codevector construction */
+ size_t lMem, /* (i) Length of buffer */
+ size_t veclen /* (i) Length of vector */
+);
+
+#endif
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy.c b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy.c
new file mode 100644
index 0000000000..21e4197607
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy.c
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_CbMemEnergy.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/cb_mem_energy.h"
+
+#include "modules/audio_coding/codecs/ilbc/cb_mem_energy_calc.h"
+#include "modules/audio_coding/codecs/ilbc/constants.h"
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ * Function WebRtcIlbcfix_CbMemEnergy computes the energy of all
+ * the vectors in the codebook memory that will be used in the
+ * following search for the best match.
+ *----------------------------------------------------------------*/
+
+void WebRtcIlbcfix_CbMemEnergy(
+ size_t range,
+ int16_t *CB, /* (i) The CB memory (1:st section) */
+ int16_t *filteredCB, /* (i) The filtered CB memory (2:nd section) */
+ size_t lMem, /* (i) Length of the CB memory */
+ size_t lTarget, /* (i) Length of the target vector */
+ int16_t *energyW16, /* (o) Energy in the CB vectors */
+ int16_t *energyShifts, /* (o) Shift value of the energy */
+ int scale, /* (i) The scaling of all energy values */
+ size_t base_size /* (i) Index to where energy values should be stored */
+ ) {
+ int16_t *ppi, *ppo, *pp;
+ int32_t energy, tmp32;
+
+ /* Compute the energy and store it in a vector. Also the
+ * corresponding shift values are stored. The energy values
+ * are reused in all three stages. */
+
+  /* Calculate the energy in the first block of 'lTarget' samples. */
+ ppi = CB+lMem-lTarget-1;
+ ppo = CB+lMem-1;
+
+ pp=CB+lMem-lTarget;
+ energy = WebRtcSpl_DotProductWithScale( pp, pp, lTarget, scale);
+
+ /* Normalize the energy and store the number of shifts */
+ energyShifts[0] = (int16_t)WebRtcSpl_NormW32(energy);
+ tmp32 = energy << energyShifts[0];
+ energyW16[0] = (int16_t)(tmp32 >> 16);
+
+  /* Compute the energy of the rest of the cb memory
+   * by stepwise adding the next sample and subtracting
+   * the last sample, respectively. */
+ WebRtcIlbcfix_CbMemEnergyCalc(energy, range, ppi, ppo, energyW16, energyShifts, scale, 0);
+
+ /* Next, precompute the energy values for the filtered cb section */
+ energy=0;
+ pp=filteredCB+lMem-lTarget;
+
+ energy = WebRtcSpl_DotProductWithScale( pp, pp, lTarget, scale);
+
+ /* Normalize the energy and store the number of shifts */
+ energyShifts[base_size] = (int16_t)WebRtcSpl_NormW32(energy);
+ tmp32 = energy << energyShifts[base_size];
+ energyW16[base_size] = (int16_t)(tmp32 >> 16);
+
+ ppi = filteredCB + lMem - 1 - lTarget;
+ ppo = filteredCB + lMem - 1;
+
+ WebRtcIlbcfix_CbMemEnergyCalc(energy, range, ppi, ppo, energyW16, energyShifts, scale, base_size);
+}
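The stepwise update that WebRtcIlbcfix_CbMemEnergyCalc performs is the classic sliding-window trick: compute the first window's energy with a full dot product, then obtain each shifted window in O(1) by adding the sample that enters and subtracting the one that leaves. A standalone sketch of the idea; the real code slides backwards through the buffer and also normalizes each value to 16 bits plus a shift count.

```cpp
#include <cstddef>
#include <cstdint>

void SlidingEnergySketch(const int16_t* x, size_t n, size_t win, int scale,
                         int32_t* energy_out /* n - win + 1 entries */) {
  int32_t energy = 0;
  for (size_t i = 0; i < win; ++i)
    energy += (static_cast<int32_t>(x[i]) * x[i]) >> scale;
  energy_out[0] = energy;
  for (size_t k = 1; k + win <= n; ++k) {
    energy += (static_cast<int32_t>(x[k + win - 1]) * x[k + win - 1]) >> scale;
    energy -= (static_cast<int32_t>(x[k - 1]) * x[k - 1]) >> scale;
    energy_out[k] = energy;
  }
}
```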
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy.h b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy.h
new file mode 100644
index 0000000000..17ec337dc6
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_CbMemEnergy.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_CB_MEM_ENERGY_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_CB_MEM_ENERGY_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+void WebRtcIlbcfix_CbMemEnergy(
+ size_t range,
+    int16_t* CB, /* (i) The CB memory (first section) */
+    int16_t* filteredCB, /* (i) The filtered CB memory (second section) */
+ size_t lMem, /* (i) Length of the CB memory */
+ size_t lTarget, /* (i) Length of the target vector */
+ int16_t* energyW16, /* (o) Energy in the CB vectors */
+ int16_t* energyShifts, /* (o) Shift value of the energy */
+ int scale, /* (i) The scaling of all energy values */
+ size_t base_size /* (i) Index to where energy values should be stored */
+ );
+
+#endif
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy_augmentation.c b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy_augmentation.c
new file mode 100644
index 0000000000..0619bbe422
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy_augmentation.c
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_CbMemEnergyAugmentation.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/cb_mem_energy_augmentation.h"
+
+#include "modules/audio_coding/codecs/ilbc/constants.h"
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+void WebRtcIlbcfix_CbMemEnergyAugmentation(
+ int16_t *interpSamples, /* (i) The interpolated samples */
+ int16_t *CBmem, /* (i) The CB memory */
+ int scale, /* (i) The scaling of all energy values */
+ size_t base_size, /* (i) Index to where energy values should be stored */
+ int16_t *energyW16, /* (o) Energy in the CB vectors */
+ int16_t *energyShifts /* (o) Shift value of the energy */
+ ){
+ int32_t energy, tmp32;
+ int16_t *ppe, *pp, *interpSamplesPtr;
+ int16_t *CBmemPtr;
+ size_t lagcount;
+ int16_t *enPtr=&energyW16[base_size-20];
+ int16_t *enShPtr=&energyShifts[base_size-20];
+ int32_t nrjRecursive;
+
+ CBmemPtr = CBmem+147;
+ interpSamplesPtr = interpSamples;
+
+  /* Compute the energy of the first 15 (lowest lag minus 5) non-interpolated samples */
+ nrjRecursive = WebRtcSpl_DotProductWithScale( CBmemPtr-19, CBmemPtr-19, 15, scale);
+ ppe = CBmemPtr - 20;
+
+ for (lagcount=20; lagcount<=39; lagcount++) {
+
+ /* Update the energy recursively to save complexity */
+ nrjRecursive += (*ppe * *ppe) >> scale;
+ ppe--;
+ energy = nrjRecursive;
+
+ /* interpolation */
+ energy += WebRtcSpl_DotProductWithScale(interpSamplesPtr, interpSamplesPtr, 4, scale);
+ interpSamplesPtr += 4;
+
+ /* Compute energy for the remaining samples */
+ pp = CBmemPtr - lagcount;
+ energy += WebRtcSpl_DotProductWithScale(pp, pp, SUBL-lagcount, scale);
+
+ /* Normalize the energy and store the number of shifts */
+ (*enShPtr) = (int16_t)WebRtcSpl_NormW32(energy);
+ tmp32 = energy << *enShPtr;
+ *enPtr = (int16_t)(tmp32 >> 16);
+ enShPtr++;
+ enPtr++;
+ }
+}
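
For each augmented lag in 20..39, the loop above assembles the energy from three pieces: a recursively grown sum over the non-interpolated history, the four interpolated seam samples, and a wrapped repetition of the start of the lag period. A rough floating-point sketch of that decomposition, under my reading of the fixed-point code (names and data are illustrative; SUBL is the 40-sample sub-block length from defines.h):

#include <stdio.h>

#define SUBL 40 /* iLBC sub-block length */

/* Energy of one augmented-lag vector, split as in the code above. */
static double aug_energy(const double *cb_end, const double *interp4,
                         int lag) {
  double e = 0.0;
  int i;
  for (i = 0; i < lag - 4; i++)      /* non-interpolated history */
    e += cb_end[i - lag] * cb_end[i - lag];
  for (i = 0; i < 4; i++)            /* 4 interpolated seam samples */
    e += interp4[i] * interp4[i];
  for (i = 0; i < SUBL - lag; i++)   /* wrapped repeat of the lag start */
    e += cb_end[i - lag] * cb_end[i - lag];
  return e;
}

int main(void) {
  double cb[60], seam[4] = {0.5, 0.25, -0.25, -0.5};
  int n;
  for (n = 0; n < 60; n++) cb[n] = (n % 7) - 3.0;  /* dummy history */
  printf("E(lag=20) = %f\n", aug_energy(cb + 60, seam, 20));
  return 0;
}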
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy_augmentation.h b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy_augmentation.h
new file mode 100644
index 0000000000..d7b7a0d97e
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy_augmentation.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_CbMemEnergyAugmentation.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_CB_MEM_ENERGY_AUGMENTATION_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_CB_MEM_ENERGY_AUGMENTATION_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+void WebRtcIlbcfix_CbMemEnergyAugmentation(
+ int16_t* interpSamples, /* (i) The interpolated samples */
+ int16_t* CBmem, /* (i) The CB memory */
+ int scale, /* (i) The scaling of all energy values */
+ size_t base_size, /* (i) Index to where energy values should be stored */
+ int16_t* energyW16, /* (o) Energy in the CB vectors */
+ int16_t* energyShifts /* (o) Shift value of the energy */
+ );
+
+#endif
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy_calc.c b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy_calc.c
new file mode 100644
index 0000000000..58c0c5fe6d
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy_calc.c
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_CbMemEnergyCalc.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/cb_mem_energy_calc.h"
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/* Compute the energy of the rest of the cb memory
+ * by stepwise adding the next sample and
+ * subtracting the last sample */
+void WebRtcIlbcfix_CbMemEnergyCalc(
+ int32_t energy, /* (i) input start energy */
+ size_t range, /* (i) number of iterations */
+ int16_t *ppi, /* (i) input pointer 1 */
+ int16_t *ppo, /* (i) input pointer 2 */
+ int16_t *energyW16, /* (o) Energy in the CB vectors */
+ int16_t *energyShifts, /* (o) Shift value of the energy */
+ int scale, /* (i) The scaling of all energy values */
+ size_t base_size /* (i) Index to where energy values should be stored */
+ )
+{
+ size_t j;
+ int16_t shft;
+ int32_t tmp;
+ int16_t *eSh_ptr;
+ int16_t *eW16_ptr;
+
+
+ eSh_ptr = &energyShifts[1+base_size];
+ eW16_ptr = &energyW16[1+base_size];
+
+ for (j = 0; j + 1 < range; j++) {
+
+ /* Calculate next energy by a +/-
+ operation on the edge samples */
+ tmp = (*ppi) * (*ppi) - (*ppo) * (*ppo);
+ energy += tmp >> scale;
+ energy = WEBRTC_SPL_MAX(energy, 0);
+
+ ppi--;
+ ppo--;
+
+    /* Normalize the energy into an int16_t and store
+       the number of shifts */
+
+ shft = (int16_t)WebRtcSpl_NormW32(energy);
+ *eSh_ptr++ = shft;
+
+ tmp = energy << shft;
+ *eW16_ptr++ = (int16_t)(tmp >> 16);
+ }
+}
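
The loop above avoids recomputing a full dot product for every candidate: when the analysis window slides by one sample, only the entering and leaving samples change the energy. A minimal sketch with no WebRtc dependencies (the real code additionally right-shifts each product by scale and clamps the running energy at zero):

#include <stdio.h>

int main(void) {
  const int x[8] = {3, -1, 4, 1, -5, 9, 2, -6};
  const int len = 4;        /* window length */
  const int start = 4;      /* window x[4..7], at the end of the buffer */
  long energy = 0, direct = 0;
  int i;
  for (i = start; i < start + len; i++)
    energy += (long)x[i] * x[i];                  /* initial energy: 146 */
  /* Slide one step towards the start: add the entering sample's square,
     subtract the leaving sample's square (the +/- edge update above). */
  energy += (long)x[start - 1] * x[start - 1]
          - (long)x[start + len - 1] * x[start + len - 1];
  for (i = start - 1; i < start - 1 + len; i++)
    direct += (long)x[i] * x[i];                  /* reference result */
  printf("recursive=%ld direct=%ld\n", energy, direct);  /* both 111 */
  return 0;
}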
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy_calc.h b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy_calc.h
new file mode 100644
index 0000000000..1d1e8d62b9
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy_calc.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_CbMemEnergyCalc.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_CB_MEM_ENERGY_CALC_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_CB_MEM_ENERGY_CALC_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+void WebRtcIlbcfix_CbMemEnergyCalc(
+ int32_t energy, /* (i) input start energy */
+ size_t range, /* (i) number of iterations */
+ int16_t* ppi, /* (i) input pointer 1 */
+ int16_t* ppo, /* (i) input pointer 2 */
+ int16_t* energyW16, /* (o) Energy in the CB vectors */
+ int16_t* energyShifts, /* (o) Shift value of the energy */
+ int scale, /* (i) The scaling of all energy values */
+ size_t base_size /* (i) Index to where energy values should be stored */
+ );
+
+#endif
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/cb_search.c b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/cb_search.c
new file mode 100644
index 0000000000..24b5292354
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/cb_search.c
@@ -0,0 +1,405 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_CbSearch.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/cb_search.h"
+
+#include "modules/audio_coding/codecs/ilbc/augmented_cb_corr.h"
+#include "modules/audio_coding/codecs/ilbc/cb_mem_energy.h"
+#include "modules/audio_coding/codecs/ilbc/cb_mem_energy_augmentation.h"
+#include "modules/audio_coding/codecs/ilbc/cb_search_core.h"
+#include "modules/audio_coding/codecs/ilbc/cb_update_best_index.h"
+#include "modules/audio_coding/codecs/ilbc/constants.h"
+#include "modules/audio_coding/codecs/ilbc/create_augmented_vec.h"
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+#include "modules/audio_coding/codecs/ilbc/energy_inverse.h"
+#include "modules/audio_coding/codecs/ilbc/filtered_cb_vecs.h"
+#include "modules/audio_coding/codecs/ilbc/gain_quant.h"
+#include "modules/audio_coding/codecs/ilbc/interpolate_samples.h"
+
+/*----------------------------------------------------------------*
+ * Search routine for codebook encoding and gain quantization.
+ *----------------------------------------------------------------*/
+
+void WebRtcIlbcfix_CbSearch(
+ IlbcEncoder *iLBCenc_inst,
+ /* (i) the encoder state structure */
+ int16_t *index, /* (o) Codebook indices */
+ int16_t *gain_index, /* (o) Gain quantization indices */
+ int16_t *intarget, /* (i) Target vector for encoding */
+ int16_t *decResidual,/* (i) Decoded residual for codebook construction */
+ size_t lMem, /* (i) Length of buffer */
+ size_t lTarget, /* (i) Length of vector */
+ int16_t *weightDenum,/* (i) weighting filter coefficients in Q12 */
+ size_t block /* (i) the subblock number */
+ ) {
+ size_t i, range;
+ int16_t ii, j, stage;
+ int16_t *pp;
+ int16_t tmp;
+ int scale;
+ int16_t bits, temp1, temp2;
+ size_t base_size;
+ int32_t codedEner, targetEner;
+ int16_t gains[CB_NSTAGES+1];
+ int16_t *cb_vecPtr;
+ size_t indexOffset, sInd, eInd;
+ int32_t CritMax=0;
+ int16_t shTotMax=WEBRTC_SPL_WORD16_MIN;
+ size_t bestIndex=0;
+ int16_t bestGain=0;
+ size_t indexNew;
+ int16_t CritNewSh;
+ int32_t CritNew;
+ int32_t *cDotPtr;
+ size_t noOfZeros;
+ int16_t *gainPtr;
+ int32_t t32, tmpW32;
+ int16_t *WebRtcIlbcfix_kGainSq5_ptr;
+ /* Stack based */
+ int16_t CBbuf[CB_MEML+LPC_FILTERORDER+CB_HALFFILTERLEN];
+ int32_t cDot[128];
+ int32_t Crit[128];
+ int16_t targetVec[SUBL+LPC_FILTERORDER];
+  int16_t cbvectors[CB_MEML + 1]; /* One extra position added to
+                                     silence Coverity warnings. */
+ int16_t codedVec[SUBL];
+ int16_t interpSamples[20*4];
+ int16_t interpSamplesFilt[20*4];
+ int16_t energyW16[CB_EXPAND*128];
+ int16_t energyShifts[CB_EXPAND*128];
+ int16_t *inverseEnergy=energyW16; /* Reuse memory */
+ int16_t *inverseEnergyShifts=energyShifts; /* Reuse memory */
+ int16_t *buf = &CBbuf[LPC_FILTERORDER];
+ int16_t *target = &targetVec[LPC_FILTERORDER];
+ int16_t *aug_vec = (int16_t*)cDot; /* length [SUBL], reuse memory */
+
+ /* Determine size of codebook sections */
+
+ base_size=lMem-lTarget+1;
+ if (lTarget==SUBL) {
+ base_size=lMem-19;
+ }
+
+ /* weighting of the CB memory */
+ noOfZeros=lMem-WebRtcIlbcfix_kFilterRange[block];
+ WebRtcSpl_MemSetW16(&buf[-LPC_FILTERORDER], 0, noOfZeros+LPC_FILTERORDER);
+ WebRtcSpl_FilterARFastQ12(
+ decResidual+noOfZeros, buf+noOfZeros,
+ weightDenum, LPC_FILTERORDER+1, WebRtcIlbcfix_kFilterRange[block]);
+
+ /* weighting of the target vector */
+ WEBRTC_SPL_MEMCPY_W16(&target[-LPC_FILTERORDER], buf+noOfZeros+WebRtcIlbcfix_kFilterRange[block]-LPC_FILTERORDER, LPC_FILTERORDER);
+ WebRtcSpl_FilterARFastQ12(
+ intarget, target,
+ weightDenum, LPC_FILTERORDER+1, lTarget);
+
+  /* Store the target; towards the end, codedVec is calculated as
+     the initial target minus the remaining target */
+ WEBRTC_SPL_MEMCPY_W16(codedVec, target, lTarget);
+
+ /* Find the highest absolute value to calculate proper
+ vector scale factor (so that it uses 12 bits) */
+ temp1 = WebRtcSpl_MaxAbsValueW16(buf, lMem);
+ temp2 = WebRtcSpl_MaxAbsValueW16(target, lTarget);
+
+ if ((temp1>0)&&(temp2>0)) {
+ temp1 = WEBRTC_SPL_MAX(temp1, temp2);
+ scale = WebRtcSpl_GetSizeInBits((uint32_t)(temp1 * temp1));
+ } else {
+ /* temp1 or temp2 is negative (maximum was -32768) */
+ scale = 30;
+ }
+
+  /* Scale so that a 40-sample multiply-accumulate does not overflow */
+ scale = scale - 25;
+ scale = WEBRTC_SPL_MAX(0, scale);
+
+ /* Compute energy of the original target */
+ targetEner = WebRtcSpl_DotProductWithScale(target, target, lTarget, scale);
+
+ /* Prepare search over one more codebook section. This section
+ is created by filtering the original buffer with a filter. */
+ WebRtcIlbcfix_FilteredCbVecs(cbvectors, buf, lMem, WebRtcIlbcfix_kFilterRange[block]);
+
+ range = WebRtcIlbcfix_kSearchRange[block][0];
+
+ if(lTarget == SUBL) {
+ /* Create the interpolated samples and store them for use in all stages */
+
+ /* First section, non-filtered half of the cb */
+ WebRtcIlbcfix_InterpolateSamples(interpSamples, buf, lMem);
+
+ /* Second section, filtered half of the cb */
+ WebRtcIlbcfix_InterpolateSamples(interpSamplesFilt, cbvectors, lMem);
+
+ /* Compute the CB vectors' energies for the first cb section (non-filtered) */
+ WebRtcIlbcfix_CbMemEnergyAugmentation(interpSamples, buf,
+ scale, 20, energyW16, energyShifts);
+
+ /* Compute the CB vectors' energies for the second cb section (filtered cb) */
+ WebRtcIlbcfix_CbMemEnergyAugmentation(interpSamplesFilt, cbvectors, scale,
+ base_size + 20, energyW16,
+ energyShifts);
+
+ /* Compute the CB vectors' energies and store them in the vector
+ * energyW16. Also the corresponding shift values are stored. The
+ * energy values are used in all three stages. */
+ WebRtcIlbcfix_CbMemEnergy(range, buf, cbvectors, lMem,
+ lTarget, energyW16+20, energyShifts+20, scale, base_size);
+
+ } else {
+ /* Compute the CB vectors' energies and store them in the vector
+ * energyW16. Also the corresponding shift values are stored. The
+ * energy values are used in all three stages. */
+ WebRtcIlbcfix_CbMemEnergy(range, buf, cbvectors, lMem,
+ lTarget, energyW16, energyShifts, scale, base_size);
+
+ /* Set the energy positions 58-63 and 122-127 to zero
+ (otherwise they are uninitialized) */
+ WebRtcSpl_MemSetW16(energyW16+range, 0, (base_size-range));
+ WebRtcSpl_MemSetW16(energyW16+range+base_size, 0, (base_size-range));
+ }
+
+  /* Calculate the inverse energy (energyW16 is already normalized
+     and will contain the inverse energy in Q29 after this call) */
+ WebRtcIlbcfix_EnergyInverse(energyW16, base_size*CB_EXPAND);
+
+ /* The gain value computed in the previous stage is used
+ * as an upper limit to what the next stage gain value
+ * is allowed to be. In stage 0, 16384 (1.0 in Q14) is used as
+ * the upper limit. */
+ gains[0] = 16384;
+
+ for (stage=0; stage<CB_NSTAGES; stage++) {
+
+ /* Set up memories */
+ range = WebRtcIlbcfix_kSearchRange[block][stage];
+
+ /* initialize search measures */
+ CritMax=0;
+ shTotMax=-100;
+ bestIndex=0;
+ bestGain=0;
+
+ /* loop over lags 40+ in the first codebook section, full search */
+ cb_vecPtr = buf+lMem-lTarget;
+
+ /* Calculate all the cross correlations (augmented part of CB) */
+ if (lTarget==SUBL) {
+ WebRtcIlbcfix_AugmentedCbCorr(target, buf+lMem,
+ interpSamples, cDot,
+ 20, 39, scale);
+ cDotPtr=&cDot[20];
+ } else {
+ cDotPtr=cDot;
+ }
+ /* Calculate all the cross correlations (main part of CB) */
+ WebRtcSpl_CrossCorrelation(cDotPtr, target, cb_vecPtr, lTarget, range, scale, -1);
+
+ /* Adjust the search range for the augmented vectors */
+ if (lTarget==SUBL) {
+ range=WebRtcIlbcfix_kSearchRange[block][stage]+20;
+ } else {
+ range=WebRtcIlbcfix_kSearchRange[block][stage];
+ }
+
+ indexOffset=0;
+
+ /* Search for best index in this part of the vector */
+ WebRtcIlbcfix_CbSearchCore(
+ cDot, range, stage, inverseEnergy,
+ inverseEnergyShifts, Crit,
+ &indexNew, &CritNew, &CritNewSh);
+
+ /* Update the global best index and the corresponding gain */
+ WebRtcIlbcfix_CbUpdateBestIndex(
+ CritNew, CritNewSh, indexNew+indexOffset, cDot[indexNew+indexOffset],
+ inverseEnergy[indexNew+indexOffset], inverseEnergyShifts[indexNew+indexOffset],
+ &CritMax, &shTotMax, &bestIndex, &bestGain);
+
+ sInd = ((CB_RESRANGE >> 1) > bestIndex) ?
+ 0 : (bestIndex - (CB_RESRANGE >> 1));
+ eInd=sInd+CB_RESRANGE;
+ if (eInd>=range) {
+ eInd=range-1;
+ sInd=eInd-CB_RESRANGE;
+ }
+
+ range = WebRtcIlbcfix_kSearchRange[block][stage];
+
+ if (lTarget==SUBL) {
+ i=sInd;
+ if (sInd<20) {
+ WebRtcIlbcfix_AugmentedCbCorr(target, cbvectors + lMem,
+ interpSamplesFilt, cDot, sInd + 20,
+ WEBRTC_SPL_MIN(39, (eInd + 20)), scale);
+ i=20;
+ cDotPtr = &cDot[20 - sInd];
+ } else {
+ cDotPtr = cDot;
+ }
+
+ cb_vecPtr = cbvectors+lMem-20-i;
+
+ /* Calculate the cross correlations (main part of the filtered CB) */
+ WebRtcSpl_CrossCorrelation(cDotPtr, target, cb_vecPtr, lTarget,
+ eInd - i + 1, scale, -1);
+
+ } else {
+ cDotPtr = cDot;
+ cb_vecPtr = cbvectors+lMem-lTarget-sInd;
+
+ /* Calculate the cross correlations (main part of the filtered CB) */
+ WebRtcSpl_CrossCorrelation(cDotPtr, target, cb_vecPtr, lTarget,
+ eInd - sInd + 1, scale, -1);
+
+ }
+
+ /* Adjust the search range for the augmented vectors */
+ indexOffset=base_size+sInd;
+
+ /* Search for best index in this part of the vector */
+ WebRtcIlbcfix_CbSearchCore(
+ cDot, eInd-sInd+1, stage, inverseEnergy+indexOffset,
+ inverseEnergyShifts+indexOffset, Crit,
+ &indexNew, &CritNew, &CritNewSh);
+
+ /* Update the global best index and the corresponding gain */
+ WebRtcIlbcfix_CbUpdateBestIndex(
+ CritNew, CritNewSh, indexNew+indexOffset, cDot[indexNew],
+ inverseEnergy[indexNew+indexOffset], inverseEnergyShifts[indexNew+indexOffset],
+ &CritMax, &shTotMax, &bestIndex, &bestGain);
+
+ index[stage] = (int16_t)bestIndex;
+
+
+ bestGain = WebRtcIlbcfix_GainQuant(bestGain,
+ (int16_t)WEBRTC_SPL_ABS_W16(gains[stage]), stage, &gain_index[stage]);
+
+    /* Extract the best codebook vector (according to the measure).
+       Also adjust the index so that the augmented vectors come last;
+       in the search above they came first.
+    */
+
+ if(lTarget==(STATE_LEN-iLBCenc_inst->state_short_len)) {
+
+ if((size_t)index[stage]<base_size) {
+ pp=buf+lMem-lTarget-index[stage];
+ } else {
+ pp=cbvectors+lMem-lTarget-
+ index[stage]+base_size;
+ }
+
+ } else {
+
+ if ((size_t)index[stage]<base_size) {
+ if (index[stage]>=20) {
+ /* Adjust index and extract vector */
+ index[stage]-=20;
+ pp=buf+lMem-lTarget-index[stage];
+ } else {
+ /* Adjust index and extract vector */
+ index[stage]+=(int16_t)(base_size-20);
+
+ WebRtcIlbcfix_CreateAugmentedVec(index[stage]-base_size+40,
+ buf+lMem, aug_vec);
+ pp = aug_vec;
+
+ }
+ } else {
+
+ if ((index[stage] - base_size) >= 20) {
+ /* Adjust index and extract vector */
+ index[stage]-=20;
+ pp=cbvectors+lMem-lTarget-
+ index[stage]+base_size;
+ } else {
+ /* Adjust index and extract vector */
+ index[stage]+=(int16_t)(base_size-20);
+ WebRtcIlbcfix_CreateAugmentedVec(index[stage]-2*base_size+40,
+ cbvectors+lMem, aug_vec);
+ pp = aug_vec;
+ }
+ }
+ }
+
+ /* Subtract the best codebook vector, according
+ to measure, from the target vector */
+
+ WebRtcSpl_AddAffineVectorToVector(target, pp, (int16_t)(-bestGain),
+ (int32_t)8192, (int16_t)14, lTarget);
+
+ /* record quantized gain */
+ gains[stage+1] = bestGain;
+
+ } /* end of Main Loop. for (stage=0;... */
+
+  /* Calculate the coded vector (original target - what's left) */
+ for (i=0;i<lTarget;i++) {
+ codedVec[i]-=target[i];
+ }
+
+ /* Gain adjustment for energy matching */
+ codedEner = WebRtcSpl_DotProductWithScale(codedVec, codedVec, lTarget, scale);
+
+ j=gain_index[0];
+
+ temp1 = (int16_t)WebRtcSpl_NormW32(codedEner);
+ temp2 = (int16_t)WebRtcSpl_NormW32(targetEner);
+
+ if(temp1 < temp2) {
+ bits = 16 - temp1;
+ } else {
+ bits = 16 - temp2;
+ }
+
+ tmp = (int16_t)((gains[1] * gains[1]) >> 14);
+
+ targetEner = (int16_t)WEBRTC_SPL_SHIFT_W32(targetEner, -bits) * tmp;
+
+ tmpW32 = ((int32_t)(gains[1]-1))<<1;
+
+ /* Pointer to the table that contains
+ gain_sq5TblFIX * gain_sq5TblFIX in Q14 */
+ gainPtr=(int16_t*)WebRtcIlbcfix_kGainSq5Sq+gain_index[0];
+ temp1 = (int16_t)WEBRTC_SPL_SHIFT_W32(codedEner, -bits);
+
+ WebRtcIlbcfix_kGainSq5_ptr = (int16_t*)&WebRtcIlbcfix_kGainSq5[j];
+
+ /* targetEner and codedEner are in Q(-2*scale) */
+ for (ii=gain_index[0];ii<32;ii++) {
+
+ /* Change the index if
+ (codedEnergy*gainTbl[i]*gainTbl[i])<(targetEn*gain[0]*gain[0]) AND
+ gainTbl[i] < 2*gain[0]
+ */
+
+ t32 = temp1 * *gainPtr;
+ t32 = t32 - targetEner;
+ if (t32 < 0) {
+ if ((*WebRtcIlbcfix_kGainSq5_ptr) < tmpW32) {
+ j=ii;
+ WebRtcIlbcfix_kGainSq5_ptr = (int16_t*)&WebRtcIlbcfix_kGainSq5[ii];
+ }
+ }
+ gainPtr++;
+ }
+ gain_index[0]=j;
+
+ return;
+}
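
Each stage above performs a full search over the stage range, then a refined pass restricted to a window of CB_RESRANGE indices around the stage winner (the sInd/eInd arithmetic). A standalone sketch of just the window clamping; it assumes range > resrange, which the caller's constants appear to guarantee, and 34 is used purely as an example window size:

#include <stddef.h>
#include <stdio.h>

/* Centre a window of 'resrange' indices on bestIndex, clamped to [0, range). */
static void clamp_window(size_t bestIndex, size_t range, size_t resrange,
                         size_t *sInd, size_t *eInd) {
  *sInd = (resrange / 2 > bestIndex) ? 0 : bestIndex - resrange / 2;
  *eInd = *sInd + resrange;
  if (*eInd >= range) {  /* window ran past the end: pin it to the end */
    *eInd = range - 1;
    *sInd = *eInd - resrange;
  }
}

int main(void) {
  size_t s, e;
  clamp_window(2, 64, 34, &s, &e);   /* near the start: [0, 34] */
  printf("[%zu, %zu]\n", s, e);
  clamp_window(60, 64, 34, &s, &e);  /* near the end: [29, 63] */
  printf("[%zu, %zu]\n", s, e);
  return 0;
}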
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/cb_search.h b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/cb_search.h
new file mode 100644
index 0000000000..84a52c7868
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/cb_search.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_CbSearch.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_CB_SEARCH_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_CB_SEARCH_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+void WebRtcIlbcfix_CbSearch(
+ IlbcEncoder* iLBCenc_inst,
+ /* (i) the encoder state structure */
+ int16_t* index, /* (o) Codebook indices */
+ int16_t* gain_index, /* (o) Gain quantization indices */
+ int16_t* intarget, /* (i) Target vector for encoding */
+ int16_t* decResidual, /* (i) Decoded residual for codebook construction */
+ size_t lMem, /* (i) Length of buffer */
+ size_t lTarget, /* (i) Length of vector */
+ int16_t* weightDenum, /* (i) weighting filter coefficients in Q12 */
+ size_t block /* (i) the subblock number */
+ );
+
+#endif
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/cb_search_core.c b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/cb_search_core.c
new file mode 100644
index 0000000000..a75e5b0ab8
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/cb_search_core.c
@@ -0,0 +1,115 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_CbSearchCore.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/cb_search_core.h"
+
+#include "modules/audio_coding/codecs/ilbc/constants.h"
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+void WebRtcIlbcfix_CbSearchCore(
+ int32_t *cDot, /* (i) Cross Correlation */
+ size_t range, /* (i) Search range */
+ int16_t stage, /* (i) Stage of this search */
+ int16_t *inverseEnergy, /* (i) Inversed energy */
+ int16_t *inverseEnergyShift, /* (i) Shifts of inversed energy
+ with the offset 2*16-29 */
+ int32_t *Crit, /* (o) The criteria */
+ size_t *bestIndex, /* (o) Index that corresponds to
+ maximum criteria (in this
+ vector) */
+    int32_t *bestCrit, /* (o) Value of criteria for the
+ chosen index */
+ int16_t *bestCritSh) /* (o) The domain of the chosen
+ criteria */
+{
+ int32_t maxW32, tmp32;
+ int16_t max, sh, tmp16;
+ size_t i;
+ int32_t *cDotPtr;
+ int16_t cDotSqW16;
+ int16_t *inverseEnergyPtr;
+ int32_t *critPtr;
+ int16_t *inverseEnergyShiftPtr;
+
+ /* Don't allow negative values for stage 0 */
+ if (stage==0) {
+ cDotPtr=cDot;
+ for (i=0;i<range;i++) {
+ *cDotPtr=WEBRTC_SPL_MAX(0, (*cDotPtr));
+ cDotPtr++;
+ }
+ }
+
+ /* Normalize cDot to int16_t, calculate the square of cDot and store the upper int16_t */
+ maxW32 = WebRtcSpl_MaxAbsValueW32(cDot, range);
+
+ sh = (int16_t)WebRtcSpl_NormW32(maxW32);
+ cDotPtr = cDot;
+ inverseEnergyPtr = inverseEnergy;
+ critPtr = Crit;
+ inverseEnergyShiftPtr=inverseEnergyShift;
+ max=WEBRTC_SPL_WORD16_MIN;
+
+ for (i=0;i<range;i++) {
+    /* Calculate cDot*cDot and put the result in an int16_t */
+ tmp32 = *cDotPtr << sh;
+ tmp16 = (int16_t)(tmp32 >> 16);
+ cDotSqW16 = (int16_t)(((int32_t)(tmp16)*(tmp16))>>16);
+
+ /* Calculate the criteria (cDot*cDot/energy) */
+ *critPtr = cDotSqW16 * *inverseEnergyPtr;
+
+ /* Extract the maximum shift value under the constraint
+ that the criteria is not zero */
+ if ((*critPtr)!=0) {
+ max = WEBRTC_SPL_MAX((*inverseEnergyShiftPtr), max);
+ }
+
+ inverseEnergyPtr++;
+ inverseEnergyShiftPtr++;
+ critPtr++;
+ cDotPtr++;
+ }
+
+  /* If max is still at its initialization value (no nonzero criteria), set the shift to zero */
+ if (max==WEBRTC_SPL_WORD16_MIN) {
+ max = 0;
+ }
+
+  /* Modify the criteria so that all of them use the same Q domain */
+ critPtr=Crit;
+ inverseEnergyShiftPtr=inverseEnergyShift;
+ for (i=0;i<range;i++) {
+    /* Guarantee that the shift value is less than 16,
+       to simplify DSP implementations (and guard against shifts >31) */
+ tmp16 = WEBRTC_SPL_MIN(16, max-(*inverseEnergyShiftPtr));
+
+ (*critPtr)=WEBRTC_SPL_SHIFT_W32((*critPtr),-tmp16);
+ critPtr++;
+ inverseEnergyShiftPtr++;
+ }
+
+ /* Find the index of the best value */
+ *bestIndex = WebRtcSpl_MaxIndexW32(Crit, range);
+ *bestCrit = Crit[*bestIndex];
+
+ /* Calculate total shifts of this criteria */
+ *bestCritSh = 32 - 2*sh + max;
+
+ return;
+}
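
Stripped of the shift bookkeeping, the criterion above is simply (cross-correlation)^2 * (1/energy) per candidate, with negative correlations clamped to zero in stage 0. A floating-point reference sketch (best_candidate is a hypothetical helper, not a function from the sources):

#include <stddef.h>
#include <stdio.h>

static size_t best_candidate(const double *cdot, const double *inv_energy,
                             size_t range, int stage) {
  size_t i, best = 0;
  double best_crit = -1.0;
  for (i = 0; i < range; i++) {
    double c = (stage == 0 && cdot[i] < 0.0) ? 0.0 : cdot[i];
    double crit = c * c * inv_energy[i];  /* cDot^2 / energy */
    if (crit > best_crit) { best_crit = crit; best = i; }
  }
  return best;
}

int main(void) {
  const double cdot[4] = {-9.0, 2.0, 3.0, 1.0};
  const double inv_e[4] = {1.0, 0.5, 0.25, 1.0};
  printf("best = %zu\n", best_candidate(cdot, inv_e, 4, 0));  /* 2 */
  return 0;
}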
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/cb_search_core.h b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/cb_search_core.h
new file mode 100644
index 0000000000..5da70e0988
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/cb_search_core.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_CbSearchCore.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_CB_SEARCH_CORE_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_CB_SEARCH_CORE_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+void WebRtcIlbcfix_CbSearchCore(
+ int32_t* cDot, /* (i) Cross Correlation */
+ size_t range, /* (i) Search range */
+ int16_t stage, /* (i) Stage of this search */
+ int16_t* inverseEnergy, /* (i) Inversed energy */
+ int16_t* inverseEnergyShift, /* (i) Shifts of inversed energy
+ with the offset 2*16-29 */
+ int32_t* Crit, /* (o) The criteria */
+ size_t* bestIndex, /* (o) Index that corresponds to
+ maximum criteria (in this
+ vector) */
+    int32_t* bestCrit, /* (o) Value of criteria for the
+ chosen index */
+ int16_t* bestCritSh); /* (o) The domain of the chosen
+ criteria */
+
+#endif
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/cb_update_best_index.c b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/cb_update_best_index.c
new file mode 100644
index 0000000000..d6fa4d93d4
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/cb_update_best_index.c
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_CbUpdateBestIndex.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/cb_update_best_index.h"
+
+#include "modules/audio_coding/codecs/ilbc/constants.h"
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+void WebRtcIlbcfix_CbUpdateBestIndex(
+ int32_t CritNew, /* (i) New Potentially best Criteria */
+ int16_t CritNewSh, /* (i) Shift value of above Criteria */
+ size_t IndexNew, /* (i) Index of new Criteria */
+ int32_t cDotNew, /* (i) Cross dot of new index */
+ int16_t invEnergyNew, /* (i) Inversed energy new index */
+ int16_t energyShiftNew, /* (i) Energy shifts of new index */
+ int32_t *CritMax, /* (i/o) Maximum Criteria (so far) */
+ int16_t *shTotMax, /* (i/o) Shifts of maximum criteria */
+ size_t *bestIndex, /* (i/o) Index that corresponds to
+ maximum criteria */
+ int16_t *bestGain) /* (i/o) Gain in Q14 that corresponds
+ to maximum criteria */
+{
+ int16_t shOld, shNew, tmp16;
+ int16_t scaleTmp;
+ int32_t gainW32;
+
+ /* Normalize the new and old Criteria to the same domain */
+ if (CritNewSh>(*shTotMax)) {
+ shOld=WEBRTC_SPL_MIN(31,CritNewSh-(*shTotMax));
+ shNew=0;
+ } else {
+ shOld=0;
+ shNew=WEBRTC_SPL_MIN(31,(*shTotMax)-CritNewSh);
+ }
+
+  /* Compare the two criteria. If the new one is better,
+ calculate the gain and store this index as the new best one
+ */
+
+ if ((CritNew >> shNew) > (*CritMax >> shOld)) {
+
+ tmp16 = (int16_t)WebRtcSpl_NormW32(cDotNew);
+ tmp16 = 16 - tmp16;
+
+ /* Calculate the gain in Q14
+ Compensate for inverseEnergyshift in Q29 and that the energy
+ value was stored in a int16_t (shifted down 16 steps)
+ => 29-14+16 = 31 */
+
+ scaleTmp = -energyShiftNew-tmp16+31;
+ scaleTmp = WEBRTC_SPL_MIN(31, scaleTmp);
+
+ gainW32 = ((int16_t)WEBRTC_SPL_SHIFT_W32(cDotNew, -tmp16) * invEnergyNew) >>
+ scaleTmp;
+
+    /* Check that the gain satisfies the gain criterion (max 1.3);
+       if it is larger, clamp it to 1.3
+       (slightly different from the FLP version)
+    */
+ if (gainW32>21299) {
+ *bestGain=21299;
+ } else if (gainW32<-21299) {
+ *bestGain=-21299;
+ } else {
+ *bestGain=(int16_t)gainW32;
+ }
+
+ *CritMax=CritNew;
+ *shTotMax=CritNewSh;
+ *bestIndex = IndexNew;
+ }
+
+ return;
+}
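
The clamp constant 21299 above is 1.3 expressed in Q14: 1.3 * 2^14 = 21299.2, truncated. A one-line check:

#include <stdio.h>

int main(void) {
  printf("%d\n", (int)(1.3 * (1 << 14)));  /* prints 21299 */
  return 0;
}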
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/cb_update_best_index.h b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/cb_update_best_index.h
new file mode 100644
index 0000000000..1a95d531e9
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/cb_update_best_index.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_CbUpdateBestIndex.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_CB_UPDATE_BEST_INDEX_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_CB_UPDATE_BEST_INDEX_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+void WebRtcIlbcfix_CbUpdateBestIndex(
+ int32_t CritNew, /* (i) New Potentially best Criteria */
+ int16_t CritNewSh, /* (i) Shift value of above Criteria */
+ size_t IndexNew, /* (i) Index of new Criteria */
+ int32_t cDotNew, /* (i) Cross dot of new index */
+ int16_t invEnergyNew, /* (i) Inversed energy new index */
+ int16_t energyShiftNew, /* (i) Energy shifts of new index */
+ int32_t* CritMax, /* (i/o) Maximum Criteria (so far) */
+ int16_t* shTotMax, /* (i/o) Shifts of maximum criteria */
+ size_t* bestIndex, /* (i/o) Index that corresponds to
+ maximum criteria */
+ int16_t* bestGain); /* (i/o) Gain in Q14 that corresponds
+ to maximum criteria */
+
+#endif
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/chebyshev.c b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/chebyshev.c
new file mode 100644
index 0000000000..b4eee66219
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/chebyshev.c
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_Chebyshev.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/chebyshev.h"
+
+#include "modules/audio_coding/codecs/ilbc/constants.h"
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*------------------------------------------------------------------*
+ * Calculate the Chebyshev polynomial series
+ * F(w) = 2*exp(-j5w)*C(x)
+ * C(x) = (T_0(x) + f(1)T_1(x) + ... + f(4)T_4(x) + f(5)/2)
+ * T_i(x) is the i:th order Chebyshev polynomial
+ *------------------------------------------------------------------*/
+
+int16_t WebRtcIlbcfix_Chebyshev(
+ /* (o) Result of C(x) */
+    int16_t x,  /* (i) Input value to the Chebyshev polynomial */
+ int16_t *f /* (i) The coefficients in the polynomial */
+ ) {
+ int16_t b1_high, b1_low; /* Use the high, low format to increase the accuracy */
+ int32_t b2;
+ int32_t tmp1W32;
+ int32_t tmp2W32;
+ int i;
+
+ b2 = (int32_t)0x1000000; /* b2 = 1.0 (Q23) */
+ /* Calculate b1 = 2*x + f[1] */
+ tmp1W32 = (x << 10) + (f[1] << 14);
+
+ for (i = 2; i < 5; i++) {
+ tmp2W32 = tmp1W32;
+
+ /* Split b1 (in tmp1W32) into a high and low part */
+ b1_high = (int16_t)(tmp1W32 >> 16);
+ b1_low = (int16_t)((tmp1W32 - ((int32_t)b1_high << 16)) >> 1);
+
+ /* Calculate 2*x*b1-b2+f[i] */
+ tmp1W32 = ((b1_high * x + ((b1_low * x) >> 15)) << 2) - b2 + (f[i] << 14);
+
+ /* Update b2 for next round */
+ b2 = tmp2W32;
+ }
+
+ /* Split b1 (in tmp1W32) into a high and low part */
+ b1_high = (int16_t)(tmp1W32 >> 16);
+ b1_low = (int16_t)((tmp1W32 - ((int32_t)b1_high << 16)) >> 1);
+
+ /* tmp1W32 = x*b1 - b2 + f[i]/2 */
+ tmp1W32 = ((b1_high * x) << 1) + (((b1_low * x) >> 15) << 1) -
+ b2 + (f[i] << 13);
+
+ /* Handle overflows and set to maximum or minimum int16_t instead */
+ if (tmp1W32>((int32_t)33553408)) {
+ return(WEBRTC_SPL_WORD16_MAX);
+ } else if (tmp1W32<((int32_t)-33554432)) {
+ return(WEBRTC_SPL_WORD16_MIN);
+ } else {
+ return (int16_t)(tmp1W32 >> 10);
+ }
+}
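
The fixed-point recurrence above is Clenshaw's algorithm for evaluating a Chebyshev series. A floating-point reference, under my reading of the Q-format code (b2 = 1, b1 = 2x + f[1], then b = 2x*b1 - b2 + f[i] for i = 2..4, and finally x*b1 - b2 + f[5]/2); the coefficients in main are made up for illustration:

#include <stdio.h>

static double chebyshev_ref(double x, const double f[6]) {
  double b2 = 1.0;
  double b1 = 2.0 * x + f[1];
  int i;
  for (i = 2; i < 5; i++) {
    double prev_b1 = b1;
    b1 = 2.0 * x * b1 - b2 + f[i];  /* Clenshaw step */
    b2 = prev_b1;
  }
  return x * b1 - b2 + f[5] / 2.0;  /* final half-weight step */
}

int main(void) {
  const double f[6] = {0.0, 0.5, -0.25, 0.125, -0.0625, 0.03125};
  printf("C(0.3) = %f\n", chebyshev_ref(0.3, f));
  return 0;
}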
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/chebyshev.h b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/chebyshev.h
new file mode 100644
index 0000000000..7e7742c5cc
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/chebyshev.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_Chebyshev.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_CHEBYSHEV_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_CHEBYSHEV_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+/*------------------------------------------------------------------*
+ * Calculate the Chebyshev polynomial series
+ * F(w) = 2*exp(-j5w)*C(x)
+ * C(x) = (T_0(x) + f(1)T_1(x) + ... + f(4)T_4(x) + f(5)/2)
+ * T_i(x) is the i:th order Chebyshev polynomial
+ *------------------------------------------------------------------*/
+
+int16_t WebRtcIlbcfix_Chebyshev(
+ /* (o) Result of C(x) */
+    int16_t x, /* (i) Input value to the Chebyshev polynomial */
+ int16_t* f /* (i) The coefficients in the polynomial */
+ );
+
+#endif
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/comp_corr.c b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/comp_corr.c
new file mode 100644
index 0000000000..452bc78e3b
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/comp_corr.c
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_CompCorr.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/comp_corr.h"
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ * Compute cross correlation and pitch gain for pitch prediction
+ * of last subframe at given lag.
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_CompCorr(
+ int32_t *corr, /* (o) cross correlation */
+ int32_t *ener, /* (o) energy */
+ int16_t *buffer, /* (i) signal buffer */
+ size_t lag, /* (i) pitch lag */
+ size_t bLen, /* (i) length of buffer */
+ size_t sRange, /* (i) correlation search length */
+ int16_t scale /* (i) number of rightshifts to use */
+ ){
+ int16_t *w16ptr;
+
+ w16ptr=&buffer[bLen-sRange-lag];
+
+ /* Calculate correlation and energy */
+ (*corr)=WebRtcSpl_DotProductWithScale(&buffer[bLen-sRange], w16ptr, sRange, scale);
+ (*ener)=WebRtcSpl_DotProductWithScale(w16ptr, w16ptr, sRange, scale);
+
+  /* If the energy is zero, clear the correlation and set the energy
+     to 1 to avoid problems in subsequent divisions */
+ if (*ener == 0) {
+ *corr = 0;
+ *ener = 1;
+ }
+}
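
The guard at the end makes the output pair safe to divide. A hedged usage sketch (the gain expression is illustrative; actual callers work in fixed point):

#include <stdint.h>
#include <stdio.h>

int main(void) {
  int32_t corr = 0, ener = 0;   /* e.g. an all-zero buffer segment */
  if (ener == 0) {              /* same guard as in CompCorr */
    corr = 0;
    ener = 1;
  }
  printf("pitch gain = %f\n", (double)corr / (double)ener);  /* 0.0 */
  return 0;
}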
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/comp_corr.h b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/comp_corr.h
new file mode 100644
index 0000000000..010c6a1ce5
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/comp_corr.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_CompCorr.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_COMP_CORR_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_COMP_CORR_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+/*----------------------------------------------------------------*
+ * Compute cross correlation and pitch gain for pitch prediction
+ * of last subframe at given lag.
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_CompCorr(int32_t* corr, /* (o) cross correlation */
+ int32_t* ener, /* (o) energy */
+ int16_t* buffer, /* (i) signal buffer */
+ size_t lag, /* (i) pitch lag */
+ size_t bLen, /* (i) length of buffer */
+ size_t sRange, /* (i) correlation search length */
+ int16_t scale /* (i) number of rightshifts to use */
+ );
+
+#endif
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/complexityMeasures.m b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/complexityMeasures.m
new file mode 100644
index 0000000000..4bda83622f
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/complexityMeasures.m
@@ -0,0 +1,57 @@
+% Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+%
+% Use of this source code is governed by a BSD-style license
+% that can be found in the LICENSE file in the root of the source
+% tree. An additional intellectual property rights grant can be found
+% in the file PATENTS. All contributing project authors may
+% be found in the AUTHORS file in the root of the source tree.
+
+clear;
+pack;
+%
+% Enter the path to YOUR executable and remember to define the preprocessor
+% variable PRINT_MIPS to get the instructions printed to the screen.
+%
+command = '!iLBCtest.exe 30 speechAndBGnoise.pcm out1.bit out1.pcm tlm10_30ms.dat';
+cout=' > st.txt'; % redirect output to st.txt (loaded below into variable 'st')
+eval(strcat(command,cout));
+if(length(cout)>3)
+ load st.txt
+else
+ disp('No cout file to load')
+end
+
+% initialize vector to zero
+index = find(st(1:end,1)==-1);
+indexnonzero = find(st(1:end,1)>0);
+frames = length(index)-indexnonzero(1)+1;
+start = indexnonzero(1) - 1;
+functionOrder=max(st(:,2));
+new=zeros(frames,functionOrder);
+
+for i = 1:frames,
+ for j = index(start-1+i)+1:(index(start+i)-1),
+ new(i,st(j,2)) = new(i,st(j,2)) + st(j,1);
+ end
+end
+
+result=zeros(functionOrder,3);
+for i=1:functionOrder
+ nonzeroelements = find(new(1:end,i)>0);
+ result(i,1)=i;
+
+ % Compute each function's mean complexity
+ % result(i,2)=(sum(new(nonzeroelements,i))/(length(nonzeroelements)*0.03))/1000000;
+
+ % Compute each function's maximum complexity in encoding
+ % and decoding respectively and then add it together:
+ % result(i,3)=(max(new(1:end,i))/0.03)/1000000;
+ result(i,3)=(max(new(1:size(new,1)/2,i))/0.03)/1000000 + (max(new(size(new,1)/2+1:end,i))/0.03)/1000000;
+end
+
+result
+
+% Compute maximum complexity for a single frame (enc/dec separately and together)
+maxEncComplexityInAFrame = (max(sum(new(1:size(new,1)/2,:),2))/0.03)/1000000
+maxDecComplexityInAFrame = (max(sum(new(size(new,1)/2+1:end,:),2))/0.03)/1000000
+totalComplexity = maxEncComplexityInAFrame + maxDecComplexityInAFrame
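
The recurring expression (max(...)/0.03)/1000000 in the script converts a per-frame instruction count into MIPS for 30 ms frames. The same arithmetic in C, with an illustrative count:

#include <stdio.h>

int main(void) {
  double instructions_per_frame = 450000.0;  /* illustrative count */
  double frame_seconds = 0.030;              /* 30 ms iLBC frame */
  printf("%.1f MIPS\n", instructions_per_frame / frame_seconds / 1e6);
  /* 450000 / 0.030 / 1e6 = 15.0 MIPS */
  return 0;
}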
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/constants.c b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/constants.c
new file mode 100644
index 0000000000..22f2acb330
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/constants.c
@@ -0,0 +1,667 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ constants.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/constants.h"
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/* HP Filters {b[0] b[1] b[2] -a[1] -a[2]} */
+
+const int16_t WebRtcIlbcfix_kHpInCoefs[5] = {3798, -7596, 3798, 7807, -3733};
+const int16_t WebRtcIlbcfix_kHpOutCoefs[5] = {3849, -7699, 3849, 7918, -3833};
+
+/* Window in Q11 used to weight the energies of the 5 choices (3 for 20 ms)
+   when choosing the 80-sample start state
+*/
+const int16_t WebRtcIlbcfix_kStartSequenceEnrgWin[NSUB_MAX-1]= {
+ 1638, 1843, 2048, 1843, 1638
+};
+
+/* LP Filter coeffs used for downsampling */
+const int16_t WebRtcIlbcfix_kLpFiltCoefs[FILTERORDER_DS_PLUS1]= {
+ -273, 512, 1297, 1696, 1297, 512, -273
+};
+
+/* Constants used in the LPC calculations */
+
+/* Hanning LPC window (in Q15) */
+const int16_t WebRtcIlbcfix_kLpcWin[BLOCKL_MAX] = {
+ 6, 22, 50, 89, 139, 200, 272, 355, 449, 554, 669, 795,
+ 932, 1079, 1237, 1405, 1583, 1771, 1969, 2177, 2395, 2622, 2858, 3104,
+ 3359, 3622, 3894, 4175, 4464, 4761, 5066, 5379, 5699, 6026, 6361, 6702,
+ 7050, 7404, 7764, 8130, 8502, 8879, 9262, 9649, 10040, 10436, 10836, 11240,
+ 11647, 12058, 12471, 12887, 13306, 13726, 14148, 14572, 14997, 15423, 15850, 16277,
+ 16704, 17131, 17558, 17983, 18408, 18831, 19252, 19672, 20089, 20504, 20916, 21325,
+ 21730, 22132, 22530, 22924, 23314, 23698, 24078, 24452, 24821, 25185, 25542, 25893,
+ 26238, 26575, 26906, 27230, 27547, 27855, 28156, 28450, 28734, 29011, 29279, 29538,
+ 29788, 30029, 30261, 30483, 30696, 30899, 31092, 31275, 31448, 31611, 31764, 31906,
+ 32037, 32158, 32268, 32367, 32456, 32533, 32600, 32655, 32700, 32733, 32755, 32767,
+ 32767, 32755, 32733, 32700, 32655, 32600, 32533, 32456, 32367, 32268, 32158, 32037,
+ 31906, 31764, 31611, 31448, 31275, 31092, 30899, 30696, 30483, 30261, 30029, 29788,
+ 29538, 29279, 29011, 28734, 28450, 28156, 27855, 27547, 27230, 26906, 26575, 26238,
+ 25893, 25542, 25185, 24821, 24452, 24078, 23698, 23314, 22924, 22530, 22132, 21730,
+ 21325, 20916, 20504, 20089, 19672, 19252, 18831, 18408, 17983, 17558, 17131, 16704,
+ 16277, 15850, 15423, 14997, 14572, 14148, 13726, 13306, 12887, 12471, 12058, 11647,
+ 11240, 10836, 10436, 10040, 9649, 9262, 8879, 8502, 8130, 7764, 7404, 7050,
+ 6702, 6361, 6026, 5699, 5379, 5066, 4761, 4464, 4175, 3894, 3622, 3359,
+ 3104, 2858, 2622, 2395, 2177, 1969, 1771, 1583, 1405, 1237, 1079, 932,
+ 795, 669, 554, 449, 355, 272, 200, 139, 89, 50, 22, 6
+};
+
+/* Asymmetric LPC window (in Q15)*/
+const int16_t WebRtcIlbcfix_kLpcAsymWin[BLOCKL_MAX] = {
+ 2, 7, 15, 27, 42, 60, 81, 106, 135, 166, 201, 239,
+ 280, 325, 373, 424, 478, 536, 597, 661, 728, 798, 872, 949,
+ 1028, 1111, 1197, 1287, 1379, 1474, 1572, 1674, 1778, 1885, 1995, 2108,
+ 2224, 2343, 2465, 2589, 2717, 2847, 2980, 3115, 3254, 3395, 3538, 3684,
+ 3833, 3984, 4138, 4295, 4453, 4615, 4778, 4944, 5112, 5283, 5456, 5631,
+ 5808, 5987, 6169, 6352, 6538, 6725, 6915, 7106, 7300, 7495, 7692, 7891,
+ 8091, 8293, 8497, 8702, 8909, 9118, 9328, 9539, 9752, 9966, 10182, 10398,
+ 10616, 10835, 11055, 11277, 11499, 11722, 11947, 12172, 12398, 12625, 12852, 13080,
+ 13309, 13539, 13769, 14000, 14231, 14463, 14695, 14927, 15160, 15393, 15626, 15859,
+ 16092, 16326, 16559, 16792, 17026, 17259, 17492, 17725, 17957, 18189, 18421, 18653,
+ 18884, 19114, 19344, 19573, 19802, 20030, 20257, 20483, 20709, 20934, 21157, 21380,
+ 21602, 21823, 22042, 22261, 22478, 22694, 22909, 23123, 23335, 23545, 23755, 23962,
+ 24168, 24373, 24576, 24777, 24977, 25175, 25371, 25565, 25758, 25948, 26137, 26323,
+ 26508, 26690, 26871, 27049, 27225, 27399, 27571, 27740, 27907, 28072, 28234, 28394,
+ 28552, 28707, 28860, 29010, 29157, 29302, 29444, 29584, 29721, 29855, 29987, 30115,
+ 30241, 30364, 30485, 30602, 30717, 30828, 30937, 31043, 31145, 31245, 31342, 31436,
+ 31526, 31614, 31699, 31780, 31858, 31933, 32005, 32074, 32140, 32202, 32261, 32317,
+ 32370, 32420, 32466, 32509, 32549, 32585, 32618, 32648, 32675, 32698, 32718, 32734,
+ 32748, 32758, 32764, 32767, 32767, 32667, 32365, 31863, 31164, 30274, 29197, 27939,
+ 26510, 24917, 23170, 21281, 19261, 17121, 14876, 12540, 10126, 7650, 5126, 2571
+};
+
+/* Lag window for LPC (Q31) */
+const int32_t WebRtcIlbcfix_kLpcLagWin[LPC_FILTERORDER + 1]={
+ 2147483647, 2144885453, 2137754373, 2125918626, 2109459810,
+ 2088483140, 2063130336, 2033564590, 1999977009, 1962580174,
+ 1921610283};
+
+/* WebRtcIlbcfix_kLpcChirpSyntDenum vector in Q15 corresponding
+ * floating point vector {1 0.9025 0.9025^2 0.9025^3 ...}
+ */
+const int16_t WebRtcIlbcfix_kLpcChirpSyntDenum[LPC_FILTERORDER + 1] = {
+ 32767, 29573, 26690, 24087,
+ 21739, 19619, 17707, 15980,
+ 14422, 13016, 11747};
+
+/* WebRtcIlbcfix_kLpcChirpWeightDenum in Q15 corresponding to
+ * floating point vector {1 0.4222 0.4222^2... }
+ */
+const int16_t WebRtcIlbcfix_kLpcChirpWeightDenum[LPC_FILTERORDER + 1] = {
+ 32767, 13835, 5841, 2466, 1041, 440,
+ 186, 78, 33, 14, 6};
+
+/* LSF quantization Q13 domain */
+const int16_t WebRtcIlbcfix_kLsfCb[64 * 3 + 128 * 3 + 128 * 4] = {
+ 1273, 2238, 3696,
+ 3199, 5309, 8209,
+ 3606, 5671, 7829,
+ 2815, 5262, 8778,
+ 2608, 4027, 5493,
+ 1582, 3076, 5945,
+ 2983, 4181, 5396,
+ 2437, 4322, 6902,
+ 1861, 2998, 4613,
+ 2007, 3250, 5214,
+ 1388, 2459, 4262,
+ 2563, 3805, 5269,
+ 2036, 3522, 5129,
+ 1935, 4025, 6694,
+ 2744, 5121, 7338,
+ 2810, 4248, 5723,
+ 3054, 5405, 7745,
+ 1449, 2593, 4763,
+ 3411, 5128, 6596,
+ 2484, 4659, 7496,
+ 1668, 2879, 4818,
+ 1812, 3072, 5036,
+ 1638, 2649, 3900,
+ 2464, 3550, 4644,
+ 1853, 2900, 4158,
+ 2458, 4163, 5830,
+ 2556, 4036, 6254,
+ 2703, 4432, 6519,
+ 3062, 4953, 7609,
+ 1725, 3703, 6187,
+ 2221, 3877, 5427,
+ 2339, 3579, 5197,
+ 2021, 4633, 7037,
+ 2216, 3328, 4535,
+ 2961, 4739, 6667,
+ 2807, 3955, 5099,
+ 2788, 4501, 6088,
+ 1642, 2755, 4431,
+ 3341, 5282, 7333,
+ 2414, 3726, 5727,
+ 1582, 2822, 5269,
+ 2259, 3447, 4905,
+ 3117, 4986, 7054,
+ 1825, 3491, 5542,
+ 3338, 5736, 8627,
+ 1789, 3090, 5488,
+ 2566, 3720, 4923,
+ 2846, 4682, 7161,
+ 1950, 3321, 5976,
+ 1834, 3383, 6734,
+ 3238, 4769, 6094,
+ 2031, 3978, 5903,
+ 1877, 4068, 7436,
+ 2131, 4644, 8296,
+ 2764, 5010, 8013,
+ 2194, 3667, 6302,
+ 2053, 3127, 4342,
+ 3523, 6595, 10010,
+ 3134, 4457, 5748,
+ 3142, 5819, 9414,
+ 2223, 4334, 6353,
+ 2022, 3224, 4822,
+ 2186, 3458, 5544,
+ 2552, 4757, 6870,
+ 10905, 12917, 14578,
+ 9503, 11485, 14485,
+ 9518, 12494, 14052,
+ 6222, 7487, 9174,
+ 7759, 9186, 10506,
+ 8315, 12755, 14786,
+ 9609, 11486, 13866,
+ 8909, 12077, 13643,
+ 7369, 9054, 11520,
+ 9408, 12163, 14715,
+ 6436, 9911, 12843,
+ 7109, 9556, 11884,
+ 7557, 10075, 11640,
+ 6482, 9202, 11547,
+ 6463, 7914, 10980,
+ 8611, 10427, 12752,
+ 7101, 9676, 12606,
+ 7428, 11252, 13172,
+ 10197, 12955, 15842,
+ 7487, 10955, 12613,
+ 5575, 7858, 13621,
+ 7268, 11719, 14752,
+ 7476, 11744, 13795,
+ 7049, 8686, 11922,
+ 8234, 11314, 13983,
+ 6560, 11173, 14984,
+ 6405, 9211, 12337,
+ 8222, 12054, 13801,
+ 8039, 10728, 13255,
+ 10066, 12733, 14389,
+ 6016, 7338, 10040,
+ 6896, 8648, 10234,
+ 7538, 9170, 12175,
+ 7327, 12608, 14983,
+ 10516, 12643, 15223,
+ 5538, 7644, 12213,
+ 6728, 12221, 14253,
+ 7563, 9377, 12948,
+ 8661, 11023, 13401,
+ 7280, 8806, 11085,
+ 7723, 9793, 12333,
+ 12225, 14648, 16709,
+ 8768, 13389, 15245,
+ 10267, 12197, 13812,
+ 5301, 7078, 11484,
+ 7100, 10280, 11906,
+ 8716, 12555, 14183,
+ 9567, 12464, 15434,
+ 7832, 12305, 14300,
+ 7608, 10556, 12121,
+ 8913, 11311, 12868,
+ 7414, 9722, 11239,
+ 8666, 11641, 13250,
+ 9079, 10752, 12300,
+ 8024, 11608, 13306,
+ 10453, 13607, 16449,
+ 8135, 9573, 10909,
+ 6375, 7741, 10125,
+ 10025, 12217, 14874,
+ 6985, 11063, 14109,
+ 9296, 13051, 14642,
+ 8613, 10975, 12542,
+ 6583, 10414, 13534,
+ 6191, 9368, 13430,
+ 5742, 6859, 9260,
+ 7723, 9813, 13679,
+ 8137, 11291, 12833,
+ 6562, 8973, 10641,
+ 6062, 8462, 11335,
+ 6928, 8784, 12647,
+ 7501, 8784, 10031,
+ 8372, 10045, 12135,
+ 8191, 9864, 12746,
+ 5917, 7487, 10979,
+ 5516, 6848, 10318,
+ 6819, 9899, 11421,
+ 7882, 12912, 15670,
+ 9558, 11230, 12753,
+ 7752, 9327, 11472,
+ 8479, 9980, 11358,
+ 11418, 14072, 16386,
+ 7968, 10330, 14423,
+ 8423, 10555, 12162,
+ 6337, 10306, 14391,
+ 8850, 10879, 14276,
+ 6750, 11885, 15710,
+ 7037, 8328, 9764,
+ 6914, 9266, 13476,
+ 9746, 13949, 15519,
+ 11032, 14444, 16925,
+ 8032, 10271, 11810,
+ 10962, 13451, 15833,
+ 10021, 11667, 13324,
+ 6273, 8226, 12936,
+ 8543, 10397, 13496,
+ 7936, 10302, 12745,
+ 6769, 8138, 10446,
+ 6081, 7786, 11719,
+ 8637, 11795, 14975,
+ 8790, 10336, 11812,
+ 7040, 8490, 10771,
+ 7338, 10381, 13153,
+ 6598, 7888, 9358,
+ 6518, 8237, 12030,
+ 9055, 10763, 12983,
+ 6490, 10009, 12007,
+ 9589, 12023, 13632,
+ 6867, 9447, 10995,
+ 7930, 9816, 11397,
+ 10241, 13300, 14939,
+ 5830, 8670, 12387,
+ 9870, 11915, 14247,
+ 9318, 11647, 13272,
+ 6721, 10836, 12929,
+ 6543, 8233, 9944,
+ 8034, 10854, 12394,
+ 9112, 11787, 14218,
+ 9302, 11114, 13400,
+ 9022, 11366, 13816,
+ 6962, 10461, 12480,
+ 11288, 13333, 15222,
+ 7249, 8974, 10547,
+ 10566, 12336, 14390,
+ 6697, 11339, 13521,
+ 11851, 13944, 15826,
+ 6847, 8381, 11349,
+ 7509, 9331, 10939,
+ 8029, 9618, 11909,
+ 13973, 17644, 19647, 22474,
+ 14722, 16522, 20035, 22134,
+ 16305, 18179, 21106, 23048,
+ 15150, 17948, 21394, 23225,
+ 13582, 15191, 17687, 22333,
+ 11778, 15546, 18458, 21753,
+ 16619, 18410, 20827, 23559,
+ 14229, 15746, 17907, 22474,
+ 12465, 15327, 20700, 22831,
+ 15085, 16799, 20182, 23410,
+ 13026, 16935, 19890, 22892,
+ 14310, 16854, 19007, 22944,
+ 14210, 15897, 18891, 23154,
+ 14633, 18059, 20132, 22899,
+ 15246, 17781, 19780, 22640,
+ 16396, 18904, 20912, 23035,
+ 14618, 17401, 19510, 21672,
+ 15473, 17497, 19813, 23439,
+ 18851, 20736, 22323, 23864,
+ 15055, 16804, 18530, 20916,
+ 16490, 18196, 19990, 21939,
+ 11711, 15223, 21154, 23312,
+ 13294, 15546, 19393, 21472,
+ 12956, 16060, 20610, 22417,
+ 11628, 15843, 19617, 22501,
+ 14106, 16872, 19839, 22689,
+ 15655, 18192, 20161, 22452,
+ 12953, 15244, 20619, 23549,
+ 15322, 17193, 19926, 21762,
+ 16873, 18676, 20444, 22359,
+ 14874, 17871, 20083, 21959,
+ 11534, 14486, 19194, 21857,
+ 17766, 19617, 21338, 23178,
+ 13404, 15284, 19080, 23136,
+ 15392, 17527, 19470, 21953,
+ 14462, 16153, 17985, 21192,
+ 17734, 19750, 21903, 23783,
+ 16973, 19096, 21675, 23815,
+ 16597, 18936, 21257, 23461,
+ 15966, 17865, 20602, 22920,
+ 15416, 17456, 20301, 22972,
+ 18335, 20093, 21732, 23497,
+ 15548, 17217, 20679, 23594,
+ 15208, 16995, 20816, 22870,
+ 13890, 18015, 20531, 22468,
+ 13211, 15377, 19951, 22388,
+ 12852, 14635, 17978, 22680,
+ 16002, 17732, 20373, 23544,
+ 11373, 14134, 19534, 22707,
+ 17329, 19151, 21241, 23462,
+ 15612, 17296, 19362, 22850,
+ 15422, 19104, 21285, 23164,
+ 13792, 17111, 19349, 21370,
+ 15352, 17876, 20776, 22667,
+ 15253, 16961, 18921, 22123,
+ 14108, 17264, 20294, 23246,
+ 15785, 17897, 20010, 21822,
+ 17399, 19147, 20915, 22753,
+ 13010, 15659, 18127, 20840,
+ 16826, 19422, 22218, 24084,
+ 18108, 20641, 22695, 24237,
+ 18018, 20273, 22268, 23920,
+ 16057, 17821, 21365, 23665,
+ 16005, 17901, 19892, 23016,
+ 13232, 16683, 21107, 23221,
+ 13280, 16615, 19915, 21829,
+ 14950, 18575, 20599, 22511,
+ 16337, 18261, 20277, 23216,
+ 14306, 16477, 21203, 23158,
+ 12803, 17498, 20248, 22014,
+ 14327, 17068, 20160, 22006,
+ 14402, 17461, 21599, 23688,
+ 16968, 18834, 20896, 23055,
+ 15070, 17157, 20451, 22315,
+ 15419, 17107, 21601, 23946,
+ 16039, 17639, 19533, 21424,
+ 16326, 19261, 21745, 23673,
+ 16489, 18534, 21658, 23782,
+ 16594, 18471, 20549, 22807,
+ 18973, 21212, 22890, 24278,
+ 14264, 18674, 21123, 23071,
+ 15117, 16841, 19239, 23118,
+ 13762, 15782, 20478, 23230,
+ 14111, 15949, 20058, 22354,
+ 14990, 16738, 21139, 23492,
+ 13735, 16971, 19026, 22158,
+ 14676, 17314, 20232, 22807,
+ 16196, 18146, 20459, 22339,
+ 14747, 17258, 19315, 22437,
+ 14973, 17778, 20692, 23367,
+ 15715, 17472, 20385, 22349,
+ 15702, 18228, 20829, 23410,
+ 14428, 16188, 20541, 23630,
+ 16824, 19394, 21365, 23246,
+ 13069, 16392, 18900, 21121,
+ 12047, 16640, 19463, 21689,
+ 14757, 17433, 19659, 23125,
+ 15185, 16930, 19900, 22540,
+ 16026, 17725, 19618, 22399,
+ 16086, 18643, 21179, 23472,
+ 15462, 17248, 19102, 21196,
+ 17368, 20016, 22396, 24096,
+ 12340, 14475, 19665, 23362,
+ 13636, 16229, 19462, 22728,
+ 14096, 16211, 19591, 21635,
+ 12152, 14867, 19943, 22301,
+ 14492, 17503, 21002, 22728,
+ 14834, 16788, 19447, 21411,
+ 14650, 16433, 19326, 22308,
+ 14624, 16328, 19659, 23204,
+ 13888, 16572, 20665, 22488,
+ 12977, 16102, 18841, 22246,
+ 15523, 18431, 21757, 23738,
+ 14095, 16349, 18837, 20947,
+ 13266, 17809, 21088, 22839,
+ 15427, 18190, 20270, 23143,
+ 11859, 16753, 20935, 22486,
+ 12310, 17667, 21736, 23319,
+ 14021, 15926, 18702, 22002,
+ 12286, 15299, 19178, 21126,
+ 15703, 17491, 21039, 23151,
+ 12272, 14018, 18213, 22570,
+ 14817, 16364, 18485, 22598,
+ 17109, 19683, 21851, 23677,
+ 12657, 14903, 19039, 22061,
+ 14713, 16487, 20527, 22814,
+ 14635, 16726, 18763, 21715,
+ 15878, 18550, 20718, 22906
+};
+
+const int16_t WebRtcIlbcfix_kLsfDimCb[LSF_NSPLIT] = {3, 3, 4};
+const int16_t WebRtcIlbcfix_kLsfSizeCb[LSF_NSPLIT] = {64, 128, 128};
+
+const int16_t WebRtcIlbcfix_kLsfMean[LPC_FILTERORDER] = {
+ 2308, 3652, 5434, 7885,
+ 10255, 12559, 15160, 17513,
+ 20328, 22752};
+
+const int16_t WebRtcIlbcfix_kLspMean[LPC_FILTERORDER] = {
+ 31476, 29565, 25819, 18725, 10276,
+ 1236, -9049, -17600, -25884, -30618
+};
+
+/* Q14 */
+const int16_t WebRtcIlbcfix_kLsfWeight20ms[4] = {12288, 8192, 4096, 0};
+const int16_t WebRtcIlbcfix_kLsfWeight30ms[6] = {8192, 16384, 10923, 5461, 0, 0};
+
+/*
+ cos(x) in Q15
+ WebRtcIlbcfix_kCos[i] = cos(pi*i/64.0)
+ used in WebRtcIlbcfix_Lsp2Lsf()
+*/
+
+const int16_t WebRtcIlbcfix_kCos[64] = {
+ 32767, 32729, 32610, 32413, 32138, 31786, 31357, 30853,
+ 30274, 29622, 28899, 28106, 27246, 26320, 25330, 24279,
+ 23170, 22006, 20788, 19520, 18205, 16846, 15447, 14010,
+ 12540, 11039, 9512, 7962, 6393, 4808, 3212, 1608,
+ 0, -1608, -3212, -4808, -6393, -7962, -9512, -11039,
+ -12540, -14010, -15447, -16846, -18205, -19520, -20788, -22006,
+ -23170, -24279, -25330, -26320, -27246, -28106, -28899, -29622,
+ -30274, -30853, -31357, -31786, -32138, -32413, -32610, -32729
+};
+
+/*
+ Derivative in Q19, used to interpolate between the
+ WebRtcIlbcfix_kCos[] values to get a more exact y = cos(x)
+*/
+const int16_t WebRtcIlbcfix_kCosDerivative[64] = {
+ -632, -1893, -3150, -4399, -5638, -6863, -8072, -9261,
+ -10428, -11570, -12684, -13767, -14817, -15832, -16808, -17744,
+ -18637, -19486, -20287, -21039, -21741, -22390, -22986, -23526,
+ -24009, -24435, -24801, -25108, -25354, -25540, -25664, -25726,
+ -25726, -25664, -25540, -25354, -25108, -24801, -24435, -24009,
+ -23526, -22986, -22390, -21741, -21039, -20287, -19486, -18637,
+ -17744, -16808, -15832, -14817, -13767, -12684, -11570, -10428,
+ -9261, -8072, -6863, -5638, -4399, -3150, -1893, -632};
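+
+/* Illustrative sketch (not part of the codec): how the two tables combine
+   into a linearly interpolated cosine. Assuming an integer index i (0..63)
+   and a fractional step frac kept in Q11, a Q15 result can be formed as
+
+     int16_t cos_q15 = (int16_t)(WebRtcIlbcfix_kCos[i] +
+         ((WebRtcIlbcfix_kCosDerivative[i] * frac) >> 15));
+
+   since Q19 * Q11 = Q30, and a right shift by 15 returns to Q15. The exact
+   fraction format used inside WebRtcIlbcfix_Lsp2Lsf() may differ; this only
+   shows the Q-domain bookkeeping. */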
+
+/*
+ Table in Q15, used for a2lsf conversion
+ WebRtcIlbcfix_kCosGrid[i] = cos((2*pi*i)/(float)(2*COS_GRID_POINTS));
+*/
+
+const int16_t WebRtcIlbcfix_kCosGrid[COS_GRID_POINTS + 1] = {
+ 32760, 32723, 32588, 32364, 32051, 31651, 31164, 30591,
+ 29935, 29196, 28377, 27481, 26509, 25465, 24351, 23170,
+ 21926, 20621, 19260, 17846, 16384, 14876, 13327, 11743,
+ 10125, 8480, 6812, 5126, 3425, 1714, 0, -1714, -3425,
+ -5126, -6812, -8480, -10125, -11743, -13327, -14876,
+ -16384, -17846, -19260, -20621, -21926, -23170, -24351,
+ -25465, -26509, -27481, -28377, -29196, -29935, -30591,
+ -31164, -31651, -32051, -32364, -32588, -32723, -32760
+};
+
+/*
+ Derivative of y = acos(x) in Q12
+ used in WebRtcIlbcfix_Lsp2Lsf()
+*/
+
+const int16_t WebRtcIlbcfix_kAcosDerivative[64] = {
+ -26887, -8812, -5323, -3813, -2979, -2444, -2081, -1811,
+ -1608, -1450, -1322, -1219, -1132, -1059, -998, -946,
+ -901, -861, -827, -797, -772, -750, -730, -713,
+ -699, -687, -677, -668, -662, -657, -654, -652,
+ -652, -654, -657, -662, -668, -677, -687, -699,
+ -713, -730, -750, -772, -797, -827, -861, -901,
+ -946, -998, -1059, -1132, -1219, -1322, -1450, -1608,
+ -1811, -2081, -2444, -2979, -3813, -5323, -8812, -26887
+};
+
+
+/* Tables for quantization of start state */
+
+/* State quantization tables */
+const int16_t WebRtcIlbcfix_kStateSq3[8] = { /* Values in Q13 */
+ -30473, -17838, -9257, -2537,
+ 3639, 10893, 19958, 32636
+};
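+
+/* Illustrative sketch (not part of the codec): nearest-level lookup in this
+   8-level (3-bit) quantizer for a Q13 sample x (abs() as in <stdlib.h>; the
+   actual encoder search may be organized differently):
+
+     int best = 0;
+     for (int k = 1; k < 8; k++) {
+       if (abs(x - WebRtcIlbcfix_kStateSq3[k]) <
+           abs(x - WebRtcIlbcfix_kStateSq3[best])) {
+         best = k;
+       }
+     }
+*/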
+
+/* This table defines the limits for the selection of the frg quantization
+   index:
+   value less than or equal to entry 0 => index = 0
+   value less than or equal to entry k => index = k
+*/
+const int32_t WebRtcIlbcfix_kChooseFrgQuant[64] = {
+ 118, 163, 222, 305, 425, 604,
+ 851, 1174, 1617, 2222, 3080, 4191,
+ 5525, 7215, 9193, 11540, 14397, 17604,
+ 21204, 25209, 29863, 35720, 42531, 50375,
+ 59162, 68845, 80108, 93754, 110326, 129488,
+ 150654, 174328, 201962, 233195, 267843, 308239,
+ 354503, 405988, 464251, 531550, 608652, 697516,
+ 802526, 928793, 1080145, 1258120, 1481106, 1760881,
+ 2111111, 2546619, 3078825, 3748642, 4563142, 5573115,
+ 6887601, 8582108, 10797296, 14014513, 18625760, 25529599,
+ 37302935, 58819185, 109782723, WEBRTC_SPL_WORD32_MAX
+};
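+
+/* Illustrative sketch (not part of the codec): the limits above are meant to
+   be used with a linear scan, assuming w32 holds the value to be classified:
+
+     size_t idx = 0;
+     while (idx < 63 && w32 > WebRtcIlbcfix_kChooseFrgQuant[idx]) {
+       idx++;
+     }
+*/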
+
+const int16_t WebRtcIlbcfix_kScale[64] = {
+ /* Values in Q16 */
+ 29485, 25003, 21345, 18316, 15578, 13128, 10973, 9310, 7955,
+ 6762, 5789, 4877, 4255, 3699, 3258, 2904, 2595, 2328,
+ 2123, 1932, 1785, 1631, 1493, 1370, 1260, 1167, 1083,
+ /* Values in Q21 */
+ 32081, 29611, 27262, 25229, 23432, 21803, 20226, 18883, 17609,
+ 16408, 15311, 14327, 13390, 12513, 11693, 10919, 10163, 9435,
+ 8739, 8100, 7424, 6813, 6192, 5648, 5122, 4639, 4207, 3798,
+ 3404, 3048, 2706, 2348, 2036, 1713, 1393, 1087, 747
+};
+
+/* frgq in fixed point, but already computed like this:
+ for(i=0; i<64; i++){
+ a = (pow(10,frgq[i])/4.5);
+ WebRtcIlbcfix_kFrgQuantMod[i] = round(a);
+ }
+
+ Values 0:36 in Q8
+ Values 37:58 in Q5
+ Values 59:63 in Q3
+*/
+const int16_t WebRtcIlbcfix_kFrgQuantMod[64] = {
+ /* First 37 values in Q8 */
+ 569, 671, 786, 916, 1077, 1278,
+ 1529, 1802, 2109, 2481, 2898, 3440,
+ 3943, 4535, 5149, 5778, 6464, 7208,
+ 7904, 8682, 9397, 10285, 11240, 12246,
+ 13313, 14382, 15492, 16735, 18131, 19693,
+ 21280, 22912, 24624, 26544, 28432, 30488,
+ 32720,
+ /* 22 values in Q5 */
+ 4383, 4684, 5012, 5363, 5739, 6146,
+ 6603, 7113, 7679, 8285, 9040, 9850,
+ 10838, 11882, 13103, 14467, 15950, 17669,
+ 19712, 22016, 24800, 28576,
+ /* 5 values in Q3 */
+ 8240, 9792, 12040, 15440, 22472
+};
+
+/* Constants for codebook search and creation */
+
+/* Expansion filter to get additional cb section.
+ * Q12 and reversed compared to flp
+ */
+const int16_t WebRtcIlbcfix_kCbFiltersRev[CB_FILTERLEN]={
+ -140, 446, -755, 3302, 2922, -590, 343, -138};
+
+/* Weighting coefficients for short lags.
+ * [0.2 0.4 0.6 0.8] in Q15 */
+const int16_t WebRtcIlbcfix_kAlpha[4]={
+ 6554, 13107, 19661, 26214};
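+
+/* The Q15 values above follow from round(x * 32768):
+   0.2 * 32768 = 6553.6 -> 6554, ..., 0.8 * 32768 = 26214.4 -> 26214. */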
+
+/* Ranges for search and filters at different subframes */
+
+const size_t WebRtcIlbcfix_kSearchRange[5][CB_NSTAGES]={
+ {58,58,58}, {108,44,44}, {108,108,108}, {108,108,108}, {108,108,108}};
+
+const size_t WebRtcIlbcfix_kFilterRange[5]={63, 85, 125, 147, 147};
+
+/* Gain Quantization for the codebook gains of the 3 stages */
+
+/* Q14 (one extra value (max int16_t) to simplify the search) */
+const int16_t WebRtcIlbcfix_kGainSq3[9]={
+ -16384, -10813, -5407, 0, 4096, 8192,
+ 12288, 16384, 32767};
+
+/* Q14 (one extra value (max int16_t) to simplify the search) */
+const int16_t WebRtcIlbcfix_kGainSq4[17]={
+ -17203, -14746, -12288, -9830, -7373, -4915,
+ -2458, 0, 2458, 4915, 7373, 9830,
+ 12288, 14746, 17203, 19661, 32767};
+
+/* Q14 (one extra value (max int16_t) to simplify the search) */
+const int16_t WebRtcIlbcfix_kGainSq5[33]={
+ 614, 1229, 1843, 2458, 3072, 3686,
+ 4301, 4915, 5530, 6144, 6758, 7373,
+ 7987, 8602, 9216, 9830, 10445, 11059,
+ 11674, 12288, 12902, 13517, 14131, 14746,
+ 15360, 15974, 16589, 17203, 17818, 18432,
+ 19046, 19661, 32767};
+
+/* The WebRtcIlbcfix_kGainSq5 values squared, in Q14 */
+const int16_t WebRtcIlbcfix_kGainSq5Sq[32] = {
+ 23, 92, 207, 368, 576, 829,
+ 1129, 1474, 1866, 2304, 2787, 3317,
+ 3893, 4516, 5184, 5897, 6658, 7464,
+ 8318, 9216, 10160, 11151, 12187, 13271,
+ 14400, 15574, 16796, 18062, 19377, 20736,
+ 22140, 23593
+};
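+
+/* Each entry above satisfies
+     kGainSq5Sq[i] == (kGainSq5[i] * kGainSq5[i]) >> 14,
+   e.g. (614 * 614) >> 14 = 376996 >> 14 = 23. */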
+
+const int16_t* const WebRtcIlbcfix_kGain[3] =
+{WebRtcIlbcfix_kGainSq5, WebRtcIlbcfix_kGainSq4, WebRtcIlbcfix_kGainSq3};
+
+
+/* Tables for the Enhancer, using upsampling factor 4 (ENH_UPS0 = 4) */
+
+const int16_t WebRtcIlbcfix_kEnhPolyPhaser[ENH_UPS0][ENH_FLO_MULT2_PLUS1]={
+ {0, 0, 0, 4096, 0, 0, 0},
+ {64, -315, 1181, 3531, -436, 77, -64},
+ {97, -509, 2464, 2464, -509, 97, -97},
+ {77, -436, 3531, 1181, -315, 64, -77}
+};
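+
+/* The rows above are Q12 fractional-delay filters (4096 = 1.0): row 0 is a
+   pure delay, and rows 1-3 interpolate at offsets of 1/4, 2/4 and 3/4 of a
+   sample, giving the factor-4 upsampling. */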
+
+const int16_t WebRtcIlbcfix_kEnhWt[3] = {
+ 4800, 16384, 27968 /* Q16 */
+};
+
+const size_t WebRtcIlbcfix_kEnhPlocs[ENH_NBLOCKS_TOT] = {
+ 160, 480, 800, 1120, 1440, 1760, 2080, 2400 /* Q(-2) */
+};
+
+/* PLC table */
+
+const int16_t WebRtcIlbcfix_kPlcPerSqr[6] = { /* Grid points for square of periodicity in Q15 */
+ 839, 1343, 2048, 2998, 4247, 5849
+};
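+
+/* These grid points match x^4 * 32768 (to within one LSB) for
+   x = 0.40, 0.45, ..., 0.65, e.g. 0.4^4 * 32768 = 838.9 -> 839 and
+   0.5^4 * 32768 = 2048 exactly. */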
+
+const int16_t WebRtcIlbcfix_kPlcPitchFact[6] = { /* Value of y=(x^4-0.4)/(0.7-0.4) in grid points in Q15 */
+ 0, 5462, 10922, 16384, 21846, 27306
+};
+
+const int16_t WebRtcIlbcfix_kPlcPfSlope[6] = { /* Slope of y=(x^4-0.4)/(0.7-0.4) in Q11 */
+ 26667, 18729, 13653, 10258, 7901, 6214
+};
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/constants.h b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/constants.h
new file mode 100644
index 0000000000..a8645c00db
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/constants.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ constants.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_CONSTANTS_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_CONSTANTS_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/* high pass filters */
+
+extern const int16_t WebRtcIlbcfix_kHpInCoefs[];
+extern const int16_t WebRtcIlbcfix_kHpOutCoefs[];
+
+/* Window for start state decision */
+extern const int16_t WebRtcIlbcfix_kStartSequenceEnrgWin[];
+
+/* low pass filter used for downsampling */
+extern const int16_t WebRtcIlbcfix_kLpFiltCoefs[];
+
+/* LPC analysis and quantization */
+
+extern const int16_t WebRtcIlbcfix_kLpcWin[];
+extern const int16_t WebRtcIlbcfix_kLpcAsymWin[];
+extern const int32_t WebRtcIlbcfix_kLpcLagWin[];
+extern const int16_t WebRtcIlbcfix_kLpcChirpSyntDenum[];
+extern const int16_t WebRtcIlbcfix_kLpcChirpWeightDenum[];
+extern const int16_t WebRtcIlbcfix_kLsfDimCb[];
+extern const int16_t WebRtcIlbcfix_kLsfSizeCb[];
+extern const int16_t WebRtcIlbcfix_kLsfCb[];
+extern const int16_t WebRtcIlbcfix_kLsfWeight20ms[];
+extern const int16_t WebRtcIlbcfix_kLsfWeight30ms[];
+extern const int16_t WebRtcIlbcfix_kLsfMean[];
+extern const int16_t WebRtcIlbcfix_kLspMean[];
+extern const int16_t WebRtcIlbcfix_kCos[];
+extern const int16_t WebRtcIlbcfix_kCosDerivative[];
+extern const int16_t WebRtcIlbcfix_kCosGrid[];
+extern const int16_t WebRtcIlbcfix_kAcosDerivative[];
+
+/* state quantization tables */
+
+extern const int16_t WebRtcIlbcfix_kStateSq3[];
+extern const int32_t WebRtcIlbcfix_kChooseFrgQuant[];
+extern const int16_t WebRtcIlbcfix_kScale[];
+extern const int16_t WebRtcIlbcfix_kFrgQuantMod[];
+
+/* Ranges for search and filters at different subframes */
+
+extern const size_t WebRtcIlbcfix_kSearchRange[5][CB_NSTAGES];
+extern const size_t WebRtcIlbcfix_kFilterRange[];
+
+/* gain quantization tables */
+
+extern const int16_t WebRtcIlbcfix_kGainSq3[];
+extern const int16_t WebRtcIlbcfix_kGainSq4[];
+extern const int16_t WebRtcIlbcfix_kGainSq5[];
+extern const int16_t WebRtcIlbcfix_kGainSq5Sq[];
+extern const int16_t* const WebRtcIlbcfix_kGain[];
+
+/* adaptive codebook definitions */
+
+extern const int16_t WebRtcIlbcfix_kCbFiltersRev[];
+extern const int16_t WebRtcIlbcfix_kAlpha[];
+
+/* enhancer definitions */
+
+extern const int16_t WebRtcIlbcfix_kEnhPolyPhaser[ENH_UPS0]
+ [ENH_FLO_MULT2_PLUS1];
+extern const int16_t WebRtcIlbcfix_kEnhWt[];
+extern const size_t WebRtcIlbcfix_kEnhPlocs[];
+
+/* PLC tables */
+
+extern const int16_t WebRtcIlbcfix_kPlcPerSqr[];
+extern const int16_t WebRtcIlbcfix_kPlcPitchFact[];
+extern const int16_t WebRtcIlbcfix_kPlcPfSlope[];
+
+#endif
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/create_augmented_vec.c b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/create_augmented_vec.c
new file mode 100644
index 0000000000..7e21faee6c
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/create_augmented_vec.c
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_CreateAugmentedVec.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/create_augmented_vec.h"
+
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+#include "modules/audio_coding/codecs/ilbc/constants.h"
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+#include "rtc_base/sanitizer.h"
+
+/*----------------------------------------------------------------*
+ * Recreate a specific codebook vector from the augmented part.
+ *
+ *----------------------------------------------------------------*/
+
+void WebRtcIlbcfix_CreateAugmentedVec(
+ size_t index, /* (i) Index for the augmented vector to be
+ created */
+ const int16_t* buffer, /* (i) Pointer to the end of the codebook memory
+ that is used for creation of the augmented
+ codebook */
+ int16_t* cbVec) { /* (o) The constructed codebook vector */
+ size_t ilow;
+ const int16_t *ppo, *ppi;
+ int16_t cbVecTmp[4];
+  /* Interpolation starts 4 elements before cbVec+index, but must not start
+     outside `cbVec`; clamp interp_len so that it stays within `cbVec`. */
+ size_t interp_len = WEBRTC_SPL_MIN(index, 4);
+
+ rtc_MsanCheckInitialized(buffer - index - interp_len, sizeof(buffer[0]),
+ index + interp_len);
+
+ ilow = index - interp_len;
+
+ /* copy the first noninterpolated part */
+ ppo = buffer-index;
+ WEBRTC_SPL_MEMCPY_W16(cbVec, ppo, index);
+
+ /* interpolation */
+ ppo = buffer - interp_len;
+ ppi = buffer - index - interp_len;
+
+ /* perform cbVec[ilow+k] = ((ppi[k]*alphaTbl[k])>>15) +
+ ((ppo[k]*alphaTbl[interp_len-1-k])>>15);
+ for k = 0..interp_len-1
+ */
+ WebRtcSpl_ElementwiseVectorMult(&cbVec[ilow], ppi, WebRtcIlbcfix_kAlpha,
+ interp_len, 15);
+ WebRtcSpl_ReverseOrderMultArrayElements(
+ cbVecTmp, ppo, &WebRtcIlbcfix_kAlpha[interp_len - 1], interp_len, 15);
+ WebRtcSpl_AddVectorsAndShift(&cbVec[ilow], &cbVec[ilow], cbVecTmp, interp_len,
+ 0);
+
+ /* copy the second noninterpolated part */
+ ppo = buffer - index;
+ /* `tempbuff2` is declared in WebRtcIlbcfix_GetCbVec and is SUBL+5 elements
+ long. `buffer` points one element past the end of that vector, i.e., at
+ tempbuff2+SUBL+5. Since ppo=buffer-index, we cannot read any more than
+ `index` elements from `ppo`.
+
+ `cbVec` is declared to be SUBL elements long in WebRtcIlbcfix_CbConstruct.
+ Therefore, we can only write SUBL-index elements to cbVec+index.
+
+ These two conditions limit the number of elements to copy.
+ */
+ WEBRTC_SPL_MEMCPY_W16(cbVec+index, ppo, WEBRTC_SPL_MIN(SUBL-index, index));
+}
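+
+/* Illustrative layout of the result (a sketch): the last `index` samples of
+   the codebook memory are repeated, with an alpha-weighted crossfade over
+   the up-to-4 samples before the seam:
+
+     cbVec[0 .. index-1]                <- buffer[-index .. -1]
+     cbVec[index-interp_len .. index-1] <- crossfade (kAlpha weights)
+     cbVec[index ..]                    <- buffer[-index ..]
+                                           (min(SUBL-index, index) samples)
+*/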
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/create_augmented_vec.h b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/create_augmented_vec.h
new file mode 100644
index 0000000000..d7e5be1c2f
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/create_augmented_vec.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_CreateAugmentedVec.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_CREATE_AUGMENTED_VEC_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_CREATE_AUGMENTED_VEC_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+/*----------------------------------------------------------------*
+ * Recreate a specific codebook vector from the augmented part.
+ *
+ *----------------------------------------------------------------*/
+
+void WebRtcIlbcfix_CreateAugmentedVec(
+ size_t index, /* (i) Index for the augmented vector to be
+ created */
+ const int16_t* buffer, /* (i) Pointer to the end of the codebook memory
+ that is used for creation of the augmented
+ codebook */
+    int16_t* cbVec); /* (o) The constructed codebook vector */
+
+#endif
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/decode.c b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/decode.c
new file mode 100644
index 0000000000..d7621d5b65
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/decode.c
@@ -0,0 +1,261 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_Decode.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/decode.h"
+
+#include "modules/audio_coding/codecs/ilbc/constants.h"
+#include "modules/audio_coding/codecs/ilbc/decode_residual.h"
+#include "modules/audio_coding/codecs/ilbc/decoder_interpolate_lsf.h"
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+#include "modules/audio_coding/codecs/ilbc/do_plc.h"
+#include "modules/audio_coding/codecs/ilbc/enhancer_interface.h"
+#include "modules/audio_coding/codecs/ilbc/hp_output.h"
+#include "modules/audio_coding/codecs/ilbc/index_conv_dec.h"
+#include "modules/audio_coding/codecs/ilbc/init_decode.h"
+#include "modules/audio_coding/codecs/ilbc/lsf_check.h"
+#include "modules/audio_coding/codecs/ilbc/simple_lsf_dequant.h"
+#include "modules/audio_coding/codecs/ilbc/unpack_bits.h"
+#include "modules/audio_coding/codecs/ilbc/xcorr_coef.h"
+#include "rtc_base/system/arch.h"
+
+#ifndef WEBRTC_ARCH_BIG_ENDIAN
+#include "modules/audio_coding/codecs/ilbc/swap_bytes.h"
+#endif
+
+/*----------------------------------------------------------------*
+ * main decoder function
+ *---------------------------------------------------------------*/
+
+int WebRtcIlbcfix_DecodeImpl(
+ int16_t *decblock, /* (o) decoded signal block */
+ const uint16_t *bytes, /* (i) encoded signal bits */
+ IlbcDecoder *iLBCdec_inst, /* (i/o) the decoder state
+ structure */
+ int16_t mode /* (i) 0: bad packet, PLC,
+ 1: normal */
+ ) {
+ const int old_mode = iLBCdec_inst->mode;
+ const int old_use_enhancer = iLBCdec_inst->use_enhancer;
+
+ size_t i;
+ int16_t order_plus_one;
+
+ int16_t last_bit;
+ int16_t *data;
+ /* Stack based */
+ int16_t decresidual[BLOCKL_MAX];
+ int16_t PLCresidual[BLOCKL_MAX + LPC_FILTERORDER];
+ int16_t syntdenum[NSUB_MAX*(LPC_FILTERORDER+1)];
+ int16_t PLClpc[LPC_FILTERORDER + 1];
+#ifndef WEBRTC_ARCH_BIG_ENDIAN
+ uint16_t swapped[NO_OF_WORDS_30MS];
+#endif
+ iLBC_bits *iLBCbits_inst = (iLBC_bits*)PLCresidual;
+
+  /* Reuse some non-overlapping buffers to save stack memory */
+ data = &PLCresidual[LPC_FILTERORDER];
+
+ if (mode) { /* the data are good */
+
+ /* decode data */
+
+    /* Unpack the bits into parameters */
+
+#ifndef WEBRTC_ARCH_BIG_ENDIAN
+ WebRtcIlbcfix_SwapBytes(bytes, iLBCdec_inst->no_of_words, swapped);
+ last_bit = WebRtcIlbcfix_UnpackBits(swapped, iLBCbits_inst, iLBCdec_inst->mode);
+#else
+ last_bit = WebRtcIlbcfix_UnpackBits(bytes, iLBCbits_inst, iLBCdec_inst->mode);
+#endif
+
+ /* Check for bit errors */
+ if (iLBCbits_inst->startIdx<1)
+ mode = 0;
+ if ((iLBCdec_inst->mode==20) && (iLBCbits_inst->startIdx>3))
+ mode = 0;
+ if ((iLBCdec_inst->mode==30) && (iLBCbits_inst->startIdx>5))
+ mode = 0;
+ if (last_bit==1)
+ mode = 0;
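+    /* A valid start index satisfies 1 <= startIdx <= nsub-1 (the start
+       state spans STATE_LEN = 2*SUBL samples), i.e. at most 3 for 20 ms
+       frames and 5 for 30 ms frames, which the checks above enforce. */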
+
+    if (mode) { /* No bit errors were detected, continue decoding */
+ /* Stack based */
+ int16_t lsfdeq[LPC_FILTERORDER*LPC_N_MAX];
+ int16_t weightdenum[(LPC_FILTERORDER + 1)*NSUB_MAX];
+
+ /* adjust index */
+ WebRtcIlbcfix_IndexConvDec(iLBCbits_inst->cb_index);
+
+ /* decode the lsf */
+ WebRtcIlbcfix_SimpleLsfDeQ(lsfdeq, (int16_t*)(iLBCbits_inst->lsf), iLBCdec_inst->lpc_n);
+ WebRtcIlbcfix_LsfCheck(lsfdeq, LPC_FILTERORDER, iLBCdec_inst->lpc_n);
+ WebRtcIlbcfix_DecoderInterpolateLsp(syntdenum, weightdenum,
+ lsfdeq, LPC_FILTERORDER, iLBCdec_inst);
+
+ /* Decode the residual using the cb and gain indexes */
+ if (!WebRtcIlbcfix_DecodeResidual(iLBCdec_inst, iLBCbits_inst,
+ decresidual, syntdenum))
+ goto error;
+
+      /* prepare the PLC for a possible future loss */
+ WebRtcIlbcfix_DoThePlc(
+ PLCresidual, PLClpc, 0, decresidual,
+ syntdenum + (LPC_FILTERORDER + 1) * (iLBCdec_inst->nsub - 1),
+ iLBCdec_inst->last_lag, iLBCdec_inst);
+
+ /* Use the output from doThePLC */
+ WEBRTC_SPL_MEMCPY_W16(decresidual, PLCresidual, iLBCdec_inst->blockl);
+ }
+
+ }
+
+ if (mode == 0) {
+ /* the data is bad (either a PLC call
+ * was made or a bit error was detected)
+ */
+
+ /* packet loss conceal */
+
+ WebRtcIlbcfix_DoThePlc(PLCresidual, PLClpc, 1, decresidual, syntdenum,
+ iLBCdec_inst->last_lag, iLBCdec_inst);
+
+ WEBRTC_SPL_MEMCPY_W16(decresidual, PLCresidual, iLBCdec_inst->blockl);
+
+ order_plus_one = LPC_FILTERORDER + 1;
+
+ for (i = 0; i < iLBCdec_inst->nsub; i++) {
+ WEBRTC_SPL_MEMCPY_W16(syntdenum+(i*order_plus_one),
+ PLClpc, order_plus_one);
+ }
+ }
+
+ if ((*iLBCdec_inst).use_enhancer == 1) { /* Enhancer activated */
+
+ /* Update the filter and filter coefficients if there was a packet loss */
+ if (iLBCdec_inst->prev_enh_pl==2) {
+ for (i=0;i<iLBCdec_inst->nsub;i++) {
+ WEBRTC_SPL_MEMCPY_W16(&(iLBCdec_inst->old_syntdenum[i*(LPC_FILTERORDER+1)]),
+ syntdenum, (LPC_FILTERORDER+1));
+ }
+ }
+
+ /* post filtering */
+ (*iLBCdec_inst).last_lag =
+ WebRtcIlbcfix_EnhancerInterface(data, decresidual, iLBCdec_inst);
+
+ /* synthesis filtering */
+
+ /* Set up the filter state */
+ WEBRTC_SPL_MEMCPY_W16(&data[-LPC_FILTERORDER], iLBCdec_inst->syntMem, LPC_FILTERORDER);
+
+ if (iLBCdec_inst->mode==20) {
+ /* Enhancer has 40 samples delay */
+ i=0;
+ WebRtcSpl_FilterARFastQ12(
+ data, data,
+ iLBCdec_inst->old_syntdenum + (i+iLBCdec_inst->nsub-1)*(LPC_FILTERORDER+1),
+ LPC_FILTERORDER+1, SUBL);
+
+ for (i=1; i < iLBCdec_inst->nsub; i++) {
+ WebRtcSpl_FilterARFastQ12(
+ data+i*SUBL, data+i*SUBL,
+ syntdenum+(i-1)*(LPC_FILTERORDER+1),
+ LPC_FILTERORDER+1, SUBL);
+ }
+
+ } else if (iLBCdec_inst->mode==30) {
+ /* Enhancer has 80 samples delay */
+ for (i=0; i < 2; i++) {
+ WebRtcSpl_FilterARFastQ12(
+ data+i*SUBL, data+i*SUBL,
+ iLBCdec_inst->old_syntdenum + (i+4)*(LPC_FILTERORDER+1),
+ LPC_FILTERORDER+1, SUBL);
+ }
+ for (i=2; i < iLBCdec_inst->nsub; i++) {
+ WebRtcSpl_FilterARFastQ12(
+ data+i*SUBL, data+i*SUBL,
+ syntdenum+(i-2)*(LPC_FILTERORDER+1),
+ LPC_FILTERORDER+1, SUBL);
+ }
+ }
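+    /* In both modes the first subframe(s) are filtered with the previous
+       frame's coefficients (old_syntdenum): the enhancer delays the signal
+       by 40 or 80 samples, so those samples still belong to the previous
+       frame. */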
+
+ /* Save the filter state */
+ WEBRTC_SPL_MEMCPY_W16(iLBCdec_inst->syntMem, &data[iLBCdec_inst->blockl-LPC_FILTERORDER], LPC_FILTERORDER);
+
+ } else { /* Enhancer not activated */
+ size_t lag;
+
+ /* Find last lag (since the enhancer is not called to give this info) */
+ lag = 20;
+ if (iLBCdec_inst->mode==20) {
+ lag = WebRtcIlbcfix_XcorrCoef(
+ &decresidual[iLBCdec_inst->blockl-60],
+ &decresidual[iLBCdec_inst->blockl-60-lag],
+ 60,
+ 80, lag, -1);
+ } else {
+ lag = WebRtcIlbcfix_XcorrCoef(
+ &decresidual[iLBCdec_inst->blockl-ENH_BLOCKL],
+ &decresidual[iLBCdec_inst->blockl-ENH_BLOCKL-lag],
+ ENH_BLOCKL,
+ 100, lag, -1);
+ }
+
+ /* Store lag (it is needed if next packet is lost) */
+ (*iLBCdec_inst).last_lag = lag;
+
+ /* copy data and run synthesis filter */
+ WEBRTC_SPL_MEMCPY_W16(data, decresidual, iLBCdec_inst->blockl);
+
+ /* Set up the filter state */
+ WEBRTC_SPL_MEMCPY_W16(&data[-LPC_FILTERORDER], iLBCdec_inst->syntMem, LPC_FILTERORDER);
+
+ for (i=0; i < iLBCdec_inst->nsub; i++) {
+ WebRtcSpl_FilterARFastQ12(
+ data+i*SUBL, data+i*SUBL,
+ syntdenum + i*(LPC_FILTERORDER+1),
+ LPC_FILTERORDER+1, SUBL);
+ }
+
+ /* Save the filter state */
+ WEBRTC_SPL_MEMCPY_W16(iLBCdec_inst->syntMem, &data[iLBCdec_inst->blockl-LPC_FILTERORDER], LPC_FILTERORDER);
+ }
+
+ WEBRTC_SPL_MEMCPY_W16(decblock,data,iLBCdec_inst->blockl);
+
+ /* High pass filter the signal (with upscaling a factor 2 and saturation) */
+ WebRtcIlbcfix_HpOutput(decblock, (int16_t*)WebRtcIlbcfix_kHpOutCoefs,
+ iLBCdec_inst->hpimemy, iLBCdec_inst->hpimemx,
+ iLBCdec_inst->blockl);
+
+ WEBRTC_SPL_MEMCPY_W16(iLBCdec_inst->old_syntdenum,
+ syntdenum, iLBCdec_inst->nsub*(LPC_FILTERORDER+1));
+
+ iLBCdec_inst->prev_enh_pl=0;
+
+ if (mode==0) { /* PLC was used */
+ iLBCdec_inst->prev_enh_pl=1;
+ }
+
+ return 0; // Success.
+
+error:
+ // The decoder got sick from eating that data. Reset it and return.
+ WebRtcIlbcfix_InitDecode(iLBCdec_inst, old_mode, old_use_enhancer);
+ return -1; // Error
+}
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/decode.h b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/decode.h
new file mode 100644
index 0000000000..a7d2910115
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/decode.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_Decode.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_DECODE_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_DECODE_H_
+
+#include <stdint.h>
+
+#include "absl/base/attributes.h"
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ * main decoder function
+ *---------------------------------------------------------------*/
+
+// Returns 0 on success, -1 on error.
+ABSL_MUST_USE_RESULT
+int WebRtcIlbcfix_DecodeImpl(
+ int16_t* decblock, /* (o) decoded signal block */
+ const uint16_t* bytes, /* (i) encoded signal bits */
+ IlbcDecoder* iLBCdec_inst, /* (i/o) the decoder state
+ structure */
+ int16_t mode /* (i) 0: bad packet, PLC,
+ 1: normal */
+);
+
+#endif
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/decode_residual.c b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/decode_residual.c
new file mode 100644
index 0000000000..a9668e2889
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/decode_residual.c
@@ -0,0 +1,185 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_DecodeResidual.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/decode_residual.h"
+
+#include <string.h>
+
+#include "modules/audio_coding/codecs/ilbc/cb_construct.h"
+#include "modules/audio_coding/codecs/ilbc/constants.h"
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+#include "modules/audio_coding/codecs/ilbc/do_plc.h"
+#include "modules/audio_coding/codecs/ilbc/enhancer_interface.h"
+#include "modules/audio_coding/codecs/ilbc/index_conv_dec.h"
+#include "modules/audio_coding/codecs/ilbc/lsf_check.h"
+#include "modules/audio_coding/codecs/ilbc/state_construct.h"
+#include "modules/audio_coding/codecs/ilbc/xcorr_coef.h"
+
+/*----------------------------------------------------------------*
+ * frame residual decoder function (subroutine to iLBC_decode)
+ *---------------------------------------------------------------*/
+
+bool WebRtcIlbcfix_DecodeResidual(
+ IlbcDecoder *iLBCdec_inst,
+ /* (i/o) the decoder state structure */
+ iLBC_bits *iLBC_encbits, /* (i/o) Encoded bits, which are used
+ for the decoding */
+ int16_t *decresidual, /* (o) decoded residual frame */
+ int16_t *syntdenum /* (i) the decoded synthesis filter
+ coefficients */
+ ) {
+ size_t meml_gotten, diff, start_pos;
+ size_t subcount, subframe;
+ int16_t *reverseDecresidual = iLBCdec_inst->enh_buf; /* Reversed decoded data, used for decoding backwards in time (reuse memory in state) */
+ int16_t *memVec = iLBCdec_inst->prevResidual; /* Memory for codebook and filter state (reuse memory in state) */
+ int16_t *mem = &memVec[CB_HALFFILTERLEN]; /* Memory for codebook */
+
+ diff = STATE_LEN - iLBCdec_inst->state_short_len;
+
+ if (iLBC_encbits->state_first == 1) {
+ start_pos = (iLBC_encbits->startIdx-1)*SUBL;
+ } else {
+ start_pos = (iLBC_encbits->startIdx-1)*SUBL + diff;
+ }
+
+ /* decode scalar part of start state */
+
+ WebRtcIlbcfix_StateConstruct(iLBC_encbits->idxForMax,
+ iLBC_encbits->idxVec, &syntdenum[(iLBC_encbits->startIdx-1)*(LPC_FILTERORDER+1)],
+ &decresidual[start_pos], iLBCdec_inst->state_short_len
+ );
+
+ if (iLBC_encbits->state_first) { /* put adaptive part in the end */
+
+ /* setup memory */
+
+ WebRtcSpl_MemSetW16(mem, 0, CB_MEML - iLBCdec_inst->state_short_len);
+ WEBRTC_SPL_MEMCPY_W16(mem+CB_MEML-iLBCdec_inst->state_short_len, decresidual+start_pos,
+ iLBCdec_inst->state_short_len);
+
+ /* construct decoded vector */
+
+ if (!WebRtcIlbcfix_CbConstruct(
+ &decresidual[start_pos + iLBCdec_inst->state_short_len],
+ iLBC_encbits->cb_index, iLBC_encbits->gain_index,
+ mem + CB_MEML - ST_MEM_L_TBL, ST_MEM_L_TBL, diff))
+ return false; // Error.
+
+ }
+ else {/* put adaptive part in the beginning */
+
+ /* setup memory */
+
+ meml_gotten = iLBCdec_inst->state_short_len;
+ WebRtcSpl_MemCpyReversedOrder(mem+CB_MEML-1,
+ decresidual+start_pos, meml_gotten);
+ WebRtcSpl_MemSetW16(mem, 0, CB_MEML - meml_gotten);
+
+ /* construct decoded vector */
+
+ if (!WebRtcIlbcfix_CbConstruct(reverseDecresidual, iLBC_encbits->cb_index,
+ iLBC_encbits->gain_index,
+ mem + CB_MEML - ST_MEM_L_TBL, ST_MEM_L_TBL,
+ diff))
+ return false; // Error.
+
+ /* get decoded residual from reversed vector */
+
+ WebRtcSpl_MemCpyReversedOrder(&decresidual[start_pos-1],
+ reverseDecresidual, diff);
+ }
+
+ /* counter for predicted subframes */
+
+ subcount=1;
+
+ /* forward prediction of subframes */
+
+ if (iLBCdec_inst->nsub > iLBC_encbits->startIdx + 1) {
+
+ /* setup memory */
+ WebRtcSpl_MemSetW16(mem, 0, CB_MEML-STATE_LEN);
+ WEBRTC_SPL_MEMCPY_W16(mem+CB_MEML-STATE_LEN,
+ decresidual+(iLBC_encbits->startIdx-1)*SUBL, STATE_LEN);
+
+ /* loop over subframes to encode */
+
+ size_t Nfor = iLBCdec_inst->nsub - iLBC_encbits->startIdx - 1;
+ for (subframe=0; subframe<Nfor; subframe++) {
+
+ /* construct decoded vector */
+ if (!WebRtcIlbcfix_CbConstruct(
+ &decresidual[(iLBC_encbits->startIdx + 1 + subframe) * SUBL],
+ iLBC_encbits->cb_index + subcount * CB_NSTAGES,
+ iLBC_encbits->gain_index + subcount * CB_NSTAGES, mem, MEM_LF_TBL,
+ SUBL))
+        return false; // Error.
+
+ /* update memory */
+ memmove(mem, mem + SUBL, (CB_MEML - SUBL) * sizeof(*mem));
+ WEBRTC_SPL_MEMCPY_W16(mem+CB_MEML-SUBL,
+ &decresidual[(iLBC_encbits->startIdx+1+subframe)*SUBL], SUBL);
+
+ subcount++;
+ }
+
+ }
+
+ /* backward prediction of subframes */
+
+ if (iLBC_encbits->startIdx > 1) {
+
+ /* setup memory */
+
+ meml_gotten = SUBL*(iLBCdec_inst->nsub+1-iLBC_encbits->startIdx);
+ if( meml_gotten > CB_MEML ) {
+ meml_gotten=CB_MEML;
+ }
+
+ WebRtcSpl_MemCpyReversedOrder(mem+CB_MEML-1,
+ decresidual+(iLBC_encbits->startIdx-1)*SUBL, meml_gotten);
+ WebRtcSpl_MemSetW16(mem, 0, CB_MEML - meml_gotten);
+
+ /* loop over subframes to decode */
+
+ size_t Nback = iLBC_encbits->startIdx - 1;
+ for (subframe=0; subframe<Nback; subframe++) {
+
+ /* construct decoded vector */
+ if (!WebRtcIlbcfix_CbConstruct(
+ &reverseDecresidual[subframe * SUBL],
+ iLBC_encbits->cb_index + subcount * CB_NSTAGES,
+ iLBC_encbits->gain_index + subcount * CB_NSTAGES, mem, MEM_LF_TBL,
+ SUBL))
+ return false; // Error.
+
+ /* update memory */
+ memmove(mem, mem + SUBL, (CB_MEML - SUBL) * sizeof(*mem));
+ WEBRTC_SPL_MEMCPY_W16(mem+CB_MEML-SUBL,
+ &reverseDecresidual[subframe*SUBL], SUBL);
+
+ subcount++;
+ }
+
+ /* get decoded residual from reversed vector */
+ WebRtcSpl_MemCpyReversedOrder(decresidual+SUBL*Nback-1,
+ reverseDecresidual, SUBL*Nback);
+ }
+
+ return true; // Success.
+}
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/decode_residual.h b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/decode_residual.h
new file mode 100644
index 0000000000..d079577661
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/decode_residual.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_DecodeResidual.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_DECODE_RESIDUAL_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_DECODE_RESIDUAL_H_
+
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#include "absl/base/attributes.h"
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ * frame residual decoder function (subroutine to iLBC_decode)
+ *---------------------------------------------------------------*/
+
+// Returns true on success, false on failure. In case of failure, the decoder
+// state may be corrupted and needs resetting.
+ABSL_MUST_USE_RESULT
+bool WebRtcIlbcfix_DecodeResidual(
+ IlbcDecoder* iLBCdec_inst, /* (i/o) the decoder state structure */
+ iLBC_bits* iLBC_encbits, /* (i/o) Encoded bits, which are used
+ for the decoding */
+ int16_t* decresidual, /* (o) decoded residual frame */
+ int16_t* syntdenum /* (i) the decoded synthesis filter
+ coefficients */
+);
+
+#endif
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/decoder_interpolate_lsf.c b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/decoder_interpolate_lsf.c
new file mode 100644
index 0000000000..d96bb9b2e9
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/decoder_interpolate_lsf.c
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_DecoderInterpolateLsp.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/decoder_interpolate_lsf.h"
+
+#include "modules/audio_coding/codecs/ilbc/bw_expand.h"
+#include "modules/audio_coding/codecs/ilbc/constants.h"
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+#include "modules/audio_coding/codecs/ilbc/lsf_interpolate_to_poly_dec.h"
+
+/*----------------------------------------------------------------*
+ * obtain synthesis and weighting filters from lsf coefficients
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_DecoderInterpolateLsp(
+ int16_t *syntdenum, /* (o) synthesis filter coefficients */
+    int16_t *weightdenum, /* (o) weighting denominator
+ coefficients */
+ int16_t *lsfdeq, /* (i) dequantized lsf coefficients */
+ int16_t length, /* (i) length of lsf coefficient vector */
+ IlbcDecoder *iLBCdec_inst
+ /* (i) the decoder state structure */
+ ){
+ size_t i;
+ int pos, lp_length;
+ int16_t lp[LPC_FILTERORDER + 1], *lsfdeq2;
+
+ lsfdeq2 = lsfdeq + length;
+ lp_length = length + 1;
+
+ if (iLBCdec_inst->mode==30) {
+ /* subframe 1: Interpolation between old and first LSF */
+
+ WebRtcIlbcfix_LspInterpolate2PolyDec(lp, (*iLBCdec_inst).lsfdeqold, lsfdeq,
+ WebRtcIlbcfix_kLsfWeight30ms[0], length);
+ WEBRTC_SPL_MEMCPY_W16(syntdenum,lp,lp_length);
+ WebRtcIlbcfix_BwExpand(weightdenum, lp, (int16_t*)WebRtcIlbcfix_kLpcChirpSyntDenum, (int16_t)lp_length);
+
+ /* subframes 2 to 6: interpolation between first and last LSF */
+
+ pos = lp_length;
+ for (i = 1; i < 6; i++) {
+ WebRtcIlbcfix_LspInterpolate2PolyDec(lp, lsfdeq, lsfdeq2,
+ WebRtcIlbcfix_kLsfWeight30ms[i], length);
+ WEBRTC_SPL_MEMCPY_W16(syntdenum + pos,lp,lp_length);
+ WebRtcIlbcfix_BwExpand(weightdenum + pos, lp,
+ (int16_t*)WebRtcIlbcfix_kLpcChirpSyntDenum, (int16_t)lp_length);
+ pos += lp_length;
+ }
+ } else { /* iLBCdec_inst->mode=20 */
+ /* subframes 1 to 4: interpolation between old and new LSF */
+ pos = 0;
+ for (i = 0; i < iLBCdec_inst->nsub; i++) {
+ WebRtcIlbcfix_LspInterpolate2PolyDec(lp, iLBCdec_inst->lsfdeqold, lsfdeq,
+ WebRtcIlbcfix_kLsfWeight20ms[i], length);
+ WEBRTC_SPL_MEMCPY_W16(syntdenum+pos,lp,lp_length);
+ WebRtcIlbcfix_BwExpand(weightdenum+pos, lp,
+ (int16_t*)WebRtcIlbcfix_kLpcChirpSyntDenum, (int16_t)lp_length);
+ pos += lp_length;
+ }
+ }
+
+ /* update memory */
+
+ if (iLBCdec_inst->mode==30) {
+ WEBRTC_SPL_MEMCPY_W16(iLBCdec_inst->lsfdeqold, lsfdeq2, length);
+ } else {
+ WEBRTC_SPL_MEMCPY_W16(iLBCdec_inst->lsfdeqold, lsfdeq, length);
+ }
+}
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/decoder_interpolate_lsf.h b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/decoder_interpolate_lsf.h
new file mode 100644
index 0000000000..8b08114467
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/decoder_interpolate_lsf.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_DecoderInterpolateLsp.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_DECODER_INTERPOLATE_LSF_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_DECODER_INTERPOLATE_LSF_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ * obtain synthesis and weighting filters from lsf coefficients
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_DecoderInterpolateLsp(
+ int16_t* syntdenum, /* (o) synthesis filter coefficients */
+    int16_t* weightdenum, /* (o) weighting denominator
+ coefficients */
+ int16_t* lsfdeq, /* (i) dequantized lsf coefficients */
+ int16_t length, /* (i) length of lsf coefficient vector */
+ IlbcDecoder* iLBCdec_inst
+ /* (i) the decoder state structure */
+ );
+
+#endif
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/defines.h b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/defines.h
new file mode 100644
index 0000000000..64135c4887
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/defines.h
@@ -0,0 +1,225 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ define.h
+
+******************************************************************/
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_DEFINES_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_DEFINES_H_
+
+#include <stdint.h>
+#include <string.h>
+
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+
+/* general codec settings */
+
+#define FS 8000
+#define BLOCKL_20MS 160
+#define BLOCKL_30MS 240
+#define BLOCKL_MAX 240
+#define NSUB_20MS 4
+#define NSUB_30MS 6
+#define NSUB_MAX 6
+#define NASUB_20MS 2
+#define NASUB_30MS 4
+#define NASUB_MAX 4
+#define SUBL 40
+#define STATE_LEN 80
+#define STATE_SHORT_LEN_30MS 58
+#define STATE_SHORT_LEN_20MS 57
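+
+/* At FS = 8000 Hz the block lengths follow from the frame durations:
+   8000 * 0.020 = 160 and 8000 * 0.030 = 240 samples, i.e. 4 and 6
+   subframes of SUBL = 40 samples respectively. */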
+
+/* LPC settings */
+
+#define LPC_FILTERORDER 10
+#define LPC_LOOKBACK 60
+#define LPC_N_20MS 1
+#define LPC_N_30MS 2
+#define LPC_N_MAX 2
+#define LPC_ASYMDIFF 20
+#define LSF_NSPLIT 3
+#define LSF_NUMBER_OF_STEPS 4
+#define LPC_HALFORDER 5
+#define COS_GRID_POINTS 60
+
+/* cb settings */
+
+#define CB_NSTAGES 3
+#define CB_EXPAND 2
+#define CB_MEML 147
+#define CB_FILTERLEN (2 * 4)
+#define CB_HALFFILTERLEN 4
+#define CB_RESRANGE 34
+#define CB_MAXGAIN_FIXQ6 83 /* error = -0.24% */
+#define CB_MAXGAIN_FIXQ14 21299
+
+/* enhancer */
+
+#define ENH_BLOCKL 80 /* block length */
+#define ENH_BLOCKL_HALF (ENH_BLOCKL / 2)
+#define ENH_HL \
+  3 /* 2*ENH_HL+1 is the number of \
+       blocks in the second        \
+       sequence */
+#define ENH_SLOP \
+  2 /* max difference between the  \
+       estimated and the correct   \
+       pitch period */
+#define ENH_PLOCSL \
+  8 /* buffer length for pitch \
+       estimates and pitch     \
+       locations */
+#define ENH_OVERHANG 2
+#define ENH_UPS0 4 /* upsampling rate */
+#define ENH_FL0 3 /* 2*FLO+1 is the length of each filter */
+#define ENH_FLO_MULT2_PLUS1 7
+#define ENH_VECTL (ENH_BLOCKL + 2 * ENH_FL0)
+#define ENH_CORRDIM (2 * ENH_SLOP + 1)
+#define ENH_NBLOCKS (BLOCKL / ENH_BLOCKL)
+#define ENH_NBLOCKS_EXTRA 5
+#define ENH_NBLOCKS_TOT 8 /* ENH_NBLOCKS+ENH_NBLOCKS_EXTRA */
+#define ENH_BUFL ((ENH_NBLOCKS_TOT) * ENH_BLOCKL)
+#define ENH_BUFL_FILTEROVERHEAD 3
+#define ENH_A0 819 /* Q14 */
+#define ENH_A0_MINUS_A0A0DIV4 848256041 /* Q34 */
+#define ENH_A0DIV2 26843546 /* Q30 */
+
+/* PLC */
+
+/* Down sampling */
+
+#define FILTERORDER_DS_PLUS1 7
+#define DELAY_DS 3
+#define FACTOR_DS 2
+
+/* bit stream defs */
+
+#define NO_OF_BYTES_20MS 38
+#define NO_OF_BYTES_30MS 50
+#define NO_OF_WORDS_20MS 19
+#define NO_OF_WORDS_30MS 25
+#define STATE_BITS 3
+#define BYTE_LEN 8
+#define ULP_CLASSES 3
+
+/* help parameters */
+
+#define TWO_PI_FIX 25736 /* Q12 */
+
+/* Constants for codebook search and creation */
+
+#define ST_MEM_L_TBL 85
+#define MEM_LF_TBL 147
+
+/* Struct for the bits */
+typedef struct iLBC_bits_t_ {
+ int16_t lsf[LSF_NSPLIT * LPC_N_MAX];
+ int16_t cb_index[CB_NSTAGES * (NASUB_MAX + 1)]; /* First CB_NSTAGES values
+ contains extra CB index */
+ int16_t gain_index[CB_NSTAGES * (NASUB_MAX + 1)]; /* First CB_NSTAGES values
+ contains extra CB gain */
+ size_t idxForMax;
+ int16_t state_first;
+ int16_t idxVec[STATE_SHORT_LEN_30MS];
+ int16_t firstbits;
+ size_t startIdx;
+} iLBC_bits;
+
+/* type definition encoder instance */
+typedef struct IlbcEncoder_ {
+ /* flag for frame size mode */
+ int16_t mode;
+
+ /* basic parameters for different frame sizes */
+ size_t blockl;
+ size_t nsub;
+ int16_t nasub;
+ size_t no_of_bytes, no_of_words;
+ int16_t lpc_n;
+ size_t state_short_len;
+
+ /* analysis filter state */
+ int16_t anaMem[LPC_FILTERORDER];
+
+ /* Fix-point old lsf parameters for interpolation */
+ int16_t lsfold[LPC_FILTERORDER];
+ int16_t lsfdeqold[LPC_FILTERORDER];
+
+ /* signal buffer for LP analysis */
+ int16_t lpc_buffer[LPC_LOOKBACK + BLOCKL_MAX];
+
+ /* state of input HP filter */
+ int16_t hpimemx[2];
+ int16_t hpimemy[4];
+
+#ifdef SPLIT_10MS
+ int16_t weightdenumbuf[66];
+ int16_t past_samples[160];
+ uint16_t bytes[25];
+ int16_t section;
+ int16_t Nfor_flag;
+ int16_t Nback_flag;
+ int16_t start_pos;
+ size_t diff;
+#endif
+
+} IlbcEncoder;
+
+/* type definition decoder instance */
+typedef struct IlbcDecoder_ {
+ /* flag for frame size mode */
+ int16_t mode;
+
+ /* basic parameters for different frame sizes */
+ size_t blockl;
+ size_t nsub;
+ int16_t nasub;
+ size_t no_of_bytes, no_of_words;
+ int16_t lpc_n;
+ size_t state_short_len;
+
+ /* synthesis filter state */
+ int16_t syntMem[LPC_FILTERORDER];
+
+ /* old LSF for interpolation */
+ int16_t lsfdeqold[LPC_FILTERORDER];
+
+ /* pitch lag estimated in enhancer and used in PLC */
+ size_t last_lag;
+
+ /* PLC state information */
+ int consPLICount, prev_enh_pl;
+ int16_t perSquare;
+
+ int16_t prevScale, prevPLI;
+ size_t prevLag;
+ int16_t prevLpc[LPC_FILTERORDER + 1];
+ int16_t prevResidual[NSUB_MAX * SUBL];
+ int16_t seed;
+
+ /* previous synthesis filter parameters */
+
+ int16_t old_syntdenum[(LPC_FILTERORDER + 1) * NSUB_MAX];
+
+ /* state of output HP filter */
+ int16_t hpimemx[2];
+ int16_t hpimemy[4];
+
+ /* enhancer state information */
+ int use_enhancer;
+ int16_t enh_buf[ENH_BUFL + ENH_BUFL_FILTEROVERHEAD];
+ size_t enh_period[ENH_NBLOCKS_TOT];
+
+} IlbcDecoder;
+
+#endif
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/do_plc.c b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/do_plc.c
new file mode 100644
index 0000000000..9ca6ca48e9
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/do_plc.c
@@ -0,0 +1,309 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_DoThePlc.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/do_plc.h"
+
+#include "modules/audio_coding/codecs/ilbc/bw_expand.h"
+#include "modules/audio_coding/codecs/ilbc/comp_corr.h"
+#include "modules/audio_coding/codecs/ilbc/constants.h"
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ * Packet loss concealment routine. Conceals a residual signal
+ * and LP parameters. If no packet loss, update state.
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_DoThePlc(
+ int16_t *PLCresidual, /* (o) concealed residual */
+ int16_t *PLClpc, /* (o) concealed LP parameters */
+ int16_t PLI, /* (i) packet loss indicator
+                        0 = no PL, 1 = PL */
+ int16_t *decresidual, /* (i) decoded residual */
+ int16_t *lpc, /* (i) decoded LPC (only used for no PL) */
+ size_t inlag, /* (i) pitch lag */
+ IlbcDecoder *iLBCdec_inst
+ /* (i/o) decoder instance */
+ ){
+ size_t i;
+ int32_t cross, ener, cross_comp, ener_comp = 0;
+ int32_t measure, maxMeasure, energy;
+ int32_t noise_energy_threshold_30dB;
+ int16_t max, crossSquareMax, crossSquare;
+ size_t j, lag, randlag;
+ int16_t tmp1, tmp2;
+ int16_t shift1, shift2, shift3, shiftMax;
+ int16_t scale3;
+ size_t corrLen;
+ int32_t tmpW32, tmp2W32;
+ int16_t use_gain;
+ int16_t tot_gain;
+ int16_t max_perSquare;
+ int16_t scale1, scale2;
+ int16_t totscale;
+ int32_t nom;
+ int16_t denom;
+ int16_t pitchfact;
+ size_t use_lag;
+ int ind;
+ int16_t randvec[BLOCKL_MAX];
+
+ /* Packet Loss */
+ if (PLI == 1) {
+
+ (*iLBCdec_inst).consPLICount += 1;
+
+ /* if previous frame not lost,
+ determine pitch pred. gain */
+
+ if (iLBCdec_inst->prevPLI != 1) {
+
+      /* At most 60 samples are correlated; preserve as much accuracy
+         as possible without overflowing */
+ max = WebRtcSpl_MaxAbsValueW16((*iLBCdec_inst).prevResidual,
+ iLBCdec_inst->blockl);
+ scale3 = (WebRtcSpl_GetSizeInBits(max)<<1) - 25;
+ if (scale3 < 0) {
+ scale3 = 0;
+ }
+
+ /* Store scale for use when interpolating between the
+ * concealment and the received packet */
+ iLBCdec_inst->prevScale = scale3;
+
+ /* Search around the previous lag +/-3 to find the
+ best pitch period */
+ lag = inlag - 3;
+
+ /* Guard against getting outside the frame */
+ corrLen = (size_t)WEBRTC_SPL_MIN(60, iLBCdec_inst->blockl-(inlag+3));
+
+ WebRtcIlbcfix_CompCorr( &cross, &ener,
+ iLBCdec_inst->prevResidual, lag, iLBCdec_inst->blockl, corrLen, scale3);
+
+ /* Normalize and store cross^2 and the number of shifts */
+ shiftMax = WebRtcSpl_GetSizeInBits(WEBRTC_SPL_ABS_W32(cross))-15;
+ crossSquareMax = (int16_t)((
+ (int16_t)WEBRTC_SPL_SHIFT_W32(cross, -shiftMax) *
+ (int16_t)WEBRTC_SPL_SHIFT_W32(cross, -shiftMax)) >> 15);
+
+ for (j=inlag-2;j<=inlag+3;j++) {
+ WebRtcIlbcfix_CompCorr( &cross_comp, &ener_comp,
+ iLBCdec_inst->prevResidual, j, iLBCdec_inst->blockl, corrLen, scale3);
+
+ /* Use the criteria (corr*corr)/energy to compare if
+ this lag is better or not. To avoid the division,
+ do a cross multiplication */
+ shift1 = WebRtcSpl_GetSizeInBits(WEBRTC_SPL_ABS_W32(cross_comp))-15;
+ crossSquare = (int16_t)((
+ (int16_t)WEBRTC_SPL_SHIFT_W32(cross_comp, -shift1) *
+ (int16_t)WEBRTC_SPL_SHIFT_W32(cross_comp, -shift1)) >> 15);
+
+ shift2 = WebRtcSpl_GetSizeInBits(ener)-15;
+ measure = (int16_t)WEBRTC_SPL_SHIFT_W32(ener, -shift2) * crossSquare;
+
+ shift3 = WebRtcSpl_GetSizeInBits(ener_comp)-15;
+ maxMeasure = (int16_t)WEBRTC_SPL_SHIFT_W32(ener_comp, -shift3) *
+ crossSquareMax;
+
+ /* Calculate shift value, so that the two measures can
+ be put in the same Q domain */
+ if(2 * shiftMax + shift3 > 2 * shift1 + shift2) {
+ tmp1 =
+ WEBRTC_SPL_MIN(31, 2 * shiftMax + shift3 - 2 * shift1 - shift2);
+ tmp2 = 0;
+ } else {
+ tmp1 = 0;
+ tmp2 =
+ WEBRTC_SPL_MIN(31, 2 * shift1 + shift2 - 2 * shiftMax - shift3);
+ }
+
+ if ((measure>>tmp1) > (maxMeasure>>tmp2)) {
+ /* New lag is better => record lag, measure and domain */
+ lag = j;
+ crossSquareMax = crossSquare;
+ cross = cross_comp;
+ shiftMax = shift1;
+ ener = ener_comp;
+ }
+ }
+
+ /* Calculate the periodicity for the lag with the maximum correlation.
+
+ Definition of the periodicity:
+ abs(corr(vec1, vec2))/(sqrt(energy(vec1))*sqrt(energy(vec2)))
+
+ Work in the Square domain to simplify the calculations
+ max_perSquare is less than 1 (in Q15)
+ */
+ tmp2W32=WebRtcSpl_DotProductWithScale(&iLBCdec_inst->prevResidual[iLBCdec_inst->blockl-corrLen],
+ &iLBCdec_inst->prevResidual[iLBCdec_inst->blockl-corrLen],
+ corrLen, scale3);
+
+ if ((tmp2W32>0)&&(ener_comp>0)) {
+ /* norm energies to int16_t, compute the product of the energies and
+ use the upper int16_t as the denominator */
+
+ scale1=(int16_t)WebRtcSpl_NormW32(tmp2W32)-16;
+ tmp1=(int16_t)WEBRTC_SPL_SHIFT_W32(tmp2W32, scale1);
+
+ scale2=(int16_t)WebRtcSpl_NormW32(ener)-16;
+ tmp2=(int16_t)WEBRTC_SPL_SHIFT_W32(ener, scale2);
+ denom = (int16_t)((tmp1 * tmp2) >> 16); /* in Q(scale1+scale2-16) */
+
+ /* Square the cross correlation and norm it such that max_perSquare
+ will be in Q15 after the division */
+
+ totscale = scale1+scale2-1;
+ tmp1 = (int16_t)WEBRTC_SPL_SHIFT_W32(cross, (totscale>>1));
+ tmp2 = (int16_t)WEBRTC_SPL_SHIFT_W32(cross, totscale-(totscale>>1));
+
+ nom = tmp1 * tmp2;
+ max_perSquare = (int16_t)WebRtcSpl_DivW32W16(nom, denom);
+
+ } else {
+ max_perSquare = 0;
+ }
+ }
+
+ /* previous frame lost, use recorded lag and gain */
+
+ else {
+ lag = iLBCdec_inst->prevLag;
+ max_perSquare = iLBCdec_inst->perSquare;
+ }
+
+ /* Attenuate signal and scale down pitch pred gain if
+ several frames lost consecutively */
+
+ use_gain = 32767; /* 1.0 in Q15 */
+
+    if (iLBCdec_inst->consPLICount*iLBCdec_inst->blockl>1280) {
+      use_gain = 0; /* 0.0 in Q15 */
+    } else if (iLBCdec_inst->consPLICount*iLBCdec_inst->blockl>960) {
+      use_gain = 16384; /* 0.5 in Q15 */
+    } else if (iLBCdec_inst->consPLICount*iLBCdec_inst->blockl>640) {
+      use_gain = 22938; /* 0.7 in Q15 */
+    } else if (iLBCdec_inst->consPLICount*iLBCdec_inst->blockl>320) {
+      use_gain = 29491; /* 0.9 in Q15 */
+    }
+
+    /* Compute the mixing factor of pitch repetition and noise:
+       max_per > 0.7        => periodicity 1.0
+       0.4 < max_per < 0.7  => periodicity (max_per-0.4)/(0.7-0.4)
+       max_per < 0.4        => periodicity 0.0
+    */
+
+ if (max_perSquare>7868) { /* periodicity > 0.7 (0.7^4=0.2401 in Q15) */
+ pitchfact = 32767;
+ } else if (max_perSquare>839) { /* 0.4 < periodicity < 0.7 (0.4^4=0.0256 in Q15) */
+ /* find best index and interpolate from that */
+ ind = 5;
+ while ((max_perSquare<WebRtcIlbcfix_kPlcPerSqr[ind])&&(ind>0)) {
+ ind--;
+ }
+ /* pitch fact is approximated by first order */
+ tmpW32 = (int32_t)WebRtcIlbcfix_kPlcPitchFact[ind] +
+ ((WebRtcIlbcfix_kPlcPfSlope[ind] *
+ (max_perSquare - WebRtcIlbcfix_kPlcPerSqr[ind])) >> 11);
+
+ pitchfact = (int16_t)WEBRTC_SPL_MIN(tmpW32, 32767); /* guard against overflow */
+
+ } else { /* periodicity < 0.4 */
+ pitchfact = 0;
+ }
+
+  /* avoid repetition of same pitch cycle (buzziness) */
+ use_lag = lag;
+ if (lag<80) {
+ use_lag = 2*lag;
+ }
+
+ /* compute concealed residual */
+ noise_energy_threshold_30dB = (int32_t)iLBCdec_inst->blockl * 900;
+ energy = 0;
+ for (i=0; i<iLBCdec_inst->blockl; i++) {
+
+    /* noise component - 52 < randlag < 117 */
+ iLBCdec_inst->seed = (int16_t)(iLBCdec_inst->seed * 31821 + 13849);
+ randlag = 53 + (iLBCdec_inst->seed & 63);
+ if (randlag > i) {
+ randvec[i] =
+ iLBCdec_inst->prevResidual[iLBCdec_inst->blockl + i - randlag];
+ } else {
+ randvec[i] = iLBCdec_inst->prevResidual[i - randlag];
+ }
+
+    /* pitch repetition component */
+ if (use_lag > i) {
+ PLCresidual[i] =
+ iLBCdec_inst->prevResidual[iLBCdec_inst->blockl + i - use_lag];
+ } else {
+ PLCresidual[i] = PLCresidual[i - use_lag];
+ }
+
+    /* Attenuate total gain for each 10 ms */
+ if (i<80) {
+ tot_gain=use_gain;
+ } else if (i<160) {
+ tot_gain = (int16_t)((31130 * use_gain) >> 15); /* 0.95*use_gain */
+ } else {
+ tot_gain = (int16_t)((29491 * use_gain) >> 15); /* 0.9*use_gain */
+ }
+
+
+    /* mix noise and pitch repetition */
+ PLCresidual[i] = (int16_t)((tot_gain *
+ ((pitchfact * PLCresidual[i] + (32767 - pitchfact) * randvec[i] +
+ 16384) >> 15)) >> 15);
+
+ /* Compute energy until threshold for noise energy is reached */
+ if (energy < noise_energy_threshold_30dB) {
+ energy += PLCresidual[i] * PLCresidual[i];
+ }
+ }
+
+ /* less than 30 dB, use only noise */
+ if (energy < noise_energy_threshold_30dB) {
+ for (i=0; i<iLBCdec_inst->blockl; i++) {
+ PLCresidual[i] = randvec[i];
+ }
+ }
+
+ /* use the old LPC */
+  WEBRTC_SPL_MEMCPY_W16(PLClpc, iLBCdec_inst->prevLpc, LPC_FILTERORDER+1);
+
+ /* Update state in case there are multiple frame losses */
+ iLBCdec_inst->prevLag = lag;
+ iLBCdec_inst->perSquare = max_perSquare;
+ }
+
+ /* no packet loss, copy input */
+
+ else {
+ WEBRTC_SPL_MEMCPY_W16(PLCresidual, decresidual, iLBCdec_inst->blockl);
+ WEBRTC_SPL_MEMCPY_W16(PLClpc, lpc, (LPC_FILTERORDER+1));
+ iLBCdec_inst->consPLICount = 0;
+ }
+
+ /* update state */
+ iLBCdec_inst->prevPLI = PLI;
+ WEBRTC_SPL_MEMCPY_W16(iLBCdec_inst->prevLpc, PLClpc, (LPC_FILTERORDER+1));
+ WEBRTC_SPL_MEMCPY_W16(iLBCdec_inst->prevResidual, PLCresidual, iLBCdec_inst->blockl);
+
+ return;
+}
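The concealment loop above does all of its mixing in Q15 fixed point: pitchfact weights the pitch-repeated sample against the random component, and tot_gain attenuates the result, with 16384 (= 2^14) rounding the first >>15. A minimal stand-alone sketch of that arithmetic follows; the helper name mix_q15 and the sample values are hypothetical, not part of the codec.

#include <stdint.h>
#include <stdio.h>

/* Mix a pitch-repeated sample with a noise sample using a Q15 weight,
   then apply a Q15 total gain; 16384 rounds the first >>15. */
static int16_t mix_q15(int16_t pitch_s, int16_t noise_s,
                       int16_t pitchfact, int16_t tot_gain) {
  int32_t mixed = ((int32_t)pitchfact * pitch_s +
                   (int32_t)(32767 - pitchfact) * noise_s + 16384) >> 15;
  return (int16_t)(((int32_t)tot_gain * mixed) >> 15);
}

int main(void) {
  /* pitchfact = 0.75 and tot_gain = 0.9 in Q15 (approximately).
     0.9 * (0.75*1000 + 0.25*(-200)) = 630; truncation gives 629. */
  printf("%d\n", mix_q15(1000, -200, 24576, 29491));
  return 0;
}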
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/do_plc.h b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/do_plc.h
new file mode 100644
index 0000000000..c19c4eca32
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/do_plc.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_DoThePlc.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_DO_PLC_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_DO_PLC_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ * Packet loss concealment routine. Conceals a residual signal
+ * and LP parameters. If no packet loss, update state.
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_DoThePlc(
+ int16_t* PLCresidual, /* (o) concealed residual */
+ int16_t* PLClpc, /* (o) concealed LP parameters */
+    int16_t PLI, /* (i) packet loss indicator
+                                   0 - no PL, 1 - PL */
+ int16_t* decresidual, /* (i) decoded residual */
+ int16_t* lpc, /* (i) decoded LPC (only used for no PL) */
+ size_t inlag, /* (i) pitch lag */
+ IlbcDecoder* iLBCdec_inst
+ /* (i/o) decoder instance */
+ );
+
+#endif
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/encode.c b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/encode.c
new file mode 100644
index 0000000000..8e536221cd
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/encode.c
@@ -0,0 +1,517 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_Encode.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/encode.h"
+
+#include <string.h>
+
+#include "modules/audio_coding/codecs/ilbc/cb_construct.h"
+#include "modules/audio_coding/codecs/ilbc/cb_search.h"
+#include "modules/audio_coding/codecs/ilbc/constants.h"
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+#include "modules/audio_coding/codecs/ilbc/frame_classify.h"
+#include "modules/audio_coding/codecs/ilbc/hp_input.h"
+#include "modules/audio_coding/codecs/ilbc/index_conv_enc.h"
+#include "modules/audio_coding/codecs/ilbc/lpc_encode.h"
+#include "modules/audio_coding/codecs/ilbc/pack_bits.h"
+#include "modules/audio_coding/codecs/ilbc/state_construct.h"
+#include "modules/audio_coding/codecs/ilbc/state_search.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/system/arch.h"
+
+#ifdef SPLIT_10MS
+#include "modules/audio_coding/codecs/ilbc/unpack_bits.h"
+#include "modules/audio_coding/codecs/ilbc/index_conv_dec.h"
+#endif
+
+#ifndef WEBRTC_ARCH_BIG_ENDIAN
+#include "modules/audio_coding/codecs/ilbc/swap_bytes.h"
+#endif
+
+/*----------------------------------------------------------------*
+ * main encoder function
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_EncodeImpl(
+ uint16_t *bytes, /* (o) encoded data bits iLBC */
+ const int16_t *block, /* (i) speech vector to encode */
+ IlbcEncoder *iLBCenc_inst /* (i/o) the general encoder
+ state */
+ ){
+ size_t n, meml_gotten, Nfor;
+ size_t diff, start_pos;
+ size_t index;
+ size_t subcount, subframe;
+ size_t start_count, end_count;
+ int16_t *residual;
+ int32_t en1, en2;
+ int16_t scale, max;
+ int16_t *syntdenum;
+ int16_t *decresidual;
+ int16_t *reverseResidual;
+ int16_t *reverseDecresidual;
+ /* Stack based */
+ int16_t weightdenum[(LPC_FILTERORDER + 1)*NSUB_MAX];
+ int16_t dataVec[BLOCKL_MAX + LPC_FILTERORDER];
+ int16_t memVec[CB_MEML+CB_FILTERLEN];
+ int16_t bitsMemory[sizeof(iLBC_bits)/sizeof(int16_t)];
+ iLBC_bits *iLBCbits_inst = (iLBC_bits*)bitsMemory;
+
+
+#ifdef SPLIT_10MS
+ int16_t *weightdenumbuf = iLBCenc_inst->weightdenumbuf;
+ int16_t last_bit;
+#endif
+
+ int16_t *data = &dataVec[LPC_FILTERORDER];
+ int16_t *mem = &memVec[CB_HALFFILTERLEN];
+
+  /* Reuse some buffers to save stack memory */
+ residual = &iLBCenc_inst->lpc_buffer[LPC_LOOKBACK+BLOCKL_MAX-iLBCenc_inst->blockl];
+ syntdenum = mem; /* syntdenum[(LPC_FILTERORDER + 1)*NSUB_MAX] and mem are used non overlapping in the code */
+ decresidual = residual; /* Already encoded residual is overwritten by the decoded version */
+ reverseResidual = data; /* data and reverseResidual are used non overlapping in the code */
+ reverseDecresidual = reverseResidual; /* Already encoded residual is overwritten by the decoded version */
+
+#ifdef SPLIT_10MS
+
+ WebRtcSpl_MemSetW16 ( (int16_t *) iLBCbits_inst, 0,
+ sizeof(iLBC_bits) / sizeof(int16_t) );
+
+ start_pos = iLBCenc_inst->start_pos;
+ diff = iLBCenc_inst->diff;
+
+ if (iLBCenc_inst->section != 0){
+ WEBRTC_SPL_MEMCPY_W16 (weightdenum, weightdenumbuf,
+ SCRATCH_ENCODE_DATAVEC - SCRATCH_ENCODE_WEIGHTDENUM);
+ /* Un-Packetize the frame into parameters */
+ last_bit = WebRtcIlbcfix_UnpackBits (iLBCenc_inst->bytes, iLBCbits_inst, iLBCenc_inst->mode);
+ if (last_bit)
+ return;
+ /* adjust index */
+ WebRtcIlbcfix_IndexConvDec (iLBCbits_inst->cb_index);
+
+ if (iLBCenc_inst->section == 1){
+ /* Save first 80 samples of a 160/240 sample frame for 20/30msec */
+ WEBRTC_SPL_MEMCPY_W16 (iLBCenc_inst->past_samples, block, 80);
+ }
+  else{ // iLBCenc_inst->section == 2 AND mode == 30 ms
+ /* Save second 80 samples of a 240 sample frame for 30msec */
+ WEBRTC_SPL_MEMCPY_W16 (iLBCenc_inst->past_samples + 80, block, 80);
+ }
+ }
+ else{ // iLBCenc_inst->section == 0
+ /* form a complete frame of 160/240 for 20msec/30msec mode */
+ WEBRTC_SPL_MEMCPY_W16 (data + (iLBCenc_inst->mode * 8) - 80, block, 80);
+ WEBRTC_SPL_MEMCPY_W16 (data, iLBCenc_inst->past_samples,
+ (iLBCenc_inst->mode * 8) - 80);
+ iLBCenc_inst->Nfor_flag = 0;
+ iLBCenc_inst->Nback_flag = 0;
+#else
+ /* copy input block to data*/
+ WEBRTC_SPL_MEMCPY_W16(data,block,iLBCenc_inst->blockl);
+#endif
+
+ /* high pass filtering of input signal and scale down the residual (*0.5) */
+ WebRtcIlbcfix_HpInput(data, (int16_t*)WebRtcIlbcfix_kHpInCoefs,
+ iLBCenc_inst->hpimemy, iLBCenc_inst->hpimemx,
+ iLBCenc_inst->blockl);
+
+ /* LPC of hp filtered input data */
+ WebRtcIlbcfix_LpcEncode(syntdenum, weightdenum, iLBCbits_inst->lsf, data,
+ iLBCenc_inst);
+
+ /* Set up state */
+ WEBRTC_SPL_MEMCPY_W16(dataVec, iLBCenc_inst->anaMem, LPC_FILTERORDER);
+
+ /* inverse filter to get residual */
+ for (n=0; n<iLBCenc_inst->nsub; n++ ) {
+ WebRtcSpl_FilterMAFastQ12(
+ &data[n*SUBL], &residual[n*SUBL],
+ &syntdenum[n*(LPC_FILTERORDER+1)],
+ LPC_FILTERORDER+1, SUBL);
+ }
+
+ /* Copy the state for next frame */
+ WEBRTC_SPL_MEMCPY_W16(iLBCenc_inst->anaMem, &data[iLBCenc_inst->blockl-LPC_FILTERORDER], LPC_FILTERORDER);
+
+ /* find state location */
+
+ iLBCbits_inst->startIdx = WebRtcIlbcfix_FrameClassify(iLBCenc_inst,residual);
+
+ /* check if state should be in first or last part of the
+ two subframes */
+
+ index = (iLBCbits_inst->startIdx-1)*SUBL;
+ max=WebRtcSpl_MaxAbsValueW16(&residual[index], 2*SUBL);
+ scale = WebRtcSpl_GetSizeInBits((uint32_t)(max * max));
+
+ /* Scale to maximum 25 bits so that the MAC won't cause overflow */
+ scale = scale - 25;
+ if(scale < 0) {
+ scale = 0;
+ }
+
+ diff = STATE_LEN - iLBCenc_inst->state_short_len;
+ en1=WebRtcSpl_DotProductWithScale(&residual[index], &residual[index],
+ iLBCenc_inst->state_short_len, scale);
+ index += diff;
+ en2=WebRtcSpl_DotProductWithScale(&residual[index], &residual[index],
+ iLBCenc_inst->state_short_len, scale);
+ if (en1 > en2) {
+ iLBCbits_inst->state_first = 1;
+ start_pos = (iLBCbits_inst->startIdx-1)*SUBL;
+ } else {
+ iLBCbits_inst->state_first = 0;
+ start_pos = (iLBCbits_inst->startIdx-1)*SUBL + diff;
+ }
+
+ /* scalar quantization of state */
+
+ WebRtcIlbcfix_StateSearch(iLBCenc_inst, iLBCbits_inst, &residual[start_pos],
+ &syntdenum[(iLBCbits_inst->startIdx-1)*(LPC_FILTERORDER+1)],
+ &weightdenum[(iLBCbits_inst->startIdx-1)*(LPC_FILTERORDER+1)]);
+
+ WebRtcIlbcfix_StateConstruct(iLBCbits_inst->idxForMax, iLBCbits_inst->idxVec,
+ &syntdenum[(iLBCbits_inst->startIdx-1)*(LPC_FILTERORDER+1)],
+ &decresidual[start_pos], iLBCenc_inst->state_short_len
+ );
+
+ /* predictive quantization in state */
+
+ if (iLBCbits_inst->state_first) { /* put adaptive part in the end */
+
+ /* setup memory */
+
+ WebRtcSpl_MemSetW16(mem, 0, CB_MEML - iLBCenc_inst->state_short_len);
+ WEBRTC_SPL_MEMCPY_W16(mem+CB_MEML-iLBCenc_inst->state_short_len,
+ decresidual+start_pos, iLBCenc_inst->state_short_len);
+
+ /* encode subframes */
+
+ WebRtcIlbcfix_CbSearch(iLBCenc_inst, iLBCbits_inst->cb_index, iLBCbits_inst->gain_index,
+ &residual[start_pos+iLBCenc_inst->state_short_len],
+ mem+CB_MEML-ST_MEM_L_TBL, ST_MEM_L_TBL, diff,
+ &weightdenum[iLBCbits_inst->startIdx*(LPC_FILTERORDER+1)], 0);
+
+ /* construct decoded vector */
+
+ RTC_CHECK(WebRtcIlbcfix_CbConstruct(
+ &decresidual[start_pos + iLBCenc_inst->state_short_len],
+ iLBCbits_inst->cb_index, iLBCbits_inst->gain_index,
+ mem + CB_MEML - ST_MEM_L_TBL, ST_MEM_L_TBL, diff));
+
+ }
+ else { /* put adaptive part in the beginning */
+
+ /* create reversed vectors for prediction */
+
+ WebRtcSpl_MemCpyReversedOrder(&reverseResidual[diff-1],
+ &residual[(iLBCbits_inst->startIdx+1)*SUBL-STATE_LEN], diff);
+
+ /* setup memory */
+
+ meml_gotten = iLBCenc_inst->state_short_len;
+ WebRtcSpl_MemCpyReversedOrder(&mem[CB_MEML-1], &decresidual[start_pos], meml_gotten);
+ WebRtcSpl_MemSetW16(mem, 0, CB_MEML - iLBCenc_inst->state_short_len);
+
+ /* encode subframes */
+ WebRtcIlbcfix_CbSearch(iLBCenc_inst, iLBCbits_inst->cb_index, iLBCbits_inst->gain_index,
+ reverseResidual, mem+CB_MEML-ST_MEM_L_TBL, ST_MEM_L_TBL, diff,
+ &weightdenum[(iLBCbits_inst->startIdx-1)*(LPC_FILTERORDER+1)],
+ 0);
+
+ /* construct decoded vector */
+ RTC_CHECK(WebRtcIlbcfix_CbConstruct(
+ reverseDecresidual, iLBCbits_inst->cb_index,
+ iLBCbits_inst->gain_index, mem + CB_MEML - ST_MEM_L_TBL,
+ ST_MEM_L_TBL, diff));
+
+ /* get decoded residual from reversed vector */
+
+ WebRtcSpl_MemCpyReversedOrder(&decresidual[start_pos-1], reverseDecresidual, diff);
+ }
+
+#ifdef SPLIT_10MS
+ iLBCenc_inst->start_pos = start_pos;
+ iLBCenc_inst->diff = diff;
+ iLBCenc_inst->section++;
+ /* adjust index */
+ WebRtcIlbcfix_IndexConvEnc (iLBCbits_inst->cb_index);
+ /* Packetize the parameters into the frame */
+ WebRtcIlbcfix_PackBits (iLBCenc_inst->bytes, iLBCbits_inst, iLBCenc_inst->mode);
+ WEBRTC_SPL_MEMCPY_W16 (weightdenumbuf, weightdenum,
+ SCRATCH_ENCODE_DATAVEC - SCRATCH_ENCODE_WEIGHTDENUM);
+ return;
+ }
+#endif
+
+ /* forward prediction of subframes */
+
+ Nfor = iLBCenc_inst->nsub-iLBCbits_inst->startIdx-1;
+
+ /* counter for predicted subframes */
+#ifdef SPLIT_10MS
+ if (iLBCenc_inst->mode == 20)
+ {
+ subcount = 1;
+ }
+ if (iLBCenc_inst->mode == 30)
+ {
+ if (iLBCenc_inst->section == 1)
+ {
+ subcount = 1;
+ }
+ if (iLBCenc_inst->section == 2)
+ {
+ subcount = 3;
+ }
+ }
+#else
+ subcount=1;
+#endif
+
+ if( Nfor > 0 ){
+
+ /* setup memory */
+
+ WebRtcSpl_MemSetW16(mem, 0, CB_MEML-STATE_LEN);
+ WEBRTC_SPL_MEMCPY_W16(mem+CB_MEML-STATE_LEN,
+ decresidual+(iLBCbits_inst->startIdx-1)*SUBL, STATE_LEN);
+
+#ifdef SPLIT_10MS
+ if (iLBCenc_inst->Nfor_flag > 0)
+ {
+ for (subframe = 0; subframe < WEBRTC_SPL_MIN (Nfor, 2); subframe++)
+ {
+ /* update memory */
+ WEBRTC_SPL_MEMCPY_W16 (mem, mem + SUBL, (CB_MEML - SUBL));
+ WEBRTC_SPL_MEMCPY_W16 (mem + CB_MEML - SUBL,
+ &decresidual[(iLBCbits_inst->startIdx + 1 +
+ subframe) * SUBL], SUBL);
+ }
+ }
+
+ iLBCenc_inst->Nfor_flag++;
+
+ if (iLBCenc_inst->mode == 20)
+ {
+ start_count = 0;
+ end_count = Nfor;
+ }
+ if (iLBCenc_inst->mode == 30)
+ {
+ if (iLBCenc_inst->section == 1)
+ {
+ start_count = 0;
+ end_count = WEBRTC_SPL_MIN (Nfor, (size_t)2);
+ }
+ if (iLBCenc_inst->section == 2)
+ {
+ start_count = WEBRTC_SPL_MIN (Nfor, (size_t)2);
+ end_count = Nfor;
+ }
+ }
+#else
+ start_count = 0;
+ end_count = Nfor;
+#endif
+
+ /* loop over subframes to encode */
+
+ for (subframe = start_count; subframe < end_count; subframe++){
+
+ /* encode subframe */
+
+ WebRtcIlbcfix_CbSearch(iLBCenc_inst, iLBCbits_inst->cb_index+subcount*CB_NSTAGES,
+ iLBCbits_inst->gain_index+subcount*CB_NSTAGES,
+ &residual[(iLBCbits_inst->startIdx+1+subframe)*SUBL],
+ mem, MEM_LF_TBL, SUBL,
+ &weightdenum[(iLBCbits_inst->startIdx+1+subframe)*(LPC_FILTERORDER+1)],
+ subcount);
+
+ /* construct decoded vector */
+ RTC_CHECK(WebRtcIlbcfix_CbConstruct(
+ &decresidual[(iLBCbits_inst->startIdx + 1 + subframe) * SUBL],
+ iLBCbits_inst->cb_index + subcount * CB_NSTAGES,
+ iLBCbits_inst->gain_index + subcount * CB_NSTAGES, mem, MEM_LF_TBL,
+ SUBL));
+
+ /* update memory */
+
+ memmove(mem, mem + SUBL, (CB_MEML - SUBL) * sizeof(*mem));
+ WEBRTC_SPL_MEMCPY_W16(mem+CB_MEML-SUBL,
+ &decresidual[(iLBCbits_inst->startIdx+1+subframe)*SUBL], SUBL);
+
+ subcount++;
+ }
+ }
+
+#ifdef SPLIT_10MS
+ if ((iLBCenc_inst->section == 1) &&
+ (iLBCenc_inst->mode == 30) && (Nfor > 0) && (end_count == 2))
+ {
+ iLBCenc_inst->section++;
+ /* adjust index */
+ WebRtcIlbcfix_IndexConvEnc (iLBCbits_inst->cb_index);
+ /* Packetize the parameters into the frame */
+ WebRtcIlbcfix_PackBits (iLBCenc_inst->bytes, iLBCbits_inst, iLBCenc_inst->mode);
+ WEBRTC_SPL_MEMCPY_W16 (weightdenumbuf, weightdenum,
+ SCRATCH_ENCODE_DATAVEC - SCRATCH_ENCODE_WEIGHTDENUM);
+ return;
+ }
+#endif
+
+ /* backward prediction of subframes */
+
+ if (iLBCbits_inst->startIdx > 1) {
+
+ /* create reverse order vectors
+ (The decresidual does not need to be copied since it is
+ contained in the same vector as the residual)
+ */
+
+ size_t Nback = iLBCbits_inst->startIdx - 1;
+ WebRtcSpl_MemCpyReversedOrder(&reverseResidual[Nback*SUBL-1], residual, Nback*SUBL);
+
+ /* setup memory */
+
+ meml_gotten = SUBL*(iLBCenc_inst->nsub+1-iLBCbits_inst->startIdx);
+ if( meml_gotten > CB_MEML ) {
+ meml_gotten=CB_MEML;
+ }
+
+ WebRtcSpl_MemCpyReversedOrder(&mem[CB_MEML-1], &decresidual[Nback*SUBL], meml_gotten);
+ WebRtcSpl_MemSetW16(mem, 0, CB_MEML - meml_gotten);
+
+#ifdef SPLIT_10MS
+ if (iLBCenc_inst->Nback_flag > 0)
+ {
+ for (subframe = 0; subframe < WEBRTC_SPL_MAX (2 - Nfor, 0); subframe++)
+ {
+ /* update memory */
+ WEBRTC_SPL_MEMCPY_W16 (mem, mem + SUBL, (CB_MEML - SUBL));
+ WEBRTC_SPL_MEMCPY_W16 (mem + CB_MEML - SUBL,
+ &reverseDecresidual[subframe * SUBL], SUBL);
+ }
+ }
+
+ iLBCenc_inst->Nback_flag++;
+
+
+ if (iLBCenc_inst->mode == 20)
+ {
+ start_count = 0;
+ end_count = Nback;
+ }
+ if (iLBCenc_inst->mode == 30)
+ {
+ if (iLBCenc_inst->section == 1)
+ {
+ start_count = 0;
+              end_count = (Nfor >= 2) ? 0 : (2 - Nfor);
+ }
+ if (iLBCenc_inst->section == 2)
+ {
+              start_count = (Nfor >= 2) ? 0 : (2 - Nfor);
+ end_count = Nback;
+ }
+ }
+#else
+ start_count = 0;
+ end_count = Nback;
+#endif
+
+ /* loop over subframes to encode */
+
+ for (subframe = start_count; subframe < end_count; subframe++){
+
+ /* encode subframe */
+
+ WebRtcIlbcfix_CbSearch(iLBCenc_inst, iLBCbits_inst->cb_index+subcount*CB_NSTAGES,
+ iLBCbits_inst->gain_index+subcount*CB_NSTAGES, &reverseResidual[subframe*SUBL],
+ mem, MEM_LF_TBL, SUBL,
+ &weightdenum[(iLBCbits_inst->startIdx-2-subframe)*(LPC_FILTERORDER+1)],
+ subcount);
+
+ /* construct decoded vector */
+ RTC_CHECK(WebRtcIlbcfix_CbConstruct(
+ &reverseDecresidual[subframe * SUBL],
+ iLBCbits_inst->cb_index + subcount * CB_NSTAGES,
+ iLBCbits_inst->gain_index + subcount * CB_NSTAGES, mem, MEM_LF_TBL,
+ SUBL));
+
+ /* update memory */
+ memmove(mem, mem + SUBL, (CB_MEML - SUBL) * sizeof(*mem));
+ WEBRTC_SPL_MEMCPY_W16(mem+CB_MEML-SUBL,
+ &reverseDecresidual[subframe*SUBL], SUBL);
+
+ subcount++;
+
+ }
+
+ /* get decoded residual from reversed vector */
+
+ WebRtcSpl_MemCpyReversedOrder(&decresidual[SUBL*Nback-1], reverseDecresidual, SUBL*Nback);
+ }
+ /* end encoding part */
+
+ /* adjust index */
+
+ WebRtcIlbcfix_IndexConvEnc(iLBCbits_inst->cb_index);
+
+ /* Packetize the parameters into the frame */
+
+#ifdef SPLIT_10MS
+ if( (iLBCenc_inst->mode==30) && (iLBCenc_inst->section==1) ){
+ WebRtcIlbcfix_PackBits(iLBCenc_inst->bytes, iLBCbits_inst, iLBCenc_inst->mode);
+ }
+ else{
+ WebRtcIlbcfix_PackBits(bytes, iLBCbits_inst, iLBCenc_inst->mode);
+ }
+#else
+ WebRtcIlbcfix_PackBits(bytes, iLBCbits_inst, iLBCenc_inst->mode);
+#endif
+
+#ifndef WEBRTC_ARCH_BIG_ENDIAN
+ /* Swap bytes for LITTLE ENDIAN since the packbits()
+ function assumes BIG_ENDIAN machine */
+#ifdef SPLIT_10MS
+ if (( (iLBCenc_inst->section == 1) && (iLBCenc_inst->mode == 20) ) ||
+ ( (iLBCenc_inst->section == 2) && (iLBCenc_inst->mode == 30) )){
+ WebRtcIlbcfix_SwapBytes(bytes, iLBCenc_inst->no_of_words, bytes);
+ }
+#else
+ WebRtcIlbcfix_SwapBytes(bytes, iLBCenc_inst->no_of_words, bytes);
+#endif
+#endif
+
+#ifdef SPLIT_10MS
+ if (subcount == (iLBCenc_inst->nsub - 1))
+ {
+ iLBCenc_inst->section = 0;
+ }
+ else
+ {
+ iLBCenc_inst->section++;
+ WEBRTC_SPL_MEMCPY_W16 (weightdenumbuf, weightdenum,
+ SCRATCH_ENCODE_DATAVEC - SCRATCH_ENCODE_WEIGHTDENUM);
+ }
+#endif
+
+}
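WebRtcIlbcfix_EncodeImpl places the start state in whichever half of the two selected subframes carries more energy, comparing two dot products computed with a common guard shift so the 32-bit accumulator cannot overflow. A hedged stand-alone sketch of that decision is below; the function names and data are illustrative only.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Energy of n samples with a guard right-shift on every product,
   mirroring the role of WebRtcSpl_DotProductWithScale. */
static int32_t dot_scaled(const int16_t* v, size_t n, int shift) {
  int32_t acc = 0;
  for (size_t i = 0; i < n; i++)
    acc += ((int32_t)v[i] * v[i]) >> shift;
  return acc;
}

/* Returns 1 if the first candidate segment has more energy, i.e. the
   start state should sit in the first part. */
static int state_first(const int16_t* res, size_t state_len, size_t diff,
                       int shift) {
  return dot_scaled(res, state_len, shift) >
         dot_scaled(res + diff, state_len, shift);
}

int main(void) {
  int16_t res[12] = {100, 100, 100, 100, 10, 10,
                     10,  10,  10,  10,  10, 10};
  printf("%d\n", state_first(res, 4, 8, 0)); /* prints 1 */
  return 0;
}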
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/encode.h b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/encode.h
new file mode 100644
index 0000000000..bc3e187d92
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/encode.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_Encode.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_ENCODE_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_ENCODE_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ * main encoder function
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_EncodeImpl(
+ uint16_t* bytes, /* (o) encoded data bits iLBC */
+ const int16_t* block, /* (i) speech vector to encode */
+ IlbcEncoder* iLBCenc_inst /* (i/o) the general encoder
+ state */
+ );
+
+#endif
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/energy_inverse.c b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/energy_inverse.c
new file mode 100644
index 0000000000..7f00254aea
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/energy_inverse.c
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_EnergyInverse.c
+
+******************************************************************/
+
+/* Inverts the input vector into the Q29 domain */
+
+#include "modules/audio_coding/codecs/ilbc/energy_inverse.h"
+
+void WebRtcIlbcfix_EnergyInverse(
+ int16_t *energy, /* (i/o) Energy and inverse
+ energy (in Q29) */
+ size_t noOfEnergies) /* (i) The length of the energy
+ vector */
+{
+ int32_t Nom=(int32_t)0x1FFFFFFF;
+ int16_t *energyPtr;
+ size_t i;
+
+ /* Set the minimum energy value to 16384 to avoid overflow */
+ energyPtr=energy;
+ for (i=0; i<noOfEnergies; i++) {
+ (*energyPtr)=WEBRTC_SPL_MAX((*energyPtr),16384);
+ energyPtr++;
+ }
+
+ /* Calculate inverse energy in Q29 */
+ energyPtr=energy;
+ for (i=0; i<noOfEnergies; i++) {
+ (*energyPtr) = (int16_t)WebRtcSpl_DivW32W16(Nom, (*energyPtr));
+ energyPtr++;
+ }
+}
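The inversion above relies on the clamp: with energy >= 16384, (2^29-1)/energy always fits in an int16_t, and the quotient is 1/energy expressed in Q29. A small stand-alone check of the same arithmetic, with a hypothetical input value:

#include <stdint.h>
#include <stdio.h>

/* Clamp so (2^29-1)/energy <= 32767, then divide to get 1/energy
   in Q29. */
static int16_t inverse_q29(int16_t energy) {
  if (energy < 16384)
    energy = 16384;
  return (int16_t)(0x1FFFFFFF / energy);
}

int main(void) {
  int16_t inv = inverse_q29(20000);
  /* (2^29-1)/20000 = 26843, and 26843/2^29 is 1/20000 to ~5 digits. */
  printf("%d %g\n", inv, inv / 536870912.0);
  return 0;
}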
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/energy_inverse.h b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/energy_inverse.h
new file mode 100644
index 0000000000..15391cf230
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/energy_inverse.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_EnergyInverse.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_ENERGY_INVERSE_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_ENERGY_INVERSE_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/* Inverts the input vector into the Q29 domain */
+
+void WebRtcIlbcfix_EnergyInverse(
+ int16_t*
+ energy, /* (i/o) Energy and inverse
+ energy (in Q29) */
+ size_t noOfEnergies); /* (i) The length of the energy
+ vector */
+
+#endif
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/enh_upsample.c b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/enh_upsample.c
new file mode 100644
index 0000000000..cd3d0a4db1
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/enh_upsample.c
@@ -0,0 +1,112 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_EnhUpsample.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/enh_upsample.h"
+
+#include "modules/audio_coding/codecs/ilbc/constants.h"
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ * upsample finite array assuming zeros outside bounds
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_EnhUpsample(
+ int32_t *useq1, /* (o) upsampled output sequence */
+ int16_t *seq1 /* (i) unupsampled sequence */
+ ){
+ int j;
+ int32_t *pu1, *pu11;
+ int16_t *ps, *w16tmp;
+ const int16_t *pp;
+
+ /* filtering: filter overhangs left side of sequence */
+ pu1=useq1;
+ for (j=0;j<ENH_UPS0; j++) {
+ pu11=pu1;
+ /* i = 2 */
+ pp=WebRtcIlbcfix_kEnhPolyPhaser[j]+1;
+ ps=seq1+2;
+ *pu11 = (*ps--) * *pp++;
+ *pu11 += (*ps--) * *pp++;
+ *pu11 += (*ps--) * *pp++;
+ pu11+=ENH_UPS0;
+ /* i = 3 */
+ pp=WebRtcIlbcfix_kEnhPolyPhaser[j]+1;
+ ps=seq1+3;
+ *pu11 = (*ps--) * *pp++;
+ *pu11 += (*ps--) * *pp++;
+ *pu11 += (*ps--) * *pp++;
+ *pu11 += (*ps--) * *pp++;
+ pu11+=ENH_UPS0;
+ /* i = 4 */
+ pp=WebRtcIlbcfix_kEnhPolyPhaser[j]+1;
+ ps=seq1+4;
+ *pu11 = (*ps--) * *pp++;
+ *pu11 += (*ps--) * *pp++;
+ *pu11 += (*ps--) * *pp++;
+ *pu11 += (*ps--) * *pp++;
+ *pu11 += (*ps--) * *pp++;
+ pu1++;
+ }
+
+ /* filtering: simple convolution=inner products
+ (not needed since the sequence is so short)
+ */
+
+ /* filtering: filter overhangs right side of sequence */
+
+  /* Code with loops, which is equivalent to the expanded version below
+
+  filterlength = 5;
+  hfl = 2;
+ for(j=0;j<ENH_UPS0; j++){
+ pu = useq1 + (filterlength-hfl)*ENH_UPS0 + j;
+ for(i=1; i<=hfl; i++){
+ *pu=0;
+ pp = polyp[j]+i;
+ ps = seq1+dim1-1;
+ for(k=0;k<filterlength-i;k++) {
+ *pu += (*ps--) * *pp++;
+ }
+ pu+=ENH_UPS0;
+ }
+ }
+ */
+ pu1 = useq1 + 12;
+ w16tmp = seq1+4;
+ for (j=0;j<ENH_UPS0; j++) {
+ pu11 = pu1;
+ /* i = 1 */
+ pp = WebRtcIlbcfix_kEnhPolyPhaser[j]+2;
+ ps = w16tmp;
+ *pu11 = (*ps--) * *pp++;
+ *pu11 += (*ps--) * *pp++;
+ *pu11 += (*ps--) * *pp++;
+ *pu11 += (*ps--) * *pp++;
+ pu11+=ENH_UPS0;
+ /* i = 2 */
+ pp = WebRtcIlbcfix_kEnhPolyPhaser[j]+3;
+ ps = w16tmp;
+ *pu11 = (*ps--) * *pp++;
+ *pu11 += (*ps--) * *pp++;
+ *pu11 += (*ps--) * *pp++;
+ pu11+=ENH_UPS0;
+
+ pu1++;
+ }
+}
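The expanded code above is the edge-handling version of the looped polyphase upsampler shown in the comment. A simplified stand-alone sketch of the interior-sample loop follows; the factor, taps, and input here are made up for illustration, while the codec's real taps live in WebRtcIlbcfix_kEnhPolyPhaser and use Q-domain arithmetic.

#include <stdio.h>

#define UPS 4   /* upsampling factor (ENH_UPS0 in the codec) */
#define FLEN 5  /* taps per polyphase branch */
#define DIM 8   /* input length */

int main(void) {
  short seq[DIM] = {100, -50, 30, 80, -20, 60, 10, -40};
  long useq[DIM * UPS] = {0};
  /* One short FIR per output phase; the taps here are hypothetical. */
  short poly[UPS][FLEN] = {{0, 0, 1, 0, 0},
                           {0, 1, 1, 0, 0},
                           {0, 1, 1, 1, 0},
                           {0, 0, 1, 1, 0}};

  /* Interior samples: plain convolution, one branch per output offset.
     The first FLEN-1 input positions are left zero; the codec expands
     those edge cases explicitly instead. */
  for (int i = FLEN - 1; i < DIM; i++) {
    for (int j = 0; j < UPS; j++) {
      long acc = 0;
      for (int k = 0; k < FLEN; k++)
        acc += (long)seq[i - k] * poly[j][k];
      useq[i * UPS + j] = acc;
    }
  }
  for (int n = 0; n < DIM * UPS; n++)
    printf("%ld ", useq[n]);
  printf("\n");
  return 0;
}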
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/enh_upsample.h b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/enh_upsample.h
new file mode 100644
index 0000000000..b427eca50a
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/enh_upsample.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_EnhUpsample.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_ENH_UPSAMPLE_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_ENH_UPSAMPLE_H_
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ * upsample finite array assuming zeros outside bounds
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_EnhUpsample(
+ int32_t* useq1, /* (o) upsampled output sequence */
+ int16_t* seq1 /* (i) unupsampled sequence */
+ );
+
+#endif
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/enhancer.c b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/enhancer.c
new file mode 100644
index 0000000000..bd4e60015c
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/enhancer.c
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_Enhancer.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/enhancer.h"
+
+#include "modules/audio_coding/codecs/ilbc/constants.h"
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+#include "modules/audio_coding/codecs/ilbc/get_sync_seq.h"
+#include "modules/audio_coding/codecs/ilbc/smooth.h"
+
+/*----------------------------------------------------------------*
+ * perform enhancement on idata+centerStartPos through
+ * idata+centerStartPos+ENH_BLOCKL-1
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_Enhancer(
+ int16_t *odata, /* (o) smoothed block, dimension blockl */
+ int16_t *idata, /* (i) data buffer used for enhancing */
+ size_t idatal, /* (i) dimension idata */
+ size_t centerStartPos, /* (i) first sample current block within idata */
+    size_t *period, /* (i) pitch period array (pitch backward in time) */
+ const size_t *plocs, /* (i) locations where period array values valid */
+ size_t periodl /* (i) dimension of period and plocs */
+ ){
+ /* Stack based */
+ int16_t surround[ENH_BLOCKL];
+
+ WebRtcSpl_MemSetW16(surround, 0, ENH_BLOCKL);
+
+ /* get said second sequence of segments */
+
+ WebRtcIlbcfix_GetSyncSeq(idata, idatal, centerStartPos, period, plocs,
+ periodl, ENH_HL, surround);
+
+ /* compute the smoothed output from said second sequence */
+
+ WebRtcIlbcfix_Smooth(odata, idata + centerStartPos, surround);
+}
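Conceptually, the enhancer averages pitch-synchronous segments around the current block (GetSyncSeq builds them; Smooth then mixes the average with the block itself). A stand-alone sketch of the averaging idea, with hypothetical pre-aligned segments standing in for what GetSyncSeq would produce:

#include <stddef.h>
#include <stdio.h>

#define SEGS 5
#define BLK 4

int main(void) {
  /* Hypothetical pitch-aligned segments, one per row. */
  short seg[SEGS][BLK] = {{10, -8, 6, -4},
                          {12, -10, 6, -6},
                          {11, -9, 7, -5},
                          {9, -7, 5, -3},
                          {13, -11, 8, -6}};
  short surround[BLK];
  for (size_t i = 0; i < BLK; i++) {
    long acc = 0;
    for (size_t k = 0; k < SEGS; k++)
      acc += seg[k][i];
    surround[i] = (short)(acc / SEGS); /* plain average */
  }
  for (size_t i = 0; i < BLK; i++)
    printf("%d ", surround[i]); /* 11 -9 6 -4 */
  printf("\n");
  return 0;
}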
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/enhancer.h b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/enhancer.h
new file mode 100644
index 0000000000..386949347a
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/enhancer.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_Enhancer.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_ENHANCER_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_ENHANCER_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+/*----------------------------------------------------------------*
+ * perform enhancement on idata+centerStartPos through
+ * idata+centerStartPos+ENH_BLOCKL-1
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_Enhancer(
+ int16_t* odata, /* (o) smoothed block, dimension blockl */
+ int16_t* idata, /* (i) data buffer used for enhancing */
+ size_t idatal, /* (i) dimension idata */
+ size_t centerStartPos, /* (i) first sample current block within idata */
+    size_t* period, /* (i) pitch period array (pitch backward in time) */
+ const size_t* plocs, /* (i) locations where period array values valid */
+ size_t periodl /* (i) dimension of period and plocs */
+ );
+
+#endif
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/enhancer_interface.c b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/enhancer_interface.c
new file mode 100644
index 0000000000..ca23e19ae3
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/enhancer_interface.c
@@ -0,0 +1,382 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_EnhancerInterface.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/enhancer_interface.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+#include "modules/audio_coding/codecs/ilbc/constants.h"
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+#include "modules/audio_coding/codecs/ilbc/enhancer.h"
+#include "modules/audio_coding/codecs/ilbc/hp_output.h"
+#include "modules/audio_coding/codecs/ilbc/xcorr_coef.h"
+
+
+
+/*----------------------------------------------------------------*
+ * interface for enhancer
+ *---------------------------------------------------------------*/
+
+size_t // (o) Estimated lag in end of in[]
+ WebRtcIlbcfix_EnhancerInterface(
+ int16_t* out, // (o) enhanced signal
+ const int16_t* in, // (i) unenhanced signal
+ IlbcDecoder* iLBCdec_inst) { // (i) buffers etc
+ size_t iblock;
+ size_t lag=20, tlag=20;
+ size_t inLen=iLBCdec_inst->blockl+120;
+ int16_t scale, scale1;
+ size_t plc_blockl;
+ int16_t *enh_buf;
+ size_t *enh_period;
+ int32_t tmp1, tmp2, max;
+ size_t new_blocks;
+ int16_t *enh_bufPtr1;
+ size_t i;
+ size_t k;
+ int16_t EnChange;
+ int16_t SqrtEnChange;
+ int16_t inc;
+ int16_t win;
+ int16_t *tmpW16ptr;
+ size_t startPos;
+ int16_t *plc_pred;
+ const int16_t *target, *regressor;
+ int16_t max16;
+ int shifts;
+ int32_t ener;
+ int16_t enerSh;
+ int16_t corrSh;
+ size_t ind;
+ int16_t sh;
+ size_t start, stop;
+ /* Stack based */
+ int16_t totsh[3];
+ int16_t downsampled[(BLOCKL_MAX+120)>>1]; /* length 180 */
+ int32_t corr32[50];
+ int32_t corrmax[3];
+ int16_t corr16[3];
+ int16_t en16[3];
+ size_t lagmax[3];
+
+ plc_pred = downsampled; /* Reuse memory since plc_pred[ENH_BLOCKL] and
+ downsampled are non overlapping */
+ enh_buf=iLBCdec_inst->enh_buf;
+ enh_period=iLBCdec_inst->enh_period;
+
+ /* Copy in the new data into the enhancer buffer */
+ memmove(enh_buf, &enh_buf[iLBCdec_inst->blockl],
+ (ENH_BUFL - iLBCdec_inst->blockl) * sizeof(*enh_buf));
+
+ WEBRTC_SPL_MEMCPY_W16(&enh_buf[ENH_BUFL-iLBCdec_inst->blockl], in,
+ iLBCdec_inst->blockl);
+
+ /* Set variables that are dependent on frame size */
+ if (iLBCdec_inst->mode==30) {
+ plc_blockl=ENH_BLOCKL;
+ new_blocks=3;
+ startPos=320; /* Start position for enhancement
+ (640-new_blocks*ENH_BLOCKL-80) */
+ } else {
+ plc_blockl=40;
+ new_blocks=2;
+ startPos=440; /* Start position for enhancement
+ (640-new_blocks*ENH_BLOCKL-40) */
+ }
+
+ /* Update the pitch prediction for each enhancer block, move the old ones */
+ memmove(enh_period, &enh_period[new_blocks],
+ (ENH_NBLOCKS_TOT - new_blocks) * sizeof(*enh_period));
+
+ WebRtcSpl_DownsampleFast(
+ enh_buf+ENH_BUFL-inLen, /* Input samples */
+ inLen + ENH_BUFL_FILTEROVERHEAD,
+ downsampled,
+ inLen / 2,
+ (int16_t*)WebRtcIlbcfix_kLpFiltCoefs, /* Coefficients in Q12 */
+      FILTERORDER_DS_PLUS1, /* Length of filter (order+1) */
+ FACTOR_DS,
+ DELAY_DS);
+
+ /* Estimate the pitch in the down sampled domain. */
+ for(iblock = 0; iblock<new_blocks; iblock++){
+
+ /* references */
+ target = downsampled + 60 + iblock * ENH_BLOCKL_HALF;
+ regressor = target - 10;
+
+ /* scaling */
+ max16 = WebRtcSpl_MaxAbsValueW16(&regressor[-50], ENH_BLOCKL_HALF + 50 - 1);
+ shifts = WebRtcSpl_GetSizeInBits((uint32_t)(max16 * max16)) - 25;
+ shifts = WEBRTC_SPL_MAX(0, shifts);
+
+ /* compute cross correlation */
+ WebRtcSpl_CrossCorrelation(corr32, target, regressor, ENH_BLOCKL_HALF, 50,
+ shifts, -1);
+
+ /* Find 3 highest correlations that should be compared for the
+ highest (corr*corr)/ener */
+
+ for (i=0;i<2;i++) {
+ lagmax[i] = WebRtcSpl_MaxIndexW32(corr32, 50);
+ corrmax[i] = corr32[lagmax[i]];
+ start = WEBRTC_SPL_MAX(2, lagmax[i]) - 2;
+ stop = WEBRTC_SPL_MIN(47, lagmax[i]) + 2;
+ for (k = start; k <= stop; k++) {
+ corr32[k] = 0;
+ }
+ }
+ lagmax[2] = WebRtcSpl_MaxIndexW32(corr32, 50);
+ corrmax[2] = corr32[lagmax[2]];
+
+ /* Calculate normalized corr^2 and ener */
+ for (i=0;i<3;i++) {
+ corrSh = 15-WebRtcSpl_GetSizeInBits(corrmax[i]);
+ ener = WebRtcSpl_DotProductWithScale(regressor - lagmax[i],
+ regressor - lagmax[i],
+ ENH_BLOCKL_HALF, shifts);
+ enerSh = 15-WebRtcSpl_GetSizeInBits(ener);
+ corr16[i] = (int16_t)WEBRTC_SPL_SHIFT_W32(corrmax[i], corrSh);
+ corr16[i] = (int16_t)((corr16[i] * corr16[i]) >> 16);
+ en16[i] = (int16_t)WEBRTC_SPL_SHIFT_W32(ener, enerSh);
+ totsh[i] = enerSh - 2 * corrSh;
+ }
+
+    /* Compare lagmax[0..2] for the (corr^2)/ener criteria */
+ ind = 0;
+ for (i=1; i<3; i++) {
+ if (totsh[ind] > totsh[i]) {
+ sh = WEBRTC_SPL_MIN(31, totsh[ind]-totsh[i]);
+ if (corr16[ind] * en16[i] < (corr16[i] * en16[ind]) >> sh) {
+ ind = i;
+ }
+ } else {
+ sh = WEBRTC_SPL_MIN(31, totsh[i]-totsh[ind]);
+ if ((corr16[ind] * en16[i]) >> sh < corr16[i] * en16[ind]) {
+ ind = i;
+ }
+ }
+ }
+
+ lag = lagmax[ind] + 10;
+
+ /* Store the estimated lag in the non-downsampled domain */
+ enh_period[ENH_NBLOCKS_TOT - new_blocks + iblock] = lag * 8;
+
+ /* Store the estimated lag for backward PLC */
+ if (iLBCdec_inst->prev_enh_pl==1) {
+ if (!iblock) {
+ tlag = lag * 2;
+ }
+ } else {
+ if (iblock==1) {
+ tlag = lag * 2;
+ }
+ }
+
+ lag *= 2;
+ }
+
+ if ((iLBCdec_inst->prev_enh_pl==1)||(iLBCdec_inst->prev_enh_pl==2)) {
+
+ /* Calculate the best lag of the new frame
+ This is used to interpolate backwards and mix with the PLC'd data
+ */
+
+ /* references */
+ target=in;
+ regressor=in+tlag-1;
+
+ /* scaling */
+ // Note that this is not abs-max, so we will take the absolute value below.
+ max16 = WebRtcSpl_MaxAbsElementW16(regressor, plc_blockl + 3 - 1);
+ const int16_t max_target =
+ WebRtcSpl_MaxAbsElementW16(target, plc_blockl + 3 - 1);
+ const int64_t max_val = plc_blockl * abs(max16 * max_target);
+ const int32_t factor = max_val >> 31;
+ shifts = factor == 0 ? 0 : 31 - WebRtcSpl_NormW32(factor);
+
+ /* compute cross correlation */
+ WebRtcSpl_CrossCorrelation(corr32, target, regressor, plc_blockl, 3, shifts,
+ 1);
+
+ /* find lag */
+ lag=WebRtcSpl_MaxIndexW32(corr32, 3);
+ lag+=tlag-1;
+
+ /* Copy the backward PLC to plc_pred */
+
+ if (iLBCdec_inst->prev_enh_pl==1) {
+ if (lag>plc_blockl) {
+ WEBRTC_SPL_MEMCPY_W16(plc_pred, &in[lag-plc_blockl], plc_blockl);
+ } else {
+ WEBRTC_SPL_MEMCPY_W16(&plc_pred[plc_blockl-lag], in, lag);
+ WEBRTC_SPL_MEMCPY_W16(
+ plc_pred, &enh_buf[ENH_BUFL-iLBCdec_inst->blockl-plc_blockl+lag],
+ (plc_blockl-lag));
+ }
+ } else {
+ size_t pos;
+
+ pos = plc_blockl;
+
+ while (lag<pos) {
+ WEBRTC_SPL_MEMCPY_W16(&plc_pred[pos-lag], in, lag);
+ pos = pos - lag;
+ }
+ WEBRTC_SPL_MEMCPY_W16(plc_pred, &in[lag-pos], pos);
+
+ }
+
+ if (iLBCdec_inst->prev_enh_pl==1) {
+ /* limit energy change
+ if energy in backward PLC is more than 4 times higher than the forward
+ PLC, then reduce the energy in the backward PLC vector:
+         samples 1...len-16: set the energy to 4 times the forward PLC energy
+         samples len-15..len: interpolate between 4 times fw PLC and bw PLC energy
+
+ Note: Compared to floating point code there is a slight change,
+ the window is 16 samples long instead of 10 samples to simplify the
+ calculations
+ */
+
+ max=WebRtcSpl_MaxAbsValueW16(
+ &enh_buf[ENH_BUFL-iLBCdec_inst->blockl-plc_blockl], plc_blockl);
+ max16=WebRtcSpl_MaxAbsValueW16(plc_pred, plc_blockl);
+ max = WEBRTC_SPL_MAX(max, max16);
+ scale=22-(int16_t)WebRtcSpl_NormW32(max);
+ scale=WEBRTC_SPL_MAX(scale,0);
+
+ tmp2 = WebRtcSpl_DotProductWithScale(
+ &enh_buf[ENH_BUFL-iLBCdec_inst->blockl-plc_blockl],
+ &enh_buf[ENH_BUFL-iLBCdec_inst->blockl-plc_blockl],
+ plc_blockl, scale);
+ tmp1 = WebRtcSpl_DotProductWithScale(plc_pred, plc_pred,
+ plc_blockl, scale);
+
+ /* Check the energy difference */
+ if ((tmp1>0)&&((tmp1>>2)>tmp2)) {
+ /* EnChange is now guaranteed to be <0.5
+ Calculate EnChange=tmp2/tmp1 in Q16
+ */
+
+ scale1=(int16_t)WebRtcSpl_NormW32(tmp1);
+ tmp1=WEBRTC_SPL_SHIFT_W32(tmp1, (scale1-16)); /* using 15 bits */
+
+ tmp2=WEBRTC_SPL_SHIFT_W32(tmp2, (scale1));
+ EnChange = (int16_t)WebRtcSpl_DivW32W16(tmp2,
+ (int16_t)tmp1);
+
+ /* Calculate the Sqrt of the energy in Q15 ((14+16)/2) */
+ SqrtEnChange = (int16_t)WebRtcSpl_SqrtFloor(EnChange << 14);
+
+
+ /* Multiply first part of vector with 2*SqrtEnChange */
+ WebRtcSpl_ScaleVector(plc_pred, plc_pred, SqrtEnChange, plc_blockl-16,
+ 14);
+
+ /* Calculate increase parameter for window part (16 last samples) */
+ /* (1-2*SqrtEnChange)/16 in Q15 */
+ inc = 2048 - (SqrtEnChange >> 3);
+
+ win=0;
+ tmpW16ptr=&plc_pred[plc_blockl-16];
+
+ for (i=16;i>0;i--) {
+ *tmpW16ptr = (int16_t)(
+ (*tmpW16ptr * (SqrtEnChange + (win >> 1))) >> 14);
+ /* multiply by (2.0*SqrtEnChange+win) */
+
+ win += inc;
+ tmpW16ptr++;
+ }
+ }
+
+ /* Make the linear interpolation between the forward PLC'd data
+ and the backward PLC'd data (from the new frame)
+ */
+
+ if (plc_blockl==40) {
+ inc=400; /* 1/41 in Q14 */
+ } else { /* plc_blockl==80 */
+ inc=202; /* 1/81 in Q14 */
+ }
+ win=0;
+ enh_bufPtr1=&enh_buf[ENH_BUFL-1-iLBCdec_inst->blockl];
+ for (i=0; i<plc_blockl; i++) {
+ win+=inc;
+ *enh_bufPtr1 = (int16_t)((*enh_bufPtr1 * win) >> 14);
+ *enh_bufPtr1 += (int16_t)(
+ ((16384 - win) * plc_pred[plc_blockl - 1 - i]) >> 14);
+ enh_bufPtr1--;
+ }
+ } else {
+ int16_t *synt = &downsampled[LPC_FILTERORDER];
+
+ enh_bufPtr1=&enh_buf[ENH_BUFL-iLBCdec_inst->blockl-plc_blockl];
+ WEBRTC_SPL_MEMCPY_W16(enh_bufPtr1, plc_pred, plc_blockl);
+
+    /* Clear filter memory */
+ WebRtcSpl_MemSetW16(iLBCdec_inst->syntMem, 0, LPC_FILTERORDER);
+ WebRtcSpl_MemSetW16(iLBCdec_inst->hpimemy, 0, 4);
+ WebRtcSpl_MemSetW16(iLBCdec_inst->hpimemx, 0, 2);
+
+ /* Initialize filter memory by filtering through 2 lags */
+ WEBRTC_SPL_MEMCPY_W16(&synt[-LPC_FILTERORDER], iLBCdec_inst->syntMem,
+ LPC_FILTERORDER);
+ WebRtcSpl_FilterARFastQ12(
+ enh_bufPtr1,
+ synt,
+ &iLBCdec_inst->old_syntdenum[
+ (iLBCdec_inst->nsub-1)*(LPC_FILTERORDER+1)],
+ LPC_FILTERORDER+1, lag);
+
+ WEBRTC_SPL_MEMCPY_W16(&synt[-LPC_FILTERORDER], &synt[lag-LPC_FILTERORDER],
+ LPC_FILTERORDER);
+ WebRtcIlbcfix_HpOutput(synt, (int16_t*)WebRtcIlbcfix_kHpOutCoefs,
+ iLBCdec_inst->hpimemy, iLBCdec_inst->hpimemx,
+ lag);
+ WebRtcSpl_FilterARFastQ12(
+ enh_bufPtr1, synt,
+ &iLBCdec_inst->old_syntdenum[
+ (iLBCdec_inst->nsub-1)*(LPC_FILTERORDER+1)],
+ LPC_FILTERORDER+1, lag);
+
+ WEBRTC_SPL_MEMCPY_W16(iLBCdec_inst->syntMem, &synt[lag-LPC_FILTERORDER],
+ LPC_FILTERORDER);
+ WebRtcIlbcfix_HpOutput(synt, (int16_t*)WebRtcIlbcfix_kHpOutCoefs,
+ iLBCdec_inst->hpimemy, iLBCdec_inst->hpimemx,
+ lag);
+ }
+ }
+
+
+ /* Perform enhancement block by block */
+
+ for (iblock = 0; iblock<new_blocks; iblock++) {
+ WebRtcIlbcfix_Enhancer(out + iblock * ENH_BLOCKL,
+ enh_buf,
+ ENH_BUFL,
+ iblock * ENH_BLOCKL + startPos,
+ enh_period,
+ WebRtcIlbcfix_kEnhPlocs, ENH_NBLOCKS_TOT);
+ }
+
+ return (lag);
+}
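The lag selection above compares (corr^2)/ener across candidates without ever dividing: each candidate carries its own binary exponent (totsh), so the code cross-multiplies and shifts the larger-exponent side down, capping the shift at 31. A stand-alone sketch of that comparison; the function name and the Q-values in main are hypothetical.

#include <stdint.h>
#include <stdio.h>

/* Returns 1 if (a0/b0)*2^e0 > (a1/b1)*2^e1, using cross-multiplication
   and a capped shift instead of division. */
static int ratio_gt(int16_t a0, int16_t b0, int e0,
                    int16_t a1, int16_t b1, int e1) {
  int32_t lhs = (int32_t)a0 * b1;
  int32_t rhs = (int32_t)a1 * b0;
  if (e0 > e1)
    rhs >>= (e0 - e1 > 31) ? 31 : (e0 - e1);
  else
    lhs >>= (e1 - e0 > 31) ? 31 : (e1 - e0);
  return lhs > rhs;
}

int main(void) {
  /* (16384/8192)*2^1 = 4.0 vs (24576/16384)*2^0 = 1.5 -> prints 1 */
  printf("%d\n", ratio_gt(16384, 8192, 1, 24576, 16384, 0));
  return 0;
}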
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/enhancer_interface.h b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/enhancer_interface.h
new file mode 100644
index 0000000000..5022a47c3a
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/enhancer_interface.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_EnhancerInterface.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_ENHANCER_INTERFACE_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_ENHANCER_INTERFACE_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ * interface for enhancer
+ *---------------------------------------------------------------*/
+
+size_t // (o) Estimated lag in end of in[]
+WebRtcIlbcfix_EnhancerInterface(int16_t* out, // (o) enhanced signal
+ const int16_t* in, // (i) unenhanced signal
+ IlbcDecoder* iLBCdec_inst); // (i) buffers etc
+
+#endif
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/filtered_cb_vecs.c b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/filtered_cb_vecs.c
new file mode 100644
index 0000000000..6b4f30c96b
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/filtered_cb_vecs.c
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_FilteredCbVecs.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/filtered_cb_vecs.h"
+
+#include "modules/audio_coding/codecs/ilbc/constants.h"
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ * Construct an additional codebook vector by filtering the
+ * initial codebook buffer. This vector is then used to expand
+ * the codebook with an additional section.
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_FilteredCbVecs(
+ int16_t *cbvectors, /* (o) Codebook vector for the higher section */
+ int16_t *CBmem, /* (i) Codebook memory that is filtered to create a
+ second CB section */
+ size_t lMem, /* (i) Length of codebook memory */
+ size_t samples /* (i) Number of samples to filter */
+ ) {
+
+ /* Set up the memory, start with zero state */
+ WebRtcSpl_MemSetW16(CBmem+lMem, 0, CB_HALFFILTERLEN);
+ WebRtcSpl_MemSetW16(CBmem-CB_HALFFILTERLEN, 0, CB_HALFFILTERLEN);
+ WebRtcSpl_MemSetW16(cbvectors, 0, lMem-samples);
+
+ /* Filter to obtain the filtered CB memory */
+
+ WebRtcSpl_FilterMAFastQ12(
+ CBmem+CB_HALFFILTERLEN+lMem-samples, cbvectors+lMem-samples,
+ (int16_t*)WebRtcIlbcfix_kCbFiltersRev, CB_FILTERLEN, samples);
+
+ return;
+}
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/filtered_cb_vecs.h b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/filtered_cb_vecs.h
new file mode 100644
index 0000000000..661262e42e
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/filtered_cb_vecs.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_FilteredCbVecs.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_FILTERED_CB_VECS_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_FILTERED_CB_VECS_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+/*----------------------------------------------------------------*
+ * Construct an additional codebook vector by filtering the
+ * initial codebook buffer. This vector is then used to expand
+ * the codebook with an additional section.
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_FilteredCbVecs(
+ int16_t* cbvectors, /* (o) Codebook vector for the higher section */
+ int16_t* CBmem, /* (i) Codebook memory that is filtered to create a
+ second CB section */
+ size_t lMem, /* (i) Length of codebook memory */
+ size_t samples /* (i) Number of samples to filter */
+ );
+
+#endif
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/frame_classify.c b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/frame_classify.c
new file mode 100644
index 0000000000..c1084b1645
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/frame_classify.c
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_FrameClassify.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/frame_classify.h"
+
+#include "modules/audio_coding/codecs/ilbc/constants.h"
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ * Classification of subframes to localize start state
+ *---------------------------------------------------------------*/
+
+size_t WebRtcIlbcfix_FrameClassify(
+ /* (o) Index to the max-energy sub frame */
+ IlbcEncoder *iLBCenc_inst,
+ /* (i/o) the encoder state structure */
+ int16_t *residualFIX /* (i) lpc residual signal */
+ ){
+ int16_t max, scale;
+ int32_t ssqEn[NSUB_MAX-1];
+ int16_t *ssqPtr;
+ int32_t *seqEnPtr;
+ int32_t maxW32;
+ int16_t scale1;
+ size_t pos;
+ size_t n;
+
+ /*
+    Calculate the energy of each of the 80 sample blocks.
+    In the draft, the first and last 4 samples are windowed with 1/5...4/5
+    and 4/5...1/5 respectively. To simplify the fixed-point implementation,
+    this has been changed to 0 0 1 1 and 1 1 0 0.
+ */
+
+ max = WebRtcSpl_MaxAbsValueW16(residualFIX, iLBCenc_inst->blockl);
+ scale = WebRtcSpl_GetSizeInBits((uint32_t)(max * max));
+
+ /* Scale to maximum 24 bits so that it won't overflow for 76 samples */
+ scale = scale-24;
+ scale1 = WEBRTC_SPL_MAX(0, scale);
+
+ /* Calculate energies */
+ ssqPtr=residualFIX + 2;
+ seqEnPtr=ssqEn;
+ for (n=(iLBCenc_inst->nsub-1); n>0; n--) {
+ (*seqEnPtr) = WebRtcSpl_DotProductWithScale(ssqPtr, ssqPtr, 76, scale1);
+ ssqPtr += 40;
+ seqEnPtr++;
+ }
+
+ /* Scale to maximum 20 bits in order to allow for the 11 bit window */
+ maxW32 = WebRtcSpl_MaxValueW32(ssqEn, iLBCenc_inst->nsub - 1);
+ scale = WebRtcSpl_GetSizeInBits(maxW32) - 20;
+ scale1 = WEBRTC_SPL_MAX(0, scale);
+
+  /* Window each 80 sample block with the ssqEn_winTbl window to give higher
+     probability to the blocks in the middle
+ */
+ seqEnPtr=ssqEn;
+ if (iLBCenc_inst->mode==20) {
+ ssqPtr=(int16_t*)WebRtcIlbcfix_kStartSequenceEnrgWin+1;
+ } else {
+ ssqPtr=(int16_t*)WebRtcIlbcfix_kStartSequenceEnrgWin;
+ }
+ for (n=(iLBCenc_inst->nsub-1); n>0; n--) {
+ (*seqEnPtr)=WEBRTC_SPL_MUL(((*seqEnPtr)>>scale1), (*ssqPtr));
+ seqEnPtr++;
+ ssqPtr++;
+ }
+
+  /* Extract the best choice of start state */
+ pos = WebRtcSpl_MaxIndexW32(ssqEn, iLBCenc_inst->nsub - 1) + 1;
+
+ return(pos);
+}
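Stripped of the fixed-point scaling, the classifier reduces to: per-block energies, a center-weighted window, argmax. A stand-alone sketch with a toy block length and window; the codec uses 76-sample blocks and the WebRtcIlbcfix_kStartSequenceEnrgWin table, while everything below is made up for illustration.

#include <stddef.h>
#include <stdio.h>

#define NBLOCKS 5
#define BLOCKLEN 8

int main(void) {
  short res[NBLOCKS * BLOCKLEN] = {
      1, 2, 1, 0, 3, 1, 2, 0,  5, 6, 4, 7, 5, 6, 4, 5,
      9, 8, 9, 7, 8, 9, 8, 9,  6, 5, 6, 4, 5, 6, 5, 4,
      2, 1, 2, 0, 1, 2, 1, 0};
  long win[NBLOCKS] = {2, 3, 4, 3, 2}; /* favor central blocks */
  long best = -1;
  size_t best_idx = 0;
  for (size_t n = 0; n < NBLOCKS; n++) {
    long en = 0;
    for (size_t i = 0; i < BLOCKLEN; i++)
      en += (long)res[n * BLOCKLEN + i] * res[n * BLOCKLEN + i];
    en *= win[n]; /* window the energy */
    if (en > best) {
      best = en;
      best_idx = n;
    }
  }
  printf("start state block: %zu\n", best_idx); /* block 2 here */
  return 0;
}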
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/frame_classify.h b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/frame_classify.h
new file mode 100644
index 0000000000..7615106d70
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/frame_classify.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_FrameClassify.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_FRAME_CLASSIFY_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_FRAME_CLASSIFY_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+size_t WebRtcIlbcfix_FrameClassify(
+ /* (o) Index to the max-energy sub frame */
+ IlbcEncoder* iLBCenc_inst,
+ /* (i/o) the encoder state structure */
+ int16_t* residualFIX /* (i) lpc residual signal */
+ );
+
+#endif
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/gain_dequant.c b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/gain_dequant.c
new file mode 100644
index 0000000000..1357dece33
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/gain_dequant.c
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_GainDequant.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/gain_dequant.h"
+
+#include "modules/audio_coding/codecs/ilbc/constants.h"
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ * decoder for quantized gains in the gain-shape coding of
+ * residual
+ *---------------------------------------------------------------*/
+
+int16_t WebRtcIlbcfix_GainDequant(
+ /* (o) quantized gain value (Q14) */
+ int16_t index, /* (i) quantization index */
+ int16_t maxIn, /* (i) maximum of unquantized gain (Q14) */
+ int16_t stage /* (i) The stage of the search */
+ ){
+ int16_t scale;
+ const int16_t *gain;
+
+ /* obtain correct scale factor */
+
+ scale=WEBRTC_SPL_ABS_W16(maxIn);
+ scale = WEBRTC_SPL_MAX(1638, scale); /* if lower than 0.1, set it to 0.1 */
+
+ /* select the quantization table and return the decoded value */
+ gain = WebRtcIlbcfix_kGain[stage];
+
+ return (int16_t)((scale * gain[index] + 8192) >> 14);
+}
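
The final line is the standard Q14 multiply-round-shift idiom. A self-contained sketch of just that step (the helper name is illustrative, not from this tree):

#include <stdint.h>

/* Multiply two Q14 values; the product is Q28, so adding 2^13 (= 8192)
   rounds the result and shifting right by 14 returns it to Q14. */
static int16_t q14_mul_round(int16_t a_q14, int16_t b_q14) {
  return (int16_t)(((int32_t)a_q14 * b_q14 + 8192) >> 14);
}
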
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/gain_dequant.h b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/gain_dequant.h
new file mode 100644
index 0000000000..2b97550b6c
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/gain_dequant.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_GainDequant.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_GAIN_DEQUANT_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_GAIN_DEQUANT_H_
+
+#include <stdint.h>
+
+/*----------------------------------------------------------------*
+ * decoder for quantized gains in the gain-shape coding of
+ * residual
+ *---------------------------------------------------------------*/
+
+int16_t WebRtcIlbcfix_GainDequant(
+ /* (o) quantized gain value (Q14) */
+ int16_t index, /* (i) quantization index */
+ int16_t maxIn, /* (i) maximum of unquantized gain (Q14) */
+ int16_t stage /* (i) The stage of the search */
+ );
+
+#endif
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/gain_quant.c b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/gain_quant.c
new file mode 100644
index 0000000000..9a6d49d51a
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/gain_quant.c
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_GainQuant.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/gain_quant.h"
+
+#include "modules/audio_coding/codecs/ilbc/constants.h"
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ * quantizer for the gain in the gain-shape coding of residual
+ *---------------------------------------------------------------*/
+
+int16_t WebRtcIlbcfix_GainQuant( /* (o) quantized gain value */
+ int16_t gain, /* (i) gain value Q14 */
+ int16_t maxIn, /* (i) maximum of gain value Q14 */
+ int16_t stage, /* (i) The stage of the search */
+ int16_t *index /* (o) quantization index */
+ ) {
+
+ int16_t scale, cblen;
+ int32_t gainW32, measure1, measure2;
+ const int16_t *cbPtr, *cb;
+ int loc, noMoves, noChecks, i;
+
+ /* ensure a lower bound (0.1) on the scaling factor */
+
+ scale = WEBRTC_SPL_MAX(1638, maxIn);
+
+ /* select the quantization table and calculate
+ the length of the table and the number of
+ steps in the binary search that are needed */
+ cb = WebRtcIlbcfix_kGain[stage];
+ cblen = 32>>stage;
+ noChecks = 4-stage;
+
+  /* Multiply the gain by 2^14 to make the comparison
+     easier and more precise */
+ gainW32 = gain << 14;
+
+ /* Do a binary search, starting in the middle of the CB
+ loc - defines the current position in the table
+ noMoves - defines the number of steps to move in the CB in order
+ to get next CB location
+ */
+
+ loc = cblen>>1;
+ noMoves = loc;
+ cbPtr = cb + loc; /* Centre of CB */
+
+ for (i=noChecks;i>0;i--) {
+ noMoves>>=1;
+ measure1 = scale * *cbPtr;
+
+ /* Move up if gain is larger, otherwise move down in table */
+ measure1 = measure1 - gainW32;
+
+ if (0>measure1) {
+ cbPtr+=noMoves;
+ loc+=noMoves;
+ } else {
+ cbPtr-=noMoves;
+ loc-=noMoves;
+ }
+ }
+
+ /* Check which value is the closest one: loc-1, loc or loc+1 */
+
+ measure1 = scale * *cbPtr;
+ if (gainW32>measure1) {
+ /* Check against value above loc */
+ measure2 = scale * cbPtr[1];
+ if ((measure2-gainW32)<(gainW32-measure1)) {
+ loc+=1;
+ }
+ } else {
+ /* Check against value below loc */
+ measure2 = scale * cbPtr[-1];
+ if ((gainW32-measure2)<=(measure1-gainW32)) {
+ loc-=1;
+ }
+ }
+
+ /* Guard against getting outside the table. The calculation above can give a location
+ which is one above the maximum value (in very rare cases) */
+ loc=WEBRTC_SPL_MIN(loc, (cblen-1));
+ *index=loc;
+
+ /* Calculate and return the quantized gain value (in Q14) */
+ return (int16_t)((scale * cb[loc] + 8192) >> 14);
+}
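
The search pattern above — halve the step each iteration, then refine by one position — works on any sorted table. A sketch of the pattern under that assumption (this is not the real gain codebook, just the same control flow):

#include <stdint.h>

/* Find the entry of a sorted Q14 table whose scaled value is closest to
   `target`, as in the quantizer above. */
static int nearest_index(const int16_t *cb, int cblen, int32_t scale,
                         int32_t target) {
  int loc = cblen >> 1; /* start in the middle of the table */
  int step = loc;
  while ((step >>= 1) > 0) {
    if ((int32_t)scale * cb[loc] < target)
      loc += step; /* target is larger: move up */
    else
      loc -= step; /* otherwise move down */
  }
  /* One-position refinement, mirroring the loc-1/loc/loc+1 check above. */
  if (target > (int32_t)scale * cb[loc]) {
    if (loc + 1 < cblen &&
        (int32_t)scale * cb[loc + 1] - target <
            target - (int32_t)scale * cb[loc])
      loc++;
  } else if (loc > 0 && target - (int32_t)scale * cb[loc - 1] <=
                            (int32_t)scale * cb[loc] - target) {
    loc--;
  }
  return loc;
}
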
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/gain_quant.h b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/gain_quant.h
new file mode 100644
index 0000000000..761f7d2f79
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/gain_quant.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_GainQuant.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_GAIN_QUANT_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_GAIN_QUANT_H_
+
+#include <stdint.h>
+
+/*----------------------------------------------------------------*
+ * quantizer for the gain in the gain-shape coding of residual
+ *---------------------------------------------------------------*/
+
+int16_t
+WebRtcIlbcfix_GainQuant( /* (o) quantized gain value */
+ int16_t gain, /* (i) gain value Q14 */
+ int16_t maxIn, /* (i) maximum of gain value Q14 */
+ int16_t stage, /* (i) The stage of the search */
+ int16_t* index /* (o) quantization index */
+ );
+
+#endif
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/get_cd_vec.c b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/get_cd_vec.c
new file mode 100644
index 0000000000..e9cd2008e0
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/get_cd_vec.c
@@ -0,0 +1,126 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_GetCbVec.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/get_cd_vec.h"
+
+#include "modules/audio_coding/codecs/ilbc/constants.h"
+#include "modules/audio_coding/codecs/ilbc/create_augmented_vec.h"
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ * Construct codebook vector for given index.
+ *---------------------------------------------------------------*/
+
+bool WebRtcIlbcfix_GetCbVec(
+ int16_t *cbvec, /* (o) Constructed codebook vector */
+ int16_t *mem, /* (i) Codebook buffer */
+ size_t index, /* (i) Codebook index */
+ size_t lMem, /* (i) Length of codebook buffer */
+ size_t cbveclen /* (i) Codebook vector length */
+ ){
+ size_t k, base_size;
+ size_t lag;
+ /* Stack based */
+ int16_t tempbuff2[SUBL+5];
+
+ /* Determine size of codebook sections */
+
+ base_size=lMem-cbveclen+1;
+
+ if (cbveclen==SUBL) {
+ base_size += cbveclen / 2;
+ }
+
+ /* No filter -> First codebook section */
+
+ if (index<lMem-cbveclen+1) {
+
+ /* first non-interpolated vectors */
+
+ k=index+cbveclen;
+ /* get vector */
+ WEBRTC_SPL_MEMCPY_W16(cbvec, mem+lMem-k, cbveclen);
+
+ } else if (index < base_size) {
+
+ /* Calculate lag */
+
+ k = (2 * (index - (lMem - cbveclen + 1))) + cbveclen;
+
+ lag = k / 2;
+
+ WebRtcIlbcfix_CreateAugmentedVec(lag, mem+lMem, cbvec);
+
+ }
+
+  /* Higher codebook section based on filtering */
+
+ else {
+
+ size_t memIndTest;
+
+ /* first non-interpolated vectors */
+
+ if (index-base_size<lMem-cbveclen+1) {
+
+ /* Set up filter memory, stuff zeros outside memory buffer */
+
+ memIndTest = lMem-(index-base_size+cbveclen);
+
+ WebRtcSpl_MemSetW16(mem-CB_HALFFILTERLEN, 0, CB_HALFFILTERLEN);
+ WebRtcSpl_MemSetW16(mem+lMem, 0, CB_HALFFILTERLEN);
+
+ /* do filtering to get the codebook vector */
+
+ WebRtcSpl_FilterMAFastQ12(
+ &mem[memIndTest+4], cbvec, (int16_t*)WebRtcIlbcfix_kCbFiltersRev,
+ CB_FILTERLEN, cbveclen);
+ }
+
+ /* interpolated vectors */
+
+ else {
+ if (cbveclen < SUBL) {
+ // We're going to fill in cbveclen + 5 elements of tempbuff2 in
+ // WebRtcSpl_FilterMAFastQ12, less than the SUBL + 5 elements we'll be
+ // using in WebRtcIlbcfix_CreateAugmentedVec. This error is caused by
+ // bad values in `index` (which come from the encoded stream). Tell the
+ // caller that things went south, and that the decoder state is now
+ // corrupt (because it's half-way through an update that we can't
+ // complete).
+ return false;
+ }
+
+ /* Stuff zeros outside memory buffer */
+ memIndTest = lMem-cbveclen-CB_FILTERLEN;
+ WebRtcSpl_MemSetW16(mem+lMem, 0, CB_HALFFILTERLEN);
+
+ /* do filtering */
+ WebRtcSpl_FilterMAFastQ12(
+ &mem[memIndTest+7], tempbuff2, (int16_t*)WebRtcIlbcfix_kCbFiltersRev,
+ CB_FILTERLEN, cbveclen+5);
+
+ /* Calculate lag index */
+ lag = (cbveclen<<1)-20+index-base_size-lMem-1;
+
+ WebRtcIlbcfix_CreateAugmentedVec(lag, tempbuff2+SUBL+5, cbvec);
+ }
+ }
+
+ return true; // Success.
+}
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/get_cd_vec.h b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/get_cd_vec.h
new file mode 100644
index 0000000000..99537dd0f7
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/get_cd_vec.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_GetCbVec.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_GET_CD_VEC_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_GET_CD_VEC_H_
+
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#include "absl/base/attributes.h"
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+// Returns true on success, false on failure. In case of failure, the decoder
+// state may be corrupted and needs resetting.
+ABSL_MUST_USE_RESULT
+bool WebRtcIlbcfix_GetCbVec(
+ int16_t* cbvec, /* (o) Constructed codebook vector */
+ int16_t* mem, /* (i) Codebook buffer */
+ size_t index, /* (i) Codebook index */
+ size_t lMem, /* (i) Length of codebook buffer */
+ size_t cbveclen /* (i) Codebook vector length */
+);
+
+#endif
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/get_lsp_poly.c b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/get_lsp_poly.c
new file mode 100644
index 0000000000..e0fb21caf0
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/get_lsp_poly.c
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_GetLspPoly.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/get_lsp_poly.h"
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ * Construct the polynomials F1(z) and F2(z) from the LSP
+ * (Computations are done in Q24)
+ *
+ * The expansion is performed using the following recursion:
+ *
+ * f[0] = 1;
+ * f[1] = -2.0 * lsp[0];
+ * for (i=2; i<=5; i++) {
+ *   b = -2.0 * lsp[2*i-2];
+ *   f[i] = b*f[i-1] + 2.0*f[i-2];
+ *   for (j=i-1; j>=2; j--) {
+ *     f[j] = f[j] + b*f[j-1] + f[j-2];
+ *   }
+ *   f[1] = f[1] + b;
+ * }
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_GetLspPoly(
+ int16_t *lsp, /* (i) LSP in Q15 */
+    int32_t *f)  /* (o) polynomial in Q24 */
+{
+ int32_t tmpW32;
+ int i, j;
+ int16_t high, low;
+ int16_t *lspPtr;
+ int32_t *fPtr;
+
+ lspPtr = lsp;
+ fPtr = f;
+ /* f[0] = 1.0 (Q24) */
+ (*fPtr) = (int32_t)16777216;
+ fPtr++;
+
+ (*fPtr) = WEBRTC_SPL_MUL((*lspPtr), -1024);
+ fPtr++;
+ lspPtr+=2;
+
+ for(i=2; i<=5; i++)
+ {
+ (*fPtr) = fPtr[-2];
+
+ for(j=i; j>1; j--)
+ {
+ /* Compute f[j] = f[j] + tmp*f[j-1] + f[j-2]; */
+ high = (int16_t)(fPtr[-1] >> 16);
+ low = (int16_t)((fPtr[-1] & 0xffff) >> 1);
+
+ tmpW32 = 4 * high * *lspPtr + 4 * ((low * *lspPtr) >> 15);
+
+ (*fPtr) += fPtr[-2];
+ (*fPtr) -= tmpW32;
+ fPtr--;
+ }
+ *fPtr -= *lspPtr * (1 << 10);
+
+ fPtr+=i;
+ lspPtr+=2;
+ }
+ return;
+}
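
The Q24 code above is a fixed-point rendering of the recursion in the comment; the preset `f[i] = f[i-2]` plus the `j = i` pass of the inner loop produce the `b*f[i-1] + 2*f[i-2]` term. A floating-point reference of the same recursion (five LSPs per half-polynomial, as in the code):

/* Expand one half-polynomial from every second LSP.
   lsp[0], lsp[2], ..., lsp[8] are used; f has 6 coefficients. */
void get_lsp_poly_ref(const double* lsp, double* f) {
  int i, j;
  f[0] = 1.0;
  f[1] = -2.0 * lsp[0];
  for (i = 2; i <= 5; i++) {
    double b = -2.0 * lsp[2 * i - 2];
    f[i] = b * f[i - 1] + 2.0 * f[i - 2];
    for (j = i - 1; j >= 2; j--)
      f[j] += b * f[j - 1] + f[j - 2];
    f[1] += b;
  }
}
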
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/get_lsp_poly.h b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/get_lsp_poly.h
new file mode 100644
index 0000000000..70c9c4d4b4
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/get_lsp_poly.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_GetLspPoly.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_GET_LSP_POLY_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_GET_LSP_POLY_H_
+
+#include <stdint.h>
+
+/*----------------------------------------------------------------*
+ * Construct the polynomials F1(z) and F2(z) from the LSP
+ * (Computations are done in Q24)
+ *
+ * The expansion is performed using the following recursion:
+ *
+ * f[0] = 1;
+ * f[1] = -2.0 * lsp[0];
+ * for (i=2; i<=5; i++) {
+ *   b = -2.0 * lsp[2*i-2];
+ *   f[i] = b*f[i-1] + 2.0*f[i-2];
+ *   for (j=i-1; j>=2; j--) {
+ *     f[j] = f[j] + b*f[j-1] + f[j-2];
+ *   }
+ *   f[1] = f[1] + b;
+ * }
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_GetLspPoly(int16_t* lsp, /* (i) LSP in Q15 */
+                              int32_t* f); /* (o) polynomial in Q24 */
+
+#endif
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/get_sync_seq.c b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/get_sync_seq.c
new file mode 100644
index 0000000000..68a569a40a
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/get_sync_seq.c
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_GetSyncSeq.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/get_sync_seq.h"
+
+#include "modules/audio_coding/codecs/ilbc/constants.h"
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+#include "modules/audio_coding/codecs/ilbc/nearest_neighbor.h"
+#include "modules/audio_coding/codecs/ilbc/refiner.h"
+
+/*----------------------------------------------------------------*
+ * get the pitch-synchronous sample sequence
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_GetSyncSeq(
+ int16_t *idata, /* (i) original data */
+ size_t idatal, /* (i) dimension of data */
+ size_t centerStartPos, /* (i) where current block starts */
+ size_t *period, /* (i) rough-pitch-period array (Q-2) */
+ const size_t *plocs, /* (i) where periods of period array are taken (Q-2) */
+ size_t periodl, /* (i) dimension period array */
+ size_t hl, /* (i) 2*hl+1 is the number of sequences */
+ int16_t *surround /* (i/o) The contribution from this sequence
+ summed with earlier contributions */
+ ){
+ size_t i, centerEndPos, q;
+ /* Stack based */
+ size_t lagBlock[2 * ENH_HL + 1];
+ size_t blockStartPos[2 * ENH_HL + 1]; /* The position to search around (Q2) */
+ size_t plocs2[ENH_PLOCSL];
+
+ centerEndPos = centerStartPos + ENH_BLOCKL - 1;
+
+ /* present (find predicted lag from this position) */
+
+ WebRtcIlbcfix_NearestNeighbor(lagBlock + hl,
+ plocs,
+ 2 * (centerStartPos + centerEndPos),
+ periodl);
+
+ blockStartPos[hl] = 4 * centerStartPos;
+
+ /* past (find predicted position and perform a refined
+ search to find the best sequence) */
+
+ for (q = hl; q > 0; q--) {
+ size_t qq = q - 1;
+ size_t period_q = period[lagBlock[q]];
+ /* Stop if this sequence would be outside the buffer; that means all
+ further-past sequences would also be outside the buffer. */
+ if (blockStartPos[q] < period_q + (4 * ENH_OVERHANG))
+ break;
+ blockStartPos[qq] = blockStartPos[q] - period_q;
+
+ size_t value = blockStartPos[qq] + 4 * ENH_BLOCKL_HALF;
+ value = (value > period_q) ? (value - period_q) : 0;
+ WebRtcIlbcfix_NearestNeighbor(lagBlock + qq, plocs, value, periodl);
+
+ /* Find the best possible sequence in the 4 times upsampled
+ domain around blockStartPos+q */
+ WebRtcIlbcfix_Refiner(blockStartPos + qq, idata, idatal, centerStartPos,
+ blockStartPos[qq], surround,
+ WebRtcIlbcfix_kEnhWt[qq]);
+ }
+
+ /* future (find predicted position and perform a refined
+ search to find the best sequence) */
+
+ for (i = 0; i < periodl; i++) {
+ plocs2[i] = plocs[i] - period[i];
+ }
+
+ for (q = hl + 1; q <= (2 * hl); q++) {
+
+ WebRtcIlbcfix_NearestNeighbor(
+ lagBlock + q,
+ plocs2,
+ blockStartPos[q - 1] + 4 * ENH_BLOCKL_HALF,
+ periodl);
+
+ blockStartPos[q]=blockStartPos[q-1]+period[lagBlock[q]];
+
+ if (blockStartPos[q] + 4 * (ENH_BLOCKL + ENH_OVERHANG) < 4 * idatal) {
+
+ /* Find the best possible sequence in the 4 times upsampled
+ domain around blockStartPos+q */
+ WebRtcIlbcfix_Refiner(blockStartPos + q, idata, idatal, centerStartPos,
+ blockStartPos[q], surround,
+ WebRtcIlbcfix_kEnhWt[2 * hl - q]);
+
+ } else {
+ /* Don't add anything since this sequence would
+ be outside the buffer */
+ }
+ }
+}
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/get_sync_seq.h b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/get_sync_seq.h
new file mode 100644
index 0000000000..90962fa063
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/get_sync_seq.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_GetSyncSeq.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_GET_SYNC_SEQ_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_GET_SYNC_SEQ_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+/*----------------------------------------------------------------*
+ * get the pitch-synchronous sample sequence
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_GetSyncSeq(
+ int16_t* idata, /* (i) original data */
+ size_t idatal, /* (i) dimension of data */
+ size_t centerStartPos, /* (i) where current block starts */
+ size_t* period, /* (i) rough-pitch-period array (Q-2) */
+ const size_t* plocs, /* (i) where periods of period array are taken (Q-2) */
+ size_t periodl, /* (i) dimension period array */
+ size_t hl, /* (i) 2*hl+1 is the number of sequences */
+ int16_t* surround /* (i/o) The contribution from this sequence
+ summed with earlier contributions */
+ );
+
+#endif
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/hp_input.c b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/hp_input.c
new file mode 100644
index 0000000000..be582f2e23
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/hp_input.c
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_HpInput.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/hp_input.h"
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ *  high-pass filter the input, scale by 0.5, and saturate
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_HpInput(
+ int16_t *signal, /* (i/o) signal vector */
+ int16_t *ba, /* (i) B- and A-coefficients (2:nd order)
+ {b[0] b[1] b[2] -a[1] -a[2]} a[0]
+ is assumed to be 1.0 */
+ int16_t *y, /* (i/o) Filter state yhi[n-1] ylow[n-1]
+ yhi[n-2] ylow[n-2] */
+ int16_t *x, /* (i/o) Filter state x[n-1] x[n-2] */
+ size_t len) /* (i) Number of samples to filter */
+{
+ size_t i;
+ int32_t tmpW32;
+ int32_t tmpW32b;
+
+ for (i=0; i<len; i++) {
+
+ /*
+ y[i] = b[0]*x[i] + b[1]*x[i-1] + b[2]*x[i-2]
+ + (-a[1])*y[i-1] + (-a[2])*y[i-2];
+ */
+
+ tmpW32 = y[1] * ba[3]; /* (-a[1])*y[i-1] (low part) */
+ tmpW32 += y[3] * ba[4]; /* (-a[2])*y[i-2] (low part) */
+ tmpW32 = (tmpW32>>15);
+ tmpW32 += y[0] * ba[3]; /* (-a[1])*y[i-1] (high part) */
+ tmpW32 += y[2] * ba[4]; /* (-a[2])*y[i-2] (high part) */
+ tmpW32 = (tmpW32<<1);
+
+    tmpW32 += signal[i] * ba[0];   /* b[0]*x[i] */
+ tmpW32 += x[0] * ba[1]; /* b[1]*x[i-1] */
+ tmpW32 += x[1] * ba[2]; /* b[2]*x[i-2] */
+
+ /* Update state (input part) */
+ x[1] = x[0];
+ x[0] = signal[i];
+
+ /* Rounding in Q(12+1), i.e. add 2^12 */
+ tmpW32b = tmpW32 + 4096;
+
+ /* Saturate (to 2^28) so that the HP filtered signal does not overflow */
+ tmpW32b = WEBRTC_SPL_SAT((int32_t)268435455, tmpW32b, (int32_t)-268435456);
+
+ /* Convert back to Q0 and multiply with 0.5 */
+ signal[i] = (int16_t)(tmpW32b >> 13);
+
+ /* Update state (filtered part) */
+ y[2] = y[0];
+ y[3] = y[1];
+
+ /* upshift tmpW32 by 3 with saturation */
+ if (tmpW32>268435455) {
+ tmpW32 = WEBRTC_SPL_WORD32_MAX;
+ } else if (tmpW32<-268435456) {
+ tmpW32 = WEBRTC_SPL_WORD32_MIN;
+ } else {
+ tmpW32 <<= 3;
+ }
+
+ y[0] = (int16_t)(tmpW32 >> 16);
+ y[1] = (int16_t)((tmpW32 - (y[0] << 16)) >> 1);
+ }
+
+ return;
+}
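
Stripped of the Q-format bookkeeping (the hi/lo split of the y state, the Q12 coefficients, the saturations), the loop above is a direct-form I biquad followed by a 0.5 gain. A floating-point reference for comparison:

#include <stddef.h>

/* y[n] = b0*x[n] + b1*x[n-1] + b2*x[n-2] - a1*y[n-1] - a2*y[n-2],
   with ba = {b0, b1, b2, -a1, -a2} as in the fixed-point version. */
void hp_input_ref(float* signal, const float ba[5], float y[2], float x[2],
                  size_t len) {
  size_t i;
  for (i = 0; i < len; i++) {
    float out = ba[0] * signal[i] + ba[1] * x[0] + ba[2] * x[1] +
                ba[3] * y[0] + ba[4] * y[1];
    x[1] = x[0];
    x[0] = signal[i];
    y[1] = y[0];
    y[0] = out;
    signal[i] = 0.5f * out; /* the fixed-point code also halves the output */
  }
}
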
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/hp_input.h b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/hp_input.h
new file mode 100644
index 0000000000..9143d8efed
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/hp_input.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_HpInput.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_HP_INPUT_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_HP_INPUT_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// clang-format off
+// Bad job here. https://bugs.llvm.org/show_bug.cgi?id=34274
+void WebRtcIlbcfix_HpInput(
+ int16_t* signal, /* (i/o) signal vector */
+ int16_t* ba, /* (i) B- and A-coefficients (2:nd order)
+ {b[0] b[1] b[2] -a[1] -a[2]}
+ a[0] is assumed to be 1.0 */
+ int16_t* y, /* (i/o) Filter state yhi[n-1] ylow[n-1]
+ yhi[n-2] ylow[n-2] */
+ int16_t* x, /* (i/o) Filter state x[n-1] x[n-2] */
+ size_t len); /* (i) Number of samples to filter */
+// clang-format on
+
+#endif
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/hp_output.c b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/hp_output.c
new file mode 100644
index 0000000000..cc5f6dcd37
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/hp_output.c
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_HpOutput.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/hp_output.h"
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ *  high-pass filter the output, scale by 2, and saturate
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_HpOutput(
+ int16_t *signal, /* (i/o) signal vector */
+ int16_t *ba, /* (i) B- and A-coefficients (2:nd order)
+ {b[0] b[1] b[2] -a[1] -a[2]} a[0]
+ is assumed to be 1.0 */
+ int16_t *y, /* (i/o) Filter state yhi[n-1] ylow[n-1]
+ yhi[n-2] ylow[n-2] */
+ int16_t *x, /* (i/o) Filter state x[n-1] x[n-2] */
+ size_t len) /* (i) Number of samples to filter */
+{
+ size_t i;
+ int32_t tmpW32;
+ int32_t tmpW32b;
+
+ for (i=0; i<len; i++) {
+
+ /*
+ y[i] = b[0]*x[i] + b[1]*x[i-1] + b[2]*x[i-2]
+ + (-a[1])*y[i-1] + (-a[2])*y[i-2];
+ */
+
+ tmpW32 = y[1] * ba[3]; /* (-a[1])*y[i-1] (low part) */
+ tmpW32 += y[3] * ba[4]; /* (-a[2])*y[i-2] (low part) */
+ tmpW32 = (tmpW32>>15);
+ tmpW32 += y[0] * ba[3]; /* (-a[1])*y[i-1] (high part) */
+ tmpW32 += y[2] * ba[4]; /* (-a[2])*y[i-2] (high part) */
+ tmpW32 *= 2;
+
+    tmpW32 += signal[i] * ba[0];   /* b[0]*x[i] */
+ tmpW32 += x[0] * ba[1]; /* b[1]*x[i-1] */
+ tmpW32 += x[1] * ba[2]; /* b[2]*x[i-2] */
+
+ /* Update state (input part) */
+ x[1] = x[0];
+ x[0] = signal[i];
+
+ /* Rounding in Q(12-1), i.e. add 2^10 */
+ tmpW32b = tmpW32 + 1024;
+
+ /* Saturate (to 2^26) so that the HP filtered signal does not overflow */
+ tmpW32b = WEBRTC_SPL_SAT((int32_t)67108863, tmpW32b, (int32_t)-67108864);
+
+ /* Convert back to Q0 and multiply with 2 */
+ signal[i] = (int16_t)(tmpW32b >> 11);
+
+ /* Update state (filtered part) */
+ y[2] = y[0];
+ y[3] = y[1];
+
+ /* upshift tmpW32 by 3 with saturation */
+ if (tmpW32>268435455) {
+ tmpW32 = WEBRTC_SPL_WORD32_MAX;
+ } else if (tmpW32<-268435456) {
+ tmpW32 = WEBRTC_SPL_WORD32_MIN;
+ } else {
+ tmpW32 *= 8;
+ }
+
+ y[0] = (int16_t)(tmpW32 >> 16);
+ y[1] = (int16_t)((tmpW32 & 0xffff) >> 1);
+
+ }
+
+ return;
+}
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/hp_output.h b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/hp_output.h
new file mode 100644
index 0000000000..6d1bd3cd88
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/hp_output.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_HpOutput.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_HP_OUTPUT_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_HP_OUTPUT_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// clang-format off
+// Bad job here. https://bugs.llvm.org/show_bug.cgi?id=34274
+void WebRtcIlbcfix_HpOutput(
+ int16_t* signal, /* (i/o) signal vector */
+ int16_t* ba, /* (i) B- and A-coefficients (2:nd order)
+ {b[0] b[1] b[2] -a[1] -a[2]} a[0]
+ is assumed to be 1.0 */
+ int16_t* y, /* (i/o) Filter state yhi[n-1] ylow[n-1]
+ yhi[n-2] ylow[n-2] */
+ int16_t* x, /* (i/o) Filter state x[n-1] x[n-2] */
+ size_t len); /* (i) Number of samples to filter */
+// clang-format on
+
+#endif
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/ilbc.c b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/ilbc.c
new file mode 100644
index 0000000000..ba6c3e46c3
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/ilbc.c
@@ -0,0 +1,288 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ iLBCInterface.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/ilbc.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+#include "modules/audio_coding/codecs/ilbc/decode.h"
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+#include "modules/audio_coding/codecs/ilbc/encode.h"
+#include "modules/audio_coding/codecs/ilbc/init_decode.h"
+#include "modules/audio_coding/codecs/ilbc/init_encode.h"
+#include "rtc_base/checks.h"
+
+int16_t WebRtcIlbcfix_EncoderAssign(IlbcEncoderInstance** iLBC_encinst,
+ int16_t* ILBCENC_inst_Addr,
+ int16_t* size) {
+ *iLBC_encinst=(IlbcEncoderInstance*)ILBCENC_inst_Addr;
+ *size=sizeof(IlbcEncoder)/sizeof(int16_t);
+ if (*iLBC_encinst!=NULL) {
+ return(0);
+ } else {
+ return(-1);
+ }
+}
+
+int16_t WebRtcIlbcfix_DecoderAssign(IlbcDecoderInstance** iLBC_decinst,
+ int16_t* ILBCDEC_inst_Addr,
+ int16_t* size) {
+ *iLBC_decinst=(IlbcDecoderInstance*)ILBCDEC_inst_Addr;
+ *size=sizeof(IlbcDecoder)/sizeof(int16_t);
+ if (*iLBC_decinst!=NULL) {
+ return(0);
+ } else {
+ return(-1);
+ }
+}
+
+int16_t WebRtcIlbcfix_EncoderCreate(IlbcEncoderInstance **iLBC_encinst) {
+ *iLBC_encinst=(IlbcEncoderInstance*)malloc(sizeof(IlbcEncoder));
+ if (*iLBC_encinst!=NULL) {
+ return(0);
+ } else {
+ return(-1);
+ }
+}
+
+int16_t WebRtcIlbcfix_DecoderCreate(IlbcDecoderInstance **iLBC_decinst) {
+ *iLBC_decinst=(IlbcDecoderInstance*)malloc(sizeof(IlbcDecoder));
+ if (*iLBC_decinst!=NULL) {
+ return(0);
+ } else {
+ return(-1);
+ }
+}
+
+int16_t WebRtcIlbcfix_EncoderFree(IlbcEncoderInstance *iLBC_encinst) {
+ free(iLBC_encinst);
+ return(0);
+}
+
+int16_t WebRtcIlbcfix_DecoderFree(IlbcDecoderInstance *iLBC_decinst) {
+ free(iLBC_decinst);
+ return(0);
+}
+
+int16_t WebRtcIlbcfix_EncoderInit(IlbcEncoderInstance* iLBCenc_inst,
+ int16_t mode) {
+ if ((mode==20)||(mode==30)) {
+ WebRtcIlbcfix_InitEncode((IlbcEncoder*) iLBCenc_inst, mode);
+ return(0);
+ } else {
+ return(-1);
+ }
+}
+
+int WebRtcIlbcfix_Encode(IlbcEncoderInstance* iLBCenc_inst,
+ const int16_t* speechIn,
+ size_t len,
+ uint8_t* encoded) {
+ size_t pos = 0;
+ size_t encpos = 0;
+
+ if ((len != ((IlbcEncoder*)iLBCenc_inst)->blockl) &&
+#ifdef SPLIT_10MS
+ (len != 80) &&
+#endif
+ (len != 2*((IlbcEncoder*)iLBCenc_inst)->blockl) &&
+ (len != 3*((IlbcEncoder*)iLBCenc_inst)->blockl))
+ {
+ /* A maximum of 3 frames/packet is allowed */
+ return(-1);
+ } else {
+
+ /* call encoder */
+ while (pos<len) {
+ WebRtcIlbcfix_EncodeImpl((uint16_t*)&encoded[2 * encpos], &speechIn[pos],
+ (IlbcEncoder*)iLBCenc_inst);
+#ifdef SPLIT_10MS
+ pos += 80;
+ if(((IlbcEncoder*)iLBCenc_inst)->section == 0)
+#else
+ pos += ((IlbcEncoder*)iLBCenc_inst)->blockl;
+#endif
+ encpos += ((IlbcEncoder*)iLBCenc_inst)->no_of_words;
+ }
+ return (int)(encpos*2);
+ }
+}
+
+int16_t WebRtcIlbcfix_DecoderInit(IlbcDecoderInstance* iLBCdec_inst,
+ int16_t mode) {
+ if ((mode==20)||(mode==30)) {
+ WebRtcIlbcfix_InitDecode((IlbcDecoder*) iLBCdec_inst, mode, 1);
+ return(0);
+ } else {
+ return(-1);
+ }
+}
+void WebRtcIlbcfix_DecoderInit20Ms(IlbcDecoderInstance* iLBCdec_inst) {
+ WebRtcIlbcfix_InitDecode((IlbcDecoder*) iLBCdec_inst, 20, 1);
+}
+void WebRtcIlbcfix_Decoderinit30Ms(IlbcDecoderInstance* iLBCdec_inst) {
+ WebRtcIlbcfix_InitDecode((IlbcDecoder*) iLBCdec_inst, 30, 1);
+}
+
+
+int WebRtcIlbcfix_Decode(IlbcDecoderInstance* iLBCdec_inst,
+ const uint8_t* encoded,
+ size_t len,
+ int16_t* decoded,
+ int16_t* speechType)
+{
+ size_t i=0;
+ /* Allow for automatic switching between the frame sizes
+ (although you do get some discontinuity) */
+ if ((len==((IlbcDecoder*)iLBCdec_inst)->no_of_bytes)||
+ (len==2*((IlbcDecoder*)iLBCdec_inst)->no_of_bytes)||
+ (len==3*((IlbcDecoder*)iLBCdec_inst)->no_of_bytes)) {
+ /* ok, do nothing */
+ } else {
+ /* Test if the mode has changed */
+ if (((IlbcDecoder*)iLBCdec_inst)->mode==20) {
+ if ((len==NO_OF_BYTES_30MS)||
+ (len==2*NO_OF_BYTES_30MS)||
+ (len==3*NO_OF_BYTES_30MS)) {
+ WebRtcIlbcfix_InitDecode(
+ ((IlbcDecoder*)iLBCdec_inst), 30,
+ ((IlbcDecoder*)iLBCdec_inst)->use_enhancer);
+ } else {
+ /* Unsupported frame length */
+ return(-1);
+ }
+ } else {
+ if ((len==NO_OF_BYTES_20MS)||
+ (len==2*NO_OF_BYTES_20MS)||
+ (len==3*NO_OF_BYTES_20MS)) {
+ WebRtcIlbcfix_InitDecode(
+ ((IlbcDecoder*)iLBCdec_inst), 20,
+ ((IlbcDecoder*)iLBCdec_inst)->use_enhancer);
+ } else {
+ /* Unsupported frame length */
+ return(-1);
+ }
+ }
+ }
+
+ while ((i*((IlbcDecoder*)iLBCdec_inst)->no_of_bytes)<len) {
+ if (WebRtcIlbcfix_DecodeImpl(
+ &decoded[i * ((IlbcDecoder*)iLBCdec_inst)->blockl],
+ (const uint16_t*)&encoded
+ [2 * i * ((IlbcDecoder*)iLBCdec_inst)->no_of_words],
+ (IlbcDecoder*)iLBCdec_inst, 1) == -1)
+ return -1;
+ i++;
+ }
+ /* iLBC does not support VAD/CNG yet */
+ *speechType=1;
+ return (int)(i*((IlbcDecoder*)iLBCdec_inst)->blockl);
+}
+
+int WebRtcIlbcfix_Decode20Ms(IlbcDecoderInstance* iLBCdec_inst,
+ const uint8_t* encoded,
+ size_t len,
+ int16_t* decoded,
+ int16_t* speechType)
+{
+ size_t i=0;
+ if ((len==((IlbcDecoder*)iLBCdec_inst)->no_of_bytes)||
+ (len==2*((IlbcDecoder*)iLBCdec_inst)->no_of_bytes)||
+ (len==3*((IlbcDecoder*)iLBCdec_inst)->no_of_bytes)) {
+ /* ok, do nothing */
+ } else {
+ return(-1);
+ }
+
+ while ((i*((IlbcDecoder*)iLBCdec_inst)->no_of_bytes)<len) {
+ if (!WebRtcIlbcfix_DecodeImpl(
+ &decoded[i * ((IlbcDecoder*)iLBCdec_inst)->blockl],
+ (const uint16_t*)&encoded
+ [2 * i * ((IlbcDecoder*)iLBCdec_inst)->no_of_words],
+ (IlbcDecoder*)iLBCdec_inst, 1))
+ return -1;
+ i++;
+ }
+ /* iLBC does not support VAD/CNG yet */
+ *speechType=1;
+ return (int)(i*((IlbcDecoder*)iLBCdec_inst)->blockl);
+}
+
+int WebRtcIlbcfix_Decode30Ms(IlbcDecoderInstance* iLBCdec_inst,
+ const uint8_t* encoded,
+ size_t len,
+ int16_t* decoded,
+ int16_t* speechType)
+{
+ size_t i=0;
+ if ((len==((IlbcDecoder*)iLBCdec_inst)->no_of_bytes)||
+ (len==2*((IlbcDecoder*)iLBCdec_inst)->no_of_bytes)||
+ (len==3*((IlbcDecoder*)iLBCdec_inst)->no_of_bytes)) {
+ /* ok, do nothing */
+ } else {
+ return(-1);
+ }
+
+ while ((i*((IlbcDecoder*)iLBCdec_inst)->no_of_bytes)<len) {
+ if (!WebRtcIlbcfix_DecodeImpl(
+ &decoded[i * ((IlbcDecoder*)iLBCdec_inst)->blockl],
+ (const uint16_t*)&encoded
+ [2 * i * ((IlbcDecoder*)iLBCdec_inst)->no_of_words],
+ (IlbcDecoder*)iLBCdec_inst, 1))
+ return -1;
+ i++;
+ }
+ /* iLBC does not support VAD/CNG yet */
+ *speechType=1;
+ return (int)(i*((IlbcDecoder*)iLBCdec_inst)->blockl);
+}
+
+size_t WebRtcIlbcfix_DecodePlc(IlbcDecoderInstance* iLBCdec_inst,
+ int16_t* decoded,
+ size_t noOfLostFrames) {
+ size_t i;
+ uint16_t dummy;
+
+ for (i=0;i<noOfLostFrames;i++) {
+ // PLC decoding shouldn't fail, because there is no external input data
+ // that can be bad.
+ int result = WebRtcIlbcfix_DecodeImpl(
+ &decoded[i * ((IlbcDecoder*)iLBCdec_inst)->blockl], &dummy,
+ (IlbcDecoder*)iLBCdec_inst, 0);
+ RTC_CHECK_EQ(result, 0);
+ }
+ return (noOfLostFrames*((IlbcDecoder*)iLBCdec_inst)->blockl);
+}
+
+size_t WebRtcIlbcfix_NetEqPlc(IlbcDecoderInstance* iLBCdec_inst,
+ int16_t* decoded,
+ size_t noOfLostFrames) {
+ /* Two input parameters not used, but needed for function pointers in NetEQ */
+ (void)(decoded = NULL);
+ (void)(noOfLostFrames = 0);
+
+ WebRtcSpl_MemSetW16(((IlbcDecoder*)iLBCdec_inst)->enh_buf, 0, ENH_BUFL);
+ ((IlbcDecoder*)iLBCdec_inst)->prev_enh_pl = 2;
+
+ return (0);
+}
+
+void WebRtcIlbcfix_version(char *version)
+{
+ strcpy((char*)version, "1.1.1");
+}
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/ilbc.h b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/ilbc.h
new file mode 100644
index 0000000000..de8cfde111
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/ilbc.h
@@ -0,0 +1,251 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * ilbc.h
+ *
+ * This header file contains all of the APIs for iLBC.
+ *
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_ILBC_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_ILBC_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+/*
+ * Solution to support multiple instances
+ * The caller has to cast the instance to the proper type
+ */
+
+typedef struct iLBC_encinst_t_ IlbcEncoderInstance;
+
+typedef struct iLBC_decinst_t_ IlbcDecoderInstance;
+
+/*
+ * Comfort noise constants
+ */
+
+#define ILBC_SPEECH 1
+#define ILBC_CNG 2
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/****************************************************************************
+ * WebRtcIlbcfix_XxxAssign(...)
+ *
+ * These functions assign the encoder/decoder instance to the specified
+ * memory location
+ *
+ * Input:
+ * - XXX_xxxinst : Pointer to created instance that should be
+ * assigned
+ * - ILBCXXX_inst_Addr : Pointer to the desired memory space
+ * - size : The size that this structure occupies (in Word16)
+ *
+ * Return value : 0 - Ok
+ * -1 - Error
+ */
+
+int16_t WebRtcIlbcfix_EncoderAssign(IlbcEncoderInstance** iLBC_encinst,
+ int16_t* ILBCENC_inst_Addr,
+ int16_t* size);
+int16_t WebRtcIlbcfix_DecoderAssign(IlbcDecoderInstance** iLBC_decinst,
+ int16_t* ILBCDEC_inst_Addr,
+ int16_t* size);
+
+/****************************************************************************
+ * WebRtcIlbcfix_XxxCreate(...)
+ *
+ * These functions create an instance of the specified structure
+ *
+ * Input:
+ * - XXX_inst : Pointer to created instance that should be created
+ *
+ * Return value : 0 - Ok
+ * -1 - Error
+ */
+
+int16_t WebRtcIlbcfix_EncoderCreate(IlbcEncoderInstance** iLBC_encinst);
+int16_t WebRtcIlbcfix_DecoderCreate(IlbcDecoderInstance** iLBC_decinst);
+
+/****************************************************************************
+ * WebRtcIlbcfix_XxxFree(...)
+ *
+ * These functions free the dynamic memory of a specified instance
+ *
+ * Input:
+ * - XXX_inst : Pointer to created instance that should be freed
+ *
+ * Return value : 0 - Ok
+ * -1 - Error
+ */
+
+int16_t WebRtcIlbcfix_EncoderFree(IlbcEncoderInstance* iLBC_encinst);
+int16_t WebRtcIlbcfix_DecoderFree(IlbcDecoderInstance* iLBC_decinst);
+
+/****************************************************************************
+ * WebRtcIlbcfix_EncoderInit(...)
+ *
+ * This function initializes an iLBC instance
+ *
+ * Input:
+ *      - iLBCenc_inst      : iLBC encoder instance that should
+ *                            be initialized
+ * - frameLen : The frame length of the codec 20/30 (ms)
+ *
+ * Return value : 0 - Ok
+ * -1 - Error
+ */
+
+int16_t WebRtcIlbcfix_EncoderInit(IlbcEncoderInstance* iLBCenc_inst,
+ int16_t frameLen);
+
+/****************************************************************************
+ * WebRtcIlbcfix_Encode(...)
+ *
+ * This function encodes one iLBC frame. Input speech length has to be a
+ * multiple of the frame length.
+ *
+ * Input:
+ * - iLBCenc_inst : iLBC instance, i.e. the user that should encode
+ * a package
+ * - speechIn : Input speech vector
+ * - len : Samples in speechIn (160, 240, 320 or 480)
+ *
+ * Output:
+ * - encoded : The encoded data vector
+ *
+ * Return value : >0 - Length (in bytes) of coded data
+ * -1 - Error
+ */
+
+int WebRtcIlbcfix_Encode(IlbcEncoderInstance* iLBCenc_inst,
+ const int16_t* speechIn,
+ size_t len,
+ uint8_t* encoded);
+
+/****************************************************************************
+ * WebRtcIlbcfix_DecoderInit(...)
+ *
+ * This function initializes an iLBC instance with either 20 or 30 ms frames.
+ * Alternatively, the WebRtcIlbcfix_DecoderInit20Ms/Decoderinit30Ms functions
+ * can be used; then the frame length does not have to be specified.
+ *
+ * Input:
+ * - IlbcDecoderInstance : iLBC decoder instance
+ * - frameLen : The frame length of the codec 20/30 (ms)
+ *
+ * Return value : 0 - Ok
+ * -1 - Error
+ */
+
+int16_t WebRtcIlbcfix_DecoderInit(IlbcDecoderInstance* iLBCdec_inst,
+ int16_t frameLen);
+void WebRtcIlbcfix_DecoderInit20Ms(IlbcDecoderInstance* iLBCdec_inst);
+void WebRtcIlbcfix_Decoderinit30Ms(IlbcDecoderInstance* iLBCdec_inst);
+
+/****************************************************************************
+ * WebRtcIlbcfix_Decode(...)
+ *
+ * This function decodes a packet with iLBC frame(s). Output speech length
+ * will be a multiple of 160 or 240 samples ((160 or 240)*frames/packet).
+ *
+ * Input:
+ * - iLBCdec_inst : iLBC instance, i.e. the user that should decode
+ * a packet
+ * - encoded : Encoded iLBC frame(s)
+ * - len : Bytes in encoded vector
+ *
+ * Output:
+ * - decoded : The decoded vector
+ * - speechType : 1 normal, 2 CNG
+ *
+ * Return value : >0 - Samples in decoded vector
+ * -1 - Error
+ */
+
+int WebRtcIlbcfix_Decode(IlbcDecoderInstance* iLBCdec_inst,
+ const uint8_t* encoded,
+ size_t len,
+ int16_t* decoded,
+ int16_t* speechType);
+int WebRtcIlbcfix_Decode20Ms(IlbcDecoderInstance* iLBCdec_inst,
+ const uint8_t* encoded,
+ size_t len,
+ int16_t* decoded,
+ int16_t* speechType);
+int WebRtcIlbcfix_Decode30Ms(IlbcDecoderInstance* iLBCdec_inst,
+ const uint8_t* encoded,
+ size_t len,
+ int16_t* decoded,
+ int16_t* speechType);
+
+/****************************************************************************
+ * WebRtcIlbcfix_DecodePlc(...)
+ *
+ * This function conducts PLC for iLBC frame(s). Output speech length
+ * will be a multiple of 160 or 240 samples.
+ *
+ * Input:
+ * - iLBCdec_inst : iLBC instance, i.e. the user that should perform
+ * a PLC
+ * - noOfLostFrames : Number of PLC frames to produce
+ *
+ * Output:
+ * - decoded : The "decoded" vector
+ *
+ * Return value : Samples in decoded PLC vector
+ */
+
+size_t WebRtcIlbcfix_DecodePlc(IlbcDecoderInstance* iLBCdec_inst,
+ int16_t* decoded,
+ size_t noOfLostFrames);
+
+/****************************************************************************
+ * WebRtcIlbcfix_NetEqPlc(...)
+ *
+ * This function updates the decoder when a packet loss has occurred, but it
+ * does not produce any PLC data. This function can be used if another PLC
+ * method is used (e.g. NetEQ).
+ *
+ * Input:
+ * - iLBCdec_inst : iLBC instance that should be updated
+ * - noOfLostFrames : Number of lost frames
+ *
+ * Output:
+ * - decoded : The "decoded" vector (nothing in this case)
+ *
+ * Return value : Samples in decoded PLC vector
+ */
+
+size_t WebRtcIlbcfix_NetEqPlc(IlbcDecoderInstance* iLBCdec_inst,
+ int16_t* decoded,
+ size_t noOfLostFrames);
+
+/****************************************************************************
+ * WebRtcIlbcfix_version(...)
+ *
+ * This function returns the version number of iLBC
+ *
+ * Output:
+ * - version : Version number of iLBC (maximum 20 char)
+ */
+
+void WebRtcIlbcfix_version(char* version);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // MODULES_AUDIO_CODING_CODECS_ILBC_ILBC_H_
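
Taken together, a typical use of this API is create, init, encode, decode, free. A minimal round-trip sketch in 20 ms mode (error handling trimmed; the 38-byte payload and 160-sample frame sizes follow from the comments above):

#include <stdint.h>

#include "modules/audio_coding/codecs/ilbc/ilbc.h"

int ilbc_roundtrip(void) {
  IlbcEncoderInstance* enc;
  IlbcDecoderInstance* dec;
  int16_t speech[160] = {0}; /* one 20 ms frame at 8 kHz */
  int16_t out[160];
  uint8_t payload[38];       /* encoded size of one 20 ms frame */
  int16_t speech_type;
  int bytes, samples;

  if (WebRtcIlbcfix_EncoderCreate(&enc) != 0) return -1;
  if (WebRtcIlbcfix_DecoderCreate(&dec) != 0) return -1;
  WebRtcIlbcfix_EncoderInit(enc, 20);
  WebRtcIlbcfix_DecoderInit(dec, 20);

  bytes = WebRtcIlbcfix_Encode(enc, speech, 160, payload);
  samples = bytes > 0 ? WebRtcIlbcfix_Decode(dec, payload, (size_t)bytes, out,
                                             &speech_type)
                      : -1;

  WebRtcIlbcfix_EncoderFree(enc);
  WebRtcIlbcfix_DecoderFree(dec);
  return (bytes == 38 && samples == 160) ? 0 : -1;
}
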
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/ilbc_unittest.cc b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/ilbc_unittest.cc
new file mode 100644
index 0000000000..689292f131
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/ilbc_unittest.cc
@@ -0,0 +1,140 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/ilbc/audio_decoder_ilbc.h"
+#include "modules/audio_coding/codecs/ilbc/audio_encoder_ilbc.h"
+#include "modules/audio_coding/codecs/legacy_encoded_audio_frame.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+TEST(IlbcTest, BadPacket) {
+ // Get a good packet.
+ AudioEncoderIlbcConfig config;
+ config.frame_size_ms = 20; // We need 20 ms rather than the default 30 ms;
+ // otherwise, all possible values of cb_index[2]
+ // are valid.
+ AudioEncoderIlbcImpl encoder(config, 102);
+ std::vector<int16_t> samples(encoder.SampleRateHz() / 100, 4711);
+ rtc::Buffer packet;
+ int num_10ms_chunks = 0;
+ while (packet.size() == 0) {
+ encoder.Encode(0, samples, &packet);
+ num_10ms_chunks += 1;
+ }
+
+ // Break the packet by setting all bits of the unsigned 7-bit number
+ // cb_index[2] to 1, giving it a value of 127. For a 20 ms packet, this is
+ // too large.
+ EXPECT_EQ(38u, packet.size());
+ rtc::Buffer bad_packet(packet.data(), packet.size());
+ bad_packet[29] |= 0x3f; // Bits 1-6.
+ bad_packet[30] |= 0x80; // Bit 0.
+
+ // Decode the bad packet. We expect the decoder to respond by returning -1.
+ AudioDecoderIlbcImpl decoder;
+ std::vector<int16_t> decoded_samples(num_10ms_chunks * samples.size());
+ AudioDecoder::SpeechType speech_type;
+ EXPECT_EQ(-1, decoder.Decode(bad_packet.data(), bad_packet.size(),
+ encoder.SampleRateHz(),
+ sizeof(int16_t) * decoded_samples.size(),
+ decoded_samples.data(), &speech_type));
+
+ // Decode the good packet. This should work, because the failed decoding
+ // should not have left the decoder in a broken state.
+ EXPECT_EQ(static_cast<int>(decoded_samples.size()),
+ decoder.Decode(packet.data(), packet.size(), encoder.SampleRateHz(),
+ sizeof(int16_t) * decoded_samples.size(),
+ decoded_samples.data(), &speech_type));
+}
+
+class SplitIlbcTest : public ::testing::TestWithParam<std::pair<int, int> > {
+ protected:
+ virtual void SetUp() {
+ const std::pair<int, int> parameters = GetParam();
+ num_frames_ = parameters.first;
+ frame_length_ms_ = parameters.second;
+ frame_length_bytes_ = (frame_length_ms_ == 20) ? 38 : 50;
+ }
+ size_t num_frames_;
+ int frame_length_ms_;
+ size_t frame_length_bytes_;
+};
+
+TEST_P(SplitIlbcTest, NumFrames) {
+ AudioDecoderIlbcImpl decoder;
+ const size_t frame_length_samples = frame_length_ms_ * 8;
+ const auto generate_payload = [](size_t payload_length_bytes) {
+ rtc::Buffer payload(payload_length_bytes);
+ // Fill payload with increasing integers {0, 1, 2, ...}.
+ for (size_t i = 0; i < payload.size(); ++i) {
+ payload[i] = static_cast<uint8_t>(i);
+ }
+ return payload;
+ };
+
+ const auto results = decoder.ParsePayload(
+ generate_payload(frame_length_bytes_ * num_frames_), 0);
+ EXPECT_EQ(num_frames_, results.size());
+
+ size_t frame_num = 0;
+ uint8_t payload_value = 0;
+ for (const auto& result : results) {
+ EXPECT_EQ(frame_length_samples * frame_num, result.timestamp);
+ const LegacyEncodedAudioFrame* frame =
+ static_cast<const LegacyEncodedAudioFrame*>(result.frame.get());
+ const rtc::Buffer& payload = frame->payload();
+ EXPECT_EQ(frame_length_bytes_, payload.size());
+ for (size_t i = 0; i < payload.size(); ++i, ++payload_value) {
+ EXPECT_EQ(payload_value, payload[i]);
+ }
+ ++frame_num;
+ }
+}
+
+// Test 1 through 5 frames of 20 and 30 ms size.
+// Also test the maximum number of frames in one packet for 20 and 30 ms.
+// The maximum is defined by the largest payload length that can be uniquely
+// resolved to a frame size of either 38 bytes (20 ms) or 50 bytes (30 ms).
+INSTANTIATE_TEST_SUITE_P(
+ IlbcTest,
+ SplitIlbcTest,
+ ::testing::Values(std::pair<int, int>(1, 20), // 1 frame, 20 ms.
+ std::pair<int, int>(2, 20), // 2 frames, 20 ms.
+ std::pair<int, int>(3, 20), // And so on.
+ std::pair<int, int>(4, 20),
+ std::pair<int, int>(5, 20),
+ std::pair<int, int>(24, 20),
+ std::pair<int, int>(1, 30),
+ std::pair<int, int>(2, 30),
+ std::pair<int, int>(3, 30),
+ std::pair<int, int>(4, 30),
+ std::pair<int, int>(5, 30),
+ std::pair<int, int>(18, 30)));
+
+// Test too large payload size.
+TEST(IlbcTest, SplitTooLargePayload) {
+ AudioDecoderIlbcImpl decoder;
+ constexpr size_t kPayloadLengthBytes = 950;
+ const auto results =
+ decoder.ParsePayload(rtc::Buffer(kPayloadLengthBytes), 0);
+ EXPECT_TRUE(results.empty());
+}
+
+// Payload not an integer number of frames.
+TEST(IlbcTest, SplitUnevenPayload) {
+ AudioDecoderIlbcImpl decoder;
+ constexpr size_t kPayloadLengthBytes = 39; // Not an even number of frames.
+ const auto results =
+ decoder.ParsePayload(rtc::Buffer(kPayloadLengthBytes), 0);
+ EXPECT_TRUE(results.empty());
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/index_conv_dec.c b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/index_conv_dec.c
new file mode 100644
index 0000000000..d78f81a897
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/index_conv_dec.c
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_IndexConvDec.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/index_conv_dec.h"
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+void WebRtcIlbcfix_IndexConvDec(
+ int16_t *index /* (i/o) Codebook indexes */
+ ){
+ int k;
+
+ for (k=4;k<6;k++) {
+    /* Readjust the second and third codebook index for the first 40 samples
+ so that they look the same as the first (in terms of lag)
+ */
+ if ((index[k]>=44)&&(index[k]<108)) {
+ index[k]+=64;
+ } else if ((index[k]>=108)&&(index[k]<128)) {
+ index[k]+=128;
+ } else {
+ /* ERROR */
+ }
+ }
+}
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/index_conv_dec.h b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/index_conv_dec.h
new file mode 100644
index 0000000000..4f08ce04df
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/index_conv_dec.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_IndexConvDec.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_INDEX_CONV_DEC_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_INDEX_CONV_DEC_H_
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+void WebRtcIlbcfix_IndexConvDec(int16_t* index /* (i/o) Codebook indexes */
+ );
+
+#endif
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/index_conv_enc.c b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/index_conv_enc.c
new file mode 100644
index 0000000000..83144150b4
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/index_conv_enc.c
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_IndexConvEnc.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/index_conv_enc.h"
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ * Convert the codebook indexes to make the search easier
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_IndexConvEnc(
+ int16_t *index /* (i/o) Codebook indexes */
+ ){
+ int k;
+
+ for (k=4;k<6;k++) {
+    /* Readjust the second and third codebook index so that each fits
+       in 7 bits when packetized (before this they are represented
+       lag-wise, the same way as the first codebook, which uses 8 bits)
+    */
+ if ((index[k]>=108)&&(index[k]<172)) {
+ index[k]-=64;
+ } else if (index[k]>=236) {
+ index[k]-=128;
+ } else {
+ /* ERROR */
+ }
+ }
+}
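+
+/* Editorial note (not part of the original source): after this
+   conversion both remapped indexes lie in [44,128), e.g. 108 -> 44 and
+   236 -> 108, so each fits in the 7 bits the bitstream packer allots
+   to the second and third stages, while the first codebook keeps its
+   8-bit lag-wise representation. */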
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/index_conv_enc.h b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/index_conv_enc.h
new file mode 100644
index 0000000000..4fbf98084e
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/index_conv_enc.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_IndexConvEnc.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_INDEX_CONV_ENC_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_INDEX_CONV_ENC_H_
+
+#include <stdint.h>
+
+/*----------------------------------------------------------------*
+ * Convert the codebook indexes to make the search easier
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_IndexConvEnc(int16_t* index /* (i/o) Codebook indexes */
+ );
+
+#endif
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/init_decode.c b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/init_decode.c
new file mode 100644
index 0000000000..3eb41e33b0
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/init_decode.c
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_InitDecode.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/init_decode.h"
+
+#include "modules/audio_coding/codecs/ilbc/constants.h"
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ * Initialization of decoder instance.
+ *---------------------------------------------------------------*/
+
+int WebRtcIlbcfix_InitDecode( /* (o) Number of decoded samples */
+ IlbcDecoder *iLBCdec_inst, /* (i/o) Decoder instance */
+ int16_t mode, /* (i) frame size mode */
+ int use_enhancer) { /* (i) 1: use enhancer, 0: no enhancer */
+ int i;
+
+ iLBCdec_inst->mode = mode;
+
+ /* Set all the variables that are dependent on the frame size mode */
+ if (mode==30) {
+ iLBCdec_inst->blockl = BLOCKL_30MS;
+ iLBCdec_inst->nsub = NSUB_30MS;
+ iLBCdec_inst->nasub = NASUB_30MS;
+ iLBCdec_inst->lpc_n = LPC_N_30MS;
+ iLBCdec_inst->no_of_bytes = NO_OF_BYTES_30MS;
+ iLBCdec_inst->no_of_words = NO_OF_WORDS_30MS;
+ iLBCdec_inst->state_short_len=STATE_SHORT_LEN_30MS;
+ }
+ else if (mode==20) {
+ iLBCdec_inst->blockl = BLOCKL_20MS;
+ iLBCdec_inst->nsub = NSUB_20MS;
+ iLBCdec_inst->nasub = NASUB_20MS;
+ iLBCdec_inst->lpc_n = LPC_N_20MS;
+ iLBCdec_inst->no_of_bytes = NO_OF_BYTES_20MS;
+ iLBCdec_inst->no_of_words = NO_OF_WORDS_20MS;
+ iLBCdec_inst->state_short_len=STATE_SHORT_LEN_20MS;
+ }
+ else {
+ return(-1);
+ }
+
+ /* Reset all the previous LSF to mean LSF */
+ WEBRTC_SPL_MEMCPY_W16(iLBCdec_inst->lsfdeqold, WebRtcIlbcfix_kLsfMean, LPC_FILTERORDER);
+
+ /* Clear the synthesis filter memory */
+ WebRtcSpl_MemSetW16(iLBCdec_inst->syntMem, 0, LPC_FILTERORDER);
+
+ /* Set the old synthesis filter to {1.0 0.0 ... 0.0} */
+ WebRtcSpl_MemSetW16(iLBCdec_inst->old_syntdenum, 0, ((LPC_FILTERORDER + 1)*NSUB_MAX));
+ for (i=0; i<NSUB_MAX; i++) {
+ iLBCdec_inst->old_syntdenum[i*(LPC_FILTERORDER+1)] = 4096;
+ }
+
+ /* Clear the variables that are used for the PLC */
+ iLBCdec_inst->last_lag = 20;
+ iLBCdec_inst->consPLICount = 0;
+ iLBCdec_inst->prevPLI = 0;
+ iLBCdec_inst->perSquare = 0;
+ iLBCdec_inst->prevLag = 120;
+ iLBCdec_inst->prevLpc[0] = 4096;
+ WebRtcSpl_MemSetW16(iLBCdec_inst->prevLpc+1, 0, LPC_FILTERORDER);
+ WebRtcSpl_MemSetW16(iLBCdec_inst->prevResidual, 0, BLOCKL_MAX);
+
+ /* Initialize the seed for the random number generator */
+ iLBCdec_inst->seed = 777;
+
+ /* Set the filter state of the HP filter to 0 */
+ WebRtcSpl_MemSetW16(iLBCdec_inst->hpimemx, 0, 2);
+ WebRtcSpl_MemSetW16(iLBCdec_inst->hpimemy, 0, 4);
+
+  /* Set the variables that are used in the enhancer */
+ iLBCdec_inst->use_enhancer = use_enhancer;
+ WebRtcSpl_MemSetW16(iLBCdec_inst->enh_buf, 0, (ENH_BUFL+ENH_BUFL_FILTEROVERHEAD));
+ for (i=0;i<ENH_NBLOCKS_TOT;i++) {
+ iLBCdec_inst->enh_period[i]=160; /* Q(-4) */
+ }
+
+ iLBCdec_inst->prev_enh_pl = 0;
+
+ return (int)(iLBCdec_inst->blockl);
+}
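+
+/* Usage sketch (editorial addition; hypothetical snippet, not part of
+   the original source):
+
+     IlbcDecoder dec;
+     int blockl = WebRtcIlbcfix_InitDecode(&dec, 20, 1);
+     // blockl == BLOCKL_20MS (160 samples) for mode 20,
+     // BLOCKL_30MS (240 samples) for mode 30, and -1 otherwise.
+*/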
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/init_decode.h b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/init_decode.h
new file mode 100644
index 0000000000..a2b7b91287
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/init_decode.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_InitDecode.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_INIT_DECODE_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_INIT_DECODE_H_
+
+#include <stdint.h>
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ * Initialization of decoder instance.
+ *---------------------------------------------------------------*/
+
+int WebRtcIlbcfix_InitDecode(/* (o) Number of decoded samples */
+ IlbcDecoder*
+ iLBCdec_inst, /* (i/o) Decoder instance */
+ int16_t mode, /* (i) frame size mode */
+ int use_enhancer /* (i) 1 to use enhancer
+ 0 to run without enhancer */
+ );
+
+#endif
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/init_encode.c b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/init_encode.c
new file mode 100644
index 0000000000..aa858e94bb
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/init_encode.c
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_InitEncode.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/init_encode.h"
+
+#include "modules/audio_coding/codecs/ilbc/constants.h"
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ * Initialization of encoder instance.
+ *---------------------------------------------------------------*/
+
+int WebRtcIlbcfix_InitEncode( /* (o) Number of bytes encoded */
+ IlbcEncoder *iLBCenc_inst, /* (i/o) Encoder instance */
+ int16_t mode) { /* (i) frame size mode */
+ iLBCenc_inst->mode = mode;
+
+ /* Set all the variables that are dependent on the frame size mode */
+ if (mode==30) {
+ iLBCenc_inst->blockl = BLOCKL_30MS;
+ iLBCenc_inst->nsub = NSUB_30MS;
+ iLBCenc_inst->nasub = NASUB_30MS;
+ iLBCenc_inst->lpc_n = LPC_N_30MS;
+ iLBCenc_inst->no_of_bytes = NO_OF_BYTES_30MS;
+ iLBCenc_inst->no_of_words = NO_OF_WORDS_30MS;
+ iLBCenc_inst->state_short_len=STATE_SHORT_LEN_30MS;
+ }
+ else if (mode==20) {
+ iLBCenc_inst->blockl = BLOCKL_20MS;
+ iLBCenc_inst->nsub = NSUB_20MS;
+ iLBCenc_inst->nasub = NASUB_20MS;
+ iLBCenc_inst->lpc_n = LPC_N_20MS;
+ iLBCenc_inst->no_of_bytes = NO_OF_BYTES_20MS;
+ iLBCenc_inst->no_of_words = NO_OF_WORDS_20MS;
+ iLBCenc_inst->state_short_len=STATE_SHORT_LEN_20MS;
+ }
+ else {
+ return(-1);
+ }
+
+ /* Clear the buffers and set the previous LSF and LSP to the mean value */
+ WebRtcSpl_MemSetW16(iLBCenc_inst->anaMem, 0, LPC_FILTERORDER);
+ WEBRTC_SPL_MEMCPY_W16(iLBCenc_inst->lsfold, WebRtcIlbcfix_kLsfMean, LPC_FILTERORDER);
+ WEBRTC_SPL_MEMCPY_W16(iLBCenc_inst->lsfdeqold, WebRtcIlbcfix_kLsfMean, LPC_FILTERORDER);
+ WebRtcSpl_MemSetW16(iLBCenc_inst->lpc_buffer, 0, LPC_LOOKBACK + BLOCKL_MAX);
+
+ /* Set the filter state of the HP filter to 0 */
+ WebRtcSpl_MemSetW16(iLBCenc_inst->hpimemx, 0, 2);
+ WebRtcSpl_MemSetW16(iLBCenc_inst->hpimemy, 0, 4);
+
+#ifdef SPLIT_10MS
+ /*Zeroing the past samples for 10msec Split*/
+ WebRtcSpl_MemSetW16(iLBCenc_inst->past_samples,0,160);
+ iLBCenc_inst->section = 0;
+#endif
+
+ return (int)(iLBCenc_inst->no_of_bytes);
+}
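+
+/* Usage sketch (editorial addition): mirrors WebRtcIlbcfix_InitDecode,
+   but the return value is the encoded frame size in bytes rather than
+   a sample count:
+
+     IlbcEncoder enc;
+     int nbytes = WebRtcIlbcfix_InitEncode(&enc, 30);
+     // nbytes == NO_OF_BYTES_30MS (50) for mode 30,
+     // NO_OF_BYTES_20MS (38) for mode 20, and -1 otherwise.
+*/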
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/init_encode.h b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/init_encode.h
new file mode 100644
index 0000000000..4ada6a30c8
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/init_encode.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_InitEncode.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_INIT_ENCODE_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_INIT_ENCODE_H_
+
+#include <stdint.h>
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ * Initialization of encoder instance.
+ *---------------------------------------------------------------*/
+
+int WebRtcIlbcfix_InitEncode(/* (o) Number of bytes encoded */
+ IlbcEncoder*
+ iLBCenc_inst, /* (i/o) Encoder instance */
+ int16_t mode /* (i) frame size mode */
+ );
+
+#endif
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/interpolate.c b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/interpolate.c
new file mode 100644
index 0000000000..17ed244bd4
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/interpolate.c
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_Interpolate.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/interpolate.h"
+
+#include "modules/audio_coding/codecs/ilbc/constants.h"
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ * interpolation between vectors
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_Interpolate(
+ int16_t *out, /* (o) output vector */
+ int16_t *in1, /* (i) first input vector */
+ int16_t *in2, /* (i) second input vector */
+ int16_t coef, /* (i) weight coefficient in Q14 */
+    int16_t length) /* (i) number of samples in vectors */
+{
+ int i;
+ int16_t invcoef;
+
+ /*
+    Performs the operation out[i] = in1[i]*coef + (1-coef)*in2[i] (with rounding)
+ */
+
+ invcoef = 16384 - coef; /* 16384 = 1.0 (Q14)*/
+ for (i = 0; i < length; i++) {
+ out[i] = (int16_t)((coef * in1[i] + invcoef * in2[i] + 8192) >> 14);
+ }
+
+ return;
+}
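+
+/* Worked Q14 example (editorial addition): with coef = 8192 (0.5 in
+   Q14), in1[i] = 1000 and in2[i] = 2000, invcoef = 16384-8192 = 8192
+   and out[i] = (8192*1000 + 8192*2000 + 8192) >> 14
+              = 24584192 >> 14 = 1500,
+   i.e. the expected midpoint; the +8192 term rounds rather than
+   truncates. */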
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/interpolate.h b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/interpolate.h
new file mode 100644
index 0000000000..892082b75c
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/interpolate.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_Interpolate.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_INTERPOLATE_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_INTERPOLATE_H_
+
+#include <stdint.h>
+
+/*----------------------------------------------------------------*
+ * interpolation between vectors
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_Interpolate(
+ int16_t* out, /* (o) output vector */
+ int16_t* in1, /* (i) first input vector */
+ int16_t* in2, /* (i) second input vector */
+ int16_t coef, /* (i) weight coefficient in Q14 */
+    int16_t length); /* (i) number of samples in vectors */
+
+#endif
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/interpolate_samples.c b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/interpolate_samples.c
new file mode 100644
index 0000000000..6dddd6fb86
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/interpolate_samples.c
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_InterpolateSamples.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/interpolate_samples.h"
+
+#include "modules/audio_coding/codecs/ilbc/constants.h"
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+void WebRtcIlbcfix_InterpolateSamples(
+ int16_t *interpSamples, /* (o) The interpolated samples */
+ int16_t *CBmem, /* (i) The CB memory */
+ size_t lMem /* (i) Length of the CB memory */
+ ) {
+ int16_t *ppi, *ppo, i, j, temp1, temp2;
+ int16_t *tmpPtr;
+
+ /* Calculate the 20 vectors of interpolated samples (4 samples each)
+ that are used in the codebooks for lag 20 to 39 */
+ tmpPtr = interpSamples;
+ for (j=0; j<20; j++) {
+ temp1 = 0;
+ temp2 = 3;
+ ppo = CBmem+lMem-4;
+ ppi = CBmem+lMem-j-24;
+ for (i=0; i<4; i++) {
+
+ *tmpPtr++ = (int16_t)((WebRtcIlbcfix_kAlpha[temp2] * *ppo) >> 15) +
+ (int16_t)((WebRtcIlbcfix_kAlpha[temp1] * *ppi) >> 15);
+
+ ppo++;
+ ppi++;
+ temp1++;
+ temp2--;
+ }
+ }
+
+ return;
+}
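+
+/* Editorial note: for lag 20+j the blend combines the newest four CB
+   memory samples (ppo) with four samples starting j+24 positions from
+   the end (ppi, i.e. a lag of 20+j), using the Q15 weights
+   WebRtcIlbcfix_kAlpha[temp2] (falling) and WebRtcIlbcfix_kAlpha[temp1]
+   (rising) -- assumed here to act as a cross-fade; see constants.c for
+   the actual table values. */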
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/interpolate_samples.h b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/interpolate_samples.h
new file mode 100644
index 0000000000..bc665d7854
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/interpolate_samples.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_InterpolateSamples.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_INTERPOLATE_SAMPLES_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_INTERPOLATE_SAMPLES_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+/*----------------------------------------------------------------*
+ * Construct the interpolated samples for the Augmented CB
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_InterpolateSamples(
+ int16_t* interpSamples, /* (o) The interpolated samples */
+ int16_t* CBmem, /* (i) The CB memory */
+ size_t lMem /* (i) Length of the CB memory */
+ );
+
+#endif
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/lpc_encode.c b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/lpc_encode.c
new file mode 100644
index 0000000000..89f6d29724
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/lpc_encode.c
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_LpcEncode.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/lpc_encode.h"
+
+#include "modules/audio_coding/codecs/ilbc/constants.h"
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+#include "modules/audio_coding/codecs/ilbc/lsf_check.h"
+#include "modules/audio_coding/codecs/ilbc/simple_interpolate_lsf.h"
+#include "modules/audio_coding/codecs/ilbc/simple_lpc_analysis.h"
+#include "modules/audio_coding/codecs/ilbc/simple_lsf_quant.h"
+
+/*----------------------------------------------------------------*
+ * lpc encoder
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_LpcEncode(
+ int16_t *syntdenum, /* (i/o) synthesis filter coefficients
+ before/after encoding */
+ int16_t *weightdenum, /* (i/o) weighting denumerator coefficients
+ before/after encoding */
+ int16_t *lsf_index, /* (o) lsf quantization index */
+ int16_t *data, /* (i) Speech to do LPC analysis on */
+ IlbcEncoder *iLBCenc_inst
+ /* (i/o) the encoder state structure */
+ ) {
+ /* Stack based */
+ int16_t lsf[LPC_FILTERORDER * LPC_N_MAX];
+ int16_t lsfdeq[LPC_FILTERORDER * LPC_N_MAX];
+
+ /* Calculate LSF's from the input speech */
+ WebRtcIlbcfix_SimpleLpcAnalysis(lsf, data, iLBCenc_inst);
+
+ /* Quantize the LSF's */
+ WebRtcIlbcfix_SimpleLsfQ(lsfdeq, lsf_index, lsf, iLBCenc_inst->lpc_n);
+
+  /* Stabilize the LSF's if needed */
+ WebRtcIlbcfix_LsfCheck(lsfdeq, LPC_FILTERORDER, iLBCenc_inst->lpc_n);
+
+ /* Calculate the synthesis and weighting filter coefficients from
+ the optimal LSF and the dequantized LSF */
+ WebRtcIlbcfix_SimpleInterpolateLsf(syntdenum, weightdenum,
+ lsf, lsfdeq, iLBCenc_inst->lsfold,
+ iLBCenc_inst->lsfdeqold, LPC_FILTERORDER, iLBCenc_inst);
+
+ return;
+}
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/lpc_encode.h b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/lpc_encode.h
new file mode 100644
index 0000000000..a67b77acbf
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/lpc_encode.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_LpcEncode.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_LPC_ENCODE_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_LPC_ENCODE_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ * lpc encoder
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_LpcEncode(
+ int16_t* syntdenum, /* (i/o) synthesis filter coefficients
+ before/after encoding */
+ int16_t* weightdenum, /* (i/o) weighting denumerator coefficients
+ before/after encoding */
+ int16_t* lsf_index, /* (o) lsf quantization index */
+ int16_t* data, /* (i) Speech to do LPC analysis on */
+ IlbcEncoder* iLBCenc_inst
+ /* (i/o) the encoder state structure */
+ );
+
+#endif
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/lsf_check.c b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/lsf_check.c
new file mode 100644
index 0000000000..9f0e19a2d9
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/lsf_check.c
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_LsfCheck.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/lsf_check.h"
+
+#include "modules/audio_coding/codecs/ilbc/constants.h"
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ * check for stability of lsf coefficients
+ *---------------------------------------------------------------*/
+
+int WebRtcIlbcfix_LsfCheck(
+ int16_t *lsf, /* LSF parameters */
+ int dim, /* dimension of LSF */
+ int NoAn) /* No of analysis per frame */
+{
+ int k,n,m, Nit=2, change=0,pos;
+ const int16_t eps=319; /* 0.039 in Q13 (50 Hz)*/
+ const int16_t eps2=160; /* eps/2.0 in Q13;*/
+ const int16_t maxlsf=25723; /* 3.14; (4000 Hz)*/
+ const int16_t minlsf=82; /* 0.01; (0 Hz)*/
+
+ /* LSF separation check*/
+  for (n=0;n<Nit;n++) { /* Run through the checks Nit (=2) times */
+ for (m=0;m<NoAn;m++) { /* Number of analyses per frame */
+ for (k=0;k<(dim-1);k++) {
+ pos=m*dim+k;
+
+        /* Separate coefficients with a safety margin of 50 Hz */
+ if ((lsf[pos+1]-lsf[pos])<eps) {
+
+ if (lsf[pos+1]<lsf[pos]) {
+ lsf[pos+1]= lsf[pos]+eps2;
+ lsf[pos]= lsf[pos+1]-eps2;
+ } else {
+ lsf[pos]-=eps2;
+ lsf[pos+1]+=eps2;
+ }
+ change=1;
+ }
+
+ /* Limit minimum and maximum LSF */
+ if (lsf[pos]<minlsf) {
+ lsf[pos]=minlsf;
+ change=1;
+ }
+
+ if (lsf[pos]>maxlsf) {
+ lsf[pos]=maxlsf;
+ change=1;
+ }
+ }
+ }
+ }
+
+ return change;
+}
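+
+/* Worked example (editorial addition): with lsf[pos] = 1000 and
+   lsf[pos+1] = 1100 in Q13, the gap of 100 is below eps = 319 (~50 Hz),
+   so the pair is pushed apart to 840 and 1260, a gap of 420 >= eps. An
+   out-of-order pair is only restored to a gap of eps2 = eps/2 in one
+   pass, which appears to be why the outer loop runs Nit = 2 times. */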
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/lsf_check.h b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/lsf_check.h
new file mode 100644
index 0000000000..9ba90a31e6
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/lsf_check.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_LsfCheck.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_LSF_CHECK_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_LSF_CHECK_H_
+
+#include <stdint.h>
+
+/*----------------------------------------------------------------*
+ * check for stability of lsf coefficients
+ *---------------------------------------------------------------*/
+
+int WebRtcIlbcfix_LsfCheck(int16_t* lsf, /* LSF parameters */
+ int dim, /* dimension of LSF */
+ int NoAn); /* No of analysis per frame */
+
+#endif
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/lsf_interpolate_to_poly_dec.c b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/lsf_interpolate_to_poly_dec.c
new file mode 100644
index 0000000000..04de5e7e6c
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/lsf_interpolate_to_poly_dec.c
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_LspInterpolate2PolyDec.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/lsf_interpolate_to_poly_dec.h"
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+#include "modules/audio_coding/codecs/ilbc/interpolate.h"
+#include "modules/audio_coding/codecs/ilbc/lsf_to_poly.h"
+
+/*----------------------------------------------------------------*
+ * interpolation of lsf coefficients for the decoder
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_LspInterpolate2PolyDec(
+ int16_t *a, /* (o) lpc coefficients Q12 */
+ int16_t *lsf1, /* (i) first set of lsf coefficients Q13 */
+ int16_t *lsf2, /* (i) second set of lsf coefficients Q13 */
+ int16_t coef, /* (i) weighting coefficient to use between
+ lsf1 and lsf2 Q14 */
+ int16_t length /* (i) length of coefficient vectors */
+ ){
+ int16_t lsftmp[LPC_FILTERORDER];
+
+ /* interpolate LSF */
+ WebRtcIlbcfix_Interpolate(lsftmp, lsf1, lsf2, coef, length);
+
+ /* Compute the filter coefficients from the LSF */
+ WebRtcIlbcfix_Lsf2Poly(a, lsftmp);
+}
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/lsf_interpolate_to_poly_dec.h b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/lsf_interpolate_to_poly_dec.h
new file mode 100644
index 0000000000..6cc9d9746d
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/lsf_interpolate_to_poly_dec.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_LspInterpolate2PolyDec.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_LSF_INTERPOLATE_TO_POLY_DEC_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_LSF_INTERPOLATE_TO_POLY_DEC_H_
+
+#include <stdint.h>
+
+/*----------------------------------------------------------------*
+ * interpolation of lsf coefficients for the decoder
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_LspInterpolate2PolyDec(
+ int16_t* a, /* (o) lpc coefficients Q12 */
+ int16_t* lsf1, /* (i) first set of lsf coefficients Q13 */
+ int16_t* lsf2, /* (i) second set of lsf coefficients Q13 */
+ int16_t coef, /* (i) weighting coefficient to use between
+ lsf1 and lsf2 Q14 */
+ int16_t length /* (i) length of coefficient vectors */
+ );
+
+#endif
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/lsf_interpolate_to_poly_enc.c b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/lsf_interpolate_to_poly_enc.c
new file mode 100644
index 0000000000..618821216c
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/lsf_interpolate_to_poly_enc.c
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_LsfInterpolate2PloyEnc.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/lsf_interpolate_to_poly_enc.h"
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+#include "modules/audio_coding/codecs/ilbc/interpolate.h"
+#include "modules/audio_coding/codecs/ilbc/lsf_to_poly.h"
+
+/*----------------------------------------------------------------*
+ * lsf interpolator and conversion from lsf to a coefficients
+ * (subroutine to SimpleInterpolateLSF)
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_LsfInterpolate2PloyEnc(
+ int16_t *a, /* (o) lpc coefficients Q12 */
+ int16_t *lsf1, /* (i) first set of lsf coefficients Q13 */
+ int16_t *lsf2, /* (i) second set of lsf coefficients Q13 */
+ int16_t coef, /* (i) weighting coefficient to use between
+ lsf1 and lsf2 Q14 */
+ int16_t length /* (i) length of coefficient vectors */
+ ) {
+ /* Stack based */
+ int16_t lsftmp[LPC_FILTERORDER];
+
+ /* interpolate LSF */
+ WebRtcIlbcfix_Interpolate(lsftmp, lsf1, lsf2, coef, length);
+
+ /* Compute the filter coefficients from the LSF */
+ WebRtcIlbcfix_Lsf2Poly(a, lsftmp);
+
+ return;
+}
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/lsf_interpolate_to_poly_enc.h b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/lsf_interpolate_to_poly_enc.h
new file mode 100644
index 0000000000..b278a10f4b
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/lsf_interpolate_to_poly_enc.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_LsfInterpolate2PloyEnc.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_LSF_INTERPOLATE_TO_POLY_ENC_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_LSF_INTERPOLATE_TO_POLY_ENC_H_
+
+#include <stdint.h>
+
+/*----------------------------------------------------------------*
+ * lsf interpolator and conversion from lsf to a coefficients
+ * (subroutine to SimpleInterpolateLSF)
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_LsfInterpolate2PloyEnc(
+ int16_t* a, /* (o) lpc coefficients Q12 */
+ int16_t* lsf1, /* (i) first set of lsf coefficients Q13 */
+ int16_t* lsf2, /* (i) second set of lsf coefficients Q13 */
+ int16_t coef, /* (i) weighting coefficient to use between
+ lsf1 and lsf2 Q14 */
+ int16_t length /* (i) length of coefficient vectors */
+ );
+
+#endif
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/lsf_to_lsp.c b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/lsf_to_lsp.c
new file mode 100644
index 0000000000..ee8292f394
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/lsf_to_lsp.c
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_Lsf2Lsp.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/lsf_to_lsp.h"
+
+#include "modules/audio_coding/codecs/ilbc/constants.h"
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ * conversion from lsf to lsp coefficients
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_Lsf2Lsp(
+ int16_t *lsf, /* (i) lsf in Q13 values between 0 and pi */
+ int16_t *lsp, /* (o) lsp in Q15 values between -1 and 1 */
+ int16_t m /* (i) number of coefficients */
+ ) {
+ int16_t i, k;
+ int16_t diff; /* difference, which is used for the
+ linear approximation (Q8) */
+ int16_t freq; /* normalized frequency in Q15 (0..1) */
+ int32_t tmpW32;
+
+ for(i=0; i<m; i++)
+ {
+ freq = (int16_t)((lsf[i] * 20861) >> 15);
+ /* 20861: 1.0/(2.0*PI) in Q17 */
+ /*
+ Upper 8 bits give the index k and
+ Lower 8 bits give the difference, which needs
+ to be approximated linearly
+ */
+ k = freq >> 8;
+ diff = (freq&0x00ff);
+
+ /* Guard against getting outside table */
+
+ if (k>63) {
+ k = 63;
+ }
+
+ /* Calculate linear approximation */
+ tmpW32 = WebRtcIlbcfix_kCosDerivative[k] * diff;
+ lsp[i] = WebRtcIlbcfix_kCos[k] + (int16_t)(tmpW32 >> 12);
+ }
+
+ return;
+}
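+
+/* Worked trace (editorial addition): for lsf[i] = 8192 (1.0 rad in
+   Q13), freq = (8192*20861) >> 15 = 5215, so k = 5215 >> 8 = 20 and
+   diff = 5215 & 0xff = 95; the result is WebRtcIlbcfix_kCos[20] plus
+   the first-order correction (WebRtcIlbcfix_kCosDerivative[20]*95) >> 12. */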
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/lsf_to_lsp.h b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/lsf_to_lsp.h
new file mode 100644
index 0000000000..6bc6c44dbd
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/lsf_to_lsp.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_Lsf2Lsp.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_LSF_TO_LSP_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_LSF_TO_LSP_H_
+
+#include <stdint.h>
+
+/*----------------------------------------------------------------*
+ * conversion from lsf to lsp coefficients
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_Lsf2Lsp(
+ int16_t* lsf, /* (i) lsf in Q13 values between 0 and pi */
+ int16_t* lsp, /* (o) lsp in Q15 values between -1 and 1 */
+ int16_t m /* (i) number of coefficients */
+ );
+
+#endif
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/lsf_to_poly.c b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/lsf_to_poly.c
new file mode 100644
index 0000000000..8ca91d82f8
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/lsf_to_poly.c
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_Lsf2Poly.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/lsf_to_poly.h"
+
+#include "modules/audio_coding/codecs/ilbc/constants.h"
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+#include "modules/audio_coding/codecs/ilbc/get_lsp_poly.h"
+#include "modules/audio_coding/codecs/ilbc/lsf_to_lsp.h"
+
+void WebRtcIlbcfix_Lsf2Poly(
+ int16_t *a, /* (o) predictor coefficients (order = 10) in Q12 */
+ int16_t *lsf /* (i) line spectral frequencies in Q13 */
+ ) {
+  int32_t f[2][6]; /* f[0][] and f[1][] correspond to
+                      F1(z) and F2(z) respectively */
+ int32_t *f1ptr, *f2ptr;
+ int16_t *a1ptr, *a2ptr;
+ int32_t tmpW32;
+ int16_t lsp[10];
+ int i;
+
+ /* Convert lsf to lsp */
+ WebRtcIlbcfix_Lsf2Lsp(lsf, lsp, LPC_FILTERORDER);
+
+ /* Get F1(z) and F2(z) from the lsp */
+ f1ptr=f[0];
+ f2ptr=f[1];
+ WebRtcIlbcfix_GetLspPoly(&lsp[0],f1ptr);
+ WebRtcIlbcfix_GetLspPoly(&lsp[1],f2ptr);
+
+  /* for i = 5 down to 1
+     Compute f1[i] += f1[i-1];
+     and     f2[i] -= f2[i-1];
+  */
+ f1ptr=&f[0][5];
+ f2ptr=&f[1][5];
+ for (i=5; i>0; i--)
+ {
+ (*f1ptr) += (*(f1ptr-1));
+ (*f2ptr) -= (*(f2ptr-1));
+ f1ptr--;
+ f2ptr--;
+ }
+
+ /* Get the A(z) coefficients
+ a[0] = 1.0
+ for i = 1 to 5
+ a[i] = (f1[i] + f2[i] + round)>>13;
+ for i = 1 to 5
+ a[11-i] = (f1[i] - f2[i] + round)>>13;
+ */
+ a[0]=4096;
+ a1ptr=&a[1];
+ a2ptr=&a[10];
+ f1ptr=&f[0][1];
+ f2ptr=&f[1][1];
+ for (i=5; i>0; i--)
+ {
+ tmpW32 = (*f1ptr) + (*f2ptr);
+ *a1ptr = (int16_t)((tmpW32 + 4096) >> 13);
+
+ tmpW32 = (*f1ptr) - (*f2ptr);
+ *a2ptr = (int16_t)((tmpW32 + 4096) >> 13);
+
+ a1ptr++;
+ a2ptr--;
+ f1ptr++;
+ f2ptr++;
+ }
+
+ return;
+}
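+
+/* Editorial note: this is the standard LSP-to-LPC reconstruction.
+   F1(z) is formed from the even-indexed LSPs and F2(z) from the
+   odd-indexed ones, the +=/-= pass above folds in the (1 + z^-1) and
+   (1 - z^-1) factors respectively, and A(z) = (F1(z) + F2(z))/2, with
+   the division by 2 absorbed into the final rounding shift. */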
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/lsf_to_poly.h b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/lsf_to_poly.h
new file mode 100644
index 0000000000..f26d3a8d2d
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/lsf_to_poly.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_Lsf2Poly.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_LSF_TO_POLY_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_LSF_TO_POLY_H_
+
+#include <stdint.h>
+
+/*----------------------------------------------------------------*
+ * Convert from LSF coefficients to A coefficients
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_Lsf2Poly(
+ int16_t* a, /* (o) predictor coefficients (order = 10) in Q12 */
+ int16_t* lsf /* (i) line spectral frequencies in Q13 */
+ );
+
+#endif
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/lsp_to_lsf.c b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/lsp_to_lsf.c
new file mode 100644
index 0000000000..227f4d45b4
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/lsp_to_lsf.c
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_Lsp2Lsf.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/lsp_to_lsf.h"
+
+#include "modules/audio_coding/codecs/ilbc/constants.h"
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ * conversion from LSP coefficients to LSF coefficients
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_Lsp2Lsf(
+ int16_t *lsp, /* (i) lsp vector -1...+1 in Q15 */
+ int16_t *lsf, /* (o) Lsf vector 0...Pi in Q13
+ (ordered, so that lsf[i]<lsf[i+1]) */
+ int16_t m /* (i) Number of coefficients */
+ )
+{
+ int16_t i, k;
+ int16_t diff; /* diff between table value and desired value (Q15) */
+ int16_t freq; /* lsf/(2*pi) (Q16) */
+ int16_t *lspPtr, *lsfPtr, *cosTblPtr;
+ int16_t tmp;
+
+ /* set the index to maximum index value in WebRtcIlbcfix_kCos */
+ k = 63;
+
+ /*
+    Start with the highest LSP and work downwards.
+    For each LSP the lsf is calculated by a first-order approximation
+    of the acos(x) function
+  */
+ lspPtr = &lsp[9];
+ lsfPtr = &lsf[9];
+ cosTblPtr=(int16_t*)&WebRtcIlbcfix_kCos[k];
+ for(i=m-1; i>=0; i--)
+ {
+ /*
+ locate value in the table, which is just above lsp[i],
+ basically an approximation to acos(x)
+ */
+ while( (((int32_t)(*cosTblPtr)-(*lspPtr)) < 0)&&(k>0) )
+ {
+ k-=1;
+ cosTblPtr--;
+ }
+
+ /* Calculate diff, which is used in the linear approximation of acos(x) */
+ diff = (*lspPtr)-(*cosTblPtr);
+
+ /*
+ The linear approximation of acos(lsp[i]) :
+      acos(lsp[i]) = k*512 + ((WebRtcIlbcfix_kAcosDerivative[k]*diff) >> 11)
+ */
+
+ /* tmp (linear offset) in Q16 */
+ tmp = (int16_t)((WebRtcIlbcfix_kAcosDerivative[k] * diff) >> 11);
+
+ /* freq in Q16 */
+ freq = (k << 9) + tmp;
+
+ /* lsf = freq*2*pi */
+ (*lsfPtr) = (int16_t)(((int32_t)freq*25736)>>15);
+
+ lsfPtr--;
+ lspPtr--;
+ }
+
+ return;
+}
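+
+/* Editorial note on the fixed-point chain: freq = (k << 9) + tmp is a
+   normalized frequency in Q16 (k << 9 is at most 63*512 = 32256, just
+   under 0.5), and 25736 is 2*pi in Q12, so Q16 * Q12 >> 15 yields the
+   Q13 radian value used elsewhere in the codec -- the inverse of the
+   mapping in WebRtcIlbcfix_Lsf2Lsp. */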
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/lsp_to_lsf.h b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/lsp_to_lsf.h
new file mode 100644
index 0000000000..c2f4b7692d
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/lsp_to_lsf.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_Lsp2Lsf.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_LSP_TO_LSF_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_LSP_TO_LSF_H_
+
+#include <stdint.h>
+
+/*----------------------------------------------------------------*
+ * conversion from LSP coefficients to LSF coefficients
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_Lsp2Lsf(
+ int16_t* lsp, /* (i) lsp vector -1...+1 in Q15 */
+ int16_t* lsf, /* (o) Lsf vector 0...Pi in Q13
+ (ordered, so that lsf[i]<lsf[i+1]) */
+ int16_t m /* (i) Number of coefficients */
+ );
+
+#endif
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/my_corr.c b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/my_corr.c
new file mode 100644
index 0000000000..9b870e0ef0
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/my_corr.c
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_MyCorr.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/my_corr.h"
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ * compute cross correlation between sequences
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_MyCorr(
+ int32_t* corr, /* (o) correlation of seq1 and seq2 */
+ const int16_t* seq1, /* (i) first sequence */
+ size_t dim1, /* (i) dimension first seq1 */
+ const int16_t* seq2, /* (i) second sequence */
+ size_t dim2 /* (i) dimension seq2 */
+ ){
+ uint32_t max1, max2;
+ size_t loops;
+ int right_shift;
+
+ // Calculate a right shift that will let us sum dim2 pairwise products of
+ // values from the two sequences without overflowing an int32_t. (The +1 in
+ // max1 and max2 are because WebRtcSpl_MaxAbsValueW16 will return 2**15 - 1
+ // if the input array contains -2**15.)
+ max1 = WebRtcSpl_MaxAbsValueW16(seq1, dim1) + 1;
+ max2 = WebRtcSpl_MaxAbsValueW16(seq2, dim2) + 1;
+ right_shift =
+ (64 - 31) - WebRtcSpl_CountLeadingZeros64((max1 * max2) * (uint64_t)dim2);
+ if (right_shift < 0) {
+ right_shift = 0;
+ }
+
+ loops=dim1-dim2+1;
+
+ /* Calculate the cross correlations */
+ WebRtcSpl_CrossCorrelation(corr, seq2, seq1, dim2, loops, right_shift, 1);
+
+ return;
+}
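+
+/* Worked example (editorial addition): if both sequences contain
+   -2**15 and dim2 = 64, then max1 = max2 = 2**15,
+   max1*max2*(uint64_t)dim2 = 2**36, CountLeadingZeros64 returns 27 and
+   right_shift = 33-27 = 6. Each shifted product is then at most
+   2**30 >> 6 = 2**24, and a sum of 64 such terms stays below the
+   int32_t limit of 2**31. */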
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/my_corr.h b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/my_corr.h
new file mode 100644
index 0000000000..c0c2fa4a48
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/my_corr.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_MyCorr.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_MY_CORR_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_MY_CORR_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+/*----------------------------------------------------------------*
+ * compute cross correlation between sequences
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_MyCorr(int32_t* corr, /* (o) correlation of seq1 and seq2 */
+ const int16_t* seq1, /* (i) first sequence */
+ size_t dim1, /* (i) dimension first seq1 */
+ const int16_t* seq2, /* (i) second sequence */
+ size_t dim2 /* (i) dimension seq2 */
+ );
+
+#endif
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/nearest_neighbor.c b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/nearest_neighbor.c
new file mode 100644
index 0000000000..1ecdd96d5a
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/nearest_neighbor.c
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_NearestNeighbor.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/nearest_neighbor.h"
+
+void WebRtcIlbcfix_NearestNeighbor(size_t* index,
+ const size_t* array,
+ size_t value,
+ size_t arlength) {
+ size_t i;
+ size_t min_diff = (size_t)-1;
+ for (i = 0; i < arlength; i++) {
+ const size_t diff =
+ (array[i] < value) ? (value - array[i]) : (array[i] - value);
+ if (diff < min_diff) {
+ *index = i;
+ min_diff = diff;
+ }
+ }
+}
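+
+/* Usage sketch (editorial addition): min_diff starts at (size_t)-1,
+   i.e. SIZE_MAX, so the first element always initializes *index. For
+   array = {10, 20, 30}, value = 17 and arlength = 3 the distances are
+   7, 3 and 13, leaving *index == 1. */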
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/nearest_neighbor.h b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/nearest_neighbor.h
new file mode 100644
index 0000000000..704cf2a37d
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/nearest_neighbor.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_NearestNeighbor.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_NEAREST_NEIGHBOR_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_NEAREST_NEIGHBOR_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+/*----------------------------------------------------------------*
+ * Find index in array such that the array element with said
+ * index is the element of said array closest to "value"
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_NearestNeighbor(
+ size_t* index, /* (o) index of array element closest to value */
+ const size_t* array, /* (i) data array (Q2) */
+ size_t value, /* (i) value (Q2) */
+ size_t arlength /* (i) dimension of data array (==ENH_NBLOCKS_TOT) */
+ );
+
+#endif
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/pack_bits.c b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/pack_bits.c
new file mode 100644
index 0000000000..dd44eb8fb6
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/pack_bits.c
@@ -0,0 +1,253 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_PackBits.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/pack_bits.h"
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ * packing of bits into bitstream, i.e., vector of bytes
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_PackBits(
+ uint16_t *bitstream, /* (o) The packetized bitstream */
+ iLBC_bits *enc_bits, /* (i) Encoded bits */
+ int16_t mode /* (i) Codec mode (20 or 30) */
+ ){
+ uint16_t *bitstreamPtr;
+ int i, k;
+ int16_t *tmpPtr;
+
+ bitstreamPtr=bitstream;
+
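+  /* Editorial note: the "Bit n..m" comments below number bits MSB
+     first within each uint16_t, so "Bit 0" is the most significant
+     bit; e.g. lsf[0] << 10 places that 6-bit field in physical bits
+     15..10, labelled Bit 0..5. */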
+ /* Class 1 bits of ULP */
+ /* First int16_t */
+ (*bitstreamPtr) = ((uint16_t)enc_bits->lsf[0])<<10; /* Bit 0..5 */
+ (*bitstreamPtr) |= (enc_bits->lsf[1])<<3; /* Bit 6..12 */
+ (*bitstreamPtr) |= (enc_bits->lsf[2]&0x70)>>4; /* Bit 13..15 */
+ bitstreamPtr++;
+ /* Second int16_t */
+ (*bitstreamPtr) = ((uint16_t)enc_bits->lsf[2]&0xF)<<12; /* Bit 0..3 */
+
+ if (mode==20) {
+ (*bitstreamPtr) |= (enc_bits->startIdx)<<10; /* Bit 4..5 */
+ (*bitstreamPtr) |= (enc_bits->state_first)<<9; /* Bit 6 */
+ (*bitstreamPtr) |= (enc_bits->idxForMax)<<3; /* Bit 7..12 */
+ (*bitstreamPtr) |= ((enc_bits->cb_index[0])&0x70)>>4; /* Bit 13..15 */
+ bitstreamPtr++;
+ /* Third int16_t */
+ (*bitstreamPtr) = ((enc_bits->cb_index[0])&0xE)<<12; /* Bit 0..2 */
+ (*bitstreamPtr) |= ((enc_bits->gain_index[0])&0x18)<<8; /* Bit 3..4 */
+ (*bitstreamPtr) |= ((enc_bits->gain_index[1])&0x8)<<7; /* Bit 5 */
+ (*bitstreamPtr) |= ((enc_bits->cb_index[3])&0xFE)<<2; /* Bit 6..12 */
+ (*bitstreamPtr) |= ((enc_bits->gain_index[3])&0x10)>>2; /* Bit 13 */
+ (*bitstreamPtr) |= ((enc_bits->gain_index[4])&0x8)>>2; /* Bit 14 */
+ (*bitstreamPtr) |= ((enc_bits->gain_index[6])&0x10)>>4; /* Bit 15 */
+ } else { /* mode==30 */
+ (*bitstreamPtr) |= (enc_bits->lsf[3])<<6; /* Bit 4..9 */
+ (*bitstreamPtr) |= (enc_bits->lsf[4]&0x7E)>>1; /* Bit 10..15 */
+ bitstreamPtr++;
+ /* Third int16_t */
+ (*bitstreamPtr) = ((uint16_t)enc_bits->lsf[4]&0x1)<<15; /* Bit 0 */
+ (*bitstreamPtr) |= (enc_bits->lsf[5])<<8; /* Bit 1..7 */
+ (*bitstreamPtr) |= (enc_bits->startIdx)<<5; /* Bit 8..10 */
+ (*bitstreamPtr) |= (enc_bits->state_first)<<4; /* Bit 11 */
+ (*bitstreamPtr) |= ((enc_bits->idxForMax)&0x3C)>>2; /* Bit 12..15 */
+ bitstreamPtr++;
+ /* 4:th int16_t */
+ (*bitstreamPtr) = ((uint16_t)enc_bits->idxForMax&0x3)<<14; /* Bit 0..1 */
+ (*bitstreamPtr) |= (enc_bits->cb_index[0]&0x78)<<7; /* Bit 2..5 */
+ (*bitstreamPtr) |= (enc_bits->gain_index[0]&0x10)<<5; /* Bit 6 */
+ (*bitstreamPtr) |= (enc_bits->gain_index[1]&0x8)<<5; /* Bit 7 */
+ (*bitstreamPtr) |= (enc_bits->cb_index[3]&0xFC); /* Bit 8..13 */
+ (*bitstreamPtr) |= (enc_bits->gain_index[3]&0x10)>>3; /* Bit 14 */
+ (*bitstreamPtr) |= (enc_bits->gain_index[4]&0x8)>>3; /* Bit 15 */
+ }
+ /* Class 2 bits of ULP */
+ /* 4:th to 6:th int16_t for 20 ms case
+ 5:th to 7:th int16_t for 30 ms case */
+ bitstreamPtr++;
+ tmpPtr=enc_bits->idxVec;
+ for (k=0; k<3; k++) {
+ (*bitstreamPtr) = 0;
+ for (i=15; i>=0; i--) {
+ (*bitstreamPtr) |= ((uint16_t)((*tmpPtr)&0x4)>>2)<<i;
+ /* Bit 15-i */
+ tmpPtr++;
+ }
+ bitstreamPtr++;
+ }
+
+ if (mode==20) {
+ /* 7:th int16_t */
+ (*bitstreamPtr) = 0;
+ for (i=15; i>6; i--) {
+ (*bitstreamPtr) |= ((uint16_t)((*tmpPtr)&0x4)>>2)<<i;
+ /* Bit 15-i */
+ tmpPtr++;
+ }
+ (*bitstreamPtr) |= (enc_bits->gain_index[1]&0x4)<<4; /* Bit 9 */
+ (*bitstreamPtr) |= (enc_bits->gain_index[3]&0xC)<<2; /* Bit 10..11 */
+ (*bitstreamPtr) |= (enc_bits->gain_index[4]&0x4)<<1; /* Bit 12 */
+ (*bitstreamPtr) |= (enc_bits->gain_index[6]&0x8)>>1; /* Bit 13 */
+ (*bitstreamPtr) |= (enc_bits->gain_index[7]&0xC)>>2; /* Bit 14..15 */
+
+ } else { /* mode==30 */
+ /* 8:th int16_t */
+ (*bitstreamPtr) = 0;
+ for (i=15; i>5; i--) {
+ (*bitstreamPtr) |= ((uint16_t)((*tmpPtr)&0x4)>>2)<<i;
+ /* Bit 15-i */
+ tmpPtr++;
+ }
+ (*bitstreamPtr) |= (enc_bits->cb_index[0]&0x6)<<3; /* Bit 10..11 */
+ (*bitstreamPtr) |= (enc_bits->gain_index[0]&0x8); /* Bit 12 */
+ (*bitstreamPtr) |= (enc_bits->gain_index[1]&0x4); /* Bit 13 */
+ (*bitstreamPtr) |= (enc_bits->cb_index[3]&0x2); /* Bit 14 */
+ (*bitstreamPtr) |= (enc_bits->cb_index[6]&0x80)>>7; /* Bit 15 */
+ bitstreamPtr++;
+ /* 9:th int16_t */
+ (*bitstreamPtr) = ((uint16_t)enc_bits->cb_index[6]&0x7E)<<9;/* Bit 0..5 */
+ (*bitstreamPtr) |= (enc_bits->cb_index[9]&0xFE)<<2; /* Bit 6..12 */
+ (*bitstreamPtr) |= (enc_bits->cb_index[12]&0xE0)>>5; /* Bit 13..15 */
+ bitstreamPtr++;
+ /* 10:th int16_t */
+ (*bitstreamPtr) = ((uint16_t)enc_bits->cb_index[12]&0x1E)<<11;/* Bit 0..3 */
+ (*bitstreamPtr) |= (enc_bits->gain_index[3]&0xC)<<8; /* Bit 4..5 */
+ (*bitstreamPtr) |= (enc_bits->gain_index[4]&0x6)<<7; /* Bit 6..7 */
+ (*bitstreamPtr) |= (enc_bits->gain_index[6]&0x18)<<3; /* Bit 8..9 */
+ (*bitstreamPtr) |= (enc_bits->gain_index[7]&0xC)<<2; /* Bit 10..11 */
+ (*bitstreamPtr) |= (enc_bits->gain_index[9]&0x10)>>1; /* Bit 12 */
+ (*bitstreamPtr) |= (enc_bits->gain_index[10]&0x8)>>1; /* Bit 13 */
+ (*bitstreamPtr) |= (enc_bits->gain_index[12]&0x10)>>3; /* Bit 14 */
+ (*bitstreamPtr) |= (enc_bits->gain_index[13]&0x8)>>3; /* Bit 15 */
+ }
+ bitstreamPtr++;
+ /* Class 3 bits of ULP */
+ /* 8:th to 14:th int16_t for 20 ms case
+ 11:th to 17:th int16_t for 30 ms case */
+ tmpPtr=enc_bits->idxVec;
+ for (k=0; k<7; k++) {
+ (*bitstreamPtr) = 0;
+ for (i=14; i>=0; i-=2) {
+ (*bitstreamPtr) |= ((uint16_t)((*tmpPtr)&0x3))<<i; /* Bit 15-i..14-i*/
+ tmpPtr++;
+ }
+ bitstreamPtr++;
+ }
+
+ if (mode==20) {
+ /* 15:th int16_t */
+ (*bitstreamPtr) = ((uint16_t)((enc_bits->idxVec[56])&0x3))<<14;/* Bit 0..1 */
+ (*bitstreamPtr) |= (((enc_bits->cb_index[0])&1))<<13; /* Bit 2 */
+ (*bitstreamPtr) |= ((enc_bits->cb_index[1]))<<6; /* Bit 3..9 */
+ (*bitstreamPtr) |= ((enc_bits->cb_index[2])&0x7E)>>1; /* Bit 10..15 */
+ bitstreamPtr++;
+ /* 16:th int16_t */
+ (*bitstreamPtr) = ((uint16_t)((enc_bits->cb_index[2])&0x1))<<15;
+ /* Bit 0 */
+ (*bitstreamPtr) |= ((enc_bits->gain_index[0])&0x7)<<12; /* Bit 1..3 */
+ (*bitstreamPtr) |= ((enc_bits->gain_index[1])&0x3)<<10; /* Bit 4..5 */
+ (*bitstreamPtr) |= ((enc_bits->gain_index[2]))<<7; /* Bit 6..8 */
+ (*bitstreamPtr) |= ((enc_bits->cb_index[3])&0x1)<<6; /* Bit 9 */
+ (*bitstreamPtr) |= ((enc_bits->cb_index[4])&0x7E)>>1; /* Bit 10..15 */
+ bitstreamPtr++;
+ /* 17:th int16_t */
+ (*bitstreamPtr) = ((uint16_t)((enc_bits->cb_index[4])&0x1))<<15;
+ /* Bit 0 */
+ (*bitstreamPtr) |= (enc_bits->cb_index[5])<<8; /* Bit 1..7 */
+ (*bitstreamPtr) |= (enc_bits->cb_index[6]); /* Bit 8..15 */
+ bitstreamPtr++;
+ /* 18:th int16_t */
+ (*bitstreamPtr) = ((uint16_t)(enc_bits->cb_index[7]))<<8; /* Bit 0..7 */
+ (*bitstreamPtr) |= (enc_bits->cb_index[8]); /* Bit 8..15 */
+ bitstreamPtr++;
+ /* 19:th int16_t */
+ (*bitstreamPtr) = ((uint16_t)((enc_bits->gain_index[3])&0x3))<<14;
+ /* Bit 0..1 */
+ (*bitstreamPtr) |= ((enc_bits->gain_index[4])&0x3)<<12; /* Bit 2..3 */
+ (*bitstreamPtr) |= ((enc_bits->gain_index[5]))<<9; /* Bit 4..6 */
+ (*bitstreamPtr) |= ((enc_bits->gain_index[6])&0x7)<<6; /* Bit 7..9 */
+ (*bitstreamPtr) |= ((enc_bits->gain_index[7])&0x3)<<4; /* Bit 10..11 */
+ (*bitstreamPtr) |= (enc_bits->gain_index[8])<<1; /* Bit 12..14 */
+ } else { /* mode==30 */
+ /* 18:th int16_t */
+ (*bitstreamPtr) = ((uint16_t)((enc_bits->idxVec[56])&0x3))<<14;/* Bit 0..1 */
+ (*bitstreamPtr) |= (((enc_bits->idxVec[57])&0x3))<<12; /* Bit 2..3 */
+ (*bitstreamPtr) |= (((enc_bits->cb_index[0])&1))<<11; /* Bit 4 */
+ (*bitstreamPtr) |= ((enc_bits->cb_index[1]))<<4; /* Bit 5..11 */
+ (*bitstreamPtr) |= ((enc_bits->cb_index[2])&0x78)>>3; /* Bit 12..15 */
+ bitstreamPtr++;
+ /* 19:th int16_t */
+ (*bitstreamPtr) = ((uint16_t)(enc_bits->cb_index[2])&0x7)<<13;
+ /* Bit 0..2 */
+ (*bitstreamPtr) |= ((enc_bits->gain_index[0])&0x7)<<10; /* Bit 3..5 */
+ (*bitstreamPtr) |= ((enc_bits->gain_index[1])&0x3)<<8; /* Bit 6..7 */
+ (*bitstreamPtr) |= ((enc_bits->gain_index[2])&0x7)<<5; /* Bit 8..10 */
+ (*bitstreamPtr) |= ((enc_bits->cb_index[3])&0x1)<<4; /* Bit 11 */
+ (*bitstreamPtr) |= ((enc_bits->cb_index[4])&0x78)>>3; /* Bit 12..15 */
+ bitstreamPtr++;
+ /* 20:th int16_t */
+ (*bitstreamPtr) = ((uint16_t)(enc_bits->cb_index[4])&0x7)<<13;
+ /* Bit 0..2 */
+ (*bitstreamPtr) |= ((enc_bits->cb_index[5]))<<6; /* Bit 3..9 */
+ (*bitstreamPtr) |= ((enc_bits->cb_index[6])&0x1)<<5; /* Bit 10 */
+ (*bitstreamPtr) |= ((enc_bits->cb_index[7])&0xF8)>>3; /* Bit 11..15 */
+ bitstreamPtr++;
+ /* 21:st int16_t */
+ (*bitstreamPtr) = ((uint16_t)(enc_bits->cb_index[7])&0x7)<<13;
+ /* Bit 0..2 */
+ (*bitstreamPtr) |= ((enc_bits->cb_index[8]))<<5; /* Bit 3..10 */
+ (*bitstreamPtr) |= ((enc_bits->cb_index[9])&0x1)<<4; /* Bit 11 */
+ (*bitstreamPtr) |= ((enc_bits->cb_index[10])&0xF0)>>4; /* Bit 12..15 */
+ bitstreamPtr++;
+ /* 22:nd int16_t */
+ (*bitstreamPtr) = ((uint16_t)(enc_bits->cb_index[10])&0xF)<<12;
+ /* Bit 0..3 */
+ (*bitstreamPtr) |= ((enc_bits->cb_index[11]))<<4; /* Bit 4..11 */
+ (*bitstreamPtr) |= ((enc_bits->cb_index[12])&0x1)<<3; /* Bit 12 */
+ (*bitstreamPtr) |= ((enc_bits->cb_index[13])&0xE0)>>5; /* Bit 13..15 */
+ bitstreamPtr++;
+ /* 23:rd int16_t */
+ (*bitstreamPtr) = ((uint16_t)(enc_bits->cb_index[13])&0x1F)<<11;
+ /* Bit 0..4 */
+ (*bitstreamPtr) |= ((enc_bits->cb_index[14]))<<3; /* Bit 5..12 */
+ (*bitstreamPtr) |= ((enc_bits->gain_index[3])&0x3)<<1; /* Bit 13..14 */
+ (*bitstreamPtr) |= ((enc_bits->gain_index[4])&0x1); /* Bit 15 */
+ bitstreamPtr++;
+ /* 24:th int16_t */
+ (*bitstreamPtr) = ((uint16_t)(enc_bits->gain_index[5]))<<13;
+ /* Bit 0..2 */
+ (*bitstreamPtr) |= ((enc_bits->gain_index[6])&0x7)<<10; /* Bit 3..5 */
+ (*bitstreamPtr) |= ((enc_bits->gain_index[7])&0x3)<<8; /* Bit 6..7 */
+ (*bitstreamPtr) |= ((enc_bits->gain_index[8]))<<5; /* Bit 8..10 */
+ (*bitstreamPtr) |= ((enc_bits->gain_index[9])&0xF)<<1; /* Bit 11..14 */
+ (*bitstreamPtr) |= ((enc_bits->gain_index[10])&0x4)>>2; /* Bit 15 */
+ bitstreamPtr++;
+ /* 25:th int16_t */
+ (*bitstreamPtr) = ((uint16_t)(enc_bits->gain_index[10])&0x3)<<14;
+ /* Bit 0..1 */
+ (*bitstreamPtr) |= ((enc_bits->gain_index[11]))<<11; /* Bit 2..4 */
+ (*bitstreamPtr) |= ((enc_bits->gain_index[12])&0xF)<<7; /* Bit 5..8 */
+ (*bitstreamPtr) |= ((enc_bits->gain_index[13])&0x7)<<4; /* Bit 9..11 */
+ (*bitstreamPtr) |= ((enc_bits->gain_index[14]))<<1; /* Bit 12..14 */
+ }
+ /* Last bit is automatically zero */
+
+ return;
+}
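The packing above is one idiom repeated many times: mask a field of known width, shift it into the current 16-bit word, and spill the remainder into the next word when the field straddles a boundary. A minimal standalone sketch of that idiom follows; all names are illustrative and not part of the WebRTC sources, which hard-code each shift against the fixed iLBC ULP layout instead.

#include <stdint.h>
#include <stdio.h>

/* Pack the low `nbits` bits of `value` into the stream starting at bit
   position `bitpos` (MSB-first), possibly straddling a word boundary. */
static void pack_field(uint16_t *stream, int bitpos, int nbits, uint16_t value) {
  int word = bitpos >> 4;      /* which 16-bit word */
  int offset = bitpos & 15;    /* bit offset inside that word */
  int room = 16 - offset;      /* bits left in the current word */
  value &= (uint16_t)((1u << nbits) - 1);
  if (nbits <= room) {
    stream[word] |= (uint16_t)(value << (room - nbits));
  } else {
    int spill = nbits - room;  /* bits that go into the next word */
    stream[word] |= (uint16_t)(value >> spill);
    stream[word + 1] |= (uint16_t)(value << (16 - spill));
  }
}

int main(void) {
  uint16_t stream[2] = {0, 0};
  pack_field(stream, 10, 7, 0x5A);  /* a 7-bit field straddling words 0 and 1 */
  printf("%04x %04x\n", stream[0], stream[1]);
  return 0;
}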
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/pack_bits.h b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/pack_bits.h
new file mode 100644
index 0000000000..8dcf41ce08
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/pack_bits.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_PackBits.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_PACK_BITS_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_PACK_BITS_H_
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ * packing of bits into the bitstream, i.e., vector of bytes
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_PackBits(
+ uint16_t* bitstream, /* (o) The packetized bitstream */
+ iLBC_bits* enc_bits, /* (i) Encoded bits */
+ int16_t mode /* (i) Codec mode (20 or 30) */
+ );
+
+#endif
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/poly_to_lsf.c b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/poly_to_lsf.c
new file mode 100644
index 0000000000..7192eaab49
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/poly_to_lsf.c
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_Poly2Lsf.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/poly_to_lsf.h"
+
+#include "modules/audio_coding/codecs/ilbc/constants.h"
+#include "modules/audio_coding/codecs/ilbc/lsp_to_lsf.h"
+#include "modules/audio_coding/codecs/ilbc/poly_to_lsp.h"
+
+void WebRtcIlbcfix_Poly2Lsf(
+ int16_t *lsf, /* (o) lsf coefficients (Q13) */
+ int16_t *a /* (i) A coefficients (Q12) */
+ ) {
+ int16_t lsp[10];
+ WebRtcIlbcfix_Poly2Lsp(a, lsp, (int16_t*)WebRtcIlbcfix_kLspMean);
+ WebRtcIlbcfix_Lsp2Lsf(lsp, lsf, 10);
+}
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/poly_to_lsf.h b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/poly_to_lsf.h
new file mode 100644
index 0000000000..363e392bb2
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/poly_to_lsf.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_Poly2Lsf.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_POLY_TO_LSF_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_POLY_TO_LSF_H_
+
+#include <stdint.h>
+
+/*----------------------------------------------------------------*
+ * conversion from lpc coefficients to lsf coefficients
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_Poly2Lsf(int16_t* lsf, /* (o) lsf coefficients (Q13) */
+ int16_t* a /* (i) A coefficients (Q12) */
+ );
+
+#endif
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/poly_to_lsp.c b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/poly_to_lsp.c
new file mode 100644
index 0000000000..ad0ecd70ab
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/poly_to_lsp.c
@@ -0,0 +1,159 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_Poly2Lsp.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/poly_to_lsp.h"
+
+#include "modules/audio_coding/codecs/ilbc/chebyshev.h"
+#include "modules/audio_coding/codecs/ilbc/constants.h"
+
+/*----------------------------------------------------------------*
+ * conversion from lpc coefficients to lsp coefficients
+ * function is only for 10:th order LPC
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_Poly2Lsp(
+    int16_t *a,  /* (i) A coefficients in Q12 */
+    int16_t *lsp, /* (o) LSP coefficients in Q15 */
+ int16_t *old_lsp /* (i) old LSP coefficients that are used if the new
+ coefficients turn out to be unstable */
+ ) {
+ int16_t f[2][6]; /* f[0][] represents f1 and f[1][] represents f2 */
+ int16_t *a_i_ptr, *a_10mi_ptr;
+ int16_t *f1ptr, *f2ptr;
+ int32_t tmpW32;
+ int16_t x, y, xlow, ylow, xmid, ymid, xhigh, yhigh, xint;
+ int16_t shifts, sign;
+ int i, j;
+ int foundFreqs;
+ int fi_select;
+
+ /*
+ Calculate the two polynomials f1(z) and f2(z)
+ (the sum and the diff polynomial)
+ f1[0] = f2[0] = 1.0;
+ f1[i+1] = a[i+1] + a[10-i] - f1[i];
+    f2[i+1] = a[i+1] - a[10-i] + f2[i];
+ */
+
+ a_i_ptr = a + 1;
+ a_10mi_ptr = a + 10;
+ f1ptr = f[0];
+ f2ptr = f[1];
+ (*f1ptr) = 1024; /* 1.0 in Q10 */
+ (*f2ptr) = 1024; /* 1.0 in Q10 */
+ for (i = 0; i < 5; i++) {
+ *(f1ptr + 1) =
+ (int16_t)((((int32_t)(*a_i_ptr) + *a_10mi_ptr) >> 2) - *f1ptr);
+ *(f2ptr + 1) =
+ (int16_t)((((int32_t)(*a_i_ptr) - *a_10mi_ptr) >> 2) + *f2ptr);
+ a_i_ptr++;
+ a_10mi_ptr--;
+ f1ptr++;
+ f2ptr++;
+ }
+
+ /*
+    find the LSPs using Chebyshev polynomial evaluation
+ */
+
+ fi_select = 0; /* selector between f1 and f2, start with f1 */
+
+ foundFreqs = 0;
+
+ xlow = WebRtcIlbcfix_kCosGrid[0];
+ ylow = WebRtcIlbcfix_Chebyshev(xlow, f[fi_select]);
+
+ /*
+    Iterate until all 10 LSPs have been found or
+    all the grid points have been tried. If the 10 LSPs
+    cannot be found, set the LSP vector to the previous LSP
+ */
+
+ for (j = 1; j < COS_GRID_POINTS && foundFreqs < 10; j++) {
+ xhigh = xlow;
+ yhigh = ylow;
+ xlow = WebRtcIlbcfix_kCosGrid[j];
+ ylow = WebRtcIlbcfix_Chebyshev(xlow, f[fi_select]);
+
+ if (ylow * yhigh <= 0) {
+ /* Run 4 times to reduce the interval */
+ for (i = 0; i < 4; i++) {
+ /* xmid =(xlow + xhigh)/2 */
+ xmid = (xlow >> 1) + (xhigh >> 1);
+ ymid = WebRtcIlbcfix_Chebyshev(xmid, f[fi_select]);
+
+ if (ylow * ymid <= 0) {
+ yhigh = ymid;
+ xhigh = xmid;
+ } else {
+ ylow = ymid;
+ xlow = xmid;
+ }
+ }
+
+ /*
+       Calculate xint by linear interpolation:
+ xint = xlow - ylow*(xhigh-xlow)/(yhigh-ylow);
+ */
+
+ x = xhigh - xlow;
+ y = yhigh - ylow;
+
+ if (y == 0) {
+ xint = xlow;
+ } else {
+ sign = y;
+ y = WEBRTC_SPL_ABS_W16(y);
+ shifts = (int16_t)WebRtcSpl_NormW32(y)-16;
+ y <<= shifts;
+ y = (int16_t)WebRtcSpl_DivW32W16(536838144, y); /* 1/(yhigh-ylow) */
+
+ tmpW32 = (x * y) >> (19 - shifts);
+
+ /* y=(xhigh-xlow)/(yhigh-ylow) */
+ y = (int16_t)(tmpW32&0xFFFF);
+
+ if (sign < 0) {
+ y = -y;
+ }
+ /* tmpW32 = ylow*(xhigh-xlow)/(yhigh-ylow) */
+ tmpW32 = (ylow * y) >> 10;
+ xint = xlow-(int16_t)(tmpW32&0xFFFF);
+ }
+
+ /* Store the calculated lsp */
+ lsp[foundFreqs] = (int16_t)xint;
+ foundFreqs++;
+
+      /* if needed, set xlow and ylow for the next iteration */
+ if (foundFreqs<10) {
+ xlow = xint;
+ /* Swap between f1 and f2 (f[0][] and f[1][]) */
+ fi_select = ((fi_select+1)&0x1);
+
+ ylow = WebRtcIlbcfix_Chebyshev(xlow, f[fi_select]);
+ }
+ }
+ }
+
+  /* Check if all 10 roots were found; if not, use the old LSP */
+ if (foundFreqs < 10) {
+ WEBRTC_SPL_MEMCPY_W16(lsp, old_lsp, 10);
+ }
+ return;
+}
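The root search above brackets each LSP between two grid points where the Chebyshev series changes sign, halves the interval four times, and finishes with one linear interpolation. A floating-point sketch of the same strategy, using double math instead of the Q-domain arithmetic and a stand-in polynomial (illustrative only):

#include <stdio.h>

static double f(double x) { return x * x - 0.25; }  /* stand-in polynomial */

static double refine_root(double xlow, double xhigh) {
  double ylow = f(xlow), yhigh = f(xhigh), xmid, ymid;
  int i;
  for (i = 0; i < 4; i++) {          /* 4 bisection steps, as in the code */
    xmid = 0.5 * (xlow + xhigh);
    ymid = f(xmid);
    if (ylow * ymid <= 0) { xhigh = xmid; yhigh = ymid; }
    else                  { xlow = xmid;  ylow = ymid; }
  }
  if (yhigh == ylow) return xlow;    /* flat: avoid dividing by zero */
  return xlow - ylow * (xhigh - xlow) / (yhigh - ylow);  /* linear interp. */
}

int main(void) {
  printf("root ~ %f\n", refine_root(0.0, 1.0));  /* true root is 0.5 */
  return 0;
}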
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/poly_to_lsp.h b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/poly_to_lsp.h
new file mode 100644
index 0000000000..928ee4efdb
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/poly_to_lsp.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_Poly2Lsp.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_POLY_TO_LSP_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_POLY_TO_LSP_H_
+
+#include <stdint.h>
+
+/*----------------------------------------------------------------*
+ * conversion from lpc coefficients to lsp coefficients
+ * function is only for 10:th order LPC
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_Poly2Lsp(
+    int16_t* a,   /* (i) A coefficients in Q12 */
+    int16_t* lsp, /* (o) LSP coefficients in Q15 */
+ int16_t* old_lsp /* (i) old LSP coefficients that are used if the new
+ coefficients turn out to be unstable */
+ );
+
+#endif
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/refiner.c b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/refiner.c
new file mode 100644
index 0000000000..5bdab7a4b0
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/refiner.c
@@ -0,0 +1,141 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_Refiner.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/refiner.h"
+
+#include "modules/audio_coding/codecs/ilbc/constants.h"
+#include "modules/audio_coding/codecs/ilbc/enh_upsample.h"
+#include "modules/audio_coding/codecs/ilbc/my_corr.h"
+
+/*----------------------------------------------------------------*
+ * find the segment starting near idata+estSegPos that has the
+ * highest correlation with idata+centerStartPos through
+ * idata+centerStartPos+ENH_BLOCKL-1. The segment is found at a
+ * resolution of ENH_UPS0 times the original sampling rate
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_Refiner(
+ size_t *updStartPos, /* (o) updated start point (Q-2) */
+ int16_t *idata, /* (i) original data buffer */
+ size_t idatal, /* (i) dimension of idata */
+ size_t centerStartPos, /* (i) beginning center segment */
+ size_t estSegPos, /* (i) estimated beginning other segment (Q-2) */
+ int16_t *surround, /* (i/o) The contribution from this sequence
+ summed with earlier contributions */
+ int16_t gain /* (i) Gain to use for this sequence */
+ ){
+ size_t estSegPosRounded, searchSegStartPos, searchSegEndPos, corrdim;
+ size_t tloc, tloc2, i;
+
+ int32_t maxtemp, scalefact;
+ int16_t *filtStatePtr, *polyPtr;
+ /* Stack based */
+ int16_t filt[7];
+ int32_t corrVecUps[ENH_CORRDIM*ENH_UPS0];
+ int32_t corrVecTemp[ENH_CORRDIM];
+ int16_t vect[ENH_VECTL];
+ int16_t corrVec[ENH_CORRDIM];
+
+ /* defining array bounds */
+
+ estSegPosRounded = (estSegPos - 2) >> 2;
+
+ searchSegStartPos =
+ (estSegPosRounded < ENH_SLOP) ? 0 : (estSegPosRounded - ENH_SLOP);
+
+ searchSegEndPos = estSegPosRounded + ENH_SLOP;
+ if ((searchSegEndPos + ENH_BLOCKL) >= idatal) {
+ searchSegEndPos = idatal - ENH_BLOCKL - 1;
+ }
+
+ corrdim = searchSegEndPos + 1 - searchSegStartPos;
+
+ /* compute upsampled correlation and find
+ location of max */
+
+ WebRtcIlbcfix_MyCorr(corrVecTemp, idata + searchSegStartPos,
+ corrdim + ENH_BLOCKL - 1, idata + centerStartPos,
+ ENH_BLOCKL);
+
+ /* Calculate the rescaling factor for the correlation in order to
+     put the correlation in an int16_t vector instead */
+ maxtemp = WebRtcSpl_MaxAbsValueW32(corrVecTemp, corrdim);
+
+ scalefact = WebRtcSpl_GetSizeInBits(maxtemp) - 15;
+
+ if (scalefact > 0) {
+ for (i = 0; i < corrdim; i++) {
+ corrVec[i] = (int16_t)(corrVecTemp[i] >> scalefact);
+ }
+ } else {
+ for (i = 0; i < corrdim; i++) {
+ corrVec[i] = (int16_t)corrVecTemp[i];
+ }
+ }
+ /* In order to guarantee that all values are initialized */
+ for (i = corrdim; i < ENH_CORRDIM; i++) {
+ corrVec[i] = 0;
+ }
+
+ /* Upsample the correlation */
+ WebRtcIlbcfix_EnhUpsample(corrVecUps, corrVec);
+
+ /* Find maximum */
+ tloc = WebRtcSpl_MaxIndexW32(corrVecUps, ENH_UPS0 * corrdim);
+
+  /* make sure the vector can be upsampled without ever running
+     outside the bounds */
+ *updStartPos = searchSegStartPos * 4 + tloc + 4;
+
+ tloc2 = (tloc + 3) >> 2;
+
+ /* initialize the vector to be filtered, stuff with zeros
+ when data is outside idata buffer */
+ if (ENH_FL0 > (searchSegStartPos + tloc2)) {
+ const size_t st = ENH_FL0 - searchSegStartPos - tloc2;
+ WebRtcSpl_MemSetW16(vect, 0, st);
+ WEBRTC_SPL_MEMCPY_W16(&vect[st], idata, ENH_VECTL - st);
+ } else {
+ const size_t st = searchSegStartPos + tloc2 - ENH_FL0;
+ if ((st + ENH_VECTL) > idatal) {
+ const size_t en = st + ENH_VECTL - idatal;
+ WEBRTC_SPL_MEMCPY_W16(vect, &idata[st], ENH_VECTL - en);
+ WebRtcSpl_MemSetW16(&vect[ENH_VECTL - en], 0, en);
+ } else {
+ WEBRTC_SPL_MEMCPY_W16(vect, &idata[st], ENH_VECTL);
+ }
+ }
+
+ /* compute the segment (this is actually a convolution) */
+ filtStatePtr = filt + 6;
+ polyPtr = (int16_t*)WebRtcIlbcfix_kEnhPolyPhaser[tloc2 * ENH_UPS0 - tloc];
+ for (i = 0; i < 7; i++) {
+ *filtStatePtr-- = *polyPtr++;
+ }
+
+ WebRtcSpl_FilterMAFastQ12(&vect[6], vect, filt, ENH_FLO_MULT2_PLUS1,
+ ENH_BLOCKL);
+
+ /* Add the contribution from this vector (scaled with gain) to the total
+ surround vector */
+ WebRtcSpl_AddAffineVectorToVector(surround, vect, gain, 32768, 16,
+ ENH_BLOCKL);
+
+ return;
+}
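The start position above is tracked at quarter-sample (Q-2) resolution: searchSegStartPos*4 moves whole samples onto the 4x grid, tloc is already on that grid, and (tloc+3)>>2 rounds back up to whole samples. A tiny illustration of that bookkeeping with assumed values (not WebRTC API):

#include <stddef.h>
#include <stdio.h>

int main(void) {
  size_t searchSegStartPos = 17; /* whole samples */
  size_t tloc = 6;               /* offset on the 4x (quarter-sample) grid */
  size_t updStartPos = searchSegStartPos * 4 + tloc + 4; /* Q-2 position */
  size_t tloc2 = (tloc + 3) >> 2;                        /* ceil(tloc/4) */
  printf("Q-2 start: %zu (= %.2f samples), integer offset: %zu\n",
         updStartPos, updStartPos / 4.0, tloc2);
  return 0;
}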
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/refiner.h b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/refiner.h
new file mode 100644
index 0000000000..564c9d96e6
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/refiner.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_Refiner.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_REFINER_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_REFINER_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+/*----------------------------------------------------------------*
+ * find the segment starting near idata+estSegPos that has the
+ * highest correlation with idata+centerStartPos through
+ * idata+centerStartPos+ENH_BLOCKL-1. The segment is found at a
+ * resolution of ENH_UPS0 times the original sampling rate
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_Refiner(
+ size_t* updStartPos, /* (o) updated start point (Q-2) */
+ int16_t* idata, /* (i) original data buffer */
+ size_t idatal, /* (i) dimension of idata */
+ size_t centerStartPos, /* (i) beginning center segment */
+ size_t estSegPos, /* (i) estimated beginning other segment (Q-2) */
+ int16_t* surround, /* (i/o) The contribution from this sequence
+ summed with earlier contributions */
+ int16_t gain /* (i) Gain to use for this sequence */
+ );
+
+#endif
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/simple_interpolate_lsf.c b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/simple_interpolate_lsf.c
new file mode 100644
index 0000000000..7343530a5e
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/simple_interpolate_lsf.c
@@ -0,0 +1,133 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_SimpleInterpolateLsf.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/simple_interpolate_lsf.h"
+
+#include "modules/audio_coding/codecs/ilbc/bw_expand.h"
+#include "modules/audio_coding/codecs/ilbc/constants.h"
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+#include "modules/audio_coding/codecs/ilbc/lsf_interpolate_to_poly_enc.h"
+
+/*----------------------------------------------------------------*
+ * lsf interpolator (subroutine to LPCencode)
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_SimpleInterpolateLsf(
+ int16_t *syntdenum, /* (o) the synthesis filter denominator
+ resulting from the quantized
+ interpolated lsf Q12 */
+ int16_t *weightdenum, /* (o) the weighting filter denominator
+ resulting from the unquantized
+ interpolated lsf Q12 */
+ int16_t *lsf, /* (i) the unquantized lsf coefficients Q13 */
+ int16_t *lsfdeq, /* (i) the dequantized lsf coefficients Q13 */
+ int16_t *lsfold, /* (i) the unquantized lsf coefficients of
+ the previous signal frame Q13 */
+ int16_t *lsfdeqold, /* (i) the dequantized lsf coefficients of the
+ previous signal frame Q13 */
+    int16_t length,      /* (i) should equal FILTERORDER */
+ IlbcEncoder *iLBCenc_inst
+ /* (i/o) the encoder state structure */
+ ) {
+ size_t i;
+ int pos, lp_length;
+
+ int16_t *lsf2, *lsfdeq2;
+ /* Stack based */
+ int16_t lp[LPC_FILTERORDER + 1];
+
+ lsf2 = lsf + length;
+ lsfdeq2 = lsfdeq + length;
+ lp_length = length + 1;
+
+ if (iLBCenc_inst->mode==30) {
+ /* subframe 1: Interpolation between old and first set of
+ lsf coefficients */
+
+    /* Calculate Analysis/Synthesis filter from quantized LSF */
+ WebRtcIlbcfix_LsfInterpolate2PloyEnc(lp, lsfdeqold, lsfdeq,
+ WebRtcIlbcfix_kLsfWeight30ms[0],
+ length);
+ WEBRTC_SPL_MEMCPY_W16(syntdenum, lp, lp_length);
+
+ /* Calculate Weighting filter from quantized LSF */
+ WebRtcIlbcfix_LsfInterpolate2PloyEnc(lp, lsfold, lsf,
+ WebRtcIlbcfix_kLsfWeight30ms[0],
+ length);
+ WebRtcIlbcfix_BwExpand(weightdenum, lp,
+ (int16_t*)WebRtcIlbcfix_kLpcChirpWeightDenum,
+ (int16_t)lp_length);
+
+ /* subframe 2 to 6: Interpolation between first and second
+ set of lsf coefficients */
+
+ pos = lp_length;
+ for (i = 1; i < iLBCenc_inst->nsub; i++) {
+
+      /* Calculate Analysis/Synthesis filter from quantized LSF */
+ WebRtcIlbcfix_LsfInterpolate2PloyEnc(lp, lsfdeq, lsfdeq2,
+ WebRtcIlbcfix_kLsfWeight30ms[i],
+ length);
+ WEBRTC_SPL_MEMCPY_W16(syntdenum + pos, lp, lp_length);
+
+ /* Calculate Weighting filter from quantized LSF */
+ WebRtcIlbcfix_LsfInterpolate2PloyEnc(lp, lsf, lsf2,
+ WebRtcIlbcfix_kLsfWeight30ms[i],
+ length);
+ WebRtcIlbcfix_BwExpand(weightdenum + pos, lp,
+ (int16_t*)WebRtcIlbcfix_kLpcChirpWeightDenum,
+ (int16_t)lp_length);
+
+ pos += lp_length;
+ }
+
+ /* update memory */
+
+ WEBRTC_SPL_MEMCPY_W16(lsfold, lsf2, length);
+ WEBRTC_SPL_MEMCPY_W16(lsfdeqold, lsfdeq2, length);
+
+ } else { /* iLBCenc_inst->mode==20 */
+ pos = 0;
+ for (i = 0; i < iLBCenc_inst->nsub; i++) {
+
+      /* Calculate Analysis/Synthesis filter from quantized LSF */
+ WebRtcIlbcfix_LsfInterpolate2PloyEnc(lp, lsfdeqold, lsfdeq,
+ WebRtcIlbcfix_kLsfWeight20ms[i],
+ length);
+ WEBRTC_SPL_MEMCPY_W16(syntdenum + pos, lp, lp_length);
+
+ /* Calculate Weighting filter from quantized LSF */
+ WebRtcIlbcfix_LsfInterpolate2PloyEnc(lp, lsfold, lsf,
+ WebRtcIlbcfix_kLsfWeight20ms[i],
+ length);
+ WebRtcIlbcfix_BwExpand(weightdenum+pos, lp,
+ (int16_t*)WebRtcIlbcfix_kLpcChirpWeightDenum,
+ (int16_t)lp_length);
+
+ pos += lp_length;
+ }
+
+ /* update memory */
+
+ WEBRTC_SPL_MEMCPY_W16(lsfold, lsf, length);
+ WEBRTC_SPL_MEMCPY_W16(lsfdeqold, lsfdeq, length);
+
+ }
+
+ return;
+}
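Each subframe filter above comes from a weighted mix of two LSF vectors, with the weight taken from WebRtcIlbcfix_kLsfWeight20ms/30ms. A floating-point sketch of that per-subframe interpolation (illustrative names; the fixed-point version lives in WebRtcIlbcfix_LsfInterpolate2PloyEnc):

#include <stdio.h>

#define ORDER 10

/* out = w*new + (1-w)*old, elementwise over the LSF vector. */
static void interpolate_lsf(double *out, const double *old_lsf,
                            const double *new_lsf, double w) {
  int i;
  for (i = 0; i < ORDER; i++) {
    out[i] = w * new_lsf[i] + (1.0 - w) * old_lsf[i];
  }
}

int main(void) {
  double old_lsf[ORDER], new_lsf[ORDER], sub[ORDER];
  int i;
  for (i = 0; i < ORDER; i++) {
    old_lsf[i] = 0.1 * (i + 1);
    new_lsf[i] = 0.1 * (i + 1) + 0.05;
  }
  interpolate_lsf(sub, old_lsf, new_lsf, 0.25);  /* early subframe: mostly old */
  printf("first interpolated lsf: %f\n", sub[0]);
  return 0;
}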
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/simple_interpolate_lsf.h b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/simple_interpolate_lsf.h
new file mode 100644
index 0000000000..ee53e4bd08
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/simple_interpolate_lsf.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_SimpleInterpolateLsf.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_SIMPLE_INTERPOLATE_LSF_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_SIMPLE_INTERPOLATE_LSF_H_
+
+#include <stdint.h>
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ * lsf interpolator (subroutine to LPCencode)
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_SimpleInterpolateLsf(
+ int16_t* syntdenum, /* (o) the synthesis filter denominator
+ resulting from the quantized
+ interpolated lsf Q12 */
+ int16_t* weightdenum, /* (o) the weighting filter denominator
+ resulting from the unquantized
+ interpolated lsf Q12 */
+ int16_t* lsf, /* (i) the unquantized lsf coefficients Q13 */
+ int16_t* lsfdeq, /* (i) the dequantized lsf coefficients Q13 */
+ int16_t* lsfold, /* (i) the unquantized lsf coefficients of
+ the previous signal frame Q13 */
+ int16_t* lsfdeqold, /* (i) the dequantized lsf coefficients of the
+ previous signal frame Q13 */
+    int16_t length,       /* (i) should equal FILTERORDER */
+ IlbcEncoder* iLBCenc_inst
+ /* (i/o) the encoder state structure */
+ );
+
+#endif
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/simple_lpc_analysis.c b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/simple_lpc_analysis.c
new file mode 100644
index 0000000000..fdc4553d95
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/simple_lpc_analysis.c
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_SimpleLpcAnalysis.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/simple_lpc_analysis.h"
+
+#include "modules/audio_coding/codecs/ilbc/bw_expand.h"
+#include "modules/audio_coding/codecs/ilbc/constants.h"
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+#include "modules/audio_coding/codecs/ilbc/poly_to_lsf.h"
+#include "modules/audio_coding/codecs/ilbc/window32_w32.h"
+
+/*----------------------------------------------------------------*
+ * lpc analysis (subroutine to LPCencode)
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_SimpleLpcAnalysis(
+ int16_t *lsf, /* (o) lsf coefficients */
+ int16_t *data, /* (i) new block of speech */
+ IlbcEncoder *iLBCenc_inst
+ /* (i/o) the encoder state structure */
+ ) {
+ int k;
+ int scale;
+ size_t is;
+ int16_t stability;
+ /* Stack based */
+ int16_t A[LPC_FILTERORDER + 1];
+ int32_t R[LPC_FILTERORDER + 1];
+ int16_t windowedData[BLOCKL_MAX];
+ int16_t rc[LPC_FILTERORDER];
+
+ is=LPC_LOOKBACK+BLOCKL_MAX-iLBCenc_inst->blockl;
+ WEBRTC_SPL_MEMCPY_W16(iLBCenc_inst->lpc_buffer+is,data,iLBCenc_inst->blockl);
+
+ /* No lookahead, last window is asymmetric */
+
+ for (k = 0; k < iLBCenc_inst->lpc_n; k++) {
+
+ is = LPC_LOOKBACK;
+
+ if (k < (iLBCenc_inst->lpc_n - 1)) {
+
+ /* Hanning table WebRtcIlbcfix_kLpcWin[] is in Q15-domain so the output is right-shifted 15 */
+ WebRtcSpl_ElementwiseVectorMult(windowedData, iLBCenc_inst->lpc_buffer, WebRtcIlbcfix_kLpcWin, BLOCKL_MAX, 15);
+ } else {
+
+ /* Hanning table WebRtcIlbcfix_kLpcAsymWin[] is in Q15-domain so the output is right-shifted 15 */
+ WebRtcSpl_ElementwiseVectorMult(windowedData, iLBCenc_inst->lpc_buffer+is, WebRtcIlbcfix_kLpcAsymWin, BLOCKL_MAX, 15);
+ }
+
+ /* Compute autocorrelation */
+ WebRtcSpl_AutoCorrelation(windowedData, BLOCKL_MAX, LPC_FILTERORDER, R, &scale);
+
+ /* Window autocorrelation vector */
+ WebRtcIlbcfix_Window32W32(R, R, WebRtcIlbcfix_kLpcLagWin, LPC_FILTERORDER + 1 );
+
+    /* Calculate the A coefficients from the autocorrelation using the Levinson-Durbin algorithm */
+ stability=WebRtcSpl_LevinsonDurbin(R, A, rc, LPC_FILTERORDER);
+
+ /*
+       Set the filter to {1.0, 0.0, 0.0,...} if the filter from the Levinson-Durbin algorithm is unstable
+ This should basically never happen...
+ */
+ if (stability!=1) {
+ A[0]=4096;
+ WebRtcSpl_MemSetW16(&A[1], 0, LPC_FILTERORDER);
+ }
+
+ /* Bandwidth expand the filter coefficients */
+ WebRtcIlbcfix_BwExpand(A, A, (int16_t*)WebRtcIlbcfix_kLpcChirpSyntDenum, LPC_FILTERORDER+1);
+
+ /* Convert from A to LSF representation */
+ WebRtcIlbcfix_Poly2Lsf(lsf + k*LPC_FILTERORDER, A);
+ }
+
+ is=LPC_LOOKBACK+BLOCKL_MAX-iLBCenc_inst->blockl;
+ WEBRTC_SPL_MEMCPY_W16(iLBCenc_inst->lpc_buffer,
+ iLBCenc_inst->lpc_buffer+LPC_LOOKBACK+BLOCKL_MAX-is, is);
+
+ return;
+}
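The analysis chain above is window -> autocorrelation -> Levinson-Durbin -> bandwidth expansion -> LSF conversion. A floating-point sketch of the first three stages, unscaled and with illustrative names (the WebRTC code does the same in fixed point):

#include <stdio.h>

#define N 80       /* analysis length (illustrative) */
#define ORDER 10   /* LPC order */

static void autocorr(const double *x, double *r) {
  int lag, n;
  for (lag = 0; lag <= ORDER; lag++) {
    r[lag] = 0.0;
    for (n = lag; n < N; n++) r[lag] += x[n] * x[n - lag];
  }
}

/* Levinson-Durbin recursion; returns 0 if the prediction error collapses. */
static int levinson(const double *r, double *a) {
  double err = r[0], k, tmp[ORDER + 1];
  int i, j;
  a[0] = 1.0;
  for (i = 1; i <= ORDER; i++) {
    double acc = r[i];
    for (j = 1; j < i; j++) acc += a[j] * r[i - j];
    if (err <= 0.0) return 0;
    k = -acc / err;
    for (j = 1; j < i; j++) tmp[j] = a[j];
    for (j = 1; j < i; j++) a[j] = tmp[j] + k * tmp[i - j];
    a[i] = k;
    err *= (1.0 - k * k);
  }
  return 1;
}

int main(void) {
  double x[N], r[ORDER + 1], a[ORDER + 1];
  int n;
  for (n = 0; n < N; n++) x[n] = (n % 7) - 3.0;  /* toy signal */
  autocorr(x, r);
  if (levinson(r, a)) printf("a[1] = %f\n", a[1]);
  return 0;
}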
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/simple_lpc_analysis.h b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/simple_lpc_analysis.h
new file mode 100644
index 0000000000..b5c839ba2a
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/simple_lpc_analysis.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_SimpleLpcAnalysis.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_SIMPLE_LPC_ANALYSIS_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_SIMPLE_LPC_ANALYSIS_H_
+
+#include <stdint.h>
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ * lpc analysis (subroutine to LPCencode)
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_SimpleLpcAnalysis(
+ int16_t* lsf, /* (o) lsf coefficients */
+ int16_t* data, /* (i) new block of speech */
+ IlbcEncoder* iLBCenc_inst
+ /* (i/o) the encoder state structure */
+ );
+
+#endif
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/simple_lsf_dequant.c b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/simple_lsf_dequant.c
new file mode 100644
index 0000000000..e7494ceb59
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/simple_lsf_dequant.c
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_SimpleLsfDeQ.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/simple_lsf_dequant.h"
+
+#include "modules/audio_coding/codecs/ilbc/constants.h"
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ * obtain dequantized lsf coefficients from quantization index
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_SimpleLsfDeQ(
+ int16_t *lsfdeq, /* (o) dequantized lsf coefficients */
+ int16_t *index, /* (i) quantization index */
+ int16_t lpc_n /* (i) number of LPCs */
+ ){
+ int i, j, pos, cb_pos;
+
+ /* decode first LSF */
+
+ pos = 0;
+ cb_pos = 0;
+ for (i = 0; i < LSF_NSPLIT; i++) {
+ for (j = 0; j < WebRtcIlbcfix_kLsfDimCb[i]; j++) {
+ lsfdeq[pos + j] = WebRtcIlbcfix_kLsfCb[cb_pos + j + index[i] *
+ WebRtcIlbcfix_kLsfDimCb[i]];
+ }
+ pos += WebRtcIlbcfix_kLsfDimCb[i];
+ cb_pos += WebRtcIlbcfix_kLsfSizeCb[i] * WebRtcIlbcfix_kLsfDimCb[i];
+ }
+
+ if (lpc_n>1) {
+ /* decode last LSF */
+ pos = 0;
+ cb_pos = 0;
+ for (i = 0; i < LSF_NSPLIT; i++) {
+ for (j = 0; j < WebRtcIlbcfix_kLsfDimCb[i]; j++) {
+ lsfdeq[LPC_FILTERORDER + pos + j] = WebRtcIlbcfix_kLsfCb[
+ cb_pos + index[LSF_NSPLIT + i] * WebRtcIlbcfix_kLsfDimCb[i] + j];
+ }
+ pos += WebRtcIlbcfix_kLsfDimCb[i];
+ cb_pos += WebRtcIlbcfix_kLsfSizeCb[i] * WebRtcIlbcfix_kLsfDimCb[i];
+ }
+ }
+ return;
+}
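The codebook addressing above follows from the storage layout: split i owns WebRtcIlbcfix_kLsfSizeCb[i] entries of WebRtcIlbcfix_kLsfDimCb[i] values each, stored back to back, so entry idx of a split starts at cb_pos + idx * dim. A toy-sized sketch with illustrative data (not the real iLBC codebooks):

#include <stdio.h>

#define NSPLIT 3

static const int dim[NSPLIT] = {3, 3, 4};
static const int size[NSPLIT] = {2, 2, 2};  /* toy sizes; real CBs are larger */
static const short cb[] = { /* split 0: 2 entries x 3 */ 1, 2, 3,  4, 5, 6,
                            /* split 1: 2 entries x 3 */ 7, 8, 9,  10, 11, 12,
                            /* split 2: 2 entries x 4 */ 13, 14, 15, 16,  17, 18, 19, 20 };

int main(void) {
  short lsfdeq[10];
  int index[NSPLIT] = {1, 0, 1};
  int i, j, pos = 0, cb_pos = 0;
  for (i = 0; i < NSPLIT; i++) {
    for (j = 0; j < dim[i]; j++)
      lsfdeq[pos + j] = cb[cb_pos + index[i] * dim[i] + j];
    pos += dim[i];                 /* advance in the output vector */
    cb_pos += size[i] * dim[i];    /* advance to the next split's codebook */
  }
  for (i = 0; i < 10; i++) printf("%d ", lsfdeq[i]);
  printf("\n");
  return 0;
}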
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/simple_lsf_dequant.h b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/simple_lsf_dequant.h
new file mode 100644
index 0000000000..6d97d3df33
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/simple_lsf_dequant.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_SimpleLsfDeQ.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_SIMPLE_LSF_DEQUANT_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_SIMPLE_LSF_DEQUANT_H_
+
+#include <stdint.h>
+
+/*----------------------------------------------------------------*
+ * obtain dequantized lsf coefficients from quantization index
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_SimpleLsfDeQ(
+ int16_t* lsfdeq, /* (o) dequantized lsf coefficients */
+ int16_t* index, /* (i) quantization index */
+ int16_t lpc_n /* (i) number of LPCs */
+ );
+
+#endif
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/simple_lsf_quant.c b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/simple_lsf_quant.c
new file mode 100644
index 0000000000..1291d1442e
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/simple_lsf_quant.c
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_SimpleLsfQ.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/simple_lsf_quant.h"
+
+#include "modules/audio_coding/codecs/ilbc/constants.h"
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+#include "modules/audio_coding/codecs/ilbc/split_vq.h"
+
+/*----------------------------------------------------------------*
+ * lsf quantizer (subroutine to LPCencode)
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_SimpleLsfQ(
+ int16_t *lsfdeq, /* (o) dequantized lsf coefficients
+ (dimension FILTERORDER) Q13 */
+ int16_t *index, /* (o) quantization index */
+ int16_t *lsf, /* (i) the lsf coefficient vector to be
+ quantized (dimension FILTERORDER) Q13 */
+ int16_t lpc_n /* (i) number of lsf sets to quantize */
+ ){
+
+ /* Quantize first LSF with memoryless split VQ */
+ WebRtcIlbcfix_SplitVq( lsfdeq, index, lsf,
+ (int16_t*)WebRtcIlbcfix_kLsfCb, (int16_t*)WebRtcIlbcfix_kLsfDimCb, (int16_t*)WebRtcIlbcfix_kLsfSizeCb);
+
+ if (lpc_n==2) {
+ /* Quantize second LSF with memoryless split VQ */
+ WebRtcIlbcfix_SplitVq( lsfdeq + LPC_FILTERORDER, index + LSF_NSPLIT,
+ lsf + LPC_FILTERORDER, (int16_t*)WebRtcIlbcfix_kLsfCb,
+ (int16_t*)WebRtcIlbcfix_kLsfDimCb, (int16_t*)WebRtcIlbcfix_kLsfSizeCb);
+ }
+ return;
+}
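WebRtcIlbcfix_SplitVq quantizes each split of the LSF vector independently against its own codebook. A minimal sketch of a memoryless VQ search for one split, picking the entry with the smallest squared error (illustrative only; the actual fixed-point search lives in the split_vq/vq3/vq4 routines):

#include <stdio.h>

static int vq_search(const double *x, const double *cb, int size, int dim) {
  int best = 0, i, j;
  double best_err = -1.0;
  for (i = 0; i < size; i++) {
    double err = 0.0;
    for (j = 0; j < dim; j++) {
      double d = x[j] - cb[i * dim + j];
      err += d * d;
    }
    if (best_err < 0.0 || err < best_err) { best_err = err; best = i; }
  }
  return best;
}

int main(void) {
  const double cb[] = {0.0, 0.0, 0.0,  1.0, 1.0, 1.0,  2.0, 2.0, 2.0};
  const double x[] = {0.9, 1.1, 1.0};
  printf("best index: %d\n", vq_search(x, cb, 3, 3));  /* expect 1 */
  return 0;
}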
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/simple_lsf_quant.h b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/simple_lsf_quant.h
new file mode 100644
index 0000000000..66b553213a
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/simple_lsf_quant.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_SimpleLsfQ.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_SIMPLE_LSF_QUANT_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_SIMPLE_LSF_QUANT_H_
+
+#include <stdint.h>
+
+/*----------------------------------------------------------------*
+ * lsf quantizer (subroutine to LPCencode)
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_SimpleLsfQ(
+ int16_t* lsfdeq, /* (o) dequantized lsf coefficients
+ (dimension FILTERORDER) Q13 */
+ int16_t* index, /* (o) quantization index */
+ int16_t* lsf, /* (i) the lsf coefficient vector to be
+ quantized (dimension FILTERORDER) Q13 */
+ int16_t lpc_n /* (i) number of lsf sets to quantize */
+ );
+
+#endif
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/smooth.c b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/smooth.c
new file mode 100644
index 0000000000..631b2f432a
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/smooth.c
@@ -0,0 +1,212 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_Smooth.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/smooth.h"
+
+#include "modules/audio_coding/codecs/ilbc/constants.h"
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+#include "modules/audio_coding/codecs/ilbc/smooth_out_data.h"
+
+/*----------------------------------------------------------------*
+ * find the smoothed output data
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_Smooth(
+ int16_t *odata, /* (o) smoothed output */
+    int16_t *current,  /* (i) the unenhanced residual for
+ this block */
+ int16_t *surround /* (i) The approximation from the
+ surrounding sequences */
+ ) {
+ int16_t scale, scale1, scale2;
+ int16_t A, B, C, denomW16;
+ int32_t B_W32, denom, num;
+ int32_t errs;
+ int32_t w00,w10,w11, endiff, crit;
+ int32_t w00prim, w10prim, w11_div_w00;
+ int16_t w11prim;
+ int16_t bitsw00, bitsw10, bitsw11;
+ int32_t w11w00, w10w10, w00w00;
+ uint32_t max1, max2, max12;
+
+  /* compute some inner products (ensure no overflow by first calculating a proper scale factor) */
+
+ w00 = w10 = w11 = 0;
+
+ // Calculate a right shift that will let us sum ENH_BLOCKL pairwise products
+ // of values from the two sequences without overflowing an int32_t. (The +1
+  // in max1 and max2 is because WebRtcSpl_MaxAbsValueW16 will return 2**15 -
+ // 1 if the input array contains -2**15.)
+ max1 = WebRtcSpl_MaxAbsValueW16(current, ENH_BLOCKL) + 1;
+ max2 = WebRtcSpl_MaxAbsValueW16(surround, ENH_BLOCKL) + 1;
+ max12 = WEBRTC_SPL_MAX(max1, max2);
+ scale = (64 - 31) -
+ WebRtcSpl_CountLeadingZeros64((max12 * max12) * (uint64_t)ENH_BLOCKL);
+ scale=WEBRTC_SPL_MAX(0, scale);
+
+ w00=WebRtcSpl_DotProductWithScale(current,current,ENH_BLOCKL,scale);
+ w11=WebRtcSpl_DotProductWithScale(surround,surround,ENH_BLOCKL,scale);
+ w10=WebRtcSpl_DotProductWithScale(surround,current,ENH_BLOCKL,scale);
+
+ if (w00<0) w00 = WEBRTC_SPL_WORD32_MAX;
+ if (w11<0) w11 = WEBRTC_SPL_WORD32_MAX;
+
+ /* Rescale w00 and w11 to w00prim and w11prim, so that w00prim/w11prim
+ is in Q16 */
+
+ bitsw00 = WebRtcSpl_GetSizeInBits(w00);
+ bitsw11 = WebRtcSpl_GetSizeInBits(w11);
+ bitsw10 = WebRtcSpl_GetSizeInBits(WEBRTC_SPL_ABS_W32(w10));
+ scale1 = 31 - bitsw00;
+ scale2 = 15 - bitsw11;
+
+ if (scale2>(scale1-16)) {
+ scale2 = scale1 - 16;
+ } else {
+ scale1 = scale2 + 16;
+ }
+
+ w00prim = w00 << scale1;
+ w11prim = (int16_t) WEBRTC_SPL_SHIFT_W32(w11, scale2);
+
+  /* Perform C = sqrt(w00/w11) (C is in Q11 since (16+6)/2=11) */
+ if (w11prim>64) {
+ endiff = WebRtcSpl_DivW32W16(w00prim, w11prim) << 6;
+ C = (int16_t)WebRtcSpl_SqrtFloor(endiff); /* C is in Q11 */
+ } else {
+ C = 1;
+ }
+
+ /* first try enhancement without power-constraint */
+
+ errs = WebRtcIlbcfix_Smooth_odata(odata, current, surround, C);
+
+
+
+ /* if constraint violated by first try, add constraint */
+
+ if ( (6-scale+scale1) > 31) {
+ crit=0;
+ } else {
+ /* crit = 0.05 * w00 (Result in Q-6) */
+ crit = WEBRTC_SPL_SHIFT_W32(
+ WEBRTC_SPL_MUL(ENH_A0, w00prim >> 14),
+ -(6-scale+scale1));
+ }
+
+ if (errs > crit) {
+
+ if( w00 < 1) {
+ w00=1;
+ }
+
+ /* Calculate w11*w00, w10*w10 and w00*w00 in the same Q domain */
+
+ scale1 = bitsw00-15;
+ scale2 = bitsw11-15;
+
+ if (scale2>scale1) {
+ scale = scale2;
+ } else {
+ scale = scale1;
+ }
+
+ w11w00 = (int16_t)WEBRTC_SPL_SHIFT_W32(w11, -scale) *
+ (int16_t)WEBRTC_SPL_SHIFT_W32(w00, -scale);
+
+ w10w10 = (int16_t)WEBRTC_SPL_SHIFT_W32(w10, -scale) *
+ (int16_t)WEBRTC_SPL_SHIFT_W32(w10, -scale);
+
+ w00w00 = (int16_t)WEBRTC_SPL_SHIFT_W32(w00, -scale) *
+ (int16_t)WEBRTC_SPL_SHIFT_W32(w00, -scale);
+
+ /* Calculate (w11*w00-w10*w10)/(w00*w00) in Q16 */
+ if (w00w00>65536) {
+ endiff = (w11w00-w10w10);
+ endiff = WEBRTC_SPL_MAX(0, endiff);
+ /* denom is in Q16 */
+ denom = WebRtcSpl_DivW32W16(endiff, (int16_t)(w00w00 >> 16));
+ } else {
+ denom = 65536;
+ }
+
+ if( denom > 7){ /* eliminates numerical problems
+                           in the smoothing */
+
+ scale=WebRtcSpl_GetSizeInBits(denom)-15;
+
+ if (scale>0) {
+ /* denomW16 is in Q(16+scale) */
+ denomW16 = (int16_t)(denom >> scale);
+
+ /* num in Q(34-scale) */
+ num = ENH_A0_MINUS_A0A0DIV4 >> scale;
+ } else {
+ /* denomW16 is in Q16 */
+ denomW16=(int16_t)denom;
+
+ /* num in Q34 */
+ num=ENH_A0_MINUS_A0A0DIV4;
+ }
+
+      /* A = sqrt( (ENH_A0-(ENH_A0^2)/4)*(w00*w00)/(w11*w00 - w10*w10) ) in Q9 */
+ A = (int16_t)WebRtcSpl_SqrtFloor(WebRtcSpl_DivW32W16(num, denomW16));
+
+ /* B_W32 is in Q30 ( B = 1 - ENH_A0/2 - A * w10/w00 ) */
+ scale1 = 31-bitsw10;
+ scale2 = 21-scale1;
+ w10prim = w10 == 0 ? 0 : w10 * (1 << scale1);
+ w00prim = WEBRTC_SPL_SHIFT_W32(w00, -scale2);
+ scale = bitsw00-scale2-15;
+
+ if (scale>0) {
+ w10prim >>= scale;
+ w00prim >>= scale;
+ }
+
+ if ((w00prim>0)&&(w10prim>0)) {
+ w11_div_w00=WebRtcSpl_DivW32W16(w10prim, (int16_t)w00prim);
+
+ if (WebRtcSpl_GetSizeInBits(w11_div_w00)+WebRtcSpl_GetSizeInBits(A)>31) {
+ B_W32 = 0;
+ } else {
+ B_W32 = (int32_t)1073741824 - (int32_t)ENH_A0DIV2 -
+ WEBRTC_SPL_MUL(A, w11_div_w00);
+ }
+ B = (int16_t)(B_W32 >> 16); /* B in Q14. */
+ } else {
+ /* No smoothing */
+ A = 0;
+ B = 16384; /* 1 in Q14 */
+ }
+ }
+ else{ /* essentially no difference between cycles;
+ smoothing not needed */
+
+ A = 0;
+ B = 16384; /* 1 in Q14 */
+ }
+
+ /* create smoothed sequence */
+
+ WebRtcSpl_ScaleAndAddVectors(surround, A, 9,
+ current, B, 14,
+ odata, ENH_BLOCKL);
+ }
+ return;
+}
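The final mixing step above computes odata = A*surround + B*current with A in Q9 and B in Q14. A plain-C sketch of that step, rounding omitted for brevity (an illustrative stand-in for WebRtcSpl_ScaleAndAddVectors, not its actual implementation):

#include <stdint.h>
#include <stdio.h>

int main(void) {
  int16_t surround[4] = {1000, -2000, 3000, -4000};
  int16_t current[4] = {500, 500, 500, 500};
  int16_t odata[4];
  int16_t A = 256;   /* 0.5 in Q9 */
  int16_t B = 8192;  /* 0.5 in Q14 */
  int i;
  for (i = 0; i < 4; i++) {
    /* widen to 32 bits before multiplying, then shift each term back */
    odata[i] = (int16_t)(((int32_t)A * surround[i] >> 9) +
                         ((int32_t)B * current[i] >> 14));
  }
  for (i = 0; i < 4; i++) printf("%d ", odata[i]);
  printf("\n");
  return 0;
}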
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/smooth.h b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/smooth.h
new file mode 100644
index 0000000000..c8752be64f
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/smooth.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_Smooth.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_SMOOTH_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_SMOOTH_H_
+
+#include <stdint.h>
+
+/*----------------------------------------------------------------*
+ * find the smoothed output data
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_Smooth(int16_t* odata, /* (o) smoothed output */
+                          int16_t* current, /* (i) the unenhanced residual for
+ this block */
+ int16_t* surround /* (i) The approximation from the
+ surrounding sequences */
+ );
+
+#endif
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/smooth_out_data.c b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/smooth_out_data.c
new file mode 100644
index 0000000000..9f952bfb93
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/smooth_out_data.c
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_Smooth_odata.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/smooth_out_data.h"
+
+#include "modules/audio_coding/codecs/ilbc/constants.h"
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+#include "rtc_base/sanitizer.h"
+
+// An s32 + s32 -> s32 addition that's allowed to overflow. (It's still
+// undefined behavior, so not a good idea; this just makes UBSan ignore the
+// violation, so that our old code can continue to do what it's always been
+// doing.)
+static inline int32_t RTC_NO_SANITIZE("signed-integer-overflow")
+ OverflowingAdd_S32_S32_To_S32(int32_t a, int32_t b) {
+ return a + b;
+}
+
+int32_t WebRtcIlbcfix_Smooth_odata(
+ int16_t *odata,
+ int16_t *psseq,
+ int16_t *surround,
+ int16_t C)
+{
+ int i;
+
+ int16_t err;
+ int32_t errs;
+
+ for(i=0;i<80;i++) {
+ odata[i]= (int16_t)((C * surround[i] + 1024) >> 11);
+ }
+
+ errs=0;
+ for(i=0;i<80;i++) {
+ err = (psseq[i] - odata[i]) >> 3;
+ errs = OverflowingAdd_S32_S32_To_S32(errs, err * err); // errs in Q-6
+ }
+
+ return errs;
+}
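The "errs in Q-6" note above follows from the Q-domain bookkeeping: the difference is right-shifted 3 bits (Q-3), and squaring doubles the exponent, so each term lands in Q-6. A tiny numeric check (illustrative only):

#include <stdint.h>
#include <stdio.h>

int main(void) {
  int16_t diff = 64;        /* a Q0 difference */
  int16_t err = diff >> 3;  /* Q-3: stored as 8, represents 64 */
  int32_t term = err * err; /* Q-6: stored as 64, represents 64*64 = 4096 */
  printf("stored %d, represents %d\n", term, term << 6);
  return 0;
}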
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/smooth_out_data.h b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/smooth_out_data.h
new file mode 100644
index 0000000000..318e7b04a2
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/smooth_out_data.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_Smooth_odata.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_SMOOTH_OUT_DATA_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_SMOOTH_OUT_DATA_H_
+
+#include <stdint.h>
+
+/*----------------------------------------------------------------*
+ * help function to WebRtcIlbcfix_Smooth()
+ *---------------------------------------------------------------*/
+
+int32_t WebRtcIlbcfix_Smooth_odata(int16_t* odata,
+ int16_t* psseq,
+ int16_t* surround,
+ int16_t C);
+
+#endif
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/sort_sq.c b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/sort_sq.c
new file mode 100644
index 0000000000..c3a24750f0
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/sort_sq.c
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_SortSq.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/sort_sq.h"
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ * scalar quantization
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_SortSq(
+ int16_t *xq, /* (o) the quantized value */
+ int16_t *index, /* (o) the quantization index */
+ int16_t x, /* (i) the value to quantize */
+ const int16_t *cb, /* (i) the quantization codebook */
+ int16_t cb_size /* (i) the size of the quantization codebook */
+ ){
+ int i;
+
+ if (x <= cb[0]) {
+ *index = 0;
+ *xq = cb[0];
+ } else {
+ i = 0;
+ while ((x > cb[i]) && (i < (cb_size-1))) {
+ i++;
+ }
+
+ if (x > (((int32_t)cb[i] + cb[i - 1] + 1) >> 1)) {
+ *index = i;
+ *xq = cb[i];
+ } else {
+ *index = i - 1;
+ *xq = cb[i - 1];
+ }
+ }
+}
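
A minimal usage sketch of the scalar quantizer above, with a hypothetical sorted codebook (not part of the patch): the loop finds the first codebook entry above x, then picks the nearer of the two bracketing entries.

#include <stdint.h>
#include <stdio.h>

/* Declaration for the routine above; link against sort_sq.c. */
void WebRtcIlbcfix_SortSq(int16_t* xq, int16_t* index, int16_t x,
                          const int16_t* cb, int16_t cb_size);

int main(void) {
  const int16_t cb[4] = {-300, -100, 100, 300}; /* hypothetical codebook */
  int16_t xq, index;
  WebRtcIlbcfix_SortSq(&xq, &index, 120, cb, 4);
  /* 120 lies between 100 and 300 and is nearer 100. */
  printf("xq=%d index=%d\n", xq, index); /* xq=100 index=2 */
  return 0;
}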
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/sort_sq.h b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/sort_sq.h
new file mode 100644
index 0000000000..02028dae93
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/sort_sq.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_SortSq.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_SORT_SQ_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_SORT_SQ_H_
+
+#include <stdint.h>
+
+/*----------------------------------------------------------------*
+ * scalar quantization
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_SortSq(
+ int16_t* xq, /* (o) the quantized value */
+ int16_t* index, /* (o) the quantization index */
+ int16_t x, /* (i) the value to quantize */
+ const int16_t* cb, /* (i) the quantization codebook */
+ int16_t cb_size /* (i) the size of the quantization codebook */
+ );
+
+#endif
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/split_vq.c b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/split_vq.c
new file mode 100644
index 0000000000..c1f04d2287
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/split_vq.c
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_SplitVq.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/split_vq.h"
+
+#include "modules/audio_coding/codecs/ilbc/constants.h"
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+#include "modules/audio_coding/codecs/ilbc/vq3.h"
+#include "modules/audio_coding/codecs/ilbc/vq4.h"
+
+/*----------------------------------------------------------------*
+ * split vector quantization
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_SplitVq(
+ int16_t *qX, /* (o) the quantized vector in Q13 */
+ int16_t *index, /* (o) a vector of indexes for all vector
+ codebooks in the split */
+ int16_t *X, /* (i) the vector to quantize */
+ int16_t *CB, /* (i) the quantizer codebook in Q13 */
+ int16_t *dim, /* (i) the dimension of X and qX */
+ int16_t *cbsize /* (i) the number of vectors in the codebook */
+ ) {
+
+ int16_t *qXPtr, *indexPtr, *CBPtr, *XPtr;
+
+  /* Quantize X with the three vector quantization tables */
+
+ qXPtr=qX;
+ indexPtr=index;
+ CBPtr=CB;
+ XPtr=X;
+ WebRtcIlbcfix_Vq3(qXPtr, indexPtr, CBPtr, XPtr, cbsize[0]);
+
+ qXPtr+=3;
+ indexPtr+=1;
+ CBPtr+=(dim[0]*cbsize[0]);
+ XPtr+=3;
+ WebRtcIlbcfix_Vq3(qXPtr, indexPtr, CBPtr, XPtr, cbsize[1]);
+
+ qXPtr+=3;
+ indexPtr+=1;
+ CBPtr+=(dim[1]*cbsize[1]);
+ XPtr+=3;
+ WebRtcIlbcfix_Vq4(qXPtr, indexPtr, CBPtr, XPtr, cbsize[2]);
+
+ return;
+}
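
To make the pointer stepping above concrete: the 10-dimensional vector is split 3+3+4, and CB stores the three sub-codebooks back to back. A standalone sketch of the offsets, with assumed codebook sizes (the real ones live in the iLBC constants tables):

#include <stdint.h>
#include <stdio.h>

int main(void) {
  int16_t dim[3]    = {3, 3, 4};      /* sub-vector dimensions (3+3+4 = 10) */
  int16_t cbsize[3] = {64, 128, 128}; /* entries per sub-codebook (assumed) */
  /* Offsets into CB stepped through by WebRtcIlbcfix_SplitVq: */
  int off1 = dim[0] * cbsize[0];
  int off2 = off1 + dim[1] * cbsize[1];
  printf("codebook offsets: 0 %d %d\n", off1, off2); /* 0 192 576 */
  return 0;
}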
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/split_vq.h b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/split_vq.h
new file mode 100644
index 0000000000..e4b02a2bc2
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/split_vq.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_SplitVq.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_SPLIT_VQ_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_SPLIT_VQ_H_
+
+#include <stdint.h>
+
+/*----------------------------------------------------------------*
+ * split vector quantization
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_SplitVq(
+ int16_t* qX, /* (o) the quantized vector in Q13 */
+ int16_t* index, /* (o) a vector of indexes for all vector
+ codebooks in the split */
+ int16_t* X, /* (i) the vector to quantize */
+ int16_t* CB, /* (i) the quantizer codebook in Q13 */
+ int16_t* dim, /* (i) the dimension of X and qX */
+ int16_t* cbsize /* (i) the number of vectors in the codebook */
+ );
+
+#endif
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/state_construct.c b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/state_construct.c
new file mode 100644
index 0000000000..c58086c03b
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/state_construct.c
@@ -0,0 +1,116 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_StateConstruct.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/state_construct.h"
+
+#include "modules/audio_coding/codecs/ilbc/constants.h"
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ * decoding of the start state
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_StateConstruct(
+ size_t idxForMax, /* (i) 6-bit index for the quantization of
+ max amplitude */
+ int16_t *idxVec, /* (i) vector of quantization indexes */
+  int16_t *syntDenum,   /* (i) synthesis filter denominator */
+ int16_t *Out_fix, /* (o) the decoded state vector */
+ size_t len /* (i) length of a state vector */
+ ) {
+ size_t k;
+ int16_t maxVal;
+ int16_t *tmp1, *tmp2, *tmp3;
+ /* Stack based */
+ int16_t numerator[1+LPC_FILTERORDER];
+ int16_t sampleValVec[2*STATE_SHORT_LEN_30MS+LPC_FILTERORDER];
+ int16_t sampleMaVec[2*STATE_SHORT_LEN_30MS+LPC_FILTERORDER];
+ int16_t *sampleVal = &sampleValVec[LPC_FILTERORDER];
+ int16_t *sampleMa = &sampleMaVec[LPC_FILTERORDER];
+ int16_t *sampleAr = &sampleValVec[LPC_FILTERORDER];
+
+ /* initialization of coefficients */
+
+ for (k=0; k<LPC_FILTERORDER+1; k++){
+ numerator[k] = syntDenum[LPC_FILTERORDER-k];
+ }
+
+ /* decoding of the maximum value */
+
+ maxVal = WebRtcIlbcfix_kFrgQuantMod[idxForMax];
+
+ /* decoding of the sample values */
+ tmp1 = sampleVal;
+ tmp2 = &idxVec[len-1];
+
+ if (idxForMax<37) {
+ for(k=0; k<len; k++){
+      /* The shift compensates for the Q13 table WebRtcIlbcfix_kStateSq3[]; adding 2097152 (= 0.5 << 22) rounds to nearest.
+        maxVal is in Q8 and the result is in Q(-1) */
+ *tmp1 = (int16_t)((maxVal * WebRtcIlbcfix_kStateSq3[*tmp2] + 2097152) >>
+ 22);
+ tmp1++;
+ tmp2--;
+ }
+ } else if (idxForMax<59) {
+ for(k=0; k<len; k++){
+      /* The shift compensates for the Q13 table WebRtcIlbcfix_kStateSq3[]; adding 262144 (= 0.5 << 19) rounds to nearest.
+        maxVal is in Q5 and the result is in Q(-1) */
+ *tmp1 = (int16_t)((maxVal * WebRtcIlbcfix_kStateSq3[*tmp2] + 262144) >>
+ 19);
+ tmp1++;
+ tmp2--;
+ }
+ } else {
+ for(k=0; k<len; k++){
+      /* The shift compensates for the Q13 table WebRtcIlbcfix_kStateSq3[]; adding 65536 (= 0.5 << 17) rounds to nearest.
+        maxVal is in Q3 and the result is in Q(-1) */
+ *tmp1 = (int16_t)((maxVal * WebRtcIlbcfix_kStateSq3[*tmp2] + 65536) >>
+ 17);
+ tmp1++;
+ tmp2--;
+ }
+ }
+
+ /* Set the rest of the data to zero */
+ WebRtcSpl_MemSetW16(&sampleVal[len], 0, len);
+
+ /* circular convolution with all-pass filter */
+
+ /* Set the state to zero */
+ WebRtcSpl_MemSetW16(sampleValVec, 0, (LPC_FILTERORDER));
+
+ /* Run MA filter + AR filter */
+ WebRtcSpl_FilterMAFastQ12(
+ sampleVal, sampleMa,
+ numerator, LPC_FILTERORDER+1, len + LPC_FILTERORDER);
+ WebRtcSpl_MemSetW16(&sampleMa[len + LPC_FILTERORDER], 0, (len - LPC_FILTERORDER));
+ WebRtcSpl_FilterARFastQ12(
+ sampleMa, sampleAr,
+ syntDenum, LPC_FILTERORDER+1, 2 * len);
+
+ tmp1 = &sampleAr[len-1];
+ tmp2 = &sampleAr[2*len-1];
+ tmp3 = Out_fix;
+ for(k=0;k<len;k++){
+ (*tmp3) = (*tmp1) + (*tmp2);
+ tmp1--;
+ tmp2--;
+ tmp3++;
+ }
+}
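
The circular convolution above is realized by zero-padding the len-sample state to 2*len, running the linear filters over the doubled buffer, and folding the second half back onto the first (the tmp1/tmp2 loop at the end). A standalone sketch of that fold with a plain integer MA filter:

#include <stdio.h>

/* Circular convolution of x (length n) with h (length m <= n), done as
   above: linear-convolve into a 2*n buffer, then fold the halves. */
static void circular_conv(const int* x, int n, const int* h, int m, int* y) {
  int buf[32] = {0}; /* sketch assumes 2*n <= 32 */
  for (int i = 0; i < n; i++)
    for (int j = 0; j < m; j++)
      buf[i + j] += x[i] * h[j];
  for (int k = 0; k < n; k++)
    y[k] = buf[k] + buf[k + n]; /* the fold */
}

int main(void) {
  int x[4] = {1, 2, 3, 4}, h[3] = {1, 1, 1}, y[4];
  circular_conv(x, 4, h, 3, y);
  printf("%d %d %d %d\n", y[0], y[1], y[2], y[3]); /* 8 7 6 9 */
  return 0;
}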
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/state_construct.h b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/state_construct.h
new file mode 100644
index 0000000000..4c3011937d
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/state_construct.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_StateConstruct.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_STATE_CONSTRUCT_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_STATE_CONSTRUCT_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+/*----------------------------------------------------------------*
+ * Generate the start state from the quantized indexes
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_StateConstruct(
+ size_t idxForMax, /* (i) 6-bit index for the quantization of
+ max amplitude */
+ int16_t* idxVec, /* (i) vector of quantization indexes */
+    int16_t* syntDenum,   /* (i) synthesis filter denominator */
+ int16_t* Out_fix, /* (o) the decoded state vector */
+ size_t len /* (i) length of a state vector */
+ );
+
+#endif
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/state_search.c b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/state_search.c
new file mode 100644
index 0000000000..7227ac9d45
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/state_search.c
@@ -0,0 +1,121 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_StateSearch.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/state_search.h"
+
+#include "modules/audio_coding/codecs/ilbc/abs_quant.h"
+#include "modules/audio_coding/codecs/ilbc/constants.h"
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ * encoding of start state
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_StateSearch(
+ IlbcEncoder *iLBCenc_inst,
+ /* (i) Encoder instance */
+ iLBC_bits *iLBC_encbits,/* (i/o) Encoded bits (output idxForMax
+ and idxVec, input state_first) */
+ int16_t *residual, /* (i) target residual vector */
+ int16_t *syntDenum, /* (i) lpc synthesis filter */
+  int16_t *weightDenum  /* (i) weighting filter denominator */
+ ) {
+ size_t k, index;
+ int16_t maxVal;
+ int16_t scale, shift;
+ int32_t maxValsq;
+ int16_t scaleRes;
+ int16_t max;
+ int i;
+ /* Stack based */
+ int16_t numerator[1+LPC_FILTERORDER];
+ int16_t residualLongVec[2*STATE_SHORT_LEN_30MS+LPC_FILTERORDER];
+ int16_t sampleMa[2*STATE_SHORT_LEN_30MS];
+ int16_t *residualLong = &residualLongVec[LPC_FILTERORDER];
+ int16_t *sampleAr = residualLong;
+
+ /* Scale to maximum 12 bits to avoid saturation in circular convolution filter */
+ max = WebRtcSpl_MaxAbsValueW16(residual, iLBCenc_inst->state_short_len);
+ scaleRes = WebRtcSpl_GetSizeInBits(max)-12;
+ scaleRes = WEBRTC_SPL_MAX(0, scaleRes);
+ /* Set up the filter coefficients for the circular convolution */
+ for (i=0; i<LPC_FILTERORDER+1; i++) {
+ numerator[i] = (syntDenum[LPC_FILTERORDER-i]>>scaleRes);
+ }
+
+ /* Copy the residual to a temporary buffer that we can filter
+ * and set the remaining samples to zero.
+ */
+ WEBRTC_SPL_MEMCPY_W16(residualLong, residual, iLBCenc_inst->state_short_len);
+ WebRtcSpl_MemSetW16(residualLong + iLBCenc_inst->state_short_len, 0, iLBCenc_inst->state_short_len);
+
+  /* Run the Zero-Pole filter (circular convolution) */
+ WebRtcSpl_MemSetW16(residualLongVec, 0, LPC_FILTERORDER);
+ WebRtcSpl_FilterMAFastQ12(residualLong, sampleMa, numerator,
+ LPC_FILTERORDER + 1,
+ iLBCenc_inst->state_short_len + LPC_FILTERORDER);
+ WebRtcSpl_MemSetW16(&sampleMa[iLBCenc_inst->state_short_len + LPC_FILTERORDER], 0, iLBCenc_inst->state_short_len - LPC_FILTERORDER);
+
+ WebRtcSpl_FilterARFastQ12(
+ sampleMa, sampleAr,
+ syntDenum, LPC_FILTERORDER+1, 2 * iLBCenc_inst->state_short_len);
+
+ for(k=0;k<iLBCenc_inst->state_short_len;k++){
+ sampleAr[k] += sampleAr[k+iLBCenc_inst->state_short_len];
+ }
+
+ /* Find maximum absolute value in the vector */
+ maxVal=WebRtcSpl_MaxAbsValueW16(sampleAr, iLBCenc_inst->state_short_len);
+
+ /* Find the best index */
+
+ if ((((int32_t)maxVal)<<scaleRes)<23170) {
+ maxValsq=((int32_t)maxVal*maxVal)<<(2+2*scaleRes);
+ } else {
+ maxValsq=(int32_t)WEBRTC_SPL_WORD32_MAX;
+ }
+
+ index=0;
+ for (i=0;i<63;i++) {
+
+ if (maxValsq>=WebRtcIlbcfix_kChooseFrgQuant[i]) {
+ index=i+1;
+ } else {
+ i=63;
+ }
+ }
+ iLBC_encbits->idxForMax=index;
+
+ /* Rescale the vector before quantization */
+ scale=WebRtcIlbcfix_kScale[index];
+
+ if (index<27) { /* scale table is in Q16, fout[] is in Q(-1) and we want the result to be in Q11 */
+ shift=4;
+ } else { /* scale table is in Q21, fout[] is in Q(-1) and we want the result to be in Q11 */
+ shift=9;
+ }
+
+ /* Set up vectors for AbsQuant and rescale it with the scale factor */
+ WebRtcSpl_ScaleVectorWithSat(sampleAr, sampleAr, scale,
+ iLBCenc_inst->state_short_len, (int16_t)(shift-scaleRes));
+
+ /* Quantize the values in fout[] */
+ WebRtcIlbcfix_AbsQuant(iLBCenc_inst, iLBC_encbits, sampleAr, weightDenum);
+
+ return;
+}
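
The scaling step at the top computes how many right-shifts bring the largest residual magnitude under 12 bits. A standalone sketch of that computation, with a local stand-in for WebRtcSpl_GetSizeInBits (assumed to return the number of bits needed to represent its argument):

#include <stdint.h>
#include <stdio.h>

/* Stand-in for WebRtcSpl_GetSizeInBits (assumption: same semantics). */
static int16_t bits_needed(uint32_t n) {
  int16_t bits = 0;
  while (n > 0) { bits++; n >>= 1; }
  return bits;
}

int main(void) {
  int16_t max = 9000; /* example max |residual| */
  int16_t scaleRes = bits_needed(max) - 12;
  if (scaleRes < 0) scaleRes = 0;
  /* 9000 needs 14 bits, so shift right by 2 to fit in 12 bits. */
  printf("scaleRes=%d scaled=%d\n", scaleRes, max >> scaleRes);
  return 0;
}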
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/state_search.h b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/state_search.h
new file mode 100644
index 0000000000..6469138a0e
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/state_search.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_StateSearch.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_STATE_SEARCH_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_STATE_SEARCH_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ * encoding of start state
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_StateSearch(
+ IlbcEncoder* iLBCenc_inst,
+ /* (i) Encoder instance */
+ iLBC_bits* iLBC_encbits, /* (i/o) Encoded bits (output idxForMax
+ and idxVec, input state_first) */
+ int16_t* residual, /* (i) target residual vector */
+ int16_t* syntDenum, /* (i) lpc synthesis filter */
+    int16_t* weightDenum /* (i) weighting filter denominator */
+ );
+
+#endif
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/swap_bytes.c b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/swap_bytes.c
new file mode 100644
index 0000000000..bbafc1a2ed
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/swap_bytes.c
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_SwapBytes.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/swap_bytes.h"
+
+/*----------------------------------------------------------------*
+ * Swap bytes (to simplify operations on Little Endian machines)
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_SwapBytes(
+ const uint16_t* input, /* (i) the sequence to swap */
+    size_t wordLength,      /* (i) number of uint16_t values to swap */
+ uint16_t* output /* (o) the swapped sequence */
+ ) {
+ size_t k;
+ for (k = wordLength; k > 0; k--) {
+ *output++ = (*input >> 8)|(*input << 8);
+ input++;
+ }
+}
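
Swapping twice restores the original words, so the same routine serves both directions when moving a big-endian bitstream on a little-endian host. A standalone round-trip sketch (not part of the patch):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Declaration for the routine above; link against swap_bytes.c. */
void WebRtcIlbcfix_SwapBytes(const uint16_t* input, size_t wordLength,
                             uint16_t* output);

int main(void) {
  uint16_t in[2] = {0x1234, 0xABCD}, swapped[2], back[2];
  WebRtcIlbcfix_SwapBytes(in, 2, swapped);   /* 0x3412, 0xCDAB */
  WebRtcIlbcfix_SwapBytes(swapped, 2, back); /* restores the input */
  printf("%04X %04X -> %04X %04X\n", swapped[0], swapped[1], back[0], back[1]);
  return 0;
}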
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/swap_bytes.h b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/swap_bytes.h
new file mode 100644
index 0000000000..c59bf3068a
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/swap_bytes.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_SwapBytes.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_SWAP_BYTES_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_SWAP_BYTES_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+/*----------------------------------------------------------------*
+ * Swap bytes (to simplify operations on Little Endian machines)
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_SwapBytes(
+ const uint16_t* input, /* (i) the sequence to swap */
+    size_t wordLength, /* (i) number of uint16_t values to swap */
+ uint16_t* output /* (o) the swapped sequence */
+ );
+
+#endif
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/test/empty.cc b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/test/empty.cc
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/test/empty.cc
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/test/iLBC_test.c b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/test/iLBC_test.c
new file mode 100644
index 0000000000..e0ca075eda
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/test/iLBC_test.c
@@ -0,0 +1,238 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ iLBC_test.c
+
+******************************************************************/
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "modules/audio_coding/codecs/ilbc/ilbc.h"
+
+/*---------------------------------------------------------------*
+ * Main program to test iLBC encoding and decoding
+ *
+ * Usage:
+ * exefile_name.exe <infile> <bytefile> <outfile> <channel>
+ *
+ * <infile> : Input file, speech for encoder (16-bit pcm file)
+ * <bytefile> : Bit stream output from the encoder
+ * <outfile> : Output file, decoded speech (16-bit pcm file)
+ * <channel> : Bit error file, optional (16-bit)
+ * 1 - Packet received correctly
+ * 0 - Packet Lost
+ *
+ *--------------------------------------------------------------*/
+
+#define BLOCKL_MAX 240
+#define ILBCNOOFWORDS_MAX 25
+
+
+int main(int argc, char* argv[])
+{
+
+ FILE *ifileid,*efileid,*ofileid, *cfileid;
+ int16_t data[BLOCKL_MAX];
+ uint8_t encoded_data[2 * ILBCNOOFWORDS_MAX];
+ int16_t decoded_data[BLOCKL_MAX];
+ int len_int, mode;
+ short pli;
+ int blockcount = 0;
+ size_t frameLen, len, len_i16s;
+ int16_t speechType;
+ IlbcEncoderInstance *Enc_Inst;
+ IlbcDecoderInstance *Dec_Inst;
+
+#ifdef __ILBC_WITH_40BITACC
+ /* Doublecheck that long long exists */
+ if (sizeof(long)>=sizeof(long long)) {
+ fprintf(stderr, "40-bit simulation is not be supported on this platform\n");
+ exit(0);
+ }
+#endif
+
+ /* get arguments and open files */
+
+ if ((argc!=5) && (argc!=6)) {
+ fprintf(stderr,
+ "\n*-----------------------------------------------*\n");
+ fprintf(stderr,
+ " %s <20,30> input encoded decoded (channel)\n\n",
+ argv[0]);
+ fprintf(stderr,
+ " mode : Frame size for the encoding/decoding\n");
+ fprintf(stderr,
+ " 20 - 20 ms\n");
+ fprintf(stderr,
+ " 30 - 30 ms\n");
+ fprintf(stderr,
+ " input : Speech for encoder (16-bit pcm file)\n");
+ fprintf(stderr,
+ " encoded : Encoded bit stream\n");
+ fprintf(stderr,
+ " decoded : Decoded speech (16-bit pcm file)\n");
+ fprintf(stderr,
+ " channel : Packet loss pattern, optional (16-bit)\n");
+ fprintf(stderr,
+ " 1 - Packet received correctly\n");
+ fprintf(stderr,
+ " 0 - Packet Lost\n");
+ fprintf(stderr,
+ "*-----------------------------------------------*\n\n");
+ exit(1);
+ }
+ mode=atoi(argv[1]);
+ if (mode != 20 && mode != 30) {
+ fprintf(stderr,"Wrong mode %s, must be 20, or 30\n",
+ argv[1]);
+ exit(2);
+ }
+ if ( (ifileid=fopen(argv[2],"rb")) == NULL) {
+ fprintf(stderr,"Cannot open input file %s\n", argv[2]);
+ exit(2);}
+ if ( (efileid=fopen(argv[3],"wb")) == NULL) {
+ fprintf(stderr, "Cannot open encoded file file %s\n",
+ argv[3]); exit(1);}
+ if ( (ofileid=fopen(argv[4],"wb")) == NULL) {
+ fprintf(stderr, "Cannot open decoded file %s\n",
+ argv[4]); exit(1);}
+ if (argc==6) {
+ if( (cfileid=fopen(argv[5],"rb")) == NULL) {
+ fprintf(stderr, "Cannot open channel file %s\n",
+ argv[5]);
+ exit(1);
+ }
+ } else {
+ cfileid=NULL;
+ }
+
+ /* print info */
+
+ fprintf(stderr, "\n");
+ fprintf(stderr,
+ "*---------------------------------------------------*\n");
+ fprintf(stderr,
+ "* *\n");
+ fprintf(stderr,
+ "* iLBC test program *\n");
+ fprintf(stderr,
+ "* *\n");
+ fprintf(stderr,
+ "* *\n");
+ fprintf(stderr,
+ "*---------------------------------------------------*\n");
+ fprintf(stderr,"\nMode : %2d ms\n", mode);
+ fprintf(stderr,"Input file : %s\n", argv[2]);
+ fprintf(stderr,"Encoded file : %s\n", argv[3]);
+ fprintf(stderr,"Output file : %s\n", argv[4]);
+ if (argc==6) {
+ fprintf(stderr,"Channel file : %s\n", argv[5]);
+ }
+ fprintf(stderr,"\n");
+
+ /* Create structs */
+ WebRtcIlbcfix_EncoderCreate(&Enc_Inst);
+ WebRtcIlbcfix_DecoderCreate(&Dec_Inst);
+
+
+ /* Initialization */
+
+ WebRtcIlbcfix_EncoderInit(Enc_Inst, mode);
+ WebRtcIlbcfix_DecoderInit(Dec_Inst, mode);
+ frameLen = (size_t)(mode*8);
+
+ /* loop over input blocks */
+
+ while (fread(data,sizeof(int16_t),frameLen,ifileid) == frameLen) {
+
+ blockcount++;
+
+ /* encoding */
+
+ fprintf(stderr, "--- Encoding block %i --- ",blockcount);
+ len_int = WebRtcIlbcfix_Encode(Enc_Inst, data, frameLen, encoded_data);
+ if (len_int < 0) {
+ fprintf(stderr, "Error encoding\n");
+ exit(0);
+ }
+ len = (size_t)len_int;
+ fprintf(stderr, "\r");
+
+ /* write byte file */
+
+ len_i16s = (len + 1) / sizeof(int16_t);
+ if (fwrite(encoded_data, sizeof(int16_t), len_i16s, efileid) != len_i16s) {
+ return -1;
+ }
+
+ /* get channel data if provided */
+ if (argc==6) {
+ if (fread(&pli, sizeof(int16_t), 1, cfileid)) {
+ if ((pli!=0)&&(pli!=1)) {
+ fprintf(stderr, "Error in channel file\n");
+ exit(0);
+ }
+ if (pli==0) {
+ /* Packet loss -> remove info from frame */
+ memset(encoded_data, 0,
+ sizeof(int16_t)*ILBCNOOFWORDS_MAX);
+ }
+ } else {
+ fprintf(stderr, "Error. Channel file too short\n");
+ exit(0);
+ }
+ } else {
+ pli=1;
+ }
+
+ /* decoding */
+
+ fprintf(stderr, "--- Decoding block %i --- ",blockcount);
+ if (pli==1) {
+ len_int=WebRtcIlbcfix_Decode(Dec_Inst, encoded_data,
+ len, decoded_data,&speechType);
+ if (len_int < 0) {
+ fprintf(stderr, "Error decoding\n");
+ exit(0);
+ }
+ len = (size_t)len_int;
+ } else {
+ len=WebRtcIlbcfix_DecodePlc(Dec_Inst, decoded_data, 1);
+ }
+ fprintf(stderr, "\r");
+
+ /* write output file */
+
+ if (fwrite(decoded_data, sizeof(int16_t), len, ofileid) != len) {
+ return -1;
+ }
+ }
+
+ /* close files */
+
+ fclose(ifileid); fclose(efileid); fclose(ofileid);
+ if (argc==6) {
+ fclose(cfileid);
+ }
+
+ /* Free structs */
+ WebRtcIlbcfix_EncoderFree(Enc_Inst);
+ WebRtcIlbcfix_DecoderFree(Dec_Inst);
+
+
+ printf("\nDone with simulation\n\n");
+
+ return(0);
+}
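
Stripped of file handling and the loss channel, the API flow the test exercises is just create/init/encode/decode/free. A minimal 20 ms round-trip sketch using only calls that appear above:

#include <stddef.h>
#include <stdint.h>
#include "modules/audio_coding/codecs/ilbc/ilbc.h"

int ilbc_roundtrip_20ms(const int16_t speech[160], int16_t out[160]) {
  IlbcEncoderInstance* enc;
  IlbcDecoderInstance* dec;
  uint8_t payload[50]; /* 2 * ILBCNOOFWORDS_MAX bytes, as above */
  int16_t speechType;
  int enc_len, dec_len = -1;

  WebRtcIlbcfix_EncoderCreate(&enc);
  WebRtcIlbcfix_DecoderCreate(&dec);
  WebRtcIlbcfix_EncoderInit(enc, 20); /* 20 ms mode: 160 samples at 8 kHz */
  WebRtcIlbcfix_DecoderInit(dec, 20);

  enc_len = WebRtcIlbcfix_Encode(enc, speech, 160, payload);
  if (enc_len > 0)
    dec_len = WebRtcIlbcfix_Decode(dec, payload, (size_t)enc_len, out,
                                   &speechType);

  WebRtcIlbcfix_EncoderFree(enc);
  WebRtcIlbcfix_DecoderFree(dec);
  return dec_len; /* 160 on success */
}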
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/test/iLBC_testLib.c b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/test/iLBC_testLib.c
new file mode 100644
index 0000000000..132f3bdb37
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/test/iLBC_testLib.c
@@ -0,0 +1,215 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+iLBC Speech Coder ANSI-C Source Code
+
+iLBC_testLib.c
+
+******************************************************************/
+
+#include <math.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <time.h>
+#include "modules/audio_coding/codecs/ilbc/ilbc.h"
+
+//#define JUNK_DATA
+#ifdef JUNK_DATA
+#define SEED_FILE "randseed.txt"
+#endif
+
+
+/*----------------------------------------------------------------*
+* Main program to test iLBC encoding and decoding
+*
+* Usage:
+* exefile_name.exe <infile> <bytefile> <outfile>
+*
+*---------------------------------------------------------------*/
+
+int main(int argc, char* argv[])
+{
+ FILE *ifileid,*efileid,*ofileid, *chfileid;
+ short encoded_data[55], data[240], speechType;
+ int len_int, mode;
+ short pli;
+ size_t len, readlen;
+ int blockcount = 0;
+
+ IlbcEncoderInstance *Enc_Inst;
+ IlbcDecoderInstance *Dec_Inst;
+#ifdef JUNK_DATA
+ size_t i;
+ FILE *seedfile;
+ unsigned int random_seed = (unsigned int) time(NULL);//1196764538
+#endif
+
+ /* Create structs */
+ WebRtcIlbcfix_EncoderCreate(&Enc_Inst);
+ WebRtcIlbcfix_DecoderCreate(&Dec_Inst);
+
+ /* get arguments and open files */
+
+ if (argc != 6 ) {
+ fprintf(stderr, "%s mode inputfile bytefile outputfile channelfile\n",
+ argv[0]);
+ fprintf(stderr, "Example:\n");
+ fprintf(stderr, "%s <30,20> in.pcm byte.dat out.pcm T30.0.dat\n", argv[0]);
+ exit(1);
+ }
+ mode=atoi(argv[1]);
+ if (mode != 20 && mode != 30) {
+ fprintf(stderr,"Wrong mode %s, must be 20, or 30\n", argv[1]);
+ exit(2);
+ }
+ if ( (ifileid=fopen(argv[2],"rb")) == NULL) {
+ fprintf(stderr,"Cannot open input file %s\n", argv[2]);
+ exit(2);}
+ if ( (efileid=fopen(argv[3],"wb")) == NULL) {
+ fprintf(stderr, "Cannot open channelfile file %s\n",
+ argv[3]); exit(3);}
+ if( (ofileid=fopen(argv[4],"wb")) == NULL) {
+ fprintf(stderr, "Cannot open output file %s\n",
+ argv[4]); exit(3);}
+ if ( (chfileid=fopen(argv[5],"rb")) == NULL) {
+ fprintf(stderr,"Cannot open channel file file %s\n", argv[5]);
+ exit(2);
+ }
+ /* print info */
+ fprintf(stderr, "\n");
+ fprintf(stderr,
+ "*---------------------------------------------------*\n");
+ fprintf(stderr,
+ "* *\n");
+ fprintf(stderr,
+ "* iLBCtest *\n");
+ fprintf(stderr,
+ "* *\n");
+ fprintf(stderr,
+ "* *\n");
+ fprintf(stderr,
+ "*---------------------------------------------------*\n");
+#ifdef SPLIT_10MS
+ fprintf(stderr,"\n10ms split with raw mode: %2d ms\n", mode);
+#else
+ fprintf(stderr,"\nMode : %2d ms\n", mode);
+#endif
+ fprintf(stderr,"\nInput file : %s\n", argv[2]);
+ fprintf(stderr,"Coded file : %s\n", argv[3]);
+ fprintf(stderr,"Output file : %s\n\n", argv[4]);
+ fprintf(stderr,"Channel file : %s\n\n", argv[5]);
+
+#ifdef JUNK_DATA
+ srand(random_seed);
+
+ if ( (seedfile = fopen(SEED_FILE, "a+t") ) == NULL ) {
+ fprintf(stderr, "Error: Could not open file %s\n", SEED_FILE);
+ }
+ else {
+ fprintf(seedfile, "%u\n", random_seed);
+ fclose(seedfile);
+ }
+#endif
+
+ /* Initialization */
+ WebRtcIlbcfix_EncoderInit(Enc_Inst, mode);
+ WebRtcIlbcfix_DecoderInit(Dec_Inst, mode);
+
+ /* loop over input blocks */
+#ifdef SPLIT_10MS
+ readlen = 80;
+#else
+ readlen = (size_t)(mode << 3);
+#endif
+ while(fread(data, sizeof(short), readlen, ifileid) == readlen) {
+ blockcount++;
+
+ /* encoding */
+ fprintf(stderr, "--- Encoding block %i --- ",blockcount);
+ len_int=WebRtcIlbcfix_Encode(Enc_Inst, data, readlen, encoded_data);
+ if (len_int < 0) {
+ fprintf(stderr, "Error encoding\n");
+ exit(0);
+ }
+ len = (size_t)len_int;
+ fprintf(stderr, "\r");
+
+#ifdef JUNK_DATA
+ for ( i = 0; i < len; i++) {
+ encoded_data[i] = (short) (encoded_data[i] + (short) rand());
+ }
+#endif
+ /* write byte file */
+ if(len != 0){ //len may be 0 in 10ms split case
+ fwrite(encoded_data,1,len,efileid);
+
+ /* get channel data if provided */
+ if (argc==6) {
+ if (fread(&pli, sizeof(int16_t), 1, chfileid)) {
+ if ((pli!=0)&&(pli!=1)) {
+ fprintf(stderr, "Error in channel file\n");
+ exit(0);
+ }
+ if (pli==0) {
+ /* Packet loss -> remove info from frame */
+ memset(encoded_data, 0, sizeof(int16_t)*25);
+ }
+ } else {
+ fprintf(stderr, "Error. Channel file too short\n");
+ exit(0);
+ }
+ } else {
+ pli=1;
+ }
+
+ /* decoding */
+ fprintf(stderr, "--- Decoding block %i --- ",blockcount);
+ if (pli==1) {
+ len_int = WebRtcIlbcfix_Decode(Dec_Inst, encoded_data, len, data,
+ &speechType);
+ if (len_int < 0) {
+ fprintf(stderr, "Error decoding\n");
+ exit(0);
+ }
+ len = (size_t)len_int;
+ } else {
+ len=WebRtcIlbcfix_DecodePlc(Dec_Inst, data, 1);
+ }
+ fprintf(stderr, "\r");
+
+ /* write output file */
+ fwrite(data,sizeof(short),len,ofileid);
+ }
+ }
+
+#ifdef JUNK_DATA
+ if ( (seedfile = fopen(SEED_FILE, "a+t") ) == NULL ) {
+ fprintf(stderr, "Error: Could not open file %s\n", SEED_FILE);
+ }
+ else {
+ fprintf(seedfile, "ok\n\n");
+ fclose(seedfile);
+ }
+#endif
+
+ /* free structs */
+ WebRtcIlbcfix_EncoderFree(Enc_Inst);
+ WebRtcIlbcfix_DecoderFree(Dec_Inst);
+
+ /* close files */
+ fclose(ifileid);
+ fclose(efileid);
+ fclose(ofileid);
+
+ return 0;
+}
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/test/iLBC_testprogram.c b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/test/iLBC_testprogram.c
new file mode 100644
index 0000000000..a62a42edf6
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/test/iLBC_testprogram.c
@@ -0,0 +1,343 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+  iLBC_testprogram.c
+
+******************************************************************/
+
+#include <math.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+#include "modules/audio_coding/codecs/ilbc/nit_encode.h"
+#include "modules/audio_coding/codecs/ilbc/encode.h"
+#include "modules/audio_coding/codecs/ilbc/init_decode.h"
+#include "modules/audio_coding/codecs/ilbc/decode.h"
+#include "modules/audio_coding/codecs/ilbc/constants.h"
+#include "modules/audio_coding/codecs/ilbc/ilbc.h"
+
+#define ILBCNOOFWORDS_MAX (NO_OF_BYTES_30MS)/2
+
+/* Runtime statistics */
+#include <time.h>
+/* #define CLOCKS_PER_SEC 1000 */
+
+/*----------------------------------------------------------------*
+ * Encoder interface function
+ *---------------------------------------------------------------*/
+
+short encode( /* (o) Number of bytes encoded */
+ IlbcEncoder *iLBCenc_inst, /* (i/o) Encoder instance */
+ int16_t *encoded_data, /* (o) The encoded bytes */
+ int16_t *data /* (i) The signal block to encode */
+ ){
+
+ /* do the actual encoding */
+ WebRtcIlbcfix_Encode((uint16_t *)encoded_data, data, iLBCenc_inst);
+
+ return (iLBCenc_inst->no_of_bytes);
+}
+
+/*----------------------------------------------------------------*
+ * Decoder interface function
+ *---------------------------------------------------------------*/
+
+short decode( /* (o) Number of decoded samples */
+ IlbcDecoder *iLBCdec_inst, /* (i/o) Decoder instance */
+ short *decoded_data, /* (o) Decoded signal block */
+ short *encoded_data, /* (i) Encoded bytes */
+ short mode /* (i) 0=PL, 1=Normal */
+ ){
+
+ /* check if mode is valid */
+
+ if (mode<0 || mode>1) {
+ printf("\nERROR - Wrong mode - 0, 1 allowed\n"); exit(3);}
+
+ /* do actual decoding of block */
+
+ WebRtcIlbcfix_Decode(decoded_data, (uint16_t *)encoded_data,
+ iLBCdec_inst, mode);
+
+ return (iLBCdec_inst->blockl);
+}
+
+/*----------------------------------------------------------------*
+ * Main program to test iLBC encoding and decoding
+ *
+ * Usage:
+ * exefile_name.exe <infile> <bytefile> <outfile> <channelfile>
+ *
+ *---------------------------------------------------------------*/
+
+#define MAXFRAMES 10000
+#define MAXFILELEN (BLOCKL_MAX*MAXFRAMES)
+
+int main(int argc, char* argv[])
+{
+
+ /* Runtime statistics */
+
+ float starttime1, starttime2;
+ float runtime1, runtime2;
+ float outtime;
+
+ FILE *ifileid,*efileid,*ofileid, *chfileid;
+ short *inputdata, *encodeddata, *decodeddata;
+ short *channeldata;
+ int blockcount = 0, noOfBlocks=0, i, noOfLostBlocks=0;
+ short mode;
+ IlbcEncoder Enc_Inst;
+ IlbcDecoder Dec_Inst;
+
+ short frameLen;
+ short count;
+#ifdef SPLIT_10MS
+ short size;
+#endif
+
+ inputdata=(short*) malloc(MAXFILELEN*sizeof(short));
+ if (inputdata==NULL) {
+ fprintf(stderr,"Could not allocate memory for vector\n");
+ exit(0);
+ }
+ encodeddata=(short*) malloc(ILBCNOOFWORDS_MAX*MAXFRAMES*sizeof(short));
+ if (encodeddata==NULL) {
+ fprintf(stderr,"Could not allocate memory for vector\n");
+ free(inputdata);
+ exit(0);
+ }
+ decodeddata=(short*) malloc(MAXFILELEN*sizeof(short));
+ if (decodeddata==NULL) {
+ fprintf(stderr,"Could not allocate memory for vector\n");
+ free(inputdata);
+ free(encodeddata);
+ exit(0);
+ }
+ channeldata=(short*) malloc(MAXFRAMES*sizeof(short));
+ if (channeldata==NULL) {
+ fprintf(stderr,"Could not allocate memory for vector\n");
+ free(inputdata);
+ free(encodeddata);
+ free(decodeddata);
+ exit(0);
+ }
+
+ /* get arguments and open files */
+
+ if (argc != 6 ) {
+ fprintf(stderr, "%s mode inputfile bytefile outputfile channelfile\n",
+ argv[0]);
+ fprintf(stderr, "Example:\n");
+ fprintf(stderr, "%s <30,20> in.pcm byte.dat out.pcm T30.0.dat\n", argv[0]);
+ exit(1);
+ }
+ mode=atoi(argv[1]);
+ if (mode != 20 && mode != 30) {
+ fprintf(stderr,"Wrong mode %s, must be 20, or 30\n", argv[1]);
+ exit(2);
+ }
+ if ( (ifileid=fopen(argv[2],"rb")) == NULL) {
+ fprintf(stderr,"Cannot open input file %s\n", argv[2]);
+ exit(2);}
+ if ( (efileid=fopen(argv[3],"wb")) == NULL) {
+ fprintf(stderr, "Cannot open channelfile file %s\n",
+ argv[3]); exit(3);}
+ if( (ofileid=fopen(argv[4],"wb")) == NULL) {
+ fprintf(stderr, "Cannot open output file %s\n",
+ argv[4]); exit(3);}
+ if ( (chfileid=fopen(argv[5],"rb")) == NULL) {
+ fprintf(stderr,"Cannot open channel file file %s\n", argv[5]);
+ exit(2);}
+
+
+ /* print info */
+#ifndef PRINT_MIPS
+ fprintf(stderr, "\n");
+ fprintf(stderr,
+ "*---------------------------------------------------*\n");
+ fprintf(stderr,
+ "* *\n");
+ fprintf(stderr,
+ "* iLBCtest *\n");
+ fprintf(stderr,
+ "* *\n");
+ fprintf(stderr,
+ "* *\n");
+ fprintf(stderr,
+ "*---------------------------------------------------*\n");
+#ifdef SPLIT_10MS
+ fprintf(stderr,"\n10ms split with raw mode: %2d ms\n", mode);
+#else
+ fprintf(stderr,"\nMode : %2d ms\n", mode);
+#endif
+ fprintf(stderr,"\nInput file : %s\n", argv[2]);
+ fprintf(stderr,"Coded file : %s\n", argv[3]);
+ fprintf(stderr,"Output file : %s\n\n", argv[4]);
+ fprintf(stderr,"Channel file : %s\n\n", argv[5]);
+#endif
+
+ /* Initialization */
+
+ WebRtcIlbcfix_EncoderInit(&Enc_Inst, mode);
+ WebRtcIlbcfix_DecoderInit(&Dec_Inst, mode, 1);
+
+ /* extract the input file and channel file */
+
+#ifdef SPLIT_10MS
+ frameLen = (mode==20)? 80:160;
+ fread(Enc_Inst.past_samples, sizeof(short), frameLen, ifileid);
+ Enc_Inst.section = 0;
+
+ while( fread(&inputdata[noOfBlocks*80], sizeof(short),
+ 80, ifileid) == 80 ) {
+ noOfBlocks++;
+ }
+
+ noOfBlocks += frameLen/80;
+ frameLen = 80;
+#else
+ frameLen = Enc_Inst.blockl;
+
+ while( fread(&inputdata[noOfBlocks*Enc_Inst.blockl],sizeof(short),
+ Enc_Inst.blockl,ifileid)==(uint16_t)Enc_Inst.blockl){
+ noOfBlocks++;
+ }
+#endif
+
+
+ while ((fread(&channeldata[blockcount],sizeof(short), 1,chfileid)==1)
+ && ( blockcount < noOfBlocks/(Enc_Inst.blockl/frameLen) )) {
+ blockcount++;
+ }
+
+ if ( blockcount < noOfBlocks/(Enc_Inst.blockl/frameLen) ) {
+ fprintf(stderr,"Channel file %s is too short\n", argv[4]);
+ free(inputdata);
+ free(encodeddata);
+ free(decodeddata);
+ free(channeldata);
+ exit(0);
+ }
+
+ count=0;
+
+ /* Runtime statistics */
+
+ starttime1 = clock()/(float)CLOCKS_PER_SEC;
+
+ /* Encoding loop */
+#ifdef PRINT_MIPS
+ printf("-1 -1\n");
+#endif
+
+#ifdef SPLIT_10MS
+ /* "Enc_Inst.section != 0" is to make sure we run through full
+ lengths of all vectors for 10ms split mode.
+ */
+ // while( (count < noOfBlocks) || (Enc_Inst.section != 0) ) {
+ while( count < blockcount * (Enc_Inst.blockl/frameLen) ) {
+
+ encode(&Enc_Inst, &encodeddata[Enc_Inst.no_of_words *
+ (count/(Enc_Inst.nsub/2))],
+ &inputdata[frameLen * count] );
+#else
+ while (count < noOfBlocks) {
+ encode( &Enc_Inst, &encodeddata[Enc_Inst.no_of_words * count],
+ &inputdata[frameLen * count] );
+#endif
+
+#ifdef PRINT_MIPS
+ printf("-1 -1\n");
+#endif
+
+ count++;
+ }
+
+ count=0;
+
+ /* Runtime statistics */
+
+ starttime2=clock()/(float)CLOCKS_PER_SEC;
+ runtime1 = (float)(starttime2-starttime1);
+
+ /* Decoding loop */
+
+ while (count < blockcount) {
+ if (channeldata[count]==1) {
+ /* Normal decoding */
+ decode(&Dec_Inst, &decodeddata[count * Dec_Inst.blockl],
+ &encodeddata[Dec_Inst.no_of_words * count], 1);
+ } else if (channeldata[count]==0) {
+ /* PLC */
+ short emptydata[ILBCNOOFWORDS_MAX];
+ memset(emptydata, 0, Dec_Inst.no_of_words*sizeof(short));
+ decode(&Dec_Inst, &decodeddata[count*Dec_Inst.blockl],
+ emptydata, 0);
+ noOfLostBlocks++;
+ } else {
+ printf("Error in channel file (values have to be either 1 or 0)\n");
+ exit(0);
+ }
+#ifdef PRINT_MIPS
+ printf("-1 -1\n");
+#endif
+
+ count++;
+ }
+
+ /* Runtime statistics */
+
+ runtime2 = (float)(clock()/(float)CLOCKS_PER_SEC-starttime2);
+
+ outtime = (float)((float)blockcount*
+ (float)mode/1000.0);
+
+#ifndef PRINT_MIPS
+ printf("\nLength of speech file: %.1f s\n", outtime);
+ printf("Lost frames : %.1f%%\n\n", 100*(float)noOfLostBlocks/(float)blockcount);
+
+ printf("Time to run iLBC_encode+iLBC_decode:");
+ printf(" %.1f s (%.1f%% of realtime)\n", runtime1+runtime2,
+ (100*(runtime1+runtime2)/outtime));
+
+ printf("Time in iLBC_encode :");
+ printf(" %.1f s (%.1f%% of total runtime)\n",
+ runtime1, 100.0*runtime1/(runtime1+runtime2));
+
+ printf("Time in iLBC_decode :");
+ printf(" %.1f s (%.1f%% of total runtime)\n\n",
+ runtime2, 100.0*runtime2/(runtime1+runtime2));
+#endif
+
+ /* Write data to files */
+ for (i=0; i<blockcount; i++) {
+ fwrite(&encodeddata[i*Enc_Inst.no_of_words], sizeof(short),
+ Enc_Inst.no_of_words, efileid);
+ }
+ for (i=0;i<blockcount;i++) {
+ fwrite(&decodeddata[i*Enc_Inst.blockl],sizeof(short),Enc_Inst.blockl,ofileid);
+ }
+
+ /* return memory and close files */
+
+ free(inputdata);
+ free(encodeddata);
+ free(decodeddata);
+ free(channeldata);
+ fclose(ifileid); fclose(efileid); fclose(ofileid);
+ return(0);
+ }
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/unpack_bits.c b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/unpack_bits.c
new file mode 100644
index 0000000000..a9a0147b9d
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/unpack_bits.c
@@ -0,0 +1,241 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_UnpackBits.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/unpack_bits.h"
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ * unpacking of bits from bitstream, i.e., vector of bytes
+ *---------------------------------------------------------------*/
+
+int16_t WebRtcIlbcfix_UnpackBits( /* (o) "Empty" frame indicator */
+    const uint16_t *bitstream,    /* (i) The packetized bitstream */
+    iLBC_bits *enc_bits,      /* (o) Parameters from bitstream */
+ int16_t mode /* (i) Codec mode (20 or 30) */
+ ) {
+ const uint16_t *bitstreamPtr;
+ int i, k;
+ int16_t *tmpPtr;
+
+ bitstreamPtr=bitstream;
+
+ /* First int16_t */
+ enc_bits->lsf[0] = (*bitstreamPtr)>>10; /* Bit 0..5 */
+ enc_bits->lsf[1] = ((*bitstreamPtr)>>3)&0x7F; /* Bit 6..12 */
+ enc_bits->lsf[2] = ((*bitstreamPtr)&0x7)<<4; /* Bit 13..15 */
+ bitstreamPtr++;
+ /* Second int16_t */
+ enc_bits->lsf[2] |= ((*bitstreamPtr)>>12)&0xF; /* Bit 0..3 */
+
+ if (mode==20) {
+ enc_bits->startIdx = ((*bitstreamPtr)>>10)&0x3; /* Bit 4..5 */
+ enc_bits->state_first = ((*bitstreamPtr)>>9)&0x1; /* Bit 6 */
+ enc_bits->idxForMax = ((*bitstreamPtr)>>3)&0x3F; /* Bit 7..12 */
+ enc_bits->cb_index[0] = ((*bitstreamPtr)&0x7)<<4; /* Bit 13..15 */
+ bitstreamPtr++;
+ /* Third int16_t */
+ enc_bits->cb_index[0] |= ((*bitstreamPtr)>>12)&0xE; /* Bit 0..2 */
+ enc_bits->gain_index[0] = ((*bitstreamPtr)>>8)&0x18; /* Bit 3..4 */
+ enc_bits->gain_index[1] = ((*bitstreamPtr)>>7)&0x8; /* Bit 5 */
+ enc_bits->cb_index[3] = ((*bitstreamPtr)>>2)&0xFE; /* Bit 6..12 */
+ enc_bits->gain_index[3] = ((*bitstreamPtr)<<2)&0x10; /* Bit 13 */
+ enc_bits->gain_index[4] = ((*bitstreamPtr)<<2)&0x8; /* Bit 14 */
+ enc_bits->gain_index[6] = ((*bitstreamPtr)<<4)&0x10; /* Bit 15 */
+ } else { /* mode==30 */
+ enc_bits->lsf[3] = ((*bitstreamPtr)>>6)&0x3F; /* Bit 4..9 */
+ enc_bits->lsf[4] = ((*bitstreamPtr)<<1)&0x7E; /* Bit 10..15 */
+ bitstreamPtr++;
+ /* Third int16_t */
+ enc_bits->lsf[4] |= ((*bitstreamPtr)>>15)&0x1; /* Bit 0 */
+ enc_bits->lsf[5] = ((*bitstreamPtr)>>8)&0x7F; /* Bit 1..7 */
+ enc_bits->startIdx = ((*bitstreamPtr)>>5)&0x7; /* Bit 8..10 */
+ enc_bits->state_first = ((*bitstreamPtr)>>4)&0x1; /* Bit 11 */
+ enc_bits->idxForMax = ((*bitstreamPtr)<<2)&0x3C; /* Bit 12..15 */
+ bitstreamPtr++;
+ /* 4:th int16_t */
+ enc_bits->idxForMax |= ((*bitstreamPtr)>>14)&0x3; /* Bit 0..1 */
+ enc_bits->cb_index[0] = ((*bitstreamPtr)>>7)&0x78; /* Bit 2..5 */
+ enc_bits->gain_index[0] = ((*bitstreamPtr)>>5)&0x10; /* Bit 6 */
+ enc_bits->gain_index[1] = ((*bitstreamPtr)>>5)&0x8; /* Bit 7 */
+ enc_bits->cb_index[3] = ((*bitstreamPtr))&0xFC; /* Bit 8..13 */
+ enc_bits->gain_index[3] = ((*bitstreamPtr)<<3)&0x10; /* Bit 14 */
+ enc_bits->gain_index[4] = ((*bitstreamPtr)<<3)&0x8; /* Bit 15 */
+ }
+ /* Class 2 bits of ULP */
+ /* 4:th to 6:th int16_t for 20 ms case
+ 5:th to 7:th int16_t for 30 ms case */
+ bitstreamPtr++;
+ tmpPtr=enc_bits->idxVec;
+ for (k=0; k<3; k++) {
+ for (i=15; i>=0; i--) {
+ (*tmpPtr) = (((*bitstreamPtr)>>i)<<2)&0x4;
+ /* Bit 15-i */
+ tmpPtr++;
+ }
+ bitstreamPtr++;
+ }
+
+ if (mode==20) {
+ /* 7:th int16_t */
+ for (i=15; i>6; i--) {
+ (*tmpPtr) = (((*bitstreamPtr)>>i)<<2)&0x4;
+ /* Bit 15-i */
+ tmpPtr++;
+ }
+ enc_bits->gain_index[1] |= ((*bitstreamPtr)>>4)&0x4; /* Bit 9 */
+ enc_bits->gain_index[3] |= ((*bitstreamPtr)>>2)&0xC; /* Bit 10..11 */
+ enc_bits->gain_index[4] |= ((*bitstreamPtr)>>1)&0x4; /* Bit 12 */
+ enc_bits->gain_index[6] |= ((*bitstreamPtr)<<1)&0x8; /* Bit 13 */
+ enc_bits->gain_index[7] = ((*bitstreamPtr)<<2)&0xC; /* Bit 14..15 */
+
+ } else { /* mode==30 */
+ /* 8:th int16_t */
+ for (i=15; i>5; i--) {
+ (*tmpPtr) = (((*bitstreamPtr)>>i)<<2)&0x4;
+ /* Bit 15-i */
+ tmpPtr++;
+ }
+ enc_bits->cb_index[0] |= ((*bitstreamPtr)>>3)&0x6; /* Bit 10..11 */
+ enc_bits->gain_index[0] |= ((*bitstreamPtr))&0x8; /* Bit 12 */
+ enc_bits->gain_index[1] |= ((*bitstreamPtr))&0x4; /* Bit 13 */
+ enc_bits->cb_index[3] |= ((*bitstreamPtr))&0x2; /* Bit 14 */
+ enc_bits->cb_index[6] = ((*bitstreamPtr)<<7)&0x80; /* Bit 15 */
+ bitstreamPtr++;
+ /* 9:th int16_t */
+ enc_bits->cb_index[6] |= ((*bitstreamPtr)>>9)&0x7E; /* Bit 0..5 */
+ enc_bits->cb_index[9] = ((*bitstreamPtr)>>2)&0xFE; /* Bit 6..12 */
+ enc_bits->cb_index[12] = ((*bitstreamPtr)<<5)&0xE0; /* Bit 13..15 */
+ bitstreamPtr++;
+ /* 10:th int16_t */
+ enc_bits->cb_index[12] |= ((*bitstreamPtr)>>11)&0x1E;/* Bit 0..3 */
+ enc_bits->gain_index[3] |= ((*bitstreamPtr)>>8)&0xC; /* Bit 4..5 */
+ enc_bits->gain_index[4] |= ((*bitstreamPtr)>>7)&0x6; /* Bit 6..7 */
+ enc_bits->gain_index[6] = ((*bitstreamPtr)>>3)&0x18; /* Bit 8..9 */
+ enc_bits->gain_index[7] = ((*bitstreamPtr)>>2)&0xC; /* Bit 10..11 */
+ enc_bits->gain_index[9] = ((*bitstreamPtr)<<1)&0x10; /* Bit 12 */
+ enc_bits->gain_index[10] = ((*bitstreamPtr)<<1)&0x8; /* Bit 13 */
+ enc_bits->gain_index[12] = ((*bitstreamPtr)<<3)&0x10; /* Bit 14 */
+ enc_bits->gain_index[13] = ((*bitstreamPtr)<<3)&0x8; /* Bit 15 */
+ }
+ bitstreamPtr++;
+ /* Class 3 bits of ULP */
+ /* 8:th to 14:th int16_t for 20 ms case
+ 11:th to 17:th int16_t for 30 ms case */
+ tmpPtr=enc_bits->idxVec;
+ for (k=0; k<7; k++) {
+ for (i=14; i>=0; i-=2) {
+ (*tmpPtr) |= ((*bitstreamPtr)>>i)&0x3; /* Bit 15-i..14-i*/
+ tmpPtr++;
+ }
+ bitstreamPtr++;
+ }
+
+ if (mode==20) {
+ /* 15:th int16_t */
+ enc_bits->idxVec[56] |= ((*bitstreamPtr)>>14)&0x3; /* Bit 0..1 */
+ enc_bits->cb_index[0] |= ((*bitstreamPtr)>>13)&0x1; /* Bit 2 */
+ enc_bits->cb_index[1] = ((*bitstreamPtr)>>6)&0x7F; /* Bit 3..9 */
+ enc_bits->cb_index[2] = ((*bitstreamPtr)<<1)&0x7E; /* Bit 10..15 */
+ bitstreamPtr++;
+ /* 16:th int16_t */
+ enc_bits->cb_index[2] |= ((*bitstreamPtr)>>15)&0x1; /* Bit 0 */
+ enc_bits->gain_index[0] |= ((*bitstreamPtr)>>12)&0x7; /* Bit 1..3 */
+ enc_bits->gain_index[1] |= ((*bitstreamPtr)>>10)&0x3; /* Bit 4..5 */
+ enc_bits->gain_index[2] = ((*bitstreamPtr)>>7)&0x7; /* Bit 6..8 */
+ enc_bits->cb_index[3] |= ((*bitstreamPtr)>>6)&0x1; /* Bit 9 */
+ enc_bits->cb_index[4] = ((*bitstreamPtr)<<1)&0x7E; /* Bit 10..15 */
+ bitstreamPtr++;
+ /* 17:th int16_t */
+ enc_bits->cb_index[4] |= ((*bitstreamPtr)>>15)&0x1; /* Bit 0 */
+ enc_bits->cb_index[5] = ((*bitstreamPtr)>>8)&0x7F; /* Bit 1..7 */
+ enc_bits->cb_index[6] = ((*bitstreamPtr))&0xFF; /* Bit 8..15 */
+ bitstreamPtr++;
+ /* 18:th int16_t */
+ enc_bits->cb_index[7] = (*bitstreamPtr)>>8; /* Bit 0..7 */
+ enc_bits->cb_index[8] = (*bitstreamPtr)&0xFF; /* Bit 8..15 */
+ bitstreamPtr++;
+ /* 19:th int16_t */
+ enc_bits->gain_index[3] |= ((*bitstreamPtr)>>14)&0x3; /* Bit 0..1 */
+ enc_bits->gain_index[4] |= ((*bitstreamPtr)>>12)&0x3; /* Bit 2..3 */
+ enc_bits->gain_index[5] = ((*bitstreamPtr)>>9)&0x7; /* Bit 4..6 */
+ enc_bits->gain_index[6] |= ((*bitstreamPtr)>>6)&0x7; /* Bit 7..9 */
+ enc_bits->gain_index[7] |= ((*bitstreamPtr)>>4)&0x3; /* Bit 10..11 */
+ enc_bits->gain_index[8] = ((*bitstreamPtr)>>1)&0x7; /* Bit 12..14 */
+ } else { /* mode==30 */
+ /* 18:th int16_t */
+ enc_bits->idxVec[56] |= ((*bitstreamPtr)>>14)&0x3; /* Bit 0..1 */
+ enc_bits->idxVec[57] |= ((*bitstreamPtr)>>12)&0x3; /* Bit 2..3 */
+ enc_bits->cb_index[0] |= ((*bitstreamPtr)>>11)&1; /* Bit 4 */
+ enc_bits->cb_index[1] = ((*bitstreamPtr)>>4)&0x7F; /* Bit 5..11 */
+ enc_bits->cb_index[2] = ((*bitstreamPtr)<<3)&0x78; /* Bit 12..15 */
+ bitstreamPtr++;
+ /* 19:th int16_t */
+ enc_bits->cb_index[2] |= ((*bitstreamPtr)>>13)&0x7; /* Bit 0..2 */
+ enc_bits->gain_index[0] |= ((*bitstreamPtr)>>10)&0x7; /* Bit 3..5 */
+ enc_bits->gain_index[1] |= ((*bitstreamPtr)>>8)&0x3; /* Bit 6..7 */
+ enc_bits->gain_index[2] = ((*bitstreamPtr)>>5)&0x7; /* Bit 8..10 */
+ enc_bits->cb_index[3] |= ((*bitstreamPtr)>>4)&0x1; /* Bit 11 */
+ enc_bits->cb_index[4] = ((*bitstreamPtr)<<3)&0x78; /* Bit 12..15 */
+ bitstreamPtr++;
+ /* 20:th int16_t */
+ enc_bits->cb_index[4] |= ((*bitstreamPtr)>>13)&0x7; /* Bit 0..2 */
+ enc_bits->cb_index[5] = ((*bitstreamPtr)>>6)&0x7F; /* Bit 3..9 */
+ enc_bits->cb_index[6] |= ((*bitstreamPtr)>>5)&0x1; /* Bit 10 */
+ enc_bits->cb_index[7] = ((*bitstreamPtr)<<3)&0xF8; /* Bit 11..15 */
+ bitstreamPtr++;
+ /* 21:st int16_t */
+ enc_bits->cb_index[7] |= ((*bitstreamPtr)>>13)&0x7; /* Bit 0..2 */
+ enc_bits->cb_index[8] = ((*bitstreamPtr)>>5)&0xFF; /* Bit 3..10 */
+ enc_bits->cb_index[9] |= ((*bitstreamPtr)>>4)&0x1; /* Bit 11 */
+ enc_bits->cb_index[10] = ((*bitstreamPtr)<<4)&0xF0; /* Bit 12..15 */
+ bitstreamPtr++;
+ /* 22:nd int16_t */
+ enc_bits->cb_index[10] |= ((*bitstreamPtr)>>12)&0xF; /* Bit 0..3 */
+ enc_bits->cb_index[11] = ((*bitstreamPtr)>>4)&0xFF; /* Bit 4..11 */
+ enc_bits->cb_index[12] |= ((*bitstreamPtr)>>3)&0x1; /* Bit 12 */
+ enc_bits->cb_index[13] = ((*bitstreamPtr)<<5)&0xE0; /* Bit 13..15 */
+ bitstreamPtr++;
+ /* 23:rd int16_t */
+ enc_bits->cb_index[13] |= ((*bitstreamPtr)>>11)&0x1F;/* Bit 0..4 */
+ enc_bits->cb_index[14] = ((*bitstreamPtr)>>3)&0xFF; /* Bit 5..12 */
+ enc_bits->gain_index[3] |= ((*bitstreamPtr)>>1)&0x3; /* Bit 13..14 */
+ enc_bits->gain_index[4] |= ((*bitstreamPtr)&0x1); /* Bit 15 */
+ bitstreamPtr++;
+    /* 24:th int16_t */
+ enc_bits->gain_index[5] = ((*bitstreamPtr)>>13)&0x7; /* Bit 0..2 */
+ enc_bits->gain_index[6] |= ((*bitstreamPtr)>>10)&0x7; /* Bit 3..5 */
+ enc_bits->gain_index[7] |= ((*bitstreamPtr)>>8)&0x3; /* Bit 6..7 */
+ enc_bits->gain_index[8] = ((*bitstreamPtr)>>5)&0x7; /* Bit 8..10 */
+ enc_bits->gain_index[9] |= ((*bitstreamPtr)>>1)&0xF; /* Bit 11..14 */
+ enc_bits->gain_index[10] |= ((*bitstreamPtr)<<2)&0x4; /* Bit 15 */
+ bitstreamPtr++;
+    /* 25:th int16_t */
+ enc_bits->gain_index[10] |= ((*bitstreamPtr)>>14)&0x3; /* Bit 0..1 */
+ enc_bits->gain_index[11] = ((*bitstreamPtr)>>11)&0x7; /* Bit 2..4 */
+ enc_bits->gain_index[12] |= ((*bitstreamPtr)>>7)&0xF; /* Bit 5..8 */
+ enc_bits->gain_index[13] |= ((*bitstreamPtr)>>4)&0x7; /* Bit 9..11 */
+ enc_bits->gain_index[14] = ((*bitstreamPtr)>>1)&0x7; /* Bit 12..14 */
+ }
+ /* Last bit should be zero, otherwise it's an "empty" frame */
+ if (((*bitstreamPtr)&0x1) == 1) {
+ return(1);
+ } else {
+ return(0);
+ }
+}
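
Several fields above straddle a 16-bit word boundary; lsf[2], for instance, is a 7-bit value whose top 3 bits end the first word and whose low 4 bits start the second. A standalone sketch of that reassembly pattern (hypothetical word values):

#include <stdint.h>
#include <stdio.h>

int main(void) {
  uint16_t w0 = 0x0005; /* low 3 bits (101) carry the field's top bits */
  uint16_t w1 = 0x9000; /* top 4 bits (1001) carry the field's low bits */
  int16_t lsf2;
  lsf2 = (int16_t)(((w0 & 0x7) << 4)    /* bits 13..15 of word 0 */
                 | ((w1 >> 12) & 0xF)); /* bits 0..3 of word 1   */
  printf("lsf[2] = 0x%02X\n", lsf2); /* 0b1011001 = 0x59 */
  return 0;
}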
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/unpack_bits.h b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/unpack_bits.h
new file mode 100644
index 0000000000..1a63280e6b
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/unpack_bits.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_UnpackBits.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_UNPACK_BITS_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_UNPACK_BITS_H_
+
+#include <stdint.h>
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ * unpacking of bits from bitstream, i.e., vector of bytes
+ *---------------------------------------------------------------*/
+
+int16_t
+WebRtcIlbcfix_UnpackBits(/* (o) "Empty" frame indicator */
+    const uint16_t*
+        bitstream, /* (i) The packetized bitstream */
+    iLBC_bits*
+        enc_bits, /* (o) Parameters from bitstream */
+ int16_t mode /* (i) Codec mode (20 or 30) */
+ );
+
+#endif
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/vq3.c b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/vq3.c
new file mode 100644
index 0000000000..d9375fb995
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/vq3.c
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_Vq3.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/vq3.h"
+
+#include "modules/audio_coding/codecs/ilbc/constants.h"
+
+/*----------------------------------------------------------------*
+ * vector quantization
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_Vq3(
+ int16_t *Xq, /* quantized vector (Q13) */
+ int16_t *index,
+ int16_t *CB, /* codebook in Q13 */
+ int16_t *X, /* vector to quantize (Q13) */
+ int16_t n_cb
+ ){
+ int16_t i, j;
+ int16_t pos, minindex=0;
+ int16_t tmp;
+ int32_t dist, mindist;
+
+ pos = 0;
+ mindist = WEBRTC_SPL_WORD32_MAX; /* start value */
+
+ /* Find the codebook with the lowest square distance */
+ for (j = 0; j < n_cb; j++) {
+ tmp = X[0] - CB[pos];
+ dist = tmp * tmp;
+ for (i = 1; i < 3; i++) {
+ tmp = X[i] - CB[pos + i];
+ dist += tmp * tmp;
+ }
+
+ if (dist < mindist) {
+ mindist = dist;
+ minindex = j;
+ }
+ pos += 3;
+ }
+
+ /* Store the quantized codebook and the index */
+ for (i = 0; i < 3; i++) {
+ Xq[i] = CB[minindex*3 + i];
+ }
+ *index = minindex;
+
+}
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/vq3.h b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/vq3.h
new file mode 100644
index 0000000000..c946478a1a
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/vq3.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_Vq3.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_VQ3_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_VQ3_H_
+
+#include <stdint.h>
+
+/*----------------------------------------------------------------*
+ * Vector quantization of order 3 (based on MSE)
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_Vq3(
+ int16_t* Xq, /* (o) the quantized vector (Q13) */
+ int16_t* index, /* (o) the quantization index */
+ int16_t* CB, /* (i) the vector quantization codebook (Q13) */
+ int16_t* X, /* (i) the vector to quantize (Q13) */
+ int16_t n_cb /* (i) the number of vectors in the codebook */
+ );
+
+#endif
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/vq4.c b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/vq4.c
new file mode 100644
index 0000000000..c9a65aec2a
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/vq4.c
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_Vq4.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/vq4.h"
+
+#include "modules/audio_coding/codecs/ilbc/constants.h"
+
+/*----------------------------------------------------------------*
+ * vector quantization
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_Vq4(
+ int16_t *Xq, /* quantized vector (Q13) */
+ int16_t *index,
+ int16_t *CB, /* codebook in Q13 */
+ int16_t *X, /* vector to quantize (Q13) */
+ int16_t n_cb
+ ){
+ int16_t i, j;
+ int16_t pos, minindex=0;
+ int16_t tmp;
+ int32_t dist, mindist;
+
+ pos = 0;
+ mindist = WEBRTC_SPL_WORD32_MAX; /* start value */
+
+ /* Find the codebook with the lowest square distance */
+ for (j = 0; j < n_cb; j++) {
+ tmp = X[0] - CB[pos];
+ dist = tmp * tmp;
+ for (i = 1; i < 4; i++) {
+ tmp = X[i] - CB[pos + i];
+ dist += tmp * tmp;
+ }
+
+ if (dist < mindist) {
+ mindist = dist;
+ minindex = j;
+ }
+ pos += 4;
+ }
+
+ /* Store the quantized codebook and the index */
+ for (i = 0; i < 4; i++) {
+ Xq[i] = CB[minindex*4 + i];
+ }
+ *index = minindex;
+}
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/vq4.h b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/vq4.h
new file mode 100644
index 0000000000..6d14830c03
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/vq4.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_Vq4.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_VQ4_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_VQ4_H_
+
+#include <stdint.h>
+
+/*----------------------------------------------------------------*
+ * Vector quantization of order 4 (based on MSE)
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_Vq4(
+ int16_t* Xq, /* (o) the quantized vector (Q13) */
+ int16_t* index, /* (o) the quantization index */
+ int16_t* CB, /* (i) the vector quantization codebook (Q13) */
+ int16_t* X, /* (i) the vector to quantize (Q13) */
+ int16_t n_cb /* (i) the number of vectors in the codebook */
+ );
+
+#endif
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/window32_w32.c b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/window32_w32.c
new file mode 100644
index 0000000000..e82d167220
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/window32_w32.c
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_Window32W32.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/window32_w32.h"
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ * window multiplication
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_Window32W32(
+ int32_t *z, /* Output */
+ int32_t *x, /* Input (same domain as Output)*/
+ const int32_t *y, /* Q31 Window */
+ size_t N /* length to process */
+ ) {
+ size_t i;
+ int16_t x_low, x_hi, y_low, y_hi;
+ int16_t left_shifts;
+ int32_t temp;
+
+ left_shifts = (int16_t)WebRtcSpl_NormW32(x[0]);
+ WebRtcSpl_VectorBitShiftW32(x, N, x, (int16_t)(-left_shifts));
+
+
+ /* The double precision numbers use a special representation:
+ * w32 = hi<<16 + lo<<1
+ */
+ for (i = 0; i < N; i++) {
+ /* Extract the high 16 bits */
+ x_hi = (int16_t)(x[i] >> 16);
+ y_hi = (int16_t)(y[i] >> 16);
+
+ /* Extract the low part, defined as (w32 - (hi<<16)) >> 1 */
+ x_low = (int16_t)((x[i] - (x_hi << 16)) >> 1);
+
+ y_low = (int16_t)((y[i] - (y_hi << 16)) >> 1);
+
+ /* Calculate z by a 32 bit multiplication using both low and high from x and y */
+ temp = ((x_hi * y_hi) << 1) + ((x_hi * y_low) >> 14);
+
+ z[i] = temp + ((x_low * y_hi) >> 14);
+ }
+
+ WebRtcSpl_VectorBitShiftW32(z, N, z, left_shifts);
+
+ return;
+}
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/window32_w32.h b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/window32_w32.h
new file mode 100644
index 0000000000..15d72c5ba2
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/window32_w32.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_Window32W32.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_WINDOW32_W32_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_WINDOW32_W32_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+/*----------------------------------------------------------------*
+ * window multiplication
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_Window32W32(int32_t* z, /* Output */
+ int32_t* x, /* Input (same domain as Output)*/
+ const int32_t* y, /* Q31 Window */
+ size_t N /* length to process */
+ );
+
+#endif
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/xcorr_coef.c b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/xcorr_coef.c
new file mode 100644
index 0000000000..9dc880b37e
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/xcorr_coef.c
@@ -0,0 +1,142 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_XcorrCoef.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/xcorr_coef.h"
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ * cross correlation which finds the optimal lag for the
+ * crossCorr*crossCorr/(energy) criteria
+ *---------------------------------------------------------------*/
+
+size_t WebRtcIlbcfix_XcorrCoef(
+ int16_t *target, /* (i) first array */
+ int16_t *regressor, /* (i) second array */
+ size_t subl, /* (i) dimension arrays */
+ size_t searchLen, /* (i) the search length */
+ size_t offset, /* (i) samples offset between arrays */
+ int16_t step /* (i) +1 or -1 */
+ ){
+ size_t k;
+ size_t maxlag;
+ int16_t pos;
+ int16_t max;
+ int16_t crossCorrScale, Energyscale;
+ int16_t crossCorrSqMod, crossCorrSqMod_Max;
+ int32_t crossCorr, Energy;
+ int16_t crossCorrmod, EnergyMod, EnergyMod_Max;
+ int16_t *tp, *rp;
+ int16_t *rp_beg, *rp_end;
+ int16_t totscale, totscale_max;
+ int16_t scalediff;
+ int32_t newCrit, maxCrit;
+ int shifts;
+
+ /* Initializations, to make sure that the first one is selected */
+ crossCorrSqMod_Max=0;
+ EnergyMod_Max=WEBRTC_SPL_WORD16_MAX;
+ totscale_max=-500;
+ maxlag=0;
+ pos=0;
+
+ /* Find scale value and start position */
+ if (step==1) {
+ max=WebRtcSpl_MaxAbsValueW16(regressor, subl + searchLen - 1);
+ rp_beg = regressor;
+ rp_end = regressor + subl;
+ } else { /* step==-1 */
+ max = WebRtcSpl_MaxAbsValueW16(regressor - searchLen, subl + searchLen - 1);
+ rp_beg = regressor - 1;
+ rp_end = regressor + subl - 1;
+ }
+
+ /* Introduce a scale factor on the Energy in int32_t in
+ order to make sure that the calculation does not
+ overflow */
+
+ if (max>5000) {
+ shifts=2;
+ } else {
+ shifts=0;
+ }
+
+ /* Calculate the first energy, then do a +/- to get the other energies */
+ Energy=WebRtcSpl_DotProductWithScale(regressor, regressor, subl, shifts);
+
+ for (k=0;k<searchLen;k++) {
+ tp = target;
+ rp = &regressor[pos];
+
+ crossCorr=WebRtcSpl_DotProductWithScale(tp, rp, subl, shifts);
+
+ if ((Energy>0)&&(crossCorr>0)) {
+
+ /* Put cross correlation and energy on 16 bit word */
+ crossCorrScale=(int16_t)WebRtcSpl_NormW32(crossCorr)-16;
+ crossCorrmod=(int16_t)WEBRTC_SPL_SHIFT_W32(crossCorr, crossCorrScale);
+ Energyscale=(int16_t)WebRtcSpl_NormW32(Energy)-16;
+ EnergyMod=(int16_t)WEBRTC_SPL_SHIFT_W32(Energy, Energyscale);
+
+ /* Square cross correlation and store upper int16_t */
+ crossCorrSqMod = (int16_t)((crossCorrmod * crossCorrmod) >> 16);
+
+ /* Calculate the total number of (dynamic) right shifts that have
+ been performed on (crossCorr*crossCorr)/energy
+ */
+ totscale=Energyscale-(crossCorrScale<<1);
+
+ /* Calculate the shift difference in order to be able to compare the two
+ (crossCorr*crossCorr)/energy in the same domain
+ */
+ scalediff=totscale-totscale_max;
+ scalediff=WEBRTC_SPL_MIN(scalediff,31);
+ scalediff=WEBRTC_SPL_MAX(scalediff,-31);
+
+ /* Compute the cross multiplication between the old best criteria
+ and the new one to be able to compare them without using a
+ division */
+
+ if (scalediff<0) {
+ newCrit = ((int32_t)crossCorrSqMod*EnergyMod_Max)>>(-scalediff);
+ maxCrit = ((int32_t)crossCorrSqMod_Max*EnergyMod);
+ } else {
+ newCrit = ((int32_t)crossCorrSqMod*EnergyMod_Max);
+ maxCrit = ((int32_t)crossCorrSqMod_Max*EnergyMod)>>scalediff;
+ }
+
+ /* Store the new lag value if the new criteria is larger
+ than previous largest criteria */
+
+ if (newCrit > maxCrit) {
+ crossCorrSqMod_Max = crossCorrSqMod;
+ EnergyMod_Max = EnergyMod;
+ totscale_max = totscale;
+ maxlag = k;
+ }
+ }
+ pos+=step;
+
+ /* Do a +/- to get the next energy */
+ Energy += step * ((*rp_end * *rp_end - *rp_beg * *rp_beg) >> shifts);
+ rp_beg+=step;
+ rp_end+=step;
+ }
+
+ return(maxlag+offset);
+}
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/xcorr_coef.h b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/xcorr_coef.h
new file mode 100644
index 0000000000..3be5a296b5
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/xcorr_coef.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_XcorrCoef.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_XCORR_COEF_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_XCORR_COEF_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+/*----------------------------------------------------------------*
+ * cross correlation which finds the optimal lag for the
+ * crossCorr*crossCorr/(energy) criteria
+ *---------------------------------------------------------------*/
+
+size_t WebRtcIlbcfix_XcorrCoef(
+ int16_t* target, /* (i) first array */
+ int16_t* regressor, /* (i) second array */
+ size_t subl, /* (i) dimension arrays */
+ size_t searchLen, /* (i) the search length */
+ size_t offset, /* (i) samples offset between arrays */
+ int16_t step /* (i) +1 or -1 */
+ );
+
+#endif
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/audio_decoder_isac_t.h b/third_party/libwebrtc/modules/audio_coding/codecs/isac/audio_decoder_isac_t.h
new file mode 100644
index 0000000000..aae708f295
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/audio_decoder_isac_t.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_AUDIO_DECODER_ISAC_T_H_
+#define MODULES_AUDIO_CODING_CODECS_ISAC_AUDIO_DECODER_ISAC_T_H_
+
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/audio_codecs/audio_decoder.h"
+#include "api/scoped_refptr.h"
+
+namespace webrtc {
+
+template <typename T>
+class AudioDecoderIsacT final : public AudioDecoder {
+ public:
+ struct Config {
+ bool IsOk() const;
+ int sample_rate_hz = 16000;
+ };
+ explicit AudioDecoderIsacT(const Config& config);
+ ~AudioDecoderIsacT() override;
+
+ AudioDecoderIsacT(const AudioDecoderIsacT&) = delete;
+ AudioDecoderIsacT& operator=(const AudioDecoderIsacT&) = delete;
+
+ bool HasDecodePlc() const override;
+ size_t DecodePlc(size_t num_frames, int16_t* decoded) override;
+ void Reset() override;
+ int ErrorCode() override;
+ int SampleRateHz() const override;
+ size_t Channels() const override;
+ int DecodeInternal(const uint8_t* encoded,
+ size_t encoded_len,
+ int sample_rate_hz,
+ int16_t* decoded,
+ SpeechType* speech_type) override;
+
+ private:
+ typename T::instance_type* isac_state_;
+ int sample_rate_hz_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_CODING_CODECS_ISAC_AUDIO_DECODER_ISAC_T_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/audio_decoder_isac_t_impl.h b/third_party/libwebrtc/modules/audio_coding/codecs/isac/audio_decoder_isac_t_impl.h
new file mode 100644
index 0000000000..2e43fd317f
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/audio_decoder_isac_t_impl.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_AUDIO_DECODER_ISAC_T_IMPL_H_
+#define MODULES_AUDIO_CODING_CODECS_ISAC_AUDIO_DECODER_ISAC_T_IMPL_H_
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+template <typename T>
+bool AudioDecoderIsacT<T>::Config::IsOk() const {
+ return (sample_rate_hz == 16000 || sample_rate_hz == 32000);
+}
+
+template <typename T>
+AudioDecoderIsacT<T>::AudioDecoderIsacT(const Config& config)
+ : sample_rate_hz_(config.sample_rate_hz) {
+ RTC_CHECK(config.IsOk()) << "Unsupported sample rate "
+ << config.sample_rate_hz;
+ RTC_CHECK_EQ(0, T::Create(&isac_state_));
+ T::DecoderInit(isac_state_);
+ RTC_CHECK_EQ(0, T::SetDecSampRate(isac_state_, sample_rate_hz_));
+}
+
+template <typename T>
+AudioDecoderIsacT<T>::~AudioDecoderIsacT() {
+ RTC_CHECK_EQ(0, T::Free(isac_state_));
+}
+
+template <typename T>
+int AudioDecoderIsacT<T>::DecodeInternal(const uint8_t* encoded,
+ size_t encoded_len,
+ int sample_rate_hz,
+ int16_t* decoded,
+ SpeechType* speech_type) {
+ RTC_CHECK_EQ(sample_rate_hz_, sample_rate_hz);
+ int16_t temp_type = 1; // Default is speech.
+ int ret =
+ T::DecodeInternal(isac_state_, encoded, encoded_len, decoded, &temp_type);
+ *speech_type = ConvertSpeechType(temp_type);
+ return ret;
+}
+
+template <typename T>
+bool AudioDecoderIsacT<T>::HasDecodePlc() const {
+ return false;
+}
+
+template <typename T>
+size_t AudioDecoderIsacT<T>::DecodePlc(size_t num_frames, int16_t* decoded) {
+ return T::DecodePlc(isac_state_, decoded, num_frames);
+}
+
+template <typename T>
+void AudioDecoderIsacT<T>::Reset() {
+ T::DecoderInit(isac_state_);
+}
+
+template <typename T>
+int AudioDecoderIsacT<T>::ErrorCode() {
+ return T::GetErrorCode(isac_state_);
+}
+
+template <typename T>
+int AudioDecoderIsacT<T>::SampleRateHz() const {
+ return sample_rate_hz_;
+}
+
+template <typename T>
+size_t AudioDecoderIsacT<T>::Channels() const {
+ return 1;
+}
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_CODING_CODECS_ISAC_AUDIO_DECODER_ISAC_T_IMPL_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/audio_encoder_isac_t.h b/third_party/libwebrtc/modules/audio_coding/codecs/isac/audio_encoder_isac_t.h
new file mode 100644
index 0000000000..c382ea076e
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/audio_encoder_isac_t.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_AUDIO_ENCODER_ISAC_T_H_
+#define MODULES_AUDIO_CODING_CODECS_ISAC_AUDIO_ENCODER_ISAC_T_H_
+
+#include <utility>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/audio_codecs/audio_encoder.h"
+#include "api/scoped_refptr.h"
+#include "api/units/time_delta.h"
+#include "system_wrappers/include/field_trial.h"
+
+namespace webrtc {
+
+template <typename T>
+class AudioEncoderIsacT final : public AudioEncoder {
+ public:
+ // Allowed combinations of sample rate, frame size, and bit rate are
+ // - 16000 Hz, 30 ms, 10000-32000 bps
+ // - 16000 Hz, 60 ms, 10000-32000 bps
+ // - 32000 Hz, 30 ms, 10000-56000 bps (if T has super-wideband support)
+ struct Config {
+ bool IsOk() const;
+ int payload_type = 103;
+ int sample_rate_hz = 16000;
+ int frame_size_ms = 30;
+ int bit_rate = kDefaultBitRate; // Limit on the short-term average bit
+ // rate, in bits/s.
+ int max_payload_size_bytes = -1;
+ int max_bit_rate = -1;
+ };
+
+ explicit AudioEncoderIsacT(const Config& config);
+ ~AudioEncoderIsacT() override;
+
+ AudioEncoderIsacT(const AudioEncoderIsacT&) = delete;
+ AudioEncoderIsacT& operator=(const AudioEncoderIsacT&) = delete;
+
+ int SampleRateHz() const override;
+ size_t NumChannels() const override;
+ size_t Num10MsFramesInNextPacket() const override;
+ size_t Max10MsFramesInAPacket() const override;
+ int GetTargetBitrate() const override;
+ void SetTargetBitrate(int target_bps) override;
+ void OnReceivedTargetAudioBitrate(int target_bps) override;
+ void OnReceivedUplinkBandwidth(
+ int target_audio_bitrate_bps,
+ absl::optional<int64_t> bwe_period_ms) override;
+ void OnReceivedUplinkAllocation(BitrateAllocationUpdate update) override;
+ void OnReceivedOverhead(size_t overhead_bytes_per_packet) override;
+ EncodedInfo EncodeImpl(uint32_t rtp_timestamp,
+ rtc::ArrayView<const int16_t> audio,
+ rtc::Buffer* encoded) override;
+ void Reset() override;
+ absl::optional<std::pair<TimeDelta, TimeDelta>> GetFrameLengthRange()
+ const override;
+
+ private:
+ // This value is taken from STREAM_SIZE_MAX_60 for iSAC float (60 ms) and
+ // STREAM_MAXW16_60MS for iSAC fix (60 ms).
+ static const size_t kSufficientEncodeBufferSizeBytes = 400;
+
+ static constexpr int kDefaultBitRate = 32000;
+ static constexpr int kMinBitrateBps = 10000;
+ static constexpr int MaxBitrateBps(int sample_rate_hz) {
+ return sample_rate_hz == 32000 ? 56000 : 32000;
+ }
+
+ void SetTargetBitrate(int target_bps, bool subtract_per_packet_overhead);
+
+ // Recreate the iSAC encoder instance with the given settings, and save them.
+ void RecreateEncoderInstance(const Config& config);
+
+ Config config_;
+ typename T::instance_type* isac_state_ = nullptr;
+
+ // Have we accepted input but not yet emitted it in a packet?
+ bool packet_in_progress_ = false;
+
+ // Timestamp of the first input of the currently in-progress packet.
+ uint32_t packet_timestamp_;
+
+ // Timestamp of the previously encoded packet.
+ uint32_t last_encoded_timestamp_;
+
+ // Cache the value of the "WebRTC-SendSideBwe-WithOverhead" field trial.
+ const bool send_side_bwe_with_overhead_ =
+ !field_trial::IsDisabled("WebRTC-SendSideBwe-WithOverhead");
+
+ // When we send a packet, expect this many bytes of headers to be added to it.
+ // Start out with a reasonable default that we can use until we receive a real
+ // value.
+ DataSize overhead_per_packet_ = DataSize::Bytes(28);
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_CODING_CODECS_ISAC_AUDIO_ENCODER_ISAC_T_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/audio_encoder_isac_t_impl.h b/third_party/libwebrtc/modules/audio_coding/codecs/isac/audio_encoder_isac_t_impl.h
new file mode 100644
index 0000000000..fa84515204
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/audio_encoder_isac_t_impl.h
@@ -0,0 +1,224 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_AUDIO_ENCODER_ISAC_T_IMPL_H_
+#define MODULES_AUDIO_CODING_CODECS_ISAC_AUDIO_ENCODER_ISAC_T_IMPL_H_
+
+#include "rtc_base/checks.h"
+#include "rtc_base/numerics/safe_minmax.h"
+
+namespace webrtc {
+
+template <typename T>
+bool AudioEncoderIsacT<T>::Config::IsOk() const {
+ if (max_bit_rate < 32000 && max_bit_rate != -1)
+ return false;
+ if (max_payload_size_bytes < 120 && max_payload_size_bytes != -1)
+ return false;
+
+ switch (sample_rate_hz) {
+ case 16000:
+ if (max_bit_rate > 53400)
+ return false;
+ if (max_payload_size_bytes > 400)
+ return false;
+ return (frame_size_ms == 30 || frame_size_ms == 60) &&
+ (bit_rate == 0 || (bit_rate >= 10000 && bit_rate <= 32000));
+ case 32000:
+ if (max_bit_rate > 160000)
+ return false;
+ if (max_payload_size_bytes > 600)
+ return false;
+ return T::has_swb &&
+ (frame_size_ms == 30 &&
+ (bit_rate == 0 || (bit_rate >= 10000 && bit_rate <= 56000)));
+ default:
+ return false;
+ }
+}
+
+template <typename T>
+AudioEncoderIsacT<T>::AudioEncoderIsacT(const Config& config) {
+ RecreateEncoderInstance(config);
+}
+
+template <typename T>
+AudioEncoderIsacT<T>::~AudioEncoderIsacT() {
+ RTC_CHECK_EQ(0, T::Free(isac_state_));
+}
+
+template <typename T>
+int AudioEncoderIsacT<T>::SampleRateHz() const {
+ return T::EncSampRate(isac_state_);
+}
+
+template <typename T>
+size_t AudioEncoderIsacT<T>::NumChannels() const {
+ return 1;
+}
+
+template <typename T>
+size_t AudioEncoderIsacT<T>::Num10MsFramesInNextPacket() const {
+ const int samples_in_next_packet = T::GetNewFrameLen(isac_state_);
+ return static_cast<size_t>(rtc::CheckedDivExact(
+ samples_in_next_packet, rtc::CheckedDivExact(SampleRateHz(), 100)));
+}
+
+template <typename T>
+size_t AudioEncoderIsacT<T>::Max10MsFramesInAPacket() const {
+ return 6; // iSAC puts at most 60 ms in a packet.
+}
+
+template <typename T>
+int AudioEncoderIsacT<T>::GetTargetBitrate() const {
+ return config_.bit_rate == 0 ? kDefaultBitRate : config_.bit_rate;
+}
+
+template <typename T>
+void AudioEncoderIsacT<T>::SetTargetBitrate(int target_bps) {
+ // Set target bitrate directly without subtracting per-packet overhead,
+ // because that's what AudioEncoderOpus does.
+ SetTargetBitrate(target_bps,
+ /*subtract_per_packet_overhead=*/false);
+}
+
+template <typename T>
+void AudioEncoderIsacT<T>::OnReceivedTargetAudioBitrate(int target_bps) {
+ // Set target bitrate directly without subtracting per-packet overhead,
+ // because that's what AudioEncoderOpus does.
+ SetTargetBitrate(target_bps,
+ /*subtract_per_packet_overhead=*/false);
+}
+
+template <typename T>
+void AudioEncoderIsacT<T>::OnReceivedUplinkBandwidth(
+ int target_audio_bitrate_bps,
+ absl::optional<int64_t> /*bwe_period_ms*/) {
+ // Set target bitrate, subtracting the per-packet overhead if
+ // WebRTC-SendSideBwe-WithOverhead is enabled, because that's what
+ // AudioEncoderOpus does.
+ SetTargetBitrate(
+ target_audio_bitrate_bps,
+ /*subtract_per_packet_overhead=*/send_side_bwe_with_overhead_);
+}
+
+template <typename T>
+void AudioEncoderIsacT<T>::OnReceivedUplinkAllocation(
+ BitrateAllocationUpdate update) {
+ // Set target bitrate, subtracting the per-packet overhead if
+ // WebRTC-SendSideBwe-WithOverhead is enabled, because that's what
+ // AudioEncoderOpus does.
+ SetTargetBitrate(
+ update.target_bitrate.bps<int>(),
+ /*subtract_per_packet_overhead=*/send_side_bwe_with_overhead_);
+}
+
+template <typename T>
+void AudioEncoderIsacT<T>::OnReceivedOverhead(
+ size_t overhead_bytes_per_packet) {
+ overhead_per_packet_ = DataSize::Bytes(overhead_bytes_per_packet);
+}
+
+template <typename T>
+AudioEncoder::EncodedInfo AudioEncoderIsacT<T>::EncodeImpl(
+ uint32_t rtp_timestamp,
+ rtc::ArrayView<const int16_t> audio,
+ rtc::Buffer* encoded) {
+ if (!packet_in_progress_) {
+ // Starting a new packet; remember the timestamp for later.
+ packet_in_progress_ = true;
+ packet_timestamp_ = rtp_timestamp;
+ }
+ size_t encoded_bytes = encoded->AppendData(
+ kSufficientEncodeBufferSizeBytes, [&](rtc::ArrayView<uint8_t> encoded) {
+ int r = T::Encode(isac_state_, audio.data(), encoded.data());
+
+ if (T::GetErrorCode(isac_state_) == 6450) {
+ // Isac is not able to effectively compress all types of signals. This
+ // is a limitation of the codec that cannot be easily fixed.
+ r = 0;
+ }
+ RTC_CHECK_GE(r, 0) << "Encode failed (error code "
+ << T::GetErrorCode(isac_state_) << ")";
+
+ return static_cast<size_t>(r);
+ });
+
+ if (encoded_bytes == 0)
+ return EncodedInfo();
+
+ // Got enough input to produce a packet. Return the saved timestamp from
+ // the first chunk of input that went into the packet.
+ packet_in_progress_ = false;
+ EncodedInfo info;
+ info.encoded_bytes = encoded_bytes;
+ info.encoded_timestamp = packet_timestamp_;
+ info.payload_type = config_.payload_type;
+ info.encoder_type = CodecType::kIsac;
+ return info;
+}
+
+template <typename T>
+void AudioEncoderIsacT<T>::Reset() {
+ RecreateEncoderInstance(config_);
+}
+
+template <typename T>
+absl::optional<std::pair<TimeDelta, TimeDelta>>
+AudioEncoderIsacT<T>::GetFrameLengthRange() const {
+ return {{TimeDelta::Millis(config_.frame_size_ms),
+ TimeDelta::Millis(config_.frame_size_ms)}};
+}
+
+template <typename T>
+void AudioEncoderIsacT<T>::SetTargetBitrate(int target_bps,
+ bool subtract_per_packet_overhead) {
+ if (subtract_per_packet_overhead) {
+ const DataRate overhead_rate =
+ overhead_per_packet_ / TimeDelta::Millis(config_.frame_size_ms);
+ target_bps -= overhead_rate.bps();
+ }
+ target_bps = rtc::SafeClamp(target_bps, kMinBitrateBps,
+ MaxBitrateBps(config_.sample_rate_hz));
+ int result = T::Control(isac_state_, target_bps, config_.frame_size_ms);
+ RTC_DCHECK_EQ(result, 0);
+ config_.bit_rate = target_bps;
+}
+
+template <typename T>
+void AudioEncoderIsacT<T>::RecreateEncoderInstance(const Config& config) {
+ RTC_CHECK(config.IsOk());
+ packet_in_progress_ = false;
+ if (isac_state_)
+ RTC_CHECK_EQ(0, T::Free(isac_state_));
+ RTC_CHECK_EQ(0, T::Create(&isac_state_));
+ RTC_CHECK_EQ(0, T::EncoderInit(isac_state_, /*coding_mode=*/1));
+ RTC_CHECK_EQ(0, T::SetEncSampRate(isac_state_, config.sample_rate_hz));
+ const int bit_rate = config.bit_rate == 0 ? kDefaultBitRate : config.bit_rate;
+ RTC_CHECK_EQ(0, T::Control(isac_state_, bit_rate, config.frame_size_ms));
+
+ if (config.max_payload_size_bytes != -1)
+ RTC_CHECK_EQ(
+ 0, T::SetMaxPayloadSize(isac_state_, config.max_payload_size_bytes));
+ if (config.max_bit_rate != -1)
+ RTC_CHECK_EQ(0, T::SetMaxRate(isac_state_, config.max_bit_rate));
+
+ // Set the decoder sample rate even though we just use the encoder. This
+ // doesn't appear to be necessary to produce a valid encoding, but without it
+ // we get an encoding that isn't bit-for-bit identical with what a combined
+ // encoder+decoder object produces.
+ RTC_CHECK_EQ(0, T::SetDecSampRate(isac_state_, config.sample_rate_hz));
+
+ config_ = config;
+}
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_CODING_CODECS_ISAC_AUDIO_ENCODER_ISAC_T_IMPL_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/bandwidth_info.h b/third_party/libwebrtc/modules/audio_coding/codecs/isac/bandwidth_info.h
new file mode 100644
index 0000000000..c3830a5f7c
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/bandwidth_info.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_BANDWIDTH_INFO_H_
+#define MODULES_AUDIO_CODING_CODECS_ISAC_BANDWIDTH_INFO_H_
+
+#include <stdint.h>
+
+typedef struct {
+ int in_use;
+ int32_t send_bw_avg;
+ int32_t send_max_delay_avg;
+ int16_t bottleneck_idx;
+ int16_t jitter_info;
+} IsacBandwidthInfo;
+
+#endif // MODULES_AUDIO_CODING_CODECS_ISAC_BANDWIDTH_INFO_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/empty.cc b/third_party/libwebrtc/modules/audio_coding/codecs/isac/empty.cc
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/empty.cc
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/include/audio_decoder_isacfix.h b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/include/audio_decoder_isacfix.h
new file mode 100644
index 0000000000..0b4eadd448
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/include/audio_decoder_isacfix.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_FIX_INCLUDE_AUDIO_DECODER_ISACFIX_H_
+#define MODULES_AUDIO_CODING_CODECS_ISAC_FIX_INCLUDE_AUDIO_DECODER_ISACFIX_H_
+
+#include "modules/audio_coding/codecs/isac/audio_decoder_isac_t.h"
+#include "modules/audio_coding/codecs/isac/fix/source/isac_fix_type.h"
+
+namespace webrtc {
+
+using AudioDecoderIsacFixImpl = AudioDecoderIsacT<IsacFix>;
+
+} // namespace webrtc
+#endif // MODULES_AUDIO_CODING_CODECS_ISAC_FIX_INCLUDE_AUDIO_DECODER_ISACFIX_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/include/audio_encoder_isacfix.h b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/include/audio_encoder_isacfix.h
new file mode 100644
index 0000000000..f0cc038328
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/include/audio_encoder_isacfix.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_FIX_INCLUDE_AUDIO_ENCODER_ISACFIX_H_
+#define MODULES_AUDIO_CODING_CODECS_ISAC_FIX_INCLUDE_AUDIO_ENCODER_ISACFIX_H_
+
+#include "modules/audio_coding/codecs/isac/audio_encoder_isac_t.h"
+#include "modules/audio_coding/codecs/isac/fix/source/isac_fix_type.h"
+
+namespace webrtc {
+
+using AudioEncoderIsacFixImpl = AudioEncoderIsacT<IsacFix>;
+
+} // namespace webrtc
+#endif // MODULES_AUDIO_CODING_CODECS_ISAC_FIX_INCLUDE_AUDIO_ENCODER_ISACFIX_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/include/isacfix.h b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/include/isacfix.h
new file mode 100644
index 0000000000..dcc7b0991d
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/include/isacfix.h
@@ -0,0 +1,486 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_FIX_INCLUDE_ISACFIX_H_
+#define MODULES_AUDIO_CODING_CODECS_ISAC_FIX_INCLUDE_ISACFIX_H_
+
+#include <stddef.h>
+
+#include "modules/audio_coding/codecs/isac/bandwidth_info.h"
+
+typedef struct {
+ void* dummy;
+} ISACFIX_MainStruct;
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/****************************************************************************
+ * WebRtcIsacfix_Create(...)
+ *
+ * This function creates an ISAC instance, which will contain the state
+ * information for one coding/decoding channel.
+ *
+ * Input:
+ * - *ISAC_main_inst : a pointer to the coder instance.
+ *
+ * Return value : 0 - Ok
+ * -1 - Error
+ */
+
+int16_t WebRtcIsacfix_Create(ISACFIX_MainStruct** ISAC_main_inst);
+
+/****************************************************************************
+ * WebRtcIsacfix_Free(...)
+ *
+ * This function frees the ISAC instance created at the beginning.
+ *
+ * Input:
+ * - ISAC_main_inst : an ISAC instance.
+ *
+ * Return value : 0 - Ok
+ * -1 - Error
+ */
+
+int16_t WebRtcIsacfix_Free(ISACFIX_MainStruct* ISAC_main_inst);
+
+/****************************************************************************
+ * WebRtcIsacfix_EncoderInit(...)
+ *
+ * This function initializes an ISAC instance prior to the encoder calls.
+ *
+ * Input:
+ * - ISAC_main_inst : ISAC instance.
+ * - CodingMode : 0 - Bit rate and frame length are automatically
+ * adjusted to available bandwidth on
+ * transmission channel.
+ * 1 - User sets a frame length and a target bit
+ * rate which is taken as the maximum short-term
+ * average bit rate.
+ *
+ * Return value : 0 - Ok
+ * -1 - Error
+ */
+
+int16_t WebRtcIsacfix_EncoderInit(ISACFIX_MainStruct* ISAC_main_inst,
+ int16_t CodingMode);
+
+/****************************************************************************
+ * WebRtcIsacfix_Encode(...)
+ *
+ * This function encodes 10 ms frames and inserts them into a packet.
+ * Input speech length has to be 160 samples (10ms). The encoder buffers those
+ * 10ms frames until it reaches the chosen Framesize (480 or 960 samples
+ * corresponding to 30 or 60 ms frames), and then proceeds to the encoding.
+ *
+ * Input:
+ * - ISAC_main_inst : ISAC instance.
+ * - speechIn : input speech vector.
+ *
+ * Output:
+ * - encoded : the encoded data vector
+ *
+ * Return value : >0 - Length (in bytes) of coded data
+ * 0 - The buffer didn't reach the chosen framesize
+ * so it keeps buffering speech samples.
+ * -1 - Error
+ */
+
+int WebRtcIsacfix_Encode(ISACFIX_MainStruct* ISAC_main_inst,
+ const int16_t* speechIn,
+ uint8_t* encoded);
+
+/****************************************************************************
+ * WebRtcIsacfix_DecoderInit(...)
+ *
+ * This function initializes an ISAC instance prior to the decoder calls.
+ *
+ * Input:
+ * - ISAC_main_inst : ISAC instance.
+ */
+
+void WebRtcIsacfix_DecoderInit(ISACFIX_MainStruct* ISAC_main_inst);
+
+/****************************************************************************
+ * WebRtcIsacfix_UpdateBwEstimate1(...)
+ *
+ * This function updates the estimate of the bandwidth.
+ *
+ * Input:
+ * - ISAC_main_inst : ISAC instance.
+ * - encoded : encoded ISAC frame(s).
+ * - packet_size : size of the packet in bytes.
+ * - rtp_seq_number : the RTP number of the packet.
+ * - arr_ts : the arrival time of the packet (from NetEq)
+ * in samples.
+ *
+ * Return value : 0 - Ok
+ * -1 - Error
+ */
+
+int16_t WebRtcIsacfix_UpdateBwEstimate1(ISACFIX_MainStruct* ISAC_main_inst,
+ const uint8_t* encoded,
+ size_t packet_size,
+ uint16_t rtp_seq_number,
+ uint32_t arr_ts);
+
+/****************************************************************************
+ * WebRtcIsacfix_UpdateBwEstimate(...)
+ *
+ * This function updates the estimate of the bandwidth.
+ *
+ * Input:
+ * - ISAC_main_inst : ISAC instance.
+ * - encoded : encoded ISAC frame(s).
+ * - packet_size : size of the packet in bytes.
+ * - rtp_seq_number : the RTP number of the packet.
+ * - send_ts : the send time of the packet from RTP header,
+ * in samples.
+ * - arr_ts : the arrival time of the packet (from NetEq)
+ * in samples.
+ *
+ * Return value : 0 - Ok
+ * -1 - Error
+ */
+
+int16_t WebRtcIsacfix_UpdateBwEstimate(ISACFIX_MainStruct* ISAC_main_inst,
+ const uint8_t* encoded,
+ size_t packet_size,
+ uint16_t rtp_seq_number,
+ uint32_t send_ts,
+ uint32_t arr_ts);
+
+/****************************************************************************
+ * WebRtcIsacfix_Decode(...)
+ *
+ * This function decodes an ISAC frame. Output speech length
+ * will be a multiple of 480 samples: 480 or 960 samples,
+ * depending on the framesize (30 or 60 ms).
+ *
+ * Input:
+ * - ISAC_main_inst : ISAC instance.
+ * - encoded : encoded ISAC frame(s)
+ * - len : bytes in encoded vector
+ *
+ * Output:
+ * - decoded : The decoded vector
+ *
+ * Return value : >0 - number of samples in decoded vector
+ * -1 - Error
+ */
+
+int WebRtcIsacfix_Decode(ISACFIX_MainStruct* ISAC_main_inst,
+ const uint8_t* encoded,
+ size_t len,
+ int16_t* decoded,
+ int16_t* speechType);
+
+/****************************************************************************
+ * WebRtcIsacfix_DecodePlc(...)
+ *
+ * This function conducts PLC for ISAC frame(s) in wide-band (16 kHz sampling).
+ * Output speech length will be "480*noOfLostFrames" samples,
+ * which is equivalent to "30*noOfLostFrames" milliseconds.
+ *
+ * Input:
+ * - ISAC_main_inst : ISAC instance.
+ * - noOfLostFrames : Number of PLC frames (480 samples = 30 ms)
+ * to produce
+ * NOTE! Maximum number is 2 (960 samples = 60ms)
+ *
+ * Output:
+ * - decoded : The decoded vector
+ *
+ * Return value : Number of samples in decoded PLC vector
+ */
+
+size_t WebRtcIsacfix_DecodePlc(ISACFIX_MainStruct* ISAC_main_inst,
+ int16_t* decoded,
+ size_t noOfLostFrames);
+
+/****************************************************************************
+ * WebRtcIsacfix_ReadFrameLen(...)
+ *
+ * This function returns the length of the frame represented in the packet.
+ *
+ * Input:
+ * - encoded : Encoded bitstream
+ * - encoded_len_bytes : Length of the bitstream in bytes.
+ *
+ * Output:
+ * - frameLength : Length of frame in packet (in samples)
+ *
+ */
+
+int16_t WebRtcIsacfix_ReadFrameLen(const uint8_t* encoded,
+ size_t encoded_len_bytes,
+ size_t* frameLength);
+
+/****************************************************************************
+ * WebRtcIsacfix_Control(...)
+ *
+ * This function sets the limit on the short-term average bit rate and the
+ * frame length. Should be used only in Instantaneous mode.
+ *
+ * Input:
+ * - ISAC_main_inst : ISAC instance.
+ * - rate : limit on the short-term average bit rate,
+ * in bits/second (between 10000 and 32000)
+ * - framesize : number of milliseconds per frame (30 or 60)
+ *
+ * Return value : 0 - ok
+ * -1 - Error
+ */
+
+int16_t WebRtcIsacfix_Control(ISACFIX_MainStruct* ISAC_main_inst,
+ int16_t rate,
+ int framesize);
+
+void WebRtcIsacfix_SetInitialBweBottleneck(ISACFIX_MainStruct* ISAC_main_inst,
+ int bottleneck_bits_per_second);
+
+/****************************************************************************
+ * WebRtcIsacfix_ControlBwe(...)
+ *
+ * This function sets the initial values of bottleneck and frame-size if
+ * iSAC is used in channel-adaptive mode. Through this API, users can
+ * enforce a frame-size for all values of bottleneck. Then iSAC will not
+ * automatically change the frame-size.
+ *
+ *
+ * Input:
+ * - ISAC_main_inst : ISAC instance.
+ * - rateBPS : initial value of bottleneck in bits/second
+ * 10000 <= rateBPS <= 32000 is accepted
+ * - frameSizeMs : number of milliseconds per frame (30 or 60)
+ * - enforceFrameSize : 1 to enforce the given frame-size throughout
+ * the adaptation process, 0 to let iSAC change
+ * the frame-size if required.
+ *
+ * Return value : 0 - ok
+ * -1 - Error
+ */
+
+int16_t WebRtcIsacfix_ControlBwe(ISACFIX_MainStruct* ISAC_main_inst,
+ int16_t rateBPS,
+ int frameSizeMs,
+ int16_t enforceFrameSize);
+
+/****************************************************************************
+ * WebRtcIsacfix_version(...)
+ *
+ * This function returns the version number.
+ *
+ * Output:
+ * - version : Pointer to character string
+ *
+ */
+
+void WebRtcIsacfix_version(char* version);
+
+/****************************************************************************
+ * WebRtcIsacfix_GetErrorCode(...)
+ *
+ * This function can be used to check the error code of an iSAC instance. When
+ * a function returns -1, an error code will be set for that instance. The
+ * function below extracts the code of the last error that occurred in the
+ * specified instance.
+ *
+ * Input:
+ * - ISAC_main_inst : ISAC instance
+ *
+ * Return value : Error code
+ */
+
+int16_t WebRtcIsacfix_GetErrorCode(ISACFIX_MainStruct* ISAC_main_inst);
+
+/****************************************************************************
+ * WebRtcIsacfix_GetUplinkBw(...)
+ *
+ * This function returns the iSAC send bitrate.
+ *
+ * Input:
+ * - ISAC_main_inst : iSAC instance
+ *
+ * Return value : <0 Error code
+ * else bitrate
+ */
+
+int32_t WebRtcIsacfix_GetUplinkBw(ISACFIX_MainStruct* ISAC_main_inst);
+
+/****************************************************************************
+ * WebRtcIsacfix_SetMaxPayloadSize(...)
+ *
+ * This function sets a limit for the maximum payload size of iSAC. The same
+ * value is used both for 30 and 60 msec packets.
+ * The absolute max will be valid until the next time the function is called.
+ * NOTE! This function may override the function WebRtcIsacfix_SetMaxRate()
+ *
+ * Input:
+ * - ISAC_main_inst : iSAC instance
+ * - maxPayloadBytes : maximum size of the payload in bytes
+ * valid values are between 100 and 400 bytes
+ *
+ *
+ * Return value : 0 if successful
+ * -1 if an error occurs
+ */
+
+int16_t WebRtcIsacfix_SetMaxPayloadSize(ISACFIX_MainStruct* ISAC_main_inst,
+ int16_t maxPayloadBytes);
+
+/****************************************************************************
+ * WebRtcIsacfix_SetMaxRate(...)
+ *
+ * This function sets the maximum rate, which the codec may not exceed for a
+ * single packet. The maximum rate is set in bits per second.
+ * The codec has an absolute maximum rate of 53400 bits per second (200 bytes
+ * per 30 msec).
+ * It is possible to set a maximum rate between 32000 and 53400 bits per second.
+ *
+ * The rate limit is valid until the next time the function is called.
+ *
+ * NOTE! Packet size will never go above the value set if calling
+ * WebRtcIsacfix_SetMaxPayloadSize() (default max packet size is 400 bytes).
+ *
+ * Input:
+ * - ISAC_main_inst : iSAC instance
+ * - maxRate : maximum rate in bits per second,
+ * valid values are 32000 to 53400 bits
+ *
+ * Return value : 0 if successful
+ * -1 if an error occurs
+ */
+
+int16_t WebRtcIsacfix_SetMaxRate(ISACFIX_MainStruct* ISAC_main_inst,
+ int32_t maxRate);
+
+/****************************************************************************
+ * WebRtcIsacfix_CreateInternal(...)
+ *
+ * This function creates the memory that is used to store data in the encoder
+ *
+ * Input:
+ * - *ISAC_main_inst : a pointer to the coder instance.
+ *
+ * Return value : 0 - Ok
+ * -1 - Error
+ */
+
+int16_t WebRtcIsacfix_CreateInternal(ISACFIX_MainStruct* ISAC_main_inst);
+
+/****************************************************************************
+ * WebRtcIsacfix_FreeInternal(...)
+ *
+ * This function frees the internal memory for storing encoder data.
+ *
+ * Input:
+ * - ISAC_main_inst : an ISAC instance.
+ *
+ * Return value : 0 - Ok
+ * -1 - Error
+ */
+
+int16_t WebRtcIsacfix_FreeInternal(ISACFIX_MainStruct* ISAC_main_inst);
+
+/****************************************************************************
+ * WebRtcIsacfix_GetNewBitStream(...)
+ *
+ * This function returns encoded data, with the received bwe-index in the
+ * stream. It should always return a complete packet, i.e. it should only be
+ * called once even for 60 msec frames.
+ *
+ * Input:
+ * - ISAC_main_inst : ISAC instance.
+ * - bweIndex : index of bandwidth estimate to put in the new
+ * bitstream.
+ * - scale : factor for rate change (0.4 ~=> half the
+ * rate, 1 no change).
+ *
+ * Output:
+ * - encoded : the encoded data vector
+ *
+ * Return value : >0 - Length (in bytes) of coded data
+ * -1 - Error
+ */
+
+int16_t WebRtcIsacfix_GetNewBitStream(ISACFIX_MainStruct* ISAC_main_inst,
+ int16_t bweIndex,
+ float scale,
+ uint8_t* encoded);
+
+/****************************************************************************
+ * WebRtcIsacfix_GetDownLinkBwIndex(...)
+ *
+ * This function returns the index representing the Bandwidth estimate from
+ * the other side to this side.
+ *
+ * Input:
+ * - ISAC_main_inst : iSAC struct
+ *
+ * Output:
+ * - rateIndex : Bandwidth estimate to transmit to other side.
+ *
+ */
+
+int16_t WebRtcIsacfix_GetDownLinkBwIndex(ISACFIX_MainStruct* ISAC_main_inst,
+ int16_t* rateIndex);
+
+/****************************************************************************
+ * WebRtcIsacfix_UpdateUplinkBw(...)
+ *
+ * This function takes an index representing the Bandwidth estimate from
+ * this side to the other side and updates the BWE.
+ *
+ * Input:
+ * - ISAC_main_inst : iSAC struct
+ * - rateIndex : Bandwidth estimate from other side.
+ *
+ */
+
+int16_t WebRtcIsacfix_UpdateUplinkBw(ISACFIX_MainStruct* ISAC_main_inst,
+ int16_t rateIndex);
+
+/****************************************************************************
+ * WebRtcIsacfix_ReadBwIndex(...)
+ *
+ * This function returns the index of the Bandwidth estimate from the bitstream.
+ *
+ * Input:
+ * - encoded : Encoded bitstream
+ * - encoded_len_bytes : Length of the bitstream in bytes.
+ *
+ * Output:
+ * - rateIndex : Bandwidth estimate in bitstream
+ *
+ */
+
+int16_t WebRtcIsacfix_ReadBwIndex(const uint8_t* encoded,
+ size_t encoded_len_bytes,
+ int16_t* rateIndex);
+
+/****************************************************************************
+ * WebRtcIsacfix_GetNewFrameLen(...)
+ *
+ * This function returns the next frame length (in samples) of iSAC.
+ *
+ * Input:
+ * -ISAC_main_inst : iSAC instance
+ *
+ * Return value : frame length in samples
+ */
+
+int16_t WebRtcIsacfix_GetNewFrameLen(ISACFIX_MainStruct* ISAC_main_inst);
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* MODULES_AUDIO_CODING_CODECS_ISAC_FIX_INCLUDE_ISACFIX_H_ */
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/arith_routines.c b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/arith_routines.c
new file mode 100644
index 0000000000..eaeef50f04
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/arith_routines.c
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * arith_routines.c
+ *
+ * This C file contains a function for finalizing the bitstream
+ * after arithmetic coding.
+ *
+ */
+
+#include "modules/audio_coding/codecs/isac/fix/source/arith_routins.h"
+
+
+/****************************************************************************
+ * WebRtcIsacfix_EncTerminate(...)
+ *
+ * Final call to the arithmetic coder for an encoder call. This function
+ * terminates the stream and returns the number of bytes written.
+ *
+ * Input:
+ * - streamData : in-/output struct containing bitstream
+ *
+ * Return value : number of bytes in the stream
+ */
+int16_t WebRtcIsacfix_EncTerminate(Bitstr_enc *streamData)
+{
+ uint16_t *streamPtr;
+ uint16_t negCarry;
+
+ /* point to the right place in the stream buffer */
+ streamPtr = streamData->stream + streamData->stream_index;
+
+ /* find minimum length (determined by current interval width) */
+ if ( streamData->W_upper > 0x01FFFFFF )
+ {
+ streamData->streamval += 0x01000000;
+
+ /* if result is less than the added value we must take care of the carry */
+ if (streamData->streamval < 0x01000000)
+ {
+ /* propagate carry */
+ if (streamData->full == 0) {
+ /* Add value to current value */
+ negCarry = *streamPtr;
+ negCarry += 0x0100;
+ *streamPtr = negCarry;
+
+ /* if value is too big, propagate carry to next byte, and so on */
+ while (!(negCarry))
+ {
+ negCarry = *--streamPtr;
+ negCarry++;
+ *streamPtr = negCarry;
+ }
+ } else {
+ /* propagate the carry by adding one to the previous byte in the
+ * stream; if that byte is 0xFFFF we need to propagate the carry
+ * further back in the stream */
+ while ( !(++(*--streamPtr)) );
+ }
+
+ /* put pointer back to the old value */
+ streamPtr = streamData->stream + streamData->stream_index;
+ }
+ /* write remaining data to bitstream, if "full == 0" first byte has data */
+ if (streamData->full == 0) {
+ *streamPtr++ += (uint16_t)(streamData->streamval >> 24);
+ streamData->full = 1;
+ } else {
+ *streamPtr = (uint16_t)((streamData->streamval >> 24) << 8);
+ streamData->full = 0;
+ }
+ }
+ else
+ {
+ streamData->streamval += 0x00010000;
+
+ /* if result is less than the added value we must take care of the carry */
+ if (streamData->streamval < 0x00010000)
+ {
+ /* propagate carry */
+ if (streamData->full == 0) {
+ /* Add value to current value */
+ negCarry = *streamPtr;
+ negCarry += 0x0100;
+ *streamPtr = negCarry;
+
+ /* if value is too big, propagate carry to next byte, and so on */
+ while (!(negCarry))
+ {
+ negCarry = *--streamPtr;
+ negCarry++;
+ *streamPtr = negCarry;
+ }
+ } else {
+ /* Add carry to previous byte */
+ while ( !(++(*--streamPtr)) );
+ }
+
+ /* put pointer back to the old value */
+ streamPtr = streamData->stream + streamData->stream_index;
+ }
+ /* write remaining data (2 bytes) to bitstream */
+ if (streamData->full) {
+ *streamPtr++ = (uint16_t)(streamData->streamval >> 16);
+ } else {
+ *streamPtr++ |= (uint16_t)(streamData->streamval >> 24);
+ *streamPtr = (uint16_t)(streamData->streamval >> 8) & 0xFF00;
+ }
+ }
+
+ /* calculate stream length in bytes */
+ return (((streamPtr - streamData->stream)<<1) + !(streamData->full));
+}
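+
+/* Minimal usage sketch (illustrative only; stream setup and the encoding
+ * calls that precede termination are assumed, not shown in this file):
+ *
+ *   Bitstr_enc bs;   // set up by the encoder's stream-initialization code
+ *   // ... WebRtcIsacfix_EncHistMulti() / WebRtcIsacfix_EncLogisticMulti2()
+ *   // calls fill bs with coded symbols ...
+ *   int16_t numBytes = WebRtcIsacfix_EncTerminate(&bs);
+ *   // bs.stream now holds numBytes bytes of finished bitstream.
+ */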
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/arith_routines_hist.c b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/arith_routines_hist.c
new file mode 100644
index 0000000000..cad3056b37
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/arith_routines_hist.c
@@ -0,0 +1,401 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * arith_routines_hist.c
+ *
+ * This C file contains arithmetic encoding and decoding.
+ *
+ */
+
+#include "modules/audio_coding/codecs/isac/fix/source/arith_routins.h"
+
+
+/****************************************************************************
+ * WebRtcIsacfix_EncHistMulti(...)
+ *
+ * Encode the histogram interval
+ *
+ * Input:
+ * - streamData : in-/output struct containing bitstream
+ * - data : data vector
+ * - cdf : array of cdf arrays
+ * - lenData : data vector length
+ *
+ * Return value : 0 if ok
+ * <0 if error detected
+ */
+int WebRtcIsacfix_EncHistMulti(Bitstr_enc *streamData,
+ const int16_t *data,
+ const uint16_t *const *cdf,
+ const int16_t lenData)
+{
+ uint32_t W_lower;
+ uint32_t W_upper;
+ uint32_t W_upper_LSB;
+ uint32_t W_upper_MSB;
+ uint16_t *streamPtr;
+ uint16_t negCarry;
+ uint16_t *maxStreamPtr;
+ uint16_t *streamPtrCarry;
+ uint32_t cdfLo;
+ uint32_t cdfHi;
+ int k;
+
+
+ /* point to beginning of stream buffer
+ * and set maximum streamPtr value */
+ streamPtr = streamData->stream + streamData->stream_index;
+ maxStreamPtr = streamData->stream + STREAM_MAXW16_60MS - 1;
+
+ W_upper = streamData->W_upper;
+
+ for (k = lenData; k > 0; k--)
+ {
+ /* fetch cdf_lower and cdf_upper from cdf tables */
+ cdfLo = (uint32_t) *(*cdf + (uint32_t)*data);
+ cdfHi = (uint32_t) *(*cdf++ + (uint32_t)*data++ + 1);
+
+ /* update interval */
+ W_upper_LSB = W_upper & 0x0000FFFF;
+ W_upper_MSB = W_upper >> 16;
+ W_lower = WEBRTC_SPL_UMUL(W_upper_MSB, cdfLo);
+ W_lower += ((W_upper_LSB * cdfLo) >> 16);
+ W_upper = WEBRTC_SPL_UMUL(W_upper_MSB, cdfHi);
+ W_upper += ((W_upper_LSB * cdfHi) >> 16);
+
+ /* shift interval such that it begins at zero */
+ W_upper -= ++W_lower;
+
+ /* add integer to bitstream */
+ streamData->streamval += W_lower;
+
+ /* handle carry */
+ if (streamData->streamval < W_lower)
+ {
+ /* propagate carry */
+ streamPtrCarry = streamPtr;
+ if (streamData->full == 0) {
+ negCarry = *streamPtrCarry;
+ negCarry += 0x0100;
+ *streamPtrCarry = negCarry;
+ while (!(negCarry))
+ {
+ negCarry = *--streamPtrCarry;
+ negCarry++;
+ *streamPtrCarry = negCarry;
+ }
+ } else {
+ while ( !(++(*--streamPtrCarry)) );
+ }
+ }
+
+ /* renormalize interval, store most significant byte of streamval and update streamval
+ * W_upper < 2^24 */
+ while ( !(W_upper & 0xFF000000) )
+ {
+ W_upper <<= 8;
+ if (streamData->full == 0) {
+ *streamPtr++ += (uint16_t)(streamData->streamval >> 24);
+ streamData->full = 1;
+ } else {
+ *streamPtr = (uint16_t)((streamData->streamval >> 24) << 8);
+ streamData->full = 0;
+ }
+
+ if( streamPtr > maxStreamPtr ) {
+ return -ISAC_DISALLOWED_BITSTREAM_LENGTH;
+ }
+ streamData->streamval <<= 8;
+ }
+ }
+
+ /* calculate new stream_index */
+ streamData->stream_index = streamPtr - streamData->stream;
+ streamData->W_upper = W_upper;
+
+ return 0;
+}
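+
+/* Worked example of the interval update above (illustrative numbers, not
+ * taken from any real cdf table): with W_upper = 0xFFFFFFFF and a symbol
+ * whose cdf bounds are cdfLo = 0x4000 and cdfHi = 0x8000 (0.25 and 0.5 in
+ * Q16),
+ *
+ *   W_lower = 0xFFFF * 0x4000 + ((0xFFFF * 0x4000) >> 16) = 0x3FFFFFFF
+ *   W_upper = 0xFFFF * 0x8000 + ((0xFFFF * 0x8000) >> 16) = 0x7FFFFFFF
+ *   W_upper -= ++W_lower;   // new width 0x3FFFFFFF ~ (0.5 - 0.25) * 2^32
+ *
+ * i.e. the interval width is scaled by the symbol probability in Q16.
+ */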
+
+
+/****************************************************************************
+ * WebRtcIsacfix_DecHistBisectMulti(...)
+ *
+ * Function to decode more symbols from the arithmetic bytestream, using the
+ * method of bisection. The cdf tables should be of size 2^k-1 (which
+ * corresponds to an alphabet size of 2^k-2).
+ *
+ * Input:
+ * - streamData : in-/output struct containing bitstream
+ * - cdf : array of cdf arrays
+ * - cdfSize : array of cdf table sizes+1 (power of two: 2^k)
+ * - lenData : data vector length
+ *
+ * Output:
+ * - data : data vector
+ *
+ * Return value : number of bytes in the stream
+ * <0 if error detected
+ */
+int16_t WebRtcIsacfix_DecHistBisectMulti(int16_t *data,
+ Bitstr_dec *streamData,
+ const uint16_t *const *cdf,
+ const uint16_t *cdfSize,
+ const int16_t lenData)
+{
+ uint32_t W_lower = 0;
+ uint32_t W_upper;
+ uint32_t W_tmp;
+ uint32_t W_upper_LSB;
+ uint32_t W_upper_MSB;
+ uint32_t streamval;
+ const uint16_t *streamPtr;
+ const uint16_t *cdfPtr;
+ int16_t sizeTmp;
+ int k;
+
+
+ streamPtr = streamData->stream + streamData->stream_index;
+ W_upper = streamData->W_upper;
+
+ /* Error check: should not be possible in normal operation */
+ if (W_upper == 0) {
+ return -2;
+ }
+
+ /* first time decoder is called for this stream */
+ if (streamData->stream_index == 0)
+ {
+ /* read first word from bytestream */
+ streamval = (uint32_t)*streamPtr++ << 16;
+ streamval |= *streamPtr++;
+ } else {
+ streamval = streamData->streamval;
+ }
+
+ for (k = lenData; k > 0; k--)
+ {
+ /* find the integer *data for which streamval lies in [W_lower+1, W_upper] */
+ W_upper_LSB = W_upper & 0x0000FFFF;
+ W_upper_MSB = W_upper >> 16;
+
+    /* start halfway through the cdf range */
+ sizeTmp = *cdfSize++ / 2;
+ cdfPtr = *cdf + (sizeTmp - 1);
+
+ /* method of bisection */
+ for ( ;; )
+ {
+ W_tmp = WEBRTC_SPL_UMUL_32_16(W_upper_MSB, *cdfPtr);
+ W_tmp += (W_upper_LSB * (*cdfPtr)) >> 16;
+ sizeTmp /= 2;
+ if (sizeTmp == 0) {
+ break;
+ }
+
+ if (streamval > W_tmp)
+ {
+ W_lower = W_tmp;
+ cdfPtr += sizeTmp;
+ } else {
+ W_upper = W_tmp;
+ cdfPtr -= sizeTmp;
+ }
+ }
+ if (streamval > W_tmp)
+ {
+ W_lower = W_tmp;
+ *data++ = cdfPtr - *cdf++;
+ } else {
+ W_upper = W_tmp;
+ *data++ = cdfPtr - *cdf++ - 1;
+ }
+
+ /* shift interval to start at zero */
+ W_upper -= ++W_lower;
+
+ /* add integer to bitstream */
+ streamval -= W_lower;
+
+ /* renormalize interval and update streamval */
+ /* W_upper < 2^24 */
+ while ( !(W_upper & 0xFF000000) )
+ {
+ /* read next byte from stream */
+ if (streamData->full == 0) {
+ streamval = (streamval << 8) | (*streamPtr++ & 0x00FF);
+ streamData->full = 1;
+ } else {
+ streamval = (streamval << 8) | (*streamPtr >> 8);
+ streamData->full = 0;
+ }
+ W_upper <<= 8;
+ }
+
+
+ /* Error check: should not be possible in normal operation */
+ if (W_upper == 0) {
+ return -2;
+ }
+
+ }
+
+ streamData->stream_index = streamPtr - streamData->stream;
+ streamData->W_upper = W_upper;
+ streamData->streamval = streamval;
+
+ if ( W_upper > 0x01FFFFFF ) {
+ return (streamData->stream_index*2 - 3 + !streamData->full);
+ } else {
+ return (streamData->stream_index*2 - 2 + !streamData->full);
+ }
+}
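+
+/* Sketch of the bisection above (illustrative): for cdfSize = 16 (k = 4)
+ * the search starts at table index 7 and then moves by 4, 2 and 1 entries
+ * up or down depending on whether streamval lies above or below W_tmp, so
+ * each symbol is located in log2(cdfSize) probes regardless of the decoded
+ * value. */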
+
+
+/****************************************************************************
+ * WebRtcIsacfix_DecHistOneStepMulti(...)
+ *
+ * Function to decode more symbols from the arithmetic bytestream, taking
+ * single step up or down at a time.
+ * cdf tables can be of arbitrary size, but large tables may take a lot of
+ * iterations.
+ *
+ * Input:
+ * - streamData : in-/output struct containing bitstream
+ * - cdf : array of cdf arrays
+ * - initIndex : vector of initial cdf table search entries
+ * - lenData : data vector length
+ *
+ * Output:
+ * - data : data vector
+ *
+ * Return value : number of bytes in original stream
+ * <0 if error detected
+ */
+int16_t WebRtcIsacfix_DecHistOneStepMulti(int16_t *data,
+ Bitstr_dec *streamData,
+ const uint16_t *const *cdf,
+ const uint16_t *initIndex,
+ const int16_t lenData)
+{
+ uint32_t W_lower;
+ uint32_t W_upper;
+ uint32_t W_tmp;
+ uint32_t W_upper_LSB;
+ uint32_t W_upper_MSB;
+ uint32_t streamval;
+ const uint16_t *streamPtr;
+ const uint16_t *cdfPtr;
+ int k;
+
+
+ streamPtr = streamData->stream + streamData->stream_index;
+ W_upper = streamData->W_upper;
+ /* Error check: Should not be possible in normal operation */
+ if (W_upper == 0) {
+ return -2;
+ }
+
+  /* Check if it is the first time the decoder is called for this stream */
+ if (streamData->stream_index == 0)
+ {
+ /* read first word from bytestream */
+ streamval = (uint32_t)(*streamPtr++) << 16;
+ streamval |= *streamPtr++;
+ } else {
+ streamval = streamData->streamval;
+ }
+
+ for (k = lenData; k > 0; k--)
+ {
+ /* find the integer *data for which streamval lies in [W_lower+1, W_upper] */
+ W_upper_LSB = W_upper & 0x0000FFFF;
+ W_upper_MSB = WEBRTC_SPL_RSHIFT_U32(W_upper, 16);
+
+ /* start at the specified table entry */
+ cdfPtr = *cdf + (*initIndex++);
+ W_tmp = WEBRTC_SPL_UMUL_32_16(W_upper_MSB, *cdfPtr);
+ W_tmp += (W_upper_LSB * (*cdfPtr)) >> 16;
+
+ if (streamval > W_tmp)
+ {
+ for ( ;; )
+ {
+ W_lower = W_tmp;
+
+ /* range check */
+ if (cdfPtr[0] == 65535) {
+ return -3;
+ }
+
+ W_tmp = WEBRTC_SPL_UMUL_32_16(W_upper_MSB, *++cdfPtr);
+ W_tmp += (W_upper_LSB * (*cdfPtr)) >> 16;
+
+ if (streamval <= W_tmp) {
+ break;
+ }
+ }
+ W_upper = W_tmp;
+ *data++ = cdfPtr - *cdf++ - 1;
+ } else {
+ for ( ;; )
+ {
+ W_upper = W_tmp;
+ --cdfPtr;
+
+ /* range check */
+ if (cdfPtr < *cdf) {
+ return -3;
+ }
+
+ W_tmp = WEBRTC_SPL_UMUL_32_16(W_upper_MSB, *cdfPtr);
+ W_tmp += (W_upper_LSB * (*cdfPtr)) >> 16;
+
+ if (streamval > W_tmp) {
+ break;
+ }
+ }
+ W_lower = W_tmp;
+ *data++ = cdfPtr - *cdf++;
+ }
+
+ /* shift interval to start at zero */
+ W_upper -= ++W_lower;
+
+ /* add integer to bitstream */
+ streamval -= W_lower;
+
+ /* renormalize interval and update streamval */
+ /* W_upper < 2^24 */
+ while ( !(W_upper & 0xFF000000) )
+ {
+ /* read next byte from stream */
+ if (streamData->full == 0) {
+ streamval = (streamval << 8) | (*streamPtr++ & 0x00FF);
+ streamData->full = 1;
+ } else {
+ streamval = (streamval << 8) | (*streamPtr >> 8);
+ streamData->full = 0;
+ }
+ W_upper <<= 8;
+ }
+ }
+
+ streamData->stream_index = streamPtr - streamData->stream;
+ streamData->W_upper = W_upper;
+ streamData->streamval = streamval;
+
+ /* find number of bytes in original stream (determined by current interval width) */
+ if ( W_upper > 0x01FFFFFF ) {
+ return (streamData->stream_index*2 - 3 + !streamData->full);
+ } else {
+ return (streamData->stream_index*2 - 2 + !streamData->full);
+ }
+}
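+
+/* Decode-side usage sketch (illustrative; LEN, cdf and initIndex are
+ * assumed to match whatever the encoder used):
+ *
+ *   Bitstr_dec bs;                       // fed with the received bytes
+ *   int16_t data[LEN];
+ *   int16_t nbytes = WebRtcIsacfix_DecHistOneStepMulti(data, &bs, cdf,
+ *                                                      initIndex, LEN);
+ *   if (nbytes < 0) {  // -2: zero interval, -3: cdf range error
+ *   }
+ */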
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/arith_routines_logist.c b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/arith_routines_logist.c
new file mode 100644
index 0000000000..8e97960461
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/arith_routines_logist.c
@@ -0,0 +1,413 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * arith_routines_logist.c
+ *
+ * This C file contains logistic arithmetic encoding and decoding.
+ *
+ */
+
+#include "modules/audio_coding/codecs/isac/fix/source/arith_routins.h"
+
+/* Tables for piecewise linear cdf functions: y = k*x */
+
+/* x Points for function piecewise() in Q15 */
+static const int32_t kHistEdges[51] = {
+ -327680, -314573, -301466, -288359, -275252, -262144, -249037, -235930, -222823, -209716,
+ -196608, -183501, -170394, -157287, -144180, -131072, -117965, -104858, -91751, -78644,
+ -65536, -52429, -39322, -26215, -13108, 0, 13107, 26214, 39321, 52428,
+ 65536, 78643, 91750, 104857, 117964, 131072, 144179, 157286, 170393, 183500,
+ 196608, 209715, 222822, 235929, 249036, 262144, 275251, 288358, 301465, 314572,
+ 327680
+};
+
+
+/* k Points for function piecewise() in Q0 */
+static const uint16_t kCdfSlope[51] = {
+ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+ 5, 5, 13, 23, 47, 87, 154, 315, 700, 1088,
+ 2471, 6064, 14221, 21463, 36634, 36924, 19750, 13270, 5806, 2312,
+ 1095, 660, 316, 145, 86, 41, 32, 5, 5, 5,
+ 5, 5, 5, 5, 5, 5, 5, 5, 5, 2,
+ 0
+};
+
+/* y Points for function piecewise() in Q0 */
+static const uint16_t kCdfLogistic[51] = {
+ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18,
+ 20, 22, 24, 29, 38, 57, 92, 153, 279, 559,
+ 994, 1983, 4408, 10097, 18682, 33336, 48105, 56005, 61313, 63636,
+ 64560, 64998, 65262, 65389, 65447, 65481, 65497, 65510, 65512, 65514,
+ 65516, 65518, 65520, 65522, 65524, 65526, 65528, 65530, 65532, 65534,
+ 65535
+};
+
+
+/****************************************************************************
+ * WebRtcIsacfix_Piecewise(...)
+ *
+ * Piecewise linear function
+ *
+ * Input:
+ * - xinQ15 : input value x in Q15
+ *
+ * Return value             : corresponding y-value in Q0
+ */
+
+
+static __inline uint16_t WebRtcIsacfix_Piecewise(int32_t xinQ15) {
+ int32_t ind;
+ int32_t qtmp1;
+ uint16_t qtmp2;
+
+ /* Find index for x-value */
+ qtmp1 = WEBRTC_SPL_SAT(kHistEdges[50],xinQ15,kHistEdges[0]);
+ ind = WEBRTC_SPL_MUL(5, qtmp1 - kHistEdges[0]);
+ ind >>= 16;
+
+  /* Calculate corresponding y-value and return */
+ qtmp1 = qtmp1 - kHistEdges[ind];
+ qtmp2 = (uint16_t)WEBRTC_SPL_RSHIFT_U32(
+ WEBRTC_SPL_UMUL_32_16(qtmp1,kCdfSlope[ind]), 15);
+ return (kCdfLogistic[ind] + qtmp2);
+}
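+
+/* Worked example (follows directly from the tables above): for xinQ15 = 0,
+ * ind = (5 * (0 - kHistEdges[0])) >> 16 = (5 * 327680) >> 16 = 25, and since
+ * kHistEdges[25] = 0 the slope term vanishes, so the function returns
+ * kCdfLogistic[25] = 33336, roughly the cdf midpoint (0.51 * 65535). */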
+
+/****************************************************************************
+ * WebRtcIsacfix_EncLogisticMulti2(...)
+ *
+ * Arithmetic coding of spectrum.
+ *
+ * Input:
+ * - streamData : in-/output struct containing bitstream
+ * - dataQ7 : data vector in Q7
+ * - envQ8 : side info vector defining the width of the pdf
+ * in Q8
+ * - lenData : data vector length
+ *
+ * Return value : 0 if ok,
+ * <0 otherwise.
+ */
+int WebRtcIsacfix_EncLogisticMulti2(Bitstr_enc *streamData,
+ int16_t *dataQ7,
+ const uint16_t *envQ8,
+ const int16_t lenData)
+{
+ uint32_t W_lower;
+ uint32_t W_upper;
+ uint16_t W_upper_LSB;
+ uint16_t W_upper_MSB;
+ uint16_t *streamPtr;
+ uint16_t *maxStreamPtr;
+ uint16_t *streamPtrCarry;
+ uint16_t negcarry;
+ uint32_t cdfLo;
+ uint32_t cdfHi;
+ int k;
+
+ /* point to beginning of stream buffer
+ * and set maximum streamPtr value */
+ streamPtr = streamData->stream + streamData->stream_index;
+ maxStreamPtr = streamData->stream + STREAM_MAXW16_60MS - 1;
+ W_upper = streamData->W_upper;
+
+ for (k = 0; k < lenData; k++)
+ {
+ /* compute cdf_lower and cdf_upper by evaluating the
+ * WebRtcIsacfix_Piecewise linear cdf */
+ cdfLo = WebRtcIsacfix_Piecewise(WEBRTC_SPL_MUL_16_U16(*dataQ7 - 64, *envQ8));
+ cdfHi = WebRtcIsacfix_Piecewise(WEBRTC_SPL_MUL_16_U16(*dataQ7 + 64, *envQ8));
+
+ /* test and clip if probability gets too small */
+ while ((cdfLo + 1) >= cdfHi) {
+ /* clip */
+ if (*dataQ7 > 0) {
+ *dataQ7 -= 128;
+ cdfHi = cdfLo;
+ cdfLo = WebRtcIsacfix_Piecewise(
+ WEBRTC_SPL_MUL_16_U16(*dataQ7 - 64, *envQ8));
+ } else {
+ *dataQ7 += 128;
+ cdfLo = cdfHi;
+ cdfHi = WebRtcIsacfix_Piecewise(
+ WEBRTC_SPL_MUL_16_U16(*dataQ7 + 64, *envQ8));
+ }
+ }
+
+ dataQ7++;
+ /* increment only once per 4 iterations */
+ envQ8 += (k & 1) & (k >> 1);
+
+
+ /* update interval */
+ W_upper_LSB = (uint16_t)W_upper;
+ W_upper_MSB = (uint16_t)WEBRTC_SPL_RSHIFT_U32(W_upper, 16);
+ W_lower = WEBRTC_SPL_UMUL_32_16(cdfLo, W_upper_MSB);
+ W_lower += (cdfLo * W_upper_LSB) >> 16;
+ W_upper = WEBRTC_SPL_UMUL_32_16(cdfHi, W_upper_MSB);
+ W_upper += (cdfHi * W_upper_LSB) >> 16;
+
+ /* shift interval such that it begins at zero */
+ W_upper -= ++W_lower;
+
+ /* add integer to bitstream */
+ streamData->streamval += W_lower;
+
+ /* handle carry */
+ if (streamData->streamval < W_lower)
+ {
+ /* propagate carry */
+ streamPtrCarry = streamPtr;
+ if (streamData->full == 0) {
+ negcarry = *streamPtrCarry;
+ negcarry += 0x0100;
+ *streamPtrCarry = negcarry;
+ while (!(negcarry))
+ {
+ negcarry = *--streamPtrCarry;
+ negcarry++;
+ *streamPtrCarry = negcarry;
+ }
+ } else {
+ while (!(++(*--streamPtrCarry)));
+ }
+ }
+
+ /* renormalize interval, store most significant byte of streamval and update streamval
+ * W_upper < 2^24 */
+ while ( !(W_upper & 0xFF000000) )
+ {
+ W_upper <<= 8;
+ if (streamData->full == 0) {
+ *streamPtr++ += (uint16_t) WEBRTC_SPL_RSHIFT_U32(
+ streamData->streamval, 24);
+ streamData->full = 1;
+ } else {
+ *streamPtr = (uint16_t)((streamData->streamval >> 24) << 8);
+ streamData->full = 0;
+ }
+
+ if( streamPtr > maxStreamPtr )
+ return -ISAC_DISALLOWED_BITSTREAM_LENGTH;
+
+ streamData->streamval <<= 8;
+ }
+ }
+
+ /* calculate new stream_index */
+ streamData->stream_index = streamPtr - streamData->stream;
+ streamData->W_upper = W_upper;
+
+ return 0;
+}
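+
+/* Note on the envQ8 stride above: (k & 1) & (k >> 1) is 1 only when
+ * k % 4 == 3, so envQ8 advances once per four coded samples - samples
+ * 0..3 share envQ8[0], samples 4..7 share envQ8[1], and so on. */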
+
+
+/****************************************************************************
+ * WebRtcIsacfix_DecLogisticMulti2(...)
+ *
+ * Arithmetic decoding of spectrum.
+ *
+ * Input:
+ * - streamData : in-/output struct containing bitstream
+ * - envQ8 : side info vector defining the width of the pdf
+ * in Q8
+ * - lenData : data vector length
+ *
+ * Input/Output:
+ * - dataQ7 : input: dither vector, output: data vector
+ *
+ * Return value : number of bytes in the stream so far
+ * -1 if error detected
+ */
+int WebRtcIsacfix_DecLogisticMulti2(int16_t *dataQ7,
+ Bitstr_dec *streamData,
+ const int32_t *envQ8,
+ const int16_t lenData)
+{
+ uint32_t W_lower;
+ uint32_t W_upper;
+ uint32_t W_tmp;
+ uint16_t W_upper_LSB;
+ uint16_t W_upper_MSB;
+ uint32_t streamVal;
+ uint16_t cdfTmp;
+ int32_t res;
+ int32_t inSqrt;
+ int32_t newRes;
+ const uint16_t *streamPtr;
+ int16_t candQ7;
+ int16_t envCount;
+ uint16_t tmpARSpecQ8 = 0;
+ int k, i;
+ int offset = 0;
+
+ /* point to beginning of stream buffer */
+ streamPtr = streamData->stream + streamData->stream_index;
+ W_upper = streamData->W_upper;
+
+  /* Check if it is the first time the decoder is called for this stream */
+ if (streamData->stream_index == 0)
+ {
+ /* read first word from bytestream */
+ streamVal = (uint32_t)(*streamPtr++) << 16;
+ streamVal |= *streamPtr++;
+
+ } else {
+ streamVal = streamData->streamval;
+ }
+
+
+ res = 1 << (WebRtcSpl_GetSizeInBits(envQ8[0]) >> 1);
+ envCount = 0;
+
+ /* code assumes lenData%4 == 0 */
+ for (k = 0; k < lenData; k += 4)
+ {
+ int k4;
+
+ /* convert to magnitude spectrum, by doing square-roots (modified from SPLIB) */
+ inSqrt = envQ8[envCount];
+ i = 10;
+
+      /* For safety reasons */
+ if (inSqrt < 0)
+ inSqrt=-inSqrt;
+
+ newRes = (inSqrt / res + res) >> 1;
+ do
+ {
+ res = newRes;
+ newRes = (inSqrt / res + res) >> 1;
+ } while (newRes != res && i-- > 0);
+
+ tmpARSpecQ8 = (uint16_t)newRes;
+
+ for(k4 = 0; k4 < 4; k4++)
+ {
+ /* find the integer *data for which streamVal lies in [W_lower+1, W_upper] */
+ W_upper_LSB = (uint16_t) (W_upper & 0x0000FFFF);
+ W_upper_MSB = (uint16_t) WEBRTC_SPL_RSHIFT_U32(W_upper, 16);
+
+      /* find first candidate by inverting the logistic cdf,
+       * using the input dither value collected from the io-stream */
+ candQ7 = - *dataQ7 + 64;
+ cdfTmp = WebRtcIsacfix_Piecewise(WEBRTC_SPL_MUL_16_U16(candQ7, tmpARSpecQ8));
+
+ W_tmp = (uint32_t)cdfTmp * W_upper_MSB;
+ W_tmp += ((uint32_t)cdfTmp * (uint32_t)W_upper_LSB) >> 16;
+
+ if (streamVal > W_tmp)
+ {
+ W_lower = W_tmp;
+ candQ7 += 128;
+ cdfTmp = WebRtcIsacfix_Piecewise(WEBRTC_SPL_MUL_16_U16(candQ7, tmpARSpecQ8));
+
+ W_tmp = (uint32_t)cdfTmp * W_upper_MSB;
+ W_tmp += ((uint32_t)cdfTmp * (uint32_t)W_upper_LSB) >> 16;
+
+ while (streamVal > W_tmp)
+ {
+ W_lower = W_tmp;
+ candQ7 += 128;
+ cdfTmp = WebRtcIsacfix_Piecewise(
+ WEBRTC_SPL_MUL_16_U16(candQ7, tmpARSpecQ8));
+
+ W_tmp = (uint32_t)cdfTmp * W_upper_MSB;
+ W_tmp += ((uint32_t)cdfTmp * (uint32_t)W_upper_LSB) >> 16;
+
+ /* error check */
+ if (W_lower == W_tmp) {
+ return -1;
+ }
+ }
+ W_upper = W_tmp;
+
+ /* Output value put in dataQ7: another sample decoded */
+ *dataQ7 = candQ7 - 64;
+ }
+ else
+ {
+ W_upper = W_tmp;
+ candQ7 -= 128;
+ cdfTmp = WebRtcIsacfix_Piecewise(WEBRTC_SPL_MUL_16_U16(candQ7, tmpARSpecQ8));
+
+ W_tmp = (uint32_t)cdfTmp * W_upper_MSB;
+ W_tmp += ((uint32_t)cdfTmp * (uint32_t)W_upper_LSB) >> 16;
+
+ while ( !(streamVal > W_tmp) )
+ {
+ W_upper = W_tmp;
+ candQ7 -= 128;
+ cdfTmp = WebRtcIsacfix_Piecewise(
+ WEBRTC_SPL_MUL_16_U16(candQ7, tmpARSpecQ8));
+
+ W_tmp = (uint32_t)cdfTmp * W_upper_MSB;
+ W_tmp += ((uint32_t)cdfTmp * (uint32_t)W_upper_LSB) >> 16;
+
+ /* error check */
+ if (W_upper == W_tmp){
+ return -1;
+ }
+ }
+ W_lower = W_tmp;
+
+ /* Output value put in dataQ7: another sample decoded */
+ *dataQ7 = candQ7 + 64;
+ }
+
+ dataQ7++;
+
+ /* shift interval to start at zero */
+ W_upper -= ++W_lower;
+
+ /* add integer to bitstream */
+ streamVal -= W_lower;
+
+ /* renormalize interval and update streamVal
+ * W_upper < 2^24 */
+ while ( !(W_upper & 0xFF000000) )
+ {
+ if (streamPtr < streamData->stream + streamData->stream_size) {
+ /* read next byte from stream */
+ if (streamData->full == 0) {
+ streamVal = (streamVal << 8) | (*streamPtr++ & 0x00FF);
+ streamData->full = 1;
+ } else {
+ streamVal = (streamVal << 8) | (*streamPtr >> 8);
+ streamData->full = 0;
+ }
+ } else {
+        /* Intending to read outside the stream. This can happen for the last
+         * two or three bytes; it is how the algorithm is implemented. Do
+         * not read from the bit stream; insert zeros instead. */
+ streamVal <<= 8;
+ if (streamData->full == 0) {
+ offset++; // We would have incremented the pointer in this case.
+ streamData->full = 1;
+ } else {
+ streamData->full = 0;
+ }
+ }
+ W_upper <<= 8;
+ }
+ }
+ envCount++;
+ }
+
+ streamData->stream_index = streamPtr + offset - streamData->stream;
+ streamData->W_upper = W_upper;
+ streamData->streamval = streamVal;
+
+ /* find number of bytes in original stream (determined by current interval width) */
+ if ( W_upper > 0x01FFFFFF )
+ return (streamData->stream_index*2 - 3 + !streamData->full);
+ else
+ return (streamData->stream_index*2 - 2 + !streamData->full);
+}
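+
+/* Usage sketch (illustrative; dither generation happens elsewhere and is
+ * assumed here): dataQ7 is an in/out buffer - on input it carries the
+ * dither values, on output the decoded samples.
+ *
+ *   int16_t specQ7[LEN];   // prefilled with dither, Q7; LEN % 4 == 0
+ *   int nbytes = WebRtcIsacfix_DecLogisticMulti2(specQ7, &bs, envQ8, LEN);
+ *   if (nbytes < 0) {  // -1: could not invert the cdf
+ *   }
+ */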
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/arith_routins.h b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/arith_routins.h
new file mode 100644
index 0000000000..d112bfe7f2
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/arith_routins.h
@@ -0,0 +1,149 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * arith_routins.h
+ *
+ * Functions for arithmetic coding.
+ *
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_ARITH_ROUTINS_H_
+#define MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_ARITH_ROUTINS_H_
+
+#include "modules/audio_coding/codecs/isac/fix/source/structs.h"
+
+/****************************************************************************
+ * WebRtcIsacfix_EncLogisticMulti2(...)
+ *
+ * Arithmetic coding of spectrum.
+ *
+ * Input:
+ * - streamData : in-/output struct containing bitstream
+ * - dataQ7 : data vector in Q7
+ * - envQ8 : side info vector defining the width of the pdf
+ * in Q8
+ * - lenData : data vector length
+ *
+ * Return value : 0 if ok,
+ * <0 otherwise.
+ */
+int WebRtcIsacfix_EncLogisticMulti2(Bitstr_enc* streamData,
+ int16_t* dataQ7,
+ const uint16_t* env,
+ int16_t lenData);
+
+/****************************************************************************
+ * WebRtcIsacfix_EncTerminate(...)
+ *
+ * Final call to the arithmetic coder for an encoder call. This function
+ * terminates the stream and returns the byte stream.
+ *
+ * Input:
+ * - streamData : in-/output struct containing bitstream
+ *
+ * Return value : number of bytes in the stream
+ */
+int16_t WebRtcIsacfix_EncTerminate(Bitstr_enc* streamData);
+
+/****************************************************************************
+ * WebRtcIsacfix_DecLogisticMulti2(...)
+ *
+ * Arithmetic decoding of spectrum.
+ *
+ * Input:
+ * - streamData : in-/output struct containing bitstream
+ * - envQ8 : side info vector defining the width of the pdf
+ * in Q8
+ * - lenData : data vector length
+ *
+ * Input/Output:
+ * - dataQ7 : input: dither vector, output: data vector, in Q7
+ *
+ * Return value : number of bytes in the stream so far
+ * <0 if error detected
+ */
+int WebRtcIsacfix_DecLogisticMulti2(int16_t* data,
+ Bitstr_dec* streamData,
+ const int32_t* env,
+ int16_t lenData);
+
+/****************************************************************************
+ * WebRtcIsacfix_EncHistMulti(...)
+ *
+ * Encode the histogram interval
+ *
+ * Input:
+ * - streamData : in-/output struct containing bitstream
+ * - data : data vector
+ * - cdf : array of cdf arrays
+ * - lenData : data vector length
+ *
+ * Return value : 0 if ok
+ * <0 if error detected
+ */
+int WebRtcIsacfix_EncHistMulti(Bitstr_enc* streamData,
+ const int16_t* data,
+ const uint16_t* const* cdf,
+ int16_t lenData);
+
+/****************************************************************************
+ * WebRtcIsacfix_DecHistBisectMulti(...)
+ *
+ * Function to decode more symbols from the arithmetic bytestream, using the
+ * method of bisection.
+ * Cdf tables should be of size 2^k-1 (which corresponds to an
+ * alphabet size of 2^k-2).
+ *
+ * Input:
+ * - streamData : in-/output struct containing bitstream
+ * - cdf : array of cdf arrays
+ * - cdfSize : array of cdf table sizes+1 (power of two: 2^k)
+ * - lenData : data vector length
+ *
+ * Output:
+ * - data : data vector
+ *
+ * Return value : number of bytes in the stream
+ * <0 if error detected
+ */
+int16_t WebRtcIsacfix_DecHistBisectMulti(int16_t* data,
+ Bitstr_dec* streamData,
+ const uint16_t* const* cdf,
+ const uint16_t* cdfSize,
+ int16_t lenData);
+
+/****************************************************************************
+ * WebRtcIsacfix_DecHistOneStepMulti(...)
+ *
+ * Function to decode more symbols from the arithmetic bytestream, taking
+ * single step up or down at a time.
+ * cdf tables can be of arbitrary size, but large tables may take a lot of
+ * iterations.
+ *
+ * Input:
+ * - streamData : in-/output struct containing bitstream
+ * - cdf : array of cdf arrays
+ * - initIndex : vector of initial cdf table search entries
+ * - lenData : data vector length
+ *
+ * Output:
+ * - data : data vector
+ *
+ * Return value : number of bytes in original stream
+ * <0 if error detected
+ */
+int16_t WebRtcIsacfix_DecHistOneStepMulti(int16_t* data,
+ Bitstr_dec* streamData,
+ const uint16_t* const* cdf,
+ const uint16_t* initIndex,
+ int16_t lenData);
+
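+/* Typical calling order (an illustrative sketch, not an API contract): on
+ * the encoder side, any mix of WebRtcIsacfix_EncHistMulti() and
+ * WebRtcIsacfix_EncLogisticMulti2() calls is finished by a single
+ * WebRtcIsacfix_EncTerminate(); on the decoder side, the matching
+ * WebRtcIsacfix_DecHist*Multi() / WebRtcIsacfix_DecLogisticMulti2() calls
+ * consume the received bytes using identical cdf tables. */
+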
+#endif /* MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_ARITH_ROUTINS_H_ */
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/audio_decoder_isacfix.cc b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/audio_decoder_isacfix.cc
new file mode 100644
index 0000000000..21259ee2e2
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/audio_decoder_isacfix.cc
@@ -0,0 +1,20 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/isac/fix/include/audio_decoder_isacfix.h"
+
+#include "modules/audio_coding/codecs/isac/audio_decoder_isac_t_impl.h"
+
+namespace webrtc {
+
+// Explicit instantiation:
+template class AudioDecoderIsacT<IsacFix>;
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/audio_encoder_isacfix.cc b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/audio_encoder_isacfix.cc
new file mode 100644
index 0000000000..0190ab91b6
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/audio_encoder_isacfix.cc
@@ -0,0 +1,20 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/isac/fix/include/audio_encoder_isacfix.h"
+
+#include "modules/audio_coding/codecs/isac/audio_encoder_isac_t_impl.h"
+
+namespace webrtc {
+
+// Explicit instantiation:
+template class AudioEncoderIsacT<IsacFix>;
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/bandwidth_estimator.c b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/bandwidth_estimator.c
new file mode 100644
index 0000000000..8845357d59
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/bandwidth_estimator.c
@@ -0,0 +1,1021 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * bandwidth_estimator.c
+ *
+ * This file contains the code for the Bandwidth Estimator designed
+ * for iSAC.
+ *
+ * NOTE! Casts needed for C55, do not remove!
+ *
+ */
+
+#include "modules/audio_coding/codecs/isac/fix/source/bandwidth_estimator.h"
+
+#include "modules/audio_coding/codecs/isac/fix/source/settings.h"
+#include "rtc_base/checks.h"
+
+/* array of quantization levels for bottle neck info; Matlab code: */
+/* sprintf('%4.1ff, ', logspace(log10(5000), log10(40000), 12)) */
+static const int16_t kQRateTable[12] = {
+ 10000, 11115, 12355, 13733, 15265, 16967,
+ 18860, 20963, 23301, 25900, 28789, 32000
+};
+
+/* 0.1 times the values in the table kQRateTable */
+/* values are in Q16 */
+static const int32_t KQRate01[12] = {
+ 65536000, 72843264, 80969728, 90000589, 100040704, 111194931,
+ 123600896, 137383117, 152705434, 169738240, 188671590, 209715200
+};
+
+/* Bits per byte per second:
+ * 8 bits/byte * 1000 msec/sec * 1/framelength (in msec) -> bits/(byte*sec).
+ * The frame length will be either 30 or 60 msec. 8738 is 1/60 in Q19 and 1/30 in Q18.
+ * The following number is thus either in Q15 or Q14, depending on the current frame length. */
+static const int32_t kBitsByteSec = 4369000;
+
+/* Received header rate. First value is for 30 ms packets and second for 60 ms */
+static const int16_t kRecHeaderRate[2] = {
+ 9333, 4666
+};
+
+/* Inverted minimum and maximum bandwidth in Q30.
+ minBwInv 30 ms, maxBwInv 30 ms,
+   minBwInv 60 ms, maxBwInv 60 ms
+*/
+static const int32_t kInvBandwidth[4] = {
+ 55539, 25978,
+ 73213, 29284
+};
+
+/* Number of samples in 25 msec */
+static const int32_t kSamplesIn25msec = 400;
+
+
+/****************************************************************************
+ * WebRtcIsacfix_InitBandwidthEstimator(...)
+ *
+ * This function initializes the struct for the bandwidth estimator
+ *
+ * Input/Output:
+ * - bweStr : Struct containing bandwidth information.
+ *
+ * Return value : 0
+ */
+int32_t WebRtcIsacfix_InitBandwidthEstimator(BwEstimatorstr *bweStr)
+{
+ bweStr->prevFrameSizeMs = INIT_FRAME_LEN;
+ bweStr->prevRtpNumber = 0;
+ bweStr->prevSendTime = 0;
+ bweStr->prevArrivalTime = 0;
+ bweStr->prevRtpRate = 1;
+ bweStr->lastUpdate = 0;
+ bweStr->lastReduction = 0;
+ bweStr->countUpdates = -9;
+
+ /* INIT_BN_EST = 20000
+ * INIT_BN_EST_Q7 = 2560000
+ * INIT_HDR_RATE = 4666
+ * INIT_REC_BN_EST_Q5 = 789312
+ *
+ * recBwInv = 1/(INIT_BN_EST + INIT_HDR_RATE) in Q30
+ * recBwAvg = INIT_BN_EST + INIT_HDR_RATE in Q5
+ */
+ bweStr->recBwInv = 43531;
+ bweStr->recBw = INIT_BN_EST;
+ bweStr->recBwAvgQ = INIT_BN_EST_Q7;
+ bweStr->recBwAvg = INIT_REC_BN_EST_Q5;
+ bweStr->recJitter = (int32_t) 327680; /* 10 in Q15 */
+ bweStr->recJitterShortTerm = 0;
+ bweStr->recJitterShortTermAbs = (int32_t) 40960; /* 5 in Q13 */
+ bweStr->recMaxDelay = (int32_t) 10;
+ bweStr->recMaxDelayAvgQ = (int32_t) 5120; /* 10 in Q9 */
+ bweStr->recHeaderRate = INIT_HDR_RATE;
+ bweStr->countRecPkts = 0;
+ bweStr->sendBwAvg = INIT_BN_EST_Q7;
+ bweStr->sendMaxDelayAvg = (int32_t) 5120; /* 10 in Q9 */
+
+ bweStr->countHighSpeedRec = 0;
+ bweStr->highSpeedRec = 0;
+ bweStr->countHighSpeedSent = 0;
+ bweStr->highSpeedSend = 0;
+ bweStr->inWaitPeriod = 0;
+
+ /* Find the inverse of the max bw and min bw in Q30
+ * (1 / (MAX_ISAC_BW + INIT_HDR_RATE) in Q30
+ * (1 / (MIN_ISAC_BW + INIT_HDR_RATE) in Q30
+ */
+ bweStr->maxBwInv = kInvBandwidth[3];
+ bweStr->minBwInv = kInvBandwidth[2];
+
+ bweStr->external_bw_info.in_use = 0;
+
+ return 0;
+}
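+
+/* Worked example for the Q30 inverse above: with INIT_BN_EST = 20000 and
+ * INIT_HDR_RATE = 4666, 1/(20000 + 4666) in Q30 is
+ * 1073741824 / 24666 = 43531, which is exactly the constant stored in
+ * recBwInv. */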
+
+/****************************************************************************
+ * WebRtcIsacfix_UpdateUplinkBwImpl(...)
+ *
+ * This function updates bottle neck rate received from other side in payload
+ * and calculates a new bottle neck to send to the other side.
+ *
+ * Input/Output:
+ * - bweStr : struct containing bandwidth information.
+ * - rtpNumber : value from RTP packet, from NetEq
+ * - frameSize : length of signal frame in ms, from iSAC decoder
+ * - sendTime : value in RTP header giving send time in samples
+ *        - arrivalTime       : value given by timeGetTime(); time of arrival,
+ *                              in samples, of the packet from NetEq
+ * - pksize : size of packet in bytes, from NetEq
+ * - Index : integer (range 0...23) indicating bottle neck &
+ * jitter as estimated by other side
+ *
+ * Return value : 0 if everything went fine,
+ * -1 otherwise
+ */
+int32_t WebRtcIsacfix_UpdateUplinkBwImpl(BwEstimatorstr *bweStr,
+ const uint16_t rtpNumber,
+ const int16_t frameSize,
+ const uint32_t sendTime,
+ const uint32_t arrivalTime,
+ const size_t pksize,
+ const uint16_t Index)
+{
+ uint16_t weight = 0;
+ uint32_t currBwInv = 0;
+ uint16_t recRtpRate;
+ uint32_t arrTimeProj;
+ int32_t arrTimeDiff;
+ int32_t arrTimeNoise;
+ int32_t arrTimeNoiseAbs;
+ int32_t sendTimeDiff;
+
+ int32_t delayCorrFactor = DELAY_CORRECTION_MED;
+ int32_t lateDiff = 0;
+ int16_t immediateSet = 0;
+ int32_t frameSizeSampl;
+
+ int32_t temp;
+ int32_t msec;
+ uint32_t exponent;
+ uint32_t reductionFactor;
+ uint32_t numBytesInv;
+ int32_t sign;
+
+ uint32_t byteSecondsPerBit;
+ uint32_t tempLower;
+ uint32_t tempUpper;
+ int32_t recBwAvgInv;
+ int32_t numPktsExpected;
+
+ int16_t errCode;
+
+ RTC_DCHECK(!bweStr->external_bw_info.in_use);
+
+ /* UPDATE ESTIMATES FROM OTHER SIDE */
+
+ /* The function also checks if Index has a valid value */
+ errCode = WebRtcIsacfix_UpdateUplinkBwRec(bweStr, Index);
+  if (errCode < 0) {
+    return errCode;
+  }
+
+
+ /* UPDATE ESTIMATES ON THIS SIDE */
+
+ /* Bits per second per byte * 1/30 or 1/60 */
+ if (frameSize == 60) {
+ /* If frameSize changed since last call, from 30 to 60, recalculate some values */
+ if ( (frameSize != bweStr->prevFrameSizeMs) && (bweStr->countUpdates > 0)) {
+ bweStr->countUpdates = 10;
+ bweStr->recHeaderRate = kRecHeaderRate[1];
+
+ bweStr->maxBwInv = kInvBandwidth[3];
+ bweStr->minBwInv = kInvBandwidth[2];
+ bweStr->recBwInv = 1073741824 / (bweStr->recBw + bweStr->recHeaderRate);
+ }
+
+ /* kBitsByteSec is in Q15 */
+ recRtpRate = (int16_t)((kBitsByteSec * pksize) >> 15) +
+ bweStr->recHeaderRate;
+
+ } else {
+ /* If frameSize changed since last call, from 60 to 30, recalculate some values */
+ if ( (frameSize != bweStr->prevFrameSizeMs) && (bweStr->countUpdates > 0)) {
+ bweStr->countUpdates = 10;
+ bweStr->recHeaderRate = kRecHeaderRate[0];
+
+ bweStr->maxBwInv = kInvBandwidth[1];
+ bweStr->minBwInv = kInvBandwidth[0];
+ bweStr->recBwInv = 1073741824 / (bweStr->recBw + bweStr->recHeaderRate);
+ }
+
+ /* kBitsByteSec is in Q14 */
+ recRtpRate = (uint16_t)((kBitsByteSec * pksize) >> 14) +
+ bweStr->recHeaderRate;
+ }
+
+
+ /* Check for timer wrap-around */
+ if (arrivalTime < bweStr->prevArrivalTime) {
+ bweStr->prevArrivalTime = arrivalTime;
+ bweStr->lastUpdate = arrivalTime;
+ bweStr->lastReduction = arrivalTime + FS3;
+
+ bweStr->countRecPkts = 0;
+
+ /* store frame size */
+ bweStr->prevFrameSizeMs = frameSize;
+
+ /* store far-side transmission rate */
+ bweStr->prevRtpRate = recRtpRate;
+
+ /* store far-side RTP time stamp */
+ bweStr->prevRtpNumber = rtpNumber;
+
+ return 0;
+ }
+
+ bweStr->countRecPkts++;
+
+ /* Calculate framesize in msec */
+ frameSizeSampl = SAMPLES_PER_MSEC * frameSize;
+
+ /* Check that it's not one of the first 9 packets */
+ if ( bweStr->countUpdates > 0 ) {
+
+ /* Stay in Wait Period for 1.5 seconds (no updates in wait period) */
+ if(bweStr->inWaitPeriod) {
+      if ((arrivalTime - bweStr->startWaitPeriod) > FS_1_HALF) {
+ bweStr->inWaitPeriod = 0;
+ }
+ }
+
+    /* If the estimate has not been updated for a long time, reduce the BN estimate */
+
+ /* Check send time difference between this packet and previous received */
+ sendTimeDiff = sendTime - bweStr->prevSendTime;
+ if (sendTimeDiff <= frameSizeSampl * 2) {
+
+      /* Only update if 3 seconds have passed since the last update */
+ if ((arrivalTime - bweStr->lastUpdate) > FS3) {
+
+ /* Calculate expected number of received packets since last update */
+ numPktsExpected = (arrivalTime - bweStr->lastUpdate) / frameSizeSampl;
+
+ /* If received number of packets is more than 90% of expected (922 = 0.9 in Q10): */
+ /* do the update, else not */
+ if ((int32_t)bweStr->countRecPkts << 10 > 922 * numPktsExpected) {
+ /* Q4 chosen to approx dividing by 16 */
+ msec = (arrivalTime - bweStr->lastReduction);
+
+          /* the number below represents 13 seconds, highly unlikely,
+             but it ensures no overflow when the reduction factor is multiplied by the recBw inverse */
+ if (msec > 208000) {
+ msec = 208000;
+ }
+
+ /* Q20 2^(negative number: - 76/1048576) = .99995
+ product is Q24 */
+ exponent = WEBRTC_SPL_UMUL(0x0000004C, msec);
+
+ /* do the approx with positive exponent so that value is actually rf^-1
+ and multiply by bw inverse */
+ reductionFactor = WEBRTC_SPL_RSHIFT_U32(0x01000000 | (exponent & 0x00FFFFFF),
+ WEBRTC_SPL_RSHIFT_U32(exponent, 24));
+
+ /* reductionFactor in Q13 */
+ reductionFactor = WEBRTC_SPL_RSHIFT_U32(reductionFactor, 11);
+
+ if ( reductionFactor != 0 ) {
+ bweStr->recBwInv = WEBRTC_SPL_MUL((int32_t)bweStr->recBwInv, (int32_t)reductionFactor);
+ bweStr->recBwInv = (int32_t)bweStr->recBwInv >> 13;
+
+ } else {
+ static const uint32_t kInitRate = INIT_BN_EST + INIT_HDR_RATE;
+            /* recBwInv = 1 / kInitRate in Q30 */
+ bweStr->recBwInv = (1073741824 + kInitRate / 2) / kInitRate;
+ }
+
+ /* reset time-since-update counter */
+ bweStr->lastReduction = arrivalTime;
+ } else {
+            /* Delay the next reduction by 3 seconds */
+ bweStr->lastReduction = arrivalTime + FS3;
+ bweStr->lastUpdate = arrivalTime;
+ bweStr->countRecPkts = 0;
+ }
+ }
+ } else {
+ bweStr->lastReduction = arrivalTime + FS3;
+ bweStr->lastUpdate = arrivalTime;
+ bweStr->countRecPkts = 0;
+ }
+
+
+ /* update only if previous packet was not lost */
+ if ( rtpNumber == bweStr->prevRtpNumber + 1 ) {
+ arrTimeDiff = arrivalTime - bweStr->prevArrivalTime;
+
+ if (!(bweStr->highSpeedSend && bweStr->highSpeedRec)) {
+ if (arrTimeDiff > frameSizeSampl) {
+ if (sendTimeDiff > 0) {
+ lateDiff = arrTimeDiff - sendTimeDiff - frameSizeSampl * 2;
+ } else {
+ lateDiff = arrTimeDiff - frameSizeSampl;
+ }
+
+ /* 8000 is 1/2 second (in samples at FS) */
+ if (lateDiff > 8000) {
+ delayCorrFactor = (int32_t) DELAY_CORRECTION_MAX;
+ bweStr->inWaitPeriod = 1;
+ bweStr->startWaitPeriod = arrivalTime;
+ immediateSet = 1;
+ } else if (lateDiff > 5120) {
+ delayCorrFactor = (int32_t) DELAY_CORRECTION_MED;
+ immediateSet = 1;
+ bweStr->inWaitPeriod = 1;
+ bweStr->startWaitPeriod = arrivalTime;
+ }
+ }
+ }
+
+ if ((bweStr->prevRtpRate > (int32_t)bweStr->recBwAvg >> 5) &&
+ (recRtpRate > (int32_t)bweStr->recBwAvg >> 5) &&
+ !bweStr->inWaitPeriod) {
+
+ /* test if still in initiation period and increment counter */
+ if (bweStr->countUpdates++ > 99) {
+ /* constant weight after initiation part, 0.01 in Q13 */
+ weight = (uint16_t) 82;
+ } else {
+ /* weight decreases with number of updates, 1/countUpdates in Q13 */
+ weight = (uint16_t) WebRtcSpl_DivW32W16(
+ 8192 + (bweStr->countUpdates >> 1),
+ (int16_t)bweStr->countUpdates);
+ }
+
+ /* Bottle Neck Estimation */
+
+ /* limit outliers, if more than 25 ms too much */
+ if (arrTimeDiff > frameSizeSampl + kSamplesIn25msec) {
+ arrTimeDiff = frameSizeSampl + kSamplesIn25msec;
+ }
+
+        /* don't allow it to be less than the frame length minus 10 ms */
+ if (arrTimeDiff < frameSizeSampl - FRAMESAMPLES_10ms) {
+ arrTimeDiff = frameSizeSampl - FRAMESAMPLES_10ms;
+ }
+
+ /* compute inverse receiving rate for last packet, in Q19 */
+ numBytesInv = (uint16_t) WebRtcSpl_DivW32W16(
+ (int32_t)(524288 + ((pksize + HEADER_SIZE) >> 1)),
+ (int16_t)(pksize + HEADER_SIZE));
+
+ /* 8389 is ~ 1/128000 in Q30 */
+ byteSecondsPerBit = (uint32_t)(arrTimeDiff * 8389);
+
+ /* get upper N bits */
+ tempUpper = WEBRTC_SPL_RSHIFT_U32(byteSecondsPerBit, 15);
+
+ /* get lower 15 bits */
+ tempLower = byteSecondsPerBit & 0x00007FFF;
+
+ tempUpper = WEBRTC_SPL_MUL(tempUpper, numBytesInv);
+ tempLower = WEBRTC_SPL_MUL(tempLower, numBytesInv);
+ tempLower = WEBRTC_SPL_RSHIFT_U32(tempLower, 15);
+
+ currBwInv = tempUpper + tempLower;
+ currBwInv = WEBRTC_SPL_RSHIFT_U32(currBwInv, 4);
+
+ /* Limit inv rate. Note that minBwInv > maxBwInv! */
+ if(currBwInv < bweStr->maxBwInv) {
+ currBwInv = bweStr->maxBwInv;
+ } else if(currBwInv > bweStr->minBwInv) {
+ currBwInv = bweStr->minBwInv;
+ }
+
+ /* update bottle neck rate estimate */
+ bweStr->recBwInv = WEBRTC_SPL_UMUL(weight, currBwInv) +
+ WEBRTC_SPL_UMUL((uint32_t) 8192 - weight, bweStr->recBwInv);
+
+ /* Shift back to Q30 from Q40 (actual used bits shouldn't be more than 27 based on minBwInv)
+ up to 30 bits used with Q13 weight */
+ bweStr->recBwInv = WEBRTC_SPL_RSHIFT_U32(bweStr->recBwInv, 13);
+
+ /* reset time-since-update counter */
+ bweStr->lastUpdate = arrivalTime;
+ bweStr->lastReduction = arrivalTime + FS3;
+ bweStr->countRecPkts = 0;
+
+ /* to save resolution compute the inverse of recBwAvg in Q26 by left shifting numerator to 2^31
+ and NOT right shifting recBwAvg 5 bits to an integer
+ At max 13 bits are used
+ shift to Q5 */
+ recBwAvgInv = (0x80000000 + bweStr->recBwAvg / 2) / bweStr->recBwAvg;
+
+ /* Calculate Projected arrival time difference */
+
+ /* The numerator of the quotient can be 22 bits so right shift inv by 4 to avoid overflow
+ result in Q22 */
+ arrTimeProj = WEBRTC_SPL_MUL((int32_t)8000, recBwAvgInv);
+ /* shift to Q22 */
+ arrTimeProj = WEBRTC_SPL_RSHIFT_U32(arrTimeProj, 4);
+        /* complete calculation */
+ arrTimeProj = WEBRTC_SPL_MUL(((int32_t)pksize + HEADER_SIZE), arrTimeProj);
+ /* shift to Q10 */
+ arrTimeProj = WEBRTC_SPL_RSHIFT_U32(arrTimeProj, 12);
+
+ /* difference between projected and actual arrival time differences */
+ /* Q9 (only shift arrTimeDiff by 5 to simulate divide by 16 (need to revisit if change sampling rate) DH */
+ if ((arrTimeDiff << 6) > (int32_t)arrTimeProj) {
+ arrTimeNoise = (arrTimeDiff << 6) - arrTimeProj;
+ sign = 1;
+ } else {
+ arrTimeNoise = arrTimeProj - (arrTimeDiff << 6);
+ sign = -1;
+ }
+
+ /* Q9 */
+ arrTimeNoiseAbs = arrTimeNoise;
+
+ /* long term averaged absolute jitter, Q15 */
+ weight >>= 3;
+ bweStr->recJitter = weight * (arrTimeNoiseAbs << 5) +
+ (1024 - weight) * bweStr->recJitter;
+
+ /* remove the fractional portion */
+ bweStr->recJitter >>= 10;
+
+ /* Maximum jitter is 10 msec in Q15 */
+ if (bweStr->recJitter > (int32_t)327680) {
+ bweStr->recJitter = (int32_t)327680;
+ }
+
+ /* short term averaged absolute jitter */
+ /* Calculation in Q13 products in Q23 */
+ bweStr->recJitterShortTermAbs = 51 * (arrTimeNoiseAbs << 3) +
+ WEBRTC_SPL_MUL(973, bweStr->recJitterShortTermAbs);
+ bweStr->recJitterShortTermAbs >>= 10;
+
+ /* short term averaged jitter */
+ /* Calculation in Q13 products in Q23 */
+ bweStr->recJitterShortTerm = 205 * (arrTimeNoise << 3) * sign +
+ WEBRTC_SPL_MUL(3891, bweStr->recJitterShortTerm);
+
+ if (bweStr->recJitterShortTerm < 0) {
+ temp = -bweStr->recJitterShortTerm;
+ temp >>= 12;
+ bweStr->recJitterShortTerm = -temp;
+ } else {
+ bweStr->recJitterShortTerm >>= 12;
+ }
+ }
+ }
+ } else {
+ /* reset time-since-update counter when receiving the first 9 packets */
+ bweStr->lastUpdate = arrivalTime;
+ bweStr->lastReduction = arrivalTime + FS3;
+ bweStr->countRecPkts = 0;
+ bweStr->countUpdates++;
+ }
+
+ /* Limit to minimum or maximum bottle neck rate (in Q30) */
+ if (bweStr->recBwInv > bweStr->minBwInv) {
+ bweStr->recBwInv = bweStr->minBwInv;
+ } else if (bweStr->recBwInv < bweStr->maxBwInv) {
+ bweStr->recBwInv = bweStr->maxBwInv;
+ }
+
+
+ /* store frame length */
+ bweStr->prevFrameSizeMs = frameSize;
+
+ /* store far-side transmission rate */
+ bweStr->prevRtpRate = recRtpRate;
+
+ /* store far-side RTP time stamp */
+ bweStr->prevRtpNumber = rtpNumber;
+
+ /* Replace bweStr->recMaxDelay by the new value (atomic operation) */
+ if (bweStr->prevArrivalTime != 0xffffffff) {
+ bweStr->recMaxDelay = WEBRTC_SPL_MUL(3, bweStr->recJitter);
+ }
+
+ /* store arrival time stamp */
+ bweStr->prevArrivalTime = arrivalTime;
+ bweStr->prevSendTime = sendTime;
+
+ /* Replace bweStr->recBw by the new value */
+ bweStr->recBw = 1073741824 / bweStr->recBwInv - bweStr->recHeaderRate;
+
+ if (immediateSet) {
+ /* delay correction factor is in Q10 */
+ bweStr->recBw = WEBRTC_SPL_UMUL(delayCorrFactor, bweStr->recBw);
+ bweStr->recBw = WEBRTC_SPL_RSHIFT_U32(bweStr->recBw, 10);
+
+ if (bweStr->recBw < (int32_t) MIN_ISAC_BW) {
+ bweStr->recBw = (int32_t) MIN_ISAC_BW;
+ }
+
+ bweStr->recBwAvg = (bweStr->recBw + bweStr->recHeaderRate) << 5;
+
+ bweStr->recBwAvgQ = bweStr->recBw << 7;
+
+ bweStr->recJitterShortTerm = 0;
+
+ bweStr->recBwInv = 1073741824 / (bweStr->recBw + bweStr->recHeaderRate);
+
+ immediateSet = 0;
+ }
+
+
+ return 0;
+}
+
+/* This function updates the send bottle neck rate */
+/* Index - integer (range 0...23) indicating bottle neck & jitter as estimated by other side */
+/* returns 0 if everything went fine, -1 otherwise */
+int16_t WebRtcIsacfix_UpdateUplinkBwRec(BwEstimatorstr *bweStr,
+ const int16_t Index)
+{
+ uint16_t RateInd;
+
+ RTC_DCHECK(!bweStr->external_bw_info.in_use);
+
+ if ( (Index < 0) || (Index > 23) ) {
+ return -ISAC_RANGE_ERROR_BW_ESTIMATOR;
+ }
+
+ /* UPDATE ESTIMATES FROM OTHER SIDE */
+
+ if ( Index > 11 ) {
+ RateInd = Index - 12;
+ /* compute the jitter estimate as decoded on the other side in Q9 */
+ /* sendMaxDelayAvg = 0.9 * sendMaxDelayAvg + 0.1 * MAX_ISAC_MD */
+ bweStr->sendMaxDelayAvg = WEBRTC_SPL_MUL(461, bweStr->sendMaxDelayAvg) +
+ 51 * (MAX_ISAC_MD << 9);
+ bweStr->sendMaxDelayAvg >>= 9;
+
+ } else {
+ RateInd = Index;
+ /* compute the jitter estimate as decoded on the other side in Q9 */
+ /* sendMaxDelayAvg = 0.9 * sendMaxDelayAvg + 0.1 * MIN_ISAC_MD */
+ bweStr->sendMaxDelayAvg = WEBRTC_SPL_MUL(461, bweStr->sendMaxDelayAvg) +
+ 51 * (MIN_ISAC_MD << 9);
+ bweStr->sendMaxDelayAvg >>= 9;
+
+ }
+
+
+ /* compute the BN estimate as decoded on the other side */
+ /* sendBwAvg = 0.9 * sendBwAvg + 0.1 * kQRateTable[RateInd]; */
+ bweStr->sendBwAvg = 461 * bweStr->sendBwAvg +
+ 51 * ((uint32_t)kQRateTable[RateInd] << 7);
+ bweStr->sendBwAvg = WEBRTC_SPL_RSHIFT_U32(bweStr->sendBwAvg, 9);
+
+
+ if (WEBRTC_SPL_RSHIFT_U32(bweStr->sendBwAvg, 7) > 28000 && !bweStr->highSpeedSend) {
+ bweStr->countHighSpeedSent++;
+
+ /* approx 2 seconds with 30ms frames */
+ if (bweStr->countHighSpeedSent >= 66) {
+ bweStr->highSpeedSend = 1;
+ }
+ } else if (!bweStr->highSpeedSend) {
+ bweStr->countHighSpeedSent = 0;
+ }
+
+ return 0;
+}
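+
+/* Note on the averaging above: the weights 461 and 51 are 0.9 and 0.1 in
+ * Q9 (461/512 = 0.9004, 51/512 = 0.0996), so each update is a fixed-point
+ * version of avg = 0.9 * avg + 0.1 * newValue, followed by a shift back
+ * by 9 bits. */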
+
+/****************************************************************************
+ * WebRtcIsacfix_GetDownlinkBwIndexImpl(...)
+ *
+ * This function calculates and returns the bandwidth/jitter estimation code
+ * (integer 0...23) to put in the sending iSAC payload.
+ *
+ * Input:
+ * - bweStr : BWE struct
+ *
+ * Return:
+ *      bandwidth and jitter index (0..23)
+ */
+uint16_t WebRtcIsacfix_GetDownlinkBwIndexImpl(BwEstimatorstr *bweStr)
+{
+ int32_t rate;
+ int32_t maxDelay;
+ uint16_t rateInd;
+ uint16_t maxDelayBit;
+ int32_t tempTerm1;
+ int32_t tempTerm2;
+ int32_t tempTermX;
+ int32_t tempTermY;
+ int32_t tempMin;
+ int32_t tempMax;
+
+ if (bweStr->external_bw_info.in_use)
+ return bweStr->external_bw_info.bottleneck_idx;
+
+ /* Get Rate Index */
+
+ /* Get unquantized rate. Always returns 10000 <= rate <= 32000 */
+ rate = WebRtcIsacfix_GetDownlinkBandwidth(bweStr);
+
+ /* Compute the averaged BN estimate on this side */
+
+ /* recBwAvg = 0.9 * recBwAvg + 0.1 * (rate + bweStr->recHeaderRate), 0.9 and 0.1 in Q9 */
+ bweStr->recBwAvg = 922 * bweStr->recBwAvg +
+ 102 * (((uint32_t)rate + bweStr->recHeaderRate) << 5);
+ bweStr->recBwAvg = WEBRTC_SPL_RSHIFT_U32(bweStr->recBwAvg, 10);
+
+ /* Find quantization index that gives the closest rate after averaging.
+ * Note that we don't need to check the last value, rate <= kQRateTable[11],
+ * because we will use rateInd = 11 even if rate > kQRateTable[11]. */
+ for (rateInd = 1; rateInd < 11; rateInd++) {
+ if (rate <= kQRateTable[rateInd]){
+ break;
+ }
+ }
+
+ /* find closest quantization index, and update quantized average by taking: */
+ /* 0.9*recBwAvgQ + 0.1*kQRateTable[rateInd] */
+
+ /* 0.9 times recBwAvgQ in Q16 */
+  /* 461/512 - 25/65536 = 0.900009 */
+ tempTerm1 = WEBRTC_SPL_MUL(bweStr->recBwAvgQ, 25);
+ tempTerm1 >>= 7;
+ tempTermX = WEBRTC_SPL_UMUL(461, bweStr->recBwAvgQ) - tempTerm1;
+
+ /* rate in Q16 */
+ tempTermY = rate << 16;
+
+ /* 0.1 * kQRateTable[rateInd] = KQRate01[rateInd] */
+ tempTerm1 = tempTermX + KQRate01[rateInd] - tempTermY;
+ tempTerm2 = tempTermY - tempTermX - KQRate01[rateInd-1];
+
+ /* Compare (0.9 * recBwAvgQ + 0.1 * kQRateTable[rateInd] - rate) >
+ (rate - 0.9 * recBwAvgQ - 0.1 * kQRateTable[rateInd-1]) */
+ if (tempTerm1 > tempTerm2) {
+ rateInd--;
+ }
+
+ /* Update quantized average by taking: */
+ /* 0.9*recBwAvgQ + 0.1*kQRateTable[rateInd] */
+
+ /* Add 0.1 times kQRateTable[rateInd], in Q16 */
+ tempTermX += KQRate01[rateInd];
+
+ /* Shift back to Q7 */
+ bweStr->recBwAvgQ = tempTermX >> 9;
+
+  /* Count consecutive received bandwidth above 28000 bps (28000 in Q7 = 3584000) */
+ /* If 66 high estimates in a row, set highSpeedRec to one */
+ /* 66 corresponds to ~2 seconds in 30 msec mode */
+ if ((bweStr->recBwAvgQ > 3584000) && !bweStr->highSpeedRec) {
+ bweStr->countHighSpeedRec++;
+ if (bweStr->countHighSpeedRec >= 66) {
+ bweStr->highSpeedRec = 1;
+ }
+ } else if (!bweStr->highSpeedRec) {
+ bweStr->countHighSpeedRec = 0;
+ }
+
+ /* Get Max Delay Bit */
+
+ /* get unquantized max delay */
+ maxDelay = WebRtcIsacfix_GetDownlinkMaxDelay(bweStr);
+
+ /* Update quantized max delay average */
+ tempMax = 652800; /* MAX_ISAC_MD * 0.1 in Q18 */
+ tempMin = 130560; /* MIN_ISAC_MD * 0.1 in Q18 */
+ tempTermX = WEBRTC_SPL_MUL((int32_t)bweStr->recMaxDelayAvgQ, (int32_t)461);
+ tempTermY = maxDelay << 18;
+
+ tempTerm1 = tempTermX + tempMax - tempTermY;
+ tempTerm2 = tempTermY - tempTermX - tempMin;
+
+ if ( tempTerm1 > tempTerm2) {
+ maxDelayBit = 0;
+ tempTerm1 = tempTermX + tempMin;
+
+ /* update quantized average, shift back to Q9 */
+ bweStr->recMaxDelayAvgQ = tempTerm1 >> 9;
+ } else {
+ maxDelayBit = 12;
+ tempTerm1 = tempTermX + tempMax;
+
+ /* update quantized average, shift back to Q9 */
+ bweStr->recMaxDelayAvgQ = tempTerm1 >> 9;
+ }
+
+  /* Return bandwidth and jitter index (0..23) */
+ return (uint16_t)(rateInd + maxDelayBit);
+}
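+
+/* Example of the index packing above (follows from the code): the low part
+ * rateInd (0..11) selects a rate from kQRateTable, and maxDelayBit adds 12
+ * when the high max-delay average wins, so e.g. an index of 15 decodes on
+ * the far side as RateInd = 3 with the MAX_ISAC_MD jitter estimate (see
+ * WebRtcIsacfix_UpdateUplinkBwRec above). */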
+
+/* get the bottle neck rate from far side to here, as estimated on this side */
+uint16_t WebRtcIsacfix_GetDownlinkBandwidth(const BwEstimatorstr *bweStr)
+{
+ uint32_t recBw;
+ int32_t jitter_sign; /* Q8 */
+ int32_t bw_adjust; /* Q16 */
+ int32_t rec_jitter_short_term_abs_inv; /* Q18 */
+ int32_t temp;
+
+ RTC_DCHECK(!bweStr->external_bw_info.in_use);
+
+ /* Q18 rec jitter short term abs is in Q13, multiply it by 2^13 to save precision
+ 2^18 then needs to be shifted 13 bits to 2^31 */
+ rec_jitter_short_term_abs_inv = 0x80000000u / bweStr->recJitterShortTermAbs;
+
+ /* Q27 = 9 + 18 */
+ jitter_sign = (bweStr->recJitterShortTerm >> 4) *
+ rec_jitter_short_term_abs_inv;
+
+ if (jitter_sign < 0) {
+ temp = -jitter_sign;
+ temp >>= 19;
+ jitter_sign = -temp;
+ } else {
+ jitter_sign >>= 19;
+ }
+
+ /* adjust bw proportionally to negative average jitter sign */
+ //bw_adjust = 1.0f - jitter_sign * (0.15f + 0.15f * jitter_sign * jitter_sign);
+ //Q8 -> Q16 .15 +.15 * jitter^2 first term is .15 in Q16 latter term is Q8*Q8*Q8
+ //38 in Q8 ~.15 9830 in Q16 ~.15
+ temp = 9830 + ((38 * jitter_sign * jitter_sign) >> 8);
+
+ if (jitter_sign < 0) {
+ temp = WEBRTC_SPL_MUL(jitter_sign, temp);
+ temp = -temp;
+ temp >>= 8;
+ bw_adjust = (uint32_t)65536 + temp; /* (1 << 16) + temp; */
+ } else {
+ /* (1 << 16) - ((jitter_sign * temp) >> 8); */
+ bw_adjust = 65536 - ((jitter_sign * temp) >> 8);
+ }
+
+ //make sure following multiplication won't overflow
+ //bw adjust now Q14
+ bw_adjust >>= 2; // See if good resolution is maintained.
+
+ /* adjust Rate if jitter sign is mostly constant */
+ recBw = WEBRTC_SPL_UMUL(bweStr->recBw, bw_adjust);
+
+ recBw >>= 14;
+
+ /* limit range of bottle neck rate */
+ if (recBw < MIN_ISAC_BW) {
+ recBw = MIN_ISAC_BW;
+ } else if (recBw > MAX_ISAC_BW) {
+ recBw = MAX_ISAC_BW;
+ }
+
+ return (uint16_t) recBw;
+}
+
+/* Returns the max delay (in ms) */
+int16_t WebRtcIsacfix_GetDownlinkMaxDelay(const BwEstimatorstr *bweStr)
+{
+ int16_t recMaxDelay = (int16_t)(bweStr->recMaxDelay >> 15);
+
+ RTC_DCHECK(!bweStr->external_bw_info.in_use);
+
+ /* limit range of jitter estimate */
+ if (recMaxDelay < MIN_ISAC_MD) {
+ recMaxDelay = MIN_ISAC_MD;
+ } else if (recMaxDelay > MAX_ISAC_MD) {
+ recMaxDelay = MAX_ISAC_MD;
+ }
+
+ return recMaxDelay;
+}
+
+/* Clamp val to the closed interval [min,max]. */
+static int16_t clamp(int16_t val, int16_t min, int16_t max) {
+ RTC_DCHECK_LE(min, max);
+ return val < min ? min : (val > max ? max : val);
+}
+
+int16_t WebRtcIsacfix_GetUplinkBandwidth(const BwEstimatorstr* bweStr) {
+ return bweStr->external_bw_info.in_use
+ ? bweStr->external_bw_info.send_bw_avg
+ : clamp(bweStr->sendBwAvg >> 7, MIN_ISAC_BW, MAX_ISAC_BW);
+}
+
+int16_t WebRtcIsacfix_GetUplinkMaxDelay(const BwEstimatorstr* bweStr) {
+ return bweStr->external_bw_info.in_use
+ ? bweStr->external_bw_info.send_max_delay_avg
+ : clamp(bweStr->sendMaxDelayAvg >> 9, MIN_ISAC_MD, MAX_ISAC_MD);
+}
+
+/*
+ * update long-term average bitrate and amount of data in buffer
+ * returns minimum payload size (bytes)
+ */
+uint16_t WebRtcIsacfix_GetMinBytes(RateModel *State,
+ int16_t StreamSize, /* bytes in bitstream */
+ const int16_t FrameSamples, /* samples per frame */
+ const int16_t BottleNeck, /* bottle neck rate; excl headers (bps) */
+ const int16_t DelayBuildUp) /* max delay from bottle neck buffering (ms) */
+{
+ int32_t MinRate = 0;
+ uint16_t MinBytes;
+ int16_t TransmissionTime;
+ int32_t inv_Q12;
+ int32_t den;
+
+
+ /* first 10 packets @ low rate, then INIT_BURST_LEN packets @ fixed rate of INIT_RATE bps */
+ if (State->InitCounter > 0) {
+ if (State->InitCounter-- <= INIT_BURST_LEN) {
+ MinRate = INIT_RATE;
+ } else {
+ MinRate = 0;
+ }
+ } else {
+ /* handle burst */
+ if (State->BurstCounter) {
+ if (State->StillBuffered <
+ (((512 - 512 / BURST_LEN) * DelayBuildUp) >> 9)) {
+ /* max bps derived from BottleNeck and DelayBuildUp values */
+ inv_Q12 = 4096 / (BURST_LEN * FrameSamples);
+ MinRate = (512 + SAMPLES_PER_MSEC * ((DelayBuildUp * inv_Q12) >> 3)) *
+ BottleNeck;
+ } else {
+ /* max bps derived from StillBuffered and DelayBuildUp values */
+ inv_Q12 = 4096 / FrameSamples;
+ if (DelayBuildUp > State->StillBuffered) {
+ MinRate = (512 + SAMPLES_PER_MSEC * (((DelayBuildUp -
+ State->StillBuffered) * inv_Q12) >> 3)) * BottleNeck;
+ } else if ((den = WEBRTC_SPL_MUL(SAMPLES_PER_MSEC, (State->StillBuffered - DelayBuildUp))) >= FrameSamples) {
+ /* MinRate will be negative here */
+ MinRate = 0;
+ } else {
+ MinRate = (512 - ((den * inv_Q12) >> 3)) * BottleNeck;
+ }
+ //if (MinRate < 1.04 * BottleNeck)
+ // MinRate = 1.04 * BottleNeck;
+ //Q9
+ if (MinRate < WEBRTC_SPL_MUL(532, BottleNeck)) {
+ MinRate += WEBRTC_SPL_MUL(22, BottleNeck);
+ }
+ }
+
+ State->BurstCounter--;
+ }
+ }
+
+
+ /* convert rate from bits/second to bytes/packet */
+ //round and shift before conversion
+ MinRate += 256;
+ MinRate >>= 9;
+ MinBytes = MinRate * FrameSamples / FS8;
+
+ /* StreamSize will be adjusted if less than MinBytes */
+ if (StreamSize < MinBytes) {
+ StreamSize = MinBytes;
+ }
+
+ /* keep track of when bottle neck was last exceeded by at least 1% */
+ //517/512 ~ 1.01
+ if ((StreamSize * (int32_t)FS8) / FrameSamples > (517 * BottleNeck) >> 9) {
+ if (State->PrevExceed) {
+ /* bottle_neck exceeded twice in a row, decrease ExceedAgo */
+ State->ExceedAgo -= BURST_INTERVAL / (BURST_LEN - 1);
+ if (State->ExceedAgo < 0) {
+ State->ExceedAgo = 0;
+ }
+ } else {
+ State->ExceedAgo += FrameSamples / SAMPLES_PER_MSEC; /* ms */
+ State->PrevExceed = 1;
+ }
+ } else {
+ State->PrevExceed = 0;
+ State->ExceedAgo += FrameSamples / SAMPLES_PER_MSEC; /* ms */
+ }
+
+ /* set burst flag if bottle neck not exceeded for long time */
+ if ((State->ExceedAgo > BURST_INTERVAL) && (State->BurstCounter == 0)) {
+ if (State->PrevExceed) {
+ State->BurstCounter = BURST_LEN - 1;
+ } else {
+ State->BurstCounter = BURST_LEN;
+ }
+ }
+
+
+ /* Update buffer delay */
+ TransmissionTime = (StreamSize * 8000) / BottleNeck; /* ms */
+ State->StillBuffered += TransmissionTime;
+ State->StillBuffered -= FrameSamples / SAMPLES_PER_MSEC; /* ms */
+ if (State->StillBuffered < 0) {
+ State->StillBuffered = 0;
+ }
+
+ if (State->StillBuffered > 2000) {
+ State->StillBuffered = 2000;
+ }
+
+ return MinBytes;
+}
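+
+/* Worked example for the conversion above (illustrative, assuming FS8
+ * stands for 8 * FS, i.e. 128000 at FS = 16000 Hz, which makes the units
+ * work out): a 480-sample frame lasts 30 ms, so MinRate = 32000 bps maps
+ * to 32000 * 480 / 128000 = 120 bytes of payload for that frame. */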
+
+
+/*
+ * update long-term average bitrate and amount of data in buffer
+ */
+void WebRtcIsacfix_UpdateRateModel(RateModel *State,
+ int16_t StreamSize, /* bytes in bitstream */
+ const int16_t FrameSamples, /* samples per frame */
+ const int16_t BottleNeck) /* bottle neck rate; excl headers (bps) */
+{
+ const int16_t TransmissionTime = (StreamSize * 8000) / BottleNeck; /* ms */
+
+ /* avoid the initial "high-rate" burst */
+ State->InitCounter = 0;
+
+ /* Update buffer delay */
+ State->StillBuffered += TransmissionTime;
+ State->StillBuffered -= FrameSamples >> 4; /* ms */
+ if (State->StillBuffered < 0) {
+ State->StillBuffered = 0;
+ }
+
+}
+
+
+void WebRtcIsacfix_InitRateModel(RateModel *State)
+{
+ State->PrevExceed = 0; /* boolean */
+ State->ExceedAgo = 0; /* ms */
+ State->BurstCounter = 0; /* packets */
+ State->InitCounter = INIT_BURST_LEN + 10; /* packets */
+ State->StillBuffered = 1; /* ms */
+}
+
+
+
+
+
+int16_t WebRtcIsacfix_GetNewFrameLength(int16_t bottle_neck, int16_t current_framesamples)
+{
+ int16_t new_framesamples;
+
+ new_framesamples = current_framesamples;
+
+ /* find new framelength */
+ switch(current_framesamples) {
+ case 480:
+ if (bottle_neck < Thld_30_60) {
+ new_framesamples = 960;
+ }
+ break;
+ case 960:
+ if (bottle_neck >= Thld_60_30) {
+ new_framesamples = 480;
+ }
+ break;
+ default:
+ new_framesamples = -1; /* Error */
+ }
+
+ return new_framesamples;
+}
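+
+/* Note the hysteresis above: at 480 samples (30 ms) the frame only grows
+ * to 960 samples when the bottleneck drops below Thld_30_60, while at 960
+ * samples (60 ms) it only shrinks back once the rate reaches Thld_60_30.
+ * Assuming the two thresholds are set apart, this keeps the frame length
+ * from oscillating when the rate hovers near a single threshold. */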
+
+int16_t WebRtcIsacfix_GetSnr(int16_t bottle_neck, int16_t framesamples)
+{
+ int16_t s2nr = 0;
+
+ /* find new SNR value */
+ //consider BottleNeck to be in Q10 ( * 1 in Q10)
+ switch(framesamples) {
+ // TODO(bjornv): The comments below confuse me. I don't know if there is a
+ // difference between frame lengths (in which case the implementation is
+ // wrong), or if it is frame-length independent, in which case we should
+ // correct the comment and simplify the implementation.
+ case 480:
+ /*s2nr = -1*(a_30 << 10) + ((b_30 * bottle_neck) >> 10);*/
+ s2nr = -22500 + (int16_t)(500 * bottle_neck >> 10);
+ break;
+ case 960:
+ /*s2nr = -1*(a_60 << 10) + ((b_60 * bottle_neck) >> 10);*/
+ s2nr = -22500 + (int16_t)(500 * bottle_neck >> 10);
+ break;
+ default:
+ s2nr = -1; /* Error */
+ }
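+
+ /* Arithmetic illustration (hypothetical input): bottle_neck = 32000
+ gives s2nr = -22500 + ((500 * 32000) >> 10) = -22500 + 15625 = -6875
+ in Q10. */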
+
+ return s2nr; //return in Q10
+
+}
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/bandwidth_estimator.h b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/bandwidth_estimator.h
new file mode 100644
index 0000000000..f106746f14
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/bandwidth_estimator.h
@@ -0,0 +1,128 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * bandwidth_estimator.h
+ *
+ * This header file contains the API for the Bandwidth Estimator
+ * designed for iSAC.
+ *
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_BANDWIDTH_ESTIMATOR_H_
+#define MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_BANDWIDTH_ESTIMATOR_H_
+
+#include "modules/audio_coding/codecs/isac/fix/source/structs.h"
+
+/****************************************************************************
+ * WebRtcIsacfix_InitBandwidthEstimator(...)
+ *
+ * This function initializes the struct for the bandwidth estimator
+ *
+ * Input/Output:
+ * - bwest_str : Struct containing bandwidth information.
+ *
+ * Return value : 0
+ */
+
+int32_t WebRtcIsacfix_InitBandwidthEstimator(BwEstimatorstr* bwest_str);
+
+/****************************************************************************
+ * WebRtcIsacfix_UpdateUplinkBwImpl(...)
+ *
+ * This function updates bottle neck rate received from other side in payload
+ * and calculates a new bottle neck to send to the other side.
+ *
+ * Input/Output:
+ * - bweStr : struct containing bandwidth information.
+ * - rtpNumber : value from RTP packet, from NetEq
+ * - frameSize : length of signal frame in ms, from iSAC decoder
+ * - sendTime : value in RTP header giving send time in samples
+ * - arrivalTime : arrival time of the packet in samples, as given by
+ * timeGetTime(), from NetEq
+ * - pksize : size of packet in bytes, from NetEq
+ * - Index : integer (range 0...23) indicating bottle neck &
+ * jitter as estimated by other side
+ *
+ * Return value : 0 if everything went fine,
+ * -1 otherwise
+ */
+
+int32_t WebRtcIsacfix_UpdateUplinkBwImpl(BwEstimatorstr* bwest_str,
+ uint16_t rtp_number,
+ int16_t frameSize,
+ uint32_t send_ts,
+ uint32_t arr_ts,
+ size_t pksize,
+ uint16_t Index);
+
+/* Update receiving estimates. Used when we only receive BWE index, no iSAC data
+ * packet. */
+int16_t WebRtcIsacfix_UpdateUplinkBwRec(BwEstimatorstr* bwest_str,
+ int16_t Index);
+
+/****************************************************************************
+ * WebRtcIsacfix_GetDownlinkBwIndexImpl(...)
+ *
+ * This function calculates and returns the bandwidth/jitter estimation code
+ * (integer 0...23) to put in the sending iSAC payload.
+ *
+ * Input:
+ * - bweStr : BWE struct
+ *
+ * Return:
+ * bandwidth and jitter index (0..23)
+ */
+uint16_t WebRtcIsacfix_GetDownlinkBwIndexImpl(BwEstimatorstr* bwest_str);
+
+/* Returns the bandwidth estimation (in bps) */
+uint16_t WebRtcIsacfix_GetDownlinkBandwidth(const BwEstimatorstr* bwest_str);
+
+/* Returns the bandwidth iSAC should send with, in bps */
+int16_t WebRtcIsacfix_GetUplinkBandwidth(const BwEstimatorstr* bwest_str);
+
+/* Returns the max delay (in ms) */
+int16_t WebRtcIsacfix_GetDownlinkMaxDelay(const BwEstimatorstr* bwest_str);
+
+/* Returns the max delay value from the other side in ms */
+int16_t WebRtcIsacfix_GetUplinkMaxDelay(const BwEstimatorstr* bwest_str);
+
+/*
+ * update amount of data in bottle neck buffer and burst handling
+ * returns minimum payload size (bytes)
+ */
+uint16_t WebRtcIsacfix_GetMinBytes(
+ RateModel* State,
+ int16_t StreamSize, /* bytes in bitstream */
+ int16_t FrameLen, /* ms per frame */
+ int16_t BottleNeck, /* bottle neck rate; excl headers (bps) */
+ int16_t DelayBuildUp); /* max delay from bottle neck buffering (ms) */
+
+/*
+ * update long-term average bitrate and amount of data in buffer
+ */
+void WebRtcIsacfix_UpdateRateModel(
+ RateModel* State,
+ int16_t StreamSize, /* bytes in bitstream */
+ int16_t FrameSamples, /* samples per frame */
+ int16_t BottleNeck); /* bottle neck rate; excl headers (bps) */
+
+void WebRtcIsacfix_InitRateModel(RateModel* State);
+
+/* Returns the new framelength value (input argument: bottle_neck) */
+int16_t WebRtcIsacfix_GetNewFrameLength(int16_t bottle_neck,
+ int16_t current_framelength);
+
+/* Returns the new SNR value (input argument: bottle_neck) */
+// returns snr in Q10
+int16_t WebRtcIsacfix_GetSnr(int16_t bottle_neck, int16_t framesamples);
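+
+/*
+ * Illustrative call sequence (a sketch of assumed usage, not a contract
+ * stated by this header):
+ *
+ *   BwEstimatorstr bwe;
+ *   WebRtcIsacfix_InitBandwidthEstimator(&bwe);
+ *   // For each received iSAC packet:
+ *   WebRtcIsacfix_UpdateUplinkBwImpl(&bwe, rtp_number, frame_ms,
+ *                                    send_ts, arr_ts, pksize, index);
+ *   // When building the next outgoing payload:
+ *   uint16_t bw_index = WebRtcIsacfix_GetDownlinkBwIndexImpl(&bwe);
+ */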
+
+#endif /* MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_BANDWIDTH_ESTIMATOR_H_ \
+ */
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/codec.h b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/codec.h
new file mode 100644
index 0000000000..01d6fb907e
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/codec.h
@@ -0,0 +1,212 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * codec.h
+ *
+ * This header file contains the calls to the internal encoder
+ * and decoder functions.
+ *
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_CODEC_H_
+#define MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_CODEC_H_
+
+#include "modules/audio_coding/codecs/isac/fix/source/structs.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+int WebRtcIsacfix_EstimateBandwidth(BwEstimatorstr* bwest_str,
+ Bitstr_dec* streamdata,
+ size_t packet_size,
+ uint16_t rtp_seq_number,
+ uint32_t send_ts,
+ uint32_t arr_ts);
+
+int WebRtcIsacfix_DecodeImpl(int16_t* signal_out16,
+ IsacFixDecoderInstance* ISACdec_obj,
+ size_t* current_framesamples);
+
+void WebRtcIsacfix_DecodePlcImpl(int16_t* decoded,
+ IsacFixDecoderInstance* ISACdec_obj,
+ size_t* current_framesample);
+
+int WebRtcIsacfix_EncodeImpl(int16_t* in,
+ IsacFixEncoderInstance* ISACenc_obj,
+ BwEstimatorstr* bw_estimatordata,
+ int16_t CodingMode);
+
+int WebRtcIsacfix_EncodeStoredData(IsacFixEncoderInstance* ISACenc_obj,
+ int BWnumber,
+ float scale);
+
+/* initialization functions */
+
+void WebRtcIsacfix_InitMaskingEnc(MaskFiltstr_enc* maskdata);
+void WebRtcIsacfix_InitMaskingDec(MaskFiltstr_dec* maskdata);
+
+void WebRtcIsacfix_InitPreFilterbank(PreFiltBankstr* prefiltdata);
+
+void WebRtcIsacfix_InitPostFilterbank(PostFiltBankstr* postfiltdata);
+
+void WebRtcIsacfix_InitPitchFilter(PitchFiltstr* pitchfiltdata);
+
+void WebRtcIsacfix_InitPitchAnalysis(PitchAnalysisStruct* State);
+
+void WebRtcIsacfix_InitPlc(PLCstr* State);
+
+/* transform functions */
+
+void WebRtcIsacfix_InitTransform(void);
+
+typedef void (*Time2Spec)(int16_t* inre1Q9,
+ int16_t* inre2Q9,
+ int16_t* outre,
+ int16_t* outim);
+typedef void (*Spec2Time)(int16_t* inreQ7,
+ int16_t* inimQ7,
+ int32_t* outre1Q16,
+ int32_t* outre2Q16);
+
+extern Time2Spec WebRtcIsacfix_Time2Spec;
+extern Spec2Time WebRtcIsacfix_Spec2Time;
+
+void WebRtcIsacfix_Time2SpecC(int16_t* inre1Q9,
+ int16_t* inre2Q9,
+ int16_t* outre,
+ int16_t* outim);
+void WebRtcIsacfix_Spec2TimeC(int16_t* inreQ7,
+ int16_t* inimQ7,
+ int32_t* outre1Q16,
+ int32_t* outre2Q16);
+
+#if defined(WEBRTC_HAS_NEON)
+void WebRtcIsacfix_Time2SpecNeon(int16_t* inre1Q9,
+ int16_t* inre2Q9,
+ int16_t* outre,
+ int16_t* outim);
+void WebRtcIsacfix_Spec2TimeNeon(int16_t* inreQ7,
+ int16_t* inimQ7,
+ int32_t* outre1Q16,
+ int32_t* outre2Q16);
+#endif
+
+#if defined(MIPS32_LE)
+void WebRtcIsacfix_Time2SpecMIPS(int16_t* inre1Q9,
+ int16_t* inre2Q9,
+ int16_t* outre,
+ int16_t* outim);
+void WebRtcIsacfix_Spec2TimeMIPS(int16_t* inreQ7,
+ int16_t* inimQ7,
+ int32_t* outre1Q16,
+ int32_t* outre2Q16);
+#endif
+
+/* filterbank functions */
+
+void WebRtcIsacfix_SplitAndFilter1(int16_t* in,
+ int16_t* LP16,
+ int16_t* HP16,
+ PreFiltBankstr* prefiltdata);
+
+void WebRtcIsacfix_FilterAndCombine1(int16_t* tempin_ch1,
+ int16_t* tempin_ch2,
+ int16_t* out16,
+ PostFiltBankstr* postfiltdata);
+
+/* normalized lattice filters */
+
+void WebRtcIsacfix_NormLatticeFilterMa(size_t orderCoef,
+ int32_t* stateGQ15,
+ int16_t* lat_inQ0,
+ int16_t* filt_coefQ15,
+ int32_t* gain_lo_hiQ17,
+ int16_t lo_hi,
+ int16_t* lat_outQ9);
+
+void WebRtcIsacfix_NormLatticeFilterAr(size_t orderCoef,
+ int16_t* stateGQ0,
+ int32_t* lat_inQ25,
+ int16_t* filt_coefQ15,
+ int32_t* gain_lo_hiQ17,
+ int16_t lo_hi,
+ int16_t* lat_outQ0);
+
+/* TODO(kma): Remove the following functions into individual header files. */
+
+/* Internal functions in both C and ARM Neon versions */
+
+int WebRtcIsacfix_AutocorrC(int32_t* __restrict r,
+ const int16_t* __restrict x,
+ int16_t N,
+ int16_t order,
+ int16_t* __restrict scale);
+
+void WebRtcIsacfix_FilterMaLoopC(int16_t input0,
+ int16_t input1,
+ int32_t input2,
+ int32_t* ptr0,
+ int32_t* ptr1,
+ int32_t* ptr2);
+
+#if defined(WEBRTC_HAS_NEON)
+int WebRtcIsacfix_AutocorrNeon(int32_t* __restrict r,
+ const int16_t* __restrict x,
+ int16_t N,
+ int16_t order,
+ int16_t* __restrict scale);
+
+void WebRtcIsacfix_FilterMaLoopNeon(int16_t input0,
+ int16_t input1,
+ int32_t input2,
+ int32_t* ptr0,
+ int32_t* ptr1,
+ int32_t* ptr2);
+#endif
+
+#if defined(MIPS32_LE)
+int WebRtcIsacfix_AutocorrMIPS(int32_t* __restrict r,
+ const int16_t* __restrict x,
+ int16_t N,
+ int16_t order,
+ int16_t* __restrict scale);
+
+void WebRtcIsacfix_FilterMaLoopMIPS(int16_t input0,
+ int16_t input1,
+ int32_t input2,
+ int32_t* ptr0,
+ int32_t* ptr1,
+ int32_t* ptr2);
+#endif
+
+/* Function pointers associated with the above functions. */
+
+typedef int (*AutocorrFix)(int32_t* __restrict r,
+ const int16_t* __restrict x,
+ int16_t N,
+ int16_t order,
+ int16_t* __restrict scale);
+extern AutocorrFix WebRtcIsacfix_AutocorrFix;
+
+typedef void (*FilterMaLoopFix)(int16_t input0,
+ int16_t input1,
+ int32_t input2,
+ int32_t* ptr0,
+ int32_t* ptr1,
+ int32_t* ptr2);
+extern FilterMaLoopFix WebRtcIsacfix_FilterMaLoopFix;
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif /* MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_CODEC_H_ */
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/decode.c b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/decode.c
new file mode 100644
index 0000000000..144208818a
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/decode.c
@@ -0,0 +1,221 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * decode.c
+ *
+ * This C file contains the internal decoding function.
+ *
+ */
+
+#include <string.h>
+
+#include "modules/audio_coding/codecs/isac/fix/source/bandwidth_estimator.h"
+#include "modules/audio_coding/codecs/isac/fix/source/codec.h"
+#include "modules/audio_coding/codecs/isac/fix/source/entropy_coding.h"
+#include "modules/audio_coding/codecs/isac/fix/source/pitch_estimator.h"
+#include "modules/audio_coding/codecs/isac/fix/source/settings.h"
+#include "modules/audio_coding/codecs/isac/fix/source/structs.h"
+
+
+
+
+int WebRtcIsacfix_DecodeImpl(int16_t* signal_out16,
+ IsacFixDecoderInstance* ISACdec_obj,
+ size_t* current_framesamples)
+{
+ int k;
+ int err;
+ int16_t BWno;
+ int len = 0;
+
+ int16_t model;
+
+
+ int16_t Vector_Word16_1[FRAMESAMPLES/2];
+ int16_t Vector_Word16_2[FRAMESAMPLES/2];
+
+ int32_t Vector_Word32_1[FRAMESAMPLES/2];
+ int32_t Vector_Word32_2[FRAMESAMPLES/2];
+
+ int16_t lofilt_coefQ15[ORDERLO*SUBFRAMES]; //refl. coeffs
+ int16_t hifilt_coefQ15[ORDERHI*SUBFRAMES]; //refl. coeffs
+ int32_t gain_lo_hiQ17[2*SUBFRAMES];
+
+ int16_t PitchLags_Q7[PITCH_SUBFRAMES];
+ int16_t PitchGains_Q12[PITCH_SUBFRAMES];
+ int16_t AvgPitchGain_Q12;
+
+ int16_t tmp_1, tmp_2;
+ int32_t tmp32a;
+ int16_t gainQ13;
+
+
+ size_t frame_nb; /* counter */
+ size_t frame_mode; /* 0 for 30ms, 1 for 60ms */
+ static const size_t kProcessedSamples = 480; /* 480 (for both 30, 60 ms) */
+
+ /* PLC */
+ int16_t overlapWin[ 240 ];
+
+ (ISACdec_obj->bitstr_obj).W_upper = 0xFFFFFFFF;
+ (ISACdec_obj->bitstr_obj).streamval = 0;
+ (ISACdec_obj->bitstr_obj).stream_index = 0;
+ (ISACdec_obj->bitstr_obj).full = 1;
+
+
+ /* decode frame length and BW estimation; the BW value is unused here, the call mainly advances the stream pointer */
+ err = WebRtcIsacfix_DecodeFrameLen(&ISACdec_obj->bitstr_obj, current_framesamples);
+ if (err<0) // error check
+ return err;
+
+ frame_mode = *current_framesamples / MAX_FRAMESAMPLES; /* 0, or 1 */
+
+ err = WebRtcIsacfix_DecodeSendBandwidth(&ISACdec_obj->bitstr_obj, &BWno);
+ if (err<0) // error check
+ return err;
+
+ /* one loop if it's one frame (30ms), two loops if two frames bundled together
+ * (60ms) */
+ for (frame_nb = 0; frame_nb <= frame_mode; frame_nb++) {
+
+ /* decode & dequantize pitch parameters */
+ err = WebRtcIsacfix_DecodePitchGain(&(ISACdec_obj->bitstr_obj), PitchGains_Q12);
+ if (err<0) // error check
+ return err;
+
+ err = WebRtcIsacfix_DecodePitchLag(&ISACdec_obj->bitstr_obj, PitchGains_Q12, PitchLags_Q7);
+ if (err<0) // error check
+ return err;
+
+ AvgPitchGain_Q12 = (int16_t)(((int32_t)PitchGains_Q12[0] + PitchGains_Q12[1] + PitchGains_Q12[2] + PitchGains_Q12[3])>>2);
+
+ /* decode & dequantize FiltCoef */
+ err = WebRtcIsacfix_DecodeLpc(gain_lo_hiQ17, lofilt_coefQ15, hifilt_coefQ15,
+ &ISACdec_obj->bitstr_obj, &model);
+
+ if (err<0) // error check
+ return err;
+
+ /* decode & dequantize spectrum */
+ len = WebRtcIsacfix_DecodeSpec(&ISACdec_obj->bitstr_obj, Vector_Word16_1, Vector_Word16_2, AvgPitchGain_Q12);
+ if (len < 0) // error check
+ return len;
+
+ // Why does this need Q16 in and out? /JS
+ WebRtcIsacfix_Spec2Time(Vector_Word16_1, Vector_Word16_2, Vector_Word32_1, Vector_Word32_2);
+
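+ // Rounding conversion: adding 64 (half of 2^7) before the shift below
+ // rounds to nearest while dropping seven fractional bits.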
+ for (k=0; k<FRAMESAMPLES/2; k++) {
+ // Q16 -> Q9.
+ Vector_Word16_1[k] = (int16_t)((Vector_Word32_1[k] + 64) >> 7);
+ }
+
+ /* ---- If this is recovery frame ---- */
+ if( (ISACdec_obj->plcstr_obj).used == PLC_WAS_USED )
+ {
+ (ISACdec_obj->plcstr_obj).used = PLC_NOT_USED;
+ if( (ISACdec_obj->plcstr_obj).B < 1000 )
+ {
+ (ISACdec_obj->plcstr_obj).decayCoeffPriodic = 4000;
+ }
+
+ ISACdec_obj->plcstr_obj.decayCoeffPriodic = WEBRTC_SPL_WORD16_MAX; /* DECAY_RATE is in Q15 */
+ ISACdec_obj->plcstr_obj.decayCoeffNoise = WEBRTC_SPL_WORD16_MAX; /* DECAY_RATE is in Q15 */
+ ISACdec_obj->plcstr_obj.pitchCycles = 0;
+
+ PitchGains_Q12[0] = (int16_t)(PitchGains_Q12[0] * 700 >> 10);
+
+ /* ---- Add-overlap ---- */
+ WebRtcSpl_GetHanningWindow( overlapWin, RECOVERY_OVERLAP );
+ for( k = 0; k < RECOVERY_OVERLAP; k++ )
+ Vector_Word16_1[k] = WebRtcSpl_AddSatW16(
+ (int16_t)(ISACdec_obj->plcstr_obj.overlapLP[k] *
+ overlapWin[RECOVERY_OVERLAP - k - 1] >> 14),
+ (int16_t)(Vector_Word16_1[k] * overlapWin[k] >> 14));
+
+
+
+ }
+
+ /* --- Store side info --- */
+ if( frame_nb == frame_mode )
+ {
+ /* --- LPC info */
+ WEBRTC_SPL_MEMCPY_W16( (ISACdec_obj->plcstr_obj).lofilt_coefQ15, &lofilt_coefQ15[(SUBFRAMES-1)*ORDERLO], ORDERLO );
+ WEBRTC_SPL_MEMCPY_W16( (ISACdec_obj->plcstr_obj).hifilt_coefQ15, &hifilt_coefQ15[(SUBFRAMES-1)*ORDERHI], ORDERHI );
+ (ISACdec_obj->plcstr_obj).gain_lo_hiQ17[0] = gain_lo_hiQ17[(SUBFRAMES-1) * 2];
+ (ISACdec_obj->plcstr_obj).gain_lo_hiQ17[1] = gain_lo_hiQ17[(SUBFRAMES-1) * 2 + 1];
+
+ /* --- LTP info */
+ (ISACdec_obj->plcstr_obj).AvgPitchGain_Q12 = PitchGains_Q12[3];
+ (ISACdec_obj->plcstr_obj).lastPitchGain_Q12 = PitchGains_Q12[3];
+ (ISACdec_obj->plcstr_obj).lastPitchLag_Q7 = PitchLags_Q7[3];
+
+ if( PitchLags_Q7[3] < 3000 )
+ (ISACdec_obj->plcstr_obj).lastPitchLag_Q7 += PitchLags_Q7[3];
+
+ WEBRTC_SPL_MEMCPY_W16( (ISACdec_obj->plcstr_obj).prevPitchInvIn, Vector_Word16_1, FRAMESAMPLES/2 );
+
+ }
+ /* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
+
+ /* inverse pitch filter */
+ WebRtcIsacfix_PitchFilter(Vector_Word16_1, Vector_Word16_2, &ISACdec_obj->pitchfiltstr_obj, PitchLags_Q7, PitchGains_Q12, 4);
+
+ if( frame_nb == frame_mode )
+ {
+ WEBRTC_SPL_MEMCPY_W16( (ISACdec_obj->plcstr_obj).prevPitchInvOut, &(Vector_Word16_2[FRAMESAMPLES/2 - (PITCH_MAX_LAG + 10)]), PITCH_MAX_LAG );
+ }
+
+
+ /* reduce gain to compensate for pitch enhancer */
+ /* gain = 1.0f - 0.45f * AvgPitchGain; */
+ tmp32a = AvgPitchGain_Q12 * 29; // Q18
+ gainQ13 = (int16_t)((262144 - tmp32a) >> 5); // Q18 -> Q13.
+
+ for (k = 0; k < FRAMESAMPLES/2; k++)
+ {
+ Vector_Word32_1[k] = (Vector_Word16_2[k] * gainQ13) * (1 << 3); // Q25
+ }
+
+
+ /* perceptual post-filtering (using normalized lattice filter) */
+ WebRtcIsacfix_NormLatticeFilterAr(ORDERLO, (ISACdec_obj->maskfiltstr_obj).PostStateLoGQ0,
+ Vector_Word32_1, lofilt_coefQ15, gain_lo_hiQ17, 0, Vector_Word16_1);
+
+ /* --- Store Highpass Residual --- */
+ for (k = 0; k < FRAMESAMPLES/2; k++)
+ Vector_Word32_1[k] = Vector_Word32_2[k] * (1 << 9); // Q16 -> Q25
+
+ for( k = 0; k < PITCH_MAX_LAG + 10; k++ )
+ (ISACdec_obj->plcstr_obj).prevHP[k] = Vector_Word32_1[FRAMESAMPLES/2 - (PITCH_MAX_LAG + 10) + k];
+
+
+ WebRtcIsacfix_NormLatticeFilterAr(ORDERHI, (ISACdec_obj->maskfiltstr_obj).PostStateHiGQ0,
+ Vector_Word32_1, hifilt_coefQ15, gain_lo_hiQ17, 1, Vector_Word16_2);
+
+ /* recombine the 2 bands */
+
+ /* Form the polyphase signals, and compensate for DC offset */
+ for (k=0;k<FRAMESAMPLES/2;k++) {
+ tmp_1 = (int16_t)WebRtcSpl_SatW32ToW16(((int32_t)Vector_Word16_1[k]+Vector_Word16_2[k] + 1)); /* Construct a new upper channel signal*/
+ tmp_2 = (int16_t)WebRtcSpl_SatW32ToW16(((int32_t)Vector_Word16_1[k]-Vector_Word16_2[k])); /* Construct a new lower channel signal*/
+ Vector_Word16_1[k] = tmp_1;
+ Vector_Word16_2[k] = tmp_2;
+ }
+
+ WebRtcIsacfix_FilterAndCombine1(Vector_Word16_1,
+ Vector_Word16_2,
+ signal_out16 + frame_nb * kProcessedSamples,
+ &ISACdec_obj->postfiltbankstr_obj);
+
+ }
+ return len;
+}
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/decode_bwe.c b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/decode_bwe.c
new file mode 100644
index 0000000000..99676504cd
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/decode_bwe.c
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * decode_bwe.c
+ *
+ * This C file contains the internal decode bandwidth estimate function.
+ *
+ */
+
+
+#include "modules/audio_coding/codecs/isac/fix/source/bandwidth_estimator.h"
+#include "modules/audio_coding/codecs/isac/fix/source/codec.h"
+#include "modules/audio_coding/codecs/isac/fix/source/entropy_coding.h"
+#include "modules/audio_coding/codecs/isac/fix/source/structs.h"
+
+
+
+
+int WebRtcIsacfix_EstimateBandwidth(BwEstimatorstr *bwest_str,
+ Bitstr_dec *streamdata,
+ size_t packet_size,
+ uint16_t rtp_seq_number,
+ uint32_t send_ts,
+ uint32_t arr_ts)
+{
+ int16_t index;
+ size_t frame_samples;
+ int err;
+
+ /* decode framelength */
+ err = WebRtcIsacfix_DecodeFrameLen(streamdata, &frame_samples);
+ /* error check */
+ if (err<0) {
+ return err;
+ }
+
+ /* decode BW estimation */
+ err = WebRtcIsacfix_DecodeSendBandwidth(streamdata, &index);
+ /* error check */
+ if (err<0) {
+ return err;
+ }
+
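+ /* frame_samples * 1000 / FS converts samples to milliseconds; e.g. 480
+ samples at FS = 16000 Hz give a 30 ms frame. */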
+ /* Update BWE with received data */
+ err = WebRtcIsacfix_UpdateUplinkBwImpl(
+ bwest_str,
+ rtp_seq_number,
+ (int16_t)(frame_samples * 1000 / FS),
+ send_ts,
+ arr_ts,
+ packet_size, /* in bytes */
+ index);
+
+ /* error check */
+ if (err<0) {
+ return err;
+ }
+
+ /* Successful */
+ return 0;
+}
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/decode_plc.c b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/decode_plc.c
new file mode 100644
index 0000000000..873cf951ba
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/decode_plc.c
@@ -0,0 +1,805 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * decode_plc.c
+ *
+ * Packet Loss Concealment.
+ *
+ */
+
+#include <string.h>
+
+#include "modules/audio_coding/codecs/isac/fix/source/settings.h"
+#include "modules/audio_coding/codecs/isac/fix/source/entropy_coding.h"
+#include "modules/audio_coding/codecs/isac/fix/source/pitch_estimator.h"
+#include "modules/audio_coding/codecs/isac/fix/source/bandwidth_estimator.h"
+#include "modules/audio_coding/codecs/isac/fix/source/structs.h"
+#include "modules/audio_coding/codecs/isac/fix/source/codec.h"
+
+
+#define NO_OF_PRIMES 8
+#define NOISE_FILTER_LEN 30
+
+/*
+ * MA filter with a decaying output gain, used by the packet-loss
+ * concealment below; returns the decay value reached after the last
+ * filtered sample.
+ */
+
+static int16_t plc_filterma_Fast(
+ int16_t *In, /* (i) Vector to be filtered. InOut[-orderCoef+1]
+ to InOut[-1] contains state */
+ int16_t *Out, /* (o) Filtered vector */
+ int16_t *B, /* (i) The filter coefficients (in Q0) */
+ int16_t Blen, /* (i) Number of B coefficients */
+ int16_t len, /* (i) Number of samples to be filtered */
+ int16_t reduceDecay,
+ int16_t decay,
+ int16_t rshift )
+{
+ int i, j;
+ int32_t o;
+ int32_t lim = (1 << (15 + rshift)) - 1;
+
+ for (i = 0; i < len; i++)
+ {
+ const int16_t *b_ptr = &B[0];
+ const int16_t *x_ptr = &In[i];
+
+ o = (int32_t)0;
+
+ for (j = 0;j < Blen; j++)
+ {
+ o = WebRtcSpl_AddSatW32(o, *b_ptr * *x_ptr);
+ b_ptr++;
+ x_ptr--;
+ }
+
+ /* to round off correctly */
+ o = WebRtcSpl_AddSatW32(o, 1 << (rshift - 1));
+
+ /* saturate according to the domain of the filter coefficients */
+ o = WEBRTC_SPL_SAT((int32_t)lim, o, (int32_t)-lim);
+
+ /* o should be in the range of int16_t */
+ o >>= rshift;
+
+ /* decay the output signal; this is specific to plc */
+ *Out++ = (int16_t)((int16_t)o * decay >> 15);
+
+ /* change the decay */
+ decay -= reduceDecay;
+ if( decay < 0 )
+ decay = 0;
+ }
+ return( decay );
+}
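+
+/* Example (illustrative): with decay = 32767 (~1.0 in Q15) and
+ * reduceDecay = 5, the gain applied to each filtered sample shrinks by
+ * 5/32768 per sample, fading the concealed signal out gradually; the decay
+ * value reached after the last sample is returned so the fade can continue
+ * in the next call. */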
+
+
+
+
+
+
+
+
+static __inline int32_t log2_Q8_T( uint32_t x ) {
+
+ int32_t zeros;
+ int16_t frac;
+
+ zeros=WebRtcSpl_NormU32(x);
+ frac = (int16_t)(((x << zeros) & 0x7FFFFFFF) >> 23);
+
+ /* log2(magn(i)) */
+ return ((31 - zeros) << 8) + frac;
+}
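+
+/* Example: log2_Q8_T(1024) gives zeros = 21, so the integer part is
+ * (31 - 21) << 8 = 2560 and the fractional part is 0, i.e. 10.0 in Q8. */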
+
+static __inline int16_t exp2_Q10_T(int16_t x) { // Both in and out in Q10
+
+ int16_t tmp16_1, tmp16_2;
+
+ tmp16_2=(int16_t)(0x0400|(x&0x03FF));
+ tmp16_1 = -(x >> 10);
+ if(tmp16_1>0)
+ return tmp16_2 >> tmp16_1;
+ else
+ return tmp16_2 << -tmp16_1;
+
+}
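+
+/* Examples: exp2_Q10_T(0) = 1024 (2^0 = 1.0 in Q10) and
+ * exp2_Q10_T(1024) = 2048 (2^1 = 2.0 in Q10). */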
+
+
+/*
+ This is a fixed-point version of the corresponding floating-point code,
+ with limLow = 700 and limHigh = 5000 hard-coded. The values 700 and 5000
+ were obtained experimentally.
+
+ The function computes membership values for two sets. The membership
+ functions are of second order, corresponding to half-bell-shaped pulses.
+*/
+static void MemshipValQ15( int16_t in, int16_t *A, int16_t *B )
+{
+ int16_t x;
+
+ in -= 700; /* translate the lowLim to 0, limHigh = 5000 - 700, M = 2150 */
+
+ if( in <= 2150 )
+ {
+ if( in > 0 )
+ {
+ /* b = in^2 / (2 * M^2), a = 1 - b in Q0.
+ We have to compute in Q15 */
+
+ /* x = in / 2150 {in Q15} = in * 15.2409 {in Q15} =
+ in*15 + (in*983)/(2^12); note that 983/2^12 = 0.23999 */
+
+ /* we are sure that x is in the range of int16_t */
+ x = (int16_t)(in * 15 + (in * 983 >> 12));
+ /* b = x^2 / 2 {in Q15} so a shift of 16 is required to
+ be in correct domain and one more for the division by 2 */
+ *B = (int16_t)((x * x + 0x00010000) >> 17);
+ *A = WEBRTC_SPL_WORD16_MAX - *B;
+ }
+ else
+ {
+ *B = 0;
+ *A = WEBRTC_SPL_WORD16_MAX;
+ }
+ }
+ else
+ {
+ if( in < 4300 )
+ {
+ /* This is a mirror case of the above */
+ in = 4300 - in;
+ x = (int16_t)(in * 15 + (in * 983 >> 12));
+ /* b = x^2 / 2 {in Q15} so a shift of 16 is required to
+ be in correct domain and one more for the division by 2 */
+ *A = (int16_t)((x * x + 0x00010000) >> 17);
+ *B = WEBRTC_SPL_WORD16_MAX - *A;
+
+ }
+ else
+ {
+ *A = 0;
+ *B = WEBRTC_SPL_WORD16_MAX;
+ }
+ }
+}
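+
+/* Boundary behaviour of the code above: raw inputs at or below 700 give
+ * A = 32767 and B = 0; raw inputs at or above 5000 give A = 0 and
+ * B = 32767; values in between blend along the two half-bells. */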
+
+
+
+
+static void LinearResampler(int16_t* in,
+ int16_t* out,
+ size_t lenIn,
+ size_t lenOut)
+{
+ size_t n = (lenIn - 1) * RESAMP_RES;
+ int16_t resOut, relativePos, diff;
+ size_t i, j;
+ uint16_t udiff;
+
+ if( lenIn == lenOut )
+ {
+ WEBRTC_SPL_MEMCPY_W16( out, in, lenIn );
+ return;
+ }
+
+ resOut = WebRtcSpl_DivW32W16ResW16( (int32_t)n, (int16_t)(lenOut-1) );
+
+ out[0] = in[0];
+ for( i = 1, j = 0, relativePos = 0; i < lenOut; i++ )
+ {
+
+ relativePos += resOut;
+ while( relativePos > RESAMP_RES )
+ {
+ j++;
+ relativePos -= RESAMP_RES;
+ }
+
+
+ /* An overflow may happen and the difference in sample values may
+ * require more than 16 bits. We would like to avoid 32-bit arithmetic
+ * as much as possible. */
+
+ if( (in[ j ] > 0) && (in[j + 1] < 0) )
+ {
+ udiff = (uint16_t)(in[ j ] - in[j + 1]);
+ out[ i ] = in[ j ] - (uint16_t)( ((int32_t)( udiff * relativePos )) >> RESAMP_RES_BIT);
+ }
+ else
+ {
+ if( (in[j] < 0) && (in[j+1] > 0) )
+ {
+ udiff = (uint16_t)( in[j + 1] - in[ j ] );
+ out[ i ] = in[ j ] + (uint16_t)( ((int32_t)( udiff * relativePos )) >> RESAMP_RES_BIT);
+ }
+ else
+ {
+ diff = in[ j + 1 ] - in[ j ];
+ out[i] = in[j] + (int16_t)(diff * relativePos >> RESAMP_RES_BIT);
+ }
+ }
+ }
+}
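+
+/* Example (illustrative): lenIn = 3 and lenOut = 5 give n = 2 * RESAMP_RES
+ * and resOut = n / 4, so the output taps the input at relative positions
+ * 0, 0.5, 1.0, 1.5 and 2.0, linearly interpolating between neighbouring
+ * input samples. */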
+
+
+
+
+
+void WebRtcIsacfix_DecodePlcImpl(int16_t *signal_out16,
+ IsacFixDecoderInstance *ISACdec_obj,
+ size_t *current_framesamples )
+{
+ int subframecnt;
+
+ int16_t* Vector_Word16_1;
+ int16_t Vector_Word16_Extended_1[FRAMESAMPLES_HALF + NOISE_FILTER_LEN];
+ int16_t* Vector_Word16_2;
+ int16_t Vector_Word16_Extended_2[FRAMESAMPLES_HALF + NOISE_FILTER_LEN];
+
+ int32_t Vector_Word32_1[FRAMESAMPLES_HALF];
+ int32_t Vector_Word32_2[FRAMESAMPLES_HALF];
+
+ int16_t lofilt_coefQ15[ORDERLO*SUBFRAMES]; //refl. coeffs
+ int16_t hifilt_coefQ15[ORDERHI*SUBFRAMES]; //refl. coeffs
+
+ int16_t pitchLags_Q7[PITCH_SUBFRAMES];
+ int16_t pitchGains_Q12[PITCH_SUBFRAMES];
+
+ int16_t tmp_1, tmp_2;
+ int32_t tmp32a, tmp32b;
+ int16_t gainQ13;
+
+ int16_t myDecayRate;
+
+ /* ---------- PLC variables ------------ */
+ size_t lag0, i, k;
+ int16_t noiseIndex;
+ int16_t stretchPitchLP[PITCH_MAX_LAG + 10], stretchPitchLP1[PITCH_MAX_LAG + 10];
+
+ int32_t gain_lo_hiQ17[2*SUBFRAMES];
+
+ int16_t nLP, pLP, wNoisyLP, wPriodicLP, tmp16;
+ size_t minIdx;
+ int32_t nHP, pHP, wNoisyHP, wPriodicHP, corr, minCorr, maxCoeff;
+ int16_t noise1, rshift;
+
+
+ int16_t ltpGain, pitchGain, myVoiceIndicator, myAbs, maxAbs;
+ int32_t varIn, varOut, logVarIn, logVarOut, Q, logMaxAbs;
+ int rightShiftIn, rightShiftOut;
+
+
+ /* ------------------------------------- */
+
+
+ myDecayRate = (DECAY_RATE);
+ Vector_Word16_1 = &Vector_Word16_Extended_1[NOISE_FILTER_LEN];
+ Vector_Word16_2 = &Vector_Word16_Extended_2[NOISE_FILTER_LEN];
+
+
+ /* ----- Simply Copy Previous LPC parameters ------ */
+ for( subframecnt = 0; subframecnt < SUBFRAMES; subframecnt++ )
+ {
+ /* lower Band */
+ WEBRTC_SPL_MEMCPY_W16(&lofilt_coefQ15[ subframecnt * ORDERLO ],
+ (ISACdec_obj->plcstr_obj).lofilt_coefQ15, ORDERLO);
+ gain_lo_hiQ17[2*subframecnt] = (ISACdec_obj->plcstr_obj).gain_lo_hiQ17[0];
+
+ /* Upper Band */
+ WEBRTC_SPL_MEMCPY_W16(&hifilt_coefQ15[ subframecnt * ORDERHI ],
+ (ISACdec_obj->plcstr_obj).hifilt_coefQ15, ORDERHI);
+ gain_lo_hiQ17[2*subframecnt + 1] = (ISACdec_obj->plcstr_obj).gain_lo_hiQ17[1];
+ }
+
+
+
+
+ lag0 = (size_t)(((ISACdec_obj->plcstr_obj.lastPitchLag_Q7 + 64) >> 7) + 1);
+
+
+ if( (ISACdec_obj->plcstr_obj).used != PLC_WAS_USED )
+ {
+ (ISACdec_obj->plcstr_obj).pitchCycles = 0;
+
+ (ISACdec_obj->plcstr_obj).lastPitchLP =
+ &((ISACdec_obj->plcstr_obj).prevPitchInvIn[FRAMESAMPLES_HALF - lag0]);
+ minCorr = WEBRTC_SPL_WORD32_MAX;
+
+ if ((FRAMESAMPLES_HALF - 10) > 2 * lag0)
+ {
+ minIdx = 11;
+ for( i = 0; i < 21; i++ )
+ {
+ corr = 0;
+ for( k = 0; k < lag0; k++ )
+ {
+ corr = WebRtcSpl_AddSatW32(corr, WEBRTC_SPL_ABS_W32(
+ WebRtcSpl_SubSatW16(
+ (ISACdec_obj->plcstr_obj).lastPitchLP[k],
+ (ISACdec_obj->plcstr_obj).prevPitchInvIn[
+ FRAMESAMPLES_HALF - 2*lag0 - 10 + i + k ] ) ) );
+ }
+ if( corr < minCorr )
+ {
+ minCorr = corr;
+ minIdx = i;
+ }
+ }
+ (ISACdec_obj->plcstr_obj).prevPitchLP =
+ &( (ISACdec_obj->plcstr_obj).prevPitchInvIn[
+ FRAMESAMPLES_HALF - lag0*2 - 10 + minIdx] );
+ }
+ else
+ {
+ (ISACdec_obj->plcstr_obj).prevPitchLP =
+ (ISACdec_obj->plcstr_obj).lastPitchLP;
+ }
+ pitchGain = (ISACdec_obj->plcstr_obj).lastPitchGain_Q12;
+
+ WebRtcSpl_AutoCorrelation(
+ &(ISACdec_obj->plcstr_obj).prevPitchInvIn[FRAMESAMPLES_HALF - lag0],
+ lag0, 0, &varIn, &rightShiftIn);
+ WebRtcSpl_AutoCorrelation(
+ &(ISACdec_obj->plcstr_obj).prevPitchInvOut[PITCH_MAX_LAG + 10 - lag0],
+ lag0, 0, &varOut, &rightShiftOut);
+
+ maxAbs = 0;
+ for( i = 0; i< lag0; i++)
+ {
+ myAbs = WEBRTC_SPL_ABS_W16(
+ (ISACdec_obj->plcstr_obj).prevPitchInvOut[
+ PITCH_MAX_LAG + 10 - lag0 + i] );
+ maxAbs = (myAbs > maxAbs)? myAbs:maxAbs;
+ }
+ logVarIn = log2_Q8_T( (uint32_t)( varIn ) ) +
+ (int32_t)(rightShiftIn << 8);
+ logVarOut = log2_Q8_T( (uint32_t)( varOut ) ) +
+ (int32_t)(rightShiftOut << 8);
+ logMaxAbs = log2_Q8_T( (uint32_t)( maxAbs ) );
+
+ ltpGain = (int16_t)(logVarOut - logVarIn);
+ Q = 2 * logMaxAbs - ( logVarOut - 1512 );
+
+ /*
+ * ---
+ * We are computing sqrt( (VarIn/lag0) / var( noise ) ).
+ * var( noise ) is almost 256. We have already computed log2( VarIn ) in Q8,
+ * so we actually compute 2^( 0.5*( log2( VarIn ) - log2( lag0 ) - log2( var( noise ) ) ) ).
+ * Note that our log function is in Q8 but the exponential function is in Q10.
+ * --
+ */
+
+ logVarIn -= log2_Q8_T( (uint32_t)( lag0 ) );
+ tmp16 = (int16_t)((logVarIn<<1) - (4<<10) );
+ rightShiftIn = 0;
+ if( tmp16 > 4096 )
+ {
+ tmp16 -= 4096;
+ tmp16 = exp2_Q10_T( tmp16 );
+ tmp16 >>= 6;
+ }
+ else
+ tmp16 = exp2_Q10_T( tmp16 )>>10;
+
+ (ISACdec_obj->plcstr_obj).std = tmp16 - 4;
+
+ if( (ltpGain < 110) || (ltpGain > 230) )
+ {
+ if( ltpGain < 100 && (pitchGain < 1800) )
+ {
+ (ISACdec_obj->plcstr_obj).A = WEBRTC_SPL_WORD16_MAX;
+ }
+ else
+ {
+ (ISACdec_obj->plcstr_obj).A = ((ltpGain < 110) && (Q < 800)
+ )? WEBRTC_SPL_WORD16_MAX:0;
+ }
+ (ISACdec_obj->plcstr_obj).B = WEBRTC_SPL_WORD16_MAX -
+ (ISACdec_obj->plcstr_obj).A;
+ }
+ else
+ {
+ if( (pitchGain < 450) || (pitchGain > 1600) )
+ {
+ (ISACdec_obj->plcstr_obj).A = ((pitchGain < 450)
+ )? WEBRTC_SPL_WORD16_MAX:0;
+ (ISACdec_obj->plcstr_obj).B = WEBRTC_SPL_WORD16_MAX -
+ (ISACdec_obj->plcstr_obj).A;
+ }
+ else
+ {
+ myVoiceIndicator = ltpGain * 2 + pitchGain;
+ MemshipValQ15( myVoiceIndicator,
+ &(ISACdec_obj->plcstr_obj).A, &(ISACdec_obj->plcstr_obj).B );
+ }
+ }
+
+
+
+ myVoiceIndicator = ltpGain * 16 + pitchGain * 2 + (pitchGain >> 8);
+ MemshipValQ15( myVoiceIndicator,
+ &(ISACdec_obj->plcstr_obj).A, &(ISACdec_obj->plcstr_obj).B );
+
+
+
+ (ISACdec_obj->plcstr_obj).stretchLag = lag0;
+ (ISACdec_obj->plcstr_obj).pitchIndex = 0;
+
+ }
+ else
+ {
+ myDecayRate = (DECAY_RATE<<2);
+ }
+
+ if( (ISACdec_obj->plcstr_obj).B < 1000 )
+ {
+ myDecayRate += (DECAY_RATE<<3);
+ }
+
+ /* ------------ reconstructing the residual signal ------------------ */
+
+ LinearResampler( (ISACdec_obj->plcstr_obj).lastPitchLP,
+ stretchPitchLP, lag0, (ISACdec_obj->plcstr_obj).stretchLag );
+ /* inverse pitch filter */
+
+ pitchLags_Q7[0] = pitchLags_Q7[1] = pitchLags_Q7[2] = pitchLags_Q7[3] =
+ (int16_t)((ISACdec_obj->plcstr_obj).stretchLag<<7);
+ pitchGains_Q12[3] = ( (ISACdec_obj->plcstr_obj).lastPitchGain_Q12);
+ pitchGains_Q12[2] = (int16_t)(pitchGains_Q12[3] * 1010 >> 10);
+ pitchGains_Q12[1] = (int16_t)(pitchGains_Q12[2] * 1010 >> 10);
+ pitchGains_Q12[0] = (int16_t)(pitchGains_Q12[1] * 1010 >> 10);
+
+
+ /* most of the time either B or A is zero, so the two cases are handled separately */
+ if( (ISACdec_obj->plcstr_obj).B == 0 )
+ {
+ for( i = 0; i < FRAMESAMPLES_HALF; i++ )
+ {
+ /* --- Low Pass */
+ (ISACdec_obj->plcstr_obj).seed = WEBRTC_SPL_RAND(
+ (ISACdec_obj->plcstr_obj).seed );
+ Vector_Word16_1[i] = (ISACdec_obj->plcstr_obj.seed >> 10) - 16;
+
+ /* --- Highpass */
+ (ISACdec_obj->plcstr_obj).seed = WEBRTC_SPL_RAND(
+ (ISACdec_obj->plcstr_obj).seed );
+ Vector_Word16_2[i] = (ISACdec_obj->plcstr_obj.seed >> 10) - 16;
+
+ }
+ for( i = 1; i < NOISE_FILTER_LEN; i++ )
+ {
+ (ISACdec_obj->plcstr_obj).seed = WEBRTC_SPL_RAND(
+ (ISACdec_obj->plcstr_obj).seed );
+ Vector_Word16_Extended_1[i] = (ISACdec_obj->plcstr_obj.seed >> 10) - 16;
+
+ (ISACdec_obj->plcstr_obj).seed = WEBRTC_SPL_RAND(
+ (ISACdec_obj->plcstr_obj).seed );
+ Vector_Word16_Extended_2[i] = (ISACdec_obj->plcstr_obj.seed >> 10) - 16;
+ }
+ plc_filterma_Fast(Vector_Word16_1, Vector_Word16_Extended_1,
+ &(ISACdec_obj->plcstr_obj).prevPitchInvIn[FRAMESAMPLES_HALF -
+ NOISE_FILTER_LEN], (int16_t) NOISE_FILTER_LEN,
+ (int16_t) FRAMESAMPLES_HALF, (int16_t)(5),
+ (ISACdec_obj->plcstr_obj).decayCoeffNoise, (int16_t)(6));
+
+ maxCoeff = WebRtcSpl_MaxAbsValueW32(
+ &(ISACdec_obj->plcstr_obj).prevHP[
+ PITCH_MAX_LAG + 10 - NOISE_FILTER_LEN], NOISE_FILTER_LEN );
+
+ rshift = 0;
+ while( maxCoeff > WEBRTC_SPL_WORD16_MAX )
+ {
+ maxCoeff >>= 1;
+ rshift++;
+ }
+ for( i = 0; i < NOISE_FILTER_LEN; i++ ) {
+ Vector_Word16_1[FRAMESAMPLES_HALF - NOISE_FILTER_LEN + i] =(int16_t)(
+ ISACdec_obj->plcstr_obj.prevHP[PITCH_MAX_LAG + 10 - NOISE_FILTER_LEN +
+ i] >> rshift);
+ }
+ (ISACdec_obj->plcstr_obj).decayCoeffNoise = plc_filterma_Fast(
+ Vector_Word16_2,
+ Vector_Word16_Extended_2,
+ &Vector_Word16_1[FRAMESAMPLES_HALF - NOISE_FILTER_LEN],
+ (int16_t) NOISE_FILTER_LEN,
+ (int16_t) FRAMESAMPLES_HALF,
+ (int16_t) (5),
+ (ISACdec_obj->plcstr_obj).decayCoeffNoise,
+ (int16_t) (7) );
+
+ for( i = 0; i < FRAMESAMPLES_HALF; i++ )
+ Vector_Word32_2[i] = Vector_Word16_Extended_2[i] << rshift;
+
+ Vector_Word16_1 = Vector_Word16_Extended_1;
+ }
+ else
+ {
+ if( (ISACdec_obj->plcstr_obj).A == 0 )
+ {
+ /* ------ Periodic Vector --- */
+ for( i = 0, noiseIndex = 0; i < FRAMESAMPLES_HALF; i++, noiseIndex++ )
+ {
+ /* --- Lowpass */
+ pLP = (int16_t)(stretchPitchLP[ISACdec_obj->plcstr_obj.pitchIndex] *
+ ISACdec_obj->plcstr_obj.decayCoeffPriodic >> 15);
+
+ /* --- Highpass */
+ pHP = (int32_t)WEBRTC_SPL_MUL_16_32_RSFT15(
+ (ISACdec_obj->plcstr_obj).decayCoeffPriodic,
+ (ISACdec_obj->plcstr_obj).prevHP[PITCH_MAX_LAG + 10 -
+ (ISACdec_obj->plcstr_obj).stretchLag +
+ (ISACdec_obj->plcstr_obj).pitchIndex] );
+
+ /* --- lower the multiplier (more decay at next sample) --- */
+ (ISACdec_obj->plcstr_obj).decayCoeffPriodic -= (myDecayRate);
+ if( (ISACdec_obj->plcstr_obj).decayCoeffPriodic < 0 )
+ (ISACdec_obj->plcstr_obj).decayCoeffPriodic = 0;
+
+ (ISACdec_obj->plcstr_obj).pitchIndex++;
+
+ if( (ISACdec_obj->plcstr_obj).pitchIndex ==
+ (ISACdec_obj->plcstr_obj).stretchLag )
+ {
+ (ISACdec_obj->plcstr_obj).pitchIndex = 0;
+ (ISACdec_obj->plcstr_obj).pitchCycles++;
+
+ if( (ISACdec_obj->plcstr_obj).stretchLag != (lag0 + 1) )
+ {
+ (ISACdec_obj->plcstr_obj).stretchLag = lag0 + 1;
+ }
+ else
+ {
+ (ISACdec_obj->plcstr_obj).stretchLag = lag0;
+ }
+
+ (ISACdec_obj->plcstr_obj).stretchLag = (
+ (ISACdec_obj->plcstr_obj).stretchLag > PITCH_MAX_LAG
+ )? (PITCH_MAX_LAG):(ISACdec_obj->plcstr_obj).stretchLag;
+
+ LinearResampler( (ISACdec_obj->plcstr_obj).lastPitchLP,
+ stretchPitchLP, lag0, (ISACdec_obj->plcstr_obj).stretchLag );
+
+ LinearResampler( (ISACdec_obj->plcstr_obj).prevPitchLP,
+ stretchPitchLP1, lag0, (ISACdec_obj->plcstr_obj).stretchLag );
+
+ switch( (ISACdec_obj->plcstr_obj).pitchCycles )
+ {
+ case 1:
+ {
+ for( k=0; k<(ISACdec_obj->plcstr_obj).stretchLag; k++ )
+ {
+ stretchPitchLP[k] = (int16_t)((
+ (int32_t)stretchPitchLP[k]* 3 +
+ (int32_t)stretchPitchLP1[k])>>2);
+ }
+ break;
+ }
+ case 2:
+ {
+ for( k=0; k<(ISACdec_obj->plcstr_obj).stretchLag; k++ )
+ {
+ stretchPitchLP[k] = (int16_t)((
+ (int32_t)stretchPitchLP[k] +
+ (int32_t)stretchPitchLP1[k] )>>1);
+ }
+ break;
+ }
+ case 3:
+ {
+ for( k=0; k<(ISACdec_obj->plcstr_obj).stretchLag; k++ )
+ {
+ stretchPitchLP[k] = (int16_t)((stretchPitchLP[k] +
+ (int32_t)stretchPitchLP1[k]*3 )>>2);
+ }
+ break;
+ }
+ }
+
+ if( (ISACdec_obj->plcstr_obj).pitchCycles == 3 )
+ {
+ myDecayRate += 35; //(myDecayRate>>1);
+ (ISACdec_obj->plcstr_obj).pitchCycles = 0;
+ }
+
+ }
+
+ /* ------ Sum the noisy and periodic signals ------ */
+ Vector_Word16_1[i] = pLP;
+ Vector_Word32_2[i] = pHP;
+ }
+ }
+ else
+ {
+ for( i = 0, noiseIndex = 0; i < FRAMESAMPLES_HALF; i++, noiseIndex++ )
+ {
+
+ (ISACdec_obj->plcstr_obj).seed = WEBRTC_SPL_RAND(
+ (ISACdec_obj->plcstr_obj).seed );
+
+ noise1 = (ISACdec_obj->plcstr_obj.seed >> 10) - 16;
+
+ nLP = (int16_t)((int16_t)(noise1 * ISACdec_obj->plcstr_obj.std) *
+ ISACdec_obj->plcstr_obj.decayCoeffNoise >> 15);
+
+ /* --- Highpass */
+ (ISACdec_obj->plcstr_obj).seed = WEBRTC_SPL_RAND(
+ (ISACdec_obj->plcstr_obj).seed );
+ noise1 = (ISACdec_obj->plcstr_obj.seed >> 11) - 8;
+
+ nHP = (int32_t)WEBRTC_SPL_MUL_16_32_RSFT15(
+ (ISACdec_obj->plcstr_obj).decayCoeffNoise,
+ (int32_t)(noise1*(ISACdec_obj->plcstr_obj).std) );
+
+ /* --- lower the multiplier (more decay at next sample) --- */
+ (ISACdec_obj->plcstr_obj).decayCoeffNoise -= (myDecayRate);
+ if( (ISACdec_obj->plcstr_obj).decayCoeffNoise < 0 )
+ (ISACdec_obj->plcstr_obj).decayCoeffNoise = 0;
+
+ /* ------ Periodic Vector --- */
+ /* --- Lowpass */
+ pLP = (int16_t)(stretchPitchLP[ISACdec_obj->plcstr_obj.pitchIndex] *
+ ISACdec_obj->plcstr_obj.decayCoeffPriodic >> 15);
+
+ /* --- Highpass */
+ pHP = (int32_t)WEBRTC_SPL_MUL_16_32_RSFT15(
+ (ISACdec_obj->plcstr_obj).decayCoeffPriodic,
+ (ISACdec_obj->plcstr_obj).prevHP[PITCH_MAX_LAG + 10 -
+ (ISACdec_obj->plcstr_obj).stretchLag +
+ (ISACdec_obj->plcstr_obj).pitchIndex] );
+
+ /* --- lower the multiplier (more decay at next sample) --- */
+ (ISACdec_obj->plcstr_obj).decayCoeffPriodic -= (myDecayRate);
+ if( (ISACdec_obj->plcstr_obj).decayCoeffPriodic < 0 )
+ {
+ (ISACdec_obj->plcstr_obj).decayCoeffPriodic = 0;
+ }
+
+ /* ------ Weighting the noisy and periodic vectors ------- */
+ wNoisyLP = (int16_t)(ISACdec_obj->plcstr_obj.A * nLP >> 15);
+ wNoisyHP = (int32_t)(WEBRTC_SPL_MUL_16_32_RSFT15(
+ (ISACdec_obj->plcstr_obj).A, (nHP) ) );
+
+ wPriodicLP = (int16_t)(ISACdec_obj->plcstr_obj.B * pLP >> 15);
+ wPriodicHP = (int32_t)(WEBRTC_SPL_MUL_16_32_RSFT15(
+ (ISACdec_obj->plcstr_obj).B, pHP));
+
+ (ISACdec_obj->plcstr_obj).pitchIndex++;
+
+ if((ISACdec_obj->plcstr_obj).pitchIndex ==
+ (ISACdec_obj->plcstr_obj).stretchLag)
+ {
+ (ISACdec_obj->plcstr_obj).pitchIndex = 0;
+ (ISACdec_obj->plcstr_obj).pitchCycles++;
+
+ if( (ISACdec_obj->plcstr_obj).stretchLag != (lag0 + 1) )
+ (ISACdec_obj->plcstr_obj).stretchLag = lag0 + 1;
+ else
+ (ISACdec_obj->plcstr_obj).stretchLag = lag0;
+
+ (ISACdec_obj->plcstr_obj).stretchLag = (
+ (ISACdec_obj->plcstr_obj).stretchLag > PITCH_MAX_LAG
+ )? (PITCH_MAX_LAG):(ISACdec_obj->plcstr_obj).stretchLag;
+ LinearResampler(
+ (ISACdec_obj->plcstr_obj).lastPitchLP,
+ stretchPitchLP, lag0, (ISACdec_obj->plcstr_obj).stretchLag );
+
+ LinearResampler((ISACdec_obj->plcstr_obj).prevPitchLP,
+ stretchPitchLP1, lag0, (ISACdec_obj->plcstr_obj).stretchLag );
+
+ switch((ISACdec_obj->plcstr_obj).pitchCycles)
+ {
+ case 1:
+ {
+ for( k=0; k<(ISACdec_obj->plcstr_obj).stretchLag; k++ )
+ {
+ stretchPitchLP[k] = (int16_t)((
+ (int32_t)stretchPitchLP[k]* 3 +
+ (int32_t)stretchPitchLP1[k] )>>2);
+ }
+ break;
+ }
+ case 2:
+ {
+ for( k=0; k<(ISACdec_obj->plcstr_obj).stretchLag; k++ )
+ {
+ stretchPitchLP[k] = (int16_t)((
+ (int32_t)stretchPitchLP[k] +
+ (int32_t)stretchPitchLP1[k])>>1);
+ }
+ break;
+ }
+ case 3:
+ {
+ for( k=0; k<(ISACdec_obj->plcstr_obj).stretchLag; k++ )
+ {
+ stretchPitchLP[k] = (int16_t)(
+ (stretchPitchLP[k] +
+ (int32_t)stretchPitchLP1[k]*3 )>>2);
+ }
+ break;
+ }
+ }
+
+ if( (ISACdec_obj->plcstr_obj).pitchCycles == 3 )
+ {
+ myDecayRate += 55; //(myDecayRate>>1);
+ (ISACdec_obj->plcstr_obj).pitchCycles = 0;
+ }
+ }
+
+ /* ------ Sum the noisy and periodic signals ------ */
+ Vector_Word16_1[i] = WebRtcSpl_AddSatW16(wNoisyLP, wPriodicLP);
+ Vector_Word32_2[i] = WebRtcSpl_AddSatW32(wNoisyHP, wPriodicHP);
+ }
+ }
+ }
+ /* ----------------- residual signal is reconstructed ------------------ */
+
+ k = (ISACdec_obj->plcstr_obj).pitchIndex;
+ /* --- Write one pitch cycle for recovery block --- */
+
+ for( i = 0; i < RECOVERY_OVERLAP; i++ )
+ {
+ ISACdec_obj->plcstr_obj.overlapLP[i] = (int16_t)(
+ stretchPitchLP[k] * ISACdec_obj->plcstr_obj.decayCoeffPriodic >> 15);
+ k = ( k < ((ISACdec_obj->plcstr_obj).stretchLag - 1) )? (k+1):0;
+ }
+
+ (ISACdec_obj->plcstr_obj).lastPitchLag_Q7 =
+ (int16_t)((ISACdec_obj->plcstr_obj).stretchLag << 7);
+
+
+ /* --- Inverse Pitch Filter --- */
+ WebRtcIsacfix_PitchFilter(Vector_Word16_1, Vector_Word16_2,
+ &ISACdec_obj->pitchfiltstr_obj, pitchLags_Q7, pitchGains_Q12, 4);
+
+ /* reduce gain to compensate for pitch enhancer */
+ /* gain = 1.0f - 0.45f * AvgPitchGain; */
+ tmp32a = ISACdec_obj->plcstr_obj.AvgPitchGain_Q12 * 29; // Q18
+ tmp32b = 262144 - tmp32a; // Q18
+ gainQ13 = (int16_t) (tmp32b >> 5); // Q13
+
+ /* perceptual post-filtering (using normalized lattice filter) */
+ for (k = 0; k < FRAMESAMPLES_HALF; k++)
+ Vector_Word32_1[k] = (Vector_Word16_2[k] * gainQ13) << 3; // Q25
+
+
+ WebRtcIsacfix_NormLatticeFilterAr(ORDERLO,
+ (ISACdec_obj->maskfiltstr_obj).PostStateLoGQ0,
+ Vector_Word32_1, lofilt_coefQ15, gain_lo_hiQ17, 0, Vector_Word16_1);
+
+ WebRtcIsacfix_NormLatticeFilterAr(ORDERHI,
+ (ISACdec_obj->maskfiltstr_obj).PostStateHiGQ0,
+ Vector_Word32_2, hifilt_coefQ15, gain_lo_hiQ17, 1, Vector_Word16_2);
+
+ /* recombine the 2 bands */
+
+ /* Form the polyphase signals, and compensate for DC offset */
+ for (k=0;k<FRAMESAMPLES_HALF;k++)
+ {
+ /* Construct a new upper channel signal*/
+ tmp_1 = (int16_t)WebRtcSpl_SatW32ToW16(
+ ((int32_t)Vector_Word16_1[k]+Vector_Word16_2[k] + 1));
+ /* Construct a new lower channel signal*/
+ tmp_2 = (int16_t)WebRtcSpl_SatW32ToW16(
+ ((int32_t)Vector_Word16_1[k]-Vector_Word16_2[k]));
+ Vector_Word16_1[k] = tmp_1;
+ Vector_Word16_2[k] = tmp_2;
+ }
+
+
+ WebRtcIsacfix_FilterAndCombine1(Vector_Word16_1,
+ Vector_Word16_2, signal_out16, &ISACdec_obj->postfiltbankstr_obj);
+
+ (ISACdec_obj->plcstr_obj).used = PLC_WAS_USED;
+ *current_framesamples = 480;
+}
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/encode.c b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/encode.c
new file mode 100644
index 0000000000..ef3e320e2c
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/encode.c
@@ -0,0 +1,635 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * encode.c
+ *
+ * Encoding function for the iSAC coder.
+ *
+ */
+
+#include "rtc_base/checks.h"
+#include "modules/audio_coding/codecs/isac/fix/source/codec.h"
+
+#include <stdio.h>
+
+#include "modules/audio_coding/codecs/isac/fix/source/arith_routins.h"
+#include "modules/audio_coding/codecs/isac/fix/source/bandwidth_estimator.h"
+#include "modules/audio_coding/codecs/isac/fix/source/entropy_coding.h"
+#include "modules/audio_coding/codecs/isac/fix/source/lpc_masking_model.h"
+#include "modules/audio_coding/codecs/isac/fix/source/lpc_tables.h"
+#include "modules/audio_coding/codecs/isac/fix/source/pitch_estimator.h"
+#include "modules/audio_coding/codecs/isac/fix/source/pitch_gain_tables.h"
+#include "modules/audio_coding/codecs/isac/fix/source/pitch_lag_tables.h"
+#include "modules/audio_coding/codecs/isac/fix/source/structs.h"
+
+
+int WebRtcIsacfix_EncodeImpl(int16_t *in,
+ IsacFixEncoderInstance *ISACenc_obj,
+ BwEstimatorstr *bw_estimatordata,
+ int16_t CodingMode)
+{
+ int16_t stream_length = 0;
+ int16_t usefulstr_len = 0;
+ int k;
+ int16_t BWno;
+
+ int16_t lofilt_coefQ15[(ORDERLO)*SUBFRAMES];
+ int16_t hifilt_coefQ15[(ORDERHI)*SUBFRAMES];
+ int32_t gain_lo_hiQ17[2*SUBFRAMES];
+
+ int16_t LPandHP[FRAMESAMPLES/2 + QLOOKAHEAD];
+ int16_t LP16a[FRAMESAMPLES/2 + QLOOKAHEAD];
+ int16_t HP16a[FRAMESAMPLES/2 + QLOOKAHEAD];
+
+ int16_t PitchLags_Q7[PITCH_SUBFRAMES];
+ int16_t PitchGains_Q12[PITCH_SUBFRAMES];
+ int16_t AvgPitchGain_Q12;
+
+ int16_t frame_mode; /* 0 for 30ms, 1 for 60ms */
+ int16_t processed_samples;
+ int status;
+
+ int32_t bits_gainsQ11;
+ int16_t MinBytes;
+ int16_t bmodel;
+
+ transcode_obj transcodingParam;
+ int16_t payloadLimitBytes;
+ int16_t arithLenBeforeEncodingDFT;
+ int16_t iterCntr;
+
+ /* copy new frame length and bottle neck rate only for the first 10 ms data */
+ if (ISACenc_obj->buffer_index == 0) {
+ /* set the framelength for the next packet */
+ ISACenc_obj->current_framesamples = ISACenc_obj->new_framelength;
+ }
+
+ frame_mode = ISACenc_obj->current_framesamples/MAX_FRAMESAMPLES; /* 0 (30 ms) or 1 (60 ms) */
+ processed_samples = ISACenc_obj->current_framesamples/(frame_mode+1); /* 480 (30, 60 ms) */
+
+ /* buffer speech samples (by 10ms packet) until the framelength is reached (30 or 60 ms) */
+ /**************************************************************************************/
+ /* fill the buffer with 10ms input data */
+ for(k=0; k<FRAMESAMPLES_10ms; k++) {
+ ISACenc_obj->data_buffer_fix[k + ISACenc_obj->buffer_index] = in[k];
+ }
+ /* if buffersize is not equal to current framesize, and end of file is not reached yet, */
+ /* increase index and go back to main to get more speech samples */
+ if (ISACenc_obj->buffer_index + FRAMESAMPLES_10ms != processed_samples) {
+ ISACenc_obj->buffer_index = ISACenc_obj->buffer_index + FRAMESAMPLES_10ms;
+ return 0;
+ }
+ /* if buffer reached the right size, reset index and continue with encoding the frame */
+ ISACenc_obj->buffer_index = 0;
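+
+ /* Example (assuming FS = 16000, FRAMESAMPLES_10ms = 160): a 30 ms frame
+ (processed_samples = 480) is assembled over three calls; the first two
+ calls return 0 above without producing a payload. */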
+
+ /* end of buffer function */
+ /**************************/
+
+ /* encoding */
+ /************/
+
+ if (frame_mode == 0 || ISACenc_obj->frame_nb == 0 )
+ {
+ /* reset bitstream */
+ ISACenc_obj->bitstr_obj.W_upper = 0xFFFFFFFF;
+ ISACenc_obj->bitstr_obj.streamval = 0;
+ ISACenc_obj->bitstr_obj.stream_index = 0;
+ ISACenc_obj->bitstr_obj.full = 1;
+
+ if (CodingMode == 0) {
+ ISACenc_obj->BottleNeck = WebRtcIsacfix_GetUplinkBandwidth(bw_estimatordata);
+ ISACenc_obj->MaxDelay = WebRtcIsacfix_GetUplinkMaxDelay(bw_estimatordata);
+ }
+ if (CodingMode == 0 && frame_mode == 0 && (ISACenc_obj->enforceFrameSize == 0)) {
+ ISACenc_obj->new_framelength = WebRtcIsacfix_GetNewFrameLength(ISACenc_obj->BottleNeck,
+ ISACenc_obj->current_framesamples);
+ }
+
+ // Multiply the bottleneck by 0.88 before computing SNR; 0.88 was tuned by
+ // experiments on TIMIT. 901/1024 = 0.87988281250000.
+ ISACenc_obj->s2nr = WebRtcIsacfix_GetSnr(
+ (int16_t)(ISACenc_obj->BottleNeck * 901 >> 10),
+ ISACenc_obj->current_framesamples);
+
+ /* encode frame length */
+ status = WebRtcIsacfix_EncodeFrameLen(ISACenc_obj->current_framesamples, &ISACenc_obj->bitstr_obj);
+ if (status < 0)
+ {
+ /* Wrong frame size */
+ if (frame_mode == 1 && ISACenc_obj->frame_nb == 1)
+ {
+ // If this is the second 30ms of a 60ms frame, reset this so that the
+ // encoder starts fresh in the next call.
+ ISACenc_obj->frame_nb = 0;
+ }
+ return status;
+ }
+
+ /* Save framelength for multiple packets memory */
+ if (ISACenc_obj->SaveEnc_ptr != NULL) {
+ (ISACenc_obj->SaveEnc_ptr)->framelength=ISACenc_obj->current_framesamples;
+ }
+
+ /* bandwidth estimation and coding */
+ BWno = WebRtcIsacfix_GetDownlinkBwIndexImpl(bw_estimatordata);
+ status = WebRtcIsacfix_EncodeReceiveBandwidth(&BWno, &ISACenc_obj->bitstr_obj);
+ if (status < 0)
+ {
+ if (frame_mode == 1 && ISACenc_obj->frame_nb == 1)
+ {
+ // If this is the second 30ms of a 60ms frame, reset this so that the
+ // encoder starts fresh in the next call.
+ ISACenc_obj->frame_nb = 0;
+ }
+ return status;
+ }
+ }
+
+ /* split signal in two bands */
+ WebRtcIsacfix_SplitAndFilter1(ISACenc_obj->data_buffer_fix, LP16a, HP16a, &ISACenc_obj->prefiltbankstr_obj );
+
+ /* estimate pitch parameters and pitch-filter lookahead signal */
+ WebRtcIsacfix_PitchAnalysis(LP16a+QLOOKAHEAD, LPandHP,
+ &ISACenc_obj->pitchanalysisstr_obj, PitchLags_Q7, PitchGains_Q12); /* LPandHP = LP_lookahead_pfQ0, */
+
+ /* Set where to store data in multiple packets memory */
+ if (ISACenc_obj->SaveEnc_ptr != NULL) {
+ if (frame_mode == 0 || ISACenc_obj->frame_nb == 0)
+ {
+ (ISACenc_obj->SaveEnc_ptr)->startIdx = 0;
+ }
+ else
+ {
+ (ISACenc_obj->SaveEnc_ptr)->startIdx = 1;
+ }
+ }
+
+ /* quantize & encode pitch parameters */
+ status = WebRtcIsacfix_EncodePitchGain(PitchGains_Q12, &ISACenc_obj->bitstr_obj, ISACenc_obj->SaveEnc_ptr);
+ if (status < 0)
+ {
+ if (frame_mode == 1 && ISACenc_obj->frame_nb == 1)
+ {
+ // If this is the second 30ms of a 60ms frame, reset this so that the
+ // encoder starts fresh in the next call.
+ ISACenc_obj->frame_nb = 0;
+ }
+ return status;
+ }
+ status = WebRtcIsacfix_EncodePitchLag(PitchLags_Q7 , PitchGains_Q12, &ISACenc_obj->bitstr_obj, ISACenc_obj->SaveEnc_ptr);
+ if (status < 0)
+ {
+ if (frame_mode == 1 && ISACenc_obj->frame_nb == 1)
+ {
+ // If this is the second 30ms of a 60ms frame, reset this so that the
+ // encoder starts fresh in the next call.
+ ISACenc_obj->frame_nb = 0;
+ }
+ return status;
+ }
+ AvgPitchGain_Q12 = (PitchGains_Q12[0] + PitchGains_Q12[1] +
+ PitchGains_Q12[2] + PitchGains_Q12[3]) >> 2;
+
+ /* find coefficients for perceptual pre-filters */
+ WebRtcIsacfix_GetLpcCoef(LPandHP, HP16a+QLOOKAHEAD, &ISACenc_obj->maskfiltstr_obj,
+ ISACenc_obj->s2nr, PitchGains_Q12,
+ gain_lo_hiQ17, lofilt_coefQ15, hifilt_coefQ15); /*LPandHP = LP_lookahead_pfQ0*/
+
+ // record LPC Gains for possible bit-rate reduction
+ for(k = 0; k < KLT_ORDER_GAIN; k++)
+ {
+ transcodingParam.lpcGains[k] = gain_lo_hiQ17[k];
+ }
+
+ /* code LPC model and shape - gains not quantized yet */
+ status = WebRtcIsacfix_EncodeLpc(gain_lo_hiQ17, lofilt_coefQ15, hifilt_coefQ15,
+ &bmodel, &bits_gainsQ11, &ISACenc_obj->bitstr_obj, ISACenc_obj->SaveEnc_ptr, &transcodingParam);
+ if (status < 0)
+ {
+ if (frame_mode == 1 && ISACenc_obj->frame_nb == 1)
+ {
+ // If this is the second 30ms of a 60ms frame, reset this so that the
+ // encoder starts fresh in the next call.
+ ISACenc_obj->frame_nb = 0;
+ }
+ return status;
+ }
+ arithLenBeforeEncodingDFT = (ISACenc_obj->bitstr_obj.stream_index << 1) + (1-ISACenc_obj->bitstr_obj.full);
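+ /* stream_index appears to count 16-bit words, so (stream_index << 1) is
+ the byte count and the (1 - full) term adds one byte when the current
+ word is only half filled (an inference from its usage here). */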
+
+ /* low-band filtering */
+ WebRtcIsacfix_NormLatticeFilterMa(ORDERLO, ISACenc_obj->maskfiltstr_obj.PreStateLoGQ15,
+ LP16a, lofilt_coefQ15, gain_lo_hiQ17, 0, LPandHP);/* LPandHP = LP16b */
+
+ /* pitch filter */
+ WebRtcIsacfix_PitchFilter(LPandHP, LP16a, &ISACenc_obj->pitchfiltstr_obj, PitchLags_Q7, PitchGains_Q12, 1);/* LPandHP = LP16b */
+
+ /* high-band filtering */
+ WebRtcIsacfix_NormLatticeFilterMa(ORDERHI, ISACenc_obj->maskfiltstr_obj.PreStateHiGQ15,
+ HP16a, hifilt_coefQ15, gain_lo_hiQ17, 1, LPandHP);/*LPandHP = HP16b*/
+
+ /* transform */
+ WebRtcIsacfix_Time2Spec(LP16a, LPandHP, LP16a, LPandHP); /*LPandHP = HP16b*/
+
+ /* Save data for multiple packets memory */
+ if (ISACenc_obj->SaveEnc_ptr != NULL) {
+ for (k = 0; k < FRAMESAMPLES_HALF; k++) {
+ (ISACenc_obj->SaveEnc_ptr)->fre[k + (ISACenc_obj->SaveEnc_ptr)->startIdx*FRAMESAMPLES_HALF] = LP16a[k];
+ (ISACenc_obj->SaveEnc_ptr)->fim[k + (ISACenc_obj->SaveEnc_ptr)->startIdx*FRAMESAMPLES_HALF] = LPandHP[k];
+ }
+ (ISACenc_obj->SaveEnc_ptr)->AvgPitchGain[(ISACenc_obj->SaveEnc_ptr)->startIdx] = AvgPitchGain_Q12;
+ }
+
+ /* quantization and lossless coding */
+ status = WebRtcIsacfix_EncodeSpec(LP16a, LPandHP, &ISACenc_obj->bitstr_obj, AvgPitchGain_Q12);
+ if((status <= -1) && (status != -ISAC_DISALLOWED_BITSTREAM_LENGTH)) /*LPandHP = HP16b*/
+ {
+ if (frame_mode == 1 && ISACenc_obj->frame_nb == 1)
+ {
+ // If this is the second 30ms of a 60ms frame, reset this so that the
+ // encoder starts fresh in the next call.
+ ISACenc_obj->frame_nb = 0;
+ }
+ return status;
+ }
+
+ if((frame_mode == 1) && (ISACenc_obj->frame_nb == 0))
+ {
+ // It is a 60ms frame and we are in its first 30ms, so the limit at this
+ // point should be half of the assigned value.
+ payloadLimitBytes = ISACenc_obj->payloadLimitBytes60 >> 1;
+ }
+ else if (frame_mode == 0)
+ {
+ // it is a 30ms frame
+ payloadLimitBytes = (ISACenc_obj->payloadLimitBytes30) - 3;
+ }
+ else
+ {
+ // this is the second half of a 60ms frame.
+ payloadLimitBytes = ISACenc_obj->payloadLimitBytes60 - 3; // subtract 3 because the termination process may add 3 bytes
+ }
+
+ iterCntr = 0;
+ while((((ISACenc_obj->bitstr_obj.stream_index) << 1) > payloadLimitBytes) ||
+ (status == -ISAC_DISALLOWED_BITSTREAM_LENGTH))
+ {
+ int16_t arithLenDFTByte;
+ int16_t bytesLeftQ5;
+ int16_t ratioQ5[8] = {0, 6, 9, 12, 16, 19, 22, 25};
+
+ // According to experiments on TIMIT the following is proper for audio, but it is not aggressive enough for tonal inputs
+ // such as DTMF, sweep-sine, ...
+ //
+ // (0.55 - (0.8 - ratio[i]/32) * 5 / 6) * 2^14
+ // int16_t scaleQ14[8] = {0, 648, 1928, 3208, 4915, 6195, 7475, 8755};
+
+
+ // This super-aggressive scaling passed the tests on tonal inputs with one iteration; for a payload limit
+ // of 120 bytes (32 kbps bottleneck), the number of frames that needed a rate reduction was 58403.
+ //
+ int16_t scaleQ14[8] = {0, 348, 828, 1408, 2015, 3195, 3500, 3500};
+ int16_t idx;
+
+ if(iterCntr >= MAX_PAYLOAD_LIMIT_ITERATION)
+ {
+ // We were not able to limit the payload size
+
+ if((frame_mode == 1) && (ISACenc_obj->frame_nb == 0))
+ {
+ // This was the first 30 ms of a 60 ms frame. Although the payload is larger than it
+ // should be, we let the second 30 ms be encoded. Maybe together they won't exceed
+ // the limit.
+ ISACenc_obj->frame_nb = 1;
+ return 0;
+ }
+ else if((frame_mode == 1) && (ISACenc_obj->frame_nb == 1))
+ {
+ ISACenc_obj->frame_nb = 0;
+ }
+
+ if(status != -ISAC_DISALLOWED_BITSTREAM_LENGTH)
+ {
+ return -ISAC_PAYLOAD_LARGER_THAN_LIMIT;
+ }
+ else
+ {
+ return status;
+ }
+ }
+ if(status != -ISAC_DISALLOWED_BITSTREAM_LENGTH)
+ {
+ arithLenDFTByte = (ISACenc_obj->bitstr_obj.stream_index << 1) + (1-ISACenc_obj->bitstr_obj.full) - arithLenBeforeEncodingDFT;
+ bytesLeftQ5 = (payloadLimitBytes - arithLenBeforeEncodingDFT) << 5;
+
+ // The ratio bytesLeft / arithLenDFTBytes gives a rough (aggressive) estimate of how much scaling is required:
+ // scale = 0.55 - (0.8 - bytesLeft / arithLenDFTBytes) * 5 / 6
+ // Ratios below 0.2 get a scale of zero and ratios above 0.8 are treated as 0.8;
+ // to avoid division we simplify further.
+ //
+ // values of (bytesLeft / arithLenDFTBytes)*32 between ratioQ5[i] and ratioQ5[i+1] are rounded to ratioQ5[i]
+ // and the corresponding scale is chosen
+
+ // we compare bytesLeftQ5 with ratioQ5[]*arithLenDFTByte;
+ idx = 4;
+ idx += (bytesLeftQ5 >= ratioQ5[idx] * arithLenDFTByte) ? 2 : -2;
+ idx += (bytesLeftQ5 >= ratioQ5[idx] * arithLenDFTByte) ? 1 : -1;
+ idx += (bytesLeftQ5 >= ratioQ5[idx] * arithLenDFTByte) ? 0 : -1;
+ }
+ else
+ {
+ // We are here because the bit-stream did not fit into the buffer; in this case the stream_index is not
+ // trustworthy, especially if this is the first 30 ms of a packet. Therefore, we go for the most
+ // aggressive case.
+ idx = 0;
+ }
+ // scale FFT coefficients to reduce the bit-rate
+ for(k = 0; k < FRAMESAMPLES_HALF; k++)
+ {
+ LP16a[k] = (int16_t)(LP16a[k] * scaleQ14[idx] >> 14);
+ LPandHP[k] = (int16_t)(LPandHP[k] * scaleQ14[idx] >> 14);
+ }
+
+ // Save data for multiple packets memory
+ if (ISACenc_obj->SaveEnc_ptr != NULL)
+ {
+ for(k = 0; k < FRAMESAMPLES_HALF; k++)
+ {
+ (ISACenc_obj->SaveEnc_ptr)->fre[k + (ISACenc_obj->SaveEnc_ptr)->startIdx*FRAMESAMPLES_HALF] = LP16a[k];
+ (ISACenc_obj->SaveEnc_ptr)->fim[k + (ISACenc_obj->SaveEnc_ptr)->startIdx*FRAMESAMPLES_HALF] = LPandHP[k];
+ }
+ }
+
+ // scale the unquantized LPC gains and save the scaled version for future use
+ for(k = 0; k < KLT_ORDER_GAIN; k++)
+ {
+ gain_lo_hiQ17[k] = WEBRTC_SPL_MUL_16_32_RSFT14(scaleQ14[idx], transcodingParam.lpcGains[k]);//transcodingParam.lpcGains[k]; //
+ transcodingParam.lpcGains[k] = gain_lo_hiQ17[k];
+ }
+
+ // reset the bit-stream object to the state which it had before encoding LPC Gains
+ ISACenc_obj->bitstr_obj.full = transcodingParam.full;
+ ISACenc_obj->bitstr_obj.stream_index = transcodingParam.stream_index;
+ ISACenc_obj->bitstr_obj.streamval = transcodingParam.streamval;
+ ISACenc_obj->bitstr_obj.W_upper = transcodingParam.W_upper;
+ ISACenc_obj->bitstr_obj.stream[transcodingParam.stream_index-1] = transcodingParam.beforeLastWord;
+ ISACenc_obj->bitstr_obj.stream[transcodingParam.stream_index] = transcodingParam.lastWord;
+
+
+ // quantize and encode LPC gain
+ WebRtcIsacfix_EstCodeLpcGain(gain_lo_hiQ17, &ISACenc_obj->bitstr_obj, ISACenc_obj->SaveEnc_ptr);
+ arithLenBeforeEncodingDFT = (ISACenc_obj->bitstr_obj.stream_index << 1) + (1-ISACenc_obj->bitstr_obj.full);
+ status = WebRtcIsacfix_EncodeSpec(LP16a, LPandHP, &ISACenc_obj->bitstr_obj, AvgPitchGain_Q12);
+ if((status <= -1) && (status != -ISAC_DISALLOWED_BITSTREAM_LENGTH)) /*LPandHP = HP16b*/
+ {
+ if (frame_mode == 1 && ISACenc_obj->frame_nb == 1)
+ {
+ // If this is the second 30 ms of a 60 ms frame, reset the state so that
+ // the encoder starts fresh on the next call.
+ ISACenc_obj->frame_nb = 0;
+ }
+ return status;
+ }
+ iterCntr++;
+ }
+
+ if (frame_mode == 1 && ISACenc_obj->frame_nb == 0)
+ /* i.e. 60 ms framesize and just processed the first 30ms, */
+ /* go back to main function to buffer the other 30ms speech frame */
+ {
+ ISACenc_obj->frame_nb = 1;
+ return 0;
+ }
+ else if (frame_mode == 1 && ISACenc_obj->frame_nb == 1)
+ {
+ ISACenc_obj->frame_nb = 0;
+ /* also update the framelength for next packet, in Adaptive mode only */
+ if (CodingMode == 0 && (ISACenc_obj->enforceFrameSize == 0)) {
+ ISACenc_obj->new_framelength = WebRtcIsacfix_GetNewFrameLength(ISACenc_obj->BottleNeck,
+ ISACenc_obj->current_framesamples);
+ }
+ }
+
+
+ /* complete arithmetic coding */
+ stream_length = WebRtcIsacfix_EncTerminate(&ISACenc_obj->bitstr_obj);
+ /* can this be negative? */
+
+ if(CodingMode == 0)
+ {
+
+ /* update rate model and get minimum number of bytes in this packet */
+ MinBytes = WebRtcIsacfix_GetMinBytes(&ISACenc_obj->rate_data_obj, (int16_t) stream_length,
+ ISACenc_obj->current_framesamples, ISACenc_obj->BottleNeck, ISACenc_obj->MaxDelay);
+
+ /* if bitstream is too short, add garbage at the end */
+
+ /* Store length of coded data */
+ usefulstr_len = stream_length;
+
+ /* Make sure MinBytes does not exceed packet size limit */
+ if ((ISACenc_obj->frame_nb == 0) && (MinBytes > ISACenc_obj->payloadLimitBytes30)) {
+ MinBytes = ISACenc_obj->payloadLimitBytes30;
+ } else if ((ISACenc_obj->frame_nb == 1) && (MinBytes > ISACenc_obj->payloadLimitBytes60)) {
+ MinBytes = ISACenc_obj->payloadLimitBytes60;
+ }
+
+ /* Make sure we don't allow more than 255 bytes of garbage data.
+ We store the length of the garbage data in 8 bits in the bitstream;
+ 255 is the maximum garbage length we can signal using 8 bits. */
+ if( MinBytes > usefulstr_len + 255 ) {
+ MinBytes = usefulstr_len + 255;
+ }
+
+ /* Save data for creation of multiple bitstreams */
+ if (ISACenc_obj->SaveEnc_ptr != NULL) {
+ (ISACenc_obj->SaveEnc_ptr)->minBytes = MinBytes;
+ }
+
+ while (stream_length < MinBytes)
+ {
+ RTC_DCHECK_GE(stream_length, 0);
+ if (stream_length & 0x0001){
+ ISACenc_obj->bitstr_seed = WEBRTC_SPL_RAND( ISACenc_obj->bitstr_seed );
+ ISACenc_obj->bitstr_obj.stream[stream_length / 2] |=
+ (uint16_t)(ISACenc_obj->bitstr_seed & 0xFF);
+ } else {
+ ISACenc_obj->bitstr_seed = WEBRTC_SPL_RAND( ISACenc_obj->bitstr_seed );
+ ISACenc_obj->bitstr_obj.stream[stream_length / 2] =
+ ((uint16_t)ISACenc_obj->bitstr_seed << 8);
+ }
+ stream_length++;
+ }
+
+ /* to get the real stream_length, without garbage */
+ if (usefulstr_len & 0x0001) {
+ ISACenc_obj->bitstr_obj.stream[usefulstr_len>>1] &= 0xFF00;
+ ISACenc_obj->bitstr_obj.stream[usefulstr_len>>1] += (MinBytes - usefulstr_len) & 0x00FF;
+ }
+ else {
+ ISACenc_obj->bitstr_obj.stream[usefulstr_len>>1] &= 0x00FF;
+ ISACenc_obj->bitstr_obj.stream[usefulstr_len >> 1] +=
+ ((uint16_t)((MinBytes - usefulstr_len) & 0x00FF) << 8);
+ }
+ }
+ else
+ {
+ /* update rate model */
+ WebRtcIsacfix_UpdateRateModel(&ISACenc_obj->rate_data_obj, (int16_t) stream_length,
+ ISACenc_obj->current_framesamples, ISACenc_obj->BottleNeck);
+ }
+ return stream_length;
+}
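+
+/* Illustrative note (not part of the original source): the garbage padding
+ loop above fills with pseudo-random bytes, and the byte immediately after
+ the useful stream signals the garbage length (MinBytes - usefulstr_len,
+ capped at 255). With usefulstr_len odd the count goes into the low byte of
+ stream[usefulstr_len >> 1]; with usefulstr_len even, into the high byte of
+ that 16-bit word. */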
+
+/* This function is used to create a new bitstream with a new BWE.
+ The same data as previously encoded with the function WebRtcIsacfix_EncodeImpl()
+ is used. The data needed is taken from the struct, where it was stored
+ when calling the encoder. */
+int WebRtcIsacfix_EncodeStoredData(IsacFixEncoderInstance *ISACenc_obj,
+ int BWnumber,
+ float scale)
+{
+ int ii;
+ int status;
+ int16_t BWno = (int16_t)BWnumber;
+ int stream_length = 0;
+
+ int16_t model;
+ const uint16_t *Q_PitchGain_cdf_ptr[1];
+ const uint16_t **cdf;
+ const IsacSaveEncoderData *SaveEnc_str;
+ int32_t tmpLPCcoeffs_g[KLT_ORDER_GAIN<<1];
+ int16_t tmpLPCindex_g[KLT_ORDER_GAIN<<1];
+ int16_t tmp_fre[FRAMESAMPLES];
+ int16_t tmp_fim[FRAMESAMPLES];
+
+ SaveEnc_str = ISACenc_obj->SaveEnc_ptr;
+
+ /* Check if SaveEnc memory exists */
+ if (SaveEnc_str == NULL) {
+ return (-1);
+ }
+
+ /* Sanity check - possible values for BWnumber are 0 - 23 */
+ if ((BWnumber < 0) || (BWnumber > 23)) {
+ return -ISAC_RANGE_ERROR_BW_ESTIMATOR;
+ }
+
+ /* reset bitstream */
+ ISACenc_obj->bitstr_obj.W_upper = 0xFFFFFFFF;
+ ISACenc_obj->bitstr_obj.streamval = 0;
+ ISACenc_obj->bitstr_obj.stream_index = 0;
+ ISACenc_obj->bitstr_obj.full = 1;
+
+ /* encode frame length */
+ status = WebRtcIsacfix_EncodeFrameLen(SaveEnc_str->framelength, &ISACenc_obj->bitstr_obj);
+ if (status < 0) {
+ /* Wrong frame size */
+ return status;
+ }
+
+ /* encode bandwidth estimate */
+ status = WebRtcIsacfix_EncodeReceiveBandwidth(&BWno, &ISACenc_obj->bitstr_obj);
+ if (status < 0) {
+ return status;
+ }
+
+ /* Transcoding */
+ /* If scale < 1, rescale data to produce lower bitrate signal */
+ if ((0.0 < scale) && (scale < 1.0)) {
+ /* Compensate LPC gain */
+ for (ii = 0; ii < (KLT_ORDER_GAIN*(1+SaveEnc_str->startIdx)); ii++) {
+ tmpLPCcoeffs_g[ii] = (int32_t) ((scale) * (float) SaveEnc_str->LPCcoeffs_g[ii]);
+ }
+
+ /* Scale DFT */
+ for (ii = 0; ii < (FRAMESAMPLES_HALF*(1+SaveEnc_str->startIdx)); ii++) {
+ tmp_fre[ii] = (int16_t) ((scale) * (float) SaveEnc_str->fre[ii]) ;
+ tmp_fim[ii] = (int16_t) ((scale) * (float) SaveEnc_str->fim[ii]) ;
+ }
+ } else {
+ for (ii = 0; ii < (KLT_ORDER_GAIN*(1+SaveEnc_str->startIdx)); ii++) {
+ tmpLPCindex_g[ii] = SaveEnc_str->LPCindex_g[ii];
+ }
+
+ for (ii = 0; ii < (FRAMESAMPLES_HALF*(1+SaveEnc_str->startIdx)); ii++) {
+ tmp_fre[ii] = SaveEnc_str->fre[ii];
+ tmp_fim[ii] = SaveEnc_str->fim[ii];
+ }
+ }
+
+ /* Loop over the number of 30 msec frames */
+ for (ii = 0; ii <= SaveEnc_str->startIdx; ii++)
+ {
+
+ /* encode pitch gains */
+ *Q_PitchGain_cdf_ptr = WebRtcIsacfix_kPitchGainCdf;
+ status = WebRtcIsacfix_EncHistMulti(&ISACenc_obj->bitstr_obj, &SaveEnc_str->pitchGain_index[ii],
+ Q_PitchGain_cdf_ptr, 1);
+ if (status < 0) {
+ return status;
+ }
+
+ /* entropy coding of quantization pitch lags */
+ /* voicing classification */
+ if (SaveEnc_str->meanGain[ii] <= 819) {
+ cdf = WebRtcIsacfix_kPitchLagPtrLo;
+ } else if (SaveEnc_str->meanGain[ii] <= 1638) {
+ cdf = WebRtcIsacfix_kPitchLagPtrMid;
+ } else {
+ cdf = WebRtcIsacfix_kPitchLagPtrHi;
+ }
+ status = WebRtcIsacfix_EncHistMulti(&ISACenc_obj->bitstr_obj,
+ &SaveEnc_str->pitchIndex[PITCH_SUBFRAMES*ii], cdf, PITCH_SUBFRAMES);
+ if (status < 0) {
+ return status;
+ }
+
+ /* LPC */
+ /* entropy coding of model number */
+ model = 0;
+ status = WebRtcIsacfix_EncHistMulti(&ISACenc_obj->bitstr_obj, &model,
+ WebRtcIsacfix_kModelCdfPtr, 1);
+ if (status < 0) {
+ return status;
+ }
+
+ /* entropy coding of quantization indices - LPC shape only */
+ status = WebRtcIsacfix_EncHistMulti(&ISACenc_obj->bitstr_obj, &SaveEnc_str->LPCindex_s[KLT_ORDER_SHAPE*ii],
+ WebRtcIsacfix_kCdfShapePtr[0], KLT_ORDER_SHAPE);
+ if (status < 0) {
+ return status;
+ }
+
+ /* If transcoding, get new LPC gain indices */
+ if (scale < 1.0) {
+ WebRtcIsacfix_TranscodeLpcCoef(&tmpLPCcoeffs_g[KLT_ORDER_GAIN*ii], &tmpLPCindex_g[KLT_ORDER_GAIN*ii]);
+ }
+
+ /* entropy coding of quantization indices - LPC gain */
+ status = WebRtcIsacfix_EncHistMulti(&ISACenc_obj->bitstr_obj, &tmpLPCindex_g[KLT_ORDER_GAIN*ii],
+ WebRtcIsacfix_kCdfGainPtr[0], KLT_ORDER_GAIN);
+ if (status < 0) {
+ return status;
+ }
+
+ /* quantization and lossless coding */
+ status = WebRtcIsacfix_EncodeSpec(&tmp_fre[ii*FRAMESAMPLES_HALF], &tmp_fim[ii*FRAMESAMPLES_HALF],
+ &ISACenc_obj->bitstr_obj, SaveEnc_str->AvgPitchGain[ii]);
+ if (status < 0) {
+ return status;
+ }
+ }
+
+ /* complete arithmetic coding */
+ stream_length = WebRtcIsacfix_EncTerminate(&ISACenc_obj->bitstr_obj);
+
+ return stream_length;
+}
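+
+/* Illustrative usage sketch (not part of the original source): rebuilding a
+ lower-rate bitstream from the saved encoder data. The wrapper name and the
+ BWE index 7 are hypothetical; BWnumber must be in [0, 23] and
+ enc->SaveEnc_ptr must have been populated by WebRtcIsacfix_EncodeImpl(). */
+#if 0
+static int TranscodeToHalfScale(IsacFixEncoderInstance* enc) {
+  /* scale < 1.0 rescales the stored LPC gains and DFT coefficients. */
+  int len = WebRtcIsacfix_EncodeStoredData(enc, /*BWnumber=*/7, /*scale=*/0.5f);
+  return len;  /* stream length in bytes, or a negative error code */
+}
+#endif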
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/entropy_coding.c b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/entropy_coding.c
new file mode 100644
index 0000000000..842e77f47e
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/entropy_coding.c
@@ -0,0 +1,2056 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * entropy_coding.c
+ *
+ * This file contains all functions used to arithmetically
+ * encode the iSAC bitstream.
+ *
+ */
+
+#include <stddef.h>
+
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+#include "modules/audio_coding/codecs/isac/fix/source/arith_routins.h"
+#include "modules/audio_coding/codecs/isac/fix/source/entropy_coding.h"
+#include "modules/audio_coding/codecs/isac/fix/source/lpc_tables.h"
+#include "modules/audio_coding/codecs/isac/fix/source/pitch_gain_tables.h"
+#include "modules/audio_coding/codecs/isac/fix/source/pitch_lag_tables.h"
+#include "modules/audio_coding/codecs/isac/fix/source/settings.h"
+#include "modules/audio_coding/codecs/isac/fix/source/spectrum_ar_model_tables.h"
+#include "rtc_base/sanitizer.h"
+
+/*
+ * Enumerations for arguments to functions WebRtcIsacfix_MatrixProduct1()
+ * and WebRtcIsacfix_MatrixProduct2().
+*/
+
+enum matrix_index_factor {
+ kTIndexFactor1 = 1,
+ kTIndexFactor2 = 2,
+ kTIndexFactor3 = SUBFRAMES,
+ kTIndexFactor4 = LPC_SHAPE_ORDER
+};
+
+enum matrix_index_step {
+ kTIndexStep1 = 1,
+ kTIndexStep2 = SUBFRAMES,
+ kTIndexStep3 = LPC_SHAPE_ORDER
+};
+
+enum matrixprod_loop_count {
+ kTLoopCount1 = SUBFRAMES,
+ kTLoopCount2 = 2,
+ kTLoopCount3 = LPC_SHAPE_ORDER
+};
+
+enum matrix1_shift_value {
+ kTMatrix1_shift0 = 0,
+ kTMatrix1_shift1 = 1,
+ kTMatrix1_shift5 = 5
+};
+
+enum matrixprod_init_case {
+ kTInitCase0 = 0,
+ kTInitCase1 = 1
+};
+
+/*
+ This function implements the fixed-point counterpart of lrint.
+
+ FLP: (int32_t)floor(flt+.499999999999)
+ FIP: (fixVal+roundVal)>>qDomain
+
+ where roundVal = 2^(qDomain-1) = 1<<(qDomain-1)
+
+*/
+static __inline int32_t CalcLrIntQ(int32_t fixVal, int16_t qDomain) {
+ return (fixVal + (1 << (qDomain - 1))) >> qDomain;
+}
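+
+/* Worked example (illustrative, not in the original source):
+ CalcLrIntQ(196608, 17) rounds 1.5 in Q17:
+ (196608 + 65536) >> 17 = 262144 >> 17 = 2,
+ so exact halves round up; the floating-point reference with +0.4999...
+ would give 1, a difference that only matters at exact halves. */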
+
+/*
+ __inline uint32_t stepwise(int32_t dinQ10) {
+
+ int32_t ind, diQ10, dtQ10;
+
+ diQ10 = dinQ10;
+ if (diQ10 < DPMIN_Q10)
+ diQ10 = DPMIN_Q10;
+ if (diQ10 >= DPMAX_Q10)
+ diQ10 = DPMAX_Q10 - 1;
+
+ dtQ10 = diQ10 - DPMIN_Q10;  // Q10 + Q10 = Q10
+ ind = (dtQ10 * 5) >> 10;    // 2^10 / 5 = 0.2 in Q10; Q10 -> Q0
+
+ return rpointsFIX_Q10[ind];
+ }
+*/
+
+/* logN(x) = logN(2)*log2(x) = 0.6931*log2(x). Output in Q8. */
+/* The input argument X to logN(X) is 2^17 times higher than the
+ input floating-point argument Y to log(Y), since the X value
+ is a Q17 value. This can be compensated for after the call by
+ subtracting a value Z for each Q-step. One Q-step means that
+ X gets 2 times higher, i.e. Z = logN(2)*256 = 0.693147180559*256 =
+ 177.445678 should be subtracted (since logN() returns a Q8 value).
+ For an X value in Q17, the value 177.445678*17 = 3017 should be
+ subtracted */
+static int16_t CalcLogN(int32_t arg) {
+ int16_t zeros, log2, frac, logN;
+
+ zeros=WebRtcSpl_NormU32(arg);
+ frac = (int16_t)((uint32_t)((arg << zeros) & 0x7FFFFFFF) >> 23);
+ log2 = (int16_t)(((31 - zeros) << 8) + frac); // log2(x) in Q8
+ logN = (int16_t)(log2 * 22713 >> 15); // log(2) = 0.693147 = 22713 in Q15
+ logN=logN+11; //Scalar compensation which minimizes the (log(x)-logN(x))^2 error over all x.
+
+ return logN;
+}
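+
+/* Worked check (illustrative, not in the original source): for
+ arg = 1 << 17, i.e. 1.0 in Q17: zeros = 14, frac = 0,
+ log2 = 17 << 8 = 4352, and logN = ((4352 * 22713) >> 15) + 11 = 3027.
+ Subtracting the Q17 compensation 3017 described above leaves 10,
+ roughly 10/256 = 0.04 in Q8, close to ln(1.0) = 0 as intended. */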
+
+
+/*
+ expN(x) = 2^(a*x), where a = log2(e) ~= 1.442695
+
+ Input: Q8 (int16_t)
+ Output: Q17 (int32_t)
+
+ a = log2(e) = log2(exp(1)) ~= 1.442695 ==> a = 23637 in Q14 (1.442688)
+ To this value, 700 is added or subtracted in order to get an average error
+ nearer zero, instead of always same-sign.
+*/
+
+static int32_t CalcExpN(int16_t x) {
+ int16_t axINT, axFRAC;
+ int16_t exp16;
+ int32_t exp;
+ int16_t ax = (int16_t)(x * 23637 >> 14); // Q8
+
+ if (x>=0) {
+ axINT = ax >> 8; //Q0
+ axFRAC = ax&0x00FF;
+ exp16 = 1 << axINT; // Q0
+ axFRAC = axFRAC+256; //Q8
+ exp = exp16 * axFRAC; // Q0*Q8 = Q8
+ exp <<= 9; // Q17
+ } else {
+ ax = -ax;
+ axINT = 1 + (ax >> 8); //Q0
+ axFRAC = 0x00FF - (ax&0x00FF);
+ exp16 = (int16_t)(32768 >> axINT); // Q15
+ axFRAC = axFRAC+256; //Q8
+ exp = exp16 * axFRAC; // Q15*Q8 = Q23
+ exp >>= 6; // Q17
+ }
+
+ return exp;
+}
+
+
+/* compute correlation from power spectrum */
+static void CalcCorrelation(int32_t *PSpecQ12, int32_t *CorrQ7)
+{
+ int32_t summ[FRAMESAMPLES/8];
+ int32_t diff[FRAMESAMPLES/8];
+ int32_t sum;
+ int k, n;
+
+ for (k = 0; k < FRAMESAMPLES/8; k++) {
+ summ[k] = (PSpecQ12[k] + PSpecQ12[FRAMESAMPLES / 4 - 1 - k] + 16) >> 5;
+ diff[k] = (PSpecQ12[k] - PSpecQ12[FRAMESAMPLES / 4 - 1 - k] + 16) >> 5;
+ }
+
+ sum = 2;
+ for (n = 0; n < FRAMESAMPLES/8; n++)
+ sum += summ[n];
+ CorrQ7[0] = sum;
+
+ for (k = 0; k < AR_ORDER; k += 2) {
+ sum = 0;
+ for (n = 0; n < FRAMESAMPLES/8; n++)
+ sum += (WebRtcIsacfix_kCos[k][n] * diff[n] + 256) >> 9;
+ CorrQ7[k+1] = sum;
+ }
+
+ for (k=1; k<AR_ORDER; k+=2) {
+ sum = 0;
+ for (n = 0; n < FRAMESAMPLES/8; n++)
+ sum += (WebRtcIsacfix_kCos[k][n] * summ[n] + 256) >> 9;
+ CorrQ7[k+1] = sum;
+ }
+}
+
+// Some arithmetic operations that are allowed to overflow. (It's still
+// undefined behavior, so not a good idea; this just makes UBSan ignore the
+// violations, so that our old code can continue to do what it's always been
+// doing.)
+static inline int32_t RTC_NO_SANITIZE("signed-integer-overflow")
+ OverflowingMulS16S32ToS32(int16_t a, int32_t b) {
+ return a * b;
+}
+static inline int32_t RTC_NO_SANITIZE("signed-integer-overflow")
+ OverflowingAddS32S32ToS32(int32_t a, int32_t b) {
+ return a + b;
+}
+static inline int32_t RTC_NO_SANITIZE("signed-integer-overflow")
+ OverflowingSubS32S32ToS32(int32_t a, int32_t b) {
+ return a - b;
+}
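+
+/* Sketch (not part of the original source): the same wrapping behavior can
+ be obtained without undefined behavior by computing in uint32_t and
+ converting back, assuming a two's-complement target (the conversion back
+ is implementation-defined rather than undefined). */
+#if 0
+static inline int32_t WrappingAddS32(int32_t a, int32_t b) {
+  return (int32_t)((uint32_t)a + (uint32_t)b);  /* wraps modulo 2^32 */
+}
+static inline int32_t WrappingSubS32(int32_t a, int32_t b) {
+  return (int32_t)((uint32_t)a - (uint32_t)b);  /* wraps modulo 2^32 */
+}
+#endif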
+
+/* compute inverse AR power spectrum */
+static void CalcInvArSpec(const int16_t *ARCoefQ12,
+ const int32_t gainQ10,
+ int32_t *CurveQ16)
+{
+ int32_t CorrQ11[AR_ORDER+1];
+ int32_t sum, tmpGain;
+ int32_t diffQ16[FRAMESAMPLES/8];
+ const int16_t *CS_ptrQ9;
+ int k, n;
+ int16_t round, shftVal = 0, sh;
+
+ sum = 0;
+ for (n = 0; n < AR_ORDER+1; n++)
+ sum += WEBRTC_SPL_MUL(ARCoefQ12[n], ARCoefQ12[n]); /* Q24 */
+ sum = ((sum >> 6) * 65 + 32768) >> 16; /* Result in Q8. */
+ CorrQ11[0] = (sum * gainQ10 + 256) >> 9;
+
+ /* To avoid overflow, we shift down gainQ10 if it is large. We will not lose any precision */
+ if(gainQ10>400000){
+ tmpGain = gainQ10 >> 3;
+ round = 32;
+ shftVal = 6;
+ } else {
+ tmpGain = gainQ10;
+ round = 256;
+ shftVal = 9;
+ }
+
+ for (k = 1; k < AR_ORDER+1; k++) {
+ sum = 16384;
+ for (n = k; n < AR_ORDER+1; n++)
+ sum += WEBRTC_SPL_MUL(ARCoefQ12[n-k], ARCoefQ12[n]); /* Q24 */
+ sum >>= 15;
+ CorrQ11[k] = (sum * tmpGain + round) >> shftVal;
+ }
+ sum = CorrQ11[0] << 7;
+ for (n = 0; n < FRAMESAMPLES/8; n++)
+ CurveQ16[n] = sum;
+
+ for (k = 1; k < AR_ORDER; k += 2) {
+ for (n = 0; n < FRAMESAMPLES/8; n++)
+ CurveQ16[n] +=
+ (OverflowingMulS16S32ToS32(WebRtcIsacfix_kCos[k][n], CorrQ11[k + 1]) +
+ 2) >>
+ 2;
+ }
+
+ CS_ptrQ9 = WebRtcIsacfix_kCos[0];
+
+ /* If CorrQ11[1] is too large we avoid overflow in the calculation by shifting */
+ sh=WebRtcSpl_NormW32(CorrQ11[1]);
+ if (CorrQ11[1]==0) /* Use next correlation */
+ sh=WebRtcSpl_NormW32(CorrQ11[2]);
+
+ if (sh<9)
+ shftVal = 9 - sh;
+ else
+ shftVal = 0;
+
+ for (n = 0; n < FRAMESAMPLES/8; n++)
+ diffQ16[n] = (CS_ptrQ9[n] * (CorrQ11[1] >> shftVal) + 2) >> 2;
+ for (k = 2; k < AR_ORDER; k += 2) {
+ CS_ptrQ9 = WebRtcIsacfix_kCos[k];
+ for (n = 0; n < FRAMESAMPLES/8; n++)
+ diffQ16[n] += (CS_ptrQ9[n] * (CorrQ11[k + 1] >> shftVal) + 2) >> 2;
+ }
+
+ for (k=0; k<FRAMESAMPLES/8; k++) {
+ int32_t diff_q16 = diffQ16[k] * (1 << shftVal);
+ CurveQ16[FRAMESAMPLES / 4 - 1 - k] =
+ OverflowingSubS32S32ToS32(CurveQ16[k], diff_q16);
+ CurveQ16[k] = OverflowingAddS32S32ToS32(CurveQ16[k], diff_q16);
+ }
+}
+
+static void CalcRootInvArSpec(const int16_t *ARCoefQ12,
+ const int32_t gainQ10,
+ uint16_t *CurveQ8)
+{
+ int32_t CorrQ11[AR_ORDER+1];
+ int32_t sum, tmpGain;
+ int32_t summQ16[FRAMESAMPLES/8];
+ int32_t diffQ16[FRAMESAMPLES/8];
+
+ const int16_t *CS_ptrQ9;
+ int k, n, i;
+ int16_t round, shftVal = 0, sh;
+ int32_t res, in_sqrt, newRes;
+
+ sum = 0;
+ for (n = 0; n < AR_ORDER+1; n++)
+ sum += WEBRTC_SPL_MUL(ARCoefQ12[n], ARCoefQ12[n]); /* Q24 */
+ sum = ((sum >> 6) * 65 + 32768) >> 16; /* Result in Q8. */
+ CorrQ11[0] = (sum * gainQ10 + 256) >> 9;
+
+ /* To avoid overflow, we shift down gainQ10 if it is large. We will not lose any precision */
+ if(gainQ10>400000){
+ tmpGain = gainQ10 >> 3;
+ round = 32;
+ shftVal = 6;
+ } else {
+ tmpGain = gainQ10;
+ round = 256;
+ shftVal = 9;
+ }
+
+ for (k = 1; k < AR_ORDER+1; k++) {
+ sum = 16384;
+ for (n = k; n < AR_ORDER+1; n++)
+ sum += WEBRTC_SPL_MUL(ARCoefQ12[n-k], ARCoefQ12[n]); /* Q24 */
+ sum >>= 15;
+ CorrQ11[k] = (sum * tmpGain + round) >> shftVal;
+ }
+ sum = CorrQ11[0] << 7;
+ for (n = 0; n < FRAMESAMPLES/8; n++)
+ summQ16[n] = sum;
+
+ for (k = 1; k < (AR_ORDER); k += 2) {
+ for (n = 0; n < FRAMESAMPLES/8; n++)
+ summQ16[n] += ((CorrQ11[k + 1] * WebRtcIsacfix_kCos[k][n]) + 2) >> 2;
+ }
+
+ CS_ptrQ9 = WebRtcIsacfix_kCos[0];
+
+ /* If CorrQ11[1] is too large we avoid overflow in the calculation by shifting */
+ sh=WebRtcSpl_NormW32(CorrQ11[1]);
+ if (CorrQ11[1]==0) /* Use next correlation */
+ sh=WebRtcSpl_NormW32(CorrQ11[2]);
+
+ if (sh<9)
+ shftVal = 9 - sh;
+ else
+ shftVal = 0;
+
+ for (n = 0; n < FRAMESAMPLES/8; n++)
+ diffQ16[n] = (CS_ptrQ9[n] * (CorrQ11[1] >> shftVal) + 2) >> 2;
+ for (k = 2; k < AR_ORDER; k += 2) {
+ CS_ptrQ9 = WebRtcIsacfix_kCos[k];
+ for (n = 0; n < FRAMESAMPLES/8; n++)
+ diffQ16[n] += (CS_ptrQ9[n] * (CorrQ11[k + 1] >> shftVal) + 2) >> 2;
+ }
+
+ in_sqrt = summQ16[0] + (diffQ16[0] << shftVal);
+
+ /* convert to magnitude spectrum, by doing square-roots (modified from SPLIB) */
+ res = 1 << (WebRtcSpl_GetSizeInBits(in_sqrt) >> 1);
+
+ for (k = 0; k < FRAMESAMPLES/8; k++)
+ {
+ in_sqrt = summQ16[k] + (diffQ16[k] << shftVal);
+ i = 10;
+
+ /* make in_sqrt positive to prohibit sqrt of negative values */
+ if(in_sqrt<0)
+ in_sqrt=-in_sqrt;
+
+ newRes = (in_sqrt / res + res) >> 1;
+ do
+ {
+ res = newRes;
+ newRes = (in_sqrt / res + res) >> 1;
+ } while (newRes != res && i-- > 0);
+
+ CurveQ8[k] = (int16_t)newRes;
+ }
+ for (k = FRAMESAMPLES/8; k < FRAMESAMPLES/4; k++) {
+
+ in_sqrt = summQ16[FRAMESAMPLES / 4 - 1 - k] -
+ (diffQ16[FRAMESAMPLES / 4 - 1 - k] << shftVal);
+ i = 10;
+
+ /* make in_sqrt positive to prohibit sqrt of negative values */
+ if(in_sqrt<0)
+ in_sqrt=-in_sqrt;
+
+ newRes = (in_sqrt / res + res) >> 1;
+ do
+ {
+ res = newRes;
+ newRes = (in_sqrt / res + res) >> 1;
+ } while (newRes != res && i-- > 0);
+
+ CurveQ8[k] = (int16_t)newRes;
+ }
+
+}
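+
+/* Illustration (not part of the original source): the do/while loops above
+ are the Babylonian (Newton) integer square root; the i-- guard caps the
+ iteration count because the integer form can oscillate between two
+ adjacent values. A self-contained, monotonically converging variant: */
+#if 0
+static uint32_t IntSqrt(uint32_t x) {
+  uint32_t res, next;
+  if (x < 2) return x;
+  res = x / 2;                   /* any seed >= sqrt(x) works */
+  for (;;) {
+    next = (x / res + res) / 2;  /* Newton step on f(r) = r^2 - x */
+    if (next >= res) return res; /* stop once no longer decreasing */
+    res = next;
+  }
+}
+#endif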
+
+
+
+/* generate array of dither samples in Q7 */
+static void GenerateDitherQ7(int16_t *bufQ7,
+ uint32_t seed,
+ int16_t length,
+ int16_t AvgPitchGain_Q12)
+{
+ int k;
+ int16_t dither1_Q7, dither2_Q7, dither_gain_Q14, shft;
+
+ if (AvgPitchGain_Q12 < 614) /* this threshold should be equal to that in decode_spec() */
+ {
+ for (k = 0; k < length-2; k += 3)
+ {
+ /* new random unsigned int32_t */
+ seed = WEBRTC_SPL_UMUL(seed, 196314165) + 907633515;
+
+ /* fixed-point dither sample between -64 and 64 (Q7) */
+ dither1_Q7 = (int16_t)(((int32_t)(seed + 16777216)) >> 25);
+
+ /* new random unsigned int32_t */
+ seed = WEBRTC_SPL_UMUL(seed, 196314165) + 907633515;
+
+ /* fixed-point dither sample between -64 and 64 */
+ dither2_Q7 = (int16_t)(((int32_t)(seed + 16777216)) >> 25);
+
+ shft = (int16_t)(WEBRTC_SPL_RSHIFT_U32(seed, 25) & 15);
+ if (shft < 5)
+ {
+ bufQ7[k] = dither1_Q7;
+ bufQ7[k+1] = dither2_Q7;
+ bufQ7[k+2] = 0;
+ }
+ else if (shft < 10)
+ {
+ bufQ7[k] = dither1_Q7;
+ bufQ7[k+1] = 0;
+ bufQ7[k+2] = dither2_Q7;
+ }
+ else
+ {
+ bufQ7[k] = 0;
+ bufQ7[k+1] = dither1_Q7;
+ bufQ7[k+2] = dither2_Q7;
+ }
+ }
+ }
+ else
+ {
+ dither_gain_Q14 = (int16_t)(22528 - WEBRTC_SPL_MUL(10, AvgPitchGain_Q12));
+
+ /* dither on half of the coefficients */
+ for (k = 0; k < length-1; k += 2)
+ {
+ /* new random unsigned int32_t */
+ seed = WEBRTC_SPL_UMUL(seed, 196314165) + 907633515;
+
+ /* fixed-point dither sample between -64 and 64 */
+ dither1_Q7 = (int16_t)(((int32_t)(seed + 16777216)) >> 25);
+
+ /* dither sample is placed in either even or odd index */
+ shft = (int16_t)(WEBRTC_SPL_RSHIFT_U32(seed, 25) & 1); /* either 0 or 1 */
+
+ bufQ7[k + shft] = (int16_t)((dither_gain_Q14 * dither1_Q7 + 8192) >> 14);
+ bufQ7[k + 1 - shft] = 0;
+ }
+ }
+}
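+
+/* Illustration (not part of the original source): one step of the dither
+ generator used above. The multiplier/increment pair satisfies the
+ Hull-Dobell conditions, so the LCG has full period 2^32; adding 2^24
+ before the arithmetic shift by 25 maps the top bits to a roughly uniform
+ sample in [-64, 63] (Q7). */
+#if 0
+static int16_t NextDitherQ7(uint32_t* seed) {
+  *seed = WEBRTC_SPL_UMUL(*seed, 196314165) + 907633515;  /* LCG step */
+  return (int16_t)(((int32_t)(*seed + 16777216)) >> 25);  /* Q7 dither */
+}
+#endif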
+
+
+
+
+/*
+ * function to decode the complex spectrum from the bitstream
+ * returns the total number of bytes in the stream
+ */
+int WebRtcIsacfix_DecodeSpec(Bitstr_dec *streamdata,
+ int16_t *frQ7,
+ int16_t *fiQ7,
+ int16_t AvgPitchGain_Q12)
+{
+ int16_t data[FRAMESAMPLES];
+ int32_t invARSpec2_Q16[FRAMESAMPLES/4];
+ int16_t ARCoefQ12[AR_ORDER+1];
+ int16_t RCQ15[AR_ORDER];
+ int16_t gainQ10;
+ int32_t gain2_Q10;
+ int len;
+ int k;
+
+ /* create dither signal */
+ GenerateDitherQ7(data, streamdata->W_upper, FRAMESAMPLES, AvgPitchGain_Q12); /* Dither is output in vector 'Data' */
+
+ /* decode model parameters */
+ if (WebRtcIsacfix_DecodeRcCoef(streamdata, RCQ15) < 0)
+ return -ISAC_RANGE_ERROR_DECODE_SPECTRUM;
+
+
+ WebRtcSpl_ReflCoefToLpc(RCQ15, AR_ORDER, ARCoefQ12);
+
+ if (WebRtcIsacfix_DecodeGain2(streamdata, &gain2_Q10) < 0)
+ return -ISAC_RANGE_ERROR_DECODE_SPECTRUM;
+
+ /* compute inverse AR power spectrum */
+ CalcInvArSpec(ARCoefQ12, gain2_Q10, invARSpec2_Q16);
+
+ /* arithmetic decoding of spectrum */
+ /* 'data' input and output. Input = Dither */
+ len = WebRtcIsacfix_DecLogisticMulti2(data, streamdata, invARSpec2_Q16, (int16_t)FRAMESAMPLES);
+
+ if (len<1)
+ return -ISAC_RANGE_ERROR_DECODE_SPECTRUM;
+
+ /* subtract dither and scale down spectral samples with low SNR */
+ if (AvgPitchGain_Q12 <= 614)
+ {
+ for (k = 0; k < FRAMESAMPLES; k += 4)
+ {
+ gainQ10 = WebRtcSpl_DivW32W16ResW16(30 << 10,
+ (int16_t)((uint32_t)(invARSpec2_Q16[k >> 2] + 2195456) >> 16));
+ *frQ7++ = (int16_t)((data[k] * gainQ10 + 512) >> 10);
+ *fiQ7++ = (int16_t)((data[k + 1] * gainQ10 + 512) >> 10);
+ *frQ7++ = (int16_t)((data[k + 2] * gainQ10 + 512) >> 10);
+ *fiQ7++ = (int16_t)((data[k + 3] * gainQ10 + 512) >> 10);
+ }
+ }
+ else
+ {
+ for (k = 0; k < FRAMESAMPLES; k += 4)
+ {
+ gainQ10 = WebRtcSpl_DivW32W16ResW16(36 << 10,
+ (int16_t)((uint32_t)(invARSpec2_Q16[k >> 2] + 2654208) >> 16));
+ *frQ7++ = (int16_t)((data[k] * gainQ10 + 512) >> 10);
+ *fiQ7++ = (int16_t)((data[k + 1] * gainQ10 + 512) >> 10);
+ *frQ7++ = (int16_t)((data[k + 2] * gainQ10 + 512) >> 10);
+ *fiQ7++ = (int16_t)((data[k + 3] * gainQ10 + 512) >> 10);
+ }
+ }
+
+ return len;
+}
+
+
+int WebRtcIsacfix_EncodeSpec(const int16_t *fr,
+ const int16_t *fi,
+ Bitstr_enc *streamdata,
+ int16_t AvgPitchGain_Q12)
+{
+ int16_t dataQ7[FRAMESAMPLES];
+ int32_t PSpec[FRAMESAMPLES/4];
+ uint16_t invARSpecQ8[FRAMESAMPLES/4];
+ int32_t CorrQ7[AR_ORDER+1];
+ int32_t CorrQ7_norm[AR_ORDER+1];
+ int16_t RCQ15[AR_ORDER];
+ int16_t ARCoefQ12[AR_ORDER+1];
+ int32_t gain2_Q10;
+ int16_t val;
+ int32_t nrg;
+ uint32_t sum;
+ int16_t lft_shft;
+ int16_t status;
+ int k, n, j;
+
+
+ /* create dither signal */
+ GenerateDitherQ7(dataQ7, streamdata->W_upper, FRAMESAMPLES, AvgPitchGain_Q12);
+
+ /* add dither and quantize, and compute power spectrum */
+ /* Vector dataQ7 contains Dither in Q7 */
+ for (k = 0; k < FRAMESAMPLES; k += 4)
+ {
+ val = ((*fr++ + dataQ7[k] + 64) & 0xFF80) - dataQ7[k]; /* Data = Dither */
+ dataQ7[k] = val; /* New value in Data */
+ sum = WEBRTC_SPL_UMUL(val, val);
+
+ val = ((*fi++ + dataQ7[k+1] + 64) & 0xFF80) - dataQ7[k+1]; /* Data = Dither */
+ dataQ7[k+1] = val; /* New value in Data */
+ sum += WEBRTC_SPL_UMUL(val, val);
+
+ val = ((*fr++ + dataQ7[k+2] + 64) & 0xFF80) - dataQ7[k+2]; /* Data = Dither */
+ dataQ7[k+2] = val; /* New value in Data */
+ sum += WEBRTC_SPL_UMUL(val, val);
+
+ val = ((*fi++ + dataQ7[k+3] + 64) & 0xFF80) - dataQ7[k+3]; /* Data = Dither */
+ dataQ7[k+3] = val; /* New value in Data */
+ sum += WEBRTC_SPL_UMUL(val, val);
+
+ PSpec[k>>2] = WEBRTC_SPL_RSHIFT_U32(sum, 2);
+ }
+
+ /* compute correlation from power spectrum */
+ CalcCorrelation(PSpec, CorrQ7);
+
+
+ /* find AR coefficients */
+ /* number of bit shifts to 14-bit normalize CorrQ7[0] (leaving room for sign) */
+ lft_shft = WebRtcSpl_NormW32(CorrQ7[0]) - 18;
+
+ if (lft_shft > 0) {
+ for (k=0; k<AR_ORDER+1; k++)
+ CorrQ7_norm[k] = CorrQ7[k] << lft_shft;
+ } else {
+ for (k=0; k<AR_ORDER+1; k++)
+ CorrQ7_norm[k] = CorrQ7[k] >> -lft_shft;
+ }
+
+ /* find RC coefficients */
+ WebRtcSpl_AutoCorrToReflCoef(CorrQ7_norm, AR_ORDER, RCQ15);
+
+ /* quantize & code RC Coef */
+ status = WebRtcIsacfix_EncodeRcCoef(RCQ15, streamdata);
+ if (status < 0) {
+ return status;
+ }
+
+ /* RC -> AR coefficients */
+ WebRtcSpl_ReflCoefToLpc(RCQ15, AR_ORDER, ARCoefQ12);
+
+ /* compute ARCoef' * Corr * ARCoef in Q19 */
+ nrg = 0;
+ for (j = 0; j <= AR_ORDER; j++) {
+ for (n = 0; n <= j; n++)
+ nrg += (ARCoefQ12[j] * ((CorrQ7_norm[j - n] * ARCoefQ12[n] + 256) >> 9) +
+ 4) >> 3;
+ for (n = j+1; n <= AR_ORDER; n++)
+ nrg += (ARCoefQ12[j] * ((CorrQ7_norm[n - j] * ARCoefQ12[n] + 256) >> 9) +
+ 4) >> 3;
+ }
+
+ if (lft_shft > 0)
+ nrg >>= lft_shft;
+ else
+ nrg <<= -lft_shft;
+
+ if(nrg>131072)
+ gain2_Q10 = WebRtcSpl_DivResultInQ31(FRAMESAMPLES >> 2, nrg); /* also shifts 31 bits to the left! */
+ else
+ gain2_Q10 = FRAMESAMPLES >> 2;
+
+ /* quantize & code gain2_Q10 */
+ if (WebRtcIsacfix_EncodeGain2(&gain2_Q10, streamdata))
+ return -1;
+
+ /* compute inverse AR magnitude spectrum */
+ CalcRootInvArSpec(ARCoefQ12, gain2_Q10, invARSpecQ8);
+
+
+ /* arithmetic coding of spectrum */
+ status = WebRtcIsacfix_EncLogisticMulti2(streamdata, dataQ7, invARSpecQ8, (int16_t)FRAMESAMPLES);
+ if ( status )
+ return( status );
+
+ return 0;
+}
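+
+/* Illustrative note (not part of the original source): the expression
+ ((x + dither + 64) & 0xFF80) - dither in the quantization loop above
+ rounds x + dither to the nearest multiple of 128 (modulo 2^16, relying on
+ int16_t wraparound), i.e. onto the integer grid in Q7, and then removes
+ the dither again: classic subtractive dithering, which decorrelates the
+ quantization error from the signal. */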
+
+
+/* Matlab's LAR definition */
+static void Rc2LarFix(const int16_t *rcQ15, int32_t *larQ17, int16_t order) {
+
+ /*
+
+ This is a piece-wise implementation of a rc2lar function (all values in the comment
+ are Q15 values and are based on [0 24956/32768 30000/32768 32500/32768], i.e.
+ [0.76159667968750 0.91552734375000 0.99182128906250]
+
+ x0 x1 a k x0(again) b
+ ==================================================================================
+ 0.00 0.76: 0 2.625997508581 0 0
+ 0.76 0.91: 2.000012018559 7.284502668663 0.761596679688 -3.547841027073
+ 0.91 0.99: 3.121320351712 31.115835041229 0.915527343750 -25.366077452148
+ 0.99 1.00: 5.495270168700 686.663805654056 0.991821289063 -675.552510708011
+
+ The implementation is y(x)= a + (x-x0)*k, but this can be simplified to
+
+ y(x) = a-x0*k + x*k = b + x*k, where b = a-x0*k
+
+ akx=[0 2.625997508581 0
+ 2.000012018559 7.284502668663 0.761596679688
+ 3.121320351712 31.115835041229 0.915527343750
+ 5.495270168700 686.663805654056 0.991821289063];
+
+ b = akx(:,1) - akx(:,3).*akx(:,2)
+
+ [ 0.0
+ -3.547841027073
+ -25.366077452148
+ -675.552510708011]
+
+ */
+
+ int k;
+ int16_t rc;
+ int32_t larAbsQ17;
+
+ for (k = 0; k < order; k++) {
+
+ rc = WEBRTC_SPL_ABS_W16(rcQ15[k]); //Q15
+
+ /* Calculate larAbsQ17 in Q17 from rc in Q15 */
+
+ if (rc<24956) { //0.7615966 in Q15
+ // (Q15*Q13)>>11 = Q17
+ larAbsQ17 = rc * 21512 >> 11;
+ } else if (rc<30000) { //0.91552734375 in Q15
+ // Q17 + (Q15*Q12)>>10 = Q17
+ larAbsQ17 = -465024 + (rc * 29837 >> 10);
+ } else if (rc<32500) { //0.99182128906250 in Q15
+ // Q17 + (Q15*Q10)>>8 = Q17
+ larAbsQ17 = -3324784 + (rc * 31863 >> 8);
+ } else {
+ // Q17 + (Q15*Q5)>>3 = Q17
+ larAbsQ17 = -88546020 + (rc * 21973 >> 3);
+ }
+
+ if (rcQ15[k]>0) {
+ larQ17[k] = larAbsQ17;
+ } else {
+ larQ17[k] = -larAbsQ17;
+ }
+ }
+}
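+
+/* Worked check (illustrative, not in the original source): the fixed-point
+ segments above meet at the breakpoints to within one Q17 unit. At
+ rc = 24956: (24956 * 21512) >> 11 = 262135 from the first segment, and
+ -465024 + ((24956 * 29837) >> 10) = 262136 from the second, so the
+ quantized slopes and offsets reproduce the table in the comment with at
+ most 1 LSB of mismatch. */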
+
+
+static void Lar2RcFix(const int32_t *larQ17, int16_t *rcQ15, int16_t order) {
+
+ /*
+ This is a piece-wise implementation of a lar2rc function.
+ See comment in Rc2LarFix() about details.
+ */
+
+ int k;
+ int16_t larAbsQ11;
+ int32_t rc;
+
+ for (k = 0; k < order; k++) {
+
+ larAbsQ11 = (int16_t)WEBRTC_SPL_ABS_W32((larQ17[k] + 32) >> 6); // Q11
+
+ if (larAbsQ11<4097) { //2.000012018559 in Q11
+ // Q11*Q16>>12 = Q15
+ rc = larAbsQ11 * 24957 >> 12;
+ } else if (larAbsQ11<6393) { //3.121320351712 in Q11
+ // (Q11*Q17 + Q13)>>13 = Q15
+ rc = (larAbsQ11 * 17993 + 130738688) >> 13;
+ } else if (larAbsQ11<11255) { //5.495270168700 in Q11
+ // (Q11*Q19 + Q30)>>15 = Q15
+ rc = (larAbsQ11 * 16850 + 875329820) >> 15;
+ } else {
+ // (Q11*Q24>>16 + Q19)>>4 = Q15
+ rc = (((larAbsQ11 * 24433) >> 16) + 515804) >> 4;
+ }
+
+ if (larQ17[k]<=0) {
+ rc = -rc;
+ }
+
+ rcQ15[k] = (int16_t) rc; // Q15
+ }
+}
+
+static void Poly2LarFix(int16_t *lowbandQ15,
+ int16_t orderLo,
+ int16_t *hibandQ15,
+ int16_t orderHi,
+ int16_t Nsub,
+ int32_t *larsQ17) {
+
+ int k, n;
+ int32_t *outpQ17;
+ int16_t orderTot;
+ int32_t larQ17[MAX_ORDER]; // Size 7+6 is enough
+
+ orderTot = (orderLo + orderHi);
+ outpQ17 = larsQ17;
+ for (k = 0; k < Nsub; k++) {
+
+ Rc2LarFix(lowbandQ15, larQ17, orderLo);
+
+ for (n = 0; n < orderLo; n++)
+ outpQ17[n] = larQ17[n]; //Q17
+
+ Rc2LarFix(hibandQ15, larQ17, orderHi);
+
+ for (n = 0; n < orderHi; n++)
+ outpQ17[n + orderLo] = larQ17[n]; //Q17;
+
+ outpQ17 += orderTot;
+ lowbandQ15 += orderLo;
+ hibandQ15 += orderHi;
+ }
+}
+
+
+static void Lar2polyFix(int32_t *larsQ17,
+ int16_t *lowbandQ15,
+ int16_t orderLo,
+ int16_t *hibandQ15,
+ int16_t orderHi,
+ int16_t Nsub) {
+
+ int k, n;
+ int16_t orderTot;
+ int16_t *outplQ15, *outphQ15;
+ int32_t *inpQ17;
+ int16_t rcQ15[7+6];
+
+ orderTot = (orderLo + orderHi);
+ outplQ15 = lowbandQ15;
+ outphQ15 = hibandQ15;
+ inpQ17 = larsQ17;
+ for (k = 0; k < Nsub; k++) {
+
+ /* gains not handled here as in the FLP version */
+
+ /* Low band */
+ Lar2RcFix(&inpQ17[0], rcQ15, orderLo);
+ for (n = 0; n < orderLo; n++)
+ outplQ15[n] = rcQ15[n]; // Refl. coeffs
+
+ /* High band */
+ Lar2RcFix(&inpQ17[orderLo], rcQ15, orderHi);
+ for (n = 0; n < orderHi; n++)
+ outphQ15[n] = rcQ15[n]; // Refl. coeffs
+
+ inpQ17 += orderTot;
+ outplQ15 += orderLo;
+ outphQ15 += orderHi;
+ }
+}
+
+/*
+Function WebRtcIsacfix_MatrixProduct1C() does one form of matrix multiplication.
+It first shifts the input data of one matrix, determines the right indices into
+the two matrices, multiplies them, and writes the results into an output buffer.
+
+Note that two factors (or, multipliers) determine the initialization values of
+the variable `matrix1_index` in the code. The relationship is
+`matrix1_index` = `matrix1_index_factor1` * `matrix1_index_factor2`, where
+`matrix1_index_factor1` is given by the argument while `matrix1_index_factor2`
+is determined by the value of argument `matrix1_index_init_case`;
+`matrix1_index_factor2` is the value of the outermost loop counter j (when
+`matrix1_index_init_case` is 0), or the value of the middle loop counter k (when
+`matrix1_index_init_case` is non-zero).
+
+`matrix0_index` is determined the same way.
+
+Arguments:
+ matrix0[]: matrix0 data in Q15 domain.
+ matrix1[]: matrix1 data.
+ matrix_product[]: output data (matrix product).
+ matrix1_index_factor1: The first of two factors determining the
+ initialization value of matrix1_index.
+ matrix0_index_factor1: The first of two factors determining the
+ initialization value of matrix0_index.
+ matrix1_index_init_case: Case number for selecting the second of two
+ factors determining the initialization value
+ of matrix1_index and matrix0_index.
+ matrix1_index_step: Incremental step for matrix1_index.
+ matrix0_index_step: Incremental step for matrix0_index.
+ inner_loop_count: Maximum count of the inner loop.
+ mid_loop_count: Maximum count of the intermediate loop.
+ shift: Left shift value for matrix1.
+*/
+void WebRtcIsacfix_MatrixProduct1C(const int16_t matrix0[],
+ const int32_t matrix1[],
+ int32_t matrix_product[],
+ const int matrix1_index_factor1,
+ const int matrix0_index_factor1,
+ const int matrix1_index_init_case,
+ const int matrix1_index_step,
+ const int matrix0_index_step,
+ const int inner_loop_count,
+ const int mid_loop_count,
+ const int shift) {
+ int j = 0, k = 0, n = 0;
+ int matrix0_index = 0, matrix1_index = 0, matrix_prod_index = 0;
+ int* matrix0_index_factor2 = &k;
+ int* matrix1_index_factor2 = &j;
+ if (matrix1_index_init_case != 0) {
+ matrix0_index_factor2 = &j;
+ matrix1_index_factor2 = &k;
+ }
+
+ for (j = 0; j < SUBFRAMES; j++) {
+ matrix_prod_index = mid_loop_count * j;
+ for (k = 0; k < mid_loop_count; k++) {
+ int32_t sum32 = 0;
+ matrix0_index = matrix0_index_factor1 * (*matrix0_index_factor2);
+ matrix1_index = matrix1_index_factor1 * (*matrix1_index_factor2);
+ for (n = 0; n < inner_loop_count; n++) {
+ sum32 += WEBRTC_SPL_MUL_16_32_RSFT16(
+ matrix0[matrix0_index], matrix1[matrix1_index] * (1 << shift));
+ matrix0_index += matrix0_index_step;
+ matrix1_index += matrix1_index_step;
+ }
+ matrix_product[matrix_prod_index] = sum32;
+ matrix_prod_index++;
+ }
+ }
+}
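+
+/* Worked instantiation (illustrative, not in the original source): for the
+ "left transform" gain call in WebRtcIsacfix_DecodeLpcCoef() below, with
+ both index factors kTIndexFactor2 (= 2), kTInitCase0, both steps 1, both
+ loop counts 2 and shift kTMatrix1_shift5, the loops reduce to
+   out[2*j + k] =
+       sum_{n<2} WEBRTC_SPL_MUL_16_32_RSFT16(matrix0[2*k + n],
+                                             matrix1[2*j + n] << 5)
+ i.e. a 2x2 transpose-multiply per subframe, taking Q15 x Q17 into Q21. */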
+
+/*
+Function WebRtcIsacfix_MatrixProduct2C() returns the product of two matrices,
+one of which has two columns. It first has to determine the correct index of
+the first matrix before doing the actual element multiplication.
+
+Arguments:
+ matrix0[]: A matrix in Q15 domain.
+ matrix1[]: A matrix in Q21 domain.
+ matrix_product[]: Output data in Q17 domain.
+ matrix0_index_factor: A factor determining the initialization value
+ of matrix0_index.
+ matrix0_index_step: Incremental step for matrix0_index.
+*/
+void WebRtcIsacfix_MatrixProduct2C(const int16_t matrix0[],
+ const int32_t matrix1[],
+ int32_t matrix_product[],
+ const int matrix0_index_factor,
+ const int matrix0_index_step) {
+ int j = 0, n = 0;
+ int matrix1_index = 0, matrix0_index = 0, matrix_prod_index = 0;
+ for (j = 0; j < SUBFRAMES; j++) {
+ int32_t sum32 = 0, sum32_2 = 0;
+ matrix1_index = 0;
+ matrix0_index = matrix0_index_factor * j;
+ for (n = SUBFRAMES; n > 0; n--) {
+ sum32 += (WEBRTC_SPL_MUL_16_32_RSFT16(matrix0[matrix0_index],
+ matrix1[matrix1_index]));
+ sum32_2 += (WEBRTC_SPL_MUL_16_32_RSFT16(matrix0[matrix0_index],
+ matrix1[matrix1_index + 1]));
+ matrix1_index += 2;
+ matrix0_index += matrix0_index_step;
+ }
+ matrix_product[matrix_prod_index] = sum32 >> 3;
+ matrix_product[matrix_prod_index + 1] = sum32_2 >> 3;
+ matrix_prod_index += 2;
+ }
+}
+
+int WebRtcIsacfix_DecodeLpc(int32_t *gain_lo_hiQ17,
+ int16_t *LPCCoef_loQ15,
+ int16_t *LPCCoef_hiQ15,
+ Bitstr_dec *streamdata,
+ int16_t *outmodel) {
+
+ int32_t larsQ17[KLT_ORDER_SHAPE]; // KLT_ORDER_GAIN+KLT_ORDER_SHAPE == (ORDERLO+ORDERHI)*SUBFRAMES
+ int err;
+
+ err = WebRtcIsacfix_DecodeLpcCoef(streamdata, larsQ17, gain_lo_hiQ17, outmodel);
+ if (err<0) // error check
+ return -ISAC_RANGE_ERROR_DECODE_LPC;
+
+ Lar2polyFix(larsQ17, LPCCoef_loQ15, ORDERLO, LPCCoef_hiQ15, ORDERHI, SUBFRAMES);
+
+ return 0;
+}
+
+/* decode & dequantize LPC Coef */
+int WebRtcIsacfix_DecodeLpcCoef(Bitstr_dec *streamdata,
+ int32_t *LPCCoefQ17,
+ int32_t *gain_lo_hiQ17,
+ int16_t *outmodel)
+{
+ int j, k, n;
+ int err;
+ int16_t pos, pos2, posg, poss;
+ int16_t gainpos;
+ int16_t model;
+ int16_t index_QQ[KLT_ORDER_SHAPE];
+ int32_t tmpcoeffs_gQ17[KLT_ORDER_GAIN];
+ int32_t tmpcoeffs2_gQ21[KLT_ORDER_GAIN];
+ int16_t tmpcoeffs_sQ10[KLT_ORDER_SHAPE];
+ int32_t tmpcoeffs_sQ17[KLT_ORDER_SHAPE];
+ int32_t tmpcoeffs2_sQ18[KLT_ORDER_SHAPE];
+ int32_t sumQQ;
+ int16_t sumQQ16;
+ int32_t tmp32;
+
+
+
+ /* entropy decoding of model number */
+ err = WebRtcIsacfix_DecHistOneStepMulti(&model, streamdata, WebRtcIsacfix_kModelCdfPtr, WebRtcIsacfix_kModelInitIndex, 1);
+ if (err<0) // error check
+ return err;
+
+ /* entropy decoding of quantization indices */
+ err = WebRtcIsacfix_DecHistOneStepMulti(index_QQ, streamdata, WebRtcIsacfix_kCdfShapePtr[model], WebRtcIsacfix_kInitIndexShape[model], KLT_ORDER_SHAPE);
+ if (err<0) // error check
+ return err;
+ /* find quantization levels for coefficients */
+ for (k=0; k<KLT_ORDER_SHAPE; k++) {
+ tmpcoeffs_sQ10[WebRtcIsacfix_kSelIndShape[k]] = WebRtcIsacfix_kLevelsShapeQ10[WebRtcIsacfix_kOfLevelsShape[model]+WebRtcIsacfix_kOffsetShape[model][k] + index_QQ[k]];
+ }
+
+ err = WebRtcIsacfix_DecHistOneStepMulti(index_QQ, streamdata, WebRtcIsacfix_kCdfGainPtr[model], WebRtcIsacfix_kInitIndexGain[model], KLT_ORDER_GAIN);
+ if (err<0) // error check
+ return err;
+ /* find quantization levels for coefficients */
+ for (k=0; k<KLT_ORDER_GAIN; k++) {
+ tmpcoeffs_gQ17[WebRtcIsacfix_kSelIndGain[k]] = WebRtcIsacfix_kLevelsGainQ17[WebRtcIsacfix_kOfLevelsGain[model]+ WebRtcIsacfix_kOffsetGain[model][k] + index_QQ[k]];
+ }
+
+
+ /* inverse KLT */
+
+ /* left transform */ // Transpose matrix!
+ WebRtcIsacfix_MatrixProduct1(WebRtcIsacfix_kT1GainQ15[model], tmpcoeffs_gQ17,
+ tmpcoeffs2_gQ21, kTIndexFactor2, kTIndexFactor2,
+ kTInitCase0, kTIndexStep1, kTIndexStep1,
+ kTLoopCount2, kTLoopCount2, kTMatrix1_shift5);
+
+ poss = 0;
+ for (j=0; j<SUBFRAMES; j++) {
+ for (k=0; k<LPC_SHAPE_ORDER; k++) {
+ sumQQ = 0;
+ pos = LPC_SHAPE_ORDER * j;
+ pos2 = LPC_SHAPE_ORDER * k;
+ for (n=0; n<LPC_SHAPE_ORDER; n++) {
+ sumQQ += tmpcoeffs_sQ10[pos] *
+ WebRtcIsacfix_kT1ShapeQ15[model][pos2] >> 7; // (Q10*Q15)>>7 = Q18
+ pos++;
+ pos2++;
+ }
+ tmpcoeffs2_sQ18[poss] = sumQQ; //Q18
+ poss++;
+ }
+ }
+
+ /* right transform */ // Transpose matrix
+ WebRtcIsacfix_MatrixProduct2(WebRtcIsacfix_kT2GainQ15[0], tmpcoeffs2_gQ21,
+ tmpcoeffs_gQ17, kTIndexFactor1, kTIndexStep2);
+ WebRtcIsacfix_MatrixProduct1(WebRtcIsacfix_kT2ShapeQ15[model],
+ tmpcoeffs2_sQ18, tmpcoeffs_sQ17, kTIndexFactor1, kTIndexFactor1,
+ kTInitCase1, kTIndexStep3, kTIndexStep2, kTLoopCount1, kTLoopCount3,
+ kTMatrix1_shift0);
+
+ /* scaling, mean addition, and gain restoration */
+ gainpos = 0;
+ posg = 0;poss = 0;pos=0;
+ for (k=0; k<SUBFRAMES; k++) {
+
+ /* log gains */
+ // Divide by 4 and get Q17 to Q8, i.e. shift 2+9.
+ sumQQ16 = (int16_t)(tmpcoeffs_gQ17[posg] >> 11);
+ sumQQ16 += WebRtcIsacfix_kMeansGainQ8[model][posg];
+ sumQQ = CalcExpN(sumQQ16); // Q8 in and Q17 out
+ gain_lo_hiQ17[gainpos] = sumQQ; //Q17
+ gainpos++;
+ posg++;
+
+ // Divide by 4 and get Q17 to Q8, i.e. shift 2+9.
+ sumQQ16 = (int16_t)(tmpcoeffs_gQ17[posg] >> 11);
+ sumQQ16 += WebRtcIsacfix_kMeansGainQ8[model][posg];
+ sumQQ = CalcExpN(sumQQ16); // Q8 in and Q17 out
+ gain_lo_hiQ17[gainpos] = sumQQ; //Q17
+ gainpos++;
+ posg++;
+
+ /* lo band LAR coeffs */
+ for (n=0; n<ORDERLO; n++, pos++, poss++) {
+ tmp32 = WEBRTC_SPL_MUL_16_32_RSFT16(31208, tmpcoeffs_sQ17[poss]); // (Q16*Q17)>>16 = Q17, with 1/2.1 = 0.47619047619 ~= 31208 in Q16
+ tmp32 = tmp32 + WebRtcIsacfix_kMeansShapeQ17[model][poss]; // Q17+Q17 = Q17
+ LPCCoefQ17[pos] = tmp32;
+ }
+
+ /* hi band LAR coeffs */
+ for (n=0; n<ORDERHI; n++, pos++, poss++) {
+ // ((Q13*Q17)>>16)<<3 = Q17, with 1/0.45 = 2.222222222222 ~= 18204 in Q13
+ tmp32 =
+ WEBRTC_SPL_MUL_16_32_RSFT16(18204, tmpcoeffs_sQ17[poss]) * (1 << 3);
+ tmp32 = tmp32 + WebRtcIsacfix_kMeansShapeQ17[model][poss]; // Q17+Q17 = Q17
+ LPCCoefQ17[pos] = tmp32;
+ }
+ }
+
+
+ *outmodel=model;
+
+ return 0;
+}
+
+/* estimate code length of LPC coefficients */
+static int EstCodeLpcCoef(int32_t *LPCCoefQ17,
+ int32_t *gain_lo_hiQ17,
+ int16_t *model,
+ int32_t *sizeQ11,
+ Bitstr_enc *streamdata,
+ IsacSaveEncoderData* encData,
+ transcode_obj *transcodingParam) {
+ int j, k, n;
+ int16_t posQQ, pos2QQ, gainpos;
+ int16_t pos, poss, posg, offsg;
+ int16_t index_gQQ[KLT_ORDER_GAIN], index_sQQ[KLT_ORDER_SHAPE];
+ int16_t index_ovr_gQQ[KLT_ORDER_GAIN], index_ovr_sQQ[KLT_ORDER_SHAPE];
+ int32_t BitsQQ;
+
+ int16_t tmpcoeffs_gQ6[KLT_ORDER_GAIN];
+ int32_t tmpcoeffs_gQ17[KLT_ORDER_GAIN];
+ int32_t tmpcoeffs_sQ17[KLT_ORDER_SHAPE];
+ int32_t tmpcoeffs2_gQ21[KLT_ORDER_GAIN];
+ int32_t tmpcoeffs2_sQ17[KLT_ORDER_SHAPE];
+ int32_t sumQQ;
+ int32_t tmp32;
+ int16_t sumQQ16;
+ int status = 0;
+
+ /* write LAR coefficients to statistics file */
+ /* Save data for creation of multiple bitstreams (and transcoding) */
+ if (encData != NULL) {
+ for (k=0; k<KLT_ORDER_GAIN; k++) {
+ encData->LPCcoeffs_g[KLT_ORDER_GAIN*encData->startIdx + k] = gain_lo_hiQ17[k];
+ }
+ }
+
+ /* log gains, mean removal and scaling */
+ posg = 0;poss = 0;pos=0; gainpos=0;
+
+ for (k=0; k<SUBFRAMES; k++) {
+ /* log gains */
+
+ /* The input argument X to logN(X) is 2^17 times higher than the
+ input floating-point argument Y to log(Y), since the X value
+ is a Q17 value. This can be compensated for after the call by
+ subtracting a value Z for each Q-step. One Q-step means that
+ X gets 2 times higher, i.e. Z = logN(2)*256 = 0.693147180559*256 =
+ 177.445678 should be subtracted (since logN() returns a Q8 value).
+ For an X value in Q17, the value 177.445678*17 = 3017 should be
+ subtracted */
+ tmpcoeffs_gQ6[posg] = CalcLogN(gain_lo_hiQ17[gainpos])-3017; //Q8
+ tmpcoeffs_gQ6[posg] -= WebRtcIsacfix_kMeansGainQ8[0][posg]; //Q8, but Q6 after not-needed mult. by 4
+ posg++; gainpos++;
+
+ tmpcoeffs_gQ6[posg] = CalcLogN(gain_lo_hiQ17[gainpos])-3017; //Q8
+ tmpcoeffs_gQ6[posg] -= WebRtcIsacfix_kMeansGainQ8[0][posg]; //Q8, but Q6 after not-needed mult. by 4
+ posg++; gainpos++;
+
+ /* lo band LAR coeffs */
+ for (n=0; n<ORDERLO; n++, poss++, pos++) {
+ tmp32 = LPCCoefQ17[pos] - WebRtcIsacfix_kMeansShapeQ17[0][poss]; //Q17
+ tmp32 = WEBRTC_SPL_MUL_16_32_RSFT16(17203, tmp32<<3); // tmp32 = 2.1*tmp32
+ tmpcoeffs_sQ17[poss] = tmp32; //Q17
+ }
+
+ /* hi band LAR coeffs */
+ for (n=0; n<ORDERHI; n++, poss++, pos++) {
+ tmp32 = LPCCoefQ17[pos] - WebRtcIsacfix_kMeansShapeQ17[0][poss]; //Q17
+ tmp32 = WEBRTC_SPL_MUL_16_32_RSFT16(14746, tmp32<<1); // tmp32 = 0.45*tmp32
+ tmpcoeffs_sQ17[poss] = tmp32; //Q17
+ }
+
+ }
+
+
+ /* KLT */
+
+ /* left transform */
+ offsg = 0;
+ posg = 0;
+ for (j=0; j<SUBFRAMES; j++) {
+ // Q21 = Q6 * Q15
+ sumQQ = tmpcoeffs_gQ6[offsg] * WebRtcIsacfix_kT1GainQ15[0][0] +
+ tmpcoeffs_gQ6[offsg + 1] * WebRtcIsacfix_kT1GainQ15[0][2];
+ tmpcoeffs2_gQ21[posg] = sumQQ;
+ posg++;
+
+ // Q21 = Q6 * Q15
+ sumQQ = tmpcoeffs_gQ6[offsg] * WebRtcIsacfix_kT1GainQ15[0][1] +
+ tmpcoeffs_gQ6[offsg + 1] * WebRtcIsacfix_kT1GainQ15[0][3];
+ tmpcoeffs2_gQ21[posg] = sumQQ;
+ posg++;
+
+ offsg += 2;
+ }
+
+ WebRtcIsacfix_MatrixProduct1(WebRtcIsacfix_kT1ShapeQ15[0], tmpcoeffs_sQ17,
+ tmpcoeffs2_sQ17, kTIndexFactor4, kTIndexFactor1, kTInitCase0,
+ kTIndexStep1, kTIndexStep3, kTLoopCount3, kTLoopCount3, kTMatrix1_shift1);
+
+ /* right transform */
+ WebRtcIsacfix_MatrixProduct2(WebRtcIsacfix_kT2GainQ15[0], tmpcoeffs2_gQ21,
+ tmpcoeffs_gQ17, kTIndexFactor3, kTIndexStep1);
+
+ WebRtcIsacfix_MatrixProduct1(WebRtcIsacfix_kT2ShapeQ15[0], tmpcoeffs2_sQ17,
+ tmpcoeffs_sQ17, kTIndexFactor1, kTIndexFactor3, kTInitCase1, kTIndexStep3,
+ kTIndexStep1, kTLoopCount1, kTLoopCount3, kTMatrix1_shift1);
+
+ /* quantize coefficients */
+
+ BitsQQ = 0;
+ for (k=0; k<KLT_ORDER_GAIN; k++) //ATTN: ok?
+ {
+ posQQ = WebRtcIsacfix_kSelIndGain[k];
+ pos2QQ= (int16_t)CalcLrIntQ(tmpcoeffs_gQ17[posQQ], 17);
+
+ index_gQQ[k] = pos2QQ + WebRtcIsacfix_kQuantMinGain[k]; //ATTN: ok?
+ if (index_gQQ[k] < 0) {
+ index_gQQ[k] = 0;
+ }
+ else if (index_gQQ[k] > WebRtcIsacfix_kMaxIndGain[k]) {
+ index_gQQ[k] = WebRtcIsacfix_kMaxIndGain[k];
+ }
+ index_ovr_gQQ[k] = WebRtcIsacfix_kOffsetGain[0][k]+index_gQQ[k];
+ posQQ = WebRtcIsacfix_kOfLevelsGain[0] + index_ovr_gQQ[k];
+
+ /* Save data for creation of multiple bitstreams */
+ if (encData != NULL) {
+ encData->LPCindex_g[KLT_ORDER_GAIN*encData->startIdx + k] = index_gQQ[k];
+ }
+
+ /* determine number of bits */
+ sumQQ = WebRtcIsacfix_kCodeLenGainQ11[posQQ]; //Q11
+ BitsQQ += sumQQ;
+ }
+
+ for (k=0; k<KLT_ORDER_SHAPE; k++) //ATTN: ok?
+ {
+ index_sQQ[k] = (int16_t)(CalcLrIntQ(tmpcoeffs_sQ17[WebRtcIsacfix_kSelIndShape[k]], 17) + WebRtcIsacfix_kQuantMinShape[k]); //ATTN: ok?
+
+ if (index_sQQ[k] < 0)
+ index_sQQ[k] = 0;
+ else if (index_sQQ[k] > WebRtcIsacfix_kMaxIndShape[k])
+ index_sQQ[k] = WebRtcIsacfix_kMaxIndShape[k];
+ index_ovr_sQQ[k] = WebRtcIsacfix_kOffsetShape[0][k]+index_sQQ[k];
+
+ posQQ = WebRtcIsacfix_kOfLevelsShape[0] + index_ovr_sQQ[k];
+ sumQQ = WebRtcIsacfix_kCodeLenShapeQ11[posQQ]; //Q11
+ BitsQQ += sumQQ;
+ }
+
+
+
+ *model = 0;
+ *sizeQ11=BitsQQ;
+
+ /* entropy coding of model number */
+ status = WebRtcIsacfix_EncHistMulti(streamdata, model, WebRtcIsacfix_kModelCdfPtr, 1);
+ if (status < 0) {
+ return status;
+ }
+
+ /* entropy coding of quantization indices - shape only */
+ status = WebRtcIsacfix_EncHistMulti(streamdata, index_sQQ, WebRtcIsacfix_kCdfShapePtr[0], KLT_ORDER_SHAPE);
+ if (status < 0) {
+ return status;
+ }
+
+ /* Save data for creation of multiple bitstreams */
+ if (encData != NULL) {
+ for (k=0; k<KLT_ORDER_SHAPE; k++)
+ {
+ encData->LPCindex_s[KLT_ORDER_SHAPE*encData->startIdx + k] = index_sQQ[k];
+ }
+ }
+ /* save the state of the bitstream object 'streamdata' for the possible bit-rate reduction */
+ transcodingParam->full = streamdata->full;
+ transcodingParam->stream_index = streamdata->stream_index;
+ transcodingParam->streamval = streamdata->streamval;
+ transcodingParam->W_upper = streamdata->W_upper;
+ transcodingParam->beforeLastWord = streamdata->stream[streamdata->stream_index-1];
+ transcodingParam->lastWord = streamdata->stream[streamdata->stream_index];
+
+ /* entropy coding of index */
+ status = WebRtcIsacfix_EncHistMulti(streamdata, index_gQQ, WebRtcIsacfix_kCdfGainPtr[0], KLT_ORDER_GAIN);
+ if (status < 0) {
+ return status;
+ }
+
+ /* find quantization levels for shape coefficients */
+ for (k=0; k<KLT_ORDER_SHAPE; k++) {
+ tmpcoeffs_sQ17[WebRtcIsacfix_kSelIndShape[k]] = WEBRTC_SPL_MUL(128, WebRtcIsacfix_kLevelsShapeQ10[WebRtcIsacfix_kOfLevelsShape[0]+index_ovr_sQQ[k]]);
+
+ }
+ /* inverse KLT */
+
+ /* left transform */ // Transpose matrix!
+ WebRtcIsacfix_MatrixProduct1(WebRtcIsacfix_kT1ShapeQ15[0], tmpcoeffs_sQ17,
+ tmpcoeffs2_sQ17, kTIndexFactor4, kTIndexFactor4, kTInitCase0,
+ kTIndexStep1, kTIndexStep1, kTLoopCount3, kTLoopCount3, kTMatrix1_shift1);
+
+ /* right transform */ // Transpose matrix
+ WebRtcIsacfix_MatrixProduct1(WebRtcIsacfix_kT2ShapeQ15[0], tmpcoeffs2_sQ17,
+ tmpcoeffs_sQ17, kTIndexFactor1, kTIndexFactor1, kTInitCase1, kTIndexStep3,
+ kTIndexStep2, kTLoopCount1, kTLoopCount3, kTMatrix1_shift1);
+
+ /* scaling, mean addition, and gain restoration */
+ poss = 0;pos=0;
+ for (k=0; k<SUBFRAMES; k++) {
+
+ /* lo band LAR coeffs */
+ for (n=0; n<ORDERLO; n++, pos++, poss++) {
+ tmp32 = WEBRTC_SPL_MUL_16_32_RSFT16(31208, tmpcoeffs_sQ17[poss]); // (Q16*Q17)>>16 = Q17, with 1/2.1 = 0.47619047619 ~= 31208 in Q16
+ tmp32 = tmp32 + WebRtcIsacfix_kMeansShapeQ17[0][poss]; // Q17+Q17 = Q17
+ LPCCoefQ17[pos] = tmp32;
+ }
+
+ /* hi band LAR coeffs */
+ for (n=0; n<ORDERHI; n++, pos++, poss++) {
+ // ((Q13*Q17)>>16)<<3 = Q17, with 1/0.45 = 2.222222222222 ~= 18204 in Q13
+ tmp32 = WEBRTC_SPL_MUL_16_32_RSFT16(18204, tmpcoeffs_sQ17[poss]) << 3;
+ tmp32 = tmp32 + WebRtcIsacfix_kMeansShapeQ17[0][poss]; // Q17+Q17 = Q17
+ LPCCoefQ17[pos] = tmp32;
+ }
+
+ }
+
+ //to update tmpcoeffs_gQ17 to the proper state
+ for (k=0; k<KLT_ORDER_GAIN; k++) {
+ tmpcoeffs_gQ17[WebRtcIsacfix_kSelIndGain[k]] = WebRtcIsacfix_kLevelsGainQ17[WebRtcIsacfix_kOfLevelsGain[0]+index_ovr_gQQ[k]];
+ }
+
+
+
+ /* find quantization levels for coefficients */
+
+ /* left transform */
+ offsg = 0;
+ posg = 0;
+ for (j=0; j<SUBFRAMES; j++) {
+ // (Q15 * Q17) >> (16 - 1) = Q17; Q17 << 4 = Q21.
+ sumQQ = (WEBRTC_SPL_MUL_16_32_RSFT16(WebRtcIsacfix_kT1GainQ15[0][0],
+ tmpcoeffs_gQ17[offsg]) << 1);
+ sumQQ += (WEBRTC_SPL_MUL_16_32_RSFT16(WebRtcIsacfix_kT1GainQ15[0][1],
+ tmpcoeffs_gQ17[offsg + 1]) << 1);
+ tmpcoeffs2_gQ21[posg] = sumQQ << 4;
+ posg++;
+
+ sumQQ = (WEBRTC_SPL_MUL_16_32_RSFT16(WebRtcIsacfix_kT1GainQ15[0][2],
+ tmpcoeffs_gQ17[offsg]) << 1);
+ sumQQ += (WEBRTC_SPL_MUL_16_32_RSFT16(WebRtcIsacfix_kT1GainQ15[0][3],
+ tmpcoeffs_gQ17[offsg + 1]) << 1);
+ tmpcoeffs2_gQ21[posg] = sumQQ << 4;
+ posg++;
+ offsg += 2;
+ }
+
+ /* right transform */ // Transpose matrix
+ WebRtcIsacfix_MatrixProduct2(WebRtcIsacfix_kT2GainQ15[0], tmpcoeffs2_gQ21,
+ tmpcoeffs_gQ17, kTIndexFactor1, kTIndexStep2);
+
+ /* scaling, mean addition, and gain restoration */
+ posg = 0;
+ gainpos = 0;
+ for (k=0; k<2*SUBFRAMES; k++) {
+
+ // Divide by 4 and get Q17 to Q8, i.e. shift 2+9.
+ sumQQ16 = (int16_t)(tmpcoeffs_gQ17[posg] >> 11);
+ sumQQ16 += WebRtcIsacfix_kMeansGainQ8[0][posg];
+ sumQQ = CalcExpN(sumQQ16); // Q8 in and Q17 out
+ gain_lo_hiQ17[gainpos] = sumQQ; //Q17
+
+ gainpos++;
+ pos++;posg++;
+ }
+
+ return 0;
+}
+
+int WebRtcIsacfix_EstCodeLpcGain(int32_t *gain_lo_hiQ17,
+ Bitstr_enc *streamdata,
+ IsacSaveEncoderData* encData) {
+ int j, k;
+ int16_t posQQ, pos2QQ, gainpos;
+ int16_t posg;
+ int16_t index_gQQ[KLT_ORDER_GAIN];
+
+ int16_t tmpcoeffs_gQ6[KLT_ORDER_GAIN];
+ int32_t tmpcoeffs_gQ17[KLT_ORDER_GAIN];
+ int32_t tmpcoeffs2_gQ21[KLT_ORDER_GAIN];
+ int32_t sumQQ;
+ int status = 0;
+
+ /* write LAR coefficients to statistics file */
+ /* Save data for creation of multiple bitstreams (and transcoding) */
+ if (encData != NULL) {
+ for (k=0; k<KLT_ORDER_GAIN; k++) {
+ encData->LPCcoeffs_g[KLT_ORDER_GAIN*encData->startIdx + k] = gain_lo_hiQ17[k];
+ }
+ }
+
+ /* log gains, mean removal and scaling */
+ posg = 0; gainpos = 0;
+
+ for (k=0; k<SUBFRAMES; k++) {
+ /* log gains */
+
+ /* The input argument X to logN(X) is 2^17 times higher than the
+ input floating-point argument Y to log(Y), since the X value
+ is a Q17 value. This can be compensated for after the call by
+ subtracting a value Z for each Q-step. One Q-step means that
+ X gets 2 times higher, i.e. Z = logN(2)*256 = 0.693147180559*256 =
+ 177.445678 should be subtracted (since logN() returns a Q8 value).
+ For an X value in Q17, the value 177.445678*17 = 3017 should be
+ subtracted */
+ tmpcoeffs_gQ6[posg] = CalcLogN(gain_lo_hiQ17[gainpos])-3017; //Q8
+ tmpcoeffs_gQ6[posg] -= WebRtcIsacfix_kMeansGainQ8[0][posg]; //Q8, but Q6 after not-needed mult. by 4
+ posg++; gainpos++;
+
+ tmpcoeffs_gQ6[posg] = CalcLogN(gain_lo_hiQ17[gainpos])-3017; //Q8
+ tmpcoeffs_gQ6[posg] -= WebRtcIsacfix_kMeansGainQ8[0][posg]; //Q8, but Q6 after not-needed mult. by 4
+ posg++; gainpos++;
+ }
+
+
+ /* KLT */
+
+ /* left transform */
+ posg = 0;
+ for (j=0; j<SUBFRAMES; j++) {
+ // Q21 = Q6 * Q15
+ sumQQ = tmpcoeffs_gQ6[j * 2] * WebRtcIsacfix_kT1GainQ15[0][0] +
+ tmpcoeffs_gQ6[j * 2 + 1] * WebRtcIsacfix_kT1GainQ15[0][2];
+ tmpcoeffs2_gQ21[posg] = sumQQ;
+ posg++;
+
+ sumQQ = tmpcoeffs_gQ6[j * 2] * WebRtcIsacfix_kT1GainQ15[0][1] +
+ tmpcoeffs_gQ6[j * 2 + 1] * WebRtcIsacfix_kT1GainQ15[0][3];
+ tmpcoeffs2_gQ21[posg] = sumQQ;
+ posg++;
+ }
+
+ /* right transform */
+ WebRtcIsacfix_MatrixProduct2(WebRtcIsacfix_kT2GainQ15[0], tmpcoeffs2_gQ21,
+ tmpcoeffs_gQ17, kTIndexFactor3, kTIndexStep1);
+
+ /* quantize coefficients */
+
+ for (k=0; k<KLT_ORDER_GAIN; k++) //ATTN: ok?
+ {
+ posQQ = WebRtcIsacfix_kSelIndGain[k];
+ pos2QQ= (int16_t)CalcLrIntQ(tmpcoeffs_gQ17[posQQ], 17);
+
+ index_gQQ[k] = pos2QQ + WebRtcIsacfix_kQuantMinGain[k]; //ATTN: ok?
+ if (index_gQQ[k] < 0) {
+ index_gQQ[k] = 0;
+ }
+ else if (index_gQQ[k] > WebRtcIsacfix_kMaxIndGain[k]) {
+ index_gQQ[k] = WebRtcIsacfix_kMaxIndGain[k];
+ }
+
+ /* Save data for creation of multiple bitstreams */
+ if (encData != NULL) {
+ encData->LPCindex_g[KLT_ORDER_GAIN*encData->startIdx + k] = index_gQQ[k];
+ }
+ }
+
+ /* entropy coding of index */
+ status = WebRtcIsacfix_EncHistMulti(streamdata, index_gQQ, WebRtcIsacfix_kCdfGainPtr[0], KLT_ORDER_GAIN);
+ if (status < 0) {
+ return status;
+ }
+
+ return 0;
+}
+
+
+int WebRtcIsacfix_EncodeLpc(int32_t *gain_lo_hiQ17,
+ int16_t *LPCCoef_loQ15,
+ int16_t *LPCCoef_hiQ15,
+ int16_t *model,
+ int32_t *sizeQ11,
+ Bitstr_enc *streamdata,
+ IsacSaveEncoderData* encData,
+ transcode_obj *transcodeParam)
+{
+ int status = 0;
+ int32_t larsQ17[KLT_ORDER_SHAPE]; // KLT_ORDER_SHAPE == (ORDERLO+ORDERHI)*SUBFRAMES
+ // = (6+12)*6 == 108
+
+ Poly2LarFix(LPCCoef_loQ15, ORDERLO, LPCCoef_hiQ15, ORDERHI, SUBFRAMES, larsQ17);
+
+ status = EstCodeLpcCoef(larsQ17, gain_lo_hiQ17, model, sizeQ11,
+ streamdata, encData, transcodeParam);
+ if (status < 0) {
+ return (status);
+ }
+
+ Lar2polyFix(larsQ17, LPCCoef_loQ15, ORDERLO, LPCCoef_hiQ15, ORDERHI, SUBFRAMES);
+
+ return 0;
+}
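+
+/* Note: Lar2polyFix() above runs on the (by then quantized) LARs, so
+ the caller gets back the same LPC representation the decoder will
+ reconstruct from the bitstream. */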
+
+
+/* decode & dequantize RC */
+int WebRtcIsacfix_DecodeRcCoef(Bitstr_dec *streamdata, int16_t *RCQ15)
+{
+ int k, err;
+ int16_t index[AR_ORDER];
+
+ /* entropy decoding of quantization indices */
+ err = WebRtcIsacfix_DecHistOneStepMulti(index, streamdata, WebRtcIsacfix_kRcCdfPtr, WebRtcIsacfix_kRcInitInd, AR_ORDER);
+ if (err<0) // error check
+ return err;
+
+ /* find quantization levels for reflection coefficients */
+ for (k=0; k<AR_ORDER; k++)
+ {
+ RCQ15[k] = *(WebRtcIsacfix_kRcLevPtr[k] + index[k]);
+ }
+
+ return 0;
+}
+
+
+
+/* quantize & code RC */
+int WebRtcIsacfix_EncodeRcCoef(int16_t *RCQ15, Bitstr_enc *streamdata)
+{
+ int k;
+ int16_t index[AR_ORDER];
+ int status;
+
+ /* quantize reflection coefficients (add noise feedback?) */
+ for (k=0; k<AR_ORDER; k++)
+ {
+ index[k] = WebRtcIsacfix_kRcInitInd[k];
+
+ if (RCQ15[k] > WebRtcIsacfix_kRcBound[index[k]])
+ {
+ while (RCQ15[k] > WebRtcIsacfix_kRcBound[index[k] + 1])
+ index[k]++;
+ }
+ else
+ {
+ while (RCQ15[k] < WebRtcIsacfix_kRcBound[--index[k]]) ;
+ }
+
+ RCQ15[k] = *(WebRtcIsacfix_kRcLevPtr[k] + index[k]);
+ }
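+
+ /* The search above leaves index[k] at the bin of WebRtcIsacfix_kRcBound
+ that encloses RCQ15[k]; the coefficient is then replaced by that bin's
+ representative level from WebRtcIsacfix_kRcLevPtr, keeping the encoder
+ in sync with WebRtcIsacfix_DecodeRcCoef() above. */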
+
+
+ /* entropy coding of quantization indices */
+ status = WebRtcIsacfix_EncHistMulti(streamdata, index, WebRtcIsacfix_kRcCdfPtr, AR_ORDER);
+
+ /* If error in WebRtcIsacfix_EncHistMulti(), status will be negative, otherwise 0 */
+ return status;
+}
+
+
+/* decode & dequantize squared Gain */
+int WebRtcIsacfix_DecodeGain2(Bitstr_dec *streamdata, int32_t *gainQ10)
+{
+ int err;
+ int16_t index;
+
+ /* entropy decoding of quantization index */
+ err = WebRtcIsacfix_DecHistOneStepMulti(
+ &index,
+ streamdata,
+ WebRtcIsacfix_kGainPtr,
+ WebRtcIsacfix_kGainInitInd,
+ 1);
+ /* error check */
+ if (err<0) {
+ return err;
+ }
+
+ /* find quantization level */
+ *gainQ10 = WebRtcIsacfix_kGain2Lev[index];
+
+ return 0;
+}
+
+
+
+/* quantize & code squared Gain */
+int WebRtcIsacfix_EncodeGain2(int32_t *gainQ10, Bitstr_enc *streamdata)
+{
+ int16_t index;
+ int status = 0;
+
+ /* find quantization index */
+ index = WebRtcIsacfix_kGainInitInd[0];
+ if (*gainQ10 > WebRtcIsacfix_kGain2Bound[index])
+ {
+ while (*gainQ10 > WebRtcIsacfix_kGain2Bound[index + 1])
+ index++;
+ }
+ else
+ {
+ while (*gainQ10 < WebRtcIsacfix_kGain2Bound[--index]) ;
+ }
+
+ /* dequantize */
+ *gainQ10 = WebRtcIsacfix_kGain2Lev[index];
+
+ /* entropy coding of quantization index */
+ status = WebRtcIsacfix_EncHistMulti(streamdata, &index, WebRtcIsacfix_kGainPtr, 1);
+
+ /* If error in WebRtcIsacfix_EncHistMulti(), status will be negative, otherwise 0 */
+ return status;
+}
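+
+/* Note: as with the reflection coefficients above, the quantized level
+ is written back through gainQ10, so the caller continues with exactly
+ the value the decoder will reproduce. */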
+
+
+/* code and decode Pitch Gains and Lags functions */
+
+/* decode & dequantize Pitch Gains */
+int WebRtcIsacfix_DecodePitchGain(Bitstr_dec *streamdata, int16_t *PitchGains_Q12)
+{
+ int err;
+ int16_t index_comb;
+ const uint16_t *pitch_gain_cdf_ptr[1];
+
+ /* entropy decoding of quantization indices */
+ *pitch_gain_cdf_ptr = WebRtcIsacfix_kPitchGainCdf;
+ err = WebRtcIsacfix_DecHistBisectMulti(&index_comb, streamdata, pitch_gain_cdf_ptr, WebRtcIsacfix_kCdfTableSizeGain, 1);
+ /* error check; the WebRtcIsacfix_kPitchGain1..4 tables are of size 144 */
+ if ((err < 0) || (index_comb < 0) || (index_comb >= 144))
+ return -ISAC_RANGE_ERROR_DECODE_PITCH_GAIN;
+
+ /* unquantize back to pitch gains by table look-up */
+ PitchGains_Q12[0] = WebRtcIsacfix_kPitchGain1[index_comb];
+ PitchGains_Q12[1] = WebRtcIsacfix_kPitchGain2[index_comb];
+ PitchGains_Q12[2] = WebRtcIsacfix_kPitchGain3[index_comb];
+ PitchGains_Q12[3] = WebRtcIsacfix_kPitchGain4[index_comb];
+
+ return 0;
+}
+
+
+/* quantize & code Pitch Gains */
+int WebRtcIsacfix_EncodePitchGain(int16_t* PitchGains_Q12,
+ Bitstr_enc* streamdata,
+ IsacSaveEncoderData* encData) {
+ int k,j;
+ int16_t SQ15[PITCH_SUBFRAMES];
+ int16_t index[3];
+ int16_t index_comb;
+ const uint16_t *pitch_gain_cdf_ptr[1];
+ int32_t CQ17;
+ int status = 0;
+
+
+ /* get the approximate arcsine (almost linear) */
+ for (k=0; k<PITCH_SUBFRAMES; k++)
+ SQ15[k] = (int16_t)(PitchGains_Q12[k] * 33 >> 2); // Q15
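+
+ /* A quick numeric check: a plain Q12 -> Q15 conversion would be a
+ shift by 3 (x8); multiplying by 33 and shifting right by 2 gives
+ x8.25, i.e. an extra factor 33/32 = 1.03125 from the (almost linear)
+ arcsine fit. */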
+
+
+ /* find quantization index; only for the first three transform coefficients */
+ for (k=0; k<3; k++)
+ {
+ /* transform */
+ CQ17=0;
+ for (j=0; j<PITCH_SUBFRAMES; j++) {
+ CQ17 += WebRtcIsacfix_kTransform[k][j] * SQ15[j] >> 10; // Q17
+ }
+
+ index[k] = (int16_t)((CQ17 + 8192)>>14); // Rounding and scaling with stepsize (=1/0.125=8)
+
+ /* check that the index is not outside the boundaries of the table */
+ if (index[k] < WebRtcIsacfix_kLowerlimiGain[k]) index[k] = WebRtcIsacfix_kLowerlimiGain[k];
+ else if (index[k] > WebRtcIsacfix_kUpperlimitGain[k]) index[k] = WebRtcIsacfix_kUpperlimitGain[k];
+ index[k] -= WebRtcIsacfix_kLowerlimiGain[k];
+ }
+
+ /* calculate unique overall index */
+ index_comb = (int16_t)(WEBRTC_SPL_MUL(WebRtcIsacfix_kMultsGain[0], index[0]) +
+ WEBRTC_SPL_MUL(WebRtcIsacfix_kMultsGain[1], index[1]) + index[2]);
+
+ /* unquantize back to pitch gains by table look-up */
+ // (Y)
+ PitchGains_Q12[0] = WebRtcIsacfix_kPitchGain1[index_comb];
+ PitchGains_Q12[1] = WebRtcIsacfix_kPitchGain2[index_comb];
+ PitchGains_Q12[2] = WebRtcIsacfix_kPitchGain3[index_comb];
+ PitchGains_Q12[3] = WebRtcIsacfix_kPitchGain4[index_comb];
+
+
+ /* entropy coding of quantization pitch gains */
+ *pitch_gain_cdf_ptr = WebRtcIsacfix_kPitchGainCdf;
+ status = WebRtcIsacfix_EncHistMulti(streamdata, &index_comb, pitch_gain_cdf_ptr, 1);
+ if (status < 0) {
+ return status;
+ }
+
+ /* Save data for creation of multiple bitstreams */
+ if (encData != NULL) {
+ encData->pitchGain_index[encData->startIdx] = index_comb;
+ }
+
+ return 0;
+}
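+
+/* The combined index above is a mixed-radix pack of the three clamped
+ component indices, with WebRtcIsacfix_kMultsGain[] holding the radix
+ factors; the decoder recovers the gains directly from the same
+ 144-entry WebRtcIsacfix_kPitchGain1..4 tables. */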
+
+
+
+/* Pitch LAG */
+
+
+/* decode & dequantize Pitch Lags */
+int WebRtcIsacfix_DecodePitchLag(Bitstr_dec *streamdata,
+ int16_t *PitchGain_Q12,
+ int16_t *PitchLags_Q7)
+{
+ int k, err;
+ int16_t index[PITCH_SUBFRAMES];
+ const int16_t *mean_val2Q10, *mean_val4Q10;
+
+ const int16_t *lower_limit;
+ const uint16_t *init_index;
+ const uint16_t *cdf_size;
+ const uint16_t **cdf;
+
+ int32_t meangainQ12;
+ int32_t CQ11, CQ10,tmp32a,tmp32b;
+ int16_t shft;
+
+ meangainQ12=0;
+ for (k = 0; k < 4; k++)
+ meangainQ12 += PitchGain_Q12[k];
+
+ meangainQ12 >>= 2; // Get average.
+
+ /* voicing classification */
+ if (meangainQ12 <= 819) { // mean_gain < 0.2
+ shft = -1; // StepSize=2.0;
+ cdf = WebRtcIsacfix_kPitchLagPtrLo;
+ cdf_size = WebRtcIsacfix_kPitchLagSizeLo;
+ mean_val2Q10 = WebRtcIsacfix_kMeanLag2Lo;
+ mean_val4Q10 = WebRtcIsacfix_kMeanLag4Lo;
+ lower_limit = WebRtcIsacfix_kLowerLimitLo;
+ init_index = WebRtcIsacfix_kInitIndLo;
+ } else if (meangainQ12 <= 1638) { // mean_gain < 0.4
+ shft = 0; // StepSize=1.0;
+ cdf = WebRtcIsacfix_kPitchLagPtrMid;
+ cdf_size = WebRtcIsacfix_kPitchLagSizeMid;
+ mean_val2Q10 = WebRtcIsacfix_kMeanLag2Mid;
+ mean_val4Q10 = WebRtcIsacfix_kMeanLag4Mid;
+ lower_limit = WebRtcIsacfix_kLowerLimitMid;
+ init_index = WebRtcIsacfix_kInitIndMid;
+ } else {
+ shft = 1; // StepSize=0.5;
+ cdf = WebRtcIsacfix_kPitchLagPtrHi;
+ cdf_size = WebRtcIsacfix_kPitchLagSizeHi;
+ mean_val2Q10 = WebRtcIsacfix_kMeanLag2Hi;
+ mean_val4Q10 = WebRtcIsacfix_kMeanLag4Hi;
+ lower_limit = WebRtcIsacfix_kLowerLimitHi;
+ init_index = WebRtcIsacfix_kInitIndHi;
+ }
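+
+ /* Threshold check: in Q12, 819/4096 ~= 0.200 and 1638/4096 ~= 0.400,
+ matching the mean_gain comments above; shft selects a quantization
+ step size of 2.0, 1.0 or 0.5. */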
+
+ /* entropy decoding of quantization indices */
+ err = WebRtcIsacfix_DecHistBisectMulti(index, streamdata, cdf, cdf_size, 1);
+ if ((err<0) || (index[0]<0)) // error check
+ return -ISAC_RANGE_ERROR_DECODE_PITCH_LAG;
+
+ err = WebRtcIsacfix_DecHistOneStepMulti(index+1, streamdata, cdf+1, init_index, 3);
+ if (err<0) // error check
+ return -ISAC_RANGE_ERROR_DECODE_PITCH_LAG;
+
+
+ /* unquantize back to transform coefficients and do the inverse transform: S = T'*C */
+ CQ11 = ((int32_t)index[0] + lower_limit[0]); // Q0
+ CQ11 = WEBRTC_SPL_SHIFT_W32(CQ11,11-shft); // Scale with StepSize, Q11
+ for (k=0; k<PITCH_SUBFRAMES; k++) {
+ tmp32a = WEBRTC_SPL_MUL_16_32_RSFT11(WebRtcIsacfix_kTransform[0][k], CQ11);
+ PitchLags_Q7[k] = (int16_t)(tmp32a >> 5);
+ }
+
+ CQ10 = mean_val2Q10[index[1]];
+ for (k=0; k<PITCH_SUBFRAMES; k++) {
+ tmp32b = WebRtcIsacfix_kTransform[1][k] * (int16_t)CQ10 >> 10;
+ PitchLags_Q7[k] += (int16_t)(tmp32b >> 5);
+ }
+
+ CQ10 = mean_val4Q10[index[3]];
+ for (k=0; k<PITCH_SUBFRAMES; k++) {
+ tmp32b = WebRtcIsacfix_kTransform[3][k] * (int16_t)CQ10 >> 10;
+ PitchLags_Q7[k] += (int16_t)(tmp32b >> 5);
+ }
+
+ return 0;
+}
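+
+/* Note: only the first transform coefficient is rebuilt from its
+ decoded index; coefficients 1 and 3 are replaced by conditional means
+ (mean_val2Q10/mean_val4Q10) looked up on their indices, and index[2]
+ does not enter the reconstruction. The encoder below mirrors this. */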
+
+
+
+/* quantize & code Pitch Lags */
+int WebRtcIsacfix_EncodePitchLag(int16_t* PitchLagsQ7,
+ int16_t* PitchGain_Q12,
+ Bitstr_enc* streamdata,
+ IsacSaveEncoderData* encData) {
+ int k, j;
+ int16_t index[PITCH_SUBFRAMES];
+ int32_t meangainQ12, CQ17;
+ int32_t CQ11, CQ10,tmp32a;
+
+ const int16_t *mean_val2Q10,*mean_val4Q10;
+ const int16_t *lower_limit, *upper_limit;
+ const uint16_t **cdf;
+ int16_t shft, tmp16b;
+ int32_t tmp32b;
+ int status = 0;
+
+ /* compute mean pitch gain */
+ meangainQ12=0;
+ for (k = 0; k < 4; k++)
+ meangainQ12 += PitchGain_Q12[k];
+
+ meangainQ12 >>= 2;
+
+ /* Save data for creation of multiple bitstreams */
+ if (encData != NULL) {
+ encData->meanGain[encData->startIdx] = meangainQ12;
+ }
+
+ /* voicing classification */
+ if (meangainQ12 <= 819) { // mean_gain < 0.2
+ shft = -1; // StepSize=2.0;
+ cdf = WebRtcIsacfix_kPitchLagPtrLo;
+ mean_val2Q10 = WebRtcIsacfix_kMeanLag2Lo;
+ mean_val4Q10 = WebRtcIsacfix_kMeanLag4Lo;
+ lower_limit = WebRtcIsacfix_kLowerLimitLo;
+ upper_limit = WebRtcIsacfix_kUpperLimitLo;
+ } else if (meangainQ12 <= 1638) { // mean_gain < 0.4
+ shft = 0; // StepSize=1.0;
+ cdf = WebRtcIsacfix_kPitchLagPtrMid;
+ mean_val2Q10 = WebRtcIsacfix_kMeanLag2Mid;
+ mean_val4Q10 = WebRtcIsacfix_kMeanLag4Mid;
+ lower_limit = WebRtcIsacfix_kLowerLimitMid;
+ upper_limit = WebRtcIsacfix_kUpperLimitMid;
+ } else {
+ shft = 1; // StepSize=0.5;
+ cdf = WebRtcIsacfix_kPitchLagPtrHi;
+ mean_val2Q10 = WebRtcIsacfix_kMeanLag2Hi;
+ mean_val4Q10 = WebRtcIsacfix_kMeanLag4Hi;
+ lower_limit = WebRtcIsacfix_kLowerLimitHi;
+ upper_limit = WebRtcIsacfix_kUpperLimitHi;
+ }
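+
+ /* This classification must stay identical to the one in
+ WebRtcIsacfix_DecodePitchLag() above, which is why both derive it
+ from the pitch gains (already quantized in
+ WebRtcIsacfix_EncodePitchGain()) rather than from encoder-only state. */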
+
+ /* find quantization index */
+ for (k=0; k<4; k++)
+ {
+ /* transform */
+ CQ17=0;
+ for (j=0; j<PITCH_SUBFRAMES; j++)
+ CQ17 += WebRtcIsacfix_kTransform[k][j] * PitchLagsQ7[j] >> 2; // Q17
+
+ CQ17 = WEBRTC_SPL_SHIFT_W32(CQ17,shft); // Scale with StepSize
+
+ /* quantize */
+ tmp16b = (int16_t)((CQ17 + 65536) >> 17);
+ index[k] = tmp16b;
+
+ /* check that the index is not outside the boundaries of the table */
+ if (index[k] < lower_limit[k]) index[k] = lower_limit[k];
+ else if (index[k] > upper_limit[k]) index[k] = upper_limit[k];
+ index[k] -= lower_limit[k];
+
+ /* Save data for creation of multiple bitstreams */
+ if(encData != NULL) {
+ encData->pitchIndex[PITCH_SUBFRAMES*encData->startIdx + k] = index[k];
+ }
+ }
+
+ /* unquantize back to transform coefficients and do the inverse transform: S = T'*C */
+ CQ11 = (index[0] + lower_limit[0]); // Q0
+ CQ11 = WEBRTC_SPL_SHIFT_W32(CQ11,11-shft); // Scale with StepSize, Q11
+
+ for (k=0; k<PITCH_SUBFRAMES; k++) {
+ tmp32a = WEBRTC_SPL_MUL_16_32_RSFT11(WebRtcIsacfix_kTransform[0][k], CQ11); // Q12
+ PitchLagsQ7[k] = (int16_t)(tmp32a >> 5); // Q7.
+ }
+
+ CQ10 = mean_val2Q10[index[1]];
+ for (k=0; k<PITCH_SUBFRAMES; k++) {
+ tmp32b = WebRtcIsacfix_kTransform[1][k] * (int16_t)CQ10 >> 10;
+ PitchLagsQ7[k] += (int16_t)(tmp32b >> 5); // Q7.
+ }
+
+ CQ10 = mean_val4Q10[index[3]];
+ for (k=0; k<PITCH_SUBFRAMES; k++) {
+ tmp32b = WebRtcIsacfix_kTransform[3][k] * (int16_t)CQ10 >> 10;
+ PitchLagsQ7[k] += (int16_t)(tmp32b >> 5); // Q7.
+ }
+
+ /* entropy coding of quantization pitch lags */
+ status = WebRtcIsacfix_EncHistMulti(streamdata, index, cdf, PITCH_SUBFRAMES);
+
+ /* If error in WebRtcIsacfix_EncHistMulti(), status will be negative, otherwise 0 */
+ return status;
+}
+
+
+
+/* Routines for in-band signaling of bandwidth estimation */
+/* Histograms based on uniform distribution of indices */
+/* Move global variables later! */
+
+
+/* cdf array for frame length indicator */
+const uint16_t kFrameLenCdf[4] = {
+ 0, 21845, 43690, 65535};
+
+/* pointer to cdf array for frame length indicator */
+const uint16_t * const kFrameLenCdfPtr[1] = {kFrameLenCdf};
+
+/* initial cdf index for decoder of frame length indicator */
+const uint16_t kFrameLenInitIndex[1] = {1};
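+
+/* Sanity check: 21845 ~= 65535/3, i.e. kFrameLenCdf is a uniform prior
+ over three symbols; only modes 1 and 2 survive the range check in the
+ decoder below. */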
+
+
+int WebRtcIsacfix_DecodeFrameLen(Bitstr_dec *streamdata,
+ size_t *framesamples)
+{
+
+ int err;
+ int16_t frame_mode;
+
+ err = 0;
+ /* entropy decoding of frame length [1:30ms,2:60ms] */
+ err = WebRtcIsacfix_DecHistOneStepMulti(&frame_mode, streamdata, kFrameLenCdfPtr, kFrameLenInitIndex, 1);
+ if (err<0) // error check
+ return -ISAC_RANGE_ERROR_DECODE_FRAME_LENGTH;
+
+ switch(frame_mode) {
+ case 1:
+ *framesamples = 480; /* 30ms */
+ break;
+ case 2:
+ *framesamples = 960; /* 60ms */
+ break;
+ default:
+ err = -ISAC_DISALLOWED_FRAME_MODE_DECODER;
+ }
+
+ return err;
+}
+
+
+int WebRtcIsacfix_EncodeFrameLen(int16_t framesamples, Bitstr_enc *streamdata) {
+
+ int status;
+ int16_t frame_mode;
+
+ status = 0;
+ frame_mode = 0;
+ /* entropy coding of frame length [1:480 samples,2:960 samples] */
+ switch(framesamples) {
+ case 480:
+ frame_mode = 1;
+ break;
+ case 960:
+ frame_mode = 2;
+ break;
+ default:
+ status = -ISAC_DISALLOWED_FRAME_MODE_ENCODER;
+ }
+
+ if (status < 0)
+ return status;
+
+ status = WebRtcIsacfix_EncHistMulti(streamdata, &frame_mode, kFrameLenCdfPtr, 1);
+
+ return status;
+}
+
+/* cdf array for estimated bandwidth */
+const uint16_t kBwCdf[25] = {
+ 0, 2731, 5461, 8192, 10923, 13653, 16384, 19114, 21845, 24576, 27306, 30037,
+ 32768, 35498, 38229, 40959, 43690, 46421, 49151, 51882, 54613, 57343, 60074,
+ 62804, 65535};
+
+/* pointer to cdf array for estimated bandwidth */
+const uint16_t * const kBwCdfPtr[1] = {kBwCdf};
+
+/* initial cdf index for decoder of estimated bandwidth*/
+const uint16_t kBwInitIndex[1] = {7};
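+
+/* Sanity check: 2731 ~= 65535/24, i.e. kBwCdf is a uniform prior over
+ the 24 bandwidth indices [0..23]. */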
+
+
+int WebRtcIsacfix_DecodeSendBandwidth(Bitstr_dec *streamdata, int16_t *BWno) {
+
+ int err;
+ int16_t BWno32;
+
+ /* entropy decoding of sender's BW estimation [0..23] */
+ err = WebRtcIsacfix_DecHistOneStepMulti(&BWno32, streamdata, kBwCdfPtr, kBwInitIndex, 1);
+ if (err<0) // error check
+ return -ISAC_RANGE_ERROR_DECODE_BANDWIDTH;
+ *BWno = (int16_t)BWno32;
+ return err;
+
+}
+
+
+int WebRtcIsacfix_EncodeReceiveBandwidth(int16_t *BWno, Bitstr_enc *streamdata)
+{
+ int status = 0;
+ /* entropy encoding of receiver's BW estimation [0..23] */
+ status = WebRtcIsacfix_EncHistMulti(streamdata, BWno, kBwCdfPtr, 1);
+
+ return status;
+}
+
+/* estimate code length of LPC Coef */
+void WebRtcIsacfix_TranscodeLpcCoef(int32_t *gain_lo_hiQ17,
+ int16_t *index_gQQ) {
+ int j, k;
+ int16_t posQQ, pos2QQ;
+ int16_t posg, offsg, gainpos;
+ int32_t tmpcoeffs_gQ6[KLT_ORDER_GAIN];
+ int32_t tmpcoeffs_gQ17[KLT_ORDER_GAIN];
+ int32_t tmpcoeffs2_gQ21[KLT_ORDER_GAIN];
+ int32_t sumQQ;
+
+
+ /* log gains, mean removal and scaling */
+ posg = 0; gainpos=0;
+
+ for (k=0; k<SUBFRAMES; k++) {
+ /* log gains */
+
+ /* The input argument X to logN(X) is 2^17 times higher than the
+ input floating point argument Y to log(Y), since the X value
+ is a Q17 value. This can be compensated for after the call by
+ subtracting a value Z for each Q-step. One Q-step means that
+ X gets 2 times higher, i.e. Z = logN(2)*256 = 0.693147180559*256 =
+ 177.445678 should be subtracted (since logN() returns a Q8 value).
+ For an X value in Q17, the value 177.445678*17 = 3017 should be
+ subtracted */
+ tmpcoeffs_gQ6[posg] = CalcLogN(gain_lo_hiQ17[gainpos])-3017; //Q8
+ tmpcoeffs_gQ6[posg] -= WebRtcIsacfix_kMeansGainQ8[0][posg]; // Q8; reinterpreted as Q6, which absorbs the otherwise needed mult. by 4
+ posg++; gainpos++;
+
+ tmpcoeffs_gQ6[posg] = CalcLogN(gain_lo_hiQ17[gainpos])-3017; //Q8
+ tmpcoeffs_gQ6[posg] -= WebRtcIsacfix_kMeansGainQ8[0][posg]; // Q8; reinterpreted as Q6, which absorbs the otherwise needed mult. by 4
+ posg++; gainpos++;
+
+ }
+
+
+ /* KLT */
+
+ /* left transform */
+ for (j = 0, offsg = 0; j < SUBFRAMES; j++, offsg += 2) {
+ // Q21 = Q6 * Q15
+ sumQQ = tmpcoeffs_gQ6[offsg] * WebRtcIsacfix_kT1GainQ15[0][0] +
+ tmpcoeffs_gQ6[offsg + 1] * WebRtcIsacfix_kT1GainQ15[0][2];
+ tmpcoeffs2_gQ21[offsg] = sumQQ;
+
+ // Q21 = Q6 * Q15
+ sumQQ = tmpcoeffs_gQ6[offsg] * WebRtcIsacfix_kT1GainQ15[0][1] +
+ tmpcoeffs_gQ6[offsg + 1] * WebRtcIsacfix_kT1GainQ15[0][3];
+ tmpcoeffs2_gQ21[offsg + 1] = sumQQ;
+ }
+
+ /* right transform */
+ WebRtcIsacfix_MatrixProduct2(WebRtcIsacfix_kT2GainQ15[0], tmpcoeffs2_gQ21,
+ tmpcoeffs_gQ17, kTIndexFactor3, kTIndexStep1);
+
+ /* quantize coefficients */
+ for (k=0; k<KLT_ORDER_GAIN; k++) //ATTN: ok?
+ {
+ posQQ = WebRtcIsacfix_kSelIndGain[k];
+ pos2QQ= (int16_t)CalcLrIntQ(tmpcoeffs_gQ17[posQQ], 17);
+
+ index_gQQ[k] = pos2QQ + WebRtcIsacfix_kQuantMinGain[k]; //ATTN: ok?
+ if (index_gQQ[k] < 0) {
+ index_gQQ[k] = 0;
+ }
+ else if (index_gQQ[k] > WebRtcIsacfix_kMaxIndGain[k]) {
+ index_gQQ[k] = WebRtcIsacfix_kMaxIndGain[k];
+ }
+ }
+}
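+
+/* Note: this routine repeats the gain-quantization path of
+ WebRtcIsacfix_EstCodeLpcGain() above up to the clamped indices, but
+ performs no entropy coding; the indices alone are what a transcoding
+ caller needs to estimate the resulting code length. */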
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/entropy_coding.h b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/entropy_coding.h
new file mode 100644
index 0000000000..ae11394f7c
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/entropy_coding.h
@@ -0,0 +1,177 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * entropy_coding.h
+ *
+ * This header file contains all of the functions used to arithmetically
+ * encode the iSAC bitstream
+ *
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_ENTROPY_CODING_H_
+#define MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_ENTROPY_CODING_H_
+
+#include "modules/audio_coding/codecs/isac/fix/source/structs.h"
+
+/* decode complex spectrum (return number of bytes in stream) */
+int WebRtcIsacfix_DecodeSpec(Bitstr_dec* streamdata,
+ int16_t* frQ7,
+ int16_t* fiQ7,
+ int16_t AvgPitchGain_Q12);
+
+/* encode complex spectrum */
+int WebRtcIsacfix_EncodeSpec(const int16_t* fr,
+ const int16_t* fi,
+ Bitstr_enc* streamdata,
+ int16_t AvgPitchGain_Q12);
+
+/* decode & dequantize LPC Coef */
+int WebRtcIsacfix_DecodeLpcCoef(Bitstr_dec* streamdata,
+ int32_t* LPCCoefQ17,
+ int32_t* gain_lo_hiQ17,
+ int16_t* outmodel);
+
+int WebRtcIsacfix_DecodeLpc(int32_t* gain_lo_hiQ17,
+ int16_t* LPCCoef_loQ15,
+ int16_t* LPCCoef_hiQ15,
+ Bitstr_dec* streamdata,
+ int16_t* outmodel);
+
+/* quantize & code LPC Coef */
+int WebRtcIsacfix_EncodeLpc(int32_t* gain_lo_hiQ17,
+ int16_t* LPCCoef_loQ15,
+ int16_t* LPCCoef_hiQ15,
+ int16_t* model,
+ int32_t* sizeQ11,
+ Bitstr_enc* streamdata,
+ IsacSaveEncoderData* encData,
+ transcode_obj* transcodeParam);
+
+int WebRtcIsacfix_EstCodeLpcGain(int32_t* gain_lo_hiQ17,
+ Bitstr_enc* streamdata,
+ IsacSaveEncoderData* encData);
+/* decode & dequantize RC */
+int WebRtcIsacfix_DecodeRcCoef(Bitstr_dec* streamdata, int16_t* RCQ15);
+
+/* quantize & code RC */
+int WebRtcIsacfix_EncodeRcCoef(int16_t* RCQ15, Bitstr_enc* streamdata);
+
+/* decode & dequantize squared Gain */
+int WebRtcIsacfix_DecodeGain2(Bitstr_dec* streamdata, int32_t* Gain2);
+
+/* quantize & code squared Gain (input is squared gain) */
+int WebRtcIsacfix_EncodeGain2(int32_t* gain2, Bitstr_enc* streamdata);
+
+int WebRtcIsacfix_EncodePitchGain(int16_t* PitchGains_Q12,
+ Bitstr_enc* streamdata,
+ IsacSaveEncoderData* encData);
+
+int WebRtcIsacfix_EncodePitchLag(int16_t* PitchLagQ7,
+ int16_t* PitchGain_Q12,
+ Bitstr_enc* streamdata,
+ IsacSaveEncoderData* encData);
+
+int WebRtcIsacfix_DecodePitchGain(Bitstr_dec* streamdata,
+ int16_t* PitchGain_Q12);
+
+int WebRtcIsacfix_DecodePitchLag(Bitstr_dec* streamdata,
+ int16_t* PitchGain_Q12,
+ int16_t* PitchLagQ7);
+
+int WebRtcIsacfix_DecodeFrameLen(Bitstr_dec* streamdata, size_t* framelength);
+
+int WebRtcIsacfix_EncodeFrameLen(int16_t framelength, Bitstr_enc* streamdata);
+
+int WebRtcIsacfix_DecodeSendBandwidth(Bitstr_dec* streamdata, int16_t* BWno);
+
+int WebRtcIsacfix_EncodeReceiveBandwidth(int16_t* BWno, Bitstr_enc* streamdata);
+
+void WebRtcIsacfix_TranscodeLpcCoef(int32_t* tmpcoeffs_gQ6, int16_t* index_gQQ);
+
+// Pointer functions for LPC transforms.
+
+typedef void (*MatrixProduct1)(const int16_t matrix0[],
+ const int32_t matrix1[],
+ int32_t matrix_product[],
+ int matrix1_index_factor1,
+ int matrix0_index_factor1,
+ int matrix1_index_init_case,
+ int matrix1_index_step,
+ int matrix0_index_step,
+ int inner_loop_count,
+ int mid_loop_count,
+ int shift);
+typedef void (*MatrixProduct2)(const int16_t matrix0[],
+ const int32_t matrix1[],
+ int32_t matrix_product[],
+ int matrix0_index_factor,
+ int matrix0_index_step);
+
+extern MatrixProduct1 WebRtcIsacfix_MatrixProduct1;
+extern MatrixProduct2 WebRtcIsacfix_MatrixProduct2;
+
+void WebRtcIsacfix_MatrixProduct1C(const int16_t matrix0[],
+ const int32_t matrix1[],
+ int32_t matrix_product[],
+ int matrix1_index_factor1,
+ int matrix0_index_factor1,
+ int matrix1_index_init_case,
+ int matrix1_index_step,
+ int matrix0_index_step,
+ int inner_loop_count,
+ int mid_loop_count,
+ int shift);
+void WebRtcIsacfix_MatrixProduct2C(const int16_t matrix0[],
+ const int32_t matrix1[],
+ int32_t matrix_product[],
+ int matrix0_index_factor,
+ int matrix0_index_step);
+
+#if defined(WEBRTC_HAS_NEON)
+void WebRtcIsacfix_MatrixProduct1Neon(const int16_t matrix0[],
+ const int32_t matrix1[],
+ int32_t matrix_product[],
+ int matrix1_index_factor1,
+ int matrix0_index_factor1,
+ int matrix1_index_init_case,
+ int matrix1_index_step,
+ int matrix0_index_step,
+ int inner_loop_count,
+ int mid_loop_count,
+ int shift);
+void WebRtcIsacfix_MatrixProduct2Neon(const int16_t matrix0[],
+ const int32_t matrix1[],
+ int32_t matrix_product[],
+ int matrix0_index_factor,
+ int matrix0_index_step);
+#endif
+
+#if defined(MIPS32_LE)
+void WebRtcIsacfix_MatrixProduct1MIPS(const int16_t matrix0[],
+ const int32_t matrix1[],
+ int32_t matrix_product[],
+ int matrix1_index_factor1,
+ int matrix0_index_factor1,
+ int matrix1_index_init_case,
+ int matrix1_index_step,
+ int matrix0_index_step,
+ int inner_loop_count,
+ int mid_loop_count,
+ int shift);
+
+void WebRtcIsacfix_MatrixProduct2MIPS(const int16_t matrix0[],
+ const int32_t matrix1[],
+ int32_t matrix_product[],
+ int matrix0_index_factor,
+ int matrix0_index_step);
+#endif
+
+#endif // MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_ENTROPY_CODING_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/entropy_coding_mips.c b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/entropy_coding_mips.c
new file mode 100644
index 0000000000..a66a43ef99
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/entropy_coding_mips.c
@@ -0,0 +1,249 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/isac/fix/source/entropy_coding.h"
+#include "modules/audio_coding/codecs/isac/fix/source/settings.h"
+
+// MIPS optimization of the function WebRtcIsacfix_MatrixProduct1.
+// Bit-exact with the function WebRtcIsacfix_MatrixProduct1C from the
+// entropy_coding.c file.
+void WebRtcIsacfix_MatrixProduct1MIPS(const int16_t matrix0[],
+ const int32_t matrix1[],
+ int32_t matrix_product[],
+ const int matrix1_index_factor1,
+ const int matrix0_index_factor1,
+ const int matrix1_index_init_case,
+ const int matrix1_index_step,
+ const int matrix0_index_step,
+ const int inner_loop_count,
+ const int mid_loop_count,
+ const int shift) {
+ if (matrix1_index_init_case != 0) {
+ int j = SUBFRAMES, k = 0, n = 0;
+ int32_t r0, r1, r2, sum32;
+ int32_t* product_start = matrix_product;
+ int32_t* product_ptr;
+ const uint32_t product_step = 4 * mid_loop_count;
+ const uint32_t matrix0_step = 2 * matrix0_index_step;
+ const uint32_t matrix1_step = 4 * matrix1_index_step;
+ const uint32_t matrix0_step2 = 2 * matrix0_index_factor1;
+ const uint32_t matrix1_step2 = 4 * matrix1_index_factor1;
+ const int16_t* matrix0_start = matrix0;
+ const int32_t* matrix1_start = matrix1;
+ int16_t* matrix0_ptr;
+ int32_t* matrix1_ptr;
+
+ __asm __volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "1: \n\t"
+ "addu %[product_ptr], %[product_start], $0 \n\t"
+ "addu %[k], %[product_step], $0 \n\t"
+ "addiu %[j], %[j], -1 \n\t"
+ "addu %[matrix1_start], %[matrix1], $0 \n\t"
+ "2: \n\t"
+ "addu %[matrix1_ptr], %[matrix1_start], $0 \n\t"
+ "addu %[matrix0_ptr], %[matrix0_start], $0 \n\t"
+ "addu %[n], %[inner_loop_count], $0 \n\t"
+ "mul %[sum32], $0, $0 \n\t"
+ "3: \n\t"
+ "lw %[r0], 0(%[matrix1_ptr]) \n\t"
+ "lh %[r1], 0(%[matrix0_ptr]) \n\t"
+ "addu %[matrix1_ptr], %[matrix1_ptr], %[matrix1_step] \n\t"
+ "sllv %[r0], %[r0], %[shift] \n\t"
+ "andi %[r2], %[r0], 0xffff \n\t"
+ "sra %[r2], %[r2], 1 \n\t"
+ "mul %[r2], %[r2], %[r1] \n\t"
+ "sra %[r0], %[r0], 16 \n\t"
+ "mul %[r0], %[r0], %[r1] \n\t"
+ "addu %[matrix0_ptr], %[matrix0_ptr], %[matrix0_step] \n\t"
+ "addiu %[n], %[n], -1 \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "shra_r.w %[r2], %[r2], 15 \n\t"
+#else
+ "addiu %[r2], %[r2], 0x4000 \n\t"
+ "sra %[r2], %[r2], 15 \n\t"
+#endif
+ "addu %[sum32], %[sum32], %[r2] \n\t"
+ "bgtz %[n], 3b \n\t"
+ " addu %[sum32], %[sum32], %[r0] \n\t"
+ "addiu %[k], %[k], -4 \n\t"
+ "addu %[matrix1_start], %[matrix1_start], %[matrix1_step2] \n\t"
+ "sw %[sum32], 0(%[product_ptr]) \n\t"
+ "bgtz %[k], 2b \n\t"
+ " addiu %[product_ptr], %[product_ptr], 4 \n\t"
+ "addu %[matrix0_start], %[matrix0_start], %[matrix0_step2] \n\t"
+ "bgtz %[j], 1b \n\t"
+ " addu %[product_start], %[product_start], %[product_step] \n\t"
+ ".set pop \n\t"
+ : [product_ptr] "=&r" (product_ptr), [product_start] "+r" (product_start),
+ [k] "=&r" (k), [j] "+r" (j), [matrix1_start] "=&r"(matrix1_start),
+ [matrix1_ptr] "=&r" (matrix1_ptr), [matrix0_ptr] "=&r" (matrix0_ptr),
+ [matrix0_start] "+r" (matrix0_start), [n] "=&r" (n), [r0] "=&r" (r0),
+ [sum32] "=&r" (sum32), [r1] "=&r" (r1),[r2] "=&r" (r2)
+ : [product_step] "r" (product_step), [matrix1] "r" (matrix1),
+ [inner_loop_count] "r" (inner_loop_count),
+ [matrix1_step] "r" (matrix1_step), [shift] "r" (shift),
+ [matrix0_step] "r" (matrix0_step), [matrix1_step2] "r" (matrix1_step2),
+ [matrix0_step2] "r" (matrix0_step2)
+ : "hi", "lo", "memory"
+ );
+ } else {
+ int j = SUBFRAMES, k = 0, n = 0;
+ int32_t r0, r1, r2, sum32;
+ int32_t* product_start = matrix_product;
+ int32_t* product_ptr;
+ const uint32_t product_step = 4 * mid_loop_count;
+ const uint32_t matrix0_step = 2 * matrix0_index_step;
+ const uint32_t matrix1_step = 4 * matrix1_index_step;
+ const uint32_t matrix0_step2 = 2 * matrix0_index_factor1;
+ const uint32_t matrix1_step2 = 4 * matrix1_index_factor1;
+ const int16_t* matrix0_start = matrix0;
+ const int32_t* matrix1_start = matrix1;
+ int16_t* matrix0_ptr;
+ int32_t* matrix1_ptr;
+
+ __asm __volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "1: \n\t"
+ "addu %[product_ptr], %[product_start], $0 \n\t"
+ "addu %[k], %[product_step], $0 \n\t"
+ "addiu %[j], %[j], -1 \n\t"
+ "addu %[matrix0_start], %[matrix0], $0 \n\t"
+ "2: \n\t"
+ "addu %[matrix1_ptr], %[matrix1_start], $0 \n\t"
+ "addu %[matrix0_ptr], %[matrix0_start], $0 \n\t"
+ "addu %[n], %[inner_loop_count], $0 \n\t"
+ "mul %[sum32], $0, $0 \n\t"
+ "3: \n\t"
+ "lw %[r0], 0(%[matrix1_ptr]) \n\t"
+ "lh %[r1], 0(%[matrix0_ptr]) \n\t"
+ "addu %[matrix1_ptr], %[matrix1_ptr], %[matrix1_step] \n\t"
+ "sllv %[r0], %[r0], %[shift] \n\t"
+ "andi %[r2], %[r0], 0xffff \n\t"
+ "sra %[r2], %[r2], 1 \n\t"
+ "mul %[r2], %[r2], %[r1] \n\t"
+ "sra %[r0], %[r0], 16 \n\t"
+ "mul %[r0], %[r0], %[r1] \n\t"
+ "addu %[matrix0_ptr], %[matrix0_ptr], %[matrix0_step] \n\t"
+ "addiu %[n], %[n], -1 \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "shra_r.w %[r2], %[r2], 15 \n\t"
+#else
+ "addiu %[r2], %[r2], 0x4000 \n\t"
+ "sra %[r2], %[r2], 15 \n\t"
+#endif
+ "addu %[sum32], %[sum32], %[r2] \n\t"
+ "bgtz %[n], 3b \n\t"
+ " addu %[sum32], %[sum32], %[r0] \n\t"
+ "addiu %[k], %[k], -4 \n\t"
+ "addu %[matrix0_start], %[matrix0_start], %[matrix0_step2] \n\t"
+ "sw %[sum32], 0(%[product_ptr]) \n\t"
+ "bgtz %[k], 2b \n\t"
+ " addiu %[product_ptr], %[product_ptr], 4 \n\t"
+ "addu %[matrix1_start], %[matrix1_start], %[matrix1_step2] \n\t"
+ "bgtz %[j], 1b \n\t"
+ " addu %[product_start], %[product_start], %[product_step] \n\t"
+ ".set pop \n\t"
+ : [product_ptr] "=&r" (product_ptr), [product_start] "+r" (product_start),
+ [k] "=&r" (k), [j] "+r" (j), [matrix1_start] "+r"(matrix1_start),
+ [matrix1_ptr] "=&r" (matrix1_ptr), [matrix0_ptr] "=&r" (matrix0_ptr),
+ [matrix0_start] "=&r" (matrix0_start), [n] "=&r" (n), [r0] "=&r" (r0),
+ [sum32] "=&r" (sum32), [r1] "=&r" (r1),[r2] "=&r" (r2)
+ : [product_step] "r" (product_step), [matrix0] "r" (matrix0),
+ [inner_loop_count] "r" (inner_loop_count),
+ [matrix1_step] "r" (matrix1_step), [shift] "r" (shift),
+ [matrix0_step] "r" (matrix0_step), [matrix1_step2] "r" (matrix1_step2),
+ [matrix0_step2] "r" (matrix0_step2)
+ : "hi", "lo", "memory"
+ );
+ }
+}
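+
+// The assembly above implements a 16x32-bit multiply with a right
+// shift by 16: the 32-bit factor (after the variable left shift) is
+// split into its high half and its low half pre-shifted right by 1,
+// each half is multiplied by the 16-bit factor, and the low product is
+// shifted right by 15 with rounding before being added to the high
+// product.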
+
+// MIPS optimization of the function WebRtcIsacfix_MatrixProduct2.
+// Bit-exact with the function WebRtcIsacfix_MatrixProduct2C from the
+// entropy_coding.c file.
+void WebRtcIsacfix_MatrixProduct2MIPS(const int16_t matrix0[],
+ const int32_t matrix1[],
+ int32_t matrix_product[],
+ const int matrix0_index_factor,
+ const int matrix0_index_step) {
+ int j = 0, n = 0;
+ int loop_count = SUBFRAMES;
+ const int16_t* matrix0_ptr;
+ const int32_t* matrix1_ptr;
+ const int16_t* matrix0_start = matrix0;
+ const int matrix0_step = 2 * matrix0_index_step;
+ const int matrix0_step2 = 2 * matrix0_index_factor;
+ int32_t r0, r1, r2, r3, r4, sum32, sum32_2;
+
+ __asm __volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "addu %[j], %[loop_count], $0 \n\t"
+ "addu %[matrix0_start], %[matrix0], $0 \n\t"
+ "1: \n\t"
+ "addu %[matrix1_ptr], %[matrix1], $0 \n\t"
+ "addu %[matrix0_ptr], %[matrix0_start], $0 \n\t"
+ "addu %[n], %[loop_count], $0 \n\t"
+ "mul %[sum32], $0, $0 \n\t"
+ "mul %[sum32_2], $0, $0 \n\t"
+ "2: \n\t"
+ "lw %[r0], 0(%[matrix1_ptr]) \n\t"
+ "lw %[r1], 4(%[matrix1_ptr]) \n\t"
+ "lh %[r2], 0(%[matrix0_ptr]) \n\t"
+ "andi %[r3], %[r0], 0xffff \n\t"
+ "sra %[r3], %[r3], 1 \n\t"
+ "mul %[r3], %[r3], %[r2] \n\t"
+ "andi %[r4], %[r1], 0xffff \n\t"
+ "sra %[r4], %[r4], 1 \n\t"
+ "mul %[r4], %[r4], %[r2] \n\t"
+ "sra %[r0], %[r0], 16 \n\t"
+ "mul %[r0], %[r0], %[r2] \n\t"
+ "sra %[r1], %[r1], 16 \n\t"
+ "mul %[r1], %[r1], %[r2] \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "shra_r.w %[r3], %[r3], 15 \n\t"
+ "shra_r.w %[r4], %[r4], 15 \n\t"
+#else
+ "addiu %[r3], %[r3], 0x4000 \n\t"
+ "sra %[r3], %[r3], 15 \n\t"
+ "addiu %[r4], %[r4], 0x4000 \n\t"
+ "sra %[r4], %[r4], 15 \n\t"
+#endif
+ "addiu %[matrix1_ptr], %[matrix1_ptr], 8 \n\t"
+ "addu %[matrix0_ptr], %[matrix0_ptr], %[matrix0_step] \n\t"
+ "addiu %[n], %[n], -1 \n\t"
+ "addu %[sum32], %[sum32], %[r3] \n\t"
+ "addu %[sum32_2], %[sum32_2], %[r4] \n\t"
+ "addu %[sum32], %[sum32], %[r0] \n\t"
+ "bgtz %[n], 2b \n\t"
+ " addu %[sum32_2], %[sum32_2], %[r1] \n\t"
+ "sra %[sum32], %[sum32], 3 \n\t"
+ "sra %[sum32_2], %[sum32_2], 3 \n\t"
+ "addiu %[j], %[j], -1 \n\t"
+ "addu %[matrix0_start], %[matrix0_start], %[matrix0_step2] \n\t"
+ "sw %[sum32], 0(%[matrix_product]) \n\t"
+ "sw %[sum32_2], 4(%[matrix_product]) \n\t"
+ "bgtz %[j], 1b \n\t"
+ " addiu %[matrix_product], %[matrix_product], 8 \n\t"
+ ".set pop \n\t"
+ : [j] "=&r" (j), [matrix0_start] "=&r" (matrix0_start),
+ [matrix1_ptr] "=&r" (matrix1_ptr), [matrix0_ptr] "=&r" (matrix0_ptr),
+ [n] "=&r" (n), [sum32] "=&r" (sum32), [sum32_2] "=&r" (sum32_2),
+ [r0] "=&r" (r0), [r1] "=&r" (r1), [r2] "=&r" (r2), [r3] "=&r" (r3),
+ [r4] "=&r" (r4), [matrix_product] "+r" (matrix_product)
+ : [loop_count] "r" (loop_count), [matrix0] "r" (matrix0),
+ [matrix1] "r" (matrix1), [matrix0_step] "r" (matrix0_step),
+ [matrix0_step2] "r" (matrix0_step2)
+ : "hi", "lo", "memory"
+ );
+}
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/entropy_coding_neon.c b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/entropy_coding_neon.c
new file mode 100644
index 0000000000..0200567880
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/entropy_coding_neon.c
@@ -0,0 +1,217 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/* This file contains WebRtcIsacfix_MatrixProduct1Neon() and
+ * WebRtcIsacfix_MatrixProduct2Neon() for the ARM Neon platform. APIs
+ * are in entropy_coding.c. Results are bit-exact with the C code for
+ * generic platforms.
+ */
+
+#include <arm_neon.h>
+#include <stddef.h>
+
+#include "modules/audio_coding/codecs/isac/fix/source/entropy_coding.h"
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+#include "rtc_base/checks.h"
+
+void WebRtcIsacfix_MatrixProduct1Neon(const int16_t matrix0[],
+ const int32_t matrix1[],
+ int32_t matrix_product[],
+ const int matrix1_index_factor1,
+ const int matrix0_index_factor1,
+ const int matrix1_index_init_case,
+ const int matrix1_index_step,
+ const int matrix0_index_step,
+ const int inner_loop_count,
+ const int mid_loop_count,
+ const int shift) {
+ int j = 0, k = 0, n = 0;
+ int matrix1_index = 0, matrix0_index = 0, matrix_prod_index = 0;
+ int* matrix1_index_factor2 = &j;
+ int* matrix0_index_factor2 = &k;
+ if (matrix1_index_init_case != 0) {
+ matrix1_index_factor2 = &k;
+ matrix0_index_factor2 = &j;
+ }
+ int32x4_t shift32x4 = vdupq_n_s32(shift);
+ int32x2_t shift32x2 = vdup_n_s32(shift);
+ int32x4_t sum_32x4 = vdupq_n_s32(0);
+ int32x2_t sum_32x2 = vdup_n_s32(0);
+
+ RTC_DCHECK_EQ(0, inner_loop_count % 2);
+ RTC_DCHECK_EQ(0, mid_loop_count % 2);
+
+ if (matrix1_index_init_case != 0 && matrix1_index_factor1 == 1) {
+ for (j = 0; j < SUBFRAMES; j++) {
+ matrix_prod_index = mid_loop_count * j;
+ for (k = 0; k < (mid_loop_count >> 2) << 2; k += 4) {
+ sum_32x4 = veorq_s32(sum_32x4, sum_32x4); // Initialize to zeros.
+ matrix1_index = k;
+ matrix0_index = matrix0_index_factor1 * j;
+ for (n = 0; n < inner_loop_count; n++) {
+ int32x4_t matrix0_32x4 =
+ vdupq_n_s32((int32_t)(matrix0[matrix0_index]) << 15);
+ int32x4_t matrix1_32x4 =
+ vshlq_s32(vld1q_s32(&matrix1[matrix1_index]), shift32x4);
+ int32x4_t multi_32x4 = vqdmulhq_s32(matrix0_32x4, matrix1_32x4);
+ sum_32x4 = vqaddq_s32(sum_32x4, multi_32x4);
+ matrix1_index += matrix1_index_step;
+ matrix0_index += matrix0_index_step;
+ }
+ vst1q_s32(&matrix_product[matrix_prod_index], sum_32x4);
+ matrix_prod_index += 4;
+ }
+ if (mid_loop_count % 4 > 1) {
+ sum_32x2 = veor_s32(sum_32x2, sum_32x2); // Initialize to zeros.
+ matrix1_index = k;
+ k += 2;
+ matrix0_index = matrix0_index_factor1 * j;
+ for (n = 0; n < inner_loop_count; n++) {
+ int32x2_t matrix0_32x2 =
+ vdup_n_s32((int32_t)(matrix0[matrix0_index]) << 15);
+ int32x2_t matrix1_32x2 =
+ vshl_s32(vld1_s32(&matrix1[matrix1_index]), shift32x2);
+ int32x2_t multi_32x2 = vqdmulh_s32(matrix0_32x2, matrix1_32x2);
+ sum_32x2 = vqadd_s32(sum_32x2, multi_32x2);
+ matrix1_index += matrix1_index_step;
+ matrix0_index += matrix0_index_step;
+ }
+ vst1_s32(&matrix_product[matrix_prod_index], sum_32x2);
+ matrix_prod_index += 2;
+ }
+ }
+ }
+ else if (matrix1_index_init_case == 0 && matrix0_index_factor1 == 1) {
+ int32x2_t multi_32x2 = vdup_n_s32(0);
+ int32x2_t matrix0_32x2 = vdup_n_s32(0);
+ for (j = 0; j < SUBFRAMES; j++) {
+ matrix_prod_index = mid_loop_count * j;
+ for (k = 0; k < (mid_loop_count >> 2) << 2; k += 4) {
+ sum_32x4 = veorq_s32(sum_32x4, sum_32x4); // Initialize to zeros.
+ matrix1_index = matrix1_index_factor1 * j;
+ matrix0_index = k;
+ for (n = 0; n < inner_loop_count; n++) {
+ int32x4_t matrix1_32x4 = vdupq_n_s32(matrix1[matrix1_index] << shift);
+ int32x4_t matrix0_32x4 =
+ vshll_n_s16(vld1_s16(&matrix0[matrix0_index]), 15);
+ int32x4_t multi_32x4 = vqdmulhq_s32(matrix0_32x4, matrix1_32x4);
+ sum_32x4 = vqaddq_s32(sum_32x4, multi_32x4);
+ matrix1_index += matrix1_index_step;
+ matrix0_index += matrix0_index_step;
+ }
+ vst1q_s32(&matrix_product[matrix_prod_index], sum_32x4);
+ matrix_prod_index += 4;
+ }
+ if (mid_loop_count % 4 > 1) {
+ sum_32x2 = veor_s32(sum_32x2, sum_32x2); // Initialize to zeros.
+ matrix1_index = matrix1_index_factor1 * j;
+ matrix0_index = k;
+ for (n = 0; n < inner_loop_count; n++) {
+ int32x2_t matrix1_32x2 = vdup_n_s32(matrix1[matrix1_index] << shift);
+ matrix0_32x2 =
+ vset_lane_s32((int32_t)matrix0[matrix0_index], matrix0_32x2, 0);
+ matrix0_32x2 = vset_lane_s32((int32_t)matrix0[matrix0_index + 1],
+ matrix0_32x2, 1);
+ matrix0_32x2 = vshl_n_s32(matrix0_32x2, 15);
+ multi_32x2 = vqdmulh_s32(matrix1_32x2, matrix0_32x2);
+ sum_32x2 = vqadd_s32(sum_32x2, multi_32x2);
+ matrix1_index += matrix1_index_step;
+ matrix0_index += matrix0_index_step;
+ }
+ vst1_s32(&matrix_product[matrix_prod_index], sum_32x2);
+ matrix_prod_index += 2;
+ }
+ }
+ }
+ else if (matrix1_index_init_case == 0 &&
+ matrix1_index_step == 1 &&
+ matrix0_index_step == 1) {
+ int32x2_t multi_32x2 = vdup_n_s32(0);
+ int32x2_t matrix0_32x2 = vdup_n_s32(0);
+ for (j = 0; j < SUBFRAMES; j++) {
+ matrix_prod_index = mid_loop_count * j;
+ for (k = 0; k < mid_loop_count; k++) {
+ sum_32x4 = veorq_s32(sum_32x4, sum_32x4); // Initialize to zeros.
+ matrix1_index = matrix1_index_factor1 * j;
+ matrix0_index = matrix0_index_factor1 * k;
+ for (n = 0; n < (inner_loop_count >> 2) << 2; n += 4) {
+ int32x4_t matrix1_32x4 =
+ vshlq_s32(vld1q_s32(&matrix1[matrix1_index]), shift32x4);
+ int32x4_t matrix0_32x4 =
+ vshll_n_s16(vld1_s16(&matrix0[matrix0_index]), 15);
+ int32x4_t multi_32x4 = vqdmulhq_s32(matrix0_32x4, matrix1_32x4);
+ sum_32x4 = vqaddq_s32(sum_32x4, multi_32x4);
+ matrix1_index += 4;
+ matrix0_index += 4;
+ }
+ sum_32x2 = vqadd_s32(vget_low_s32(sum_32x4), vget_high_s32(sum_32x4));
+ if (inner_loop_count % 4 > 1) {
+ int32x2_t matrix1_32x2 =
+ vshl_s32(vld1_s32(&matrix1[matrix1_index]), shift32x2);
+ matrix0_32x2 =
+ vset_lane_s32((int32_t)matrix0[matrix0_index], matrix0_32x2, 0);
+ matrix0_32x2 = vset_lane_s32((int32_t)matrix0[matrix0_index + 1],
+ matrix0_32x2, 1);
+ matrix0_32x2 = vshl_n_s32(matrix0_32x2, 15);
+ multi_32x2 = vqdmulh_s32(matrix1_32x2, matrix0_32x2);
+ sum_32x2 = vqadd_s32(sum_32x2, multi_32x2);
+ }
+ sum_32x2 = vpadd_s32(sum_32x2, sum_32x2);
+ vst1_lane_s32(&matrix_product[matrix_prod_index], sum_32x2, 0);
+ matrix_prod_index++;
+ }
+ }
+ }
+ else {
+ for (j = 0; j < SUBFRAMES; j++) {
+ matrix_prod_index = mid_loop_count * j;
+ for (k=0; k < mid_loop_count; k++) {
+ int32_t sum32 = 0;
+ matrix1_index = matrix1_index_factor1 * (*matrix1_index_factor2);
+ matrix0_index = matrix0_index_factor1 * (*matrix0_index_factor2);
+ for (n = 0; n < inner_loop_count; n++) {
+ sum32 += (WEBRTC_SPL_MUL_16_32_RSFT16(matrix0[matrix0_index],
+ matrix1[matrix1_index] << shift));
+ matrix1_index += matrix1_index_step;
+ matrix0_index += matrix0_index_step;
+ }
+ matrix_product[matrix_prod_index] = sum32;
+ matrix_prod_index++;
+ }
+ }
+ }
+}
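+
+// The vector paths above rely on the doubling high-half multiply
+// vqdmulh(q)_s32: with the 16-bit factor pre-shifted left by 15,
+// (2 * (m0 << 15) * m1) >> 32 equals (m0 * m1) >> 16, i.e. the same
+// WEBRTC_SPL_MUL_16_32_RSFT16 operation as the scalar fallback in the
+// final else-branch.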
+
+void WebRtcIsacfix_MatrixProduct2Neon(const int16_t matrix0[],
+ const int32_t matrix1[],
+ int32_t matrix_product[],
+ const int matrix0_index_factor,
+ const int matrix0_index_step) {
+ int j = 0, n = 0;
+ int matrix1_index = 0, matrix0_index = 0, matrix_prod_index = 0;
+ int32x2_t sum_32x2 = vdup_n_s32(0);
+ for (j = 0; j < SUBFRAMES; j++) {
+ sum_32x2 = veor_s32(sum_32x2, sum_32x2); // Initialize to zeros.
+ matrix1_index = 0;
+ matrix0_index = matrix0_index_factor * j;
+ for (n = SUBFRAMES; n > 0; n--) {
+ int32x2_t matrix0_32x2 =
+ vdup_n_s32((int32_t)(matrix0[matrix0_index]) << 15);
+ int32x2_t matrix1_32x2 = vld1_s32(&matrix1[matrix1_index]);
+ int32x2_t multi_32x2 = vqdmulh_s32(matrix0_32x2, matrix1_32x2);
+ sum_32x2 = vqadd_s32(sum_32x2, multi_32x2);
+ matrix1_index += 2;
+ matrix0_index += matrix0_index_step;
+ }
+ sum_32x2 = vshr_n_s32(sum_32x2, 3);
+ vst1_s32(&matrix_product[matrix_prod_index], sum_32x2);
+ matrix_prod_index += 2;
+ }
+}
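+
+// As in the MIPS version above, the final shift right by 3 takes the
+// Q20 accumulators (Q15 x Q21 >> 16 products) down to the Q17 output
+// expected by the entropy-coding callers.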
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/fft.c b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/fft.c
new file mode 100644
index 0000000000..a0ed3f83ce
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/fft.c
@@ -0,0 +1,415 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * fft.c
+ *
+ * Fast Fourier Transform
+ *
+ */
+
+
+#include "modules/audio_coding/codecs/isac/fix/source/fft.h"
+
+static const int16_t kSortTabFft[240] = {
+ 0, 60, 120, 180, 20, 80, 140, 200, 40, 100, 160, 220,
+ 4, 64, 124, 184, 24, 84, 144, 204, 44, 104, 164, 224,
+ 8, 68, 128, 188, 28, 88, 148, 208, 48, 108, 168, 228,
+ 12, 72, 132, 192, 32, 92, 152, 212, 52, 112, 172, 232,
+ 16, 76, 136, 196, 36, 96, 156, 216, 56, 116, 176, 236,
+ 1, 61, 121, 181, 21, 81, 141, 201, 41, 101, 161, 221,
+ 5, 65, 125, 185, 25, 85, 145, 205, 45, 105, 165, 225,
+ 9, 69, 129, 189, 29, 89, 149, 209, 49, 109, 169, 229,
+ 13, 73, 133, 193, 33, 93, 153, 213, 53, 113, 173, 233,
+ 17, 77, 137, 197, 37, 97, 157, 217, 57, 117, 177, 237,
+ 2, 62, 122, 182, 22, 82, 142, 202, 42, 102, 162, 222,
+ 6, 66, 126, 186, 26, 86, 146, 206, 46, 106, 166, 226,
+ 10, 70, 130, 190, 30, 90, 150, 210, 50, 110, 170, 230,
+ 14, 74, 134, 194, 34, 94, 154, 214, 54, 114, 174, 234,
+ 18, 78, 138, 198, 38, 98, 158, 218, 58, 118, 178, 238,
+ 3, 63, 123, 183, 23, 83, 143, 203, 43, 103, 163, 223,
+ 7, 67, 127, 187, 27, 87, 147, 207, 47, 107, 167, 227,
+ 11, 71, 131, 191, 31, 91, 151, 211, 51, 111, 171, 231,
+ 15, 75, 135, 195, 35, 95, 155, 215, 55, 115, 175, 235,
+ 19, 79, 139, 199, 39, 99, 159, 219, 59, 119, 179, 239
+};
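+
+/* kSortTabFft appears to hold the index permutation left by the
+ mixed-radix (4 x 3 x 5 x 4 = 240) passes below; it is applied once at
+ the end to bring the in-place results back to natural order. */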
+
+/* Cosine table in Q14 */
+static const int16_t kCosTabFfftQ14[240] = {
+ 16384, 16378, 16362, 16333, 16294, 16244, 16182, 16110, 16026, 15931, 15826, 15709,
+ 15582, 15444, 15296, 15137, 14968, 14788, 14598, 14399, 14189, 13970, 13741, 13502,
+ 13255, 12998, 12733, 12458, 12176, 11885, 11585, 11278, 10963, 10641, 10311, 9974,
+ 9630, 9280, 8923, 8561, 8192, 7818, 7438, 7053, 6664, 6270, 5872, 5469,
+ 5063, 4653, 4240, 3825, 3406, 2986, 2563, 2139, 1713, 1285, 857, 429,
+ 0, -429, -857, -1285, -1713, -2139, -2563, -2986, -3406, -3825, -4240, -4653,
+ -5063, -5469, -5872, -6270, -6664, -7053, -7438, -7818, -8192, -8561, -8923, -9280,
+ -9630, -9974, -10311, -10641, -10963, -11278, -11585, -11885, -12176, -12458, -12733, -12998,
+ -13255, -13502, -13741, -13970, -14189, -14399, -14598, -14788, -14968, -15137, -15296, -15444,
+ -15582, -15709, -15826, -15931, -16026, -16110, -16182, -16244, -16294, -16333, -16362, -16378,
+ -16384, -16378, -16362, -16333, -16294, -16244, -16182, -16110, -16026, -15931, -15826, -15709,
+ -15582, -15444, -15296, -15137, -14968, -14788, -14598, -14399, -14189, -13970, -13741, -13502,
+ -13255, -12998, -12733, -12458, -12176, -11885, -11585, -11278, -10963, -10641, -10311, -9974,
+ -9630, -9280, -8923, -8561, -8192, -7818, -7438, -7053, -6664, -6270, -5872, -5469,
+ -5063, -4653, -4240, -3825, -3406, -2986, -2563, -2139, -1713, -1285, -857, -429,
+ 0, 429, 857, 1285, 1713, 2139, 2563, 2986, 3406, 3825, 4240, 4653,
+ 5063, 5469, 5872, 6270, 6664, 7053, 7438, 7818, 8192, 8561, 8923, 9280,
+ 9630, 9974, 10311, 10641, 10963, 11278, 11585, 11885, 12176, 12458, 12733, 12998,
+ 13255, 13502, 13741, 13970, 14189, 14399, 14598, 14788, 14968, 15137, 15296, 15444,
+ 15582, 15709, 15826, 15931, 16026, 16110, 16182, 16244, 16294, 16333, 16362, 16378
+};
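+
+/* Throughout this file, WEBRTC_SPL_MUL_16_16_RSFT(a, b, 14) computes
+ (a * b) >> 14, so multiplying by an entry of the Q14 table above
+ preserves the Q format of the other factor (Q14*Qx >> 14 = Qx). */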
+
+
+
+/* Uses 16x16 mul, without rounding, which is faster. Uses WEBRTC_SPL_MUL_16_16_RSFT */
+int16_t WebRtcIsacfix_FftRadix16Fastest(int16_t RexQx[], int16_t ImxQx[], int16_t iSign) {
+
+ int16_t dd, ee, ff, gg, hh, ii;
+ int16_t k0, k1, k2, k3, k4, kk;
+ int16_t tmp116, tmp216;
+
+ int16_t ccc1Q14, ccc2Q14, ccc3Q14, sss1Q14, sss2Q14, sss3Q14;
+ int16_t sss60Q14, ccc72Q14, sss72Q14;
+ int16_t aaQx, ajQx, akQx, ajmQx, ajpQx, akmQx, akpQx;
+ int16_t bbQx, bjQx, bkQx, bjmQx, bjpQx, bkmQx, bkpQx;
+
+ int16_t ReDATAQx[240], ImDATAQx[240];
+
+ sss60Q14 = kCosTabFfftQ14[20];
+ ccc72Q14 = kCosTabFfftQ14[48];
+ sss72Q14 = kCosTabFfftQ14[12];
+
+ if (iSign < 0) {
+ sss72Q14 = -sss72Q14;
+ sss60Q14 = -sss60Q14;
+ }
+ /* Complexity is: 10 cycles */
+
+ /* compute fourier transform */
+
+ // transform for factor of 4
+ for (kk=0; kk<60; kk++) {
+ k0 = kk;
+ k1 = k0 + 60;
+ k2 = k1 + 60;
+ k3 = k2 + 60;
+
+ akpQx = RexQx[k0] + RexQx[k2];
+ akmQx = RexQx[k0] - RexQx[k2];
+ ajpQx = RexQx[k1] + RexQx[k3];
+ ajmQx = RexQx[k1] - RexQx[k3];
+ bkpQx = ImxQx[k0] + ImxQx[k2];
+ bkmQx = ImxQx[k0] - ImxQx[k2];
+ bjpQx = ImxQx[k1] + ImxQx[k3];
+ bjmQx = ImxQx[k1] - ImxQx[k3];
+
+ RexQx[k0] = akpQx + ajpQx;
+ ImxQx[k0] = bkpQx + bjpQx;
+ ajpQx = akpQx - ajpQx;
+ bjpQx = bkpQx - bjpQx;
+ if (iSign < 0) {
+ akpQx = akmQx + bjmQx;
+ bkpQx = bkmQx - ajmQx;
+ akmQx -= bjmQx;
+ bkmQx += ajmQx;
+ } else {
+ akpQx = akmQx - bjmQx;
+ bkpQx = bkmQx + ajmQx;
+ akmQx += bjmQx;
+ bkmQx -= ajmQx;
+ }
+
+ ccc1Q14 = kCosTabFfftQ14[kk];
+ ccc2Q14 = kCosTabFfftQ14[2 * kk];
+ ccc3Q14 = kCosTabFfftQ14[3 * kk];
+ sss1Q14 = kCosTabFfftQ14[kk + 60];
+ sss2Q14 = kCosTabFfftQ14[2 * kk + 60];
+ sss3Q14 = kCosTabFfftQ14[3 * kk + 60];
+ if (iSign==1) {
+ sss1Q14 = -sss1Q14;
+ sss2Q14 = -sss2Q14;
+ sss3Q14 = -sss3Q14;
+ }
+
+ //Do several multiplications like Q14*Q16>>14 = Q16
+ // RexQ16[k1] = akpQ16 * ccc1Q14 - bkpQ16 * sss1Q14;
+ // RexQ16[k2] = ajpQ16 * ccc2Q14 - bjpQ16 * sss2Q14;
+ // RexQ16[k3] = akmQ16 * ccc3Q14 - bkmQ16 * sss3Q14;
+ // ImxQ16[k1] = akpQ16 * sss1Q14 + bkpQ16 * ccc1Q14;
+ // ImxQ16[k2] = ajpQ16 * sss2Q14 + bjpQ16 * ccc2Q14;
+ // ImxQ16[k3] = akmQ16 * sss3Q14 + bkmQ16 * ccc3Q14;
+
+ RexQx[k1] = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(ccc1Q14, akpQx, 14) -
+ (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(sss1Q14, bkpQx, 14); // 6 non-mul + 2 mul cycles, i.e. 8 cycles (6+2*7=20 cycles if 16x32mul)
+ RexQx[k2] = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(ccc2Q14, ajpQx, 14) -
+ (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(sss2Q14, bjpQx, 14);
+ RexQx[k3] = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(ccc3Q14, akmQx, 14) -
+ (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(sss3Q14, bkmQx, 14);
+ ImxQx[k1] = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(sss1Q14, akpQx, 14) +
+ (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(ccc1Q14, bkpQx, 14);
+ ImxQx[k2] = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(sss2Q14, ajpQx, 14) +
+ (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(ccc2Q14, bjpQx, 14);
+ ImxQx[k3] = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(sss3Q14, akmQx, 14) +
+ (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(ccc3Q14, bkmQx, 14);
+ //This mul segment needs 6*8 = 48 cycles for 16x16 muls, but 6*20 = 120 cycles for 16x32 muls
+
+
+ }
+ /* Complexity is: 51+48 = 99 cycles for 16x16 muls, but 51+120 = 171 cycles for 16x32 muls*/
+
+ // transform for factor of 3
+ kk=0;
+ k1=20;
+ k2=40;
+
+ for (hh=0; hh<4; hh++) {
+ for (ii=0; ii<20; ii++) {
+ akQx = RexQx[kk];
+ bkQx = ImxQx[kk];
+ ajQx = RexQx[k1] + RexQx[k2];
+ bjQx = ImxQx[k1] + ImxQx[k2];
+ RexQx[kk] = akQx + ajQx;
+ ImxQx[kk] = bkQx + bjQx;
+ tmp116 = ajQx >> 1;
+ tmp216 = bjQx >> 1;
+ akQx = akQx - tmp116;
+ bkQx = bkQx - tmp216;
+ tmp116 = RexQx[k1] - RexQx[k2];
+ tmp216 = ImxQx[k1] - ImxQx[k2];
+
+ ajQx = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(sss60Q14, tmp116, 14); // Q14*Qx>>14 = Qx
+ bjQx = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(sss60Q14, tmp216, 14); // Q14*Qx>>14 = Qx
+ RexQx[k1] = akQx - bjQx;
+ RexQx[k2] = akQx + bjQx;
+ ImxQx[k1] = bkQx + ajQx;
+ ImxQx[k2] = bkQx - ajQx;
+
+ kk++;
+ k1++;
+ k2++;
+ }
+ /* Complexity : (31+6)*20 = 740 cycles for 16x16 muls, but (31+18)*20 = 980 cycles for 16x32 muls*/
+ kk=kk+40;
+ k1=k1+40;
+ k2=k2+40;
+ }
+ /* Complexity : 4*(740+3) = 2972 cycles for 16x16 muls, but 4*(980+3) = 3932 cycles for 16x32 muls*/
+
+ /* multiply by rotation factor for odd factor 3 or 5 (not for 4)
+ Same code (duplicated) for both ii=2 and ii=3 */
+ kk = 1;
+ ee = 0;
+ ff = 0;
+
+ for (gg=0; gg<19; gg++) {
+ kk += 20;
+ ff = ff+4;
+ for (hh=0; hh<2; hh++) {
+ ee = ff + hh * ff;
+ dd = ee + 60;
+ ccc2Q14 = kCosTabFfftQ14[ee];
+ sss2Q14 = kCosTabFfftQ14[dd];
+ if (iSign==1) {
+ sss2Q14 = -sss2Q14;
+ }
+ for (ii=0; ii<4; ii++) {
+ akQx = RexQx[kk];
+ bkQx = ImxQx[kk];
+ RexQx[kk] = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(ccc2Q14, akQx, 14) - // Q14*Qx>>14 = Qx
+ (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(sss2Q14, bkQx, 14);
+ ImxQx[kk] = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(sss2Q14, akQx, 14) + // Q14*Qx>>14 = Qx
+ (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(ccc2Q14, bkQx, 14);
+
+
+ kk += 60;
+ }
+ kk = kk - 220;
+ }
+ // Complexity: 2*(13+5+4*13+2) = 144 for 16x16 muls, but 2*(13+5+4*33+2) = 304 cycles for 16x32 muls
+ kk = kk - 59;
+ }
+ // Complexity: 19*144 = 2736 for 16x16 muls, but 19*304 = 5776 cycles for 16x32 muls
+
+ // transform for factor of 5
+ kk = 0;
+ ccc2Q14 = kCosTabFfftQ14[96];
+ sss2Q14 = kCosTabFfftQ14[84];
+ if (iSign==1) {
+ sss2Q14 = -sss2Q14;
+ }
+
+ for (hh=0; hh<4; hh++) {
+ for (ii=0; ii<12; ii++) {
+ k1 = kk + 4;
+ k2 = k1 + 4;
+ k3 = k2 + 4;
+ k4 = k3 + 4;
+
+ akpQx = RexQx[k1] + RexQx[k4];
+ akmQx = RexQx[k1] - RexQx[k4];
+ bkpQx = ImxQx[k1] + ImxQx[k4];
+ bkmQx = ImxQx[k1] - ImxQx[k4];
+ ajpQx = RexQx[k2] + RexQx[k3];
+ ajmQx = RexQx[k2] - RexQx[k3];
+ bjpQx = ImxQx[k2] + ImxQx[k3];
+ bjmQx = ImxQx[k2] - ImxQx[k3];
+ aaQx = RexQx[kk];
+ bbQx = ImxQx[kk];
+ RexQx[kk] = aaQx + akpQx + ajpQx;
+ ImxQx[kk] = bbQx + bkpQx + bjpQx;
+
+ akQx = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(ccc72Q14, akpQx, 14) +
+ (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(ccc2Q14, ajpQx, 14) + aaQx;
+ bkQx = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(ccc72Q14, bkpQx, 14) +
+ (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(ccc2Q14, bjpQx, 14) + bbQx;
+ ajQx = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(sss72Q14, akmQx, 14) +
+ (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(sss2Q14, ajmQx, 14);
+ bjQx = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(sss72Q14, bkmQx, 14) +
+ (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(sss2Q14, bjmQx, 14);
+ // 32+4*8=64 or 32+4*20=112
+
+ RexQx[k1] = akQx - bjQx;
+ RexQx[k4] = akQx + bjQx;
+ ImxQx[k1] = bkQx + ajQx;
+ ImxQx[k4] = bkQx - ajQx;
+
+ akQx = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(ccc2Q14, akpQx, 14) +
+ (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(ccc72Q14, ajpQx, 14) + aaQx;
+ bkQx = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(ccc2Q14, bkpQx, 14) +
+ (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(ccc72Q14, bjpQx, 14) + bbQx;
+ ajQx = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(sss2Q14, akmQx, 14) -
+ (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(sss72Q14, ajmQx, 14);
+ bjQx = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(sss2Q14, bkmQx, 14) -
+ (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(sss72Q14, bjmQx, 14);
+ // 8+4*8=40 or 8+4*20=88
+
+ RexQx[k2] = akQx - bjQx;
+ RexQx[k3] = akQx + bjQx;
+ ImxQx[k2] = bkQx + ajQx;
+ ImxQx[k3] = bkQx - ajQx;
+
+ kk = k4 + 4;
+ }
+ // Complexity: 12*(64+40+10) = 1368 for 16x16 muls, but 12*(112+88+10) = 2520 cycles for 16x32 muls
+ kk -= 239;
+ }
+ // Complexity: 4*1368 = 5472 for 16x16 muls, but 4*2520 = 10080 cycles for 16x32 muls
+
+ /* multiply by rotation factor for odd factor 3 or 5 (not for 4)
+ Same code (duplicated) for both ii=2 and ii=3 */
+ kk = 1;
+ ee=0;
+
+ for (gg=0; gg<3; gg++) {
+ kk += 4;
+ dd = 12 + 12 * gg;
+ ff = 0;
+ for (hh=0; hh<4; hh++) {
+ ff = ff+dd;
+ ee = ff+60;
+ for (ii=0; ii<12; ii++) {
+ akQx = RexQx[kk];
+ bkQx = ImxQx[kk];
+
+ ccc2Q14 = kCosTabFfftQ14[ff];
+ sss2Q14 = kCosTabFfftQ14[ee];
+
+ if (iSign==1) {
+ sss2Q14 = -sss2Q14;
+ }
+
+ RexQx[kk] = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(ccc2Q14, akQx, 14) -
+ (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(sss2Q14, bkQx, 14);
+ ImxQx[kk] = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(sss2Q14, akQx, 14) +
+ (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(ccc2Q14, bkQx, 14);
+
+ kk += 20;
+ }
+ kk = kk - 236;
+ // Complexity: 12*(12+12) = 288 for 16x16 muls, but 12*(12+32) = 528 cycles for 16x32 muls
+ }
+ kk = kk - 19;
+ // Complexity: 4*288+6 for 16x16 muls, but 4*528+6 cycles for 16x32 muls
+ }
+ // Complexity: 3*4*288+6 = 3462 for 16x16 muls, but 3*4*528+6 = 6342 cycles for 16x32 muls
+
+
+ // last transform for factor of 4
+ for (kk=0; kk<240; kk=kk+4) {
+ k1 = kk + 1;
+ k2 = k1 + 1;
+ k3 = k2 + 1;
+
+ akpQx = RexQx[kk] + RexQx[k2];
+ akmQx = RexQx[kk] - RexQx[k2];
+ ajpQx = RexQx[k1] + RexQx[k3];
+ ajmQx = RexQx[k1] - RexQx[k3];
+ bkpQx = ImxQx[kk] + ImxQx[k2];
+ bkmQx = ImxQx[kk] - ImxQx[k2];
+ bjpQx = ImxQx[k1] + ImxQx[k3];
+ bjmQx = ImxQx[k1] - ImxQx[k3];
+ RexQx[kk] = akpQx + ajpQx;
+ ImxQx[kk] = bkpQx + bjpQx;
+ ajpQx = akpQx - ajpQx;
+ bjpQx = bkpQx - bjpQx;
+ if (iSign < 0) {
+ akpQx = akmQx + bjmQx;
+ bkpQx = bkmQx - ajmQx;
+ akmQx -= bjmQx;
+ bkmQx += ajmQx;
+ } else {
+ akpQx = akmQx - bjmQx;
+ bkpQx = bkmQx + ajmQx;
+ akmQx += bjmQx;
+ bkmQx -= ajmQx;
+ }
+ RexQx[k1] = akpQx;
+ RexQx[k2] = ajpQx;
+ RexQx[k3] = akmQx;
+ ImxQx[k1] = bkpQx;
+ ImxQx[k2] = bjpQx;
+ ImxQx[k3] = bkmQx;
+ }
+ // Complexity: 60*45 = 2700 for 16x16 muls, but 60*45 = 2700 cycles for 16x32 muls
+
+ /* permute the results to normal order */
+ for (ii=0; ii<240; ii++) {
+ ReDATAQx[ii]=RexQx[ii];
+ ImDATAQx[ii]=ImxQx[ii];
+ }
+ // Complexity: 240*2=480 cycles
+
+ for (ii=0; ii<240; ii++) {
+ RexQx[ii]=ReDATAQx[kSortTabFft[ii]];
+ ImxQx[ii]=ImDATAQx[kSortTabFft[ii]];
+ }
+ // Complexity: 240*2*2=960 cycles
+
+ // Total complexity:
+ // 16x16 16x32
+ // Complexity: 10 10
+ // Complexity: 99 171
+ // Complexity: 2972 3932
+ // Complexity: 2736 5776
+ // Complexity: 5472 10080
+ // Complexity: 3462 6342
+ // Complexity: 2700 2700
+ // Complexity: 480 480
+ // Complexity: 960 960
+ // =======================
+ // 18891 30451
+ //
+ // If this FFT is called 2 times each frame, i.e. 67 times per second, it will correspond to
+ // a C54 complexity of 67*18891/1000000 = 1.27 MIPS with 16x16-muls, and 67*30451/1000000 =
+ // 2.04 MIPS with 16x32-muls. Note that this routine is sometimes called 6 times during the
+ // encoding of a frame, i.e. the max complexity would be 7/2*1.27 = 4.4 MIPS for the 16x16 mul case.
+
+
+ return 0;
+}
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/fft.h b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/fft.h
new file mode 100644
index 0000000000..4fe9b96be4
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/fft.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*--------------------------------*-C-*---------------------------------*
+ * File:
+ * fft.h
+ * ---------------------------------------------------------------------*
+ * Re[]: real value array
+ * Im[]: imaginary value array
+ * nTotal: total number of complex values
+ * nPass: number of elements involved in this pass of transform
+ * nSpan: nspan/nPass = number of bytes to increment pointer
+ * in Re[] and Im[]
+ * isign: exponent: +1 = forward -1 = reverse
+ * scaling: normalizing constant by which the final result is *divided*
+ * scaling == -1, normalize by total dimension of the transform
+ * scaling < -1, normalize by the square-root of the total dimension
+ *
+ * ----------------------------------------------------------------------
+ * See the comments in the code for correct usage!
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_FFT_H_
+#define MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_FFT_H_
+
+#include "modules/audio_coding/codecs/isac/fix/source/structs.h"
+
+int16_t WebRtcIsacfix_FftRadix16Fastest(int16_t RexQx[],
+ int16_t ImxQx[],
+ int16_t iSign);
+
+#endif /* MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_FFT_H_ */
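
A minimal usage sketch for the declaration above, assuming the 240-point buffers used by fft.c and the sign convention from the header comment (+1 = forward, -1 = reverse); the input values are arbitrary:

    #include "modules/audio_coding/codecs/isac/fix/source/fft.h"

    void FftRoundTripExample(void) {
      int16_t re[240] = {0};  /* fft.c operates on 240 complex points */
      int16_t im[240] = {0};
      re[1] = 1000;           /* arbitrary test input */
      WebRtcIsacfix_FftRadix16Fastest(re, im, 1);   /* forward */
      WebRtcIsacfix_FftRadix16Fastest(re, im, -1);  /* reverse */
    }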
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/filterbank_internal.h b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/filterbank_internal.h
new file mode 100644
index 0000000000..f741e6f677
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/filterbank_internal.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_FILTERBANK_INTERNAL_H_
+#define MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_FILTERBANK_INTERNAL_H_
+
+#include <stdint.h>
+
+#if defined(__cplusplus) || defined(c_plusplus)
+extern "C" {
+#endif
+
+/* Arguments:
+ * io: Input/output, in Q0.
+ * len: Input, sample length.
+ * coefficient: Input.
+ * state: Input/output, filter state, in Q4.
+ */
+typedef void (*HighpassFilterFixDec32)(int16_t* io,
+ int16_t len,
+ const int16_t* coefficient,
+ int32_t* state);
+extern HighpassFilterFixDec32 WebRtcIsacfix_HighpassFilterFixDec32;
+
+void WebRtcIsacfix_HighpassFilterFixDec32C(int16_t* io,
+ int16_t len,
+ const int16_t* coefficient,
+ int32_t* state);
+
+#if defined(MIPS_DSP_R1_LE)
+void WebRtcIsacfix_HighpassFilterFixDec32MIPS(int16_t* io,
+ int16_t len,
+ const int16_t* coefficient,
+ int32_t* state);
+#endif
+
+typedef void (*AllpassFilter2FixDec16)(
+ int16_t* data_ch1, // Input and output in channel 1, in Q0
+ int16_t* data_ch2, // Input and output in channel 2, in Q0
+ const int16_t* factor_ch1, // Scaling factor for channel 1, in Q15
+ const int16_t* factor_ch2, // Scaling factor for channel 2, in Q15
+ int length, // Length of the data buffers
+ int32_t* filter_state_ch1, // Filter state for channel 1, in Q16
+ int32_t* filter_state_ch2); // Filter state for channel 2, in Q16
+extern AllpassFilter2FixDec16 WebRtcIsacfix_AllpassFilter2FixDec16;
+
+void WebRtcIsacfix_AllpassFilter2FixDec16C(int16_t* data_ch1,
+ int16_t* data_ch2,
+ const int16_t* factor_ch1,
+ const int16_t* factor_ch2,
+ int length,
+ int32_t* filter_state_ch1,
+ int32_t* filter_state_ch2);
+
+#if defined(WEBRTC_HAS_NEON)
+void WebRtcIsacfix_AllpassFilter2FixDec16Neon(int16_t* data_ch1,
+ int16_t* data_ch2,
+ const int16_t* factor_ch1,
+ const int16_t* factor_ch2,
+ int length,
+ int32_t* filter_state_ch1,
+ int32_t* filter_state_ch2);
+#endif
+
+#if defined(MIPS_DSP_R1_LE)
+void WebRtcIsacfix_AllpassFilter2FixDec16MIPS(int16_t* data_ch1,
+ int16_t* data_ch2,
+ const int16_t* factor_ch1,
+ const int16_t* factor_ch2,
+ int length,
+ int32_t* filter_state_ch1,
+ int32_t* filter_state_ch2);
+#endif
+
+#if defined(__cplusplus) || defined(c_plusplus)
+}
+#endif
+
+#endif
+/* MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_FILTERBANK_INTERNAL_H_ */
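
The typedef/extern pairs above implement run-time dispatch between the portable C kernels and the platform-specific ones. A sketch of how the pointers can be initialized, mirroring the preprocessor guards above; in the codec itself this wiring happens during setup, and the helper name here is hypothetical:

    #include "modules/audio_coding/codecs/isac/fix/source/filterbank_internal.h"

    static void InitFilterbankKernels(void) {  /* hypothetical init helper */
      /* Default to the portable C implementations. */
      WebRtcIsacfix_HighpassFilterFixDec32 = WebRtcIsacfix_HighpassFilterFixDec32C;
      WebRtcIsacfix_AllpassFilter2FixDec16 = WebRtcIsacfix_AllpassFilter2FixDec16C;
    #if defined(WEBRTC_HAS_NEON)
      WebRtcIsacfix_AllpassFilter2FixDec16 = WebRtcIsacfix_AllpassFilter2FixDec16Neon;
    #endif
    #if defined(MIPS_DSP_R1_LE)
      WebRtcIsacfix_HighpassFilterFixDec32 = WebRtcIsacfix_HighpassFilterFixDec32MIPS;
      WebRtcIsacfix_AllpassFilter2FixDec16 = WebRtcIsacfix_AllpassFilter2FixDec16MIPS;
    #endif
    }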
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/filterbank_tables.c b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/filterbank_tables.c
new file mode 100644
index 0000000000..f2dec79c2d
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/filterbank_tables.c
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * filterbank_tables.c
+ *
+ * This file contains variables that are used in
+ * filterbanks.c
+ *
+ */
+
+#include "modules/audio_coding/codecs/isac/fix/source/filterbank_tables.h"
+
+/* HPstcoeff_in_Q14 = {a1, a2, b1 - b0 * a1, b2 - b0 * a2};
+ * In float, they are: {-1.94895953203325f, 0.94984516000000f,
+ * -0.05101826139794f, 0.05015484000000f};
+ */
+const int16_t WebRtcIsacfix_kHpStCoeffInQ30[8] = {
+ 16189, -31932, /* Q30 lo/hi pair */
+ 17243, 15562, /* Q30 lo/hi pair */
+ -17186, -26748, /* Q35 lo/hi pair */
+ -27476, 26296 /* Q35 lo/hi pair */
+};
+
+/* HPstcoeff_out_1_Q14 = {a1, a2, b1 - b0 * a1, b2 - b0 * a2};
+ * In float, they are: {-1.99701049409000f, 0.99714204490000f,
+ * 0.01701049409000f, -0.01704204490000f};
+ */
+const int16_t WebRtcIsacfix_kHPStCoeffOut1Q30[8] = {
+ -1306, -32719, /* Q30 lo/hi pair */
+ 11486, 16337, /* Q30 lo/hi pair */
+ 26078, 8918, /* Q35 lo/hi pair */
+ 3956, -8935 /* Q35 lo/hi pair */
+};
+
+/* HPstcoeff_out_2_Q14 = {a1, a2, b1 - b0 * a1, b2 - b0 * a2};
+ * In float, they are: {-1.98645294509837f, 0.98672435560000f,
+ * 0.00645294509837f, -0.00662435560000f};
+ */
+const int16_t WebRtcIsacfix_kHPStCoeffOut2Q30[8] = {
+ -2953, -32546, /* Q30 lo/hi pair */
+ 32233, 16166, /* Q30 lo/hi pair */
+ 13217, 3383, /* Q35 lo/hi pair */
+ -4597, -3473 /* Q35 lo/hi pair */
+};
+
+/* The upper channel all-pass filter factors */
+const int16_t WebRtcIsacfix_kUpperApFactorsQ15[2] = {
+ 1137, 12537
+};
+
+/* The lower channel all-pass filter factors */
+const int16_t WebRtcIsacfix_kLowerApFactorsQ15[2] = {
+ 5059, 24379
+};
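
Each pair of entries above packs one 32-bit coefficient into int16_t halves, with the full value being (hi << 16) + lo. A small standalone check, using the first Q30 pair, that recovers the float a1 value quoted in the comment; the recombination formula is inferred from how filterbanks.c consumes the pairs:

    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
      const int16_t lo = 16189, hi = -31932;        /* first Q30 lo/hi pair */
      const int64_t full = ((int64_t)hi << 16) + lo;
      printf("%.11f\n", (double)full / (1 << 30));  /* approx. -1.94895953203 */
      return 0;
    }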
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/filterbank_tables.h b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/filterbank_tables.h
new file mode 100644
index 0000000000..01e5a7ba85
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/filterbank_tables.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * filterbank_tables.h
+ *
+ * Header file for variables that are defined in
+ * filterbank_tables.c.
+ *
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_FILTERBANK_TABLES_H_
+#define MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_FILTERBANK_TABLES_H_
+
+#include <stdint.h>
+
+#if defined(__cplusplus) || defined(c_plusplus)
+extern "C" {
+#endif
+
+/********************* Coefficient Tables ************************/
+
+/* HPstcoeff_in_Q14 = {a1, a2, b1 - b0 * a1, b2 - b0 * a2}; */
+/* [Q30lo Q30hi Q30lo Q30hi Q35lo Q35hi Q35lo Q35hi] */
+extern const int16_t WebRtcIsacfix_kHpStCoeffInQ30[8];
+
+/* HPstcoeff_out_1_Q14 = {a1, a2, b1 - b0 * a1, b2 - b0 * a2}; */
+/* [Q30lo Q30hi Q30lo Q30hi Q35lo Q35hi Q35lo Q35hi] */
+extern const int16_t WebRtcIsacfix_kHPStCoeffOut1Q30[8];
+
+/* HPstcoeff_out_2_Q14 = {a1, a2, b1 - b0 * a1, b2 - b0 * a2}; */
+/* [Q30lo Q30hi Q30lo Q30hi Q35lo Q35hi Q35lo Q35hi] */
+extern const int16_t WebRtcIsacfix_kHPStCoeffOut2Q30[8];
+
+/* The upper channel all-pass filter factors */
+extern const int16_t WebRtcIsacfix_kUpperApFactorsQ15[2];
+
+/* The lower channel all-pass filter factors */
+extern const int16_t WebRtcIsacfix_kLowerApFactorsQ15[2];
+
+#if defined(__cplusplus) || defined(c_plusplus)
+}
+#endif
+
+#endif /* MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_FILTERBANK_TABLES_H_ */
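
The [Q30lo Q30hi ...] layout lets the filter kernels emulate a 16x32-bit multiply with two 16x16 multiplies (via WEBRTC_SPL_MUL_16_32_RSFT16 in filterbanks.c). A sketch of the underlying identity in plain 64-bit arithmetic; the SPL macro's rounding may differ by an LSB:

    #include <stdint.h>
    #include <stdio.h>

    /* (c * state) >> 32 with c = ((int32_t)hi << 16) + lo, from the halves. */
    static int32_t MulFromHalves(int16_t lo, int16_t hi, int32_t state) {
      int32_t a = (int32_t)(((int64_t)hi * state) >> 16);         /* hi half */
      int32_t b = (int32_t)((((int64_t)lo * state) >> 16) >> 16); /* lo half */
      return a + b;
    }

    int main(void) {
      const int16_t lo = 16189, hi = -31932;  /* a Q30 pair from the tables */
      const int32_t state = 123456789;
      const int64_t c = ((int64_t)hi << 16) + lo;
      printf("halves: %d  direct: %lld\n", MulFromHalves(lo, hi, state),
             (long long)((c * (int64_t)state) >> 32));
      return 0;
    }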
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/filterbanks.c b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/filterbanks.c
new file mode 100644
index 0000000000..57b3e70b89
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/filterbanks.c
@@ -0,0 +1,297 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * filterbanks.c
+ *
+ * This file contains the functions
+ * WebRtcIsacfix_SplitAndFilter and WebRtcIsacfix_FilterAndCombine,
+ * which implement filterbanks that produce decimated lowpass and
+ * highpass versions of a signal, and perform reconstruction.
+ *
+ */
+
+#include "modules/audio_coding/codecs/isac/fix/source/filterbank_internal.h"
+
+#include "modules/audio_coding/codecs/isac/fix/source/codec.h"
+#include "modules/audio_coding/codecs/isac/fix/source/filterbank_tables.h"
+#include "modules/audio_coding/codecs/isac/fix/source/settings.h"
+#include "rtc_base/checks.h"
+
+// Define the function pointer declared in filterbank_internal.h.
+AllpassFilter2FixDec16 WebRtcIsacfix_AllpassFilter2FixDec16;
+
+void WebRtcIsacfix_AllpassFilter2FixDec16C(
+ int16_t *data_ch1, // Input and output in channel 1, in Q0
+ int16_t *data_ch2, // Input and output in channel 2, in Q0
+ const int16_t *factor_ch1, // Scaling factor for channel 1, in Q15
+ const int16_t *factor_ch2, // Scaling factor for channel 2, in Q15
+ const int length, // Length of the data buffers
+ int32_t *filter_state_ch1, // Filter state for channel 1, in Q16
+ int32_t *filter_state_ch2) { // Filter state for channel 2, in Q16
+ int n = 0;
+ int32_t state0_ch1 = filter_state_ch1[0], state1_ch1 = filter_state_ch1[1];
+ int32_t state0_ch2 = filter_state_ch2[0], state1_ch2 = filter_state_ch2[1];
+ int16_t in_out = 0;
+ int32_t a = 0, b = 0;
+
+ // Assembly file assumption.
+ RTC_DCHECK_EQ(0, length % 2);
+
+ for (n = 0; n < length; n++) {
+ // Process channel 1:
+ in_out = data_ch1[n];
+ a = factor_ch1[0] * in_out; // Q15 * Q0 = Q15
+ a *= 1 << 1; // Q15 -> Q16
+ b = WebRtcSpl_AddSatW32(a, state0_ch1);
+ a = -factor_ch1[0] * (int16_t)(b >> 16); // Q15
+ state0_ch1 =
+ WebRtcSpl_AddSatW32(a * (1 << 1), (int32_t)in_out * (1 << 16)); // Q16
+ in_out = (int16_t) (b >> 16); // Save as Q0
+
+ a = factor_ch1[1] * in_out; // Q15 * Q0 = Q15
+ a *= 1 << 1; // Q15 -> Q16
+ b = WebRtcSpl_AddSatW32(a, state1_ch1); // Q16
+ a = -factor_ch1[1] * (int16_t)(b >> 16); // Q15
+ state1_ch1 =
+ WebRtcSpl_AddSatW32(a * (1 << 1), (int32_t)in_out * (1 << 16)); // Q16
+ data_ch1[n] = (int16_t) (b >> 16); // Save as Q0
+
+ // Process channel 2:
+ in_out = data_ch2[n];
+ a = factor_ch2[0] * in_out; // Q15 * Q0 = Q15
+ a *= 1 << 1; // Q15 -> Q16
+ b = WebRtcSpl_AddSatW32(a, state0_ch2); // Q16
+ a = -factor_ch2[0] * (int16_t)(b >> 16); // Q15
+ state0_ch2 =
+ WebRtcSpl_AddSatW32(a * (1 << 1), (int32_t)in_out * (1 << 16)); // Q16
+ in_out = (int16_t) (b >> 16); // Save as Q0
+
+ a = factor_ch2[1] * in_out; // Q15 * Q0 = Q15
+ a *= (1 << 1); // Q15 -> Q16
+ b = WebRtcSpl_AddSatW32(a, state1_ch2); // Q16
+ a = -factor_ch2[1] * (int16_t)(b >> 16); // Q15
+ state1_ch2 =
+ WebRtcSpl_AddSatW32(a * (1 << 1), (int32_t)in_out * (1 << 16)); // Q16
+ data_ch2[n] = (int16_t) (b >> 16); // Save as Q0
+ }
+
+ filter_state_ch1[0] = state0_ch1;
+ filter_state_ch1[1] = state1_ch1;
+ filter_state_ch2[0] = state0_ch2;
+ filter_state_ch2[1] = state1_ch2;
+}
+
+// Define the function pointer declared in filterbank_internal.h.
+HighpassFilterFixDec32 WebRtcIsacfix_HighpassFilterFixDec32;
+
+void WebRtcIsacfix_HighpassFilterFixDec32C(int16_t *io,
+ int16_t len,
+ const int16_t *coefficient,
+ int32_t *state)
+{
+ int k;
+ int32_t a1 = 0, b1 = 0, c = 0, in = 0;
+ int32_t a2 = 0, b2 = 0;
+ int32_t state0 = state[0];
+ int32_t state1 = state[1];
+
+ for (k=0; k<len; k++) {
+ in = (int32_t)io[k];
+
+#ifdef WEBRTC_ARCH_ARM_V7
+ {
+ register int tmp_coeff0;
+ register int tmp_coeff1;
+ __asm __volatile(
+ "ldr %[tmp_coeff0], [%[coeff]]\n\t"
+ "ldr %[tmp_coeff1], [%[coeff], #4]\n\t"
+ "smmulr %[a2], %[tmp_coeff0], %[state0]\n\t"
+ "smmulr %[b2], %[tmp_coeff1], %[state1]\n\t"
+ "ldr %[tmp_coeff0], [%[coeff], #8]\n\t"
+ "ldr %[tmp_coeff1], [%[coeff], #12]\n\t"
+ "smmulr %[a1], %[tmp_coeff0], %[state0]\n\t"
+ "smmulr %[b1], %[tmp_coeff1], %[state1]\n\t"
+ :[a2]"=&r"(a2),
+ [b2]"=&r"(b2),
+ [a1]"=&r"(a1),
+ [b1]"=r"(b1),
+ [tmp_coeff0]"=&r"(tmp_coeff0),
+ [tmp_coeff1]"=&r"(tmp_coeff1)
+ :[coeff]"r"(coefficient),
+ [state0]"r"(state0),
+ [state1]"r"(state1)
+ );
+ }
+#else
+ /* Q35 * Q4 = Q39 ; shift 32 bit => Q7 */
+ a1 = WEBRTC_SPL_MUL_16_32_RSFT16(coefficient[5], state0) +
+ (WEBRTC_SPL_MUL_16_32_RSFT16(coefficient[4], state0) >> 16);
+ b1 = WEBRTC_SPL_MUL_16_32_RSFT16(coefficient[7], state1) +
+ (WEBRTC_SPL_MUL_16_32_RSFT16(coefficient[6], state1) >> 16);
+
+ /* Q30 * Q4 = Q34 ; shift 32 bit => Q2 */
+ a2 = WEBRTC_SPL_MUL_16_32_RSFT16(coefficient[1], state0) +
+ (WEBRTC_SPL_MUL_16_32_RSFT16(coefficient[0], state0) >> 16);
+ b2 = WEBRTC_SPL_MUL_16_32_RSFT16(coefficient[3], state1) +
+ (WEBRTC_SPL_MUL_16_32_RSFT16(coefficient[2], state1) >> 16);
+#endif
+
+ c = in + ((a1 + b1) >> 7); // Q0.
+ io[k] = (int16_t)WebRtcSpl_SatW32ToW16(c); // Write output as Q0.
+
+ c = in * (1 << 2) - a2 - b2; // In Q2.
+ c = (int32_t)WEBRTC_SPL_SAT(536870911, c, -536870912);
+
+ state1 = state0;
+ state0 = c * (1 << 2); // Write state as Q4
+ }
+ state[0] = state0;
+ state[1] = state1;
+}
+
+
+void WebRtcIsacfix_SplitAndFilter1(int16_t *pin,
+ int16_t *LP16,
+ int16_t *HP16,
+ PreFiltBankstr *prefiltdata)
+{
+ /* Function WebRtcIsacfix_SplitAndFilter */
+ /* This function creates low-pass and high-pass decimated versions of part of
+ the input signal, and part of the signal in the input 'lookahead buffer'. */
+
+ int k;
+
+ int16_t tempin_ch1[FRAMESAMPLES/2 + QLOOKAHEAD];
+ int16_t tempin_ch2[FRAMESAMPLES/2 + QLOOKAHEAD];
+ int32_t tmpState_ch1[2 * (QORDER-1)]; /* 4 */
+ int32_t tmpState_ch2[2 * (QORDER-1)]; /* 4 */
+
+ /* High pass filter */
+ WebRtcIsacfix_HighpassFilterFixDec32(pin, FRAMESAMPLES, WebRtcIsacfix_kHpStCoeffInQ30, prefiltdata->HPstates_fix);
+
+
+ /* First Channel */
+ for (k=0;k<FRAMESAMPLES/2;k++) {
+ tempin_ch1[QLOOKAHEAD + k] = pin[1 + 2 * k];
+ }
+ for (k=0;k<QLOOKAHEAD;k++) {
+ tempin_ch1[k]=prefiltdata->INLABUF1_fix[k];
+ prefiltdata->INLABUF1_fix[k] = pin[FRAMESAMPLES + 1 - 2 * (QLOOKAHEAD - k)];
+ }
+
+ /* Second Channel. This is exactly like the first channel, except that the
+ even samples are now filtered instead (lower channel). */
+ for (k=0;k<FRAMESAMPLES/2;k++) {
+ tempin_ch2[QLOOKAHEAD + k] = pin[2 * k];
+ }
+ for (k=0;k<QLOOKAHEAD;k++) {
+ tempin_ch2[k]=prefiltdata->INLABUF2_fix[k];
+ prefiltdata->INLABUF2_fix[k] = pin[FRAMESAMPLES - 2 * (QLOOKAHEAD - k)];
+ }
+
+
+  /* Obtain polyphase components by forward all-pass filtering through each channel. */
+  /* The all-pass filtering automatically updates the filter states, which are exported in the
+     prefiltdata structure. */
+ WebRtcIsacfix_AllpassFilter2FixDec16(tempin_ch1,
+ tempin_ch2,
+ WebRtcIsacfix_kUpperApFactorsQ15,
+ WebRtcIsacfix_kLowerApFactorsQ15,
+ FRAMESAMPLES/2,
+ prefiltdata->INSTAT1_fix,
+ prefiltdata->INSTAT2_fix);
+
+ for (k = 0; k < 2 * (QORDER - 1); k++) {
+ tmpState_ch1[k] = prefiltdata->INSTAT1_fix[k];
+ tmpState_ch2[k] = prefiltdata->INSTAT2_fix[k];
+ }
+ WebRtcIsacfix_AllpassFilter2FixDec16(tempin_ch1 + FRAMESAMPLES/2,
+ tempin_ch2 + FRAMESAMPLES/2,
+ WebRtcIsacfix_kUpperApFactorsQ15,
+ WebRtcIsacfix_kLowerApFactorsQ15,
+ QLOOKAHEAD,
+ tmpState_ch1,
+ tmpState_ch2);
+
+ /* Now Construct low-pass and high-pass signals as combinations of polyphase components */
+ for (k=0; k<FRAMESAMPLES/2 + QLOOKAHEAD; k++) {
+ int32_t tmp1, tmp2, tmp3;
+ tmp1 = (int32_t)tempin_ch1[k]; // Q0 -> Q0
+ tmp2 = (int32_t)tempin_ch2[k]; // Q0 -> Q0
+ tmp3 = (tmp1 + tmp2) >> 1; /* Low pass signal. */
+ LP16[k] = (int16_t)WebRtcSpl_SatW32ToW16(tmp3); /*low pass */
+ tmp3 = (tmp1 - tmp2) >> 1; /* High pass signal. */
+ HP16[k] = (int16_t)WebRtcSpl_SatW32ToW16(tmp3); /*high pass */
+ }
+
+}/*end of WebRtcIsacfix_SplitAndFilter */
+
+
+
+//////////////////////////////////////////////////////////
+////////// Combining
+/* Function WebRtcIsacfix_FilterAndCombine */
+/* This is a decoder function that takes the decimated
+ length FRAMESAMPLES/2 input low-pass and
+ high-pass signals and creates a reconstructed fullband
+ output signal of length FRAMESAMPLES. WebRtcIsacfix_FilterAndCombine
+ is the sibling function of WebRtcIsacfix_SplitAndFilter */
+/* INPUTS:
+ inLP: a length FRAMESAMPLES/2 array of input low-pass
+ samples.
+ inHP: a length FRAMESAMPLES/2 array of input high-pass
+ samples.
+ postfiltdata: input data structure containing the filterbank
+ states from the previous decoding iteration.
+ OUTPUTS:
+ Out: a length FRAMESAMPLES array of output reconstructed
+ samples (fullband) based on the input low-pass and
+ high-pass signals.
+ postfiltdata: the input data structure containing the filterbank
+ states is updated for the next decoding iteration */
+void WebRtcIsacfix_FilterAndCombine1(int16_t *tempin_ch1,
+ int16_t *tempin_ch2,
+ int16_t *out16,
+ PostFiltBankstr *postfiltdata)
+{
+ int k;
+ int16_t in[FRAMESAMPLES];
+
+  /* All-pass filter the new upper and lower channel signals.
+     At the decoder, the all-pass filter factors of the two channels are
+     swapped relative to the encoder: the new upper channel is filtered with
+     the factors the encoder used for the lower channel (kLowerApFactors),
+     and the new lower channel is filtered with the encoder's upper channel
+     factors (kUpperApFactors).
+  */
+ WebRtcIsacfix_AllpassFilter2FixDec16(tempin_ch1,
+ tempin_ch2,
+ WebRtcIsacfix_kLowerApFactorsQ15,
+ WebRtcIsacfix_kUpperApFactorsQ15,
+ FRAMESAMPLES/2,
+ postfiltdata->STATE_0_UPPER_fix,
+ postfiltdata->STATE_0_LOWER_fix);
+
+ /* Merge outputs to form the full length output signal.*/
+ for (k=0;k<FRAMESAMPLES/2;k++) {
+ in[2 * k] = tempin_ch2[k];
+ in[2 * k + 1] = tempin_ch1[k];
+ }
+
+ /* High pass filter */
+ WebRtcIsacfix_HighpassFilterFixDec32(in, FRAMESAMPLES, WebRtcIsacfix_kHPStCoeffOut1Q30, postfiltdata->HPstates1_fix);
+ WebRtcIsacfix_HighpassFilterFixDec32(in, FRAMESAMPLES, WebRtcIsacfix_kHPStCoeffOut2Q30, postfiltdata->HPstates2_fix);
+
+ for (k=0;k<FRAMESAMPLES;k++) {
+ out16[k] = in[k];
+ }
+}
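
A usage sketch for the analysis/synthesis pair above, assuming the prototypes come from codec.h and the constants/structs from settings.h and structs.h. Zero states model a fresh stream; recombining ch1 = LP + HP and ch2 = LP - HP inverts the sum/difference at the end of WebRtcIsacfix_SplitAndFilter1, though the round trip is not bit-exact because the split path carries a QLOOKAHEAD delay:

    #include <string.h>

    #include "modules/audio_coding/codecs/isac/fix/source/codec.h"
    #include "modules/audio_coding/codecs/isac/fix/source/settings.h"
    #include "modules/audio_coding/codecs/isac/fix/source/structs.h"

    void FilterbankRoundTrip(int16_t* frame) {  /* FRAMESAMPLES samples, Q0 */
      int k;
      PreFiltBankstr pre;
      PostFiltBankstr post;
      int16_t lp[FRAMESAMPLES / 2 + QLOOKAHEAD];
      int16_t hp[FRAMESAMPLES / 2 + QLOOKAHEAD];
      int16_t ch1[FRAMESAMPLES / 2];
      int16_t ch2[FRAMESAMPLES / 2];
      int16_t out[FRAMESAMPLES];

      memset(&pre, 0, sizeof(pre));   /* fresh-stream assumption */
      memset(&post, 0, sizeof(post));
      WebRtcIsacfix_SplitAndFilter1(frame, lp, hp, &pre);
      for (k = 0; k < FRAMESAMPLES / 2; k++) {
        ch1[k] = lp[k] + hp[k];  /* undo (ch1 + ch2) >> 1 */
        ch2[k] = lp[k] - hp[k];  /* undo (ch1 - ch2) >> 1 */
      }
      WebRtcIsacfix_FilterAndCombine1(ch1, ch2, out, &post);
    }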
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/filterbanks_mips.c b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/filterbanks_mips.c
new file mode 100644
index 0000000000..949bca70e9
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/filterbanks_mips.c
@@ -0,0 +1,242 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/isac/fix/source/filterbank_internal.h"
+
+// WebRtcIsacfix_AllpassFilter2FixDec16 function optimized for the MIPS DSP platform.
+// Bit-exact with WebRtcIsacfix_AllpassFilter2FixDec16C from filterbanks.c.
+void WebRtcIsacfix_AllpassFilter2FixDec16MIPS(
+ int16_t* data_ch1, // Input and output in channel 1, in Q0.
+ int16_t* data_ch2, // Input and output in channel 2, in Q0.
+ const int16_t* factor_ch1, // Scaling factor for channel 1, in Q15.
+ const int16_t* factor_ch2, // Scaling factor for channel 2, in Q15.
+ const int length, // Length of the data buffers.
+ int32_t* filter_state_ch1, // Filter state for channel 1, in Q16.
+ int32_t* filter_state_ch2) { // Filter state for channel 2, in Q16.
+
+ int32_t st0_ch1, st1_ch1; // channel1 state variables.
+ int32_t st0_ch2, st1_ch2; // channel2 state variables.
+ int32_t f_ch10, f_ch11, f_ch20, f_ch21; // factor variables.
+ int32_t r0, r1, r2, r3, r4, r5; // temporary register variables.
+
+ __asm __volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ // Load all the state and factor variables.
+ "lh %[f_ch10], 0(%[factor_ch1]) \n\t"
+ "lh %[f_ch20], 0(%[factor_ch2]) \n\t"
+ "lh %[f_ch11], 2(%[factor_ch1]) \n\t"
+ "lh %[f_ch21], 2(%[factor_ch2]) \n\t"
+ "lw %[st0_ch1], 0(%[filter_state_ch1]) \n\t"
+ "lw %[st1_ch1], 4(%[filter_state_ch1]) \n\t"
+ "lw %[st0_ch2], 0(%[filter_state_ch2]) \n\t"
+ "lw %[st1_ch2], 4(%[filter_state_ch2]) \n\t"
+ // Allpass filtering loop.
+ "1: \n\t"
+ "lh %[r0], 0(%[data_ch1]) \n\t"
+ "lh %[r1], 0(%[data_ch2]) \n\t"
+ "addiu %[length], %[length], -1 \n\t"
+ "mul %[r2], %[r0], %[f_ch10] \n\t"
+ "mul %[r3], %[r1], %[f_ch20] \n\t"
+ "sll %[r0], %[r0], 16 \n\t"
+ "sll %[r1], %[r1], 16 \n\t"
+ "sll %[r2], %[r2], 1 \n\t"
+ "addq_s.w %[r2], %[r2], %[st0_ch1] \n\t"
+ "sll %[r3], %[r3], 1 \n\t"
+ "addq_s.w %[r3], %[r3], %[st0_ch2] \n\t"
+ "sra %[r2], %[r2], 16 \n\t"
+ "mul %[st0_ch1], %[f_ch10], %[r2] \n\t"
+ "sra %[r3], %[r3], 16 \n\t"
+ "mul %[st0_ch2], %[f_ch20], %[r3] \n\t"
+ "mul %[r4], %[r2], %[f_ch11] \n\t"
+ "mul %[r5], %[r3], %[f_ch21] \n\t"
+ "sll %[st0_ch1], %[st0_ch1], 1 \n\t"
+ "subq_s.w %[st0_ch1], %[r0], %[st0_ch1] \n\t"
+ "sll %[st0_ch2], %[st0_ch2], 1 \n\t"
+ "subq_s.w %[st0_ch2], %[r1], %[st0_ch2] \n\t"
+ "sll %[r4], %[r4], 1 \n\t"
+ "addq_s.w %[r4], %[r4], %[st1_ch1] \n\t"
+ "sll %[r5], %[r5], 1 \n\t"
+ "addq_s.w %[r5], %[r5], %[st1_ch2] \n\t"
+ "sra %[r4], %[r4], 16 \n\t"
+ "mul %[r0], %[r4], %[f_ch11] \n\t"
+ "sra %[r5], %[r5], 16 \n\t"
+ "mul %[r1], %[r5], %[f_ch21] \n\t"
+ "sh %[r4], 0(%[data_ch1]) \n\t"
+ "sh %[r5], 0(%[data_ch2]) \n\t"
+ "addiu %[data_ch1], %[data_ch1], 2 \n\t"
+ "sll %[r2], %[r2], 16 \n\t"
+ "sll %[r0], %[r0], 1 \n\t"
+ "subq_s.w %[st1_ch1], %[r2], %[r0] \n\t"
+ "sll %[r3], %[r3], 16 \n\t"
+ "sll %[r1], %[r1], 1 \n\t"
+ "subq_s.w %[st1_ch2], %[r3], %[r1] \n\t"
+ "bgtz %[length], 1b \n\t"
+ " addiu %[data_ch2], %[data_ch2], 2 \n\t"
+ // Store channel states.
+ "sw %[st0_ch1], 0(%[filter_state_ch1]) \n\t"
+ "sw %[st1_ch1], 4(%[filter_state_ch1]) \n\t"
+ "sw %[st0_ch2], 0(%[filter_state_ch2]) \n\t"
+ "sw %[st1_ch2], 4(%[filter_state_ch2]) \n\t"
+ ".set pop \n\t"
+ : [f_ch10] "=&r" (f_ch10), [f_ch20] "=&r" (f_ch20),
+ [f_ch11] "=&r" (f_ch11), [f_ch21] "=&r" (f_ch21),
+ [st0_ch1] "=&r" (st0_ch1), [st1_ch1] "=&r" (st1_ch1),
+ [st0_ch2] "=&r" (st0_ch2), [st1_ch2] "=&r" (st1_ch2),
+ [r0] "=&r" (r0), [r1] "=&r" (r1), [r2] "=&r" (r2),
+ [r3] "=&r" (r3), [r4] "=&r" (r4), [r5] "=&r" (r5)
+ : [factor_ch1] "r" (factor_ch1), [factor_ch2] "r" (factor_ch2),
+ [filter_state_ch1] "r" (filter_state_ch1),
+ [filter_state_ch2] "r" (filter_state_ch2),
+ [data_ch1] "r" (data_ch1), [data_ch2] "r" (data_ch2),
+ [length] "r" (length)
+ : "memory", "hi", "lo"
+ );
+}
+
+// WebRtcIsacfix_HighpassFilterFixDec32 function optimized for the MIPS DSP platform.
+// Bit-exact with WebRtcIsacfix_HighpassFilterFixDec32C from filterbanks.c.
+void WebRtcIsacfix_HighpassFilterFixDec32MIPS(int16_t* io,
+ int16_t len,
+ const int16_t* coefficient,
+ int32_t* state) {
+ int k;
+ int32_t a1, a2, b1, b2, in;
+ int32_t state0 = state[0];
+ int32_t state1 = state[1];
+
+ int32_t c0, c1, c2, c3;
+ int32_t c4, c5, c6, c7;
+ int32_t state0_lo, state0_hi;
+ int32_t state1_lo, state1_hi;
+ int32_t t0, t1, t2, t3, t4, t5;
+
+ __asm __volatile (
+ "lh %[c0], 0(%[coeff_ptr]) \n\t"
+ "lh %[c1], 2(%[coeff_ptr]) \n\t"
+ "lh %[c2], 4(%[coeff_ptr]) \n\t"
+ "lh %[c3], 6(%[coeff_ptr]) \n\t"
+ "sra %[state0_hi], %[state0], 16 \n\t"
+ "sra %[state1_hi], %[state1], 16 \n\t"
+ "andi %[state0_lo], %[state0], 0xFFFF \n\t"
+ "andi %[state1_lo], %[state1], 0xFFFF \n\t"
+ "lh %[c4], 8(%[coeff_ptr]) \n\t"
+ "lh %[c5], 10(%[coeff_ptr]) \n\t"
+ "lh %[c6], 12(%[coeff_ptr]) \n\t"
+ "lh %[c7], 14(%[coeff_ptr]) \n\t"
+ "sra %[state0_lo], %[state0_lo], 1 \n\t"
+ "sra %[state1_lo], %[state1_lo], 1 \n\t"
+ : [c0] "=&r" (c0), [c1] "=&r" (c1), [c2] "=&r" (c2), [c3] "=&r" (c3),
+ [c4] "=&r" (c4), [c5] "=&r" (c5), [c6] "=&r" (c6), [c7] "=&r" (c7),
+ [state0_hi] "=&r" (state0_hi), [state0_lo] "=&r" (state0_lo),
+ [state1_hi] "=&r" (state1_hi), [state1_lo] "=&r" (state1_lo)
+ : [coeff_ptr] "r" (coefficient), [state0] "r" (state0),
+ [state1] "r" (state1)
+ : "memory"
+ );
+
+ for (k = 0; k < len; k++) {
+ in = (int32_t)io[k];
+
+ __asm __volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "mul %[t2], %[c4], %[state0_lo] \n\t"
+ "mul %[t0], %[c5], %[state0_lo] \n\t"
+ "mul %[t1], %[c4], %[state0_hi] \n\t"
+ "mul %[a1], %[c5], %[state0_hi] \n\t"
+ "mul %[t5], %[c6], %[state1_lo] \n\t"
+ "mul %[t3], %[c7], %[state1_lo] \n\t"
+ "mul %[t4], %[c6], %[state1_hi] \n\t"
+ "mul %[b1], %[c7], %[state1_hi] \n\t"
+ "shra_r.w %[t2], %[t2], 15 \n\t"
+ "shra_r.w %[t0], %[t0], 15 \n\t"
+ "addu %[t1], %[t1], %[t2] \n\t"
+ "addu %[a1], %[a1], %[t0] \n\t"
+ "sra %[t1], %[t1], 16 \n\t"
+ "addu %[a1], %[a1], %[t1] \n\t"
+ "shra_r.w %[t5], %[t5], 15 \n\t"
+ "shra_r.w %[t3], %[t3], 15 \n\t"
+ "addu %[t4], %[t4], %[t5] \n\t"
+ "addu %[b1], %[b1], %[t3] \n\t"
+ "sra %[t4], %[t4], 16 \n\t"
+ "addu %[b1], %[b1], %[t4] \n\t"
+ "mul %[t2], %[c0], %[state0_lo] \n\t"
+ "mul %[t0], %[c1], %[state0_lo] \n\t"
+ "mul %[t1], %[c0], %[state0_hi] \n\t"
+ "mul %[a2], %[c1], %[state0_hi] \n\t"
+ "mul %[t5], %[c2], %[state1_lo] \n\t"
+ "mul %[t3], %[c3], %[state1_lo] \n\t"
+ "mul %[t4], %[c2], %[state1_hi] \n\t"
+ "mul %[b2], %[c3], %[state1_hi] \n\t"
+ "shra_r.w %[t2], %[t2], 15 \n\t"
+ "shra_r.w %[t0], %[t0], 15 \n\t"
+ "addu %[t1], %[t1], %[t2] \n\t"
+ "addu %[a2], %[a2], %[t0] \n\t"
+ "sra %[t1], %[t1], 16 \n\t"
+ "addu %[a2], %[a2], %[t1] \n\t"
+ "shra_r.w %[t5], %[t5], 15 \n\t"
+ "shra_r.w %[t3], %[t3], 15 \n\t"
+ "addu %[t4], %[t4], %[t5] \n\t"
+ "addu %[b2], %[b2], %[t3] \n\t"
+ "sra %[t4], %[t4], 16 \n\t"
+ "addu %[b2], %[b2], %[t4] \n\t"
+ "addu %[a1], %[a1], %[b1] \n\t"
+ "sra %[a1], %[a1], 7 \n\t"
+ "addu %[a1], %[a1], %[in] \n\t"
+ "sll %[t0], %[in], 2 \n\t"
+ "addu %[a2], %[a2], %[b2] \n\t"
+ "subu %[t0], %[t0], %[a2] \n\t"
+ "shll_s.w %[a1], %[a1], 16 \n\t"
+ "shll_s.w %[t0], %[t0], 2 \n\t"
+ "sra %[a1], %[a1], 16 \n\t"
+ "addu %[state1_hi], %[state0_hi], $0 \n\t"
+ "addu %[state1_lo], %[state0_lo], $0 \n\t"
+ "sra %[state0_hi], %[t0], 16 \n\t"
+ "andi %[state0_lo], %[t0], 0xFFFF \n\t"
+ "sra %[state0_lo], %[state0_lo], 1 \n\t"
+ ".set pop \n\t"
+ : [a1] "=&r" (a1), [b1] "=&r" (b1), [a2] "=&r" (a2), [b2] "=&r" (b2),
+ [state0_hi] "+r" (state0_hi), [state0_lo] "+r" (state0_lo),
+ [state1_hi] "+r" (state1_hi), [state1_lo] "+r" (state1_lo),
+ [t0] "=&r" (t0), [t1] "=&r" (t1), [t2] "=&r" (t2),
+ [t3] "=&r" (t3), [t4] "=&r" (t4), [t5] "=&r" (t5)
+ : [c0] "r" (c0), [c1] "r" (c1), [c2] "r" (c2), [c3] "r" (c3),
+ [c4] "r" (c4), [c5] "r" (c5), [c6] "r" (c6), [c7] "r" (c7),
+ [in] "r" (in)
+ : "hi", "lo"
+ );
+ io[k] = (int16_t)a1;
+ }
+ __asm __volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+#if !defined(MIPS_DSP_R2_LE)
+ "sll %[state0_hi], %[state0_hi], 16 \n\t"
+ "sll %[state0_lo], %[state0_lo], 1 \n\t"
+ "sll %[state1_hi], %[state1_hi], 16 \n\t"
+ "sll %[state1_lo], %[state1_lo], 1 \n\t"
+ "or %[state0_hi], %[state0_hi], %[state0_lo] \n\t"
+ "or %[state1_hi], %[state1_hi], %[state1_lo] \n\t"
+#else
+ "sll %[state0_lo], %[state0_lo], 1 \n\t"
+ "sll %[state1_lo], %[state1_lo], 1 \n\t"
+ "precr_sra.ph.w %[state0_hi], %[state0_lo], 0 \n\t"
+ "precr_sra.ph.w %[state1_hi], %[state1_lo], 0 \n\t"
+#endif
+ "sw %[state0_hi], 0(%[state]) \n\t"
+ "sw %[state1_hi], 4(%[state]) \n\t"
+ ".set pop \n\t"
+ : [state0_hi] "+r" (state0_hi), [state0_lo] "+r" (state0_lo),
+ [state1_hi] "+r" (state1_hi), [state1_lo] "+r" (state1_lo)
+ : [state] "r" (state)
+ : "memory"
+ );
+}
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/filterbanks_neon.c b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/filterbanks_neon.c
new file mode 100644
index 0000000000..a31cea6001
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/filterbanks_neon.c
@@ -0,0 +1,278 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+// Contains the function WebRtcIsacfix_AllpassFilter2FixDec16Neon() of the
+// iSAC codec, optimized for the ARM Neon platform. Bit-exact with
+// WebRtcIsacfix_AllpassFilter2FixDec16C() in filterbanks.c. The prototype
+// C code is at the end of this file.
+
+#include <arm_neon.h>
+
+#include "modules/audio_coding/codecs/isac/fix/source/filterbank_internal.h"
+#include "rtc_base/checks.h"
+
+void WebRtcIsacfix_AllpassFilter2FixDec16Neon(
+ int16_t* data_ch1, // Input and output in channel 1, in Q0
+ int16_t* data_ch2, // Input and output in channel 2, in Q0
+ const int16_t* factor_ch1, // Scaling factor for channel 1, in Q15
+ const int16_t* factor_ch2, // Scaling factor for channel 2, in Q15
+ const int length, // Length of the data buffers
+ int32_t* filter_state_ch1, // Filter state for channel 1, in Q16
+ int32_t* filter_state_ch2) { // Filter state for channel 2, in Q16
+ RTC_DCHECK_EQ(0, length % 2);
+ int n = 0;
+ int16x4_t factorv;
+ int16x4_t datav;
+ int32x4_t statev;
+
+ // Load factor_ch1 and factor_ch2.
+ factorv = vld1_dup_s16(factor_ch1);
+ factorv = vld1_lane_s16(factor_ch1 + 1, factorv, 1);
+ factorv = vld1_lane_s16(factor_ch2, factorv, 2);
+ factorv = vld1_lane_s16(factor_ch2 + 1, factorv, 3);
+
+ // Load filter_state_ch1[0] and filter_state_ch2[0].
+ statev = vld1q_dup_s32(filter_state_ch1);
+ statev = vld1q_lane_s32(filter_state_ch2, statev, 2);
+
+ // Loop unrolling preprocessing.
+ int32x4_t a;
+ int16x4_t tmp1, tmp2;
+
+ // Load data_ch1[0] and data_ch2[0].
+ datav = vld1_dup_s16(data_ch1);
+ datav = vld1_lane_s16(data_ch2, datav, 2);
+
+ a = vqdmlal_s16(statev, datav, factorv);
+ tmp1 = vshrn_n_s32(a, 16);
+
+ // Update filter_state_ch1[0] and filter_state_ch2[0].
+ statev = vqdmlsl_s16(vshll_n_s16(datav, 16), tmp1, factorv);
+
+ // Load filter_state_ch1[1] and filter_state_ch2[1].
+ statev = vld1q_lane_s32(filter_state_ch1 + 1, statev, 1);
+ statev = vld1q_lane_s32(filter_state_ch2 + 1, statev, 3);
+
+ // Load data_ch1[1] and data_ch2[1].
+ tmp1 = vld1_lane_s16(data_ch1 + 1, tmp1, 1);
+ tmp1 = vld1_lane_s16(data_ch2 + 1, tmp1, 3);
+ datav = vrev32_s16(tmp1);
+
+ // Loop unrolling processing.
+ for (n = 0; n < length - 2; n += 2) {
+ a = vqdmlal_s16(statev, datav, factorv);
+ tmp1 = vshrn_n_s32(a, 16);
+ // Store data_ch1[n] and data_ch2[n].
+ vst1_lane_s16(data_ch1 + n, tmp1, 1);
+ vst1_lane_s16(data_ch2 + n, tmp1, 3);
+
+ // Update filter_state_ch1[0], filter_state_ch1[1]
+ // and filter_state_ch2[0], filter_state_ch2[1].
+ statev = vqdmlsl_s16(vshll_n_s16(datav, 16), tmp1, factorv);
+
+ // Load data_ch1[n + 2] and data_ch2[n + 2].
+ tmp1 = vld1_lane_s16(data_ch1 + n + 2, tmp1, 1);
+ tmp1 = vld1_lane_s16(data_ch2 + n + 2, tmp1, 3);
+ datav = vrev32_s16(tmp1);
+
+ a = vqdmlal_s16(statev, datav, factorv);
+ tmp2 = vshrn_n_s32(a, 16);
+ // Store data_ch1[n + 1] and data_ch2[n + 1].
+ vst1_lane_s16(data_ch1 + n + 1, tmp2, 1);
+ vst1_lane_s16(data_ch2 + n + 1, tmp2, 3);
+
+ // Update filter_state_ch1[0], filter_state_ch1[1]
+ // and filter_state_ch2[0], filter_state_ch2[1].
+ statev = vqdmlsl_s16(vshll_n_s16(datav, 16), tmp2, factorv);
+
+ // Load data_ch1[n + 3] and data_ch2[n + 3].
+ tmp2 = vld1_lane_s16(data_ch1 + n + 3, tmp2, 1);
+ tmp2 = vld1_lane_s16(data_ch2 + n + 3, tmp2, 3);
+ datav = vrev32_s16(tmp2);
+ }
+
+ // Loop unrolling post-processing.
+ a = vqdmlal_s16(statev, datav, factorv);
+ tmp1 = vshrn_n_s32(a, 16);
+ // Store data_ch1[n] and data_ch2[n].
+ vst1_lane_s16(data_ch1 + n, tmp1, 1);
+ vst1_lane_s16(data_ch2 + n, tmp1, 3);
+
+ // Update filter_state_ch1[0], filter_state_ch1[1]
+ // and filter_state_ch2[0], filter_state_ch2[1].
+ statev = vqdmlsl_s16(vshll_n_s16(datav, 16), tmp1, factorv);
+ // Store filter_state_ch1[0] and filter_state_ch2[0].
+ vst1q_lane_s32(filter_state_ch1, statev, 0);
+ vst1q_lane_s32(filter_state_ch2, statev, 2);
+
+ datav = vrev32_s16(tmp1);
+ a = vqdmlal_s16(statev, datav, factorv);
+ tmp2 = vshrn_n_s32(a, 16);
+ // Store data_ch1[n + 1] and data_ch2[n + 1].
+ vst1_lane_s16(data_ch1 + n + 1, tmp2, 1);
+ vst1_lane_s16(data_ch2 + n + 1, tmp2, 3);
+
+ // Update filter_state_ch1[1] and filter_state_ch2[1].
+ statev = vqdmlsl_s16(vshll_n_s16(datav, 16), tmp2, factorv);
+ // Store filter_state_ch1[1] and filter_state_ch2[1].
+ vst1q_lane_s32(filter_state_ch1 + 1, statev, 1);
+ vst1q_lane_s32(filter_state_ch2 + 1, statev, 3);
+}
+
+// The following is the C prototype of the Neon-optimized function above.
+//void AllpassFilter2FixDec16BothChannels(
+// int16_t *data_ch1, // Input and output in channel 1, in Q0
+// int16_t *data_ch2, // Input and output in channel 2, in Q0
+// const int16_t *factor_ch1, // Scaling factor for channel 1, in Q15
+// const int16_t *factor_ch2, // Scaling factor for channel 2, in Q15
+// const int length, // Length of the data buffers
+// int32_t *filter_state_ch1, // Filter state for channel 1, in Q16
+// int32_t *filter_state_ch2) { // Filter state for channel 2, in Q16
+// int n = 0;
+// int32_t state0_ch1 = filter_state_ch1[0], state1_ch1 = filter_state_ch1[1];
+// int32_t state0_ch2 = filter_state_ch2[0], state1_ch2 = filter_state_ch2[1];
+// int16_t sample0_ch1 = 0, sample0_ch2 = 0;
+// int16_t sample1_ch1 = 0, sample1_ch2 = 0;
+// int32_t a0_ch1 = 0, a0_ch2 = 0;
+// int32_t b0_ch1 = 0, b0_ch2 = 0;
+//
+// int32_t a1_ch1 = 0, a1_ch2 = 0;
+// int32_t b1_ch1 = 0, b1_ch2 = 0;
+// int32_t b2_ch1 = 0, b2_ch2 = 0;
+//
+// // Loop unrolling preprocessing.
+//
+// sample0_ch1 = data_ch1[n];
+// sample0_ch2 = data_ch2[n];
+//
+// a0_ch1 = (factor_ch1[0] * sample0_ch1) << 1;
+// a0_ch2 = (factor_ch2[0] * sample0_ch2) << 1;
+//
+// b0_ch1 = WebRtcSpl_AddSatW32(a0_ch1, state0_ch1);
+// b0_ch2 = WebRtcSpl_AddSatW32(a0_ch2, state0_ch2); //Q16+Q16=Q16
+//
+// a0_ch1 = -factor_ch1[0] * (int16_t)(b0_ch1 >> 16);
+// a0_ch2 = -factor_ch2[0] * (int16_t)(b0_ch2 >> 16);
+//
+// state0_ch1 = WebRtcSpl_AddSatW32(a0_ch1 <<1, (uint32_t)sample0_ch1 << 16);
+// state0_ch2 = WebRtcSpl_AddSatW32(a0_ch2 <<1, (uint32_t)sample0_ch2 << 16);
+//
+// sample1_ch1 = data_ch1[n + 1];
+// sample0_ch1 = (int16_t) (b0_ch1 >> 16); //Save as Q0
+// sample1_ch2 = data_ch2[n + 1];
+// sample0_ch2 = (int16_t) (b0_ch2 >> 16); //Save as Q0
+//
+//
+// for (n = 0; n < length - 2; n += 2) {
+// a1_ch1 = (factor_ch1[0] * sample1_ch1) << 1;
+// a0_ch1 = (factor_ch1[1] * sample0_ch1) << 1;
+// a1_ch2 = (factor_ch2[0] * sample1_ch2) << 1;
+// a0_ch2 = (factor_ch2[1] * sample0_ch2) << 1;
+//
+// b1_ch1 = WebRtcSpl_AddSatW32(a1_ch1, state0_ch1);
+// b0_ch1 = WebRtcSpl_AddSatW32(a0_ch1, state1_ch1); //Q16+Q16=Q16
+// b1_ch2 = WebRtcSpl_AddSatW32(a1_ch2, state0_ch2); //Q16+Q16=Q16
+// b0_ch2 = WebRtcSpl_AddSatW32(a0_ch2, state1_ch2); //Q16+Q16=Q16
+//
+// a1_ch1 = -factor_ch1[0] * (int16_t)(b1_ch1 >> 16);
+// a0_ch1 = -factor_ch1[1] * (int16_t)(b0_ch1 >> 16);
+// a1_ch2 = -factor_ch2[0] * (int16_t)(b1_ch2 >> 16);
+// a0_ch2 = -factor_ch2[1] * (int16_t)(b0_ch2 >> 16);
+//
+// state0_ch1 = WebRtcSpl_AddSatW32(a1_ch1<<1, (uint32_t)sample1_ch1 <<16);
+// state1_ch1 = WebRtcSpl_AddSatW32(a0_ch1<<1, (uint32_t)sample0_ch1 <<16);
+// state0_ch2 = WebRtcSpl_AddSatW32(a1_ch2<<1, (uint32_t)sample1_ch2 <<16);
+// state1_ch2 = WebRtcSpl_AddSatW32(a0_ch2<<1, (uint32_t)sample0_ch2 <<16);
+//
+// sample0_ch1 = data_ch1[n + 2];
+// sample1_ch1 = (int16_t) (b1_ch1 >> 16); //Save as Q0
+// sample0_ch2 = data_ch2[n + 2];
+// sample1_ch2 = (int16_t) (b1_ch2 >> 16); //Save as Q0
+//
+// a0_ch1 = (factor_ch1[0] * sample0_ch1) << 1;
+// a1_ch1 = (factor_ch1[1] * sample1_ch1) << 1;
+// a0_ch2 = (factor_ch2[0] * sample0_ch2) << 1;
+// a1_ch2 = (factor_ch2[1] * sample1_ch2) << 1;
+//
+// b2_ch1 = WebRtcSpl_AddSatW32(a0_ch1, state0_ch1);
+// b1_ch1 = WebRtcSpl_AddSatW32(a1_ch1, state1_ch1); //Q16+Q16=Q16
+// b2_ch2 = WebRtcSpl_AddSatW32(a0_ch2, state0_ch2); //Q16+Q16=Q16
+// b1_ch2 = WebRtcSpl_AddSatW32(a1_ch2, state1_ch2); //Q16+Q16=Q16
+//
+// a0_ch1 = -factor_ch1[0] * (int16_t)(b2_ch1 >> 16);
+// a1_ch1 = -factor_ch1[1] * (int16_t)(b1_ch1 >> 16);
+// a0_ch2 = -factor_ch2[0] * (int16_t)(b2_ch2 >> 16);
+// a1_ch2 = -factor_ch2[1] * (int16_t)(b1_ch2 >> 16);
+//
+// state0_ch1 = WebRtcSpl_AddSatW32(a0_ch1<<1, (uint32_t)sample0_ch1<<16);
+// state1_ch1 = WebRtcSpl_AddSatW32(a1_ch1<<1, (uint32_t)sample1_ch1<<16);
+// state0_ch2 = WebRtcSpl_AddSatW32(a0_ch2<<1, (uint32_t)sample0_ch2<<16);
+// state1_ch2 = WebRtcSpl_AddSatW32(a1_ch2<<1, (uint32_t)sample1_ch2<<16);
+//
+//
+// sample1_ch1 = data_ch1[n + 3];
+// sample0_ch1 = (int16_t) (b2_ch1 >> 16); //Save as Q0
+// sample1_ch2 = data_ch2[n + 3];
+// sample0_ch2 = (int16_t) (b2_ch2 >> 16); //Save as Q0
+//
+// data_ch1[n] = (int16_t) (b0_ch1 >> 16); //Save as Q0
+// data_ch1[n + 1] = (int16_t) (b1_ch1 >> 16); //Save as Q0
+// data_ch2[n] = (int16_t) (b0_ch2 >> 16);
+// data_ch2[n + 1] = (int16_t) (b1_ch2 >> 16);
+// }
+//
+// // Loop unrolling post-processing.
+//
+// a1_ch1 = (factor_ch1[0] * sample1_ch1) << 1;
+// a0_ch1 = (factor_ch1[1] * sample0_ch1) << 1;
+// a1_ch2 = (factor_ch2[0] * sample1_ch2) << 1;
+// a0_ch2 = (factor_ch2[1] * sample0_ch2) << 1;
+//
+// b1_ch1 = WebRtcSpl_AddSatW32(a1_ch1, state0_ch1);
+// b0_ch1 = WebRtcSpl_AddSatW32(a0_ch1, state1_ch1);
+// b1_ch2 = WebRtcSpl_AddSatW32(a1_ch2, state0_ch2);
+// b0_ch2 = WebRtcSpl_AddSatW32(a0_ch2, state1_ch2);
+//
+// a1_ch1 = -factor_ch1[0] * (int16_t)(b1_ch1 >> 16);
+// a0_ch1 = -factor_ch1[1] * (int16_t)(b0_ch1 >> 16);
+// a1_ch2 = -factor_ch2[0] * (int16_t)(b1_ch2 >> 16);
+// a0_ch2 = -factor_ch2[1] * (int16_t)(b0_ch2 >> 16);
+//
+// state0_ch1 = WebRtcSpl_AddSatW32(a1_ch1<<1, (uint32_t)sample1_ch1 << 16);
+// state1_ch1 = WebRtcSpl_AddSatW32(a0_ch1<<1, (uint32_t)sample0_ch1 << 16);
+// state0_ch2 = WebRtcSpl_AddSatW32(a1_ch2<<1, (uint32_t)sample1_ch2 << 16);
+// state1_ch2 = WebRtcSpl_AddSatW32(a0_ch2<<1, (uint32_t)sample0_ch2 << 16);
+//
+// data_ch1[n] = (int16_t) (b0_ch1 >> 16); //Save as Q0
+// data_ch2[n] = (int16_t) (b0_ch2 >> 16);
+//
+// sample1_ch1 = (int16_t) (b1_ch1 >> 16); //Save as Q0
+// sample1_ch2 = (int16_t) (b1_ch2 >> 16); //Save as Q0
+//
+// a1_ch1 = (factor_ch1[1] * sample1_ch1) << 1;
+// a1_ch2 = (factor_ch2[1] * sample1_ch2) << 1;
+//
+// b1_ch1 = WebRtcSpl_AddSatW32(a1_ch1, state1_ch1); //Q16+Q16=Q16
+// b1_ch2 = WebRtcSpl_AddSatW32(a1_ch2, state1_ch2); //Q16+Q16=Q16
+//
+// a1_ch1 = -factor_ch1[1] * (int16_t)(b1_ch1 >> 16);
+// a1_ch2 = -factor_ch2[1] * (int16_t)(b1_ch2 >> 16);
+//
+// state1_ch1 = WebRtcSpl_AddSatW32(a1_ch1<<1, (uint32_t)sample1_ch1<<16);
+// state1_ch2 = WebRtcSpl_AddSatW32(a1_ch2<<1, (uint32_t)sample1_ch2<<16);
+//
+// data_ch1[n + 1] = (int16_t) (b1_ch1 >> 16); //Save as Q0
+// data_ch2[n + 1] = (int16_t) (b1_ch2 >> 16);
+//
+// filter_state_ch1[0] = state0_ch1;
+// filter_state_ch1[1] = state1_ch1;
+// filter_state_ch2[0] = state0_ch2;
+// filter_state_ch2[1] = state1_ch2;
+//}
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/filterbanks_unittest.cc b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/filterbanks_unittest.cc
new file mode 100644
index 0000000000..4a3db2324a
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/filterbanks_unittest.cc
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+#include "modules/audio_coding/codecs/isac/fix/source/filterbank_internal.h"
+#include "modules/audio_coding/codecs/isac/fix/source/filterbank_tables.h"
+#include "modules/audio_coding/codecs/isac/fix/source/settings.h"
+#include "rtc_base/sanitizer.h"
+#include "system_wrappers/include/cpu_features_wrapper.h"
+#include "test/gtest.h"
+
+class FilterBanksTest : public ::testing::Test {
+ protected:
+ // Pass a function pointer to the Tester function.
+ void RTC_NO_SANITIZE("signed-integer-overflow") // bugs.webrtc.org/5513
+ CalculateResidualEnergyTester(
+ AllpassFilter2FixDec16 AllpassFilter2FixDec16Function) {
+ const int kSamples = QLOOKAHEAD;
+ const int kState = 2;
+ int16_t data_ch1[kSamples] = {0};
+ int16_t data_ch2[kSamples] = {0};
+ int32_t state_ch1[kState] = {0};
+ int32_t state_ch2[kState] = {0};
+ const int32_t out_state_ch1[kState] = {-809122714, 1645972152};
+ const int32_t out_state_ch2[kState] = {428019288, 1057309936};
+ const int32_t out_data_ch1[kSamples] = {
+ 0, 0, 347, 10618, 16718, -7089, 32767, 16913,
+ 27042, 8377, -22973, -28372, -27603, -14804, 398, -25332,
+ -11200, 18044, 25223, -6839, 1116, -23984, 32717, 7364};
+ const int32_t out_data_ch2[kSamples] = {
+ 0, 0, 3010, 22351, 21106, 16969, -2095, -664,
+ 3513, -30980, 32767, -23839, 13335, 20289, -6831, 339,
+ -17207, 32767, 4959, 6177, 32767, 16599, -4747, 20504};
+ int sign = 1;
+
+ for (int i = 0; i < kSamples; i++) {
+ sign *= -1;
+ data_ch1[i] = sign * WEBRTC_SPL_WORD32_MAX / (i * i + 1);
+ data_ch2[i] = sign * WEBRTC_SPL_WORD32_MIN / (i * i + 1);
+ // UBSan: -1 * -2147483648 cannot be represented in type 'int'
+    }
+
+ AllpassFilter2FixDec16Function(
+ data_ch1, data_ch2, WebRtcIsacfix_kUpperApFactorsQ15,
+ WebRtcIsacfix_kLowerApFactorsQ15, kSamples, state_ch1, state_ch2);
+
+ for (int i = 0; i < kSamples; i++) {
+ EXPECT_EQ(out_data_ch1[i], data_ch1[i]);
+ EXPECT_EQ(out_data_ch2[i], data_ch2[i]);
+ }
+ for (int i = 0; i < kState; i++) {
+ EXPECT_EQ(out_state_ch1[i], state_ch1[i]);
+ EXPECT_EQ(out_state_ch2[i], state_ch2[i]);
+ }
+ }
+};
+
+TEST_F(FilterBanksTest, AllpassFilter2FixDec16Test) {
+ CalculateResidualEnergyTester(WebRtcIsacfix_AllpassFilter2FixDec16C);
+#if defined(WEBRTC_HAS_NEON)
+ CalculateResidualEnergyTester(WebRtcIsacfix_AllpassFilter2FixDec16Neon);
+#endif
+}
+
+TEST_F(FilterBanksTest, HighpassFilterFixDec32Test) {
+ const int kSamples = 20;
+ int16_t in[kSamples];
+ int32_t state[2] = {12345, 987654};
+#ifdef WEBRTC_ARCH_ARM_V7
+ int32_t out[kSamples] = {-1040, -1035, -22875, -1397, -27604, 20018, 7917,
+ -1279, -8552, -14494, -7558, -23537, -27258, -30554,
+ -32768, -3432, -32768, 25215, -27536, 22436};
+#else
+ int32_t out[kSamples] = {-1040, -1035, -22875, -1397, -27604, 20017, 7915,
+ -1280, -8554, -14496, -7561, -23541, -27263, -30560,
+ -32768, -3441, -32768, 25203, -27550, 22419};
+#endif
+ HighpassFilterFixDec32 WebRtcIsacfix_HighpassFilterFixDec32;
+#if defined(MIPS_DSP_R1_LE)
+ WebRtcIsacfix_HighpassFilterFixDec32 =
+ WebRtcIsacfix_HighpassFilterFixDec32MIPS;
+#else
+ WebRtcIsacfix_HighpassFilterFixDec32 = WebRtcIsacfix_HighpassFilterFixDec32C;
+#endif
+
+ for (int i = 0; i < kSamples; i++) {
+ in[i] = WEBRTC_SPL_WORD32_MAX / (i + 1);
+ }
+
+ WebRtcIsacfix_HighpassFilterFixDec32(in, kSamples,
+ WebRtcIsacfix_kHPStCoeffOut1Q30, state);
+
+ for (int i = 0; i < kSamples; i++) {
+ EXPECT_EQ(out[i], in[i]);
+ }
+}
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/filters.c b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/filters.c
new file mode 100644
index 0000000000..838ba4b3e8
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/filters.c
@@ -0,0 +1,112 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/checks.h"
+#include "modules/audio_coding/codecs/isac/fix/source/codec.h"
+
+// Autocorrelation function in fixed point.
+// NOTE! Different from SPLIB-version in how it scales the signal.
+int WebRtcIsacfix_AutocorrC(int32_t* __restrict r,
+ const int16_t* __restrict x,
+ int16_t N,
+ int16_t order,
+ int16_t* __restrict scale) {
+ int i = 0;
+ int j = 0;
+ int16_t scaling = 0;
+ int32_t sum = 0;
+ uint32_t temp = 0;
+ int64_t prod = 0;
+
+  // Assumptions made by the ARM assembly code.
+ RTC_DCHECK_EQ(0, N % 4);
+ RTC_DCHECK_GE(N, 8);
+
+ // Calculate r[0].
+ for (i = 0; i < N; i++) {
+ prod += x[i] * x[i];
+ }
+
+ // Calculate scaling (the value of shifting).
+ temp = (uint32_t)(prod >> 31);
+  if (temp == 0) {
+ scaling = 0;
+ } else {
+ scaling = 32 - WebRtcSpl_NormU32(temp);
+ }
+ r[0] = (int32_t)(prod >> scaling);
+
+ // Perform the actual correlation calculation.
+ for (i = 1; i < order + 1; i++) {
+ prod = 0;
+ for (j = 0; j < N - i; j++) {
+ prod += x[j] * x[i + j];
+ }
+ sum = (int32_t)(prod >> scaling);
+ r[i] = sum;
+ }
+
+ *scale = scaling;
+
+  return (order + 1);
+}
+
+static const int32_t kApUpperQ15[ALLPASSSECTIONS] = { 1137, 12537 };
+static const int32_t kApLowerQ15[ALLPASSSECTIONS] = { 5059, 24379 };
+
+
+static void AllpassFilterForDec32(int16_t *InOut16, //Q0
+ const int32_t *APSectionFactors, //Q15
+ int16_t lengthInOut,
+ int32_t *FilterState) //Q16
+{
+ int n, j;
+ int32_t a, b;
+
+ for (j=0; j<ALLPASSSECTIONS; j++) {
+ for (n=0;n<lengthInOut;n+=2){
+ a = WEBRTC_SPL_MUL_16_32_RSFT16(InOut16[n], APSectionFactors[j]); //Q0*Q31=Q31 shifted 16 gives Q15
+ a <<= 1; // Q15 -> Q16
+ b = WebRtcSpl_AddSatW32(a, FilterState[j]); //Q16+Q16=Q16
+ // `a` in Q15 (Q0*Q31=Q31 shifted 16 gives Q15).
+ a = WEBRTC_SPL_MUL_16_32_RSFT16(b >> 16, -APSectionFactors[j]);
+ // FilterState[j]: Q15<<1 + Q0<<16 = Q16 + Q16 = Q16
+ FilterState[j] = WebRtcSpl_AddSatW32(a << 1, (uint32_t)InOut16[n] << 16);
+ InOut16[n] = (int16_t)(b >> 16); // Save as Q0.
+ }
+ }
+}
+
+
+void WebRtcIsacfix_DecimateAllpass32(const int16_t *in,
+ int32_t *state_in, /* array of size: 2*ALLPASSSECTIONS+1 */
+ int16_t N, /* number of input samples */
+ int16_t *out) /* array of size N/2 */
+{
+ int n;
+ int16_t data_vec[PITCH_FRAME_LEN];
+
+ /* copy input */
+ memcpy(data_vec + 1, in, sizeof(int16_t) * (N - 1));
+
+ data_vec[0] = (int16_t)(state_in[2 * ALLPASSSECTIONS] >> 16); // z^-1 state.
+ state_in[2 * ALLPASSSECTIONS] = (uint32_t)in[N - 1] << 16;
+
+ AllpassFilterForDec32(data_vec+1, kApUpperQ15, N, state_in);
+ AllpassFilterForDec32(data_vec, kApLowerQ15, N, state_in+ALLPASSSECTIONS);
+
+ for (n=0;n<N/2;n++) {
+ out[n] = WebRtcSpl_AddSatW16(data_vec[2 * n], data_vec[2 * n + 1]);
+ }
+}
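
A usage sketch for the autocorrelation routine above, assuming its prototype is exposed via codec.h; the input honors the DCHECKed constraints (N % 4 == 0, N >= 8) and the test signal is arbitrary:

    #include <stdio.h>

    #include "modules/audio_coding/codecs/isac/fix/source/codec.h"

    int main(void) {
      int16_t x[240];
      int32_t r[5];
      int16_t scale = 0;
      int i, lags;

      for (i = 0; i < 240; i++) {
        x[i] = (int16_t)((i & 1) ? -1000 : 1000);  /* arbitrary test signal */
      }
      lags = WebRtcIsacfix_AutocorrC(r, x, 240, 4, &scale);
      /* r[k] = (sum over j of x[j] * x[j + k]) >> scale; r[0] is the energy. */
      printf("lags=%d scale=%d r[0]=%ld\n", lags, (int)scale, (long)r[0]);
      return 0;
    }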
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/filters_mips.c b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/filters_mips.c
new file mode 100644
index 0000000000..ded3d03209
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/filters_mips.c
@@ -0,0 +1,365 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/isac/fix/source/codec.h"
+
+// MIPS optimized implementation of the Autocorrelation function in fixed point.
+// NOTE! Different from SPLIB-version in how it scales the signal.
+int WebRtcIsacfix_AutocorrMIPS(int32_t* __restrict r,
+ const int16_t* __restrict x,
+ int16_t N,
+ int16_t order,
+ int16_t* __restrict scale) {
+ int i = 0;
+ int16_t scaling = 0;
+ int16_t* in = (int16_t*)x;
+ int loop_size = (int)(N >> 3);
+ int count = (int)(N & 7);
+  // Declare temporary variables used as register values.
+ int32_t r0, r1, r2, r3;
+#if !defined(MIPS_DSP_R2_LE)
+ // For non-DSPR2 optimizations 4 more registers are used.
+ int32_t r4, r5, r6, r7;
+#endif
+
+ // Calculate r[0] and scaling needed.
+ __asm __volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "mult $0, $0 \n\t"
+ // Loop is unrolled 8 times, set accumulator to zero in branch delay slot.
+ "beqz %[loop_size], 2f \n\t"
+ " mult $0, $0 \n\t"
+ "1: \n\t"
+ // Load 8 samples per loop iteration.
+#if defined(MIPS_DSP_R2_LE)
+ "ulw %[r0], 0(%[in]) \n\t"
+ "ulw %[r1], 4(%[in]) \n\t"
+ "ulw %[r2], 8(%[in]) \n\t"
+ "ulw %[r3], 12(%[in]) \n\t"
+#else
+ "lh %[r0], 0(%[in]) \n\t"
+ "lh %[r1], 2(%[in]) \n\t"
+ "lh %[r2], 4(%[in]) \n\t"
+ "lh %[r3], 6(%[in]) \n\t"
+ "lh %[r4], 8(%[in]) \n\t"
+ "lh %[r5], 10(%[in]) \n\t"
+ "lh %[r6], 12(%[in]) \n\t"
+ "lh %[r7], 14(%[in]) \n\t"
+#endif
+ "addiu %[loop_size], %[loop_size], -1 \n\t"
+ // Multiply and accumulate.
+#if defined(MIPS_DSP_R2_LE)
+ "dpa.w.ph $ac0, %[r0], %[r0] \n\t"
+ "dpa.w.ph $ac0, %[r1], %[r1] \n\t"
+ "dpa.w.ph $ac0, %[r2], %[r2] \n\t"
+ "dpa.w.ph $ac0, %[r3], %[r3] \n\t"
+#else
+ "madd %[r0], %[r0] \n\t"
+ "madd %[r1], %[r1] \n\t"
+ "madd %[r2], %[r2] \n\t"
+ "madd %[r3], %[r3] \n\t"
+ "madd %[r4], %[r4] \n\t"
+ "madd %[r5], %[r5] \n\t"
+ "madd %[r6], %[r6] \n\t"
+ "madd %[r7], %[r7] \n\t"
+#endif
+ "bnez %[loop_size], 1b \n\t"
+ " addiu %[in], %[in], 16 \n\t"
+ "2: \n\t"
+ "beqz %[count], 4f \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ " extr.w %[r0], $ac0, 31 \n\t"
+#else
+ " mfhi %[r2] \n\t"
+#endif
+ // Process remaining samples (if any).
+ "3: \n\t"
+ "lh %[r0], 0(%[in]) \n\t"
+ "addiu %[count], %[count], -1 \n\t"
+ "madd %[r0], %[r0] \n\t"
+ "bnez %[count], 3b \n\t"
+ " addiu %[in], %[in], 2 \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "extr.w %[r0], $ac0, 31 \n\t"
+#else
+ "mfhi %[r2] \n\t"
+#endif
+ "4: \n\t"
+#if !defined(MIPS_DSP_R1_LE)
+ "mflo %[r3] \n\t"
+ "sll %[r0], %[r2], 1 \n\t"
+ "srl %[r1], %[r3], 31 \n\t"
+ "addu %[r0], %[r0], %[r1] \n\t"
+#endif
+ // Calculate scaling (the value of shifting).
+ "clz %[r1], %[r0] \n\t"
+ "addiu %[r1], %[r1], -32 \n\t"
+ "subu %[scaling], $0, %[r1] \n\t"
+ "slti %[r1], %[r0], 0x1 \n\t"
+ "movn %[scaling], $0, %[r1] \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "extrv.w %[r0], $ac0, %[scaling] \n\t"
+ "mfhi %[r2], $ac0 \n\t"
+#else
+ "addiu %[r1], %[scaling], -32 \n\t"
+ "subu %[r1], $0, %[r1] \n\t"
+ "sllv %[r1], %[r2], %[r1] \n\t"
+ "srlv %[r0], %[r3], %[scaling] \n\t"
+ "addu %[r0], %[r0], %[r1] \n\t"
+#endif
+ "slti %[r1], %[scaling], 32 \n\t"
+ "movz %[r0], %[r2], %[r1] \n\t"
+ ".set pop \n\t"
+ : [loop_size] "+r" (loop_size), [in] "+r" (in), [r0] "=&r" (r0),
+ [r1] "=&r" (r1), [r2] "=&r" (r2), [r3] "=&r" (r3),
+#if !defined(MIPS_DSP_R2_LE)
+ [r4] "=&r" (r4), [r5] "=&r" (r5), [r6] "=&r" (r6), [r7] "=&r" (r7),
+#endif
+ [count] "+r" (count), [scaling] "=r" (scaling)
+ : [N] "r" (N)
+ : "memory", "hi", "lo"
+ );
+ r[0] = r0;
+
+ // Correlation calculation is divided in 3 cases depending on the scaling
+ // value (different accumulator manipulation needed). Three slightly different
+ // loops are written in order to avoid branches inside the loop.
+ if (scaling == 0) {
+ // In this case, the result will be in low part of the accumulator.
+ for (i = 1; i < order + 1; i++) {
+ in = (int16_t*)x;
+ int16_t* in1 = (int16_t*)x + i;
+ count = N - i;
+ loop_size = (count) >> 2;
+ __asm __volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "mult $0, $0 \n\t"
+ "beqz %[loop_size], 2f \n\t"
+ " andi %[count], %[count], 0x3 \n\t"
+ // Loop processing 4 pairs of samples per iteration.
+ "1: \n\t"
+#if defined(MIPS_DSP_R2_LE)
+ "ulw %[r0], 0(%[in]) \n\t"
+ "ulw %[r1], 0(%[in1]) \n\t"
+ "ulw %[r2], 4(%[in]) \n\t"
+ "ulw %[r3], 4(%[in1]) \n\t"
+#else
+ "lh %[r0], 0(%[in]) \n\t"
+ "lh %[r1], 0(%[in1]) \n\t"
+ "lh %[r2], 2(%[in]) \n\t"
+ "lh %[r3], 2(%[in1]) \n\t"
+ "lh %[r4], 4(%[in]) \n\t"
+ "lh %[r5], 4(%[in1]) \n\t"
+ "lh %[r6], 6(%[in]) \n\t"
+ "lh %[r7], 6(%[in1]) \n\t"
+#endif
+ "addiu %[loop_size], %[loop_size], -1 \n\t"
+#if defined(MIPS_DSP_R2_LE)
+ "dpa.w.ph $ac0, %[r0], %[r1] \n\t"
+ "dpa.w.ph $ac0, %[r2], %[r3] \n\t"
+#else
+ "madd %[r0], %[r1] \n\t"
+ "madd %[r2], %[r3] \n\t"
+ "madd %[r4], %[r5] \n\t"
+ "madd %[r6], %[r7] \n\t"
+#endif
+ "addiu %[in], %[in], 8 \n\t"
+ "bnez %[loop_size], 1b \n\t"
+ " addiu %[in1], %[in1], 8 \n\t"
+ "2: \n\t"
+ "beqz %[count], 4f \n\t"
+ " mflo %[r0] \n\t"
+ // Process remaining samples (if any).
+ "3: \n\t"
+ "lh %[r0], 0(%[in]) \n\t"
+ "lh %[r1], 0(%[in1]) \n\t"
+ "addiu %[count], %[count], -1 \n\t"
+ "addiu %[in], %[in], 2 \n\t"
+ "madd %[r0], %[r1] \n\t"
+ "bnez %[count], 3b \n\t"
+ " addiu %[in1], %[in1], 2 \n\t"
+ "mflo %[r0] \n\t"
+ "4: \n\t"
+ ".set pop \n\t"
+ : [loop_size] "+r" (loop_size), [in] "+r" (in), [in1] "+r" (in1),
+#if !defined(MIPS_DSP_R2_LE)
+ [r4] "=&r" (r4), [r5] "=&r" (r5), [r6] "=&r" (r6), [r7] "=&r" (r7),
+#endif
+ [r0] "=&r" (r0), [r1] "=&r" (r1), [r2] "=&r" (r2), [r3] "=&r" (r3),
+ [count] "+r" (count)
+ :
+ : "memory", "hi", "lo"
+ );
+ r[i] = r0;
+ }
+ } else if (scaling == 32) {
+ // In this case, the result will be high part of the accumulator.
+ for (i = 1; i < order + 1; i++) {
+ in = (int16_t*)x;
+ int16_t* in1 = (int16_t*)x + i;
+ count = N - i;
+ loop_size = (count) >> 2;
+ __asm __volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "mult $0, $0 \n\t"
+ "beqz %[loop_size], 2f \n\t"
+ " andi %[count], %[count], 0x3 \n\t"
+ // Loop processing 4 pairs of samples per iteration.
+ "1: \n\t"
+#if defined(MIPS_DSP_R2_LE)
+ "ulw %[r0], 0(%[in]) \n\t"
+ "ulw %[r1], 0(%[in1]) \n\t"
+ "ulw %[r2], 4(%[in]) \n\t"
+ "ulw %[r3], 4(%[in1]) \n\t"
+#else
+ "lh %[r0], 0(%[in]) \n\t"
+ "lh %[r1], 0(%[in1]) \n\t"
+ "lh %[r2], 2(%[in]) \n\t"
+ "lh %[r3], 2(%[in1]) \n\t"
+ "lh %[r4], 4(%[in]) \n\t"
+ "lh %[r5], 4(%[in1]) \n\t"
+ "lh %[r6], 6(%[in]) \n\t"
+ "lh %[r7], 6(%[in1]) \n\t"
+#endif
+ "addiu %[loop_size], %[loop_size], -1 \n\t"
+#if defined(MIPS_DSP_R2_LE)
+ "dpa.w.ph $ac0, %[r0], %[r1] \n\t"
+ "dpa.w.ph $ac0, %[r2], %[r3] \n\t"
+#else
+ "madd %[r0], %[r1] \n\t"
+ "madd %[r2], %[r3] \n\t"
+ "madd %[r4], %[r5] \n\t"
+ "madd %[r6], %[r7] \n\t"
+#endif
+ "addiu %[in], %[in], 8 \n\t"
+ "bnez %[loop_size], 1b \n\t"
+ " addiu %[in1], %[in1], 8 \n\t"
+ "2: \n\t"
+ "beqz %[count], 4f \n\t"
+ " mfhi %[r0] \n\t"
+ // Process remaining samples (if any).
+ "3: \n\t"
+ "lh %[r0], 0(%[in]) \n\t"
+ "lh %[r1], 0(%[in1]) \n\t"
+ "addiu %[count], %[count], -1 \n\t"
+ "addiu %[in], %[in], 2 \n\t"
+ "madd %[r0], %[r1] \n\t"
+ "bnez %[count], 3b \n\t"
+ " addiu %[in1], %[in1], 2 \n\t"
+ "mfhi %[r0] \n\t"
+ "4: \n\t"
+ ".set pop \n\t"
+ : [loop_size] "+r" (loop_size), [in] "+r" (in), [in1] "+r" (in1),
+#if !defined(MIPS_DSP_R2_LE)
+ [r4] "=&r" (r4), [r5] "=&r" (r5), [r6] "=&r" (r6), [r7] "=&r" (r7),
+#endif
+ [r0] "=&r" (r0), [r1] "=&r" (r1), [r2] "=&r" (r2), [r3] "=&r" (r3),
+ [count] "+r" (count)
+ :
+ : "memory", "hi", "lo"
+ );
+ r[i] = r0;
+ }
+ } else {
+ // In this case, the result is obtained by combining low and high parts
+ // of the accumulator.
+#if !defined(MIPS_DSP_R1_LE)
+ int32_t tmp_shift = 32 - scaling;
+#endif
+ for (i = 1; i < order + 1; i++) {
+ in = (int16_t*)x;
+ int16_t* in1 = (int16_t*)x + i;
+ count = N - i;
+ loop_size = (count) >> 2;
+ __asm __volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "mult $0, $0 \n\t"
+ "beqz %[loop_size], 2f \n\t"
+ " andi %[count], %[count], 0x3 \n\t"
+ "1: \n\t"
+#if defined(MIPS_DSP_R2_LE)
+ "ulw %[r0], 0(%[in]) \n\t"
+ "ulw %[r1], 0(%[in1]) \n\t"
+ "ulw %[r2], 4(%[in]) \n\t"
+ "ulw %[r3], 4(%[in1]) \n\t"
+#else
+ "lh %[r0], 0(%[in]) \n\t"
+ "lh %[r1], 0(%[in1]) \n\t"
+ "lh %[r2], 2(%[in]) \n\t"
+ "lh %[r3], 2(%[in1]) \n\t"
+ "lh %[r4], 4(%[in]) \n\t"
+ "lh %[r5], 4(%[in1]) \n\t"
+ "lh %[r6], 6(%[in]) \n\t"
+ "lh %[r7], 6(%[in1]) \n\t"
+#endif
+ "addiu %[loop_size], %[loop_size], -1 \n\t"
+#if defined(MIPS_DSP_R2_LE)
+ "dpa.w.ph $ac0, %[r0], %[r1] \n\t"
+ "dpa.w.ph $ac0, %[r2], %[r3] \n\t"
+#else
+ "madd %[r0], %[r1] \n\t"
+ "madd %[r2], %[r3] \n\t"
+ "madd %[r4], %[r5] \n\t"
+ "madd %[r6], %[r7] \n\t"
+#endif
+ "addiu %[in], %[in], 8 \n\t"
+ "bnez %[loop_size], 1b \n\t"
+ " addiu %[in1], %[in1], 8 \n\t"
+ "2: \n\t"
+ "beqz %[count], 4f \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ " extrv.w %[r0], $ac0, %[scaling] \n\t"
+#else
+ " mfhi %[r0] \n\t"
+#endif
+ "3: \n\t"
+ "lh %[r0], 0(%[in]) \n\t"
+ "lh %[r1], 0(%[in1]) \n\t"
+ "addiu %[count], %[count], -1 \n\t"
+ "addiu %[in], %[in], 2 \n\t"
+ "madd %[r0], %[r1] \n\t"
+ "bnez %[count], 3b \n\t"
+ " addiu %[in1], %[in1], 2 \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "extrv.w %[r0], $ac0, %[scaling] \n\t"
+#else
+ "mfhi %[r0] \n\t"
+#endif
+ "4: \n\t"
+#if !defined(MIPS_DSP_R1_LE)
+ "mflo %[r1] \n\t"
+ "sllv %[r0], %[r0], %[tmp_shift] \n\t"
+ "srlv %[r1], %[r1], %[scaling] \n\t"
+ "addu %[r0], %[r0], %[r1] \n\t"
+#endif
+ ".set pop \n\t"
+ : [loop_size] "+r" (loop_size), [in] "+r" (in), [in1] "+r" (in1),
+#if !defined(MIPS_DSP_R2_LE)
+ [r4] "=&r" (r4), [r5] "=&r" (r5), [r6] "=&r" (r6), [r7] "=&r" (r7),
+#endif
+ [r0] "=&r" (r0), [r1] "=&r" (r1), [r2] "=&r" (r2), [r3] "=&r" (r3),
+ [count] "+r" (count)
+ : [scaling] "r" (scaling)
+#if !defined(MIPS_DSP_R1_LE)
+ , [tmp_shift] "r" (tmp_shift)
+#endif
+ : "memory", "hi", "lo"
+ );
+ r[i] = r0;
+ }
+ }
+ *scale = scaling;
+
+ return (order + 1);
+}
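
The three scaling branches above differ only in where the 32-bit result is read out of the 64-bit HI:LO accumulator: mflo for scaling == 0, mfhi for scaling == 32, and an extrv.w (or a manual mfhi/mflo shift-combine) otherwise. A minimal scalar sketch of that contract, assuming the argument meanings match the assembly:

    #include <stdint.h>

    /* Scalar reference for the per-lag loops above; illustrative only. */
    static void AutocorrLagsRef(int32_t* r, const int16_t* x, int n,
                                int order, int scaling) {
      for (int i = 1; i < order + 1; i++) {
        int64_t acc = 0;                     /* models the HI:LO pair */
        for (int j = 0; j < n - i; j++)
          acc += (int32_t)x[j] * x[j + i];   /* madd / dpa.w.ph */
        if (scaling == 0)
          r[i] = (int32_t)acc;               /* mflo: low 32 bits */
        else if (scaling == 32)
          r[i] = (int32_t)(acc >> 32);       /* mfhi: high 32 bits */
        else
          r[i] = (int32_t)(acc >> scaling);  /* extrv.w / shift-combine */
      }
    }
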
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/filters_neon.c b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/filters_neon.c
new file mode 100644
index 0000000000..1734a969cb
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/filters_neon.c
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <arm_neon.h>
+
+#include "rtc_base/checks.h"
+#include "modules/audio_coding/codecs/isac/fix/source/codec.h"
+
+// Autocorrelation function in fixed point.
+// NOTE: Differs from the SPLIB version in how it scales the signal.
+int WebRtcIsacfix_AutocorrNeon(int32_t* __restrict r,
+ const int16_t* x,
+ int16_t n,
+ int16_t order,
+ int16_t* __restrict scale) {
+ int i = 0;
+ int16_t scaling = 0;
+ uint32_t temp = 0;
+ int64_t prod = 0;
+ int64_t prod_tail = 0;
+
+ RTC_DCHECK_EQ(0, n % 4);
+ RTC_DCHECK_GE(n, 8);
+
+ // Calculate r[0].
+ int16x4_t x0_v;
+ int32x4_t tmpa0_v;
+ int64x2_t tmpb_v;
+
+ tmpb_v = vdupq_n_s64(0);
+ const int16_t* x_start = x;
+ const int16_t* x_end0 = x_start + n;
+ while (x_start < x_end0) {
+ x0_v = vld1_s16(x_start);
+ tmpa0_v = vmull_s16(x0_v, x0_v);
+ tmpb_v = vpadalq_s32(tmpb_v, tmpa0_v);
+ x_start += 4;
+ }
+
+#ifdef WEBRTC_ARCH_ARM64
+ prod = vaddvq_s64(tmpb_v);
+#else
+ prod = vget_lane_s64(vadd_s64(vget_low_s64(tmpb_v), vget_high_s64(tmpb_v)),
+ 0);
+#endif
+ // Calculate scaling (the value of shifting).
+ temp = (uint32_t)(prod >> 31);
+
+ scaling = temp ? 32 - WebRtcSpl_NormU32(temp) : 0;
+ r[0] = (int32_t)(prod >> scaling);
+
+ int16x8_t x1_v;
+ int16x8_t y_v;
+ int32x4_t tmpa1_v;
+ // Perform the actual correlation calculation.
+ for (i = 1; i < order + 1; i++) {
+ tmpb_v = vdupq_n_s64(0);
+ int rest = (n - i) % 8;
+ x_start = x;
+ x_end0 = x_start + n - i - rest;
+ const int16_t* y_start = x_start + i;
+ while (x_start < x_end0) {
+ x1_v = vld1q_s16(x_start);
+ y_v = vld1q_s16(y_start);
+ tmpa0_v = vmull_s16(vget_low_s16(x1_v), vget_low_s16(y_v));
+#ifdef WEBRTC_ARCH_ARM64
+ tmpa1_v = vmull_high_s16(x1_v, y_v);
+#else
+ tmpa1_v = vmull_s16(vget_high_s16(x1_v), vget_high_s16(y_v));
+#endif
+ tmpb_v = vpadalq_s32(tmpb_v, tmpa0_v);
+ tmpb_v = vpadalq_s32(tmpb_v, tmpa1_v);
+ x_start += 8;
+ y_start += 8;
+ }
+ // The remaining calculation.
+ const int16_t* x_end1 = x + n - i;
+ if (rest >= 4) {
+ int16x4_t x2_v = vld1_s16(x_start);
+ int16x4_t y2_v = vld1_s16(y_start);
+ tmpa0_v = vmull_s16(x2_v, y2_v);
+ tmpb_v = vpadalq_s32(tmpb_v, tmpa0_v);
+ x_start += 4;
+ y_start += 4;
+ }
+#ifdef WEBRTC_ARCH_ARM64
+ prod = vaddvq_s64(tmpb_v);
+#else
+ prod = vget_lane_s64(vadd_s64(vget_low_s64(tmpb_v), vget_high_s64(tmpb_v)),
+ 0);
+#endif
+
+ prod_tail = 0;
+ while (x_start < x_end1) {
+ prod_tail += *x_start * *y_start;
+ ++x_start;
+ ++y_start;
+ }
+
+ r[i] = (int32_t)((prod + prod_tail) >> scaling);
+ }
+
+ *scale = scaling;
+
+ return order + 1;
+}
+
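
The normalization above chooses `scaling` as the smallest right shift that makes the 64-bit energy fit in a signed 32-bit word: `temp` holds the bits of `prod` above position 30, and `32 - WebRtcSpl_NormU32(temp)` is the bit width of that excess. A portable sketch of the same computation, assuming a non-negative energy (it is a sum of squares):

    #include <stdint.h>

    static int AutocorrScalingRef(int64_t energy) {
      uint32_t overflow_bits = (uint32_t)(energy >> 31);
      int scaling = 0;
      while (overflow_bits != 0) {  /* bit width of the excess, i.e. */
        overflow_bits >>= 1;        /* 32 - WebRtcSpl_NormU32(temp)  */
        ++scaling;
      }
      return scaling;
    }
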
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/filters_unittest.cc b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/filters_unittest.cc
new file mode 100644
index 0000000000..192ef89f9f
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/filters_unittest.cc
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/isac/fix/source/codec.h"
+#include "system_wrappers/include/cpu_features_wrapper.h"
+#include "test/gtest.h"
+
+class FiltersTest : public ::testing::Test {
+ protected:
+ // Pass a function pointer to the Tester function.
+ void FiltersTester(AutocorrFix WebRtcIsacfix_AutocorrFixFunction) {
+ const int kOrder = 12;
+ const int kBuffer = 40;
+ int16_t scale = 0;
+ int32_t r_buffer[kOrder + 2] = {0};
+
+ // Test an overflow case.
+ const int16_t x_buffer_0[kBuffer] = {
+ 0, 0, 3010, 22351, 21106, 16969, -2095, -664,
+ 3513, -30980, 32767, -23839, 13335, 20289, -6831, 339,
+ -17207, 32767, 4959, 6177, 32767, 16599, -4747, 20504,
+ 3513, -30980, 32767, -23839, 13335, 20289, 0, -16969,
+ -2095, -664, 3513, 31981, 32767, -13839, 23336, 30281};
+ const int32_t r_expected_0[kOrder + 2] = {
+ 1872498461, -224288754, 203789985, 483400487, -208272635,
+ 2436500, 137785322, 266600814, -208486262, 329510080,
+ 137949184, -161738972, -26894267, 237630192};
+
+ WebRtcIsacfix_AutocorrFixFunction(r_buffer, x_buffer_0, kBuffer, kOrder + 1,
+ &scale);
+ for (int i = 0; i < kOrder + 2; i++) {
+ EXPECT_EQ(r_expected_0[i], r_buffer[i]);
+ }
+ EXPECT_EQ(3, scale);
+
+ // Test a no-overflow case.
+ const int16_t x_buffer_1[kBuffer] = {
+ 0, 0, 300, 21, 206, 169, -295, -664, 3513, -300,
+ 327, -29, 15, 289, -6831, 339, -107, 37, 59, 6177,
+ 327, 169, -4747, 204, 313, -980, 767, -9, 135, 289,
+ 0, -6969, -2095, -664, 0, 1, 7, -39, 236, 281};
+ const int32_t r_expected_1[kOrder + 2] = {
+ 176253864, 8126617, 1983287, -26196788, -3487363,
+ -42839676, -24644043, 3469813, 30559879, 31905045,
+ 5101567, 29328896, -55787438, -13163978};
+
+ WebRtcIsacfix_AutocorrFixFunction(r_buffer, x_buffer_1, kBuffer, kOrder + 1,
+ &scale);
+ for (int i = 0; i < kOrder + 2; i++) {
+ EXPECT_EQ(r_expected_1[i], r_buffer[i]);
+ }
+ EXPECT_EQ(0, scale);
+ }
+};
+
+TEST_F(FiltersTest, AutocorrFixTest) {
+ FiltersTester(WebRtcIsacfix_AutocorrC);
+#if defined(WEBRTC_HAS_NEON)
+ FiltersTester(WebRtcIsacfix_AutocorrNeon);
+#endif
+}
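
For reference, the `AutocorrFix` function-pointer type taken by FiltersTester() is declared in codec.h; judging from the implementations exercised here, it presumably has this shape (inferred from the call sites, not copied from the header):

    typedef int (*AutocorrFix)(int32_t* r,
                               const int16_t* x,
                               int16_t n,
                               int16_t order,
                               int16_t* scale);
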
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/initialize.c b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/initialize.c
new file mode 100644
index 0000000000..1b82958883
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/initialize.c
@@ -0,0 +1,173 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * initialize.c
+ *
+ * Internal initfunctions
+ *
+ */
+
+#include "modules/audio_coding/codecs/isac/fix/source/codec.h"
+#include "modules/audio_coding/codecs/isac/fix/source/structs.h"
+#include "modules/audio_coding/codecs/isac/fix/source/pitch_estimator.h"
+
+
+void WebRtcIsacfix_InitMaskingEnc(MaskFiltstr_enc *maskdata) {
+
+ int k;
+
+ for (k = 0; k < WINLEN; k++) {
+ maskdata->DataBufferLoQ0[k] = (int16_t) 0;
+ maskdata->DataBufferHiQ0[k] = (int16_t) 0;
+ }
+ for (k = 0; k < ORDERLO+1; k++) {
+ maskdata->CorrBufLoQQ[k] = (int32_t) 0;
+ maskdata->CorrBufLoQdom[k] = 0;
+
+ maskdata->PreStateLoGQ15[k] = 0;
+
+ }
+ for (k = 0; k < ORDERHI+1; k++) {
+ maskdata->CorrBufHiQQ[k] = (int32_t) 0;
+ maskdata->CorrBufHiQdom[k] = 0;
+ maskdata->PreStateHiGQ15[k] = 0;
+ }
+
+ maskdata->OldEnergy = 10;
+
+ return;
+}
+
+void WebRtcIsacfix_InitMaskingDec(MaskFiltstr_dec *maskdata) {
+
+ int k;
+
+ for (k = 0; k < ORDERLO+1; k++)
+ {
+ maskdata->PostStateLoGQ0[k] = 0;
+ }
+ for (k = 0; k < ORDERHI+1; k++)
+ {
+ maskdata->PostStateHiGQ0[k] = 0;
+ }
+
+ maskdata->OldEnergy = 10;
+
+ return;
+}
+
+void WebRtcIsacfix_InitPreFilterbank(PreFiltBankstr *prefiltdata)
+{
+ int k;
+
+ for (k = 0; k < QLOOKAHEAD; k++) {
+ prefiltdata->INLABUF1_fix[k] = 0;
+ prefiltdata->INLABUF2_fix[k] = 0;
+ }
+ for (k = 0; k < 2 * (QORDER - 1); k++) {
+ prefiltdata->INSTAT1_fix[k] = 0;
+ prefiltdata->INSTAT2_fix[k] = 0;
+ }
+
+ /* High pass filter states */
+ prefiltdata->HPstates_fix[0] = 0;
+ prefiltdata->HPstates_fix[1] = 0;
+
+ return;
+}
+
+void WebRtcIsacfix_InitPostFilterbank(PostFiltBankstr *postfiltdata)
+{
+ int k;
+
+ for (k = 0; k < 2 * POSTQORDER; k++) {
+ postfiltdata->STATE_0_LOWER_fix[k] = 0;
+ postfiltdata->STATE_0_UPPER_fix[k] = 0;
+ }
+
+ /* High pass filter states */
+
+ postfiltdata->HPstates1_fix[0] = 0;
+ postfiltdata->HPstates1_fix[1] = 0;
+
+ postfiltdata->HPstates2_fix[0] = 0;
+ postfiltdata->HPstates2_fix[1] = 0;
+
+ return;
+}
+
+
+void WebRtcIsacfix_InitPitchFilter(PitchFiltstr *pitchfiltdata)
+{
+ int k;
+
+ for (k = 0; k < PITCH_BUFFSIZE; k++)
+ pitchfiltdata->ubufQQ[k] = 0;
+ for (k = 0; k < (PITCH_DAMPORDER); k++)
+ pitchfiltdata->ystateQQ[k] = 0;
+
+ pitchfiltdata->oldlagQ7 = 6400; /* 50.0 in Q7 */
+ pitchfiltdata->oldgainQ12 = 0;
+}
+
+void WebRtcIsacfix_InitPitchAnalysis(PitchAnalysisStruct *State)
+{
+ int k;
+
+ for (k = 0; k < PITCH_CORR_LEN2+PITCH_CORR_STEP2+PITCH_MAX_LAG/2-PITCH_FRAME_LEN/2+2; k++) {
+ State->dec_buffer16[k] = 0;
+ }
+ for (k = 0; k < 2 * ALLPASSSECTIONS + 1; k++) {
+ State->decimator_state32[k] = 0;
+ }
+
+ for (k = 0; k < QLOOKAHEAD; k++)
+ State->inbuf[k] = 0;
+
+ WebRtcIsacfix_InitPitchFilter(&(State->PFstr_wght));
+
+ WebRtcIsacfix_InitPitchFilter(&(State->PFstr));
+}
+
+
+void WebRtcIsacfix_InitPlc(PLCstr* State)
+{
+  State->decayCoeffPriodic = WEBRTC_SPL_WORD16_MAX;
+  State->decayCoeffNoise = WEBRTC_SPL_WORD16_MAX;
+
+  State->used = PLC_WAS_USED;
+
+  WebRtcSpl_ZerosArrayW16(State->overlapLP, RECOVERY_OVERLAP);
+  WebRtcSpl_ZerosArrayW16(State->lofilt_coefQ15, ORDERLO);
+  WebRtcSpl_ZerosArrayW16(State->hifilt_coefQ15, ORDERHI);
+
+  State->AvgPitchGain_Q12 = 0;
+  State->lastPitchGain_Q12 = 0;
+  State->lastPitchLag_Q7 = 0;
+  State->gain_lo_hiQ17[0] = State->gain_lo_hiQ17[1] = 0;
+  WebRtcSpl_ZerosArrayW16(State->prevPitchInvIn, FRAMESAMPLES / 2);
+  WebRtcSpl_ZerosArrayW16(State->prevPitchInvOut, PITCH_MAX_LAG + 10);
+  WebRtcSpl_ZerosArrayW32(State->prevHP, PITCH_MAX_LAG + 10);
+  State->pitchCycles = 0;
+  State->A = 0;
+  State->B = 0;
+  State->pitchIndex = 0;
+  State->stretchLag = 240;
+  State->seed = 4447;
+}
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/isac_fix_type.h b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/isac_fix_type.h
new file mode 100644
index 0000000000..512911a8bb
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/isac_fix_type.h
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_ISAC_FIX_TYPE_H_
+#define MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_ISAC_FIX_TYPE_H_
+
+#include "modules/audio_coding/codecs/isac/fix/include/isacfix.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+class IsacFix {
+ public:
+ using instance_type = ISACFIX_MainStruct;
+ static const bool has_swb = false;
+ static inline int16_t Control(instance_type* inst,
+ int32_t rate,
+ int framesize) {
+ return WebRtcIsacfix_Control(inst, rate, framesize);
+ }
+ static inline int16_t ControlBwe(instance_type* inst,
+ int32_t rate_bps,
+ int frame_size_ms,
+ int16_t enforce_frame_size) {
+ return WebRtcIsacfix_ControlBwe(inst, rate_bps, frame_size_ms,
+ enforce_frame_size);
+ }
+ static inline int16_t Create(instance_type** inst) {
+ return WebRtcIsacfix_Create(inst);
+ }
+ static inline int DecodeInternal(instance_type* inst,
+ const uint8_t* encoded,
+ size_t len,
+ int16_t* decoded,
+ int16_t* speech_type) {
+ return WebRtcIsacfix_Decode(inst, encoded, len, decoded, speech_type);
+ }
+ static inline size_t DecodePlc(instance_type* inst,
+ int16_t* decoded,
+ size_t num_lost_frames) {
+ return WebRtcIsacfix_DecodePlc(inst, decoded, num_lost_frames);
+ }
+ static inline void DecoderInit(instance_type* inst) {
+ WebRtcIsacfix_DecoderInit(inst);
+ }
+ static inline int Encode(instance_type* inst,
+ const int16_t* speech_in,
+ uint8_t* encoded) {
+ return WebRtcIsacfix_Encode(inst, speech_in, encoded);
+ }
+ static inline int16_t EncoderInit(instance_type* inst, int16_t coding_mode) {
+ return WebRtcIsacfix_EncoderInit(inst, coding_mode);
+ }
+ static inline uint16_t EncSampRate(instance_type* inst) {
+ return kFixSampleRate;
+ }
+
+ static inline int16_t Free(instance_type* inst) {
+ return WebRtcIsacfix_Free(inst);
+ }
+ static inline int16_t GetErrorCode(instance_type* inst) {
+ return WebRtcIsacfix_GetErrorCode(inst);
+ }
+
+ static inline int16_t GetNewFrameLen(instance_type* inst) {
+ return WebRtcIsacfix_GetNewFrameLen(inst);
+ }
+ static inline int16_t SetDecSampRate(instance_type* inst,
+ uint16_t sample_rate_hz) {
+ RTC_DCHECK_EQ(sample_rate_hz, kFixSampleRate);
+ return 0;
+ }
+ static inline int16_t SetEncSampRate(instance_type* inst,
+ uint16_t sample_rate_hz) {
+ RTC_DCHECK_EQ(sample_rate_hz, kFixSampleRate);
+ return 0;
+ }
+ static inline void SetEncSampRateInDecoder(instance_type* inst,
+ uint16_t sample_rate_hz) {
+ RTC_DCHECK_EQ(sample_rate_hz, kFixSampleRate);
+ }
+ static inline void SetInitialBweBottleneck(instance_type* inst,
+ int bottleneck_bits_per_second) {
+ WebRtcIsacfix_SetInitialBweBottleneck(inst, bottleneck_bits_per_second);
+ }
+ static inline int16_t SetMaxPayloadSize(instance_type* inst,
+ int16_t max_payload_size_bytes) {
+ return WebRtcIsacfix_SetMaxPayloadSize(inst, max_payload_size_bytes);
+ }
+ static inline int16_t SetMaxRate(instance_type* inst, int32_t max_bit_rate) {
+ return WebRtcIsacfix_SetMaxRate(inst, max_bit_rate);
+ }
+
+ private:
+ enum { kFixSampleRate = 16000 };
+};
+
+} // namespace webrtc
+#endif // MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_ISAC_FIX_TYPE_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/isacfix.c b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/isacfix.c
new file mode 100644
index 0000000000..a7d44e883d
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/isacfix.c
@@ -0,0 +1,1230 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * isacfix.c
+ *
+ * This C file contains the functions for the ISAC API
+ *
+ */
+
+#include "modules/audio_coding/codecs/isac/fix/include/isacfix.h"
+
+#include <stdlib.h>
+
+#include "rtc_base/checks.h"
+#include "modules/audio_coding/codecs/isac/fix/source/bandwidth_estimator.h"
+#include "modules/audio_coding/codecs/isac/fix/source/codec.h"
+#include "modules/audio_coding/codecs/isac/fix/source/entropy_coding.h"
+#include "modules/audio_coding/codecs/isac/fix/source/filterbank_internal.h"
+#include "modules/audio_coding/codecs/isac/fix/source/lpc_masking_model.h"
+#include "modules/audio_coding/codecs/isac/fix/source/structs.h"
+
+// Declare function pointers.
+FilterMaLoopFix WebRtcIsacfix_FilterMaLoopFix;
+Spec2Time WebRtcIsacfix_Spec2Time;
+Time2Spec WebRtcIsacfix_Time2Spec;
+MatrixProduct1 WebRtcIsacfix_MatrixProduct1;
+MatrixProduct2 WebRtcIsacfix_MatrixProduct2;
+
+/* This method assumes that `stream_size_bytes` is in valid range,
+ * i.e. >= 0 && <= STREAM_MAXW16_60MS
+ */
+static void InitializeDecoderBitstream(size_t stream_size_bytes,
+ Bitstr_dec* bitstream) {
+ bitstream->W_upper = 0xFFFFFFFF;
+ bitstream->streamval = 0;
+ bitstream->stream_index = 0;
+ bitstream->full = 1;
+ bitstream->stream_size = (stream_size_bytes + 1) >> 1;
+ memset(bitstream->stream, 0, sizeof(bitstream->stream));
+}
+
+/****************************************************************************
+ * WebRtcIsacfix_Create(...)
+ *
+ * This function creates an iSAC instance, which will contain the state
+ * information for one coding/decoding channel.
+ *
+ * Input:
+ * - *ISAC_main_inst : a pointer to the coder instance.
+ *
+ * Return value : 0 - Ok
+ * -1 - Error
+ */
+
+int16_t WebRtcIsacfix_Create(ISACFIX_MainStruct **ISAC_main_inst)
+{
+ ISACFIX_SubStruct *tempo;
+ tempo = malloc(1 * sizeof(ISACFIX_SubStruct));
+ *ISAC_main_inst = (ISACFIX_MainStruct *)tempo;
+ if (*ISAC_main_inst!=NULL) {
+ (*(ISACFIX_SubStruct**)ISAC_main_inst)->errorcode = 0;
+ (*(ISACFIX_SubStruct**)ISAC_main_inst)->initflag = 0;
+ (*(ISACFIX_SubStruct**)ISAC_main_inst)->ISACenc_obj.SaveEnc_ptr = NULL;
+ WebRtcIsacfix_InitBandwidthEstimator(&tempo->bwestimator_obj);
+ return(0);
+ } else {
+ return(-1);
+ }
+}
+
+
+/****************************************************************************
+ * WebRtcIsacfix_CreateInternal(...)
+ *
+ * This function allocates the memory that is used to store encoder data.
+ *
+ * Input:
+ * - *ISAC_main_inst : a pointer to the coder instance.
+ *
+ * Return value : 0 - Ok
+ * -1 - Error
+ */
+
+int16_t WebRtcIsacfix_CreateInternal(ISACFIX_MainStruct *ISAC_main_inst)
+{
+ ISACFIX_SubStruct *ISAC_inst;
+
+ /* typecast pointer to real structure */
+ ISAC_inst = (ISACFIX_SubStruct *)ISAC_main_inst;
+
+ /* Allocate memory for storing encoder data */
+ ISAC_inst->ISACenc_obj.SaveEnc_ptr = malloc(1 * sizeof(IsacSaveEncoderData));
+
+ if (ISAC_inst->ISACenc_obj.SaveEnc_ptr!=NULL) {
+ return(0);
+ } else {
+ return(-1);
+ }
+}
+
+
+/****************************************************************************
+ * WebRtcIsacfix_Free(...)
+ *
+ * This function frees the ISAC instance created at the beginning.
+ *
+ * Input:
+ *      - ISAC_main_inst    : an iSAC instance.
+ *
+ * Return value : 0 - Ok
+ * -1 - Error
+ */
+
+int16_t WebRtcIsacfix_Free(ISACFIX_MainStruct *ISAC_main_inst)
+{
+ free(ISAC_main_inst);
+ return(0);
+}
+
+/****************************************************************************
+ * WebRtcIsacfix_FreeInternal(...)
+ *
+ * This function frees the internal memory for storing encoder data.
+ *
+ * Input:
+ *      - ISAC_main_inst    : an iSAC instance.
+ *
+ * Return value : 0 - Ok
+ * -1 - Error
+ */
+
+int16_t WebRtcIsacfix_FreeInternal(ISACFIX_MainStruct *ISAC_main_inst)
+{
+ ISACFIX_SubStruct *ISAC_inst;
+
+ /* typecast pointer to real structure */
+ ISAC_inst = (ISACFIX_SubStruct *)ISAC_main_inst;
+
+ /* Release memory */
+ free(ISAC_inst->ISACenc_obj.SaveEnc_ptr);
+
+ return(0);
+}
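
Taken together, the four functions above imply the following allocation and teardown order. A minimal sketch, with error handling abbreviated and the encode calls elided:

    ISACFIX_MainStruct* inst = NULL;
    if (WebRtcIsacfix_Create(&inst) != 0)
      return -1;                        /* instance state */
    if (WebRtcIsacfix_CreateInternal(inst) != 0) {
      WebRtcIsacfix_Free(inst);
      return -1;                        /* encoder scratch memory failed */
    }
    /* ... WebRtcIsacfix_EncoderInit(), WebRtcIsacfix_Encode(), ... */
    WebRtcIsacfix_FreeInternal(inst);   /* frees SaveEnc_ptr */
    WebRtcIsacfix_Free(inst);           /* frees the instance itself */
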
+
+/****************************************************************************
+ * WebRtcIsacfix_InitNeon(...)
+ *
+ * This function initializes function pointers for ARM Neon platform.
+ */
+
+#if defined(WEBRTC_HAS_NEON)
+static void WebRtcIsacfix_InitNeon(void) {
+ WebRtcIsacfix_AutocorrFix = WebRtcIsacfix_AutocorrNeon;
+ WebRtcIsacfix_FilterMaLoopFix = WebRtcIsacfix_FilterMaLoopNeon;
+ WebRtcIsacfix_Spec2Time = WebRtcIsacfix_Spec2TimeNeon;
+ WebRtcIsacfix_Time2Spec = WebRtcIsacfix_Time2SpecNeon;
+ WebRtcIsacfix_AllpassFilter2FixDec16 =
+ WebRtcIsacfix_AllpassFilter2FixDec16Neon;
+ WebRtcIsacfix_MatrixProduct1 = WebRtcIsacfix_MatrixProduct1Neon;
+ WebRtcIsacfix_MatrixProduct2 = WebRtcIsacfix_MatrixProduct2Neon;
+}
+#endif
+
+/****************************************************************************
+ * WebRtcIsacfix_InitMIPS(...)
+ *
+ * This function initializes function pointers for MIPS platform.
+ */
+
+#if defined(MIPS32_LE)
+static void WebRtcIsacfix_InitMIPS(void) {
+ WebRtcIsacfix_AutocorrFix = WebRtcIsacfix_AutocorrMIPS;
+ WebRtcIsacfix_FilterMaLoopFix = WebRtcIsacfix_FilterMaLoopMIPS;
+ WebRtcIsacfix_Spec2Time = WebRtcIsacfix_Spec2TimeMIPS;
+ WebRtcIsacfix_Time2Spec = WebRtcIsacfix_Time2SpecMIPS;
+ WebRtcIsacfix_MatrixProduct1 = WebRtcIsacfix_MatrixProduct1MIPS;
+ WebRtcIsacfix_MatrixProduct2 = WebRtcIsacfix_MatrixProduct2MIPS;
+#if defined(MIPS_DSP_R1_LE)
+ WebRtcIsacfix_AllpassFilter2FixDec16 =
+ WebRtcIsacfix_AllpassFilter2FixDec16MIPS;
+ WebRtcIsacfix_HighpassFilterFixDec32 =
+ WebRtcIsacfix_HighpassFilterFixDec32MIPS;
+#endif
+#if defined(MIPS_DSP_R2_LE)
+ WebRtcIsacfix_CalculateResidualEnergy =
+ WebRtcIsacfix_CalculateResidualEnergyMIPS;
+#endif
+}
+#endif
+
+static void InitFunctionPointers(void) {
+ WebRtcIsacfix_AutocorrFix = WebRtcIsacfix_AutocorrC;
+ WebRtcIsacfix_FilterMaLoopFix = WebRtcIsacfix_FilterMaLoopC;
+ WebRtcIsacfix_CalculateResidualEnergy =
+ WebRtcIsacfix_CalculateResidualEnergyC;
+ WebRtcIsacfix_AllpassFilter2FixDec16 = WebRtcIsacfix_AllpassFilter2FixDec16C;
+ WebRtcIsacfix_HighpassFilterFixDec32 = WebRtcIsacfix_HighpassFilterFixDec32C;
+ WebRtcIsacfix_Time2Spec = WebRtcIsacfix_Time2SpecC;
+ WebRtcIsacfix_Spec2Time = WebRtcIsacfix_Spec2TimeC;
+ WebRtcIsacfix_MatrixProduct1 = WebRtcIsacfix_MatrixProduct1C;
+ WebRtcIsacfix_MatrixProduct2 = WebRtcIsacfix_MatrixProduct2C;
+
+#if defined(WEBRTC_HAS_NEON)
+ WebRtcIsacfix_InitNeon();
+#endif
+
+#if defined(MIPS32_LE)
+ WebRtcIsacfix_InitMIPS();
+#endif
+}
+
+/****************************************************************************
+ * WebRtcIsacfix_EncoderInit(...)
+ *
+ * This function initializes an iSAC instance prior to the encoder calls.
+ *
+ * Input:
+ * - ISAC_main_inst : ISAC instance.
+ * - CodingMode : 0 -> Bit rate and frame length are automatically
+ * adjusted to available bandwidth on
+ * transmission channel.
+ * 1 -> User sets a frame length and a target bit
+ * rate which is taken as the maximum short-term
+ * average bit rate.
+ *
+ * Return value : 0 - Ok
+ * -1 - Error
+ */
+
+int16_t WebRtcIsacfix_EncoderInit(ISACFIX_MainStruct *ISAC_main_inst,
+ int16_t CodingMode)
+{
+ int k;
+ int16_t statusInit;
+ ISACFIX_SubStruct *ISAC_inst;
+
+ statusInit = 0;
+ /* typecast pointer to real structure */
+ ISAC_inst = (ISACFIX_SubStruct *)ISAC_main_inst;
+
+ /* flag encoder init */
+ ISAC_inst->initflag |= 2;
+
+ if (CodingMode == 0)
+ /* Adaptive mode */
+ ISAC_inst->ISACenc_obj.new_framelength = INITIAL_FRAMESAMPLES;
+ else if (CodingMode == 1)
+ /* Instantaneous mode */
+ ISAC_inst->ISACenc_obj.new_framelength = 480; /* default for I-mode */
+ else {
+ ISAC_inst->errorcode = ISAC_DISALLOWED_CODING_MODE;
+ statusInit = -1;
+ }
+
+ ISAC_inst->CodingMode = CodingMode;
+
+ WebRtcIsacfix_InitMaskingEnc(&ISAC_inst->ISACenc_obj.maskfiltstr_obj);
+ WebRtcIsacfix_InitPreFilterbank(&ISAC_inst->ISACenc_obj.prefiltbankstr_obj);
+ WebRtcIsacfix_InitPitchFilter(&ISAC_inst->ISACenc_obj.pitchfiltstr_obj);
+ WebRtcIsacfix_InitPitchAnalysis(&ISAC_inst->ISACenc_obj.pitchanalysisstr_obj);
+
+ WebRtcIsacfix_InitRateModel(&ISAC_inst->ISACenc_obj.rate_data_obj);
+
+
+ ISAC_inst->ISACenc_obj.buffer_index = 0;
+ ISAC_inst->ISACenc_obj.frame_nb = 0;
+ ISAC_inst->ISACenc_obj.BottleNeck = 32000; /* default for I-mode */
+ ISAC_inst->ISACenc_obj.MaxDelay = 10; /* default for I-mode */
+ ISAC_inst->ISACenc_obj.current_framesamples = 0;
+ ISAC_inst->ISACenc_obj.s2nr = 0;
+ ISAC_inst->ISACenc_obj.MaxBits = 0;
+ ISAC_inst->ISACenc_obj.bitstr_seed = 4447;
+ ISAC_inst->ISACenc_obj.payloadLimitBytes30 = STREAM_MAXW16_30MS << 1;
+ ISAC_inst->ISACenc_obj.payloadLimitBytes60 = STREAM_MAXW16_60MS << 1;
+ ISAC_inst->ISACenc_obj.maxPayloadBytes = STREAM_MAXW16_60MS << 1;
+ ISAC_inst->ISACenc_obj.maxRateInBytes = STREAM_MAXW16_30MS << 1;
+ ISAC_inst->ISACenc_obj.enforceFrameSize = 0;
+
+ /* Init the bitstream data area to zero */
+ for (k=0; k<STREAM_MAXW16_60MS; k++){
+ ISAC_inst->ISACenc_obj.bitstr_obj.stream[k] = 0;
+ }
+
+ InitFunctionPointers();
+
+ return statusInit;
+}
+
+/* Read the given number of bytes of big-endian 16-bit integers from `src` and
+ write them to `dest` in host endian. If `nbytes` is odd, the number of
+ output elements is rounded up, and the least significant byte of the last
+ element is set to 0. */
+static void read_be16(const uint8_t* src, size_t nbytes, uint16_t* dest) {
+ size_t i;
+ for (i = 0; i < nbytes / 2; ++i)
+ dest[i] = src[2 * i] << 8 | src[2 * i + 1];
+ if (nbytes % 2 == 1)
+ dest[nbytes / 2] = src[nbytes - 1] << 8;
+}
+
+/* Read the given number of bytes of host-endian 16-bit integers from `src` and
+ write them to `dest` in big endian. If `nbytes` is odd, the number of source
+ elements is rounded up (but only the most significant byte of the last
+ element is used), and the number of output bytes written will be
+ nbytes + 1. */
+static void write_be16(const uint16_t* src, size_t nbytes, uint8_t* dest) {
+ size_t i;
+ for (i = 0; i < nbytes / 2; ++i) {
+ dest[2 * i] = src[i] >> 8;
+ dest[2 * i + 1] = src[i];
+ }
+ if (nbytes % 2 == 1) {
+ dest[nbytes - 1] = src[nbytes / 2] >> 8;
+ dest[nbytes] = 0;
+ }
+}
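
A short round trip illustrates the endianness contract of the two helpers, including the odd-byte-count behavior described in their comments (values illustrative):

    uint16_t words[2] = {0x1234, 0xabcd};
    uint8_t bytes[4];
    uint16_t back[2];
    write_be16(words, 3, bytes);  /* bytes = {0x12, 0x34, 0xab, 0x00} */
    read_be16(bytes, 3, back);    /* back  = {0x1234, 0xab00} */
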
+
+/****************************************************************************
+ * WebRtcIsacfix_Encode(...)
+ *
+ * This function encodes 10 ms frame(s) and inserts them into a packet.
+ * Input speech length has to be 160 samples (10ms). The encoder buffers those
+ * 10ms frames until it reaches the chosen Framesize (480 or 960 samples
+ * corresponding to 30 or 60 ms frames), and then proceeds to the encoding.
+ *
+ * Input:
+ * - ISAC_main_inst : ISAC instance.
+ * - speechIn : input speech vector.
+ *
+ * Output:
+ * - encoded : the encoded data vector
+ *
+ * Return value:
+ * : >0 - Length (in bytes) of coded data
+ * : 0 - The buffer didn't reach the chosen framesize
+ * so it keeps buffering speech samples.
+ * : -1 - Error
+ */
+
+int WebRtcIsacfix_Encode(ISACFIX_MainStruct *ISAC_main_inst,
+ const int16_t *speechIn,
+ uint8_t* encoded)
+{
+ ISACFIX_SubStruct *ISAC_inst;
+ int stream_len;
+
+ /* typecast pointer to real structure */
+ ISAC_inst = (ISACFIX_SubStruct *)ISAC_main_inst;
+
+
+ /* check if encoder initiated */
+ if ((ISAC_inst->initflag & 2) != 2) {
+ ISAC_inst->errorcode = ISAC_ENCODER_NOT_INITIATED;
+ return (-1);
+ }
+
+ stream_len = WebRtcIsacfix_EncodeImpl((int16_t*)speechIn,
+ &ISAC_inst->ISACenc_obj,
+ &ISAC_inst->bwestimator_obj,
+ ISAC_inst->CodingMode);
+ if (stream_len<0) {
+ ISAC_inst->errorcode = -(int16_t)stream_len;
+ return -1;
+ }
+
+ write_be16(ISAC_inst->ISACenc_obj.bitstr_obj.stream, (size_t)stream_len,
+ encoded);
+ return stream_len;
+
+}
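
A caller-side sketch of the buffering behavior documented above. GetNext10MsBlock() and SendPacket() are hypothetical stand-ins for capture and transport:

    int16_t pcm[160];      /* one 10 ms block at 16 kHz */
    uint8_t payload[400];  /* >= the configured max payload size */
    for (;;) {
      GetNext10MsBlock(pcm);                             /* hypothetical */
      int len = WebRtcIsacfix_Encode(inst, pcm, payload);
      if (len < 0)
        break;             /* see WebRtcIsacfix_GetErrorCode() */
      if (len > 0)
        SendPacket(payload, (size_t)len);                /* hypothetical */
    }
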
+
+
+/****************************************************************************
+ * WebRtcIsacfix_GetNewBitStream(...)
+ *
+ * This function returns encoded data, with the received bwe-index in the
+ * stream. It should always return a complete packet, i.e. it is only called
+ * once, even for 60 msec frames.
+ *
+ * Input:
+ * - ISAC_main_inst : ISAC instance.
+ * - bweIndex : index of bandwidth estimate to put in new bitstream
+ *
+ * Output:
+ * - encoded : the encoded data vector
+ *
+ * Return value:
+ * : >0 - Length (in bytes) of coded data
+ * : -1 - Error
+ */
+
+int16_t WebRtcIsacfix_GetNewBitStream(ISACFIX_MainStruct *ISAC_main_inst,
+ int16_t bweIndex,
+ float scale,
+ uint8_t* encoded)
+{
+ ISACFIX_SubStruct *ISAC_inst;
+ int16_t stream_len;
+
+ /* typecast pointer to real structure */
+ ISAC_inst = (ISACFIX_SubStruct *)ISAC_main_inst;
+
+
+ /* check if encoder initiated */
+ if ((ISAC_inst->initflag & 2) != 2) {
+ ISAC_inst->errorcode = ISAC_ENCODER_NOT_INITIATED;
+ return (-1);
+ }
+
+ stream_len = WebRtcIsacfix_EncodeStoredData(&ISAC_inst->ISACenc_obj,
+ bweIndex,
+ scale);
+ if (stream_len<0) {
+ ISAC_inst->errorcode = - stream_len;
+ return -1;
+ }
+
+ write_be16(ISAC_inst->ISACenc_obj.bitstr_obj.stream, stream_len, encoded);
+ return stream_len;
+}
+
+
+
+/****************************************************************************
+ * WebRtcIsacfix_DecoderInit(...)
+ *
+ * This function initializes an iSAC instance prior to the decoder calls.
+ *
+ * Input:
+ * - ISAC_main_inst : ISAC instance.
+ */
+
+void WebRtcIsacfix_DecoderInit(ISACFIX_MainStruct *ISAC_main_inst)
+{
+ ISACFIX_SubStruct *ISAC_inst;
+
+ InitFunctionPointers();
+
+ /* typecast pointer to real structure */
+ ISAC_inst = (ISACFIX_SubStruct *)ISAC_main_inst;
+
+ /* flag decoder init */
+ ISAC_inst->initflag |= 1;
+
+ WebRtcIsacfix_InitMaskingDec(&ISAC_inst->ISACdec_obj.maskfiltstr_obj);
+ WebRtcIsacfix_InitPostFilterbank(&ISAC_inst->ISACdec_obj.postfiltbankstr_obj);
+ WebRtcIsacfix_InitPitchFilter(&ISAC_inst->ISACdec_obj.pitchfiltstr_obj);
+
+ /* TS */
+ WebRtcIsacfix_InitPlc( &ISAC_inst->ISACdec_obj.plcstr_obj );
+}
+
+
+/****************************************************************************
+ * WebRtcIsacfix_UpdateBwEstimate1(...)
+ *
+ * This function updates the estimate of the bandwidth.
+ *
+ * Input:
+ * - ISAC_main_inst : ISAC instance.
+ * - encoded : encoded ISAC frame(s).
+ * - packet_size : size of the packet.
+ * - rtp_seq_number : the RTP number of the packet.
+ * - arr_ts : the arrival time of the packet (from NetEq)
+ * in samples.
+ *
+ * Return value : 0 - Ok
+ * -1 - Error
+ */
+
+int16_t WebRtcIsacfix_UpdateBwEstimate1(ISACFIX_MainStruct *ISAC_main_inst,
+ const uint8_t* encoded,
+ size_t packet_size,
+ uint16_t rtp_seq_number,
+ uint32_t arr_ts)
+{
+ ISACFIX_SubStruct *ISAC_inst;
+ Bitstr_dec streamdata;
+ int16_t err;
+ const size_t kRequiredEncodedLenBytes = 10;
+
+ /* typecast pointer to real structure */
+ ISAC_inst = (ISACFIX_SubStruct *)ISAC_main_inst;
+
+ /* Sanity check of packet length */
+ if (packet_size == 0) {
+ /* return error code if the packet length is zero */
+ ISAC_inst->errorcode = ISAC_EMPTY_PACKET;
+ return -1;
+ } else if (packet_size > (STREAM_MAXW16<<1)) {
+ /* return error code if length of stream is too long */
+ ISAC_inst->errorcode = ISAC_LENGTH_MISMATCH;
+ return -1;
+ }
+
+ /* check if decoder initiated */
+ if ((ISAC_inst->initflag & 1) != 1) {
+ ISAC_inst->errorcode = ISAC_DECODER_NOT_INITIATED;
+ return (-1);
+ }
+
+ InitializeDecoderBitstream(packet_size, &streamdata);
+
+ read_be16(encoded, kRequiredEncodedLenBytes, streamdata.stream);
+
+ err = WebRtcIsacfix_EstimateBandwidth(&ISAC_inst->bwestimator_obj,
+ &streamdata,
+ packet_size,
+ rtp_seq_number,
+ 0,
+ arr_ts);
+
+
+ if (err < 0)
+ {
+ /* return error code if something went wrong */
+ ISAC_inst->errorcode = -err;
+ return -1;
+ }
+
+
+ return 0;
+}
+
+/****************************************************************************
+ * WebRtcIsacfix_UpdateBwEstimate(...)
+ *
+ * This function updates the estimate of the bandwidth.
+ *
+ * Input:
+ * - ISAC_main_inst : ISAC instance.
+ * - encoded : encoded ISAC frame(s).
+ * - packet_size : size of the packet.
+ * - rtp_seq_number : the RTP number of the packet.
+ * - send_ts : Send Time Stamp from RTP header
+ * - arr_ts : the arrival time of the packet (from NetEq)
+ * in samples.
+ *
+ * Return value : 0 - Ok
+ * -1 - Error
+ */
+
+int16_t WebRtcIsacfix_UpdateBwEstimate(ISACFIX_MainStruct *ISAC_main_inst,
+ const uint8_t* encoded,
+ size_t packet_size,
+ uint16_t rtp_seq_number,
+ uint32_t send_ts,
+ uint32_t arr_ts)
+{
+ ISACFIX_SubStruct *ISAC_inst;
+ Bitstr_dec streamdata;
+ int16_t err;
+ const size_t kRequiredEncodedLenBytes = 10;
+
+ /* typecast pointer to real structure */
+ ISAC_inst = (ISACFIX_SubStruct *)ISAC_main_inst;
+
+ /* Sanity check of packet length */
+ if (packet_size == 0) {
+ /* return error code if the packet length is zero */
+ ISAC_inst->errorcode = ISAC_EMPTY_PACKET;
+ return -1;
+ } else if (packet_size < kRequiredEncodedLenBytes) {
+ ISAC_inst->errorcode = ISAC_PACKET_TOO_SHORT;
+ return -1;
+ } else if (packet_size > (STREAM_MAXW16<<1)) {
+ /* return error code if length of stream is too long */
+ ISAC_inst->errorcode = ISAC_LENGTH_MISMATCH;
+ return -1;
+ }
+
+ /* check if decoder initiated */
+ if ((ISAC_inst->initflag & 1) != 1) {
+ ISAC_inst->errorcode = ISAC_DECODER_NOT_INITIATED;
+ return (-1);
+ }
+
+ InitializeDecoderBitstream(packet_size, &streamdata);
+
+ read_be16(encoded, kRequiredEncodedLenBytes, streamdata.stream);
+
+ err = WebRtcIsacfix_EstimateBandwidth(&ISAC_inst->bwestimator_obj,
+ &streamdata,
+ packet_size,
+ rtp_seq_number,
+ send_ts,
+ arr_ts);
+
+ if (err < 0)
+ {
+ /* return error code if something went wrong */
+ ISAC_inst->errorcode = -err;
+ return -1;
+ }
+
+
+ return 0;
+}
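
Receiver-side usage sketch: feed each arriving packet's RTP metadata to the estimator before decoding it. The `pkt` fields are hypothetical names for the header values listed above:

    if (WebRtcIsacfix_UpdateBwEstimate(inst, pkt->payload, pkt->payload_len,
                                       pkt->rtp_seq_number, pkt->send_ts,
                                       pkt->arrival_ts) != 0) {
      /* inspect WebRtcIsacfix_GetErrorCode(inst) */
    }
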
+
+/****************************************************************************
+ * WebRtcIsacfix_Decode(...)
+ *
+ * This function decodes an iSAC frame. Output speech length
+ * will be a multiple of 480 samples: 480 or 960 samples,
+ * depending on the framesize (30 or 60 ms).
+ *
+ * Input:
+ * - ISAC_main_inst : ISAC instance.
+ * - encoded : encoded ISAC frame(s)
+ * - len : bytes in encoded vector
+ *
+ * Output:
+ * - decoded : The decoded vector
+ *
+ * Return value : >0 - number of samples in decoded vector
+ * -1 - Error
+ */
+
+
+int WebRtcIsacfix_Decode(ISACFIX_MainStruct* ISAC_main_inst,
+ const uint8_t* encoded,
+ size_t len,
+ int16_t* decoded,
+ int16_t* speechType)
+{
+ ISACFIX_SubStruct *ISAC_inst;
+ /* number of samples (480 or 960), output from decoder */
+ /* that were actually used in the encoder/decoder (determined on the fly) */
+ size_t number_of_samples;
+ int declen_int = 0;
+ size_t declen;
+
+ /* typecast pointer to real structure */
+ ISAC_inst = (ISACFIX_SubStruct *)ISAC_main_inst;
+
+ /* check if decoder initiated */
+ if ((ISAC_inst->initflag & 1) != 1) {
+ ISAC_inst->errorcode = ISAC_DECODER_NOT_INITIATED;
+ return (-1);
+ }
+
+ /* Sanity check of packet length */
+ if (len == 0) {
+ /* return error code if the packet length is zero */
+ ISAC_inst->errorcode = ISAC_EMPTY_PACKET;
+ return -1;
+ } else if (len > (STREAM_MAXW16<<1)) {
+ /* return error code if length of stream is too long */
+ ISAC_inst->errorcode = ISAC_LENGTH_MISMATCH;
+ return -1;
+ }
+
+ InitializeDecoderBitstream(len, &ISAC_inst->ISACdec_obj.bitstr_obj);
+
+ read_be16(encoded, len, ISAC_inst->ISACdec_obj.bitstr_obj.stream);
+
+ /* added for NetEq purposes (VAD/DTX related) */
+ *speechType=1;
+
+ declen_int = WebRtcIsacfix_DecodeImpl(decoded, &ISAC_inst->ISACdec_obj,
+ &number_of_samples);
+ if (declen_int < 0) {
+ /* Some error inside the decoder */
+ ISAC_inst->errorcode = -(int16_t)declen_int;
+ memset(decoded, 0, sizeof(int16_t) * MAX_FRAMESAMPLES);
+ return -1;
+ }
+ declen = (size_t)declen_int;
+
+ /* error check */
+
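+ /* `len` may legitimately exceed `declen` by the value of the byte
+    immediately following the decoded data. That byte sits in the low half
+    of the 16-bit stream word when `declen` is odd and in the high half
+    when it is even, hence the two cases below. */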
+ if (declen & 1) {
+ if (len != declen &&
+ len != declen +
+ ((ISAC_inst->ISACdec_obj.bitstr_obj.stream[declen >> 1]) & 0xFF)) {
+ ISAC_inst->errorcode = ISAC_LENGTH_MISMATCH;
+ memset(decoded, 0, sizeof(int16_t) * number_of_samples);
+ return -1;
+ }
+ } else {
+ if (len != declen &&
+ len != declen +
+ ((ISAC_inst->ISACdec_obj.bitstr_obj.stream[declen >> 1]) >> 8)) {
+ ISAC_inst->errorcode = ISAC_LENGTH_MISMATCH;
+ memset(decoded, 0, sizeof(int16_t) * number_of_samples);
+ return -1;
+ }
+ }
+
+ return (int)number_of_samples;
+}
+
+
+/****************************************************************************
+ * WebRtcIsacfix_DecodePlc(...)
+ *
+ * This function conducts PLC for iSAC frame(s) in wide-band (16 kHz sampling).
+ * Output speech length will be "480*noOfLostFrames" samples,
+ * which is equivalent to "30*noOfLostFrames" milliseconds.
+ *
+ * Input:
+ * - ISAC_main_inst : ISAC instance.
+ * - noOfLostFrames : Number of PLC frames (480 samples = 30 ms)
+ * to produce
+ *
+ * Output:
+ * - decoded : The decoded vector
+ *
+ * Return value : Number of samples in decoded PLC vector
+ */
+
+size_t WebRtcIsacfix_DecodePlc(ISACFIX_MainStruct* ISAC_main_inst,
+ int16_t* decoded,
+ size_t noOfLostFrames)
+{
+
+ size_t no_of_samples, declen, k;
+ int16_t outframe16[MAX_FRAMESAMPLES];
+
+ ISACFIX_SubStruct *ISAC_inst;
+ /* typecast pointer to real structure */
+ ISAC_inst = (ISACFIX_SubStruct *)ISAC_main_inst;
+
+ /* Limit number of frames to two = 60 msec. Otherwise we exceed data vectors */
+ if (noOfLostFrames > 2) {
+ noOfLostFrames = 2;
+ }
+ k = 0;
+ declen = 0;
+ while( noOfLostFrames > 0 )
+ {
+ WebRtcIsacfix_DecodePlcImpl(&(outframe16[k*480]), &ISAC_inst->ISACdec_obj,
+ &no_of_samples);
+ declen += no_of_samples;
+ noOfLostFrames--;
+ k++;
+ }
+
+ for (k=0;k<declen;k++) {
+ decoded[k] = outframe16[k];
+ }
+
+ return declen;
+}
+
+
+/****************************************************************************
+ * WebRtcIsacfix_Control(...)
+ *
+ * This function sets the limit on the short-term average bit rate and the
+ * frame length. Should be used only in Instantaneous mode.
+ *
+ * Input:
+ * - ISAC_main_inst : ISAC instance.
+ * - rate : limit on the short-term average bit rate,
+ * in bits/second (between 10000 and 32000)
+ * - framesize : number of milliseconds per frame (30 or 60)
+ *
+ * Return value : 0 - ok
+ * -1 - Error
+ */
+
+int16_t WebRtcIsacfix_Control(ISACFIX_MainStruct *ISAC_main_inst,
+ int16_t rate,
+ int framesize)
+{
+ ISACFIX_SubStruct *ISAC_inst;
+ /* typecast pointer to real structure */
+ ISAC_inst = (ISACFIX_SubStruct *)ISAC_main_inst;
+
+ if (ISAC_inst->CodingMode == 0)
+ {
+ /* in adaptive mode */
+ ISAC_inst->errorcode = ISAC_MODE_MISMATCH;
+ return -1;
+ }
+
+
+ if (rate >= 10000 && rate <= 32000)
+ ISAC_inst->ISACenc_obj.BottleNeck = rate;
+ else {
+ ISAC_inst->errorcode = ISAC_DISALLOWED_BOTTLENECK;
+ return -1;
+ }
+
+
+
+ if (framesize == 30 || framesize == 60)
+ ISAC_inst->ISACenc_obj.new_framelength = (int16_t)((FS/1000) * framesize);
+ else {
+ ISAC_inst->errorcode = ISAC_DISALLOWED_FRAME_LENGTH;
+ return -1;
+ }
+
+ return 0;
+}
+
+void WebRtcIsacfix_SetInitialBweBottleneck(ISACFIX_MainStruct* ISAC_main_inst,
+ int bottleneck_bits_per_second) {
+ ISACFIX_SubStruct* inst = (ISACFIX_SubStruct*)ISAC_main_inst;
+ RTC_DCHECK_GE(bottleneck_bits_per_second, 10000);
+ RTC_DCHECK_LE(bottleneck_bits_per_second, 32000);
+ inst->bwestimator_obj.sendBwAvg = ((uint32_t)bottleneck_bits_per_second) << 7;
+}
+
+/****************************************************************************
+ * WebRtcIsacfix_ControlBwe(...)
+ *
+ * This function sets the initial values of bottleneck and frame-size if
+ * iSAC is used in channel-adaptive mode. Through this API, users can
+ * enforce a frame-size for all values of bottleneck. Then iSAC will not
+ * automatically change the frame-size.
+ *
+ *
+ * Input:
+ * - ISAC_main_inst : ISAC instance.
+ * - rateBPS : initial value of bottleneck in bits/second
+ * 10000 <= rateBPS <= 32000 is accepted
+ * For default bottleneck set rateBPS = 0
+ * - frameSizeMs : number of milliseconds per frame (30 or 60)
+ * - enforceFrameSize : 1 to enforce the given frame-size throughout
+ * the adaptation process, 0 to let iSAC change
+ * the frame-size if required.
+ *
+ * Return value : 0 - ok
+ * -1 - Error
+ */
+
+int16_t WebRtcIsacfix_ControlBwe(ISACFIX_MainStruct *ISAC_main_inst,
+ int16_t rateBPS,
+ int frameSizeMs,
+ int16_t enforceFrameSize)
+{
+ ISACFIX_SubStruct *ISAC_inst;
+ /* Typecast pointer to real structure */
+ ISAC_inst = (ISACFIX_SubStruct *)ISAC_main_inst;
+
+ /* check if encoder initiated */
+ if ((ISAC_inst->initflag & 2) != 2) {
+ ISAC_inst->errorcode = ISAC_ENCODER_NOT_INITIATED;
+ return (-1);
+ }
+
+ /* Check that we are in channel-adaptive mode, otherwise, return -1 */
+ if (ISAC_inst->CodingMode != 0) {
+ ISAC_inst->errorcode = ISAC_MODE_MISMATCH;
+ return (-1);
+ }
+
+ /* Set struct variable if enforceFrameSize is set. ISAC will then keep the */
+ /* chosen frame size. */
+ ISAC_inst->ISACenc_obj.enforceFrameSize = (enforceFrameSize != 0)? 1:0;
+
+ /* Set initial rate, if value between 10000 and 32000, */
+ /* if rateBPS is 0, keep the default initial bottleneck value (15000) */
+ if ((rateBPS >= 10000) && (rateBPS <= 32000)) {
+ ISAC_inst->bwestimator_obj.sendBwAvg = (((uint32_t)rateBPS) << 7);
+ } else if (rateBPS != 0) {
+ ISAC_inst->errorcode = ISAC_DISALLOWED_BOTTLENECK;
+ return -1;
+ }
+
+ /* Set initial framesize. If enforceFrameSize is set the frame size will not change */
+ if ((frameSizeMs == 30) || (frameSizeMs == 60)) {
+ ISAC_inst->ISACenc_obj.new_framelength = (int16_t)((FS/1000) * frameSizeMs);
+ } else {
+ ISAC_inst->errorcode = ISAC_DISALLOWED_FRAME_LENGTH;
+ return -1;
+ }
+
+ return 0;
+}
+
+/****************************************************************************
+ * WebRtcIsacfix_GetDownLinkBwIndex(...)
+ *
+ * This function returns the index representing the bandwidth estimate
+ * received from the other side.
+ *
+ * Input:
+ * - ISAC_main_inst: iSAC struct
+ *
+ * Output:
+ * - rateIndex : Bandwidth estimate to transmit to other side.
+ *
+ */
+
+int16_t WebRtcIsacfix_GetDownLinkBwIndex(ISACFIX_MainStruct* ISAC_main_inst,
+ int16_t* rateIndex)
+{
+ ISACFIX_SubStruct *ISAC_inst;
+
+ /* typecast pointer to real structure */
+ ISAC_inst = (ISACFIX_SubStruct *)ISAC_main_inst;
+
+ /* Call function to get Bandwidth Estimate */
+ *rateIndex = WebRtcIsacfix_GetDownlinkBwIndexImpl(&ISAC_inst->bwestimator_obj);
+
+ return 0;
+}
+
+
+/****************************************************************************
+ * WebRtcIsacfix_UpdateUplinkBw(...)
+ *
+ * This function takes an index representing the Bandwidth estimate from
+ * this side to the other side and updates the BWE.
+ *
+ * Input:
+ * - ISAC_main_inst: iSAC struct
+ * - rateIndex : Bandwidth estimate from other side.
+ *
+ */
+
+int16_t WebRtcIsacfix_UpdateUplinkBw(ISACFIX_MainStruct* ISAC_main_inst,
+ int16_t rateIndex)
+{
+ int16_t err = 0;
+ ISACFIX_SubStruct *ISAC_inst;
+
+ /* typecast pointer to real structure */
+ ISAC_inst = (ISACFIX_SubStruct *)ISAC_main_inst;
+
+ /* Call function to update BWE with received Bandwidth Estimate */
+ err = WebRtcIsacfix_UpdateUplinkBwRec(&ISAC_inst->bwestimator_obj, rateIndex);
+ if (err < 0) {
+ ISAC_inst->errorcode = -err;
+ return (-1);
+ }
+
+ return 0;
+}
+
+/****************************************************************************
+ * WebRtcIsacfix_ReadFrameLen(...)
+ *
+ * This function returns the length of the frame represented in the packet.
+ *
+ * Input:
+ * - encoded : Encoded bitstream
+ *
+ * Output:
+ * - frameLength : Length of frame in packet (in samples)
+ *
+ */
+
+int16_t WebRtcIsacfix_ReadFrameLen(const uint8_t* encoded,
+ size_t encoded_len_bytes,
+ size_t* frameLength)
+{
+ Bitstr_dec streamdata;
+ int16_t err;
+ const size_t kRequiredEncodedLenBytes = 10;
+
+ if (encoded_len_bytes < kRequiredEncodedLenBytes) {
+ return -1;
+ }
+
+ InitializeDecoderBitstream(encoded_len_bytes, &streamdata);
+
+ read_be16(encoded, kRequiredEncodedLenBytes, streamdata.stream);
+
+ /* decode frame length */
+ err = WebRtcIsacfix_DecodeFrameLen(&streamdata, frameLength);
+ if (err<0) // error check
+ return err;
+
+ return 0;
+}
+
+
+/****************************************************************************
+ * WebRtcIsacfix_ReadBwIndex(...)
+ *
+ * This function returns the index of the Bandwidth estimate from the bitstream.
+ *
+ * Input:
+ * - encoded : Encoded bitstream
+ *
+ * Output:
+ * - rateIndex : Bandwidth estimate in bitstream
+ *
+ */
+
+int16_t WebRtcIsacfix_ReadBwIndex(const uint8_t* encoded,
+ size_t encoded_len_bytes,
+ int16_t* rateIndex)
+{
+ Bitstr_dec streamdata;
+ int16_t err;
+ const size_t kRequiredEncodedLenBytes = 10;
+
+ if (encoded_len_bytes < kRequiredEncodedLenBytes) {
+ return -1;
+ }
+
+ InitializeDecoderBitstream(encoded_len_bytes, &streamdata);
+
+ read_be16(encoded, kRequiredEncodedLenBytes, streamdata.stream);
+
+ /* decode frame length, needed to get to the rateIndex in the bitstream */
+ size_t frameLength;
+ err = WebRtcIsacfix_DecodeFrameLen(&streamdata, &frameLength);
+ if (err<0) // error check
+ return err;
+
+ /* decode BW estimation */
+ err = WebRtcIsacfix_DecodeSendBandwidth(&streamdata, rateIndex);
+ if (err<0) // error check
+ return err;
+
+ return 0;
+}
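
Both parsers above need only the first 10 bytes of a payload, so a packet can be inspected without a decoder instance. A minimal sketch:

    size_t frame_len_samples;  /* 480 or 960 */
    int16_t bwe_index;
    if (WebRtcIsacfix_ReadFrameLen(payload, payload_len,
                                   &frame_len_samples) == 0 &&
        WebRtcIsacfix_ReadBwIndex(payload, payload_len, &bwe_index) == 0) {
      /* bwe_index is the bandwidth-estimate index carried in the stream */
    }
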
+
+/****************************************************************************
+ * WebRtcIsacfix_GetErrorCode(...)
+ *
+ * This function can be used to check the error code of an iSAC instance. When
+ * a function returns -1, an error code will be set for that instance. The
+ * function below extracts the code of the last error that occurred in the
+ * specified instance.
+ *
+ * Input:
+ * - ISAC_main_inst : ISAC instance
+ *
+ * Return value : Error code
+ */
+
+int16_t WebRtcIsacfix_GetErrorCode(ISACFIX_MainStruct *ISAC_main_inst)
+{
+ ISACFIX_SubStruct *ISAC_inst;
+ /* typecast pointer to real structure */
+ ISAC_inst = (ISACFIX_SubStruct *)ISAC_main_inst;
+
+ return ISAC_inst->errorcode;
+}
+
+
+
+/****************************************************************************
+ * WebRtcIsacfix_GetUplinkBw(...)
+ *
+ * This function returns the instantaneous quantized iSAC send bitrate.
+ *
+ * Input:
+ * - ISAC_main_inst : iSAC instance
+ *
+ * Return value : bitrate
+ */
+
+int32_t WebRtcIsacfix_GetUplinkBw(ISACFIX_MainStruct *ISAC_main_inst)
+{
+ ISACFIX_SubStruct *ISAC_inst = (ISACFIX_SubStruct *)ISAC_main_inst;
+ BwEstimatorstr * bw = (BwEstimatorstr*)&(ISAC_inst->bwestimator_obj);
+
+ return (int32_t) WebRtcIsacfix_GetUplinkBandwidth(bw);
+}
+
+/****************************************************************************
+ * WebRtcIsacfix_GetNewFrameLen(...)
+ *
+ * This function returns the next frame length (in samples) of iSAC.
+ *
+ * Input:
+ * - ISAC_main_inst : iSAC instance
+ *
+ * Return value : frame length in samples
+ */
+
+int16_t WebRtcIsacfix_GetNewFrameLen(ISACFIX_MainStruct *ISAC_main_inst)
+{
+ ISACFIX_SubStruct *ISAC_inst = (ISACFIX_SubStruct *)ISAC_main_inst;
+ return ISAC_inst->ISACenc_obj.new_framelength;
+}
+
+
+/****************************************************************************
+ * WebRtcIsacfix_SetMaxPayloadSize(...)
+ *
+ * This function sets a limit for the maximum payload size of iSAC. The same
+ * value is used both for 30 and 60 msec packets.
+ * The absolute max will be valid until the next time the function is called.
+ * NOTE! This function may override the function WebRtcIsacfix_SetMaxRate()
+ *
+ * Input:
+ * - ISAC_main_inst : iSAC instance
+ * - maxPayloadBytes : maximum size of the payload in bytes
+ * valid values are between 100 and 400 bytes
+ *
+ *
+ * Return value : 0 if successful
+ * -1 if error happens
+ */
+
+int16_t WebRtcIsacfix_SetMaxPayloadSize(ISACFIX_MainStruct *ISAC_main_inst,
+ int16_t maxPayloadBytes)
+{
+ ISACFIX_SubStruct *ISAC_inst;
+
+ /* typecast pointer to real structure */
+ ISAC_inst = (ISACFIX_SubStruct *)ISAC_main_inst;
+
+ if((maxPayloadBytes < 100) || (maxPayloadBytes > 400))
+ {
+ /* maxPayloadBytes is out of valid range */
+ return -1;
+ }
+ else
+ {
+ /* Set new absolute max, which will not change unless this function
+ is called again with a new value */
+ ISAC_inst->ISACenc_obj.maxPayloadBytes = maxPayloadBytes;
+
+ /* Set new maximum values for 30 and 60 msec packets */
+ if (maxPayloadBytes < ISAC_inst->ISACenc_obj.maxRateInBytes) {
+ ISAC_inst->ISACenc_obj.payloadLimitBytes30 = maxPayloadBytes;
+ } else {
+ ISAC_inst->ISACenc_obj.payloadLimitBytes30 = ISAC_inst->ISACenc_obj.maxRateInBytes;
+ }
+
+ if ( maxPayloadBytes < (ISAC_inst->ISACenc_obj.maxRateInBytes << 1)) {
+ ISAC_inst->ISACenc_obj.payloadLimitBytes60 = maxPayloadBytes;
+ } else {
+ ISAC_inst->ISACenc_obj.payloadLimitBytes60 = (ISAC_inst->ISACenc_obj.maxRateInBytes << 1);
+ }
+ }
+ return 0;
+}
+
+
+/****************************************************************************
+ * WebRtcIsacfix_SetMaxRate(...)
+ *
+ * This function sets the maximum rate which the codec may not exceed for a
+ * single packet. The maximum rate is set in bits per second.
+ * The codec has an absolute maximum rate of 53400 bits per second (200 bytes
+ * per 30 msec).
+ * It is possible to set a maximum rate between 32000 and 53400 bits per second.
+ *
+ * The rate limit is valid until the next time the function is called.
+ *
+ * NOTE! Packet size will never go above the value set if calling
+ * WebRtcIsacfix_SetMaxPayloadSize() (default max packet size is 400 bytes).
+ *
+ * Input:
+ * - ISAC_main_inst : iSAC instance
+ * - maxRate : maximum rate in bits per second,
+ * valid values are 32000 to 53400 bits
+ *
+ * Return value : 0 if successful
+ * -1 if error happens
+ */
+
+int16_t WebRtcIsacfix_SetMaxRate(ISACFIX_MainStruct *ISAC_main_inst,
+ int32_t maxRate)
+{
+ ISACFIX_SubStruct *ISAC_inst;
+ int16_t maxRateInBytes;
+
+ /* typecast pointer to real structure */
+ ISAC_inst = (ISACFIX_SubStruct *)ISAC_main_inst;
+
+ if((maxRate < 32000) || (maxRate > 53400))
+ {
+ /* maxRate is out of valid range */
+ return -1;
+ }
+ else
+ {
+ /* Calculate maximum number of bytes per 30 msec packets for the given
+ maximum rate. Multiply with 30/1000 to get number of bits per 30 msec,
+ divide by 8 to get number of bytes per 30 msec:
+ maxRateInBytes = floor((maxRate * 30/1000) / 8); */
+ maxRateInBytes = (int16_t)( WebRtcSpl_DivW32W16ResW16(WEBRTC_SPL_MUL(maxRate, 3), 800) );
+
+ /* Store the value for usage in the WebRtcIsacfix_SetMaxPayloadSize-function */
+ ISAC_inst->ISACenc_obj.maxRateInBytes = maxRateInBytes;
+
+ /* For 30 msec packets: if the new limit is below the maximum
+ payload size, set a new limit */
+ if (maxRateInBytes < ISAC_inst->ISACenc_obj.maxPayloadBytes) {
+ ISAC_inst->ISACenc_obj.payloadLimitBytes30 = maxRateInBytes;
+ } else {
+ ISAC_inst->ISACenc_obj.payloadLimitBytes30 = ISAC_inst->ISACenc_obj.maxPayloadBytes;
+ }
+
+ /* For 60 msec packets: if the new limit (times 2) is below the
+ maximum payload size, set a new limit */
+ if ( (maxRateInBytes << 1) < ISAC_inst->ISACenc_obj.maxPayloadBytes) {
+ ISAC_inst->ISACenc_obj.payloadLimitBytes60 = (maxRateInBytes << 1);
+ } else {
+ ISAC_inst->ISACenc_obj.payloadLimitBytes60 = ISAC_inst->ISACenc_obj.maxPayloadBytes;
+ }
+ }
+
+ return 0;
+}
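
A quick check of the byte-rate conversion in the comment above: maxRate * 3 / 800 equals floor((maxRate * 30 / 1000) / 8), so the 32000 bit/s floor maps to 120 bytes per 30 msec packet and the 53400 bit/s ceiling to 200 bytes:

    #include <assert.h>

    /* maxRate * 3 / 800 == floor((maxRate * 30 / 1000) / 8) */
    assert(32000 * 3 / 800 == 120);
    assert(53400 * 3 / 800 == 200);  /* 160200 / 800 truncates to 200 */
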
+
+
+
+/****************************************************************************
+ * WebRtcIsacfix_version(...)
+ *
+ * This function returns the version number.
+ *
+ * Output:
+ * - version : Pointer to character string
+ *
+ */
+
+void WebRtcIsacfix_version(char *version)
+{
+ strcpy(version, "3.6.0");
+}
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/lattice.c b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/lattice.c
new file mode 100644
index 0000000000..7bbf4e054a
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/lattice.c
@@ -0,0 +1,321 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * lattice.c
+ *
+ * Contains the normalized lattice filter routines (MA and AR) for iSAC codec
+ *
+ */
+
+#include "modules/audio_coding/codecs/isac/fix/source/codec.h"
+#include "modules/audio_coding/codecs/isac/fix/source/settings.h"
+#include "rtc_base/sanitizer.h"
+
+#define LATTICE_MUL_32_32_RSFT16(a32a, a32b, b32) \
+ ((int32_t)(WEBRTC_SPL_MUL(a32a, b32) + (WEBRTC_SPL_MUL_16_32_RSFT16(a32b, b32))))
+/* This macro is FORBIDDEN to use elsewhere than in a function in this file and
+ its corresponding Neon version. It might give unpredictable results, since a
+ general int32_t*int32_t multiplication results in a 64-bit value.
+ The result is then shifted just 16 steps to the right, giving a need for 48
+ bits, i.e. in the general case it will NOT fit in an int32_t. In the
+ cases used here, the int32_t will be enough, since (for a good
+ reason) the involved multiplicands aren't big enough to overflow an
+ int32_t after shifting right 16 bits. I have compared the result of a
+ multiplication between t32 and tmp32, done in two ways:
+ 1) Using (int32_t) (((float)(tmp32))*((float)(tmp32b))/65536.0);
+ 2) Using LATTICE_MUL_32_32_RSFT16(t16a, t16b, tmp32b);
+ By running 25 files, I haven't found any diff bigger than 64 - this was in
+ the case when method 1) gave 650235648 and 2) gave 650235712.
+*/
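
What the macro approximates is an exact 32x32 multiply followed by an arithmetic right shift of 16. Callers prepare the split operands as in WebRtcIsacfix_FilterMaLoopC(): a32a = a32 >> 16 and a32b = (int16_t)a32, with a32a incremented when a32b is negative. Per the comparison above, the macro may differ from this 64-bit reference by a small truncation error:

    #include <stdint.h>

    /* 64-bit reference for LATTICE_MUL_32_32_RSFT16; valid when the true
       product fits in 48 bits, as required by the comment above. */
    static int32_t Mul32x32Rsft16Ref(int32_t a32, int32_t b32) {
      return (int32_t)(((int64_t)a32 * b32) >> 16);
    }
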
+
+/* Function prototype: filtering ar_g_Q0[] and ar_f_Q0[] through an AR filter
+ with coefficients cth_Q15[] and sth_Q15[].
+ Implemented for both generic and ARMv7 platforms.
+ */
+void WebRtcIsacfix_FilterArLoop(int16_t* ar_g_Q0,
+ int16_t* ar_f_Q0,
+ int16_t* cth_Q15,
+ int16_t* sth_Q15,
+ size_t order_coef);
+
+/* Inner loop used for function WebRtcIsacfix_NormLatticeFilterMa(). It does:
+ for 0 <= n < HALF_SUBFRAMELEN - 1:
+   *ptr2 = input2 * (*ptr2 + input0 * (*ptr0));
+   *ptr1 = input1 * (*ptr0) + input0 * (*ptr2);
+ Note: WebRtcIsacfix_FilterMaLoopNeon and WebRtcIsacfix_FilterMaLoopC are
+ not bit-exact. The accuracy of the ARM Neon function is the same or better.
+*/
+void WebRtcIsacfix_FilterMaLoopC(int16_t input0, // Filter coefficient
+ int16_t input1, // Filter coefficient
+ int32_t input2, // Inverse coeff. (1/input1)
+ int32_t* ptr0, // Sample buffer
+ int32_t* ptr1, // Sample buffer
+ int32_t* ptr2) { // Sample buffer
+ int n = 0;
+
+ // Separate the 32-bit variable input2 into two 16-bit integers (high 16 and
+ // low 16 bits), for using LATTICE_MUL_32_32_RSFT16 in the loop.
+ int16_t t16a = (int16_t)(input2 >> 16);
+ int16_t t16b = (int16_t)input2;
+ if (t16b < 0) t16a++;
+
+ // The loop filtering the samples *ptr0, *ptr1, *ptr2 with filter coefficients
+ // input0, input1, and input2.
+ for(n = 0; n < HALF_SUBFRAMELEN - 1; n++, ptr0++, ptr1++, ptr2++) {
+ int32_t tmp32a = 0;
+ int32_t tmp32b = 0;
+
+ // Calculate *ptr2 = input2 * (*ptr2 + input0 * (*ptr0));
+ tmp32a = WEBRTC_SPL_MUL_16_32_RSFT15(input0, *ptr0); // Q15 * Q15 >> 15 = Q15
+ tmp32b = *ptr2 + tmp32a; // Q15 + Q15 = Q15
+ *ptr2 = LATTICE_MUL_32_32_RSFT16(t16a, t16b, tmp32b);
+
+ // Calculate *ptr1 = input1 * (*ptr0) + input0 * (*ptr2);
+ tmp32a = WEBRTC_SPL_MUL_16_32_RSFT15(input1, *ptr0); // Q15*Q15>>15 = Q15
+ tmp32b = WEBRTC_SPL_MUL_16_32_RSFT15(input0, *ptr2); // Q15*Q15>>15 = Q15
+ *ptr1 = tmp32a + tmp32b; // Q15 + Q15 = Q15
+ }
+}
+
+/* filter the signal using normalized lattice filter */
+/* MA filter */
+void WebRtcIsacfix_NormLatticeFilterMa(size_t orderCoef,
+ int32_t *stateGQ15,
+ int16_t *lat_inQ0,
+ int16_t *filt_coefQ15,
+ int32_t *gain_lo_hiQ17,
+ int16_t lo_hi,
+ int16_t *lat_outQ9)
+{
+ int16_t sthQ15[MAX_AR_MODEL_ORDER];
+ int16_t cthQ15[MAX_AR_MODEL_ORDER];
+
+ int u, n;
+ size_t i, k;
+ int16_t temp2,temp3;
+ size_t ord_1 = orderCoef+1;
+ int32_t inv_cthQ16[MAX_AR_MODEL_ORDER];
+
+ int32_t gain32, fQtmp;
+ int16_t gain16;
+ int16_t gain_sh;
+
+ int32_t tmp32, tmp32b;
+ int32_t fQ15vec[HALF_SUBFRAMELEN];
+ int32_t gQ15[MAX_AR_MODEL_ORDER+1][HALF_SUBFRAMELEN];
+ int16_t sh;
+ int16_t t16a;
+ int16_t t16b;
+
+ for (u=0;u<SUBFRAMES;u++)
+ {
+ int32_t temp1 = u * HALF_SUBFRAMELEN;
+
+ /* set the Direct Form coefficients */
+ temp2 = (int16_t)(u * orderCoef);
+ temp3 = (int16_t)(2 * u + lo_hi);
+
+ /* compute lattice filter coefficients */
+ memcpy(sthQ15, &filt_coefQ15[temp2], orderCoef * sizeof(int16_t));
+
+ WebRtcSpl_SqrtOfOneMinusXSquared(sthQ15, orderCoef, cthQ15);
+
+ /* compute the gain */
+ gain32 = gain_lo_hiQ17[temp3];
+ gain_sh = WebRtcSpl_NormW32(gain32);
+ gain32 <<= gain_sh; // Q(17+gain_sh)
+
+ for (k=0;k<orderCoef;k++)
+ {
+ gain32 = WEBRTC_SPL_MUL_16_32_RSFT15(cthQ15[k], gain32); //Q15*Q(17+gain_sh)>>15 = Q(17+gain_sh)
+ inv_cthQ16[k] = WebRtcSpl_DivW32W16((int32_t)2147483647, cthQ15[k]); // 1/cth[k] in Q31/Q15 = Q16
+ }
+ gain16 = (int16_t)(gain32 >> 16); // Q(1+gain_sh).
+
+ /* normalized lattice filter */
+ /*****************************/
+
+ /* initial conditions */
+ for (i=0;i<HALF_SUBFRAMELEN;i++)
+ {
+ fQ15vec[i] = lat_inQ0[i + temp1] << 15; // Q15
+ gQ15[0][i] = lat_inQ0[i + temp1] << 15; // Q15
+ }
+
+
+ fQtmp = fQ15vec[0];
+
+ /* get the state of f&g for the first input, for all orders */
+ for (i=1;i<ord_1;i++)
+ {
+ // Calculate f[i][0] = inv_cth[i-1]*(f[i-1][0] + sth[i-1]*stateG[i-1]);
+ tmp32 = WEBRTC_SPL_MUL_16_32_RSFT15(sthQ15[i-1], stateGQ15[i-1]);//Q15*Q15>>15 = Q15
+ tmp32b= fQtmp + tmp32; //Q15+Q15=Q15
+ tmp32 = inv_cthQ16[i-1]; //Q16
+ t16a = (int16_t)(tmp32 >> 16);
+ t16b = (int16_t)(tmp32 - (t16a << 16));
+ if (t16b<0) t16a++;
+ tmp32 = LATTICE_MUL_32_32_RSFT16(t16a, t16b, tmp32b);
+ fQtmp = tmp32; // Q15
+
+ // Calculate g[i][0] = cth[i-1]*stateG[i-1] + sth[i-1]* f[i][0];
+ tmp32 = WEBRTC_SPL_MUL_16_32_RSFT15(cthQ15[i-1], stateGQ15[i-1]); //Q15*Q15>>15 = Q15
+ tmp32b = WEBRTC_SPL_MUL_16_32_RSFT15(sthQ15[i-1], fQtmp); //Q15*Q15>>15 = Q15
+ tmp32 = tmp32 + tmp32b;//Q15+Q15 = Q15
+ gQ15[i][0] = tmp32; // Q15
+ }
+
+ /* filtering */
+ /* save the states */
+ for(k=0;k<orderCoef;k++)
+ {
+ // for 0 <= n < HALF_SUBFRAMELEN - 1:
+ // f[k+1][n+1] = inv_cth[k]*(f[k][n+1] + sth[k]*g[k][n]);
+ // g[k+1][n+1] = cth[k]*g[k][n] + sth[k]* f[k+1][n+1];
+ WebRtcIsacfix_FilterMaLoopFix(sthQ15[k], cthQ15[k], inv_cthQ16[k],
+ &gQ15[k][0], &gQ15[k+1][1], &fQ15vec[1]);
+ }
+
+ fQ15vec[0] = fQtmp;
+
+ for(n=0;n<HALF_SUBFRAMELEN;n++)
+ {
+ //gain32 >>= gain_sh; // Q(17+gain_sh) -> Q17
+ tmp32 = WEBRTC_SPL_MUL_16_32_RSFT16(gain16, fQ15vec[n]); //Q(1+gain_sh)*Q15>>16 = Q(gain_sh)
+ sh = 9-gain_sh; //number of needed shifts to reach Q9
+ t16a = (int16_t) WEBRTC_SPL_SHIFT_W32(tmp32, sh);
+ lat_outQ9[n + temp1] = t16a;
+ }
+
+ /* save the states */
+ for (i=0;i<ord_1;i++)
+ {
+ stateGQ15[i] = gQ15[i][HALF_SUBFRAMELEN-1];
+ }
+    // Process the next subframe.
+ }
+
+ return;
+}
+
+// Left shift of an int32_t that's allowed to overflow. (It's still undefined
+// behavior, so not a good idea; this just makes UBSan ignore the violation, so
+// that our old code can continue to do what it's always been doing.)
+static inline int32_t RTC_NO_SANITIZE("shift")
+ OverflowingLShiftS32(int32_t x, int shift) {
+ return x << shift;
+}
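+
+// Editorial note (not part of the original source): on common
+// two's-complement implementations, an equivalent but fully defined form
+// would be `return (int32_t)((uint32_t)x << shift);`, which wraps modulo
+// 2^32; the code above keeps the historical signed shift and merely tells
+// UBSan to ignore it.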
+
+/* ----------------AR filter-------------------------*/
+/* filter the signal using normalized lattice filter */
+void WebRtcIsacfix_NormLatticeFilterAr(size_t orderCoef,
+ int16_t *stateGQ0,
+ int32_t *lat_inQ25,
+ int16_t *filt_coefQ15,
+ int32_t *gain_lo_hiQ17,
+ int16_t lo_hi,
+ int16_t *lat_outQ0)
+{
+ size_t ii, k, i;
+ int n, u;
+ int16_t sthQ15[MAX_AR_MODEL_ORDER];
+ int16_t cthQ15[MAX_AR_MODEL_ORDER];
+ int32_t tmp32;
+
+
+ int16_t tmpAR;
+ int16_t ARfQ0vec[HALF_SUBFRAMELEN];
+ int16_t ARgQ0vec[MAX_AR_MODEL_ORDER+1];
+
+ int32_t inv_gain32;
+ int16_t inv_gain16;
+ int16_t den16;
+ int16_t sh;
+
+ int16_t temp2,temp3;
+ size_t ord_1 = orderCoef+1;
+
+ for (u=0;u<SUBFRAMES;u++)
+ {
+ int32_t temp1 = u * HALF_SUBFRAMELEN;
+
+ //set the denominator and numerator of the Direct Form
+ temp2 = (int16_t)(u * orderCoef);
+ temp3 = (int16_t)(2 * u + lo_hi);
+
+ for (ii=0; ii<orderCoef; ii++) {
+ sthQ15[ii] = filt_coefQ15[temp2+ii];
+ }
+
+ WebRtcSpl_SqrtOfOneMinusXSquared(sthQ15, orderCoef, cthQ15);
+
+ // Originally, this line was assumed to never overflow, since "[s]imulation
+ // of the 25 files shows that maximum value in the vector gain_lo_hiQ17[]
+ // is 441344, which means that it is log2((2^31)/441344) = 12.2 shifting
+ // bits from saturation. Therefore, it should be safe to use Q27 instead of
+ // Q17." However, a fuzzer test succeeded in provoking an overflow here,
+ // which we ignore on the theory that only "abnormal" inputs cause
+ // overflow.
+ tmp32 = OverflowingLShiftS32(gain_lo_hiQ17[temp3], 10); // Q27
+
+ for (k=0;k<orderCoef;k++) {
+ tmp32 = WEBRTC_SPL_MUL_16_32_RSFT15(cthQ15[k], tmp32); // Q15*Q27>>15 = Q27
+ }
+
+ sh = WebRtcSpl_NormW32(tmp32); // tmp32 is the gain
+ den16 = (int16_t) WEBRTC_SPL_SHIFT_W32(tmp32, sh-16); //Q(27+sh-16) = Q(sh+11) (all 16 bits are value bits)
+ inv_gain32 = WebRtcSpl_DivW32W16((int32_t)2147483647, den16); // 1/gain in Q31/Q(sh+11) = Q(20-sh)
+
+ //initial conditions
+ inv_gain16 = (int16_t)(inv_gain32 >> 2); // 1/gain in Q(20-sh-2) = Q(18-sh)
+
+ for (i=0;i<HALF_SUBFRAMELEN;i++)
+ {
+ tmp32 = OverflowingLShiftS32(lat_inQ25[i + temp1], 1); // Q25->Q26
+ tmp32 = WEBRTC_SPL_MUL_16_32_RSFT16(inv_gain16, tmp32); //lat_in[]*inv_gain in (Q(18-sh)*Q26)>>16 = Q(28-sh)
+ tmp32 = WEBRTC_SPL_SHIFT_W32(tmp32, -(28-sh)); // lat_in[]*inv_gain in Q0
+
+ ARfQ0vec[i] = (int16_t)WebRtcSpl_SatW32ToW16(tmp32); // Q0
+ }
+
+ // Get the state of f & g for the first input, for all orders.
+ for (i = orderCoef; i > 0; i--)
+ {
+ tmp32 = (cthQ15[i - 1] * ARfQ0vec[0] - sthQ15[i - 1] * stateGQ0[i - 1] +
+ 16384) >> 15;
+ tmpAR = (int16_t)WebRtcSpl_SatW32ToW16(tmp32); // Q0
+
+ tmp32 = (sthQ15[i - 1] * ARfQ0vec[0] + cthQ15[i - 1] * stateGQ0[i - 1] +
+ 16384) >> 15;
+ ARgQ0vec[i] = (int16_t)WebRtcSpl_SatW32ToW16(tmp32); // Q0
+ ARfQ0vec[0] = tmpAR;
+ }
+ ARgQ0vec[0] = ARfQ0vec[0];
+
+ // Filter ARgQ0vec[] and ARfQ0vec[] through coefficients cthQ15[] and sthQ15[].
+ WebRtcIsacfix_FilterArLoop(ARgQ0vec, ARfQ0vec, cthQ15, sthQ15, orderCoef);
+
+ for(n=0;n<HALF_SUBFRAMELEN;n++)
+ {
+ lat_outQ0[n + temp1] = ARfQ0vec[n];
+ }
+
+
+ /* cannot use memcpy in the following */
+
+ for (i=0;i<ord_1;i++)
+ {
+ stateGQ0[i] = ARgQ0vec[i];
+ }
+ }
+
+ return;
+}
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/lattice_armv7.S b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/lattice_armv7.S
new file mode 100644
index 0000000000..4c63227d7d
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/lattice_armv7.S
@@ -0,0 +1,77 @@
+@
+@ Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+@
+@ Use of this source code is governed by a BSD-style license
+@ that can be found in the LICENSE file in the root of the source
+@ tree. An additional intellectual property rights grant can be found
+@ in the file PATENTS. All contributing project authors may
+@ be found in the AUTHORS file in the root of the source tree.
+@
+
+@ Contains a function for the core loop in the normalized lattice AR
+@ filter routine for iSAC codec, optimized for ARMv7 platforms.
+@
+@ Output is bit-exact with the reference C code in lattice_c.c
+@
+@ Register usage:
+@
+@ r0: &ar_g_Q0
+@ r1: &ar_f_Q0
+@ r2: &cth_Q15
+@ r3: &sth_Q15
+@ r4: outer loop counter
+@ r5: tmpAR
+@ r9: inner loop counter
+@ r12: constant #16384
+@ r6, r7, r8, r10, r11: scratch
+
+#include "modules/audio_coding/codecs/isac/fix/source/settings.h"
+#include "rtc_base/system/asm_defines.h"
+
+GLOBAL_FUNCTION WebRtcIsacfix_FilterArLoop
+.align 2
+DEFINE_FUNCTION WebRtcIsacfix_FilterArLoop
+ push {r4-r11}
+
+ add r1, #2 @ &ar_f_Q0[1]
+ mov r12, #16384
+ mov r4, #HALF_SUBFRAMELEN
+ sub r4, #1 @ Outer loop counter = HALF_SUBFRAMELEN - 1
+
+HALF_SUBFRAME_LOOP: @ for (n = 0; n < HALF_SUBFRAMELEN - 1; n++)
+
+ ldr r9, [sp, #32] @ Restore the inner loop counter to order_coef
+ ldrh r5, [r1] @ tmpAR = ar_f_Q0[n+1]
+ add r0, r9, asl #1 @ Restore r0 to &ar_g_Q0[order_coef]
+ add r2, r9, asl #1 @ Restore r2 to &cth_Q15[order_coef]
+ add r3, r9, asl #1 @ Restore r3 to &sth_Q15[order_coef]
+
+ORDER_COEF_LOOP: @ for (k = order_coef; k > 0; k--)
+
+ ldrh r7, [r3, #-2]! @ sth_Q15[k - 1]
+ ldrh r6, [r2, #-2]! @ cth_Q15[k - 1]
+
+ ldrh r8, [r0, #-2] @ ar_g_Q0[k - 1]
+ smlabb r11, r7, r5, r12 @ sth_Q15[k - 1] * tmpAR + 16384
+ smlabb r10, r6, r5, r12 @ cth_Q15[k - 1] * tmpAR + 16384
+ smulbb r7, r7, r8 @ sth_Q15[k - 1] * ar_g_Q0[k - 1]
+ smlabb r11, r6, r8, r11 @ cth_Q15[k - 1] * ar_g_Q0[k - 1] +
+ @ (sth_Q15[k - 1] * tmpAR + 16384)
+
+ sub r10, r10, r7 @ cth_Q15[k - 1] * tmpAR + 16384 -
+ @ (sth_Q15[k - 1] * ar_g_Q0[k - 1])
+ ssat r11, #16, r11, asr #15
+ ssat r5, #16, r10, asr #15
+ strh r11, [r0], #-2 @ Output: ar_g_Q0[k]
+
+ subs r9, #1
+ bgt ORDER_COEF_LOOP
+
+ strh r5, [r0] @ Output: ar_g_Q0[0] = tmpAR;
+ strh r5, [r1], #2 @ Output: ar_f_Q0[n+1] = tmpAR;
+
+ subs r4, #1
+ bne HALF_SUBFRAME_LOOP
+
+ pop {r4-r11}
+ bx lr
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/lattice_c.c b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/lattice_c.c
new file mode 100644
index 0000000000..43406612e8
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/lattice_c.c
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * Contains the core loop function for the lattice filter AR routine
+ * for iSAC codec.
+ *
+ */
+
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+#include "modules/audio_coding/codecs/isac/fix/source/settings.h"
+
+/* Filter ar_g_Q0[] and ar_f_Q0[] through an AR filter with coefficients
+ * cth_Q15[] and sth_Q15[].
+ */
+void WebRtcIsacfix_FilterArLoop(int16_t* ar_g_Q0, // Input samples
+ int16_t* ar_f_Q0, // Input samples
+ int16_t* cth_Q15, // Filter coefficients
+ int16_t* sth_Q15, // Filter coefficients
+ size_t order_coef) { // order of the filter
+ int n = 0;
+
+ for (n = 0; n < HALF_SUBFRAMELEN - 1; n++) {
+ size_t k = 0;
+ int16_t tmpAR = 0;
+ int32_t tmp32 = 0;
+ int32_t tmp32_2 = 0;
+
+ tmpAR = ar_f_Q0[n + 1];
+ for (k = order_coef; k > 0; k--) {
+ tmp32 = (cth_Q15[k - 1] * tmpAR - sth_Q15[k - 1] * ar_g_Q0[k - 1] +
+ 16384) >> 15;
+ tmp32_2 = (sth_Q15[k - 1] * tmpAR + cth_Q15[k - 1] * ar_g_Q0[k - 1] +
+ 16384) >> 15;
+ tmpAR = (int16_t)WebRtcSpl_SatW32ToW16(tmp32);
+ ar_g_Q0[k] = (int16_t)WebRtcSpl_SatW32ToW16(tmp32_2);
+ }
+ ar_f_Q0[n + 1] = tmpAR;
+ ar_g_Q0[0] = tmpAR;
+ }
+}
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/lattice_mips.c b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/lattice_mips.c
new file mode 100644
index 0000000000..3189726629
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/lattice_mips.c
@@ -0,0 +1,329 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "modules/audio_coding/codecs/isac/fix/source/settings.h"
+
+// Filter ar_g_Q0[] and ar_f_Q0[] through an AR filter with coefficients
+// cth_Q15[] and sth_Q15[].
+void WebRtcIsacfix_FilterArLoop(int16_t* ar_g_Q0, // Input samples
+ int16_t* ar_f_Q0, // Input samples
+ int16_t* cth_Q15, // Filter coefficients
+ int16_t* sth_Q15, // Filter coefficients
+ size_t order_coef) { // order of the filter
+ int n = 0;
+
+ for (n = 0; n < HALF_SUBFRAMELEN - 1; n++) {
+ int count = (int)(order_coef - 1);
+ int offset;
+#if !defined(MIPS_DSP_R1_LE)
+ int16_t* tmp_cth;
+ int16_t* tmp_sth;
+ int16_t* tmp_arg;
+ int32_t max_q16 = 0x7fff;
+ int32_t min_q16 = 0xffff8000;
+#endif
+ // Declare variables used as temporary registers.
+ int32_t r0, r1, r2, t0, t1, t2, t_ar;
+
+ __asm __volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "bltz %[count], 2f \n\t"
+ " lh %[t_ar], 0(%[tmp]) \n\t"
+ // Inner loop
+ "1: \n\t"
+ "sll %[offset], %[count], 1 \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "lhx %[r0], %[offset](%[cth_Q15]) \n\t"
+ "lhx %[r1], %[offset](%[sth_Q15]) \n\t"
+ "lhx %[r2], %[offset](%[ar_g_Q0]) \n\t"
+#else
+ "addu %[tmp_cth], %[cth_Q15], %[offset] \n\t"
+ "addu %[tmp_sth], %[sth_Q15], %[offset] \n\t"
+ "addu %[tmp_arg], %[ar_g_Q0], %[offset] \n\t"
+ "lh %[r0], 0(%[tmp_cth]) \n\t"
+ "lh %[r1], 0(%[tmp_sth]) \n\t"
+ "lh %[r2], 0(%[tmp_arg]) \n\t"
+#endif
+ "mul %[t0], %[r0], %[t_ar] \n\t"
+ "mul %[t1], %[r1], %[t_ar] \n\t"
+ "mul %[t2], %[r1], %[r2] \n\t"
+ "mul %[r0], %[r0], %[r2] \n\t"
+ "subu %[t0], %[t0], %[t2] \n\t"
+ "addu %[t1], %[t1], %[r0] \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "shra_r.w %[t1], %[t1], 15 \n\t"
+ "shra_r.w %[t0], %[t0], 15 \n\t"
+#else
+ "addiu %[t1], %[t1], 0x4000 \n\t"
+ "sra %[t1], %[t1], 15 \n\t"
+ "addiu %[t0], %[t0], 0x4000 \n\t"
+ "sra %[t0], %[t0], 15 \n\t"
+#endif
+ "addiu %[offset], %[offset], 2 \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "shll_s.w %[t1], %[t1], 16 \n\t"
+ "shll_s.w %[t_ar], %[t0], 16 \n\t"
+#else
+ "slt %[r0], %[t1], %[max_q16] \n\t"
+ "slt %[r1], %[t0], %[max_q16] \n\t"
+ "movz %[t1], %[max_q16], %[r0] \n\t"
+ "movz %[t0], %[max_q16], %[r1] \n\t"
+#endif
+ "addu %[offset], %[offset], %[ar_g_Q0] \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "sra %[t1], %[t1], 16 \n\t"
+ "sra %[t_ar], %[t_ar], 16 \n\t"
+#else
+ "slt %[r0], %[t1], %[min_q16] \n\t"
+ "slt %[r1], %[t0], %[min_q16] \n\t"
+ "movn %[t1], %[min_q16], %[r0] \n\t"
+ "movn %[t0], %[min_q16], %[r1] \n\t"
+ "addu %[t_ar], $zero, %[t0] \n\t"
+#endif
+ "sh %[t1], 0(%[offset]) \n\t"
+ "bgtz %[count], 1b \n\t"
+ " addiu %[count], %[count], -1 \n\t"
+ "2: \n\t"
+ "sh %[t_ar], 0(%[tmp]) \n\t"
+ "sh %[t_ar], 0(%[ar_g_Q0]) \n\t"
+ ".set pop \n\t"
+ : [t_ar] "=&r" (t_ar), [count] "+r" (count), [offset] "=&r" (offset),
+ [r0] "=&r" (r0), [r1] "=&r" (r1), [r2] "=&r" (r2), [t0] "=&r" (t0),
+#if !defined(MIPS_DSP_R1_LE)
+ [tmp_cth] "=&r" (tmp_cth), [tmp_sth] "=&r" (tmp_sth),
+ [tmp_arg] "=&r" (tmp_arg),
+#endif
+ [t1] "=&r" (t1), [t2] "=&r" (t2)
+ : [tmp] "r" (&ar_f_Q0[n+1]), [cth_Q15] "r" (cth_Q15),
+#if !defined(MIPS_DSP_R1_LE)
+ [max_q16] "r" (max_q16), [min_q16] "r" (min_q16),
+#endif
+ [sth_Q15] "r" (sth_Q15), [ar_g_Q0] "r" (ar_g_Q0)
+ : "memory", "hi", "lo"
+ );
+ }
+}
+
+// MIPS optimization of the inner loop used for function
+// WebRtcIsacfix_NormLatticeFilterMa(). It does:
+//
+// for 0 <= n < HALF_SUBFRAMELEN - 1:
+//   *ptr2 = input2 * ((*ptr2) + input0 * (*ptr0));
+// *ptr1 = input1 * (*ptr0) + input0 * (*ptr2);
+//
+// Note: the functions WebRtcIsacfix_FilterMaLoopMIPS and
+// WebRtcIsacfix_FilterMaLoopC are not bit-exact. The accuracy of the MIPS
+// function is the same or better.
+void WebRtcIsacfix_FilterMaLoopMIPS(int16_t input0, // Filter coefficient
+ int16_t input1, // Filter coefficient
+ int32_t input2, // Inverse coeff (1/input1)
+ int32_t* ptr0, // Sample buffer
+ int32_t* ptr1, // Sample buffer
+ int32_t* ptr2) { // Sample buffer
+#if defined(MIPS_DSP_R2_LE)
+  // MIPS DSPR2 version. The four available accumulators allow the loop to be
+  // unrolled four times. This variant is not bit-exact with
+  // WebRtcIsacfix_FilterMaLoopC, since we are exploiting 64-bit accumulators.
+  // The accuracy of the MIPS DSPR2 function is the same or better.
+ int n = (HALF_SUBFRAMELEN - 1) >> 2;
+ int m = (HALF_SUBFRAMELEN - 1) & 3;
+
+ int r0, r1, r2, r3;
+ int t0, t1, t2, t3;
+ int s0, s1, s2, s3;
+
+ __asm __volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "1: \n\t"
+ "lw %[r0], 0(%[ptr0]) \n\t"
+ "lw %[r1], 4(%[ptr0]) \n\t"
+ "lw %[r2], 8(%[ptr0]) \n\t"
+ "lw %[r3], 12(%[ptr0]) \n\t"
+ "mult $ac0, %[r0], %[input0] \n\t"
+ "mult $ac1, %[r1], %[input0] \n\t"
+ "mult $ac2, %[r2], %[input0] \n\t"
+ "mult $ac3, %[r3], %[input0] \n\t"
+ "lw %[t0], 0(%[ptr2]) \n\t"
+ "extr_rs.w %[s0], $ac0, 15 \n\t"
+ "extr_rs.w %[s1], $ac1, 15 \n\t"
+ "extr_rs.w %[s2], $ac2, 15 \n\t"
+ "extr_rs.w %[s3], $ac3, 15 \n\t"
+ "lw %[t1], 4(%[ptr2]) \n\t"
+ "lw %[t2], 8(%[ptr2]) \n\t"
+ "lw %[t3], 12(%[ptr2]) \n\t"
+ "addu %[t0], %[t0], %[s0] \n\t"
+ "addu %[t1], %[t1], %[s1] \n\t"
+ "addu %[t2], %[t2], %[s2] \n\t"
+ "addu %[t3], %[t3], %[s3] \n\t"
+ "mult $ac0, %[t0], %[input2] \n\t"
+ "mult $ac1, %[t1], %[input2] \n\t"
+ "mult $ac2, %[t2], %[input2] \n\t"
+ "mult $ac3, %[t3], %[input2] \n\t"
+ "addiu %[ptr0], %[ptr0], 16 \n\t"
+ "extr_rs.w %[t0], $ac0, 16 \n\t"
+ "extr_rs.w %[t1], $ac1, 16 \n\t"
+ "extr_rs.w %[t2], $ac2, 16 \n\t"
+ "extr_rs.w %[t3], $ac3, 16 \n\t"
+ "addiu %[n], %[n], -1 \n\t"
+ "mult $ac0, %[r0], %[input1] \n\t"
+ "mult $ac1, %[r1], %[input1] \n\t"
+ "mult $ac2, %[r2], %[input1] \n\t"
+ "mult $ac3, %[r3], %[input1] \n\t"
+ "sw %[t0], 0(%[ptr2]) \n\t"
+ "extr_rs.w %[s0], $ac0, 15 \n\t"
+ "extr_rs.w %[s1], $ac1, 15 \n\t"
+ "extr_rs.w %[s2], $ac2, 15 \n\t"
+ "extr_rs.w %[s3], $ac3, 15 \n\t"
+ "sw %[t1], 4(%[ptr2]) \n\t"
+ "sw %[t2], 8(%[ptr2]) \n\t"
+ "sw %[t3], 12(%[ptr2]) \n\t"
+ "mult $ac0, %[t0], %[input0] \n\t"
+ "mult $ac1, %[t1], %[input0] \n\t"
+ "mult $ac2, %[t2], %[input0] \n\t"
+ "mult $ac3, %[t3], %[input0] \n\t"
+ "addiu %[ptr2], %[ptr2], 16 \n\t"
+ "extr_rs.w %[t0], $ac0, 15 \n\t"
+ "extr_rs.w %[t1], $ac1, 15 \n\t"
+ "extr_rs.w %[t2], $ac2, 15 \n\t"
+ "extr_rs.w %[t3], $ac3, 15 \n\t"
+ "addu %[t0], %[t0], %[s0] \n\t"
+ "addu %[t1], %[t1], %[s1] \n\t"
+ "addu %[t2], %[t2], %[s2] \n\t"
+ "addu %[t3], %[t3], %[s3] \n\t"
+ "sw %[t0], 0(%[ptr1]) \n\t"
+ "sw %[t1], 4(%[ptr1]) \n\t"
+ "sw %[t2], 8(%[ptr1]) \n\t"
+ "sw %[t3], 12(%[ptr1]) \n\t"
+ "bgtz %[n], 1b \n\t"
+ " addiu %[ptr1], %[ptr1], 16 \n\t"
+ "beq %[m], %0, 3f \n\t"
+ " nop \n\t"
+ "2: \n\t"
+ "lw %[r0], 0(%[ptr0]) \n\t"
+ "lw %[t0], 0(%[ptr2]) \n\t"
+ "addiu %[ptr0], %[ptr0], 4 \n\t"
+ "mult $ac0, %[r0], %[input0] \n\t"
+ "mult $ac1, %[r0], %[input1] \n\t"
+ "extr_rs.w %[r1], $ac0, 15 \n\t"
+ "extr_rs.w %[t1], $ac1, 15 \n\t"
+ "addu %[t0], %[t0], %[r1] \n\t"
+ "mult $ac0, %[t0], %[input2] \n\t"
+ "extr_rs.w %[t0], $ac0, 16 \n\t"
+ "sw %[t0], 0(%[ptr2]) \n\t"
+ "mult $ac0, %[t0], %[input0] \n\t"
+ "addiu %[ptr2], %[ptr2], 4 \n\t"
+ "addiu %[m], %[m], -1 \n\t"
+ "extr_rs.w %[t0], $ac0, 15 \n\t"
+ "addu %[t0], %[t0], %[t1] \n\t"
+ "sw %[t0], 0(%[ptr1]) \n\t"
+ "bgtz %[m], 2b \n\t"
+ " addiu %[ptr1], %[ptr1], 4 \n\t"
+ "3: \n\t"
+ ".set pop \n\t"
+ : [r0] "=&r" (r0), [r1] "=&r" (r1), [r2] "=&r" (r2),
+ [r3] "=&r" (r3), [t0] "=&r" (t0), [t1] "=&r" (t1),
+ [t2] "=&r" (t2), [t3] "=&r" (t3), [s0] "=&r" (s0),
+ [s1] "=&r" (s1), [s2] "=&r" (s2), [s3] "=&r" (s3),
+ [ptr0] "+r" (ptr0), [ptr1] "+r" (ptr1), [m] "+r" (m),
+ [ptr2] "+r" (ptr2), [n] "+r" (n)
+ : [input0] "r" (input0), [input1] "r" (input1),
+ [input2] "r" (input2)
+ : "memory", "hi", "lo", "$ac1hi", "$ac1lo", "$ac2hi",
+ "$ac2lo", "$ac3hi", "$ac3lo"
+ );
+#else
+ // Non-DSPR2 version of the function. Avoiding the accumulator usage due to
+ // large latencies. This variant is bit-exact with C code.
+ int n = HALF_SUBFRAMELEN - 1;
+ int32_t t16a, t16b;
+ int32_t r0, r1, r2, r3, r4;
+
+ __asm __volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "sra %[t16a], %[input2], 16 \n\t"
+ "andi %[t16b], %[input2], 0xFFFF \n\t"
+#if defined(MIPS32R2_LE)
+ "seh %[t16b], %[t16b] \n\t"
+ "seh %[input0], %[input0] \n\t"
+ "seh %[input1], %[input1] \n\t"
+#else
+ "sll %[t16b], %[t16b], 16 \n\t"
+ "sra %[t16b], %[t16b], 16 \n\t"
+ "sll %[input0], %[input0], 16 \n\t"
+ "sra %[input0], %[input0], 16 \n\t"
+ "sll %[input1], %[input1], 16 \n\t"
+ "sra %[input1], %[input1], 16 \n\t"
+#endif
+ "addiu %[r0], %[t16a], 1 \n\t"
+ "slt %[r1], %[t16b], $zero \n\t"
+ "movn %[t16a], %[r0], %[r1] \n\t"
+ "1: \n\t"
+ "lw %[r0], 0(%[ptr0]) \n\t"
+ "lw %[r1], 0(%[ptr2]) \n\t"
+ "addiu %[ptr0], %[ptr0], 4 \n\t"
+ "sra %[r2], %[r0], 16 \n\t"
+ "andi %[r0], %[r0], 0xFFFF \n\t"
+ "mul %[r3], %[r2], %[input0] \n\t"
+ "mul %[r4], %[r0], %[input0] \n\t"
+ "mul %[r2], %[r2], %[input1] \n\t"
+ "mul %[r0], %[r0], %[input1] \n\t"
+ "addiu %[ptr2], %[ptr2], 4 \n\t"
+ "sll %[r3], %[r3], 1 \n\t"
+ "sra %[r4], %[r4], 1 \n\t"
+ "addiu %[r4], %[r4], 0x2000 \n\t"
+ "sra %[r4], %[r4], 14 \n\t"
+ "addu %[r3], %[r3], %[r4] \n\t"
+ "addu %[r1], %[r1], %[r3] \n\t"
+ "sra %[r3], %[r1], 16 \n\t"
+ "andi %[r4], %[r1], 0xFFFF \n\t"
+ "sra %[r4], %[r4], 1 \n\t"
+ "mul %[r1], %[r1], %[t16a] \n\t"
+ "mul %[r3], %[r3], %[t16b] \n\t"
+ "mul %[r4], %[r4], %[t16b] \n\t"
+ "sll %[r2], %[r2], 1 \n\t"
+ "sra %[r0], %[r0], 1 \n\t"
+ "addiu %[r0], %[r0], 0x2000 \n\t"
+ "sra %[r0], %[r0], 14 \n\t"
+ "addu %[r0], %[r0], %[r2] \n\t"
+ "addiu %[n], %[n], -1 \n\t"
+ "addu %[r1], %[r1], %[r3] \n\t"
+ "addiu %[r4], %[r4], 0x4000 \n\t"
+ "sra %[r4], %[r4], 15 \n\t"
+ "addu %[r1], %[r1], %[r4] \n\t"
+ "sra %[r2], %[r1], 16 \n\t"
+ "andi %[r3], %[r1], 0xFFFF \n\t"
+ "mul %[r3], %[r3], %[input0] \n\t"
+ "mul %[r2], %[r2], %[input0] \n\t"
+ "sw %[r1], -4(%[ptr2]) \n\t"
+ "sra %[r3], %[r3], 1 \n\t"
+ "addiu %[r3], %[r3], 0x2000 \n\t"
+ "sra %[r3], %[r3], 14 \n\t"
+ "addu %[r0], %[r0], %[r3] \n\t"
+ "sll %[r2], %[r2], 1 \n\t"
+ "addu %[r0], %[r0], %[r2] \n\t"
+ "sw %[r0], 0(%[ptr1]) \n\t"
+ "bgtz %[n], 1b \n\t"
+ " addiu %[ptr1], %[ptr1], 4 \n\t"
+ ".set pop \n\t"
+ : [t16a] "=&r" (t16a), [t16b] "=&r" (t16b), [r0] "=&r" (r0),
+ [r1] "=&r" (r1), [r2] "=&r" (r2), [r3] "=&r" (r3),
+ [r4] "=&r" (r4), [ptr0] "+r" (ptr0), [ptr1] "+r" (ptr1),
+ [ptr2] "+r" (ptr2), [n] "+r" (n)
+ : [input0] "r" (input0), [input1] "r" (input1),
+ [input2] "r" (input2)
+ : "hi", "lo", "memory"
+ );
+#endif
+}
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/lattice_neon.c b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/lattice_neon.c
new file mode 100644
index 0000000000..8ea9b63578
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/lattice_neon.c
@@ -0,0 +1,195 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <arm_neon.h>
+
+#include "modules/audio_coding/codecs/isac/fix/source/codec.h"
+#include "modules/audio_coding/codecs/isac/fix/source/settings.h"
+
+// Contains a function for the core loop in the normalized lattice MA
+// filter routine for iSAC codec, optimized for ARM Neon platform.
+// It does:
+// for 0 <= n < HALF_SUBFRAMELEN - 1:
+// *ptr2 = input2 * ((*ptr2) + input0 * (*ptr0));
+// *ptr1 = input1 * (*ptr0) + input0 * (*ptr2);
+// Output is not bit-exact with the reference C code, due to the replacement
+// of WEBRTC_SPL_MUL_16_32_RSFT15 and LATTICE_MUL_32_32_RSFT16 with Neon
+// instructions. The difference should not be bigger than 1.
+void WebRtcIsacfix_FilterMaLoopNeon(int16_t input0, // Filter coefficient
+ int16_t input1, // Filter coefficient
+ int32_t input2, // Inverse coefficient
+ int32_t* ptr0, // Sample buffer
+ int32_t* ptr1, // Sample buffer
+ int32_t* ptr2) // Sample buffer
+{
+ int n = 0;
+ int loop = (HALF_SUBFRAMELEN - 1) >> 3;
+ int loop_tail = (HALF_SUBFRAMELEN - 1) & 0x7;
+
+ int32x4_t input0_v = vdupq_n_s32((int32_t)input0 << 16);
+ int32x4_t input1_v = vdupq_n_s32((int32_t)input1 << 16);
+ int32x4_t input2_v = vdupq_n_s32(input2);
+ int32x4_t tmp0a, tmp1a, tmp2a, tmp3a;
+ int32x4_t tmp0b, tmp1b, tmp2b, tmp3b;
+ int32x4_t ptr0va, ptr1va, ptr2va;
+ int32x4_t ptr0vb, ptr1vb, ptr2vb;
+
+ int64x2_t tmp2al_low, tmp2al_high, tmp2bl_low, tmp2bl_high;
+ // Unroll to process 8 samples at once.
+ for (n = 0; n < loop; n++) {
+ ptr0va = vld1q_s32(ptr0);
+ ptr0vb = vld1q_s32(ptr0 + 4);
+ ptr0 += 8;
+
+ ptr2va = vld1q_s32(ptr2);
+ ptr2vb = vld1q_s32(ptr2 + 4);
+
+ // Calculate tmp0 = (*ptr0) * input0.
+ tmp0a = vqrdmulhq_s32(ptr0va, input0_v);
+ tmp0b = vqrdmulhq_s32(ptr0vb, input0_v);
+
+ // Calculate tmp1 = (*ptr0) * input1.
+ tmp1a = vqrdmulhq_s32(ptr0va, input1_v);
+ tmp1b = vqrdmulhq_s32(ptr0vb, input1_v);
+
+ // Calculate tmp2 = tmp0 + *(ptr2).
+ tmp2a = vaddq_s32(tmp0a, ptr2va);
+ tmp2b = vaddq_s32(tmp0b, ptr2vb);
+
+ // Calculate *ptr2 = input2 * tmp2.
+ tmp2al_low = vmull_s32(vget_low_s32(tmp2a), vget_low_s32(input2_v));
+#if defined(WEBRTC_ARCH_ARM64)
+ tmp2al_high = vmull_high_s32(tmp2a, input2_v);
+#else
+ tmp2al_high = vmull_s32(vget_high_s32(tmp2a), vget_high_s32(input2_v));
+#endif
+ ptr2va = vcombine_s32(vrshrn_n_s64(tmp2al_low, 16),
+ vrshrn_n_s64(tmp2al_high, 16));
+
+ tmp2bl_low = vmull_s32(vget_low_s32(tmp2b), vget_low_s32(input2_v));
+#if defined(WEBRTC_ARCH_ARM64)
+ tmp2bl_high = vmull_high_s32(tmp2b, input2_v);
+#else
+ tmp2bl_high = vmull_s32(vget_high_s32(tmp2b), vget_high_s32(input2_v));
+#endif
+ ptr2vb = vcombine_s32(vrshrn_n_s64(tmp2bl_low, 16),
+ vrshrn_n_s64(tmp2bl_high, 16));
+
+ vst1q_s32(ptr2, ptr2va);
+ vst1q_s32(ptr2 + 4, ptr2vb);
+ ptr2 += 8;
+
+ // Calculate tmp3 = ptr2v * input0.
+ tmp3a = vqrdmulhq_s32(ptr2va, input0_v);
+ tmp3b = vqrdmulhq_s32(ptr2vb, input0_v);
+
+ // Calculate *ptr1 = tmp1 + tmp3.
+ ptr1va = vaddq_s32(tmp1a, tmp3a);
+ ptr1vb = vaddq_s32(tmp1b, tmp3b);
+
+ vst1q_s32(ptr1, ptr1va);
+ vst1q_s32(ptr1 + 4, ptr1vb);
+ ptr1 += 8;
+ }
+
+ // Process four more samples.
+ if (loop_tail & 0x4) {
+ ptr0va = vld1q_s32(ptr0);
+ ptr2va = vld1q_s32(ptr2);
+ ptr0 += 4;
+
+ // Calculate tmp0 = (*ptr0) * input0.
+ tmp0a = vqrdmulhq_s32(ptr0va, input0_v);
+
+ // Calculate tmp1 = (*ptr0) * input1.
+ tmp1a = vqrdmulhq_s32(ptr0va, input1_v);
+
+ // Calculate tmp2 = tmp0 + *(ptr2).
+ tmp2a = vaddq_s32(tmp0a, ptr2va);
+
+ // Calculate *ptr2 = input2 * tmp2.
+ tmp2al_low = vmull_s32(vget_low_s32(tmp2a), vget_low_s32(input2_v));
+
+#if defined(WEBRTC_ARCH_ARM64)
+ tmp2al_high = vmull_high_s32(tmp2a, input2_v);
+#else
+ tmp2al_high = vmull_s32(vget_high_s32(tmp2a), vget_high_s32(input2_v));
+#endif
+ ptr2va = vcombine_s32(vrshrn_n_s64(tmp2al_low, 16),
+ vrshrn_n_s64(tmp2al_high, 16));
+
+ vst1q_s32(ptr2, ptr2va);
+ ptr2 += 4;
+
+ // Calculate tmp3 = *(ptr2) * input0.
+ tmp3a = vqrdmulhq_s32(ptr2va, input0_v);
+
+ // Calculate *ptr1 = tmp1 + tmp3.
+ ptr1va = vaddq_s32(tmp1a, tmp3a);
+
+ vst1q_s32(ptr1, ptr1va);
+ ptr1 += 4;
+ }
+
+ // Process two more samples.
+ if (loop_tail & 0x2) {
+ int32x2_t ptr0v_tail, ptr2v_tail, ptr1v_tail;
+ int32x2_t tmp0_tail, tmp1_tail, tmp2_tail, tmp3_tail;
+ int64x2_t tmp2l_tail;
+ ptr0v_tail = vld1_s32(ptr0);
+ ptr2v_tail = vld1_s32(ptr2);
+ ptr0 += 2;
+
+ // Calculate tmp0 = (*ptr0) * input0.
+ tmp0_tail = vqrdmulh_s32(ptr0v_tail, vget_low_s32(input0_v));
+
+ // Calculate tmp1 = (*ptr0) * input1.
+ tmp1_tail = vqrdmulh_s32(ptr0v_tail, vget_low_s32(input1_v));
+
+ // Calculate tmp2 = tmp0 + *(ptr2).
+ tmp2_tail = vadd_s32(tmp0_tail, ptr2v_tail);
+
+ // Calculate *ptr2 = input2 * tmp2.
+ tmp2l_tail = vmull_s32(tmp2_tail, vget_low_s32(input2_v));
+ ptr2v_tail = vrshrn_n_s64(tmp2l_tail, 16);
+
+ vst1_s32(ptr2, ptr2v_tail);
+ ptr2 += 2;
+
+ // Calculate tmp3 = *(ptr2) * input0.
+ tmp3_tail = vqrdmulh_s32(ptr2v_tail, vget_low_s32(input0_v));
+
+ // Calculate *ptr1 = tmp1 + tmp3.
+ ptr1v_tail = vadd_s32(tmp1_tail, tmp3_tail);
+
+ vst1_s32(ptr1, ptr1v_tail);
+ ptr1 += 2;
+ }
+
+ // Process one more sample.
+ if (loop_tail & 0x1) {
+ int16_t t16a = (int16_t)(input2 >> 16);
+ int16_t t16b = (int16_t)input2;
+ if (t16b < 0) t16a++;
+ int32_t tmp32a;
+ int32_t tmp32b;
+
+ // Calculate *ptr2 = input2 * (*ptr2 + input0 * (*ptr0)).
+ tmp32a = WEBRTC_SPL_MUL_16_32_RSFT15(input0, *ptr0);
+ tmp32b = *ptr2 + tmp32a;
+ *ptr2 = (int32_t)(WEBRTC_SPL_MUL(t16a, tmp32b) +
+ (WEBRTC_SPL_MUL_16_32_RSFT16(t16b, tmp32b)));
+
+ // Calculate *ptr1 = input1 * (*ptr0) + input0 * (*ptr2).
+ tmp32a = WEBRTC_SPL_MUL_16_32_RSFT15(input1, *ptr0);
+ tmp32b = WEBRTC_SPL_MUL_16_32_RSFT15(input0, *ptr2);
+ *ptr1 = tmp32a + tmp32b;
+ }
+}
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/lpc_masking_model.c b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/lpc_masking_model.c
new file mode 100644
index 0000000000..f151cd1c88
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/lpc_masking_model.c
@@ -0,0 +1,949 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * lpc_masking_model.c
+ *
+ * LPC analysis and filtering functions
+ *
+ */
+
+#include "lpc_masking_model.h"
+
+#include <limits.h> /* For LLONG_MAX and LLONG_MIN. */
+
+#include "modules/audio_coding/codecs/isac/fix/source/codec.h"
+#include "modules/audio_coding/codecs/isac/fix/source/entropy_coding.h"
+#include "modules/audio_coding/codecs/isac/fix/source/settings.h"
+
+/* The conversion is implemented by the step-down algorithm */
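+/* Editorial note (not part of the original source): each pass of the
+   step-down recursion below takes the order-(m+1) polynomial a[1..m+1],
+   reads off the reflection coefficient k = a[m+1], and computes
+
+     a'[j] = (a[j] - k * a[m+1-j]) / (1 - k^2),   j = 1..m,
+
+   so that a'[m] becomes the next reflection coefficient. In fixed point,
+   the Q27 numerator is divided by (1 - k^2) in Q15, giving the Q12
+   intermediates noted in the comments below. */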
+void WebRtcSpl_AToK_JSK(
+ int16_t *a16, /* Q11 */
+ int16_t useOrder,
+ int16_t *k16 /* Q15 */
+ )
+{
+ int m, k;
+ int32_t tmp32[MAX_AR_MODEL_ORDER];
+ int32_t tmp32b;
+ int32_t tmp_inv_denum32;
+ int16_t tmp_inv_denum16;
+
+ k16[useOrder-1] = a16[useOrder] << 4; // Q11<<4 => Q15
+
+ for (m=useOrder-1; m>0; m--) {
+ // (1 - k^2) in Q30
+ tmp_inv_denum32 = 1073741823 - k16[m] * k16[m];
+ tmp_inv_denum16 = (int16_t)(tmp_inv_denum32 >> 15); // (1 - k^2) in Q15.
+
+ for (k=1; k<=m; k++) {
+ tmp32b = (a16[k] << 16) - ((k16[m] * a16[m - k + 1]) << 1);
+
+ tmp32[k] = WebRtcSpl_DivW32W16(tmp32b, tmp_inv_denum16); //Q27/Q15 = Q12
+ }
+
+ for (k=1; k<m; k++) {
+ a16[k] = (int16_t)(tmp32[k] >> 1); // Q12>>1 => Q11
+ }
+
+ tmp32[m] = WEBRTC_SPL_SAT(4092, tmp32[m], -4092);
+ k16[m - 1] = (int16_t)(tmp32[m] << 3); // Q12<<3 => Q15
+ }
+
+ return;
+}
+
+
+
+
+
+int16_t WebRtcSpl_LevinsonW32_JSK(
+ int32_t *R, /* (i) Autocorrelation of length >= order+1 */
+ int16_t *A, /* (o) A[0..order] LPC coefficients (Q11) */
+ int16_t *K, /* (o) K[0...order-1] Reflection coefficients (Q15) */
+ int16_t order /* (i) filter order */
+ ) {
+ int16_t i, j;
+ int16_t R_hi[LEVINSON_MAX_ORDER+1], R_low[LEVINSON_MAX_ORDER+1];
+  /* Autocorrelation coefficients in high precision */
+ int16_t A_hi[LEVINSON_MAX_ORDER+1], A_low[LEVINSON_MAX_ORDER+1];
+  /* LPC coefficients in high precision */
+ int16_t A_upd_hi[LEVINSON_MAX_ORDER+1], A_upd_low[LEVINSON_MAX_ORDER+1];
+ /* LPC coefficients for next iteration */
+ int16_t K_hi, K_low; /* reflection coefficient in high precision */
+ int16_t Alpha_hi, Alpha_low, Alpha_exp; /* Prediction gain Alpha in high precision
+ and with scale factor */
+ int16_t tmp_hi, tmp_low;
+ int32_t temp1W32, temp2W32, temp3W32;
+ int16_t norm;
+
+ /* Normalize the autocorrelation R[0]...R[order+1] */
+
+ norm = WebRtcSpl_NormW32(R[0]);
+
+ for (i=order;i>=0;i--) {
+ temp1W32 = R[i] << norm;
+ /* Put R in hi and low format */
+ R_hi[i] = (int16_t)(temp1W32 >> 16);
+ R_low[i] = (int16_t)((temp1W32 - ((int32_t)R_hi[i] << 16)) >> 1);
+ }
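+
+  /* Editorial note (not part of the original source): throughout this
+     routine a 32-bit value v is kept in "hi/low" form as
+     v ~= ((int32_t)hi << 16) + ((int32_t)low << 1), i.e. `low` holds the
+     15 bits directly below `hi`, and the LSB of v is dropped. */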
+
+ /* K = A[1] = -R[1] / R[0] */
+
+ temp2W32 = (R_hi[1] << 16) + (R_low[1] << 1); /* R[1] in Q31 */
+ temp3W32 = WEBRTC_SPL_ABS_W32(temp2W32); /* abs R[1] */
+ temp1W32 = WebRtcSpl_DivW32HiLow(temp3W32, R_hi[0], R_low[0]); /* abs(R[1])/R[0] in Q31 */
+ /* Put back the sign on R[1] */
+ if (temp2W32 > 0) {
+ temp1W32 = -temp1W32;
+ }
+
+ /* Put K in hi and low format */
+ K_hi = (int16_t)(temp1W32 >> 16);
+ K_low = (int16_t)((temp1W32 - ((int32_t)K_hi << 16)) >> 1);
+
+ /* Store first reflection coefficient */
+ K[0] = K_hi;
+
+ temp1W32 >>= 4; /* A[1] in Q27. */
+
+ /* Put A[1] in hi and low format */
+ A_hi[1] = (int16_t)(temp1W32 >> 16);
+ A_low[1] = (int16_t)((temp1W32 - ((int32_t)A_hi[1] << 16)) >> 1);
+
+ /* Alpha = R[0] * (1-K^2) */
+
+ temp1W32 = (((K_hi * K_low) >> 14) + K_hi * K_hi) << 1; /* = k^2 in Q31 */
+
+ temp1W32 = WEBRTC_SPL_ABS_W32(temp1W32); /* Guard against <0 */
+ temp1W32 = (int32_t)0x7fffffffL - temp1W32; /* temp1W32 = (1 - K[0]*K[0]) in Q31 */
+
+ /* Store temp1W32 = 1 - K[0]*K[0] on hi and low format */
+ tmp_hi = (int16_t)(temp1W32 >> 16);
+ tmp_low = (int16_t)((temp1W32 - ((int32_t)tmp_hi << 16)) >> 1);
+
+ /* Calculate Alpha in Q31 */
+ temp1W32 = (R_hi[0] * tmp_hi + ((R_hi[0] * tmp_low) >> 15) +
+ ((R_low[0] * tmp_hi) >> 15)) << 1;
+
+ /* Normalize Alpha and put it in hi and low format */
+
+ Alpha_exp = WebRtcSpl_NormW32(temp1W32);
+ temp1W32 <<= Alpha_exp;
+ Alpha_hi = (int16_t)(temp1W32 >> 16);
+ Alpha_low = (int16_t)((temp1W32 - ((int32_t)Alpha_hi<< 16)) >> 1);
+
+ /* Perform the iterative calculations in the
+ Levinson Durbin algorithm */
+
+ for (i=2; i<=order; i++)
+ {
+
+ /* ----
+ \
+ temp1W32 = R[i] + > R[j]*A[i-j]
+ /
+ ----
+ j=1..i-1
+ */
+
+ temp1W32 = 0;
+
+ for(j=1; j<i; j++) {
+ /* temp1W32 is in Q31 */
+ temp1W32 += ((R_hi[j] * A_hi[i - j]) << 1) +
+ ((((R_hi[j] * A_low[i - j]) >> 15) +
+ ((R_low[j] * A_hi[i - j]) >> 15)) << 1);
+ }
+
+ temp1W32 <<= 4;
+ temp1W32 += (R_hi[i] << 16) + (R_low[i] << 1);
+
+ /* K = -temp1W32 / Alpha */
+ temp2W32 = WEBRTC_SPL_ABS_W32(temp1W32); /* abs(temp1W32) */
+ temp3W32 = WebRtcSpl_DivW32HiLow(temp2W32, Alpha_hi, Alpha_low); /* abs(temp1W32)/Alpha */
+
+ /* Put the sign of temp1W32 back again */
+ if (temp1W32 > 0) {
+ temp3W32 = -temp3W32;
+ }
+
+ /* Use the Alpha shifts from earlier to denormalize */
+ norm = WebRtcSpl_NormW32(temp3W32);
+ if ((Alpha_exp <= norm)||(temp3W32==0)) {
+ temp3W32 <<= Alpha_exp;
+ } else {
+ if (temp3W32 > 0)
+ {
+ temp3W32 = (int32_t)0x7fffffffL;
+ } else
+ {
+ temp3W32 = (int32_t)0x80000000L;
+ }
+ }
+
+ /* Put K on hi and low format */
+ K_hi = (int16_t)(temp3W32 >> 16);
+ K_low = (int16_t)((temp3W32 - ((int32_t)K_hi << 16)) >> 1);
+
+ /* Store Reflection coefficient in Q15 */
+ K[i-1] = K_hi;
+
+    /* Test for unstable filter. If unstable, return a negative value (-i)
+       and let the caller decide what to do in that case.
+    */
+
+ if ((int32_t)WEBRTC_SPL_ABS_W16(K_hi) > (int32_t)32740) {
+ return(-i); /* Unstable filter */
+ }
+
+ /*
+ Compute updated LPC coefficient: Anew[i]
+ Anew[j]= A[j] + K*A[i-j] for j=1..i-1
+ Anew[i]= K
+ */
+
+ for(j=1; j<i; j++)
+ {
+ temp1W32 = (A_hi[j] << 16) + (A_low[j] << 1); // temp1W32 = A[j] in Q27
+
+ temp1W32 += (K_hi * A_hi[i - j] + ((K_hi * A_low[i - j]) >> 15) +
+ ((K_low * A_hi[i - j]) >> 15)) << 1; // temp1W32 += K*A[i-j] in Q27.
+
+ /* Put Anew in hi and low format */
+ A_upd_hi[j] = (int16_t)(temp1W32 >> 16);
+ A_upd_low[j] = (int16_t)((temp1W32 - ((int32_t)A_upd_hi[j] << 16)) >> 1);
+ }
+
+ temp3W32 >>= 4; /* temp3W32 = K in Q27 (Convert from Q31 to Q27) */
+
+ /* Store Anew in hi and low format */
+ A_upd_hi[i] = (int16_t)(temp3W32 >> 16);
+ A_upd_low[i] = (int16_t)((temp3W32 - ((int32_t)A_upd_hi[i] << 16)) >> 1);
+
+ /* Alpha = Alpha * (1-K^2) */
+
+ temp1W32 = (((K_hi * K_low) >> 14) + K_hi * K_hi) << 1; /* K*K in Q31 */
+
+ temp1W32 = WEBRTC_SPL_ABS_W32(temp1W32); /* Guard against <0 */
+ temp1W32 = (int32_t)0x7fffffffL - temp1W32; /* 1 - K*K in Q31 */
+
+ /* Convert 1- K^2 in hi and low format */
+ tmp_hi = (int16_t)(temp1W32 >> 16);
+ tmp_low = (int16_t)((temp1W32 - ((int32_t)tmp_hi << 16)) >> 1);
+
+ /* Calculate Alpha = Alpha * (1-K^2) in Q31 */
+ temp1W32 = (Alpha_hi * tmp_hi + ((Alpha_hi * tmp_low) >> 15) +
+ ((Alpha_low * tmp_hi) >> 15)) << 1;
+
+ /* Normalize Alpha and store it on hi and low format */
+
+ norm = WebRtcSpl_NormW32(temp1W32);
+ temp1W32 <<= norm;
+
+ Alpha_hi = (int16_t)(temp1W32 >> 16);
+ Alpha_low = (int16_t)((temp1W32 - ((int32_t)Alpha_hi << 16)) >> 1);
+
+    /* Update the total normalization of Alpha */
+ Alpha_exp = Alpha_exp + norm;
+
+ /* Update A[] */
+
+ for(j=1; j<=i; j++)
+ {
+ A_hi[j] =A_upd_hi[j];
+ A_low[j] =A_upd_low[j];
+ }
+ }
+
+ /*
+ Set A[0] to 1.0 and store the A[i] i=1...order in Q12
+ (Convert from Q27 and use rounding)
+ */
+
+ A[0] = 2048;
+
+ for(i=1; i<=order; i++) {
+ /* temp1W32 in Q27 */
+ temp1W32 = (A_hi[i] << 16) + (A_low[i] << 1);
+ /* Round and store upper word */
+ A[i] = (int16_t)((temp1W32 + 32768) >> 16);
+ }
+ return(1); /* Stable filters */
+}
+
+
+
+
+
+/* window */
+/* Matlab generation of floating point code:
+ * t = (1:256)/257; r = 1-(1-t).^.45; w = sin(r*pi).^3; w = w/sum(w); plot((1:256)/8, w); grid;
+ * for k=1:16, fprintf(1, '%.8f, ', w(k*16 + (-15:0))); fprintf(1, '\n'); end
+ * All values are multiplied by 2^21 in the fixed point code.
+ */
+static const int16_t kWindowAutocorr[WINLEN] = {
+ 0, 0, 0, 0, 0, 1, 1, 2, 2, 3, 5, 6,
+ 8, 10, 12, 14, 17, 20, 24, 28, 33, 38, 43, 49,
+ 56, 63, 71, 79, 88, 98, 108, 119, 131, 143, 157, 171,
+ 186, 202, 219, 237, 256, 275, 296, 318, 341, 365, 390, 416,
+ 444, 472, 502, 533, 566, 600, 635, 671, 709, 748, 789, 831,
+ 875, 920, 967, 1015, 1065, 1116, 1170, 1224, 1281, 1339, 1399, 1461,
+ 1525, 1590, 1657, 1726, 1797, 1870, 1945, 2021, 2100, 2181, 2263, 2348,
+ 2434, 2523, 2614, 2706, 2801, 2898, 2997, 3099, 3202, 3307, 3415, 3525,
+ 3637, 3751, 3867, 3986, 4106, 4229, 4354, 4481, 4611, 4742, 4876, 5012,
+ 5150, 5291, 5433, 5578, 5725, 5874, 6025, 6178, 6333, 6490, 6650, 6811,
+ 6974, 7140, 7307, 7476, 7647, 7820, 7995, 8171, 8349, 8529, 8711, 8894,
+ 9079, 9265, 9453, 9642, 9833, 10024, 10217, 10412, 10607, 10803, 11000, 11199,
+ 11398, 11597, 11797, 11998, 12200, 12401, 12603, 12805, 13008, 13210, 13412, 13614,
+ 13815, 14016, 14216, 14416, 14615, 14813, 15009, 15205, 15399, 15591, 15782, 15971,
+ 16157, 16342, 16524, 16704, 16881, 17056, 17227, 17395, 17559, 17720, 17877, 18030,
+ 18179, 18323, 18462, 18597, 18727, 18851, 18970, 19082, 19189, 19290, 19384, 19471,
+ 19551, 19623, 19689, 19746, 19795, 19835, 19867, 19890, 19904, 19908, 19902, 19886,
+ 19860, 19823, 19775, 19715, 19644, 19561, 19465, 19357, 19237, 19102, 18955, 18793,
+ 18618, 18428, 18223, 18004, 17769, 17518, 17252, 16970, 16672, 16357, 16025, 15677,
+ 15311, 14929, 14529, 14111, 13677, 13225, 12755, 12268, 11764, 11243, 10706, 10152,
+ 9583, 8998, 8399, 7787, 7162, 6527, 5883, 5231, 4576, 3919, 3265, 2620,
+ 1990, 1386, 825, 333
+};
+
+
+/* By using a hearing threshold level in dB of -28 dB (higher value gives more noise),
+ the H_T_H (in float) can be calculated as:
+ H_T_H = pow(10.0, 0.05 * (-28.0)) = 0.039810717055350
+ In Q19, H_T_H becomes round(0.039810717055350*2^19) ~= 20872, i.e.
+ H_T_H = 20872/524288.0, and H_T_HQ19 = 20872;
+*/
+
+
+/* The bandwidth expansion vectors are created from:
+ kPolyVecLo=[0.900000,0.810000,0.729000,0.656100,0.590490,0.531441,0.478297,0.430467,0.387420,0.348678,0.313811,0.282430];
+ kPolyVecHi=[0.800000,0.640000,0.512000,0.409600,0.327680,0.262144];
+ round(kPolyVecLo*32768)
+ round(kPolyVecHi*32768)
+*/
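+/* Editorial sketch (not part of the original source): the tables below are
+   round(0.9^(n+1) * 32768) and round(0.8^(n+1) * 32768), i.e. bandwidth
+   expansion factors 0.9 and 0.8 in Q15. A minimal C regeneration sketch:
+
+     for (int n = 0; n < 12; n++)
+       lo[n] = (int16_t)floor(pow(0.9, n + 1) * 32768.0 + 0.5);
+     for (int n = 0; n < 6; n++)
+       hi[n] = (int16_t)floor(pow(0.8, n + 1) * 32768.0 + 0.5);
+*/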
+static const int16_t kPolyVecLo[12] = {
+ 29491, 26542, 23888, 21499, 19349, 17414, 15673, 14106, 12695, 11425, 10283, 9255
+};
+static const int16_t kPolyVecHi[6] = {
+ 26214, 20972, 16777, 13422, 10737, 8590
+};
+
+static __inline int32_t log2_Q8_LPC( uint32_t x ) {
+
+ int32_t zeros;
+ int16_t frac;
+
+ zeros=WebRtcSpl_NormU32(x);
+ frac = (int16_t)(((x << zeros) & 0x7FFFFFFF) >> 23);
+
+ /* log2(x) */
+ return ((31 - zeros) << 8) + frac;
+}
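+
+/* Editorial note (not part of the original source): log2_Q8_LPC() returns
+   log2(x) in Q8: (31 - zeros) is the index of the leading one bit (the
+   integer part), and `frac` linearly approximates the fractional part from
+   the next eight mantissa bits. For example, log2_Q8_LPC(1 << 20) =
+   20 << 8 = 5120 exactly, while log2_Q8_LPC(3 << 19) = (20 << 8) + 128,
+   i.e. 20.5 in Q8 against the exact value of about 20.585. */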
+
+static const int16_t kMulPitchGain = -25; /* 200/256 in Q5 */
+static const int16_t kChngFactor = 3523; /* log10(2)*10/4*0.4/1.4=log10(2)/1.4= 0.2150 in Q14 */
+static const int16_t kExp2 = 11819;        /* 1/ln(2) in Q13 */
+const int kShiftLowerBand = 11; /* Shift value for lower band in Q domain. */
+const int kShiftHigherBand = 12; /* Shift value for higher band in Q domain. */
+
+void WebRtcIsacfix_GetVars(const int16_t *input, const int16_t *pitchGains_Q12,
+ uint32_t *oldEnergy, int16_t *varscale)
+{
+ int k;
+ uint32_t nrgQ[4];
+ int16_t nrgQlog[4];
+ int16_t tmp16, chng1, chng2, chng3, chng4, tmp, chngQ, oldNrgQlog, pgQ, pg3;
+ int32_t expPg32;
+ int16_t expPg, divVal;
+ int16_t tmp16_1, tmp16_2;
+
+  /* Calculate the energies of the four frame quarters */
+ nrgQ[0]=0;
+ for (k = QLOOKAHEAD/2; k < (FRAMESAMPLES/4 + QLOOKAHEAD) / 2; k++) {
+ nrgQ[0] += (uint32_t)(input[k] * input[k]);
+ }
+ nrgQ[1]=0;
+ for ( ; k < (FRAMESAMPLES/2 + QLOOKAHEAD) / 2; k++) {
+ nrgQ[1] += (uint32_t)(input[k] * input[k]);
+ }
+ nrgQ[2]=0;
+ for ( ; k < (FRAMESAMPLES * 3 / 4 + QLOOKAHEAD) / 2; k++) {
+ nrgQ[2] += (uint32_t)(input[k] * input[k]);
+ }
+ nrgQ[3]=0;
+ for ( ; k < (FRAMESAMPLES + QLOOKAHEAD) / 2; k++) {
+ nrgQ[3] += (uint32_t)(input[k] * input[k]);
+ }
+
+ for ( k=0; k<4; k++) {
+ nrgQlog[k] = (int16_t)log2_Q8_LPC(nrgQ[k]); /* log2(nrgQ) */
+ }
+ oldNrgQlog = (int16_t)log2_Q8_LPC(*oldEnergy);
+
+ /* Calculate average level change */
+ chng1 = WEBRTC_SPL_ABS_W16(nrgQlog[3]-nrgQlog[2]);
+ chng2 = WEBRTC_SPL_ABS_W16(nrgQlog[2]-nrgQlog[1]);
+ chng3 = WEBRTC_SPL_ABS_W16(nrgQlog[1]-nrgQlog[0]);
+ chng4 = WEBRTC_SPL_ABS_W16(nrgQlog[0]-oldNrgQlog);
+ tmp = chng1+chng2+chng3+chng4;
+ chngQ = (int16_t)(tmp * kChngFactor >> 10); /* Q12 */
+ chngQ += 2926; /* + 1.0/1.4 in Q12 */
+
+ /* Find average pitch gain */
+ pgQ = 0;
+ for (k=0; k<4; k++)
+ {
+ pgQ += pitchGains_Q12[k];
+ }
+
+ pg3 = (int16_t)(pgQ * pgQ >> 11); // pgQ in Q(12+2)=Q14. Q14*Q14>>11 => Q17
+ pg3 = (int16_t)(pgQ * pg3 >> 13); /* Q14*Q17>>13 =>Q18 */
+ /* kMulPitchGain = -25 = -200 in Q-3. */
+ pg3 = (int16_t)(pg3 * kMulPitchGain >> 5); // Q10
+ tmp16=(int16_t)WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(kExp2,pg3,13);/* Q13*Q10>>13 => Q10*/
+ if (tmp16<0) {
+ tmp16_2 = (0x0400 | (tmp16 & 0x03FF));
+ tmp16_1 = ((uint16_t)(tmp16 ^ 0xFFFF) >> 10) - 3; /* Gives result in Q14 */
+ if (tmp16_1<0)
+ expPg = -(tmp16_2 << -tmp16_1);
+ else
+ expPg = -(tmp16_2 >> tmp16_1);
+ } else
+    expPg = (int16_t) -16384; /* -1 in Q14, since 2^0=1 and the sign is applied here */
+
+ expPg32 = (int32_t)expPg << 8; /* Q22 */
+ divVal = WebRtcSpl_DivW32W16ResW16(expPg32, chngQ); /* Q22/Q12=Q10 */
+
+ tmp16=(int16_t)WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(kExp2,divVal,13);/* Q13*Q10>>13 => Q10*/
+ if (tmp16<0) {
+ tmp16_2 = (0x0400 | (tmp16 & 0x03FF));
+ tmp16_1 = ((uint16_t)(tmp16 ^ 0xFFFF) >> 10) - 3; /* Gives result in Q14 */
+ if (tmp16_1<0)
+ expPg = tmp16_2 << -tmp16_1;
+ else
+ expPg = tmp16_2 >> tmp16_1;
+ } else
+ expPg = (int16_t) 16384; /* 1 in Q14, since 2^0=1 */
+
+ *varscale = expPg-1;
+ *oldEnergy = nrgQ[3];
+}
+
+
+
+static __inline int16_t exp2_Q10_T(int16_t x) { // Both in and out in Q10
+
+ int16_t tmp16_1, tmp16_2;
+
+ tmp16_2=(int16_t)(0x0400|(x&0x03FF));
+ tmp16_1 = -(x >> 10);
+ if(tmp16_1>0)
+ return tmp16_2 >> tmp16_1;
+ else
+ return tmp16_2 << -tmp16_1;
+
+}
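+
+/* Editorial note (not part of the original source): exp2_Q10_T()
+   approximates 2^(x/1024) in Q10: the low ten bits of x form a linear
+   mantissa 1024 + (x & 0x03FF), which is then shifted by the integer part
+   x >> 10. For example, exp2_Q10_T(0) = 1024 (2^0 = 1 in Q10) and
+   exp2_Q10_T(1024) = 2048 (2^1 = 2 in Q10). */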
+
+
+// Declare function pointers.
+AutocorrFix WebRtcIsacfix_AutocorrFix;
+CalculateResidualEnergy WebRtcIsacfix_CalculateResidualEnergy;
+
+/* This routine calculates the residual energy for LPC.
+ * Formula as shown in comments inside.
+ */
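+/* Editorial note (not part of the original source): the double loop below
+ * evaluates the quadratic form
+ *
+ *   residual_energy = sum_{i=0..order} sum_{j=0..order}
+ *       a_polynomial[i] * corr_coeffs[|i - j|] * a_polynomial[j],
+ *
+ * folded so that each off-diagonal lag is visited once and counted twice,
+ * with dynamic down-scaling (shift_internal) keeping the 64-bit sum from
+ * overflowing.
+ */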
+int32_t WebRtcIsacfix_CalculateResidualEnergyC(int lpc_order,
+ int32_t q_val_corr,
+ int q_val_polynomial,
+ int16_t* a_polynomial,
+ int32_t* corr_coeffs,
+ int* q_val_residual_energy) {
+ int i = 0, j = 0;
+ int shift_internal = 0, shift_norm = 0;
+ int32_t tmp32 = 0, word32_high = 0, word32_low = 0, residual_energy = 0;
+ int64_t sum64 = 0, sum64_tmp = 0;
+
+ for (i = 0; i <= lpc_order; i++) {
+ for (j = i; j <= lpc_order; j++) {
+ /* For the case of i == 0: residual_energy +=
+ * a_polynomial[j] * corr_coeffs[i] * a_polynomial[j - i];
+ * For the case of i != 0: residual_energy +=
+ * a_polynomial[j] * corr_coeffs[i] * a_polynomial[j - i] * 2;
+ */
+
+ tmp32 = a_polynomial[j] * a_polynomial[j - i];
+ /* tmp32 in Q(q_val_polynomial * 2). */
+ if (i != 0) {
+ tmp32 <<= 1;
+ }
+ sum64_tmp = (int64_t)tmp32 * (int64_t)corr_coeffs[i];
+ sum64_tmp >>= shift_internal;
+
+ /* Test overflow and sum the result. */
+ if(((sum64_tmp > 0 && sum64 > 0) && (LLONG_MAX - sum64 < sum64_tmp)) ||
+ ((sum64_tmp < 0 && sum64 < 0) && (LLONG_MIN - sum64 > sum64_tmp))) {
+ /* Shift right for overflow. */
+ shift_internal += 1;
+ sum64 >>= 1;
+ sum64 += sum64_tmp >> 1;
+ } else {
+ sum64 += sum64_tmp;
+ }
+ }
+ }
+
+ word32_high = (int32_t)(sum64 >> 32);
+ word32_low = (int32_t)sum64;
+
+ // Calculate the value of shifting (shift_norm) for the 64-bit sum.
+ if(word32_high != 0) {
+ shift_norm = 32 - WebRtcSpl_NormW32(word32_high);
+ residual_energy = (int32_t)(sum64 >> shift_norm);
+ } else {
+ if((word32_low & 0x80000000) != 0) {
+ shift_norm = 1;
+ residual_energy = (uint32_t)word32_low >> 1;
+ } else {
+ shift_norm = WebRtcSpl_NormW32(word32_low);
+ residual_energy = word32_low << shift_norm;
+ shift_norm = -shift_norm;
+ }
+ }
+
+ /* Q(q_val_polynomial * 2) * Q(q_val_corr) >> shift_internal >> shift_norm
+ * = Q(q_val_corr - shift_internal - shift_norm + q_val_polynomial * 2)
+ */
+ *q_val_residual_energy = q_val_corr - shift_internal - shift_norm
+ + q_val_polynomial * 2;
+
+ return residual_energy;
+}
+
+void WebRtcIsacfix_GetLpcCoef(int16_t *inLoQ0,
+ int16_t *inHiQ0,
+ MaskFiltstr_enc *maskdata,
+ int16_t snrQ10,
+ const int16_t *pitchGains_Q12,
+ int32_t *gain_lo_hiQ17,
+ int16_t *lo_coeffQ15,
+ int16_t *hi_coeffQ15)
+{
+ int k, n, ii;
+ int pos1, pos2;
+ int sh_lo, sh_hi, sh, ssh, shMem;
+ int16_t varscaleQ14;
+
+ int16_t tmpQQlo, tmpQQhi;
+ int32_t tmp32;
+ int16_t tmp16,tmp16b;
+
+ int16_t polyHI[ORDERHI+1];
+ int16_t rcQ15_lo[ORDERLO], rcQ15_hi[ORDERHI];
+
+
+ int16_t DataLoQ6[WINLEN], DataHiQ6[WINLEN];
+ int32_t corrloQQ[ORDERLO+2];
+ int32_t corrhiQQ[ORDERHI+1];
+ int32_t corrlo2QQ[ORDERLO+1];
+ int16_t scale;
+ int16_t QdomLO, QdomHI, newQdomHI, newQdomLO;
+
+ int32_t res_nrgQQ;
+ int32_t sqrt_nrg;
+
+ /* less-noise-at-low-frequencies factor */
+ int16_t aaQ14;
+
+  /* Multiplication with 1/sqrt(12) ~= 0.28901734104046 can be done by conversion to
+ Q15, i.e. round(0.28901734104046*32768) = 9471, and use 9471/32768.0 ~= 0.289032
+ */
+ int16_t snrq;
+ int shft;
+
+ int16_t tmp16a;
+ int32_t tmp32a, tmp32b, tmp32c;
+
+ int16_t a_LOQ11[ORDERLO+1];
+ int16_t k_vecloQ15[ORDERLO];
+ int16_t a_HIQ12[ORDERHI+1];
+ int16_t k_vechiQ15[ORDERHI];
+
+ int16_t stab;
+
+ snrq=snrQ10;
+
+ /* SNR= C * 2 ^ (D * snrq) ; C=0.289, D=0.05*log2(10)=0.166 (~=172 in Q10)*/
+ tmp16 = (int16_t)(snrq * 172 >> 10); // Q10
+ tmp16b = exp2_Q10_T(tmp16); // Q10
+ snrq = (int16_t)(tmp16b * 285 >> 10); // Q10
+
+  /* change quality level depending on pitch gains and level fluctuations */
+ WebRtcIsacfix_GetVars(inLoQ0, pitchGains_Q12, &(maskdata->OldEnergy), &varscaleQ14);
+
+ /* less-noise-at-low-frequencies factor */
+ /* Calculation of 0.35 * (0.5 + 0.5 * varscale) in fixpoint:
+ With 0.35 in Q16 (0.35 ~= 22938/65536.0 = 0.3500061) and varscaleQ14 in Q14,
+ we get Q16*Q14>>16 = Q14
+ */
+ aaQ14 = (int16_t)((22938 * (8192 + (varscaleQ14 >> 1)) + 32768) >> 16);
+
+ /* Calculate tmp = (1.0 + aa*aa); in Q12 */
+ tmp16 = (int16_t)(aaQ14 * aaQ14 >> 15); // Q14*Q14>>15 = Q13
+ tmpQQlo = 4096 + (tmp16 >> 1); // Q12 + Q13>>1 = Q12.
+
+ /* Calculate tmp = (1.0+aa) * (1.0+aa); */
+ tmp16 = 8192 + (aaQ14 >> 1); // 1+a in Q13.
+ tmpQQhi = (int16_t)(tmp16 * tmp16 >> 14); // Q13*Q13>>14 = Q12
+
+ /* replace data in buffer by new look-ahead data */
+ for (pos1 = 0; pos1 < QLOOKAHEAD; pos1++) {
+ maskdata->DataBufferLoQ0[pos1 + WINLEN - QLOOKAHEAD] = inLoQ0[pos1];
+ }
+
+ for (k = 0; k < SUBFRAMES; k++) {
+
+ /* Update input buffer and multiply signal with window */
+ for (pos1 = 0; pos1 < WINLEN - UPDATE/2; pos1++) {
+ maskdata->DataBufferLoQ0[pos1] = maskdata->DataBufferLoQ0[pos1 + UPDATE/2];
+ maskdata->DataBufferHiQ0[pos1] = maskdata->DataBufferHiQ0[pos1 + UPDATE/2];
+ DataLoQ6[pos1] = (int16_t)(maskdata->DataBufferLoQ0[pos1] *
+ kWindowAutocorr[pos1] >> 15); // Q0*Q21>>15 = Q6
+ DataHiQ6[pos1] = (int16_t)(maskdata->DataBufferHiQ0[pos1] *
+ kWindowAutocorr[pos1] >> 15); // Q0*Q21>>15 = Q6
+ }
+ pos2 = (int16_t)(k * UPDATE / 2);
+ for (n = 0; n < UPDATE/2; n++, pos1++) {
+ maskdata->DataBufferLoQ0[pos1] = inLoQ0[QLOOKAHEAD + pos2];
+ maskdata->DataBufferHiQ0[pos1] = inHiQ0[pos2++];
+ DataLoQ6[pos1] = (int16_t)(maskdata->DataBufferLoQ0[pos1] *
+ kWindowAutocorr[pos1] >> 15); // Q0*Q21>>15 = Q6
+ DataHiQ6[pos1] = (int16_t)(maskdata->DataBufferHiQ0[pos1] *
+ kWindowAutocorr[pos1] >> 15); // Q0*Q21>>15 = Q6
+ }
+
+ /* Get correlation coefficients */
+    /* The highest absolute value measured inside DataLo in the test set was
+       307; for DataHi, the corresponding value was 160.
+
+       This means that it should be possible to represent the input values
+       to WebRtcSpl_AutoCorrelation() as Q6 values (since 307*2^6 =
+       19648). Of course, Q0 will also work, but due to the low energy in
+       DataLo and DataHi, the output autocorrelation will be more accurate
+       and mimic the floating point code better by being in as high a
+       Q-domain as possible.
+    */
+
+ WebRtcIsacfix_AutocorrFix(corrloQQ,DataLoQ6,WINLEN, ORDERLO+1, &scale);
+ QdomLO = 12-scale; // QdomLO is the Q-domain of corrloQQ
+ sh_lo = WebRtcSpl_NormW32(corrloQQ[0]);
+ QdomLO += sh_lo;
+ for (ii=0; ii<ORDERLO+2; ii++) {
+ corrloQQ[ii] <<= sh_lo;
+ }
+    /* It was investigated whether it was possible to use 16 bits for the
+       32-bit vector corrloQQ, but it didn't work. */
+
+ WebRtcIsacfix_AutocorrFix(corrhiQQ,DataHiQ6,WINLEN, ORDERHI, &scale);
+
+ QdomHI = 12-scale; // QdomHI is the Q-domain of corrhiQQ
+ sh_hi = WebRtcSpl_NormW32(corrhiQQ[0]);
+ QdomHI += sh_hi;
+ for (ii=0; ii<ORDERHI+1; ii++) {
+ corrhiQQ[ii] <<= sh_hi;
+ }
+
+ /* less noise for lower frequencies, by filtering/scaling autocorrelation sequences */
+
+ /* Calculate corrlo2[0] = tmpQQlo * corrlo[0] - 2.0*tmpQQlo * corrlo[1];*/
+ // `corrlo2QQ` in Q(QdomLO-5).
+ corrlo2QQ[0] = (WEBRTC_SPL_MUL_16_32_RSFT16(tmpQQlo, corrloQQ[0]) >> 1) -
+ (WEBRTC_SPL_MUL_16_32_RSFT16(aaQ14, corrloQQ[1]) >> 2);
+
+ /* Calculate corrlo2[n] = tmpQQlo * corrlo[n] - tmpQQlo * (corrlo[n-1] + corrlo[n+1]);*/
+ for (n = 1; n <= ORDERLO; n++) {
+
+ tmp32 = (corrloQQ[n - 1] >> 1) + (corrloQQ[n + 1] >> 1); // Q(QdomLO-1).
+ corrlo2QQ[n] = (WEBRTC_SPL_MUL_16_32_RSFT16(tmpQQlo, corrloQQ[n]) >> 1) -
+ (WEBRTC_SPL_MUL_16_32_RSFT16(aaQ14, tmp32) >> 2);
+ }
+ QdomLO -= 5;
+
+ /* Calculate corrhi[n] = tmpQQhi * corrhi[n]; */
+ for (n = 0; n <= ORDERHI; n++) {
+ corrhiQQ[n] = WEBRTC_SPL_MUL_16_32_RSFT16(tmpQQhi, corrhiQQ[n]); // Q(12+QdomHI-16) = Q(QdomHI-4)
+ }
+ QdomHI -= 4;
+
+ /* add white noise floor */
+ /* corrlo2QQ is in Q(QdomLO) and corrhiQQ is in Q(QdomHI) */
+ /* Calculate corrlo2[0] += 9.5367431640625e-7; and
+ corrhi[0] += 9.5367431640625e-7, where the constant is 1/2^20 */
+
+ tmp32 = WEBRTC_SPL_SHIFT_W32((int32_t) 1, QdomLO-20);
+ corrlo2QQ[0] += tmp32;
+ tmp32 = WEBRTC_SPL_SHIFT_W32((int32_t) 1, QdomHI-20);
+ corrhiQQ[0] += tmp32;
+
+ /* corrlo2QQ is in Q(QdomLO) and corrhiQQ is in Q(QdomHI) before the following
+ code segment, where we want to make sure we get a 1-bit margin */
+ for (n = 0; n <= ORDERLO; n++) {
+ corrlo2QQ[n] >>= 1; // Make sure we have a 1-bit margin.
+ }
+ QdomLO -= 1; // Now, corrlo2QQ is in Q(QdomLO), with a 1-bit margin
+
+ for (n = 0; n <= ORDERHI; n++) {
+ corrhiQQ[n] >>= 1; // Make sure we have a 1-bit margin.
+ }
+ QdomHI -= 1; // Now, corrhiQQ is in Q(QdomHI), with a 1-bit margin
+
+
+ newQdomLO = QdomLO;
+
+ for (n = 0; n <= ORDERLO; n++) {
+ int32_t tmp, tmpB, tmpCorr;
+ int16_t alpha=328; //0.01 in Q15
+ int16_t beta=324; //(1-0.01)*0.01=0.0099 in Q15
+ int16_t gamma=32440; //(1-0.01)=0.99 in Q15
+
+ if (maskdata->CorrBufLoQQ[n] != 0) {
+ shMem=WebRtcSpl_NormW32(maskdata->CorrBufLoQQ[n]);
+ sh = QdomLO - maskdata->CorrBufLoQdom[n];
+ if (sh<=shMem) {
+ tmp = WEBRTC_SPL_SHIFT_W32(maskdata->CorrBufLoQQ[n], sh); // Get CorrBufLoQQ to same domain as corrlo2
+ tmp = WEBRTC_SPL_MUL_16_32_RSFT15(alpha, tmp);
+ } else if ((sh-shMem)<7){
+ tmp = WEBRTC_SPL_SHIFT_W32(maskdata->CorrBufLoQQ[n], shMem); // Shift up CorrBufLoQQ as much as possible
+ // Shift `alpha` the number of times required to get `tmp` in QdomLO.
+ tmp = WEBRTC_SPL_MUL_16_32_RSFT15(alpha << (sh - shMem), tmp);
+ } else {
+        tmp = WEBRTC_SPL_SHIFT_W32(maskdata->CorrBufLoQQ[n], shMem); // Shift up CorrBufLoQQ as much as possible
+        // Shift `alpha` up as far as a 16-bit value allows (6 steps); the
+        // remaining (sh - shMem - 6) shifts are taken off `corrloQQ[n]`
+        // below, so both terms end up in the same, lowered Q-domain.
+ tmp = WEBRTC_SPL_MUL_16_32_RSFT15(alpha << 6, tmp);
+ tmpCorr = corrloQQ[n] >> (sh - shMem - 6);
+ tmp = tmp + tmpCorr;
+ maskdata->CorrBufLoQQ[n] = tmp;
+ newQdomLO = QdomLO-(sh-shMem-6);
+ maskdata->CorrBufLoQdom[n] = newQdomLO;
+ }
+ } else
+ tmp = 0;
+
+ tmp = tmp + corrlo2QQ[n];
+
+ maskdata->CorrBufLoQQ[n] = tmp;
+ maskdata->CorrBufLoQdom[n] = QdomLO;
+
+ tmp=WEBRTC_SPL_MUL_16_32_RSFT15(beta, tmp);
+ tmpB=WEBRTC_SPL_MUL_16_32_RSFT15(gamma, corrlo2QQ[n]);
+ corrlo2QQ[n] = tmp + tmpB;
+ }
+ if( newQdomLO!=QdomLO) {
+ for (n = 0; n <= ORDERLO; n++) {
+ if (maskdata->CorrBufLoQdom[n] != newQdomLO)
+ corrloQQ[n] >>= maskdata->CorrBufLoQdom[n] - newQdomLO;
+ }
+ QdomLO = newQdomLO;
+ }
+
+
+ newQdomHI = QdomHI;
+
+ for (n = 0; n <= ORDERHI; n++) {
+ int32_t tmp, tmpB, tmpCorr;
+ int16_t alpha=328; //0.01 in Q15
+ int16_t beta=324; //(1-0.01)*0.01=0.0099 in Q15
+      int16_t gamma=32440; //(1-0.01)=0.99 in Q15
+ if (maskdata->CorrBufHiQQ[n] != 0) {
+ shMem=WebRtcSpl_NormW32(maskdata->CorrBufHiQQ[n]);
+ sh = QdomHI - maskdata->CorrBufHiQdom[n];
+ if (sh<=shMem) {
+ tmp = WEBRTC_SPL_SHIFT_W32(maskdata->CorrBufHiQQ[n], sh); // Get CorrBufHiQQ to same domain as corrhi
+ tmp = WEBRTC_SPL_MUL_16_32_RSFT15(alpha, tmp);
+ tmpCorr = corrhiQQ[n];
+ tmp = tmp + tmpCorr;
+ maskdata->CorrBufHiQQ[n] = tmp;
+ maskdata->CorrBufHiQdom[n] = QdomHI;
+ } else if ((sh-shMem)<7) {
+ tmp = WEBRTC_SPL_SHIFT_W32(maskdata->CorrBufHiQQ[n], shMem); // Shift up CorrBufHiQQ as much as possible
+ // Shift `alpha` the number of times required to get `tmp` in QdomHI.
+ tmp = WEBRTC_SPL_MUL_16_32_RSFT15(alpha << (sh - shMem), tmp);
+ tmpCorr = corrhiQQ[n];
+ tmp = tmp + tmpCorr;
+ maskdata->CorrBufHiQQ[n] = tmp;
+ maskdata->CorrBufHiQdom[n] = QdomHI;
+ } else {
+ tmp = WEBRTC_SPL_SHIFT_W32(maskdata->CorrBufHiQQ[n], shMem); // Shift up CorrBufHiQQ as much as possible
+        // Shift `alpha` up as far as a 16-bit value allows (6 steps); the
+        // remaining (sh - shMem - 6) shifts are taken off `corrhiQQ[n]`
+        // below, so both terms end up in the same, lowered Q-domain.
+ tmp = WEBRTC_SPL_MUL_16_32_RSFT15(alpha << 6, tmp);
+ tmpCorr = corrhiQQ[n] >> (sh - shMem - 6);
+ tmp = tmp + tmpCorr;
+ maskdata->CorrBufHiQQ[n] = tmp;
+ newQdomHI = QdomHI-(sh-shMem-6);
+ maskdata->CorrBufHiQdom[n] = newQdomHI;
+ }
+ } else {
+ tmp = corrhiQQ[n];
+ tmpCorr = tmp;
+ maskdata->CorrBufHiQQ[n] = tmp;
+ maskdata->CorrBufHiQdom[n] = QdomHI;
+ }
+
+ tmp=WEBRTC_SPL_MUL_16_32_RSFT15(beta, tmp);
+ tmpB=WEBRTC_SPL_MUL_16_32_RSFT15(gamma, tmpCorr);
+ corrhiQQ[n] = tmp + tmpB;
+ }
+
+ if( newQdomHI!=QdomHI) {
+ for (n = 0; n <= ORDERHI; n++) {
+ if (maskdata->CorrBufHiQdom[n] != newQdomHI)
+ corrhiQQ[n] >>= maskdata->CorrBufHiQdom[n] - newQdomHI;
+ }
+ QdomHI = newQdomHI;
+ }
+
+ stab=WebRtcSpl_LevinsonW32_JSK(corrlo2QQ, a_LOQ11, k_vecloQ15, ORDERLO);
+
+ if (stab<0) { // If unstable use lower order
+ a_LOQ11[0]=2048;
+ for (n = 1; n <= ORDERLO; n++) {
+ a_LOQ11[n]=0;
+ }
+
+ stab=WebRtcSpl_LevinsonW32_JSK(corrlo2QQ, a_LOQ11, k_vecloQ15, 8);
+ }
+
+
+ WebRtcSpl_LevinsonDurbin(corrhiQQ, a_HIQ12, k_vechiQ15, ORDERHI);
+
+ /* bandwidth expansion */
+ for (n = 1; n <= ORDERLO; n++) {
+ a_LOQ11[n] = (int16_t)((kPolyVecLo[n - 1] * a_LOQ11[n] + (1 << 14)) >>
+ 15);
+ }
+
+
+ polyHI[0] = a_HIQ12[0];
+ for (n = 1; n <= ORDERHI; n++) {
+ a_HIQ12[n] = (int16_t)(((int32_t)(kPolyVecHi[n - 1] * a_HIQ12[n]) +
+ (1 << 14)) >> 15);
+ polyHI[n] = a_HIQ12[n];
+ }
+
+ /* Normalize the corrlo2 vector */
+ sh = WebRtcSpl_NormW32(corrlo2QQ[0]);
+ for (n = 0; n <= ORDERLO; n++) {
+ corrlo2QQ[n] <<= sh;
+ }
+ QdomLO += sh; /* Now, corrlo2QQ is still in Q(QdomLO) */
+
+
+ /* residual energy */
+
+ sh_lo = 31;
+ res_nrgQQ = WebRtcIsacfix_CalculateResidualEnergy(ORDERLO, QdomLO,
+ kShiftLowerBand, a_LOQ11, corrlo2QQ, &sh_lo);
+
+ /* Convert to reflection coefficients */
+ WebRtcSpl_AToK_JSK(a_LOQ11, ORDERLO, rcQ15_lo);
+
+ if (sh_lo & 0x0001) {
+ res_nrgQQ >>= 1;
+ sh_lo-=1;
+ }
+
+
+ if( res_nrgQQ > 0 )
+ {
+ sqrt_nrg=WebRtcSpl_Sqrt(res_nrgQQ);
+
+ /* add hearing threshold and compute the gain */
+ /* lo_coeff = varscale * S_N_R / (sqrt_nrg + varscale * H_T_H); */
+
+ tmp32a = varscaleQ14 >> 1; // H_T_HQ19=65536 (16-17=-1)
+ ssh = sh_lo >> 1; // sqrt_nrg is in Qssh.
+ sh = ssh - 14;
+ tmp32b = WEBRTC_SPL_SHIFT_W32(tmp32a, sh); // Q14->Qssh
+ tmp32c = sqrt_nrg + tmp32b; // Qssh (denominator)
+ tmp32a = varscaleQ14 * snrq; // Q24 (numerator)
+
+ sh = WebRtcSpl_NormW32(tmp32c);
+ shft = 16 - sh;
+ tmp16a = (int16_t) WEBRTC_SPL_SHIFT_W32(tmp32c, -shft); // Q(ssh-shft) (denominator)
+
+ tmp32b = WebRtcSpl_DivW32W16(tmp32a, tmp16a); // Q(24-ssh+shft)
+ sh = ssh - shft - 7;
+ *gain_lo_hiQ17 = WEBRTC_SPL_SHIFT_W32(tmp32b, sh); // Gains in Q17
+ }
+ else
+ {
+ *gain_lo_hiQ17 = 100; // Gains in Q17
+ }
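+
+ /* Q-domain bookkeeping for the gain above (a worked trace of the
+ * inline comments, not extra code):
+ *
+ * numerator: varscaleQ14 * snrq -> Q14 * Q10 = Q24
+ * denominator: tmp32c in Qssh, >> shft -> Q(ssh - shft), 16 bits
+ * quotient: Q24 / Q(ssh - shft) -> Q(24 - ssh + shft)
+ * << (ssh - shft - 7) -> Q(24 - 7) = Q17
+ */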
+ gain_lo_hiQ17++;
+
+ /* copy coefficients to output array */
+ for (n = 0; n < ORDERLO; n++) {
+ *lo_coeffQ15 = (int16_t) (rcQ15_lo[n]);
+ lo_coeffQ15++;
+ }
+ /* residual energy */
+ sh_hi = 31;
+ res_nrgQQ = WebRtcIsacfix_CalculateResidualEnergy(ORDERHI, QdomHI,
+ kShiftHigherBand, a_HIQ12, corrhiQQ, &sh_hi);
+
+ /* Convert to reflection coefficients */
+ WebRtcSpl_LpcToReflCoef(polyHI, ORDERHI, rcQ15_hi);
+
+ if (sh_hi & 0x0001) {
+ res_nrgQQ >>= 1;
+ sh_hi -= 1;
+ }
+
+
+ if (res_nrgQQ > 0)
+ {
+ sqrt_nrg = WebRtcSpl_Sqrt(res_nrgQQ);
+
+
+ /* add hearing threshold and compute the gain */
+ /* hi_coeff = varscale * S_N_R / (sqrt_nrg + varscale * H_T_H); */
+
+ tmp32a = varscaleQ14 >> 1; // H_T_HQ19=65536 (16-17=-1)
+
+ ssh = sh_hi >> 1; // `sqrt_nrg` is in Qssh.
+ sh = ssh - 14;
+ tmp32b = WEBRTC_SPL_SHIFT_W32(tmp32a, sh); // Q14->Qssh
+ tmp32c = sqrt_nrg + tmp32b; // Qssh (denominator)
+ tmp32a = varscaleQ14 * snrq; // Q24 (numerator)
+
+ sh = WebRtcSpl_NormW32(tmp32c);
+ shft = 16 - sh;
+ tmp16a = (int16_t) WEBRTC_SPL_SHIFT_W32(tmp32c, -shft); // Q(ssh-shft) (denominator)
+
+ tmp32b = WebRtcSpl_DivW32W16(tmp32a, tmp16a); // Q(24-ssh+shft)
+ sh = ssh - shft - 7;
+ *gain_lo_hiQ17 = WEBRTC_SPL_SHIFT_W32(tmp32b, sh); // Gains in Q17
+ }
+ else
+ {
+ *gain_lo_hiQ17 = 100; // Gains in Q17
+ }
+ gain_lo_hiQ17++;
+
+
+ /* copy coefficients to output array */
+ for (n = 0; n < ORDERHI; n++) {
+ *hi_coeffQ15 = rcQ15_hi[n];
+ hi_coeffQ15++;
+ }
+ }
+}
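+
+/* On each pass through the block above the caller receives two Q17 gains
+ * via gain_lo_hiQ17 (low band first, then high band), ORDERLO reflection
+ * coefficients in lo_coeffQ15, and ORDERHI in hi_coeffQ15, all in Q15. */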
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/lpc_masking_model.h b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/lpc_masking_model.h
new file mode 100644
index 0000000000..40a99e8a77
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/lpc_masking_model.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * lpc_masking_model.h
+ *
+ * LPC functions
+ *
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_LPC_MASKING_MODEL_H_
+#define MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_LPC_MASKING_MODEL_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "modules/audio_coding/codecs/isac/fix/source/structs.h"
+
+void WebRtcIsacfix_GetVars(const int16_t* input,
+ const int16_t* pitchGains_Q12,
+ uint32_t* oldEnergy,
+ int16_t* varscale);
+
+void WebRtcIsacfix_GetLpcCoef(int16_t* inLoQ0,
+ int16_t* inHiQ0,
+ MaskFiltstr_enc* maskdata,
+ int16_t snrQ10,
+ const int16_t* pitchGains_Q12,
+ int32_t* gain_lo_hiQ17,
+ int16_t* lo_coeffQ15,
+ int16_t* hi_coeffQ15);
+
+typedef int32_t (*CalculateResidualEnergy)(int lpc_order,
+ int32_t q_val_corr,
+ int q_val_polynomial,
+ int16_t* a_polynomial,
+ int32_t* corr_coeffs,
+ int* q_val_residual_energy);
+extern CalculateResidualEnergy WebRtcIsacfix_CalculateResidualEnergy;
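+
+/* The function pointer lets the implementation be chosen at run time:
+ * plain C everywhere, or the MIPS DSPR2 variant declared below. A minimal
+ * sketch of a dispatcher; the initializer name is illustrative, not part
+ * of this header:
+ *
+ * void InitResidualEnergyDispatch(void) {
+ * WebRtcIsacfix_CalculateResidualEnergy =
+ * WebRtcIsacfix_CalculateResidualEnergyC;
+ * #if defined(MIPS_DSP_R2_LE)
+ * WebRtcIsacfix_CalculateResidualEnergy =
+ * WebRtcIsacfix_CalculateResidualEnergyMIPS;
+ * #endif
+ * }
+ */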
+
+int32_t WebRtcIsacfix_CalculateResidualEnergyC(int lpc_order,
+ int32_t q_val_corr,
+ int q_val_polynomial,
+ int16_t* a_polynomial,
+ int32_t* corr_coeffs,
+ int* q_val_residual_energy);
+
+#if defined(MIPS_DSP_R2_LE)
+int32_t WebRtcIsacfix_CalculateResidualEnergyMIPS(int lpc_order,
+ int32_t q_val_corr,
+ int q_val_polynomial,
+ int16_t* a_polynomial,
+ int32_t* corr_coeffs,
+ int* q_val_residual_energy);
+#endif
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+#endif /* MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_LPC_MASKING_MODEL_H_ */
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/lpc_masking_model_mips.c b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/lpc_masking_model_mips.c
new file mode 100644
index 0000000000..727008da32
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/lpc_masking_model_mips.c
@@ -0,0 +1,237 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/isac/fix/source/lpc_masking_model.h"
+
+// MIPS DSPR2 optimization of WebRtcIsacfix_CalculateResidualEnergy().
+// Bit-exact with WebRtcIsacfix_CalculateResidualEnergyC() in
+// lpc_masking_model.c.
+int32_t WebRtcIsacfix_CalculateResidualEnergyMIPS(int lpc_order,
+ int32_t q_val_corr,
+ int q_val_polynomial,
+ int16_t* a_polynomial,
+ int32_t* corr_coeffs,
+ int* q_val_residual_energy) {
+
+ int i = 0, j = 0;
+ int shift_internal = 0, shift_norm = 0;
+ int32_t tmp32 = 0, word32_high = 0, word32_low = 0, residual_energy = 0;
+ int32_t tmp_corr_c = corr_coeffs[0];
+ int16_t* tmp_a_poly = &a_polynomial[0];
+ int32_t sum64_hi = 0;
+ int32_t sum64_lo = 0;
+
+ for (j = 0; j <= lpc_order; j++) {
+ // For the case of i == 0:
+ // residual_energy +=
+ // a_polynomial[j] * corr_coeffs[i] * a_polynomial[j - i];
+
+ int32_t tmp2, tmp3;
+ int16_t sign_1;
+ int16_t sign_2;
+ int16_t sign_3;
+
+ __asm __volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "lh %[tmp2], 0(%[tmp_a_poly]) \n\t"
+ "mul %[tmp32], %[tmp2], %[tmp2] \n\t"
+ "addiu %[tmp_a_poly], %[tmp_a_poly], 2 \n\t"
+ "sra %[sign_2], %[sum64_hi], 31 \n\t"
+ "mult $ac0, %[tmp32], %[tmp_corr_c] \n\t"
+ "shilov $ac0, %[shift_internal] \n\t"
+ "mfhi %[tmp2], $ac0 \n\t"
+ "mflo %[tmp3], $ac0 \n\t"
+ "sra %[sign_1], %[tmp2], 31 \n\t"
+ "xor %[sign_3], %[sign_1], %[sign_2] \n\t"
+ ".set pop \n\t"
+ : [tmp2] "=&r" (tmp2), [tmp3] "=&r" (tmp3), [tmp32] "=&r" (tmp32),
+ [tmp_a_poly] "+r" (tmp_a_poly), [sign_1] "=&r" (sign_1),
+ [sign_3] "=&r" (sign_3), [sign_2] "=&r" (sign_2),
+ [sum64_hi] "+r" (sum64_hi), [sum64_lo] "+r" (sum64_lo)
+ : [tmp_corr_c] "r" (tmp_corr_c), [shift_internal] "r" (shift_internal)
+ : "hi", "lo", "memory"
+ );
+
+ if (sign_3 != 0) {
+ __asm __volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "addsc %[sum64_lo], %[sum64_lo], %[tmp3] \n\t"
+ "addwc %[sum64_hi], %[sum64_hi], %[tmp2] \n\t"
+ ".set pop \n\t"
+ : [sum64_hi] "+r" (sum64_hi), [sum64_lo] "+r" (sum64_lo)
+ : [tmp2] "r" (tmp2), [tmp3] "r" (tmp3)
+ : "hi", "lo", "memory"
+ );
+ } else {
+ if (((!(sign_1 || sign_2)) && (0x7FFFFFFF - sum64_hi < tmp2)) ||
+ ((sign_1 && sign_2) && (sum64_hi + tmp2 > 0))) {
+ // Shift right for overflow.
+ __asm __volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "addiu %[shift_internal], %[shift_internal], 1 \n\t"
+ "prepend %[sum64_lo], %[sum64_hi], 1 \n\t"
+ "sra %[sum64_hi], %[sum64_hi], 1 \n\t"
+ "prepend %[tmp3], %[tmp2], 1 \n\t"
+ "sra %[tmp2], %[tmp2], 1 \n\t"
+ "addsc %[sum64_lo], %[sum64_lo], %[tmp3] \n\t"
+ "addwc %[sum64_hi], %[sum64_hi], %[tmp2] \n\t"
+ ".set pop \n\t"
+ : [tmp2] "+r" (tmp2), [tmp3] "+r" (tmp3),
+ [shift_internal] "+r" (shift_internal),
+ [sum64_hi] "+r" (sum64_hi), [sum64_lo] "+r" (sum64_lo)
+ :
+ : "hi", "lo", "memory"
+ );
+ } else {
+ __asm __volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "addsc %[sum64_lo], %[sum64_lo], %[tmp3] \n\t"
+ "addwc %[sum64_hi], %[sum64_hi], %[tmp2] \n\t"
+ ".set pop \n\t"
+ : [sum64_hi] "+r" (sum64_hi), [sum64_lo] "+r" (sum64_lo)
+ : [tmp2] "r" (tmp2), [tmp3] "r" (tmp3)
+ : "hi", "lo", "memory"
+ );
+ }
+ }
+ }
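+
+ /* The assembly keeps the 64-bit accumulator in two 32-bit halves
+ * (sum64_hi:sum64_lo) and, when the next addition would overflow,
+ * right-shifts both the sum and the addend one bit, counting the lost
+ * bit in shift_internal so the Q value can be corrected at the end.
+ * A portable C sketch of the same idea (illustrative only; the shipped
+ * code stays in DSPR2 assembly for speed):
+ *
+ * int64_t term = ((int64_t)a_polynomial[j] * a_polynomial[j] *
+ * corr_coeffs[0]) >> shift_internal;
+ * if (Int64AddWouldOverflow(sum, term)) { // hypothetical check
+ * sum >>= 1; term >>= 1; shift_internal++;
+ * }
+ * sum += term;
+ */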
+
+ for (i = 1; i <= lpc_order; i++) {
+ tmp_corr_c = corr_coeffs[i];
+ int16_t* tmp_a_poly_j = &a_polynomial[i];
+ int16_t* tmp_a_poly_j_i = &a_polynomial[0];
+ for (j = i; j <= lpc_order; j++) {
+ // For the case of i = 1 .. lpc_order:
+ // residual_energy +=
+ // a_polynomial[j] * corr_coeffs[i] * a_polynomial[j - i] * 2;
+
+ int32_t tmp2, tmp3;
+ int16_t sign_1;
+ int16_t sign_2;
+ int16_t sign_3;
+
+ __asm __volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "lh %[tmp3], 0(%[tmp_a_poly_j]) \n\t"
+ "lh %[tmp2], 0(%[tmp_a_poly_j_i]) \n\t"
+ "addiu %[tmp_a_poly_j], %[tmp_a_poly_j], 2 \n\t"
+ "addiu %[tmp_a_poly_j_i], %[tmp_a_poly_j_i], 2 \n\t"
+ "mul %[tmp32], %[tmp3], %[tmp2] \n\t"
+ "sll %[tmp32], %[tmp32], 1 \n\t"
+ "mult $ac0, %[tmp32], %[tmp_corr_c] \n\t"
+ "shilov $ac0, %[shift_internal] \n\t"
+ "mfhi %[tmp2], $ac0 \n\t"
+ "mflo %[tmp3], $ac0 \n\t"
+ "sra %[sign_1], %[tmp2], 31 \n\t"
+ "sra %[sign_2], %[sum64_hi], 31 \n\t"
+ "xor %[sign_3], %[sign_1], %[sign_2] \n\t"
+ ".set pop \n\t"
+ : [tmp2] "=&r" (tmp2), [tmp3] "=&r" (tmp3), [tmp32] "=&r" (tmp32),
+ [tmp_a_poly_j] "+r" (tmp_a_poly_j), [sign_1] "=&r" (sign_1),
+ [tmp_a_poly_j_i] "+r" (tmp_a_poly_j_i), [sign_2] "=&r" (sign_2),
+ [sign_3] "=&r" (sign_3), [sum64_hi] "+r" (sum64_hi),
+ [sum64_lo] "+r" (sum64_lo)
+ : [tmp_corr_c] "r" (tmp_corr_c), [shift_internal] "r" (shift_internal)
+ : "hi", "lo", "memory"
+ );
+ if (sign_3 != 0) {
+ __asm __volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "addsc %[sum64_lo], %[sum64_lo], %[tmp3] \n\t"
+ "addwc %[sum64_hi], %[sum64_hi], %[tmp2] \n\t"
+ ".set pop \n\t"
+ : [tmp2] "+r" (tmp2), [tmp3] "+r" (tmp3), [sum64_hi] "+r" (sum64_hi),
+ [sum64_lo] "+r" (sum64_lo)
+ :
+ :"memory"
+ );
+ } else {
+ // Test overflow and sum the result.
+ if (((!(sign_1 || sign_2)) && (0x7FFFFFFF - sum64_hi < tmp2)) ||
+ ((sign_1 && sign_2) && (sum64_hi + tmp2 > 0))) {
+ // Shift right for overflow.
+ __asm __volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "addiu %[shift_internal], %[shift_internal], 1 \n\t"
+ "prepend %[sum64_lo], %[sum64_hi], 1 \n\t"
+ "sra %[sum64_hi], %[sum64_hi], 1 \n\t"
+ "prepend %[tmp3], %[tmp2], 1 \n\t"
+ "sra %[tmp2], %[tmp2], 1 \n\t"
+ "addsc %[sum64_lo], %[sum64_lo], %[tmp3] \n\t"
+ "addwc %[sum64_hi], %[sum64_hi], %[tmp2] \n\t"
+ ".set pop \n\t"
+ : [tmp2] "+r" (tmp2), [tmp3] "+r" (tmp3),
+ [shift_internal] "+r" (shift_internal),
+ [sum64_hi] "+r" (sum64_hi), [sum64_lo] "+r" (sum64_lo)
+ :
+ : "hi", "lo", "memory"
+ );
+ } else {
+ __asm __volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "addsc %[sum64_lo], %[sum64_lo], %[tmp3] \n\t"
+ "addwc %[sum64_hi], %[sum64_hi], %[tmp2] \n\t"
+ ".set pop \n\t"
+ : [tmp2] "+r" (tmp2), [tmp3] "+r" (tmp3),
+ [sum64_hi] "+r" (sum64_hi), [sum64_lo] "+r" (sum64_lo)
+ :
+ : "hi", "lo", "memory"
+ );
+ }
+ }
+ }
+ }
+ word32_high = sum64_hi;
+ word32_low = sum64_lo;
+
+ // Calculate the shift (shift_norm) needed to normalize the 64-bit sum.
+ if (word32_high != 0) {
+ shift_norm = 32 - WebRtcSpl_NormW32(word32_high);
+ int tmp1;
+ __asm __volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "srl %[residual_energy], %[sum64_lo], %[shift_norm] \n\t"
+ "li %[tmp1], 32 \n\t"
+ "subu %[tmp1], %[tmp1], %[shift_norm] \n\t"
+ "sll %[tmp1], %[sum64_hi], %[tmp1] \n\t"
+ "or %[residual_energy], %[residual_energy], %[tmp1] \n\t"
+ ".set pop \n\t"
+ : [residual_energy] "=&r" (residual_energy), [tmp1]"=&r"(tmp1),
+ [sum64_hi] "+r" (sum64_hi), [sum64_lo] "+r" (sum64_lo)
+ : [shift_norm] "r" (shift_norm)
+ : "memory"
+ );
+ } else {
+ if ((word32_low & 0x80000000) != 0) {
+ shift_norm = 1;
+ residual_energy = (uint32_t)word32_low >> 1;
+ } else {
+ shift_norm = WebRtcSpl_NormW32(word32_low);
+ residual_energy = word32_low << shift_norm;
+ shift_norm = -shift_norm;
+ }
+ }
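+
+ /* Normalization: the 64-bit sum is shifted so the result fits a signed
+ * 32-bit word, and shift_norm records the net shift (negative when the
+ * sum was small and had to be shifted up). A portable sketch:
+ *
+ * residual_energy = (int32_t)(shift_norm >= 0 ? sum64 >> shift_norm
+ * : sum64 << -shift_norm);
+ */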
+
+ // Q(q_val_polynomial * 2) * Q(q_val_corr) >> shift_internal >> shift_norm
+ // = Q(q_val_corr - shift_internal - shift_norm + q_val_polynomial * 2)
+ *q_val_residual_energy =
+ q_val_corr - shift_internal - shift_norm + q_val_polynomial * 2;
+
+ return residual_energy;
+}
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/lpc_masking_model_unittest.cc b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/lpc_masking_model_unittest.cc
new file mode 100644
index 0000000000..82793f1344
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/lpc_masking_model_unittest.cc
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/isac/fix/source/lpc_masking_model.h"
+
+#include "system_wrappers/include/cpu_features_wrapper.h"
+#include "test/gtest.h"
+
+class LpcMaskingModelTest : public ::testing::Test {
+ protected:
+ // Runs the CalculateResidualEnergy implementation passed in as a
+ // function pointer against known test vectors.
+ void CalculateResidualEnergyTester(
+ CalculateResidualEnergy CalculateResidualEnergyFunction) {
+ const int kIntOrder = 10;
+ const int32_t kInt32QDomain = 5;
+ const int kIntShift = 11;
+ int16_t a[kIntOrder + 1] = {32760, 122, 7, 0, -32760, -3958,
+ -48, 18745, 498, 9, 23456};
+ int32_t corr[kIntOrder + 1] = {11443647, -27495, 0, 98745, -11443600, 1,
+ 1, 498, 9, 888, 23456};
+ int q_shift_residual = 0;
+ int32_t residual_energy = 0;
+
+ // Test the code path where (residual_energy >= 0x10000).
+ residual_energy = CalculateResidualEnergyFunction(
+ kIntOrder, kInt32QDomain, kIntShift, a, corr, &q_shift_residual);
+ EXPECT_EQ(1789023310, residual_energy);
+ EXPECT_EQ(2, q_shift_residual);
+
+ // Test the code path where (residual_energy < 0x10000)
+ // and ((energy & 0x8000) != 0).
+ for (int i = 0; i < kIntOrder + 1; i++) {
+ a[i] = 24575 >> i;
+ corr[i] = i;
+ }
+ residual_energy = CalculateResidualEnergyFunction(
+ kIntOrder, kInt32QDomain, kIntShift, a, corr, &q_shift_residual);
+ EXPECT_EQ(1595279092, residual_energy);
+ EXPECT_EQ(26, q_shift_residual);
+
+ // Test the code path where (residual_energy <= 0x7fff).
+ for (int i = 0; i < kIntOrder + 1; i++) {
+ a[i] = 2457 >> i;
+ }
+ residual_energy = CalculateResidualEnergyFunction(
+ kIntOrder, kInt32QDomain, kIntShift, a, corr, &q_shift_residual);
+ EXPECT_EQ(2029266944, residual_energy);
+ EXPECT_EQ(33, q_shift_residual);
+ }
+};
+
+TEST_F(LpcMaskingModelTest, CalculateResidualEnergyTest) {
+ CalculateResidualEnergyTester(WebRtcIsacfix_CalculateResidualEnergyC);
+}
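+
+// A hedged sketch of how the MIPS DSPR2 variant, which the header documents
+// as bit-exact with the C reference, could be exercised by the same tester
+// on a MIPS_DSP_R2_LE build (illustrative only):
+//
+// #if defined(MIPS_DSP_R2_LE)
+// TEST_F(LpcMaskingModelTest, CalculateResidualEnergyTestMIPS) {
+//   CalculateResidualEnergyTester(WebRtcIsacfix_CalculateResidualEnergyMIPS);
+// }
+// #endif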
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/lpc_tables.c b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/lpc_tables.c
new file mode 100644
index 0000000000..d495d29235
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/lpc_tables.c
@@ -0,0 +1,1281 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * lpc_tables.c
+ *
+ * Coding tables for the KLT coefficients
+ *
+ */
+
+
+#include "modules/audio_coding/codecs/isac/fix/source/settings.h"
+#include "modules/audio_coding/codecs/isac/fix/source/lpc_tables.h"
+
+/* indices of KLT coefficients used */
+const uint16_t WebRtcIsacfix_kSelIndGain[12] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+ 10, 11};
+
+const uint16_t WebRtcIsacfix_kSelIndShape[108] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+ 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
+ 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
+ 50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
+ 60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
+ 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
+ 80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
+ 90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
+ 100, 101, 102, 103, 104, 105, 106, 107
+};
+
+/* cdf array for model indicator */
+const uint16_t WebRtcIsacfix_kModelCdf[4] = {
+ 0, 15434, 37548, 65535
+};
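+
+/* A cdf table maps symbol k to the cumulative range [cdf[k], cdf[k+1]) out
+ * of 65535. A minimal sketch of the decoder-side lookup (illustrative; the
+ * actual arithmetic decoder lives elsewhere in the codec):
+ *
+ * static int DecodeSymbol(const uint16_t* cdf, int num_symbols,
+ * uint16_t target) {
+ * int k = 0;
+ * while (k + 1 < num_symbols && cdf[k + 1] <= target) k++;
+ * return k; // cdf[k] <= target < cdf[k + 1]
+ * }
+ */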
+
+/* pointer to cdf array for model indicator */
+const uint16_t *WebRtcIsacfix_kModelCdfPtr[1] = {
+ WebRtcIsacfix_kModelCdf
+};
+
+/* initial cdf index for decoder of model indicator */
+const uint16_t WebRtcIsacfix_kModelInitIndex[1] = {
+ 1
+};
+
+/* offset to go from rounded value to quantization index */
+const int16_t WebRtcIsacfix_kQuantMinGain[12] ={
+ 3, 6, 4, 6, 6, 9, 5, 16, 11, 34, 32, 47
+};
+
+const int16_t WebRtcIsacfix_kQuantMinShape[108] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
+ 1, 1, 1, 1, 2, 2, 2, 3, 0, 0,
+ 0, 0, 1, 0, 0, 0, 0, 1, 1, 1,
+ 1, 1, 1, 2, 2, 3, 0, 0, 0, 0,
+ 1, 0, 1, 1, 1, 1, 1, 1, 1, 2,
+ 2, 4, 3, 5, 0, 0, 0, 0, 1, 1,
+ 1, 1, 1, 1, 2, 1, 2, 2, 3, 4,
+ 4, 7, 0, 0, 1, 1, 1, 1, 1, 1,
+ 1, 2, 3, 2, 3, 4, 4, 5, 7, 13,
+ 0, 1, 1, 2, 3, 2, 2, 2, 4, 4,
+ 5, 6, 7, 11, 9, 13, 12, 26
+};
+
+/* maximum quantization index */
+const uint16_t WebRtcIsacfix_kMaxIndGain[12] = {
+ 6, 12, 8, 14, 10, 19, 12, 31, 22, 56, 52, 138
+};
+
+const uint16_t WebRtcIsacfix_kMaxIndShape[108] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
+ 2, 2, 2, 2, 4, 4, 5, 6, 0, 0,
+ 0, 0, 1, 0, 0, 0, 0, 1, 2, 2,
+ 2, 2, 3, 4, 5, 7, 0, 0, 0, 0,
+ 2, 0, 2, 2, 2, 2, 3, 2, 2, 4,
+ 4, 6, 6, 9, 0, 0, 0, 0, 2, 2,
+ 2, 2, 2, 2, 3, 2, 4, 4, 7, 7,
+ 9, 13, 0, 0, 2, 2, 2, 2, 2, 2,
+ 3, 4, 5, 4, 6, 8, 8, 10, 16, 25,
+ 0, 2, 2, 4, 5, 4, 4, 4, 7, 8,
+ 9, 10, 13, 19, 17, 23, 25, 49
+};
+
+/* index offset */
+const uint16_t WebRtcIsacfix_kOffsetGain[3][12] = {
+ { 0, 7, 20, 29, 44, 55, 75, 88, 120, 143, 200, 253},
+ { 0, 7, 19, 27, 42, 53, 73, 86, 117, 140, 197, 249},
+ { 0, 7, 20, 28, 44, 55, 75, 89, 121, 145, 202, 257}
+};
+
+const uint16_t WebRtcIsacfix_kOffsetShape[3][108] = {
+ {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+ 11, 14, 17, 20, 23, 28, 33, 39, 46, 47,
+ 48, 49, 50, 52, 53, 54, 55, 56, 58, 61,
+ 64, 67, 70, 74, 79, 85, 93, 94, 95, 96,
+ 97, 100, 101, 104, 107, 110, 113, 117, 120, 123,
+ 128, 133, 140, 147, 157, 158, 159, 160, 161, 164,
+ 167, 170, 173, 176, 179, 183, 186, 191, 196, 204,
+ 212, 222, 236, 237, 238, 241, 244, 247, 250, 253,
+ 256, 260, 265, 271, 276, 283, 292, 301, 312, 329,
+ 355, 356, 359, 362, 367, 373, 378, 383, 388, 396,
+ 405, 415, 426, 440, 460, 478, 502, 528
+ },
+ {
+ 0, 1, 2, 3, 4, 6, 7, 8, 9, 11,
+ 13, 16, 19, 22, 26, 29, 34, 39, 45, 46,
+ 47, 48, 49, 50, 51, 52, 53, 55, 57, 60,
+ 63, 66, 70, 73, 78, 84, 91, 92, 93, 94,
+ 95, 96, 97, 99, 102, 105, 108, 111, 114, 118,
+ 123, 128, 134, 141, 151, 152, 153, 154, 156, 159,
+ 162, 165, 168, 171, 174, 177, 181, 186, 194, 200,
+ 208, 218, 233, 234, 235, 236, 239, 242, 245, 248,
+ 251, 254, 258, 263, 270, 277, 288, 297, 308, 324,
+ 349, 351, 354, 357, 361, 366, 372, 378, 383, 390,
+ 398, 407, 420, 431, 450, 472, 496, 524
+ },
+ {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 11,
+ 14, 17, 20, 23, 26, 29, 34, 40, 47, 48,
+ 49, 50, 51, 52, 53, 54, 55, 58, 61, 64,
+ 67, 70, 73, 77, 82, 88, 96, 97, 98, 99,
+ 101, 102, 104, 107, 110, 113, 116, 119, 122, 125,
+ 129, 134, 141, 150, 160, 161, 162, 163, 166, 168,
+ 171, 174, 177, 180, 183, 186, 190, 195, 201, 208,
+ 216, 226, 243, 244, 245, 248, 251, 254, 257, 260,
+ 263, 268, 273, 278, 284, 291, 299, 310, 323, 340,
+ 366, 368, 371, 374, 379, 383, 389, 394, 399, 406,
+ 414, 422, 433, 445, 461, 480, 505, 533
+ }
+};
+
+/* initial cdf index for KLT coefficients */
+const uint16_t WebRtcIsacfix_kInitIndexGain[3][12] = {
+ { 3, 6, 4, 7, 5, 10, 6, 16, 11, 28, 26, 69},
+ { 3, 6, 4, 7, 5, 10, 6, 15, 11, 28, 26, 69},
+ { 3, 6, 4, 8, 5, 10, 7, 16, 12, 28, 27, 70}
+};
+
+const uint16_t WebRtcIsacfix_kInitIndexShape[3][108] = {
+ {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
+ 1, 1, 1, 1, 2, 2, 3, 3, 0, 0,
+ 0, 0, 1, 0, 0, 0, 0, 1, 1, 1,
+ 1, 1, 2, 2, 3, 4, 0, 0, 0, 0,
+ 1, 0, 1, 1, 1, 1, 2, 1, 1, 2,
+ 2, 3, 3, 5, 0, 0, 0, 0, 1, 1,
+ 1, 1, 1, 1, 2, 1, 2, 2, 4, 4,
+ 5, 7, 0, 0, 1, 1, 1, 1, 1, 1,
+ 2, 2, 3, 2, 3, 4, 4, 5, 8, 13,
+ 0, 1, 1, 2, 3, 2, 2, 2, 4, 4,
+ 5, 5, 7, 10, 9, 12, 13, 25
+ },
+ {
+ 0, 0, 0, 0, 1, 0, 0, 0, 1, 1,
+ 1, 1, 1, 2, 1, 2, 2, 3, 0, 0,
+ 0, 0, 0, 0, 0, 0, 1, 1, 1, 1,
+ 1, 2, 1, 2, 3, 3, 0, 0, 0, 0,
+ 0, 0, 1, 1, 1, 1, 1, 1, 2, 2,
+ 2, 3, 3, 5, 0, 0, 0, 1, 1, 1,
+ 1, 1, 1, 1, 1, 2, 2, 4, 3, 4,
+ 5, 7, 0, 0, 0, 1, 1, 1, 1, 1,
+ 1, 2, 2, 3, 3, 5, 4, 5, 8, 12,
+ 1, 1, 1, 2, 2, 3, 3, 2, 3, 4,
+ 4, 6, 5, 9, 11, 12, 14, 25
+ },
+ {
+ 0, 0, 0, 0, 0, 0, 0, 0, 1, 1,
+ 1, 1, 1, 1, 1, 2, 3, 3, 0, 0,
+ 0, 0, 0, 0, 0, 0, 1, 1, 1, 1,
+ 1, 1, 2, 2, 3, 4, 0, 0, 0, 1,
+ 0, 1, 1, 1, 1, 1, 1, 1, 1, 2,
+ 2, 3, 4, 5, 0, 0, 0, 1, 1, 1,
+ 1, 1, 1, 1, 1, 2, 2, 3, 3, 4,
+ 5, 8, 0, 0, 1, 1, 1, 1, 1, 1,
+ 2, 2, 2, 3, 3, 4, 5, 6, 8, 13,
+ 1, 1, 1, 2, 2, 3, 2, 2, 3, 4,
+ 4, 5, 6, 8, 9, 12, 14, 25
+ }
+};
+
+/* offsets for quantizer representation levels */
+const uint16_t WebRtcIsacfix_kOfLevelsGain[3] = {
+ 0, 392, 779
+};
+
+const uint16_t WebRtcIsacfix_kOfLevelsShape[3] = {
+ 0, 578, 1152
+};
+
+/* quantizer representation levels */
+
+
+
+const int32_t WebRtcIsacfix_kLevelsGainQ17[1176] = {
+ -364547,-231664,-102123,-573,104807,238257,368823,-758583,-640135,-510291
+ ,-377662,-252785,-113177,2627,112906,248601,389461,522691,644517,763974
+ ,-538963,-368179,-245823,-106095,-890,104299,241111,350730,493190,-800763
+ ,-646230,-510239,-382115,-248623,-111829,-2983,113852,251105,388114,519757
+ ,644048,774712,896334,1057931,-770009,-635310,-503690,-375087,-248106,-108525
+ ,-105,108259,243472,377948,519271,-1160885,-1032044,-914636,-777593,-647891
+ ,-518408,-388028,-254321,-115293,-598,117849,251296,385367,515510,652727
+ ,777432,920363,1038625,1153637,1316836,-632287,-505430,-379430,-248458,-118013
+ ,-888,118762,250266,381650,513327,652169,766471,932113,-2107480,-1971030
+ ,-1827020,-1698263,-1558670,-1436314,-1305377,-1172252,-1047355,-914202,-779651,-651001
+ ,-520999,-390394,-255761,-123490,-1893,126839,256703,385493,518607,651760
+ ,782750,908693,1044131,1163865,1311066,1424177,1582628,1709823,1831740,1955391
+ ,-1423044,-1288917,-1181281,-1043222,-911770,-780354,-646799,-522664,-386721,-258266
+ ,-128060,-1101,128233,259996,390336,519590,649290,778701,908010,1040796
+ ,1161235,1306889,1441882,-4446002,-4301031,-4194304,-4080591,-3947740,-3808975,-3686530
+ ,-3567839,-3383251,-3287089,-3136577,-3017405,-2869860,-2751321,-2619984,-2482932,-2354790
+ ,-2223147,-2090669,-1964135,-1831208,-1706697,-1570817,-1446008,-1305386,-1175773,-1046066
+ ,-915356,-785120,-653614,-524331,-393767,-260442,-130187,-799,128841,261466
+ ,393616,520542,652117,784613,914159,1045399,1181072,1308971,1442502,1570346
+ ,1693912,1843986,1966014,2090474,2224869,2364593,2475934,2628403,2752512,2856640
+ ,-4192441,-4063232,-3917821,-3799195,-3666233,-3519199,-3411021,-3269192,-3135684,-3008826
+ ,-2880875,-2747342,-2620981,-2494872,-2354979,-2229718,-2098939,-1964971,-1835399,-1703452
+ ,-1572806,-1440482,-1311794,-1179338,-1046521,-919823,-785914,-655335,-523416,-395507
+ ,-264833,-132184,-2546,131698,256217,391372,522688,651248,789964,909618
+ ,1035305,1179145,1313824,1436934,1552353,1693722,1815508,1972826,2096328,2228224
+ ,2359296,2490368,2598848,-6160384,-6029312,-5881382,-5767168,-5636096,-5505024,-5373952
+ ,-5228418,-5110384,-4954923,-4880576,-4710990,-4587364,-4471340,-4333905,-4211513,-4051293
+ ,-3907927,-3800105,-3675961,-3538640,-3413663,-3271148,-3152105,-3019103,-2869647,-2744015
+ ,-2620639,-2479385,-2364211,-2227611,-2095427,-1974497,-1834168,-1703561,-1568961,-1439826
+ ,-1309192,-1174050,-1050191,-917836,-786015,-656943,-518934,-394831,-257708,-128041
+ ,1610,128991,264442,393977,521383,653849,788164,918641,1049122,1181971
+ ,1308934,1439505,1571808,1706305,1836318,1966235,2097269,2228990,2357005,2490292
+ ,2617400,2749680,2881234,3014880,3145637,3276467,3409099,3536637,3671493,3802918
+ ,3929740,4065036,4194143,4325999,4456126,4586857,4717194,4843923,4978676,5110913
+ ,5245281,5371394,5499780,5633779,5762611,5897682,6028688,6167546,6296465,6421682
+ ,6548882,6682074,6809432,6941956,7078143,7204509,7334296,7475137,7609896,7732044
+ ,7861604,8002039,8131670,8259222,8390299,8522399,8650037,8782348,8908402,9037815
+ ,9164594,9300338,9434679,9574500,9699702,9833934,9948152,10083972,10244937,10332822
+ ,10485760,10600122,10760754,10892964,11010048,11111004,11272192,11403264,11525091,11624984
+ ,11796480,11915146,-393216,-262144,-101702,-740,100568,262144,393216,-786432
+ ,-655360,-524288,-383907,-243301,-94956,-156,95547,269629,416691,524288
+ ,655360,-393216,-262144,-88448,-37,87318,262144,393216,524288,-917504
+ ,-786432,-655360,-495894,-373308,-267503,-93211,4119,91308,250895,393216
+ ,526138,655360,786432,917504,-786432,-655360,-524288,-393216,-262144,-83497
+ ,222,86893,240922,393216,524288,-1048576,-917504,-790472,-655360,-508639
+ ,-383609,-262016,-95550,-3775,96692,256797,364847,534906,655360,786432
+ ,889679,1048576,1179648,1310720,1441792,-655360,-524288,-377684,-248408,-93690
+ ,1261,95441,227519,393216,524288,655360,786432,917504,-2097152,-1966080
+ ,-1809470,-1703936,-1572864,-1441792,-1314289,-1195149,-1056205,-917504,-809951,-657769
+ ,-521072,-383788,-248747,-106350,-2944,105550,243408,388548,521064,628732
+ ,786432,885456,1064548,1179648,1310720,1441792,1572864,1703936,1835008,-1441792
+ ,-1310720,-1179648,-1037570,-888492,-767774,-646634,-519935,-373458,-248029,-111915
+ ,760,111232,247735,379432,507672,672699,786432,917504,1048576,1179648
+ ,1310720,1441792,-4456448,-4325376,-4194304,-4063232,-3932160,-3801088,-3670016,-3538944
+ ,-3407872,-3276800,-3145728,-3014656,-2883584,-2752512,-2647002,-2490368,-2359296,-2228224
+ ,-2097152,-1951753,-1835008,-1703936,-1594177,-1462001,-1289150,-1160774,-1025917,-924928
+ ,-782509,-641294,-516191,-386630,-251910,-118886,5210,121226,253949,386008
+ ,517973,649374,780064,917783,1052462,1183856,1290593,1419389,1556641,1699884
+ ,1835008,1988314,2090470,2228224,2359296,2490368,2621440,2752512,2883584,-3801088
+ ,-3643514,-3539937,-3409931,-3263294,-3145658,-3012952,-2879230,-2752359,-2622556,-2483471
+ ,-2357556,-2226500,-2093112,-1965892,-1833664,-1701035,-1567767,-1440320,-1310556,-1178339
+ ,-1049625,-916812,-786477,-655277,-525050,-393773,-264828,-130696,-480,132126
+ ,260116,394197,527846,652294,785563,917183,1049511,1175958,1308161,1438759
+ ,1572253,1698835,1828535,1967072,2089391,2212798,2348901,2461547,2621440,2752512
+ ,2883584,-7309870,-7203780,-7062699,-6939106,-6790819,-6672036,-6553600,-6422317,-6288422
+ ,-6164694,-6026456,-5901410,-5754168,-5621459,-5502710,-5369686,-5240454,-5120712,-4976140
+ ,-4847970,-4723070,-4589083,-4450923,-4324680,-4189892,-4065551,-3931803,-3800209,-3668539
+ ,-3539395,-3404801,-3277470,-3141389,-3016710,-2885724,-2752612,-2618541,-2486762,-2354153
+ ,-2225059,-2094984,-1968194,-1830895,-1699508,-1575743,-1444516,-1308683,-1179714,-1053088
+ ,-917981,-783707,-653900,-524980,-395409,-260309,-131948,-3452,132113,263241
+ ,392185,522597,654134,788288,919810,1045795,1179210,1314201,1444235,1574447
+ ,1705193,1834009,1967332,2098102,2229019,2359147,2489859,2619878,2754966,2879671
+ ,3014438,3146143,3276733,3405958,3542196,3667493,3798815,3932961,4062458,4187125
+ ,4322346,4454875,4587752,4716809,4848274,4975027,5111957,5242215,5373085,5501158
+ ,5640140,5762918,5895358,6024008,6157906,6290628,6422713,6546339,6675888,6815606
+ ,6955288,7077501,7211630,7337893,7473635,7607175,7728310,7866475,7999658,8127888
+ ,8241758,8386483,8522550,8641582,8771915,8922139,9038632,9179385,9313426,9437184
+ ,9568256,9699328,9830400,9952933,10120004,10223616,10354688,10474645,10616832,-393216
+ ,-262144,-85425,-121,82533,262144,393216,-786432,-655360,-524288,-379928
+ ,-222821,-95200,287,95541,227093,393216,493567,655360,786432,-393216
+ ,-262144,-86805,510,86722,262144,393216,524288,-1048576,-917504,-786432
+ ,-624456,-529951,-395071,-241627,-101168,81,99975,241605,393216,524288
+ ,655360,786432,917504,-786432,-655360,-524288,-393216,-230359,-95619,-137
+ ,94425,226222,393216,524288,-1179648,-1048576,-917504,-773841,-655360,-492258
+ ,-379715,-244707,-103621,-434,104523,242680,381575,523659,650565,786432
+ ,917504,1048576,1179648,1310720,-786432,-629344,-524288,-376757,-242858,-101932
+ ,-2715,107155,239212,366480,514943,655360,786432,917504,-2228224,-2097152
+ ,-1966080,-1835008,-1703936,-1572864,-1441792,-1284584,-1179648,-1048819,-934658,-777181
+ ,-626371,-515660,-377493,-248975,-113036,436,113584,248354,379718,512475
+ ,653932,796494,917504,1048576,1179648,1310720,1441792,1572864,1703936,1835008
+ ,-1572864,-1441792,-1297608,-1161159,-1032316,-917092,-779770,-647384,-515529,-384269
+ ,-250003,-119252,1053,118111,249512,380545,512039,648101,770656,907003
+ ,1021725,1178082,1310720,1441792,-4587520,-4456448,-4325376,-4194304,-4063232,-3932160
+ ,-3801088,-3670016,-3538944,-3407872,-3276800,-3145728,-2999335,-2883584,-2752512,-2621440
+ ,-2490368,-2359296,-2228224,-2112691,-1966080,-1848781,-1709830,-1566109,-1438427,-1303530
+ ,-1176124,-1040936,-913876,-784585,-652025,-518361,-385267,-256342,-127297,-2733
+ ,125422,257792,389363,519911,651106,783805,909407,1044143,1174156,1309267
+ ,1436173,1553771,1708958,1814083,1967036,2095386,2255169,2359296,2478303,2621440
+ ,2752512,-4456448,-4325376,-4194304,-4063232,-3932160,-3797524,-3670016,-3560250,-3413217
+ ,-3257719,-3166416,-2986626,-2878000,-2781144,-2625383,-2495465,-2346792,-2230930,-2077063
+ ,-1949225,-1819274,-1697261,-1568664,-1443074,-1304302,-1175289,-1043794,-913423,-785561
+ ,-652104,-522835,-392667,-260517,-130088,-2,129509,260990,391931,522470
+ ,655770,784902,917093,1046445,1176951,1303121,1441362,1565401,1702022,1822856
+ ,1952852,2090384,2214607,2338436,2457483,2621440,-8781824,-8650752,-8519680,-8388608
+ ,-8260828,-8126464,-8003337,-7859030,-7750057,-7602176,-7471104,-7340032,-7193045,-7090588
+ ,-6946816,-6843344,-6676635,-6557575,-6447804,-6277614,-6159736,-6035729,-5884723,-5739567
+ ,-5634818,-5489867,-5372864,-5243300,-5098939,-4988639,-4856258,-4728494,-4591717,-4447428
+ ,-4322409,-4192918,-4062638,-3934141,-3797545,-3673373,-3531587,-3407391,-3277404,-3147797
+ ,-3013578,-2886548,-2749811,-2616428,-2490949,-2361301,-2228482,-2096883,-1964343,-1831754
+ ,-1702201,-1572495,-1442012,-1309242,-1182451,-1048996,-916905,-786510,-657079,-524730
+ ,-393672,-261313,-128743,166,130678,261334,393287,524155,655570,786839
+ ,917353,1052167,1179013,1309360,1442634,1571153,1703961,1832027,1965014,2097912
+ ,2224861,2355341,2490455,2623051,2753484,2877015,3015783,3144157,3273705,3405255
+ ,3542006,3669580,3802417,3935413,4065088,4190896,4333521,4456355,4579781,4713832
+ ,4845707,4978625,5113278,5243817,5382318,5500592,5638135,5761179,5900822,6029270
+ ,6186398,6297816,6436435,6559163,6666389,6806548,6950461,7086078,7195777,7350973
+ ,7480132,7614852,7743514,7847288,8014762,8126464,8257536,8388608,8519680,8650752
+ ,8781824,8912896,9043968,9175040,9306112,9437184
+};
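+
+/* The gain levels are stored in Q17, so dividing by 2^17 = 131072 recovers
+ * the real-valued quantizer level; e.g. the first entry, -364547, decodes
+ * to about -2.781. A one-line sketch:
+ *
+ * double level = (double)WebRtcIsacfix_kLevelsGainQ17[i] / (1 << 17);
+ */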
+
+
+
+const int16_t WebRtcIsacfix_kLevelsShapeQ10[1735] = {
+ 0, 0, -1, 0, 0, 1, 0, 1, 0, -821
+ , 1, -763, -1, 656, -620, 0, 633, -636, 4, 615
+ , -630, 1, 649, -1773, -670, 5, 678, 1810, -1876, -676
+ , 0, 691, 1843, -1806, -743, -1, 749, 1795, 2920, -2872
+ , -1761, -772, -3, 790, 1763, 2942, 0, 0, 0, 0
+ , -792, 2, 0, 0, 1, 0, -854, 0, -702, -1
+ , 662, -624, -5, 638, -611, -6, 638, -647, 0, 651
+ , -685, -4, 679, 2123, -1814, -693, 0, 664, 1791, -1735
+ , -737, 0, 771, 1854, 2873, -2867, -1842, -793, -1, 821
+ , 1826, 2805, 3922, 0, 0, 0, -1, -779, 1, 786
+ , 1, -708, 0, 789, -799, 1, 797, -663, 2, 646
+ , -600, 3, 609, -600, 1, 658, 1807, -627, -3, 612
+ , -625, 3, 632, -1732, -674, 1, 672, 2048, -1768, -715
+ , 0, 724, 1784, -3881, -3072, -1774, -719, -1, 730, 1811
+ , -2963, -1829, -806, -1, 816, 1795, 3050, -5389, -3784, -2942
+ , -1893, -865, -12, 867, 1885, 2945, 3928, -2, 1, 4
+ , 0, -694, 2, 665, -598, 5, 587, -599, -1, 661
+ , -656, -7, 611, -607, 5, 603, -618, -4, 620, -1794
+ , -645, -2, 654, -655, -1, 658, -1801, -700, 5, 707
+ , 1927, -1752, -745, -8, 752, 1843, -2838, -1781, -801, 11
+ , 796, 1811, 2942, 3866, -3849, -3026, -1848, -819, 2, 827
+ , 1825, 2963, -3873, -2904, -1869, -910, -6, 903, 1902, 2885
+ , 3978, 5286, -7168, -6081, -4989, -3968, -2963, -1970, -943, -2
+ , 953, 1951, 2968, 3974, 5009, 6032, -2, 3, -1024, 2
+ , 1024, -637, 1, 669, -613, -7, 630, -603, 4, 612
+ , -612, 0, 590, -645, -11, 627, -657, -2, 671, 1849
+ , -1853, -694, 2, 702, 1838, -3304, -1780, -736, -8, 732
+ , 1772, -1709, -755, -6, 760, 1780, -2994, -1780, -800, 8
+ , 819, 1830, 2816, -4096, -2822, -1881, -851, -4, 855, 1872
+ , 2840, 3899, -3908, -2904, -1878, -887, 6, 897, 1872, 2942
+ , 4008, -4992, -3881, -2933, -1915, -928, 1, 937, 1919, 2900
+ , 4009, 4881, -6848, -6157, -5065, -3981, -2983, -1972, -978, -1
+ , 968, 1979, 2988, 4008, 5007, 6108, 7003, 8051, 9027,-13272
+ ,-12012,-11228,-10213, -9261, -8084, -7133, -6075, -5052, -4050, -3036
+ , -2014, -996, -4, 1007, 2031, 3038, 4049, 5074, 6134, 7069
+ , 8094, 9069, 10212, 11049, 12104, 51, -1024, -13, 1024, -609
+ , -107, 613, -2048, -687, -95, 667, 2048, -3072, -1724, -785
+ , -34, 732, 1819, -2048, -703, -26, 681, 2048, -2048, -686
+ , -9, 665, 2048, -2048, -702, 37, 748, 1723, -4096, -2786
+ , -1844, -837, 37, 811, 1742, 3072, -4096, -2783, -1848, -881
+ , 39, 898, 1843, 2792, 3764, -5120, -4096, -2923, -1833, -852
+ , -14, 862, 1824, 2834, 4096, -6144, -5120, -3914, -2842, -1870
+ , -886, -27, 888, 1929, 2931, 4051, -7168, -6144, -5120, -3866
+ , -2933, -1915, -927, 64, 933, 1902, 2929, 3912, 5063, 6144
+ ,-11264,-10240, -9216, -8192, -7086, -6144, -5039, -3972, -2943, -1929
+ , -941, 3, 938, 1942, 2959, 3933, 4905, 6088, 6983, 8192
+ , -9216, -8192, -7202, -6088, -4983, -4019, -2955, -1975, -966, 17
+ , 997, 1981, 2967, 3990, 4948, 6022, 6967, 8192,-13312,-12288
+ ,-11264,-10240, -9216, -8049, -6997, -6040, -5026, -4043, -3029, -2034
+ , -1015, -23, 984, 1997, 3010, 4038, 5002, 6015, 6946, 8061
+ , 9216, 10240,-12381,-11264,-10240, -9060, -8058, -7153, -6085, -5075
+ , -4051, -3042, -2037, -1017, -5, 1007, 2028, 3035, 4050, 5088
+ , 6111, 7160, 8156, 9215, 10095, 11229, 12202, 13016,-26624,-25600
+ ,-24582,-23671,-22674,-21400,-20355,-19508,-18315,-17269,-16361,-15299
+ ,-14363,-13294,-12262,-11237,-10203, -9227, -8165, -7156, -6116, -5122
+ , -4076, -3056, -2043, -1020, -8, 1027, 2047, 3065, 4110, 5130
+ , 6125, 7168, 8195, 9206, 10230, 11227, 12256, 13304, 14281, 15316
+ , 16374, 17382, 18428, 19388, 20361, 21468, 22448, 23781, 0, 0
+ , -1, 0, -2, 1024, 0, 0, 0, -1, 1024, -1024
+ , 1, -1024, 4, 1024, -1024, 2, 1024, -1024, 2, 1024
+ , -2048, -1024, -4, 1024, -1024, 2, 1024, -2048, -1024, -3
+ , 1024, 2048, -2048, -1024, 4, 1024, 2048, -3072, -2048, -1024
+ , -1, 662, 2048, 0, 1, 0, 0, 1, -2, -2
+ , 0, 2, 1024, -1, 1024, -1024, 4, 1024, -1024, 1
+ , 1024, -1024, 1, 1024, -2048, -781, -4, 844, -807, -5
+ , 866, -2048, -726, -13, 777, 2048, -2048, -643, -4, 617
+ , 2048, 3072, -3072, -2048, -629, 1, 630, 2048, 3072, 0
+ , -1, 1, -2, 2, 1, -1024, 5, -1024, 6, 1024
+ , -1024, 4, 1024, -1024, 1, 1024, -1024, -9, 1024, -673
+ , -7, 655, -2048, -665, -15, 716, -2048, -647, 4, 640
+ , 2048, -2048, -615, -1, 635, 2048, -2048, -613, 10, 637
+ , 2048, 3072, -3072, -2048, -647, -3, 641, 2048, 3072, -5120
+ , -4096, -3072, -2048, -681, 6, 685, 2048, 3072, 4096, 1
+ , 1, 0, -1, 1024, -1024, -3, 1024, -1024, 6, 1024
+ , -1024, -1, 769, -733, 0, 1024, -876, -2, 653, -1024
+ , -4, 786, -596, -13, 595, -634, -2, 638, 2048, -2048
+ , -620, -5, 620, 2048, -4096, -3072, -2048, -639, 11, 655
+ , 2048, 3072, -3072, -2048, -659, 5, 663, 2048, -3072, -1823
+ , -687, 22, 695, 2048, 3072, 4096, -4096, -3072, -1848, -715
+ , -3, 727, 1816, 3072, 4096, 5120, -8192, -7168, -6144, -5120
+ , -4096, -2884, -1771, -756, -14, 775, 1844, 3072, 4096, 5120
+ , 6144, -1, 1, 0, -1024, 2, 815, -768, 2, 708
+ , -1024, -3, 693, -661, -7, 607, -643, -5, 609, -624
+ , 3, 631, -682, -3, 691, 2048, -2048, -640, 5, 650
+ , 2048, -3072, -2048, -701, 9, 704, 2048, 3072, -3072, -2048
+ , -670, 10, 674, 2048, 3072, -5120, -4096, -3072, -1749, -738
+ , 0, 733, 1811, 3072, 4096, 5120, -4096, -3072, -1873, -753
+ , 0, 756, 1874, 3072, 4096, -5120, -4096, -2900, -1838, -793
+ , -6, 793, 1868, 2837, 4096, 5120, -7168, -6144, -5120, -4096
+ , -2832, -1891, -828, 1, 828, 1901, 2823, 3912, 5120, 6144
+ , 7168, 8192,-13312,-12288,-11264,-10240, -9216, -8192, -7168, -6144
+ , -5120, -3976, -3004, -1911, -869, 7, 869, 1932, 3024, 3992
+ , 5009, 6144, 7168, 8192, 9216, 10240, 11264, -4, 1024, -629
+ , -22, 609, -623, 9, 640, -2048, -768, 1, 682, -2048
+ , -741, 49, 722, 2048, -3072, -1706, -808, -20, 768, 1750
+ , -1684, -727, -29, 788, 1840, 3033, -1758, -784, 0, 801
+ , 1702, -3072, -1813, -814, 38, 820, 1884, 2927, -4096, -3241
+ , -1839, -922, 25, 882, 1886, 2812, -4096, -2982, -1923, -894
+ , 84, 912, 1869, 2778, 4096, -4928, -3965, -2902, -1920, -883
+ , 3, 917, 1953, 2921, 3957, 4922, 6144, 7168, -5120, -3916
+ , -2897, -1949, -930, 31, 959, 1934, 2901, 3851, 5120, -9216
+ , -8192, -7046, -6029, -5030, -4034, -2980, -1969, -1013, -76, 963
+ , 1963, 2901, 3929, 4893, 6270, 7168, 8192, 9216,-12288,-11264
+ ,-10240, -9216, -8192, -6846, -6123, -5108, -4008, -3000, -1963, -954
+ , -6, 958, 1992, 3009, 4020, 5085, 6097, 7168, 8192, 9216
+ ,-11264,-10139, -9194, -8127, -7156, -6102, -5053, -4049, -3036, -2025
+ , -1009, -34, 974, 1984, 3034, 4028, 5138, 6000, 7057, 8166
+ , 9070, 10033, 11360, 12288,-13312,-12288,-10932,-10190, -9120, -8123
+ , -7128, -6103, -5074, -4081, -3053, -2029, -989, -4, 1010, 2028
+ , 3051, 4073, 5071, 6099, 7132, 8147, 9295, 10159, 11023, 12263
+ , 13312, 14336,-25600,-24576,-23552,-22529,-21504,-20480,-19456,-18637
+ ,-17425,-16165,-15316,-14327,-13606,-12135,-11182,-10107, -9153, -8144
+ , -7146, -6160, -5129, -4095, -3064, -2038, -1025, 1, 1031, 2072
+ , 3074, 4088, 5123, 6149, 7157, 8173, 9198, 10244, 11250, 12268
+ , 13263, 14289, 15351, 16370, 17402, 18413, 19474, 20337, 21386, 22521
+ , 23367, 24350, 0, 0, 0, 0, 0, 0, 0, 0
+ , -1024, 0, 1024, -1024, 0, 1024, -1024, 0, 1024, -1024
+ , 0, 1024, -1024, 0, 1024, -773, 0, 1024, -674, 0
+ , 645, -2048, -745, 0, 628, 2048, -2048, -712, 0, 681
+ , 2048, 3072, -3072, -2048, -673, 0, 682, 1964, 3257, 0
+ , 0, 0, 0, 0, 0, 0, 0, -1024, 0, 1024
+ , -1024, 0, 1024, -1024, 0, 1024, -705, 0, 623, -771
+ , 0, 1024, -786, 0, 688, -631, 0, 652, 2048, -2048
+ , -627, -1, 666, 2048, -3072, -1756, -694, 0, 674, 2048
+ , -3098, -1879, -720, 5, 694, 1886, 2958, 4096, 0, 0
+ , 0, 0, 1024, 0, 0, 1024, -769, 0, 1024, -1024
+ , 0, 1024, -1024, 0, 1024, -817, 0, 734, -786, 0
+ , 651, -638, 0, 637, -623, 0, 671, -652, 0, 619
+ , 2048, -2048, -670, -1, 663, 2048, -1908, -680, 1, 686
+ , 2048, 3072, 4096, -4096, -3072, -1833, -711, 0, 727, 1747
+ , 3072, 4096, -4096, -2971, -1826, -762, 2, 766, 1832, 2852
+ , 3928, 5079, 0, 0, 0, -1024, 0, 1024, -1024, 0
+ , -656, 0, 1024, -599, 0, 620, -1024, 0, 1024, -603
+ , 0, 622, -643, 0, 660, -599, 0, 611, -641, -1
+ , 651, 2048, -2048, -648, -2, 647, 1798, -3072, -2048, -672
+ , 2, 670, 2048, -3072, -1780, -694, -1, 706, 1751, 3072
+ , -3072, -1862, -757, 7, 739, 1798, 3072, 4096, -5120, -4096
+ , -3253, -1811, -787, 3, 782, 1887, 3123, 4096, -7252, -6144
+ , -5354, -4060, -2864, -1863, -820, -11, 847, 1903, 2970, 3851
+ , 4921, 5957, 7168, 8192, 9306, 0, 0, -1024, 0, 1024
+ , -726, 0, 706, -692, 0, 593, -598, 0, 616, -624
+ , 0, 616, -605, 0, 613, -2048, -652, 1, 635, 2048
+ , -2048, -647, -1, 660, 2048, -1811, -668, -2, 685, 2048
+ , -1796, -731, -2, 730, 1702, 3072, -3072, -1766, -747, -4
+ , 756, 1770, 3072, -4096, -3024, -1762, -783, 4, 771, 1781
+ , 3072, -5120, -4057, -2807, -1832, -822, 0, 816, 1804, 2851
+ , 3949, 5120, -6144, -4899, -3927, -2920, -1893, -874, -2, 868
+ , 1881, 2905, 3960, 4912, 6144, -9216, -8192, -7168, -6225, -4963
+ , -3943, -2956, -1890, -902, 0, 897, 1914, 2916, 3984, 4990
+ , 6050, 7168,-11264,-10217, -9114, -8132, -7035, -5988, -4984, -4000
+ , -2980, -1962, -927, 7, 931, 1956, 2981, 4031, 4972, 6213
+ , 7227, 8192, 9216, 10240, 11170, 12288, 13312, 14336, 0, 1024
+ , -557, 1, 571, -606, -4, 612, -1676, -707, 10, 673
+ , 2048, -2048, -727, 5, 686, -3072, -1772, -755, 12, 716
+ , 1877, -1856, -786, 2, 786, 1712, -1685, -818, -16, 863
+ , 1729, -3072, -1762, -857, 3, 866, 1838, 2841, -3862, -2816
+ , -1864, -925, -2, 923, 1897, 2779, -2782, -1838, -920, -28
+ , 931, 1951, 2835, 3804, -4815, -4001, -2940, -1934, -959, -22
+ , 975, 1957, 2904, 3971, 4835, -5148, -3892, -2944, -1953, -986
+ , -11, 989, 1968, 2939, 3949, 4947, 5902, -9216, -8192, -6915
+ , -6004, -4965, -4013, -3009, -1977, -987, -1, 982, 1972, 3000
+ , 3960, 4939, 5814, -8976, -7888, -7084, -5955, -5043, -4009, -2991
+ , -2002, -1000, -8, 993, 2011, 3023, 4026, 5028, 6023, 7052
+ , 8014, 9216,-11240,-10036, -9125, -8118, -7105, -6062, -5048, -4047
+ , -3044, -2025, -1009, -1, 1011, 2023, 3042, 4074, 5085, 6108
+ , 7119, 8142, 9152, 10114, 11141, 12250, 13307,-15360,-14099,-13284
+ ,-12291,-11223,-10221, -9152, -8147, -7128, -6104, -5077, -4072, -3062
+ , -2033, -1020, 7, 1018, 2038, 3059, 4081, 5084, 6109, 7102
+ , 8128, 9134, 10125, 11239, 12080,-23552,-22528,-21504,-20480,-19456
+ ,-18159,-17240,-16291,-15364,-14285,-13305,-12271,-11233,-10217, -9198
+ , -8175, -7157, -6134, -5122, -4089, -3071, -2047, -1018, 3, 1026
+ , 2041, 3077, 4090, 5108, 6131, 7150, 8172, 9175, 10196, 11272
+ , 12303, 13273, 14328, 15332, 16334, 17381, 18409, 19423, 20423, 21451
+ , 22679, 23391, 24568, 25600, 26589
+};
+
+/* cdf tables for quantizer indices */
+const uint16_t WebRtcIsacfix_kCdfGain[1212] = {
+ 0, 13, 301, 3730, 61784, 65167, 65489, 65535, 0, 17,
+ 142, 314, 929, 2466, 7678, 56450, 63463, 64740, 65204, 65426,
+ 65527, 65535, 0, 8, 100, 724, 6301, 60105, 65125, 65510,
+ 65531, 65535, 0, 13, 117, 368, 1068, 3010, 11928, 53603,
+ 61177, 63404, 64505, 65108, 65422, 65502, 65531, 65535, 0, 4,
+ 17, 96, 410, 1859, 12125, 54361, 64103, 65305, 65497, 65535,
+ 0, 4, 88, 230, 469, 950, 1746, 3228, 6092, 16592,
+ 44756, 56848, 61256, 63308, 64325, 64920, 65309, 65460, 65502, 65522,
+ 65535, 0, 88, 352, 1675, 6339, 20749, 46686, 59284, 63525,
+ 64949, 65359, 65502, 65527, 65535, 0, 13, 38, 63, 117,
+ 234, 381, 641, 929, 1407, 2043, 2809, 4032, 5753, 8792,
+ 14407, 24308, 38941, 48947, 55403, 59293, 61411, 62688, 63630, 64329,
+ 64840, 65188, 65376, 65472, 65506, 65527, 65531, 65535, 0, 8,
+ 29, 75, 222, 615, 1327, 2801, 5623, 9931, 16094, 24966,
+ 34419, 43458, 50676, 56186, 60055, 62500, 63936, 64765, 65225, 65435,
+ 65514, 65535, 0, 8, 13, 15, 17, 21, 33, 59,
+ 71, 92, 151, 243, 360, 456, 674, 934, 1223, 1583,
+ 1989, 2504, 3031, 3617, 4354, 5154, 6163, 7411, 8780, 10747,
+ 12874, 15591, 18974, 23027, 27436, 32020, 36948, 41830, 46205, 49797,
+ 53042, 56094, 58418, 60360, 61763, 62818, 63559, 64103, 64509, 64798,
+ 65045, 65162, 65288, 65363, 65447, 65506, 65522, 65531, 65533, 65535,
+ 0, 4, 6, 25, 38, 71, 138, 264, 519, 808,
+ 1227, 1825, 2516, 3408, 4279, 5560, 7092, 9197, 11420, 14108,
+ 16947, 20300, 23926, 27459, 31164, 34827, 38575, 42178, 45540, 48747,
+ 51444, 54090, 56426, 58460, 60080, 61595, 62734, 63668, 64275, 64673,
+ 64936, 65112, 65217, 65334, 65426, 65464, 65477, 65489, 65518, 65527,
+ 65529, 65531, 65533, 65535, 0, 2, 4, 8, 10, 12,
+ 14, 16, 21, 33, 50, 71, 84, 92, 105, 138,
+ 180, 255, 318, 377, 435, 473, 511, 590, 682, 758,
+ 913, 1097, 1256, 1449, 1671, 1884, 2169, 2445, 2772, 3157,
+ 3563, 3944, 4375, 4848, 5334, 5820, 6448, 7101, 7716, 8378,
+ 9102, 9956, 10752, 11648, 12707, 13670, 14758, 15910, 17187, 18472,
+ 19627, 20649, 21951, 23169, 24283, 25552, 26862, 28227, 29391, 30764,
+ 31882, 33213, 34432, 35600, 36910, 38116, 39464, 40729, 41872, 43144,
+ 44371, 45514, 46762, 47813, 48968, 50069, 51032, 51974, 52908, 53737,
+ 54603, 55445, 56282, 56990, 57572, 58191, 58840, 59410, 59887, 60264,
+ 60607, 60946, 61269, 61516, 61771, 61960, 62198, 62408, 62558, 62776,
+ 62985, 63207, 63408, 63546, 63739, 63906, 64070, 64237, 64371, 64551,
+ 64677, 64836, 64999, 65095, 65213, 65284, 65338, 65380, 65426, 65447,
+ 65472, 65485, 65487, 65489, 65502, 65510, 65512, 65514, 65516, 65518,
+ 65522, 65531, 65533, 65535, 0, 2, 4, 6, 65528, 65531,
+ 65533, 65535, 0, 2, 4, 6, 8, 10, 222, 65321,
+ 65513, 65528, 65531, 65533, 65535, 0, 2, 4, 50, 65476,
+ 65529, 65531, 65533, 65535, 0, 2, 4, 6, 8, 12,
+ 38, 544, 64936, 65509, 65523, 65525, 65529, 65531, 65533, 65535,
+ 0, 2, 4, 6, 8, 10, 1055, 64508, 65528, 65531,
+ 65533, 65535, 0, 2, 4, 6, 8, 10, 12, 123,
+ 3956, 62999, 65372, 65495, 65515, 65521, 65523, 65525, 65527, 65529,
+ 65531, 65533, 65535, 0, 2, 4, 12, 53, 4707, 59445,
+ 65467, 65525, 65527, 65529, 65531, 65533, 65535, 0, 2, 4,
+ 6, 8, 10, 12, 14, 16, 38, 40, 50, 67,
+ 96, 234, 929, 14345, 55750, 64866, 65389, 65462, 65514, 65517,
+ 65519, 65521, 65523, 65525, 65527, 65529, 65531, 65533, 65535, 0,
+ 2, 4, 6, 8, 10, 15, 35, 91, 377, 1946,
+ 13618, 52565, 63714, 65184, 65465, 65520, 65523, 65525, 65527, 65529,
+ 65531, 65533, 65535, 0, 2, 4, 6, 8, 10, 12,
+ 14, 16, 18, 20, 22, 24, 26, 28, 30, 32,
+ 34, 36, 38, 40, 42, 44, 46, 48, 50, 52,
+ 54, 82, 149, 362, 751, 1701, 4239, 12893, 38627, 55072,
+ 60875, 63071, 64158, 64702, 65096, 65283, 65412, 65473, 65494, 65505,
+ 65508, 65517, 65519, 65521, 65523, 65525, 65527, 65529, 65531, 65533,
+ 65535, 0, 2, 15, 23, 53, 143, 260, 418, 698,
+ 988, 1353, 1812, 2411, 3144, 4015, 5143, 6401, 7611, 8999,
+ 10653, 12512, 14636, 16865, 19404, 22154, 24798, 27521, 30326, 33102,
+ 35790, 38603, 41415, 43968, 46771, 49435, 52152, 54715, 57143, 59481,
+ 61178, 62507, 63603, 64489, 64997, 65257, 65427, 65473, 65503, 65520,
+ 65529, 65531, 65533, 65535, 0, 3, 6, 9, 26, 32,
+ 44, 46, 64, 94, 111, 164, 205, 254, 327, 409,
+ 506, 608, 733, 885, 1093, 1292, 1482, 1742, 1993, 2329,
+ 2615, 3029, 3374, 3798, 4257, 4870, 5405, 5992, 6618, 7225,
+ 7816, 8418, 9051, 9761, 10532, 11380, 12113, 13010, 13788, 14594,
+ 15455, 16361, 17182, 18088, 18997, 20046, 20951, 21968, 22947, 24124,
+ 25296, 26547, 27712, 28775, 29807, 30835, 31709, 32469, 33201, 34014,
+ 34876, 35773, 36696, 37620, 38558, 39547, 40406, 41277, 42367, 43290,
+ 44445, 45443, 46510, 47684, 48973, 50157, 51187, 52242, 53209, 54083,
+ 55006, 55871, 56618, 57293, 57965, 58556, 59222, 59722, 60180, 60554,
+ 60902, 61250, 61554, 61837, 62100, 62372, 62631, 62856, 63078, 63324,
+ 63557, 63768, 63961, 64089, 64235, 64352, 64501, 64633, 64770, 64887,
+ 65001, 65059, 65121, 65188, 65246, 65302, 65346, 65390, 65428, 65463,
+ 65477, 65506, 65515, 65517, 65519, 65521, 65523, 65525, 65527, 65529,
+ 65531, 65533, 65535, 0, 2, 4, 109, 65332, 65531, 65533,
+ 65535, 0, 2, 4, 6, 8, 25, 1817, 63874, 65511,
+ 65527, 65529, 65531, 65533, 65535, 0, 2, 4, 907, 65014,
+ 65529, 65531, 65533, 65535, 0, 2, 4, 6, 8, 10,
+ 12, 132, 2743, 62708, 65430, 65525, 65527, 65529, 65531, 65533,
+ 65535, 0, 2, 4, 6, 8, 35, 3743, 61666, 65485,
+ 65531, 65533, 65535, 0, 2, 4, 6, 8, 10, 23,
+ 109, 683, 6905, 58417, 64911, 65398, 65497, 65518, 65525, 65527,
+ 65529, 65531, 65533, 65535, 0, 2, 4, 6, 53, 510,
+ 10209, 55212, 64573, 65441, 65522, 65529, 65531, 65533, 65535, 0,
+ 2, 4, 6, 8, 10, 12, 14, 16, 18, 20,
+ 22, 32, 90, 266, 1037, 3349, 14468, 50488, 62394, 64685,
+ 65341, 65480, 65514, 65519, 65521, 65523, 65525, 65527, 65529, 65531,
+ 65533, 65535, 0, 2, 4, 6, 9, 16, 37, 106,
+ 296, 748, 1868, 5733, 18897, 45553, 60165, 63949, 64926, 65314,
+ 65441, 65508, 65524, 65529, 65531, 65533, 65535, 0, 2, 4,
+ 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
+ 26, 28, 30, 32, 34, 36, 38, 40, 42, 44,
+ 46, 48, 50, 83, 175, 344, 667, 1293, 2337, 4357,
+ 8033, 14988, 28600, 43244, 52011, 57042, 59980, 61779, 63065, 63869,
+ 64390, 64753, 64988, 65164, 65326, 65422, 65462, 65492, 65506, 65522,
+ 65524, 65526, 65531, 65533, 65535, 0, 2, 4, 6, 8,
+ 10, 12, 14, 16, 25, 39, 48, 55, 62, 65,
+ 85, 106, 139, 169, 194, 252, 323, 485, 688, 1074,
+ 1600, 2544, 3863, 5733, 8303, 11397, 15529, 20273, 25734, 31455,
+ 36853, 41891, 46410, 50306, 53702, 56503, 58673, 60479, 61880, 62989,
+ 63748, 64404, 64852, 65124, 65309, 65424, 65480, 65524, 65528, 65533,
+ 65535, 0, 2, 4, 6, 8, 10, 12, 14, 21,
+ 23, 25, 27, 29, 31, 39, 41, 43, 48, 60,
+ 72, 79, 106, 136, 166, 187, 224, 252, 323, 381,
+ 427, 478, 568, 660, 783, 912, 1046, 1175, 1365, 1567,
+ 1768, 2024, 2347, 2659, 3049, 3529, 4033, 4623, 5281, 5925,
+ 6726, 7526, 8417, 9468, 10783, 12141, 13571, 15222, 16916, 18659,
+ 20350, 22020, 23725, 25497, 27201, 29026, 30867, 32632, 34323, 36062,
+ 37829, 39466, 41144, 42654, 43981, 45343, 46579, 47759, 49013, 50171,
+ 51249, 52283, 53245, 54148, 54938, 55669, 56421, 57109, 57791, 58464,
+ 59092, 59674, 60105, 60653, 61083, 61407, 61757, 62095, 62388, 62649,
+ 62873, 63157, 63358, 63540, 63725, 63884, 64046, 64155, 64278, 64426,
+ 64548, 64654, 64806, 64906, 64994, 65077, 65137, 65215, 65277, 65324,
+ 65354, 65409, 65437, 65455, 65462, 65490, 65495, 65499, 65508, 65511,
+ 65513, 65515, 65517, 65519, 65521, 65523, 65525, 65527, 65529, 65531,
+ 65533, 65535
+};
+
+const uint16_t WebRtcIsacfix_kCdfShape[2059] = {
+ 0, 65535, 0, 65535, 0, 65535, 0, 65535, 0, 65535,
+ 0, 65535, 0, 65535, 0, 65535, 0, 65535, 0, 4,
+ 65535, 0, 8, 65514, 65535, 0, 29, 65481, 65535, 0,
+ 121, 65439, 65535, 0, 239, 65284, 65535, 0, 8, 779,
+ 64999, 65527, 65535, 0, 8, 888, 64693, 65522, 65535, 0,
+ 29, 2604, 62843, 65497, 65531, 65535, 0, 25, 176, 4576,
+ 61164, 65275, 65527, 65535, 0, 65535, 0, 65535, 0, 65535,
+ 0, 65535, 0, 4, 65535, 0, 65535, 0, 65535, 0,
+ 65535, 0, 65535, 0, 4, 65535, 0, 33, 65502, 65535,
+ 0, 54, 65481, 65535, 0, 251, 65309, 65535, 0, 611,
+ 65074, 65535, 0, 1273, 64292, 65527, 65535, 0, 4, 1809,
+ 63940, 65518, 65535, 0, 88, 4392, 60603, 65426, 65531, 65535,
+ 0, 25, 419, 7046, 57756, 64961, 65514, 65531, 65535, 0,
+ 65535, 0, 65535, 0, 65535, 0, 65535, 0, 4, 65531,
+ 65535, 0, 65535, 0, 8, 65531, 65535, 0, 4, 65527,
+ 65535, 0, 17, 65510, 65535, 0, 42, 65481, 65535, 0,
+ 197, 65342, 65531, 65535, 0, 385, 65154, 65535, 0, 1005,
+ 64522, 65535, 0, 8, 1985, 63469, 65533, 65535, 0, 38,
+ 3119, 61884, 65514, 65535, 0, 4, 6, 67, 4961, 60804,
+ 65472, 65535, 0, 17, 565, 9182, 56538, 65087, 65514, 65535,
+ 0, 8, 63, 327, 2118, 14490, 52774, 63839, 65376, 65522,
+ 65535, 0, 65535, 0, 65535, 0, 65535, 0, 65535, 0,
+ 17, 65522, 65535, 0, 59, 65489, 65535, 0, 50, 65522,
+ 65535, 0, 54, 65489, 65535, 0, 310, 65179, 65535, 0,
+ 615, 64836, 65535, 0, 4, 1503, 63965, 65535, 0, 2780,
+ 63383, 65535, 0, 21, 3919, 61051, 65527, 65535, 0, 84,
+ 6674, 59929, 65435, 65535, 0, 4, 255, 7976, 55784, 65150,
+ 65518, 65531, 65535, 0, 4, 8, 582, 10726, 53465, 64949,
+ 65518, 65535, 0, 29, 339, 3006, 17555, 49517, 62956, 65200,
+ 65497, 65531, 65535, 0, 2, 33, 138, 565, 2324, 7670,
+ 22089, 45966, 58949, 63479, 64966, 65380, 65518, 65535, 0, 65535,
+ 0, 65535, 0, 2, 65533, 65535, 0, 46, 65514, 65535,
+ 0, 414, 65091, 65535, 0, 540, 64911, 65535, 0, 419,
+ 65162, 65535, 0, 976, 64790, 65535, 0, 2977, 62495, 65531,
+ 65535, 0, 4, 3852, 61034, 65527, 65535, 0, 4, 29,
+ 6021, 60243, 65468, 65535, 0, 84, 6711, 58066, 65418, 65535,
+ 0, 13, 281, 9550, 54917, 65125, 65506, 65535, 0, 2,
+ 63, 984, 12108, 52644, 64342, 65435, 65527, 65535, 0, 29,
+ 251, 2014, 14871, 47553, 62881, 65229, 65518, 65535, 0, 13,
+ 142, 749, 4220, 18497, 45200, 60913, 64823, 65426, 65527, 65535,
+ 0, 13, 71, 264, 1176, 3789, 10500, 24480, 43488, 56324,
+ 62315, 64493, 65242, 65464, 65514, 65522, 65531, 65535, 0, 4,
+ 13, 38, 109, 205, 448, 850, 1708, 3429, 6276, 11371,
+ 19221, 29734, 40955, 49391, 55411, 59460, 62102, 63793, 64656, 65150,
+ 65401, 65485, 65522, 65531, 65535, 0, 65535, 0, 2, 65533,
+ 65535, 0, 1160, 65476, 65535, 0, 2, 6640, 64763, 65533,
+ 65535, 0, 2, 38, 9923, 61009, 65527, 65535, 0, 2,
+ 4949, 63092, 65533, 65535, 0, 2, 3090, 63398, 65533, 65535,
+ 0, 2, 2520, 58744, 65510, 65535, 0, 2, 13, 544,
+ 8784, 51403, 65148, 65533, 65535, 0, 2, 25, 1017, 10412,
+ 43550, 63651, 65489, 65527, 65535, 0, 2, 4, 29, 783,
+ 13377, 52462, 64524, 65495, 65533, 65535, 0, 2, 4, 6,
+ 100, 1817, 18451, 52590, 63559, 65376, 65531, 65535, 0, 2,
+ 4, 6, 46, 385, 2562, 11225, 37416, 60488, 65026, 65487,
+ 65529, 65533, 65535, 0, 2, 4, 6, 8, 10, 12,
+ 42, 222, 971, 5221, 19811, 45048, 60312, 64486, 65294, 65474,
+ 65525, 65529, 65533, 65535, 0, 2, 4, 8, 71, 167,
+ 666, 2533, 7875, 19622, 38082, 54359, 62108, 64633, 65290, 65495,
+ 65529, 65533, 65535, 0, 2, 4, 6, 8, 10, 13,
+ 109, 586, 1930, 4949, 11600, 22641, 36125, 48312, 56899, 61495,
+ 63927, 64932, 65389, 65489, 65518, 65531, 65533, 65535, 0, 4,
+ 6, 8, 67, 209, 712, 1838, 4195, 8432, 14432, 22834,
+ 31723, 40523, 48139, 53929, 57865, 60657, 62403, 63584, 64363, 64907,
+ 65167, 65372, 65472, 65514, 65535, 0, 2, 4, 13, 25,
+ 42, 46, 50, 75, 113, 147, 281, 448, 657, 909,
+ 1185, 1591, 1976, 2600, 3676, 5317, 7398, 9914, 12941, 16169,
+ 19477, 22885, 26464, 29851, 33360, 37228, 41139, 44802, 48654, 52058,
+ 55181, 57676, 59581, 61022, 62190, 63107, 63676, 64199, 64547, 64924,
+ 65158, 65313, 65430, 65481, 65518, 65535, 0, 65535, 0, 65535,
+ 0, 65535, 0, 65535, 0, 65533, 65535, 0, 65535, 0,
+ 65535, 0, 65535, 0, 65533, 65535, 0, 2, 65535, 0,
+ 2, 65533, 65535, 0, 2, 65533, 65535, 0, 2, 65533,
+ 65535, 0, 2, 4, 65533, 65535, 0, 2, 65533, 65535,
+ 0, 2, 4, 65531, 65533, 65535, 0, 2, 4, 65531,
+ 65533, 65535, 0, 2, 4, 6, 65524, 65533, 65535, 0,
+ 65535, 0, 65535, 0, 65535, 0, 65535, 0, 65535, 0,
+ 65535, 0, 65535, 0, 65535, 0, 65533, 65535, 0, 65533,
+ 65535, 0, 2, 65533, 65535, 0, 2, 65533, 65535, 0,
+ 2, 65533, 65535, 0, 2, 4, 65532, 65535, 0, 6,
+ 65523, 65535, 0, 2, 15, 65530, 65533, 65535, 0, 2,
+ 35, 65493, 65531, 65533, 65535, 0, 2, 4, 158, 65382,
+ 65531, 65533, 65535, 0, 65535, 0, 65535, 0, 65535, 0,
+ 65535, 0, 65535, 0, 65535, 0, 2, 65535, 0, 2,
+ 65533, 65535, 0, 2, 65533, 65535, 0, 2, 65533, 65535,
+ 0, 2, 65533, 65535, 0, 9, 65512, 65535, 0, 2,
+ 12, 65529, 65535, 0, 2, 73, 65434, 65533, 65535, 0,
+ 2, 240, 65343, 65533, 65535, 0, 2, 476, 65017, 65531,
+ 65533, 65535, 0, 2, 4, 1046, 64686, 65531, 65533, 65535,
+ 0, 2, 4, 6, 8, 1870, 63898, 65529, 65531, 65533,
+ 65535, 0, 65535, 0, 65535, 0, 65535, 0, 65533, 65535,
+ 0, 2, 65533, 65535, 0, 2, 65533, 65535, 0, 2,
+ 65532, 65535, 0, 6, 65533, 65535, 0, 6, 65523, 65535,
+ 0, 2, 65532, 65535, 0, 137, 65439, 65535, 0, 576,
+ 64899, 65533, 65535, 0, 2, 289, 65299, 65533, 65535, 0,
+ 2, 4, 6, 880, 64134, 65531, 65533, 65535, 0, 2,
+ 4, 1853, 63347, 65533, 65535, 0, 2, 6, 2516, 61762,
+ 65529, 65531, 65533, 65535, 0, 2, 4, 9, 3980, 61380,
+ 65503, 65529, 65531, 65533, 65535, 0, 2, 4, 6, 8,
+ 10, 12, 61, 6393, 59859, 65466, 65527, 65529, 65531, 65533,
+ 65535, 0, 65535, 0, 65535, 0, 65535, 0, 2, 65532,
+ 65535, 0, 3, 65529, 65535, 0, 2, 65529, 65535, 0,
+ 61, 65453, 65535, 0, 234, 65313, 65535, 0, 503, 65138,
+ 65535, 0, 155, 65402, 65533, 65535, 0, 2, 1058, 64554,
+ 65533, 65535, 0, 2, 4, 3138, 62109, 65531, 65533, 65535,
+ 0, 2, 4, 2031, 63339, 65531, 65533, 65535, 0, 2,
+ 4, 6, 9, 4155, 60778, 65523, 65529, 65531, 65533, 65535,
+ 0, 2, 4, 41, 6189, 59269, 65490, 65531, 65533, 65535,
+ 0, 2, 4, 6, 210, 8789, 57043, 65400, 65528, 65531,
+ 65533, 65535, 0, 2, 4, 6, 8, 26, 453, 10086,
+ 55499, 64948, 65483, 65524, 65527, 65529, 65531, 65533, 65535, 0,
+ 2, 4, 6, 8, 10, 12, 14, 16, 18, 20,
+ 114, 1014, 11202, 52670, 64226, 65356, 65503, 65514, 65523, 65525,
+ 65527, 65529, 65531, 65533, 65535, 0, 65533, 65535, 0, 15,
+ 65301, 65535, 0, 152, 64807, 65535, 0, 2, 3328, 63308,
+ 65535, 0, 2, 4050, 59730, 65533, 65535, 0, 2, 164,
+ 10564, 61894, 65529, 65535, 0, 15, 6712, 59831, 65076, 65532,
+ 65535, 0, 32, 7712, 57449, 65459, 65535, 0, 2, 210,
+ 7849, 53110, 65021, 65523, 65535, 0, 2, 12, 1081, 13883,
+ 48262, 62870, 65477, 65535, 0, 2, 88, 847, 6145, 37852,
+ 62012, 65454, 65533, 65535, 0, 9, 47, 207, 1823, 14522,
+ 45521, 61069, 64891, 65481, 65528, 65531, 65533, 65535, 0, 2,
+ 9, 488, 2881, 12758, 38703, 58412, 64420, 65410, 65533, 65535,
+ 0, 2, 4, 6, 61, 333, 1891, 6486, 19720, 43188,
+ 57547, 62472, 64796, 65421, 65497, 65523, 65529, 65531, 65533, 65535,
+ 0, 2, 4, 6, 8, 10, 12, 29, 117, 447,
+ 1528, 6138, 21242, 43133, 56495, 62432, 64746, 65362, 65500, 65529,
+ 65531, 65533, 65535, 0, 2, 18, 105, 301, 760, 1490,
+ 3472, 7568, 15002, 26424, 40330, 53029, 60048, 62964, 64274, 64890,
+ 65337, 65445, 65489, 65513, 65527, 65530, 65533, 65535, 0, 2,
+ 4, 6, 41, 102, 409, 853, 2031, 4316, 7302, 11328,
+ 16869, 24825, 34926, 43481, 50877, 56126, 59874, 62103, 63281, 63857,
+ 64166, 64675, 65382, 65522, 65531, 65533, 65535, 0, 2, 4,
+ 6, 8, 10, 12, 14, 16, 18, 29, 38, 53,
+ 58, 96, 181, 503, 1183, 2849, 5590, 8600, 11379, 13942,
+ 16478, 19453, 22638, 26039, 29411, 32921, 37596, 41433, 44998, 48560,
+ 51979, 55106, 57666, 59892, 61485, 62616, 63484, 64018, 64375, 64685,
+ 64924, 65076, 65278, 65395, 65471, 65509, 65529, 65535, 0, 65535,
+ 0, 65535, 0, 65535, 0, 65535, 0, 65535, 0, 65535,
+ 0, 65535, 0, 65535, 0, 2, 65533, 65535, 0, 2,
+ 65533, 65535, 0, 2, 65533, 65535, 0, 2, 65533, 65535,
+ 0, 2, 65533, 65535, 0, 2, 65533, 65535, 0, 7,
+ 65519, 65535, 0, 2, 14, 65491, 65533, 65535, 0, 2,
+ 81, 65427, 65531, 65533, 65535, 0, 2, 4, 312, 65293,
+ 65528, 65533, 65535, 0, 65535, 0, 65535, 0, 65535, 0,
+ 65535, 0, 65535, 0, 65535, 0, 65535, 0, 65535, 0,
+ 2, 65533, 65535, 0, 2, 65533, 65535, 0, 2, 65533,
+ 65535, 0, 5, 65523, 65535, 0, 2, 65533, 65535, 0,
+ 7, 65526, 65535, 0, 46, 65464, 65533, 65535, 0, 2,
+ 120, 65309, 65533, 65535, 0, 2, 5, 362, 65097, 65533,
+ 65535, 0, 2, 18, 1164, 64785, 65528, 65531, 65533, 65535,
+ 0, 65535, 0, 65535, 0, 65535, 0, 65533, 65535, 0,
+ 65535, 0, 65533, 65535, 0, 2, 65533, 65535, 0, 2,
+ 65533, 65535, 0, 2, 65533, 65535, 0, 2, 65530, 65535,
+ 0, 2, 65523, 65535, 0, 69, 65477, 65535, 0, 141,
+ 65459, 65535, 0, 194, 65325, 65533, 65535, 0, 2, 543,
+ 64912, 65533, 65535, 0, 5, 1270, 64301, 65529, 65531, 65533,
+ 65535, 0, 2, 4, 12, 2055, 63538, 65508, 65531, 65533,
+ 65535, 0, 2, 7, 102, 3775, 61970, 65429, 65526, 65528,
+ 65533, 65535, 0, 65535, 0, 65535, 0, 65535, 0, 2,
+ 65533, 65535, 0, 2, 65535, 0, 9, 65533, 65535, 0,
+ 25, 65512, 65535, 0, 2, 65533, 65535, 0, 44, 65480,
+ 65535, 0, 48, 65475, 65535, 0, 162, 65373, 65535, 0,
+ 637, 64806, 65533, 65535, 0, 2, 935, 64445, 65533, 65535,
+ 0, 2, 4, 1662, 64083, 65533, 65535, 0, 2, 12,
+ 3036, 62469, 65521, 65533, 65535, 0, 2, 120, 5405, 60468,
+ 65469, 65531, 65533, 65535, 0, 2, 4, 18, 254, 6663,
+ 58999, 65272, 65528, 65533, 65535, 0, 2, 4, 9, 12,
+ 67, 591, 8981, 56781, 64564, 65365, 65508, 65524, 65526, 65529,
+ 65531, 65533, 65535, 0, 65535, 0, 65535, 0, 2, 65533,
+ 65535, 0, 9, 65526, 65535, 0, 14, 65503, 65535, 0,
+ 127, 65390, 65535, 0, 517, 64990, 65535, 0, 178, 65330,
+ 65535, 0, 2, 1055, 64533, 65533, 65535, 0, 2, 1558,
+ 63942, 65533, 65535, 0, 2, 2205, 63173, 65533, 65535, 0,
+ 25, 4493, 60862, 65505, 65533, 65535, 0, 2, 48, 5890,
+ 59442, 65482, 65533, 65535, 0, 2, 4, 127, 7532, 58191,
+ 65394, 65533, 65535, 0, 2, 5, 32, 550, 10388, 54924,
+ 65046, 65510, 65531, 65533, 65535, 0, 2, 4, 30, 150,
+ 1685, 14340, 51375, 63619, 65288, 65503, 65528, 65533, 65535, 0,
+ 2, 4, 6, 8, 28, 97, 473, 2692, 15407, 50020,
+ 62880, 65064, 65445, 65508, 65531, 65533, 65535, 0, 2, 4,
+ 12, 32, 79, 150, 372, 907, 2184, 5868, 18207, 45431,
+ 59856, 64031, 65096, 65401, 65481, 65507, 65521, 65523, 65525, 65527,
+ 65529, 65531, 65533, 65535, 0, 65533, 65535, 0, 182, 65491,
+ 65535, 0, 877, 64286, 65535, 0, 9, 2708, 63612, 65533,
+ 65535, 0, 2, 6038, 59532, 65535, 0, 2, 92, 5500,
+ 60539, 65533, 65535, 0, 268, 8908, 56512, 65385, 65535, 0,
+ 129, 13110, 52742, 65036, 65535, 0, 2, 806, 14003, 51929,
+ 64732, 65523, 65535, 0, 7, 92, 2667, 18159, 47678, 62610,
+ 65355, 65535, 0, 32, 1836, 19676, 48237, 61677, 64960, 65526,
+ 65535, 0, 21, 159, 967, 5668, 22782, 44709, 58317, 64020,
+ 65406, 65528, 65535, 0, 7, 162, 1838, 8328, 23929, 43014,
+ 56394, 63374, 65216, 65484, 65521, 65535, 0, 2, 4, 6,
+ 28, 268, 1120, 3613, 10688, 24185, 40989, 54917, 61684, 64510,
+ 65403, 65530, 65535, 0, 2, 16, 44, 139, 492, 1739,
+ 5313, 13558, 26766, 41566, 52446, 58937, 62815, 64480, 65201, 65454,
+ 65524, 65533, 65535, 0, 7, 25, 76, 263, 612, 1466,
+ 3325, 6832, 12366, 20152, 29466, 39255, 47360, 53506, 57740, 60726,
+ 62845, 64131, 64882, 65260, 65459, 65521, 65528, 65530, 65535, 0,
+ 2, 4, 14, 48, 136, 312, 653, 1240, 2369, 4327,
+ 7028, 10759, 15449, 21235, 28027, 35386, 42938, 49562, 54990, 59119,
+ 62086, 63916, 64863, 65249, 65445, 65493, 65523, 65535, 0, 2,
+ 4, 6, 8, 10, 12, 21, 83, 208, 409, 723,
+ 1152, 1868, 2951, 4463, 6460, 8979, 11831, 15195, 18863, 22657,
+ 26762, 30881, 34963, 39098, 43054, 47069, 50620, 53871, 56821, 59386,
+ 61340, 62670, 63512, 64023, 64429, 64750, 64944, 65126, 65279, 65366,
+ 65413, 65445, 65473, 65505, 65510, 65521, 65528, 65530, 65535
+};
+
+/* pointers to cdf tables for quantizer indices */
+const uint16_t *WebRtcIsacfix_kCdfGainPtr[3][12] = {
+ { WebRtcIsacfix_kCdfGain +0 +0, WebRtcIsacfix_kCdfGain +0 +8, WebRtcIsacfix_kCdfGain +0 +22,
+ WebRtcIsacfix_kCdfGain +0 +32, WebRtcIsacfix_kCdfGain +0 +48, WebRtcIsacfix_kCdfGain +0 +60,
+ WebRtcIsacfix_kCdfGain +0 +81, WebRtcIsacfix_kCdfGain +0 +95, WebRtcIsacfix_kCdfGain +0 +128,
+ WebRtcIsacfix_kCdfGain +0 +152, WebRtcIsacfix_kCdfGain +0 +210, WebRtcIsacfix_kCdfGain +0 +264
+ },
+ { WebRtcIsacfix_kCdfGain +404 +0, WebRtcIsacfix_kCdfGain +404 +8, WebRtcIsacfix_kCdfGain +404 +21,
+ WebRtcIsacfix_kCdfGain +404 +30, WebRtcIsacfix_kCdfGain +404 +46, WebRtcIsacfix_kCdfGain +404 +58,
+ WebRtcIsacfix_kCdfGain +404 +79, WebRtcIsacfix_kCdfGain +404 +93, WebRtcIsacfix_kCdfGain +404 +125,
+ WebRtcIsacfix_kCdfGain +404 +149, WebRtcIsacfix_kCdfGain +404 +207, WebRtcIsacfix_kCdfGain +404 +260
+ },
+ { WebRtcIsacfix_kCdfGain +803 +0, WebRtcIsacfix_kCdfGain +803 +8, WebRtcIsacfix_kCdfGain +803 +22,
+ WebRtcIsacfix_kCdfGain +803 +31, WebRtcIsacfix_kCdfGain +803 +48, WebRtcIsacfix_kCdfGain +803 +60,
+ WebRtcIsacfix_kCdfGain +803 +81, WebRtcIsacfix_kCdfGain +803 +96, WebRtcIsacfix_kCdfGain +803 +129,
+ WebRtcIsacfix_kCdfGain +803 +154, WebRtcIsacfix_kCdfGain +803 +212, WebRtcIsacfix_kCdfGain +803 +268
+ }
+};
+
+const uint16_t *WebRtcIsacfix_kCdfShapePtr[3][108] = {
+ { WebRtcIsacfix_kCdfShape +0 +0, WebRtcIsacfix_kCdfShape +0 +2, WebRtcIsacfix_kCdfShape +0 +4,
+ WebRtcIsacfix_kCdfShape +0 +6, WebRtcIsacfix_kCdfShape +0 +8, WebRtcIsacfix_kCdfShape +0 +10,
+ WebRtcIsacfix_kCdfShape +0 +12, WebRtcIsacfix_kCdfShape +0 +14, WebRtcIsacfix_kCdfShape +0 +16,
+ WebRtcIsacfix_kCdfShape +0 +18, WebRtcIsacfix_kCdfShape +0 +21, WebRtcIsacfix_kCdfShape +0 +25,
+ WebRtcIsacfix_kCdfShape +0 +29, WebRtcIsacfix_kCdfShape +0 +33, WebRtcIsacfix_kCdfShape +0 +37,
+ WebRtcIsacfix_kCdfShape +0 +43, WebRtcIsacfix_kCdfShape +0 +49, WebRtcIsacfix_kCdfShape +0 +56,
+ WebRtcIsacfix_kCdfShape +0 +64, WebRtcIsacfix_kCdfShape +0 +66, WebRtcIsacfix_kCdfShape +0 +68,
+ WebRtcIsacfix_kCdfShape +0 +70, WebRtcIsacfix_kCdfShape +0 +72, WebRtcIsacfix_kCdfShape +0 +75,
+ WebRtcIsacfix_kCdfShape +0 +77, WebRtcIsacfix_kCdfShape +0 +79, WebRtcIsacfix_kCdfShape +0 +81,
+ WebRtcIsacfix_kCdfShape +0 +83, WebRtcIsacfix_kCdfShape +0 +86, WebRtcIsacfix_kCdfShape +0 +90,
+ WebRtcIsacfix_kCdfShape +0 +94, WebRtcIsacfix_kCdfShape +0 +98, WebRtcIsacfix_kCdfShape +0 +102,
+ WebRtcIsacfix_kCdfShape +0 +107, WebRtcIsacfix_kCdfShape +0 +113, WebRtcIsacfix_kCdfShape +0 +120,
+ WebRtcIsacfix_kCdfShape +0 +129, WebRtcIsacfix_kCdfShape +0 +131, WebRtcIsacfix_kCdfShape +0 +133,
+ WebRtcIsacfix_kCdfShape +0 +135, WebRtcIsacfix_kCdfShape +0 +137, WebRtcIsacfix_kCdfShape +0 +141,
+ WebRtcIsacfix_kCdfShape +0 +143, WebRtcIsacfix_kCdfShape +0 +147, WebRtcIsacfix_kCdfShape +0 +151,
+ WebRtcIsacfix_kCdfShape +0 +155, WebRtcIsacfix_kCdfShape +0 +159, WebRtcIsacfix_kCdfShape +0 +164,
+ WebRtcIsacfix_kCdfShape +0 +168, WebRtcIsacfix_kCdfShape +0 +172, WebRtcIsacfix_kCdfShape +0 +178,
+ WebRtcIsacfix_kCdfShape +0 +184, WebRtcIsacfix_kCdfShape +0 +192, WebRtcIsacfix_kCdfShape +0 +200,
+ WebRtcIsacfix_kCdfShape +0 +211, WebRtcIsacfix_kCdfShape +0 +213, WebRtcIsacfix_kCdfShape +0 +215,
+ WebRtcIsacfix_kCdfShape +0 +217, WebRtcIsacfix_kCdfShape +0 +219, WebRtcIsacfix_kCdfShape +0 +223,
+ WebRtcIsacfix_kCdfShape +0 +227, WebRtcIsacfix_kCdfShape +0 +231, WebRtcIsacfix_kCdfShape +0 +235,
+ WebRtcIsacfix_kCdfShape +0 +239, WebRtcIsacfix_kCdfShape +0 +243, WebRtcIsacfix_kCdfShape +0 +248,
+ WebRtcIsacfix_kCdfShape +0 +252, WebRtcIsacfix_kCdfShape +0 +258, WebRtcIsacfix_kCdfShape +0 +264,
+ WebRtcIsacfix_kCdfShape +0 +273, WebRtcIsacfix_kCdfShape +0 +282, WebRtcIsacfix_kCdfShape +0 +293,
+ WebRtcIsacfix_kCdfShape +0 +308, WebRtcIsacfix_kCdfShape +0 +310, WebRtcIsacfix_kCdfShape +0 +312,
+ WebRtcIsacfix_kCdfShape +0 +316, WebRtcIsacfix_kCdfShape +0 +320, WebRtcIsacfix_kCdfShape +0 +324,
+ WebRtcIsacfix_kCdfShape +0 +328, WebRtcIsacfix_kCdfShape +0 +332, WebRtcIsacfix_kCdfShape +0 +336,
+ WebRtcIsacfix_kCdfShape +0 +341, WebRtcIsacfix_kCdfShape +0 +347, WebRtcIsacfix_kCdfShape +0 +354,
+ WebRtcIsacfix_kCdfShape +0 +360, WebRtcIsacfix_kCdfShape +0 +368, WebRtcIsacfix_kCdfShape +0 +378,
+ WebRtcIsacfix_kCdfShape +0 +388, WebRtcIsacfix_kCdfShape +0 +400, WebRtcIsacfix_kCdfShape +0 +418,
+ WebRtcIsacfix_kCdfShape +0 +445, WebRtcIsacfix_kCdfShape +0 +447, WebRtcIsacfix_kCdfShape +0 +451,
+ WebRtcIsacfix_kCdfShape +0 +455, WebRtcIsacfix_kCdfShape +0 +461, WebRtcIsacfix_kCdfShape +0 +468,
+ WebRtcIsacfix_kCdfShape +0 +474, WebRtcIsacfix_kCdfShape +0 +480, WebRtcIsacfix_kCdfShape +0 +486,
+ WebRtcIsacfix_kCdfShape +0 +495, WebRtcIsacfix_kCdfShape +0 +505, WebRtcIsacfix_kCdfShape +0 +516,
+ WebRtcIsacfix_kCdfShape +0 +528, WebRtcIsacfix_kCdfShape +0 +543, WebRtcIsacfix_kCdfShape +0 +564,
+ WebRtcIsacfix_kCdfShape +0 +583, WebRtcIsacfix_kCdfShape +0 +608, WebRtcIsacfix_kCdfShape +0 +635
+ },
+ { WebRtcIsacfix_kCdfShape +686 +0, WebRtcIsacfix_kCdfShape +686 +2, WebRtcIsacfix_kCdfShape +686 +4,
+ WebRtcIsacfix_kCdfShape +686 +6, WebRtcIsacfix_kCdfShape +686 +8, WebRtcIsacfix_kCdfShape +686 +11,
+ WebRtcIsacfix_kCdfShape +686 +13, WebRtcIsacfix_kCdfShape +686 +15, WebRtcIsacfix_kCdfShape +686 +17,
+ WebRtcIsacfix_kCdfShape +686 +20, WebRtcIsacfix_kCdfShape +686 +23, WebRtcIsacfix_kCdfShape +686 +27,
+ WebRtcIsacfix_kCdfShape +686 +31, WebRtcIsacfix_kCdfShape +686 +35, WebRtcIsacfix_kCdfShape +686 +40,
+ WebRtcIsacfix_kCdfShape +686 +44, WebRtcIsacfix_kCdfShape +686 +50, WebRtcIsacfix_kCdfShape +686 +56,
+ WebRtcIsacfix_kCdfShape +686 +63, WebRtcIsacfix_kCdfShape +686 +65, WebRtcIsacfix_kCdfShape +686 +67,
+ WebRtcIsacfix_kCdfShape +686 +69, WebRtcIsacfix_kCdfShape +686 +71, WebRtcIsacfix_kCdfShape +686 +73,
+ WebRtcIsacfix_kCdfShape +686 +75, WebRtcIsacfix_kCdfShape +686 +77, WebRtcIsacfix_kCdfShape +686 +79,
+ WebRtcIsacfix_kCdfShape +686 +82, WebRtcIsacfix_kCdfShape +686 +85, WebRtcIsacfix_kCdfShape +686 +89,
+ WebRtcIsacfix_kCdfShape +686 +93, WebRtcIsacfix_kCdfShape +686 +97, WebRtcIsacfix_kCdfShape +686 +102,
+ WebRtcIsacfix_kCdfShape +686 +106, WebRtcIsacfix_kCdfShape +686 +112, WebRtcIsacfix_kCdfShape +686 +119,
+ WebRtcIsacfix_kCdfShape +686 +127, WebRtcIsacfix_kCdfShape +686 +129, WebRtcIsacfix_kCdfShape +686 +131,
+ WebRtcIsacfix_kCdfShape +686 +133, WebRtcIsacfix_kCdfShape +686 +135, WebRtcIsacfix_kCdfShape +686 +137,
+ WebRtcIsacfix_kCdfShape +686 +139, WebRtcIsacfix_kCdfShape +686 +142, WebRtcIsacfix_kCdfShape +686 +146,
+ WebRtcIsacfix_kCdfShape +686 +150, WebRtcIsacfix_kCdfShape +686 +154, WebRtcIsacfix_kCdfShape +686 +158,
+ WebRtcIsacfix_kCdfShape +686 +162, WebRtcIsacfix_kCdfShape +686 +167, WebRtcIsacfix_kCdfShape +686 +173,
+ WebRtcIsacfix_kCdfShape +686 +179, WebRtcIsacfix_kCdfShape +686 +186, WebRtcIsacfix_kCdfShape +686 +194,
+ WebRtcIsacfix_kCdfShape +686 +205, WebRtcIsacfix_kCdfShape +686 +207, WebRtcIsacfix_kCdfShape +686 +209,
+ WebRtcIsacfix_kCdfShape +686 +211, WebRtcIsacfix_kCdfShape +686 +214, WebRtcIsacfix_kCdfShape +686 +218,
+ WebRtcIsacfix_kCdfShape +686 +222, WebRtcIsacfix_kCdfShape +686 +226, WebRtcIsacfix_kCdfShape +686 +230,
+ WebRtcIsacfix_kCdfShape +686 +234, WebRtcIsacfix_kCdfShape +686 +238, WebRtcIsacfix_kCdfShape +686 +242,
+ WebRtcIsacfix_kCdfShape +686 +247, WebRtcIsacfix_kCdfShape +686 +253, WebRtcIsacfix_kCdfShape +686 +262,
+ WebRtcIsacfix_kCdfShape +686 +269, WebRtcIsacfix_kCdfShape +686 +278, WebRtcIsacfix_kCdfShape +686 +289,
+ WebRtcIsacfix_kCdfShape +686 +305, WebRtcIsacfix_kCdfShape +686 +307, WebRtcIsacfix_kCdfShape +686 +309,
+ WebRtcIsacfix_kCdfShape +686 +311, WebRtcIsacfix_kCdfShape +686 +315, WebRtcIsacfix_kCdfShape +686 +319,
+ WebRtcIsacfix_kCdfShape +686 +323, WebRtcIsacfix_kCdfShape +686 +327, WebRtcIsacfix_kCdfShape +686 +331,
+ WebRtcIsacfix_kCdfShape +686 +335, WebRtcIsacfix_kCdfShape +686 +340, WebRtcIsacfix_kCdfShape +686 +346,
+ WebRtcIsacfix_kCdfShape +686 +354, WebRtcIsacfix_kCdfShape +686 +362, WebRtcIsacfix_kCdfShape +686 +374,
+ WebRtcIsacfix_kCdfShape +686 +384, WebRtcIsacfix_kCdfShape +686 +396, WebRtcIsacfix_kCdfShape +686 +413,
+ WebRtcIsacfix_kCdfShape +686 +439, WebRtcIsacfix_kCdfShape +686 +442, WebRtcIsacfix_kCdfShape +686 +446,
+ WebRtcIsacfix_kCdfShape +686 +450, WebRtcIsacfix_kCdfShape +686 +455, WebRtcIsacfix_kCdfShape +686 +461,
+ WebRtcIsacfix_kCdfShape +686 +468, WebRtcIsacfix_kCdfShape +686 +475, WebRtcIsacfix_kCdfShape +686 +481,
+ WebRtcIsacfix_kCdfShape +686 +489, WebRtcIsacfix_kCdfShape +686 +498, WebRtcIsacfix_kCdfShape +686 +508,
+ WebRtcIsacfix_kCdfShape +686 +522, WebRtcIsacfix_kCdfShape +686 +534, WebRtcIsacfix_kCdfShape +686 +554,
+ WebRtcIsacfix_kCdfShape +686 +577, WebRtcIsacfix_kCdfShape +686 +602, WebRtcIsacfix_kCdfShape +686 +631
+ },
+ { WebRtcIsacfix_kCdfShape +1368 +0, WebRtcIsacfix_kCdfShape +1368 +2, WebRtcIsacfix_kCdfShape +1368 +4,
+ WebRtcIsacfix_kCdfShape +1368 +6, WebRtcIsacfix_kCdfShape +1368 +8, WebRtcIsacfix_kCdfShape +1368 +10,
+ WebRtcIsacfix_kCdfShape +1368 +12, WebRtcIsacfix_kCdfShape +1368 +14, WebRtcIsacfix_kCdfShape +1368 +16,
+ WebRtcIsacfix_kCdfShape +1368 +20, WebRtcIsacfix_kCdfShape +1368 +24, WebRtcIsacfix_kCdfShape +1368 +28,
+ WebRtcIsacfix_kCdfShape +1368 +32, WebRtcIsacfix_kCdfShape +1368 +36, WebRtcIsacfix_kCdfShape +1368 +40,
+ WebRtcIsacfix_kCdfShape +1368 +44, WebRtcIsacfix_kCdfShape +1368 +50, WebRtcIsacfix_kCdfShape +1368 +57,
+ WebRtcIsacfix_kCdfShape +1368 +65, WebRtcIsacfix_kCdfShape +1368 +67, WebRtcIsacfix_kCdfShape +1368 +69,
+ WebRtcIsacfix_kCdfShape +1368 +71, WebRtcIsacfix_kCdfShape +1368 +73, WebRtcIsacfix_kCdfShape +1368 +75,
+ WebRtcIsacfix_kCdfShape +1368 +77, WebRtcIsacfix_kCdfShape +1368 +79, WebRtcIsacfix_kCdfShape +1368 +81,
+ WebRtcIsacfix_kCdfShape +1368 +85, WebRtcIsacfix_kCdfShape +1368 +89, WebRtcIsacfix_kCdfShape +1368 +93,
+ WebRtcIsacfix_kCdfShape +1368 +97, WebRtcIsacfix_kCdfShape +1368 +101, WebRtcIsacfix_kCdfShape +1368 +105,
+ WebRtcIsacfix_kCdfShape +1368 +110, WebRtcIsacfix_kCdfShape +1368 +116, WebRtcIsacfix_kCdfShape +1368 +123,
+ WebRtcIsacfix_kCdfShape +1368 +132, WebRtcIsacfix_kCdfShape +1368 +134, WebRtcIsacfix_kCdfShape +1368 +136,
+ WebRtcIsacfix_kCdfShape +1368 +138, WebRtcIsacfix_kCdfShape +1368 +141, WebRtcIsacfix_kCdfShape +1368 +143,
+ WebRtcIsacfix_kCdfShape +1368 +146, WebRtcIsacfix_kCdfShape +1368 +150, WebRtcIsacfix_kCdfShape +1368 +154,
+ WebRtcIsacfix_kCdfShape +1368 +158, WebRtcIsacfix_kCdfShape +1368 +162, WebRtcIsacfix_kCdfShape +1368 +166,
+ WebRtcIsacfix_kCdfShape +1368 +170, WebRtcIsacfix_kCdfShape +1368 +174, WebRtcIsacfix_kCdfShape +1368 +179,
+ WebRtcIsacfix_kCdfShape +1368 +185, WebRtcIsacfix_kCdfShape +1368 +193, WebRtcIsacfix_kCdfShape +1368 +203,
+ WebRtcIsacfix_kCdfShape +1368 +214, WebRtcIsacfix_kCdfShape +1368 +216, WebRtcIsacfix_kCdfShape +1368 +218,
+ WebRtcIsacfix_kCdfShape +1368 +220, WebRtcIsacfix_kCdfShape +1368 +224, WebRtcIsacfix_kCdfShape +1368 +227,
+ WebRtcIsacfix_kCdfShape +1368 +231, WebRtcIsacfix_kCdfShape +1368 +235, WebRtcIsacfix_kCdfShape +1368 +239,
+ WebRtcIsacfix_kCdfShape +1368 +243, WebRtcIsacfix_kCdfShape +1368 +247, WebRtcIsacfix_kCdfShape +1368 +251,
+ WebRtcIsacfix_kCdfShape +1368 +256, WebRtcIsacfix_kCdfShape +1368 +262, WebRtcIsacfix_kCdfShape +1368 +269,
+ WebRtcIsacfix_kCdfShape +1368 +277, WebRtcIsacfix_kCdfShape +1368 +286, WebRtcIsacfix_kCdfShape +1368 +297,
+ WebRtcIsacfix_kCdfShape +1368 +315, WebRtcIsacfix_kCdfShape +1368 +317, WebRtcIsacfix_kCdfShape +1368 +319,
+ WebRtcIsacfix_kCdfShape +1368 +323, WebRtcIsacfix_kCdfShape +1368 +327, WebRtcIsacfix_kCdfShape +1368 +331,
+ WebRtcIsacfix_kCdfShape +1368 +335, WebRtcIsacfix_kCdfShape +1368 +339, WebRtcIsacfix_kCdfShape +1368 +343,
+ WebRtcIsacfix_kCdfShape +1368 +349, WebRtcIsacfix_kCdfShape +1368 +355, WebRtcIsacfix_kCdfShape +1368 +361,
+ WebRtcIsacfix_kCdfShape +1368 +368, WebRtcIsacfix_kCdfShape +1368 +376, WebRtcIsacfix_kCdfShape +1368 +385,
+ WebRtcIsacfix_kCdfShape +1368 +397, WebRtcIsacfix_kCdfShape +1368 +411, WebRtcIsacfix_kCdfShape +1368 +429,
+ WebRtcIsacfix_kCdfShape +1368 +456, WebRtcIsacfix_kCdfShape +1368 +459, WebRtcIsacfix_kCdfShape +1368 +463,
+ WebRtcIsacfix_kCdfShape +1368 +467, WebRtcIsacfix_kCdfShape +1368 +473, WebRtcIsacfix_kCdfShape +1368 +478,
+ WebRtcIsacfix_kCdfShape +1368 +485, WebRtcIsacfix_kCdfShape +1368 +491, WebRtcIsacfix_kCdfShape +1368 +497,
+ WebRtcIsacfix_kCdfShape +1368 +505, WebRtcIsacfix_kCdfShape +1368 +514, WebRtcIsacfix_kCdfShape +1368 +523,
+ WebRtcIsacfix_kCdfShape +1368 +535, WebRtcIsacfix_kCdfShape +1368 +548, WebRtcIsacfix_kCdfShape +1368 +565,
+ WebRtcIsacfix_kCdfShape +1368 +585, WebRtcIsacfix_kCdfShape +1368 +611, WebRtcIsacfix_kCdfShape +1368 +640
+ }
+};
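+
+/*
+ * Illustrative lookup (a sketch, not part of the codec): the entropy coder
+ * selects the cdf for a given KLT model and coefficient through these
+ * pointer tables, e.g.
+ *
+ *   const uint16_t* cdf = WebRtcIsacfix_kCdfGainPtr[model][k];
+ *
+ * with model in 0..2 and k indexing the 12 gain (or 108 shape)
+ * coefficients; each pointer addresses the start of one cumulative
+ * distribution inside the flat WebRtcIsacfix_kCdfGain/Shape arrays above.
+ */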
+
+/* code length for all coefficients using different models */
+
+const int16_t WebRtcIsacfix_kCodeLenGainQ11[392] = {
+ 25189, 16036, 8717, 358, 8757, 15706, 21456, 24397, 18502, 17559
+ , 13794, 11088, 7480, 873, 6603, 11636, 14627, 16805, 19132, 26624
+ , 26624, 19408, 13751, 7280, 583, 7591, 15178, 23773, 28672, 25189
+ , 19045, 16442, 13412, 10397, 5893, 1338, 6376, 9992, 12074, 13853
+ , 15781, 19821, 22819, 28672, 28672, 25189, 19858, 15781, 11262, 5477
+ , 1298, 5632, 11814, 17234, 22020, 28672, 19677, 18125, 16587, 14521
+ , 13032, 11196, 9249, 5411, 2495, 4994, 7975, 10234, 12308, 13892
+ , 15148, 17944, 21725, 23917, 25189, 19539, 16293, 11531, 7808, 4475
+ , 2739, 4872, 8089, 11314, 14992, 18105, 23257, 26624, 25189, 23257
+ , 23257, 20982, 18697, 18023, 16338, 16036, 14539, 13695, 13146, 11763
+ , 10754, 9074, 7260, 5584, 4430, 5553, 6848, 8344, 10141, 11636
+ , 12535, 13416, 14342, 15477, 17296, 19282, 22349, 23773, 28672, 28672
+ , 26624, 23773, 21456, 18023, 15118, 13362, 11212, 9293, 8043, 6985
+ , 5908, 5721, 5853, 6518, 7316, 8360, 9716, 11289, 12912, 14652
+ , 16969, 19858, 23773, 26624, 28013, 30720, 30720, 28672, 25426, 23141
+ , 25426, 23773, 20720, 19408, 18697, 19282, 16859, 16338, 16026, 15377
+ , 15021, 14319, 14251, 13937, 13260, 13017, 12332, 11703, 11430, 10359
+ , 10128, 9405, 8757, 8223, 7974, 7859, 7646, 7673, 7997, 8580
+ , 8880, 9061, 9866, 10397, 11358, 12200, 13244, 14157, 15021, 16026
+ , 16490, 18697, 18479, 20011, 19677, 20720, 24576, 26276, 30720, 30720
+ , 28672, 30720, 24068, 25189, 22437, 20345, 18479, 16396, 16026, 14928
+ , 13877, 13450, 12696, 12766, 11626, 11098, 10159, 9998, 9437, 9275
+ , 8783, 8552, 8629, 8488, 8522, 8454, 8571, 8775, 8915, 9427
+ , 9483, 9851, 10260, 10933, 11131, 11974, 12560, 13833, 15080, 16304
+ , 17491, 19017, 18697, 19408, 22020, 25189, 25426, 22819, 26276, 30720
+ , 30720, 30720, 30720, 30720, 30720, 28672, 30720, 30720, 30720, 30720
+ , 28013, 25426, 24397, 23773, 25189, 26624, 25189, 22437, 21725, 20011
+ , 20527, 20720, 20771, 22020, 22020, 19858, 19408, 19972, 17866, 17360
+ , 17791, 17219, 16805, 16927, 16067, 16162, 15661, 15178, 15021, 15209
+ , 14845, 14570, 14490, 14490, 13733, 13617, 13794, 13577, 13312, 12824
+ , 13032, 12683, 12189, 12469, 12109, 11940, 11636, 11617, 11932, 12294
+ , 11578, 11775, 12039, 11654, 11560, 11439, 11909, 11421, 12029, 11513
+ , 11773, 11899, 11560, 11805, 11476, 11664, 11963, 11647, 11754, 11963
+ , 11703, 12211, 11932, 12074, 12469, 12535, 12560, 12912, 12783, 12866
+ , 12884, 13378, 13957, 13775, 13635, 14019, 14545, 15240, 15520, 15554
+ , 15697, 16490, 16396, 17281, 16599, 16969, 17963, 16859, 16983, 16805
+ , 17099, 18210, 17219, 17646, 17700, 17646, 18297, 17425, 18479, 17791
+ , 17718, 19282, 18672, 20173, 20982, 21725, 21456, 23773, 23257, 25189
+ , 30720, 30720, 25189, 26624, 30720, 30720, 30720, 30720, 28672, 26276
+ , 30720, 30720
+};
+
+const int16_t WebRtcIsacfix_kCodeLenShapeQ11[578] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 28672
+ , 0, 26624, 1, 23773, 22819, 4, 20982, 18598, 10, 19282
+ , 16587, 22, 16442, 26624, 13126, 60, 14245, 26624, 26624, 12736
+ , 79, 12912, 25189, 22819, 9563, 249, 9474, 22349, 28672, 23257
+ , 17944, 7980, 434, 8181, 16431, 26624, 0, 0, 0, 0
+ , 28672, 0, 0, 0, 0, 0, 28672, 0, 22437, 3
+ , 22437, 20982, 5, 20982, 16442, 22, 16752, 13814, 49, 14646
+ , 11645, 116, 11734, 26624, 28672, 10613, 158, 11010, 24397, 19539
+ , 8046, 453, 7709, 19017, 28672, 23257, 15110, 6770, 758, 6523
+ , 14108, 24397, 28672, 0, 0, 0, 0, 28672, 0, 28672
+ , 0, 26624, 1, 28672, 28672, 1, 26624, 24397, 2, 23257
+ , 21725, 4, 20982, 17158, 18, 17281, 28672, 15178, 35, 15209
+ , 12343, 92, 12320, 26624, 10344, 189, 10217, 30720, 22020, 9033
+ , 322, 8549, 23773, 28672, 30720, 20622, 7666, 473, 7806, 20527
+ , 24397, 14135, 5995, 960, 6018, 14872, 23773, 26624, 20928, 16293
+ , 10636, 4926, 1588, 5256, 11088, 18043, 25189, 0, 0, 0
+ , 0, 24397, 1, 25189, 20720, 5, 21456, 21209, 3, 25189
+ , 20982, 5, 21456, 15818, 30, 15410, 13794, 60, 13416, 28672
+ , 11162, 142, 11025, 9337, 231, 10094, 23773, 8338, 405, 7930
+ , 26624, 19677, 6787, 613, 7318, 19161, 28672, 16442, 6319, 932
+ , 5748, 15312, 25189, 28672, 28672, 28672, 13998, 5513, 1263, 5146
+ , 14024, 24397, 22819, 15818, 9460, 4447, 2122, 4681, 9970, 15945
+ , 22349, 28672, 30720, 22622, 19017, 14872, 10689, 7405, 4473, 2983
+ , 4783, 7894, 11186, 14964, 18210, 24397, 0, 0, 30720, 0
+ , 30720, 21456, 3, 23773, 14964, 39, 14757, 14179, 53, 13751
+ , 14928, 36, 15272, 12430, 79, 13228, 9135, 285, 9077, 28672
+ , 28672, 8377, 403, 7919, 26624, 28672, 23257, 7068, 560, 7473
+ , 20345, 19677, 6770, 720, 6464, 18697, 25189, 16249, 5779, 1087
+ , 5494, 15209, 22819, 30720, 20622, 12601, 5240, 1419, 5091, 12095
+ , 19408, 26624, 22819, 16805, 10683, 4812, 2056, 4293, 9836, 16026
+ , 24397, 25189, 18409, 13833, 8681, 4503, 2653, 4220, 8329, 13853
+ , 19132, 26624, 25189, 20771, 17219, 12630, 9520, 6733, 4565, 3657
+ , 4817, 7069, 10058, 13212, 16805, 21209, 26624, 26276, 28672, 28672
+ , 26276, 23257, 20173, 19282, 16538, 15051, 12811, 10754, 9267, 7547
+ , 6270, 5407, 5214, 6057, 7054, 8226, 9488, 10806, 12793, 14442
+ , 16442, 19677, 22099, 26276, 28672, 0, 30720, 0, 30720, 11920
+ , 56, 20720, 30720, 6766, 355, 13130, 30720, 30720, 22180, 5589
+ , 736, 7902, 26624, 30720, 7634, 354, 9721, 30720, 30720, 9027
+ , 246, 10117, 30720, 30720, 9630, 453, 6709, 23257, 30720, 25683
+ , 14228, 6127, 1271, 4615, 15178, 30720, 30720, 23504, 12382, 5739
+ , 2015, 3492, 10560, 22020, 26624, 30720, 30720, 23257, 13192, 4873
+ , 1527, 5001, 12445, 22020, 30720, 30720, 30720, 30720, 19344, 10761
+ , 4051, 1927, 5281, 10594, 17866, 28672, 30720, 30720, 30720, 21869
+ , 15554, 10060, 5979, 2710, 3085, 7889, 14646, 21725, 28672, 30720
+ , 30720, 30720, 30720, 30720, 30720, 30720, 22719, 17425, 13212, 8083
+ , 4439, 2820, 4305, 8136, 12988, 17425, 21151, 28672, 28672, 30720
+ , 30720, 30720, 28672, 20527, 19282, 14412, 10513, 7407, 5079, 3744
+ , 4115, 6308, 9621, 13599, 17040, 22349, 28672, 30720, 30720, 30720
+ , 30720, 30720, 30720, 29522, 19282, 14545, 11485, 9093, 6760, 5262
+ , 4672, 4970, 6005, 7852, 9732, 12343, 14672, 19161, 22819, 25189
+ , 30720, 30720, 28672, 30720, 30720, 20720, 18125, 14388, 12007, 9825
+ , 8092, 7064, 6069, 5903, 5932, 6359, 7169, 8310, 9324, 10711
+ , 11867, 13096, 14157, 16338, 17040, 19161, 21725, 23773, 30720, 30720
+ , 26276, 25426, 24397, 28672, 28672, 23257, 22020, 22349, 18297, 17646
+ , 16983, 16431, 16162, 15021, 15178, 13751, 12142, 10895, 10193, 9632
+ , 9086, 8896, 8823, 8735, 8591, 8754, 8649, 8361, 8329, 8522
+ , 8373, 8739, 8993, 9657, 10454, 11279, 11899, 12614, 14024, 14273
+ , 15477, 15240, 16649, 17866, 18697, 21151, 22099, 0
+ // The final 0 was added due to http://bugs.webrtc.org/10584.
+};
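+
+/*
+ * The code lengths above are in Q11, so a table value v corresponds to an
+ * expected symbol cost of v / 2^11 bits. A hypothetical bit-cost estimate
+ * (a sketch; the exact indexing presumably combines the per-model offsets
+ * declared as WebRtcIsacfix_kOffsetGain in lpc_tables.h):
+ *
+ *   int32_t bits_q11 = WebRtcIsacfix_kCodeLenGainQ11[offset + idx];
+ *   int32_t whole_bits = bits_q11 >> 11;
+ */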
+
+/* left KLT transforms */
+const int16_t WebRtcIsacfix_kT1GainQ15[3][4] = {
+ { -26130, 19773, 19773, 26130 },
+ { -26664, 19046, 19046, 26664 },
+ { -23538, 22797, 22797, 23538 }
+};
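+
+/* Each 2x2 matrix above is stored row-major in Q15 and is (up to rounding)
+ * an orthogonal rotation: e.g. for the first model, (-26130)^2 + 19773^2 is
+ * approximately 32768^2, so each row has unit norm, and the two rows are
+ * mutually orthogonal by construction ((-a,b) dot (b,a) = 0). */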
+
+
+
+const int16_t WebRtcIsacfix_kT1ShapeQ15[3][324] = {
+ { 52,16,168,7,439,-138,-89,306,671,882,
+ 157,1301,291,1598,-3571,-1943,-1119,32404,96,-12,
+ 379,-64,-307,345,-836,539,1045,2541,-2865,-992,
+ 1683,-4717,5808,7427,30599,2319,183,-73,451,481,
+ 933,-198,781,-397,1244,-777,3690,-2414,149,-1356,
+ -2593,-31140,8289,-1737,-202,-14,-214,360,501,450,
+ -245,-7,797,3638,-2804,3042,-337,22137,-22103,2264,
+ 6838,-3381,305,172,263,-195,-355,351,179,513,
+ 2234,3343,5509,7531,19075,-17740,-16836,2244,-629,-1505,
+ -153,108,124,-324,2694,-124,1492,-850,5347,4285,
+ 7439,-10229,-22822,-12467,-12891,3645,822,-232,131,13,
+ 374,565,536,4681,1294,-1935,1926,-5734,-10643,26462,
+ -12480,-5589,-1038,-2468,964,-704,-247,-106,186,-558,
+ -4050,3760,2972,2141,-7393,6294,26740,11991,-3251,5461,
+ 5341,1574,2208,-51,-552,-297,-753,-154,2068,-5371,
+ 3578,4106,28043,-10533,8041,2353,2389,4609,3410,1906,
+ 351,-249,18,-15,1117,539,2870,9084,17585,-24528,
+ -366,-6490,2009,-3170,2942,1116,-232,1672,1065,606,
+ -399,-388,-518,38,3728,28948,-11936,4543,4104,-4441,
+ 1545,-4044,1485,622,-68,186,-473,135,-280,125,
+ -546,-1813,6989,6606,23711,19376,-2636,2870,-4553,-1687,
+ 878,-375,205,-208,-409,-108,-200,-45,-1670,-337,
+ 8213,-5524,-2334,5240,-12939,-26205,5937,-1582,-592,-959,
+ -5374,2449,3400,559,349,-492,668,12379,-27684,3419,
+ 5117,4415,-297,-8270,-1252,-3490,-1272,-1199,-3159,191,
+ 630,488,-797,-3071,12912,-27783,-10249,1047,647,619,
+ 111,-3722,-915,-1055,-502,5,-1384,-306,221,68,
+ 5219,13173,-26474,-11663,-5626,927,806,-1127,236,-589,
+ -522,-230,-312,-315,-428,-573,426,192,-11830,-26883,
+ -14121,-2785,-1429,-109,410,-832,-302,539,-459,104,
+ 1,-530,-202,-289,153,116,30082,-12944,-671,20,
+ 649,98,103,215,234,0,280,-51,-169,298,
+ 31,230,-73,-51
+ },
+ { -154,-7,-192,61,-739,-389,-947,-162,-60,94,
+ 511,-716,1520,-1428,4168,-2214,1816,32270,-123,-77,
+ -199,-99,-42,-588,203,-240,-930,-35,1580,234,
+ 3206,-5507,-1495,-10946,30000,-2667,-136,-176,-240,-175,
+ -204,-661,-1796,-1039,-1271,498,3143,734,2663,2699,
+ -8127,29333,10495,2356,-72,113,-91,118,-2840,-723,
+ -1733,-1158,-389,-2116,-3054,-3,-5179,8071,29546,6308,
+ 5657,-3178,-186,-294,-473,-635,1213,-983,-1437,-1715,
+ -1094,1280,-92,-9573,948,29576,-7060,-5921,2954,1349,
+ -337,-108,-1099,962,418,-413,-1149,-334,1241,3975,
+ -6825,26725,-14377,7051,-4772,-1707,2335,2008,-150,570,
+ 1371,42,-1649,-619,2039,3369,-1225,1583,-2755,-15207,
+ -27504,-4855,-4304,1495,2733,1324,15,-448,403,353,
+ 3016,-1242,2338,2673,2064,-7496,-30447,-3686,5833,-1301,
+ -2455,2122,1519,608,43,-653,773,-3072,912,-1537,
+ 4505,10284,30237,1549,3200,-691,205,1702,658,1014,
+ 1499,148,79,-322,-1162,-4639,-813,7536,3204,29109,
+ -10747,-26,1611,2286,2114,2561,1022,372,348,207,
+ 1062,-1088,-443,-9849,2381,5671,29097,-7612,-2927,3853,
+ 194,1155,275,1438,1438,1312,581,888,-784,906,
+ 112,-11103,25104,14438,-9311,-3068,1210,368,370,-940,
+ -2434,-1148,1925,392,657,258,-526,1475,-2281,-4265,
+ -1880,1534,2185,-1472,959,-30934,6306,3114,-4109,1768,
+ -2612,-703,45,644,2185,2033,5670,7211,19114,-22427,
+ 6432,5150,-4090,-2694,3860,1245,-596,293,1829,369,
+ -319,229,-3256,2170,-6374,-26216,-4570,-16053,-5766,-262,
+ -2006,2873,-1477,147,378,-1544,-344,-544,-985,-481,
+ 4210,4542,30757,-7291,-4863,1529,-2079,-628,-603,-783,
+ -408,1646,697,808,-620,-292,181,158,-13313,-29173,
+ 5984,-1262,859,-1776,-558,-24,-883,-1421,739,210,
+ -531,-285,131,-160,-246,-56,29345,-13706,-2859,-2966,
+ -300,-970,-2382,-268,-103,-636,-12,-62,-691,-253,
+ -147,-127,27,66
+ },
+ { 55,-212,-198,489,-274,81,682,399,328,-934,
+ -389,-37,1357,-3632,5276,6581,-9493,-29921,29,-45,
+ 2,190,172,-15,311,-130,-1085,-25,324,-684,
+ 3223,-6580,4485,-5280,-29521,9933,82,-320,-530,229,
+ -705,-533,-414,848,-1842,-4473,1390,-857,6717,-6692,
+ 4648,29397,576,8339,-68,-85,238,-330,264,-1012,
+ -381,-203,-3384,-3329,3906,6810,3790,-6250,28312,-8078,
+ 8089,1565,160,-569,-612,-613,-1063,-1928,-1125,3421,
+ -7481,-7484,4942,-6984,4330,-25591,-10574,-6982,5682,-1781,
+ -308,89,178,-1715,-420,-3530,-5776,1219,-8617,-7137,
+ 7015,4981,24875,12657,-5408,-3356,-785,-1972,326,-858,
+ -506,-3382,-986,-6258,-2259,4015,-8374,-10482,3127,23826,
+ -14126,-514,-5417,2178,-2912,-17,-587,80,67,-5881,
+ -1702,-5351,-4481,398,-10156,-225,20727,-15460,-11603,7752,
+ 3660,1714,-2001,-359,499,-527,-1225,-7820,-1297,-6326,
+ -8526,7900,-18328,13311,-17488,-2926,-196,-17,2281,873,
+ 480,-160,-624,471,780,-8729,1707,-14262,-20647,1721,
+ 18590,-2206,-1214,-1066,312,-2602,783,-412,-113,49,
+ -119,1305,-2371,-15132,-1833,-18252,20295,-8316,2227,341,
+ -2074,-702,3082,-262,-465,-198,430,30,-70,-788,
+ 2342,-25132,-4863,19783,-484,2137,2811,-1906,799,1586,
+ 962,-734,-191,-30,-129,-93,-1126,1729,5860,-2030,
+ 8953,603,-3338,-10869,-1144,22070,12130,10513,3191,-6881,
+ -3514,2090,711,-666,1843,-5997,-5681,2921,-17641,-2801,
+ 4969,18590,7169,12214,8587,4405,3008,-1074,-371,-77,
+ 253,331,-5611,5014,13152,-1985,18483,-1696,8043,20463,
+ 2381,-393,1688,-1205,618,1220,457,248,-83,176,
+ 7920,-13676,-22139,-3038,17402,2036,844,3258,994,719,
+ 2087,-44,426,494,12,-91,46,5,-14204,22912,
+ -18156,-361,442,2298,-829,2229,386,1433,1335,1323,
+ 55,-592,-139,49,-12,-57,27783,17134,350,-282,
+ 552,158,142,2488,465,329,1087,118,143,10,
+ 56,65,-15,-31
+ }
+};
+
+/* right KLT transforms */
+const int16_t WebRtcIsacfix_kT2GainQ15[3][36] = {
+ { 4775, -14892, 20313, -17104, 10533, -3613, -6782, 16044, -8889,
+ -11019, 21330, -10720, 13193, -15678, -11101, 14461, 12250, -13096,
+ -16951, 2167, 16066, 15569, -702, -16754, -19195, -12823, -4321,
+ 5128, 13348, 17825, 13232, 13404, 13494, 13490, 13383, 13261
+ },
+ { -3725, 11408, -18493, 20031, -13097, 3865, 9344, -19294, 10740,
+ 8856, -18432, 8982, 13975, -14444, -11930, 11774, 14285, -13594,
+ -16323, -4, 16340, 15609, 359, -17220, -18401, -13471, -4643,
+ 5225, 13375, 18053, 13124, 13463, 13621, 13583, 13393, 13072
+ },
+ { -3513, 11402, -17883, 19504, -14399, 4885, 8702, -19513, 12046,
+ 8533, -18110, 8447, 12778, -14838, -12444, 13177, 14107, -12759,
+ -17268, 914, 15822, 15661, 838, -16686, -18907, -12936, -4820,
+ 4175, 12398, 18830, 12913, 13215, 13433, 13572, 13601, 13518
+ }
+};
+
+const int16_t WebRtcIsacfix_kT2ShapeQ15[3][36] = {
+ { 4400, -11512, 17205, -19470, 14770, -5345, 9784, -19222, 11228,
+ 6842, -18371, 9909, 14191, -13496, -11563, 14015, 11827, -14839,
+ -15439, 948, 17802, 14827, -2053, -17132, 18723, 14516, 4135,
+ -6822, -13869, -16016, 12975, 13341, 13563, 13603, 13478, 13296
+ },
+ { 5420, -14215, 19060, -18073, 11709, -3911, 9645, -18335, 7717,
+ 10842, -19283, 9777, 14898, -12555, -13661, 11668, 13520, -13733,
+ -15936, -1358, 15671, 16728, 328, -17100, 17527, 13973, 5587,
+ -5194, -14165, -17677, 12970, 13446, 13693, 13660, 13462, 13015
+ },
+ { 4386, -12426, 18019, -18895, 13894, -5034, 9713, -19270, 10283,
+ 8692, -18439, 9317, 13992, -13454, -13241, 12850, 13366, -13336,
+ -16334, -498, 15976, 16213, -114, -16987, 18191, 13659, 4958,
+ -5116, -13444, -18021, 12911, 13424, 13718, 13674, 13464, 13054
+ }
+};
+
+/* means of log gains and LAR coefficients */
+const int16_t WebRtcIsacfix_kMeansGainQ8[3][12] = {
+ { -1758, -1370, -1758, -1373, -1757, -1375,
+ -1758, -1374, -1758, -1373, -1755, -1370
+ },
+ { -1569, -1224, -1569, -1225, -1569, -1227,
+ -1569, -1226, -1567, -1225, -1565, -1224
+ },
+ { -1452, -957, -1447, -951, -1438, -944,
+ -1431, -938, -1419, -931, -1406, -926
+ }
+};
+
+
+const int32_t WebRtcIsacfix_kMeansShapeQ17[3][108] = {
+ { -119581, 34418, -44193, 11112, -4428, 18906, 9222, 8068, 1953, 5425,
+ 1871, 1689, 109933, 33751, 10471, -2566, 1090, 2320, -119219, 33728,
+ -43759, 11450, -4870, 19117, 9174, 8037, 1972, 5331, 1872, 1843,
+ 109899, 34301, 10629, -2316, 1272, 2562, -118608, 32318, -44012, 11591,
+ -4914, 18932, 9456, 8088, 1900, 5419, 1723, 1853, 109963, 35059,
+ 10745, -2335, 1161, 2520, -119174, 32107, -44462, 11635, -4694, 18611,
+ 9757, 8108, 1969, 5486, 1673, 1777, 109636, 34907, 10643, -2406,
+ 1034, 2420, -118597, 32320, -44590, 10854, -4569, 18821, 9701, 7866,
+ 2003, 5577, 1732, 1626, 109913, 34448, 10714, -2752, 990, 2228,
+ -118138, 32996, -44352, 10334, -3772, 18488, 9464, 7865, 2208, 5540,
+ 1745, 1664, 109880, 33381, 10640, -2779, 980, 2054
+ },
+ { -146328, 46370, 1047, 26431, 10035, 13933, 6415, 14359, -2368, 6661,
+ 2269, 1764, 96623, 7802, 4163, 10742, 1643, 2954, -146871, 46561, 1127,
+ 26225, 10113, 14096, 6771, 14323, -2037, 6788, 2297, 1761, 96324, 8382,
+ 4309, 10450, 1695, 3016, -146502, 46475, 1580, 26118, 10487, 14179, 6622,
+ 14439, -2034, 6757, 2342, 1761, 95869, 8966, 4347, 10358, 1999, 2855,
+ -146958, 47717, 826, 25952, 10263, 14061, 5266, 13681, -2417, 6582, 2047,
+ 1608, 96257, 9107, 4452, 10301, 1792, 2676, -146992, 47123, 446, 25822,
+ 10405, 14292, 5140, 13804, -2403, 6496, 1834, 1735, 97489, 9253, 4414,
+ 10684, 1549, 2721, -145811, 46182, 901, 26482, 10241, 14524, 6075, 14514,
+ -2147, 6691, 2196, 1899, 97011, 8178, 4102, 10758, 1638, 2869
+ },
+ { -166617, 46969, -43908, 17726, 6330, 25615, 6913, 5450, -2301, 1984,
+ 507, 2883, 149998, 28709, 19333, 16703, 11093, 8965, -168254, 46604,
+ -44315, 17862, 6474, 25746, 7018, 5373, -2343, 1930, 513, 2819, 150391,
+ 28627, 19194, 16678, 10998, 8929, -169093, 46084, -44767, 17427, 6401,
+ 25674, 7147, 5472, -2336, 1820, 491, 2802, 149860, 28430, 19064, 16524,
+ 10898, 8875, -170205, 46189, -44877, 17403, 6190, 25209, 7035, 5673, -2173,
+ 1894, 574, 2756, 148830, 28230, 18819, 16418, 10789, 8811, -171263, 45045,
+ -44834, 16858, 6103, 24726, 7014, 5713, -2103, 1877, 518, 2729, 147073,
+ 27744, 18629, 16277, 10690, 8703, -171720, 44153, -45062, 15951, 5872,
+ 24429, 7044, 5585, -2082, 1807, 519, 2769, 144791, 27402, 18490, 16126,
+ 10548, 8635
+ }
+};
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/lpc_tables.h b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/lpc_tables.h
new file mode 100644
index 0000000000..6965822952
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/lpc_tables.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * lpc_tables.h
+ *
+ * header file for coding tables for the LPC coefficients
+ *
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_LPC_TABLES_H_
+#define MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_LPC_TABLES_H_
+
+#include <stdint.h>
+
+#include "modules/audio_coding/codecs/isac/fix/source/settings.h"  /* KLT_NUM_MODELS */
+
+/* indices of KLT coefficients used */
+extern const uint16_t WebRtcIsacfix_kSelIndGain[12];
+
+extern const uint16_t WebRtcIsacfix_kSelIndShape[108];
+
+/* cdf array for model indicator */
+extern const uint16_t WebRtcIsacfix_kModelCdf[KLT_NUM_MODELS + 1];
+
+/* pointer to cdf array for model indicator */
+extern const uint16_t* WebRtcIsacfix_kModelCdfPtr[1];
+
+/* initial cdf index for decoder of model indicator */
+extern const uint16_t WebRtcIsacfix_kModelInitIndex[1];
+
+/* offset to go from rounded value to quantization index */
+extern const int16_t WebRtcIsacfix_kQuantMinGain[12];
+
+extern const int16_t WebRtcIsacfix_kQuantMinShape[108];
+
+/* maximum quantization index */
+extern const uint16_t WebRtcIsacfix_kMaxIndGain[12];
+
+extern const uint16_t WebRtcIsacfix_kMaxIndShape[108];
+
+/* index offset */
+extern const uint16_t WebRtcIsacfix_kOffsetGain[KLT_NUM_MODELS][12];
+
+extern const uint16_t WebRtcIsacfix_kOffsetShape[KLT_NUM_MODELS][108];
+
+/* initial cdf index for KLT coefficients */
+extern const uint16_t WebRtcIsacfix_kInitIndexGain[KLT_NUM_MODELS][12];
+
+extern const uint16_t WebRtcIsacfix_kInitIndexShape[KLT_NUM_MODELS][108];
+
+/* offsets for quantizer representation levels */
+extern const uint16_t WebRtcIsacfix_kOfLevelsGain[3];
+
+extern const uint16_t WebRtcIsacfix_kOfLevelsShape[3];
+
+/* quantizer representation levels */
+extern const int32_t WebRtcIsacfix_kLevelsGainQ17[1176];
+
+extern const int16_t WebRtcIsacfix_kLevelsShapeQ10[1735];
+
+/* cdf tables for quantizer indices */
+extern const uint16_t WebRtcIsacfix_kCdfGain[1212];
+
+extern const uint16_t WebRtcIsacfix_kCdfShape[2059];
+
+/* pointers to cdf tables for quantizer indices */
+extern const uint16_t* WebRtcIsacfix_kCdfGainPtr[KLT_NUM_MODELS][12];
+
+extern const uint16_t* WebRtcIsacfix_kCdfShapePtr[KLT_NUM_MODELS][108];
+
+/* code length for all coefficients using different models */
+extern const int16_t WebRtcIsacfix_kCodeLenGainQ11[392];
+
+extern const int16_t WebRtcIsacfix_kCodeLenShapeQ11[578];
+
+/* left KLT transforms */
+extern const int16_t WebRtcIsacfix_kT1GainQ15[KLT_NUM_MODELS][4];
+
+extern const int16_t WebRtcIsacfix_kT1ShapeQ15[KLT_NUM_MODELS][324];
+
+/* right KLT transforms */
+extern const int16_t WebRtcIsacfix_kT2GainQ15[KLT_NUM_MODELS][36];
+
+extern const int16_t WebRtcIsacfix_kT2ShapeQ15[KLT_NUM_MODELS][36];
+
+/* means of log gains and LAR coefficients */
+extern const int16_t WebRtcIsacfix_kMeansGainQ8[KLT_NUM_MODELS][12];
+
+extern const int32_t WebRtcIsacfix_kMeansShapeQ17[KLT_NUM_MODELS][108];
+
+#endif /* MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_LPC_TABLES_H_ */
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/pitch_estimator.c b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/pitch_estimator.c
new file mode 100644
index 0000000000..78cb93f7ae
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/pitch_estimator.c
@@ -0,0 +1,435 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/isac/fix/source/pitch_estimator.h"
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+#include "rtc_base/compile_assert_c.h"
+
+/* log2[0.2, 0.5, 0.98] in Q8 */
+static const int16_t kLogLagWinQ8[3] = {
+ -594, -256, -7
+};
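+
+/* Sanity check of the values above: log2(0.2)*2^8 = -594.4, log2(0.5)*2^8
+ * = -256 and log2(0.98)*2^8 = -7.5, matching {-594, -256, -7} after
+ * truncation toward zero. */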
+
+/* [1 -0.75 0.25] in Q12 */
+static const int16_t kACoefQ12[3] = {
+ 4096, -3072, 1024
+};
+
+int32_t WebRtcIsacfix_Log2Q8(uint32_t x) {
+ int32_t zeros;
+ int16_t frac;
+
+ zeros=WebRtcSpl_NormU32(x);
+ frac = (int16_t)(((x << zeros) & 0x7FFFFFFF) >> 23);
+ /* log2(magn(i)) */
+
+ return ((31 - zeros) << 8) + frac;
+}
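+
+/*
+ * Illustrative values: the integer part of the result comes from the
+ * position of the most significant set bit and the fraction is a linear
+ * approximation taken from the next 8 mantissa bits, so e.g.
+ *
+ *   WebRtcIsacfix_Log2Q8(1024) == 2560  (10.0 in Q8, exact)
+ *   WebRtcIsacfix_Log2Q8(3)    == 384   (1.5 in Q8; exact log2(3) is ~1.585)
+ */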
+
+static __inline int16_t Exp2Q10(int16_t x) { // Both in and out in Q10
+
+ int16_t tmp16_1, tmp16_2;
+
+ tmp16_2=(int16_t)(0x0400|(x&0x03FF));
+ tmp16_1 = -(x >> 10);
+ if(tmp16_1>0)
+ return tmp16_2 >> tmp16_1;
+ else
+ return tmp16_2 << -tmp16_1;
+
+}
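+
+/*
+ * Exp2Q10 is the approximate inverse: the integer part of x selects a
+ * shift and the fractional 10 bits form a linear mantissa (1 + frac).
+ * Illustrative values:
+ *
+ *   Exp2Q10(-1024) == 512   // 2^-1 = 0.5 in Q10, exact
+ *   Exp2Q10(512)   == 1536  // 1.5 in Q10; exact 2^0.5 is ~1448 in Q10
+ */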
+
+
+
+/* 1D parabolic interpolation. All input and output values are in Q8. */
+static __inline void Intrp1DQ8(int32_t *x, int32_t *fx, int32_t *y, int32_t *fy) {
+
+ int16_t sign1=1, sign2=1;
+ int32_t r32, q32, t32, nom32, den32;
+ int16_t t16, tmp16, tmp16_1;
+
+ if ((fx[0]>0) && (fx[2]>0)) {
+ r32=fx[1]-fx[2];
+ q32=fx[0]-fx[1];
+ nom32=q32+r32;
+ den32 = (q32 - r32) * 2;
+ if (nom32<0)
+ sign1=-1;
+ if (den32<0)
+ sign2=-1;
+
+    /* t = (q32+r32)/(2*(q32-r32)) = (fx[0]-fx[2]) / (2*(fx[0]-2*fx[1]+fx[2])) */
+ /* (Signs are removed because WebRtcSpl_DivResultInQ31 can't handle negative numbers) */
+ /* t in Q31, without signs */
+ t32 = WebRtcSpl_DivResultInQ31(nom32 * sign1, den32 * sign2);
+
+ t16 = (int16_t)(t32 >> 23); /* Q8 */
+ t16=t16*sign1*sign2; /* t in Q8 with signs */
+
+ *y = x[0]+t16; /* Q8 */
+ // *y = x[1]+t16; /* Q8 */
+
+ /* The following code calculates fy in three steps */
+ /* fy = 0.5 * t * (t-1) * fx[0] + (1-t*t) * fx[1] + 0.5 * t * (t+1) * fx[2]; */
+
+ /* Part I: 0.5 * t * (t-1) * fx[0] */
+ tmp16_1 = (int16_t)(t16 * t16); /* Q8*Q8=Q16 */
+ tmp16_1 >>= 2; /* Q16>>2 = Q14 */
+ t16 <<= 6; /* Q8<<6 = Q14 */
+ tmp16 = tmp16_1-t16;
+ *fy = WEBRTC_SPL_MUL_16_32_RSFT15(tmp16, fx[0]); /* (Q14 * Q8 >>15)/2 = Q8 */
+
+ /* Part II: (1-t*t) * fx[1] */
+ tmp16 = 16384-tmp16_1; /* 1 in Q14 - Q14 */
+ *fy += WEBRTC_SPL_MUL_16_32_RSFT14(tmp16, fx[1]);/* Q14 * Q8 >> 14 = Q8 */
+
+ /* Part III: 0.5 * t * (t+1) * fx[2] */
+ tmp16 = tmp16_1+t16;
+ *fy += WEBRTC_SPL_MUL_16_32_RSFT15(tmp16, fx[2]);/* (Q14 * Q8 >>15)/2 = Q8 */
+ } else {
+ *y = x[0];
+ *fy= fx[1];
+ }
+}
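+
+/*
+ * Background note: with the three samples treated as values at unit-spaced
+ * abscissas, the expressions above are the standard three-point parabolic
+ * peak fit. Writing q = fx[0]-fx[1] and r = fx[1]-fx[2], the vertex lies at
+ * offset t = (q+r)/(2*(q-r)) and the peak value follows from the Lagrange
+ * form fy = 0.5*t*(t-1)*fx[0] + (1-t*t)*fx[1] + 0.5*t*(t+1)*fx[2].
+ */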
+
+
+static void FindFour32(int32_t *in, int16_t length, int16_t *bestind)
+{
+ int32_t best[4]= {-100, -100, -100, -100};
+ int16_t k;
+
+ for (k=0; k<length; k++) {
+ if (in[k] > best[3]) {
+ if (in[k] > best[2]) {
+ if (in[k] > best[1]) {
+ if (in[k] > best[0]) { // The Best
+ best[3] = best[2];
+ bestind[3] = bestind[2];
+ best[2] = best[1];
+ bestind[2] = bestind[1];
+ best[1] = best[0];
+ bestind[1] = bestind[0];
+ best[0] = in[k];
+ bestind[0] = k;
+ } else { // 2nd best
+ best[3] = best[2];
+ bestind[3] = bestind[2];
+ best[2] = best[1];
+ bestind[2] = bestind[1];
+ best[1] = in[k];
+ bestind[1] = k;
+ }
+ } else { // 3rd best
+ best[3] = best[2];
+ bestind[3] = bestind[2];
+ best[2] = in[k];
+ bestind[2] = k;
+ }
+ } else { // 4th best
+ best[3] = in[k];
+ bestind[3] = k;
+ }
+ }
+ }
+}
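+
+/*
+ * Note: FindFour32 performs a partial insertion sort that tracks the four
+ * largest values and their indices. Callers are expected to pass a
+ * pre-initialized bestind array (e.g. {0, 0, 0, 0}), since entries are only
+ * written when a candidate displaces an earlier one; see the best4q usage
+ * in WebRtcIsacfix_InitialPitch below.
+ */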
+
+extern void WebRtcIsacfix_PCorr2Q32(const int16_t *in, int32_t *logcorQ8);
+
+
+
+void WebRtcIsacfix_InitialPitch(const int16_t *in, /* Q0 */
+ PitchAnalysisStruct *State,
+ int16_t *lagsQ7 /* Q7 */
+ )
+{
+ int16_t buf_dec16[PITCH_CORR_LEN2+PITCH_CORR_STEP2+PITCH_MAX_LAG/2+2];
+ int32_t *crrvecQ8_1,*crrvecQ8_2;
+ int32_t cv1q[PITCH_LAG_SPAN2+2],cv2q[PITCH_LAG_SPAN2+2], peakvq[PITCH_LAG_SPAN2+2];
+ int k;
+ int16_t peaks_indq;
+ int16_t peakiq[PITCH_LAG_SPAN2];
+ int32_t corr;
+ int32_t corr32, corr_max32, corr_max_o32;
+ int16_t npkq;
+ int16_t best4q[4]={0,0,0,0};
+ int32_t xq[3],yq[1],fyq[1];
+ int32_t *fxq;
+ int32_t best_lag1q, best_lag2q;
+ int32_t tmp32a,tmp32b,lag32,ratq;
+ int16_t start;
+ int16_t oldgQ12, tmp16a, tmp16b, gain_bias16,tmp16c, tmp16d, bias16;
+ int32_t tmp32c,tmp32d, tmp32e;
+ int16_t old_lagQ;
+ int32_t old_lagQ8;
+ int32_t lagsQ8[4];
+
+ old_lagQ = State->PFstr_wght.oldlagQ7; // Q7
+ old_lagQ8 = old_lagQ << 1; // Q8
+
+ oldgQ12= State->PFstr_wght.oldgainQ12;
+
+ crrvecQ8_1=&cv1q[1];
+ crrvecQ8_2=&cv2q[1];
+
+
+ /* copy old values from state buffer */
+ memcpy(buf_dec16, State->dec_buffer16, sizeof(State->dec_buffer16));
+
+ /* decimation; put result after the old values */
+ WebRtcIsacfix_DecimateAllpass32(in, State->decimator_state32, PITCH_FRAME_LEN,
+ &buf_dec16[PITCH_CORR_LEN2+PITCH_CORR_STEP2+PITCH_MAX_LAG/2-PITCH_FRAME_LEN/2+2]);
+
+ /* low-pass filtering */
+ start= PITCH_CORR_LEN2+PITCH_CORR_STEP2+PITCH_MAX_LAG/2-PITCH_FRAME_LEN/2+2;
+ WebRtcSpl_FilterARFastQ12(&buf_dec16[start],&buf_dec16[start],(int16_t*)kACoefQ12,3, PITCH_FRAME_LEN/2);
+
+ /* copy end part back into state buffer */
+ for (k = 0; k < (PITCH_CORR_LEN2+PITCH_CORR_STEP2+PITCH_MAX_LAG/2-PITCH_FRAME_LEN/2+2); k++)
+ State->dec_buffer16[k] = buf_dec16[k+PITCH_FRAME_LEN/2];
+
+
+ /* compute correlation for first and second half of the frame */
+ WebRtcIsacfix_PCorr2Q32(buf_dec16, crrvecQ8_1);
+ WebRtcIsacfix_PCorr2Q32(buf_dec16 + PITCH_CORR_STEP2, crrvecQ8_2);
+
+
+ /* bias towards pitch lag of previous frame */
+ tmp32a = WebRtcIsacfix_Log2Q8((uint32_t) old_lagQ8) - 2304;
+ // log2(0.5*oldlag) in Q8
+ tmp32b = oldgQ12 * oldgQ12 >> 10; // Q12 & * 4.0;
+ gain_bias16 = (int16_t) tmp32b; //Q12
+ if (gain_bias16 > 3276) gain_bias16 = 3276; // 0.8 in Q12
+
+
+ for (k = 0; k < PITCH_LAG_SPAN2; k++)
+ {
+ if (crrvecQ8_1[k]>0) {
+ tmp32b = WebRtcIsacfix_Log2Q8((uint32_t) (k + (PITCH_MIN_LAG/2-2)));
+ tmp16a = (int16_t) (tmp32b - tmp32a); // Q8 & fabs(ratio)<4
+ tmp32c = tmp16a * tmp16a >> 6; // Q10
+ tmp16b = (int16_t) tmp32c; // Q10 & <8
+ tmp32d = tmp16b * 177 >> 8; // mult with ln2 in Q8
+ tmp16c = (int16_t) tmp32d; // Q10 & <4
+ tmp16d = Exp2Q10((int16_t) -tmp16c); //Q10
+ tmp32c = gain_bias16 * tmp16d >> 13; // Q10 & * 0.5
+ bias16 = (int16_t) (1024 + tmp32c); // Q10
+ tmp32b = WebRtcIsacfix_Log2Q8((uint32_t)bias16) - 2560;
+ // Q10 in -> Q8 out with 10*2^8 offset
+ crrvecQ8_1[k] += tmp32b ; // -10*2^8 offset
+ }
+ }
+
+ /* taper correlation functions */
+ for (k = 0; k < 3; k++) {
+ crrvecQ8_1[k] += kLogLagWinQ8[k];
+ crrvecQ8_2[k] += kLogLagWinQ8[k];
+
+ crrvecQ8_1[PITCH_LAG_SPAN2-1-k] += kLogLagWinQ8[k];
+ crrvecQ8_2[PITCH_LAG_SPAN2-1-k] += kLogLagWinQ8[k];
+ }
+
+
+ /* Make zeropadded corr vectors */
+ cv1q[0]=0;
+ cv2q[0]=0;
+ cv1q[PITCH_LAG_SPAN2+1]=0;
+ cv2q[PITCH_LAG_SPAN2+1]=0;
+ corr_max32 = 0;
+
+ for (k = 1; k <= PITCH_LAG_SPAN2; k++)
+ {
+
+
+ corr32=crrvecQ8_1[k-1];
+ if (corr32 > corr_max32)
+ corr_max32 = corr32;
+
+ corr32=crrvecQ8_2[k-1];
+ corr32 += -4; // Compensate for later (log2(0.99))
+
+ if (corr32 > corr_max32)
+ corr_max32 = corr32;
+
+ }
+
+ /* threshold value to qualify as a peak */
+  // corr_max32 += -726; // log(0.14)/log(2.0) in Q8
+  corr_max32 += -1000; // roughly log2(0.066) in Q8; a stricter threshold
+                       // than the original log2(0.14) = -726
+ corr_max_o32 = corr_max32;
+
+
+ /* find peaks in corr1 */
+ peaks_indq = 0;
+ for (k = 1; k <= PITCH_LAG_SPAN2; k++)
+ {
+ corr32=cv1q[k];
+ if (corr32>corr_max32) { // Disregard small peaks
+ if ((corr32>=cv1q[k-1]) && (corr32>cv1q[k+1])) { // Peak?
+ peakvq[peaks_indq] = corr32;
+ peakiq[peaks_indq++] = k;
+ }
+ }
+ }
+
+
+ /* find highest interpolated peak */
+ corr_max32=0;
+ best_lag1q =0;
+ if (peaks_indq > 0) {
+ FindFour32(peakvq, (int16_t) peaks_indq, best4q);
+ npkq = WEBRTC_SPL_MIN(peaks_indq, 4);
+
+ for (k=0;k<npkq;k++) {
+
+ lag32 = peakiq[best4q[k]];
+ fxq = &cv1q[peakiq[best4q[k]]-1];
+ xq[0]= lag32;
+ xq[0] <<= 8;
+ Intrp1DQ8(xq, fxq, yq, fyq);
+
+ tmp32a= WebRtcIsacfix_Log2Q8((uint32_t) *yq) - 2048; // offset 8*2^8
+ /* Bias towards short lags */
+ /* log(pow(0.8, log(2.0 * *y )))/log(2.0) */
+ tmp32b = (int16_t)tmp32a * -42 >> 8;
+ tmp32c= tmp32b + 256;
+ *fyq += tmp32c;
+ if (*fyq > corr_max32) {
+ corr_max32 = *fyq;
+ best_lag1q = *yq;
+ }
+ }
+ tmp32b = (best_lag1q - OFFSET_Q8) * 2;
+ lagsQ8[0] = tmp32b + PITCH_MIN_LAG_Q8;
+ lagsQ8[1] = lagsQ8[0];
+ } else {
+ lagsQ8[0] = old_lagQ8;
+ lagsQ8[1] = lagsQ8[0];
+ }
+
+ /* Bias towards constant pitch */
+ tmp32a = lagsQ8[0] - PITCH_MIN_LAG_Q8;
+ ratq = (tmp32a >> 1) + OFFSET_Q8;
+
+ for (k = 1; k <= PITCH_LAG_SPAN2; k++)
+ {
+ tmp32a = k << 7; // 0.5*k Q8
+ tmp32b = tmp32a * 2 - ratq; // Q8
+ tmp32c = (int16_t)tmp32b * (int16_t)tmp32b >> 8; // Q8
+
+ tmp32b = tmp32c + (ratq >> 1);
+ // (k-r)^2 + 0.5 * r Q8
+ tmp32c = WebRtcIsacfix_Log2Q8((uint32_t)tmp32a) - 2048;
+ // offset 8*2^8 , log2(0.5*k) Q8
+ tmp32d = WebRtcIsacfix_Log2Q8((uint32_t)tmp32b) - 2048;
+    // offset 8*2^8 , log2((k-r)^2 + 0.5*r) Q8
+ tmp32e = tmp32c - tmp32d;
+
+ cv2q[k] += tmp32e >> 1;
+
+ }
+
+ /* find peaks in corr2 */
+ corr_max32 = corr_max_o32;
+ peaks_indq = 0;
+
+ for (k = 1; k <= PITCH_LAG_SPAN2; k++)
+ {
+ corr=cv2q[k];
+ if (corr>corr_max32) { // Disregard small peaks
+ if ((corr>=cv2q[k-1]) && (corr>cv2q[k+1])) { // Peak?
+ peakvq[peaks_indq] = corr;
+ peakiq[peaks_indq++] = k;
+ }
+ }
+ }
+
+
+
+ /* find highest interpolated peak */
+ corr_max32 = 0;
+ best_lag2q =0;
+ if (peaks_indq > 0) {
+
+ FindFour32(peakvq, (int16_t) peaks_indq, best4q);
+ npkq = WEBRTC_SPL_MIN(peaks_indq, 4);
+ for (k=0;k<npkq;k++) {
+
+ lag32 = peakiq[best4q[k]];
+ fxq = &cv2q[peakiq[best4q[k]]-1];
+
+ xq[0]= lag32;
+ xq[0] <<= 8;
+ Intrp1DQ8(xq, fxq, yq, fyq);
+
+ /* Bias towards short lags */
+ /* log(pow(0.8, log(2.0f * *y )))/log(2.0f) */
+ tmp32a= WebRtcIsacfix_Log2Q8((uint32_t) *yq) - 2048; // offset 8*2^8
+ tmp32b = (int16_t)tmp32a * -82 >> 8;
+ tmp32c= tmp32b + 256;
+ *fyq += tmp32c;
+ if (*fyq > corr_max32) {
+ corr_max32 = *fyq;
+ best_lag2q = *yq;
+ }
+ }
+
+ tmp32b = (best_lag2q - OFFSET_Q8) * 2;
+ lagsQ8[2] = tmp32b + PITCH_MIN_LAG_Q8;
+ lagsQ8[3] = lagsQ8[2];
+ } else {
+ lagsQ8[2] = lagsQ8[0];
+ lagsQ8[3] = lagsQ8[0];
+ }
+
+ lagsQ7[0] = (int16_t)(lagsQ8[0] >> 1);
+ lagsQ7[1] = (int16_t)(lagsQ8[1] >> 1);
+ lagsQ7[2] = (int16_t)(lagsQ8[2] >> 1);
+ lagsQ7[3] = (int16_t)(lagsQ8[3] >> 1);
+}
+
+
+
+void WebRtcIsacfix_PitchAnalysis(const int16_t *inn, /* PITCH_FRAME_LEN samples */
+ int16_t *outQ0, /* PITCH_FRAME_LEN+QLOOKAHEAD samples */
+ PitchAnalysisStruct *State,
+ int16_t *PitchLags_Q7,
+ int16_t *PitchGains_Q12)
+{
+ int16_t inbufQ0[PITCH_FRAME_LEN + QLOOKAHEAD];
+ int16_t k;
+
+  /* initial pitch estimate */
+ WebRtcIsacfix_InitialPitch(inn, State, PitchLags_Q7);
+
+
+ /* Calculate gain */
+ WebRtcIsacfix_PitchFilterGains(inn, &(State->PFstr_wght), PitchLags_Q7, PitchGains_Q12);
+
+ /* concatenate previous input's end and current input */
+ for (k = 0; k < QLOOKAHEAD; k++) {
+ inbufQ0[k] = State->inbuf[k];
+ }
+ for (k = 0; k < PITCH_FRAME_LEN; k++) {
+ inbufQ0[k+QLOOKAHEAD] = (int16_t) inn[k];
+ }
+
+ /* lookahead pitch filtering for masking analysis */
+ WebRtcIsacfix_PitchFilter(inbufQ0, outQ0, &(State->PFstr), PitchLags_Q7,PitchGains_Q12, 2);
+
+
+ /* store last part of input */
+ for (k = 0; k < QLOOKAHEAD; k++) {
+ State->inbuf[k] = inbufQ0[k + PITCH_FRAME_LEN];
+ }
+}
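+
+/*
+ * Illustrative call (a sketch; `state` is assumed to have been initialized
+ * by the encoder setup code, and `frame` holds PITCH_FRAME_LEN Q0 samples):
+ *
+ *   int16_t out[PITCH_FRAME_LEN + QLOOKAHEAD];
+ *   int16_t lagsQ7[4], gainsQ12[4];
+ *   WebRtcIsacfix_PitchAnalysis(frame, out, &state, lagsQ7, gainsQ12);
+ *
+ * The four lag/gain pairs correspond to the four subframes produced by
+ * WebRtcIsacfix_InitialPitch and WebRtcIsacfix_PitchFilterGains above.
+ */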
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/pitch_estimator.h b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/pitch_estimator.h
new file mode 100644
index 0000000000..4303c82711
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/pitch_estimator.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * pitch_estimator.h
+ *
+ * Pitch functions
+ *
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_PITCH_ESTIMATOR_H_
+#define MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_PITCH_ESTIMATOR_H_
+
+#include "modules/audio_coding/codecs/isac/fix/source/structs.h"
+
+void WebRtcIsacfix_PitchAnalysis(
+ const int16_t* in, /* PITCH_FRAME_LEN samples */
+ int16_t* outQ0, /* PITCH_FRAME_LEN+QLOOKAHEAD samples */
+ PitchAnalysisStruct* State,
+ int16_t* lagsQ7,
+ int16_t* PitchGains_Q12);
+
+void WebRtcIsacfix_InitialPitch(const int16_t* in,
+ PitchAnalysisStruct* State,
+ int16_t* qlags);
+
+void WebRtcIsacfix_PitchFilter(int16_t* indatFix,
+ int16_t* outdatQQ,
+ PitchFiltstr* pfp,
+ int16_t* lagsQ7,
+ int16_t* gainsQ12,
+ int16_t type);
+
+void WebRtcIsacfix_PitchFilterCore(int loopNumber,
+ int16_t gain,
+ size_t index,
+ int16_t sign,
+ int16_t* inputState,
+ int16_t* outputBuff2,
+ const int16_t* coefficient,
+ int16_t* inputBuf,
+ int16_t* outputBuf,
+ int* index2);
+
+void WebRtcIsacfix_PitchFilterGains(const int16_t* indatQ0,
+ PitchFiltstr* pfp,
+ int16_t* lagsQ7,
+ int16_t* gainsQ12);
+
+void WebRtcIsacfix_DecimateAllpass32(
+ const int16_t* in,
+ int32_t* state_in, /* array of size: 2*ALLPASSSECTIONS+1 */
+ int16_t N, /* number of input samples */
+ int16_t* out); /* array of size N/2 */
+
+int32_t WebRtcIsacfix_Log2Q8(uint32_t x);
+
+void WebRtcIsacfix_PCorr2Q32(const int16_t* in, int32_t* logcorQ8);
+
+#endif /* MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_PITCH_ESTIMATOR_H_ */
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/pitch_estimator_c.c b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/pitch_estimator_c.c
new file mode 100644
index 0000000000..c4af9ab32a
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/pitch_estimator_c.c
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/isac/fix/source/pitch_estimator.h"
+
+#ifdef WEBRTC_HAS_NEON
+#include <arm_neon.h>
+#endif
+
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+#include "rtc_base/compile_assert_c.h"
+
+extern int32_t WebRtcIsacfix_Log2Q8(uint32_t x);
+
+void WebRtcIsacfix_PCorr2Q32(const int16_t* in, int32_t* logcorQ8) {
+ int16_t scaling,n,k;
+ int32_t csum32, lys, lcs;
+ int64_t ysum64;
+ const int32_t oneQ8 = 1 << 8; // 1.00 in Q8
+ const int16_t* x;
+ const int16_t* inptr;
+
+ x = in + PITCH_MAX_LAG / 2 + 2;
+ scaling = WebRtcSpl_GetScalingSquare((int16_t*)in,
+ PITCH_CORR_LEN2,
+ PITCH_CORR_LEN2);
+ ysum64 = 1;
+ csum32 = 0;
+ x = in + PITCH_MAX_LAG / 2 + 2;
+ for (n = 0; n < PITCH_CORR_LEN2; n++) {
+ ysum64 += in[n] * in[n] >> scaling; // Q0
+ csum32 += x[n] * in[n] >> scaling; // Q0
+ }
+ logcorQ8 += PITCH_LAG_SPAN2 - 1;
+ lys = WebRtcIsacfix_Log2Q8((uint32_t)ysum64) >> 1; // Q8, sqrt(ysum)
+ if (csum32 > 0) {
+ lcs = WebRtcIsacfix_Log2Q8((uint32_t)csum32); // 2log(csum) in Q8
+ if (lcs > (lys + oneQ8)) { // csum/sqrt(ysum) > 2 in Q8
+ *logcorQ8 = lcs - lys; // log2(csum/sqrt(ysum))
+ } else {
+ *logcorQ8 = oneQ8; // 1.00
+ }
+ } else {
+ *logcorQ8 = 0;
+ }
+
+
+ for (k = 1; k < PITCH_LAG_SPAN2; k++) {
+ inptr = &in[k];
+ ysum64 -= in[k - 1] * in[k - 1] >> scaling;
+ ysum64 += (int32_t)(in[PITCH_CORR_LEN2 + k - 1])
+ * in[PITCH_CORR_LEN2 + k - 1] >> scaling;
+
+#ifdef WEBRTC_HAS_NEON
+ {
+ int32_t vbuff[4];
+ int32x4_t int_32x4_sum = vmovq_n_s32(0);
+ // Can't shift a Neon register to right with a non-constant shift value.
+ int32x4_t int_32x4_scale = vdupq_n_s32(-scaling);
+      // Assert a condition used in loop unrolling at compile time.
+      RTC_COMPILE_ASSERT(PITCH_CORR_LEN2 % 4 == 0);
+
+ for (n = 0; n < PITCH_CORR_LEN2; n += 4) {
+ int16x4_t int_16x4_x = vld1_s16(&x[n]);
+ int16x4_t int_16x4_in = vld1_s16(&inptr[n]);
+ int32x4_t int_32x4 = vmull_s16(int_16x4_x, int_16x4_in);
+ int_32x4 = vshlq_s32(int_32x4, int_32x4_scale);
+ int_32x4_sum = vaddq_s32(int_32x4_sum, int_32x4);
+ }
+
+      // Use a vector store to avoid the long stall caused by transferring
+      // data from a vector register to a general register.
+ vst1q_s32(vbuff, int_32x4_sum);
+ csum32 = vbuff[0] + vbuff[1];
+ csum32 += vbuff[2];
+ csum32 += vbuff[3];
+ }
+#else
+ int64_t csum64_tmp = 0;
+ if(scaling == 0) {
+ for (n = 0; n < PITCH_CORR_LEN2; n++) {
+ csum64_tmp += (int32_t)(x[n]) * inptr[n];
+ }
+ } else {
+ for (n = 0; n < PITCH_CORR_LEN2; n++) {
+ csum64_tmp += ((int32_t)(x[n]) * inptr[n]) >> scaling;
+ }
+ }
+ csum32 = csum64_tmp;
+#endif
+
+ logcorQ8--;
+
+ lys = WebRtcIsacfix_Log2Q8((uint32_t)ysum64) >> 1; // Q8, sqrt(ysum)
+
+ if (csum32 > 0) {
+ lcs = WebRtcIsacfix_Log2Q8((uint32_t)csum32); // 2log(csum) in Q8
+ if (lcs > (lys + oneQ8)) { // csum/sqrt(ysum) > 2
+ *logcorQ8 = lcs - lys; // log2(csum/sqrt(ysum))
+ } else {
+ *logcorQ8 = oneQ8; // 1.00
+ }
+ } else {
+ *logcorQ8 = 0;
+ }
+ }
+}
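+
+/*
+ * The loop above computes the correlation for each lag incrementally: the
+ * energy term ysum64 is updated by sliding the window one sample (subtract
+ * the outgoing sample's square, add the incoming one), while the cross
+ * term csum32 is recomputed in full, with NEON vectorization when
+ * available. The result per lag is log2(csum/sqrt(ysum)) in Q8, floored at
+ * 1.00 (256) for small positive correlations and set to 0 otherwise.
+ */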
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/pitch_estimator_mips.c b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/pitch_estimator_mips.c
new file mode 100644
index 0000000000..4ead84c492
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/pitch_estimator_mips.c
@@ -0,0 +1,193 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/isac/fix/source/pitch_estimator.h"
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+#include "rtc_base/compile_assert_c.h"
+
+extern int32_t WebRtcIsacfix_Log2Q8(uint32_t x);
+
+void WebRtcIsacfix_PCorr2Q32(const int16_t* in, int32_t* logcorQ8) {
+ int16_t scaling,n,k;
+ int32_t ysum32,csum32, lys, lcs;
+ const int32_t oneQ8 = 1 << 8; // 1.00 in Q8
+ const int16_t* x;
+ const int16_t* inptr;
+
+ x = in + PITCH_MAX_LAG / 2 + 2;
+ scaling = WebRtcSpl_GetScalingSquare((int16_t*)in,
+ PITCH_CORR_LEN2,
+ PITCH_CORR_LEN2);
+ ysum32 = 1;
+ csum32 = 0;
+ x = in + PITCH_MAX_LAG / 2 + 2;
+ {
+ const int16_t* tmp_x = x;
+ const int16_t* tmp_in = in;
+ int32_t tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp8;
+ n = PITCH_CORR_LEN2;
+ RTC_COMPILE_ASSERT(PITCH_CORR_LEN2 % 4 == 0);
+ __asm __volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "1: \n\t"
+ "lh %[tmp1], 0(%[tmp_in]) \n\t"
+ "lh %[tmp2], 2(%[tmp_in]) \n\t"
+ "lh %[tmp3], 4(%[tmp_in]) \n\t"
+ "lh %[tmp4], 6(%[tmp_in]) \n\t"
+ "lh %[tmp5], 0(%[tmp_x]) \n\t"
+ "lh %[tmp6], 2(%[tmp_x]) \n\t"
+ "lh %[tmp7], 4(%[tmp_x]) \n\t"
+ "lh %[tmp8], 6(%[tmp_x]) \n\t"
+ "mul %[tmp5], %[tmp1], %[tmp5] \n\t"
+ "mul %[tmp1], %[tmp1], %[tmp1] \n\t"
+ "mul %[tmp6], %[tmp2], %[tmp6] \n\t"
+ "mul %[tmp2], %[tmp2], %[tmp2] \n\t"
+ "mul %[tmp7], %[tmp3], %[tmp7] \n\t"
+ "mul %[tmp3], %[tmp3], %[tmp3] \n\t"
+ "mul %[tmp8], %[tmp4], %[tmp8] \n\t"
+ "mul %[tmp4], %[tmp4], %[tmp4] \n\t"
+ "addiu %[n], %[n], -4 \n\t"
+ "srav %[tmp5], %[tmp5], %[scaling] \n\t"
+ "srav %[tmp1], %[tmp1], %[scaling] \n\t"
+ "srav %[tmp6], %[tmp6], %[scaling] \n\t"
+ "srav %[tmp2], %[tmp2], %[scaling] \n\t"
+ "srav %[tmp7], %[tmp7], %[scaling] \n\t"
+ "srav %[tmp3], %[tmp3], %[scaling] \n\t"
+ "srav %[tmp8], %[tmp8], %[scaling] \n\t"
+ "srav %[tmp4], %[tmp4], %[scaling] \n\t"
+ "addu %[ysum32], %[ysum32], %[tmp1] \n\t"
+ "addu %[csum32], %[csum32], %[tmp5] \n\t"
+ "addu %[ysum32], %[ysum32], %[tmp2] \n\t"
+ "addu %[csum32], %[csum32], %[tmp6] \n\t"
+ "addu %[ysum32], %[ysum32], %[tmp3] \n\t"
+ "addu %[csum32], %[csum32], %[tmp7] \n\t"
+ "addu %[ysum32], %[ysum32], %[tmp4] \n\t"
+ "addu %[csum32], %[csum32], %[tmp8] \n\t"
+ "addiu %[tmp_in], %[tmp_in], 8 \n\t"
+ "bgtz %[n], 1b \n\t"
+ " addiu %[tmp_x], %[tmp_x], 8 \n\t"
+ ".set pop \n\t"
+ : [tmp1] "=&r" (tmp1), [tmp2] "=&r" (tmp2), [tmp3] "=&r" (tmp3),
+ [tmp4] "=&r" (tmp4), [tmp5] "=&r" (tmp5), [tmp6] "=&r" (tmp6),
+ [tmp7] "=&r" (tmp7), [tmp8] "=&r" (tmp8), [tmp_in] "+r" (tmp_in),
+ [ysum32] "+r" (ysum32), [tmp_x] "+r" (tmp_x), [csum32] "+r" (csum32),
+ [n] "+r" (n)
+ : [scaling] "r" (scaling)
+ : "memory", "hi", "lo"
+ );
+ }
+ logcorQ8 += PITCH_LAG_SPAN2 - 1;
+ lys = WebRtcIsacfix_Log2Q8((uint32_t)ysum32) >> 1; // Q8, sqrt(ysum)
+ if (csum32 > 0) {
+ lcs = WebRtcIsacfix_Log2Q8((uint32_t)csum32); // 2log(csum) in Q8
+ if (lcs > (lys + oneQ8)) { // csum/sqrt(ysum) > 2 in Q8
+ *logcorQ8 = lcs - lys; // log2(csum/sqrt(ysum))
+ } else {
+ *logcorQ8 = oneQ8; // 1.00
+ }
+ } else {
+ *logcorQ8 = 0;
+ }
+
+ for (k = 1; k < PITCH_LAG_SPAN2; k++) {
+ inptr = &in[k];
+ const int16_t* tmp_in1 = &in[k - 1];
+ const int16_t* tmp_in2 = &in[PITCH_CORR_LEN2 + k - 1];
+ const int16_t* tmp_x = x;
+ int32_t tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp8;
+ n = PITCH_CORR_LEN2;
+ csum32 = 0;
+ __asm __volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "lh %[tmp1], 0(%[tmp_in1]) \n\t"
+ "lh %[tmp2], 0(%[tmp_in2]) \n\t"
+ "mul %[tmp1], %[tmp1], %[tmp1] \n\t"
+ "mul %[tmp2], %[tmp2], %[tmp2] \n\t"
+ "srav %[tmp1], %[tmp1], %[scaling] \n\t"
+ "srav %[tmp2], %[tmp2], %[scaling] \n\t"
+ "subu %[ysum32], %[ysum32], %[tmp1] \n\t"
+ "bnez %[scaling], 2f \n\t"
+ " addu %[ysum32], %[ysum32], %[tmp2] \n\t"
+ "1: \n\t"
+ "lh %[tmp1], 0(%[inptr]) \n\t"
+ "lh %[tmp2], 0(%[tmp_x]) \n\t"
+ "lh %[tmp3], 2(%[inptr]) \n\t"
+ "lh %[tmp4], 2(%[tmp_x]) \n\t"
+ "lh %[tmp5], 4(%[inptr]) \n\t"
+ "lh %[tmp6], 4(%[tmp_x]) \n\t"
+ "lh %[tmp7], 6(%[inptr]) \n\t"
+ "lh %[tmp8], 6(%[tmp_x]) \n\t"
+ "mul %[tmp1], %[tmp1], %[tmp2] \n\t"
+ "mul %[tmp2], %[tmp3], %[tmp4] \n\t"
+ "mul %[tmp3], %[tmp5], %[tmp6] \n\t"
+ "mul %[tmp4], %[tmp7], %[tmp8] \n\t"
+ "addiu %[n], %[n], -4 \n\t"
+ "addiu %[inptr], %[inptr], 8 \n\t"
+ "addiu %[tmp_x], %[tmp_x], 8 \n\t"
+ "addu %[csum32], %[csum32], %[tmp1] \n\t"
+ "addu %[csum32], %[csum32], %[tmp2] \n\t"
+ "addu %[csum32], %[csum32], %[tmp3] \n\t"
+ "bgtz %[n], 1b \n\t"
+ " addu %[csum32], %[csum32], %[tmp4] \n\t"
+ "b 3f \n\t"
+ " nop \n\t"
+ "2: \n\t"
+ "lh %[tmp1], 0(%[inptr]) \n\t"
+ "lh %[tmp2], 0(%[tmp_x]) \n\t"
+ "lh %[tmp3], 2(%[inptr]) \n\t"
+ "lh %[tmp4], 2(%[tmp_x]) \n\t"
+ "lh %[tmp5], 4(%[inptr]) \n\t"
+ "lh %[tmp6], 4(%[tmp_x]) \n\t"
+ "lh %[tmp7], 6(%[inptr]) \n\t"
+ "lh %[tmp8], 6(%[tmp_x]) \n\t"
+ "mul %[tmp1], %[tmp1], %[tmp2] \n\t"
+ "mul %[tmp2], %[tmp3], %[tmp4] \n\t"
+ "mul %[tmp3], %[tmp5], %[tmp6] \n\t"
+ "mul %[tmp4], %[tmp7], %[tmp8] \n\t"
+ "addiu %[n], %[n], -4 \n\t"
+ "addiu %[inptr], %[inptr], 8 \n\t"
+ "addiu %[tmp_x], %[tmp_x], 8 \n\t"
+ "srav %[tmp1], %[tmp1], %[scaling] \n\t"
+ "srav %[tmp2], %[tmp2], %[scaling] \n\t"
+ "srav %[tmp3], %[tmp3], %[scaling] \n\t"
+ "srav %[tmp4], %[tmp4], %[scaling] \n\t"
+ "addu %[csum32], %[csum32], %[tmp1] \n\t"
+ "addu %[csum32], %[csum32], %[tmp2] \n\t"
+ "addu %[csum32], %[csum32], %[tmp3] \n\t"
+ "bgtz %[n], 2b \n\t"
+ " addu %[csum32], %[csum32], %[tmp4] \n\t"
+ "3: \n\t"
+ ".set pop \n\t"
+ : [tmp1] "=&r" (tmp1), [tmp2] "=&r" (tmp2), [tmp3] "=&r" (tmp3),
+ [tmp4] "=&r" (tmp4), [tmp5] "=&r" (tmp5), [tmp6] "=&r" (tmp6),
+ [tmp7] "=&r" (tmp7), [tmp8] "=&r" (tmp8), [inptr] "+r" (inptr),
+ [csum32] "+r" (csum32), [tmp_x] "+r" (tmp_x), [ysum32] "+r" (ysum32),
+ [n] "+r" (n)
+ : [tmp_in1] "r" (tmp_in1), [tmp_in2] "r" (tmp_in2),
+ [scaling] "r" (scaling)
+ : "memory", "hi", "lo"
+ );
+
+ logcorQ8--;
+ lys = WebRtcIsacfix_Log2Q8((uint32_t)ysum32) >> 1; // Q8, sqrt(ysum)
+ if (csum32 > 0) {
+ lcs = WebRtcIsacfix_Log2Q8((uint32_t)csum32); // 2log(csum) in Q8
+ if (lcs > (lys + oneQ8)) { // csum/sqrt(ysum) > 2
+ *logcorQ8 = lcs - lys; // log2(csum/sqrt(ysum))
+ } else {
+ *logcorQ8 = oneQ8; // 1.00
+ }
+ } else {
+ *logcorQ8 = 0;
+ }
+ }
+}
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/pitch_filter.c b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/pitch_filter.c
new file mode 100644
index 0000000000..735533020e
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/pitch_filter.c
@@ -0,0 +1,248 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/isac/fix/source/pitch_estimator.h"
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+#include "modules/audio_coding/codecs/isac/fix/source/settings.h"
+#include "modules/audio_coding/codecs/isac/fix/source/structs.h"
+#include "rtc_base/compile_assert_c.h"
+
+// Number of segments in a pitch subframe.
+static const int kSegments = 5;
+
+// A division factor of 1/5 in Q15.
+static const int16_t kDivFactor = 6553;
+
+// Interpolation coefficients; generated by design_pitch_filter.m.
+// Coefficients are stored in Q14.
+static const int16_t kIntrpCoef[PITCH_FRACS][PITCH_FRACORDER] = {
+ {-367, 1090, -2706, 9945, 10596, -3318, 1626, -781, 287},
+ {-325, 953, -2292, 7301, 12963, -3320, 1570, -743, 271},
+ {-240, 693, -1622, 4634, 14809, -2782, 1262, -587, 212},
+ {-125, 358, -817, 2144, 15982, -1668, 721, -329, 118},
+ { 0, 0, -1, 1, 16380, 1, -1, 0, 0},
+ { 118, -329, 721, -1668, 15982, 2144, -817, 358, -125},
+ { 212, -587, 1262, -2782, 14809, 4634, -1622, 693, -240},
+ { 271, -743, 1570, -3320, 12963, 7301, -2292, 953, -325}
+};
+
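+// Rounds a Q(qDomain) fixed-point value to the nearest integer by adding
+// half an LSB before the arithmetic right shift.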
+static __inline size_t CalcLrIntQ(int16_t fixVal,
+ int16_t qDomain) {
+ int32_t roundVal = 1 << (qDomain - 1);
+
+ return (fixVal + roundVal) >> qDomain;
+}
+
+void WebRtcIsacfix_PitchFilter(int16_t* indatQQ, // Q10 if type is 1 or 4,
+ // Q0 if type is 2.
+ int16_t* outdatQQ,
+ PitchFiltstr* pfp,
+ int16_t* lagsQ7,
+ int16_t* gainsQ12,
+ int16_t type) {
+ int k, ind, cnt;
+ int16_t sign = 1;
+ int16_t inystateQQ[PITCH_DAMPORDER];
+ int16_t ubufQQ[PITCH_INTBUFFSIZE + QLOOKAHEAD];
+ const int16_t Gain = 21299; // 1.3 in Q14
+ int16_t oldLagQ7;
+ int16_t oldGainQ12, lagdeltaQ7, curLagQ7, gaindeltaQ12, curGainQ12;
+ size_t frcQQ = 0;
+ int32_t indW32 = 0;
+ const int16_t* fracoeffQQ = NULL;
+
+  // Assumptions in the ARM assembly version of WebRtcIsacfix_PitchFilterCore().
+ RTC_COMPILE_ASSERT(PITCH_FRACORDER == 9);
+ RTC_COMPILE_ASSERT(PITCH_DAMPORDER == 5);
+
+ // Set up buffer and states.
+ memcpy(ubufQQ, pfp->ubufQQ, sizeof(pfp->ubufQQ));
+ memcpy(inystateQQ, pfp->ystateQQ, sizeof(inystateQQ));
+
+ // Get old lag and gain value from memory.
+ oldLagQ7 = pfp->oldlagQ7;
+ oldGainQ12 = pfp->oldgainQ12;
+
+ if (type == 4) {
+ sign = -1;
+
+ // Make output more periodic.
+ for (k = 0; k < PITCH_SUBFRAMES; k++) {
+ gainsQ12[k] = (int16_t)(gainsQ12[k] * Gain >> 14);
+ }
+ }
+
+  // No interpolation if the pitch lag changes by more than a factor of 1.5
+  // in either direction.
+  if (((lagsQ7[0] * 3 >> 1) < oldLagQ7) || (lagsQ7[0] > (oldLagQ7 * 3 >> 1))) {
+ oldLagQ7 = lagsQ7[0];
+ oldGainQ12 = gainsQ12[0];
+ }
+
+ ind = 0;
+
+ for (k = 0; k < PITCH_SUBFRAMES; k++) {
+ // Calculate interpolation steps.
+ lagdeltaQ7 = lagsQ7[k] - oldLagQ7;
+ lagdeltaQ7 = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(
+ lagdeltaQ7, kDivFactor, 15);
+ curLagQ7 = oldLagQ7;
+ gaindeltaQ12 = gainsQ12[k] - oldGainQ12;
+ gaindeltaQ12 = (int16_t)(gaindeltaQ12 * kDivFactor >> 15);
+
+ curGainQ12 = oldGainQ12;
+ oldLagQ7 = lagsQ7[k];
+ oldGainQ12 = gainsQ12[k];
+
+ // Each frame has 4 60-sample pitch subframes, and each subframe has 5
+    // 12-sample segments. Each segment needs to be processed with
+ // newly-updated parameters, so we break the pitch filtering into
+ // two for-loops (5 x 12) below. It's also why kDivFactor = 0.2 (in Q15).
+ for (cnt = 0; cnt < kSegments; cnt++) {
+ // Update parameters for each segment.
+ curGainQ12 += gaindeltaQ12;
+ curLagQ7 += lagdeltaQ7;
+ indW32 = CalcLrIntQ(curLagQ7, 7);
+ if (indW32 < PITCH_FRACORDER - 2) {
+ // WebRtcIsacfix_PitchFilterCore requires indW32 >= PITCH_FRACORDER -
+ // 2; otherwise, it will read from entries of ubufQQ that haven't been
+ // written yet. (This problem has only been seen in fuzzer tests, not
+ // in real life.) See Chromium bug 581901.
+ indW32 = PITCH_FRACORDER - 2;
+ }
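+      // frcQQ is the fractional-lag index into kIntrpCoef[]: an exact integer
+      // lag maps to row 4 (the near-identity filter), and the wrap of 8 to 0
+      // below keeps the index inside the table.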
+ frcQQ = ((indW32 << 7) + 64 - curLagQ7) >> 4;
+
+ if (frcQQ >= PITCH_FRACS) {
+ frcQQ = 0;
+ }
+ fracoeffQQ = kIntrpCoef[frcQQ];
+
+ // Pitch filtering.
+ WebRtcIsacfix_PitchFilterCore(PITCH_SUBFRAME_LEN / kSegments, curGainQ12,
+ indW32, sign, inystateQQ, ubufQQ, fracoeffQQ, indatQQ, outdatQQ, &ind);
+ }
+ }
+
+ // Export buffer and states.
+ memcpy(pfp->ubufQQ, ubufQQ + PITCH_FRAME_LEN, sizeof(pfp->ubufQQ));
+ memcpy(pfp->ystateQQ, inystateQQ, sizeof(pfp->ystateQQ));
+
+ pfp->oldlagQ7 = oldLagQ7;
+ pfp->oldgainQ12 = oldGainQ12;
+
+ if (type == 2) {
+ // Filter look-ahead segment.
+ WebRtcIsacfix_PitchFilterCore(QLOOKAHEAD, curGainQ12, indW32, 1, inystateQQ,
+ ubufQQ, fracoeffQQ, indatQQ, outdatQQ, &ind);
+ }
+}
+
+
+void WebRtcIsacfix_PitchFilterGains(const int16_t* indatQ0,
+ PitchFiltstr* pfp,
+ int16_t* lagsQ7,
+ int16_t* gainsQ12) {
+ int k, n, m;
+ size_t ind, pos, pos3QQ;
+
+ int16_t ubufQQ[PITCH_INTBUFFSIZE];
+ int16_t oldLagQ7, lagdeltaQ7, curLagQ7;
+ const int16_t* fracoeffQQ = NULL;
+ int16_t scale;
+ int16_t cnt = 0, tmpW16;
+ size_t frcQQ, indW16 = 0;
+ int32_t tmpW32, tmp2W32, csum1QQ, esumxQQ;
+
+ // Set up buffer and states.
+ memcpy(ubufQQ, pfp->ubufQQ, sizeof(pfp->ubufQQ));
+ oldLagQ7 = pfp->oldlagQ7;
+
+  // No interpolation if the pitch lag changes by more than a factor of 1.5
+  // in either direction.
+  if (((lagsQ7[0] * 3 >> 1) < oldLagQ7) || (lagsQ7[0] > (oldLagQ7 * 3 >> 1))) {
+ oldLagQ7 = lagsQ7[0];
+ }
+
+ ind = 0;
+ pos = ind + PITCH_BUFFSIZE;
+ scale = 0;
+ for (k = 0; k < PITCH_SUBFRAMES; k++) {
+
+ // Calculate interpolation steps.
+ lagdeltaQ7 = lagsQ7[k] - oldLagQ7;
+ lagdeltaQ7 = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(
+ lagdeltaQ7, kDivFactor, 15);
+ curLagQ7 = oldLagQ7;
+ oldLagQ7 = lagsQ7[k];
+
+ csum1QQ = 1;
+ esumxQQ = 1;
+
+    // As in WebRtcIsacfix_PitchFilter(), we break the pitch
+ // filtering into two for-loops (5 x 12) below.
+ for (cnt = 0; cnt < kSegments; cnt++) {
+ // Update parameters for each segment.
+ curLagQ7 += lagdeltaQ7;
+ indW16 = CalcLrIntQ(curLagQ7, 7);
+ frcQQ = ((indW16 << 7) + 64 - curLagQ7) >> 4;
+
+ if (frcQQ >= PITCH_FRACS) {
+ frcQQ = 0;
+ }
+ fracoeffQQ = kIntrpCoef[frcQQ];
+
+ pos3QQ = pos - (indW16 + 4);
+
+ for (n = 0; n < PITCH_SUBFRAME_LEN / kSegments; n++) {
+ // Filter to get fractional pitch.
+
+ tmpW32 = 0;
+ for (m = 0; m < PITCH_FRACORDER; m++) {
+ tmpW32 += ubufQQ[pos3QQ + m] * fracoeffQQ[m];
+ }
+
+ // Subtract from input and update buffer.
+ ubufQQ[pos] = indatQ0[ind];
+
+ tmp2W32 = WEBRTC_SPL_MUL_16_32_RSFT14(indatQ0[ind], tmpW32);
+ tmpW32 += 8192;
+ tmpW16 = tmpW32 >> 14;
+ tmpW32 = tmpW16 * tmpW16;
+
+        // The thresholds below are just under 2^30.
+        if ((tmp2W32 > 1073700000) || (csum1QQ > 1073700000) ||
+            (tmpW32 > 1073700000) || (esumxQQ > 1073700000)) {
+ scale++;
+ csum1QQ >>= 1;
+ esumxQQ >>= 1;
+ }
+ csum1QQ += tmp2W32 >> scale;
+ esumxQQ += tmpW32 >> scale;
+
+ ind++;
+ pos++;
+ pos3QQ++;
+ }
+ }
+
+ if (csum1QQ < esumxQQ) {
+ tmp2W32 = WebRtcSpl_DivResultInQ31(csum1QQ, esumxQQ);
+
+      // Gain should be half the correlation; the Q31 ratio shifted right
+      // by 20 bits equals 0.5 * (csum1QQ / esumxQQ) in Q12.
+      tmpW32 = tmp2W32 >> 20;
+    } else {
+      tmpW32 = 4096;  // 1.0 in Q12; saturated to PITCH_MAX_GAIN_Q12 below.
+ }
+ gainsQ12[k] = (int16_t)WEBRTC_SPL_SAT(PITCH_MAX_GAIN_Q12, tmpW32, 0);
+ }
+
+ // Export buffer and states.
+ memcpy(pfp->ubufQQ, ubufQQ + PITCH_FRAME_LEN, sizeof(pfp->ubufQQ));
+ pfp->oldlagQ7 = lagsQ7[PITCH_SUBFRAMES - 1];
+ pfp->oldgainQ12 = gainsQ12[PITCH_SUBFRAMES - 1];
+}
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/pitch_filter_armv6.S b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/pitch_filter_armv6.S
new file mode 100644
index 0000000000..065946856f
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/pitch_filter_armv6.S
@@ -0,0 +1,143 @@
+@
+@ Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+@
+@ Use of this source code is governed by a BSD-style license
+@ that can be found in the LICENSE file in the root of the source
+@ tree. An additional intellectual property rights grant can be found
+@ in the file PATENTS. All contributing project authors may
+@ be found in the AUTHORS file in the root of the source tree.
+@
+
+@ Contains the core loop routine for the pitch filter function in iSAC,
+@ optimized for ARMv6 platforms.
+@
+@ Output is bit-exact with the reference C code in pitch_filter.c.
+
+#include "modules/audio_coding/codecs/isac/fix/source/settings.h"
+#include "rtc_base/system/asm_defines.h"
+
+GLOBAL_FUNCTION WebRtcIsacfix_PitchFilterCore
+.align 2
+
+@ void WebRtcIsacfix_PitchFilterCore(int loopNumber,
+@ int16_t gain,
+@ size_t index,
+@ int16_t sign,
+@ int16_t* inputState,
+@ int16_t* outputBuf2,
+@ const int16_t* coefficient,
+@ int16_t* inputBuf,
+@ int16_t* outputBuf,
+@ int* index2) {
+DEFINE_FUNCTION WebRtcIsacfix_PitchFilterCore
+ push {r4-r11}
+ sub sp, #8
+
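+@ After the push and the sp adjustment above, the six stack arguments
+@ (inputState, outputBuf2, coefficient, inputBuf, outputBuf, index2)
+@ live at [sp, #40] .. [sp, #60].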
+ str r0, [sp] @ loopNumber
+ str r3, [sp, #4] @ sign
+ ldr r3, [sp, #44] @ outputBuf2
+ ldr r6, [sp, #60] @ index2
+ ldr r7, [r6] @ *index2
+ ldr r8, [sp, #52] @ inputBuf
+ ldr r12, [sp, #56] @ outputBuf
+
+ add r4, r7, r0
+ str r4, [r6] @ Store return value to index2.
+
+ mov r10, r7, asl #1
+ add r12, r10 @ &outputBuf[*index2]
+ add r8, r10 @ &inputBuf[*index2]
+
+ add r4, r7, #PITCH_BUFFSIZE @ *index2 + PITCH_BUFFSIZE
+ add r6, r3, r4, lsl #1 @ &outputBuf2[*index2 + PITCH_BUFFSIZE]
+ sub r4, r2 @ r2: index
+ sub r4, #2 @ *index2 + PITCH_BUFFSIZE - index - 2
+ add r3, r4, lsl #1 @ &ubufQQpos2[*index2]
+ ldr r9, [sp, #48] @ coefficient
+
+LOOP:
+@ Usage of registers in the loop:
+@ r0: loop counter
+@ r1: gain
+@ r2: tmpW32
+@ r3: &ubufQQpos2[]
+@ r6: &outputBuf2[]
+@ r8: &inputBuf[]
+@ r9: &coefficient[]
+@ r12: &outputBuf[]
+@ r4, r5, r7, r10, r11: scratch
+
+ @ Filter to get fractional pitch.
+  @ The pitch filter loop here is unrolled with 9 multiplications.
+ pld [r3]
+ ldr r10, [r3], #4 @ ubufQQpos2[*index2 + 0, *index2 + 1]
+ ldr r4, [r9], #4 @ coefficient[0, 1]
+ ldr r11, [r3], #4
+ ldr r5, [r9], #4
+ smuad r2, r10, r4
+ smlad r2, r11, r5, r2
+
+ ldr r10, [r3], #4
+ ldr r4, [r9], #4
+ ldr r11, [r3], #4
+ ldr r5, [r9], #4
+ smlad r2, r10, r4, r2
+ ldrh r10, [r3], #-14 @ r3 back to &ubufQQpos2[*index2].
+ ldrh r4, [r9], #-16 @ r9 back to &coefficient[0].
+ smlad r2, r11, r5, r2
+ smlabb r2, r10, r4, r2
+
+ @ Saturate to avoid overflow in tmpW16.
+ asr r2, #1
+ add r4, r2, #0x1000
+ ssat r7, #16, r4, asr #13
+
+  @ Shift the low pass filter state, and execute the low pass filter.
+ @ The memmove() and the low pass filter loop are unrolled and mixed.
+ smulbb r5, r1, r7
+ add r7, r5, #0x800
+ asr r7, #12 @ Get the value for inputState[0].
+ ldr r11, [sp, #40] @ inputState
+ pld [r11]
+ adr r10, kDampFilter
+ ldrsh r4, [r10], #2 @ kDampFilter[0]
+ mul r2, r7, r4
+ ldr r4, [r11] @ inputState[0, 1], before shift.
+ strh r7, [r11] @ inputState[0], after shift.
+ ldr r5, [r11, #4] @ inputState[2, 3], before shift.
+ ldr r7, [r10], #4 @ kDampFilter[1, 2]
+ ldr r10, [r10] @ kDampFilter[3, 4]
+ str r4, [r11, #2] @ inputState[1, 2], after shift.
+ str r5, [r11, #6] @ inputState[3, 4], after shift.
+ smlad r2, r4, r7, r2
+ smlad r2, r5, r10, r2
+
+ @ Saturate to avoid overflow.
+ @ First shift the sample to the range of [0xC0000000, 0x3FFFFFFF],
+ @ to avoid overflow in the next saturation step.
+ asr r2, #1
+ add r10, r2, #0x2000
+ ssat r10, #16, r10, asr #14
+
+ @ Subtract from input and update buffer.
+ ldr r11, [sp, #4] @ sign
+ ldrsh r4, [r8]
+ ldrsh r7, [r8], #2 @ inputBuf[*index2]
+ smulbb r5, r11, r10
+ subs r0, #1
+ sub r4, r5
+ ssat r2, #16, r4
+ strh r2, [r12], #2 @ outputBuf[*index2]
+
+ add r2, r7
+ ssat r2, #16, r2
+  strh r2, [r6], #2 @ outputBuf2[*index2 + PITCH_BUFFSIZE]
+ bgt LOOP
+
+ add sp, #8
+ pop {r4-r11}
+ bx lr
+
+.align 2
+kDampFilter:
+ .short -2294, 8192, 20972, 8192, -2294
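+@ These are the same taps as kDampFilter[] in pitch_filter_c.c.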
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/pitch_filter_c.c b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/pitch_filter_c.c
new file mode 100644
index 0000000000..f23d19de9c
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/pitch_filter_c.c
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+#include "modules/audio_coding/codecs/isac/fix/source/pitch_estimator.h"
+
+/* Damp filter coefficients in Q15: a symmetric 5-tap low pass whose taps
+   sum to 32768, i.e. unity DC gain. */
+static const int16_t kDampFilter[PITCH_DAMPORDER] = {
+ -2294, 8192, 20972, 8192, -2294
+};
+
+void WebRtcIsacfix_PitchFilterCore(int loopNumber,
+ int16_t gain,
+ size_t index,
+ int16_t sign,
+ int16_t* inputState,
+ int16_t* outputBuf2,
+ const int16_t* coefficient,
+ int16_t* inputBuf,
+ int16_t* outputBuf,
+ int* index2) {
+ int i = 0, j = 0; /* Loop counters. */
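+  /* Points (index + 2) samples back into the synthesis buffer outputBuf2;
+     the start of the PITCH_FRACORDER-tap fractional-pitch filter window. */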
+ int16_t* ubufQQpos2 = &outputBuf2[PITCH_BUFFSIZE - (index + 2)];
+ int16_t tmpW16 = 0;
+
+ for (i = 0; i < loopNumber; i++) {
+ int32_t tmpW32 = 0;
+
+ /* Filter to get fractional pitch. */
+ for (j = 0; j < PITCH_FRACORDER; j++) {
+ tmpW32 += ubufQQpos2[*index2 + j] * coefficient[j];
+ }
+
+ /* Saturate to avoid overflow in tmpW16. */
+ tmpW32 = WEBRTC_SPL_SAT(536862719, tmpW32, -536879104);
+ tmpW32 += 8192;
+ tmpW16 = (int16_t)(tmpW32 >> 14);
+
+ /* Shift low pass filter state. */
+ memmove(&inputState[1], &inputState[0],
+ (PITCH_DAMPORDER - 1) * sizeof(int16_t));
+ inputState[0] = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(
+ gain, tmpW16, 12);
+
+ /* Low pass filter. */
+ tmpW32 = 0;
+ /* TODO(kma): Define a static inline function WebRtcSpl_DotProduct()
+ in spl_inl.h to replace this and other similar loops. */
+ for (j = 0; j < PITCH_DAMPORDER; j++) {
+ tmpW32 += inputState[j] * kDampFilter[j];
+ }
+
+ /* Saturate to avoid overflow in tmpW16. */
+ tmpW32 = WEBRTC_SPL_SAT(1073725439, tmpW32, -1073758208);
+ tmpW32 += 16384;
+ tmpW16 = (int16_t)(tmpW32 >> 15);
+
+ /* Subtract from input and update buffer. */
+ tmpW32 = inputBuf[*index2] - sign * tmpW16;
+ outputBuf[*index2] = WebRtcSpl_SatW32ToW16(tmpW32);
+ tmpW32 = inputBuf[*index2] + outputBuf[*index2];
+ outputBuf2[*index2 + PITCH_BUFFSIZE] = WebRtcSpl_SatW32ToW16(tmpW32);
+
+ (*index2)++;
+ }
+}
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/pitch_filter_mips.c b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/pitch_filter_mips.c
new file mode 100644
index 0000000000..785fd9464f
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/pitch_filter_mips.c
@@ -0,0 +1,133 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/isac/fix/source/pitch_estimator.h"
+
+void WebRtcIsacfix_PitchFilterCore(int loopNumber,
+ int16_t gain,
+ size_t index,
+ int16_t sign,
+ int16_t* inputState,
+ int16_t* outputBuf2,
+ const int16_t* coefficient,
+ int16_t* inputBuf,
+ int16_t* outputBuf,
+ int* index2) {
+ int ind2t = *index2;
+ int i = 0;
+ int16_t* out2_pos2 = &outputBuf2[PITCH_BUFFSIZE - (index + 2)] + ind2t;
+ int32_t w1, w2, w3, w4, w5, gain32, sign32;
+ int32_t coef1, coef2, coef3, coef4, coef5 = 0;
+  // Damp filter taps kDampFilter[] = {-2294, 8192, 20972, 8192, -2294},
+  // packed as int32_t {hi, lo} halfword pairs for the dpa.w.ph instructions.
+  int32_t kDampF0 = 0x0000F70A;  // {0, -2294}
+  int32_t kDampF1 = 0x51EC2000;  // {20972, 8192}
+  int32_t kDampF2 = 0xF70A2000;  // {-2294, 8192}
+ int16_t* input1 = inputBuf + ind2t;
+ int16_t* output1 = outputBuf + ind2t;
+ int16_t* output2 = outputBuf2 + ind2t + PITCH_BUFFSIZE;
+
+ // Load coefficients outside the loop and sign-extend gain and sign
+ __asm __volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "lwl %[coef1], 3(%[coefficient]) \n\t"
+ "lwl %[coef2], 7(%[coefficient]) \n\t"
+ "lwl %[coef3], 11(%[coefficient]) \n\t"
+ "lwl %[coef4], 15(%[coefficient]) \n\t"
+ "lwr %[coef1], 0(%[coefficient]) \n\t"
+ "lwr %[coef2], 4(%[coefficient]) \n\t"
+ "lwr %[coef3], 8(%[coefficient]) \n\t"
+ "lwr %[coef4], 12(%[coefficient]) \n\t"
+ "lhu %[coef5], 16(%[coefficient]) \n\t"
+ "seh %[gain32], %[gain] \n\t"
+ "seh %[sign32], %[sign] \n\t"
+ ".set pop \n\t"
+ : [coef1] "=&r" (coef1), [coef2] "=&r" (coef2), [coef3] "=&r" (coef3),
+ [coef4] "=&r" (coef4), [coef5] "=&r" (coef5), [gain32] "=&r" (gain32),
+ [sign32] "=&r" (sign32)
+ : [coefficient] "r" (coefficient), [gain] "r" (gain),
+ [sign] "r" (sign)
+ : "memory"
+ );
+
+ for (i = 0; i < loopNumber; i++) {
+ __asm __volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ // Filter to get fractional pitch
+ "li %[w1], 8192 \n\t"
+ "mtlo %[w1] \n\t"
+ "mthi $0 \n\t"
+ "lwl %[w1], 3(%[out2_pos2]) \n\t"
+ "lwl %[w2], 7(%[out2_pos2]) \n\t"
+ "lwl %[w3], 11(%[out2_pos2]) \n\t"
+ "lwl %[w4], 15(%[out2_pos2]) \n\t"
+ "lwr %[w1], 0(%[out2_pos2]) \n\t"
+ "lwr %[w2], 4(%[out2_pos2]) \n\t"
+ "lwr %[w3], 8(%[out2_pos2]) \n\t"
+ "lwr %[w4], 12(%[out2_pos2]) \n\t"
+ "lhu %[w5], 16(%[out2_pos2]) \n\t"
+ "dpa.w.ph $ac0, %[w1], %[coef1] \n\t"
+ "dpa.w.ph $ac0, %[w2], %[coef2] \n\t"
+ "dpa.w.ph $ac0, %[w3], %[coef3] \n\t"
+ "dpa.w.ph $ac0, %[w4], %[coef4] \n\t"
+ "dpa.w.ph $ac0, %[w5], %[coef5] \n\t"
+ "addiu %[out2_pos2], %[out2_pos2], 2 \n\t"
+ "mthi $0, $ac1 \n\t"
+ "lwl %[w2], 3(%[inputState]) \n\t"
+ "lwl %[w3], 7(%[inputState]) \n\t"
+ // Fractional pitch shift & saturation
+ "extr_s.h %[w1], $ac0, 14 \n\t"
+ "li %[w4], 16384 \n\t"
+ "lwr %[w2], 0(%[inputState]) \n\t"
+ "lwr %[w3], 4(%[inputState]) \n\t"
+ "mtlo %[w4], $ac1 \n\t"
+ // Shift low pass filter state
+ "swl %[w2], 5(%[inputState]) \n\t"
+ "swl %[w3], 9(%[inputState]) \n\t"
+ "mul %[w1], %[gain32], %[w1] \n\t"
+ "swr %[w2], 2(%[inputState]) \n\t"
+ "swr %[w3], 6(%[inputState]) \n\t"
+ // Low pass filter accumulation
+ "dpa.w.ph $ac1, %[kDampF1], %[w2] \n\t"
+ "dpa.w.ph $ac1, %[kDampF2], %[w3] \n\t"
+ "lh %[w4], 0(%[input1]) \n\t"
+ "addiu %[input1], %[input1], 2 \n\t"
+ "shra_r.w %[w1], %[w1], 12 \n\t"
+ "sh %[w1], 0(%[inputState]) \n\t"
+ "dpa.w.ph $ac1, %[kDampF0], %[w1] \n\t"
+ // Low pass filter shift & saturation
+ "extr_s.h %[w2], $ac1, 15 \n\t"
+ "mul %[w2], %[w2], %[sign32] \n\t"
+ // Buffer update
+ "subu %[w2], %[w4], %[w2] \n\t"
+ "shll_s.w %[w2], %[w2], 16 \n\t"
+ "sra %[w2], %[w2], 16 \n\t"
+ "sh %[w2], 0(%[output1]) \n\t"
+ "addu %[w2], %[w2], %[w4] \n\t"
+ "shll_s.w %[w2], %[w2], 16 \n\t"
+ "addiu %[output1], %[output1], 2 \n\t"
+ "sra %[w2], %[w2], 16 \n\t"
+ "sh %[w2], 0(%[output2]) \n\t"
+ "addiu %[output2], %[output2], 2 \n\t"
+ ".set pop \n\t"
+ : [w1] "=&r" (w1), [w2] "=&r" (w2), [w3] "=&r" (w3), [w4] "=&r" (w4),
+ [w5] "=&r" (w5), [input1] "+r" (input1), [out2_pos2] "+r" (out2_pos2),
+ [output1] "+r" (output1), [output2] "+r" (output2)
+ : [coefficient] "r" (coefficient), [inputState] "r" (inputState),
+ [gain32] "r" (gain32), [sign32] "r" (sign32), [kDampF0] "r" (kDampF0),
+ [kDampF1] "r" (kDampF1), [kDampF2] "r" (kDampF2),
+ [coef1] "r" (coef1), [coef2] "r" (coef2), [coef3] "r" (coef3),
+ [coef4] "r" (coef4), [coef5] "r" (coef5)
+ : "hi", "lo", "$ac1hi", "$ac1lo", "memory"
+ );
+ }
+ (*index2) += loopNumber;
+}
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/pitch_gain_tables.c b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/pitch_gain_tables.c
new file mode 100644
index 0000000000..bfbab1950d
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/pitch_gain_tables.c
@@ -0,0 +1,149 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * pitch_gain_tables.c
+ *
+ * This file contains tables for the pitch filter side-info in the entropy coder.
+ *
+ */
+
+#include "modules/audio_coding/codecs/isac/fix/source/pitch_gain_tables.h"
+
+
+/********************* Pitch Filter Gain Coefficient Tables ************************/
+
+/* cdf for quantized pitch filter gains */
+const uint16_t WebRtcIsacfix_kPitchGainCdf[255] = {
+ 0, 2, 4, 6, 64, 901, 903, 905, 16954, 16956,
+ 16961, 17360, 17362, 17364, 17366, 17368, 17370, 17372, 17374, 17411,
+ 17514, 17516, 17583, 18790, 18796, 18802, 20760, 20777, 20782, 21722,
+ 21724, 21728, 21738, 21740, 21742, 21744, 21746, 21748, 22224, 22227,
+ 22230, 23214, 23229, 23239, 25086, 25108, 25120, 26088, 26094, 26098,
+ 26175, 26177, 26179, 26181, 26183, 26185, 26484, 26507, 26522, 27705,
+ 27731, 27750, 29767, 29799, 29817, 30866, 30883, 30885, 31025, 31029,
+ 31031, 31033, 31035, 31037, 31114, 31126, 31134, 32687, 32722, 32767,
+ 35718, 35742, 35757, 36943, 36952, 36954, 37115, 37128, 37130, 37132,
+ 37134, 37136, 37143, 37145, 37152, 38843, 38863, 38897, 47458, 47467,
+ 47474, 49040, 49061, 49063, 49145, 49157, 49159, 49161, 49163, 49165,
+ 49167, 49169, 49171, 49757, 49770, 49782, 61333, 61344, 61346, 62860,
+ 62883, 62885, 62887, 62889, 62891, 62893, 62895, 62897, 62899, 62901,
+ 62903, 62905, 62907, 62909, 65496, 65498, 65500, 65521, 65523, 65525,
+ 65527, 65529, 65531, 65533, 65535, 65535, 65535, 65535, 65535, 65535,
+ 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535,
+ 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535,
+ 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535,
+ 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535,
+ 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535,
+ 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535,
+ 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535,
+ 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535,
+ 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535,
+ 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535,
+ 65535, 65535, 65535, 65535, 65535
+};
+
+/* index limits and ranges */
+const int16_t WebRtcIsacfix_kLowerlimiGain[3] = {
+ -7, -2, -1
+};
+
+const int16_t WebRtcIsacfix_kUpperlimitGain[3] = {
+ 0, 3, 1
+};
+
+const uint16_t WebRtcIsacfix_kMultsGain[2] = {
+ 18, 3
+};
+
+/* size of cdf table */
+const uint16_t WebRtcIsacfix_kCdfTableSizeGain[1] = {
+ 256
+};
+
+/* mean values of pitch filter gains in FIXED point Q12 */
+const int16_t WebRtcIsacfix_kPitchGain1[144] = {
+ 843, 1092, 1336, 1222, 1405, 1656, 1500, 1815, 1843, 1838, 1839,
+ 1843, 1843, 1843, 1843, 1843, 1843, 1843, 814, 846, 1092, 1013,
+ 1174, 1383, 1391, 1511, 1584, 1734, 1753, 1843, 1843, 1843, 1843,
+ 1843, 1843, 1843, 524, 689, 777, 845, 947, 1069, 1090, 1263,
+ 1380, 1447, 1559, 1676, 1645, 1749, 1843, 1843, 1843, 1843, 81,
+ 477, 563, 611, 706, 806, 849, 1012, 1192, 1128, 1330, 1489,
+ 1425, 1576, 1826, 1741, 1843, 1843, 0, 290, 305, 356, 488,
+ 575, 602, 741, 890, 835, 1079, 1196, 1182, 1376, 1519, 1506,
+ 1680, 1843, 0, 47, 97, 69, 289, 381, 385, 474, 617,
+ 664, 803, 1079, 935, 1160, 1269, 1265, 1506, 1741, 0, 0,
+ 0, 0, 112, 120, 190, 283, 442, 343, 526, 809, 684,
+ 935, 1134, 1020, 1265, 1506, 0, 0, 0, 0, 0, 0,
+ 0, 111, 256, 87, 373, 597, 430, 684, 935, 770, 1020,
+ 1265
+};
+
+const int16_t WebRtcIsacfix_kPitchGain2[144] = {
+ 1760, 1525, 1285, 1747, 1671, 1393, 1843, 1826, 1555, 1843, 1784,
+ 1606, 1843, 1843, 1711, 1843, 1843, 1814, 1389, 1275, 1040, 1564,
+ 1414, 1252, 1610, 1495, 1343, 1753, 1592, 1405, 1804, 1720, 1475,
+ 1843, 1814, 1581, 1208, 1061, 856, 1349, 1148, 994, 1390, 1253,
+ 1111, 1495, 1343, 1178, 1770, 1465, 1234, 1814, 1581, 1342, 1040,
+ 793, 713, 1053, 895, 737, 1128, 1003, 861, 1277, 1094, 981,
+ 1475, 1192, 1019, 1581, 1342, 1098, 855, 570, 483, 833, 648,
+ 540, 948, 744, 572, 1009, 844, 636, 1234, 934, 685, 1342,
+ 1217, 984, 537, 318, 124, 603, 423, 350, 687, 479, 322,
+ 791, 581, 430, 987, 671, 488, 1098, 849, 597, 283, 27,
+ 0, 397, 222, 38, 513, 271, 124, 624, 325, 157, 737,
+ 484, 233, 849, 597, 343, 27, 0, 0, 141, 0, 0,
+ 256, 69, 0, 370, 87, 0, 484, 229, 0, 597, 343,
+ 87
+};
+
+const int16_t WebRtcIsacfix_kPitchGain3[144] = {
+ 1843, 1843, 1711, 1843, 1818, 1606, 1843, 1827, 1511, 1814, 1639,
+ 1393, 1760, 1525, 1285, 1656, 1419, 1176, 1835, 1718, 1475, 1841,
+ 1650, 1387, 1648, 1498, 1287, 1600, 1411, 1176, 1522, 1299, 1040,
+ 1419, 1176, 928, 1773, 1461, 1128, 1532, 1355, 1202, 1429, 1260,
+ 1115, 1398, 1151, 1025, 1172, 1080, 790, 1176, 928, 677, 1475,
+ 1147, 1019, 1276, 1096, 922, 1214, 1010, 901, 1057, 893, 800,
+ 1040, 796, 734, 928, 677, 424, 1137, 897, 753, 1120, 830,
+ 710, 875, 751, 601, 795, 642, 583, 790, 544, 475, 677,
+ 474, 140, 987, 750, 482, 697, 573, 450, 691, 487, 303,
+ 661, 394, 332, 537, 303, 220, 424, 168, 0, 737, 484,
+ 229, 624, 348, 153, 441, 261, 136, 397, 166, 51, 283,
+ 27, 0, 168, 0, 0, 484, 229, 0, 370, 57, 0,
+ 256, 43, 0, 141, 0, 0, 27, 0, 0, 0, 0,
+ 0
+};
+
+
+const int16_t WebRtcIsacfix_kPitchGain4[144] = {
+ 1843, 1843, 1843, 1843, 1841, 1843, 1500, 1821, 1843, 1222, 1434,
+ 1656, 843, 1092, 1336, 504, 757, 1007, 1843, 1843, 1843, 1838,
+ 1791, 1843, 1265, 1505, 1599, 965, 1219, 1425, 730, 821, 1092,
+ 249, 504, 757, 1783, 1819, 1843, 1351, 1567, 1727, 1096, 1268,
+ 1409, 805, 961, 1131, 444, 670, 843, 0, 249, 504, 1425,
+ 1655, 1743, 1096, 1324, 1448, 822, 1019, 1199, 490, 704, 867,
+ 81, 450, 555, 0, 0, 249, 1247, 1428, 1530, 881, 1073,
+ 1283, 610, 759, 939, 278, 464, 645, 0, 200, 270, 0,
+ 0, 0, 935, 1163, 1410, 528, 790, 1068, 377, 499, 717,
+ 173, 240, 274, 0, 43, 62, 0, 0, 0, 684, 935,
+ 1182, 343, 551, 735, 161, 262, 423, 0, 55, 27, 0,
+ 0, 0, 0, 0, 0, 430, 684, 935, 87, 377, 597,
+ 0, 46, 256, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0
+};
+
+
+
+/* transform matrix in Q12 */
+const int16_t WebRtcIsacfix_kTransform[4][4] = {
+ { -2048, -2048, -2048, -2048 },
+ { 2748, 916, -916, -2748 },
+ { 2048, -2048, -2048, 2048 },
+ { 916, -2748, 2748, -916 }
+};
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/pitch_gain_tables.h b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/pitch_gain_tables.h
new file mode 100644
index 0000000000..59e1738bce
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/pitch_gain_tables.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * pitch_gain_tables.h
+ *
+ * This file contains tables for the pitch filter side-info in the entropy
+ * coder.
+ *
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_PITCH_GAIN_TABLES_H_
+#define MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_PITCH_GAIN_TABLES_H_
+
+#include <stdint.h>
+
+/**************** Pitch Filter Gain Coefficient Tables ****************/
+/* cdf for quantized pitch filter gains */
+extern const uint16_t WebRtcIsacfix_kPitchGainCdf[255];
+
+/* index limits and ranges */
+extern const int16_t WebRtcIsacfix_kLowerlimiGain[3];
+extern const int16_t WebRtcIsacfix_kUpperlimitGain[3];
+extern const uint16_t WebRtcIsacfix_kMultsGain[2];
+
+/* mean values of pitch filter gains in Q12*/
+extern const int16_t WebRtcIsacfix_kPitchGain1[144];
+extern const int16_t WebRtcIsacfix_kPitchGain2[144];
+extern const int16_t WebRtcIsacfix_kPitchGain3[144];
+extern const int16_t WebRtcIsacfix_kPitchGain4[144];
+
+/* size of cdf table */
+extern const uint16_t WebRtcIsacfix_kCdfTableSizeGain[1];
+
+/* transform matrix */
+extern const int16_t WebRtcIsacfix_kTransform[4][4];
+
+#endif /* MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_PITCH_GAIN_TABLES_H_ */
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/pitch_lag_tables.c b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/pitch_lag_tables.c
new file mode 100644
index 0000000000..894716e739
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/pitch_lag_tables.c
@@ -0,0 +1,306 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * pitch_lag_tables.c
+ *
+ * This file contains tables for the pitch filter side-info in the entropy coder.
+ *
+ */
+
+#include "modules/audio_coding/codecs/isac/fix/source/settings.h"
+#include "modules/audio_coding/codecs/isac/fix/source/pitch_lag_tables.h"
+
+
+/********************* Pitch Filter Lag Coefficient Tables ************************/
+
+/* tables for use with small pitch gain */
+
+/* cdf for quantized pitch filter lags */
+const uint16_t WebRtcIsacfix_kPitchLagCdf1Lo[127] = {
+ 0, 134, 336, 549, 778, 998, 1264, 1512, 1777, 2070,
+ 2423, 2794, 3051, 3361, 3708, 3979, 4315, 4610, 4933, 5269,
+ 5575, 5896, 6155, 6480, 6816, 7129, 7477, 7764, 8061, 8358,
+ 8718, 9020, 9390, 9783, 10177, 10543, 10885, 11342, 11795, 12213,
+ 12680, 13096, 13524, 13919, 14436, 14903, 15349, 15795, 16267, 16734,
+ 17266, 17697, 18130, 18632, 19080, 19447, 19884, 20315, 20735, 21288,
+ 21764, 22264, 22723, 23193, 23680, 24111, 24557, 25022, 25537, 26082,
+ 26543, 27090, 27620, 28139, 28652, 29149, 29634, 30175, 30692, 31273,
+ 31866, 32506, 33059, 33650, 34296, 34955, 35629, 36295, 36967, 37726,
+ 38559, 39458, 40364, 41293, 42256, 43215, 44231, 45253, 46274, 47359,
+ 48482, 49678, 50810, 51853, 53016, 54148, 55235, 56263, 57282, 58363,
+ 59288, 60179, 61076, 61806, 62474, 63129, 63656, 64160, 64533, 64856,
+ 65152, 65535, 65535, 65535, 65535, 65535, 65535
+};
+
+const uint16_t WebRtcIsacfix_kPitchLagCdf2Lo[20] = {
+ 0, 429, 3558, 5861, 8558, 11639, 15210, 19502, 24773, 31983,
+ 42602, 48567, 52601, 55676, 58160, 60172, 61889, 63235, 65383, 65535
+};
+
+const uint16_t WebRtcIsacfix_kPitchLagCdf3Lo[2] = {
+ 0, 65535
+};
+
+const uint16_t WebRtcIsacfix_kPitchLagCdf4Lo[10] = {
+ 0, 2966, 6368, 11182, 19431, 37793, 48532, 55353, 60626, 65535
+};
+
+const uint16_t *WebRtcIsacfix_kPitchLagPtrLo[4] = {
+ WebRtcIsacfix_kPitchLagCdf1Lo,
+ WebRtcIsacfix_kPitchLagCdf2Lo,
+ WebRtcIsacfix_kPitchLagCdf3Lo,
+ WebRtcIsacfix_kPitchLagCdf4Lo
+};
+
+/* size of first cdf table */
+const uint16_t WebRtcIsacfix_kPitchLagSizeLo[1] = {
+ 128
+};
+
+/* index limits and ranges */
+const int16_t WebRtcIsacfix_kLowerLimitLo[4] = {
+ -140, -9, 0, -4
+};
+
+const int16_t WebRtcIsacfix_kUpperLimitLo[4] = {
+ -20, 9, 0, 4
+};
+
+/* initial index for arithmetic decoder */
+const uint16_t WebRtcIsacfix_kInitIndLo[3] = {
+ 10, 1, 5
+};
+
+/* mean values of pitch filter lags in Q10 */
+
+const int16_t WebRtcIsacfix_kMeanLag2Lo[19] = {
+ -17627, -16207, -14409, -12319, -10253, -8200, -6054, -3986, -1948, -19,
+ 1937, 3974, 6064, 8155, 10229, 12270, 14296, 16127, 17520
+};
+
+const int16_t WebRtcIsacfix_kMeanLag4Lo[9] = {
+ -7949, -6063, -4036, -1941, 38, 1977, 4060, 6059
+};
+
+
+
+/* tables for use with medium pitch gain */
+
+/* cdf for quantized pitch filter lags */
+const uint16_t WebRtcIsacfix_kPitchLagCdf1Mid[255] = {
+ 0, 28, 61, 88, 121, 149, 233, 331, 475, 559,
+ 624, 661, 689, 712, 745, 791, 815, 843, 866, 922,
+ 959, 1024, 1061, 1117, 1178, 1238, 1280, 1350, 1453, 1513,
+ 1564, 1625, 1671, 1741, 1788, 1904, 2072, 2421, 2626, 2770,
+ 2840, 2900, 2942, 3012, 3068, 3115, 3147, 3194, 3254, 3319,
+ 3366, 3520, 3678, 3780, 3850, 3911, 3957, 4032, 4106, 4185,
+ 4292, 4474, 4683, 4842, 5019, 5191, 5321, 5428, 5540, 5675,
+ 5763, 5847, 5959, 6127, 6304, 6564, 6839, 7090, 7263, 7421,
+ 7556, 7728, 7872, 7984, 8142, 8361, 8580, 8743, 8938, 9227,
+ 9409, 9539, 9674, 9795, 9930, 10060, 10177, 10382, 10614, 10861,
+ 11038, 11271, 11415, 11629, 11792, 12044, 12193, 12416, 12574, 12821,
+ 13007, 13235, 13445, 13654, 13901, 14134, 14488, 15000, 15703, 16285,
+ 16504, 16797, 17086, 17328, 17579, 17807, 17998, 18268, 18538, 18836,
+ 19087, 19274, 19474, 19716, 19935, 20270, 20833, 21303, 21532, 21741,
+ 21978, 22207, 22523, 22770, 23054, 23613, 23943, 24204, 24399, 24651,
+ 24832, 25074, 25270, 25549, 25759, 26015, 26150, 26424, 26713, 27048,
+ 27342, 27504, 27681, 27854, 28021, 28207, 28412, 28664, 28859, 29064,
+ 29278, 29548, 29748, 30107, 30377, 30656, 30856, 31164, 31452, 31755,
+ 32011, 32328, 32626, 32919, 33319, 33789, 34329, 34925, 35396, 35973,
+ 36443, 36964, 37551, 38156, 38724, 39357, 40023, 40908, 41587, 42602,
+ 43924, 45037, 45810, 46597, 47421, 48291, 49092, 50051, 51448, 52719,
+ 53440, 54241, 54944, 55977, 56676, 57299, 57872, 58389, 59059, 59688,
+ 60237, 60782, 61094, 61573, 61890, 62290, 62658, 63030, 63217, 63454,
+ 63622, 63882, 64003, 64273, 64427, 64529, 64581, 64697, 64758, 64902,
+ 65414, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535,
+ 65535, 65535, 65535, 65535, 65535
+};
+
+const uint16_t WebRtcIsacfix_kPitchLagCdf2Mid[36] = {
+ 0, 71, 335, 581, 836, 1039, 1323, 1795, 2258, 2608,
+ 3005, 3591, 4243, 5344, 7163, 10583, 16848, 28078, 49448, 57007,
+ 60357, 61850, 62837, 63437, 63872, 64188, 64377, 64614, 64774, 64949,
+ 65039, 65115, 65223, 65360, 65474, 65535
+};
+
+const uint16_t WebRtcIsacfix_kPitchLagCdf3Mid[2] = {
+ 0, 65535
+};
+
+const uint16_t WebRtcIsacfix_kPitchLagCdf4Mid[20] = {
+ 0, 28, 246, 459, 667, 1045, 1523, 2337, 4337, 11347,
+ 44231, 56709, 60781, 62243, 63161, 63969, 64608, 65062, 65502, 65535
+};
+
+const uint16_t *WebRtcIsacfix_kPitchLagPtrMid[4] = {
+ WebRtcIsacfix_kPitchLagCdf1Mid,
+ WebRtcIsacfix_kPitchLagCdf2Mid,
+ WebRtcIsacfix_kPitchLagCdf3Mid,
+ WebRtcIsacfix_kPitchLagCdf4Mid
+};
+
+/* size of first cdf table */
+const uint16_t WebRtcIsacfix_kPitchLagSizeMid[1] = {
+ 256
+};
+
+/* index limits and ranges */
+const int16_t WebRtcIsacfix_kLowerLimitMid[4] = {
+ -280, -17, 0, -9
+};
+
+const int16_t WebRtcIsacfix_kUpperLimitMid[4] = {
+ -40, 17, 0, 9
+};
+
+/* initial index for arithmetic decoder */
+const uint16_t WebRtcIsacfix_kInitIndMid[3] = {
+ 18, 1, 10
+};
+
+/* mean values of pitch filter lags in Q10 */
+
+const int16_t WebRtcIsacfix_kMeanLag2Mid[35] = {
+ -17297, -16250, -15416, -14343, -13341, -12363, -11270,
+ -10355, -9122, -8217, -7172, -6083, -5102, -4004, -3060,
+ -1982, -952, -18, 935, 1976, 3040, 4032,
+ 5082, 6065, 7257, 8202, 9264, 10225, 11242,
+ 12234, 13337, 14336, 15374, 16187, 17347
+};
+
+
+const int16_t WebRtcIsacfix_kMeanLag4Mid[19] = {
+ -8811, -8081, -7203, -6003, -5057, -4025, -2983, -1964,
+ -891, 29, 921, 1920, 2988, 4064, 5187, 6079, 7173, 8074, 8849
+};
+
+
+/* tables for use with large pitch gain */
+
+/* cdf for quantized pitch filter lags */
+const uint16_t WebRtcIsacfix_kPitchLagCdf1Hi[511] = {
+ 0, 7, 18, 33, 69, 105, 156, 228, 315, 612,
+ 680, 691, 709, 724, 735, 738, 742, 746, 749, 753,
+ 756, 760, 764, 774, 782, 785, 789, 796, 800, 803,
+ 807, 814, 818, 822, 829, 832, 847, 854, 858, 869,
+ 876, 883, 898, 908, 934, 977, 1010, 1050, 1060, 1064,
+ 1075, 1078, 1086, 1089, 1093, 1104, 1111, 1122, 1133, 1136,
+ 1151, 1162, 1183, 1209, 1252, 1281, 1339, 1364, 1386, 1401,
+ 1411, 1415, 1426, 1430, 1433, 1440, 1448, 1455, 1462, 1477,
+ 1487, 1495, 1502, 1506, 1509, 1516, 1524, 1531, 1535, 1542,
+ 1553, 1556, 1578, 1589, 1611, 1625, 1639, 1643, 1654, 1665,
+ 1672, 1687, 1694, 1705, 1708, 1719, 1730, 1744, 1752, 1759,
+ 1791, 1795, 1820, 1867, 1886, 1915, 1936, 1943, 1965, 1987,
+ 2041, 2099, 2161, 2175, 2200, 2211, 2226, 2233, 2244, 2251,
+ 2266, 2280, 2287, 2298, 2309, 2316, 2331, 2342, 2356, 2378,
+ 2403, 2418, 2447, 2497, 2544, 2602, 2863, 2895, 2903, 2935,
+ 2950, 2971, 3004, 3011, 3018, 3029, 3040, 3062, 3087, 3127,
+ 3152, 3170, 3199, 3243, 3293, 3322, 3340, 3377, 3402, 3427,
+ 3474, 3518, 3543, 3579, 3601, 3637, 3659, 3706, 3731, 3760,
+ 3818, 3847, 3869, 3901, 3920, 3952, 4068, 4169, 4220, 4271,
+ 4524, 4571, 4604, 4632, 4672, 4730, 4777, 4806, 4857, 4904,
+ 4951, 5002, 5031, 5060, 5107, 5150, 5212, 5266, 5331, 5382,
+ 5432, 5490, 5544, 5610, 5700, 5762, 5812, 5874, 5972, 6022,
+ 6091, 6163, 6232, 6305, 6402, 6540, 6685, 6880, 7090, 7271,
+ 7379, 7452, 7542, 7625, 7687, 7770, 7843, 7911, 7966, 8024,
+ 8096, 8190, 8252, 8320, 8411, 8501, 8585, 8639, 8751, 8842,
+ 8918, 8986, 9066, 9127, 9203, 9269, 9345, 9406, 9464, 9536,
+ 9612, 9667, 9735, 9844, 9931, 10036, 10119, 10199, 10260, 10358,
+ 10441, 10514, 10666, 10734, 10872, 10951, 11053, 11125, 11223, 11324,
+ 11516, 11664, 11737, 11816, 11892, 12008, 12120, 12200, 12280, 12392,
+ 12490, 12576, 12685, 12812, 12917, 13003, 13108, 13210, 13300, 13384,
+ 13470, 13579, 13673, 13771, 13879, 13999, 14136, 14201, 14368, 14614,
+ 14759, 14867, 14958, 15030, 15121, 15189, 15280, 15385, 15461, 15555,
+ 15653, 15768, 15884, 15971, 16069, 16145, 16210, 16279, 16380, 16463,
+ 16539, 16615, 16688, 16818, 16919, 17017, 18041, 18338, 18523, 18649,
+ 18790, 18917, 19047, 19167, 19315, 19460, 19601, 19731, 19858, 20068,
+ 20173, 20318, 20466, 20625, 20741, 20911, 21045, 21201, 21396, 21588,
+ 21816, 22022, 22305, 22547, 22786, 23072, 23322, 23600, 23879, 24168,
+ 24433, 24769, 25120, 25511, 25895, 26289, 26792, 27219, 27683, 28077,
+ 28566, 29094, 29546, 29977, 30491, 30991, 31573, 32105, 32594, 33173,
+ 33788, 34497, 35181, 35833, 36488, 37255, 37921, 38645, 39275, 39894,
+ 40505, 41167, 41790, 42431, 43096, 43723, 44385, 45134, 45858, 46607,
+ 47349, 48091, 48768, 49405, 49955, 50555, 51167, 51985, 52611, 53078,
+ 53494, 53965, 54435, 54996, 55601, 56125, 56563, 56838, 57244, 57566,
+ 57967, 58297, 58771, 59093, 59419, 59647, 59886, 60143, 60461, 60693,
+ 60917, 61170, 61416, 61634, 61891, 62122, 62310, 62455, 62632, 62839,
+ 63103, 63436, 63639, 63805, 63906, 64015, 64192, 64355, 64475, 64558,
+ 64663, 64742, 64811, 64865, 64916, 64956, 64981, 65025, 65068, 65115,
+ 65195, 65314, 65419, 65535, 65535, 65535, 65535, 65535, 65535, 65535,
+ 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535,
+ 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535,
+ 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535,
+ 65535
+};
+
+const uint16_t WebRtcIsacfix_kPitchLagCdf2Hi[68] = {
+ 0, 7, 11, 22, 37, 52, 56, 59, 81, 85,
+ 89, 96, 115, 130, 137, 152, 170, 181, 193, 200,
+ 207, 233, 237, 259, 289, 318, 363, 433, 592, 992,
+ 1607, 3062, 6149, 12206, 25522, 48368, 58223, 61918, 63640, 64584,
+ 64943, 65098, 65206, 65268, 65294, 65335, 65350, 65372, 65387, 65402,
+ 65413, 65420, 65428, 65435, 65439, 65450, 65454, 65468, 65472, 65476,
+ 65483, 65491, 65498, 65505, 65516, 65520, 65528, 65535
+};
+
+const uint16_t WebRtcIsacfix_kPitchLagCdf3Hi[2] = {
+ 0, 65535
+};
+
+const uint16_t WebRtcIsacfix_kPitchLagCdf4Hi[35] = {
+ 0, 7, 19, 30, 41, 48, 63, 74, 82, 96,
+ 122, 152, 215, 330, 701, 2611, 10931, 48106, 61177, 64341,
+ 65112, 65238, 65309, 65338, 65364, 65379, 65401, 65427, 65453,
+ 65465, 65476, 65490, 65509, 65528, 65535
+};
+
+const uint16_t *WebRtcIsacfix_kPitchLagPtrHi[4] = {
+ WebRtcIsacfix_kPitchLagCdf1Hi,
+ WebRtcIsacfix_kPitchLagCdf2Hi,
+ WebRtcIsacfix_kPitchLagCdf3Hi,
+ WebRtcIsacfix_kPitchLagCdf4Hi
+};
+
+/* size of first cdf table */
+const uint16_t WebRtcIsacfix_kPitchLagSizeHi[1] = {
+ 512
+};
+
+/* index limits and ranges */
+const int16_t WebRtcIsacfix_kLowerLimitHi[4] = {
+ -552, -34, 0, -16
+};
+
+const int16_t WebRtcIsacfix_kUpperLimitHi[4] = {
+ -80, 32, 0, 17
+};
+
+/* initial index for arithmetic decoder */
+const uint16_t WebRtcIsacfix_kInitIndHi[3] = {
+ 34, 1, 18
+};
+
+/* mean values of pitch filter lags */
+
+const int16_t WebRtcIsacfix_kMeanLag2Hi[67] = {
+ -17482, -16896, -16220, -15929, -15329, -14848, -14336, -13807, -13312, -12800, -12218, -11720,
+ -11307, -10649, -10396, -9742, -9148, -8668, -8297, -7718, -7155, -6656, -6231, -5600, -5129,
+ -4610, -4110, -3521, -3040, -2525, -2016, -1506, -995, -477, -5, 469, 991, 1510, 2025, 2526, 3079,
+ 3555, 4124, 4601, 5131, 5613, 6194, 6671, 7140, 7645, 8207, 8601, 9132, 9728, 10359, 10752, 11302,
+ 11776, 12288, 12687, 13204, 13759, 14295, 14810, 15360, 15764, 16350
+};
+
+
+const int16_t WebRtcIsacfix_kMeanLag4Hi[34] = {
+ -8175, -7659, -7205, -6684, -6215, -5651, -5180, -4566, -4087, -3536, -3096,
+ -2532, -1990, -1482, -959, -440, 11, 451, 954, 1492, 2020, 2562, 3059,
+ 3577, 4113, 4618, 5134, 5724, 6060, 6758, 7015, 7716, 8066, 8741
+};
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/pitch_lag_tables.h b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/pitch_lag_tables.h
new file mode 100644
index 0000000000..228da26731
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/pitch_lag_tables.h
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * pitch_lag_tables.h
+ *
+ * This file contains tables for the pitch filter side-info in the entropy
+ * coder.
+ *
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_PITCH_LAG_TABLES_H_
+#define MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_PITCH_LAG_TABLES_H_
+
+#include <stdint.h>
+
+/**************** Pitch Filter Lag Coefficient Tables ****************/
+
+/* tables for use with small pitch gain */
+
+/* cdfs for quantized pitch lags */
+extern const uint16_t WebRtcIsacfix_kPitchLagCdf1Lo[127];
+extern const uint16_t WebRtcIsacfix_kPitchLagCdf2Lo[20];
+extern const uint16_t WebRtcIsacfix_kPitchLagCdf3Lo[2];
+extern const uint16_t WebRtcIsacfix_kPitchLagCdf4Lo[10];
+
+extern const uint16_t* WebRtcIsacfix_kPitchLagPtrLo[4];
+
+/* size of first cdf table */
+extern const uint16_t WebRtcIsacfix_kPitchLagSizeLo[1];
+
+/* index limits and ranges */
+extern const int16_t WebRtcIsacfix_kLowerLimitLo[4];
+extern const int16_t WebRtcIsacfix_kUpperLimitLo[4];
+
+/* initial index for arithmetic decoder */
+extern const uint16_t WebRtcIsacfix_kInitIndLo[3];
+
+/* mean values of pitch filter lags */
+extern const int16_t WebRtcIsacfix_kMeanLag2Lo[19];
+extern const int16_t WebRtcIsacfix_kMeanLag4Lo[9];
+
+/* tables for use with medium pitch gain */
+
+/* cdfs for quantized pitch lags */
+extern const uint16_t WebRtcIsacfix_kPitchLagCdf1Mid[255];
+extern const uint16_t WebRtcIsacfix_kPitchLagCdf2Mid[36];
+extern const uint16_t WebRtcIsacfix_kPitchLagCdf3Mid[2];
+extern const uint16_t WebRtcIsacfix_kPitchLagCdf4Mid[20];
+
+extern const uint16_t* WebRtcIsacfix_kPitchLagPtrMid[4];
+
+/* size of first cdf table */
+extern const uint16_t WebRtcIsacfix_kPitchLagSizeMid[1];
+
+/* index limits and ranges */
+extern const int16_t WebRtcIsacfix_kLowerLimitMid[4];
+extern const int16_t WebRtcIsacfix_kUpperLimitMid[4];
+
+/* initial index for arithmetic decoder */
+extern const uint16_t WebRtcIsacfix_kInitIndMid[3];
+
+/* mean values of pitch filter lags */
+extern const int16_t WebRtcIsacfix_kMeanLag2Mid[35];
+extern const int16_t WebRtcIsacfix_kMeanLag4Mid[19];
+
+/* tables for use with large pitch gain */
+
+/* cdfs for quantized pitch lags */
+extern const uint16_t WebRtcIsacfix_kPitchLagCdf1Hi[511];
+extern const uint16_t WebRtcIsacfix_kPitchLagCdf2Hi[68];
+extern const uint16_t WebRtcIsacfix_kPitchLagCdf3Hi[2];
+extern const uint16_t WebRtcIsacfix_kPitchLagCdf4Hi[35];
+
+extern const uint16_t* WebRtcIsacfix_kPitchLagPtrHi[4];
+
+/* size of first cdf table */
+extern const uint16_t WebRtcIsacfix_kPitchLagSizeHi[1];
+
+/* index limits and ranges */
+extern const int16_t WebRtcIsacfix_kLowerLimitHi[4];
+extern const int16_t WebRtcIsacfix_kUpperLimitHi[4];
+
+/* initial index for arithmetic decoder */
+extern const uint16_t WebRtcIsacfix_kInitIndHi[3];
+
+/* mean values of pitch filter lags */
+extern const int16_t WebRtcIsacfix_kMeanLag2Hi[67];
+extern const int16_t WebRtcIsacfix_kMeanLag4Hi[34];
+
+#endif /* MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_PITCH_LAG_TABLES_H_ */
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/settings.h b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/settings.h
new file mode 100644
index 0000000000..03a2d05457
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/settings.h
@@ -0,0 +1,211 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * settings.h
+ *
+ * Declaration of #defines used in the iSAC codec
+ *
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_SETTINGS_H_
+#define MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_SETTINGS_H_
+
+/* sampling frequency (Hz) */
+#define FS 16000
+/* 1.5 times the sampling frequency */
+#define FS_1_HALF (uint32_t)24000
+/* Three times the sampling frequency */
+#define FS3 (uint32_t)48000
+/* Eight times the sampling frequency */
+#define FS8 (uint32_t)128000
+
+/* number of samples per frame (either 480 (30ms) or 960 (60ms)) */
+#define INITIAL_FRAMESAMPLES 960
+
+/* milliseconds */
+#define FRAMESIZE 30
+/* number of samples per frame processed in the encoder (30ms) */
+#define FRAMESAMPLES 480 /* ((FRAMESIZE*FS)/1000) */
+#define FRAMESAMPLES_HALF 240
+/* max number of samples per frame (= 60 ms frame) */
+#define MAX_FRAMESAMPLES 960
+/* number of samples per 10ms frame */
+#define FRAMESAMPLES_10ms 160 /* ((10*FS)/1000) */
+/* Number of samples per 1 ms */
+#define SAMPLES_PER_MSEC 16
+/* number of subframes */
+#define SUBFRAMES 6
+/* length of a subframe */
+#define UPDATE 80
+/* length of half a subframe (low/high band) */
+#define HALF_SUBFRAMELEN 40 /* (UPDATE/2) */
+/* samples of look ahead (in a half-band, so actually half the samples of look
+ * ahead @ FS) */
+#define QLOOKAHEAD 24 /* 3 ms */
+
+/* order of AR model in spectral entropy coder */
+#define AR_ORDER 6
+#define MAX_ORDER 13
+#define LEVINSON_MAX_ORDER 12
+
+/* window length (masking analysis) */
+#define WINLEN 256
+/* order of low-band pole filter used to approximate masking curve */
+#define ORDERLO 12
+/* order of hi-band pole filter used to approximate masking curve */
+#define ORDERHI 6
+
+#define KLT_NUM_AVG_GAIN 0
+#define KLT_NUM_AVG_SHAPE 0
+#define KLT_NUM_MODELS 3
+#define LPC_SHAPE_ORDER 18 /* (ORDERLO + ORDERHI) */
+
+#define KLT_ORDER_GAIN 12 /* (2 * SUBFRAMES) */
+#define KLT_ORDER_SHAPE 108 /* (LPC_SHAPE_ORDER * SUBFRAMES) */
+
+/* order for post_filter_bank */
+#define POSTQORDER 3
+/* order for pre-filterbank */
+#define QORDER 3
+/* for decimator */
+#define ALLPASSSECTIONS 2
+/* The number of composite all-pass filter factors */
+#define NUMBEROFCOMPOSITEAPSECTIONS 4
+
+/* The number of all-pass filter factors in an upper or lower channel*/
+#define NUMBEROFCHANNELAPSECTIONS 2
+
+#define DPMIN_Q10 -10240 /* -10.00 in Q10 */
+#define DPMAX_Q10 10240 /* 10.00 in Q10 */
+#define MINBITS_Q10 10240 /* 10.0 in Q10 */
+
+/* array size for byte stream in number of Word16. */
+#define STREAM_MAXW16 \
+  300 /* The old maximum size, still needed for decoding */
+#define STREAM_MAXW16_30MS \
+  100 /* 100 Word16 = 200 bytes = 53.4 kbit/s @ 30 ms frame length */
+#define STREAM_MAXW16_60MS \
+  200 /* 200 Word16 = 400 bytes = 53.4 kbit/s @ 60 ms frame length */
+/* This is used only for the decoder bit-stream struct.
+ * - The encoder and decoder bitstream containers are of different sizes
+ *   because old iSAC limited the encoded bitstream to 600 bytes, while
+ *   newer versions restrict it to a shorter length.
+ * - We add 10 guard bytes (5 Word16) to the internal bitstream container,
+ *   because the entropy decoder may read a few bytes (3, according to our
+ *   observations) beyond the actual end of the bitstream. The guard keeps
+ *   such reads inside allocated memory even for a full-size bitstream. */
+#define INTERNAL_STREAM_SIZE_W16 (STREAM_MAXW16 + 5)
+
+/* storage size for bit counts */
+//#define BIT_COUNTER_SIZE 30
+/* maximum order of any AR model or filter */
+#define MAX_AR_MODEL_ORDER 12
+
+/* Maximum number of iterations allowed to limit payload size */
+#define MAX_PAYLOAD_LIMIT_ITERATION 1
+
+/* Bandwidth estimator */
+
+#define MIN_ISAC_BW 10000 /* Minimum bandwidth in bits per sec */
+#define MAX_ISAC_BW 32000 /* Maximum bandwidth in bits per sec */
+#define MIN_ISAC_MD 5 /* Minimum Max Delay in ?? */
+#define MAX_ISAC_MD 25 /* Maximum Max Delay in ?? */
+#define DELAY_CORRECTION_MAX 717
+#define DELAY_CORRECTION_MED 819
+#define Thld_30_60 18000
+#define Thld_60_30 27000
+
+/* assumed header size; we don't know the exact number (header compression may
+ * be used) */
+#define HEADER_SIZE 35 /* bytes */
+#define INIT_FRAME_LEN 60
+#define INIT_BN_EST 20000
+#define INIT_BN_EST_Q7 2560000 /* 20 kbps in Q7 */
+#define INIT_REC_BN_EST_Q5 789312 /* INIT_BN_EST + INIT_HDR_RATE in Q5 */
+
+/* 8738 in Q18 is ~ 1/30 */
+/* #define INIT_HDR_RATE (((HEADER_SIZE * 8 * 1000) * 8738) >> NUM_BITS_TO_SHIFT
+ * (INIT_FRAME_LEN)) */
+#define INIT_HDR_RATE 4666
+/* number of packets in a row for a high rate burst */
+#define BURST_LEN 3
+/* ms, max time between two full bursts */
+#define BURST_INTERVAL 800
+/* number of packets in a row for initial high rate burst */
+#define INIT_BURST_LEN 5
+/* bits/s, rate for the first BURST_LEN packets */
+#define INIT_RATE 10240000 /* INIT_BN_EST in Q9 */
+
+/* For pitch analysis */
+#define PITCH_FRAME_LEN 240 /* (FRAMESAMPLES/2) 30 ms */
+#define PITCH_MAX_LAG 140 /* 57 Hz */
+#define PITCH_MIN_LAG 20 /* 400 Hz */
+#define PITCH_MIN_LAG_Q8 5120 /* 256 * PITCH_MIN_LAG */
+#define OFFSET_Q8 768 /* 256 * 3 */
+
+#define PITCH_MAX_GAIN_Q12 1843 /* 0.45 */
+#define PITCH_LAG_SPAN2 65 /* (PITCH_MAX_LAG/2-PITCH_MIN_LAG/2+5) */
+#define PITCH_CORR_LEN2 60 /* 15 ms */
+#define PITCH_CORR_STEP2 60 /* (PITCH_FRAME_LEN/4) */
+#define PITCH_SUBFRAMES 4
+#define PITCH_SUBFRAME_LEN 60 /* (PITCH_FRAME_LEN/PITCH_SUBFRAMES) */
+
+/* For pitch filter */
+#define PITCH_BUFFSIZE \
+ 190 /* (PITCH_MAX_LAG + 50) Extra 50 for fraction and LP filters */
+#define PITCH_INTBUFFSIZE 430 /* (PITCH_FRAME_LEN+PITCH_BUFFSIZE) */
+#define PITCH_FRACS 8
+#define PITCH_FRACORDER 9
+#define PITCH_DAMPORDER 5
+
+/* Order of high pass filter */
+#define HPORDER 2
+
+/* PLC */
+#define DECAY_RATE \
+  10 /* Q15, 20% decay every lost frame, applied linearly sample by sample */
+#define PLC_WAS_USED 1
+#define PLC_NOT_USED 3
+#define RECOVERY_OVERLAP 80
+#define RESAMP_RES 256
+#define RESAMP_RES_BIT 8
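+/* RESAMP_RES is a power of two (256 == 1 << RESAMP_RES_BIT), which presumably
+ * lets fractional resampling positions be handled with shifts and masks. */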
+
+/* Define Error codes */
+/* 6000 General */
+#define ISAC_MEMORY_ALLOCATION_FAILED 6010
+#define ISAC_MODE_MISMATCH 6020
+#define ISAC_DISALLOWED_BOTTLENECK 6030
+#define ISAC_DISALLOWED_FRAME_LENGTH 6040
+/* 6200 Bandwidth estimator */
+#define ISAC_RANGE_ERROR_BW_ESTIMATOR 6240
+/* 6400 Encoder */
+#define ISAC_ENCODER_NOT_INITIATED 6410
+#define ISAC_DISALLOWED_CODING_MODE 6420
+#define ISAC_DISALLOWED_FRAME_MODE_ENCODER 6430
+#define ISAC_DISALLOWED_BITSTREAM_LENGTH 6440
+#define ISAC_PAYLOAD_LARGER_THAN_LIMIT 6450
+/* 6600 Decoder */
+#define ISAC_DECODER_NOT_INITIATED 6610
+#define ISAC_EMPTY_PACKET 6620
+#define ISAC_PACKET_TOO_SHORT 6625
+#define ISAC_DISALLOWED_FRAME_MODE_DECODER 6630
+#define ISAC_RANGE_ERROR_DECODE_FRAME_LENGTH 6640
+#define ISAC_RANGE_ERROR_DECODE_BANDWIDTH 6650
+#define ISAC_RANGE_ERROR_DECODE_PITCH_GAIN 6660
+#define ISAC_RANGE_ERROR_DECODE_PITCH_LAG 6670
+#define ISAC_RANGE_ERROR_DECODE_LPC 6680
+#define ISAC_RANGE_ERROR_DECODE_SPECTRUM 6690
+#define ISAC_LENGTH_MISMATCH 6730
+/* 6800 Call setup formats */
+#define ISAC_INCOMPATIBLE_FORMATS 6810
+
+#endif /* MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_SETTINGS_H_ */
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/spectrum_ar_model_tables.c b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/spectrum_ar_model_tables.c
new file mode 100644
index 0000000000..4ef9a338cc
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/spectrum_ar_model_tables.c
@@ -0,0 +1,193 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * spectrum_ar_model_tables.c
+ *
+ * This file contains tables with AR coefficients, Gain coefficients
+ * and cosine tables.
+ *
+ */
+
+#include "modules/audio_coding/codecs/isac/fix/source/spectrum_ar_model_tables.h"
+#include "modules/audio_coding/codecs/isac/fix/source/settings.h"
+
+/********************* AR Coefficient Tables ************************/
+
+/* cdf for quantized reflection coefficient 1 */
+const uint16_t WebRtcIsacfix_kRc1Cdf[12] = {
+ 0, 2, 4, 129, 7707, 57485, 65495, 65527, 65529, 65531,
+ 65533, 65535
+};
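+
+/* The cdf tables in this file hold cumulative probabilities in Q16: symbol i
+ * has probability (cdf[i + 1] - cdf[i]) / 65536. E.g. in kRc1Cdf above the
+ * most likely symbol accounts for (57485 - 7707) / 65536 ~= 0.76. */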
+
+/* cdf for quantized reflection coefficient 2 */
+const uint16_t WebRtcIsacfix_kRc2Cdf[12] = {
+ 0, 2, 4, 7, 531, 25298, 64525, 65526, 65529, 65531,
+ 65533, 65535
+};
+
+/* cdf for quantized reflection coefficient 3 */
+const uint16_t WebRtcIsacfix_kRc3Cdf[12] = {
+ 0, 2, 4, 6, 620, 22898, 64843, 65527, 65529, 65531,
+ 65533, 65535
+};
+
+/* cdf for quantized reflection coefficient 4 */
+const uint16_t WebRtcIsacfix_kRc4Cdf[12] = {
+ 0, 2, 4, 6, 35, 10034, 60733, 65506, 65529, 65531,
+ 65533, 65535
+};
+
+/* cdf for quantized reflection coefficient 5 */
+const uint16_t WebRtcIsacfix_kRc5Cdf[12] = {
+ 0, 2, 4, 6, 36, 7567, 56727, 65385, 65529, 65531,
+ 65533, 65535
+};
+
+/* cdf for quantized reflection coefficient 6 */
+const uint16_t WebRtcIsacfix_kRc6Cdf[12] = {
+ 0, 2, 4, 6, 14, 6579, 57360, 65409, 65529, 65531,
+ 65533, 65535
+};
+
+/* representation levels for quantized reflection coefficient 1 */
+const int16_t WebRtcIsacfix_kRc1Levels[11] = {
+ -32104, -29007, -23202, -15496, -9279, -2577, 5934, 17535, 24512, 29503, 32104
+};
+
+/* representation levels for quantized reflection coefficient 2 */
+const int16_t WebRtcIsacfix_kRc2Levels[11] = {
+ -32104, -29503, -23494, -15261, -7309, -1399, 6158, 16381, 24512, 29503, 32104
+};
+
+/* representation levels for quantized reflection coefficient 3 */
+const int16_t WebRtcIsacfix_kRc3Levels[11] = {
+ -32104, -29503, -23157, -15186, -7347, -1359, 5829, 17535, 24512, 29503, 32104
+};
+
+/* representation levels for quantized reflection coefficient 4 */
+const int16_t WebRtcIsacfix_kRc4Levels[11] = {
+ -32104, -29503, -24512, -15362, -6665, -342, 6596, 14585, 24512, 29503, 32104
+};
+
+/* representation levels for quantized reflection coefficient 5 */
+const int16_t WebRtcIsacfix_kRc5Levels[11] = {
+ -32104, -29503, -24512, -15005, -6564, -106, 7123, 14920, 24512, 29503, 32104
+};
+
+/* representation levels for quantized reflection coefficient 6 */
+const int16_t WebRtcIsacfix_kRc6Levels[11] = {
+ -32104, -29503, -24512, -15096, -6656, -37, 7036, 14847, 24512, 29503, 32104
+};
+
+/* quantization boundary levels for reflection coefficients */
+const int16_t WebRtcIsacfix_kRcBound[12] = {
+ -32768, -31441, -27566, -21458, -13612, -4663,
+ 4663, 13612, 21458, 27566, 31441, 32767
+};
+
+/* initial index for AR reflection coefficient quantizer and cdf table search */
+const uint16_t WebRtcIsacfix_kRcInitInd[6] = {
+ 5, 5, 5, 5, 5, 5
+};
+
+/* pointers to AR cdf tables */
+const uint16_t *WebRtcIsacfix_kRcCdfPtr[AR_ORDER] = {
+ WebRtcIsacfix_kRc1Cdf,
+ WebRtcIsacfix_kRc2Cdf,
+ WebRtcIsacfix_kRc3Cdf,
+ WebRtcIsacfix_kRc4Cdf,
+ WebRtcIsacfix_kRc5Cdf,
+ WebRtcIsacfix_kRc6Cdf
+};
+
+/* pointers to AR representation levels tables */
+const int16_t *WebRtcIsacfix_kRcLevPtr[AR_ORDER] = {
+ WebRtcIsacfix_kRc1Levels,
+ WebRtcIsacfix_kRc2Levels,
+ WebRtcIsacfix_kRc3Levels,
+ WebRtcIsacfix_kRc4Levels,
+ WebRtcIsacfix_kRc5Levels,
+ WebRtcIsacfix_kRc6Levels
+};
+
+
+/******************** GAIN Coefficient Tables ***********************/
+
+/* cdf for Gain coefficient */
+const uint16_t WebRtcIsacfix_kGainCdf[19] = {
+ 0, 2, 4, 6, 8, 10, 12, 14, 16, 1172,
+ 11119, 29411, 51699, 64445, 65527, 65529, 65531, 65533, 65535
+};
+
+/* representation levels for quantized squared Gain coefficient */
+const int32_t WebRtcIsacfix_kGain2Lev[18] = {
+ 128, 128, 128, 128, 128, 215, 364, 709, 1268,
+ 1960, 3405, 6078, 11286, 17827, 51918, 134498, 487432, 2048000
+};
+
+/* quantization boundary levels for squared Gain coefficient */
+const int32_t WebRtcIsacfix_kGain2Bound[19] = {
+ 0, 21, 35, 59, 99, 166, 280, 475, 815, 1414,
+ 2495, 4505, 8397, 16405, 34431, 81359, 240497, 921600, 0x7FFFFFFF
+};
+
+/* pointers to Gain cdf table */
+const uint16_t *WebRtcIsacfix_kGainPtr[1] = {
+ WebRtcIsacfix_kGainCdf
+};
+
+/* gain initial index for gain quantizer and cdf table search */
+const uint16_t WebRtcIsacfix_kGainInitInd[1] = {
+ 11
+};
+
+
+/************************* Cosine Tables ****************************/
+
+/* cosine table */
+const int16_t WebRtcIsacfix_kCos[6][60] = {
+ { 512, 512, 511, 510, 508, 507, 505, 502, 499, 496,
+ 493, 489, 485, 480, 476, 470, 465, 459, 453, 447,
+ 440, 433, 426, 418, 410, 402, 394, 385, 376, 367,
+ 357, 348, 338, 327, 317, 306, 295, 284, 273, 262,
+ 250, 238, 226, 214, 202, 190, 177, 165, 152, 139,
+ 126, 113, 100, 87, 73, 60, 47, 33, 20, 7 },
+ { 512, 510, 508, 503, 498, 491, 483, 473, 462, 450,
+ 437, 422, 406, 389, 371, 352, 333, 312, 290, 268,
+ 244, 220, 196, 171, 145, 120, 93, 67, 40, 13,
+ -13, -40, -67, -93, -120, -145, -171, -196, -220, -244,
+ -268, -290, -312, -333, -352, -371, -389, -406, -422, -437,
+ -450, -462, -473, -483, -491, -498, -503, -508, -510, -512 },
+ { 512, 508, 502, 493, 480, 465, 447, 426, 402, 376,
+ 348, 317, 284, 250, 214, 177, 139, 100, 60, 20,
+ -20, -60, -100, -139, -177, -214, -250, -284, -317, -348,
+ -376, -402, -426, -447, -465, -480, -493, -502, -508, -512,
+ -512, -508, -502, -493, -480, -465, -447, -426, -402, -376,
+ -348, -317, -284, -250, -214, -177, -139, -100, -60, -20 },
+ { 511, 506, 495, 478, 456, 429, 398, 362, 322, 279,
+ 232, 183, 133, 80, 27, -27, -80, -133, -183, -232,
+ -279, -322, -362, -398, -429, -456, -478, -495, -506, -511,
+ -511, -506, -495, -478, -456, -429, -398, -362, -322, -279,
+ -232, -183, -133, -80, -27, 27, 80, 133, 183, 232,
+ 279, 322, 362, 398, 429, 456, 478, 495, 506, 511 },
+ { 511, 502, 485, 459, 426, 385, 338, 284, 226, 165,
+ 100, 33, -33, -100, -165, -226, -284, -338, -385, -426,
+ -459, -485, -502, -511, -511, -502, -485, -459, -426, -385,
+ -338, -284, -226, -165, -100, -33, 33, 100, 165, 226,
+ 284, 338, 385, 426, 459, 485, 502, 511, 511, 502,
+ 485, 459, 426, 385, 338, 284, 226, 165, 100, 33 },
+ { 510, 498, 473, 437, 389, 333, 268, 196, 120, 40,
+ -40, -120, -196, -268, -333, -389, -437, -473, -498, -510,
+ -510, -498, -473, -437, -389, -333, -268, -196, -120, -40,
+ 40, 120, 196, 268, 333, 389, 437, 473, 498, 510,
+ 510, 498, 473, 437, 389, 333, 268, 196, 120, 40,
+ -40, -120, -196, -268, -333, -389, -437, -473, -498, -510 }
+};
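+
+/* The rows above are Q9 cosines (512 == 1.0 in Q9). Row m, entry k appears to
+ * follow round(512 * cos(PI * (m + 1) * (k + 0.5) / 120)), i.e. six harmonics
+ * sampled at 60 half-integer points. */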
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/spectrum_ar_model_tables.h b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/spectrum_ar_model_tables.h
new file mode 100644
index 0000000000..2282a369cb
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/spectrum_ar_model_tables.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * spectrum_ar_model_tables.h
+ *
+ * This file contains definitions of tables with AR coefficients,
+ * Gain coefficients and cosine tables.
+ *
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_SPECTRUM_AR_MODEL_TABLES_H_
+#define MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_SPECTRUM_AR_MODEL_TABLES_H_
+
+#include <stdint.h>
+
+#include "modules/audio_coding/codecs/isac/fix/source/settings.h"
+
+/********************* AR Coefficient Tables ************************/
+/* cdf for quantized reflection coefficient 1 */
+extern const uint16_t WebRtcIsacfix_kRc1Cdf[12];
+
+/* cdf for quantized reflection coefficient 2 */
+extern const uint16_t WebRtcIsacfix_kRc2Cdf[12];
+
+/* cdf for quantized reflection coefficient 3 */
+extern const uint16_t WebRtcIsacfix_kRc3Cdf[12];
+
+/* cdf for quantized reflection coefficient 4 */
+extern const uint16_t WebRtcIsacfix_kRc4Cdf[12];
+
+/* cdf for quantized reflection coefficient 5 */
+extern const uint16_t WebRtcIsacfix_kRc5Cdf[12];
+
+/* cdf for quantized reflection coefficient 6 */
+extern const uint16_t WebRtcIsacfix_kRc6Cdf[12];
+
+/* representation levels for quantized reflection coefficient 1 */
+extern const int16_t WebRtcIsacfix_kRc1Levels[11];
+
+/* representation levels for quantized reflection coefficient 2 */
+extern const int16_t WebRtcIsacfix_kRc2Levels[11];
+
+/* representation levels for quantized reflection coefficient 3 */
+extern const int16_t WebRtcIsacfix_kRc3Levels[11];
+
+/* representation levels for quantized reflection coefficient 4 */
+extern const int16_t WebRtcIsacfix_kRc4Levels[11];
+
+/* representation levels for quantized reflection coefficient 5 */
+extern const int16_t WebRtcIsacfix_kRc5Levels[11];
+
+/* representation levels for quantized reflection coefficient 6 */
+extern const int16_t WebRtcIsacfix_kRc6Levels[11];
+
+/* quantization boundary levels for reflection coefficients */
+extern const int16_t WebRtcIsacfix_kRcBound[12];
+
+/* initial indices for AR reflection coefficient quantizer and cdf table search
+ */
+extern const uint16_t WebRtcIsacfix_kRcInitInd[AR_ORDER];
+
+/* pointers to AR cdf tables */
+extern const uint16_t* WebRtcIsacfix_kRcCdfPtr[AR_ORDER];
+
+/* pointers to AR representation levels tables */
+extern const int16_t* WebRtcIsacfix_kRcLevPtr[AR_ORDER];
+
+/******************** GAIN Coefficient Tables ***********************/
+/* cdf for Gain coefficient */
+extern const uint16_t WebRtcIsacfix_kGainCdf[19];
+
+/* representation levels for quantized Gain coefficient */
+extern const int32_t WebRtcIsacfix_kGain2Lev[18];
+
+/* squared quantization boundary levels for Gain coefficient */
+extern const int32_t WebRtcIsacfix_kGain2Bound[19];
+
+/* pointer to Gain cdf table */
+extern const uint16_t* WebRtcIsacfix_kGainPtr[1];
+
+/* Gain initial index for gain quantizer and cdf table search */
+extern const uint16_t WebRtcIsacfix_kGainInitInd[1];
+
+/************************* Cosine Tables ****************************/
+/* Cosine table */
+extern const int16_t WebRtcIsacfix_kCos[6][60];
+
+#endif /* MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_SPECTRUM_AR_MODEL_TABLES_H_ \
+ */
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/structs.h b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/structs.h
new file mode 100644
index 0000000000..3044d5176b
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/structs.h
@@ -0,0 +1,345 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * structs.h
+ *
+ * This header file contains all the structs used in the ISAC codec
+ *
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_STRUCTS_H_
+#define MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_STRUCTS_H_
+
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+#include "modules/audio_coding/codecs/isac/bandwidth_info.h"
+#include "modules/audio_coding/codecs/isac/fix/source/settings.h"
+
+/* Bitstream struct for decoder */
+typedef struct Bitstreamstruct_dec {
+ uint16_t stream[INTERNAL_STREAM_SIZE_W16]; /* Array bytestream to decode */
+ uint32_t W_upper; /* Upper boundary of interval W */
+ uint32_t streamval;
+ uint16_t stream_index; /* Index to the current position in bytestream */
+  int16_t full; /* 0 - first byte in memory filled, second empty */
+  /* 1 - both bytes are empty (we just filled the previous memory) */
+
+ size_t stream_size; /* The size of stream in bytes. */
+} Bitstr_dec;
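+
+/* W_upper and streamval above form the usual arithmetic-decoder state: the
+ * width of the current coding interval and the code value read so far. */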
+
+/* Bitstream struct for encoder */
+typedef struct Bitstreamstruct_enc {
+ uint16_t
+ stream[STREAM_MAXW16_60MS]; /* Vector for adding encoded bytestream */
+ uint32_t W_upper; /* Upper boundary of interval W */
+ uint32_t streamval;
+ uint16_t stream_index; /* Index to the current position in bytestream */
+  int16_t full; /* 0 - first byte in memory filled, second empty */
+  /* 1 - both bytes are empty (we just filled the previous memory) */
+
+} Bitstr_enc;
+
+typedef struct {
+ int16_t DataBufferLoQ0[WINLEN];
+ int16_t DataBufferHiQ0[WINLEN];
+
+ int32_t CorrBufLoQQ[ORDERLO + 1];
+ int32_t CorrBufHiQQ[ORDERHI + 1];
+
+ int16_t CorrBufLoQdom[ORDERLO + 1];
+ int16_t CorrBufHiQdom[ORDERHI + 1];
+
+ int32_t PreStateLoGQ15[ORDERLO + 1];
+ int32_t PreStateHiGQ15[ORDERHI + 1];
+
+ uint32_t OldEnergy;
+
+} MaskFiltstr_enc;
+
+typedef struct {
+ int16_t PostStateLoGQ0[ORDERLO + 1];
+ int16_t PostStateHiGQ0[ORDERHI + 1];
+
+ uint32_t OldEnergy;
+
+} MaskFiltstr_dec;
+
+typedef struct {
+ // state vectors for each of the two analysis filters
+
+ int32_t INSTAT1_fix[2 * (QORDER - 1)];
+ int32_t INSTAT2_fix[2 * (QORDER - 1)];
+ int16_t INLABUF1_fix[QLOOKAHEAD];
+ int16_t INLABUF2_fix[QLOOKAHEAD];
+
+ /* High pass filter */
+ int32_t HPstates_fix[HPORDER];
+
+} PreFiltBankstr;
+
+typedef struct {
+ // state vectors for each of the two analysis filters
+ int32_t STATE_0_LOWER_fix[2 * POSTQORDER];
+ int32_t STATE_0_UPPER_fix[2 * POSTQORDER];
+
+ /* High pass filter */
+
+ int32_t HPstates1_fix[HPORDER];
+ int32_t HPstates2_fix[HPORDER];
+
+} PostFiltBankstr;
+
+typedef struct {
+ /* data buffer for pitch filter */
+ int16_t ubufQQ[PITCH_BUFFSIZE];
+
+ /* low pass state vector */
+ int16_t ystateQQ[PITCH_DAMPORDER];
+
+ /* old lag and gain */
+ int16_t oldlagQ7;
+ int16_t oldgainQ12;
+
+} PitchFiltstr;
+
+typedef struct {
+  // for initial estimator
+ int16_t dec_buffer16[PITCH_CORR_LEN2 + PITCH_CORR_STEP2 + PITCH_MAX_LAG / 2 -
+ PITCH_FRAME_LEN / 2 + 2];
+ int32_t decimator_state32[2 * ALLPASSSECTIONS + 1];
+ int16_t inbuf[QLOOKAHEAD];
+
+ PitchFiltstr PFstr_wght;
+ PitchFiltstr PFstr;
+
+} PitchAnalysisStruct;
+
+typedef struct {
+ /* Parameters used in PLC to avoid re-computation */
+
+ /* --- residual signals --- */
+ int16_t prevPitchInvIn[FRAMESAMPLES / 2];
+ int16_t prevPitchInvOut[PITCH_MAX_LAG + 10]; // [FRAMESAMPLES/2]; save 90
+ int32_t prevHP[PITCH_MAX_LAG + 10]; // [FRAMESAMPLES/2]; save 90
+
+  int16_t decayCoeffPriodic; /* how much to suppress a sample */
+ int16_t decayCoeffNoise;
+ int16_t used; /* if PLC is used */
+
+ int16_t* lastPitchLP; // [FRAMESAMPLES/2]; saved 240;
+
+ /* --- LPC side info --- */
+ int16_t lofilt_coefQ15[ORDERLO];
+ int16_t hifilt_coefQ15[ORDERHI];
+ int32_t gain_lo_hiQ17[2];
+
+ /* --- LTP side info --- */
+ int16_t AvgPitchGain_Q12;
+ int16_t lastPitchGain_Q12;
+ int16_t lastPitchLag_Q7;
+
+ /* --- Add-overlap in recovery packet --- */
+ int16_t overlapLP[RECOVERY_OVERLAP]; // [FRAMESAMPLES/2]; saved 160
+
+ int16_t pitchCycles;
+ int16_t A;
+ int16_t B;
+ size_t pitchIndex;
+ size_t stretchLag;
+ int16_t* prevPitchLP; // [ FRAMESAMPLES/2 ]; saved 240
+ int16_t seed;
+
+ int16_t std;
+} PLCstr;
+
+/* An instance of this struct is kept together with the other iSAC structs */
+typedef struct {
+ int16_t prevFrameSizeMs; /* Previous frame size (in ms) */
+  uint16_t prevRtpNumber; /* Previous RTP timestamp from received packet */
+  /* (in samples, relative to the beginning) */
+ uint32_t prevSendTime; /* Send time for previous packet, from RTP header */
+ uint32_t prevArrivalTime; /* Arrival time for previous packet (in ms using
+ timeGetTime()) */
+ uint16_t prevRtpRate; /* rate of previous packet, derived from RTP timestamps
+ (in bits/s) */
+ uint32_t lastUpdate; /* Time since the last update of the Bottle Neck estimate
+ (in samples) */
+  uint32_t lastReduction; /* Time since the last reduction (in samples) */
+  int32_t countUpdates; /* How many times the estimate was updated in the
+                           beginning */
+
+ /* The estimated bottle neck rate from there to here (in bits/s) */
+ uint32_t recBw;
+ uint32_t recBwInv;
+ uint32_t recBwAvg;
+ uint32_t recBwAvgQ;
+
+ uint32_t minBwInv;
+ uint32_t maxBwInv;
+
+ /* The estimated mean absolute jitter value, as seen on this side (in ms) */
+ int32_t recJitter;
+ int32_t recJitterShortTerm;
+ int32_t recJitterShortTermAbs;
+ int32_t recMaxDelay;
+ int32_t recMaxDelayAvgQ;
+
+ int16_t recHeaderRate; /* (assumed) bitrate for headers (bps) */
+
+ uint32_t sendBwAvg; /* The estimated bottle neck rate from here to there (in
+ bits/s) */
+  int32_t sendMaxDelayAvg; /* The estimated mean absolute jitter value, as seen
+                              on the other side (in ms) */
+
+ int16_t countRecPkts; /* number of packets received since last update */
+ int16_t highSpeedRec; /* flag for marking that a high speed network has been
+ detected downstream */
+
+ /* number of consecutive pkts sent during which the bwe estimate has
+ remained at a value greater than the downstream threshold for determining
+ highspeed network */
+ int16_t countHighSpeedRec;
+
+ /* flag indicating bwe should not adjust down immediately for very late pckts
+ */
+ int16_t inWaitPeriod;
+
+ /* variable holding the time of the start of a window of time when
+ bwe should not adjust down immediately for very late pckts */
+ uint32_t startWaitPeriod;
+
+ /* number of consecutive pkts sent during which the bwe estimate has
+ remained at a value greater than the upstream threshold for determining
+ highspeed network */
+ int16_t countHighSpeedSent;
+
+ /* flag indicated the desired number of packets over threshold rate have been
+ sent and bwe will assume the connection is over broadband network */
+ int16_t highSpeedSend;
+
+ IsacBandwidthInfo external_bw_info;
+} BwEstimatorstr;
+
+typedef struct {
+ /* boolean, flags if previous packet exceeded B.N. */
+ int16_t PrevExceed;
+ /* ms */
+ int16_t ExceedAgo;
+ /* packets left to send in current burst */
+ int16_t BurstCounter;
+ /* packets */
+ int16_t InitCounter;
+ /* ms remaining in buffer when next packet will be sent */
+ int16_t StillBuffered;
+
+} RateModel;
+
+/* The following struct is used to store data from encoding, to make it
+   fast and easy to construct a new bitstream with a different Bandwidth
+   estimate. All values (except framelength and minBytes) are double size to
+   handle 60 ms of data.
+*/
+typedef struct {
+  /* Used to keep track of whether it is the first or second part of a 60
+     msec packet */
+ int startIdx;
+
+ /* Frame length in samples */
+ int16_t framelength;
+
+ /* Pitch Gain */
+ int16_t pitchGain_index[2];
+
+ /* Pitch Lag */
+ int32_t meanGain[2];
+ int16_t pitchIndex[PITCH_SUBFRAMES * 2];
+
+ /* LPC */
+ int32_t LPCcoeffs_g[12 * 2]; /* KLT_ORDER_GAIN = 12 */
+ int16_t LPCindex_s[108 * 2]; /* KLT_ORDER_SHAPE = 108 */
+ int16_t LPCindex_g[12 * 2]; /* KLT_ORDER_GAIN = 12 */
+
+ /* Encode Spec */
+ int16_t fre[FRAMESAMPLES];
+ int16_t fim[FRAMESAMPLES];
+ int16_t AvgPitchGain[2];
+
+ /* Used in adaptive mode only */
+ int minBytes;
+
+} IsacSaveEncoderData;
+
+typedef struct {
+ Bitstr_enc bitstr_obj;
+ MaskFiltstr_enc maskfiltstr_obj;
+ PreFiltBankstr prefiltbankstr_obj;
+ PitchFiltstr pitchfiltstr_obj;
+ PitchAnalysisStruct pitchanalysisstr_obj;
+ RateModel rate_data_obj;
+
+ int16_t buffer_index;
+ int16_t current_framesamples;
+
+ int16_t data_buffer_fix[FRAMESAMPLES]; // the size was MAX_FRAMESAMPLES
+
+ int16_t frame_nb;
+ int16_t BottleNeck;
+ int16_t MaxDelay;
+ int16_t new_framelength;
+ int16_t s2nr;
+ uint16_t MaxBits;
+
+ int16_t bitstr_seed;
+
+ IsacSaveEncoderData* SaveEnc_ptr;
+  int16_t payloadLimitBytes30; /* Maximum allowed number of bytes for a 30 msec
+                                  packet */
+  int16_t payloadLimitBytes60; /* Maximum allowed number of bytes for a 60 msec
+                                  packet */
+  int16_t maxPayloadBytes; /* Maximum allowed number of bytes for both 30 and
+                              60 msec packets */
+ int16_t maxRateInBytes; /* Maximum allowed rate in bytes per 30 msec packet */
+  int16_t enforceFrameSize; /* If set, iSAC will never change packet size */
+
+} IsacFixEncoderInstance;
+
+typedef struct {
+ Bitstr_dec bitstr_obj;
+ MaskFiltstr_dec maskfiltstr_obj;
+ PostFiltBankstr postfiltbankstr_obj;
+ PitchFiltstr pitchfiltstr_obj;
+ PLCstr plcstr_obj; /* TS; for packet loss concealment */
+} IsacFixDecoderInstance;
+
+typedef struct {
+ IsacFixEncoderInstance ISACenc_obj;
+ IsacFixDecoderInstance ISACdec_obj;
+ BwEstimatorstr bwestimator_obj;
+ int16_t CodingMode; /* 0 = adaptive; 1 = instantaneous */
+ int16_t errorcode;
+ int16_t initflag; /* 0 = nothing initiated; 1 = encoder or decoder */
+ /* not initiated; 2 = all initiated */
+} ISACFIX_SubStruct;
+
+typedef struct {
+  int32_t lpcGains[12]; /* 6 lower-band & 6 upper-band; we may need to
+                           double it for 60 ms */
+ uint32_t W_upper; /* Upper boundary of interval W */
+ uint32_t streamval;
+ uint16_t stream_index; /* Index to the current position in bytestream */
+  int16_t full; /* 0 - first byte in memory filled, second empty */
+  /* 1 - both bytes are empty (we just filled the previous memory) */
+ uint16_t beforeLastWord;
+ uint16_t lastWord;
+} transcode_obj;
+
+// Bitstr_enc myBitStr;
+
+#endif /* MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_STRUCTS_H_ */
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/transform.c b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/transform.c
new file mode 100644
index 0000000000..80b244b5f1
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/transform.c
@@ -0,0 +1,214 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * WebRtcIsacfix_kTransform.c
+ *
+ * Transform functions
+ *
+ */
+
+#include "modules/audio_coding/codecs/isac/fix/source/codec.h"
+#include "modules/audio_coding/codecs/isac/fix/source/fft.h"
+#include "modules/audio_coding/codecs/isac/fix/source/settings.h"
+#include "modules/third_party/fft/fft.h"
+
+/* Tables are defined in transform_tables.c file or ARM assembly files. */
+/* Cosine table 1 in Q14 */
+extern const int16_t WebRtcIsacfix_kCosTab1[FRAMESAMPLES/2];
+/* Sine table 1 in Q14 */
+extern const int16_t WebRtcIsacfix_kSinTab1[FRAMESAMPLES/2];
+/* Sine table 2 in Q14 */
+extern const int16_t WebRtcIsacfix_kSinTab2[FRAMESAMPLES/4];
+
+void WebRtcIsacfix_Time2SpecC(int16_t *inre1Q9,
+ int16_t *inre2Q9,
+ int16_t *outreQ7,
+ int16_t *outimQ7)
+{
+
+ int k;
+ int32_t tmpreQ16[FRAMESAMPLES/2], tmpimQ16[FRAMESAMPLES/2];
+ int16_t tmp1rQ14, tmp1iQ14;
+ int32_t xrQ16, xiQ16, yrQ16, yiQ16;
+ int32_t v1Q16, v2Q16;
+ int16_t factQ19, sh;
+
+ /* Multiply with complex exponentials and combine into one complex vector */
+ factQ19 = 16921; // 0.5/sqrt(240) in Q19 is round(.5/sqrt(240)*(2^19)) = 16921
+ for (k = 0; k < FRAMESAMPLES/2; k++) {
+ tmp1rQ14 = WebRtcIsacfix_kCosTab1[k];
+ tmp1iQ14 = WebRtcIsacfix_kSinTab1[k];
+ xrQ16 = (tmp1rQ14 * inre1Q9[k] + tmp1iQ14 * inre2Q9[k]) >> 7;
+ xiQ16 = (tmp1rQ14 * inre2Q9[k] - tmp1iQ14 * inre1Q9[k]) >> 7;
+ // Q-domains below: (Q16*Q19>>16)>>3 = Q16
+ tmpreQ16[k] = (WEBRTC_SPL_MUL_16_32_RSFT16(factQ19, xrQ16) + 4) >> 3;
+ tmpimQ16[k] = (WEBRTC_SPL_MUL_16_32_RSFT16(factQ19, xiQ16) + 4) >> 3;
+ }
+
+
+ xrQ16 = WebRtcSpl_MaxAbsValueW32(tmpreQ16, FRAMESAMPLES/2);
+ yrQ16 = WebRtcSpl_MaxAbsValueW32(tmpimQ16, FRAMESAMPLES/2);
+ if (yrQ16>xrQ16) {
+ xrQ16 = yrQ16;
+ }
+
+ sh = WebRtcSpl_NormW32(xrQ16);
+ sh = sh-24; //if sh becomes >=0, then we should shift sh steps to the left, and the domain will become Q(16+sh)
+ //if sh becomes <0, then we should shift -sh steps to the right, and the domain will become Q(16+sh)
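+  // With sh = NormW32(max) - 24 the normalized samples satisfy |x| < 2^7, so
+  // the worst-case growth of the 240-point FFT (a factor of 240) stays below
+  // 2^7 * 240 = 30720 < 32767; this is presumably the reason for the -24.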
+
+ //"Fastest" vectors
+ if (sh>=0) {
+ for (k=0; k<FRAMESAMPLES/2; k++) {
+ inre1Q9[k] = (int16_t)(tmpreQ16[k] << sh); // Q(16+sh)
+ inre2Q9[k] = (int16_t)(tmpimQ16[k] << sh); // Q(16+sh)
+ }
+ } else {
+ int32_t round = 1 << (-sh - 1);
+ for (k=0; k<FRAMESAMPLES/2; k++) {
+ inre1Q9[k] = (int16_t)((tmpreQ16[k] + round) >> -sh); // Q(16+sh)
+ inre2Q9[k] = (int16_t)((tmpimQ16[k] + round) >> -sh); // Q(16+sh)
+ }
+ }
+
+ /* Get DFT */
+ WebRtcIsacfix_FftRadix16Fastest(inre1Q9, inre2Q9, -1); // real call
+
+ //"Fastest" vectors
+ if (sh>=0) {
+ for (k=0; k<FRAMESAMPLES/2; k++) {
+ tmpreQ16[k] = inre1Q9[k] >> sh; // Q(16+sh) -> Q16
+ tmpimQ16[k] = inre2Q9[k] >> sh; // Q(16+sh) -> Q16
+ }
+ } else {
+ for (k=0; k<FRAMESAMPLES/2; k++) {
+ tmpreQ16[k] = inre1Q9[k] << -sh; // Q(16+sh) -> Q16
+ tmpimQ16[k] = inre2Q9[k] << -sh; // Q(16+sh) -> Q16
+ }
+ }
+
+
+ /* Use symmetry to separate into two complex vectors and center frames in time around zero */
+ for (k = 0; k < FRAMESAMPLES/4; k++) {
+ xrQ16 = tmpreQ16[k] + tmpreQ16[FRAMESAMPLES/2 - 1 - k];
+ yiQ16 = -tmpreQ16[k] + tmpreQ16[FRAMESAMPLES/2 - 1 - k];
+ xiQ16 = tmpimQ16[k] - tmpimQ16[FRAMESAMPLES/2 - 1 - k];
+ yrQ16 = tmpimQ16[k] + tmpimQ16[FRAMESAMPLES/2 - 1 - k];
+ tmp1rQ14 = -WebRtcIsacfix_kSinTab2[FRAMESAMPLES/4 - 1 - k];
+ tmp1iQ14 = WebRtcIsacfix_kSinTab2[k];
+ v1Q16 = WEBRTC_SPL_MUL_16_32_RSFT14(tmp1rQ14, xrQ16) - WEBRTC_SPL_MUL_16_32_RSFT14(tmp1iQ14, xiQ16);
+ v2Q16 = WEBRTC_SPL_MUL_16_32_RSFT14(tmp1iQ14, xrQ16) + WEBRTC_SPL_MUL_16_32_RSFT14(tmp1rQ14, xiQ16);
+ outreQ7[k] = (int16_t)(v1Q16 >> 9);
+ outimQ7[k] = (int16_t)(v2Q16 >> 9);
+ v1Q16 = -WEBRTC_SPL_MUL_16_32_RSFT14(tmp1iQ14, yrQ16) - WEBRTC_SPL_MUL_16_32_RSFT14(tmp1rQ14, yiQ16);
+ v2Q16 = -WEBRTC_SPL_MUL_16_32_RSFT14(tmp1rQ14, yrQ16) + WEBRTC_SPL_MUL_16_32_RSFT14(tmp1iQ14, yiQ16);
+ // CalcLrIntQ(v1Q16, 9);
+ outreQ7[FRAMESAMPLES / 2 - 1 - k] = (int16_t)(v1Q16 >> 9);
+ // CalcLrIntQ(v2Q16, 9);
+ outimQ7[FRAMESAMPLES / 2 - 1 - k] = (int16_t)(v2Q16 >> 9);
+
+ }
+}
+
+
+void WebRtcIsacfix_Spec2TimeC(int16_t *inreQ7, int16_t *inimQ7, int32_t *outre1Q16, int32_t *outre2Q16)
+{
+
+ int k;
+ int16_t tmp1rQ14, tmp1iQ14;
+ int32_t xrQ16, xiQ16, yrQ16, yiQ16;
+ int32_t tmpInRe, tmpInIm, tmpInRe2, tmpInIm2;
+ int16_t factQ11;
+ int16_t sh;
+
+ for (k = 0; k < FRAMESAMPLES/4; k++) {
+ /* Move zero in time to beginning of frames */
+ tmp1rQ14 = -WebRtcIsacfix_kSinTab2[FRAMESAMPLES/4 - 1 - k];
+ tmp1iQ14 = WebRtcIsacfix_kSinTab2[k];
+
+ tmpInRe = inreQ7[k] * (1 << 9); // Q7 -> Q16
+ tmpInIm = inimQ7[k] * (1 << 9); // Q7 -> Q16
+ tmpInRe2 = inreQ7[FRAMESAMPLES / 2 - 1 - k] * (1 << 9); // Q7 -> Q16
+ tmpInIm2 = inimQ7[FRAMESAMPLES / 2 - 1 - k] * (1 << 9); // Q7 -> Q16
+
+ xrQ16 = WEBRTC_SPL_MUL_16_32_RSFT14(tmp1rQ14, tmpInRe) + WEBRTC_SPL_MUL_16_32_RSFT14(tmp1iQ14, tmpInIm);
+ xiQ16 = WEBRTC_SPL_MUL_16_32_RSFT14(tmp1rQ14, tmpInIm) - WEBRTC_SPL_MUL_16_32_RSFT14(tmp1iQ14, tmpInRe);
+ yrQ16 = -WEBRTC_SPL_MUL_16_32_RSFT14(tmp1rQ14, tmpInIm2) - WEBRTC_SPL_MUL_16_32_RSFT14(tmp1iQ14, tmpInRe2);
+ yiQ16 = -WEBRTC_SPL_MUL_16_32_RSFT14(tmp1rQ14, tmpInRe2) + WEBRTC_SPL_MUL_16_32_RSFT14(tmp1iQ14, tmpInIm2);
+
+ /* Combine into one vector, z = x + j * y */
+ outre1Q16[k] = xrQ16 - yiQ16;
+ outre1Q16[FRAMESAMPLES/2 - 1 - k] = xrQ16 + yiQ16;
+ outre2Q16[k] = xiQ16 + yrQ16;
+ outre2Q16[FRAMESAMPLES/2 - 1 - k] = -xiQ16 + yrQ16;
+ }
+
+ /* Get IDFT */
+ tmpInRe = WebRtcSpl_MaxAbsValueW32(outre1Q16, 240);
+ tmpInIm = WebRtcSpl_MaxAbsValueW32(outre2Q16, 240);
+ if (tmpInIm>tmpInRe) {
+ tmpInRe = tmpInIm;
+ }
+
+ sh = WebRtcSpl_NormW32(tmpInRe);
+ sh = sh-24; //if sh becomes >=0, then we should shift sh steps to the left, and the domain will become Q(16+sh)
+ //if sh becomes <0, then we should shift -sh steps to the right, and the domain will become Q(16+sh)
+
+ //"Fastest" vectors
+ if (sh>=0) {
+ for (k=0; k<240; k++) {
+ inreQ7[k] = (int16_t)(outre1Q16[k] << sh); // Q(16+sh)
+ inimQ7[k] = (int16_t)(outre2Q16[k] << sh); // Q(16+sh)
+ }
+ } else {
+ int32_t round = 1 << (-sh - 1);
+ for (k=0; k<240; k++) {
+ inreQ7[k] = (int16_t)((outre1Q16[k] + round) >> -sh); // Q(16+sh)
+ inimQ7[k] = (int16_t)((outre2Q16[k] + round) >> -sh); // Q(16+sh)
+ }
+ }
+
+ WebRtcIsacfix_FftRadix16Fastest(inreQ7, inimQ7, 1); // real call
+
+ //"Fastest" vectors
+ if (sh>=0) {
+ for (k=0; k<240; k++) {
+ outre1Q16[k] = inreQ7[k] >> sh; // Q(16+sh) -> Q16
+ outre2Q16[k] = inimQ7[k] >> sh; // Q(16+sh) -> Q16
+ }
+ } else {
+ for (k=0; k<240; k++) {
+ outre1Q16[k] = inreQ7[k] * (1 << -sh); // Q(16+sh) -> Q16
+ outre2Q16[k] = inimQ7[k] * (1 << -sh); // Q(16+sh) -> Q16
+ }
+ }
+
+ /* Divide through by the normalizing constant: */
+ /* scale all values with 1/240, i.e. with 273 in Q16 */
+ /* 273/65536 ~= 0.0041656 */
+ /* 1/240 ~= 0.0041666 */
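+  /* 273 == round(65536 / 240), i.e. 1/240 rounded to Q16. */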
+ for (k=0; k<240; k++) {
+ outre1Q16[k] = WEBRTC_SPL_MUL_16_32_RSFT16(273, outre1Q16[k]);
+ outre2Q16[k] = WEBRTC_SPL_MUL_16_32_RSFT16(273, outre2Q16[k]);
+ }
+
+ /* Demodulate and separate */
+ factQ11 = 31727; // sqrt(240) in Q11 is round(15.49193338482967*2048) = 31727
+ for (k = 0; k < FRAMESAMPLES/2; k++) {
+ tmp1rQ14 = WebRtcIsacfix_kCosTab1[k];
+ tmp1iQ14 = WebRtcIsacfix_kSinTab1[k];
+ xrQ16 = WEBRTC_SPL_MUL_16_32_RSFT14(tmp1rQ14, outre1Q16[k]) - WEBRTC_SPL_MUL_16_32_RSFT14(tmp1iQ14, outre2Q16[k]);
+ xiQ16 = WEBRTC_SPL_MUL_16_32_RSFT14(tmp1rQ14, outre2Q16[k]) + WEBRTC_SPL_MUL_16_32_RSFT14(tmp1iQ14, outre1Q16[k]);
+ xrQ16 = WEBRTC_SPL_MUL_16_32_RSFT11(factQ11, xrQ16);
+ xiQ16 = WEBRTC_SPL_MUL_16_32_RSFT11(factQ11, xiQ16);
+ outre2Q16[k] = xiQ16;
+ outre1Q16[k] = xrQ16;
+ }
+}
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/transform_mips.c b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/transform_mips.c
new file mode 100644
index 0000000000..a87b3b54f2
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/transform_mips.c
@@ -0,0 +1,1294 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/isac/fix/source/codec.h"
+#include "modules/audio_coding/codecs/isac/fix/source/fft.h"
+#include "modules/audio_coding/codecs/isac/fix/source/settings.h"
+
+// The tables are defined in transform_tables.c file.
+extern const int16_t WebRtcIsacfix_kCosTab1[FRAMESAMPLES/2];
+extern const int16_t WebRtcIsacfix_kSinTab1[FRAMESAMPLES/2];
+extern const int16_t WebRtcIsacfix_kCosTab2[FRAMESAMPLES/4];
+extern const int16_t WebRtcIsacfix_kSinTab2[FRAMESAMPLES/4];
+
+// The MIPS DSPr2 version of the WebRtcIsacfix_Time2Spec function
+// is not bit-exact with the C version.
+// The accuracy of the MIPS DSPr2 version is the same or better.
+void WebRtcIsacfix_Time2SpecMIPS(int16_t* inre1Q9,
+ int16_t* inre2Q9,
+ int16_t* outreQ7,
+ int16_t* outimQ7) {
+ int k = FRAMESAMPLES / 2;
+ int32_t tmpreQ16[FRAMESAMPLES / 2], tmpimQ16[FRAMESAMPLES / 2];
+ int32_t r0, r1, r2, r3, r4, r5, r6, r7, r8, r9;
+ int32_t inre1, inre2, tmpre, tmpim, factor, max, max1;
+ int16_t* cosptr;
+ int16_t* sinptr;
+
+ cosptr = (int16_t*)WebRtcIsacfix_kCosTab1;
+ sinptr = (int16_t*)WebRtcIsacfix_kSinTab1;
+
+ __asm __volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "addiu %[inre1], %[inre1Q9], 0 \n\t"
+ "addiu %[inre2], %[inre2Q9], 0 \n\t"
+ "addiu %[tmpre], %[tmpreQ16], 0 \n\t"
+ "addiu %[tmpim], %[tmpimQ16], 0 \n\t"
+ "addiu %[factor], $zero, 16921 \n\t"
+ "mul %[max], $zero, $zero \n\t"
+ // Multiply with complex exponentials and combine into one complex vector.
+ // Also, calculate the maximal absolute value in the same loop.
+ "1: \n\t"
+#if defined(MIPS_DSP_R2_LE)
+ "lwl %[r0], 0(%[inre1]) \n\t"
+ "lwl %[r2], 0(%[cosptr]) \n\t"
+ "lwl %[r3], 0(%[sinptr]) \n\t"
+ "lwl %[r1], 0(%[inre2]) \n\t"
+ "lwr %[r0], 0(%[inre1]) \n\t"
+ "lwr %[r2], 0(%[cosptr]) \n\t"
+ "lwr %[r3], 0(%[sinptr]) \n\t"
+ "lwr %[r1], 0(%[inre2]) \n\t"
+ "muleq_s.w.phr %[r4], %[r2], %[r0] \n\t"
+ "muleq_s.w.phr %[r5], %[r3], %[r0] \n\t"
+ "muleq_s.w.phr %[r6], %[r3], %[r1] \n\t"
+ "muleq_s.w.phr %[r7], %[r2], %[r1] \n\t"
+ "muleq_s.w.phl %[r8], %[r2], %[r0] \n\t"
+ "muleq_s.w.phl %[r0], %[r3], %[r0] \n\t"
+ "muleq_s.w.phl %[r3], %[r3], %[r1] \n\t"
+ "muleq_s.w.phl %[r1], %[r2], %[r1] \n\t"
+ "addiu %[k], %[k], -2 \n\t"
+ "addu %[r4], %[r4], %[r6] \n\t"
+ "subu %[r5], %[r7], %[r5] \n\t"
+ "sra %[r4], %[r4], 8 \n\t"
+ "sra %[r5], %[r5], 8 \n\t"
+ "mult $ac0, %[factor], %[r4] \n\t"
+ "mult $ac1, %[factor], %[r5] \n\t"
+ "addu %[r3], %[r8], %[r3] \n\t"
+ "subu %[r0], %[r1], %[r0] \n\t"
+ "sra %[r3], %[r3], 8 \n\t"
+ "sra %[r0], %[r0], 8 \n\t"
+ "mult $ac2, %[factor], %[r3] \n\t"
+ "mult $ac3, %[factor], %[r0] \n\t"
+ "extr_r.w %[r4], $ac0, 16 \n\t"
+ "extr_r.w %[r5], $ac1, 16 \n\t"
+ "addiu %[inre1], %[inre1], 4 \n\t"
+ "addiu %[inre2], %[inre2], 4 \n\t"
+ "extr_r.w %[r6], $ac2, 16 \n\t"
+ "extr_r.w %[r7], $ac3, 16 \n\t"
+ "addiu %[cosptr], %[cosptr], 4 \n\t"
+ "addiu %[sinptr], %[sinptr], 4 \n\t"
+ "shra_r.w %[r4], %[r4], 3 \n\t"
+ "shra_r.w %[r5], %[r5], 3 \n\t"
+ "sw %[r4], 0(%[tmpre]) \n\t"
+ "absq_s.w %[r4], %[r4] \n\t"
+ "sw %[r5], 0(%[tmpim]) \n\t"
+ "absq_s.w %[r5], %[r5] \n\t"
+ "shra_r.w %[r6], %[r6], 3 \n\t"
+ "shra_r.w %[r7], %[r7], 3 \n\t"
+ "sw %[r6], 4(%[tmpre]) \n\t"
+ "absq_s.w %[r6], %[r6] \n\t"
+ "sw %[r7], 4(%[tmpim]) \n\t"
+ "absq_s.w %[r7], %[r7] \n\t"
+ "slt %[r0], %[r4], %[r5] \n\t"
+ "movn %[r4], %[r5], %[r0] \n\t"
+ "slt %[r1], %[r6], %[r7] \n\t"
+ "movn %[r6], %[r7], %[r1] \n\t"
+ "slt %[r0], %[max], %[r4] \n\t"
+ "movn %[max], %[r4], %[r0] \n\t"
+ "slt %[r1], %[max], %[r6] \n\t"
+ "movn %[max], %[r6], %[r1] \n\t"
+ "addiu %[tmpre], %[tmpre], 8 \n\t"
+ "bgtz %[k], 1b \n\t"
+ " addiu %[tmpim], %[tmpim], 8 \n\t"
+#else // #if defined(MIPS_DSP_R2_LE)
+ "lh %[r0], 0(%[inre1]) \n\t"
+ "lh %[r1], 0(%[inre2]) \n\t"
+ "lh %[r2], 0(%[cosptr]) \n\t"
+ "lh %[r3], 0(%[sinptr]) \n\t"
+ "addiu %[k], %[k], -1 \n\t"
+ "mul %[r4], %[r0], %[r2] \n\t"
+ "mul %[r5], %[r1], %[r3] \n\t"
+ "mul %[r0], %[r0], %[r3] \n\t"
+ "mul %[r2], %[r1], %[r2] \n\t"
+ "addiu %[inre1], %[inre1], 2 \n\t"
+ "addiu %[inre2], %[inre2], 2 \n\t"
+ "addiu %[cosptr], %[cosptr], 2 \n\t"
+ "addiu %[sinptr], %[sinptr], 2 \n\t"
+ "addu %[r1], %[r4], %[r5] \n\t"
+ "sra %[r1], %[r1], 7 \n\t"
+ "sra %[r3], %[r1], 16 \n\t"
+ "andi %[r1], %[r1], 0xFFFF \n\t"
+ "sra %[r1], %[r1], 1 \n\t"
+ "mul %[r1], %[factor], %[r1] \n\t"
+ "mul %[r3], %[factor], %[r3] \n\t"
+ "subu %[r0], %[r2], %[r0] \n\t"
+ "sra %[r0], %[r0], 7 \n\t"
+ "sra %[r2], %[r0], 16 \n\t"
+ "andi %[r0], %[r0], 0xFFFF \n\t"
+ "sra %[r0], %[r0], 1 \n\t"
+ "mul %[r0], %[factor], %[r0] \n\t"
+ "mul %[r2], %[factor], %[r2] \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "shra_r.w %[r1], %[r1], 15 \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "addiu %[r1], %[r1], 0x4000 \n\t"
+ "sra %[r1], %[r1], 15 \n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
+ "addu %[r1], %[r3], %[r1] \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "shra_r.w %[r1], %[r1], 3 \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "addiu %[r1], %[r1], 4 \n\t"
+ "sra %[r1], %[r1], 3 \n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
+ "sw %[r1], 0(%[tmpre]) \n\t"
+ "addiu %[tmpre], %[tmpre], 4 \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "absq_s.w %[r1], %[r1] \n\t"
+ "shra_r.w %[r0], %[r0], 15 \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "negu %[r4], %[r1] \n\t"
+ "slt %[r3], %[r1], $zero \n\t"
+ "movn %[r1], %[r4], %[r3] \n\t"
+ "addiu %[r0], %[r0], 0x4000 \n\t"
+ "sra %[r0], %[r0], 15 \n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
+ "addu %[r0], %[r0], %[r2] \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "shra_r.w %[r0], %[r0], 3 \n\t"
+ "sw %[r0], 0(%[tmpim]) \n\t"
+ "absq_s.w %[r0], %[r0] \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "addiu %[r0], %[r0], 4 \n\t"
+ "sra %[r0], %[r0], 3 \n\t"
+ "sw %[r0], 0(%[tmpim]) \n\t"
+ "negu %[r2], %[r0] \n\t"
+ "slt %[r3], %[r0], $zero \n\t"
+ "movn %[r0], %[r2], %[r3] \n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
+ "slt %[r2], %[max], %[r1] \n\t"
+ "movn %[max], %[r1], %[r2] \n\t"
+ "slt %[r2], %[max], %[r0] \n\t"
+ "movn %[max], %[r0], %[r2] \n\t"
+ "bgtz %[k], 1b \n\t"
+ " addiu %[tmpim], %[tmpim], 4 \n\t"
+#endif // #if defined(MIPS_DSP_R2_LE)
+    // Calculate WebRtcSpl_NormW32(max).
+    // If max gets a value >= 0, we should shift max steps to the left, and
+    // the domain will be Q(16+max). If max gets a value < 0, we should shift
+    // -max steps to the right, and the domain will be Q(16+max).
+ "clz %[max], %[max] \n\t"
+ "addiu %[max], %[max], -25 \n\t"
+ ".set pop \n\t"
+ : [k] "+r" (k), [inre1] "=&r" (inre1), [inre2] "=&r" (inre2),
+ [r0] "=&r" (r0), [r1] "=&r" (r1), [r2] "=&r" (r2),
+ [r3] "=&r" (r3), [r4] "=&r" (r4), [tmpre] "=&r" (tmpre),
+ [tmpim] "=&r" (tmpim), [max] "=&r" (max), [factor] "=&r" (factor),
+#if defined(MIPS_DSP_R2_LE)
+ [r6] "=&r" (r6), [r7] "=&r" (r7), [r8] "=&r" (r8),
+#endif // #if defined(MIPS_DSP_R2_LE)
+ [r5] "=&r" (r5)
+ : [inre1Q9] "r" (inre1Q9), [inre2Q9] "r" (inre2Q9),
+ [tmpreQ16] "r" (tmpreQ16), [tmpimQ16] "r" (tmpimQ16),
+ [cosptr] "r" (cosptr), [sinptr] "r" (sinptr)
+ : "hi", "lo", "memory"
+#if defined(MIPS_DSP_R2_LE)
+ , "$ac1hi", "$ac1lo", "$ac2hi", "$ac2lo", "$ac3hi", "$ac3lo"
+#endif // #if defined(MIPS_DSP_R2_LE)
+ );
+
+ // "Fastest" vectors
+ k = FRAMESAMPLES / 4;
+ __asm __volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "addiu %[tmpre], %[tmpreQ16], 0 \n\t"
+ "addiu %[tmpim], %[tmpimQ16], 0 \n\t"
+ "addiu %[inre1], %[inre1Q9], 0 \n\t"
+ "addiu %[inre2], %[inre2Q9], 0 \n\t"
+ "blez %[max], 2f \n\t"
+ " subu %[max1], $zero, %[max] \n\t"
+ "1: \n\t"
+ "lw %[r0], 0(%[tmpre]) \n\t"
+ "lw %[r1], 0(%[tmpim]) \n\t"
+ "lw %[r2], 4(%[tmpre]) \n\t"
+ "lw %[r3], 4(%[tmpim]) \n\t"
+ "addiu %[k], %[k], -1 \n\t"
+ "sllv %[r0], %[r0], %[max] \n\t"
+ "sllv %[r1], %[r1], %[max] \n\t"
+ "sllv %[r2], %[r2], %[max] \n\t"
+ "sllv %[r3], %[r3], %[max] \n\t"
+ "addiu %[tmpre], %[tmpre], 8 \n\t"
+ "addiu %[tmpim], %[tmpim], 8 \n\t"
+ "sh %[r0], 0(%[inre1]) \n\t"
+ "sh %[r1], 0(%[inre2]) \n\t"
+ "sh %[r2], 2(%[inre1]) \n\t"
+ "sh %[r3], 2(%[inre2]) \n\t"
+ "addiu %[inre1], %[inre1], 4 \n\t"
+ "bgtz %[k], 1b \n\t"
+ " addiu %[inre2], %[inre2], 4 \n\t"
+ "b 4f \n\t"
+ " nop \n\t"
+ "2: \n\t"
+#if !defined(MIPS_DSP_R1_LE)
+ "addiu %[r4], %[max1], -1 \n\t"
+ "addiu %[r5], $zero, 1 \n\t"
+ "sllv %[r4], %[r5], %[r4] \n\t"
+#endif // #if !defined(MIPS_DSP_R1_LE)
+ "3: \n\t"
+ "lw %[r0], 0(%[tmpre]) \n\t"
+ "lw %[r1], 0(%[tmpim]) \n\t"
+ "lw %[r2], 4(%[tmpre]) \n\t"
+ "lw %[r3], 4(%[tmpim]) \n\t"
+ "addiu %[k], %[k], -1 \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "shrav_r.w %[r0], %[r0], %[max1] \n\t"
+ "shrav_r.w %[r1], %[r1], %[max1] \n\t"
+ "shrav_r.w %[r2], %[r2], %[max1] \n\t"
+ "shrav_r.w %[r3], %[r3], %[max1] \n\t"
+#else // #if !defined(MIPS_DSP_R1_LE)
+ "addu %[r0], %[r0], %[r4] \n\t"
+ "addu %[r1], %[r1], %[r4] \n\t"
+ "addu %[r2], %[r2], %[r4] \n\t"
+ "addu %[r3], %[r3], %[r4] \n\t"
+ "srav %[r0], %[r0], %[max1] \n\t"
+ "srav %[r1], %[r1], %[max1] \n\t"
+ "srav %[r2], %[r2], %[max1] \n\t"
+ "srav %[r3], %[r3], %[max1] \n\t"
+#endif // #if !defined(MIPS_DSP_R1_LE)
+ "addiu %[tmpre], %[tmpre], 8 \n\t"
+ "addiu %[tmpim], %[tmpim], 8 \n\t"
+ "sh %[r0], 0(%[inre1]) \n\t"
+ "sh %[r1], 0(%[inre2]) \n\t"
+ "sh %[r2], 2(%[inre1]) \n\t"
+ "sh %[r3], 2(%[inre2]) \n\t"
+ "addiu %[inre1], %[inre1], 4 \n\t"
+ "bgtz %[k], 3b \n\t"
+ " addiu %[inre2], %[inre2], 4 \n\t"
+ "4: \n\t"
+ ".set pop \n\t"
+ : [tmpre] "=&r" (tmpre), [tmpim] "=&r" (tmpim), [inre1] "=&r" (inre1),
+ [inre2] "=&r" (inre2), [k] "+r" (k), [max1] "=&r" (max1),
+#if !defined(MIPS_DSP_R1_LE)
+ [r4] "=&r" (r4), [r5] "=&r" (r5),
+#endif // #if !defined(MIPS_DSP_R1_LE)
+ [r0] "=&r" (r0), [r1] "=&r" (r1), [r2] "=&r" (r2), [r3] "=&r" (r3)
+ : [tmpreQ16] "r" (tmpreQ16), [tmpimQ16] "r" (tmpimQ16),
+ [inre1Q9] "r" (inre1Q9), [inre2Q9] "r" (inre2Q9), [max] "r" (max)
+ : "memory"
+ );
+
+ // Get DFT
+ WebRtcIsacfix_FftRadix16Fastest(inre1Q9, inre2Q9, -1); // real call
+
+ // "Fastest" vectors and
+ // Use symmetry to separate into two complex vectors
+ // and center frames in time around zero
+ // merged into one loop
+ cosptr = (int16_t*)WebRtcIsacfix_kCosTab2;
+ sinptr = (int16_t*)WebRtcIsacfix_kSinTab2;
+ k = FRAMESAMPLES / 4;
+ factor = FRAMESAMPLES - 2; // offset for FRAMESAMPLES / 2 - 1 array member
+
+ __asm __volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "addiu %[inre1], %[inre1Q9], 0 \n\t"
+ "addiu %[inre2], %[inre2Q9], 0 \n\t"
+ "addiu %[tmpre], %[outreQ7], 0 \n\t"
+ "addiu %[tmpim], %[outimQ7], 0 \n\t"
+ "bltz %[max], 2f \n\t"
+ " subu %[max1], $zero, %[max] \n\t"
+ "1: \n\t"
+#if !defined(MIPS_DSP_R1_LE)
+ "addu %[r4], %[inre1], %[offset] \n\t"
+ "addu %[r5], %[inre2], %[offset] \n\t"
+#endif // #if !defined(MIPS_DSP_R1_LE)
+ "lh %[r0], 0(%[inre1]) \n\t"
+ "lh %[r1], 0(%[inre2]) \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "lhx %[r2], %[offset](%[inre1]) \n\t"
+ "lhx %[r3], %[offset](%[inre2]) \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "lh %[r2], 0(%[r4]) \n\t"
+ "lh %[r3], 0(%[r5]) \n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
+ "srav %[r0], %[r0], %[max] \n\t"
+ "srav %[r1], %[r1], %[max] \n\t"
+ "srav %[r2], %[r2], %[max] \n\t"
+ "srav %[r3], %[r3], %[max] \n\t"
+ "addu %[r4], %[r0], %[r2] \n\t"
+ "subu %[r0], %[r2], %[r0] \n\t"
+ "subu %[r2], %[r1], %[r3] \n\t"
+ "addu %[r1], %[r1], %[r3] \n\t"
+ "lh %[r3], 0(%[cosptr]) \n\t"
+ "lh %[r5], 0(%[sinptr]) \n\t"
+ "andi %[r6], %[r4], 0xFFFF \n\t"
+ "sra %[r4], %[r4], 16 \n\t"
+ "mul %[r7], %[r3], %[r6] \n\t"
+ "mul %[r8], %[r3], %[r4] \n\t"
+ "mul %[r6], %[r5], %[r6] \n\t"
+ "mul %[r4], %[r5], %[r4] \n\t"
+ "addiu %[k], %[k], -1 \n\t"
+ "addiu %[inre1], %[inre1], 2 \n\t"
+ "addiu %[inre2], %[inre2], 2 \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "shra_r.w %[r7], %[r7], 14 \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "addiu %[r7], %[r7], 0x2000 \n\t"
+ "sra %[r7], %[r7], 14 \n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
+ "sll %[r8], %[r8], 2 \n\t"
+ "addu %[r8], %[r8], %[r7] \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "shra_r.w %[r6], %[r6], 14 \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "addiu %[r6], %[r6], 0x2000 \n\t"
+ "sra %[r6], %[r6], 14 \n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
+ "sll %[r4], %[r4], 2 \n\t"
+ "addu %[r4], %[r4], %[r6] \n\t"
+ "andi %[r6], %[r2], 0xFFFF \n\t"
+ "sra %[r2], %[r2], 16 \n\t"
+ "mul %[r7], %[r5], %[r6] \n\t"
+ "mul %[r9], %[r5], %[r2] \n\t"
+ "mul %[r6], %[r3], %[r6] \n\t"
+ "mul %[r2], %[r3], %[r2] \n\t"
+ "addiu %[cosptr], %[cosptr], 2 \n\t"
+ "addiu %[sinptr], %[sinptr], 2 \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "shra_r.w %[r7], %[r7], 14 \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "addiu %[r7], %[r7], 0x2000 \n\t"
+ "sra %[r7], %[r7], 14 \n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
+ "sll %[r9], %[r9], 2 \n\t"
+ "addu %[r9], %[r7], %[r9] \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "shra_r.w %[r6], %[r6], 14 \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "addiu %[r6], %[r6], 0x2000 \n\t"
+ "sra %[r6], %[r6], 14 \n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
+ "sll %[r2], %[r2], 2 \n\t"
+ "addu %[r2], %[r6], %[r2] \n\t"
+ "subu %[r8], %[r8], %[r9] \n\t"
+ "sra %[r8], %[r8], 9 \n\t"
+ "addu %[r2], %[r4], %[r2] \n\t"
+ "sra %[r2], %[r2], 9 \n\t"
+ "sh %[r8], 0(%[tmpre]) \n\t"
+ "sh %[r2], 0(%[tmpim]) \n\t"
+
+ "andi %[r4], %[r1], 0xFFFF \n\t"
+ "sra %[r1], %[r1], 16 \n\t"
+ "andi %[r6], %[r0], 0xFFFF \n\t"
+ "sra %[r0], %[r0], 16 \n\t"
+ "mul %[r7], %[r5], %[r4] \n\t"
+ "mul %[r9], %[r5], %[r1] \n\t"
+ "mul %[r4], %[r3], %[r4] \n\t"
+ "mul %[r1], %[r3], %[r1] \n\t"
+ "mul %[r8], %[r3], %[r0] \n\t"
+ "mul %[r3], %[r3], %[r6] \n\t"
+ "mul %[r6], %[r5], %[r6] \n\t"
+ "mul %[r0], %[r5], %[r0] \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "shra_r.w %[r7], %[r7], 14 \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "addiu %[r7], %[r7], 0x2000 \n\t"
+ "sra %[r7], %[r7], 14 \n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
+ "sll %[r9], %[r9], 2 \n\t"
+ "addu %[r9], %[r9], %[r7] \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "shra_r.w %[r4], %[r4], 14 \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "addiu %[r4], %[r4], 0x2000 \n\t"
+ "sra %[r4], %[r4], 14 \n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
+ "sll %[r1], %[r1], 2 \n\t"
+ "addu %[r1], %[r1], %[r4] \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "shra_r.w %[r3], %[r3], 14 \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "addiu %[r3], %[r3], 0x2000 \n\t"
+ "sra %[r3], %[r3], 14 \n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
+ "sll %[r8], %[r8], 2 \n\t"
+ "addu %[r8], %[r8], %[r3] \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "shra_r.w %[r6], %[r6], 14 \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "addiu %[r6], %[r6], 0x2000 \n\t"
+ "sra %[r6], %[r6], 14 \n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
+ "sll %[r0], %[r0], 2 \n\t"
+ "addu %[r0], %[r0], %[r6] \n\t"
+ "addu %[r3], %[tmpre], %[offset] \n\t"
+ "addu %[r2], %[tmpim], %[offset] \n\t"
+ "addu %[r9], %[r9], %[r8] \n\t"
+ "negu %[r9], %[r9] \n\t"
+ "sra %[r9], %[r9], 9 \n\t"
+ "subu %[r0], %[r0], %[r1] \n\t"
+ "addiu %[offset], %[offset], -4 \n\t"
+ "sh %[r9], 0(%[r3]) \n\t"
+ "sh %[r0], 0(%[r2]) \n\t"
+ "addiu %[tmpre], %[tmpre], 2 \n\t"
+ "bgtz %[k], 1b \n\t"
+ " addiu %[tmpim], %[tmpim], 2 \n\t"
+ "b 3f \n\t"
+ " nop \n\t"
+ "2: \n\t"
+#if !defined(MIPS_DSP_R1_LE)
+ "addu %[r4], %[inre1], %[offset] \n\t"
+ "addu %[r5], %[inre2], %[offset] \n\t"
+#endif // #if !defined(MIPS_DSP_R1_LE)
+ "lh %[r0], 0(%[inre1]) \n\t"
+ "lh %[r1], 0(%[inre2]) \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "lhx %[r2], %[offset](%[inre1]) \n\t"
+ "lhx %[r3], %[offset](%[inre2]) \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "lh %[r2], 0(%[r4]) \n\t"
+ "lh %[r3], 0(%[r5]) \n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
+ "sllv %[r0], %[r0], %[max1] \n\t"
+ "sllv %[r1], %[r1], %[max1] \n\t"
+ "sllv %[r2], %[r2], %[max1] \n\t"
+ "sllv %[r3], %[r3], %[max1] \n\t"
+ "addu %[r4], %[r0], %[r2] \n\t"
+ "subu %[r0], %[r2], %[r0] \n\t"
+ "subu %[r2], %[r1], %[r3] \n\t"
+ "addu %[r1], %[r1], %[r3] \n\t"
+ "lh %[r3], 0(%[cosptr]) \n\t"
+ "lh %[r5], 0(%[sinptr]) \n\t"
+ "andi %[r6], %[r4], 0xFFFF \n\t"
+ "sra %[r4], %[r4], 16 \n\t"
+ "mul %[r7], %[r3], %[r6] \n\t"
+ "mul %[r8], %[r3], %[r4] \n\t"
+ "mul %[r6], %[r5], %[r6] \n\t"
+ "mul %[r4], %[r5], %[r4] \n\t"
+ "addiu %[k], %[k], -1 \n\t"
+ "addiu %[inre1], %[inre1], 2 \n\t"
+ "addiu %[inre2], %[inre2], 2 \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "shra_r.w %[r7], %[r7], 14 \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "addiu %[r7], %[r7], 0x2000 \n\t"
+ "sra %[r7], %[r7], 14 \n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
+ "sll %[r8], %[r8], 2 \n\t"
+ "addu %[r8], %[r8], %[r7] \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "shra_r.w %[r6], %[r6], 14 \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "addiu %[r6], %[r6], 0x2000 \n\t"
+ "sra %[r6], %[r6], 14 \n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
+ "sll %[r4], %[r4], 2 \n\t"
+ "addu %[r4], %[r4], %[r6] \n\t"
+ "andi %[r6], %[r2], 0xFFFF \n\t"
+ "sra %[r2], %[r2], 16 \n\t"
+ "mul %[r7], %[r5], %[r6] \n\t"
+ "mul %[r9], %[r5], %[r2] \n\t"
+ "mul %[r6], %[r3], %[r6] \n\t"
+ "mul %[r2], %[r3], %[r2] \n\t"
+ "addiu %[cosptr], %[cosptr], 2 \n\t"
+ "addiu %[sinptr], %[sinptr], 2 \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "shra_r.w %[r7], %[r7], 14 \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "addiu %[r7], %[r7], 0x2000 \n\t"
+ "sra %[r7], %[r7], 14 \n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
+ "sll %[r9], %[r9], 2 \n\t"
+ "addu %[r9], %[r7], %[r9] \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "shra_r.w %[r6], %[r6], 14 \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "addiu %[r6], %[r6], 0x2000 \n\t"
+ "sra %[r6], %[r6], 14 \n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
+ "sll %[r2], %[r2], 2 \n\t"
+ "addu %[r2], %[r6], %[r2] \n\t"
+ "subu %[r8], %[r8], %[r9] \n\t"
+ "sra %[r8], %[r8], 9 \n\t"
+ "addu %[r2], %[r4], %[r2] \n\t"
+ "sra %[r2], %[r2], 9 \n\t"
+ "sh %[r8], 0(%[tmpre]) \n\t"
+ "sh %[r2], 0(%[tmpim]) \n\t"
+ "andi %[r4], %[r1], 0xFFFF \n\t"
+ "sra %[r1], %[r1], 16 \n\t"
+ "andi %[r6], %[r0], 0xFFFF \n\t"
+ "sra %[r0], %[r0], 16 \n\t"
+ "mul %[r7], %[r5], %[r4] \n\t"
+ "mul %[r9], %[r5], %[r1] \n\t"
+ "mul %[r4], %[r3], %[r4] \n\t"
+ "mul %[r1], %[r3], %[r1] \n\t"
+ "mul %[r8], %[r3], %[r0] \n\t"
+ "mul %[r3], %[r3], %[r6] \n\t"
+ "mul %[r6], %[r5], %[r6] \n\t"
+ "mul %[r0], %[r5], %[r0] \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "shra_r.w %[r7], %[r7], 14 \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "addiu %[r7], %[r7], 0x2000 \n\t"
+ "sra %[r7], %[r7], 14 \n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
+ "sll %[r9], %[r9], 2 \n\t"
+ "addu %[r9], %[r9], %[r7] \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "shra_r.w %[r4], %[r4], 14 \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "addiu %[r4], %[r4], 0x2000 \n\t"
+ "sra %[r4], %[r4], 14 \n\t"
+#endif
+ "sll %[r1], %[r1], 2 \n\t"
+ "addu %[r1], %[r1], %[r4] \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "shra_r.w %[r3], %[r3], 14 \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "addiu %[r3], %[r3], 0x2000 \n\t"
+ "sra %[r3], %[r3], 14 \n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
+ "sll %[r8], %[r8], 2 \n\t"
+ "addu %[r8], %[r8], %[r3] \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "shra_r.w %[r6], %[r6], 14 \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "addiu %[r6], %[r6], 0x2000 \n\t"
+ "sra %[r6], %[r6], 14 \n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
+ "sll %[r0], %[r0], 2 \n\t"
+ "addu %[r0], %[r0], %[r6] \n\t"
+ "addu %[r3], %[tmpre], %[offset] \n\t"
+ "addu %[r2], %[tmpim], %[offset] \n\t"
+ "addu %[r9], %[r9], %[r8] \n\t"
+ "negu %[r9], %[r9] \n\t"
+ "sra %[r9], %[r9], 9 \n\t"
+ "subu %[r0], %[r0], %[r1] \n\t"
+ "sra %[r0], %[r0], 9 \n\t"
+ "addiu %[offset], %[offset], -4 \n\t"
+ "sh %[r9], 0(%[r3]) \n\t"
+ "sh %[r0], 0(%[r2]) \n\t"
+ "addiu %[tmpre], %[tmpre], 2 \n\t"
+ "bgtz %[k], 2b \n\t"
+ " addiu %[tmpim], %[tmpim], 2 \n\t"
+ "3: \n\t"
+ ".set pop \n\t"
+ : [inre1] "=&r" (inre1), [inre2] "=&r" (inre2), [tmpre] "=&r" (tmpre),
+ [tmpim] "=&r" (tmpim), [offset] "+r" (factor), [k] "+r" (k),
+ [r0] "=&r" (r0), [r1] "=&r" (r1), [r2] "=&r" (r2), [r3] "=&r" (r3),
+ [r4] "=&r" (r4), [r5] "=&r" (r5), [r6] "=&r" (r6), [r7] "=&r" (r7),
+ [r8] "=&r" (r8), [r9] "=&r" (r9), [max1] "=&r" (max1)
+ : [inre1Q9] "r" (inre1Q9), [inre2Q9] "r" (inre2Q9),
+ [outreQ7] "r" (outreQ7), [outimQ7] "r" (outimQ7),
+ [max] "r" (max), [cosptr] "r" (cosptr), [sinptr] "r" (sinptr)
+ : "hi", "lo", "memory"
+ );
+}
+
+void WebRtcIsacfix_Spec2TimeMIPS(int16_t *inreQ7,
+ int16_t *inimQ7,
+ int32_t *outre1Q16,
+ int32_t *outre2Q16) {
+ int k = FRAMESAMPLES / 4;
+ int16_t* inre;
+ int16_t* inim;
+ int32_t* outre1;
+ int32_t* outre2;
+ int16_t* cosptr = (int16_t*)WebRtcIsacfix_kCosTab2;
+ int16_t* sinptr = (int16_t*)WebRtcIsacfix_kSinTab2;
+ int32_t r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, max, max1;
+#if defined(MIPS_DSP_R1_LE)
+ int32_t offset = FRAMESAMPLES - 4;
+#else // #if defined(MIPS_DSP_R1_LE)
+ int32_t offset = FRAMESAMPLES - 2;
+#endif // #if defined(MIPS_DSP_R1_LE)
+
+ __asm __volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "addiu %[inre], %[inreQ7], 0 \n\t"
+ "addiu %[inim] , %[inimQ7], 0 \n\t"
+ "addiu %[outre1], %[outre1Q16], 0 \n\t"
+ "addiu %[outre2], %[outre2Q16], 0 \n\t"
+ "mul %[max], $zero, $zero \n\t"
+ "1: \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ // Process two samples in one iteration avoiding left shift before
+ // multiplication. MaxAbsValueW32 function inlined into the loop.
+ "addu %[r8], %[inre], %[offset] \n\t"
+ "addu %[r9], %[inim], %[offset] \n\t"
+ "lwl %[r4], 0(%[r8]) \n\t"
+ "lwl %[r5], 0(%[r9]) \n\t"
+ "lwl %[r0], 0(%[inre]) \n\t"
+ "lwl %[r1], 0(%[inim]) \n\t"
+ "lwl %[r2], 0(%[cosptr]) \n\t"
+ "lwl %[r3], 0(%[sinptr]) \n\t"
+ "lwr %[r4], 0(%[r8]) \n\t"
+ "lwr %[r5], 0(%[r9]) \n\t"
+ "lwr %[r0], 0(%[inre]) \n\t"
+ "lwr %[r1], 0(%[inim]) \n\t"
+ "lwr %[r2], 0(%[cosptr]) \n\t"
+ "lwr %[r3], 0(%[sinptr]) \n\t"
+ "packrl.ph %[r4], %[r4], %[r4] \n\t"
+ "packrl.ph %[r5], %[r5], %[r5] \n\t"
+ "muleq_s.w.phr %[r6], %[r0], %[r2] \n\t"
+ "muleq_s.w.phr %[r7], %[r1], %[r3] \n\t"
+ "muleq_s.w.phr %[r8], %[r4], %[r2] \n\t"
+ "muleq_s.w.phr %[r9], %[r5], %[r3] \n\t"
+ "addiu %[k], %[k], -2 \n\t"
+ "addiu %[cosptr], %[cosptr], 4 \n\t"
+ "addiu %[sinptr], %[sinptr], 4 \n\t"
+ "addiu %[inre], %[inre], 4 \n\t"
+ "addiu %[inim], %[inim], 4 \n\t"
+ "shra_r.w %[r6], %[r6], 6 \n\t"
+ "shra_r.w %[r7], %[r7], 6 \n\t"
+ "shra_r.w %[r8], %[r8], 6 \n\t"
+ "shra_r.w %[r9], %[r9], 6 \n\t"
+ "addu %[r6], %[r6], %[r7] \n\t"
+ "subu %[r9], %[r9], %[r8] \n\t"
+ "subu %[r7], %[r6], %[r9] \n\t"
+ "addu %[r6], %[r6], %[r9] \n\t"
+ "sw %[r7], 0(%[outre1]) \n\t"
+ "absq_s.w %[r7], %[r7] \n\t"
+ "slt %[r8], %[max], %[r7] \n\t"
+ "movn %[max], %[r7], %[r8] \n\t"
+ "sll %[r7], %[offset], 1 \n\t"
+ "addu %[r7], %[outre1], %[r7] \n\t"
+ "sw %[r6], 4(%[r7]) \n\t"
+ "absq_s.w %[r6], %[r6] \n\t"
+ "slt %[r8], %[max], %[r6] \n\t"
+ "movn %[max], %[r6], %[r8] \n\t"
+ "muleq_s.w.phl %[r6], %[r0], %[r2] \n\t"
+ "muleq_s.w.phl %[r7], %[r1], %[r3] \n\t"
+ "muleq_s.w.phl %[r8], %[r4], %[r2] \n\t"
+ "muleq_s.w.phl %[r9], %[r5], %[r3] \n\t"
+ "shra_r.w %[r6], %[r6], 6 \n\t"
+ "shra_r.w %[r7], %[r7], 6 \n\t"
+ "shra_r.w %[r8], %[r8], 6 \n\t"
+ "shra_r.w %[r9], %[r9], 6 \n\t"
+ "addu %[r6], %[r6], %[r7] \n\t"
+ "subu %[r9], %[r9], %[r8] \n\t"
+ "subu %[r7], %[r6], %[r9] \n\t"
+ "addu %[r6], %[r6], %[r9] \n\t"
+ "sw %[r7], 4(%[outre1]) \n\t"
+ "absq_s.w %[r7], %[r7] \n\t"
+ "slt %[r8], %[max], %[r7] \n\t"
+ "movn %[max], %[r7], %[r8] \n\t"
+ "sll %[r7], %[offset], 1 \n\t"
+ "addu %[r7], %[outre1], %[r7] \n\t"
+ "sw %[r6], 0(%[r7]) \n\t"
+ "absq_s.w %[r6], %[r6] \n\t"
+ "slt %[r8], %[max], %[r6] \n\t"
+ "movn %[max], %[r6], %[r8] \n\t"
+ "muleq_s.w.phr %[r6], %[r1], %[r2] \n\t"
+ "muleq_s.w.phr %[r7], %[r0], %[r3] \n\t"
+ "muleq_s.w.phr %[r8], %[r5], %[r2] \n\t"
+ "muleq_s.w.phr %[r9], %[r4], %[r3] \n\t"
+ "addiu %[outre1], %[outre1], 8 \n\t"
+ "shra_r.w %[r6], %[r6], 6 \n\t"
+ "shra_r.w %[r7], %[r7], 6 \n\t"
+ "shra_r.w %[r8], %[r8], 6 \n\t"
+ "shra_r.w %[r9], %[r9], 6 \n\t"
+ "subu %[r6], %[r6], %[r7] \n\t"
+ "addu %[r9], %[r9], %[r8] \n\t"
+ "subu %[r7], %[r6], %[r9] \n\t"
+ "addu %[r6], %[r9], %[r6] \n\t"
+ "negu %[r6], %[r6] \n\t"
+ "sw %[r7], 0(%[outre2]) \n\t"
+ "absq_s.w %[r7], %[r7] \n\t"
+ "slt %[r8], %[max], %[r7] \n\t"
+ "movn %[max], %[r7], %[r8] \n\t"
+ "sll %[r7], %[offset], 1 \n\t"
+ "addu %[r7], %[outre2], %[r7] \n\t"
+ "sw %[r6], 4(%[r7]) \n\t"
+ "absq_s.w %[r6], %[r6] \n\t"
+ "slt %[r8], %[max], %[r6] \n\t"
+ "movn %[max], %[r6], %[r8] \n\t"
+ "muleq_s.w.phl %[r6], %[r1], %[r2] \n\t"
+ "muleq_s.w.phl %[r7], %[r0], %[r3] \n\t"
+ "muleq_s.w.phl %[r8], %[r5], %[r2] \n\t"
+ "muleq_s.w.phl %[r9], %[r4], %[r3] \n\t"
+ "addiu %[offset], %[offset], -8 \n\t"
+ "shra_r.w %[r6], %[r6], 6 \n\t"
+ "shra_r.w %[r7], %[r7], 6 \n\t"
+ "shra_r.w %[r8], %[r8], 6 \n\t"
+ "shra_r.w %[r9], %[r9], 6 \n\t"
+ "subu %[r6], %[r6], %[r7] \n\t"
+ "addu %[r9], %[r9], %[r8] \n\t"
+ "subu %[r7], %[r6], %[r9] \n\t"
+ "addu %[r6], %[r9], %[r6] \n\t"
+ "negu %[r6], %[r6] \n\t"
+ "sw %[r7], 4(%[outre2]) \n\t"
+ "absq_s.w %[r7], %[r7] \n\t"
+ "slt %[r8], %[max], %[r7] \n\t"
+ "movn %[max], %[r7], %[r8] \n\t"
+ "sll %[r7], %[offset], 1 \n\t"
+ "addu %[r7], %[outre2], %[r7] \n\t"
+ "sw %[r6], 0(%[r7]) \n\t"
+ "absq_s.w %[r6], %[r6] \n\t"
+ "slt %[r8], %[max], %[r6] \n\t"
+ "movn %[max], %[r6], %[r8] \n\t"
+ "bgtz %[k], 1b \n\t"
+ " addiu %[outre2], %[outre2], 8 \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "lh %[r0], 0(%[inre]) \n\t"
+ "lh %[r1], 0(%[inim]) \n\t"
+ "lh %[r4], 0(%[cosptr]) \n\t"
+ "lh %[r5], 0(%[sinptr]) \n\t"
+ "addiu %[k], %[k], -1 \n\t"
+ "mul %[r2], %[r0], %[r4] \n\t"
+ "mul %[r0], %[r0], %[r5] \n\t"
+ "mul %[r3], %[r1], %[r5] \n\t"
+ "mul %[r1], %[r1], %[r4] \n\t"
+ "addiu %[cosptr], %[cosptr], 2 \n\t"
+ "addiu %[sinptr], %[sinptr], 2 \n\t"
+ "addu %[r8], %[inre], %[offset] \n\t"
+ "addu %[r9], %[inim], %[offset] \n\t"
+ "addiu %[r2], %[r2], 16 \n\t"
+ "sra %[r2], %[r2], 5 \n\t"
+ "addiu %[r0], %[r0], 16 \n\t"
+ "sra %[r0], %[r0], 5 \n\t"
+ "addiu %[r3], %[r3], 16 \n\t"
+ "sra %[r3], %[r3], 5 \n\t"
+ "lh %[r6], 0(%[r8]) \n\t"
+ "lh %[r7], 0(%[r9]) \n\t"
+ "addiu %[r1], %[r1], 16 \n\t"
+ "sra %[r1], %[r1], 5 \n\t"
+ "mul %[r8], %[r7], %[r4] \n\t"
+ "mul %[r7], %[r7], %[r5] \n\t"
+ "mul %[r9], %[r6], %[r4] \n\t"
+ "mul %[r6], %[r6], %[r5] \n\t"
+ "addu %[r2], %[r2], %[r3] \n\t"
+ "subu %[r1], %[r1], %[r0] \n\t"
+ "sll %[r0], %[offset], 1 \n\t"
+ "addu %[r4], %[outre1], %[r0] \n\t"
+ "addu %[r5], %[outre2], %[r0] \n\t"
+ "addiu %[r8], %[r8], 16 \n\t"
+ "sra %[r8], %[r8], 5 \n\t"
+ "addiu %[r7], %[r7], 16 \n\t"
+ "sra %[r7], %[r7], 5 \n\t"
+ "addiu %[r6], %[r6], 16 \n\t"
+ "sra %[r6], %[r6], 5 \n\t"
+ "addiu %[r9], %[r9], 16 \n\t"
+ "sra %[r9], %[r9], 5 \n\t"
+ "addu %[r8], %[r8], %[r6] \n\t"
+ "negu %[r8], %[r8] \n\t"
+ "subu %[r7], %[r7], %[r9] \n\t"
+ "subu %[r6], %[r2], %[r7] \n\t"
+ "addu %[r0], %[r2], %[r7] \n\t"
+ "addu %[r3], %[r1], %[r8] \n\t"
+ "subu %[r1], %[r8], %[r1] \n\t"
+ "sw %[r6], 0(%[outre1]) \n\t"
+ "sw %[r0], 0(%[r4]) \n\t"
+ "sw %[r3], 0(%[outre2]) \n\t"
+ "sw %[r1], 0(%[r5]) \n\t"
+ "addiu %[outre1], %[outre1], 4 \n\t"
+ "addiu %[offset], %[offset], -4 \n\t"
+ "addiu %[inre], %[inre], 2 \n\t"
+ "addiu %[inim], %[inim], 2 \n\t"
+ // Inlined WebRtcSpl_MaxAbsValueW32
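+ // The negu/slt/movn triples compute absolute values without branches, and
+ // the later slt/movn pairs keep the larger value (if (a < b) a = b).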
+ "negu %[r5], %[r6] \n\t"
+ "slt %[r2], %[r6], $zero \n\t"
+ "movn %[r6], %[r5], %[r2] \n\t"
+ "negu %[r5], %[r0] \n\t"
+ "slt %[r2], %[r0], $zero \n\t"
+ "movn %[r0], %[r5], %[r2] \n\t"
+ "negu %[r5], %[r3] \n\t"
+ "slt %[r2], %[r3], $zero \n\t"
+ "movn %[r3], %[r5], %[r2] \n\t"
+ "negu %[r5], %[r1] \n\t"
+ "slt %[r2], %[r1], $zero \n\t"
+ "movn %[r1], %[r5], %[r2] \n\t"
+ "slt %[r2], %[r6], %[r0] \n\t"
+ "slt %[r5], %[r3], %[r1] \n\t"
+ "movn %[r6], %[r0], %[r2] \n\t"
+ "movn %[r3], %[r1], %[r5] \n\t"
+ "slt %[r2], %[r6], %[r3] \n\t"
+ "movn %[r6], %[r3], %[r2] \n\t"
+ "slt %[r2], %[max], %[r6] \n\t"
+ "movn %[max], %[r6], %[r2] \n\t"
+ "bgtz %[k], 1b \n\t"
+ " addiu %[outre2], %[outre2], 4 \n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
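+ // max now holds the largest absolute value written above; clz(max) - 25
+ // (equal to WebRtcSpl_NormW32(max) - 24) is the shift applied by the next
+ // loop so the Q16 values fit in int16, presumably leaving headroom for
+ // growth inside the fixed-point FFT.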
+ "clz %[max], %[max] \n\t"
+ "addiu %[max], %[max], -25 \n\t"
+ ".set pop \n\t"
+ : [inre] "=&r" (inre), [inim] "=&r" (inim),
+ [outre1] "=&r" (outre1), [outre2] "=&r" (outre2),
+ [offset] "+r" (offset), [k] "+r" (k), [r0] "=&r" (r0),
+ [r1] "=&r" (r1), [r2] "=&r" (r2), [r3] "=&r" (r3),
+ [r4] "=&r" (r4), [r5] "=&r" (r5), [r6] "=&r" (r6),
+ [r7] "=&r" (r7), [r8] "=&r" (r8), [r9] "=&r" (r9),
+ [max] "=&r" (max)
+ : [inreQ7] "r" (inreQ7), [inimQ7] "r" (inimQ7),
+ [cosptr] "r" (cosptr), [sinptr] "r" (sinptr),
+ [outre1Q16] "r" (outre1Q16), [outre2Q16] "r" (outre2Q16)
+ : "hi", "lo", "memory"
+ );
+
+ // "Fastest" vectors
+ k = FRAMESAMPLES / 4;
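+ // Apply the shift computed above: left by max when max >= 0 (loop 1),
+ // otherwise a rounded right shift by -max (loop at label 2); the non-DSP
+ // path rounds manually by adding 2^(max1 - 1) before srav.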
+ __asm __volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "addiu %[inre], %[inreQ7], 0 \n\t"
+ "addiu %[inim], %[inimQ7], 0 \n\t"
+ "addiu %[outre1], %[outre1Q16], 0 \n\t"
+ "addiu %[outre2], %[outre2Q16], 0 \n\t"
+ "bltz %[max], 2f \n\t"
+ " subu %[max1], $zero, %[max] \n\t"
+ "1: \n\t"
+ "lw %[r0], 0(%[outre1]) \n\t"
+ "lw %[r1], 0(%[outre2]) \n\t"
+ "lw %[r2], 4(%[outre1]) \n\t"
+ "lw %[r3], 4(%[outre2]) \n\t"
+ "sllv %[r0], %[r0], %[max] \n\t"
+ "sllv %[r1], %[r1], %[max] \n\t"
+ "sllv %[r2], %[r2], %[max] \n\t"
+ "sllv %[r3], %[r3], %[max] \n\t"
+ "addiu %[k], %[k], -1 \n\t"
+ "addiu %[outre1], %[outre1], 8 \n\t"
+ "addiu %[outre2], %[outre2], 8 \n\t"
+ "sh %[r0], 0(%[inre]) \n\t"
+ "sh %[r1], 0(%[inim]) \n\t"
+ "sh %[r2], 2(%[inre]) \n\t"
+ "sh %[r3], 2(%[inim]) \n\t"
+ "addiu %[inre], %[inre], 4 \n\t"
+ "bgtz %[k], 1b \n\t"
+ " addiu %[inim], %[inim], 4 \n\t"
+ "b 4f \n\t"
+ " nop \n\t"
+ "2: \n\t"
+#if !defined(MIPS_DSP_R1_LE)
+ "addiu %[r4], $zero, 1 \n\t"
+ "addiu %[r5], %[max1], -1 \n\t"
+ "sllv %[r4], %[r4], %[r5] \n\t"
+#endif // #if !defined(MIPS_DSP_R1_LE)
+ "3: \n\t"
+ "lw %[r0], 0(%[outre1]) \n\t"
+ "lw %[r1], 0(%[outre2]) \n\t"
+ "lw %[r2], 4(%[outre1]) \n\t"
+ "lw %[r3], 4(%[outre2]) \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "shrav_r.w %[r0], %[r0], %[max1] \n\t"
+ "shrav_r.w %[r1], %[r1], %[max1] \n\t"
+ "shrav_r.w %[r2], %[r2], %[max1] \n\t"
+ "shrav_r.w %[r3], %[r3], %[max1] \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "addu %[r0], %[r0], %[r4] \n\t"
+ "addu %[r1], %[r1], %[r4] \n\t"
+ "addu %[r2], %[r2], %[r4] \n\t"
+ "addu %[r3], %[r3], %[r4] \n\t"
+ "srav %[r0], %[r0], %[max1] \n\t"
+ "srav %[r1], %[r1], %[max1] \n\t"
+ "srav %[r2], %[r2], %[max1] \n\t"
+ "srav %[r3], %[r3], %[max1] \n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
+ "addiu %[outre1], %[outre1], 8 \n\t"
+ "addiu %[outre2], %[outre2], 8 \n\t"
+ "sh %[r0], 0(%[inre]) \n\t"
+ "sh %[r1], 0(%[inim]) \n\t"
+ "sh %[r2], 2(%[inre]) \n\t"
+ "sh %[r3], 2(%[inim]) \n\t"
+ "addiu %[k], %[k], -1 \n\t"
+ "addiu %[inre], %[inre], 4 \n\t"
+ "bgtz %[k], 3b \n\t"
+ " addiu %[inim], %[inim], 4 \n\t"
+ "4: \n\t"
+ ".set pop \n\t"
+ : [k] "+r" (k), [max1] "=&r" (max1), [r0] "=&r" (r0),
+ [inre] "=&r" (inre), [inim] "=&r" (inim),
+ [outre1] "=&r" (outre1), [outre2] "=&r" (outre2),
+#if !defined(MIPS_DSP_R1_LE)
+ [r4] "=&r" (r4), [r5] "=&r" (r5),
+#endif // #if !defined(MIPS_DSP_R1_LE)
+ [r1] "=&r" (r1), [r2] "=&r" (r2), [r3] "=&r" (r3)
+ : [max] "r" (max), [inreQ7] "r" (inreQ7),
+ [inimQ7] "r" (inimQ7), [outre1Q16] "r" (outre1Q16),
+ [outre2Q16] "r" (outre2Q16)
+ : "memory"
+ );
+
+ WebRtcIsacfix_FftRadix16Fastest(inreQ7, inimQ7, 1); // Get IDFT.
+
+ // All the remaining processing is done inside a single loop to avoid
+ // unnecessary memory accesses. MIPS DSPr2 version processes two samples
+ // at a time.
+ cosptr = (int16_t*)WebRtcIsacfix_kCosTab1;
+ sinptr = (int16_t*)WebRtcIsacfix_kSinTab1;
+ k = FRAMESAMPLES / 2;
+ __asm __volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "addiu %[inre], %[inreQ7], 0 \n\t"
+ "addiu %[inim], %[inimQ7], 0 \n\t"
+ "addiu %[outre1], %[outre1Q16], 0 \n\t"
+ "addiu %[outre2], %[outre2Q16], 0 \n\t"
+ "addiu %[r4], $zero, 273 \n\t"
+ "addiu %[r5], $zero, 31727 \n\t"
+#if defined(MIPS_DSP_R2_LE)
+ "addiu %[max], %[max], 16 \n\t"
+ "replv.ph %[r4], %[r4] \n\t"
+#endif // #if defined(MIPS_DSP_R2_LE)
+ "bltz %[max], 2f \n\t"
+ " subu %[max1], $zero, %[max] \n\t"
+#if defined(MIPS_DSP_R2_LE)
+ "addiu %[max], %[max], 1 \n\t"
+#endif // #if defined(MIPS_DSP_R2_LE)
+ "1: \n\t"
+#if defined(MIPS_DSP_R2_LE)
+ "lwl %[r0], 0(%[inre]) \n\t"
+ "lwl %[r1], 0(%[inim]) \n\t"
+ "lh %[r2], 0(%[cosptr]) \n\t"
+ "lwr %[r0], 0(%[inre]) \n\t"
+ "lwr %[r1], 0(%[inim]) \n\t"
+ "lh %[r3], 0(%[sinptr]) \n\t"
+ "muleq_s.w.phr %[r6], %[r0], %[r4] \n\t"
+ "muleq_s.w.phr %[r7], %[r1], %[r4] \n\t"
+ "muleq_s.w.phl %[r0], %[r0], %[r4] \n\t"
+ "muleq_s.w.phl %[r1], %[r1], %[r4] \n\t"
+ "addiu %[k], %[k], -2 \n\t"
+ "addiu %[inre], %[inre], 4 \n\t"
+ "addiu %[inim], %[inim], 4 \n\t"
+ "shrav_r.w %[r6], %[r6], %[max] \n\t"
+ "shrav_r.w %[r7], %[r7], %[max] \n\t"
+ "mult $ac0, %[r2], %[r6] \n\t"
+ "mult $ac1, %[r3], %[r7] \n\t"
+ "mult $ac2, %[r2], %[r7] \n\t"
+ "mult $ac3, %[r3], %[r6] \n\t"
+ "lh %[r2], 2(%[cosptr]) \n\t"
+ "lh %[r3], 2(%[sinptr]) \n\t"
+ "extr_r.w %[r6], $ac0, 14 \n\t"
+ "extr_r.w %[r7], $ac1, 14 \n\t"
+ "extr_r.w %[r8], $ac2, 14 \n\t"
+ "extr_r.w %[r9], $ac3, 14 \n\t"
+ "shrav_r.w %[r0], %[r0], %[max] \n\t"
+ "shrav_r.w %[r1], %[r1], %[max] \n\t"
+ "mult $ac0, %[r2], %[r0] \n\t"
+ "mult $ac1, %[r3], %[r1] \n\t"
+ "mult $ac2, %[r2], %[r1] \n\t"
+ "mult $ac3, %[r3], %[r0] \n\t"
+ "addiu %[cosptr], %[cosptr], 4 \n\t"
+ "extr_r.w %[r0], $ac0, 14 \n\t"
+ "extr_r.w %[r1], $ac1, 14 \n\t"
+ "extr_r.w %[r2], $ac2, 14 \n\t"
+ "extr_r.w %[r3], $ac3, 14 \n\t"
+ "subu %[r6], %[r6], %[r7] \n\t"
+ "addu %[r8], %[r8], %[r9] \n\t"
+ "mult $ac0, %[r5], %[r6] \n\t"
+ "mult $ac1, %[r5], %[r8] \n\t"
+ "addiu %[sinptr], %[sinptr], 4 \n\t"
+ "subu %[r0], %[r0], %[r1] \n\t"
+ "addu %[r2], %[r2], %[r3] \n\t"
+ "extr_r.w %[r1], $ac0, 11 \n\t"
+ "extr_r.w %[r3], $ac1, 11 \n\t"
+ "mult $ac2, %[r5], %[r0] \n\t"
+ "mult $ac3, %[r5], %[r2] \n\t"
+ "sw %[r1], 0(%[outre1]) \n\t"
+ "sw %[r3], 0(%[outre2]) \n\t"
+ "addiu %[outre1], %[outre1], 8 \n\t"
+ "extr_r.w %[r0], $ac2, 11 \n\t"
+ "extr_r.w %[r2], $ac3, 11 \n\t"
+ "sw %[r0], -4(%[outre1]) \n\t"
+ "sw %[r2], 4(%[outre2]) \n\t"
+ "bgtz %[k], 1b \n\t"
+ " addiu %[outre2], %[outre2], 8 \n\t"
+ "b 3f \n\t"
+#else // #if defined(MIPS_DSP_R2_LE)
+ "lh %[r0], 0(%[inre]) \n\t"
+ "lh %[r1], 0(%[inim]) \n\t"
+ "addiu %[k], %[k], -1 \n\t"
+ "srav %[r0], %[r0], %[max] \n\t"
+ "srav %[r1], %[r1], %[max] \n\t"
+ "sra %[r2], %[r0], 16 \n\t"
+ "andi %[r0], %[r0], 0xFFFF \n\t"
+ "sra %[r0], %[r0], 1 \n\t"
+ "sra %[r3], %[r1], 16 \n\t"
+ "andi %[r1], %[r1], 0xFFFF \n\t"
+ "sra %[r1], %[r1], 1 \n\t"
+ "mul %[r2], %[r2], %[r4] \n\t"
+ "mul %[r0], %[r0], %[r4] \n\t"
+ "mul %[r3], %[r3], %[r4] \n\t"
+ "mul %[r1], %[r1], %[r4] \n\t"
+ "addiu %[inre], %[inre], 2 \n\t"
+ "addiu %[inim], %[inim], 2 \n\t"
+ "lh %[r6], 0(%[cosptr]) \n\t"
+ "lh %[r7], 0(%[sinptr]) \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "shra_r.w %[r0], %[r0], 15 \n\t"
+ "shra_r.w %[r1], %[r1], 15 \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "addiu %[r0], %[r0], 0x4000 \n\t"
+ "addiu %[r1], %[r1], 0x4000 \n\t"
+ "sra %[r0], %[r0], 15 \n\t"
+ "sra %[r1], %[r1], 15 \n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
+ "addu %[r0], %[r2], %[r0] \n\t"
+ "addu %[r1], %[r3], %[r1] \n\t"
+ "sra %[r2], %[r0], 16 \n\t"
+ "andi %[r0], %[r0], 0xFFFF \n\t"
+ "mul %[r9], %[r2], %[r6] \n\t"
+ "mul %[r2], %[r2], %[r7] \n\t"
+ "mul %[r8], %[r0], %[r6] \n\t"
+ "mul %[r0], %[r0], %[r7] \n\t"
+ "sra %[r3], %[r3], 16 \n\t"
+ "andi %[r1], %[r1], 0xFFFF \n\t"
+ "sll %[r9], %[r9], 2 \n\t"
+ "sll %[r2], %[r2], 2 \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "shra_r.w %[r8], %[r8], 14 \n\t"
+ "shra_r.w %[r0], %[r0], 14 \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "addiu %[r8], %[r8], 0x2000 \n\t"
+ "addiu %[r0], %[r0], 0x2000 \n\t"
+ "sra %[r8], %[r8], 14 \n\t"
+ "sra %[r0], %[r0], 14 \n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
+ "addu %[r9], %[r9], %[r8] \n\t"
+ "addu %[r2], %[r2], %[r0] \n\t"
+ "mul %[r0], %[r3], %[r6] \n\t"
+ "mul %[r3], %[r3], %[r7] \n\t"
+ "mul %[r8], %[r1], %[r6] \n\t"
+ "mul %[r1], %[r1], %[r8] \n\t"
+ "addiu %[cosptr], %[cosptr], 2 \n\t"
+ "addiu %[sinptr], %[sinptr], 2 \n\t"
+ "sll %[r0], %[r0], 2 \n\t"
+ "sll %[r3], %[r3], 2 \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "shra_r.w %[r8], %[r8], 14 \n\t"
+ "shra_r.w %[r1], %[r1], 14 \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "addiu %[r8], %[r8], 0x2000 \n\t"
+ "addiu %[r1], %[r1], 0x2000 \n\t"
+ "sra %[r8], %[r8], 14 \n\t"
+ "sra %[r1], %[r1], 14 \n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
+ "addu %[r0], %[r0], %[r8] \n\t"
+ "addu %[r3], %[r3], %[r1] \n\t"
+ "subu %[r9], %[r9], %[r3] \n\t"
+ "addu %[r0], %[r0], %[r2] \n\t"
+ "sra %[r1], %[r9], 16 \n\t"
+ "andi %[r9], %[r9], 0xFFFF \n\t"
+ "mul %[r1], %[r1], %[r5] \n\t"
+ "mul %[r9], %[r9], %[r5] \n\t"
+ "sra %[r2], %[r0], 16 \n\t"
+ "andi %[r0], %[r0], 0xFFFF \n\t"
+ "mul %[r2], %[r2], %[r5] \n\t"
+ "mul %[r0], %[r0], %[r5] \n\t"
+ "sll %[r1], %[r1], 5 \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "shra_r.w %[r9], %[r9], 11 \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "addiu %[r9], %[r9], 0x400 \n\t"
+ "sra %[r9], %[r9], 11 \n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
+ "addu %[r1], %[r1], %[r9] \n\t"
+ "sll %[r2], %[r2], 5 \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "shra_r.w %[r0], %[r0], 11 \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "addiu %[r0], %[r0], 0x400 \n\t"
+ "sra %[r0], %[r0], 11 \n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
+ "addu %[r0], %[r0], %[r2] \n\t"
+ "sw %[r1], 0(%[outre1]) \n\t"
+ "addiu %[outre1], %[outre1], 4 \n\t"
+ "sw %[r0], 0(%[outre2]) \n\t"
+ "bgtz %[k], 1b \n\t"
+ " addiu %[outre2], %[outre2], 4 \n\t"
+ "b 3f \n\t"
+ " nop \n\t"
+#endif // #if defined(MIPS_DSP_R2_LE)
+ "2: \n\t"
+#if defined(MIPS_DSP_R2_LE)
+ "addiu %[max1], %[max1], -1 \n\t"
+ "21: \n\t"
+ "lwl %[r0], 0(%[inre]) \n\t"
+ "lwl %[r1], 0(%[inim]) \n\t"
+ "lh %[r2], 0(%[cosptr]) \n\t"
+ "lwr %[r0], 0(%[inre]) \n\t"
+ "lwr %[r1], 0(%[inim]) \n\t"
+ "lh %[r3], 0(%[sinptr]) \n\t"
+ "muleq_s.w.phr %[r6], %[r0], %[r4] \n\t"
+ "muleq_s.w.phr %[r7], %[r1], %[r4] \n\t"
+ "muleq_s.w.phl %[r0], %[r0], %[r4] \n\t"
+ "muleq_s.w.phl %[r1], %[r1], %[r4] \n\t"
+ "addiu %[k], %[k], -2 \n\t"
+ "addiu %[inre], %[inre], 4 \n\t"
+ "addiu %[inim], %[inim], 4 \n\t"
+ "sllv %[r6], %[r6], %[max1] \n\t"
+ "sllv %[r7], %[r7], %[max1] \n\t"
+ "mult $ac0, %[r2], %[r6] \n\t"
+ "mult $ac1, %[r3], %[r7] \n\t"
+ "mult $ac2, %[r2], %[r7] \n\t"
+ "mult $ac3, %[r3], %[r6] \n\t"
+ "lh %[r2], 2(%[cosptr]) \n\t"
+ "lh %[r3], 2(%[sinptr]) \n\t"
+ "extr_r.w %[r6], $ac0, 14 \n\t"
+ "extr_r.w %[r7], $ac1, 14 \n\t"
+ "extr_r.w %[r8], $ac2, 14 \n\t"
+ "extr_r.w %[r9], $ac3, 14 \n\t"
+ "sllv %[r0], %[r0], %[max1] \n\t"
+ "sllv %[r1], %[r1], %[max1] \n\t"
+ "mult $ac0, %[r2], %[r0] \n\t"
+ "mult $ac1, %[r3], %[r1] \n\t"
+ "mult $ac2, %[r2], %[r1] \n\t"
+ "mult $ac3, %[r3], %[r0] \n\t"
+ "addiu %[cosptr], %[cosptr], 4 \n\t"
+ "extr_r.w %[r0], $ac0, 14 \n\t"
+ "extr_r.w %[r1], $ac1, 14 \n\t"
+ "extr_r.w %[r2], $ac2, 14 \n\t"
+ "extr_r.w %[r3], $ac3, 14 \n\t"
+ "subu %[r6], %[r6], %[r7] \n\t"
+ "addu %[r8], %[r8], %[r9] \n\t"
+ "mult $ac0, %[r5], %[r6] \n\t"
+ "mult $ac1, %[r5], %[r8] \n\t"
+ "addiu %[sinptr], %[sinptr], 4 \n\t"
+ "subu %[r0], %[r0], %[r1] \n\t"
+ "addu %[r2], %[r2], %[r3] \n\t"
+ "extr_r.w %[r1], $ac0, 11 \n\t"
+ "extr_r.w %[r3], $ac1, 11 \n\t"
+ "mult $ac2, %[r5], %[r0] \n\t"
+ "mult $ac3, %[r5], %[r2] \n\t"
+ "sw %[r1], 0(%[outre1]) \n\t"
+ "sw %[r3], 0(%[outre2]) \n\t"
+ "addiu %[outre1], %[outre1], 8 \n\t"
+ "extr_r.w %[r0], $ac2, 11 \n\t"
+ "extr_r.w %[r2], $ac3, 11 \n\t"
+ "sw %[r0], -4(%[outre1]) \n\t"
+ "sw %[r2], 4(%[outre2]) \n\t"
+ "bgtz %[k], 21b \n\t"
+ " addiu %[outre2], %[outre2], 8 \n\t"
+ "b 3f \n\t"
+ " nop \n\t"
+#else // #if defined(MIPS_DSP_R2_LE)
+ "lh %[r0], 0(%[inre]) \n\t"
+ "lh %[r1], 0(%[inim]) \n\t"
+ "addiu %[k], %[k], -1 \n\t"
+ "sllv %[r0], %[r0], %[max1] \n\t"
+ "sllv %[r1], %[r1], %[max1] \n\t"
+ "sra %[r2], %[r0], 16 \n\t"
+ "andi %[r0], %[r0], 0xFFFF \n\t"
+ "sra %[r0], %[r0], 1 \n\t"
+ "sra %[r3], %[r1], 16 \n\t"
+ "andi %[r1], %[r1], 0xFFFF \n\t"
+ "sra %[r1], %[r1], 1 \n\t"
+ "mul %[r2], %[r2], %[r4] \n\t"
+ "mul %[r0], %[r0], %[r4] \n\t"
+ "mul %[r3], %[r3], %[r4] \n\t"
+ "mul %[r1], %[r1], %[r4] \n\t"
+ "addiu %[inre], %[inre], 2 \n\t"
+ "addiu %[inim], %[inim], 2 \n\t"
+ "lh %[r6], 0(%[cosptr]) \n\t"
+ "lh %[r7], 0(%[sinptr]) \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "shra_r.w %[r0], %[r0], 15 \n\t"
+ "shra_r.w %[r1], %[r1], 15 \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "addiu %[r0], %[r0], 0x4000 \n\t"
+ "addiu %[r1], %[r1], 0x4000 \n\t"
+ "sra %[r0], %[r0], 15 \n\t"
+ "sra %[r1], %[r1], 15 \n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
+ "addu %[r0], %[r2], %[r0] \n\t"
+ "addu %[r1], %[r3], %[r1] \n\t"
+ "sra %[r2], %[r0], 16 \n\t"
+ "andi %[r0], %[r0], 0xFFFF \n\t"
+ "mul %[r9], %[r2], %[r6] \n\t"
+ "mul %[r2], %[r2], %[r7] \n\t"
+ "mul %[r8], %[r0], %[r6] \n\t"
+ "mul %[r0], %[r0], %[r7] \n\t"
+ "sra %[r3], %[r1], 16 \n\t"
+ "andi %[r1], %[r1], 0xFFFF \n\t"
+ "sll %[r9], %[r9], 2 \n\t"
+ "sll %[r2], %[r2], 2 \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "shra_r.w %[r8], %[r8], 14 \n\t"
+ "shra_r.w %[r0], %[r0], 14 \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "addiu %[r8], %[r8], 0x2000 \n\t"
+ "addiu %[r0], %[r0], 0x2000 \n\t"
+ "sra %[r8], %[r8], 14 \n\t"
+ "sra %[r0], %[r0], 14 \n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
+ "addu %[r9], %[r9], %[r8] \n\t"
+ "addu %[r2], %[r2], %[r0] \n\t"
+ "mul %[r0], %[r3], %[r6] \n\t"
+ "mul %[r3], %[r3], %[r7] \n\t"
+ "mul %[r8], %[r1], %[r6] \n\t"
+ "mul %[r1], %[r1], %[r7] \n\t"
+ "addiu %[cosptr], %[cosptr], 2 \n\t"
+ "addiu %[sinptr], %[sinptr], 2 \n\t"
+ "sll %[r0], %[r0], 2 \n\t"
+ "sll %[r3], %[r3], 2 \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "shra_r.w %[r8], %[r8], 14 \n\t"
+ "shra_r.w %[r1], %[r1], 14 \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "addiu %[r8], %[r8], 0x2000 \n\t"
+ "addiu %[r1], %[r1], 0x2000 \n\t"
+ "sra %[r8], %[r8], 14 \n\t"
+ "sra %[r1], %[r1], 14 \n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
+ "addu %[r0], %[r0], %[r8] \n\t"
+ "addu %[r3], %[r3], %[r1] \n\t"
+ "subu %[r9], %[r9], %[r3] \n\t"
+ "addu %[r0], %[r0], %[r2] \n\t"
+ "sra %[r1], %[r9], 16 \n\t"
+ "andi %[r9], %[r9], 0xFFFF \n\t"
+ "mul %[r1], %[r1], %[r5] \n\t"
+ "mul %[r9], %[r9], %[r5] \n\t"
+ "sra %[r2], %[r0], 16 \n\t"
+ "andi %[r0], %[r0], 0xFFFF \n\t"
+ "mul %[r2], %[r2], %[r5] \n\t"
+ "mul %[r0], %[r0], %[r5] \n\t"
+ "sll %[r1], %[r1], 5 \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "shra_r.w %[r9], %[r9], 11 \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "addiu %[r9], %[r9], 0x400 \n\t"
+ "sra %[r9], %[r9], 11 \n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
+ "addu %[r1], %[r1], %[r9] \n\t"
+ "sll %[r2], %[r2], 5 \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "shra_r.w %[r0], %[r0], 11 \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "addiu %[r0], %[r0], 0x400 \n\t"
+ "sra %[r0], %[r0], 11 \n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
+ "addu %[r0], %[r0], %[r2] \n\t"
+ "sw %[r1], 0(%[outre1]) \n\t"
+ "addiu %[outre1], %[outre1], 4 \n\t"
+ "sw %[r0], 0(%[outre2]) \n\t"
+ "bgtz %[k], 2b \n\t"
+ " addiu %[outre2], %[outre2], 4 \n\t"
+#endif // #if defined(MIPS_DSP_R2_LE)
+ "3: \n\t"
+ ".set pop \n\t"
+ : [k] "+r" (k), [r0] "=&r" (r0), [r1] "=&r" (r1),
+ [r2] "=&r" (r2), [r3] "=&r" (r3), [r4] "=&r" (r4),
+ [r5] "=&r" (r5), [r6] "=&r" (r6), [r7] "=&r" (r7),
+ [r8] "=&r" (r8), [r9] "=&r" (r9), [max1] "=&r" (max1),
+ [inre] "=&r" (inre), [inim] "=&r" (inim),
+ [outre1] "=&r" (outre1), [outre2] "=&r" (outre2)
+ : [max] "r" (max), [inreQ7] "r" (inreQ7),
+ [inimQ7] "r" (inimQ7), [cosptr] "r" (cosptr),
+ [sinptr] "r" (sinptr), [outre1Q16] "r" (outre1Q16),
+ [outre2Q16] "r" (outre2Q16)
+ : "hi", "lo", "memory"
+#if defined(MIPS_DSP_R2_LE)
+ , "$ac1hi", "$ac1lo", "$ac2hi", "$ac2lo", "$ac3hi", "$ac3lo"
+#endif // #if defined(MIPS_DSP_R2_LE)
+ );
+}
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/transform_neon.c b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/transform_neon.c
new file mode 100644
index 0000000000..79dadc4600
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/transform_neon.c
@@ -0,0 +1,479 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <arm_neon.h>
+
+#include "modules/audio_coding/codecs/isac/fix/source/codec.h"
+#include "modules/audio_coding/codecs/isac/fix/source/fft.h"
+#include "modules/audio_coding/codecs/isac/fix/source/settings.h"
+
+// Tables are defined in transform_tables.c file.
+// Cosine table 1 in Q14.
+extern const int16_t WebRtcIsacfix_kCosTab1[FRAMESAMPLES/2];
+// Sine table 1 in Q14.
+extern const int16_t WebRtcIsacfix_kSinTab1[FRAMESAMPLES/2];
+// Sine table 2 in Q14.
+extern const int16_t WebRtcIsacfix_kSinTab2[FRAMESAMPLES/4];
+
+static inline int32_t ComplexMulAndFindMaxNeon(int16_t* inre1Q9,
+ int16_t* inre2Q9,
+ int32_t* outreQ16,
+ int32_t* outimQ16) {
+ int k;
+ const int16_t* kCosTab = &WebRtcIsacfix_kCosTab1[0];
+ const int16_t* kSinTab = &WebRtcIsacfix_kSinTab1[0];
+ // 0.5 / sqrt(240) in Q19 is round((.5 / sqrt(240)) * (2^19)) = 16921.
+ // Use "16921 << 5" and vqdmulh, instead of ">> 26" as in the C code.
+ int32_t fact = 16921 << 5;
+ int32x4_t factq = vdupq_n_s32(fact);
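+ // vqdmulhq_s32(a, b) returns the high word of the saturated, doubled
+ // product, i.e. (2 * a * b) >> 32, so multiplying by (16921 << 5) yields
+ // (a * 16921) >> 26.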
+ uint32x4_t max_r = vdupq_n_u32(0);
+ uint32x4_t max_i = vdupq_n_u32(0);
+
+ for (k = 0; k < FRAMESAMPLES/2; k += 8) {
+ int16x8_t tmpr = vld1q_s16(kCosTab);
+ int16x8_t tmpi = vld1q_s16(kSinTab);
+ int16x8_t inre1 = vld1q_s16(inre1Q9);
+ int16x8_t inre2 = vld1q_s16(inre2Q9);
+ kCosTab += 8;
+ kSinTab += 8;
+ inre1Q9 += 8;
+ inre2Q9 += 8;
+
+ // Use ">> 26", instead of ">> 7", ">> 16" and then ">> 3" as in the C code.
+ int32x4_t tmp0 = vmull_s16(vget_low_s16(tmpr), vget_low_s16(inre1));
+ int32x4_t tmp1 = vmull_s16(vget_low_s16(tmpr), vget_low_s16(inre2));
+ tmp0 = vmlal_s16(tmp0, vget_low_s16(tmpi), vget_low_s16(inre2));
+ tmp1 = vmlsl_s16(tmp1, vget_low_s16(tmpi), vget_low_s16(inre1));
+#if defined(WEBRTC_ARCH_ARM64)
+ int32x4_t tmp2 = vmull_high_s16(tmpr, inre1);
+ int32x4_t tmp3 = vmull_high_s16(tmpr, inre2);
+ tmp2 = vmlal_high_s16(tmp2, tmpi, inre2);
+ tmp3 = vmlsl_high_s16(tmp3, tmpi, inre1);
+#else
+ int32x4_t tmp2 = vmull_s16(vget_high_s16(tmpr), vget_high_s16(inre1));
+ int32x4_t tmp3 = vmull_s16(vget_high_s16(tmpr), vget_high_s16(inre2));
+ tmp2 = vmlal_s16(tmp2, vget_high_s16(tmpi), vget_high_s16(inre2));
+ tmp3 = vmlsl_s16(tmp3, vget_high_s16(tmpi), vget_high_s16(inre1));
+#endif
+
+ int32x4_t outr_0 = vqdmulhq_s32(tmp0, factq);
+ int32x4_t outr_1 = vqdmulhq_s32(tmp2, factq);
+ int32x4_t outi_0 = vqdmulhq_s32(tmp1, factq);
+ int32x4_t outi_1 = vqdmulhq_s32(tmp3, factq);
+ vst1q_s32(outreQ16, outr_0);
+ outreQ16 += 4;
+ vst1q_s32(outreQ16, outr_1);
+ outreQ16 += 4;
+ vst1q_s32(outimQ16, outi_0);
+ outimQ16 += 4;
+ vst1q_s32(outimQ16, outi_1);
+ outimQ16 += 4;
+
+ // Find the absolute maximum in the vectors.
+ tmp0 = vabsq_s32(outr_0);
+ tmp1 = vabsq_s32(outr_1);
+ tmp2 = vabsq_s32(outi_0);
+ tmp3 = vabsq_s32(outi_1);
+ // vabs doesn't change the value of 0x80000000.
+ // Use u32 so we don't lose the value 0x80000000.
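+ // (As int32, |INT32_MIN| is not representable; reinterpreted as uint32,
+ // 0x80000000 still compares as the largest magnitude.)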
+ max_r = vmaxq_u32(max_r, vreinterpretq_u32_s32(tmp0));
+ max_i = vmaxq_u32(max_i, vreinterpretq_u32_s32(tmp2));
+ max_r = vmaxq_u32(max_r, vreinterpretq_u32_s32(tmp1));
+ max_i = vmaxq_u32(max_i, vreinterpretq_u32_s32(tmp3));
+ }
+
+ max_r = vmaxq_u32(max_r, max_i);
+#if defined(WEBRTC_ARCH_ARM64)
+ uint32_t maximum = vmaxvq_u32(max_r);
+#else
+ uint32x2_t max32x2_r = vmax_u32(vget_low_u32(max_r), vget_high_u32(max_r));
+ max32x2_r = vpmax_u32(max32x2_r, max32x2_r);
+ uint32_t maximum = vget_lane_u32(max32x2_r, 0);
+#endif
+
+ return (int32_t)maximum;
+}
+
+static inline void PreShiftW32toW16Neon(int32_t* inre,
+ int32_t* inim,
+ int16_t* outre,
+ int16_t* outim,
+ int32_t sh) {
+ int k;
+ int32x4_t sh32x4 = vdupq_n_s32(sh);
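+ // vrshlq_s32 with a negative shift count performs a rounding right shift,
+ // and vmovn_s32 below keeps the low 16 bits of each lane.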
+ for (k = 0; k < FRAMESAMPLES/2; k += 16) {
+ int32x4x4_t inre32x4x4 = vld4q_s32(inre);
+ int32x4x4_t inim32x4x4 = vld4q_s32(inim);
+ inre += 16;
+ inim += 16;
+ inre32x4x4.val[0] = vrshlq_s32(inre32x4x4.val[0], sh32x4);
+ inre32x4x4.val[1] = vrshlq_s32(inre32x4x4.val[1], sh32x4);
+ inre32x4x4.val[2] = vrshlq_s32(inre32x4x4.val[2], sh32x4);
+ inre32x4x4.val[3] = vrshlq_s32(inre32x4x4.val[3], sh32x4);
+ inim32x4x4.val[0] = vrshlq_s32(inim32x4x4.val[0], sh32x4);
+ inim32x4x4.val[1] = vrshlq_s32(inim32x4x4.val[1], sh32x4);
+ inim32x4x4.val[2] = vrshlq_s32(inim32x4x4.val[2], sh32x4);
+ inim32x4x4.val[3] = vrshlq_s32(inim32x4x4.val[3], sh32x4);
+ int16x4x4_t outre16x4x4;
+ int16x4x4_t outim16x4x4;
+ outre16x4x4.val[0] = vmovn_s32(inre32x4x4.val[0]);
+ outre16x4x4.val[1] = vmovn_s32(inre32x4x4.val[1]);
+ outre16x4x4.val[2] = vmovn_s32(inre32x4x4.val[2]);
+ outre16x4x4.val[3] = vmovn_s32(inre32x4x4.val[3]);
+ outim16x4x4.val[0] = vmovn_s32(inim32x4x4.val[0]);
+ outim16x4x4.val[1] = vmovn_s32(inim32x4x4.val[1]);
+ outim16x4x4.val[2] = vmovn_s32(inim32x4x4.val[2]);
+ outim16x4x4.val[3] = vmovn_s32(inim32x4x4.val[3]);
+ vst4_s16(outre, outre16x4x4);
+ vst4_s16(outim, outim16x4x4);
+ outre += 16;
+ outim += 16;
+ }
+}
+
+static inline void PostShiftAndSeparateNeon(int16_t* inre,
+ int16_t* inim,
+ int16_t* outre,
+ int16_t* outim,
+ int32_t sh) {
+ int k;
+ int16_t* inre1 = inre;
+ int16_t* inre2 = &inre[FRAMESAMPLES/2 - 4];
+ int16_t* inim1 = inim;
+ int16_t* inim2 = &inim[FRAMESAMPLES/2 - 4];
+ int16_t* outre1 = outre;
+ int16_t* outre2 = &outre[FRAMESAMPLES/2 - 4];
+ int16_t* outim1 = outim;
+ int16_t* outim2 = &outim[FRAMESAMPLES/2 - 4];
+ const int16_t* kSinTab1 = &WebRtcIsacfix_kSinTab2[0];
+ const int16_t* kSinTab2 = &WebRtcIsacfix_kSinTab2[FRAMESAMPLES/4 - 4];
+ // By vshl, we effectively did "<< (-sh - 23)", instead of "<< (-sh)",
+ // ">> 14" and then ">> 9" as in the C code.
+ int32x4_t shift = vdupq_n_s32(-sh - 23);
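+ // Negative lane values make vshlq_s32 shift right instead of left.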
+
+ for (k = 0; k < FRAMESAMPLES/4; k += 4) {
+ int16x4_t tmpi = vld1_s16(kSinTab1);
+ kSinTab1 += 4;
+ int16x4_t tmpr = vld1_s16(kSinTab2);
+ kSinTab2 -= 4;
+ int16x4_t inre_0 = vld1_s16(inre1);
+ inre1 += 4;
+ int16x4_t inre_1 = vld1_s16(inre2);
+ inre2 -= 4;
+ int16x4_t inim_0 = vld1_s16(inim1);
+ inim1 += 4;
+ int16x4_t inim_1 = vld1_s16(inim2);
+ inim2 -= 4;
+ tmpr = vneg_s16(tmpr);
+ inre_1 = vrev64_s16(inre_1);
+ inim_1 = vrev64_s16(inim_1);
+ tmpr = vrev64_s16(tmpr);
+
+ int16x4_t xr = vqadd_s16(inre_0, inre_1);
+ int16x4_t xi = vqsub_s16(inim_0, inim_1);
+ int16x4_t yr = vqadd_s16(inim_0, inim_1);
+ int16x4_t yi = vqsub_s16(inre_1, inre_0);
+
+ int32x4_t outr0 = vmull_s16(tmpr, xr);
+ int32x4_t outi0 = vmull_s16(tmpi, xr);
+ int32x4_t outr1 = vmull_s16(tmpi, yr);
+ int32x4_t outi1 = vmull_s16(tmpi, yi);
+ outr0 = vmlsl_s16(outr0, tmpi, xi);
+ outi0 = vmlal_s16(outi0, tmpr, xi);
+ outr1 = vmlal_s16(outr1, tmpr, yi);
+ outi1 = vmlsl_s16(outi1, tmpr, yr);
+
+ outr0 = vshlq_s32(outr0, shift);
+ outi0 = vshlq_s32(outi0, shift);
+ outr1 = vshlq_s32(outr1, shift);
+ outi1 = vshlq_s32(outi1, shift);
+ outr1 = vnegq_s32(outr1);
+
+ int16x4_t outre_0 = vmovn_s32(outr0);
+ int16x4_t outim_0 = vmovn_s32(outi0);
+ int16x4_t outre_1 = vmovn_s32(outr1);
+ int16x4_t outim_1 = vmovn_s32(outi1);
+ outre_1 = vrev64_s16(outre_1);
+ outim_1 = vrev64_s16(outim_1);
+
+ vst1_s16(outre1, outre_0);
+ outre1 += 4;
+ vst1_s16(outim1, outim_0);
+ outim1 += 4;
+ vst1_s16(outre2, outre_1);
+ outre2 -= 4;
+ vst1_s16(outim2, outim_1);
+ outim2 -= 4;
+ }
+}
+
+void WebRtcIsacfix_Time2SpecNeon(int16_t* inre1Q9,
+ int16_t* inre2Q9,
+ int16_t* outreQ7,
+ int16_t* outimQ7) {
+ int32_t tmpreQ16[FRAMESAMPLES/2], tmpimQ16[FRAMESAMPLES/2];
+ int32_t max;
+ int32_t sh;
+
+ // Multiply with complex exponentials and combine into one complex vector.
+ // And find the maximum.
+ max = ComplexMulAndFindMaxNeon(inre1Q9, inre2Q9, tmpreQ16, tmpimQ16);
+
+ sh = (int32_t)WebRtcSpl_NormW32(max);
+ sh = sh - 24;
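+ // E.g. max = 0x12345 gives WebRtcSpl_NormW32(max) = 14, so sh = -10 and
+ // PreShiftW32toW16Neon() shifts the Q16 values right by 10, into Q6.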
+
+ // If sh becomes >= 0, then we should shift sh steps to the left,
+ // and the domain will become Q(16 + sh).
+ // If sh becomes < 0, then we should shift -sh steps to the right,
+ // and the domain will become Q(16 + sh).
+ PreShiftW32toW16Neon(tmpreQ16, tmpimQ16, inre1Q9, inre2Q9, sh);
+
+ // Get DFT.
+ WebRtcIsacfix_FftRadix16Fastest(inre1Q9, inre2Q9, -1);
+
+ // If sh >= 0, shift sh steps to the right,
+ // If sh < 0, shift -sh steps to the left.
+ // Use symmetry to separate into two complex vectors
+ // and center frames in time around zero.
+ PostShiftAndSeparateNeon(inre1Q9, inre2Q9, outreQ7, outimQ7, sh);
+}
+
+static inline int32_t TransformAndFindMaxNeon(int16_t* inre,
+ int16_t* inim,
+ int32_t* outre,
+ int32_t* outim) {
+ int k;
+ int16_t* inre1 = inre;
+ int16_t* inre2 = &inre[FRAMESAMPLES/2 - 4];
+ int16_t* inim1 = inim;
+ int16_t* inim2 = &inim[FRAMESAMPLES/2 - 4];
+ int32_t* outre1 = outre;
+ int32_t* outre2 = &outre[FRAMESAMPLES/2 - 4];
+ int32_t* outim1 = outim;
+ int32_t* outim2 = &outim[FRAMESAMPLES/2 - 4];
+ const int16_t* kSinTab1 = &WebRtcIsacfix_kSinTab2[0];
+ const int16_t* kSinTab2 = &WebRtcIsacfix_kSinTab2[FRAMESAMPLES/4 - 4];
+ uint32x4_t max_r = vdupq_n_u32(0);
+ uint32x4_t max_i = vdupq_n_u32(0);
+
+ // Use ">> 5", instead of "<< 9" and then ">> 14" as in the C code.
+ for (k = 0; k < FRAMESAMPLES/4; k += 4) {
+ int16x4_t tmpi = vld1_s16(kSinTab1);
+ kSinTab1 += 4;
+ int16x4_t tmpr = vld1_s16(kSinTab2);
+ kSinTab2 -= 4;
+ int16x4_t inre_0 = vld1_s16(inre1);
+ inre1 += 4;
+ int16x4_t inre_1 = vld1_s16(inre2);
+ inre2 -= 4;
+ int16x4_t inim_0 = vld1_s16(inim1);
+ inim1 += 4;
+ int16x4_t inim_1 = vld1_s16(inim2);
+ inim2 -= 4;
+ tmpr = vneg_s16(tmpr);
+ inre_1 = vrev64_s16(inre_1);
+ inim_1 = vrev64_s16(inim_1);
+ tmpr = vrev64_s16(tmpr);
+
+ int32x4_t xr = vmull_s16(tmpr, inre_0);
+ int32x4_t xi = vmull_s16(tmpr, inim_0);
+ int32x4_t yr = vmull_s16(tmpr, inim_1);
+ int32x4_t yi = vmull_s16(tmpi, inim_1);
+ xr = vmlal_s16(xr, tmpi, inim_0);
+ xi = vmlsl_s16(xi, tmpi, inre_0);
+ yr = vmlal_s16(yr, tmpi, inre_1);
+ yi = vmlsl_s16(yi, tmpr, inre_1);
+ yr = vnegq_s32(yr);
+
+ xr = vshrq_n_s32(xr, 5);
+ xi = vshrq_n_s32(xi, 5);
+ yr = vshrq_n_s32(yr, 5);
+ yi = vshrq_n_s32(yi, 5);
+
+ int32x4_t outr0 = vsubq_s32(xr, yi);
+ int32x4_t outr1 = vaddq_s32(xr, yi);
+ int32x4_t outi0 = vaddq_s32(xi, yr);
+ int32x4_t outi1 = vsubq_s32(yr, xi);
+
+ // Find the absolute maximum in the vectors.
+ int32x4_t tmp0 = vabsq_s32(outr0);
+ int32x4_t tmp1 = vabsq_s32(outr1);
+ int32x4_t tmp2 = vabsq_s32(outi0);
+ int32x4_t tmp3 = vabsq_s32(outi1);
+ // vabs doesn't change the value of 0x80000000.
+ // Use u32 so we don't lose the value 0x80000000.
+ max_r = vmaxq_u32(max_r, vreinterpretq_u32_s32(tmp0));
+ max_i = vmaxq_u32(max_i, vreinterpretq_u32_s32(tmp2));
+ max_r = vmaxq_u32(max_r, vreinterpretq_u32_s32(tmp1));
+ max_i = vmaxq_u32(max_i, vreinterpretq_u32_s32(tmp3));
+
+ // Store the vectors.
+ outr1 = vrev64q_s32(outr1);
+ outi1 = vrev64q_s32(outi1);
+ int32x4_t outr_1 = vcombine_s32(vget_high_s32(outr1), vget_low_s32(outr1));
+ int32x4_t outi_1 = vcombine_s32(vget_high_s32(outi1), vget_low_s32(outi1));
+
+ vst1q_s32(outre1, outr0);
+ outre1 += 4;
+ vst1q_s32(outim1, outi0);
+ outim1 += 4;
+ vst1q_s32(outre2, outr_1);
+ outre2 -= 4;
+ vst1q_s32(outim2, outi_1);
+ outim2 -= 4;
+ }
+
+ max_r = vmaxq_u32(max_r, max_i);
+#if defined(WEBRTC_ARCH_ARM64)
+ uint32_t maximum = vmaxvq_u32(max_r);
+#else
+ uint32x2_t max32x2_r = vmax_u32(vget_low_u32(max_r), vget_high_u32(max_r));
+ max32x2_r = vpmax_u32(max32x2_r, max32x2_r);
+ uint32_t maximum = vget_lane_u32(max32x2_r, 0);
+#endif
+
+ return (int32_t)maximum;
+}
+
+static inline void PostShiftAndDivideAndDemodulateNeon(int16_t* inre,
+ int16_t* inim,
+ int32_t* outre1,
+ int32_t* outre2,
+ int32_t sh) {
+ int k;
+ int16_t* p_inre = inre;
+ int16_t* p_inim = inim;
+ int32_t* p_outre1 = outre1;
+ int32_t* p_outre2 = outre2;
+ const int16_t* kCosTab = &WebRtcIsacfix_kCosTab1[0];
+ const int16_t* kSinTab = &WebRtcIsacfix_kSinTab1[0];
+ int32x4_t shift = vdupq_n_s32(-sh - 16);
+ // Divide through by the normalizing constant:
+ // scale all values with 1/240, i.e. with 273 in Q16.
+ // 273/65536 ~= 0.0041656
+ // 1/240 ~= 0.0041666
+ int16x8_t scale = vdupq_n_s16(273);
+ // Sqrt(240) in Q11 is round(15.49193338482967 * 2048) = 31727.
+ int factQ19 = 31727 << 16;
+ int32x4_t fact = vdupq_n_s32(factQ19);
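+ // vqdmulhq_s32(x, 31727 << 16) == (x * (31727 << 16) * 2) >> 32
+ //                              == (x * 31727) >> 15,
+ // i.e. a multiplication by sqrt(240) in Q11, dropping 15 bits.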
+
+ for (k = 0; k < FRAMESAMPLES/2; k += 8) {
+ int16x8_t inre16x8 = vld1q_s16(p_inre);
+ int16x8_t inim16x8 = vld1q_s16(p_inim);
+ p_inre += 8;
+ p_inim += 8;
+ int16x8_t tmpr = vld1q_s16(kCosTab);
+ int16x8_t tmpi = vld1q_s16(kSinTab);
+ kCosTab += 8;
+ kSinTab += 8;
+ // By vshl and vmull, we effectively did "<< (-sh - 16)",
+ // instead of "<< (-sh)" and ">> 16" as in the C code.
+ int32x4_t outre1_0 = vmull_s16(vget_low_s16(inre16x8), vget_low_s16(scale));
+ int32x4_t outre2_0 = vmull_s16(vget_low_s16(inim16x8), vget_low_s16(scale));
+#if defined(WEBRTC_ARCH_ARM64)
+ int32x4_t outre1_1 = vmull_high_s16(inre16x8, scale);
+ int32x4_t outre2_1 = vmull_high_s16(inim16x8, scale);
+#else
+ int32x4_t outre1_1 = vmull_s16(vget_high_s16(inre16x8),
+ vget_high_s16(scale));
+ int32x4_t outre2_1 = vmull_s16(vget_high_s16(inim16x8),
+ vget_high_s16(scale));
+#endif
+
+ outre1_0 = vshlq_s32(outre1_0, shift);
+ outre1_1 = vshlq_s32(outre1_1, shift);
+ outre2_0 = vshlq_s32(outre2_0, shift);
+ outre2_1 = vshlq_s32(outre2_1, shift);
+
+ // Demodulate and separate.
+ int32x4_t tmpr_0 = vmovl_s16(vget_low_s16(tmpr));
+ int32x4_t tmpi_0 = vmovl_s16(vget_low_s16(tmpi));
+#if defined(WEBRTC_ARCH_ARM64)
+ int32x4_t tmpr_1 = vmovl_high_s16(tmpr);
+ int32x4_t tmpi_1 = vmovl_high_s16(tmpi);
+#else
+ int32x4_t tmpr_1 = vmovl_s16(vget_high_s16(tmpr));
+ int32x4_t tmpi_1 = vmovl_s16(vget_high_s16(tmpi));
+#endif
+
+ int64x2_t xr0 = vmull_s32(vget_low_s32(tmpr_0), vget_low_s32(outre1_0));
+ int64x2_t xi0 = vmull_s32(vget_low_s32(tmpr_0), vget_low_s32(outre2_0));
+ int64x2_t xr2 = vmull_s32(vget_low_s32(tmpr_1), vget_low_s32(outre1_1));
+ int64x2_t xi2 = vmull_s32(vget_low_s32(tmpr_1), vget_low_s32(outre2_1));
+ xr0 = vmlsl_s32(xr0, vget_low_s32(tmpi_0), vget_low_s32(outre2_0));
+ xi0 = vmlal_s32(xi0, vget_low_s32(tmpi_0), vget_low_s32(outre1_0));
+ xr2 = vmlsl_s32(xr2, vget_low_s32(tmpi_1), vget_low_s32(outre2_1));
+ xi2 = vmlal_s32(xi2, vget_low_s32(tmpi_1), vget_low_s32(outre1_1));
+
+#if defined(WEBRTC_ARCH_ARM64)
+ int64x2_t xr1 = vmull_high_s32(tmpr_0, outre1_0);
+ int64x2_t xi1 = vmull_high_s32(tmpr_0, outre2_0);
+ int64x2_t xr3 = vmull_high_s32(tmpr_1, outre1_1);
+ int64x2_t xi3 = vmull_high_s32(tmpr_1, outre2_1);
+ xr1 = vmlsl_high_s32(xr1, tmpi_0, outre2_0);
+ xi1 = vmlal_high_s32(xi1, tmpi_0, outre1_0);
+ xr3 = vmlsl_high_s32(xr3, tmpi_1, outre2_1);
+ xi3 = vmlal_high_s32(xi3, tmpi_1, outre1_1);
+#else
+ int64x2_t xr1 = vmull_s32(vget_high_s32(tmpr_0), vget_high_s32(outre1_0));
+ int64x2_t xi1 = vmull_s32(vget_high_s32(tmpr_0), vget_high_s32(outre2_0));
+ int64x2_t xr3 = vmull_s32(vget_high_s32(tmpr_1), vget_high_s32(outre1_1));
+ int64x2_t xi3 = vmull_s32(vget_high_s32(tmpr_1), vget_high_s32(outre2_1));
+ xr1 = vmlsl_s32(xr1, vget_high_s32(tmpi_0), vget_high_s32(outre2_0));
+ xi1 = vmlal_s32(xi1, vget_high_s32(tmpi_0), vget_high_s32(outre1_0));
+ xr3 = vmlsl_s32(xr3, vget_high_s32(tmpi_1), vget_high_s32(outre2_1));
+ xi3 = vmlal_s32(xi3, vget_high_s32(tmpi_1), vget_high_s32(outre1_1));
+#endif
+
+ outre1_0 = vcombine_s32(vrshrn_n_s64(xr0, 10), vrshrn_n_s64(xr1, 10));
+ outre2_0 = vcombine_s32(vrshrn_n_s64(xi0, 10), vrshrn_n_s64(xi1, 10));
+ outre1_1 = vcombine_s32(vrshrn_n_s64(xr2, 10), vrshrn_n_s64(xr3, 10));
+ outre2_1 = vcombine_s32(vrshrn_n_s64(xi2, 10), vrshrn_n_s64(xi3, 10));
+ outre1_0 = vqdmulhq_s32(outre1_0, fact);
+ outre2_0 = vqdmulhq_s32(outre2_0, fact);
+ outre1_1 = vqdmulhq_s32(outre1_1, fact);
+ outre2_1 = vqdmulhq_s32(outre2_1, fact);
+
+ vst1q_s32(p_outre1, outre1_0);
+ p_outre1 += 4;
+ vst1q_s32(p_outre1, outre1_1);
+ p_outre1 += 4;
+ vst1q_s32(p_outre2, outre2_0);
+ p_outre2 += 4;
+ vst1q_s32(p_outre2, outre2_1);
+ p_outre2 += 4;
+ }
+}
+
+void WebRtcIsacfix_Spec2TimeNeon(int16_t* inreQ7,
+ int16_t* inimQ7,
+ int32_t* outre1Q16,
+ int32_t* outre2Q16) {
+ int32_t max;
+ int32_t sh;
+
+ max = TransformAndFindMaxNeon(inreQ7, inimQ7, outre1Q16, outre2Q16);
+
+ sh = (int32_t)WebRtcSpl_NormW32(max);
+ sh = sh - 24;
+ // If sh becomes >= 0, then we should shift sh steps to the left,
+ // and the domain will become Q(16 + sh).
+ // If sh becomes < 0, then we should shift -sh steps to the right,
+ // and the domain will become Q(16 + sh).
+
+ // "Fastest" vectors.
+ PreShiftW32toW16Neon(outre1Q16, outre2Q16, inreQ7, inimQ7, sh);
+
+ // Get IDFT.
+ WebRtcIsacfix_FftRadix16Fastest(inreQ7, inimQ7, 1);
+
+ PostShiftAndDivideAndDemodulateNeon(inreQ7, inimQ7, outre1Q16, outre2Q16, sh);
+}
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/transform_tables.c b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/transform_tables.c
new file mode 100644
index 0000000000..e661effdde
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/transform_tables.c
@@ -0,0 +1,110 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * This file contains trigonometric functions look-up tables used in
+ * transform functions WebRtcIsacfix_Time2Spec and WebRtcIsacfix_Spec2Time.
+ */
+
+#include <stdint.h>
+
+#include "modules/audio_coding/codecs/isac/fix/source/settings.h"
+
+/* Cosine table 1 in Q14. */
+const int16_t WebRtcIsacfix_kCosTab1[FRAMESAMPLES/2] = {
+ 16384, 16383, 16378, 16371, 16362, 16349, 16333, 16315, 16294, 16270,
+ 16244, 16214, 16182, 16147, 16110, 16069, 16026, 15980, 15931, 15880,
+ 15826, 15769, 15709, 15647, 15582, 15515, 15444, 15371, 15296, 15218,
+ 15137, 15053, 14968, 14879, 14788, 14694, 14598, 14500, 14399, 14295,
+ 14189, 14081, 13970, 13856, 13741, 13623, 13502, 13380, 13255, 13128,
+ 12998, 12867, 12733, 12597, 12458, 12318, 12176, 12031, 11885, 11736,
+ 11585, 11433, 11278, 11121, 10963, 10803, 10641, 10477, 10311, 10143,
+ 9974, 9803, 9630, 9456, 9280, 9102, 8923, 8743, 8561, 8377,
+ 8192, 8006, 7818, 7629, 7438, 7246, 7053, 6859, 6664, 6467,
+ 6270, 6071, 5872, 5671, 5469, 5266, 5063, 4859, 4653, 4447,
+ 4240, 4033, 3825, 3616, 3406, 3196, 2986, 2775, 2563, 2351,
+ 2139, 1926, 1713, 1499, 1285, 1072, 857, 643, 429, 214,
+ 0, -214, -429, -643, -857, -1072, -1285, -1499, -1713, -1926,
+ -2139, -2351, -2563, -2775, -2986, -3196, -3406, -3616, -3825, -4033,
+ -4240, -4447, -4653, -4859, -5063, -5266, -5469, -5671, -5872, -6071,
+ -6270, -6467, -6664, -6859, -7053, -7246, -7438, -7629, -7818, -8006,
+ -8192, -8377, -8561, -8743, -8923, -9102, -9280, -9456, -9630, -9803,
+ -9974, -10143, -10311, -10477, -10641, -10803, -10963, -11121, -11278, -11433,
+ -11585, -11736, -11885, -12031, -12176, -12318, -12458, -12597, -12733,
+ -12867, -12998, -13128, -13255, -13380, -13502, -13623, -13741, -13856,
+ -13970, -14081, -14189, -14295, -14399, -14500, -14598, -14694, -14788,
+ -14879, -14968, -15053, -15137, -15218, -15296, -15371, -15444, -15515,
+ -15582, -15647, -15709, -15769, -15826, -15880, -15931, -15980, -16026,
+ -16069, -16110, -16147, -16182, -16214, -16244, -16270, -16294, -16315,
+ -16333, -16349, -16362, -16371, -16378, -16383
+};
+
+/* Sine table 1 in Q14. */
+const int16_t WebRtcIsacfix_kSinTab1[FRAMESAMPLES/2] = {
+ 0, 214, 429, 643, 857, 1072, 1285, 1499, 1713, 1926,
+ 2139, 2351, 2563, 2775, 2986, 3196, 3406, 3616, 3825, 4033,
+ 4240, 4447, 4653, 4859, 5063, 5266, 5469, 5671, 5872, 6071,
+ 6270, 6467, 6664, 6859, 7053, 7246, 7438, 7629, 7818, 8006,
+ 8192, 8377, 8561, 8743, 8923, 9102, 9280, 9456, 9630, 9803,
+ 9974, 10143, 10311, 10477, 10641, 10803, 10963, 11121, 11278, 11433,
+ 11585, 11736, 11885, 12031, 12176, 12318, 12458, 12597, 12733, 12867,
+ 12998, 13128, 13255, 13380, 13502, 13623, 13741, 13856, 13970, 14081,
+ 14189, 14295, 14399, 14500, 14598, 14694, 14788, 14879, 14968, 15053,
+ 15137, 15218, 15296, 15371, 15444, 15515, 15582, 15647, 15709, 15769,
+ 15826, 15880, 15931, 15980, 16026, 16069, 16110, 16147, 16182, 16214,
+ 16244, 16270, 16294, 16315, 16333, 16349, 16362, 16371, 16378, 16383,
+ 16384, 16383, 16378, 16371, 16362, 16349, 16333, 16315, 16294, 16270,
+ 16244, 16214, 16182, 16147, 16110, 16069, 16026, 15980, 15931, 15880,
+ 15826, 15769, 15709, 15647, 15582, 15515, 15444, 15371, 15296, 15218,
+ 15137, 15053, 14968, 14879, 14788, 14694, 14598, 14500, 14399, 14295,
+ 14189, 14081, 13970, 13856, 13741, 13623, 13502, 13380, 13255, 13128,
+ 12998, 12867, 12733, 12597, 12458, 12318, 12176, 12031, 11885, 11736,
+ 11585, 11433, 11278, 11121, 10963, 10803, 10641, 10477, 10311, 10143,
+ 9974, 9803, 9630, 9456, 9280, 9102, 8923, 8743, 8561, 8377,
+ 8192, 8006, 7818, 7629, 7438, 7246, 7053, 6859, 6664, 6467,
+ 6270, 6071, 5872, 5671, 5469, 5266, 5063, 4859, 4653, 4447,
+ 4240, 4033, 3825, 3616, 3406, 3196, 2986, 2775, 2563, 2351,
+ 2139, 1926, 1713, 1499, 1285, 1072, 857, 643, 429, 214
+};
+
+/* Sine table 2 in Q14. */
+const int16_t WebRtcIsacfix_kSinTab2[FRAMESAMPLES/4] = {
+ 16384, -16381, 16375, -16367, 16356, -16342, 16325, -16305, 16283, -16257,
+ 16229, -16199, 16165, -16129, 16090, -16048, 16003, -15956, 15906, -15853,
+ 15798, -15739, 15679, -15615, 15549, -15480, 15408, -15334, 15257, -15178,
+ 15095, -15011, 14924, -14834, 14741, -14647, 14549, -14449, 14347, -14242,
+ 14135, -14025, 13913, -13799, 13682, -13563, 13441, -13318, 13192, -13063,
+ 12933, -12800, 12665, -12528, 12389, -12247, 12104, -11958, 11810, -11661,
+ 11509, -11356, 11200, -11042, 10883, -10722, 10559, -10394, 10227, -10059,
+ 9889, -9717, 9543, -9368, 9191, -9013, 8833, -8652, 8469, -8285,
+ 8099, -7912, 7723, -7534, 7342, -7150, 6957, -6762, 6566, -6369,
+ 6171, -5971, 5771, -5570, 5368, -5165, 4961, -4756, 4550, -4344,
+ 4137, -3929, 3720, -3511, 3301, -3091, 2880, -2669, 2457, -2245,
+ 2032, -1819, 1606, -1392, 1179, -965, 750, -536, 322, -107
+};
+
+#if defined(MIPS32_LE)
+/* Cosine table 2 in Q14. Used only on MIPS platforms. */
+const int16_t WebRtcIsacfix_kCosTab2[FRAMESAMPLES/4] = {
+ 107, -322, 536, -750, 965, -1179, 1392, -1606, 1819, -2032,
+ 2245, -2457, 2669, -2880, 3091, -3301, 3511, -3720, 3929, -4137,
+ 4344, -4550, 4756, -4961, 5165, -5368, 5570, -5771, 5971, -6171,
+ 6369, -6566, 6762, -6957, 7150, -7342, 7534, -7723, 7912, -8099,
+ 8285, -8469, 8652, -8833, 9013, -9191, 9368, -9543, 9717, -9889,
+ 10059, -10227, 10394, -10559, 10722, -10883, 11042, -11200, 11356, -11509,
+ 11661, -11810, 11958, -12104, 12247, -12389, 12528, -12665, 12800, -12933,
+ 13063, -13192, 13318, -13441, 13563, -13682, 13799, -13913, 14025, -14135,
+ 14242, -14347, 14449, -14549, 14647, -14741, 14834, -14924, 15011, -15095,
+ 15178, -15257, 15334, -15408, 15480, -15549, 15615, -15679, 15739, -15798,
+ 15853, -15906, 15956, -16003, 16048, -16090, 16129, -16165, 16199, -16229,
+ 16257, -16283, 16305, -16325, 16342, -16356, 16367, -16375, 16381, -16384
+};
+#endif
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/transform_unittest.cc b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/transform_unittest.cc
new file mode 100644
index 0000000000..433ec534fe
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/transform_unittest.cc
@@ -0,0 +1,199 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/isac/fix/source/codec.h"
+#include "system_wrappers/include/cpu_features_wrapper.h"
+#include "test/gtest.h"
+
+static const int kSamples = FRAMESAMPLES / 2;
+static const int32_t spec2time_out_expected_1[kSamples] = {
+ -3366470, -2285227, -3415765, -2310215, -3118030, -2222470, -3030254,
+ -2192091, -3423170, -2216041, -3305541, -2171936, -3195767, -2095779,
+ -3153304, -2157560, -3071167, -2032108, -3101190, -1972016, -3103824,
+ -2089118, -3139811, -1898337, -3102801, -2055082, -3029665, -1854140,
+ -2962586, -1966454, -3071167, -1894588, -2851743, -1917315, -2848087,
+ -1594932, -2799242, -1462184, -2845887, -1437599, -2691776, -1329637,
+ -2770659, -1268491, -2625161, -1578991, -2460299, -1186385, -2365613,
+ -1039354, -2322608, -958518, -2271749, -789860, -2254538, -850308,
+ -2384436, -850959, -2133734, -587678, -2093316, -495115, -1973364,
+ -475177, -1801282, -173507, -1848516, -158015, -1792018, -62648,
+ -1643313, 214746, -1500758, 267077, -1450193, 560521, -1521579,
+ 675283, -1345408, 857559, -1300822, 1116332, -1294533, 1241117,
+ -1070027, 1263503, -983816, 1529821, -1019586, 1910421, -955420,
+ 2073688, -836459, 2401105, -653905, 2690474, -731425, 2930131,
+ -935234, 3299500, -875978, 3523432, -878906, 3924822, -1081630,
+ 4561267, -1203023, 5105274, -1510983, 6052762, -2294646, 7021597,
+ -3108053, 8826736, -4935222, 11678789, -8442713, 18725700, -21526692,
+ 25420577, 19589811, -28108666, 12634054, -14483066, 6263217, -9979706,
+ 3665661, -7909736, 2531530, -6434896, 1700772, -5525393, 1479473,
+ -4894262, 1231760, -4353044, 1032940, -3786590, 941152, -3331614,
+ 665090, -2851619, 830696, -2762201, 958007, -2483118, 788233,
+ -2184965, 804825, -1967306, 1007255, -1862474, 920889, -1457506,
+ 755406, -1405841, 890230, -1302124, 1161599, -701867, 1154163,
+ -1083366, 1204743, -513581, 1547264, -650636, 1493384, -285543,
+ 1771863, -277906, 1841343, -9078, 1751863, 230222, 1819578,
+ 207170, 1978972, 398137, 2106468, 552155, 1997624, 685213,
+ 2129520, 601078, 2238736, 944591, 2441879, 1194178, 2355280,
+ 986124, 2393328, 1049005, 2417944, 1208368, 2489516, 1352023,
+ 2572118, 1445283, 2856081, 1532997, 2742279, 1615877, 2915274,
+ 1808036, 2856871, 1806936, 3241747, 1622461, 2978558, 1841297,
+ 3010378, 1923666, 3271367, 2126700, 3070935, 1956958, 3107588,
+ 2128405, 3288872, 2114911, 3315952, 2406651, 3344038, 2370199,
+ 3368980, 2144361, 3305030, 2183803, 3401450, 2523102, 3405463,
+ 2452475, 3463355, 2421678, 3551968, 2431949, 3477251, 2148125,
+ 3244489, 2174090};
+static const int32_t spec2time_out_expected_2[kSamples] = {
+ 1691694, -2499988, -2035547, 1060469, 988634, -2044502, -306271,
+ 2041000, 201454, -2289456, 93694, 2129427, -369152, -1887834,
+ 860796, 2089102, -929424, -1673956, 1395291, 1785651, -1619673,
+ -1380109, 1963449, 1093311, -2111007, -840456, 2372786, 578119,
+ -2242702, 89774, 2463304, -132717, -2121480, 643634, 2277636,
+ -1125999, -1995858, 1543748, 2227861, -1483779, -1495491, 2102642,
+ 1833876, -1920568, -958378, 2485101, 772261, -2454257, -24942,
+ 2918714, 136838, -2500453, 816118, 3039735, -746560, -2365815,
+ 1586396, 2714951, -1511696, -1942334, 2571792, 2182827, -2325335,
+ -1311543, 3055970, 1367220, -2737182, -110626, 3889222, 631008,
+ -3280879, 853066, 4122279, -706638, -3334449, 2148311, 3993512,
+ -1846301, -3004894, 3426779, 3329522, -3165264, -2242423, 4756866,
+ 2557711, -4131280, -805259, 5702711, 1120592, -4852821, 743664,
+ 6476444, -621186, -5465828, 2815787, 6768835, -3017442, -5338409,
+ 5658126, 6838454, -5492288, -4682382, 8874947, 6153814, -8832561,
+ -2649251, 12817398, 4237692, -13000247, 1190661, 18986363, -115738,
+ -19693978, 9908367, 30660381, -10632635, -37962068, 47022884, 89744622,
+ -42087632, 40279224, -88869341, -47542383, 38572364, 10441576, -30339718,
+ -9926740, 19896578, 28009, -18886612, -1124047, 13232498, -4150304,
+ -12770551, 2637074, 9051831, -6162211, -8713972, 4557937, 5489716,
+ -6862312, -5532349, 5415449, 2791310, -6999367, -2790102, 5375806,
+ 546222, -6486452, -821261, 4994973, -1278840, -5645501, 1060484,
+ 3996285, -2503954, -4653629, 2220549, 3036977, -3282133, -3318585,
+ 2780636, 1789880, -4004589, -2041031, 3105373, 574819, -3992722,
+ -971004, 3001703, -676739, -3841508, 417284, 2897970, -1427018,
+ -3058480, 1189948, 2210960, -2268992, -2603272, 1949785, 1576172,
+ -2720404, -1891738, 2309456, 769178, -2975646, -707150, 2424652,
+ -88039, -2966660, -65452, 2320780, -957557, -2798978, 744640,
+ 1879794, -1672081, -2365319, 1253309, 1366383, -2204082, -1544367,
+ 1801452, 613828, -2531994, -983847, 2064842, 118326, -2613790,
+ -203220, 2219635, -730341, -2641861, 563557, 1765434, -1329916,
+ -2272927, 1037138, 1266725, -1939220, -1588643, 1754528, 816552,
+ -2376303, -1099167, 1864999, 122477, -2422762, -400027, 1889228,
+ -579916, -2490353, 287139, 2011318, -1176657, -2502978, 812896,
+ 1116502, -1940211};
+static const int16_t time2spec_out_expected_1[kSamples] = {
+ 20342, 23889, -10063, -9419, 3242, 7280, -2012, -5029, 332, 4478,
+ -97, -3244, -891, 3117, 773, -2204, -1335, 2009, 1236, -1469,
+ -1562, 1277, 1366, -815, -1619, 599, 1449, -177, -1507, 116,
+ 1294, 263, -1338, -244, 1059, 553, -1045, -549, 829, 826,
+ -731, -755, 516, 909, -427, -853, 189, 1004, -184, -828,
+ -108, 888, 72, -700, -280, 717, 342, -611, -534, 601,
+ 534, -374, -646, 399, 567, -171, -720, 234, 645, -11,
+ -712, -26, 593, 215, -643, -172, 536, 361, -527, -403,
+ 388, 550, -361, -480, 208, 623, -206, -585, 41, 578,
+ 12, -504, -182, 583, 218, -437, -339, 499, 263, -354,
+ -450, 347, 456, -193, -524, 212, 475, -74, -566, 94,
+ 511, 112, -577, -201, 408, 217, -546, -295, 338, 387,
+ -13, 4, -46, 2, -76, 103, -83, 108, -55, 100,
+ -150, 131, -156, 141, -171, 179, -190, 128, -227, 172,
+ -214, 215, -189, 265, -244, 322, -335, 337, -352, 358,
+ -368, 362, -355, 366, -381, 403, -395, 411, -392, 446,
+ -458, 504, -449, 507, -464, 452, -491, 481, -534, 486,
+ -516, 560, -535, 525, -537, 559, -554, 570, -616, 591,
+ -585, 627, -509, 588, -584, 547, -610, 580, -614, 635,
+ -620, 655, -554, 546, -591, 642, -590, 660, -656, 629,
+ -604, 620, -580, 617, -645, 648, -573, 612, -604, 584,
+ -571, 597, -562, 627, -550, 560, -606, 529, -584, 568,
+ -503, 532, -463, 512, -440, 399, -457, 437, -349, 278,
+ -317, 257, -220, 163, -8, -61, 18, -161, 367, -1306};
+static const int16_t time2spec_out_expected_2[kSamples] = {
+ 14283, -11552, -15335, 6626, 7554, -2150, -6309, 1307, 4523, -4,
+ -3908, -314, 3001, 914, -2715, -1042, 2094, 1272, -1715, -1399,
+ 1263, 1508, -1021, -1534, 735, 1595, -439, -1447, 155, 1433,
+ 22, -1325, -268, 1205, 424, -1030, -608, 950, 643, -733,
+ -787, 661, 861, -502, -888, 331, 852, -144, -849, 19,
+ 833, 99, -826, -154, 771, 368, -735, -459, 645, 513,
+ -491, -604, 431, 630, -314, -598, 183, 622, -78, -612,
+ -48, 641, 154, -645, -257, 610, 281, -529, -444, 450,
+ 441, -327, -506, 274, 476, -232, -570, 117, 554, -86,
+ -531, -21, 572, 151, -606, -221, 496, 322, -407, -388,
+ 407, 394, -268, -428, 280, 505, -115, -588, 19, 513,
+ -29, -539, -109, 468, 173, -501, -242, 442, 278, -478,
+ -680, 656, -659, 656, -669, 602, -688, 612, -667, 612,
+ -642, 627, -648, 653, -676, 596, -680, 655, -649, 678,
+ -672, 587, -608, 637, -645, 637, -620, 556, -580, 553,
+ -635, 518, -599, 583, -501, 536, -544, 473, -552, 583,
+ -511, 541, -532, 563, -486, 461, -453, 486, -388, 424,
+ -416, 432, -374, 399, -462, 364, -346, 293, -329, 331,
+ -313, 281, -247, 309, -337, 241, -190, 207, -194, 179,
+ -163, 155, -156, 117, -135, 107, -126, 29, -22, 81,
+ -8, 17, -61, -10, 8, -37, 80, -44, 72, -88,
+ 65, -89, 130, -114, 181, -215, 189, -245, 260, -288,
+ 294, -339, 344, -396, 407, -429, 438, -439, 485, -556,
+ 629, -612, 637, -645, 661, -737, 829, -830, 831, -1041};
+
+class TransformTest : public ::testing::Test {
+ protected:
+ // Pass a function pointer to the Tester function.
+ void Time2SpecTester(Time2Spec Time2SpecFunction) {
+ // The WebRtcIsacfix_Time2Spec functions hard-code the buffer lengths, so
+ // the (fairly large) test buffers have to match that size here.
+ int16_t data_in_1[kSamples] = {0};
+ int16_t data_in_2[kSamples] = {0};
+ int16_t data_out_1[kSamples] = {0};
+ int16_t data_out_2[kSamples] = {0};
+
+ for (int i = 0; i < kSamples; i++) {
+ data_in_1[i] = i * i + 1777;
+ data_in_2[i] = WEBRTC_SPL_WORD16_MAX / (i + 1) + 17;
+ }
+
+ Time2SpecFunction(data_in_1, data_in_2, data_out_1, data_out_2);
+
+ for (int i = 0; i < kSamples; i++) {
+ // We don't require bit-exact results from the ARM assembly code.
+ EXPECT_LE(abs(time2spec_out_expected_1[i] - data_out_1[i]), 1);
+ EXPECT_LE(abs(time2spec_out_expected_2[i] - data_out_2[i]), 1);
+ }
+ }
+
+ // Pass a function pointer to the Tester function.
+ void Spec2TimeTester(Spec2Time Spec2TimeFunction) {
+ // The WebRtcIsacfix_Spec2Time functions hard-code the buffer lengths, so
+ // the (fairly large) test buffers have to match that size here.
+ int16_t data_in_1[kSamples] = {0};
+ int16_t data_in_2[kSamples] = {0};
+ int32_t data_out_1[kSamples] = {0};
+ int32_t data_out_2[kSamples] = {0};
+ for (int i = 0; i < kSamples; i++) {
+ data_in_1[i] = i * i + 1777;
+ data_in_2[i] = WEBRTC_SPL_WORD16_MAX / (i + 1) + 17;
+ }
+
+ Spec2TimeFunction(data_in_1, data_in_2, data_out_1, data_out_2);
+
+ for (int i = 0; i < kSamples; i++) {
+ // We don't require bit-exact results from the ARM assembly code.
+ EXPECT_LE(abs(spec2time_out_expected_1[i] - data_out_1[i]), 16);
+ EXPECT_LE(abs(spec2time_out_expected_2[i] - data_out_2[i]), 16);
+ }
+ }
+};
+
+TEST_F(TransformTest, Time2SpecTest) {
+ Time2SpecTester(WebRtcIsacfix_Time2SpecC);
+#if defined(WEBRTC_HAS_NEON)
+ Time2SpecTester(WebRtcIsacfix_Time2SpecNeon);
+#endif
+}
+
+TEST_F(TransformTest, Spec2TimeTest) {
+ Spec2TimeTester(WebRtcIsacfix_Spec2TimeC);
+#if defined(WEBRTC_HAS_NEON)
+ Spec2TimeTester(WebRtcIsacfix_Spec2TimeNeon);
+#endif
+}
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/test/isac_speed_test.cc b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/test/isac_speed_test.cc
new file mode 100644
index 0000000000..903ac64aff
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/test/isac_speed_test.cc
@@ -0,0 +1,120 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/isac/fix/include/isacfix.h"
+#include "modules/audio_coding/codecs/isac/fix/source/settings.h"
+#include "modules/audio_coding/codecs/tools/audio_codec_speed_test.h"
+#include "rtc_base/checks.h"
+
+using std::string;
+
+namespace webrtc {
+
+static const int kIsacBlockDurationMs = 30;
+static const int kIsacInputSamplingKhz = 16;
+static const int kIsacOutputSamplingKhz = 16;
+
+class IsacSpeedTest : public AudioCodecSpeedTest {
+ protected:
+ IsacSpeedTest();
+ void SetUp() override;
+ void TearDown() override;
+ float EncodeABlock(int16_t* in_data,
+ uint8_t* bit_stream,
+ size_t max_bytes,
+ size_t* encoded_bytes) override;
+ float DecodeABlock(const uint8_t* bit_stream,
+ size_t encoded_bytes,
+ int16_t* out_data) override;
+ ISACFIX_MainStruct* ISACFIX_main_inst_;
+};
+
+IsacSpeedTest::IsacSpeedTest()
+ : AudioCodecSpeedTest(kIsacBlockDurationMs,
+ kIsacInputSamplingKhz,
+ kIsacOutputSamplingKhz),
+ ISACFIX_main_inst_(NULL) {}
+
+void IsacSpeedTest::SetUp() {
+ AudioCodecSpeedTest::SetUp();
+
+ // Check whether the allocated buffer for the bit stream is large enough.
+ EXPECT_GE(max_bytes_, static_cast<size_t>(STREAM_MAXW16_60MS));
+
+ // Create encoder memory.
+ EXPECT_EQ(0, WebRtcIsacfix_Create(&ISACFIX_main_inst_));
+ EXPECT_EQ(0, WebRtcIsacfix_EncoderInit(ISACFIX_main_inst_, 1));
+ WebRtcIsacfix_DecoderInit(ISACFIX_main_inst_);
+ // Set bitrate and block length.
+ EXPECT_EQ(0, WebRtcIsacfix_Control(ISACFIX_main_inst_, bit_rate_,
+ block_duration_ms_));
+}
+
+void IsacSpeedTest::TearDown() {
+ AudioCodecSpeedTest::TearDown();
+ // Free memory.
+ EXPECT_EQ(0, WebRtcIsacfix_Free(ISACFIX_main_inst_));
+}
+
+float IsacSpeedTest::EncodeABlock(int16_t* in_data,
+ uint8_t* bit_stream,
+ size_t max_bytes,
+ size_t* encoded_bytes) {
+ // ISAC encodes 10 ms of audio on every call.
+ const int subblocks = block_duration_ms_ / 10;
+ const int subblock_length = 10 * input_sampling_khz_;
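+ // E.g. with 16 kHz input this is 10 * 16 = 160 samples per call.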
+ int value = 0;
+
+ clock_t clocks = clock();
+ size_t pointer = 0;
+ for (int idx = 0; idx < subblocks; idx++, pointer += subblock_length) {
+ value =
+ WebRtcIsacfix_Encode(ISACFIX_main_inst_, &in_data[pointer], bit_stream);
+ if (idx == subblocks - 1)
+ EXPECT_GT(value, 0);
+ else
+ EXPECT_EQ(0, value);
+ }
+ clocks = clock() - clocks;
+ *encoded_bytes = static_cast<size_t>(value);
+ RTC_DCHECK_LE(*encoded_bytes, max_bytes);
+ return 1000.0 * clocks / CLOCKS_PER_SEC;
+}
+
+float IsacSpeedTest::DecodeABlock(const uint8_t* bit_stream,
+ size_t encoded_bytes,
+ int16_t* out_data) {
+ int value;
+ int16_t audio_type;
+ clock_t clocks = clock();
+ value = WebRtcIsacfix_Decode(ISACFIX_main_inst_, bit_stream, encoded_bytes,
+ out_data, &audio_type);
+ clocks = clock() - clocks;
+ EXPECT_EQ(output_length_sample_, static_cast<size_t>(value));
+ return 1000.0 * clocks / CLOCKS_PER_SEC;
+}
+
+TEST_P(IsacSpeedTest, IsacEncodeDecodeTest) {
+  size_t kDurationSec = 400;  // Test audio length in seconds.
+ EncodeDecode(kDurationSec);
+}
+
+const coding_param param_set[] = {
+ std::make_tuple(1,
+ 32000,
+ string("audio_coding/speech_mono_16kHz"),
+ string("pcm"),
+ true)};
+
+INSTANTIATE_TEST_SUITE_P(AllTest,
+ IsacSpeedTest,
+ ::testing::ValuesIn(param_set));
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/isac_webrtc_api_test.cc b/third_party/libwebrtc/modules/audio_coding/codecs/isac/isac_webrtc_api_test.cc
new file mode 100644
index 0000000000..cafca75e46
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/isac_webrtc_api_test.cc
@@ -0,0 +1,346 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <array>
+#include <map>
+#include <memory>
+#include <vector>
+
+#include "absl/strings/string_view.h"
+#include "api/array_view.h"
+#include "api/audio_codecs/isac/audio_decoder_isac_fix.h"
+#include "api/audio_codecs/isac/audio_decoder_isac_float.h"
+#include "api/audio_codecs/isac/audio_encoder_isac_fix.h"
+#include "api/audio_codecs/isac/audio_encoder_isac_float.h"
+#include "modules/audio_coding/test/PCMFile.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/strings/string_builder.h"
+#include "test/gtest.h"
+#include "test/testsupport/file_utils.h"
+
+namespace webrtc {
+namespace {
+
+constexpr int kPayloadType = 42;
+
+enum class IsacImpl { kFixed, kFloat };
+
+absl::string_view IsacImplToString(IsacImpl impl) {
+ switch (impl) {
+ case IsacImpl::kFixed:
+ return "fixed";
+ case IsacImpl::kFloat:
+ return "float";
+ }
+}
+
+std::unique_ptr<PCMFile> GetPcmTestFileReader(int sample_rate_hz) {
+ std::string filename;
+ switch (sample_rate_hz) {
+ case 16000:
+ filename = test::ResourcePath("audio_coding/testfile16kHz", "pcm");
+ break;
+ case 32000:
+ filename = test::ResourcePath("audio_coding/testfile32kHz", "pcm");
+ break;
+ default:
+ RTC_DCHECK_NOTREACHED()
+ << "No test file available for " << sample_rate_hz << " Hz.";
+ }
+ auto pcm_file = std::make_unique<PCMFile>();
+ pcm_file->ReadStereo(false);
+ pcm_file->Open(filename, sample_rate_hz, "rb", /*auto_rewind=*/true);
+ pcm_file->FastForward(/*num_10ms_blocks=*/100); // Skip initial silence.
+ RTC_CHECK(!pcm_file->EndOfFile());
+ return pcm_file;
+}
+
+// Returns a view to the interleaved samples of an AudioFrame object.
+rtc::ArrayView<const int16_t> AudioFrameToView(const AudioFrame& audio_frame) {
+ return {audio_frame.data(),
+ audio_frame.samples_per_channel() * audio_frame.num_channels()};
+}
+
+std::unique_ptr<AudioEncoder> CreateEncoder(IsacImpl impl,
+ int sample_rate_hz,
+ int frame_size_ms,
+ int bitrate_bps) {
+ RTC_CHECK(sample_rate_hz == 16000 || sample_rate_hz == 32000);
+ RTC_CHECK(frame_size_ms == 30 || frame_size_ms == 60);
+ RTC_CHECK_GT(bitrate_bps, 0);
+ switch (impl) {
+ case IsacImpl::kFixed: {
+ AudioEncoderIsacFix::Config config;
+ config.bit_rate = bitrate_bps;
+ config.frame_size_ms = frame_size_ms;
+ RTC_CHECK_EQ(16000, sample_rate_hz);
+ return AudioEncoderIsacFix::MakeAudioEncoder(config, kPayloadType);
+ }
+ case IsacImpl::kFloat: {
+ AudioEncoderIsacFloat::Config config;
+ config.bit_rate = bitrate_bps;
+ config.frame_size_ms = frame_size_ms;
+ config.sample_rate_hz = sample_rate_hz;
+ return AudioEncoderIsacFloat::MakeAudioEncoder(config, kPayloadType);
+ }
+ }
+}
+
+std::unique_ptr<AudioDecoder> CreateDecoder(IsacImpl impl, int sample_rate_hz) {
+ RTC_CHECK(sample_rate_hz == 16000 || sample_rate_hz == 32000);
+ switch (impl) {
+ case IsacImpl::kFixed: {
+ webrtc::AudioDecoderIsacFix::Config config;
+ RTC_CHECK_EQ(16000, sample_rate_hz);
+ return webrtc::AudioDecoderIsacFix::MakeAudioDecoder(config);
+ }
+ case IsacImpl::kFloat: {
+ webrtc::AudioDecoderIsacFloat::Config config;
+ config.sample_rate_hz = sample_rate_hz;
+ return webrtc::AudioDecoderIsacFloat::MakeAudioDecoder(config);
+ }
+ }
+}
+
+struct EncoderTestParams {
+ IsacImpl impl;
+ int sample_rate_hz;
+ int frame_size_ms;
+};
+
+class EncoderTest : public testing::TestWithParam<EncoderTestParams> {
+ protected:
+ EncoderTest() = default;
+ IsacImpl GetIsacImpl() const { return GetParam().impl; }
+ int GetSampleRateHz() const { return GetParam().sample_rate_hz; }
+ int GetFrameSizeMs() const { return GetParam().frame_size_ms; }
+};
+
+TEST_P(EncoderTest, TestConfig) {
+ for (int bitrate_bps : {10000, 21000, 32000}) {
+ SCOPED_TRACE(bitrate_bps);
+ auto encoder = CreateEncoder(GetIsacImpl(), GetSampleRateHz(),
+ GetFrameSizeMs(), bitrate_bps);
+ EXPECT_EQ(GetSampleRateHz(), encoder->SampleRateHz());
+ EXPECT_EQ(size_t{1}, encoder->NumChannels());
+ EXPECT_EQ(bitrate_bps, encoder->GetTargetBitrate());
+ }
+}
+
+// Encodes an input audio sequence with a low and a high target bitrate and
+// checks that the number of produced bytes in the first case is less than that
+// of the second case.
+TEST_P(EncoderTest, TestDifferentBitrates) {
+ auto pcm_file = GetPcmTestFileReader(GetSampleRateHz());
+ constexpr int kLowBps = 20000;
+ constexpr int kHighBps = 25000;
+ auto encoder_low = CreateEncoder(GetIsacImpl(), GetSampleRateHz(),
+ GetFrameSizeMs(), kLowBps);
+ auto encoder_high = CreateEncoder(GetIsacImpl(), GetSampleRateHz(),
+ GetFrameSizeMs(), kHighBps);
+ int num_bytes_low = 0;
+ int num_bytes_high = 0;
+ constexpr int kNumFrames = 12;
+ for (int i = 0; i < kNumFrames; ++i) {
+ AudioFrame in;
+ pcm_file->Read10MsData(in);
+ rtc::Buffer low, high;
+ encoder_low->Encode(/*rtp_timestamp=*/0, AudioFrameToView(in), &low);
+ encoder_high->Encode(/*rtp_timestamp=*/0, AudioFrameToView(in), &high);
+ num_bytes_low += low.size();
+ num_bytes_high += high.size();
+ }
+ EXPECT_LT(num_bytes_low, num_bytes_high);
+}
+
+// Encodes an input audio sequence first with a low, then with a high target
+// bitrate *using the same encoder* and checks that the number of emitted bytes
+// in the first case is less than in the second case.
+TEST_P(EncoderTest, TestDynamicBitrateChange) {
+ constexpr int kLowBps = 20000;
+ constexpr int kHighBps = 25000;
+ constexpr int kStartBps = 30000;
+ auto encoder = CreateEncoder(GetIsacImpl(), GetSampleRateHz(),
+ GetFrameSizeMs(), kStartBps);
+ std::map<int, int> num_bytes;
+ constexpr int kNumFrames = 200; // 2 seconds.
+ for (int bitrate_bps : {kLowBps, kHighBps}) {
+ auto pcm_file = GetPcmTestFileReader(GetSampleRateHz());
+ encoder->OnReceivedTargetAudioBitrate(bitrate_bps);
+ for (int i = 0; i < kNumFrames; ++i) {
+ AudioFrame in;
+ pcm_file->Read10MsData(in);
+ rtc::Buffer buf;
+ encoder->Encode(/*rtp_timestamp=*/0, AudioFrameToView(in), &buf);
+ num_bytes[bitrate_bps] += buf.size();
+ }
+ }
+ // kHighBps / kLowBps == 1.25, so require the high-bitrate run to produce at
+ // least 1.195 times the number of bytes.
+ EXPECT_LT(1.195 * num_bytes[kLowBps], num_bytes[kHighBps]);
+}
+
+// Checks that, given a target bitrate, the encoder does not overshoot too much.
+TEST_P(EncoderTest, DoNotOvershootTargetBitrate) {
+ for (int bitrate_bps : {10000, 15000, 20000, 26000, 32000}) {
+ SCOPED_TRACE(bitrate_bps);
+ auto pcm_file = GetPcmTestFileReader(GetSampleRateHz());
+ auto e = CreateEncoder(GetIsacImpl(), GetSampleRateHz(), GetFrameSizeMs(),
+ bitrate_bps);
+ int num_bytes = 0;
+ constexpr int kNumFrames = 200; // 2 seconds.
+ for (int i = 0; i < kNumFrames; ++i) {
+ AudioFrame in;
+ pcm_file->Read10MsData(in);
+ rtc::Buffer encoded;
+ e->Encode(/*rtp_timestamp=*/0, AudioFrameToView(in), &encoded);
+ num_bytes += encoded.size();
+ }
+ // Inverse of the duration of `kNumFrames` 10 ms frames (unit: seconds^-1).
+ constexpr float kAudioDurationInv = 100.f / kNumFrames;
+ const int measured_bitrate_bps = 8 * num_bytes * kAudioDurationInv;
+ EXPECT_LT(measured_bitrate_bps, bitrate_bps + 2250); // Max 2250 bps extra.
+ }
+}
+
+// Creates tests for different encoder configurations and implementations.
+INSTANTIATE_TEST_SUITE_P(
+ IsacApiTest,
+ EncoderTest,
+ ::testing::ValuesIn([] {
+ std::vector<EncoderTestParams> cases;
+ for (IsacImpl impl : {IsacImpl::kFloat, IsacImpl::kFixed}) {
+ for (int frame_size_ms : {30, 60}) {
+ cases.push_back({impl, 16000, frame_size_ms});
+ }
+ }
+ cases.push_back({IsacImpl::kFloat, 32000, 30});
+ return cases;
+ }()),
+ [](const ::testing::TestParamInfo<EncoderTestParams>& info) {
+ rtc::StringBuilder b;
+ const auto& p = info.param;
+ b << IsacImplToString(p.impl) << "_" << p.sample_rate_hz << "_"
+ << p.frame_size_ms;
+ return b.Release();
+ });
+
+struct DecoderTestParams {
+ IsacImpl impl;
+ int sample_rate_hz;
+};
+
+class DecoderTest : public testing::TestWithParam<DecoderTestParams> {
+ protected:
+ DecoderTest() = default;
+ IsacImpl GetIsacImpl() const { return GetParam().impl; }
+ int GetSampleRateHz() const { return GetParam().sample_rate_hz; }
+};
+
+TEST_P(DecoderTest, TestConfig) {
+ auto decoder = CreateDecoder(GetIsacImpl(), GetSampleRateHz());
+ EXPECT_EQ(GetSampleRateHz(), decoder->SampleRateHz());
+ EXPECT_EQ(size_t{1}, decoder->Channels());
+}
+
+// Creates tests for different decoder configurations and implementations.
+INSTANTIATE_TEST_SUITE_P(
+ IsacApiTest,
+ DecoderTest,
+ ::testing::ValuesIn({DecoderTestParams{IsacImpl::kFixed, 16000},
+ DecoderTestParams{IsacImpl::kFloat, 16000},
+ DecoderTestParams{IsacImpl::kFloat, 32000}}),
+ [](const ::testing::TestParamInfo<DecoderTestParams>& info) {
+ const auto& p = info.param;
+ return (rtc::StringBuilder()
+ << IsacImplToString(p.impl) << "_" << p.sample_rate_hz)
+ .Release();
+ });
+
+struct EncoderDecoderPairTestParams {
+ int sample_rate_hz;
+ int frame_size_ms;
+ IsacImpl encoder_impl;
+ IsacImpl decoder_impl;
+};
+
+class EncoderDecoderPairTest
+ : public testing::TestWithParam<EncoderDecoderPairTestParams> {
+ protected:
+ EncoderDecoderPairTest() = default;
+ int GetSampleRateHz() const { return GetParam().sample_rate_hz; }
+ int GetEncoderFrameSizeMs() const { return GetParam().frame_size_ms; }
+ IsacImpl GetEncoderIsacImpl() const { return GetParam().encoder_impl; }
+ IsacImpl GetDecoderIsacImpl() const { return GetParam().decoder_impl; }
+ int GetEncoderFrameSize() const {
+ return GetEncoderFrameSizeMs() * GetSampleRateHz() / 1000;
+ }
+};
+
+// Checks that the number of encoded and decoded samples match.
+TEST_P(EncoderDecoderPairTest, EncodeDecode) {
+ auto pcm_file = GetPcmTestFileReader(GetSampleRateHz());
+ auto encoder = CreateEncoder(GetEncoderIsacImpl(), GetSampleRateHz(),
+ GetEncoderFrameSizeMs(), /*bitrate_bps=*/20000);
+ auto decoder = CreateDecoder(GetDecoderIsacImpl(), GetSampleRateHz());
+ const int encoder_frame_size = GetEncoderFrameSize();
+ std::vector<int16_t> out(encoder_frame_size);
+ size_t num_encoded_samples = 0;
+ size_t num_decoded_samples = 0;
+ constexpr int kNumFrames = 12;
+ for (int i = 0; i < kNumFrames; ++i) {
+ AudioFrame in;
+ pcm_file->Read10MsData(in);
+ rtc::Buffer encoded;
+ encoder->Encode(/*rtp_timestamp=*/0, AudioFrameToView(in), &encoded);
+ num_encoded_samples += in.samples_per_channel();
+ if (encoded.empty()) {
+ continue;
+ }
+ // Decode.
+ const std::vector<AudioDecoder::ParseResult> parse_result =
+ decoder->ParsePayload(std::move(encoded), /*timestamp=*/0);
+ EXPECT_EQ(parse_result.size(), size_t{1});
+ auto decode_result = parse_result[0].frame->Decode(out);
+ EXPECT_TRUE(decode_result.has_value());
+ EXPECT_EQ(out.size(), decode_result->num_decoded_samples);
+ num_decoded_samples += decode_result->num_decoded_samples;
+ }
+ EXPECT_EQ(num_encoded_samples, num_decoded_samples);
+}
+
+// Creates tests for different encoder frame sizes and different
+// encoder/decoder implementations.
+INSTANTIATE_TEST_SUITE_P(
+ IsacApiTest,
+ EncoderDecoderPairTest,
+ ::testing::ValuesIn([] {
+ std::vector<EncoderDecoderPairTestParams> cases;
+ for (int frame_size_ms : {30, 60}) {
+ for (IsacImpl enc : {IsacImpl::kFloat, IsacImpl::kFixed}) {
+ for (IsacImpl dec : {IsacImpl::kFloat, IsacImpl::kFixed}) {
+ cases.push_back({16000, frame_size_ms, enc, dec});
+ }
+ }
+ }
+ cases.push_back({32000, 30, IsacImpl::kFloat, IsacImpl::kFloat});
+ return cases;
+ }()),
+ [](const ::testing::TestParamInfo<EncoderDecoderPairTestParams>& info) {
+ rtc::StringBuilder b;
+ const auto& p = info.param;
+ b << p.sample_rate_hz << "_" << p.frame_size_ms << "_"
+ << IsacImplToString(p.encoder_impl) << "_"
+ << IsacImplToString(p.decoder_impl);
+ return b.Release();
+ });
+
+} // namespace
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/include/audio_decoder_isac.h b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/include/audio_decoder_isac.h
new file mode 100644
index 0000000000..fae2f3d4a7
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/include/audio_decoder_isac.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_INCLUDE_AUDIO_DECODER_ISAC_H_
+#define MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_INCLUDE_AUDIO_DECODER_ISAC_H_
+
+#include "modules/audio_coding/codecs/isac/audio_decoder_isac_t.h"
+#include "modules/audio_coding/codecs/isac/main/source/isac_float_type.h"
+
+namespace webrtc {
+
+using AudioDecoderIsacFloatImpl = AudioDecoderIsacT<IsacFloat>;
+
+} // namespace webrtc
+#endif  // MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_INCLUDE_AUDIO_DECODER_ISAC_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/include/audio_encoder_isac.h b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/include/audio_encoder_isac.h
new file mode 100644
index 0000000000..dc32bcdde6
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/include/audio_encoder_isac.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_INCLUDE_AUDIO_ENCODER_ISAC_H_
+#define MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_INCLUDE_AUDIO_ENCODER_ISAC_H_
+
+#include "modules/audio_coding/codecs/isac/audio_encoder_isac_t.h"
+#include "modules/audio_coding/codecs/isac/main/source/isac_float_type.h"
+
+namespace webrtc {
+
+using AudioEncoderIsacFloatImpl = AudioEncoderIsacT<IsacFloat>;
+
+} // namespace webrtc
+#endif // MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_INCLUDE_AUDIO_ENCODER_ISAC_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/include/isac.h b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/include/isac.h
new file mode 100644
index 0000000000..3b05a8bcda
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/include/isac.h
@@ -0,0 +1,617 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_INCLUDE_ISAC_H_
+#define MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_INCLUDE_ISAC_H_
+
+#include <stddef.h>
+
+#include "modules/audio_coding/codecs/isac/bandwidth_info.h"
+
+typedef struct WebRtcISACStruct ISACStruct;
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/******************************************************************************
+ * WebRtcIsac_Create(...)
+ *
+ * This function creates an ISAC instance, which will contain the state
+ * information for one coding/decoding channel.
+ *
+ * Input:
+ * - *ISAC_main_inst : a pointer to the coder instance.
+ *
+ * Return value : 0 - Ok
+ * -1 - Error
+ */
+
+int16_t WebRtcIsac_Create(ISACStruct** ISAC_main_inst);
+
+/******************************************************************************
+ * WebRtcIsac_Free(...)
+ *
+ * This function frees the ISAC instance created at the beginning.
+ *
+ * Input:
+ * - ISAC_main_inst : an ISAC instance.
+ *
+ * Return value : 0 - Ok
+ * -1 - Error
+ */
+
+int16_t WebRtcIsac_Free(ISACStruct* ISAC_main_inst);
+
+/******************************************************************************
+ * WebRtcIsac_EncoderInit(...)
+ *
+ * This function initializes an ISAC instance prior to the encoder calls.
+ *
+ * Input:
+ * - ISAC_main_inst : ISAC instance.
+ * - CodingMode : 0 -> Bit rate and frame length are
+ * automatically adjusted to available bandwidth
+ * on transmission channel, just valid if codec
+ * is created to work in wideband mode.
+ * 1 -> User sets a frame length and a target bit
+ * rate which is taken as the maximum
+ * short-term average bit rate.
+ *
+ * Return value : 0 - Ok
+ * -1 - Error
+ */
+
+int16_t WebRtcIsac_EncoderInit(ISACStruct* ISAC_main_inst, int16_t CodingMode);
+
+/******************************************************************************
+ * WebRtcIsac_Encode(...)
+ *
+ * This function encodes 10 ms audio blocks and inserts them into a packet.
+ * The input speech length is 160 samples if operating at 16 kHz sampling
+ * rate, or 320 if operating at 32 kHz sampling rate. The encoder buffers the
+ * input audio until a whole frame has been accumulated, then proceeds with
+ * encoding.
+ *
+ *
+ * Input:
+ * - ISAC_main_inst : ISAC instance.
+ * - speechIn : input speech vector.
+ *
+ * Output:
+ * - encoded : the encoded data vector
+ *
+ * Return value:
+ * : >0 - Length (in bytes) of coded data
+ *                              : 0 - The buffer has not yet reached the
+ *                                    chosen frame-size, so the encoder keeps
+ *                                    buffering speech samples.
+ * : -1 - Error
+ */
+
+int WebRtcIsac_Encode(ISACStruct* ISAC_main_inst,
+ const int16_t* speechIn,
+ uint8_t* encoded);
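+
+/*
+ * Illustrative encode-side usage sketch (not part of the API).
+ * `ReadNext10ms` and `SendPacket` are hypothetical helpers; the payload
+ * buffer is sized to the 600-byte maximum documented for
+ * WebRtcIsac_SetMaxPayloadSize() below.
+ *
+ *   ISACStruct* inst = NULL;
+ *   int16_t speech[160];                  // one 10 ms block at 16 kHz
+ *   uint8_t payload[600];
+ *   WebRtcIsac_Create(&inst);
+ *   WebRtcIsac_EncoderInit(inst, 1);      // instantaneous (user-set) mode
+ *   WebRtcIsac_Control(inst, 32000, 30);  // 32 kbps, 30 ms frames
+ *   for (;;) {
+ *     ReadNext10ms(speech);
+ *     int len = WebRtcIsac_Encode(inst, speech, payload);
+ *     if (len > 0)
+ *       SendPacket(payload, len);  // a complete frame was produced
+ *     else if (len < 0)
+ *       break;  // error; see WebRtcIsac_GetErrorCode()
+ *   }
+ *   WebRtcIsac_Free(inst);
+ */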
+
+/******************************************************************************
+ * WebRtcIsac_DecoderInit(...)
+ *
+ * This function initializes an ISAC instance prior to the decoder calls.
+ *
+ * Input:
+ * - ISAC_main_inst : ISAC instance.
+ */
+
+void WebRtcIsac_DecoderInit(ISACStruct* ISAC_main_inst);
+
+/******************************************************************************
+ * WebRtcIsac_UpdateBwEstimate(...)
+ *
+ * This function updates the estimate of the bandwidth.
+ *
+ * Input:
+ * - ISAC_main_inst : ISAC instance.
+ * - encoded : encoded ISAC frame(s).
+ * - packet_size : size of the packet.
+ * - rtp_seq_number : the RTP number of the packet.
+ * - send_ts : the RTP send timestamp, given in samples
+ * - arr_ts : the arrival time of the packet (from NetEq)
+ * in samples.
+ *
+ * Return value : 0 - Ok
+ * -1 - Error
+ */
+
+int16_t WebRtcIsac_UpdateBwEstimate(ISACStruct* ISAC_main_inst,
+ const uint8_t* encoded,
+ size_t packet_size,
+ uint16_t rtp_seq_number,
+ uint32_t send_ts,
+ uint32_t arr_ts);
+
+/******************************************************************************
+ * WebRtcIsac_Decode(...)
+ *
+ * This function decodes an ISAC frame. At 16 kHz sampling rate, the length
+ * of the output audio could be either 480 or 960 samples, equivalent to
+ * 30 or 60 ms respectively. At 32 kHz sampling rate, the length of the
+ * output audio is 960 samples, which is 30 ms.
+ *
+ * Input:
+ * - ISAC_main_inst : ISAC instance.
+ * - encoded : encoded ISAC frame(s).
+ * - len : bytes in encoded vector.
+ *
+ * Output:
+ * - decoded : The decoded vector.
+ *
+ * Return value : >0 - number of samples in decoded vector.
+ * -1 - Error.
+ */
+
+int WebRtcIsac_Decode(ISACStruct* ISAC_main_inst,
+ const uint8_t* encoded,
+ size_t len,
+ int16_t* decoded,
+ int16_t* speechType);
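+
+/*
+ * Illustrative receive-side sketch (not part of the API; continues the
+ * encode-side sketch above). `packet`, `packet_len`, the RTP fields and
+ * `PlayOut` are hypothetical; a receiver would typically update the
+ * bandwidth estimate before decoding.
+ *
+ *   int16_t audio[960];  // up to 60 ms at 16 kHz
+ *   int16_t speech_type;
+ *   WebRtcIsac_DecoderInit(inst);  // once, before the first packet
+ *   WebRtcIsac_UpdateBwEstimate(inst, packet, packet_len, rtp_seq_number,
+ *                               send_ts, arr_ts);
+ *   int samples = WebRtcIsac_Decode(inst, packet, packet_len, audio,
+ *                                   &speech_type);
+ *   if (samples > 0)
+ *     PlayOut(audio, samples);
+ */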
+
+/******************************************************************************
+ * WebRtcIsac_DecodePlc(...)
+ *
+ * This function conducts PLC for ISAC frame(s). Output speech length
+ * will be a multiple of whole frames, i.e. multiples of 30 ms of audio.
+ * Therefore, the output is a multiple of 480 samples if operating at 16 kHz
+ * and a multiple of 960 if operating at 32 kHz.
+ *
+ * Input:
+ * - ISAC_main_inst : ISAC instance.
+ * - noOfLostFrames : Number of PLC frames to produce.
+ *
+ * Output:
+ * - decoded : The decoded vector.
+ *
+ * Return value : Number of samples in decoded PLC vector
+ */
+
+size_t WebRtcIsac_DecodePlc(ISACStruct* ISAC_main_inst,
+ int16_t* decoded,
+ size_t noOfLostFrames);
+
+/******************************************************************************
+ * WebRtcIsac_Control(...)
+ *
+ * This function sets the limit on the short-term average bit-rate and the
+ * frame length. Should be used only in Instantaneous mode. At 16 kHz sampling
+ * rate, an average bit-rate between 10000 and 32000 bps is valid and a
+ * frame-size of 30 or 60 ms is acceptable. At 32 kHz, an average bit-rate
+ * between 10000 and 56000 bps is acceptable, and the valid frame-size is 30 ms.
+ *
+ * Input:
+ * - ISAC_main_inst : ISAC instance.
+ * - rate : limit on the short-term average bit rate,
+ * in bits/second.
+ * - framesize : frame-size in millisecond.
+ *
+ * Return value : 0 - ok
+ * -1 - Error
+ */
+
+int16_t WebRtcIsac_Control(ISACStruct* ISAC_main_inst,
+ int32_t rate,
+ int framesize);
+
+void WebRtcIsac_SetInitialBweBottleneck(ISACStruct* ISAC_main_inst,
+ int bottleneck_bits_per_second);
+
+/******************************************************************************
+ * WebRtcIsac_ControlBwe(...)
+ *
+ * This function sets the initial values of bottleneck and frame-size if
+ * iSAC is used in channel-adaptive mode. Therefore, this API is not
+ * applicable if the codec is created to operate in super-wideband mode.
+ *
+ * Through this API, users can enforce a frame-size for all values of
+ * bottleneck. Then iSAC will not automatically change the frame-size.
+ *
+ *
+ * Input:
+ * - ISAC_main_inst : ISAC instance.
+ * - rateBPS : initial value of bottleneck in bits/second
+ * 10000 <= rateBPS <= 56000 is accepted
+ * For default bottleneck set rateBPS = 0
+ * - frameSizeMs : number of milliseconds per frame (30 or 60)
+ *        - enforceFrameSize  : 1 to enforce the given frame-size throughout
+ *                              the adaptation process, 0 to let iSAC
+ * change the frame-size if required.
+ *
+ * Return value : 0 - ok
+ * -1 - Error
+ */
+
+int16_t WebRtcIsac_ControlBwe(ISACStruct* ISAC_main_inst,
+ int32_t rateBPS,
+ int frameSizeMs,
+ int16_t enforceFrameSize);
+
+/******************************************************************************
+ * WebRtcIsac_ReadFrameLen(...)
+ *
+ * This function returns the length of the frame represented in the packet.
+ *
+ * Input:
+ * - encoded : Encoded bit-stream
+ *
+ * Output:
+ * - frameLength : Length of frame in packet (in samples)
+ *
+ */
+
+int16_t WebRtcIsac_ReadFrameLen(const ISACStruct* ISAC_main_inst,
+ const uint8_t* encoded,
+ int16_t* frameLength);
+
+/******************************************************************************
+ * WebRtcIsac_version(...)
+ *
+ * This function returns the version number.
+ *
+ * Output:
+ * - version : Pointer to character string
+ *
+ */
+
+void WebRtcIsac_version(char* version);
+
+/******************************************************************************
+ * WebRtcIsac_GetErrorCode(...)
+ *
+ * This function can be used to check the error code of an iSAC instance. When
+ * a function returns -1, an error code is set for that instance. This
+ * function extracts the code of the last error that occurred in the
+ * specified instance.
+ *
+ * Input:
+ * - ISAC_main_inst : ISAC instance
+ *
+ * Return value : Error code
+ */
+
+int16_t WebRtcIsac_GetErrorCode(ISACStruct* ISAC_main_inst);
+
+/****************************************************************************
+ * WebRtcIsac_GetUplinkBw(...)
+ *
+ * This function outputs the target bottleneck of the codec. In
+ * channel-adaptive mode, the target bottleneck is specified through in-band
+ * signaling retrieved by the bandwidth estimator.
+ * In channel-independent (also called instantaneous) mode, the target
+ * bottleneck is provided to the encoder by calling xxx_control(...). If
+ * xxx_control is never called, the default value is returned. The default
+ * value for bottleneck at 16 kHz encoder sampling rate is 32000 bits/sec,
+ * and it is 56000 bits/sec for 32 kHz sampling rate.
+ * Note that the output is the iSAC internal operating bottleneck which might
+ * differ slightly from the one provided through xxx_control().
+ *
+ * Input:
+ * - ISAC_main_inst : iSAC instance
+ *
+ * Output:
+ * - *bottleneck : bottleneck in bits/sec
+ *
+ * Return value : -1 if error happens
+ * 0 bit-rates computed correctly.
+ */
+
+int16_t WebRtcIsac_GetUplinkBw(ISACStruct* ISAC_main_inst, int32_t* bottleneck);
+
+/******************************************************************************
+ * WebRtcIsac_SetMaxPayloadSize(...)
+ *
+ * This function sets a limit for the maximum payload size of iSAC. The same
+ * value is used both for 30 and 60 ms packets. If the encoder sampling rate
+ * is 16 kHz the maximum payload size is between 120 and 400 bytes. If the
+ * encoder sampling rate is 32 kHz the maximum payload size is between 120
+ * and 600 bytes.
+ *
+ * If an out of range limit is used, the function returns -1, but the closest
+ * valid value will be applied.
+ *
+ * ---------------
+ * IMPORTANT NOTES
+ * ---------------
+ * The size of a packet is limited to the minimum of 'max-payload-size' and
+ * 'max-rate.' For instance, let's assume the max-payload-size is set to
+ * 170 bytes, and max-rate is set to 40 kbps. Note that a limit of 40 kbps
+ * translates to 150 bytes for 30ms frame-size & 300 bytes for 60ms
+ * frame-size. Then a packet with a frame-size of 30 ms is limited to 150,
+ * i.e. min(170, 150), and a packet with 60 ms frame-size is limited to
+ * 170 bytes, i.e. min(170, 300).
+ *
+ * Input:
+ * - ISAC_main_inst : iSAC instance
+ * - maxPayloadBytes : maximum size of the payload in bytes
+ * valid values are between 120 and 400 bytes
+ * if encoder sampling rate is 16 kHz. For
+ * 32 kHz encoder sampling rate valid values
+ * are between 120 and 600 bytes.
+ *
+ * Return value : 0 if successful
+ * -1 if error happens
+ */
+
+int16_t WebRtcIsac_SetMaxPayloadSize(ISACStruct* ISAC_main_inst,
+ int16_t maxPayloadBytes);
+
+/******************************************************************************
+ * WebRtcIsac_SetMaxRate(...)
+ *
+ * This function sets the maximum rate which the codec may not exceed for
+ * any packet. The maximum rate is defined as payload size per frame
+ * duration, expressed in bits per second.
+ *
+ * The codec has a maximum rate of 53400 bits per second (200 bytes per 30
+ * ms) if the encoder sampling rate is 16kHz, and 160 kbps (600 bytes/30 ms)
+ * if the encoder sampling rate is 32 kHz.
+ *
+ * It is possible to set a maximum rate between 32000 and 53400 bits/sec
+ * in wideband mode, and 32000 to 160000 bits/sec in super-wideband mode.
+ *
+ * If an out of range limit is used, the function returns -1, but the closest
+ * valid value will be applied.
+ *
+ * ---------------
+ * IMPORTANT NOTES
+ * ---------------
+ * The size of a packet is limited to the minimum of 'max-payload-size' and
+ * 'max-rate.' For instance, let's assume the max-payload-size is set to
+ * 170 bytes, and max-rate is set to 40 kbps. Note that a limit of 40 kbps
+ * translates to 150 bytes for 30ms frame-size & 300 bytes for 60ms
+ * frame-size. Then a packet with a frame-size of 30 ms is limited to 150,
+ * i.e. min(170, 150), and a packet with 60 ms frame-size is limited to
+ * 170 bytes, min(170, 300).
+ *
+ * Input:
+ * - ISAC_main_inst : iSAC instance
+ * - maxRate : maximum rate in bits per second,
+ * valid values are 32000 to 53400 bits/sec in
+ * wideband mode, and 32000 to 160000 bits/sec in
+ * super-wideband mode.
+ *
+ * Return value : 0 if successful
+ * -1 if error happens
+ */
+
+int16_t WebRtcIsac_SetMaxRate(ISACStruct* ISAC_main_inst, int32_t maxRate);
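+
+/*
+ * Sketch of the interplay described above, using the hypothetical values
+ * from the notes:
+ *
+ *   WebRtcIsac_SetMaxPayloadSize(inst, 170);  // cap every packet at 170 B
+ *   WebRtcIsac_SetMaxRate(inst, 40000);       // 150 B / 30 ms, 300 B / 60 ms
+ *
+ * A 30 ms packet is then capped at min(170, 150) = 150 bytes, and a 60 ms
+ * packet at min(170, 300) = 170 bytes.
+ */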
+
+/******************************************************************************
+ * WebRtcIsac_DecSampRate()
+ * Return the sampling rate of the decoded audio.
+ *
+ * Input:
+ * - ISAC_main_inst : iSAC instance
+ *
+ * Return value : sampling frequency in Hertz.
+ *
+ */
+
+uint16_t WebRtcIsac_DecSampRate(ISACStruct* ISAC_main_inst);
+
+/******************************************************************************
+ * WebRtcIsac_EncSampRate()
+ *
+ * Input:
+ * - ISAC_main_inst : iSAC instance
+ *
+ * Return value : sampling rate in Hertz.
+ *
+ */
+
+uint16_t WebRtcIsac_EncSampRate(ISACStruct* ISAC_main_inst);
+
+/******************************************************************************
+ * WebRtcIsac_SetDecSampRate()
+ * Set the sampling rate of the decoder. Initialization of the decoder WILL
+ * NOT overwrite the sampling rate of the encoder. The default value is 16 kHz
+ * which is set when the instance is created.
+ *
+ * Input:
+ * - ISAC_main_inst : iSAC instance
+ * - sampRate : sampling rate in Hertz.
+ *
+ * Return value : 0 if successful
+ * -1 if failed.
+ */
+
+int16_t WebRtcIsac_SetDecSampRate(ISACStruct* ISAC_main_inst,
+ uint16_t samp_rate_hz);
+
+/******************************************************************************
+ * WebRtcIsac_SetEncSampRate()
+ * Set the sampling rate of the encoder. Initialization of the encoder WILL
+ * NOT overwrite the sampling rate of the encoder. The default value is 16 kHz
+ * which is set when the instance is created. The encoding-mode and the
+ * bottleneck remain unchanged by this call, however, the maximum rate and
+ * maximum payload-size will reset to their default value.
+ *
+ * Input:
+ * - ISAC_main_inst : iSAC instance
+ * - sampRate : sampling rate in Hertz.
+ *
+ * Return value : 0 if successful
+ * -1 if failed.
+ */
+
+int16_t WebRtcIsac_SetEncSampRate(ISACStruct* ISAC_main_inst,
+ uint16_t sample_rate_hz);
+
+/******************************************************************************
+ * WebRtcIsac_GetNewBitStream(...)
+ *
+ * This function returns encoded data, with the received bwe-index in the
+ * stream. If the rate is set to a value less than the codec's bottleneck,
+ * the new bitstream will be re-encoded at the given target rate.
+ * It always returns a complete packet, i.e. it is called only once
+ * even for 60 msec frames.
+ *
+ * NOTE 1! This function does not write in the ISACStruct, it is not allowed.
+ * NOTE 2! Currently not implemented for SWB mode.
+ * NOTE 3! Rates larger than the bottleneck of the codec will be limited
+ * to the current bottleneck.
+ *
+ * Input:
+ * - ISAC_main_inst : ISAC instance.
+ * - bweIndex : Index of bandwidth estimate to put in new
+ * bitstream
+ *        - rate                   : target rate of the transcoder in bits/sec.
+ * Valid values are the accepted rate in iSAC,
+ * i.e. 10000 to 56000.
+ *        - isRCU                  : whether the new bit-stream is an RCU
+ *                                   stream. The rate parameter always
+ *                                   indicates the target rate of the main
+ *                                   payload, regardless of the 'isRCU' value.
+ *
+ * Output:
+ * - encoded : The encoded data vector
+ *
+ * Return value : >0 - Length (in bytes) of coded data
+ * -1 - Error or called in SWB mode
+ * NOTE! No error code is written to
+ * the struct since it is only allowed to read
+ * the struct.
+ */
+int16_t WebRtcIsac_GetNewBitStream(ISACStruct* ISAC_main_inst,
+ int16_t bweIndex,
+ int16_t jitterInfo,
+ int32_t rate,
+ uint8_t* encoded,
+ int16_t isRCU);
+
+/****************************************************************************
+ * WebRtcIsac_GetDownLinkBwIndex(...)
+ *
+ * This function returns an index representing the bandwidth estimate from
+ * the other side to this side.
+ *
+ * Input:
+ * - ISAC_main_inst : iSAC struct
+ *
+ * Output:
+ * - bweIndex : Bandwidth estimate to transmit to other side.
+ *
+ */
+
+int16_t WebRtcIsac_GetDownLinkBwIndex(ISACStruct* ISAC_main_inst,
+ int16_t* bweIndex,
+ int16_t* jitterInfo);
+
+/****************************************************************************
+ * WebRtcIsac_UpdateUplinkBw(...)
+ *
+ * This function takes an index representing the Bandwidth estimate from
+ * this side to the other side and updates the BWE.
+ *
+ * Input:
+ * - ISAC_main_inst : iSAC struct
+ * - bweIndex : Bandwidth estimate from other side.
+ *
+ */
+
+int16_t WebRtcIsac_UpdateUplinkBw(ISACStruct* ISAC_main_inst, int16_t bweIndex);
+
+/****************************************************************************
+ * WebRtcIsac_ReadBwIndex(...)
+ *
+ * This function returns the index of the Bandwidth estimate from the bitstream.
+ *
+ * Input:
+ * - encoded : Encoded bitstream
+ *
+ * Output:
+ * - bweIndex : Bandwidth estimate in bitstream
+ *
+ */
+
+int16_t WebRtcIsac_ReadBwIndex(const uint8_t* encoded, int16_t* bweIndex);
+
+/*******************************************************************************
+ * WebRtcIsac_GetNewFrameLen(...)
+ *
+ * This function returns the frame length (in samples) of the next packet.
+ * In channel-adaptive mode, iSAC decides on its frame length based on the
+ * estimated bottleneck; this allows the user to prepare for the next packet
+ * (at the encoder).
+ *
+ * The primary usage is in CE to make iSAC work in channel-adaptive mode.
+ *
+ * Input:
+ *        - ISAC_main_inst     : iSAC struct
+ *
+ * Return Value                : frame length in samples
+ *
+ */
+
+int16_t WebRtcIsac_GetNewFrameLen(ISACStruct* ISAC_main_inst);
+
+/****************************************************************************
+ * WebRtcIsac_GetRedPayload(...)
+ *
+ * Populates "encoded" with the redundant payload of the recently encoded
+ * frame. This function has to be called after WebRtcIsac_Encode(...)
+ * returns a positive value. Regardless of the frame-size, this function is
+ * called only once after encoding is completed.
+ *
+ * Input:
+ * - ISAC_main_inst : iSAC struct
+ *
+ * Output:
+ * - encoded : the encoded data vector
+ *
+ *
+ * Return value:
+ * : >0 - Length (in bytes) of coded data
+ * : -1 - Error
+ *
+ *
+ */
+int16_t WebRtcIsac_GetRedPayload(ISACStruct* ISAC_main_inst, uint8_t* encoded);
+
+/****************************************************************************
+ * WebRtcIsac_DecodeRcu(...)
+ *
+ * This function decodes a redundant (RCU) iSAC frame. The function is called
+ * by NetEq with a stored RCU payload in case of packet loss. Output speech
+ * length will be a multiple of 480 samples: 480 or 960 samples,
+ * depending on the frame size (30 or 60 ms).
+ *
+ * Input:
+ * - ISAC_main_inst : ISAC instance.
+ * - encoded : encoded ISAC RCU frame(s)
+ * - len : bytes in encoded vector
+ *
+ * Output:
+ * - decoded : The decoded vector
+ *
+ * Return value : >0 - number of samples in decoded vector
+ * -1 - Error
+ */
+int WebRtcIsac_DecodeRcu(ISACStruct* ISAC_main_inst,
+ const uint8_t* encoded,
+ size_t len,
+ int16_t* decoded,
+ int16_t* speechType);
+
+/* If `inst` is a decoder but not an encoder: tell it what sample rate the
+ encoder is using, for bandwidth estimation purposes. */
+void WebRtcIsac_SetEncSampRateInDecoder(ISACStruct* inst, int sample_rate_hz);
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_INCLUDE_ISAC_H_ */
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/arith_routines.c b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/arith_routines.c
new file mode 100644
index 0000000000..9d5c6930b1
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/arith_routines.c
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/isac/main/source/arith_routines.h"
+#include "modules/audio_coding/codecs/isac/main/source/settings.h"
+
+
+/*
+ * terminate and return byte stream;
+ * returns the number of bytes in the stream
+ */
+int WebRtcIsac_EncTerminate(Bitstr *streamdata) /* in-/output struct containing bitstream */
+{
+ uint8_t *stream_ptr;
+
+
+ /* point to the right place in the stream buffer */
+ stream_ptr = streamdata->stream + streamdata->stream_index;
+
+ /* find minimum length (determined by current interval width) */
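+  /* If the interval is still wide (W_upper > 0x01FFFFFF), rounding the top
+     byte of streamval is enough to pin the stream down and one byte is
+     written; otherwise two bytes are needed. */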
+ if ( streamdata->W_upper > 0x01FFFFFF )
+ {
+ streamdata->streamval += 0x01000000;
+ /* add carry to buffer */
+ if (streamdata->streamval < 0x01000000)
+ {
+ /* propagate carry */
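+      /* walk backwards, incrementing bytes; every byte that wraps from 0xFF
+         to 0x00 carries into the byte before it */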
+ while ( !(++(*--stream_ptr)) );
+ /* put pointer back to the old value */
+ stream_ptr = streamdata->stream + streamdata->stream_index;
+ }
+ /* write remaining data to bitstream */
+ *stream_ptr++ = (uint8_t) (streamdata->streamval >> 24);
+ }
+ else
+ {
+ streamdata->streamval += 0x00010000;
+ /* add carry to buffer */
+ if (streamdata->streamval < 0x00010000)
+ {
+ /* propagate carry */
+ while ( !(++(*--stream_ptr)) );
+ /* put pointer back to the old value */
+ stream_ptr = streamdata->stream + streamdata->stream_index;
+ }
+ /* write remaining data to bitstream */
+ *stream_ptr++ = (uint8_t) (streamdata->streamval >> 24);
+ *stream_ptr++ = (uint8_t) ((streamdata->streamval >> 16) & 0x00FF);
+ }
+
+ /* calculate stream length */
+ return (int)(stream_ptr - streamdata->stream);
+}
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/arith_routines.h b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/arith_routines.h
new file mode 100644
index 0000000000..3f9f6de7bb
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/arith_routines.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * arith_routines.h
+ *
+ * Functions for arithmetic coding.
+ *
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_ARITH_ROUTINES_H_
+#define MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_ARITH_ROUTINES_H_
+
+#include "modules/audio_coding/codecs/isac/main/source/structs.h"
+
+int WebRtcIsac_EncLogisticMulti2(
+ Bitstr* streamdata, /* in-/output struct containing bitstream */
+ int16_t* dataQ7, /* input: data vector */
+ const uint16_t*
+ env, /* input: side info vector defining the width of the pdf */
+ int N, /* input: data vector length */
+ int16_t isSWB12kHz); /* if the codec is working in 12kHz bandwidth */
+
+/* returns the number of bytes in the stream */
+int WebRtcIsac_EncTerminate(
+ Bitstr* streamdata); /* in-/output struct containing bitstream */
+
+/* returns the number of bytes in the stream so far */
+int WebRtcIsac_DecLogisticMulti2(
+ int16_t* data, /* output: data vector */
+ Bitstr* streamdata, /* in-/output struct containing bitstream */
+ const uint16_t*
+ env, /* input: side info vector defining the width of the pdf */
+ const int16_t* dither, /* input: dither vector */
+ int N, /* input: data vector length */
+ int16_t isSWB12kHz); /* if the codec is working in 12kHz bandwidth */
+
+void WebRtcIsac_EncHistMulti(
+ Bitstr* streamdata, /* in-/output struct containing bitstream */
+ const int* data, /* input: data vector */
+ const uint16_t* const* cdf, /* input: array of cdf arrays */
+ int N); /* input: data vector length */
+
+int WebRtcIsac_DecHistBisectMulti(
+ int* data, /* output: data vector */
+ Bitstr* streamdata, /* in-/output struct containing bitstream */
+ const uint16_t* const* cdf, /* input: array of cdf arrays */
+ const uint16_t*
+ cdf_size, /* input: array of cdf table sizes+1 (power of two: 2^k) */
+ int N); /* input: data vector length */
+
+int WebRtcIsac_DecHistOneStepMulti(
+ int* data, /* output: data vector */
+ Bitstr* streamdata, /* in-/output struct containing bitstream */
+ const uint16_t* const* cdf, /* input: array of cdf arrays */
+ const uint16_t*
+ init_index, /* input: vector of initial cdf table search entries */
+ int N); /* input: data vector length */
+
+#endif /* MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_ARITH_ROUTINES_H_ */
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/arith_routines_hist.c b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/arith_routines_hist.c
new file mode 100644
index 0000000000..e948979fd7
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/arith_routines_hist.c
@@ -0,0 +1,291 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/isac/main/source/settings.h"
+#include "modules/audio_coding/codecs/isac/main/source/arith_routines.h"
+
+
+/*
+ * code symbols into arithmetic bytestream
+ */
+void WebRtcIsac_EncHistMulti(Bitstr *streamdata, /* in-/output struct containing bitstream */
+ const int *data, /* input: data vector */
+ const uint16_t *const *cdf, /* input: array of cdf arrays */
+ const int N) /* input: data vector length */
+{
+ uint32_t W_lower, W_upper;
+ uint32_t W_upper_LSB, W_upper_MSB;
+ uint8_t *stream_ptr;
+ uint8_t *stream_ptr_carry;
+ uint32_t cdf_lo, cdf_hi;
+ int k;
+
+
+ /* point to beginning of stream buffer */
+ stream_ptr = streamdata->stream + streamdata->stream_index;
+ W_upper = streamdata->W_upper;
+
+ for (k=N; k>0; k--)
+ {
+ /* fetch cdf_lower and cdf_upper from cdf tables */
+ cdf_lo = (uint32_t) *(*cdf + *data);
+ cdf_hi = (uint32_t) *(*cdf++ + *data++ + 1);
+
+ /* update interval */
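+    /* (W_upper * cdf) >> 16 evaluated in 32-bit arithmetic by splitting
+       W_upper into 16-bit halves:
+       (MSB * 2^16 + LSB) * cdf >> 16 = MSB * cdf + ((LSB * cdf) >> 16) */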
+ W_upper_LSB = W_upper & 0x0000FFFF;
+ W_upper_MSB = W_upper >> 16;
+ W_lower = W_upper_MSB * cdf_lo;
+ W_lower += (W_upper_LSB * cdf_lo) >> 16;
+ W_upper = W_upper_MSB * cdf_hi;
+ W_upper += (W_upper_LSB * cdf_hi) >> 16;
+
+ /* shift interval such that it begins at zero */
+ W_upper -= ++W_lower;
+
+ /* add integer to bitstream */
+ streamdata->streamval += W_lower;
+
+ /* handle carry */
+ if (streamdata->streamval < W_lower)
+ {
+ /* propagate carry */
+ stream_ptr_carry = stream_ptr;
+ while (!(++(*--stream_ptr_carry)));
+ }
+
+ /* renormalize interval, store most significant byte of streamval and update streamval */
+ while ( !(W_upper & 0xFF000000) ) /* W_upper < 2^24 */
+ {
+ W_upper <<= 8;
+ *stream_ptr++ = (uint8_t) (streamdata->streamval >> 24);
+ streamdata->streamval <<= 8;
+ }
+ }
+
+ /* calculate new stream_index */
+ streamdata->stream_index = (int)(stream_ptr - streamdata->stream);
+ streamdata->W_upper = W_upper;
+
+ return;
+}
+
+
+
+/*
+ * function to decode more symbols from the arithmetic bytestream, using method of bisection
+ * cdf tables should be of size 2^k-1 (which corresponds to an alphabet size of 2^k-2)
+ */
+int WebRtcIsac_DecHistBisectMulti(int *data, /* output: data vector */
+ Bitstr *streamdata, /* in-/output struct containing bitstream */
+ const uint16_t *const *cdf, /* input: array of cdf arrays */
+ const uint16_t *cdf_size, /* input: array of cdf table sizes+1 (power of two: 2^k) */
+ const int N) /* input: data vector length */
+{
+ uint32_t W_lower, W_upper;
+ uint32_t W_tmp;
+ uint32_t W_upper_LSB, W_upper_MSB;
+ uint32_t streamval;
+ const uint8_t *stream_ptr;
+ const uint16_t *cdf_ptr;
+ int size_tmp;
+ int k;
+
+  W_lower = 0; // Initialized only to silence a compiler warning.
+ stream_ptr = streamdata->stream + streamdata->stream_index;
+ W_upper = streamdata->W_upper;
+ if (W_upper == 0)
+ /* Should not be possible in normal operation */
+ return -2;
+
+ if (streamdata->stream_index == 0) /* first time decoder is called for this stream */
+ {
+ /* read first word from bytestream */
+ streamval = *stream_ptr << 24;
+ streamval |= *++stream_ptr << 16;
+ streamval |= *++stream_ptr << 8;
+ streamval |= *++stream_ptr;
+ } else {
+ streamval = streamdata->streamval;
+ }
+
+ for (k=N; k>0; k--)
+ {
+ /* find the integer *data for which streamval lies in [W_lower+1, W_upper] */
+ W_upper_LSB = W_upper & 0x0000FFFF;
+ W_upper_MSB = W_upper >> 16;
+
+ /* start halfway the cdf range */
+ size_tmp = *cdf_size++ >> 1;
+ cdf_ptr = *cdf + (size_tmp - 1);
+
+ /* method of bisection */
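+    /* halve the remaining cdf range on each pass; the search stops when it
+       narrows to a single entry, whose interval contains streamval */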
+ for ( ;; )
+ {
+ W_tmp = W_upper_MSB * *cdf_ptr;
+ W_tmp += (W_upper_LSB * *cdf_ptr) >> 16;
+ size_tmp >>= 1;
+ if (size_tmp == 0) break;
+ if (streamval > W_tmp)
+ {
+ W_lower = W_tmp;
+ cdf_ptr += size_tmp;
+ } else {
+ W_upper = W_tmp;
+ cdf_ptr -= size_tmp;
+ }
+ }
+ if (streamval > W_tmp)
+ {
+ W_lower = W_tmp;
+ *data++ = (int)(cdf_ptr - *cdf++);
+ } else {
+ W_upper = W_tmp;
+ *data++ = (int)(cdf_ptr - *cdf++ - 1);
+ }
+
+ /* shift interval to start at zero */
+ W_upper -= ++W_lower;
+
+ /* add integer to bitstream */
+ streamval -= W_lower;
+
+ /* renormalize interval and update streamval */
+ while ( !(W_upper & 0xFF000000) ) /* W_upper < 2^24 */
+ {
+ /* read next byte from stream */
+ streamval = (streamval << 8) | *++stream_ptr;
+ W_upper <<= 8;
+ }
+
+ if (W_upper == 0)
+ /* Should not be possible in normal operation */
+ return -2;
+
+
+ }
+
+ streamdata->stream_index = (int)(stream_ptr - streamdata->stream);
+ streamdata->W_upper = W_upper;
+ streamdata->streamval = streamval;
+
+
+ /* find number of bytes in original stream (determined by current interval width) */
+ if ( W_upper > 0x01FFFFFF )
+ return streamdata->stream_index - 2;
+ else
+ return streamdata->stream_index - 1;
+}
+
+
+
+/*
+ * function to decode more symbols from the arithmetic bytestream, taking single step up or
+ * down at a time
+ * cdf tables can be of arbitrary size, but large tables may take a lot of iterations
+ */
+int WebRtcIsac_DecHistOneStepMulti(int *data, /* output: data vector */
+ Bitstr *streamdata, /* in-/output struct containing bitstream */
+ const uint16_t *const *cdf, /* input: array of cdf arrays */
+ const uint16_t *init_index, /* input: vector of initial cdf table search entries */
+ const int N) /* input: data vector length */
+{
+ uint32_t W_lower, W_upper;
+ uint32_t W_tmp;
+ uint32_t W_upper_LSB, W_upper_MSB;
+ uint32_t streamval;
+ const uint8_t *stream_ptr;
+ const uint16_t *cdf_ptr;
+ int k;
+
+
+ stream_ptr = streamdata->stream + streamdata->stream_index;
+ W_upper = streamdata->W_upper;
+ if (W_upper == 0)
+ /* Should not be possible in normal operation */
+ return -2;
+
+ if (streamdata->stream_index == 0) /* first time decoder is called for this stream */
+ {
+ /* read first word from bytestream */
+ streamval = (uint32_t)(*stream_ptr) << 24;
+ streamval |= (uint32_t)(*++stream_ptr) << 16;
+ streamval |= (uint32_t)(*++stream_ptr) << 8;
+ streamval |= (uint32_t)(*++stream_ptr);
+ } else {
+ streamval = streamdata->streamval;
+ }
+
+
+ for (k=N; k>0; k--)
+ {
+ /* find the integer *data for which streamval lies in [W_lower+1, W_upper] */
+ W_upper_LSB = W_upper & 0x0000FFFF;
+ W_upper_MSB = W_upper >> 16;
+
+ /* start at the specified table entry */
+ cdf_ptr = *cdf + (*init_index++);
+ W_tmp = W_upper_MSB * *cdf_ptr;
+ W_tmp += (W_upper_LSB * *cdf_ptr) >> 16;
+ if (streamval > W_tmp)
+ {
+ for ( ;; )
+ {
+ W_lower = W_tmp;
+ if (cdf_ptr[0]==65535)
+ /* range check */
+ return -3;
+ W_tmp = W_upper_MSB * *++cdf_ptr;
+ W_tmp += (W_upper_LSB * *cdf_ptr) >> 16;
+ if (streamval <= W_tmp) break;
+ }
+ W_upper = W_tmp;
+ *data++ = (int)(cdf_ptr - *cdf++ - 1);
+ } else {
+ for ( ;; )
+ {
+ W_upper = W_tmp;
+ --cdf_ptr;
+ if (cdf_ptr<*cdf) {
+ /* range check */
+ return -3;
+ }
+ W_tmp = W_upper_MSB * *cdf_ptr;
+ W_tmp += (W_upper_LSB * *cdf_ptr) >> 16;
+ if (streamval > W_tmp) break;
+ }
+ W_lower = W_tmp;
+ *data++ = (int)(cdf_ptr - *cdf++);
+ }
+
+ /* shift interval to start at zero */
+ W_upper -= ++W_lower;
+ /* add integer to bitstream */
+ streamval -= W_lower;
+
+ /* renormalize interval and update streamval */
+ while ( !(W_upper & 0xFF000000) ) /* W_upper < 2^24 */
+ {
+ /* read next byte from stream */
+ streamval = (streamval << 8) | *++stream_ptr;
+ W_upper <<= 8;
+ }
+ }
+
+ streamdata->stream_index = (int)(stream_ptr - streamdata->stream);
+ streamdata->W_upper = W_upper;
+ streamdata->streamval = streamval;
+
+
+ /* find number of bytes in original stream (determined by current interval width) */
+ if ( W_upper > 0x01FFFFFF )
+ return streamdata->stream_index - 2;
+ else
+ return streamdata->stream_index - 1;
+}
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/arith_routines_logist.c b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/arith_routines_logist.c
new file mode 100644
index 0000000000..777780f54f
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/arith_routines_logist.c
@@ -0,0 +1,303 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * arith_routines_logist.c
+ *
+ * This file contains functions for arithmetically encoding and
+ * decoding DFT coefficients.
+ *
+ */
+
+
+#include "modules/audio_coding/codecs/isac/main/source/arith_routines.h"
+
+
+
+static const int32_t kHistEdgesQ15[51] = {
+ -327680, -314573, -301466, -288359, -275252, -262144, -249037, -235930, -222823, -209716,
+ -196608, -183501, -170394, -157287, -144180, -131072, -117965, -104858, -91751, -78644,
+ -65536, -52429, -39322, -26215, -13108, 0, 13107, 26214, 39321, 52428,
+ 65536, 78643, 91750, 104857, 117964, 131072, 144179, 157286, 170393, 183500,
+ 196608, 209715, 222822, 235929, 249036, 262144, 275251, 288358, 301465, 314572,
+ 327680};
+
+
+static const int kCdfSlopeQ0[51] = { /* Q0 */
+ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+ 5, 5, 13, 23, 47, 87, 154, 315, 700, 1088,
+ 2471, 6064, 14221, 21463, 36634, 36924, 19750, 13270, 5806, 2312,
+ 1095, 660, 316, 145, 86, 41, 32, 5, 5, 5,
+ 5, 5, 5, 5, 5, 5, 5, 5, 5, 2, 0};
+
+
+static const int kCdfQ16[51] = { /* Q16 */
+ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18,
+ 20, 22, 24, 29, 38, 57, 92, 153, 279, 559,
+ 994, 1983, 4408, 10097, 18682, 33336, 48105, 56005, 61313, 63636,
+ 64560, 64998, 65262, 65389, 65447, 65481, 65497, 65510, 65512, 65514,
+ 65516, 65518, 65520, 65522, 65524, 65526, 65528, 65530, 65532, 65534,
+ 65535};
+
+
+
+/* function to be converted to fixed point */
+static __inline uint32_t piecewise(int32_t xinQ15) {
+
+ int32_t ind, qtmp1, qtmp2, qtmp3;
+ uint32_t tmpUW32;
+
+
+ qtmp2 = xinQ15;
+
+ if (qtmp2 < kHistEdgesQ15[0]) {
+ qtmp2 = kHistEdgesQ15[0];
+ }
+ if (qtmp2 > kHistEdgesQ15[50]) {
+ qtmp2 = kHistEdgesQ15[50];
+ }
+
+ qtmp1 = qtmp2 - kHistEdgesQ15[0]; /* Q15 - Q15 = Q15 */
+  /* histogram bins are 13107 wide in Q15 (2^16 / 5, i.e. 0.4), so
+     multiplying by 5 and shifting right by 16 maps the Q15 offset
+     to a Q0 bin index */
+  ind = (qtmp1 * 5) >> 16;
+ qtmp1 = qtmp2 - kHistEdgesQ15[ind]; /* Q15 - Q15 = Q15 */
+ qtmp2 = kCdfSlopeQ0[ind] * qtmp1; /* Q0 * Q15 = Q15 */
+ qtmp3 = qtmp2>>15; /* Q15 -> Q0 */
+
+ tmpUW32 = kCdfQ16[ind] + qtmp3; /* Q0 + Q0 = Q0 */
+ return tmpUW32;
+}
+
+
+
+int WebRtcIsac_EncLogisticMulti2(
+ Bitstr *streamdata, /* in-/output struct containing bitstream */
+ int16_t *dataQ7, /* input: data vector */
+ const uint16_t *envQ8, /* input: side info vector defining the width of the pdf */
+ const int N, /* input: data vector length / 2 */
+ const int16_t isSWB12kHz)
+{
+ uint32_t W_lower, W_upper;
+ uint32_t W_upper_LSB, W_upper_MSB;
+ uint8_t *stream_ptr;
+ uint8_t *maxStreamPtr;
+ uint8_t *stream_ptr_carry;
+ uint32_t cdf_lo, cdf_hi;
+ int k;
+
+ /* point to beginning of stream buffer */
+ stream_ptr = streamdata->stream + streamdata->stream_index;
+ W_upper = streamdata->W_upper;
+
+ maxStreamPtr = streamdata->stream + STREAM_SIZE_MAX_60 - 1;
+ for (k = 0; k < N; k++)
+ {
+ /* compute cdf_lower and cdf_upper by evaluating the piecewise linear cdf */
+ cdf_lo = piecewise((*dataQ7 - 64) * *envQ8);
+ cdf_hi = piecewise((*dataQ7 + 64) * *envQ8);
+
+ /* test and clip if probability gets too small */
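+    /* a degenerate interval (cdf_lo + 1 >= cdf_hi) carries too little
+       probability mass to be coded, so move the data value in steps of 1.0
+       (128 in Q7) toward the center of the pdf until the interval widens */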
+ while (cdf_lo+1 >= cdf_hi) {
+ /* clip */
+ if (*dataQ7 > 0) {
+ *dataQ7 -= 128;
+ cdf_hi = cdf_lo;
+ cdf_lo = piecewise((*dataQ7 - 64) * *envQ8);
+ } else {
+ *dataQ7 += 128;
+ cdf_lo = cdf_hi;
+ cdf_hi = piecewise((*dataQ7 + 64) * *envQ8);
+ }
+ }
+
+ dataQ7++;
+ // increment only once per 4 iterations for SWB-16kHz or WB
+ // increment only once per 2 iterations for SWB-12kHz
+ envQ8 += (isSWB12kHz)? (k & 1):((k & 1) & (k >> 1));
+
+
+ /* update interval */
+ W_upper_LSB = W_upper & 0x0000FFFF;
+ W_upper_MSB = W_upper >> 16;
+ W_lower = W_upper_MSB * cdf_lo;
+ W_lower += (W_upper_LSB * cdf_lo) >> 16;
+ W_upper = W_upper_MSB * cdf_hi;
+ W_upper += (W_upper_LSB * cdf_hi) >> 16;
+
+ /* shift interval such that it begins at zero */
+ W_upper -= ++W_lower;
+
+ /* add integer to bitstream */
+ streamdata->streamval += W_lower;
+
+ /* handle carry */
+ if (streamdata->streamval < W_lower)
+ {
+ /* propagate carry */
+ stream_ptr_carry = stream_ptr;
+ while (!(++(*--stream_ptr_carry)));
+ }
+
+ /* renormalize interval, store most significant byte of streamval and update streamval */
+ while ( !(W_upper & 0xFF000000) ) /* W_upper < 2^24 */
+ {
+ W_upper <<= 8;
+ *stream_ptr++ = (uint8_t) (streamdata->streamval >> 24);
+
+ if(stream_ptr > maxStreamPtr)
+ {
+ return -ISAC_DISALLOWED_BITSTREAM_LENGTH;
+ }
+ streamdata->streamval <<= 8;
+ }
+ }
+
+ /* calculate new stream_index */
+ streamdata->stream_index = (int)(stream_ptr - streamdata->stream);
+ streamdata->W_upper = W_upper;
+
+ return 0;
+}
+
+
+
+int WebRtcIsac_DecLogisticMulti2(
+ int16_t *dataQ7, /* output: data vector */
+ Bitstr *streamdata, /* in-/output struct containing bitstream */
+ const uint16_t *envQ8, /* input: side info vector defining the width of the pdf */
+ const int16_t *ditherQ7,/* input: dither vector */
+ const int N, /* input: data vector length */
+ const int16_t isSWB12kHz)
+{
+ uint32_t W_lower, W_upper;
+ uint32_t W_tmp;
+ uint32_t W_upper_LSB, W_upper_MSB;
+ uint32_t streamval;
+ const uint8_t *stream_ptr;
+ uint32_t cdf_tmp;
+ int16_t candQ7;
+ int k;
+
+ // Position just past the end of the stream. STREAM_SIZE_MAX_60 instead of
+ // STREAM_SIZE_MAX (which is the size of the allocated buffer) because that's
+ // the limit to how much data is filled in.
+ const uint8_t* const stream_end = streamdata->stream + STREAM_SIZE_MAX_60;
+
+ stream_ptr = streamdata->stream + streamdata->stream_index;
+ W_upper = streamdata->W_upper;
+ if (streamdata->stream_index == 0) /* first time decoder is called for this stream */
+ {
+ /* read first word from bytestream */
+ if (stream_ptr + 3 >= stream_end)
+ return -1; // Would read out of bounds. Malformed input?
+ streamval = *stream_ptr << 24;
+ streamval |= *++stream_ptr << 16;
+ streamval |= *++stream_ptr << 8;
+ streamval |= *++stream_ptr;
+ } else {
+ streamval = streamdata->streamval;
+ }
+
+
+ for (k = 0; k < N; k++)
+ {
+ /* find the integer *dataQ7 for which streamval lies in [W_lower+1, W_upper] */
+ W_upper_LSB = W_upper & 0x0000FFFF;
+ W_upper_MSB = W_upper >> 16;
+
+ /* find first candidate by inverting the logistic cdf */
+ candQ7 = - *ditherQ7 + 64;
+ cdf_tmp = piecewise(candQ7 * *envQ8);
+
+ W_tmp = W_upper_MSB * cdf_tmp;
+ W_tmp += (W_upper_LSB * cdf_tmp) >> 16;
+ if (streamval > W_tmp)
+ {
+ W_lower = W_tmp;
+ candQ7 += 128;
+ cdf_tmp = piecewise(candQ7 * *envQ8);
+
+ W_tmp = W_upper_MSB * cdf_tmp;
+ W_tmp += (W_upper_LSB * cdf_tmp) >> 16;
+ while (streamval > W_tmp)
+ {
+ W_lower = W_tmp;
+ candQ7 += 128;
+ cdf_tmp = piecewise(candQ7 * *envQ8);
+
+ W_tmp = W_upper_MSB * cdf_tmp;
+ W_tmp += (W_upper_LSB * cdf_tmp) >> 16;
+
+ /* error check */
+ if (W_lower == W_tmp) return -1;
+ }
+ W_upper = W_tmp;
+
+ /* another sample decoded */
+ *dataQ7 = candQ7 - 64;
+ }
+ else
+ {
+ W_upper = W_tmp;
+ candQ7 -= 128;
+ cdf_tmp = piecewise(candQ7 * *envQ8);
+
+ W_tmp = W_upper_MSB * cdf_tmp;
+ W_tmp += (W_upper_LSB * cdf_tmp) >> 16;
+ while ( !(streamval > W_tmp) )
+ {
+ W_upper = W_tmp;
+ candQ7 -= 128;
+ cdf_tmp = piecewise(candQ7 * *envQ8);
+
+ W_tmp = W_upper_MSB * cdf_tmp;
+ W_tmp += (W_upper_LSB * cdf_tmp) >> 16;
+
+ /* error check */
+ if (W_upper == W_tmp) return -1;
+ }
+ W_lower = W_tmp;
+
+ /* another sample decoded */
+ *dataQ7 = candQ7 + 64;
+ }
+ ditherQ7++;
+ dataQ7++;
+ // increment only once per 4 iterations for SWB-16kHz or WB
+ // increment only once per 2 iterations for SWB-12kHz
+ envQ8 += (isSWB12kHz)? (k & 1):((k & 1) & (k >> 1));
+
+ /* shift interval to start at zero */
+ W_upper -= ++W_lower;
+
+ /* add integer to bitstream */
+ streamval -= W_lower;
+
+ /* renormalize interval and update streamval */
+ while ( !(W_upper & 0xFF000000) ) /* W_upper < 2^24 */
+ {
+ /* read next byte from stream */
+ if (stream_ptr + 1 >= stream_end)
+ return -1; // Would read out of bounds. Malformed input?
+ streamval = (streamval << 8) | *++stream_ptr;
+ W_upper <<= 8;
+ }
+ }
+
+ streamdata->stream_index = (int)(stream_ptr - streamdata->stream);
+ streamdata->W_upper = W_upper;
+ streamdata->streamval = streamval;
+
+ /* find number of bytes in original stream (determined by current interval width) */
+ if ( W_upper > 0x01FFFFFF )
+ return streamdata->stream_index - 2;
+ else
+ return streamdata->stream_index - 1;
+}
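+
+/* Both WebRtcIsac_EncLogisticMulti2 and this decoder maintain the
+ * arithmetic-coding invariant W_upper >= 2^24 by renormalizing one byte at
+ * a time: e.g. an interval width of 0x00000123 is shifted left twice
+ * (emitting two stream bytes in the encoder, consuming two in the decoder)
+ * until its top byte is non-zero. */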
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/audio_decoder_isac.cc b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/audio_decoder_isac.cc
new file mode 100644
index 0000000000..b671002e1e
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/audio_decoder_isac.cc
@@ -0,0 +1,20 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/isac/main/include/audio_decoder_isac.h"
+
+#include "modules/audio_coding/codecs/isac/audio_decoder_isac_t_impl.h"
+
+namespace webrtc {
+
+// Explicit instantiation:
+template class AudioDecoderIsacT<IsacFloat>;
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/audio_encoder_isac.cc b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/audio_encoder_isac.cc
new file mode 100644
index 0000000000..b7f2c0b1af
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/audio_encoder_isac.cc
@@ -0,0 +1,20 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/isac/main/include/audio_encoder_isac.h"
+
+#include "modules/audio_coding/codecs/isac/audio_encoder_isac_t_impl.h"
+
+namespace webrtc {
+
+// Explicit instantiation:
+template class AudioEncoderIsacT<IsacFloat>;
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/audio_encoder_isac_unittest.cc b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/audio_encoder_isac_unittest.cc
new file mode 100644
index 0000000000..07bab055e1
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/audio_encoder_isac_unittest.cc
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/isac/main/include/audio_encoder_isac.h"
+
+#include <limits>
+
+#include "test/gtest.h"
+
+namespace webrtc {
+
+namespace {
+
+void TestBadConfig(const AudioEncoderIsacFloatImpl::Config& config) {
+ EXPECT_FALSE(config.IsOk());
+}
+
+void TestGoodConfig(const AudioEncoderIsacFloatImpl::Config& config) {
+ EXPECT_TRUE(config.IsOk());
+ AudioEncoderIsacFloatImpl aei(config);
+}
+
+// Wrap subroutine calls that test things in this, so that the error messages
+// will be accompanied by stack traces that make it possible to tell which
+// subroutine invocation caused the failure.
+#define S(x) \
+ do { \
+ SCOPED_TRACE(#x); \
+ x; \
+ } while (0)
+
+} // namespace
+
+TEST(AudioEncoderIsacTest, TestConfigBitrate) {
+ AudioEncoderIsacFloatImpl::Config config;
+
+ // The default value is some real, positive value.
+ EXPECT_GT(config.bit_rate, 1);
+ S(TestGoodConfig(config));
+
+ // 0 is another way to ask for the default value.
+ config.bit_rate = 0;
+ S(TestGoodConfig(config));
+
+ // Try some unreasonable values and watch them fail.
+ config.bit_rate = -1;
+ S(TestBadConfig(config));
+ config.bit_rate = 1;
+ S(TestBadConfig(config));
+ config.bit_rate = std::numeric_limits<int>::max();
+ S(TestBadConfig(config));
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/bandwidth_estimator.c b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/bandwidth_estimator.c
new file mode 100644
index 0000000000..486cd95914
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/bandwidth_estimator.c
@@ -0,0 +1,1013 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * BwEstimator.c
+ *
+ * This file contains the code for the Bandwidth Estimator designed
+ * for iSAC.
+ *
+ */
+
+#include <math.h>
+#include <string.h>
+
+#include "modules/audio_coding/codecs/isac/main/source/bandwidth_estimator.h"
+#include "modules/audio_coding/codecs/isac/main/source/settings.h"
+#include "modules/audio_coding/codecs/isac/main/include/isac.h"
+#include "rtc_base/checks.h"
+
+/* array of quantization levels for bottle neck info; Matlab code: */
+/* sprintf('%4.1ff, ', logspace(log10(5000), log10(40000), 12)) */
+static const float kQRateTableWb[12] =
+{
+ 10000.0f, 11115.3f, 12355.1f, 13733.1f, 15264.8f, 16967.3f,
+ 18859.8f, 20963.3f, 23301.4f, 25900.3f, 28789.0f, 32000.0f};
+
+
+static const float kQRateTableSwb[24] =
+{
+ 10000.0f, 11115.3f, 12355.1f, 13733.1f, 15264.8f, 16967.3f,
+ 18859.8f, 20963.3f, 23153.1f, 25342.9f, 27532.7f, 29722.5f,
+ 31912.3f, 34102.1f, 36291.9f, 38481.7f, 40671.4f, 42861.2f,
+ 45051.0f, 47240.8f, 49430.6f, 51620.4f, 53810.2f, 56000.0f,
+};
+
+
+
+
+int32_t WebRtcIsac_InitBandwidthEstimator(
+ BwEstimatorstr* bwest_str,
+ enum IsacSamplingRate encoderSampRate,
+ enum IsacSamplingRate decoderSampRate)
+{
+ switch(encoderSampRate)
+ {
+ case kIsacWideband:
+ {
+ bwest_str->send_bw_avg = INIT_BN_EST_WB;
+ break;
+ }
+ case kIsacSuperWideband:
+ {
+ bwest_str->send_bw_avg = INIT_BN_EST_SWB;
+ break;
+ }
+ }
+
+ switch(decoderSampRate)
+ {
+ case kIsacWideband:
+ {
+ bwest_str->prev_frame_length = INIT_FRAME_LEN_WB;
+ bwest_str->rec_bw_inv = 1.0f /
+ (INIT_BN_EST_WB + INIT_HDR_RATE_WB);
+ bwest_str->rec_bw = (int32_t)INIT_BN_EST_WB;
+ bwest_str->rec_bw_avg_Q = INIT_BN_EST_WB;
+ bwest_str->rec_bw_avg = INIT_BN_EST_WB + INIT_HDR_RATE_WB;
+ bwest_str->rec_header_rate = INIT_HDR_RATE_WB;
+ break;
+ }
+ case kIsacSuperWideband:
+ {
+ bwest_str->prev_frame_length = INIT_FRAME_LEN_SWB;
+ bwest_str->rec_bw_inv = 1.0f /
+ (INIT_BN_EST_SWB + INIT_HDR_RATE_SWB);
+ bwest_str->rec_bw = (int32_t)INIT_BN_EST_SWB;
+ bwest_str->rec_bw_avg_Q = INIT_BN_EST_SWB;
+ bwest_str->rec_bw_avg = INIT_BN_EST_SWB + INIT_HDR_RATE_SWB;
+ bwest_str->rec_header_rate = INIT_HDR_RATE_SWB;
+ break;
+ }
+ }
+
+ bwest_str->prev_rec_rtp_number = 0;
+ bwest_str->prev_rec_arr_ts = 0;
+ bwest_str->prev_rec_send_ts = 0;
+ bwest_str->prev_rec_rtp_rate = 1.0f;
+ bwest_str->last_update_ts = 0;
+ bwest_str->last_reduction_ts = 0;
+ bwest_str->count_tot_updates_rec = -9;
+ bwest_str->rec_jitter = 10.0f;
+ bwest_str->rec_jitter_short_term = 0.0f;
+ bwest_str->rec_jitter_short_term_abs = 5.0f;
+ bwest_str->rec_max_delay = 10.0f;
+ bwest_str->rec_max_delay_avg_Q = 10.0f;
+ bwest_str->num_pkts_rec = 0;
+
+ bwest_str->send_max_delay_avg = 10.0f;
+
+ bwest_str->hsn_detect_rec = 0;
+
+ bwest_str->num_consec_rec_pkts_over_30k = 0;
+
+ bwest_str->hsn_detect_snd = 0;
+
+ bwest_str->num_consec_snt_pkts_over_30k = 0;
+
+ bwest_str->in_wait_period = 0;
+
+ bwest_str->change_to_WB = 0;
+
+ bwest_str->numConsecLatePkts = 0;
+ bwest_str->consecLatency = 0;
+ bwest_str->inWaitLatePkts = 0;
+ bwest_str->senderTimestamp = 0;
+ bwest_str->receiverTimestamp = 0;
+
+ bwest_str->external_bw_info.in_use = 0;
+
+ return 0;
+}
+
+/* This function updates both bottle neck rates */
+/* Parameters: */
+/* rtp_number - value from RTP packet, from NetEq */
+/* frame length - length of signal frame in ms, from iSAC decoder */
+/* send_ts - value in RTP header giving send time in samples */
+/* arr_ts - value given by timeGetTime(); time of arrival of the packet, in samples, from NetEq */
+/* pksize - size of packet in bytes, from NetEq */
+/* Index - integer (range 0...23) indicating bottle neck & jitter as estimated by other side */
+/* returns 0 if everything went fine, -1 otherwise */
+int16_t WebRtcIsac_UpdateBandwidthEstimator(
+ BwEstimatorstr* bwest_str,
+ const uint16_t rtp_number,
+ const int32_t frame_length,
+ const uint32_t send_ts,
+ const uint32_t arr_ts,
+ const size_t pksize
+ /*, const uint16_t Index*/)
+{
+ float weight = 0.0f;
+ float curr_bw_inv = 0.0f;
+ float rec_rtp_rate;
+ float t_diff_proj;
+ float arr_ts_diff;
+ float send_ts_diff;
+ float arr_time_noise;
+ float arr_time_noise_abs;
+
+ float delay_correction_factor = 1;
+ float late_diff = 0.0f;
+ int immediate_set = 0;
+ int num_pkts_expected;
+
+ RTC_DCHECK(!bwest_str->external_bw_info.in_use);
+
+ // We have to adjust the header rate if the first packet has a
+ // frame size different from the initialized value.
+ if ( frame_length != bwest_str->prev_frame_length )
+ {
+ bwest_str->rec_header_rate = (float)HEADER_SIZE * 8.0f *
+ 1000.0f / (float)frame_length; /* bits/s */
+ }
+
+ /* UPDATE ESTIMATES ON THIS SIDE */
+ /* compute far-side transmission rate */
+ rec_rtp_rate = ((float)pksize * 8.0f * 1000.0f / (float)frame_length) +
+ bwest_str->rec_header_rate;
+ // rec_rtp_rate packet bits/s + header bits/s
+
+ /* check for timer wrap-around */
+ if (arr_ts < bwest_str->prev_rec_arr_ts)
+ {
+ bwest_str->prev_rec_arr_ts = arr_ts;
+ bwest_str->last_update_ts = arr_ts;
+ bwest_str->last_reduction_ts = arr_ts + 3*FS;
+ bwest_str->num_pkts_rec = 0;
+
+ /* store frame length */
+ bwest_str->prev_frame_length = frame_length;
+
+ /* store far-side transmission rate */
+ bwest_str->prev_rec_rtp_rate = rec_rtp_rate;
+
+ /* store far-side RTP time stamp */
+ bwest_str->prev_rec_rtp_number = rtp_number;
+
+ return 0;
+ }
+
+ bwest_str->num_pkts_rec++;
+
+ /* check that it's not one of the first 9 packets */
+ if ( bwest_str->count_tot_updates_rec > 0 )
+ {
+ if(bwest_str->in_wait_period > 0 )
+ {
+ bwest_str->in_wait_period--;
+ }
+
+ bwest_str->inWaitLatePkts -= ((bwest_str->inWaitLatePkts > 0)? 1:0);
+ send_ts_diff = (float)(send_ts - bwest_str->prev_rec_send_ts);
+
+ if (send_ts_diff <= (16 * frame_length)*2)
+ // This doesn't allow for a dropped packet; not sure it's
+ // necessary to be that strict. -DH
+ {
+ /* if not been updated for a long time, reduce the BN estimate */
+ if((uint32_t)(arr_ts - bwest_str->last_update_ts) *
+ 1000.0f / FS > 3000)
+ {
+ // How many frames should have been received since the last
+ // update. If too many have been dropped, or there have been
+ // big delays, this reduction is not allowed. The send_ts_diff
+ // may no longer be needed here.
+ num_pkts_expected = (int)(((float)(arr_ts -
+ bwest_str->last_update_ts) * 1000.0f /(float) FS) /
+ (float)frame_length);
+
+ if(((float)bwest_str->num_pkts_rec/(float)num_pkts_expected) >
+ 0.9)
+ {
+ float inv_bitrate = (float) pow( 0.99995,
+ (double)((uint32_t)(arr_ts -
+ bwest_str->last_reduction_ts)*1000.0f/FS) );
+
+ if ( inv_bitrate )
+ {
+ bwest_str->rec_bw_inv /= inv_bitrate;
+
+ //precautionary, likely never necessary
+ if (bwest_str->hsn_detect_snd &&
+ bwest_str->hsn_detect_rec)
+ {
+ if (bwest_str->rec_bw_inv > 0.000066f)
+ {
+ bwest_str->rec_bw_inv = 0.000066f;
+ }
+ }
+ }
+ else
+ {
+ bwest_str->rec_bw_inv = 1.0f /
+ (INIT_BN_EST_WB + INIT_HDR_RATE_WB);
+ }
+ /* reset time-since-update counter */
+ bwest_str->last_reduction_ts = arr_ts;
+ }
+ else
+ //reset here?
+ {
+ bwest_str->last_reduction_ts = arr_ts + 3*FS;
+ bwest_str->last_update_ts = arr_ts;
+ bwest_str->num_pkts_rec = 0;
+ }
+ }
+ }
+ else
+ {
+ bwest_str->last_reduction_ts = arr_ts + 3*FS;
+ bwest_str->last_update_ts = arr_ts;
+ bwest_str->num_pkts_rec = 0;
+ }
+
+
+ /* temporarily speed up adaptation if frame length has changed */
+ if ( frame_length != bwest_str->prev_frame_length )
+ {
+ bwest_str->count_tot_updates_rec = 10;
+ bwest_str->rec_header_rate = (float)HEADER_SIZE * 8.0f *
+ 1000.0f / (float)frame_length; /* bits/s */
+
+ bwest_str->rec_bw_inv = 1.0f /((float)bwest_str->rec_bw +
+ bwest_str->rec_header_rate);
+ }
+
+ ////////////////////////
+ arr_ts_diff = (float)(arr_ts - bwest_str->prev_rec_arr_ts);
+
+ if (send_ts_diff > 0 )
+ {
+ late_diff = arr_ts_diff - send_ts_diff;
+ }
+ else
+ {
+ late_diff = arr_ts_diff - (float)(16 * frame_length);
+ }
+
+ if((late_diff > 0) && !bwest_str->inWaitLatePkts)
+ {
+ bwest_str->numConsecLatePkts++;
+ bwest_str->consecLatency += late_diff;
+ }
+ else
+ {
+ bwest_str->numConsecLatePkts = 0;
+ bwest_str->consecLatency = 0;
+ }
+ if(bwest_str->numConsecLatePkts > 50)
+ {
+ float latencyMs = bwest_str->consecLatency/(FS/1000);
+ float averageLatencyMs = latencyMs / bwest_str->numConsecLatePkts;
+ delay_correction_factor = frame_length / (frame_length + averageLatencyMs);
+ immediate_set = 1;
+ bwest_str->inWaitLatePkts = (int16_t)((bwest_str->consecLatency/(FS/1000)) / 30);// + 150;
+ bwest_str->start_wait_period = arr_ts;
+ }
+ ///////////////////////////////////////////////
+
+
+
+ /* update only if previous packet was not lost */
+ if ( rtp_number == bwest_str->prev_rec_rtp_number + 1 )
+ {
+
+
+ if (!(bwest_str->hsn_detect_snd && bwest_str->hsn_detect_rec))
+ {
+ if ((arr_ts_diff > (float)(16 * frame_length)))
+ {
+ //1/2 second
+ if ((late_diff > 8000.0f) && !bwest_str->in_wait_period)
+ {
+ delay_correction_factor = 0.7f;
+ bwest_str->in_wait_period = 55;
+ bwest_str->start_wait_period = arr_ts;
+ immediate_set = 1;
+ }
+ //320 ms
+ else if (late_diff > 5120.0f && !bwest_str->in_wait_period)
+ {
+ delay_correction_factor = 0.8f;
+ immediate_set = 1;
+ bwest_str->in_wait_period = 44;
+ bwest_str->start_wait_period = arr_ts;
+ }
+ }
+ }
+
+
+ if ((bwest_str->prev_rec_rtp_rate > bwest_str->rec_bw_avg) &&
+ (rec_rtp_rate > bwest_str->rec_bw_avg) &&
+ !bwest_str->in_wait_period)
+ {
+ /* test if still in initiation period and increment counter */
+ if (bwest_str->count_tot_updates_rec++ > 99)
+ {
+ /* constant weight after initiation part */
+ weight = 0.01f;
+ }
+ else
+ {
+ /* weight decreases with number of updates */
+ weight = 1.0f / (float) bwest_str->count_tot_updates_rec;
+ }
+ /* Bottle Neck Estimation */
+
+ /* limit outliers */
+ /* if more than 25 ms too much */
+ if (arr_ts_diff > frame_length * FS/1000 + 400.0f)
+ {
+ // in samples; 400 samples correspond to the 25 ms slack at 16 kHz
+ arr_ts_diff = frame_length * FS/1000 + 400.0f;
+ }
+ if(arr_ts_diff < (frame_length * FS/1000) - 160.0f)
+ {
+ /* don't allow it to be less than frame rate - 10 ms */
+ arr_ts_diff = (float)frame_length * FS/1000 - 160.0f;
+ }
+
+ /* compute inverse receiving rate for last packet */
+ curr_bw_inv = arr_ts_diff / ((float)(pksize + HEADER_SIZE) *
+ 8.0f * FS); // (180+35)*8*16000 = 27.5 Mbit....
+
+
+ if(curr_bw_inv <
+ (1.0f / (MAX_ISAC_BW + bwest_str->rec_header_rate)))
+ {
+ // don't allow inv rate to be larger than MAX
+ curr_bw_inv = (1.0f /
+ (MAX_ISAC_BW + bwest_str->rec_header_rate));
+ }
+
+ /* update bottle neck rate estimate */
+ bwest_str->rec_bw_inv = weight * curr_bw_inv +
+ (1.0f - weight) * bwest_str->rec_bw_inv;
+
+ /* reset time-since-update counter */
+ bwest_str->last_update_ts = arr_ts;
+ bwest_str->last_reduction_ts = arr_ts + 3 * FS;
+ bwest_str->num_pkts_rec = 0;
+
+ /* Jitter Estimation */
+ /* projected difference between arrival times */
+ t_diff_proj = ((float)(pksize + HEADER_SIZE) * 8.0f *
+ 1000.0f) / bwest_str->rec_bw_avg;
+
+
+ // difference between projected and actual
+ // arrival time differences
+ arr_time_noise = (float)(arr_ts_diff*1000.0f/FS) -
+ t_diff_proj;
+ arr_time_noise_abs = (float) fabs( arr_time_noise );
+
+ /* long term averaged absolute jitter */
+ bwest_str->rec_jitter = weight * arr_time_noise_abs +
+ (1.0f - weight) * bwest_str->rec_jitter;
+ if (bwest_str->rec_jitter > 10.0f)
+ {
+ bwest_str->rec_jitter = 10.0f;
+ }
+ /* short term averaged absolute jitter */
+ bwest_str->rec_jitter_short_term_abs = 0.05f *
+ arr_time_noise_abs + 0.95f *
+ bwest_str->rec_jitter_short_term_abs;
+
+ /* short term averaged jitter */
+ bwest_str->rec_jitter_short_term = 0.05f * arr_time_noise +
+ 0.95f * bwest_str->rec_jitter_short_term;
+ }
+ }
+ }
+ else
+ {
+ // reset time-since-update counter when
+ // receiving the first 9 packets
+ bwest_str->last_update_ts = arr_ts;
+ bwest_str->last_reduction_ts = arr_ts + 3*FS;
+ bwest_str->num_pkts_rec = 0;
+
+ bwest_str->count_tot_updates_rec++;
+ }
+
+ /* limit minimum bottle neck rate */
+ if (bwest_str->rec_bw_inv > 1.0f / ((float)MIN_ISAC_BW +
+ bwest_str->rec_header_rate))
+ {
+ bwest_str->rec_bw_inv = 1.0f / ((float)MIN_ISAC_BW +
+ bwest_str->rec_header_rate);
+ }
+
+ // limit maximum bitrate
+ if (bwest_str->rec_bw_inv < 1.0f / ((float)MAX_ISAC_BW +
+ bwest_str->rec_header_rate))
+ {
+ bwest_str->rec_bw_inv = 1.0f / ((float)MAX_ISAC_BW +
+ bwest_str->rec_header_rate);
+ }
+
+ /* store frame length */
+ bwest_str->prev_frame_length = frame_length;
+
+ /* store far-side transmission rate */
+ bwest_str->prev_rec_rtp_rate = rec_rtp_rate;
+
+ /* store far-side RTP time stamp */
+ bwest_str->prev_rec_rtp_number = rtp_number;
+
+ // Replace bwest_str->rec_max_delay by the new
+ // value (atomic operation)
+ bwest_str->rec_max_delay = 3.0f * bwest_str->rec_jitter;
+
+ /* store send and arrival time stamp */
+ bwest_str->prev_rec_arr_ts = arr_ts ;
+ bwest_str->prev_rec_send_ts = send_ts;
+
+ /* Replace bwest_str->rec_bw by the new value (atomic operation) */
+ bwest_str->rec_bw = (int32_t)(1.0f / bwest_str->rec_bw_inv -
+ bwest_str->rec_header_rate);
+
+ if (immediate_set)
+ {
+ bwest_str->rec_bw = (int32_t) (delay_correction_factor *
+ (float) bwest_str->rec_bw);
+
+ if (bwest_str->rec_bw < (int32_t) MIN_ISAC_BW)
+ {
+ bwest_str->rec_bw = (int32_t) MIN_ISAC_BW;
+ }
+
+ bwest_str->rec_bw_avg = bwest_str->rec_bw +
+ bwest_str->rec_header_rate;
+
+ bwest_str->rec_bw_avg_Q = (float) bwest_str->rec_bw;
+
+ bwest_str->rec_jitter_short_term = 0.0f;
+
+ bwest_str->rec_bw_inv = 1.0f / (bwest_str->rec_bw +
+ bwest_str->rec_header_rate);
+
+ bwest_str->count_tot_updates_rec = 1;
+
+ immediate_set = 0;
+ bwest_str->consecLatency = 0;
+ bwest_str->numConsecLatePkts = 0;
+ }
+
+ return 0;
+}
+
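+/* The bottleneck update in WebRtcIsac_UpdateBandwidthEstimator above is an
+ * exponentially weighted moving average over the *inverse* rate, with the
+ * weight decaying as 1/n over roughly the first 100 updates and fixed at
+ * 0.01 afterwards. A minimal sketch of the same smoother (illustrative
+ * only; GetNextInverseRate() is a hypothetical stand-in for the per-packet
+ * curr_bw_inv computation):
+ *
+ *   float ewma = 1.0f / (INIT_BN_EST_WB + INIT_HDR_RATE_WB);
+ *   for (int n = 1; n <= num_packets; n++) {
+ *     float w = (n > 99) ? 0.01f : 1.0f / (float)n;
+ *     ewma = w * GetNextInverseRate() + (1.0f - w) * ewma;
+ *   }
+ */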
+
+/* This function updates the send bottle neck rate */
+/* Index - integer (range 0...23) indicating bottle neck & jitter as estimated by other side */
+/* returns 0 if everything went fine, -1 otherwise */
+int16_t WebRtcIsac_UpdateUplinkBwImpl(
+ BwEstimatorstr* bwest_str,
+ int16_t index,
+ enum IsacSamplingRate encoderSamplingFreq)
+{
+ RTC_DCHECK(!bwest_str->external_bw_info.in_use);
+
+ if((index < 0) || (index > 23))
+ {
+ return -ISAC_RANGE_ERROR_BW_ESTIMATOR;
+ }
+
+ /* UPDATE ESTIMATES FROM OTHER SIDE */
+ if(encoderSamplingFreq == kIsacWideband)
+ {
+ if(index > 11)
+ {
+ index -= 12;
+ /* compute the jitter estimate as decoded on the other side */
+ bwest_str->send_max_delay_avg = 0.9f * bwest_str->send_max_delay_avg +
+ 0.1f * (float)MAX_ISAC_MD;
+ }
+ else
+ {
+ /* compute the jitter estimate as decoded on the other side */
+ bwest_str->send_max_delay_avg = 0.9f * bwest_str->send_max_delay_avg +
+ 0.1f * (float)MIN_ISAC_MD;
+ }
+
+ /* compute the BN estimate as decoded on the other side */
+ bwest_str->send_bw_avg = 0.9f * bwest_str->send_bw_avg +
+ 0.1f * kQRateTableWb[index];
+ }
+ else
+ {
+ /* compute the BN estimate as decoded on the other side */
+ bwest_str->send_bw_avg = 0.9f * bwest_str->send_bw_avg +
+ 0.1f * kQRateTableSwb[index];
+ }
+
+ if (bwest_str->send_bw_avg > (float) 28000 && !bwest_str->hsn_detect_snd)
+ {
+ bwest_str->num_consec_snt_pkts_over_30k++;
+
+ if (bwest_str->num_consec_snt_pkts_over_30k >= 66)
+ {
+ //approx 2 seconds with 30ms frames
+ bwest_str->hsn_detect_snd = 1;
+ }
+ }
+ else if (!bwest_str->hsn_detect_snd)
+ {
+ bwest_str->num_consec_snt_pkts_over_30k = 0;
+ }
+ return 0;
+}
+
+// called when there is upper-band bit-stream to update jitter
+// statistics.
+int16_t WebRtcIsac_UpdateUplinkJitter(
+ BwEstimatorstr* bwest_str,
+ int32_t index)
+{
+ RTC_DCHECK(!bwest_str->external_bw_info.in_use);
+
+ if((index < 0) || (index > 23))
+ {
+ return -ISAC_RANGE_ERROR_BW_ESTIMATOR;
+ }
+
+ if(index > 0)
+ {
+ /* compute the jitter estimate as decoded on the other side */
+ bwest_str->send_max_delay_avg = 0.9f * bwest_str->send_max_delay_avg +
+ 0.1f * (float)MAX_ISAC_MD;
+ }
+ else
+ {
+ /* compute the jitter estimate as decoded on the other side */
+ bwest_str->send_max_delay_avg = 0.9f * bwest_str->send_max_delay_avg +
+ 0.1f * (float)MIN_ISAC_MD;
+ }
+
+ return 0;
+}
+
+
+
+// Returns the bandwidth/jitter estimation code (integer 0...23)
+// to put in the sending iSAC payload
+void
+WebRtcIsac_GetDownlinkBwJitIndexImpl(
+ BwEstimatorstr* bwest_str,
+ int16_t* bottleneckIndex,
+ int16_t* jitterInfo,
+ enum IsacSamplingRate decoderSamplingFreq)
+{
+ float MaxDelay;
+ //uint16_t MaxDelayBit;
+
+ float rate;
+ float r;
+ float e1, e2;
+ const float weight = 0.1f;
+ const float* ptrQuantizationTable;
+ int16_t addJitterInfo;
+ int16_t minInd;
+ int16_t maxInd;
+ int16_t midInd;
+
+ if (bwest_str->external_bw_info.in_use) {
+ *bottleneckIndex = bwest_str->external_bw_info.bottleneck_idx;
+ *jitterInfo = bwest_str->external_bw_info.jitter_info;
+ return;
+ }
+
+ /* Get Max Delay Bit */
+ /* get unquantized max delay */
+ MaxDelay = (float)WebRtcIsac_GetDownlinkMaxDelay(bwest_str);
+
+ if ( ((1.f - weight) * bwest_str->rec_max_delay_avg_Q + weight *
+ MAX_ISAC_MD - MaxDelay) > (MaxDelay - (1.f-weight) *
+ bwest_str->rec_max_delay_avg_Q - weight * MIN_ISAC_MD) )
+ {
+ jitterInfo[0] = 0;
+ /* update quantized average */
+ bwest_str->rec_max_delay_avg_Q =
+ (1.f - weight) * bwest_str->rec_max_delay_avg_Q + weight *
+ (float)MIN_ISAC_MD;
+ }
+ else
+ {
+ jitterInfo[0] = 1;
+ /* update quantized average */
+ bwest_str->rec_max_delay_avg_Q =
+ (1.f-weight) * bwest_str->rec_max_delay_avg_Q + weight *
+ (float)MAX_ISAC_MD;
+ }
+
+ // Get unquantized rate.
+ rate = (float)WebRtcIsac_GetDownlinkBandwidth(bwest_str);
+
+ /* Get Rate Index */
+ if(decoderSamplingFreq == kIsacWideband)
+ {
+ ptrQuantizationTable = kQRateTableWb;
+ addJitterInfo = 1;
+ maxInd = 11;
+ }
+ else
+ {
+ ptrQuantizationTable = kQRateTableSwb;
+ addJitterInfo = 0;
+ maxInd = 23;
+ }
+
+ minInd = 0;
+ while(maxInd > minInd + 1)
+ {
+ midInd = (maxInd + minInd) >> 1;
+ if(rate > ptrQuantizationTable[midInd])
+ {
+ minInd = midInd;
+ }
+ else
+ {
+ maxInd = midInd;
+ }
+ }
+ // Choose the index whose resulting average is closest to the
+ // measured rate.
+ r = (1 - weight) * bwest_str->rec_bw_avg_Q - rate;
+ e1 = weight * ptrQuantizationTable[minInd] + r;
+ e2 = weight * ptrQuantizationTable[maxInd] + r;
+ e1 = (e1 > 0)? e1:-e1;
+ e2 = (e2 > 0)? e2:-e2;
+ if(e1 < e2)
+ {
+ bottleneckIndex[0] = minInd;
+ }
+ else
+ {
+ bottleneckIndex[0] = maxInd;
+ }
+
+ bwest_str->rec_bw_avg_Q = (1 - weight) * bwest_str->rec_bw_avg_Q +
+ weight * ptrQuantizationTable[bottleneckIndex[0]];
+ bottleneckIndex[0] += jitterInfo[0] * 12 * addJitterInfo;
+
+ bwest_str->rec_bw_avg = (1 - weight) * bwest_str->rec_bw_avg + weight *
+ (rate + bwest_str->rec_header_rate);
+}
+
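+/* The while-loop in WebRtcIsac_GetDownlinkBwJitIndexImpl above is a plain
+ * binary search over the sorted quantization table for the bracketing pair
+ * [minInd, maxInd] around `rate`. For example (wideband table): a rate of
+ * 17000 bps converges to minInd = 5 (16967.3f) and maxInd = 6 (18859.8f);
+ * the error comparison then picks whichever index keeps the updated
+ * rec_bw_avg_Q closest to the measured rate. */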
+
+
+/* get the bottle neck rate from far side to here, as estimated on this side */
+int32_t WebRtcIsac_GetDownlinkBandwidth( const BwEstimatorstr *bwest_str)
+{
+ int32_t rec_bw;
+ float jitter_sign;
+ float bw_adjust;
+
+ RTC_DCHECK(!bwest_str->external_bw_info.in_use);
+
+ /* create a value between -1.0 and 1.0 indicating "average sign" of jitter */
+ jitter_sign = bwest_str->rec_jitter_short_term /
+ bwest_str->rec_jitter_short_term_abs;
+
+ /* adjust bw proportionally to negative average jitter sign */
+ bw_adjust = 1.0f - jitter_sign * (0.15f + 0.15f * jitter_sign * jitter_sign);
+
+ /* adjust Rate if jitter sign is mostly constant */
+ rec_bw = (int32_t)(bwest_str->rec_bw * bw_adjust);
+
+ /* limit range of bottle neck rate */
+ if (rec_bw < MIN_ISAC_BW)
+ {
+ rec_bw = MIN_ISAC_BW;
+ }
+ else if (rec_bw > MAX_ISAC_BW)
+ {
+ rec_bw = MAX_ISAC_BW;
+ }
+ return rec_bw;
+}
+
+/* Returns the max delay (in ms) */
+int32_t
+WebRtcIsac_GetDownlinkMaxDelay(const BwEstimatorstr *bwest_str)
+{
+ int32_t rec_max_delay;
+
+ RTC_DCHECK(!bwest_str->external_bw_info.in_use);
+
+ rec_max_delay = (int32_t)(bwest_str->rec_max_delay);
+
+ /* limit range of jitter estimate */
+ if (rec_max_delay < MIN_ISAC_MD)
+ {
+ rec_max_delay = MIN_ISAC_MD;
+ }
+ else if (rec_max_delay > MAX_ISAC_MD)
+ {
+ rec_max_delay = MAX_ISAC_MD;
+ }
+ return rec_max_delay;
+}
+
+/* Clamp val to the closed interval [min,max]. */
+static int32_t clamp(int32_t val, int32_t min, int32_t max) {
+ RTC_DCHECK_LE(min, max);
+ return val < min ? min : (val > max ? max : val);
+}
+
+int32_t WebRtcIsac_GetUplinkBandwidth(const BwEstimatorstr* bwest_str) {
+ return bwest_str->external_bw_info.in_use
+ ? bwest_str->external_bw_info.send_bw_avg
+ : clamp(bwest_str->send_bw_avg, MIN_ISAC_BW, MAX_ISAC_BW);
+}
+
+int32_t WebRtcIsac_GetUplinkMaxDelay(const BwEstimatorstr* bwest_str) {
+ return bwest_str->external_bw_info.in_use
+ ? bwest_str->external_bw_info.send_max_delay_avg
+ : clamp(bwest_str->send_max_delay_avg, MIN_ISAC_MD, MAX_ISAC_MD);
+}
+
+/*
+ * update long-term average bitrate and amount of data in buffer
+ * returns minimum payload size (bytes)
+ */
+int WebRtcIsac_GetMinBytes(
+ RateModel* State,
+ int StreamSize, /* bytes in bitstream */
+ const int FrameSamples, /* samples per frame */
+ const double BottleNeck, /* bottle neck rate; excl headers (bps) */
+ const double DelayBuildUp, /* max delay from bottleneck buffering (ms) */
+ enum ISACBandwidth bandwidth
+ /*,int16_t frequentLargePackets*/)
+{
+ double MinRate = 0.0;
+ int MinBytes;
+ double TransmissionTime;
+ int burstInterval = BURST_INTERVAL;
+
+ // first 10 packets @ low rate, then INIT_BURST_LEN packets @
+ // fixed rate of INIT_RATE bps
+ if (State->InitCounter > 0)
+ {
+ if (State->InitCounter-- <= INIT_BURST_LEN)
+ {
+ if(bandwidth == isac8kHz)
+ {
+ MinRate = INIT_RATE_WB;
+ }
+ else
+ {
+ MinRate = INIT_RATE_SWB;
+ }
+ }
+ else
+ {
+ MinRate = 0;
+ }
+ }
+ else
+ {
+ /* handle burst */
+ if (State->BurstCounter)
+ {
+ if (State->StillBuffered < (1.0 - 1.0/BURST_LEN) * DelayBuildUp)
+ {
+ /* max bps derived from BottleNeck and DelayBuildUp values */
+ MinRate = (1.0 + (FS/1000) * DelayBuildUp /
+ (double)(BURST_LEN * FrameSamples)) * BottleNeck;
+ }
+ else
+ {
+ // max bps derived from StillBuffered and DelayBuildUp
+ // values
+ MinRate = (1.0 + (FS/1000) * (DelayBuildUp -
+ State->StillBuffered) / (double)FrameSamples) * BottleNeck;
+ if (MinRate < 1.04 * BottleNeck)
+ {
+ MinRate = 1.04 * BottleNeck;
+ }
+ }
+ State->BurstCounter--;
+ }
+ }
+
+
+ /* convert rate from bits/second to bytes/packet */
+ MinBytes = (int) (MinRate * FrameSamples / (8.0 * FS));
+
+ /* StreamSize will be adjusted if less than MinBytes */
+ if (StreamSize < MinBytes)
+ {
+ StreamSize = MinBytes;
+ }
+
+ /* keep track of when bottle neck was last exceeded by at least 1% */
+ if (StreamSize * 8.0 * FS / FrameSamples > 1.01 * BottleNeck) {
+ if (State->PrevExceed) {
+ /* bottle_neck exceeded twice in a row, decrease ExceedAgo */
+ State->ExceedAgo -= /*BURST_INTERVAL*/ burstInterval / (BURST_LEN - 1);
+ if (State->ExceedAgo < 0)
+ State->ExceedAgo = 0;
+ }
+ else
+ {
+ State->ExceedAgo += (FrameSamples * 1000) / FS; /* ms */
+ State->PrevExceed = 1;
+ }
+ }
+ else
+ {
+ State->PrevExceed = 0;
+ State->ExceedAgo += (FrameSamples * 1000) / FS; /* ms */
+ }
+
+ /* set burst flag if bottle neck not exceeded for long time */
+ if ((State->ExceedAgo > burstInterval) &&
+ (State->BurstCounter == 0))
+ {
+ if (State->PrevExceed)
+ {
+ State->BurstCounter = BURST_LEN - 1;
+ }
+ else
+ {
+ State->BurstCounter = BURST_LEN;
+ }
+ }
+
+
+ /* Update buffer delay */
+ TransmissionTime = StreamSize * 8.0 * 1000.0 / BottleNeck; /* ms */
+ State->StillBuffered += TransmissionTime;
+ State->StillBuffered -= (FrameSamples * 1000) / FS; /* ms */
+ if (State->StillBuffered < 0.0)
+ {
+ State->StillBuffered = 0.0;
+ }
+
+ return MinBytes;
+}
+
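+/* Unit check for the conversion in WebRtcIsac_GetMinBytes (illustrative
+ * numbers): with MinRate = 32000 bits/s, FrameSamples = 480 and
+ * FS = 16000 Hz, a frame spans 480 / 16000 = 30 ms, so
+ * MinBytes = 32000 * 0.030 / 8 = 120 bytes, matching
+ * (int)(MinRate * FrameSamples / (8.0 * FS)). */
+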
+
+/*
+ * update long-term average bitrate and amount of data in buffer
+ */
+void WebRtcIsac_UpdateRateModel(
+ RateModel *State,
+ int StreamSize, /* bytes in bitstream */
+ const int FrameSamples, /* samples per frame */
+ const double BottleNeck) /* bottle neck rate; excl headers (bps) */
+{
+ double TransmissionTime;
+
+ /* avoid the initial "high-rate" burst */
+ State->InitCounter = 0;
+
+ /* Update buffer delay */
+ TransmissionTime = StreamSize * 8.0 * 1000.0 / BottleNeck; /* ms */
+ State->StillBuffered += TransmissionTime;
+ State->StillBuffered -= (FrameSamples * 1000) / FS; /* ms */
+ if (State->StillBuffered < 0.0)
+ State->StillBuffered = 0.0;
+
+}
+
+
+void WebRtcIsac_InitRateModel(
+ RateModel *State)
+{
+ State->PrevExceed = 0; /* boolean */
+ State->ExceedAgo = 0; /* ms */
+ State->BurstCounter = 0; /* packets */
+ State->InitCounter = INIT_BURST_LEN + 10; /* packets */
+ State->StillBuffered = 1.0; /* ms */
+}
+
+int WebRtcIsac_GetNewFrameLength(
+ double bottle_neck,
+ int current_framesamples)
+{
+ int new_framesamples;
+
+ const int Thld_20_30 = 20000;
+
+ //const int Thld_30_20 = 30000;
+ const int Thld_30_20 = 1000000; // disable 20 ms frames
+
+ const int Thld_30_60 = 18000;
+ //const int Thld_30_60 = 0; // disable 60 ms frames
+
+ const int Thld_60_30 = 27000;
+
+
+ new_framesamples = current_framesamples;
+
+ /* find new framelength */
+ switch(current_framesamples) {
+ case 320:
+ if (bottle_neck < Thld_20_30)
+ new_framesamples = 480;
+ break;
+ case 480:
+ if (bottle_neck < Thld_30_60)
+ new_framesamples = 960;
+ else if (bottle_neck > Thld_30_20)
+ new_framesamples = 320;
+ break;
+ case 960:
+ if (bottle_neck >= Thld_60_30)
+ new_framesamples = 480;
+ break;
+ }
+
+ return new_framesamples;
+}
+
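+/* The switch in WebRtcIsac_GetNewFrameLength above implements a
+ * hysteresis: a 30 ms (480-sample) stream only moves to 60 ms frames once
+ * the bottleneck drops below 18000 bps, while a 60 ms stream only moves
+ * back to 30 ms once it reaches 27000 bps, so a rate hovering near either
+ * threshold cannot oscillate between frame lengths. (With Thld_30_20 set
+ * to 1000000, 20 ms frames are effectively never re-entered.) */
+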
+double WebRtcIsac_GetSnr(
+ double bottle_neck,
+ int framesamples)
+{
+ double s2nr;
+
+ const double a_20 = -30.0;
+ const double b_20 = 0.8;
+ const double c_20 = 0.0;
+
+ const double a_30 = -23.0;
+ const double b_30 = 0.48;
+ const double c_30 = 0.0;
+
+ const double a_60 = -23.0;
+ const double b_60 = 0.53;
+ const double c_60 = 0.0;
+
+
+ /* find new SNR value */
+ switch(framesamples) {
+ case 320:
+ s2nr = a_20 + b_20 * bottle_neck * 0.001 + c_20 * bottle_neck *
+ bottle_neck * 0.000001;
+ break;
+ case 480:
+ s2nr = a_30 + b_30 * bottle_neck * 0.001 + c_30 * bottle_neck *
+ bottle_neck * 0.000001;
+ break;
+ case 960:
+ s2nr = a_60 + b_60 * bottle_neck * 0.001 + c_60 * bottle_neck *
+ bottle_neck * 0.000001;
+ break;
+ default:
+ s2nr = 0;
+ }
+
+ return s2nr;
+
+}
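+
+/* Example of the mapping above (illustrative numbers): for 30 ms frames
+ * (framesamples = 480) and bottle_neck = 32000 bps, the target is
+ * s2nr = -23.0 + 0.48 * 32.0 + 0.0 = -7.64; the c_* quadratic terms are
+ * all zero in this tuning. */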
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/bandwidth_estimator.h b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/bandwidth_estimator.h
new file mode 100644
index 0000000000..5f4550a3a5
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/bandwidth_estimator.h
@@ -0,0 +1,165 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * bandwidth_estimator.h
+ *
+ * This header file contains the API for the Bandwidth Estimator
+ * designed for iSAC.
+ *
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_BANDWIDTH_ESTIMATOR_H_
+#define MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_BANDWIDTH_ESTIMATOR_H_
+
+#include <stddef.h>
+
+#include "modules/audio_coding/codecs/isac/main/source/settings.h"
+#include "modules/audio_coding/codecs/isac/main/source/structs.h"
+
+#define MIN_ISAC_BW 10000
+#define MIN_ISAC_BW_LB 10000
+#define MIN_ISAC_BW_UB 25000
+
+#define MAX_ISAC_BW 56000
+#define MAX_ISAC_BW_UB 32000
+#define MAX_ISAC_BW_LB 32000
+
+#define MIN_ISAC_MD 5
+#define MAX_ISAC_MD 25
+
+// assumed header size, in bytes; we don't know the exact number
+// (header compression may be used)
+#define HEADER_SIZE 35
+
+// Initial Frame-Size, in ms, for Wideband & Super-Wideband Mode
+#define INIT_FRAME_LEN_WB 60
+#define INIT_FRAME_LEN_SWB 30
+
+// Initial Bottleneck Estimate, in bits/sec, for
+// Wideband & Super-wideband mode
+#define INIT_BN_EST_WB 20e3f
+#define INIT_BN_EST_SWB 56e3f
+
+// Initial Header rate (header rate depends on frame-size),
+// in bits/sec, for Wideband & Super-Wideband mode.
+#define INIT_HDR_RATE_WB \
+ ((float)HEADER_SIZE * 8.0f * 1000.0f / (float)INIT_FRAME_LEN_WB)
+#define INIT_HDR_RATE_SWB \
+ ((float)HEADER_SIZE * 8.0f * 1000.0f / (float)INIT_FRAME_LEN_SWB)
+
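+// For example, with HEADER_SIZE = 35 bytes these work out to
+// 35 * 8 * 1000 / 60 ~= 4667 bits/s for wideband and
+// 35 * 8 * 1000 / 30 ~= 9333 bits/s for super-wideband.
+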
+// number of packets in a row for a high rate burst
+#define BURST_LEN 3
+
+// ms, max time between two full bursts
+#define BURST_INTERVAL 500
+
+// number of packets in a row for initial high rate burst
+#define INIT_BURST_LEN 5
+
+// bits/s, rate for the first BURST_LEN packets
+#define INIT_RATE_WB INIT_BN_EST_WB
+#define INIT_RATE_SWB INIT_BN_EST_SWB
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/* This function initializes the struct */
+/* to be called before using the struct for anything else */
+/* returns 0 if everything went fine, -1 otherwise */
+int32_t WebRtcIsac_InitBandwidthEstimator(
+ BwEstimatorstr* bwest_str,
+ enum IsacSamplingRate encoderSampRate,
+ enum IsacSamplingRate decoderSampRate);
+
+/* This function updates the receiving estimate */
+/* Parameters: */
+/* rtp_number - value from RTP packet, from NetEq */
+/* frame length - length of signal frame in ms, from iSAC decoder */
+/* send_ts - value in RTP header giving send time in samples */
+/* arr_ts - value given by timeGetTime(); time of arrival of the packet,
+ * in samples, from NetEq */
+/* pksize - size of packet in bytes, from NetEq */
+/* Index - integer (range 0...23) indicating bottle neck & jitter as
+ * estimated by other side */
+/* returns 0 if everything went fine, -1 otherwise */
+int16_t WebRtcIsac_UpdateBandwidthEstimator(BwEstimatorstr* bwest_str,
+ uint16_t rtp_number,
+ int32_t frame_length,
+ uint32_t send_ts,
+ uint32_t arr_ts,
+ size_t pksize);
+
+/* Update receiving estimates. Used when we only receive BWE index, no iSAC data
+ * packet. */
+int16_t WebRtcIsac_UpdateUplinkBwImpl(
+ BwEstimatorstr* bwest_str,
+ int16_t Index,
+ enum IsacSamplingRate encoderSamplingFreq);
+
+/* Returns the bandwidth/jitter estimation code (integer 0...23) to put in the
+ * sending iSAC payload */
+void WebRtcIsac_GetDownlinkBwJitIndexImpl(
+ BwEstimatorstr* bwest_str,
+ int16_t* bottleneckIndex,
+ int16_t* jitterInfo,
+ enum IsacSamplingRate decoderSamplingFreq);
+
+/* Returns the bandwidth estimation (in bps) */
+int32_t WebRtcIsac_GetDownlinkBandwidth(const BwEstimatorstr* bwest_str);
+
+/* Returns the max delay (in ms) */
+int32_t WebRtcIsac_GetDownlinkMaxDelay(const BwEstimatorstr* bwest_str);
+
+/* Returns the bandwidth that iSAC should send with in bps */
+int32_t WebRtcIsac_GetUplinkBandwidth(const BwEstimatorstr* bwest_str);
+
+/* Returns the max delay value from the other side in ms */
+int32_t WebRtcIsac_GetUplinkMaxDelay(const BwEstimatorstr* bwest_str);
+
+/*
+ * update amount of data in bottle neck buffer and burst handling
+ * returns minimum payload size (bytes)
+ */
+int WebRtcIsac_GetMinBytes(
+ RateModel* State,
+ int StreamSize, /* bytes in bitstream */
+ int FrameLen, /* ms per frame */
+ double BottleNeck, /* bottle neck rate; excl headers (bps) */
+ double DelayBuildUp, /* max delay from bottleneck buffering (ms) */
+ enum ISACBandwidth bandwidth
+ /*,int16_t frequentLargePackets*/);
+
+/*
+ * update long-term average bitrate and amount of data in buffer
+ */
+void WebRtcIsac_UpdateRateModel(
+ RateModel* State,
+ int StreamSize, /* bytes in bitstream */
+ int FrameSamples, /* samples per frame */
+ double BottleNeck); /* bottle neck rate; excl headers (bps) */
+
+void WebRtcIsac_InitRateModel(RateModel* State);
+
+/* Returns the new framelength value (input argument: bottle_neck) */
+int WebRtcIsac_GetNewFrameLength(double bottle_neck, int current_framelength);
+
+/* Returns the new SNR value (input argument: bottle_neck) */
+double WebRtcIsac_GetSnr(double bottle_neck, int new_framelength);
+
+int16_t WebRtcIsac_UpdateUplinkJitter(BwEstimatorstr* bwest_str, int32_t index);
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_BANDWIDTH_ESTIMATOR_H_ \
+ */
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/codec.h b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/codec.h
new file mode 100644
index 0000000000..a7c7ddc14a
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/codec.h
@@ -0,0 +1,223 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * codec.h
+ *
+ * This header file contains the calls to the internal encoder
+ * and decoder functions.
+ *
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_CODEC_H_
+#define MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_CODEC_H_
+
+#include <stddef.h>
+
+#include "modules/audio_coding/codecs/isac/main/source/structs.h"
+#include "modules/third_party/fft/fft.h"
+
+void WebRtcIsac_ResetBitstream(Bitstr* bit_stream);
+
+int WebRtcIsac_EstimateBandwidth(BwEstimatorstr* bwest_str,
+ Bitstr* streamdata,
+ size_t packet_size,
+ uint16_t rtp_seq_number,
+ uint32_t send_ts,
+ uint32_t arr_ts,
+ enum IsacSamplingRate encoderSampRate,
+ enum IsacSamplingRate decoderSampRate);
+
+int WebRtcIsac_DecodeLb(const TransformTables* transform_tables,
+ float* signal_out,
+ ISACLBDecStruct* ISACdec_obj,
+ int16_t* current_framesamples,
+ int16_t isRCUPayload);
+
+int WebRtcIsac_DecodeRcuLb(float* signal_out,
+ ISACLBDecStruct* ISACdec_obj,
+ int16_t* current_framesamples);
+
+int WebRtcIsac_EncodeLb(const TransformTables* transform_tables,
+ float* in,
+ ISACLBEncStruct* ISACencLB_obj,
+ int16_t codingMode,
+ int16_t bottleneckIndex);
+
+int WebRtcIsac_EncodeStoredDataLb(const IsacSaveEncoderData* ISACSavedEnc_obj,
+ Bitstr* ISACBitStr_obj,
+ int BWnumber,
+ float scale);
+
+int WebRtcIsac_EncodeStoredDataUb(
+ const ISACUBSaveEncDataStruct* ISACSavedEnc_obj,
+ Bitstr* bitStream,
+ int32_t jitterInfo,
+ float scale,
+ enum ISACBandwidth bandwidth);
+
+int16_t WebRtcIsac_GetRedPayloadUb(
+ const ISACUBSaveEncDataStruct* ISACSavedEncObj,
+ Bitstr* bitStreamObj,
+ enum ISACBandwidth bandwidth);
+
+/******************************************************************************
+ * WebRtcIsac_RateAllocation()
+ * Internal function to perform a rate-allocation for upper and lower-band,
+ * given a total rate.
+ *
+ * Input:
+ * - inRateBitPerSec : a total bit-rate in bits/sec.
+ *
+ * Output:
+ * - rateLBBitPerSec : a bit-rate allocated to the lower-band
+ * in bits/sec.
+ * - rateUBBitPerSec : a bit-rate allocated to the upper-band
+ * in bits/sec.
+ *
+ * Return value : 0 if rate allocation has been successful.
+ * -1 if failed to allocate rates.
+ */
+
+int16_t WebRtcIsac_RateAllocation(int32_t inRateBitPerSec,
+ double* rateLBBitPerSec,
+ double* rateUBBitPerSec,
+ enum ISACBandwidth* bandwidthKHz);
+
+/******************************************************************************
+ * WebRtcIsac_DecodeUb16()
+ *
+ * Decode the upper-band if the codec is in 0-16 kHz mode.
+ *
+ * Input/Output:
+ * -ISACdec_obj : pointer to the upper-band decoder object. The
+ * bit-stream is stored inside the decoder object.
+ *
+ * Output:
+ * -signal_out : decoded audio, 480 samples (30 ms).
+ *
+ * Return value : >0 number of decoded bytes.
+ * <0 if an error occurred.
+ */
+int WebRtcIsac_DecodeUb16(const TransformTables* transform_tables,
+ float* signal_out,
+ ISACUBDecStruct* ISACdec_obj,
+ int16_t isRCUPayload);
+
+/******************************************************************************
+ * WebRtcIsac_DecodeUb12()
+ *
+ * Decode the upper-band if the codec is in 0-12 kHz mode.
+ *
+ * Input/Output:
+ * -ISACdec_obj : pointer to the upper-band decoder object. The
+ * bit-stream is stored inside the decoder object.
+ *
+ * Output:
+ * -signal_out : decoded audio, 480 samples (30 ms).
+ *
+ * Return value : >0 number of decoded bytes.
+ * <0 if an error occurred.
+ */
+int WebRtcIsac_DecodeUb12(const TransformTables* transform_tables,
+ float* signal_out,
+ ISACUBDecStruct* ISACdec_obj,
+ int16_t isRCUPayload);
+
+/******************************************************************************
+ * WebRtcIsac_EncodeUb16()
+ *
+ * Encode the upper-band if the codec is in 0-16 kHz mode.
+ *
+ * Input:
+ * -in : upper-band audio, 160 samples (10 ms).
+ *
+ * Input/Output:
+ * -ISACenc_obj : pointer to the upper-band encoder object. The
+ * bit-stream is stored inside the encoder object.
+ *
+ * Return value : >0 number of encoded bytes.
+ * <0 if an error occurred.
+ */
+int WebRtcIsac_EncodeUb16(const TransformTables* transform_tables,
+ float* in,
+ ISACUBEncStruct* ISACenc_obj,
+ int32_t jitterInfo);
+
+/******************************************************************************
+ * WebRtcIsac_EncodeUb12()
+ *
+ * Encode the upper-band if the codec is in 0-12 kHz mode.
+ *
+ * Input:
+ * -in : upper-band audio, 160 samples (10 ms).
+ *
+ * Input/Output:
+ * -ISACenc_obj : pointer to the upper-band encoder object. The
+ * bit-stream is stored inside the encoder object.
+ *
+ * Return value : >0 number of encoded bytes.
+ * <0 if an error occurred.
+ */
+int WebRtcIsac_EncodeUb12(const TransformTables* transform_tables,
+ float* in,
+ ISACUBEncStruct* ISACenc_obj,
+ int32_t jitterInfo);
+
+/************************** initialization functions *************************/
+
+void WebRtcIsac_InitMasking(MaskFiltstr* maskdata);
+
+void WebRtcIsac_InitPostFilterbank(PostFiltBankstr* postfiltdata);
+
+/**************************** transform functions ****************************/
+
+void WebRtcIsac_InitTransform(TransformTables* tables);
+
+void WebRtcIsac_Time2Spec(const TransformTables* tables,
+ double* inre1,
+ double* inre2,
+ int16_t* outre,
+ int16_t* outim,
+ FFTstr* fftstr_obj);
+
+void WebRtcIsac_Spec2time(const TransformTables* tables,
+ double* inre,
+ double* inim,
+ double* outre1,
+ double* outre2,
+ FFTstr* fftstr_obj);
+
+/***************************** filterbank functions **************************/
+
+void WebRtcIsac_FilterAndCombineFloat(float* InLP,
+ float* InHP,
+ float* Out,
+ PostFiltBankstr* postfiltdata);
+
+/************************* normalized lattice filters ************************/
+
+void WebRtcIsac_NormLatticeFilterMa(int orderCoef,
+ float* stateF,
+ float* stateG,
+ float* lat_in,
+ double* filtcoeflo,
+ double* lat_out);
+
+void WebRtcIsac_NormLatticeFilterAr(int orderCoef,
+ float* stateF,
+ float* stateG,
+ double* lat_in,
+ double* lo_filt_coef,
+ float* lat_out);
+
+void WebRtcIsac_Dir2Lat(double* a, int orderCoef, float* sth, float* cth);
+
+#endif /* MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_CODEC_H_ */
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/crc.c b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/crc.c
new file mode 100644
index 0000000000..1bb0827031
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/crc.c
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stdlib.h>
+
+#include "modules/audio_coding/codecs/isac/main/source/crc.h"
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+
+#define POLYNOMIAL 0x04c11db7L
+
+
+static const uint32_t kCrcTable[256] = {
+ 0, 0x4c11db7, 0x9823b6e, 0xd4326d9, 0x130476dc, 0x17c56b6b,
+ 0x1a864db2, 0x1e475005, 0x2608edb8, 0x22c9f00f, 0x2f8ad6d6, 0x2b4bcb61,
+ 0x350c9b64, 0x31cd86d3, 0x3c8ea00a, 0x384fbdbd, 0x4c11db70, 0x48d0c6c7,
+ 0x4593e01e, 0x4152fda9, 0x5f15adac, 0x5bd4b01b, 0x569796c2, 0x52568b75,
+ 0x6a1936c8, 0x6ed82b7f, 0x639b0da6, 0x675a1011, 0x791d4014, 0x7ddc5da3,
+ 0x709f7b7a, 0x745e66cd, 0x9823b6e0, 0x9ce2ab57, 0x91a18d8e, 0x95609039,
+ 0x8b27c03c, 0x8fe6dd8b, 0x82a5fb52, 0x8664e6e5, 0xbe2b5b58, 0xbaea46ef,
+ 0xb7a96036, 0xb3687d81, 0xad2f2d84, 0xa9ee3033, 0xa4ad16ea, 0xa06c0b5d,
+ 0xd4326d90, 0xd0f37027, 0xddb056fe, 0xd9714b49, 0xc7361b4c, 0xc3f706fb,
+ 0xceb42022, 0xca753d95, 0xf23a8028, 0xf6fb9d9f, 0xfbb8bb46, 0xff79a6f1,
+ 0xe13ef6f4, 0xe5ffeb43, 0xe8bccd9a, 0xec7dd02d, 0x34867077, 0x30476dc0,
+ 0x3d044b19, 0x39c556ae, 0x278206ab, 0x23431b1c, 0x2e003dc5, 0x2ac12072,
+ 0x128e9dcf, 0x164f8078, 0x1b0ca6a1, 0x1fcdbb16, 0x18aeb13, 0x54bf6a4,
+ 0x808d07d, 0xcc9cdca, 0x7897ab07, 0x7c56b6b0, 0x71159069, 0x75d48dde,
+ 0x6b93dddb, 0x6f52c06c, 0x6211e6b5, 0x66d0fb02, 0x5e9f46bf, 0x5a5e5b08,
+ 0x571d7dd1, 0x53dc6066, 0x4d9b3063, 0x495a2dd4, 0x44190b0d, 0x40d816ba,
+ 0xaca5c697, 0xa864db20, 0xa527fdf9, 0xa1e6e04e, 0xbfa1b04b, 0xbb60adfc,
+ 0xb6238b25, 0xb2e29692, 0x8aad2b2f, 0x8e6c3698, 0x832f1041, 0x87ee0df6,
+ 0x99a95df3, 0x9d684044, 0x902b669d, 0x94ea7b2a, 0xe0b41de7, 0xe4750050,
+ 0xe9362689, 0xedf73b3e, 0xf3b06b3b, 0xf771768c, 0xfa325055, 0xfef34de2,
+ 0xc6bcf05f, 0xc27dede8, 0xcf3ecb31, 0xcbffd686, 0xd5b88683, 0xd1799b34,
+ 0xdc3abded, 0xd8fba05a, 0x690ce0ee, 0x6dcdfd59, 0x608edb80, 0x644fc637,
+ 0x7a089632, 0x7ec98b85, 0x738aad5c, 0x774bb0eb, 0x4f040d56, 0x4bc510e1,
+ 0x46863638, 0x42472b8f, 0x5c007b8a, 0x58c1663d, 0x558240e4, 0x51435d53,
+ 0x251d3b9e, 0x21dc2629, 0x2c9f00f0, 0x285e1d47, 0x36194d42, 0x32d850f5,
+ 0x3f9b762c, 0x3b5a6b9b, 0x315d626, 0x7d4cb91, 0xa97ed48, 0xe56f0ff,
+ 0x1011a0fa, 0x14d0bd4d, 0x19939b94, 0x1d528623, 0xf12f560e, 0xf5ee4bb9,
+ 0xf8ad6d60, 0xfc6c70d7, 0xe22b20d2, 0xe6ea3d65, 0xeba91bbc, 0xef68060b,
+ 0xd727bbb6, 0xd3e6a601, 0xdea580d8, 0xda649d6f, 0xc423cd6a, 0xc0e2d0dd,
+ 0xcda1f604, 0xc960ebb3, 0xbd3e8d7e, 0xb9ff90c9, 0xb4bcb610, 0xb07daba7,
+ 0xae3afba2, 0xaafbe615, 0xa7b8c0cc, 0xa379dd7b, 0x9b3660c6, 0x9ff77d71,
+ 0x92b45ba8, 0x9675461f, 0x8832161a, 0x8cf30bad, 0x81b02d74, 0x857130c3,
+ 0x5d8a9099, 0x594b8d2e, 0x5408abf7, 0x50c9b640, 0x4e8ee645, 0x4a4ffbf2,
+ 0x470cdd2b, 0x43cdc09c, 0x7b827d21, 0x7f436096, 0x7200464f, 0x76c15bf8,
+ 0x68860bfd, 0x6c47164a, 0x61043093, 0x65c52d24, 0x119b4be9, 0x155a565e,
+ 0x18197087, 0x1cd86d30, 0x29f3d35, 0x65e2082, 0xb1d065b, 0xfdc1bec,
+ 0x3793a651, 0x3352bbe6, 0x3e119d3f, 0x3ad08088, 0x2497d08d, 0x2056cd3a,
+ 0x2d15ebe3, 0x29d4f654, 0xc5a92679, 0xc1683bce, 0xcc2b1d17, 0xc8ea00a0,
+ 0xd6ad50a5, 0xd26c4d12, 0xdf2f6bcb, 0xdbee767c, 0xe3a1cbc1, 0xe760d676,
+ 0xea23f0af, 0xeee2ed18, 0xf0a5bd1d, 0xf464a0aa, 0xf9278673, 0xfde69bc4,
+ 0x89b8fd09, 0x8d79e0be, 0x803ac667, 0x84fbdbd0, 0x9abc8bd5, 0x9e7d9662,
+ 0x933eb0bb, 0x97ffad0c, 0xafb010b1, 0xab710d06, 0xa6322bdf, 0xa2f33668,
+ 0xbcb4666d, 0xb8757bda, 0xb5365d03, 0xb1f740b4
+};
+
+
+
+
+/****************************************************************************
+ * WebRtcIsac_GetCrc(...)
+ *
+ * This function returns a 32 bit CRC checksum of a bit stream
+ *
+ * Input:
+ * - bitstream : payload bitstream
+ * - len_bitstream_in_bytes : number of 8-bit words in the bit stream
+ *
+ * Output:
+ * - crc : checksum
+ *
+ * Return value : 0 - Ok
+ * -1 - Error
+ */
+
+int WebRtcIsac_GetCrc(const int16_t* bitstream,
+ int len_bitstream_in_bytes,
+ uint32_t* crc)
+{
+ uint8_t* bitstream_ptr_uw8;
+ uint32_t crc_state;
+ int byte_cntr;
+ int crc_tbl_indx;
+
+ /* Sanity Check. */
+ if (bitstream == NULL) {
+ return -1;
+ }
+ /* cast to UWord8 pointer */
+ bitstream_ptr_uw8 = (uint8_t *)bitstream;
+
+ /* initialize */
+ crc_state = 0xFFFFFFFF;
+
+ for (byte_cntr = 0; byte_cntr < len_bitstream_in_bytes; byte_cntr++) {
+ crc_tbl_indx = (WEBRTC_SPL_RSHIFT_U32(crc_state, 24) ^
+ bitstream_ptr_uw8[byte_cntr]) & 0xFF;
+ crc_state = (crc_state << 8) ^ kCrcTable[crc_tbl_indx];
+ }
+
+ *crc = ~crc_state;
+ return 0;
+}
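+
+/* Minimal usage sketch (illustrative only; not part of the codec path):
+ *
+ *   int16_t payload[100] = {0};  // encoded iSAC bit stream, 200 bytes
+ *   uint32_t checksum;
+ *   if (WebRtcIsac_GetCrc(payload, 200, &checksum) == 0) {
+ *     // checksum now holds the CRC-32 of the 200 payload bytes, computed
+ *     // MSB-first with polynomial 0x04c11db7, an all-ones initial state,
+ *     // and a final inversion.
+ *   }
+ */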
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/crc.h b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/crc.h
new file mode 100644
index 0000000000..f031019ed3
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/crc.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * crc.h
+ *
+ * Checksum functions
+ *
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_CRC_H_
+#define MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_CRC_H_
+
+#include <stdint.h>
+
+/****************************************************************************
+ * WebRtcIsac_GetCrc(...)
+ *
+ * This function returns a 32 bit CRC checksum of a bit stream
+ *
+ * Input:
+ * - encoded : payload bit stream
+ * - no_of_word8s : number of 8-bit words in the bit stream
+ *
+ * Output:
+ * - crc : checksum
+ *
+ * Return value : 0 - Ok
+ * -1 - Error
+ */
+
+int WebRtcIsac_GetCrc(const int16_t* encoded, int no_of_word8s, uint32_t* crc);
+
+#endif /* MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_CRC_H_ */
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/decode.c b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/decode.c
new file mode 100644
index 0000000000..6e114e4a2b
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/decode.c
@@ -0,0 +1,303 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * decode.c
+ *
+ * This file contains definitions of functions for decoding.
+ * Decoding of lower-band, including normal-decoding and RCU decoding.
+ * Decoding of upper-band, including 8-12 kHz, when the bandwidth is
+ * 0-12 kHz, and 8-16 kHz, when the bandwidth is 0-16 kHz.
+ *
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "modules/audio_coding/codecs/isac/main/source/codec.h"
+#include "modules/audio_coding/codecs/isac/main/source/entropy_coding.h"
+#include "modules/audio_coding/codecs/isac/main/source/pitch_estimator.h"
+#include "modules/audio_coding/codecs/isac/main/source/bandwidth_estimator.h"
+#include "modules/audio_coding/codecs/isac/main/source/structs.h"
+#include "modules/audio_coding/codecs/isac/main/source/settings.h"
+#include "modules/audio_coding/codecs/isac/main/source/pitch_filter.h"
+
+/*
+ * function to decode the bitstream
+ * returns the total number of bytes in the stream
+ */
+int WebRtcIsac_DecodeLb(const TransformTables* transform_tables,
+ float* signal_out, ISACLBDecStruct* ISACdecLB_obj,
+ int16_t* current_framesamples,
+ int16_t isRCUPayload) {
+ int k;
+ int len, err;
+ int16_t bandwidthInd;
+
+ float LP_dec_float[FRAMESAMPLES_HALF];
+ float HP_dec_float[FRAMESAMPLES_HALF];
+
+ double LPw[FRAMESAMPLES_HALF];
+ double HPw[FRAMESAMPLES_HALF];
+ double LPw_pf[FRAMESAMPLES_HALF];
+
+ double lo_filt_coef[(ORDERLO + 1)*SUBFRAMES];
+ double hi_filt_coef[(ORDERHI + 1)*SUBFRAMES];
+
+ double real_f[FRAMESAMPLES_HALF];
+ double imag_f[FRAMESAMPLES_HALF];
+
+ double PitchLags[4];
+ double PitchGains[4];
+ double AvgPitchGain;
+ int16_t PitchGains_Q12[4];
+ int16_t AvgPitchGain_Q12;
+
+ float gain;
+
+ int frame_nb; /* counter */
+ int frame_mode; /* 0 30ms, 1 for 60ms */
+ /* Processed_samples: 480 or 960 (30 or 60 ms). Cannot take other values. */
+
+ WebRtcIsac_ResetBitstream(&(ISACdecLB_obj->bitstr_obj));
+
+ len = 0;
+
+ /* Decode framelength and BW estimation - not used,
+ only for stream pointer*/
+ err = WebRtcIsac_DecodeFrameLen(&ISACdecLB_obj->bitstr_obj,
+ current_framesamples);
+ if (err < 0) {
+ return err;
+ }
+
+ /* Frame_mode:
+ * 0: indicates 30 ms frame (480 samples)
+ * 1: indicates 60 ms frame (960 samples) */
+ frame_mode = *current_framesamples / MAX_FRAMESAMPLES;
+
+ err = WebRtcIsac_DecodeSendBW(&ISACdecLB_obj->bitstr_obj, &bandwidthInd);
+ if (err < 0) {
+ return err;
+ }
+
+ /* One loop if it's one frame (20 or 30ms), 2 loops if 2 frames
+ bundled together (60ms). */
+ for (frame_nb = 0; frame_nb <= frame_mode; frame_nb++) {
+ /* Decode & de-quantize pitch parameters */
+ err = WebRtcIsac_DecodePitchGain(&ISACdecLB_obj->bitstr_obj,
+ PitchGains_Q12);
+ if (err < 0) {
+ return err;
+ }
+
+ err = WebRtcIsac_DecodePitchLag(&ISACdecLB_obj->bitstr_obj, PitchGains_Q12,
+ PitchLags);
+ if (err < 0) {
+ return err;
+ }
+
+ AvgPitchGain_Q12 = (PitchGains_Q12[0] + PitchGains_Q12[1] +
+ PitchGains_Q12[2] + PitchGains_Q12[3]) >> 2;
+
+ /* Decode & de-quantize filter coefficients. */
+ err = WebRtcIsac_DecodeLpc(&ISACdecLB_obj->bitstr_obj, lo_filt_coef,
+ hi_filt_coef);
+ if (err < 0) {
+ return err;
+ }
+ /* Decode & de-quantize spectrum. */
+ len = WebRtcIsac_DecodeSpec(&ISACdecLB_obj->bitstr_obj, AvgPitchGain_Q12,
+ kIsacLowerBand, real_f, imag_f);
+ if (len < 0) {
+ return len;
+ }
+
+ /* Inverse transform. */
+ WebRtcIsac_Spec2time(transform_tables, real_f, imag_f, LPw, HPw,
+ &ISACdecLB_obj->fftstr_obj);
+
+ /* Convert PitchGains back to float for pitchfilter_post */
+ for (k = 0; k < 4; k++) {
+ PitchGains[k] = ((float)PitchGains_Q12[k]) / 4096;
+ }
+ if (isRCUPayload) {
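+    /* The loop bound 240 is assumed to equal FRAMESAMPLES_HALF, i.e.
+     * half of a 480-sample frame. */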
+ for (k = 0; k < 240; k++) {
+ LPw[k] *= RCU_TRANSCODING_SCALE_INVERSE;
+ HPw[k] *= RCU_TRANSCODING_SCALE_INVERSE;
+ }
+ }
+
+ /* Inverse pitch filter. */
+ WebRtcIsac_PitchfilterPost(LPw, LPw_pf, &ISACdecLB_obj->pitchfiltstr_obj,
+ PitchLags, PitchGains);
+ /* Convert AvgPitchGain back to float for computation of gain. */
+ AvgPitchGain = ((float)AvgPitchGain_Q12) / 4096;
+ gain = 1.0f - 0.45f * (float)AvgPitchGain;
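+    /* Worked example of the Q12 format: AvgPitchGain_Q12 == 2048 gives
+     * AvgPitchGain == 2048 / 4096 == 0.5, and hence
+     * gain == 1.0 - 0.45 * 0.5 == 0.775. */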
+
+ for (k = 0; k < FRAMESAMPLES_HALF; k++) {
+ /* Reduce gain to compensate for pitch enhancer. */
+ LPw_pf[k] *= gain;
+ }
+
+ if (isRCUPayload) {
+ for (k = 0; k < FRAMESAMPLES_HALF; k++) {
+ /* Compensation for transcoding gain changes. */
+ LPw_pf[k] *= RCU_TRANSCODING_SCALE;
+ HPw[k] *= RCU_TRANSCODING_SCALE;
+ }
+ }
+ /* Perceptual post-filtering (using normalized lattice filter). */
+ WebRtcIsac_NormLatticeFilterAr(
+ ORDERLO, ISACdecLB_obj->maskfiltstr_obj.PostStateLoF,
+ (ISACdecLB_obj->maskfiltstr_obj).PostStateLoG, LPw_pf, lo_filt_coef,
+ LP_dec_float);
+ WebRtcIsac_NormLatticeFilterAr(
+ ORDERHI, ISACdecLB_obj->maskfiltstr_obj.PostStateHiF,
+ (ISACdecLB_obj->maskfiltstr_obj).PostStateHiG, HPw, hi_filt_coef,
+ HP_dec_float);
+
+ /* Recombine the 2 bands. */
+ WebRtcIsac_FilterAndCombineFloat(LP_dec_float, HP_dec_float,
+ signal_out + frame_nb * FRAMESAMPLES,
+ &ISACdecLB_obj->postfiltbankstr_obj);
+ }
+ return len;
+}
+
+
+/*
+ * This decode function is called when the codec is operating in 16 kHz
+ * bandwidth to decode the upperband, i.e. 8-16 kHz.
+ *
+ * Unlike the lower band, the upper band (8-16 kHz) is not split in
+ * frequency; instead it is split into 12 sub-frames, i.e. twice as
+ * many as the lower band.
+ */
+int WebRtcIsac_DecodeUb16(const TransformTables* transform_tables,
+ float* signal_out, ISACUBDecStruct* ISACdecUB_obj,
+ int16_t isRCUPayload) {
+ int len, err;
+
+ double halfFrameFirst[FRAMESAMPLES_HALF];
+ double halfFrameSecond[FRAMESAMPLES_HALF];
+
+ double percepFilterParam[(UB_LPC_ORDER + 1) * (SUBFRAMES << 1) +
+ (UB_LPC_ORDER + 1)];
+
+ double real_f[FRAMESAMPLES_HALF];
+ double imag_f[FRAMESAMPLES_HALF];
+ const int16_t kAveragePitchGain = 0; /* No pitch-gain for upper-band. */
+ len = 0;
+
+ /* Decode & de-quantize filter coefficients. */
+ memset(percepFilterParam, 0, sizeof(percepFilterParam));
+ err = WebRtcIsac_DecodeInterpolLpcUb(&ISACdecUB_obj->bitstr_obj,
+ percepFilterParam, isac16kHz);
+ if (err < 0) {
+ return err;
+ }
+
+ /* Decode & de-quantize spectrum. */
+ len = WebRtcIsac_DecodeSpec(&ISACdecUB_obj->bitstr_obj, kAveragePitchGain,
+ kIsacUpperBand16, real_f, imag_f);
+ if (len < 0) {
+ return len;
+ }
+ if (isRCUPayload) {
+ int n;
+ for (n = 0; n < 240; n++) {
+ real_f[n] *= RCU_TRANSCODING_SCALE_UB_INVERSE;
+ imag_f[n] *= RCU_TRANSCODING_SCALE_UB_INVERSE;
+ }
+ }
+ /* Inverse transform. */
+ WebRtcIsac_Spec2time(transform_tables,
+ real_f, imag_f, halfFrameFirst, halfFrameSecond,
+ &ISACdecUB_obj->fftstr_obj);
+
+ /* Perceptual post-filtering (using normalized lattice filter). */
+ WebRtcIsac_NormLatticeFilterAr(
+ UB_LPC_ORDER, ISACdecUB_obj->maskfiltstr_obj.PostStateLoF,
+ (ISACdecUB_obj->maskfiltstr_obj).PostStateLoG, halfFrameFirst,
+ &percepFilterParam[(UB_LPC_ORDER + 1)], signal_out);
+
+ WebRtcIsac_NormLatticeFilterAr(
+ UB_LPC_ORDER, ISACdecUB_obj->maskfiltstr_obj.PostStateLoF,
+ (ISACdecUB_obj->maskfiltstr_obj).PostStateLoG, halfFrameSecond,
+ &percepFilterParam[(UB_LPC_ORDER + 1) * SUBFRAMES + (UB_LPC_ORDER + 1)],
+ &signal_out[FRAMESAMPLES_HALF]);
+
+ return len;
+}
+
+/*
+ * This decode function is called when the codec operates at 0-12 kHz
+ * bandwidth to decode the upperband, i.e. 8-12 kHz.
+ *
+ * At the encoder the upper band is split into two bands, 8-12 kHz &
+ * 12-16 kHz, and only the 8-12 kHz band is encoded. At the decoder, the
+ * 8-12 kHz band is reconstructed and the 12-16 kHz band is replaced
+ * with zeros. The two bands are then combined to reconstruct the upper
+ * band, 8-16 kHz.
+ */
+int WebRtcIsac_DecodeUb12(const TransformTables* transform_tables,
+ float* signal_out, ISACUBDecStruct* ISACdecUB_obj,
+ int16_t isRCUPayload) {
+ int len, err;
+
+ float LP_dec_float[FRAMESAMPLES_HALF];
+ float HP_dec_float[FRAMESAMPLES_HALF];
+
+ double LPw[FRAMESAMPLES_HALF];
+ double HPw[FRAMESAMPLES_HALF];
+
+ double percepFilterParam[(UB_LPC_ORDER + 1)*SUBFRAMES];
+
+ double real_f[FRAMESAMPLES_HALF];
+ double imag_f[FRAMESAMPLES_HALF];
+ const int16_t kAveragePitchGain = 0; /* No pitch-gain for upper-band. */
+ len = 0;
+
+  /* Decode & de-quantize filter coefficients. */
+ err = WebRtcIsac_DecodeInterpolLpcUb(&ISACdecUB_obj->bitstr_obj,
+ percepFilterParam, isac12kHz);
+ if (err < 0) {
+ return err;
+ }
+
+ /* Decode & de-quantize spectrum. */
+ len = WebRtcIsac_DecodeSpec(&ISACdecUB_obj->bitstr_obj, kAveragePitchGain,
+ kIsacUpperBand12, real_f, imag_f);
+ if (len < 0) {
+ return len;
+ }
+
+ if (isRCUPayload) {
+ int n;
+ for (n = 0; n < 240; n++) {
+ real_f[n] *= RCU_TRANSCODING_SCALE_UB_INVERSE;
+ imag_f[n] *= RCU_TRANSCODING_SCALE_UB_INVERSE;
+ }
+ }
+ /* Inverse transform. */
+ WebRtcIsac_Spec2time(transform_tables,
+ real_f, imag_f, LPw, HPw, &ISACdecUB_obj->fftstr_obj);
+ /* perceptual post-filtering (using normalized lattice filter) */
+ WebRtcIsac_NormLatticeFilterAr(UB_LPC_ORDER,
+ ISACdecUB_obj->maskfiltstr_obj.PostStateLoF,
+ (ISACdecUB_obj->maskfiltstr_obj).PostStateLoG,
+ LPw, percepFilterParam, LP_dec_float);
+ /* Zero for 12-16 kHz. */
+ memset(HP_dec_float, 0, sizeof(float) * (FRAMESAMPLES_HALF));
+ /* Recombine the 2 bands. */
+ WebRtcIsac_FilterAndCombineFloat(HP_dec_float, LP_dec_float, signal_out,
+ &ISACdecUB_obj->postfiltbankstr_obj);
+ return len;
+}
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/decode_bwe.c b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/decode_bwe.c
new file mode 100644
index 0000000000..89d970fc75
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/decode_bwe.c
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/isac/main/source/structs.h"
+#include "modules/audio_coding/codecs/isac/main/source/bandwidth_estimator.h"
+#include "modules/audio_coding/codecs/isac/main/source/entropy_coding.h"
+#include "modules/audio_coding/codecs/isac/main/source/codec.h"
+
+
+int
+WebRtcIsac_EstimateBandwidth(
+ BwEstimatorstr* bwest_str,
+ Bitstr* streamdata,
+ size_t packet_size,
+ uint16_t rtp_seq_number,
+ uint32_t send_ts,
+ uint32_t arr_ts,
+ enum IsacSamplingRate encoderSampRate,
+ enum IsacSamplingRate decoderSampRate)
+{
+ int16_t index;
+ int16_t frame_samples;
+ uint32_t sendTimestampIn16kHz;
+ uint32_t arrivalTimestampIn16kHz;
+ uint32_t diffSendTime;
+ uint32_t diffArrivalTime;
+ int err;
+
+  /* Decode frame length and BW estimate. */
+ err = WebRtcIsac_DecodeFrameLen(streamdata, &frame_samples);
+ if(err < 0) // error check
+ {
+ return err;
+ }
+ err = WebRtcIsac_DecodeSendBW(streamdata, &index);
+ if(err < 0) // error check
+ {
+ return err;
+ }
+
+ /* UPDATE ESTIMATES FROM OTHER SIDE */
+ err = WebRtcIsac_UpdateUplinkBwImpl(bwest_str, index, encoderSampRate);
+ if(err < 0)
+ {
+ return err;
+ }
+
+  // BWE is designed to operate at a 16 kHz sampling rate, so the
+  // timestamps have to be translated accordingly.
+  // Translate the send timestamp if required.
+ diffSendTime = (uint32_t)((uint32_t)send_ts -
+ (uint32_t)bwest_str->senderTimestamp);
+ bwest_str->senderTimestamp = send_ts;
+
+ diffArrivalTime = (uint32_t)((uint32_t)arr_ts -
+ (uint32_t)bwest_str->receiverTimestamp);
+ bwest_str->receiverTimestamp = arr_ts;
+
+ if(decoderSampRate == kIsacSuperWideband)
+ {
+ diffArrivalTime = (uint32_t)diffArrivalTime >> 1;
+ diffSendTime = (uint32_t)diffSendTime >> 1;
+ }
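+  // Illustrative example: for a super-wideband stream (32 kHz timestamp
+  // domain), a send-time difference of 640 ticks (20 ms) becomes 320
+  // ticks in the 16 kHz domain after the halving above.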
+
+ // arrival timestamp in 16 kHz
+ arrivalTimestampIn16kHz = (uint32_t)((uint32_t)
+ bwest_str->prev_rec_arr_ts + (uint32_t)diffArrivalTime);
+ // send timestamp in 16 kHz
+ sendTimestampIn16kHz = (uint32_t)((uint32_t)
+ bwest_str->prev_rec_send_ts + (uint32_t)diffSendTime);
+
+ err = WebRtcIsac_UpdateBandwidthEstimator(bwest_str, rtp_seq_number,
+ (frame_samples * 1000) / FS, sendTimestampIn16kHz,
+ arrivalTimestampIn16kHz, packet_size);
+ // error check
+ if(err < 0)
+ {
+ return err;
+ }
+
+ return 0;
+}
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/encode.c b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/encode.c
new file mode 100644
index 0000000000..bf92d02c53
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/encode.c
@@ -0,0 +1,1260 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * encode.c
+ *
+ * This file contains definitions of functions for encoding, including
+ * encoding of the upper band, covering 8-12 kHz when the bandwidth is
+ * 0-12 kHz and 8-16 kHz when the bandwidth is 0-16 kHz.
+ *
+ */
+
+#include <stdlib.h>
+#include <string.h>
+#include <stdio.h>
+
+#include "modules/audio_coding/codecs/isac/main/source/structs.h"
+#include "modules/audio_coding/codecs/isac/main/source/codec.h"
+#include "modules/audio_coding/codecs/isac/main/source/pitch_estimator.h"
+#include "modules/audio_coding/codecs/isac/main/source/entropy_coding.h"
+#include "modules/audio_coding/codecs/isac/main/source/arith_routines.h"
+#include "modules/audio_coding/codecs/isac/main/source/pitch_gain_tables.h"
+#include "modules/audio_coding/codecs/isac/main/source/pitch_lag_tables.h"
+#include "modules/audio_coding/codecs/isac/main/source/spectrum_ar_model_tables.h"
+#include "modules/audio_coding/codecs/isac/main/source/lpc_tables.h"
+#include "modules/audio_coding/codecs/isac/main/source/lpc_analysis.h"
+#include "modules/audio_coding/codecs/isac/main/source/bandwidth_estimator.h"
+#include "modules/audio_coding/codecs/isac/main/source/lpc_shape_swb12_tables.h"
+#include "modules/audio_coding/codecs/isac/main/source/lpc_shape_swb16_tables.h"
+#include "modules/audio_coding/codecs/isac/main/source/lpc_gain_swb_tables.h"
+#include "modules/audio_coding/codecs/isac/main/source/isac_vad.h"
+#include "modules/audio_coding/codecs/isac/main/source/pitch_filter.h"
+
+
+#define UB_LOOKAHEAD 24
+
+
+/*
+  Rate allocation tables of lower-band and upper-band bottlenecks for
+  12 kHz & 16 kHz bandwidth.
+
+  12 kHz bandwidth
+  -----------------
+  The overall bottleneck of the coder is between 38 kbps and 45 kbps. We have
+  considered 7 entries, uniformly distributed in this interval, i.e. 38,
+  39.17, 40.33, 41.5, 42.67, 43.83 and 45. For every entry, the lower-band
+  and the upper-band bottlenecks are specified in the
+  'kLowerBandBitRate12' and 'kUpperBandBitRate12'
+  tables, respectively. E.g. the overall rate of 41.5 kbps corresponds to a
+  bottleneck of 31 kbps for lower-band and 27 kbps for upper-band. Given an
+  overall bottleneck of the codec, we use linear interpolation to get
+  lower-band and upper-band bottlenecks.
+
+  16 kHz bandwidth
+  -----------------
+  The overall bottleneck of the coder is between 50 kbps and 56 kbps. We have
+  considered 6 entries, uniformly distributed in this interval, i.e. 50, 51.2,
+  52.4, 53.6, 54.8 and 56. For every entry, the lower-band and the upper-band
+  bottlenecks are specified in the 'kLowerBandBitRate16' and
+  'kUpperBandBitRate16' tables, respectively. E.g. the overall rate
+  of 53.6 kbps corresponds to a bottleneck of 32 kbps for lower-band and 30
+  kbps for upper-band. Given an overall bottleneck of the codec, we use linear
+  interpolation to get lower-band and upper-band bottlenecks.
+
+ */
+
+/* 38 39.17 40.33 41.5 42.67 43.83 45 */
+static const int16_t kLowerBandBitRate12[7] = {
+ 29000, 30000, 30000, 31000, 31000, 32000, 32000 };
+static const int16_t kUpperBandBitRate12[7] = {
+ 25000, 25000, 27000, 27000, 29000, 29000, 32000 };
+
+/* 50 51.2 52.4 53.6 54.8 56 */
+static const int16_t kLowerBandBitRate16[6] = {
+ 31000, 31000, 32000, 32000, 32000, 32000 };
+static const int16_t kUpperBandBitRate16[6] = {
+ 28000, 29000, 29000, 30000, 31000, 32000 };
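+
+/* Worked example of the interpolation described above: an overall
+   bottleneck of 41500 bps gives idxD = (41500 - 38000) * 8.5714286e-4,
+   which is approximately 3.0, so idx = 3 and idxErr ~ 0. The allocation
+   is then kLowerBandBitRate12[3] = 31000 b/s for the lower band and
+   kUpperBandBitRate12[3] = 27000 b/s for the upper band, matching the
+   41.5 kbps entry discussed above. */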
+
+/******************************************************************************
+ * WebRtcIsac_RateAllocation()
+ * Internal function to perform a rate-allocation for upper and lower-band,
+ * given a total rate.
+ *
+ * Input:
+ * - inRateBitPerSec : a total bottleneck in bits/sec.
+ *
+ * Output:
+ * - rateLBBitPerSec : a bottleneck allocated to the lower-band
+ * in bits/sec.
+ * - rateUBBitPerSec : a bottleneck allocated to the upper-band
+ * in bits/sec.
+ *
+ * Return value : 0 if rate allocation has been successful.
+ * -1 if failed to allocate rates.
+ */
+
+int16_t WebRtcIsac_RateAllocation(int32_t inRateBitPerSec,
+ double* rateLBBitPerSec,
+ double* rateUBBitPerSec,
+ enum ISACBandwidth* bandwidthKHz) {
+ int16_t idx;
+ double idxD;
+ double idxErr;
+ if (inRateBitPerSec < 38000) {
+    /* If the given overall bottleneck is less than 38000 then the
+     * codec has to operate in wideband mode, i.e. 8 kHz
+ * bandwidth. */
+ *rateLBBitPerSec = (int16_t)((inRateBitPerSec > 32000) ?
+ 32000 : inRateBitPerSec);
+ *rateUBBitPerSec = 0;
+ *bandwidthKHz = isac8kHz;
+ } else if ((inRateBitPerSec >= 38000) && (inRateBitPerSec < 50000)) {
+    /* At a bottleneck between 38 and 50 kbps the codec operates at
+     * 12 kHz bandwidth. The xxxBandBitRate12[] tables are used to
+     * calculate the upper/lower bottlenecks. */
+
+    /* Find the bottlenecks by linear interpolation; the step is
+     * (45000 - 38000)/6.0 and we use its inverse. */
+ const double stepSizeInv = 8.5714286e-4;
+ idxD = (inRateBitPerSec - 38000) * stepSizeInv;
+ idx = (idxD >= 6) ? 6 : ((int16_t)idxD);
+ idxErr = idxD - idx;
+ *rateLBBitPerSec = kLowerBandBitRate12[idx];
+ *rateUBBitPerSec = kUpperBandBitRate12[idx];
+
+ if (idx < 6) {
+ *rateLBBitPerSec += (int16_t)(
+ idxErr * (kLowerBandBitRate12[idx + 1] - kLowerBandBitRate12[idx]));
+ *rateUBBitPerSec += (int16_t)(
+ idxErr * (kUpperBandBitRate12[idx + 1] - kUpperBandBitRate12[idx]));
+ }
+ *bandwidthKHz = isac12kHz;
+ } else if ((inRateBitPerSec >= 50000) && (inRateBitPerSec <= 56000)) {
+    /* A bottleneck between 50 and 56 kbps corresponds to a bandwidth
+     * of 16 kHz. The xxxBandBitRate16[] tables are used to calculate
+     * the upper/lower bottlenecks. */
+
+    /* Find the bottlenecks by linear interpolation; the step is
+     * (56000 - 50000)/5 and we use its inverse. */
+ const double stepSizeInv = 8.3333333e-4;
+ idxD = (inRateBitPerSec - 50000) * stepSizeInv;
+ idx = (idxD >= 5) ? 5 : ((int16_t)idxD);
+ idxErr = idxD - idx;
+ *rateLBBitPerSec = kLowerBandBitRate16[idx];
+ *rateUBBitPerSec = kUpperBandBitRate16[idx];
+
+ if (idx < 5) {
+ *rateLBBitPerSec += (int16_t)(idxErr *
+ (kLowerBandBitRate16[idx + 1] -
+ kLowerBandBitRate16[idx]));
+
+ *rateUBBitPerSec += (int16_t)(idxErr *
+ (kUpperBandBitRate16[idx + 1] -
+ kUpperBandBitRate16[idx]));
+ }
+ *bandwidthKHz = isac16kHz;
+ } else {
+    /* Out-of-range bottleneck value. */
+ return -1;
+ }
+
+ /* limit the values. */
+ *rateLBBitPerSec = (*rateLBBitPerSec > 32000) ? 32000 : *rateLBBitPerSec;
+ *rateUBBitPerSec = (*rateUBBitPerSec > 32000) ? 32000 : *rateUBBitPerSec;
+ return 0;
+}
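+
+/* A minimal usage sketch (illustrative only, not part of the codec):
+ *
+ *   double lbRate, ubRate;
+ *   enum ISACBandwidth bw;
+ *   if (WebRtcIsac_RateAllocation(50000, &lbRate, &ubRate, &bw) == 0) {
+ *     // Expect bw == isac16kHz, lbRate == 31000 and ubRate == 28000,
+ *     // i.e. the first entry of the 16 kHz tables.
+ *   }
+ */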
+
+
+void WebRtcIsac_ResetBitstream(Bitstr* bit_stream) {
+ bit_stream->W_upper = 0xFFFFFFFF;
+ bit_stream->stream_index = 0;
+ bit_stream->streamval = 0;
+}
+
+int WebRtcIsac_EncodeLb(const TransformTables* transform_tables,
+ float* in, ISACLBEncStruct* ISACencLB_obj,
+ int16_t codingMode,
+ int16_t bottleneckIndex) {
+ int stream_length = 0;
+ int err;
+ int k;
+ int iterCntr;
+
+ double lofilt_coef[(ORDERLO + 1)*SUBFRAMES];
+ double hifilt_coef[(ORDERHI + 1)*SUBFRAMES];
+ float LP[FRAMESAMPLES_HALF];
+ float HP[FRAMESAMPLES_HALF];
+
+ double LP_lookahead[FRAMESAMPLES_HALF];
+ double HP_lookahead[FRAMESAMPLES_HALF];
+ double LP_lookahead_pf[FRAMESAMPLES_HALF + QLOOKAHEAD];
+ double LPw[FRAMESAMPLES_HALF];
+
+ double HPw[FRAMESAMPLES_HALF];
+ double LPw_pf[FRAMESAMPLES_HALF];
+ int16_t fre[FRAMESAMPLES_HALF]; /* Q7 */
+ int16_t fim[FRAMESAMPLES_HALF]; /* Q7 */
+
+ double PitchLags[4];
+ double PitchGains[4];
+ int16_t PitchGains_Q12[4];
+ int16_t AvgPitchGain_Q12;
+
+ int frame_mode; /* 0 for 30ms, 1 for 60ms */
+ int status = 0;
+ int my_index;
+ transcode_obj transcodingParam;
+ double bytesLeftSpecCoding;
+ uint16_t payloadLimitBytes;
+
+  /* Copy the new frame length and bottleneck rate only for the first
+   * 10 ms of data. */
+ if (ISACencLB_obj->buffer_index == 0) {
+ /* Set the framelength for the next packet. */
+ ISACencLB_obj->current_framesamples = ISACencLB_obj->new_framelength;
+ }
+ /* 'frame_mode' is 0 (30 ms) or 1 (60 ms). */
+ frame_mode = ISACencLB_obj->current_framesamples / MAX_FRAMESAMPLES;
+
+ /* buffer speech samples (by 10ms packet) until the frame-length */
+ /* is reached (30 or 60 ms). */
+ /*****************************************************************/
+
+ /* fill the buffer with 10ms input data */
+ for (k = 0; k < FRAMESAMPLES_10ms; k++) {
+ ISACencLB_obj->data_buffer_float[k + ISACencLB_obj->buffer_index] = in[k];
+ }
+
+  /* If the buffer size is not equal to the current frame size, increase
+   * the index and return. No encoding is done until we have enough
+   * audio. */
+ if (ISACencLB_obj->buffer_index + FRAMESAMPLES_10ms != FRAMESAMPLES) {
+ ISACencLB_obj->buffer_index += FRAMESAMPLES_10ms;
+ return 0;
+ }
+ /* If buffer reached the right size, reset index and continue with
+ * encoding the frame. */
+ ISACencLB_obj->buffer_index = 0;
+
+ /* End of buffer function. */
+ /**************************/
+
+ /* Encoding */
+ /************/
+
+ if (frame_mode == 0 || ISACencLB_obj->frame_nb == 0) {
+    /* This is to avoid Linux warnings until we change 'int' to 'Word32'
+     * everywhere. */
+ int intVar;
+ /* reset bitstream */
+ WebRtcIsac_ResetBitstream(&(ISACencLB_obj->bitstr_obj));
+
+ if ((codingMode == 0) && (frame_mode == 0) &&
+ (ISACencLB_obj->enforceFrameSize == 0)) {
+ ISACencLB_obj->new_framelength = WebRtcIsac_GetNewFrameLength(
+ ISACencLB_obj->bottleneck, ISACencLB_obj->current_framesamples);
+ }
+
+ ISACencLB_obj->s2nr = WebRtcIsac_GetSnr(
+ ISACencLB_obj->bottleneck, ISACencLB_obj->current_framesamples);
+
+ /* Encode frame length. */
+ status = WebRtcIsac_EncodeFrameLen(
+ ISACencLB_obj->current_framesamples, &ISACencLB_obj->bitstr_obj);
+ if (status < 0) {
+ /* Wrong frame size. */
+ return status;
+ }
+ /* Save framelength for multiple packets memory. */
+ ISACencLB_obj->SaveEnc_obj.framelength =
+ ISACencLB_obj->current_framesamples;
+
+ /* To be used for Redundant Coding. */
+ ISACencLB_obj->lastBWIdx = bottleneckIndex;
+ intVar = (int)bottleneckIndex;
+ WebRtcIsac_EncodeReceiveBw(&intVar, &ISACencLB_obj->bitstr_obj);
+ }
+
+ /* Split signal in two bands. */
+ WebRtcIsac_SplitAndFilterFloat(ISACencLB_obj->data_buffer_float, LP, HP,
+ LP_lookahead, HP_lookahead,
+ &ISACencLB_obj->prefiltbankstr_obj);
+
+ /* estimate pitch parameters and pitch-filter lookahead signal */
+ WebRtcIsac_PitchAnalysis(LP_lookahead, LP_lookahead_pf,
+ &ISACencLB_obj->pitchanalysisstr_obj, PitchLags,
+ PitchGains);
+
+ /* Encode in FIX Q12. */
+
+ /* Convert PitchGain to Fixed point. */
+ for (k = 0; k < PITCH_SUBFRAMES; k++) {
+ PitchGains_Q12[k] = (int16_t)(PitchGains[k] * 4096.0);
+ }
+
+ /* Set where to store data in multiple packets memory. */
+ if (frame_mode == 0 || ISACencLB_obj->frame_nb == 0) {
+ ISACencLB_obj->SaveEnc_obj.startIdx = 0;
+ } else {
+ ISACencLB_obj->SaveEnc_obj.startIdx = 1;
+ }
+
+ /* Quantize & encode pitch parameters. */
+ WebRtcIsac_EncodePitchGain(PitchGains_Q12, &ISACencLB_obj->bitstr_obj,
+ &ISACencLB_obj->SaveEnc_obj);
+ WebRtcIsac_EncodePitchLag(PitchLags, PitchGains_Q12,
+ &ISACencLB_obj->bitstr_obj,
+ &ISACencLB_obj->SaveEnc_obj);
+
+ AvgPitchGain_Q12 = (PitchGains_Q12[0] + PitchGains_Q12[1] +
+ PitchGains_Q12[2] + PitchGains_Q12[3]) >> 2;
+
+ /* Find coefficients for perceptual pre-filters. */
+ WebRtcIsac_GetLpcCoefLb(LP_lookahead_pf, HP_lookahead,
+ &ISACencLB_obj->maskfiltstr_obj, ISACencLB_obj->s2nr,
+ PitchGains_Q12, lofilt_coef, hifilt_coef);
+
+ /* Code LPC model and shape - gains not quantized yet. */
+ WebRtcIsac_EncodeLpcLb(lofilt_coef, hifilt_coef, &ISACencLB_obj->bitstr_obj,
+ &ISACencLB_obj->SaveEnc_obj);
+
+ /* Convert PitchGains back to FLOAT for pitchfilter_pre. */
+ for (k = 0; k < 4; k++) {
+ PitchGains[k] = ((float)PitchGains_Q12[k]) / 4096;
+ }
+
+ /* Store the state of arithmetic coder before coding LPC gains. */
+ transcodingParam.W_upper = ISACencLB_obj->bitstr_obj.W_upper;
+ transcodingParam.stream_index = ISACencLB_obj->bitstr_obj.stream_index;
+ transcodingParam.streamval = ISACencLB_obj->bitstr_obj.streamval;
+ transcodingParam.stream[0] =
+ ISACencLB_obj->bitstr_obj.stream[ISACencLB_obj->bitstr_obj.stream_index -
+ 2];
+ transcodingParam.stream[1] =
+ ISACencLB_obj->bitstr_obj.stream[ISACencLB_obj->bitstr_obj.stream_index -
+ 1];
+ transcodingParam.stream[2] =
+ ISACencLB_obj->bitstr_obj.stream[ISACencLB_obj->bitstr_obj.stream_index];
+
+ /* Store LPC Gains before encoding them. */
+ for (k = 0; k < SUBFRAMES; k++) {
+ transcodingParam.loFiltGain[k] = lofilt_coef[(LPC_LOBAND_ORDER + 1) * k];
+ transcodingParam.hiFiltGain[k] = hifilt_coef[(LPC_HIBAND_ORDER + 1) * k];
+ }
+
+ /* Code gains */
+ WebRtcIsac_EncodeLpcGainLb(lofilt_coef, hifilt_coef,
+ &ISACencLB_obj->bitstr_obj,
+ &ISACencLB_obj->SaveEnc_obj);
+
+ /* Get the correct value for the payload limit and calculate the
+ * number of bytes left for coding the spectrum. */
+ if ((frame_mode == 1) && (ISACencLB_obj->frame_nb == 0)) {
+    /* This is a 60 ms frame and we are in its first 30 ms, so the limit
+     * at this point should be half of the assigned value. */
+ payloadLimitBytes = ISACencLB_obj->payloadLimitBytes60 >> 1;
+ } else if (frame_mode == 0) {
+ /* It is a 30ms frame */
+    /* Subtract 3 because the termination process may add 3 bytes. */
+ payloadLimitBytes = ISACencLB_obj->payloadLimitBytes30 - 3;
+ } else {
+ /* This is the second half of a 60ms frame. */
+    /* Subtract 3 because the termination process may add 3 bytes. */
+ payloadLimitBytes = ISACencLB_obj->payloadLimitBytes60 - 3;
+ }
+ bytesLeftSpecCoding = payloadLimitBytes - transcodingParam.stream_index;
+
+ /* Perceptual pre-filtering (using normalized lattice filter). */
+ /* Low-band filtering. */
+ WebRtcIsac_NormLatticeFilterMa(ORDERLO,
+ ISACencLB_obj->maskfiltstr_obj.PreStateLoF,
+ ISACencLB_obj->maskfiltstr_obj.PreStateLoG,
+ LP, lofilt_coef, LPw);
+ /* High-band filtering. */
+ WebRtcIsac_NormLatticeFilterMa(ORDERHI,
+ ISACencLB_obj->maskfiltstr_obj.PreStateHiF,
+ ISACencLB_obj->maskfiltstr_obj.PreStateHiG,
+ HP, hifilt_coef, HPw);
+ /* Pitch filter. */
+ WebRtcIsac_PitchfilterPre(LPw, LPw_pf, &ISACencLB_obj->pitchfiltstr_obj,
+ PitchLags, PitchGains);
+ /* Transform */
+ WebRtcIsac_Time2Spec(transform_tables,
+ LPw_pf, HPw, fre, fim, &ISACencLB_obj->fftstr_obj);
+
+ /* Save data for multiple packets memory. */
+ my_index = ISACencLB_obj->SaveEnc_obj.startIdx * FRAMESAMPLES_HALF;
+ memcpy(&ISACencLB_obj->SaveEnc_obj.fre[my_index], fre, sizeof(fre));
+ memcpy(&ISACencLB_obj->SaveEnc_obj.fim[my_index], fim, sizeof(fim));
+
+ ISACencLB_obj->SaveEnc_obj.AvgPitchGain[ISACencLB_obj->SaveEnc_obj.startIdx] =
+ AvgPitchGain_Q12;
+
+ /* Quantization and loss-less coding. */
+ err = WebRtcIsac_EncodeSpec(fre, fim, AvgPitchGain_Q12, kIsacLowerBand,
+ &ISACencLB_obj->bitstr_obj);
+ if ((err < 0) && (err != -ISAC_DISALLOWED_BITSTREAM_LENGTH)) {
+    /* There has been an error, but it was not a too-large payload
+       (a too-large payload is something we can cure). */
+ if (frame_mode == 1 && ISACencLB_obj->frame_nb == 1) {
+      /* If this is the second 30 ms of a 60 ms frame, reset this so
+         that the encoder starts fresh in the next call. */
+ ISACencLB_obj->frame_nb = 0;
+ }
+ return err;
+ }
+ iterCntr = 0;
+ while ((ISACencLB_obj->bitstr_obj.stream_index > payloadLimitBytes) ||
+ (err == -ISAC_DISALLOWED_BITSTREAM_LENGTH)) {
+ double bytesSpecCoderUsed;
+ double transcodeScale;
+
+ if (iterCntr >= MAX_PAYLOAD_LIMIT_ITERATION) {
+ /* We were not able to limit the payload size */
+ if ((frame_mode == 1) && (ISACencLB_obj->frame_nb == 0)) {
+        /* This was the first 30 ms of a 60 ms frame. Although the
+           payload is larger than it should be, we let the second 30 ms
+           be encoded; together they may still stay within the limit. */
+ ISACencLB_obj->frame_nb = 1;
+ return 0;
+ } else if ((frame_mode == 1) && (ISACencLB_obj->frame_nb == 1)) {
+ ISACencLB_obj->frame_nb = 0;
+ }
+
+ if (err != -ISAC_DISALLOWED_BITSTREAM_LENGTH) {
+ return -ISAC_PAYLOAD_LARGER_THAN_LIMIT;
+ } else {
+ return status;
+ }
+ }
+
+ if (err == -ISAC_DISALLOWED_BITSTREAM_LENGTH) {
+ bytesSpecCoderUsed = STREAM_SIZE_MAX;
+ /* Being conservative */
+ transcodeScale = bytesLeftSpecCoding / bytesSpecCoderUsed * 0.5;
+ } else {
+ bytesSpecCoderUsed = ISACencLB_obj->bitstr_obj.stream_index -
+ transcodingParam.stream_index;
+ transcodeScale = bytesLeftSpecCoding / bytesSpecCoderUsed;
+ }
+
+ /* To be safe, we reduce the scale depending on
+ the number of iterations. */
+ transcodeScale *= (1.0 - (0.9 * (double)iterCntr /
+ (double)MAX_PAYLOAD_LIMIT_ITERATION));
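+    /* Illustrative numbers: with bytesLeftSpecCoding == 50 and
+       bytesSpecCoderUsed == 100 the initial scale is 0.5; on later
+       iterations the factor above shrinks it further, approaching a
+       90% reduction as iterCntr nears MAX_PAYLOAD_LIMIT_ITERATION. */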
+
+ /* Scale the LPC Gains. */
+ for (k = 0; k < SUBFRAMES; k++) {
+ lofilt_coef[(LPC_LOBAND_ORDER + 1) * k] =
+ transcodingParam.loFiltGain[k] * transcodeScale;
+ hifilt_coef[(LPC_HIBAND_ORDER + 1) * k] =
+ transcodingParam.hiFiltGain[k] * transcodeScale;
+ transcodingParam.loFiltGain[k] = lofilt_coef[(LPC_LOBAND_ORDER + 1) * k];
+ transcodingParam.hiFiltGain[k] = hifilt_coef[(LPC_HIBAND_ORDER + 1) * k];
+ }
+
+ /* Scale DFT coefficients. */
+ for (k = 0; k < FRAMESAMPLES_HALF; k++) {
+ fre[k] = (int16_t)(fre[k] * transcodeScale);
+ fim[k] = (int16_t)(fim[k] * transcodeScale);
+ }
+
+ /* Save data for multiple packets memory. */
+ my_index = ISACencLB_obj->SaveEnc_obj.startIdx * FRAMESAMPLES_HALF;
+ memcpy(&ISACencLB_obj->SaveEnc_obj.fre[my_index], fre, sizeof(fre));
+ memcpy(&ISACencLB_obj->SaveEnc_obj.fim[my_index], fim, sizeof(fim));
+
+ /* Re-store the state of arithmetic coder before coding LPC gains. */
+ ISACencLB_obj->bitstr_obj.W_upper = transcodingParam.W_upper;
+ ISACencLB_obj->bitstr_obj.stream_index = transcodingParam.stream_index;
+ ISACencLB_obj->bitstr_obj.streamval = transcodingParam.streamval;
+ ISACencLB_obj->bitstr_obj.stream[transcodingParam.stream_index - 2] =
+ transcodingParam.stream[0];
+ ISACencLB_obj->bitstr_obj.stream[transcodingParam.stream_index - 1] =
+ transcodingParam.stream[1];
+ ISACencLB_obj->bitstr_obj.stream[transcodingParam.stream_index] =
+ transcodingParam.stream[2];
+
+ /* Code gains. */
+ WebRtcIsac_EncodeLpcGainLb(lofilt_coef, hifilt_coef,
+ &ISACencLB_obj->bitstr_obj,
+ &ISACencLB_obj->SaveEnc_obj);
+
+ /* Update the number of bytes left for encoding the spectrum. */
+ bytesLeftSpecCoding = payloadLimitBytes - transcodingParam.stream_index;
+
+ /* Encode the spectrum. */
+ err = WebRtcIsac_EncodeSpec(fre, fim, AvgPitchGain_Q12, kIsacLowerBand,
+ &ISACencLB_obj->bitstr_obj);
+
+ if ((err < 0) && (err != -ISAC_DISALLOWED_BITSTREAM_LENGTH)) {
+      /* There has been an error, but it was not a too-large payload
+         (a too-large payload is something we can cure). */
+ if (frame_mode == 1 && ISACencLB_obj->frame_nb == 1) {
+      /* If this is the second 30 ms of a 60 ms frame, reset this so
+         that the encoder starts fresh in the next call. */
+ ISACencLB_obj->frame_nb = 0;
+ }
+ return err;
+ }
+ iterCntr++;
+ }
+
+ /* If 60 ms frame-size and just processed the first 30 ms, */
+ /* go back to main function to buffer the other 30 ms speech frame. */
+ if (frame_mode == 1) {
+ if (ISACencLB_obj->frame_nb == 0) {
+ ISACencLB_obj->frame_nb = 1;
+ return 0;
+ } else if (ISACencLB_obj->frame_nb == 1) {
+ ISACencLB_obj->frame_nb = 0;
+ /* Also update the frame-length for next packet,
+ in Adaptive mode only. */
+ if (codingMode == 0 && (ISACencLB_obj->enforceFrameSize == 0)) {
+ ISACencLB_obj->new_framelength =
+ WebRtcIsac_GetNewFrameLength(ISACencLB_obj->bottleneck,
+ ISACencLB_obj->current_framesamples);
+ }
+ }
+ } else {
+ ISACencLB_obj->frame_nb = 0;
+ }
+
+ /* Complete arithmetic coding. */
+ stream_length = WebRtcIsac_EncTerminate(&ISACencLB_obj->bitstr_obj);
+ return stream_length;
+}
+
+
+
+static int LimitPayloadUb(ISACUBEncStruct* ISACencUB_obj,
+ uint16_t payloadLimitBytes,
+ double bytesLeftSpecCoding,
+ transcode_obj* transcodingParam,
+ int16_t* fre, int16_t* fim,
+ double* lpcGains, enum ISACBand band, int status) {
+
+ int iterCntr = 0;
+ int k;
+ double bytesSpecCoderUsed;
+ double transcodeScale;
+  const int16_t kAveragePitchGain = 0;
+
+ do {
+ if (iterCntr >= MAX_PAYLOAD_LIMIT_ITERATION) {
+ /* We were not able to limit the payload size. */
+ return -ISAC_PAYLOAD_LARGER_THAN_LIMIT;
+ }
+
+ if (status == -ISAC_DISALLOWED_BITSTREAM_LENGTH) {
+ bytesSpecCoderUsed = STREAM_SIZE_MAX;
+ /* Being conservative. */
+ transcodeScale = bytesLeftSpecCoding / bytesSpecCoderUsed * 0.5;
+ } else {
+ bytesSpecCoderUsed = ISACencUB_obj->bitstr_obj.stream_index -
+ transcodingParam->stream_index;
+ transcodeScale = bytesLeftSpecCoding / bytesSpecCoderUsed;
+ }
+
+ /* To be safe, we reduce the scale depending on the
+ number of iterations. */
+ transcodeScale *= (1.0 - (0.9 * (double)iterCntr /
+ (double)MAX_PAYLOAD_LIMIT_ITERATION));
+
+ /* Scale the LPC Gains. */
+ if (band == kIsacUpperBand16) {
+ /* Two sets of coefficients if 16 kHz. */
+ for (k = 0; k < SUBFRAMES; k++) {
+ transcodingParam->loFiltGain[k] *= transcodeScale;
+ transcodingParam->hiFiltGain[k] *= transcodeScale;
+ }
+ } else {
+      /* One set of coefficients if 12 kHz. */
+ for (k = 0; k < SUBFRAMES; k++) {
+ transcodingParam->loFiltGain[k] *= transcodeScale;
+ }
+ }
+
+ /* Scale DFT coefficients. */
+ for (k = 0; k < FRAMESAMPLES_HALF; k++) {
+ fre[k] = (int16_t)(fre[k] * transcodeScale + 0.5);
+ fim[k] = (int16_t)(fim[k] * transcodeScale + 0.5);
+ }
+ /* Store FFT coefficients for multiple encoding. */
+ memcpy(ISACencUB_obj->SaveEnc_obj.realFFT, fre,
+ sizeof(ISACencUB_obj->SaveEnc_obj.realFFT));
+ memcpy(ISACencUB_obj->SaveEnc_obj.imagFFT, fim,
+ sizeof(ISACencUB_obj->SaveEnc_obj.imagFFT));
+
+ /* Store the state of arithmetic coder before coding LPC gains */
+ ISACencUB_obj->bitstr_obj.W_upper = transcodingParam->W_upper;
+ ISACencUB_obj->bitstr_obj.stream_index = transcodingParam->stream_index;
+ ISACencUB_obj->bitstr_obj.streamval = transcodingParam->streamval;
+ ISACencUB_obj->bitstr_obj.stream[transcodingParam->stream_index - 2] =
+ transcodingParam->stream[0];
+ ISACencUB_obj->bitstr_obj.stream[transcodingParam->stream_index - 1] =
+ transcodingParam->stream[1];
+ ISACencUB_obj->bitstr_obj.stream[transcodingParam->stream_index] =
+ transcodingParam->stream[2];
+
+ /* Store the gains for multiple encoding. */
+ memcpy(ISACencUB_obj->SaveEnc_obj.lpcGain, lpcGains,
+ SUBFRAMES * sizeof(double));
+    /* Entropy-code the LPC gains; the indices are stored for later use. */
+ WebRtcIsac_EncodeLpcGainUb(transcodingParam->loFiltGain,
+ &ISACencUB_obj->bitstr_obj,
+ ISACencUB_obj->SaveEnc_obj.lpcGainIndex);
+
+    /* If 16 kHz, one more set should be done. */
+ if (band == kIsacUpperBand16) {
+ /* Store the gains for multiple encoding. */
+ memcpy(&ISACencUB_obj->SaveEnc_obj.lpcGain[SUBFRAMES],
+ &lpcGains[SUBFRAMES], SUBFRAMES * sizeof(double));
+      /* Entropy-code the LPC gains; the indices are stored for later use. */
+ WebRtcIsac_EncodeLpcGainUb(
+ transcodingParam->hiFiltGain, &ISACencUB_obj->bitstr_obj,
+ &ISACencUB_obj->SaveEnc_obj.lpcGainIndex[SUBFRAMES]);
+ }
+
+ /* Update the number of bytes left for encoding the spectrum. */
+ bytesLeftSpecCoding = payloadLimitBytes -
+ ISACencUB_obj->bitstr_obj.stream_index;
+
+ /* Save the bit-stream object at this point for FEC. */
+ memcpy(&ISACencUB_obj->SaveEnc_obj.bitStreamObj,
+ &ISACencUB_obj->bitstr_obj, sizeof(Bitstr));
+
+ /* Encode the spectrum. */
+ status = WebRtcIsac_EncodeSpec(fre, fim, kAveragePitchGain,
+ band, &ISACencUB_obj->bitstr_obj);
+ if ((status < 0) && (status != -ISAC_DISALLOWED_BITSTREAM_LENGTH)) {
+      /* There has been an error, but it was not a too-large payload
+         (a too-large payload is something we can cure). */
+ return status;
+ }
+ iterCntr++;
+ } while ((ISACencUB_obj->bitstr_obj.stream_index > payloadLimitBytes) ||
+ (status == -ISAC_DISALLOWED_BITSTREAM_LENGTH));
+ return 0;
+}
+
+int WebRtcIsac_EncodeUb16(const TransformTables* transform_tables,
+ float* in, ISACUBEncStruct* ISACencUB_obj,
+ int32_t jitterInfo) {
+ int err;
+ int k;
+
+ double lpcVecs[UB_LPC_ORDER * UB16_LPC_VEC_PER_FRAME];
+ double percepFilterParams[(1 + UB_LPC_ORDER) * (SUBFRAMES << 1) +
+ (1 + UB_LPC_ORDER)];
+
+ double LP_lookahead[FRAMESAMPLES];
+ int16_t fre[FRAMESAMPLES_HALF]; /* Q7 */
+ int16_t fim[FRAMESAMPLES_HALF]; /* Q7 */
+
+ int status = 0;
+
+ double varscale[2];
+ double corr[SUBFRAMES << 1][UB_LPC_ORDER + 1];
+ double lpcGains[SUBFRAMES << 1];
+ transcode_obj transcodingParam;
+ uint16_t payloadLimitBytes;
+ double s2nr;
+  const int16_t kAveragePitchGain = 0;
+ int bytesLeftSpecCoding;
+
+ /* Buffer speech samples (by 10ms packet) until the frame-length is */
+ /* reached (30 ms). */
+ /*********************************************************************/
+
+ /* fill the buffer with 10ms input data */
+ memcpy(&ISACencUB_obj->data_buffer_float[ISACencUB_obj->buffer_index], in,
+ FRAMESAMPLES_10ms * sizeof(float));
+
+  /* If the buffer size is not equal to the current frame size (and the
+   * end of file has not been reached), we do not encode until we have
+   * the whole frame. */
+ if (ISACencUB_obj->buffer_index + FRAMESAMPLES_10ms < FRAMESAMPLES) {
+ ISACencUB_obj->buffer_index += FRAMESAMPLES_10ms;
+ return 0;
+ }
+
+ /* End of buffer function. */
+ /**************************/
+
+ /* Encoding */
+ /************/
+
+ /* Reset bit-stream */
+ WebRtcIsac_ResetBitstream(&(ISACencUB_obj->bitstr_obj));
+
+ /* Encoding of bandwidth information. */
+ WebRtcIsac_EncodeJitterInfo(jitterInfo, &ISACencUB_obj->bitstr_obj);
+
+ status = WebRtcIsac_EncodeBandwidth(isac16kHz, &ISACencUB_obj->bitstr_obj);
+ if (status < 0) {
+ return status;
+ }
+
+ s2nr = WebRtcIsac_GetSnr(ISACencUB_obj->bottleneck, FRAMESAMPLES);
+
+ memcpy(lpcVecs, ISACencUB_obj->lastLPCVec, UB_LPC_ORDER * sizeof(double));
+
+ for (k = 0; k < FRAMESAMPLES; k++) {
+ LP_lookahead[k] = ISACencUB_obj->data_buffer_float[UB_LOOKAHEAD + k];
+ }
+
+ /* Find coefficients for perceptual pre-filters. */
+ WebRtcIsac_GetLpcCoefUb(LP_lookahead, &ISACencUB_obj->maskfiltstr_obj,
+ &lpcVecs[UB_LPC_ORDER], corr, varscale, isac16kHz);
+
+ memcpy(ISACencUB_obj->lastLPCVec,
+ &lpcVecs[(UB16_LPC_VEC_PER_FRAME - 1) * (UB_LPC_ORDER)],
+ sizeof(double) * UB_LPC_ORDER);
+
+ /* Code LPC model and shape - gains not quantized yet. */
+ WebRtcIsac_EncodeLpcUB(lpcVecs, &ISACencUB_obj->bitstr_obj,
+ percepFilterParams, isac16kHz,
+ &ISACencUB_obj->SaveEnc_obj);
+
+  /* The first set of LPC parameters comes from the last sub-frame of
+   * the previous frame, so we do not care about it here. */
+ WebRtcIsac_GetLpcGain(s2nr, &percepFilterParams[UB_LPC_ORDER + 1],
+ (SUBFRAMES << 1), lpcGains, corr, varscale);
+
+ /* Store the state of arithmetic coder before coding LPC gains */
+ transcodingParam.stream_index = ISACencUB_obj->bitstr_obj.stream_index;
+ transcodingParam.W_upper = ISACencUB_obj->bitstr_obj.W_upper;
+ transcodingParam.streamval = ISACencUB_obj->bitstr_obj.streamval;
+ transcodingParam.stream[0] =
+ ISACencUB_obj->bitstr_obj.stream[ISACencUB_obj->bitstr_obj.stream_index -
+ 2];
+ transcodingParam.stream[1] =
+ ISACencUB_obj->bitstr_obj.stream[ISACencUB_obj->bitstr_obj.stream_index -
+ 1];
+ transcodingParam.stream[2] =
+ ISACencUB_obj->bitstr_obj.stream[ISACencUB_obj->bitstr_obj.stream_index];
+
+ /* Store LPC Gains before encoding them. */
+ for (k = 0; k < SUBFRAMES; k++) {
+ transcodingParam.loFiltGain[k] = lpcGains[k];
+ transcodingParam.hiFiltGain[k] = lpcGains[SUBFRAMES + k];
+ }
+
+ /* Store the gains for multiple encoding. */
+ memcpy(ISACencUB_obj->SaveEnc_obj.lpcGain, lpcGains,
+ (SUBFRAMES << 1) * sizeof(double));
+
+ WebRtcIsac_EncodeLpcGainUb(lpcGains, &ISACencUB_obj->bitstr_obj,
+ ISACencUB_obj->SaveEnc_obj.lpcGainIndex);
+ WebRtcIsac_EncodeLpcGainUb(
+ &lpcGains[SUBFRAMES], &ISACencUB_obj->bitstr_obj,
+ &ISACencUB_obj->SaveEnc_obj.lpcGainIndex[SUBFRAMES]);
+
+  /* Get the correct value for the payload limit and calculate the number
+     of bytes left for coding the spectrum. It is a 30 ms frame; subtract
+     3 because the termination process may add 3 bytes. */
+ payloadLimitBytes = ISACencUB_obj->maxPayloadSizeBytes -
+ ISACencUB_obj->numBytesUsed - 3;
+ bytesLeftSpecCoding = payloadLimitBytes -
+ ISACencUB_obj->bitstr_obj.stream_index;
+
+ for (k = 0; k < (SUBFRAMES << 1); k++) {
+ percepFilterParams[k * (UB_LPC_ORDER + 1) + (UB_LPC_ORDER + 1)] =
+ lpcGains[k];
+ }
+
+ /* LPC filtering (using normalized lattice filter), */
+ /* first half-frame. */
+ WebRtcIsac_NormLatticeFilterMa(UB_LPC_ORDER,
+ ISACencUB_obj->maskfiltstr_obj.PreStateLoF,
+ ISACencUB_obj->maskfiltstr_obj.PreStateLoG,
+ &ISACencUB_obj->data_buffer_float[0],
+ &percepFilterParams[UB_LPC_ORDER + 1],
+ &LP_lookahead[0]);
+
+ /* Second half-frame filtering. */
+ WebRtcIsac_NormLatticeFilterMa(
+ UB_LPC_ORDER, ISACencUB_obj->maskfiltstr_obj.PreStateLoF,
+ ISACencUB_obj->maskfiltstr_obj.PreStateLoG,
+ &ISACencUB_obj->data_buffer_float[FRAMESAMPLES_HALF],
+ &percepFilterParams[(UB_LPC_ORDER + 1) + SUBFRAMES * (UB_LPC_ORDER + 1)],
+ &LP_lookahead[FRAMESAMPLES_HALF]);
+
+ WebRtcIsac_Time2Spec(transform_tables,
+ &LP_lookahead[0], &LP_lookahead[FRAMESAMPLES_HALF],
+ fre, fim, &ISACencUB_obj->fftstr_obj);
+
+ /* Store FFT coefficients for multiple encoding. */
+ memcpy(ISACencUB_obj->SaveEnc_obj.realFFT, fre, sizeof(fre));
+ memcpy(ISACencUB_obj->SaveEnc_obj.imagFFT, fim, sizeof(fim));
+
+  /* Prepare the audio buffer for the next packet:
+   * move the last 3 ms to the beginning of the buffer. */
+ memcpy(ISACencUB_obj->data_buffer_float,
+ &ISACencUB_obj->data_buffer_float[FRAMESAMPLES],
+ LB_TOTAL_DELAY_SAMPLES * sizeof(float));
+  /* Start writing with a 3 ms delay to compensate for the delay
+   * of the lower band. */
+ ISACencUB_obj->buffer_index = LB_TOTAL_DELAY_SAMPLES;
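+  /* (LB_TOTAL_DELAY_SAMPLES is assumed here to be 48 samples, i.e. the
+   * 3 ms at 16 kHz referred to above.) */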
+
+ /* Save the bit-stream object at this point for FEC. */
+ memcpy(&ISACencUB_obj->SaveEnc_obj.bitStreamObj, &ISACencUB_obj->bitstr_obj,
+ sizeof(Bitstr));
+
+  /* Quantization and loss-less coding. */
+ /* Note that there is no pitch-gain for this band so kAveragePitchGain = 0
+ * is passed to the function. In fact, the function ignores the 3rd parameter
+ * for this band. */
+ err = WebRtcIsac_EncodeSpec(fre, fim, kAveragePitchGain, kIsacUpperBand16,
+ &ISACencUB_obj->bitstr_obj);
+ if ((err < 0) && (err != -ISAC_DISALLOWED_BITSTREAM_LENGTH)) {
+ return err;
+ }
+
+ if ((ISACencUB_obj->bitstr_obj.stream_index > payloadLimitBytes) ||
+ (err == -ISAC_DISALLOWED_BITSTREAM_LENGTH)) {
+ err = LimitPayloadUb(ISACencUB_obj, payloadLimitBytes, bytesLeftSpecCoding,
+ &transcodingParam, fre, fim, lpcGains,
+ kIsacUpperBand16, err);
+ }
+ if (err < 0) {
+ return err;
+ }
+ /* Complete arithmetic coding. */
+ return WebRtcIsac_EncTerminate(&ISACencUB_obj->bitstr_obj);
+}
+
+
+int WebRtcIsac_EncodeUb12(const TransformTables* transform_tables,
+ float* in, ISACUBEncStruct* ISACencUB_obj,
+ int32_t jitterInfo) {
+ int err;
+ int k;
+
+ double lpcVecs[UB_LPC_ORDER * UB_LPC_VEC_PER_FRAME];
+
+ double percepFilterParams[(1 + UB_LPC_ORDER) * SUBFRAMES];
+ float LP[FRAMESAMPLES_HALF];
+ float HP[FRAMESAMPLES_HALF];
+
+ double LP_lookahead[FRAMESAMPLES_HALF];
+ double HP_lookahead[FRAMESAMPLES_HALF];
+ double LPw[FRAMESAMPLES_HALF];
+
+ double HPw[FRAMESAMPLES_HALF];
+ int16_t fre[FRAMESAMPLES_HALF]; /* Q7 */
+ int16_t fim[FRAMESAMPLES_HALF]; /* Q7 */
+
+ int status = 0;
+
+ double varscale[1];
+
+ double corr[UB_LPC_GAIN_DIM][UB_LPC_ORDER + 1];
+ double lpcGains[SUBFRAMES];
+ transcode_obj transcodingParam;
+ uint16_t payloadLimitBytes;
+ double s2nr;
+  const int16_t kAveragePitchGain = 0;
+ double bytesLeftSpecCoding;
+
+ /* Buffer speech samples (by 10ms packet) until the framelength is */
+ /* reached (30 ms). */
+ /********************************************************************/
+
+ /* Fill the buffer with 10ms input data. */
+ memcpy(&ISACencUB_obj->data_buffer_float[ISACencUB_obj->buffer_index], in,
+ FRAMESAMPLES_10ms * sizeof(float));
+
+  /* If the buffer size is not equal to the current frame size, increase
+     the index and return. Encoding is done once we have enough audio. */
+ if (ISACencUB_obj->buffer_index + FRAMESAMPLES_10ms < FRAMESAMPLES) {
+ ISACencUB_obj->buffer_index += FRAMESAMPLES_10ms;
+ return 0;
+ }
+ /* If buffer reached the right size, reset index and continue
+ with encoding the frame */
+ ISACencUB_obj->buffer_index = 0;
+
+ /* End of buffer function */
+ /**************************/
+
+ /* Encoding */
+ /************/
+
+ /* Reset bit-stream. */
+ WebRtcIsac_ResetBitstream(&(ISACencUB_obj->bitstr_obj));
+
+ /* Encoding bandwidth information. */
+ WebRtcIsac_EncodeJitterInfo(jitterInfo, &ISACencUB_obj->bitstr_obj);
+ status = WebRtcIsac_EncodeBandwidth(isac12kHz, &ISACencUB_obj->bitstr_obj);
+ if (status < 0) {
+ return status;
+ }
+
+ s2nr = WebRtcIsac_GetSnr(ISACencUB_obj->bottleneck, FRAMESAMPLES);
+
+ /* Split signal in two bands. */
+ WebRtcIsac_SplitAndFilterFloat(ISACencUB_obj->data_buffer_float, HP, LP,
+ HP_lookahead, LP_lookahead,
+ &ISACencUB_obj->prefiltbankstr_obj);
+
+ /* Find coefficients for perceptual pre-filters. */
+ WebRtcIsac_GetLpcCoefUb(LP_lookahead, &ISACencUB_obj->maskfiltstr_obj,
+ lpcVecs, corr, varscale, isac12kHz);
+
+ /* Code LPC model and shape - gains not quantized yet. */
+ WebRtcIsac_EncodeLpcUB(lpcVecs, &ISACencUB_obj->bitstr_obj,
+ percepFilterParams, isac12kHz,
+ &ISACencUB_obj->SaveEnc_obj);
+
+ WebRtcIsac_GetLpcGain(s2nr, percepFilterParams, SUBFRAMES, lpcGains, corr,
+ varscale);
+
+ /* Store the state of arithmetic coder before coding LPC gains. */
+ transcodingParam.W_upper = ISACencUB_obj->bitstr_obj.W_upper;
+ transcodingParam.stream_index = ISACencUB_obj->bitstr_obj.stream_index;
+ transcodingParam.streamval = ISACencUB_obj->bitstr_obj.streamval;
+ transcodingParam.stream[0] =
+ ISACencUB_obj->bitstr_obj.stream[ISACencUB_obj->bitstr_obj.stream_index -
+ 2];
+ transcodingParam.stream[1] =
+ ISACencUB_obj->bitstr_obj.stream[ISACencUB_obj->bitstr_obj.stream_index -
+ 1];
+ transcodingParam.stream[2] =
+ ISACencUB_obj->bitstr_obj.stream[ISACencUB_obj->bitstr_obj.stream_index];
+
+ /* Store LPC Gains before encoding them. */
+ for (k = 0; k < SUBFRAMES; k++) {
+ transcodingParam.loFiltGain[k] = lpcGains[k];
+ }
+
+ /* Store the gains for multiple encoding. */
+ memcpy(ISACencUB_obj->SaveEnc_obj.lpcGain, lpcGains, SUBFRAMES *
+ sizeof(double));
+
+ WebRtcIsac_EncodeLpcGainUb(lpcGains, &ISACencUB_obj->bitstr_obj,
+ ISACencUB_obj->SaveEnc_obj.lpcGainIndex);
+
+ for (k = 0; k < SUBFRAMES; k++) {
+ percepFilterParams[k * (UB_LPC_ORDER + 1)] = lpcGains[k];
+ }
+
+ /* perceptual pre-filtering (using normalized lattice filter) */
+ /* low-band filtering */
+ WebRtcIsac_NormLatticeFilterMa(UB_LPC_ORDER,
+ ISACencUB_obj->maskfiltstr_obj.PreStateLoF,
+ ISACencUB_obj->maskfiltstr_obj.PreStateLoG, LP,
+ percepFilterParams, LPw);
+
+  /* Get the correct value for the payload limit and calculate the number
+     of bytes left for coding the spectrum. It is a 30 ms frame; subtract
+     3 because the termination process may add 3 bytes. */
+ payloadLimitBytes = ISACencUB_obj->maxPayloadSizeBytes -
+ ISACencUB_obj->numBytesUsed - 3;
+ bytesLeftSpecCoding = payloadLimitBytes -
+ ISACencUB_obj->bitstr_obj.stream_index;
+
+ memset(HPw, 0, sizeof(HPw));
+
+ /* Transform */
+ WebRtcIsac_Time2Spec(transform_tables,
+ LPw, HPw, fre, fim, &ISACencUB_obj->fftstr_obj);
+
+ /* Store FFT coefficients for multiple encoding. */
+ memcpy(ISACencUB_obj->SaveEnc_obj.realFFT, fre,
+ sizeof(ISACencUB_obj->SaveEnc_obj.realFFT));
+ memcpy(ISACencUB_obj->SaveEnc_obj.imagFFT, fim,
+ sizeof(ISACencUB_obj->SaveEnc_obj.imagFFT));
+
+ /* Save the bit-stream object at this point for FEC. */
+ memcpy(&ISACencUB_obj->SaveEnc_obj.bitStreamObj,
+ &ISACencUB_obj->bitstr_obj, sizeof(Bitstr));
+
+ /* Quantization and loss-less coding */
+  /* The 3rd parameter to this function is the pitch gain, which is only
+   * used when encoding the 0-8 kHz band and is irrelevant here;
+   * therefore, we pass zero. */
+ err = WebRtcIsac_EncodeSpec(fre, fim, kAveragePitchGain, kIsacUpperBand12,
+ &ISACencUB_obj->bitstr_obj);
+ if ((err < 0) && (err != -ISAC_DISALLOWED_BITSTREAM_LENGTH)) {
+    /* There has been an error, but it was not a too-large payload
+       (a too-large payload is something we can cure). */
+ return err;
+ }
+
+ if ((ISACencUB_obj->bitstr_obj.stream_index > payloadLimitBytes) ||
+ (err == -ISAC_DISALLOWED_BITSTREAM_LENGTH)) {
+ err = LimitPayloadUb(ISACencUB_obj, payloadLimitBytes, bytesLeftSpecCoding,
+ &transcodingParam, fre, fim, lpcGains,
+ kIsacUpperBand12, err);
+ }
+ if (err < 0) {
+ return err;
+ }
+ /* Complete arithmetic coding. */
+ return WebRtcIsac_EncTerminate(&ISACencUB_obj->bitstr_obj);
+}
+
+
+
+
+
+
+/* This function is used to create a new bit-stream, with a new BWE, from
+   the same data as was previously encoded with WebRtcIsac_Encoder(). The
+   data needed is taken from the structure where it was stored when the
+   encoder was called. */
+
+int WebRtcIsac_EncodeStoredDataLb(const IsacSaveEncoderData* ISACSavedEnc_obj,
+ Bitstr* ISACBitStr_obj, int BWnumber,
+ float scale) {
+ int ii;
+ int status;
+ int BWno = BWnumber;
+
+ const uint16_t* WebRtcIsac_kQPitchGainCdf_ptr[1];
+ const uint16_t** cdf;
+
+ double tmpLPCcoeffs_lo[(ORDERLO + 1)*SUBFRAMES * 2];
+ double tmpLPCcoeffs_hi[(ORDERHI + 1)*SUBFRAMES * 2];
+ int tmpLPCindex_g[12 * 2];
+ int16_t tmp_fre[FRAMESAMPLES], tmp_fim[FRAMESAMPLES];
+ const int kModel = 0;
+
+  /* Sanity check - possible values for BWnumber are 0 - 23. */
+ if ((BWnumber < 0) || (BWnumber > 23)) {
+ return -ISAC_RANGE_ERROR_BW_ESTIMATOR;
+ }
+
+ /* Reset bit-stream. */
+ WebRtcIsac_ResetBitstream(ISACBitStr_obj);
+
+ /* Encode frame length */
+ status = WebRtcIsac_EncodeFrameLen(ISACSavedEnc_obj->framelength,
+ ISACBitStr_obj);
+ if (status < 0) {
+ /* Wrong frame size. */
+ return status;
+ }
+
+ /* Transcoding */
+ if ((scale > 0.0) && (scale < 1.0)) {
+ /* Compensate LPC gain. */
+ for (ii = 0;
+ ii < ((ORDERLO + 1)* SUBFRAMES * (1 + ISACSavedEnc_obj->startIdx));
+ ii++) {
+ tmpLPCcoeffs_lo[ii] = scale * ISACSavedEnc_obj->LPCcoeffs_lo[ii];
+ }
+ for (ii = 0;
+ ii < ((ORDERHI + 1) * SUBFRAMES * (1 + ISACSavedEnc_obj->startIdx));
+ ii++) {
+ tmpLPCcoeffs_hi[ii] = scale * ISACSavedEnc_obj->LPCcoeffs_hi[ii];
+ }
+ /* Scale DFT. */
+ for (ii = 0;
+ ii < (FRAMESAMPLES_HALF * (1 + ISACSavedEnc_obj->startIdx));
+ ii++) {
+ tmp_fre[ii] = (int16_t)((scale) * (float)ISACSavedEnc_obj->fre[ii]);
+ tmp_fim[ii] = (int16_t)((scale) * (float)ISACSavedEnc_obj->fim[ii]);
+ }
+ } else {
+ for (ii = 0;
+ ii < (KLT_ORDER_GAIN * (1 + ISACSavedEnc_obj->startIdx));
+ ii++) {
+ tmpLPCindex_g[ii] = ISACSavedEnc_obj->LPCindex_g[ii];
+ }
+ for (ii = 0;
+ ii < (FRAMESAMPLES_HALF * (1 + ISACSavedEnc_obj->startIdx));
+ ii++) {
+ tmp_fre[ii] = ISACSavedEnc_obj->fre[ii];
+ tmp_fim[ii] = ISACSavedEnc_obj->fim[ii];
+ }
+ }
+
+ /* Encode bandwidth estimate. */
+ WebRtcIsac_EncodeReceiveBw(&BWno, ISACBitStr_obj);
+
+  /* Loop over the number of 30 ms frames. */
+ for (ii = 0; ii <= ISACSavedEnc_obj->startIdx; ii++) {
+ /* Encode pitch gains. */
+ *WebRtcIsac_kQPitchGainCdf_ptr = WebRtcIsac_kQPitchGainCdf;
+ WebRtcIsac_EncHistMulti(ISACBitStr_obj,
+ &ISACSavedEnc_obj->pitchGain_index[ii],
+ WebRtcIsac_kQPitchGainCdf_ptr, 1);
+
+    /* Entropy coding of quantized pitch lags. */
+ /* Voicing classification. */
+ if (ISACSavedEnc_obj->meanGain[ii] < 0.2) {
+ cdf = WebRtcIsac_kQPitchLagCdfPtrLo;
+ } else if (ISACSavedEnc_obj->meanGain[ii] < 0.4) {
+ cdf = WebRtcIsac_kQPitchLagCdfPtrMid;
+ } else {
+ cdf = WebRtcIsac_kQPitchLagCdfPtrHi;
+ }
+ WebRtcIsac_EncHistMulti(ISACBitStr_obj,
+ &ISACSavedEnc_obj->pitchIndex[PITCH_SUBFRAMES * ii],
+ cdf, PITCH_SUBFRAMES);
+
+ /* LPC */
+ /* Only one model exists. The entropy coding is done only for backward
+ * compatibility. */
+ WebRtcIsac_EncHistMulti(ISACBitStr_obj, &kModel,
+ WebRtcIsac_kQKltModelCdfPtr, 1);
+ /* Entropy coding of quantization indices - LPC shape only. */
+ WebRtcIsac_EncHistMulti(ISACBitStr_obj,
+ &ISACSavedEnc_obj->LPCindex_s[KLT_ORDER_SHAPE * ii],
+ WebRtcIsac_kQKltCdfPtrShape,
+ KLT_ORDER_SHAPE);
+
+ /* If transcoding, get new LPC gain indices */
+ if (scale < 1.0) {
+ WebRtcIsac_TranscodeLPCCoef(
+ &tmpLPCcoeffs_lo[(ORDERLO + 1) * SUBFRAMES * ii],
+ &tmpLPCcoeffs_hi[(ORDERHI + 1)*SUBFRAMES * ii],
+ &tmpLPCindex_g[KLT_ORDER_GAIN * ii]);
+ }
+
+ /* Entropy coding of quantization indices - LPC gain. */
+ WebRtcIsac_EncHistMulti(ISACBitStr_obj, &tmpLPCindex_g[KLT_ORDER_GAIN * ii],
+ WebRtcIsac_kQKltCdfPtrGain, KLT_ORDER_GAIN);
+
+ /* Quantization and loss-less coding. */
+ status = WebRtcIsac_EncodeSpec(&tmp_fre[ii * FRAMESAMPLES_HALF],
+ &tmp_fim[ii * FRAMESAMPLES_HALF],
+ ISACSavedEnc_obj->AvgPitchGain[ii],
+ kIsacLowerBand, ISACBitStr_obj);
+ if (status < 0) {
+ return status;
+ }
+ }
+ /* Complete arithmetic coding. */
+ return WebRtcIsac_EncTerminate(ISACBitStr_obj);
+}
+
+
+int WebRtcIsac_EncodeStoredDataUb(
+ const ISACUBSaveEncDataStruct* ISACSavedEnc_obj,
+ Bitstr* bitStream,
+ int32_t jitterInfo,
+ float scale,
+ enum ISACBandwidth bandwidth) {
+ int n;
+ int err;
+ double lpcGain[SUBFRAMES];
+ int16_t realFFT[FRAMESAMPLES_HALF];
+ int16_t imagFFT[FRAMESAMPLES_HALF];
+ const uint16_t** shape_cdf;
+ int shape_len;
+  const int16_t kAveragePitchGain = 0;
+ enum ISACBand band;
+ /* Reset bitstream. */
+ WebRtcIsac_ResetBitstream(bitStream);
+
+ /* Encode jitter index. */
+ WebRtcIsac_EncodeJitterInfo(jitterInfo, bitStream);
+
+ err = WebRtcIsac_EncodeBandwidth(bandwidth, bitStream);
+ if (err < 0) {
+ return err;
+ }
+
+ /* Encode LPC-shape. */
+ if (bandwidth == isac12kHz) {
+ shape_cdf = WebRtcIsac_kLpcShapeCdfMatUb12;
+ shape_len = UB_LPC_ORDER * UB_LPC_VEC_PER_FRAME;
+ band = kIsacUpperBand12;
+ } else {
+ shape_cdf = WebRtcIsac_kLpcShapeCdfMatUb16;
+ shape_len = UB_LPC_ORDER * UB16_LPC_VEC_PER_FRAME;
+ band = kIsacUpperBand16;
+ }
+ WebRtcIsac_EncHistMulti(bitStream, ISACSavedEnc_obj->indexLPCShape,
+ shape_cdf, shape_len);
+
+ if ((scale <= 0.0) || (scale >= 1.0)) {
+ /* We only consider scales between zero and one. */
+ WebRtcIsac_EncHistMulti(bitStream, ISACSavedEnc_obj->lpcGainIndex,
+ WebRtcIsac_kLpcGainCdfMat, UB_LPC_GAIN_DIM);
+ if (bandwidth == isac16kHz) {
+ /* Store gain indices of the second half. */
+ WebRtcIsac_EncHistMulti(bitStream,
+ &ISACSavedEnc_obj->lpcGainIndex[SUBFRAMES],
+ WebRtcIsac_kLpcGainCdfMat, UB_LPC_GAIN_DIM);
+ }
+ /* Store FFT coefficients. */
+ err = WebRtcIsac_EncodeSpec(ISACSavedEnc_obj->realFFT,
+ ISACSavedEnc_obj->imagFFT, kAveragePitchGain,
+ band, bitStream);
+ } else {
+ /* Scale LPC gain and FFT coefficients. */
+ for (n = 0; n < SUBFRAMES; n++) {
+ lpcGain[n] = scale * ISACSavedEnc_obj->lpcGain[n];
+ }
+ /* Store LPC gains. */
+ WebRtcIsac_StoreLpcGainUb(lpcGain, bitStream);
+
+ if (bandwidth == isac16kHz) {
+ /* Scale and code the gains of the second half of the frame, if 16kHz. */
+ for (n = 0; n < SUBFRAMES; n++) {
+ lpcGain[n] = scale * ISACSavedEnc_obj->lpcGain[n + SUBFRAMES];
+ }
+ WebRtcIsac_StoreLpcGainUb(lpcGain, bitStream);
+ }
+
+ for (n = 0; n < FRAMESAMPLES_HALF; n++) {
+ realFFT[n] = (int16_t)(scale * (float)ISACSavedEnc_obj->realFFT[n] +
+ 0.5f);
+ imagFFT[n] = (int16_t)(scale * (float)ISACSavedEnc_obj->imagFFT[n] +
+ 0.5f);
+ }
+ /* Store FFT coefficients. */
+ err = WebRtcIsac_EncodeSpec(realFFT, imagFFT, kAveragePitchGain,
+ band, bitStream);
+ }
+ if (err < 0) {
+ /* Error happened while encoding FFT coefficients. */
+ return err;
+ }
+
+ /* Complete arithmetic coding. */
+ return WebRtcIsac_EncTerminate(bitStream);
+}
+
+int16_t WebRtcIsac_GetRedPayloadUb(
+ const ISACUBSaveEncDataStruct* ISACSavedEncObj,
+ Bitstr* bitStreamObj,
+ enum ISACBandwidth bandwidth) {
+ int n;
+ int16_t status;
+ int16_t realFFT[FRAMESAMPLES_HALF];
+ int16_t imagFFT[FRAMESAMPLES_HALF];
+ enum ISACBand band;
+  const int16_t kAveragePitchGain = 0;
+ /* Store bit-stream object. */
+ memcpy(bitStreamObj, &ISACSavedEncObj->bitStreamObj, sizeof(Bitstr));
+
+ /* Scale FFT coefficients. */
+ for (n = 0; n < FRAMESAMPLES_HALF; n++) {
+ realFFT[n] = (int16_t)((float)ISACSavedEncObj->realFFT[n] *
+ RCU_TRANSCODING_SCALE_UB + 0.5);
+ imagFFT[n] = (int16_t)((float)ISACSavedEncObj->imagFFT[n] *
+ RCU_TRANSCODING_SCALE_UB + 0.5);
+ }
+
+ band = (bandwidth == isac12kHz) ? kIsacUpperBand12 : kIsacUpperBand16;
+ status = WebRtcIsac_EncodeSpec(realFFT, imagFFT, kAveragePitchGain, band,
+ bitStreamObj);
+ if (status < 0) {
+ return status;
+ } else {
+ /* Terminate entropy coding */
+ return WebRtcIsac_EncTerminate(bitStreamObj);
+ }
+}
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/encode_lpc_swb.c b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/encode_lpc_swb.c
new file mode 100644
index 0000000000..7b02e64a01
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/encode_lpc_swb.c
@@ -0,0 +1,706 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * encode_lpc_swb.c
+ *
+ * This file contains definitions of functions used to
+ * encode the LPC parameters (shape & gain) of the upper band.
+ *
+ */
+
+#include <math.h>
+#include <stdio.h>
+#include <string.h>
+
+#include "modules/audio_coding/codecs/isac/main/source/encode_lpc_swb.h"
+#include "modules/audio_coding/codecs/isac/main/source/lpc_gain_swb_tables.h"
+#include "modules/audio_coding/codecs/isac/main/source/lpc_shape_swb12_tables.h"
+#include "modules/audio_coding/codecs/isac/main/source/lpc_shape_swb16_tables.h"
+#include "modules/audio_coding/codecs/isac/main/source/settings.h"
+
+/******************************************************************************
+ * WebRtcIsac_RemoveLarMean()
+ *
+ * Remove the means from LAR coefficients.
+ *
+ * Input:
+ *      -lar                : pointer to LAR vectors. LAR vectors are
+ *                            concatenated.
+ *      -bandwidth          : indicates if the given LAR vectors belong
+ *                            to SWB-12kHz or SWB-16kHz.
+ *
+ * Output:
+ *      -lar                : pointer to mean-removed LARs.
+ *
+ *
+ */
+int16_t
+WebRtcIsac_RemoveLarMean(
+ double* lar,
+ int16_t bandwidth)
+{
+ int16_t coeffCntr;
+ int16_t vecCntr;
+ int16_t numVec;
+ const double* meanLAR;
+ switch(bandwidth)
+ {
+ case isac12kHz:
+ {
+ numVec = UB_LPC_VEC_PER_FRAME;
+ meanLAR = WebRtcIsac_kMeanLarUb12;
+ break;
+ }
+ case isac16kHz:
+ {
+ numVec = UB16_LPC_VEC_PER_FRAME;
+ meanLAR = WebRtcIsac_kMeanLarUb16;
+ break;
+ }
+ default:
+ return -1;
+ }
+
+ for(vecCntr = 0; vecCntr < numVec; vecCntr++)
+ {
+ for(coeffCntr = 0; coeffCntr < UB_LPC_ORDER; coeffCntr++)
+ {
+ // REMOVE MEAN
+ *lar++ -= meanLAR[coeffCntr];
+ }
+ }
+ return 0;
+}
+
+/******************************************************************************
+ * WebRtcIsac_DecorrelateIntraVec()
+ *
+ * Remove the correlation among the components of LAR vectors. If the LAR
+ * vectors of one frame are put in a matrix where each column is a LAR
+ * vector of a sub-frame, then this is equivalent to multiplying the LAR
+ * matrix by a decorrelating matrix from the left.
+ *
+ * Input:
+ *      -data               : pointer to mean-removed LAR vectors.
+ * -bandwidth : indicates if the given LAR vectors belong
+ * to SWB-12kHz or SWB-16kHz.
+ *
+ * Output:
+ * -out : decorrelated LAR vectors.
+ */
+int16_t
+WebRtcIsac_DecorrelateIntraVec(
+ const double* data,
+ double* out,
+ int16_t bandwidth)
+{
+ const double* ptrData;
+ const double* ptrRow;
+ int16_t rowCntr;
+ int16_t colCntr;
+ int16_t larVecCntr;
+ int16_t numVec;
+ const double* decorrMat;
+ switch(bandwidth)
+ {
+ case isac12kHz:
+ {
+ decorrMat = &WebRtcIsac_kIntraVecDecorrMatUb12[0][0];
+ numVec = UB_LPC_VEC_PER_FRAME;
+ break;
+ }
+ case isac16kHz:
+ {
+ decorrMat = &WebRtcIsac_kIintraVecDecorrMatUb16[0][0];
+ numVec = UB16_LPC_VEC_PER_FRAME;
+ break;
+ }
+ default:
+ return -1;
+ }
+
+ //
+ // decorrMat * data
+ //
+ // data is assumed to contain 'numVec' of LAR
+ // vectors (mean removed) each of dimension 'UB_LPC_ORDER'
+ // concatenated one after the other.
+ //
+
+ ptrData = data;
+ for(larVecCntr = 0; larVecCntr < numVec; larVecCntr++)
+ {
+ for(rowCntr = 0; rowCntr < UB_LPC_ORDER; rowCntr++)
+ {
+ ptrRow = &decorrMat[rowCntr * UB_LPC_ORDER];
+ *out = 0;
+ for(colCntr = 0; colCntr < UB_LPC_ORDER; colCntr++)
+ {
+ *out += ptrData[colCntr] * ptrRow[colCntr];
+ }
+ out++;
+ }
+ ptrData += UB_LPC_ORDER;
+ }
+ return 0;
+}
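+
+/*
+ * A note on the loop above: it is an UB_LPC_ORDER x UB_LPC_ORDER
+ * matrix-vector product applied once per LAR vector. A plain dense
+ * equivalent, as a minimal sketch (illustrative only, not used by the
+ * codec; the helper name is invented and 'dim' stands for UB_LPC_ORDER):
+ */
+#if 0
+static void MatVec(const double* M, const double* x, double* y, int dim) {
+  int r, c;
+  for (r = 0; r < dim; r++) {
+    y[r] = 0.0;
+    for (c = 0; c < dim; c++) {
+      y[r] += M[r * dim + c] * x[c]; /* row r of M dotted with x */
+    }
+  }
+}
+#endif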
+
+/******************************************************************************
+ * WebRtcIsac_DecorrelateInterVec()
+ *
+ * Remove the correlation among mean-removed LAR vectors. If LAR vectors
+ * of one frame are put in a matrix where each column is a LAR vector of a
+ * sub-frame, then this is equivalent to multiplying the LAR matrix with
+ * a decorrelating matrix from the right.
+ *
+ * Input:
+ * -data : pointer to matrix of LAR vectors. The matrix
+ * is stored column-wise.
+ * -bandwidth : indicates if the given LAR vectors belong
+ * to SWB-12kHz or SWB-16kHz.
+ *
+ * Output:
+ * -out : decorrelated LAR vectors.
+ */
+int16_t
+WebRtcIsac_DecorrelateInterVec(
+ const double* data,
+ double* out,
+ int16_t bandwidth)
+{
+ int16_t coeffCntr;
+ int16_t rowCntr;
+ int16_t colCntr;
+ const double* decorrMat;
+ int16_t interVecDim;
+
+ switch(bandwidth)
+ {
+ case isac12kHz:
+ {
+ decorrMat = &WebRtcIsac_kInterVecDecorrMatUb12[0][0];
+ interVecDim = UB_LPC_VEC_PER_FRAME;
+ break;
+ }
+ case isac16kHz:
+ {
+ decorrMat = &WebRtcIsac_kInterVecDecorrMatUb16[0][0];
+ interVecDim = UB16_LPC_VEC_PER_FRAME;
+ break;
+ }
+ default:
+ return -1;
+ }
+
+ //
+ // data * decorrMat
+ //
+  // data is of size 'interVecDim' * 'UB_LPC_ORDER'.
+  // That is, 'interVecDim' LAR vectors (mean removed)
+  // in columns, each of dimension 'UB_LPC_ORDER'.
+  // The matrix is stored column-wise.
+ //
+
+ for(coeffCntr = 0; coeffCntr < UB_LPC_ORDER; coeffCntr++)
+ {
+ for(colCntr = 0; colCntr < interVecDim; colCntr++)
+ {
+ out[coeffCntr + colCntr * UB_LPC_ORDER] = 0;
+ for(rowCntr = 0; rowCntr < interVecDim; rowCntr++)
+ {
+ out[coeffCntr + colCntr * UB_LPC_ORDER] +=
+ data[coeffCntr + rowCntr * UB_LPC_ORDER] *
+ decorrMat[rowCntr * interVecDim + colCntr];
+ }
+ }
+ }
+ return 0;
+}
+
+/******************************************************************************
+ * WebRtcIsac_QuantizeUncorrLar()
+ *
+ * Quantize the uncorrelated parameters.
+ *
+ * Input:
+ * -data : uncorrelated LAR vectors.
+ * -bandwidth : indicates if the given LAR vectors belong
+ * to SWB-12kHz or SWB-16kHz.
+ *
+ * Output:
+ * -data : quantized version of the input.
+ * -idx : pointer to quantization indices.
+ */
+double
+WebRtcIsac_QuantizeUncorrLar(
+ double* data,
+ int* recIdx,
+ int16_t bandwidth)
+{
+ int16_t cntr;
+ int32_t idx;
+ int16_t interVecDim;
+ const double* leftRecPoint;
+ double quantizationStepSize;
+ const int16_t* numQuantCell;
+ switch(bandwidth)
+ {
+ case isac12kHz:
+ {
+ leftRecPoint = WebRtcIsac_kLpcShapeLeftRecPointUb12;
+ quantizationStepSize = WebRtcIsac_kLpcShapeQStepSizeUb12;
+ numQuantCell = WebRtcIsac_kLpcShapeNumRecPointUb12;
+ interVecDim = UB_LPC_VEC_PER_FRAME;
+ break;
+ }
+ case isac16kHz:
+ {
+ leftRecPoint = WebRtcIsac_kLpcShapeLeftRecPointUb16;
+ quantizationStepSize = WebRtcIsac_kLpcShapeQStepSizeUb16;
+ numQuantCell = WebRtcIsac_kLpcShapeNumRecPointUb16;
+ interVecDim = UB16_LPC_VEC_PER_FRAME;
+ break;
+ }
+ default:
+ return -1;
+ }
+
+ //
+  // Quantize the parameters.
+ //
+ for(cntr = 0; cntr < UB_LPC_ORDER * interVecDim; cntr++)
+ {
+ idx = (int32_t)floor((*data - leftRecPoint[cntr]) /
+ quantizationStepSize + 0.5);
+ if(idx < 0)
+ {
+ idx = 0;
+ }
+ else if(idx >= numQuantCell[cntr])
+ {
+ idx = numQuantCell[cntr] - 1;
+ }
+
+ *data++ = leftRecPoint[cntr] + idx * quantizationStepSize;
+ *recIdx++ = idx;
+ }
+ return 0;
+}
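+
+/*
+ * The quantizer above is a plain uniform scalar quantizer: round to the
+ * nearest reconstruction point left + idx * step and clamp idx to
+ * [0, numCells - 1]. One scalar in isolation, as a minimal sketch
+ * (illustrative only; the helper name is invented for this sketch):
+ */
+#if 0
+static int QuantizeUniform(double x, double left, double step, int numCells,
+                           double* xq) {
+  int idx = (int)floor((x - left) / step + 0.5);
+  if (idx < 0) {
+    idx = 0;
+  } else if (idx >= numCells) {
+    idx = numCells - 1;
+  }
+  *xq = left + idx * step; /* reconstruction point, as in the dequantizer */
+  return idx;
+}
+#endif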
+
+
+/******************************************************************************
+ * WebRtcIsac_DequantizeLpcParam()
+ *
+ * Get the quantized value of uncorrelated LARs given the quantization indices.
+ *
+ * Input:
+ *      -idx                : pointer to quantization indices.
+ * -bandwidth : indicates if the given LAR vectors belong
+ * to SWB-12kHz or SWB-16kHz.
+ *
+ * Output:
+ * -out : pointer to quantized values.
+ */
+int16_t
+WebRtcIsac_DequantizeLpcParam(
+ const int* idx,
+ double* out,
+ int16_t bandwidth)
+{
+ int16_t cntr;
+ int16_t interVecDim;
+ const double* leftRecPoint;
+ double quantizationStepSize;
+
+ switch(bandwidth)
+ {
+ case isac12kHz:
+ {
+ leftRecPoint = WebRtcIsac_kLpcShapeLeftRecPointUb12;
+ quantizationStepSize = WebRtcIsac_kLpcShapeQStepSizeUb12;
+ interVecDim = UB_LPC_VEC_PER_FRAME;
+ break;
+ }
+ case isac16kHz:
+ {
+ leftRecPoint = WebRtcIsac_kLpcShapeLeftRecPointUb16;
+ quantizationStepSize = WebRtcIsac_kLpcShapeQStepSizeUb16;
+ interVecDim = UB16_LPC_VEC_PER_FRAME;
+ break;
+ }
+ default:
+ return -1;
+ }
+
+ //
+ // Dequantize given the quantization indices
+ //
+
+ for(cntr = 0; cntr < UB_LPC_ORDER * interVecDim; cntr++)
+ {
+ *out++ = leftRecPoint[cntr] + *idx++ * quantizationStepSize;
+ }
+ return 0;
+}
+
+
+/******************************************************************************
+ * WebRtcIsac_CorrelateIntraVec()
+ *
+ * This is the inverse of WebRtcIsac_DecorrelateIntraVec().
+ *
+ * Input:
+ * -data : uncorrelated parameters.
+ * -bandwidth : indicates if the given LAR vectors belong
+ * to SWB-12kHz or SWB-16kHz.
+ *
+ * Output:
+ *      -out                : correlated parameters.
+ */
+int16_t
+WebRtcIsac_CorrelateIntraVec(
+ const double* data,
+ double* out,
+ int16_t bandwidth)
+{
+ int16_t vecCntr;
+ int16_t rowCntr;
+ int16_t colCntr;
+ int16_t numVec;
+ const double* ptrData;
+ const double* intraVecDecorrMat;
+
+ switch(bandwidth)
+ {
+ case isac12kHz:
+ {
+ numVec = UB_LPC_VEC_PER_FRAME;
+ intraVecDecorrMat = &WebRtcIsac_kIntraVecDecorrMatUb12[0][0];
+ break;
+ }
+ case isac16kHz:
+ {
+ numVec = UB16_LPC_VEC_PER_FRAME;
+ intraVecDecorrMat = &WebRtcIsac_kIintraVecDecorrMatUb16[0][0];
+ break;
+ }
+ default:
+ return -1;
+ }
+
+
+ ptrData = data;
+ for(vecCntr = 0; vecCntr < numVec; vecCntr++)
+ {
+ for(colCntr = 0; colCntr < UB_LPC_ORDER; colCntr++)
+ {
+ *out = 0;
+ for(rowCntr = 0; rowCntr < UB_LPC_ORDER; rowCntr++)
+ {
+ *out += ptrData[rowCntr] *
+ intraVecDecorrMat[rowCntr * UB_LPC_ORDER + colCntr];
+ }
+ out++;
+ }
+ ptrData += UB_LPC_ORDER;
+ }
+ return 0;
+}
+
+/******************************************************************************
+ * WebRtcIsac_CorrelateInterVec()
+ *
+ * This is the inverse of WebRtcIsac_DecorrelateInterVec().
+ *
+ * Input:
+ *      -data               : decorrelated LAR vectors.
+ *      -bandwidth          : indicates if the given LAR vectors belong
+ *                            to SWB-12kHz or SWB-16kHz.
+ *
+ * Output:
+ *      -out                : correlated parameters.
+ */
+int16_t
+WebRtcIsac_CorrelateInterVec(
+ const double* data,
+ double* out,
+ int16_t bandwidth)
+{
+ int16_t coeffCntr;
+ int16_t rowCntr;
+ int16_t colCntr;
+ int16_t interVecDim;
+ double myVec[UB16_LPC_VEC_PER_FRAME] = {0.0};
+ const double* interVecDecorrMat;
+
+ switch(bandwidth)
+ {
+ case isac12kHz:
+ {
+ interVecDim = UB_LPC_VEC_PER_FRAME;
+ interVecDecorrMat = &WebRtcIsac_kInterVecDecorrMatUb12[0][0];
+ break;
+ }
+ case isac16kHz:
+ {
+ interVecDim = UB16_LPC_VEC_PER_FRAME;
+ interVecDecorrMat = &WebRtcIsac_kInterVecDecorrMatUb16[0][0];
+ break;
+ }
+ default:
+ return -1;
+ }
+
+ for(coeffCntr = 0; coeffCntr < UB_LPC_ORDER; coeffCntr++)
+ {
+ for(rowCntr = 0; rowCntr < interVecDim; rowCntr++)
+ {
+ myVec[rowCntr] = 0;
+ for(colCntr = 0; colCntr < interVecDim; colCntr++)
+ {
+      myVec[rowCntr] += data[coeffCntr + colCntr * UB_LPC_ORDER] *
+          interVecDecorrMat[rowCntr * interVecDim + colCntr];
+ }
+ }
+
+ for(rowCntr = 0; rowCntr < interVecDim; rowCntr++)
+ {
+ out[coeffCntr + rowCntr * UB_LPC_ORDER] = myVec[rowCntr];
+ }
+ }
+ return 0;
+}
+
+/******************************************************************************
+ * WebRtcIsac_AddLarMean()
+ *
+ * This is the inverse of WebRtcIsac_RemoveLarMean()
+ *
+ * Input:
+ *      -data               : pointer to mean-removed LARs.
+ * -bandwidth : indicates if the given LAR vectors belong
+ * to SWB-12kHz or SWB-16kHz.
+ *
+ * Output:
+ * -data : pointer to LARs.
+ */
+int16_t
+WebRtcIsac_AddLarMean(
+ double* data,
+ int16_t bandwidth)
+{
+ int16_t coeffCntr;
+ int16_t vecCntr;
+ int16_t numVec;
+ const double* meanLAR;
+
+ switch(bandwidth)
+ {
+ case isac12kHz:
+ {
+ numVec = UB_LPC_VEC_PER_FRAME;
+ meanLAR = WebRtcIsac_kMeanLarUb12;
+ break;
+ }
+ case isac16kHz:
+ {
+ numVec = UB16_LPC_VEC_PER_FRAME;
+ meanLAR = WebRtcIsac_kMeanLarUb16;
+ break;
+ }
+ default:
+ return -1;
+ }
+
+ for(vecCntr = 0; vecCntr < numVec; vecCntr++)
+ {
+ for(coeffCntr = 0; coeffCntr < UB_LPC_ORDER; coeffCntr++)
+ {
+ *data++ += meanLAR[coeffCntr];
+ }
+ }
+ return 0;
+}
+
+/******************************************************************************
+ * WebRtcIsac_ToLogDomainRemoveMean()
+ *
+ * Transform the LPC gain to log domain then remove the mean value.
+ *
+ * Input:
+ * -lpcGain : pointer to LPC Gain, expecting 6 LPC gains
+ *
+ * Output:
+ * -lpcGain : mean-removed in log domain.
+ */
+int16_t
+WebRtcIsac_ToLogDomainRemoveMean(
+ double* data)
+{
+ int16_t coeffCntr;
+ for(coeffCntr = 0; coeffCntr < UB_LPC_GAIN_DIM; coeffCntr++)
+ {
+ data[coeffCntr] = log(data[coeffCntr]) - WebRtcIsac_kMeanLpcGain;
+ }
+ return 0;
+}
+
+
+/******************************************************************************
+ * WebRtcIsac_DecorrelateLPGain()
+ *
+ * Decorrelate LPC gains. There are 6 LPC Gains per frame. This is like
+ * multiplying gain vector with decorrelating matrix.
+ *
+ * Input:
+ * -data : LPC gain in log-domain with mean removed.
+ *
+ * Output:
+ * -out : decorrelated parameters.
+ */
+int16_t WebRtcIsac_DecorrelateLPGain(
+ const double* data,
+ double* out)
+{
+ int16_t rowCntr;
+ int16_t colCntr;
+
+ for(colCntr = 0; colCntr < UB_LPC_GAIN_DIM; colCntr++)
+ {
+ *out = 0;
+ for(rowCntr = 0; rowCntr < UB_LPC_GAIN_DIM; rowCntr++)
+ {
+ *out += data[rowCntr] * WebRtcIsac_kLpcGainDecorrMat[rowCntr][colCntr];
+ }
+ out++;
+ }
+ return 0;
+}
+
+/******************************************************************************
+ * WebRtcIsac_QuantizeLpcGain()
+ *
+ * Quantize the decorrelated log-domain gains.
+ *
+ * Input:
+ * -lpcGain : uncorrelated LPC gains.
+ *
+ * Output:
+ * -idx : quantization indices
+ *      -lpcGain            : quantized value of the input.
+ */
+double WebRtcIsac_QuantizeLpcGain(
+ double* data,
+ int* idx)
+{
+ int16_t coeffCntr;
+ for(coeffCntr = 0; coeffCntr < UB_LPC_GAIN_DIM; coeffCntr++)
+ {
+ *idx = (int)floor((*data - WebRtcIsac_kLeftRecPointLpcGain[coeffCntr]) /
+ WebRtcIsac_kQSizeLpcGain + 0.5);
+
+ if(*idx < 0)
+ {
+ *idx = 0;
+ }
+ else if(*idx >= WebRtcIsac_kNumQCellLpcGain[coeffCntr])
+ {
+ *idx = WebRtcIsac_kNumQCellLpcGain[coeffCntr] - 1;
+ }
+ *data = WebRtcIsac_kLeftRecPointLpcGain[coeffCntr] + *idx *
+ WebRtcIsac_kQSizeLpcGain;
+
+ data++;
+ idx++;
+ }
+ return 0;
+}
+
+/******************************************************************************
+ * WebRtcIsac_DequantizeLpcGain()
+ *
+ * Get the quantized values given the quantization indices.
+ *
+ * Input:
+ * -idx : pointer to quantization indices.
+ *
+ * Output:
+ *      -lpcGains           : quantized values of the given parameters.
+ */
+int16_t WebRtcIsac_DequantizeLpcGain(
+ const int* idx,
+ double* out)
+{
+ int16_t coeffCntr;
+ for(coeffCntr = 0; coeffCntr < UB_LPC_GAIN_DIM; coeffCntr++)
+ {
+ *out = WebRtcIsac_kLeftRecPointLpcGain[coeffCntr] + *idx *
+ WebRtcIsac_kQSizeLpcGain;
+ out++;
+ idx++;
+ }
+ return 0;
+}
+
+/******************************************************************************
+ * WebRtcIsac_CorrelateLpcGain()
+ *
+ * This is the inverse of WebRtcIsac_DecorrelateLPGain().
+ *
+ * Input:
+ * -data : decorrelated parameters.
+ *
+ * Output:
+ * -out : correlated parameters.
+ */
+int16_t WebRtcIsac_CorrelateLpcGain(
+ const double* data,
+ double* out)
+{
+ int16_t rowCntr;
+ int16_t colCntr;
+
+ for(rowCntr = 0; rowCntr < UB_LPC_GAIN_DIM; rowCntr++)
+ {
+ *out = 0;
+ for(colCntr = 0; colCntr < UB_LPC_GAIN_DIM; colCntr++)
+ {
+ *out += WebRtcIsac_kLpcGainDecorrMat[rowCntr][colCntr] * data[colCntr];
+ }
+ out++;
+ }
+
+ return 0;
+}
+
+
+/******************************************************************************
+ * WebRtcIsac_AddMeanToLinearDomain()
+ *
+ * This is the inverse of WebRtcIsac_ToLogDomainRemoveMean().
+ *
+ * Input:
+ * -lpcGain : LPC gain in log-domain & mean removed
+ *
+ * Output:
+ * -lpcGain : LPC gain in normal domain.
+ */
+int16_t WebRtcIsac_AddMeanToLinearDomain(
+ double* lpcGains)
+{
+ int16_t coeffCntr;
+ for(coeffCntr = 0; coeffCntr < UB_LPC_GAIN_DIM; coeffCntr++)
+ {
+ lpcGains[coeffCntr] = exp(lpcGains[coeffCntr] + WebRtcIsac_kMeanLpcGain);
+ }
+ return 0;
+}
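+
+/*
+ * Taken together, the gain functions above form a quantize/reconstruct
+ * chain that mirrors the LAR shape chain: log domain plus mean removal,
+ * decorrelation, quantization, then the inverse steps. A minimal sketch
+ * of the round trip, using only functions from this file (illustrative
+ * only; the wrapper name is invented for this sketch):
+ */
+#if 0
+static void GainRoundTripSketch(double lpcGains[UB_LPC_GAIN_DIM],
+                                int idx[UB_LPC_GAIN_DIM]) {
+  double u[UB_LPC_GAIN_DIM];
+  WebRtcIsac_ToLogDomainRemoveMean(lpcGains);
+  WebRtcIsac_DecorrelateLPGain(lpcGains, u);
+  WebRtcIsac_QuantizeLpcGain(u, idx); /* u now holds the quantized values */
+  WebRtcIsac_CorrelateLpcGain(u, lpcGains);
+  WebRtcIsac_AddMeanToLinearDomain(lpcGains);
+}
+#endif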
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/encode_lpc_swb.h b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/encode_lpc_swb.h
new file mode 100644
index 0000000000..8bc3d752c3
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/encode_lpc_swb.h
@@ -0,0 +1,246 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * encode_lpc_swb.h
+ *
+ * This file contains declaration of functions used to
+ * encode LPC parameters (Shape & gain) of the upper band.
+ *
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_ENCODE_LPC_SWB_H_
+#define MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_ENCODE_LPC_SWB_H_
+
+#include "modules/audio_coding/codecs/isac/main/source/settings.h"
+#include "modules/audio_coding/codecs/isac/main/source/structs.h"
+
+/******************************************************************************
+ * WebRtcIsac_RemoveLarMean()
+ *
+ * Remove the means from LAR coefficients.
+ *
+ * Input:
+ *      -lar                : pointer to LAR vectors. LAR vectors are
+ *                            concatenated.
+ *      -bandwidth          : indicates if the given LAR vectors belong
+ *                            to SWB-12kHz or SWB-16kHz.
+ *
+ * Output:
+ *      -lar                : pointer to mean-removed LARs.
+ *
+ *
+ */
+int16_t WebRtcIsac_RemoveLarMean(double* lar, int16_t bandwidth);
+
+/******************************************************************************
+ * WebRtcIsac_DecorrelateIntraVec()
+ *
+ * Remove the correlation among the components of LAR vectors. If LAR vectors
+ * of one frame are put in a matrix where each column is a LAR vector of a
+ * sub-frame, then this is equivalent to multiplying the LAR matrix with
+ * a decorrelating matrix from the left.
+ *
+ * Input:
+ *      -inLar              : pointer to mean-removed LAR vectors.
+ * -bandwidth : indicates if the given LAR vectors belong
+ * to SWB-12kHz or SWB-16kHz.
+ *
+ * Output:
+ * -out : decorrelated LAR vectors.
+ */
+int16_t WebRtcIsac_DecorrelateIntraVec(const double* inLAR,
+ double* out,
+ int16_t bandwidth);
+
+/******************************************************************************
+ * WebRtcIsac_DecorrelateInterVec()
+ *
+ * Remove the correlation among mean-removed LAR vectors. If LAR vectors
+ * of one frame are put in a matrix where each column is a LAR vector of a
+ * sub-frame, then this is equivalent to multiplying the LAR matrix with
+ * a decorrelating matrix from the right.
+ *
+ * Input:
+ * -data : pointer to matrix of LAR vectors. The matrix
+ * is stored column-wise.
+ * -bandwidth : indicates if the given LAR vectors belong
+ * to SWB-12kHz or SWB-16kHz.
+ *
+ * Output:
+ * -out : decorrelated LAR vectors.
+ */
+int16_t WebRtcIsac_DecorrelateInterVec(const double* data,
+ double* out,
+ int16_t bandwidth);
+
+/******************************************************************************
+ * WebRtcIsac_QuantizeUncorrLar()
+ *
+ * Quantize the uncorrelated parameters.
+ *
+ * Input:
+ * -data : uncorrelated LAR vectors.
+ * -bandwidth : indicates if the given LAR vectors belong
+ * to SWB-12kHz or SWB-16kHz.
+ *
+ * Output:
+ * -data : quantized version of the input.
+ * -idx : pointer to quantization indices.
+ */
+double WebRtcIsac_QuantizeUncorrLar(double* data, int* idx, int16_t bandwidth);
+
+/******************************************************************************
+ * WebRtcIsac_CorrelateIntraVec()
+ *
+ * This is the inverse of WebRtcIsac_DecorrelateIntraVec().
+ *
+ * Input:
+ * -data : uncorrelated parameters.
+ * -bandwidth : indicates if the given LAR vectors belong
+ * to SWB-12kHz or SWB-16kHz.
+ *
+ * Output:
+ *      -out                : correlated parameters.
+ */
+int16_t WebRtcIsac_CorrelateIntraVec(const double* data,
+ double* out,
+ int16_t bandwidth);
+
+/******************************************************************************
+ * WebRtcIsac_CorrelateInterVec()
+ *
+ * This is the inverse of WebRtcIsac_DecorrelateInterVec().
+ *
+ * Input:
+ *      -data               : decorrelated LAR vectors.
+ *      -bandwidth          : indicates if the given LAR vectors belong
+ *                            to SWB-12kHz or SWB-16kHz.
+ *
+ * Output:
+ *      -out                : correlated parameters.
+ */
+int16_t WebRtcIsac_CorrelateInterVec(const double* data,
+ double* out,
+ int16_t bandwidth);
+
+/******************************************************************************
+ * WebRtcIsac_AddLarMean()
+ *
+ * This is the inverse of WebRtcIsac_RemoveLarMean()
+ *
+ * Input:
+ *      -data               : pointer to mean-removed LARs.
+ * -bandwidth : indicates if the given LAR vectors belong
+ * to SWB-12kHz or SWB-16kHz.
+ *
+ * Output:
+ * -data : pointer to LARs.
+ */
+int16_t WebRtcIsac_AddLarMean(double* data, int16_t bandwidth);
+
+/******************************************************************************
+ * WebRtcIsac_DequantizeLpcParam()
+ *
+ * Get the quantized value of uncorrelated LARs given the quantization indices.
+ *
+ * Input:
+ *      -idx                : pointer to quantization indices.
+ * -bandwidth : indicates if the given LAR vectors belong
+ * to SWB-12kHz or SWB-16kHz.
+ *
+ * Output:
+ * -out : pointer to quantized values.
+ */
+int16_t WebRtcIsac_DequantizeLpcParam(const int* idx,
+ double* out,
+ int16_t bandwidth);
+
+/******************************************************************************
+ * WebRtcIsac_ToLogDomainRemoveMean()
+ *
+ * Transform the LPC gain to log domain then remove the mean value.
+ *
+ * Input:
+ * -lpcGain : pointer to LPC Gain, expecting 6 LPC gains
+ *
+ * Output:
+ * -lpcGain : mean-removed in log domain.
+ */
+int16_t WebRtcIsac_ToLogDomainRemoveMean(double* lpGains);
+
+/******************************************************************************
+ * WebRtcIsac_DecorrelateLPGain()
+ *
+ * Decorrelate LPC gains. There are 6 LPC Gains per frame. This is like
+ * multiplying gain vector with decorrelating matrix.
+ *
+ * Input:
+ * -data : LPC gain in log-domain with mean removed.
+ *
+ * Output:
+ * -out : decorrelated parameters.
+ */
+int16_t WebRtcIsac_DecorrelateLPGain(const double* data, double* out);
+
+/******************************************************************************
+ * WebRtcIsac_QuantizeLpcGain()
+ *
+ * Quantize the decorrelated log-domain gains.
+ *
+ * Input:
+ * -lpcGain : uncorrelated LPC gains.
+ *
+ * Output:
+ * -idx : quantization indices
+ *      -lpcGain            : quantized value of the input.
+ */
+double WebRtcIsac_QuantizeLpcGain(double* lpGains, int* idx);
+
+/******************************************************************************
+ * WebRtcIsac_DequantizeLpcGain()
+ *
+ * Get the quantized values given the quantization indices.
+ *
+ * Input:
+ * -idx : pointer to quantization indices.
+ *
+ * Output:
+ *      -lpcGains           : quantized values of the given parameters.
+ */
+int16_t WebRtcIsac_DequantizeLpcGain(const int* idx, double* lpGains);
+
+/******************************************************************************
+ * WebRtcIsac_CorrelateLpcGain()
+ *
+ * This is the inverse of WebRtcIsac_DecorrelateLPGain().
+ *
+ * Input:
+ * -data : decorrelated parameters.
+ *
+ * Output:
+ * -out : correlated parameters.
+ */
+int16_t WebRtcIsac_CorrelateLpcGain(const double* data, double* out);
+
+/******************************************************************************
+ * WebRtcIsac_AddMeanToLinearDomain()
+ *
+ * This is the inverse of WebRtcIsac_ToLogDomainRemoveMean().
+ *
+ * Input:
+ * -lpcGain : LPC gain in log-domain & mean removed
+ *
+ * Output:
+ * -lpcGain : LPC gain in normal domain.
+ */
+int16_t WebRtcIsac_AddMeanToLinearDomain(double* lpcGains);
+
+#endif // MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_ENCODE_LPC_SWB_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/entropy_coding.c b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/entropy_coding.c
new file mode 100644
index 0000000000..188c8f6b86
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/entropy_coding.c
@@ -0,0 +1,2066 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * entropy_coding.c
+ *
+ * This file defines all of the functions used to arithmetically
+ * encode and decode the iSAC bitstream.
+ *
+ */
+
+
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+#include "modules/audio_coding/codecs/isac/main/source/entropy_coding.h"
+#include "modules/audio_coding/codecs/isac/main/source/settings.h"
+#include "modules/audio_coding/codecs/isac/main/source/arith_routines.h"
+#include "modules/audio_coding/codecs/isac/main/source/spectrum_ar_model_tables.h"
+#include "modules/audio_coding/codecs/isac/main/source/lpc_tables.h"
+#include "modules/audio_coding/codecs/isac/main/source/pitch_gain_tables.h"
+#include "modules/audio_coding/codecs/isac/main/source/pitch_lag_tables.h"
+#include "modules/audio_coding/codecs/isac/main/source/encode_lpc_swb.h"
+#include "modules/audio_coding/codecs/isac/main/source/lpc_shape_swb12_tables.h"
+#include "modules/audio_coding/codecs/isac/main/source/lpc_shape_swb16_tables.h"
+#include "modules/audio_coding/codecs/isac/main/source/lpc_gain_swb_tables.h"
+#include "modules/audio_coding/codecs/isac/main/source/os_specific_inline.h"
+
+#include <math.h>
+#include <string.h>
+
+static const uint16_t kLpcVecPerSegmentUb12 = 5;
+static const uint16_t kLpcVecPerSegmentUb16 = 4;
+
+/* CDF array for encoder bandwidth (12 vs 16 kHz) indicator. */
+static const uint16_t kOneBitEqualProbCdf[3] = {
+ 0, 32768, 65535 };
+
+/* Pointer to cdf array for encoder bandwidth (12 vs 16 kHz) indicator. */
+static const uint16_t* const kOneBitEqualProbCdf_ptr[1] = {
+ kOneBitEqualProbCdf };
+
+/*
+ * Initial cdf index for decoder of encoded bandwidth
+ * (12 vs 16 kHz) indicator.
+ */
+static const uint16_t kOneBitEqualProbInitIndex[1] = { 1 };
+
+
+static const int kIsSWB12 = 1;
+
+/* compute correlation from power spectrum */
+static void FindCorrelation(int32_t* PSpecQ12, int32_t* CorrQ7) {
+ int32_t summ[FRAMESAMPLES / 8];
+ int32_t diff[FRAMESAMPLES / 8];
+ const int16_t* CS_ptrQ9;
+ int32_t sum;
+ int k, n;
+
+ for (k = 0; k < FRAMESAMPLES / 8; k++) {
+ summ[k] = (PSpecQ12[k] + PSpecQ12[FRAMESAMPLES_QUARTER - 1 - k] + 16) >> 5;
+ diff[k] = (PSpecQ12[k] - PSpecQ12[FRAMESAMPLES_QUARTER - 1 - k] + 16) >> 5;
+ }
+
+ sum = 2;
+ for (n = 0; n < FRAMESAMPLES / 8; n++) {
+ sum += summ[n];
+ }
+ CorrQ7[0] = sum;
+
+ for (k = 0; k < AR_ORDER; k += 2) {
+ sum = 0;
+ CS_ptrQ9 = WebRtcIsac_kCos[k];
+ for (n = 0; n < FRAMESAMPLES / 8; n++)
+ sum += (CS_ptrQ9[n] * diff[n] + 256) >> 9;
+ CorrQ7[k + 1] = sum;
+ }
+
+ for (k = 1; k < AR_ORDER; k += 2) {
+ sum = 0;
+ CS_ptrQ9 = WebRtcIsac_kCos[k];
+ for (n = 0; n < FRAMESAMPLES / 8; n++)
+ sum += (CS_ptrQ9[n] * summ[n] + 256) >> 9;
+ CorrQ7[k + 1] = sum;
+ }
+}
+
+/* compute inverse AR power spectrum */
+/* Changed to the function used in iSAC FIX for compatibility reasons */
+static void FindInvArSpec(const int16_t* ARCoefQ12,
+ const int32_t gainQ10,
+ int32_t* CurveQ16) {
+ int32_t CorrQ11[AR_ORDER + 1];
+ int64_t sum, tmpGain;
+ int32_t diffQ16[FRAMESAMPLES / 8];
+ const int16_t* CS_ptrQ9;
+ int k, n;
+ int16_t round, shftVal = 0, sh;
+
+ sum = 0;
+ for (n = 0; n < AR_ORDER + 1; n++) {
+ sum += WEBRTC_SPL_MUL(ARCoefQ12[n], ARCoefQ12[n]); /* Q24 */
+ }
+ sum = ((sum >> 6) * 65 + 32768) >> 16; /* Q8 */
+ CorrQ11[0] = (sum * gainQ10 + 256) >> 9;
+
+ /* To avoid overflow, we shift down gainQ10 if it is large.
+ * We will not lose any precision */
+ if (gainQ10 > 400000) {
+ tmpGain = gainQ10 >> 3;
+ round = 32;
+ shftVal = 6;
+ } else {
+ tmpGain = gainQ10;
+ round = 256;
+ shftVal = 9;
+ }
+
+ for (k = 1; k < AR_ORDER + 1; k++) {
+ sum = 16384;
+ for (n = k; n < AR_ORDER + 1; n++)
+ sum += WEBRTC_SPL_MUL(ARCoefQ12[n - k], ARCoefQ12[n]); /* Q24 */
+ sum >>= 15;
+ CorrQ11[k] = (sum * tmpGain + round) >> shftVal;
+ }
+ sum = CorrQ11[0] << 7;
+ for (n = 0; n < FRAMESAMPLES / 8; n++) {
+ CurveQ16[n] = sum;
+ }
+ for (k = 1; k < AR_ORDER; k += 2) {
+ for (n = 0; n < FRAMESAMPLES / 8; n++) {
+ CurveQ16[n] += (WebRtcIsac_kCos[k][n] * CorrQ11[k + 1] + 2) >> 2;
+ }
+ }
+
+ CS_ptrQ9 = WebRtcIsac_kCos[0];
+
+ /* If CorrQ11[1] too large we avoid getting overflow in the
+ * calculation by shifting */
+ sh = WebRtcSpl_NormW32(CorrQ11[1]);
+ if (CorrQ11[1] == 0) { /* Use next correlation */
+ sh = WebRtcSpl_NormW32(CorrQ11[2]);
+ }
+ if (sh < 9) {
+ shftVal = 9 - sh;
+ } else {
+ shftVal = 0;
+ }
+ for (n = 0; n < FRAMESAMPLES / 8; n++) {
+ diffQ16[n] = (CS_ptrQ9[n] * (CorrQ11[1] >> shftVal) + 2) >> 2;
+ }
+ for (k = 2; k < AR_ORDER; k += 2) {
+ CS_ptrQ9 = WebRtcIsac_kCos[k];
+ for (n = 0; n < FRAMESAMPLES / 8; n++) {
+ diffQ16[n] += (CS_ptrQ9[n] * (CorrQ11[k + 1] >> shftVal) + 2) >> 2;
+ }
+ }
+
+ for (k = 0; k < FRAMESAMPLES / 8; k++) {
+ int32_t diff_q16_shifted = (int32_t)((uint32_t)(diffQ16[k]) << shftVal);
+ CurveQ16[FRAMESAMPLES_QUARTER - 1 - k] = CurveQ16[k] - diff_q16_shifted;
+ CurveQ16[k] += diff_q16_shifted;
+ }
+}
+
+/* Generate array of dither samples in Q7. */
+static void GenerateDitherQ7Lb(int16_t* bufQ7, uint32_t seed,
+ int length, int16_t AvgPitchGain_Q12) {
+ int k, shft;
+ int16_t dither1_Q7, dither2_Q7, dither_gain_Q14;
+
+ /* This threshold should be equal to that in decode_spec(). */
+ if (AvgPitchGain_Q12 < 614) {
+ for (k = 0; k < length - 2; k += 3) {
+ /* New random unsigned int. */
+ seed = (seed * 196314165) + 907633515;
+
+ /* Fixed-point dither sample between -64 and 64 (Q7). */
+ /* dither = seed * 128 / 4294967295 */
+ dither1_Q7 = (int16_t)(((int32_t)(seed + 16777216)) >> 25);
+
+ /* New random unsigned int. */
+ seed = (seed * 196314165) + 907633515;
+
+ /* Fixed-point dither sample between -64 and 64. */
+ dither2_Q7 = (int16_t)(((int32_t)(seed + 16777216)) >> 25);
+
+ shft = (seed >> 25) & 15;
+ if (shft < 5) {
+ bufQ7[k] = dither1_Q7;
+ bufQ7[k + 1] = dither2_Q7;
+ bufQ7[k + 2] = 0;
+ } else if (shft < 10) {
+ bufQ7[k] = dither1_Q7;
+ bufQ7[k + 1] = 0;
+ bufQ7[k + 2] = dither2_Q7;
+ } else {
+ bufQ7[k] = 0;
+ bufQ7[k + 1] = dither1_Q7;
+ bufQ7[k + 2] = dither2_Q7;
+ }
+ }
+ } else {
+ dither_gain_Q14 = (int16_t)(22528 - 10 * AvgPitchGain_Q12);
+
+ /* Dither on half of the coefficients. */
+ for (k = 0; k < length - 1; k += 2) {
+ /* New random unsigned int */
+ seed = (seed * 196314165) + 907633515;
+
+ /* Fixed-point dither sample between -64 and 64. */
+ dither1_Q7 = (int16_t)(((int32_t)(seed + 16777216)) >> 25);
+
+ /* Dither sample is placed in either even or odd index. */
+ shft = (seed >> 25) & 1; /* Either 0 or 1 */
+
+ bufQ7[k + shft] = (((dither_gain_Q14 * dither1_Q7) + 8192) >> 14);
+ bufQ7[k + 1 - shft] = 0;
+ }
+ }
+}
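+
+/*
+ * Both dither generators here draw from the same 32-bit linear
+ * congruential generator; each draw maps the top bits of the state to a
+ * Q7 value in [-64, 64]. One draw in isolation, as a minimal sketch
+ * (illustrative only; the helper name is invented for this sketch):
+ */
+#if 0
+static int16_t NextDitherQ7(uint32_t* seed) {
+  *seed = (*seed * 196314165) + 907633515; /* LCG state update */
+  /* Top bits of the state, rounded, interpreted as a signed Q7 sample. */
+  return (int16_t)(((int32_t)(*seed + 16777216)) >> 25);
+}
+#endif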
+
+
+
+/******************************************************************************
+ * GenerateDitherQ7LbUB()
+ *
+ * Generate an array of dither samples in Q7. There are fewer zeros in the
+ * dither vector compared to GenerateDitherQ7Lb.
+ *
+ * A uniform random number generator with the range [-64, 64] is employed,
+ * and the generated dither is scaled down by a heuristic factor
+ * (2048 / 2^13 = 0.25 in the code below).
+ *
+ * Input:
+ * -seed : the initial seed for the random number generator.
+ * -length : the number of dither values to be generated.
+ *
+ * Output:
+ * -bufQ7 : pointer to a buffer where dithers are written to.
+ */
+static void GenerateDitherQ7LbUB(
+ int16_t* bufQ7,
+ uint32_t seed,
+ int length) {
+ int k;
+ for (k = 0; k < length; k++) {
+ /* new random unsigned int */
+ seed = (seed * 196314165) + 907633515;
+
+ /* Fixed-point dither sample between -64 and 64 (Q7). */
+ /* bufQ7 = seed * 128 / 4294967295 */
+ bufQ7[k] = (int16_t)(((int32_t)(seed + 16777216)) >> 25);
+
+    /* Scale down by 2048 / 2^13 = 0.25. */
+ bufQ7[k] = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(bufQ7[k], 2048, 13);
+ }
+}
+
+/*
+ * Function to decode the complex spectrum from the bit stream
+ * returns the total number of bytes in the stream.
+ */
+int WebRtcIsac_DecodeSpec(Bitstr* streamdata, int16_t AvgPitchGain_Q12,
+ enum ISACBand band, double* fr, double* fi) {
+ int16_t DitherQ7[FRAMESAMPLES];
+ int16_t data[FRAMESAMPLES];
+ int32_t invARSpec2_Q16[FRAMESAMPLES_QUARTER];
+ uint16_t invARSpecQ8[FRAMESAMPLES_QUARTER];
+ int16_t ARCoefQ12[AR_ORDER + 1];
+ int16_t RCQ15[AR_ORDER];
+ int16_t gainQ10;
+ int32_t gain2_Q10, res;
+ int32_t in_sqrt;
+ int32_t newRes;
+ int k, len, i;
+ int is_12khz = !kIsSWB12;
+ int num_dft_coeff = FRAMESAMPLES;
+ /* Create dither signal. */
+ if (band == kIsacLowerBand) {
+ GenerateDitherQ7Lb(DitherQ7, streamdata->W_upper, FRAMESAMPLES,
+ AvgPitchGain_Q12);
+ } else {
+ GenerateDitherQ7LbUB(DitherQ7, streamdata->W_upper, FRAMESAMPLES);
+ if (band == kIsacUpperBand12) {
+ is_12khz = kIsSWB12;
+ num_dft_coeff = FRAMESAMPLES_HALF;
+ }
+ }
+
+ /* Decode model parameters. */
+ if (WebRtcIsac_DecodeRc(streamdata, RCQ15) < 0)
+ return -ISAC_RANGE_ERROR_DECODE_SPECTRUM;
+
+ WebRtcSpl_ReflCoefToLpc(RCQ15, AR_ORDER, ARCoefQ12);
+
+ if (WebRtcIsac_DecodeGain2(streamdata, &gain2_Q10) < 0)
+ return -ISAC_RANGE_ERROR_DECODE_SPECTRUM;
+
+ /* Compute inverse AR power spectrum. */
+ FindInvArSpec(ARCoefQ12, gain2_Q10, invARSpec2_Q16);
+
+ /* Convert to magnitude spectrum,
+ * by doing square-roots (modified from SPLIB). */
+ res = 1 << (WebRtcSpl_GetSizeInBits(invARSpec2_Q16[0]) >> 1);
+ for (k = 0; k < FRAMESAMPLES_QUARTER; k++) {
+ in_sqrt = invARSpec2_Q16[k];
+ i = 10;
+
+ /* Negative values make no sense for a real sqrt-function. */
+ if (in_sqrt < 0)
+ in_sqrt = -in_sqrt;
+
+ newRes = (in_sqrt / res + res) >> 1;
+ do {
+ res = newRes;
+ newRes = (in_sqrt / res + res) >> 1;
+ } while (newRes != res && i-- > 0);
+
+ invARSpecQ8[k] = (int16_t)newRes;
+ }
+
+ len = WebRtcIsac_DecLogisticMulti2(data, streamdata, invARSpecQ8, DitherQ7,
+ num_dft_coeff, is_12khz);
+ /* Arithmetic decoding of spectrum. */
+ if (len < 1) {
+ return -ISAC_RANGE_ERROR_DECODE_SPECTRUM;
+ }
+
+ switch (band) {
+ case kIsacLowerBand: {
+ /* Scale down spectral samples with low SNR. */
+ int32_t p1;
+ int32_t p2;
+ if (AvgPitchGain_Q12 <= 614) {
+ p1 = 30 << 10;
+ p2 = 32768 + (33 << 16);
+ } else {
+ p1 = 36 << 10;
+ p2 = 32768 + (40 << 16);
+ }
+ for (k = 0; k < FRAMESAMPLES; k += 4) {
+ gainQ10 = WebRtcSpl_DivW32W16ResW16(p1, (int16_t)(
+ (invARSpec2_Q16[k >> 2] + p2) >> 16));
+ *fr++ = (double)((data[ k ] * gainQ10 + 512) >> 10) / 128.0;
+ *fi++ = (double)((data[k + 1] * gainQ10 + 512) >> 10) / 128.0;
+ *fr++ = (double)((data[k + 2] * gainQ10 + 512) >> 10) / 128.0;
+ *fi++ = (double)((data[k + 3] * gainQ10 + 512) >> 10) / 128.0;
+ }
+ break;
+ }
+ case kIsacUpperBand12: {
+ for (k = 0, i = 0; k < FRAMESAMPLES_HALF; k += 4) {
+ fr[i] = (double)data[ k ] / 128.0;
+ fi[i] = (double)data[k + 1] / 128.0;
+ i++;
+ fr[i] = (double)data[k + 2] / 128.0;
+ fi[i] = (double)data[k + 3] / 128.0;
+ i++;
+ }
+ /* The second half of real and imaginary coefficients is zero. This is
+ * due to using the old FFT module which requires two signals as input
+ * while in 0-12 kHz mode we only have 8-12 kHz band, and the second
+ * signal is set to zero. */
+ memset(&fr[FRAMESAMPLES_QUARTER], 0, FRAMESAMPLES_QUARTER *
+ sizeof(double));
+ memset(&fi[FRAMESAMPLES_QUARTER], 0, FRAMESAMPLES_QUARTER *
+ sizeof(double));
+ break;
+ }
+ case kIsacUpperBand16: {
+ for (i = 0, k = 0; k < FRAMESAMPLES; k += 4, i++) {
+ fr[i] = (double)data[ k ] / 128.0;
+ fi[i] = (double)data[k + 1] / 128.0;
+ fr[(FRAMESAMPLES_HALF) - 1 - i] = (double)data[k + 2] / 128.0;
+ fi[(FRAMESAMPLES_HALF) - 1 - i] = (double)data[k + 3] / 128.0;
+ }
+ break;
+ }
+ }
+ return len;
+}
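+
+/*
+ * The magnitude conversion above relies on an integer Newton (Babylonian)
+ * square root: start from a power-of-two guess near sqrt(x) and iterate
+ * r <- (x / r + r) / 2 at most ten times. The same iteration in
+ * isolation, as a minimal sketch (illustrative only; the helper name is
+ * invented, and like the caller it assumes x > 0):
+ */
+#if 0
+static int32_t BabylonianSqrt(int32_t x, int32_t guess) {
+  int32_t res = guess; /* guess: positive power of two near sqrt(x) */
+  int32_t newRes;
+  int i = 10;
+  if (x < 0) {
+    x = -x; /* negative inputs make no sense for a real sqrt */
+  }
+  newRes = (x / res + res) >> 1;
+  do {
+    res = newRes;
+    newRes = (x / res + res) >> 1;
+  } while (newRes != res && i-- > 0);
+  return newRes;
+}
+#endif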
+
+
+int WebRtcIsac_EncodeSpec(const int16_t* fr, const int16_t* fi,
+ int16_t AvgPitchGain_Q12, enum ISACBand band,
+ Bitstr* streamdata) {
+ int16_t ditherQ7[FRAMESAMPLES];
+ int16_t dataQ7[FRAMESAMPLES];
+ int32_t PSpec[FRAMESAMPLES_QUARTER];
+ int32_t invARSpec2_Q16[FRAMESAMPLES_QUARTER];
+ uint16_t invARSpecQ8[FRAMESAMPLES_QUARTER];
+ int32_t CorrQ7[AR_ORDER + 1];
+ int32_t CorrQ7_norm[AR_ORDER + 1];
+ int16_t RCQ15[AR_ORDER];
+ int16_t ARCoefQ12[AR_ORDER + 1];
+ int32_t gain2_Q10;
+ int16_t val;
+ int32_t nrg, res;
+ uint32_t sum;
+ int32_t in_sqrt;
+ int32_t newRes;
+ int16_t err;
+ uint32_t nrg_u32;
+ int shift_var;
+ int k, n, j, i;
+ int is_12khz = !kIsSWB12;
+ int num_dft_coeff = FRAMESAMPLES;
+
+ /* Create dither signal. */
+ if (band == kIsacLowerBand) {
+ GenerateDitherQ7Lb(ditherQ7, streamdata->W_upper, FRAMESAMPLES,
+ AvgPitchGain_Q12);
+ } else {
+ GenerateDitherQ7LbUB(ditherQ7, streamdata->W_upper, FRAMESAMPLES);
+ if (band == kIsacUpperBand12) {
+ is_12khz = kIsSWB12;
+ num_dft_coeff = FRAMESAMPLES_HALF;
+ }
+ }
+
+ /* add dither and quantize, and compute power spectrum */
+ switch (band) {
+ case kIsacLowerBand: {
+ for (k = 0; k < FRAMESAMPLES; k += 4) {
+ val = ((*fr++ + ditherQ7[k] + 64) & 0xFF80) - ditherQ7[k];
+ dataQ7[k] = val;
+ sum = val * val;
+
+ val = ((*fi++ + ditherQ7[k + 1] + 64) & 0xFF80) - ditherQ7[k + 1];
+ dataQ7[k + 1] = val;
+ sum += val * val;
+
+ val = ((*fr++ + ditherQ7[k + 2] + 64) & 0xFF80) - ditherQ7[k + 2];
+ dataQ7[k + 2] = val;
+ sum += val * val;
+
+ val = ((*fi++ + ditherQ7[k + 3] + 64) & 0xFF80) - ditherQ7[k + 3];
+ dataQ7[k + 3] = val;
+ sum += val * val;
+
+ PSpec[k >> 2] = sum >> 2;
+ }
+ break;
+ }
+ case kIsacUpperBand12: {
+ for (k = 0, j = 0; k < FRAMESAMPLES_HALF; k += 4) {
+ val = ((*fr++ + ditherQ7[k] + 64) & 0xFF80) - ditherQ7[k];
+ dataQ7[k] = val;
+ sum = val * val;
+
+ val = ((*fi++ + ditherQ7[k + 1] + 64) & 0xFF80) - ditherQ7[k + 1];
+ dataQ7[k + 1] = val;
+ sum += val * val;
+
+ PSpec[j++] = sum >> 1;
+
+ val = ((*fr++ + ditherQ7[k + 2] + 64) & 0xFF80) - ditherQ7[k + 2];
+ dataQ7[k + 2] = val;
+ sum = val * val;
+
+ val = ((*fi++ + ditherQ7[k + 3] + 64) & 0xFF80) - ditherQ7[k + 3];
+ dataQ7[k + 3] = val;
+ sum += val * val;
+
+ PSpec[j++] = sum >> 1;
+ }
+ break;
+ }
+ case kIsacUpperBand16: {
+ for (j = 0, k = 0; k < FRAMESAMPLES; k += 4, j++) {
+ val = ((fr[j] + ditherQ7[k] + 64) & 0xFF80) - ditherQ7[k];
+ dataQ7[k] = val;
+ sum = val * val;
+
+ val = ((fi[j] + ditherQ7[k + 1] + 64) & 0xFF80) - ditherQ7[k + 1];
+ dataQ7[k + 1] = val;
+ sum += val * val;
+
+ val = ((fr[(FRAMESAMPLES_HALF) - 1 - j] + ditherQ7[k + 2] + 64) &
+ 0xFF80) - ditherQ7[k + 2];
+ dataQ7[k + 2] = val;
+ sum += val * val;
+
+ val = ((fi[(FRAMESAMPLES_HALF) - 1 - j] + ditherQ7[k + 3] + 64) &
+ 0xFF80) - ditherQ7[k + 3];
+ dataQ7[k + 3] = val;
+ sum += val * val;
+
+ PSpec[k >> 2] = sum >> 2;
+ }
+ break;
+ }
+ }
+
+ /* compute correlation from power spectrum */
+ FindCorrelation(PSpec, CorrQ7);
+
+ /* Find AR coefficients */
+  /* Number of bit shifts needed to normalize CorrQ7[0] to 14 bits
+   * (leaving room for the sign). */
+ shift_var = WebRtcSpl_NormW32(CorrQ7[0]) - 18;
+
+ if (shift_var > 0) {
+ for (k = 0; k < AR_ORDER + 1; k++) {
+ CorrQ7_norm[k] = CorrQ7[k] << shift_var;
+ }
+ } else {
+ for (k = 0; k < AR_ORDER + 1; k++) {
+ CorrQ7_norm[k] = CorrQ7[k] >> (-shift_var);
+ }
+ }
+
+ /* Find RC coefficients. */
+ WebRtcSpl_AutoCorrToReflCoef(CorrQ7_norm, AR_ORDER, RCQ15);
+
+ /* Quantize & code RC Coefficient. */
+ WebRtcIsac_EncodeRc(RCQ15, streamdata);
+
+ /* RC -> AR coefficients */
+ WebRtcSpl_ReflCoefToLpc(RCQ15, AR_ORDER, ARCoefQ12);
+
+ /* Compute ARCoef' * Corr * ARCoef in Q19. */
+ nrg = 0;
+ for (j = 0; j <= AR_ORDER; j++) {
+ for (n = 0; n <= j; n++) {
+ nrg += (ARCoefQ12[j] * ((CorrQ7_norm[j - n] * ARCoefQ12[n] + 256) >> 9) +
+ 4) >> 3;
+ }
+ for (n = j + 1; n <= AR_ORDER; n++) {
+ nrg += (ARCoefQ12[j] * ((CorrQ7_norm[n - j] * ARCoefQ12[n] + 256) >> 9) +
+ 4) >> 3;
+ }
+ }
+
+ nrg_u32 = (uint32_t)nrg;
+ if (shift_var > 0) {
+ nrg_u32 = nrg_u32 >> shift_var;
+ } else {
+ nrg_u32 = nrg_u32 << (-shift_var);
+ }
+ if (nrg_u32 > 0x7FFFFFFF) {
+ nrg = 0x7FFFFFFF;
+ } else {
+ nrg = (int32_t)nrg_u32;
+ }
+ /* Also shifts 31 bits to the left! */
+ gain2_Q10 = WebRtcSpl_DivResultInQ31(FRAMESAMPLES_QUARTER, nrg);
+
+ /* Quantize & code gain2_Q10. */
+ if (WebRtcIsac_EncodeGain2(&gain2_Q10, streamdata)) {
+ return -1;
+ }
+
+ /* Compute inverse AR power spectrum. */
+ FindInvArSpec(ARCoefQ12, gain2_Q10, invARSpec2_Q16);
+ /* Convert to magnitude spectrum, by doing square-roots
+ * (modified from SPLIB). */
+ res = 1 << (WebRtcSpl_GetSizeInBits(invARSpec2_Q16[0]) >> 1);
+ for (k = 0; k < FRAMESAMPLES_QUARTER; k++) {
+ in_sqrt = invARSpec2_Q16[k];
+ i = 10;
+ /* Negative values make no sense for a real sqrt-function. */
+ if (in_sqrt < 0) {
+ in_sqrt = -in_sqrt;
+ }
+ newRes = (in_sqrt / res + res) >> 1;
+ do {
+ res = newRes;
+ newRes = (in_sqrt / res + res) >> 1;
+ } while (newRes != res && i-- > 0);
+
+ invARSpecQ8[k] = (int16_t)newRes;
+ }
+ /* arithmetic coding of spectrum */
+ err = WebRtcIsac_EncLogisticMulti2(streamdata, dataQ7, invARSpecQ8,
+ num_dft_coeff, is_12khz);
+ if (err < 0) {
+ return (err);
+ }
+ return 0;
+}
+
+
+/* step-up */
+void WebRtcIsac_Rc2Poly(double* RC, int N, double* a) {
+ int m, k;
+ double tmp[MAX_AR_MODEL_ORDER];
+
+ a[0] = 1.0;
+ tmp[0] = 1.0;
+ for (m = 1; m <= N; m++) {
+ /* copy */
+ memcpy(&tmp[1], &a[1], (m - 1) * sizeof(double));
+ a[m] = RC[m - 1];
+ for (k = 1; k < m; k++) {
+ a[k] += RC[m - 1] * tmp[m - k];
+ }
+ }
+ return;
+}
+
+/* step-down */
+void WebRtcIsac_Poly2Rc(double* a, int N, double* RC) {
+ int m, k;
+ double tmp[MAX_AR_MODEL_ORDER];
+ double tmp_inv;
+
+ RC[N - 1] = a[N];
+ for (m = N - 1; m > 0; m--) {
+ tmp_inv = 1.0 / (1.0 - RC[m] * RC[m]);
+ for (k = 1; k <= m; k++) {
+ tmp[k] = (a[k] - RC[m] * a[m - k + 1]) * tmp_inv;
+ }
+
+ memcpy(&a[1], &tmp[1], (m - 1) * sizeof(double));
+ RC[m - 1] = tmp[m];
+ }
+ return;
+}
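+
+/*
+ * WebRtcIsac_Rc2Poly (step-up) and WebRtcIsac_Poly2Rc (step-down) are
+ * exact inverses for a stable filter (all |RC[k]| < 1). A minimal
+ * round-trip sketch (illustrative only; the wrapper name is invented,
+ * and note that Poly2Rc overwrites the polynomial in place):
+ */
+#if 0
+static void RcRoundTripSketch(void) {
+  double rc[2] = {0.5, -0.25};
+  double a[3];
+  double rcBack[2];
+  WebRtcIsac_Rc2Poly(rc, 2, a);     /* reflection coefficients -> A(z) */
+  WebRtcIsac_Poly2Rc(a, 2, rcBack); /* back again: rcBack equals rc */
+}
+#endif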
+
+
+#define MAX_ORDER 100
+
+/* Matlab's LAR definition */
+void WebRtcIsac_Rc2Lar(const double* refc, double* lar, int order) {
+ int k;
+ for (k = 0; k < order; k++) {
+ lar[k] = log((1 + refc[k]) / (1 - refc[k]));
+ }
+}
+
+
+void WebRtcIsac_Lar2Rc(const double* lar, double* refc, int order) {
+ int k;
+ double tmp;
+
+ for (k = 0; k < order; k++) {
+ tmp = exp(lar[k]);
+ refc[k] = (tmp - 1) / (tmp + 1);
+ }
+}
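+
+/*
+ * The LAR mapping above is lar = log((1 + rc) / (1 - rc)) = 2 * atanh(rc),
+ * so the inverse used in WebRtcIsac_Lar2Rc is rc = tanh(lar / 2); the
+ * exp-based form is algebraically identical. The equivalent one-liner,
+ * as a minimal sketch (illustrative only; invented helper name):
+ */
+#if 0
+static double LarToRcSketch(double lar) {
+  return tanh(lar * 0.5); /* == (exp(lar) - 1) / (exp(lar) + 1) */
+}
+#endif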
+
+void WebRtcIsac_Poly2Lar(double* lowband, int orderLo, double* hiband,
+ int orderHi, int Nsub, double* lars) {
+ int k;
+ double rc[MAX_ORDER], *inpl, *inph, *outp;
+
+ inpl = lowband;
+ inph = hiband;
+ outp = lars;
+ for (k = 0; k < Nsub; k++) {
+ /* gains */
+ outp[0] = inpl[0];
+ outp[1] = inph[0];
+ outp += 2;
+
+ /* Low band */
+ inpl[0] = 1.0;
+ WebRtcIsac_Poly2Rc(inpl, orderLo, rc);
+ WebRtcIsac_Rc2Lar(rc, outp, orderLo);
+ outp += orderLo;
+
+ /* High band */
+ inph[0] = 1.0;
+ WebRtcIsac_Poly2Rc(inph, orderHi, rc);
+ WebRtcIsac_Rc2Lar(rc, outp, orderHi);
+ outp += orderHi;
+
+ inpl += orderLo + 1;
+ inph += orderHi + 1;
+ }
+}
+
+
+int16_t WebRtcIsac_Poly2LarUB(double* lpcVecs, int16_t bandwidth) {
+ double poly[MAX_ORDER];
+ double rc[MAX_ORDER];
+ double* ptrIO;
+ int16_t vecCntr;
+ int16_t vecSize;
+ int16_t numVec;
+
+ vecSize = UB_LPC_ORDER;
+ switch (bandwidth) {
+ case isac12kHz: {
+ numVec = UB_LPC_VEC_PER_FRAME;
+ break;
+ }
+ case isac16kHz: {
+ numVec = UB16_LPC_VEC_PER_FRAME;
+ break;
+ }
+ default:
+ return -1;
+ }
+
+ ptrIO = lpcVecs;
+ poly[0] = 1.0;
+ for (vecCntr = 0; vecCntr < numVec; vecCntr++) {
+ memcpy(&poly[1], ptrIO, sizeof(double) * vecSize);
+ WebRtcIsac_Poly2Rc(poly, vecSize, rc);
+ WebRtcIsac_Rc2Lar(rc, ptrIO, vecSize);
+ ptrIO += vecSize;
+ }
+ return 0;
+}
+
+
+void WebRtcIsac_Lar2Poly(double* lars, double* lowband, int orderLo,
+ double* hiband, int orderHi, int Nsub) {
+ int k, orderTot;
+ double rc[MAX_ORDER], *outpl, *outph, *inp;
+
+ orderTot = (orderLo + orderHi + 2);
+ outpl = lowband;
+ outph = hiband;
+  /* First two elements of 'inp' store gains. */
+ inp = lars;
+ for (k = 0; k < Nsub; k++) {
+ /* Low band */
+ WebRtcIsac_Lar2Rc(&inp[2], rc, orderLo);
+ WebRtcIsac_Rc2Poly(rc, orderLo, outpl);
+
+ /* High band */
+ WebRtcIsac_Lar2Rc(&inp[orderLo + 2], rc, orderHi);
+ WebRtcIsac_Rc2Poly(rc, orderHi, outph);
+
+ /* gains */
+ outpl[0] = inp[0];
+ outph[0] = inp[1];
+
+ outpl += orderLo + 1;
+ outph += orderHi + 1;
+ inp += orderTot;
+ }
+}
+
+/*
+ * assumes 2 LAR vectors interpolates to 'numPolyVec' A-polynomials
+ * Note: 'numPolyVecs' includes the first and the last point of the interval
+ */
+void WebRtcIsac_Lar2PolyInterpolUB(double* larVecs, double* percepFilterParams,
+ int numPolyVecs) {
+ int polyCntr, coeffCntr;
+ double larInterpol[UB_LPC_ORDER];
+ double rc[UB_LPC_ORDER];
+ double delta[UB_LPC_ORDER];
+
+ /* calculate the step-size for linear interpolation coefficients */
+ for (coeffCntr = 0; coeffCntr < UB_LPC_ORDER; coeffCntr++) {
+ delta[coeffCntr] = (larVecs[UB_LPC_ORDER + coeffCntr] -
+ larVecs[coeffCntr]) / (numPolyVecs - 1);
+ }
+
+ for (polyCntr = 0; polyCntr < numPolyVecs; polyCntr++) {
+ for (coeffCntr = 0; coeffCntr < UB_LPC_ORDER; coeffCntr++) {
+ larInterpol[coeffCntr] = larVecs[coeffCntr] +
+ delta[coeffCntr] * polyCntr;
+ }
+ WebRtcIsac_Lar2Rc(larInterpol, rc, UB_LPC_ORDER);
+
+    /* Convert to an A-polynomial. The following function writes A[0] = 1
+     * where the gain has to be written; the gain itself is written
+     * afterwards (outside this function). This way we save a memcpy. */
+ WebRtcIsac_Rc2Poly(rc, UB_LPC_ORDER, percepFilterParams);
+ percepFilterParams += (UB_LPC_ORDER + 1);
+ }
+}
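+
+/*
+ * The interpolation above is plain per-coefficient linear interpolation
+ * between two end-point LAR vectors, evaluated at numPolyVecs equally
+ * spaced points including both ends. One scalar, as a minimal sketch
+ * (illustrative only; invented helper name):
+ */
+#if 0
+static double LerpSketch(double v0, double v1, int i, int numPoints) {
+  /* i == 0 yields v0; i == numPoints - 1 yields v1. */
+  return v0 + (v1 - v0) * i / (numPoints - 1);
+}
+#endif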
+
+int WebRtcIsac_DecodeLpc(Bitstr* streamdata, double* LPCCoef_lo,
+ double* LPCCoef_hi) {
+ double lars[KLT_ORDER_GAIN + KLT_ORDER_SHAPE];
+ int err;
+
+ err = WebRtcIsac_DecodeLpcCoef(streamdata, lars);
+ if (err < 0) {
+ return -ISAC_RANGE_ERROR_DECODE_LPC;
+ }
+ WebRtcIsac_Lar2Poly(lars, LPCCoef_lo, ORDERLO, LPCCoef_hi, ORDERHI,
+ SUBFRAMES);
+ return 0;
+}
+
+int16_t WebRtcIsac_DecodeInterpolLpcUb(Bitstr* streamdata,
+ double* percepFilterParams,
+ int16_t bandwidth) {
+ double lpcCoeff[UB_LPC_ORDER * UB16_LPC_VEC_PER_FRAME];
+ int err;
+ int interpolCntr;
+ int subframeCntr;
+ int16_t numSegments;
+ int16_t numVecPerSegment;
+ int16_t numGains;
+
+ double percepFilterGains[SUBFRAMES << 1];
+ double* ptrOutParam = percepFilterParams;
+
+ err = WebRtcIsac_DecodeLpcCoefUB(streamdata, lpcCoeff, percepFilterGains,
+ bandwidth);
+ if (err < 0) {
+ return -ISAC_RANGE_ERROR_DECODE_LPC;
+ }
+
+ switch (bandwidth) {
+ case isac12kHz: {
+ numGains = SUBFRAMES;
+ numSegments = UB_LPC_VEC_PER_FRAME - 1;
+ numVecPerSegment = kLpcVecPerSegmentUb12;
+ break;
+ }
+ case isac16kHz: {
+ numGains = SUBFRAMES << 1;
+ numSegments = UB16_LPC_VEC_PER_FRAME - 1;
+ numVecPerSegment = kLpcVecPerSegmentUb16;
+ break;
+ }
+ default:
+ return -1;
+ }
+
+ for (interpolCntr = 0; interpolCntr < numSegments; interpolCntr++) {
+ WebRtcIsac_Lar2PolyInterpolUB(&lpcCoeff[interpolCntr * UB_LPC_ORDER],
+ ptrOutParam, numVecPerSegment + 1);
+ ptrOutParam += (numVecPerSegment * (UB_LPC_ORDER + 1));
+ }
+
+ ptrOutParam = percepFilterParams;
+
+ if (bandwidth == isac16kHz) {
+ ptrOutParam += (1 + UB_LPC_ORDER);
+ }
+
+ for (subframeCntr = 0; subframeCntr < numGains; subframeCntr++) {
+ *ptrOutParam = percepFilterGains[subframeCntr];
+ ptrOutParam += (1 + UB_LPC_ORDER);
+ }
+ return 0;
+}
+
+
+/* decode & dequantize LPC Coef */
+int WebRtcIsac_DecodeLpcCoef(Bitstr* streamdata, double* LPCCoef) {
+ int j, k, n, pos, pos2, posg, poss, offsg, offss, offs2;
+ int index_g[KLT_ORDER_GAIN], index_s[KLT_ORDER_SHAPE];
+ double tmpcoeffs_g[KLT_ORDER_GAIN], tmpcoeffs_s[KLT_ORDER_SHAPE];
+ double tmpcoeffs2_g[KLT_ORDER_GAIN], tmpcoeffs2_s[KLT_ORDER_SHAPE];
+ double sum;
+ int err;
+ int model = 1;
+
+ /* entropy decoding of model number */
+ /* We are keeping this for backward compatibility of bit-streams. */
+ err = WebRtcIsac_DecHistOneStepMulti(&model, streamdata,
+ WebRtcIsac_kQKltModelCdfPtr,
+ WebRtcIsac_kQKltModelInitIndex, 1);
+ if (err < 0) {
+ return err;
+ }
+ /* Only accepted value of model is 0. It is kept in bit-stream for backward
+ * compatibility. */
+ if (model != 0) {
+ return -ISAC_DISALLOWED_LPC_MODEL;
+ }
+
+ /* entropy decoding of quantization indices */
+ err = WebRtcIsac_DecHistOneStepMulti(
+ index_s, streamdata, WebRtcIsac_kQKltCdfPtrShape,
+ WebRtcIsac_kQKltInitIndexShape, KLT_ORDER_SHAPE);
+ if (err < 0) {
+ return err;
+ }
+ err = WebRtcIsac_DecHistOneStepMulti(
+ index_g, streamdata, WebRtcIsac_kQKltCdfPtrGain,
+ WebRtcIsac_kQKltInitIndexGain, KLT_ORDER_GAIN);
+ if (err < 0) {
+ return err;
+ }
+
+ /* find quantization levels for coefficients */
+ for (k = 0; k < KLT_ORDER_SHAPE; k++) {
+ tmpcoeffs_s[k] =
+ WebRtcIsac_kQKltLevelsShape[WebRtcIsac_kQKltOffsetShape[k] +
+ index_s[k]];
+ }
+ for (k = 0; k < KLT_ORDER_GAIN; k++) {
+ tmpcoeffs_g[k] = WebRtcIsac_kQKltLevelsGain[WebRtcIsac_kQKltOffsetGain[k] +
+ index_g[k]];
+ }
+
+ /* Inverse KLT */
+
+ /* Left transform, transpose matrix! */
+ offsg = 0;
+ offss = 0;
+ posg = 0;
+ poss = 0;
+ for (j = 0; j < SUBFRAMES; j++) {
+ offs2 = 0;
+ for (k = 0; k < LPC_GAIN_ORDER; k++) {
+ sum = 0;
+ pos = offsg;
+ pos2 = offs2;
+ for (n = 0; n < LPC_GAIN_ORDER; n++) {
+ sum += tmpcoeffs_g[pos++] * WebRtcIsac_kKltT1Gain[pos2++];
+ }
+ tmpcoeffs2_g[posg++] = sum;
+ offs2 += LPC_GAIN_ORDER;
+ }
+ offs2 = 0;
+ for (k = 0; k < LPC_SHAPE_ORDER; k++) {
+ sum = 0;
+ pos = offss;
+ pos2 = offs2;
+ for (n = 0; n < LPC_SHAPE_ORDER; n++) {
+ sum += tmpcoeffs_s[pos++] * WebRtcIsac_kKltT1Shape[pos2++];
+ }
+ tmpcoeffs2_s[poss++] = sum;
+ offs2 += LPC_SHAPE_ORDER;
+ }
+ offsg += LPC_GAIN_ORDER;
+ offss += LPC_SHAPE_ORDER;
+ }
+
+ /* Right transform, transpose matrix */
+ offsg = 0;
+ offss = 0;
+ posg = 0;
+ poss = 0;
+ for (j = 0; j < SUBFRAMES; j++) {
+ posg = offsg;
+ for (k = 0; k < LPC_GAIN_ORDER; k++) {
+ sum = 0;
+ pos = k;
+ pos2 = j;
+ for (n = 0; n < SUBFRAMES; n++) {
+ sum += tmpcoeffs2_g[pos] * WebRtcIsac_kKltT2Gain[pos2];
+ pos += LPC_GAIN_ORDER;
+ pos2 += SUBFRAMES;
+
+ }
+ tmpcoeffs_g[posg++] = sum;
+ }
+ poss = offss;
+ for (k = 0; k < LPC_SHAPE_ORDER; k++) {
+ sum = 0;
+ pos = k;
+ pos2 = j;
+ for (n = 0; n < SUBFRAMES; n++) {
+ sum += tmpcoeffs2_s[pos] * WebRtcIsac_kKltT2Shape[pos2];
+ pos += LPC_SHAPE_ORDER;
+ pos2 += SUBFRAMES;
+ }
+ tmpcoeffs_s[poss++] = sum;
+ }
+ offsg += LPC_GAIN_ORDER;
+ offss += LPC_SHAPE_ORDER;
+ }
+
+ /* scaling, mean addition, and gain restoration */
+ posg = 0;
+ poss = 0;
+ pos = 0;
+ for (k = 0; k < SUBFRAMES; k++) {
+ /* log gains */
+ LPCCoef[pos] = tmpcoeffs_g[posg] / LPC_GAIN_SCALE;
+ LPCCoef[pos] += WebRtcIsac_kLpcMeansGain[posg];
+ LPCCoef[pos] = exp(LPCCoef[pos]);
+ pos++;
+ posg++;
+ LPCCoef[pos] = tmpcoeffs_g[posg] / LPC_GAIN_SCALE;
+ LPCCoef[pos] += WebRtcIsac_kLpcMeansGain[posg];
+ LPCCoef[pos] = exp(LPCCoef[pos]);
+ pos++;
+ posg++;
+
+ /* Low-band LAR coefficients. */
+ for (n = 0; n < LPC_LOBAND_ORDER; n++, pos++, poss++) {
+ LPCCoef[pos] = tmpcoeffs_s[poss] / LPC_LOBAND_SCALE;
+ LPCCoef[pos] += WebRtcIsac_kLpcMeansShape[poss];
+ }
+
+ /* High-band LAR coefficients. */
+ for (n = 0; n < LPC_HIBAND_ORDER; n++, pos++, poss++) {
+ LPCCoef[pos] = tmpcoeffs_s[poss] / LPC_HIBAND_SCALE;
+ LPCCoef[pos] += WebRtcIsac_kLpcMeansShape[poss];
+ }
+ }
+ return 0;
+}
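+
+/*
+ * The "left transform" / "right transform" stages above implement a
+ * separable two-dimensional KLT: the frame's coefficients, viewed as a
+ * matrix, are multiplied by one transform matrix along the coefficient
+ * dimension and by another along the subframe dimension. The fused loops
+ * avoid materializing intermediate matrices; a dense left-multiplication
+ * of one stage, as a minimal sketch (illustrative only; invented name):
+ */
+#if 0
+/* Y = A * X, where A is m-by-m, X and Y are m-by-n, all row-major. */
+static void LeftMulSketch(const double* A, const double* X, double* Y,
+                          int m, int n) {
+  int i, j, k;
+  for (i = 0; i < m; i++) {
+    for (j = 0; j < n; j++) {
+      double s = 0.0;
+      for (k = 0; k < m; k++) {
+        s += A[i * m + k] * X[k * n + j];
+      }
+      Y[i * n + j] = s;
+    }
+  }
+}
+#endif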
+
+/* Encode LPC in LAR domain. */
+void WebRtcIsac_EncodeLar(double* LPCCoef, Bitstr* streamdata,
+ IsacSaveEncoderData* encData) {
+ int j, k, n, pos, pos2, poss, offss, offs2;
+ int index_s[KLT_ORDER_SHAPE];
+ int index_ovr_s[KLT_ORDER_SHAPE];
+ double tmpcoeffs_s[KLT_ORDER_SHAPE];
+ double tmpcoeffs2_s[KLT_ORDER_SHAPE];
+ double sum;
+ const int kModel = 0;
+
+ /* Mean removal and scaling. */
+ poss = 0;
+ pos = 0;
+ for (k = 0; k < SUBFRAMES; k++) {
+ /* First two element are gains, move over them. */
+ pos += 2;
+
+ /* Low-band LAR coefficients. */
+ for (n = 0; n < LPC_LOBAND_ORDER; n++, poss++, pos++) {
+ tmpcoeffs_s[poss] = LPCCoef[pos] - WebRtcIsac_kLpcMeansShape[poss];
+ tmpcoeffs_s[poss] *= LPC_LOBAND_SCALE;
+ }
+
+ /* High-band LAR coefficients. */
+ for (n = 0; n < LPC_HIBAND_ORDER; n++, poss++, pos++) {
+ tmpcoeffs_s[poss] = LPCCoef[pos] - WebRtcIsac_kLpcMeansShape[poss];
+ tmpcoeffs_s[poss] *= LPC_HIBAND_SCALE;
+ }
+ }
+
+ /* KLT */
+
+ /* Left transform. */
+ offss = 0;
+ for (j = 0; j < SUBFRAMES; j++) {
+ poss = offss;
+ for (k = 0; k < LPC_SHAPE_ORDER; k++) {
+ sum = 0;
+ pos = offss;
+ pos2 = k;
+ for (n = 0; n < LPC_SHAPE_ORDER; n++) {
+ sum += tmpcoeffs_s[pos++] * WebRtcIsac_kKltT1Shape[pos2];
+ pos2 += LPC_SHAPE_ORDER;
+ }
+ tmpcoeffs2_s[poss++] = sum;
+ }
+ offss += LPC_SHAPE_ORDER;
+ }
+
+ /* Right transform. */
+ offss = 0;
+ offs2 = 0;
+ for (j = 0; j < SUBFRAMES; j++) {
+ poss = offss;
+ for (k = 0; k < LPC_SHAPE_ORDER; k++) {
+ sum = 0;
+ pos = k;
+ pos2 = offs2;
+ for (n = 0; n < SUBFRAMES; n++) {
+ sum += tmpcoeffs2_s[pos] * WebRtcIsac_kKltT2Shape[pos2++];
+ pos += LPC_SHAPE_ORDER;
+ }
+ tmpcoeffs_s[poss++] = sum;
+ }
+ offs2 += SUBFRAMES;
+ offss += LPC_SHAPE_ORDER;
+ }
+
+ /* Quantize coefficients. */
+ for (k = 0; k < KLT_ORDER_SHAPE; k++) {
+ index_s[k] = (WebRtcIsac_lrint(tmpcoeffs_s[k] / KLT_STEPSIZE)) +
+ WebRtcIsac_kQKltQuantMinShape[k];
+ if (index_s[k] < 0) {
+ index_s[k] = 0;
+ } else if (index_s[k] > WebRtcIsac_kQKltMaxIndShape[k]) {
+ index_s[k] = WebRtcIsac_kQKltMaxIndShape[k];
+ }
+ index_ovr_s[k] = WebRtcIsac_kQKltOffsetShape[k] + index_s[k];
+ }
+
+
+  /* Only one model remains in this version of the code, kModel = 0. It is
+   * kept so that bit-streams remain backward compatible. */
+ /* entropy coding of model number */
+ WebRtcIsac_EncHistMulti(streamdata, &kModel, WebRtcIsac_kQKltModelCdfPtr, 1);
+
+ /* Save data for creation of multiple bit streams */
+ /* Entropy coding of quantization indices - shape only. */
+ WebRtcIsac_EncHistMulti(streamdata, index_s, WebRtcIsac_kQKltCdfPtrShape,
+ KLT_ORDER_SHAPE);
+
+ /* Save data for creation of multiple bit streams. */
+ for (k = 0; k < KLT_ORDER_SHAPE; k++) {
+ encData->LPCindex_s[KLT_ORDER_SHAPE * encData->startIdx + k] = index_s[k];
+ }
+
+ /* Find quantization levels for shape coefficients. */
+ for (k = 0; k < KLT_ORDER_SHAPE; k++) {
+ tmpcoeffs_s[k] = WebRtcIsac_kQKltLevelsShape[index_ovr_s[k]];
+ }
+ /* Inverse KLT. */
+  /* Left transform, transpose matrix! */
+ offss = 0;
+ poss = 0;
+ for (j = 0; j < SUBFRAMES; j++) {
+ offs2 = 0;
+ for (k = 0; k < LPC_SHAPE_ORDER; k++) {
+ sum = 0;
+ pos = offss;
+ pos2 = offs2;
+ for (n = 0; n < LPC_SHAPE_ORDER; n++) {
+ sum += tmpcoeffs_s[pos++] * WebRtcIsac_kKltT1Shape[pos2++];
+ }
+ tmpcoeffs2_s[poss++] = sum;
+ offs2 += LPC_SHAPE_ORDER;
+ }
+ offss += LPC_SHAPE_ORDER;
+ }
+
+  /* Right transform, transpose matrix. */
+ offss = 0;
+ poss = 0;
+ for (j = 0; j < SUBFRAMES; j++) {
+ poss = offss;
+ for (k = 0; k < LPC_SHAPE_ORDER; k++) {
+ sum = 0;
+ pos = k;
+ pos2 = j;
+ for (n = 0; n < SUBFRAMES; n++) {
+ sum += tmpcoeffs2_s[pos] * WebRtcIsac_kKltT2Shape[pos2];
+ pos += LPC_SHAPE_ORDER;
+ pos2 += SUBFRAMES;
+ }
+ tmpcoeffs_s[poss++] = sum;
+ }
+ offss += LPC_SHAPE_ORDER;
+ }
+
+ /* Scaling, mean addition, and gain restoration. */
+ poss = 0;
+ pos = 0;
+ for (k = 0; k < SUBFRAMES; k++) {
+ /* Ignore gains. */
+ pos += 2;
+
+ /* Low band LAR coefficients. */
+ for (n = 0; n < LPC_LOBAND_ORDER; n++, pos++, poss++) {
+ LPCCoef[pos] = tmpcoeffs_s[poss] / LPC_LOBAND_SCALE;
+ LPCCoef[pos] += WebRtcIsac_kLpcMeansShape[poss];
+ }
+
+ /* High band LAR coefficients. */
+ for (n = 0; n < LPC_HIBAND_ORDER; n++, pos++, poss++) {
+ LPCCoef[pos] = tmpcoeffs_s[poss] / LPC_HIBAND_SCALE;
+ LPCCoef[pos] += WebRtcIsac_kLpcMeansShape[poss];
+ }
+ }
+}
+
+
+void WebRtcIsac_EncodeLpcLb(double* LPCCoef_lo, double* LPCCoef_hi,
+ Bitstr* streamdata, IsacSaveEncoderData* encData) {
+ double lars[KLT_ORDER_GAIN + KLT_ORDER_SHAPE];
+ int k;
+
+ WebRtcIsac_Poly2Lar(LPCCoef_lo, ORDERLO, LPCCoef_hi, ORDERHI, SUBFRAMES,
+ lars);
+ WebRtcIsac_EncodeLar(lars, streamdata, encData);
+ WebRtcIsac_Lar2Poly(lars, LPCCoef_lo, ORDERLO, LPCCoef_hi, ORDERHI,
+ SUBFRAMES);
+ /* Save data for creation of multiple bit streams (and transcoding). */
+ for (k = 0; k < (ORDERLO + 1)*SUBFRAMES; k++) {
+ encData->LPCcoeffs_lo[(ORDERLO + 1)*SUBFRAMES * encData->startIdx + k] =
+ LPCCoef_lo[k];
+ }
+ for (k = 0; k < (ORDERHI + 1)*SUBFRAMES; k++) {
+ encData->LPCcoeffs_hi[(ORDERHI + 1)*SUBFRAMES * encData->startIdx + k] =
+ LPCCoef_hi[k];
+ }
+}
+
+
+int16_t WebRtcIsac_EncodeLpcUB(double* lpcVecs, Bitstr* streamdata,
+ double* interpolLPCCoeff,
+ int16_t bandwidth,
+ ISACUBSaveEncDataStruct* encData) {
+ double U[UB_LPC_ORDER * UB16_LPC_VEC_PER_FRAME];
+ int idx[UB_LPC_ORDER * UB16_LPC_VEC_PER_FRAME];
+ int interpolCntr;
+
+ WebRtcIsac_Poly2LarUB(lpcVecs, bandwidth);
+ WebRtcIsac_RemoveLarMean(lpcVecs, bandwidth);
+ WebRtcIsac_DecorrelateIntraVec(lpcVecs, U, bandwidth);
+ WebRtcIsac_DecorrelateInterVec(U, lpcVecs, bandwidth);
+ WebRtcIsac_QuantizeUncorrLar(lpcVecs, idx, bandwidth);
+
+ WebRtcIsac_CorrelateInterVec(lpcVecs, U, bandwidth);
+ WebRtcIsac_CorrelateIntraVec(U, lpcVecs, bandwidth);
+ WebRtcIsac_AddLarMean(lpcVecs, bandwidth);
+
+ switch (bandwidth) {
+ case isac12kHz: {
+ /* Store the indices to be used for multiple encoding. */
+ memcpy(encData->indexLPCShape, idx, UB_LPC_ORDER *
+ UB_LPC_VEC_PER_FRAME * sizeof(int));
+ WebRtcIsac_EncHistMulti(streamdata, idx, WebRtcIsac_kLpcShapeCdfMatUb12,
+ UB_LPC_ORDER * UB_LPC_VEC_PER_FRAME);
+ for (interpolCntr = 0; interpolCntr < UB_INTERPOL_SEGMENTS;
+ interpolCntr++) {
+ WebRtcIsac_Lar2PolyInterpolUB(lpcVecs, interpolLPCCoeff,
+ kLpcVecPerSegmentUb12 + 1);
+ lpcVecs += UB_LPC_ORDER;
+ interpolLPCCoeff += (kLpcVecPerSegmentUb12 * (UB_LPC_ORDER + 1));
+ }
+ break;
+ }
+ case isac16kHz: {
+ /* Store the indices to be used for multiple encoding. */
+ memcpy(encData->indexLPCShape, idx, UB_LPC_ORDER *
+ UB16_LPC_VEC_PER_FRAME * sizeof(int));
+ WebRtcIsac_EncHistMulti(streamdata, idx, WebRtcIsac_kLpcShapeCdfMatUb16,
+ UB_LPC_ORDER * UB16_LPC_VEC_PER_FRAME);
+ for (interpolCntr = 0; interpolCntr < UB16_INTERPOL_SEGMENTS;
+ interpolCntr++) {
+ WebRtcIsac_Lar2PolyInterpolUB(lpcVecs, interpolLPCCoeff,
+ kLpcVecPerSegmentUb16 + 1);
+ lpcVecs += UB_LPC_ORDER;
+ interpolLPCCoeff += (kLpcVecPerSegmentUb16 * (UB_LPC_ORDER + 1));
+ }
+ break;
+ }
+ default:
+ return -1;
+ }
+ return 0;
+}
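+
+/*
+ * The shape-coding chain in WebRtcIsac_EncodeLpcUB above is its own
+ * inverse around the quantizer: analysis (mean removal plus two
+ * decorrelating transforms), quantization, then the mirrored synthesis,
+ * so the encoder keeps exactly the quantized LARs the decoder will
+ * reconstruct. The data flow in isolation, as a minimal sketch
+ * (illustrative only; invented wrapper name):
+ */
+#if 0
+static void UbShapeRoundTripSketch(double* larVecs, int* idx, int16_t bw) {
+  double u[UB_LPC_ORDER * UB16_LPC_VEC_PER_FRAME];
+  WebRtcIsac_RemoveLarMean(larVecs, bw);          /* analysis */
+  WebRtcIsac_DecorrelateIntraVec(larVecs, u, bw);
+  WebRtcIsac_DecorrelateInterVec(u, larVecs, bw);
+  WebRtcIsac_QuantizeUncorrLar(larVecs, idx, bw); /* in-place quantize */
+  WebRtcIsac_CorrelateInterVec(larVecs, u, bw);   /* synthesis */
+  WebRtcIsac_CorrelateIntraVec(u, larVecs, bw);
+  WebRtcIsac_AddLarMean(larVecs, bw);
+}
+#endif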
+
+void WebRtcIsac_EncodeLpcGainLb(double* LPCCoef_lo, double* LPCCoef_hi,
+ Bitstr* streamdata,
+ IsacSaveEncoderData* encData) {
+ int j, k, n, pos, pos2, posg, offsg, offs2;
+ int index_g[KLT_ORDER_GAIN];
+ int index_ovr_g[KLT_ORDER_GAIN];
+ double tmpcoeffs_g[KLT_ORDER_GAIN];
+ double tmpcoeffs2_g[KLT_ORDER_GAIN];
+ double sum;
+ /* log gains, mean removal and scaling */
+ posg = 0;
+ for (k = 0; k < SUBFRAMES; k++) {
+ tmpcoeffs_g[posg] = log(LPCCoef_lo[(LPC_LOBAND_ORDER + 1) * k]);
+ tmpcoeffs_g[posg] -= WebRtcIsac_kLpcMeansGain[posg];
+ tmpcoeffs_g[posg] *= LPC_GAIN_SCALE;
+ posg++;
+ tmpcoeffs_g[posg] = log(LPCCoef_hi[(LPC_HIBAND_ORDER + 1) * k]);
+ tmpcoeffs_g[posg] -= WebRtcIsac_kLpcMeansGain[posg];
+ tmpcoeffs_g[posg] *= LPC_GAIN_SCALE;
+ posg++;
+ }
+
+ /* KLT */
+
+ /* Left transform. */
+ offsg = 0;
+ for (j = 0; j < SUBFRAMES; j++) {
+ posg = offsg;
+ for (k = 0; k < LPC_GAIN_ORDER; k++) {
+ sum = 0;
+ pos = offsg;
+ pos2 = k;
+ for (n = 0; n < LPC_GAIN_ORDER; n++) {
+ sum += tmpcoeffs_g[pos++] * WebRtcIsac_kKltT1Gain[pos2];
+ pos2 += LPC_GAIN_ORDER;
+ }
+ tmpcoeffs2_g[posg++] = sum;
+ }
+ offsg += LPC_GAIN_ORDER;
+ }
+
+ /* Right transform. */
+ offsg = 0;
+ offs2 = 0;
+ for (j = 0; j < SUBFRAMES; j++) {
+ posg = offsg;
+ for (k = 0; k < LPC_GAIN_ORDER; k++) {
+ sum = 0;
+ pos = k;
+ pos2 = offs2;
+ for (n = 0; n < SUBFRAMES; n++) {
+ sum += tmpcoeffs2_g[pos] * WebRtcIsac_kKltT2Gain[pos2++];
+ pos += LPC_GAIN_ORDER;
+ }
+ tmpcoeffs_g[posg++] = sum;
+ }
+ offs2 += SUBFRAMES;
+ offsg += LPC_GAIN_ORDER;
+ }
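+
+  /* Editorial note (deduced from the indexing above, where two log gains
+   * are stored per subframe): the "left transform" multiplies each
+   * subframe's (lo, hi) log-gain pair by the LPC_GAIN_ORDER x
+   * LPC_GAIN_ORDER matrix WebRtcIsac_kKltT1Gain, and the "right transform"
+   * then mixes each coefficient across subframes with the SUBFRAMES x
+   * SUBFRAMES matrix WebRtcIsac_kKltT2Gain -- together a separable 2-D KLT
+   * on the gain grid. */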
+
+ /* Quantize coefficients. */
+ for (k = 0; k < KLT_ORDER_GAIN; k++) {
+ /* Get index. */
+ pos2 = WebRtcIsac_lrint(tmpcoeffs_g[k] / KLT_STEPSIZE);
+ index_g[k] = (pos2) + WebRtcIsac_kQKltQuantMinGain[k];
+ if (index_g[k] < 0) {
+ index_g[k] = 0;
+ } else if (index_g[k] > WebRtcIsac_kQKltMaxIndGain[k]) {
+ index_g[k] = WebRtcIsac_kQKltMaxIndGain[k];
+ }
+ index_ovr_g[k] = WebRtcIsac_kQKltOffsetGain[k] + index_g[k];
+
+ /* Find quantization levels for coefficients. */
+ tmpcoeffs_g[k] = WebRtcIsac_kQKltLevelsGain[index_ovr_g[k]];
+
+ /* Save data for creation of multiple bit streams. */
+ encData->LPCindex_g[KLT_ORDER_GAIN * encData->startIdx + k] = index_g[k];
+ }
+
+ /* Entropy coding of quantization indices - gain. */
+ WebRtcIsac_EncHistMulti(streamdata, index_g, WebRtcIsac_kQKltCdfPtrGain,
+ KLT_ORDER_GAIN);
+
+ /* Find quantization levels for coefficients. */
+ /* Left transform. */
+ offsg = 0;
+ posg = 0;
+ for (j = 0; j < SUBFRAMES; j++) {
+ offs2 = 0;
+ for (k = 0; k < LPC_GAIN_ORDER; k++) {
+ sum = 0;
+ pos = offsg;
+ pos2 = offs2;
+ for (n = 0; n < LPC_GAIN_ORDER; n++)
+ sum += tmpcoeffs_g[pos++] * WebRtcIsac_kKltT1Gain[pos2++];
+ tmpcoeffs2_g[posg++] = sum;
+ offs2 += LPC_GAIN_ORDER;
+ }
+ offsg += LPC_GAIN_ORDER;
+ }
+
+ /* Right transform, transpose matrix. */
+ offsg = 0;
+ posg = 0;
+ for (j = 0; j < SUBFRAMES; j++) {
+ posg = offsg;
+ for (k = 0; k < LPC_GAIN_ORDER; k++) {
+ sum = 0;
+ pos = k;
+ pos2 = j;
+ for (n = 0; n < SUBFRAMES; n++) {
+ sum += tmpcoeffs2_g[pos] * WebRtcIsac_kKltT2Gain[pos2];
+ pos += LPC_GAIN_ORDER;
+ pos2 += SUBFRAMES;
+ }
+ tmpcoeffs_g[posg++] = sum;
+ }
+ offsg += LPC_GAIN_ORDER;
+ }
+
+
+ /* Scaling, mean addition, and gain restoration. */
+ posg = 0;
+ for (k = 0; k < SUBFRAMES; k++) {
+ sum = tmpcoeffs_g[posg] / LPC_GAIN_SCALE;
+ sum += WebRtcIsac_kLpcMeansGain[posg];
+ LPCCoef_lo[k * (LPC_LOBAND_ORDER + 1)] = exp(sum);
+ posg++;
+ sum = tmpcoeffs_g[posg] / LPC_GAIN_SCALE;
+ sum += WebRtcIsac_kLpcMeansGain[posg];
+ LPCCoef_hi[k * (LPC_HIBAND_ORDER + 1)] = exp(sum);
+ posg++;
+ }
+
+}
+
+void WebRtcIsac_EncodeLpcGainUb(double* lpGains, Bitstr* streamdata,
+ int* lpcGainIndex) {
+ double U[UB_LPC_GAIN_DIM];
+ int idx[UB_LPC_GAIN_DIM];
+ WebRtcIsac_ToLogDomainRemoveMean(lpGains);
+ WebRtcIsac_DecorrelateLPGain(lpGains, U);
+ WebRtcIsac_QuantizeLpcGain(U, idx);
+ /* Store the index for re-encoding for FEC. */
+ memcpy(lpcGainIndex, idx, UB_LPC_GAIN_DIM * sizeof(int));
+ WebRtcIsac_CorrelateLpcGain(U, lpGains);
+ WebRtcIsac_AddMeanToLinearDomain(lpGains);
+ WebRtcIsac_EncHistMulti(streamdata, idx, WebRtcIsac_kLpcGainCdfMat,
+ UB_LPC_GAIN_DIM);
+}
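+
+/* Editorial note: per the header documentation, the correlate/add-mean
+ * calls after quantization overwrite the caller's lpGains with their
+ * quantized values, so the encoder's local state matches what
+ * WebRtcIsac_DecodeLpcGainUb() will reconstruct on the far side. */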
+
+
+void WebRtcIsac_StoreLpcGainUb(double* lpGains, Bitstr* streamdata) {
+ double U[UB_LPC_GAIN_DIM];
+ int idx[UB_LPC_GAIN_DIM];
+ WebRtcIsac_ToLogDomainRemoveMean(lpGains);
+ WebRtcIsac_DecorrelateLPGain(lpGains, U);
+ WebRtcIsac_QuantizeLpcGain(U, idx);
+ WebRtcIsac_EncHistMulti(streamdata, idx, WebRtcIsac_kLpcGainCdfMat,
+ UB_LPC_GAIN_DIM);
+}
+
+
+
+int16_t WebRtcIsac_DecodeLpcGainUb(double* lpGains, Bitstr* streamdata) {
+ double U[UB_LPC_GAIN_DIM];
+ int idx[UB_LPC_GAIN_DIM];
+ int err;
+ err = WebRtcIsac_DecHistOneStepMulti(idx, streamdata,
+ WebRtcIsac_kLpcGainCdfMat,
+ WebRtcIsac_kLpcGainEntropySearch,
+ UB_LPC_GAIN_DIM);
+ if (err < 0) {
+ return -1;
+ }
+ WebRtcIsac_DequantizeLpcGain(idx, U);
+ WebRtcIsac_CorrelateLpcGain(U, lpGains);
+ WebRtcIsac_AddMeanToLinearDomain(lpGains);
+ return 0;
+}
+
+
+
+/* decode & dequantize RC */
+int WebRtcIsac_DecodeRc(Bitstr* streamdata, int16_t* RCQ15) {
+ int k, err;
+ int index[AR_ORDER];
+
+ /* entropy decoding of quantization indices */
+ err = WebRtcIsac_DecHistOneStepMulti(index, streamdata,
+ WebRtcIsac_kQArRcCdfPtr,
+ WebRtcIsac_kQArRcInitIndex, AR_ORDER);
+ if (err < 0)
+ return err;
+
+ /* find quantization levels for reflection coefficients */
+ for (k = 0; k < AR_ORDER; k++) {
+ RCQ15[k] = *(WebRtcIsac_kQArRcLevelsPtr[k] + index[k]);
+ }
+ return 0;
+}
+
+
+/* quantize & code RC */
+void WebRtcIsac_EncodeRc(int16_t* RCQ15, Bitstr* streamdata) {
+ int k;
+ int index[AR_ORDER];
+
+ /* quantize reflection coefficients (add noise feedback?) */
+ for (k = 0; k < AR_ORDER; k++) {
+ index[k] = WebRtcIsac_kQArRcInitIndex[k];
+    // The safe-guards in the following while conditions are there to
+    // suppress gcc 4.8.3 warnings (Issue 2888). Otherwise, the first and
+    // last elements of `WebRtcIsac_kQArBoundaryLevels` are such that the
+    // following search can *never* cause an out-of-bounds read.
+ if (RCQ15[k] > WebRtcIsac_kQArBoundaryLevels[index[k]]) {
+ while (index[k] + 1 < NUM_AR_RC_QUANT_BAUNDARY &&
+ RCQ15[k] > WebRtcIsac_kQArBoundaryLevels[index[k] + 1]) {
+ index[k]++;
+ }
+ } else {
+ while (index[k] > 0 &&
+ RCQ15[k] < WebRtcIsac_kQArBoundaryLevels[--index[k]]) ;
+ }
+ RCQ15[k] = *(WebRtcIsac_kQArRcLevelsPtr[k] + index[k]);
+ }
+
+ /* entropy coding of quantization indices */
+ WebRtcIsac_EncHistMulti(streamdata, index, WebRtcIsac_kQArRcCdfPtr, AR_ORDER);
+}
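+
+/* Editorial sketch (not part of the codec): the quantizer above performs a
+ * linear search over monotonically increasing boundary levels, starting
+ * from a per-coefficient initial index; WebRtcIsac_EncodeGain2() below
+ * follows a similar search. A minimal standalone version, with
+ * hypothetical names:
+ *
+ *   static int QuantizeLinearSearch(int16_t v, const int16_t* bounds,
+ *                                   int num_bounds, int init_index) {
+ *     int i = init_index;
+ *     if (v > bounds[i]) {
+ *       while (i + 1 < num_bounds && v > bounds[i + 1]) i++;
+ *     } else {
+ *       while (i > 0 && v < bounds[--i]) ;
+ *     }
+ *     return i;
+ *   }
+ *
+ * Here `bounds`, `num_bounds`, and `init_index` stand in for
+ * WebRtcIsac_kQArBoundaryLevels, NUM_AR_RC_QUANT_BAUNDARY, and
+ * WebRtcIsac_kQArRcInitIndex[k]. */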
+
+
+/* decode & dequantize squared Gain */
+int WebRtcIsac_DecodeGain2(Bitstr* streamdata, int32_t* gainQ10) {
+ int index, err;
+
+ /* entropy decoding of quantization index */
+ err = WebRtcIsac_DecHistOneStepMulti(&index, streamdata,
+ WebRtcIsac_kQGainCdf_ptr,
+ WebRtcIsac_kQGainInitIndex, 1);
+ if (err < 0) {
+ return err;
+ }
+ /* find quantization level */
+ *gainQ10 = WebRtcIsac_kQGain2Levels[index];
+ return 0;
+}
+
+
+/* quantize & code squared Gain */
+int WebRtcIsac_EncodeGain2(int32_t* gainQ10, Bitstr* streamdata) {
+ int index;
+
+ /* find quantization index */
+ index = WebRtcIsac_kQGainInitIndex[0];
+ if (*gainQ10 > WebRtcIsac_kQGain2BoundaryLevels[index]) {
+ while (*gainQ10 > WebRtcIsac_kQGain2BoundaryLevels[index + 1]) {
+ index++;
+ }
+ } else {
+ while (*gainQ10 < WebRtcIsac_kQGain2BoundaryLevels[--index]) ;
+ }
+ /* De-quantize */
+ *gainQ10 = WebRtcIsac_kQGain2Levels[index];
+
+ /* entropy coding of quantization index */
+ WebRtcIsac_EncHistMulti(streamdata, &index, WebRtcIsac_kQGainCdf_ptr, 1);
+ return 0;
+}
+
+
+/* code and decode Pitch Gains and Lags functions */
+
+/* decode & dequantize Pitch Gains */
+int WebRtcIsac_DecodePitchGain(Bitstr* streamdata,
+ int16_t* PitchGains_Q12) {
+ int index_comb, err;
+ const uint16_t* WebRtcIsac_kQPitchGainCdf_ptr[1];
+
+ /* Entropy decoding of quantization indices */
+ *WebRtcIsac_kQPitchGainCdf_ptr = WebRtcIsac_kQPitchGainCdf;
+ err = WebRtcIsac_DecHistBisectMulti(&index_comb, streamdata,
+ WebRtcIsac_kQPitchGainCdf_ptr,
+ WebRtcIsac_kQCdfTableSizeGain, 1);
+  /* Error check: the WebRtcIsac_kQMeanGain*Q12 tables are of size 144. */
+ if ((err < 0) || (index_comb < 0) || (index_comb >= 144)) {
+ return -ISAC_RANGE_ERROR_DECODE_PITCH_GAIN;
+ }
+ /* De-quantize back to pitch gains by table look-up. */
+ PitchGains_Q12[0] = WebRtcIsac_kQMeanGain1Q12[index_comb];
+ PitchGains_Q12[1] = WebRtcIsac_kQMeanGain2Q12[index_comb];
+ PitchGains_Q12[2] = WebRtcIsac_kQMeanGain3Q12[index_comb];
+ PitchGains_Q12[3] = WebRtcIsac_kQMeanGain4Q12[index_comb];
+ return 0;
+}
+
+
+/* Quantize & code Pitch Gains. */
+void WebRtcIsac_EncodePitchGain(int16_t* PitchGains_Q12,
+ Bitstr* streamdata,
+ IsacSaveEncoderData* encData) {
+ int k, j;
+ double C;
+ double S[PITCH_SUBFRAMES];
+ int index[3];
+ int index_comb;
+ const uint16_t* WebRtcIsac_kQPitchGainCdf_ptr[1];
+ double PitchGains[PITCH_SUBFRAMES] = {0, 0, 0, 0};
+
+ /* Take the asin. */
+ for (k = 0; k < PITCH_SUBFRAMES; k++) {
+ PitchGains[k] = ((float)PitchGains_Q12[k]) / 4096;
+ S[k] = asin(PitchGains[k]);
+ }
+
+ /* Find quantization index; only for the first three
+ * transform coefficients. */
+ for (k = 0; k < 3; k++) {
+ /* transform */
+ C = 0.0;
+ for (j = 0; j < PITCH_SUBFRAMES; j++) {
+ C += WebRtcIsac_kTransform[k][j] * S[j];
+ }
+ /* Quantize */
+ index[k] = WebRtcIsac_lrint(C / PITCH_GAIN_STEPSIZE);
+
+ /* Check that the index is not outside the boundaries of the table. */
+ if (index[k] < WebRtcIsac_kIndexLowerLimitGain[k]) {
+ index[k] = WebRtcIsac_kIndexLowerLimitGain[k];
+ } else if (index[k] > WebRtcIsac_kIndexUpperLimitGain[k]) {
+ index[k] = WebRtcIsac_kIndexUpperLimitGain[k];
+ }
+ index[k] -= WebRtcIsac_kIndexLowerLimitGain[k];
+ }
+
+ /* Calculate unique overall index. */
+ index_comb = WebRtcIsac_kIndexMultsGain[0] * index[0] +
+ WebRtcIsac_kIndexMultsGain[1] * index[1] + index[2];
+
+ /* unquantize back to pitch gains by table look-up */
+ PitchGains_Q12[0] = WebRtcIsac_kQMeanGain1Q12[index_comb];
+ PitchGains_Q12[1] = WebRtcIsac_kQMeanGain2Q12[index_comb];
+ PitchGains_Q12[2] = WebRtcIsac_kQMeanGain3Q12[index_comb];
+ PitchGains_Q12[3] = WebRtcIsac_kQMeanGain4Q12[index_comb];
+
+ /* entropy coding of quantization pitch gains */
+ *WebRtcIsac_kQPitchGainCdf_ptr = WebRtcIsac_kQPitchGainCdf;
+ WebRtcIsac_EncHistMulti(streamdata, &index_comb,
+ WebRtcIsac_kQPitchGainCdf_ptr, 1);
+ encData->pitchGain_index[encData->startIdx] = index_comb;
+}
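+
+/* Editorial sketch (hypothetical helper, not in the codec): the Q12-to-angle
+ * mapping used above, isolated for clarity.
+ *
+ *   #include <math.h>
+ *   static double PitchGainQ12ToAngle(int16_t gain_q12) {
+ *     double gain = (double)gain_q12 / 4096.0;  // undo Q12 scaling (2^12)
+ *     return asin(gain);  // e.g. 2048 -> 0.5 -> ~0.5236 rad
+ *   }
+ *
+ * The three quantized transform indices are then packed into one symbol as
+ * index_comb = WebRtcIsac_kIndexMultsGain[0] * index[0] +
+ * WebRtcIsac_kIndexMultsGain[1] * index[1] + index[2], which is what the
+ * single-symbol entropy coder consumes. */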
+
+
+
+/* Pitch LAG */
+/* Decode & de-quantize Pitch Lags. */
+int WebRtcIsac_DecodePitchLag(Bitstr* streamdata, int16_t* PitchGain_Q12,
+ double* PitchLags) {
+ int k, err;
+ double StepSize;
+ double C;
+ int index[PITCH_SUBFRAMES];
+ double mean_gain;
+ const double* mean_val2, *mean_val3, *mean_val4;
+ const int16_t* lower_limit;
+ const uint16_t* init_index;
+ const uint16_t* cdf_size;
+ const uint16_t** cdf;
+ double PitchGain[4] = {0, 0, 0, 0};
+
+ /* compute mean pitch gain */
+ mean_gain = 0.0;
+ for (k = 0; k < 4; k++) {
+ PitchGain[k] = ((float)PitchGain_Q12[k]) / 4096;
+ mean_gain += PitchGain[k];
+ }
+ mean_gain /= 4.0;
+
+ /* voicing classification. */
+ if (mean_gain < 0.2) {
+ StepSize = WebRtcIsac_kQPitchLagStepsizeLo;
+ cdf = WebRtcIsac_kQPitchLagCdfPtrLo;
+ cdf_size = WebRtcIsac_kQPitchLagCdfSizeLo;
+ mean_val2 = WebRtcIsac_kQMeanLag2Lo;
+ mean_val3 = WebRtcIsac_kQMeanLag3Lo;
+ mean_val4 = WebRtcIsac_kQMeanLag4Lo;
+ lower_limit = WebRtcIsac_kQIndexLowerLimitLagLo;
+ init_index = WebRtcIsac_kQInitIndexLagLo;
+ } else if (mean_gain < 0.4) {
+ StepSize = WebRtcIsac_kQPitchLagStepsizeMid;
+ cdf = WebRtcIsac_kQPitchLagCdfPtrMid;
+ cdf_size = WebRtcIsac_kQPitchLagCdfSizeMid;
+ mean_val2 = WebRtcIsac_kQMeanLag2Mid;
+ mean_val3 = WebRtcIsac_kQMeanLag3Mid;
+ mean_val4 = WebRtcIsac_kQMeanLag4Mid;
+ lower_limit = WebRtcIsac_kQIndexLowerLimitLagMid;
+ init_index = WebRtcIsac_kQInitIndexLagMid;
+ } else {
+ StepSize = WebRtcIsac_kQPitchLagStepsizeHi;
+ cdf = WebRtcIsac_kQPitchLagCdfPtrHi;
+ cdf_size = WebRtcIsac_kQPitchLagCdfSizeHi;
+ mean_val2 = WebRtcIsac_kQMeanLag2Hi;
+ mean_val3 = WebRtcIsac_kQMeanLag3Hi;
+ mean_val4 = WebRtcIsac_kQMeanLag4Hi;
+ lower_limit = WebRtcIsac_kQindexLowerLimitLagHi;
+ init_index = WebRtcIsac_kQInitIndexLagHi;
+ }
+
+ /* Entropy decoding of quantization indices. */
+ err = WebRtcIsac_DecHistBisectMulti(index, streamdata, cdf, cdf_size, 1);
+ if ((err < 0) || (index[0] < 0)) {
+ return -ISAC_RANGE_ERROR_DECODE_PITCH_LAG;
+ }
+ err = WebRtcIsac_DecHistOneStepMulti(index + 1, streamdata, cdf + 1,
+ init_index, 3);
+ if (err < 0) {
+ return -ISAC_RANGE_ERROR_DECODE_PITCH_LAG;
+ }
+
+ /* Unquantize back to transform coefficients and do the inverse transform:
+ * S = T'*C. */
+ C = (index[0] + lower_limit[0]) * StepSize;
+ for (k = 0; k < PITCH_SUBFRAMES; k++) {
+ PitchLags[k] = WebRtcIsac_kTransformTranspose[k][0] * C;
+ }
+ C = mean_val2[index[1]];
+ for (k = 0; k < PITCH_SUBFRAMES; k++) {
+ PitchLags[k] += WebRtcIsac_kTransformTranspose[k][1] * C;
+ }
+ C = mean_val3[index[2]];
+ for (k = 0; k < PITCH_SUBFRAMES; k++) {
+ PitchLags[k] += WebRtcIsac_kTransformTranspose[k][2] * C;
+ }
+ C = mean_val4[index[3]];
+ for (k = 0; k < PITCH_SUBFRAMES; k++) {
+ PitchLags[k] += WebRtcIsac_kTransformTranspose[k][3] * C;
+ }
+ return 0;
+}
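+
+/* Editorial sketch: the four accumulation loops above are one 4x4
+ * matrix-vector product, PitchLags = T' * C, unrolled per column:
+ *
+ *   int j;
+ *   double Cvec[PITCH_SUBFRAMES];  // hypothetical local
+ *   Cvec[0] = (index[0] + lower_limit[0]) * StepSize;
+ *   Cvec[1] = mean_val2[index[1]];
+ *   Cvec[2] = mean_val3[index[2]];
+ *   Cvec[3] = mean_val4[index[3]];
+ *   for (k = 0; k < PITCH_SUBFRAMES; k++) {
+ *     PitchLags[k] = 0.0;
+ *     for (j = 0; j < PITCH_SUBFRAMES; j++)
+ *       PitchLags[k] += WebRtcIsac_kTransformTranspose[k][j] * Cvec[j];
+ *   }
+ */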
+
+
+
+/* Quantize & code pitch lags. */
+void WebRtcIsac_EncodePitchLag(double* PitchLags, int16_t* PitchGain_Q12,
+ Bitstr* streamdata,
+ IsacSaveEncoderData* encData) {
+ int k, j;
+ double StepSize;
+ double C;
+ int index[PITCH_SUBFRAMES];
+ double mean_gain;
+ const double* mean_val2, *mean_val3, *mean_val4;
+ const int16_t* lower_limit, *upper_limit;
+ const uint16_t** cdf;
+ double PitchGain[4] = {0, 0, 0, 0};
+
+ /* compute mean pitch gain */
+ mean_gain = 0.0;
+ for (k = 0; k < 4; k++) {
+ PitchGain[k] = ((float)PitchGain_Q12[k]) / 4096;
+ mean_gain += PitchGain[k];
+ }
+ mean_gain /= 4.0;
+
+ /* Save data for creation of multiple bit streams */
+ encData->meanGain[encData->startIdx] = mean_gain;
+
+ /* Voicing classification. */
+ if (mean_gain < 0.2) {
+ StepSize = WebRtcIsac_kQPitchLagStepsizeLo;
+ cdf = WebRtcIsac_kQPitchLagCdfPtrLo;
+ mean_val2 = WebRtcIsac_kQMeanLag2Lo;
+ mean_val3 = WebRtcIsac_kQMeanLag3Lo;
+ mean_val4 = WebRtcIsac_kQMeanLag4Lo;
+ lower_limit = WebRtcIsac_kQIndexLowerLimitLagLo;
+ upper_limit = WebRtcIsac_kQIndexUpperLimitLagLo;
+ } else if (mean_gain < 0.4) {
+ StepSize = WebRtcIsac_kQPitchLagStepsizeMid;
+ cdf = WebRtcIsac_kQPitchLagCdfPtrMid;
+ mean_val2 = WebRtcIsac_kQMeanLag2Mid;
+ mean_val3 = WebRtcIsac_kQMeanLag3Mid;
+ mean_val4 = WebRtcIsac_kQMeanLag4Mid;
+ lower_limit = WebRtcIsac_kQIndexLowerLimitLagMid;
+ upper_limit = WebRtcIsac_kQIndexUpperLimitLagMid;
+ } else {
+ StepSize = WebRtcIsac_kQPitchLagStepsizeHi;
+ cdf = WebRtcIsac_kQPitchLagCdfPtrHi;
+ mean_val2 = WebRtcIsac_kQMeanLag2Hi;
+ mean_val3 = WebRtcIsac_kQMeanLag3Hi;
+ mean_val4 = WebRtcIsac_kQMeanLag4Hi;
+ lower_limit = WebRtcIsac_kQindexLowerLimitLagHi;
+ upper_limit = WebRtcIsac_kQindexUpperLimitLagHi;
+ }
+
+ /* find quantization index */
+ for (k = 0; k < 4; k++) {
+ /* transform */
+ C = 0.0;
+ for (j = 0; j < PITCH_SUBFRAMES; j++) {
+ C += WebRtcIsac_kTransform[k][j] * PitchLags[j];
+ }
+ /* quantize */
+ index[k] = WebRtcIsac_lrint(C / StepSize);
+
+ /* check that the index is not outside the boundaries of the table */
+ if (index[k] < lower_limit[k]) {
+ index[k] = lower_limit[k];
+    } else if (index[k] > upper_limit[k]) {
+      index[k] = upper_limit[k];
+    }
+    index[k] -= lower_limit[k];
+ /* Save data for creation of multiple bit streams */
+ encData->pitchIndex[PITCH_SUBFRAMES * encData->startIdx + k] = index[k];
+ }
+
+ /* Un-quantize back to transform coefficients and do the inverse transform:
+ * S = T'*C */
+ C = (index[0] + lower_limit[0]) * StepSize;
+ for (k = 0; k < PITCH_SUBFRAMES; k++) {
+ PitchLags[k] = WebRtcIsac_kTransformTranspose[k][0] * C;
+ }
+ C = mean_val2[index[1]];
+ for (k = 0; k < PITCH_SUBFRAMES; k++) {
+ PitchLags[k] += WebRtcIsac_kTransformTranspose[k][1] * C;
+ }
+ C = mean_val3[index[2]];
+ for (k = 0; k < PITCH_SUBFRAMES; k++) {
+ PitchLags[k] += WebRtcIsac_kTransformTranspose[k][2] * C;
+ }
+ C = mean_val4[index[3]];
+ for (k = 0; k < PITCH_SUBFRAMES; k++) {
+ PitchLags[k] += WebRtcIsac_kTransformTranspose[k][3] * C;
+ }
+ /* entropy coding of quantization pitch lags */
+ WebRtcIsac_EncHistMulti(streamdata, index, cdf, PITCH_SUBFRAMES);
+}
+
+
+
+/* Routines for in-band signaling of bandwidth estimation */
+/* Histograms based on uniform distribution of indices */
+/* Move global variables later! */
+
+
+/* cdf array for frame length indicator */
+const uint16_t WebRtcIsac_kFrameLengthCdf[4] = {
+ 0, 21845, 43690, 65535 };
+
+/* pointer to cdf array for frame length indicator */
+const uint16_t* WebRtcIsac_kFrameLengthCdf_ptr[1] = {
+ WebRtcIsac_kFrameLengthCdf };
+
+/* initial cdf index for decoder of frame length indicator */
+const uint16_t WebRtcIsac_kFrameLengthInitIndex[1] = { 1 };
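+
+/* Editorial note: the steps above are uniform (65535 / 3 = 21845), so the
+ * three symbols are equiprobable; only modes 1 and 2 are legal, and symbol 0
+ * is rejected by the decoder below. A hedged sketch of how such a uniform,
+ * nearest-step-rounded CDF table could be generated (hypothetical helper,
+ * not part of the codec):
+ *
+ *   static void MakeUniformCdf(uint16_t* cdf, int num_symbols) {
+ *     int i;  // cdf has num_symbols + 1 entries spanning [0, 65535]
+ *     for (i = 0; i <= num_symbols; i++)
+ *       cdf[i] = (uint16_t)((65535 * i + num_symbols / 2) / num_symbols);
+ *   }
+ */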
+
+
+int WebRtcIsac_DecodeFrameLen(Bitstr* streamdata, int16_t* framesamples) {
+ int frame_mode, err;
+ err = 0;
+ /* entropy decoding of frame length [1:30ms,2:60ms] */
+ err = WebRtcIsac_DecHistOneStepMulti(&frame_mode, streamdata,
+ WebRtcIsac_kFrameLengthCdf_ptr,
+ WebRtcIsac_kFrameLengthInitIndex, 1);
+ if (err < 0)
+ return -ISAC_RANGE_ERROR_DECODE_FRAME_LENGTH;
+
+ switch (frame_mode) {
+ case 1:
+ *framesamples = 480; /* 30ms */
+ break;
+ case 2:
+ *framesamples = 960; /* 60ms */
+ break;
+ default:
+ err = -ISAC_DISALLOWED_FRAME_MODE_DECODER;
+ }
+ return err;
+}
+
+int WebRtcIsac_EncodeFrameLen(int16_t framesamples, Bitstr* streamdata) {
+ int frame_mode, status;
+
+ status = 0;
+ frame_mode = 0;
+ /* entropy coding of frame length [1:480 samples,2:960 samples] */
+ switch (framesamples) {
+ case 480:
+ frame_mode = 1;
+ break;
+ case 960:
+ frame_mode = 2;
+ break;
+ default:
+      status = -ISAC_DISALLOWED_FRAME_MODE_ENCODER;
+ }
+
+ if (status < 0)
+ return status;
+
+ WebRtcIsac_EncHistMulti(streamdata, &frame_mode,
+ WebRtcIsac_kFrameLengthCdf_ptr, 1);
+ return status;
+}
+
+/* cdf array for estimated bandwidth */
+static const uint16_t kBwCdf[25] = {
+ 0, 2731, 5461, 8192, 10923, 13653, 16384, 19114, 21845, 24576, 27306, 30037,
+ 32768, 35498, 38229, 40959, 43690, 46421, 49151, 51882, 54613, 57343, 60074,
+ 62804, 65535 };
+
+/* pointer to cdf array for estimated bandwidth */
+static const uint16_t* const kBwCdfPtr[1] = { kBwCdf };
+
+/* initial cdf index for decoder of estimated bandwidth */
+static const uint16_t kBwInitIndex[1] = { 7 };
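+
+/* Editorial note: same uniform construction as the frame-length CDF above;
+ * the hypothetical MakeUniformCdf() sketch given there reproduces this table
+ * with num_symbols = 24, so each bandwidth index in [0..23] costs about
+ * log2(24) ~= 4.58 bits. */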
+
+
+int WebRtcIsac_DecodeSendBW(Bitstr* streamdata, int16_t* BWno) {
+ int BWno32, err;
+
+ /* entropy decoding of sender's BW estimation [0..23] */
+ err = WebRtcIsac_DecHistOneStepMulti(&BWno32, streamdata, kBwCdfPtr,
+ kBwInitIndex, 1);
+ if (err < 0) {
+ return -ISAC_RANGE_ERROR_DECODE_BANDWIDTH;
+ }
+ *BWno = (int16_t)BWno32;
+ return err;
+}
+
+void WebRtcIsac_EncodeReceiveBw(int* BWno, Bitstr* streamdata) {
+ /* entropy encoding of receiver's BW estimation [0..23] */
+ WebRtcIsac_EncHistMulti(streamdata, BWno, kBwCdfPtr, 1);
+}
+
+
+/* estimate code length of LPC Coef */
+void WebRtcIsac_TranscodeLPCCoef(double* LPCCoef_lo, double* LPCCoef_hi,
+ int* index_g) {
+ int j, k, n, pos, pos2, posg, offsg, offs2;
+ int index_ovr_g[KLT_ORDER_GAIN];
+ double tmpcoeffs_g[KLT_ORDER_GAIN];
+ double tmpcoeffs2_g[KLT_ORDER_GAIN];
+ double sum;
+
+ /* log gains, mean removal and scaling */
+ posg = 0;
+ for (k = 0; k < SUBFRAMES; k++) {
+ tmpcoeffs_g[posg] = log(LPCCoef_lo[(LPC_LOBAND_ORDER + 1) * k]);
+ tmpcoeffs_g[posg] -= WebRtcIsac_kLpcMeansGain[posg];
+ tmpcoeffs_g[posg] *= LPC_GAIN_SCALE;
+ posg++;
+ tmpcoeffs_g[posg] = log(LPCCoef_hi[(LPC_HIBAND_ORDER + 1) * k]);
+ tmpcoeffs_g[posg] -= WebRtcIsac_kLpcMeansGain[posg];
+ tmpcoeffs_g[posg] *= LPC_GAIN_SCALE;
+ posg++;
+ }
+
+ /* KLT */
+
+ /* Left transform. */
+ offsg = 0;
+ for (j = 0; j < SUBFRAMES; j++) {
+ posg = offsg;
+ for (k = 0; k < LPC_GAIN_ORDER; k++) {
+ sum = 0;
+ pos = offsg;
+ pos2 = k;
+ for (n = 0; n < LPC_GAIN_ORDER; n++) {
+ sum += tmpcoeffs_g[pos++] * WebRtcIsac_kKltT1Gain[pos2];
+ pos2 += LPC_GAIN_ORDER;
+ }
+ tmpcoeffs2_g[posg++] = sum;
+ }
+ offsg += LPC_GAIN_ORDER;
+ }
+
+ /* Right transform. */
+ offsg = 0;
+ offs2 = 0;
+ for (j = 0; j < SUBFRAMES; j++) {
+ posg = offsg;
+ for (k = 0; k < LPC_GAIN_ORDER; k++) {
+ sum = 0;
+ pos = k;
+ pos2 = offs2;
+ for (n = 0; n < SUBFRAMES; n++) {
+ sum += tmpcoeffs2_g[pos] * WebRtcIsac_kKltT2Gain[pos2++];
+ pos += LPC_GAIN_ORDER;
+ }
+ tmpcoeffs_g[posg++] = sum;
+ }
+ offs2 += SUBFRAMES;
+ offsg += LPC_GAIN_ORDER;
+ }
+
+
+ /* quantize coefficients */
+ for (k = 0; k < KLT_ORDER_GAIN; k++) {
+ /* Get index. */
+ pos2 = WebRtcIsac_lrint(tmpcoeffs_g[k] / KLT_STEPSIZE);
+ index_g[k] = (pos2) + WebRtcIsac_kQKltQuantMinGain[k];
+ if (index_g[k] < 0) {
+ index_g[k] = 0;
+ } else if (index_g[k] > WebRtcIsac_kQKltMaxIndGain[k]) {
+ index_g[k] = WebRtcIsac_kQKltMaxIndGain[k];
+ }
+ index_ovr_g[k] = WebRtcIsac_kQKltOffsetGain[k] + index_g[k];
+
+ /* find quantization levels for coefficients */
+ tmpcoeffs_g[k] = WebRtcIsac_kQKltLevelsGain[index_ovr_g[k]];
+ }
+}
+
+
+/* Decode & de-quantize LPC Coefficients. */
+int WebRtcIsac_DecodeLpcCoefUB(Bitstr* streamdata, double* lpcVecs,
+ double* percepFilterGains,
+ int16_t bandwidth) {
+ int index_s[KLT_ORDER_SHAPE];
+
+ double U[UB_LPC_ORDER * UB16_LPC_VEC_PER_FRAME];
+ int err;
+
+ /* Entropy decoding of quantization indices. */
+ switch (bandwidth) {
+ case isac12kHz: {
+ err = WebRtcIsac_DecHistOneStepMulti(
+ index_s, streamdata, WebRtcIsac_kLpcShapeCdfMatUb12,
+ WebRtcIsac_kLpcShapeEntropySearchUb12, UB_LPC_ORDER *
+ UB_LPC_VEC_PER_FRAME);
+ break;
+ }
+ case isac16kHz: {
+ err = WebRtcIsac_DecHistOneStepMulti(
+ index_s, streamdata, WebRtcIsac_kLpcShapeCdfMatUb16,
+ WebRtcIsac_kLpcShapeEntropySearchUb16, UB_LPC_ORDER *
+ UB16_LPC_VEC_PER_FRAME);
+ break;
+ }
+ default:
+ return -1;
+ }
+
+ if (err < 0) {
+ return err;
+ }
+
+ WebRtcIsac_DequantizeLpcParam(index_s, lpcVecs, bandwidth);
+ WebRtcIsac_CorrelateInterVec(lpcVecs, U, bandwidth);
+ WebRtcIsac_CorrelateIntraVec(U, lpcVecs, bandwidth);
+ WebRtcIsac_AddLarMean(lpcVecs, bandwidth);
+ WebRtcIsac_DecodeLpcGainUb(percepFilterGains, streamdata);
+
+ if (bandwidth == isac16kHz) {
+ /* Decode another set of Gains. */
+ WebRtcIsac_DecodeLpcGainUb(&percepFilterGains[SUBFRAMES], streamdata);
+ }
+ return 0;
+}
+
+int16_t WebRtcIsac_EncodeBandwidth(enum ISACBandwidth bandwidth,
+ Bitstr* streamData) {
+ int bandwidthMode;
+ switch (bandwidth) {
+ case isac12kHz: {
+ bandwidthMode = 0;
+ break;
+ }
+ case isac16kHz: {
+ bandwidthMode = 1;
+ break;
+ }
+ default:
+ return -ISAC_DISALLOWED_ENCODER_BANDWIDTH;
+ }
+ WebRtcIsac_EncHistMulti(streamData, &bandwidthMode, kOneBitEqualProbCdf_ptr,
+ 1);
+ return 0;
+}
+
+int16_t WebRtcIsac_DecodeBandwidth(Bitstr* streamData,
+ enum ISACBandwidth* bandwidth) {
+ int bandwidthMode;
+ if (WebRtcIsac_DecHistOneStepMulti(&bandwidthMode, streamData,
+ kOneBitEqualProbCdf_ptr,
+ kOneBitEqualProbInitIndex, 1) < 0) {
+ return -ISAC_RANGE_ERROR_DECODE_BANDWITH;
+ }
+ switch (bandwidthMode) {
+ case 0: {
+ *bandwidth = isac12kHz;
+ break;
+ }
+ case 1: {
+ *bandwidth = isac16kHz;
+ break;
+ }
+ default:
+ return -ISAC_DISALLOWED_BANDWIDTH_MODE_DECODER;
+ }
+ return 0;
+}
+
+int16_t WebRtcIsac_EncodeJitterInfo(int32_t jitterIndex,
+ Bitstr* streamData) {
+ /* This is to avoid LINUX warning until we change 'int' to 'Word32'. */
+ int intVar;
+
+ if ((jitterIndex < 0) || (jitterIndex > 1)) {
+ return -1;
+ }
+ intVar = (int)(jitterIndex);
+  /* Use the same CDF table as for bandwidth;
+   * both take two values with equal probability. */
+ WebRtcIsac_EncHistMulti(streamData, &intVar, kOneBitEqualProbCdf_ptr, 1);
+ return 0;
+}
+
+int16_t WebRtcIsac_DecodeJitterInfo(Bitstr* streamData,
+ int32_t* jitterInfo) {
+ int intVar;
+  /* Use the same CDF table as for bandwidth;
+ * both take two values with equal probability. */
+ if (WebRtcIsac_DecHistOneStepMulti(&intVar, streamData,
+ kOneBitEqualProbCdf_ptr,
+ kOneBitEqualProbInitIndex, 1) < 0) {
+ return -ISAC_RANGE_ERROR_DECODE_BANDWITH;
+ }
+ *jitterInfo = (int16_t)(intVar);
+ return 0;
+}
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/entropy_coding.h b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/entropy_coding.h
new file mode 100644
index 0000000000..6c2b8d3cc1
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/entropy_coding.h
@@ -0,0 +1,347 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * entropy_coding.h
+ *
+ * This header file declares all of the functions used to arithmetically
+ * encode the iSAC bitstream.
+ *
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_ENTROPY_CODING_H_
+#define MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_ENTROPY_CODING_H_
+
+#include "modules/audio_coding/codecs/isac/main/source/settings.h"
+#include "modules/audio_coding/codecs/isac/main/source/structs.h"
+
+/******************************************************************************
+ * WebRtcIsac_DecodeSpec()
+ * Decode the real and imaginary parts of the DFT coefficients, given a
+ * bit-stream. The decoded DFT coefficients can be transformed to the time
+ * domain by WebRtcIsac_Spec2Time().
+ *
+ * Input:
+ *      - streamdata       : pointer to a structure containing the encoded
+ *                            data and the parameters needed for entropy
+ * coding.
+ * - AvgPitchGain_Q12 : average pitch-gain of the frame. This is only
+ * relevant for 0-4 kHz band, and the input value is
+ * not used in other bands.
+ * - band : specifies which band's DFT should be decoded.
+ *
+ * Output:
+ *      - *fr              : pointer to a buffer where the real part of the
+ *                           DFT coefficients is written.
+ *      - *fi              : pointer to a buffer where the imaginary part
+ *                           of the DFT coefficients is written.
+ *
+ * Return value           : < 0 if an error occurs
+ * 0 if succeeded.
+ */
+int WebRtcIsac_DecodeSpec(Bitstr* streamdata,
+ int16_t AvgPitchGain_Q12,
+ enum ISACBand band,
+ double* fr,
+ double* fi);
+
+/******************************************************************************
+ * WebRtcIsac_EncodeSpec()
+ * Encode the real and imaginary parts of the DFT coefficients into the
+ * given bit-stream.
+ *
+ * Input:
+ *      - *fr              : pointer to a buffer containing the real part
+ *                           of the DFT coefficients.
+ *      - *fi              : pointer to a buffer containing the imaginary
+ *                           part of the DFT coefficients.
+ * - AvgPitchGain_Q12 : average pitch-gain of the frame. This is only
+ * relevant for 0-4 kHz band, and the input value is
+ * not used in other bands.
+ *      - band             : specifies which band's DFT should be encoded.
+ *
+ * Output:
+ *      - streamdata       : pointer to a structure containing the encoded
+ *                            data and the parameters needed for entropy
+ * coding.
+ *
+ * Return value           : < 0 if an error occurs
+ * 0 if succeeded.
+ */
+int WebRtcIsac_EncodeSpec(const int16_t* fr,
+ const int16_t* fi,
+ int16_t AvgPitchGain_Q12,
+ enum ISACBand band,
+ Bitstr* streamdata);
+
+/* decode & dequantize LPC Coef */
+int WebRtcIsac_DecodeLpcCoef(Bitstr* streamdata, double* LPCCoef);
+int WebRtcIsac_DecodeLpcCoefUB(Bitstr* streamdata,
+ double* lpcVecs,
+ double* percepFilterGains,
+ int16_t bandwidth);
+
+int WebRtcIsac_DecodeLpc(Bitstr* streamdata,
+ double* LPCCoef_lo,
+ double* LPCCoef_hi);
+
+/* quantize & code LPC Coef */
+void WebRtcIsac_EncodeLpcLb(double* LPCCoef_lo,
+ double* LPCCoef_hi,
+ Bitstr* streamdata,
+ IsacSaveEncoderData* encData);
+
+void WebRtcIsac_EncodeLpcGainLb(double* LPCCoef_lo,
+ double* LPCCoef_hi,
+ Bitstr* streamdata,
+ IsacSaveEncoderData* encData);
+
+/******************************************************************************
+ * WebRtcIsac_EncodeLpcUB()
+ * Encode LPC parameters, given as A-polynomial, of upper-band. The encoding
+ * is performed in LAR domain.
+ * For the upper-band, we compute and encode LPC of some sub-frames, LPC of
+ * other sub-frames are computed by linear interpolation, in LAR domain. This
+ * function performs the interpolation and returns the LPC of all sub-frames.
+ *
+ * Inputs:
+ * - lpcCoef : a buffer containing A-polynomials of sub-frames
+ * (excluding first coefficient that is 1).
+ * - bandwidth : specifies if the codec is operating at 0-12 kHz
+ * or 0-16 kHz mode.
+ *
+ * Input/output:
+ * - streamdata : pointer to a structure containing the encoded
+ * data and the parameters needed for entropy
+ * coding.
+ *
+ * Output:
+ * - interpolLPCCoeff : Decoded and interpolated LPC (A-polynomial)
+ * of all sub-frames.
+ * If LP analysis is of order K, and there are N
+ * sub-frames then this is a buffer of size
+ *                           (K + 1) * N, each vector starts with the LPC gain
+ * of the corresponding sub-frame. The LPC gains
+ * are encoded and inserted after this function is
+ * called. The first A-coefficient which is 1 is not
+ * included.
+ *
+ * Return value : 0 if encoding is successful,
+ * <0 if failed to encode.
+ */
+int16_t WebRtcIsac_EncodeLpcUB(double* lpcCoeff,
+ Bitstr* streamdata,
+ double* interpolLPCCoeff,
+ int16_t bandwidth,
+ ISACUBSaveEncDataStruct* encData);
+
+/******************************************************************************
+ * WebRtcIsac_DecodeInterpolLpcUb()
+ * Decode LPC coefficients and interpolate to get the coefficients of all
+ * sub-frames.
+ *
+ * Inputs:
+ *      - bandwidth        : specifies if the codec is in 0-12 kHz or
+ * 0-16 kHz mode.
+ *
+ * Input/output:
+ *      - streamdata       : pointer to a structure containing the encoded
+ *                           data and the parameters needed for entropy
+ * coding.
+ *
+ * Output:
+ * - percepFilterParam : Decoded and interpolated LPC (A-polynomial) of
+ * all sub-frames.
+ * If LP analysis is of order K, and there are N
+ * sub-frames then this is a buffer of size
+ *                           (K + 1) * N, each vector starts with the LPC gain
+ * of the corresponding sub-frame. The LPC gains
+ * are encoded and inserted after this function is
+ * called. The first A-coefficient which is 1 is not
+ * included.
+ *
+ * Return value : 0 if encoding is successful,
+ * <0 if failed to encode.
+ */
+int16_t WebRtcIsac_DecodeInterpolLpcUb(Bitstr* streamdata,
+ double* percepFilterParam,
+ int16_t bandwidth);
+
+/* Decode & dequantize RC */
+int WebRtcIsac_DecodeRc(Bitstr* streamdata, int16_t* RCQ15);
+
+/* Quantize & code RC */
+void WebRtcIsac_EncodeRc(int16_t* RCQ15, Bitstr* streamdata);
+
+/* Decode & dequantize squared Gain */
+int WebRtcIsac_DecodeGain2(Bitstr* streamdata, int32_t* Gain2);
+
+/* Quantize & code squared Gain (input is squared gain) */
+int WebRtcIsac_EncodeGain2(int32_t* gain2, Bitstr* streamdata);
+
+void WebRtcIsac_EncodePitchGain(int16_t* PitchGains_Q12,
+ Bitstr* streamdata,
+ IsacSaveEncoderData* encData);
+
+void WebRtcIsac_EncodePitchLag(double* PitchLags,
+ int16_t* PitchGain_Q12,
+ Bitstr* streamdata,
+ IsacSaveEncoderData* encData);
+
+int WebRtcIsac_DecodePitchGain(Bitstr* streamdata, int16_t* PitchGain_Q12);
+int WebRtcIsac_DecodePitchLag(Bitstr* streamdata,
+ int16_t* PitchGain_Q12,
+ double* PitchLag);
+
+int WebRtcIsac_DecodeFrameLen(Bitstr* streamdata, int16_t* framelength);
+int WebRtcIsac_EncodeFrameLen(int16_t framelength, Bitstr* streamdata);
+int WebRtcIsac_DecodeSendBW(Bitstr* streamdata, int16_t* BWno);
+void WebRtcIsac_EncodeReceiveBw(int* BWno, Bitstr* streamdata);
+
+/* Step-down */
+void WebRtcIsac_Poly2Rc(double* a, int N, double* RC);
+
+/* Step-up */
+void WebRtcIsac_Rc2Poly(double* RC, int N, double* a);
+
+void WebRtcIsac_TranscodeLPCCoef(double* LPCCoef_lo,
+ double* LPCCoef_hi,
+ int* index_g);
+
+/******************************************************************************
+ * WebRtcIsac_EncodeLpcGainUb()
+ * Encode LPC gains of sub-frames.
+ *
+ * Input/outputs:
+ * - lpGains : a buffer which contains 'SUBFRAME' number of
+ * LP gains to be encoded. The input values are
+ * overwritten by the quantized values.
+ *      - streamdata       : pointer to a structure containing the encoded
+ *                           data and the parameters needed for entropy
+ * coding.
+ *
+ * Output:
+ * - lpcGainIndex : quantization indices for lpc gains, these will
+ * be stored to be used for FEC.
+ */
+void WebRtcIsac_EncodeLpcGainUb(double* lpGains,
+ Bitstr* streamdata,
+ int* lpcGainIndex);
+
+/******************************************************************************
+ * WebRtcIsac_StoreLpcGainUb()
+ * Store LPC gains of sub-frames in 'streamdata'.
+ *
+ * Input:
+ * - lpGains : a buffer which contains 'SUBFRAME' number of
+ * LP gains to be encoded.
+ * Input/outputs:
+ *      - streamdata       : pointer to a structure containing the encoded
+ *                           data and the parameters needed for entropy
+ * coding.
+ *
+ */
+void WebRtcIsac_StoreLpcGainUb(double* lpGains, Bitstr* streamdata);
+
+/******************************************************************************
+ * WebRtcIsac_DecodeLpcGainUb()
+ * Decode the LPC gain of sub-frames.
+ *
+ * Input/output:
+ *      - streamdata       : pointer to a structure containing the encoded
+ *                           data and the parameters needed for entropy
+ * coding.
+ *
+ * Output:
+ *      - lpGains          : a buffer where decoded LPC gains will be stored.
+ *
+ * Return value : 0 if succeeded.
+ * <0 if failed.
+ */
+int16_t WebRtcIsac_DecodeLpcGainUb(double* lpGains, Bitstr* streamdata);
+
+/******************************************************************************
+ * WebRtcIsac_EncodeBandwidth()
+ * Encode if the bandwidth of encoded audio is 0-12 kHz or 0-16 kHz.
+ *
+ * Input:
+ *      - bandwidth        : an enumerator specifying if the codec is in
+ * 0-12 kHz or 0-16 kHz mode.
+ *
+ * Input/output:
+ *      - streamdata       : pointer to a structure containing the encoded
+ *                           data and the parameters needed for entropy
+ * coding.
+ *
+ * Return value : 0 if succeeded.
+ * <0 if failed.
+ */
+int16_t WebRtcIsac_EncodeBandwidth(enum ISACBandwidth bandwidth,
+ Bitstr* streamData);
+
+/******************************************************************************
+ * WebRtcIsac_DecodeBandwidth()
+ * Decode the bandwidth of the encoded audio, i.e. if the bandwidth is 0-12 kHz
+ * or 0-16 kHz.
+ *
+ * Input/output:
+ *      - streamdata       : pointer to a structure containing the encoded
+ *                           data and the parameters needed for entropy
+ * coding.
+ *
+ * Output:
+ * - bandwidth : an enumerator specifying if the codec is in
+ * 0-12 kHz or 0-16 kHz mode.
+ *
+ * Return value : 0 if succeeded.
+ * <0 if failed.
+ */
+int16_t WebRtcIsac_DecodeBandwidth(Bitstr* streamData,
+ enum ISACBandwidth* bandwidth);
+
+/******************************************************************************
+ * WebRtcIsac_EncodeJitterInfo()
+ * Encode the jitter information.
+ *
+ * Input/output:
+ *      - streamdata       : pointer to a structure containing the encoded
+ *                           data and the parameters needed for entropy
+ * coding.
+ *
+ * Input:
+ * - jitterInfo : one bit of info specifying if the channel is
+ * in high/low jitter. Zero indicates low jitter
+ * and one indicates high jitter.
+ *
+ * Return value : 0 if succeeded.
+ * <0 if failed.
+ */
+int16_t WebRtcIsac_EncodeJitterInfo(int32_t jitterIndex, Bitstr* streamData);
+
+/******************************************************************************
+ * WebRtcIsac_DecodeJitterInfo()
+ * Decode the jitter information.
+ *
+ * Input/output:
+ *      - streamdata       : pointer to a structure containing the encoded
+ *                           data and the parameters needed for entropy
+ * coding.
+ *
+ * Output:
+ * - jitterInfo : one bit of info specifying if the channel is
+ * in high/low jitter. Zero indicates low jitter
+ * and one indicates high jitter.
+ *
+ * Return value : 0 if succeeded.
+ * <0 if failed.
+ */
+int16_t WebRtcIsac_DecodeJitterInfo(Bitstr* streamData, int32_t* jitterInfo);
+
+#endif /* MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_ENTROPY_CODING_H_ */
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/filter_functions.c b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/filter_functions.c
new file mode 100644
index 0000000000..a4f297c5a1
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/filter_functions.c
@@ -0,0 +1,195 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <memory.h>
+#include <string.h>
+#ifdef WEBRTC_ANDROID
+#include <stdlib.h>
+#endif
+
+#include "modules/audio_coding/codecs/isac/main/source/pitch_estimator.h"
+#include "modules/audio_coding/codecs/isac/main/source/isac_vad.h"
+
+static void WebRtcIsac_AllPoleFilter(double* InOut,
+ double* Coef,
+ size_t lengthInOut,
+ int orderCoef) {
+ /* the state of filter is assumed to be in InOut[-1] to InOut[-orderCoef] */
+ double scal;
+ double sum;
+ size_t n;
+ int k;
+
+ //if (fabs(Coef[0]-1.0)<0.001) {
+ if ( (Coef[0] > 0.9999) && (Coef[0] < 1.0001) )
+ {
+ for(n = 0; n < lengthInOut; n++)
+ {
+ sum = Coef[1] * InOut[-1];
+ for(k = 2; k <= orderCoef; k++){
+ sum += Coef[k] * InOut[-k];
+ }
+ *InOut++ -= sum;
+ }
+ }
+ else
+ {
+ scal = 1.0 / Coef[0];
+ for(n=0;n<lengthInOut;n++)
+ {
+ *InOut *= scal;
+ for(k=1;k<=orderCoef;k++){
+ *InOut -= scal*Coef[k]*InOut[-k];
+ }
+ InOut++;
+ }
+ }
+}
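+
+/* Editorial note: the first branch above specializes the common monic case
+ * (Coef[0] ~= 1.0, cf. the commented-out fabs() test) so the per-sample
+ * 1/Coef[0] scaling can be skipped; both branches read their filter state
+ * from InOut[-1] .. InOut[-orderCoef], as noted at the top. */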
+
+static void WebRtcIsac_AllZeroFilter(double* In,
+ double* Coef,
+ size_t lengthInOut,
+ int orderCoef,
+ double* Out) {
+ /* the state of filter is assumed to be in In[-1] to In[-orderCoef] */
+
+ size_t n;
+ int k;
+ double tmp;
+
+ for(n = 0; n < lengthInOut; n++)
+ {
+ tmp = In[0] * Coef[0];
+
+ for(k = 1; k <= orderCoef; k++){
+ tmp += Coef[k] * In[-k];
+ }
+
+ *Out++ = tmp;
+ In++;
+ }
+}
+
+static void WebRtcIsac_ZeroPoleFilter(double* In,
+ double* ZeroCoef,
+ double* PoleCoef,
+ size_t lengthInOut,
+ int orderCoef,
+ double* Out) {
+ /* the state of the zero section is assumed to be in In[-1] to In[-orderCoef] */
+ /* the state of the pole section is assumed to be in Out[-1] to Out[-orderCoef] */
+
+ WebRtcIsac_AllZeroFilter(In,ZeroCoef,lengthInOut,orderCoef,Out);
+ WebRtcIsac_AllPoleFilter(Out,PoleCoef,lengthInOut,orderCoef);
+}
+
+
+void WebRtcIsac_AutoCorr(double* r, const double* x, size_t N, size_t order) {
+ size_t lag, n;
+ double sum, prod;
+ const double *x_lag;
+
+ for (lag = 0; lag <= order; lag++)
+ {
+ sum = 0.0f;
+ x_lag = &x[lag];
+ prod = x[0] * x_lag[0];
+ for (n = 1; n < N - lag; n++) {
+ sum += prod;
+ prod = x[n] * x_lag[n];
+ }
+ sum += prod;
+ r[lag] = sum;
+ }
+
+}
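+
+/* Editorial note: the loop above keeps one product in flight (`prod`) and
+ * adds it on the following iteration, with the last product folded in after
+ * the loop; the result is the plain (unnormalized) autocorrelation
+ * r[lag] = sum over n of x[n] * x[n + lag], for n in [0, N - 1 - lag]. */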
+
+static void WebRtcIsac_BwExpand(double* out,
+ double* in,
+ double coef,
+ size_t length) {
+ size_t i;
+ double chirp;
+
+ chirp = coef;
+
+ out[0] = in[0];
+ for (i = 1; i < length; i++) {
+ out[i] = chirp * in[i];
+ chirp *= coef;
+ }
+}
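+
+/* Editorial note: this scales coefficient i by coef^i, i.e. it evaluates
+ * the polynomial at z / coef, pulling the roots of A(z) radially inward by
+ * the factor `coef` (the rho = 0.9 used by the weighting filter below). */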
+
+void WebRtcIsac_WeightingFilter(const double* in,
+ double* weiout,
+ double* whiout,
+ WeightFiltstr* wfdata) {
+ double tmpbuffer[PITCH_FRAME_LEN + PITCH_WLPCBUFLEN];
+ double corr[PITCH_WLPCORDER+1], rc[PITCH_WLPCORDER+1];
+ double apol[PITCH_WLPCORDER+1], apolr[PITCH_WLPCORDER+1];
+ double rho=0.9, *inp, *dp, *dp2;
+ double whoutbuf[PITCH_WLPCBUFLEN + PITCH_WLPCORDER];
+ double weoutbuf[PITCH_WLPCBUFLEN + PITCH_WLPCORDER];
+ double *weo, *who, opol[PITCH_WLPCORDER+1], ext[PITCH_WLPCWINLEN];
+ int k, n, endpos, start;
+
+ /* Set up buffer and states */
+ memcpy(tmpbuffer, wfdata->buffer, sizeof(double) * PITCH_WLPCBUFLEN);
+ memcpy(tmpbuffer+PITCH_WLPCBUFLEN, in, sizeof(double) * PITCH_FRAME_LEN);
+ memcpy(wfdata->buffer, tmpbuffer+PITCH_FRAME_LEN, sizeof(double) * PITCH_WLPCBUFLEN);
+
+ dp=weoutbuf;
+ dp2=whoutbuf;
+ for (k=0;k<PITCH_WLPCORDER;k++) {
+ *dp++ = wfdata->weostate[k];
+ *dp2++ = wfdata->whostate[k];
+ opol[k]=0.0;
+ }
+ opol[0]=1.0;
+ opol[PITCH_WLPCORDER]=0.0;
+ weo=dp;
+ who=dp2;
+
+ endpos=PITCH_WLPCBUFLEN + PITCH_SUBFRAME_LEN;
+ inp=tmpbuffer + PITCH_WLPCBUFLEN;
+
+ for (n=0; n<PITCH_SUBFRAMES; n++) {
+ /* Windowing */
+ start=endpos-PITCH_WLPCWINLEN;
+ for (k=0; k<PITCH_WLPCWINLEN; k++) {
+ ext[k]=wfdata->window[k]*tmpbuffer[start+k];
+ }
+
+ /* Get LPC polynomial */
+ WebRtcIsac_AutoCorr(corr, ext, PITCH_WLPCWINLEN, PITCH_WLPCORDER);
+ corr[0]=1.01*corr[0]+1.0; /* White noise correction */
+ WebRtcIsac_LevDurb(apol, rc, corr, PITCH_WLPCORDER);
+ WebRtcIsac_BwExpand(apolr, apol, rho, PITCH_WLPCORDER+1);
+
+ /* Filtering */
+ WebRtcIsac_ZeroPoleFilter(inp, apol, apolr, PITCH_SUBFRAME_LEN, PITCH_WLPCORDER, weo);
+ WebRtcIsac_ZeroPoleFilter(inp, apolr, opol, PITCH_SUBFRAME_LEN, PITCH_WLPCORDER, who);
+
+ inp+=PITCH_SUBFRAME_LEN;
+ endpos+=PITCH_SUBFRAME_LEN;
+ weo+=PITCH_SUBFRAME_LEN;
+ who+=PITCH_SUBFRAME_LEN;
+ }
+
+ /* Export filter states */
+ for (k=0;k<PITCH_WLPCORDER;k++) {
+ wfdata->weostate[k]=weoutbuf[PITCH_FRAME_LEN+k];
+ wfdata->whostate[k]=whoutbuf[PITCH_FRAME_LEN+k];
+ }
+
+ /* Export output data */
+ memcpy(weiout, weoutbuf+PITCH_WLPCORDER, sizeof(double) * PITCH_FRAME_LEN);
+ memcpy(whiout, whoutbuf+PITCH_WLPCORDER, sizeof(double) * PITCH_FRAME_LEN);
+}
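+
+/* Editorial note: per subframe this windows the most recent PITCH_WLPCWINLEN
+ * samples, fits an LPC model of order PITCH_WLPCORDER via WebRtcIsac_LevDurb
+ * (after a 1% white-noise correction on corr[0]), bandwidth-expands it with
+ * rho = 0.9, and then filters the subframe twice: `weiout` through the
+ * pole-zero weighting A(z)/A(z/rho) and `whiout` through the all-zero
+ * whitening filter A(z/rho) (opol is the unit all-pole section). */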
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/filter_functions.h b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/filter_functions.h
new file mode 100644
index 0000000000..48a9b7426b
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/filter_functions.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_FILTER_FUNCTIONS_H_
+#define MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_FILTER_FUNCTIONS_H_
+
+#include "modules/audio_coding/codecs/isac/main/source/structs.h"
+
+void WebRtcIsac_AutoCorr(double* r, const double* x, size_t N, size_t order);
+
+void WebRtcIsac_WeightingFilter(const double* in,
+ double* weiout,
+ double* whiout,
+ WeightFiltstr* wfdata);
+
+#endif // MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_FILTER_FUNCTIONS_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/filterbanks.c b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/filterbanks.c
new file mode 100644
index 0000000000..d57b55022d
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/filterbanks.c
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * filterbanks.c
+ *
+ * This file contains the functions WebRtcIsac_AllPassFilter2Float,
+ * WebRtcIsac_SplitAndFilter, and WebRtcIsac_FilterAndCombine,
+ * which implement filterbanks that produce decimated lowpass and
+ * highpass versions of a signal and perform reconstruction.
+ *
+ */
+
+#include "modules/audio_coding/codecs/isac/main/source/settings.h"
+#include "modules/audio_coding/codecs/isac/main/source/codec.h"
+#include "modules/audio_coding/codecs/isac/main/source/isac_vad.h"
+
+/* Combining */
+
+/* HPstcoeff_out_1 = {a1, a2, b1 - b0 * a1, b2 - b0 * a2}; */
+static const float kHpStCoefOut1Float[4] =
+{-1.99701049409000f, 0.99714204490000f, 0.01701049409000f, -0.01704204490000f};
+
+/* HPstcoeff_out_2 = {a1, a2, b1 - b0 * a1, b2 - b0 * a2}; */
+static const float kHpStCoefOut2Float[4] =
+{-1.98645294509837f, 0.98672435560000f, 0.00645294509837f, -0.00662435560000f};
+
+
+/* Function WebRtcIsac_FilterAndCombine */
+/* This is a decoder function that takes the decimated
+ length FRAMESAMPLES_HALF input low-pass and
+ high-pass signals and creates a reconstructed fullband
+ output signal of length FRAMESAMPLES. WebRtcIsac_FilterAndCombine
+ is the sibling function of WebRtcIsac_SplitAndFilter */
+/* INPUTS:
+ inLP: a length FRAMESAMPLES_HALF array of input low-pass
+ samples.
+ inHP: a length FRAMESAMPLES_HALF array of input high-pass
+ samples.
+ postfiltdata: input data structure containing the filterbank
+ states from the previous decoding iteration.
+ OUTPUTS:
+ Out: a length FRAMESAMPLES array of output reconstructed
+ samples (fullband) based on the input low-pass and
+ high-pass signals.
+ postfiltdata: the input data structure containing the filterbank
+ states is updated for the next decoding iteration */
+void WebRtcIsac_FilterAndCombineFloat(float *InLP,
+ float *InHP,
+ float *Out,
+ PostFiltBankstr *postfiltdata)
+{
+ int k;
+ float tempin_ch1[FRAMESAMPLES+MAX_AR_MODEL_ORDER];
+ float tempin_ch2[FRAMESAMPLES+MAX_AR_MODEL_ORDER];
+ float ftmp, ftmp2;
+
+ /* Form the polyphase signals*/
+ for (k=0;k<FRAMESAMPLES_HALF;k++) {
+ tempin_ch1[k]=InLP[k]+InHP[k]; /* Construct a new upper channel signal*/
+ tempin_ch2[k]=InLP[k]-InHP[k]; /* Construct a new lower channel signal*/
+ }
+
+
+ /* all-pass filter the new upper channel signal. HOWEVER, use the all-pass filter factors
+ that were used as a lower channel at the encoding side. So at the decoder, the
+ corresponding all-pass filter factors for each channel are swapped.*/
+ WebRtcIsac_AllPassFilter2Float(tempin_ch1, WebRtcIsac_kLowerApFactorsFloat,
+ FRAMESAMPLES_HALF, NUMBEROFCHANNELAPSECTIONS,postfiltdata->STATE_0_UPPER_float);
+
+ /* Now, all-pass filter the new lower channel signal. But since all-pass filter factors
+ at the decoder are swapped from the ones at the encoder, the 'upper' channel
+ all-pass filter factors (WebRtcIsac_kUpperApFactorsFloat) are used to filter this new
+ lower channel signal */
+ WebRtcIsac_AllPassFilter2Float(tempin_ch2, WebRtcIsac_kUpperApFactorsFloat,
+ FRAMESAMPLES_HALF, NUMBEROFCHANNELAPSECTIONS,postfiltdata->STATE_0_LOWER_float);
+
+
+ /* Merge outputs to form the full length output signal.*/
+ for (k=0;k<FRAMESAMPLES_HALF;k++) {
+ Out[2*k]=tempin_ch2[k];
+ Out[2*k+1]=tempin_ch1[k];
+ }
+
+
+ /* High pass filter */
+
+ for (k=0;k<FRAMESAMPLES;k++) {
+ ftmp2 = Out[k] + kHpStCoefOut1Float[2] * postfiltdata->HPstates1_float[0] +
+ kHpStCoefOut1Float[3] * postfiltdata->HPstates1_float[1];
+ ftmp = Out[k] - kHpStCoefOut1Float[0] * postfiltdata->HPstates1_float[0] -
+ kHpStCoefOut1Float[1] * postfiltdata->HPstates1_float[1];
+ postfiltdata->HPstates1_float[1] = postfiltdata->HPstates1_float[0];
+ postfiltdata->HPstates1_float[0] = ftmp;
+ Out[k] = ftmp2;
+ }
+
+ for (k=0;k<FRAMESAMPLES;k++) {
+ ftmp2 = Out[k] + kHpStCoefOut2Float[2] * postfiltdata->HPstates2_float[0] +
+ kHpStCoefOut2Float[3] * postfiltdata->HPstates2_float[1];
+ ftmp = Out[k] - kHpStCoefOut2Float[0] * postfiltdata->HPstates2_float[0] -
+ kHpStCoefOut2Float[1] * postfiltdata->HPstates2_float[1];
+ postfiltdata->HPstates2_float[1] = postfiltdata->HPstates2_float[0];
+ postfiltdata->HPstates2_float[0] = ftmp;
+ Out[k] = ftmp2;
+ }
+}
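+
+/* Editorial note: the LP +/- HP butterfly above inverts the encoder-side
+ * split; the all-pass sections run with swapped factors exactly as the
+ * comments describe, and interleaving Out[2k] / Out[2k + 1] merges the two
+ * polyphase channels back to the full FRAMESAMPLES rate. The two
+ * direct-form biquads then apply the output high-pass stages, carrying
+ * their states in postfiltdata across calls. */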
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/intialize.c b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/intialize.c
new file mode 100644
index 0000000000..5c951f6e9d
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/intialize.c
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/* intialize.c - Initialization functions for the iSAC coder */
+
+#include <math.h>
+
+#include "modules/audio_coding/codecs/isac/main/source/structs.h"
+#include "modules/audio_coding/codecs/isac/main/source/codec.h"
+#include "modules/audio_coding/codecs/isac/main/source/pitch_estimator.h"
+
+void WebRtcIsac_InitMasking(MaskFiltstr *maskdata) {
+
+ int k;
+
+ for (k = 0; k < WINLEN; k++) {
+ maskdata->DataBufferLo[k] = 0.0;
+ maskdata->DataBufferHi[k] = 0.0;
+ }
+ for (k = 0; k < ORDERLO+1; k++) {
+ maskdata->CorrBufLo[k] = 0.0;
+ maskdata->PreStateLoF[k] = 0.0;
+ maskdata->PreStateLoG[k] = 0.0;
+ maskdata->PostStateLoF[k] = 0.0;
+ maskdata->PostStateLoG[k] = 0.0;
+ }
+ for (k = 0; k < ORDERHI+1; k++) {
+ maskdata->CorrBufHi[k] = 0.0;
+ maskdata->PreStateHiF[k] = 0.0;
+ maskdata->PreStateHiG[k] = 0.0;
+ maskdata->PostStateHiF[k] = 0.0;
+ maskdata->PostStateHiG[k] = 0.0;
+ }
+
+ maskdata->OldEnergy = 10.0;
+ return;
+}
+
+void WebRtcIsac_InitPostFilterbank(PostFiltBankstr *postfiltdata)
+{
+ int k;
+
+ for (k = 0; k < 2*POSTQORDER; k++) {
+ postfiltdata->STATE_0_LOWER[k] = 0;
+ postfiltdata->STATE_0_UPPER[k] = 0;
+
+ postfiltdata->STATE_0_LOWER_float[k] = 0;
+ postfiltdata->STATE_0_UPPER_float[k] = 0;
+ }
+
+ /* High pass filter states */
+ postfiltdata->HPstates1[0] = 0.0;
+ postfiltdata->HPstates1[1] = 0.0;
+
+ postfiltdata->HPstates2[0] = 0.0;
+ postfiltdata->HPstates2[1] = 0.0;
+
+ postfiltdata->HPstates1_float[0] = 0.0f;
+ postfiltdata->HPstates1_float[1] = 0.0f;
+
+ postfiltdata->HPstates2_float[0] = 0.0f;
+ postfiltdata->HPstates2_float[1] = 0.0f;
+
+ return;
+}
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/isac.c b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/isac.c
new file mode 100644
index 0000000000..456f447d9a
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/isac.c
@@ -0,0 +1,2307 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * isac.c
+ *
+ * This C file contains the functions for the ISAC API
+ *
+ */
+
+#include "modules/audio_coding/codecs/isac/main/include/isac.h"
+
+#include <math.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "rtc_base/checks.h"
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+#include "modules/audio_coding/codecs/isac/main/source/bandwidth_estimator.h"
+#include "modules/audio_coding/codecs/isac/main/source/codec.h"
+#include "modules/audio_coding/codecs/isac/main/source/crc.h"
+#include "modules/audio_coding/codecs/isac/main/source/entropy_coding.h"
+#include "modules/audio_coding/codecs/isac/main/source/lpc_shape_swb16_tables.h"
+#include "modules/audio_coding/codecs/isac/main/source/os_specific_inline.h"
+#include "modules/audio_coding/codecs/isac/main/source/structs.h"
+#include "modules/audio_coding/codecs/isac/main/source/isac_vad.h"
+#include "rtc_base/system/arch.h"
+
+#define BIT_MASK_DEC_INIT 0x0001
+#define BIT_MASK_ENC_INIT 0x0002
+
+#define LEN_CHECK_SUM_WORD8 4
+#define MAX_NUM_LAYERS 10
+
+
+/****************************************************************************
+ * UpdatePayloadSizeLimit(...)
+ *
+ * Call this function to update the limit on the payload size. The limit on
+ * payload size might change i) if a user directly changes the limit by
+ * calling xxx_setMaxPayloadSize() or xxx_setMaxRate(), or ii) indirectly
+ * when bandwidth is changing. The latter might be the result of bandwidth
+ * adaptation, or direct change of the bottleneck in instantaneous mode.
+ *
+ * This function takes the current overall limit on payload, and translates it
+ * to the limits on lower and upper-band. If the codec is in wideband mode,
+ * then the overall limit and the limit on the lower-band are the same.
+ * Otherwise, a fraction of the limit should be allocated to lower-band
+ * leaving some room for the upper-band bit-stream. That is why an update
+ * of limit is required every time that the bandwidth is changing.
+ *
+ */
+static void UpdatePayloadSizeLimit(ISACMainStruct* instISAC) {
+ int16_t lim30MsPayloadBytes = WEBRTC_SPL_MIN(
+ (instISAC->maxPayloadSizeBytes),
+ (instISAC->maxRateBytesPer30Ms));
+ int16_t lim60MsPayloadBytes = WEBRTC_SPL_MIN(
+ (instISAC->maxPayloadSizeBytes),
+ (instISAC->maxRateBytesPer30Ms << 1));
+
+ /* The only time that iSAC will have 60 ms
+ * frame-size is when operating in wideband, so
+ * there is no upper-band bit-stream. */
+
+ if (instISAC->bandwidthKHz == isac8kHz) {
+ /* At 8 kHz there is no upper-band bit-stream,
+ * therefore, the lower-band limit is the overall limit. */
+ instISAC->instLB.ISACencLB_obj.payloadLimitBytes60 =
+ lim60MsPayloadBytes;
+ instISAC->instLB.ISACencLB_obj.payloadLimitBytes30 =
+ lim30MsPayloadBytes;
+ } else {
+ /* When in super-wideband, we only have 30 ms frames.
+ * Do a rate allocation for the given limit. */
+ if (lim30MsPayloadBytes > 250) {
+      /* 4/5 to the lower band, the rest for the upper band. */
+ instISAC->instLB.ISACencLB_obj.payloadLimitBytes30 =
+ (lim30MsPayloadBytes << 2) / 5;
+ } else if (lim30MsPayloadBytes > 200) {
+ /* For the interval of 200 to 250 the share of
+ * upper-band linearly grows from 20 to 50. */
+ instISAC->instLB.ISACencLB_obj.payloadLimitBytes30 =
+ (lim30MsPayloadBytes << 1) / 5 + 100;
+ } else {
+ /* Allocate only 20 for upper-band. */
+ instISAC->instLB.ISACencLB_obj.payloadLimitBytes30 =
+ lim30MsPayloadBytes - 20;
+ }
+ instISAC->instUB.ISACencUB_obj.maxPayloadSizeBytes =
+ lim30MsPayloadBytes;
+ }
+}
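+
+/* Editorial worked example (not from the source): with a 30 ms limit of
+ * 300 bytes the first branch gives the lower band (300 << 2) / 5 = 240
+ * bytes, leaving 60 for the upper band. At the seam points of the middle
+ * branch, (200 << 1) / 5 + 100 = 180 and (250 << 1) / 5 + 100 = 200, so the
+ * upper-band share grows linearly from 20 to 50 bytes over [200, 250],
+ * matching the comments above. */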
+
+
+/****************************************************************************
+ * UpdateBottleneck(...)
+ *
+ * This function updates the bottleneck only if the codec is operating in
+ * channel-adaptive mode. Furthermore, since an update of the bottleneck
+ * might result in an update of the bandwidth, the bottleneck should be
+ * updated right before the first 10 ms of a frame is pushed into the encoder.
+ *
+ */
+static void UpdateBottleneck(ISACMainStruct* instISAC) {
+ /* Read the bottleneck from bandwidth estimator for the
+ * first 10 ms audio. This way, if there is a change
+ * in bandwidth, upper and lower-band will be in sync. */
+ if ((instISAC->codingMode == 0) &&
+ (instISAC->instLB.ISACencLB_obj.buffer_index == 0) &&
+ (instISAC->instLB.ISACencLB_obj.frame_nb == 0)) {
+ int32_t bottleneck =
+ WebRtcIsac_GetUplinkBandwidth(&instISAC->bwestimator_obj);
+
+ /* Adding hysteresis when increasing signal bandwidth. */
+ if ((instISAC->bandwidthKHz == isac8kHz)
+ && (bottleneck > 37000)
+ && (bottleneck < 41000)) {
+ bottleneck = 37000;
+ }
+
+    /* Switching from 12 kHz to 16 kHz is not allowed in this revision.
+ * If we let this happen, we have to take care of buffer_index and
+ * the last LPC vector. */
+ if ((instISAC->bandwidthKHz != isac16kHz) &&
+ (bottleneck > 46000)) {
+ bottleneck = 46000;
+ }
+
+ /* We might need a rate allocation. */
+ if (instISAC->encoderSamplingRateKHz == kIsacWideband) {
+ /* Wideband is the only choice we have here. */
+ instISAC->instLB.ISACencLB_obj.bottleneck =
+ (bottleneck > 32000) ? 32000 : bottleneck;
+ instISAC->bandwidthKHz = isac8kHz;
+ } else {
+ /* Do the rate-allocation and get the new bandwidth. */
+ enum ISACBandwidth bandwidth;
+ WebRtcIsac_RateAllocation(bottleneck,
+ &(instISAC->instLB.ISACencLB_obj.bottleneck),
+ &(instISAC->instUB.ISACencUB_obj.bottleneck),
+ &bandwidth);
+ if (bandwidth != isac8kHz) {
+ instISAC->instLB.ISACencLB_obj.new_framelength = 480;
+ }
+ if (bandwidth != instISAC->bandwidthKHz) {
+ /* Bandwidth is changing. */
+ instISAC->bandwidthKHz = bandwidth;
+ UpdatePayloadSizeLimit(instISAC);
+ if (bandwidth == isac12kHz) {
+ instISAC->instLB.ISACencLB_obj.buffer_index = 0;
+ }
+          /* Currently we don't let the bandwidth switch to 16 kHz
+ * if in adaptive mode. If we let this happen, we have to take
+ * care of buffer_index and the last LPC vector. */
+ }
+ }
+ }
+}
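+
+/* Editorial note: the 37000-41000 clamp above is upward hysteresis. While
+ * the codec sits at 8 kHz, estimates in that window are pinned to 37000 so
+ * the bandwidth only widens once the estimate clearly exceeds 41000 bps;
+ * this keeps a borderline estimate from toggling the band split on every
+ * frame. */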
+
+
+/****************************************************************************
+ * GetSendBandwidthInfo(...)
+ *
+ * This is called to get the bandwidth info. This info is the bandwidth and
+ * the jitter of the 'there-to-here' channel, estimated 'here.' This info
+ * is signaled in an in-band fashion to the other side.
+ *
+ * The call to the bandwidth estimator triggers a recursive averaging which
+ * has to be synchronized between encoder & decoder; therefore, the call to
+ * BWE should happen once per packet. As the BWE info is inserted into the
+ * bit-stream, we need valid info right before the encodeLB function is going
+ * to generate a bit-stream. That is when the lower-band buffer already has
+ * 20 ms of audio, and the 3rd block of 10 ms is going to be injected into
+ * the encoder.
+ *
+ * Inputs:
+ * - instISAC : iSAC instance.
+ *
+ * Outputs:
+ * - bandwidthIndex : an index which has to be encoded in
+ * lower-band bit-stream, indicating the
+ * bandwidth of there-to-here channel.
+ * - jitterInfo : this indicates if the jitter is high
+ * or low and it is encoded in upper-band
+ * bit-stream.
+ *
+ */
+static void GetSendBandwidthInfo(ISACMainStruct* instISAC,
+ int16_t* bandwidthIndex,
+ int16_t* jitterInfo) {
+ if ((instISAC->instLB.ISACencLB_obj.buffer_index ==
+ (FRAMESAMPLES_10ms << 1)) &&
+ (instISAC->instLB.ISACencLB_obj.frame_nb == 0)) {
+ /* Bandwidth estimation and coding. */
+ WebRtcIsac_GetDownlinkBwJitIndexImpl(&(instISAC->bwestimator_obj),
+ bandwidthIndex, jitterInfo,
+ instISAC->decoderSamplingRateKHz);
+ }
+}
+
+
+/****************************************************************************
+ * WebRtcIsac_Create(...)
+ *
+ * This function creates an ISAC instance, which will contain the state
+ * information for one coding/decoding channel.
+ *
+ * Input:
+ * - ISAC_main_inst : address of the pointer to the coder instance.
+ *
+ * Return value : 0 - Ok
+ * -1 - Error
+ */
+int16_t WebRtcIsac_Create(ISACStruct** ISAC_main_inst) {
+ ISACMainStruct* instISAC;
+
+ if (ISAC_main_inst != NULL) {
+ instISAC = (ISACMainStruct*)malloc(sizeof(ISACMainStruct));
+ *ISAC_main_inst = (ISACStruct*)instISAC;
+ if (*ISAC_main_inst != NULL) {
+ instISAC->errorCode = 0;
+ instISAC->initFlag = 0;
+ /* Default is wideband. */
+ instISAC->bandwidthKHz = isac8kHz;
+ instISAC->encoderSamplingRateKHz = kIsacWideband;
+ instISAC->decoderSamplingRateKHz = kIsacWideband;
+ instISAC->in_sample_rate_hz = 16000;
+
+ WebRtcIsac_InitTransform(&instISAC->transform_tables);
+ return 0;
+ } else {
+ return -1;
+ }
+ } else {
+ return -1;
+ }
+}
+
+
+/****************************************************************************
+ * WebRtcIsac_Free(...)
+ *
+ * This function frees the iSAC instance created with WebRtcIsac_Create().
+ *
+ * Input:
+ *        - ISAC_main_inst    : an iSAC instance.
+ *
+ * Return value : 0 - Ok
+ * -1 - Error
+ */
+int16_t WebRtcIsac_Free(ISACStruct* ISAC_main_inst) {
+ ISACMainStruct* instISAC = (ISACMainStruct*)ISAC_main_inst;
+ free(instISAC);
+ return 0;
+}
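+
+/* Usage sketch (illustrative only, not part of the upstream sources):
+ * the create/free lifecycle of an instance, using only the two
+ * functions above.
+ *
+ *   ISACStruct* inst = NULL;
+ *   if (WebRtcIsac_Create(&inst) == 0) {
+ *     // ... initialize and use the encoder/decoder API on `inst` ...
+ *     WebRtcIsac_Free(inst);
+ *   }
+ */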
+
+
+/****************************************************************************
+ * EncoderInitLb(...) - internal function for initialization of
+ * Lower Band
+ * EncoderInitUb(...) - internal function for initialization of
+ * Upper Band
+ * WebRtcIsac_EncoderInit(...) - API function
+ *
+ * This function initializes an iSAC instance prior to the encoder calls.
+ *
+ * Input:
+ * - ISAC_main_inst : ISAC instance.
+ * - CodingMode : 0 -> Bit rate and frame length are automatically
+ * adjusted to available bandwidth on
+ * transmission channel, applicable just to
+ * wideband mode.
+ * 1 -> User sets a frame length and a target bit
+ * rate which is taken as the maximum
+ * short-term average bit rate.
+ *
+ * Return value : 0 - Ok
+ * -1 - Error
+ */
+static int16_t EncoderInitLb(ISACLBStruct* instLB,
+ int16_t codingMode,
+ enum IsacSamplingRate sampRate) {
+ int16_t statusInit = 0;
+ int k;
+
+ /* Init stream vector to zero */
+ for (k = 0; k < STREAM_SIZE_MAX_60; k++) {
+ instLB->ISACencLB_obj.bitstr_obj.stream[k] = 0;
+ }
+
+ if ((codingMode == 1) || (sampRate == kIsacSuperWideband)) {
+ /* 30 ms frame-size if either in super-wideband or
+ * instantaneous mode (I-mode). */
+ instLB->ISACencLB_obj.new_framelength = 480;
+ } else {
+ instLB->ISACencLB_obj.new_framelength = INITIAL_FRAMESAMPLES;
+ }
+
+ WebRtcIsac_InitMasking(&instLB->ISACencLB_obj.maskfiltstr_obj);
+ WebRtcIsac_InitPreFilterbank(&instLB->ISACencLB_obj.prefiltbankstr_obj);
+ WebRtcIsac_InitPitchFilter(&instLB->ISACencLB_obj.pitchfiltstr_obj);
+ WebRtcIsac_InitPitchAnalysis(
+ &instLB->ISACencLB_obj.pitchanalysisstr_obj);
+
+ instLB->ISACencLB_obj.buffer_index = 0;
+ instLB->ISACencLB_obj.frame_nb = 0;
+ /* Default for I-mode. */
+ instLB->ISACencLB_obj.bottleneck = 32000;
+ instLB->ISACencLB_obj.current_framesamples = 0;
+ instLB->ISACencLB_obj.s2nr = 0;
+ instLB->ISACencLB_obj.payloadLimitBytes30 = STREAM_SIZE_MAX_30;
+ instLB->ISACencLB_obj.payloadLimitBytes60 = STREAM_SIZE_MAX_60;
+ instLB->ISACencLB_obj.maxPayloadBytes = STREAM_SIZE_MAX_60;
+ instLB->ISACencLB_obj.maxRateInBytes = STREAM_SIZE_MAX_30;
+ instLB->ISACencLB_obj.enforceFrameSize = 0;
+  /* An invalid value prevents getRedPayload from
+     running before the encoder is called. */
+ instLB->ISACencLB_obj.lastBWIdx = -1;
+ return statusInit;
+}
+
+static int16_t EncoderInitUb(ISACUBStruct* instUB,
+ int16_t bandwidth) {
+ int16_t statusInit = 0;
+ int k;
+
+ /* Init stream vector to zero. */
+ for (k = 0; k < STREAM_SIZE_MAX_60; k++) {
+ instUB->ISACencUB_obj.bitstr_obj.stream[k] = 0;
+ }
+
+ WebRtcIsac_InitMasking(&instUB->ISACencUB_obj.maskfiltstr_obj);
+ WebRtcIsac_InitPreFilterbank(&instUB->ISACencUB_obj.prefiltbankstr_obj);
+
+ if (bandwidth == isac16kHz) {
+ instUB->ISACencUB_obj.buffer_index = LB_TOTAL_DELAY_SAMPLES;
+ } else {
+ instUB->ISACencUB_obj.buffer_index = 0;
+ }
+ /* Default for I-mode. */
+ instUB->ISACencUB_obj.bottleneck = 32000;
+ /* These store the limits for the wideband + super-wideband bit-stream. */
+ instUB->ISACencUB_obj.maxPayloadSizeBytes = STREAM_SIZE_MAX_30 << 1;
+ /* This has to be updated after each lower-band encoding to guarantee
+ * a correct payload-limitation. */
+ instUB->ISACencUB_obj.numBytesUsed = 0;
+ memset(instUB->ISACencUB_obj.data_buffer_float, 0,
+ (MAX_FRAMESAMPLES + LB_TOTAL_DELAY_SAMPLES) * sizeof(float));
+
+ memcpy(&(instUB->ISACencUB_obj.lastLPCVec),
+ WebRtcIsac_kMeanLarUb16, sizeof(double) * UB_LPC_ORDER);
+
+ return statusInit;
+}
+
+
+int16_t WebRtcIsac_EncoderInit(ISACStruct* ISAC_main_inst,
+ int16_t codingMode) {
+ ISACMainStruct* instISAC = (ISACMainStruct*)ISAC_main_inst;
+ int16_t status;
+
+ if ((codingMode != 0) && (codingMode != 1)) {
+ instISAC->errorCode = ISAC_DISALLOWED_CODING_MODE;
+ return -1;
+ }
+ /* Default bottleneck. */
+ instISAC->bottleneck = MAX_ISAC_BW;
+
+ if (instISAC->encoderSamplingRateKHz == kIsacWideband) {
+ instISAC->bandwidthKHz = isac8kHz;
+ instISAC->maxPayloadSizeBytes = STREAM_SIZE_MAX_60;
+ instISAC->maxRateBytesPer30Ms = STREAM_SIZE_MAX_30;
+ } else {
+ instISAC->bandwidthKHz = isac16kHz;
+ instISAC->maxPayloadSizeBytes = STREAM_SIZE_MAX;
+ instISAC->maxRateBytesPer30Ms = STREAM_SIZE_MAX;
+ }
+
+ /* Channel-adaptive = 0; Instantaneous (Channel-independent) = 1. */
+ instISAC->codingMode = codingMode;
+
+ WebRtcIsac_InitBandwidthEstimator(&instISAC->bwestimator_obj,
+ instISAC->encoderSamplingRateKHz,
+ instISAC->decoderSamplingRateKHz);
+
+ WebRtcIsac_InitRateModel(&instISAC->rate_data_obj);
+ /* Default for I-mode. */
+ instISAC->MaxDelay = 10.0;
+
+ status = EncoderInitLb(&instISAC->instLB, codingMode,
+ instISAC->encoderSamplingRateKHz);
+ if (status < 0) {
+ instISAC->errorCode = -status;
+ return -1;
+ }
+
+ if (instISAC->encoderSamplingRateKHz == kIsacSuperWideband) {
+ /* Initialize encoder filter-bank. */
+ memset(instISAC->analysisFBState1, 0,
+ FB_STATE_SIZE_WORD32 * sizeof(int32_t));
+ memset(instISAC->analysisFBState2, 0,
+ FB_STATE_SIZE_WORD32 * sizeof(int32_t));
+
+ status = EncoderInitUb(&(instISAC->instUB),
+ instISAC->bandwidthKHz);
+ if (status < 0) {
+ instISAC->errorCode = -status;
+ return -1;
+ }
+ }
+ /* Initialization is successful, set the flag. */
+ instISAC->initFlag |= BIT_MASK_ENC_INIT;
+ return 0;
+}
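+
+/* Usage sketch (illustrative only): initializing the encoder in
+ * instantaneous mode (codingMode == 1) right after creation; pass 0 for
+ * channel-adaptive mode instead.
+ *
+ *   if (WebRtcIsac_EncoderInit(inst, 1) != 0) {
+ *     int16_t err = WebRtcIsac_GetErrorCode(inst);
+ *     // e.g. ISAC_DISALLOWED_CODING_MODE if codingMode is not 0 or 1.
+ *   }
+ */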
+
+
+/****************************************************************************
+ * WebRtcIsac_Encode(...)
+ *
+ * This function encodes 10 ms frames and inserts them into a packet.
+ * Input speech length has to be 160 samples (10 ms). The encoder buffers
+ * those 10 ms frames until it reaches the chosen frame size (480 or 960
+ * samples, corresponding to 30 or 60 ms frames), and then encodes.
+ *
+ * Input:
+ * - ISAC_main_inst : ISAC instance.
+ * - speechIn : input speech vector.
+ *
+ * Output:
+ * - encoded : the encoded data vector
+ *
+ * Return value:
+ * : >0 - Length (in bytes) of coded data
+ * : 0 - The buffer didn't reach the chosen
+ * frameSize so it keeps buffering speech
+ * samples.
+ * : -1 - Error
+ */
+int WebRtcIsac_Encode(ISACStruct* ISAC_main_inst,
+ const int16_t* speechIn,
+ uint8_t* encoded) {
+ float inFrame[FRAMESAMPLES_10ms];
+ int16_t speechInLB[FRAMESAMPLES_10ms];
+ int16_t speechInUB[FRAMESAMPLES_10ms];
+ int streamLenLB = 0;
+ int streamLenUB = 0;
+ int streamLen = 0;
+ size_t k = 0;
+ uint8_t garbageLen = 0;
+ int32_t bottleneck = 0;
+ int16_t bottleneckIdx = 0;
+ int16_t jitterInfo = 0;
+
+ ISACMainStruct* instISAC = (ISACMainStruct*)ISAC_main_inst;
+ ISACLBStruct* instLB = &(instISAC->instLB);
+ ISACUBStruct* instUB = &(instISAC->instUB);
+
+ /* Check if encoder initiated. */
+ if ((instISAC->initFlag & BIT_MASK_ENC_INIT) !=
+ BIT_MASK_ENC_INIT) {
+ instISAC->errorCode = ISAC_ENCODER_NOT_INITIATED;
+ return -1;
+ }
+
+ if (instISAC->encoderSamplingRateKHz == kIsacSuperWideband) {
+ WebRtcSpl_AnalysisQMF(speechIn, SWBFRAMESAMPLES_10ms, speechInLB,
+ speechInUB, instISAC->analysisFBState1,
+ instISAC->analysisFBState2);
+
+ /* Convert from fixed to floating point. */
+ for (k = 0; k < FRAMESAMPLES_10ms; k++) {
+ inFrame[k] = (float)speechInLB[k];
+ }
+ } else {
+ for (k = 0; k < FRAMESAMPLES_10ms; k++) {
+ inFrame[k] = (float) speechIn[k];
+ }
+ }
+
+ /* Add some noise to avoid denormal numbers. */
+ inFrame[0] += (float)1.23455334e-3;
+ inFrame[1] -= (float)2.04324239e-3;
+ inFrame[2] += (float)1.90854954e-3;
+ inFrame[9] += (float)1.84854878e-3;
+
+ /* This function will update the bottleneck if required. */
+ UpdateBottleneck(instISAC);
+
+  /* Get the bandwidth information which has to be sent to the other side. */
+ GetSendBandwidthInfo(instISAC, &bottleneckIdx, &jitterInfo);
+
+ /* Encode lower-band. */
+ streamLenLB = WebRtcIsac_EncodeLb(&instISAC->transform_tables,
+ inFrame, &instLB->ISACencLB_obj,
+ instISAC->codingMode, bottleneckIdx);
+ if (streamLenLB < 0) {
+ return -1;
+ }
+
+ if (instISAC->encoderSamplingRateKHz == kIsacSuperWideband) {
+ instUB = &(instISAC->instUB);
+
+ /* Convert to float. */
+ for (k = 0; k < FRAMESAMPLES_10ms; k++) {
+ inFrame[k] = (float) speechInUB[k];
+ }
+
+ /* Add some noise to avoid denormal numbers. */
+ inFrame[0] += (float)1.23455334e-3;
+ inFrame[1] -= (float)2.04324239e-3;
+ inFrame[2] += (float)1.90854954e-3;
+ inFrame[9] += (float)1.84854878e-3;
+
+    /* Tell the upper-band the number of bytes used so far.
+     * This is for payload limitation. */
+ instUB->ISACencUB_obj.numBytesUsed =
+ (int16_t)(streamLenLB + 1 + LEN_CHECK_SUM_WORD8);
+ /* Encode upper-band. */
+ switch (instISAC->bandwidthKHz) {
+ case isac12kHz: {
+ streamLenUB = WebRtcIsac_EncodeUb12(&instISAC->transform_tables,
+ inFrame, &instUB->ISACencUB_obj,
+ jitterInfo);
+ break;
+ }
+ case isac16kHz: {
+ streamLenUB = WebRtcIsac_EncodeUb16(&instISAC->transform_tables,
+ inFrame, &instUB->ISACencUB_obj,
+ jitterInfo);
+ break;
+ }
+ case isac8kHz: {
+ streamLenUB = 0;
+ break;
+ }
+ }
+
+ if ((streamLenUB < 0) && (streamLenUB != -ISAC_PAYLOAD_LARGER_THAN_LIMIT)) {
+ /* An error has happened but this is not the error due to a
+ * bit-stream larger than the limit. */
+ return -1;
+ }
+
+ if (streamLenLB == 0) {
+ return 0;
+ }
+
+    /* One byte is allocated for the length. To remain compatible with
+       older decoders, the length of the upper-band bit-stream plus one
+       byte for the size and LEN_CHECK_SUM_WORD8 bytes for the checksum
+       should be less than or equal to 255. */
+ if ((streamLenUB > (255 - (LEN_CHECK_SUM_WORD8 + 1))) ||
+ (streamLenUB == -ISAC_PAYLOAD_LARGER_THAN_LIMIT)) {
+      /* The bit-stream is too long; skip the upper-band
+       * bit-stream for this frame. */
+ streamLenUB = 0;
+ }
+
+ memcpy(encoded, instLB->ISACencLB_obj.bitstr_obj.stream, streamLenLB);
+ streamLen = streamLenLB;
+ if (streamLenUB > 0) {
+ encoded[streamLenLB] = (uint8_t)(streamLenUB + 1 + LEN_CHECK_SUM_WORD8);
+ memcpy(&encoded[streamLenLB + 1],
+ instUB->ISACencUB_obj.bitstr_obj.stream,
+ streamLenUB);
+ streamLen += encoded[streamLenLB];
+ } else {
+ encoded[streamLenLB] = 0;
+ }
+ } else {
+ if (streamLenLB == 0) {
+ return 0;
+ }
+ memcpy(encoded, instLB->ISACencLB_obj.bitstr_obj.stream, streamLenLB);
+ streamLenUB = 0;
+ streamLen = streamLenLB;
+ }
+
+ /* Add Garbage if required. */
+ bottleneck = WebRtcIsac_GetUplinkBandwidth(&instISAC->bwestimator_obj);
+ if (instISAC->codingMode == 0) {
+ int minBytes;
+ int limit;
+ uint8_t* ptrGarbage;
+
+ instISAC->MaxDelay = (double)WebRtcIsac_GetUplinkMaxDelay(
+ &instISAC->bwestimator_obj);
+
+ /* Update rate model and get minimum number of bytes in this packet. */
+ minBytes = WebRtcIsac_GetMinBytes(
+ &(instISAC->rate_data_obj), streamLen,
+ instISAC->instLB.ISACencLB_obj.current_framesamples, bottleneck,
+ instISAC->MaxDelay, instISAC->bandwidthKHz);
+
+    /* Make sure minBytes does not exceed the packet-size limit. */
+ if (instISAC->bandwidthKHz == isac8kHz) {
+ if (instLB->ISACencLB_obj.current_framesamples == FRAMESAMPLES) {
+ limit = instLB->ISACencLB_obj.payloadLimitBytes30;
+ } else {
+ limit = instLB->ISACencLB_obj.payloadLimitBytes60;
+ }
+ } else {
+ limit = instUB->ISACencUB_obj.maxPayloadSizeBytes;
+ }
+ minBytes = (minBytes > limit) ? limit : minBytes;
+
+ /* Make sure we don't allow more than 255 bytes of garbage data.
+ * We store the length of the garbage data in 8 bits in the bitstream,
+ * 255 is the max garbage length we can signal using 8 bits. */
+ if ((instISAC->bandwidthKHz == isac8kHz) ||
+ (streamLenUB == 0)) {
+ ptrGarbage = &encoded[streamLenLB];
+ limit = streamLen + 255;
+ } else {
+ ptrGarbage = &encoded[streamLenLB + 1 + streamLenUB];
+ limit = streamLen + (255 - encoded[streamLenLB]);
+ }
+ minBytes = (minBytes > limit) ? limit : minBytes;
+
+ garbageLen = (minBytes > streamLen) ? (uint8_t)(minBytes - streamLen) : 0;
+
+ /* Save data for creation of multiple bit-streams. */
+    /* If the bit-stream is too short, add garbage at the end. */
+ if (garbageLen > 0) {
+ /* Overwrite the garbage area to avoid leaking possibly sensitive data
+ over the network. This also makes the output deterministic. */
+ memset(ptrGarbage, 0, garbageLen);
+
+      /* Ensure a correct length of the upper-band bit-stream together
+       * with the garbage. Garbage is embedded in the upper-band
+       * bit-stream; that is the only way to preserve backward
+       * compatibility. */
+ if ((instISAC->bandwidthKHz == isac8kHz) ||
+ (streamLenUB == 0)) {
+ encoded[streamLenLB] = garbageLen;
+ } else {
+ encoded[streamLenLB] += garbageLen;
+        /* Write the length of the garbage at the end of the upper-band
+         * bit-stream, if it exists. This helps with sanity checking. */
+        encoded[streamLenLB + 1 + streamLenUB] = garbageLen;
+      }
+ streamLen += garbageLen;
+ }
+ } else {
+    /* Update the rate model. */
+ WebRtcIsac_UpdateRateModel(
+ &instISAC->rate_data_obj, streamLen,
+ instISAC->instLB.ISACencLB_obj.current_framesamples, bottleneck);
+ garbageLen = 0;
+ }
+
+ /* Generate CRC if required. */
+ if ((instISAC->bandwidthKHz != isac8kHz) && (streamLenUB > 0)) {
+ uint32_t crc;
+
+ WebRtcIsac_GetCrc((int16_t*)(&(encoded[streamLenLB + 1])),
+ streamLenUB + garbageLen, &crc);
+#ifndef WEBRTC_ARCH_BIG_ENDIAN
+ for (k = 0; k < LEN_CHECK_SUM_WORD8; k++) {
+ encoded[streamLen - LEN_CHECK_SUM_WORD8 + k] =
+ (uint8_t)(crc >> (24 - k * 8));
+ }
+#else
+ memcpy(&encoded[streamLenLB + streamLenUB + 1], &crc, LEN_CHECK_SUM_WORD8);
+#endif
+ }
+ return streamLen;
+}
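+
+/* Usage sketch (illustrative only): feeding 10 ms blocks until a packet
+ * is produced. `frame10ms` is a hypothetical buffer of FRAMESAMPLES_10ms
+ * samples (16 kHz input); STREAM_SIZE_MAX bytes suffice for `payload`.
+ *
+ *   uint8_t payload[STREAM_SIZE_MAX];
+ *   int len = WebRtcIsac_Encode(inst, frame10ms, payload);
+ *   if (len > 0) {
+ *     // A complete 30 or 60 ms packet of `len` bytes is ready to send.
+ *   } else if (len == 0) {
+ *     // Still buffering; push the next 10 ms block.
+ *   }
+ */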
+
+
+/******************************************************************************
+ * WebRtcIsac_GetNewBitStream(...)
+ *
+ * This function returns encoded data, with the received bwe-index in the
+ * stream. If the rate is set to a value less than the bottleneck of the
+ * codec, the new bit-stream will be re-encoded with the given target rate.
+ * It should always return a complete packet, i.e. it is only called once
+ * even for 60 ms frames.
+ *
+ * NOTE 1! This function does not write to the ISACStruct; that is not allowed.
+ * NOTE 2! Rates larger than the bottleneck of the codec will be limited
+ * to the current bottleneck.
+ *
+ * Input:
+ * - ISAC_main_inst : ISAC instance.
+ * - bweIndex : Index of bandwidth estimate to put in new
+ * bitstream
+ *        - rate              : target rate of the transcoder in bits/sec.
+ * Valid values are the accepted rate in iSAC,
+ * i.e. 10000 to 56000.
+ *
+ * Output:
+ * - encoded : The encoded data vector
+ *
+ * Return value : >0 - Length (in bytes) of coded data
+ * -1 - Error or called in SWB mode
+ * NOTE! No error code is written to
+ * the struct since it is only allowed to read
+ * the struct.
+ */
+int16_t WebRtcIsac_GetNewBitStream(ISACStruct* ISAC_main_inst,
+ int16_t bweIndex,
+ int16_t jitterInfo,
+ int32_t rate,
+ uint8_t* encoded,
+ int16_t isRCU) {
+ Bitstr iSACBitStreamInst; /* Local struct for bitstream handling */
+ int16_t streamLenLB;
+ int16_t streamLenUB;
+ int16_t totalStreamLen;
+ double gain2;
+ double gain1;
+ float scale;
+ enum ISACBandwidth bandwidthKHz;
+ double rateLB;
+ double rateUB;
+ int32_t currentBN;
+ uint32_t crc;
+#ifndef WEBRTC_ARCH_BIG_ENDIAN
+ int16_t k;
+#endif
+ ISACMainStruct* instISAC = (ISACMainStruct*)ISAC_main_inst;
+
+ if ((instISAC->initFlag & BIT_MASK_ENC_INIT) !=
+ BIT_MASK_ENC_INIT) {
+ return -1;
+ }
+
+ /* Get the bottleneck of this iSAC and limit the
+ * given rate to the current bottleneck. */
+ WebRtcIsac_GetUplinkBw(ISAC_main_inst, &currentBN);
+ if (rate > currentBN) {
+ rate = currentBN;
+ }
+
+ if (WebRtcIsac_RateAllocation(rate, &rateLB, &rateUB, &bandwidthKHz) < 0) {
+ return -1;
+ }
+
+ /* Cannot transcode from 16 kHz to 12 kHz. */
+ if ((bandwidthKHz == isac12kHz) &&
+ (instISAC->bandwidthKHz == isac16kHz)) {
+ return -1;
+ }
+
+ /* A gain [dB] for the given rate. */
+ gain1 = WebRtcIsac_GetSnr(
+ rateLB, instISAC->instLB.ISACencLB_obj.current_framesamples);
+ /* The gain [dB] of this iSAC. */
+ gain2 = WebRtcIsac_GetSnr(
+ instISAC->instLB.ISACencLB_obj.bottleneck,
+ instISAC->instLB.ISACencLB_obj.current_framesamples);
+
+ /* Scale is the ratio of two gains in normal domain. */
+ scale = (float)pow(10, (gain1 - gain2) / 20.0);
+ /* Change the scale if this is a RCU bit-stream. */
+ scale = (isRCU) ? (scale * RCU_TRANSCODING_SCALE) : scale;
+
+ streamLenLB = WebRtcIsac_EncodeStoredDataLb(
+ &instISAC->instLB.ISACencLB_obj.SaveEnc_obj,
+ &iSACBitStreamInst, bweIndex, scale);
+
+ if (streamLenLB < 0) {
+ return -1;
+ }
+
+  /* Copy the lower-band bit-stream to the output. */
+ memcpy(encoded, iSACBitStreamInst.stream, streamLenLB);
+
+ if (bandwidthKHz == isac8kHz) {
+ return streamLenLB;
+ }
+
+ totalStreamLen = streamLenLB;
+  /* Super-wideband always runs at 30 ms frames.
+   * These gains are in dB.
+   * Gain for the given rate. */
+ gain1 = WebRtcIsac_GetSnr(rateUB, FRAMESAMPLES);
+ /* Gain of this iSAC */
+ gain2 = WebRtcIsac_GetSnr(instISAC->instUB.ISACencUB_obj.bottleneck,
+ FRAMESAMPLES);
+
+ /* Scale is the ratio of two gains in normal domain. */
+ scale = (float)pow(10, (gain1 - gain2) / 20.0);
+
+ /* Change the scale if this is a RCU bit-stream. */
+ scale = (isRCU)? (scale * RCU_TRANSCODING_SCALE_UB) : scale;
+
+ streamLenUB = WebRtcIsac_EncodeStoredDataUb(
+ &(instISAC->instUB.ISACencUB_obj.SaveEnc_obj),
+ &iSACBitStreamInst, jitterInfo, scale,
+ instISAC->bandwidthKHz);
+
+ if (streamLenUB < 0) {
+ return -1;
+ }
+
+ if (streamLenUB + 1 + LEN_CHECK_SUM_WORD8 > 255) {
+ return streamLenLB;
+ }
+
+ totalStreamLen = streamLenLB + streamLenUB + 1 + LEN_CHECK_SUM_WORD8;
+ encoded[streamLenLB] = streamLenUB + 1 + LEN_CHECK_SUM_WORD8;
+
+ memcpy(&encoded[streamLenLB + 1], iSACBitStreamInst.stream,
+ streamLenUB);
+
+ WebRtcIsac_GetCrc((int16_t*)(&(encoded[streamLenLB + 1])),
+ streamLenUB, &crc);
+#ifndef WEBRTC_ARCH_BIG_ENDIAN
+ for (k = 0; k < LEN_CHECK_SUM_WORD8; k++) {
+ encoded[totalStreamLen - LEN_CHECK_SUM_WORD8 + k] =
+ (uint8_t)((crc >> (24 - k * 8)) & 0xFF);
+ }
+#else
+ memcpy(&encoded[streamLenLB + streamLenUB + 1], &crc,
+ LEN_CHECK_SUM_WORD8);
+#endif
+ return totalStreamLen;
+}
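+
+/* Usage sketch (illustrative only): transcoding the most recently encoded
+ * frame to a lower target rate. `bweIndex` and `jitterInfo` are assumed
+ * to come from WebRtcIsac_GetDownLinkBwIndex() on the receive side.
+ *
+ *   uint8_t transcoded[STREAM_SIZE_MAX];
+ *   int16_t len = WebRtcIsac_GetNewBitStream(inst, bweIndex, jitterInfo,
+ *                                            24000, transcoded, 0);
+ *   // len > 0: a complete packet at (at most) 24 kbps; -1 on error.
+ */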
+
+
+/****************************************************************************
+ * DecoderInitLb(...) - internal function for initialization of
+ * Lower Band
+ * DecoderInitUb(...) - internal function for initialization of
+ * Upper Band
+ * WebRtcIsac_DecoderInit(...) - API function
+ *
+ * This function initializes an iSAC instance prior to the decoder calls.
+ *
+ * Input:
+ * - ISAC_main_inst : ISAC instance.
+ */
+static void DecoderInitLb(ISACLBStruct* instISAC) {
+ int i;
+ /* Initialize stream vector to zero. */
+ for (i = 0; i < STREAM_SIZE_MAX_60; i++) {
+ instISAC->ISACdecLB_obj.bitstr_obj.stream[i] = 0;
+ }
+
+ WebRtcIsac_InitMasking(&instISAC->ISACdecLB_obj.maskfiltstr_obj);
+ WebRtcIsac_InitPostFilterbank(
+ &instISAC->ISACdecLB_obj.postfiltbankstr_obj);
+ WebRtcIsac_InitPitchFilter(&instISAC->ISACdecLB_obj.pitchfiltstr_obj);
+}
+
+static void DecoderInitUb(ISACUBStruct* instISAC) {
+ int i;
+ /* Init stream vector to zero */
+ for (i = 0; i < STREAM_SIZE_MAX_60; i++) {
+ instISAC->ISACdecUB_obj.bitstr_obj.stream[i] = 0;
+ }
+
+ WebRtcIsac_InitMasking(&instISAC->ISACdecUB_obj.maskfiltstr_obj);
+ WebRtcIsac_InitPostFilterbank(
+ &instISAC->ISACdecUB_obj.postfiltbankstr_obj);
+}
+
+void WebRtcIsac_DecoderInit(ISACStruct* ISAC_main_inst) {
+ ISACMainStruct* instISAC = (ISACMainStruct*)ISAC_main_inst;
+
+ DecoderInitLb(&instISAC->instLB);
+ if (instISAC->decoderSamplingRateKHz == kIsacSuperWideband) {
+ memset(instISAC->synthesisFBState1, 0,
+ FB_STATE_SIZE_WORD32 * sizeof(int32_t));
+ memset(instISAC->synthesisFBState2, 0,
+ FB_STATE_SIZE_WORD32 * sizeof(int32_t));
+ DecoderInitUb(&(instISAC->instUB));
+ }
+ if ((instISAC->initFlag & BIT_MASK_ENC_INIT) != BIT_MASK_ENC_INIT) {
+ WebRtcIsac_InitBandwidthEstimator(&instISAC->bwestimator_obj,
+ instISAC->encoderSamplingRateKHz,
+ instISAC->decoderSamplingRateKHz);
+ }
+ instISAC->initFlag |= BIT_MASK_DEC_INIT;
+ instISAC->resetFlag_8kHz = 0;
+}
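+
+/* Usage sketch (illustrative only): a decoder-only instance. Note that
+ * DecoderInit() returns void, and it also initializes the bandwidth
+ * estimator when the encoder has not been initialized on this instance.
+ *
+ *   ISACStruct* dec = NULL;
+ *   if (WebRtcIsac_Create(&dec) == 0) {
+ *     WebRtcIsac_DecoderInit(dec);
+ *     // dec is ready for WebRtcIsac_UpdateBwEstimate()/WebRtcIsac_Decode().
+ *   }
+ */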
+
+
+/****************************************************************************
+ * WebRtcIsac_UpdateBwEstimate(...)
+ *
+ * This function updates the estimate of the bandwidth.
+ *
+ * NOTE:
+ * The bandwidth estimate is not valid if the sample rate of the far-end
+ * encoder is set to 48 kHz and send timestamps are incremented according
+ * to the 48 kHz sampling rate.
+ *
+ * Input:
+ * - ISAC_main_inst : ISAC instance.
+ * - encoded : encoded ISAC frame(s).
+ * - packet_size : size of the packet.
+ * - rtp_seq_number : the RTP number of the packet.
+ * - arr_ts : the arrival time of the packet (from NetEq)
+ * in samples.
+ *
+ * Return value : 0 - Ok
+ * -1 - Error
+ */
+int16_t WebRtcIsac_UpdateBwEstimate(ISACStruct* ISAC_main_inst,
+ const uint8_t* encoded,
+ size_t packet_size,
+ uint16_t rtp_seq_number,
+ uint32_t send_ts,
+ uint32_t arr_ts) {
+ ISACMainStruct* instISAC = (ISACMainStruct*)ISAC_main_inst;
+ Bitstr streamdata;
+#ifndef WEBRTC_ARCH_BIG_ENDIAN
+ int k;
+#endif
+ int16_t err;
+
+ /* Check if decoder initiated. */
+ if ((instISAC->initFlag & BIT_MASK_DEC_INIT) != BIT_MASK_DEC_INIT) {
+ instISAC->errorCode = ISAC_DECODER_NOT_INITIATED;
+ return -1;
+ }
+
+ /* Check that the size of the packet is valid, and if not return without
+ * updating the bandwidth estimate. A valid size is at least 10 bytes. */
+ if (packet_size < 10) {
+    /* Return an error code if the packet is too short. */
+ instISAC->errorCode = ISAC_EMPTY_PACKET;
+ return -1;
+ }
+
+ WebRtcIsac_ResetBitstream(&(streamdata));
+
+#ifndef WEBRTC_ARCH_BIG_ENDIAN
+ for (k = 0; k < 10; k++) {
+ uint16_t ek = ((const uint16_t*)encoded)[k >> 1];
+ streamdata.stream[k] = (uint8_t)((ek >> ((k & 1) << 3)) & 0xff);
+ }
+#else
+ memcpy(streamdata.stream, encoded, 10);
+#endif
+
+ err = WebRtcIsac_EstimateBandwidth(&instISAC->bwestimator_obj, &streamdata,
+ packet_size, rtp_seq_number, send_ts,
+ arr_ts, instISAC->encoderSamplingRateKHz,
+ instISAC->decoderSamplingRateKHz);
+ if (err < 0) {
+ /* Return error code if something went wrong. */
+ instISAC->errorCode = -err;
+ return -1;
+ }
+ return 0;
+}
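+
+/* Usage sketch (illustrative only): updating the BWE once per received
+ * packet, before decoding. `rtp_seq`, `send_ts` and `arr_ts` are
+ * hypothetical values taken from the RTP header and the arrival clock.
+ *
+ *   if (WebRtcIsac_UpdateBwEstimate(dec, payload, payload_len,
+ *                                   rtp_seq, send_ts, arr_ts) != 0) {
+ *     // e.g. ISAC_EMPTY_PACKET for payloads shorter than 10 bytes.
+ *   }
+ */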
+
+static int Decode(ISACStruct* ISAC_main_inst,
+ const uint8_t* encoded,
+ size_t lenEncodedBytes,
+ int16_t* decoded,
+ int16_t* speechType,
+ int16_t isRCUPayload) {
+ /* Number of samples (480 or 960), output from decoder
+ that were actually used in the encoder/decoder
+ (determined on the fly). */
+ int16_t numSamplesLB;
+ int16_t numSamplesUB;
+ int16_t speechIdx;
+ float outFrame[MAX_FRAMESAMPLES];
+ int16_t outFrameLB[MAX_FRAMESAMPLES];
+ int16_t outFrameUB[MAX_FRAMESAMPLES];
+ int numDecodedBytesLBint;
+ size_t numDecodedBytesLB;
+ int numDecodedBytesUB;
+ size_t lenEncodedLBBytes;
+ int16_t validChecksum = 1;
+ int16_t k;
+ uint16_t numLayer;
+ size_t totSizeBytes;
+ int16_t err;
+
+ ISACMainStruct* instISAC = (ISACMainStruct*)ISAC_main_inst;
+ ISACUBDecStruct* decInstUB = &(instISAC->instUB.ISACdecUB_obj);
+ ISACLBDecStruct* decInstLB = &(instISAC->instLB.ISACdecLB_obj);
+
+ /* Check if decoder initiated. */
+ if ((instISAC->initFlag & BIT_MASK_DEC_INIT) !=
+ BIT_MASK_DEC_INIT) {
+ instISAC->errorCode = ISAC_DECODER_NOT_INITIATED;
+ return -1;
+ }
+
+ if (lenEncodedBytes == 0) {
+ /* return error code if the packet length is null. */
+ instISAC->errorCode = ISAC_EMPTY_PACKET;
+ return -1;
+ }
+
+  /* The size of the encoded lower-band is bounded by STREAM_SIZE_MAX. If
+   * a payload larger than STREAM_SIZE_MAX is received, it is not
+   * considered erroneous. */
+ lenEncodedLBBytes = (lenEncodedBytes > STREAM_SIZE_MAX) ?
+ STREAM_SIZE_MAX : lenEncodedBytes;
+
+ /* Copy to lower-band bit-stream structure. */
+ memcpy(instISAC->instLB.ISACdecLB_obj.bitstr_obj.stream, encoded,
+ lenEncodedLBBytes);
+
+ /* We need to initialize numSamplesLB to something; otherwise, in the test
+ for whether we should return -1 below, the compiler might generate code
+ that fools Memcheck (Valgrind) into thinking that the control flow depends
+ on the uninitialized value in numSamplesLB (since WebRtcIsac_DecodeLb will
+ not fill it in if it fails and returns -1). */
+ numSamplesLB = 0;
+
+  /* Regardless of whether the current codec is set up to work in
+   * wideband or super-wideband, the decoding of the lower-band
+   * has to be performed. */
+ numDecodedBytesLBint = WebRtcIsac_DecodeLb(&instISAC->transform_tables,
+ outFrame, decInstLB,
+ &numSamplesLB, isRCUPayload);
+ numDecodedBytesLB = (size_t)numDecodedBytesLBint;
+ if ((numDecodedBytesLBint < 0) ||
+ (numDecodedBytesLB > lenEncodedLBBytes) ||
+ (numSamplesLB > MAX_FRAMESAMPLES)) {
+ instISAC->errorCode = ISAC_LENGTH_MISMATCH;
+ return -1;
+ }
+
+  /* Error check: we accept a multi-layer bit-stream. This check limits
+   * the number of iterations of the while loop; even without it the
+   * number of iterations is bounded. */
+ numLayer = 1;
+ totSizeBytes = numDecodedBytesLB;
+ while (totSizeBytes != lenEncodedBytes) {
+ if ((totSizeBytes > lenEncodedBytes) ||
+ (encoded[totSizeBytes] == 0) ||
+ (numLayer > MAX_NUM_LAYERS)) {
+ instISAC->errorCode = ISAC_LENGTH_MISMATCH;
+ return -1;
+ }
+ totSizeBytes += encoded[totSizeBytes];
+ numLayer++;
+ }
+
+ if (instISAC->decoderSamplingRateKHz == kIsacWideband) {
+ for (k = 0; k < numSamplesLB; k++) {
+ if (outFrame[k] > 32767) {
+ decoded[k] = 32767;
+ } else if (outFrame[k] < -32768) {
+ decoded[k] = -32768;
+ } else {
+ decoded[k] = (int16_t)WebRtcIsac_lrint(outFrame[k]);
+ }
+ }
+ numSamplesUB = 0;
+ } else {
+ uint32_t crc;
+    /* We don't accept frame-sizes larger than 30 ms (480 samples at
+     * the lower-band). */
+ for (k = 0; k < numSamplesLB; k++) {
+ if (outFrame[k] > 32767) {
+ outFrameLB[k] = 32767;
+ } else if (outFrame[k] < -32768) {
+ outFrameLB[k] = -32768;
+ } else {
+ outFrameLB[k] = (int16_t)WebRtcIsac_lrint(outFrame[k]);
+ }
+ }
+
+    /* Check for possible errors, and whether an upper-band stream exists. */
+ if (numDecodedBytesLB == lenEncodedBytes) {
+ /* Decoding was successful. No super-wideband bit-stream exists. */
+ numSamplesUB = numSamplesLB;
+ memset(outFrameUB, 0, sizeof(int16_t) * numSamplesUB);
+
+ /* Prepare for the potential increase of signal bandwidth. */
+ instISAC->resetFlag_8kHz = 2;
+ } else {
+      /* This includes the checksum and the byte that stores the length. */
+ int16_t lenNextStream = encoded[numDecodedBytesLB];
+
+      /* Is this garbage or a valid super-wideband bit-stream?
+       * Check if the checksum is valid. */
+ if (lenNextStream <= (LEN_CHECK_SUM_WORD8 + 1)) {
+        /* Such a small second layer cannot be a super-wideband layer.
+         * It must be short garbage. */
+ validChecksum = 0;
+ } else {
+        /* Run CRC to see if the checksum matches. */
+ WebRtcIsac_GetCrc((int16_t*)(&encoded[numDecodedBytesLB + 1]),
+ lenNextStream - LEN_CHECK_SUM_WORD8 - 1, &crc);
+
+ validChecksum = 1;
+ for (k = 0; k < LEN_CHECK_SUM_WORD8; k++) {
+ validChecksum &= (((crc >> (24 - k * 8)) & 0xFF) ==
+ encoded[numDecodedBytesLB + lenNextStream -
+ LEN_CHECK_SUM_WORD8 + k]);
+ }
+ }
+
+ if (!validChecksum) {
+        /* This is garbage; we have received a wideband
+         * bit-stream with garbage appended. */
+ numSamplesUB = numSamplesLB;
+ memset(outFrameUB, 0, sizeof(int16_t) * numSamplesUB);
+ } else {
+        /* A valid super-wideband bit-stream exists. */
+ enum ISACBandwidth bandwidthKHz;
+ int32_t maxDelayBit;
+
+ /* If we have super-wideband bit-stream, we cannot
+ * have 60 ms frame-size. */
+ if (numSamplesLB > FRAMESAMPLES) {
+ instISAC->errorCode = ISAC_LENGTH_MISMATCH;
+ return -1;
+ }
+
+        /* The rest of the bit-stream contains the upper-band bit-stream.
+         * Currently this is the only thing there; however, we might add
+         * more layers. */
+
+        /* Have to exclude one byte where the length is stored
+         * and the last 'LEN_CHECK_SUM_WORD8' bytes where the
+         * checksum is stored. */
+ lenNextStream -= (LEN_CHECK_SUM_WORD8 + 1);
+
+ memcpy(decInstUB->bitstr_obj.stream,
+ &encoded[numDecodedBytesLB + 1], lenNextStream);
+
+ /* Reset bit-stream object, this is the first decoding. */
+ WebRtcIsac_ResetBitstream(&(decInstUB->bitstr_obj));
+
+ /* Decode jitter information. */
+ err = WebRtcIsac_DecodeJitterInfo(&decInstUB->bitstr_obj, &maxDelayBit);
+ if (err < 0) {
+ instISAC->errorCode = -err;
+ return -1;
+ }
+
+ /* Update jitter info which is in the upper-band bit-stream
+ * only if the encoder is in super-wideband. Otherwise,
+ * the jitter info is already embedded in bandwidth index
+ * and has been updated. */
+ if (instISAC->encoderSamplingRateKHz == kIsacSuperWideband) {
+ err = WebRtcIsac_UpdateUplinkJitter(
+ &(instISAC->bwestimator_obj), maxDelayBit);
+ if (err < 0) {
+ instISAC->errorCode = -err;
+ return -1;
+ }
+ }
+
+ /* Decode bandwidth information. */
+ err = WebRtcIsac_DecodeBandwidth(&decInstUB->bitstr_obj,
+ &bandwidthKHz);
+ if (err < 0) {
+ instISAC->errorCode = -err;
+ return -1;
+ }
+
+ switch (bandwidthKHz) {
+ case isac12kHz: {
+ numDecodedBytesUB = WebRtcIsac_DecodeUb12(
+ &instISAC->transform_tables, outFrame, decInstUB, isRCUPayload);
+
+ /* Hang-over for transient alleviation -
+ * wait two frames to add the upper band going up from 8 kHz. */
+ if (instISAC->resetFlag_8kHz > 0) {
+ if (instISAC->resetFlag_8kHz == 2) {
+                /* Silence the first one and a half frames. */
+ memset(outFrame, 0, MAX_FRAMESAMPLES *
+ sizeof(float));
+ } else {
+ const float rampStep = 2.0f / MAX_FRAMESAMPLES;
+ float rampVal = 0;
+ memset(outFrame, 0, (MAX_FRAMESAMPLES >> 1) *
+ sizeof(float));
+
+ /* Ramp up second half of second frame. */
+ for (k = MAX_FRAMESAMPLES / 2; k < MAX_FRAMESAMPLES; k++) {
+ outFrame[k] *= rampVal;
+ rampVal += rampStep;
+ }
+ }
+ instISAC->resetFlag_8kHz -= 1;
+ }
+
+ break;
+ }
+ case isac16kHz: {
+ numDecodedBytesUB = WebRtcIsac_DecodeUb16(
+ &instISAC->transform_tables, outFrame, decInstUB, isRCUPayload);
+ break;
+ }
+ default:
+ return -1;
+ }
+
+ if (numDecodedBytesUB < 0) {
+ instISAC->errorCode = numDecodedBytesUB;
+ return -1;
+ }
+ if (numDecodedBytesLB + numDecodedBytesUB > lenEncodedBytes) {
+ // We have supposedly decoded more bytes than we were given. Likely
+ // caused by bad input data.
+ instISAC->errorCode = ISAC_LENGTH_MISMATCH;
+ return -1;
+ }
+
+ /* It might be less due to garbage. */
+ if ((numDecodedBytesUB != lenNextStream) &&
+ (numDecodedBytesLB + 1 + numDecodedBytesUB >= lenEncodedBytes ||
+ numDecodedBytesUB !=
+ (lenNextStream -
+ encoded[numDecodedBytesLB + 1 + numDecodedBytesUB]))) {
+ instISAC->errorCode = ISAC_LENGTH_MISMATCH;
+ return -1;
+ }
+
+        /* If there is no error, the upper-band always decodes
+         * 30 ms (480 samples). */
+ numSamplesUB = FRAMESAMPLES;
+
+ /* Convert to W16. */
+ for (k = 0; k < numSamplesUB; k++) {
+ if (outFrame[k] > 32767) {
+ outFrameUB[k] = 32767;
+ } else if (outFrame[k] < -32768) {
+ outFrameUB[k] = -32768;
+ } else {
+ outFrameUB[k] = (int16_t)WebRtcIsac_lrint(
+ outFrame[k]);
+ }
+ }
+ }
+ }
+
+ speechIdx = 0;
+ while (speechIdx < numSamplesLB) {
+ WebRtcSpl_SynthesisQMF(&outFrameLB[speechIdx], &outFrameUB[speechIdx],
+ FRAMESAMPLES_10ms, &decoded[(speechIdx << 1)],
+ instISAC->synthesisFBState1,
+ instISAC->synthesisFBState2);
+
+ speechIdx += FRAMESAMPLES_10ms;
+ }
+ }
+ *speechType = 0;
+ return (numSamplesLB + numSamplesUB);
+}
+
+
+/****************************************************************************
+ * WebRtcIsac_Decode(...)
+ *
+ * This function decodes an iSAC frame. Output speech length
+ * will be a multiple of 480 samples: 480 or 960 samples,
+ * depending on the frameSize (30 or 60 ms).
+ *
+ * Input:
+ * - ISAC_main_inst : ISAC instance.
+ * - encoded : encoded ISAC frame(s)
+ * - len : bytes in encoded vector
+ *
+ * Output:
+ * - decoded : The decoded vector
+ *
+ * Return value : >0 - number of samples in decoded vector
+ * -1 - Error
+ */
+
+int WebRtcIsac_Decode(ISACStruct* ISAC_main_inst,
+ const uint8_t* encoded,
+ size_t lenEncodedBytes,
+ int16_t* decoded,
+ int16_t* speechType) {
+ int16_t isRCUPayload = 0;
+ return Decode(ISAC_main_inst, encoded, lenEncodedBytes, decoded,
+ speechType, isRCUPayload);
+}
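+
+/* Usage sketch (illustrative only): decoding one packet. The output is at
+ * most 960 samples (60 ms wideband, or 30 ms super-wideband after QMF
+ * synthesis), so a 960-sample buffer is a safe upper bound.
+ *
+ *   int16_t decoded[960];
+ *   int16_t speech_type;
+ *   int n = WebRtcIsac_Decode(dec, payload, payload_len,
+ *                             decoded, &speech_type);
+ *   // n is the number of decoded samples, or -1 on error.
+ */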
+
+/****************************************************************************
+ * WebRtcIsac_DecodeRcu(...)
+ *
+ * This function decodes a redundant (RCU) iSAC frame. Function is called in
+ * NetEq with a stored RCU payload in case of packet loss. Output speech length
+ * will be a multiple of 480 samples: 480 or 960 samples,
+ * depending on the framesize (30 or 60 ms).
+ *
+ * Input:
+ * - ISAC_main_inst : ISAC instance.
+ * - encoded : encoded ISAC RCU frame(s)
+ * - len : bytes in encoded vector
+ *
+ * Output:
+ * - decoded : The decoded vector
+ *
+ * Return value : >0 - number of samples in decoded vector
+ * -1 - Error
+ */
+
+int WebRtcIsac_DecodeRcu(ISACStruct* ISAC_main_inst,
+ const uint8_t* encoded,
+ size_t lenEncodedBytes,
+ int16_t* decoded,
+ int16_t* speechType) {
+ int16_t isRCUPayload = 1;
+ return Decode(ISAC_main_inst, encoded, lenEncodedBytes, decoded,
+ speechType, isRCUPayload);
+}
+
+
+/****************************************************************************
+ * WebRtcIsac_DecodePlc(...)
+ *
+ * This function conducts PLC for ISAC frame(s). Output speech length
+ * will be a multiple of 480 samples: 480 or 960 samples,
+ * depending on the frameSize (30 or 60 ms).
+ *
+ * Input:
+ * - ISAC_main_inst : ISAC instance.
+ * - noOfLostFrames : Number of PLC frames to produce
+ *
+ * Output:
+ * - decoded : The decoded vector
+ *
+ * Return value : Number of samples in decoded PLC vector
+ */
+size_t WebRtcIsac_DecodePlc(ISACStruct* ISAC_main_inst,
+ int16_t* decoded,
+ size_t noOfLostFrames) {
+ size_t numSamples = 0;
+ ISACMainStruct* instISAC = (ISACMainStruct*)ISAC_main_inst;
+
+  /* Limit the number of frames to two, i.e. 60 ms.
+   * Otherwise we exceed the data vectors. */
+ if (noOfLostFrames > 2) {
+ noOfLostFrames = 2;
+ }
+
+ /* Get the number of samples per frame */
+ switch (instISAC->decoderSamplingRateKHz) {
+ case kIsacWideband: {
+ numSamples = 480 * noOfLostFrames;
+ break;
+ }
+ case kIsacSuperWideband: {
+ numSamples = 960 * noOfLostFrames;
+ break;
+ }
+ }
+
+ /* Set output samples to zero. */
+ memset(decoded, 0, numSamples * sizeof(int16_t));
+ return numSamples;
+}
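+
+/* Usage sketch (illustrative only): producing concealment audio after a
+ * loss. The PLC implementation above simply outputs silence of the right
+ * length, clamped to at most two frames (60 ms).
+ *
+ *   int16_t plc[960 * 2];
+ *   size_t n = WebRtcIsac_DecodePlc(dec, plc, 1);
+ *   // n == 480 (wideband) or 960 (super-wideband) zeroed samples.
+ */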
+
+
+/****************************************************************************
+ * ControlLb(...) - Internal function for controlling Lower Band
+ * ControlUb(...) - Internal function for controlling Upper Band
+ * WebRtcIsac_Control(...) - API function
+ *
+ * This function sets the limit on the short-term average bit rate and the
+ * frame length. Should be used only in Instantaneous mode.
+ *
+ * Input:
+ * - ISAC_main_inst : ISAC instance.
+ * - rate : limit on the short-term average bit rate,
+ * in bits/second (between 10000 and 32000)
+ * - frameSize : number of milliseconds per frame (30 or 60)
+ *
+ * Return value : 0 - ok
+ * -1 - Error
+ */
+static int16_t ControlLb(ISACLBStruct* instISAC, double rate,
+ int16_t frameSize) {
+ if ((rate >= 10000) && (rate <= 32000)) {
+ instISAC->ISACencLB_obj.bottleneck = rate;
+ } else {
+ return -ISAC_DISALLOWED_BOTTLENECK;
+ }
+
+ if ((frameSize == 30) || (frameSize == 60)) {
+ instISAC->ISACencLB_obj.new_framelength = (FS / 1000) * frameSize;
+ } else {
+ return -ISAC_DISALLOWED_FRAME_LENGTH;
+ }
+
+ return 0;
+}
+
+static int16_t ControlUb(ISACUBStruct* instISAC, double rate) {
+ if ((rate >= 10000) && (rate <= 32000)) {
+ instISAC->ISACencUB_obj.bottleneck = rate;
+ } else {
+ return -ISAC_DISALLOWED_BOTTLENECK;
+ }
+ return 0;
+}
+
+int16_t WebRtcIsac_Control(ISACStruct* ISAC_main_inst,
+ int32_t bottleneckBPS,
+ int frameSize) {
+ ISACMainStruct* instISAC = (ISACMainStruct*)ISAC_main_inst;
+ int16_t status;
+ double rateLB;
+ double rateUB;
+ enum ISACBandwidth bandwidthKHz;
+
+ if (instISAC->codingMode == 0) {
+ /* In adaptive mode. */
+ instISAC->errorCode = ISAC_MODE_MISMATCH;
+ return -1;
+ }
+
+ /* Check if encoder initiated */
+ if ((instISAC->initFlag & BIT_MASK_ENC_INIT) !=
+ BIT_MASK_ENC_INIT) {
+ instISAC->errorCode = ISAC_ENCODER_NOT_INITIATED;
+ return -1;
+ }
+
+ if (instISAC->encoderSamplingRateKHz == kIsacWideband) {
+    /* If the sampling rate is 16 kHz, the bandwidth should be 8 kHz,
+     * regardless of the bottleneck. */
+ bandwidthKHz = isac8kHz;
+ rateLB = (bottleneckBPS > 32000) ? 32000 : bottleneckBPS;
+ rateUB = 0;
+ } else {
+ if (WebRtcIsac_RateAllocation(bottleneckBPS, &rateLB, &rateUB,
+ &bandwidthKHz) < 0) {
+ return -1;
+ }
+ }
+
+ if ((instISAC->encoderSamplingRateKHz == kIsacSuperWideband) &&
+ (frameSize != 30) &&
+ (bandwidthKHz != isac8kHz)) {
+ /* Cannot have 60 ms in super-wideband. */
+ instISAC->errorCode = ISAC_DISALLOWED_FRAME_LENGTH;
+ return -1;
+ }
+
+ status = ControlLb(&instISAC->instLB, rateLB, (int16_t)frameSize);
+ if (status < 0) {
+ instISAC->errorCode = -status;
+ return -1;
+ }
+ if (bandwidthKHz != isac8kHz) {
+ status = ControlUb(&(instISAC->instUB), rateUB);
+ if (status < 0) {
+ instISAC->errorCode = -status;
+ return -1;
+ }
+ }
+
+  /* If the bandwidth is changing from wideband to super-wideband, we
+   * have to sync the data buffers of the lower & upper-band. Also
+   * clean up the upper-band data buffer. */
+ if ((instISAC->bandwidthKHz == isac8kHz) && (bandwidthKHz != isac8kHz)) {
+ memset(instISAC->instUB.ISACencUB_obj.data_buffer_float, 0,
+ sizeof(float) * (MAX_FRAMESAMPLES + LB_TOTAL_DELAY_SAMPLES));
+
+ if (bandwidthKHz == isac12kHz) {
+ instISAC->instUB.ISACencUB_obj.buffer_index =
+ instISAC->instLB.ISACencLB_obj.buffer_index;
+ } else {
+ instISAC->instUB.ISACencUB_obj.buffer_index =
+ LB_TOTAL_DELAY_SAMPLES + instISAC->instLB.ISACencLB_obj.buffer_index;
+
+ memcpy(&(instISAC->instUB.ISACencUB_obj.lastLPCVec),
+ WebRtcIsac_kMeanLarUb16, sizeof(double) * UB_LPC_ORDER);
+ }
+ }
+
+ /* Update the payload limit if the bandwidth is changing. */
+ if (instISAC->bandwidthKHz != bandwidthKHz) {
+ instISAC->bandwidthKHz = bandwidthKHz;
+ UpdatePayloadSizeLimit(instISAC);
+ }
+ instISAC->bottleneck = bottleneckBPS;
+ return 0;
+}
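+
+/* Usage sketch (illustrative only): instantaneous-mode rate control,
+ * setting a 32 kbps short-term limit and 30 ms frames. Valid only after
+ * WebRtcIsac_EncoderInit(inst, 1); in channel-adaptive mode this fails
+ * with ISAC_MODE_MISMATCH.
+ *
+ *   if (WebRtcIsac_Control(inst, 32000, 30) != 0) {
+ *     int16_t err = WebRtcIsac_GetErrorCode(inst);
+ *   }
+ */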
+
+void WebRtcIsac_SetInitialBweBottleneck(ISACStruct* ISAC_main_inst,
+ int bottleneck_bits_per_second) {
+ ISACMainStruct* instISAC = (ISACMainStruct*)ISAC_main_inst;
+ RTC_DCHECK_GE(bottleneck_bits_per_second, 10000);
+ RTC_DCHECK_LE(bottleneck_bits_per_second, 32000);
+ instISAC->bwestimator_obj.send_bw_avg = (float)bottleneck_bits_per_second;
+}
+
+/****************************************************************************
+ * WebRtcIsac_ControlBwe(...)
+ *
+ * This function sets the initial values of bottleneck and frame-size if
+ * iSAC is used in channel-adaptive mode. Through this API, users can
+ * enforce a frame-size for all values of bottleneck. Then iSAC will not
+ * automatically change the frame-size.
+ *
+ *
+ * Input:
+ * - ISAC_main_inst : ISAC instance.
+ * - rateBPS : initial value of bottleneck in bits/second
+ * 10000 <= rateBPS <= 32000 is accepted
+ * For default bottleneck set rateBPS = 0
+ * - frameSizeMs : number of milliseconds per frame (30 or 60)
+ * - enforceFrameSize : 1 to enforce the given frame-size through out
+ * the adaptation process, 0 to let iSAC change
+ * the frame-size if required.
+ *
+ * Return value : 0 - ok
+ * -1 - Error
+ */
+int16_t WebRtcIsac_ControlBwe(ISACStruct* ISAC_main_inst,
+ int32_t bottleneckBPS,
+ int frameSizeMs,
+ int16_t enforceFrameSize) {
+ ISACMainStruct* instISAC = (ISACMainStruct*)ISAC_main_inst;
+ enum ISACBandwidth bandwidth;
+
+ /* Check if encoder initiated */
+ if ((instISAC->initFlag & BIT_MASK_ENC_INIT) !=
+ BIT_MASK_ENC_INIT) {
+ instISAC->errorCode = ISAC_ENCODER_NOT_INITIATED;
+ return -1;
+ }
+
+ /* Check that we are in channel-adaptive mode, otherwise, return (-1) */
+ if (instISAC->codingMode != 0) {
+ instISAC->errorCode = ISAC_MODE_MISMATCH;
+ return -1;
+ }
+ if ((frameSizeMs != 30) &&
+ (instISAC->encoderSamplingRateKHz == kIsacSuperWideband)) {
+ return -1;
+ }
+
+ /* Set structure variable if enforceFrameSize is set. ISAC will then
+ * keep the chosen frame size. */
+ if (enforceFrameSize != 0) {
+ instISAC->instLB.ISACencLB_obj.enforceFrameSize = 1;
+ } else {
+ instISAC->instLB.ISACencLB_obj.enforceFrameSize = 0;
+ }
+
+  /* Set the initial rate. If the input value is zero, the default initial
+   * rate is used. Otherwise, values between 10 and 32 kbps are accepted. */
+ if (bottleneckBPS != 0) {
+ double rateLB;
+ double rateUB;
+ if (WebRtcIsac_RateAllocation(bottleneckBPS, &rateLB, &rateUB,
+ &bandwidth) < 0) {
+ return -1;
+ }
+ instISAC->bwestimator_obj.send_bw_avg = (float)bottleneckBPS;
+ instISAC->bandwidthKHz = bandwidth;
+ }
+
+ /* Set the initial frame-size. If 'enforceFrameSize' is set, the frame-size
+ * will not change */
+ if (frameSizeMs != 0) {
+ if ((frameSizeMs == 30) || (frameSizeMs == 60)) {
+ instISAC->instLB.ISACencLB_obj.new_framelength =
+ (int16_t)((FS / 1000) * frameSizeMs);
+ } else {
+ instISAC->errorCode = ISAC_DISALLOWED_FRAME_LENGTH;
+ return -1;
+ }
+ }
+ return 0;
+}
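+
+/* Usage sketch (illustrative only): channel-adaptive mode with an
+ * enforced 30 ms frame size and a 24 kbps initial bottleneck. Requires
+ * WebRtcIsac_EncoderInit(inst, 0) first.
+ *
+ *   if (WebRtcIsac_ControlBwe(inst, 24000, 30, 1) != 0) {
+ *     // e.g. ISAC_MODE_MISMATCH when not in channel-adaptive mode.
+ *   }
+ */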
+
+
+/****************************************************************************
+ * WebRtcIsac_GetDownLinkBwIndex(...)
+ *
+ * This function returns an index representing the bandwidth estimate from
+ * the other side to this side.
+ *
+ * Input:
+ * - ISAC_main_inst : iSAC structure
+ *
+ * Output:
+ * - bweIndex : Bandwidth estimate to transmit to other side.
+ *
+ */
+int16_t WebRtcIsac_GetDownLinkBwIndex(ISACStruct* ISAC_main_inst,
+ int16_t* bweIndex,
+ int16_t* jitterInfo) {
+ ISACMainStruct* instISAC = (ISACMainStruct*)ISAC_main_inst;
+
+  /* Check if decoder initialized. */
+  if ((instISAC->initFlag & BIT_MASK_DEC_INIT) !=
+      BIT_MASK_DEC_INIT) {
+    instISAC->errorCode = ISAC_DECODER_NOT_INITIATED;
+ return -1;
+ }
+
+ /* Call function to get Bandwidth Estimate. */
+ WebRtcIsac_GetDownlinkBwJitIndexImpl(&(instISAC->bwestimator_obj), bweIndex,
+ jitterInfo,
+ instISAC->decoderSamplingRateKHz);
+ return 0;
+}
+
+
+/****************************************************************************
+ * WebRtcIsac_UpdateUplinkBw(...)
+ *
+ * This function takes an index representing the bandwidth estimate from
+ * this side to the other side, and updates the BWE.
+ *
+ * Input:
+ * - ISAC_main_inst : iSAC structure
+ * - rateIndex : Bandwidth estimate from other side.
+ *
+ * Return value : 0 - ok
+ * -1 - index out of range
+ */
+int16_t WebRtcIsac_UpdateUplinkBw(ISACStruct* ISAC_main_inst,
+ int16_t bweIndex) {
+ ISACMainStruct* instISAC = (ISACMainStruct*)ISAC_main_inst;
+ int16_t returnVal;
+
+ /* Check if encoder initiated. */
+ if ((instISAC->initFlag & BIT_MASK_ENC_INIT) !=
+ BIT_MASK_ENC_INIT) {
+ instISAC->errorCode = ISAC_ENCODER_NOT_INITIATED;
+ return -1;
+ }
+
+  /* Call function to update the uplink bandwidth estimate. */
+ returnVal = WebRtcIsac_UpdateUplinkBwImpl(
+ &(instISAC->bwestimator_obj), bweIndex,
+ instISAC->encoderSamplingRateKHz);
+
+ if (returnVal < 0) {
+ instISAC->errorCode = -returnVal;
+ return -1;
+ } else {
+ return 0;
+ }
+}
+
+
+/****************************************************************************
+ * WebRtcIsac_ReadBwIndex(...)
+ *
+ * This function returns the index of the Bandwidth estimate from the
+ * bit-stream.
+ *
+ * Input:
+ * - encoded : Encoded bit-stream
+ *
+ * Output:
+ *        - bweIndex          : Bandwidth estimate in bit-stream
+ *
+ */
+int16_t WebRtcIsac_ReadBwIndex(const uint8_t* encoded,
+ int16_t* bweIndex) {
+ Bitstr streamdata;
+#ifndef WEBRTC_ARCH_BIG_ENDIAN
+ int k;
+#endif
+ int16_t err;
+
+ WebRtcIsac_ResetBitstream(&(streamdata));
+
+#ifndef WEBRTC_ARCH_BIG_ENDIAN
+ for (k = 0; k < 10; k++) {
+ int16_t ek2 = ((const int16_t*)encoded)[k >> 1];
+ streamdata.stream[k] = (uint8_t)((ek2 >> ((k & 1) << 3)) & 0xff);
+ }
+#else
+ memcpy(streamdata.stream, encoded, 10);
+#endif
+
+ /* Decode frame length. */
+ err = WebRtcIsac_DecodeFrameLen(&streamdata, bweIndex);
+ if (err < 0) {
+ return err;
+ }
+
+ /* Decode BW estimation. */
+ err = WebRtcIsac_DecodeSendBW(&streamdata, bweIndex);
+ if (err < 0) {
+ return err;
+ }
+
+ return 0;
+}
+
+
+/****************************************************************************
+ * WebRtcIsac_ReadFrameLen(...)
+ *
+ * This function returns the number of samples the decoder will generate if
+ * the given payload is decoded.
+ *
+ * Input:
+ * - encoded : Encoded bitstream
+ *
+ * Output:
+ * - frameLength : Length of frame in packet (in samples)
+ *
+ */
+int16_t WebRtcIsac_ReadFrameLen(const ISACStruct* ISAC_main_inst,
+ const uint8_t* encoded,
+ int16_t* frameLength) {
+ Bitstr streamdata;
+#ifndef WEBRTC_ARCH_BIG_ENDIAN
+ int k;
+#endif
+ int16_t err;
+ ISACMainStruct* instISAC;
+
+ WebRtcIsac_ResetBitstream(&(streamdata));
+
+#ifndef WEBRTC_ARCH_BIG_ENDIAN
+ for (k = 0; k < 10; k++) {
+ int16_t ek2 = ((const int16_t*)encoded)[k >> 1];
+ streamdata.stream[k] = (uint8_t)((ek2 >> ((k & 1) << 3)) & 0xff);
+ }
+#else
+ memcpy(streamdata.stream, encoded, 10);
+#endif
+
+ /* Decode frame length. */
+ err = WebRtcIsac_DecodeFrameLen(&streamdata, frameLength);
+ if (err < 0) {
+ return -1;
+ }
+ instISAC = (ISACMainStruct*)ISAC_main_inst;
+
+ if (instISAC->decoderSamplingRateKHz == kIsacSuperWideband) {
+ /* The decoded frame length indicates the number of samples in
+ * lower-band in this case, multiply by 2 to get the total number
+ * of samples. */
+ *frameLength <<= 1;
+ }
+ return 0;
+}
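+
+/* Usage sketch (illustrative only): peeking at a payload without decoding
+ * it, e.g. for jitter-buffer bookkeeping.
+ *
+ *   int16_t frame_len, bwe_index;
+ *   if (WebRtcIsac_ReadFrameLen(dec, payload, &frame_len) == 0 &&
+ *       WebRtcIsac_ReadBwIndex(payload, &bwe_index) == 0) {
+ *     // frame_len: samples the decoder would produce for this payload.
+ *   }
+ */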
+
+
+/*******************************************************************************
+ * WebRtcIsac_GetNewFrameLen(...)
+ *
+ * This function returns the frame length (in samples) of the next packet.
+ * In the case of channel-adaptive mode, iSAC decides on its frame length
+ * based on the estimated bottleneck; this API allows a user to prepare for
+ * the next packet (at the encoder).
+ *
+ * The primary usage is in CE to make iSAC work in channel-adaptive mode.
+ *
+ * Input:
+ * - ISAC_main_inst : iSAC struct
+ *
+ * Return Value                : frame length in samples
+ *
+ */
+int16_t WebRtcIsac_GetNewFrameLen(ISACStruct* ISAC_main_inst) {
+ ISACMainStruct* instISAC = (ISACMainStruct*)ISAC_main_inst;
+
+ /* Return new frame length. */
+ if (instISAC->in_sample_rate_hz == 16000)
+ return (instISAC->instLB.ISACencLB_obj.new_framelength);
+ else /* 32000 Hz */
+ return ((instISAC->instLB.ISACencLB_obj.new_framelength) * 2);
+}
+
+
+/****************************************************************************
+ * WebRtcIsac_GetErrorCode(...)
+ *
+ * This function can be used to check the error code of an iSAC instance.
+ * When a function returns -1 an error code will be set for that instance.
+ * The function below extracts the code of the last error that occurred in
+ * the specified instance.
+ *
+ * Input:
+ * - ISAC_main_inst : ISAC instance
+ *
+ * Return value : Error code
+ */
+int16_t WebRtcIsac_GetErrorCode(ISACStruct* ISAC_main_inst) {
+ return ((ISACMainStruct*)ISAC_main_inst)->errorCode;
+}
+
+
+/****************************************************************************
+ * WebRtcIsac_GetUplinkBw(...)
+ *
+ * This function outputs the target bottleneck of the codec. In
+ * channel-adaptive mode, the target bottleneck is specified through in-band
+ * signalling retrieved by the bandwidth estimator.
+ * In channel-independent, also called instantaneous mode, the target
+ * bottleneck is provided to the encoder by calling xxx_control(...) (if
+ * xxx_control is never called, the default values are used.).
+ * Note that the output is the iSAC internal operating bottleneck which might
+ * differ slightly from the one provided through xxx_control().
+ *
+ * Input:
+ * - ISAC_main_inst : iSAC instance
+ *
+ * Output:
+ * - *bottleneck : bottleneck in bits/sec
+ *
+ * Return value : -1 if error happens
+ * 0 bit-rates computed correctly.
+ */
+int16_t WebRtcIsac_GetUplinkBw(ISACStruct* ISAC_main_inst,
+ int32_t* bottleneck) {
+ ISACMainStruct* instISAC = (ISACMainStruct*)ISAC_main_inst;
+
+ if (instISAC->codingMode == 0) {
+ /* We are in adaptive mode then get the bottleneck from BWE. */
+ *bottleneck = (int32_t)instISAC->bwestimator_obj.send_bw_avg;
+ } else {
+ *bottleneck = instISAC->bottleneck;
+ }
+
+ if ((*bottleneck > 32000) && (*bottleneck < 38000)) {
+ *bottleneck = 32000;
+ } else if ((*bottleneck > 45000) && (*bottleneck < 50000)) {
+ *bottleneck = 45000;
+ } else if (*bottleneck > 56000) {
+ *bottleneck = 56000;
+ }
+ return 0;
+}
+
+
+/******************************************************************************
+ * WebRtcIsac_SetMaxPayloadSize(...)
+ *
+ * This function sets a limit for the maximum payload size of iSAC. The same
+ * value is used both for 30 and 60 ms packets. If the encoder sampling rate
+ * is 16 kHz the maximum payload size is between 120 and 400 bytes. If the
+ * encoder sampling rate is 32 kHz the maximum payload size is between 120
+ * and 600 bytes.
+ *
+ * ---------------
+ * IMPORTANT NOTES
+ * ---------------
+ * The size of a packet is limited to the minimum of 'max-payload-size' and
+ * 'max-rate.' For instance, let's assume the max-payload-size is set to
+ * 170 bytes, and max-rate is set to 40 kbps. Note that a limit of 40 kbps
+ * translates to 150 bytes for 30ms frame-size & 300 bytes for 60ms
+ * frame-size. Then a packet with a frame-size of 30 ms is limited to 150,
+ * i.e. min(170, 150), and a packet with 60 ms frame-size is limited to
+ * 170 bytes, i.e. min(170, 300).
+ *
+ * Input:
+ * - ISAC_main_inst : iSAC instance
+ *        - maxPayloadBytes   : maximum size of the payload in bytes
+ *                              valid values are between 120 and 400 bytes
+ *                              if the encoder sampling rate is 16 kHz. For
+ *                              a 32 kHz encoder sampling rate valid values
+ *                              are between 120 and 600 bytes.
+ *
+ * Return value : 0 if successful
+ * -1 if error happens
+ */
+int16_t WebRtcIsac_SetMaxPayloadSize(ISACStruct* ISAC_main_inst,
+ int16_t maxPayloadBytes) {
+ ISACMainStruct* instISAC = (ISACMainStruct*)ISAC_main_inst;
+ int16_t status = 0;
+
+ /* Check if encoder initiated */
+ if ((instISAC->initFlag & BIT_MASK_ENC_INIT) !=
+ BIT_MASK_ENC_INIT) {
+ instISAC->errorCode = ISAC_ENCODER_NOT_INITIATED;
+ return -1;
+ }
+
+ if (instISAC->encoderSamplingRateKHz == kIsacSuperWideband) {
+ /* Sanity check. */
+ if (maxPayloadBytes < 120) {
+      /* 'maxPayloadBytes' is out of valid range;
+       * set to the acceptable value and return -1. */
+ maxPayloadBytes = 120;
+ status = -1;
+ }
+
+    /* Sanity check. */
+ if (maxPayloadBytes > STREAM_SIZE_MAX) {
+      /* 'maxPayloadBytes' is out of valid range;
+       * set to the acceptable value and return -1. */
+ maxPayloadBytes = STREAM_SIZE_MAX;
+ status = -1;
+ }
+ } else {
+ if (maxPayloadBytes < 120) {
+ /* Max payload-size is out of valid range
+ * set to the acceptable value and return -1. */
+ maxPayloadBytes = 120;
+ status = -1;
+ }
+ if (maxPayloadBytes > STREAM_SIZE_MAX_60) {
+ /* Max payload-size is out of valid range
+ * set to the acceptable value and return -1. */
+ maxPayloadBytes = STREAM_SIZE_MAX_60;
+ status = -1;
+ }
+ }
+ instISAC->maxPayloadSizeBytes = maxPayloadBytes;
+ UpdatePayloadSizeLimit(instISAC);
+ return status;
+}
+
+
+/******************************************************************************
+ * WebRtcIsac_SetMaxRate(...)
+ *
+ * This function sets the maximum rate which the codec may not exceed for
+ * any packet. The maximum rate is defined as the payload size per
+ * frame size, expressed in bits per second.
+ *
+ * The codec has a maximum rate of 53400 bits per second (200 bytes per 30
+ * ms) if the encoder sampling rate is 16kHz, and 160 kbps (600 bytes/30 ms)
+ * if the encoder sampling rate is 32 kHz.
+ *
+ * It is possible to set a maximum rate between 32000 and 53400 bits/sec
+ * in wideband mode, and 32000 to 160000 bits/sec in super-wideband mode.
+ *
+ * ---------------
+ * IMPORTANT NOTES
+ * ---------------
+ * The size of a packet is limited to the minimum of 'max-payload-size' and
+ * 'max-rate.' For instance, let's assume the max-payload-size is set to
+ * 170 bytes, and max-rate is set to 40 kbps. Note that a limit of 40 kbps
+ * translates to 150 bytes for 30ms frame-size & 300 bytes for 60ms
+ * frame-size. Then a packet with a frame-size of 30 ms is limited to 150,
+ * i.e. min(170, 150), and a packet with 60 ms frame-size is limited to
+ * 170 bytes, min(170, 300).
+ *
+ * Input:
+ * - ISAC_main_inst : iSAC instance
+ * - maxRate : maximum rate in bits per second,
+ * valid values are 32000 to 53400 bits/sec in
+ * wideband mode, and 32000 to 160000 bits/sec in
+ * super-wideband mode.
+ *
+ * Return value : 0 if successful
+ * -1 if error happens
+ */
+int16_t WebRtcIsac_SetMaxRate(ISACStruct* ISAC_main_inst,
+ int32_t maxRate) {
+ ISACMainStruct* instISAC = (ISACMainStruct*)ISAC_main_inst;
+ int16_t maxRateInBytesPer30Ms;
+ int16_t status = 0;
+
+ /* check if encoder initiated */
+ if ((instISAC->initFlag & BIT_MASK_ENC_INIT) != BIT_MASK_ENC_INIT) {
+ instISAC->errorCode = ISAC_ENCODER_NOT_INITIATED;
+ return -1;
+ }
+ /* Calculate maximum number of bytes per 30 msec packets for the
+ given maximum rate. Multiply with 30/1000 to get number of
+ bits per 30 ms, divide by 8 to get number of bytes per 30 ms:
+ maxRateInBytes = floor((maxRate * 30/1000) / 8); */
+ maxRateInBytesPer30Ms = (int16_t)(maxRate * 3 / 800);
+
+ if (instISAC->encoderSamplingRateKHz == kIsacWideband) {
+ if (maxRate < 32000) {
+ /* 'maxRate' is out of valid range.
+ * Set to the acceptable value and return -1. */
+ maxRateInBytesPer30Ms = 120;
+ status = -1;
+ }
+
+ if (maxRate > 53400) {
+ /* 'maxRate' is out of valid range.
+ * Set to the acceptable value and return -1. */
+ maxRateInBytesPer30Ms = 200;
+ status = -1;
+ }
+ } else {
+ if (maxRateInBytesPer30Ms < 120) {
+ /* 'maxRate' is out of valid range
+ * Set to the acceptable value and return -1. */
+ maxRateInBytesPer30Ms = 120;
+ status = -1;
+ }
+
+ if (maxRateInBytesPer30Ms > STREAM_SIZE_MAX) {
+ /* 'maxRate' is out of valid range.
+ * Set to the acceptable value and return -1. */
+ maxRateInBytesPer30Ms = STREAM_SIZE_MAX;
+ status = -1;
+ }
+ }
+ instISAC->maxRateBytesPer30Ms = maxRateInBytesPer30Ms;
+ UpdatePayloadSizeLimit(instISAC);
+ return status;
+}
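+
+/* Usage sketch (illustrative only): combining the two caps. Per the notes
+ * above, the effective packet-size limit is the minimum of the two, e.g.
+ * 170 bytes vs. a 40 kbps rate cap.
+ *
+ *   WebRtcIsac_SetMaxPayloadSize(inst, 170);  // bytes, any frame size
+ *   WebRtcIsac_SetMaxRate(inst, 40000);       // bits/sec, per frame size
+ */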
+
+
+/****************************************************************************
+ * WebRtcIsac_GetRedPayload(...)
+ *
+ * This function populates "encoded" with the redundant payload of the most
+ * recently encoded frame. It has to be called after WebRtcIsac_Encode(...)
+ * returns a positive value. Regardless of the frame-size, this function is
+ * called only once after encoding is completed. The bit-stream is
+ * targeted at 16000 bits/sec.
+ *
+ * Input:
+ * - ISAC_main_inst : iSAC struct
+ *
+ * Output:
+ * - encoded : the encoded data vector
+ *
+ *
+ * Return value : >0 - Length (in bytes) of coded data
+ * : -1 - Error
+ */
+int16_t WebRtcIsac_GetRedPayload(ISACStruct* ISAC_main_inst,
+ uint8_t* encoded) {
+ Bitstr iSACBitStreamInst;
+ int16_t streamLenLB;
+ int16_t streamLenUB;
+ int16_t streamLen;
+ int16_t totalLenUB;
+ ISACMainStruct* instISAC = (ISACMainStruct*)ISAC_main_inst;
+#ifndef WEBRTC_ARCH_BIG_ENDIAN
+ int k;
+#endif
+
+ if ((instISAC->initFlag & BIT_MASK_ENC_INIT) !=
+ BIT_MASK_ENC_INIT) {
+    instISAC->errorCode = ISAC_ENCODER_NOT_INITIATED;
+    return -1;
+  }
+
+ WebRtcIsac_ResetBitstream(&(iSACBitStreamInst));
+
+ streamLenLB = WebRtcIsac_EncodeStoredDataLb(
+ &instISAC->instLB.ISACencLB_obj.SaveEnc_obj,
+ &iSACBitStreamInst,
+ instISAC->instLB.ISACencLB_obj.lastBWIdx,
+ RCU_TRANSCODING_SCALE);
+ if (streamLenLB < 0) {
+ return -1;
+ }
+
+  /* Copy the lower-band bit-stream to the output. */
+ memcpy(encoded, iSACBitStreamInst.stream, streamLenLB);
+ streamLen = streamLenLB;
+ if (instISAC->bandwidthKHz == isac8kHz) {
+ return streamLenLB;
+ }
+
+ streamLenUB = WebRtcIsac_GetRedPayloadUb(
+ &instISAC->instUB.ISACencUB_obj.SaveEnc_obj,
+ &iSACBitStreamInst, instISAC->bandwidthKHz);
+ if (streamLenUB < 0) {
+    /* An error has occurred, but it is not the error of the bit-stream
+     * exceeding the size limit. */
+ return -1;
+ }
+
+ /* We have one byte to write the total length of the upper-band.
+ * The length includes the bit-stream length, check-sum and the
+   * single byte where the length is written to. This matches the iSAC
+   * wideband format and the way the "garbage" bytes are dealt with. */
+ totalLenUB = streamLenUB + 1 + LEN_CHECK_SUM_WORD8;
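+  /* If the total upper-band length does not fit in one byte, drop the
+   * upper-band payload and send only the lower band. */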
+ if (totalLenUB > 255) {
+ streamLenUB = 0;
+ }
+
+ /* Generate CRC if required. */
+ if ((instISAC->bandwidthKHz != isac8kHz) &&
+ (streamLenUB > 0)) {
+ uint32_t crc;
+ streamLen += totalLenUB;
+ encoded[streamLenLB] = (uint8_t)totalLenUB;
+ memcpy(&encoded[streamLenLB + 1], iSACBitStreamInst.stream,
+ streamLenUB);
+
+ WebRtcIsac_GetCrc((int16_t*)(&(encoded[streamLenLB + 1])),
+ streamLenUB, &crc);
+#ifndef WEBRTC_ARCH_BIG_ENDIAN
+ for (k = 0; k < LEN_CHECK_SUM_WORD8; k++) {
+ encoded[streamLen - LEN_CHECK_SUM_WORD8 + k] =
+ (uint8_t)((crc >> (24 - k * 8)) & 0xFF);
+ }
+#else
+ memcpy(&encoded[streamLenLB + streamLenUB + 1], &crc,
+ LEN_CHECK_SUM_WORD8);
+#endif
+ }
+ return streamLen;
+}
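+
+/* Illustrative call order (sketch): the RED payload is fetched right after
+ * an Encode() call that produced a packet:
+ *
+ *   int len = WebRtcIsac_Encode(inst, speech_frame, payload);
+ *   if (len > 0) {
+ *     int16_t red_len = WebRtcIsac_GetRedPayload(inst, red_payload);
+ *   }
+ */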
+
+
+/****************************************************************************
+ * WebRtcIsac_version(...)
+ *
+ * This function returns the version number.
+ *
+ * Output:
+ * - version : Pointer to character string
+ *
+ */
+void WebRtcIsac_version(char* version) {
+ strcpy(version, "4.3.0");
+}
+
+
+/******************************************************************************
+ * WebRtcIsac_SetEncSampRate()
+ * This function sets the sampling rate of the encoder. Initialization of the
+ * encoder WILL NOT overwrite the sampling rate of the encoder. The default
+ * value is 16 kHz which is set when the instance is created. The encoding-mode
+ * and the bottleneck remain unchanged by this call; however, the maximum rate
+ * and maximum payload-size will be reset to their default values.
+ *
+ * Input:
+ * - ISAC_main_inst : iSAC instance
+ * - sample_rate_hz : sampling rate in Hertz, valid values are 16000
+ * and 32000.
+ *
+ * Return value : 0 if successful
+ * -1 if failed.
+ */
+int16_t WebRtcIsac_SetEncSampRate(ISACStruct* ISAC_main_inst,
+ uint16_t sample_rate_hz) {
+ ISACMainStruct* instISAC = (ISACMainStruct*)ISAC_main_inst;
+ enum IsacSamplingRate encoder_operational_rate;
+
+ if ((sample_rate_hz != 16000) && (sample_rate_hz != 32000)) {
+ /* Sampling Frequency is not supported. */
+ instISAC->errorCode = ISAC_UNSUPPORTED_SAMPLING_FREQUENCY;
+ return -1;
+ }
+ if (sample_rate_hz == 16000) {
+ encoder_operational_rate = kIsacWideband;
+ } else {
+ encoder_operational_rate = kIsacSuperWideband;
+ }
+
+ if ((instISAC->initFlag & BIT_MASK_ENC_INIT) !=
+ BIT_MASK_ENC_INIT) {
+ if (encoder_operational_rate == kIsacWideband) {
+ instISAC->bandwidthKHz = isac8kHz;
+ } else {
+ instISAC->bandwidthKHz = isac16kHz;
+ }
+ } else {
+ ISACUBStruct* instUB = &(instISAC->instUB);
+ ISACLBStruct* instLB = &(instISAC->instLB);
+ int32_t bottleneck = instISAC->bottleneck;
+ int16_t codingMode = instISAC->codingMode;
+ int16_t frameSizeMs = instLB->ISACencLB_obj.new_framelength /
+ (FS / 1000);
+
+ if ((encoder_operational_rate == kIsacWideband) &&
+ (instISAC->encoderSamplingRateKHz == kIsacSuperWideband)) {
+      /* Changing from super-wideband to wideband; we don't need to
+       * re-initialize the lower-band encoder. */
+ instISAC->bandwidthKHz = isac8kHz;
+ if (codingMode == 1) {
+ ControlLb(instLB,
+ (bottleneck > 32000) ? 32000 : bottleneck, FRAMESIZE);
+ }
+ instISAC->maxPayloadSizeBytes = STREAM_SIZE_MAX_60;
+ instISAC->maxRateBytesPer30Ms = STREAM_SIZE_MAX_30;
+ } else if ((encoder_operational_rate == kIsacSuperWideband) &&
+ (instISAC->encoderSamplingRateKHz == kIsacWideband)) {
+ double bottleneckLB = 0;
+ double bottleneckUB = 0;
+ if (codingMode == 1) {
+ WebRtcIsac_RateAllocation(bottleneck, &bottleneckLB, &bottleneckUB,
+ &(instISAC->bandwidthKHz));
+ }
+
+ instISAC->bandwidthKHz = isac16kHz;
+ instISAC->maxPayloadSizeBytes = STREAM_SIZE_MAX;
+ instISAC->maxRateBytesPer30Ms = STREAM_SIZE_MAX;
+
+ EncoderInitLb(instLB, codingMode, encoder_operational_rate);
+ EncoderInitUb(instUB, instISAC->bandwidthKHz);
+
+ memset(instISAC->analysisFBState1, 0,
+ FB_STATE_SIZE_WORD32 * sizeof(int32_t));
+ memset(instISAC->analysisFBState2, 0,
+ FB_STATE_SIZE_WORD32 * sizeof(int32_t));
+
+ if (codingMode == 1) {
+ instISAC->bottleneck = bottleneck;
+ ControlLb(instLB, bottleneckLB,
+                  (instISAC->bandwidthKHz == isac8kHz) ? frameSizeMs : FRAMESIZE);
+ if (instISAC->bandwidthKHz > isac8kHz) {
+ ControlUb(instUB, bottleneckUB);
+ }
+ } else {
+ instLB->ISACencLB_obj.enforceFrameSize = 0;
+ instLB->ISACencLB_obj.new_framelength = FRAMESAMPLES;
+ }
+ }
+ }
+ instISAC->encoderSamplingRateKHz = encoder_operational_rate;
+ instISAC->in_sample_rate_hz = sample_rate_hz;
+ return 0;
+}
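+
+/* Sketch: switching an initialized encoder to super-wideband. Since this
+ * call resets the maximum rate and payload-size limits to their defaults,
+ * any custom limits have to be re-applied afterwards:
+ *
+ *   WebRtcIsac_SetEncSampRate(inst, 32000);
+ *   WebRtcIsac_SetMaxPayloadSize(inst, 400);  // example value, re-applied
+ */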
+
+
+/******************************************************************************
+ * WebRtcIsac_SetDecSampRate()
+ * This function sets the sampling rate of the decoder. Initialization of the
+ * decoder WILL NOT overwrite the sampling rate of the decoder. The default
+ * value is 16 kHz which is set when the instance is created.
+ *
+ * Input:
+ * - ISAC_main_inst : iSAC instance
+ * - sample_rate_hz : sampling rate in Hertz, valid values are 16000
+ * and 32000.
+ *
+ * Return value : 0 if successful
+ * -1 if failed.
+ */
+int16_t WebRtcIsac_SetDecSampRate(ISACStruct* ISAC_main_inst,
+ uint16_t sample_rate_hz) {
+ ISACMainStruct* instISAC = (ISACMainStruct*)ISAC_main_inst;
+ enum IsacSamplingRate decoder_operational_rate;
+
+ if (sample_rate_hz == 16000) {
+ decoder_operational_rate = kIsacWideband;
+ } else if (sample_rate_hz == 32000) {
+ decoder_operational_rate = kIsacSuperWideband;
+ } else {
+ /* Sampling Frequency is not supported. */
+ instISAC->errorCode = ISAC_UNSUPPORTED_SAMPLING_FREQUENCY;
+ return -1;
+ }
+
+ if ((instISAC->decoderSamplingRateKHz == kIsacWideband) &&
+ (decoder_operational_rate == kIsacSuperWideband)) {
+    /* Switching from wideband to super-wideband at the decoder; we need to
+     * reset the filter-bank and initialize the upper-band decoder. */
+ memset(instISAC->synthesisFBState1, 0,
+ FB_STATE_SIZE_WORD32 * sizeof(int32_t));
+ memset(instISAC->synthesisFBState2, 0,
+ FB_STATE_SIZE_WORD32 * sizeof(int32_t));
+
+ DecoderInitUb(&instISAC->instUB);
+ }
+ instISAC->decoderSamplingRateKHz = decoder_operational_rate;
+ return 0;
+}
+
+
+/******************************************************************************
+ * WebRtcIsac_EncSampRate()
+ *
+ * Input:
+ * - ISAC_main_inst : iSAC instance
+ *
+ * Return value : sampling rate in Hertz. The input to encoder
+ * is expected to be sampled in this rate.
+ *
+ */
+uint16_t WebRtcIsac_EncSampRate(ISACStruct* ISAC_main_inst) {
+ ISACMainStruct* instISAC = (ISACMainStruct*)ISAC_main_inst;
+ return instISAC->in_sample_rate_hz;
+}
+
+
+/******************************************************************************
+ * WebRtcIsac_DecSampRate()
+ * Return the sampling rate of the decoded audio.
+ *
+ * Input:
+ * - ISAC_main_inst : iSAC instance
+ *
+ * Return value : sampling rate in Hertz. Decoder output is
+ * sampled at this rate.
+ *
+ */
+uint16_t WebRtcIsac_DecSampRate(ISACStruct* ISAC_main_inst) {
+ ISACMainStruct* instISAC = (ISACMainStruct*)ISAC_main_inst;
+ return instISAC->decoderSamplingRateKHz == kIsacWideband ? 16000 : 32000;
+}
+
+void WebRtcIsac_SetEncSampRateInDecoder(ISACStruct* inst,
+ int sample_rate_hz) {
+ ISACMainStruct* instISAC = (ISACMainStruct*)inst;
+ RTC_DCHECK_NE(0, instISAC->initFlag & BIT_MASK_DEC_INIT);
+ RTC_DCHECK(!(instISAC->initFlag & BIT_MASK_ENC_INIT));
+ RTC_DCHECK(sample_rate_hz == 16000 || sample_rate_hz == 32000);
+ instISAC->encoderSamplingRateKHz = sample_rate_hz / 1000;
+}
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/isac_float_type.h b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/isac_float_type.h
new file mode 100644
index 0000000000..511bc97ee6
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/isac_float_type.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_ISAC_FLOAT_TYPE_H_
+#define MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_ISAC_FLOAT_TYPE_H_
+
+#include "modules/audio_coding/codecs/isac/main/include/isac.h"
+
+namespace webrtc {
+
+struct IsacFloat {
+ using instance_type = ISACStruct;
+ static const bool has_swb = true;
+ static inline int16_t Control(instance_type* inst,
+ int32_t rate,
+ int framesize) {
+ return WebRtcIsac_Control(inst, rate, framesize);
+ }
+ static inline int16_t ControlBwe(instance_type* inst,
+ int32_t rate_bps,
+ int frame_size_ms,
+ int16_t enforce_frame_size) {
+ return WebRtcIsac_ControlBwe(inst, rate_bps, frame_size_ms,
+ enforce_frame_size);
+ }
+ static inline int16_t Create(instance_type** inst) {
+ return WebRtcIsac_Create(inst);
+ }
+ static inline int DecodeInternal(instance_type* inst,
+ const uint8_t* encoded,
+ size_t len,
+ int16_t* decoded,
+ int16_t* speech_type) {
+ return WebRtcIsac_Decode(inst, encoded, len, decoded, speech_type);
+ }
+ static inline size_t DecodePlc(instance_type* inst,
+ int16_t* decoded,
+ size_t num_lost_frames) {
+ return WebRtcIsac_DecodePlc(inst, decoded, num_lost_frames);
+ }
+
+ static inline void DecoderInit(instance_type* inst) {
+ WebRtcIsac_DecoderInit(inst);
+ }
+ static inline int Encode(instance_type* inst,
+ const int16_t* speech_in,
+ uint8_t* encoded) {
+ return WebRtcIsac_Encode(inst, speech_in, encoded);
+ }
+ static inline int16_t EncoderInit(instance_type* inst, int16_t coding_mode) {
+ return WebRtcIsac_EncoderInit(inst, coding_mode);
+ }
+ static inline uint16_t EncSampRate(instance_type* inst) {
+ return WebRtcIsac_EncSampRate(inst);
+ }
+
+ static inline int16_t Free(instance_type* inst) {
+ return WebRtcIsac_Free(inst);
+ }
+ static inline int16_t GetErrorCode(instance_type* inst) {
+ return WebRtcIsac_GetErrorCode(inst);
+ }
+
+ static inline int16_t GetNewFrameLen(instance_type* inst) {
+ return WebRtcIsac_GetNewFrameLen(inst);
+ }
+ static inline int16_t SetDecSampRate(instance_type* inst,
+ uint16_t sample_rate_hz) {
+ return WebRtcIsac_SetDecSampRate(inst, sample_rate_hz);
+ }
+ static inline int16_t SetEncSampRate(instance_type* inst,
+ uint16_t sample_rate_hz) {
+ return WebRtcIsac_SetEncSampRate(inst, sample_rate_hz);
+ }
+ static inline void SetEncSampRateInDecoder(instance_type* inst,
+ uint16_t sample_rate_hz) {
+ WebRtcIsac_SetEncSampRateInDecoder(inst, sample_rate_hz);
+ }
+ static inline void SetInitialBweBottleneck(instance_type* inst,
+ int bottleneck_bits_per_second) {
+ WebRtcIsac_SetInitialBweBottleneck(inst, bottleneck_bits_per_second);
+ }
+ static inline int16_t SetMaxPayloadSize(instance_type* inst,
+ int16_t max_payload_size_bytes) {
+ return WebRtcIsac_SetMaxPayloadSize(inst, max_payload_size_bytes);
+ }
+ static inline int16_t SetMaxRate(instance_type* inst, int32_t max_bit_rate) {
+ return WebRtcIsac_SetMaxRate(inst, max_bit_rate);
+ }
+};
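+
+// A sketch of how a trait struct like this is consumed: generic code can be
+// written against any type exposing the same static interface. The helper
+// below is hypothetical and not part of WebRTC:
+//
+//   template <typename T>
+//   int16_t CreateAndInitEncoder(typename T::instance_type** inst,
+//                                int16_t coding_mode) {
+//     if (T::Create(inst) != 0)
+//       return -1;
+//     return T::EncoderInit(*inst, coding_mode);
+//   }
+//   // e.g. CreateAndInitEncoder<IsacFloat>(&ptr, 0);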
+
+} // namespace webrtc
+#endif // MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_ISAC_FLOAT_TYPE_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/isac_unittest.cc b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/isac_unittest.cc
new file mode 100644
index 0000000000..c98b21d86f
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/isac_unittest.cc
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/audio_coding/codecs/isac/main/include/isac.h"
+
+#include <string>
+
+#include "test/gtest.h"
+#include "test/testsupport/file_utils.h"
+
+struct WebRtcISACStruct;
+
+namespace webrtc {
+
+// Number of samples in a 60 ms frame, sampled at 32 kHz (320 per 10 ms).
+const int kIsacNumberOfSamples = 320 * 6;
+// Maximum number of bytes in output bitstream.
+const size_t kMaxBytes = 1000;
+
+class IsacTest : public ::testing::Test {
+ protected:
+ IsacTest();
+ virtual void SetUp();
+
+ WebRtcISACStruct* isac_codec_;
+
+ int16_t speech_data_[kIsacNumberOfSamples];
+ int16_t output_data_[kIsacNumberOfSamples];
+ uint8_t bitstream_[kMaxBytes];
+ uint8_t bitstream_small_[7]; // Simulate sync packets.
+};
+
+IsacTest::IsacTest() : isac_codec_(NULL) {}
+
+void IsacTest::SetUp() {
+ // Read some samples from a speech file, to be used in the encode test.
+ FILE* input_file;
+ const std::string file_name =
+ webrtc::test::ResourcePath("audio_coding/testfile32kHz", "pcm");
+ input_file = fopen(file_name.c_str(), "rb");
+ ASSERT_TRUE(input_file != NULL);
+ ASSERT_EQ(kIsacNumberOfSamples,
+ static_cast<int32_t>(fread(speech_data_, sizeof(int16_t),
+ kIsacNumberOfSamples, input_file)));
+ fclose(input_file);
+ input_file = NULL;
+}
+
+// Test failing Create.
+TEST_F(IsacTest, IsacCreateFail) {
+ // Test to see that an invalid pointer is caught.
+ EXPECT_EQ(-1, WebRtcIsac_Create(NULL));
+}
+
+// Test failing Free.
+TEST_F(IsacTest, IsacFreeFail) {
+ // Test to see that free function doesn't crash.
+ EXPECT_EQ(0, WebRtcIsac_Free(NULL));
+}
+
+// Test normal Create and Free.
+TEST_F(IsacTest, IsacCreateFree) {
+ EXPECT_EQ(0, WebRtcIsac_Create(&isac_codec_));
+ EXPECT_TRUE(isac_codec_ != NULL);
+ EXPECT_EQ(0, WebRtcIsac_Free(isac_codec_));
+}
+
+TEST_F(IsacTest, IsacUpdateBWE) {
+ // Create encoder memory.
+ EXPECT_EQ(0, WebRtcIsac_Create(&isac_codec_));
+
+ // Init encoder (adaptive mode) and decoder.
+ WebRtcIsac_EncoderInit(isac_codec_, 0);
+ WebRtcIsac_DecoderInit(isac_codec_);
+
+ int encoded_bytes;
+
+  // Test a call with a small packet (sync packet).
+ EXPECT_EQ(-1, WebRtcIsac_UpdateBwEstimate(isac_codec_, bitstream_small_, 7, 1,
+ 12345, 56789));
+
+ // Encode 60 ms of data (needed to create a first packet).
+ encoded_bytes = WebRtcIsac_Encode(isac_codec_, speech_data_, bitstream_);
+ EXPECT_EQ(0, encoded_bytes);
+ encoded_bytes = WebRtcIsac_Encode(isac_codec_, speech_data_, bitstream_);
+ EXPECT_EQ(0, encoded_bytes);
+ encoded_bytes = WebRtcIsac_Encode(isac_codec_, speech_data_, bitstream_);
+ EXPECT_EQ(0, encoded_bytes);
+ encoded_bytes = WebRtcIsac_Encode(isac_codec_, speech_data_, bitstream_);
+ EXPECT_EQ(0, encoded_bytes);
+ encoded_bytes = WebRtcIsac_Encode(isac_codec_, speech_data_, bitstream_);
+ EXPECT_EQ(0, encoded_bytes);
+ encoded_bytes = WebRtcIsac_Encode(isac_codec_, speech_data_, bitstream_);
+ EXPECT_GT(encoded_bytes, 0);
+
+ // Call to update bandwidth estimator with real data.
+ EXPECT_EQ(0, WebRtcIsac_UpdateBwEstimate(isac_codec_, bitstream_,
+ static_cast<size_t>(encoded_bytes),
+ 1, 12345, 56789));
+
+ // Free memory.
+ EXPECT_EQ(0, WebRtcIsac_Free(isac_codec_));
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/isac_vad.c b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/isac_vad.c
new file mode 100644
index 0000000000..57cf0c39da
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/isac_vad.c
@@ -0,0 +1,409 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/isac/main/source/isac_vad.h"
+
+#include <math.h>
+
+void WebRtcIsac_InitPitchFilter(PitchFiltstr* pitchfiltdata) {
+ int k;
+
+ for (k = 0; k < PITCH_BUFFSIZE; k++) {
+ pitchfiltdata->ubuf[k] = 0.0;
+ }
+ pitchfiltdata->ystate[0] = 0.0;
+ for (k = 1; k < (PITCH_DAMPORDER); k++) {
+ pitchfiltdata->ystate[k] = 0.0;
+ }
+ pitchfiltdata->oldlagp[0] = 50.0;
+ pitchfiltdata->oldgainp[0] = 0.0;
+}
+
+static void WebRtcIsac_InitWeightingFilter(WeightFiltstr* wfdata) {
+ int k;
+ double t, dtmp, dtmp2, denum, denum2;
+
+ for (k = 0; k < PITCH_WLPCBUFLEN; k++)
+ wfdata->buffer[k] = 0.0;
+
+ for (k = 0; k < PITCH_WLPCORDER; k++) {
+ wfdata->istate[k] = 0.0;
+ wfdata->weostate[k] = 0.0;
+ wfdata->whostate[k] = 0.0;
+ }
+
+  /* The next part computes a squared-sine window; ideally this table would
+     be precomputed offline (e.g. in Matlab) and stored globally. */
+ t = 0.5;
+ denum = 1.0 / ((double)PITCH_WLPCWINLEN);
+ denum2 = denum * denum;
+ for (k = 0; k < PITCH_WLPCWINLEN; k++) {
+ dtmp = PITCH_WLPCASYM * t * denum + (1 - PITCH_WLPCASYM) * t * t * denum2;
+ dtmp *= 3.14159265;
+ dtmp2 = sin(dtmp);
+ wfdata->window[k] = dtmp2 * dtmp2;
+ t++;
+ }
+}
+
+void WebRtcIsac_InitPitchAnalysis(PitchAnalysisStruct* State) {
+ int k;
+
+ for (k = 0; k < PITCH_CORR_LEN2 + PITCH_CORR_STEP2 + PITCH_MAX_LAG / 2 -
+ PITCH_FRAME_LEN / 2 + 2;
+ k++)
+ State->dec_buffer[k] = 0.0;
+ for (k = 0; k < 2 * ALLPASSSECTIONS + 1; k++)
+ State->decimator_state[k] = 0.0;
+ for (k = 0; k < 2; k++)
+ State->hp_state[k] = 0.0;
+ for (k = 0; k < QLOOKAHEAD; k++)
+ State->whitened_buf[k] = 0.0;
+ for (k = 0; k < QLOOKAHEAD; k++)
+ State->inbuf[k] = 0.0;
+
+ WebRtcIsac_InitPitchFilter(&(State->PFstr_wght));
+
+ WebRtcIsac_InitPitchFilter(&(State->PFstr));
+
+ WebRtcIsac_InitWeightingFilter(&(State->Wghtstr));
+}
+
+void WebRtcIsac_InitPreFilterbank(PreFiltBankstr* prefiltdata) {
+ int k;
+
+ for (k = 0; k < QLOOKAHEAD; k++) {
+ prefiltdata->INLABUF1[k] = 0;
+ prefiltdata->INLABUF2[k] = 0;
+
+ prefiltdata->INLABUF1_float[k] = 0;
+ prefiltdata->INLABUF2_float[k] = 0;
+ }
+ for (k = 0; k < 2 * (QORDER - 1); k++) {
+ prefiltdata->INSTAT1[k] = 0;
+ prefiltdata->INSTAT2[k] = 0;
+ prefiltdata->INSTATLA1[k] = 0;
+ prefiltdata->INSTATLA2[k] = 0;
+
+ prefiltdata->INSTAT1_float[k] = 0;
+ prefiltdata->INSTAT2_float[k] = 0;
+ prefiltdata->INSTATLA1_float[k] = 0;
+ prefiltdata->INSTATLA2_float[k] = 0;
+ }
+
+ /* High pass filter states */
+ prefiltdata->HPstates[0] = 0.0;
+ prefiltdata->HPstates[1] = 0.0;
+
+ prefiltdata->HPstates_float[0] = 0.0f;
+ prefiltdata->HPstates_float[1] = 0.0f;
+
+ return;
+}
+
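+/* Levinson-Durbin recursion (contract inferred from use): given the
+ * autocorrelation sequence r[0..order], computes the AR polynomial
+ * a[0..order] (with a[0] == 1) and the reflection coefficients
+ * k[0..order-1], and returns the final prediction-error energy alpha. */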
+double WebRtcIsac_LevDurb(double* a, double* k, double* r, size_t order) {
+ const double LEVINSON_EPS = 1.0e-10;
+
+ double sum, alpha;
+ size_t m, m_h, i;
+  alpha = 0; /* Initialized only to silence a compiler warning. */
+ a[0] = 1.0;
+  if (r[0] < LEVINSON_EPS) { /* if r[0] is (near) zero, set LPC coeffs to zero */
+ for (i = 0; i < order; i++) {
+ k[i] = 0;
+ a[i + 1] = 0;
+ }
+ } else {
+ a[1] = k[0] = -r[1] / r[0];
+ alpha = r[0] + r[1] * k[0];
+ for (m = 1; m < order; m++) {
+ sum = r[m + 1];
+ for (i = 0; i < m; i++) {
+ sum += a[i + 1] * r[m - i];
+ }
+ k[m] = -sum / alpha;
+ alpha += k[m] * sum;
+ m_h = (m + 1) >> 1;
+ for (i = 0; i < m_h; i++) {
+ sum = a[i + 1] + k[m] * a[m - i];
+ a[m - i] += k[m] * a[i + 1];
+ a[i + 1] = sum;
+ }
+ a[m + 1] = k[m];
+ }
+ }
+ return alpha;
+}
+
+/* The upper channel all-pass filter factors */
+const float WebRtcIsac_kUpperApFactorsFloat[2] = {0.03470000000000f,
+ 0.38260000000000f};
+
+/* The lower channel all-pass filter factors */
+const float WebRtcIsac_kLowerApFactorsFloat[2] = {0.15440000000000f,
+ 0.74400000000000f};
+
+/* This function performs all-pass filtering: a cascade of first-order
+ * all-pass sections is used to filter the input.
+ * Note that the input is overwritten in place.
+ */
+void WebRtcIsac_AllPassFilter2Float(float* InOut,
+ const float* APSectionFactors,
+ int lengthInOut,
+ int NumberOfSections,
+ float* FilterState) {
+ int n, j;
+ float temp;
+ for (j = 0; j < NumberOfSections; j++) {
+ for (n = 0; n < lengthInOut; n++) {
+ temp = FilterState[j] + APSectionFactors[j] * InOut[n];
+ FilterState[j] = -APSectionFactors[j] * temp + InOut[n];
+ InOut[n] = temp;
+ }
+ }
+}
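+
+/* Each section above realizes the first-order all-pass
+ * H(z) = (c + z^-1) / (1 + c*z^-1) with c = APSectionFactors[j]:
+ * the output is y = s + c*x and the new state is s' = x - c*y. */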
+
+/* The number of composite all-pass filter factors */
+#define NUMBEROFCOMPOSITEAPSECTIONS 4
+
+/* Function WebRtcIsac_SplitAndFilter
+ * This function creates low-pass and high-pass decimated versions of part of
+ the input signal, and part of the signal in the input 'lookahead buffer'.
+
+ INPUTS:
+ in: a length FRAMESAMPLES array of input samples
+ prefiltdata: input data structure containing the filterbank states
+ and lookahead samples from the previous encoding
+ iteration.
+ OUTPUTS:
+ LP: a FRAMESAMPLES_HALF array of low-pass filtered samples that
+ have been phase equalized. The first QLOOKAHEAD samples are
+ based on the samples in the two prefiltdata->INLABUFx arrays
+ each of length QLOOKAHEAD.
+ The remaining FRAMESAMPLES_HALF-QLOOKAHEAD samples are based
+ on the first FRAMESAMPLES_HALF-QLOOKAHEAD samples of the input
+ array in[].
+ HP: a FRAMESAMPLES_HALF array of high-pass filtered samples that
+ have been phase equalized. The first QLOOKAHEAD samples are
+ based on the samples in the two prefiltdata->INLABUFx arrays
+ each of length QLOOKAHEAD.
+ The remaining FRAMESAMPLES_HALF-QLOOKAHEAD samples are based
+ on the first FRAMESAMPLES_HALF-QLOOKAHEAD samples of the input
+ array in[].
+
+ LP_la: a FRAMESAMPLES_HALF array of low-pass filtered samples.
+ These samples are not phase equalized. They are computed
+ from the samples in the in[] array.
+ HP_la: a FRAMESAMPLES_HALF array of high-pass filtered samples
+ that are not phase equalized. They are computed from
+ the in[] vector.
+ prefiltdata: this input data structure's filterbank state and
+ lookahead sample buffers are updated for the next
+ encoding iteration.
+*/
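+/* Structurally this is a two-channel polyphase (half-band) IIR filterbank:
+   odd and even input samples go through the upper and lower all-pass
+   chains, and the outputs are combined at the end of the function as
+   LP = (ch1 + ch2) / 2 and HP = (ch1 - ch2) / 2. */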
+void WebRtcIsac_SplitAndFilterFloat(float* pin,
+ float* LP,
+ float* HP,
+ double* LP_la,
+ double* HP_la,
+ PreFiltBankstr* prefiltdata) {
+ int k, n;
+ float CompositeAPFilterState[NUMBEROFCOMPOSITEAPSECTIONS];
+ float ForTransform_CompositeAPFilterState[NUMBEROFCOMPOSITEAPSECTIONS];
+ float ForTransform_CompositeAPFilterState2[NUMBEROFCOMPOSITEAPSECTIONS];
+ float tempinoutvec[FRAMESAMPLES + MAX_AR_MODEL_ORDER];
+ float tempin_ch1[FRAMESAMPLES + MAX_AR_MODEL_ORDER];
+ float tempin_ch2[FRAMESAMPLES + MAX_AR_MODEL_ORDER];
+ float in[FRAMESAMPLES];
+ float ftmp;
+
+ /* HPstcoeff_in = {a1, a2, b1 - b0 * a1, b2 - b0 * a2}; */
+ static const float kHpStCoefInFloat[4] = {
+ -1.94895953203325f, 0.94984516000000f, -0.05101826139794f,
+ 0.05015484000000f};
+
+ /* The composite all-pass filter factors */
+ static const float WebRtcIsac_kCompositeApFactorsFloat[4] = {
+ 0.03470000000000f, 0.15440000000000f, 0.38260000000000f,
+ 0.74400000000000f};
+
+ // The matrix for transforming the backward composite state to upper channel
+ // state.
+ static const float WebRtcIsac_kTransform1Float[8] = {
+ -0.00158678506084f, 0.00127157815343f, -0.00104805672709f,
+ 0.00084837248079f, 0.00134467983258f, -0.00107756549387f,
+ 0.00088814793277f, -0.00071893072525f};
+
+ // The matrix for transforming the backward composite state to lower channel
+ // state.
+ static const float WebRtcIsac_kTransform2Float[8] = {
+ -0.00170686041697f, 0.00136780109829f, -0.00112736532350f,
+ 0.00091257055385f, 0.00103094281812f, -0.00082615076557f,
+ 0.00068092756088f, -0.00055119165484f};
+
+ /* High pass filter */
+
+ for (k = 0; k < FRAMESAMPLES; k++) {
+ in[k] = pin[k] + kHpStCoefInFloat[2] * prefiltdata->HPstates_float[0] +
+ kHpStCoefInFloat[3] * prefiltdata->HPstates_float[1];
+ ftmp = pin[k] - kHpStCoefInFloat[0] * prefiltdata->HPstates_float[0] -
+ kHpStCoefInFloat[1] * prefiltdata->HPstates_float[1];
+ prefiltdata->HPstates_float[1] = prefiltdata->HPstates_float[0];
+ prefiltdata->HPstates_float[0] = ftmp;
+ }
+
+ /* First Channel */
+
+ /*initial state of composite filter is zero */
+ for (k = 0; k < NUMBEROFCOMPOSITEAPSECTIONS; k++) {
+ CompositeAPFilterState[k] = 0.0;
+ }
+ /* put every other sample of input into a temporary vector in reverse
+ * (backward) order*/
+ for (k = 0; k < FRAMESAMPLES_HALF; k++) {
+ tempinoutvec[k] = in[FRAMESAMPLES - 1 - 2 * k];
+ }
+
+ /* now all-pass filter the backwards vector. Output values overwrite the
+ * input vector. */
+ WebRtcIsac_AllPassFilter2Float(
+ tempinoutvec, WebRtcIsac_kCompositeApFactorsFloat, FRAMESAMPLES_HALF,
+ NUMBEROFCOMPOSITEAPSECTIONS, CompositeAPFilterState);
+
+ /* save the backwards filtered output for later forward filtering,
+ but write it in forward order*/
+ for (k = 0; k < FRAMESAMPLES_HALF; k++) {
+ tempin_ch1[FRAMESAMPLES_HALF + QLOOKAHEAD - 1 - k] = tempinoutvec[k];
+ }
+
+  /* save the backwards filter state because it will be transformed
+     later into a forward state */
+ for (k = 0; k < NUMBEROFCOMPOSITEAPSECTIONS; k++) {
+ ForTransform_CompositeAPFilterState[k] = CompositeAPFilterState[k];
+ }
+
+ /* now backwards filter the samples in the lookahead buffer. The samples were
+ placed there in the encoding of the previous frame. The output samples
+ overwrite the input samples */
+ WebRtcIsac_AllPassFilter2Float(
+ prefiltdata->INLABUF1_float, WebRtcIsac_kCompositeApFactorsFloat,
+ QLOOKAHEAD, NUMBEROFCOMPOSITEAPSECTIONS, CompositeAPFilterState);
+
+ /* save the output, but write it in forward order */
+ /* write the lookahead samples for the next encoding iteration. Every other
+ sample at the end of the input frame is written in reverse order for the
+ lookahead length. Exported in the prefiltdata structure. */
+ for (k = 0; k < QLOOKAHEAD; k++) {
+ tempin_ch1[QLOOKAHEAD - 1 - k] = prefiltdata->INLABUF1_float[k];
+ prefiltdata->INLABUF1_float[k] = in[FRAMESAMPLES - 1 - 2 * k];
+ }
+
+ /* Second Channel. This is exactly like the first channel, except that the
+ even samples are now filtered instead (lower channel). */
+ for (k = 0; k < NUMBEROFCOMPOSITEAPSECTIONS; k++) {
+ CompositeAPFilterState[k] = 0.0;
+ }
+
+ for (k = 0; k < FRAMESAMPLES_HALF; k++) {
+ tempinoutvec[k] = in[FRAMESAMPLES - 2 - 2 * k];
+ }
+
+ WebRtcIsac_AllPassFilter2Float(
+ tempinoutvec, WebRtcIsac_kCompositeApFactorsFloat, FRAMESAMPLES_HALF,
+ NUMBEROFCOMPOSITEAPSECTIONS, CompositeAPFilterState);
+
+ for (k = 0; k < FRAMESAMPLES_HALF; k++) {
+ tempin_ch2[FRAMESAMPLES_HALF + QLOOKAHEAD - 1 - k] = tempinoutvec[k];
+ }
+
+ for (k = 0; k < NUMBEROFCOMPOSITEAPSECTIONS; k++) {
+ ForTransform_CompositeAPFilterState2[k] = CompositeAPFilterState[k];
+ }
+
+ WebRtcIsac_AllPassFilter2Float(
+ prefiltdata->INLABUF2_float, WebRtcIsac_kCompositeApFactorsFloat,
+ QLOOKAHEAD, NUMBEROFCOMPOSITEAPSECTIONS, CompositeAPFilterState);
+
+ for (k = 0; k < QLOOKAHEAD; k++) {
+ tempin_ch2[QLOOKAHEAD - 1 - k] = prefiltdata->INLABUF2_float[k];
+ prefiltdata->INLABUF2_float[k] = in[FRAMESAMPLES - 2 - 2 * k];
+ }
+
+ /* Transform filter states from backward to forward */
+  /* At this point, the states of the backwards composite filters for the
+     two channels are transformed into forward filtering states for the
+     corresponding forward channel filters. Each channel's forward filtering
+     state from the previous encoding iteration is added to the transformed
+     state to get a proper forward state. */
+
+  /* So the existing NUMBEROFCOMPOSITEAPSECTIONS x 1 (4x1) state vector is
+     multiplied by a NUMBEROFCHANNELAPSECTIONS x NUMBEROFCOMPOSITEAPSECTIONS
+     (2x4) transform matrix to get the new state that is added to the
+     previous 2x1 input state. */
+
+ for (k = 0; k < NUMBEROFCHANNELAPSECTIONS; k++) { /* k is row variable */
+ for (n = 0; n < NUMBEROFCOMPOSITEAPSECTIONS;
+ n++) { /* n is column variable */
+ prefiltdata->INSTAT1_float[k] +=
+ ForTransform_CompositeAPFilterState[n] *
+ WebRtcIsac_kTransform1Float[k * NUMBEROFCHANNELAPSECTIONS + n];
+ prefiltdata->INSTAT2_float[k] +=
+ ForTransform_CompositeAPFilterState2[n] *
+ WebRtcIsac_kTransform2Float[k * NUMBEROFCHANNELAPSECTIONS + n];
+ }
+ }
+
+ /*obtain polyphase components by forward all-pass filtering through each
+ * channel */
+ /* the backward filtered samples are now forward filtered with the
+ * corresponding channel filters */
+ /* The all pass filtering automatically updates the filter states which are
+ exported in the prefiltdata structure */
+ WebRtcIsac_AllPassFilter2Float(tempin_ch1, WebRtcIsac_kUpperApFactorsFloat,
+ FRAMESAMPLES_HALF, NUMBEROFCHANNELAPSECTIONS,
+ prefiltdata->INSTAT1_float);
+ WebRtcIsac_AllPassFilter2Float(tempin_ch2, WebRtcIsac_kLowerApFactorsFloat,
+ FRAMESAMPLES_HALF, NUMBEROFCHANNELAPSECTIONS,
+ prefiltdata->INSTAT2_float);
+
+ /* Now Construct low-pass and high-pass signals as combinations of polyphase
+ * components */
+ for (k = 0; k < FRAMESAMPLES_HALF; k++) {
+ LP[k] = 0.5f * (tempin_ch1[k] + tempin_ch2[k]); /* low pass signal*/
+ HP[k] = 0.5f * (tempin_ch1[k] - tempin_ch2[k]); /* high pass signal*/
+ }
+
+ /* Lookahead LP and HP signals */
+ /* now create low pass and high pass signals of the input vector. However, no
+ backwards filtering is performed, and hence no phase equalization is
+ involved. Also, the input contains some samples that are lookahead samples.
+ The high pass and low pass signals that are created are used outside this
+ function for analysis (not encoding) purposes */
+
+ /* set up input */
+ for (k = 0; k < FRAMESAMPLES_HALF; k++) {
+ tempin_ch1[k] = in[2 * k + 1];
+ tempin_ch2[k] = in[2 * k];
+ }
+
+ /* the input filter states are passed in and updated by the all-pass filtering
+ routine and exported in the prefiltdata structure*/
+ WebRtcIsac_AllPassFilter2Float(tempin_ch1, WebRtcIsac_kUpperApFactorsFloat,
+ FRAMESAMPLES_HALF, NUMBEROFCHANNELAPSECTIONS,
+ prefiltdata->INSTATLA1_float);
+ WebRtcIsac_AllPassFilter2Float(tempin_ch2, WebRtcIsac_kLowerApFactorsFloat,
+ FRAMESAMPLES_HALF, NUMBEROFCHANNELAPSECTIONS,
+ prefiltdata->INSTATLA2_float);
+
+ for (k = 0; k < FRAMESAMPLES_HALF; k++) {
+ LP_la[k] = (float)(0.5f * (tempin_ch1[k] + tempin_ch2[k])); /*low pass */
+ HP_la[k] = (double)(0.5f * (tempin_ch1[k] - tempin_ch2[k])); /* high pass */
+ }
+}
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/isac_vad.h b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/isac_vad.h
new file mode 100644
index 0000000000..1aecfc4046
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/isac_vad.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_ISAC_VAD_H_
+#define MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_ISAC_VAD_H_
+
+#include <stddef.h>
+
+#include "modules/audio_coding/codecs/isac/main/source/structs.h"
+
+void WebRtcIsac_InitPitchFilter(PitchFiltstr* pitchfiltdata);
+void WebRtcIsac_InitPitchAnalysis(PitchAnalysisStruct* state);
+void WebRtcIsac_InitPreFilterbank(PreFiltBankstr* prefiltdata);
+
+double WebRtcIsac_LevDurb(double* a, double* k, double* r, size_t order);
+
+/* The number of all-pass filter factors in an upper or lower channel*/
+#define NUMBEROFCHANNELAPSECTIONS 2
+
+/* The upper channel all-pass filter factors */
+extern const float WebRtcIsac_kUpperApFactorsFloat[2];
+
+/* The lower channel all-pass filter factors */
+extern const float WebRtcIsac_kLowerApFactorsFloat[2];
+
+void WebRtcIsac_AllPassFilter2Float(float* InOut,
+ const float* APSectionFactors,
+ int lengthInOut,
+ int NumberOfSections,
+ float* FilterState);
+void WebRtcIsac_SplitAndFilterFloat(float* in,
+ float* LP,
+ float* HP,
+ double* LP_la,
+ double* HP_la,
+ PreFiltBankstr* prefiltdata);
+
+#endif // MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_ISAC_VAD_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/lattice.c b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/lattice.c
new file mode 100644
index 0000000000..d9d2d65665
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/lattice.c
@@ -0,0 +1,219 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * lattice.c
+ *
+ * contains the normalized lattice filter routines (MA and AR) for iSAC codec
+ *
+ */
+
+#include <math.h>
+#include <memory.h>
+#include <string.h>
+#ifdef WEBRTC_ANDROID
+#include <stdlib.h>
+#endif
+
+#include "modules/audio_coding/codecs/isac/main/source/settings.h"
+#include "modules/audio_coding/codecs/isac/main/source/codec.h"
+
+/* filter the signal using normalized lattice filter */
+/* MA filter */
+void WebRtcIsac_NormLatticeFilterMa(int orderCoef,
+ float *stateF,
+ float *stateG,
+ float *lat_in,
+ double *filtcoeflo,
+ double *lat_out)
+{
+ int n,k,i,u,temp1;
+ int ord_1 = orderCoef+1;
+ float sth[MAX_AR_MODEL_ORDER];
+ float cth[MAX_AR_MODEL_ORDER];
+ float inv_cth[MAX_AR_MODEL_ORDER];
+ double a[MAX_AR_MODEL_ORDER+1];
+ float f[MAX_AR_MODEL_ORDER+1][HALF_SUBFRAMELEN], g[MAX_AR_MODEL_ORDER+1][HALF_SUBFRAMELEN];
+ float gain1;
+
+ for (u=0;u<SUBFRAMES;u++)
+ {
+ /* set the Direct Form coefficients */
+ temp1 = u*ord_1;
+ a[0] = 1;
+ memcpy(a+1, filtcoeflo+temp1+1, sizeof(double) * (ord_1-1));
+
+ /* compute lattice filter coefficients */
+ WebRtcIsac_Dir2Lat(a,orderCoef,sth,cth);
+
+ /* compute the gain */
+ gain1 = (float)filtcoeflo[temp1];
+ for (k=0;k<orderCoef;k++)
+ {
+ gain1 *= cth[k];
+ inv_cth[k] = 1/cth[k];
+ }
+
+ /* normalized lattice filter */
+ /*****************************/
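+    /* Per-section recursion (sth = "sine", cth = "cosine" of the
+       reflection angle):
+         f[k+1][n] = (f[k][n] + sth[k] * g[k][n-1]) / cth[k]
+         g[k+1][n] = cth[k] * g[k][n-1] + sth[k] * f[k+1][n]      */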
+
+ /* initial conditions */
+ for (i=0;i<HALF_SUBFRAMELEN;i++)
+ {
+ f[0][i] = lat_in[i + u * HALF_SUBFRAMELEN];
+ g[0][i] = lat_in[i + u * HALF_SUBFRAMELEN];
+ }
+
+ /* get the state of f&g for the first input, for all orders */
+ for (i=1;i<ord_1;i++)
+ {
+ f[i][0] = inv_cth[i-1]*(f[i-1][0] + sth[i-1]*stateG[i-1]);
+ g[i][0] = cth[i-1]*stateG[i-1] + sth[i-1]* f[i][0];
+ }
+
+ /* filtering */
+ for(k=0;k<orderCoef;k++)
+ {
+ for(n=0;n<(HALF_SUBFRAMELEN-1);n++)
+ {
+ f[k+1][n+1] = inv_cth[k]*(f[k][n+1] + sth[k]*g[k][n]);
+ g[k+1][n+1] = cth[k]*g[k][n] + sth[k]* f[k+1][n+1];
+ }
+ }
+
+ for(n=0;n<HALF_SUBFRAMELEN;n++)
+ {
+ lat_out[n + u * HALF_SUBFRAMELEN] = gain1 * f[orderCoef][n];
+ }
+
+ /* save the states */
+ for (i=0;i<ord_1;i++)
+ {
+ stateF[i] = f[i][HALF_SUBFRAMELEN-1];
+ stateG[i] = g[i][HALF_SUBFRAMELEN-1];
+ }
+ /* process next frame */
+ }
+
+ return;
+}
+
+
+/*///////////////////AR filter ///////////////////////////////*/
+/* filter the signal using normalized lattice filter */
+void WebRtcIsac_NormLatticeFilterAr(int orderCoef,
+ float *stateF,
+ float *stateG,
+ double *lat_in,
+ double *lo_filt_coef,
+ float *lat_out)
+{
+ int n,k,i,u,temp1;
+ int ord_1 = orderCoef+1;
+ float sth[MAX_AR_MODEL_ORDER];
+ float cth[MAX_AR_MODEL_ORDER];
+ double a[MAX_AR_MODEL_ORDER+1];
+ float ARf[MAX_AR_MODEL_ORDER+1][HALF_SUBFRAMELEN], ARg[MAX_AR_MODEL_ORDER+1][HALF_SUBFRAMELEN];
+ float gain1,inv_gain1;
+
+ for (u=0;u<SUBFRAMES;u++)
+ {
+ /* set the denominator and numerator of the Direct Form */
+ temp1 = u*ord_1;
+ a[0] = 1;
+
+ memcpy(a+1, lo_filt_coef+temp1+1, sizeof(double) * (ord_1-1));
+
+ WebRtcIsac_Dir2Lat(a,orderCoef,sth,cth);
+
+ gain1 = (float)lo_filt_coef[temp1];
+ for (k=0;k<orderCoef;k++)
+ {
+ gain1 = cth[k]*gain1;
+ }
+
+ /* initial conditions */
+ inv_gain1 = 1/gain1;
+ for (i=0;i<HALF_SUBFRAMELEN;i++)
+ {
+ ARf[orderCoef][i] = (float)lat_in[i + u * HALF_SUBFRAMELEN]*inv_gain1;
+ }
+
+
+ for (i=orderCoef-1;i>=0;i--) //get the state of f&g for the first input, for all orders
+ {
+ ARf[i][0] = cth[i]*ARf[i+1][0] - sth[i]*stateG[i];
+ ARg[i+1][0] = sth[i]*ARf[i+1][0] + cth[i]* stateG[i];
+ }
+ ARg[0][0] = ARf[0][0];
+
+ for(n=0;n<(HALF_SUBFRAMELEN-1);n++)
+ {
+ for(k=orderCoef-1;k>=0;k--)
+ {
+ ARf[k][n+1] = cth[k]*ARf[k+1][n+1] - sth[k]*ARg[k][n];
+ ARg[k+1][n+1] = sth[k]*ARf[k+1][n+1] + cth[k]* ARg[k][n];
+ }
+ ARg[0][n+1] = ARf[0][n+1];
+ }
+
+ memcpy(lat_out+u * HALF_SUBFRAMELEN, &(ARf[0][0]), sizeof(float) * HALF_SUBFRAMELEN);
+
+ /* cannot use memcpy in the following */
+ for (i=0;i<ord_1;i++)
+ {
+ stateF[i] = ARf[i][HALF_SUBFRAMELEN-1];
+ stateG[i] = ARg[i][HALF_SUBFRAMELEN-1];
+ }
+
+ }
+
+ return;
+}
+
+
+/* compute the reflection coefficients using the step-down procedure*/
+/* converts the direct form parameters to lattice form.*/
+/* a and b are vectors which contain the direct form coefficients,
+ according to
+ A(z) = a(1) + a(2)*z + a(3)*z^2 + ... + a(M+1)*z^M
+ B(z) = b(1) + b(2)*z + b(3)*z^2 + ... + b(M+1)*z^M
+*/
+
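+/* The step-down recursion implemented below, with k_m = a_m^(m):
+ *   a_i^(m-1) = (a_i^(m) - k_m * a_(m-i)^(m)) / (1 - k_m^2),  i = 1..m-1
+ * sth[] holds the reflection coefficients k_m and cth[] the matching
+ * sqrt(1 - k_m^2) terms. */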
+void WebRtcIsac_Dir2Lat(double *a,
+ int orderCoef,
+ float *sth,
+ float *cth)
+{
+ int m, k;
+ float tmp[MAX_AR_MODEL_ORDER];
+ float tmp_inv, cth2;
+
+ sth[orderCoef-1] = (float)a[orderCoef];
+ cth2 = 1.0f - sth[orderCoef-1] * sth[orderCoef-1];
+ cth[orderCoef-1] = (float)sqrt(cth2);
+ for (m=orderCoef-1; m>0; m--)
+ {
+ tmp_inv = 1.0f / cth2;
+ for (k=1; k<=m; k++)
+ {
+ tmp[k] = ((float)a[k] - sth[m] * (float)a[m-k+1]) * tmp_inv;
+ }
+
+ for (k=1; k<m; k++)
+ {
+ a[k] = tmp[k];
+ }
+
+ sth[m-1] = tmp[m];
+ cth2 = 1 - sth[m-1] * sth[m-1];
+ cth[m-1] = (float)sqrt(cth2);
+ }
+}
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/lpc_analysis.c b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/lpc_analysis.c
new file mode 100644
index 0000000000..0fda73bda6
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/lpc_analysis.c
@@ -0,0 +1,496 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <math.h>
+#include <string.h>
+
+#include "modules/audio_coding/codecs/isac/main/source/lpc_analysis.h"
+#include "modules/audio_coding/codecs/isac/main/source/settings.h"
+#include "modules/audio_coding/codecs/isac/main/source/codec.h"
+#include "modules/audio_coding/codecs/isac/main/source/entropy_coding.h"
+#include "modules/audio_coding/codecs/isac/main/source/filter_functions.h"
+#include "modules/audio_coding/codecs/isac/main/source/isac_vad.h"
+
+/* window */
+/* Matlab generation code:
+ * t = (1:256)/257; r = 1-(1-t).^.45; w = sin(r*pi).^3; w = w/sum(w); plot((1:256)/8, w); grid;
+ * for k=1:16, fprintf(1, '%.8f, ', w(k*16 + (-15:0))); fprintf(1, '\n'); end
+ */
+static const double kLpcCorrWindow[WINLEN] = {
+ 0.00000000, 0.00000001, 0.00000004, 0.00000010, 0.00000020,
+ 0.00000035, 0.00000055, 0.00000083, 0.00000118, 0.00000163,
+ 0.00000218, 0.00000283, 0.00000361, 0.00000453, 0.00000558, 0.00000679,
+ 0.00000817, 0.00000973, 0.00001147, 0.00001342, 0.00001558,
+ 0.00001796, 0.00002058, 0.00002344, 0.00002657, 0.00002997,
+ 0.00003365, 0.00003762, 0.00004190, 0.00004651, 0.00005144, 0.00005673,
+ 0.00006236, 0.00006837, 0.00007476, 0.00008155, 0.00008875,
+ 0.00009636, 0.00010441, 0.00011290, 0.00012186, 0.00013128,
+ 0.00014119, 0.00015160, 0.00016252, 0.00017396, 0.00018594, 0.00019846,
+ 0.00021155, 0.00022521, 0.00023946, 0.00025432, 0.00026978,
+ 0.00028587, 0.00030260, 0.00031998, 0.00033802, 0.00035674,
+ 0.00037615, 0.00039626, 0.00041708, 0.00043863, 0.00046092, 0.00048396,
+ 0.00050775, 0.00053233, 0.00055768, 0.00058384, 0.00061080,
+ 0.00063858, 0.00066720, 0.00069665, 0.00072696, 0.00075813,
+ 0.00079017, 0.00082310, 0.00085692, 0.00089164, 0.00092728, 0.00096384,
+ 0.00100133, 0.00103976, 0.00107914, 0.00111947, 0.00116077,
+ 0.00120304, 0.00124630, 0.00129053, 0.00133577, 0.00138200,
+ 0.00142924, 0.00147749, 0.00152676, 0.00157705, 0.00162836, 0.00168070,
+ 0.00173408, 0.00178850, 0.00184395, 0.00190045, 0.00195799,
+ 0.00201658, 0.00207621, 0.00213688, 0.00219860, 0.00226137,
+ 0.00232518, 0.00239003, 0.00245591, 0.00252284, 0.00259079, 0.00265977,
+ 0.00272977, 0.00280078, 0.00287280, 0.00294582, 0.00301984,
+ 0.00309484, 0.00317081, 0.00324774, 0.00332563, 0.00340446,
+ 0.00348421, 0.00356488, 0.00364644, 0.00372889, 0.00381220, 0.00389636,
+ 0.00398135, 0.00406715, 0.00415374, 0.00424109, 0.00432920,
+ 0.00441802, 0.00450754, 0.00459773, 0.00468857, 0.00478001,
+ 0.00487205, 0.00496464, 0.00505775, 0.00515136, 0.00524542, 0.00533990,
+ 0.00543476, 0.00552997, 0.00562548, 0.00572125, 0.00581725,
+ 0.00591342, 0.00600973, 0.00610612, 0.00620254, 0.00629895,
+ 0.00639530, 0.00649153, 0.00658758, 0.00668341, 0.00677894, 0.00687413,
+ 0.00696891, 0.00706322, 0.00715699, 0.00725016, 0.00734266,
+ 0.00743441, 0.00752535, 0.00761540, 0.00770449, 0.00779254,
+ 0.00787947, 0.00796519, 0.00804963, 0.00813270, 0.00821431, 0.00829437,
+ 0.00837280, 0.00844949, 0.00852436, 0.00859730, 0.00866822,
+ 0.00873701, 0.00880358, 0.00886781, 0.00892960, 0.00898884,
+ 0.00904542, 0.00909923, 0.00915014, 0.00919805, 0.00924283, 0.00928436,
+ 0.00932252, 0.00935718, 0.00938821, 0.00941550, 0.00943890,
+ 0.00945828, 0.00947351, 0.00948446, 0.00949098, 0.00949294,
+ 0.00949020, 0.00948262, 0.00947005, 0.00945235, 0.00942938, 0.00940099,
+ 0.00936704, 0.00932738, 0.00928186, 0.00923034, 0.00917268,
+ 0.00910872, 0.00903832, 0.00896134, 0.00887763, 0.00878706,
+ 0.00868949, 0.00858478, 0.00847280, 0.00835343, 0.00822653, 0.00809199,
+ 0.00794970, 0.00779956, 0.00764145, 0.00747530, 0.00730103,
+ 0.00711857, 0.00692787, 0.00672888, 0.00652158, 0.00630597,
+ 0.00608208, 0.00584994, 0.00560962, 0.00536124, 0.00510493, 0.00484089,
+ 0.00456935, 0.00429062, 0.00400505, 0.00371310, 0.00341532,
+ 0.00311238, 0.00280511, 0.00249452, 0.00218184, 0.00186864,
+ 0.00155690, 0.00124918, 0.00094895, 0.00066112, 0.00039320, 0.00015881
+};
+
+static void WebRtcIsac_GetVars(const double* input,
+ const int16_t* pitchGains_Q12,
+ double* oldEnergy,
+ double* varscale) {
+ double nrg[4], chng, pg;
+ int k;
+
+  double pitchGains[4] = {0, 0, 0, 0};
+
+  /* Calculate the energy of each quarter of the frame. */
+ nrg[0] = 0.0001;
+ for (k = QLOOKAHEAD/2; k < (FRAMESAMPLES_QUARTER + QLOOKAHEAD) / 2; k++) {
+ nrg[0] += input[k]*input[k];
+ }
+ nrg[1] = 0.0001;
+ for ( ; k < (FRAMESAMPLES_HALF + QLOOKAHEAD) / 2; k++) {
+ nrg[1] += input[k]*input[k];
+ }
+ nrg[2] = 0.0001;
+ for ( ; k < (FRAMESAMPLES*3/4 + QLOOKAHEAD) / 2; k++) {
+ nrg[2] += input[k]*input[k];
+ }
+ nrg[3] = 0.0001;
+ for ( ; k < (FRAMESAMPLES + QLOOKAHEAD) / 2; k++) {
+ nrg[3] += input[k]*input[k];
+ }
+
+ /* Calculate average level change */
+ chng = 0.25 * (fabs(10.0 * log10(nrg[3] / nrg[2])) +
+ fabs(10.0 * log10(nrg[2] / nrg[1])) +
+ fabs(10.0 * log10(nrg[1] / nrg[0])) +
+ fabs(10.0 * log10(nrg[0] / *oldEnergy)));
+
+
+ /* Find average pitch gain */
+ pg = 0.0;
+ for (k=0; k<4; k++)
+ {
+ pitchGains[k] = ((float)pitchGains_Q12[k])/4096;
+ pg += pitchGains[k];
+ }
+ pg *= 0.25;
+
+ /* If pitch gain is low and energy constant - increase noise level*/
+ /* Matlab code:
+ pg = 0:.01:.45; plot(pg, 0.0 + 1.0 * exp( -1.0 * exp(-200.0 * pg.*pg.*pg) / (1.0 + 0.4 * 0) ))
+ */
+ *varscale = 0.0 + 1.0 * exp( -1.4 * exp(-200.0 * pg*pg*pg) / (1.0 + 0.4 * chng) );
+
+ *oldEnergy = nrg[3];
+}
+
+static void WebRtcIsac_GetVarsUB(const double* input,
+ double* oldEnergy,
+ double* varscale) {
+ double nrg[4], chng;
+ int k;
+
+  /* Calculate the energy of each quarter of the frame. */
+ nrg[0] = 0.0001;
+ for (k = 0; k < (FRAMESAMPLES_QUARTER) / 2; k++) {
+ nrg[0] += input[k]*input[k];
+ }
+ nrg[1] = 0.0001;
+ for ( ; k < (FRAMESAMPLES_HALF) / 2; k++) {
+ nrg[1] += input[k]*input[k];
+ }
+ nrg[2] = 0.0001;
+ for ( ; k < (FRAMESAMPLES*3/4) / 2; k++) {
+ nrg[2] += input[k]*input[k];
+ }
+ nrg[3] = 0.0001;
+ for ( ; k < (FRAMESAMPLES) / 2; k++) {
+ nrg[3] += input[k]*input[k];
+ }
+
+ /* Calculate average level change */
+ chng = 0.25 * (fabs(10.0 * log10(nrg[3] / nrg[2])) +
+ fabs(10.0 * log10(nrg[2] / nrg[1])) +
+ fabs(10.0 * log10(nrg[1] / nrg[0])) +
+ fabs(10.0 * log10(nrg[0] / *oldEnergy)));
+
+
+  /* If the energy is constant, increase the noise level. This is the
+     upper-band variant of the formula in WebRtcIsac_GetVars() above,
+     without the pitch-gain term. */
+ *varscale = exp( -1.4 / (1.0 + 0.4 * chng) );
+
+ *oldEnergy = nrg[3];
+}
+
+void WebRtcIsac_GetLpcCoefLb(double *inLo, double *inHi, MaskFiltstr *maskdata,
+ double signal_noise_ratio, const int16_t *pitchGains_Q12,
+ double *lo_coeff, double *hi_coeff)
+{
+ int k, n, j, pos1, pos2;
+ double varscale;
+
+ double DataLo[WINLEN], DataHi[WINLEN];
+ double corrlo[ORDERLO+2], corrlo2[ORDERLO+1];
+ double corrhi[ORDERHI+1];
+ double k_veclo[ORDERLO], k_vechi[ORDERHI];
+
+ double a_LO[ORDERLO+1], a_HI[ORDERHI+1];
+ double tmp, res_nrg;
+
+ double FwdA, FwdB;
+
+ /* hearing threshold level in dB; higher value gives more noise */
+ const double HearThresOffset = -28.0;
+
+  /* bandwidth expansion factors for the low and high band */
+ const double gammaLo = 0.9;
+ const double gammaHi = 0.8;
+
+ /* less-noise-at-low-frequencies factor */
+ double aa;
+
+
+ /* convert from dB to signal level */
+ const double H_T_H = pow(10.0, 0.05 * HearThresOffset);
+ double S_N_R = pow(10.0, 0.05 * signal_noise_ratio) / 3.46; /* divide by sqrt(12) */
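+  /* sqrt(12) =~ 3.46 is the step-to-rms ratio of uniform quantization
+   * noise (noise variance q^2 / 12 for step size q). */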
+
+  /* change quality level depending on pitch gains and level fluctuations */
+ WebRtcIsac_GetVars(inLo, pitchGains_Q12, &(maskdata->OldEnergy), &varscale);
+
+ /* less-noise-at-low-frequencies factor */
+ aa = 0.35 * (0.5 + 0.5 * varscale);
+
+ /* replace data in buffer by new look-ahead data */
+ for (pos1 = 0; pos1 < QLOOKAHEAD; pos1++)
+ maskdata->DataBufferLo[pos1 + WINLEN - QLOOKAHEAD] = inLo[pos1];
+
+ for (k = 0; k < SUBFRAMES; k++) {
+
+ /* Update input buffer and multiply signal with window */
+ for (pos1 = 0; pos1 < WINLEN - UPDATE/2; pos1++) {
+ maskdata->DataBufferLo[pos1] = maskdata->DataBufferLo[pos1 + UPDATE/2];
+ maskdata->DataBufferHi[pos1] = maskdata->DataBufferHi[pos1 + UPDATE/2];
+ DataLo[pos1] = maskdata->DataBufferLo[pos1] * kLpcCorrWindow[pos1];
+ DataHi[pos1] = maskdata->DataBufferHi[pos1] * kLpcCorrWindow[pos1];
+ }
+ pos2 = k * UPDATE/2;
+ for (n = 0; n < UPDATE/2; n++, pos1++) {
+ maskdata->DataBufferLo[pos1] = inLo[QLOOKAHEAD + pos2];
+ maskdata->DataBufferHi[pos1] = inHi[pos2++];
+ DataLo[pos1] = maskdata->DataBufferLo[pos1] * kLpcCorrWindow[pos1];
+ DataHi[pos1] = maskdata->DataBufferHi[pos1] * kLpcCorrWindow[pos1];
+ }
+
+ /* Get correlation coefficients */
+ WebRtcIsac_AutoCorr(corrlo, DataLo, WINLEN, ORDERLO+1); /* computing autocorrelation */
+ WebRtcIsac_AutoCorr(corrhi, DataHi, WINLEN, ORDERHI);
+
+
+ /* less noise for lower frequencies, by filtering/scaling autocorrelation sequences */
+ corrlo2[0] = (1.0+aa*aa) * corrlo[0] - 2.0*aa * corrlo[1];
+ tmp = (1.0 + aa*aa);
+ for (n = 1; n <= ORDERLO; n++) {
+ corrlo2[n] = tmp * corrlo[n] - aa * (corrlo[n-1] + corrlo[n+1]);
+ }
+ tmp = (1.0+aa) * (1.0+aa);
+ for (n = 0; n <= ORDERHI; n++) {
+ corrhi[n] = tmp * corrhi[n];
+ }
+
+ /* add white noise floor */
+ corrlo2[0] += 1e-6;
+ corrhi[0] += 1e-6;
+
+
+ FwdA = 0.01;
+ FwdB = 0.01;
+
+ /* recursive filtering of correlation over subframes */
+ for (n = 0; n <= ORDERLO; n++) {
+ maskdata->CorrBufLo[n] = FwdA * maskdata->CorrBufLo[n] + corrlo2[n];
+ corrlo2[n] = ((1.0-FwdA)*FwdB) * maskdata->CorrBufLo[n] + (1.0-FwdB) * corrlo2[n];
+ }
+ for (n = 0; n <= ORDERHI; n++) {
+ maskdata->CorrBufHi[n] = FwdA * maskdata->CorrBufHi[n] + corrhi[n];
+ corrhi[n] = ((1.0-FwdA)*FwdB) * maskdata->CorrBufHi[n] + (1.0-FwdB) * corrhi[n];
+ }
+
+ /* compute prediction coefficients */
+ WebRtcIsac_LevDurb(a_LO, k_veclo, corrlo2, ORDERLO);
+ WebRtcIsac_LevDurb(a_HI, k_vechi, corrhi, ORDERHI);
+
+ /* bandwidth expansion */
+ tmp = gammaLo;
+ for (n = 1; n <= ORDERLO; n++) {
+ a_LO[n] *= tmp;
+ tmp *= gammaLo;
+ }
+
+ /* residual energy */
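+    /* i.e. res_nrg = a' * R * a, where R is the symmetric Toeplitz matrix
+       built from corrlo2[]; the double loop exploits the symmetry of R. */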
+ res_nrg = 0.0;
+ for (j = 0; j <= ORDERLO; j++) {
+ for (n = 0; n <= j; n++) {
+ res_nrg += a_LO[j] * corrlo2[j-n] * a_LO[n];
+ }
+ for (n = j+1; n <= ORDERLO; n++) {
+ res_nrg += a_LO[j] * corrlo2[n-j] * a_LO[n];
+ }
+ }
+
+ /* add hearing threshold and compute the gain */
+ *lo_coeff++ = S_N_R / (sqrt(res_nrg) / varscale + H_T_H);
+
+ /* copy coefficients to output array */
+ for (n = 1; n <= ORDERLO; n++) {
+ *lo_coeff++ = a_LO[n];
+ }
+
+
+ /* bandwidth expansion */
+ tmp = gammaHi;
+ for (n = 1; n <= ORDERHI; n++) {
+ a_HI[n] *= tmp;
+ tmp *= gammaHi;
+ }
+
+ /* residual energy */
+ res_nrg = 0.0;
+ for (j = 0; j <= ORDERHI; j++) {
+ for (n = 0; n <= j; n++) {
+ res_nrg += a_HI[j] * corrhi[j-n] * a_HI[n];
+ }
+ for (n = j+1; n <= ORDERHI; n++) {
+ res_nrg += a_HI[j] * corrhi[n-j] * a_HI[n];
+ }
+ }
+
+    /* add hearing threshold and compute the gain */
+ *hi_coeff++ = S_N_R / (sqrt(res_nrg) / varscale + H_T_H);
+
+ /* copy coefficients to output array */
+ for (n = 1; n <= ORDERHI; n++) {
+ *hi_coeff++ = a_HI[n];
+ }
+ }
+}
+
+
+
+/******************************************************************************
+ * WebRtcIsac_GetLpcCoefUb()
+ *
+ * Compute LP coefficients and correlation coefficients. At 12 kHz LP
+ * coefficients of the first and the last sub-frame are computed. At 16 kHz
+ * LP coefficients of 4th, 8th and 12th sub-frames are computed. We always
+ * compute correlation coefficients of all sub-frames.
+ *
+ * Inputs:
+ * -inSignal : Input signal
+ * -maskdata : a structure keeping signal from previous frame.
+ * -bandwidth : specifies if the codec is in 0-16 kHz mode or
+ * 0-12 kHz mode.
+ *
+ * Outputs:
+ * -lpCoeff : pointer to a buffer where A-polynomials are
+ * written to (first coeff is 1 and it is not
+ * written)
+ * -corrMat : a matrix where correlation coefficients of each
+ * sub-frame are written to one row.
+ * -varscale : a scale used to compute LPC gains.
+ */
+void
+WebRtcIsac_GetLpcCoefUb(
+ double* inSignal,
+ MaskFiltstr* maskdata,
+ double* lpCoeff,
+ double corrMat[][UB_LPC_ORDER + 1],
+ double* varscale,
+ int16_t bandwidth)
+{
+ int frameCntr, activeFrameCntr, n, pos1, pos2;
+ int16_t criterion1;
+ int16_t criterion2;
+ int16_t numSubFrames = SUBFRAMES * (1 + (bandwidth == isac16kHz));
+ double data[WINLEN];
+ double corrSubFrame[UB_LPC_ORDER+2];
+ double reflecCoeff[UB_LPC_ORDER];
+
+ double aPolynom[UB_LPC_ORDER+1];
+ double tmp;
+
+  /* bandwidth expansion factors */
+ const double gamma = 0.9;
+
+  /* change quality level depending on pitch gains and level fluctuations */
+ WebRtcIsac_GetVarsUB(inSignal, &(maskdata->OldEnergy), varscale);
+
+ /* replace data in buffer by new look-ahead data */
+ for(frameCntr = 0, activeFrameCntr = 0; frameCntr < numSubFrames;
+ frameCntr++)
+ {
+ if(frameCntr == SUBFRAMES)
+ {
+ // we are in 16 kHz
+ varscale++;
+ WebRtcIsac_GetVarsUB(&inSignal[FRAMESAMPLES_HALF],
+ &(maskdata->OldEnergy), varscale);
+ }
+ /* Update input buffer and multiply signal with window */
+ for(pos1 = 0; pos1 < WINLEN - UPDATE/2; pos1++)
+ {
+ maskdata->DataBufferLo[pos1] = maskdata->DataBufferLo[pos1 +
+ UPDATE/2];
+ data[pos1] = maskdata->DataBufferLo[pos1] * kLpcCorrWindow[pos1];
+ }
+ pos2 = frameCntr * UPDATE/2;
+ for(n = 0; n < UPDATE/2; n++, pos1++, pos2++)
+ {
+ maskdata->DataBufferLo[pos1] = inSignal[pos2];
+ data[pos1] = maskdata->DataBufferLo[pos1] * kLpcCorrWindow[pos1];
+ }
+
+ /* Get correlation coefficients */
+ /* computing autocorrelation */
+ WebRtcIsac_AutoCorr(corrSubFrame, data, WINLEN, UB_LPC_ORDER+1);
+ memcpy(corrMat[frameCntr], corrSubFrame,
+ (UB_LPC_ORDER+1)*sizeof(double));
+
+ criterion1 = ((frameCntr == 0) || (frameCntr == (SUBFRAMES - 1))) &&
+ (bandwidth == isac12kHz);
+ criterion2 = (((frameCntr+1) % 4) == 0) &&
+ (bandwidth == isac16kHz);
+ if(criterion1 || criterion2)
+ {
+ /* add noise */
+ corrSubFrame[0] += 1e-6;
+ /* compute prediction coefficients */
+ WebRtcIsac_LevDurb(aPolynom, reflecCoeff, corrSubFrame,
+ UB_LPC_ORDER);
+
+ /* bandwidth expansion */
+ tmp = gamma;
+ for (n = 1; n <= UB_LPC_ORDER; n++)
+ {
+ *lpCoeff++ = aPolynom[n] * tmp;
+ tmp *= gamma;
+ }
+ activeFrameCntr++;
+ }
+ }
+}
+
+
+
+/******************************************************************************
+ * WebRtcIsac_GetLpcGain()
+ *
+ * Compute the LPC gains for each sub-frame, given the LPC of each sub-frame
+ * and the corresponding correlation coefficients.
+ *
+ * Inputs:
+ * -signal_noise_ratio : the desired SNR in dB.
+ * -numVecs : number of sub-frames
+ * -corrMat : a matrix of correlation coefficients where
+ * each row is a set of correlation coefficients of
+ * one sub-frame.
+ * -varscale : a scale computed when WebRtcIsac_GetLpcCoefUb()
+ * is called.
+ *
+ * Outputs:
+ * -gain : pointer to a buffer where LP gains are written.
+ *
+ */
+void
+WebRtcIsac_GetLpcGain(
+ double signal_noise_ratio,
+ const double* filtCoeffVecs,
+ int numVecs,
+ double* gain,
+ double corrMat[][UB_LPC_ORDER + 1],
+ const double* varscale)
+{
+ int16_t j, n;
+ int16_t subFrameCntr;
+ double aPolynom[ORDERLO + 1];
+ double res_nrg;
+
+ const double HearThresOffset = -28.0;
+ const double H_T_H = pow(10.0, 0.05 * HearThresOffset);
+ /* divide by sqrt(12) = 3.46 */
+ const double S_N_R = pow(10.0, 0.05 * signal_noise_ratio) / 3.46;
+
+ aPolynom[0] = 1;
+ for(subFrameCntr = 0; subFrameCntr < numVecs; subFrameCntr++)
+ {
+ if(subFrameCntr == SUBFRAMES)
+ {
+      // We are in the second half of a SWB frame; use the new varscale.
+ varscale++;
+ }
+ memcpy(&aPolynom[1], &filtCoeffVecs[(subFrameCntr * (UB_LPC_ORDER + 1)) +
+ 1], sizeof(double) * UB_LPC_ORDER);
+
+ /* residual energy */
+ res_nrg = 0.0;
+ for(j = 0; j <= UB_LPC_ORDER; j++)
+ {
+ for(n = 0; n <= j; n++)
+ {
+ res_nrg += aPolynom[j] * corrMat[subFrameCntr][j-n] *
+ aPolynom[n];
+ }
+ for(n = j+1; n <= UB_LPC_ORDER; n++)
+ {
+ res_nrg += aPolynom[j] * corrMat[subFrameCntr][n-j] *
+ aPolynom[n];
+ }
+ }
+
+ /* add hearing threshold and compute the gain */
+ gain[subFrameCntr] = S_N_R / (sqrt(res_nrg) / *varscale + H_T_H);
+ }
+}
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/lpc_analysis.h b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/lpc_analysis.h
new file mode 100644
index 0000000000..5503e2d49b
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/lpc_analysis.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * lpc_analysis.h
+ *
+ * LPC functions
+ *
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_LPC_ANALYSIS_H_
+#define MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_LPC_ANALYSIS_H_
+
+#include "modules/audio_coding/codecs/isac/main/source/settings.h"
+#include "modules/audio_coding/codecs/isac/main/source/structs.h"
+
+void WebRtcIsac_GetLpcCoefLb(double* inLo,
+ double* inHi,
+ MaskFiltstr* maskdata,
+ double signal_noise_ratio,
+ const int16_t* pitchGains_Q12,
+ double* lo_coeff,
+ double* hi_coeff);
+
+void WebRtcIsac_GetLpcGain(double signal_noise_ratio,
+ const double* filtCoeffVecs,
+ int numVecs,
+ double* gain,
+ double corrLo[][UB_LPC_ORDER + 1],
+ const double* varscale);
+
+void WebRtcIsac_GetLpcCoefUb(double* inSignal,
+ MaskFiltstr* maskdata,
+ double* lpCoeff,
+ double corr[][UB_LPC_ORDER + 1],
+ double* varscale,
+ int16_t bandwidth);
+
+#endif /* MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_LPC_ANALYSIS_H_ */
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/lpc_gain_swb_tables.c b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/lpc_gain_swb_tables.c
new file mode 100644
index 0000000000..670754065f
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/lpc_gain_swb_tables.c
@@ -0,0 +1,136 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * lpc_gain_swb_tables.c
+ *
+ * This file defines tables used for entropy coding of LPC Gain
+ * of upper-band.
+ *
+ */
+
+#include "modules/audio_coding/codecs/isac/main/source/lpc_gain_swb_tables.h"
+#include "modules/audio_coding/codecs/isac/main/source/settings.h"
+
+const double WebRtcIsac_kQSizeLpcGain = 0.100000;
+
+const double WebRtcIsac_kMeanLpcGain = -3.3822;
+
+/*
+* The smallest reconstruction points for quantization of
+* LPC gains.
+*/
+const double WebRtcIsac_kLeftRecPointLpcGain[SUBFRAMES] =
+{
+ -0.800000, -1.000000, -1.200000, -2.200000, -3.000000, -12.700000
+};
+
+/*
+* Number of reconstruction points of quantizers for LPC Gains.
+*/
+const int16_t WebRtcIsac_kNumQCellLpcGain[SUBFRAMES] =
+{
+ 17, 20, 25, 45, 77, 170
+};
+
+/*
+* Starting index for the entropy decoder to search for the right interval,
+* one entry per sub-frame gain.
+*/
+const uint16_t WebRtcIsac_kLpcGainEntropySearch[SUBFRAMES] =
+{
+ 8, 10, 12, 22, 38, 85
+};
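+
+/*
+* A plausible use of these hints (a sketch only; the real search lives in
+* the entropy-coding sources): a decoder that needs the CDF interval
+* containing a scaled arithmetic-decoder value `target` for gain k can
+* start at the hinted index instead of at zero:
+*
+*   const uint16_t* cdf = WebRtcIsac_kLpcGainCdfMat[k];
+*   int i = WebRtcIsac_kLpcGainEntropySearch[k];
+*   while (cdf[i + 1] <= target) i++;  // move right
+*   while (cdf[i] > target) i--;       // or move left
+*   // i is now the quantization index for gain k.
+*/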
+
+/*
+* The following 6 vectors define the CDFs of the 6 decorrelated LPC
+* gains.
+*/
+const uint16_t WebRtcIsac_kLpcGainCdfVec0[18] =
+{
+ 0, 10, 27, 83, 234, 568, 1601, 4683, 16830, 57534, 63437,
+ 64767, 65229, 65408, 65483, 65514, 65527, 65535
+};
+
+const uint16_t WebRtcIsac_kLpcGainCdfVec1[21] =
+{
+ 0, 15, 33, 84, 185, 385, 807, 1619, 3529, 7850, 19488,
+ 51365, 62437, 64548, 65088, 65304, 65409, 65484, 65507, 65522, 65535
+};
+
+const uint16_t WebRtcIsac_kLpcGainCdfVec2[26] =
+{
+ 0, 15, 29, 54, 89, 145, 228, 380, 652, 1493, 4260,
+ 12359, 34133, 50749, 57224, 60814, 62927, 64078, 64742, 65103, 65311, 65418,
+ 65473, 65509, 65521, 65535
+};
+
+const uint16_t WebRtcIsac_kLpcGainCdfVec3[46] =
+{
+ 0, 8, 12, 16, 26, 42, 56, 76, 111, 164, 247,
+ 366, 508, 693, 1000, 1442, 2155, 3188, 4854, 7387, 11249, 17617,
+ 30079, 46711, 56291, 60127, 62140, 63258, 63954, 64384, 64690, 64891, 65031,
+ 65139, 65227, 65293, 65351, 65399, 65438, 65467, 65492, 65504, 65510, 65518,
+ 65523, 65535
+};
+
+const uint16_t WebRtcIsac_kLpcGainCdfVec4[78] =
+{
+ 0, 17, 29, 39, 51, 70, 104, 154, 234, 324, 443,
+ 590, 760, 971, 1202, 1494, 1845, 2274, 2797, 3366, 4088, 4905,
+ 5899, 7142, 8683, 10625, 12983, 16095, 20637, 28216, 38859, 47237, 51537,
+ 54150, 56066, 57583, 58756, 59685, 60458, 61103, 61659, 62144, 62550, 62886,
+ 63186, 63480, 63743, 63954, 64148, 64320, 64467, 64600, 64719, 64837, 64939,
+ 65014, 65098, 65160, 65211, 65250, 65290, 65325, 65344, 65366, 65391, 65410,
+ 65430, 65447, 65460, 65474, 65487, 65494, 65501, 65509, 65513, 65518, 65520,
+ 65535
+};
+
+const uint16_t WebRtcIsac_kLpcGainCdfVec5[171] =
+{
+ 0, 10, 12, 14, 16, 18, 23, 29, 35, 42, 51,
+ 58, 65, 72, 78, 87, 96, 103, 111, 122, 134, 150,
+ 167, 184, 202, 223, 244, 265, 289, 315, 346, 379, 414,
+ 450, 491, 532, 572, 613, 656, 700, 751, 802, 853, 905,
+ 957, 1021, 1098, 1174, 1250, 1331, 1413, 1490, 1565, 1647, 1730,
+ 1821, 1913, 2004, 2100, 2207, 2314, 2420, 2532, 2652, 2783, 2921,
+ 3056, 3189, 3327, 3468, 3640, 3817, 3993, 4171, 4362, 4554, 4751,
+ 4948, 5142, 5346, 5566, 5799, 6044, 6301, 6565, 6852, 7150, 7470,
+ 7797, 8143, 8492, 8835, 9181, 9547, 9919, 10315, 10718, 11136, 11566,
+ 12015, 12482, 12967, 13458, 13953, 14432, 14903, 15416, 15936, 16452, 16967,
+ 17492, 18024, 18600, 19173, 19736, 20311, 20911, 21490, 22041, 22597, 23157,
+ 23768, 24405, 25034, 25660, 26280, 26899, 27614, 28331, 29015, 29702, 30403,
+ 31107, 31817, 32566, 33381, 34224, 35099, 36112, 37222, 38375, 39549, 40801,
+ 42074, 43350, 44626, 45982, 47354, 48860, 50361, 51845, 53312, 54739, 56026,
+ 57116, 58104, 58996, 59842, 60658, 61488, 62324, 63057, 63769, 64285, 64779,
+ 65076, 65344, 65430, 65500, 65517, 65535
+};
+
+/*
+* An array of pointers to CDFs of decorrelated LPC Gains
+*/
+const uint16_t* WebRtcIsac_kLpcGainCdfMat[SUBFRAMES] =
+{
+ WebRtcIsac_kLpcGainCdfVec0, WebRtcIsac_kLpcGainCdfVec1,
+ WebRtcIsac_kLpcGainCdfVec2, WebRtcIsac_kLpcGainCdfVec3,
+ WebRtcIsac_kLpcGainCdfVec4, WebRtcIsac_kLpcGainCdfVec5
+};
+
+/*
+* A matrix to decorrelate the LPC gains of the sub-frames.
+*/
+const double WebRtcIsac_kLpcGainDecorrMat[SUBFRAMES][SUBFRAMES] =
+{
+ {-0.150860, 0.327872, 0.367220, 0.504613, 0.559270, 0.409234},
+ { 0.457128, -0.613591, -0.289283, -0.029734, 0.393760, 0.418240},
+ {-0.626043, 0.136489, -0.439118, -0.448323, 0.135987, 0.420869},
+ { 0.526617, 0.480187, 0.242552, -0.488754, -0.158713, 0.411331},
+ {-0.302587, -0.494953, 0.588112, -0.063035, -0.404290, 0.387510},
+ { 0.086378, 0.147714, -0.428875, 0.548300, -0.570121, 0.401391}
+};
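+
+/*
+* How the tables in this file presumably fit together (a sketch; the
+* actual encoder lives in the entropy-coding sources): the six sub-frame
+* log gains (here an illustrative logGain[]) are mean-removed and
+* decorrelated with the KLT matrix above,
+*
+*   z[i] = sum_j WebRtcIsac_kLpcGainDecorrMat[i][j]
+*                * (logGain[j] - WebRtcIsac_kMeanLpcGain),
+*
+* and each z[i] is then uniformly quantized with step
+* WebRtcIsac_kQSizeLpcGain and entropy coded with its own CDF,
+* WebRtcIsac_kLpcGainCdfMat[i].
+*/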
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/lpc_gain_swb_tables.h b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/lpc_gain_swb_tables.h
new file mode 100644
index 0000000000..39c4a24ef4
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/lpc_gain_swb_tables.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * lpc_gain_swb_tables.h
+ *
+ * This file declares tables used for entropy coding of LPC Gain
+ * of upper-band.
+ *
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_LPC_GAIN_SWB_TABLES_H_
+#define MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_LPC_GAIN_SWB_TABLES_H_
+
+#include <stdint.h>
+
+#include "modules/audio_coding/codecs/isac/main/source/settings.h"
+
+extern const double WebRtcIsac_kQSizeLpcGain;
+
+extern const double WebRtcIsac_kLeftRecPointLpcGain[SUBFRAMES];
+
+extern const int16_t WebRtcIsac_kNumQCellLpcGain[SUBFRAMES];
+
+extern const uint16_t WebRtcIsac_kLpcGainEntropySearch[SUBFRAMES];
+
+extern const uint16_t WebRtcIsac_kLpcGainCdfVec0[18];
+
+extern const uint16_t WebRtcIsac_kLpcGainCdfVec1[21];
+
+extern const uint16_t WebRtcIsac_kLpcGainCdfVec2[26];
+
+extern const uint16_t WebRtcIsac_kLpcGainCdfVec3[46];
+
+extern const uint16_t WebRtcIsac_kLpcGainCdfVec4[78];
+
+extern const uint16_t WebRtcIsac_kLpcGainCdfVec5[171];
+
+extern const uint16_t* WebRtcIsac_kLpcGainCdfMat[SUBFRAMES];
+
+extern const double WebRtcIsac_kLpcGainDecorrMat[SUBFRAMES][SUBFRAMES];
+
+#endif // MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_LPC_GAIN_SWB_TABLES_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/lpc_shape_swb12_tables.c b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/lpc_shape_swb12_tables.c
new file mode 100644
index 0000000000..e3600a7fab
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/lpc_shape_swb12_tables.c
@@ -0,0 +1,158 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * lpc_shape_swb12_tables.c
+ *
+ * This file defines tables used for entropy coding of LPC shape of
+ * upper-band signal if the bandwidth is 12 kHz.
+ *
+ */
+
+#include "modules/audio_coding/codecs/isac/main/source/lpc_shape_swb12_tables.h"
+#include "modules/audio_coding/codecs/isac/main/source/settings.h"
+
+/*
+* Mean values of the LAR coefficients.
+*/
+const double WebRtcIsac_kMeanLarUb12[UB_LPC_ORDER] =
+{
+ 0.03748928306641, 0.09453441192543, -0.01112522344398, 0.03800237516842
+};
+
+/*
+* A rotation matrix to decorrelate intra-vector correlation,
+* i.e. correlation among components of LAR vector.
+*/
+const double WebRtcIsac_kIntraVecDecorrMatUb12[UB_LPC_ORDER][UB_LPC_ORDER] =
+{
+ {-0.00075365493856, -0.05809964887743, -0.23397966154116, 0.97050367376411},
+ { 0.00625021257734, -0.17299965610679, 0.95977735920651, 0.22104179375008},
+ { 0.20543384258374, -0.96202143495696, -0.15301870801552, -0.09432375099565},
+ {-0.97865075648479, -0.20300322280841, -0.02581111653779, -0.01913568980258}
+};
+
+/*
+* A rotation matrix to remove correlation among LAR coefficients
+* of different LAR vectors. One might expect the decorrelation matrix
+* for the first component to differ from that for the second, but we
+* have not observed a significant benefit from using different
+* decorrelation matrices for different components.
+*/
+const double WebRtcIsac_kInterVecDecorrMatUb12
+[UB_LPC_VEC_PER_FRAME][UB_LPC_VEC_PER_FRAME] =
+{
+ { 0.70650597970460, -0.70770707262373},
+ {-0.70770707262373, -0.70650597970460}
+};
+
+/*
+* LAR quantization step-size.
+*/
+const double WebRtcIsac_kLpcShapeQStepSizeUb12 = 0.150000;
+
+/*
+* The smallest reconstruction points for quantization of LAR coefficients.
+*/
+const double WebRtcIsac_kLpcShapeLeftRecPointUb12
+[UB_LPC_ORDER*UB_LPC_VEC_PER_FRAME] =
+{
+ -0.900000, -1.050000, -1.350000, -1.800000, -1.350000, -1.650000,
+ -2.250000, -3.450000
+};
+
+/*
+* Number of reconstruction points of quantizers for LAR coefficients.
+*/
+const int16_t WebRtcIsac_kLpcShapeNumRecPointUb12
+[UB_LPC_ORDER * UB_LPC_VEC_PER_FRAME] =
+{
+ 13, 15, 19, 27, 19, 24, 32, 48
+};
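+
+/*
+* Together with WebRtcIsac_kLpcShapeQStepSizeUb12 above, the left
+* reconstruction points and counts presumably define a uniform grid per
+* coefficient: level i of coefficient k is
+*
+*   WebRtcIsac_kLpcShapeLeftRecPointUb12[k] + i * 0.15,
+*   i = 0 .. WebRtcIsac_kLpcShapeNumRecPointUb12[k] - 1.
+*/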
+
+/*
+* Starting index for the entropy decoder to search for the right interval,
+* one entry per LAR coefficient.
+*/
+const uint16_t WebRtcIsac_kLpcShapeEntropySearchUb12
+[UB_LPC_ORDER * UB_LPC_VEC_PER_FRAME] =
+{
+ 6, 7, 9, 13, 9, 12, 16, 24
+};
+
+/*
+* The following 8 vectors define the CDFs of the 8 decorrelated LAR
+* coefficients.
+*/
+const uint16_t WebRtcIsac_kLpcShapeCdfVec0Ub12[14] =
+{
+ 0, 13, 95, 418, 1687, 6498, 21317, 44200, 59029, 63849, 65147,
+ 65449, 65525, 65535
+};
+
+const uint16_t WebRtcIsac_kLpcShapeCdfVec1Ub12[16] =
+{
+ 0, 10, 59, 255, 858, 2667, 8200, 22609, 42988, 57202, 62947,
+ 64743, 65308, 65476, 65522, 65535
+};
+
+const uint16_t WebRtcIsac_kLpcShapeCdfVec2Ub12[20] =
+{
+ 0, 18, 40, 118, 332, 857, 2017, 4822, 11321, 24330, 41279,
+ 54342, 60637, 63394, 64659, 65184, 65398, 65482, 65518, 65535
+};
+
+const uint16_t WebRtcIsac_kLpcShapeCdfVec3Ub12[28] =
+{
+ 0, 21, 38, 90, 196, 398, 770, 1400, 2589, 4650, 8211,
+ 14933, 26044, 39592, 50814, 57452, 60971, 62884, 63995, 64621, 65019, 65273,
+ 65410, 65480, 65514, 65522, 65531, 65535
+};
+
+const uint16_t WebRtcIsac_kLpcShapeCdfVec4Ub12[20] =
+{
+ 0, 7, 46, 141, 403, 969, 2132, 4649, 10633, 24902, 43254,
+ 54665, 59928, 62674, 64173, 64938, 65293, 65464, 65523, 65535
+};
+
+const uint16_t WebRtcIsac_kLpcShapeCdfVec5Ub12[25] =
+{
+ 0, 7, 22, 72, 174, 411, 854, 1737, 3545, 6774, 13165,
+ 25221, 40980, 52821, 58714, 61706, 63472, 64437, 64989, 65287, 65430, 65503,
+ 65525, 65529, 65535
+};
+
+const uint16_t WebRtcIsac_kLpcShapeCdfVec6Ub12[33] =
+{
+ 0, 11, 21, 36, 65, 128, 228, 401, 707, 1241, 2126,
+ 3589, 6060, 10517, 18853, 31114, 42477, 49770, 54271, 57467, 59838, 61569,
+ 62831, 63772, 64433, 64833, 65123, 65306, 65419, 65466, 65499, 65519, 65535
+};
+
+const uint16_t WebRtcIsac_kLpcShapeCdfVec7Ub12[49] =
+{
+ 0, 14, 34, 67, 107, 167, 245, 326, 449, 645, 861,
+ 1155, 1508, 2003, 2669, 3544, 4592, 5961, 7583, 9887, 13256, 18765,
+ 26519, 34077, 40034, 44349, 47795, 50663, 53262, 55473, 57458, 59122, 60592,
+ 61742, 62690, 63391, 63997, 64463, 64794, 65045, 65207, 65309, 65394, 65443,
+ 65478, 65504, 65514, 65523, 65535
+};
+
+/*
+* An array of pointers to CDFs of decorrelated LARs
+*/
+const uint16_t* WebRtcIsac_kLpcShapeCdfMatUb12
+[UB_LPC_ORDER * UB_LPC_VEC_PER_FRAME] =
+{
+ WebRtcIsac_kLpcShapeCdfVec0Ub12, WebRtcIsac_kLpcShapeCdfVec1Ub12,
+ WebRtcIsac_kLpcShapeCdfVec2Ub12, WebRtcIsac_kLpcShapeCdfVec3Ub12,
+ WebRtcIsac_kLpcShapeCdfVec4Ub12, WebRtcIsac_kLpcShapeCdfVec5Ub12,
+ WebRtcIsac_kLpcShapeCdfVec6Ub12, WebRtcIsac_kLpcShapeCdfVec7Ub12
+};
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/lpc_shape_swb12_tables.h b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/lpc_shape_swb12_tables.h
new file mode 100644
index 0000000000..7448a1e76b
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/lpc_shape_swb12_tables.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * lpc_shape_swb12_tables.h
+ *
+ * This file declares tables used for entropy coding of LPC shape of
+ * upper-band signal if the bandwidth is 12 kHz.
+ *
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_LPC_SHAPE_SWB12_TABLES_H_
+#define MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_LPC_SHAPE_SWB12_TABLES_H_
+
+#include <stdint.h>
+
+#include "modules/audio_coding/codecs/isac/main/source/settings.h"
+
+extern const double WebRtcIsac_kMeanLarUb12[UB_LPC_ORDER];
+
+extern const double WebRtcIsac_kMeanLpcGain;
+
+extern const double WebRtcIsac_kIntraVecDecorrMatUb12[UB_LPC_ORDER]
+ [UB_LPC_ORDER];
+
+extern const double WebRtcIsac_kInterVecDecorrMatUb12[UB_LPC_VEC_PER_FRAME]
+ [UB_LPC_VEC_PER_FRAME];
+
+extern const double WebRtcIsac_kLpcShapeQStepSizeUb12;
+
+extern const double
+ WebRtcIsac_kLpcShapeLeftRecPointUb12[UB_LPC_ORDER * UB_LPC_VEC_PER_FRAME];
+
+extern const int16_t
+ WebRtcIsac_kLpcShapeNumRecPointUb12[UB_LPC_ORDER * UB_LPC_VEC_PER_FRAME];
+
+extern const uint16_t
+ WebRtcIsac_kLpcShapeEntropySearchUb12[UB_LPC_ORDER * UB_LPC_VEC_PER_FRAME];
+
+extern const uint16_t WebRtcIsac_kLpcShapeCdfVec0Ub12[14];
+
+extern const uint16_t WebRtcIsac_kLpcShapeCdfVec1Ub12[16];
+
+extern const uint16_t WebRtcIsac_kLpcShapeCdfVec2Ub12[20];
+
+extern const uint16_t WebRtcIsac_kLpcShapeCdfVec3Ub12[28];
+
+extern const uint16_t WebRtcIsac_kLpcShapeCdfVec4Ub12[20];
+
+extern const uint16_t WebRtcIsac_kLpcShapeCdfVec5Ub12[25];
+
+extern const uint16_t WebRtcIsac_kLpcShapeCdfVec6Ub12[33];
+
+extern const uint16_t WebRtcIsac_kLpcShapeCdfVec7Ub12[49];
+
+extern const uint16_t*
+ WebRtcIsac_kLpcShapeCdfMatUb12[UB_LPC_ORDER * UB_LPC_VEC_PER_FRAME];
+
+#endif // MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_LPC_SHAPE_SWB12_TABLES_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/lpc_shape_swb16_tables.c b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/lpc_shape_swb16_tables.c
new file mode 100644
index 0000000000..59617fd274
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/lpc_shape_swb16_tables.c
@@ -0,0 +1,247 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * lpc_shape_swb16_tables.c
+ *
+ * This file defines tables used for entropy coding of LPC shape of
+ * upper-band signal if the bandwidth is 16 kHz.
+ *
+ */
+
+#include "modules/audio_coding/codecs/isac/main/source/lpc_shape_swb16_tables.h"
+#include "modules/audio_coding/codecs/isac/main/source/settings.h"
+
+/*
+* Mean values of the LAR coefficients.
+*/
+const double WebRtcIsac_kMeanLarUb16[UB_LPC_ORDER] =
+{
+ 0.454978, 0.364747, 0.102999, 0.104523
+};
+
+/*
+* A rotation matrix to decorrelate intra-vector correlation,
+* i.e. correlation among components of LAR vector.
+*/
+const double WebRtcIsac_kIintraVecDecorrMatUb16[UB_LPC_ORDER][UB_LPC_ORDER] =
+{
+ {-0.020528, -0.085858, -0.002431, 0.996093},
+ {-0.033155, 0.036102, 0.998786, 0.004866},
+ { 0.202627, 0.974853, -0.028940, 0.088132},
+ {-0.978479, 0.202454, -0.039785, -0.002811}
+};
+
+/*
+* A rotation matrix to remove correlation among LAR coefficients
+* of different LAR vectors. One might expect the decorrelation matrix
+* for the first component to differ from that for the second, but we
+* have not observed a significant benefit from using different
+* decorrelation matrices for different components.
+*/
+const double WebRtcIsac_kInterVecDecorrMatUb16
+[UB16_LPC_VEC_PER_FRAME][UB16_LPC_VEC_PER_FRAME] =
+{
+ { 0.291675, -0.515786, 0.644927, 0.482658},
+ {-0.647220, 0.479712, 0.289556, 0.516856},
+ { 0.643084, 0.485489, -0.289307, 0.516763},
+ {-0.287185, -0.517823, -0.645389, 0.482553}
+};
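+
+/*
+* Note that 16 kHz upper-band frames carry UB16_LPC_VEC_PER_FRAME = 4 LAR
+* vectors (hence the 4x4 inter-vector transform above), where the 12 kHz
+* tables use 2, so the per-coefficient tables below have
+* UB_LPC_ORDER * UB16_LPC_VEC_PER_FRAME = 16 entries.
+*/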
+
+/*
+* The following 16 vectors define the CDFs of the 16 decorrelated LAR
+* coefficients.
+*/
+const uint16_t WebRtcIsac_kLpcShapeCdfVec01Ub16[14] =
+{
+ 0, 2, 20, 159, 1034, 5688, 20892, 44653,
+ 59849, 64485, 65383, 65518, 65534, 65535
+};
+
+const uint16_t WebRtcIsac_kLpcShapeCdfVec1Ub16[16] =
+{
+ 0, 1, 7, 43, 276, 1496, 6681, 21653,
+ 43891, 58859, 64022, 65248, 65489, 65529, 65534, 65535
+};
+
+const uint16_t WebRtcIsac_kLpcShapeCdfVec2Ub16[18] =
+{
+ 0, 1, 9, 54, 238, 933, 3192, 9461,
+ 23226, 42146, 56138, 62413, 64623, 65300, 65473, 65521,
+ 65533, 65535
+};
+
+const uint16_t WebRtcIsac_kLpcShapeCdfVec3Ub16[30] =
+{
+ 0, 2, 4, 8, 17, 36, 75, 155,
+ 329, 683, 1376, 2662, 5047, 9508, 17526, 29027,
+ 40363, 48997, 55096, 59180, 61789, 63407, 64400, 64967,
+ 65273, 65429, 65497, 65526, 65534, 65535
+};
+
+const uint16_t WebRtcIsac_kLpcShapeCdfVec4Ub16[16] =
+{
+ 0, 1, 10, 63, 361, 1785, 7407, 22242,
+ 43337, 58125, 63729, 65181, 65472, 65527, 65534, 65535
+};
+
+const uint16_t WebRtcIsac_kLpcShapeCdfVec5Ub16[17] =
+{
+ 0, 1, 7, 29, 134, 599, 2443, 8590,
+ 22962, 42635, 56911, 63060, 64940, 65408, 65513, 65531,
+ 65535
+};
+
+const uint16_t WebRtcIsac_kLpcShapeCdfVec6Ub16[21] =
+{
+ 0, 1, 5, 16, 57, 191, 611, 1808,
+ 4847, 11755, 24612, 40910, 53789, 60698, 63729, 64924,
+ 65346, 65486, 65523, 65532, 65535
+};
+
+const uint16_t WebRtcIsac_kLpcShapeCdfVec7Ub16[36] =
+{
+ 0, 1, 4, 12, 25, 55, 104, 184,
+ 314, 539, 926, 1550, 2479, 3861, 5892, 8845,
+ 13281, 20018, 29019, 38029, 45581, 51557, 56057, 59284,
+ 61517, 63047, 64030, 64648, 65031, 65261, 65402, 65480,
+ 65518, 65530, 65534, 65535
+};
+
+const uint16_t WebRtcIsac_kLpcShapeCdfVec8Ub16[21] =
+{
+ 0, 1, 2, 7, 26, 103, 351, 1149,
+ 3583, 10204, 23846, 41711, 55361, 61917, 64382, 65186,
+ 65433, 65506, 65528, 65534, 65535
+};
+
+const uint16_t WebRtcIsac_kLpcShapeCdfVec01Ub160[21] =
+{
+ 0, 6, 19, 63, 205, 638, 1799, 4784,
+ 11721, 24494, 40803, 53805, 60886, 63822, 64931, 65333,
+ 65472, 65517, 65530, 65533, 65535
+};
+
+const uint16_t WebRtcIsac_kLpcShapeCdfVec01Ub161[28] =
+{
+ 0, 1, 3, 11, 31, 86, 221, 506,
+ 1101, 2296, 4486, 8477, 15356, 26079, 38941, 49952,
+ 57165, 61257, 63426, 64549, 65097, 65351, 65463, 65510,
+ 65526, 65532, 65534, 65535
+};
+
+const uint16_t WebRtcIsac_kLpcShapeCdfVec01Ub162[55] =
+{
+ 0, 3, 12, 23, 42, 65, 89, 115,
+ 150, 195, 248, 327, 430, 580, 784, 1099,
+ 1586, 2358, 3651, 5899, 9568, 14312, 19158, 23776,
+ 28267, 32663, 36991, 41153, 45098, 48680, 51870, 54729,
+ 57141, 59158, 60772, 62029, 63000, 63761, 64322, 64728,
+ 65000, 65192, 65321, 65411, 65463, 65496, 65514, 65523,
+ 65527, 65529, 65531, 65532, 65533, 65534, 65535
+};
+
+const uint16_t WebRtcIsac_kLpcShapeCdfVec01Ub163[26] =
+{
+ 0, 2, 4, 10, 21, 48, 114, 280,
+ 701, 1765, 4555, 11270, 24267, 41213, 54285, 61003,
+ 63767, 64840, 65254, 65421, 65489, 65514, 65526, 65532,
+ 65534, 65535
+};
+
+const uint16_t WebRtcIsac_kLpcShapeCdfVec01Ub164[28] =
+{
+ 0, 1, 3, 6, 15, 36, 82, 196,
+ 453, 1087, 2557, 5923, 13016, 25366, 40449, 52582,
+ 59539, 62896, 64389, 65033, 65316, 65442, 65494, 65519,
+ 65529, 65533, 65534, 65535
+};
+
+const uint16_t WebRtcIsac_kLpcShapeCdfVec01Ub165[34] =
+{
+ 0, 2, 4, 8, 18, 35, 73, 146,
+ 279, 524, 980, 1789, 3235, 5784, 10040, 16998,
+ 27070, 38543, 48499, 55421, 59712, 62257, 63748, 64591,
+ 65041, 65278, 65410, 65474, 65508, 65522, 65530, 65533,
+ 65534, 65535
+};
+
+const uint16_t WebRtcIsac_kLpcShapeCdfVec01Ub166[71] =
+{
+ 0, 1, 2, 6, 13, 26, 55, 92,
+ 141, 191, 242, 296, 355, 429, 522, 636,
+ 777, 947, 1162, 1428, 1753, 2137, 2605, 3140,
+ 3743, 4409, 5164, 6016, 6982, 8118, 9451, 10993,
+ 12754, 14810, 17130, 19780, 22864, 26424, 30547, 35222,
+ 40140, 44716, 48698, 52056, 54850, 57162, 59068, 60643,
+ 61877, 62827, 63561, 64113, 64519, 64807, 65019, 65167,
+ 65272, 65343, 65399, 65440, 65471, 65487, 65500, 65509,
+ 65518, 65524, 65527, 65531, 65533, 65534, 65535
+};
+
+/*
+* An array of pointers to CDFs of decorrelated LARs
+*/
+const uint16_t* WebRtcIsac_kLpcShapeCdfMatUb16
+[UB_LPC_ORDER * UB16_LPC_VEC_PER_FRAME] = {
+ WebRtcIsac_kLpcShapeCdfVec01Ub16,
+ WebRtcIsac_kLpcShapeCdfVec1Ub16,
+ WebRtcIsac_kLpcShapeCdfVec2Ub16,
+ WebRtcIsac_kLpcShapeCdfVec3Ub16,
+ WebRtcIsac_kLpcShapeCdfVec4Ub16,
+ WebRtcIsac_kLpcShapeCdfVec5Ub16,
+ WebRtcIsac_kLpcShapeCdfVec6Ub16,
+ WebRtcIsac_kLpcShapeCdfVec7Ub16,
+ WebRtcIsac_kLpcShapeCdfVec8Ub16,
+ WebRtcIsac_kLpcShapeCdfVec01Ub160,
+ WebRtcIsac_kLpcShapeCdfVec01Ub161,
+ WebRtcIsac_kLpcShapeCdfVec01Ub162,
+ WebRtcIsac_kLpcShapeCdfVec01Ub163,
+ WebRtcIsac_kLpcShapeCdfVec01Ub164,
+ WebRtcIsac_kLpcShapeCdfVec01Ub165,
+ WebRtcIsac_kLpcShapeCdfVec01Ub166
+};
+
+/*
+* The smallest reconstruction points for quantization of LAR coefficients.
+*/
+const double WebRtcIsac_kLpcShapeLeftRecPointUb16
+[UB_LPC_ORDER * UB16_LPC_VEC_PER_FRAME] =
+{
+ -0.8250, -0.9750, -1.1250, -2.1750, -0.9750, -1.1250, -1.4250,
+ -2.6250, -1.4250, -1.2750, -1.8750, -3.6750, -1.7250, -1.8750,
+ -2.3250, -5.4750
+};
+
+/*
+* Number of reconstruction points of quantizers for LAR coefficients.
+*/
+const int16_t WebRtcIsac_kLpcShapeNumRecPointUb16
+[UB_LPC_ORDER * UB16_LPC_VEC_PER_FRAME] =
+{
+ 13, 15, 17, 29, 15, 16, 20, 35, 20,
+ 20, 27, 54, 25, 27, 33, 70
+};
+
+/*
+* Starting index for the entropy decoder to search for the right interval,
+* one entry per LAR coefficient.
+*/
+const uint16_t WebRtcIsac_kLpcShapeEntropySearchUb16
+[UB_LPC_ORDER * UB16_LPC_VEC_PER_FRAME] =
+{
+ 6, 7, 8, 14, 7, 8, 10, 17, 10,
+ 10, 13, 27, 12, 13, 16, 35
+};
+
+/*
+* LAR quantization step-size.
+*/
+const double WebRtcIsac_kLpcShapeQStepSizeUb16 = 0.150000;
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/lpc_shape_swb16_tables.h b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/lpc_shape_swb16_tables.h
new file mode 100644
index 0000000000..51101db936
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/lpc_shape_swb16_tables.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * lpc_shape_swb16_tables.h
+ *
+ * This file declares tables used for entropy coding of LPC shape of
+ * upper-band signal if the bandwidth is 16 kHz.
+ *
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_LPC_SHAPE_SWB16_TABLES_H_
+#define MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_LPC_SHAPE_SWB16_TABLES_H_
+
+#include <stdint.h>
+
+#include "modules/audio_coding/codecs/isac/main/source/settings.h"
+
+extern const double WebRtcIsac_kMeanLarUb16[UB_LPC_ORDER];
+
+extern const double WebRtcIsac_kIintraVecDecorrMatUb16[UB_LPC_ORDER]
+ [UB_LPC_ORDER];
+
+extern const double WebRtcIsac_kInterVecDecorrMatUb16[UB16_LPC_VEC_PER_FRAME]
+ [UB16_LPC_VEC_PER_FRAME];
+
+extern const uint16_t WebRtcIsac_kLpcShapeCdfVec01Ub16[14];
+
+extern const uint16_t WebRtcIsac_kLpcShapeCdfVec1Ub16[16];
+
+extern const uint16_t WebRtcIsac_kLpcShapeCdfVec2Ub16[18];
+
+extern const uint16_t WebRtcIsac_kLpcShapeCdfVec3Ub16[30];
+
+extern const uint16_t WebRtcIsac_kLpcShapeCdfVec4Ub16[16];
+
+extern const uint16_t WebRtcIsac_kLpcShapeCdfVec5Ub16[17];
+
+extern const uint16_t WebRtcIsac_kLpcShapeCdfVec6Ub16[21];
+
+extern const uint16_t WebRtcIsac_kLpcShapeCdfVec7Ub16[36];
+
+extern const uint16_t WebRtcIsac_kLpcShapeCdfVec8Ub16[21];
+
+extern const uint16_t WebRtcIsac_kLpcShapeCdfVec01Ub160[21];
+
+extern const uint16_t WebRtcIsac_kLpcShapeCdfVec01Ub161[28];
+
+extern const uint16_t WebRtcIsac_kLpcShapeCdfVec01Ub162[55];
+
+extern const uint16_t WebRtcIsac_kLpcShapeCdfVec01Ub163[26];
+
+extern const uint16_t WebRtcIsac_kLpcShapeCdfVec01Ub164[28];
+
+extern const uint16_t WebRtcIsac_kLpcShapeCdfVec01Ub165[34];
+
+extern const uint16_t WebRtcIsac_kLpcShapeCdfVec01Ub166[71];
+
+extern const uint16_t*
+ WebRtcIsac_kLpcShapeCdfMatUb16[UB_LPC_ORDER * UB16_LPC_VEC_PER_FRAME];
+
+extern const double
+ WebRtcIsac_kLpcShapeLeftRecPointUb16[UB_LPC_ORDER * UB16_LPC_VEC_PER_FRAME];
+
+extern const int16_t
+ WebRtcIsac_kLpcShapeNumRecPointUb16[UB_LPC_ORDER * UB16_LPC_VEC_PER_FRAME];
+
+extern const uint16_t
+ WebRtcIsac_kLpcShapeEntropySearchUb16[UB_LPC_ORDER *
+ UB16_LPC_VEC_PER_FRAME];
+
+extern const double WebRtcIsac_kLpcShapeQStepSizeUb16;
+
+#endif // MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_LPC_SHAPE_SWB16_TABLES_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/lpc_tables.c b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/lpc_tables.c
new file mode 100644
index 0000000000..461b92eb8a
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/lpc_tables.c
@@ -0,0 +1,601 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/* coding tables for the KLT coefficients */
+
+#include "modules/audio_coding/codecs/isac/main/source/lpc_tables.h"
+#include "modules/audio_coding/codecs/isac/main/source/settings.h"
+
+/* cdf array for model indicator */
+const uint16_t WebRtcIsac_kQKltModelCdf[4] = {
+ 0, 15434, 37548, 65535 };
+
+/* pointer to cdf array for model indicator */
+const uint16_t *WebRtcIsac_kQKltModelCdfPtr[1] = {
+ WebRtcIsac_kQKltModelCdf };
+
+/* initial cdf index for decoder of model indicator */
+const uint16_t WebRtcIsac_kQKltModelInitIndex[1] = { 1 };
+
+/* offset to go from rounded value to quantization index */
+const short WebRtcIsac_kQKltQuantMinGain[12] = {
+ 3, 6, 4, 6, 6, 9, 5, 16, 11, 34, 32, 47 };
+
+
+const short WebRtcIsac_kQKltQuantMinShape[108] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
+ 1, 1, 1, 1, 2, 2, 2, 3, 0, 0,
+ 0, 0, 1, 0, 0, 0, 0, 1, 1, 1,
+ 1, 1, 1, 2, 2, 3, 0, 0, 0, 0,
+ 1, 0, 1, 1, 1, 1, 1, 1, 1, 2,
+ 2, 4, 3, 5, 0, 0, 0, 0, 1, 1,
+ 1, 1, 1, 1, 2, 1, 2, 2, 3, 4,
+ 4, 7, 0, 0, 1, 1, 1, 1, 1, 1,
+ 1, 2, 3, 2, 3, 4, 4, 5, 7, 13,
+ 0, 1, 1, 2, 3, 2, 2, 2, 4, 4,
+ 5, 6, 7, 11, 9, 13, 12, 26 };
+
+/* maximum quantization index */
+const uint16_t WebRtcIsac_kQKltMaxIndGain[12] = {
+ 6, 12, 8, 14, 10, 19, 12, 31, 22, 56, 52, 138 };
+
+const uint16_t WebRtcIsac_kQKltMaxIndShape[108] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
+ 2, 2, 2, 2, 4, 4, 5, 6, 0, 0,
+ 0, 0, 1, 0, 0, 0, 0, 1, 2, 2,
+ 2, 2, 3, 4, 5, 7, 0, 0, 0, 0,
+ 2, 0, 2, 2, 2, 2, 3, 2, 2, 4,
+ 4, 6, 6, 9, 0, 0, 0, 0, 2, 2,
+ 2, 2, 2, 2, 3, 2, 4, 4, 7, 7,
+ 9, 13, 0, 0, 2, 2, 2, 2, 2, 2,
+ 3, 4, 5, 4, 6, 8, 8, 10, 16, 25,
+ 0, 2, 2, 4, 5, 4, 4, 4, 7, 8,
+ 9, 10, 13, 19, 17, 23, 25, 49 };
+
+/* index offset */
+const uint16_t WebRtcIsac_kQKltOffsetGain[12] = {
+ 0, 7, 20, 29, 44, 55, 75, 88, 120, 143, 200, 253 };
+
+const uint16_t WebRtcIsac_kQKltOffsetShape[108] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+ 11, 14, 17, 20, 23, 28, 33, 39, 46, 47,
+ 48, 49, 50, 52, 53, 54, 55, 56, 58, 61,
+ 64, 67, 70, 74, 79, 85, 93, 94, 95, 96,
+ 97, 100, 101, 104, 107, 110, 113, 117, 120, 123,
+ 128, 133, 140, 147, 157, 158, 159, 160, 161, 164,
+ 167, 170, 173, 176, 179, 183, 186, 191, 196, 204,
+ 212, 222, 236, 237, 238, 241, 244, 247, 250, 253,
+ 256, 260, 265, 271, 276, 283, 292, 301, 312, 329,
+ 355, 356, 359, 362, 367, 373, 378, 383, 388, 396,
+ 405, 415, 426, 440, 460, 478, 502, 528 };
+
+/* initial cdf index for KLT coefficients */
+const uint16_t WebRtcIsac_kQKltInitIndexGain[12] = {
+ 3, 6, 4, 7, 5, 10, 6, 16, 11, 28, 26, 69};
+
+const uint16_t WebRtcIsac_kQKltInitIndexShape[108] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
+ 1, 1, 1, 1, 2, 2, 3, 3, 0, 0,
+ 0, 0, 1, 0, 0, 0, 0, 1, 1, 1,
+ 1, 1, 2, 2, 3, 4, 0, 0, 0, 0,
+ 1, 0, 1, 1, 1, 1, 2, 1, 1, 2,
+ 2, 3, 3, 5, 0, 0, 0, 0, 1, 1,
+ 1, 1, 1, 1, 2, 1, 2, 2, 4, 4,
+ 5, 7, 0, 0, 1, 1, 1, 1, 1, 1,
+ 2, 2, 3, 2, 3, 4, 4, 5, 8, 13,
+ 0, 1, 1, 2, 3, 2, 2, 2, 4, 4,
+ 5, 5, 7, 10, 9, 12, 13, 25 };
+
+
+/* quantizer representation levels */
+const double WebRtcIsac_kQKltLevelsGain[392] = {
+ -2.78127126, -1.76745590, -0.77913790, -0.00437329, 0.79961206,
+ 1.81775776, 2.81389782, -5.78753143, -4.88384084, -3.89320940,
+ -2.88133610, -1.92859977, -0.86347396, 0.02003888, 0.86140400,
+ 1.89667156, 2.97134967, 3.98781964, 4.91727277, 5.82865898,
+ -4.11195874, -2.80898424, -1.87547977, -0.80943825, -0.00679084,
+ 0.79573851, 1.83953397, 2.67586037, 3.76274082, -6.10933968,
+ -4.93034581, -3.89281296, -2.91530625, -1.89684163, -0.85319130,
+ -0.02275767, 0.86862017, 1.91578276, 2.96107339, 3.96543056,
+ 4.91369908, 5.91058154, 6.83848343, 8.07136925, -5.87470395,
+ -4.84703049, -3.84284597, -2.86168446, -1.89290192, -0.82798145,
+ -0.00080013, 0.82594974, 1.85754329, 2.88351798, 3.96172628,
+ -8.85684885, -7.87387461, -6.97811862, -5.93256270, -4.94301439,
+ -3.95513701, -2.96041544, -1.94031192, -0.87961478, -0.00456201,
+ 0.89911505, 1.91723376, 2.94011511, 3.93302540, 4.97990967,
+ 5.93133404, 7.02181199, 7.92407762, 8.80155440, 10.04665814,
+ -4.82396678, -3.85612158, -2.89482244, -1.89558408, -0.90036978,
+ -0.00677823, 0.90607989, 1.90937981, 2.91175777, 3.91637730,
+ 4.97565723, 5.84771228, 7.11145863, -16.07879840, -15.03776309,
+ -13.93905670, -12.95671800, -11.89171202, -10.95820934, -9.95923714,
+ -8.94357334, -7.99068299, -6.97481009, -5.94826231, -4.96673988,
+ -3.97490466, -2.97846970, -1.95130435, -0.94215262, -0.01444043,
+ 0.96770704, 1.95848598, 2.94107862, 3.95666119, 4.97253085,
+ 5.97191122, 6.93277360, 7.96608727, 8.87958779, 10.00264269,
+ 10.86560820, 12.07449071, 13.04491775, 13.97507061, 14.91845261,
+ -10.85696295, -9.83365357, -9.01245635, -7.95915145, -6.95625003,
+ -5.95362618, -4.93468444, -3.98760978, -2.95044407, -1.97041277,
+ -0.97701799, -0.00840234, 0.97834289, 1.98361415, 2.97802439,
+ 3.96415871, 4.95369042, 5.94101770, 6.92756798, 7.94063998,
+ 8.85951828, 9.97077022, 11.00068503, -33.92030406, -32.81426422,
+ -32.00000000, -31.13243639, -30.11886909, -29.06017570, -28.12598824,
+ -27.22045482, -25.81215858, -25.07849962, -23.93018013, -23.02097643,
+ -21.89529725, -20.99091085, -19.98889048, -18.94327044, -17.96562071,
+ -16.96126218, -15.95054062, -14.98516200, -13.97101012, -13.02106500,
+ -11.98438006, -11.03216748, -9.95930286, -8.97043946, -7.98085082,
+ -6.98360995, -5.98998802, -4.98668173, -4.00032906, -3.00420619,
+ -1.98701132, -0.99324682, -0.00609324, 0.98297834, 1.99483076,
+ 3.00305044, 3.97142097, 4.97525759, 5.98612258, 6.97448236,
+ 7.97575900, 9.01086211, 9.98665542, 11.00541438, 11.98078628,
+ 12.92352471, 14.06849675, 14.99949430, 15.94904834, 16.97440321,
+ 18.04040916, 18.88987609, 20.05312391, 21.00000000, 21.79443341,
+ -31.98578825, -31.00000000, -29.89060567, -28.98555686, -27.97114102,
+ -26.84935410, -26.02402230, -24.94195278, -23.92336849, -22.95552382,
+ -21.97932836, -20.96055470, -19.99649553, -19.03436122, -17.96706525,
+ -17.01139515, -16.01363516, -14.99154248, -14.00298333, -12.99630613,
+ -11.99955519, -10.99000421, -10.00819092, -8.99763648, -7.98431793,
+ -7.01769025, -5.99604690, -4.99980697, -3.99334671, -3.01748192,
+ -2.02051217, -1.00848371, -0.01942358, 1.00477757, 1.95477872,
+ 2.98593031, 3.98779079, 4.96862849, 6.02694771, 6.93983733,
+ 7.89874717, 8.99615862, 10.02367921, 10.96293452, 11.84351528,
+ 12.92207187, 13.85122329, 15.05146877, 15.99371264, 17.00000000,
+ 18.00000000, 19.00000000, 19.82763573, -47.00000000, -46.00000000,
+ -44.87138498, -44.00000000, -43.00000000, -42.00000000, -41.00000000,
+ -39.88966612, -38.98913239, -37.80306486, -37.23584325, -35.94200288,
+ -34.99881301, -34.11361858, -33.06507360, -32.13129135, -30.90891364,
+ -29.81511907, -28.99250380, -28.04535391, -26.99767800, -26.04418164,
+ -24.95687851, -24.04865595, -23.03392645, -21.89366707, -20.93517364,
+ -19.99388660, -18.91620943, -18.03749683, -16.99532379, -15.98683813,
+ -15.06421479, -13.99359211, -12.99714098, -11.97022520, -10.98500279,
+ -9.98834422, -8.95729330, -8.01232284, -7.00253661, -5.99681626,
+ -5.01207817, -3.95914904, -3.01232178, -1.96615919, -0.97687670,
+ 0.01228030, 0.98412288, 2.01753544, 3.00580570, 3.97783510,
+ 4.98846894, 6.01321400, 7.00867732, 8.00416375, 9.01771966,
+ 9.98637729, 10.98255180, 11.99194163, 13.01807333, 14.00999545,
+ 15.00118556, 16.00089224, 17.00584148, 17.98251763, 18.99942091,
+ 19.96917690, 20.97839265, 21.98207297, 23.00171271, 23.99930737,
+ 24.99746061, 26.00936304, 26.98240132, 28.01126868, 29.01395915,
+ 29.98153507, 31.01376711, 31.99876818, 33.00475317, 33.99753994,
+ 34.99493913, 35.98933585, 36.95620160, 37.98428461, 38.99317544,
+ 40.01832073, 40.98048133, 41.95999283, 42.98232091, 43.96523612,
+ 44.99574268, 45.99524194, 47.05464025, 48.03821548, 48.99354366,
+ 49.96400411, 50.98017973, 51.95184408, 52.96291806, 54.00194392,
+ 54.96603783, 55.95623778, 57.03076595, 58.05889901, 58.99081551,
+ 59.97928121, 61.05071612, 62.03971580, 63.01286038, 64.01290338,
+ 65.02074503, 65.99454594, 67.00399425, 67.96571257, 68.95305727,
+ 69.92030664, 70.95594862, 71.98088567, 73.04764124, 74.00285480,
+ 75.02696330, 75.89837673, 76.93459997, 78.16266309, 78.83317543,
+ 80.00000000, 80.87251574, 82.09803524, 83.10671664, 84.00000000,
+ 84.77023523, 86.00000000, 87.00000000, 87.92946897, 88.69159118,
+ 90.00000000, 90.90535270 };
+
+const double WebRtcIsac_kQKltLevelsShape[578] = {
+ 0.00032397, 0.00008053, -0.00061202, -0.00012620, 0.00030437,
+ 0.00054764, -0.00027902, 0.00069360, 0.00029449, -0.80219239,
+ 0.00091089, -0.74514927, -0.00094283, 0.64030631, -0.60509119,
+ 0.00035575, 0.61851665, -0.62129957, 0.00375219, 0.60054900,
+ -0.61554359, 0.00054977, 0.63362016, -1.73118727, -0.65422341,
+ 0.00524568, 0.66165298, 1.76785515, -1.83182018, -0.65997434,
+ -0.00011887, 0.67524299, 1.79933938, -1.76344480, -0.72547708,
+ -0.00133017, 0.73104704, 1.75305377, 2.85164534, -2.80423916,
+ -1.71959639, -0.75419722, -0.00329945, 0.77196760, 1.72211069,
+ 2.87339653, 0.00031089, -0.00015311, 0.00018201, -0.00035035,
+ -0.77357251, 0.00154647, -0.00047625, -0.00045299, 0.00086590,
+ 0.00044762, -0.83383829, 0.00024787, -0.68526258, -0.00122472,
+ 0.64643255, -0.60904942, -0.00448987, 0.62309184, -0.59626442,
+ -0.00574132, 0.62296546, -0.63222115, 0.00013441, 0.63609545,
+ -0.66911055, -0.00369971, 0.66346095, 2.07281301, -1.77184694,
+ -0.67640425, -0.00010145, 0.64818392, 1.74948973, -1.69420224,
+ -0.71943894, -0.00004680, 0.75303493, 1.81075983, 2.80610041,
+ -2.80005755, -1.79866753, -0.77409777, -0.00084220, 0.80141293,
+ 1.78291081, 2.73954236, 3.82994169, 0.00015140, -0.00012766,
+ -0.00034241, -0.00119125, -0.76113497, 0.00069246, 0.76722027,
+ 0.00132862, -0.69107530, 0.00010656, 0.77061578, -0.78012970,
+ 0.00095947, 0.77828502, -0.64787758, 0.00217168, 0.63050167,
+ -0.58601125, 0.00306596, 0.59466308, -0.58603410, 0.00059779,
+ 0.64257970, 1.76512766, -0.61193600, -0.00259517, 0.59767574,
+ -0.61026273, 0.00315811, 0.61725479, -1.69169719, -0.65816029,
+ 0.00067575, 0.65576890, 2.00000000, -1.72689193, -0.69780808,
+ -0.00040990, 0.70668487, 1.74198458, -3.79028154, -3.00000000,
+ -1.73194459, -0.70179341, -0.00106695, 0.71302629, 1.76849782,
+ -2.89332364, -1.78585007, -0.78731491, -0.00132610, 0.79692976,
+ 1.75247009, 2.97828682, -5.26238694, -3.69559829, -2.87286122,
+ -1.84908818, -0.84434577, -0.01167975, 0.84641753, 1.84087672,
+ 2.87628156, 3.83556679, -0.00190204, 0.00092642, 0.00354385,
+ -0.00012982, -0.67742785, 0.00229509, 0.64935672, -0.58444751,
+ 0.00470733, 0.57299534, -0.58456202, -0.00097715, 0.64593607,
+ -0.64060330, -0.00638534, 0.59680157, -0.59287537, 0.00490772,
+ 0.58919707, -0.60306173, -0.00417464, 0.60562100, -1.75218757,
+ -0.63018569, -0.00225922, 0.63863300, -0.63949939, -0.00126421,
+ 0.64268914, -1.75851182, -0.68318060, 0.00510418, 0.69049211,
+ 1.88178506, -1.71136148, -0.72710534, -0.00815559, 0.73412917,
+ 1.79996711, -2.77111145, -1.73940498, -0.78212945, 0.01074476,
+ 0.77688916, 1.76873972, 2.87281379, 3.77554698, -3.75832725,
+ -2.95463235, -1.80451491, -0.80017226, 0.00149902, 0.80729206,
+ 1.78265046, 2.89391793, -3.78236148, -2.83640598, -1.82532067,
+ -0.88844327, -0.00620952, 0.88208030, 1.85757631, 2.81712391,
+ 3.88430176, 5.16179367, -7.00000000, -5.93805408, -4.87172597,
+ -3.87524433, -2.89399744, -1.92359563, -0.92136341, -0.00172725,
+ 0.93087018, 1.90528280, 2.89809686, 3.88085708, 4.89147740,
+ 5.89078692, -0.00239502, 0.00312564, -1.00000000, 0.00178325,
+ 1.00000000, -0.62198029, 0.00143254, 0.65344051, -0.59851220,
+ -0.00676987, 0.61510140, -0.58894151, 0.00385055, 0.59794203,
+ -0.59808568, -0.00038214, 0.57625703, -0.63009713, -0.01107985,
+ 0.61278758, -0.64206758, -0.00154369, 0.65480598, 1.80604162,
+ -1.80909286, -0.67810514, 0.00205762, 0.68571097, 1.79453891,
+ -3.22682422, -1.73808453, -0.71870305, -0.00738594, 0.71486172,
+ 1.73005326, -1.66891897, -0.73689615, -0.00616203, 0.74262409,
+ 1.73807899, -2.92417482, -1.73866741, -0.78133871, 0.00764425,
+ 0.80027264, 1.78668732, 2.74992588, -4.00000000, -2.75578740,
+ -1.83697516, -0.83117035, -0.00355191, 0.83527172, 1.82814700,
+ 2.77377675, 3.80718693, -3.81667698, -2.83575471, -1.83372350,
+ -0.86579471, 0.00547578, 0.87582281, 1.82858793, 2.87265007,
+ 3.91405377, -4.87521600, -3.78999094, -2.86437014, -1.86964365,
+ -0.90618018, 0.00128243, 0.91497811, 1.87374952, 2.83199819,
+ 3.91519130, 4.76632822, -6.68713448, -6.01252467, -4.94587936,
+ -3.88795368, -2.91299088, -1.92592211, -0.95504570, -0.00089980,
+ 0.94565200, 1.93239633, 2.91832808, 3.91363475, 4.88920034,
+ 5.96471415, 6.83905252, 7.86195009, 8.81571018,-12.96141759,
+ -11.73039516,-10.96459719, -9.97382433, -9.04414433, -7.89460619,
+ -6.96628608, -5.93236595, -4.93337924, -3.95479990, -2.96451499,
+ -1.96635876, -0.97271229, -0.00402238, 0.98343930, 1.98348291,
+ 2.96641164, 3.95456471, 4.95517089, 5.98975714, 6.90322073,
+ 7.90468849, 8.85639467, 9.97255498, 10.79006309, 11.81988596,
+ 0.04950500, -1.00000000, -0.01226628, 1.00000000, -0.59479469,
+ -0.10438305, 0.59822144, -2.00000000, -0.67109149, -0.09256692,
+ 0.65171621, 2.00000000, -3.00000000, -1.68391999, -0.76681039,
+ -0.03354151, 0.71509146, 1.77615472, -2.00000000, -0.68661511,
+ -0.02497881, 0.66478398, 2.00000000, -2.00000000, -0.67032784,
+ -0.00920582, 0.64892756, 2.00000000, -2.00000000, -0.68561894,
+ 0.03641869, 0.73021611, 1.68293863, -4.00000000, -2.72024184,
+ -1.80096059, -0.81696185, 0.03604685, 0.79232033, 1.70070730,
+ 3.00000000, -4.00000000, -2.71795670, -1.80482986, -0.86001162,
+ 0.03764903, 0.87723968, 1.79970771, 2.72685932, 3.67589143,
+ -5.00000000, -4.00000000, -2.85492548, -1.78996365, -0.83250358,
+ -0.01376828, 0.84195506, 1.78161105, 2.76754458, 4.00000000,
+ -6.00000000, -5.00000000, -3.82268811, -2.77563624, -1.82608163,
+ -0.86486114, -0.02671886, 0.86693165, 1.88422879, 2.86248347,
+ 3.95632216, -7.00000000, -6.00000000, -5.00000000, -3.77533988,
+ -2.86391432, -1.87052039, -0.90513658, 0.06271236, 0.91083620,
+ 1.85734756, 2.86031688, 3.82019418, 4.94420394, 6.00000000,
+ -11.00000000,-10.00000000, -9.00000000, -8.00000000, -6.91952415,
+ -6.00000000, -4.92044374, -3.87845165, -2.87392362, -1.88413020,
+ -0.91915740, 0.00318517, 0.91602800, 1.89664838, 2.88925058,
+ 3.84123856, 4.78988651, 5.94526812, 6.81953917, 8.00000000,
+ -9.00000000, -8.00000000, -7.03319143, -5.94530963, -4.86669720,
+ -3.92438007, -2.88620396, -1.92848070, -0.94365985, 0.01671855,
+ 0.97349410, 1.93419878, 2.89740109, 3.89662823, 4.83235583,
+ 5.88106535, 6.80328232, 8.00000000,-13.00000000,-12.00000000,
+ -11.00000000,-10.00000000, -9.00000000, -7.86033489, -6.83344055,
+ -5.89844215, -4.90811454, -3.94841298, -2.95820490, -1.98627966,
+ -0.99161468, -0.02286136, 0.96055651, 1.95052433, 2.93969396,
+ 3.94304346, 4.88522624, 5.87434241, 6.78309433, 7.87244101,
+ 9.00000000, 10.00000000,-12.09117356,-11.00000000,-10.00000000,
+ -8.84766108, -7.86934236, -6.98544896, -5.94233429, -4.95583292,
+ -3.95575986, -2.97085529, -1.98955811, -0.99359873, -0.00485413,
+ 0.98298870, 1.98093258, 2.96430203, 3.95540216, 4.96915010,
+ 5.96775124, 6.99236918, 7.96503302, 8.99864542, 9.85857723,
+ 10.96541926, 11.91647197, 12.71060069,-26.00000000,-25.00000000,
+ -24.00585596,-23.11642573,-22.14271284,-20.89800711,-19.87815799,
+ -19.05036354,-17.88555651,-16.86471209,-15.97711073,-14.94012359,
+ -14.02661226,-12.98243228,-11.97489256,-10.97402777, -9.96425624,
+ -9.01085220, -7.97372506, -6.98795002, -5.97271328, -5.00191694,
+ -3.98055849, -2.98458048, -1.99470442, -0.99656768, -0.00825666,
+ 1.00272004, 1.99922218, 2.99357669, 4.01407905, 5.01003897,
+ 5.98115528, 7.00018958, 8.00338125, 8.98981046, 9.98990318,
+ 10.96341479, 11.96866930, 12.99175139, 13.94580443, 14.95745083,
+ 15.98992869, 16.97484646, 17.99630043, 18.93396897, 19.88347741,
+ 20.96532482, 21.92191032, 23.22314702 };
+
+
+/* cdf tables for quantizer indices */
+const uint16_t WebRtcIsac_kQKltCdfGain[404] = {
+ 0, 13, 301, 3730, 61784, 65167, 65489, 65535, 0, 17,
+ 142, 314, 929, 2466, 7678, 56450, 63463, 64740, 65204, 65426,
+ 65527, 65535, 0, 8, 100, 724, 6301, 60105, 65125, 65510,
+ 65531, 65535, 0, 13, 117, 368, 1068, 3010, 11928, 53603,
+ 61177, 63404, 64505, 65108, 65422, 65502, 65531, 65535, 0, 4,
+ 17, 96, 410, 1859, 12125, 54361, 64103, 65305, 65497, 65535,
+ 0, 4, 88, 230, 469, 950, 1746, 3228, 6092, 16592,
+ 44756, 56848, 61256, 63308, 64325, 64920, 65309, 65460, 65502,
+ 65522, 65535, 0, 88, 352, 1675, 6339, 20749, 46686, 59284, 63525,
+ 64949, 65359, 65502, 65527, 65535, 0, 13, 38, 63, 117,
+ 234, 381, 641, 929, 1407, 2043, 2809, 4032, 5753, 8792,
+ 14407, 24308, 38941, 48947, 55403, 59293, 61411, 62688, 63630,
+ 64329, 64840, 65188, 65376, 65472, 65506, 65527, 65531, 65535,
+ 0, 8, 29, 75, 222, 615, 1327, 2801, 5623, 9931, 16094, 24966,
+ 34419, 43458, 50676, 56186, 60055, 62500, 63936, 64765, 65225,
+ 65435, 65514, 65535, 0, 8, 13, 15, 17, 21, 33, 59,
+ 71, 92, 151, 243, 360, 456, 674, 934, 1223, 1583,
+ 1989, 2504, 3031, 3617, 4354, 5154, 6163, 7411, 8780, 10747,
+ 12874, 15591, 18974, 23027, 27436, 32020, 36948, 41830, 46205,
+ 49797, 53042, 56094, 58418, 60360, 61763, 62818, 63559, 64103,
+ 64509, 64798, 65045, 65162, 65288, 65363, 65447, 65506, 65522,
+ 65531, 65533, 65535, 0, 4, 6, 25, 38, 71, 138, 264, 519, 808,
+ 1227, 1825, 2516, 3408, 4279, 5560, 7092, 9197, 11420, 14108,
+ 16947, 20300, 23926, 27459, 31164, 34827, 38575, 42178, 45540,
+ 48747, 51444, 54090, 56426, 58460, 60080, 61595, 62734, 63668,
+ 64275, 64673, 64936, 65112, 65217, 65334, 65426, 65464, 65477,
+ 65489, 65518, 65527, 65529, 65531, 65533, 65535, 0, 2, 4, 8, 10,
+ 12, 14, 16, 21, 33, 50, 71, 84, 92, 105, 138, 180, 255, 318,
+ 377, 435, 473, 511, 590, 682, 758, 913, 1097, 1256, 1449, 1671,
+ 1884, 2169, 2445, 2772, 3157, 3563, 3944, 4375, 4848, 5334, 5820,
+ 6448, 7101, 7716, 8378, 9102, 9956, 10752, 11648, 12707, 13670,
+ 14758, 15910, 17187, 18472, 19627, 20649, 21951, 23169, 24283,
+ 25552, 26862, 28227, 29391, 30764, 31882, 33213, 34432, 35600,
+ 36910, 38116, 39464, 40729, 41872, 43144, 44371, 45514, 46762,
+ 47813, 48968, 50069, 51032, 51974, 52908, 53737, 54603, 55445,
+ 56282, 56990, 57572, 58191, 58840, 59410, 59887, 60264, 60607,
+ 60946, 61269, 61516, 61771, 61960, 62198, 62408, 62558, 62776,
+ 62985, 63207, 63408, 63546, 63739, 63906, 64070, 64237, 64371,
+ 64551, 64677, 64836, 64999, 65095, 65213, 65284, 65338, 65380,
+ 65426, 65447, 65472, 65485, 65487, 65489, 65502, 65510, 65512,
+ 65514, 65516, 65518, 65522, 65531, 65533, 65535 };
+
+
+const uint16_t WebRtcIsac_kQKltCdfShape[686] = {
+ 0, 65535, 0, 65535, 0, 65535, 0, 65535, 0, 65535,
+ 0, 65535, 0, 65535, 0, 65535, 0, 65535, 0, 4,
+ 65535, 0, 8, 65514, 65535, 0, 29, 65481, 65535, 0,
+ 121, 65439, 65535, 0, 239, 65284, 65535, 0, 8, 779,
+ 64999, 65527, 65535, 0, 8, 888, 64693, 65522, 65535, 0,
+ 29, 2604, 62843, 65497, 65531, 65535, 0, 25, 176, 4576,
+ 61164, 65275, 65527, 65535, 0, 65535, 0, 65535, 0, 65535,
+ 0, 65535, 0, 4, 65535, 0, 65535, 0, 65535, 0,
+ 65535, 0, 65535, 0, 4, 65535, 0, 33, 65502, 65535,
+ 0, 54, 65481, 65535, 0, 251, 65309, 65535, 0, 611,
+ 65074, 65535, 0, 1273, 64292, 65527, 65535, 0, 4, 1809,
+ 63940, 65518, 65535, 0, 88, 4392, 60603, 65426, 65531, 65535,
+ 0, 25, 419, 7046, 57756, 64961, 65514, 65531, 65535, 0,
+ 65535, 0, 65535, 0, 65535, 0, 65535, 0, 4, 65531,
+ 65535, 0, 65535, 0, 8, 65531, 65535, 0, 4, 65527,
+ 65535, 0, 17, 65510, 65535, 0, 42, 65481, 65535, 0,
+ 197, 65342, 65531, 65535, 0, 385, 65154, 65535, 0, 1005,
+ 64522, 65535, 0, 8, 1985, 63469, 65533, 65535, 0, 38,
+ 3119, 61884, 65514, 65535, 0, 4, 6, 67, 4961, 60804,
+ 65472, 65535, 0, 17, 565, 9182, 56538, 65087, 65514, 65535,
+ 0, 8, 63, 327, 2118, 14490, 52774, 63839, 65376, 65522,
+ 65535, 0, 65535, 0, 65535, 0, 65535, 0, 65535, 0,
+ 17, 65522, 65535, 0, 59, 65489, 65535, 0, 50, 65522,
+ 65535, 0, 54, 65489, 65535, 0, 310, 65179, 65535, 0,
+ 615, 64836, 65535, 0, 4, 1503, 63965, 65535, 0, 2780,
+ 63383, 65535, 0, 21, 3919, 61051, 65527, 65535, 0, 84,
+ 6674, 59929, 65435, 65535, 0, 4, 255, 7976, 55784, 65150,
+ 65518, 65531, 65535, 0, 4, 8, 582, 10726, 53465, 64949,
+ 65518, 65535, 0, 29, 339, 3006, 17555, 49517, 62956, 65200,
+ 65497, 65531, 65535, 0, 2, 33, 138, 565, 2324, 7670,
+ 22089, 45966, 58949, 63479, 64966, 65380, 65518, 65535, 0, 65535,
+ 0, 65535, 0, 2, 65533, 65535, 0, 46, 65514, 65535,
+ 0, 414, 65091, 65535, 0, 540, 64911, 65535, 0, 419,
+ 65162, 65535, 0, 976, 64790, 65535, 0, 2977, 62495, 65531,
+ 65535, 0, 4, 3852, 61034, 65527, 65535, 0, 4, 29,
+ 6021, 60243, 65468, 65535, 0, 84, 6711, 58066, 65418, 65535,
+ 0, 13, 281, 9550, 54917, 65125, 65506, 65535, 0, 2,
+ 63, 984, 12108, 52644, 64342, 65435, 65527, 65535, 0, 29,
+ 251, 2014, 14871, 47553, 62881, 65229, 65518, 65535, 0, 13,
+ 142, 749, 4220, 18497, 45200, 60913, 64823, 65426, 65527, 65535,
+ 0, 13, 71, 264, 1176, 3789, 10500, 24480, 43488, 56324,
+ 62315, 64493, 65242, 65464, 65514, 65522, 65531, 65535, 0, 4,
+ 13, 38, 109, 205, 448, 850, 1708, 3429, 6276, 11371,
+ 19221, 29734, 40955, 49391, 55411, 59460, 62102, 63793, 64656,
+ 65150, 65401, 65485, 65522, 65531, 65535, 0, 65535, 0, 2, 65533,
+ 65535, 0, 1160, 65476, 65535, 0, 2, 6640, 64763, 65533,
+ 65535, 0, 2, 38, 9923, 61009, 65527, 65535, 0, 2,
+ 4949, 63092, 65533, 65535, 0, 2, 3090, 63398, 65533, 65535,
+ 0, 2, 2520, 58744, 65510, 65535, 0, 2, 13, 544,
+ 8784, 51403, 65148, 65533, 65535, 0, 2, 25, 1017, 10412,
+ 43550, 63651, 65489, 65527, 65535, 0, 2, 4, 29, 783,
+ 13377, 52462, 64524, 65495, 65533, 65535, 0, 2, 4, 6,
+ 100, 1817, 18451, 52590, 63559, 65376, 65531, 65535, 0, 2,
+ 4, 6, 46, 385, 2562, 11225, 37416, 60488, 65026, 65487,
+ 65529, 65533, 65535, 0, 2, 4, 6, 8, 10, 12,
+ 42, 222, 971, 5221, 19811, 45048, 60312, 64486, 65294, 65474,
+ 65525, 65529, 65533, 65535, 0, 2, 4, 8, 71, 167,
+ 666, 2533, 7875, 19622, 38082, 54359, 62108, 64633, 65290, 65495,
+ 65529, 65533, 65535, 0, 2, 4, 6, 8, 10, 13,
+ 109, 586, 1930, 4949, 11600, 22641, 36125, 48312, 56899, 61495,
+ 63927, 64932, 65389, 65489, 65518, 65531, 65533, 65535, 0, 4,
+ 6, 8, 67, 209, 712, 1838, 4195, 8432, 14432, 22834,
+ 31723, 40523, 48139, 53929, 57865, 60657, 62403, 63584, 64363,
+ 64907, 65167, 65372, 65472, 65514, 65535, 0, 2, 4, 13, 25,
+ 42, 46, 50, 75, 113, 147, 281, 448, 657, 909,
+ 1185, 1591, 1976, 2600, 3676, 5317, 7398, 9914, 12941, 16169,
+ 19477, 22885, 26464, 29851, 33360, 37228, 41139, 44802, 48654,
+ 52058, 55181, 57676, 59581, 61022, 62190, 63107, 63676, 64199,
+ 64547, 64924, 65158, 65313, 65430, 65481, 65518, 65535 };
+
+
+/* pointers to cdf tables for quantizer indices */
+const uint16_t *WebRtcIsac_kQKltCdfPtrGain[12] = {
+ WebRtcIsac_kQKltCdfGain +0 +0, WebRtcIsac_kQKltCdfGain +0 +8,
+ WebRtcIsac_kQKltCdfGain +0 +22, WebRtcIsac_kQKltCdfGain +0 +32,
+ WebRtcIsac_kQKltCdfGain +0 +48, WebRtcIsac_kQKltCdfGain +0 +60,
+ WebRtcIsac_kQKltCdfGain +0 +81, WebRtcIsac_kQKltCdfGain +0 +95,
+ WebRtcIsac_kQKltCdfGain +0 +128, WebRtcIsac_kQKltCdfGain +0 +152,
+ WebRtcIsac_kQKltCdfGain +0 +210, WebRtcIsac_kQKltCdfGain +0 +264 };
+
+const uint16_t *WebRtcIsac_kQKltCdfPtrShape[108] = {
+ WebRtcIsac_kQKltCdfShape +0 +0, WebRtcIsac_kQKltCdfShape +0 +2,
+ WebRtcIsac_kQKltCdfShape +0 +4, WebRtcIsac_kQKltCdfShape +0 +6,
+ WebRtcIsac_kQKltCdfShape +0 +8, WebRtcIsac_kQKltCdfShape +0 +10,
+ WebRtcIsac_kQKltCdfShape +0 +12, WebRtcIsac_kQKltCdfShape +0 +14,
+ WebRtcIsac_kQKltCdfShape +0 +16, WebRtcIsac_kQKltCdfShape +0 +18,
+ WebRtcIsac_kQKltCdfShape +0 +21, WebRtcIsac_kQKltCdfShape +0 +25,
+ WebRtcIsac_kQKltCdfShape +0 +29, WebRtcIsac_kQKltCdfShape +0 +33,
+ WebRtcIsac_kQKltCdfShape +0 +37, WebRtcIsac_kQKltCdfShape +0 +43,
+ WebRtcIsac_kQKltCdfShape +0 +49, WebRtcIsac_kQKltCdfShape +0 +56,
+ WebRtcIsac_kQKltCdfShape +0 +64, WebRtcIsac_kQKltCdfShape +0 +66,
+ WebRtcIsac_kQKltCdfShape +0 +68, WebRtcIsac_kQKltCdfShape +0 +70,
+ WebRtcIsac_kQKltCdfShape +0 +72, WebRtcIsac_kQKltCdfShape +0 +75,
+ WebRtcIsac_kQKltCdfShape +0 +77, WebRtcIsac_kQKltCdfShape +0 +79,
+ WebRtcIsac_kQKltCdfShape +0 +81, WebRtcIsac_kQKltCdfShape +0 +83,
+ WebRtcIsac_kQKltCdfShape +0 +86, WebRtcIsac_kQKltCdfShape +0 +90,
+ WebRtcIsac_kQKltCdfShape +0 +94, WebRtcIsac_kQKltCdfShape +0 +98,
+ WebRtcIsac_kQKltCdfShape +0 +102, WebRtcIsac_kQKltCdfShape +0 +107,
+ WebRtcIsac_kQKltCdfShape +0 +113, WebRtcIsac_kQKltCdfShape +0 +120,
+ WebRtcIsac_kQKltCdfShape +0 +129, WebRtcIsac_kQKltCdfShape +0 +131,
+ WebRtcIsac_kQKltCdfShape +0 +133, WebRtcIsac_kQKltCdfShape +0 +135,
+ WebRtcIsac_kQKltCdfShape +0 +137, WebRtcIsac_kQKltCdfShape +0 +141,
+ WebRtcIsac_kQKltCdfShape +0 +143, WebRtcIsac_kQKltCdfShape +0 +147,
+ WebRtcIsac_kQKltCdfShape +0 +151, WebRtcIsac_kQKltCdfShape +0 +155,
+ WebRtcIsac_kQKltCdfShape +0 +159, WebRtcIsac_kQKltCdfShape +0 +164,
+ WebRtcIsac_kQKltCdfShape +0 +168, WebRtcIsac_kQKltCdfShape +0 +172,
+ WebRtcIsac_kQKltCdfShape +0 +178, WebRtcIsac_kQKltCdfShape +0 +184,
+ WebRtcIsac_kQKltCdfShape +0 +192, WebRtcIsac_kQKltCdfShape +0 +200,
+ WebRtcIsac_kQKltCdfShape +0 +211, WebRtcIsac_kQKltCdfShape +0 +213,
+ WebRtcIsac_kQKltCdfShape +0 +215, WebRtcIsac_kQKltCdfShape +0 +217,
+ WebRtcIsac_kQKltCdfShape +0 +219, WebRtcIsac_kQKltCdfShape +0 +223,
+ WebRtcIsac_kQKltCdfShape +0 +227, WebRtcIsac_kQKltCdfShape +0 +231,
+ WebRtcIsac_kQKltCdfShape +0 +235, WebRtcIsac_kQKltCdfShape +0 +239,
+ WebRtcIsac_kQKltCdfShape +0 +243, WebRtcIsac_kQKltCdfShape +0 +248,
+ WebRtcIsac_kQKltCdfShape +0 +252, WebRtcIsac_kQKltCdfShape +0 +258,
+ WebRtcIsac_kQKltCdfShape +0 +264, WebRtcIsac_kQKltCdfShape +0 +273,
+ WebRtcIsac_kQKltCdfShape +0 +282, WebRtcIsac_kQKltCdfShape +0 +293,
+ WebRtcIsac_kQKltCdfShape +0 +308, WebRtcIsac_kQKltCdfShape +0 +310,
+ WebRtcIsac_kQKltCdfShape +0 +312, WebRtcIsac_kQKltCdfShape +0 +316,
+ WebRtcIsac_kQKltCdfShape +0 +320, WebRtcIsac_kQKltCdfShape +0 +324,
+ WebRtcIsac_kQKltCdfShape +0 +328, WebRtcIsac_kQKltCdfShape +0 +332,
+ WebRtcIsac_kQKltCdfShape +0 +336, WebRtcIsac_kQKltCdfShape +0 +341,
+ WebRtcIsac_kQKltCdfShape +0 +347, WebRtcIsac_kQKltCdfShape +0 +354,
+ WebRtcIsac_kQKltCdfShape +0 +360, WebRtcIsac_kQKltCdfShape +0 +368,
+ WebRtcIsac_kQKltCdfShape +0 +378, WebRtcIsac_kQKltCdfShape +0 +388,
+ WebRtcIsac_kQKltCdfShape +0 +400, WebRtcIsac_kQKltCdfShape +0 +418,
+ WebRtcIsac_kQKltCdfShape +0 +445, WebRtcIsac_kQKltCdfShape +0 +447,
+ WebRtcIsac_kQKltCdfShape +0 +451, WebRtcIsac_kQKltCdfShape +0 +455,
+ WebRtcIsac_kQKltCdfShape +0 +461, WebRtcIsac_kQKltCdfShape +0 +468,
+ WebRtcIsac_kQKltCdfShape +0 +474, WebRtcIsac_kQKltCdfShape +0 +480,
+ WebRtcIsac_kQKltCdfShape +0 +486, WebRtcIsac_kQKltCdfShape +0 +495,
+ WebRtcIsac_kQKltCdfShape +0 +505, WebRtcIsac_kQKltCdfShape +0 +516,
+ WebRtcIsac_kQKltCdfShape +0 +528, WebRtcIsac_kQKltCdfShape +0 +543,
+ WebRtcIsac_kQKltCdfShape +0 +564, WebRtcIsac_kQKltCdfShape +0 +583,
+ WebRtcIsac_kQKltCdfShape +0 +608, WebRtcIsac_kQKltCdfShape +0 +635 };
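+
+/* Each quantizer (gain and shape alike) owns maxInd + 2 consecutive cdf
+ entries: a CDF needs one boundary more than its maxInd + 1 intervals.
+ This is why consecutive offsets in the pointer tables above differ by
+ WebRtcIsac_kQKltMaxIndGain[k] + 2 (8, 14, 10, 16, ...) and
+ WebRtcIsac_kQKltMaxIndShape[k] + 2 respectively. */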
+
+
+/* left KLT transforms */
+const double WebRtcIsac_kKltT1Gain[4] = {
+ -0.79742827, 0.60341375, 0.60341375, 0.79742827 };
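+
+/* These four entries form a 2x2 symmetric orthogonal matrix (rows are
+ unit-norm and mutually orthogonal), i.e. a fixed reflection, presumably
+ applied across the two log gains of each sub-frame; the 6x6 right
+ transform WebRtcIsac_kKltT2Gain below then acts across sub-frames. */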
+
+const double WebRtcIsac_kKltT1Shape[324] = {
+ 0.00159597, 0.00049320, 0.00513821, 0.00021066, 0.01338581,
+ -0.00422367, -0.00272072, 0.00935107, 0.02047622, 0.02691189,
+ 0.00478236, 0.03969702, 0.00886698, 0.04877604, -0.10898362,
+ -0.05930891, -0.03415047, 0.98889721, 0.00293558, -0.00035282,
+ 0.01156321, -0.00195341, -0.00937631, 0.01052213, -0.02551163,
+ 0.01644059, 0.03189927, 0.07754773, -0.08742313, -0.03026338,
+ 0.05136248, -0.14395974, 0.17725040, 0.22664856, 0.93380230,
+ 0.07076411, 0.00557890, -0.00222834, 0.01377569, 0.01466808,
+ 0.02847361, -0.00603178, 0.02382480, -0.01210452, 0.03797267,
+ -0.02371480, 0.11260335, -0.07366682, 0.00453436, -0.04136941,
+ -0.07912843, -0.95031418, 0.25295337, -0.05302216, -0.00617554,
+ -0.00044040, -0.00653778, 0.01097838, 0.01529174, 0.01374431,
+ -0.00748512, -0.00020034, 0.02432713, 0.11101570, -0.08556891,
+ 0.09282249, -0.01029446, 0.67556443, -0.67454300, 0.06910063,
+ 0.20866865, -0.10318050, 0.00932175, 0.00524058, 0.00803610,
+ -0.00594676, -0.01082578, 0.01069906, 0.00546768, 0.01565291,
+ 0.06816200, 0.10201227, 0.16812734, 0.22984074, 0.58213170,
+ -0.54138651, -0.51379962, 0.06847390, -0.01920037, -0.04592324,
+ -0.00467394, 0.00328858, 0.00377424, -0.00987448, 0.08222096,
+ -0.00377301, 0.04551941, -0.02592517, 0.16317082, 0.13077530,
+ 0.22702921, -0.31215289, -0.69645962, -0.38047101, -0.39339411,
+ 0.11124777, 0.02508035, -0.00708074, 0.00400344, 0.00040331,
+ 0.01142402, 0.01725406, 0.01635170, 0.14285366, 0.03949233,
+ -0.05905676, 0.05877154, -0.17497577, -0.32479440, 0.80754464,
+ -0.38085603, -0.17055430, -0.03168622, -0.07531451, 0.02942002,
+ -0.02148095, -0.00754114, -0.00322372, 0.00567812, -0.01701521,
+ -0.12358320, 0.11473564, 0.09070136, 0.06533068, -0.22560802,
+ 0.19209022, 0.81605094, 0.36592275, -0.09919829, 0.16667122,
+ 0.16300725, 0.04803807, 0.06739263, -0.00156752, -0.01685302,
+ -0.00905240, -0.02297836, -0.00469939, 0.06310613, -0.16391930,
+ 0.10919511, 0.12529293, 0.85581322, -0.32145522, 0.24539076,
+ 0.07181839, 0.07289591, 0.14066759, 0.10406711, 0.05815518,
+ 0.01072680, -0.00759339, 0.00053486, -0.00044865, 0.03407361,
+ 0.01645348, 0.08758579, 0.27722240, 0.53665485, -0.74853376,
+ -0.01118192, -0.19805430, 0.06130619, -0.09675299, 0.08978480,
+ 0.03405255, -0.00706867, 0.05102045, 0.03250746, 0.01849966,
+ -0.01216314, -0.01184187, -0.01579288, 0.00114807, 0.11376166,
+ 0.88342114, -0.36425379, 0.13863190, 0.12524180, -0.13553892,
+ 0.04715856, -0.12341103, 0.04531568, 0.01899360, -0.00206897,
+ 0.00567768, -0.01444163, 0.00411946, -0.00855896, 0.00381663,
+ -0.01664861, -0.05534280, 0.21328278, 0.20161162, 0.72360394,
+ 0.59130708, -0.08043791, 0.08757349, -0.13893918, -0.05147377,
+ 0.02680690, -0.01144070, 0.00625162, -0.00634215, -0.01248947,
+ -0.00329455, -0.00609625, -0.00136305, -0.05097048, -0.01029851,
+ 0.25065384, -0.16856837, -0.07123372, 0.15992623, -0.39487617,
+ -0.79972301, 0.18118185, -0.04826639, -0.01805578, -0.02927253,
+ -0.16400618, 0.07472763, 0.10376449, 0.01705406, 0.01065801,
+ -0.01500498, 0.02039914, 0.37776349, -0.84484186, 0.10434286,
+ 0.15616990, 0.13474456, -0.00906238, -0.25238368, -0.03820885,
+ -0.10650905, -0.03880833, -0.03660028, -0.09640894, 0.00583314,
+ 0.01922097, 0.01489911, -0.02431117, -0.09372217, 0.39404721,
+ -0.84786223, -0.31277121, 0.03193850, 0.01974060, 0.01887901,
+ 0.00337911, -0.11359599, -0.02792521, -0.03220184, -0.01533311,
+ 0.00015962, -0.04225043, -0.00933965, 0.00675311, 0.00206060,
+ 0.15926771, 0.40199829, -0.80792558, -0.35591604, -0.17169764,
+ 0.02830436, 0.02459982, -0.03438589, 0.00718705, -0.01798329,
+ -0.01594508, -0.00702430, -0.00952419, -0.00962701, -0.01307212,
+ -0.01749740, 0.01299602, 0.00587270, -0.36103108, -0.82039266,
+ -0.43092844, -0.08500097, -0.04361674, -0.00333482, 0.01250434,
+ -0.02538295, -0.00921797, 0.01645071, -0.01400872, 0.00317607,
+ 0.00003277, -0.01617646, -0.00616863, -0.00882661, 0.00466157,
+ 0.00353237, 0.91803104, -0.39503305, -0.02048964, 0.00060125,
+ 0.01980634, 0.00300109, 0.00313880, 0.00657337, 0.00715163,
+ 0.00000261, 0.00854276, -0.00154825, -0.00516128, 0.00909527,
+ 0.00095609, 0.00701196, -0.00221867, -0.00156741 };
+
+/* right KLT transforms */
+const double WebRtcIsac_kKltT2Gain[36] = {
+ 0.14572837, -0.45446306, 0.61990621, -0.52197033, 0.32145074,
+ -0.11026900, -0.20698282, 0.48962182, -0.27127933, -0.33627476,
+ 0.65094037, -0.32715751, 0.40262573, -0.47844405, -0.33876075,
+ 0.44130653, 0.37383966, -0.39964662, -0.51730480, 0.06611973,
+ 0.49030187, 0.47512886, -0.02141226, -0.51129451, -0.58578569,
+ -0.39132064, -0.13187771, 0.15649421, 0.40735596, 0.54396897,
+ 0.40381276, 0.40904942, 0.41179766, 0.41167576, 0.40840251,
+ 0.40468132 };
+
+const double WebRtcIsac_kKltT2Shape[36] = {
+ 0.13427386, -0.35132558, 0.52506528, -0.59419077, 0.45075085,
+ -0.16312057, 0.29857439, -0.58660147, 0.34265431, 0.20879510,
+ -0.56063262, 0.30238345, 0.43308283, -0.41186999, -0.35288681,
+ 0.42768996, 0.36094634, -0.45284910, -0.47116680, 0.02893449,
+ 0.54326135, 0.45249040, -0.06264420, -0.52283830, 0.57137758,
+ 0.44298139, 0.12617554, -0.20819946, -0.42324603, -0.48876443,
+ 0.39597050, 0.40713935, 0.41389880, 0.41512486, 0.41130400,
+ 0.40575001 };
+
+/* means of log gains and LAR coefficients */
+const double WebRtcIsac_kLpcMeansGain[12] = {
+ -6.86881911, -5.35075273, -6.86792680, -5.36200897, -6.86401538,
+ -5.36921533, -6.86802969, -5.36893966, -6.86538097, -5.36315063,
+ -6.85535304, -5.35155315 };
+
+const double WebRtcIsac_kLpcMeansShape[108] = {
+ -0.91232981, 0.26258634, -0.33716701, 0.08477430, -0.03378426,
+ 0.14423909, 0.07036185, 0.06155019, 0.01490385, 0.04138740,
+ 0.01427317, 0.01288970, 0.83872106, 0.25750199, 0.07988929,
+ -0.01957923, 0.00831390, 0.01770300, -0.90957164, 0.25732216,
+ -0.33385344, 0.08735740, -0.03715332, 0.14584917, 0.06998990,
+ 0.06131968, 0.01504379, 0.04067339, 0.01428039, 0.01406460,
+ 0.83846243, 0.26169862, 0.08109025, -0.01767055, 0.00970539,
+ 0.01954310, -0.90490803, 0.24656405, -0.33578607, 0.08843286,
+ -0.03749139, 0.14443959, 0.07214669, 0.06170993, 0.01449947,
+ 0.04134309, 0.01314762, 0.01413471, 0.83895203, 0.26748062,
+ 0.08197507, -0.01781298, 0.00885967, 0.01922394, -0.90922472,
+ 0.24495889, -0.33921540, 0.08877169, -0.03581332, 0.14199172,
+ 0.07444032, 0.06185940, 0.01502054, 0.04185113, 0.01276579,
+ 0.01355457, 0.83645358, 0.26631720, 0.08119697, -0.01835449,
+ 0.00788512, 0.01846446, -0.90482253, 0.24658310, -0.34019734,
+ 0.08281090, -0.03486038, 0.14359248, 0.07401336, 0.06001471,
+ 0.01528421, 0.04254560, 0.01321472, 0.01240799, 0.83857127,
+ 0.26281654, 0.08174380, -0.02099842, 0.00755176, 0.01699448,
+ -0.90132307, 0.25174308, -0.33838268, 0.07883863, -0.02877906,
+ 0.14105407, 0.07220290, 0.06000352, 0.01684879, 0.04226844,
+ 0.01331331, 0.01269244, 0.83832138, 0.25467485, 0.08118028,
+ -0.02120528, 0.00747832, 0.01567212 };
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/lpc_tables.h b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/lpc_tables.h
new file mode 100644
index 0000000000..56ff22c06c
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/lpc_tables.h
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * lpc_tables.h
+ *
+ * header file for coding tables for the LPC coefficients
+ *
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_LPC_TABLES_H_
+#define MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_LPC_TABLES_H_
+
+#include "modules/audio_coding/codecs/isac/main/source/settings.h"
+#include "modules/audio_coding/codecs/isac/main/source/structs.h"
+
+#define KLT_STEPSIZE 1.00000000
+#define KLT_NUM_AVG_GAIN 0
+#define KLT_NUM_AVG_SHAPE 0
+#define KLT_NUM_MODELS 3
+#define LPC_GAIN_SCALE 4.000f
+#define LPC_LOBAND_SCALE 2.100f
+#define LPC_LOBAND_ORDER ORDERLO
+#define LPC_HIBAND_SCALE 0.450f
+#define LPC_HIBAND_ORDER ORDERHI
+#define LPC_GAIN_ORDER 2
+
+#define LPC_SHAPE_ORDER (LPC_LOBAND_ORDER + LPC_HIBAND_ORDER)
+
+#define KLT_ORDER_GAIN (LPC_GAIN_ORDER * SUBFRAMES)
+#define KLT_ORDER_SHAPE (LPC_SHAPE_ORDER * SUBFRAMES)
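+
+/* Sanity note (inferred from the array sizes declared below, not stated in
+ * the original source): the 12-entry gain arrays match
+ * KLT_ORDER_GAIN == LPC_GAIN_ORDER * SUBFRAMES with SUBFRAMES == 6, and the
+ * 108-entry shape arrays match KLT_ORDER_SHAPE == 18 * 6, implying
+ * ORDERLO + ORDERHI == 18 in settings.h. */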
+
+/* cdf array for model indicator */
+extern const uint16_t WebRtcIsac_kQKltModelCdf[KLT_NUM_MODELS + 1];
+
+/* pointer to cdf array for model indicator */
+extern const uint16_t* WebRtcIsac_kQKltModelCdfPtr[1];
+
+/* initial cdf index for decoder of model indicator */
+extern const uint16_t WebRtcIsac_kQKltModelInitIndex[1];
+
+/* offset to go from rounded value to quantization index */
+extern const short WebRtcIsac_kQKltQuantMinGain[12];
+
+extern const short WebRtcIsac_kQKltQuantMinShape[108];
+
+/* maximum quantization index */
+extern const uint16_t WebRtcIsac_kQKltMaxIndGain[12];
+
+extern const uint16_t WebRtcIsac_kQKltMaxIndShape[108];
+
+/* index offset */
+extern const uint16_t WebRtcIsac_kQKltOffsetGain[12];
+
+extern const uint16_t WebRtcIsac_kQKltOffsetShape[108];
+
+/* initial cdf index for KLT coefficients */
+extern const uint16_t WebRtcIsac_kQKltInitIndexGain[12];
+
+extern const uint16_t WebRtcIsac_kQKltInitIndexShape[108];
+
+/* quantizer representation levels */
+extern const double WebRtcIsac_kQKltLevelsGain[392];
+
+extern const double WebRtcIsac_kQKltLevelsShape[578];
+
+/* cdf tables for quantizer indices */
+extern const uint16_t WebRtcIsac_kQKltCdfGain[404];
+
+extern const uint16_t WebRtcIsac_kQKltCdfShape[686];
+
+/* pointers to cdf tables for quantizer indices */
+extern const uint16_t* WebRtcIsac_kQKltCdfPtrGain[12];
+
+extern const uint16_t* WebRtcIsac_kQKltCdfPtrShape[108];
+
+/* left KLT transforms */
+extern const double WebRtcIsac_kKltT1Gain[4];
+
+extern const double WebRtcIsac_kKltT1Shape[324];
+
+/* right KLT transforms */
+extern const double WebRtcIsac_kKltT2Gain[36];
+
+extern const double WebRtcIsac_kKltT2Shape[36];
+
+/* means of log gains and LAR coefficients */
+extern const double WebRtcIsac_kLpcMeansGain[12];
+
+extern const double WebRtcIsac_kLpcMeansShape[108];
+
+#endif /* MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_LPC_TABLES_H_ */
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/os_specific_inline.h b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/os_specific_inline.h
new file mode 100644
index 0000000000..fe9afa4ba2
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/os_specific_inline.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_OS_SPECIFIC_INLINE_H_
+#define MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_OS_SPECIFIC_INLINE_H_
+
+#include <math.h>
+
+#include "rtc_base/system/arch.h"
+
+#if defined(WEBRTC_POSIX)
+#define WebRtcIsac_lrint lrint
+#elif (defined(WEBRTC_ARCH_X86) && defined(WIN32))
+static __inline long int WebRtcIsac_lrint(double x_dbl) {
+ long int x_int;
+
+ __asm {
+ fld x_dbl
+ fistp x_int
+ }
+
+ return x_int;
+}
+#else // Fall back to a slow, approximate implementation of lrint
+
+static __inline long int WebRtcIsac_lrint(double x_dbl) {
+ long int x_int;
+ x_int = (long int)floor(x_dbl + 0.499999999999);
+ return x_int;
+}
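+
+/* Note: floor(x_dbl + 0.499999999999) only approximates lrint(). Halfway
+ * cases resolve downward, so an input of -0.5 yields -1 here while C99
+ * lrint() under round-to-nearest-even yields 0, and 1.5 yields 1 here
+ * rather than 2. */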
+
+#endif
+
+#endif // MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_OS_SPECIFIC_INLINE_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/pitch_estimator.c b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/pitch_estimator.c
new file mode 100644
index 0000000000..8a19ac1710
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/pitch_estimator.c
@@ -0,0 +1,695 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/isac/main/source/pitch_estimator.h"
+
+#include <math.h>
+#include <memory.h>
+#include <string.h>
+#ifdef WEBRTC_ANDROID
+#include <stdlib.h>
+#endif
+
+#include "modules/audio_coding/codecs/isac/main/source/filter_functions.h"
+#include "modules/audio_coding/codecs/isac/main/source/pitch_filter.h"
+#include "rtc_base/system/ignore_warnings.h"
+
+static const double kInterpolWin[8] = {-0.00067556028640, 0.02184247643159, -0.12203175715679, 0.60086484101160,
+ 0.60086484101160, -0.12203175715679, 0.02184247643159, -0.00067556028640};
+
+/* interpolation filter */
+__inline static void IntrepolFilter(double *data_ptr, double *intrp)
+{
+ *intrp = kInterpolWin[0] * data_ptr[-3];
+ *intrp += kInterpolWin[1] * data_ptr[-2];
+ *intrp += kInterpolWin[2] * data_ptr[-1];
+ *intrp += kInterpolWin[3] * data_ptr[0];
+ *intrp += kInterpolWin[4] * data_ptr[1];
+ *intrp += kInterpolWin[5] * data_ptr[2];
+ *intrp += kInterpolWin[6] * data_ptr[3];
+ *intrp += kInterpolWin[7] * data_ptr[4];
+}
+
+
+/* 2D parabolic interpolation */
+/* probably some 0.5 factors can be eliminated, and the square roots can be removed from the Cholesky factorization. */
+__inline static void Intrpol2D(double T[3][3], double *x, double *y, double *peak_val)
+{
+ double c, b[2], A[2][2];
+ double t1, t2, d;
+ double delta1, delta2;
+
+
+ // double T[3][3] = {{-1.25, -.25,-.25}, {-.25, .75, .75}, {-.25, .75, .75}};
+ // should result in: delta1 = 0.5; delta2 = 0.0; peak_val = 1.0
+
+ c = T[1][1];
+ b[0] = 0.5 * (T[1][2] + T[2][1] - T[0][1] - T[1][0]);
+ b[1] = 0.5 * (T[1][0] + T[2][1] - T[0][1] - T[1][2]);
+ A[0][1] = -0.5 * (T[0][1] + T[2][1] - T[1][0] - T[1][2]);
+ t1 = 0.5 * (T[0][0] + T[2][2]) - c;
+ t2 = 0.5 * (T[2][0] + T[0][2]) - c;
+ d = (T[0][1] + T[1][2] + T[1][0] + T[2][1]) - 4.0 * c - t1 - t2;
+ A[0][0] = -t1 - 0.5 * d;
+ A[1][1] = -t2 - 0.5 * d;
+
+ /* deal with singularities or ill-conditioned cases */
+ if ( (A[0][0] < 1e-7) || ((A[0][0] * A[1][1] - A[0][1] * A[0][1]) < 1e-7) ) {
+ *peak_val = T[1][1];
+ return;
+ }
+
+ /* Cholesky decomposition: replace A by upper-triangular factor */
+ A[0][0] = sqrt(A[0][0]);
+ A[0][1] = A[0][1] / A[0][0];
+ A[1][1] = sqrt(A[1][1] - A[0][1] * A[0][1]);
+
+ /* compute [x; y] = -0.5 * inv(A) * b */
+ t1 = b[0] / A[0][0];
+ t2 = (b[1] - t1 * A[0][1]) / A[1][1];
+ delta2 = t2 / A[1][1];
+ delta1 = 0.5 * (t1 - delta2 * A[0][1]) / A[0][0];
+ delta2 *= 0.5;
+
+ /* limit norm */
+ t1 = delta1 * delta1 + delta2 * delta2;
+ if (t1 > 1.0) {
+ delta1 /= t1;
+ delta2 /= t1;
+ }
+
+ *peak_val = 0.5 * (b[0] * delta1 + b[1] * delta2) + c;
+
+ *x += delta1;
+ *y += delta2;
+}
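+
+/* A minimal, compiled-out self-check sketch for Intrpol2D; the expected
+ * values come from the test matrix documented inside the function above,
+ * not from independent verification. */
+#if 0
+static void Intrpol2DSelfCheck(void) {
+  double T[3][3] = {{-1.25, -.25, -.25}, {-.25, .75, .75}, {-.25, .75, .75}};
+  double x = 0.0, y = 0.0, peak_val = 0.0;
+  Intrpol2D(T, &x, &y, &peak_val);
+  /* Expected: x == 0.5, y == 0.0, peak_val == 1.0 (see comment above). */
+}
+#endif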
+
+
+static void PCorr(const double *in, double *outcorr)
+{
+ double sum, ysum, prod;
+ const double *x, *inptr;
+ int k, n;
+
+ //ysum = 1e-6; /* use this with float (instead of double)! */
+ ysum = 1e-13;
+ sum = 0.0;
+ x = in + PITCH_MAX_LAG/2 + 2;
+ for (n = 0; n < PITCH_CORR_LEN2; n++) {
+ ysum += in[n] * in[n];
+ sum += x[n] * in[n];
+ }
+
+ outcorr += PITCH_LAG_SPAN2 - 1; /* index of last element in array */
+ *outcorr = sum / sqrt(ysum);
+
+ for (k = 1; k < PITCH_LAG_SPAN2; k++) {
+ ysum -= in[k-1] * in[k-1];
+ ysum += in[PITCH_CORR_LEN2 + k - 1] * in[PITCH_CORR_LEN2 + k - 1];
+ sum = 0.0;
+ inptr = &in[k];
+ prod = x[0] * inptr[0];
+ for (n = 1; n < PITCH_CORR_LEN2; n++) {
+ sum += prod;
+ prod = x[n] * inptr[n];
+ }
+ sum += prod;
+ outcorr--;
+ *outcorr = sum / sqrt(ysum);
+ }
+}
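+
+/* Reading note on PCorr: the energy term `ysum` slides incrementally from
+ * lag to lag (subtract the sample leaving the window, add the one entering
+ * it), while the cross-correlation `sum` is recomputed in full; the staggered
+ * `prod` accumulation simply overlaps each multiply with the following add. */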
+
+static void WebRtcIsac_AllpassFilterForDec(double* InOut,
+ const double* APSectionFactors,
+ size_t lengthInOut,
+ double* FilterState) {
+ // This performs all-pass filtering: a cascade of first-order all-pass
+ // sections filters the input in place.
+ size_t n, j;
+ double temp;
+ for (j = 0; j < ALLPASSSECTIONS; j++) {
+ for (n = 0; n < lengthInOut; n += 2) {
+ temp = InOut[n]; // store input
+ InOut[n] = FilterState[j] + APSectionFactors[j] * temp;
+ FilterState[j] = -APSectionFactors[j] * InOut[n] + temp;
+ }
+ }
+}
+
+static void WebRtcIsac_DecimateAllpass(
+ const double* in,
+ double* state_in, // array of size: 2*ALLPASSSECTIONS+1
+ size_t N, // number of input samples
+ double* out) { // array of size N/2
+
+ static const double APupper[ALLPASSSECTIONS] = {0.0347, 0.3826};
+ static const double APlower[ALLPASSSECTIONS] = {0.1544, 0.744};
+
+ size_t n;
+ double data_vec[PITCH_FRAME_LEN];
+
+ /* copy input */
+ memcpy(data_vec + 1, in, sizeof(double) * (N - 1));
+
+ data_vec[0] = state_in[2 * ALLPASSSECTIONS]; // the z^(-1) state
+ state_in[2 * ALLPASSSECTIONS] = in[N - 1];
+
+ WebRtcIsac_AllpassFilterForDec(data_vec + 1, APupper, N, state_in);
+ WebRtcIsac_AllpassFilterForDec(data_vec, APlower, N,
+ state_in + ALLPASSSECTIONS);
+
+ for (n = 0; n < N / 2; n++)
+ out[n] = data_vec[2 * n] + data_vec[2 * n + 1];
+}
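+
+/* Reading note: APupper and APlower act as a polyphase pair of first-order
+ * all-pass chains, the classic half-band structure; one chain filters the
+ * even-indexed samples, the other the odd-indexed ones, and summing the two
+ * branches yields the 2:1 decimated output. */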
+
+RTC_PUSH_IGNORING_WFRAME_LARGER_THAN()
+
+static void WebRtcIsac_InitializePitch(const double* in,
+ const double old_lag,
+ const double old_gain,
+ PitchAnalysisStruct* State,
+ double* lags) {
+ double buf_dec[PITCH_CORR_LEN2+PITCH_CORR_STEP2+PITCH_MAX_LAG/2+2];
+ double ratio, log_lag, gain_bias;
+ double bias;
+ double corrvec1[PITCH_LAG_SPAN2];
+ double corrvec2[PITCH_LAG_SPAN2];
+ int m, k;
+ // Allocate 10 extra entries at the beginning of CorrSurf.
+ double corrSurfBuff[10 + (2*PITCH_BW+3)*(PITCH_LAG_SPAN2+4)];
+ double* CorrSurf[2*PITCH_BW+3];
+ double *CorrSurfPtr1, *CorrSurfPtr2;
+ double LagWin[3] = {0.2, 0.5, 0.98};
+ int ind1, ind2, peaks_ind, peak, max_ind;
+ int peaks[PITCH_MAX_NUM_PEAKS];
+ double adj, gain_tmp;
+ double corr, corr_max;
+ double intrp_a, intrp_b, intrp_c, intrp_d;
+ double peak_vals[PITCH_MAX_NUM_PEAKS];
+ double lags1[PITCH_MAX_NUM_PEAKS];
+ double lags2[PITCH_MAX_NUM_PEAKS];
+ double T[3][3];
+ int row;
+
+ for(k = 0; k < 2*PITCH_BW+3; k++)
+ {
+ CorrSurf[k] = &corrSurfBuff[10 + k * (PITCH_LAG_SPAN2+4)];
+ }
+ /* reset CorrSurf matrix */
+ memset(corrSurfBuff, 0, sizeof(double) * (10 + (2*PITCH_BW+3) * (PITCH_LAG_SPAN2+4)));
+
+ // Initialize to silence compiler warnings. -DH
+ max_ind = 0;
+ peak = 0;
+
+ /* copy old values from state buffer */
+ memcpy(buf_dec, State->dec_buffer, sizeof(double) * (PITCH_CORR_LEN2+PITCH_CORR_STEP2+PITCH_MAX_LAG/2-PITCH_FRAME_LEN/2+2));
+
+ /* decimation; put result after the old values */
+ WebRtcIsac_DecimateAllpass(in, State->decimator_state, PITCH_FRAME_LEN,
+ &buf_dec[PITCH_CORR_LEN2+PITCH_CORR_STEP2+PITCH_MAX_LAG/2-PITCH_FRAME_LEN/2+2]);
+
+ /* low-pass filtering */
+ for (k = PITCH_CORR_LEN2+PITCH_CORR_STEP2+PITCH_MAX_LAG/2-PITCH_FRAME_LEN/2+2; k < PITCH_CORR_LEN2+PITCH_CORR_STEP2+PITCH_MAX_LAG/2+2; k++)
+ buf_dec[k] += 0.75 * buf_dec[k-1] - 0.25 * buf_dec[k-2];
+
+ /* copy end part back into state buffer */
+ memcpy(State->dec_buffer, buf_dec+PITCH_FRAME_LEN/2, sizeof(double) * (PITCH_CORR_LEN2+PITCH_CORR_STEP2+PITCH_MAX_LAG/2-PITCH_FRAME_LEN/2+2));
+
+ /* compute correlation for first and second half of the frame */
+ PCorr(buf_dec, corrvec1);
+ PCorr(buf_dec + PITCH_CORR_STEP2, corrvec2);
+
+ /* bias towards pitch lag of previous frame */
+ log_lag = log(0.5 * old_lag);
+ gain_bias = 4.0 * old_gain * old_gain;
+ if (gain_bias > 0.8) gain_bias = 0.8;
+ for (k = 0; k < PITCH_LAG_SPAN2; k++)
+ {
+ ratio = log((double) (k + (PITCH_MIN_LAG/2-2))) - log_lag;
+ bias = 1.0 + gain_bias * exp(-5.0 * ratio * ratio);
+ corrvec1[k] *= bias;
+ }
+
+ /* taper correlation functions */
+ for (k = 0; k < 3; k++) {
+ gain_tmp = LagWin[k];
+ corrvec1[k] *= gain_tmp;
+ corrvec2[k] *= gain_tmp;
+ corrvec1[PITCH_LAG_SPAN2-1-k] *= gain_tmp;
+ corrvec2[PITCH_LAG_SPAN2-1-k] *= gain_tmp;
+ }
+
+ corr_max = 0.0;
+ /* fill middle row of correlation surface */
+ ind1 = 0;
+ ind2 = 0;
+ CorrSurfPtr1 = &CorrSurf[PITCH_BW][2];
+ for (k = 0; k < PITCH_LAG_SPAN2; k++) {
+ corr = corrvec1[ind1++] + corrvec2[ind2++];
+ CorrSurfPtr1[k] = corr;
+ if (corr > corr_max) {
+ corr_max = corr; /* update maximum */
+ max_ind = (int)(&CorrSurfPtr1[k] - &CorrSurf[0][0]);
+ }
+ }
+ /* fill first and last rows of correlation surface */
+ ind1 = 0;
+ ind2 = PITCH_BW;
+ CorrSurfPtr1 = &CorrSurf[0][2];
+ CorrSurfPtr2 = &CorrSurf[2*PITCH_BW][PITCH_BW+2];
+ for (k = 0; k < PITCH_LAG_SPAN2-PITCH_BW; k++) {
+ ratio = ((double) (ind1 + 12)) / ((double) (ind2 + 12));
+ adj = 0.2 * ratio * (2.0 - ratio); /* adjustment factor; inverse parabola as a function of ratio */
+ corr = adj * (corrvec1[ind1] + corrvec2[ind2]);
+ CorrSurfPtr1[k] = corr;
+ if (corr > corr_max) {
+ corr_max = corr; /* update maximum */
+ max_ind = (int)(&CorrSurfPtr1[k] - &CorrSurf[0][0]);
+ }
+ corr = adj * (corrvec1[ind2++] + corrvec2[ind1++]);
+ CorrSurfPtr2[k] = corr;
+ if (corr > corr_max) {
+ corr_max = corr; /* update maximum */
+ max_ind = (int)(&CorrSurfPtr2[k] - &CorrSurf[0][0]);
+ }
+ }
+ /* fill second and next to last rows of correlation surface */
+ ind1 = 0;
+ ind2 = PITCH_BW-1;
+ CorrSurfPtr1 = &CorrSurf[1][2];
+ CorrSurfPtr2 = &CorrSurf[2*PITCH_BW-1][PITCH_BW+1];
+ for (k = 0; k < PITCH_LAG_SPAN2-PITCH_BW+1; k++) {
+ ratio = ((double) (ind1 + 12)) / ((double) (ind2 + 12));
+ adj = 0.9 * ratio * (2.0 - ratio); /* adjustment factor; inverse parabola as a function of ratio */
+ corr = adj * (corrvec1[ind1] + corrvec2[ind2]);
+ CorrSurfPtr1[k] = corr;
+ if (corr > corr_max) {
+ corr_max = corr; /* update maximum */
+ max_ind = (int)(&CorrSurfPtr1[k] - &CorrSurf[0][0]);
+ }
+ corr = adj * (corrvec1[ind2++] + corrvec2[ind1++]);
+ CorrSurfPtr2[k] = corr;
+ if (corr > corr_max) {
+ corr_max = corr; /* update maximum */
+ max_ind = (int)(&CorrSurfPtr2[k] - &CorrSurf[0][0]);
+ }
+ }
+ /* fill remainder of correlation surface */
+ for (m = 2; m < PITCH_BW; m++) {
+ ind1 = 0;
+ ind2 = PITCH_BW - m; /* always larger than ind1 */
+ CorrSurfPtr1 = &CorrSurf[m][2];
+ CorrSurfPtr2 = &CorrSurf[2*PITCH_BW-m][PITCH_BW+2-m];
+ for (k = 0; k < PITCH_LAG_SPAN2-PITCH_BW+m; k++) {
+ ratio = ((double) (ind1 + 12)) / ((double) (ind2 + 12));
+ adj = ratio * (2.0 - ratio); /* adjustment factor; inverse parabola as a function of ratio */
+ corr = adj * (corrvec1[ind1] + corrvec2[ind2]);
+ CorrSurfPtr1[k] = corr;
+ if (corr > corr_max) {
+ corr_max = corr; /* update maximum */
+ max_ind = (int)(&CorrSurfPtr1[k] - &CorrSurf[0][0]);
+ }
+ corr = adj * (corrvec1[ind2++] + corrvec2[ind1++]);
+ CorrSurfPtr2[k] = corr;
+ if (corr > corr_max) {
+ corr_max = corr; /* update maximum */
+ max_ind = (int)(&CorrSurfPtr2[k] - &CorrSurf[0][0]);
+ }
+ }
+ }
+
+ /* threshold value to qualify as a peak */
+ corr_max *= 0.6;
+
+ peaks_ind = 0;
+ /* find peaks */
+ for (m = 1; m < PITCH_BW+1; m++) {
+ if (peaks_ind == PITCH_MAX_NUM_PEAKS) break;
+ CorrSurfPtr1 = &CorrSurf[m][2];
+ for (k = 2; k < PITCH_LAG_SPAN2-PITCH_BW-2+m; k++) {
+ corr = CorrSurfPtr1[k];
+ if (corr > corr_max) {
+ if ( (corr > CorrSurfPtr1[k - (PITCH_LAG_SPAN2+5)]) && (corr > CorrSurfPtr1[k - (PITCH_LAG_SPAN2+4)]) ) {
+ if ( (corr > CorrSurfPtr1[k + (PITCH_LAG_SPAN2+4)]) && (corr > CorrSurfPtr1[k + (PITCH_LAG_SPAN2+5)]) ) {
+ /* found a peak; store index into matrix */
+ peaks[peaks_ind++] = (int)(&CorrSurfPtr1[k] - &CorrSurf[0][0]);
+ if (peaks_ind == PITCH_MAX_NUM_PEAKS) break;
+ }
+ }
+ }
+ }
+ }
+ for (m = PITCH_BW+1; m < 2*PITCH_BW; m++) {
+ if (peaks_ind == PITCH_MAX_NUM_PEAKS) break;
+ CorrSurfPtr1 = &CorrSurf[m][2];
+ for (k = 2+m-PITCH_BW; k < PITCH_LAG_SPAN2-2; k++) {
+ corr = CorrSurfPtr1[k];
+ if (corr > corr_max) {
+ if ( (corr > CorrSurfPtr1[k - (PITCH_LAG_SPAN2+5)]) && (corr > CorrSurfPtr1[k - (PITCH_LAG_SPAN2+4)]) ) {
+ if ( (corr > CorrSurfPtr1[k + (PITCH_LAG_SPAN2+4)]) && (corr > CorrSurfPtr1[k + (PITCH_LAG_SPAN2+5)]) ) {
+ /* found a peak; store index into matrix */
+ peaks[peaks_ind++] = (int)(&CorrSurfPtr1[k] - &CorrSurf[0][0]);
+ if (peaks_ind == PITCH_MAX_NUM_PEAKS) break;
+ }
+ }
+ }
+ }
+ }
+
+ if (peaks_ind > 0) {
+ /* examine each peak */
+ CorrSurfPtr1 = &CorrSurf[0][0];
+ for (k = 0; k < peaks_ind; k++) {
+ peak = peaks[k];
+
+ /* compute four interpolated values around current peak */
+ IntrepolFilter(&CorrSurfPtr1[peak - (PITCH_LAG_SPAN2+5)], &intrp_a);
+ IntrepolFilter(&CorrSurfPtr1[peak - 1 ], &intrp_b);
+ IntrepolFilter(&CorrSurfPtr1[peak ], &intrp_c);
+ IntrepolFilter(&CorrSurfPtr1[peak + (PITCH_LAG_SPAN2+4)], &intrp_d);
+
+ /* determine maximum of the interpolated values */
+ corr = CorrSurfPtr1[peak];
+ corr_max = intrp_a;
+ if (intrp_b > corr_max) corr_max = intrp_b;
+ if (intrp_c > corr_max) corr_max = intrp_c;
+ if (intrp_d > corr_max) corr_max = intrp_d;
+
+ /* determine where the peak sits and fill a 3x3 matrix around it */
+ row = peak / (PITCH_LAG_SPAN2+4);
+ lags1[k] = (double) ((peak - row * (PITCH_LAG_SPAN2+4)) + PITCH_MIN_LAG/2 - 4);
+ lags2[k] = (double) (lags1[k] + PITCH_BW - row);
+ if ( corr > corr_max ) {
+ T[0][0] = CorrSurfPtr1[peak - (PITCH_LAG_SPAN2+5)];
+ T[2][0] = CorrSurfPtr1[peak - (PITCH_LAG_SPAN2+4)];
+ T[1][1] = corr;
+ T[0][2] = CorrSurfPtr1[peak + (PITCH_LAG_SPAN2+4)];
+ T[2][2] = CorrSurfPtr1[peak + (PITCH_LAG_SPAN2+5)];
+ T[1][0] = intrp_a;
+ T[0][1] = intrp_b;
+ T[2][1] = intrp_c;
+ T[1][2] = intrp_d;
+ } else {
+ if (intrp_a == corr_max) {
+ lags1[k] -= 0.5;
+ lags2[k] += 0.5;
+ IntrepolFilter(&CorrSurfPtr1[peak - 2*(PITCH_LAG_SPAN2+5)], &T[0][0]);
+ IntrepolFilter(&CorrSurfPtr1[peak - (2*PITCH_LAG_SPAN2+9)], &T[2][0]);
+ T[1][1] = intrp_a;
+ T[0][2] = intrp_b;
+ T[2][2] = intrp_c;
+ T[1][0] = CorrSurfPtr1[peak - (2*PITCH_LAG_SPAN2+9)];
+ T[0][1] = CorrSurfPtr1[peak - (PITCH_LAG_SPAN2+5)];
+ T[2][1] = CorrSurfPtr1[peak - (PITCH_LAG_SPAN2+4)];
+ T[1][2] = corr;
+ } else if (intrp_b == corr_max) {
+ lags1[k] -= 0.5;
+ lags2[k] -= 0.5;
+ IntrepolFilter(&CorrSurfPtr1[peak - (PITCH_LAG_SPAN2+6)], &T[0][0]);
+ T[2][0] = intrp_a;
+ T[1][1] = intrp_b;
+ IntrepolFilter(&CorrSurfPtr1[peak + (PITCH_LAG_SPAN2+3)], &T[0][2]);
+ T[2][2] = intrp_d;
+ T[1][0] = CorrSurfPtr1[peak - (PITCH_LAG_SPAN2+5)];
+ T[0][1] = CorrSurfPtr1[peak - 1];
+ T[2][1] = corr;
+ T[1][2] = CorrSurfPtr1[peak + (PITCH_LAG_SPAN2+4)];
+ } else if (intrp_c == corr_max) {
+ lags1[k] += 0.5;
+ lags2[k] += 0.5;
+ T[0][0] = intrp_a;
+ IntrepolFilter(&CorrSurfPtr1[peak - (PITCH_LAG_SPAN2+4)], &T[2][0]);
+ T[1][1] = intrp_c;
+ T[0][2] = intrp_d;
+ IntrepolFilter(&CorrSurfPtr1[peak + (PITCH_LAG_SPAN2+5)], &T[2][2]);
+ T[1][0] = CorrSurfPtr1[peak - (PITCH_LAG_SPAN2+4)];
+ T[0][1] = corr;
+ T[2][1] = CorrSurfPtr1[peak + 1];
+ T[1][2] = CorrSurfPtr1[peak + (PITCH_LAG_SPAN2+5)];
+ } else {
+ lags1[k] += 0.5;
+ lags2[k] -= 0.5;
+ T[0][0] = intrp_b;
+ T[2][0] = intrp_c;
+ T[1][1] = intrp_d;
+ IntrepolFilter(&CorrSurfPtr1[peak + 2*(PITCH_LAG_SPAN2+4)], &T[0][2]);
+ IntrepolFilter(&CorrSurfPtr1[peak + (2*PITCH_LAG_SPAN2+9)], &T[2][2]);
+ T[1][0] = corr;
+ T[0][1] = CorrSurfPtr1[peak + (PITCH_LAG_SPAN2+4)];
+ T[2][1] = CorrSurfPtr1[peak + (PITCH_LAG_SPAN2+5)];
+ T[1][2] = CorrSurfPtr1[peak + (2*PITCH_LAG_SPAN2+9)];
+ }
+ }
+
+ /* 2D parabolic interpolation gives more accurate lags and peak value */
+ Intrpol2D(T, &lags1[k], &lags2[k], &peak_vals[k]);
+ }
+
+ /* determine the highest peak, after applying a bias towards short lags */
+ corr_max = 0.0;
+ for (k = 0; k < peaks_ind; k++) {
+ corr = peak_vals[k] * pow(PITCH_PEAK_DECAY, log(lags1[k] + lags2[k]));
+ if (corr > corr_max) {
+ corr_max = corr;
+ peak = k;
+ }
+ }
+
+ lags1[peak] *= 2.0;
+ lags2[peak] *= 2.0;
+
+ if (lags1[peak] < (double) PITCH_MIN_LAG) lags1[peak] = (double) PITCH_MIN_LAG;
+ if (lags2[peak] < (double) PITCH_MIN_LAG) lags2[peak] = (double) PITCH_MIN_LAG;
+ if (lags1[peak] > (double) PITCH_MAX_LAG) lags1[peak] = (double) PITCH_MAX_LAG;
+ if (lags2[peak] > (double) PITCH_MAX_LAG) lags2[peak] = (double) PITCH_MAX_LAG;
+
+ /* store lags of highest peak in output array */
+ lags[0] = lags1[peak];
+ lags[1] = lags1[peak];
+ lags[2] = lags2[peak];
+ lags[3] = lags2[peak];
+ }
+ else
+ {
+ row = max_ind / (PITCH_LAG_SPAN2+4);
+ lags1[0] = (double) ((max_ind - row * (PITCH_LAG_SPAN2+4)) + PITCH_MIN_LAG/2 - 4);
+ lags2[0] = (double) (lags1[0] + PITCH_BW - row);
+
+ if (lags1[0] < (double) PITCH_MIN_LAG) lags1[0] = (double) PITCH_MIN_LAG;
+ if (lags2[0] < (double) PITCH_MIN_LAG) lags2[0] = (double) PITCH_MIN_LAG;
+ if (lags1[0] > (double) PITCH_MAX_LAG) lags1[0] = (double) PITCH_MAX_LAG;
+ if (lags2[0] > (double) PITCH_MAX_LAG) lags2[0] = (double) PITCH_MAX_LAG;
+
+ /* store lags of highest peak in output array */
+ lags[0] = lags1[0];
+ lags[1] = lags1[0];
+ lags[2] = lags2[0];
+ lags[3] = lags2[0];
+ }
+}
+
+RTC_POP_IGNORING_WFRAME_LARGER_THAN()
+
+/* create weighting matrix by orthogonalizing a basis of polynomials of increasing order
+ * t = (0:4)';
+ * A = [t.^0, t.^1, t.^2, t.^3, t.^4];
+ * [Q, dummy] = qr(A);
+ * P.Weight = Q * diag([0, .1, .5, 1, 1]) * Q'; */
+static const double kWeight[5][5] = {
+ { 0.29714285714286, -0.30857142857143, -0.05714285714286, 0.05142857142857, 0.01714285714286},
+ {-0.30857142857143, 0.67428571428571, -0.27142857142857, -0.14571428571429, 0.05142857142857},
+ {-0.05714285714286, -0.27142857142857, 0.65714285714286, -0.27142857142857, -0.05714285714286},
+ { 0.05142857142857, -0.14571428571429, -0.27142857142857, 0.67428571428571, -0.30857142857143},
+ { 0.01714285714286, 0.05142857142857, -0.05714285714286, -0.30857142857143, 0.29714285714286}
+};
+
+/* second order high-pass filter */
+static void WebRtcIsac_Highpass(const double* in,
+ double* out,
+ double* state,
+ size_t N) {
+ /* create high-pass filter coefficients
+ * z = 0.998 * exp(j*2*pi*35/8000);
+ * p = 0.94 * exp(j*2*pi*140/8000);
+ * HP_b = [1, -2*real(z), abs(z)^2];
+ * HP_a = [1, -2*real(p), abs(p)^2]; */
+ static const double a_coef[2] = { 1.86864659625574, -0.88360000000000};
+ static const double b_coef[2] = {-1.99524591718270, 0.99600400000000};
+
+ size_t k;
+
+ for (k=0; k<N; k++) {
+ *out = *in + state[1];
+ state[1] = state[0] + b_coef[0] * *in + a_coef[0] * *out;
+ state[0] = b_coef[1] * *in++ + a_coef[1] * *out++;
+ }
+}
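+
+/* Reading note: this is a transposed direct-form II biquad with b0 == 1.
+ * Relative to the design comment above, a_coef stores the negated
+ * denominator coefficients -HP_a[1..2] and b_coef stores HP_b[1..2], which
+ * is why the feedback terms are added rather than subtracted. */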
+
+RTC_PUSH_IGNORING_WFRAME_LARGER_THAN()
+
+void WebRtcIsac_PitchAnalysis(const double *in, /* PITCH_FRAME_LEN samples */
+ double *out, /* PITCH_FRAME_LEN+QLOOKAHEAD samples */
+ PitchAnalysisStruct *State,
+ double *lags,
+ double *gains)
+{
+ double HPin[PITCH_FRAME_LEN];
+ double Weighted[PITCH_FRAME_LEN];
+ double Whitened[PITCH_FRAME_LEN + QLOOKAHEAD];
+ double inbuf[PITCH_FRAME_LEN + QLOOKAHEAD];
+ double out_G[PITCH_FRAME_LEN + QLOOKAHEAD]; // could be removed by using out instead
+ double out_dG[4][PITCH_FRAME_LEN + QLOOKAHEAD];
+ double old_lag, old_gain;
+ double nrg_wht, tmp;
+ double Wnrg, Wfluct, Wgain;
+ double H[4][4];
+ double grad[4];
+ double dG[4];
+ int k, m, n, iter;
+
+ /* high pass filtering using second order pole-zero filter */
+ WebRtcIsac_Highpass(in, HPin, State->hp_state, PITCH_FRAME_LEN);
+
+ /* copy from state into buffer */
+ memcpy(Whitened, State->whitened_buf, sizeof(double) * QLOOKAHEAD);
+
+ /* compute weighted and whitened signals */
+ WebRtcIsac_WeightingFilter(HPin, &Weighted[0], &Whitened[QLOOKAHEAD], &(State->Wghtstr));
+
+ /* copy from buffer into state */
+ memcpy(State->whitened_buf, Whitened+PITCH_FRAME_LEN, sizeof(double) * QLOOKAHEAD);
+
+ old_lag = State->PFstr_wght.oldlagp[0];
+ old_gain = State->PFstr_wght.oldgainp[0];
+
+ /* initial pitch estimate */
+ WebRtcIsac_InitializePitch(Weighted, old_lag, old_gain, State, lags);
+
+
+ /* Iterative optimization of lags - to be done */
+
+ /* compute energy of whitened signal */
+ nrg_wht = 0.0;
+ for (k = 0; k < PITCH_FRAME_LEN + QLOOKAHEAD; k++)
+ nrg_wht += Whitened[k] * Whitened[k];
+
+
+ /* Iterative optimization of gains */
+
+ /* set weights for energy, gain fluctuation, and spectral gain penalty functions */
+ Wnrg = 1.0 / nrg_wht;
+ Wgain = 0.005;
+ Wfluct = 3.0;
+
+ /* set initial gains */
+ for (k = 0; k < 4; k++)
+ gains[k] = PITCH_MAX_GAIN_06;
+
+ /* two iterations should be enough */
+ for (iter = 0; iter < 2; iter++) {
+ /* compute Jacobian of pre-filter output towards gains */
+ WebRtcIsac_PitchfilterPre_gains(Whitened, out_G, out_dG, &(State->PFstr_wght), lags, gains);
+
+ /* gradient and approximate Hessian (lower triangle) for minimizing the filter's output power */
+ for (k = 0; k < 4; k++) {
+ tmp = 0.0;
+ for (n = 0; n < PITCH_FRAME_LEN + QLOOKAHEAD; n++)
+ tmp += out_G[n] * out_dG[k][n];
+ grad[k] = tmp * Wnrg;
+ }
+ for (k = 0; k < 4; k++) {
+ for (m = 0; m <= k; m++) {
+ tmp = 0.0;
+ for (n = 0; n < PITCH_FRAME_LEN + QLOOKAHEAD; n++)
+ tmp += out_dG[m][n] * out_dG[k][n];
+ H[k][m] = tmp * Wnrg;
+ }
+ }
+
+ /* add gradient and Hessian (lower triangle) for dampening fast gain changes */
+ for (k = 0; k < 4; k++) {
+ tmp = kWeight[k+1][0] * old_gain;
+ for (m = 0; m < 4; m++)
+ tmp += kWeight[k+1][m+1] * gains[m];
+ grad[k] += tmp * Wfluct;
+ }
+ for (k = 0; k < 4; k++) {
+ for (m = 0; m <= k; m++) {
+ H[k][m] += kWeight[k+1][m+1] * Wfluct;
+ }
+ }
+
+ /* add gradient and Hessian for dampening gain */
+ for (k = 0; k < 3; k++) {
+ tmp = 1.0 / (1 - gains[k]);
+ grad[k] += tmp * tmp * Wgain;
+ H[k][k] += 2.0 * tmp * (tmp * tmp * Wgain);
+ }
+ tmp = 1.0 / (1 - gains[3]);
+ grad[3] += 1.33 * (tmp * tmp * Wgain);
+ H[3][3] += 2.66 * tmp * (tmp * tmp * Wgain);
+
+
+ /* compute Cholesky factorization of the Hessian
+ * by overwriting the upper triangle; scale factors on the diagonal
+ * (for non-PC platforms, store the inverses of the diagonal separately to minimize divisions) */
+ H[0][1] = H[1][0] / H[0][0];
+ H[0][2] = H[2][0] / H[0][0];
+ H[0][3] = H[3][0] / H[0][0];
+ H[1][1] -= H[0][0] * H[0][1] * H[0][1];
+ H[1][2] = (H[2][1] - H[0][1] * H[2][0]) / H[1][1];
+ H[1][3] = (H[3][1] - H[0][1] * H[3][0]) / H[1][1];
+ H[2][2] -= H[0][0] * H[0][2] * H[0][2] + H[1][1] * H[1][2] * H[1][2];
+ H[2][3] = (H[3][2] - H[0][2] * H[3][0] - H[1][2] * H[1][1] * H[1][3]) / H[2][2];
+ H[3][3] -= H[0][0] * H[0][3] * H[0][3] + H[1][1] * H[1][3] * H[1][3] + H[2][2] * H[2][3] * H[2][3];
+
+ /* Compute update as delta_gains = -inv(H) * grad */
+ /* copy and negate */
+ for (k = 0; k < 4; k++)
+ dG[k] = -grad[k];
+ /* back substitution */
+ dG[1] -= dG[0] * H[0][1];
+ dG[2] -= dG[0] * H[0][2] + dG[1] * H[1][2];
+ dG[3] -= dG[0] * H[0][3] + dG[1] * H[1][3] + dG[2] * H[2][3];
+ /* scale */
+ for (k = 0; k < 4; k++)
+ dG[k] /= H[k][k];
+ /* back substitution */
+ dG[2] -= dG[3] * H[2][3];
+ dG[1] -= dG[3] * H[1][3] + dG[2] * H[1][2];
+ dG[0] -= dG[3] * H[0][3] + dG[2] * H[0][2] + dG[1] * H[0][1];
+
+ /* update gains and check range */
+ for (k = 0; k < 4; k++) {
+ gains[k] += dG[k];
+ if (gains[k] > PITCH_MAX_GAIN)
+ gains[k] = PITCH_MAX_GAIN;
+ else if (gains[k] < 0.0)
+ gains[k] = 0.0;
+ }
+ }
+
+ /* update state for next frame */
+ WebRtcIsac_PitchfilterPre(Whitened, out, &(State->PFstr_wght), lags, gains);
+
+ /* concatenate previous input's end and current input */
+ memcpy(inbuf, State->inbuf, sizeof(double) * QLOOKAHEAD);
+ memcpy(inbuf+QLOOKAHEAD, in, sizeof(double) * PITCH_FRAME_LEN);
+
+ /* lookahead pitch filtering for masking analysis */
+ WebRtcIsac_PitchfilterPre_la(inbuf, out, &(State->PFstr), lags, gains);
+
+ /* store last part of input */
+ for (k = 0; k < QLOOKAHEAD; k++)
+ State->inbuf[k] = inbuf[k + PITCH_FRAME_LEN];
+}
+
+RTC_POP_IGNORING_WFRAME_LARGER_THAN()
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/pitch_estimator.h b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/pitch_estimator.h
new file mode 100644
index 0000000000..4ab78c20ad
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/pitch_estimator.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * pitch_estimator.h
+ *
+ * Pitch functions
+ *
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_PITCH_ESTIMATOR_H_
+#define MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_PITCH_ESTIMATOR_H_
+
+#include <stddef.h>
+
+#include "modules/audio_coding/codecs/isac/main/source/structs.h"
+
+void WebRtcIsac_PitchAnalysis(
+ const double* in, /* PITCH_FRAME_LEN samples */
+ double* out, /* PITCH_FRAME_LEN+QLOOKAHEAD samples */
+ PitchAnalysisStruct* State,
+ double* lags,
+ double* gains);
+
+#endif /* MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_PITCH_ESTIMATOR_H_ */
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/pitch_filter.c b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/pitch_filter.c
new file mode 100644
index 0000000000..bf03dfff2e
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/pitch_filter.c
@@ -0,0 +1,388 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <math.h>
+#include <memory.h>
+#include <stdlib.h>
+
+#include "modules/audio_coding/codecs/isac/main/source/pitch_estimator.h"
+#include "modules/audio_coding/codecs/isac/main/source/os_specific_inline.h"
+#include "rtc_base/compile_assert_c.h"
+
+/*
+ * We implement the following filters:
+ *
+ * Pre-filtering:
+ * y(z) = x(z) + damper(z) * gain * (x(z) + y(z)) * z ^ (-lag);
+ *
+ * Post-filtering:
+ * y(z) = x(z) - damper(z) * gain * (x(z) + y(z)) * z ^ (-lag);
+ *
+ * Note that `lag` is a floating number so we perform an interpolation to
+ * obtain the correct `lag`.
+ *
+ */
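+
+/* For intuition, a stripped-down sketch of the pre-filter with an integer
+ * lag and the damper omitted (the real code below interpolates a fractional
+ * lag via kIntrpCoef and shapes the tap with kDampFilter); `buf` is the
+ * running history of x[n] + y[n]:
+ *
+ *   y[n]   = x[n] + gain * buf[n - lag];
+ *   buf[n] = x[n] + y[n];
+ */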
+
+static const double kDampFilter[PITCH_DAMPORDER] = {-0.07, 0.25, 0.64, 0.25,
+ -0.07};
+
+/* interpolation coefficients; generated by design_pitch_filter.m */
+static const double kIntrpCoef[PITCH_FRACS][PITCH_FRACORDER] = {
+ {-0.02239172458614, 0.06653315052934, -0.16515880017569, 0.60701333734125,
+ 0.64671399919202, -0.20249000396417, 0.09926548334755, -0.04765933793109,
+ 0.01754159521746},
+ {-0.01985640750434, 0.05816126837866, -0.13991265473714, 0.44560418147643,
+ 0.79117042386876, -0.20266133815188, 0.09585268418555, -0.04533310458084,
+ 0.01654127246314},
+ {-0.01463300534216, 0.04229888475060, -0.09897034715253, 0.28284326017787,
+ 0.90385267956632, -0.16976950138649, 0.07704272393639, -0.03584218578311,
+ 0.01295781500709},
+ {-0.00764851320885, 0.02184035544377, -0.04985561057281, 0.13083306574393,
+ 0.97545011664662, -0.10177807997561, 0.04400901776474, -0.02010737175166,
+ 0.00719783432422},
+ {-0.00000000000000, 0.00000000000000, -0.00000000000001, 0.00000000000001,
+ 0.99999999999999, 0.00000000000001, -0.00000000000001, 0.00000000000000,
+ -0.00000000000000},
+ {0.00719783432422, -0.02010737175166, 0.04400901776474, -0.10177807997562,
+ 0.97545011664663, 0.13083306574393, -0.04985561057280, 0.02184035544377,
+ -0.00764851320885},
+ {0.01295781500710, -0.03584218578312, 0.07704272393640, -0.16976950138650,
+ 0.90385267956634, 0.28284326017785, -0.09897034715252, 0.04229888475059,
+ -0.01463300534216},
+ {0.01654127246315, -0.04533310458085, 0.09585268418557, -0.20266133815190,
+ 0.79117042386878, 0.44560418147640, -0.13991265473712, 0.05816126837865,
+ -0.01985640750433}
+};
+
+/*
+ * Enumerating the operation of the filter.
+ * iSAC has 4 different pitch filters, which are very similar in structure.
+ *
+ * kPitchFilterPre : In this mode the filter is operating as pitch
+ * pre-filter. This is used at the encoder.
+ * kPitchFilterPost : In this mode the filter is operating as pitch
+ * post-filter. This is the inverse of pre-filter and used
+ * in the decoder.
+ * kPitchFilterPreLa : Structurally similar to pre-filtering, but utilizes
+ * a 3 millisecond lookahead. It is used to obtain the
+ * signal for LPC analysis.
+ * kPitchFilterPreGain : Structurally similar to pre-filtering, but
+ * differential changes in gain are considered. It is
+ * used to find the optimal gain.
+ */
+typedef enum {
+ kPitchFilterPre, kPitchFilterPost, kPitchFilterPreLa, kPitchFilterPreGain
+} PitchFilterOperation;
+
+/*
+ * Structure with parameters used for pitch-filtering.
+ * buffer : a buffer where the sum of previous inputs and outputs
+ * are stored.
+ * damper_state : the state of the damping filter. The filter is defined by
+ * `kDampFilter`.
+ * interpol_coeff : pointer to a set of coefficients used to realize
+ * fractional pitch lags by interpolation.
+ * gain : pitch-gain to be applied to the current segment of input.
+ * lag : pitch-lag for the current segment of input.
+ * lag_offset : the offset of lag w.r.t. current sample.
+ * sub_frame : sub-frame index; there are 4 pitch sub-frames in an iSAC
+ * frame.
+ * mode : specifies the usage of the filter. See
+ * 'PitchFilterOperation' for operational modes.
+ * num_samples : number of samples to be processed in each segment.
+ * index : index of the input and output sample.
+ * damper_state_dg : state of damping filter for different trial gains.
+ * gain_mult : differential changes to gain.
+ */
+typedef struct {
+ double buffer[PITCH_INTBUFFSIZE + QLOOKAHEAD];
+ double damper_state[PITCH_DAMPORDER];
+ const double *interpol_coeff;
+ double gain;
+ double lag;
+ int lag_offset;
+
+ int sub_frame;
+ PitchFilterOperation mode;
+ int num_samples;
+ int index;
+
+ double damper_state_dg[4][PITCH_DAMPORDER];
+ double gain_mult[4];
+} PitchFilterParam;
+
+/**********************************************************************
+ * FilterSegment()
+ * Filter one segment, a quarter of a frame.
+ *
+ * Inputs
+ * in_data : pointer to the input signal of 30 ms at 8 kHz sample-rate.
+ * filter_param : pitch filter parameters.
+ *
+ * Outputs
+ * out_data : pointer to a buffer where the filtered signal is written to.
+ * out_dg : [only used in kPitchFilterPreGain] pointer to a buffer
+ * where the output of different gain values (differential
+ * change to gain) is written.
+ */
+static void FilterSegment(const double* in_data, PitchFilterParam* parameters,
+ double* out_data,
+ double out_dg[][PITCH_FRAME_LEN + QLOOKAHEAD]) {
+ int n;
+ int m;
+ int j;
+ double sum;
+ double sum2;
+ /* Index of `parameters->buffer` where the output is written to. */
+ int pos = parameters->index + PITCH_BUFFSIZE;
+ /* Index of `parameters->buffer` where samples are read for fractional-lag
+ * computation. */
+ int pos_lag = pos - parameters->lag_offset;
+
+ for (n = 0; n < parameters->num_samples; ++n) {
+ /* Shift low pass filter states. */
+ for (m = PITCH_DAMPORDER - 1; m > 0; --m) {
+ parameters->damper_state[m] = parameters->damper_state[m - 1];
+ }
+ /* Filter to get fractional pitch. */
+ sum = 0.0;
+ for (m = 0; m < PITCH_FRACORDER; ++m) {
+ sum += parameters->buffer[pos_lag + m] * parameters->interpol_coeff[m];
+ }
+ /* Multiply with gain. */
+ parameters->damper_state[0] = parameters->gain * sum;
+
+ if (parameters->mode == kPitchFilterPreGain) {
+ int lag_index = parameters->index - parameters->lag_offset;
+ int m_tmp = (lag_index < 0) ? -lag_index : 0;
+ /* Update the damper state for the new sample. */
+ for (m = PITCH_DAMPORDER - 1; m > 0; --m) {
+ for (j = 0; j < 4; ++j) {
+ parameters->damper_state_dg[j][m] =
+ parameters->damper_state_dg[j][m - 1];
+ }
+ }
+
+ for (j = 0; j < parameters->sub_frame + 1; ++j) {
+ /* Filter for fractional pitch. */
+ sum2 = 0.0;
+ for (m = PITCH_FRACORDER-1; m >= m_tmp; --m) {
+ /* `lag_index + m` is always larger than or equal to zero; see how
+ * m_tmp is computed. This is equivalent to assuming samples outside
+ * `out_dg[j]` are zero. */
+ sum2 += out_dg[j][lag_index + m] * parameters->interpol_coeff[m];
+ }
+ /* Add the contribution of differential gain change. */
+ parameters->damper_state_dg[j][0] = parameters->gain_mult[j] * sum +
+ parameters->gain * sum2;
+ }
+
+ /* Filter with damping filter, and store the results. */
+ for (j = 0; j < parameters->sub_frame + 1; ++j) {
+ sum = 0.0;
+ for (m = 0; m < PITCH_DAMPORDER; ++m) {
+ sum -= parameters->damper_state_dg[j][m] * kDampFilter[m];
+ }
+ out_dg[j][parameters->index] = sum;
+ }
+ }
+ /* Filter with damping filter. */
+ sum = 0.0;
+ for (m = 0; m < PITCH_DAMPORDER; ++m) {
+ sum += parameters->damper_state[m] * kDampFilter[m];
+ }
+
+ /* Subtract from input and update buffer. */
+ out_data[parameters->index] = in_data[parameters->index] - sum;
+ parameters->buffer[pos] = in_data[parameters->index] +
+ out_data[parameters->index];
+
+ ++parameters->index;
+ ++pos;
+ ++pos_lag;
+ }
+ return;
+}
+
+/* Update filter parameters based on the pitch-gains and pitch-lags. */
+static void Update(PitchFilterParam* parameters) {
+ double fraction;
+ int fraction_index;
+ /* Compute integer lag-offset. */
+ parameters->lag_offset = WebRtcIsac_lrint(parameters->lag + PITCH_FILTDELAY +
+ 0.5);
+ /* Find correct set of coefficients for computing fractional pitch. */
+ fraction = parameters->lag_offset - (parameters->lag + PITCH_FILTDELAY);
+ fraction_index = WebRtcIsac_lrint(PITCH_FRACS * fraction - 0.5);
+ parameters->interpol_coeff = kIntrpCoef[fraction_index];
+
+ if (parameters->mode == kPitchFilterPreGain) {
+ /* If in this mode make a differential change to pitch gain. */
+ parameters->gain_mult[parameters->sub_frame] += 0.2;
+ if (parameters->gain_mult[parameters->sub_frame] > 1.0) {
+ parameters->gain_mult[parameters->sub_frame] = 1.0;
+ }
+ if (parameters->sub_frame > 0) {
+ parameters->gain_mult[parameters->sub_frame - 1] -= 0.2;
+ }
+ }
+}
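+
+/* Worked example (assuming PITCH_FRACS == 8, consistent with the 8 rows of
+ * kIntrpCoef above): if lag + PITCH_FILTDELAY == 40.3, then
+ * lag_offset == lrint(40.8) == 41, fraction == 0.7, and
+ * fraction_index == lrint(8 * 0.7 - 0.5) == 5, i.e. the filter reads 41
+ * samples back and interpolates with kIntrpCoef[5]. */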
+
+/******************************************************************************
+ * FilterFrame()
+ * Filter a frame of 30 millisecond, given pitch-lags and pitch-gains.
+ *
+ * Inputs
+ * in_data : pointer to the input signal of 30 ms at 8 kHz sample-rate.
+ * lags : pointer to pitch-lags, 4 lags per frame.
+ * gains : pointer to pitch-gains, 4 gains per frame.
+ * mode : defines the functionality of the filter. It takes the
+ * following values.
+ * kPitchFilterPre: Pitch pre-filter, used at encoder.
+ * kPitchFilterPost: Pitch post-filter, used at decoder.
+ * kPitchFilterPreLa: Pitch pre-filter with lookahead.
+ * kPitchFilterPreGain: Pitch pre-filter used to obtain optimal
+ * pitch-gains.
+ *
+ * Outputs
+ * out_data : pointer to a buffer where the filtered signal is written to.
+ * out_dg : [only used in kPitchFilterPreGain] pointer to a buffer
+ * where the output of different gain values (differential
+ * change to gain) is written.
+ */
+static void FilterFrame(const double* in_data, PitchFiltstr* filter_state,
+ double* lags, double* gains, PitchFilterOperation mode,
+ double* out_data,
+ double out_dg[][PITCH_FRAME_LEN + QLOOKAHEAD]) {
+ PitchFilterParam filter_parameters;
+ double gain_delta, lag_delta;
+ double old_lag, old_gain;
+ int n;
+ int m;
+ const double kEnhancer = 1.3;
+
+ /* Set up buffer and states. */
+ filter_parameters.index = 0;
+ filter_parameters.lag_offset = 0;
+ filter_parameters.mode = mode;
+ /* Copy states to local variables. */
+ memcpy(filter_parameters.buffer, filter_state->ubuf,
+ sizeof(filter_state->ubuf));
+ RTC_COMPILE_ASSERT(sizeof(filter_parameters.buffer) >=
+ sizeof(filter_state->ubuf));
+ memset(filter_parameters.buffer +
+ sizeof(filter_state->ubuf) / sizeof(filter_state->ubuf[0]),
+ 0, sizeof(filter_parameters.buffer) - sizeof(filter_state->ubuf));
+ memcpy(filter_parameters.damper_state, filter_state->ystate,
+ sizeof(filter_state->ystate));
+
+ if (mode == kPitchFilterPreGain) {
+ /* Clear buffers. */
+ memset(filter_parameters.gain_mult, 0, sizeof(filter_parameters.gain_mult));
+ memset(filter_parameters.damper_state_dg, 0,
+ sizeof(filter_parameters.damper_state_dg));
+ for (n = 0; n < PITCH_SUBFRAMES; ++n) {
+ //memset(out_dg[n], 0, sizeof(double) * (PITCH_FRAME_LEN + QLOOKAHEAD));
+ memset(out_dg[n], 0, sizeof(out_dg[n]));
+ }
+ } else if (mode == kPitchFilterPost) {
+ /* Make output more periodic. Negative sign is to change the structure
+ * of the filter. */
+ for (n = 0; n < PITCH_SUBFRAMES; ++n) {
+ gains[n] *= -kEnhancer;
+ }
+ }
+
+ old_lag = *filter_state->oldlagp;
+ old_gain = *filter_state->oldgainp;
+
+ /* No interpolation if pitch lag step is big. */
+ if ((lags[0] > (PITCH_UPSTEP * old_lag)) ||
+ (lags[0] < (PITCH_DOWNSTEP * old_lag))) {
+ old_lag = lags[0];
+ old_gain = gains[0];
+
+ if (mode == kPitchFilterPreGain) {
+ filter_parameters.gain_mult[0] = 1.0;
+ }
+ }
+
+ filter_parameters.num_samples = PITCH_UPDATE;
+ for (m = 0; m < PITCH_SUBFRAMES; ++m) {
+ /* Set the sub-frame value. */
+ filter_parameters.sub_frame = m;
+ /* Calculate interpolation steps for pitch-lag and pitch-gain. */
+ lag_delta = (lags[m] - old_lag) / PITCH_GRAN_PER_SUBFRAME;
+ filter_parameters.lag = old_lag;
+ gain_delta = (gains[m] - old_gain) / PITCH_GRAN_PER_SUBFRAME;
+ filter_parameters.gain = old_gain;
+ /* Store for the next sub-frame. */
+ old_lag = lags[m];
+ old_gain = gains[m];
+
+ for (n = 0; n < PITCH_GRAN_PER_SUBFRAME; ++n) {
+ /* Step-wise interpolation of pitch gains and lags. As the pitch-lag
+ * changes, some filter parameters need to be updated. */
+ filter_parameters.gain += gain_delta;
+ filter_parameters.lag += lag_delta;
+ /* Update parameters according to new lag value. */
+ Update(&filter_parameters);
+ /* Filter a segment of input. */
+ FilterSegment(in_data, &filter_parameters, out_data, out_dg);
+ }
+ }
+
+ if (mode != kPitchFilterPreGain) {
+ /* Export buffer and states. */
+ memcpy(filter_state->ubuf, &filter_parameters.buffer[PITCH_FRAME_LEN],
+ sizeof(filter_state->ubuf));
+ memcpy(filter_state->ystate, filter_parameters.damper_state,
+ sizeof(filter_state->ystate));
+
+ /* Store for the next frame. */
+ *filter_state->oldlagp = old_lag;
+ *filter_state->oldgainp = old_gain;
+ }
+
+ if ((mode == kPitchFilterPreGain) || (mode == kPitchFilterPreLa)) {
+ /* Filter the lookahead segment; it is treated as the last sub-frame, so
+ * set `filter_parameters` to the last sub-frame. */
+ filter_parameters.sub_frame = PITCH_SUBFRAMES - 1;
+ filter_parameters.num_samples = QLOOKAHEAD;
+ FilterSegment(in_data, &filter_parameters, out_data, out_dg);
+ }
+}
+
+void WebRtcIsac_PitchfilterPre(double* in_data, double* out_data,
+ PitchFiltstr* pf_state, double* lags,
+ double* gains) {
+ FilterFrame(in_data, pf_state, lags, gains, kPitchFilterPre, out_data, NULL);
+}
+
+void WebRtcIsac_PitchfilterPre_la(double* in_data, double* out_data,
+ PitchFiltstr* pf_state, double* lags,
+ double* gains) {
+ FilterFrame(in_data, pf_state, lags, gains, kPitchFilterPreLa, out_data,
+ NULL);
+}
+
+void WebRtcIsac_PitchfilterPre_gains(
+ double* in_data, double* out_data,
+ double out_dg[][PITCH_FRAME_LEN + QLOOKAHEAD], PitchFiltstr *pf_state,
+ double* lags, double* gains) {
+ FilterFrame(in_data, pf_state, lags, gains, kPitchFilterPreGain, out_data,
+ out_dg);
+}
+
+void WebRtcIsac_PitchfilterPost(double* in_data, double* out_data,
+ PitchFiltstr* pf_state, double* lags,
+ double* gains) {
+ FilterFrame(in_data, pf_state, lags, gains, kPitchFilterPost, out_data, NULL);
+}
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/pitch_filter.h b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/pitch_filter.h
new file mode 100644
index 0000000000..9a232de87b
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/pitch_filter.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_PITCH_FILTER_H_
+#define MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_PITCH_FILTER_H_
+
+#include "modules/audio_coding/codecs/isac/main/source/structs.h"
+
+void WebRtcIsac_PitchfilterPre(double* indat,
+ double* outdat,
+ PitchFiltstr* pfp,
+ double* lags,
+ double* gains);
+
+void WebRtcIsac_PitchfilterPost(double* indat,
+ double* outdat,
+ PitchFiltstr* pfp,
+ double* lags,
+ double* gains);
+
+void WebRtcIsac_PitchfilterPre_la(double* indat,
+ double* outdat,
+ PitchFiltstr* pfp,
+ double* lags,
+ double* gains);
+
+void WebRtcIsac_PitchfilterPre_gains(
+ double* indat,
+ double* outdat,
+ double out_dG[][PITCH_FRAME_LEN + QLOOKAHEAD],
+ PitchFiltstr* pfp,
+ double* lags,
+ double* gains);
+
+#endif // MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_PITCH_FILTER_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/pitch_gain_tables.c b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/pitch_gain_tables.c
new file mode 100644
index 0000000000..080432c3a5
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/pitch_gain_tables.c
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/isac/main/source/pitch_gain_tables.h"
+#include "modules/audio_coding/codecs/isac/main/source/settings.h"
+
+/* coding tables for the pitch filter side-info in the entropy coder */
+/********************* Pitch Filter Gain Coefficient Tables ************************/
+/* cdf for quantized pitch filter gains */
+const uint16_t WebRtcIsac_kQPitchGainCdf[255] = {
+ 0, 2, 4, 6, 64, 901, 903, 905, 16954, 16956,
+ 16961, 17360, 17362, 17364, 17366, 17368, 17370, 17372, 17374, 17411,
+ 17514, 17516, 17583, 18790, 18796, 18802, 20760, 20777, 20782, 21722,
+ 21724, 21728, 21738, 21740, 21742, 21744, 21746, 21748, 22224, 22227,
+ 22230, 23214, 23229, 23239, 25086, 25108, 25120, 26088, 26094, 26098,
+ 26175, 26177, 26179, 26181, 26183, 26185, 26484, 26507, 26522, 27705,
+ 27731, 27750, 29767, 29799, 29817, 30866, 30883, 30885, 31025, 31029,
+ 31031, 31033, 31035, 31037, 31114, 31126, 31134, 32687, 32722, 32767,
+ 35718, 35742, 35757, 36943, 36952, 36954, 37115, 37128, 37130, 37132,
+ 37134, 37136, 37143, 37145, 37152, 38843, 38863, 38897, 47458, 47467,
+ 47474, 49040, 49061, 49063, 49145, 49157, 49159, 49161, 49163, 49165,
+ 49167, 49169, 49171, 49757, 49770, 49782, 61333, 61344, 61346, 62860,
+ 62883, 62885, 62887, 62889, 62891, 62893, 62895, 62897, 62899, 62901,
+ 62903, 62905, 62907, 62909, 65496, 65498, 65500, 65521, 65523, 65525,
+ 65527, 65529, 65531, 65533, 65535, 65535, 65535, 65535, 65535, 65535,
+ 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535,
+ 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535,
+ 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535,
+ 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535,
+ 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535,
+ 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535,
+ 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535,
+ 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535,
+ 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535,
+ 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535,
+ 65535, 65535, 65535, 65535, 65535};
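+
+/* A hypothetical illustration, not part of the iSAC decoder: a monotone cdf
+ * table such as the one above is typically consumed by locating the interval
+ * that contains a scaled decoder value `target` in [0, 65535]:
+ *
+ *   for (k = 0; k < num_symbols; k++)
+ *     if (target < cdf[k + 1])
+ *       return k;  // cdf[k] <= target < cdf[k+1]
+ */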
+
+/* index limits and ranges */
+const int16_t WebRtcIsac_kIndexLowerLimitGain[3] = {
+ -7, -2, -1};
+
+const int16_t WebRtcIsac_kIndexUpperLimitGain[3] = {
+ 0, 3, 1};
+
+const uint16_t WebRtcIsac_kIndexMultsGain[2] = {
+ 18, 3};
+
+/* size of cdf table */
+const uint16_t WebRtcIsac_kQCdfTableSizeGain[1] = {
+ 256};
+
+///////////////////////////FIXED POINT
+/* mean values of pitch filter gains in FIXED point */
+const int16_t WebRtcIsac_kQMeanGain1Q12[144] = {
+ 843, 1092, 1336, 1222, 1405, 1656, 1500, 1815, 1843, 1838, 1839, 1843, 1843, 1843, 1843, 1843,
+ 1843, 1843, 814, 846, 1092, 1013, 1174, 1383, 1391, 1511, 1584, 1734, 1753, 1843, 1843, 1843,
+ 1843, 1843, 1843, 1843, 524, 689, 777, 845, 947, 1069, 1090, 1263, 1380, 1447, 1559, 1676,
+ 1645, 1749, 1843, 1843, 1843, 1843, 81, 477, 563, 611, 706, 806, 849, 1012, 1192, 1128,
+ 1330, 1489, 1425, 1576, 1826, 1741, 1843, 1843, 0, 290, 305, 356, 488, 575, 602, 741,
+ 890, 835, 1079, 1196, 1182, 1376, 1519, 1506, 1680, 1843, 0, 47, 97, 69, 289, 381,
+ 385, 474, 617, 664, 803, 1079, 935, 1160, 1269, 1265, 1506, 1741, 0, 0, 0, 0,
+ 112, 120, 190, 283, 442, 343, 526, 809, 684, 935, 1134, 1020, 1265, 1506, 0, 0,
+ 0, 0, 0, 0, 0, 111, 256, 87, 373, 597, 430, 684, 935, 770, 1020, 1265};
+
+const int16_t WebRtcIsac_kQMeanGain2Q12[144] = {
+ 1760, 1525, 1285, 1747, 1671, 1393, 1843, 1826, 1555, 1843, 1784, 1606, 1843, 1843, 1711, 1843,
+ 1843, 1814, 1389, 1275, 1040, 1564, 1414, 1252, 1610, 1495, 1343, 1753, 1592, 1405, 1804, 1720,
+ 1475, 1843, 1814, 1581, 1208, 1061, 856, 1349, 1148, 994, 1390, 1253, 1111, 1495, 1343, 1178,
+ 1770, 1465, 1234, 1814, 1581, 1342, 1040, 793, 713, 1053, 895, 737, 1128, 1003, 861, 1277,
+ 1094, 981, 1475, 1192, 1019, 1581, 1342, 1098, 855, 570, 483, 833, 648, 540, 948, 744,
+ 572, 1009, 844, 636, 1234, 934, 685, 1342, 1217, 984, 537, 318, 124, 603, 423, 350,
+ 687, 479, 322, 791, 581, 430, 987, 671, 488, 1098, 849, 597, 283, 27, 0, 397,
+ 222, 38, 513, 271, 124, 624, 325, 157, 737, 484, 233, 849, 597, 343, 27, 0,
+ 0, 141, 0, 0, 256, 69, 0, 370, 87, 0, 484, 229, 0, 597, 343, 87};
+
+const int16_t WebRtcIsac_kQMeanGain3Q12[144] = {
+ 1843, 1843, 1711, 1843, 1818, 1606, 1843, 1827, 1511, 1814, 1639, 1393, 1760, 1525, 1285, 1656,
+ 1419, 1176, 1835, 1718, 1475, 1841, 1650, 1387, 1648, 1498, 1287, 1600, 1411, 1176, 1522, 1299,
+ 1040, 1419, 1176, 928, 1773, 1461, 1128, 1532, 1355, 1202, 1429, 1260, 1115, 1398, 1151, 1025,
+ 1172, 1080, 790, 1176, 928, 677, 1475, 1147, 1019, 1276, 1096, 922, 1214, 1010, 901, 1057,
+ 893, 800, 1040, 796, 734, 928, 677, 424, 1137, 897, 753, 1120, 830, 710, 875, 751,
+ 601, 795, 642, 583, 790, 544, 475, 677, 474, 140, 987, 750, 482, 697, 573, 450,
+ 691, 487, 303, 661, 394, 332, 537, 303, 220, 424, 168, 0, 737, 484, 229, 624,
+ 348, 153, 441, 261, 136, 397, 166, 51, 283, 27, 0, 168, 0, 0, 484, 229,
+ 0, 370, 57, 0, 256, 43, 0, 141, 0, 0, 27, 0, 0, 0, 0, 0};
+
+
+const int16_t WebRtcIsac_kQMeanGain4Q12[144] = {
+ 1843, 1843, 1843, 1843, 1841, 1843, 1500, 1821, 1843, 1222, 1434, 1656, 843, 1092, 1336, 504,
+ 757, 1007, 1843, 1843, 1843, 1838, 1791, 1843, 1265, 1505, 1599, 965, 1219, 1425, 730, 821,
+ 1092, 249, 504, 757, 1783, 1819, 1843, 1351, 1567, 1727, 1096, 1268, 1409, 805, 961, 1131,
+ 444, 670, 843, 0, 249, 504, 1425, 1655, 1743, 1096, 1324, 1448, 822, 1019, 1199, 490,
+ 704, 867, 81, 450, 555, 0, 0, 249, 1247, 1428, 1530, 881, 1073, 1283, 610, 759,
+ 939, 278, 464, 645, 0, 200, 270, 0, 0, 0, 935, 1163, 1410, 528, 790, 1068,
+ 377, 499, 717, 173, 240, 274, 0, 43, 62, 0, 0, 0, 684, 935, 1182, 343,
+ 551, 735, 161, 262, 423, 0, 55, 27, 0, 0, 0, 0, 0, 0, 430, 684,
+ 935, 87, 377, 597, 0, 46, 256, 0, 0, 0, 0, 0, 0, 0, 0, 0};
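+
+/* The Q12 tables above saturate at 1843, i.e. round(0.45 * 4096), matching
+ * PITCH_MAX_GAIN / PITCH_MAX_GAIN_Q12 in settings.h. A minimal sketch of the
+ * Q12-to-float conversion; `PitchGainQ12ToFloat` is an illustrative helper,
+ * not part of the iSAC API. */
+static float PitchGainQ12ToFloat(int16_t gain_q12) {
+  return (float)gain_q12 / 4096.0f; /* 1843 -> ~0.45, the gain cap */
+}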
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/pitch_gain_tables.h b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/pitch_gain_tables.h
new file mode 100644
index 0000000000..145fd4e6aa
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/pitch_gain_tables.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * pitch_gain_tables.h
+ *
+ * This file contains tables for the pitch filter side-info in the entropy
+ * coder.
+ *
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_PITCH_GAIN_TABLES_H_
+#define MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_PITCH_GAIN_TABLES_H_
+
+#include <stdint.h>
+
+/* header file for coding tables for the pitch filter side-info in the entropy
+ * coder */
+/********************* Pitch Filter Gain Coefficient Tables ************************/
+/* cdf for quantized pitch filter gains */
+extern const uint16_t WebRtcIsac_kQPitchGainCdf[255];
+
+/* index limits and ranges */
+extern const int16_t WebRtcIsac_kIndexLowerLimitGain[3];
+
+extern const int16_t WebRtcIsac_kIndexUpperLimitGain[3];
+extern const uint16_t WebRtcIsac_kIndexMultsGain[2];
+
+/* mean values of pitch filter gains */
+extern const int16_t WebRtcIsac_kQMeanGain1Q12[144];
+extern const int16_t WebRtcIsac_kQMeanGain2Q12[144];
+extern const int16_t WebRtcIsac_kQMeanGain3Q12[144];
+extern const int16_t WebRtcIsac_kQMeanGain4Q12[144];
+
+/* size of cdf table */
+extern const uint16_t WebRtcIsac_kQCdfTableSizeGain[1];
+
+#endif /* MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_PITCH_GAIN_TABLES_H_ */
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/pitch_lag_tables.c b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/pitch_lag_tables.c
new file mode 100644
index 0000000000..57d12021ac
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/pitch_lag_tables.c
@@ -0,0 +1,277 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/isac/main/source/pitch_lag_tables.h"
+#include "modules/audio_coding/codecs/isac/main/source/settings.h"
+
+/* coding tables for the pitch filter side-info in the entropy coder */
+/********************* Pitch Filter Lag Coefficient Tables ************************/
+
+/* tables for use with small pitch gain */
+
+/* cdf for quantized pitch filter lags */
+const uint16_t WebRtcIsac_kQPitchLagCdf1Lo[127] = {
+ 0, 134, 336, 549, 778, 998, 1264, 1512, 1777, 2070,
+ 2423, 2794, 3051, 3361, 3708, 3979, 4315, 4610, 4933, 5269,
+ 5575, 5896, 6155, 6480, 6816, 7129, 7477, 7764, 8061, 8358,
+ 8718, 9020, 9390, 9783, 10177, 10543, 10885, 11342, 11795, 12213,
+ 12680, 13096, 13524, 13919, 14436, 14903, 15349, 15795, 16267, 16734,
+ 17266, 17697, 18130, 18632, 19080, 19447, 19884, 20315, 20735, 21288,
+ 21764, 22264, 22723, 23193, 23680, 24111, 24557, 25022, 25537, 26082,
+ 26543, 27090, 27620, 28139, 28652, 29149, 29634, 30175, 30692, 31273,
+ 31866, 32506, 33059, 33650, 34296, 34955, 35629, 36295, 36967, 37726,
+ 38559, 39458, 40364, 41293, 42256, 43215, 44231, 45253, 46274, 47359,
+ 48482, 49678, 50810, 51853, 53016, 54148, 55235, 56263, 57282, 58363,
+ 59288, 60179, 61076, 61806, 62474, 63129, 63656, 64160, 64533, 64856,
+ 65152, 65535, 65535, 65535, 65535, 65535, 65535};
+
+const uint16_t WebRtcIsac_kQPitchLagCdf2Lo[20] = {
+ 0, 429, 3558, 5861, 8558, 11639, 15210, 19502, 24773, 31983,
+ 42602, 48567, 52601, 55676, 58160, 60172, 61889, 63235, 65383, 65535};
+
+const uint16_t WebRtcIsac_kQPitchLagCdf3Lo[2] = {
+ 0, 65535};
+
+const uint16_t WebRtcIsac_kQPitchLagCdf4Lo[10] = {
+ 0, 2966, 6368, 11182, 19431, 37793, 48532, 55353, 60626, 65535};
+
+const uint16_t *WebRtcIsac_kQPitchLagCdfPtrLo[4] = {WebRtcIsac_kQPitchLagCdf1Lo, WebRtcIsac_kQPitchLagCdf2Lo, WebRtcIsac_kQPitchLagCdf3Lo, WebRtcIsac_kQPitchLagCdf4Lo};
+
+/* size of first cdf table */
+const uint16_t WebRtcIsac_kQPitchLagCdfSizeLo[1] = {128};
+
+/* index limits and ranges */
+const int16_t WebRtcIsac_kQIndexLowerLimitLagLo[4] = {
+-140, -9, 0, -4};
+
+const int16_t WebRtcIsac_kQIndexUpperLimitLagLo[4] = {
+-20, 9, 0, 4};
+
+/* initial index for arithmetic decoder */
+const uint16_t WebRtcIsac_kQInitIndexLagLo[3] = {
+ 10, 1, 5};
+
+/* mean values of pitch filter lags */
+const double WebRtcIsac_kQMeanLag2Lo[19] = {
+-17.21385070, -15.82678944, -14.07123081, -12.03003877, -10.01311864, -8.00794627, -5.91162987, -3.89231876, -1.90220980, -0.01879275,
+ 1.89144232, 3.88123171, 5.92146992, 7.96435361, 9.98923648, 11.98266347, 13.96101002, 15.74855713, 17.10976611};
+
+const double WebRtcIsac_kQMeanLag3Lo[1] = {
+ 0.00000000};
+
+const double WebRtcIsac_kQMeanLag4Lo[9] = {
+-7.76246496, -5.92083980, -3.94095226, -1.89502305, 0.03724681, 1.93054221, 3.96443467, 5.91726366, 7.78434291};
+
+const double WebRtcIsac_kQPitchLagStepsizeLo = 2.000000;
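+
+/* A minimal sketch of how the limit tables bound a decoded lag index; the
+ * helper name is hypothetical, not the actual entropy-decoder code. Index k
+ * selects one of the four sub-lags coded with the cdf tables above. */
+static int ClampLagIndexLo(int k, int idx) {
+  if (idx < WebRtcIsac_kQIndexLowerLimitLagLo[k])
+    idx = WebRtcIsac_kQIndexLowerLimitLagLo[k];
+  if (idx > WebRtcIsac_kQIndexUpperLimitLagLo[k])
+    idx = WebRtcIsac_kQIndexUpperLimitLagLo[k];
+  return idx;
+}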
+
+
+/* tables for use with medium pitch gain */
+
+/* cdf for quantized pitch filter lags */
+const uint16_t WebRtcIsac_kQPitchLagCdf1Mid[255] = {
+ 0, 28, 61, 88, 121, 149, 233, 331, 475, 559,
+ 624, 661, 689, 712, 745, 791, 815, 843, 866, 922,
+ 959, 1024, 1061, 1117, 1178, 1238, 1280, 1350, 1453, 1513,
+ 1564, 1625, 1671, 1741, 1788, 1904, 2072, 2421, 2626, 2770,
+ 2840, 2900, 2942, 3012, 3068, 3115, 3147, 3194, 3254, 3319,
+ 3366, 3520, 3678, 3780, 3850, 3911, 3957, 4032, 4106, 4185,
+ 4292, 4474, 4683, 4842, 5019, 5191, 5321, 5428, 5540, 5675,
+ 5763, 5847, 5959, 6127, 6304, 6564, 6839, 7090, 7263, 7421,
+ 7556, 7728, 7872, 7984, 8142, 8361, 8580, 8743, 8938, 9227,
+ 9409, 9539, 9674, 9795, 9930, 10060, 10177, 10382, 10614, 10861,
+ 11038, 11271, 11415, 11629, 11792, 12044, 12193, 12416, 12574, 12821,
+ 13007, 13235, 13445, 13654, 13901, 14134, 14488, 15000, 15703, 16285,
+ 16504, 16797, 17086, 17328, 17579, 17807, 17998, 18268, 18538, 18836,
+ 19087, 19274, 19474, 19716, 19935, 20270, 20833, 21303, 21532, 21741,
+ 21978, 22207, 22523, 22770, 23054, 23613, 23943, 24204, 24399, 24651,
+ 24832, 25074, 25270, 25549, 25759, 26015, 26150, 26424, 26713, 27048,
+ 27342, 27504, 27681, 27854, 28021, 28207, 28412, 28664, 28859, 29064,
+ 29278, 29548, 29748, 30107, 30377, 30656, 30856, 31164, 31452, 31755,
+ 32011, 32328, 32626, 32919, 33319, 33789, 34329, 34925, 35396, 35973,
+ 36443, 36964, 37551, 38156, 38724, 39357, 40023, 40908, 41587, 42602,
+ 43924, 45037, 45810, 46597, 47421, 48291, 49092, 50051, 51448, 52719,
+ 53440, 54241, 54944, 55977, 56676, 57299, 57872, 58389, 59059, 59688,
+ 60237, 60782, 61094, 61573, 61890, 62290, 62658, 63030, 63217, 63454,
+ 63622, 63882, 64003, 64273, 64427, 64529, 64581, 64697, 64758, 64902,
+ 65414, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535,
+ 65535, 65535, 65535, 65535, 65535};
+
+const uint16_t WebRtcIsac_kQPitchLagCdf2Mid[36] = {
+ 0, 71, 335, 581, 836, 1039, 1323, 1795, 2258, 2608,
+ 3005, 3591, 4243, 5344, 7163, 10583, 16848, 28078, 49448, 57007,
+ 60357, 61850, 62837, 63437, 63872, 64188, 64377, 64614, 64774, 64949,
+ 65039, 65115, 65223, 65360, 65474, 65535};
+
+const uint16_t WebRtcIsac_kQPitchLagCdf3Mid[2] = {
+ 0, 65535};
+
+const uint16_t WebRtcIsac_kQPitchLagCdf4Mid[20] = {
+ 0, 28, 246, 459, 667, 1045, 1523, 2337, 4337, 11347,
+ 44231, 56709, 60781, 62243, 63161, 63969, 64608, 65062, 65502, 65535};
+
+const uint16_t *WebRtcIsac_kQPitchLagCdfPtrMid[4] = {WebRtcIsac_kQPitchLagCdf1Mid, WebRtcIsac_kQPitchLagCdf2Mid, WebRtcIsac_kQPitchLagCdf3Mid, WebRtcIsac_kQPitchLagCdf4Mid};
+
+/* size of first cdf table */
+const uint16_t WebRtcIsac_kQPitchLagCdfSizeMid[1] = {256};
+
+/* index limits and ranges */
+const int16_t WebRtcIsac_kQIndexLowerLimitLagMid[4] = {
+-280, -17, 0, -9};
+
+const int16_t WebRtcIsac_kQIndexUpperLimitLagMid[4] = {
+-40, 17, 0, 9};
+
+/* initial index for arithmetic decoder */
+const uint16_t WebRtcIsac_kQInitIndexLagMid[3] = {
+ 18, 1, 10};
+
+/* mean values of pitch filter lags */
+const double WebRtcIsac_kQMeanLag2Mid[35] = {
+-16.89183900, -15.86949778, -15.05476653, -14.00664348, -13.02793036, -12.07324237, -11.00542532, -10.11250602, -8.90792971, -8.02474753,
+-7.00426767, -5.94055287, -4.98251338, -3.91053158, -2.98820425, -1.93524245, -0.92978085, -0.01722509, 0.91317387, 1.92973955,
+ 2.96908851, 3.93728974, 4.96308471, 5.92244151, 7.08673497, 8.00993708, 9.04656316, 9.98538742, 10.97851694, 11.94772884,
+ 13.02426166, 14.00039951, 15.01347042, 15.80758023, 16.94086895};
+
+const double WebRtcIsac_kQMeanLag3Mid[1] = {
+ 0.00000000};
+
+const double WebRtcIsac_kQMeanLag4Mid[19] = {
+-8.60409403, -7.89198395, -7.03450280, -5.86260421, -4.93822322, -3.93078706, -2.91302322, -1.91824007, -0.87003282, 0.02822649,
+ 0.89951758, 1.87495484, 2.91802604, 3.96874074, 5.06571703, 5.93618227, 7.00520185, 7.88497726, 8.64160364};
+
+const double WebRtcIsac_kQPitchLagStepsizeMid = 1.000000;
+
+
+/* tables for use with large pitch gain */
+
+/* cdf for quantized pitch filter lags */
+const uint16_t WebRtcIsac_kQPitchLagCdf1Hi[511] = {
+ 0, 7, 18, 33, 69, 105, 156, 228, 315, 612,
+ 680, 691, 709, 724, 735, 738, 742, 746, 749, 753,
+ 756, 760, 764, 774, 782, 785, 789, 796, 800, 803,
+ 807, 814, 818, 822, 829, 832, 847, 854, 858, 869,
+ 876, 883, 898, 908, 934, 977, 1010, 1050, 1060, 1064,
+ 1075, 1078, 1086, 1089, 1093, 1104, 1111, 1122, 1133, 1136,
+ 1151, 1162, 1183, 1209, 1252, 1281, 1339, 1364, 1386, 1401,
+ 1411, 1415, 1426, 1430, 1433, 1440, 1448, 1455, 1462, 1477,
+ 1487, 1495, 1502, 1506, 1509, 1516, 1524, 1531, 1535, 1542,
+ 1553, 1556, 1578, 1589, 1611, 1625, 1639, 1643, 1654, 1665,
+ 1672, 1687, 1694, 1705, 1708, 1719, 1730, 1744, 1752, 1759,
+ 1791, 1795, 1820, 1867, 1886, 1915, 1936, 1943, 1965, 1987,
+ 2041, 2099, 2161, 2175, 2200, 2211, 2226, 2233, 2244, 2251,
+ 2266, 2280, 2287, 2298, 2309, 2316, 2331, 2342, 2356, 2378,
+ 2403, 2418, 2447, 2497, 2544, 2602, 2863, 2895, 2903, 2935,
+ 2950, 2971, 3004, 3011, 3018, 3029, 3040, 3062, 3087, 3127,
+ 3152, 3170, 3199, 3243, 3293, 3322, 3340, 3377, 3402, 3427,
+ 3474, 3518, 3543, 3579, 3601, 3637, 3659, 3706, 3731, 3760,
+ 3818, 3847, 3869, 3901, 3920, 3952, 4068, 4169, 4220, 4271,
+ 4524, 4571, 4604, 4632, 4672, 4730, 4777, 4806, 4857, 4904,
+ 4951, 5002, 5031, 5060, 5107, 5150, 5212, 5266, 5331, 5382,
+ 5432, 5490, 5544, 5610, 5700, 5762, 5812, 5874, 5972, 6022,
+ 6091, 6163, 6232, 6305, 6402, 6540, 6685, 6880, 7090, 7271,
+ 7379, 7452, 7542, 7625, 7687, 7770, 7843, 7911, 7966, 8024,
+ 8096, 8190, 8252, 8320, 8411, 8501, 8585, 8639, 8751, 8842,
+ 8918, 8986, 9066, 9127, 9203, 9269, 9345, 9406, 9464, 9536,
+ 9612, 9667, 9735, 9844, 9931, 10036, 10119, 10199, 10260, 10358,
+ 10441, 10514, 10666, 10734, 10872, 10951, 11053, 11125, 11223, 11324,
+ 11516, 11664, 11737, 11816, 11892, 12008, 12120, 12200, 12280, 12392,
+ 12490, 12576, 12685, 12812, 12917, 13003, 13108, 13210, 13300, 13384,
+ 13470, 13579, 13673, 13771, 13879, 13999, 14136, 14201, 14368, 14614,
+ 14759, 14867, 14958, 15030, 15121, 15189, 15280, 15385, 15461, 15555,
+ 15653, 15768, 15884, 15971, 16069, 16145, 16210, 16279, 16380, 16463,
+ 16539, 16615, 16688, 16818, 16919, 17017, 18041, 18338, 18523, 18649,
+ 18790, 18917, 19047, 19167, 19315, 19460, 19601, 19731, 19858, 20068,
+ 20173, 20318, 20466, 20625, 20741, 20911, 21045, 21201, 21396, 21588,
+ 21816, 22022, 22305, 22547, 22786, 23072, 23322, 23600, 23879, 24168,
+ 24433, 24769, 25120, 25511, 25895, 26289, 26792, 27219, 27683, 28077,
+ 28566, 29094, 29546, 29977, 30491, 30991, 31573, 32105, 32594, 33173,
+ 33788, 34497, 35181, 35833, 36488, 37255, 37921, 38645, 39275, 39894,
+ 40505, 41167, 41790, 42431, 43096, 43723, 44385, 45134, 45858, 46607,
+ 47349, 48091, 48768, 49405, 49955, 50555, 51167, 51985, 52611, 53078,
+ 53494, 53965, 54435, 54996, 55601, 56125, 56563, 56838, 57244, 57566,
+ 57967, 58297, 58771, 59093, 59419, 59647, 59886, 60143, 60461, 60693,
+ 60917, 61170, 61416, 61634, 61891, 62122, 62310, 62455, 62632, 62839,
+ 63103, 63436, 63639, 63805, 63906, 64015, 64192, 64355, 64475, 64558,
+ 64663, 64742, 64811, 64865, 64916, 64956, 64981, 65025, 65068, 65115,
+ 65195, 65314, 65419, 65535, 65535, 65535, 65535, 65535, 65535, 65535,
+ 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535,
+ 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535,
+ 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535,
+ 65535};
+
+const uint16_t WebRtcIsac_kQPitchLagCdf2Hi[68] = {
+ 0, 7, 11, 22, 37, 52, 56, 59, 81, 85,
+ 89, 96, 115, 130, 137, 152, 170, 181, 193, 200,
+ 207, 233, 237, 259, 289, 318, 363, 433, 592, 992,
+ 1607, 3062, 6149, 12206, 25522, 48368, 58223, 61918, 63640, 64584,
+ 64943, 65098, 65206, 65268, 65294, 65335, 65350, 65372, 65387, 65402,
+ 65413, 65420, 65428, 65435, 65439, 65450, 65454, 65468, 65472, 65476,
+ 65483, 65491, 65498, 65505, 65516, 65520, 65528, 65535};
+
+const uint16_t WebRtcIsac_kQPitchLagCdf3Hi[2] = {
+ 0, 65535};
+
+const uint16_t WebRtcIsac_kQPitchLagCdf4Hi[35] = {
+ 0, 7, 19, 30, 41, 48, 63, 74, 82, 96,
+ 122, 152, 215, 330, 701, 2611, 10931, 48106, 61177, 64341,
+ 65112, 65238, 65309, 65338, 65364, 65379, 65401, 65427, 65453, 65465,
+ 65476, 65490, 65509, 65528, 65535};
+
+const uint16_t *WebRtcIsac_kQPitchLagCdfPtrHi[4] = {WebRtcIsac_kQPitchLagCdf1Hi, WebRtcIsac_kQPitchLagCdf2Hi, WebRtcIsac_kQPitchLagCdf3Hi, WebRtcIsac_kQPitchLagCdf4Hi};
+
+/* size of first cdf table */
+const uint16_t WebRtcIsac_kQPitchLagCdfSizeHi[1] = {512};
+
+/* index limits and ranges */
+const int16_t WebRtcIsac_kQindexLowerLimitLagHi[4] = {
+-552, -34, 0, -16};
+
+const int16_t WebRtcIsac_kQindexUpperLimitLagHi[4] = {
+-80, 32, 0, 17};
+
+/* initial index for arithmetic decoder */
+const uint16_t WebRtcIsac_kQInitIndexLagHi[3] = {
+ 34, 1, 18};
+
+/* mean values of pitch filter lags */
+const double WebRtcIsac_kQMeanLag2Hi[67] = {
+-17.07263295, -16.50000000, -15.83966081, -15.55613708, -14.96948007, -14.50000000, -14.00000000, -13.48377986, -13.00000000, -12.50000000,
+-11.93199636, -11.44530414, -11.04197641, -10.39910301, -10.15202337, -9.51322461, -8.93357741, -8.46456632, -8.10270672, -7.53751847,
+-6.98686404, -6.50000000, -6.08463150, -5.46872991, -5.00864717, -4.50163760, -4.01382410, -3.43856708, -2.96898001, -2.46554810,
+-1.96861004, -1.47106701, -0.97197237, -0.46561654, -0.00531409, 0.45767857, 0.96777907, 1.47507903, 1.97740425, 2.46695420,
+ 3.00695774, 3.47167185, 4.02712538, 4.49280007, 5.01087640, 5.48191963, 6.04916550, 6.51511058, 6.97297819, 7.46565499,
+ 8.01489405, 8.39912001, 8.91819757, 9.50000000, 10.11654065, 10.50000000, 11.03712583, 11.50000000, 12.00000000, 12.38964346,
+ 12.89466127, 13.43657881, 13.96013840, 14.46279912, 15.00000000, 15.39412269, 15.96662441};
+
+const double WebRtcIsac_kQMeanLag3Hi[1] = {
+ 0.00000000};
+
+const double WebRtcIsac_kQMeanLag4Hi[34] = {
+-7.98331221, -7.47988769, -7.03626557, -6.52708003, -6.06982173, -5.51856292, -5.05827033, -4.45909878, -3.99125864, -3.45308135,
+-3.02328139, -2.47297273, -1.94341995, -1.44699056, -0.93612243, -0.43012406, 0.01120357, 0.44054812, 0.93199883, 1.45669587,
+ 1.97218322, 2.50187419, 2.98748690, 3.49343202, 4.01660147, 4.50984306, 5.01402683, 5.58936797, 5.91787793, 6.59998900,
+ 6.85034315, 7.53503316, 7.87711194, 8.53631648};
+
+const double WebRtcIsac_kQPitchLagStepsizeHi = 0.500000;
+
+/* transform matrix */
+const double WebRtcIsac_kTransform[4][4] = {
+{-0.50000000, -0.50000000, -0.50000000, -0.50000000},
+{ 0.67082039, 0.22360680, -0.22360680, -0.67082039},
+{ 0.50000000, -0.50000000, -0.50000000, 0.50000000},
+{ 0.22360680, -0.67082039, 0.67082039, -0.22360680}};
+
+/* transpose transform matrix */
+const double WebRtcIsac_kTransformTranspose[4][4] = {
+{-0.50000000, 0.67082039, 0.50000000, 0.22360680},
+{-0.50000000, 0.22360680, -0.50000000, -0.67082039},
+{-0.50000000, -0.22360680, -0.50000000, 0.67082039},
+{-0.50000000, -0.67082039, 0.50000000, -0.22360680}};
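+
+/* The rows of WebRtcIsac_kTransform form an orthonormal basis (entries are
+ * +/-0.5, +/-sqrt(0.45), and +/-sqrt(0.05)), so the transpose table above is
+ * also its inverse. A sketch verifying T * T^T ~= I; the checker function is
+ * illustrative only, not part of the original source. */
+static int CheckTransformOrthonormal(void) {
+  int i, j, k;
+  for (i = 0; i < 4; i++) {
+    for (j = 0; j < 4; j++) {
+      double sum = 0.0;
+      for (k = 0; k < 4; k++)
+        sum += WebRtcIsac_kTransform[i][k] * WebRtcIsac_kTransformTranspose[k][j];
+      sum -= (i == j) ? 1.0 : 0.0;
+      if (sum > 1e-6 || sum < -1e-6)
+        return 0; /* not orthonormal to within tolerance */
+    }
+  }
+  return 1;
+}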
+
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/pitch_lag_tables.h b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/pitch_lag_tables.h
new file mode 100644
index 0000000000..b48e358a5a
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/pitch_lag_tables.h
@@ -0,0 +1,116 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * pitch_lag_tables.h
+ *
+ * This file contains tables for the pitch filter side-info in the entropy
+ * coder.
+ *
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_PITCH_LAG_TABLES_H_
+#define MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_PITCH_LAG_TABLES_H_
+
+#include <stdint.h>
+
+/* header file for coding tables for the pitch filter side-info in the entropy
+ * coder */
+/********************* Pitch Filter Lag Coefficient Tables ************************/
+
+/* tables for use with small pitch gain */
+
+/* cdfs for quantized pitch lags */
+extern const uint16_t WebRtcIsac_kQPitchLagCdf1Lo[127];
+extern const uint16_t WebRtcIsac_kQPitchLagCdf2Lo[20];
+extern const uint16_t WebRtcIsac_kQPitchLagCdf3Lo[2];
+extern const uint16_t WebRtcIsac_kQPitchLagCdf4Lo[10];
+
+extern const uint16_t* WebRtcIsac_kQPitchLagCdfPtrLo[4];
+
+/* size of first cdf table */
+extern const uint16_t WebRtcIsac_kQPitchLagCdfSizeLo[1];
+
+/* index limits and ranges */
+extern const int16_t WebRtcIsac_kQIndexLowerLimitLagLo[4];
+extern const int16_t WebRtcIsac_kQIndexUpperLimitLagLo[4];
+
+/* initial index for arithmetic decoder */
+extern const uint16_t WebRtcIsac_kQInitIndexLagLo[3];
+
+/* mean values of pitch filter lags */
+extern const double WebRtcIsac_kQMeanLag2Lo[19];
+extern const double WebRtcIsac_kQMeanLag3Lo[1];
+extern const double WebRtcIsac_kQMeanLag4Lo[9];
+
+extern const double WebRtcIsac_kQPitchLagStepsizeLo;
+
+/* tables for use with medium pitch gain */
+
+/* cdfs for quantized pitch lags */
+extern const uint16_t WebRtcIsac_kQPitchLagCdf1Mid[255];
+extern const uint16_t WebRtcIsac_kQPitchLagCdf2Mid[36];
+extern const uint16_t WebRtcIsac_kQPitchLagCdf3Mid[2];
+extern const uint16_t WebRtcIsac_kQPitchLagCdf4Mid[20];
+
+extern const uint16_t* WebRtcIsac_kQPitchLagCdfPtrMid[4];
+
+/* size of first cdf table */
+extern const uint16_t WebRtcIsac_kQPitchLagCdfSizeMid[1];
+
+/* index limits and ranges */
+extern const int16_t WebRtcIsac_kQIndexLowerLimitLagMid[4];
+extern const int16_t WebRtcIsac_kQIndexUpperLimitLagMid[4];
+
+/* initial index for arithmetic decoder */
+extern const uint16_t WebRtcIsac_kQInitIndexLagMid[3];
+
+/* mean values of pitch filter lags */
+extern const double WebRtcIsac_kQMeanLag2Mid[35];
+extern const double WebRtcIsac_kQMeanLag3Mid[1];
+extern const double WebRtcIsac_kQMeanLag4Mid[19];
+
+extern const double WebRtcIsac_kQPitchLagStepsizeMid;
+
+/* tables for use with large pitch gain */
+
+/* cdfs for quantized pitch lags */
+extern const uint16_t WebRtcIsac_kQPitchLagCdf1Hi[511];
+extern const uint16_t WebRtcIsac_kQPitchLagCdf2Hi[68];
+extern const uint16_t WebRtcIsac_kQPitchLagCdf3Hi[2];
+extern const uint16_t WebRtcIsac_kQPitchLagCdf4Hi[35];
+
+extern const uint16_t* WebRtcIsac_kQPitchLagCdfPtrHi[4];
+
+/* size of first cdf table */
+extern const uint16_t WebRtcIsac_kQPitchLagCdfSizeHi[1];
+
+/* index limits and ranges */
+extern const int16_t WebRtcIsac_kQindexLowerLimitLagHi[4];
+extern const int16_t WebRtcIsac_kQindexUpperLimitLagHi[4];
+
+/* initial index for arithmetic decoder */
+extern const uint16_t WebRtcIsac_kQInitIndexLagHi[3];
+
+/* mean values of pitch filter lags */
+extern const double WebRtcIsac_kQMeanLag2Hi[67];
+extern const double WebRtcIsac_kQMeanLag3Hi[1];
+extern const double WebRtcIsac_kQMeanLag4Hi[34];
+
+extern const double WebRtcIsac_kQPitchLagStepsizeHi;
+
+/* transform matrix */
+extern const double WebRtcIsac_kTransform[4][4];
+
+/* transpose transform matrix */
+extern const double WebRtcIsac_kTransformTranspose[4][4];
+
+#endif /* MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_PITCH_LAG_TABLES_H_ */
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/settings.h b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/settings.h
new file mode 100644
index 0000000000..abce90c4f5
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/settings.h
@@ -0,0 +1,196 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * settings.h
+ *
+ * Declaration of #defines used in the iSAC codec
+ *
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_SETTINGS_H_
+#define MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_SETTINGS_H_
+
+/* sampling frequency (Hz) */
+#define FS 16000
+
+/* number of samples per frame (either 320 (20ms), 480 (30ms) or 960 (60ms)) */
+#define INITIAL_FRAMESAMPLES 960
+
+/* Do not modify the following; it would have to change if a 20 ms
+ * frame-size option were added. */
+/**********************************************************************/
+/* milliseconds */
+#define FRAMESIZE 30
+/* number of samples per frame processed in the encoder, 480 */
+#define FRAMESAMPLES 480 /* ((FRAMESIZE*FS)/1000) */
+#define FRAMESAMPLES_HALF 240
+#define FRAMESAMPLES_QUARTER 120
+/**********************************************************************/
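+
+/* Illustrative compile-time checks of the identities above, assuming a C11
+ * compiler; the original build carries no such asserts. */
+#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L
+_Static_assert(FRAMESAMPLES == (FRAMESIZE * FS) / 1000,
+               "FRAMESAMPLES is 30 ms at FS");
+_Static_assert(FRAMESAMPLES_HALF == FRAMESAMPLES / 2, "half frame");
+_Static_assert(FRAMESAMPLES_QUARTER == FRAMESAMPLES / 4, "quarter frame");
+#endif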
+
+/* max number of samples per frame (= 60 ms frame) */
+#define MAX_FRAMESAMPLES 960
+#define MAX_SWBFRAMESAMPLES (MAX_FRAMESAMPLES * 2)
+/* number of samples per 10ms frame */
+#define FRAMESAMPLES_10ms ((10 * FS) / 1000)
+#define SWBFRAMESAMPLES_10ms (FRAMESAMPLES_10ms * 2)
+/* number of samples in 30 ms frame */
+#define FRAMESAMPLES_30ms 480
+/* number of subframes */
+#define SUBFRAMES 6
+/* length of a subframe */
+#define UPDATE 80
+/* length of half a subframe (low/high band) */
+#define HALF_SUBFRAMELEN (UPDATE / 2)
+/* samples of look-ahead (in a half-band, so actually
+ * half the look-ahead samples at FS) */
+#define QLOOKAHEAD 24 /* 3 ms */
+/* order of AR model in spectral entropy coder */
+#define AR_ORDER 6
+/* order of LP model in spectral entropy coder */
+#define LP_ORDER 0
+
+/* window length (masking analysis) */
+#define WINLEN 256
+/* order of low-band pole filter used to approximate masking curve */
+#define ORDERLO 12
+/* order of hi-band pole filter used to approximate masking curve */
+#define ORDERHI 6
+
+#define UB_LPC_ORDER 4
+#define UB_LPC_VEC_PER_FRAME 2
+#define UB16_LPC_VEC_PER_FRAME 4
+#define UB_ACTIVE_SUBFRAMES 2
+#define UB_MAX_LPC_ORDER 6
+#define UB_INTERPOL_SEGMENTS 1
+#define UB16_INTERPOL_SEGMENTS 3
+#define LB_TOTAL_DELAY_SAMPLES 48
+enum ISACBandwidth { isac8kHz = 8, isac12kHz = 12, isac16kHz = 16 };
+enum ISACBand {
+ kIsacLowerBand = 0,
+ kIsacUpperBand12 = 1,
+ kIsacUpperBand16 = 2
+};
+enum IsacSamplingRate { kIsacWideband = 16, kIsacSuperWideband = 32 };
+#define UB_LPC_GAIN_DIM SUBFRAMES
+#define FB_STATE_SIZE_WORD32 6
+
+/* order for post_filter_bank */
+#define POSTQORDER 3
+/* order for pre-filterbank */
+#define QORDER 3
+/* combined order of the pre- and post-filterbanks */
+#define QORDER_ALL (POSTQORDER + QORDER - 1)
+/* for decimator */
+#define ALLPASSSECTIONS 2
+
+/* array size for byte stream in number of bytes. */
+/* The old maximum size, still needed for decoding */
+#define STREAM_SIZE_MAX 600
+#define STREAM_SIZE_MAX_30 200 /* 200 bytes = 53.3 kbps at 30 ms frame length */
+#define STREAM_SIZE_MAX_60 400 /* 400 bytes = 53.3 kbps at 60 ms frame length */
+
+/* storage size for bit counts */
+#define BIT_COUNTER_SIZE 30
+/* maximum order of any AR model or filter */
+#define MAX_AR_MODEL_ORDER 12 // 50
+
+/* For pitch analysis */
+#define PITCH_FRAME_LEN (FRAMESAMPLES_HALF) /* 30 ms */
+#define PITCH_MAX_LAG 140 /* 57 Hz */
+#define PITCH_MIN_LAG 20 /* 400 Hz */
+#define PITCH_MAX_GAIN 0.45
+#define PITCH_MAX_GAIN_06 0.27 /* PITCH_MAX_GAIN*0.6 */
+#define PITCH_MAX_GAIN_Q12 1843
+#define PITCH_LAG_SPAN2 (PITCH_MAX_LAG / 2 - PITCH_MIN_LAG / 2 + 5)
+#define PITCH_CORR_LEN2 60 /* 15 ms */
+#define PITCH_CORR_STEP2 (PITCH_FRAME_LEN / 4)
+#define PITCH_BW 11 /* half the band width of correlation surface */
+#define PITCH_SUBFRAMES 4
+#define PITCH_GRAN_PER_SUBFRAME 5
+#define PITCH_SUBFRAME_LEN (PITCH_FRAME_LEN / PITCH_SUBFRAMES)
+#define PITCH_UPDATE (PITCH_SUBFRAME_LEN / PITCH_GRAN_PER_SUBFRAME)
+/* maximum number of peaks to be examined in correlation surface */
+#define PITCH_MAX_NUM_PEAKS 10
+#define PITCH_PEAK_DECAY 0.85
+/* For weighting filter */
+#define PITCH_WLPCORDER 6
+#define PITCH_WLPCWINLEN PITCH_FRAME_LEN
+#define PITCH_WLPCASYM 0.3 /* asymmetry parameter */
+#define PITCH_WLPCBUFLEN PITCH_WLPCWINLEN
+/* For pitch filter */
+/* Extra 50 for fraction and LP filters */
+#define PITCH_BUFFSIZE (PITCH_MAX_LAG + 50)
+#define PITCH_INTBUFFSIZE (PITCH_FRAME_LEN + PITCH_BUFFSIZE)
+/* Max rel. step for interpolation */
+#define PITCH_UPSTEP 1.5
+/* Max rel. step for interpolation */
+#define PITCH_DOWNSTEP 0.67
+#define PITCH_FRACS 8
+#define PITCH_FRACORDER 9
+#define PITCH_DAMPORDER 5
+#define PITCH_FILTDELAY 1.5f
+/* stepsize for quantization of the pitch Gain */
+#define PITCH_GAIN_STEPSIZE 0.125
+
+/* Order of high pass filter */
+#define HPORDER 2
+
+/* some mathematical constants */
+/* log2(e) */
+#define LOG2EXP 1.44269504088896
+#define PI 3.14159265358979
+
+/* Maximum number of iterations allowed to limit payload size */
+#define MAX_PAYLOAD_LIMIT_ITERATION 5
+
+/* Redundant Coding */
+#define RCU_BOTTLENECK_BPS 16000
+#define RCU_TRANSCODING_SCALE 0.40f
+#define RCU_TRANSCODING_SCALE_INVERSE 2.5f
+
+#define RCU_TRANSCODING_SCALE_UB 0.50f
+#define RCU_TRANSCODING_SCALE_UB_INVERSE 2.0f
+
+/* Define Error codes */
+/* 6000 General */
+#define ISAC_MEMORY_ALLOCATION_FAILED 6010
+#define ISAC_MODE_MISMATCH 6020
+#define ISAC_DISALLOWED_BOTTLENECK 6030
+#define ISAC_DISALLOWED_FRAME_LENGTH 6040
+#define ISAC_UNSUPPORTED_SAMPLING_FREQUENCY 6050
+
+/* 6200 Bandwidth estimator */
+#define ISAC_RANGE_ERROR_BW_ESTIMATOR 6240
+/* 6400 Encoder */
+#define ISAC_ENCODER_NOT_INITIATED 6410
+#define ISAC_DISALLOWED_CODING_MODE 6420
+#define ISAC_DISALLOWED_FRAME_MODE_ENCODER 6430
+#define ISAC_DISALLOWED_BITSTREAM_LENGTH 6440
+#define ISAC_PAYLOAD_LARGER_THAN_LIMIT 6450
+#define ISAC_DISALLOWED_ENCODER_BANDWIDTH 6460
+/* 6600 Decoder */
+#define ISAC_DECODER_NOT_INITIATED 6610
+#define ISAC_EMPTY_PACKET 6620
+#define ISAC_DISALLOWED_FRAME_MODE_DECODER 6630
+#define ISAC_RANGE_ERROR_DECODE_FRAME_LENGTH 6640
+#define ISAC_RANGE_ERROR_DECODE_BANDWIDTH 6650
+#define ISAC_RANGE_ERROR_DECODE_PITCH_GAIN 6660
+#define ISAC_RANGE_ERROR_DECODE_PITCH_LAG 6670
+#define ISAC_RANGE_ERROR_DECODE_LPC 6680
+#define ISAC_RANGE_ERROR_DECODE_SPECTRUM 6690
+#define ISAC_LENGTH_MISMATCH 6730
+#define ISAC_RANGE_ERROR_DECODE_BANDWITH 6740
+#define ISAC_DISALLOWED_BANDWIDTH_MODE_DECODER 6750
+#define ISAC_DISALLOWED_LPC_MODEL 6760
+/* 6800 Call setup formats */
+#define ISAC_INCOMPATIBLE_FORMATS 6810
+
+#endif /* MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_SETTINGS_H_ */
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/spectrum_ar_model_tables.c b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/spectrum_ar_model_tables.c
new file mode 100644
index 0000000000..839d5d4586
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/spectrum_ar_model_tables.c
@@ -0,0 +1,139 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/isac/main/source/spectrum_ar_model_tables.h"
+#include "modules/audio_coding/codecs/isac/main/source/settings.h"
+
+/********************* AR Coefficient Tables ************************/
+/* cdf for quantized reflection coefficient 1 */
+const uint16_t WebRtcIsac_kQArRc1Cdf[NUM_AR_RC_QUANT_BAUNDARY] = {
+ 0, 2, 4, 129, 7707, 57485, 65495, 65527, 65529, 65531,
+ 65533, 65535};
+
+/* cdf for quantized reflection coefficient 2 */
+const uint16_t WebRtcIsac_kQArRc2Cdf[NUM_AR_RC_QUANT_BAUNDARY] = {
+ 0, 2, 4, 7, 531, 25298, 64525, 65526, 65529, 65531,
+ 65533, 65535};
+
+/* cdf for quantized reflection coefficient 3 */
+const uint16_t WebRtcIsac_kQArRc3Cdf[NUM_AR_RC_QUANT_BAUNDARY] = {
+ 0, 2, 4, 6, 620, 22898, 64843, 65527, 65529, 65531,
+ 65533, 65535};
+
+/* cdf for quantized reflection coefficient 4 */
+const uint16_t WebRtcIsac_kQArRc4Cdf[NUM_AR_RC_QUANT_BAUNDARY] = {
+ 0, 2, 4, 6, 35, 10034, 60733, 65506, 65529, 65531,
+ 65533, 65535};
+
+/* cdf for quantized reflection coefficient 5 */
+const uint16_t WebRtcIsac_kQArRc5Cdf[NUM_AR_RC_QUANT_BAUNDARY] = {
+ 0, 2, 4, 6, 36, 7567, 56727, 65385, 65529, 65531,
+ 65533, 65535};
+
+/* cdf for quantized reflection coefficient 6 */
+const uint16_t WebRtcIsac_kQArRc6Cdf[NUM_AR_RC_QUANT_BAUNDARY] = {
+ 0, 2, 4, 6, 14, 6579, 57360, 65409, 65529, 65531,
+ 65533, 65535};
+
+/* representation levels for quantized reflection coefficient 1 */
+const int16_t WebRtcIsac_kQArRc1Levels[NUM_AR_RC_QUANT_BAUNDARY - 1] = {
+ -32104, -29007, -23202, -15496, -9279, -2577, 5934, 17535, 24512, 29503, 32104
+};
+
+/* representation levels for quantized reflection coefficient 2 */
+const int16_t WebRtcIsac_kQArRc2Levels[NUM_AR_RC_QUANT_BAUNDARY - 1] = {
+ -32104, -29503, -23494, -15261, -7309, -1399, 6158, 16381, 24512, 29503, 32104
+};
+
+/* representation levels for quantized reflection coefficient 3 */
+const int16_t WebRtcIsac_kQArRc3Levels[NUM_AR_RC_QUANT_BAUNDARY - 1] = {
+-32104, -29503, -23157, -15186, -7347, -1359, 5829, 17535, 24512, 29503, 32104
+};
+
+/* representation levels for quantized reflection coefficient 4 */
+const int16_t WebRtcIsac_kQArRc4Levels[NUM_AR_RC_QUANT_BAUNDARY - 1] = {
+-32104, -29503, -24512, -15362, -6665, -342, 6596, 14585, 24512, 29503, 32104
+};
+
+/* representation levels for quantized reflection coefficient 5 */
+const int16_t WebRtcIsac_kQArRc5Levels[NUM_AR_RC_QUANT_BAUNDARY - 1] = {
+-32104, -29503, -24512, -15005, -6564, -106, 7123, 14920, 24512, 29503, 32104
+};
+
+/* representation levels for quantized reflection coefficient 6 */
+const int16_t WebRtcIsac_kQArRc6Levels[NUM_AR_RC_QUANT_BAUNDARY - 1] = {
+-32104, -29503, -24512, -15096, -6656, -37, 7036, 14847, 24512, 29503, 32104
+};
+
+/* quantization boundary levels for reflection coefficients */
+const int16_t WebRtcIsac_kQArBoundaryLevels[NUM_AR_RC_QUANT_BAUNDARY] = {
+-32768, -31441, -27566, -21458, -13612, -4663, 4663, 13612, 21458, 27566, 31441,
+32767
+};
+
+/* initial index for AR reflection coefficient quantizer and cdf table search */
+const uint16_t WebRtcIsac_kQArRcInitIndex[6] = {
+ 5, 5, 5, 5, 5, 5};
+
+/* pointers to AR cdf tables */
+const uint16_t *WebRtcIsac_kQArRcCdfPtr[AR_ORDER] = {
+ WebRtcIsac_kQArRc1Cdf, WebRtcIsac_kQArRc2Cdf, WebRtcIsac_kQArRc3Cdf,
+ WebRtcIsac_kQArRc4Cdf, WebRtcIsac_kQArRc5Cdf, WebRtcIsac_kQArRc6Cdf
+};
+
+/* pointers to AR representation levels tables */
+const int16_t *WebRtcIsac_kQArRcLevelsPtr[AR_ORDER] = {
+ WebRtcIsac_kQArRc1Levels, WebRtcIsac_kQArRc2Levels, WebRtcIsac_kQArRc3Levels,
+ WebRtcIsac_kQArRc4Levels, WebRtcIsac_kQArRc5Levels, WebRtcIsac_kQArRc6Levels
+};
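+
+/* A minimal sketch of how a Q16 cdf row drives symbol decoding: a decoded
+ * probability `p` selects the interval that contains it, and the levels
+ * table gives the dequantized reflection coefficient. `DecodeRcSymbol` is
+ * illustrative, not the actual entropy-decoder entry point. */
+static int16_t DecodeRcSymbol(int order_idx, uint16_t p) {
+  const uint16_t* cdf = WebRtcIsac_kQArRcCdfPtr[order_idx];
+  int s = 0;
+  while (s < NUM_AR_RC_QUANT_BAUNDARY - 2 && p >= cdf[s + 1])
+    s++;
+  return WebRtcIsac_kQArRcLevelsPtr[order_idx][s];
+}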
+
+
+/******************** GAIN Coefficient Tables ***********************/
+/* cdf for Gain coefficient */
+const uint16_t WebRtcIsac_kQGainCdf[19] = {
+ 0, 2, 4, 6, 8, 10, 12, 14, 16, 1172,
+ 11119, 29411, 51699, 64445, 65527, 65529, 65531, 65533, 65535};
+
+/* representation levels for quantized squared Gain coefficient */
+const int32_t WebRtcIsac_kQGain2Levels[18] = {
+// 17, 28, 46, 76, 128, 215, 364, 709, 1268, 1960, 3405, 6078, 11286, 17827, 51918, 134498, 487432, 2048000};
+ 128, 128, 128, 128, 128, 215, 364, 709, 1268, 1960, 3405, 6078, 11286, 17827, 51918, 134498, 487432, 2048000};
+/* quantization boundary levels for squared Gain coefficient */
+const int32_t WebRtcIsac_kQGain2BoundaryLevels[19] = {
+0, 21, 35, 59, 99, 166, 280, 475, 815, 1414, 2495, 4505, 8397, 16405, 34431, 81359, 240497, 921600, 0x7FFFFFFF};
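+
+/* Sketch of the matching quantizer: a squared gain is mapped to the boundary
+ * interval that contains it, and WebRtcIsac_kQGain2Levels[i] is then the
+ * reconstruction. `QuantizeGain2` is an illustrative helper, not the actual
+ * encoder code. */
+static int QuantizeGain2(int32_t gain2) {
+  int i = 0;
+  while (i < 17 && gain2 >= WebRtcIsac_kQGain2BoundaryLevels[i + 1])
+    i++;
+  return i;
+}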
+
+/* pointers to Gain cdf table */
+const uint16_t *WebRtcIsac_kQGainCdf_ptr[1] = {WebRtcIsac_kQGainCdf};
+
+/* Gain initial index for gain quantizer and cdf table search */
+const uint16_t WebRtcIsac_kQGainInitIndex[1] = {11};
+
+/************************* Cosine Tables ****************************/
+/* Cosine table */
+const int16_t WebRtcIsac_kCos[6][60] = {
+{512, 512, 511, 510, 508, 507, 505, 502, 499, 496, 493, 489, 485, 480, 476, 470, 465, 459, 453, 447,
+440, 433, 426, 418, 410, 402, 394, 385, 376, 367, 357, 348, 338, 327, 317, 306, 295, 284, 273, 262,
+250, 238, 226, 214, 202, 190, 177, 165, 152, 139, 126, 113, 100, 87, 73, 60, 47, 33, 20, 7},
+{512, 510, 508, 503, 498, 491, 483, 473, 462, 450, 437, 422, 406, 389, 371, 352, 333, 312, 290, 268,
+244, 220, 196, 171, 145, 120, 93, 67, 40, 13, -13, -40, -67, -93, -120, -145, -171, -196, -220, -244,
+-268, -290, -312, -333, -352, -371, -389, -406, -422, -437, -450, -462, -473, -483, -491, -498, -503, -508, -510, -512},
+{512, 508, 502, 493, 480, 465, 447, 426, 402, 376, 348, 317, 284, 250, 214, 177, 139, 100, 60, 20,
+-20, -60, -100, -139, -177, -214, -250, -284, -317, -348, -376, -402, -426, -447, -465, -480, -493, -502, -508, -512,
+-512, -508, -502, -493, -480, -465, -447, -426, -402, -376, -348, -317, -284, -250, -214, -177, -139, -100, -60, -20},
+{511, 506, 495, 478, 456, 429, 398, 362, 322, 279, 232, 183, 133, 80, 27, -27, -80, -133, -183, -232,
+-279, -322, -362, -398, -429, -456, -478, -495, -506, -511, -511, -506, -495, -478, -456, -429, -398, -362, -322, -279,
+-232, -183, -133, -80, -27, 27, 80, 133, 183, 232, 279, 322, 362, 398, 429, 456, 478, 495, 506, 511},
+{511, 502, 485, 459, 426, 385, 338, 284, 226, 165, 100, 33, -33, -100, -165, -226, -284, -338, -385, -426,
+-459, -485, -502, -511, -511, -502, -485, -459, -426, -385, -338, -284, -226, -165, -100, -33, 33, 100, 165, 226,
+284, 338, 385, 426, 459, 485, 502, 511, 511, 502, 485, 459, 426, 385, 338, 284, 226, 165, 100, 33},
+{510, 498, 473, 437, 389, 333, 268, 196, 120, 40, -40, -120, -196, -268, -333, -389, -437, -473, -498, -510,
+-510, -498, -473, -437, -389, -333, -268, -196, -120, -40, 40, 120, 196, 268, 333, 389, 437, 473, 498, 510,
+510, 498, 473, 437, 389, 333, 268, 196, 120, 40, -40, -120, -196, -268, -333, -389, -437, -473, -498, -510}
+};
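+
+/* To rounding, the rows above match a Q9 half-sample-offset cosine basis:
+ * kCos[i][k] ~= round(512 * cos(PI * (i + 1) * (k + 0.5) / 120)). The
+ * generator below is a hypothetical illustration of that pattern, not code
+ * from the original project; it is compiled out and would need <math.h>. */
+#if 0
+static void GenerateCosTable(int16_t tab[6][60]) {
+  int i, k;
+  for (i = 0; i < 6; i++)
+    for (k = 0; k < 60; k++)
+      tab[i][k] =
+          (int16_t)floor(512.0 * cos(PI * (i + 1) * (k + 0.5) / 120.0) + 0.5);
+}
+#endif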
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/spectrum_ar_model_tables.h b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/spectrum_ar_model_tables.h
new file mode 100644
index 0000000000..d272be0dc3
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/spectrum_ar_model_tables.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * spectrum_ar_model_tables.h
+ *
+ * This file contains definitions of tables with AR coefficients,
+ * Gain coefficients and cosine tables.
+ *
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_SPECTRUM_AR_MODEL_TABLES_H_
+#define MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_SPECTRUM_AR_MODEL_TABLES_H_
+
+#include "modules/audio_coding/codecs/isac/main/source/structs.h"
+
+#define NUM_AR_RC_QUANT_BAUNDARY 12
+
+/********************* AR Coefficient Tables ************************/
+/* cdf for quantized reflection coefficient 1 */
+extern const uint16_t WebRtcIsac_kQArRc1Cdf[NUM_AR_RC_QUANT_BAUNDARY];
+
+/* cdf for quantized reflection coefficient 2 */
+extern const uint16_t WebRtcIsac_kQArRc2Cdf[NUM_AR_RC_QUANT_BAUNDARY];
+
+/* cdf for quantized reflection coefficient 3 */
+extern const uint16_t WebRtcIsac_kQArRc3Cdf[NUM_AR_RC_QUANT_BAUNDARY];
+
+/* cdf for quantized reflection coefficient 4 */
+extern const uint16_t WebRtcIsac_kQArRc4Cdf[NUM_AR_RC_QUANT_BAUNDARY];
+
+/* cdf for quantized reflection coefficient 5 */
+extern const uint16_t WebRtcIsac_kQArRc5Cdf[NUM_AR_RC_QUANT_BAUNDARY];
+
+/* cdf for quantized reflection coefficient 6 */
+extern const uint16_t WebRtcIsac_kQArRc6Cdf[NUM_AR_RC_QUANT_BAUNDARY];
+
+/* quantization boundary levels for reflection coefficients */
+extern const int16_t WebRtcIsac_kQArBoundaryLevels[NUM_AR_RC_QUANT_BAUNDARY];
+
+/* initial indices for AR reflection coefficient quantizer and cdf table search
+ */
+extern const uint16_t WebRtcIsac_kQArRcInitIndex[AR_ORDER];
+
+/* pointers to AR cdf tables */
+extern const uint16_t* WebRtcIsac_kQArRcCdfPtr[AR_ORDER];
+
+/* pointers to AR representation levels tables */
+extern const int16_t* WebRtcIsac_kQArRcLevelsPtr[AR_ORDER];
+
+/******************** GAIN Coefficient Tables ***********************/
+/* cdf for Gain coefficient */
+extern const uint16_t WebRtcIsac_kQGainCdf[19];
+
+/* representation levels for quantized Gain coefficient */
+extern const int32_t WebRtcIsac_kQGain2Levels[18];
+
+/* squared quantization boundary levels for Gain coefficient */
+extern const int32_t WebRtcIsac_kQGain2BoundaryLevels[19];
+
+/* pointer to Gain cdf table */
+extern const uint16_t* WebRtcIsac_kQGainCdf_ptr[1];
+
+/* Gain initial index for gain quantizer and cdf table search */
+extern const uint16_t WebRtcIsac_kQGainInitIndex[1];
+
+/************************* Cosine Tables ****************************/
+/* Cosine table */
+extern const int16_t WebRtcIsac_kCos[6][60];
+
+#endif /* MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_SPECTRUM_AR_MODEL_TABLES_H_ \
+ */
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/structs.h b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/structs.h
new file mode 100644
index 0000000000..6861ca42bd
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/structs.h
@@ -0,0 +1,448 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * structs.h
+ *
+ * This header file contains all the structs used in the ISAC codec
+ *
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_STRUCTS_H_
+#define MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_STRUCTS_H_
+
+#include "modules/audio_coding/codecs/isac/bandwidth_info.h"
+#include "modules/audio_coding/codecs/isac/main/source/settings.h"
+#include "modules/third_party/fft/fft.h"
+
+typedef struct Bitstreamstruct {
+ uint8_t stream[STREAM_SIZE_MAX];
+ uint32_t W_upper;
+ uint32_t streamval;
+ uint32_t stream_index;
+
+} Bitstr;
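+
+/* Hedged reading of the fields, inferred from their use elsewhere in the
+ * codec: W_upper is the width of the current arithmetic-coding interval,
+ * streamval holds the bits read so far, and stream_index is the current
+ * byte position in `stream`. */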
+
+typedef struct {
+ double DataBufferLo[WINLEN];
+ double DataBufferHi[WINLEN];
+
+ double CorrBufLo[ORDERLO + 1];
+ double CorrBufHi[ORDERHI + 1];
+
+ float PreStateLoF[ORDERLO + 1];
+ float PreStateLoG[ORDERLO + 1];
+ float PreStateHiF[ORDERHI + 1];
+ float PreStateHiG[ORDERHI + 1];
+ float PostStateLoF[ORDERLO + 1];
+ float PostStateLoG[ORDERLO + 1];
+ float PostStateHiF[ORDERHI + 1];
+ float PostStateHiG[ORDERHI + 1];
+
+ double OldEnergy;
+
+} MaskFiltstr;
+
+typedef struct {
+ // state vectors for each of the two analysis filters
+ double INSTAT1[2 * (QORDER - 1)];
+ double INSTAT2[2 * (QORDER - 1)];
+ double INSTATLA1[2 * (QORDER - 1)];
+ double INSTATLA2[2 * (QORDER - 1)];
+ double INLABUF1[QLOOKAHEAD];
+ double INLABUF2[QLOOKAHEAD];
+
+ float INSTAT1_float[2 * (QORDER - 1)];
+ float INSTAT2_float[2 * (QORDER - 1)];
+ float INSTATLA1_float[2 * (QORDER - 1)];
+ float INSTATLA2_float[2 * (QORDER - 1)];
+ float INLABUF1_float[QLOOKAHEAD];
+ float INLABUF2_float[QLOOKAHEAD];
+
+ /* High pass filter */
+ double HPstates[HPORDER];
+ float HPstates_float[HPORDER];
+
+} PreFiltBankstr;
+
+typedef struct {
+ // state vectors for each of the two synthesis filters
+ double STATE_0_LOWER[2 * POSTQORDER];
+ double STATE_0_UPPER[2 * POSTQORDER];
+
+ /* High pass filter */
+ double HPstates1[HPORDER];
+ double HPstates2[HPORDER];
+
+ float STATE_0_LOWER_float[2 * POSTQORDER];
+ float STATE_0_UPPER_float[2 * POSTQORDER];
+
+ float HPstates1_float[HPORDER];
+ float HPstates2_float[HPORDER];
+
+} PostFiltBankstr;
+
+typedef struct {
+ // data buffer for pitch filter
+ double ubuf[PITCH_BUFFSIZE];
+
+ // low pass state vector
+ double ystate[PITCH_DAMPORDER];
+
+ // old lag and gain
+ double oldlagp[1];
+ double oldgainp[1];
+
+} PitchFiltstr;
+
+typedef struct {
+ // data buffer
+ double buffer[PITCH_WLPCBUFLEN];
+
+ // state vectors
+ double istate[PITCH_WLPCORDER];
+ double weostate[PITCH_WLPCORDER];
+ double whostate[PITCH_WLPCORDER];
+
+ // LPC window; could be a global array since it is constant
+ double window[PITCH_WLPCWINLEN];
+
+} WeightFiltstr;
+
+typedef struct {
+ // for the initial estimator
+ double dec_buffer[PITCH_CORR_LEN2 + PITCH_CORR_STEP2 + PITCH_MAX_LAG / 2 -
+ PITCH_FRAME_LEN / 2 + 2];
+ double decimator_state[2 * ALLPASSSECTIONS + 1];
+ double hp_state[2];
+
+ double whitened_buf[QLOOKAHEAD];
+
+ double inbuf[QLOOKAHEAD];
+
+ PitchFiltstr PFstr_wght;
+ PitchFiltstr PFstr;
+ WeightFiltstr Wghtstr;
+
+} PitchAnalysisStruct;
+
+/* An instance of this struct is kept together with the other iSAC structs */
+typedef struct {
+ /* Previous frame length (in ms) */
+ int32_t prev_frame_length;
+
+ /* Previous RTP timestamp from received
+ packet (in samples, relative to the beginning) */
+ int32_t prev_rec_rtp_number;
+
+ /* Send timestamp for previous packet (in ms using timeGetTime()) */
+ uint32_t prev_rec_send_ts;
+
+ /* Arrival time for previous packet (in ms using timeGetTime()) */
+ uint32_t prev_rec_arr_ts;
+
+ /* rate of previous packet, derived from RTP timestamps (in bits/s) */
+ float prev_rec_rtp_rate;
+
+ /* Time since the last update of the bottleneck estimate (in ms) */
+ uint32_t last_update_ts;
+
+ /* Time since the last reduction (in ms) */
+ uint32_t last_reduction_ts;
+
+ /* How many times the estimate was updated in the beginning */
+ int32_t count_tot_updates_rec;
+
+ /* The estimated bottleneck rate from there to here (in bits/s) */
+ int32_t rec_bw;
+ float rec_bw_inv;
+ float rec_bw_avg;
+ float rec_bw_avg_Q;
+
+ /* The estimated mean absolute jitter value,
+ as seen on this side (in ms) */
+ float rec_jitter;
+ float rec_jitter_short_term;
+ float rec_jitter_short_term_abs;
+ float rec_max_delay;
+ float rec_max_delay_avg_Q;
+
+ /* (assumed) bitrate for headers (bps) */
+ float rec_header_rate;
+
+ /* The estimated bottleneck rate from here to there (in bits/s) */
+ float send_bw_avg;
+
+ /* The estimated mean absolute jitter value, as seen on
+ the other side (in ms) */
+ float send_max_delay_avg;
+
+ // number of packets received since last update
+ int num_pkts_rec;
+
+ int num_consec_rec_pkts_over_30k;
+
+ // flag for marking that a high speed network has been
+ // detected downstream
+ int hsn_detect_rec;
+
+ int num_consec_snt_pkts_over_30k;
+
+ // flag for marking that a high speed network has
+ // been detected upstream
+ int hsn_detect_snd;
+
+ uint32_t start_wait_period;
+
+ int in_wait_period;
+
+ int change_to_WB;
+
+ uint32_t senderTimestamp;
+ uint32_t receiverTimestamp;
+ // enum IsacSamplingRate incomingStreamSampFreq;
+ uint16_t numConsecLatePkts;
+ float consecLatency;
+ int16_t inWaitLatePkts;
+
+ IsacBandwidthInfo external_bw_info;
+} BwEstimatorstr;
+
+typedef struct {
+ /* boolean; flags whether the previous packet exceeded the bottleneck */
+ int PrevExceed;
+ /* ms */
+ int ExceedAgo;
+ /* packets left to send in current burst */
+ int BurstCounter;
+ /* packets */
+ int InitCounter;
+ /* ms remaining in buffer when next packet will be sent */
+ double StillBuffered;
+
+} RateModel;
+
+/* The following struct is used to store data from encoding, to make it
+ fast and easy to construct a new bitstream with a different bandwidth
+ estimate. All values (except framelength and minBytes) are doubled in
+ size to handle 60 ms of data.
+*/
+typedef struct {
+ /* Used to keep track of if it is first or second part of 60 msec packet */
+ int startIdx;
+
+ /* Frame length in samples */
+ int16_t framelength;
+
+ /* Pitch Gain */
+ int pitchGain_index[2];
+
+ /* Pitch Lag */
+ double meanGain[2];
+ int pitchIndex[PITCH_SUBFRAMES * 2];
+
+ /* LPC */
+ int LPCindex_s[108 * 2]; /* KLT_ORDER_SHAPE = 108 */
+ int LPCindex_g[12 * 2]; /* KLT_ORDER_GAIN = 12 */
+ double LPCcoeffs_lo[(ORDERLO + 1) * SUBFRAMES * 2];
+ double LPCcoeffs_hi[(ORDERHI + 1) * SUBFRAMES * 2];
+
+ /* Encode Spec */
+ int16_t fre[FRAMESAMPLES];
+ int16_t fim[FRAMESAMPLES];
+ int16_t AvgPitchGain[2];
+
+ /* Used in adaptive mode only */
+ int minBytes;
+
+} IsacSaveEncoderData;
+
+typedef struct {
+ int indexLPCShape[UB_LPC_ORDER * UB16_LPC_VEC_PER_FRAME];
+ double lpcGain[SUBFRAMES << 1];
+ int lpcGainIndex[SUBFRAMES << 1];
+
+ Bitstr bitStreamObj;
+
+ int16_t realFFT[FRAMESAMPLES_HALF];
+ int16_t imagFFT[FRAMESAMPLES_HALF];
+} ISACUBSaveEncDataStruct;
+
+typedef struct {
+ Bitstr bitstr_obj;
+ MaskFiltstr maskfiltstr_obj;
+ PreFiltBankstr prefiltbankstr_obj;
+ PitchFiltstr pitchfiltstr_obj;
+ PitchAnalysisStruct pitchanalysisstr_obj;
+ FFTstr fftstr_obj;
+ IsacSaveEncoderData SaveEnc_obj;
+
+ int buffer_index;
+ int16_t current_framesamples;
+
+ float data_buffer_float[FRAMESAMPLES_30ms];
+
+ int frame_nb;
+ double bottleneck;
+ int16_t new_framelength;
+ double s2nr;
+
+ /* Maximum allowed number of bytes for a 30 msec packet */
+ int16_t payloadLimitBytes30;
+ /* Maximum allowed number of bytes for a 60 msec packet */
+ int16_t payloadLimitBytes60;
+ /* Maximum allowed number of bytes for both 30 and 60 msec packets */
+ int16_t maxPayloadBytes;
+ /* Maximum allowed rate in bytes per 30 msec packet */
+ int16_t maxRateInBytes;
+
+ /*---
+ If set to 1, iSAC will not adapt the frame size when used in
+ channel-adaptive mode. The initial value is then used for all rates.
+ ---*/
+
+ /*-----
+ This records the BWE index the encoder injected into the bit-stream.
+ It is used in RCU: the redundant payload carries the same BWE index
+ as the main payload. We cannot retrieve it from the BWE module because
+ that is a recursive procedure (WebRtcIsac_GetDownlinkBwJitIndexImpl)
+ which must be called only once per encode.
+ -----*/
+ int16_t lastBWIdx;
+} ISACLBEncStruct;
+
+typedef struct {
+ Bitstr bitstr_obj;
+ MaskFiltstr maskfiltstr_obj;
+ PreFiltBankstr prefiltbankstr_obj;
+ FFTstr fftstr_obj;
+ ISACUBSaveEncDataStruct SaveEnc_obj;
+
+ int buffer_index;
+ float data_buffer_float[MAX_FRAMESAMPLES + LB_TOTAL_DELAY_SAMPLES];
+ double bottleneck;
+ /* Maximum allowed number of bits for a 30 msec packet */
+ // int16_t payloadLimitBytes30;
+ /* Maximum allowed number of bits for both 30 and 60 msec packet */
+ // int16_t maxPayloadBytes;
+ int16_t maxPayloadSizeBytes;
+
+ double lastLPCVec[UB_LPC_ORDER];
+ int16_t numBytesUsed;
+ int16_t lastJitterInfo;
+} ISACUBEncStruct;
+
+typedef struct {
+ Bitstr bitstr_obj;
+ MaskFiltstr maskfiltstr_obj;
+ PostFiltBankstr postfiltbankstr_obj;
+ PitchFiltstr pitchfiltstr_obj;
+ FFTstr fftstr_obj;
+
+} ISACLBDecStruct;
+
+typedef struct {
+ Bitstr bitstr_obj;
+ MaskFiltstr maskfiltstr_obj;
+ PostFiltBankstr postfiltbankstr_obj;
+ FFTstr fftstr_obj;
+
+} ISACUBDecStruct;
+
+typedef struct {
+ ISACLBEncStruct ISACencLB_obj;
+ ISACLBDecStruct ISACdecLB_obj;
+} ISACLBStruct;
+
+typedef struct {
+ ISACUBEncStruct ISACencUB_obj;
+ ISACUBDecStruct ISACdecUB_obj;
+} ISACUBStruct;
+
+/*
+ This struct is used to take a snapshot of the entropy coder and LPC gains
+ right before encoding LPC gains. This allows us to go back to that state
+ if we later need to limit the payload size.
+*/
+typedef struct {
+ /* 6 lower-band & 6 upper-band */
+ double loFiltGain[SUBFRAMES];
+ double hiFiltGain[SUBFRAMES];
+ /* Upper boundary of interval W */
+ uint32_t W_upper;
+ uint32_t streamval;
+ /* Index to the current position in bytestream */
+ uint32_t stream_index;
+ uint8_t stream[3];
+} transcode_obj;
+
+typedef struct {
+ // TODO(kwiberg): The size of these tables could be reduced by storing floats
+ // instead of doubles, and by making use of the identity cos(x) =
+ // sin(x+pi/2). They could also be made global constants that we fill in at
+ // compile time.
+ double costab1[FRAMESAMPLES_HALF];
+ double sintab1[FRAMESAMPLES_HALF];
+ double costab2[FRAMESAMPLES_QUARTER];
+ double sintab2[FRAMESAMPLES_QUARTER];
+} TransformTables;
+
+typedef struct {
+ // lower-band codec instance
+ ISACLBStruct instLB;
+ // upper-band codec instance
+ ISACUBStruct instUB;
+
+ // Bandwidth Estimator and model for the rate.
+ BwEstimatorstr bwestimator_obj;
+ RateModel rate_data_obj;
+ double MaxDelay;
+
+ /* 0 = adaptive; 1 = instantaneous */
+ int16_t codingMode;
+
+ // overall bottleneck of the codec
+ int32_t bottleneck;
+
+ // QMF Filter state
+ int32_t analysisFBState1[FB_STATE_SIZE_WORD32];
+ int32_t analysisFBState2[FB_STATE_SIZE_WORD32];
+ int32_t synthesisFBState1[FB_STATE_SIZE_WORD32];
+ int32_t synthesisFBState2[FB_STATE_SIZE_WORD32];
+
+ // Error Code
+ int16_t errorCode;
+
+ // Bandwidth of the encoded audio: 8, 12, or 16 kHz.
+ enum ISACBandwidth bandwidthKHz;
+ // Sampling rate of the encoder and decoder audio: 16 or 32 kHz.
+ enum IsacSamplingRate encoderSamplingRateKHz;
+ enum IsacSamplingRate decoderSamplingRateKHz;
+ // Flag to keep track of initializations, lower & upper-band
+ // encoder and decoder.
+ int16_t initFlag;
+
+ // Flag to indicate a signal-bandwidth switch
+ int16_t resetFlag_8kHz;
+
+ // Maximum allowed rate, measured in Bytes per 30 ms.
+ int16_t maxRateBytesPer30Ms;
+ // Maximum allowed payload-size, measured in Bytes.
+ int16_t maxPayloadSizeBytes;
+ /* The expected sampling rate of the input signal. Valid values are 16000
+ * and 32000. This is not the operating sampling rate of the codec. */
+ uint16_t in_sample_rate_hz;
+
+ // Trig tables for WebRtcIsac_Time2Spec and WebRtcIsac_Spec2time.
+ TransformTables transform_tables;
+} ISACMainStruct;
+
+#endif /* MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_STRUCTS_H_ */
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/transform.c b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/transform.c
new file mode 100644
index 0000000000..082ad941c4
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/transform.c
@@ -0,0 +1,126 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <math.h>
+
+#include "modules/audio_coding/codecs/isac/main/source/settings.h"
+#include "modules/audio_coding/codecs/isac/main/source/codec.h"
+#include "modules/audio_coding/codecs/isac/main/source/os_specific_inline.h"
+#include "modules/third_party/fft/fft.h"
+
+void WebRtcIsac_InitTransform(TransformTables* tables) {
+ int k;
+ double fact, phase;
+
+ fact = PI / (FRAMESAMPLES_HALF);
+ phase = 0.0;
+ for (k = 0; k < FRAMESAMPLES_HALF; k++) {
+ tables->costab1[k] = cos(phase);
+ tables->sintab1[k] = sin(phase);
+ phase += fact;
+ }
+
+ fact = PI * ((double) (FRAMESAMPLES_HALF - 1)) / ((double) FRAMESAMPLES_HALF);
+ phase = 0.5 * fact;
+ for (k = 0; k < FRAMESAMPLES_QUARTER; k++) {
+ tables->costab2[k] = cos(phase);
+ tables->sintab2[k] = sin(phase);
+ phase += fact;
+ }
+}
+
+void WebRtcIsac_Time2Spec(const TransformTables* tables,
+ double* inre1,
+ double* inre2,
+ int16_t* outreQ7,
+ int16_t* outimQ7,
+ FFTstr* fftstr_obj) {
+ int k;
+ int dims[1];
+ double tmp1r, tmp1i, xr, xi, yr, yi, fact;
+ double tmpre[FRAMESAMPLES_HALF], tmpim[FRAMESAMPLES_HALF];
+
+
+ dims[0] = FRAMESAMPLES_HALF;
+
+
+ /* Multiply with complex exponentials and combine into one complex vector */
+ fact = 0.5 / sqrt(FRAMESAMPLES_HALF);
+ for (k = 0; k < FRAMESAMPLES_HALF; k++) {
+ tmp1r = tables->costab1[k];
+ tmp1i = tables->sintab1[k];
+ tmpre[k] = (inre1[k] * tmp1r + inre2[k] * tmp1i) * fact;
+ tmpim[k] = (inre2[k] * tmp1r - inre1[k] * tmp1i) * fact;
+ }
+
+
+ /* Get DFT */
+ WebRtcIsac_Fftns(1, dims, tmpre, tmpim, -1, 1.0, fftstr_obj);
+
+ /* Use symmetry to separate into two complex vectors and center frames in time around zero */
+ for (k = 0; k < FRAMESAMPLES_QUARTER; k++) {
+ xr = tmpre[k] + tmpre[FRAMESAMPLES_HALF - 1 - k];
+ yi = -tmpre[k] + tmpre[FRAMESAMPLES_HALF - 1 - k];
+ xi = tmpim[k] - tmpim[FRAMESAMPLES_HALF - 1 - k];
+ yr = tmpim[k] + tmpim[FRAMESAMPLES_HALF - 1 - k];
+
+ tmp1r = tables->costab2[k];
+ tmp1i = tables->sintab2[k];
+ outreQ7[k] = (int16_t)WebRtcIsac_lrint((xr * tmp1r - xi * tmp1i) * 128.0);
+ outimQ7[k] = (int16_t)WebRtcIsac_lrint((xr * tmp1i + xi * tmp1r) * 128.0);
+ outreQ7[FRAMESAMPLES_HALF - 1 - k] = (int16_t)WebRtcIsac_lrint((-yr * tmp1i - yi * tmp1r) * 128.0);
+ outimQ7[FRAMESAMPLES_HALF - 1 - k] = (int16_t)WebRtcIsac_lrint((-yr * tmp1r + yi * tmp1i) * 128.0);
+ }
+}
+
+void WebRtcIsac_Spec2time(const TransformTables* tables,
+ double* inre,
+ double* inim,
+ double* outre1,
+ double* outre2,
+ FFTstr* fftstr_obj) {
+ int k;
+ double tmp1r, tmp1i, xr, xi, yr, yi, fact;
+
+ int dims;
+
+ dims = FRAMESAMPLES_HALF;
+
+ for (k = 0; k < FRAMESAMPLES_QUARTER; k++) {
+ /* Move zero in time to beginning of frames */
+ tmp1r = tables->costab2[k];
+ tmp1i = tables->sintab2[k];
+ xr = inre[k] * tmp1r + inim[k] * tmp1i;
+ xi = inim[k] * tmp1r - inre[k] * tmp1i;
+ yr = -inim[FRAMESAMPLES_HALF - 1 - k] * tmp1r - inre[FRAMESAMPLES_HALF - 1 - k] * tmp1i;
+ yi = -inre[FRAMESAMPLES_HALF - 1 - k] * tmp1r + inim[FRAMESAMPLES_HALF - 1 - k] * tmp1i;
+
+ /* Combine into one vector, z = x + j * y */
+ outre1[k] = xr - yi;
+ outre1[FRAMESAMPLES_HALF - 1 - k] = xr + yi;
+ outre2[k] = xi + yr;
+ outre2[FRAMESAMPLES_HALF - 1 - k] = -xi + yr;
+ }
+
+
+ /* Get IDFT */
+ WebRtcIsac_Fftns(1, &dims, outre1, outre2, 1, FRAMESAMPLES_HALF, fftstr_obj);
+
+
+ /* Demodulate and separate */
+ fact = sqrt(FRAMESAMPLES_HALF);
+ for (k = 0; k < FRAMESAMPLES_HALF; k++) {
+ tmp1r = tables->costab1[k];
+ tmp1i = tables->sintab1[k];
+ xr = (outre1[k] * tmp1r - outre2[k] * tmp1i) * fact;
+ outre2[k] = (outre2[k] * tmp1r + outre1[k] * tmp1i) * fact;
+ outre1[k] = xr;
+ }
+}
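+
+/* A minimal usage sketch, assuming an initialized FFTstr supplied by the
+ * caller: modulo the Q7 rounding inside Time2Spec, Spec2time inverts it.
+ * The buffer names below are illustrative only. */
+static void ExampleRoundTrip(FFTstr* fft) {
+  TransformTables tables;
+  double lo[FRAMESAMPLES_HALF] = {0.0}, hi[FRAMESAMPLES_HALF] = {0.0};
+  int16_t re_q7[FRAMESAMPLES_HALF], im_q7[FRAMESAMPLES_HALF];
+  double re[FRAMESAMPLES_HALF], im[FRAMESAMPLES_HALF];
+  double out_lo[FRAMESAMPLES_HALF], out_hi[FRAMESAMPLES_HALF];
+  int k;
+  WebRtcIsac_InitTransform(&tables);
+  WebRtcIsac_Time2Spec(&tables, lo, hi, re_q7, im_q7, fft);
+  for (k = 0; k < FRAMESAMPLES_HALF; k++) { /* undo the Q7 scaling */
+    re[k] = re_q7[k] / 128.0;
+    im[k] = im_q7[k] / 128.0;
+  }
+  WebRtcIsac_Spec2time(&tables, re, im, out_lo, out_hi, fft);
+}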
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/test/ReleaseTest-API/ReleaseTest-API.cc b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/test/ReleaseTest-API/ReleaseTest-API.cc
new file mode 100644
index 0000000000..ee72b07dc3
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/test/ReleaseTest-API/ReleaseTest-API.cc
@@ -0,0 +1,942 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+// ReleaseTest-API.cpp : Defines the entry point for the console application.
+//
+
+#include <ctype.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+
+#include <iostream>
+
+/* include API */
+#include "modules/audio_coding/codecs/isac/main/include/isac.h"
+#include "modules/audio_coding/codecs/isac/main/util/utility.h"
+
+/* Defines */
+/* Used when running decoder on garbage data. */
+#define SEED_FILE "randseed.txt"
+/* Max number of samples per frame: 60 ms at 16 kHz or 30 ms at 32 kHz. */
+#define MAX_FRAMESAMPLES 960
+#define FRAMESAMPLES_10ms 160 /* number of samples per 10ms frame */
+#define SWBFRAMESAMPLES_10ms 320
+//#define FS 16000 /* sampling frequency (Hz) */
+
+#ifdef WIN32
+#ifndef CLOCKS_PER_SEC
+#define CLOCKS_PER_SEC 1000 /* Runtime statistics */
+#endif
+#endif
+
+int main(int argc, char* argv[]) {
+ char inname[100], outname[100], bottleneck_file[100], vadfile[100];
+ FILE *inp, *outp, *f_bn = NULL, *vadp = NULL, *bandwidthp;
+ int framecnt, endfile;
+
+ size_t i;
+ int errtype, VADusage = 0, packetLossPercent = 0;
+ int16_t CodingMode;
+ int32_t bottleneck = 0;
+ int framesize = 30; /* ms */
+ int cur_framesmpls, err;
+
+ /* Runtime statistics */
+ double starttime, runtime, length_file;
+
+ size_t stream_len = 0;
+ int declen = 0, declenTC = 0;
+ bool lostFrame = false;
+
+ int16_t shortdata[SWBFRAMESAMPLES_10ms];
+ int16_t vaddata[SWBFRAMESAMPLES_10ms * 3];
+ int16_t decoded[MAX_FRAMESAMPLES << 1];
+ int16_t decodedTC[MAX_FRAMESAMPLES << 1];
+ uint16_t streamdata[500];
+ int16_t speechType[1];
+ int16_t rateBPS = 0;
+ int16_t fixedFL = 0;
+ int16_t payloadSize = 0;
+ int32_t payloadRate = 0;
+ int setControlBWE = 0;
+ short FL, testNum;
+ char version_number[20];
+ FILE* plFile;
+ int32_t sendBN;
+
+#if !defined(NDEBUG)
+ FILE* fy;
+ double kbps;
+#endif
+ size_t totalbits = 0;
+ int totalsmpls = 0;
+
+  /* For reading from a GNS file */
+ FILE* fp_gns = NULL;
+ char gns_file[100];
+ size_t maxStreamLen30 = 0;
+ size_t maxStreamLen60 = 0;
+ short sampFreqKHz = 32;
+ short samplesIn10Ms;
+ // FILE logFile;
+ bool doTransCoding = false;
+ int32_t rateTransCoding = 0;
+ uint8_t streamDataTransCoding[1200];
+ size_t streamLenTransCoding = 0;
+ FILE* transCodingFile = NULL;
+ FILE* transcodingBitstream = NULL;
+ size_t numTransCodingBytes = 0;
+
+ /* only one structure used for ISAC encoder */
+ ISACStruct* ISAC_main_inst = NULL;
+ ISACStruct* decoderTransCoding = NULL;
+
+ BottleNeckModel BN_data;
+
+#if !defined(NDEBUG)
+ fy = fopen("bit_rate.dat", "w");
+ fclose(fy);
+ fy = fopen("bytes_frames.dat", "w");
+ fclose(fy);
+#endif
+
+ /* Handling wrong input arguments in the command line */
+ if ((argc < 3) || (argc > 17)) {
+ printf("\n\nWrong number of arguments or flag values.\n\n");
+
+ printf("\n");
+ WebRtcIsac_version(version_number);
+ printf("iSAC-swb version %s \n\n", version_number);
+
+ printf("Usage:\n\n");
+ printf("%s [-I] bottleneck_value infile outfile \n\n", argv[0]);
+ printf("with:\n");
+ printf("[-FS num] : sampling frequency in kHz, valid values are\n");
+ printf(" 16 & 32, with 16 as default.\n");
+ printf("[-I] : if -I option is specified, the coder will use\n");
+ printf(" an instantaneous Bottleneck value. If not, it\n");
+ printf(" will be an adaptive Bottleneck value.\n");
+ printf("[-assign] : Use Assign API.\n");
+ printf("[-B num] : the value of the bottleneck provided either\n");
+ printf(" as a fixed value in bits/sec (e.g. 25000) or\n");
+ printf(" read from a file (e.g. bottleneck.txt)\n");
+ printf("[-INITRATE num] : Set a new value for initial rate. Note! Only\n");
+ printf(" used in adaptive mode.\n");
+ printf("[-FL num] : Set (initial) frame length in msec. Valid\n");
+ printf(" lengths are 30 and 60 msec.\n");
+ printf("[-FIXED_FL] : Frame length will be fixed to initial value.\n");
+ printf("[-MAX num] : Set the limit for the payload size of iSAC\n");
+ printf(" in bytes. Minimum 100 maximum 400.\n");
+ printf("[-MAXRATE num] : Set the maxrate for iSAC in bits per second.\n");
+ printf(" Minimum 32000, maximum 53400.\n");
+ printf("[-F num] : if -F option is specified, the test function\n");
+ printf(" will run the iSAC API fault scenario\n");
+ printf(" specified by the supplied number.\n");
+ printf(" F 1 - Call encoder prior to init encoder call\n");
+ printf(" F 2 - Call decoder prior to init decoder call\n");
+ printf(" F 3 - Call decoder prior to encoder call\n");
+ printf(" F 4 - Call decoder with a too short coded\n");
+ printf(" sequence\n");
+ printf(" F 5 - Call decoder with a too long coded\n");
+ printf(" sequence\n");
+ printf(" F 6 - Call decoder with random bit stream\n");
+ printf(" F 7 - Call init encoder/decoder at random\n");
+ printf(" during a call\n");
+ printf(" F 8 - Call encoder/decoder without having\n");
+ printf(" allocated memory for encoder/decoder\n");
+ printf(" instance\n");
+ printf(" F 9 - Call decodeB without calling decodeA\n");
+ printf(" F 10 - Call decodeB with garbage data\n");
+ printf("[-PL num] : if -PL option is specified \n");
+ printf("[-T rate file] : test trans-coding with target bottleneck\n");
+ printf(" 'rate' bits/sec\n");
+ printf(" the output file is written to 'file'\n");
+ printf("[-LOOP num] : number of times to repeat coding the input\n");
+ printf(" file for stress testing\n");
+ // printf("[-CE num] : Test of APIs used by Conference Engine.\n");
+ // printf(" CE 1 - getNewBitstream, getBWE \n");
+ // printf(" (CE 2 - RESERVED for transcoding)\n");
+ // printf(" CE 3 - getSendBWE, setSendBWE. \n");
+ // printf("-L filename : write the logging info into file
+ // (appending)\n");
+ printf("infile : Normal speech input file\n");
+ printf("outfile : Speech output file\n");
+ exit(0);
+ }
+
+ /* Print version number */
+ printf("-------------------------------------------------\n");
+ WebRtcIsac_version(version_number);
+ printf("iSAC version %s \n\n", version_number);
+
+ /* Loop over all command line arguments */
+ CodingMode = 0;
+ testNum = 0;
+ // logFile = NULL;
+ char transCodingFileName[500];
+ int16_t totFileLoop = 0;
+ int16_t numFileLoop = 0;
+ for (i = 1; i + 2 < static_cast<size_t>(argc); i++) {
+ if (!strcmp("-LOOP", argv[i])) {
+ i++;
+ totFileLoop = (int16_t)atol(argv[i]);
+ if (totFileLoop <= 0) {
+ fprintf(stderr, "Invalid number of runs for the given input file, %d.",
+ totFileLoop);
+ exit(0);
+ }
+ }
+
+ if (!strcmp("-T", argv[i])) {
+ doTransCoding = true;
+ i++;
+ rateTransCoding = atoi(argv[i]);
+ i++;
+ strcpy(transCodingFileName, argv[i]);
+ }
+
+ /* Set Sampling Rate */
+ if (!strcmp("-FS", argv[i])) {
+ i++;
+ sampFreqKHz = atoi(argv[i]);
+ }
+
+ /* Instantaneous mode */
+ if (!strcmp("-I", argv[i])) {
+ printf("Instantaneous BottleNeck\n");
+ CodingMode = 1;
+ }
+
+ /* Set (initial) bottleneck value */
+ if (!strcmp("-INITRATE", argv[i])) {
+ rateBPS = atoi(argv[i + 1]);
+ setControlBWE = 1;
+ if ((rateBPS < 10000) || (rateBPS > 32000)) {
+ printf(
+            "\n%d is not a valid initial rate. Valid values are in the range "
+ "10000 to 32000.\n",
+ rateBPS);
+ exit(0);
+ }
+ printf("New initial rate: %d\n", rateBPS);
+ i++;
+ }
+
+ /* Set (initial) framelength */
+ if (!strcmp("-FL", argv[i])) {
+ framesize = atoi(argv[i + 1]);
+ if ((framesize != 30) && (framesize != 60)) {
+ printf(
+            "\n%d is not a valid frame length. Valid lengths are 30 and 60 "
+ "msec.\n",
+ framesize);
+ exit(0);
+ }
+ setControlBWE = 1;
+ printf("Frame Length: %d\n", framesize);
+ i++;
+ }
+
+ /* Fixed frame length */
+ if (!strcmp("-FIXED_FL", argv[i])) {
+ fixedFL = 1;
+ setControlBWE = 1;
+ printf("Fixed Frame Length\n");
+ }
+
+ /* Set maximum allowed payload size in bytes */
+ if (!strcmp("-MAX", argv[i])) {
+ payloadSize = atoi(argv[i + 1]);
+ printf("Maximum Payload Size: %d\n", payloadSize);
+ i++;
+ }
+
+    /* Set maximum rate in bits/sec */
+ if (!strcmp("-MAXRATE", argv[i])) {
+ payloadRate = atoi(argv[i + 1]);
+ printf("Maximum Rate in kbps: %d\n", payloadRate);
+ i++;
+ }
+
+    /* Test of fault scenarios */
+ if (!strcmp("-F", argv[i])) {
+ testNum = atoi(argv[i + 1]);
+ printf("Fault test: %d\n", testNum);
+ if (testNum < 1 || testNum > 10) {
+ printf(
+ "\n%d is not a valid Fault Scenario number. Valid Fault "
+ "Scenarios are numbered 1-10.\n",
+ testNum);
+ exit(0);
+ }
+ i++;
+ }
+
+ /* Packet loss test */
+ if (!strcmp("-PL", argv[i])) {
+ if (isdigit(static_cast<unsigned char>(*argv[i + 1]))) {
+ packetLossPercent = atoi(argv[i + 1]);
+        if ((packetLossPercent < 0) || (packetLossPercent > 100)) {
+          printf("\nInvalid packet loss percentage \n");
+ exit(0);
+ }
+ if (packetLossPercent > 0) {
+ printf("Simulating %d %% of independent packet loss\n",
+ packetLossPercent);
+ } else {
+ printf("\nNo Packet Loss Is Simulated \n");
+ }
+ } else {
+ plFile = fopen(argv[i + 1], "rb");
+ if (plFile == NULL) {
+ printf("\n couldn't open the frameloss file: %s\n", argv[i + 1]);
+ exit(0);
+ }
+ printf("Simulating packet loss through the given channel file: %s\n",
+ argv[i + 1]);
+ }
+ i++;
+ }
+
+ /* Random packetlosses */
+ if (!strcmp("-rnd", argv[i])) {
+ srand((unsigned int)time(NULL));
+ printf("Random pattern in lossed packets \n");
+ }
+
+ /* Use gns file */
+ if (!strcmp("-G", argv[i])) {
+ sscanf(argv[i + 1], "%s", gns_file);
+ fp_gns = fopen(gns_file, "rb");
+ if (fp_gns == NULL) {
+ printf("Cannot read file %s.\n", gns_file);
+ exit(0);
+ }
+ i++;
+ }
+
+ // make it with '-B'
+ /* Get Bottleneck value */
+ if (!strcmp("-B", argv[i])) {
+ i++;
+ bottleneck = atoi(argv[i]);
+ if (bottleneck == 0) {
+ sscanf(argv[i], "%s", bottleneck_file);
+ f_bn = fopen(bottleneck_file, "rb");
+ if (f_bn == NULL) {
+ printf(
+ "Error No value provided for BottleNeck and cannot read file "
+ "%s.\n",
+ bottleneck_file);
+ exit(0);
+ } else {
+ printf("reading bottleneck rates from file %s\n\n", bottleneck_file);
+ if (fscanf(f_bn, "%d", &bottleneck) == EOF) {
+ /* Set pointer to beginning of file */
+ fseek(f_bn, 0L, SEEK_SET);
+ if (fscanf(f_bn, "%d", &bottleneck) == EOF) {
+ exit(0);
+ }
+ }
+
+ /* Bottleneck is a cosine function
+ * Matlab code for writing the bottleneck file:
+ * BottleNeck_10ms = 20e3 + 10e3 * cos((0:5999)/5999*2*pi);
+ * fid = fopen('bottleneck.txt', 'wb');
+ * fprintf(fid, '%d\n', BottleNeck_10ms); fclose(fid);
+ */
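+          /* An equivalent C sketch (illustrative only; assumes <math.h>):
+           *   FILE* fid = fopen("bottleneck.txt", "w");
+           *   for (int n = 0; n < 6000; n++)
+           *     fprintf(fid, "%d\n",
+           *             (int)(20e3 + 10e3 * cos(n / 5999.0 * 2.0 * M_PI)));
+           *   fclose(fid);
+           */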
+ }
+ } else {
+ printf("\nfixed bottleneck rate of %d bits/s\n\n", bottleneck);
+ }
+ }
+ /* Run Conference Engine APIs */
+ // Do not test it in the first release
+ //
+ // if(!strcmp ("-CE", argv[i]))
+ // {
+ // testCE = atoi(argv[i + 1]);
+ // if(testCE==1)
+ // {
+ // i++;
+ // scale = (float)atof( argv[i+1] );
+ // }
+ // else if(testCE == 2)
+ // {
+ // printf("\nCE-test 2 (transcoding) not implemented.\n");
+ // exit(0);
+ // }
+ // else if(testCE < 1 || testCE > 3)
+ // {
+ // printf("\n%d is not a valid CE-test number. Valid CE tests
+ // are 1-3.\n", testCE);
+ // exit(0);
+ // }
+ // printf("CE-test number: %d\n", testCE);
+ // i++;
+ // }
+ }
+
+ if (CodingMode == 0) {
+ printf("\nAdaptive BottleNeck\n");
+ }
+
+ switch (sampFreqKHz) {
+ case 16: {
+ printf("iSAC Wideband.\n");
+ samplesIn10Ms = FRAMESAMPLES_10ms;
+ break;
+ }
+ case 32: {
+ printf("iSAC Supper-Wideband.\n");
+ samplesIn10Ms = SWBFRAMESAMPLES_10ms;
+ break;
+ }
+ default:
+ printf("Unsupported sampling frequency %d kHz", sampFreqKHz);
+ exit(0);
+ }
+
+ /* Get Input and Output files */
+ sscanf(argv[argc - 2], "%s", inname);
+ sscanf(argv[argc - 1], "%s", outname);
+ printf("\nInput file: %s\n", inname);
+ printf("Output file: %s\n\n", outname);
+ if ((inp = fopen(inname, "rb")) == NULL) {
+ printf(" Error iSAC Cannot read file %s.\n", inname);
+ std::cout << std::flush;
+ exit(1);
+ }
+
+ if ((outp = fopen(outname, "wb")) == NULL) {
+ printf(" Error iSAC Cannot write file %s.\n", outname);
+ std::cout << std::flush;
+ getc(stdin);
+ exit(1);
+ }
+ if (VADusage) {
+ if ((vadp = fopen(vadfile, "rb")) == NULL) {
+ printf(" Error iSAC Cannot read file %s.\n", vadfile);
+ std::cout << std::flush;
+ exit(1);
+ }
+ }
+
+ if ((bandwidthp = fopen("bwe.pcm", "wb")) == NULL) {
+ printf(" Error iSAC Cannot read file %s.\n", "bwe.pcm");
+ std::cout << std::flush;
+ exit(1);
+ }
+
+ starttime = clock() / (double)CLOCKS_PER_SEC; /* Runtime statistics */
+
+ /* Initialize the ISAC and BN structs */
+ if (testNum != 8) {
+ err = WebRtcIsac_Create(&ISAC_main_inst);
+ WebRtcIsac_SetEncSampRate(ISAC_main_inst, sampFreqKHz * 1000);
+ WebRtcIsac_SetDecSampRate(ISAC_main_inst,
+ sampFreqKHz >= 32 ? 32000 : 16000);
+ /* Error check */
+ if (err < 0) {
+ printf("\n\n Error in create.\n\n");
+ std::cout << std::flush;
+ exit(EXIT_FAILURE);
+ }
+ }
+ BN_data.arrival_time = 0;
+ BN_data.sample_count = 0;
+ BN_data.rtp_number = 0;
+
+ /* Initialize encoder and decoder */
+ framecnt = 0;
+ endfile = 0;
+
+ if (doTransCoding) {
+ WebRtcIsac_Create(&decoderTransCoding);
+ WebRtcIsac_SetEncSampRate(decoderTransCoding, sampFreqKHz * 1000);
+ WebRtcIsac_SetDecSampRate(decoderTransCoding,
+ sampFreqKHz >= 32 ? 32000 : 16000);
+ WebRtcIsac_DecoderInit(decoderTransCoding);
+ transCodingFile = fopen(transCodingFileName, "wb");
+ if (transCodingFile == NULL) {
+ printf("Could not open %s to output trans-coding.\n",
+ transCodingFileName);
+ exit(0);
+ }
+ strcat(transCodingFileName, ".bit");
+ transcodingBitstream = fopen(transCodingFileName, "wb");
+ if (transcodingBitstream == NULL) {
+ printf("Could not open %s to write the bit-stream of transcoder.\n",
+ transCodingFileName);
+ exit(0);
+ }
+ }
+
+ if (testNum != 1) {
+ if (WebRtcIsac_EncoderInit(ISAC_main_inst, CodingMode) < 0) {
+ printf("Error could not initialize the encoder \n");
+ std::cout << std::flush;
+ return 0;
+ }
+ }
+ if (testNum != 2)
+ WebRtcIsac_DecoderInit(ISAC_main_inst);
+ if (CodingMode == 1) {
+ err = WebRtcIsac_Control(ISAC_main_inst, bottleneck, framesize);
+ if (err < 0) {
+ /* exit if returned with error */
+ errtype = WebRtcIsac_GetErrorCode(ISAC_main_inst);
+ printf("\n\n Error in initialization (control): %d.\n\n", errtype);
+ std::cout << std::flush;
+ if (testNum == 0) {
+ exit(EXIT_FAILURE);
+ }
+ }
+ }
+
+ if ((setControlBWE) && (CodingMode == 0)) {
+ err = WebRtcIsac_ControlBwe(ISAC_main_inst, rateBPS, framesize, fixedFL);
+ if (err < 0) {
+ /* exit if returned with error */
+ errtype = WebRtcIsac_GetErrorCode(ISAC_main_inst);
+
+ printf("\n\n Error in Control BWE: %d.\n\n", errtype);
+ std::cout << std::flush;
+ exit(EXIT_FAILURE);
+ }
+ }
+
+ if (payloadSize != 0) {
+ err = WebRtcIsac_SetMaxPayloadSize(ISAC_main_inst, payloadSize);
+ if (err < 0) {
+ /* exit if returned with error */
+ errtype = WebRtcIsac_GetErrorCode(ISAC_main_inst);
+ printf("\n\n Error in SetMaxPayloadSize: %d.\n\n", errtype);
+ std::cout << std::flush;
+ exit(EXIT_FAILURE);
+ }
+ }
+ if (payloadRate != 0) {
+ err = WebRtcIsac_SetMaxRate(ISAC_main_inst, payloadRate);
+ if (err < 0) {
+ /* exit if returned with error */
+ errtype = WebRtcIsac_GetErrorCode(ISAC_main_inst);
+ printf("\n\n Error in SetMaxRateInBytes: %d.\n\n", errtype);
+ std::cout << std::flush;
+ exit(EXIT_FAILURE);
+ }
+ }
+
+ *speechType = 1;
+
+ std::cout << "\n" << std::flush;
+
+ length_file = 0;
+ int16_t bnIdxTC = 0;
+ int16_t jitterInfoTC = 0;
+ while (endfile == 0) {
+ /* Call init functions at random, fault test number 7 */
+ if (testNum == 7 && (rand() % 2 == 0)) {
+ err = WebRtcIsac_EncoderInit(ISAC_main_inst, CodingMode);
+ /* Error check */
+ if (err < 0) {
+ errtype = WebRtcIsac_GetErrorCode(ISAC_main_inst);
+ printf("\n\n Error in encoderinit: %d.\n\n", errtype);
+ std::cout << std::flush;
+ }
+
+ WebRtcIsac_DecoderInit(ISAC_main_inst);
+ }
+
+ cur_framesmpls = 0;
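+    // Feed 10 ms blocks to the encoder until it emits a packet for a full
+    // 30 or 60 ms frame (see the break at the bottom of this loop).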
+ while (1) {
+ int stream_len_int = 0;
+
+ /* Read 10 ms speech block */
+ endfile = readframe(shortdata, inp, samplesIn10Ms);
+
+ if (endfile) {
+ numFileLoop++;
+ if (numFileLoop < totFileLoop) {
+ rewind(inp);
+ framecnt = 0;
+ fprintf(stderr, "\n");
+ endfile = readframe(shortdata, inp, samplesIn10Ms);
+ }
+ }
+
+ if (testNum == 7) {
+ srand((unsigned int)time(NULL));
+ }
+
+ /* iSAC encoding */
+ if (!(testNum == 3 && framecnt == 0)) {
+ stream_len_int =
+ WebRtcIsac_Encode(ISAC_main_inst, shortdata, (uint8_t*)streamdata);
+ if ((payloadSize != 0) && (stream_len_int > payloadSize)) {
+ if (testNum == 0) {
+ printf("\n\n");
+ }
+
+ printf("\nError: Streamsize out of range %d\n",
+ stream_len_int - payloadSize);
+ std::cout << std::flush;
+ }
+
+ WebRtcIsac_GetUplinkBw(ISAC_main_inst, &sendBN);
+
+ if (stream_len_int > 0) {
+ if (doTransCoding) {
+ int16_t indexStream;
+ uint8_t auxUW8;
+
+ /******************** Main Transcoding stream ********************/
+ WebRtcIsac_GetDownLinkBwIndex(ISAC_main_inst, &bnIdxTC,
+ &jitterInfoTC);
+ int streamLenTransCoding_int = WebRtcIsac_GetNewBitStream(
+ ISAC_main_inst, bnIdxTC, jitterInfoTC, rateTransCoding,
+ streamDataTransCoding, false);
+ if (streamLenTransCoding_int < 0) {
+ fprintf(stderr, "Error in trans-coding\n");
+ exit(0);
+ }
+ streamLenTransCoding =
+ static_cast<size_t>(streamLenTransCoding_int);
+ auxUW8 = (uint8_t)(((streamLenTransCoding & 0xFF00) >> 8) & 0x00FF);
+ if (fwrite(&auxUW8, sizeof(uint8_t), 1, transcodingBitstream) !=
+ 1) {
+ return -1;
+ }
+
+ auxUW8 = (uint8_t)(streamLenTransCoding & 0x00FF);
+ if (fwrite(&auxUW8, sizeof(uint8_t), 1, transcodingBitstream) !=
+ 1) {
+ return -1;
+ }
+
+ if (fwrite(streamDataTransCoding, sizeof(uint8_t),
+ streamLenTransCoding,
+ transcodingBitstream) != streamLenTransCoding) {
+ return -1;
+ }
+
+ WebRtcIsac_ReadBwIndex(streamDataTransCoding, &indexStream);
+ if (indexStream != bnIdxTC) {
+ fprintf(stderr,
+ "Error in inserting Bandwidth index into transcoding "
+ "stream.\n");
+ exit(0);
+ }
+ numTransCodingBytes += streamLenTransCoding;
+ }
+ }
+ } else {
+ break;
+ }
+
+ if (stream_len_int < 0) {
+ /* exit if returned with error */
+ errtype = WebRtcIsac_GetErrorCode(ISAC_main_inst);
+ fprintf(stderr, "Error in encoder: %d.\n", errtype);
+ std::cout << std::flush;
+ exit(0);
+ }
+ stream_len = static_cast<size_t>(stream_len_int);
+
+ cur_framesmpls += samplesIn10Ms;
+ /* exit encoder loop if the encoder returned a bitstream */
+ if (stream_len != 0)
+ break;
+ }
+
+ /* read next bottleneck rate */
+ if (f_bn != NULL) {
+ if (fscanf(f_bn, "%d", &bottleneck) == EOF) {
+ /* Set pointer to beginning of file */
+ fseek(f_bn, 0L, SEEK_SET);
+ if (fscanf(f_bn, "%d", &bottleneck) == EOF) {
+ exit(0);
+ }
+ }
+ if (CodingMode == 1) {
+ WebRtcIsac_Control(ISAC_main_inst, bottleneck, framesize);
+ }
+ }
+
+ length_file += cur_framesmpls;
+ if (cur_framesmpls == (3 * samplesIn10Ms)) {
+ maxStreamLen30 =
+ (stream_len > maxStreamLen30) ? stream_len : maxStreamLen30;
+ } else {
+ maxStreamLen60 =
+ (stream_len > maxStreamLen60) ? stream_len : maxStreamLen60;
+ }
+
+ if (!lostFrame) {
+ lostFrame = ((rand() % 100) < packetLossPercent);
+ } else {
+ lostFrame = false;
+ }
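+    // Losses are never simulated back to back, so the RED (redundant)
+    // payload fetched below always arrives and can cover the lost frame.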
+
+ // RED.
+ if (lostFrame) {
+ int stream_len_int = WebRtcIsac_GetRedPayload(
+ ISAC_main_inst, reinterpret_cast<uint8_t*>(streamdata));
+ if (stream_len_int < 0) {
+ fprintf(stderr, "Error getting RED payload\n");
+ exit(0);
+ }
+ stream_len = static_cast<size_t>(stream_len_int);
+
+ if (doTransCoding) {
+ int streamLenTransCoding_int = WebRtcIsac_GetNewBitStream(
+ ISAC_main_inst, bnIdxTC, jitterInfoTC, rateTransCoding,
+ streamDataTransCoding, true);
+ if (streamLenTransCoding_int < 0) {
+ fprintf(stderr, "Error in RED trans-coding\n");
+ exit(0);
+ }
+ streamLenTransCoding = static_cast<size_t>(streamLenTransCoding_int);
+ }
+ }
+
+    /* Make the coded sequence too short by increasing */
+    /* the length the decoder expects. */
+ if (testNum == 4) {
+ stream_len += 10;
+ }
+
+    /* Make the coded sequence too long by decreasing */
+    /* the length the decoder expects. */
+ if (testNum == 5) {
+ stream_len -= 10;
+ }
+
+ if (testNum == 6) {
+ srand((unsigned int)time(NULL));
+ for (i = 0; i < stream_len; i++) {
+ streamdata[i] = rand();
+ }
+ }
+
+ if (VADusage) {
+ readframe(vaddata, vadp, samplesIn10Ms * 3);
+ }
+
+ /* simulate packet handling through NetEq and the modem */
+ if (!(testNum == 3 && framecnt == 0)) {
+ get_arrival_time(cur_framesmpls, stream_len, bottleneck, &BN_data,
+ sampFreqKHz * 1000, sampFreqKHz * 1000);
+ }
+
+ if (VADusage && (framecnt > 10 && vaddata[0] == 0)) {
+ BN_data.rtp_number--;
+ } else {
+ /* Error test number 10, garbage data */
+ if (testNum == 10) {
+ /* Test to run decoder with garbage data */
+ for (i = 0; i < stream_len; i++) {
+ streamdata[i] = (short)(streamdata[i]) + (short)rand();
+ }
+ }
+
+ if (testNum != 9) {
+ err = WebRtcIsac_UpdateBwEstimate(
+ ISAC_main_inst, reinterpret_cast<const uint8_t*>(streamdata),
+ stream_len, BN_data.rtp_number, BN_data.sample_count,
+ BN_data.arrival_time);
+
+ if (err < 0) {
+ /* exit if returned with error */
+ errtype = WebRtcIsac_GetErrorCode(ISAC_main_inst);
+ if (testNum == 0) {
+ printf("\n\n");
+ }
+
+ printf("Error: in decoder: %d.", errtype);
+ std::cout << std::flush;
+ if (testNum == 0) {
+ printf("\n\n");
+ }
+ }
+ }
+
+ /* Call getFramelen, only used here for function test */
+ err = WebRtcIsac_ReadFrameLen(
+ ISAC_main_inst, reinterpret_cast<const uint8_t*>(streamdata), &FL);
+ if (err < 0) {
+ /* exit if returned with error */
+ errtype = WebRtcIsac_GetErrorCode(ISAC_main_inst);
+ if (testNum == 0) {
+ printf("\n\n");
+ }
+ printf(" Error: in getFrameLen %d.", errtype);
+ std::cout << std::flush;
+ if (testNum == 0) {
+ printf("\n\n");
+ }
+ }
+
+ // iSAC decoding
+
+ if (lostFrame) {
+ declen = WebRtcIsac_DecodeRcu(
+ ISAC_main_inst, reinterpret_cast<const uint8_t*>(streamdata),
+ stream_len, decoded, speechType);
+
+ if (doTransCoding) {
+ declenTC =
+ WebRtcIsac_DecodeRcu(decoderTransCoding, streamDataTransCoding,
+ streamLenTransCoding, decodedTC, speechType);
+ }
+ } else {
+ declen = WebRtcIsac_Decode(ISAC_main_inst,
+ reinterpret_cast<const uint8_t*>(streamdata),
+ stream_len, decoded, speechType);
+ if (doTransCoding) {
+ declenTC =
+ WebRtcIsac_Decode(decoderTransCoding, streamDataTransCoding,
+ streamLenTransCoding, decodedTC, speechType);
+ }
+ }
+
+ if (declen < 0) {
+ /* exit if returned with error */
+ errtype = WebRtcIsac_GetErrorCode(ISAC_main_inst);
+ if (testNum == 0) {
+ printf("\n\n");
+ }
+ printf(" Error: in decoder %d.", errtype);
+ std::cout << std::flush;
+ if (testNum == 0) {
+ printf("\n\n");
+ }
+ }
+
+ if (declenTC < 0) {
+ if (testNum == 0) {
+ printf("\n\n");
+ }
+ printf(" Error: in decoding the transcoded stream");
+ std::cout << std::flush;
+ if (testNum == 0) {
+ printf("\n\n");
+ }
+ }
+ }
+ /* Write decoded speech frame to file */
+ if ((declen > 0) && (numFileLoop == 0)) {
+ if (fwrite(decoded, sizeof(int16_t), declen, outp) !=
+ static_cast<size_t>(declen)) {
+ return -1;
+ }
+ }
+
+ if ((declenTC > 0) && (numFileLoop == 0)) {
+      if (fwrite(decodedTC, sizeof(int16_t), declenTC, transCodingFile) !=
+          static_cast<size_t>(declenTC)) {
+ return -1;
+ }
+ }
+
+ fprintf(stderr, "\rframe = %5d ", framecnt);
+ fflush(stderr);
+ framecnt++;
+
+ /* Error test number 10, garbage data */
+ // if (testNum == 10)
+ // {
+ // /* Test to run decoder with garbage data */
+ // if ((seedfile = fopen(SEED_FILE, "a+t")) == NULL) {
+ // fprintf(stderr, "Error: Could not open file %s\n", SEED_FILE);
+ // } else {
+ // fprintf(seedfile, "ok\n\n");
+ // fclose(seedfile);
+ // }
+ // }
+ /* Error test number 10, garbage data */
+ // if (testNum == 10) {
+ // /* Test to run decoder with garbage data */
+ // for (i = 0; i < stream_len; i++) {
+ // streamdata[i] = (short) (streamdata[i] + (short) rand());
+ // }
+ // }
+
+ totalsmpls += declen;
+ totalbits += 8 * stream_len;
+#if !defined(NDEBUG)
+ kbps = ((double)sampFreqKHz * 1000.) / ((double)cur_framesmpls) * 8.0 *
+ stream_len / 1000.0; // kbits/s
+ fy = fopen("bit_rate.dat", "a");
+ fprintf(fy, "Frame %i = %0.14f\n", framecnt, kbps);
+ fclose(fy);
+
+#endif
+ }
+ printf("\n");
+ printf("total bits = %zu bits\n", totalbits);
+ printf("measured average bitrate = %0.3f kbits/s\n",
+ (double)totalbits * (sampFreqKHz) / totalsmpls);
+ if (doTransCoding) {
+ printf("Transcoding average bit-rate = %0.3f kbps\n",
+ (double)numTransCodingBytes * 8.0 * (sampFreqKHz) / totalsmpls);
+ fclose(transCodingFile);
+ }
+ printf("\n");
+
+ /* Runtime statistics */
+ runtime = (double)(clock() / (double)CLOCKS_PER_SEC - starttime);
+ length_file = length_file / (sampFreqKHz * 1000.);
+
+ printf("\n\nLength of speech file: %.1f s\n", length_file);
+ printf("Time to run iSAC: %.2f s (%.2f %% of realtime)\n\n", runtime,
+ (100 * runtime / length_file));
+
+ if (maxStreamLen30 != 0) {
+ printf(
+ "Maximum payload size 30ms Frames %zu"
+ " bytes (%0.3f kbps)\n",
+ maxStreamLen30, maxStreamLen30 * 8 / 30.);
+ }
+ if (maxStreamLen60 != 0) {
+ printf(
+ "Maximum payload size 60ms Frames %zu"
+ " bytes (%0.3f kbps)\n",
+ maxStreamLen60, maxStreamLen60 * 8 / 60.);
+ }
+ // fprintf(stderr, "\n");
+
+ fprintf(stderr, " %.1f s", length_file);
+ fprintf(stderr, " %0.1f kbps",
+ (double)totalbits * (sampFreqKHz) / totalsmpls);
+ if (maxStreamLen30 != 0) {
+ fprintf(stderr, " plmax-30ms %zu bytes (%0.0f kbps)", maxStreamLen30,
+ maxStreamLen30 * 8 / 30.);
+ }
+ if (maxStreamLen60 != 0) {
+ fprintf(stderr, " plmax-60ms %zu bytes (%0.0f kbps)", maxStreamLen60,
+ maxStreamLen60 * 8 / 60.);
+ }
+ if (doTransCoding) {
+ fprintf(stderr, " transcoding rate %.0f kbps",
+ (double)numTransCodingBytes * 8.0 * (sampFreqKHz) / totalsmpls);
+ }
+
+ fclose(inp);
+ fclose(outp);
+ WebRtcIsac_Free(ISAC_main_inst);
+
+ exit(0);
+}
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/test/SwitchingSampRate/SwitchingSampRate.cc b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/test/SwitchingSampRate/SwitchingSampRate.cc
new file mode 100644
index 0000000000..549163fc44
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/test/SwitchingSampRate/SwitchingSampRate.cc
@@ -0,0 +1,425 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+// SwitchingSampRate.cpp : Defines the entry point for the console
+// application.
+//
+
+#include <iostream>
+
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+#include "modules/audio_coding/codecs/isac/main/include/isac.h"
+#include "modules/audio_coding/codecs/isac/main/util/utility.h"
+
+#define MAX_FILE_NAME 500
+#define MAX_NUM_CLIENTS 2
+
+#define NUM_CLIENTS 2
+
+int main(int argc, char* argv[]) {
+ char fileNameWB[MAX_FILE_NAME];
+ char fileNameSWB[MAX_FILE_NAME];
+
+ char outFileName[MAX_NUM_CLIENTS][MAX_FILE_NAME];
+
+ FILE* inFile[MAX_NUM_CLIENTS];
+ FILE* outFile[MAX_NUM_CLIENTS];
+
+ ISACStruct* codecInstance[MAX_NUM_CLIENTS];
+ int32_t resamplerState[MAX_NUM_CLIENTS][8];
+
+ int encoderSampRate[MAX_NUM_CLIENTS];
+
+ int minBn = 16000;
+ int maxBn = 56000;
+
+ int bnWB = 32000;
+ int bnSWB = 56000;
+
+ strcpy(outFileName[0], "switchSampRate_out1.pcm");
+ strcpy(outFileName[1], "switchSampRate_out2.pcm");
+
+ short clientCntr;
+
+ size_t lenEncodedInBytes[MAX_NUM_CLIENTS];
+ unsigned int lenAudioIn10ms[MAX_NUM_CLIENTS];
+ size_t lenEncodedInBytesTmp[MAX_NUM_CLIENTS];
+ unsigned int lenAudioIn10msTmp[MAX_NUM_CLIENTS];
+ BottleNeckModel* packetData[MAX_NUM_CLIENTS];
+
+ char versionNumber[100];
+ short samplesIn10ms[MAX_NUM_CLIENTS];
+ int bottleneck[MAX_NUM_CLIENTS];
+
+ printf("\n\n");
+ printf("____________________________________________\n\n");
+ WebRtcIsac_version(versionNumber);
+ printf(" iSAC-swb version %s\n", versionNumber);
+ printf("____________________________________________\n");
+
+ fileNameWB[0] = '\0';
+ fileNameSWB[0] = '\0';
+
+ char myFlag[20];
+ strcpy(myFlag, "-wb");
+ // READ THE WIDEBAND AND SUPER-WIDEBAND FILE NAMES
+ if (readParamString(argc, argv, myFlag, fileNameWB, MAX_FILE_NAME) <= 0) {
+ printf("No wideband file is specified");
+ }
+
+ strcpy(myFlag, "-swb");
+ if (readParamString(argc, argv, myFlag, fileNameSWB, MAX_FILE_NAME) <= 0) {
+ printf("No super-wideband file is specified");
+ }
+
+ // THE FIRST CLIENT STARTS IN WIDEBAND
+ encoderSampRate[0] = 16000;
+ OPEN_FILE_RB(inFile[0], fileNameWB);
+
+ // THE SECOND CLIENT STARTS IN SUPER-WIDEBAND
+ encoderSampRate[1] = 32000;
+ OPEN_FILE_RB(inFile[1], fileNameSWB);
+
+ strcpy(myFlag, "-I");
+ short codingMode = readSwitch(argc, argv, myFlag);
+
+ for (clientCntr = 0; clientCntr < NUM_CLIENTS; clientCntr++) {
+ codecInstance[clientCntr] = NULL;
+
+ printf("\n");
+ printf("Client %d\n", clientCntr + 1);
+ printf("---------\n");
+ printf("Starting %s", (encoderSampRate[clientCntr] == 16000)
+ ? "wideband"
+ : "super-wideband");
+
+ // Open output File Name
+ OPEN_FILE_WB(outFile[clientCntr], outFileName[clientCntr]);
+ printf("Output File...................... %s\n", outFileName[clientCntr]);
+
+    samplesIn10ms[clientCntr] = encoderSampRate[clientCntr] / 100; /* 10 ms */
+
+ if (codingMode == 1) {
+ bottleneck[clientCntr] = (clientCntr) ? bnSWB : bnWB;
+ } else {
+ bottleneck[clientCntr] = (clientCntr) ? minBn : maxBn;
+ }
+
+ printf("Bottleneck....................... %0.3f kbits/sec \n",
+ bottleneck[clientCntr] / 1000.0);
+
+ // coding-mode
+ printf(
+ "Encoding Mode.................... %s\n",
+ (codingMode == 1) ? "Channel-Independent (Instantaneous)" : "Adaptive");
+
+ lenEncodedInBytes[clientCntr] = 0;
+ lenAudioIn10ms[clientCntr] = 0;
+ lenEncodedInBytesTmp[clientCntr] = 0;
+ lenAudioIn10msTmp[clientCntr] = 0;
+
+ packetData[clientCntr] = (BottleNeckModel*)new (BottleNeckModel);
+ if (packetData[clientCntr] == NULL) {
+ printf("Could not allocate memory for packetData \n");
+ return -1;
+ }
+ memset(packetData[clientCntr], 0, sizeof(BottleNeckModel));
+ memset(resamplerState[clientCntr], 0, sizeof(int32_t) * 8);
+ }
+
+ for (clientCntr = 0; clientCntr < NUM_CLIENTS; clientCntr++) {
+ // Create
+ if (WebRtcIsac_Create(&codecInstance[clientCntr])) {
+ printf("Could not creat client %d\n", clientCntr + 1);
+ return -1;
+ }
+
+ WebRtcIsac_SetEncSampRate(codecInstance[clientCntr],
+ encoderSampRate[clientCntr]);
+
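+    // Each decoder runs at the *other* client's encoder rate; the index
+    // expression maps clientCntr 0 -> 1 and 1 -> 0.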
+ WebRtcIsac_SetDecSampRate(
+ codecInstance[clientCntr],
+ encoderSampRate[clientCntr + (1 - ((clientCntr & 1) << 1))]);
+
+ // Initialize Encoder
+ if (WebRtcIsac_EncoderInit(codecInstance[clientCntr], codingMode) < 0) {
+ printf("Could not initialize client, %d\n", clientCntr + 1);
+ return -1;
+ }
+
+ WebRtcIsac_DecoderInit(codecInstance[clientCntr]);
+
+ // setup Rate if in Instantaneous mode
+ if (codingMode != 0) {
+ // ONLY Clients who are not in Adaptive mode
+ if (WebRtcIsac_Control(codecInstance[clientCntr], bottleneck[clientCntr],
+ 30) < 0) {
+ printf("Could not setup bottleneck and frame-size for client %d\n",
+ clientCntr + 1);
+ return -1;
+ }
+ }
+ }
+
+ size_t streamLen;
+ short numSamplesRead;
+ size_t lenDecodedAudio;
+ short senderIdx;
+ short receiverIdx;
+
+ printf("\n");
+ short num10ms[MAX_NUM_CLIENTS];
+ memset(num10ms, 0, sizeof(short) * MAX_NUM_CLIENTS);
+ FILE* arrivalTimeFile1 = fopen("arrivalTime1.dat", "wb");
+ FILE* arrivalTimeFile2 = fopen("arrivalTime2.dat", "wb");
+ short numPrint[MAX_NUM_CLIENTS];
+ memset(numPrint, 0, sizeof(short) * MAX_NUM_CLIENTS);
+
+ // Audio Buffers
+ short silence10ms[10 * 32];
+ memset(silence10ms, 0, 320 * sizeof(short));
+ short audioBuff10ms[10 * 32];
+ short audioBuff60ms[60 * 32];
+ short resampledAudio60ms[60 * 32];
+
+ unsigned short bitStream[600 + 600];
+ short speechType[1];
+
+ short numSampFreqChanged = 0;
+ while (numSampFreqChanged < 10) {
+ for (clientCntr = 0; clientCntr < NUM_CLIENTS; clientCntr++) {
+ // Encoding/decoding for this pair of clients, if there is
+ // audio for any of them
+ // if(audioLeft[clientCntr] || audioLeft[clientCntr + 1])
+ //{
+ // for(pairCntr = 0; pairCntr < 2; pairCntr++)
+ //{
+ senderIdx = clientCntr; // + pairCntr;
+ receiverIdx = 1 - clientCntr; // + (1 - pairCntr);
+
+ // if(num10ms[senderIdx] > 6)
+ //{
+ // printf("Too many frames read for client %d",
+ // senderIdx + 1);
+ // return -1;
+ //}
+
+ numSamplesRead =
+ (short)fread(audioBuff10ms, sizeof(short), samplesIn10ms[senderIdx],
+ inFile[senderIdx]);
+ if (numSamplesRead != samplesIn10ms[senderIdx]) {
+ // file finished switch encoder sampling frequency.
+ printf("Changing Encoder Sampling frequency in client %d to ",
+ senderIdx + 1);
+ fclose(inFile[senderIdx]);
+ numSampFreqChanged++;
+ if (encoderSampRate[senderIdx] == 16000) {
+ printf("super-wideband.\n");
+ OPEN_FILE_RB(inFile[senderIdx], fileNameSWB);
+ encoderSampRate[senderIdx] = 32000;
+ } else {
+ printf("wideband.\n");
+ OPEN_FILE_RB(inFile[senderIdx], fileNameWB);
+ encoderSampRate[senderIdx] = 16000;
+ }
+ WebRtcIsac_SetEncSampRate(codecInstance[senderIdx],
+ encoderSampRate[senderIdx]);
+ WebRtcIsac_SetDecSampRate(codecInstance[receiverIdx],
+ encoderSampRate[senderIdx]);
+
+        samplesIn10ms[clientCntr] = encoderSampRate[clientCntr] / 100;
+
+ numSamplesRead =
+ (short)fread(audioBuff10ms, sizeof(short), samplesIn10ms[senderIdx],
+ inFile[senderIdx]);
+ if (numSamplesRead != samplesIn10ms[senderIdx]) {
+ printf(" File %s for client %d has not enough audio\n",
+ (encoderSampRate[senderIdx] == 16000) ? "wideband"
+ : "super-wideband",
+ senderIdx + 1);
+ return -1;
+ }
+ }
+ num10ms[senderIdx]++;
+
+ // sanity check
+ // if(num10ms[senderIdx] > 6)
+ //{
+ // printf("Client %d has got more than 60 ms audio and encoded no
+ // packet.\n",
+ // senderIdx);
+ // return -1;
+ //}
+
+ // Encode
+
+ int streamLen_int = WebRtcIsac_Encode(codecInstance[senderIdx],
+ audioBuff10ms, (uint8_t*)bitStream);
+ int16_t ggg;
+ if (streamLen_int > 0) {
+ if ((WebRtcIsac_ReadFrameLen(
+ codecInstance[receiverIdx],
+ reinterpret_cast<const uint8_t*>(bitStream), &ggg)) < 0)
+ printf("ERROR\n");
+ }
+
+ // Sanity check
+ if (streamLen_int < 0) {
+ printf(" Encoder error in client %d \n", senderIdx + 1);
+ return -1;
+ }
+ streamLen = static_cast<size_t>(streamLen_int);
+
+ if (streamLen > 0) {
+ // Packet generated; model sending through a channel, do bandwidth
+ // estimation at the receiver and decode.
+ lenEncodedInBytes[senderIdx] += streamLen;
+ lenAudioIn10ms[senderIdx] += (unsigned int)num10ms[senderIdx];
+ lenEncodedInBytesTmp[senderIdx] += streamLen;
+ lenAudioIn10msTmp[senderIdx] += (unsigned int)num10ms[senderIdx];
+
+        // Print after ~1 sec (100 blocks of 10 ms).
+ if (lenAudioIn10msTmp[senderIdx] >= 100) {
+ numPrint[senderIdx]++;
+ printf(" %d, %6.3f => %6.3f ", senderIdx + 1,
+ bottleneck[senderIdx] / 1000.0,
+ lenEncodedInBytesTmp[senderIdx] * 0.8 /
+ lenAudioIn10msTmp[senderIdx]);
+
+ if (codingMode == 0) {
+ int32_t bn;
+ WebRtcIsac_GetUplinkBw(codecInstance[senderIdx], &bn);
+ printf("[%d] ", bn);
+ }
+ // int16_t rateIndexLB;
+ // int16_t rateIndexUB;
+ // WebRtcIsac_GetDownLinkBwIndex(codecInstance[receiverIdx],
+ // &rateIndexLB, &rateIndexUB);
+ // printf(" (%2d, %2d) ", rateIndexLB, rateIndexUB);
+
+ std::cout << std::flush;
+ lenEncodedInBytesTmp[senderIdx] = 0;
+ lenAudioIn10msTmp[senderIdx] = 0;
+ // if(senderIdx == (NUM_CLIENTS - 1))
+ //{
+ printf(" %0.1f \n", lenAudioIn10ms[senderIdx] * 10. / 1000);
+ //}
+
+ // After ~20 sec change the bottleneck.
+ // if((numPrint[senderIdx] == 4) && (codingMode == 0))
+ // {
+ // numPrint[senderIdx] = 0;
+ // if(codingMode == 0)
+ // {
+ // int newBottleneck = bottleneck[senderIdx] +
+ // (bottleneckChange[senderIdx] * 1000);
+
+ // if(bottleneckChange[senderIdx] > 0)
+ // {
+ // if(newBottleneck >maxBn)
+ // {
+ // bottleneckChange[senderIdx] = -1;
+ // newBottleneck = bottleneck[senderIdx] +
+ // (bottleneckChange[senderIdx] * 1000);
+ // if(newBottleneck > minBn)
+ // {
+ // bottleneck[senderIdx] = newBottleneck;
+ // }
+ // }
+ // else
+ // {
+ // bottleneck[senderIdx] = newBottleneck;
+ // }
+ // }
+ // else
+ // {
+ // if(newBottleneck < minBn)
+ // {
+ // bottleneckChange[senderIdx] = 1;
+ // newBottleneck = bottleneck[senderIdx] +
+ // (bottleneckChange[senderIdx] * 1000);
+ // if(newBottleneck < maxBn)
+ // {
+ // bottleneck[senderIdx] = newBottleneck;
+ // }
+ // }
+ // else
+ // {
+ // bottleneck[senderIdx] = newBottleneck;
+ // }
+ // }
+ // }
+ // }
+ }
+
+ // model a channel of given bottleneck, to get the receive timestamp
+ get_arrival_time(num10ms[senderIdx] * samplesIn10ms[senderIdx],
+ streamLen, bottleneck[senderIdx],
+ packetData[senderIdx],
+ encoderSampRate[senderIdx] * 1000,
+ encoderSampRate[senderIdx] * 1000);
+
+ // Write the arrival time.
+ if (senderIdx == 0) {
+ if (fwrite(&(packetData[senderIdx]->arrival_time),
+ sizeof(unsigned int), 1, arrivalTimeFile1) != 1) {
+ return -1;
+ }
+ } else {
+ if (fwrite(&(packetData[senderIdx]->arrival_time),
+ sizeof(unsigned int), 1, arrivalTimeFile2) != 1) {
+ return -1;
+ }
+ }
+
+ // BWE
+ if (WebRtcIsac_UpdateBwEstimate(
+ codecInstance[receiverIdx],
+ reinterpret_cast<const uint8_t*>(bitStream), streamLen,
+ packetData[senderIdx]->rtp_number,
+ packetData[senderIdx]->sample_count,
+ packetData[senderIdx]->arrival_time) < 0) {
+ printf(" BWE Error at client %d \n", receiverIdx + 1);
+ return -1;
+ }
+ /**/
+ // Decode
+ int lenDecodedAudio_int =
+ WebRtcIsac_Decode(codecInstance[receiverIdx],
+ reinterpret_cast<const uint8_t*>(bitStream),
+ streamLen, audioBuff60ms, speechType);
+ if (lenDecodedAudio_int < 0) {
+ printf(" Decoder error in client %d \n", receiverIdx + 1);
+ return -1;
+ }
+ lenDecodedAudio = static_cast<size_t>(lenDecodedAudio_int);
+
+ if (encoderSampRate[senderIdx] == 16000) {
+ WebRtcSpl_UpsampleBy2(audioBuff60ms, lenDecodedAudio,
+ resampledAudio60ms,
+ resamplerState[receiverIdx]);
+ if (fwrite(resampledAudio60ms, sizeof(short), lenDecodedAudio << 1,
+ outFile[receiverIdx]) != lenDecodedAudio << 1) {
+ return -1;
+ }
+ } else {
+ if (fwrite(audioBuff60ms, sizeof(short), lenDecodedAudio,
+ outFile[receiverIdx]) != lenDecodedAudio) {
+ return -1;
+ }
+ }
+ num10ms[senderIdx] = 0;
+ }
+ //}
+ //}
+ }
+ }
+ return 0;
+}
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/test/simpleKenny.c b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/test/simpleKenny.c
new file mode 100644
index 0000000000..4446ff7806
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/test/simpleKenny.c
@@ -0,0 +1,461 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/* kenny.c - Main function for the iSAC coder */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+
+#ifdef WIN32
+#include "windows.h"
+#ifndef CLOCKS_PER_SEC
+#define CLOCKS_PER_SEC 1000
+#endif
+#endif
+
+#include <math.h>
+
+/* include API */
+#include "modules/audio_coding/codecs/isac/main/include/isac.h"
+#include "modules/audio_coding/codecs/isac/main/util/utility.h"
+
+/* max number of samples per frame (= 60 ms frame) */
+#define MAX_FRAMESAMPLES_SWB 1920
+/* number of samples per 10ms frame */
+#define FRAMESAMPLES_SWB_10ms 320
+#define FRAMESAMPLES_WB_10ms 160
+
+/* sampling frequency (Hz) */
+#define FS_SWB 32000
+#define FS_WB 16000
+
+unsigned long framecnt = 0;
+
+int main(int argc, char* argv[]) {
+ //--- File IO ----
+ FILE* inp;
+ FILE* outp;
+ char inname[500];
+ char outname[500];
+
+ /* Runtime statistics */
+ double rate;
+ double rateRCU;
+ size_t totalbits = 0;
+ unsigned long totalBitsRCU = 0;
+ unsigned long totalsmpls = 0;
+
+ int32_t bottleneck = 39;
+ int frameSize = 30; /* ms */
+ int16_t codingMode = 1;
+ int16_t shortdata[FRAMESAMPLES_SWB_10ms];
+ int16_t decoded[MAX_FRAMESAMPLES_SWB];
+ int16_t speechType[1];
+ int16_t payloadLimit;
+ int32_t rateLimit;
+ ISACStruct* ISAC_main_inst;
+
+ size_t stream_len = 0;
+ int declen = 0;
+ int16_t err;
+ int cur_framesmpls;
+ int endfile;
+#ifdef WIN32
+ double length_file;
+ double runtime;
+ char outDrive[10];
+ char outPath[500];
+ char outPrefix[500];
+ char outSuffix[500];
+ char bitrateFileName[500];
+ FILE* bitrateFile;
+ double starttime;
+ double rateLB = 0;
+ double rateUB = 0;
+#endif
+ FILE* histFile;
+ FILE* averageFile;
+ int sampFreqKHz;
+ int samplesIn10Ms;
+ size_t maxStreamLen = 0;
+ char histFileName[500];
+ char averageFileName[500];
+ unsigned int hist[600];
+ double tmpSumStreamLen = 0;
+ unsigned int packetCntr = 0;
+ unsigned int lostPacketCntr = 0;
+ uint8_t payload[1200];
+ uint8_t payloadRCU[1200];
+ uint16_t packetLossPercent = 0;
+ int16_t rcuStreamLen = 0;
+ int onlyEncode;
+ int onlyDecode;
+
+ BottleNeckModel packetData;
+ packetData.arrival_time = 0;
+ packetData.sample_count = 0;
+ packetData.rtp_number = 0;
+ memset(hist, 0, sizeof(hist));
+
+ /* handling wrong input arguments in the command line */
+ if (argc < 5) {
+ printf("\n\nWrong number of arguments or flag values.\n\n");
+
+ printf("Usage:\n\n");
+ printf("%s infile outfile -bn bottleneck [options]\n\n", argv[0]);
+ printf("with:\n");
+ printf("-I.............. indicates encoding in instantaneous mode.\n");
+ printf("-bn bottleneck.. the value of the bottleneck in bit/sec, e.g.\n");
+ printf(" 39742, in instantaneous (channel-independent)\n");
+ printf(" mode.\n\n");
+ printf("infile.......... Normal speech input file\n\n");
+ printf("outfile......... Speech output file\n\n");
+ printf("OPTIONS\n");
+ printf("-------\n");
+ printf("-fs sampFreq.... sampling frequency of codec 16 or 32 (default)\n");
+ printf(" kHz.\n");
+ printf("-plim payloadLim payload limit in bytes, default is the maximum\n");
+ printf(" possible.\n");
+ printf("-rlim rateLim... rate limit in bits/sec, default is the maximum\n");
+ printf(" possible.\n");
+ printf("-h file......... record histogram and *append* to 'file'.\n");
+ printf("-ave file....... record average rate of 3 sec intervales and\n");
+ printf(" *append* to 'file'.\n");
+ printf("-ploss.......... packet-loss percentage.\n");
+ printf("-enc............ do only encoding and store the bit-stream\n");
+ printf("-dec............ the input file is a bit-stream, decode it.\n\n");
+ printf("Example usage:\n\n");
+ printf("%s speechIn.pcm speechOut.pcm -B 40000 -fs 32\n\n", argv[0]);
+
+ exit(0);
+ }
+
+ /* Get Bottleneck value */
+ bottleneck = readParamInt(argc, argv, "-bn", 50000);
+ fprintf(stderr, "\nfixed bottleneck rate of %d bits/s\n\n", bottleneck);
+
+ /* Get Input and Output files */
+ sscanf(argv[1], "%s", inname);
+ sscanf(argv[2], "%s", outname);
+ codingMode = readSwitch(argc, argv, "-I");
+ sampFreqKHz = (int16_t)readParamInt(argc, argv, "-fs", 32);
+ if (readParamString(argc, argv, "-h", histFileName, 500) > 0) {
+ histFile = fopen(histFileName, "a");
+ if (histFile == NULL) {
+ printf("cannot open hist file %s", histFileName);
+ exit(0);
+ }
+ } else {
+    // No recording of histogram.
+ histFile = NULL;
+ }
+
+ packetLossPercent = readParamInt(argc, argv, "-ploss", 0);
+
+ if (readParamString(argc, argv, "-ave", averageFileName, 500) > 0) {
+ averageFile = fopen(averageFileName, "a");
+ if (averageFile == NULL) {
+ printf("cannot open file to write rate %s", averageFileName);
+ exit(0);
+ }
+ } else {
+ averageFile = NULL;
+ }
+
+ onlyEncode = readSwitch(argc, argv, "-enc");
+ onlyDecode = readSwitch(argc, argv, "-dec");
+
+ switch (sampFreqKHz) {
+ case 16: {
+ samplesIn10Ms = 160;
+ break;
+ }
+ case 32: {
+ samplesIn10Ms = 320;
+ break;
+ }
+ default:
+ printf("A sampling frequency of %d kHz is not supported, valid values are"
+             " 16 and 32.\n", sampFreqKHz);
+ exit(-1);
+ }
+ payloadLimit = (int16_t)readParamInt(argc, argv, "-plim", 400);
+ rateLimit = readParamInt(argc, argv, "-rlim", 106800);
+
+ if ((inp = fopen(inname, "rb")) == NULL) {
+ printf(" iSAC: Cannot read file %s.\n", inname);
+ exit(1);
+ }
+ if ((outp = fopen(outname, "wb")) == NULL) {
+ printf(" iSAC: Cannot write file %s.\n", outname);
+ exit(1);
+ }
+
+#ifdef WIN32
+ _splitpath(outname, outDrive, outPath, outPrefix, outSuffix);
+ _makepath(bitrateFileName, outDrive, outPath, "bitrate", ".txt");
+
+ bitrateFile = fopen(bitrateFileName, "a");
+ fprintf(bitrateFile, "%% %s \n", inname);
+#endif
+
+ printf("\n");
+ printf("Input.................... %s\n", inname);
+ printf("Output................... %s\n", outname);
+ printf("Encoding Mode............ %s\n",
+ (codingMode == 1) ? "Channel-Independent" : "Channel-Adaptive");
+ printf("Bottleneck............... %d bits/sec\n", bottleneck);
+ printf("Packet-loss Percentage... %d\n", packetLossPercent);
+ printf("\n");
+
+#ifdef WIN32
+ starttime = clock() / (double)CLOCKS_PER_SEC; /* Runtime statistics */
+#endif
+
+ /* Initialize the ISAC and BN structs */
+ err = WebRtcIsac_Create(&ISAC_main_inst);
+
+ WebRtcIsac_SetEncSampRate(ISAC_main_inst, sampFreqKHz * 1000);
+ WebRtcIsac_SetDecSampRate(ISAC_main_inst, sampFreqKHz >= 32 ? 32000 : 16000);
+ /* Error check */
+ if (err < 0) {
+ fprintf(stderr, "\n\n Error in create.\n\n");
+ exit(EXIT_FAILURE);
+ }
+
+ framecnt = 0;
+ endfile = 0;
+
+ /* Initialize encoder and decoder */
+ if (WebRtcIsac_EncoderInit(ISAC_main_inst, codingMode) < 0) {
+ printf("cannot initialize encoder\n");
+ return -1;
+ }
+ WebRtcIsac_DecoderInit(ISAC_main_inst);
+
+ if (codingMode == 1) {
+ if (WebRtcIsac_Control(ISAC_main_inst, bottleneck, frameSize) < 0) {
+ printf("cannot set bottleneck\n");
+ return -1;
+ }
+ } else {
+ if (WebRtcIsac_ControlBwe(ISAC_main_inst, 15000, 30, 1) < 0) {
+ printf("cannot configure BWE\n");
+ return -1;
+ }
+ }
+
+ if (WebRtcIsac_SetMaxPayloadSize(ISAC_main_inst, payloadLimit) < 0) {
+ printf("cannot set maximum payload size %d.\n", payloadLimit);
+ return -1;
+ }
+
+ if (rateLimit < 106800) {
+ if (WebRtcIsac_SetMaxRate(ISAC_main_inst, rateLimit) < 0) {
+ printf("cannot set the maximum rate %d.\n", rateLimit);
+ return -1;
+ }
+ }
+
+ while (endfile == 0) {
+ fprintf(stderr, " \rframe = %7li", framecnt);
+
+    //============== Reading from the file and encoding =================
+ cur_framesmpls = 0;
+ stream_len = 0;
+
+ if (onlyDecode) {
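+      // The -enc path below writes each payload with a 2-byte big-endian
+      // length prefix; read the prefix back first, then the payload.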
+ uint8_t auxUW8;
+ if (fread(&auxUW8, sizeof(uint8_t), 1, inp) < 1) {
+ break;
+ }
+ stream_len = auxUW8 << 8;
+ if (fread(&auxUW8, sizeof(uint8_t), 1, inp) < 1) {
+ break;
+ }
+ stream_len |= auxUW8;
+ if (fread(payload, 1, stream_len, inp) < stream_len) {
+ printf("last payload is corrupted\n");
+ break;
+ }
+ } else {
+ while (stream_len == 0) {
+ int stream_len_int;
+
+ // Read 10 ms speech block
+ endfile = readframe(shortdata, inp, samplesIn10Ms);
+ if (endfile) {
+ break;
+ }
+ cur_framesmpls += samplesIn10Ms;
+
+ //-------- iSAC encoding ---------
+ stream_len_int = WebRtcIsac_Encode(ISAC_main_inst, shortdata, payload);
+
+ if (stream_len_int < 0) {
+ // exit if returned with error
+ // errType=WebRtcIsac_GetErrorCode(ISAC_main_inst);
+ fprintf(stderr, "\nError in encoder\n");
+ getc(stdin);
+ exit(EXIT_FAILURE);
+ }
+ stream_len = (size_t)stream_len_int;
+ }
+ //===================================================================
+ if (endfile) {
+ break;
+ }
+
+ rcuStreamLen = WebRtcIsac_GetRedPayload(ISAC_main_inst, payloadRCU);
+ if (rcuStreamLen < 0) {
+ fprintf(stderr, "\nError getting RED payload\n");
+ getc(stdin);
+ exit(EXIT_FAILURE);
+ }
+
+ get_arrival_time(cur_framesmpls, stream_len, bottleneck, &packetData,
+ sampFreqKHz * 1000, sampFreqKHz * 1000);
+ if (WebRtcIsac_UpdateBwEstimate(
+ ISAC_main_inst, payload, stream_len, packetData.rtp_number,
+ packetData.sample_count, packetData.arrival_time) < 0) {
+ printf(" BWE Error at client\n");
+ return -1;
+ }
+ }
+
+ if (endfile) {
+ break;
+ }
+
+ maxStreamLen = (stream_len > maxStreamLen) ? stream_len : maxStreamLen;
+ packetCntr++;
+
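+  // hist has 600 bins; stream_len is assumed to stay below 600 bytes
+  // (the default -plim payload limit of 400 guarantees this).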
+ hist[stream_len]++;
+ if (averageFile != NULL) {
+ tmpSumStreamLen += stream_len;
+ if (packetCntr == 100) {
+ // kbps
+ fprintf(averageFile, "%8.3f ",
+ tmpSumStreamLen * 8.0 / (30.0 * packetCntr));
+ packetCntr = 0;
+ tmpSumStreamLen = 0;
+ }
+ }
+
+ if (onlyEncode) {
+ uint8_t auxUW8;
+ auxUW8 = (uint8_t)(((stream_len & 0x7F00) >> 8) & 0xFF);
+ if (fwrite(&auxUW8, sizeof(uint8_t), 1, outp) != 1) {
+ return -1;
+ }
+
+ auxUW8 = (uint8_t)(stream_len & 0xFF);
+ if (fwrite(&auxUW8, sizeof(uint8_t), 1, outp) != 1) {
+ return -1;
+ }
+ if (fwrite(payload, 1, stream_len, outp) != stream_len) {
+ return -1;
+ }
+ } else {
+ //======================= iSAC decoding ===========================
+
+ if ((rand() % 100) < packetLossPercent) {
+ declen = WebRtcIsac_DecodeRcu(ISAC_main_inst, payloadRCU,
+ (size_t)rcuStreamLen, decoded,
+ speechType);
+ lostPacketCntr++;
+ } else {
+ declen = WebRtcIsac_Decode(ISAC_main_inst, payload, stream_len, decoded,
+ speechType);
+ }
+ if (declen <= 0) {
+ // errType=WebRtcIsac_GetErrorCode(ISAC_main_inst);
+ fprintf(stderr, "\nError in decoder.\n");
+ getc(stdin);
+ exit(1);
+ }
+
+ // Write decoded speech frame to file
+ if (fwrite(decoded, sizeof(int16_t), declen, outp) != (size_t)declen) {
+ return -1;
+ }
+ cur_framesmpls = declen;
+ }
+ // Update Statistics
+ framecnt++;
+ totalsmpls += cur_framesmpls;
+ if (stream_len > 0) {
+ totalbits += 8 * stream_len;
+ }
+ if (rcuStreamLen > 0) {
+ totalBitsRCU += 8 * rcuStreamLen;
+ }
+ }
+
+ rate = ((double)totalbits * (sampFreqKHz)) / (double)totalsmpls;
+ rateRCU = ((double)totalBitsRCU * (sampFreqKHz)) / (double)totalsmpls;
+
+ printf("\n\n");
+ printf("Sampling Rate............... %d kHz\n", sampFreqKHz);
+ printf("Payload Limit............... %d bytes \n", payloadLimit);
+ printf("Rate Limit.................. %d bits/sec \n", rateLimit);
+
+#ifdef WIN32
+ fprintf(bitrateFile, "%d %10lu %d %6.3f %6.3f %6.3f\n",
+ sampFreqKHz, framecnt, bottleneck, rateLB, rateUB, rate);
+ fclose(bitrateFile);
+#endif // WIN32
+
+ printf("\n");
+ printf("Measured bit-rate........... %0.3f kbps\n", rate);
+ printf("Measured RCU bit-ratre...... %0.3f kbps\n", rateRCU);
+ printf("Maximum bit-rate/payloadsize %0.3f / %zu\n",
+ maxStreamLen * 8 / 0.03, maxStreamLen);
+ printf("Measured packet-loss........ %0.1f%% \n",
+ 100.0f * (float)lostPacketCntr / (float)packetCntr);
+
+ printf("\n");
+
+/* Runtime statistics */
+#ifdef WIN32
+ runtime = (double)(clock() / (double)CLOCKS_PER_SEC - starttime);
+ length_file = ((double)framecnt * (double)declen / (sampFreqKHz * 1000));
+ printf("Length of speech file....... %.1f s\n", length_file);
+ printf("Time to run iSAC............ %.2f s (%.2f %% of realtime)\n\n",
+ runtime, (100 * runtime / length_file));
+#endif
+ printf("\n\n_______________________________________________\n");
+
+ if (histFile != NULL) {
+ int n;
+ for (n = 0; n < 600; n++) {
+ fprintf(histFile, "%6d ", hist[n]);
+ }
+ fprintf(histFile, "\n");
+ fclose(histFile);
+ }
+ if (averageFile != NULL) {
+ if (packetCntr > 0) {
+ fprintf(averageFile, "%8.3f ",
+ tmpSumStreamLen * 8.0 / (30.0 * packetCntr));
+ }
+ fprintf(averageFile, "\n");
+ fclose(averageFile);
+ }
+
+ fclose(inp);
+ fclose(outp);
+
+ WebRtcIsac_Free(ISAC_main_inst);
+
+ exit(0);
+}
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/util/utility.c b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/util/utility.c
new file mode 100644
index 0000000000..56547b11c2
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/util/utility.c
@@ -0,0 +1,179 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <math.h>
+
+#include "modules/audio_coding/codecs/isac/main/util/utility.h"
+
+/* function for reading audio data from PCM file */
+int
+readframe(
+ short* data,
+ FILE* inp,
+ int length)
+{
+ short k, rlen, status = 0;
+ unsigned char* ptrUChar;
+ ptrUChar = (unsigned char*)data;
+
+ rlen = (short)fread(data, sizeof(short), length, inp);
+ if (rlen < length) {
+ for (k = rlen; k < length; k++)
+ data[k] = 0;
+ status = 1;
+ }
+
+  // PCM files are assumed little-endian (written on Intel machines);
+  // reassemble each sample byte-wise so big-endian hosts read them correctly.
+ for(k = 0; k < length; k++)
+ {
+ data[k] = (short)ptrUChar[k<<1] | ((((short)ptrUChar[(k<<1) + 1]) << 8) & 0xFF00);
+ }
+
+ return status;
+}
+
+short
+readSwitch(
+ int argc,
+ char* argv[],
+ char* strID)
+{
+ short n;
+ for(n = 0; n < argc; n++)
+ {
+ if(strcmp(argv[n], strID) == 0)
+ {
+ return 1;
+ }
+ }
+ return 0;
+}
+
+double
+readParamDouble(
+ int argc,
+ char* argv[],
+ char* strID,
+ double defaultVal)
+{
+ double returnVal = defaultVal;
+ short n;
+ for(n = 0; n < argc; n++)
+ {
+ if(strcmp(argv[n], strID) == 0)
+ {
+ n++;
+ if(n < argc)
+ {
+ returnVal = atof(argv[n]);
+ }
+ break;
+ }
+ }
+ return returnVal;
+}
+
+int
+readParamInt(
+ int argc,
+ char* argv[],
+ char* strID,
+ int defaultVal)
+{
+ int returnVal = defaultVal;
+ short n;
+ for(n = 0; n < argc; n++)
+ {
+ if(strcmp(argv[n], strID) == 0)
+ {
+ n++;
+ if(n < argc)
+ {
+ returnVal = atoi(argv[n]);
+ }
+ break;
+ }
+ }
+ return returnVal;
+}
+
+int
+readParamString(
+ int argc,
+ char* argv[],
+ char* strID,
+ char* stringParam,
+ int maxSize)
+{
+  int paramLength = 0;
+ short n;
+ for(n = 0; n < argc; n++)
+ {
+ if(strcmp(argv[n], strID) == 0)
+ {
+ n++;
+ if(n < argc)
+ {
+        strncpy(stringParam, argv[n], maxSize);
+        stringParam[maxSize - 1] = '\0'; /* ensure termination */
+        paramLength = (int)strlen(argv[n]);
+ }
+ break;
+ }
+ }
+  return paramLength;
+}
+
+void
+get_arrival_time(
+ int current_framesamples, /* samples */
+ size_t packet_size, /* bytes */
+ int bottleneck, /* excluding headers; bits/s */
+ BottleNeckModel* BN_data,
+ short senderSampFreqHz,
+ short receiverSampFreqHz)
+{
+ unsigned int travelTimeMs;
+ const int headerSizeByte = 35;
+
+ int headerRate;
+
+ BN_data->whenPackGeneratedMs += (current_framesamples / (senderSampFreqHz / 1000));
+
+ headerRate = headerSizeByte * 8 * senderSampFreqHz / current_framesamples; /* bits/s */
+
+ /* everything in samples */
+ BN_data->sample_count = BN_data->sample_count + current_framesamples;
+
+ //travelTimeMs = ((packet_size + HeaderSize) * 8 * sampFreqHz) /
+ // (bottleneck + HeaderRate)
+ travelTimeMs = (unsigned int)floor((double)((packet_size + headerSizeByte) * 8 * 1000)
+ / (double)(bottleneck + headerRate) + 0.5);
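+
+  /* Worked example (illustrative): a 100-byte payload with the 35-byte
+   * header over a 32000 bit/s bottleneck, and headerRate ~= 9333 bit/s
+   * (30 ms frame at 16 kHz), gives
+   *   travelTimeMs ~= (135 * 8 * 1000) / (32000 + 9333) ~= 26 ms. */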
+
+ if(BN_data->whenPrevPackLeftMs > BN_data->whenPackGeneratedMs)
+ {
+ BN_data->whenPrevPackLeftMs += travelTimeMs;
+ }
+ else
+ {
+ BN_data->whenPrevPackLeftMs = BN_data->whenPackGeneratedMs +
+ travelTimeMs;
+ }
+
+ BN_data->arrival_time = (BN_data->whenPrevPackLeftMs *
+ (receiverSampFreqHz / 1000));
+
+// if (BN_data->arrival_time < BN_data->sample_count)
+// BN_data->arrival_time = BN_data->sample_count;
+
+ BN_data->rtp_number++;
+}
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/util/utility.h b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/util/utility.h
new file mode 100644
index 0000000000..1acc54251b
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/util/utility.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_UTIL_UTILITY_H_
+#define MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_UTIL_UTILITY_H_
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#define OPEN_FILE_WB(filePtr, fullPath) \
+ do { \
+ if (fullPath != NULL) { \
+ filePtr = fopen(fullPath, "wb"); \
+ if (filePtr == NULL) { \
+ printf("could not open %s to write to.", fullPath); \
+ return -1; \
+ } \
+ } else { \
+ filePtr = NULL; \
+ } \
+ } while (0)
+
+#define OPEN_FILE_AB(filePtr, fullPath) \
+ do { \
+ if (fullPath != NULL) { \
+ filePtr = fopen(fullPath, "ab"); \
+ if (filePtr == NULL) { \
+ printf("could not open %s to write to.", fullPath); \
+ return -1; \
+ } \
+ } else { \
+ filePtr = NULL; \
+ } \
+ } while (0)
+
+#define OPEN_FILE_RB(filePtr, fullPath) \
+ do { \
+ if (fullPath != NULL) { \
+ filePtr = fopen(fullPath, "rb"); \
+ if (filePtr == NULL) { \
+ printf("could not open %s to read from.", fullPath); \
+ return -1; \
+ } \
+ } else { \
+ filePtr = NULL; \
+ } \
+ } while (0)
+
+#define WRITE_FILE_D(bufferPtr, len, filePtr) \
+ do { \
+ if (filePtr != NULL) { \
+ double dummy[1000]; \
+ int cntr; \
+ for (cntr = 0; cntr < (len); cntr++) { \
+ dummy[cntr] = (double)bufferPtr[cntr]; \
+ } \
+ fwrite(dummy, sizeof(double), len, filePtr); \
+ fflush(filePtr); \
+ } \
+ } while (0)
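+
+/* A short usage sketch (illustration only, hence the #if 0; the caller name
+ * is hypothetical). Each OPEN_FILE_* macro expands to code that returns -1
+ * from the enclosing function on failure, so they can only be used inside
+ * functions returning int (or a compatible type); a NULL path leaves the
+ * FILE* set to NULL instead of failing. */
+#if 0
+static int exampleOpenFiles(const char* inName, const char* logName) {
+  FILE* inFile;
+  FILE* logFile;
+  OPEN_FILE_RB(inFile, inName);   /* returns -1 if inName cannot be read */
+  OPEN_FILE_AB(logFile, logName); /* logName == NULL => logFile == NULL */
+  return 0;
+}
+#endif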
+
+typedef struct {
+ unsigned int whenPackGeneratedMs;
+ unsigned int whenPrevPackLeftMs;
+  unsigned int sendTimeMs; /* millisecond */
+ unsigned int arrival_time; /* samples */
+ unsigned int sample_count; /* samples, also used as "send time stamp" */
+ unsigned int rtp_number;
+} BottleNeckModel;
+
+void get_arrival_time(int current_framesamples, /* samples */
+ size_t packet_size, /* bytes */
+ int bottleneck, /* excluding headers; bits/s */
+ BottleNeckModel* BN_data,
+ short senderSampFreqHz,
+ short receiverSampFreqHz);
+
+/* function for reading audio data from PCM file */
+int readframe(short* data, FILE* inp, int length);
+
+short readSwitch(int argc, char* argv[], char* strID);
+
+double readParamDouble(int argc, char* argv[], char* strID, double defaultVal);
+
+int readParamInt(int argc, char* argv[], char* strID, int defaultVal);
+
+int readParamString(int argc,
+ char* argv[],
+ char* strID,
+ char* stringParam,
+ int maxSize);
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif  // MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_UTIL_UTILITY_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/legacy_encoded_audio_frame.cc b/third_party/libwebrtc/modules/audio_coding/codecs/legacy_encoded_audio_frame.cc
new file mode 100644
index 0000000000..dacf325082
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/legacy_encoded_audio_frame.cc
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/legacy_encoded_audio_frame.h"
+
+#include <algorithm>
+#include <memory>
+#include <utility>
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+LegacyEncodedAudioFrame::LegacyEncodedAudioFrame(AudioDecoder* decoder,
+ rtc::Buffer&& payload)
+ : decoder_(decoder), payload_(std::move(payload)) {}
+
+LegacyEncodedAudioFrame::~LegacyEncodedAudioFrame() = default;
+
+size_t LegacyEncodedAudioFrame::Duration() const {
+ const int ret = decoder_->PacketDuration(payload_.data(), payload_.size());
+ return (ret < 0) ? 0 : static_cast<size_t>(ret);
+}
+
+absl::optional<AudioDecoder::EncodedAudioFrame::DecodeResult>
+LegacyEncodedAudioFrame::Decode(rtc::ArrayView<int16_t> decoded) const {
+ AudioDecoder::SpeechType speech_type = AudioDecoder::kSpeech;
+ const int ret = decoder_->Decode(
+ payload_.data(), payload_.size(), decoder_->SampleRateHz(),
+ decoded.size() * sizeof(int16_t), decoded.data(), &speech_type);
+
+ if (ret < 0)
+ return absl::nullopt;
+
+ return DecodeResult{static_cast<size_t>(ret), speech_type};
+}
+
+std::vector<AudioDecoder::ParseResult> LegacyEncodedAudioFrame::SplitBySamples(
+ AudioDecoder* decoder,
+ rtc::Buffer&& payload,
+ uint32_t timestamp,
+ size_t bytes_per_ms,
+ uint32_t timestamps_per_ms) {
+ RTC_DCHECK(payload.data());
+ std::vector<AudioDecoder::ParseResult> results;
+ size_t split_size_bytes = payload.size();
+
+ // Find a "chunk size" >= 20 ms and < 40 ms.
+ const size_t min_chunk_size = bytes_per_ms * 20;
+ if (min_chunk_size >= payload.size()) {
+ std::unique_ptr<LegacyEncodedAudioFrame> frame(
+ new LegacyEncodedAudioFrame(decoder, std::move(payload)));
+ results.emplace_back(timestamp, 0, std::move(frame));
+ } else {
+ // Reduce the split size by half as long as `split_size_bytes` is at least
+ // twice the minimum chunk size (so that the resulting size is at least as
+ // large as the minimum chunk size).
+ while (split_size_bytes >= 2 * min_chunk_size) {
+ split_size_bytes /= 2;
+ }
+
+ const uint32_t timestamps_per_chunk = static_cast<uint32_t>(
+ split_size_bytes * timestamps_per_ms / bytes_per_ms);
+ size_t byte_offset;
+ uint32_t timestamp_offset;
+ for (byte_offset = 0, timestamp_offset = 0; byte_offset < payload.size();
+ byte_offset += split_size_bytes,
+ timestamp_offset += timestamps_per_chunk) {
+ split_size_bytes =
+ std::min(split_size_bytes, payload.size() - byte_offset);
+ rtc::Buffer new_payload(payload.data() + byte_offset, split_size_bytes);
+ std::unique_ptr<LegacyEncodedAudioFrame> frame(
+ new LegacyEncodedAudioFrame(decoder, std::move(new_payload)));
+ results.emplace_back(timestamp + timestamp_offset, 0, std::move(frame));
+ }
+ }
+
+ return results;
+}
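+
+// A worked example of the rule above, assuming PCMu-style parameters
+// (bytes_per_ms = 8, timestamps_per_ms = 8): the minimum chunk is
+// 8 * 20 = 160 bytes (20 ms). A 480-byte (60 ms) payload starts with
+// split_size_bytes = 480, which is halved once to 240 (480 >= 2 * 160, but
+// 240 is not), so the loop emits two 240-byte (30 ms) frames at timestamps
+// t and t + 240.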
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/legacy_encoded_audio_frame.h b/third_party/libwebrtc/modules/audio_coding/codecs/legacy_encoded_audio_frame.h
new file mode 100644
index 0000000000..21da1367ed
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/legacy_encoded_audio_frame.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_LEGACY_ENCODED_AUDIO_FRAME_H_
+#define MODULES_AUDIO_CODING_CODECS_LEGACY_ENCODED_AUDIO_FRAME_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/array_view.h"
+#include "api/audio_codecs/audio_decoder.h"
+#include "rtc_base/buffer.h"
+
+namespace webrtc {
+
+class LegacyEncodedAudioFrame final : public AudioDecoder::EncodedAudioFrame {
+ public:
+ LegacyEncodedAudioFrame(AudioDecoder* decoder, rtc::Buffer&& payload);
+ ~LegacyEncodedAudioFrame() override;
+
+ static std::vector<AudioDecoder::ParseResult> SplitBySamples(
+ AudioDecoder* decoder,
+ rtc::Buffer&& payload,
+ uint32_t timestamp,
+ size_t bytes_per_ms,
+ uint32_t timestamps_per_ms);
+
+ size_t Duration() const override;
+
+ absl::optional<DecodeResult> Decode(
+ rtc::ArrayView<int16_t> decoded) const override;
+
+ // For testing:
+ const rtc::Buffer& payload() const { return payload_; }
+
+ private:
+ AudioDecoder* const decoder_;
+ const rtc::Buffer payload_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_CODING_CODECS_LEGACY_ENCODED_AUDIO_FRAME_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/legacy_encoded_audio_frame_unittest.cc b/third_party/libwebrtc/modules/audio_coding/codecs/legacy_encoded_audio_frame_unittest.cc
new file mode 100644
index 0000000000..f81aeeea80
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/legacy_encoded_audio_frame_unittest.cc
@@ -0,0 +1,179 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/legacy_encoded_audio_frame.h"
+
+#include "rtc_base/numerics/safe_conversions.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+enum class NetEqDecoder {
+ kDecoderPCMu,
+ kDecoderPCMa,
+ kDecoderPCMu_2ch,
+ kDecoderPCMa_2ch,
+ kDecoderPCM16B,
+ kDecoderPCM16Bwb,
+ kDecoderPCM16Bswb32kHz,
+ kDecoderPCM16Bswb48kHz,
+ kDecoderPCM16B_2ch,
+ kDecoderPCM16Bwb_2ch,
+ kDecoderPCM16Bswb32kHz_2ch,
+ kDecoderPCM16Bswb48kHz_2ch,
+ kDecoderPCM16B_5ch,
+ kDecoderG722,
+};
+
+class SplitBySamplesTest : public ::testing::TestWithParam<NetEqDecoder> {
+ protected:
+ virtual void SetUp() {
+ decoder_type_ = GetParam();
+ switch (decoder_type_) {
+ case NetEqDecoder::kDecoderPCMu:
+ case NetEqDecoder::kDecoderPCMa:
+ bytes_per_ms_ = 8;
+ samples_per_ms_ = 8;
+ break;
+ case NetEqDecoder::kDecoderPCMu_2ch:
+ case NetEqDecoder::kDecoderPCMa_2ch:
+ bytes_per_ms_ = 2 * 8;
+ samples_per_ms_ = 8;
+ break;
+ case NetEqDecoder::kDecoderG722:
+ bytes_per_ms_ = 8;
+ samples_per_ms_ = 16;
+ break;
+ case NetEqDecoder::kDecoderPCM16B:
+ bytes_per_ms_ = 16;
+ samples_per_ms_ = 8;
+ break;
+ case NetEqDecoder::kDecoderPCM16Bwb:
+ bytes_per_ms_ = 32;
+ samples_per_ms_ = 16;
+ break;
+ case NetEqDecoder::kDecoderPCM16Bswb32kHz:
+ bytes_per_ms_ = 64;
+ samples_per_ms_ = 32;
+ break;
+ case NetEqDecoder::kDecoderPCM16Bswb48kHz:
+ bytes_per_ms_ = 96;
+ samples_per_ms_ = 48;
+ break;
+ case NetEqDecoder::kDecoderPCM16B_2ch:
+ bytes_per_ms_ = 2 * 16;
+ samples_per_ms_ = 8;
+ break;
+ case NetEqDecoder::kDecoderPCM16Bwb_2ch:
+ bytes_per_ms_ = 2 * 32;
+ samples_per_ms_ = 16;
+ break;
+ case NetEqDecoder::kDecoderPCM16Bswb32kHz_2ch:
+ bytes_per_ms_ = 2 * 64;
+ samples_per_ms_ = 32;
+ break;
+ case NetEqDecoder::kDecoderPCM16Bswb48kHz_2ch:
+ bytes_per_ms_ = 2 * 96;
+ samples_per_ms_ = 48;
+ break;
+ case NetEqDecoder::kDecoderPCM16B_5ch:
+ bytes_per_ms_ = 5 * 16;
+ samples_per_ms_ = 8;
+ break;
+ default:
+ RTC_DCHECK_NOTREACHED();
+ break;
+ }
+ }
+ size_t bytes_per_ms_;
+ int samples_per_ms_;
+ NetEqDecoder decoder_type_;
+};
+
+// Test splitting sample-based payloads.
+TEST_P(SplitBySamplesTest, PayloadSizes) {
+ constexpr uint32_t kBaseTimestamp = 0x12345678;
+ struct ExpectedSplit {
+ size_t payload_size_ms;
+ size_t num_frames;
+    // For simplicity, we only expect up to two packets per split.
+ size_t frame_sizes[2];
+ };
+ // The payloads are expected to be split as follows:
+ // 10 ms -> 10 ms
+ // 20 ms -> 20 ms
+ // 30 ms -> 30 ms
+ // 40 ms -> 20 + 20 ms
+ // 50 ms -> 25 + 25 ms
+ // 60 ms -> 30 + 30 ms
+ ExpectedSplit expected_splits[] = {{10, 1, {10}}, {20, 1, {20}},
+ {30, 1, {30}}, {40, 2, {20, 20}},
+ {50, 2, {25, 25}}, {60, 2, {30, 30}}};
+
+ for (const auto& expected_split : expected_splits) {
+ // The payload values are set to steadily increase (modulo 256), so that the
+ // resulting frames can be checked and we can be reasonably certain no
+ // sample was missed or repeated.
+ const auto generate_payload = [](size_t num_bytes) {
+ rtc::Buffer payload(num_bytes);
+ uint8_t value = 0;
+ // Allow wrap-around of value in counter below.
+ for (size_t i = 0; i != payload.size(); ++i, ++value) {
+ payload[i] = value;
+ }
+ return payload;
+ };
+
+ const auto results = LegacyEncodedAudioFrame::SplitBySamples(
+ nullptr,
+ generate_payload(expected_split.payload_size_ms * bytes_per_ms_),
+ kBaseTimestamp, bytes_per_ms_, samples_per_ms_);
+
+ EXPECT_EQ(expected_split.num_frames, results.size());
+ uint32_t expected_timestamp = kBaseTimestamp;
+ uint8_t value = 0;
+ for (size_t i = 0; i != expected_split.num_frames; ++i) {
+ const auto& result = results[i];
+ const LegacyEncodedAudioFrame* frame =
+ static_cast<const LegacyEncodedAudioFrame*>(result.frame.get());
+ const size_t length_bytes = expected_split.frame_sizes[i] * bytes_per_ms_;
+ EXPECT_EQ(length_bytes, frame->payload().size());
+ EXPECT_EQ(expected_timestamp, result.timestamp);
+ const rtc::Buffer& payload = frame->payload();
+ // Allow wrap-around of value in counter below.
+ for (size_t i = 0; i != payload.size(); ++i, ++value) {
+ ASSERT_EQ(value, payload[i]);
+ }
+
+ expected_timestamp += rtc::checked_cast<uint32_t>(
+ expected_split.frame_sizes[i] * samples_per_ms_);
+ }
+ }
+}
+
+INSTANTIATE_TEST_SUITE_P(
+ LegacyEncodedAudioFrame,
+ SplitBySamplesTest,
+ ::testing::Values(NetEqDecoder::kDecoderPCMu,
+ NetEqDecoder::kDecoderPCMa,
+ NetEqDecoder::kDecoderPCMu_2ch,
+ NetEqDecoder::kDecoderPCMa_2ch,
+ NetEqDecoder::kDecoderG722,
+ NetEqDecoder::kDecoderPCM16B,
+ NetEqDecoder::kDecoderPCM16Bwb,
+ NetEqDecoder::kDecoderPCM16Bswb32kHz,
+ NetEqDecoder::kDecoderPCM16Bswb48kHz,
+ NetEqDecoder::kDecoderPCM16B_2ch,
+ NetEqDecoder::kDecoderPCM16Bwb_2ch,
+ NetEqDecoder::kDecoderPCM16Bswb32kHz_2ch,
+ NetEqDecoder::kDecoderPCM16Bswb48kHz_2ch,
+ NetEqDecoder::kDecoderPCM16B_5ch));
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/opus/DEPS b/third_party/libwebrtc/modules/audio_coding/codecs/opus/DEPS
new file mode 100644
index 0000000000..c2530726ad
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/opus/DEPS
@@ -0,0 +1,5 @@
+specific_include_rules = {
+ "opus_inst\.h": [
+ "+third_party/opus",
+ ],
+}
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/opus/audio_coder_opus_common.cc b/third_party/libwebrtc/modules/audio_coding/codecs/opus/audio_coder_opus_common.cc
new file mode 100644
index 0000000000..03c02186d0
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/opus/audio_coder_opus_common.cc
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/opus/audio_coder_opus_common.h"
+
+#include "absl/strings/string_view.h"
+
+namespace webrtc {
+
+absl::optional<std::string> GetFormatParameter(const SdpAudioFormat& format,
+ absl::string_view param) {
+ auto it = format.parameters.find(std::string(param));
+ if (it == format.parameters.end())
+ return absl::nullopt;
+
+ return it->second;
+}
+
+// Parses a comma-separated string "1,2,0,6" into a std::vector<unsigned char>.
+template <>
+absl::optional<std::vector<unsigned char>> GetFormatParameter(
+ const SdpAudioFormat& format,
+ absl::string_view param) {
+ std::vector<unsigned char> result;
+ const std::string comma_separated_list =
+ GetFormatParameter(format, param).value_or("");
+ size_t pos = 0;
+ while (pos < comma_separated_list.size()) {
+ const size_t next_comma = comma_separated_list.find(',', pos);
+ const size_t distance_to_next_comma = next_comma == std::string::npos
+ ? std::string::npos
+ : (next_comma - pos);
+ auto substring_with_number =
+ comma_separated_list.substr(pos, distance_to_next_comma);
+ auto conv = rtc::StringToNumber<int>(substring_with_number);
+ if (!conv.has_value()) {
+ return absl::nullopt;
+ }
+ result.push_back(*conv);
+ pos += substring_with_number.size() + 1;
+ }
+ return result;
+}
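+
+// A usage sketch of the specialization above (assumed values): a format whose
+// parameters contain {"channel_mapping", "0,4,1,2,3,5"} yields
+// std::vector<unsigned char>{0, 4, 1, 2, 3, 5}, while a malformed list such
+// as "0,1,two" makes rtc::StringToNumber fail and the whole call return
+// absl::nullopt.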
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/opus/audio_coder_opus_common.h b/third_party/libwebrtc/modules/audio_coding/codecs/opus/audio_coder_opus_common.h
new file mode 100644
index 0000000000..5ebb51b577
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/opus/audio_coder_opus_common.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_OPUS_AUDIO_CODER_OPUS_COMMON_H_
+#define MODULES_AUDIO_CODING_CODECS_OPUS_AUDIO_CODER_OPUS_COMMON_H_
+
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "absl/strings/string_view.h"
+#include "absl/types/optional.h"
+#include "api/audio_codecs/audio_decoder.h"
+#include "api/audio_codecs/audio_format.h"
+#include "rtc_base/string_to_number.h"
+
+namespace webrtc {
+
+absl::optional<std::string> GetFormatParameter(const SdpAudioFormat& format,
+ absl::string_view param);
+
+template <typename T>
+absl::optional<T> GetFormatParameter(const SdpAudioFormat& format,
+ absl::string_view param) {
+ return rtc::StringToNumber<T>(GetFormatParameter(format, param).value_or(""));
+}
+
+template <>
+absl::optional<std::vector<unsigned char>> GetFormatParameter(
+ const SdpAudioFormat& format,
+ absl::string_view param);
+
+class OpusFrame : public AudioDecoder::EncodedAudioFrame {
+ public:
+ OpusFrame(AudioDecoder* decoder,
+ rtc::Buffer&& payload,
+ bool is_primary_payload)
+ : decoder_(decoder),
+ payload_(std::move(payload)),
+ is_primary_payload_(is_primary_payload) {}
+
+ size_t Duration() const override {
+ int ret;
+ if (is_primary_payload_) {
+ ret = decoder_->PacketDuration(payload_.data(), payload_.size());
+ } else {
+ ret = decoder_->PacketDurationRedundant(payload_.data(), payload_.size());
+ }
+ return (ret < 0) ? 0 : static_cast<size_t>(ret);
+ }
+
+ bool IsDtxPacket() const override { return payload_.size() <= 2; }
+
+ absl::optional<DecodeResult> Decode(
+ rtc::ArrayView<int16_t> decoded) const override {
+ AudioDecoder::SpeechType speech_type = AudioDecoder::kSpeech;
+ int ret;
+ if (is_primary_payload_) {
+ ret = decoder_->Decode(
+ payload_.data(), payload_.size(), decoder_->SampleRateHz(),
+ decoded.size() * sizeof(int16_t), decoded.data(), &speech_type);
+ } else {
+ ret = decoder_->DecodeRedundant(
+ payload_.data(), payload_.size(), decoder_->SampleRateHz(),
+ decoded.size() * sizeof(int16_t), decoded.data(), &speech_type);
+ }
+
+ if (ret < 0)
+ return absl::nullopt;
+
+ return DecodeResult{static_cast<size_t>(ret), speech_type};
+ }
+
+ private:
+ AudioDecoder* const decoder_;
+ const rtc::Buffer payload_;
+ const bool is_primary_payload_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_CODING_CODECS_OPUS_AUDIO_CODER_OPUS_COMMON_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/opus/audio_decoder_multi_channel_opus_impl.cc b/third_party/libwebrtc/modules/audio_coding/codecs/opus/audio_decoder_multi_channel_opus_impl.cc
new file mode 100644
index 0000000000..285ea89959
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/opus/audio_decoder_multi_channel_opus_impl.cc
@@ -0,0 +1,182 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/opus/audio_decoder_multi_channel_opus_impl.h"
+
+#include <algorithm>
+#include <memory>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "absl/memory/memory.h"
+#include "modules/audio_coding/codecs/opus/audio_coder_opus_common.h"
+#include "rtc_base/string_to_number.h"
+
+namespace webrtc {
+
+std::unique_ptr<AudioDecoderMultiChannelOpusImpl>
+AudioDecoderMultiChannelOpusImpl::MakeAudioDecoder(
+ AudioDecoderMultiChannelOpusConfig config) {
+ if (!config.IsOk()) {
+ RTC_DCHECK_NOTREACHED();
+ return nullptr;
+ }
+ // Fill the pointer with a working decoder through the C interface. This
+ // allocates memory.
+ OpusDecInst* dec_state = nullptr;
+ const int error = WebRtcOpus_MultistreamDecoderCreate(
+ &dec_state, config.num_channels, config.num_streams,
+ config.coupled_streams, config.channel_mapping.data());
+ if (error != 0) {
+ return nullptr;
+ }
+
+  // Pass ownership to AudioDecoderMultiChannelOpusImpl. Not using
+  // 'make_unique' because the c-tor is private.
+ return std::unique_ptr<AudioDecoderMultiChannelOpusImpl>(
+ new AudioDecoderMultiChannelOpusImpl(dec_state, config));
+}
+
+AudioDecoderMultiChannelOpusImpl::AudioDecoderMultiChannelOpusImpl(
+ OpusDecInst* dec_state,
+ AudioDecoderMultiChannelOpusConfig config)
+ : dec_state_(dec_state), config_(config) {
+ RTC_DCHECK(dec_state);
+ WebRtcOpus_DecoderInit(dec_state_);
+}
+
+AudioDecoderMultiChannelOpusImpl::~AudioDecoderMultiChannelOpusImpl() {
+ WebRtcOpus_DecoderFree(dec_state_);
+}
+
+absl::optional<AudioDecoderMultiChannelOpusConfig>
+AudioDecoderMultiChannelOpusImpl::SdpToConfig(const SdpAudioFormat& format) {
+ AudioDecoderMultiChannelOpusConfig config;
+ config.num_channels = format.num_channels;
+ auto num_streams = GetFormatParameter<int>(format, "num_streams");
+ if (!num_streams.has_value()) {
+ return absl::nullopt;
+ }
+ config.num_streams = *num_streams;
+
+ auto coupled_streams = GetFormatParameter<int>(format, "coupled_streams");
+ if (!coupled_streams.has_value()) {
+ return absl::nullopt;
+ }
+ config.coupled_streams = *coupled_streams;
+
+ auto channel_mapping =
+ GetFormatParameter<std::vector<unsigned char>>(format, "channel_mapping");
+ if (!channel_mapping.has_value()) {
+ return absl::nullopt;
+ }
+ config.channel_mapping = *channel_mapping;
+ if (!config.IsOk()) {
+ return absl::nullopt;
+ }
+ return config;
+}
+
+std::vector<AudioDecoder::ParseResult>
+AudioDecoderMultiChannelOpusImpl::ParsePayload(rtc::Buffer&& payload,
+ uint32_t timestamp) {
+ std::vector<ParseResult> results;
+
+ if (PacketHasFec(payload.data(), payload.size())) {
+ const int duration =
+ PacketDurationRedundant(payload.data(), payload.size());
+ RTC_DCHECK_GE(duration, 0);
+ rtc::Buffer payload_copy(payload.data(), payload.size());
+ std::unique_ptr<EncodedAudioFrame> fec_frame(
+ new OpusFrame(this, std::move(payload_copy), false));
+ results.emplace_back(timestamp - duration, 1, std::move(fec_frame));
+ }
+ std::unique_ptr<EncodedAudioFrame> frame(
+ new OpusFrame(this, std::move(payload), true));
+ results.emplace_back(timestamp, 0, std::move(frame));
+ return results;
+}
+
+int AudioDecoderMultiChannelOpusImpl::DecodeInternal(const uint8_t* encoded,
+ size_t encoded_len,
+ int sample_rate_hz,
+ int16_t* decoded,
+ SpeechType* speech_type) {
+ RTC_DCHECK_EQ(sample_rate_hz, 48000);
+ int16_t temp_type = 1; // Default is speech.
+ int ret =
+ WebRtcOpus_Decode(dec_state_, encoded, encoded_len, decoded, &temp_type);
+ if (ret > 0)
+ ret *= static_cast<int>(
+ config_.num_channels); // Return total number of samples.
+ *speech_type = ConvertSpeechType(temp_type);
+ return ret;
+}
+
+int AudioDecoderMultiChannelOpusImpl::DecodeRedundantInternal(
+ const uint8_t* encoded,
+ size_t encoded_len,
+ int sample_rate_hz,
+ int16_t* decoded,
+ SpeechType* speech_type) {
+ if (!PacketHasFec(encoded, encoded_len)) {
+ // This packet is a RED packet.
+ return DecodeInternal(encoded, encoded_len, sample_rate_hz, decoded,
+ speech_type);
+ }
+
+ RTC_DCHECK_EQ(sample_rate_hz, 48000);
+ int16_t temp_type = 1; // Default is speech.
+ int ret = WebRtcOpus_DecodeFec(dec_state_, encoded, encoded_len, decoded,
+ &temp_type);
+ if (ret > 0)
+ ret *= static_cast<int>(
+ config_.num_channels); // Return total number of samples.
+ *speech_type = ConvertSpeechType(temp_type);
+ return ret;
+}
+
+void AudioDecoderMultiChannelOpusImpl::Reset() {
+ WebRtcOpus_DecoderInit(dec_state_);
+}
+
+int AudioDecoderMultiChannelOpusImpl::PacketDuration(const uint8_t* encoded,
+ size_t encoded_len) const {
+ return WebRtcOpus_DurationEst(dec_state_, encoded, encoded_len);
+}
+
+int AudioDecoderMultiChannelOpusImpl::PacketDurationRedundant(
+ const uint8_t* encoded,
+ size_t encoded_len) const {
+ if (!PacketHasFec(encoded, encoded_len)) {
+ // This packet is a RED packet.
+ return PacketDuration(encoded, encoded_len);
+ }
+
+ return WebRtcOpus_FecDurationEst(encoded, encoded_len, 48000);
+}
+
+bool AudioDecoderMultiChannelOpusImpl::PacketHasFec(const uint8_t* encoded,
+ size_t encoded_len) const {
+  const int fec = WebRtcOpus_PacketHasFec(encoded, encoded_len);
+  return (fec == 1);
+}
+
+int AudioDecoderMultiChannelOpusImpl::SampleRateHz() const {
+ return 48000;
+}
+
+size_t AudioDecoderMultiChannelOpusImpl::Channels() const {
+ return config_.num_channels;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/opus/audio_decoder_multi_channel_opus_impl.h b/third_party/libwebrtc/modules/audio_coding/codecs/opus/audio_decoder_multi_channel_opus_impl.h
new file mode 100644
index 0000000000..2ff47a8a53
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/opus/audio_decoder_multi_channel_opus_impl.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_OPUS_AUDIO_DECODER_MULTI_CHANNEL_OPUS_IMPL_H_
+#define MODULES_AUDIO_CODING_CODECS_OPUS_AUDIO_DECODER_MULTI_CHANNEL_OPUS_IMPL_H_
+
+#include <stddef.h>
+
+#include <memory>
+#include <vector>
+
+#include "api/audio_codecs/audio_decoder.h"
+#include "api/audio_codecs/audio_format.h"
+#include "api/audio_codecs/opus/audio_decoder_multi_channel_opus_config.h"
+#include "modules/audio_coding/codecs/opus/opus_interface.h"
+#include "rtc_base/buffer.h"
+
+namespace webrtc {
+
+class AudioDecoderMultiChannelOpusImpl final : public AudioDecoder {
+ public:
+ static std::unique_ptr<AudioDecoderMultiChannelOpusImpl> MakeAudioDecoder(
+ AudioDecoderMultiChannelOpusConfig config);
+
+ ~AudioDecoderMultiChannelOpusImpl() override;
+
+ AudioDecoderMultiChannelOpusImpl(const AudioDecoderMultiChannelOpusImpl&) =
+ delete;
+ AudioDecoderMultiChannelOpusImpl& operator=(
+ const AudioDecoderMultiChannelOpusImpl&) = delete;
+
+ std::vector<ParseResult> ParsePayload(rtc::Buffer&& payload,
+ uint32_t timestamp) override;
+ void Reset() override;
+ int PacketDuration(const uint8_t* encoded, size_t encoded_len) const override;
+ int PacketDurationRedundant(const uint8_t* encoded,
+ size_t encoded_len) const override;
+ bool PacketHasFec(const uint8_t* encoded, size_t encoded_len) const override;
+ int SampleRateHz() const override;
+ size_t Channels() const override;
+
+ static absl::optional<AudioDecoderMultiChannelOpusConfig> SdpToConfig(
+ const SdpAudioFormat& format);
+
+ protected:
+ int DecodeInternal(const uint8_t* encoded,
+ size_t encoded_len,
+ int sample_rate_hz,
+ int16_t* decoded,
+ SpeechType* speech_type) override;
+ int DecodeRedundantInternal(const uint8_t* encoded,
+ size_t encoded_len,
+ int sample_rate_hz,
+ int16_t* decoded,
+ SpeechType* speech_type) override;
+
+ private:
+ AudioDecoderMultiChannelOpusImpl(OpusDecInst* dec_state,
+ AudioDecoderMultiChannelOpusConfig config);
+
+ OpusDecInst* dec_state_;
+ const AudioDecoderMultiChannelOpusConfig config_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_CODING_CODECS_OPUS_AUDIO_DECODER_MULTI_CHANNEL_OPUS_IMPL_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/opus/audio_decoder_multi_channel_opus_unittest.cc b/third_party/libwebrtc/modules/audio_coding/codecs/opus/audio_decoder_multi_channel_opus_unittest.cc
new file mode 100644
index 0000000000..57e2107f3c
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/opus/audio_decoder_multi_channel_opus_unittest.cc
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "api/audio_codecs/opus/audio_decoder_multi_channel_opus.h"
+
+#include "modules/audio_coding/codecs/opus/audio_coder_opus_common.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+using ::testing::NiceMock;
+using ::testing::Return;
+
+TEST(AudioDecoderMultiOpusTest, GetFormatParameter) {
+ const SdpAudioFormat sdp_format("multiopus", 48000, 4,
+ {{"channel_mapping", "0,1,2,3"},
+ {"coupled_streams", "2"},
+ {"num_streams", "2"}});
+
+ EXPECT_EQ(GetFormatParameter(sdp_format, "channel_mapping"),
+ absl::optional<std::string>("0,1,2,3"));
+
+ EXPECT_EQ(GetFormatParameter<int>(sdp_format, "coupled_streams"),
+ absl::optional<int>(2));
+
+ EXPECT_EQ(GetFormatParameter(sdp_format, "missing"), absl::nullopt);
+
+ EXPECT_EQ(GetFormatParameter<int>(sdp_format, "channel_mapping"),
+ absl::nullopt);
+}
+
+TEST(AudioDecoderMultiOpusTest, InvalidChannelMappings) {
+ {
+ // Can't use channel 3 if there are only 2 channels.
+ const SdpAudioFormat sdp_format("multiopus", 48000, 2,
+ {{"channel_mapping", "3,0"},
+ {"coupled_streams", "1"},
+ {"num_streams", "2"}});
+ const absl::optional<AudioDecoderMultiChannelOpus::Config> decoder_config =
+ AudioDecoderMultiChannelOpus::SdpToConfig(sdp_format);
+ EXPECT_FALSE(decoder_config.has_value());
+ }
+ {
+ // The mapping is too long. There are only 5 channels, but 6 elements in the
+ // mapping.
+ const SdpAudioFormat sdp_format("multiopus", 48000, 5,
+ {{"channel_mapping", "0,1,2,3,4,5"},
+ {"coupled_streams", "0"},
+ {"num_streams", "2"}});
+ const absl::optional<AudioDecoderMultiChannelOpus::Config> decoder_config =
+ AudioDecoderMultiChannelOpus::SdpToConfig(sdp_format);
+ EXPECT_FALSE(decoder_config.has_value());
+ }
+ {
+ // The mapping doesn't parse correctly.
+ const SdpAudioFormat sdp_format(
+ "multiopus", 48000, 5,
+ {{"channel_mapping", "0,1,two,3,4"}, {"coupled_streams", "0"}});
+ const absl::optional<AudioDecoderMultiChannelOpus::Config> decoder_config =
+ AudioDecoderMultiChannelOpus::SdpToConfig(sdp_format);
+ EXPECT_FALSE(decoder_config.has_value());
+ }
+}
+
+TEST(AudioDecoderMultiOpusTest, ValidSdpToConfigProducesCorrectConfig) {
+ const SdpAudioFormat sdp_format("multiopus", 48000, 4,
+ {{"channel_mapping", "3,1,2,0"},
+ {"coupled_streams", "2"},
+ {"num_streams", "2"}});
+
+ const absl::optional<AudioDecoderMultiChannelOpus::Config> decoder_config =
+ AudioDecoderMultiChannelOpus::SdpToConfig(sdp_format);
+
+ ASSERT_TRUE(decoder_config.has_value());
+ EXPECT_TRUE(decoder_config->IsOk());
+ EXPECT_EQ(decoder_config->coupled_streams, 2);
+ EXPECT_THAT(decoder_config->channel_mapping,
+ ::testing::ContainerEq(std::vector<unsigned char>({3, 1, 2, 0})));
+}
+
+TEST(AudioDecoderMultiOpusTest, InvalidSdpToConfigDoesNotProduceConfig) {
+ {
+ const SdpAudioFormat sdp_format("multiopus", 48000, 4,
+ {{"channel_mapping", "0,1,2,3"},
+ {"coupled_stream", "2"},
+ {"num_streams", "2"}});
+
+ const absl::optional<AudioDecoderMultiChannelOpus::Config> decoder_config =
+ AudioDecoderMultiChannelOpus::SdpToConfig(sdp_format);
+
+ EXPECT_FALSE(decoder_config.has_value());
+ }
+
+ {
+ const SdpAudioFormat sdp_format("multiopus", 48000, 4,
+ {{"channel_mapping", "0,1,2 3"},
+ {"coupled_streams", "2"},
+ {"num_streams", "2"}});
+
+ const absl::optional<AudioDecoderMultiChannelOpus::Config> decoder_config =
+ AudioDecoderMultiChannelOpus::SdpToConfig(sdp_format);
+
+ EXPECT_FALSE(decoder_config.has_value());
+ }
+}
+
+TEST(AudioDecoderMultiOpusTest, CodecsCanBeCreated) {
+ const SdpAudioFormat sdp_format("multiopus", 48000, 4,
+ {{"channel_mapping", "0,1,2,3"},
+ {"coupled_streams", "2"},
+ {"num_streams", "2"}});
+
+ const absl::optional<AudioDecoderMultiChannelOpus::Config> decoder_config =
+ AudioDecoderMultiChannelOpus::SdpToConfig(sdp_format);
+
+ ASSERT_TRUE(decoder_config.has_value());
+
+ const std::unique_ptr<AudioDecoder> opus_decoder =
+ AudioDecoderMultiChannelOpus::MakeAudioDecoder(*decoder_config);
+
+ EXPECT_TRUE(opus_decoder);
+}
+
+TEST(AudioDecoderMultiOpusTest, AdvertisedCodecsCanBeCreated) {
+ std::vector<AudioCodecSpec> specs;
+ AudioDecoderMultiChannelOpus::AppendSupportedDecoders(&specs);
+
+ EXPECT_FALSE(specs.empty());
+
+ for (const AudioCodecSpec& spec : specs) {
+ const absl::optional<AudioDecoderMultiChannelOpus::Config> decoder_config =
+ AudioDecoderMultiChannelOpus::SdpToConfig(spec.format);
+ ASSERT_TRUE(decoder_config.has_value());
+
+ const std::unique_ptr<AudioDecoder> opus_decoder =
+ AudioDecoderMultiChannelOpus::MakeAudioDecoder(*decoder_config);
+
+ EXPECT_TRUE(opus_decoder);
+ }
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/opus/audio_decoder_opus.cc b/third_party/libwebrtc/modules/audio_coding/codecs/opus/audio_decoder_opus.cc
new file mode 100644
index 0000000000..cff9685548
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/opus/audio_decoder_opus.cc
@@ -0,0 +1,128 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/opus/audio_decoder_opus.h"
+
+#include <memory>
+#include <utility>
+
+#include "absl/types/optional.h"
+#include "api/array_view.h"
+#include "modules/audio_coding/codecs/opus/audio_coder_opus_common.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+AudioDecoderOpusImpl::AudioDecoderOpusImpl(size_t num_channels,
+ int sample_rate_hz)
+ : channels_{num_channels}, sample_rate_hz_{sample_rate_hz} {
+ RTC_DCHECK(num_channels == 1 || num_channels == 2);
+ RTC_DCHECK(sample_rate_hz == 16000 || sample_rate_hz == 48000);
+ const int error =
+ WebRtcOpus_DecoderCreate(&dec_state_, channels_, sample_rate_hz_);
+ RTC_DCHECK(error == 0);
+ WebRtcOpus_DecoderInit(dec_state_);
+}
+
+AudioDecoderOpusImpl::~AudioDecoderOpusImpl() {
+ WebRtcOpus_DecoderFree(dec_state_);
+}
+
+std::vector<AudioDecoder::ParseResult> AudioDecoderOpusImpl::ParsePayload(
+ rtc::Buffer&& payload,
+ uint32_t timestamp) {
+ std::vector<ParseResult> results;
+
+ if (PacketHasFec(payload.data(), payload.size())) {
+ const int duration =
+ PacketDurationRedundant(payload.data(), payload.size());
+ RTC_DCHECK_GE(duration, 0);
+ rtc::Buffer payload_copy(payload.data(), payload.size());
+ std::unique_ptr<EncodedAudioFrame> fec_frame(
+ new OpusFrame(this, std::move(payload_copy), false));
+ results.emplace_back(timestamp - duration, 1, std::move(fec_frame));
+ }
+ std::unique_ptr<EncodedAudioFrame> frame(
+ new OpusFrame(this, std::move(payload), true));
+ results.emplace_back(timestamp, 0, std::move(frame));
+ return results;
+}
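+
+// For example (assumed packet): a 20 ms mono payload carrying FEC, arriving
+// with RTP timestamp T at 48 kHz, yields two ParseResults: an FEC frame at
+// timestamp T - 960 with priority 1, and the primary frame at timestamp T
+// with priority 0.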
+
+int AudioDecoderOpusImpl::DecodeInternal(const uint8_t* encoded,
+ size_t encoded_len,
+ int sample_rate_hz,
+ int16_t* decoded,
+ SpeechType* speech_type) {
+ RTC_DCHECK_EQ(sample_rate_hz, sample_rate_hz_);
+ int16_t temp_type = 1; // Default is speech.
+ int ret =
+ WebRtcOpus_Decode(dec_state_, encoded, encoded_len, decoded, &temp_type);
+ if (ret > 0)
+ ret *= static_cast<int>(channels_); // Return total number of samples.
+ *speech_type = ConvertSpeechType(temp_type);
+ return ret;
+}
+
+int AudioDecoderOpusImpl::DecodeRedundantInternal(const uint8_t* encoded,
+ size_t encoded_len,
+ int sample_rate_hz,
+ int16_t* decoded,
+ SpeechType* speech_type) {
+ if (!PacketHasFec(encoded, encoded_len)) {
+ // This packet is a RED packet.
+ return DecodeInternal(encoded, encoded_len, sample_rate_hz, decoded,
+ speech_type);
+ }
+
+ RTC_DCHECK_EQ(sample_rate_hz, sample_rate_hz_);
+ int16_t temp_type = 1; // Default is speech.
+ int ret = WebRtcOpus_DecodeFec(dec_state_, encoded, encoded_len, decoded,
+ &temp_type);
+ if (ret > 0)
+ ret *= static_cast<int>(channels_); // Return total number of samples.
+ *speech_type = ConvertSpeechType(temp_type);
+ return ret;
+}
+
+void AudioDecoderOpusImpl::Reset() {
+ WebRtcOpus_DecoderInit(dec_state_);
+}
+
+int AudioDecoderOpusImpl::PacketDuration(const uint8_t* encoded,
+ size_t encoded_len) const {
+ return WebRtcOpus_DurationEst(dec_state_, encoded, encoded_len);
+}
+
+int AudioDecoderOpusImpl::PacketDurationRedundant(const uint8_t* encoded,
+ size_t encoded_len) const {
+ if (!PacketHasFec(encoded, encoded_len)) {
+ // This packet is a RED packet.
+ return PacketDuration(encoded, encoded_len);
+ }
+
+ return WebRtcOpus_FecDurationEst(encoded, encoded_len, sample_rate_hz_);
+}
+
+bool AudioDecoderOpusImpl::PacketHasFec(const uint8_t* encoded,
+ size_t encoded_len) const {
+  const int fec = WebRtcOpus_PacketHasFec(encoded, encoded_len);
+  return (fec == 1);
+}
+
+int AudioDecoderOpusImpl::SampleRateHz() const {
+ return sample_rate_hz_;
+}
+
+size_t AudioDecoderOpusImpl::Channels() const {
+ return channels_;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/opus/audio_decoder_opus.h b/third_party/libwebrtc/modules/audio_coding/codecs/opus/audio_decoder_opus.h
new file mode 100644
index 0000000000..e8fd0440bc
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/opus/audio_decoder_opus.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_OPUS_AUDIO_DECODER_OPUS_H_
+#define MODULES_AUDIO_CODING_CODECS_OPUS_AUDIO_DECODER_OPUS_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <vector>
+
+#include "api/audio_codecs/audio_decoder.h"
+#include "modules/audio_coding/codecs/opus/opus_interface.h"
+#include "rtc_base/buffer.h"
+
+namespace webrtc {
+
+class AudioDecoderOpusImpl final : public AudioDecoder {
+ public:
+ explicit AudioDecoderOpusImpl(size_t num_channels,
+ int sample_rate_hz = 48000);
+ ~AudioDecoderOpusImpl() override;
+
+ AudioDecoderOpusImpl(const AudioDecoderOpusImpl&) = delete;
+ AudioDecoderOpusImpl& operator=(const AudioDecoderOpusImpl&) = delete;
+
+ std::vector<ParseResult> ParsePayload(rtc::Buffer&& payload,
+ uint32_t timestamp) override;
+ void Reset() override;
+ int PacketDuration(const uint8_t* encoded, size_t encoded_len) const override;
+ int PacketDurationRedundant(const uint8_t* encoded,
+ size_t encoded_len) const override;
+ bool PacketHasFec(const uint8_t* encoded, size_t encoded_len) const override;
+ int SampleRateHz() const override;
+ size_t Channels() const override;
+
+ protected:
+ int DecodeInternal(const uint8_t* encoded,
+ size_t encoded_len,
+ int sample_rate_hz,
+ int16_t* decoded,
+ SpeechType* speech_type) override;
+ int DecodeRedundantInternal(const uint8_t* encoded,
+ size_t encoded_len,
+ int sample_rate_hz,
+ int16_t* decoded,
+ SpeechType* speech_type) override;
+
+ private:
+ OpusDecInst* dec_state_;
+ const size_t channels_;
+ const int sample_rate_hz_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_CODING_CODECS_OPUS_AUDIO_DECODER_OPUS_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/opus/audio_encoder_multi_channel_opus_impl.cc b/third_party/libwebrtc/modules/audio_coding/codecs/opus/audio_encoder_multi_channel_opus_impl.cc
new file mode 100644
index 0000000000..38a11c123d
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/opus/audio_encoder_multi_channel_opus_impl.cc
@@ -0,0 +1,366 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * LEFT TO DO:
+ * - Write tests for the code in this file.
+ * - Check the creation path; consider making it safer by returning an empty
+ *   optional or unique_ptr. It looks OK, but RecreateEncoderInstance can
+ *   perhaps crash on a valid config. Run it in the fuzzer for some time, and
+ *   probably fuzz the config as well.
+ */
+
+#include "modules/audio_coding/codecs/opus/audio_encoder_multi_channel_opus_impl.h"
+
+#include <algorithm>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "absl/strings/match.h"
+#include "modules/audio_coding/codecs/opus/audio_coder_opus_common.h"
+#include "rtc_base/arraysize.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/string_to_number.h"
+
+namespace webrtc {
+
+namespace {
+
+// Recommended bitrates for one channel:
+// 8-12 kb/s for NB speech,
+// 16-20 kb/s for WB speech,
+// 28-40 kb/s for FB speech,
+// 48-64 kb/s for FB mono music, and
+// 64-128 kb/s for FB stereo music.
+// The current implementation multiplies these values by the number of channels.
+constexpr int kOpusBitrateNbBps = 12000;
+constexpr int kOpusBitrateWbBps = 20000;
+constexpr int kOpusBitrateFbBps = 32000;
+
+constexpr int kDefaultMaxPlaybackRate = 48000;
+// Both variants of this list must be sorted from low to high.
+#if WEBRTC_OPUS_SUPPORT_120MS_PTIME
+constexpr int kOpusSupportedFrameLengths[] = {10, 20, 40, 60, 120};
+#else
+constexpr int kOpusSupportedFrameLengths[] = {10, 20, 40, 60};
+#endif
+
+int GetBitrateBps(const AudioEncoderMultiChannelOpusConfig& config) {
+ RTC_DCHECK(config.IsOk());
+ return config.bitrate_bps;
+}
+int GetMaxPlaybackRate(const SdpAudioFormat& format) {
+ const auto param = GetFormatParameter<int>(format, "maxplaybackrate");
+ if (param && *param >= 8000) {
+ return std::min(*param, kDefaultMaxPlaybackRate);
+ }
+ return kDefaultMaxPlaybackRate;
+}
+
+int GetFrameSizeMs(const SdpAudioFormat& format) {
+ const auto ptime = GetFormatParameter<int>(format, "ptime");
+ if (ptime.has_value()) {
+ // Pick the next highest supported frame length from
+ // kOpusSupportedFrameLengths.
+ for (const int supported_frame_length : kOpusSupportedFrameLengths) {
+ if (supported_frame_length >= *ptime) {
+ return supported_frame_length;
+ }
+ }
+ // If none was found, return the largest supported frame length.
+ return *(std::end(kOpusSupportedFrameLengths) - 1);
+ }
+
+ return AudioEncoderOpusConfig::kDefaultFrameSizeMs;
+}
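+
+// For example (assumed SDP): with the 120 ms frame length compiled out, the
+// list is {10, 20, 40, 60}, so "ptime=25" rounds up to a 40 ms frame and
+// "ptime=100" runs past the end of the list and is clamped to 60 ms.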
+
+int CalculateDefaultBitrate(int max_playback_rate, size_t num_channels) {
+ const int bitrate = [&] {
+ if (max_playback_rate <= 8000) {
+ return kOpusBitrateNbBps * rtc::dchecked_cast<int>(num_channels);
+ } else if (max_playback_rate <= 16000) {
+ return kOpusBitrateWbBps * rtc::dchecked_cast<int>(num_channels);
+ } else {
+ return kOpusBitrateFbBps * rtc::dchecked_cast<int>(num_channels);
+ }
+ }();
+ RTC_DCHECK_GE(bitrate, AudioEncoderMultiChannelOpusConfig::kMinBitrateBps);
+ return bitrate;
+}
+
+// Take the maxaveragebitrate parameter in string form, so that invalid values
+// can be detected and logged accurately.
+int CalculateBitrate(int max_playback_rate_hz,
+ size_t num_channels,
+ absl::optional<std::string> bitrate_param) {
+ const int default_bitrate =
+ CalculateDefaultBitrate(max_playback_rate_hz, num_channels);
+
+ if (bitrate_param) {
+ const auto bitrate = rtc::StringToNumber<int>(*bitrate_param);
+ if (bitrate) {
+ const int chosen_bitrate =
+ std::max(AudioEncoderOpusConfig::kMinBitrateBps,
+ std::min(*bitrate, AudioEncoderOpusConfig::kMaxBitrateBps));
+ if (bitrate != chosen_bitrate) {
+ RTC_LOG(LS_WARNING) << "Invalid maxaveragebitrate " << *bitrate
+ << " clamped to " << chosen_bitrate;
+ }
+ return chosen_bitrate;
+ }
+ RTC_LOG(LS_WARNING) << "Invalid maxaveragebitrate \"" << *bitrate_param
+ << "\" replaced by default bitrate " << default_bitrate;
+ }
+
+ return default_bitrate;
+}
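+
+// Worked example (assumed inputs): for six channels with maxplaybackrate=16000
+// and no maxaveragebitrate, CalculateDefaultBitrate() picks the wideband rate,
+// 20000 * 6 = 120000 bps. A parseable maxaveragebitrate outside
+// [kMinBitrateBps, kMaxBitrateBps] is clamped (with a warning); an unparseable
+// one falls back to this default.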
+
+} // namespace
+
+std::unique_ptr<AudioEncoder>
+AudioEncoderMultiChannelOpusImpl::MakeAudioEncoder(
+ const AudioEncoderMultiChannelOpusConfig& config,
+ int payload_type) {
+ if (!config.IsOk()) {
+ RTC_DCHECK_NOTREACHED();
+ return nullptr;
+ }
+ return std::make_unique<AudioEncoderMultiChannelOpusImpl>(config,
+ payload_type);
+}
+
+AudioEncoderMultiChannelOpusImpl::AudioEncoderMultiChannelOpusImpl(
+ const AudioEncoderMultiChannelOpusConfig& config,
+ int payload_type)
+ : payload_type_(payload_type), inst_(nullptr) {
+ RTC_DCHECK(0 <= payload_type && payload_type <= 127);
+
+ RTC_CHECK(RecreateEncoderInstance(config));
+}
+
+AudioEncoderMultiChannelOpusImpl::~AudioEncoderMultiChannelOpusImpl() {
+ RTC_CHECK_EQ(0, WebRtcOpus_EncoderFree(inst_));
+}
+
+size_t AudioEncoderMultiChannelOpusImpl::SufficientOutputBufferSize() const {
+ // Calculate the number of bytes we expect the encoder to produce,
+ // then multiply by two to give a wide margin for error.
+ const size_t bytes_per_millisecond =
+ static_cast<size_t>(GetBitrateBps(config_) / (1000 * 8) + 1);
+ const size_t approx_encoded_bytes =
+ Num10msFramesPerPacket() * 10 * bytes_per_millisecond;
+ return 2 * approx_encoded_bytes;
+}
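+
+// Worked example (assumed config): at 128000 bps with 20 ms frames,
+// bytes_per_millisecond = 128000 / 8000 + 1 = 17 and approx_encoded_bytes =
+// 2 * 10 * 17 = 340, so the buffer is sized at 680 bytes.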
+
+void AudioEncoderMultiChannelOpusImpl::Reset() {
+ RTC_CHECK(RecreateEncoderInstance(config_));
+}
+
+absl::optional<std::pair<TimeDelta, TimeDelta>>
+AudioEncoderMultiChannelOpusImpl::GetFrameLengthRange() const {
+ return {{TimeDelta::Millis(config_.frame_size_ms),
+ TimeDelta::Millis(config_.frame_size_ms)}};
+}
+
+// If the given config is OK, recreate the Opus encoder instance with those
+// settings, save the config, and return true. Otherwise, do nothing and return
+// false.
+bool AudioEncoderMultiChannelOpusImpl::RecreateEncoderInstance(
+ const AudioEncoderMultiChannelOpusConfig& config) {
+ if (!config.IsOk())
+ return false;
+ config_ = config;
+ if (inst_)
+ RTC_CHECK_EQ(0, WebRtcOpus_EncoderFree(inst_));
+ input_buffer_.clear();
+ input_buffer_.reserve(Num10msFramesPerPacket() * SamplesPer10msFrame());
+ RTC_CHECK_EQ(
+ 0, WebRtcOpus_MultistreamEncoderCreate(
+ &inst_, config.num_channels,
+ config.application ==
+ AudioEncoderMultiChannelOpusConfig::ApplicationMode::kVoip
+ ? 0
+ : 1,
+ config.num_streams, config.coupled_streams,
+ config.channel_mapping.data()));
+ const int bitrate = GetBitrateBps(config);
+ RTC_CHECK_EQ(0, WebRtcOpus_SetBitRate(inst_, bitrate));
+ RTC_LOG(LS_VERBOSE) << "Set Opus bitrate to " << bitrate << " bps.";
+ if (config.fec_enabled) {
+ RTC_CHECK_EQ(0, WebRtcOpus_EnableFec(inst_));
+ RTC_LOG(LS_VERBOSE) << "Opus enable FEC";
+ } else {
+ RTC_CHECK_EQ(0, WebRtcOpus_DisableFec(inst_));
+ RTC_LOG(LS_VERBOSE) << "Opus disable FEC";
+ }
+ RTC_CHECK_EQ(
+ 0, WebRtcOpus_SetMaxPlaybackRate(inst_, config.max_playback_rate_hz));
+ RTC_LOG(LS_VERBOSE) << "Set Opus playback rate to "
+                      << config.max_playback_rate_hz << " Hz.";
+
+ // Use the DEFAULT complexity.
+ RTC_CHECK_EQ(
+ 0, WebRtcOpus_SetComplexity(inst_, AudioEncoderOpusConfig().complexity));
+ RTC_LOG(LS_VERBOSE) << "Set Opus coding complexity to "
+ << AudioEncoderOpusConfig().complexity;
+
+ if (config.dtx_enabled) {
+ RTC_CHECK_EQ(0, WebRtcOpus_EnableDtx(inst_));
+ RTC_LOG(LS_VERBOSE) << "Opus enable DTX";
+ } else {
+ RTC_CHECK_EQ(0, WebRtcOpus_DisableDtx(inst_));
+ RTC_LOG(LS_VERBOSE) << "Opus disable DTX";
+ }
+
+ if (config.cbr_enabled) {
+ RTC_CHECK_EQ(0, WebRtcOpus_EnableCbr(inst_));
+ RTC_LOG(LS_VERBOSE) << "Opus enable CBR";
+ } else {
+ RTC_CHECK_EQ(0, WebRtcOpus_DisableCbr(inst_));
+ RTC_LOG(LS_VERBOSE) << "Opus disable CBR";
+ }
+ num_channels_to_encode_ = NumChannels();
+ next_frame_length_ms_ = config_.frame_size_ms;
+ RTC_LOG(LS_VERBOSE) << "Set Opus frame length to " << config_.frame_size_ms
+ << " ms";
+ return true;
+}
+
+absl::optional<AudioEncoderMultiChannelOpusConfig>
+AudioEncoderMultiChannelOpusImpl::SdpToConfig(const SdpAudioFormat& format) {
+ if (!absl::EqualsIgnoreCase(format.name, "multiopus") ||
+ format.clockrate_hz != 48000) {
+ return absl::nullopt;
+ }
+
+ AudioEncoderMultiChannelOpusConfig config;
+ config.num_channels = format.num_channels;
+ config.frame_size_ms = GetFrameSizeMs(format);
+ config.max_playback_rate_hz = GetMaxPlaybackRate(format);
+ config.fec_enabled = (GetFormatParameter(format, "useinbandfec") == "1");
+ config.dtx_enabled = (GetFormatParameter(format, "usedtx") == "1");
+ config.cbr_enabled = (GetFormatParameter(format, "cbr") == "1");
+ config.bitrate_bps =
+ CalculateBitrate(config.max_playback_rate_hz, config.num_channels,
+ GetFormatParameter(format, "maxaveragebitrate"));
+ config.application =
+ config.num_channels == 1
+ ? AudioEncoderMultiChannelOpusConfig::ApplicationMode::kVoip
+ : AudioEncoderMultiChannelOpusConfig::ApplicationMode::kAudio;
+
+ config.supported_frame_lengths_ms.clear();
+ std::copy(std::begin(kOpusSupportedFrameLengths),
+ std::end(kOpusSupportedFrameLengths),
+ std::back_inserter(config.supported_frame_lengths_ms));
+
+ auto num_streams = GetFormatParameter<int>(format, "num_streams");
+ if (!num_streams.has_value()) {
+ return absl::nullopt;
+ }
+ config.num_streams = *num_streams;
+
+ auto coupled_streams = GetFormatParameter<int>(format, "coupled_streams");
+ if (!coupled_streams.has_value()) {
+ return absl::nullopt;
+ }
+ config.coupled_streams = *coupled_streams;
+
+ auto channel_mapping =
+ GetFormatParameter<std::vector<unsigned char>>(format, "channel_mapping");
+ if (!channel_mapping.has_value()) {
+ return absl::nullopt;
+ }
+ config.channel_mapping = *channel_mapping;
+
+ if (!config.IsOk()) {
+ return absl::nullopt;
+ }
+ return config;
+}
+
+AudioCodecInfo AudioEncoderMultiChannelOpusImpl::QueryAudioEncoder(
+ const AudioEncoderMultiChannelOpusConfig& config) {
+ RTC_DCHECK(config.IsOk());
+ AudioCodecInfo info(48000, config.num_channels, config.bitrate_bps,
+ AudioEncoderOpusConfig::kMinBitrateBps,
+ AudioEncoderOpusConfig::kMaxBitrateBps);
+ info.allow_comfort_noise = false;
+ info.supports_network_adaption = false;
+ return info;
+}
+
+size_t AudioEncoderMultiChannelOpusImpl::Num10msFramesPerPacket() const {
+ return static_cast<size_t>(rtc::CheckedDivExact(config_.frame_size_ms, 10));
+}
+size_t AudioEncoderMultiChannelOpusImpl::SamplesPer10msFrame() const {
+ return rtc::CheckedDivExact(48000, 100) * config_.num_channels;
+}
+int AudioEncoderMultiChannelOpusImpl::SampleRateHz() const {
+ return 48000;
+}
+size_t AudioEncoderMultiChannelOpusImpl::NumChannels() const {
+ return config_.num_channels;
+}
+size_t AudioEncoderMultiChannelOpusImpl::Num10MsFramesInNextPacket() const {
+ return Num10msFramesPerPacket();
+}
+size_t AudioEncoderMultiChannelOpusImpl::Max10MsFramesInAPacket() const {
+ return Num10msFramesPerPacket();
+}
+int AudioEncoderMultiChannelOpusImpl::GetTargetBitrate() const {
+ return GetBitrateBps(config_);
+}
+
+AudioEncoder::EncodedInfo AudioEncoderMultiChannelOpusImpl::EncodeImpl(
+ uint32_t rtp_timestamp,
+ rtc::ArrayView<const int16_t> audio,
+ rtc::Buffer* encoded) {
+ if (input_buffer_.empty())
+ first_timestamp_in_buffer_ = rtp_timestamp;
+
+ input_buffer_.insert(input_buffer_.end(), audio.cbegin(), audio.cend());
+ if (input_buffer_.size() <
+ (Num10msFramesPerPacket() * SamplesPer10msFrame())) {
+ return EncodedInfo();
+ }
+ RTC_CHECK_EQ(input_buffer_.size(),
+ Num10msFramesPerPacket() * SamplesPer10msFrame());
+
+ const size_t max_encoded_bytes = SufficientOutputBufferSize();
+ EncodedInfo info;
+ info.encoded_bytes = encoded->AppendData(
+ max_encoded_bytes, [&](rtc::ArrayView<uint8_t> encoded) {
+ int status = WebRtcOpus_Encode(
+ inst_, &input_buffer_[0],
+ rtc::CheckedDivExact(input_buffer_.size(), config_.num_channels),
+ rtc::saturated_cast<int16_t>(max_encoded_bytes), encoded.data());
+
+ RTC_CHECK_GE(status, 0); // Fails only if fed invalid data.
+
+ return static_cast<size_t>(status);
+ });
+ input_buffer_.clear();
+
+ // Will use new packet size for next encoding.
+ config_.frame_size_ms = next_frame_length_ms_;
+
+ info.encoded_timestamp = first_timestamp_in_buffer_;
+ info.payload_type = payload_type_;
+ info.send_even_if_empty = true; // Allows Opus to send empty packets.
+
+ info.speech = true;
+ info.encoder_type = CodecType::kOther;
+
+ return info;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/opus/audio_encoder_multi_channel_opus_impl.h b/third_party/libwebrtc/modules/audio_coding/codecs/opus/audio_encoder_multi_channel_opus_impl.h
new file mode 100644
index 0000000000..8a7210515c
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/opus/audio_encoder_multi_channel_opus_impl.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_OPUS_AUDIO_ENCODER_MULTI_CHANNEL_OPUS_IMPL_H_
+#define MODULES_AUDIO_CODING_CODECS_OPUS_AUDIO_ENCODER_MULTI_CHANNEL_OPUS_IMPL_H_
+
+#include <memory>
+#include <utility>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/audio_codecs/audio_encoder.h"
+#include "api/audio_codecs/audio_format.h"
+#include "api/audio_codecs/opus/audio_encoder_multi_channel_opus_config.h"
+#include "api/units/time_delta.h"
+#include "modules/audio_coding/codecs/opus/opus_interface.h"
+
+namespace webrtc {
+
+class RtcEventLog;
+
+class AudioEncoderMultiChannelOpusImpl final : public AudioEncoder {
+ public:
+ AudioEncoderMultiChannelOpusImpl(
+ const AudioEncoderMultiChannelOpusConfig& config,
+ int payload_type);
+ ~AudioEncoderMultiChannelOpusImpl() override;
+
+ AudioEncoderMultiChannelOpusImpl(const AudioEncoderMultiChannelOpusImpl&) =
+ delete;
+ AudioEncoderMultiChannelOpusImpl& operator=(
+ const AudioEncoderMultiChannelOpusImpl&) = delete;
+
+ // Static interface for use by BuiltinAudioEncoderFactory.
+ static constexpr const char* GetPayloadName() { return "multiopus"; }
+ static absl::optional<AudioCodecInfo> QueryAudioEncoder(
+ const SdpAudioFormat& format);
+
+ int SampleRateHz() const override;
+ size_t NumChannels() const override;
+ size_t Num10MsFramesInNextPacket() const override;
+ size_t Max10MsFramesInAPacket() const override;
+ int GetTargetBitrate() const override;
+
+ void Reset() override;
+ absl::optional<std::pair<TimeDelta, TimeDelta>> GetFrameLengthRange()
+ const override;
+
+ protected:
+ EncodedInfo EncodeImpl(uint32_t rtp_timestamp,
+ rtc::ArrayView<const int16_t> audio,
+ rtc::Buffer* encoded) override;
+
+ private:
+ static absl::optional<AudioEncoderMultiChannelOpusConfig> SdpToConfig(
+ const SdpAudioFormat& format);
+ static AudioCodecInfo QueryAudioEncoder(
+ const AudioEncoderMultiChannelOpusConfig& config);
+ static std::unique_ptr<AudioEncoder> MakeAudioEncoder(
+ const AudioEncoderMultiChannelOpusConfig&,
+ int payload_type);
+
+ size_t Num10msFramesPerPacket() const;
+ size_t SamplesPer10msFrame() const;
+ size_t SufficientOutputBufferSize() const;
+ bool RecreateEncoderInstance(
+ const AudioEncoderMultiChannelOpusConfig& config);
+ void SetFrameLength(int frame_length_ms);
+ void SetNumChannelsToEncode(size_t num_channels_to_encode);
+ void SetProjectedPacketLossRate(float fraction);
+
+ AudioEncoderMultiChannelOpusConfig config_;
+ const int payload_type_;
+ std::vector<int16_t> input_buffer_;
+ OpusEncInst* inst_;
+ uint32_t first_timestamp_in_buffer_;
+ size_t num_channels_to_encode_;
+ int next_frame_length_ms_;
+
+ friend struct AudioEncoderMultiChannelOpus;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_CODING_CODECS_OPUS_AUDIO_ENCODER_MULTI_CHANNEL_OPUS_IMPL_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/opus/audio_encoder_multi_channel_opus_unittest.cc b/third_party/libwebrtc/modules/audio_coding/codecs/opus/audio_encoder_multi_channel_opus_unittest.cc
new file mode 100644
index 0000000000..92f6f2c169
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/opus/audio_encoder_multi_channel_opus_unittest.cc
@@ -0,0 +1,156 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "api/audio_codecs/opus/audio_encoder_multi_channel_opus.h"
+
+#include "test/gmock.h"
+
+namespace webrtc {
+using ::testing::NiceMock;
+using ::testing::Return;
+
+namespace {
+constexpr int kOpusPayloadType = 120;
+} // namespace
+
+TEST(AudioEncoderMultiOpusTest, CheckConfigValidity) {
+ {
+ const SdpAudioFormat sdp_format("multiopus", 48000, 2,
+ {{"channel_mapping", "3,0"},
+ {"coupled_streams", "1"},
+ {"num_streams", "2"}});
+ const absl::optional<AudioEncoderMultiChannelOpus::Config> encoder_config =
+ AudioEncoderMultiChannelOpus::SdpToConfig(sdp_format);
+
+ // Maps input channel 0 to coded channel 3, which doesn't exist.
+ EXPECT_FALSE(encoder_config.has_value());
+ }
+
+ {
+ const SdpAudioFormat sdp_format("multiopus", 48000, 2,
+ {{"channel_mapping", "0"},
+ {"coupled_streams", "1"},
+ {"num_streams", "2"}});
+ const absl::optional<AudioEncoderMultiChannelOpus::Config> encoder_config =
+ AudioEncoderMultiChannelOpus::SdpToConfig(sdp_format);
+
+ // The mapping is too short.
+ EXPECT_FALSE(encoder_config.has_value());
+ }
+ {
+ const SdpAudioFormat sdp_format("multiopus", 48000, 3,
+ {{"channel_mapping", "0,0,0"},
+ {"coupled_streams", "0"},
+ {"num_streams", "1"}});
+ const absl::optional<AudioEncoderMultiChannelOpus::Config> encoder_config =
+ AudioEncoderMultiChannelOpus::SdpToConfig(sdp_format);
+
+    // Coded channel 0 comes from all of input channels 0, 1, and 2.
+ EXPECT_FALSE(encoder_config.has_value());
+ }
+ {
+ const SdpAudioFormat sdp_format("multiopus", 48000, 3,
+ {{"channel_mapping", "0,255,255"},
+ {"coupled_streams", "0"},
+ {"num_streams", "1"}});
+ const absl::optional<AudioEncoderMultiChannelOpus::Config> encoder_config =
+ AudioEncoderMultiChannelOpus::SdpToConfig(sdp_format);
+ ASSERT_TRUE(encoder_config.has_value());
+
+    // This is fine, because channels 1 and 2 are set to be ignored.
+ EXPECT_TRUE(encoder_config->IsOk());
+ }
+ {
+ const SdpAudioFormat sdp_format("multiopus", 48000, 3,
+ {{"channel_mapping", "0,255,255"},
+ {"coupled_streams", "0"},
+ {"num_streams", "2"}});
+ const absl::optional<AudioEncoderMultiChannelOpus::Config> encoder_config =
+ AudioEncoderMultiChannelOpus::SdpToConfig(sdp_format);
+
+    // This is NOT fine, because nothing says how coded channel 1 should be
+    // coded.
+ EXPECT_FALSE(encoder_config.has_value());
+ }
+}
+
+TEST(AudioEncoderMultiOpusTest, ConfigValuesAreParsedCorrectly) {
+ SdpAudioFormat sdp_format({"multiopus",
+ 48000,
+ 6,
+ {{"minptime", "10"},
+ {"useinbandfec", "1"},
+ {"channel_mapping", "0,4,1,2,3,5"},
+ {"num_streams", "4"},
+ {"coupled_streams", "2"}}});
+ const absl::optional<AudioEncoderMultiChannelOpus::Config> encoder_config =
+ AudioEncoderMultiChannelOpus::SdpToConfig(sdp_format);
+ ASSERT_TRUE(encoder_config.has_value());
+
+ EXPECT_EQ(encoder_config->coupled_streams, 2);
+ EXPECT_EQ(encoder_config->num_streams, 4);
+ EXPECT_THAT(
+ encoder_config->channel_mapping,
+ testing::ContainerEq(std::vector<unsigned char>({0, 4, 1, 2, 3, 5})));
+}
+
+TEST(AudioEncoderMultiOpusTest, CreateFromValidConfig) {
+ {
+ const SdpAudioFormat sdp_format("multiopus", 48000, 3,
+ {{"channel_mapping", "0,255,255"},
+ {"coupled_streams", "0"},
+ {"num_streams", "2"}});
+ const absl::optional<AudioEncoderMultiChannelOpus::Config> encoder_config =
+ AudioEncoderMultiChannelOpus::SdpToConfig(sdp_format);
+ ASSERT_FALSE(encoder_config.has_value());
+ }
+ {
+ const SdpAudioFormat sdp_format("multiopus", 48000, 3,
+ {{"channel_mapping", "1,255,0"},
+ {"coupled_streams", "1"},
+ {"num_streams", "1"}});
+ const absl::optional<AudioEncoderMultiChannelOpus::Config> encoder_config =
+ AudioEncoderMultiChannelOpus::SdpToConfig(sdp_format);
+ ASSERT_TRUE(encoder_config.has_value());
+
+ EXPECT_THAT(encoder_config->channel_mapping,
+ testing::ContainerEq(std::vector<unsigned char>({1, 255, 0})));
+
+ EXPECT_TRUE(encoder_config->IsOk());
+
+ const std::unique_ptr<AudioEncoder> opus_encoder =
+ AudioEncoderMultiChannelOpus::MakeAudioEncoder(*encoder_config,
+ kOpusPayloadType);
+
+ // Creating an encoder from a valid config should work.
+ EXPECT_TRUE(opus_encoder);
+ }
+}
+
+TEST(AudioEncoderMultiOpusTest, AdvertisedCodecsCanBeCreated) {
+ std::vector<AudioCodecSpec> specs;
+ AudioEncoderMultiChannelOpus::AppendSupportedEncoders(&specs);
+
+ EXPECT_FALSE(specs.empty());
+
+ for (const AudioCodecSpec& spec : specs) {
+ const absl::optional<AudioEncoderMultiChannelOpus::Config> encoder_config =
+ AudioEncoderMultiChannelOpus::SdpToConfig(spec.format);
+ ASSERT_TRUE(encoder_config.has_value());
+
+ const std::unique_ptr<AudioEncoder> opus_encoder =
+ AudioEncoderMultiChannelOpus::MakeAudioEncoder(*encoder_config,
+ kOpusPayloadType);
+
+ EXPECT_TRUE(opus_encoder);
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/opus/audio_encoder_opus.cc b/third_party/libwebrtc/modules/audio_coding/codecs/opus/audio_encoder_opus.cc
new file mode 100644
index 0000000000..c9b1aefe69
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/opus/audio_encoder_opus.cc
@@ -0,0 +1,827 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/opus/audio_encoder_opus.h"
+
+#include <algorithm>
+#include <iterator>
+#include <memory>
+#include <string>
+#include <utility>
+
+#include "absl/strings/match.h"
+#include "absl/strings/string_view.h"
+#include "modules/audio_coding/audio_network_adaptor/audio_network_adaptor_impl.h"
+#include "modules/audio_coding/audio_network_adaptor/controller_manager.h"
+#include "modules/audio_coding/codecs/opus/audio_coder_opus_common.h"
+#include "modules/audio_coding/codecs/opus/opus_interface.h"
+#include "rtc_base/arraysize.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/numerics/exp_filter.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "rtc_base/numerics/safe_minmax.h"
+#include "rtc_base/string_encode.h"
+#include "rtc_base/string_to_number.h"
+#include "rtc_base/time_utils.h"
+#include "system_wrappers/include/field_trial.h"
+
+namespace webrtc {
+
+namespace {
+
+// Codec parameters for Opus.
+// draft-spittka-payload-rtp-opus-03
+
+// Recommended bitrates:
+// 8-12 kb/s for NB speech,
+// 16-20 kb/s for WB speech,
+// 28-40 kb/s for FB speech,
+// 48-64 kb/s for FB mono music, and
+// 64-128 kb/s for FB stereo music.
+// The current implementation applies the following values to mono signals,
+// and multiplies them by 2 for stereo.
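+// For example, a stereo signal with a maximum playback rate of 16000 Hz gets
+// the wideband default: 2 * 20000 = 40000 bps.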
+constexpr int kOpusBitrateNbBps = 12000;
+constexpr int kOpusBitrateWbBps = 20000;
+constexpr int kOpusBitrateFbBps = 32000;
+
+constexpr int kRtpTimestampRateHz = 48000;
+constexpr int kDefaultMaxPlaybackRate = 48000;
+
+// These two lists must be sorted from low to high.
+#if WEBRTC_OPUS_SUPPORT_120MS_PTIME
+constexpr int kANASupportedFrameLengths[] = {20, 40, 60, 120};
+constexpr int kOpusSupportedFrameLengths[] = {10, 20, 40, 60, 120};
+#else
+constexpr int kANASupportedFrameLengths[] = {20, 40, 60};
+constexpr int kOpusSupportedFrameLengths[] = {10, 20, 40, 60};
+#endif
+
+// PacketLossFractionSmoother uses an exponential filter with a time constant
+// of -1.0 / ln(0.9999) = 10000 ms.
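+// (For an exponential filter with a per-millisecond decay factor alpha, the
+// time constant tau satisfies alpha^tau = e^-1, i.e. tau = -1.0 / ln(alpha).)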
+constexpr float kAlphaForPacketLossFractionSmoother = 0.9999f;
+constexpr float kMaxPacketLossFraction = 0.2f;
+
+int CalculateDefaultBitrate(int max_playback_rate, size_t num_channels) {
+ const int bitrate = [&] {
+ if (max_playback_rate <= 8000) {
+ return kOpusBitrateNbBps * rtc::dchecked_cast<int>(num_channels);
+ } else if (max_playback_rate <= 16000) {
+ return kOpusBitrateWbBps * rtc::dchecked_cast<int>(num_channels);
+ } else {
+ return kOpusBitrateFbBps * rtc::dchecked_cast<int>(num_channels);
+ }
+ }();
+ RTC_DCHECK_GE(bitrate, AudioEncoderOpusConfig::kMinBitrateBps);
+ RTC_DCHECK_LE(bitrate, AudioEncoderOpusConfig::kMaxBitrateBps);
+ return bitrate;
+}
+
+// Get the maxaveragebitrate parameter in string form, so that we can detect
+// invalid values and log them accurately.
+int CalculateBitrate(int max_playback_rate_hz,
+ size_t num_channels,
+ absl::optional<std::string> bitrate_param) {
+ const int default_bitrate =
+ CalculateDefaultBitrate(max_playback_rate_hz, num_channels);
+
+ if (bitrate_param) {
+ const auto bitrate = rtc::StringToNumber<int>(*bitrate_param);
+ if (bitrate) {
+ const int chosen_bitrate =
+ std::max(AudioEncoderOpusConfig::kMinBitrateBps,
+ std::min(*bitrate, AudioEncoderOpusConfig::kMaxBitrateBps));
+ if (bitrate != chosen_bitrate) {
+ RTC_LOG(LS_WARNING) << "Invalid maxaveragebitrate " << *bitrate
+ << " clamped to " << chosen_bitrate;
+ }
+ return chosen_bitrate;
+ }
+ RTC_LOG(LS_WARNING) << "Invalid maxaveragebitrate \"" << *bitrate_param
+ << "\" replaced by default bitrate " << default_bitrate;
+ }
+
+ return default_bitrate;
+}
+
+int GetChannelCount(const SdpAudioFormat& format) {
+ const auto param = GetFormatParameter(format, "stereo");
+ if (param == "1") {
+ return 2;
+ } else {
+ return 1;
+ }
+}
+
+int GetMaxPlaybackRate(const SdpAudioFormat& format) {
+ const auto param = GetFormatParameter<int>(format, "maxplaybackrate");
+ if (param && *param >= 8000) {
+ return std::min(*param, kDefaultMaxPlaybackRate);
+ }
+ return kDefaultMaxPlaybackRate;
+}
+
+int GetFrameSizeMs(const SdpAudioFormat& format) {
+ const auto ptime = GetFormatParameter<int>(format, "ptime");
+ if (ptime) {
+ // Pick the next highest supported frame length from
+ // kOpusSupportedFrameLengths.
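+    // E.g. a ptime of 25 ms selects 40 ms, the next supported frame length.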
+ for (const int supported_frame_length : kOpusSupportedFrameLengths) {
+ if (supported_frame_length >= *ptime) {
+ return supported_frame_length;
+ }
+ }
+ // If none was found, return the largest supported frame length.
+ return *(std::end(kOpusSupportedFrameLengths) - 1);
+ }
+
+ return AudioEncoderOpusConfig::kDefaultFrameSizeMs;
+}
+
+void FindSupportedFrameLengths(int min_frame_length_ms,
+ int max_frame_length_ms,
+ std::vector<int>* out) {
+ out->clear();
+ std::copy_if(std::begin(kANASupportedFrameLengths),
+ std::end(kANASupportedFrameLengths), std::back_inserter(*out),
+ [&](int frame_length_ms) {
+ return frame_length_ms >= min_frame_length_ms &&
+ frame_length_ms <= max_frame_length_ms;
+ });
+ RTC_DCHECK(std::is_sorted(out->begin(), out->end()));
+}
+
+int GetBitrateBps(const AudioEncoderOpusConfig& config) {
+ RTC_DCHECK(config.IsOk());
+ return *config.bitrate_bps;
+}
+
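+// Parses the "WebRTC-Audio-OpusBitrateMultipliers" field trial. The group
+// string must be '-'-separated and start with "Enabled"; e.g. a hypothetical
+// value of "Enabled-0.8-0.9-1.0" yields the multipliers {0.8, 0.9, 1.0},
+// covering the 5, 6 and 7 kbps ranges (see GetMultipliedBitrate below).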
+std::vector<float> GetBitrateMultipliers() {
+ constexpr char kBitrateMultipliersName[] =
+ "WebRTC-Audio-OpusBitrateMultipliers";
+ const bool use_bitrate_multipliers =
+ webrtc::field_trial::IsEnabled(kBitrateMultipliersName);
+ if (use_bitrate_multipliers) {
+ const std::string field_trial_string =
+ webrtc::field_trial::FindFullName(kBitrateMultipliersName);
+ std::vector<std::string> pieces;
+ rtc::tokenize(field_trial_string, '-', &pieces);
+ if (pieces.size() < 2 || pieces[0] != "Enabled") {
+ RTC_LOG(LS_WARNING) << "Invalid parameters for "
+ << kBitrateMultipliersName
+ << ", not using custom values.";
+ return std::vector<float>();
+ }
+ std::vector<float> multipliers(pieces.size() - 1);
+ for (size_t i = 1; i < pieces.size(); i++) {
+ if (!rtc::FromString(pieces[i], &multipliers[i - 1])) {
+ RTC_LOG(LS_WARNING)
+ << "Invalid parameters for " << kBitrateMultipliersName
+ << ", not using custom values.";
+ return std::vector<float>();
+ }
+ }
+ RTC_LOG(LS_INFO) << "Using custom bitrate multipliers: "
+ << field_trial_string;
+ return multipliers;
+ }
+ return std::vector<float>();
+}
+
+int GetMultipliedBitrate(int bitrate, const std::vector<float>& multipliers) {
+ // The multipliers are valid from 5 kbps.
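+  // E.g. with hypothetical multipliers {1.1f, 1.2f}, a 6000 bps bitrate uses
+  // multipliers[1] and becomes 7200 bps; bitrates outside the covered
+  // 5-6 kbps range are returned unchanged.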
+ const size_t bitrate_kbps = static_cast<size_t>(bitrate / 1000);
+ if (bitrate_kbps < 5 || bitrate_kbps >= multipliers.size() + 5) {
+ return bitrate;
+ }
+ return static_cast<int>(multipliers[bitrate_kbps - 5] * bitrate);
+}
+} // namespace
+
+void AudioEncoderOpusImpl::AppendSupportedEncoders(
+ std::vector<AudioCodecSpec>* specs) {
+ const SdpAudioFormat fmt = {"opus",
+ kRtpTimestampRateHz,
+ 2,
+ {{"minptime", "10"}, {"useinbandfec", "1"}}};
+ const AudioCodecInfo info = QueryAudioEncoder(*SdpToConfig(fmt));
+ specs->push_back({fmt, info});
+}
+
+AudioCodecInfo AudioEncoderOpusImpl::QueryAudioEncoder(
+ const AudioEncoderOpusConfig& config) {
+ RTC_DCHECK(config.IsOk());
+ AudioCodecInfo info(config.sample_rate_hz, config.num_channels,
+ *config.bitrate_bps,
+ AudioEncoderOpusConfig::kMinBitrateBps,
+ AudioEncoderOpusConfig::kMaxBitrateBps);
+ info.allow_comfort_noise = false;
+ info.supports_network_adaption = true;
+ return info;
+}
+
+std::unique_ptr<AudioEncoder> AudioEncoderOpusImpl::MakeAudioEncoder(
+ const AudioEncoderOpusConfig& config,
+ int payload_type) {
+ if (!config.IsOk()) {
+ RTC_DCHECK_NOTREACHED();
+ return nullptr;
+ }
+ return std::make_unique<AudioEncoderOpusImpl>(config, payload_type);
+}
+
+absl::optional<AudioEncoderOpusConfig> AudioEncoderOpusImpl::SdpToConfig(
+ const SdpAudioFormat& format) {
+ if (!absl::EqualsIgnoreCase(format.name, "opus") ||
+ format.clockrate_hz != kRtpTimestampRateHz) {
+ return absl::nullopt;
+ }
+
+ AudioEncoderOpusConfig config;
+ config.num_channels = GetChannelCount(format);
+ config.frame_size_ms = GetFrameSizeMs(format);
+ config.max_playback_rate_hz = GetMaxPlaybackRate(format);
+ config.fec_enabled = (GetFormatParameter(format, "useinbandfec") == "1");
+ config.dtx_enabled = (GetFormatParameter(format, "usedtx") == "1");
+ config.cbr_enabled = (GetFormatParameter(format, "cbr") == "1");
+ config.bitrate_bps =
+ CalculateBitrate(config.max_playback_rate_hz, config.num_channels,
+ GetFormatParameter(format, "maxaveragebitrate"));
+ config.application = config.num_channels == 1
+ ? AudioEncoderOpusConfig::ApplicationMode::kVoip
+ : AudioEncoderOpusConfig::ApplicationMode::kAudio;
+
+ constexpr int kMinANAFrameLength = kANASupportedFrameLengths[0];
+ constexpr int kMaxANAFrameLength =
+ kANASupportedFrameLengths[arraysize(kANASupportedFrameLengths) - 1];
+
+ // For now, minptime and maxptime are only used with ANA. If ptime is outside
+ // of this range, it will get adjusted once ANA takes hold. Ideally, we'd know
+ // if ANA was to be used when setting up the config, and adjust accordingly.
+ const int min_frame_length_ms =
+ GetFormatParameter<int>(format, "minptime").value_or(kMinANAFrameLength);
+ const int max_frame_length_ms =
+ GetFormatParameter<int>(format, "maxptime").value_or(kMaxANAFrameLength);
+
+ FindSupportedFrameLengths(min_frame_length_ms, max_frame_length_ms,
+ &config.supported_frame_lengths_ms);
+ if (!config.IsOk()) {
+ RTC_DCHECK_NOTREACHED();
+ return absl::nullopt;
+ }
+ return config;
+}
+
+absl::optional<int> AudioEncoderOpusImpl::GetNewComplexity(
+ const AudioEncoderOpusConfig& config) {
+ RTC_DCHECK(config.IsOk());
+ const int bitrate_bps = GetBitrateBps(config);
+ if (bitrate_bps >= config.complexity_threshold_bps -
+ config.complexity_threshold_window_bps &&
+ bitrate_bps <= config.complexity_threshold_bps +
+ config.complexity_threshold_window_bps) {
+ // Within the hysteresis window; make no change.
+ return absl::nullopt;
+ } else {
+ return bitrate_bps <= config.complexity_threshold_bps
+ ? config.low_rate_complexity
+ : config.complexity;
+ }
+}
+
+absl::optional<int> AudioEncoderOpusImpl::GetNewBandwidth(
+ const AudioEncoderOpusConfig& config,
+ OpusEncInst* inst) {
+ constexpr int kMinWidebandBitrate = 8000;
+ constexpr int kMaxNarrowbandBitrate = 9000;
+ constexpr int kAutomaticThreshold = 11000;
+ RTC_DCHECK(config.IsOk());
+ const int bitrate = GetBitrateBps(config);
+ if (bitrate > kAutomaticThreshold) {
+ return absl::optional<int>(OPUS_AUTO);
+ }
+ const int bandwidth = WebRtcOpus_GetBandwidth(inst);
+ RTC_DCHECK_GE(bandwidth, 0);
+ if (bitrate > kMaxNarrowbandBitrate && bandwidth < OPUS_BANDWIDTH_WIDEBAND) {
+ return absl::optional<int>(OPUS_BANDWIDTH_WIDEBAND);
+ } else if (bitrate < kMinWidebandBitrate &&
+ bandwidth > OPUS_BANDWIDTH_NARROWBAND) {
+ return absl::optional<int>(OPUS_BANDWIDTH_NARROWBAND);
+ }
+ return absl::optional<int>();
+}
+
+class AudioEncoderOpusImpl::PacketLossFractionSmoother {
+ public:
+ explicit PacketLossFractionSmoother()
+ : last_sample_time_ms_(rtc::TimeMillis()),
+ smoother_(kAlphaForPacketLossFractionSmoother) {}
+
+ // Gets the smoothed packet loss fraction.
+ float GetAverage() const {
+ float value = smoother_.filtered();
+ return (value == rtc::ExpFilter::kValueUndefined) ? 0.0f : value;
+ }
+
+ // Add new observation to the packet loss fraction smoother.
+ void AddSample(float packet_loss_fraction) {
+ int64_t now_ms = rtc::TimeMillis();
+ smoother_.Apply(static_cast<float>(now_ms - last_sample_time_ms_),
+ packet_loss_fraction);
+ last_sample_time_ms_ = now_ms;
+ }
+
+ private:
+ int64_t last_sample_time_ms_;
+
+ // An exponential filter is used to smooth the packet loss fraction.
+ rtc::ExpFilter smoother_;
+};
+
+AudioEncoderOpusImpl::AudioEncoderOpusImpl(const AudioEncoderOpusConfig& config,
+ int payload_type)
+ : AudioEncoderOpusImpl(
+ config,
+ payload_type,
+ [this](absl::string_view config_string, RtcEventLog* event_log) {
+ return DefaultAudioNetworkAdaptorCreator(config_string, event_log);
+ },
+      // We choose 5 seconds as the initial time constant based on
+      // empirical data.
+ std::make_unique<SmoothingFilterImpl>(5000)) {}
+
+AudioEncoderOpusImpl::AudioEncoderOpusImpl(
+ const AudioEncoderOpusConfig& config,
+ int payload_type,
+ const AudioNetworkAdaptorCreator& audio_network_adaptor_creator,
+ std::unique_ptr<SmoothingFilter> bitrate_smoother)
+ : payload_type_(payload_type),
+ send_side_bwe_with_overhead_(
+ !webrtc::field_trial::IsDisabled("WebRTC-SendSideBwe-WithOverhead")),
+ use_stable_target_for_adaptation_(!webrtc::field_trial::IsDisabled(
+ "WebRTC-Audio-StableTargetAdaptation")),
+ adjust_bandwidth_(
+ webrtc::field_trial::IsEnabled("WebRTC-AdjustOpusBandwidth")),
+ bitrate_changed_(true),
+ bitrate_multipliers_(GetBitrateMultipliers()),
+ packet_loss_rate_(0.0),
+ inst_(nullptr),
+ packet_loss_fraction_smoother_(new PacketLossFractionSmoother()),
+ audio_network_adaptor_creator_(audio_network_adaptor_creator),
+ bitrate_smoother_(std::move(bitrate_smoother)),
+ consecutive_dtx_frames_(0) {
+ RTC_DCHECK(0 <= payload_type && payload_type <= 127);
+
+ // Sanity check of the redundant payload type field that we want to get rid
+ // of. See https://bugs.chromium.org/p/webrtc/issues/detail?id=7847
+ RTC_CHECK(config.payload_type == -1 || config.payload_type == payload_type);
+
+ RTC_CHECK(RecreateEncoderInstance(config));
+ SetProjectedPacketLossRate(packet_loss_rate_);
+}
+
+AudioEncoderOpusImpl::AudioEncoderOpusImpl(int payload_type,
+ const SdpAudioFormat& format)
+ : AudioEncoderOpusImpl(*SdpToConfig(format), payload_type) {}
+
+AudioEncoderOpusImpl::~AudioEncoderOpusImpl() {
+ RTC_CHECK_EQ(0, WebRtcOpus_EncoderFree(inst_));
+}
+
+int AudioEncoderOpusImpl::SampleRateHz() const {
+ return config_.sample_rate_hz;
+}
+
+size_t AudioEncoderOpusImpl::NumChannels() const {
+ return config_.num_channels;
+}
+
+int AudioEncoderOpusImpl::RtpTimestampRateHz() const {
+ return kRtpTimestampRateHz;
+}
+
+size_t AudioEncoderOpusImpl::Num10MsFramesInNextPacket() const {
+ return Num10msFramesPerPacket();
+}
+
+size_t AudioEncoderOpusImpl::Max10MsFramesInAPacket() const {
+ return Num10msFramesPerPacket();
+}
+
+int AudioEncoderOpusImpl::GetTargetBitrate() const {
+ return GetBitrateBps(config_);
+}
+
+void AudioEncoderOpusImpl::Reset() {
+ RTC_CHECK(RecreateEncoderInstance(config_));
+}
+
+bool AudioEncoderOpusImpl::SetFec(bool enable) {
+ if (enable) {
+ RTC_CHECK_EQ(0, WebRtcOpus_EnableFec(inst_));
+ } else {
+ RTC_CHECK_EQ(0, WebRtcOpus_DisableFec(inst_));
+ }
+ config_.fec_enabled = enable;
+ return true;
+}
+
+bool AudioEncoderOpusImpl::SetDtx(bool enable) {
+ if (enable) {
+ RTC_CHECK_EQ(0, WebRtcOpus_EnableDtx(inst_));
+ } else {
+ RTC_CHECK_EQ(0, WebRtcOpus_DisableDtx(inst_));
+ }
+ config_.dtx_enabled = enable;
+ return true;
+}
+
+bool AudioEncoderOpusImpl::GetDtx() const {
+ return config_.dtx_enabled;
+}
+
+bool AudioEncoderOpusImpl::SetApplication(Application application) {
+ auto conf = config_;
+ switch (application) {
+ case Application::kSpeech:
+ conf.application = AudioEncoderOpusConfig::ApplicationMode::kVoip;
+ break;
+ case Application::kAudio:
+ conf.application = AudioEncoderOpusConfig::ApplicationMode::kAudio;
+ break;
+ }
+ return RecreateEncoderInstance(conf);
+}
+
+void AudioEncoderOpusImpl::SetMaxPlaybackRate(int frequency_hz) {
+ auto conf = config_;
+ conf.max_playback_rate_hz = frequency_hz;
+ RTC_CHECK(RecreateEncoderInstance(conf));
+}
+
+bool AudioEncoderOpusImpl::EnableAudioNetworkAdaptor(
+ const std::string& config_string,
+ RtcEventLog* event_log) {
+ audio_network_adaptor_ =
+ audio_network_adaptor_creator_(config_string, event_log);
+ return audio_network_adaptor_.get() != nullptr;
+}
+
+void AudioEncoderOpusImpl::DisableAudioNetworkAdaptor() {
+ audio_network_adaptor_.reset(nullptr);
+}
+
+void AudioEncoderOpusImpl::OnReceivedUplinkPacketLossFraction(
+ float uplink_packet_loss_fraction) {
+ if (audio_network_adaptor_) {
+ audio_network_adaptor_->SetUplinkPacketLossFraction(
+ uplink_packet_loss_fraction);
+ ApplyAudioNetworkAdaptor();
+ }
+ packet_loss_fraction_smoother_->AddSample(uplink_packet_loss_fraction);
+ float average_fraction_loss = packet_loss_fraction_smoother_->GetAverage();
+ SetProjectedPacketLossRate(average_fraction_loss);
+}
+
+void AudioEncoderOpusImpl::OnReceivedTargetAudioBitrate(
+ int target_audio_bitrate_bps) {
+ SetTargetBitrate(target_audio_bitrate_bps);
+}
+
+void AudioEncoderOpusImpl::OnReceivedUplinkBandwidth(
+ int target_audio_bitrate_bps,
+ absl::optional<int64_t> bwe_period_ms,
+ absl::optional<int64_t> stable_target_bitrate_bps) {
+ if (audio_network_adaptor_) {
+ audio_network_adaptor_->SetTargetAudioBitrate(target_audio_bitrate_bps);
+ if (use_stable_target_for_adaptation_) {
+ if (stable_target_bitrate_bps)
+ audio_network_adaptor_->SetUplinkBandwidth(*stable_target_bitrate_bps);
+ } else {
+      // We give the smoothed bitrate allocation to the audio network adaptor
+      // as the uplink bandwidth. BWE spikes should not affect the bitrate
+      // smoother by more than 25%. To simplify the calculation, we use a step
+      // response as the input signal. The step response of an exponential
+      // filter is
+      //   u(t) = 1 - e^(-t / time_constant).
+      // To limit the effect of a BWE spike to within 25% of its value before
+      // the next BWE update, we choose a time constant that fulfills
+      //   1 - e^(-bwe_period_ms / time_constant) < 0.25.
+      // Then time_constant = 4 * bwe_period_ms is a good choice, since
+      // 1 - e^(-1/4) ~= 0.221 < 0.25.
+ if (bwe_period_ms)
+ bitrate_smoother_->SetTimeConstantMs(*bwe_period_ms * 4);
+ bitrate_smoother_->AddSample(target_audio_bitrate_bps);
+ }
+
+ ApplyAudioNetworkAdaptor();
+ } else if (send_side_bwe_with_overhead_) {
+ if (!overhead_bytes_per_packet_) {
+ RTC_LOG(LS_INFO)
+ << "AudioEncoderOpusImpl: Overhead unknown, target audio bitrate "
+ << target_audio_bitrate_bps << " bps is ignored.";
+ return;
+ }
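+    // Overhead in bps = overhead bytes/packet * 8 bits/byte * packets/second,
+    // where packets/second = 100 / Num10MsFramesInNextPacket(). E.g. 64
+    // overhead bytes with 20 ms packets: 64 * 8 * 50 = 25600 bps.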
+ const int overhead_bps = static_cast<int>(
+ *overhead_bytes_per_packet_ * 8 * 100 / Num10MsFramesInNextPacket());
+ SetTargetBitrate(
+ std::min(AudioEncoderOpusConfig::kMaxBitrateBps,
+ std::max(AudioEncoderOpusConfig::kMinBitrateBps,
+ target_audio_bitrate_bps - overhead_bps)));
+ } else {
+ SetTargetBitrate(target_audio_bitrate_bps);
+ }
+}
+void AudioEncoderOpusImpl::OnReceivedUplinkBandwidth(
+ int target_audio_bitrate_bps,
+ absl::optional<int64_t> bwe_period_ms) {
+ OnReceivedUplinkBandwidth(target_audio_bitrate_bps, bwe_period_ms,
+ absl::nullopt);
+}
+
+void AudioEncoderOpusImpl::OnReceivedUplinkAllocation(
+ BitrateAllocationUpdate update) {
+ OnReceivedUplinkBandwidth(update.target_bitrate.bps(), update.bwe_period.ms(),
+ update.stable_target_bitrate.bps());
+}
+
+void AudioEncoderOpusImpl::OnReceivedRtt(int rtt_ms) {
+ if (!audio_network_adaptor_)
+ return;
+ audio_network_adaptor_->SetRtt(rtt_ms);
+ ApplyAudioNetworkAdaptor();
+}
+
+void AudioEncoderOpusImpl::OnReceivedOverhead(
+ size_t overhead_bytes_per_packet) {
+ if (audio_network_adaptor_) {
+ audio_network_adaptor_->SetOverhead(overhead_bytes_per_packet);
+ ApplyAudioNetworkAdaptor();
+ } else {
+ overhead_bytes_per_packet_ = overhead_bytes_per_packet;
+ }
+}
+
+void AudioEncoderOpusImpl::SetReceiverFrameLengthRange(
+ int min_frame_length_ms,
+ int max_frame_length_ms) {
+  // Ensure that `SetReceiverFrameLengthRange` is called before
+  // `EnableAudioNetworkAdaptor`; otherwise we would need to recreate
+  // `audio_network_adaptor_`, which is not a use case we need to support.
+ RTC_DCHECK(!audio_network_adaptor_);
+ FindSupportedFrameLengths(min_frame_length_ms, max_frame_length_ms,
+ &config_.supported_frame_lengths_ms);
+}
+
+AudioEncoder::EncodedInfo AudioEncoderOpusImpl::EncodeImpl(
+ uint32_t rtp_timestamp,
+ rtc::ArrayView<const int16_t> audio,
+ rtc::Buffer* encoded) {
+ MaybeUpdateUplinkBandwidth();
+
+ if (input_buffer_.empty())
+ first_timestamp_in_buffer_ = rtp_timestamp;
+
+ input_buffer_.insert(input_buffer_.end(), audio.cbegin(), audio.cend());
+ if (input_buffer_.size() <
+ (Num10msFramesPerPacket() * SamplesPer10msFrame())) {
+ return EncodedInfo();
+ }
+ RTC_CHECK_EQ(input_buffer_.size(),
+ Num10msFramesPerPacket() * SamplesPer10msFrame());
+
+ const size_t max_encoded_bytes = SufficientOutputBufferSize();
+ EncodedInfo info;
+ info.encoded_bytes = encoded->AppendData(
+ max_encoded_bytes, [&](rtc::ArrayView<uint8_t> encoded) {
+ int status = WebRtcOpus_Encode(
+ inst_, &input_buffer_[0],
+ rtc::CheckedDivExact(input_buffer_.size(), config_.num_channels),
+ rtc::saturated_cast<int16_t>(max_encoded_bytes), encoded.data());
+
+ RTC_CHECK_GE(status, 0); // Fails only if fed invalid data.
+
+ return static_cast<size_t>(status);
+ });
+ input_buffer_.clear();
+
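+  // During DTX, Opus emits minimal packets of at most 2 bytes, so treat any
+  // frame that small as a DTX frame.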
+ bool dtx_frame = (info.encoded_bytes <= 2);
+
+ // Will use new packet size for next encoding.
+ config_.frame_size_ms = next_frame_length_ms_;
+
+ if (adjust_bandwidth_ && bitrate_changed_) {
+ const auto bandwidth = GetNewBandwidth(config_, inst_);
+ if (bandwidth) {
+ RTC_CHECK_EQ(0, WebRtcOpus_SetBandwidth(inst_, *bandwidth));
+ }
+ bitrate_changed_ = false;
+ }
+
+ info.encoded_timestamp = first_timestamp_in_buffer_;
+ info.payload_type = payload_type_;
+ info.send_even_if_empty = true; // Allows Opus to send empty packets.
+  // After 20 DTX frames (MAX_CONSECUTIVE_DTX), Opus sends a frame coding the
+  // background noise. Avoid flagging this frame as speech (even though there
+  // is some chance that the frame actually contains speech).
+ info.speech = !dtx_frame && (consecutive_dtx_frames_ != 20);
+ info.encoder_type = CodecType::kOpus;
+
+ // Increase or reset DTX counter.
+ consecutive_dtx_frames_ = (dtx_frame) ? (consecutive_dtx_frames_ + 1) : (0);
+
+ return info;
+}
+
+size_t AudioEncoderOpusImpl::Num10msFramesPerPacket() const {
+ return static_cast<size_t>(rtc::CheckedDivExact(config_.frame_size_ms, 10));
+}
+
+size_t AudioEncoderOpusImpl::SamplesPer10msFrame() const {
+ return rtc::CheckedDivExact(config_.sample_rate_hz, 100) *
+ config_.num_channels;
+}
+
+size_t AudioEncoderOpusImpl::SufficientOutputBufferSize() const {
+ // Calculate the number of bytes we expect the encoder to produce,
+ // then multiply by two to give a wide margin for error.
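+  // E.g. at 32000 bps with 20 ms packets: bytes_per_millisecond = 5,
+  // approx_encoded_bytes = 2 * 10 * 5 = 100, and 200 bytes are reserved.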
+ const size_t bytes_per_millisecond =
+ static_cast<size_t>(GetBitrateBps(config_) / (1000 * 8) + 1);
+ const size_t approx_encoded_bytes =
+ Num10msFramesPerPacket() * 10 * bytes_per_millisecond;
+ return 2 * approx_encoded_bytes;
+}
+
+// If the given config is OK, recreate the Opus encoder instance with those
+// settings, save the config, and return true. Otherwise, do nothing and return
+// false.
+bool AudioEncoderOpusImpl::RecreateEncoderInstance(
+ const AudioEncoderOpusConfig& config) {
+ if (!config.IsOk())
+ return false;
+ config_ = config;
+ if (inst_)
+ RTC_CHECK_EQ(0, WebRtcOpus_EncoderFree(inst_));
+ input_buffer_.clear();
+ input_buffer_.reserve(Num10msFramesPerPacket() * SamplesPer10msFrame());
+ RTC_CHECK_EQ(0, WebRtcOpus_EncoderCreate(
+ &inst_, config.num_channels,
+ config.application ==
+ AudioEncoderOpusConfig::ApplicationMode::kVoip
+ ? 0
+ : 1,
+ config.sample_rate_hz));
+ const int bitrate = GetBitrateBps(config);
+ RTC_CHECK_EQ(0, WebRtcOpus_SetBitRate(inst_, bitrate));
+ RTC_LOG(LS_VERBOSE) << "Set Opus bitrate to " << bitrate << " bps.";
+ if (config.fec_enabled) {
+ RTC_CHECK_EQ(0, WebRtcOpus_EnableFec(inst_));
+ } else {
+ RTC_CHECK_EQ(0, WebRtcOpus_DisableFec(inst_));
+ }
+ RTC_CHECK_EQ(
+ 0, WebRtcOpus_SetMaxPlaybackRate(inst_, config.max_playback_rate_hz));
+ // Use the default complexity if the start bitrate is within the hysteresis
+ // window.
+ complexity_ = GetNewComplexity(config).value_or(config.complexity);
+ RTC_CHECK_EQ(0, WebRtcOpus_SetComplexity(inst_, complexity_));
+ bitrate_changed_ = true;
+ if (config.dtx_enabled) {
+ RTC_CHECK_EQ(0, WebRtcOpus_EnableDtx(inst_));
+ } else {
+ RTC_CHECK_EQ(0, WebRtcOpus_DisableDtx(inst_));
+ }
+ RTC_CHECK_EQ(0,
+ WebRtcOpus_SetPacketLossRate(
+ inst_, static_cast<int32_t>(packet_loss_rate_ * 100 + .5)));
+ if (config.cbr_enabled) {
+ RTC_CHECK_EQ(0, WebRtcOpus_EnableCbr(inst_));
+ } else {
+ RTC_CHECK_EQ(0, WebRtcOpus_DisableCbr(inst_));
+ }
+ num_channels_to_encode_ = NumChannels();
+ next_frame_length_ms_ = config_.frame_size_ms;
+ return true;
+}
+
+void AudioEncoderOpusImpl::SetFrameLength(int frame_length_ms) {
+ if (next_frame_length_ms_ != frame_length_ms) {
+ RTC_LOG(LS_VERBOSE) << "Update Opus frame length "
+ << "from " << next_frame_length_ms_ << " ms "
+ << "to " << frame_length_ms << " ms.";
+ }
+ next_frame_length_ms_ = frame_length_ms;
+}
+
+void AudioEncoderOpusImpl::SetNumChannelsToEncode(
+ size_t num_channels_to_encode) {
+ RTC_DCHECK_GT(num_channels_to_encode, 0);
+ RTC_DCHECK_LE(num_channels_to_encode, config_.num_channels);
+
+ if (num_channels_to_encode_ == num_channels_to_encode)
+ return;
+
+ RTC_CHECK_EQ(0, WebRtcOpus_SetForceChannels(inst_, num_channels_to_encode));
+ num_channels_to_encode_ = num_channels_to_encode;
+}
+
+void AudioEncoderOpusImpl::SetProjectedPacketLossRate(float fraction) {
+ fraction = std::min(std::max(fraction, 0.0f), kMaxPacketLossFraction);
+ if (packet_loss_rate_ != fraction) {
+ packet_loss_rate_ = fraction;
+ RTC_CHECK_EQ(
+ 0, WebRtcOpus_SetPacketLossRate(
+ inst_, static_cast<int32_t>(packet_loss_rate_ * 100 + .5)));
+ }
+}
+
+void AudioEncoderOpusImpl::SetTargetBitrate(int bits_per_second) {
+ const int new_bitrate = rtc::SafeClamp<int>(
+ bits_per_second, AudioEncoderOpusConfig::kMinBitrateBps,
+ AudioEncoderOpusConfig::kMaxBitrateBps);
+ if (config_.bitrate_bps && *config_.bitrate_bps != new_bitrate) {
+ config_.bitrate_bps = new_bitrate;
+ RTC_DCHECK(config_.IsOk());
+ const int bitrate = GetBitrateBps(config_);
+ RTC_CHECK_EQ(
+ 0, WebRtcOpus_SetBitRate(
+ inst_, GetMultipliedBitrate(bitrate, bitrate_multipliers_)));
+ RTC_LOG(LS_VERBOSE) << "Set Opus bitrate to " << bitrate << " bps.";
+ bitrate_changed_ = true;
+ }
+
+ const auto new_complexity = GetNewComplexity(config_);
+ if (new_complexity && complexity_ != *new_complexity) {
+ complexity_ = *new_complexity;
+ RTC_CHECK_EQ(0, WebRtcOpus_SetComplexity(inst_, complexity_));
+ }
+}
+
+void AudioEncoderOpusImpl::ApplyAudioNetworkAdaptor() {
+ auto config = audio_network_adaptor_->GetEncoderRuntimeConfig();
+
+ if (config.bitrate_bps)
+ SetTargetBitrate(*config.bitrate_bps);
+ if (config.frame_length_ms)
+ SetFrameLength(*config.frame_length_ms);
+ if (config.enable_dtx)
+ SetDtx(*config.enable_dtx);
+ if (config.num_channels)
+ SetNumChannelsToEncode(*config.num_channels);
+}
+
+std::unique_ptr<AudioNetworkAdaptor>
+AudioEncoderOpusImpl::DefaultAudioNetworkAdaptorCreator(
+ absl::string_view config_string,
+ RtcEventLog* event_log) const {
+ AudioNetworkAdaptorImpl::Config config;
+ config.event_log = event_log;
+ return std::unique_ptr<AudioNetworkAdaptor>(new AudioNetworkAdaptorImpl(
+ config, ControllerManagerImpl::Create(
+ config_string, NumChannels(), supported_frame_lengths_ms(),
+ AudioEncoderOpusConfig::kMinBitrateBps,
+ num_channels_to_encode_, next_frame_length_ms_,
+ GetTargetBitrate(), config_.fec_enabled, GetDtx())));
+}
+
+void AudioEncoderOpusImpl::MaybeUpdateUplinkBandwidth() {
+ if (audio_network_adaptor_ && !use_stable_target_for_adaptation_) {
+ int64_t now_ms = rtc::TimeMillis();
+ if (!bitrate_smoother_last_update_time_ ||
+ now_ms - *bitrate_smoother_last_update_time_ >=
+ config_.uplink_bandwidth_update_interval_ms) {
+ absl::optional<float> smoothed_bitrate = bitrate_smoother_->GetAverage();
+ if (smoothed_bitrate)
+ audio_network_adaptor_->SetUplinkBandwidth(*smoothed_bitrate);
+ bitrate_smoother_last_update_time_ = now_ms;
+ }
+ }
+}
+
+ANAStats AudioEncoderOpusImpl::GetANAStats() const {
+ if (audio_network_adaptor_) {
+ return audio_network_adaptor_->GetStats();
+ }
+ return ANAStats();
+}
+
+absl::optional<std::pair<TimeDelta, TimeDelta>>
+AudioEncoderOpusImpl::GetFrameLengthRange() const {
+ if (config_.supported_frame_lengths_ms.empty()) {
+ return absl::nullopt;
+ } else if (audio_network_adaptor_) {
+ return {{TimeDelta::Millis(config_.supported_frame_lengths_ms.front()),
+ TimeDelta::Millis(config_.supported_frame_lengths_ms.back())}};
+ } else {
+ return {{TimeDelta::Millis(config_.frame_size_ms),
+ TimeDelta::Millis(config_.frame_size_ms)}};
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/opus/audio_encoder_opus.h b/third_party/libwebrtc/modules/audio_coding/codecs/opus/audio_encoder_opus.h
new file mode 100644
index 0000000000..a0c42af121
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/opus/audio_encoder_opus.h
@@ -0,0 +1,185 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_OPUS_AUDIO_ENCODER_OPUS_H_
+#define MODULES_AUDIO_CODING_CODECS_OPUS_AUDIO_ENCODER_OPUS_H_
+
+#include <functional>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "absl/strings/string_view.h"
+#include "absl/types/optional.h"
+#include "api/audio_codecs/audio_encoder.h"
+#include "api/audio_codecs/audio_format.h"
+#include "api/audio_codecs/opus/audio_encoder_opus_config.h"
+#include "common_audio/smoothing_filter.h"
+#include "modules/audio_coding/audio_network_adaptor/include/audio_network_adaptor.h"
+#include "modules/audio_coding/codecs/opus/opus_interface.h"
+
+namespace webrtc {
+
+class RtcEventLog;
+
+class AudioEncoderOpusImpl final : public AudioEncoder {
+ public:
+ // Returns empty if the current bitrate falls within the hysteresis window,
+ // defined by complexity_threshold_bps +/- complexity_threshold_window_bps.
+ // Otherwise, returns the current complexity depending on whether the
+ // current bitrate is above or below complexity_threshold_bps.
+ static absl::optional<int> GetNewComplexity(
+ const AudioEncoderOpusConfig& config);
+
+  // Returns OPUS_AUTO if the current bitrate is above the wideband threshold.
+  // Returns empty if it is below but the bandwidth already coincides with the
+  // desired one. Otherwise, returns the desired bandwidth.
+ static absl::optional<int> GetNewBandwidth(
+ const AudioEncoderOpusConfig& config,
+ OpusEncInst* inst);
+
+ using AudioNetworkAdaptorCreator =
+ std::function<std::unique_ptr<AudioNetworkAdaptor>(absl::string_view,
+ RtcEventLog*)>;
+
+ AudioEncoderOpusImpl(const AudioEncoderOpusConfig& config, int payload_type);
+
+ // Dependency injection for testing.
+ AudioEncoderOpusImpl(
+ const AudioEncoderOpusConfig& config,
+ int payload_type,
+ const AudioNetworkAdaptorCreator& audio_network_adaptor_creator,
+ std::unique_ptr<SmoothingFilter> bitrate_smoother);
+
+ AudioEncoderOpusImpl(int payload_type, const SdpAudioFormat& format);
+ ~AudioEncoderOpusImpl() override;
+
+ AudioEncoderOpusImpl(const AudioEncoderOpusImpl&) = delete;
+ AudioEncoderOpusImpl& operator=(const AudioEncoderOpusImpl&) = delete;
+
+ int SampleRateHz() const override;
+ size_t NumChannels() const override;
+ int RtpTimestampRateHz() const override;
+ size_t Num10MsFramesInNextPacket() const override;
+ size_t Max10MsFramesInAPacket() const override;
+ int GetTargetBitrate() const override;
+
+ void Reset() override;
+ bool SetFec(bool enable) override;
+
+  // Sets Opus DTX. Once enabled, Opus stops transmission when it detects
+  // inactive voice. While inactive, it still sends 2 packets (one for
+  // content, one for signaling) about every 400 ms.
+ bool SetDtx(bool enable) override;
+ bool GetDtx() const override;
+
+ bool SetApplication(Application application) override;
+ void SetMaxPlaybackRate(int frequency_hz) override;
+ bool EnableAudioNetworkAdaptor(const std::string& config_string,
+ RtcEventLog* event_log) override;
+ void DisableAudioNetworkAdaptor() override;
+ void OnReceivedUplinkPacketLossFraction(
+ float uplink_packet_loss_fraction) override;
+ void OnReceivedTargetAudioBitrate(int target_audio_bitrate_bps) override;
+ void OnReceivedUplinkBandwidth(
+ int target_audio_bitrate_bps,
+ absl::optional<int64_t> bwe_period_ms) override;
+ void OnReceivedUplinkAllocation(BitrateAllocationUpdate update) override;
+ void OnReceivedRtt(int rtt_ms) override;
+ void OnReceivedOverhead(size_t overhead_bytes_per_packet) override;
+ void SetReceiverFrameLengthRange(int min_frame_length_ms,
+ int max_frame_length_ms) override;
+ ANAStats GetANAStats() const override;
+  absl::optional<std::pair<TimeDelta, TimeDelta>> GetFrameLengthRange()
+ const override;
+ rtc::ArrayView<const int> supported_frame_lengths_ms() const {
+ return config_.supported_frame_lengths_ms;
+ }
+
+ // Getters for testing.
+ float packet_loss_rate() const { return packet_loss_rate_; }
+ AudioEncoderOpusConfig::ApplicationMode application() const {
+ return config_.application;
+ }
+ bool fec_enabled() const { return config_.fec_enabled; }
+ size_t num_channels_to_encode() const { return num_channels_to_encode_; }
+ int next_frame_length_ms() const { return next_frame_length_ms_; }
+
+ protected:
+ EncodedInfo EncodeImpl(uint32_t rtp_timestamp,
+ rtc::ArrayView<const int16_t> audio,
+ rtc::Buffer* encoded) override;
+
+ private:
+ class PacketLossFractionSmoother;
+
+ static absl::optional<AudioEncoderOpusConfig> SdpToConfig(
+ const SdpAudioFormat& format);
+ static void AppendSupportedEncoders(std::vector<AudioCodecSpec>* specs);
+ static AudioCodecInfo QueryAudioEncoder(const AudioEncoderOpusConfig& config);
+ static std::unique_ptr<AudioEncoder> MakeAudioEncoder(
+ const AudioEncoderOpusConfig&,
+ int payload_type);
+
+ size_t Num10msFramesPerPacket() const;
+ size_t SamplesPer10msFrame() const;
+ size_t SufficientOutputBufferSize() const;
+ bool RecreateEncoderInstance(const AudioEncoderOpusConfig& config);
+ void SetFrameLength(int frame_length_ms);
+ void SetNumChannelsToEncode(size_t num_channels_to_encode);
+ void SetProjectedPacketLossRate(float fraction);
+
+ void OnReceivedUplinkBandwidth(
+ int target_audio_bitrate_bps,
+ absl::optional<int64_t> bwe_period_ms,
+ absl::optional<int64_t> link_capacity_allocation);
+
+ // TODO(minyue): remove "override" when we can deprecate
+ // `AudioEncoder::SetTargetBitrate`.
+ void SetTargetBitrate(int target_bps) override;
+
+ void ApplyAudioNetworkAdaptor();
+ std::unique_ptr<AudioNetworkAdaptor> DefaultAudioNetworkAdaptorCreator(
+ absl::string_view config_string,
+ RtcEventLog* event_log) const;
+
+ void MaybeUpdateUplinkBandwidth();
+
+ AudioEncoderOpusConfig config_;
+ const int payload_type_;
+ const bool send_side_bwe_with_overhead_;
+ const bool use_stable_target_for_adaptation_;
+ const bool adjust_bandwidth_;
+ bool bitrate_changed_;
+  // Multipliers for bitrates at 5 kbps and higher. The target bitrate is
+  // multiplied by these multipliers; each multiplier applies to a 1 kbps
+  // range.
+ std::vector<float> bitrate_multipliers_;
+ float packet_loss_rate_;
+ std::vector<int16_t> input_buffer_;
+ OpusEncInst* inst_;
+ uint32_t first_timestamp_in_buffer_;
+ size_t num_channels_to_encode_;
+ int next_frame_length_ms_;
+ int complexity_;
+ std::unique_ptr<PacketLossFractionSmoother> packet_loss_fraction_smoother_;
+ const AudioNetworkAdaptorCreator audio_network_adaptor_creator_;
+ std::unique_ptr<AudioNetworkAdaptor> audio_network_adaptor_;
+ absl::optional<size_t> overhead_bytes_per_packet_;
+ const std::unique_ptr<SmoothingFilter> bitrate_smoother_;
+ absl::optional<int64_t> bitrate_smoother_last_update_time_;
+ int consecutive_dtx_frames_;
+
+ friend struct AudioEncoderOpus;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_CODING_CODECS_OPUS_AUDIO_ENCODER_OPUS_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/opus/audio_encoder_opus_unittest.cc b/third_party/libwebrtc/modules/audio_coding/codecs/opus/audio_encoder_opus_unittest.cc
new file mode 100644
index 0000000000..43e8a7a80f
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/opus/audio_encoder_opus_unittest.cc
@@ -0,0 +1,906 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "api/audio_codecs/opus/audio_encoder_opus.h"
+
+#include <array>
+#include <memory>
+#include <utility>
+
+#include "absl/strings/string_view.h"
+#include "common_audio/mocks/mock_smoothing_filter.h"
+#include "modules/audio_coding/audio_network_adaptor/mock/mock_audio_network_adaptor.h"
+#include "modules/audio_coding/codecs/opus/audio_encoder_opus.h"
+#include "modules/audio_coding/codecs/opus/opus_interface.h"
+#include "modules/audio_coding/neteq/tools/audio_loop.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/fake_clock.h"
+#include "test/field_trial.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "test/testsupport/file_utils.h"
+
+namespace webrtc {
+using ::testing::NiceMock;
+using ::testing::Return;
+
+namespace {
+
+constexpr int kDefaultOpusPayloadType = 105;
+constexpr int kDefaultOpusRate = 32000;
+constexpr int kDefaultOpusPacSize = 960;
+constexpr int64_t kInitialTimeUs = 12345678;
+
+AudioEncoderOpusConfig CreateConfigWithParameters(
+ const SdpAudioFormat::Parameters& params) {
+ const SdpAudioFormat format("opus", 48000, 2, params);
+ return *AudioEncoderOpus::SdpToConfig(format);
+}
+
+struct AudioEncoderOpusStates {
+ MockAudioNetworkAdaptor* mock_audio_network_adaptor;
+ MockSmoothingFilter* mock_bitrate_smoother;
+ std::unique_ptr<AudioEncoderOpusImpl> encoder;
+ std::unique_ptr<rtc::ScopedFakeClock> fake_clock;
+ AudioEncoderOpusConfig config;
+};
+
+std::unique_ptr<AudioEncoderOpusStates> CreateCodec(int sample_rate_hz,
+ size_t num_channels) {
+ std::unique_ptr<AudioEncoderOpusStates> states =
+ std::make_unique<AudioEncoderOpusStates>();
+ states->mock_audio_network_adaptor = nullptr;
+ states->fake_clock.reset(new rtc::ScopedFakeClock());
+ states->fake_clock->SetTime(Timestamp::Micros(kInitialTimeUs));
+
+ MockAudioNetworkAdaptor** mock_ptr = &states->mock_audio_network_adaptor;
+ AudioEncoderOpusImpl::AudioNetworkAdaptorCreator creator =
+ [mock_ptr](absl::string_view, RtcEventLog* event_log) {
+ std::unique_ptr<MockAudioNetworkAdaptor> adaptor(
+ new NiceMock<MockAudioNetworkAdaptor>());
+ EXPECT_CALL(*adaptor, Die());
+ *mock_ptr = adaptor.get();
+ return adaptor;
+ };
+
+ AudioEncoderOpusConfig config;
+ config.frame_size_ms = rtc::CheckedDivExact(kDefaultOpusPacSize, 48);
+ config.sample_rate_hz = sample_rate_hz;
+ config.num_channels = num_channels;
+ config.bitrate_bps = kDefaultOpusRate;
+ config.application = num_channels == 1
+ ? AudioEncoderOpusConfig::ApplicationMode::kVoip
+ : AudioEncoderOpusConfig::ApplicationMode::kAudio;
+ config.supported_frame_lengths_ms.push_back(config.frame_size_ms);
+ states->config = config;
+
+ std::unique_ptr<MockSmoothingFilter> bitrate_smoother(
+ new MockSmoothingFilter());
+ states->mock_bitrate_smoother = bitrate_smoother.get();
+
+ states->encoder.reset(
+ new AudioEncoderOpusImpl(states->config, kDefaultOpusPayloadType, creator,
+ std::move(bitrate_smoother)));
+ return states;
+}
+
+AudioEncoderRuntimeConfig CreateEncoderRuntimeConfig() {
+ constexpr int kBitrate = 40000;
+ constexpr int kFrameLength = 60;
+ constexpr bool kEnableDtx = false;
+ constexpr size_t kNumChannels = 1;
+ AudioEncoderRuntimeConfig config;
+ config.bitrate_bps = kBitrate;
+ config.frame_length_ms = kFrameLength;
+ config.enable_dtx = kEnableDtx;
+ config.num_channels = kNumChannels;
+ return config;
+}
+
+void CheckEncoderRuntimeConfig(const AudioEncoderOpusImpl* encoder,
+ const AudioEncoderRuntimeConfig& config) {
+ EXPECT_EQ(*config.bitrate_bps, encoder->GetTargetBitrate());
+ EXPECT_EQ(*config.frame_length_ms, encoder->next_frame_length_ms());
+ EXPECT_EQ(*config.enable_dtx, encoder->GetDtx());
+ EXPECT_EQ(*config.num_channels, encoder->num_channels_to_encode());
+}
+
+// Create 10 ms audio data blocks for a total packet size of `packet_size_ms`.
+std::unique_ptr<test::AudioLoop> Create10msAudioBlocks(
+ const std::unique_ptr<AudioEncoderOpusImpl>& encoder,
+ int packet_size_ms) {
+ const std::string file_name =
+ test::ResourcePath("audio_coding/testfile32kHz", "pcm");
+
+ std::unique_ptr<test::AudioLoop> speech_data(new test::AudioLoop());
+ int audio_samples_per_ms =
+ rtc::CheckedDivExact(encoder->SampleRateHz(), 1000);
+ if (!speech_data->Init(
+ file_name,
+ packet_size_ms * audio_samples_per_ms *
+ encoder->num_channels_to_encode(),
+ 10 * audio_samples_per_ms * encoder->num_channels_to_encode()))
+ return nullptr;
+ return speech_data;
+}
+
+} // namespace
+
+class AudioEncoderOpusTest : public ::testing::TestWithParam<int> {
+ protected:
+ int sample_rate_hz_{GetParam()};
+};
+INSTANTIATE_TEST_SUITE_P(Param,
+ AudioEncoderOpusTest,
+ ::testing::Values(16000, 48000));
+
+TEST_P(AudioEncoderOpusTest, DefaultApplicationModeMono) {
+ auto states = CreateCodec(sample_rate_hz_, 1);
+ EXPECT_EQ(AudioEncoderOpusConfig::ApplicationMode::kVoip,
+ states->encoder->application());
+}
+
+TEST_P(AudioEncoderOpusTest, DefaultApplicationModeStereo) {
+ auto states = CreateCodec(sample_rate_hz_, 2);
+ EXPECT_EQ(AudioEncoderOpusConfig::ApplicationMode::kAudio,
+ states->encoder->application());
+}
+
+TEST_P(AudioEncoderOpusTest, ChangeApplicationMode) {
+ auto states = CreateCodec(sample_rate_hz_, 2);
+ EXPECT_TRUE(
+ states->encoder->SetApplication(AudioEncoder::Application::kSpeech));
+ EXPECT_EQ(AudioEncoderOpusConfig::ApplicationMode::kVoip,
+ states->encoder->application());
+}
+
+TEST_P(AudioEncoderOpusTest, ResetWontChangeApplicationMode) {
+ auto states = CreateCodec(sample_rate_hz_, 2);
+
+ // Trigger a reset.
+ states->encoder->Reset();
+ // Verify that the mode is still kAudio.
+ EXPECT_EQ(AudioEncoderOpusConfig::ApplicationMode::kAudio,
+ states->encoder->application());
+
+ // Now change to kVoip.
+ EXPECT_TRUE(
+ states->encoder->SetApplication(AudioEncoder::Application::kSpeech));
+ EXPECT_EQ(AudioEncoderOpusConfig::ApplicationMode::kVoip,
+ states->encoder->application());
+
+ // Trigger a reset again.
+ states->encoder->Reset();
+ // Verify that the mode is still kVoip.
+ EXPECT_EQ(AudioEncoderOpusConfig::ApplicationMode::kVoip,
+ states->encoder->application());
+}
+
+TEST_P(AudioEncoderOpusTest, ToggleDtx) {
+ auto states = CreateCodec(sample_rate_hz_, 2);
+ // Enable DTX
+ EXPECT_TRUE(states->encoder->SetDtx(true));
+ EXPECT_TRUE(states->encoder->GetDtx());
+ // Turn off DTX.
+ EXPECT_TRUE(states->encoder->SetDtx(false));
+ EXPECT_FALSE(states->encoder->GetDtx());
+}
+
+TEST_P(AudioEncoderOpusTest,
+ OnReceivedUplinkBandwidthWithoutAudioNetworkAdaptor) {
+ auto states = CreateCodec(sample_rate_hz_, 1);
+  // Constants are replicated from audio_encoder_opus.cc.
+ const int kMinBitrateBps = 6000;
+ const int kMaxBitrateBps = 510000;
+ const int kOverheadBytesPerPacket = 64;
+ states->encoder->OnReceivedOverhead(kOverheadBytesPerPacket);
+ const int kOverheadBps = 8 * kOverheadBytesPerPacket *
+ rtc::CheckedDivExact(48000, kDefaultOpusPacSize);
+ // Set a too low bitrate.
+ states->encoder->OnReceivedUplinkBandwidth(kMinBitrateBps + kOverheadBps - 1,
+ absl::nullopt);
+ EXPECT_EQ(kMinBitrateBps, states->encoder->GetTargetBitrate());
+ // Set a too high bitrate.
+ states->encoder->OnReceivedUplinkBandwidth(kMaxBitrateBps + kOverheadBps + 1,
+ absl::nullopt);
+ EXPECT_EQ(kMaxBitrateBps, states->encoder->GetTargetBitrate());
+ // Set the minimum rate.
+ states->encoder->OnReceivedUplinkBandwidth(kMinBitrateBps + kOverheadBps,
+ absl::nullopt);
+ EXPECT_EQ(kMinBitrateBps, states->encoder->GetTargetBitrate());
+ // Set the maximum rate.
+ states->encoder->OnReceivedUplinkBandwidth(kMaxBitrateBps + kOverheadBps,
+ absl::nullopt);
+ EXPECT_EQ(kMaxBitrateBps, states->encoder->GetTargetBitrate());
+  // Set rates from kMinBitrateBps up to 32000 bps.
+ for (int rate = kMinBitrateBps + kOverheadBps; rate <= 32000 + kOverheadBps;
+ rate += 1000) {
+ states->encoder->OnReceivedUplinkBandwidth(rate, absl::nullopt);
+ EXPECT_EQ(rate - kOverheadBps, states->encoder->GetTargetBitrate());
+ }
+}
+
+TEST_P(AudioEncoderOpusTest, SetReceiverFrameLengthRange) {
+ auto states = CreateCodec(sample_rate_hz_, 2);
+  // Before calling `SetReceiverFrameLengthRange`,
+ // `supported_frame_lengths_ms` should contain only the frame length being
+ // used.
+ using ::testing::ElementsAre;
+ EXPECT_THAT(states->encoder->supported_frame_lengths_ms(),
+ ElementsAre(states->encoder->next_frame_length_ms()));
+ states->encoder->SetReceiverFrameLengthRange(0, 12345);
+ states->encoder->SetReceiverFrameLengthRange(21, 60);
+ EXPECT_THAT(states->encoder->supported_frame_lengths_ms(),
+ ElementsAre(40, 60));
+ states->encoder->SetReceiverFrameLengthRange(20, 59);
+ EXPECT_THAT(states->encoder->supported_frame_lengths_ms(),
+ ElementsAre(20, 40));
+}
+
+TEST_P(AudioEncoderOpusTest,
+ InvokeAudioNetworkAdaptorOnReceivedUplinkPacketLossFraction) {
+ auto states = CreateCodec(sample_rate_hz_, 2);
+ states->encoder->EnableAudioNetworkAdaptor("", nullptr);
+
+ auto config = CreateEncoderRuntimeConfig();
+ EXPECT_CALL(*states->mock_audio_network_adaptor, GetEncoderRuntimeConfig())
+ .WillOnce(Return(config));
+
+  // Since we are using a mock audio network adaptor, any packet loss fraction
+  // is fine.
+ constexpr float kUplinkPacketLoss = 0.1f;
+ EXPECT_CALL(*states->mock_audio_network_adaptor,
+ SetUplinkPacketLossFraction(kUplinkPacketLoss));
+ states->encoder->OnReceivedUplinkPacketLossFraction(kUplinkPacketLoss);
+
+ CheckEncoderRuntimeConfig(states->encoder.get(), config);
+}
+
+TEST_P(AudioEncoderOpusTest,
+ InvokeAudioNetworkAdaptorOnReceivedUplinkBandwidth) {
+ test::ScopedFieldTrials override_field_trials(
+ "WebRTC-Audio-StableTargetAdaptation/Disabled/");
+ auto states = CreateCodec(sample_rate_hz_, 2);
+ states->encoder->EnableAudioNetworkAdaptor("", nullptr);
+
+ auto config = CreateEncoderRuntimeConfig();
+ EXPECT_CALL(*states->mock_audio_network_adaptor, GetEncoderRuntimeConfig())
+ .WillOnce(Return(config));
+
+  // Since we are using a mock audio network adaptor, any target audio bitrate
+  // is fine.
+ constexpr int kTargetAudioBitrate = 30000;
+ constexpr int64_t kProbingIntervalMs = 3000;
+ EXPECT_CALL(*states->mock_audio_network_adaptor,
+ SetTargetAudioBitrate(kTargetAudioBitrate));
+ EXPECT_CALL(*states->mock_bitrate_smoother,
+ SetTimeConstantMs(kProbingIntervalMs * 4));
+ EXPECT_CALL(*states->mock_bitrate_smoother, AddSample(kTargetAudioBitrate));
+ states->encoder->OnReceivedUplinkBandwidth(kTargetAudioBitrate,
+ kProbingIntervalMs);
+
+ CheckEncoderRuntimeConfig(states->encoder.get(), config);
+}
+
+TEST_P(AudioEncoderOpusTest,
+ InvokeAudioNetworkAdaptorOnReceivedUplinkAllocation) {
+ auto states = CreateCodec(sample_rate_hz_, 2);
+ states->encoder->EnableAudioNetworkAdaptor("", nullptr);
+
+ auto config = CreateEncoderRuntimeConfig();
+ EXPECT_CALL(*states->mock_audio_network_adaptor, GetEncoderRuntimeConfig())
+ .WillOnce(Return(config));
+
+ BitrateAllocationUpdate update;
+ update.target_bitrate = DataRate::BitsPerSec(30000);
+ update.stable_target_bitrate = DataRate::BitsPerSec(20000);
+ update.bwe_period = TimeDelta::Millis(200);
+ EXPECT_CALL(*states->mock_audio_network_adaptor,
+ SetTargetAudioBitrate(update.target_bitrate.bps()));
+ EXPECT_CALL(*states->mock_audio_network_adaptor,
+ SetUplinkBandwidth(update.stable_target_bitrate.bps()));
+ states->encoder->OnReceivedUplinkAllocation(update);
+
+ CheckEncoderRuntimeConfig(states->encoder.get(), config);
+}
+
+TEST_P(AudioEncoderOpusTest, InvokeAudioNetworkAdaptorOnReceivedRtt) {
+ auto states = CreateCodec(sample_rate_hz_, 2);
+ states->encoder->EnableAudioNetworkAdaptor("", nullptr);
+
+ auto config = CreateEncoderRuntimeConfig();
+ EXPECT_CALL(*states->mock_audio_network_adaptor, GetEncoderRuntimeConfig())
+ .WillOnce(Return(config));
+
+  // Since we are using a mock audio network adaptor, any RTT is fine.
+ constexpr int kRtt = 30;
+ EXPECT_CALL(*states->mock_audio_network_adaptor, SetRtt(kRtt));
+ states->encoder->OnReceivedRtt(kRtt);
+
+ CheckEncoderRuntimeConfig(states->encoder.get(), config);
+}
+
+TEST_P(AudioEncoderOpusTest, InvokeAudioNetworkAdaptorOnReceivedOverhead) {
+ auto states = CreateCodec(sample_rate_hz_, 2);
+ states->encoder->EnableAudioNetworkAdaptor("", nullptr);
+
+ auto config = CreateEncoderRuntimeConfig();
+ EXPECT_CALL(*states->mock_audio_network_adaptor, GetEncoderRuntimeConfig())
+ .WillOnce(Return(config));
+
+  // Since we are using a mock audio network adaptor, any overhead is fine.
+ constexpr size_t kOverhead = 64;
+ EXPECT_CALL(*states->mock_audio_network_adaptor, SetOverhead(kOverhead));
+ states->encoder->OnReceivedOverhead(kOverhead);
+
+ CheckEncoderRuntimeConfig(states->encoder.get(), config);
+}
+
+TEST_P(AudioEncoderOpusTest,
+ PacketLossFractionSmoothedOnSetUplinkPacketLossFraction) {
+ auto states = CreateCodec(sample_rate_hz_, 2);
+
+  // The values are carefully chosen so that, if no smoothing is applied, the
+  // test will fail.
+ constexpr float kPacketLossFraction_1 = 0.02f;
+ constexpr float kPacketLossFraction_2 = 0.198f;
+ // `kSecondSampleTimeMs` is chosen to ease the calculation since
+ // 0.9999 ^ 6931 = 0.5.
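+  // (6931 ~= ln(0.5) / ln(0.9999), so the first sample's weight has decayed
+  // to exactly one half by the time the second sample is added.)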
+ constexpr int64_t kSecondSampleTimeMs = 6931;
+
+ // First time, no filtering.
+ states->encoder->OnReceivedUplinkPacketLossFraction(kPacketLossFraction_1);
+ EXPECT_FLOAT_EQ(0.02f, states->encoder->packet_loss_rate());
+
+ states->fake_clock->AdvanceTime(TimeDelta::Millis(kSecondSampleTimeMs));
+ states->encoder->OnReceivedUplinkPacketLossFraction(kPacketLossFraction_2);
+
+ // Now the output of packet loss fraction smoother should be
+ // (0.02 + 0.198) / 2 = 0.109.
+ EXPECT_NEAR(0.109f, states->encoder->packet_loss_rate(), 0.001);
+}
+
+TEST_P(AudioEncoderOpusTest, PacketLossRateUpperBounded) {
+ auto states = CreateCodec(sample_rate_hz_, 2);
+
+ states->encoder->OnReceivedUplinkPacketLossFraction(0.5);
+ EXPECT_FLOAT_EQ(0.2f, states->encoder->packet_loss_rate());
+}
+
+TEST_P(AudioEncoderOpusTest, DoNotInvokeSetTargetBitrateIfOverheadUnknown) {
+ test::ScopedFieldTrials override_field_trials(
+ "WebRTC-SendSideBwe-WithOverhead/Enabled/");
+
+ auto states = CreateCodec(sample_rate_hz_, 2);
+
+ states->encoder->OnReceivedUplinkBandwidth(kDefaultOpusRate * 2,
+ absl::nullopt);
+
+ // Since `OnReceivedOverhead` has not been called, the codec bitrate should
+ // not change.
+ EXPECT_EQ(kDefaultOpusRate, states->encoder->GetTargetBitrate());
+}
+
+// Verifies that the complexity adaptation in the config works as intended.
+TEST(AudioEncoderOpusTest, ConfigComplexityAdaptation) {
+ AudioEncoderOpusConfig config;
+ config.low_rate_complexity = 8;
+ config.complexity = 6;
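+  // Judging from the bitrates probed below, the hysteresis window is assumed
+  // to span roughly 11000-14000 bps: below it the higher low-rate complexity
+  // (8) applies, above it the regular complexity (6).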
+
+ // Bitrate within hysteresis window. Expect empty output.
+ config.bitrate_bps = 12500;
+ EXPECT_EQ(absl::nullopt, AudioEncoderOpusImpl::GetNewComplexity(config));
+
+ // Bitrate below hysteresis window. Expect higher complexity.
+ config.bitrate_bps = 10999;
+ EXPECT_EQ(8, AudioEncoderOpusImpl::GetNewComplexity(config));
+
+ // Bitrate within hysteresis window. Expect empty output.
+ config.bitrate_bps = 12500;
+ EXPECT_EQ(absl::nullopt, AudioEncoderOpusImpl::GetNewComplexity(config));
+
+ // Bitrate above hysteresis window. Expect lower complexity.
+ config.bitrate_bps = 14001;
+ EXPECT_EQ(6, AudioEncoderOpusImpl::GetNewComplexity(config));
+}
+
+// Verifies that the bandwidth adaptation in the config works as intended.
+TEST_P(AudioEncoderOpusTest, ConfigBandwidthAdaptation) {
+ AudioEncoderOpusConfig config;
+ const size_t opus_rate_khz = rtc::CheckedDivExact(sample_rate_hz_, 1000);
+ const std::vector<int16_t> silence(
+ opus_rate_khz * config.frame_size_ms * config.num_channels, 0);
+ constexpr size_t kMaxBytes = 1000;
+ uint8_t bitstream[kMaxBytes];
+
+ OpusEncInst* inst;
+ EXPECT_EQ(0, WebRtcOpus_EncoderCreate(
+ &inst, config.num_channels,
+ config.application ==
+ AudioEncoderOpusConfig::ApplicationMode::kVoip
+ ? 0
+ : 1,
+ sample_rate_hz_));
+
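+  // Judging from the bitrates probed below, the adaptation thresholds are
+  // assumed to be roughly 8000 bps (minimum wideband), 9000 bps (maximum
+  // narrowband) and 12000 bps (automatic bandwidth).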
+  // Bitrate below minimum wideband. Expect narrowband.
+ config.bitrate_bps = absl::optional<int>(7999);
+ auto bandwidth = AudioEncoderOpusImpl::GetNewBandwidth(config, inst);
+ EXPECT_EQ(absl::optional<int>(OPUS_BANDWIDTH_NARROWBAND), bandwidth);
+ WebRtcOpus_SetBandwidth(inst, *bandwidth);
+ // It is necessary to encode here because Opus has some logic in the encoder
+ // that goes from the user-set bandwidth to the used and returned one.
+ WebRtcOpus_Encode(inst, silence.data(),
+ rtc::CheckedDivExact(silence.size(), config.num_channels),
+ kMaxBytes, bitstream);
+
+ // Bitrate not yet above maximum narrowband. Expect empty.
+ config.bitrate_bps = absl::optional<int>(9000);
+ bandwidth = AudioEncoderOpusImpl::GetNewBandwidth(config, inst);
+ EXPECT_EQ(absl::optional<int>(), bandwidth);
+
+ // Bitrate above maximum narrowband. Expect wideband.
+ config.bitrate_bps = absl::optional<int>(9001);
+ bandwidth = AudioEncoderOpusImpl::GetNewBandwidth(config, inst);
+ EXPECT_EQ(absl::optional<int>(OPUS_BANDWIDTH_WIDEBAND), bandwidth);
+ WebRtcOpus_SetBandwidth(inst, *bandwidth);
+ // It is necessary to encode here because Opus has some logic in the encoder
+ // that goes from the user-set bandwidth to the used and returned one.
+ WebRtcOpus_Encode(inst, silence.data(),
+ rtc::CheckedDivExact(silence.size(), config.num_channels),
+ kMaxBytes, bitstream);
+
+ // Bitrate not yet below minimum wideband. Expect empty.
+ config.bitrate_bps = absl::optional<int>(8000);
+ bandwidth = AudioEncoderOpusImpl::GetNewBandwidth(config, inst);
+ EXPECT_EQ(absl::optional<int>(), bandwidth);
+
+ // Bitrate above automatic threshold. Expect automatic.
+ config.bitrate_bps = absl::optional<int>(12001);
+ bandwidth = AudioEncoderOpusImpl::GetNewBandwidth(config, inst);
+ EXPECT_EQ(absl::optional<int>(OPUS_AUTO), bandwidth);
+
+ EXPECT_EQ(0, WebRtcOpus_EncoderFree(inst));
+}
+
+TEST_P(AudioEncoderOpusTest, EmptyConfigDoesNotAffectEncoderSettings) {
+ auto states = CreateCodec(sample_rate_hz_, 2);
+ states->encoder->EnableAudioNetworkAdaptor("", nullptr);
+
+ auto config = CreateEncoderRuntimeConfig();
+ AudioEncoderRuntimeConfig empty_config;
+
+ EXPECT_CALL(*states->mock_audio_network_adaptor, GetEncoderRuntimeConfig())
+ .WillOnce(Return(config))
+ .WillOnce(Return(empty_config));
+
+ constexpr size_t kOverhead = 64;
+ EXPECT_CALL(*states->mock_audio_network_adaptor, SetOverhead(kOverhead))
+ .Times(2);
+ states->encoder->OnReceivedOverhead(kOverhead);
+ states->encoder->OnReceivedOverhead(kOverhead);
+
+ CheckEncoderRuntimeConfig(states->encoder.get(), config);
+}
+
+TEST_P(AudioEncoderOpusTest, UpdateUplinkBandwidthInAudioNetworkAdaptor) {
+ test::ScopedFieldTrials override_field_trials(
+ "WebRTC-Audio-StableTargetAdaptation/Disabled/");
+ auto states = CreateCodec(sample_rate_hz_, 2);
+ states->encoder->EnableAudioNetworkAdaptor("", nullptr);
+ const size_t opus_rate_khz = rtc::CheckedDivExact(sample_rate_hz_, 1000);
+ const std::vector<int16_t> audio(opus_rate_khz * 10 * 2, 0);
+ rtc::Buffer encoded;
+ EXPECT_CALL(*states->mock_bitrate_smoother, GetAverage())
+ .WillOnce(Return(50000));
+ EXPECT_CALL(*states->mock_audio_network_adaptor, SetUplinkBandwidth(50000));
+ states->encoder->Encode(
+ 0, rtc::ArrayView<const int16_t>(audio.data(), audio.size()), &encoded);
+
+  // Repeat the uplink bandwidth update test a few times.
+ for (int i = 0; i < 5; i++) {
+ // Don't update till it is time to update again.
+ states->fake_clock->AdvanceTime(TimeDelta::Millis(
+ states->config.uplink_bandwidth_update_interval_ms - 1));
+ states->encoder->Encode(
+ 0, rtc::ArrayView<const int16_t>(audio.data(), audio.size()), &encoded);
+
+ // Update when it is time to update.
+ EXPECT_CALL(*states->mock_bitrate_smoother, GetAverage())
+ .WillOnce(Return(40000));
+ EXPECT_CALL(*states->mock_audio_network_adaptor, SetUplinkBandwidth(40000));
+ states->fake_clock->AdvanceTime(TimeDelta::Millis(1));
+ states->encoder->Encode(
+ 0, rtc::ArrayView<const int16_t>(audio.data(), audio.size()), &encoded);
+ }
+}
+
+TEST_P(AudioEncoderOpusTest, EncodeAtMinBitrate) {
+ auto states = CreateCodec(sample_rate_hz_, 1);
+ constexpr int kNumPacketsToEncode = 2;
+ auto audio_frames =
+ Create10msAudioBlocks(states->encoder, kNumPacketsToEncode * 20);
+ ASSERT_TRUE(audio_frames) << "Create10msAudioBlocks failed";
+ rtc::Buffer encoded;
+ uint32_t rtp_timestamp = 12345; // Just a number not important to this test.
+
+ states->encoder->OnReceivedUplinkBandwidth(0, absl::nullopt);
+ for (int packet_index = 0; packet_index < kNumPacketsToEncode;
+ packet_index++) {
+    // Make sure we are not encoding before we have enough data for
+    // a 20 ms packet.
+ for (int index = 0; index < 1; index++) {
+ states->encoder->Encode(rtp_timestamp, audio_frames->GetNextBlock(),
+ &encoded);
+ EXPECT_EQ(0u, encoded.size());
+ }
+
+ // Should encode now.
+ states->encoder->Encode(rtp_timestamp, audio_frames->GetNextBlock(),
+ &encoded);
+ EXPECT_GT(encoded.size(), 0u);
+ encoded.Clear();
+ }
+}
+
+TEST(AudioEncoderOpusTest, TestConfigDefaults) {
+ const auto config_opt = AudioEncoderOpus::SdpToConfig({"opus", 48000, 2});
+ ASSERT_TRUE(config_opt);
+ EXPECT_EQ(48000, config_opt->max_playback_rate_hz);
+ EXPECT_EQ(1u, config_opt->num_channels);
+ EXPECT_FALSE(config_opt->fec_enabled);
+ EXPECT_FALSE(config_opt->dtx_enabled);
+ EXPECT_EQ(20, config_opt->frame_size_ms);
+}
+
+TEST(AudioEncoderOpusTest, TestConfigFromParams) {
+ const auto config1 = CreateConfigWithParameters({{"stereo", "0"}});
+ EXPECT_EQ(1U, config1.num_channels);
+
+ const auto config2 = CreateConfigWithParameters({{"stereo", "1"}});
+ EXPECT_EQ(2U, config2.num_channels);
+
+ const auto config3 = CreateConfigWithParameters({{"useinbandfec", "0"}});
+ EXPECT_FALSE(config3.fec_enabled);
+
+ const auto config4 = CreateConfigWithParameters({{"useinbandfec", "1"}});
+ EXPECT_TRUE(config4.fec_enabled);
+
+ const auto config5 = CreateConfigWithParameters({{"usedtx", "0"}});
+ EXPECT_FALSE(config5.dtx_enabled);
+
+ const auto config6 = CreateConfigWithParameters({{"usedtx", "1"}});
+ EXPECT_TRUE(config6.dtx_enabled);
+
+ const auto config7 = CreateConfigWithParameters({{"cbr", "0"}});
+ EXPECT_FALSE(config7.cbr_enabled);
+
+ const auto config8 = CreateConfigWithParameters({{"cbr", "1"}});
+ EXPECT_TRUE(config8.cbr_enabled);
+
+ const auto config9 =
+ CreateConfigWithParameters({{"maxplaybackrate", "12345"}});
+ EXPECT_EQ(12345, config9.max_playback_rate_hz);
+
+ const auto config10 =
+ CreateConfigWithParameters({{"maxaveragebitrate", "96000"}});
+ EXPECT_EQ(96000, config10.bitrate_bps);
+
+ const auto config11 = CreateConfigWithParameters({{"maxptime", "40"}});
+ for (int frame_length : config11.supported_frame_lengths_ms) {
+ EXPECT_LE(frame_length, 40);
+ }
+
+ const auto config12 = CreateConfigWithParameters({{"minptime", "40"}});
+ for (int frame_length : config12.supported_frame_lengths_ms) {
+ EXPECT_GE(frame_length, 40);
+ }
+
+ const auto config13 = CreateConfigWithParameters({{"ptime", "40"}});
+ EXPECT_EQ(40, config13.frame_size_ms);
+
+ constexpr int kMinSupportedFrameLength = 10;
+ constexpr int kMaxSupportedFrameLength =
+ WEBRTC_OPUS_SUPPORT_120MS_PTIME ? 120 : 60;
+
+ const auto config14 = CreateConfigWithParameters({{"ptime", "1"}});
+ EXPECT_EQ(kMinSupportedFrameLength, config14.frame_size_ms);
+
+ const auto config15 = CreateConfigWithParameters({{"ptime", "2000"}});
+ EXPECT_EQ(kMaxSupportedFrameLength, config15.frame_size_ms);
+}
+
+TEST(AudioEncoderOpusTest, TestConfigFromInvalidParams) {
+ const webrtc::SdpAudioFormat format("opus", 48000, 2);
+ const auto default_config = *AudioEncoderOpus::SdpToConfig(format);
+#if WEBRTC_OPUS_SUPPORT_120MS_PTIME
+ const std::vector<int> default_supported_frame_lengths_ms({20, 40, 60, 120});
+#else
+ const std::vector<int> default_supported_frame_lengths_ms({20, 40, 60});
+#endif
+
+ AudioEncoderOpusConfig config;
+ config = CreateConfigWithParameters({{"stereo", "invalid"}});
+ EXPECT_EQ(default_config.num_channels, config.num_channels);
+
+ config = CreateConfigWithParameters({{"useinbandfec", "invalid"}});
+ EXPECT_EQ(default_config.fec_enabled, config.fec_enabled);
+
+ config = CreateConfigWithParameters({{"usedtx", "invalid"}});
+ EXPECT_EQ(default_config.dtx_enabled, config.dtx_enabled);
+
+ config = CreateConfigWithParameters({{"cbr", "invalid"}});
+  EXPECT_EQ(default_config.cbr_enabled, config.cbr_enabled);
+
+ config = CreateConfigWithParameters({{"maxplaybackrate", "0"}});
+ EXPECT_EQ(default_config.max_playback_rate_hz, config.max_playback_rate_hz);
+
+ config = CreateConfigWithParameters({{"maxplaybackrate", "-23"}});
+ EXPECT_EQ(default_config.max_playback_rate_hz, config.max_playback_rate_hz);
+
+ config = CreateConfigWithParameters({{"maxplaybackrate", "not a number!"}});
+ EXPECT_EQ(default_config.max_playback_rate_hz, config.max_playback_rate_hz);
+
+ config = CreateConfigWithParameters({{"maxaveragebitrate", "0"}});
+ EXPECT_EQ(6000, config.bitrate_bps);
+
+ config = CreateConfigWithParameters({{"maxaveragebitrate", "-1000"}});
+ EXPECT_EQ(6000, config.bitrate_bps);
+
+ config = CreateConfigWithParameters({{"maxaveragebitrate", "1024000"}});
+ EXPECT_EQ(510000, config.bitrate_bps);
+
+ config = CreateConfigWithParameters({{"maxaveragebitrate", "not a number!"}});
+ EXPECT_EQ(default_config.bitrate_bps, config.bitrate_bps);
+
+ config = CreateConfigWithParameters({{"maxptime", "invalid"}});
+ EXPECT_EQ(default_supported_frame_lengths_ms,
+ config.supported_frame_lengths_ms);
+
+ config = CreateConfigWithParameters({{"minptime", "invalid"}});
+ EXPECT_EQ(default_supported_frame_lengths_ms,
+ config.supported_frame_lengths_ms);
+
+ config = CreateConfigWithParameters({{"ptime", "invalid"}});
+ EXPECT_EQ(default_supported_frame_lengths_ms,
+ config.supported_frame_lengths_ms);
+}
+
+// Test that bitrate will be overridden by the "maxaveragebitrate" parameter.
+// Also test that "maxaveragebitrate" can't be set to values outside the
+// range of 6000 to 510000 bps.
+TEST(AudioEncoderOpusTest, SetSendCodecOpusMaxAverageBitrate) {
+ // Ignore if less than 6000.
+ const auto config1 = AudioEncoderOpus::SdpToConfig(
+ {"opus", 48000, 2, {{"maxaveragebitrate", "5999"}}});
+ EXPECT_EQ(6000, config1->bitrate_bps);
+
+ // Ignore if larger than 510000.
+ const auto config2 = AudioEncoderOpus::SdpToConfig(
+ {"opus", 48000, 2, {{"maxaveragebitrate", "510001"}}});
+ EXPECT_EQ(510000, config2->bitrate_bps);
+
+ const auto config3 = AudioEncoderOpus::SdpToConfig(
+ {"opus", 48000, 2, {{"maxaveragebitrate", "200000"}}});
+ EXPECT_EQ(200000, config3->bitrate_bps);
+}
+
+// Test that maxplaybackrate <= 8000 triggers Opus narrowband mode.
+TEST(AudioEncoderOpusTest, SetMaxPlaybackRateNb) {
+ auto config = CreateConfigWithParameters({{"maxplaybackrate", "8000"}});
+ EXPECT_EQ(8000, config.max_playback_rate_hz);
+ EXPECT_EQ(12000, config.bitrate_bps);
+
+ config = CreateConfigWithParameters(
+ {{"maxplaybackrate", "8000"}, {"stereo", "1"}});
+ EXPECT_EQ(8000, config.max_playback_rate_hz);
+ EXPECT_EQ(24000, config.bitrate_bps);
+}
+
+// Test that 8000 < maxplaybackrate <= 12000 triggers Opus mediumband mode.
+TEST(AudioEncoderOpusTest, SetMaxPlaybackRateMb) {
+ auto config = CreateConfigWithParameters({{"maxplaybackrate", "8001"}});
+ EXPECT_EQ(8001, config.max_playback_rate_hz);
+ EXPECT_EQ(20000, config.bitrate_bps);
+
+ config = CreateConfigWithParameters(
+ {{"maxplaybackrate", "8001"}, {"stereo", "1"}});
+ EXPECT_EQ(8001, config.max_playback_rate_hz);
+ EXPECT_EQ(40000, config.bitrate_bps);
+}
+
+// Test that 12000 < maxplaybackrate <= 16000 triggers Opus wideband mode.
+TEST(AudioEncoderOpusTest, SetMaxPlaybackRateWb) {
+ auto config = CreateConfigWithParameters({{"maxplaybackrate", "12001"}});
+ EXPECT_EQ(12001, config.max_playback_rate_hz);
+ EXPECT_EQ(20000, config.bitrate_bps);
+
+ config = CreateConfigWithParameters(
+ {{"maxplaybackrate", "12001"}, {"stereo", "1"}});
+ EXPECT_EQ(12001, config.max_playback_rate_hz);
+ EXPECT_EQ(40000, config.bitrate_bps);
+}
+
+// Test that 16000 < maxplaybackrate <= 24000 triggers Opus super-wideband
+// mode.
+TEST(AudioEncoderOpusTest, SetMaxPlaybackRateSwb) {
+ auto config = CreateConfigWithParameters({{"maxplaybackrate", "16001"}});
+ EXPECT_EQ(16001, config.max_playback_rate_hz);
+ EXPECT_EQ(32000, config.bitrate_bps);
+
+ config = CreateConfigWithParameters(
+ {{"maxplaybackrate", "16001"}, {"stereo", "1"}});
+ EXPECT_EQ(16001, config.max_playback_rate_hz);
+ EXPECT_EQ(64000, config.bitrate_bps);
+}
+
+// Test that 24000 < maxplaybackrate triggers Opus fullband mode.
+TEST(AudioEncoderOpusTest, SetMaxPlaybackRateFb) {
+ auto config = CreateConfigWithParameters({{"maxplaybackrate", "24001"}});
+ EXPECT_EQ(24001, config.max_playback_rate_hz);
+ EXPECT_EQ(32000, config.bitrate_bps);
+
+ config = CreateConfigWithParameters(
+ {{"maxplaybackrate", "24001"}, {"stereo", "1"}});
+ EXPECT_EQ(24001, config.max_playback_rate_hz);
+ EXPECT_EQ(64000, config.bitrate_bps);
+}
+
+TEST_P(AudioEncoderOpusTest, OpusFlagDtxAsNonSpeech) {
+ // Create encoder with DTX enabled.
+ AudioEncoderOpusConfig config;
+ config.dtx_enabled = true;
+ config.sample_rate_hz = sample_rate_hz_;
+ constexpr int payload_type = 17;
+ const auto encoder = AudioEncoderOpus::MakeAudioEncoder(config, payload_type);
+
+ // Open file containing speech and silence.
+ const std::string kInputFileName =
+ webrtc::test::ResourcePath("audio_coding/testfile32kHz", "pcm");
+ test::AudioLoop audio_loop;
+ // Use the file as if it were sampled at our desired input rate.
+ const size_t max_loop_length_samples =
+ sample_rate_hz_ * 10; // Max 10 second loop.
+ const size_t input_block_size_samples =
+ 10 * sample_rate_hz_ / 1000; // 10 ms.
+ EXPECT_TRUE(audio_loop.Init(kInputFileName, max_loop_length_samples,
+ input_block_size_samples));
+
+ // Encode.
+ AudioEncoder::EncodedInfo info;
+ rtc::Buffer encoded(500);
+ int nonspeech_frames = 0;
+ int max_nonspeech_frames = 0;
+ int dtx_frames = 0;
+ int max_dtx_frames = 0;
+ uint32_t rtp_timestamp = 0u;
+ for (size_t i = 0; i < 500; ++i) {
+ encoded.Clear();
+
+ // Every second call to the encoder will generate an Opus packet.
+ for (int j = 0; j < 2; j++) {
+ info =
+ encoder->Encode(rtp_timestamp, audio_loop.GetNextBlock(), &encoded);
+ rtp_timestamp += input_block_size_samples;
+ }
+
+ // Bookkeeping of number of DTX frames.
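+    // A payload of at most 2 bytes carries only the header and thus marks a
+    // DTX frame.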
+ if (info.encoded_bytes <= 2) {
+ ++dtx_frames;
+ } else {
+ if (dtx_frames > max_dtx_frames)
+ max_dtx_frames = dtx_frames;
+ dtx_frames = 0;
+ }
+
+ // Bookkeeping of number of non-speech frames.
+ if (info.speech == 0) {
+ ++nonspeech_frames;
+ } else {
+ if (nonspeech_frames > max_nonspeech_frames)
+ max_nonspeech_frames = nonspeech_frames;
+ nonspeech_frames = 0;
+ }
+ }
+
+ // Maximum number of consecutive non-speech packets should exceed 15.
+ EXPECT_GT(max_nonspeech_frames, 15);
+}
+
+TEST(AudioEncoderOpusTest, OpusDtxFilteringHighEnergyRefreshPackets) {
+ test::ScopedFieldTrials override_field_trials(
+ "WebRTC-Audio-OpusAvoidNoisePumpingDuringDtx/Enabled/");
+ const std::string kInputFileName =
+ webrtc::test::ResourcePath("audio_coding/testfile16kHz", "pcm");
+ constexpr int kSampleRateHz = 16000;
+ AudioEncoderOpusConfig config;
+ config.dtx_enabled = true;
+ config.sample_rate_hz = kSampleRateHz;
+ constexpr int payload_type = 17;
+ const auto encoder = AudioEncoderOpus::MakeAudioEncoder(config, payload_type);
+ test::AudioLoop audio_loop;
+  constexpr size_t kMaxLoopLengthSamples = kSampleRateHz * 11.6f;
+ constexpr size_t kInputBlockSizeSamples = kSampleRateHz / 100;
+  EXPECT_TRUE(audio_loop.Init(kInputFileName, kMaxLoopLengthSamples,
+ kInputBlockSizeSamples));
+ AudioEncoder::EncodedInfo info;
+ rtc::Buffer encoded(500);
+ // Encode the audio file and store the last part that corresponds to silence.
+ constexpr size_t kSilenceDurationSamples = kSampleRateHz * 0.2f;
+ std::array<int16_t, kSilenceDurationSamples> silence;
+ uint32_t rtp_timestamp = 0;
+ bool last_packet_dtx_frame = false;
+ bool opus_entered_dtx = false;
+ bool silence_filled = false;
+ size_t timestamp_start_silence = 0;
+  while (!silence_filled && rtp_timestamp < kMaxLoopLengthSamples) {
+ encoded.Clear();
+ // Every second call to the encoder will generate an Opus packet.
+ for (int j = 0; j < 2; j++) {
+ auto next_frame = audio_loop.GetNextBlock();
+ info = encoder->Encode(rtp_timestamp, next_frame, &encoded);
+ if (opus_entered_dtx) {
+ size_t silence_frame_start = rtp_timestamp - timestamp_start_silence;
+ silence_filled = silence_frame_start >= kSilenceDurationSamples;
+ if (!silence_filled) {
+ std::copy(next_frame.begin(), next_frame.end(),
+ silence.begin() + silence_frame_start);
+ }
+ }
+ rtp_timestamp += kInputBlockSizeSamples;
+ }
+ EXPECT_TRUE(info.encoded_bytes > 0 || last_packet_dtx_frame);
+ last_packet_dtx_frame = info.encoded_bytes > 0 ? info.encoded_bytes <= 2
+ : last_packet_dtx_frame;
+ if (info.encoded_bytes <= 2 && !opus_entered_dtx) {
+ timestamp_start_silence = rtp_timestamp;
+ }
+ opus_entered_dtx = info.encoded_bytes <= 2;
+ }
+
+ EXPECT_TRUE(silence_filled);
+  // The copied 200 ms of silence is used to create 6 bursts that are fed to
+  // the encoder: the first three with a higher energy and the last three
+  // with a lower energy. This test verifies that the encoder only sends
+  // refresh DTX packets during the last three bursts.
+ int number_non_empty_packets_during_increase = 0;
+ int number_non_empty_packets_during_decrease = 0;
+ for (size_t burst = 0; burst < 6; ++burst) {
+ uint32_t rtp_timestamp_start = rtp_timestamp;
+ const bool increase_noise = burst < 3;
+ const float gain = increase_noise ? 1.4f : 0.0f;
+ while (rtp_timestamp < rtp_timestamp_start + kSilenceDurationSamples) {
+ encoded.Clear();
+ // Every second call to the encoder will generate an Opus packet.
+ for (int j = 0; j < 2; j++) {
+ std::array<int16_t, kInputBlockSizeSamples> silence_frame;
+ size_t silence_frame_start = rtp_timestamp - rtp_timestamp_start;
+ std::transform(
+ silence.begin() + silence_frame_start,
+ silence.begin() + silence_frame_start + kInputBlockSizeSamples,
+ silence_frame.begin(), [gain](float s) { return gain * s; });
+ info = encoder->Encode(rtp_timestamp, silence_frame, &encoded);
+ rtp_timestamp += kInputBlockSizeSamples;
+ }
+ EXPECT_TRUE(info.encoded_bytes > 0 || last_packet_dtx_frame);
+ last_packet_dtx_frame = info.encoded_bytes > 0 ? info.encoded_bytes <= 2
+ : last_packet_dtx_frame;
+      // Track the number of non-empty packets.
+ if (increase_noise && info.encoded_bytes > 2) {
+ number_non_empty_packets_during_increase++;
+ }
+ if (!increase_noise && info.encoded_bytes > 2) {
+ number_non_empty_packets_during_decrease++;
+ }
+ }
+ }
+  // Check that refresh DTX packets are only sent in the decreasing-energy
+  // region.
+ EXPECT_EQ(number_non_empty_packets_during_increase, 0);
+ EXPECT_GT(number_non_empty_packets_during_decrease, 0);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/opus/opus_bandwidth_unittest.cc b/third_party/libwebrtc/modules/audio_coding/codecs/opus/opus_bandwidth_unittest.cc
new file mode 100644
index 0000000000..38b60c6187
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/opus/opus_bandwidth_unittest.cc
@@ -0,0 +1,152 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "api/audio_codecs/opus/audio_decoder_opus.h"
+#include "api/audio_codecs/opus/audio_encoder_opus.h"
+#include "common_audio/include/audio_util.h"
+#include "common_audio/window_generator.h"
+#include "modules/audio_coding/codecs/opus/test/lapped_transform.h"
+#include "modules/audio_coding/neteq/tools/audio_loop.h"
+#include "test/field_trial.h"
+#include "test/gtest.h"
+#include "test/testsupport/file_utils.h"
+
+namespace webrtc {
+namespace {
+
+constexpr size_t kNumChannels = 1u;
+constexpr int kSampleRateHz = 48000;
+constexpr size_t kMaxLoopLengthSamples = kSampleRateHz * 50; // 50 seconds.
+constexpr size_t kInputBlockSizeSamples = 10 * kSampleRateHz / 1000; // 10 ms
+constexpr size_t kOutputBlockSizeSamples = 20 * kSampleRateHz / 1000; // 20 ms
+constexpr size_t kFftSize = 1024;
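+// kNarrowbandSize is the number of FFT bins covering 0-4 kHz:
+// 4000 * 1024 / 48000 = 85 bins (integer division).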
+constexpr size_t kNarrowbandSize = 4000 * kFftSize / kSampleRateHz;
+constexpr float kKbdAlpha = 1.5f;
+
+class PowerRatioEstimator : public LappedTransform::Callback {
+ public:
+ PowerRatioEstimator() : low_pow_(0.f), high_pow_(0.f) {
+ WindowGenerator::KaiserBesselDerived(kKbdAlpha, kFftSize, window_);
+ transform_.reset(new LappedTransform(kNumChannels, 0u,
+ kInputBlockSizeSamples, window_,
+ kFftSize, kFftSize / 2, this));
+ }
+
+ void ProcessBlock(float* data) { transform_->ProcessChunk(&data, nullptr); }
+
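+  // Ratio of accumulated energy above 4 kHz to energy below 4 kHz; output
+  // coded as narrowband keeps this ratio small.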
+ float PowerRatio() { return high_pow_ / low_pow_; }
+
+ protected:
+ void ProcessAudioBlock(const std::complex<float>* const* input,
+ size_t num_input_channels,
+ size_t num_freq_bins,
+ size_t num_output_channels,
+ std::complex<float>* const* output) override {
+ float low_pow = 0.f;
+ float high_pow = 0.f;
+ for (size_t i = 0u; i < num_input_channels; ++i) {
+ for (size_t j = 0u; j < kNarrowbandSize; ++j) {
+ float low_mag = std::abs(input[i][j]);
+ low_pow += low_mag * low_mag;
+ float high_mag = std::abs(input[i][j + kNarrowbandSize]);
+ high_pow += high_mag * high_mag;
+ }
+ }
+ low_pow_ += low_pow / (num_input_channels * kFftSize);
+ high_pow_ += high_pow / (num_input_channels * kFftSize);
+ }
+
+ private:
+ std::unique_ptr<LappedTransform> transform_;
+ float window_[kFftSize];
+ float low_pow_;
+ float high_pow_;
+};
+
+float EncodedPowerRatio(AudioEncoder* encoder,
+ AudioDecoder* decoder,
+ test::AudioLoop* audio_loop) {
+ // Encode and decode.
+ uint32_t rtp_timestamp = 0u;
+ constexpr size_t kBufferSize = 500;
+ rtc::Buffer encoded(kBufferSize);
+ std::vector<int16_t> decoded(kOutputBlockSizeSamples);
+ std::vector<float> decoded_float(kOutputBlockSizeSamples);
+ AudioDecoder::SpeechType speech_type = AudioDecoder::kSpeech;
+ PowerRatioEstimator power_ratio_estimator;
+ for (size_t i = 0; i < 1000; ++i) {
+ encoded.Clear();
+ AudioEncoder::EncodedInfo encoder_info =
+ encoder->Encode(rtp_timestamp, audio_loop->GetNextBlock(), &encoded);
+ rtp_timestamp += kInputBlockSizeSamples;
+ if (encoded.size() > 0) {
+ int decoder_info = decoder->Decode(
+ encoded.data(), encoded.size(), kSampleRateHz,
+ decoded.size() * sizeof(decoded[0]), decoded.data(), &speech_type);
+ if (decoder_info > 0) {
+ S16ToFloat(decoded.data(), decoded.size(), decoded_float.data());
+ power_ratio_estimator.ProcessBlock(decoded_float.data());
+ }
+ }
+ }
+ return power_ratio_estimator.PowerRatio();
+}
+
+} // namespace
+
+// TODO(ivoc): Remove this test, WebRTC-AdjustOpusBandwidth is obsolete.
+TEST(BandwidthAdaptationTest, BandwidthAdaptationTest) {
+ test::ScopedFieldTrials override_field_trials(
+ "WebRTC-AdjustOpusBandwidth/Enabled/");
+
+ constexpr float kMaxNarrowbandRatio = 0.0035f;
+ constexpr float kMinWidebandRatio = 0.01f;
+
+ // Create encoder.
+ AudioEncoderOpusConfig enc_config;
+ enc_config.bitrate_bps = absl::optional<int>(7999);
+ enc_config.num_channels = kNumChannels;
+ constexpr int payload_type = 17;
+ auto encoder = AudioEncoderOpus::MakeAudioEncoder(enc_config, payload_type);
+
+ // Create decoder.
+ AudioDecoderOpus::Config dec_config;
+ dec_config.num_channels = kNumChannels;
+ auto decoder = AudioDecoderOpus::MakeAudioDecoder(dec_config);
+
+ // Open speech file.
+ const std::string kInputFileName =
+ webrtc::test::ResourcePath("audio_coding/speech_mono_32_48kHz", "pcm");
+ test::AudioLoop audio_loop;
+ EXPECT_EQ(kSampleRateHz, encoder->SampleRateHz());
+ ASSERT_TRUE(audio_loop.Init(kInputFileName, kMaxLoopLengthSamples,
+ kInputBlockSizeSamples));
+
+ EXPECT_LT(EncodedPowerRatio(encoder.get(), decoder.get(), &audio_loop),
+ kMaxNarrowbandRatio);
+
+ encoder->OnReceivedTargetAudioBitrate(9000);
+ EXPECT_LT(EncodedPowerRatio(encoder.get(), decoder.get(), &audio_loop),
+ kMaxNarrowbandRatio);
+
+ encoder->OnReceivedTargetAudioBitrate(9001);
+ EXPECT_GT(EncodedPowerRatio(encoder.get(), decoder.get(), &audio_loop),
+ kMinWidebandRatio);
+
+ encoder->OnReceivedTargetAudioBitrate(8000);
+ EXPECT_GT(EncodedPowerRatio(encoder.get(), decoder.get(), &audio_loop),
+ kMinWidebandRatio);
+
+ encoder->OnReceivedTargetAudioBitrate(12001);
+ EXPECT_GT(EncodedPowerRatio(encoder.get(), decoder.get(), &audio_loop),
+ kMinWidebandRatio);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/opus/opus_complexity_unittest.cc b/third_party/libwebrtc/modules/audio_coding/codecs/opus/opus_complexity_unittest.cc
new file mode 100644
index 0000000000..6388f33303
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/opus/opus_complexity_unittest.cc
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "api/audio_codecs/opus/audio_encoder_opus.h"
+#include "modules/audio_coding/neteq/tools/audio_loop.h"
+#include "rtc_base/time_utils.h"
+#include "test/gtest.h"
+#include "test/testsupport/file_utils.h"
+#include "test/testsupport/perf_test.h"
+
+namespace webrtc {
+
+namespace {
+int64_t RunComplexityTest(const AudioEncoderOpusConfig& config) {
+ // Create encoder.
+ constexpr int payload_type = 17;
+ const auto encoder = AudioEncoderOpus::MakeAudioEncoder(config, payload_type);
+ // Open speech file.
+ const std::string kInputFileName =
+ webrtc::test::ResourcePath("audio_coding/speech_mono_32_48kHz", "pcm");
+ test::AudioLoop audio_loop;
+ constexpr int kSampleRateHz = 48000;
+ EXPECT_EQ(kSampleRateHz, encoder->SampleRateHz());
+ constexpr size_t kMaxLoopLengthSamples =
+ kSampleRateHz * 10; // 10 second loop.
+ constexpr size_t kInputBlockSizeSamples =
+      10 * kSampleRateHz / 1000;  // 10 ms.
+ EXPECT_TRUE(audio_loop.Init(kInputFileName, kMaxLoopLengthSamples,
+ kInputBlockSizeSamples));
+ // Encode.
+ const int64_t start_time_ms = rtc::TimeMillis();
+ AudioEncoder::EncodedInfo info;
+ rtc::Buffer encoded(500);
+ uint32_t rtp_timestamp = 0u;
+ for (size_t i = 0; i < 10000; ++i) {
+ encoded.Clear();
+ info = encoder->Encode(rtp_timestamp, audio_loop.GetNextBlock(), &encoded);
+ rtp_timestamp += kInputBlockSizeSamples;
+ }
+ return rtc::TimeMillis() - start_time_ms;
+}
+} // namespace
+
+// This test encodes an audio file using Opus twice with different bitrates
+// (~11 kbps and 15.5 kbps). The runtime for each is measured, and the ratio
+// between the two is calculated and tracked. This test explicitly sets the
+// low_rate_complexity to 9. When running on desktop platforms, this is the same
+// as the regular complexity, and the expectation is that the resulting ratio
+// should be less than 100% (since the encoder runs faster at lower bitrates,
+// given a fixed complexity setting). On the other hand, when running on
+// mobiles, the regular complexity is 5, and we expect the resulting ratio to
+// be higher, since we have explicitly asked for a higher complexity setting at
+// the lower rate.
+TEST(AudioEncoderOpusComplexityAdaptationTest, Adaptation_On) {
+ // Create config.
+ AudioEncoderOpusConfig config;
+ // The limit -- including the hysteresis window -- at which the complexity
+  // should be increased.
+ config.bitrate_bps = 11000 - 1;
+ config.low_rate_complexity = 9;
+ int64_t runtime_10999bps = RunComplexityTest(config);
+
+ config.bitrate_bps = 15500;
+ int64_t runtime_15500bps = RunComplexityTest(config);
+
+ test::PrintResult("opus_encoding_complexity_ratio", "", "adaptation_on",
+ 100.0 * runtime_10999bps / runtime_15500bps, "percent",
+ true);
+}
+
+// This test is identical to the one above, but without the complexity
+// adaptation enabled (neither on desktop, nor on mobile). The expectation is
+// that the resulting ratio is less than 100% at all times.
+TEST(AudioEncoderOpusComplexityAdaptationTest, Adaptation_Off) {
+ // Create config.
+ AudioEncoderOpusConfig config;
+ // The limit -- including the hysteresis window -- at which the complexity
+  // should be increased (but not in this test, since complexity adaptation is
+ // disabled).
+ config.bitrate_bps = 11000 - 1;
+ int64_t runtime_10999bps = RunComplexityTest(config);
+
+ config.bitrate_bps = 15500;
+ int64_t runtime_15500bps = RunComplexityTest(config);
+
+ test::PrintResult("opus_encoding_complexity_ratio", "", "adaptation_off",
+ 100.0 * runtime_10999bps / runtime_15500bps, "percent",
+ true);
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/opus/opus_fec_test.cc b/third_party/libwebrtc/modules/audio_coding/codecs/opus/opus_fec_test.cc
new file mode 100644
index 0000000000..815f26e31c
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/opus/opus_fec_test.cc
@@ -0,0 +1,248 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <memory>
+
+#include "modules/audio_coding/codecs/opus/opus_interface.h"
+#include "test/gtest.h"
+#include "test/testsupport/file_utils.h"
+
+using std::get;
+using std::string;
+using std::tuple;
+using ::testing::TestWithParam;
+
+namespace webrtc {
+
+// Define coding parameter as <channels, bit_rate, filename, extension>.
+typedef tuple<size_t, int, string, string> coding_param;
+
+struct mode {
+ bool fec;
+ uint8_t target_packet_loss_rate;
+};
+
+const int kOpusBlockDurationMs = 20;
+const int kOpusSamplingKhz = 48;
+
+class OpusFecTest : public TestWithParam<coding_param> {
+ protected:
+ OpusFecTest();
+
+ void SetUp() override;
+ void TearDown() override;
+
+ virtual void EncodeABlock();
+
+ virtual void DecodeABlock(bool lost_previous, bool lost_current);
+
+ int block_duration_ms_;
+ int sampling_khz_;
+ size_t block_length_sample_;
+
+ size_t channels_;
+ int bit_rate_;
+
+ size_t data_pointer_;
+ size_t loop_length_samples_;
+ size_t max_bytes_;
+ size_t encoded_bytes_;
+
+ WebRtcOpusEncInst* opus_encoder_;
+ WebRtcOpusDecInst* opus_decoder_;
+
+ string in_filename_;
+
+ std::unique_ptr<int16_t[]> in_data_;
+ std::unique_ptr<int16_t[]> out_data_;
+ std::unique_ptr<uint8_t[]> bit_stream_;
+};
+
+void OpusFecTest::SetUp() {
+ channels_ = get<0>(GetParam());
+ bit_rate_ = get<1>(GetParam());
+ printf("Coding %zu channel signal at %d bps.\n", channels_, bit_rate_);
+
+ in_filename_ = test::ResourcePath(get<2>(GetParam()), get<3>(GetParam()));
+
+ FILE* fp = fopen(in_filename_.c_str(), "rb");
+ ASSERT_FALSE(fp == NULL);
+
+ // Obtain file size.
+ fseek(fp, 0, SEEK_END);
+ loop_length_samples_ = ftell(fp) / sizeof(int16_t);
+ rewind(fp);
+
+ // Allocate memory to contain the whole file.
+ in_data_.reset(
+ new int16_t[loop_length_samples_ + block_length_sample_ * channels_]);
+
+ // Copy the file into the buffer.
+ ASSERT_EQ(fread(&in_data_[0], sizeof(int16_t), loop_length_samples_, fp),
+ loop_length_samples_);
+ fclose(fp);
+
+ // The audio will be used in a looped manner. To ease the acquisition of an
+ // audio frame that crosses the end of the excerpt, we add an extra block
+ // length of samples to the end of the array, starting over again from the
+  // beginning of the array. Audio frames that cross the end of the excerpt
+  // then always appear as one contiguous block of memory.
+ memcpy(&in_data_[loop_length_samples_], &in_data_[0],
+ block_length_sample_ * channels_ * sizeof(int16_t));
+
+ // Maximum number of bytes in output bitstream.
+ max_bytes_ = block_length_sample_ * channels_ * sizeof(int16_t);
+
+ out_data_.reset(new int16_t[2 * block_length_sample_ * channels_]);
+ bit_stream_.reset(new uint8_t[max_bytes_]);
+
+  // If channels_ == 1, use Opus VOIP mode; otherwise use audio mode.
+ int app = channels_ == 1 ? 0 : 1;
+
+ // Create encoder memory.
+ EXPECT_EQ(0, WebRtcOpus_EncoderCreate(&opus_encoder_, channels_, app, 48000));
+ EXPECT_EQ(0, WebRtcOpus_DecoderCreate(&opus_decoder_, channels_, 48000));
+ // Set bitrate.
+ EXPECT_EQ(0, WebRtcOpus_SetBitRate(opus_encoder_, bit_rate_));
+}
+
+void OpusFecTest::TearDown() {
+ // Free memory.
+ EXPECT_EQ(0, WebRtcOpus_EncoderFree(opus_encoder_));
+ EXPECT_EQ(0, WebRtcOpus_DecoderFree(opus_decoder_));
+}
+
+OpusFecTest::OpusFecTest()
+ : block_duration_ms_(kOpusBlockDurationMs),
+ sampling_khz_(kOpusSamplingKhz),
+ block_length_sample_(
+ static_cast<size_t>(block_duration_ms_ * sampling_khz_)),
+ data_pointer_(0),
+ max_bytes_(0),
+ encoded_bytes_(0),
+ opus_encoder_(NULL),
+ opus_decoder_(NULL) {}
+
+void OpusFecTest::EncodeABlock() {
+ int value =
+ WebRtcOpus_Encode(opus_encoder_, &in_data_[data_pointer_],
+ block_length_sample_, max_bytes_, &bit_stream_[0]);
+ EXPECT_GT(value, 0);
+
+ encoded_bytes_ = static_cast<size_t>(value);
+}
+
+void OpusFecTest::DecodeABlock(bool lost_previous, bool lost_current) {
+ int16_t audio_type;
+ int value_1 = 0, value_2 = 0;
+
+ if (lost_previous) {
+ // Decode previous frame.
+ if (!lost_current &&
+ WebRtcOpus_PacketHasFec(&bit_stream_[0], encoded_bytes_) == 1) {
+ value_1 =
+ WebRtcOpus_DecodeFec(opus_decoder_, &bit_stream_[0], encoded_bytes_,
+ &out_data_[0], &audio_type);
+ } else {
+ // Call decoder PLC.
+ while (value_1 < static_cast<int>(block_length_sample_)) {
+ int ret = WebRtcOpus_Decode(opus_decoder_, NULL, 0, &out_data_[value_1],
+ &audio_type);
+ EXPECT_EQ(ret, sampling_khz_ * 10); // Should return 10 ms of samples.
+ value_1 += ret;
+ }
+ }
+ EXPECT_EQ(static_cast<int>(block_length_sample_), value_1);
+ }
+
+ if (!lost_current) {
+ // Decode current frame.
+ value_2 = WebRtcOpus_Decode(opus_decoder_, &bit_stream_[0], encoded_bytes_,
+ &out_data_[value_1 * channels_], &audio_type);
+ EXPECT_EQ(static_cast<int>(block_length_sample_), value_2);
+ }
+}
+
+TEST_P(OpusFecTest, RandomPacketLossTest) {
+ const int kDurationMs = 200000;
+ int time_now_ms, fec_frames;
+ int actual_packet_loss_rate;
+ bool lost_current, lost_previous;
+ mode mode_set[3] = {{true, 0}, {false, 0}, {true, 50}};
+
+ lost_current = false;
+ for (int i = 0; i < 3; i++) {
+ if (mode_set[i].fec) {
+ EXPECT_EQ(0, WebRtcOpus_EnableFec(opus_encoder_));
+ EXPECT_EQ(0, WebRtcOpus_SetPacketLossRate(
+ opus_encoder_, mode_set[i].target_packet_loss_rate));
+ printf("FEC is ON, target at packet loss rate %d percent.\n",
+ mode_set[i].target_packet_loss_rate);
+ } else {
+ EXPECT_EQ(0, WebRtcOpus_DisableFec(opus_encoder_));
+ printf("FEC is OFF.\n");
+ }
+ // In this test, we let the target packet loss rate match the actual rate.
+ actual_packet_loss_rate = mode_set[i].target_packet_loss_rate;
+ // Run every mode a certain time.
+ time_now_ms = 0;
+ fec_frames = 0;
+ while (time_now_ms < kDurationMs) {
+ // Encode & decode.
+ EncodeABlock();
+
+ // Check if payload has FEC.
+ int fec = WebRtcOpus_PacketHasFec(&bit_stream_[0], encoded_bytes_);
+
+ // If FEC is disabled or the target packet loss rate is set to 0, there
+ // should be no FEC in the bit stream.
+ if (!mode_set[i].fec || mode_set[i].target_packet_loss_rate == 0) {
+ EXPECT_EQ(fec, 0);
+ } else if (fec == 1) {
+ fec_frames++;
+ }
+
+ lost_previous = lost_current;
+ lost_current = rand() < actual_packet_loss_rate * (RAND_MAX / 100);
+ DecodeABlock(lost_previous, lost_current);
+
+ time_now_ms += block_duration_ms_;
+
+ // `data_pointer_` is incremented and wrapped across
+ // `loop_length_samples_`.
+ data_pointer_ = (data_pointer_ + block_length_sample_ * channels_) %
+ loop_length_samples_;
+ }
+ if (mode_set[i].fec) {
+ printf("%.2f percent frames has FEC.\n",
+ static_cast<float>(fec_frames) * block_duration_ms_ / 2000);
+ }
+ }
+}
+
+const coding_param param_set[] = {
+ std::make_tuple(1,
+ 64000,
+ string("audio_coding/testfile32kHz"),
+ string("pcm")),
+ std::make_tuple(1,
+ 32000,
+ string("audio_coding/testfile32kHz"),
+ string("pcm")),
+ std::make_tuple(2,
+ 64000,
+ string("audio_coding/teststereo32kHz"),
+ string("pcm"))};
+
+// Instantiate the tests with all parameter combinations above.
+INSTANTIATE_TEST_SUITE_P(AllTest, OpusFecTest, ::testing::ValuesIn(param_set));
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/opus/opus_inst.h b/third_party/libwebrtc/modules/audio_coding/codecs/opus/opus_inst.h
new file mode 100644
index 0000000000..b3a493b157
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/opus/opus_inst.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_OPUS_OPUS_INST_H_
+#define MODULES_AUDIO_CODING_CODECS_OPUS_OPUS_INST_H_
+
+#include <stddef.h>
+
+#include "rtc_base/ignore_wundef.h"
+
+RTC_PUSH_IGNORING_WUNDEF()
+#if defined(WEBRTC_MOZILLA_BUILD)
+#include "opus.h"
+#include "opus_multistream.h"
+#else
+#include "third_party/opus/src/include/opus.h"
+#include "third_party/opus/src/include/opus_multistream.h"
+#endif
+RTC_POP_IGNORING_WUNDEF()
+
+struct WebRtcOpusEncInst {
+ OpusEncoder* encoder;
+ OpusMSEncoder* multistream_encoder;
+ size_t channels;
+ int in_dtx_mode;
+ bool avoid_noise_pumping_during_dtx;
+ int sample_rate_hz;
+ float smooth_energy_non_active_frames;
+};
+
+struct WebRtcOpusDecInst {
+ OpusDecoder* decoder;
+ OpusMSDecoder* multistream_decoder;
+ int prev_decoded_samples;
+ bool plc_use_prev_decoded_samples;
+ size_t channels;
+ int in_dtx_mode;
+ int sample_rate_hz;
+};
+
+#endif // MODULES_AUDIO_CODING_CODECS_OPUS_OPUS_INST_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/opus/opus_interface.cc b/third_party/libwebrtc/modules/audio_coding/codecs/opus/opus_interface.cc
new file mode 100644
index 0000000000..033791971f
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/opus/opus_interface.cc
@@ -0,0 +1,878 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/opus/opus_interface.h"
+
+#include <cstdlib>
+
+#include <numeric>
+
+#include "api/array_view.h"
+#include "rtc_base/checks.h"
+#include "system_wrappers/include/field_trial.h"
+
+enum {
+#if WEBRTC_OPUS_SUPPORT_120MS_PTIME
+ /* Maximum supported frame size in WebRTC is 120 ms. */
+ kWebRtcOpusMaxEncodeFrameSizeMs = 120,
+#else
+ /* Maximum supported frame size in WebRTC is 60 ms. */
+ kWebRtcOpusMaxEncodeFrameSizeMs = 60,
+#endif
+
+ /* The format allows up to 120 ms frames. Since we don't control the other
+ * side, we must allow for packets of that size. NetEq is currently limited
+ * to 60 ms on the receive side. */
+ kWebRtcOpusMaxDecodeFrameSizeMs = 120,
+
+ // Duration of audio that each call to packet loss concealment covers.
+ kWebRtcOpusPlcFrameSizeMs = 10,
+};
+
+constexpr char kPlcUsePrevDecodedSamplesFieldTrial[] =
+ "WebRTC-Audio-OpusPlcUsePrevDecodedSamples";
+
+constexpr char kAvoidNoisePumpingDuringDtxFieldTrial[] =
+ "WebRTC-Audio-OpusAvoidNoisePumpingDuringDtx";
+
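+// Number of samples per channel in a frame of `frame_size_ms` at
+// `sample_rate_hz`, e.g. 20 ms at 48000 Hz gives 20 * 48 = 960 samples.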
+static int FrameSizePerChannel(int frame_size_ms, int sample_rate_hz) {
+ RTC_DCHECK_GT(frame_size_ms, 0);
+ RTC_DCHECK_EQ(frame_size_ms % 10, 0);
+ RTC_DCHECK_GT(sample_rate_hz, 0);
+ RTC_DCHECK_EQ(sample_rate_hz % 1000, 0);
+ return frame_size_ms * (sample_rate_hz / 1000);
+}
+
+// Maximum sample count per channel.
+static int MaxFrameSizePerChannel(int sample_rate_hz) {
+ return FrameSizePerChannel(kWebRtcOpusMaxDecodeFrameSizeMs, sample_rate_hz);
+}
+
+// Default sample count per channel.
+static int DefaultFrameSizePerChannel(int sample_rate_hz) {
+ return FrameSizePerChannel(20, sample_rate_hz);
+}
+
+// Returns true if the `encoded` payload corresponds to a refresh DTX packet
+// whose energy is larger than expected for non-activity packets.
+static bool WebRtcOpus_IsHighEnergyRefreshDtxPacket(
+ OpusEncInst* inst,
+ rtc::ArrayView<const int16_t> frame,
+ rtc::ArrayView<const uint8_t> encoded) {
+ if (encoded.size() <= 2) {
+ return false;
+ }
+ int number_frames =
+ frame.size() / DefaultFrameSizePerChannel(inst->sample_rate_hz);
+ if (number_frames > 0 &&
+ WebRtcOpus_PacketHasVoiceActivity(encoded.data(), encoded.size()) == 0) {
+ const float average_frame_energy =
+ std::accumulate(frame.begin(), frame.end(), 0.0f,
+ [](float a, int32_t b) { return a + b * b; }) /
+ number_frames;
+ if (WebRtcOpus_GetInDtx(inst) == 1 &&
+ average_frame_energy >= inst->smooth_energy_non_active_frames * 0.5f) {
+ // This is a refresh DTX packet as the encoder is in DTX and has
+      // produced a payload > 2 bytes. This refresh packet has a higher energy
+      // than the smoothed energy of non-activity frames (with a 3 dB negative
+      // margin) and is therefore flagged as a high energy refresh DTX packet.
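+      // (The factor 0.5 on the smoothed energy corresponds to the 3 dB
+      // margin, since 10 * log10(0.5) ~= -3 dB.)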
+ return true;
+ }
+    // The average energy is tracked in a way similar to the comfort noise
+    // modeling in the Silk decoder in Opus
+    // (third_party/opus/src/silk/CNG.c).
+ if (average_frame_energy < inst->smooth_energy_non_active_frames * 0.5f) {
+ inst->smooth_energy_non_active_frames = average_frame_energy;
+ } else {
+ inst->smooth_energy_non_active_frames +=
+ (average_frame_energy - inst->smooth_energy_non_active_frames) *
+ 0.25f;
+ }
+ }
+ return false;
+}
+
+int16_t WebRtcOpus_EncoderCreate(OpusEncInst** inst,
+ size_t channels,
+ int32_t application,
+ int sample_rate_hz) {
+ int opus_app;
+ if (!inst)
+ return -1;
+
+ switch (application) {
+ case 0:
+ opus_app = OPUS_APPLICATION_VOIP;
+ break;
+ case 1:
+ opus_app = OPUS_APPLICATION_AUDIO;
+ break;
+ default:
+ return -1;
+ }
+
+ OpusEncInst* state =
+ reinterpret_cast<OpusEncInst*>(calloc(1, sizeof(OpusEncInst)));
+ RTC_DCHECK(state);
+
+ int error;
+ state->encoder = opus_encoder_create(
+ sample_rate_hz, static_cast<int>(channels), opus_app, &error);
+
+ if (error != OPUS_OK || (!state->encoder && !state->multistream_encoder)) {
+ WebRtcOpus_EncoderFree(state);
+ return -1;
+ }
+
+ state->in_dtx_mode = 0;
+ state->channels = channels;
+ state->sample_rate_hz = sample_rate_hz;
+ state->smooth_energy_non_active_frames = 0.0f;
+ state->avoid_noise_pumping_during_dtx =
+ webrtc::field_trial::IsEnabled(kAvoidNoisePumpingDuringDtxFieldTrial);
+
+ *inst = state;
+ return 0;
+}
+
+int16_t WebRtcOpus_MultistreamEncoderCreate(
+ OpusEncInst** inst,
+ size_t channels,
+ int32_t application,
+ size_t streams,
+ size_t coupled_streams,
+ const unsigned char* channel_mapping) {
+ int opus_app;
+ if (!inst)
+ return -1;
+
+ switch (application) {
+ case 0:
+ opus_app = OPUS_APPLICATION_VOIP;
+ break;
+ case 1:
+ opus_app = OPUS_APPLICATION_AUDIO;
+ break;
+ default:
+ return -1;
+ }
+
+ OpusEncInst* state =
+ reinterpret_cast<OpusEncInst*>(calloc(1, sizeof(OpusEncInst)));
+ RTC_DCHECK(state);
+
+ int error;
+ const int sample_rate_hz = 48000;
+ state->multistream_encoder = opus_multistream_encoder_create(
+ sample_rate_hz, channels, streams, coupled_streams, channel_mapping,
+ opus_app, &error);
+
+ if (error != OPUS_OK || (!state->encoder && !state->multistream_encoder)) {
+ WebRtcOpus_EncoderFree(state);
+ return -1;
+ }
+
+ state->in_dtx_mode = 0;
+ state->channels = channels;
+ state->sample_rate_hz = sample_rate_hz;
+ state->smooth_energy_non_active_frames = 0.0f;
+ state->avoid_noise_pumping_during_dtx = false;
+
+ *inst = state;
+ return 0;
+}
+
+int16_t WebRtcOpus_EncoderFree(OpusEncInst* inst) {
+ if (inst) {
+ if (inst->encoder) {
+ opus_encoder_destroy(inst->encoder);
+ } else {
+ opus_multistream_encoder_destroy(inst->multistream_encoder);
+ }
+ free(inst);
+ return 0;
+ } else {
+ return -1;
+ }
+}
+
+int WebRtcOpus_Encode(OpusEncInst* inst,
+ const int16_t* audio_in,
+ size_t samples,
+ size_t length_encoded_buffer,
+ uint8_t* encoded) {
+ int res;
+
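+  // Reject inputs longer than the maximum encode frame size, counted in
+  // 48 kHz samples (48 per ms); e.g. with 120 ms support, 48 * 120 = 5760.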
+ if (samples > 48 * kWebRtcOpusMaxEncodeFrameSizeMs) {
+ return -1;
+ }
+
+ if (inst->encoder) {
+ res = opus_encode(inst->encoder, (const opus_int16*)audio_in,
+ static_cast<int>(samples), encoded,
+ static_cast<opus_int32>(length_encoded_buffer));
+ } else {
+ res = opus_multistream_encode(
+ inst->multistream_encoder, (const opus_int16*)audio_in,
+ static_cast<int>(samples), encoded,
+ static_cast<opus_int32>(length_encoded_buffer));
+ }
+
+ if (res <= 0) {
+ return -1;
+ }
+
+ if (res <= 2) {
+ // Indicates DTX since the packet has nothing but a header. In principle,
+ // there is no need to send this packet. However, we do transmit the first
+    // occurrence to let the decoder know that the encoder has entered DTX
+    // mode.
+ if (inst->in_dtx_mode) {
+ return 0;
+ } else {
+ inst->in_dtx_mode = 1;
+ return res;
+ }
+ }
+
+ if (inst->avoid_noise_pumping_during_dtx && WebRtcOpus_GetUseDtx(inst) == 1 &&
+ WebRtcOpus_IsHighEnergyRefreshDtxPacket(
+ inst, rtc::MakeArrayView(audio_in, samples),
+ rtc::MakeArrayView(encoded, res))) {
+    // This packet is a high energy refresh DTX packet. To avoid an increase
+    // of the energy in the DTX region at the decoder, this packet is
+    // replaced by a single TOC byte describing one empty frame.
+    // The frame count encoded in the TOC byte
+    // (https://tools.ietf.org/html/rfc6716#section-3.1) is overwritten to
+    // always indicate one frame (last two bits equal to 0).
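+    // For example, a code 3 TOC byte 0bxxxxxx11 (arbitrary number of frames)
+    // becomes a code 0 TOC byte 0bxxxxxx00 (exactly one frame).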
+ encoded[0] = encoded[0] & 0b11111100;
+ inst->in_dtx_mode = 1;
+    // The payload is now just the TOC byte, i.e. 1 byte long.
+ return 1;
+ }
+ inst->in_dtx_mode = 0;
+ return res;
+}
+
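+// Dispatches a ctl request to whichever encoder (single-stream or
+// multistream) the instance holds.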
+#define ENCODER_CTL(inst, vargs) \
+ (inst->encoder \
+ ? opus_encoder_ctl(inst->encoder, vargs) \
+ : opus_multistream_encoder_ctl(inst->multistream_encoder, vargs))
+
+int16_t WebRtcOpus_SetBitRate(OpusEncInst* inst, int32_t rate) {
+ if (inst) {
+ return ENCODER_CTL(inst, OPUS_SET_BITRATE(rate));
+ } else {
+ return -1;
+ }
+}
+
+int16_t WebRtcOpus_SetPacketLossRate(OpusEncInst* inst, int32_t loss_rate) {
+ if (inst) {
+ return ENCODER_CTL(inst, OPUS_SET_PACKET_LOSS_PERC(loss_rate));
+ } else {
+ return -1;
+ }
+}
+
+int16_t WebRtcOpus_SetMaxPlaybackRate(OpusEncInst* inst, int32_t frequency_hz) {
+ opus_int32 set_bandwidth;
+
+ if (!inst)
+ return -1;
+
+ if (frequency_hz <= 8000) {
+ set_bandwidth = OPUS_BANDWIDTH_NARROWBAND;
+ } else if (frequency_hz <= 12000) {
+ set_bandwidth = OPUS_BANDWIDTH_MEDIUMBAND;
+ } else if (frequency_hz <= 16000) {
+ set_bandwidth = OPUS_BANDWIDTH_WIDEBAND;
+ } else if (frequency_hz <= 24000) {
+ set_bandwidth = OPUS_BANDWIDTH_SUPERWIDEBAND;
+ } else {
+ set_bandwidth = OPUS_BANDWIDTH_FULLBAND;
+ }
+ return ENCODER_CTL(inst, OPUS_SET_MAX_BANDWIDTH(set_bandwidth));
+}
+
+int16_t WebRtcOpus_GetMaxPlaybackRate(OpusEncInst* const inst,
+ int32_t* result_hz) {
+ if (inst->encoder) {
+ if (opus_encoder_ctl(inst->encoder, OPUS_GET_MAX_BANDWIDTH(result_hz)) ==
+ OPUS_OK) {
+ return 0;
+ }
+ return -1;
+ }
+
+ opus_int32 max_bandwidth;
+ int s;
+ int ret;
+
+ max_bandwidth = 0;
+ ret = OPUS_OK;
+ s = 0;
+ while (ret == OPUS_OK) {
+ OpusEncoder* enc;
+ opus_int32 bandwidth;
+
+ ret = ENCODER_CTL(inst, OPUS_MULTISTREAM_GET_ENCODER_STATE(s, &enc));
+ if (ret == OPUS_BAD_ARG)
+ break;
+ if (ret != OPUS_OK)
+ return -1;
+ if (opus_encoder_ctl(enc, OPUS_GET_MAX_BANDWIDTH(&bandwidth)) != OPUS_OK)
+ return -1;
+
+ if (max_bandwidth != 0 && max_bandwidth != bandwidth)
+ return -1;
+
+ max_bandwidth = bandwidth;
+ s++;
+ }
+ *result_hz = max_bandwidth;
+ return 0;
+}
+
+int16_t WebRtcOpus_EnableFec(OpusEncInst* inst) {
+ if (inst) {
+ return ENCODER_CTL(inst, OPUS_SET_INBAND_FEC(1));
+ } else {
+ return -1;
+ }
+}
+
+int16_t WebRtcOpus_DisableFec(OpusEncInst* inst) {
+ if (inst) {
+ return ENCODER_CTL(inst, OPUS_SET_INBAND_FEC(0));
+ } else {
+ return -1;
+ }
+}
+
+int16_t WebRtcOpus_EnableDtx(OpusEncInst* inst) {
+ if (!inst) {
+ return -1;
+ }
+
+  // Force the signal type to voice to prevent Opus from entering CELT-only
+  // mode, which makes sure that DTX behaves correctly. Currently, DTX does
+  // not last long during pure silence if the signal type is not forced.
+ // TODO(minyue): Remove the signal type forcing when Opus DTX works properly
+ // without it.
+ int ret = ENCODER_CTL(inst, OPUS_SET_SIGNAL(OPUS_SIGNAL_VOICE));
+ if (ret != OPUS_OK)
+ return ret;
+
+ return ENCODER_CTL(inst, OPUS_SET_DTX(1));
+}
+
+int16_t WebRtcOpus_DisableDtx(OpusEncInst* inst) {
+ if (inst) {
+ int ret = ENCODER_CTL(inst, OPUS_SET_SIGNAL(OPUS_AUTO));
+ if (ret != OPUS_OK)
+ return ret;
+ return ENCODER_CTL(inst, OPUS_SET_DTX(0));
+ } else {
+ return -1;
+ }
+}
+
+int16_t WebRtcOpus_GetUseDtx(OpusEncInst* inst) {
+ if (inst) {
+ opus_int32 use_dtx;
+ if (ENCODER_CTL(inst, OPUS_GET_DTX(&use_dtx)) == 0) {
+ return use_dtx;
+ }
+ }
+ return -1;
+}
+
+int16_t WebRtcOpus_EnableCbr(OpusEncInst* inst) {
+ if (inst) {
+ return ENCODER_CTL(inst, OPUS_SET_VBR(0));
+ } else {
+ return -1;
+ }
+}
+
+int16_t WebRtcOpus_DisableCbr(OpusEncInst* inst) {
+ if (inst) {
+ return ENCODER_CTL(inst, OPUS_SET_VBR(1));
+ } else {
+ return -1;
+ }
+}
+
+int16_t WebRtcOpus_SetComplexity(OpusEncInst* inst, int32_t complexity) {
+ if (inst) {
+ return ENCODER_CTL(inst, OPUS_SET_COMPLEXITY(complexity));
+ } else {
+ return -1;
+ }
+}
+
+int32_t WebRtcOpus_GetBandwidth(OpusEncInst* inst) {
+ if (!inst) {
+ return -1;
+ }
+ int32_t bandwidth;
+ if (ENCODER_CTL(inst, OPUS_GET_BANDWIDTH(&bandwidth)) == 0) {
+ return bandwidth;
+ } else {
+ return -1;
+ }
+}
+
+int16_t WebRtcOpus_SetBandwidth(OpusEncInst* inst, int32_t bandwidth) {
+ if (inst) {
+ return ENCODER_CTL(inst, OPUS_SET_BANDWIDTH(bandwidth));
+ } else {
+ return -1;
+ }
+}
+
+int16_t WebRtcOpus_SetForceChannels(OpusEncInst* inst, size_t num_channels) {
+ if (!inst)
+ return -1;
+ if (num_channels == 0) {
+ return ENCODER_CTL(inst, OPUS_SET_FORCE_CHANNELS(OPUS_AUTO));
+ } else if (num_channels == 1 || num_channels == 2) {
+ return ENCODER_CTL(inst, OPUS_SET_FORCE_CHANNELS(num_channels));
+ } else {
+ return -1;
+ }
+}
+
+int32_t WebRtcOpus_GetInDtx(OpusEncInst* inst) {
+ if (!inst) {
+ return -1;
+ }
+#ifdef OPUS_GET_IN_DTX
+ int32_t in_dtx;
+ if (ENCODER_CTL(inst, OPUS_GET_IN_DTX(&in_dtx)) == 0) {
+ return in_dtx;
+ }
+#endif
+ return -1;
+}
+
+int16_t WebRtcOpus_DecoderCreate(OpusDecInst** inst,
+ size_t channels,
+ int sample_rate_hz) {
+ int error;
+ OpusDecInst* state;
+
+ if (inst != NULL) {
+ // Create Opus decoder state.
+ state = reinterpret_cast<OpusDecInst*>(calloc(1, sizeof(OpusDecInst)));
+ if (state == NULL) {
+ return -1;
+ }
+
+ state->decoder =
+ opus_decoder_create(sample_rate_hz, static_cast<int>(channels), &error);
+ if (error == OPUS_OK && state->decoder) {
+ // Creation of memory all ok.
+ state->channels = channels;
+ state->sample_rate_hz = sample_rate_hz;
+ state->plc_use_prev_decoded_samples =
+ webrtc::field_trial::IsEnabled(kPlcUsePrevDecodedSamplesFieldTrial);
+ if (state->plc_use_prev_decoded_samples) {
+ state->prev_decoded_samples =
+ DefaultFrameSizePerChannel(state->sample_rate_hz);
+ }
+ state->in_dtx_mode = 0;
+ *inst = state;
+ return 0;
+ }
+
+ // If memory allocation was unsuccessful, free the entire state.
+ if (state->decoder) {
+ opus_decoder_destroy(state->decoder);
+ }
+ free(state);
+ }
+ return -1;
+}
+
+int16_t WebRtcOpus_MultistreamDecoderCreate(
+ OpusDecInst** inst,
+ size_t channels,
+ size_t streams,
+ size_t coupled_streams,
+ const unsigned char* channel_mapping) {
+ int error;
+ OpusDecInst* state;
+
+ if (inst != NULL) {
+ // Create Opus decoder state.
+ state = reinterpret_cast<OpusDecInst*>(calloc(1, sizeof(OpusDecInst)));
+ if (state == NULL) {
+ return -1;
+ }
+
+ // Create new memory, always at 48000 Hz.
+ state->multistream_decoder = opus_multistream_decoder_create(
+ 48000, channels, streams, coupled_streams, channel_mapping, &error);
+
+ if (error == OPUS_OK && state->multistream_decoder) {
+ // Creation of memory all ok.
+ state->channels = channels;
+ state->sample_rate_hz = 48000;
+ state->plc_use_prev_decoded_samples =
+ webrtc::field_trial::IsEnabled(kPlcUsePrevDecodedSamplesFieldTrial);
+ if (state->plc_use_prev_decoded_samples) {
+ state->prev_decoded_samples =
+ DefaultFrameSizePerChannel(state->sample_rate_hz);
+ }
+ state->in_dtx_mode = 0;
+ *inst = state;
+ return 0;
+ }
+
+ // If memory allocation was unsuccessful, free the entire state.
+ opus_multistream_decoder_destroy(state->multistream_decoder);
+ free(state);
+ }
+ return -1;
+}
+
+int16_t WebRtcOpus_DecoderFree(OpusDecInst* inst) {
+ if (inst) {
+ if (inst->decoder) {
+ opus_decoder_destroy(inst->decoder);
+ } else if (inst->multistream_decoder) {
+ opus_multistream_decoder_destroy(inst->multistream_decoder);
+ }
+ free(inst);
+ return 0;
+ } else {
+ return -1;
+ }
+}
+
+size_t WebRtcOpus_DecoderChannels(OpusDecInst* inst) {
+ return inst->channels;
+}
+
+void WebRtcOpus_DecoderInit(OpusDecInst* inst) {
+ if (inst->decoder) {
+ opus_decoder_ctl(inst->decoder, OPUS_RESET_STATE);
+ } else {
+ opus_multistream_decoder_ctl(inst->multistream_decoder, OPUS_RESET_STATE);
+ }
+ inst->in_dtx_mode = 0;
+}
+
+/* For decoder to determine if it is to output speech or comfort noise. */
+static int16_t DetermineAudioType(OpusDecInst* inst, size_t encoded_bytes) {
+  // The audio type becomes comfort noise when `encoded_bytes` is 1 or 2, and
+  // stays comfort noise while the following payloads are empty (0 bytes) or
+  // again 1 or 2 bytes.
+ if (encoded_bytes == 0 && inst->in_dtx_mode) {
+ return 2; // Comfort noise.
+ } else if (encoded_bytes == 1 || encoded_bytes == 2) {
+ // TODO(henrik.lundin): There is a slight risk that a 2-byte payload is in
+ // fact a 1-byte TOC with a 1-byte payload. That will be erroneously
+ // interpreted as comfort noise output, but such a payload is probably
+ // faulty anyway.
+
+  // TODO(webrtc:10218): This is wrong for multistream opus. There, several
+  // single-stream packets are glued together with some packet size bytes in
+  // between. See https://tools.ietf.org/html/rfc6716#appendix-B
+ inst->in_dtx_mode = 1;
+ return 2; // Comfort noise.
+ } else {
+ inst->in_dtx_mode = 0;
+ return 0; // Speech.
+ }
+}
+
+/* `frame_size` is set to maximum Opus frame size in the normal case, and
+ * is set to the number of samples needed for PLC in case of losses.
+ * It is up to the caller to make sure the value is correct. */
+static int DecodeNative(OpusDecInst* inst,
+ const uint8_t* encoded,
+ size_t encoded_bytes,
+ int frame_size,
+ int16_t* decoded,
+ int16_t* audio_type,
+ int decode_fec) {
+ int res = -1;
+ if (inst->decoder) {
+ res = opus_decode(
+ inst->decoder, encoded, static_cast<opus_int32>(encoded_bytes),
+ reinterpret_cast<opus_int16*>(decoded), frame_size, decode_fec);
+ } else {
+ res = opus_multistream_decode(inst->multistream_decoder, encoded,
+ static_cast<opus_int32>(encoded_bytes),
+ reinterpret_cast<opus_int16*>(decoded),
+ frame_size, decode_fec);
+ }
+
+ if (res <= 0)
+ return -1;
+
+ *audio_type = DetermineAudioType(inst, encoded_bytes);
+
+ return res;
+}
+
+static int DecodePlc(OpusDecInst* inst, int16_t* decoded) {
+ int16_t audio_type = 0;
+ int decoded_samples;
+ int plc_samples =
+ FrameSizePerChannel(kWebRtcOpusPlcFrameSizeMs, inst->sample_rate_hz);
+
+ if (inst->plc_use_prev_decoded_samples) {
+ /* Ask for the number of samples per channel of the previously decoded
+ * frame (`prev_decoded_samples`), limited to a maximum of
+ * `MaxFrameSizePerChannel()`. */
+ plc_samples = inst->prev_decoded_samples;
+ const int max_samples_per_channel =
+ MaxFrameSizePerChannel(inst->sample_rate_hz);
+ plc_samples = plc_samples <= max_samples_per_channel
+ ? plc_samples
+ : max_samples_per_channel;
+ }
+ decoded_samples =
+ DecodeNative(inst, NULL, 0, plc_samples, decoded, &audio_type, 0);
+ if (decoded_samples < 0) {
+ return -1;
+ }
+
+ return decoded_samples;
+}
+
+int WebRtcOpus_Decode(OpusDecInst* inst,
+ const uint8_t* encoded,
+ size_t encoded_bytes,
+ int16_t* decoded,
+ int16_t* audio_type) {
+ int decoded_samples;
+
+ if (encoded_bytes == 0) {
+ *audio_type = DetermineAudioType(inst, encoded_bytes);
+ decoded_samples = DecodePlc(inst, decoded);
+ } else {
+ decoded_samples = DecodeNative(inst, encoded, encoded_bytes,
+ MaxFrameSizePerChannel(inst->sample_rate_hz),
+ decoded, audio_type, 0);
+ }
+ if (decoded_samples < 0) {
+ return -1;
+ }
+
+ if (inst->plc_use_prev_decoded_samples) {
+ /* Update decoded sample memory, to be used by the PLC in case of losses. */
+ inst->prev_decoded_samples = decoded_samples;
+ }
+
+ return decoded_samples;
+}
+
+int WebRtcOpus_DecodeFec(OpusDecInst* inst,
+ const uint8_t* encoded,
+ size_t encoded_bytes,
+ int16_t* decoded,
+ int16_t* audio_type) {
+ int decoded_samples;
+ int fec_samples;
+
+ if (WebRtcOpus_PacketHasFec(encoded, encoded_bytes) != 1) {
+ return 0;
+ }
+
+ fec_samples =
+ opus_packet_get_samples_per_frame(encoded, inst->sample_rate_hz);
+
+ decoded_samples = DecodeNative(inst, encoded, encoded_bytes, fec_samples,
+ decoded, audio_type, 1);
+ if (decoded_samples < 0) {
+ return -1;
+ }
+
+ return decoded_samples;
+}
+
+int WebRtcOpus_DurationEst(OpusDecInst* inst,
+ const uint8_t* payload,
+ size_t payload_length_bytes) {
+ if (payload_length_bytes == 0) {
+ // WebRtcOpus_Decode calls PLC when the payload length is zero, so we
+ // return the PLC duration correspondingly.
+ return WebRtcOpus_PlcDuration(inst);
+ }
+
+ int frames, samples;
+ frames = opus_packet_get_nb_frames(
+ payload, static_cast<opus_int32>(payload_length_bytes));
+ if (frames < 0) {
+ /* Invalid payload data. */
+ return 0;
+ }
+ samples =
+ frames * opus_packet_get_samples_per_frame(payload, inst->sample_rate_hz);
+ if (samples > 120 * inst->sample_rate_hz / 1000) {
+ // More than 120 ms' worth of samples.
+ return 0;
+ }
+ return samples;
+}
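+
+// Worked example: a packet carrying a single 20 ms frame, decoded at 48 kHz,
+// yields opus_packet_get_nb_frames() == 1 and
+// opus_packet_get_samples_per_frame() == 960, so the estimate is
+// 1 * 960 = 960 samples per channel. The 120 ms sanity bound above
+// corresponds to 5760 samples at 48 kHz.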
+
+int WebRtcOpus_PlcDuration(OpusDecInst* inst) {
+ if (inst->plc_use_prev_decoded_samples) {
+ /* The PLC duration is the number of samples per channel of the previously
+ * decoded frame (`prev_decoded_samples`), limited to a maximum of
+ * `MaxFrameSizePerChannel()`. */
+ const int plc_samples = inst->prev_decoded_samples;
+ const int max_samples_per_channel =
+ MaxFrameSizePerChannel(inst->sample_rate_hz);
+ return plc_samples <= max_samples_per_channel ? plc_samples
+ : max_samples_per_channel;
+ }
+ return FrameSizePerChannel(kWebRtcOpusPlcFrameSizeMs, inst->sample_rate_hz);
+}
+
+int WebRtcOpus_FecDurationEst(const uint8_t* payload,
+ size_t payload_length_bytes,
+ int sample_rate_hz) {
+ if (WebRtcOpus_PacketHasFec(payload, payload_length_bytes) != 1) {
+ return 0;
+ }
+ const int samples =
+ opus_packet_get_samples_per_frame(payload, sample_rate_hz);
+ const int samples_per_ms = sample_rate_hz / 1000;
+ if (samples < 10 * samples_per_ms || samples > 120 * samples_per_ms) {
+ /* Invalid payload duration. */
+ return 0;
+ }
+ return samples;
+}
+
+int WebRtcOpus_NumSilkFrames(const uint8_t* payload) {
+ // For computing the payload length in ms, the sample rate is not important
+ // since it cancels out. We use 48 kHz, but any valid sample rate would work.
+ int payload_length_ms =
+ opus_packet_get_samples_per_frame(payload, 48000) / 48;
+ if (payload_length_ms < 10)
+ payload_length_ms = 10;
+
+ int silk_frames;
+ switch (payload_length_ms) {
+ case 10:
+ case 20:
+ silk_frames = 1;
+ break;
+ case 40:
+ silk_frames = 2;
+ break;
+ case 60:
+ silk_frames = 3;
+ break;
+ default:
+ return 0; // Not a valid Opus duration; the packet itself is invalid.
+ }
+ return silk_frames;
+}
+
+// This method is based on the definition of the Opus Audio Codec
+// (https://tools.ietf.org/html/rfc6716). Specifically, it parses the LP layer
+// of an Opus packet, particularly the LBRR flag.
+int WebRtcOpus_PacketHasFec(const uint8_t* payload,
+ size_t payload_length_bytes) {
+ if (payload == NULL || payload_length_bytes == 0)
+ return 0;
+
+ // In CELT_ONLY mode, packets should not have FEC.
+ if (payload[0] & 0x80)
+ return 0;
+
+ int silk_frames = WebRtcOpus_NumSilkFrames(payload);
+ if (silk_frames == 0)
+ return 0; // Not valid.
+
+ const int channels = opus_packet_get_nb_channels(payload);
+ RTC_DCHECK(channels == 1 || channels == 2);
+
+ // Max number of frames in an Opus packet is 48.
+ opus_int16 frame_sizes[48];
+ const unsigned char* frame_data[48];
+
+ // Parse the packet to get the frames. We only care about the first frame,
+ // since FEC can only be decoded from that one.
+ if (opus_packet_parse(payload, static_cast<opus_int32>(payload_length_bytes),
+ NULL, frame_data, frame_sizes, NULL) < 0) {
+ return 0;
+ }
+
+ if (frame_sizes[0] < 1) {
+ return 0;
+ }
+
+ // A frame starts with the LP layer. The LP layer begins with two to eight
+ // header bits. These consist of one VAD bit per SILK frame (up to 3),
+ // followed by a single flag indicating the presence of LBRR frames.
+ // For a stereo packet, these first flags correspond to the mid channel, and
+ // a second set of flags is included for the side channel. Because these are
+ // the first symbols decoded by the range coder and because they are coded
+ // as binary values with uniform probability, they can be extracted directly
+ // from the most significant bits of the first byte of compressed data.
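+ //
+ // Example: for a 20 ms mono packet, `silk_frames` is 1, so the LBRR flag
+ // of the only channel is tested with (0x80 >> 1), i.e. 0x40. For a 20 ms
+ // stereo packet, the side channel's LBRR flag is tested with (0x80 >> 3),
+ // i.e. 0x10.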
+ for (int n = 0; n < channels; n++) {
+ // Counting from the most significant bit, the LBRR bit for channel 1 is
+ // the (`silk_frames` + 1)-th bit, and that of channel 2 is the
+ // ((`silk_frames` + 1) * 2)-th bit.
+ if (frame_data[0][0] & (0x80 >> ((n + 1) * (silk_frames + 1) - 1)))
+ return 1;
+ }
+
+ return 0;
+}
+
+int WebRtcOpus_PacketHasVoiceActivity(const uint8_t* payload,
+ size_t payload_length_bytes) {
+ if (payload == NULL || payload_length_bytes == 0)
+ return 0;
+
+ // In CELT_ONLY mode we cannot determine whether VAD information is present.
+ if (payload[0] & 0x80)
+ return -1;
+
+ int silk_frames = WebRtcOpus_NumSilkFrames(payload);
+ if (silk_frames == 0)
+ return -1;
+
+ const int channels = opus_packet_get_nb_channels(payload);
+ RTC_DCHECK(channels == 1 || channels == 2);
+
+ // Max number of frames in an Opus packet is 48.
+ opus_int16 frame_sizes[48];
+ const unsigned char* frame_data[48];
+
+ // Parse packet to get the frames.
+ int frames =
+ opus_packet_parse(payload, static_cast<opus_int32>(payload_length_bytes),
+ NULL, frame_data, frame_sizes, NULL);
+ if (frames < 0)
+ return -1;
+
+ // Iterate over all Opus frames which may contain multiple SILK frames.
+ for (int frame = 0; frame < frames; frame++) {
+ if (frame_sizes[frame] < 1) {
+ continue;
+ }
+ if (frame_data[frame][0] >> (8 - silk_frames))
+ return 1;
+ if (channels == 2 &&
+ (frame_data[frame][0] << (silk_frames + 1)) >> (8 - silk_frames))
+ return 1;
+ }
+
+ return 0;
+}
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/opus/opus_interface.h b/third_party/libwebrtc/modules/audio_coding/codecs/opus/opus_interface.h
new file mode 100644
index 0000000000..89159ce1c0
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/opus/opus_interface.h
@@ -0,0 +1,547 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_OPUS_OPUS_INTERFACE_H_
+#define MODULES_AUDIO_CODING_CODECS_OPUS_OPUS_INTERFACE_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "modules/audio_coding/codecs/opus/opus_inst.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// Opaque wrapper types for the codec state.
+typedef struct WebRtcOpusEncInst OpusEncInst;
+typedef struct WebRtcOpusDecInst OpusDecInst;
+
+/****************************************************************************
+ * WebRtcOpus_EncoderCreate(...)
+ *
+ * This function creates an Opus encoder that encodes mono or stereo.
+ *
+ * Input:
+ * - channels : number of channels; 1 or 2.
+ * - application : 0 - VOIP applications.
+ * Favor speech intelligibility.
+ * 1 - Audio applications.
+ * Favor faithfulness to the original input.
+ * - sample_rate_hz : sample rate of input audio
+ *
+ * Output:
+ * - inst : a pointer to the encoder context that is
+ * created on success.
+ *
+ * Return value : 0 - Success
+ * -1 - Error
+ */
+int16_t WebRtcOpus_EncoderCreate(OpusEncInst** inst,
+ size_t channels,
+ int32_t application,
+ int sample_rate_hz);
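+
+/* A minimal creation sketch (illustrative; error handling abbreviated):
+ *
+ * OpusEncInst* enc = NULL;
+ * if (WebRtcOpus_EncoderCreate(&enc, 1, 0, 48000) == 0) { // Mono, VOIP.
+ * // ... encode ...
+ * WebRtcOpus_EncoderFree(enc);
+ * }
+ */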
+
+/****************************************************************************
+ * WebRtcOpus_MultistreamEncoderCreate(...)
+ *
+ * This function creates an Opus encoder with any supported channel count.
+ *
+ * Input:
+ * - channels : number of channels in the input of the encoder.
+ * - application : 0 - VOIP applications.
+ * Favor speech intelligibility.
+ * 1 - Audio applications.
+ * Favor faithfulness to the original input.
+ * - streams : number of streams, as described in RFC 7845.
+ * - coupled_streams : number of coupled streams, as described in
+ * RFC 7845.
+ * - channel_mapping : the channel mapping; pointer to array of
+ * `channel` bytes, as described in RFC 7845.
+ *
+ * Output:
+ * - inst : a pointer to the encoder context that is
+ * created on success.
+ *
+ * Return value : 0 - Success
+ * -1 - Error
+ */
+int16_t WebRtcOpus_MultistreamEncoderCreate(
+ OpusEncInst** inst,
+ size_t channels,
+ int32_t application,
+ size_t streams,
+ size_t coupled_streams,
+ const unsigned char* channel_mapping);
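+
+/* Example (illustrative): a 4-channel encoder built from 2 coupled streams
+ * with the identity channel mapping, equivalent to the SDP params
+ * {"channel_mapping", "0,1,2,3"}, {"coupled_streams", "2"}:
+ *
+ * static const unsigned char kMapping[] = {0, 1, 2, 3};
+ * OpusEncInst* enc = NULL;
+ * WebRtcOpus_MultistreamEncoderCreate(&enc, 4, 0, 2, 2, kMapping);
+ */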
+
+int16_t WebRtcOpus_EncoderFree(OpusEncInst* inst);
+
+/****************************************************************************
+ * WebRtcOpus_Encode(...)
+ *
+ * This function encodes audio as a series of Opus frames and inserts
+ * them into a packet. The input buffer can be of any length.
+ *
+ * Input:
+ * - inst : Encoder context
+ * - audio_in : Input speech data buffer
+ * - samples : Samples per channel in audio_in
+ * - length_encoded_buffer : Output buffer size
+ *
+ * Output:
+ * - encoded : Output compressed data buffer
+ *
+ * Return value : >=0 - Length (in bytes) of coded data
+ * -1 - Error
+ */
+int WebRtcOpus_Encode(OpusEncInst* inst,
+ const int16_t* audio_in,
+ size_t samples,
+ size_t length_encoded_buffer,
+ uint8_t* encoded);
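+
+/* Encoding sketch (illustrative): one 20 ms mono frame at 48 kHz, assuming
+ * `pcm` holds 960 samples:
+ *
+ * uint8_t packet[1500];
+ * int bytes = WebRtcOpus_Encode(enc, pcm, 960, sizeof(packet), packet);
+ * if (bytes < 0) return; // Error.
+ */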
+
+/****************************************************************************
+ * WebRtcOpus_SetBitRate(...)
+ *
+ * This function adjusts the target bitrate of the encoder.
+ *
+ * Input:
+ * - inst : Encoder context
+ * - rate : New target bitrate
+ *
+ * Return value : 0 - Success
+ * -1 - Error
+ */
+int16_t WebRtcOpus_SetBitRate(OpusEncInst* inst, int32_t rate);
+
+/****************************************************************************
+ * WebRtcOpus_SetPacketLossRate(...)
+ *
+ * This function configures the encoder's expected packet loss percentage.
+ *
+ * Input:
+ * - inst : Encoder context
+ * - loss_rate : loss percentage in the range 0-100, inclusive.
+ * Return value : 0 - Success
+ * -1 - Error
+ */
+int16_t WebRtcOpus_SetPacketLossRate(OpusEncInst* inst, int32_t loss_rate);
+
+/****************************************************************************
+ * WebRtcOpus_SetMaxPlaybackRate(...)
+ *
+ * Configures the maximum playback rate for encoding. Due to hardware
+ * limitations, the receiver may only be able to render audio up to a certain
+ * playback rate. The Opus encoder can use this information to optimize for
+ * network usage and encoding complexity. This affects the audio bandwidth of
+ * the coded signal; the input/output sample rate is not affected.
+ *
+ * Input:
+ * - inst : Encoder context
+ * - frequency_hz : Maximum playback rate in Hz.
+ * This parameter can take any value. The relation
+ * between the value and the Opus internal mode is
+ * as follows:
+ * frequency_hz <= 8000 narrow band
+ * 8000 < frequency_hz <= 12000 medium band
+ * 12000 < frequency_hz <= 16000 wide band
+ * 16000 < frequency_hz <= 24000 super wide band
+ * frequency_hz > 24000 full band
+ * Return value : 0 - Success
+ * -1 - Error
+ */
+int16_t WebRtcOpus_SetMaxPlaybackRate(OpusEncInst* inst, int32_t frequency_hz);
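+
+/* Example (illustrative): if the receiver can only render 8 kHz audio,
+ * WebRtcOpus_SetMaxPlaybackRate(enc, 8000) makes the encoder use narrowband,
+ * saving bits and complexity without changing the input/output sample rate.
+ */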
+
+/****************************************************************************
+ * WebRtcOpus_GetMaxPlaybackRate(...)
+ *
+ * Queries the maximum playback rate for encoding. If different single-stream
+ * encoders have different maximum playback rates, this function fails.
+ *
+ * Input:
+ * - inst : Encoder context.
+ * Output:
+ * - result_hz : The maximum playback rate in Hz.
+ * Return value : 0 - Success
+ * -1 - Error
+ */
+int16_t WebRtcOpus_GetMaxPlaybackRate(OpusEncInst* const inst,
+ int32_t* result_hz);
+
+/* TODO(minyue): Check whether an API to check the FEC and the packet loss rate
+ * is needed. It might not be very useful since there are not many use cases and
+ * the caller can always maintain the states. */
+
+/****************************************************************************
+ * WebRtcOpus_EnableFec()
+ *
+ * This function enables FEC for encoding.
+ *
+ * Input:
+ * - inst : Encoder context
+ *
+ * Return value : 0 - Success
+ * -1 - Error
+ */
+int16_t WebRtcOpus_EnableFec(OpusEncInst* inst);
+
+/****************************************************************************
+ * WebRtcOpus_DisableFec()
+ *
+ * This function disables FEC for encoding.
+ *
+ * Input:
+ * - inst : Encoder context
+ *
+ * Return value : 0 - Success
+ * -1 - Error
+ */
+int16_t WebRtcOpus_DisableFec(OpusEncInst* inst);
+
+/****************************************************************************
+ * WebRtcOpus_EnableDtx()
+ *
+ * This function enables Opus internal DTX for encoding.
+ *
+ * Input:
+ * - inst : Encoder context
+ *
+ * Return value : 0 - Success
+ * -1 - Error
+ */
+int16_t WebRtcOpus_EnableDtx(OpusEncInst* inst);
+
+/****************************************************************************
+ * WebRtcOpus_DisableDtx()
+ *
+ * This function disables Opus internal DTX for encoding.
+ *
+ * Input:
+ * - inst : Encoder context
+ *
+ * Return value : 0 - Success
+ * -1 - Error
+ */
+int16_t WebRtcOpus_DisableDtx(OpusEncInst* inst);
+
+/****************************************************************************
+ * WebRtcOpus_GetUseDtx()
+ *
+ * This function gets the DTX configuration used for encoding.
+ *
+ * Input:
+ * - inst : Encoder context
+ *
+ * Return value : 0 - Encoder does not use DTX.
+ * 1 - Encoder uses DTX.
+ * -1 - Error.
+ */
+int16_t WebRtcOpus_GetUseDtx(OpusEncInst* inst);
+
+/****************************************************************************
+ * WebRtcOpus_EnableCbr()
+ *
+ * This function enables CBR for encoding.
+ *
+ * Input:
+ * - inst : Encoder context
+ *
+ * Return value : 0 - Success
+ * -1 - Error
+ */
+int16_t WebRtcOpus_EnableCbr(OpusEncInst* inst);
+
+/****************************************************************************
+ * WebRtcOpus_DisableCbr()
+ *
+ * This function disables CBR for encoding.
+ *
+ * Input:
+ * - inst : Encoder context
+ *
+ * Return value : 0 - Success
+ * -1 - Error
+ */
+int16_t WebRtcOpus_DisableCbr(OpusEncInst* inst);
+
+/*
+ * WebRtcOpus_SetComplexity(...)
+ *
+ * This function adjusts the computational complexity. The effect is the same
+ * as setting the complexity through the corresponding Opus encoder CTL,
+ * OPUS_SET_COMPLEXITY.
+ *
+ * Input:
+ * - inst : Encoder context
+ * - complexity : New target complexity (0-10, inclusive)
+ *
+ * Return value : 0 - Success
+ * -1 - Error
+ */
+int16_t WebRtcOpus_SetComplexity(OpusEncInst* inst, int32_t complexity);
+
+/*
+ * WebRtcOpus_GetBandwidth(...)
+ *
+ * This function returns the current encoding bandwidth.
+ *
+ * Input:
+ * - inst : Encoder context
+ *
+ * Return value : Bandwidth - Success
+ * -1 - Error
+ */
+int32_t WebRtcOpus_GetBandwidth(OpusEncInst* inst);
+
+/*
+ * WebRtcOpus_SetBandwidth(...)
+ *
+ * By default Opus decides which bandwidth to encode the signal in depending on
+ * the bitrate. This function overrules the previous setting and forces the
+ * encoder to encode in narrowband/wideband/fullband/etc.
+ *
+ * Input:
+ * - inst : Encoder context
+ * - bandwidth : New target bandwidth. Valid values are:
+ * OPUS_BANDWIDTH_NARROWBAND
+ * OPUS_BANDWIDTH_MEDIUMBAND
+ * OPUS_BANDWIDTH_WIDEBAND
+ * OPUS_BANDWIDTH_SUPERWIDEBAND
+ * OPUS_BANDWIDTH_FULLBAND
+ *
+ * Return value : 0 - Success
+ * -1 - Error
+ */
+int16_t WebRtcOpus_SetBandwidth(OpusEncInst* inst, int32_t bandwidth);
+
+/*
+ * WebRtcOpus_GetInDtx(...)
+ *
+ * Gets the DTX state of the encoder.
+ *
+ * Input:
+ * - inst : Encoder context
+ *
+ * Return value : -1 - Error.
+ * 1 - Last encoded frame was comfort noise update during DTX.
+ * 0 - Last encoded frame was encoded with encoder not in DTX.
+ */
+int32_t WebRtcOpus_GetInDtx(OpusEncInst* inst);
+
+/*
+ * WebRtcOpus_SetForceChannels(...)
+ *
+ * If the encoder is initialized as a stereo encoder, Opus will by default
+ * decide whether to encode in mono or stereo based on the bitrate. This
+ * function overrules the previous setting, and forces the encoder to encode
+ * in auto/mono/stereo.
+ *
+ * If the encoder is initialized as a mono encoder and one tries to force
+ * stereo, the function returns an error.
+ *
+ * Input:
+ * - inst : Encoder context
+ * - num_channels : 0 - Not forced
+ * 1 - Mono
+ * 2 - Stereo
+ *
+ * Return value : 0 - Success
+ * -1 - Error
+ */
+int16_t WebRtcOpus_SetForceChannels(OpusEncInst* inst, size_t num_channels);
+
+int16_t WebRtcOpus_DecoderCreate(OpusDecInst** inst,
+ size_t channels,
+ int sample_rate_hz);
+
+/****************************************************************************
+ * WebRtcOpus_MultistreamDecoderCreate(...)
+ *
+ * This function creates an Opus decoder with any supported channel count.
+ *
+ * Input:
+ * - channels : number of output channels that the decoder
+ * will produce.
+ * - streams : number of encoded streams, as described in
+ * RFC 7845.
+ * - coupled_streams : number of coupled streams, as described in
+ * RFC 7845.
+ * - channel_mapping : the channel mapping; pointer to array of
+ * `channel` bytes, as described in RFC 7845.
+ *
+ * Output:
+ * - inst : a pointer to a Decoder context that is created
+ * if success.
+ *
+ * Return value : 0 - Success
+ * -1 - Error
+ */
+int16_t WebRtcOpus_MultistreamDecoderCreate(
+ OpusDecInst** inst,
+ size_t channels,
+ size_t streams,
+ size_t coupled_streams,
+ const unsigned char* channel_mapping);
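+
+/* Example (illustrative): the decoding counterpart of the 4-channel encoder
+ * above, with the same stream layout and channel mapping:
+ *
+ * static const unsigned char kMapping[] = {0, 1, 2, 3};
+ * OpusDecInst* dec = NULL;
+ * WebRtcOpus_MultistreamDecoderCreate(&dec, 4, 2, 2, kMapping);
+ */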
+
+int16_t WebRtcOpus_DecoderFree(OpusDecInst* inst);
+
+/****************************************************************************
+ * WebRtcOpus_DecoderChannels(...)
+ *
+ * This function returns the number of channels created for the Opus decoder.
+ */
+size_t WebRtcOpus_DecoderChannels(OpusDecInst* inst);
+
+/****************************************************************************
+ * WebRtcOpus_DecoderInit(...)
+ *
+ * This function resets the state of the decoder.
+ *
+ * Input:
+ * - inst : Decoder context
+ */
+void WebRtcOpus_DecoderInit(OpusDecInst* inst);
+
+/****************************************************************************
+ * WebRtcOpus_Decode(...)
+ *
+ * This function decodes an Opus packet into one or more audio frames at the
+ * sampling rate the decoder was created with.
+ *
+ * Input:
+ * - inst : Decoder context
+ * - encoded : Encoded data
+ * - encoded_bytes : Bytes in encoded vector
+ *
+ * Output:
+ * - decoded : The decoded vector
+ * - audio_type : 0 - speech, 2 - comfort noise. Comfort noise
+ * is only produced when the sender uses Opus's
+ * built-in DTX/CNG scheme.
+ *
+ * Return value : >0 - Samples per channel in decoded vector
+ * -1 - Error
+ */
+int WebRtcOpus_Decode(OpusDecInst* inst,
+ const uint8_t* encoded,
+ size_t encoded_bytes,
+ int16_t* decoded,
+ int16_t* audio_type);
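+
+/* Decoding sketch (illustrative), assuming a decoder created at 48 kHz mono;
+ * passing a zero-length payload triggers packet loss concealment instead:
+ *
+ * int16_t pcm[5760]; // Up to 120 ms per channel at 48 kHz.
+ * int16_t audio_type;
+ * int samples = WebRtcOpus_Decode(dec, packet, bytes, pcm, &audio_type);
+ * if (samples < 0) return; // Error.
+ */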
+
+/****************************************************************************
+ * WebRtcOpus_DecodeFec(...)
+ *
+ * This function decodes the FEC data from an Opus packet into one or more
+ * audio frames at the sampling rate the decoder was created with.
+ *
+ * Input:
+ * - inst : Decoder context
+ * - encoded : Encoded data
+ * - encoded_bytes : Bytes in encoded vector
+ *
+ * Output:
+ * - decoded : The decoded vector (previous frame)
+ *
+ * Return value : >0 - Samples per channel in decoded vector
+ * 0 - No FEC data in the packet
+ * -1 - Error
+ */
+int WebRtcOpus_DecodeFec(OpusDecInst* inst,
+ const uint8_t* encoded,
+ size_t encoded_bytes,
+ int16_t* decoded,
+ int16_t* audio_type);
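+
+/* FEC sketch (illustrative): when packet N is lost but packet N + 1 has
+ * arrived, the FEC data in packet N + 1 may reconstruct packet N:
+ *
+ * if (WebRtcOpus_PacketHasFec(next_packet, next_bytes) == 1) {
+ * WebRtcOpus_DecodeFec(dec, next_packet, next_bytes, pcm, &audio_type);
+ * }
+ */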
+
+/****************************************************************************
+ * WebRtcOpus_DurationEst(...)
+ *
+ * This function calculates the duration of an opus packet.
+ * Input:
+ * - inst : Decoder context
+ * - payload : Encoded data pointer
+ * - payload_length_bytes : Bytes of encoded data
+ *
+ * Return value : The duration of the packet, in samples per
+ * channel.
+ */
+int WebRtcOpus_DurationEst(OpusDecInst* inst,
+ const uint8_t* payload,
+ size_t payload_length_bytes);
+
+/****************************************************************************
+ * WebRtcOpus_PlcDuration(...)
+ *
+ * This function calculates the duration of a frame returned by packet loss
+ * concealment (PLC).
+ *
+ * Input:
+ * - inst : Decoder context
+ *
+ * Return value : The duration of a frame returned by PLC, in
+ * samples per channel.
+ */
+int WebRtcOpus_PlcDuration(OpusDecInst* inst);
+
+/* TODO(minyue): Check whether it is needed to add a decoder context to the
+ * arguments, like WebRtcOpus_DurationEst(...). In fact, the packet itself tells
+ * the duration. The decoder context in WebRtcOpus_DurationEst(...) is not used.
+ * So it may be advisable to remove it from WebRtcOpus_DurationEst(...). */
+
+/****************************************************************************
+ * WebRtcOpus_FecDurationEst(...)
+ *
+ * This function calculates the duration of the FEC data within an opus packet.
+ * Input:
+ * - payload : Encoded data pointer
+ * - payload_length_bytes : Bytes of encoded data
+ * - sample_rate_hz : Sample rate of output audio
+ *
+ * Return value : >0 - The duration of the FEC data in the
+ * packet in samples per channel.
+ * 0 - No FEC data in the packet.
+ */
+int WebRtcOpus_FecDurationEst(const uint8_t* payload,
+ size_t payload_length_bytes,
+ int sample_rate_hz);
+
+/****************************************************************************
+ * WebRtcOpus_PacketHasFec(...)
+ *
+ * This function detects if an opus packet has FEC.
+ * Input:
+ * - payload : Encoded data pointer
+ * - payload_length_bytes : Bytes of encoded data
+ *
+ * Return value : 0 - the packet does NOT contain FEC.
+ * 1 - the packet contains FEC.
+ */
+int WebRtcOpus_PacketHasFec(const uint8_t* payload,
+ size_t payload_length_bytes);
+
+/****************************************************************************
+ * WebRtcOpus_PacketHasVoiceActivity(...)
+ *
+ * This function returns the SILK VAD information encoded in the opus packet.
+ * For CELT-only packets that do not have VAD information, it returns -1.
+ * Input:
+ * - payload : Encoded data pointer
+ * - payload_length_bytes : Bytes of encoded data
+ *
+ * Return value : 0 - no frame had the VAD flag set.
+ * 1 - at least one frame had the VAD flag set.
+ * -1 - VAD status could not be determined.
+ */
+int WebRtcOpus_PacketHasVoiceActivity(const uint8_t* payload,
+ size_t payload_length_bytes);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // MODULES_AUDIO_CODING_CODECS_OPUS_OPUS_INTERFACE_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/opus/opus_speed_test.cc b/third_party/libwebrtc/modules/audio_coding/codecs/opus/opus_speed_test.cc
new file mode 100644
index 0000000000..4477e8a5f8
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/opus/opus_speed_test.cc
@@ -0,0 +1,147 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/opus/opus_interface.h"
+#include "modules/audio_coding/codecs/tools/audio_codec_speed_test.h"
+
+using ::std::string;
+
+namespace webrtc {
+
+static const int kOpusBlockDurationMs = 20;
+static const int kOpusSamplingKhz = 48;
+
+class OpusSpeedTest : public AudioCodecSpeedTest {
+ protected:
+ OpusSpeedTest();
+ void SetUp() override;
+ void TearDown() override;
+ float EncodeABlock(int16_t* in_data,
+ uint8_t* bit_stream,
+ size_t max_bytes,
+ size_t* encoded_bytes) override;
+ float DecodeABlock(const uint8_t* bit_stream,
+ size_t encoded_bytes,
+ int16_t* out_data) override;
+ WebRtcOpusEncInst* opus_encoder_;
+ WebRtcOpusDecInst* opus_decoder_;
+};
+
+OpusSpeedTest::OpusSpeedTest()
+ : AudioCodecSpeedTest(kOpusBlockDurationMs,
+ kOpusSamplingKhz,
+ kOpusSamplingKhz),
+ opus_encoder_(NULL),
+ opus_decoder_(NULL) {}
+
+void OpusSpeedTest::SetUp() {
+ AudioCodecSpeedTest::SetUp();
+ // If channels_ == 1, use Opus VOIP mode; otherwise, use audio mode.
+ int app = channels_ == 1 ? 0 : 1;
+ /* Create encoder memory. */
+ EXPECT_EQ(0, WebRtcOpus_EncoderCreate(&opus_encoder_, channels_, app, 48000));
+ EXPECT_EQ(0, WebRtcOpus_DecoderCreate(&opus_decoder_, channels_, 48000));
+ /* Set bitrate. */
+ EXPECT_EQ(0, WebRtcOpus_SetBitRate(opus_encoder_, bit_rate_));
+}
+
+void OpusSpeedTest::TearDown() {
+ AudioCodecSpeedTest::TearDown();
+ /* Free memory. */
+ EXPECT_EQ(0, WebRtcOpus_EncoderFree(opus_encoder_));
+ EXPECT_EQ(0, WebRtcOpus_DecoderFree(opus_decoder_));
+}
+
+float OpusSpeedTest::EncodeABlock(int16_t* in_data,
+ uint8_t* bit_stream,
+ size_t max_bytes,
+ size_t* encoded_bytes) {
+ clock_t clocks = clock();
+ int value = WebRtcOpus_Encode(opus_encoder_, in_data, input_length_sample_,
+ max_bytes, bit_stream);
+ clocks = clock() - clocks;
+ EXPECT_GT(value, 0);
+ *encoded_bytes = static_cast<size_t>(value);
+ return 1000.0 * clocks / CLOCKS_PER_SEC;
+}
+
+float OpusSpeedTest::DecodeABlock(const uint8_t* bit_stream,
+ size_t encoded_bytes,
+ int16_t* out_data) {
+ int value;
+ int16_t audio_type;
+ clock_t clocks = clock();
+ value = WebRtcOpus_Decode(opus_decoder_, bit_stream, encoded_bytes, out_data,
+ &audio_type);
+ clocks = clock() - clocks;
+ EXPECT_EQ(output_length_sample_, static_cast<size_t>(value));
+ return 1000.0 * clocks / CLOCKS_PER_SEC;
+}
+
+/* Test audio length in seconds. */
+constexpr size_t kDurationSec = 400;
+
+#define ADD_TEST(complexity) \
+ TEST_P(OpusSpeedTest, OpusSetComplexityTest##complexity) { \
+ /* Set complexity. */ \
+ printf("Setting complexity to %d ...\n", complexity); \
+ EXPECT_EQ(0, WebRtcOpus_SetComplexity(opus_encoder_, complexity)); \
+ EncodeDecode(kDurationSec); \
+ }
+
+ADD_TEST(10)
+ADD_TEST(9)
+ADD_TEST(8)
+ADD_TEST(7)
+ADD_TEST(6)
+ADD_TEST(5)
+ADD_TEST(4)
+ADD_TEST(3)
+ADD_TEST(2)
+ADD_TEST(1)
+ADD_TEST(0)
+
+#define ADD_BANDWIDTH_TEST(bandwidth) \
+ TEST_P(OpusSpeedTest, OpusSetBandwidthTest##bandwidth) { \
+ /* Set bandwidth. */ \
+ printf("Setting bandwidth to %d ...\n", bandwidth); \
+ EXPECT_EQ(0, WebRtcOpus_SetBandwidth(opus_encoder_, bandwidth)); \
+ EncodeDecode(kDurationSec); \
+ }
+
+ADD_BANDWIDTH_TEST(OPUS_BANDWIDTH_NARROWBAND)
+ADD_BANDWIDTH_TEST(OPUS_BANDWIDTH_MEDIUMBAND)
+ADD_BANDWIDTH_TEST(OPUS_BANDWIDTH_WIDEBAND)
+ADD_BANDWIDTH_TEST(OPUS_BANDWIDTH_SUPERWIDEBAND)
+ADD_BANDWIDTH_TEST(OPUS_BANDWIDTH_FULLBAND)
+
+// List all test cases: (channel, bit rate, filename, extension).
+const coding_param param_set[] = {
+ std::make_tuple(1,
+ 64000,
+ string("audio_coding/speech_mono_32_48kHz"),
+ string("pcm"),
+ true),
+ std::make_tuple(1,
+ 32000,
+ string("audio_coding/speech_mono_32_48kHz"),
+ string("pcm"),
+ true),
+ std::make_tuple(2,
+ 64000,
+ string("audio_coding/music_stereo_48kHz"),
+ string("pcm"),
+ true)};
+
+INSTANTIATE_TEST_SUITE_P(AllTest,
+ OpusSpeedTest,
+ ::testing::ValuesIn(param_set));
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/opus/opus_unittest.cc b/third_party/libwebrtc/modules/audio_coding/codecs/opus/opus_unittest.cc
new file mode 100644
index 0000000000..b40d73805f
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/opus/opus_unittest.cc
@@ -0,0 +1,978 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <memory>
+#include <string>
+
+#include "modules/audio_coding/codecs/opus/opus_inst.h"
+#include "modules/audio_coding/codecs/opus/opus_interface.h"
+#include "modules/audio_coding/neteq/tools/audio_loop.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "test/gtest.h"
+#include "test/testsupport/file_utils.h"
+
+namespace webrtc {
+
+namespace {
+// Equivalent to SDP params
+// {{"channel_mapping", "0,1,2,3"}, {"coupled_streams", "2"}}.
+constexpr unsigned char kQuadChannelMapping[] = {0, 1, 2, 3};
+constexpr int kQuadTotalStreams = 2;
+constexpr int kQuadCoupledStreams = 2;
+
+constexpr unsigned char kStereoChannelMapping[] = {0, 1};
+constexpr int kStereoTotalStreams = 1;
+constexpr int kStereoCoupledStreams = 1;
+
+constexpr unsigned char kMonoChannelMapping[] = {0};
+constexpr int kMonoTotalStreams = 1;
+constexpr int kMonoCoupledStreams = 0;
+
+void CreateSingleOrMultiStreamEncoder(WebRtcOpusEncInst** opus_encoder,
+ int channels,
+ int application,
+ bool use_multistream,
+ int encoder_sample_rate_hz) {
+ EXPECT_TRUE(channels == 1 || channels == 2 || use_multistream);
+ if (use_multistream) {
+ EXPECT_EQ(encoder_sample_rate_hz, 48000);
+ if (channels == 1) {
+ EXPECT_EQ(0, WebRtcOpus_MultistreamEncoderCreate(
+ opus_encoder, channels, application, kMonoTotalStreams,
+ kMonoCoupledStreams, kMonoChannelMapping));
+ } else if (channels == 2) {
+ EXPECT_EQ(0, WebRtcOpus_MultistreamEncoderCreate(
+ opus_encoder, channels, application, kStereoTotalStreams,
+ kStereoCoupledStreams, kStereoChannelMapping));
+ } else if (channels == 4) {
+ EXPECT_EQ(0, WebRtcOpus_MultistreamEncoderCreate(
+ opus_encoder, channels, application, kQuadTotalStreams,
+ kQuadCoupledStreams, kQuadChannelMapping));
+ } else {
+ EXPECT_TRUE(false) << channels;
+ }
+ } else {
+ EXPECT_EQ(0, WebRtcOpus_EncoderCreate(opus_encoder, channels, application,
+ encoder_sample_rate_hz));
+ }
+}
+
+void CreateSingleOrMultiStreamDecoder(WebRtcOpusDecInst** opus_decoder,
+ int channels,
+ bool use_multistream,
+ int decoder_sample_rate_hz) {
+ EXPECT_TRUE(channels == 1 || channels == 2 || use_multistream);
+ if (use_multistream) {
+ EXPECT_EQ(decoder_sample_rate_hz, 48000);
+ if (channels == 1) {
+ EXPECT_EQ(0, WebRtcOpus_MultistreamDecoderCreate(
+ opus_decoder, channels, kMonoTotalStreams,
+ kMonoCoupledStreams, kMonoChannelMapping));
+ } else if (channels == 2) {
+ EXPECT_EQ(0, WebRtcOpus_MultistreamDecoderCreate(
+ opus_decoder, channels, kStereoTotalStreams,
+ kStereoCoupledStreams, kStereoChannelMapping));
+ } else if (channels == 4) {
+ EXPECT_EQ(0, WebRtcOpus_MultistreamDecoderCreate(
+ opus_decoder, channels, kQuadTotalStreams,
+ kQuadCoupledStreams, kQuadChannelMapping));
+ } else {
+ EXPECT_TRUE(false) << channels;
+ }
+ } else {
+ EXPECT_EQ(0, WebRtcOpus_DecoderCreate(opus_decoder, channels,
+ decoder_sample_rate_hz));
+ }
+}
+
+int SamplesPerChannel(int sample_rate_hz, int duration_ms) {
+ const int samples_per_ms = rtc::CheckedDivExact(sample_rate_hz, 1000);
+ return samples_per_ms * duration_ms;
+}
+
+using test::AudioLoop;
+using ::testing::Combine;
+using ::testing::TestWithParam;
+using ::testing::Values;
+
+// Maximum number of bytes in output bitstream.
+const size_t kMaxBytes = 2000;
+
+class OpusTest
+ : public TestWithParam<::testing::tuple<size_t, int, bool, int, int>> {
+ protected:
+ OpusTest() = default;
+
+ void TestDtxEffect(bool dtx, int block_length_ms);
+
+ void TestCbrEffect(bool dtx, int block_length_ms);
+
+ // Prepare `speech_data_` for encoding, read from a hard-coded file.
+ // After preparation, `speech_data_.GetNextBlock()` returns a pointer to a
+ // block of `block_length_ms` milliseconds. The data is looped every
+ // `loop_length_ms` milliseconds.
+ void PrepareSpeechData(int block_length_ms, int loop_length_ms);
+
+ int EncodeDecode(WebRtcOpusEncInst* encoder,
+ rtc::ArrayView<const int16_t> input_audio,
+ WebRtcOpusDecInst* decoder,
+ int16_t* output_audio,
+ int16_t* audio_type);
+
+ void SetMaxPlaybackRate(WebRtcOpusEncInst* encoder,
+ opus_int32 expect,
+ int32_t set);
+
+ void CheckAudioBounded(const int16_t* audio,
+ size_t samples,
+ size_t channels,
+ uint16_t bound) const;
+
+ WebRtcOpusEncInst* opus_encoder_ = nullptr;
+ WebRtcOpusDecInst* opus_decoder_ = nullptr;
+ AudioLoop speech_data_;
+ uint8_t bitstream_[kMaxBytes];
+ size_t encoded_bytes_ = 0;
+ const size_t channels_{std::get<0>(GetParam())};
+ const int application_{std::get<1>(GetParam())};
+ const bool use_multistream_{std::get<2>(GetParam())};
+ const int encoder_sample_rate_hz_{std::get<3>(GetParam())};
+ const int decoder_sample_rate_hz_{std::get<4>(GetParam())};
+};
+
+} // namespace
+
+// Singlestream: Try all combinations.
+INSTANTIATE_TEST_SUITE_P(Singlestream,
+ OpusTest,
+ testing::Combine(testing::Values(1, 2),
+ testing::Values(0, 1),
+ testing::Values(false),
+ testing::Values(16000, 48000),
+ testing::Values(16000, 48000)));
+
+// Multistream: Some representative cases (only 48 kHz for now).
+INSTANTIATE_TEST_SUITE_P(
+ Multistream,
+ OpusTest,
+ testing::Values(std::make_tuple(1, 0, true, 48000, 48000),
+ std::make_tuple(2, 1, true, 48000, 48000),
+ std::make_tuple(4, 0, true, 48000, 48000),
+ std::make_tuple(4, 1, true, 48000, 48000)));
+
+void OpusTest::PrepareSpeechData(int block_length_ms, int loop_length_ms) {
+ std::map<int, std::string> channel_to_basename = {
+ {1, "audio_coding/testfile32kHz"},
+ {2, "audio_coding/teststereo32kHz"},
+ {4, "audio_coding/speech_4_channels_48k_one_second"}};
+ std::map<int, std::string> channel_to_suffix = {
+ {1, "pcm"}, {2, "pcm"}, {4, "wav"}};
+ const std::string file_name = webrtc::test::ResourcePath(
+ channel_to_basename[channels_], channel_to_suffix[channels_]);
+ if (loop_length_ms < block_length_ms) {
+ loop_length_ms = block_length_ms;
+ }
+ const int sample_rate_khz =
+ rtc::CheckedDivExact(encoder_sample_rate_hz_, 1000);
+ EXPECT_TRUE(speech_data_.Init(file_name,
+ loop_length_ms * sample_rate_khz * channels_,
+ block_length_ms * sample_rate_khz * channels_));
+}
+
+void OpusTest::SetMaxPlaybackRate(WebRtcOpusEncInst* encoder,
+ opus_int32 expect,
+ int32_t set) {
+ opus_int32 bandwidth;
+ EXPECT_EQ(0, WebRtcOpus_SetMaxPlaybackRate(opus_encoder_, set));
+ EXPECT_EQ(0, WebRtcOpus_GetMaxPlaybackRate(opus_encoder_, &bandwidth));
+ EXPECT_EQ(expect, bandwidth);
+}
+
+void OpusTest::CheckAudioBounded(const int16_t* audio,
+ size_t samples,
+ size_t channels,
+ uint16_t bound) const {
+ for (size_t i = 0; i < samples; ++i) {
+ for (size_t c = 0; c < channels; ++c) {
+ ASSERT_GE(audio[i * channels + c], -bound);
+ ASSERT_LE(audio[i * channels + c], bound);
+ }
+ }
+}
+
+int OpusTest::EncodeDecode(WebRtcOpusEncInst* encoder,
+ rtc::ArrayView<const int16_t> input_audio,
+ WebRtcOpusDecInst* decoder,
+ int16_t* output_audio,
+ int16_t* audio_type) {
+ const int input_samples_per_channel =
+ rtc::CheckedDivExact(input_audio.size(), channels_);
+ int encoded_bytes_int =
+ WebRtcOpus_Encode(encoder, input_audio.data(), input_samples_per_channel,
+ kMaxBytes, bitstream_);
+ EXPECT_GE(encoded_bytes_int, 0);
+ encoded_bytes_ = static_cast<size_t>(encoded_bytes_int);
+ if (encoded_bytes_ != 0) {
+ int est_len = WebRtcOpus_DurationEst(decoder, bitstream_, encoded_bytes_);
+ int act_len = WebRtcOpus_Decode(decoder, bitstream_, encoded_bytes_,
+ output_audio, audio_type);
+ EXPECT_EQ(est_len, act_len);
+ return act_len;
+ } else {
+ int total_dtx_len = 0;
+ const int output_samples_per_channel = input_samples_per_channel *
+ decoder_sample_rate_hz_ /
+ encoder_sample_rate_hz_;
+ while (total_dtx_len < output_samples_per_channel) {
+ int est_len = WebRtcOpus_DurationEst(decoder, NULL, 0);
+ int act_len = WebRtcOpus_Decode(decoder, NULL, 0,
+ &output_audio[total_dtx_len * channels_],
+ audio_type);
+ EXPECT_EQ(est_len, act_len);
+ total_dtx_len += act_len;
+ }
+ return total_dtx_len;
+ }
+}
+
+// Test that the encoder/decoder can enter DTX mode properly and does not
+// enter DTX when it should not. This test is signal dependent.
+void OpusTest::TestDtxEffect(bool dtx, int block_length_ms) {
+ PrepareSpeechData(block_length_ms, 2000);
+ const size_t input_samples =
+ rtc::CheckedDivExact(encoder_sample_rate_hz_, 1000) * block_length_ms;
+ const size_t output_samples =
+ rtc::CheckedDivExact(decoder_sample_rate_hz_, 1000) * block_length_ms;
+
+ // Create encoder memory.
+ CreateSingleOrMultiStreamEncoder(&opus_encoder_, channels_, application_,
+ use_multistream_, encoder_sample_rate_hz_);
+ CreateSingleOrMultiStreamDecoder(&opus_decoder_, channels_, use_multistream_,
+ decoder_sample_rate_hz_);
+
+ // Set bitrate.
+ EXPECT_EQ(
+ 0, WebRtcOpus_SetBitRate(opus_encoder_, channels_ == 1 ? 32000 : 64000));
+
+ // Set input audio as silence.
+ std::vector<int16_t> silence(input_samples * channels_, 0);
+
+ // Setting DTX.
+ EXPECT_EQ(0, dtx ? WebRtcOpus_EnableDtx(opus_encoder_)
+ : WebRtcOpus_DisableDtx(opus_encoder_));
+
+ int16_t audio_type;
+ int16_t* output_data_decode = new int16_t[output_samples * channels_];
+
+ for (int i = 0; i < 100; ++i) {
+ EXPECT_EQ(output_samples,
+ static_cast<size_t>(EncodeDecode(
+ opus_encoder_, speech_data_.GetNextBlock(), opus_decoder_,
+ output_data_decode, &audio_type)));
+ // If DTX is off, the encoder should never enter DTX mode. If DTX is on, we
+ // do not care, since whether it enters DTX depends on the signal type.
+ if (!dtx) {
+ EXPECT_GT(encoded_bytes_, 1U);
+ EXPECT_EQ(0, opus_encoder_->in_dtx_mode);
+ EXPECT_EQ(0, opus_decoder_->in_dtx_mode);
+ EXPECT_EQ(0, audio_type); // Speech.
+ }
+ }
+
+ // We input some silent segments. In DTX mode, the encoder will stop sending,
+ // although DTX may only kick in after a while.
+ for (int i = 0; i < 30; ++i) {
+ EXPECT_EQ(output_samples, static_cast<size_t>(EncodeDecode(
+ opus_encoder_, silence, opus_decoder_,
+ output_data_decode, &audio_type)));
+ if (!dtx) {
+ EXPECT_GT(encoded_bytes_, 1U);
+ EXPECT_EQ(0, opus_encoder_->in_dtx_mode);
+ EXPECT_EQ(0, opus_decoder_->in_dtx_mode);
+ EXPECT_EQ(0, audio_type); // Speech.
+ } else if (encoded_bytes_ == 1) {
+ EXPECT_EQ(1, opus_encoder_->in_dtx_mode);
+ EXPECT_EQ(1, opus_decoder_->in_dtx_mode);
+ EXPECT_EQ(2, audio_type); // Comfort noise.
+ break;
+ }
+ }
+
+ // When Opus is in DTX, it wakes up on a regular basis. It sends two packets,
+ // one of arbitrary size and the other of 1 byte, then stops sending for
+ // a certain number of frames.
+
+ // `max_dtx_frames` is the maximum number of frames Opus can stay in DTX.
+ // TODO(kwiberg): Why does this number depend on the encoding sample rate?
+ const int max_dtx_frames =
+ (encoder_sample_rate_hz_ == 16000 ? 800 : 400) / block_length_ms + 1;
+
+ // We run `kRunTimeMs` milliseconds of pure silence.
+ const int kRunTimeMs = 4500;
+
+ // We check that, after `kCheckTimeMs` milliseconds (given that the CNG in
+ // Opus needs time to adapt), the absolute values of the DTX-decoded signal
+ // are bounded by `kOutputValueBound`.
+ const int kCheckTimeMs = 4000;
+
+#if defined(OPUS_FIXED_POINT)
+ // Fixed-point Opus generates random comfort noise, which has a less
+ // predictable value bound than floating-point Opus. The bound depends on the
+ // input signal and on the time window for checking the output values (between
+ // `kCheckTimeMs` and `kRunTimeMs`).
+ const uint16_t kOutputValueBound = 30;
+
+#else
+ const uint16_t kOutputValueBound = 2;
+#endif
+
+ int time = 0;
+ while (time < kRunTimeMs) {
+ // DTX mode is maintained for at most `max_dtx_frames` frames.
+ int i = 0;
+ for (; i < max_dtx_frames; ++i) {
+ time += block_length_ms;
+ EXPECT_EQ(output_samples, static_cast<size_t>(EncodeDecode(
+ opus_encoder_, silence, opus_decoder_,
+ output_data_decode, &audio_type)));
+ if (dtx) {
+ if (encoded_bytes_ > 1)
+ break;
+ EXPECT_EQ(0U, encoded_bytes_) // Sends 0 bytes.
+ << "Opus should have entered DTX mode.";
+ EXPECT_EQ(1, opus_encoder_->in_dtx_mode);
+ EXPECT_EQ(1, opus_decoder_->in_dtx_mode);
+ EXPECT_EQ(2, audio_type); // Comfort noise.
+ if (time >= kCheckTimeMs) {
+ CheckAudioBounded(output_data_decode, output_samples, channels_,
+ kOutputValueBound);
+ }
+ } else {
+ EXPECT_GT(encoded_bytes_, 1U);
+ EXPECT_EQ(0, opus_encoder_->in_dtx_mode);
+ EXPECT_EQ(0, opus_decoder_->in_dtx_mode);
+ EXPECT_EQ(0, audio_type); // Speech.
+ }
+ }
+
+ if (dtx) {
+ // With DTX, Opus must stop transmission for some time.
+ EXPECT_GT(i, 1);
+ }
+
+ // We expect a normal payload.
+ EXPECT_EQ(0, opus_encoder_->in_dtx_mode);
+ EXPECT_EQ(0, opus_decoder_->in_dtx_mode);
+ EXPECT_EQ(0, audio_type); // Speech.
+
+ // Enters DTX again immediately.
+ time += block_length_ms;
+ EXPECT_EQ(output_samples, static_cast<size_t>(EncodeDecode(
+ opus_encoder_, silence, opus_decoder_,
+ output_data_decode, &audio_type)));
+ if (dtx) {
+ EXPECT_EQ(1U, encoded_bytes_); // Sends 1 byte.
+ EXPECT_EQ(1, opus_encoder_->in_dtx_mode);
+ EXPECT_EQ(1, opus_decoder_->in_dtx_mode);
+ EXPECT_EQ(2, audio_type); // Comfort noise.
+ if (time >= kCheckTimeMs) {
+ CheckAudioBounded(output_data_decode, output_samples, channels_,
+ kOutputValueBound);
+ }
+ } else {
+ EXPECT_GT(encoded_bytes_, 1U);
+ EXPECT_EQ(0, opus_encoder_->in_dtx_mode);
+ EXPECT_EQ(0, opus_decoder_->in_dtx_mode);
+ EXPECT_EQ(0, audio_type); // Speech.
+ }
+ }
+
+ silence[0] = 10000;
+ if (dtx) {
+ // Verify that encoder/decoder can jump out from DTX mode.
+ EXPECT_EQ(output_samples, static_cast<size_t>(EncodeDecode(
+ opus_encoder_, silence, opus_decoder_,
+ output_data_decode, &audio_type)));
+ EXPECT_GT(encoded_bytes_, 1U);
+ EXPECT_EQ(0, opus_encoder_->in_dtx_mode);
+ EXPECT_EQ(0, opus_decoder_->in_dtx_mode);
+ EXPECT_EQ(0, audio_type); // Speech.
+ }
+
+ // Free memory.
+ delete[] output_data_decode;
+ EXPECT_EQ(0, WebRtcOpus_EncoderFree(opus_encoder_));
+ EXPECT_EQ(0, WebRtcOpus_DecoderFree(opus_decoder_));
+}
+
+// Test if CBR does what we expect.
+void OpusTest::TestCbrEffect(bool cbr, int block_length_ms) {
+ PrepareSpeechData(block_length_ms, 2000);
+ const size_t output_samples =
+ rtc::CheckedDivExact(decoder_sample_rate_hz_, 1000) * block_length_ms;
+
+ int32_t max_pkt_size_diff = 0;
+ int32_t prev_pkt_size = 0;
+
+ // Create encoder memory.
+ CreateSingleOrMultiStreamEncoder(&opus_encoder_, channels_, application_,
+ use_multistream_, encoder_sample_rate_hz_);
+ CreateSingleOrMultiStreamDecoder(&opus_decoder_, channels_, use_multistream_,
+ decoder_sample_rate_hz_);
+
+ // Set bitrate.
+ EXPECT_EQ(
+ 0, WebRtcOpus_SetBitRate(opus_encoder_, channels_ == 1 ? 32000 : 64000));
+
+ // Setting CBR.
+ EXPECT_EQ(0, cbr ? WebRtcOpus_EnableCbr(opus_encoder_)
+ : WebRtcOpus_DisableCbr(opus_encoder_));
+
+ int16_t audio_type;
+ std::vector<int16_t> audio_out(output_samples * channels_);
+ for (int i = 0; i < 100; ++i) {
+ EXPECT_EQ(output_samples,
+ static_cast<size_t>(
+ EncodeDecode(opus_encoder_, speech_data_.GetNextBlock(),
+ opus_decoder_, audio_out.data(), &audio_type)));
+
+ if (prev_pkt_size > 0) {
+ int32_t diff = std::abs((int32_t)encoded_bytes_ - prev_pkt_size);
+ max_pkt_size_diff = std::max(max_pkt_size_diff, diff);
+ }
+ prev_pkt_size = rtc::checked_cast<int32_t>(encoded_bytes_);
+ }
+
+ if (cbr) {
+ EXPECT_EQ(max_pkt_size_diff, 0);
+ } else {
+ EXPECT_GT(max_pkt_size_diff, 0);
+ }
+
+ // Free memory.
+ EXPECT_EQ(0, WebRtcOpus_EncoderFree(opus_encoder_));
+ EXPECT_EQ(0, WebRtcOpus_DecoderFree(opus_decoder_));
+}
+
+// Test failing Create.
+TEST(OpusTest, OpusCreateFail) {
+ WebRtcOpusEncInst* opus_encoder;
+ WebRtcOpusDecInst* opus_decoder;
+
+ // Test to see that an invalid pointer is caught.
+ EXPECT_EQ(-1, WebRtcOpus_EncoderCreate(NULL, 1, 0, 48000));
+ // Invalid channel number.
+ EXPECT_EQ(-1, WebRtcOpus_EncoderCreate(&opus_encoder, 257, 0, 48000));
+ // Invalid application mode.
+ EXPECT_EQ(-1, WebRtcOpus_EncoderCreate(&opus_encoder, 1, 2, 48000));
+ // Invalid sample rate.
+ EXPECT_EQ(-1, WebRtcOpus_EncoderCreate(&opus_encoder, 1, 0, 12345));
+
+ EXPECT_EQ(-1, WebRtcOpus_DecoderCreate(NULL, 1, 48000));
+ // Invalid channel number.
+ EXPECT_EQ(-1, WebRtcOpus_DecoderCreate(&opus_decoder, 257, 48000));
+ // Invalid sample rate.
+ EXPECT_EQ(-1, WebRtcOpus_DecoderCreate(&opus_decoder, 1, 12345));
+}
+
+// Test failing Free.
+TEST(OpusTest, OpusFreeFail) {
+ // Test to see that an invalid pointer is caught.
+ EXPECT_EQ(-1, WebRtcOpus_EncoderFree(NULL));
+ EXPECT_EQ(-1, WebRtcOpus_DecoderFree(NULL));
+}
+
+// Test normal Create and Free.
+TEST_P(OpusTest, OpusCreateFree) {
+ CreateSingleOrMultiStreamEncoder(&opus_encoder_, channels_, application_,
+ use_multistream_, encoder_sample_rate_hz_);
+ CreateSingleOrMultiStreamDecoder(&opus_decoder_, channels_, use_multistream_,
+ decoder_sample_rate_hz_);
+ EXPECT_TRUE(opus_encoder_ != NULL);
+ EXPECT_TRUE(opus_decoder_ != NULL);
+ // Free encoder and decoder memory.
+ EXPECT_EQ(0, WebRtcOpus_EncoderFree(opus_encoder_));
+ EXPECT_EQ(0, WebRtcOpus_DecoderFree(opus_decoder_));
+}
+
+#define ENCODER_CTL(inst, vargs) \
+ inst->encoder \
+ ? opus_encoder_ctl(inst->encoder, vargs) \
+ : opus_multistream_encoder_ctl(inst->multistream_encoder, vargs)
+
+TEST_P(OpusTest, OpusEncodeDecode) {
+ PrepareSpeechData(20, 20);
+
+ // Create encoder memory.
+ CreateSingleOrMultiStreamEncoder(&opus_encoder_, channels_, application_,
+ use_multistream_, encoder_sample_rate_hz_);
+ CreateSingleOrMultiStreamDecoder(&opus_decoder_, channels_, use_multistream_,
+ decoder_sample_rate_hz_);
+
+ // Set bitrate.
+ EXPECT_EQ(
+ 0, WebRtcOpus_SetBitRate(opus_encoder_, channels_ == 1 ? 32000 : 64000));
+
+ // Check number of channels for decoder.
+ EXPECT_EQ(channels_, WebRtcOpus_DecoderChannels(opus_decoder_));
+
+ // Check application mode.
+ opus_int32 app;
+ ENCODER_CTL(opus_encoder_, OPUS_GET_APPLICATION(&app));
+ EXPECT_EQ(application_ == 0 ? OPUS_APPLICATION_VOIP : OPUS_APPLICATION_AUDIO,
+ app);
+
+ // Encode & decode.
+ int16_t audio_type;
+ const int decode_samples_per_channel =
+ SamplesPerChannel(decoder_sample_rate_hz_, /*ms=*/20);
+ int16_t* output_data_decode =
+ new int16_t[decode_samples_per_channel * channels_];
+ EXPECT_EQ(decode_samples_per_channel,
+ EncodeDecode(opus_encoder_, speech_data_.GetNextBlock(),
+ opus_decoder_, output_data_decode, &audio_type));
+
+ // Free memory.
+ delete[] output_data_decode;
+ EXPECT_EQ(0, WebRtcOpus_EncoderFree(opus_encoder_));
+ EXPECT_EQ(0, WebRtcOpus_DecoderFree(opus_decoder_));
+}
+
+TEST_P(OpusTest, OpusSetBitRate) {
+ // Test without creating encoder memory.
+ EXPECT_EQ(-1, WebRtcOpus_SetBitRate(opus_encoder_, 60000));
+
+ // Create encoder memory, try with different bitrates.
+ CreateSingleOrMultiStreamEncoder(&opus_encoder_, channels_, application_,
+ use_multistream_, encoder_sample_rate_hz_);
+ EXPECT_EQ(0, WebRtcOpus_SetBitRate(opus_encoder_, 30000));
+ EXPECT_EQ(0, WebRtcOpus_SetBitRate(opus_encoder_, 60000));
+ EXPECT_EQ(0, WebRtcOpus_SetBitRate(opus_encoder_, 300000));
+ EXPECT_EQ(0, WebRtcOpus_SetBitRate(opus_encoder_, 600000));
+
+ // Free memory.
+ EXPECT_EQ(0, WebRtcOpus_EncoderFree(opus_encoder_));
+}
+
+TEST_P(OpusTest, OpusSetComplexity) {
+ // Test without creating encoder memory.
+ EXPECT_EQ(-1, WebRtcOpus_SetComplexity(opus_encoder_, 9));
+
+ // Create encoder memory, try with different complexities.
+ CreateSingleOrMultiStreamEncoder(&opus_encoder_, channels_, application_,
+ use_multistream_, encoder_sample_rate_hz_);
+
+ EXPECT_EQ(0, WebRtcOpus_SetComplexity(opus_encoder_, 0));
+ EXPECT_EQ(0, WebRtcOpus_SetComplexity(opus_encoder_, 10));
+ EXPECT_EQ(-1, WebRtcOpus_SetComplexity(opus_encoder_, 11));
+
+ // Free memory.
+ EXPECT_EQ(0, WebRtcOpus_EncoderFree(opus_encoder_));
+}
+
+TEST_P(OpusTest, OpusSetBandwidth) {
+ if (channels_ > 2) {
+ // TODO(webrtc:10217): investigate why multi-stream Opus reports
+ // narrowband when it's configured with FULLBAND.
+ return;
+ }
+ PrepareSpeechData(20, 20);
+
+ int16_t audio_type;
+ const int decode_samples_per_channel =
+ SamplesPerChannel(decoder_sample_rate_hz_, /*ms=*/20);
+ std::unique_ptr<int16_t[]> output_data_decode(
+ new int16_t[decode_samples_per_channel * channels_]());
+
+ // Test without creating encoder memory.
+ EXPECT_EQ(-1,
+ WebRtcOpus_SetBandwidth(opus_encoder_, OPUS_BANDWIDTH_NARROWBAND));
+ EXPECT_EQ(-1, WebRtcOpus_GetBandwidth(opus_encoder_));
+
+ // Create encoder memory, try with different bandwidths.
+ CreateSingleOrMultiStreamEncoder(&opus_encoder_, channels_, application_,
+ use_multistream_, encoder_sample_rate_hz_);
+ CreateSingleOrMultiStreamDecoder(&opus_decoder_, channels_, use_multistream_,
+ decoder_sample_rate_hz_);
+
+ EXPECT_EQ(-1, WebRtcOpus_SetBandwidth(opus_encoder_,
+ OPUS_BANDWIDTH_NARROWBAND - 1));
+ EXPECT_EQ(0,
+ WebRtcOpus_SetBandwidth(opus_encoder_, OPUS_BANDWIDTH_NARROWBAND));
+ EncodeDecode(opus_encoder_, speech_data_.GetNextBlock(), opus_decoder_,
+ output_data_decode.get(), &audio_type);
+ EXPECT_EQ(OPUS_BANDWIDTH_NARROWBAND, WebRtcOpus_GetBandwidth(opus_encoder_));
+ EXPECT_EQ(0, WebRtcOpus_SetBandwidth(opus_encoder_, OPUS_BANDWIDTH_FULLBAND));
+ EncodeDecode(opus_encoder_, speech_data_.GetNextBlock(), opus_decoder_,
+ output_data_decode.get(), &audio_type);
+ EXPECT_EQ(encoder_sample_rate_hz_ == 16000 ? OPUS_BANDWIDTH_WIDEBAND
+ : OPUS_BANDWIDTH_FULLBAND,
+ WebRtcOpus_GetBandwidth(opus_encoder_));
+ EXPECT_EQ(
+ -1, WebRtcOpus_SetBandwidth(opus_encoder_, OPUS_BANDWIDTH_FULLBAND + 1));
+ EncodeDecode(opus_encoder_, speech_data_.GetNextBlock(), opus_decoder_,
+ output_data_decode.get(), &audio_type);
+ EXPECT_EQ(encoder_sample_rate_hz_ == 16000 ? OPUS_BANDWIDTH_WIDEBAND
+ : OPUS_BANDWIDTH_FULLBAND,
+ WebRtcOpus_GetBandwidth(opus_encoder_));
+
+ // Free memory.
+ EXPECT_EQ(0, WebRtcOpus_EncoderFree(opus_encoder_));
+ EXPECT_EQ(0, WebRtcOpus_DecoderFree(opus_decoder_));
+}
+
+TEST_P(OpusTest, OpusForceChannels) {
+ // Test without creating encoder memory.
+ EXPECT_EQ(-1, WebRtcOpus_SetForceChannels(opus_encoder_, 1));
+
+ CreateSingleOrMultiStreamEncoder(&opus_encoder_, channels_, application_,
+ use_multistream_, encoder_sample_rate_hz_);
+ ASSERT_NE(nullptr, opus_encoder_);
+
+ if (channels_ >= 2) {
+ EXPECT_EQ(-1, WebRtcOpus_SetForceChannels(opus_encoder_, 3));
+ EXPECT_EQ(0, WebRtcOpus_SetForceChannels(opus_encoder_, 2));
+ EXPECT_EQ(0, WebRtcOpus_SetForceChannels(opus_encoder_, 1));
+ EXPECT_EQ(0, WebRtcOpus_SetForceChannels(opus_encoder_, 0));
+ } else {
+ EXPECT_EQ(-1, WebRtcOpus_SetForceChannels(opus_encoder_, 2));
+ EXPECT_EQ(0, WebRtcOpus_SetForceChannels(opus_encoder_, 1));
+ EXPECT_EQ(0, WebRtcOpus_SetForceChannels(opus_encoder_, 0));
+ }
+
+ EXPECT_EQ(0, WebRtcOpus_EncoderFree(opus_encoder_));
+}
+
+// Encode and decode one frame, initialize the decoder and
+// decode once more.
+TEST_P(OpusTest, OpusDecodeInit) {
+ PrepareSpeechData(20, 20);
+
+ // Create encoder memory.
+ CreateSingleOrMultiStreamEncoder(&opus_encoder_, channels_, application_,
+ use_multistream_, encoder_sample_rate_hz_);
+ CreateSingleOrMultiStreamDecoder(&opus_decoder_, channels_, use_multistream_,
+ decoder_sample_rate_hz_);
+
+ // Encode & decode.
+ int16_t audio_type;
+ const int decode_samples_per_channel =
+ SamplesPerChannel(decoder_sample_rate_hz_, /*ms=*/20);
+ int16_t* output_data_decode =
+ new int16_t[decode_samples_per_channel * channels_];
+ EXPECT_EQ(decode_samples_per_channel,
+ EncodeDecode(opus_encoder_, speech_data_.GetNextBlock(),
+ opus_decoder_, output_data_decode, &audio_type));
+
+ WebRtcOpus_DecoderInit(opus_decoder_);
+
+ EXPECT_EQ(decode_samples_per_channel,
+ WebRtcOpus_Decode(opus_decoder_, bitstream_, encoded_bytes_,
+ output_data_decode, &audio_type));
+
+ // Free memory.
+ delete[] output_data_decode;
+ EXPECT_EQ(0, WebRtcOpus_EncoderFree(opus_encoder_));
+ EXPECT_EQ(0, WebRtcOpus_DecoderFree(opus_decoder_));
+}
+
+TEST_P(OpusTest, OpusEnableDisableFec) {
+ // Test without creating encoder memory.
+ EXPECT_EQ(-1, WebRtcOpus_EnableFec(opus_encoder_));
+ EXPECT_EQ(-1, WebRtcOpus_DisableFec(opus_encoder_));
+
+ // Create encoder memory.
+ CreateSingleOrMultiStreamEncoder(&opus_encoder_, channels_, application_,
+ use_multistream_, encoder_sample_rate_hz_);
+
+ EXPECT_EQ(0, WebRtcOpus_EnableFec(opus_encoder_));
+ EXPECT_EQ(0, WebRtcOpus_DisableFec(opus_encoder_));
+
+ // Free memory.
+ EXPECT_EQ(0, WebRtcOpus_EncoderFree(opus_encoder_));
+}
+
+TEST_P(OpusTest, OpusEnableDisableDtx) {
+ // Test without creating encoder memory.
+ EXPECT_EQ(-1, WebRtcOpus_EnableDtx(opus_encoder_));
+ EXPECT_EQ(-1, WebRtcOpus_DisableDtx(opus_encoder_));
+
+ // Create encoder memory.
+ CreateSingleOrMultiStreamEncoder(&opus_encoder_, channels_, application_,
+ use_multistream_, encoder_sample_rate_hz_);
+
+ opus_int32 dtx;
+
+ // DTX is off by default.
+ ENCODER_CTL(opus_encoder_, OPUS_GET_DTX(&dtx));
+ EXPECT_EQ(0, dtx);
+
+ // Test to enable DTX.
+ EXPECT_EQ(0, WebRtcOpus_EnableDtx(opus_encoder_));
+ ENCODER_CTL(opus_encoder_, OPUS_GET_DTX(&dtx));
+ EXPECT_EQ(1, dtx);
+
+ // Test to disable DTX.
+ EXPECT_EQ(0, WebRtcOpus_DisableDtx(opus_encoder_));
+ ENCODER_CTL(opus_encoder_, OPUS_GET_DTX(&dtx));
+ EXPECT_EQ(0, dtx);
+
+ // Free memory.
+ EXPECT_EQ(0, WebRtcOpus_EncoderFree(opus_encoder_));
+}
+
+TEST_P(OpusTest, OpusDtxOff) {
+ TestDtxEffect(false, 10);
+ TestDtxEffect(false, 20);
+ TestDtxEffect(false, 40);
+}
+
+TEST_P(OpusTest, OpusDtxOn) {
+ if (channels_ > 2) {
+ // TODO(webrtc:10218): adapt the test to the sizes and order of multi-stream
+ // DTX packets.
+ return;
+ }
+ TestDtxEffect(true, 10);
+ TestDtxEffect(true, 20);
+ TestDtxEffect(true, 40);
+}
+
+TEST_P(OpusTest, OpusCbrOff) {
+ TestCbrEffect(false, 10);
+ TestCbrEffect(false, 20);
+ TestCbrEffect(false, 40);
+}
+
+TEST_P(OpusTest, OpusCbrOn) {
+ TestCbrEffect(true, 10);
+ TestCbrEffect(true, 20);
+ TestCbrEffect(true, 40);
+}
+
+TEST_P(OpusTest, OpusSetPacketLossRate) {
+ // Test without creating encoder memory.
+ EXPECT_EQ(-1, WebRtcOpus_SetPacketLossRate(opus_encoder_, 50));
+
+ // Create encoder memory.
+ CreateSingleOrMultiStreamEncoder(&opus_encoder_, channels_, application_,
+ use_multistream_, encoder_sample_rate_hz_);
+
+ EXPECT_EQ(0, WebRtcOpus_SetPacketLossRate(opus_encoder_, 50));
+ EXPECT_EQ(-1, WebRtcOpus_SetPacketLossRate(opus_encoder_, -1));
+ EXPECT_EQ(-1, WebRtcOpus_SetPacketLossRate(opus_encoder_, 101));
+
+ // Free memory.
+ EXPECT_EQ(0, WebRtcOpus_EncoderFree(opus_encoder_));
+}
+
+TEST_P(OpusTest, OpusSetMaxPlaybackRate) {
+ // Test without creating encoder memory.
+ EXPECT_EQ(-1, WebRtcOpus_SetMaxPlaybackRate(opus_encoder_, 20000));
+
+ // Create encoder memory.
+ CreateSingleOrMultiStreamEncoder(&opus_encoder_, channels_, application_,
+ use_multistream_, encoder_sample_rate_hz_);
+
+ SetMaxPlaybackRate(opus_encoder_, OPUS_BANDWIDTH_FULLBAND, 48000);
+ SetMaxPlaybackRate(opus_encoder_, OPUS_BANDWIDTH_FULLBAND, 24001);
+ SetMaxPlaybackRate(opus_encoder_, OPUS_BANDWIDTH_SUPERWIDEBAND, 24000);
+ SetMaxPlaybackRate(opus_encoder_, OPUS_BANDWIDTH_SUPERWIDEBAND, 16001);
+ SetMaxPlaybackRate(opus_encoder_, OPUS_BANDWIDTH_WIDEBAND, 16000);
+ SetMaxPlaybackRate(opus_encoder_, OPUS_BANDWIDTH_WIDEBAND, 12001);
+ SetMaxPlaybackRate(opus_encoder_, OPUS_BANDWIDTH_MEDIUMBAND, 12000);
+ SetMaxPlaybackRate(opus_encoder_, OPUS_BANDWIDTH_MEDIUMBAND, 8001);
+ SetMaxPlaybackRate(opus_encoder_, OPUS_BANDWIDTH_NARROWBAND, 8000);
+ SetMaxPlaybackRate(opus_encoder_, OPUS_BANDWIDTH_NARROWBAND, 4000);
+
+ // Free memory.
+ EXPECT_EQ(0, WebRtcOpus_EncoderFree(opus_encoder_));
+}
+
+// Test PLC.
+TEST_P(OpusTest, OpusDecodePlc) {
+ PrepareSpeechData(20, 20);
+
+ // Create encoder memory.
+ CreateSingleOrMultiStreamEncoder(&opus_encoder_, channels_, application_,
+ use_multistream_, encoder_sample_rate_hz_);
+ CreateSingleOrMultiStreamDecoder(&opus_decoder_, channels_, use_multistream_,
+ decoder_sample_rate_hz_);
+
+ // Set bitrate.
+ EXPECT_EQ(
+ 0, WebRtcOpus_SetBitRate(opus_encoder_, channels_ == 1 ? 32000 : 64000));
+
+ // Check number of channels for decoder.
+ EXPECT_EQ(channels_, WebRtcOpus_DecoderChannels(opus_decoder_));
+
+ // Encode & decode.
+ int16_t audio_type;
+ const int decode_samples_per_channel =
+ SamplesPerChannel(decoder_sample_rate_hz_, /*ms=*/20);
+ int16_t* output_data_decode =
+ new int16_t[decode_samples_per_channel * channels_];
+ EXPECT_EQ(decode_samples_per_channel,
+ EncodeDecode(opus_encoder_, speech_data_.GetNextBlock(),
+ opus_decoder_, output_data_decode, &audio_type));
+
+ // Call decoder PLC.
+ constexpr int kPlcDurationMs = 10;
+ const int plc_samples = decoder_sample_rate_hz_ * kPlcDurationMs / 1000;
+ int16_t* plc_buffer = new int16_t[plc_samples * channels_];
+ EXPECT_EQ(plc_samples,
+ WebRtcOpus_Decode(opus_decoder_, NULL, 0, plc_buffer, &audio_type));
+
+ // Free memory.
+ delete[] plc_buffer;
+ delete[] output_data_decode;
+ EXPECT_EQ(0, WebRtcOpus_EncoderFree(opus_encoder_));
+ EXPECT_EQ(0, WebRtcOpus_DecoderFree(opus_decoder_));
+}
+
+// Duration estimation.
+TEST_P(OpusTest, OpusDurationEstimation) {
+ PrepareSpeechData(20, 20);
+
+ // Create.
+ CreateSingleOrMultiStreamEncoder(&opus_encoder_, channels_, application_,
+ use_multistream_, encoder_sample_rate_hz_);
+ CreateSingleOrMultiStreamDecoder(&opus_decoder_, channels_, use_multistream_,
+ decoder_sample_rate_hz_);
+
+  // 10 ms. We use only the first 10 ms of a 20 ms block.
+ auto speech_block = speech_data_.GetNextBlock();
+ int encoded_bytes_int = WebRtcOpus_Encode(
+ opus_encoder_, speech_block.data(),
+ rtc::CheckedDivExact(speech_block.size(), 2 * channels_), kMaxBytes,
+ bitstream_);
+ EXPECT_GE(encoded_bytes_int, 0);
+ EXPECT_EQ(SamplesPerChannel(decoder_sample_rate_hz_, /*ms=*/10),
+ WebRtcOpus_DurationEst(opus_decoder_, bitstream_,
+ static_cast<size_t>(encoded_bytes_int)));
+
+ // 20 ms
+ speech_block = speech_data_.GetNextBlock();
+ encoded_bytes_int =
+ WebRtcOpus_Encode(opus_encoder_, speech_block.data(),
+ rtc::CheckedDivExact(speech_block.size(), channels_),
+ kMaxBytes, bitstream_);
+ EXPECT_GE(encoded_bytes_int, 0);
+ EXPECT_EQ(SamplesPerChannel(decoder_sample_rate_hz_, /*ms=*/20),
+ WebRtcOpus_DurationEst(opus_decoder_, bitstream_,
+ static_cast<size_t>(encoded_bytes_int)));
+
+ // Free memory.
+ EXPECT_EQ(0, WebRtcOpus_EncoderFree(opus_encoder_));
+ EXPECT_EQ(0, WebRtcOpus_DecoderFree(opus_decoder_));
+}
+
+TEST_P(OpusTest, OpusDecodeRepacketized) {
+ if (channels_ > 2) {
+ // As per the Opus documentation
+ // https://mf4.xiph.org/jenkins/view/opus/job/opus/ws/doc/html/group__opus__repacketizer.html#details,
+ // multiple streams are not supported.
+ return;
+ }
+ constexpr size_t kPackets = 6;
+
+ PrepareSpeechData(20, 20 * kPackets);
+
+ // Create encoder memory.
+ CreateSingleOrMultiStreamEncoder(&opus_encoder_, channels_, application_,
+ use_multistream_, encoder_sample_rate_hz_);
+ ASSERT_NE(nullptr, opus_encoder_);
+ CreateSingleOrMultiStreamDecoder(&opus_decoder_, channels_, use_multistream_,
+ decoder_sample_rate_hz_);
+ ASSERT_NE(nullptr, opus_decoder_);
+
+ // Set bitrate.
+ EXPECT_EQ(
+ 0, WebRtcOpus_SetBitRate(opus_encoder_, channels_ == 1 ? 32000 : 64000));
+
+ // Check number of channels for decoder.
+ EXPECT_EQ(channels_, WebRtcOpus_DecoderChannels(opus_decoder_));
+
+ // Encode & decode.
+ int16_t audio_type;
+ const int decode_samples_per_channel =
+ SamplesPerChannel(decoder_sample_rate_hz_, /*ms=*/20);
+ std::unique_ptr<int16_t[]> output_data_decode(
+ new int16_t[kPackets * decode_samples_per_channel * channels_]);
+ OpusRepacketizer* rp = opus_repacketizer_create();
+
+ size_t num_packets = 0;
+ constexpr size_t kMaxCycles = 100;
+ for (size_t idx = 0; idx < kMaxCycles; ++idx) {
+ auto speech_block = speech_data_.GetNextBlock();
+ encoded_bytes_ =
+ WebRtcOpus_Encode(opus_encoder_, speech_block.data(),
+ rtc::CheckedDivExact(speech_block.size(), channels_),
+ kMaxBytes, bitstream_);
+ if (opus_repacketizer_cat(rp, bitstream_,
+ rtc::checked_cast<opus_int32>(encoded_bytes_)) ==
+ OPUS_OK) {
+ ++num_packets;
+ if (num_packets == kPackets) {
+ break;
+ }
+ } else {
+      // The Opus repacketizer cannot guarantee success; try again if it fails.
+ opus_repacketizer_init(rp);
+ num_packets = 0;
+ }
+ }
+ EXPECT_EQ(kPackets, num_packets);
+
+ encoded_bytes_ = opus_repacketizer_out(rp, bitstream_, kMaxBytes);
+
+ EXPECT_EQ(decode_samples_per_channel * kPackets,
+ static_cast<size_t>(WebRtcOpus_DurationEst(
+ opus_decoder_, bitstream_, encoded_bytes_)));
+
+ EXPECT_EQ(decode_samples_per_channel * kPackets,
+ static_cast<size_t>(
+ WebRtcOpus_Decode(opus_decoder_, bitstream_, encoded_bytes_,
+ output_data_decode.get(), &audio_type)));
+
+ // Free memory.
+ opus_repacketizer_destroy(rp);
+ EXPECT_EQ(0, WebRtcOpus_EncoderFree(opus_encoder_));
+ EXPECT_EQ(0, WebRtcOpus_DecoderFree(opus_decoder_));
+}
+
+TEST(OpusVadTest, CeltUnknownStatus) {
+ const uint8_t celt[] = {0x80};
+ EXPECT_EQ(WebRtcOpus_PacketHasVoiceActivity(celt, 1), -1);
+}
+
+TEST(OpusVadTest, Mono20MsVadSet) {
+ uint8_t silk20msMonoVad[] = {0x78, 0x80};
+ EXPECT_TRUE(WebRtcOpus_PacketHasVoiceActivity(silk20msMonoVad, 2));
+}
+
+TEST(OpusVadTest, Mono20MsVadUnset) {
+ uint8_t silk20msMonoSilence[] = {0x78, 0x00};
+ EXPECT_FALSE(WebRtcOpus_PacketHasVoiceActivity(silk20msMonoSilence, 2));
+}
+
+TEST(OpusVadTest, Stereo20MsVadOnSideChannel) {
+ uint8_t silk20msStereoVadSideChannel[] = {0x78 | 0x04, 0x20};
+ EXPECT_TRUE(
+ WebRtcOpus_PacketHasVoiceActivity(silk20msStereoVadSideChannel, 2));
+}
+
+TEST(OpusVadTest, TwoOpusMonoFramesVadOnSecond) {
+ uint8_t twoMonoFrames[] = {0x78 | 0x1, 0x00, 0x80};
+ EXPECT_TRUE(WebRtcOpus_PacketHasVoiceActivity(twoMonoFrames, 3));
+}
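+
+// Note on the packets used above: the first byte of each packet is the Opus
+// TOC byte (RFC 6716, section 3.1). Its top five bits select the
+// configuration (0x78 >> 3 == 15, a hybrid SILK+CELT fullband 20 ms frame,
+// whose SILK layer carries VAD flags; 0x80 >> 3 == 16 is CELT-only, which
+// carries none, hence the -1 above), bit 2 is the stereo flag, and the low
+// two bits are the frame-count code. A minimal sketch of that decomposition
+// (illustrative only; ParseToc is not part of the tested API):
+//
+//   void ParseToc(uint8_t toc, int* config, bool* stereo, int* frame_code) {
+//     *config = toc >> 3;           // Configuration number, 0-31.
+//     *stereo = (toc & 0x04) != 0;  // Stereo flag.
+//     *frame_code = toc & 0x03;     // 0: one frame, 1: two equal frames,
+//   }                               // 2: two unequal, 3: arbitrary count.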
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/opus/test/BUILD.gn b/third_party/libwebrtc/modules/audio_coding/codecs/opus/test/BUILD.gn
new file mode 100644
index 0000000000..8bc0bf5e0e
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/opus/test/BUILD.gn
@@ -0,0 +1,55 @@
+# Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+import("../../../../../webrtc.gni")
+
+visibility = [
+ ":*",
+ "../../../:*",
+]
+
+if (rtc_include_tests) {
+ rtc_library("test") {
+ testonly = true
+
+ sources = [
+ "audio_ring_buffer.cc",
+ "audio_ring_buffer.h",
+ "blocker.cc",
+ "blocker.h",
+ "lapped_transform.cc",
+ "lapped_transform.h",
+ ]
+
+ deps = [
+ "../../../../../common_audio",
+ "../../../../../common_audio:common_audio_c",
+ "../../../../../rtc_base:checks",
+ "../../../../../rtc_base/memory:aligned_malloc",
+ ]
+ }
+
+ rtc_library("test_unittest") {
+ testonly = true
+
+ sources = [
+ "audio_ring_buffer_unittest.cc",
+ "blocker_unittest.cc",
+ "lapped_transform_unittest.cc",
+ ]
+
+ deps = [
+ ":test",
+ "../../../../../common_audio",
+ "../../../../../common_audio:common_audio_c",
+ "../../../../../rtc_base:macromagic",
+ "../../../../../test:test_support",
+ "//testing/gtest",
+ ]
+ }
+}
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/opus/test/audio_ring_buffer.cc b/third_party/libwebrtc/modules/audio_coding/codecs/opus/test/audio_ring_buffer.cc
new file mode 100644
index 0000000000..2a71b43d2c
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/opus/test/audio_ring_buffer.cc
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/opus/test/audio_ring_buffer.h"
+
+#include "common_audio/ring_buffer.h"
+#include "rtc_base/checks.h"
+
+// This is a simple multi-channel wrapper over the ring_buffer.h C interface.
+
+namespace webrtc {
+
+AudioRingBuffer::AudioRingBuffer(size_t channels, size_t max_frames) {
+ buffers_.reserve(channels);
+ for (size_t i = 0; i < channels; ++i)
+ buffers_.push_back(WebRtc_CreateBuffer(max_frames, sizeof(float)));
+}
+
+AudioRingBuffer::~AudioRingBuffer() {
+ for (auto* buf : buffers_)
+ WebRtc_FreeBuffer(buf);
+}
+
+void AudioRingBuffer::Write(const float* const* data,
+ size_t channels,
+ size_t frames) {
+ RTC_DCHECK_EQ(buffers_.size(), channels);
+ for (size_t i = 0; i < channels; ++i) {
+ const size_t written = WebRtc_WriteBuffer(buffers_[i], data[i], frames);
+ RTC_CHECK_EQ(written, frames);
+ }
+}
+
+void AudioRingBuffer::Read(float* const* data, size_t channels, size_t frames) {
+ RTC_DCHECK_EQ(buffers_.size(), channels);
+ for (size_t i = 0; i < channels; ++i) {
+ const size_t read =
+ WebRtc_ReadBuffer(buffers_[i], nullptr, data[i], frames);
+ RTC_CHECK_EQ(read, frames);
+ }
+}
+
+size_t AudioRingBuffer::ReadFramesAvailable() const {
+ // All buffers have the same amount available.
+ return WebRtc_available_read(buffers_[0]);
+}
+
+size_t AudioRingBuffer::WriteFramesAvailable() const {
+ // All buffers have the same amount available.
+ return WebRtc_available_write(buffers_[0]);
+}
+
+void AudioRingBuffer::MoveReadPositionForward(size_t frames) {
+ for (auto* buf : buffers_) {
+ const size_t moved =
+ static_cast<size_t>(WebRtc_MoveReadPtr(buf, static_cast<int>(frames)));
+ RTC_CHECK_EQ(moved, frames);
+ }
+}
+
+void AudioRingBuffer::MoveReadPositionBackward(size_t frames) {
+ for (auto* buf : buffers_) {
+ const size_t moved = static_cast<size_t>(
+ -WebRtc_MoveReadPtr(buf, -static_cast<int>(frames)));
+ RTC_CHECK_EQ(moved, frames);
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/opus/test/audio_ring_buffer.h b/third_party/libwebrtc/modules/audio_coding/codecs/opus/test/audio_ring_buffer.h
new file mode 100644
index 0000000000..a280ca2410
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/opus/test/audio_ring_buffer.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef MODULES_AUDIO_CODING_CODECS_OPUS_TEST_AUDIO_RING_BUFFER_H_
+#define MODULES_AUDIO_CODING_CODECS_OPUS_TEST_AUDIO_RING_BUFFER_H_
+
+#include <stddef.h>
+
+#include <memory>
+#include <vector>
+
+struct RingBuffer;
+
+namespace webrtc {
+
+// A ring buffer tailored for float deinterleaved audio. Any operation that
+// cannot be performed as requested will cause a crash (e.g. insufficient data
+// in the buffer to fulfill a read request).
+class AudioRingBuffer final {
+ public:
+ // Specify the number of channels and maximum number of frames the buffer will
+ // contain.
+ AudioRingBuffer(size_t channels, size_t max_frames);
+ ~AudioRingBuffer();
+
+ // Copies `data` to the buffer and advances the write pointer. `channels` must
+ // be the same as at creation time.
+ void Write(const float* const* data, size_t channels, size_t frames);
+
+ // Copies from the buffer to `data` and advances the read pointer. `channels`
+ // must be the same as at creation time.
+ void Read(float* const* data, size_t channels, size_t frames);
+
+ size_t ReadFramesAvailable() const;
+ size_t WriteFramesAvailable() const;
+
+ // Moves the read position. The forward version advances the read pointer
+  // towards the write pointer and the backward version withdraws the read
+  // pointer away from the write pointer (i.e. flushing and stuffing the
+  // buffer, respectively).
+ void MoveReadPositionForward(size_t frames);
+ void MoveReadPositionBackward(size_t frames);
+
+ private:
+ // TODO(kwiberg): Use std::vector<std::unique_ptr<RingBuffer>> instead.
+ std::vector<RingBuffer*> buffers_;
+};
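+
+// A minimal usage sketch (illustrative; sizes are arbitrary). Writing more
+// frames than WriteFramesAvailable(), or reading more than
+// ReadFramesAvailable(), crashes via RTC_CHECK:
+//
+//   AudioRingBuffer buf(/*channels=*/2, /*max_frames=*/480);
+//   float ch0[160] = {0};
+//   float ch1[160] = {0};
+//   float* chunk[] = {ch0, ch1};
+//   buf.Write(chunk, /*channels=*/2, /*frames=*/160);
+//   buf.Read(chunk, /*channels=*/2, /*frames=*/160);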
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_CODING_CODECS_OPUS_TEST_AUDIO_RING_BUFFER_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/opus/test/audio_ring_buffer_unittest.cc b/third_party/libwebrtc/modules/audio_coding/codecs/opus/test/audio_ring_buffer_unittest.cc
new file mode 100644
index 0000000000..6dbc8ee9fe
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/opus/test/audio_ring_buffer_unittest.cc
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/opus/test/audio_ring_buffer.h"
+
+#include <memory>
+
+#include "common_audio/channel_buffer.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+class AudioRingBufferTest
+ : public ::testing::TestWithParam< ::testing::tuple<int, int, int, int> > {
+};
+
+void ReadAndWriteTest(const ChannelBuffer<float>& input,
+ size_t num_write_chunk_frames,
+ size_t num_read_chunk_frames,
+ size_t buffer_frames,
+ ChannelBuffer<float>* output) {
+ const size_t num_channels = input.num_channels();
+ const size_t total_frames = input.num_frames();
+ AudioRingBuffer buf(num_channels, buffer_frames);
+ std::unique_ptr<float*[]> slice(new float*[num_channels]);
+
+ size_t input_pos = 0;
+ size_t output_pos = 0;
+ while (input_pos + buf.WriteFramesAvailable() < total_frames) {
+ // Write until the buffer is as full as possible.
+ while (buf.WriteFramesAvailable() >= num_write_chunk_frames) {
+ buf.Write(input.Slice(slice.get(), input_pos), num_channels,
+ num_write_chunk_frames);
+ input_pos += num_write_chunk_frames;
+ }
+ // Read until the buffer is as empty as possible.
+ while (buf.ReadFramesAvailable() >= num_read_chunk_frames) {
+ EXPECT_LT(output_pos, total_frames);
+ buf.Read(output->Slice(slice.get(), output_pos), num_channels,
+ num_read_chunk_frames);
+ output_pos += num_read_chunk_frames;
+ }
+ }
+
+ // Write and read the last bit.
+ if (input_pos < total_frames) {
+ buf.Write(input.Slice(slice.get(), input_pos), num_channels,
+ total_frames - input_pos);
+ }
+ if (buf.ReadFramesAvailable()) {
+ buf.Read(output->Slice(slice.get(), output_pos), num_channels,
+ buf.ReadFramesAvailable());
+ }
+ EXPECT_EQ(0u, buf.ReadFramesAvailable());
+}
+
+TEST_P(AudioRingBufferTest, ReadDataMatchesWrittenData) {
+ const size_t kFrames = 5000;
+ const size_t num_channels = ::testing::get<3>(GetParam());
+
+ // Initialize the input data to an increasing sequence.
+ ChannelBuffer<float> input(kFrames, static_cast<int>(num_channels));
+ for (size_t i = 0; i < num_channels; ++i)
+ for (size_t j = 0; j < kFrames; ++j)
+ input.channels()[i][j] = (i + 1) * (j + 1);
+
+ ChannelBuffer<float> output(kFrames, static_cast<int>(num_channels));
+ ReadAndWriteTest(input, ::testing::get<0>(GetParam()),
+ ::testing::get<1>(GetParam()), ::testing::get<2>(GetParam()),
+ &output);
+
+ // Verify the read data matches the input.
+ for (size_t i = 0; i < num_channels; ++i)
+ for (size_t j = 0; j < kFrames; ++j)
+ EXPECT_EQ(input.channels()[i][j], output.channels()[i][j]);
+}
+
+INSTANTIATE_TEST_SUITE_P(
+ AudioRingBufferTest,
+ AudioRingBufferTest,
+ ::testing::Combine(::testing::Values(10, 20, 42), // num_write_chunk_frames
+ ::testing::Values(1, 10, 17), // num_read_chunk_frames
+ ::testing::Values(100, 256), // buffer_frames
+ ::testing::Values(1, 4))); // num_channels
+
+TEST_F(AudioRingBufferTest, MoveReadPosition) {
+ const size_t kNumChannels = 1;
+ const float kInputArray[] = {1, 2, 3, 4};
+ const size_t kNumFrames = sizeof(kInputArray) / sizeof(*kInputArray);
+ ChannelBuffer<float> input(kNumFrames, kNumChannels);
+ input.SetDataForTesting(kInputArray, kNumFrames);
+ AudioRingBuffer buf(kNumChannels, kNumFrames);
+ buf.Write(input.channels(), kNumChannels, kNumFrames);
+
+ buf.MoveReadPositionForward(3);
+ ChannelBuffer<float> output(1, kNumChannels);
+ buf.Read(output.channels(), kNumChannels, 1);
+ EXPECT_EQ(4, output.channels()[0][0]);
+ buf.MoveReadPositionBackward(3);
+ buf.Read(output.channels(), kNumChannels, 1);
+ EXPECT_EQ(2, output.channels()[0][0]);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/opus/test/blocker.cc b/third_party/libwebrtc/modules/audio_coding/codecs/opus/test/blocker.cc
new file mode 100644
index 0000000000..33406cead9
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/opus/test/blocker.cc
@@ -0,0 +1,215 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/opus/test/blocker.h"
+
+#include <string.h>
+
+#include "rtc_base/checks.h"
+
+namespace {
+
+// Adds `a` and `b` frame by frame into `result` (basically matrix addition).
+void AddFrames(const float* const* a,
+ size_t a_start_index,
+ const float* const* b,
+ int b_start_index,
+ size_t num_frames,
+ size_t num_channels,
+ float* const* result,
+ size_t result_start_index) {
+ for (size_t i = 0; i < num_channels; ++i) {
+ for (size_t j = 0; j < num_frames; ++j) {
+ result[i][j + result_start_index] =
+ a[i][j + a_start_index] + b[i][j + b_start_index];
+ }
+ }
+}
+
+// Copies `src` into `dst` channel by channel.
+void CopyFrames(const float* const* src,
+ size_t src_start_index,
+ size_t num_frames,
+ size_t num_channels,
+ float* const* dst,
+ size_t dst_start_index) {
+ for (size_t i = 0; i < num_channels; ++i) {
+ memcpy(&dst[i][dst_start_index], &src[i][src_start_index],
+ num_frames * sizeof(dst[i][dst_start_index]));
+ }
+}
+
+// Moves `src` into `dst` channel by channel.
+void MoveFrames(const float* const* src,
+ size_t src_start_index,
+ size_t num_frames,
+ size_t num_channels,
+ float* const* dst,
+ size_t dst_start_index) {
+ for (size_t i = 0; i < num_channels; ++i) {
+ memmove(&dst[i][dst_start_index], &src[i][src_start_index],
+ num_frames * sizeof(dst[i][dst_start_index]));
+ }
+}
+
+void ZeroOut(float* const* buffer,
+ size_t starting_idx,
+ size_t num_frames,
+ size_t num_channels) {
+ for (size_t i = 0; i < num_channels; ++i) {
+ memset(&buffer[i][starting_idx], 0,
+ num_frames * sizeof(buffer[i][starting_idx]));
+ }
+}
+
+// Pointwise multiplies each channel of `frames` with `window`. Results are
+// stored in `frames`.
+void ApplyWindow(const float* window,
+ size_t num_frames,
+ size_t num_channels,
+ float* const* frames) {
+ for (size_t i = 0; i < num_channels; ++i) {
+ for (size_t j = 0; j < num_frames; ++j) {
+ frames[i][j] = frames[i][j] * window[j];
+ }
+ }
+}
+
+size_t gcd(size_t a, size_t b) {
+ size_t tmp;
+ while (b) {
+ tmp = a;
+ a = b;
+ b = tmp % b;
+ }
+ return a;
+}
+
+} // namespace
+
+namespace webrtc {
+
+Blocker::Blocker(size_t chunk_size,
+ size_t block_size,
+ size_t num_input_channels,
+ size_t num_output_channels,
+ const float* window,
+ size_t shift_amount,
+ BlockerCallback* callback)
+ : chunk_size_(chunk_size),
+ block_size_(block_size),
+ num_input_channels_(num_input_channels),
+ num_output_channels_(num_output_channels),
+ initial_delay_(block_size_ - gcd(chunk_size, shift_amount)),
+ frame_offset_(0),
+ input_buffer_(num_input_channels_, chunk_size_ + initial_delay_),
+ output_buffer_(chunk_size_ + initial_delay_, num_output_channels_),
+ input_block_(block_size_, num_input_channels_),
+ output_block_(block_size_, num_output_channels_),
+ window_(new float[block_size_]),
+ shift_amount_(shift_amount),
+ callback_(callback) {
+ RTC_CHECK_LE(num_output_channels_, num_input_channels_);
+ RTC_CHECK_LE(shift_amount_, block_size_);
+
+ memcpy(window_.get(), window, block_size_ * sizeof(*window_.get()));
+ input_buffer_.MoveReadPositionBackward(initial_delay_);
+}
+
+Blocker::~Blocker() = default;
+
+// When block_size < chunk_size the input and output buffers look like this:
+//
+// delay* chunk_size chunk_size + delay*
+// buffer: <-------------|---------------------|---------------|>
+// _a_ _b_ _c_
+//
+// On each call to ProcessChunk():
+// 1. New input gets read into sections _b_ and _c_ of the input buffer.
+// 2. We block starting from frame_offset.
+// 3. We block until we reach a block `bl` that doesn't contain any frames
+// from sections _a_ or _b_ of the input buffer.
+// 4. We window the current block, fire the callback for processing, window
+// again, and overlap/add to the output buffer.
+// 5. We copy sections _a_ and _b_ of the output buffer into output.
+// 6. For both the input and the output buffers, we copy section _c_ into
+// section _a_.
+// 7. We set the new frame_offset to be the difference between the first frame
+// of `bl` and the border between sections _b_ and _c_.
+//
+// When block_size > chunk_size the input and output buffers look like this:
+//
+// chunk_size delay* chunk_size + delay*
+// buffer: <-------------|---------------------|---------------|>
+// _a_ _b_ _c_
+//
+// On each call to ProcessChunk():
+// The procedure is the same as above, except for:
+// 1. New input gets read into section _c_ of the input buffer.
+// 3. We block until we reach a block `bl` that doesn't contain any frames
+// from section _a_ of the input buffer.
+// 5. We copy section _a_ of the output buffer into output.
+// 6. For both the input and the output buffers, we copy sections _b_ and _c_
+// into section _a_ and _b_.
+// 7. We set the new frame_offset to be the difference between the first frame
+// of `bl` and the border between sections _a_ and _b_.
+//
+// * delay here refers to initial_delay_.
+//
+// TODO(claguna): Look at using ring buffers to eliminate some copies.
+void Blocker::ProcessChunk(const float* const* input,
+ size_t chunk_size,
+ size_t num_input_channels,
+ size_t num_output_channels,
+ float* const* output) {
+ RTC_CHECK_EQ(chunk_size, chunk_size_);
+ RTC_CHECK_EQ(num_input_channels, num_input_channels_);
+ RTC_CHECK_EQ(num_output_channels, num_output_channels_);
+
+ input_buffer_.Write(input, num_input_channels, chunk_size_);
+ size_t first_frame_in_block = frame_offset_;
+
+ // Loop through blocks.
+ while (first_frame_in_block < chunk_size_) {
+ input_buffer_.Read(input_block_.channels(), num_input_channels,
+ block_size_);
+ input_buffer_.MoveReadPositionBackward(block_size_ - shift_amount_);
+
+ ApplyWindow(window_.get(), block_size_, num_input_channels_,
+ input_block_.channels());
+ callback_->ProcessBlock(input_block_.channels(), block_size_,
+ num_input_channels_, num_output_channels_,
+ output_block_.channels());
+ ApplyWindow(window_.get(), block_size_, num_output_channels_,
+ output_block_.channels());
+
+ AddFrames(output_buffer_.channels(), first_frame_in_block,
+ output_block_.channels(), 0, block_size_, num_output_channels_,
+ output_buffer_.channels(), first_frame_in_block);
+
+ first_frame_in_block += shift_amount_;
+ }
+
+  // Copy the output buffer to the output.
+ CopyFrames(output_buffer_.channels(), 0, chunk_size_, num_output_channels_,
+ output, 0);
+
+  // Copy output buffer [chunk_size_, chunk_size_ + initial_delay_]
+  // to output buffer [0, initial_delay_], zero the rest.
+  MoveFrames(output_buffer_.channels(), chunk_size_, initial_delay_,
+             num_output_channels_, output_buffer_.channels(), 0);
+ ZeroOut(output_buffer_.channels(), initial_delay_, chunk_size_,
+ num_output_channels_);
+
+ // Calculate new starting frames.
+ frame_offset_ = first_frame_in_block - chunk_size_;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/opus/test/blocker.h b/third_party/libwebrtc/modules/audio_coding/codecs/opus/test/blocker.h
new file mode 100644
index 0000000000..59b7e29621
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/opus/test/blocker.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_OPUS_TEST_BLOCKER_H_
+#define MODULES_AUDIO_CODING_CODECS_OPUS_TEST_BLOCKER_H_
+
+#include <memory>
+
+#include "common_audio/channel_buffer.h"
+#include "modules/audio_coding/codecs/opus/test/audio_ring_buffer.h"
+
+namespace webrtc {
+
+// The callback function to process audio in the time domain. Input has already
+// been windowed, and output will be windowed. The number of input channels
+// must be >= the number of output channels.
+class BlockerCallback {
+ public:
+ virtual ~BlockerCallback() {}
+
+ virtual void ProcessBlock(const float* const* input,
+ size_t num_frames,
+ size_t num_input_channels,
+ size_t num_output_channels,
+ float* const* output) = 0;
+};
+
+// The main purpose of Blocker is to abstract away the fact that often we
+// receive a different number of audio frames than our transform takes. For
+// example, most FFTs work best when the fft-size is a power of 2, but suppose
+// we receive 20ms of audio at a sample rate of 48000. That comes to 960 frames
+// of audio, which is not a power of 2. Blocker allows us to specify the
+// transform and all other necessary processing via the ProcessBlock()
+// callback function without any constraints on the transform-size
+// (read: `block_size_`) or received-audio-size (read: `chunk_size_`).
+// We handle this for the multichannel audio case, allowing for different
+// numbers of input and output channels (for example, beamforming takes 2 or
+// more input channels and returns 1 output channel). Audio signals are
+// represented as deinterleaved floats in the range [-1, 1].
+//
+// Blocker is responsible for:
+// - blocking audio while handling potential discontinuities on the edges
+// of chunks
+// - windowing blocks before sending them to Process()
+// - windowing processed blocks, and overlap-adding them together before
+// sending back a processed chunk
+//
+// To use blocker:
+// 1. Implement a BlockerCallback object `bc`.
+// 2. Instantiate a Blocker object `b`, passing in `bc`.
+// 3. As you receive audio, call b.ProcessChunk() to get processed audio.
+//
+// A small amount of delay is added to the first received chunk to deal with
+// the difference in chunk/block sizes. This delay is <= chunk_size.
+//
+// Ownership of window is retained by the caller. That is, Blocker makes a
+// copy of window and does not attempt to delete it.
+class Blocker {
+ public:
+ Blocker(size_t chunk_size,
+ size_t block_size,
+ size_t num_input_channels,
+ size_t num_output_channels,
+ const float* window,
+ size_t shift_amount,
+ BlockerCallback* callback);
+ ~Blocker();
+
+ void ProcessChunk(const float* const* input,
+ size_t chunk_size,
+ size_t num_input_channels,
+ size_t num_output_channels,
+ float* const* output);
+
+ size_t initial_delay() const { return initial_delay_; }
+
+ private:
+ const size_t chunk_size_;
+ const size_t block_size_;
+ const size_t num_input_channels_;
+ const size_t num_output_channels_;
+
+ // The number of frames of delay to add at the beginning of the first chunk.
+ const size_t initial_delay_;
+
+ // The frame index into the input buffer where the first block should be read
+ // from. This is necessary because shift_amount_ is not necessarily a
+ // multiple of chunk_size_, so blocks won't line up at the start of the
+ // buffer.
+ size_t frame_offset_;
+
+ // Since blocks nearly always overlap, there are certain blocks that require
+ // frames from the end of one chunk and the beginning of the next chunk. The
+ // input and output buffers are responsible for saving those frames between
+ // calls to ProcessChunk().
+ //
+  // Both contain `initial_delay_` + `chunk_size_` frames. The input is a fairly
+ // standard FIFO, but due to the overlap-add it's harder to use an
+ // AudioRingBuffer for the output.
+ AudioRingBuffer input_buffer_;
+ ChannelBuffer<float> output_buffer_;
+
+ // Space for the input block (can't wrap because of windowing).
+ ChannelBuffer<float> input_block_;
+
+ // Space for the output block (can't wrap because of overlap/add).
+ ChannelBuffer<float> output_block_;
+
+ std::unique_ptr<float[]> window_;
+
+  // The number of frames between the starts of consecutive blocks. For
+  // example, `shift_amount_` = `block_size_` / 2 for a Hann window.
+ size_t shift_amount_;
+
+ BlockerCallback* callback_;
+};
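+
+// A minimal usage sketch (illustrative values; GainCallback is not part of
+// this API). With chunk size 160, block size 128 and shift 64, the initial
+// delay is 128 - gcd(160, 64) = 96 frames:
+//
+//   class GainCallback : public BlockerCallback {
+//    public:
+//     void ProcessBlock(const float* const* input, size_t num_frames,
+//                       size_t num_input_channels, size_t num_output_channels,
+//                       float* const* output) override {
+//       for (size_t i = 0; i < num_output_channels; ++i)
+//         for (size_t j = 0; j < num_frames; ++j)
+//           output[i][j] = 0.5f * input[i][j];
+//     }
+//   };
+//
+//   GainCallback callback;
+//   std::vector<float> window(128, 1.0f);
+//   Blocker blocker(/*chunk_size=*/160, /*block_size=*/128,
+//                   /*num_input_channels=*/1, /*num_output_channels=*/1,
+//                   window.data(), /*shift_amount=*/64, &callback);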
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_CODING_CODECS_OPUS_TEST_BLOCKER_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/opus/test/blocker_unittest.cc b/third_party/libwebrtc/modules/audio_coding/codecs/opus/test/blocker_unittest.cc
new file mode 100644
index 0000000000..9c8e789ba9
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/opus/test/blocker_unittest.cc
@@ -0,0 +1,293 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/opus/test/blocker.h"
+
+#include <memory>
+
+#include "rtc_base/arraysize.h"
+#include "test/gtest.h"
+
+namespace {
+
+// Callback function that adds 3 to every sample in the signal.
+class PlusThreeBlockerCallback : public webrtc::BlockerCallback {
+ public:
+ void ProcessBlock(const float* const* input,
+ size_t num_frames,
+ size_t num_input_channels,
+ size_t num_output_channels,
+ float* const* output) override {
+ for (size_t i = 0; i < num_output_channels; ++i) {
+ for (size_t j = 0; j < num_frames; ++j) {
+ output[i][j] = input[i][j] + 3;
+ }
+ }
+ }
+};
+
+// No-op callback function.
+class CopyBlockerCallback : public webrtc::BlockerCallback {
+ public:
+ void ProcessBlock(const float* const* input,
+ size_t num_frames,
+ size_t num_input_channels,
+ size_t num_output_channels,
+ float* const* output) override {
+ for (size_t i = 0; i < num_output_channels; ++i) {
+ for (size_t j = 0; j < num_frames; ++j) {
+ output[i][j] = input[i][j];
+ }
+ }
+ }
+};
+
+} // namespace
+
+namespace webrtc {
+
+// Tests blocking with a window that multiplies the signal by 2, a callback
+// that adds 3 to each sample in the signal, and different combinations of chunk
+// size, block size, and shift amount.
+class BlockerTest : public ::testing::Test {
+ protected:
+ void RunTest(Blocker* blocker,
+ size_t chunk_size,
+ size_t num_frames,
+ const float* const* input,
+ float* const* input_chunk,
+ float* const* output,
+ float* const* output_chunk,
+ size_t num_input_channels,
+ size_t num_output_channels) {
+ size_t start = 0;
+ size_t end = chunk_size - 1;
+ while (end < num_frames) {
+ CopyTo(input_chunk, 0, start, num_input_channels, chunk_size, input);
+ blocker->ProcessChunk(input_chunk, chunk_size, num_input_channels,
+ num_output_channels, output_chunk);
+ CopyTo(output, start, 0, num_output_channels, chunk_size, output_chunk);
+
+ start += chunk_size;
+ end += chunk_size;
+ }
+ }
+
+ void ValidateSignalEquality(const float* const* expected,
+ const float* const* actual,
+ size_t num_channels,
+ size_t num_frames) {
+ for (size_t i = 0; i < num_channels; ++i) {
+ for (size_t j = 0; j < num_frames; ++j) {
+ EXPECT_FLOAT_EQ(expected[i][j], actual[i][j]);
+ }
+ }
+ }
+
+ void ValidateInitialDelay(const float* const* output,
+ size_t num_channels,
+ size_t num_frames,
+ size_t initial_delay) {
+ for (size_t i = 0; i < num_channels; ++i) {
+ for (size_t j = 0; j < num_frames; ++j) {
+ if (j < initial_delay) {
+ EXPECT_FLOAT_EQ(output[i][j], 0.f);
+ } else {
+ EXPECT_GT(output[i][j], 0.f);
+ }
+ }
+ }
+ }
+
+ static void CopyTo(float* const* dst,
+ size_t start_index_dst,
+ size_t start_index_src,
+ size_t num_channels,
+ size_t num_frames,
+ const float* const* src) {
+ for (size_t i = 0; i < num_channels; ++i) {
+ memcpy(&dst[i][start_index_dst], &src[i][start_index_src],
+ num_frames * sizeof(float));
+ }
+ }
+};
+
+TEST_F(BlockerTest, TestBlockerMutuallyPrimeChunkAndBlockSize) {
+ const size_t kNumInputChannels = 3;
+ const size_t kNumOutputChannels = 2;
+ const size_t kNumFrames = 10;
+ const size_t kBlockSize = 4;
+ const size_t kChunkSize = 5;
+ const size_t kShiftAmount = 2;
+
+ const float kInput[kNumInputChannels][kNumFrames] = {
+ {1, 1, 1, 1, 1, 1, 1, 1, 1, 1},
+ {2, 2, 2, 2, 2, 2, 2, 2, 2, 2},
+ {3, 3, 3, 3, 3, 3, 3, 3, 3, 3}};
+ ChannelBuffer<float> input_cb(kNumFrames, kNumInputChannels);
+ input_cb.SetDataForTesting(kInput[0], sizeof(kInput) / sizeof(**kInput));
+
+  const float kExpectedOutput[kNumOutputChannels][kNumFrames] = {
+      {6, 6, 12, 20, 20, 20, 20, 20, 20, 20},
+      {6, 6, 12, 28, 28, 28, 28, 28, 28, 28}};
+  ChannelBuffer<float> expected_output_cb(kNumFrames, kNumOutputChannels);
+ expected_output_cb.SetDataForTesting(
+ kExpectedOutput[0], sizeof(kExpectedOutput) / sizeof(**kExpectedOutput));
+
+ const float kWindow[kBlockSize] = {2.f, 2.f, 2.f, 2.f};
+
+ ChannelBuffer<float> actual_output_cb(kNumFrames, kNumOutputChannels);
+ ChannelBuffer<float> input_chunk_cb(kChunkSize, kNumInputChannels);
+ ChannelBuffer<float> output_chunk_cb(kChunkSize, kNumOutputChannels);
+
+ PlusThreeBlockerCallback callback;
+ Blocker blocker(kChunkSize, kBlockSize, kNumInputChannels, kNumOutputChannels,
+ kWindow, kShiftAmount, &callback);
+
+ RunTest(&blocker, kChunkSize, kNumFrames, input_cb.channels(),
+ input_chunk_cb.channels(), actual_output_cb.channels(),
+ output_chunk_cb.channels(), kNumInputChannels, kNumOutputChannels);
+
+ ValidateSignalEquality(expected_output_cb.channels(),
+ actual_output_cb.channels(), kNumOutputChannels,
+ kNumFrames);
+}
+
+TEST_F(BlockerTest, TestBlockerMutuallyPrimeShiftAndBlockSize) {
+ const size_t kNumInputChannels = 3;
+ const size_t kNumOutputChannels = 2;
+ const size_t kNumFrames = 12;
+ const size_t kBlockSize = 4;
+ const size_t kChunkSize = 6;
+ const size_t kShiftAmount = 3;
+
+ const float kInput[kNumInputChannels][kNumFrames] = {
+ {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1},
+ {2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2},
+ {3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3}};
+ ChannelBuffer<float> input_cb(kNumFrames, kNumInputChannels);
+ input_cb.SetDataForTesting(kInput[0], sizeof(kInput) / sizeof(**kInput));
+
+ const float kExpectedOutput[kNumOutputChannels][kNumFrames] = {
+ {6, 10, 10, 20, 10, 10, 20, 10, 10, 20, 10, 10},
+ {6, 14, 14, 28, 14, 14, 28, 14, 14, 28, 14, 14}};
+ ChannelBuffer<float> expected_output_cb(kNumFrames, kNumOutputChannels);
+ expected_output_cb.SetDataForTesting(
+ kExpectedOutput[0], sizeof(kExpectedOutput) / sizeof(**kExpectedOutput));
+
+ const float kWindow[kBlockSize] = {2.f, 2.f, 2.f, 2.f};
+
+ ChannelBuffer<float> actual_output_cb(kNumFrames, kNumOutputChannels);
+ ChannelBuffer<float> input_chunk_cb(kChunkSize, kNumInputChannels);
+ ChannelBuffer<float> output_chunk_cb(kChunkSize, kNumOutputChannels);
+
+ PlusThreeBlockerCallback callback;
+ Blocker blocker(kChunkSize, kBlockSize, kNumInputChannels, kNumOutputChannels,
+ kWindow, kShiftAmount, &callback);
+
+ RunTest(&blocker, kChunkSize, kNumFrames, input_cb.channels(),
+ input_chunk_cb.channels(), actual_output_cb.channels(),
+ output_chunk_cb.channels(), kNumInputChannels, kNumOutputChannels);
+
+ ValidateSignalEquality(expected_output_cb.channels(),
+ actual_output_cb.channels(), kNumOutputChannels,
+ kNumFrames);
+}
+
+TEST_F(BlockerTest, TestBlockerNoOverlap) {
+ const size_t kNumInputChannels = 3;
+ const size_t kNumOutputChannels = 2;
+ const size_t kNumFrames = 12;
+ const size_t kBlockSize = 4;
+ const size_t kChunkSize = 4;
+ const size_t kShiftAmount = 4;
+
+ const float kInput[kNumInputChannels][kNumFrames] = {
+ {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1},
+ {2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2},
+ {3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3}};
+ ChannelBuffer<float> input_cb(kNumFrames, kNumInputChannels);
+ input_cb.SetDataForTesting(kInput[0], sizeof(kInput) / sizeof(**kInput));
+
+ const float kExpectedOutput[kNumOutputChannels][kNumFrames] = {
+ {10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10},
+ {14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14}};
+ ChannelBuffer<float> expected_output_cb(kNumFrames, kNumOutputChannels);
+ expected_output_cb.SetDataForTesting(
+ kExpectedOutput[0], sizeof(kExpectedOutput) / sizeof(**kExpectedOutput));
+
+ const float kWindow[kBlockSize] = {2.f, 2.f, 2.f, 2.f};
+
+ ChannelBuffer<float> actual_output_cb(kNumFrames, kNumOutputChannels);
+ ChannelBuffer<float> input_chunk_cb(kChunkSize, kNumInputChannels);
+ ChannelBuffer<float> output_chunk_cb(kChunkSize, kNumOutputChannels);
+
+ PlusThreeBlockerCallback callback;
+ Blocker blocker(kChunkSize, kBlockSize, kNumInputChannels, kNumOutputChannels,
+ kWindow, kShiftAmount, &callback);
+
+ RunTest(&blocker, kChunkSize, kNumFrames, input_cb.channels(),
+ input_chunk_cb.channels(), actual_output_cb.channels(),
+ output_chunk_cb.channels(), kNumInputChannels, kNumOutputChannels);
+
+ ValidateSignalEquality(expected_output_cb.channels(),
+ actual_output_cb.channels(), kNumOutputChannels,
+ kNumFrames);
+}
+
+TEST_F(BlockerTest, InitialDelaysAreMinimum) {
+ const size_t kNumInputChannels = 3;
+ const size_t kNumOutputChannels = 2;
+ const size_t kNumFrames = 1280;
+ const size_t kChunkSize[] = {80, 80, 80, 80, 80, 80,
+ 160, 160, 160, 160, 160, 160};
+ const size_t kBlockSize[] = {64, 64, 64, 128, 128, 128,
+ 128, 128, 128, 256, 256, 256};
+ const size_t kShiftAmount[] = {16, 32, 64, 32, 64, 128,
+ 32, 64, 128, 64, 128, 256};
+ const size_t kInitialDelay[] = {48, 48, 48, 112, 112, 112,
+ 96, 96, 96, 224, 224, 224};
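+  // These delays follow from the Blocker constructor:
+  // initial_delay = block_size - gcd(chunk_size, shift_amount), e.g. for the
+  // first case 64 - gcd(80, 16) = 64 - 16 = 48.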
+
+ float input[kNumInputChannels][kNumFrames];
+ for (size_t i = 0; i < kNumInputChannels; ++i) {
+ for (size_t j = 0; j < kNumFrames; ++j) {
+ input[i][j] = i + 1;
+ }
+ }
+ ChannelBuffer<float> input_cb(kNumFrames, kNumInputChannels);
+ input_cb.SetDataForTesting(input[0], sizeof(input) / sizeof(**input));
+
+ ChannelBuffer<float> output_cb(kNumFrames, kNumOutputChannels);
+
+ CopyBlockerCallback callback;
+
+ for (size_t i = 0; i < arraysize(kChunkSize); ++i) {
+ std::unique_ptr<float[]> window(new float[kBlockSize[i]]);
+ for (size_t j = 0; j < kBlockSize[i]; ++j) {
+ window[j] = 1.f;
+ }
+
+ ChannelBuffer<float> input_chunk_cb(kChunkSize[i], kNumInputChannels);
+ ChannelBuffer<float> output_chunk_cb(kChunkSize[i], kNumOutputChannels);
+
+ Blocker blocker(kChunkSize[i], kBlockSize[i], kNumInputChannels,
+ kNumOutputChannels, window.get(), kShiftAmount[i],
+ &callback);
+
+ RunTest(&blocker, kChunkSize[i], kNumFrames, input_cb.channels(),
+ input_chunk_cb.channels(), output_cb.channels(),
+ output_chunk_cb.channels(), kNumInputChannels, kNumOutputChannels);
+
+ ValidateInitialDelay(output_cb.channels(), kNumOutputChannels, kNumFrames,
+ kInitialDelay[i]);
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/opus/test/lapped_transform.cc b/third_party/libwebrtc/modules/audio_coding/codecs/opus/test/lapped_transform.cc
new file mode 100644
index 0000000000..b1a6526bba
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/opus/test/lapped_transform.cc
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/opus/test/lapped_transform.h"
+
+#include <algorithm>
+#include <cstdlib>
+#include <cstring>
+
+#include "common_audio/real_fourier.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+void LappedTransform::BlockThunk::ProcessBlock(const float* const* input,
+ size_t num_frames,
+ size_t num_input_channels,
+ size_t num_output_channels,
+ float* const* output) {
+ RTC_CHECK_EQ(num_input_channels, parent_->num_in_channels_);
+ RTC_CHECK_EQ(num_output_channels, parent_->num_out_channels_);
+ RTC_CHECK_EQ(parent_->block_length_, num_frames);
+
+ for (size_t i = 0; i < num_input_channels; ++i) {
+ memcpy(parent_->real_buf_.Row(i), input[i], num_frames * sizeof(*input[0]));
+ parent_->fft_->Forward(parent_->real_buf_.Row(i),
+ parent_->cplx_pre_.Row(i));
+ }
+
+ size_t block_length =
+ RealFourier::ComplexLength(RealFourier::FftOrder(num_frames));
+ RTC_CHECK_EQ(parent_->cplx_length_, block_length);
+ parent_->block_processor_->ProcessAudioBlock(
+ parent_->cplx_pre_.Array(), num_input_channels, parent_->cplx_length_,
+ num_output_channels, parent_->cplx_post_.Array());
+
+ for (size_t i = 0; i < num_output_channels; ++i) {
+ parent_->fft_->Inverse(parent_->cplx_post_.Row(i),
+ parent_->real_buf_.Row(i));
+ memcpy(output[i], parent_->real_buf_.Row(i),
+ num_frames * sizeof(*input[0]));
+ }
+}
+
+LappedTransform::LappedTransform(size_t num_in_channels,
+ size_t num_out_channels,
+ size_t chunk_length,
+ const float* window,
+ size_t block_length,
+ size_t shift_amount,
+ Callback* callback)
+ : blocker_callback_(this),
+ num_in_channels_(num_in_channels),
+ num_out_channels_(num_out_channels),
+ block_length_(block_length),
+ chunk_length_(chunk_length),
+ block_processor_(callback),
+ blocker_(chunk_length_,
+ block_length_,
+ num_in_channels_,
+ num_out_channels_,
+ window,
+ shift_amount,
+ &blocker_callback_),
+ fft_(RealFourier::Create(RealFourier::FftOrder(block_length_))),
+ cplx_length_(RealFourier::ComplexLength(fft_->order())),
+ real_buf_(num_in_channels,
+ block_length_,
+ RealFourier::kFftBufferAlignment),
+ cplx_pre_(num_in_channels,
+ cplx_length_,
+ RealFourier::kFftBufferAlignment),
+ cplx_post_(num_out_channels,
+ cplx_length_,
+ RealFourier::kFftBufferAlignment) {
+ RTC_CHECK(num_in_channels_ > 0);
+ RTC_CHECK_GT(block_length_, 0);
+ RTC_CHECK_GT(chunk_length_, 0);
+ RTC_CHECK(block_processor_);
+
+  // Verify that block_length_ is a power of 2.
+ RTC_CHECK_EQ(0, block_length_ & (block_length_ - 1));
+}
+
+LappedTransform::~LappedTransform() = default;
+
+void LappedTransform::ProcessChunk(const float* const* in_chunk,
+ float* const* out_chunk) {
+ blocker_.ProcessChunk(in_chunk, chunk_length_, num_in_channels_,
+ num_out_channels_, out_chunk);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/opus/test/lapped_transform.h b/third_party/libwebrtc/modules/audio_coding/codecs/opus/test/lapped_transform.h
new file mode 100644
index 0000000000..bb25c34a9e
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/opus/test/lapped_transform.h
@@ -0,0 +1,175 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_OPUS_TEST_LAPPED_TRANSFORM_H_
+#define MODULES_AUDIO_CODING_CODECS_OPUS_TEST_LAPPED_TRANSFORM_H_
+
+#include <complex>
+#include <memory>
+
+#include "common_audio/real_fourier.h"
+#include "modules/audio_coding/codecs/opus/test/blocker.h"
+#include "rtc_base/memory/aligned_malloc.h"
+
+namespace webrtc {
+
+// Wrapper class for aligned arrays. Every row (and the first dimension) is
+// aligned to the given byte alignment.
+template <typename T>
+class AlignedArray {
+ public:
+ AlignedArray(size_t rows, size_t cols, size_t alignment)
+ : rows_(rows), cols_(cols) {
+ RTC_CHECK_GT(alignment, 0);
+ head_row_ =
+ static_cast<T**>(AlignedMalloc(rows_ * sizeof(*head_row_), alignment));
+ for (size_t i = 0; i < rows_; ++i) {
+ head_row_[i] = static_cast<T*>(
+ AlignedMalloc(cols_ * sizeof(**head_row_), alignment));
+ }
+ }
+
+ ~AlignedArray() {
+ for (size_t i = 0; i < rows_; ++i) {
+ AlignedFree(head_row_[i]);
+ }
+ AlignedFree(head_row_);
+ }
+
+ T* const* Array() { return head_row_; }
+
+ const T* const* Array() const { return head_row_; }
+
+ T* Row(size_t row) {
+    RTC_CHECK_LT(row, rows_);
+ return head_row_[row];
+ }
+
+ const T* Row(size_t row) const {
+    RTC_CHECK_LT(row, rows_);
+ return head_row_[row];
+ }
+
+ private:
+ size_t rows_;
+ size_t cols_;
+ T** head_row_;
+};
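+
+// A usage sketch (illustrative): two rows of 64 complex floats, each row
+// starting on a 32-byte boundary, e.g. for SIMD-friendly FFT buffers:
+//
+//   AlignedArray<std::complex<float>> fft_buf(/*rows=*/2, /*cols=*/64,
+//                                             /*alignment=*/32);
+//   std::complex<float>* row0 = fft_buf.Row(0);  // 32-byte aligned.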
+
+// Helper class for audio processing modules which operate on frequency domain
+// input derived from the windowed time domain audio stream.
+//
+// The input audio chunk is sliced into possibly overlapping blocks, multiplied
+// by a window and transformed with an FFT implementation. The transformed data
+// is supplied to the given callback for processing. The processed output is
+// then inverse transformed into the time domain and spliced back into a chunk
+// which constitutes the final output of this processing module.
+class LappedTransform {
+ public:
+ class Callback {
+ public:
+ virtual ~Callback() {}
+
+ virtual void ProcessAudioBlock(const std::complex<float>* const* in_block,
+ size_t num_in_channels,
+ size_t frames,
+ size_t num_out_channels,
+ std::complex<float>* const* out_block) = 0;
+ };
+
+ // Construct a transform instance. `chunk_length` is the number of samples in
+ // each channel. `window` defines the window, owned by the caller (a copy is
+ // made internally); `window` should have length equal to `block_length`.
+ // `block_length` defines the length of a block, in samples.
+ // `shift_amount` is in samples. `callback` is the caller-owned audio
+ // processing function called for each block of the input chunk.
+ LappedTransform(size_t num_in_channels,
+ size_t num_out_channels,
+ size_t chunk_length,
+ const float* window,
+ size_t block_length,
+ size_t shift_amount,
+ Callback* callback);
+ ~LappedTransform();
+
+ // Main audio processing helper method. Internally slices `in_chunk` into
+ // blocks, transforms them to frequency domain, calls the callback for each
+ // block and returns a de-blocked time domain chunk of audio through
+ // `out_chunk`. Both buffers are caller-owned.
+ void ProcessChunk(const float* const* in_chunk, float* const* out_chunk);
+
+ // Get the chunk length.
+ //
+ // The chunk length is the number of samples per channel that must be passed
+ // to ProcessChunk via the parameter in_chunk.
+ //
+ // Returns the same chunk_length passed to the LappedTransform constructor.
+ size_t chunk_length() const { return chunk_length_; }
+
+ // Get the number of input channels.
+ //
+ // This is the number of arrays that must be passed to ProcessChunk via
+ // in_chunk.
+ //
+ // Returns the same num_in_channels passed to the LappedTransform constructor.
+ size_t num_in_channels() const { return num_in_channels_; }
+
+ // Get the number of output channels.
+ //
+ // This is the number of arrays that must be passed to ProcessChunk via
+ // out_chunk.
+ //
+ // Returns the same num_out_channels passed to the LappedTransform
+ // constructor.
+ size_t num_out_channels() const { return num_out_channels_; }
+
+ // Returns the initial delay.
+ //
+ // This is the delay introduced by the `blocker_` to be able to get and return
+ // chunks of `chunk_length`, but process blocks of `block_length`.
+ size_t initial_delay() const { return blocker_.initial_delay(); }
+
+ private:
+ // Internal middleware callback, given to the blocker. Transforms each block
+ // and hands it over to the processing method given at construction time.
+ class BlockThunk : public BlockerCallback {
+ public:
+ explicit BlockThunk(LappedTransform* parent) : parent_(parent) {}
+
+ void ProcessBlock(const float* const* input,
+ size_t num_frames,
+ size_t num_input_channels,
+ size_t num_output_channels,
+ float* const* output) override;
+
+ private:
+ LappedTransform* const parent_;
+ } blocker_callback_;
+
+ const size_t num_in_channels_;
+ const size_t num_out_channels_;
+
+ const size_t block_length_;
+ const size_t chunk_length_;
+
+ Callback* const block_processor_;
+ Blocker blocker_;
+
+ // TODO(alessiob): Replace RealFourier with a different FFT library.
+ std::unique_ptr<RealFourier> fft_;
+ const size_t cplx_length_;
+ AlignedArray<float> real_buf_;
+  AlignedArray<std::complex<float>> cplx_pre_;
+  AlignedArray<std::complex<float>> cplx_post_;
+};
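+
+// A minimal usage sketch (illustrative; IdentityCallback is not part of this
+// API). With a rectangular window and shift == block_length there is no
+// overlap, so the output reproduces the input after initial_delay() frames:
+//
+//   class IdentityCallback : public LappedTransform::Callback {
+//    public:
+//     void ProcessAudioBlock(const std::complex<float>* const* in_block,
+//                            size_t num_in_channels, size_t frames,
+//                            size_t num_out_channels,
+//                            std::complex<float>* const* out_block) override {
+//       for (size_t i = 0; i < num_out_channels; ++i)
+//         std::copy(in_block[i], in_block[i] + frames, out_block[i]);
+//     }
+//   };
+//
+//   IdentityCallback callback;
+//   std::vector<float> window(256, 1.0f);
+//   LappedTransform transform(/*num_in_channels=*/1, /*num_out_channels=*/1,
+//                             /*chunk_length=*/480, window.data(),
+//                             /*block_length=*/256, /*shift_amount=*/256,
+//                             &callback);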
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_CODING_CODECS_OPUS_TEST_LAPPED_TRANSFORM_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/opus/test/lapped_transform_unittest.cc b/third_party/libwebrtc/modules/audio_coding/codecs/opus/test/lapped_transform_unittest.cc
new file mode 100644
index 0000000000..1003ed52e5
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/opus/test/lapped_transform_unittest.cc
@@ -0,0 +1,203 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/opus/test/lapped_transform.h"
+
+#include <algorithm>
+#include <cmath>
+#include <cstring>
+
+#include "test/gtest.h"
+
+using std::complex;
+
+namespace {
+
+class NoopCallback : public webrtc::LappedTransform::Callback {
+ public:
+ NoopCallback() : block_num_(0) {}
+
+ void ProcessAudioBlock(const complex<float>* const* in_block,
+ size_t in_channels,
+ size_t frames,
+ size_t out_channels,
+ complex<float>* const* out_block) override {
+ RTC_CHECK_EQ(in_channels, out_channels);
+ for (size_t i = 0; i < out_channels; ++i) {
+ memcpy(out_block[i], in_block[i], sizeof(**in_block) * frames);
+ }
+ ++block_num_;
+ }
+
+ size_t block_num() { return block_num_; }
+
+ private:
+ size_t block_num_;
+};
+
+class FftCheckerCallback : public webrtc::LappedTransform::Callback {
+ public:
+ FftCheckerCallback() : block_num_(0) {}
+
+ void ProcessAudioBlock(const complex<float>* const* in_block,
+ size_t in_channels,
+ size_t frames,
+ size_t out_channels,
+ complex<float>* const* out_block) override {
+ RTC_CHECK_EQ(in_channels, out_channels);
+
+ size_t full_length = (frames - 1) * 2;
+ ++block_num_;
+
+ if (block_num_ > 0) {
+ ASSERT_NEAR(in_block[0][0].real(), static_cast<float>(full_length),
+ 1e-5f);
+ ASSERT_NEAR(in_block[0][0].imag(), 0.0f, 1e-5f);
+ for (size_t i = 1; i < frames; ++i) {
+ ASSERT_NEAR(in_block[0][i].real(), 0.0f, 1e-5f);
+ ASSERT_NEAR(in_block[0][i].imag(), 0.0f, 1e-5f);
+ }
+ }
+ }
+
+ size_t block_num() { return block_num_; }
+
+ private:
+ size_t block_num_;
+};
+
+void SetFloatArray(float value, int rows, int cols, float* const* array) {
+ for (int i = 0; i < rows; ++i) {
+ for (int j = 0; j < cols; ++j) {
+ array[i][j] = value;
+ }
+ }
+}
+
+} // namespace
+
+namespace webrtc {
+
+TEST(LappedTransformTest, Windowless) {
+ const size_t kChannels = 3;
+ const size_t kChunkLength = 512;
+ const size_t kBlockLength = 64;
+ const size_t kShiftAmount = 64;
+ NoopCallback noop;
+
+ // Rectangular window.
+ float window[kBlockLength];
+ std::fill(window, &window[kBlockLength], 1.0f);
+
+ LappedTransform trans(kChannels, kChannels, kChunkLength, window,
+ kBlockLength, kShiftAmount, &noop);
+ float in_buffer[kChannels][kChunkLength];
+ float* in_chunk[kChannels];
+ float out_buffer[kChannels][kChunkLength];
+ float* out_chunk[kChannels];
+
+ in_chunk[0] = in_buffer[0];
+ in_chunk[1] = in_buffer[1];
+ in_chunk[2] = in_buffer[2];
+ out_chunk[0] = out_buffer[0];
+ out_chunk[1] = out_buffer[1];
+ out_chunk[2] = out_buffer[2];
+ SetFloatArray(2.0f, kChannels, kChunkLength, in_chunk);
+ SetFloatArray(-1.0f, kChannels, kChunkLength, out_chunk);
+
+ trans.ProcessChunk(in_chunk, out_chunk);
+
+ for (size_t i = 0; i < kChannels; ++i) {
+ for (size_t j = 0; j < kChunkLength; ++j) {
+ ASSERT_NEAR(out_chunk[i][j], 2.0f, 1e-5f);
+ }
+ }
+
+ ASSERT_EQ(kChunkLength / kBlockLength, noop.block_num());
+}
+
+TEST(LappedTransformTest, IdentityProcessor) {
+ const size_t kChunkLength = 512;
+ const size_t kBlockLength = 64;
+ const size_t kShiftAmount = 32;
+ NoopCallback noop;
+
+  // Identity window for `overlap = block_size / 2`.
+ float window[kBlockLength];
+ std::fill(window, &window[kBlockLength], std::sqrt(0.5f));
+
+ LappedTransform trans(1, 1, kChunkLength, window, kBlockLength, kShiftAmount,
+ &noop);
+ float in_buffer[kChunkLength];
+ float* in_chunk = in_buffer;
+ float out_buffer[kChunkLength];
+ float* out_chunk = out_buffer;
+
+ SetFloatArray(2.0f, 1, kChunkLength, &in_chunk);
+ SetFloatArray(-1.0f, 1, kChunkLength, &out_chunk);
+
+ trans.ProcessChunk(&in_chunk, &out_chunk);
+
+ for (size_t i = 0; i < kChunkLength; ++i) {
+ ASSERT_NEAR(out_chunk[i], (i < kBlockLength - kShiftAmount) ? 0.0f : 2.0f,
+ 1e-5f);
+ }
+
+ ASSERT_EQ(kChunkLength / kShiftAmount, noop.block_num());
+}
+
+TEST(LappedTransformTest, Callbacks) {
+ const size_t kChunkLength = 512;
+ const size_t kBlockLength = 64;
+ FftCheckerCallback call;
+
+ // Rectangular window.
+ float window[kBlockLength];
+ std::fill(window, &window[kBlockLength], 1.0f);
+
+ LappedTransform trans(1, 1, kChunkLength, window, kBlockLength, kBlockLength,
+ &call);
+ float in_buffer[kChunkLength];
+ float* in_chunk = in_buffer;
+ float out_buffer[kChunkLength];
+ float* out_chunk = out_buffer;
+
+ SetFloatArray(1.0f, 1, kChunkLength, &in_chunk);
+ SetFloatArray(-1.0f, 1, kChunkLength, &out_chunk);
+
+ trans.ProcessChunk(&in_chunk, &out_chunk);
+
+ ASSERT_EQ(kChunkLength / kBlockLength, call.block_num());
+}
+
+TEST(LappedTransformTest, chunk_length) {
+ const size_t kBlockLength = 64;
+ FftCheckerCallback call;
+ const float window[kBlockLength] = {};
+
+ // Make sure that chunk_length returns the same value passed to the
+ // LappedTransform constructor.
+ {
+ const size_t kExpectedChunkLength = 512;
+ const LappedTransform trans(1, 1, kExpectedChunkLength, window,
+ kBlockLength, kBlockLength, &call);
+
+ EXPECT_EQ(kExpectedChunkLength, trans.chunk_length());
+ }
+ {
+ const size_t kExpectedChunkLength = 160;
+ const LappedTransform trans(1, 1, kExpectedChunkLength, window,
+ kBlockLength, kBlockLength, &call);
+
+ EXPECT_EQ(kExpectedChunkLength, trans.chunk_length());
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/pcm16b/audio_decoder_pcm16b.cc b/third_party/libwebrtc/modules/audio_coding/codecs/pcm16b/audio_decoder_pcm16b.cc
new file mode 100644
index 0000000000..7761efe8b3
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/pcm16b/audio_decoder_pcm16b.cc
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/pcm16b/audio_decoder_pcm16b.h"
+
+#include <utility>
+
+#include "modules/audio_coding/codecs/legacy_encoded_audio_frame.h"
+#include "modules/audio_coding/codecs/pcm16b/pcm16b.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+AudioDecoderPcm16B::AudioDecoderPcm16B(int sample_rate_hz, size_t num_channels)
+ : sample_rate_hz_(sample_rate_hz), num_channels_(num_channels) {
+ RTC_DCHECK(sample_rate_hz == 8000 || sample_rate_hz == 16000 ||
+ sample_rate_hz == 32000 || sample_rate_hz == 48000)
+ << "Unsupported sample rate " << sample_rate_hz;
+ RTC_DCHECK_GE(num_channels, 1);
+}
+
+void AudioDecoderPcm16B::Reset() {}
+
+int AudioDecoderPcm16B::SampleRateHz() const {
+ return sample_rate_hz_;
+}
+
+size_t AudioDecoderPcm16B::Channels() const {
+ return num_channels_;
+}
+
+int AudioDecoderPcm16B::DecodeInternal(const uint8_t* encoded,
+ size_t encoded_len,
+ int sample_rate_hz,
+ int16_t* decoded,
+ SpeechType* speech_type) {
+ RTC_DCHECK_EQ(sample_rate_hz_, sample_rate_hz);
+ // Adjust the encoded length down to ensure the same number of samples in each
+ // channel.
+ const size_t encoded_len_adjusted =
+ PacketDuration(encoded, encoded_len) * 2 *
+ Channels(); // 2 bytes per sample per channel
+ size_t ret = WebRtcPcm16b_Decode(encoded, encoded_len_adjusted, decoded);
+ *speech_type = ConvertSpeechType(1);
+ return static_cast<int>(ret);
+}
+
+std::vector<AudioDecoder::ParseResult> AudioDecoderPcm16B::ParsePayload(
+ rtc::Buffer&& payload,
+ uint32_t timestamp) {
+ const int samples_per_ms = rtc::CheckedDivExact(sample_rate_hz_, 1000);
+ return LegacyEncodedAudioFrame::SplitBySamples(
+ this, std::move(payload), timestamp, samples_per_ms * 2 * num_channels_,
+ samples_per_ms);
+}
+
+int AudioDecoderPcm16B::PacketDuration(const uint8_t* encoded,
+ size_t encoded_len) const {
+  // Two encoded bytes per sample per channel.
+ return static_cast<int>(encoded_len / (2 * Channels()));
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/pcm16b/audio_decoder_pcm16b.h b/third_party/libwebrtc/modules/audio_coding/codecs/pcm16b/audio_decoder_pcm16b.h
new file mode 100644
index 0000000000..6f50161d3f
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/pcm16b/audio_decoder_pcm16b.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_PCM16B_AUDIO_DECODER_PCM16B_H_
+#define MODULES_AUDIO_CODING_CODECS_PCM16B_AUDIO_DECODER_PCM16B_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <vector>
+
+#include "api/audio_codecs/audio_decoder.h"
+#include "rtc_base/buffer.h"
+
+namespace webrtc {
+
+class AudioDecoderPcm16B final : public AudioDecoder {
+ public:
+ AudioDecoderPcm16B(int sample_rate_hz, size_t num_channels);
+
+ AudioDecoderPcm16B(const AudioDecoderPcm16B&) = delete;
+ AudioDecoderPcm16B& operator=(const AudioDecoderPcm16B&) = delete;
+
+ void Reset() override;
+ std::vector<ParseResult> ParsePayload(rtc::Buffer&& payload,
+ uint32_t timestamp) override;
+ int PacketDuration(const uint8_t* encoded, size_t encoded_len) const override;
+ int SampleRateHz() const override;
+ size_t Channels() const override;
+
+ protected:
+ int DecodeInternal(const uint8_t* encoded,
+ size_t encoded_len,
+ int sample_rate_hz,
+ int16_t* decoded,
+ SpeechType* speech_type) override;
+
+ private:
+ const int sample_rate_hz_;
+ const size_t num_channels_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_CODING_CODECS_PCM16B_AUDIO_DECODER_PCM16B_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/pcm16b/audio_encoder_pcm16b.cc b/third_party/libwebrtc/modules/audio_coding/codecs/pcm16b/audio_encoder_pcm16b.cc
new file mode 100644
index 0000000000..9445b1ee3e
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/pcm16b/audio_encoder_pcm16b.cc
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/pcm16b/audio_encoder_pcm16b.h"
+
+#include "modules/audio_coding/codecs/pcm16b/pcm16b.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+size_t AudioEncoderPcm16B::EncodeCall(const int16_t* audio,
+ size_t input_len,
+ uint8_t* encoded) {
+ return WebRtcPcm16b_Encode(audio, input_len, encoded);
+}
+
+size_t AudioEncoderPcm16B::BytesPerSample() const {
+ return 2;
+}
+
+AudioEncoder::CodecType AudioEncoderPcm16B::GetCodecType() const {
+ return CodecType::kOther;
+}
+
+bool AudioEncoderPcm16B::Config::IsOk() const {
+ if ((sample_rate_hz != 8000) && (sample_rate_hz != 16000) &&
+ (sample_rate_hz != 32000) && (sample_rate_hz != 48000))
+ return false;
+ return AudioEncoderPcm::Config::IsOk();
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/pcm16b/audio_encoder_pcm16b.h b/third_party/libwebrtc/modules/audio_coding/codecs/pcm16b/audio_encoder_pcm16b.h
new file mode 100644
index 0000000000..c363b40b3f
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/pcm16b/audio_encoder_pcm16b.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_PCM16B_AUDIO_ENCODER_PCM16B_H_
+#define MODULES_AUDIO_CODING_CODECS_PCM16B_AUDIO_ENCODER_PCM16B_H_
+
+#include "modules/audio_coding/codecs/g711/audio_encoder_pcm.h"
+
+namespace webrtc {
+
+class AudioEncoderPcm16B final : public AudioEncoderPcm {
+ public:
+ struct Config : public AudioEncoderPcm::Config {
+ public:
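+    // 107 is a default in the dynamic RTP payload type range (96-127);
+    // callers typically override it when registering the codec.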
+ Config() : AudioEncoderPcm::Config(107), sample_rate_hz(8000) {}
+ bool IsOk() const;
+
+ int sample_rate_hz;
+ };
+
+ explicit AudioEncoderPcm16B(const Config& config)
+ : AudioEncoderPcm(config, config.sample_rate_hz) {}
+
+ AudioEncoderPcm16B(const AudioEncoderPcm16B&) = delete;
+ AudioEncoderPcm16B& operator=(const AudioEncoderPcm16B&) = delete;
+
+ protected:
+ size_t EncodeCall(const int16_t* audio,
+ size_t input_len,
+ uint8_t* encoded) override;
+
+ size_t BytesPerSample() const override;
+
+ AudioEncoder::CodecType GetCodecType() const override;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_CODING_CODECS_PCM16B_AUDIO_ENCODER_PCM16B_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/pcm16b/pcm16b.c b/third_party/libwebrtc/modules/audio_coding/codecs/pcm16b/pcm16b.c
new file mode 100644
index 0000000000..2f6dce5f41
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/pcm16b/pcm16b.c
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/pcm16b/pcm16b.h"
+
+size_t WebRtcPcm16b_Encode(const int16_t* speech,
+ size_t len,
+ uint8_t* encoded) {
+ size_t i;
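+  // Each sample is written big endian: high byte first, then low byte.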
+ for (i = 0; i < len; ++i) {
+ uint16_t s = speech[i];
+ encoded[2 * i] = s >> 8;
+ encoded[2 * i + 1] = s;
+ }
+ return 2 * len;
+}
+
+size_t WebRtcPcm16b_Decode(const uint8_t* encoded,
+ size_t len,
+ int16_t* speech) {
+ size_t i;
+ for (i = 0; i < len / 2; ++i)
+ speech[i] = encoded[2 * i] << 8 | encoded[2 * i + 1];
+ return len / 2;
+}
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/pcm16b/pcm16b.h b/third_party/libwebrtc/modules/audio_coding/codecs/pcm16b/pcm16b.h
new file mode 100644
index 0000000000..75d1efda3b
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/pcm16b/pcm16b.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_PCM16B_PCM16B_H_
+#define MODULES_AUDIO_CODING_CODECS_PCM16B_PCM16B_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/****************************************************************************
+ * WebRtcPcm16b_Encode(...)
+ *
+ * "Encode" a sample vector to 16 bit linear (Encoded standard is big endian)
+ *
+ * Input:
+ * - speech : Input speech vector
+ * - len : Number of samples in speech vector
+ *
+ * Output:
+ * - encoded : Encoded data vector (big endian 16 bit)
+ *
+ * Returned value : Length (in bytes) of coded data.
+ * Always equal to twice the len input parameter.
+ */
+
+size_t WebRtcPcm16b_Encode(const int16_t* speech, size_t len, uint8_t* encoded);
+
+/****************************************************************************
+ * WebRtcPcm16b_Decode(...)
+ *
+ * "Decode" a vector to 16 bit linear (Encoded standard is big endian)
+ *
+ * Input:
+ * - encoded : Encoded data vector (big endian 16 bit)
+ * - len : Number of bytes in encoded
+ *
+ * Output:
+ * - speech : Decoded speech vector
+ *
+ * Returned value : Number of samples in speech. If len is odd,
+ * the trailing byte is ignored.
+ */
+
+size_t WebRtcPcm16b_Decode(const uint8_t* encoded, size_t len, int16_t* speech);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* MODULES_AUDIO_CODING_CODECS_PCM16B_PCM16B_H_ */
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/pcm16b/pcm16b_common.cc b/third_party/libwebrtc/modules/audio_coding/codecs/pcm16b/pcm16b_common.cc
new file mode 100644
index 0000000000..ecf91b45ac
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/pcm16b/pcm16b_common.cc
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/pcm16b/pcm16b_common.h"
+
+#include <stdint.h>
+
+#include <initializer_list>
+
+namespace webrtc {
+
+void Pcm16BAppendSupportedCodecSpecs(std::vector<AudioCodecSpec>* specs) {
+ for (uint8_t num_channels : {1, 2}) {
+ for (int sample_rate_hz : {8000, 16000, 32000}) {
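+      // L16 is uncompressed 16-bit PCM, so the advertised bitrate is fixed
+      // at sample_rate_hz * num_channels * 16 bits per second.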
+ specs->push_back(
+ {{"L16", sample_rate_hz, num_channels},
+ {sample_rate_hz, num_channels, sample_rate_hz * num_channels * 16}});
+ }
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/pcm16b/pcm16b_common.h b/third_party/libwebrtc/modules/audio_coding/codecs/pcm16b/pcm16b_common.h
new file mode 100644
index 0000000000..3fae717ff3
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/pcm16b/pcm16b_common.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_PCM16B_PCM16B_COMMON_H_
+#define MODULES_AUDIO_CODING_CODECS_PCM16B_PCM16B_COMMON_H_
+
+#include <vector>
+
+#include "api/audio_codecs/audio_format.h"
+
+namespace webrtc {
+void Pcm16BAppendSupportedCodecSpecs(std::vector<AudioCodecSpec>* specs);
+}  // namespace webrtc
+
+#endif // MODULES_AUDIO_CODING_CODECS_PCM16B_PCM16B_COMMON_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red.cc b/third_party/libwebrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red.cc
new file mode 100644
index 0000000000..724bba52d6
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red.cc
@@ -0,0 +1,272 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/red/audio_encoder_copy_red.h"
+
+#include <string.h>
+
+#include <utility>
+#include <vector>
+
+#include "absl/strings/string_view.h"
+#include "rtc_base/byte_order.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+static constexpr const int kRedMaxPacketSize =
+ 1 << 10; // RED packets must be less than 1024 bytes to fit the 10 bit
+ // block length.
+static constexpr const size_t kRedMaxTimestampDelta =
+ 1 << 14; // RED packets can encode a timestamp delta of 14 bits.
+static constexpr const size_t kAudioMaxRtpPacketLen =
+ 1200; // The typical MTU is 1200 bytes.
+
+static constexpr size_t kRedHeaderLength = 4; // 4 bytes RED header.
+static constexpr size_t kRedLastHeaderLength =
+ 1; // reduced size for last RED header.
+
+static constexpr size_t kRedNumberOfRedundantEncodings =
+ 1; // The level of redundancy we support.
+
+AudioEncoderCopyRed::Config::Config() = default;
+AudioEncoderCopyRed::Config::Config(Config&&) = default;
+AudioEncoderCopyRed::Config::~Config() = default;
+
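+// Parses the "WebRTC-Audio-Red-For-Opus" field trial. A value of the form
+// "Enabled-N" with N between 0 and 9 overrides the number of redundant
+// encodings; anything else keeps the default of one redundant encoding.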
+size_t GetMaxRedundancyFromFieldTrial(const FieldTrialsView& field_trials) {
+ const std::string red_trial =
+ field_trials.Lookup("WebRTC-Audio-Red-For-Opus");
+ size_t redundancy = 0;
+ if (sscanf(red_trial.c_str(), "Enabled-%zu", &redundancy) != 1 ||
+ redundancy > 9) {
+ return kRedNumberOfRedundantEncodings;
+ }
+ return redundancy;
+}
+
+AudioEncoderCopyRed::AudioEncoderCopyRed(Config&& config,
+ const FieldTrialsView& field_trials)
+ : speech_encoder_(std::move(config.speech_encoder)),
+ primary_encoded_(0, kAudioMaxRtpPacketLen),
+ max_packet_length_(kAudioMaxRtpPacketLen),
+ red_payload_type_(config.payload_type) {
+ RTC_CHECK(speech_encoder_) << "Speech encoder not provided.";
+
+ auto number_of_redundant_encodings =
+ GetMaxRedundancyFromFieldTrial(field_trials);
+ for (size_t i = 0; i < number_of_redundant_encodings; i++) {
+ std::pair<EncodedInfo, rtc::Buffer> redundant;
+ redundant.second.EnsureCapacity(kAudioMaxRtpPacketLen);
+ redundant_encodings_.push_front(std::move(redundant));
+ }
+}
+
+AudioEncoderCopyRed::~AudioEncoderCopyRed() = default;
+
+int AudioEncoderCopyRed::SampleRateHz() const {
+ return speech_encoder_->SampleRateHz();
+}
+
+size_t AudioEncoderCopyRed::NumChannels() const {
+ return speech_encoder_->NumChannels();
+}
+
+int AudioEncoderCopyRed::RtpTimestampRateHz() const {
+ return speech_encoder_->RtpTimestampRateHz();
+}
+
+size_t AudioEncoderCopyRed::Num10MsFramesInNextPacket() const {
+ return speech_encoder_->Num10MsFramesInNextPacket();
+}
+
+size_t AudioEncoderCopyRed::Max10MsFramesInAPacket() const {
+ return speech_encoder_->Max10MsFramesInAPacket();
+}
+
+int AudioEncoderCopyRed::GetTargetBitrate() const {
+ return speech_encoder_->GetTargetBitrate();
+}
+
+AudioEncoder::EncodedInfo AudioEncoderCopyRed::EncodeImpl(
+ uint32_t rtp_timestamp,
+ rtc::ArrayView<const int16_t> audio,
+ rtc::Buffer* encoded) {
+ primary_encoded_.Clear();
+ EncodedInfo info =
+ speech_encoder_->Encode(rtp_timestamp, audio, &primary_encoded_);
+ RTC_CHECK(info.redundant.empty()) << "Cannot use nested redundant encoders.";
+ RTC_DCHECK_EQ(primary_encoded_.size(), info.encoded_bytes);
+
+ if (info.encoded_bytes == 0 || info.encoded_bytes >= kRedMaxPacketSize) {
+ return info;
+ }
+ RTC_DCHECK_GT(max_packet_length_, info.encoded_bytes);
+
+ size_t header_length_bytes = kRedLastHeaderLength;
+ size_t bytes_available = max_packet_length_ - info.encoded_bytes;
+ auto it = redundant_encodings_.begin();
+
+  // Determine how much redundancy we can fit into our packet by iterating
+  // forward. This is limited both by the available length and by the
+  // timestamp difference; the latter matters with Opus DTX, whose 400 ms
+  // timestamp gaps exceed RED's 14-bit timestamp delta field.
+ for (; it != redundant_encodings_.end(); it++) {
+ if (bytes_available < kRedHeaderLength + it->first.encoded_bytes) {
+ break;
+ }
+ if (it->first.encoded_bytes == 0) {
+ break;
+ }
+ if (rtp_timestamp - it->first.encoded_timestamp >= kRedMaxTimestampDelta) {
+ break;
+ }
+ bytes_available -= kRedHeaderLength + it->first.encoded_bytes;
+ header_length_bytes += kRedHeaderLength;
+ }
+
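+  // Each complete RED header is 4 bytes, laid out as in RFC 2198, section 3:
+  //   F (1 bit) | block PT (7 bits) | timestamp offset (14 bits) |
+  //   block length (10 bits)
+  // F=1 means another header follows; the last header is a single byte that
+  // carries only F=0 and the primary payload type.
+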
+ // Allocate room for RFC 2198 header.
+ encoded->SetSize(header_length_bytes);
+
+ // Iterate backwards and append the data.
+ size_t header_offset = 0;
+ while (it-- != redundant_encodings_.begin()) {
+ encoded->AppendData(it->second);
+
+ const uint32_t timestamp_delta =
+ info.encoded_timestamp - it->first.encoded_timestamp;
+ encoded->data()[header_offset] = it->first.payload_type | 0x80;
+ rtc::SetBE16(static_cast<uint8_t*>(encoded->data()) + header_offset + 1,
+ (timestamp_delta << 2) | (it->first.encoded_bytes >> 8));
+ encoded->data()[header_offset + 3] = it->first.encoded_bytes & 0xff;
+ header_offset += kRedHeaderLength;
+ info.redundant.push_back(it->first);
+ }
+
+  // `info` is implicitly sliced to an EncodedInfoLeaf struct here, which
+  // discards its vector of redundant information. This is intentional.
+ if (header_length_bytes > kRedHeaderLength) {
+ info.redundant.push_back(info);
+ RTC_DCHECK_EQ(info.speech,
+ info.redundant[info.redundant.size() - 1].speech);
+ }
+
+ encoded->AppendData(primary_encoded_);
+ RTC_DCHECK_EQ(header_offset, header_length_bytes - 1);
+ encoded->data()[header_offset] = info.payload_type;
+
+ // Shift the redundant encodings.
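+  // The most recent encoding sits at the front of the list. Walking in
+  // reverse copies each entry from its newer neighbor, after which the new
+  // primary encoding is stored at the front.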
+ auto rit = redundant_encodings_.rbegin();
+ for (auto next = std::next(rit); next != redundant_encodings_.rend();
+ rit++, next = std::next(rit)) {
+ rit->first = next->first;
+ rit->second.SetData(next->second);
+ }
+ it = redundant_encodings_.begin();
+ if (it != redundant_encodings_.end()) {
+ it->first = info;
+ it->second.SetData(primary_encoded_);
+ }
+
+ // Update main EncodedInfo.
+ info.payload_type = red_payload_type_;
+ info.encoded_bytes = encoded->size();
+ return info;
+}
+
+void AudioEncoderCopyRed::Reset() {
+ speech_encoder_->Reset();
+ auto number_of_redundant_encodings = redundant_encodings_.size();
+ redundant_encodings_.clear();
+ for (size_t i = 0; i < number_of_redundant_encodings; i++) {
+ std::pair<EncodedInfo, rtc::Buffer> redundant;
+ redundant.second.EnsureCapacity(kAudioMaxRtpPacketLen);
+ redundant_encodings_.push_front(std::move(redundant));
+ }
+}
+
+bool AudioEncoderCopyRed::SetFec(bool enable) {
+ return speech_encoder_->SetFec(enable);
+}
+
+bool AudioEncoderCopyRed::SetDtx(bool enable) {
+ return speech_encoder_->SetDtx(enable);
+}
+
+bool AudioEncoderCopyRed::GetDtx() const {
+ return speech_encoder_->GetDtx();
+}
+
+bool AudioEncoderCopyRed::SetApplication(Application application) {
+ return speech_encoder_->SetApplication(application);
+}
+
+void AudioEncoderCopyRed::SetMaxPlaybackRate(int frequency_hz) {
+ speech_encoder_->SetMaxPlaybackRate(frequency_hz);
+}
+
+bool AudioEncoderCopyRed::EnableAudioNetworkAdaptor(
+ const std::string& config_string,
+ RtcEventLog* event_log) {
+ return speech_encoder_->EnableAudioNetworkAdaptor(config_string, event_log);
+}
+
+void AudioEncoderCopyRed::DisableAudioNetworkAdaptor() {
+ speech_encoder_->DisableAudioNetworkAdaptor();
+}
+
+void AudioEncoderCopyRed::OnReceivedUplinkPacketLossFraction(
+ float uplink_packet_loss_fraction) {
+ speech_encoder_->OnReceivedUplinkPacketLossFraction(
+ uplink_packet_loss_fraction);
+}
+
+void AudioEncoderCopyRed::OnReceivedUplinkBandwidth(
+ int target_audio_bitrate_bps,
+ absl::optional<int64_t> bwe_period_ms) {
+ speech_encoder_->OnReceivedUplinkBandwidth(target_audio_bitrate_bps,
+ bwe_period_ms);
+}
+
+void AudioEncoderCopyRed::OnReceivedUplinkAllocation(
+ BitrateAllocationUpdate update) {
+ speech_encoder_->OnReceivedUplinkAllocation(update);
+}
+
+absl::optional<std::pair<TimeDelta, TimeDelta>>
+AudioEncoderCopyRed::GetFrameLengthRange() const {
+ return speech_encoder_->GetFrameLengthRange();
+}
+
+void AudioEncoderCopyRed::OnReceivedRtt(int rtt_ms) {
+ speech_encoder_->OnReceivedRtt(rtt_ms);
+}
+
+void AudioEncoderCopyRed::OnReceivedOverhead(size_t overhead_bytes_per_packet) {
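+  // Shrink the RED payload budget so that the payload plus the per-packet
+  // overhead still fits the assumed 1200-byte MTU.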
+ max_packet_length_ = kAudioMaxRtpPacketLen - overhead_bytes_per_packet;
+ return speech_encoder_->OnReceivedOverhead(overhead_bytes_per_packet);
+}
+
+void AudioEncoderCopyRed::SetReceiverFrameLengthRange(int min_frame_length_ms,
+ int max_frame_length_ms) {
+ return speech_encoder_->SetReceiverFrameLengthRange(min_frame_length_ms,
+ max_frame_length_ms);
+}
+
+ANAStats AudioEncoderCopyRed::GetANAStats() const {
+ return speech_encoder_->GetANAStats();
+}
+
+rtc::ArrayView<std::unique_ptr<AudioEncoder>>
+AudioEncoderCopyRed::ReclaimContainedEncoders() {
+ return rtc::ArrayView<std::unique_ptr<AudioEncoder>>(&speech_encoder_, 1);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red.h b/third_party/libwebrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red.h
new file mode 100644
index 0000000000..359b5eaa17
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_RED_AUDIO_ENCODER_COPY_RED_H_
+#define MODULES_AUDIO_CODING_CODECS_RED_AUDIO_ENCODER_COPY_RED_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <list>
+#include <memory>
+#include <utility>
+
+#include "absl/types/optional.h"
+#include "api/array_view.h"
+#include "api/audio_codecs/audio_encoder.h"
+#include "api/field_trials_view.h"
+#include "api/units/time_delta.h"
+#include "rtc_base/buffer.h"
+
+namespace webrtc {
+
+// This class implements redundant audio coding as described in
+// https://tools.ietf.org/html/rfc2198
+// The class object will have an underlying AudioEncoder object that performs
+// the actual encodings. The current class will gather the N latest encodings
+// from the underlying codec into one packet. By default N is 2 (the primary
+// encoding plus one redundant copy); the "WebRTC-Audio-Red-For-Opus" field
+// trial can raise or disable the redundancy.
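+//
+// A minimal usage sketch (payload type 63 and the Opus encoder are
+// illustrative values, not defaults):
+//
+//   AudioEncoderCopyRed::Config config;
+//   config.payload_type = 63;
+//   config.speech_encoder = std::move(opus_encoder);
+//   AudioEncoderCopyRed red(std::move(config), field_trials);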
+
+class AudioEncoderCopyRed final : public AudioEncoder {
+ public:
+ struct Config {
+ Config();
+ Config(Config&&);
+ ~Config();
+ int payload_type;
+ std::unique_ptr<AudioEncoder> speech_encoder;
+ };
+
+ AudioEncoderCopyRed(Config&& config, const FieldTrialsView& field_trials);
+
+ ~AudioEncoderCopyRed() override;
+
+ AudioEncoderCopyRed(const AudioEncoderCopyRed&) = delete;
+ AudioEncoderCopyRed& operator=(const AudioEncoderCopyRed&) = delete;
+
+ int SampleRateHz() const override;
+ size_t NumChannels() const override;
+ int RtpTimestampRateHz() const override;
+ size_t Num10MsFramesInNextPacket() const override;
+ size_t Max10MsFramesInAPacket() const override;
+ int GetTargetBitrate() const override;
+
+ void Reset() override;
+ bool SetFec(bool enable) override;
+
+ bool SetDtx(bool enable) override;
+ bool GetDtx() const override;
+
+ bool SetApplication(Application application) override;
+ void SetMaxPlaybackRate(int frequency_hz) override;
+ bool EnableAudioNetworkAdaptor(const std::string& config_string,
+ RtcEventLog* event_log) override;
+ void DisableAudioNetworkAdaptor() override;
+ void OnReceivedUplinkPacketLossFraction(
+ float uplink_packet_loss_fraction) override;
+ void OnReceivedUplinkBandwidth(
+ int target_audio_bitrate_bps,
+ absl::optional<int64_t> bwe_period_ms) override;
+ void OnReceivedUplinkAllocation(BitrateAllocationUpdate update) override;
+ void OnReceivedRtt(int rtt_ms) override;
+ void OnReceivedOverhead(size_t overhead_bytes_per_packet) override;
+ void SetReceiverFrameLengthRange(int min_frame_length_ms,
+ int max_frame_length_ms) override;
+ ANAStats GetANAStats() const override;
+ absl::optional<std::pair<TimeDelta, TimeDelta>> GetFrameLengthRange()
+ const override;
+ rtc::ArrayView<std::unique_ptr<AudioEncoder>> ReclaimContainedEncoders()
+ override;
+
+ protected:
+ EncodedInfo EncodeImpl(uint32_t rtp_timestamp,
+ rtc::ArrayView<const int16_t> audio,
+ rtc::Buffer* encoded) override;
+
+ private:
+ std::unique_ptr<AudioEncoder> speech_encoder_;
+ rtc::Buffer primary_encoded_;
+ size_t max_packet_length_;
+ int red_payload_type_;
+ std::list<std::pair<EncodedInfo, rtc::Buffer>> redundant_encodings_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_CODING_CODECS_RED_AUDIO_ENCODER_COPY_RED_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red_unittest.cc b/third_party/libwebrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red_unittest.cc
new file mode 100644
index 0000000000..795a996624
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red_unittest.cc
@@ -0,0 +1,641 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/red/audio_encoder_copy_red.h"
+
+#include <memory>
+#include <vector>
+
+#include "rtc_base/checks.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "test/field_trial.h"
+#include "test/gtest.h"
+#include "test/mock_audio_encoder.h"
+#include "test/scoped_key_value_config.h"
+#include "test/testsupport/rtc_expect_death.h"
+
+using ::testing::_;
+using ::testing::Eq;
+using ::testing::InSequence;
+using ::testing::Invoke;
+using ::testing::MockFunction;
+using ::testing::Not;
+using ::testing::Optional;
+using ::testing::Return;
+using ::testing::SetArgPointee;
+
+namespace webrtc {
+
+namespace {
+static const size_t kMaxNumSamples = 48 * 10 * 2; // 10 ms @ 48 kHz stereo.
+static const size_t kRedLastHeaderLength =
+ 1; // 1 byte RED header for the last element.
+}  // namespace
+
+class AudioEncoderCopyRedTest : public ::testing::Test {
+ protected:
+ AudioEncoderCopyRedTest()
+ : mock_encoder_(new MockAudioEncoder),
+ timestamp_(4711),
+ sample_rate_hz_(16000),
+ num_audio_samples_10ms(sample_rate_hz_ / 100),
+ red_payload_type_(200) {
+ AudioEncoderCopyRed::Config config;
+ config.payload_type = red_payload_type_;
+ config.speech_encoder = std::unique_ptr<AudioEncoder>(mock_encoder_);
+ red_.reset(new AudioEncoderCopyRed(std::move(config), field_trials_));
+ memset(audio_, 0, sizeof(audio_));
+ EXPECT_CALL(*mock_encoder_, NumChannels()).WillRepeatedly(Return(1U));
+ EXPECT_CALL(*mock_encoder_, SampleRateHz())
+ .WillRepeatedly(Return(sample_rate_hz_));
+ }
+
+ void TearDown() override { red_.reset(); }
+
+ void Encode() {
+ ASSERT_TRUE(red_.get() != NULL);
+ encoded_.Clear();
+ encoded_info_ = red_->Encode(
+ timestamp_,
+ rtc::ArrayView<const int16_t>(audio_, num_audio_samples_10ms),
+ &encoded_);
+ timestamp_ += rtc::checked_cast<uint32_t>(num_audio_samples_10ms);
+ }
+
+ test::ScopedKeyValueConfig field_trials_;
+ MockAudioEncoder* mock_encoder_;
+ std::unique_ptr<AudioEncoderCopyRed> red_;
+ uint32_t timestamp_;
+ int16_t audio_[kMaxNumSamples];
+ const int sample_rate_hz_;
+ size_t num_audio_samples_10ms;
+ rtc::Buffer encoded_;
+ AudioEncoder::EncodedInfo encoded_info_;
+ const int red_payload_type_;
+};
+
+TEST_F(AudioEncoderCopyRedTest, CreateAndDestroy) {}
+
+TEST_F(AudioEncoderCopyRedTest, CheckSampleRatePropagation) {
+ EXPECT_CALL(*mock_encoder_, SampleRateHz()).WillOnce(Return(17));
+ EXPECT_EQ(17, red_->SampleRateHz());
+}
+
+TEST_F(AudioEncoderCopyRedTest, CheckNumChannelsPropagation) {
+ EXPECT_CALL(*mock_encoder_, NumChannels()).WillOnce(Return(17U));
+ EXPECT_EQ(17U, red_->NumChannels());
+}
+
+TEST_F(AudioEncoderCopyRedTest, CheckFrameSizePropagation) {
+ EXPECT_CALL(*mock_encoder_, Num10MsFramesInNextPacket())
+ .WillOnce(Return(17U));
+ EXPECT_EQ(17U, red_->Num10MsFramesInNextPacket());
+}
+
+TEST_F(AudioEncoderCopyRedTest, CheckMaxFrameSizePropagation) {
+ EXPECT_CALL(*mock_encoder_, Max10MsFramesInAPacket()).WillOnce(Return(17U));
+ EXPECT_EQ(17U, red_->Max10MsFramesInAPacket());
+}
+
+TEST_F(AudioEncoderCopyRedTest, CheckTargetAudioBitratePropagation) {
+ EXPECT_CALL(*mock_encoder_,
+ OnReceivedUplinkBandwidth(4711, absl::optional<int64_t>()));
+ red_->OnReceivedUplinkBandwidth(4711, absl::nullopt);
+}
+
+TEST_F(AudioEncoderCopyRedTest, CheckPacketLossFractionPropagation) {
+ EXPECT_CALL(*mock_encoder_, OnReceivedUplinkPacketLossFraction(0.5));
+ red_->OnReceivedUplinkPacketLossFraction(0.5);
+}
+
+TEST_F(AudioEncoderCopyRedTest, CheckGetFrameLengthRangePropagation) {
+ auto expected_range =
+ std::make_pair(TimeDelta::Millis(20), TimeDelta::Millis(20));
+ EXPECT_CALL(*mock_encoder_, GetFrameLengthRange())
+ .WillRepeatedly(Return(absl::make_optional(expected_range)));
+ EXPECT_THAT(red_->GetFrameLengthRange(), Optional(Eq(expected_range)));
+}
+
+// Checks that an Encode() call is immediately propagated to the speech
+// encoder.
+TEST_F(AudioEncoderCopyRedTest, CheckImmediateEncode) {
+ // Interleaving the EXPECT_CALL sequence with expectations on the MockFunction
+ // check ensures that exactly one call to EncodeImpl happens in each
+ // Encode call.
+ InSequence s;
+ MockFunction<void(int check_point_id)> check;
+ for (int i = 1; i <= 6; ++i) {
+ EXPECT_CALL(*mock_encoder_, EncodeImpl(_, _, _))
+ .WillRepeatedly(Return(AudioEncoder::EncodedInfo()));
+ EXPECT_CALL(check, Call(i));
+ Encode();
+ check.Call(i);
+ }
+}
+
+// Checks that no output is produced if the underlying codec doesn't emit any
+// new data, even if the RED codec is loaded with a secondary encoding.
+TEST_F(AudioEncoderCopyRedTest, CheckNoOutput) {
+ static const size_t kEncodedSize = 17;
+ static const size_t kHeaderLenBytes = 5;
+ {
+ InSequence s;
+ EXPECT_CALL(*mock_encoder_, EncodeImpl(_, _, _))
+ .WillOnce(Invoke(MockAudioEncoder::FakeEncoding(kEncodedSize)))
+ .WillOnce(Invoke(MockAudioEncoder::FakeEncoding(0)))
+ .WillOnce(Invoke(MockAudioEncoder::FakeEncoding(kEncodedSize)));
+ }
+
+ // Start with one Encode() call that will produce output.
+ Encode();
+ // First call is a special case, since it does not include a secondary
+ // payload.
+ EXPECT_EQ(0u, encoded_info_.redundant.size());
+ EXPECT_EQ(kEncodedSize + kRedLastHeaderLength, encoded_info_.encoded_bytes);
+
+ // Next call to the speech encoder will not produce any output.
+ Encode();
+ EXPECT_EQ(0u, encoded_info_.encoded_bytes);
+
+ // Final call to the speech encoder will produce output.
+ Encode();
+ EXPECT_EQ(2 * kEncodedSize + kHeaderLenBytes, encoded_info_.encoded_bytes);
+ ASSERT_EQ(2u, encoded_info_.redundant.size());
+}
+
+// Checks that the correct payload sizes are populated into the redundancy
+// information for a redundancy level of 1.
+TEST_F(AudioEncoderCopyRedTest, CheckPayloadSizes1) {
+ // Let the mock encoder return payload sizes 1, 2, 3, ..., 10 for the sequence
+ // of calls.
+ static const int kNumPackets = 10;
+ InSequence s;
+ for (int encode_size = 1; encode_size <= kNumPackets; ++encode_size) {
+ EXPECT_CALL(*mock_encoder_, EncodeImpl(_, _, _))
+ .WillOnce(Invoke(MockAudioEncoder::FakeEncoding(encode_size)));
+ }
+
+ // First call is a special case, since it does not include a secondary
+ // payload.
+ Encode();
+ EXPECT_EQ(0u, encoded_info_.redundant.size());
+ EXPECT_EQ(kRedLastHeaderLength + 1u, encoded_info_.encoded_bytes);
+
+ for (size_t i = 2; i <= kNumPackets; ++i) {
+ Encode();
+ ASSERT_EQ(2u, encoded_info_.redundant.size());
+ EXPECT_EQ(i, encoded_info_.redundant[1].encoded_bytes);
+ EXPECT_EQ(i - 1, encoded_info_.redundant[0].encoded_bytes);
+ EXPECT_EQ(5 + i + (i - 1), encoded_info_.encoded_bytes);
+ }
+}
+
+// Checks that the correct payload sizes are populated into the redundancy
+// information for a redundancy level of 0.
+TEST_F(AudioEncoderCopyRedTest, CheckPayloadSizes0) {
+ webrtc::test::ScopedKeyValueConfig field_trials(
+ field_trials_, "WebRTC-Audio-Red-For-Opus/Enabled-0/");
+ // Recreate the RED encoder to take the new field trial setting into account.
+ AudioEncoderCopyRed::Config config;
+ config.payload_type = red_payload_type_;
+ config.speech_encoder = std::move(red_->ReclaimContainedEncoders()[0]);
+ red_.reset(new AudioEncoderCopyRed(std::move(config), field_trials));
+
+ // Let the mock encoder return payload sizes 1, 2, 3, ..., 10 for the sequence
+ // of calls.
+ static const int kNumPackets = 10;
+ InSequence s;
+ for (int encode_size = 1; encode_size <= kNumPackets; ++encode_size) {
+ EXPECT_CALL(*mock_encoder_, EncodeImpl(_, _, _))
+ .WillOnce(Invoke(MockAudioEncoder::FakeEncoding(encode_size)));
+ }
+
+ for (size_t i = 1; i <= kNumPackets; ++i) {
+ Encode();
+ ASSERT_EQ(0u, encoded_info_.redundant.size());
+ EXPECT_EQ(1 + i, encoded_info_.encoded_bytes);
+ }
+}
+
+// Checks that the correct payload sizes are populated into the redundancy
+// information for a redundancy level of 2.
+TEST_F(AudioEncoderCopyRedTest, CheckPayloadSizes2) {
+ webrtc::test::ScopedKeyValueConfig field_trials(
+ field_trials_, "WebRTC-Audio-Red-For-Opus/Enabled-2/");
+ // Recreate the RED encoder to take the new field trial setting into account.
+ AudioEncoderCopyRed::Config config;
+ config.payload_type = red_payload_type_;
+ config.speech_encoder = std::move(red_->ReclaimContainedEncoders()[0]);
+ red_.reset(new AudioEncoderCopyRed(std::move(config), field_trials));
+
+ // Let the mock encoder return payload sizes 1, 2, 3, ..., 10 for the sequence
+ // of calls.
+ static const int kNumPackets = 10;
+ InSequence s;
+ for (int encode_size = 1; encode_size <= kNumPackets; ++encode_size) {
+ EXPECT_CALL(*mock_encoder_, EncodeImpl(_, _, _))
+ .WillOnce(Invoke(MockAudioEncoder::FakeEncoding(encode_size)));
+ }
+
+ // First call is a special case, since it does not include a secondary
+ // payload.
+ Encode();
+ EXPECT_EQ(0u, encoded_info_.redundant.size());
+ EXPECT_EQ(kRedLastHeaderLength + 1u, encoded_info_.encoded_bytes);
+
+ // Second call is also special since it does not include a tertiary
+ // payload.
+ Encode();
+ EXPECT_EQ(2u, encoded_info_.redundant.size());
+ EXPECT_EQ(8u, encoded_info_.encoded_bytes);
+
+ for (size_t i = 3; i <= kNumPackets; ++i) {
+ Encode();
+ ASSERT_EQ(3u, encoded_info_.redundant.size());
+ EXPECT_EQ(i, encoded_info_.redundant[2].encoded_bytes);
+ EXPECT_EQ(i - 1, encoded_info_.redundant[1].encoded_bytes);
+ EXPECT_EQ(i - 2, encoded_info_.redundant[0].encoded_bytes);
+ EXPECT_EQ(9 + i + (i - 1) + (i - 2), encoded_info_.encoded_bytes);
+ }
+}
+
+// Checks that the correct payload sizes are populated into the redundancy
+// information for a redundancy level of 3.
+TEST_F(AudioEncoderCopyRedTest, CheckPayloadSizes3) {
+ webrtc::test::ScopedKeyValueConfig field_trials(
+ field_trials_, "WebRTC-Audio-Red-For-Opus/Enabled-3/");
+ // Recreate the RED encoder to take the new field trial setting into account.
+ AudioEncoderCopyRed::Config config;
+ config.payload_type = red_payload_type_;
+ config.speech_encoder = std::move(red_->ReclaimContainedEncoders()[0]);
+  red_.reset(new AudioEncoderCopyRed(std::move(config), field_trials));
+
+ // Let the mock encoder return payload sizes 1, 2, 3, ..., 10 for the sequence
+ // of calls.
+ static const int kNumPackets = 10;
+ InSequence s;
+ for (int encode_size = 1; encode_size <= kNumPackets; ++encode_size) {
+ EXPECT_CALL(*mock_encoder_, EncodeImpl(_, _, _))
+ .WillOnce(Invoke(MockAudioEncoder::FakeEncoding(encode_size)));
+ }
+
+ // First call is a special case, since it does not include a secondary
+ // payload.
+ Encode();
+ EXPECT_EQ(0u, encoded_info_.redundant.size());
+ EXPECT_EQ(kRedLastHeaderLength + 1u, encoded_info_.encoded_bytes);
+
+ // Second call is also special since it does not include a tertiary
+ // payload.
+ Encode();
+ EXPECT_EQ(2u, encoded_info_.redundant.size());
+ EXPECT_EQ(8u, encoded_info_.encoded_bytes);
+
+ // Third call is also special since it does not include a quaternary
+ // payload.
+ Encode();
+ EXPECT_EQ(3u, encoded_info_.redundant.size());
+ EXPECT_EQ(15u, encoded_info_.encoded_bytes);
+
+ for (size_t i = 4; i <= kNumPackets; ++i) {
+ Encode();
+ ASSERT_EQ(4u, encoded_info_.redundant.size());
+ EXPECT_EQ(i, encoded_info_.redundant[3].encoded_bytes);
+ EXPECT_EQ(i - 1, encoded_info_.redundant[2].encoded_bytes);
+ EXPECT_EQ(i - 2, encoded_info_.redundant[1].encoded_bytes);
+ EXPECT_EQ(i - 3, encoded_info_.redundant[0].encoded_bytes);
+ EXPECT_EQ(13 + i + (i - 1) + (i - 2) + (i - 3),
+ encoded_info_.encoded_bytes);
+ }
+}
+
+// Checks that the correct timestamps are returned.
+TEST_F(AudioEncoderCopyRedTest, CheckTimestamps) {
+ uint32_t primary_timestamp = timestamp_;
+ AudioEncoder::EncodedInfo info;
+ info.encoded_bytes = 17;
+ info.encoded_timestamp = timestamp_;
+
+ EXPECT_CALL(*mock_encoder_, EncodeImpl(_, _, _))
+ .WillOnce(Invoke(MockAudioEncoder::FakeEncoding(info)));
+
+ // First call is a special case, since it does not include a secondary
+ // payload.
+ Encode();
+ EXPECT_EQ(primary_timestamp, encoded_info_.encoded_timestamp);
+
+ uint32_t secondary_timestamp = primary_timestamp;
+ primary_timestamp = timestamp_;
+ info.encoded_timestamp = timestamp_;
+ EXPECT_CALL(*mock_encoder_, EncodeImpl(_, _, _))
+ .WillOnce(Invoke(MockAudioEncoder::FakeEncoding(info)));
+
+ Encode();
+ ASSERT_EQ(2u, encoded_info_.redundant.size());
+ EXPECT_EQ(primary_timestamp, encoded_info_.redundant[1].encoded_timestamp);
+ EXPECT_EQ(secondary_timestamp, encoded_info_.redundant[0].encoded_timestamp);
+ EXPECT_EQ(primary_timestamp, encoded_info_.encoded_timestamp);
+}
+
+// Checks that the primary and secondary payloads are written correctly.
+TEST_F(AudioEncoderCopyRedTest, CheckPayloads) {
+ // Let the mock encoder write payloads with increasing values. The first
+ // payload will have values 0, 1, 2, ..., kPayloadLenBytes - 1.
+ static const size_t kPayloadLenBytes = 5;
+ static const size_t kHeaderLenBytes = 5;
+ uint8_t payload[kPayloadLenBytes];
+ for (uint8_t i = 0; i < kPayloadLenBytes; ++i) {
+ payload[i] = i;
+ }
+ EXPECT_CALL(*mock_encoder_, EncodeImpl(_, _, _))
+ .WillRepeatedly(Invoke(MockAudioEncoder::CopyEncoding(payload)));
+
+ // First call is a special case, since it does not include a secondary
+ // payload.
+ Encode();
+ EXPECT_EQ(kRedLastHeaderLength + kPayloadLenBytes,
+ encoded_info_.encoded_bytes);
+ for (size_t i = 0; i < kPayloadLenBytes; ++i) {
+ EXPECT_EQ(i, encoded_.data()[kRedLastHeaderLength + i]);
+ }
+
+ for (int j = 0; j < 1; ++j) {
+ // Increment all values of the payload by 10.
+ for (size_t i = 0; i < kPayloadLenBytes; ++i)
+ payload[i] += 10;
+
+ Encode();
+ ASSERT_EQ(2u, encoded_info_.redundant.size());
+ EXPECT_EQ(kPayloadLenBytes, encoded_info_.redundant[0].encoded_bytes);
+ EXPECT_EQ(kPayloadLenBytes, encoded_info_.redundant[1].encoded_bytes);
+ for (size_t i = 0; i < kPayloadLenBytes; ++i) {
+ // Check secondary payload.
+ EXPECT_EQ(j * 10 + i, encoded_.data()[kHeaderLenBytes + i]);
+
+ // Check primary payload.
+ EXPECT_EQ((j + 1) * 10 + i,
+ encoded_.data()[kHeaderLenBytes + i + kPayloadLenBytes]);
+ }
+ }
+}
+
+// Checks correct propagation of payload type.
+TEST_F(AudioEncoderCopyRedTest, CheckPayloadType) {
+ const int primary_payload_type = red_payload_type_ + 1;
+ AudioEncoder::EncodedInfo info;
+ info.encoded_bytes = 17;
+ info.payload_type = primary_payload_type;
+ EXPECT_CALL(*mock_encoder_, EncodeImpl(_, _, _))
+ .WillOnce(Invoke(MockAudioEncoder::FakeEncoding(info)));
+
+ // First call is a special case, since it does not include a secondary
+ // payload.
+ Encode();
+ ASSERT_EQ(0u, encoded_info_.redundant.size());
+
+ const int secondary_payload_type = red_payload_type_ + 2;
+ info.payload_type = secondary_payload_type;
+ EXPECT_CALL(*mock_encoder_, EncodeImpl(_, _, _))
+ .WillOnce(Invoke(MockAudioEncoder::FakeEncoding(info)));
+
+ Encode();
+ ASSERT_EQ(2u, encoded_info_.redundant.size());
+ EXPECT_EQ(secondary_payload_type, encoded_info_.redundant[1].payload_type);
+ EXPECT_EQ(primary_payload_type, encoded_info_.redundant[0].payload_type);
+ EXPECT_EQ(red_payload_type_, encoded_info_.payload_type);
+}
+
+TEST_F(AudioEncoderCopyRedTest, CheckRFC2198Header) {
+ const int primary_payload_type = red_payload_type_ + 1;
+ AudioEncoder::EncodedInfo info;
+ info.encoded_bytes = 10;
+ info.encoded_timestamp = timestamp_;
+ info.payload_type = primary_payload_type;
+
+ EXPECT_CALL(*mock_encoder_, EncodeImpl(_, _, _))
+ .WillOnce(Invoke(MockAudioEncoder::FakeEncoding(info)));
+ Encode();
+ info.encoded_timestamp = timestamp_; // update timestamp.
+ EXPECT_CALL(*mock_encoder_, EncodeImpl(_, _, _))
+ .WillOnce(Invoke(MockAudioEncoder::FakeEncoding(info)));
+ Encode(); // Second call will produce a redundant encoding.
+
+ EXPECT_EQ(encoded_.size(),
+ 5u + 2 * 10u); // header size + two encoded payloads.
+ EXPECT_EQ(encoded_[0], primary_payload_type | 0x80);
+
+ uint32_t timestamp_delta = encoded_info_.encoded_timestamp -
+ encoded_info_.redundant[0].encoded_timestamp;
+ // Timestamp delta is encoded as a 14 bit value.
+ EXPECT_EQ(encoded_[1], timestamp_delta >> 6);
+ EXPECT_EQ(static_cast<uint8_t>(encoded_[2] >> 2), timestamp_delta & 0x3f);
+ // Redundant length is encoded as 10 bit value.
+ EXPECT_EQ(encoded_[2] & 0x3u, encoded_info_.redundant[1].encoded_bytes >> 8);
+ EXPECT_EQ(encoded_[3], encoded_info_.redundant[1].encoded_bytes & 0xff);
+ EXPECT_EQ(encoded_[4], primary_payload_type);
+
+ EXPECT_CALL(*mock_encoder_, EncodeImpl(_, _, _))
+ .WillOnce(Invoke(MockAudioEncoder::FakeEncoding(info)));
+  Encode();  // Third call; with the default redundancy of 1 the packet still
+             // carries a single redundant encoding.
+
+ EXPECT_EQ(encoded_.size(),
+ 5u + 2 * 10u); // header size + two encoded payloads.
+ EXPECT_EQ(encoded_[0], primary_payload_type | 0x80);
+
+ timestamp_delta = encoded_info_.encoded_timestamp -
+ encoded_info_.redundant[0].encoded_timestamp;
+ // Timestamp delta is encoded as a 14 bit value.
+ EXPECT_EQ(encoded_[1], timestamp_delta >> 6);
+ EXPECT_EQ(static_cast<uint8_t>(encoded_[2] >> 2), timestamp_delta & 0x3f);
+ // Redundant length is encoded as 10 bit value.
+ EXPECT_EQ(encoded_[2] & 0x3u, encoded_info_.redundant[1].encoded_bytes >> 8);
+ EXPECT_EQ(encoded_[3], encoded_info_.redundant[1].encoded_bytes & 0xff);
+
+ EXPECT_EQ(encoded_[4], primary_payload_type | 0x80);
+ timestamp_delta = encoded_info_.encoded_timestamp -
+ encoded_info_.redundant[1].encoded_timestamp;
+}
+
+// Variant with a redundancy of 0.
+TEST_F(AudioEncoderCopyRedTest, CheckRFC2198Header0) {
+ webrtc::test::ScopedKeyValueConfig field_trials(
+ field_trials_, "WebRTC-Audio-Red-For-Opus/Enabled-0/");
+ // Recreate the RED encoder to take the new field trial setting into account.
+ AudioEncoderCopyRed::Config config;
+ config.payload_type = red_payload_type_;
+ config.speech_encoder = std::move(red_->ReclaimContainedEncoders()[0]);
+ red_.reset(new AudioEncoderCopyRed(std::move(config), field_trials));
+
+ const int primary_payload_type = red_payload_type_ + 1;
+ AudioEncoder::EncodedInfo info;
+ info.encoded_bytes = 10;
+ info.encoded_timestamp = timestamp_;
+ info.payload_type = primary_payload_type;
+
+ EXPECT_CALL(*mock_encoder_, EncodeImpl(_, _, _))
+ .WillOnce(Invoke(MockAudioEncoder::FakeEncoding(info)));
+ Encode();
+ info.encoded_timestamp = timestamp_; // update timestamp.
+ EXPECT_CALL(*mock_encoder_, EncodeImpl(_, _, _))
+ .WillOnce(Invoke(MockAudioEncoder::FakeEncoding(info)));
+ Encode(); // Second call will not produce a redundant encoding.
+
+ EXPECT_EQ(encoded_.size(),
+            1u + 1 * 10u);  // header size + one encoded payload.
+ EXPECT_EQ(encoded_[0], primary_payload_type);
+}
+
+// Variant with a redundancy of 2.
+TEST_F(AudioEncoderCopyRedTest, CheckRFC2198Header2) {
+ webrtc::test::ScopedKeyValueConfig field_trials(
+ field_trials_, "WebRTC-Audio-Red-For-Opus/Enabled-2/");
+ // Recreate the RED encoder to take the new field trial setting into account.
+ AudioEncoderCopyRed::Config config;
+ config.payload_type = red_payload_type_;
+ config.speech_encoder = std::move(red_->ReclaimContainedEncoders()[0]);
+ red_.reset(new AudioEncoderCopyRed(std::move(config), field_trials));
+
+ const int primary_payload_type = red_payload_type_ + 1;
+ AudioEncoder::EncodedInfo info;
+ info.encoded_bytes = 10;
+ info.encoded_timestamp = timestamp_;
+ info.payload_type = primary_payload_type;
+
+ EXPECT_CALL(*mock_encoder_, EncodeImpl(_, _, _))
+ .WillOnce(Invoke(MockAudioEncoder::FakeEncoding(info)));
+ Encode();
+ info.encoded_timestamp = timestamp_; // update timestamp.
+ EXPECT_CALL(*mock_encoder_, EncodeImpl(_, _, _))
+ .WillOnce(Invoke(MockAudioEncoder::FakeEncoding(info)));
+ Encode(); // Second call will produce a redundant encoding.
+
+ EXPECT_EQ(encoded_.size(),
+ 5u + 2 * 10u); // header size + two encoded payloads.
+ EXPECT_EQ(encoded_[0], primary_payload_type | 0x80);
+
+ uint32_t timestamp_delta = encoded_info_.encoded_timestamp -
+ encoded_info_.redundant[0].encoded_timestamp;
+ // Timestamp delta is encoded as a 14 bit value.
+ EXPECT_EQ(encoded_[1], timestamp_delta >> 6);
+ EXPECT_EQ(static_cast<uint8_t>(encoded_[2] >> 2), timestamp_delta & 0x3f);
+ // Redundant length is encoded as 10 bit value.
+ EXPECT_EQ(encoded_[2] & 0x3u, encoded_info_.redundant[1].encoded_bytes >> 8);
+ EXPECT_EQ(encoded_[3], encoded_info_.redundant[1].encoded_bytes & 0xff);
+ EXPECT_EQ(encoded_[4], primary_payload_type);
+
+ EXPECT_CALL(*mock_encoder_, EncodeImpl(_, _, _))
+ .WillOnce(Invoke(MockAudioEncoder::FakeEncoding(info)));
+ Encode(); // Third call will produce a redundant encoding with double
+ // redundancy.
+
+ EXPECT_EQ(encoded_.size(),
+ 9u + 3 * 10u); // header size + three encoded payloads.
+ EXPECT_EQ(encoded_[0], primary_payload_type | 0x80);
+
+ timestamp_delta = encoded_info_.encoded_timestamp -
+ encoded_info_.redundant[0].encoded_timestamp;
+ // Timestamp delta is encoded as a 14 bit value.
+ EXPECT_EQ(encoded_[1], timestamp_delta >> 6);
+ EXPECT_EQ(static_cast<uint8_t>(encoded_[2] >> 2), timestamp_delta & 0x3f);
+ // Redundant length is encoded as 10 bit value.
+ EXPECT_EQ(encoded_[2] & 0x3u, encoded_info_.redundant[1].encoded_bytes >> 8);
+ EXPECT_EQ(encoded_[3], encoded_info_.redundant[1].encoded_bytes & 0xff);
+
+ EXPECT_EQ(encoded_[4], primary_payload_type | 0x80);
+ timestamp_delta = encoded_info_.encoded_timestamp -
+ encoded_info_.redundant[1].encoded_timestamp;
+ // Timestamp delta is encoded as a 14 bit value.
+ EXPECT_EQ(encoded_[5], timestamp_delta >> 6);
+ EXPECT_EQ(static_cast<uint8_t>(encoded_[6] >> 2), timestamp_delta & 0x3f);
+ // Redundant length is encoded as 10 bit value.
+ EXPECT_EQ(encoded_[6] & 0x3u, encoded_info_.redundant[1].encoded_bytes >> 8);
+ EXPECT_EQ(encoded_[7], encoded_info_.redundant[1].encoded_bytes & 0xff);
+ EXPECT_EQ(encoded_[8], primary_payload_type);
+}
+
+TEST_F(AudioEncoderCopyRedTest, RespectsPayloadMTU) {
+ const int primary_payload_type = red_payload_type_ + 1;
+ AudioEncoder::EncodedInfo info;
+ info.encoded_bytes = 600;
+ info.encoded_timestamp = timestamp_;
+ info.payload_type = primary_payload_type;
+
+ EXPECT_CALL(*mock_encoder_, EncodeImpl(_, _, _))
+ .WillOnce(Invoke(MockAudioEncoder::FakeEncoding(info)));
+ Encode();
+ info.encoded_timestamp = timestamp_; // update timestamp.
+ info.encoded_bytes = 500;
+ EXPECT_CALL(*mock_encoder_, EncodeImpl(_, _, _))
+ .WillOnce(Invoke(MockAudioEncoder::FakeEncoding(info)));
+ Encode(); // Second call will produce a redundant encoding.
+
+ EXPECT_EQ(encoded_.size(), 5u + 600u + 500u);
+
+ info.encoded_timestamp = timestamp_; // update timestamp.
+ info.encoded_bytes = 400;
+ EXPECT_CALL(*mock_encoder_, EncodeImpl(_, _, _))
+ .WillOnce(Invoke(MockAudioEncoder::FakeEncoding(info)));
+ Encode(); // Third call will drop the oldest packet.
+ EXPECT_EQ(encoded_.size(), 5u + 500u + 400u);
+}
+
+TEST_F(AudioEncoderCopyRedTest, LargeTimestampGap) {
+ const int primary_payload_type = red_payload_type_ + 1;
+ AudioEncoder::EncodedInfo info;
+ info.encoded_bytes = 100;
+ info.encoded_timestamp = timestamp_;
+ info.payload_type = primary_payload_type;
+
+ EXPECT_CALL(*mock_encoder_, EncodeImpl(_, _, _))
+ .WillOnce(Invoke(MockAudioEncoder::FakeEncoding(info)));
+ Encode();
+  // Update the timestamp to simulate a 400 ms gap like the one Opus DTX
+  // causes.
+ timestamp_ += 19200;
+ info.encoded_timestamp = timestamp_; // update timestamp.
+ info.encoded_bytes = 200;
+ EXPECT_CALL(*mock_encoder_, EncodeImpl(_, _, _))
+ .WillOnce(Invoke(MockAudioEncoder::FakeEncoding(info)));
+ Encode();
+
+ // The old packet will be dropped.
+ EXPECT_EQ(encoded_.size(), 1u + 200u);
+}
+
+#if GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+
+// This test fixture tests various error conditions that make
+// AudioEncoderCopyRed die via CHECKs.
+class AudioEncoderCopyRedDeathTest : public AudioEncoderCopyRedTest {
+ protected:
+ AudioEncoderCopyRedDeathTest() : AudioEncoderCopyRedTest() {}
+};
+
+TEST_F(AudioEncoderCopyRedDeathTest, WrongFrameSize) {
+ num_audio_samples_10ms *= 2; // 20 ms frame.
+ RTC_EXPECT_DEATH(Encode(), "");
+ num_audio_samples_10ms = 0; // Zero samples.
+ RTC_EXPECT_DEATH(Encode(), "");
+}
+
+TEST_F(AudioEncoderCopyRedDeathTest, NullSpeechEncoder) {
+ test::ScopedKeyValueConfig field_trials;
+ AudioEncoderCopyRed* red = NULL;
+ AudioEncoderCopyRed::Config config;
+ config.speech_encoder = NULL;
+ RTC_EXPECT_DEATH(
+ red = new AudioEncoderCopyRed(std::move(config), field_trials),
+ "Speech encoder not provided.");
+ // The delete operation is needed to avoid leak reports from memcheck.
+ delete red;
+}
+
+#endif // GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/tools/audio_codec_speed_test.cc b/third_party/libwebrtc/modules/audio_coding/codecs/tools/audio_codec_speed_test.cc
new file mode 100644
index 0000000000..537e6fcede
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/tools/audio_codec_speed_test.cc
@@ -0,0 +1,126 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/tools/audio_codec_speed_test.h"
+
+#include "rtc_base/checks.h"
+#include "test/gtest.h"
+#include "test/testsupport/file_utils.h"
+
+using ::std::get;
+
+namespace webrtc {
+
+AudioCodecSpeedTest::AudioCodecSpeedTest(int block_duration_ms,
+ int input_sampling_khz,
+ int output_sampling_khz)
+ : block_duration_ms_(block_duration_ms),
+ input_sampling_khz_(input_sampling_khz),
+ output_sampling_khz_(output_sampling_khz),
+ input_length_sample_(
+ static_cast<size_t>(block_duration_ms_ * input_sampling_khz_)),
+ output_length_sample_(
+ static_cast<size_t>(block_duration_ms_ * output_sampling_khz_)),
+ data_pointer_(0),
+ loop_length_samples_(0),
+ max_bytes_(0),
+ encoded_bytes_(0),
+ encoding_time_ms_(0.0),
+ decoding_time_ms_(0.0),
+ out_file_(NULL) {}
+
+void AudioCodecSpeedTest::SetUp() {
+ channels_ = get<0>(GetParam());
+ bit_rate_ = get<1>(GetParam());
+ in_filename_ = test::ResourcePath(get<2>(GetParam()), get<3>(GetParam()));
+ save_out_data_ = get<4>(GetParam());
+
+ FILE* fp = fopen(in_filename_.c_str(), "rb");
+ RTC_DCHECK(fp);
+
+ // Obtain file size.
+ fseek(fp, 0, SEEK_END);
+ loop_length_samples_ = ftell(fp) / sizeof(int16_t);
+ rewind(fp);
+
+ // Allocate memory to contain the whole file.
+ in_data_.reset(
+ new int16_t[loop_length_samples_ + input_length_sample_ * channels_]);
+
+ data_pointer_ = 0;
+
+ // Copy the file into the buffer.
+ ASSERT_EQ(fread(&in_data_[0], sizeof(int16_t), loop_length_samples_, fp),
+ loop_length_samples_);
+ fclose(fp);
+
+ // Add an extra block length of samples to the end of the array, starting
+ // over again from the beginning of the array. This is done to simplify
+ // the reading process when reading over the end of the loop.
+ memcpy(&in_data_[loop_length_samples_], &in_data_[0],
+ input_length_sample_ * channels_ * sizeof(int16_t));
+
+ max_bytes_ = input_length_sample_ * channels_ * sizeof(int16_t);
+ out_data_.reset(new int16_t[output_length_sample_ * channels_]);
+ bit_stream_.reset(new uint8_t[max_bytes_]);
+
+ if (save_out_data_) {
+ std::string out_filename =
+ ::testing::UnitTest::GetInstance()->current_test_info()->name();
+
+ // Erase '/'
+ size_t found;
+ while ((found = out_filename.find('/')) != std::string::npos)
+ out_filename.replace(found, 1, "_");
+
+ out_filename = test::OutputPath() + out_filename + ".pcm";
+
+ out_file_ = fopen(out_filename.c_str(), "wb");
+ RTC_DCHECK(out_file_);
+
+ printf("Output to be saved in %s.\n", out_filename.c_str());
+ }
+}
+
+void AudioCodecSpeedTest::TearDown() {
+ if (save_out_data_) {
+ fclose(out_file_);
+ }
+}
+
+void AudioCodecSpeedTest::EncodeDecode(size_t audio_duration_sec) {
+ size_t time_now_ms = 0;
+ float time_ms;
+
+ printf("Coding %d kHz-sampled %zu-channel audio at %d bps ...\n",
+ input_sampling_khz_, channels_, bit_rate_);
+
+ while (time_now_ms < audio_duration_sec * 1000) {
+ // Encode & decode.
+ time_ms = EncodeABlock(&in_data_[data_pointer_], &bit_stream_[0],
+ max_bytes_, &encoded_bytes_);
+ encoding_time_ms_ += time_ms;
+ time_ms = DecodeABlock(&bit_stream_[0], encoded_bytes_, &out_data_[0]);
+ decoding_time_ms_ += time_ms;
+ if (save_out_data_) {
+ fwrite(&out_data_[0], sizeof(int16_t), output_length_sample_ * channels_,
+ out_file_);
+ }
+ data_pointer_ = (data_pointer_ + input_length_sample_ * channels_) %
+ loop_length_samples_;
+ time_now_ms += block_duration_ms_;
+ }
+
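+  // encoding_time_ms_ / (audio_duration_sec * 1000) * 100 simplifies to a
+  // division by 10, giving the percentage of real time spent.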
+ printf("Encoding: %.2f%% real time,\nDecoding: %.2f%% real time.\n",
+ (encoding_time_ms_ / audio_duration_sec) / 10.0,
+ (decoding_time_ms_ / audio_duration_sec) / 10.0);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/tools/audio_codec_speed_test.h b/third_party/libwebrtc/modules/audio_coding/codecs/tools/audio_codec_speed_test.h
new file mode 100644
index 0000000000..c5f1d7c259
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/tools/audio_codec_speed_test.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_TOOLS_AUDIO_CODEC_SPEED_TEST_H_
+#define MODULES_AUDIO_CODING_CODECS_TOOLS_AUDIO_CODEC_SPEED_TEST_H_
+
+#include <memory>
+#include <string>
+
+#include "test/gtest.h"
+
+namespace webrtc {
+
+// A coding parameter is defined as
+// <channels, bit_rate, file_name, extension, if_save_output>.
+typedef std::tuple<size_t, int, std::string, std::string, bool> coding_param;
+
+class AudioCodecSpeedTest : public ::testing::TestWithParam<coding_param> {
+ protected:
+ AudioCodecSpeedTest(int block_duration_ms,
+ int input_sampling_khz,
+ int output_sampling_khz);
+ virtual void SetUp();
+ virtual void TearDown();
+
+  // EncodeABlock(...) does the following:
+  // 1. encodes a block of audio, stored in `in_data`,
+  // 2. saves the bit stream to `bit_stream` (`max_bytes` bytes in size),
+  // 3. assigns `encoded_bytes` the length of the bit stream (in bytes),
+  // 4. returns the time (in milliseconds) spent on the actual encoding.
+ virtual float EncodeABlock(int16_t* in_data,
+ uint8_t* bit_stream,
+ size_t max_bytes,
+ size_t* encoded_bytes) = 0;
+
+  // DecodeABlock(...) does the following:
+  // 1. decodes the bit stream in `bit_stream`, which is `encoded_bytes` bytes
+  //    long,
+  // 2. saves the decoded audio in `out_data`,
+  // 3. returns the time (in milliseconds) spent on the actual decoding.
+ virtual float DecodeABlock(const uint8_t* bit_stream,
+ size_t encoded_bytes,
+ int16_t* out_data) = 0;
+
+  // Encode and decode audio of `audio_duration` (in seconds), and record
+  // the runtimes of encoding and decoding separately.
+ void EncodeDecode(size_t audio_duration);
+
+ int block_duration_ms_;
+ int input_sampling_khz_;
+ int output_sampling_khz_;
+
+  // Number of samples per channel in a frame.
+ size_t input_length_sample_;
+
+  // Expected number of output samples per channel in a frame.
+ size_t output_length_sample_;
+
+ std::unique_ptr<int16_t[]> in_data_;
+ std::unique_ptr<int16_t[]> out_data_;
+ size_t data_pointer_;
+ size_t loop_length_samples_;
+ std::unique_ptr<uint8_t[]> bit_stream_;
+
+ // Maximum number of bytes in output bitstream for a frame of audio.
+ size_t max_bytes_;
+
+ size_t encoded_bytes_;
+ float encoding_time_ms_;
+ float decoding_time_ms_;
+ FILE* out_file_;
+
+ size_t channels_;
+
+ // Bit rate is in bit-per-second.
+ int bit_rate_;
+
+ std::string in_filename_;
+
+ // Determines whether to save the output to file.
+ bool save_out_data_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_CODING_CODECS_TOOLS_AUDIO_CODEC_SPEED_TEST_H_
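For orientation, the following sketch shows how this harness is meant to be specialized. The trivial copy "codec", the test body, and the resource name are placeholders for illustration; in practice a subclass wraps a concrete codec (e.g. Opus) and times only the codec calls:

// audio_codec_copy_speed_test.cc -- illustrative sketch only.
#include <cstdint>
#include <cstring>

#include "modules/audio_coding/codecs/tools/audio_codec_speed_test.h"
#include "rtc_base/checks.h"
#include "rtc_base/time_utils.h"

namespace webrtc {

class CopyCodecSpeedTest : public AudioCodecSpeedTest {
 protected:
  CopyCodecSpeedTest()
      : AudioCodecSpeedTest(/*block_duration_ms=*/10,
                            /*input_sampling_khz=*/32,
                            /*output_sampling_khz=*/32) {}

  float EncodeABlock(int16_t* in_data,
                     uint8_t* bit_stream,
                     size_t max_bytes,
                     size_t* encoded_bytes) override {
    int64_t start_ms = rtc::TimeMillis();
    // A real subclass calls the codec's encode function here; we copy PCM.
    *encoded_bytes = input_length_sample_ * channels_ * sizeof(int16_t);
    RTC_CHECK_LE(*encoded_bytes, max_bytes);
    memcpy(bit_stream, in_data, *encoded_bytes);
    return static_cast<float>(rtc::TimeMillis() - start_ms);
  }

  float DecodeABlock(const uint8_t* bit_stream,
                     size_t encoded_bytes,
                     int16_t* out_data) override {
    int64_t start_ms = rtc::TimeMillis();
    memcpy(out_data, bit_stream, encoded_bytes);
    return static_cast<float>(rtc::TimeMillis() - start_ms);
  }
};

TEST_P(CopyCodecSpeedTest, OneMinute) {
  EncodeDecode(/*audio_duration_sec=*/60);
}

// <channels, bit rate (bps), resource file, extension, save output>.
INSTANTIATE_TEST_SUITE_P(
    CopyCodec,
    CopyCodecSpeedTest,
    ::testing::Values(coding_param(1, 0, "some_pcm_resource", "pcm", false)));

}  // namespace webrtc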
diff --git a/third_party/libwebrtc/modules/audio_coding/default_neteq_factory_gn/moz.build b/third_party/libwebrtc/modules/audio_coding/default_neteq_factory_gn/moz.build
new file mode 100644
index 0000000000..15764353c9
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/default_neteq_factory_gn/moz.build
@@ -0,0 +1,212 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/audio_coding/neteq/default_neteq_factory.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("default_neteq_factory_gn")
diff --git a/third_party/libwebrtc/modules/audio_coding/g3doc/index.md b/third_party/libwebrtc/modules/audio_coding/g3doc/index.md
new file mode 100644
index 0000000000..d0f6b9f81b
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/g3doc/index.md
@@ -0,0 +1,32 @@
+<?% config.freshness.owner = 'minyue' %?>
+<?% config.freshness.reviewed = '2021-04-13' %?>
+
+# The WebRTC Audio Coding Module
+
+The WebRTC audio coding module handles both audio sending and receiving. The
+[`acm2`][acm2] folder contains the implementations of its APIs.
+
+* Audio sending: audio frames, each of which should always contain 10 ms
+  worth of data, are provided to the audio coding module through
+  [`Add10MsData()`][Add10MsData]. The audio coding module uses a provided
+  audio encoder to encode the audio frames and delivers the data to a
+  pre-registered audio packetization callback, which is expected to wrap the
+  encoded audio into RTP packets and send them over a transport. Built-in
+  audio codecs are included in the [`codecs`][codecs] folder. The
+  [audio network adaptor][ANA] provides add-on functionality for an audio
+  encoder (currently limited to Opus) that makes the encoder adaptive to
+  network conditions (bandwidth, packet loss rate, etc.).
+
+* Audio receiving: audio packets are provided to the audio coding module
+  through [`IncomingPacket()`][IncomingPacket], and are processed by an audio
+ jitter buffer ([NetEq][NetEq]), which includes decoding of the packets.
+ Audio decoders are provided by an audio decoder factory. Decoded audio
+ samples should be queried by calling [`PlayoutData10Ms()`][PlayoutData10Ms].
+
+[acm2]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_coding/acm2/;drc=854d59f7501aac9e9bccfa7b4d1f7f4db7842719
+[Add10MsData]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_coding/include/audio_coding_module.h;l=136;drc=d82a02c837d33cdfd75121e40dcccd32515e42d6
+[codecs]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_coding/codecs/;drc=883fea1548d58e0080f98d66fab2e0c744dfb556
+[ANA]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_coding/audio_network_adaptor/;drc=1f99551775cd876c116d1d90cba94c8a4670d184
+[IncomingPacket]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_coding/include/audio_coding_module.h;l=192;drc=d82a02c837d33cdfd75121e40dcccd32515e42d6
+[NetEq]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_coding/neteq/;drc=213dc2cfc5f1b360b1c6fc51d393491f5de49d3d
+[PlayoutData10Ms]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_coding/include/audio_coding_module.h;l=216;drc=d82a02c837d33cdfd75121e40dcccd32515e42d6
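A minimal usage sketch of the flow described above (assumptions: the encoder and transport callback are created elsewhere; payload type 111 and the Opus format are illustrative; the commented-out calls use the signatures declared in `audio_coding_module.h`):

#include <map>
#include <memory>
#include <utility>

#include "api/audio/audio_frame.h"
#include "api/audio_codecs/builtin_audio_decoder_factory.h"
#include "modules/audio_coding/include/audio_coding_module.h"

void AcmFlowSketch(std::unique_ptr<webrtc::AudioEncoder> encoder,
                   webrtc::AudioPacketizationCallback* transport) {
  webrtc::AudioCodingModule::Config config(
      webrtc::CreateBuiltinAudioDecoderFactory());
  std::unique_ptr<webrtc::AudioCodingModule> acm(
      webrtc::AudioCodingModule::Create(config));

  // Sending: encoded packets reach `transport` as a side effect of
  // Add10MsData().
  acm->RegisterTransportCallback(transport);
  acm->SetEncoder(std::move(encoder));
  webrtc::AudioFrame frame;  // In real code, filled with 10 ms of PCM.
  acm->Add10MsData(frame);

  // Receiving: map payload types to decoders, insert parsed RTP packets,
  // and let the audio device layer pull decoded audio.
  acm->SetReceiveCodecs({{111, webrtc::SdpAudioFormat("opus", 48000, 2)}});
  // acm->IncomingPacket(payload, payload_len_bytes, rtp_header);
  // acm->PlayoutData10Ms(...);  // Pulled every 10 ms on the playout side.
}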
diff --git a/third_party/libwebrtc/modules/audio_coding/g711_c_gn/moz.build b/third_party/libwebrtc/modules/audio_coding/g711_c_gn/moz.build
new file mode 100644
index 0000000000..ea97f94f45
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/g711_c_gn/moz.build
@@ -0,0 +1,197 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/audio_coding/codecs/g711/g711_interface.c"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("g711_c_gn")
diff --git a/third_party/libwebrtc/modules/audio_coding/g711_gn/moz.build b/third_party/libwebrtc/modules/audio_coding/g711_gn/moz.build
new file mode 100644
index 0000000000..80fb9807c1
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/g711_gn/moz.build
@@ -0,0 +1,206 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/audio_coding/codecs/g711/audio_decoder_pcm.cc",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/g711/audio_encoder_pcm.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("g711_gn")
diff --git a/third_party/libwebrtc/modules/audio_coding/g722_c_gn/moz.build b/third_party/libwebrtc/modules/audio_coding/g722_c_gn/moz.build
new file mode 100644
index 0000000000..87571de304
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/g722_c_gn/moz.build
@@ -0,0 +1,197 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/audio_coding/codecs/g722/g722_interface.c"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("g722_c_gn")
diff --git a/third_party/libwebrtc/modules/audio_coding/g722_gn/moz.build b/third_party/libwebrtc/modules/audio_coding/g722_gn/moz.build
new file mode 100644
index 0000000000..be80bff221
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/g722_gn/moz.build
@@ -0,0 +1,206 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/audio_coding/codecs/g722/audio_decoder_g722.cc",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/g722/audio_encoder_g722.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("g722_gn")
diff --git a/third_party/libwebrtc/modules/audio_coding/ilbc_c_gn/moz.build b/third_party/libwebrtc/modules/audio_coding/ilbc_c_gn/moz.build
new file mode 100644
index 0000000000..771a415b58
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/ilbc_c_gn/moz.build
@@ -0,0 +1,280 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/abs_quant.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/abs_quant_loop.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/augmented_cb_corr.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/bw_expand.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/cb_construct.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy_augmentation.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy_calc.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/cb_search.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/cb_search_core.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/cb_update_best_index.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/chebyshev.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/comp_corr.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/constants.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/create_augmented_vec.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/decode.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/decode_residual.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/decoder_interpolate_lsf.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/do_plc.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/encode.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/energy_inverse.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/enh_upsample.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/enhancer.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/enhancer_interface.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/filtered_cb_vecs.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/frame_classify.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/gain_dequant.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/gain_quant.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/get_cd_vec.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/get_lsp_poly.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/get_sync_seq.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/hp_input.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/hp_output.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/ilbc.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/index_conv_dec.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/index_conv_enc.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/init_decode.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/init_encode.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/interpolate.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/interpolate_samples.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/lpc_encode.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/lsf_check.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/lsf_interpolate_to_poly_dec.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/lsf_interpolate_to_poly_enc.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/lsf_to_lsp.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/lsf_to_poly.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/lsp_to_lsf.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/my_corr.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/nearest_neighbor.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/pack_bits.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/poly_to_lsf.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/poly_to_lsp.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/refiner.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/simple_interpolate_lsf.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/simple_lpc_analysis.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/simple_lsf_dequant.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/simple_lsf_quant.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/smooth.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/smooth_out_data.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/sort_sq.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/split_vq.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/state_construct.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/state_search.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/swap_bytes.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/unpack_bits.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/vq3.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/vq4.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/window32_w32.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/xcorr_coef.c"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("ilbc_c_gn")
diff --git a/third_party/libwebrtc/modules/audio_coding/ilbc_gn/moz.build b/third_party/libwebrtc/modules/audio_coding/ilbc_gn/moz.build
new file mode 100644
index 0000000000..a652760054
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/ilbc_gn/moz.build
@@ -0,0 +1,213 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/audio_decoder_ilbc.cc",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/ilbc/audio_encoder_ilbc.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("ilbc_gn")
diff --git a/third_party/libwebrtc/modules/audio_coding/include/audio_coding_module.h b/third_party/libwebrtc/modules/audio_coding/include/audio_coding_module.h
new file mode 100644
index 0000000000..197591b2d8
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/include/audio_coding_module.h
@@ -0,0 +1,257 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_INCLUDE_AUDIO_CODING_MODULE_H_
+#define MODULES_AUDIO_CODING_INCLUDE_AUDIO_CODING_MODULE_H_
+
+#include <memory>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/audio_codecs/audio_decoder_factory.h"
+#include "api/audio_codecs/audio_encoder.h"
+#include "api/function_view.h"
+#include "api/neteq/neteq.h"
+#include "api/neteq/neteq_factory.h"
+#include "modules/audio_coding/include/audio_coding_module_typedefs.h"
+#include "system_wrappers/include/clock.h"
+
+namespace webrtc {
+
+// forward declarations
+class AudioDecoder;
+class AudioEncoder;
+class AudioFrame;
+struct RTPHeader;
+
+// Callback class used for sending data ready to be packetized
+class AudioPacketizationCallback {
+ public:
+ virtual ~AudioPacketizationCallback() {}
+
+ virtual int32_t SendData(AudioFrameType frame_type,
+ uint8_t payload_type,
+ uint32_t timestamp,
+ const uint8_t* payload_data,
+ size_t payload_len_bytes,
+ int64_t absolute_capture_timestamp_ms) {
+ // TODO(bugs.webrtc.org/10739): Deprecate the old SendData and make this one
+ // pure virtual.
+ return SendData(frame_type, payload_type, timestamp, payload_data,
+ payload_len_bytes);
+ }
+ virtual int32_t SendData(AudioFrameType frame_type,
+ uint8_t payload_type,
+ uint32_t timestamp,
+ const uint8_t* payload_data,
+ size_t payload_len_bytes) {
+ RTC_DCHECK_NOTREACHED() << "This method must be overridden, or not used.";
+ return -1;
+ }
+};
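+
+// A minimal sketch (not part of the original header) of a callback
+// implementation; the six-argument SendData() overload above is the one new
+// code should override:
+//
+//   class RtpPacketizer : public AudioPacketizationCallback {
+//    public:
+//     int32_t SendData(AudioFrameType frame_type, uint8_t payload_type,
+//                      uint32_t timestamp, const uint8_t* payload_data,
+//                      size_t payload_len_bytes,
+//                      int64_t absolute_capture_timestamp_ms) override {
+//       // Wrap `payload_data` in an RTP packet and hand it to the transport.
+//       return 0;  // 0 on success, -1 on failure.
+//     }
+//   };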
+
+class AudioCodingModule {
+ protected:
+ AudioCodingModule() {}
+
+ public:
+ struct Config {
+ explicit Config(
+ rtc::scoped_refptr<AudioDecoderFactory> decoder_factory = nullptr);
+ Config(const Config&);
+ ~Config();
+
+ NetEq::Config neteq_config;
+ Clock* clock;
+ rtc::scoped_refptr<AudioDecoderFactory> decoder_factory;
+ NetEqFactory* neteq_factory = nullptr;
+ };
+
+ static AudioCodingModule* Create(const Config& config);
+ virtual ~AudioCodingModule() = default;
+
+ ///////////////////////////////////////////////////////////////////////////
+ // Sender
+ //
+
+ // `modifier` is called exactly once with one argument: a pointer to the
+ // unique_ptr that holds the current encoder (which is null if there is no
+ // current encoder). For the duration of the call, `modifier` has exclusive
+ // access to the unique_ptr; it may call the encoder, steal the encoder and
+ // replace it with another encoder or with nullptr, etc.
+ virtual void ModifyEncoder(
+ rtc::FunctionView<void(std::unique_ptr<AudioEncoder>*)> modifier) = 0;
+
+ // Utility method for simply replacing the existing encoder with a new one.
+ void SetEncoder(std::unique_ptr<AudioEncoder> new_encoder) {
+ ModifyEncoder([&](std::unique_ptr<AudioEncoder>* encoder) {
+ *encoder = std::move(new_encoder);
+ });
+ }
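+
+  // Usage sketch: ModifyEncoder() can also reconfigure the current encoder
+  // in place, e.g.
+  //   acm->ModifyEncoder([](std::unique_ptr<AudioEncoder>* enc) {
+  //     if (*enc) (*enc)->OnReceivedTargetAudioBitrate(32000);
+  //   });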
+
+ // int32_t RegisterTransportCallback()
+ // Register a transport callback which will be called to deliver
+ // the encoded buffers whenever Process() is called and a
+ // bit-stream is ready.
+ //
+ // Input:
+ // -transport : pointer to the callback class
+ // transport->SendData() is called whenever
+ // Process() is called and bit-stream is ready
+ // to deliver.
+ //
+ // Return value:
+ // -1 if the transport callback could not be registered
+ // 0 if registration is successful.
+ //
+ virtual int32_t RegisterTransportCallback(
+ AudioPacketizationCallback* transport) = 0;
+
+ ///////////////////////////////////////////////////////////////////////////
+ // int32_t Add10MsData()
+ // Add 10 ms of raw (PCM) audio data and encode it. If the sampling
+ // frequency of the audio does not match the sampling frequency of the
+ // current encoder, ACM will resample the audio. If an encoded packet was
+ // produced, it will be delivered via the callback object registered using
+ // RegisterTransportCallback, and the return value from this function will
+ // be the number of bytes encoded.
+ //
+ // Input:
+ // -audio_frame : the input audio frame, containing raw audio
+ // data, sampling frequency, etc.
+ //
+ // Return value:
+ // >= 0 number of bytes encoded.
+ // -1 some error occurred.
+ //
+ virtual int32_t Add10MsData(const AudioFrame& audio_frame) = 0;
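+
+ // Example (illustrative sketch; pcm is a hypothetical int16_t buffer, and
+ // the AudioFrame field names are assumed from api/audio/audio_frame.h):
+ // feed one 10 ms mono frame of 48 kHz PCM.
+ //
+ //   AudioFrame frame;
+ //   frame.sample_rate_hz_ = 48000;
+ //   frame.num_channels_ = 1;
+ //   frame.samples_per_channel_ = 480;  // 10 ms at 48 kHz.
+ //   std::memcpy(frame.mutable_data(), pcm, 480 * sizeof(int16_t));
+ //   int32_t bytes_encoded = acm->Add10MsData(frame);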
+
+ ///////////////////////////////////////////////////////////////////////////
+ // int SetPacketLossRate()
+ // Sets the expected packet loss rate for encoding. Some encoders provide
+ // loss-aware encoding that makes the stream less sensitive to packet
+ // losses, e.g., through FEC. It has no effect on codecs that do not
+ // provide such encoding.
+ //
+ // Input:
+ // -packet_loss_rate : expected packet loss rate (0 -- 100 inclusive).
+ //
+ // Return value
+ // -1 if failed to set packet loss rate,
+ // 0 if succeeded.
+ //
+ // This is only used in test code that relies on old ACM APIs.
+ // TODO(minyue): Remove it when possible.
+ virtual int SetPacketLossRate(int packet_loss_rate) = 0;
+
+ ///////////////////////////////////////////////////////////////////////////
+ // Receiver
+ //
+
+ ///////////////////////////////////////////////////////////////////////////
+ // int32_t InitializeReceiver()
+ // Resets any decoder-related state of ACM to the state it had when
+ // ACM was created. This will not interrupt or affect the encoding
+ // functionality of ACM. Calling this function discards all
+ // decoding-related settings; for instance, all registered codecs are
+ // deleted and have to be registered again.
+ //
+ // Return value:
+ // -1 if failed to initialize,
+ // 0 if succeeded.
+ //
+ virtual int32_t InitializeReceiver() = 0;
+
+ // Replace any existing decoders with the given payload type -> decoder map.
+ virtual void SetReceiveCodecs(
+ const std::map<int, SdpAudioFormat>& codecs) = 0;
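+
+ // Example (illustrative sketch): map payload type 111 to Opus and 0 to
+ // PCMU, replacing any previously configured decoders.
+ //
+ //   acm->SetReceiveCodecs({{111, {"opus", 48000, 2}},
+ //                          {0, {"PCMU", 8000, 1}}});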
+
+ ///////////////////////////////////////////////////////////////////////////
+ // int ReceiveSampleRate()
+ //
+ // Mozilla extension.
+ // Return the sample-rate of the inbound audio stream.
+ //
+ // Return value:
+ // 0 if no audio has been received, the sample-rate of the inbound audio
+ // otherwise.
+ virtual int ReceiveSampleRate() const = 0;
+
+ ///////////////////////////////////////////////////////////////////////////
+ // int32_t IncomingPacket()
+ // Call this function to insert a parsed RTP packet into ACM.
+ //
+ // Inputs:
+ // -incoming_payload : received payload.
+ // -payload_len_bytes : the length of payload in bytes.
+ // -rtp_header : the relevant information retrieved from the RTP
+ // header.
+ //
+ // Return value:
+ // -1 if the payload could not be inserted,
+ // 0 if the payload was successfully inserted.
+ //
+ virtual int32_t IncomingPacket(const uint8_t* incoming_payload,
+ size_t payload_len_bytes,
+ const RTPHeader& rtp_header) = 0;
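+
+ // Example (illustrative sketch; header fields are assumed to be filled in
+ // from the caller's RTP parser, and payload/payload_size are hypothetical):
+ //
+ //   RTPHeader header;
+ //   header.payloadType = 111;
+ //   header.sequenceNumber = sequence_number;
+ //   header.timestamp = rtp_timestamp;
+ //   header.ssrc = ssrc;
+ //   if (acm->IncomingPacket(payload, payload_size, header) != 0) {
+ //     // Handle insertion failure.
+ //   }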
+
+ ///////////////////////////////////////////////////////////////////////////
+ // int32_t PlayoutData10Ms(
+ // Get 10 milliseconds of raw audio data for playout, at the given sampling
+ // frequency. ACM will perform a resampling if required.
+ //
+ // Input:
+ // -desired_freq_hz : the desired sampling frequency, in Hertz, of the
+ // output audio. If set to -1, the function returns
+ // the audio at the current sampling frequency.
+ //
+ // Output:
+ // -audio_frame : output audio frame which contains raw audio data
+ // and other relevant parameters.
+ // -muted : if true, the sample data in audio_frame is not
+ // populated, and must be interpreted as all zero.
+ //
+ // Return value:
+ // -1 if the function fails,
+ // 0 if the function succeeds.
+ //
+ virtual int32_t PlayoutData10Ms(int32_t desired_freq_hz,
+ AudioFrame* audio_frame,
+ bool* muted) = 0;
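+
+ // Example (illustrative sketch): pull 10 ms of 48 kHz audio for playout.
+ //
+ //   AudioFrame frame;
+ //   bool muted = false;
+ //   if (acm->PlayoutData10Ms(48000, &frame, &muted) == 0) {
+ //     if (muted) {
+ //       // frame data is not populated; treat the frame as silence.
+ //     }
+ //   }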
+
+ ///////////////////////////////////////////////////////////////////////////
+ // statistics
+ //
+
+ ///////////////////////////////////////////////////////////////////////////
+ // int32_t GetNetworkStatistics()
+ // Get network statistics. Note that the internal statistics of NetEq are
+ // reset by this call.
+ //
+ // Output:
+ // -network_statistics : a structure that will be filled with the
+ // network statistics.
+ //
+ // Return value:
+ // -1 if failed to retrieve the statistics,
+ // 0 if the statistics were retrieved successfully.
+ //
+ virtual int32_t GetNetworkStatistics(
+ NetworkStatistics* network_statistics) = 0;
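+
+ // Example (illustrative sketch): sample the jitter-buffer stats. Note that
+ // this call resets NetEq's internal statistics.
+ //
+ //   NetworkStatistics stats;
+ //   if (acm->GetNetworkStatistics(&stats) == 0) {
+ //     double expand_fraction = stats.currentExpandRate / 16384.0;  // Q14.
+ //   }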
+
+ virtual ANAStats GetANAStats() const = 0;
+
+ virtual int GetTargetBitrate() const = 0;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_CODING_INCLUDE_AUDIO_CODING_MODULE_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/include/audio_coding_module_typedefs.h b/third_party/libwebrtc/modules/audio_coding/include/audio_coding_module_typedefs.h
new file mode 100644
index 0000000000..9d2fcfe22e
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/include/audio_coding_module_typedefs.h
@@ -0,0 +1,137 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_INCLUDE_AUDIO_CODING_MODULE_TYPEDEFS_H_
+#define MODULES_AUDIO_CODING_INCLUDE_AUDIO_CODING_MODULE_TYPEDEFS_H_
+
+#include <map>
+
+namespace webrtc {
+
+///////////////////////////////////////////////////////////////////////////
+// enum ACMVADMode
+// An enumerator for aggressiveness of VAD
+// -VADNormal : least aggressive mode.
+// -VADLowBitrate : more aggressive than "VADNormal" to save on
+// bit-rate.
+// -VADAggr : an aggressive mode.
+ // -VADVeryAggr : the most aggressive mode.
+//
+enum ACMVADMode {
+ VADNormal = 0,
+ VADLowBitrate = 1,
+ VADAggr = 2,
+ VADVeryAggr = 3
+};
+
+enum class AudioFrameType {
+ kEmptyFrame = 0,
+ kAudioFrameSpeech = 1,
+ kAudioFrameCN = 2,
+};
+
+///////////////////////////////////////////////////////////////////////////
+//
+// Enumeration of Opus mode for intended application.
+//
+// kVoip : optimized for voice signals.
+// kAudio : optimized for non-voice signals like music.
+//
+enum OpusApplicationMode {
+ kVoip = 0,
+ kAudio = 1,
+};
+
+// Statistics for calls to AudioCodingModule::PlayoutData10Ms().
+struct AudioDecodingCallStats {
+ AudioDecodingCallStats()
+ : calls_to_silence_generator(0),
+ calls_to_neteq(0),
+ decoded_normal(0),
+ decoded_neteq_plc(0),
+ decoded_codec_plc(0),
+ decoded_cng(0),
+ decoded_plc_cng(0),
+ decoded_muted_output(0) {}
+
+ int calls_to_silence_generator; // Number of calls where silence was
+ // generated and NetEq was disengaged from decoding.
+ int calls_to_neteq; // Number of calls to NetEq.
+ int decoded_normal; // Number of calls where an audio RTP packet was decoded.
+ int decoded_neteq_plc; // Number of calls that resulted in NetEq PLC.
+ int decoded_codec_plc; // Number of calls that resulted in codec PLC.
+ int decoded_cng; // Number of calls where comfort noise was generated due to
+ // DTX.
+ int decoded_plc_cng; // Number of calls where PLC faded to CNG.
+ int decoded_muted_output; // Number of calls that returned muted output.
+};
+
+// NETEQ statistics.
+struct NetworkStatistics {
+ // current jitter buffer size in ms
+ uint16_t currentBufferSize;
+ // preferred (optimal) buffer size in ms
+ uint16_t preferredBufferSize;
+ // adding extra delay due to "peaky jitter"
+ bool jitterPeaksFound;
+ // Stats below correspond to similarly-named fields in the WebRTC stats spec.
+ // https://w3c.github.io/webrtc-stats/#dom-rtcinboundrtpstreamstats
+ uint64_t totalSamplesReceived;
+ uint64_t concealedSamples;
+ uint64_t silentConcealedSamples;
+ uint64_t concealmentEvents;
+ uint64_t jitterBufferDelayMs;
+ uint64_t jitterBufferTargetDelayMs;
+ uint64_t jitterBufferMinimumDelayMs;
+ uint64_t jitterBufferEmittedCount;
+ uint64_t insertedSamplesForDeceleration;
+ uint64_t removedSamplesForAcceleration;
+ uint64_t fecPacketsReceived;
+ uint64_t fecPacketsDiscarded;
+ // Stats below correspond to similarly-named fields in the WebRTC stats spec.
+ // https://w3c.github.io/webrtc-stats/#dom-rtcreceivedrtpstreamstats
+ uint64_t packetsDiscarded;
+ // Stats below DO NOT correspond directly to anything in the WebRTC stats
+ // spec. The Q14 rates below are fixed-point fractions: divide by 1 << 14
+ // (16384) to obtain the fraction, e.g. 8192 corresponds to 0.5.
+ // Fraction (of original stream) of synthesized audio inserted through
+ // expansion (in Q14).
+ uint16_t currentExpandRate;
+ // fraction (of original stream) of synthesized speech inserted through
+ // expansion (in Q14)
+ uint16_t currentSpeechExpandRate;
+ // fraction of synthesized speech inserted through pre-emptive expansion
+ // (in Q14)
+ uint16_t currentPreemptiveRate;
+ // fraction of data removed through acceleration (in Q14)
+ uint16_t currentAccelerateRate;
+ // fraction of data coming from secondary decoding (in Q14)
+ uint16_t currentSecondaryDecodedRate;
+ // Fraction of secondary data, including FEC and RED, that is discarded (in
+ // Q14). Discarding of secondary data can be caused by the reception of the
+ // primary data, obsoleting the secondary data. It can also be caused by early
+ // or late arrival of secondary data.
+ uint16_t currentSecondaryDiscardedRate;
+ // average packet waiting time in the jitter buffer (ms)
+ int meanWaitingTimeMs;
+ // max packet waiting time in the jitter buffer (ms)
+ int maxWaitingTimeMs;
+ // count of the number of buffer flushes
+ uint64_t packetBufferFlushes;
+ // number of samples expanded due to delayed packets
+ uint64_t delayedPacketOutageSamples;
+ // arrival delay of incoming packets
+ uint64_t relativePacketArrivalDelayMs;
+ // number of audio interruptions
+ int32_t interruptionCount;
+ // total duration of audio interruptions
+ int32_t totalInterruptionDurationMs;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_CODING_INCLUDE_AUDIO_CODING_MODULE_TYPEDEFS_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/isac_bwinfo_gn/moz.build b/third_party/libwebrtc/modules/audio_coding/isac_bwinfo_gn/moz.build
new file mode 100644
index 0000000000..1cf433ded0
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/isac_bwinfo_gn/moz.build
@@ -0,0 +1,189 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("isac_bwinfo_gn")
diff --git a/third_party/libwebrtc/modules/audio_coding/isac_c_gn/moz.build b/third_party/libwebrtc/modules/audio_coding/isac_c_gn/moz.build
new file mode 100644
index 0000000000..09da7ad673
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/isac_c_gn/moz.build
@@ -0,0 +1,213 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/arith_routines.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/arith_routines_hist.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/arith_routines_logist.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/bandwidth_estimator.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/crc.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/decode.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/decode_bwe.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/encode.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/encode_lpc_swb.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/entropy_coding.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/filterbanks.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/intialize.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/isac.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/lattice.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/lpc_analysis.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/lpc_gain_swb_tables.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/lpc_shape_swb12_tables.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/lpc_shape_swb16_tables.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/lpc_tables.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/pitch_gain_tables.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/pitch_lag_tables.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/spectrum_ar_model_tables.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/transform.c"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "m",
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("isac_c_gn")
diff --git a/third_party/libwebrtc/modules/audio_coding/isac_common_gn/moz.build b/third_party/libwebrtc/modules/audio_coding/isac_common_gn/moz.build
new file mode 100644
index 0000000000..bdc092498c
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/isac_common_gn/moz.build
@@ -0,0 +1,197 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("isac_common_gn")
diff --git a/third_party/libwebrtc/modules/audio_coding/isac_fix_c_arm_asm_gn/moz.build b/third_party/libwebrtc/modules/audio_coding/isac_fix_c_arm_asm_gn/moz.build
new file mode 100644
index 0000000000..2e44cd30b1
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/isac_fix_c_arm_asm_gn/moz.build
@@ -0,0 +1,89 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ARCH_ARM"] = True
+DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+DEFINES["WEBRTC_ENABLE_AVX2"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_HAS_NEON"] = True
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_LINUX"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+DEFINES["WEBRTC_POSIX"] = True
+DEFINES["_GNU_SOURCE"] = True
+DEFINES["__STDC_CONSTANT_MACROS"] = True
+DEFINES["__STDC_FORMAT_MACROS"] = True
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+SOURCES += [
+ "/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/lattice_armv7.S",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/pitch_filter_armv6.S"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+
+ OS_LIBS += [
+ "android_support",
+ "log",
+ "unwind"
+ ]
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+
+ OS_LIBS += [
+ "rt"
+ ]
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+Library("isac_fix_c_arm_asm_gn")
diff --git a/third_party/libwebrtc/modules/audio_coding/isac_fix_c_gn/moz.build b/third_party/libwebrtc/modules/audio_coding/isac_fix_c_gn/moz.build
new file mode 100644
index 0000000000..ad5cffa07f
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/isac_fix_c_gn/moz.build
@@ -0,0 +1,118 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+CFLAGS += [
+ "-mfpu=neon"
+]
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ARCH_ARM"] = True
+DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+DEFINES["WEBRTC_ENABLE_AVX2"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_HAS_NEON"] = True
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_LINUX"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+DEFINES["WEBRTC_POSIX"] = True
+DEFINES["_GNU_SOURCE"] = True
+DEFINES["__STDC_CONSTANT_MACROS"] = True
+DEFINES["__STDC_FORMAT_MACROS"] = True
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+SOURCES += [
+ "/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/decode_plc.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/lpc_masking_model.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/pitch_filter.c"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/arith_routines.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/arith_routines_hist.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/arith_routines_logist.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/bandwidth_estimator.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/decode.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/decode_bwe.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/encode.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/entropy_coding.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/filterbank_tables.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/filterbanks.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/filters.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/initialize.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/isacfix.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/lattice.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/lpc_tables.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/pitch_estimator.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/pitch_estimator_c.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/pitch_gain_tables.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/pitch_lag_tables.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/spectrum_ar_model_tables.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/transform.c"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+
+ OS_LIBS += [
+ "android_support",
+ "log",
+ "unwind"
+ ]
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+
+ OS_LIBS += [
+ "rt"
+ ]
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+Library("isac_fix_c_gn")
diff --git a/third_party/libwebrtc/modules/audio_coding/isac_fix_common_gn/moz.build b/third_party/libwebrtc/modules/audio_coding/isac_fix_common_gn/moz.build
new file mode 100644
index 0000000000..bbb4268d35
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/isac_fix_common_gn/moz.build
@@ -0,0 +1,93 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+CFLAGS += [
+ "-mfpu=neon"
+]
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ARCH_ARM"] = True
+DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+DEFINES["WEBRTC_ENABLE_AVX2"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_HAS_NEON"] = True
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_LINUX"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+DEFINES["WEBRTC_POSIX"] = True
+DEFINES["_GNU_SOURCE"] = True
+DEFINES["__STDC_CONSTANT_MACROS"] = True
+DEFINES["__STDC_FORMAT_MACROS"] = True
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/fft.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/transform_tables.c"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+
+ OS_LIBS += [
+ "android_support",
+ "log",
+ "unwind"
+ ]
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+
+ OS_LIBS += [
+ "rt"
+ ]
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+Library("isac_fix_common_gn")
diff --git a/third_party/libwebrtc/modules/audio_coding/isac_fix_gn/moz.build b/third_party/libwebrtc/modules/audio_coding/isac_fix_gn/moz.build
new file mode 100644
index 0000000000..afe4e88ce1
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/isac_fix_gn/moz.build
@@ -0,0 +1,93 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+CXXFLAGS += [
+ "-mfpu=neon"
+]
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ARCH_ARM"] = True
+DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+DEFINES["WEBRTC_ENABLE_AVX2"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_HAS_NEON"] = True
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_LINUX"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+DEFINES["WEBRTC_POSIX"] = True
+DEFINES["_GNU_SOURCE"] = True
+DEFINES["__STDC_CONSTANT_MACROS"] = True
+DEFINES["__STDC_FORMAT_MACROS"] = True
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/audio_decoder_isacfix.cc",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/audio_encoder_isacfix.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+
+ OS_LIBS += [
+ "android_support",
+ "log",
+ "unwind"
+ ]
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+
+ OS_LIBS += [
+ "rt"
+ ]
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+Library("isac_fix_gn")
diff --git a/third_party/libwebrtc/modules/audio_coding/isac_gn/moz.build b/third_party/libwebrtc/modules/audio_coding/isac_gn/moz.build
new file mode 100644
index 0000000000..5e75a5ca4f
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/isac_gn/moz.build
@@ -0,0 +1,192 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/audio_decoder_isac.cc",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/audio_encoder_isac.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "m",
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("isac_gn")
diff --git a/third_party/libwebrtc/modules/audio_coding/isac_neon_gn/moz.build b/third_party/libwebrtc/modules/audio_coding/isac_neon_gn/moz.build
new file mode 100644
index 0000000000..d260776887
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/isac_neon_gn/moz.build
@@ -0,0 +1,96 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+CFLAGS += [
+ "-mfpu=neon"
+]
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ARCH_ARM"] = True
+DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+DEFINES["WEBRTC_ENABLE_AVX2"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_HAS_NEON"] = True
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_LINUX"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+DEFINES["WEBRTC_POSIX"] = True
+DEFINES["_GNU_SOURCE"] = True
+DEFINES["__STDC_CONSTANT_MACROS"] = True
+DEFINES["__STDC_FORMAT_MACROS"] = True
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/entropy_coding_neon.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/filterbanks_neon.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/filters_neon.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/lattice_neon.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/isac/fix/source/transform_neon.c"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+
+ OS_LIBS += [
+ "android_support",
+ "log",
+ "unwind"
+ ]
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+
+ OS_LIBS += [
+ "rt"
+ ]
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+Library("isac_neon_gn")
diff --git a/third_party/libwebrtc/modules/audio_coding/isac_vad_gn/moz.build b/third_party/libwebrtc/modules/audio_coding/isac_vad_gn/moz.build
new file mode 100644
index 0000000000..2769ce21ff
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/isac_vad_gn/moz.build
@@ -0,0 +1,200 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/filter_functions.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/isac_vad.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/pitch_estimator.c",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/isac/main/source/pitch_filter.c"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("isac_vad_gn")
diff --git a/third_party/libwebrtc/modules/audio_coding/legacy_encoded_audio_frame_gn/moz.build b/third_party/libwebrtc/modules/audio_coding/legacy_encoded_audio_frame_gn/moz.build
new file mode 100644
index 0000000000..81572e4104
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/legacy_encoded_audio_frame_gn/moz.build
@@ -0,0 +1,205 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/audio_coding/codecs/legacy_encoded_audio_frame.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("legacy_encoded_audio_frame_gn")
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/accelerate.cc b/third_party/libwebrtc/modules/audio_coding/neteq/accelerate.cc
new file mode 100644
index 0000000000..f4ef6cdccb
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/accelerate.cc
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/accelerate.h"
+
+#include "api/array_view.h"
+#include "modules/audio_coding/neteq/audio_multi_vector.h"
+
+namespace webrtc {
+
+Accelerate::ReturnCodes Accelerate::Process(const int16_t* input,
+ size_t input_length,
+ bool fast_accelerate,
+ AudioMultiVector* output,
+ size_t* length_change_samples) {
+ // Input length must be (almost) 30 ms.
+ static const size_t k15ms = 120; // 15 ms = 120 samples at 8 kHz sample rate.
+ if (num_channels_ == 0 ||
+ input_length / num_channels_ < (2 * k15ms - 1) * fs_mult_) {
+    // The input is too short for the accelerate operation. Simply move all
+    // data from input to output.
+ output->PushBackInterleaved(
+ rtc::ArrayView<const int16_t>(input, input_length));
+ return kError;
+ }
+ return TimeStretch::Process(input, input_length, fast_accelerate, output,
+ length_change_samples);
+}
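+
+// Worked example of the length check above (illustrative only, assuming
+// fs_mult_ = sample_rate_hz / 8000): at 32 kHz, fs_mult_ is 4, so the
+// per-channel minimum is (2 * 120 - 1) * 4 = 956 samples, four samples short
+// of the 960 that make up 30 ms. Shorter input is passed through unchanged
+// and the call returns kError.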
+
+void Accelerate::SetParametersForPassiveSpeech(size_t /*len*/,
+ int16_t* best_correlation,
+ size_t* /*peak_index*/) const {
+ // When the signal does not contain any active speech, the correlation does
+ // not matter. Simply set it to zero.
+ *best_correlation = 0;
+}
+
+Accelerate::ReturnCodes Accelerate::CheckCriteriaAndStretch(
+ const int16_t* input,
+ size_t input_length,
+ size_t peak_index,
+ int16_t best_correlation,
+ bool active_speech,
+ bool fast_mode,
+ AudioMultiVector* output) const {
+ // Check for strong correlation or passive speech.
+ // Use 8192 (0.5 in Q14) in fast mode.
+ const int correlation_threshold = fast_mode ? 8192 : kCorrelationThreshold;
+ if ((best_correlation > correlation_threshold) || !active_speech) {
+ // Do accelerate operation by overlap add.
+
+ // Pre-calculate common multiplication with `fs_mult_`.
+ // 120 corresponds to 15 ms.
+ size_t fs_mult_120 = fs_mult_ * 120;
+
+ if (fast_mode) {
+ // Fit as many multiples of `peak_index` as possible in fs_mult_120.
+ // TODO(henrik.lundin) Consider finding multiple correlation peaks and
+ // pick the one with the longest correlation lag in this case.
+ peak_index = (fs_mult_120 / peak_index) * peak_index;
+ }
+
+ RTC_DCHECK_GE(fs_mult_120, peak_index); // Should be handled in Process().
+ // Copy first part; 0 to 15 ms.
+ output->PushBackInterleaved(
+ rtc::ArrayView<const int16_t>(input, fs_mult_120 * num_channels_));
+    // Copy `peak_index` samples, starting at 15 ms, to `temp_vector`.
+ AudioMultiVector temp_vector(num_channels_);
+ temp_vector.PushBackInterleaved(rtc::ArrayView<const int16_t>(
+ &input[fs_mult_120 * num_channels_], peak_index * num_channels_));
+ // Cross-fade `temp_vector` onto the end of `output`.
+ output->CrossFade(temp_vector, peak_index);
+ // Copy the last unmodified part, 15 ms + pitch period until the end.
+ output->PushBackInterleaved(rtc::ArrayView<const int16_t>(
+ &input[(fs_mult_120 + peak_index) * num_channels_],
+ input_length - (fs_mult_120 + peak_index) * num_channels_));
+
+ if (active_speech) {
+ return kSuccess;
+ } else {
+ return kSuccessLowEnergy;
+ }
+ } else {
+    // Accelerate not allowed. Simply move all data from `input` to `output`.
+ output->PushBackInterleaved(
+ rtc::ArrayView<const int16_t>(input, input_length));
+ return kNoStretch;
+ }
+}
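+
+// Worked example of the fast-mode fitting above (illustrative only): at
+// 16 kHz, fs_mult_120 = 2 * 120 = 240 samples (15 ms). With a pitch period of
+// peak_index = 70, (240 / 70) * 70 = 210, so three pitch periods are removed
+// at once and the output becomes 210 samples per channel shorter than the
+// input, instead of 70 in normal mode.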
+
+Accelerate* AccelerateFactory::Create(
+ int sample_rate_hz,
+ size_t num_channels,
+ const BackgroundNoise& background_noise) const {
+ return new Accelerate(sample_rate_hz, num_channels, background_noise);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/accelerate.h b/third_party/libwebrtc/modules/audio_coding/neteq/accelerate.h
new file mode 100644
index 0000000000..01fe874d54
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/accelerate.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_ACCELERATE_H_
+#define MODULES_AUDIO_CODING_NETEQ_ACCELERATE_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "modules/audio_coding/neteq/time_stretch.h"
+
+namespace webrtc {
+
+class AudioMultiVector;
+class BackgroundNoise;
+
+// This class implements the Accelerate operation. Most of the work is done
+// in the base class TimeStretch, which is shared with the PreemptiveExpand
+// operation. In the Accelerate class, the operations that are specific to
+// Accelerate are implemented.
+class Accelerate : public TimeStretch {
+ public:
+ Accelerate(int sample_rate_hz,
+ size_t num_channels,
+ const BackgroundNoise& background_noise)
+ : TimeStretch(sample_rate_hz, num_channels, background_noise) {}
+
+ Accelerate(const Accelerate&) = delete;
+ Accelerate& operator=(const Accelerate&) = delete;
+
+ // This method performs the actual Accelerate operation. The samples are
+ // read from `input`, of length `input_length` elements, and are written to
+  // `output`. The number of samples removed through time-stretching is
+  // provided in the output `length_change_samples`. The method returns
+ // the outcome of the operation as an enumerator value. If `fast_accelerate`
+ // is true, the algorithm will relax the requirements on finding strong
+ // correlations, and may remove multiple pitch periods if possible.
+ ReturnCodes Process(const int16_t* input,
+ size_t input_length,
+ bool fast_accelerate,
+ AudioMultiVector* output,
+ size_t* length_change_samples);
+
+ protected:
+ // Sets the parameters `best_correlation` and `peak_index` to suitable
+ // values when the signal contains no active speech.
+ void SetParametersForPassiveSpeech(size_t len,
+ int16_t* best_correlation,
+ size_t* peak_index) const override;
+
+ // Checks the criteria for performing the time-stretching operation and,
+ // if possible, performs the time-stretching.
+ ReturnCodes CheckCriteriaAndStretch(const int16_t* input,
+ size_t input_length,
+ size_t peak_index,
+ int16_t best_correlation,
+ bool active_speech,
+ bool fast_mode,
+ AudioMultiVector* output) const override;
+};
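+
+// A minimal usage sketch (illustrative only; the sample rate, lengths and
+// constructor arguments below are assumptions, not part of the API contract):
+//
+//   BackgroundNoise noise(/*num_channels=*/1);
+//   Accelerate accelerate(/*sample_rate_hz=*/16000, /*num_channels=*/1,
+//                         noise);
+//   AudioMultiVector output(/*N=*/1);
+//   size_t removed = 0;
+//   std::vector<int16_t> input(960);  // 60 ms at 16 kHz.
+//   Accelerate::ReturnCodes ret = accelerate.Process(
+//       input.data(), input.size(), /*fast_accelerate=*/false, &output,
+//       &removed);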
+
+struct AccelerateFactory {
+ AccelerateFactory() {}
+ virtual ~AccelerateFactory() {}
+
+ virtual Accelerate* Create(int sample_rate_hz,
+ size_t num_channels,
+ const BackgroundNoise& background_noise) const;
+};
+
+} // namespace webrtc
+#endif // MODULES_AUDIO_CODING_NETEQ_ACCELERATE_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/audio_decoder_unittest.cc b/third_party/libwebrtc/modules/audio_coding/neteq/audio_decoder_unittest.cc
new file mode 100644
index 0000000000..bb5c6d167b
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/audio_decoder_unittest.cc
@@ -0,0 +1,678 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stdlib.h>
+
+#include <array>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "api/audio_codecs/opus/audio_encoder_opus.h"
+#include "modules/audio_coding/codecs/g711/audio_decoder_pcm.h"
+#include "modules/audio_coding/codecs/g711/audio_encoder_pcm.h"
+#include "modules/audio_coding/codecs/g722/audio_decoder_g722.h"
+#include "modules/audio_coding/codecs/g722/audio_encoder_g722.h"
+#include "modules/audio_coding/codecs/ilbc/audio_decoder_ilbc.h"
+#include "modules/audio_coding/codecs/ilbc/audio_encoder_ilbc.h"
+#include "modules/audio_coding/codecs/isac/fix/include/audio_decoder_isacfix.h"
+#include "modules/audio_coding/codecs/isac/fix/include/audio_encoder_isacfix.h"
+#include "modules/audio_coding/codecs/isac/main/include/audio_decoder_isac.h"
+#include "modules/audio_coding/codecs/isac/main/include/audio_encoder_isac.h"
+#include "modules/audio_coding/codecs/opus/audio_decoder_opus.h"
+#include "modules/audio_coding/codecs/pcm16b/audio_decoder_pcm16b.h"
+#include "modules/audio_coding/codecs/pcm16b/audio_encoder_pcm16b.h"
+#include "modules/audio_coding/neteq/tools/resample_input_audio_file.h"
+#include "rtc_base/system/arch.h"
+#include "test/gtest.h"
+#include "test/testsupport/file_utils.h"
+
+namespace webrtc {
+
+namespace {
+
+constexpr int kOverheadBytesPerPacket = 50;
+
+// The absolute difference between the input and output (the first channel) is
+// compared vs `tolerance`. The parameter `delay` is used to correct for codec
+// delays.
+void CompareInputOutput(const std::vector<int16_t>& input,
+ const std::vector<int16_t>& output,
+ size_t num_samples,
+ size_t channels,
+ int tolerance,
+ int delay) {
+ ASSERT_LE(num_samples, input.size());
+ ASSERT_LE(num_samples * channels, output.size());
+ for (unsigned int n = 0; n < num_samples - delay; ++n) {
+ ASSERT_NEAR(input[n], output[channels * n + delay], tolerance)
+ << "Exit test on first diff; n = " << n;
+ }
+}
+
+// The absolute difference between the first two channels in `output` is
+// compared vs `tolerance`.
+void CompareTwoChannels(const std::vector<int16_t>& output,
+ size_t samples_per_channel,
+ size_t channels,
+ int tolerance) {
+ ASSERT_GE(channels, 2u);
+ ASSERT_LE(samples_per_channel * channels, output.size());
+ for (unsigned int n = 0; n < samples_per_channel; ++n)
+ ASSERT_NEAR(output[channels * n], output[channels * n + 1], tolerance)
+ << "Stereo samples differ.";
+}
+
+// Calculates mean-squared error between input and output (the first channel).
+// The parameter `delay` is used to correct for codec delays.
+double MseInputOutput(const std::vector<int16_t>& input,
+ const std::vector<int16_t>& output,
+ size_t num_samples,
+ size_t channels,
+ int delay) {
+ RTC_DCHECK_LT(delay, static_cast<int>(num_samples));
+ RTC_DCHECK_LE(num_samples, input.size());
+ RTC_DCHECK_LE(num_samples * channels, output.size());
+ if (num_samples == 0)
+ return 0.0;
+ double squared_sum = 0.0;
+ for (unsigned int n = 0; n < num_samples - delay; ++n) {
+ squared_sum += (input[n] - output[channels * n + delay]) *
+ (input[n] - output[channels * n + delay]);
+ }
+ return squared_sum / (num_samples - delay);
+}
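+
+// In other words (with d = delay, C = channels, N = num_samples), this
+// computes
+//   mse = (1 / (N - d)) * sum_{n=0}^{N-d-1} (input[n] - output[C*n + d])^2.
+// For example, input = {1, 2}, output = {2, 4}, C = 1, d = 0 gives
+// ((1-2)^2 + (2-4)^2) / 2 = 2.5.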
+} // namespace
+
+class AudioDecoderTest : public ::testing::Test {
+ protected:
+ AudioDecoderTest()
+ : input_audio_(
+ webrtc::test::ResourcePath("audio_coding/testfile32kHz", "pcm"),
+ 32000),
+ codec_input_rate_hz_(32000), // Legacy default value.
+ frame_size_(0),
+ data_length_(0),
+ channels_(1),
+ payload_type_(17),
+ decoder_(NULL) {}
+
+ ~AudioDecoderTest() override {}
+
+ void SetUp() override {
+ if (audio_encoder_)
+ codec_input_rate_hz_ = audio_encoder_->SampleRateHz();
+ // Create arrays.
+ ASSERT_GT(data_length_, 0u) << "The test must set data_length_ > 0";
+ }
+
+ void TearDown() override {
+ delete decoder_;
+ decoder_ = NULL;
+ }
+
+ virtual void InitEncoder() {}
+
+ // TODO(henrik.lundin) Change return type to size_t once most/all overriding
+ // implementations are gone.
+ virtual int EncodeFrame(const int16_t* input,
+ size_t input_len_samples,
+ rtc::Buffer* output) {
+ AudioEncoder::EncodedInfo encoded_info;
+ const size_t samples_per_10ms = audio_encoder_->SampleRateHz() / 100;
+ RTC_CHECK_EQ(samples_per_10ms * audio_encoder_->Num10MsFramesInNextPacket(),
+ input_len_samples);
+ std::unique_ptr<int16_t[]> interleaved_input(
+ new int16_t[channels_ * samples_per_10ms]);
+ for (size_t i = 0; i < audio_encoder_->Num10MsFramesInNextPacket(); ++i) {
+ EXPECT_EQ(0u, encoded_info.encoded_bytes);
+
+ // Duplicate the mono input signal to however many channels the test
+ // wants.
+ test::InputAudioFile::DuplicateInterleaved(input + i * samples_per_10ms,
+ samples_per_10ms, channels_,
+ interleaved_input.get());
+
+ encoded_info =
+ audio_encoder_->Encode(0,
+ rtc::ArrayView<const int16_t>(
+ interleaved_input.get(),
+ audio_encoder_->NumChannels() *
+ audio_encoder_->SampleRateHz() / 100),
+ output);
+ }
+ EXPECT_EQ(payload_type_, encoded_info.payload_type);
+ return static_cast<int>(encoded_info.encoded_bytes);
+ }
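+
+  // For example (illustrative numbers only): an encoder running at 32 kHz
+  // that packs two 10 ms frames per packet has samples_per_10ms = 320, so
+  // EncodeFrame() must be handed exactly 640 input samples; Encode() is then
+  // called twice, and only the final call is expected to produce bytes.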
+
+ // Encodes and decodes audio. The absolute difference between the input and
+ // output is compared vs `tolerance`, and the mean-squared error is compared
+ // with `mse`. The encoded stream should contain `expected_bytes`. For stereo
+ // audio, the absolute difference between the two channels is compared vs
+ // `channel_diff_tolerance`.
+ void EncodeDecodeTest(size_t expected_bytes,
+ int tolerance,
+ double mse,
+ int delay = 0,
+ int channel_diff_tolerance = 0) {
+ ASSERT_GE(tolerance, 0) << "Test must define a tolerance >= 0";
+ ASSERT_GE(channel_diff_tolerance, 0)
+ << "Test must define a channel_diff_tolerance >= 0";
+ size_t processed_samples = 0u;
+ size_t encoded_bytes = 0u;
+ InitEncoder();
+ std::vector<int16_t> input;
+ std::vector<int16_t> decoded;
+ while (processed_samples + frame_size_ <= data_length_) {
+ // Extend input vector with `frame_size_`.
+ input.resize(input.size() + frame_size_, 0);
+ // Read from input file.
+ ASSERT_GE(input.size() - processed_samples, frame_size_);
+ ASSERT_TRUE(input_audio_.Read(frame_size_, codec_input_rate_hz_,
+ &input[processed_samples]));
+ rtc::Buffer encoded;
+ size_t enc_len =
+ EncodeFrame(&input[processed_samples], frame_size_, &encoded);
+      // Make sure that frame_size_ * channels_ samples are allocated and
+      // available to be written to.
+ decoded.resize((processed_samples + frame_size_) * channels_, 0);
+
+ const std::vector<AudioDecoder::ParseResult> parse_result =
+ decoder_->ParsePayload(std::move(encoded), /*timestamp=*/0);
+ RTC_CHECK_EQ(parse_result.size(), size_t{1});
+ auto decode_result = parse_result[0].frame->Decode(
+          rtc::ArrayView<int16_t>(&decoded[processed_samples * channels_],
+                                  frame_size_ * channels_));
+ RTC_CHECK(decode_result.has_value());
+ EXPECT_EQ(frame_size_ * channels_, decode_result->num_decoded_samples);
+ encoded_bytes += enc_len;
+ processed_samples += frame_size_;
+ }
+    // For some codecs it doesn't make sense to check the expected number of
+    // bytes, since the number can vary across platforms. Opus and iSAC are
+    // such codecs. In that case `expected_bytes` is set to 0.
+ if (expected_bytes) {
+ EXPECT_EQ(expected_bytes, encoded_bytes);
+ }
+ CompareInputOutput(input, decoded, processed_samples, channels_, tolerance,
+ delay);
+ if (channels_ == 2)
+ CompareTwoChannels(decoded, processed_samples, channels_,
+ channel_diff_tolerance);
+ EXPECT_LE(
+ MseInputOutput(input, decoded, processed_samples, channels_, delay),
+ mse);
+ }
+
+ // Encodes a payload and decodes it twice with decoder re-init before each
+ // decode. Verifies that the decoded result is the same.
+ void ReInitTest() {
+ InitEncoder();
+ std::unique_ptr<int16_t[]> input(new int16_t[frame_size_]);
+ ASSERT_TRUE(
+ input_audio_.Read(frame_size_, codec_input_rate_hz_, input.get()));
+ std::array<rtc::Buffer, 2> encoded;
+ EncodeFrame(input.get(), frame_size_, &encoded[0]);
+ // Make a copy.
+ encoded[1].SetData(encoded[0].data(), encoded[0].size());
+
+ std::array<std::vector<int16_t>, 2> outputs;
+ for (size_t i = 0; i < outputs.size(); ++i) {
+ outputs[i].resize(frame_size_ * channels_);
+ decoder_->Reset();
+ const std::vector<AudioDecoder::ParseResult> parse_result =
+ decoder_->ParsePayload(std::move(encoded[i]), /*timestamp=*/0);
+ RTC_CHECK_EQ(parse_result.size(), size_t{1});
+ auto decode_result = parse_result[0].frame->Decode(outputs[i]);
+ RTC_CHECK(decode_result.has_value());
+ EXPECT_EQ(frame_size_ * channels_, decode_result->num_decoded_samples);
+ }
+ EXPECT_EQ(outputs[0], outputs[1]);
+ }
+
+ // Call DecodePlc and verify that the correct number of samples is produced.
+ void DecodePlcTest() {
+ InitEncoder();
+ std::unique_ptr<int16_t[]> input(new int16_t[frame_size_]);
+ ASSERT_TRUE(
+ input_audio_.Read(frame_size_, codec_input_rate_hz_, input.get()));
+ rtc::Buffer encoded;
+ EncodeFrame(input.get(), frame_size_, &encoded);
+ decoder_->Reset();
+ std::vector<int16_t> output(frame_size_ * channels_);
+ const std::vector<AudioDecoder::ParseResult> parse_result =
+ decoder_->ParsePayload(std::move(encoded), /*timestamp=*/0);
+ RTC_CHECK_EQ(parse_result.size(), size_t{1});
+ auto decode_result = parse_result[0].frame->Decode(output);
+ RTC_CHECK(decode_result.has_value());
+ EXPECT_EQ(frame_size_ * channels_, decode_result->num_decoded_samples);
+ // Call DecodePlc and verify that we get one frame of data.
+ // (Overwrite the output from the above Decode call, but that does not
+ // matter.)
+ size_t dec_len =
+ decoder_->DecodePlc(/*num_frames=*/1, /*decoded=*/output.data());
+ EXPECT_EQ(frame_size_ * channels_, dec_len);
+ }
+
+ test::ResampleInputAudioFile input_audio_;
+ int codec_input_rate_hz_;
+ size_t frame_size_;
+ size_t data_length_;
+ size_t channels_;
+ const int payload_type_;
+ AudioDecoder* decoder_;
+ std::unique_ptr<AudioEncoder> audio_encoder_;
+};
+
+class AudioDecoderPcmUTest : public AudioDecoderTest {
+ protected:
+ AudioDecoderPcmUTest() : AudioDecoderTest() {
+ frame_size_ = 160;
+ data_length_ = 10 * frame_size_;
+ decoder_ = new AudioDecoderPcmU(1);
+ AudioEncoderPcmU::Config config;
+ config.frame_size_ms = static_cast<int>(frame_size_ / 8);
+ config.payload_type = payload_type_;
+ audio_encoder_.reset(new AudioEncoderPcmU(config));
+ }
+};
+
+class AudioDecoderPcmATest : public AudioDecoderTest {
+ protected:
+ AudioDecoderPcmATest() : AudioDecoderTest() {
+ frame_size_ = 160;
+ data_length_ = 10 * frame_size_;
+ decoder_ = new AudioDecoderPcmA(1);
+ AudioEncoderPcmA::Config config;
+ config.frame_size_ms = static_cast<int>(frame_size_ / 8);
+ config.payload_type = payload_type_;
+ audio_encoder_.reset(new AudioEncoderPcmA(config));
+ }
+};
+
+class AudioDecoderPcm16BTest : public AudioDecoderTest {
+ protected:
+ AudioDecoderPcm16BTest() : AudioDecoderTest() {
+ codec_input_rate_hz_ = 16000;
+ frame_size_ = 20 * codec_input_rate_hz_ / 1000;
+ data_length_ = 10 * frame_size_;
+ decoder_ = new AudioDecoderPcm16B(codec_input_rate_hz_, 1);
+ RTC_DCHECK(decoder_);
+ AudioEncoderPcm16B::Config config;
+ config.sample_rate_hz = codec_input_rate_hz_;
+ config.frame_size_ms =
+ static_cast<int>(frame_size_ / (config.sample_rate_hz / 1000));
+ config.payload_type = payload_type_;
+ audio_encoder_.reset(new AudioEncoderPcm16B(config));
+ }
+};
+
+class AudioDecoderIlbcTest : public AudioDecoderTest {
+ protected:
+ AudioDecoderIlbcTest() : AudioDecoderTest() {
+ codec_input_rate_hz_ = 8000;
+ frame_size_ = 240;
+ data_length_ = 10 * frame_size_;
+ decoder_ = new AudioDecoderIlbcImpl;
+ RTC_DCHECK(decoder_);
+ AudioEncoderIlbcConfig config;
+ config.frame_size_ms = 30;
+ audio_encoder_.reset(new AudioEncoderIlbcImpl(config, payload_type_));
+ }
+
+  // Shadows the default test since iLBC's function WebRtcIlbcfix_NetEqPlc does
+  // not return any data. It simply resets a few states and returns 0.
+ void DecodePlcTest() {
+ InitEncoder();
+ std::unique_ptr<int16_t[]> input(new int16_t[frame_size_]);
+ ASSERT_TRUE(
+ input_audio_.Read(frame_size_, codec_input_rate_hz_, input.get()));
+ rtc::Buffer encoded;
+ size_t enc_len = EncodeFrame(input.get(), frame_size_, &encoded);
+ AudioDecoder::SpeechType speech_type;
+ decoder_->Reset();
+ std::unique_ptr<int16_t[]> output(new int16_t[frame_size_ * channels_]);
+ size_t dec_len = decoder_->Decode(
+ encoded.data(), enc_len, codec_input_rate_hz_,
+ frame_size_ * channels_ * sizeof(int16_t), output.get(), &speech_type);
+ EXPECT_EQ(frame_size_, dec_len);
+ // Simply call DecodePlc and verify that we get 0 as return value.
+ EXPECT_EQ(0U, decoder_->DecodePlc(1, output.get()));
+ }
+};
+
+class AudioDecoderIsacFloatTest : public AudioDecoderTest {
+ protected:
+ AudioDecoderIsacFloatTest() : AudioDecoderTest() {
+ codec_input_rate_hz_ = 16000;
+ frame_size_ = 480;
+ data_length_ = 10 * frame_size_;
+ AudioEncoderIsacFloatImpl::Config config;
+ config.payload_type = payload_type_;
+ config.sample_rate_hz = codec_input_rate_hz_;
+ config.frame_size_ms =
+ 1000 * static_cast<int>(frame_size_) / codec_input_rate_hz_;
+ audio_encoder_.reset(new AudioEncoderIsacFloatImpl(config));
+ audio_encoder_->OnReceivedOverhead(kOverheadBytesPerPacket);
+
+ AudioDecoderIsacFloatImpl::Config decoder_config;
+ decoder_config.sample_rate_hz = codec_input_rate_hz_;
+ decoder_ = new AudioDecoderIsacFloatImpl(decoder_config);
+ }
+};
+
+class AudioDecoderIsacSwbTest : public AudioDecoderTest {
+ protected:
+ AudioDecoderIsacSwbTest() : AudioDecoderTest() {
+ codec_input_rate_hz_ = 32000;
+ frame_size_ = 960;
+ data_length_ = 10 * frame_size_;
+ AudioEncoderIsacFloatImpl::Config config;
+ config.payload_type = payload_type_;
+ config.sample_rate_hz = codec_input_rate_hz_;
+ config.frame_size_ms =
+ 1000 * static_cast<int>(frame_size_) / codec_input_rate_hz_;
+ audio_encoder_.reset(new AudioEncoderIsacFloatImpl(config));
+ audio_encoder_->OnReceivedOverhead(kOverheadBytesPerPacket);
+
+ AudioDecoderIsacFloatImpl::Config decoder_config;
+ decoder_config.sample_rate_hz = codec_input_rate_hz_;
+ decoder_ = new AudioDecoderIsacFloatImpl(decoder_config);
+ }
+};
+
+class AudioDecoderIsacFixTest : public AudioDecoderTest {
+ protected:
+ AudioDecoderIsacFixTest() : AudioDecoderTest() {
+ codec_input_rate_hz_ = 16000;
+ frame_size_ = 480;
+ data_length_ = 10 * frame_size_;
+ AudioEncoderIsacFixImpl::Config config;
+ config.payload_type = payload_type_;
+ config.sample_rate_hz = codec_input_rate_hz_;
+ config.frame_size_ms =
+ 1000 * static_cast<int>(frame_size_) / codec_input_rate_hz_;
+ audio_encoder_.reset(new AudioEncoderIsacFixImpl(config));
+ audio_encoder_->OnReceivedOverhead(kOverheadBytesPerPacket);
+
+ AudioDecoderIsacFixImpl::Config decoder_config;
+ decoder_config.sample_rate_hz = codec_input_rate_hz_;
+ decoder_ = new AudioDecoderIsacFixImpl(decoder_config);
+ }
+};
+
+class AudioDecoderG722Test : public AudioDecoderTest {
+ protected:
+ AudioDecoderG722Test() : AudioDecoderTest() {
+ codec_input_rate_hz_ = 16000;
+ frame_size_ = 160;
+ data_length_ = 10 * frame_size_;
+ decoder_ = new AudioDecoderG722Impl;
+ RTC_DCHECK(decoder_);
+ AudioEncoderG722Config config;
+ config.frame_size_ms = 10;
+ config.num_channels = 1;
+ audio_encoder_.reset(new AudioEncoderG722Impl(config, payload_type_));
+ }
+};
+
+class AudioDecoderG722StereoTest : public AudioDecoderTest {
+ protected:
+ AudioDecoderG722StereoTest() : AudioDecoderTest() {
+ channels_ = 2;
+ codec_input_rate_hz_ = 16000;
+ frame_size_ = 160;
+ data_length_ = 10 * frame_size_;
+ decoder_ = new AudioDecoderG722StereoImpl;
+ RTC_DCHECK(decoder_);
+ AudioEncoderG722Config config;
+ config.frame_size_ms = 10;
+ config.num_channels = 2;
+ audio_encoder_.reset(new AudioEncoderG722Impl(config, payload_type_));
+ }
+};
+
+class AudioDecoderOpusTest
+ : public AudioDecoderTest,
+ public testing::WithParamInterface<std::tuple<int, int>> {
+ protected:
+ AudioDecoderOpusTest() : AudioDecoderTest() {
+ channels_ = opus_num_channels_;
+ codec_input_rate_hz_ = opus_sample_rate_hz_;
+ frame_size_ = rtc::CheckedDivExact(opus_sample_rate_hz_, 100);
+ data_length_ = 10 * frame_size_;
+ decoder_ =
+ new AudioDecoderOpusImpl(opus_num_channels_, opus_sample_rate_hz_);
+ AudioEncoderOpusConfig config;
+ config.frame_size_ms = 10;
+ config.sample_rate_hz = opus_sample_rate_hz_;
+ config.num_channels = opus_num_channels_;
+ config.application = opus_num_channels_ == 1
+ ? AudioEncoderOpusConfig::ApplicationMode::kVoip
+ : AudioEncoderOpusConfig::ApplicationMode::kAudio;
+ audio_encoder_ = AudioEncoderOpus::MakeAudioEncoder(config, payload_type_);
+ audio_encoder_->OnReceivedOverhead(kOverheadBytesPerPacket);
+ }
+ const int opus_sample_rate_hz_{std::get<0>(GetParam())};
+ const int opus_num_channels_{std::get<1>(GetParam())};
+};
+
+INSTANTIATE_TEST_SUITE_P(Param,
+ AudioDecoderOpusTest,
+ testing::Combine(testing::Values(16000, 48000),
+ testing::Values(1, 2)));
+
+TEST_F(AudioDecoderPcmUTest, EncodeDecode) {
+ int tolerance = 251;
+ double mse = 1734.0;
+ EncodeDecodeTest(data_length_, tolerance, mse);
+ ReInitTest();
+ EXPECT_FALSE(decoder_->HasDecodePlc());
+}
+
+namespace {
+int SetAndGetTargetBitrate(AudioEncoder* audio_encoder, int rate) {
+ audio_encoder->OnReceivedUplinkBandwidth(rate, absl::nullopt);
+ return audio_encoder->GetTargetBitrate();
+}
+void TestSetAndGetTargetBitratesWithFixedCodec(AudioEncoder* audio_encoder,
+ int fixed_rate) {
+ EXPECT_EQ(fixed_rate, SetAndGetTargetBitrate(audio_encoder, 32000));
+ EXPECT_EQ(fixed_rate, SetAndGetTargetBitrate(audio_encoder, fixed_rate - 1));
+ EXPECT_EQ(fixed_rate, SetAndGetTargetBitrate(audio_encoder, fixed_rate));
+ EXPECT_EQ(fixed_rate, SetAndGetTargetBitrate(audio_encoder, fixed_rate + 1));
+}
+} // namespace
+
+TEST_F(AudioDecoderPcmUTest, SetTargetBitrate) {
+ TestSetAndGetTargetBitratesWithFixedCodec(audio_encoder_.get(), 64000);
+}
+
+TEST_F(AudioDecoderPcmATest, EncodeDecode) {
+ int tolerance = 308;
+ double mse = 1931.0;
+ EncodeDecodeTest(data_length_, tolerance, mse);
+ ReInitTest();
+ EXPECT_FALSE(decoder_->HasDecodePlc());
+}
+
+TEST_F(AudioDecoderPcmATest, SetTargetBitrate) {
+ TestSetAndGetTargetBitratesWithFixedCodec(audio_encoder_.get(), 64000);
+}
+
+TEST_F(AudioDecoderPcm16BTest, EncodeDecode) {
+ int tolerance = 0;
+ double mse = 0.0;
+ EncodeDecodeTest(2 * data_length_, tolerance, mse);
+ ReInitTest();
+ EXPECT_FALSE(decoder_->HasDecodePlc());
+}
+
+TEST_F(AudioDecoderPcm16BTest, SetTargetBitrate) {
+ TestSetAndGetTargetBitratesWithFixedCodec(audio_encoder_.get(),
+ codec_input_rate_hz_ * 16);
+}
+
+TEST_F(AudioDecoderIlbcTest, EncodeDecode) {
+ int tolerance = 6808;
+ double mse = 2.13e6;
+ int delay = 80; // Delay from input to output.
+ EncodeDecodeTest(500, tolerance, mse, delay);
+ ReInitTest();
+ EXPECT_TRUE(decoder_->HasDecodePlc());
+ DecodePlcTest();
+}
+
+TEST_F(AudioDecoderIlbcTest, SetTargetBitrate) {
+ TestSetAndGetTargetBitratesWithFixedCodec(audio_encoder_.get(), 13333);
+}
+
+TEST_F(AudioDecoderIsacFloatTest, EncodeDecode) {
+ int tolerance = 3399;
+ double mse = 434951.0;
+ int delay = 48; // Delay from input to output.
+ EncodeDecodeTest(0, tolerance, mse, delay);
+ ReInitTest();
+ EXPECT_FALSE(decoder_->HasDecodePlc());
+}
+
+TEST_F(AudioDecoderIsacFloatTest, SetTargetBitrate) {
+ const int overhead_rate =
+ 8 * kOverheadBytesPerPacket * codec_input_rate_hz_ / frame_size_;
+ EXPECT_EQ(10000,
+ SetAndGetTargetBitrate(audio_encoder_.get(), 9999 + overhead_rate));
+ EXPECT_EQ(10000, SetAndGetTargetBitrate(audio_encoder_.get(),
+ 10000 + overhead_rate));
+ EXPECT_EQ(23456, SetAndGetTargetBitrate(audio_encoder_.get(),
+ 23456 + overhead_rate));
+ EXPECT_EQ(32000, SetAndGetTargetBitrate(audio_encoder_.get(),
+ 32000 + overhead_rate));
+ EXPECT_EQ(32000, SetAndGetTargetBitrate(audio_encoder_.get(),
+ 32001 + overhead_rate));
+}
+
+TEST_F(AudioDecoderIsacSwbTest, EncodeDecode) {
+ int tolerance = 19757;
+ double mse = 8.18e6;
+ int delay = 160; // Delay from input to output.
+ EncodeDecodeTest(0, tolerance, mse, delay);
+ ReInitTest();
+ EXPECT_FALSE(decoder_->HasDecodePlc());
+}
+
+TEST_F(AudioDecoderIsacSwbTest, SetTargetBitrate) {
+ const int overhead_rate =
+ 8 * kOverheadBytesPerPacket * codec_input_rate_hz_ / frame_size_;
+ EXPECT_EQ(10000,
+ SetAndGetTargetBitrate(audio_encoder_.get(), 9999 + overhead_rate));
+ EXPECT_EQ(10000, SetAndGetTargetBitrate(audio_encoder_.get(),
+ 10000 + overhead_rate));
+ EXPECT_EQ(23456, SetAndGetTargetBitrate(audio_encoder_.get(),
+ 23456 + overhead_rate));
+ EXPECT_EQ(56000, SetAndGetTargetBitrate(audio_encoder_.get(),
+ 56000 + overhead_rate));
+ EXPECT_EQ(56000, SetAndGetTargetBitrate(audio_encoder_.get(),
+ 56001 + overhead_rate));
+}
+
+// Run the bit-exactness test only for release builds.
+#if defined(NDEBUG)
+TEST_F(AudioDecoderIsacFixTest, EncodeDecode) {
+ int tolerance = 11034;
+ double mse = 3.46e6;
+ int delay = 54; // Delay from input to output.
+#if defined(WEBRTC_ANDROID) && defined(WEBRTC_ARCH_ARM)
+ static const int kEncodedBytes = 685;
+#elif defined(WEBRTC_MAC) && defined(WEBRTC_ARCH_ARM64) // M1 Mac
+ static const int kEncodedBytes = 673;
+#elif defined(WEBRTC_ARCH_ARM64)
+ static const int kEncodedBytes = 673;
+#elif defined(WEBRTC_WIN) && defined(_MSC_VER) && !defined(__clang__)
+ static const int kEncodedBytes = 671;
+#elif defined(WEBRTC_IOS) && defined(WEBRTC_ARCH_X86_64)
+ static const int kEncodedBytes = 671;
+#else
+ static const int kEncodedBytes = 671;
+#endif
+ EncodeDecodeTest(kEncodedBytes, tolerance, mse, delay);
+ ReInitTest();
+ EXPECT_FALSE(decoder_->HasDecodePlc());
+}
+#endif
+
+TEST_F(AudioDecoderIsacFixTest, SetTargetBitrate) {
+ const int overhead_rate =
+ 8 * kOverheadBytesPerPacket * codec_input_rate_hz_ / frame_size_;
+ EXPECT_EQ(10000,
+ SetAndGetTargetBitrate(audio_encoder_.get(), 9999 + overhead_rate));
+ EXPECT_EQ(10000, SetAndGetTargetBitrate(audio_encoder_.get(),
+ 10000 + overhead_rate));
+ EXPECT_EQ(23456, SetAndGetTargetBitrate(audio_encoder_.get(),
+ 23456 + overhead_rate));
+ EXPECT_EQ(32000, SetAndGetTargetBitrate(audio_encoder_.get(),
+ 32000 + overhead_rate));
+ EXPECT_EQ(32000, SetAndGetTargetBitrate(audio_encoder_.get(),
+ 32001 + overhead_rate));
+}
+
+TEST_F(AudioDecoderG722Test, EncodeDecode) {
+ int tolerance = 6176;
+ double mse = 238630.0;
+ int delay = 22; // Delay from input to output.
+ EncodeDecodeTest(data_length_ / 2, tolerance, mse, delay);
+ ReInitTest();
+ EXPECT_FALSE(decoder_->HasDecodePlc());
+}
+
+TEST_F(AudioDecoderG722Test, SetTargetBitrate) {
+ TestSetAndGetTargetBitratesWithFixedCodec(audio_encoder_.get(), 64000);
+}
+
+TEST_F(AudioDecoderG722StereoTest, EncodeDecode) {
+ int tolerance = 6176;
+ int channel_diff_tolerance = 0;
+ double mse = 238630.0;
+ int delay = 22; // Delay from input to output.
+ EncodeDecodeTest(data_length_, tolerance, mse, delay, channel_diff_tolerance);
+ ReInitTest();
+ EXPECT_FALSE(decoder_->HasDecodePlc());
+}
+
+TEST_F(AudioDecoderG722StereoTest, SetTargetBitrate) {
+ TestSetAndGetTargetBitratesWithFixedCodec(audio_encoder_.get(), 128000);
+}
+
+// TODO(http://bugs.webrtc.org/12518): Enable the test after Opus has been
+// updated.
+TEST_P(AudioDecoderOpusTest, DISABLED_EncodeDecode) {
+ constexpr int tolerance = 6176;
+ constexpr int channel_diff_tolerance = 6;
+ constexpr double mse = 238630.0;
+ constexpr int delay = 22; // Delay from input to output.
+ EncodeDecodeTest(0, tolerance, mse, delay, channel_diff_tolerance);
+ ReInitTest();
+ EXPECT_FALSE(decoder_->HasDecodePlc());
+}
+
+TEST_P(AudioDecoderOpusTest, SetTargetBitrate) {
+ const int overhead_rate =
+ 8 * kOverheadBytesPerPacket * codec_input_rate_hz_ / frame_size_;
+ EXPECT_EQ(6000,
+ SetAndGetTargetBitrate(audio_encoder_.get(), 5999 + overhead_rate));
+ EXPECT_EQ(6000,
+ SetAndGetTargetBitrate(audio_encoder_.get(), 6000 + overhead_rate));
+ EXPECT_EQ(32000, SetAndGetTargetBitrate(audio_encoder_.get(),
+ 32000 + overhead_rate));
+ EXPECT_EQ(510000, SetAndGetTargetBitrate(audio_encoder_.get(),
+ 510000 + overhead_rate));
+ EXPECT_EQ(510000, SetAndGetTargetBitrate(audio_encoder_.get(),
+ 511000 + overhead_rate));
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/audio_multi_vector.cc b/third_party/libwebrtc/modules/audio_coding/neteq/audio_multi_vector.cc
new file mode 100644
index 0000000000..220d5a17d7
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/audio_multi_vector.cc
@@ -0,0 +1,222 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/audio_multi_vector.h"
+
+#include <algorithm>
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+AudioMultiVector::AudioMultiVector(size_t N) {
+ RTC_DCHECK_GT(N, 0);
+ if (N < 1)
+ N = 1;
+ for (size_t n = 0; n < N; ++n) {
+ channels_.push_back(new AudioVector);
+ }
+ num_channels_ = N;
+}
+
+AudioMultiVector::AudioMultiVector(size_t N, size_t initial_size) {
+ RTC_DCHECK_GT(N, 0);
+ if (N < 1)
+ N = 1;
+ for (size_t n = 0; n < N; ++n) {
+ channels_.push_back(new AudioVector(initial_size));
+ }
+ num_channels_ = N;
+}
+
+AudioMultiVector::~AudioMultiVector() {
+ std::vector<AudioVector*>::iterator it = channels_.begin();
+ while (it != channels_.end()) {
+ delete (*it);
+ ++it;
+ }
+}
+
+void AudioMultiVector::Clear() {
+ for (size_t i = 0; i < num_channels_; ++i) {
+ channels_[i]->Clear();
+ }
+}
+
+void AudioMultiVector::Zeros(size_t length) {
+ for (size_t i = 0; i < num_channels_; ++i) {
+ channels_[i]->Clear();
+ channels_[i]->Extend(length);
+ }
+}
+
+void AudioMultiVector::CopyTo(AudioMultiVector* copy_to) const {
+ if (copy_to) {
+ for (size_t i = 0; i < num_channels_; ++i) {
+ channels_[i]->CopyTo(&(*copy_to)[i]);
+ }
+ }
+}
+
+void AudioMultiVector::PushBackInterleaved(
+ rtc::ArrayView<const int16_t> append_this) {
+ RTC_DCHECK_EQ(append_this.size() % num_channels_, 0);
+ if (num_channels_ == 1) {
+ // Special case to avoid extra allocation and data shuffling.
+ channels_[0]->PushBack(append_this.data(), append_this.size());
+ return;
+ }
+ size_t length_per_channel = append_this.size() / num_channels_;
+ int16_t* temp_array = new int16_t[length_per_channel]; // Temporary storage.
+ for (size_t channel = 0; channel < num_channels_; ++channel) {
+ // Copy elements to `temp_array`.
+ // Set `source_ptr` to first element of this channel.
+ const int16_t* source_ptr = &append_this[channel];
+ for (size_t i = 0; i < length_per_channel; ++i) {
+ temp_array[i] = *source_ptr;
+ source_ptr += num_channels_; // Jump to next element of this channel.
+ }
+ channels_[channel]->PushBack(temp_array, length_per_channel);
+ }
+ delete[] temp_array;
+}
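+
+// Illustrative example of the de-interleaving above: for a stereo vector,
+// append_this = {L0, R0, L1, R1, L2, R2} has length_per_channel = 3, and
+// after the call channel 0 holds {L0, L1, L2} and channel 1 holds
+// {R0, R1, R2}.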
+
+void AudioMultiVector::PushBack(const AudioMultiVector& append_this) {
+ RTC_DCHECK_EQ(num_channels_, append_this.num_channels_);
+ if (num_channels_ == append_this.num_channels_) {
+ for (size_t i = 0; i < num_channels_; ++i) {
+ channels_[i]->PushBack(append_this[i]);
+ }
+ }
+}
+
+void AudioMultiVector::PushBackFromIndex(const AudioMultiVector& append_this,
+ size_t index) {
+ RTC_DCHECK_LT(index, append_this.Size());
+ index = std::min(index, append_this.Size() - 1);
+ size_t length = append_this.Size() - index;
+ RTC_DCHECK_EQ(num_channels_, append_this.num_channels_);
+ if (num_channels_ == append_this.num_channels_) {
+ for (size_t i = 0; i < num_channels_; ++i) {
+ channels_[i]->PushBack(append_this[i], length, index);
+ }
+ }
+}
+
+void AudioMultiVector::PopFront(size_t length) {
+ for (size_t i = 0; i < num_channels_; ++i) {
+ channels_[i]->PopFront(length);
+ }
+}
+
+void AudioMultiVector::PopBack(size_t length) {
+ for (size_t i = 0; i < num_channels_; ++i) {
+ channels_[i]->PopBack(length);
+ }
+}
+
+size_t AudioMultiVector::ReadInterleaved(size_t length,
+ int16_t* destination) const {
+ return ReadInterleavedFromIndex(0, length, destination);
+}
+
+size_t AudioMultiVector::ReadInterleavedFromIndex(size_t start_index,
+ size_t length,
+ int16_t* destination) const {
+ RTC_DCHECK(destination);
+ size_t index = 0; // Number of elements written to `destination` so far.
+ RTC_DCHECK_LE(start_index, Size());
+ start_index = std::min(start_index, Size());
+ if (length + start_index > Size()) {
+ length = Size() - start_index;
+ }
+ if (num_channels_ == 1) {
+ // Special case to avoid the nested for loop below.
+ (*this)[0].CopyTo(length, start_index, destination);
+ return length;
+ }
+ for (size_t i = 0; i < length; ++i) {
+ for (size_t channel = 0; channel < num_channels_; ++channel) {
+ destination[index] = (*this)[channel][i + start_index];
+ ++index;
+ }
+ }
+ return index;
+}
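+
+// Illustrative example of the interleaving above: with two channels holding
+// {L0, L1, L2} and {R0, R1, R2}, ReadInterleavedFromIndex(1, 2, destination)
+// writes {L1, R1, L2, R2} and returns 4.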
+
+size_t AudioMultiVector::ReadInterleavedFromEnd(size_t length,
+ int16_t* destination) const {
+ length = std::min(length, Size()); // Cannot read more than Size() elements.
+ return ReadInterleavedFromIndex(Size() - length, length, destination);
+}
+
+void AudioMultiVector::OverwriteAt(const AudioMultiVector& insert_this,
+ size_t length,
+ size_t position) {
+ RTC_DCHECK_EQ(num_channels_, insert_this.num_channels_);
+ // Cap `length` at the length of `insert_this`.
+ RTC_DCHECK_LE(length, insert_this.Size());
+ length = std::min(length, insert_this.Size());
+ if (num_channels_ == insert_this.num_channels_) {
+ for (size_t i = 0; i < num_channels_; ++i) {
+ channels_[i]->OverwriteAt(insert_this[i], length, position);
+ }
+ }
+}
+
+void AudioMultiVector::CrossFade(const AudioMultiVector& append_this,
+ size_t fade_length) {
+ RTC_DCHECK_EQ(num_channels_, append_this.num_channels_);
+ if (num_channels_ == append_this.num_channels_) {
+ for (size_t i = 0; i < num_channels_; ++i) {
+ channels_[i]->CrossFade(append_this[i], fade_length);
+ }
+ }
+}
+
+size_t AudioMultiVector::Channels() const {
+ return num_channels_;
+}
+
+size_t AudioMultiVector::Size() const {
+ RTC_DCHECK(channels_[0]);
+ return channels_[0]->Size();
+}
+
+void AudioMultiVector::AssertSize(size_t required_size) {
+ if (Size() < required_size) {
+ size_t extend_length = required_size - Size();
+ for (size_t channel = 0; channel < num_channels_; ++channel) {
+ channels_[channel]->Extend(extend_length);
+ }
+ }
+}
+
+bool AudioMultiVector::Empty() const {
+ RTC_DCHECK(channels_[0]);
+ return channels_[0]->Empty();
+}
+
+void AudioMultiVector::CopyChannel(size_t from_channel, size_t to_channel) {
+ RTC_DCHECK_LT(from_channel, num_channels_);
+ RTC_DCHECK_LT(to_channel, num_channels_);
+ channels_[from_channel]->CopyTo(channels_[to_channel]);
+}
+
+const AudioVector& AudioMultiVector::operator[](size_t index) const {
+ return *(channels_[index]);
+}
+
+AudioVector& AudioMultiVector::operator[](size_t index) {
+ return *(channels_[index]);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/audio_multi_vector.h b/third_party/libwebrtc/modules/audio_coding/neteq/audio_multi_vector.h
new file mode 100644
index 0000000000..715ec6dfc7
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/audio_multi_vector.h
@@ -0,0 +1,138 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_AUDIO_MULTI_VECTOR_H_
+#define MODULES_AUDIO_CODING_NETEQ_AUDIO_MULTI_VECTOR_H_
+
+#include <stdint.h>
+#include <string.h>
+
+#include <vector>
+
+#include "api/array_view.h"
+#include "modules/audio_coding/neteq/audio_vector.h"
+
+namespace webrtc {
+
+class AudioMultiVector {
+ public:
+ // Creates an empty AudioMultiVector with `N` audio channels. `N` must be
+ // larger than 0.
+ explicit AudioMultiVector(size_t N);
+
+ // Creates an AudioMultiVector with `N` audio channels, each channel having
+ // an initial size. `N` must be larger than 0.
+ AudioMultiVector(size_t N, size_t initial_size);
+
+ virtual ~AudioMultiVector();
+
+ AudioMultiVector(const AudioMultiVector&) = delete;
+ AudioMultiVector& operator=(const AudioMultiVector&) = delete;
+
+  // Deletes all values and makes the vector empty.
+ virtual void Clear();
+
+ // Clears the vector and inserts `length` zeros into each channel.
+ virtual void Zeros(size_t length);
+
+ // Copies all values from this vector to `copy_to`. Any contents in `copy_to`
+ // are deleted. After the operation is done, `copy_to` will be an exact
+ // replica of this object. The source and the destination must have the same
+ // number of channels.
+ virtual void CopyTo(AudioMultiVector* copy_to) const;
+
+ // Appends the contents of `append_this` to the end of this object. The array
+  // is assumed to be channel-interleaved. Its length must be an exact multiple
+  // of this object's number of channels. The length of this object is increased
+  // by the length of the array divided by the number of channels.
+ void PushBackInterleaved(rtc::ArrayView<const int16_t> append_this);
+
+ // Appends the contents of AudioMultiVector `append_this` to this object. The
+  // length of this object is increased by the length of `append_this`.
+ virtual void PushBack(const AudioMultiVector& append_this);
+
+ // Appends the contents of AudioMultiVector `append_this` to this object,
+  // taken from `index` up until the end of `append_this`. The length of this
+  // object is increased accordingly.
+ virtual void PushBackFromIndex(const AudioMultiVector& append_this,
+ size_t index);
+
+ // Removes `length` elements from the beginning of this object, from each
+ // channel.
+ virtual void PopFront(size_t length);
+
+ // Removes `length` elements from the end of this object, from each
+ // channel.
+ virtual void PopBack(size_t length);
+
+ // Reads `length` samples from each channel and writes them interleaved to
+ // `destination`. The total number of elements written to `destination` is
+ // returned, i.e., `length` * number of channels. If the AudioMultiVector
+ // contains less than `length` samples per channel, this is reflected in the
+ // return value.
+ virtual size_t ReadInterleaved(size_t length, int16_t* destination) const;
+
+ // Like ReadInterleaved() above, but reads from `start_index` instead of from
+ // the beginning.
+ virtual size_t ReadInterleavedFromIndex(size_t start_index,
+ size_t length,
+ int16_t* destination) const;
+
+ // Like ReadInterleaved() above, but reads from the end instead of from
+ // the beginning.
+ virtual size_t ReadInterleavedFromEnd(size_t length,
+ int16_t* destination) const;
+
+ // Overwrites each channel in this AudioMultiVector with values taken from
+ // `insert_this`. The values are taken from the beginning of `insert_this` and
+ // are inserted starting at `position`. `length` values are written into each
+ // channel. If `length` and `position` are selected such that the new data
+ // extends beyond the end of the current AudioVector, the vector is extended
+ // to accommodate the new data. `length` is limited to the length of
+ // `insert_this`.
+ virtual void OverwriteAt(const AudioMultiVector& insert_this,
+ size_t length,
+ size_t position);
+
+ // Appends `append_this` to the end of the current vector. Lets the two
+ // vectors overlap by `fade_length` samples (per channel), and cross-fade
+ // linearly in this region.
+ virtual void CrossFade(const AudioMultiVector& append_this,
+ size_t fade_length);
+
+ // Returns the number of channels.
+ virtual size_t Channels() const;
+
+ // Returns the number of elements per channel in this AudioMultiVector.
+ virtual size_t Size() const;
+
+  // Verifies that each channel can hold at least `required_size` elements. If
+  // not, extends accordingly.
+ virtual void AssertSize(size_t required_size);
+
+ virtual bool Empty() const;
+
+ // Copies the data between two channels in the AudioMultiVector. The method
+ // does not add any new channel. Thus, `from_channel` and `to_channel` must
+ // both be valid channel numbers.
+ virtual void CopyChannel(size_t from_channel, size_t to_channel);
+
+ // Accesses and modifies a channel (i.e., an AudioVector object) of this
+ // AudioMultiVector.
+ const AudioVector& operator[](size_t index) const;
+ AudioVector& operator[](size_t index);
+
+ protected:
+ std::vector<AudioVector*> channels_;
+ size_t num_channels_;
+};
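+
+// A minimal usage sketch (illustrative only; the values are assumptions):
+//
+//   AudioMultiVector vec(/*N=*/2);
+//   const int16_t interleaved[] = {10, 20, 11, 21};  // L0, R0, L1, R1.
+//   vec.PushBackInterleaved(interleaved);
+//   int16_t out[4];
+//   size_t written = vec.ReadInterleaved(/*length=*/2, out);  // written == 4.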
+
+} // namespace webrtc
+#endif // MODULES_AUDIO_CODING_NETEQ_AUDIO_MULTI_VECTOR_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/audio_multi_vector_unittest.cc b/third_party/libwebrtc/modules/audio_coding/neteq/audio_multi_vector_unittest.cc
new file mode 100644
index 0000000000..329377a18e
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/audio_multi_vector_unittest.cc
@@ -0,0 +1,317 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/audio_multi_vector.h"
+
+#include <stdlib.h>
+
+#include <string>
+#include <vector>
+
+#include "rtc_base/numerics/safe_conversions.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+// This is a value-parameterized test. The test cases are instantiated with
+// different values for the test parameter, which is used to determine the
+// number of channels in the AudioMultiVector. Note that it is not possible
+// to combine typed testing with value-parameterized testing, and since the
+// tests for AudioVector already cover a number of different type parameters,
+// this test focuses on testing different numbers of channels, while keeping
+// the value type constant.
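+//
+// For reference, a value-parameterized suite like this one is instantiated
+// with a call along the lines of the sketch below (the suite name and channel
+// counts here are assumptions; the actual call appears further down in the
+// file):
+//
+//   INSTANTIATE_TEST_SUITE_P(MultiChannel,
+//                            AudioMultiVectorTest,
+//                            ::testing::Values(1, 2, 5));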
+
+class AudioMultiVectorTest : public ::testing::TestWithParam<size_t> {
+ protected:
+ AudioMultiVectorTest()
+ : num_channels_(GetParam()), // Get the test parameter.
+ array_interleaved_(num_channels_ * array_length()) {}
+
+ ~AudioMultiVectorTest() = default;
+
+ virtual void SetUp() {
+ // Populate test arrays.
+ for (size_t i = 0; i < array_length(); ++i) {
+ array_[i] = static_cast<int16_t>(i);
+ }
+ int16_t* ptr = array_interleaved_.data();
+ // Write 100, 101, 102, ... for first channel.
+ // Write 200, 201, 202, ... for second channel.
+ // And so on.
+ for (size_t i = 0; i < array_length(); ++i) {
+ for (size_t j = 1; j <= num_channels_; ++j) {
+ *ptr = rtc::checked_cast<int16_t>(j * 100 + i);
+ ++ptr;
+ }
+ }
+ }
+
+ size_t array_length() const { return sizeof(array_) / sizeof(array_[0]); }
+
+ const size_t num_channels_;
+ int16_t array_[10];
+ std::vector<int16_t> array_interleaved_;
+};
+
+// Create and destroy AudioMultiVector objects, both empty and with a predefined
+// length.
+TEST_P(AudioMultiVectorTest, CreateAndDestroy) {
+ AudioMultiVector vec1(num_channels_);
+ EXPECT_TRUE(vec1.Empty());
+ EXPECT_EQ(num_channels_, vec1.Channels());
+ EXPECT_EQ(0u, vec1.Size());
+
+ size_t initial_size = 17;
+ AudioMultiVector vec2(num_channels_, initial_size);
+ EXPECT_FALSE(vec2.Empty());
+ EXPECT_EQ(num_channels_, vec2.Channels());
+ EXPECT_EQ(initial_size, vec2.Size());
+}
+
+// Test the subscript operator [] for getting and setting.
+TEST_P(AudioMultiVectorTest, SubscriptOperator) {
+ AudioMultiVector vec(num_channels_, array_length());
+ for (size_t channel = 0; channel < num_channels_; ++channel) {
+ for (size_t i = 0; i < array_length(); ++i) {
+ vec[channel][i] = static_cast<int16_t>(i);
+ // Make sure to use the const version.
+ const AudioVector& audio_vec = vec[channel];
+ EXPECT_EQ(static_cast<int16_t>(i), audio_vec[i]);
+ }
+ }
+}
+
+// Test the PushBackInterleaved method and the CopyTo method. The Clear
+// method is also invoked.
+TEST_P(AudioMultiVectorTest, PushBackInterleavedAndCopy) {
+ AudioMultiVector vec(num_channels_);
+ vec.PushBackInterleaved(array_interleaved_);
+ AudioMultiVector vec_copy(num_channels_);
+ vec.CopyTo(&vec_copy); // Copy from `vec` to `vec_copy`.
+ ASSERT_EQ(num_channels_, vec.Channels());
+ ASSERT_EQ(array_length(), vec.Size());
+ ASSERT_EQ(num_channels_, vec_copy.Channels());
+ ASSERT_EQ(array_length(), vec_copy.Size());
+ for (size_t channel = 0; channel < vec.Channels(); ++channel) {
+ for (size_t i = 0; i < array_length(); ++i) {
+ EXPECT_EQ(static_cast<int16_t>((channel + 1) * 100 + i), vec[channel][i]);
+ EXPECT_EQ(vec[channel][i], vec_copy[channel][i]);
+ }
+ }
+
+ // Clear `vec` and verify that it is empty.
+ vec.Clear();
+ EXPECT_TRUE(vec.Empty());
+
+ // Now copy the empty vector and verify that the copy becomes empty too.
+ vec.CopyTo(&vec_copy);
+ EXPECT_TRUE(vec_copy.Empty());
+}
+
+// Try to copy to a NULL pointer. Nothing should happen.
+TEST_P(AudioMultiVectorTest, CopyToNull) {
+ AudioMultiVector vec(num_channels_);
+ AudioMultiVector* vec_copy = NULL;
+ vec.PushBackInterleaved(array_interleaved_);
+ vec.CopyTo(vec_copy);
+}
+
+// Test the PushBack method with another AudioMultiVector as input argument.
+TEST_P(AudioMultiVectorTest, PushBackVector) {
+ AudioMultiVector vec1(num_channels_, array_length());
+ AudioMultiVector vec2(num_channels_, array_length());
+ // Set the first vector to [0, 1, ..., array_length() - 1] +
+ // 100 * channel_number.
+ // Set the second vector to [array_length(), array_length() + 1, ...,
+ // 2 * array_length() - 1] + 100 * channel_number.
+ for (size_t channel = 0; channel < num_channels_; ++channel) {
+ for (size_t i = 0; i < array_length(); ++i) {
+ vec1[channel][i] = static_cast<int16_t>(i + 100 * channel);
+ vec2[channel][i] =
+ static_cast<int16_t>(i + 100 * channel + array_length());
+ }
+ }
+ // Append vec2 to the back of vec1.
+ vec1.PushBack(vec2);
+ ASSERT_EQ(2u * array_length(), vec1.Size());
+ for (size_t channel = 0; channel < num_channels_; ++channel) {
+ for (size_t i = 0; i < 2 * array_length(); ++i) {
+ EXPECT_EQ(static_cast<int16_t>(i + 100 * channel), vec1[channel][i]);
+ }
+ }
+}
+
+// Test the PushBackFromIndex method.
+TEST_P(AudioMultiVectorTest, PushBackFromIndex) {
+ AudioMultiVector vec1(num_channels_);
+ vec1.PushBackInterleaved(array_interleaved_);
+ AudioMultiVector vec2(num_channels_);
+
+  // Append vec1 to the back of vec2 (which is empty), reading vec1 from its
+  // second-to-last element onwards.
+ vec2.PushBackFromIndex(vec1, array_length() - 2);
+ ASSERT_EQ(2u, vec2.Size());
+ for (size_t channel = 0; channel < num_channels_; ++channel) {
+ for (size_t i = 0; i < 2; ++i) {
+ EXPECT_EQ(array_interleaved_[channel +
+ num_channels_ * (array_length() - 2 + i)],
+ vec2[channel][i]);
+ }
+ }
+}
+
+// Start by pushing some values to the vector, then test the Zeros method.
+TEST_P(AudioMultiVectorTest, Zeros) {
+ AudioMultiVector vec(num_channels_);
+ vec.PushBackInterleaved(array_interleaved_);
+ vec.Zeros(2 * array_length());
+ ASSERT_EQ(num_channels_, vec.Channels());
+ ASSERT_EQ(2u * array_length(), vec.Size());
+ for (size_t channel = 0; channel < num_channels_; ++channel) {
+ for (size_t i = 0; i < 2 * array_length(); ++i) {
+ EXPECT_EQ(0, vec[channel][i]);
+ }
+ }
+}
+
+// Test the ReadInterleaved method.
+TEST_P(AudioMultiVectorTest, ReadInterleaved) {
+ AudioMultiVector vec(num_channels_);
+ vec.PushBackInterleaved(array_interleaved_);
+ int16_t* output = new int16_t[array_interleaved_.size()];
+ // Read 5 samples.
+ size_t read_samples = 5;
+ EXPECT_EQ(num_channels_ * read_samples,
+ vec.ReadInterleaved(read_samples, output));
+ EXPECT_EQ(0, memcmp(array_interleaved_.data(), output,
+ read_samples * sizeof(int16_t)));
+
+ // Read too many samples. Expect to get all samples from the vector.
+ EXPECT_EQ(array_interleaved_.size(),
+ vec.ReadInterleaved(array_length() + 1, output));
+  EXPECT_EQ(0, memcmp(array_interleaved_.data(), output,
+                      array_interleaved_.size() * sizeof(int16_t)));
+
+ delete[] output;
+}
+
+// Test the PopFront method.
+TEST_P(AudioMultiVectorTest, PopFront) {
+ AudioMultiVector vec(num_channels_);
+ vec.PushBackInterleaved(array_interleaved_);
+ vec.PopFront(1); // Remove one element from each channel.
+ ASSERT_EQ(array_length() - 1u, vec.Size());
+ // Let `ptr` point to the second element of the first channel in the
+ // interleaved array.
+ int16_t* ptr = &array_interleaved_[num_channels_];
+ for (size_t i = 0; i < array_length() - 1; ++i) {
+ for (size_t channel = 0; channel < num_channels_; ++channel) {
+ EXPECT_EQ(*ptr, vec[channel][i]);
+ ++ptr;
+ }
+ }
+ vec.PopFront(array_length()); // Remove more elements than vector size.
+ EXPECT_EQ(0u, vec.Size());
+}
+
+// Test the PopBack method.
+TEST_P(AudioMultiVectorTest, PopBack) {
+ AudioMultiVector vec(num_channels_);
+ vec.PushBackInterleaved(array_interleaved_);
+ vec.PopBack(1); // Remove one element from each channel.
+ ASSERT_EQ(array_length() - 1u, vec.Size());
+ // Let `ptr` point to the first element of the first channel in the
+ // interleaved array.
+ int16_t* ptr = array_interleaved_.data();
+ for (size_t i = 0; i < array_length() - 1; ++i) {
+ for (size_t channel = 0; channel < num_channels_; ++channel) {
+ EXPECT_EQ(*ptr, vec[channel][i]);
+ ++ptr;
+ }
+ }
+ vec.PopBack(array_length()); // Remove more elements than vector size.
+ EXPECT_EQ(0u, vec.Size());
+}
+
+// Test the AssertSize method.
+TEST_P(AudioMultiVectorTest, AssertSize) {
+ AudioMultiVector vec(num_channels_, array_length());
+ EXPECT_EQ(array_length(), vec.Size());
+  // Start by asserting sizes smaller than what is already allocated.
+ vec.AssertSize(0);
+ vec.AssertSize(array_length() - 1);
+ // Nothing should have changed.
+ EXPECT_EQ(array_length(), vec.Size());
+  // Assert a size one element larger than what is already allocated.
+ vec.AssertSize(array_length() + 1);
+ // Expect vector to have grown.
+ EXPECT_EQ(array_length() + 1, vec.Size());
+ // Also check the individual AudioVectors.
+ for (size_t channel = 0; channel < vec.Channels(); ++channel) {
+ EXPECT_EQ(array_length() + 1u, vec[channel].Size());
+ }
+}
+
+// Test the OverwriteAt method with another AudioMultiVector as input argument.
+TEST_P(AudioMultiVectorTest, OverwriteAt) {
+ AudioMultiVector vec1(num_channels_);
+ vec1.PushBackInterleaved(array_interleaved_);
+ AudioMultiVector vec2(num_channels_);
+ vec2.Zeros(3); // 3 zeros in each channel.
+  // Overwrite three elements of vec1, starting at position 5, with vec2.
+ vec1.OverwriteAt(vec2, 3, 5);
+ // Verify result.
+ // Length remains the same.
+ ASSERT_EQ(array_length(), vec1.Size());
+ int16_t* ptr = array_interleaved_.data();
+  for (size_t i = 0; i < array_length(); ++i) {
+ for (size_t channel = 0; channel < num_channels_; ++channel) {
+ if (i >= 5 && i <= 7) {
+ // Elements 5, 6, 7 should have been replaced with zeros.
+ EXPECT_EQ(0, vec1[channel][i]);
+ } else {
+ EXPECT_EQ(*ptr, vec1[channel][i]);
+ }
+ ++ptr;
+ }
+ }
+}
+
+// Test the CopyChannel method, when the test is instantiated with at least two
+// channels.
+TEST_P(AudioMultiVectorTest, CopyChannel) {
+ if (num_channels_ < 2)
+ return;
+
+ AudioMultiVector vec(num_channels_);
+ vec.PushBackInterleaved(array_interleaved_);
+ // Create a reference copy.
+ AudioMultiVector ref(num_channels_);
+ ref.PushBack(vec);
+ // Copy from first to last channel.
+ vec.CopyChannel(0, num_channels_ - 1);
+ // Verify that the first and last channels are identical; the others should
+ // be left untouched.
+ for (size_t i = 0; i < array_length(); ++i) {
+ // Verify that all but the last channel are untouched.
+ for (size_t channel = 0; channel < num_channels_ - 1; ++channel) {
+ EXPECT_EQ(ref[channel][i], vec[channel][i]);
+ }
+ // Verify that the last and the first channels are identical.
+ EXPECT_EQ(vec[0][i], vec[num_channels_ - 1][i]);
+ }
+}
+
+INSTANTIATE_TEST_SUITE_P(TestNumChannels,
+ AudioMultiVectorTest,
+ ::testing::Values(static_cast<size_t>(1),
+ static_cast<size_t>(2),
+ static_cast<size_t>(5)));
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/audio_vector.cc b/third_party/libwebrtc/modules/audio_coding/neteq/audio_vector.cc
new file mode 100644
index 0000000000..10e8936447
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/audio_vector.cc
@@ -0,0 +1,381 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/audio_vector.h"
+
+#include <algorithm>
+#include <memory>
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+AudioVector::AudioVector() : AudioVector(kDefaultInitialSize) {
+ Clear();
+}
+
+AudioVector::AudioVector(size_t initial_size)
+ : array_(new int16_t[initial_size + 1]),
+ capacity_(initial_size + 1),
+ begin_index_(0),
+ end_index_(capacity_ - 1) {
+ memset(array_.get(), 0, capacity_ * sizeof(int16_t));
+}
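+
+// The constructor above allocates one sample more than `initial_size`: the
+// buffer is circular, and the spare slot removes the ambiguity between a
+// full and an empty vector. `begin_index_` == `end_index_` indicates empty,
+// while `begin_index_` == (`end_index_` + 1) % capacity indicates full (see
+// Reserve()).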
+
+AudioVector::~AudioVector() = default;
+
+void AudioVector::Clear() {
+ end_index_ = begin_index_ = 0;
+}
+
+void AudioVector::CopyTo(AudioVector* copy_to) const {
+ RTC_DCHECK(copy_to);
+ copy_to->Reserve(Size());
+ CopyTo(Size(), 0, copy_to->array_.get());
+ copy_to->begin_index_ = 0;
+ copy_to->end_index_ = Size();
+}
+
+void AudioVector::CopyTo(size_t length,
+ size_t position,
+ int16_t* copy_to) const {
+ if (length == 0)
+ return;
+ length = std::min(length, Size() - position);
+ const size_t copy_index = (begin_index_ + position) % capacity_;
+ const size_t first_chunk_length = std::min(length, capacity_ - copy_index);
+ memcpy(copy_to, &array_[copy_index], first_chunk_length * sizeof(int16_t));
+ const size_t remaining_length = length - first_chunk_length;
+ if (remaining_length > 0) {
+ memcpy(&copy_to[first_chunk_length], array_.get(),
+ remaining_length * sizeof(int16_t));
+ }
+}
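+
+// The chunked copy above handles wrap-around. For example, with
+// `capacity_` == 11, `begin_index_` == 8 and 6 stored samples, the data lives
+// at indices 8, 9, 10, 0, 1, 2 of `array_`, and CopyTo(6, 0, out) copies a
+// first chunk of 3 samples (indices 8..10) followed by a second chunk of
+// 3 samples (indices 0..2).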
+
+void AudioVector::PushFront(const AudioVector& prepend_this) {
+ const size_t length = prepend_this.Size();
+ if (length == 0)
+ return;
+
+  // Although the subsequent call to PushFront also calls Reserve internally,
+  // it is more efficient to do one big Reserve up front.
+ Reserve(Size() + length);
+
+ const size_t first_chunk_length =
+ std::min(length, prepend_this.capacity_ - prepend_this.begin_index_);
+ const size_t remaining_length = length - first_chunk_length;
+ if (remaining_length > 0)
+ PushFront(prepend_this.array_.get(), remaining_length);
+ PushFront(&prepend_this.array_[prepend_this.begin_index_],
+ first_chunk_length);
+}
+
+void AudioVector::PushFront(const int16_t* prepend_this, size_t length) {
+ if (length == 0)
+ return;
+ Reserve(Size() + length);
+ const size_t first_chunk_length = std::min(length, begin_index_);
+ memcpy(&array_[begin_index_ - first_chunk_length],
+ &prepend_this[length - first_chunk_length],
+ first_chunk_length * sizeof(int16_t));
+ const size_t remaining_length = length - first_chunk_length;
+ if (remaining_length > 0) {
+ memcpy(&array_[capacity_ - remaining_length], prepend_this,
+ remaining_length * sizeof(int16_t));
+ }
+ begin_index_ = (begin_index_ + capacity_ - length) % capacity_;
+}
+
+void AudioVector::PushBack(const AudioVector& append_this) {
+ PushBack(append_this, append_this.Size(), 0);
+}
+
+void AudioVector::PushBack(const AudioVector& append_this,
+ size_t length,
+ size_t position) {
+ RTC_DCHECK_LE(position, append_this.Size());
+ RTC_DCHECK_LE(length, append_this.Size() - position);
+
+ if (length == 0)
+ return;
+
+  // Although the subsequent call to PushBack also calls Reserve internally,
+  // it is more efficient to do one big Reserve up front.
+ Reserve(Size() + length);
+
+ const size_t start_index =
+ (append_this.begin_index_ + position) % append_this.capacity_;
+ const size_t first_chunk_length =
+ std::min(length, append_this.capacity_ - start_index);
+ PushBack(&append_this.array_[start_index], first_chunk_length);
+
+ const size_t remaining_length = length - first_chunk_length;
+ if (remaining_length > 0)
+ PushBack(append_this.array_.get(), remaining_length);
+}
+
+void AudioVector::PushBack(const int16_t* append_this, size_t length) {
+ if (length == 0)
+ return;
+ Reserve(Size() + length);
+ const size_t first_chunk_length = std::min(length, capacity_ - end_index_);
+ memcpy(&array_[end_index_], append_this,
+ first_chunk_length * sizeof(int16_t));
+ const size_t remaining_length = length - first_chunk_length;
+ if (remaining_length > 0) {
+ memcpy(array_.get(), &append_this[first_chunk_length],
+ remaining_length * sizeof(int16_t));
+ }
+ end_index_ = (end_index_ + length) % capacity_;
+}
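+
+// The two memcpy calls above handle wrap-around. For example, with
+// `capacity_` == 11 and `end_index_` == 9, pushing 4 samples writes 2 of
+// them at indices 9 and 10, wraps around, writes the remaining 2 at indices
+// 0 and 1, and leaves `end_index_` == (9 + 4) % 11 == 2.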
+
+void AudioVector::PopFront(size_t length) {
+ if (length == 0)
+ return;
+ length = std::min(length, Size());
+ begin_index_ = (begin_index_ + length) % capacity_;
+}
+
+void AudioVector::PopBack(size_t length) {
+ if (length == 0)
+ return;
+ // Never remove more than what is in the array.
+ length = std::min(length, Size());
+ end_index_ = (end_index_ + capacity_ - length) % capacity_;
+}
+
+void AudioVector::Extend(size_t extra_length) {
+ if (extra_length == 0)
+ return;
+ InsertZerosByPushBack(extra_length, Size());
+}
+
+void AudioVector::InsertAt(const int16_t* insert_this,
+ size_t length,
+ size_t position) {
+ if (length == 0)
+ return;
+ // Cap the insert position at the current array length.
+ position = std::min(Size(), position);
+
+  // When inserting at a position closer to the beginning, it is more
+  // efficient to insert by pushing front than by pushing back, since less
+  // data needs to be moved; the converse holds for positions closer to the
+  // end.
+ if (position <= Size() - position) {
+ InsertByPushFront(insert_this, length, position);
+ } else {
+ InsertByPushBack(insert_this, length, position);
+ }
+}
+
+void AudioVector::InsertZerosAt(size_t length, size_t position) {
+ if (length == 0)
+ return;
+ // Cap the insert position at the current array length.
+ position = std::min(Size(), position);
+
+  // When inserting at a position closer to the beginning, it is more
+  // efficient to insert by pushing front than by pushing back, since less
+  // data needs to be moved; the converse holds for positions closer to the
+  // end.
+ if (position <= Size() - position) {
+ InsertZerosByPushFront(length, position);
+ } else {
+ InsertZerosByPushBack(length, position);
+ }
+}
+
+void AudioVector::OverwriteAt(const AudioVector& insert_this,
+ size_t length,
+ size_t position) {
+ RTC_DCHECK_LE(length, insert_this.Size());
+ if (length == 0)
+ return;
+
+ // Cap the insert position at the current array length.
+ position = std::min(Size(), position);
+
+  // Although the subsequent call to OverwriteAt also calls Reserve
+  // internally, it is more efficient to do one big Reserve up front.
+ size_t new_size = std::max(Size(), position + length);
+ Reserve(new_size);
+
+ const size_t first_chunk_length =
+ std::min(length, insert_this.capacity_ - insert_this.begin_index_);
+ OverwriteAt(&insert_this.array_[insert_this.begin_index_], first_chunk_length,
+ position);
+ const size_t remaining_length = length - first_chunk_length;
+ if (remaining_length > 0) {
+ OverwriteAt(insert_this.array_.get(), remaining_length,
+ position + first_chunk_length);
+ }
+}
+
+void AudioVector::OverwriteAt(const int16_t* insert_this,
+ size_t length,
+ size_t position) {
+ if (length == 0)
+ return;
+ // Cap the insert position at the current array length.
+ position = std::min(Size(), position);
+
+ size_t new_size = std::max(Size(), position + length);
+ Reserve(new_size);
+
+ const size_t overwrite_index = (begin_index_ + position) % capacity_;
+ const size_t first_chunk_length =
+ std::min(length, capacity_ - overwrite_index);
+ memcpy(&array_[overwrite_index], insert_this,
+ first_chunk_length * sizeof(int16_t));
+ const size_t remaining_length = length - first_chunk_length;
+ if (remaining_length > 0) {
+ memcpy(array_.get(), &insert_this[first_chunk_length],
+ remaining_length * sizeof(int16_t));
+ }
+
+ end_index_ = (begin_index_ + new_size) % capacity_;
+}
+
+void AudioVector::CrossFade(const AudioVector& append_this,
+ size_t fade_length) {
+ // Fade length cannot be longer than the current vector or `append_this`.
+ RTC_DCHECK_LE(fade_length, Size());
+ RTC_DCHECK_LE(fade_length, append_this.Size());
+ fade_length = std::min(fade_length, Size());
+ fade_length = std::min(fade_length, append_this.Size());
+ size_t position = Size() - fade_length + begin_index_;
+ // Cross fade the overlapping regions.
+ // `alpha` is the mixing factor in Q14.
+ // TODO(hlundin): Consider skipping +1 in the denominator to produce a
+ // smoother cross-fade, in particular at the end of the fade.
+ int alpha_step = 16384 / (static_cast<int>(fade_length) + 1);
+ int alpha = 16384;
+ for (size_t i = 0; i < fade_length; ++i) {
+ alpha -= alpha_step;
+ array_[(position + i) % capacity_] =
+ (alpha * array_[(position + i) % capacity_] +
+ (16384 - alpha) * append_this[i] + 8192) >>
+ 14;
+ }
+ RTC_DCHECK_GE(alpha, 0); // Verify that the slope was correct.
+ // Append what is left of `append_this`.
+ size_t samples_to_push_back = append_this.Size() - fade_length;
+ if (samples_to_push_back > 0)
+ PushBack(append_this, samples_to_push_back, fade_length);
+}
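+
+// As a numeric illustration of the Q14 ramp above: fade_length == 3 gives
+// alpha_step == 4096, so `alpha` steps through 12288, 8192 and 4096. The mix
+// thus ramps from 3/4 old + 1/4 new, via 1/2 + 1/2, to 1/4 old + 3/4 new,
+// with the +8192 term providing rounding before the shift by 14.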
+
+// Returns the number of elements in this AudioVector.
+size_t AudioVector::Size() const {
+ return (end_index_ + capacity_ - begin_index_) % capacity_;
+}
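+
+// For example, `begin_index_` == 8, `end_index_` == 3 and `capacity_` == 11
+// give (3 + 11 - 8) % 11 == 6 stored samples, corresponding to a region that
+// wraps around the end of the array.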
+
+// Returns true if this AudioVector is empty.
+bool AudioVector::Empty() const {
+ return begin_index_ == end_index_;
+}
+
+void AudioVector::Reserve(size_t n) {
+ if (capacity_ > n)
+ return;
+ const size_t length = Size();
+ // Reserve one more sample to remove the ambiguity between empty vector and
+ // full vector. Therefore `begin_index_` == `end_index_` indicates empty
+ // vector, and `begin_index_` == (`end_index_` + 1) % capacity indicates
+ // full vector.
+ std::unique_ptr<int16_t[]> temp_array(new int16_t[n + 1]);
+ CopyTo(length, 0, temp_array.get());
+ array_.swap(temp_array);
+ begin_index_ = 0;
+ end_index_ = length;
+ capacity_ = n + 1;
+}
+
+void AudioVector::InsertByPushBack(const int16_t* insert_this,
+ size_t length,
+ size_t position) {
+ const size_t move_chunk_length = Size() - position;
+ std::unique_ptr<int16_t[]> temp_array(nullptr);
+ if (move_chunk_length > 0) {
+ // TODO(minyue): see if it is possible to avoid copying to a buffer.
+ temp_array.reset(new int16_t[move_chunk_length]);
+ CopyTo(move_chunk_length, position, temp_array.get());
+ PopBack(move_chunk_length);
+ }
+
+ Reserve(Size() + length + move_chunk_length);
+ PushBack(insert_this, length);
+ if (move_chunk_length > 0)
+ PushBack(temp_array.get(), move_chunk_length);
+}
+
+void AudioVector::InsertByPushFront(const int16_t* insert_this,
+ size_t length,
+ size_t position) {
+ std::unique_ptr<int16_t[]> temp_array(nullptr);
+ if (position > 0) {
+ // TODO(minyue): see if it is possible to avoid copying to a buffer.
+ temp_array.reset(new int16_t[position]);
+ CopyTo(position, 0, temp_array.get());
+ PopFront(position);
+ }
+
+ Reserve(Size() + length + position);
+ PushFront(insert_this, length);
+ if (position > 0)
+ PushFront(temp_array.get(), position);
+}
+
+void AudioVector::InsertZerosByPushBack(size_t length, size_t position) {
+ const size_t move_chunk_length = Size() - position;
+ std::unique_ptr<int16_t[]> temp_array(nullptr);
+ if (move_chunk_length > 0) {
+ temp_array.reset(new int16_t[move_chunk_length]);
+ CopyTo(move_chunk_length, position, temp_array.get());
+ PopBack(move_chunk_length);
+ }
+
+ Reserve(Size() + length + move_chunk_length);
+
+ const size_t first_zero_chunk_length =
+ std::min(length, capacity_ - end_index_);
+ memset(&array_[end_index_], 0, first_zero_chunk_length * sizeof(int16_t));
+ const size_t remaining_zero_length = length - first_zero_chunk_length;
+ if (remaining_zero_length > 0)
+ memset(array_.get(), 0, remaining_zero_length * sizeof(int16_t));
+ end_index_ = (end_index_ + length) % capacity_;
+
+ if (move_chunk_length > 0)
+ PushBack(temp_array.get(), move_chunk_length);
+}
+
+void AudioVector::InsertZerosByPushFront(size_t length, size_t position) {
+ std::unique_ptr<int16_t[]> temp_array(nullptr);
+ if (position > 0) {
+ temp_array.reset(new int16_t[position]);
+ CopyTo(position, 0, temp_array.get());
+ PopFront(position);
+ }
+
+ Reserve(Size() + length + position);
+
+ const size_t first_zero_chunk_length = std::min(length, begin_index_);
+ memset(&array_[begin_index_ - first_zero_chunk_length], 0,
+ first_zero_chunk_length * sizeof(int16_t));
+ const size_t remaining_zero_length = length - first_zero_chunk_length;
+ if (remaining_zero_length > 0)
+ memset(&array_[capacity_ - remaining_zero_length], 0,
+ remaining_zero_length * sizeof(int16_t));
+ begin_index_ = (begin_index_ + capacity_ - length) % capacity_;
+
+ if (position > 0)
+ PushFront(temp_array.get(), position);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/audio_vector.h b/third_party/libwebrtc/modules/audio_coding/neteq/audio_vector.h
new file mode 100644
index 0000000000..d68f3ec6be
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/audio_vector.h
@@ -0,0 +1,172 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_AUDIO_VECTOR_H_
+#define MODULES_AUDIO_CODING_NETEQ_AUDIO_VECTOR_H_
+
+#include <string.h>
+
+#include <cstdint>
+#include <memory>
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+class AudioVector {
+ public:
+ // Creates an empty AudioVector.
+ AudioVector();
+
+ // Creates an AudioVector with an initial size.
+ explicit AudioVector(size_t initial_size);
+
+ virtual ~AudioVector();
+
+ AudioVector(const AudioVector&) = delete;
+ AudioVector& operator=(const AudioVector&) = delete;
+
+  // Deletes all values and makes the vector empty.
+ virtual void Clear();
+
+ // Copies all values from this vector to `copy_to`. Any contents in `copy_to`
+ // are deleted before the copy operation. After the operation is done,
+ // `copy_to` will be an exact replica of this object.
+ virtual void CopyTo(AudioVector* copy_to) const;
+
+ // Copies `length` values from `position` in this vector to `copy_to`.
+ virtual void CopyTo(size_t length, size_t position, int16_t* copy_to) const;
+
+ // Prepends the contents of AudioVector `prepend_this` to this object. The
+ // length of this object is increased with the length of `prepend_this`.
+ virtual void PushFront(const AudioVector& prepend_this);
+
+ // Same as above, but with an array `prepend_this` with `length` elements as
+ // source.
+ virtual void PushFront(const int16_t* prepend_this, size_t length);
+
+ // Same as PushFront but will append to the end of this object.
+ virtual void PushBack(const AudioVector& append_this);
+
+ // Appends a segment of `append_this` to the end of this object. The segment
+ // starts from `position` and has `length` samples.
+ virtual void PushBack(const AudioVector& append_this,
+ size_t length,
+ size_t position);
+
+ // Same as PushFront but will append to the end of this object.
+ virtual void PushBack(const int16_t* append_this, size_t length);
+
+ // Removes `length` elements from the beginning of this object.
+ virtual void PopFront(size_t length);
+
+ // Removes `length` elements from the end of this object.
+ virtual void PopBack(size_t length);
+
+ // Extends this object with `extra_length` elements at the end. The new
+ // elements are initialized to zero.
+ virtual void Extend(size_t extra_length);
+
+  // Inserts `length` elements, taken from the array `insert_this`, at
+  // `position`. The length of the AudioVector is increased by `length`.
+ // `position` = 0 means that the new values are prepended to the vector.
+ // `position` = Size() means that the new values are appended to the vector.
+ virtual void InsertAt(const int16_t* insert_this,
+ size_t length,
+ size_t position);
+
+ // Like InsertAt, but inserts `length` zero elements at `position`.
+ virtual void InsertZerosAt(size_t length, size_t position);
+
+  // Overwrites `length` elements of this AudioVector, starting at `position`,
+  // with the first `length` values of `insert_this`. The definition of
+  // `position` is the same as for InsertAt(). If `length` and `position` are
+  // selected such that the new data extends beyond the end of the current
+  // AudioVector, the vector is extended to accommodate the new data.
+ virtual void OverwriteAt(const AudioVector& insert_this,
+ size_t length,
+ size_t position);
+
+ // Overwrites `length` elements of this AudioVector with values taken from the
+ // array `insert_this`, starting at `position`. The definition of `position`
+ // is the same as for InsertAt(). If `length` and `position` are selected
+ // such that the new data extends beyond the end of the current AudioVector,
+ // the vector is extended to accommodate the new data.
+ virtual void OverwriteAt(const int16_t* insert_this,
+ size_t length,
+ size_t position);
+
+ // Appends `append_this` to the end of the current vector. Lets the two
+ // vectors overlap by `fade_length` samples, and cross-fade linearly in this
+ // region.
+ virtual void CrossFade(const AudioVector& append_this, size_t fade_length);
+
+ // Returns the number of elements in this AudioVector.
+ virtual size_t Size() const;
+
+ // Returns true if this AudioVector is empty.
+ virtual bool Empty() const;
+
+ // Accesses and modifies an element of AudioVector.
+ inline const int16_t& operator[](size_t index) const {
+ return array_[WrapIndex(index, begin_index_, capacity_)];
+ }
+
+ inline int16_t& operator[](size_t index) {
+ return array_[WrapIndex(index, begin_index_, capacity_)];
+ }
+
+ private:
+ static const size_t kDefaultInitialSize = 10;
+
+ // This method is used by the [] operators to calculate an index within the
+ // capacity of the array, but without using the modulo operation (%).
+ static inline size_t WrapIndex(size_t index,
+ size_t begin_index,
+ size_t capacity) {
+ RTC_DCHECK_LT(index, capacity);
+ RTC_DCHECK_LT(begin_index, capacity);
+ size_t ix = begin_index + index;
+ RTC_DCHECK_GE(ix, index); // Check for overflow.
+ if (ix >= capacity) {
+ ix -= capacity;
+ }
+ RTC_DCHECK_LT(ix, capacity);
+ return ix;
+ }
+
+ void Reserve(size_t n);
+
+ void InsertByPushBack(const int16_t* insert_this,
+ size_t length,
+ size_t position);
+
+ void InsertByPushFront(const int16_t* insert_this,
+ size_t length,
+ size_t position);
+
+ void InsertZerosByPushBack(size_t length, size_t position);
+
+ void InsertZerosByPushFront(size_t length, size_t position);
+
+ std::unique_ptr<int16_t[]> array_;
+
+ size_t capacity_; // Allocated number of samples in the array.
+
+ // The index of the first sample in `array_`, except when
+  // `begin_index_ == end_index_`, which indicates an empty buffer.
+ size_t begin_index_;
+
+ // The index of the sample after the last sample in `array_`.
+ size_t end_index_;
+};
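+
+// Illustrative usage (a sketch; the unit tests are the authoritative
+// examples):
+//
+//   AudioVector vec;
+//   int16_t samples[3] = {1, 2, 3};
+//   vec.PushBack(samples, 3);   // vec = {1, 2, 3}
+//   vec.PushFront(samples, 3);  // vec = {1, 2, 3, 1, 2, 3}
+//   vec.PopFront(2);            // vec = {3, 1, 2, 3}
+//   int16_t out[4];
+//   vec.CopyTo(4, 0, out);      // out = {3, 1, 2, 3}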
+
+} // namespace webrtc
+#endif // MODULES_AUDIO_CODING_NETEQ_AUDIO_VECTOR_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/audio_vector_unittest.cc b/third_party/libwebrtc/modules/audio_coding/neteq/audio_vector_unittest.cc
new file mode 100644
index 0000000000..ae9dd88606
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/audio_vector_unittest.cc
@@ -0,0 +1,384 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/audio_vector.h"
+
+#include <stdlib.h>
+
+#include <string>
+
+#include "rtc_base/numerics/safe_conversions.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+class AudioVectorTest : public ::testing::Test {
+ protected:
+ virtual void SetUp() {
+ // Populate test array.
+ for (size_t i = 0; i < array_length(); ++i) {
+ array_[i] = rtc::checked_cast<int16_t>(i);
+ }
+ }
+
+ size_t array_length() const { return sizeof(array_) / sizeof(array_[0]); }
+
+ int16_t array_[10];
+};
+
+// Create and destroy AudioVector objects, both empty and with a predefined
+// length.
+TEST_F(AudioVectorTest, CreateAndDestroy) {
+ AudioVector vec1;
+ EXPECT_TRUE(vec1.Empty());
+ EXPECT_EQ(0u, vec1.Size());
+
+ size_t initial_size = 17;
+ AudioVector vec2(initial_size);
+ EXPECT_FALSE(vec2.Empty());
+ EXPECT_EQ(initial_size, vec2.Size());
+}
+
+// Test the subscript operator [] for getting and setting.
+TEST_F(AudioVectorTest, SubscriptOperator) {
+ AudioVector vec(array_length());
+ for (size_t i = 0; i < array_length(); ++i) {
+ vec[i] = static_cast<int16_t>(i);
+ const int16_t& value = vec[i]; // Make sure to use the const version.
+ EXPECT_EQ(static_cast<int16_t>(i), value);
+ }
+}
+
+// Test the PushBack method and the CopyTo method. The Clear method is also
+// invoked.
+TEST_F(AudioVectorTest, PushBackAndCopy) {
+ AudioVector vec;
+ AudioVector vec_copy;
+ vec.PushBack(array_, array_length());
+ vec.CopyTo(&vec_copy); // Copy from `vec` to `vec_copy`.
+ ASSERT_EQ(array_length(), vec.Size());
+ ASSERT_EQ(array_length(), vec_copy.Size());
+ for (size_t i = 0; i < array_length(); ++i) {
+ EXPECT_EQ(array_[i], vec[i]);
+ EXPECT_EQ(array_[i], vec_copy[i]);
+ }
+
+ // Clear `vec` and verify that it is empty.
+ vec.Clear();
+ EXPECT_TRUE(vec.Empty());
+
+ // Now copy the empty vector and verify that the copy becomes empty too.
+ vec.CopyTo(&vec_copy);
+ EXPECT_TRUE(vec_copy.Empty());
+}
+
+// Test the PushBack method with another AudioVector as input argument.
+TEST_F(AudioVectorTest, PushBackVector) {
+ static const size_t kLength = 10;
+ AudioVector vec1(kLength);
+ AudioVector vec2(kLength);
+ // Set the first vector to [0, 1, ..., kLength - 1].
+ // Set the second vector to [kLength, kLength + 1, ..., 2 * kLength - 1].
+ for (size_t i = 0; i < kLength; ++i) {
+ vec1[i] = static_cast<int16_t>(i);
+ vec2[i] = static_cast<int16_t>(i + kLength);
+ }
+ // Append vec2 to the back of vec1.
+ vec1.PushBack(vec2);
+ ASSERT_EQ(2 * kLength, vec1.Size());
+ for (size_t i = 0; i < 2 * kLength; ++i) {
+ EXPECT_EQ(static_cast<int16_t>(i), vec1[i]);
+ }
+}
+
+// Test the PushFront method.
+TEST_F(AudioVectorTest, PushFront) {
+ AudioVector vec;
+ vec.PushFront(array_, array_length());
+ ASSERT_EQ(array_length(), vec.Size());
+ for (size_t i = 0; i < array_length(); ++i) {
+ EXPECT_EQ(array_[i], vec[i]);
+ }
+}
+
+// Test the PushFront method with another AudioVector as input argument.
+TEST_F(AudioVectorTest, PushFrontVector) {
+ static const size_t kLength = 10;
+ AudioVector vec1(kLength);
+ AudioVector vec2(kLength);
+ // Set the first vector to [0, 1, ..., kLength - 1].
+ // Set the second vector to [kLength, kLength + 1, ..., 2 * kLength - 1].
+ for (size_t i = 0; i < kLength; ++i) {
+ vec1[i] = static_cast<int16_t>(i);
+ vec2[i] = static_cast<int16_t>(i + kLength);
+ }
+ // Prepend vec1 to the front of vec2.
+ vec2.PushFront(vec1);
+ ASSERT_EQ(2 * kLength, vec2.Size());
+ for (size_t i = 0; i < 2 * kLength; ++i) {
+ EXPECT_EQ(static_cast<int16_t>(i), vec2[i]);
+ }
+}
+
+// Test the PopFront method.
+TEST_F(AudioVectorTest, PopFront) {
+ AudioVector vec;
+ vec.PushBack(array_, array_length());
+ vec.PopFront(1); // Remove one element.
+ EXPECT_EQ(array_length() - 1u, vec.Size());
+ for (size_t i = 0; i < array_length() - 1; ++i) {
+ EXPECT_EQ(static_cast<int16_t>(i + 1), vec[i]);
+ }
+ vec.PopFront(array_length()); // Remove more elements than vector size.
+ EXPECT_EQ(0u, vec.Size());
+}
+
+// Test the PopBack method.
+TEST_F(AudioVectorTest, PopBack) {
+ AudioVector vec;
+ vec.PushBack(array_, array_length());
+ vec.PopBack(1); // Remove one element.
+ EXPECT_EQ(array_length() - 1u, vec.Size());
+ for (size_t i = 0; i < array_length() - 1; ++i) {
+ EXPECT_EQ(static_cast<int16_t>(i), vec[i]);
+ }
+ vec.PopBack(array_length()); // Remove more elements than vector size.
+ EXPECT_EQ(0u, vec.Size());
+}
+
+// Test the Extend method.
+TEST_F(AudioVectorTest, Extend) {
+ AudioVector vec;
+ vec.PushBack(array_, array_length());
+ vec.Extend(5); // Extend with 5 elements, which should all be zeros.
+ ASSERT_EQ(array_length() + 5u, vec.Size());
+ // Verify that all are zero.
+ for (size_t i = array_length(); i < array_length() + 5; ++i) {
+ EXPECT_EQ(0, vec[i]);
+ }
+}
+
+// Test the InsertAt method with an insert position in the middle of the vector.
+TEST_F(AudioVectorTest, InsertAt) {
+ AudioVector vec;
+ vec.PushBack(array_, array_length());
+ static const int kNewLength = 5;
+ int16_t new_array[kNewLength];
+ // Set array elements to {100, 101, 102, ... }.
+ for (int i = 0; i < kNewLength; ++i) {
+ new_array[i] = 100 + i;
+ }
+ int insert_position = 5;
+ vec.InsertAt(new_array, kNewLength, insert_position);
+ // Verify that the vector looks as follows:
+ // {0, 1, ..., `insert_position` - 1, 100, 101, ..., 100 + kNewLength - 1,
+ // `insert_position`, `insert_position` + 1, ..., kLength - 1}.
+ size_t pos = 0;
+ for (int i = 0; i < insert_position; ++i) {
+ EXPECT_EQ(array_[i], vec[pos]);
+ ++pos;
+ }
+ for (int i = 0; i < kNewLength; ++i) {
+ EXPECT_EQ(new_array[i], vec[pos]);
+ ++pos;
+ }
+ for (size_t i = insert_position; i < array_length(); ++i) {
+ EXPECT_EQ(array_[i], vec[pos]);
+ ++pos;
+ }
+}
+
+// Test the InsertZerosAt method with an insert position in the middle of the
+// vector. Use the InsertAt method as reference.
+TEST_F(AudioVectorTest, InsertZerosAt) {
+ AudioVector vec;
+ AudioVector vec_ref;
+ vec.PushBack(array_, array_length());
+ vec_ref.PushBack(array_, array_length());
+ static const int kNewLength = 5;
+ int insert_position = 5;
+ vec.InsertZerosAt(kNewLength, insert_position);
+ int16_t new_array[kNewLength] = {0}; // All zero elements.
+ vec_ref.InsertAt(new_array, kNewLength, insert_position);
+ // Verify that the vectors are identical.
+ ASSERT_EQ(vec_ref.Size(), vec.Size());
+ for (size_t i = 0; i < vec.Size(); ++i) {
+ EXPECT_EQ(vec_ref[i], vec[i]);
+ }
+}
+
+// Test the InsertAt method with an insert position at the start of the vector.
+TEST_F(AudioVectorTest, InsertAtBeginning) {
+ AudioVector vec;
+ vec.PushBack(array_, array_length());
+ static const int kNewLength = 5;
+ int16_t new_array[kNewLength];
+ // Set array elements to {100, 101, 102, ... }.
+ for (int i = 0; i < kNewLength; ++i) {
+ new_array[i] = 100 + i;
+ }
+ int insert_position = 0;
+ vec.InsertAt(new_array, kNewLength, insert_position);
+ // Verify that the vector looks as follows:
+ // {100, 101, ..., 100 + kNewLength - 1,
+ // 0, 1, ..., kLength - 1}.
+ size_t pos = 0;
+ for (int i = 0; i < kNewLength; ++i) {
+ EXPECT_EQ(new_array[i], vec[pos]);
+ ++pos;
+ }
+ for (size_t i = insert_position; i < array_length(); ++i) {
+ EXPECT_EQ(array_[i], vec[pos]);
+ ++pos;
+ }
+}
+
+// Test the InsertAt method with an insert position at the end of the vector.
+TEST_F(AudioVectorTest, InsertAtEnd) {
+ AudioVector vec;
+ vec.PushBack(array_, array_length());
+ static const int kNewLength = 5;
+ int16_t new_array[kNewLength];
+ // Set array elements to {100, 101, 102, ... }.
+ for (int i = 0; i < kNewLength; ++i) {
+ new_array[i] = 100 + i;
+ }
+ int insert_position = rtc::checked_cast<int>(array_length());
+ vec.InsertAt(new_array, kNewLength, insert_position);
+ // Verify that the vector looks as follows:
+ // {0, 1, ..., kLength - 1, 100, 101, ..., 100 + kNewLength - 1 }.
+ size_t pos = 0;
+ for (size_t i = 0; i < array_length(); ++i) {
+ EXPECT_EQ(array_[i], vec[pos]);
+ ++pos;
+ }
+ for (int i = 0; i < kNewLength; ++i) {
+ EXPECT_EQ(new_array[i], vec[pos]);
+ ++pos;
+ }
+}
+
+// Test the InsertAt method with an insert position beyond the end of the
+// vector. Verify that a position beyond the end of the vector does not lead to
+// an error. The expected outcome is the same as if the vector end was used as
+// input position. That is, the input position should be capped at the maximum
+// allowed value.
+TEST_F(AudioVectorTest, InsertBeyondEnd) {
+ AudioVector vec;
+ vec.PushBack(array_, array_length());
+ static const int kNewLength = 5;
+ int16_t new_array[kNewLength];
+ // Set array elements to {100, 101, 102, ... }.
+ for (int i = 0; i < kNewLength; ++i) {
+ new_array[i] = 100 + i;
+ }
+ int insert_position =
+ rtc::checked_cast<int>(array_length() + 10); // Too large.
+ vec.InsertAt(new_array, kNewLength, insert_position);
+ // Verify that the vector looks as follows:
+ // {0, 1, ..., kLength - 1, 100, 101, ..., 100 + kNewLength - 1 }.
+ size_t pos = 0;
+ for (size_t i = 0; i < array_length(); ++i) {
+ EXPECT_EQ(array_[i], vec[pos]);
+ ++pos;
+ }
+ for (int i = 0; i < kNewLength; ++i) {
+ EXPECT_EQ(new_array[i], vec[pos]);
+ ++pos;
+ }
+}
+
+// Test the OverwriteAt method with a position such that all of the new values
+// fit within the old vector.
+TEST_F(AudioVectorTest, OverwriteAt) {
+ AudioVector vec;
+ vec.PushBack(array_, array_length());
+ static const int kNewLength = 5;
+ int16_t new_array[kNewLength];
+ // Set array elements to {100, 101, 102, ... }.
+ for (int i = 0; i < kNewLength; ++i) {
+ new_array[i] = 100 + i;
+ }
+ size_t insert_position = 2;
+ vec.OverwriteAt(new_array, kNewLength, insert_position);
+ // Verify that the vector looks as follows:
+ // {0, ..., `insert_position` - 1, 100, 101, ..., 100 + kNewLength - 1,
+ // `insert_position`, `insert_position` + 1, ..., kLength - 1}.
+ size_t pos = 0;
+ for (pos = 0; pos < insert_position; ++pos) {
+ EXPECT_EQ(array_[pos], vec[pos]);
+ }
+ for (int i = 0; i < kNewLength; ++i) {
+ EXPECT_EQ(new_array[i], vec[pos]);
+ ++pos;
+ }
+ for (; pos < array_length(); ++pos) {
+ EXPECT_EQ(array_[pos], vec[pos]);
+ }
+}
+
+// Test the OverwriteAt method with a position such that some of the new values
+// extend beyond the end of the current vector. This is valid, and the vector is
+// expected to expand to accommodate the new values.
+TEST_F(AudioVectorTest, OverwriteBeyondEnd) {
+ AudioVector vec;
+ vec.PushBack(array_, array_length());
+ static const int kNewLength = 5;
+ int16_t new_array[kNewLength];
+ // Set array elements to {100, 101, 102, ... }.
+ for (int i = 0; i < kNewLength; ++i) {
+ new_array[i] = 100 + i;
+ }
+ int insert_position = rtc::checked_cast<int>(array_length() - 2);
+ vec.OverwriteAt(new_array, kNewLength, insert_position);
+ ASSERT_EQ(array_length() - 2u + kNewLength, vec.Size());
+ // Verify that the vector looks as follows:
+ // {0, ..., `insert_position` - 1, 100, 101, ..., 100 + kNewLength - 1,
+ // `insert_position`, `insert_position` + 1, ..., kLength - 1}.
+ int pos = 0;
+ for (pos = 0; pos < insert_position; ++pos) {
+ EXPECT_EQ(array_[pos], vec[pos]);
+ }
+ for (int i = 0; i < kNewLength; ++i) {
+ EXPECT_EQ(new_array[i], vec[pos]);
+ ++pos;
+ }
+ // Verify that we checked to the end of `vec`.
+ EXPECT_EQ(vec.Size(), static_cast<size_t>(pos));
+}
+
+TEST_F(AudioVectorTest, CrossFade) {
+ static const size_t kLength = 100;
+ static const size_t kFadeLength = 10;
+ AudioVector vec1(kLength);
+ AudioVector vec2(kLength);
+ // Set all vector elements to 0 in `vec1` and 100 in `vec2`.
+ for (size_t i = 0; i < kLength; ++i) {
+ vec1[i] = 0;
+ vec2[i] = 100;
+ }
+ vec1.CrossFade(vec2, kFadeLength);
+ ASSERT_EQ(2 * kLength - kFadeLength, vec1.Size());
+ // First part untouched.
+ for (size_t i = 0; i < kLength - kFadeLength; ++i) {
+ EXPECT_EQ(0, vec1[i]);
+ }
+ // Check mixing zone.
+ for (size_t i = 0; i < kFadeLength; ++i) {
+ EXPECT_NEAR((i + 1) * 100 / (kFadeLength + 1),
+ vec1[kLength - kFadeLength + i], 1);
+ }
+ // Second part untouched.
+ for (size_t i = kLength; i < vec1.Size(); ++i) {
+ EXPECT_EQ(100, vec1[i]);
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/background_noise.cc b/third_party/libwebrtc/modules/audio_coding/neteq/background_noise.cc
new file mode 100644
index 0000000000..2c95d3b390
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/background_noise.cc
@@ -0,0 +1,309 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/background_noise.h"
+
+#include <string.h> // memcpy
+
+#include <algorithm> // min, max
+
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+#include "modules/audio_coding/neteq/audio_multi_vector.h"
+#include "modules/audio_coding/neteq/cross_correlation.h"
+#include "modules/audio_coding/neteq/post_decode_vad.h"
+
+namespace webrtc {
+namespace {
+
+constexpr size_t kMaxSampleRate = 48000;
+
+} // namespace
+
+// static
+constexpr size_t BackgroundNoise::kMaxLpcOrder;
+
+BackgroundNoise::BackgroundNoise(size_t num_channels)
+ : num_channels_(num_channels),
+ channel_parameters_(new ChannelParameters[num_channels_]) {
+ Reset();
+}
+
+BackgroundNoise::~BackgroundNoise() {}
+
+void BackgroundNoise::Reset() {
+ initialized_ = false;
+ for (size_t channel = 0; channel < num_channels_; ++channel) {
+ channel_parameters_[channel].Reset();
+ }
+}
+
+bool BackgroundNoise::Update(const AudioMultiVector& input,
+ const PostDecodeVad& vad) {
+ bool filter_params_saved = false;
+ if (vad.running() && vad.active_speech()) {
+ // Do not update the background noise parameters if we know that the signal
+ // is active speech.
+ return filter_params_saved;
+ }
+
+ int32_t auto_correlation[kMaxLpcOrder + 1];
+  int16_t filter_output[kMaxLpcOrder + kResidualLength];
+ int16_t reflection_coefficients[kMaxLpcOrder];
+ int16_t lpc_coefficients[kMaxLpcOrder + 1];
+
+ for (size_t channel_ix = 0; channel_ix < num_channels_; ++channel_ix) {
+ ChannelParameters& parameters = channel_parameters_[channel_ix];
+ int16_t temp_signal_array[kVecLen + kMaxLpcOrder] = {0};
+ int16_t* temp_signal = &temp_signal_array[kMaxLpcOrder];
+ RTC_DCHECK_GE(input.Size(), kVecLen);
+ input[channel_ix].CopyTo(kVecLen, input.Size() - kVecLen, temp_signal);
+ int32_t sample_energy =
+ CalculateAutoCorrelation(temp_signal, kVecLen, auto_correlation);
+
+ if ((!vad.running() &&
+ sample_energy < parameters.energy_update_threshold) ||
+ (vad.running() && !vad.active_speech())) {
+ // Generate LPC coefficients.
+ if (auto_correlation[0] <= 0) {
+ // Center value in auto-correlation is not positive. Do not update.
+ return filter_params_saved;
+ }
+
+ // Regardless of whether the filter is actually updated or not,
+ // update energy threshold levels, since we have in fact observed
+ // a low energy signal.
+ if (sample_energy < parameters.energy_update_threshold) {
+ // Never go under 1.0 in average sample energy.
+ parameters.energy_update_threshold = std::max(sample_energy, 1);
+ parameters.low_energy_update_threshold = 0;
+ }
+
+ // Only update BGN if filter is stable, i.e., if return value from
+ // Levinson-Durbin function is 1.
+ if (WebRtcSpl_LevinsonDurbin(auto_correlation, lpc_coefficients,
+ reflection_coefficients,
+ kMaxLpcOrder) != 1) {
+ return filter_params_saved;
+ }
+
+ // Generate the CNG gain factor by looking at the energy of the residual.
+      WebRtcSpl_FilterMAFastQ12(temp_signal + kVecLen - kResidualLength,
+                                filter_output, lpc_coefficients,
+                                kMaxLpcOrder + 1, kResidualLength);
+      int32_t residual_energy = WebRtcSpl_DotProductWithScale(
+          filter_output, filter_output, kResidualLength, 0);
+
+ // Check spectral flatness.
+ // Comparing the residual variance with the input signal variance tells
+ // if the spectrum is flat or not.
+ // If 5 * residual_energy >= 16 * sample_energy, the spectrum is flat
+ // enough. Also ensure that the energy is non-zero.
+ if ((sample_energy > 0) &&
+ (int64_t{5} * residual_energy >= int64_t{16} * sample_energy)) {
+ // Spectrum is flat enough; save filter parameters.
+ // `temp_signal` + `kVecLen` - `kMaxLpcOrder` points at the first of the
+ // `kMaxLpcOrder` samples in the residual signal, which will form the
+ // filter state for the next noise generation.
+ SaveParameters(channel_ix, lpc_coefficients,
+ temp_signal + kVecLen - kMaxLpcOrder, sample_energy,
+ residual_energy);
+ filter_params_saved = true;
+ }
+ } else {
+ // Will only happen if post-decode VAD is disabled and `sample_energy` is
+ // not low enough. Increase the threshold for update so that it increases
+ // by a factor 4 in 4 seconds.
+ IncrementEnergyThreshold(channel_ix, sample_energy);
+ }
+ }
+ return filter_params_saved;
+}
+
+void BackgroundNoise::GenerateBackgroundNoise(
+ rtc::ArrayView<const int16_t> random_vector,
+ size_t channel,
+ int mute_slope,
+ bool too_many_expands,
+ size_t num_noise_samples,
+ int16_t* buffer) {
+ constexpr size_t kNoiseLpcOrder = kMaxLpcOrder;
+ int16_t scaled_random_vector[kMaxSampleRate / 8000 * 125];
+ RTC_DCHECK_LE(num_noise_samples, (kMaxSampleRate / 8000 * 125));
+ RTC_DCHECK_GE(random_vector.size(), num_noise_samples);
+ int16_t* noise_samples = &buffer[kNoiseLpcOrder];
+ if (initialized()) {
+ // Use background noise parameters.
+ memcpy(noise_samples - kNoiseLpcOrder, FilterState(channel),
+ sizeof(int16_t) * kNoiseLpcOrder);
+
+ int dc_offset = 0;
+ if (ScaleShift(channel) > 1) {
+ dc_offset = 1 << (ScaleShift(channel) - 1);
+ }
+
+ // Scale random vector to correct energy level.
+ WebRtcSpl_AffineTransformVector(scaled_random_vector, random_vector.data(),
+ Scale(channel), dc_offset,
+ ScaleShift(channel), num_noise_samples);
+
+ WebRtcSpl_FilterARFastQ12(scaled_random_vector, noise_samples,
+ Filter(channel), kNoiseLpcOrder + 1,
+ num_noise_samples);
+
+ SetFilterState(
+ channel,
+ {&(noise_samples[num_noise_samples - kNoiseLpcOrder]), kNoiseLpcOrder});
+
+ // Unmute the background noise.
+ int16_t bgn_mute_factor = MuteFactor(channel);
+ if (bgn_mute_factor < 16384) {
+ WebRtcSpl_AffineTransformVector(noise_samples, noise_samples,
+ bgn_mute_factor, 8192, 14,
+ num_noise_samples);
+ }
+ // Update mute_factor in BackgroundNoise class.
+ SetMuteFactor(channel, bgn_mute_factor);
+ } else {
+ // BGN parameters have not been initialized; use zero noise.
+ memset(noise_samples, 0, sizeof(int16_t) * num_noise_samples);
+ }
+}
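+
+// Note on Q formats in GenerateBackgroundNoise() above: the mute factor is
+// in Q14, so 16384 corresponds to unity gain, and the affine transform
+// computes approximately (bgn_mute_factor / 16384) * sample, with the +8192
+// term providing rounding before the shift by 14.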
+
+int32_t BackgroundNoise::Energy(size_t channel) const {
+ RTC_DCHECK_LT(channel, num_channels_);
+ return channel_parameters_[channel].energy;
+}
+
+void BackgroundNoise::SetMuteFactor(size_t channel, int16_t value) {
+ RTC_DCHECK_LT(channel, num_channels_);
+ channel_parameters_[channel].mute_factor = value;
+}
+
+int16_t BackgroundNoise::MuteFactor(size_t channel) const {
+ RTC_DCHECK_LT(channel, num_channels_);
+ return channel_parameters_[channel].mute_factor;
+}
+
+const int16_t* BackgroundNoise::Filter(size_t channel) const {
+ RTC_DCHECK_LT(channel, num_channels_);
+ return channel_parameters_[channel].filter;
+}
+
+const int16_t* BackgroundNoise::FilterState(size_t channel) const {
+ RTC_DCHECK_LT(channel, num_channels_);
+ return channel_parameters_[channel].filter_state;
+}
+
+void BackgroundNoise::SetFilterState(size_t channel,
+ rtc::ArrayView<const int16_t> input) {
+ RTC_DCHECK_LT(channel, num_channels_);
+ size_t length = std::min(input.size(), kMaxLpcOrder);
+ memcpy(channel_parameters_[channel].filter_state, input.data(),
+ length * sizeof(int16_t));
+}
+
+int16_t BackgroundNoise::Scale(size_t channel) const {
+ RTC_DCHECK_LT(channel, num_channels_);
+ return channel_parameters_[channel].scale;
+}
+int16_t BackgroundNoise::ScaleShift(size_t channel) const {
+ RTC_DCHECK_LT(channel, num_channels_);
+ return channel_parameters_[channel].scale_shift;
+}
+
+int32_t BackgroundNoise::CalculateAutoCorrelation(
+ const int16_t* signal,
+ size_t length,
+ int32_t* auto_correlation) const {
+ static const int kCorrelationStep = -1;
+ const int correlation_scale =
+ CrossCorrelationWithAutoShift(signal, signal, length, kMaxLpcOrder + 1,
+ kCorrelationStep, auto_correlation);
+
+ // Number of shifts to normalize energy to energy/sample.
+ int energy_sample_shift = kLogVecLen - correlation_scale;
+ return auto_correlation[0] >> energy_sample_shift;
+}
+
+void BackgroundNoise::IncrementEnergyThreshold(size_t channel,
+ int32_t sample_energy) {
+ // TODO(hlundin): Simplify the below threshold update. What this code
+ // does is simply "threshold += (increment * threshold) >> 16", but due
+ // to the limited-width operations, it is not exactly the same. The
+ // difference should be inaudible, but bit-exactness would not be
+ // maintained.
+ RTC_DCHECK_LT(channel, num_channels_);
+ ChannelParameters& parameters = channel_parameters_[channel];
+ int32_t temp_energy =
+ (kThresholdIncrement * parameters.low_energy_update_threshold) >> 16;
+ temp_energy +=
+ kThresholdIncrement * (parameters.energy_update_threshold & 0xFF);
+ temp_energy +=
+ (kThresholdIncrement * ((parameters.energy_update_threshold >> 8) & 0xFF))
+ << 8;
+ parameters.low_energy_update_threshold += temp_energy;
+
+ parameters.energy_update_threshold +=
+ kThresholdIncrement * (parameters.energy_update_threshold >> 16);
+ parameters.energy_update_threshold +=
+ parameters.low_energy_update_threshold >> 16;
+ parameters.low_energy_update_threshold =
+ parameters.low_energy_update_threshold & 0x0FFFF;
+
+ // Update maximum energy.
+ // Decrease by a factor 1/1024 each time.
+ parameters.max_energy = parameters.max_energy - (parameters.max_energy >> 10);
+ if (sample_energy > parameters.max_energy) {
+ parameters.max_energy = sample_energy;
+ }
+
+  // Set `energy_update_threshold` to no less than 60 dB below
+  // `max_energy`. Adding 524288 assures proper rounding.
+ int32_t energy_update_threshold = (parameters.max_energy + 524288) >> 20;
+ if (energy_update_threshold > parameters.energy_update_threshold) {
+ parameters.energy_update_threshold = energy_update_threshold;
+ }
+}
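+
+// As a sanity check on the constant: kThresholdIncrement is 229 in Q16,
+// i.e. roughly 0.0035 per update. Assuming one update per 10 ms frame, the
+// threshold grows by about (1.0035)^400, which is approximately a factor 4,
+// over 4 seconds, matching the comment in Update() above.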
+
+void BackgroundNoise::SaveParameters(size_t channel,
+ const int16_t* lpc_coefficients,
+ const int16_t* filter_state,
+ int32_t sample_energy,
+ int32_t residual_energy) {
+ RTC_DCHECK_LT(channel, num_channels_);
+ ChannelParameters& parameters = channel_parameters_[channel];
+ memcpy(parameters.filter, lpc_coefficients,
+ (kMaxLpcOrder + 1) * sizeof(int16_t));
+ memcpy(parameters.filter_state, filter_state, kMaxLpcOrder * sizeof(int16_t));
+ // Save energy level and update energy threshold levels.
+ // Never get under 1.0 in average sample energy.
+ parameters.energy = std::max(sample_energy, 1);
+ parameters.energy_update_threshold = parameters.energy;
+ parameters.low_energy_update_threshold = 0;
+
+ // Normalize residual_energy to 29 or 30 bits before sqrt.
+ int16_t norm_shift = WebRtcSpl_NormW32(residual_energy) - 1;
+ if (norm_shift & 0x1) {
+ norm_shift -= 1; // Even number of shifts required.
+ }
+ residual_energy = WEBRTC_SPL_SHIFT_W32(residual_energy, norm_shift);
+
+ // Calculate scale and shift factor.
+ parameters.scale = static_cast<int16_t>(WebRtcSpl_SqrtFloor(residual_energy));
+ // Add 13 to the `scale_shift_`, since the random numbers table is in
+ // Q13.
+ // TODO(hlundin): Move the "13" to where the `scale_shift_` is used?
+ parameters.scale_shift =
+ static_cast<int16_t>(13 + ((kLogResidualLength + norm_shift) / 2));
+
+ initialized_ = true;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/background_noise.h b/third_party/libwebrtc/modules/audio_coding/neteq/background_noise.h
new file mode 100644
index 0000000000..8e6d5890a0
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/background_noise.h
@@ -0,0 +1,138 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_BACKGROUND_NOISE_H_
+#define MODULES_AUDIO_CODING_NETEQ_BACKGROUND_NOISE_H_
+
+#include <string.h> // size_t
+
+#include <memory>
+
+#include "api/array_view.h"
+
+namespace webrtc {
+
+// Forward declarations.
+class AudioMultiVector;
+class PostDecodeVad;
+
+// This class handles estimation of background noise parameters.
+class BackgroundNoise {
+ public:
+ // TODO(hlundin): For 48 kHz support, increase kMaxLpcOrder to 10.
+ // Will work anyway, but probably sound a little worse.
+ static constexpr size_t kMaxLpcOrder = 8; // 32000 / 8000 + 4.
+
+ explicit BackgroundNoise(size_t num_channels);
+ virtual ~BackgroundNoise();
+
+ BackgroundNoise(const BackgroundNoise&) = delete;
+ BackgroundNoise& operator=(const BackgroundNoise&) = delete;
+
+ void Reset();
+
+ // Updates the parameter estimates based on the signal currently in the
+ // `sync_buffer`, and on the latest decision in `vad` if it is running.
+ // Returns true if the filter parameters are updated.
+ bool Update(const AudioMultiVector& sync_buffer, const PostDecodeVad& vad);
+
+ // Generates background noise given a random vector and writes the output to
+ // `buffer`.
+ void GenerateBackgroundNoise(rtc::ArrayView<const int16_t> random_vector,
+ size_t channel,
+ int mute_slope,
+ bool too_many_expands,
+ size_t num_noise_samples,
+ int16_t* buffer);
+
+ // Returns `energy_` for `channel`.
+ int32_t Energy(size_t channel) const;
+
+ // Sets the value of `mute_factor_` for `channel` to `value`.
+ void SetMuteFactor(size_t channel, int16_t value);
+
+ // Returns `mute_factor_` for `channel`.
+ int16_t MuteFactor(size_t channel) const;
+
+ // Returns a pointer to `filter_` for `channel`.
+ const int16_t* Filter(size_t channel) const;
+
+ // Returns a pointer to `filter_state_` for `channel`.
+ const int16_t* FilterState(size_t channel) const;
+
+ // Copies `input` to the filter state. Will not copy more than `kMaxLpcOrder`
+ // elements.
+ void SetFilterState(size_t channel, rtc::ArrayView<const int16_t> input);
+
+ // Returns `scale_` for `channel`.
+ int16_t Scale(size_t channel) const;
+
+ // Returns `scale_shift_` for `channel`.
+ int16_t ScaleShift(size_t channel) const;
+
+ // Accessors.
+ bool initialized() const { return initialized_; }
+
+ private:
+ static const int kThresholdIncrement = 229; // 0.0035 in Q16.
+ static const size_t kVecLen = 256;
+ static const int kLogVecLen = 8; // log2(kVecLen).
+ static const size_t kResidualLength = 64;
+ static const int16_t kLogResidualLength = 6; // log2(kResidualLength)
+
+ struct ChannelParameters {
+ // Constructor.
+ ChannelParameters() { Reset(); }
+
+ void Reset() {
+ energy = 2500;
+ max_energy = 0;
+ energy_update_threshold = 500000;
+ low_energy_update_threshold = 0;
+ memset(filter_state, 0, sizeof(filter_state));
+ memset(filter, 0, sizeof(filter));
+ filter[0] = 4096;
+ mute_factor = 0;
+ scale = 20000;
+ scale_shift = 24;
+ }
+
+ int32_t energy;
+ int32_t max_energy;
+ int32_t energy_update_threshold;
+ int32_t low_energy_update_threshold;
+ int16_t filter_state[kMaxLpcOrder];
+ int16_t filter[kMaxLpcOrder + 1];
+ int16_t mute_factor;
+ int16_t scale;
+ int16_t scale_shift;
+ };
+
+ int32_t CalculateAutoCorrelation(const int16_t* signal,
+ size_t length,
+ int32_t* auto_correlation) const;
+
+ // Increments the energy threshold by a factor 1 + `kThresholdIncrement`.
+ void IncrementEnergyThreshold(size_t channel, int32_t sample_energy);
+
+ // Updates the filter parameters.
+ void SaveParameters(size_t channel,
+ const int16_t* lpc_coefficients,
+ const int16_t* filter_state,
+ int32_t sample_energy,
+ int32_t residual_energy);
+
+ size_t num_channels_;
+ std::unique_ptr<ChannelParameters[]> channel_parameters_;
+ bool initialized_;
+};
+
+} // namespace webrtc
+#endif // MODULES_AUDIO_CODING_NETEQ_BACKGROUND_NOISE_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/background_noise_unittest.cc b/third_party/libwebrtc/modules/audio_coding/neteq/background_noise_unittest.cc
new file mode 100644
index 0000000000..e32492f57e
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/background_noise_unittest.cc
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+// Unit tests for BackgroundNoise class.
+
+#include "modules/audio_coding/neteq/background_noise.h"
+
+#include "test/gtest.h"
+
+namespace webrtc {
+
+TEST(BackgroundNoise, CreateAndDestroy) {
+ size_t channels = 1;
+ BackgroundNoise bgn(channels);
+}
+
+// TODO(hlundin): Write more tests.
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/buffer_level_filter.cc b/third_party/libwebrtc/modules/audio_coding/neteq/buffer_level_filter.cc
new file mode 100644
index 0000000000..2c42d0d13f
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/buffer_level_filter.cc
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/buffer_level_filter.h"
+
+#include <stdint.h>
+
+#include <algorithm>
+
+#include "rtc_base/numerics/safe_conversions.h"
+
+namespace webrtc {
+
+BufferLevelFilter::BufferLevelFilter() {
+ Reset();
+}
+
+void BufferLevelFilter::Reset() {
+ filtered_current_level_ = 0;
+ level_factor_ = 253;
+}
+
+void BufferLevelFilter::Update(size_t buffer_size_samples,
+ int time_stretched_samples) {
+ // Filter:
+ // `filtered_current_level_` = `level_factor_` * `filtered_current_level_` +
+ // (1 - `level_factor_`) * `buffer_size_samples`
+ // `level_factor_` and `filtered_current_level_` are in Q8.
+ // `buffer_size_samples` is in Q0.
+ const int64_t filtered_current_level =
+ (level_factor_ * int64_t{filtered_current_level_} >> 8) +
+ (256 - level_factor_) * rtc::dchecked_cast<int64_t>(buffer_size_samples);
+
+ // Account for time-scale operations (accelerate and pre-emptive expand) and
+ // make sure that the filtered value remains non-negative.
+ filtered_current_level_ = rtc::saturated_cast<int>(std::max<int64_t>(
+ 0, filtered_current_level - int64_t{time_stretched_samples} * (1 << 8)));
+}
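+
+// Illustrative numbers, derived from the filter above: starting from Reset(),
+// `filtered_current_level_` is 0 and `level_factor_` is 253, so a call to
+// Update(100, 0) yields (253 * 0 >> 8) + (256 - 253) * 100 = 300 in Q8, which
+// filtered_current_level() rounds to (300 + 128) >> 8 = 1 sample.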
+
+void BufferLevelFilter::SetFilteredBufferLevel(int buffer_size_samples) {
+ filtered_current_level_ =
+ rtc::saturated_cast<int>(int64_t{buffer_size_samples} * 256);
+}
+
+void BufferLevelFilter::SetTargetBufferLevel(int target_buffer_level_ms) {
+ if (target_buffer_level_ms <= 20) {
+ level_factor_ = 251;
+ } else if (target_buffer_level_ms <= 60) {
+ level_factor_ = 252;
+ } else if (target_buffer_level_ms <= 140) {
+ level_factor_ = 253;
+ } else {
+ level_factor_ = 254;
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/buffer_level_filter.h b/third_party/libwebrtc/modules/audio_coding/neteq/buffer_level_filter.h
new file mode 100644
index 0000000000..ced36da9c2
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/buffer_level_filter.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_BUFFER_LEVEL_FILTER_H_
+#define MODULES_AUDIO_CODING_NETEQ_BUFFER_LEVEL_FILTER_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+namespace webrtc {
+
+class BufferLevelFilter {
+ public:
+ BufferLevelFilter();
+ virtual ~BufferLevelFilter() {}
+
+ BufferLevelFilter(const BufferLevelFilter&) = delete;
+ BufferLevelFilter& operator=(const BufferLevelFilter&) = delete;
+
+ virtual void Reset();
+
+ // Updates the filter. Current buffer size is `buffer_size_samples`.
+ // `time_stretched_samples` is subtracted from the filtered value (thus
+ // bypassing the filter operation).
+ virtual void Update(size_t buffer_size_samples, int time_stretched_samples);
+
+ // Set the filtered buffer level to a particular value directly. This should
+ // only be used in case of large changes in buffer size, such as buffer
+ // flushes.
+ virtual void SetFilteredBufferLevel(int buffer_size_samples);
+
+ // The target level is used to select the appropriate filter coefficient.
+ virtual void SetTargetBufferLevel(int target_buffer_level_ms);
+
+ // Returns filtered current level in number of samples.
+ virtual int filtered_current_level() const {
+ // Round to nearest whole sample.
+ return (int64_t{filtered_current_level_} + (1 << 7)) >> 8;
+ }
+
+ private:
+ int level_factor_; // Filter factor for the buffer level filter in Q8.
+ int filtered_current_level_; // Filtered current buffer level in Q8.
+};
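+
+// Usage sketch (illustrative; `buffer_size_samples` stands in for the current
+// jitter buffer size). This mirrors how the decision logic drives the filter:
+//   BufferLevelFilter filter;
+//   filter.SetTargetBufferLevel(60);        // Selects the filter coefficient.
+//   filter.Update(buffer_size_samples, 0);  // Called once per output tick.
+//   int level_samples = filter.filtered_current_level();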
+
+} // namespace webrtc
+#endif // MODULES_AUDIO_CODING_NETEQ_BUFFER_LEVEL_FILTER_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/buffer_level_filter_unittest.cc b/third_party/libwebrtc/modules/audio_coding/neteq/buffer_level_filter_unittest.cc
new file mode 100644
index 0000000000..6773e96f58
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/buffer_level_filter_unittest.cc
@@ -0,0 +1,116 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+// Unit tests for BufferLevelFilter class.
+
+#include "modules/audio_coding/neteq/buffer_level_filter.h"
+
+#include <math.h> // Access to pow function.
+
+#include "rtc_base/strings/string_builder.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+TEST(BufferLevelFilter, CreateAndDestroy) {
+ BufferLevelFilter* filter = new BufferLevelFilter();
+ EXPECT_EQ(0, filter->filtered_current_level());
+ delete filter;
+}
+
+TEST(BufferLevelFilter, ConvergenceTest) {
+ BufferLevelFilter filter;
+ for (int times = 10; times <= 50; times += 10) {
+ for (int value = 100; value <= 200; value += 10) {
+ filter.Reset();
+ filter.SetTargetBufferLevel(20); // Makes filter coefficient 251/256.
+ rtc::StringBuilder ss;
+ ss << "times = " << times << ", value = " << value;
+ SCOPED_TRACE(ss.str()); // Print out the parameter values on failure.
+ for (int i = 0; i < times; ++i) {
+ filter.Update(value, 0 /* time_stretched_samples */);
+ }
+ // Expect the filtered value to be (theoretically)
+ // (1 - (251/256) ^ `times`) * `value`.
+ double expected_value_double = (1 - pow(251.0 / 256.0, times)) * value;
+ int expected_value = static_cast<int>(expected_value_double);
+
+ // The actual value may differ slightly from the expected value due to
+ // intermediate-stage rounding errors in the filter implementation.
+ // This is why we have to use EXPECT_NEAR with a tolerance of +/-1.
+ EXPECT_NEAR(expected_value, filter.filtered_current_level(), 1);
+ }
+ }
+}
+
+// Verify that the target buffer level impacts the filter convergence.
+TEST(BufferLevelFilter, FilterFactor) {
+ BufferLevelFilter filter;
+ // Update 10 times with value 100.
+ const int kTimes = 10;
+ const int kValue = 100;
+
+ filter.SetTargetBufferLevel(60); // Makes filter coefficient 252/256.
+ for (int i = 0; i < kTimes; ++i) {
+ filter.Update(kValue, 0 /* time_stretched_samples */);
+ }
+ // Expect the filtered value to be
+ // (1 - (252/256) ^ `kTimes`) * `kValue`.
+ int expected_value = 15;
+ EXPECT_EQ(expected_value, filter.filtered_current_level());
+
+ filter.Reset();
+ filter.SetTargetBufferLevel(140); // Makes filter coefficient 253/256.
+ for (int i = 0; i < kTimes; ++i) {
+ filter.Update(kValue, 0 /* time_stretched_samples */);
+ }
+ // Expect the filtered value to be
+ // (1 - (253/256) ^ `kTimes`) * `kValue`.
+ expected_value = 11;
+ EXPECT_EQ(expected_value, filter.filtered_current_level());
+
+ filter.Reset();
+ filter.SetTargetBufferLevel(160); // Makes filter coefficient 254/256.
+ for (int i = 0; i < kTimes; ++i) {
+ filter.Update(kValue, 0 /* time_stretched_samples */);
+ }
+ // Expect the filtered value to be
+ // (1 - (254/256) ^ `kTimes`) * `kValue`.
+ expected_value = 8;
+ EXPECT_EQ(expected_value, filter.filtered_current_level());
+}
+
+TEST(BufferLevelFilter, TimeStretchedSamples) {
+ BufferLevelFilter filter;
+ filter.SetTargetBufferLevel(20); // Makes filter coefficient 251/256.
+ // Update 10 times with value 100.
+ const int kTimes = 10;
+ const int kValue = 100;
+ const int kTimeStretchedSamples = 3;
+ for (int i = 0; i < kTimes; ++i) {
+ filter.Update(kValue, 0);
+ }
+ // Expect the filtered value to be
+ // (1 - (251/256) ^ `kTimes`) * `kValue`.
+ const int kExpectedValue = 18;
+ EXPECT_EQ(kExpectedValue, filter.filtered_current_level());
+
+  // Update the filter again, now with a non-zero value for time-stretched
+  // samples.
+ // Set the current filtered value to be the input, in order to isolate the
+ // impact of `kTimeStretchedSamples`.
+ filter.Update(filter.filtered_current_level(), kTimeStretchedSamples);
+ EXPECT_EQ(kExpectedValue - kTimeStretchedSamples,
+ filter.filtered_current_level());
+ // Try negative value and verify that we come back to the previous result.
+ filter.Update(filter.filtered_current_level(), -kTimeStretchedSamples);
+ EXPECT_EQ(kExpectedValue, filter.filtered_current_level());
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/comfort_noise.cc b/third_party/libwebrtc/modules/audio_coding/neteq/comfort_noise.cc
new file mode 100644
index 0000000000..a2ce888f45
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/comfort_noise.cc
@@ -0,0 +1,130 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/comfort_noise.h"
+
+#include <cstdint>
+#include <memory>
+
+#include "api/array_view.h"
+#include "modules/audio_coding/codecs/cng/webrtc_cng.h"
+#include "modules/audio_coding/neteq/audio_multi_vector.h"
+#include "modules/audio_coding/neteq/audio_vector.h"
+#include "modules/audio_coding/neteq/decoder_database.h"
+#include "modules/audio_coding/neteq/dsp_helper.h"
+#include "modules/audio_coding/neteq/sync_buffer.h"
+#include "rtc_base/buffer.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+void ComfortNoise::Reset() {
+ first_call_ = true;
+}
+
+int ComfortNoise::UpdateParameters(const Packet& packet) {
+ // Get comfort noise decoder.
+ if (decoder_database_->SetActiveCngDecoder(packet.payload_type) != kOK) {
+ return kUnknownPayloadType;
+ }
+ ComfortNoiseDecoder* cng_decoder = decoder_database_->GetActiveCngDecoder();
+ RTC_DCHECK(cng_decoder);
+ cng_decoder->UpdateSid(packet.payload);
+ return kOK;
+}
+
+int ComfortNoise::Generate(size_t requested_length, AudioMultiVector* output) {
+ // TODO(hlundin): Change to an enumerator and skip assert.
+ RTC_DCHECK(fs_hz_ == 8000 || fs_hz_ == 16000 || fs_hz_ == 32000 ||
+ fs_hz_ == 48000);
+ // Not adapted for multi-channel yet.
+ if (output->Channels() != 1) {
+ RTC_LOG(LS_ERROR) << "No multi-channel support";
+ return kMultiChannelNotSupported;
+ }
+
+ size_t number_of_samples = requested_length;
+ bool new_period = false;
+ if (first_call_) {
+ // Generate noise and overlap slightly with old data.
+ number_of_samples = requested_length + overlap_length_;
+ new_period = true;
+ }
+ output->AssertSize(number_of_samples);
+ // Get the decoder from the database.
+ ComfortNoiseDecoder* cng_decoder = decoder_database_->GetActiveCngDecoder();
+ if (!cng_decoder) {
+ RTC_LOG(LS_ERROR) << "Unknwown payload type";
+ return kUnknownPayloadType;
+ }
+
+ std::unique_ptr<int16_t[]> temp(new int16_t[number_of_samples]);
+ if (!cng_decoder->Generate(
+ rtc::ArrayView<int16_t>(temp.get(), number_of_samples), new_period)) {
+ // Error returned.
+ output->Zeros(requested_length);
+ RTC_LOG(LS_ERROR)
+ << "ComfortNoiseDecoder::Genererate failed to generate comfort noise";
+ return kInternalError;
+ }
+ (*output)[0].OverwriteAt(temp.get(), number_of_samples, 0);
+
+ if (first_call_) {
+ // Set tapering window parameters. Values are in Q15.
+ int16_t muting_window; // Mixing factor for overlap data.
+ int16_t muting_window_increment; // Mixing factor increment (negative).
+ int16_t unmuting_window; // Mixing factor for comfort noise.
+ int16_t unmuting_window_increment; // Mixing factor increment.
+ if (fs_hz_ == 8000) {
+ muting_window = DspHelper::kMuteFactorStart8kHz;
+ muting_window_increment = DspHelper::kMuteFactorIncrement8kHz;
+ unmuting_window = DspHelper::kUnmuteFactorStart8kHz;
+ unmuting_window_increment = DspHelper::kUnmuteFactorIncrement8kHz;
+ } else if (fs_hz_ == 16000) {
+ muting_window = DspHelper::kMuteFactorStart16kHz;
+ muting_window_increment = DspHelper::kMuteFactorIncrement16kHz;
+ unmuting_window = DspHelper::kUnmuteFactorStart16kHz;
+ unmuting_window_increment = DspHelper::kUnmuteFactorIncrement16kHz;
+ } else if (fs_hz_ == 32000) {
+ muting_window = DspHelper::kMuteFactorStart32kHz;
+ muting_window_increment = DspHelper::kMuteFactorIncrement32kHz;
+ unmuting_window = DspHelper::kUnmuteFactorStart32kHz;
+ unmuting_window_increment = DspHelper::kUnmuteFactorIncrement32kHz;
+ } else { // fs_hz_ == 48000
+ muting_window = DspHelper::kMuteFactorStart48kHz;
+ muting_window_increment = DspHelper::kMuteFactorIncrement48kHz;
+ unmuting_window = DspHelper::kUnmuteFactorStart48kHz;
+ unmuting_window_increment = DspHelper::kUnmuteFactorIncrement48kHz;
+ }
+
+ // Do overlap-add between new vector and overlap.
+ size_t start_ix = sync_buffer_->Size() - overlap_length_;
+ for (size_t i = 0; i < overlap_length_; i++) {
+ /* overlapVec[i] = WinMute * overlapVec[i] + WinUnMute * outData[i] */
+ // The expression (*output)[0][i] is the i-th element in the first
+ // channel.
+ (*sync_buffer_)[0][start_ix + i] =
+ (((*sync_buffer_)[0][start_ix + i] * muting_window) +
+ ((*output)[0][i] * unmuting_window) + 16384) >>
+ 15;
+ muting_window += muting_window_increment;
+ unmuting_window += unmuting_window_increment;
+ }
+ // Remove `overlap_length_` samples from the front of `output` since they
+ // were mixed into `sync_buffer_` above.
+ output->PopFront(overlap_length_);
+ }
+ first_call_ = false;
+ return kOK;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/comfort_noise.h b/third_party/libwebrtc/modules/audio_coding/neteq/comfort_noise.h
new file mode 100644
index 0000000000..31fcee31d0
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/comfort_noise.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_COMFORT_NOISE_H_
+#define MODULES_AUDIO_CODING_NETEQ_COMFORT_NOISE_H_
+
+#include <stddef.h>
+
+namespace webrtc {
+
+// Forward declarations.
+class AudioMultiVector;
+class DecoderDatabase;
+class SyncBuffer;
+struct Packet;
+
+// This class acts as an interface to the CNG generator.
+class ComfortNoise {
+ public:
+ enum ReturnCodes {
+ kOK = 0,
+ kUnknownPayloadType,
+ kInternalError,
+ kMultiChannelNotSupported
+ };
+
+ ComfortNoise(int fs_hz,
+ DecoderDatabase* decoder_database,
+ SyncBuffer* sync_buffer)
+ : fs_hz_(fs_hz),
+ first_call_(true),
+ overlap_length_(5 * fs_hz_ / 8000),
+ decoder_database_(decoder_database),
+ sync_buffer_(sync_buffer) {}
+
+ ComfortNoise(const ComfortNoise&) = delete;
+ ComfortNoise& operator=(const ComfortNoise&) = delete;
+
+ // Resets the state. Should be called before each new comfort noise period.
+ void Reset();
+
+ // Update the comfort noise generator with the parameters in `packet`.
+ int UpdateParameters(const Packet& packet);
+
+ // Generates `requested_length` samples of comfort noise and writes to
+  // `output`. If this is the first call after Reset (or the first call after
+  // creating the object), it will also mix in comfort noise at the end of the
+ // SyncBuffer object provided in the constructor.
+ int Generate(size_t requested_length, AudioMultiVector* output);
+
+ // Returns the last error code that was produced by the comfort noise
+ // decoder. Returns 0 if no error has been encountered since the last reset.
+ int internal_error_code() { return internal_error_code_; }
+
+ private:
+ int fs_hz_;
+ bool first_call_;
+ size_t overlap_length_;
+ DecoderDatabase* decoder_database_;
+ SyncBuffer* sync_buffer_;
+  int internal_error_code_ = 0;
+};
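+
+// Typical call sequence (an illustrative sketch; `db`, `sync_buffer`,
+// `sid_packet` and `output` are assumed to exist elsewhere):
+//   ComfortNoise cn(8000, &db, &sync_buffer);
+//   if (cn.UpdateParameters(sid_packet) == ComfortNoise::kOK) {
+//     cn.Generate(80, &output);  // One 10 ms block at 8 kHz.
+//   }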
+
+} // namespace webrtc
+#endif // MODULES_AUDIO_CODING_NETEQ_COMFORT_NOISE_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/comfort_noise_unittest.cc b/third_party/libwebrtc/modules/audio_coding/neteq/comfort_noise_unittest.cc
new file mode 100644
index 0000000000..b436800061
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/comfort_noise_unittest.cc
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+// Unit tests for ComfortNoise class.
+
+#include "modules/audio_coding/neteq/comfort_noise.h"
+
+#include "modules/audio_coding/neteq/mock/mock_decoder_database.h"
+#include "modules/audio_coding/neteq/sync_buffer.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+TEST(ComfortNoise, CreateAndDestroy) {
+ int fs = 8000;
+ MockDecoderDatabase db;
+ SyncBuffer sync_buffer(1, 1000);
+ ComfortNoise cn(fs, &db, &sync_buffer);
+ EXPECT_CALL(db, Die()); // Called when `db` goes out of scope.
+}
+
+// TODO(hlundin): Write more tests.
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/cross_correlation.cc b/third_party/libwebrtc/modules/audio_coding/neteq/cross_correlation.cc
new file mode 100644
index 0000000000..37ed9374f0
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/cross_correlation.cc
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/cross_correlation.h"
+
+#include <cstdlib>
+#include <limits>
+
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+
+namespace webrtc {
+
+// This function decides the overflow-protecting scaling and calls
+// WebRtcSpl_CrossCorrelation.
+int CrossCorrelationWithAutoShift(const int16_t* sequence_1,
+ const int16_t* sequence_2,
+ size_t sequence_1_length,
+ size_t cross_correlation_length,
+ int cross_correlation_step,
+ int32_t* cross_correlation) {
+  // Find the element with the maximum absolute value in each of sequence_1
+  // and sequence_2. Note that these elements may be negative.
+ const int16_t max_1 =
+ WebRtcSpl_MaxAbsElementW16(sequence_1, sequence_1_length);
+ const int sequence_2_shift =
+ cross_correlation_step * (static_cast<int>(cross_correlation_length) - 1);
+ const int16_t* sequence_2_start =
+ sequence_2_shift >= 0 ? sequence_2 : sequence_2 + sequence_2_shift;
+ const size_t sequence_2_length =
+ sequence_1_length + std::abs(sequence_2_shift);
+ const int16_t max_2 =
+ WebRtcSpl_MaxAbsElementW16(sequence_2_start, sequence_2_length);
+
+  // In order to avoid overflow when computing the sum, scale the samples so
+  // that (sequence_1_length * max_1 * max_2) will not overflow.
+ const int64_t max_value =
+ abs(max_1 * max_2) * static_cast<int64_t>(sequence_1_length);
+ const int32_t factor = max_value >> 31;
+ const int scaling = factor == 0 ? 0 : 31 - WebRtcSpl_NormW32(factor);
+
+ WebRtcSpl_CrossCorrelation(cross_correlation, sequence_1, sequence_2,
+ sequence_1_length, cross_correlation_length,
+ scaling, cross_correlation_step);
+
+ return scaling;
+}
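+
+// Scaling example, following the arithmetic above: with `sequence_1_length` =
+// 480 and max_1 = max_2 = 32767, max_value is about 5.15e11, factor =
+// max_value >> 31 = 239, and scaling = 31 - WebRtcSpl_NormW32(239) = 8, so
+// the returned correlation values are in Q(-8).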
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/cross_correlation.h b/third_party/libwebrtc/modules/audio_coding/neteq/cross_correlation.h
new file mode 100644
index 0000000000..5082ce6a30
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/cross_correlation.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_CROSS_CORRELATION_H_
+#define MODULES_AUDIO_CODING_NETEQ_CROSS_CORRELATION_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+namespace webrtc {
+
+// The function calculates the cross-correlation between two sequences
+// `sequence_1` and `sequence_2`. `sequence_1` is taken as reference, with
+// `sequence_1_length` as its length. `sequence_2` slides for the calculation of
+// cross-correlation. The result will be saved in `cross_correlation`.
+// `cross_correlation_length` correlation points are calculated.
+// The corresponding lag starts from 0, and increases with a step of
+// `cross_correlation_step`. The result is not normalized. To avoid overflow,
+// the result is right-shifted, and the number of right shifts is returned.
+//
+// Input:
+// - sequence_1 : First sequence (reference).
+// - sequence_2 : Second sequence (sliding during calculation).
+// - sequence_1_length : Length of `sequence_1`.
+// - cross_correlation_length : Number of cross-correlations to calculate.
+// - cross_correlation_step : Step in the lag for the cross-correlation.
+//
+// Output:
+// - cross_correlation : The cross-correlation in Q(-right_shifts)
+//
+// Return:
+// Number of right shifts in cross_correlation.
+
+int CrossCorrelationWithAutoShift(const int16_t* sequence_1,
+ const int16_t* sequence_2,
+ size_t sequence_1_length,
+ size_t cross_correlation_length,
+ int cross_correlation_step,
+ int32_t* cross_correlation);
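+
+// Example use (illustrative; `reference`, `sliding` and `kNumLags` are
+// placeholders):
+//   int32_t correlation[kNumLags];
+//   const int shifts = CrossCorrelationWithAutoShift(
+//       reference, sliding, reference_length, kNumLags, 1, correlation);
+//   // correlation[i] now holds the correlation at lag i, in Q(-shifts).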
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_CODING_NETEQ_CROSS_CORRELATION_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/decision_logic.cc b/third_party/libwebrtc/modules/audio_coding/neteq/decision_logic.cc
new file mode 100644
index 0000000000..558774dcb6
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/decision_logic.cc
@@ -0,0 +1,508 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/decision_logic.h"
+
+#include <stdio.h>
+
+#include <cstdint>
+#include <memory>
+#include <string>
+
+#include "absl/types/optional.h"
+#include "api/neteq/neteq.h"
+#include "api/neteq/neteq_controller.h"
+#include "modules/audio_coding/neteq/packet_arrival_history.h"
+#include "modules/audio_coding/neteq/packet_buffer.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/experiments/field_trial_parser.h"
+#include "rtc_base/experiments/struct_parameters_parser.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "system_wrappers/include/field_trial.h"
+
+namespace webrtc {
+
+namespace {
+
+constexpr int kPostponeDecodingLevel = 50;
+constexpr int kTargetLevelWindowMs = 100;
+constexpr int kMaxWaitForPacketTicks = 10;
+// The granularity of delay adjustments (accelerate/preemptive expand) is 15ms,
+// but round up since the clock has a granularity of 10ms.
+constexpr int kDelayAdjustmentGranularityMs = 20;
+
+std::unique_ptr<DelayManager> CreateDelayManager(
+ const NetEqController::Config& neteq_config) {
+ DelayManager::Config config;
+ config.max_packets_in_buffer = neteq_config.max_packets_in_buffer;
+ config.base_minimum_delay_ms = neteq_config.base_min_delay_ms;
+ config.Log();
+ return std::make_unique<DelayManager>(config, neteq_config.tick_timer);
+}
+
+bool IsTimestretch(NetEq::Mode mode) {
+ return mode == NetEq::Mode::kAccelerateSuccess ||
+ mode == NetEq::Mode::kAccelerateLowEnergy ||
+ mode == NetEq::Mode::kPreemptiveExpandSuccess ||
+ mode == NetEq::Mode::kPreemptiveExpandLowEnergy;
+}
+
+bool IsCng(NetEq::Mode mode) {
+ return mode == NetEq::Mode::kRfc3389Cng ||
+ mode == NetEq::Mode::kCodecInternalCng;
+}
+
+bool IsExpand(NetEq::Mode mode) {
+ return mode == NetEq::Mode::kExpand || mode == NetEq::Mode::kCodecPlc;
+}
+
+} // namespace
+
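+// The config is parsed from the "WebRTC-Audio-NetEqDecisionLogicConfig" field
+// trial. The struct parser expects a comma-separated key:value list; for
+// example (hypothetical values), a trial string of
+// "enable_stable_playout_delay:true,reinit_after_expands:200" would override
+// those two fields and leave the rest at their defaults.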
+DecisionLogic::Config::Config() {
+ StructParametersParser::Create(
+ "enable_stable_playout_delay", &enable_stable_playout_delay, //
+ "reinit_after_expands", &reinit_after_expands, //
+ "packet_history_size_ms", &packet_history_size_ms, //
+ "deceleration_target_level_offset_ms",
+ &deceleration_target_level_offset_ms)
+ ->Parse(webrtc::field_trial::FindFullName(
+ "WebRTC-Audio-NetEqDecisionLogicConfig"));
+ RTC_LOG(LS_INFO) << "NetEq decision logic config:"
+ << " enable_stable_playout_delay="
+ << enable_stable_playout_delay
+ << " reinit_after_expands=" << reinit_after_expands
+ << " packet_history_size_ms=" << packet_history_size_ms
+ << " deceleration_target_level_offset_ms="
+ << deceleration_target_level_offset_ms;
+}
+
+DecisionLogic::DecisionLogic(NetEqController::Config config)
+ : DecisionLogic(config,
+ CreateDelayManager(config),
+ std::make_unique<BufferLevelFilter>()) {}
+
+DecisionLogic::DecisionLogic(
+ NetEqController::Config config,
+ std::unique_ptr<DelayManager> delay_manager,
+ std::unique_ptr<BufferLevelFilter> buffer_level_filter)
+ : delay_manager_(std::move(delay_manager)),
+ buffer_level_filter_(std::move(buffer_level_filter)),
+ packet_arrival_history_(config_.packet_history_size_ms),
+ tick_timer_(config.tick_timer),
+ disallow_time_stretching_(!config.allow_time_stretching),
+ timescale_countdown_(
+ tick_timer_->GetNewCountdown(kMinTimescaleInterval + 1)) {}
+
+DecisionLogic::~DecisionLogic() = default;
+
+void DecisionLogic::SoftReset() {
+ packet_length_samples_ = 0;
+ sample_memory_ = 0;
+ prev_time_scale_ = false;
+ timescale_countdown_ =
+ tick_timer_->GetNewCountdown(kMinTimescaleInterval + 1);
+ time_stretched_cn_samples_ = 0;
+ delay_manager_->Reset();
+ buffer_level_filter_->Reset();
+ packet_arrival_history_.Reset();
+ last_playout_delay_ms_ = 0;
+}
+
+void DecisionLogic::SetSampleRate(int fs_hz, size_t output_size_samples) {
+ // TODO(hlundin): Change to an enumerator and skip assert.
+ RTC_DCHECK(fs_hz == 8000 || fs_hz == 16000 || fs_hz == 32000 ||
+ fs_hz == 48000);
+ sample_rate_khz_ = fs_hz / 1000;
+ output_size_samples_ = output_size_samples;
+ packet_arrival_history_.set_sample_rate(fs_hz);
+}
+
+NetEq::Operation DecisionLogic::GetDecision(const NetEqStatus& status,
+ bool* reset_decoder) {
+ // If last mode was CNG (or Expand, since this could be covering up for
+ // a lost CNG packet), remember that CNG is on. This is needed if comfort
+ // noise is interrupted by DTMF.
+ if (status.last_mode == NetEq::Mode::kRfc3389Cng) {
+ cng_state_ = kCngRfc3389On;
+ } else if (status.last_mode == NetEq::Mode::kCodecInternalCng) {
+ cng_state_ = kCngInternalOn;
+ }
+
+ if (IsExpand(status.last_mode)) {
+ ++num_consecutive_expands_;
+ } else {
+ num_consecutive_expands_ = 0;
+ }
+
+ if (!IsExpand(status.last_mode) && !IsCng(status.last_mode)) {
+ last_playout_delay_ms_ = GetPlayoutDelayMs(status);
+ }
+
+ prev_time_scale_ = prev_time_scale_ && IsTimestretch(status.last_mode);
+ if (prev_time_scale_) {
+ timescale_countdown_ = tick_timer_->GetNewCountdown(kMinTimescaleInterval);
+ }
+ if (!IsCng(status.last_mode)) {
+ FilterBufferLevel(status.packet_buffer_info.span_samples);
+ }
+
+ // Guard for errors, to avoid getting stuck in error mode.
+ if (status.last_mode == NetEq::Mode::kError) {
+ if (!status.next_packet) {
+ return NetEq::Operation::kExpand;
+ } else {
+ // Use kUndefined to flag for a reset.
+ return NetEq::Operation::kUndefined;
+ }
+ }
+
+ if (status.next_packet && status.next_packet->is_cng) {
+ return CngOperation(status);
+ }
+
+ // Handle the case with no packet at all available (except maybe DTMF).
+ if (!status.next_packet) {
+ return NoPacket(status);
+ }
+
+ // If the expand period was very long, reset NetEQ since it is likely that the
+ // sender was restarted.
+ if (num_consecutive_expands_ > config_.reinit_after_expands) {
+ *reset_decoder = true;
+ return NetEq::Operation::kNormal;
+ }
+
+ // Make sure we don't restart audio too soon after an expansion to avoid
+ // running out of data right away again. We should only wait if there are no
+ // DTX or CNG packets in the buffer (otherwise we should just play out what we
+ // have, since we cannot know the exact duration of DTX or CNG packets), and
+ // if the mute factor is low enough (otherwise the expansion was short enough
+  // to not be noticeable).
+ // Note that the MuteFactor is in Q14, so a value of 16384 corresponds to 1.
+ const int target_level_samples = TargetLevelMs() * sample_rate_khz_;
+ if (!config_.enable_stable_playout_delay && IsExpand(status.last_mode) &&
+ status.expand_mutefactor < 16384 / 2 &&
+ status.packet_buffer_info.span_samples <
+ static_cast<size_t>(target_level_samples * kPostponeDecodingLevel /
+ 100) &&
+ !status.packet_buffer_info.dtx_or_cng) {
+ return NetEq::Operation::kExpand;
+ }
+
+ const uint32_t five_seconds_samples =
+ static_cast<uint32_t>(5000 * sample_rate_khz_);
+ // Check if the required packet is available.
+ if (status.target_timestamp == status.next_packet->timestamp) {
+ return ExpectedPacketAvailable(status);
+ }
+ if (!PacketBuffer::IsObsoleteTimestamp(status.next_packet->timestamp,
+ status.target_timestamp,
+ five_seconds_samples)) {
+ return FuturePacketAvailable(status);
+ }
+  // Reaching this point implies that the next packet's timestamp is older
+  // than `target_timestamp`, which can happen when a new stream or codec is
+  // received. Signal for a reset.
+ return NetEq::Operation::kUndefined;
+}
+
+void DecisionLogic::NotifyMutedState() {
+ ++num_consecutive_expands_;
+}
+
+int DecisionLogic::TargetLevelMs() const {
+ int target_delay_ms = delay_manager_->TargetDelayMs();
+ if (!config_.enable_stable_playout_delay) {
+ target_delay_ms =
+ std::max(target_delay_ms,
+ static_cast<int>(packet_length_samples_ / sample_rate_khz_));
+ }
+ return target_delay_ms;
+}
+
+int DecisionLogic::UnlimitedTargetLevelMs() const {
+ return delay_manager_->UnlimitedTargetLevelMs();
+}
+
+int DecisionLogic::GetFilteredBufferLevel() const {
+ if (config_.enable_stable_playout_delay) {
+ return last_playout_delay_ms_ * sample_rate_khz_;
+ }
+ return buffer_level_filter_->filtered_current_level();
+}
+
+absl::optional<int> DecisionLogic::PacketArrived(
+ int fs_hz,
+ bool should_update_stats,
+ const PacketArrivedInfo& info) {
+ buffer_flush_ = buffer_flush_ || info.buffer_flush;
+ if (!should_update_stats || info.is_cng_or_dtmf) {
+ return absl::nullopt;
+ }
+ if (info.packet_length_samples > 0 && fs_hz > 0 &&
+ info.packet_length_samples != packet_length_samples_) {
+ packet_length_samples_ = info.packet_length_samples;
+ delay_manager_->SetPacketAudioLength(packet_length_samples_ * 1000 / fs_hz);
+ }
+ int64_t time_now_ms = tick_timer_->ticks() * tick_timer_->ms_per_tick();
+ packet_arrival_history_.Insert(info.main_timestamp, time_now_ms);
+ if (packet_arrival_history_.size() < 2) {
+ // No meaningful delay estimate unless at least 2 packets have arrived.
+ return absl::nullopt;
+ }
+ int arrival_delay_ms =
+ packet_arrival_history_.GetDelayMs(info.main_timestamp, time_now_ms);
+ bool reordered =
+ !packet_arrival_history_.IsNewestRtpTimestamp(info.main_timestamp);
+ delay_manager_->Update(arrival_delay_ms, reordered);
+ return arrival_delay_ms;
+}
+
+void DecisionLogic::FilterBufferLevel(size_t buffer_size_samples) {
+ buffer_level_filter_->SetTargetBufferLevel(TargetLevelMs());
+
+ int time_stretched_samples = time_stretched_cn_samples_;
+ if (prev_time_scale_) {
+ time_stretched_samples += sample_memory_;
+ }
+
+ if (buffer_flush_) {
+ buffer_level_filter_->SetFilteredBufferLevel(buffer_size_samples);
+ buffer_flush_ = false;
+ } else {
+ buffer_level_filter_->Update(buffer_size_samples, time_stretched_samples);
+ }
+ prev_time_scale_ = false;
+ time_stretched_cn_samples_ = 0;
+}
+
+NetEq::Operation DecisionLogic::CngOperation(
+ NetEqController::NetEqStatus status) {
+ // Signed difference between target and available timestamp.
+ int32_t timestamp_diff = static_cast<int32_t>(
+ static_cast<uint32_t>(status.generated_noise_samples +
+ status.target_timestamp) -
+ status.next_packet->timestamp);
+ int optimal_level_samp = TargetLevelMs() * sample_rate_khz_;
+ const int64_t excess_waiting_time_samp =
+ -static_cast<int64_t>(timestamp_diff) - optimal_level_samp;
+
+ if (excess_waiting_time_samp > optimal_level_samp / 2) {
+ // The waiting time for this packet will be longer than 1.5
+ // times the wanted buffer delay. Apply fast-forward to cut the
+ // waiting time down to the optimal.
+ noise_fast_forward_ = rtc::saturated_cast<size_t>(noise_fast_forward_ +
+ excess_waiting_time_samp);
+ timestamp_diff =
+ rtc::saturated_cast<int32_t>(timestamp_diff + excess_waiting_time_samp);
+ }
+
+ if (timestamp_diff < 0 && status.last_mode == NetEq::Mode::kRfc3389Cng) {
+ // Not time to play this packet yet. Wait another round before using this
+ // packet. Keep on playing CNG from previous CNG parameters.
+ return NetEq::Operation::kRfc3389CngNoPacket;
+ } else {
+ // Otherwise, go for the CNG packet now.
+ noise_fast_forward_ = 0;
+ return NetEq::Operation::kRfc3389Cng;
+ }
+}
+
+NetEq::Operation DecisionLogic::NoPacket(NetEqController::NetEqStatus status) {
+ if (cng_state_ == kCngRfc3389On) {
+ // Keep on playing comfort noise.
+ return NetEq::Operation::kRfc3389CngNoPacket;
+ } else if (cng_state_ == kCngInternalOn) {
+ // Keep on playing codec internal comfort noise.
+ return NetEq::Operation::kCodecInternalCng;
+ } else if (status.play_dtmf) {
+ return NetEq::Operation::kDtmf;
+ } else {
+ // Nothing to play, do expand.
+ return NetEq::Operation::kExpand;
+ }
+}
+
+NetEq::Operation DecisionLogic::ExpectedPacketAvailable(
+ NetEqController::NetEqStatus status) {
+ if (!disallow_time_stretching_ && status.last_mode != NetEq::Mode::kExpand &&
+ !status.play_dtmf) {
+ if (config_.enable_stable_playout_delay) {
+ const int playout_delay_ms = GetPlayoutDelayMs(status);
+ if (playout_delay_ms >= HighThreshold() << 2) {
+ return NetEq::Operation::kFastAccelerate;
+ }
+ if (TimescaleAllowed()) {
+ if (playout_delay_ms >= HighThreshold()) {
+ return NetEq::Operation::kAccelerate;
+ }
+ if (playout_delay_ms < LowThreshold()) {
+ return NetEq::Operation::kPreemptiveExpand;
+ }
+ }
+ } else {
+ const int target_level_samples = TargetLevelMs() * sample_rate_khz_;
+ const int low_limit = std::max(
+ target_level_samples * 3 / 4,
+ target_level_samples -
+ config_.deceleration_target_level_offset_ms * sample_rate_khz_);
+ const int high_limit = std::max(
+ target_level_samples,
+ low_limit + kDelayAdjustmentGranularityMs * sample_rate_khz_);
+
+ const int buffer_level_samples =
+ buffer_level_filter_->filtered_current_level();
+ if (buffer_level_samples >= high_limit << 2)
+ return NetEq::Operation::kFastAccelerate;
+ if (TimescaleAllowed()) {
+ if (buffer_level_samples >= high_limit)
+ return NetEq::Operation::kAccelerate;
+ if (buffer_level_samples < low_limit)
+ return NetEq::Operation::kPreemptiveExpand;
+ }
+ }
+ }
+ return NetEq::Operation::kNormal;
+}
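+
+// Worked example for the non-stable-delay branch above (the numbers match the
+// unit tests): at 8 kHz with a 100 ms target level, target_level_samples is
+// 800, low_limit = max(600, 800 - 85 * 8) = 600 and high_limit =
+// max(800, 600 + 20 * 8) = 800. A filtered level of 110 ms then triggers
+// kAccelerate, 400 ms triggers kFastAccelerate (>= high_limit << 2), and
+// 50 ms triggers kPreemptiveExpand.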
+
+NetEq::Operation DecisionLogic::FuturePacketAvailable(
+ NetEqController::NetEqStatus status) {
+ // Required packet is not available, but a future packet is.
+ // Check if we should continue with an ongoing expand because the new packet
+ // is too far into the future.
+ if (IsExpand(status.last_mode) && ShouldContinueExpand(status)) {
+ if (status.play_dtmf) {
+ // Still have DTMF to play, so do not do expand.
+ return NetEq::Operation::kDtmf;
+ } else {
+ // Nothing to play.
+ return NetEq::Operation::kExpand;
+ }
+ }
+
+ if (status.last_mode == NetEq::Mode::kCodecPlc) {
+ return NetEq::Operation::kNormal;
+ }
+
+ // If previous was comfort noise, then no merge is needed.
+ if (IsCng(status.last_mode)) {
+ uint32_t timestamp_leap =
+ status.next_packet->timestamp - status.target_timestamp;
+ const bool generated_enough_noise =
+ status.generated_noise_samples >= timestamp_leap;
+
+ int playout_delay_ms = GetNextPacketDelayMs(status);
+ const bool above_target_delay = playout_delay_ms > HighThresholdCng();
+ const bool below_target_delay = playout_delay_ms < LowThresholdCng();
+ // Keep the delay same as before CNG, but make sure that it is within the
+ // target window.
+ if ((generated_enough_noise && !below_target_delay) || above_target_delay) {
+ time_stretched_cn_samples_ =
+ timestamp_leap - status.generated_noise_samples;
+ return NetEq::Operation::kNormal;
+ }
+
+ if (status.last_mode == NetEq::Mode::kRfc3389Cng) {
+ return NetEq::Operation::kRfc3389CngNoPacket;
+ }
+ return NetEq::Operation::kCodecInternalCng;
+ }
+
+ // Do not merge unless we have done an expand before.
+ if (status.last_mode == NetEq::Mode::kExpand) {
+ return NetEq::Operation::kMerge;
+ } else if (status.play_dtmf) {
+ // Play DTMF instead of expand.
+ return NetEq::Operation::kDtmf;
+ } else {
+ return NetEq::Operation::kExpand;
+ }
+}
+
+bool DecisionLogic::UnderTargetLevel() const {
+ return buffer_level_filter_->filtered_current_level() <
+ TargetLevelMs() * sample_rate_khz_;
+}
+
+bool DecisionLogic::ReinitAfterExpands(uint32_t timestamp_leap) const {
+ return timestamp_leap >= static_cast<uint32_t>(output_size_samples_ *
+ config_.reinit_after_expands);
+}
+
+bool DecisionLogic::PacketTooEarly(uint32_t timestamp_leap) const {
+ return timestamp_leap >
+ static_cast<uint32_t>(output_size_samples_ * num_consecutive_expands_);
+}
+
+bool DecisionLogic::MaxWaitForPacket() const {
+ return num_consecutive_expands_ >= kMaxWaitForPacketTicks;
+}
+
+bool DecisionLogic::ShouldContinueExpand(
+ NetEqController::NetEqStatus status) const {
+ uint32_t timestamp_leap =
+ status.next_packet->timestamp - status.target_timestamp;
+ if (config_.enable_stable_playout_delay) {
+ return GetNextPacketDelayMs(status) < HighThreshold() &&
+ PacketTooEarly(timestamp_leap);
+ }
+ return !ReinitAfterExpands(timestamp_leap) && !MaxWaitForPacket() &&
+ PacketTooEarly(timestamp_leap) && UnderTargetLevel();
+}
+
+int DecisionLogic::GetNextPacketDelayMs(
+ NetEqController::NetEqStatus status) const {
+ if (config_.enable_stable_playout_delay) {
+ return packet_arrival_history_.GetDelayMs(
+ status.next_packet->timestamp,
+ tick_timer_->ticks() * tick_timer_->ms_per_tick());
+ }
+ return status.packet_buffer_info.span_samples / sample_rate_khz_;
+}
+
+int DecisionLogic::GetPlayoutDelayMs(
+ NetEqController::NetEqStatus status) const {
+ uint32_t playout_timestamp =
+ status.target_timestamp - status.sync_buffer_samples;
+ return packet_arrival_history_.GetDelayMs(
+ playout_timestamp, tick_timer_->ticks() * tick_timer_->ms_per_tick());
+}
+
+int DecisionLogic::LowThreshold() const {
+ int target_delay_ms = TargetLevelMs();
+ return std::max(
+ target_delay_ms * 3 / 4,
+ target_delay_ms - config_.deceleration_target_level_offset_ms);
+}
+
+int DecisionLogic::HighThreshold() const {
+ if (config_.enable_stable_playout_delay) {
+ return std::max(TargetLevelMs(), packet_arrival_history_.GetMaxDelayMs()) +
+ kDelayAdjustmentGranularityMs;
+ }
+ return std::max(TargetLevelMs(),
+ LowThreshold() + kDelayAdjustmentGranularityMs);
+}
+
+int DecisionLogic::LowThresholdCng() const {
+ if (config_.enable_stable_playout_delay) {
+ return LowThreshold();
+ }
+ return std::max(0, TargetLevelMs() - kTargetLevelWindowMs / 2);
+}
+
+int DecisionLogic::HighThresholdCng() const {
+ if (config_.enable_stable_playout_delay) {
+ return HighThreshold();
+ }
+ return TargetLevelMs() + kTargetLevelWindowMs / 2;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/decision_logic.h b/third_party/libwebrtc/modules/audio_coding/neteq/decision_logic.h
new file mode 100644
index 0000000000..2e55322f8f
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/decision_logic.h
@@ -0,0 +1,201 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_DECISION_LOGIC_H_
+#define MODULES_AUDIO_CODING_NETEQ_DECISION_LOGIC_H_
+
+#include <memory>
+
+#include "api/neteq/neteq.h"
+#include "api/neteq/neteq_controller.h"
+#include "api/neteq/tick_timer.h"
+#include "modules/audio_coding/neteq/buffer_level_filter.h"
+#include "modules/audio_coding/neteq/delay_manager.h"
+#include "modules/audio_coding/neteq/packet_arrival_history.h"
+#include "rtc_base/experiments/field_trial_parser.h"
+
+namespace webrtc {
+
+// This is the class for the decision tree implementation.
+class DecisionLogic : public NetEqController {
+ public:
+ DecisionLogic(NetEqController::Config config);
+ DecisionLogic(NetEqController::Config config,
+ std::unique_ptr<DelayManager> delay_manager,
+ std::unique_ptr<BufferLevelFilter> buffer_level_filter);
+
+ ~DecisionLogic() override;
+
+ DecisionLogic(const DecisionLogic&) = delete;
+ DecisionLogic& operator=(const DecisionLogic&) = delete;
+
+ // Not used.
+ void Reset() override {}
+
+ // Resets parts of the state. Typically done when switching codecs.
+ void SoftReset() override;
+
+ // Sets the sample rate and the output block size.
+ void SetSampleRate(int fs_hz, size_t output_size_samples) override;
+
+ // Given info about the latest received packet, and current jitter buffer
+ // status, returns the operation. `target_timestamp` and `expand_mutefactor`
+ // are provided for reference. `last_packet_samples` is the number of samples
+ // obtained from the last decoded frame. If there is a packet available, it
+ // should be supplied in `packet`; otherwise it should be NULL. The mode
+ // resulting from the last call to NetEqImpl::GetAudio is supplied in
+ // `last_mode`. If there is a DTMF event to play, `play_dtmf` should be set to
+ // true. The output variable `reset_decoder` will be set to true if a reset is
+ // required; otherwise it is left unchanged (i.e., it can remain true if it
+ // was true before the call).
+ NetEq::Operation GetDecision(const NetEqController::NetEqStatus& status,
+ bool* reset_decoder) override;
+
+ // These methods test the `cng_state_` for different conditions.
+ bool CngRfc3389On() const override { return cng_state_ == kCngRfc3389On; }
+ bool CngOff() const override { return cng_state_ == kCngOff; }
+
+ // Resets the `cng_state_` to kCngOff.
+ void SetCngOff() override { cng_state_ = kCngOff; }
+
+ void ExpandDecision(NetEq::Operation operation) override {}
+
+ // Adds `value` to `sample_memory_`.
+ void AddSampleMemory(int32_t value) override { sample_memory_ += value; }
+
+ int TargetLevelMs() const override;
+
+ int UnlimitedTargetLevelMs() const override;
+
+ absl::optional<int> PacketArrived(int fs_hz,
+ bool should_update_stats,
+ const PacketArrivedInfo& info) override;
+
+ void RegisterEmptyPacket() override {}
+
+ void NotifyMutedState() override;
+
+ bool SetMaximumDelay(int delay_ms) override {
+ return delay_manager_->SetMaximumDelay(delay_ms);
+ }
+ bool SetMinimumDelay(int delay_ms) override {
+ return delay_manager_->SetMinimumDelay(delay_ms);
+ }
+ bool SetBaseMinimumDelay(int delay_ms) override {
+ return delay_manager_->SetBaseMinimumDelay(delay_ms);
+ }
+ int GetBaseMinimumDelay() const override {
+ return delay_manager_->GetBaseMinimumDelay();
+ }
+ bool PeakFound() const override { return false; }
+
+ int GetFilteredBufferLevel() const override;
+
+ // Accessors and mutators.
+ void set_sample_memory(int32_t value) override { sample_memory_ = value; }
+ size_t noise_fast_forward() const override { return noise_fast_forward_; }
+ size_t packet_length_samples() const override {
+ return packet_length_samples_;
+ }
+ void set_packet_length_samples(size_t value) override {
+ packet_length_samples_ = value;
+ }
+ void set_prev_time_scale(bool value) override { prev_time_scale_ = value; }
+
+ private:
+ // The value 5 sets maximum time-stretch rate to about 100 ms/s.
+ static const int kMinTimescaleInterval = 5;
+
+ enum CngState { kCngOff, kCngRfc3389On, kCngInternalOn };
+
+ // Updates the `buffer_level_filter_` with the current buffer level
+ // `buffer_size_samples`.
+ void FilterBufferLevel(size_t buffer_size_samples);
+
+ // Returns the operation given that the next available packet is a comfort
+ // noise payload (RFC 3389 only, not codec-internal).
+ virtual NetEq::Operation CngOperation(NetEqController::NetEqStatus status);
+
+ // Returns the operation given that no packets are available (except maybe
+ // a DTMF event, flagged by setting `play_dtmf` true).
+ virtual NetEq::Operation NoPacket(NetEqController::NetEqStatus status);
+
+ // Returns the operation to do given that the expected packet is available.
+ virtual NetEq::Operation ExpectedPacketAvailable(
+ NetEqController::NetEqStatus status);
+
+ // Returns the operation to do given that the expected packet is not
+ // available, but a packet further into the future is at hand.
+ virtual NetEq::Operation FuturePacketAvailable(
+ NetEqController::NetEqStatus status);
+
+ // Checks if enough time has elapsed since the last successful timescale
+ // operation was done (i.e., accelerate or preemptive expand).
+ bool TimescaleAllowed() const {
+ return !timescale_countdown_ || timescale_countdown_->Finished();
+ }
+
+ // Checks if the current (filtered) buffer level is under the target level.
+ bool UnderTargetLevel() const;
+
+ // Checks if `timestamp_leap` is so long into the future that a reset due
+  // to exceeding `reinit_after_expands` will be done.
+ bool ReinitAfterExpands(uint32_t timestamp_leap) const;
+
+ // Checks if we still have not done enough expands to cover the distance from
+  // the last decoded packet to the next available packet, the distance being
+ // conveyed in `timestamp_leap`.
+ bool PacketTooEarly(uint32_t timestamp_leap) const;
+
+ bool MaxWaitForPacket() const;
+
+ bool ShouldContinueExpand(NetEqController::NetEqStatus status) const;
+
+ int GetNextPacketDelayMs(NetEqController::NetEqStatus status) const;
+ int GetPlayoutDelayMs(NetEqController::NetEqStatus status) const;
+
+ int LowThreshold() const;
+ int HighThreshold() const;
+ int LowThresholdCng() const;
+ int HighThresholdCng() const;
+
+ // Runtime configurable options through field trial
+ // WebRTC-Audio-NetEqDecisionLogicConfig.
+ struct Config {
+ Config();
+
+ bool enable_stable_playout_delay = false;
+ int reinit_after_expands = 100;
+ int deceleration_target_level_offset_ms = 85;
+ int packet_history_size_ms = 2000;
+ };
+ Config config_;
+ std::unique_ptr<DelayManager> delay_manager_;
+ std::unique_ptr<BufferLevelFilter> buffer_level_filter_;
+ PacketArrivalHistory packet_arrival_history_;
+ const TickTimer* tick_timer_;
+ int sample_rate_khz_;
+ size_t output_size_samples_;
+  CngState cng_state_ = kCngOff;  // Remember if comfort noise is interrupted
+                                  // by another event (e.g., DTMF).
+ size_t noise_fast_forward_ = 0;
+ size_t packet_length_samples_ = 0;
+ int sample_memory_ = 0;
+ bool prev_time_scale_ = false;
+ bool disallow_time_stretching_;
+ std::unique_ptr<TickTimer::Countdown> timescale_countdown_;
+ int num_consecutive_expands_ = 0;
+ int time_stretched_cn_samples_ = 0;
+ bool buffer_flush_ = false;
+ int last_playout_delay_ms_ = 0;
+};
+
+} // namespace webrtc
+#endif // MODULES_AUDIO_CODING_NETEQ_DECISION_LOGIC_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/decision_logic_unittest.cc b/third_party/libwebrtc/modules/audio_coding/neteq/decision_logic_unittest.cc
new file mode 100644
index 0000000000..d70e3070f3
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/decision_logic_unittest.cc
@@ -0,0 +1,204 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+// Unit tests for DecisionLogic class and derived classes.
+
+#include "modules/audio_coding/neteq/decision_logic.h"
+
+#include "api/neteq/neteq_controller.h"
+#include "api/neteq/tick_timer.h"
+#include "modules/audio_coding/neteq/buffer_level_filter.h"
+#include "modules/audio_coding/neteq/delay_manager.h"
+#include "modules/audio_coding/neteq/mock/mock_buffer_level_filter.h"
+#include "modules/audio_coding/neteq/mock/mock_delay_manager.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+namespace {
+
+constexpr int kSampleRate = 8000;
+constexpr int kSamplesPerMs = kSampleRate / 1000;
+constexpr int kOutputSizeSamples = kSamplesPerMs * 10;
+constexpr int kMinTimescaleInterval = 5;
+
+NetEqController::NetEqStatus CreateNetEqStatus(NetEq::Mode last_mode,
+ int current_delay_ms) {
+ NetEqController::NetEqStatus status;
+ status.play_dtmf = false;
+ status.last_mode = last_mode;
+ status.target_timestamp = 1234;
+ status.generated_noise_samples = 0;
+ status.expand_mutefactor = 0;
+ status.packet_buffer_info.num_samples = current_delay_ms * kSamplesPerMs;
+ status.packet_buffer_info.span_samples = current_delay_ms * kSamplesPerMs;
+ status.packet_buffer_info.span_samples_no_dtx =
+ current_delay_ms * kSamplesPerMs;
+ status.packet_buffer_info.dtx_or_cng = false;
+ status.next_packet = {status.target_timestamp, false, false};
+ return status;
+}
+
+using ::testing::Return;
+
+} // namespace
+
+class DecisionLogicTest : public ::testing::Test {
+ protected:
+ DecisionLogicTest() {
+ NetEqController::Config config;
+ config.tick_timer = &tick_timer_;
+ config.allow_time_stretching = true;
+ auto delay_manager = std::make_unique<MockDelayManager>(
+ DelayManager::Config(), config.tick_timer);
+ mock_delay_manager_ = delay_manager.get();
+ auto buffer_level_filter = std::make_unique<MockBufferLevelFilter>();
+ mock_buffer_level_filter_ = buffer_level_filter.get();
+ decision_logic_ = std::make_unique<DecisionLogic>(
+ config, std::move(delay_manager), std::move(buffer_level_filter));
+ decision_logic_->SetSampleRate(kSampleRate, kOutputSizeSamples);
+ }
+
+ TickTimer tick_timer_;
+ std::unique_ptr<DecisionLogic> decision_logic_;
+ MockDelayManager* mock_delay_manager_;
+ MockBufferLevelFilter* mock_buffer_level_filter_;
+};
+
+TEST_F(DecisionLogicTest, NormalOperation) {
+ EXPECT_CALL(*mock_delay_manager_, TargetDelayMs())
+ .WillRepeatedly(Return(100));
+ EXPECT_CALL(*mock_buffer_level_filter_, filtered_current_level())
+ .WillRepeatedly(Return(90 * kSamplesPerMs));
+
+ bool reset_decoder = false;
+ tick_timer_.Increment(kMinTimescaleInterval + 1);
+ EXPECT_EQ(decision_logic_->GetDecision(
+ CreateNetEqStatus(NetEq::Mode::kNormal, 100), &reset_decoder),
+ NetEq::Operation::kNormal);
+ EXPECT_FALSE(reset_decoder);
+}
+
+TEST_F(DecisionLogicTest, Accelerate) {
+ EXPECT_CALL(*mock_delay_manager_, TargetDelayMs())
+ .WillRepeatedly(Return(100));
+ EXPECT_CALL(*mock_buffer_level_filter_, filtered_current_level())
+ .WillRepeatedly(Return(110 * kSamplesPerMs));
+
+ bool reset_decoder = false;
+ tick_timer_.Increment(kMinTimescaleInterval + 1);
+ EXPECT_EQ(decision_logic_->GetDecision(
+ CreateNetEqStatus(NetEq::Mode::kNormal, 100), &reset_decoder),
+ NetEq::Operation::kAccelerate);
+ EXPECT_FALSE(reset_decoder);
+}
+
+TEST_F(DecisionLogicTest, FastAccelerate) {
+ EXPECT_CALL(*mock_delay_manager_, TargetDelayMs())
+ .WillRepeatedly(Return(100));
+ EXPECT_CALL(*mock_buffer_level_filter_, filtered_current_level())
+ .WillRepeatedly(Return(400 * kSamplesPerMs));
+
+ bool reset_decoder = false;
+ tick_timer_.Increment(kMinTimescaleInterval + 1);
+ EXPECT_EQ(decision_logic_->GetDecision(
+ CreateNetEqStatus(NetEq::Mode::kNormal, 100), &reset_decoder),
+ NetEq::Operation::kFastAccelerate);
+ EXPECT_FALSE(reset_decoder);
+}
+
+TEST_F(DecisionLogicTest, PreemptiveExpand) {
+ EXPECT_CALL(*mock_delay_manager_, TargetDelayMs())
+ .WillRepeatedly(Return(100));
+ EXPECT_CALL(*mock_buffer_level_filter_, filtered_current_level())
+ .WillRepeatedly(Return(50 * kSamplesPerMs));
+
+ bool reset_decoder = false;
+ tick_timer_.Increment(kMinTimescaleInterval + 1);
+ EXPECT_EQ(decision_logic_->GetDecision(
+ CreateNetEqStatus(NetEq::Mode::kNormal, 100), &reset_decoder),
+ NetEq::Operation::kPreemptiveExpand);
+ EXPECT_FALSE(reset_decoder);
+}
+
+TEST_F(DecisionLogicTest, DecelerationTargetLevelOffset) {
+ EXPECT_CALL(*mock_delay_manager_, TargetDelayMs())
+ .WillRepeatedly(Return(500));
+ EXPECT_CALL(*mock_buffer_level_filter_, filtered_current_level())
+ .WillRepeatedly(Return(400 * kSamplesPerMs));
+
+ bool reset_decoder = false;
+ tick_timer_.Increment(kMinTimescaleInterval + 1);
+ EXPECT_EQ(decision_logic_->GetDecision(
+ CreateNetEqStatus(NetEq::Mode::kNormal, 400), &reset_decoder),
+ NetEq::Operation::kPreemptiveExpand);
+ EXPECT_FALSE(reset_decoder);
+}
+
+TEST_F(DecisionLogicTest, PostponeDecodeAfterExpand) {
+ EXPECT_CALL(*mock_delay_manager_, TargetDelayMs())
+ .WillRepeatedly(Return(500));
+
+ // Below 50% target delay threshold.
+ bool reset_decoder = false;
+ EXPECT_EQ(decision_logic_->GetDecision(
+ CreateNetEqStatus(NetEq::Mode::kExpand, 200), &reset_decoder),
+ NetEq::Operation::kExpand);
+ EXPECT_FALSE(reset_decoder);
+
+ // Above 50% target delay threshold.
+ EXPECT_EQ(decision_logic_->GetDecision(
+ CreateNetEqStatus(NetEq::Mode::kExpand, 250), &reset_decoder),
+ NetEq::Operation::kNormal);
+ EXPECT_FALSE(reset_decoder);
+}
+
+TEST_F(DecisionLogicTest, TimeStretchComfortNoise) {
+ EXPECT_CALL(*mock_delay_manager_, TargetDelayMs())
+ .WillRepeatedly(Return(500));
+
+ {
+ bool reset_decoder = false;
+ // Below target window.
+ auto status = CreateNetEqStatus(NetEq::Mode::kCodecInternalCng, 400);
+ status.generated_noise_samples = 400 * kSamplesPerMs;
+ status.next_packet->timestamp =
+ status.target_timestamp + 400 * kSamplesPerMs;
+ EXPECT_EQ(decision_logic_->GetDecision(status, &reset_decoder),
+ NetEq::Operation::kCodecInternalCng);
+ EXPECT_FALSE(reset_decoder);
+ }
+
+ {
+ bool reset_decoder = false;
+ // Above target window.
+ auto status = CreateNetEqStatus(NetEq::Mode::kCodecInternalCng, 600);
+ status.generated_noise_samples = 200 * kSamplesPerMs;
+ status.next_packet->timestamp =
+ status.target_timestamp + 400 * kSamplesPerMs;
+ EXPECT_EQ(decision_logic_->GetDecision(status, &reset_decoder),
+ NetEq::Operation::kNormal);
+ EXPECT_FALSE(reset_decoder);
+
+ // The buffer level filter should be adjusted with the number of samples
+    // that were skipped.
+ int timestamp_leap = status.next_packet->timestamp -
+ status.target_timestamp -
+ status.generated_noise_samples;
+ EXPECT_CALL(*mock_buffer_level_filter_,
+ Update(400 * kSamplesPerMs, timestamp_leap));
+ EXPECT_EQ(decision_logic_->GetDecision(
+ CreateNetEqStatus(NetEq::Mode::kNormal, 400), &reset_decoder),
+ NetEq::Operation::kNormal);
+ EXPECT_FALSE(reset_decoder);
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/decoder_database.cc b/third_party/libwebrtc/modules/audio_coding/neteq/decoder_database.cc
new file mode 100644
index 0000000000..3447ced1da
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/decoder_database.cc
@@ -0,0 +1,285 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/decoder_database.h"
+
+#include <stddef.h>
+
+#include <cstdint>
+#include <list>
+#include <type_traits>
+#include <utility>
+
+#include "absl/strings/match.h"
+#include "absl/strings/string_view.h"
+#include "api/audio_codecs/audio_decoder.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/strings/audio_format_to_string.h"
+
+namespace webrtc {
+
+DecoderDatabase::DecoderDatabase(
+ const rtc::scoped_refptr<AudioDecoderFactory>& decoder_factory,
+ absl::optional<AudioCodecPairId> codec_pair_id)
+ : active_decoder_type_(-1),
+ active_cng_decoder_type_(-1),
+ decoder_factory_(decoder_factory),
+ codec_pair_id_(codec_pair_id) {}
+
+DecoderDatabase::~DecoderDatabase() = default;
+
+DecoderDatabase::DecoderInfo::DecoderInfo(
+ const SdpAudioFormat& audio_format,
+ absl::optional<AudioCodecPairId> codec_pair_id,
+ AudioDecoderFactory* factory,
+ absl::string_view codec_name)
+ : name_(codec_name),
+ audio_format_(audio_format),
+ codec_pair_id_(codec_pair_id),
+ factory_(factory),
+ cng_decoder_(CngDecoder::Create(audio_format)),
+ subtype_(SubtypeFromFormat(audio_format)) {}
+
+DecoderDatabase::DecoderInfo::DecoderInfo(
+ const SdpAudioFormat& audio_format,
+ absl::optional<AudioCodecPairId> codec_pair_id,
+ AudioDecoderFactory* factory)
+ : DecoderInfo(audio_format, codec_pair_id, factory, audio_format.name) {}
+
+DecoderDatabase::DecoderInfo::DecoderInfo(DecoderInfo&&) = default;
+DecoderDatabase::DecoderInfo::~DecoderInfo() = default;
+
+AudioDecoder* DecoderDatabase::DecoderInfo::GetDecoder() const {
+ if (subtype_ != Subtype::kNormal) {
+ // These are handled internally, so they have no AudioDecoder objects.
+ return nullptr;
+ }
+ if (!decoder_) {
+ // TODO(ossu): Keep a check here for now, since a number of tests create
+ // DecoderInfos without factories.
+ RTC_DCHECK(factory_);
+ decoder_ = factory_->MakeAudioDecoder(audio_format_, codec_pair_id_);
+ }
+ RTC_DCHECK(decoder_) << "Failed to create: " << rtc::ToString(audio_format_);
+ return decoder_.get();
+}
+
+bool DecoderDatabase::DecoderInfo::IsType(absl::string_view name) const {
+ return absl::EqualsIgnoreCase(audio_format_.name, name);
+}
+
+absl::optional<DecoderDatabase::DecoderInfo::CngDecoder>
+DecoderDatabase::DecoderInfo::CngDecoder::Create(const SdpAudioFormat& format) {
+ if (absl::EqualsIgnoreCase(format.name, "CN")) {
+ // CN has a 1:1 RTP clock rate to sample rate ratio.
+ const int sample_rate_hz = format.clockrate_hz;
+ RTC_DCHECK(sample_rate_hz == 8000 || sample_rate_hz == 16000 ||
+ sample_rate_hz == 32000 || sample_rate_hz == 48000);
+ return DecoderDatabase::DecoderInfo::CngDecoder{sample_rate_hz};
+ } else {
+ return absl::nullopt;
+ }
+}
+
+DecoderDatabase::DecoderInfo::Subtype
+DecoderDatabase::DecoderInfo::SubtypeFromFormat(const SdpAudioFormat& format) {
+ if (absl::EqualsIgnoreCase(format.name, "CN")) {
+ return Subtype::kComfortNoise;
+ } else if (absl::EqualsIgnoreCase(format.name, "telephone-event")) {
+ return Subtype::kDtmf;
+ } else if (absl::EqualsIgnoreCase(format.name, "red")) {
+ return Subtype::kRed;
+ }
+
+ return Subtype::kNormal;
+}
+
+bool DecoderDatabase::Empty() const {
+ return decoders_.empty();
+}
+
+int DecoderDatabase::Size() const {
+ return static_cast<int>(decoders_.size());
+}
+
+std::vector<int> DecoderDatabase::SetCodecs(
+ const std::map<int, SdpAudioFormat>& codecs) {
+ // First collect all payload types that we'll remove or reassign, then remove
+ // them from the database.
+ std::vector<int> changed_payload_types;
+  for (const auto& kv : decoders_) {
+ auto i = codecs.find(kv.first);
+ if (i == codecs.end() || i->second != kv.second.GetFormat()) {
+ changed_payload_types.push_back(kv.first);
+ }
+ }
+ for (int pl_type : changed_payload_types) {
+ Remove(pl_type);
+ }
+
+ // Enter the new and changed payload type mappings into the database.
+ for (const auto& kv : codecs) {
+ const int& rtp_payload_type = kv.first;
+ const SdpAudioFormat& audio_format = kv.second;
+ RTC_DCHECK_GE(rtp_payload_type, 0);
+ RTC_DCHECK_LE(rtp_payload_type, 0x7f);
+ if (decoders_.count(rtp_payload_type) == 0) {
+ decoders_.insert(std::make_pair(
+ rtp_payload_type,
+ DecoderInfo(audio_format, codec_pair_id_, decoder_factory_.get())));
+ } else {
+ // The mapping for this payload type hasn't changed.
+ }
+ }
+
+ return changed_payload_types;
+}
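
For concreteness, a sketch of the SetCodecs() contract (illustrative only,
not part of the patch; `db` is assumed to be a DecoderDatabase that already
maps payload type 0 to pcmu/8000/1 and payload type 1 to l16/8000/1):

  // Payload type 0 keeps its format; payload type 1 changes sample rate.
  std::map<int, webrtc::SdpAudioFormat> codecs = {
      {0, webrtc::SdpAudioFormat("pcmu", 8000, 1)},
      {1, webrtc::SdpAudioFormat("l16", 16000, 1)}};
  // Only the changed mapping is reported; its decoder is re-created.
  std::vector<int> changed = db.SetCodecs(codecs);  // changed == {1}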
+
+int DecoderDatabase::RegisterPayload(int rtp_payload_type,
+ const SdpAudioFormat& audio_format) {
+ if (rtp_payload_type < 0 || rtp_payload_type > 0x7f) {
+ return kInvalidRtpPayloadType;
+ }
+ const auto ret = decoders_.insert(std::make_pair(
+ rtp_payload_type,
+ DecoderInfo(audio_format, codec_pair_id_, decoder_factory_.get())));
+ if (ret.second == false) {
+ // Database already contains a decoder with type `rtp_payload_type`.
+ return kDecoderExists;
+ }
+ return kOK;
+}
+
+int DecoderDatabase::Remove(uint8_t rtp_payload_type) {
+ if (decoders_.erase(rtp_payload_type) == 0) {
+ // No decoder with that `rtp_payload_type`.
+ return kDecoderNotFound;
+ }
+ if (active_decoder_type_ == rtp_payload_type) {
+ active_decoder_type_ = -1; // No active decoder.
+ }
+ if (active_cng_decoder_type_ == rtp_payload_type) {
+ active_cng_decoder_type_ = -1; // No active CNG decoder.
+ }
+ return kOK;
+}
+
+void DecoderDatabase::RemoveAll() {
+ decoders_.clear();
+ active_decoder_type_ = -1; // No active decoder.
+ active_cng_decoder_type_ = -1; // No active CNG decoder.
+}
+
+const DecoderDatabase::DecoderInfo* DecoderDatabase::GetDecoderInfo(
+ uint8_t rtp_payload_type) const {
+ DecoderMap::const_iterator it = decoders_.find(rtp_payload_type);
+ if (it == decoders_.end()) {
+ // Decoder not found.
+ return NULL;
+ }
+ return &it->second;
+}
+
+int DecoderDatabase::SetActiveDecoder(uint8_t rtp_payload_type,
+ bool* new_decoder) {
+ // Check that `rtp_payload_type` exists in the database.
+ const DecoderInfo* info = GetDecoderInfo(rtp_payload_type);
+ if (!info) {
+ // Decoder not found.
+ return kDecoderNotFound;
+ }
+ RTC_CHECK(!info->IsComfortNoise());
+ RTC_DCHECK(new_decoder);
+ *new_decoder = false;
+ if (active_decoder_type_ < 0) {
+ // This is the first active decoder.
+ *new_decoder = true;
+ } else if (active_decoder_type_ != rtp_payload_type) {
+ // Moving from one active decoder to another. Delete the first one.
+ const DecoderInfo* old_info = GetDecoderInfo(active_decoder_type_);
+ RTC_DCHECK(old_info);
+ old_info->DropDecoder();
+ *new_decoder = true;
+ }
+ active_decoder_type_ = rtp_payload_type;
+ return kOK;
+}
+
+AudioDecoder* DecoderDatabase::GetActiveDecoder() const {
+ if (active_decoder_type_ < 0) {
+ // No active decoder.
+ return NULL;
+ }
+ return GetDecoder(active_decoder_type_);
+}
+
+int DecoderDatabase::SetActiveCngDecoder(uint8_t rtp_payload_type) {
+ // Check that `rtp_payload_type` exists in the database.
+ const DecoderInfo* info = GetDecoderInfo(rtp_payload_type);
+ if (!info) {
+ // Decoder not found.
+ return kDecoderNotFound;
+ }
+ if (active_cng_decoder_type_ >= 0 &&
+ active_cng_decoder_type_ != rtp_payload_type) {
+ // Moving from one active CNG decoder to another. Delete the first one.
+ RTC_DCHECK(active_cng_decoder_);
+ active_cng_decoder_.reset();
+ }
+ active_cng_decoder_type_ = rtp_payload_type;
+ return kOK;
+}
+
+ComfortNoiseDecoder* DecoderDatabase::GetActiveCngDecoder() const {
+ if (active_cng_decoder_type_ < 0) {
+ // No active CNG decoder.
+ return NULL;
+ }
+ if (!active_cng_decoder_) {
+ active_cng_decoder_.reset(new ComfortNoiseDecoder);
+ }
+ return active_cng_decoder_.get();
+}
+
+AudioDecoder* DecoderDatabase::GetDecoder(uint8_t rtp_payload_type) const {
+ const DecoderInfo* info = GetDecoderInfo(rtp_payload_type);
+ return info ? info->GetDecoder() : nullptr;
+}
+
+bool DecoderDatabase::IsComfortNoise(uint8_t rtp_payload_type) const {
+ const DecoderInfo* info = GetDecoderInfo(rtp_payload_type);
+ return info && info->IsComfortNoise();
+}
+
+bool DecoderDatabase::IsDtmf(uint8_t rtp_payload_type) const {
+ const DecoderInfo* info = GetDecoderInfo(rtp_payload_type);
+ return info && info->IsDtmf();
+}
+
+bool DecoderDatabase::IsRed(uint8_t rtp_payload_type) const {
+ const DecoderInfo* info = GetDecoderInfo(rtp_payload_type);
+ return info && info->IsRed();
+}
+
+int DecoderDatabase::CheckPayloadTypes(const PacketList& packet_list) const {
+ PacketList::const_iterator it;
+ for (it = packet_list.begin(); it != packet_list.end(); ++it) {
+ if (!GetDecoderInfo(it->payload_type)) {
+ // Payload type is not found.
+ RTC_LOG(LS_WARNING) << "CheckPayloadTypes: unknown RTP payload type "
+ << static_cast<int>(it->payload_type);
+ return kDecoderNotFound;
+ }
+ }
+ return kOK;
+}
+
+} // namespace webrtc
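
A minimal usage sketch of the class above (illustrative only, not part of the
patch; it assumes the builtin decoder factory target is linked in):

  #include "api/audio_codecs/builtin_audio_decoder_factory.h"
  #include "modules/audio_coding/neteq/decoder_database.h"

  void ExampleDecoderDatabaseUsage() {
    webrtc::DecoderDatabase db(webrtc::CreateBuiltinAudioDecoderFactory(),
                               absl::nullopt);
    // Map RTP payload type 0 to PCMu and 13 to comfort noise.
    db.RegisterPayload(0, webrtc::SdpAudioFormat("pcmu", 8000, 1));
    db.RegisterPayload(13, webrtc::SdpAudioFormat("cn", 8000, 1));
    bool new_decoder = false;
    db.SetActiveDecoder(0, &new_decoder);  // new_decoder becomes true.
    db.SetActiveCngDecoder(13);
    // The AudioDecoder object is created lazily on first access.
    webrtc::AudioDecoder* decoder = db.GetActiveDecoder();
    (void)decoder;
  }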
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/decoder_database.h b/third_party/libwebrtc/modules/audio_coding/neteq/decoder_database.h
new file mode 100644
index 0000000000..8cf2019135
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/decoder_database.h
@@ -0,0 +1,204 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_DECODER_DATABASE_H_
+#define MODULES_AUDIO_CODING_NETEQ_DECODER_DATABASE_H_
+
+#include <map>
+#include <memory>
+#include <string>
+
+#include "absl/strings/string_view.h"
+#include "api/audio_codecs/audio_decoder_factory.h"
+#include "api/audio_codecs/audio_format.h"
+#include "api/scoped_refptr.h"
+#include "modules/audio_coding/codecs/cng/webrtc_cng.h"
+#include "modules/audio_coding/neteq/packet.h"
+
+namespace webrtc {
+
+class DecoderDatabase {
+ public:
+ enum DatabaseReturnCodes {
+ kOK = 0,
+ kInvalidRtpPayloadType = -1,
+ kCodecNotSupported = -2,
+ kInvalidSampleRate = -3,
+ kDecoderExists = -4,
+ kDecoderNotFound = -5,
+ kInvalidPointer = -6
+ };
+
+ // Class that stores decoder info in the database.
+ class DecoderInfo {
+ public:
+ DecoderInfo(const SdpAudioFormat& audio_format,
+ absl::optional<AudioCodecPairId> codec_pair_id,
+ AudioDecoderFactory* factory,
+ absl::string_view codec_name);
+ explicit DecoderInfo(const SdpAudioFormat& audio_format,
+ absl::optional<AudioCodecPairId> codec_pair_id,
+ AudioDecoderFactory* factory = nullptr);
+ DecoderInfo(DecoderInfo&&);
+ ~DecoderInfo();
+
+ // Get the AudioDecoder object, creating it first if necessary.
+ AudioDecoder* GetDecoder() const;
+
+ // Delete the AudioDecoder object, unless it's external. (This means we can
+ // always recreate it later if we need it.)
+ void DropDecoder() const { decoder_.reset(); }
+
+ int SampleRateHz() const {
+ if (IsDtmf()) {
+ // DTMF has a 1:1 mapping between clock rate and sample rate.
+ return audio_format_.clockrate_hz;
+ }
+ const AudioDecoder* decoder = GetDecoder();
+ RTC_DCHECK_EQ(1, !!decoder + !!cng_decoder_);
+ return decoder ? decoder->SampleRateHz() : cng_decoder_->sample_rate_hz;
+ }
+
+ const SdpAudioFormat& GetFormat() const { return audio_format_; }
+
+ // Returns true if the decoder's format is comfort noise.
+ bool IsComfortNoise() const {
+ RTC_DCHECK_EQ(!!cng_decoder_, subtype_ == Subtype::kComfortNoise);
+ return subtype_ == Subtype::kComfortNoise;
+ }
+
+ // Returns true if the decoder's format is DTMF.
+ bool IsDtmf() const { return subtype_ == Subtype::kDtmf; }
+
+ // Returns true if the decoder's format is RED.
+ bool IsRed() const { return subtype_ == Subtype::kRed; }
+
+ // Returns true if the decoder's format is named `name`.
+ bool IsType(absl::string_view name) const;
+
+ const std::string& get_name() const { return name_; }
+
+ private:
+ // TODO(ossu): `name_` is kept here while we retain the old external
+ // decoder interface. Remove this once using an
+ // AudioDecoderFactory has supplanted the old functionality.
+ const std::string name_;
+
+ const SdpAudioFormat audio_format_;
+ const absl::optional<AudioCodecPairId> codec_pair_id_;
+ AudioDecoderFactory* const factory_;
+ mutable std::unique_ptr<AudioDecoder> decoder_;
+
+ // Set iff this is a comfort noise decoder.
+ struct CngDecoder {
+ static absl::optional<CngDecoder> Create(const SdpAudioFormat& format);
+ int sample_rate_hz;
+ };
+ const absl::optional<CngDecoder> cng_decoder_;
+
+ enum class Subtype : int8_t { kNormal, kComfortNoise, kDtmf, kRed };
+
+ static Subtype SubtypeFromFormat(const SdpAudioFormat& format);
+
+ const Subtype subtype_;
+ };
+
+ // Maximum value for 8 bits, and an invalid RTP payload type (since it is
+ // only 7 bits).
+ static const uint8_t kRtpPayloadTypeError = 0xFF;
+
+ DecoderDatabase(
+ const rtc::scoped_refptr<AudioDecoderFactory>& decoder_factory,
+ absl::optional<AudioCodecPairId> codec_pair_id);
+
+ virtual ~DecoderDatabase();
+
+ DecoderDatabase(const DecoderDatabase&) = delete;
+ DecoderDatabase& operator=(const DecoderDatabase&) = delete;
+
+ // Returns true if the database is empty.
+ virtual bool Empty() const;
+
+ // Returns the number of decoders registered in the database.
+ virtual int Size() const;
+
+ // Replaces the existing set of decoders with the given set. Returns the
+ // payload types that were reassigned or removed while doing so.
+ virtual std::vector<int> SetCodecs(
+ const std::map<int, SdpAudioFormat>& codecs);
+
+ // Registers a decoder for the given payload type. Returns kOK on success;
+ // otherwise an error code.
+ virtual int RegisterPayload(int rtp_payload_type,
+ const SdpAudioFormat& audio_format);
+
+ // Removes the entry for `rtp_payload_type` from the database.
+ // Returns kDecoderNotFound or kOK depending on the outcome of the operation.
+ virtual int Remove(uint8_t rtp_payload_type);
+
+ // Remove all entries.
+ virtual void RemoveAll();
+
+ // Returns a pointer to the DecoderInfo struct for `rtp_payload_type`. If
+ // no decoder is registered with that `rtp_payload_type`, NULL is returned.
+ virtual const DecoderInfo* GetDecoderInfo(uint8_t rtp_payload_type) const;
+
+ // Sets the active decoder to be `rtp_payload_type`. If this call results in a
+ // change of active decoder, `new_decoder` is set to true. The previous active
+ // decoder's AudioDecoder object is deleted.
+ virtual int SetActiveDecoder(uint8_t rtp_payload_type, bool* new_decoder);
+
+ // Returns the current active decoder, or NULL if no active decoder exists.
+ virtual AudioDecoder* GetActiveDecoder() const;
+
+ // Sets the active comfort noise decoder to be `rtp_payload_type`. If this
+ // call results in a change of active comfort noise decoder, the previous
+ // active decoder's AudioDecoder object is deleted.
+ virtual int SetActiveCngDecoder(uint8_t rtp_payload_type);
+
+ // Returns the current active comfort noise decoder, or NULL if no active
+ // comfort noise decoder exists.
+ virtual ComfortNoiseDecoder* GetActiveCngDecoder() const;
+
+ // The following are utility methods: they will look up DecoderInfo through
+ // GetDecoderInfo and call the respective method on that info object, if it
+ // exists.
+
+ // Returns a pointer to the AudioDecoder object associated with
+ // `rtp_payload_type`, or NULL if none is registered. If the AudioDecoder
+ // object does not exist for that decoder, the object is created.
+ AudioDecoder* GetDecoder(uint8_t rtp_payload_type) const;
+
+ // Returns true if `rtp_payload_type` is registered as comfort noise.
+ bool IsComfortNoise(uint8_t rtp_payload_type) const;
+
+ // Returns true if `rtp_payload_type` is registered as DTMF.
+ bool IsDtmf(uint8_t rtp_payload_type) const;
+
+ // Returns true if `rtp_payload_type` is registered as RED.
+ bool IsRed(uint8_t rtp_payload_type) const;
+
+ // Returns kOK if all packets in `packet_list` carry payload types that are
+ // registered in the database. Otherwise, returns kDecoderNotFound.
+ int CheckPayloadTypes(const PacketList& packet_list) const;
+
+ private:
+ typedef std::map<uint8_t, DecoderInfo> DecoderMap;
+
+ DecoderMap decoders_;
+ int active_decoder_type_;
+ int active_cng_decoder_type_;
+ mutable std::unique_ptr<ComfortNoiseDecoder> active_cng_decoder_;
+ rtc::scoped_refptr<AudioDecoderFactory> decoder_factory_;
+ const absl::optional<AudioCodecPairId> codec_pair_id_;
+};
+
+} // namespace webrtc
+#endif // MODULES_AUDIO_CODING_NETEQ_DECODER_DATABASE_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/decoder_database_unittest.cc b/third_party/libwebrtc/modules/audio_coding/neteq/decoder_database_unittest.cc
new file mode 100644
index 0000000000..445c21924b
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/decoder_database_unittest.cc
@@ -0,0 +1,227 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/decoder_database.h"
+
+#include <stdlib.h>
+
+#include <string>
+
+#include "api/audio_codecs/builtin_audio_decoder_factory.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "test/mock_audio_decoder.h"
+#include "test/mock_audio_decoder_factory.h"
+
+using ::testing::_;
+using ::testing::Invoke;
+
+namespace webrtc {
+
+TEST(DecoderDatabase, CreateAndDestroy) {
+ DecoderDatabase db(rtc::make_ref_counted<MockAudioDecoderFactory>(),
+ absl::nullopt);
+ EXPECT_EQ(0, db.Size());
+ EXPECT_TRUE(db.Empty());
+}
+
+TEST(DecoderDatabase, InsertAndRemove) {
+ auto factory = rtc::make_ref_counted<MockAudioDecoderFactory>();
+ DecoderDatabase db(factory, absl::nullopt);
+ const uint8_t kPayloadType = 0;
+ const std::string kCodecName = "Robert\'); DROP TABLE Students;";
+ EXPECT_EQ(
+ DecoderDatabase::kOK,
+ db.RegisterPayload(kPayloadType, SdpAudioFormat(kCodecName, 8000, 1)));
+ EXPECT_EQ(1, db.Size());
+ EXPECT_FALSE(db.Empty());
+ EXPECT_EQ(DecoderDatabase::kOK, db.Remove(kPayloadType));
+ EXPECT_EQ(0, db.Size());
+ EXPECT_TRUE(db.Empty());
+}
+
+TEST(DecoderDatabase, InsertAndRemoveAll) {
+ auto factory = rtc::make_ref_counted<MockAudioDecoderFactory>();
+ DecoderDatabase db(factory, absl::nullopt);
+ const std::string kCodecName1 = "Robert\'); DROP TABLE Students;";
+ const std::string kCodecName2 = "https://xkcd.com/327/";
+ EXPECT_EQ(DecoderDatabase::kOK,
+ db.RegisterPayload(0, SdpAudioFormat(kCodecName1, 8000, 1)));
+ EXPECT_EQ(DecoderDatabase::kOK,
+ db.RegisterPayload(1, SdpAudioFormat(kCodecName2, 8000, 1)));
+ EXPECT_EQ(2, db.Size());
+ EXPECT_FALSE(db.Empty());
+ db.RemoveAll();
+ EXPECT_EQ(0, db.Size());
+ EXPECT_TRUE(db.Empty());
+}
+
+TEST(DecoderDatabase, GetDecoderInfo) {
+ auto factory = rtc::make_ref_counted<MockAudioDecoderFactory>();
+ auto* decoder = new MockAudioDecoder;
+ EXPECT_CALL(*factory, MakeAudioDecoderMock(_, _, _))
+ .WillOnce(Invoke([decoder](const SdpAudioFormat& format,
+ absl::optional<AudioCodecPairId> codec_pair_id,
+ std::unique_ptr<AudioDecoder>* dec) {
+ EXPECT_EQ("pcmu", format.name);
+ dec->reset(decoder);
+ }));
+ DecoderDatabase db(factory, absl::nullopt);
+ const uint8_t kPayloadType = 0;
+ const std::string kCodecName = "pcmu";
+ EXPECT_EQ(
+ DecoderDatabase::kOK,
+ db.RegisterPayload(kPayloadType, SdpAudioFormat(kCodecName, 8000, 1)));
+ const DecoderDatabase::DecoderInfo* info;
+ info = db.GetDecoderInfo(kPayloadType);
+ ASSERT_TRUE(info != NULL);
+ EXPECT_TRUE(info->IsType("pcmu"));
+ EXPECT_EQ(kCodecName, info->get_name());
+ EXPECT_EQ(decoder, db.GetDecoder(kPayloadType));
+ info = db.GetDecoderInfo(kPayloadType + 1); // Other payload type.
+ EXPECT_TRUE(info == NULL); // Should not be found.
+}
+
+TEST(DecoderDatabase, GetDecoder) {
+ DecoderDatabase db(CreateBuiltinAudioDecoderFactory(), absl::nullopt);
+ const uint8_t kPayloadType = 0;
+ EXPECT_EQ(DecoderDatabase::kOK,
+ db.RegisterPayload(kPayloadType, SdpAudioFormat("l16", 8000, 1)));
+ AudioDecoder* dec = db.GetDecoder(kPayloadType);
+ ASSERT_TRUE(dec != NULL);
+}
+
+TEST(DecoderDatabase, TypeTests) {
+ auto factory = rtc::make_ref_counted<MockAudioDecoderFactory>();
+ DecoderDatabase db(factory, absl::nullopt);
+ const uint8_t kPayloadTypePcmU = 0;
+ const uint8_t kPayloadTypeCng = 13;
+ const uint8_t kPayloadTypeDtmf = 100;
+ const uint8_t kPayloadTypeRed = 101;
+ const uint8_t kPayloadNotUsed = 102;
+ // Load into database.
+ EXPECT_EQ(
+ DecoderDatabase::kOK,
+ db.RegisterPayload(kPayloadTypePcmU, SdpAudioFormat("pcmu", 8000, 1)));
+ EXPECT_EQ(DecoderDatabase::kOK,
+ db.RegisterPayload(kPayloadTypeCng, SdpAudioFormat("cn", 8000, 1)));
+ EXPECT_EQ(DecoderDatabase::kOK,
+ db.RegisterPayload(kPayloadTypeDtmf,
+ SdpAudioFormat("telephone-event", 8000, 1)));
+ EXPECT_EQ(
+ DecoderDatabase::kOK,
+ db.RegisterPayload(kPayloadTypeRed, SdpAudioFormat("red", 8000, 1)));
+ EXPECT_EQ(4, db.Size());
+ // Test.
+ EXPECT_FALSE(db.IsComfortNoise(kPayloadNotUsed));
+ EXPECT_FALSE(db.IsDtmf(kPayloadNotUsed));
+ EXPECT_FALSE(db.IsRed(kPayloadNotUsed));
+ EXPECT_FALSE(db.IsComfortNoise(kPayloadTypePcmU));
+ EXPECT_FALSE(db.IsDtmf(kPayloadTypePcmU));
+ EXPECT_FALSE(db.IsRed(kPayloadTypePcmU));
+ EXPECT_TRUE(db.IsComfortNoise(kPayloadTypeCng));
+ EXPECT_TRUE(db.IsDtmf(kPayloadTypeDtmf));
+ EXPECT_TRUE(db.IsRed(kPayloadTypeRed));
+}
+
+TEST(DecoderDatabase, CheckPayloadTypes) {
+ constexpr int kNumPayloads = 10;
+ auto factory = rtc::make_ref_counted<MockAudioDecoderFactory>();
+ DecoderDatabase db(factory, absl::nullopt);
+ // Load a number of payloads into the database. Payload types are 0, 1, ...,
+ // while the decoder type is the same for all payload types (this does not
+ // matter for the test).
+ for (uint8_t payload_type = 0; payload_type < kNumPayloads; ++payload_type) {
+ EXPECT_EQ(
+ DecoderDatabase::kOK,
+ db.RegisterPayload(payload_type, SdpAudioFormat("pcmu", 8000, 1)));
+ }
+ PacketList packet_list;
+ for (int i = 0; i < kNumPayloads + 1; ++i) {
+ // Create packet with payload type `i`. The last packet will have a payload
+ // type that is not registered in the decoder database.
+ Packet packet;
+ packet.payload_type = i;
+ packet_list.push_back(std::move(packet));
+ }
+
+  // Expect kDecoderNotFound, since the last packet has an unknown payload
+  // type.
+ EXPECT_EQ(DecoderDatabase::kDecoderNotFound,
+ db.CheckPayloadTypes(packet_list));
+
+ packet_list.pop_back(); // Remove the unknown one.
+
+ EXPECT_EQ(DecoderDatabase::kOK, db.CheckPayloadTypes(packet_list));
+
+ // Delete all packets.
+ PacketList::iterator it = packet_list.begin();
+ while (it != packet_list.end()) {
+ it = packet_list.erase(it);
+ }
+}
+
+#if defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX)
+#define IF_ISAC(x) x
+#else
+#define IF_ISAC(x) DISABLED_##x
+#endif
+
+// Test the methods for setting and getting active speech and CNG decoders.
+TEST(DecoderDatabase, IF_ISAC(ActiveDecoders)) {
+ DecoderDatabase db(CreateBuiltinAudioDecoderFactory(), absl::nullopt);
+ // Load payload types.
+ ASSERT_EQ(DecoderDatabase::kOK,
+ db.RegisterPayload(0, SdpAudioFormat("pcmu", 8000, 1)));
+ ASSERT_EQ(DecoderDatabase::kOK,
+ db.RegisterPayload(103, SdpAudioFormat("isac", 16000, 1)));
+ ASSERT_EQ(DecoderDatabase::kOK,
+ db.RegisterPayload(13, SdpAudioFormat("cn", 8000, 1)));
+ // Verify that no decoders are active from the start.
+ EXPECT_EQ(NULL, db.GetActiveDecoder());
+ EXPECT_EQ(NULL, db.GetActiveCngDecoder());
+
+ // Set active speech codec.
+ bool changed; // Should be true when the active decoder changed.
+ EXPECT_EQ(DecoderDatabase::kOK, db.SetActiveDecoder(0, &changed));
+ EXPECT_TRUE(changed);
+ AudioDecoder* decoder = db.GetActiveDecoder();
+ ASSERT_FALSE(decoder == NULL); // Should get a decoder here.
+
+ // Set the same again. Expect no change.
+ EXPECT_EQ(DecoderDatabase::kOK, db.SetActiveDecoder(0, &changed));
+ EXPECT_FALSE(changed);
+ decoder = db.GetActiveDecoder();
+ ASSERT_FALSE(decoder == NULL); // Should get a decoder here.
+
+ // Change active decoder.
+ EXPECT_EQ(DecoderDatabase::kOK, db.SetActiveDecoder(103, &changed));
+ EXPECT_TRUE(changed);
+ decoder = db.GetActiveDecoder();
+ ASSERT_FALSE(decoder == NULL); // Should get a decoder here.
+
+ // Remove the active decoder, and verify that the active becomes NULL.
+ EXPECT_EQ(DecoderDatabase::kOK, db.Remove(103));
+ EXPECT_EQ(NULL, db.GetActiveDecoder());
+
+ // Set active CNG codec.
+ EXPECT_EQ(DecoderDatabase::kOK, db.SetActiveCngDecoder(13));
+ ComfortNoiseDecoder* cng = db.GetActiveCngDecoder();
+ ASSERT_FALSE(cng == NULL); // Should get a decoder here.
+
+ // Remove the active CNG decoder, and verify that the active becomes NULL.
+ EXPECT_EQ(DecoderDatabase::kOK, db.Remove(13));
+ EXPECT_EQ(NULL, db.GetActiveCngDecoder());
+
+ // Try to set non-existing codecs as active.
+ EXPECT_EQ(DecoderDatabase::kDecoderNotFound,
+ db.SetActiveDecoder(17, &changed));
+ EXPECT_EQ(DecoderDatabase::kDecoderNotFound, db.SetActiveCngDecoder(17));
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/default_neteq_factory.cc b/third_party/libwebrtc/modules/audio_coding/neteq/default_neteq_factory.cc
new file mode 100644
index 0000000000..487450fe0f
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/default_neteq_factory.cc
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/default_neteq_factory.h"
+
+#include <utility>
+
+#include "modules/audio_coding/neteq/neteq_impl.h"
+
+namespace webrtc {
+
+DefaultNetEqFactory::DefaultNetEqFactory() = default;
+DefaultNetEqFactory::~DefaultNetEqFactory() = default;
+
+std::unique_ptr<NetEq> DefaultNetEqFactory::CreateNetEq(
+ const NetEq::Config& config,
+ const rtc::scoped_refptr<AudioDecoderFactory>& decoder_factory,
+ Clock* clock) const {
+ return std::make_unique<NetEqImpl>(
+ config, NetEqImpl::Dependencies(config, clock, decoder_factory,
+ controller_factory_));
+}
+
+} // namespace webrtc
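
A sketch of typical factory usage (illustrative only; the exact NetEq::Config
fields are assumptions about api/neteq/neteq.h):

  webrtc::DefaultNetEqFactory factory;
  webrtc::NetEq::Config config;
  config.sample_rate_hz = 16000;  // Assumed config field.
  std::unique_ptr<webrtc::NetEq> neteq = factory.CreateNetEq(
      config, webrtc::CreateBuiltinAudioDecoderFactory(),
      webrtc::Clock::GetRealTimeClock());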
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/default_neteq_factory.h b/third_party/libwebrtc/modules/audio_coding/neteq/default_neteq_factory.h
new file mode 100644
index 0000000000..24d2bae419
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/default_neteq_factory.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_DEFAULT_NETEQ_FACTORY_H_
+#define MODULES_AUDIO_CODING_NETEQ_DEFAULT_NETEQ_FACTORY_H_
+
+#include <memory>
+
+#include "api/audio_codecs/audio_decoder_factory.h"
+#include "api/neteq/default_neteq_controller_factory.h"
+#include "api/neteq/neteq_factory.h"
+#include "api/scoped_refptr.h"
+#include "system_wrappers/include/clock.h"
+
+namespace webrtc {
+
+class DefaultNetEqFactory : public NetEqFactory {
+ public:
+ DefaultNetEqFactory();
+ ~DefaultNetEqFactory() override;
+ DefaultNetEqFactory(const DefaultNetEqFactory&) = delete;
+ DefaultNetEqFactory& operator=(const DefaultNetEqFactory&) = delete;
+
+ std::unique_ptr<NetEq> CreateNetEq(
+ const NetEq::Config& config,
+ const rtc::scoped_refptr<AudioDecoderFactory>& decoder_factory,
+ Clock* clock) const override;
+
+ private:
+ const DefaultNetEqControllerFactory controller_factory_;
+};
+
+} // namespace webrtc
+#endif // MODULES_AUDIO_CODING_NETEQ_DEFAULT_NETEQ_FACTORY_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/delay_manager.cc b/third_party/libwebrtc/modules/audio_coding/neteq/delay_manager.cc
new file mode 100644
index 0000000000..bf3a0f18a1
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/delay_manager.cc
@@ -0,0 +1,207 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/delay_manager.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <algorithm>
+#include <memory>
+#include <numeric>
+#include <string>
+
+#include "modules/include/module_common_types_public.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/experiments/struct_parameters_parser.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "rtc_base/numerics/safe_minmax.h"
+#include "system_wrappers/include/field_trial.h"
+
+namespace webrtc {
+namespace {
+
+constexpr int kMinBaseMinimumDelayMs = 0;
+constexpr int kMaxBaseMinimumDelayMs = 10000;
+constexpr int kStartDelayMs = 80;
+
+std::unique_ptr<ReorderOptimizer> MaybeCreateReorderOptimizer(
+ const DelayManager::Config& config) {
+ if (!config.use_reorder_optimizer) {
+ return nullptr;
+ }
+ return std::make_unique<ReorderOptimizer>(
+ (1 << 15) * config.reorder_forget_factor, config.ms_per_loss_percent,
+ config.start_forget_weight);
+}
+
+} // namespace
+
+DelayManager::Config::Config() {
+ StructParametersParser::Create( //
+ "quantile", &quantile, //
+ "forget_factor", &forget_factor, //
+ "start_forget_weight", &start_forget_weight, //
+ "resample_interval_ms", &resample_interval_ms, //
+ "use_reorder_optimizer", &use_reorder_optimizer, //
+ "reorder_forget_factor", &reorder_forget_factor, //
+ "ms_per_loss_percent", &ms_per_loss_percent)
+ ->Parse(webrtc::field_trial::FindFullName(
+ "WebRTC-Audio-NetEqDelayManagerConfig"));
+}
+
+void DelayManager::Config::Log() {
+ RTC_LOG(LS_INFO) << "Delay manager config:"
+ " quantile="
+ << quantile << " forget_factor=" << forget_factor
+ << " start_forget_weight=" << start_forget_weight.value_or(0)
+ << " resample_interval_ms="
+ << resample_interval_ms.value_or(0)
+ << " use_reorder_optimizer=" << use_reorder_optimizer
+ << " reorder_forget_factor=" << reorder_forget_factor
+ << " ms_per_loss_percent=" << ms_per_loss_percent;
+}
+
+DelayManager::DelayManager(const Config& config, const TickTimer* tick_timer)
+ : max_packets_in_buffer_(config.max_packets_in_buffer),
+ underrun_optimizer_(tick_timer,
+ (1 << 30) * config.quantile,
+ (1 << 15) * config.forget_factor,
+ config.start_forget_weight,
+ config.resample_interval_ms),
+ reorder_optimizer_(MaybeCreateReorderOptimizer(config)),
+ base_minimum_delay_ms_(config.base_minimum_delay_ms),
+ effective_minimum_delay_ms_(config.base_minimum_delay_ms),
+ minimum_delay_ms_(0),
+ maximum_delay_ms_(0),
+ target_level_ms_(kStartDelayMs) {
+ RTC_DCHECK_GE(base_minimum_delay_ms_, 0);
+
+ Reset();
+}
+
+DelayManager::~DelayManager() {}
+
+void DelayManager::Update(int arrival_delay_ms, bool reordered) {
+ if (!reorder_optimizer_ || !reordered) {
+ underrun_optimizer_.Update(arrival_delay_ms);
+ }
+ target_level_ms_ =
+ underrun_optimizer_.GetOptimalDelayMs().value_or(kStartDelayMs);
+ if (reorder_optimizer_) {
+ reorder_optimizer_->Update(arrival_delay_ms, reordered, target_level_ms_);
+ target_level_ms_ = std::max(
+ target_level_ms_, reorder_optimizer_->GetOptimalDelayMs().value_or(0));
+ }
+ unlimited_target_level_ms_ = target_level_ms_;
+ target_level_ms_ = std::max(target_level_ms_, effective_minimum_delay_ms_);
+ if (maximum_delay_ms_ > 0) {
+ target_level_ms_ = std::min(target_level_ms_, maximum_delay_ms_);
+ }
+ if (packet_len_ms_ > 0) {
+ // Limit to 75% of maximum buffer size.
+ target_level_ms_ = std::min(
+ target_level_ms_, 3 * max_packets_in_buffer_ * packet_len_ms_ / 4);
+ }
+}
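
For concreteness, with the externally populated defaults
(max_packets_in_buffer = 200) and 20 ms packets, the last clamp evaluates to
3 * 200 * 20 / 4 = 3000 ms, i.e. the target level can never exceed 75% of the
buffer capacity.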
+
+int DelayManager::SetPacketAudioLength(int length_ms) {
+ if (length_ms <= 0) {
+ RTC_LOG_F(LS_ERROR) << "length_ms = " << length_ms;
+ return -1;
+ }
+ packet_len_ms_ = length_ms;
+ return 0;
+}
+
+void DelayManager::Reset() {
+ packet_len_ms_ = 0;
+ underrun_optimizer_.Reset();
+ target_level_ms_ = kStartDelayMs;
+ if (reorder_optimizer_) {
+ reorder_optimizer_->Reset();
+ }
+}
+
+int DelayManager::TargetDelayMs() const {
+ return target_level_ms_;
+}
+
+int DelayManager::UnlimitedTargetLevelMs() const {
+ return unlimited_target_level_ms_;
+}
+
+bool DelayManager::IsValidMinimumDelay(int delay_ms) const {
+ return 0 <= delay_ms && delay_ms <= MinimumDelayUpperBound();
+}
+
+bool DelayManager::IsValidBaseMinimumDelay(int delay_ms) const {
+ return kMinBaseMinimumDelayMs <= delay_ms &&
+ delay_ms <= kMaxBaseMinimumDelayMs;
+}
+
+bool DelayManager::SetMinimumDelay(int delay_ms) {
+ if (!IsValidMinimumDelay(delay_ms)) {
+ return false;
+ }
+
+ minimum_delay_ms_ = delay_ms;
+ UpdateEffectiveMinimumDelay();
+ return true;
+}
+
+bool DelayManager::SetMaximumDelay(int delay_ms) {
+  // If `delay_ms` is zero, the maximum delay is unset and the target level is
+  // unconstrained by any maximum delay.
+  if (delay_ms != 0 && delay_ms < minimum_delay_ms_) {
+    // A nonzero maximum delay must not be less than the minimum delay.
+    return false;
+ }
+
+ maximum_delay_ms_ = delay_ms;
+ UpdateEffectiveMinimumDelay();
+ return true;
+}
+
+bool DelayManager::SetBaseMinimumDelay(int delay_ms) {
+ if (!IsValidBaseMinimumDelay(delay_ms)) {
+ return false;
+ }
+
+ base_minimum_delay_ms_ = delay_ms;
+ UpdateEffectiveMinimumDelay();
+ return true;
+}
+
+int DelayManager::GetBaseMinimumDelay() const {
+ return base_minimum_delay_ms_;
+}
+
+void DelayManager::UpdateEffectiveMinimumDelay() {
+ // Clamp `base_minimum_delay_ms_` into the range which can be effectively
+ // used.
+ const int base_minimum_delay_ms =
+ rtc::SafeClamp(base_minimum_delay_ms_, 0, MinimumDelayUpperBound());
+ effective_minimum_delay_ms_ =
+ std::max(minimum_delay_ms_, base_minimum_delay_ms);
+}
+
+int DelayManager::MinimumDelayUpperBound() const {
+  // Choose the lowest applicable bound, ignoring zero values, since a zero
+  // means the corresponding limit is unset and unconstrained.
+ int q75 = max_packets_in_buffer_ * packet_len_ms_ * 3 / 4;
+ q75 = q75 > 0 ? q75 : kMaxBaseMinimumDelayMs;
+ const int maximum_delay_ms =
+ maximum_delay_ms_ > 0 ? maximum_delay_ms_ : kMaxBaseMinimumDelayMs;
+ return std::min(maximum_delay_ms, q75);
+}
+
+} // namespace webrtc
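
A minimal lifecycle sketch for the class above (illustrative only, not part
of the patch):

  webrtc::TickTimer tick_timer;
  webrtc::DelayManager::Config config;  // May be overridden by field trial.
  webrtc::DelayManager dm(config, &tick_timer);
  dm.SetPacketAudioLength(20);  // 20 ms of audio per packet.
  dm.Update(/*arrival_delay_ms=*/30, /*reordered=*/false);
  int target_ms = dm.TargetDelayMs();              // Respects min/max.
  int unlimited_ms = dm.UnlimitedTargetLevelMs();  // Ignores min/max.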
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/delay_manager.h b/third_party/libwebrtc/modules/audio_coding/neteq/delay_manager.h
new file mode 100644
index 0000000000..a333681535
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/delay_manager.h
@@ -0,0 +1,121 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_DELAY_MANAGER_H_
+#define MODULES_AUDIO_CODING_NETEQ_DELAY_MANAGER_H_
+
+#include <string.h> // Provide access to size_t.
+
+#include <deque>
+#include <memory>
+
+#include "absl/types/optional.h"
+#include "api/neteq/tick_timer.h"
+#include "modules/audio_coding/neteq/histogram.h"
+#include "modules/audio_coding/neteq/reorder_optimizer.h"
+#include "modules/audio_coding/neteq/underrun_optimizer.h"
+
+namespace webrtc {
+
+class DelayManager {
+ public:
+ struct Config {
+ Config();
+ void Log();
+
+ // Options that can be configured via field trial.
+ double quantile = 0.95;
+ double forget_factor = 0.983;
+ absl::optional<double> start_forget_weight = 2;
+ absl::optional<int> resample_interval_ms = 500;
+
+ bool use_reorder_optimizer = true;
+ double reorder_forget_factor = 0.9993;
+ int ms_per_loss_percent = 20;
+
+ // Options that are externally populated.
+ int max_packets_in_buffer = 200;
+ int base_minimum_delay_ms = 0;
+ };
+
+ DelayManager(const Config& config, const TickTimer* tick_timer);
+
+ virtual ~DelayManager();
+
+ DelayManager(const DelayManager&) = delete;
+ DelayManager& operator=(const DelayManager&) = delete;
+
+ // Updates the delay manager that a new packet arrived with delay
+ // `arrival_delay_ms`. This updates the statistics and a new target buffer
+ // level is calculated. The `reordered` flag indicates if the packet was
+ // reordered.
+ virtual void Update(int arrival_delay_ms, bool reordered);
+
+ // Resets all state.
+ virtual void Reset();
+
+ // Gets the target buffer level in milliseconds. If a minimum or maximum delay
+ // has been set, the target delay reported here also respects the configured
+ // min/max delay.
+ virtual int TargetDelayMs() const;
+
+  // Reports the target delay that would be used if no minimum or maximum
+  // delay were set.
+ virtual int UnlimitedTargetLevelMs() const;
+
+ // Notifies the DelayManager of how much audio data is carried in each packet.
+ virtual int SetPacketAudioLength(int length_ms);
+
+  // Accessors and mutators. The setters return false if `delay_ms` is
+  // outside the valid range.
+ virtual bool SetMinimumDelay(int delay_ms);
+ virtual bool SetMaximumDelay(int delay_ms);
+ virtual bool SetBaseMinimumDelay(int delay_ms);
+ virtual int GetBaseMinimumDelay() const;
+
+  // This accessor is only intended for testing purposes.
+ int effective_minimum_delay_ms_for_test() const {
+ return effective_minimum_delay_ms_;
+ }
+
+ private:
+  // Returns the upper bound that the minimum delay may not exceed, based on
+  // the current buffer size and `maximum_delay_ms_`. The lower bound is 0.
+ int MinimumDelayUpperBound() const;
+
+ // Updates `effective_minimum_delay_ms_` delay based on current
+ // `minimum_delay_ms_`, `base_minimum_delay_ms_` and `maximum_delay_ms_`
+ // and buffer size.
+ void UpdateEffectiveMinimumDelay();
+
+  // Checks that `delay_ms` does not exceed the maximum delay, if one is set.
+  // Also, if possible, checks that `delay_ms` is less than 75% of the
+  // maximum buffer size (`max_packets_in_buffer_` * `packet_len_ms_`).
+ bool IsValidMinimumDelay(int delay_ms) const;
+
+ bool IsValidBaseMinimumDelay(int delay_ms) const;
+
+ // TODO(jakobi): set maximum buffer delay instead of number of packets.
+ const int max_packets_in_buffer_;
+ UnderrunOptimizer underrun_optimizer_;
+ std::unique_ptr<ReorderOptimizer> reorder_optimizer_;
+
+ int base_minimum_delay_ms_;
+ int effective_minimum_delay_ms_; // Used as lower bound for target delay.
+ int minimum_delay_ms_; // Externally set minimum delay.
+ int maximum_delay_ms_; // Externally set maximum allowed delay.
+
+ int packet_len_ms_ = 0;
+ int target_level_ms_ = 0; // Currently preferred buffer level.
+ int unlimited_target_level_ms_ = 0;
+};
+
+} // namespace webrtc
+#endif // MODULES_AUDIO_CODING_NETEQ_DELAY_MANAGER_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/delay_manager_unittest.cc b/third_party/libwebrtc/modules/audio_coding/neteq/delay_manager_unittest.cc
new file mode 100644
index 0000000000..da5f53188c
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/delay_manager_unittest.cc
@@ -0,0 +1,246 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+// Unit tests for DelayManager class.
+
+#include "modules/audio_coding/neteq/delay_manager.h"
+
+#include <math.h>
+
+#include <memory>
+
+#include "absl/types/optional.h"
+#include "modules/audio_coding/neteq/histogram.h"
+#include "modules/audio_coding/neteq/mock/mock_histogram.h"
+#include "modules/audio_coding/neteq/mock/mock_statistics_calculator.h"
+#include "rtc_base/checks.h"
+#include "test/field_trial.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+namespace {
+constexpr int kMaxNumberOfPackets = 200;
+constexpr int kTimeStepMs = 10;
+constexpr int kFrameSizeMs = 20;
+constexpr int kMaxBufferSizeMs = kMaxNumberOfPackets * kFrameSizeMs;
+
+} // namespace
+
+class DelayManagerTest : public ::testing::Test {
+ protected:
+ DelayManagerTest();
+ virtual void SetUp();
+ void Update(int delay);
+ void IncreaseTime(int inc_ms);
+
+ TickTimer tick_timer_;
+ DelayManager dm_;
+};
+
+DelayManagerTest::DelayManagerTest()
+ : dm_(DelayManager::Config(), &tick_timer_) {}
+
+void DelayManagerTest::SetUp() {
+ dm_.SetPacketAudioLength(kFrameSizeMs);
+}
+
+void DelayManagerTest::Update(int delay) {
+ dm_.Update(delay, false);
+}
+
+void DelayManagerTest::IncreaseTime(int inc_ms) {
+ for (int t = 0; t < inc_ms; t += kTimeStepMs) {
+ tick_timer_.Increment();
+ }
+}
+
+TEST_F(DelayManagerTest, CreateAndDestroy) {
+ // Nothing to do here. The test fixture creates and destroys the DelayManager
+ // object.
+}
+
+TEST_F(DelayManagerTest, UpdateNormal) {
+ for (int i = 0; i < 50; ++i) {
+ Update(0);
+ IncreaseTime(kFrameSizeMs);
+ }
+ EXPECT_EQ(20, dm_.TargetDelayMs());
+}
+
+TEST_F(DelayManagerTest, MaxDelay) {
+ Update(0);
+ const int kMaxDelayMs = 60;
+ EXPECT_GT(dm_.TargetDelayMs(), kMaxDelayMs);
+ EXPECT_TRUE(dm_.SetMaximumDelay(kMaxDelayMs));
+ Update(0);
+ EXPECT_EQ(kMaxDelayMs, dm_.TargetDelayMs());
+}
+
+TEST_F(DelayManagerTest, MinDelay) {
+ Update(0);
+  const int kMinDelayMs = 7 * kFrameSizeMs;
+ EXPECT_LT(dm_.TargetDelayMs(), kMinDelayMs);
+ dm_.SetMinimumDelay(kMinDelayMs);
+ IncreaseTime(kFrameSizeMs);
+ Update(0);
+ EXPECT_EQ(kMinDelayMs, dm_.TargetDelayMs());
+}
+
+TEST_F(DelayManagerTest, BaseMinimumDelayCheckValidRange) {
+ // Base minimum delay should be between [0, 10000] milliseconds.
+ EXPECT_FALSE(dm_.SetBaseMinimumDelay(-1));
+ EXPECT_FALSE(dm_.SetBaseMinimumDelay(10001));
+ EXPECT_EQ(dm_.GetBaseMinimumDelay(), 0);
+
+ EXPECT_TRUE(dm_.SetBaseMinimumDelay(7999));
+ EXPECT_EQ(dm_.GetBaseMinimumDelay(), 7999);
+}
+
+TEST_F(DelayManagerTest, BaseMinimumDelayLowerThanMinimumDelay) {
+ constexpr int kBaseMinimumDelayMs = 100;
+ constexpr int kMinimumDelayMs = 200;
+
+  // The base minimum delay sets a lower bound on the effective minimum delay.
+  // Hence, when the base minimum delay is lower than the minimum delay, the
+  // minimum delay is used.
+ RTC_DCHECK_LT(kBaseMinimumDelayMs, kMinimumDelayMs);
+
+ EXPECT_TRUE(dm_.SetBaseMinimumDelay(kBaseMinimumDelayMs));
+ EXPECT_TRUE(dm_.SetMinimumDelay(kMinimumDelayMs));
+ EXPECT_EQ(dm_.effective_minimum_delay_ms_for_test(), kMinimumDelayMs);
+}
+
+TEST_F(DelayManagerTest, BaseMinimumDelayGreaterThanMinimumDelay) {
+ constexpr int kBaseMinimumDelayMs = 70;
+ constexpr int kMinimumDelayMs = 30;
+
+  // The base minimum delay sets a lower bound on the effective minimum delay.
+  // Hence, when the base minimum delay is greater than the minimum delay, the
+  // base minimum delay is used.
+ RTC_DCHECK_GT(kBaseMinimumDelayMs, kMinimumDelayMs);
+
+ EXPECT_TRUE(dm_.SetBaseMinimumDelay(kBaseMinimumDelayMs));
+ EXPECT_TRUE(dm_.SetMinimumDelay(kMinimumDelayMs));
+ EXPECT_EQ(dm_.effective_minimum_delay_ms_for_test(), kBaseMinimumDelayMs);
+}
+
+TEST_F(DelayManagerTest, BaseMinimumDelayGreaterThanBufferSize) {
+ constexpr int kBaseMinimumDelayMs = kMaxBufferSizeMs + 1;
+ constexpr int kMinimumDelayMs = 12;
+ constexpr int kMaximumDelayMs = 20;
+ constexpr int kMaxBufferSizeMsQ75 = 3 * kMaxBufferSizeMs / 4;
+
+ EXPECT_TRUE(dm_.SetMaximumDelay(kMaximumDelayMs));
+
+  // The base minimum delay is greater than the minimum delay, so it is
+  // clamped to the highest currently possible value, i.e. the maximum delay.
+ RTC_DCHECK_GT(kBaseMinimumDelayMs, kMinimumDelayMs);
+ RTC_DCHECK_GT(kBaseMinimumDelayMs, kMaxBufferSizeMs);
+ RTC_DCHECK_GT(kBaseMinimumDelayMs, kMaximumDelayMs);
+ RTC_DCHECK_LT(kMaximumDelayMs, kMaxBufferSizeMsQ75);
+
+ EXPECT_TRUE(dm_.SetMinimumDelay(kMinimumDelayMs));
+ EXPECT_TRUE(dm_.SetBaseMinimumDelay(kBaseMinimumDelayMs));
+
+ // Unset maximum value.
+ EXPECT_TRUE(dm_.SetMaximumDelay(0));
+
+  // With the maximum delay unset, the highest possible value is now 75% of
+  // the maximum buffer size.
+ EXPECT_EQ(dm_.effective_minimum_delay_ms_for_test(), kMaxBufferSizeMsQ75);
+}
+
+TEST_F(DelayManagerTest, BaseMinimumDelayGreaterThanMaximumDelay) {
+ constexpr int kMaximumDelayMs = 400;
+ constexpr int kBaseMinimumDelayMs = kMaximumDelayMs + 1;
+ constexpr int kMinimumDelayMs = 20;
+
+  // The base minimum delay is greater than the minimum delay, so it is
+  // clamped to the highest currently possible value, i.e. kMaximumDelayMs.
+ RTC_DCHECK_GT(kBaseMinimumDelayMs, kMinimumDelayMs);
+ RTC_DCHECK_GT(kBaseMinimumDelayMs, kMaximumDelayMs);
+ RTC_DCHECK_LT(kMaximumDelayMs, kMaxBufferSizeMs);
+
+ EXPECT_TRUE(dm_.SetMaximumDelay(kMaximumDelayMs));
+ EXPECT_TRUE(dm_.SetMinimumDelay(kMinimumDelayMs));
+ EXPECT_TRUE(dm_.SetBaseMinimumDelay(kBaseMinimumDelayMs));
+ EXPECT_EQ(dm_.effective_minimum_delay_ms_for_test(), kMaximumDelayMs);
+}
+
+TEST_F(DelayManagerTest, BaseMinimumDelayLowerThanMaxSize) {
+ constexpr int kMaximumDelayMs = 400;
+ constexpr int kBaseMinimumDelayMs = kMaximumDelayMs - 1;
+ constexpr int kMinimumDelayMs = 20;
+
+  // The base minimum delay is greater than the minimum delay and lower than
+  // the maximum delay; that is why it is used.
+ RTC_DCHECK_GT(kBaseMinimumDelayMs, kMinimumDelayMs);
+ RTC_DCHECK_LT(kBaseMinimumDelayMs, kMaximumDelayMs);
+
+ EXPECT_TRUE(dm_.SetMaximumDelay(kMaximumDelayMs));
+ EXPECT_TRUE(dm_.SetMinimumDelay(kMinimumDelayMs));
+ EXPECT_TRUE(dm_.SetBaseMinimumDelay(kBaseMinimumDelayMs));
+ EXPECT_EQ(dm_.effective_minimum_delay_ms_for_test(), kBaseMinimumDelayMs);
+}
+
+TEST_F(DelayManagerTest, MinimumDelayMemorization) {
+  // Check that when the base minimum delay is raised above the minimum delay,
+  // the minimum delay is still memorized. This allows the effective minimum
+  // delay to be restored to the memorized minimum delay when the base minimum
+  // delay is lowered again.
+ constexpr int kBaseMinimumDelayMsLow = 10;
+ constexpr int kMinimumDelayMs = 20;
+ constexpr int kBaseMinimumDelayMsHigh = 30;
+
+ EXPECT_TRUE(dm_.SetBaseMinimumDelay(kBaseMinimumDelayMsLow));
+ EXPECT_TRUE(dm_.SetMinimumDelay(kMinimumDelayMs));
+ // Minimum delay is used as it is higher than base minimum delay.
+ EXPECT_EQ(dm_.effective_minimum_delay_ms_for_test(), kMinimumDelayMs);
+
+ EXPECT_TRUE(dm_.SetBaseMinimumDelay(kBaseMinimumDelayMsHigh));
+ // Base minimum delay is used as it is now higher than minimum delay.
+ EXPECT_EQ(dm_.effective_minimum_delay_ms_for_test(), kBaseMinimumDelayMsHigh);
+
+ EXPECT_TRUE(dm_.SetBaseMinimumDelay(kBaseMinimumDelayMsLow));
+ // Check that minimum delay is memorized and is used again.
+ EXPECT_EQ(dm_.effective_minimum_delay_ms_for_test(), kMinimumDelayMs);
+}
+
+TEST_F(DelayManagerTest, BaseMinimumDelay) {
+ // First packet arrival.
+ Update(0);
+
+ constexpr int kBaseMinimumDelayMs = 7 * kFrameSizeMs;
+ EXPECT_LT(dm_.TargetDelayMs(), kBaseMinimumDelayMs);
+ EXPECT_TRUE(dm_.SetBaseMinimumDelay(kBaseMinimumDelayMs));
+ EXPECT_EQ(dm_.GetBaseMinimumDelay(), kBaseMinimumDelayMs);
+
+ IncreaseTime(kFrameSizeMs);
+ Update(0);
+ EXPECT_EQ(dm_.GetBaseMinimumDelay(), kBaseMinimumDelayMs);
+ EXPECT_EQ(kBaseMinimumDelayMs, dm_.TargetDelayMs());
+}
+
+TEST_F(DelayManagerTest, Failures) {
+ // Wrong packet size.
+ EXPECT_EQ(-1, dm_.SetPacketAudioLength(0));
+ EXPECT_EQ(-1, dm_.SetPacketAudioLength(-1));
+
+ // Minimum delay higher than a maximum delay is not accepted.
+ EXPECT_TRUE(dm_.SetMaximumDelay(20));
+ EXPECT_FALSE(dm_.SetMinimumDelay(40));
+
+ // Maximum delay less than minimum delay is not accepted.
+ EXPECT_TRUE(dm_.SetMaximumDelay(100));
+ EXPECT_TRUE(dm_.SetMinimumDelay(80));
+ EXPECT_FALSE(dm_.SetMaximumDelay(60));
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/dsp_helper.cc b/third_party/libwebrtc/modules/audio_coding/neteq/dsp_helper.cc
new file mode 100644
index 0000000000..a979f94214
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/dsp_helper.cc
@@ -0,0 +1,373 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/dsp_helper.h"
+
+#include <string.h> // Access to memset.
+
+#include <algorithm> // Access to min, max.
+
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+
+namespace webrtc {
+
+// Table of constants used in method DspHelper::ParabolicFit().
+const int16_t DspHelper::kParabolaCoefficients[17][3] = {
+ {120, 32, 64}, {140, 44, 75}, {150, 50, 80}, {160, 57, 85},
+ {180, 72, 96}, {200, 89, 107}, {210, 98, 112}, {220, 108, 117},
+ {240, 128, 128}, {260, 150, 139}, {270, 162, 144}, {280, 174, 149},
+ {300, 200, 160}, {320, 228, 171}, {330, 242, 176}, {340, 257, 181},
+ {360, 288, 192}};
+
+// Filter coefficients used when downsampling from the indicated sample rates
+// (8, 16, 32, 48 kHz) to 4 kHz. Coefficients are in Q12. The corresponding Q0
+// values are provided in the comments before each array.
+
+// Q0 values: {0.3, 0.4, 0.3}.
+const int16_t DspHelper::kDownsample8kHzTbl[3] = {1229, 1638, 1229};
+
+// Q0 values: {0.15, 0.2, 0.3, 0.2, 0.15}.
+const int16_t DspHelper::kDownsample16kHzTbl[5] = {614, 819, 1229, 819, 614};
+
+// Q0 values: {0.1425, 0.1251, 0.1525, 0.1628, 0.1525, 0.1251, 0.1425}.
+const int16_t DspHelper::kDownsample32kHzTbl[7] = {584, 512, 625, 667,
+ 625, 512, 584};
+
+// Q0 values: {0.2487, 0.0952, 0.1042, 0.1074, 0.1042, 0.0952, 0.2487}.
+const int16_t DspHelper::kDownsample48kHzTbl[7] = {1019, 390, 427, 440,
+ 427, 390, 1019};
+
+int DspHelper::RampSignal(const int16_t* input,
+ size_t length,
+ int factor,
+ int increment,
+ int16_t* output) {
+ int factor_q20 = (factor << 6) + 32;
+ // TODO(hlundin): Add 32 to factor_q20 when converting back to Q14?
+ for (size_t i = 0; i < length; ++i) {
+ output[i] = (factor * input[i] + 8192) >> 14;
+ factor_q20 += increment;
+ factor_q20 = std::max(factor_q20, 0); // Never go negative.
+ factor = std::min(factor_q20 >> 6, 16384);
+ }
+ return factor;
+}
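
To make the fixed-point bookkeeping concrete: `factor` is a Q14 gain
(16384 == 1.0), tracked internally in Q20, so `increment` is a per-sample Q20
step. A fade-in over one 10 ms frame at 16 kHz (illustrative sketch, not part
of the patch):

  int16_t buf[160] = {0};  // Audio samples to be faded in.
  const int kStartFactorQ14 = 0;
  const int kIncrementQ20 = (16384 << 6) / 160;  // Reach unity in 160 steps.
  int end_factor =
      webrtc::DspHelper::RampSignal(buf, 160, kStartFactorQ14, kIncrementQ20);
  // end_factor is now approximately 16384, i.e. unity gain in Q14.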
+
+int DspHelper::RampSignal(int16_t* signal,
+ size_t length,
+ int factor,
+ int increment) {
+ return RampSignal(signal, length, factor, increment, signal);
+}
+
+int DspHelper::RampSignal(AudioVector* signal,
+ size_t start_index,
+ size_t length,
+ int factor,
+ int increment) {
+ int factor_q20 = (factor << 6) + 32;
+ // TODO(hlundin): Add 32 to factor_q20 when converting back to Q14?
+ for (size_t i = start_index; i < start_index + length; ++i) {
+ (*signal)[i] = (factor * (*signal)[i] + 8192) >> 14;
+ factor_q20 += increment;
+ factor_q20 = std::max(factor_q20, 0); // Never go negative.
+ factor = std::min(factor_q20 >> 6, 16384);
+ }
+ return factor;
+}
+
+int DspHelper::RampSignal(AudioMultiVector* signal,
+ size_t start_index,
+ size_t length,
+ int factor,
+ int increment) {
+ RTC_DCHECK_LE(start_index + length, signal->Size());
+ if (start_index + length > signal->Size()) {
+ // Wrong parameters. Do nothing and return the scale factor unaltered.
+ return factor;
+ }
+ int end_factor = 0;
+ // Loop over the channels, starting at the same `factor` each time.
+ for (size_t channel = 0; channel < signal->Channels(); ++channel) {
+ end_factor =
+ RampSignal(&(*signal)[channel], start_index, length, factor, increment);
+ }
+ return end_factor;
+}
+
+void DspHelper::PeakDetection(int16_t* data,
+ size_t data_length,
+ size_t num_peaks,
+ int fs_mult,
+ size_t* peak_index,
+ int16_t* peak_value) {
+ size_t min_index = 0;
+ size_t max_index = 0;
+
+  for (size_t i = 0; i < num_peaks; i++) {
+ if (num_peaks == 1) {
+ // Single peak. The parabola fit assumes that an extra point is
+ // available; worst case it gets a zero on the high end of the signal.
+      // TODO(hlundin): This can potentially get much worse. It breaks the
+      // API contract that the length of `data` is `data_length`.
+ data_length++;
+ }
+
+ peak_index[i] = WebRtcSpl_MaxIndexW16(data, data_length - 1);
+
+ if (i != num_peaks - 1) {
+ min_index = (peak_index[i] > 2) ? (peak_index[i] - 2) : 0;
+ max_index = std::min(data_length - 1, peak_index[i] + 2);
+ }
+
+ if ((peak_index[i] != 0) && (peak_index[i] != (data_length - 2))) {
+ ParabolicFit(&data[peak_index[i] - 1], fs_mult, &peak_index[i],
+ &peak_value[i]);
+ } else {
+ if (peak_index[i] == data_length - 2) {
+ if (data[peak_index[i]] > data[peak_index[i] + 1]) {
+ ParabolicFit(&data[peak_index[i] - 1], fs_mult, &peak_index[i],
+ &peak_value[i]);
+ } else if (data[peak_index[i]] <= data[peak_index[i] + 1]) {
+ // Linear approximation.
+ peak_value[i] = (data[peak_index[i]] + data[peak_index[i] + 1]) >> 1;
+ peak_index[i] = (peak_index[i] * 2 + 1) * fs_mult;
+ }
+ } else {
+ peak_value[i] = data[peak_index[i]];
+ peak_index[i] = peak_index[i] * 2 * fs_mult;
+ }
+ }
+
+ if (i != num_peaks - 1) {
+ memset(&data[min_index], 0,
+ sizeof(data[0]) * (max_index - min_index + 1));
+ }
+ }
+}
+
+void DspHelper::ParabolicFit(int16_t* signal_points,
+ int fs_mult,
+ size_t* peak_index,
+ int16_t* peak_value) {
+ uint16_t fit_index[13];
+ if (fs_mult == 1) {
+ fit_index[0] = 0;
+ fit_index[1] = 8;
+ fit_index[2] = 16;
+ } else if (fs_mult == 2) {
+ fit_index[0] = 0;
+ fit_index[1] = 4;
+ fit_index[2] = 8;
+ fit_index[3] = 12;
+ fit_index[4] = 16;
+ } else if (fs_mult == 4) {
+ fit_index[0] = 0;
+ fit_index[1] = 2;
+ fit_index[2] = 4;
+ fit_index[3] = 6;
+ fit_index[4] = 8;
+ fit_index[5] = 10;
+ fit_index[6] = 12;
+ fit_index[7] = 14;
+ fit_index[8] = 16;
+ } else {
+ fit_index[0] = 0;
+ fit_index[1] = 1;
+ fit_index[2] = 3;
+ fit_index[3] = 4;
+ fit_index[4] = 5;
+ fit_index[5] = 7;
+ fit_index[6] = 8;
+ fit_index[7] = 9;
+ fit_index[8] = 11;
+ fit_index[9] = 12;
+ fit_index[10] = 13;
+ fit_index[11] = 15;
+ fit_index[12] = 16;
+ }
+
+ // num = -3 * signal_points[0] + 4 * signal_points[1] - signal_points[2];
+ // den = signal_points[0] - 2 * signal_points[1] + signal_points[2];
+ int32_t num =
+ (signal_points[0] * -3) + (signal_points[1] * 4) - signal_points[2];
+ int32_t den = signal_points[0] + (signal_points[1] * -2) + signal_points[2];
+ int32_t temp = num * 120;
+ int flag = 1;
+ int16_t stp = kParabolaCoefficients[fit_index[fs_mult]][0] -
+ kParabolaCoefficients[fit_index[fs_mult - 1]][0];
+ int16_t strt = (kParabolaCoefficients[fit_index[fs_mult]][0] +
+ kParabolaCoefficients[fit_index[fs_mult - 1]][0]) /
+ 2;
+ int16_t lmt;
+ if (temp < -den * strt) {
+ lmt = strt - stp;
+ while (flag) {
+ if ((flag == fs_mult) || (temp > -den * lmt)) {
+ *peak_value =
+ (den * kParabolaCoefficients[fit_index[fs_mult - flag]][1] +
+ num * kParabolaCoefficients[fit_index[fs_mult - flag]][2] +
+ signal_points[0] * 256) /
+ 256;
+ *peak_index = *peak_index * 2 * fs_mult - flag;
+ flag = 0;
+ } else {
+ flag++;
+ lmt -= stp;
+ }
+ }
+ } else if (temp > -den * (strt + stp)) {
+ lmt = strt + 2 * stp;
+ while (flag) {
+ if ((flag == fs_mult) || (temp < -den * lmt)) {
+ int32_t temp_term_1 =
+ den * kParabolaCoefficients[fit_index[fs_mult + flag]][1];
+ int32_t temp_term_2 =
+ num * kParabolaCoefficients[fit_index[fs_mult + flag]][2];
+ int32_t temp_term_3 = signal_points[0] * 256;
+ *peak_value = (temp_term_1 + temp_term_2 + temp_term_3) / 256;
+ *peak_index = *peak_index * 2 * fs_mult + flag;
+ flag = 0;
+ } else {
+ flag++;
+ lmt += stp;
+ }
+ }
+ } else {
+ *peak_value = signal_points[1];
+ *peak_index = *peak_index * 2 * fs_mult;
+ }
+}
+
+size_t DspHelper::MinDistortion(const int16_t* signal,
+ size_t min_lag,
+ size_t max_lag,
+ size_t length,
+ int32_t* distortion_value) {
+ size_t best_index = 0;
+ int32_t min_distortion = WEBRTC_SPL_WORD32_MAX;
+ for (size_t i = min_lag; i <= max_lag; i++) {
+ int32_t sum_diff = 0;
+ const int16_t* data1 = signal;
+ const int16_t* data2 = signal - i;
+ for (size_t j = 0; j < length; j++) {
+ sum_diff += WEBRTC_SPL_ABS_W32(data1[j] - data2[j]);
+ }
+ // Compare with previous minimum.
+ if (sum_diff < min_distortion) {
+ min_distortion = sum_diff;
+ best_index = i;
+ }
+ }
+ *distortion_value = min_distortion;
+ return best_index;
+}
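
Note that the candidate segments are read at negative offsets (`signal - i`),
so `signal` must point at least `max_lag` samples into a valid buffer. A
hedged sketch of a correct call:

  int16_t buffer[240] = {0};  // Holds at least max_lag + length samples.
  int32_t distortion = 0;
  size_t best_lag = webrtc::DspHelper::MinDistortion(
      &buffer[120], /*min_lag=*/40, /*max_lag=*/120, /*length=*/80,
      &distortion);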
+
+void DspHelper::CrossFade(const int16_t* input1,
+ const int16_t* input2,
+ size_t length,
+ int16_t* mix_factor,
+ int16_t factor_decrement,
+ int16_t* output) {
+ int16_t factor = *mix_factor;
+ int16_t complement_factor = 16384 - factor;
+ for (size_t i = 0; i < length; i++) {
+ output[i] =
+ (factor * input1[i] + complement_factor * input2[i] + 8192) >> 14;
+ factor -= factor_decrement;
+ complement_factor += factor_decrement;
+ }
+ *mix_factor = factor;
+}
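
Here too 16384 represents 1.0 in Q14. Fading completely from `input1` to
`input2` over the whole buffer means choosing the decrement as 16384 / length
(illustrative sketch, not part of the patch):

  int16_t from[64] = {0};
  int16_t to[64] = {0};
  int16_t out[64];
  int16_t mix_factor = 16384;  // Start fully on `from`.
  webrtc::DspHelper::CrossFade(from, to, 64, &mix_factor, 16384 / 64, out);
  // mix_factor has ramped down to 0, i.e. fully on `to`.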
+
+void DspHelper::UnmuteSignal(const int16_t* input,
+ size_t length,
+ int16_t* factor,
+ int increment,
+ int16_t* output) {
+ uint16_t factor_16b = *factor;
+ int32_t factor_32b = (static_cast<int32_t>(factor_16b) << 6) + 32;
+ for (size_t i = 0; i < length; i++) {
+ output[i] = (factor_16b * input[i] + 8192) >> 14;
+ factor_32b = std::max(factor_32b + increment, 0);
+ factor_16b = std::min(16384, factor_32b >> 6);
+ }
+ *factor = factor_16b;
+}
+
+void DspHelper::MuteSignal(int16_t* signal, int mute_slope, size_t length) {
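+  // `factor` holds the gain in Q20: unity (16384 in Q14) shifted up 6 bits,
+  // plus a rounding offset of 32 for the >> 6 conversion back to Q14 below.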
+ int32_t factor = (16384 << 6) + 32;
+ for (size_t i = 0; i < length; i++) {
+ signal[i] = ((factor >> 6) * signal[i] + 8192) >> 14;
+ factor -= mute_slope;
+ }
+}
+
+int DspHelper::DownsampleTo4kHz(const int16_t* input,
+ size_t input_length,
+ size_t output_length,
+ int input_rate_hz,
+ bool compensate_delay,
+ int16_t* output) {
+ // Set filter parameters depending on input frequency.
+  // NOTE: The phase delay values do not match the true phase delay of the
+  // filters. However, the error is deliberately preserved (through the +1
+  // term) for consistency with earlier behavior.
+ const int16_t* filter_coefficients; // Filter coefficients.
+ size_t filter_length; // Number of coefficients.
+ size_t filter_delay; // Phase delay in samples.
+  int16_t factor;  // Conversion rate (input_rate_hz / 4000).
+ switch (input_rate_hz) {
+ case 8000: {
+ filter_length = 3;
+ factor = 2;
+ filter_coefficients = kDownsample8kHzTbl;
+ filter_delay = 1 + 1;
+ break;
+ }
+ case 16000: {
+ filter_length = 5;
+ factor = 4;
+ filter_coefficients = kDownsample16kHzTbl;
+ filter_delay = 2 + 1;
+ break;
+ }
+ case 32000: {
+ filter_length = 7;
+ factor = 8;
+ filter_coefficients = kDownsample32kHzTbl;
+ filter_delay = 3 + 1;
+ break;
+ }
+ case 48000: {
+ filter_length = 7;
+ factor = 12;
+ filter_coefficients = kDownsample48kHzTbl;
+ filter_delay = 3 + 1;
+ break;
+ }
+ default: {
+ RTC_DCHECK_NOTREACHED();
+ return -1;
+ }
+ }
+
+ if (!compensate_delay) {
+ // Disregard delay compensation.
+ filter_delay = 0;
+ }
+
+ // Returns -1 if input signal is too short; 0 otherwise.
+ return WebRtcSpl_DownsampleFast(
+ &input[filter_length - 1], input_length - filter_length + 1, output,
+ output_length, filter_coefficients, filter_length, factor, filter_delay);
+}
+
+} // namespace webrtc
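The Q14 mixing in CrossFade lends itself to a quick standalone check. The
sketch below is illustrative only (the MixQ14 helper and the sample values are
ours, not part of the patch); it reproduces one iteration of the inner loop:

    #include <cstdint>
    #include <cstdio>

    // One iteration of DspHelper::CrossFade's loop: mix two samples with
    // gains `factor` and `16384 - factor` (both Q14), with rounding.
    int16_t MixQ14(int16_t a, int16_t b, int16_t factor) {
      int16_t complement = 16384 - factor;
      return static_cast<int16_t>((factor * a + complement * b + 8192) >> 14);
    }

    int main() {
      // Half-way through the fade (8192 = 0.5 in Q14), opposite samples cancel.
      std::printf("%d\n", MixQ14(1000, -1000, 8192));   // Prints 0.
      // At 16384 (1.0 in Q14) the first input passes through unchanged.
      std::printf("%d\n", MixQ14(1000, -1000, 16384));  // Prints 1000.
      return 0;
    }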
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/dsp_helper.h b/third_party/libwebrtc/modules/audio_coding/neteq/dsp_helper.h
new file mode 100644
index 0000000000..4aead7df18
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/dsp_helper.h
@@ -0,0 +1,161 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_DSP_HELPER_H_
+#define MODULES_AUDIO_CODING_NETEQ_DSP_HELPER_H_
+
+#include <stdint.h>
+#include <string.h>
+
+#include "modules/audio_coding/neteq/audio_multi_vector.h"
+#include "modules/audio_coding/neteq/audio_vector.h"
+
+namespace webrtc {
+
+// This class contains various signal processing functions, all implemented as
+// static methods.
+class DspHelper {
+ public:
+ // Filter coefficients used when downsampling from the indicated sample rates
+ // (8, 16, 32, 48 kHz) to 4 kHz. Coefficients are in Q12.
+ static const int16_t kDownsample8kHzTbl[3];
+ static const int16_t kDownsample16kHzTbl[5];
+ static const int16_t kDownsample32kHzTbl[7];
+ static const int16_t kDownsample48kHzTbl[7];
+
+  // Constants used to mute and unmute over 0.625 ms: 5 samples at 8 kHz,
+  // scaled proportionally at the higher rates. The coefficients are in Q15.
+ static const int kMuteFactorStart8kHz = 27307;
+ static const int kMuteFactorIncrement8kHz = -5461;
+ static const int kUnmuteFactorStart8kHz = 5461;
+ static const int kUnmuteFactorIncrement8kHz = 5461;
+ static const int kMuteFactorStart16kHz = 29789;
+ static const int kMuteFactorIncrement16kHz = -2979;
+ static const int kUnmuteFactorStart16kHz = 2979;
+ static const int kUnmuteFactorIncrement16kHz = 2979;
+ static const int kMuteFactorStart32kHz = 31208;
+ static const int kMuteFactorIncrement32kHz = -1560;
+ static const int kUnmuteFactorStart32kHz = 1560;
+ static const int kUnmuteFactorIncrement32kHz = 1560;
+ static const int kMuteFactorStart48kHz = 31711;
+ static const int kMuteFactorIncrement48kHz = -1057;
+ static const int kUnmuteFactorStart48kHz = 1057;
+ static const int kUnmuteFactorIncrement48kHz = 1057;
+
+ // Multiplies the signal with a gradually changing factor.
+ // The first sample is multiplied with `factor` (in Q14). For each sample,
+ // `factor` is increased (additive) by the `increment` (in Q20), which can
+ // be negative. Returns the scale factor after the last increment.
+ static int RampSignal(const int16_t* input,
+ size_t length,
+ int factor,
+ int increment,
+ int16_t* output);
+
+ // Same as above, but with the samples of `signal` being modified in-place.
+ static int RampSignal(int16_t* signal,
+ size_t length,
+ int factor,
+ int increment);
+
+ // Same as above, but processes `length` samples from `signal`, starting at
+ // `start_index`.
+ static int RampSignal(AudioVector* signal,
+ size_t start_index,
+ size_t length,
+ int factor,
+ int increment);
+
+ // Same as above, but for an AudioMultiVector.
+ static int RampSignal(AudioMultiVector* signal,
+ size_t start_index,
+ size_t length,
+ int factor,
+ int increment);
+
+ // Peak detection with parabolic fit. Looks for `num_peaks` maxima in `data`,
+ // having length `data_length` and sample rate multiplier `fs_mult`. The peak
+ // locations and values are written to the arrays `peak_index` and
+ // `peak_value`, respectively. Both arrays must hold at least `num_peaks`
+ // elements.
+ static void PeakDetection(int16_t* data,
+ size_t data_length,
+ size_t num_peaks,
+ int fs_mult,
+ size_t* peak_index,
+ int16_t* peak_value);
+
+ // Estimates the height and location of a maximum. The three values in the
+ // array `signal_points` are used as basis for a parabolic fit, which is then
+ // used to find the maximum in an interpolated signal. The `signal_points` are
+ // assumed to be from a 4 kHz signal, while the maximum, written to
+ // `peak_index` and `peak_value` is given in the full sample rate, as
+ // indicated by the sample rate multiplier `fs_mult`.
+ static void ParabolicFit(int16_t* signal_points,
+ int fs_mult,
+ size_t* peak_index,
+ int16_t* peak_value);
+
+ // Calculates the sum-abs-diff for `signal` when compared to a displaced
+ // version of itself. Returns the displacement lag that results in the minimum
+ // distortion. The resulting distortion is written to `distortion_value`.
+ // The values of `min_lag` and `max_lag` are boundaries for the search.
+ static size_t MinDistortion(const int16_t* signal,
+ size_t min_lag,
+ size_t max_lag,
+ size_t length,
+ int32_t* distortion_value);
+
+ // Mixes `length` samples from `input1` and `input2` together and writes the
+ // result to `output`. The gain for `input1` starts at `mix_factor` (Q14) and
+ // is decreased by `factor_decrement` (Q14) for each sample. The gain for
+ // `input2` is the complement 16384 - mix_factor.
+ static void CrossFade(const int16_t* input1,
+ const int16_t* input2,
+ size_t length,
+ int16_t* mix_factor,
+ int16_t factor_decrement,
+ int16_t* output);
+
+ // Scales `input` with an increasing gain. Applies `factor` (Q14) to the first
+ // sample and increases the gain by `increment` (Q20) for each sample. The
+ // result is written to `output`. `length` samples are processed.
+ static void UnmuteSignal(const int16_t* input,
+ size_t length,
+ int16_t* factor,
+ int increment,
+ int16_t* output);
+
+ // Starts at unity gain and gradually fades out `signal`. For each sample,
+  // the gain is reduced by `mute_slope` (Q20). `length` samples are processed.
+ static void MuteSignal(int16_t* signal, int mute_slope, size_t length);
+
+ // Downsamples `input` from `sample_rate_hz` to 4 kHz sample rate. The input
+ // has `input_length` samples, and the method will write `output_length`
+ // samples to `output`. Compensates for the phase delay of the downsampling
+ // filters if `compensate_delay` is true. Returns -1 if the input is too short
+ // to produce `output_length` samples, otherwise 0.
+ static int DownsampleTo4kHz(const int16_t* input,
+ size_t input_length,
+ size_t output_length,
+ int input_rate_hz,
+ bool compensate_delay,
+ int16_t* output);
+
+ DspHelper(const DspHelper&) = delete;
+ DspHelper& operator=(const DspHelper&) = delete;
+
+ private:
+ // Table of constants used in method DspHelper::ParabolicFit().
+ static const int16_t kParabolaCoefficients[17][3];
+};
+
+} // namespace webrtc
+#endif // MODULES_AUDIO_CODING_NETEQ_DSP_HELPER_H_
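A pattern worth noting in the mute/unmute constants above: every ramp spans
0.625 ms (5 samples at 8 kHz, scaled with the rate), with start and increment
as rounded fractions of unity in Q15. This closed form is inferred from the
table values rather than stated in the source; the sketch below reproduces all
sixteen constants:

    #include <cmath>
    #include <cstdio>

    int main() {
      const int rates[] = {8000, 16000, 32000, 48000};
      for (int fs : rates) {
        int steps = fs * 5 / 8000;  // 5 samples at 8 kHz, scaled with the rate.
        // E.g. for 8 kHz: increment = round(32768 / 6) = 5461 and
        // start = round(32768 * 5 / 6) = 27307, matching the declarations.
        std::printf("fs=%d start=%ld increment=%ld\n", fs,
                    std::lround(32768.0 * steps / (steps + 1)),
                    std::lround(32768.0 / (steps + 1)));
      }
      return 0;
    }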
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/dsp_helper_unittest.cc b/third_party/libwebrtc/modules/audio_coding/neteq/dsp_helper_unittest.cc
new file mode 100644
index 0000000000..09247417d3
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/dsp_helper_unittest.cc
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/dsp_helper.h"
+
+#include "modules/audio_coding/neteq/audio_multi_vector.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+TEST(DspHelper, RampSignalArray) {
+ static const int kLen = 100;
+ int16_t input[kLen];
+ int16_t output[kLen];
+ // Fill input with 1000.
+ for (int i = 0; i < kLen; ++i) {
+ input[i] = 1000;
+ }
+ int start_factor = 0;
+ // Ramp from 0 to 1 (in Q14) over the array. Note that `increment` is in Q20,
+ // while the factor is in Q14, hence the shift by 6.
+ int increment = (16384 << 6) / kLen;
+
+ // Test first method.
+ int stop_factor =
+ DspHelper::RampSignal(input, kLen, start_factor, increment, output);
+ EXPECT_EQ(16383, stop_factor); // Almost reach 1 in Q14.
+ for (int i = 0; i < kLen; ++i) {
+ EXPECT_EQ(1000 * i / kLen, output[i]);
+ }
+
+ // Test second method. (Note that this modifies `input`.)
+ stop_factor = DspHelper::RampSignal(input, kLen, start_factor, increment);
+ EXPECT_EQ(16383, stop_factor); // Almost reach 1 in Q14.
+ for (int i = 0; i < kLen; ++i) {
+ EXPECT_EQ(1000 * i / kLen, input[i]);
+ }
+}
+
+TEST(DspHelper, RampSignalAudioMultiVector) {
+ static const int kLen = 100;
+ static const int kChannels = 5;
+ AudioMultiVector input(kChannels, kLen * 3);
+ // Fill input with 1000.
+ for (int i = 0; i < kLen * 3; ++i) {
+ for (int channel = 0; channel < kChannels; ++channel) {
+ input[channel][i] = 1000;
+ }
+ }
+ // We want to start ramping at `start_index` and keep ramping for `kLen`
+ // samples.
+ int start_index = kLen;
+ int start_factor = 0;
+ // Ramp from 0 to 1 (in Q14) in `kLen` samples. Note that `increment` is in
+ // Q20, while the factor is in Q14, hence the shift by 6.
+ int increment = (16384 << 6) / kLen;
+
+ int stop_factor =
+ DspHelper::RampSignal(&input, start_index, kLen, start_factor, increment);
+ EXPECT_EQ(16383, stop_factor); // Almost reach 1 in Q14.
+ // Verify that the first `kLen` samples are left untouched.
+ int i;
+ for (i = 0; i < kLen; ++i) {
+ for (int channel = 0; channel < kChannels; ++channel) {
+ EXPECT_EQ(1000, input[channel][i]);
+ }
+ }
+ // Verify that the next block of `kLen` samples are ramped.
+ for (; i < 2 * kLen; ++i) {
+ for (int channel = 0; channel < kChannels; ++channel) {
+ EXPECT_EQ(1000 * (i - kLen) / kLen, input[channel][i]);
+ }
+ }
+ // Verify the last `kLen` samples are left untouched.
+ for (; i < 3 * kLen; ++i) {
+ for (int channel = 0; channel < kChannels; ++channel) {
+ EXPECT_EQ(1000, input[channel][i]);
+ }
+ }
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/dtmf_buffer.cc b/third_party/libwebrtc/modules/audio_coding/neteq/dtmf_buffer.cc
new file mode 100644
index 0000000000..115bfcf97b
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/dtmf_buffer.cc
@@ -0,0 +1,246 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/dtmf_buffer.h"
+
+#include <algorithm> // max
+
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+// Modify the code to obtain backwards bit-exactness. Once bit-exactness is no
+// longer required, this #define should be removed (and the code that it
+// enables).
+#define LEGACY_BITEXACT
+
+namespace webrtc {
+
+DtmfBuffer::DtmfBuffer(int fs_hz) {
+ SetSampleRate(fs_hz);
+}
+
+DtmfBuffer::~DtmfBuffer() = default;
+
+void DtmfBuffer::Flush() {
+ buffer_.clear();
+}
+
+// The ParseEvent method parses 4 bytes from `payload` according to this format
+// from RFC 4733:
+//
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | event |E|R| volume | duration |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//
+// Legend (adapted from RFC 4733)
+// - event: The event field is a number between 0 and 255 identifying a
+// specific telephony event. The buffer will not accept any event
+// numbers larger than 15.
+// - E: If set to a value of one, the "end" bit indicates that this
+// packet contains the end of the event. For long-lasting events
+// that have to be split into segments, only the final packet for
+// the final segment will have the E bit set.
+// - R: Reserved.
+// - volume: For DTMF digits and other events representable as tones, this
+// field describes the power level of the tone, expressed in dBm0
+// after dropping the sign. Power levels range from 0 to -63 dBm0.
+// Thus, larger values denote lower volume. The buffer discards
+// values larger than 36 (i.e., lower than -36 dBm0).
+// - duration: The duration field indicates the duration of the event or segment
+// being reported, in timestamp units, expressed as an unsigned
+// integer in network byte order. For a non-zero value, the event
+// or segment began at the instant identified by the RTP timestamp
+// and has so far lasted as long as indicated by this parameter.
+// The event may or may not have ended. If the event duration
+// exceeds the maximum representable by the duration field, the
+// event is split into several contiguous segments. The buffer will
+// discard zero-duration events.
+//
+int DtmfBuffer::ParseEvent(uint32_t rtp_timestamp,
+ const uint8_t* payload,
+ size_t payload_length_bytes,
+ DtmfEvent* event) {
+ RTC_CHECK(payload);
+ RTC_CHECK(event);
+ if (payload_length_bytes < 4) {
+ RTC_LOG(LS_WARNING) << "ParseEvent payload too short";
+ return kPayloadTooShort;
+ }
+
+ event->event_no = payload[0];
+ event->end_bit = ((payload[1] & 0x80) != 0);
+ event->volume = (payload[1] & 0x3F);
+ event->duration = payload[2] << 8 | payload[3];
+ event->timestamp = rtp_timestamp;
+ return kOK;
+}
+
+// Inserts a DTMF event into the buffer. The event should be parsed from the
+// bit stream using the ParseEvent method above before inserting it in the
+// buffer.
+// DTMF events can be quite long, and in most cases the duration of the event
+// is not known when the first packet describing it is sent. To deal with that,
+// the RFC 4733 specifies that multiple packets are sent for one and the same
+// event as it is being created (typically, as the user is pressing the key).
+// These packets will all share the same start timestamp and event number,
+// while the duration will be the cumulative duration from the start. When
+// inserting a new event, the InsertEvent method tries to find a matching event
+// already in the buffer. If so, the new event is simply merged with the
+// existing one.
+int DtmfBuffer::InsertEvent(const DtmfEvent& event) {
+ if (event.event_no < 0 || event.event_no > 15 || event.volume < 0 ||
+ event.volume > 63 || event.duration <= 0 || event.duration > 65535) {
+ RTC_LOG(LS_WARNING) << "InsertEvent invalid parameters";
+ return kInvalidEventParameters;
+ }
+ DtmfList::iterator it = buffer_.begin();
+ while (it != buffer_.end()) {
+ if (MergeEvents(it, event)) {
+ // A matching event was found and the new event was merged.
+ return kOK;
+ }
+ ++it;
+ }
+ buffer_.push_back(event);
+ // Sort the buffer using CompareEvents to rank the events.
+ buffer_.sort(CompareEvents);
+ return kOK;
+}
+
+bool DtmfBuffer::GetEvent(uint32_t current_timestamp, DtmfEvent* event) {
+ DtmfList::iterator it = buffer_.begin();
+ while (it != buffer_.end()) {
+ // `event_end` is an estimate of where the current event ends. If the end
+ // bit is set, we know that the event ends at `timestamp` + `duration`.
+ uint32_t event_end = it->timestamp + it->duration;
+#ifdef LEGACY_BITEXACT
+ bool next_available = false;
+#endif
+ if (!it->end_bit) {
+ // If the end bit is not set, we allow extrapolation of the event for
+ // some time.
+ event_end += max_extrapolation_samples_;
+ DtmfList::iterator next = it;
+ ++next;
+ if (next != buffer_.end()) {
+ // If there is a next event in the buffer, we will not extrapolate over
+ // the start of that new event.
+ event_end = std::min(event_end, next->timestamp);
+#ifdef LEGACY_BITEXACT
+ next_available = true;
+#endif
+ }
+ }
+ if (current_timestamp >= it->timestamp &&
+ current_timestamp <= event_end) { // TODO(hlundin): Change to <.
+ // Found a matching event.
+ if (event) {
+ event->event_no = it->event_no;
+ event->end_bit = it->end_bit;
+ event->volume = it->volume;
+ event->duration = it->duration;
+ event->timestamp = it->timestamp;
+ }
+#ifdef LEGACY_BITEXACT
+ if (it->end_bit && current_timestamp + frame_len_samples_ >= event_end) {
+ // We are done playing this. Erase the event.
+ buffer_.erase(it);
+ }
+#endif
+ return true;
+ } else if (current_timestamp > event_end) { // TODO(hlundin): Change to >=.
+// Erase the old event. The erase operation returns a valid iterator
+// pointing to the next element in the list.
+#ifdef LEGACY_BITEXACT
+ if (!next_available) {
+ if (event) {
+ event->event_no = it->event_no;
+ event->end_bit = it->end_bit;
+ event->volume = it->volume;
+ event->duration = it->duration;
+ event->timestamp = it->timestamp;
+ }
+ it = buffer_.erase(it);
+ return true;
+ } else {
+ it = buffer_.erase(it);
+ }
+#else
+ it = buffer_.erase(it);
+#endif
+ } else {
+ ++it;
+ }
+ }
+ return false;
+}
+
+size_t DtmfBuffer::Length() const {
+ return buffer_.size();
+}
+
+bool DtmfBuffer::Empty() const {
+ return buffer_.empty();
+}
+
+int DtmfBuffer::SetSampleRate(int fs_hz) {
+ if (fs_hz != 8000 &&
+ fs_hz != 16000 &&
+ fs_hz != 32000 &&
+ fs_hz != 44100 &&
+ fs_hz != 48000) {
+ return kInvalidSampleRate;
+ }
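+  // Allow extrapolation of an event (no end bit yet) for at most 70 ms, and
+  // use a 10 ms frame length when deciding if a finished event is done.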
+ max_extrapolation_samples_ = 7 * fs_hz / 100;
+ frame_len_samples_ = fs_hz / 100;
+ return kOK;
+}
+
+// The method returns true if the two events are considered to be the same.
+// They are defined as equal if they share the same timestamp and event number.
+// The special case with long-lasting events that have to be split into segments
+// is not handled in this method. These will be treated as separate events in
+// the buffer.
+bool DtmfBuffer::SameEvent(const DtmfEvent& a, const DtmfEvent& b) {
+ return (a.event_no == b.event_no) && (a.timestamp == b.timestamp);
+}
+
+bool DtmfBuffer::MergeEvents(DtmfList::iterator it, const DtmfEvent& event) {
+ if (SameEvent(*it, event)) {
+ if (!it->end_bit) {
+ // Do not extend the duration of an event for which the end bit was
+ // already received.
+ it->duration = std::max(event.duration, it->duration);
+ }
+ if (event.end_bit) {
+ it->end_bit = true;
+ }
+ return true;
+ } else {
+ return false;
+ }
+}
+
+// Returns true if `a` goes before `b` in the sorting order ("`a` < `b`").
+// The events are ranked using their start timestamp (taking wrap-around into
+// account). In the unlikely situation that two events share the same start
+// timestamp, the event number is used to rank the two. Note that packets
+// that belong to the same event, and therefore share the same start
+// timestamp, have already been merged before the sort method is called.
+bool DtmfBuffer::CompareEvents(const DtmfEvent& a, const DtmfEvent& b) {
+ if (a.timestamp == b.timestamp) {
+ return a.event_no < b.event_no;
+ }
+ // Take wrap-around into account.
+ return (static_cast<uint32_t>(b.timestamp - a.timestamp) < 0xFFFFFFFF / 2);
+}
+} // namespace webrtc
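The wrap-around handling in CompareEvents is the standard modular-arithmetic
trick for RTP timestamps. A standalone illustration (ours, not part of the
patch):

    #include <cstdint>
    #include <cstdio>

    // Same predicate as DtmfBuffer::CompareEvents' wrap-around test: `a`
    // precedes `b` if the forward distance from `a` to `b`, computed in
    // unsigned 32-bit arithmetic, is less than half the timestamp range.
    bool Before(uint32_t a_ts, uint32_t b_ts) {
      return static_cast<uint32_t>(b_ts - a_ts) < 0xFFFFFFFF / 2;
    }

    int main() {
      std::printf("%d\n", Before(100, 200));                  // 1: plain case.
      std::printf("%d\n", Before(0xFFFFFF00u, 0x00000010u));  // 1: wraps past 0.
      std::printf("%d\n", Before(0x00000010u, 0xFFFFFF00u));  // 0: reverse order.
      return 0;
    }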
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/dtmf_buffer.h b/third_party/libwebrtc/modules/audio_coding/neteq/dtmf_buffer.h
new file mode 100644
index 0000000000..62b751525c
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/dtmf_buffer.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_DTMF_BUFFER_H_
+#define MODULES_AUDIO_CODING_NETEQ_DTMF_BUFFER_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <list>
+
+namespace webrtc {
+
+struct DtmfEvent {
+ uint32_t timestamp;
+ int event_no;
+ int volume;
+ int duration;
+ bool end_bit;
+
+ // Constructors
+ DtmfEvent()
+ : timestamp(0), event_no(0), volume(0), duration(0), end_bit(false) {}
+ DtmfEvent(uint32_t ts, int ev, int vol, int dur, bool end)
+ : timestamp(ts), event_no(ev), volume(vol), duration(dur), end_bit(end) {}
+};
+
+// This is the buffer holding DTMF events while waiting for them to be played.
+class DtmfBuffer {
+ public:
+ enum BufferReturnCodes {
+ kOK = 0,
+ kInvalidPointer,
+ kPayloadTooShort,
+ kInvalidEventParameters,
+ kInvalidSampleRate
+ };
+
+ // Set up the buffer for use at sample rate `fs_hz`.
+ explicit DtmfBuffer(int fs_hz);
+
+ virtual ~DtmfBuffer();
+
+ DtmfBuffer(const DtmfBuffer&) = delete;
+ DtmfBuffer& operator=(const DtmfBuffer&) = delete;
+
+ // Flushes the buffer.
+ virtual void Flush();
+
+ // Static method to parse 4 bytes from `payload` as a DTMF event (RFC 4733)
+ // and write the parsed information into the struct `event`. Input variable
+ // `rtp_timestamp` is simply copied into the struct.
+ static int ParseEvent(uint32_t rtp_timestamp,
+ const uint8_t* payload,
+ size_t payload_length_bytes,
+ DtmfEvent* event);
+
+ // Inserts `event` into the buffer. The method looks for a matching event and
+ // merges the two if a match is found.
+ virtual int InsertEvent(const DtmfEvent& event);
+
+ // Checks if a DTMF event should be played at time `current_timestamp`. If so,
+ // the method returns true; otherwise false. The parameters of the event to
+ // play will be written to `event`.
+ virtual bool GetEvent(uint32_t current_timestamp, DtmfEvent* event);
+
+ // Number of events in the buffer.
+ virtual size_t Length() const;
+
+ virtual bool Empty() const;
+
+ // Set a new sample rate.
+ virtual int SetSampleRate(int fs_hz);
+
+ private:
+ typedef std::list<DtmfEvent> DtmfList;
+
+ int max_extrapolation_samples_;
+ int frame_len_samples_; // TODO(hlundin): Remove this later.
+
+ // Compares two events and returns true if they are the same.
+ static bool SameEvent(const DtmfEvent& a, const DtmfEvent& b);
+
+ // Merges `event` to the event pointed out by `it`. The method checks that
+ // the two events are the same (using the SameEvent method), and merges them
+ // if that was the case, returning true. If the events are not the same, false
+ // is returned.
+ bool MergeEvents(DtmfList::iterator it, const DtmfEvent& event);
+
+ // Method used by the sort algorithm to rank events in the buffer.
+ static bool CompareEvents(const DtmfEvent& a, const DtmfEvent& b);
+
+ DtmfList buffer_;
+};
+
+} // namespace webrtc
+#endif // MODULES_AUDIO_CODING_NETEQ_DTMF_BUFFER_H_
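A minimal sketch of the intended call flow for this interface (ours; the
function and variable names are invented, and in NetEQ proper these calls are
made from the packet-insertion and decision logic):

    #include "modules/audio_coding/neteq/dtmf_buffer.h"

    void HandleTelephoneEvent(webrtc::DtmfBuffer* buffer,
                              uint32_t rtp_timestamp,
                              const uint8_t* payload,
                              size_t payload_length,
                              uint32_t playout_timestamp) {
      webrtc::DtmfEvent event;
      if (webrtc::DtmfBuffer::ParseEvent(rtp_timestamp, payload,
                                         payload_length, &event) !=
          webrtc::DtmfBuffer::kOK) {
        return;  // Malformed payload; drop it.
      }
      buffer->InsertEvent(event);  // Merges with a matching event if present.
      webrtc::DtmfEvent to_play;
      if (buffer->GetEvent(playout_timestamp, &to_play)) {
        // A tone for `to_play.event_no` should be generated at this instant.
      }
    }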
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/dtmf_buffer_unittest.cc b/third_party/libwebrtc/modules/audio_coding/neteq/dtmf_buffer_unittest.cc
new file mode 100644
index 0000000000..83745b6c09
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/dtmf_buffer_unittest.cc
@@ -0,0 +1,297 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/dtmf_buffer.h"
+
+#ifdef WIN32
+#include <winsock2.h> // ntohl()
+#else
+#include <arpa/inet.h> // ntohl()
+#endif
+
+#include <iostream>
+
+#include "test/gtest.h"
+
+// Modify the tests so that they pass with the modifications done to DtmfBuffer
+// for backwards bit-exactness. Once bit-exactness is no longer required, this
+// #define should be removed (and the code that it enables).
+#define LEGACY_BITEXACT
+
+namespace webrtc {
+
+static int sample_rate_hz = 8000;
+
+static uint32_t MakeDtmfPayload(int event, bool end, int volume, int duration) {
+ uint32_t payload = 0;
+ // 0 1 2 3
+ // 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ // | event |E|R| volume | duration |
+ // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ payload |= (event & 0x00FF) << 24;
+ payload |= (end ? 0x00800000 : 0x00000000);
+ payload |= (volume & 0x003F) << 16;
+ payload |= (duration & 0xFFFF);
+ payload = ntohl(payload);
+ return payload;
+}
+
+static bool EqualEvents(const DtmfEvent& a, const DtmfEvent& b) {
+ return (a.duration == b.duration && a.end_bit == b.end_bit &&
+ a.event_no == b.event_no && a.timestamp == b.timestamp &&
+ a.volume == b.volume);
+}
+
+TEST(DtmfBuffer, CreateAndDestroy) {
+ DtmfBuffer* buffer = new DtmfBuffer(sample_rate_hz);
+ delete buffer;
+}
+
+// Test the event parser.
+TEST(DtmfBuffer, ParseEvent) {
+ int event_no = 7;
+ bool end_bit = true;
+ int volume = 17;
+ int duration = 4711;
+ uint32_t timestamp = 0x12345678;
+ uint32_t payload = MakeDtmfPayload(event_no, end_bit, volume, duration);
+ uint8_t* payload_ptr = reinterpret_cast<uint8_t*>(&payload);
+ DtmfEvent event;
+ EXPECT_EQ(DtmfBuffer::kOK, DtmfBuffer::ParseEvent(timestamp, payload_ptr,
+ sizeof(payload), &event));
+ EXPECT_EQ(duration, event.duration);
+ EXPECT_EQ(end_bit, event.end_bit);
+ EXPECT_EQ(event_no, event.event_no);
+ EXPECT_EQ(timestamp, event.timestamp);
+ EXPECT_EQ(volume, event.volume);
+
+ EXPECT_EQ(DtmfBuffer::kPayloadTooShort,
+ DtmfBuffer::ParseEvent(timestamp, payload_ptr, 3, &event));
+}
+
+TEST(DtmfBuffer, SimpleInsertAndGet) {
+ int event_no = 7;
+ bool end_bit = true;
+ int volume = 17;
+ int duration = 4711;
+ uint32_t timestamp = 0x12345678;
+ DtmfEvent event(timestamp, event_no, volume, duration, end_bit);
+ DtmfBuffer buffer(sample_rate_hz);
+ EXPECT_EQ(DtmfBuffer::kOK, buffer.InsertEvent(event));
+ EXPECT_EQ(1u, buffer.Length());
+ EXPECT_FALSE(buffer.Empty());
+ DtmfEvent out_event;
+ // Too early to get event.
+ EXPECT_FALSE(buffer.GetEvent(timestamp - 10, &out_event));
+ EXPECT_EQ(1u, buffer.Length());
+ EXPECT_FALSE(buffer.Empty());
+ // Get the event at its starting timestamp.
+ EXPECT_TRUE(buffer.GetEvent(timestamp, &out_event));
+ EXPECT_TRUE(EqualEvents(event, out_event));
+ EXPECT_EQ(1u, buffer.Length());
+ EXPECT_FALSE(buffer.Empty());
+ // Get the event some time into the event.
+ EXPECT_TRUE(buffer.GetEvent(timestamp + duration / 2, &out_event));
+ EXPECT_TRUE(EqualEvents(event, out_event));
+ EXPECT_EQ(1u, buffer.Length());
+ EXPECT_FALSE(buffer.Empty());
+// Give a "current" timestamp after the event has ended.
+#ifdef LEGACY_BITEXACT
+ EXPECT_TRUE(buffer.GetEvent(timestamp + duration + 10, &out_event));
+#endif
+ EXPECT_FALSE(buffer.GetEvent(timestamp + duration + 10, &out_event));
+ EXPECT_EQ(0u, buffer.Length());
+ EXPECT_TRUE(buffer.Empty());
+}
+
+TEST(DtmfBuffer, MergingPackets) {
+ int event_no = 0;
+ bool end_bit = false;
+ int volume = 17;
+ int duration = 80;
+ uint32_t timestamp = 0x12345678;
+ DtmfEvent event(timestamp, event_no, volume, duration, end_bit);
+ DtmfBuffer buffer(sample_rate_hz);
+ EXPECT_EQ(DtmfBuffer::kOK, buffer.InsertEvent(event));
+
+ event.duration += 80;
+ EXPECT_EQ(DtmfBuffer::kOK, buffer.InsertEvent(event));
+
+ event.duration += 80;
+ event.end_bit = true;
+ EXPECT_EQ(DtmfBuffer::kOK, buffer.InsertEvent(event));
+
+ EXPECT_EQ(1u, buffer.Length());
+
+ DtmfEvent out_event;
+ EXPECT_TRUE(buffer.GetEvent(timestamp, &out_event));
+ EXPECT_TRUE(EqualEvents(event, out_event));
+}
+
+// This test case inserts one shorter event completely overlapped by one longer
+// event. The expected outcome is that only the longer event is played.
+TEST(DtmfBuffer, OverlappingEvents) {
+ int event_no = 0;
+ bool end_bit = true;
+ int volume = 1;
+ int duration = 80;
+ uint32_t timestamp = 0x12345678 + 80;
+ DtmfEvent short_event(timestamp, event_no, volume, duration, end_bit);
+ DtmfBuffer buffer(sample_rate_hz);
+ EXPECT_EQ(DtmfBuffer::kOK, buffer.InsertEvent(short_event));
+
+ event_no = 10;
+ end_bit = false;
+ timestamp = 0x12345678;
+ DtmfEvent long_event(timestamp, event_no, volume, duration, end_bit);
+ EXPECT_EQ(DtmfBuffer::kOK, buffer.InsertEvent(long_event));
+
+ long_event.duration += 80;
+ EXPECT_EQ(DtmfBuffer::kOK, buffer.InsertEvent(long_event));
+
+ long_event.duration += 80;
+ long_event.end_bit = true;
+ EXPECT_EQ(DtmfBuffer::kOK, buffer.InsertEvent(long_event));
+
+ EXPECT_EQ(2u, buffer.Length());
+
+ DtmfEvent out_event;
+ // Expect to get the long event.
+ EXPECT_TRUE(buffer.GetEvent(timestamp, &out_event));
+ EXPECT_TRUE(EqualEvents(long_event, out_event));
+// Expect no more events.
+#ifdef LEGACY_BITEXACT
+ EXPECT_TRUE(
+ buffer.GetEvent(timestamp + long_event.duration + 10, &out_event));
+ EXPECT_TRUE(EqualEvents(long_event, out_event));
+ EXPECT_TRUE(
+ buffer.GetEvent(timestamp + long_event.duration + 10, &out_event));
+ EXPECT_TRUE(EqualEvents(short_event, out_event));
+#else
+ EXPECT_FALSE(
+ buffer.GetEvent(timestamp + long_event.duration + 10, &out_event));
+#endif
+ EXPECT_TRUE(buffer.Empty());
+}
+
+TEST(DtmfBuffer, ExtrapolationTime) {
+ int event_no = 0;
+ bool end_bit = false;
+ int volume = 1;
+ int duration = 80;
+ uint32_t timestamp = 0x12345678;
+ DtmfEvent event1(timestamp, event_no, volume, duration, end_bit);
+ DtmfBuffer buffer(sample_rate_hz);
+ EXPECT_EQ(DtmfBuffer::kOK, buffer.InsertEvent(event1));
+ EXPECT_EQ(1u, buffer.Length());
+
+ DtmfEvent out_event;
+ // Get the event at the start.
+ EXPECT_TRUE(buffer.GetEvent(timestamp, &out_event));
+ EXPECT_TRUE(EqualEvents(event1, out_event));
+ // Also get the event 100 samples after the end of the event (since we're
+ // missing the end bit).
+ uint32_t timestamp_now = timestamp + duration + 100;
+ EXPECT_TRUE(buffer.GetEvent(timestamp_now, &out_event));
+ EXPECT_TRUE(EqualEvents(event1, out_event));
+ // Insert another event starting back-to-back with the previous event.
+ timestamp += duration;
+ event_no = 1;
+ DtmfEvent event2(timestamp, event_no, volume, duration, end_bit);
+ EXPECT_EQ(DtmfBuffer::kOK, buffer.InsertEvent(event2));
+ EXPECT_EQ(2u, buffer.Length());
+ // Now we expect to get the new event when supplying `timestamp_now`.
+ EXPECT_TRUE(buffer.GetEvent(timestamp_now, &out_event));
+ EXPECT_TRUE(EqualEvents(event2, out_event));
+  // Expect the first event to be erased now.
+ EXPECT_EQ(1u, buffer.Length());
+ // Move `timestamp_now` to more than 560 samples after the end of the second
+ // event. Expect that event to be erased.
+ timestamp_now = timestamp + duration + 600;
+#ifdef LEGACY_BITEXACT
+ EXPECT_TRUE(buffer.GetEvent(timestamp_now, &out_event));
+#endif
+ EXPECT_FALSE(buffer.GetEvent(timestamp_now, &out_event));
+ EXPECT_TRUE(buffer.Empty());
+}
+
+TEST(DtmfBuffer, TimestampWraparound) {
+ int event_no = 0;
+ bool end_bit = true;
+ int volume = 1;
+ int duration = 80;
+ uint32_t timestamp1 = 0xFFFFFFFF - duration;
+ DtmfEvent event1(timestamp1, event_no, volume, duration, end_bit);
+ uint32_t timestamp2 = 0;
+ DtmfEvent event2(timestamp2, event_no, volume, duration, end_bit);
+ DtmfBuffer buffer(sample_rate_hz);
+ EXPECT_EQ(DtmfBuffer::kOK, buffer.InsertEvent(event1));
+ EXPECT_EQ(DtmfBuffer::kOK, buffer.InsertEvent(event2));
+ EXPECT_EQ(2u, buffer.Length());
+ DtmfEvent out_event;
+ EXPECT_TRUE(buffer.GetEvent(timestamp1, &out_event));
+ EXPECT_TRUE(EqualEvents(event1, out_event));
+#ifdef LEGACY_BITEXACT
+ EXPECT_EQ(1u, buffer.Length());
+#else
+ EXPECT_EQ(2u, buffer.Length());
+#endif
+
+ buffer.Flush();
+ // Reverse the insert order. Expect same results.
+ EXPECT_EQ(DtmfBuffer::kOK, buffer.InsertEvent(event2));
+ EXPECT_EQ(DtmfBuffer::kOK, buffer.InsertEvent(event1));
+ EXPECT_EQ(2u, buffer.Length());
+ EXPECT_TRUE(buffer.GetEvent(timestamp1, &out_event));
+ EXPECT_TRUE(EqualEvents(event1, out_event));
+#ifdef LEGACY_BITEXACT
+ EXPECT_EQ(1u, buffer.Length());
+#else
+ EXPECT_EQ(2u, buffer.Length());
+#endif
+}
+
+TEST(DtmfBuffer, InvalidEvents) {
+ int event_no = 0;
+ bool end_bit = true;
+ int volume = 1;
+ int duration = 80;
+ uint32_t timestamp = 0x12345678;
+ DtmfEvent event(timestamp, event_no, volume, duration, end_bit);
+ DtmfBuffer buffer(sample_rate_hz);
+
+ // Invalid event number.
+ event.event_no = -1;
+ EXPECT_EQ(DtmfBuffer::kInvalidEventParameters, buffer.InsertEvent(event));
+ event.event_no = 16;
+ EXPECT_EQ(DtmfBuffer::kInvalidEventParameters, buffer.InsertEvent(event));
+  event.event_no = 0;  // Valid value.
+
+ // Invalid volume.
+ event.volume = -1;
+ EXPECT_EQ(DtmfBuffer::kInvalidEventParameters, buffer.InsertEvent(event));
+ event.volume = 64;
+ EXPECT_EQ(DtmfBuffer::kInvalidEventParameters, buffer.InsertEvent(event));
+  event.volume = 0;  // Valid value.
+
+ // Invalid duration.
+ event.duration = -1;
+ EXPECT_EQ(DtmfBuffer::kInvalidEventParameters, buffer.InsertEvent(event));
+ event.duration = 0;
+ EXPECT_EQ(DtmfBuffer::kInvalidEventParameters, buffer.InsertEvent(event));
+ event.duration = 0xFFFF + 1;
+ EXPECT_EQ(DtmfBuffer::kInvalidEventParameters, buffer.InsertEvent(event));
+  event.duration = 1;  // Valid value.
+
+ // Finish with a valid event, just to verify that all is ok.
+ EXPECT_EQ(DtmfBuffer::kOK, buffer.InsertEvent(event));
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/dtmf_tone_generator.cc b/third_party/libwebrtc/modules/audio_coding/neteq/dtmf_tone_generator.cc
new file mode 100644
index 0000000000..9061e27c67
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/dtmf_tone_generator.cc
@@ -0,0 +1,215 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+// This class provides a generator for DTMF tones. The tone generation is based
+// on a sinusoid recursion. Each sinusoid is generated using a recursion
+// formula; x[n] = a * x[n-1] - x[n-2], where the coefficient
+// a = 2*cos(2*pi*f/fs). The recursion is started with x[-1] = 0 and
+// x[-2] = sin(2*pi*f/fs). (Note that with this initialization, the resulting
+// sinusoid gets a "negative" rotation; x[n] = sin(-2*pi*f/fs * n + phi), but
+// kept this way due to historical reasons.)
+// TODO(hlundin): Change to positive rotation?
+//
+// Each key on the telephone keypad corresponds to an "event", 0-15. Each event
+// is mapped to a tone pair, with a low and a high frequency. There are four
+// low and four high frequencies, each corresponding to a row and column,
+// respectively, on the keypad as illustrated below.
+//
+// 1209 Hz 1336 Hz 1477 Hz 1633 Hz
+// 697 Hz 1 2 3 12
+// 770 Hz 4 5 6 13
+// 852 Hz 7 8 9 14
+// 941 Hz 10 0 11 15
+
+#include "modules/audio_coding/neteq/dtmf_tone_generator.h"
+
+#include "modules/audio_coding/neteq/audio_vector.h"
+#include "rtc_base/arraysize.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+// The filter coefficient a = 2*cos(2*pi*f/fs) for the low frequency tone, for
+// sample rates fs = {8000, 16000, 32000, 48000} Hz, and events 0 through 15.
+// Values are in Q14.
+const int DtmfToneGenerator::kCoeff1[4][16] = {
+ {24219, 27980, 27980, 27980, 26956, 26956, 26956, 25701, 25701, 25701,
+ 24219, 24219, 27980, 26956, 25701, 24219},
+ {30556, 31548, 31548, 31548, 31281, 31281, 31281, 30951, 30951, 30951,
+ 30556, 30556, 31548, 31281, 30951, 30556},
+ {32210, 32462, 32462, 32462, 32394, 32394, 32394, 32311, 32311, 32311,
+ 32210, 32210, 32462, 32394, 32311, 32210},
+ {32520, 32632, 32632, 32632, 32602, 32602, 32602, 32564, 32564, 32564,
+ 32520, 32520, 32632, 32602, 32564, 32520}};
+
+// The filter coefficient a = 2*cos(2*pi*f/fs) for the high frequency tone, for
+// sample rates fs = {8000, 16000, 32000, 48000} Hz, and events 0 through 15.
+// Values are in Q14.
+const int DtmfToneGenerator::kCoeff2[4][16] = {
+ {16325, 19073, 16325, 13085, 19073, 16325, 13085, 19073, 16325, 13085,
+ 19073, 13085, 9315, 9315, 9315, 9315},
+ {28361, 29144, 28361, 27409, 29144, 28361, 27409, 29144, 28361, 27409,
+ 29144, 27409, 26258, 26258, 26258, 26258},
+ {31647, 31849, 31647, 31400, 31849, 31647, 31400, 31849, 31647, 31400,
+ 31849, 31400, 31098, 31098, 31098, 31098},
+ {32268, 32359, 32268, 32157, 32359, 32268, 32157, 32359, 32268, 32157,
+ 32359, 32157, 32022, 32022, 32022, 32022}};
+
+// The initialization value x[-2] = sin(2*pi*f/fs) for the low frequency tone,
+// for sample rates fs = {8000, 16000, 32000, 48000} Hz, and events 0-15.
+// Values are in Q14.
+const int DtmfToneGenerator::kInitValue1[4][16] = {
+ {11036, 8528, 8528, 8528, 9315, 9315, 9315, 10163, 10163, 10163, 11036,
+ 11036, 8528, 9315, 10163, 11036},
+ {5918, 4429, 4429, 4429, 4879, 4879, 4879, 5380, 5380, 5380, 5918, 5918,
+ 4429, 4879, 5380, 5918},
+ {3010, 2235, 2235, 2235, 2468, 2468, 2468, 2728, 2728, 2728, 3010, 3010,
+ 2235, 2468, 2728, 3010},
+ {2013, 1493, 1493, 1493, 1649, 1649, 1649, 1823, 1823, 1823, 2013, 2013,
+ 1493, 1649, 1823, 2013}};
+
+// The initialization value x[-2] = sin(2*pi*f/fs) for the high frequency tone,
+// for sample rates fs = {8000, 16000, 32000, 48000} Hz, and events 0-15.
+// Values are in Q14.
+const int DtmfToneGenerator::kInitValue2[4][16] = {
+ {14206, 13323, 14206, 15021, 13323, 14206, 15021, 13323, 14206, 15021,
+ 13323, 15021, 15708, 15708, 15708, 15708},
+ {8207, 7490, 8207, 8979, 7490, 8207, 8979, 7490, 8207, 8979, 7490, 8979,
+ 9801, 9801, 9801, 9801},
+ {4249, 3853, 4249, 4685, 3853, 4249, 4685, 3853, 4249, 4685, 3853, 4685,
+ 5164, 5164, 5164, 5164},
+ {2851, 2582, 2851, 3148, 2582, 2851, 3148, 2582, 2851, 3148, 2582, 3148,
+ 3476, 3476, 3476, 3476}};
+
+// Amplitude multipliers for volume values 0 through 63, corresponding to
+// 0 dBm0 through -63 dBm0. Values are in Q14.
+// for a in range(0, 64):
+// print round(16141.0 * 10**(-float(a)/20))
+const int DtmfToneGenerator::kAmplitude[64] = {
+ 16141, 14386, 12821, 11427, 10184, 9077, 8090, 7210, 6426, 5727, 5104,
+ 4549, 4054, 3614, 3221, 2870, 2558, 2280, 2032, 1811, 1614, 1439,
+ 1282, 1143, 1018, 908, 809, 721, 643, 573, 510, 455, 405,
+ 361, 322, 287, 256, 228, 203, 181, 161, 144, 128, 114,
+ 102, 91, 81, 72, 64, 57, 51, 45, 41, 36, 32,
+ 29, 26, 23, 20, 18, 16, 14, 13, 11};
+
+// Constructor.
+DtmfToneGenerator::DtmfToneGenerator()
+ : initialized_(false), coeff1_(0), coeff2_(0), amplitude_(0) {}
+
+// Initialize the DTMF generator with sample rate fs Hz (8000, 16000, 32000,
+// 48000), event (0-15) and attenuation (0-63 dB).
+// Returns 0 on success, otherwise an error code.
+int DtmfToneGenerator::Init(int fs, int event, int attenuation) {
+ initialized_ = false;
+ size_t fs_index;
+ if (fs == 8000) {
+ fs_index = 0;
+ } else if (fs == 16000) {
+ fs_index = 1;
+ } else if (fs == 32000) {
+ fs_index = 2;
+ } else if (fs == 48000) {
+ fs_index = 3;
+ } else {
+ RTC_DCHECK_NOTREACHED();
+    fs_index = 1;  // Default to 16000 Hz.
+ }
+
+ if (event < 0 || event > 15) {
+ return kParameterError; // Invalid event number.
+ }
+
+ if (attenuation < 0 || attenuation > 63) {
+ return kParameterError; // Invalid attenuation.
+ }
+
+ // Look up oscillator coefficient for low and high frequencies.
+ RTC_DCHECK_LE(0, fs_index);
+ RTC_DCHECK_GT(arraysize(kCoeff1), fs_index);
+ RTC_DCHECK_GT(arraysize(kCoeff2), fs_index);
+ RTC_DCHECK_LE(0, event);
+ RTC_DCHECK_GT(arraysize(kCoeff1[fs_index]), event);
+ RTC_DCHECK_GT(arraysize(kCoeff2[fs_index]), event);
+ coeff1_ = kCoeff1[fs_index][event];
+ coeff2_ = kCoeff2[fs_index][event];
+
+ // Look up amplitude multiplier.
+ RTC_DCHECK_LE(0, attenuation);
+ RTC_DCHECK_GT(arraysize(kAmplitude), attenuation);
+ amplitude_ = kAmplitude[attenuation];
+
+ // Initialize sample history.
+ RTC_DCHECK_LE(0, fs_index);
+ RTC_DCHECK_GT(arraysize(kInitValue1), fs_index);
+ RTC_DCHECK_GT(arraysize(kInitValue2), fs_index);
+ RTC_DCHECK_LE(0, event);
+ RTC_DCHECK_GT(arraysize(kInitValue1[fs_index]), event);
+ RTC_DCHECK_GT(arraysize(kInitValue2[fs_index]), event);
+ sample_history1_[0] = kInitValue1[fs_index][event];
+ sample_history1_[1] = 0;
+ sample_history2_[0] = kInitValue2[fs_index][event];
+ sample_history2_[1] = 0;
+
+ initialized_ = true;
+ return 0;
+}
+
+// Reset tone generator to uninitialized state.
+void DtmfToneGenerator::Reset() {
+ initialized_ = false;
+}
+
+// Generates `num_samples` samples of DTMF signal and writes them to `output`.
+int DtmfToneGenerator::Generate(size_t num_samples, AudioMultiVector* output) {
+ if (!initialized_) {
+ return kNotInitialized;
+ }
+
+ if (!output) {
+ return kParameterError;
+ }
+
+ output->AssertSize(num_samples);
+ for (size_t i = 0; i < num_samples; ++i) {
+ // Use recursion formula y[n] = a * y[n - 1] - y[n - 2].
+ int16_t temp_val_low =
+ ((coeff1_ * sample_history1_[1] + 8192) >> 14) - sample_history1_[0];
+ int16_t temp_val_high =
+ ((coeff2_ * sample_history2_[1] + 8192) >> 14) - sample_history2_[0];
+
+ // Update recursion memory.
+ sample_history1_[0] = sample_history1_[1];
+ sample_history1_[1] = temp_val_low;
+ sample_history2_[0] = sample_history2_[1];
+ sample_history2_[1] = temp_val_high;
+
+ // Attenuate the low frequency tone 3 dB.
+ int32_t temp_val =
+ kAmpMultiplier * temp_val_low + temp_val_high * (1 << 15);
+ // Normalize the signal to Q14 with proper rounding.
+ temp_val = (temp_val + 16384) >> 15;
+ // Scale the signal to correct volume.
+ (*output)[0][i] =
+ static_cast<int16_t>((temp_val * amplitude_ + 8192) >> 14);
+ }
+ // Copy first channel to all other channels.
+ for (size_t channel = 1; channel < output->Channels(); ++channel) {
+ output->CopyChannel(0, channel);
+ }
+
+ return static_cast<int>(num_samples);
+}
+
+bool DtmfToneGenerator::initialized() const {
+ return initialized_;
+}
+
+} // namespace webrtc
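The fixed-point recursion in Generate implements the identity
sin((n+1)*theta) = 2*cos(theta)*sin(n*theta) - sin((n-1)*theta) with
theta = 2*pi*f/fs. A floating-point reference (ours, for illustration only)
shows how the seeding from Init produces the negatively rotating sinusoid
mentioned in the comment at the top of this file:

    #include <cmath>
    #include <cstdio>

    int main() {
      const double kPi = 3.14159265358979323846;
      const double f = 697.0, fs = 8000.0;  // Low tone of event 1.
      const double a = 2.0 * std::cos(2.0 * kPi * f / fs);
      // Seed as in the class comment: x[-2] = sin(2*pi*f/fs), x[-1] = 0.
      double x_prev2 = std::sin(2.0 * kPi * f / fs);
      double x_prev1 = 0.0;
      for (int n = 0; n < 5; ++n) {
        double x = a * x_prev1 - x_prev2;
        // Each output matches sin(-2*pi*f/fs * (n + 1)) to machine precision.
        std::printf("%9.6f %9.6f\n", x,
                    std::sin(-2.0 * kPi * f / fs * (n + 1)));
        x_prev2 = x_prev1;
        x_prev1 = x;
      }
      return 0;
    }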
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/dtmf_tone_generator.h b/third_party/libwebrtc/modules/audio_coding/neteq/dtmf_tone_generator.h
new file mode 100644
index 0000000000..35114f4f49
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/dtmf_tone_generator.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_DTMF_TONE_GENERATOR_H_
+#define MODULES_AUDIO_CODING_NETEQ_DTMF_TONE_GENERATOR_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "modules/audio_coding/neteq/audio_multi_vector.h"
+
+namespace webrtc {
+
+// This class provides a generator for DTMF tones.
+class DtmfToneGenerator {
+ public:
+ enum ReturnCodes {
+ kNotInitialized = -1,
+ kParameterError = -2,
+ };
+
+ DtmfToneGenerator();
+ virtual ~DtmfToneGenerator() {}
+
+ DtmfToneGenerator(const DtmfToneGenerator&) = delete;
+ DtmfToneGenerator& operator=(const DtmfToneGenerator&) = delete;
+
+ virtual int Init(int fs, int event, int attenuation);
+ virtual void Reset();
+ virtual int Generate(size_t num_samples, AudioMultiVector* output);
+ virtual bool initialized() const;
+
+ private:
+ static const int kCoeff1[4][16]; // 1st oscillator model coefficient table.
+ static const int kCoeff2[4][16]; // 2nd oscillator model coefficient table.
+ static const int kInitValue1[4][16]; // Initialization for 1st oscillator.
+ static const int kInitValue2[4][16]; // Initialization for 2nd oscillator.
+ static const int kAmplitude[64]; // Amplitude for 0 through -63 dBm0.
+ static const int16_t kAmpMultiplier = 23171; // 3 dB attenuation (in Q15).
+
+ bool initialized_; // True if generator is initialized properly.
+ int coeff1_; // 1st oscillator coefficient for this event.
+ int coeff2_; // 2nd oscillator coefficient for this event.
+ int amplitude_; // Amplitude for this event.
+ int16_t sample_history1_[2]; // Last 2 samples for the 1st oscillator.
+ int16_t sample_history2_[2]; // Last 2 samples for the 2nd oscillator.
+};
+
+} // namespace webrtc
+#endif // MODULES_AUDIO_CODING_NETEQ_DTMF_TONE_GENERATOR_H_
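The Q14 tables defined in the .cc file follow directly from the tone
frequencies. As a spot check (ours), the sketch below reproduces
kCoeff1[0][1] = 27980 and kInitValue1[0][1] = 8528, i.e. the 697 Hz row at
8 kHz:

    #include <cmath>
    #include <cstdio>

    int main() {
      const double kPi = 3.14159265358979323846;
      const double f = 697.0, fs = 8000.0;  // Low tone of event 1 at 8 kHz.
      // a = 2*cos(2*pi*f/fs) in Q14 -> 27980.
      std::printf("coeff = %ld\n",
                  std::lround(2.0 * std::cos(2.0 * kPi * f / fs) * (1 << 14)));
      // x[-2] = sin(2*pi*f/fs) in Q14 -> 8528.
      std::printf("init  = %ld\n",
                  std::lround(std::sin(2.0 * kPi * f / fs) * (1 << 14)));
      return 0;
    }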
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/dtmf_tone_generator_unittest.cc b/third_party/libwebrtc/modules/audio_coding/neteq/dtmf_tone_generator_unittest.cc
new file mode 100644
index 0000000000..e843706dd3
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/dtmf_tone_generator_unittest.cc
@@ -0,0 +1,180 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+// Unit tests for DtmfToneGenerator class.
+
+#include "modules/audio_coding/neteq/dtmf_tone_generator.h"
+
+#include <math.h>
+
+#include "common_audio/include/audio_util.h"
+#include "modules/audio_coding/neteq/audio_multi_vector.h"
+#include "rtc_base/strings/string_builder.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+class DtmfToneGeneratorTest : public ::testing::Test {
+ protected:
+ static const double kLowFreqHz[16];
+ static const double kHighFreqHz[16];
+  // This is the base attenuation applied in all cases.
+  const double kBaseAttenuation = 16141.0 / 16384.0;
+  const double k3dbAttenuation = 23171.0 / 32768.0;
+ const int kNumSamples = 10;
+
+ void TestAllTones(int fs_hz, int channels) {
+ AudioMultiVector signal(channels);
+
+ for (int event = 0; event <= 15; ++event) {
+ rtc::StringBuilder ss;
+ ss << "Checking event " << event << " at sample rate " << fs_hz;
+ SCOPED_TRACE(ss.str());
+ const int kAttenuation = 0;
+ ASSERT_EQ(0, tone_gen_.Init(fs_hz, event, kAttenuation));
+ EXPECT_TRUE(tone_gen_.initialized());
+ EXPECT_EQ(kNumSamples, tone_gen_.Generate(kNumSamples, &signal));
+
+ double f1 = kLowFreqHz[event];
+ double f2 = kHighFreqHz[event];
+ const double pi = 3.14159265358979323846;
+
+ for (int n = 0; n < kNumSamples; ++n) {
+ double x = k3dbAttenuation * sin(2.0 * pi * f1 / fs_hz * (-n - 1)) +
+ sin(2.0 * pi * f2 / fs_hz * (-n - 1));
+ x *= kBaseAttenuation;
+ x = ldexp(x, 14); // Scale to Q14.
+ for (int channel = 0; channel < channels; ++channel) {
+ EXPECT_NEAR(x, static_cast<double>(signal[channel][n]), 25);
+ }
+ }
+
+ tone_gen_.Reset();
+ EXPECT_FALSE(tone_gen_.initialized());
+ }
+ }
+
+ void TestAmplitudes(int fs_hz, int channels) {
+ AudioMultiVector signal(channels);
+ AudioMultiVector ref_signal(channels);
+
+ const int event_vec[] = {0, 4, 9, 13}; // Test a few events.
+ for (int e = 0; e < 4; ++e) {
+ int event = event_vec[e];
+ // Create full-scale reference.
+ ASSERT_EQ(0, tone_gen_.Init(fs_hz, event, 0)); // 0 attenuation.
+ EXPECT_EQ(kNumSamples, tone_gen_.Generate(kNumSamples, &ref_signal));
+ // Test every 5 steps (to save time).
+ for (int attenuation = 1; attenuation <= 63; attenuation += 5) {
+ rtc::StringBuilder ss;
+ ss << "Checking event " << event << " at sample rate " << fs_hz;
+ ss << "; attenuation " << attenuation;
+ SCOPED_TRACE(ss.str());
+ ASSERT_EQ(0, tone_gen_.Init(fs_hz, event, attenuation));
+ EXPECT_EQ(kNumSamples, tone_gen_.Generate(kNumSamples, &signal));
+ for (int n = 0; n < kNumSamples; ++n) {
+ double attenuation_factor =
+ DbToRatio(-static_cast<float>(attenuation));
+ // Verify that the attenuation is correct.
+ for (int channel = 0; channel < channels; ++channel) {
+ EXPECT_NEAR(attenuation_factor * ref_signal[channel][n],
+ signal[channel][n], 2);
+ }
+ }
+
+ tone_gen_.Reset();
+ }
+ }
+ }
+
+ DtmfToneGenerator tone_gen_;
+};
+
+// Low and high frequencies for events 0 through 15.
+const double DtmfToneGeneratorTest::kLowFreqHz[16] = {
+ 941.0, 697.0, 697.0, 697.0, 770.0, 770.0, 770.0, 852.0,
+ 852.0, 852.0, 941.0, 941.0, 697.0, 770.0, 852.0, 941.0};
+const double DtmfToneGeneratorTest::kHighFreqHz[16] = {
+ 1336.0, 1209.0, 1336.0, 1477.0, 1209.0, 1336.0, 1477.0, 1209.0,
+ 1336.0, 1477.0, 1209.0, 1477.0, 1633.0, 1633.0, 1633.0, 1633.0};
+
+TEST_F(DtmfToneGeneratorTest, Test8000Mono) {
+ TestAllTones(8000, 1);
+ TestAmplitudes(8000, 1);
+}
+
+TEST_F(DtmfToneGeneratorTest, Test16000Mono) {
+ TestAllTones(16000, 1);
+ TestAmplitudes(16000, 1);
+}
+
+TEST_F(DtmfToneGeneratorTest, Test32000Mono) {
+ TestAllTones(32000, 1);
+ TestAmplitudes(32000, 1);
+}
+
+TEST_F(DtmfToneGeneratorTest, Test48000Mono) {
+ TestAllTones(48000, 1);
+ TestAmplitudes(48000, 1);
+}
+
+TEST_F(DtmfToneGeneratorTest, Test8000Stereo) {
+ TestAllTones(8000, 2);
+ TestAmplitudes(8000, 2);
+}
+
+TEST_F(DtmfToneGeneratorTest, Test16000Stereo) {
+ TestAllTones(16000, 2);
+ TestAmplitudes(16000, 2);
+}
+
+TEST_F(DtmfToneGeneratorTest, Test32000Stereo) {
+ TestAllTones(32000, 2);
+ TestAmplitudes(32000, 2);
+}
+
+TEST_F(DtmfToneGeneratorTest, Test48000Stereo) {
+ TestAllTones(48000, 2);
+ TestAmplitudes(48000, 2);
+}
+
+TEST(DtmfToneGenerator, TestErrors) {
+ DtmfToneGenerator tone_gen;
+ const int kNumSamples = 10;
+ AudioMultiVector signal(1); // One channel.
+
+ // Try to generate tones without initializing.
+ EXPECT_EQ(DtmfToneGenerator::kNotInitialized,
+ tone_gen.Generate(kNumSamples, &signal));
+
+ const int fs = 16000; // Valid sample rate.
+ const int event = 7; // Valid event.
+ const int attenuation = 0; // Valid attenuation.
+ // Initialize with invalid event -1.
+ EXPECT_EQ(DtmfToneGenerator::kParameterError,
+ tone_gen.Init(fs, -1, attenuation));
+ // Initialize with invalid event 16.
+ EXPECT_EQ(DtmfToneGenerator::kParameterError,
+ tone_gen.Init(fs, 16, attenuation));
+ // Initialize with invalid attenuation -1.
+ EXPECT_EQ(DtmfToneGenerator::kParameterError, tone_gen.Init(fs, event, -1));
+ // Initialize with invalid attenuation 64.
+ EXPECT_EQ(DtmfToneGenerator::kParameterError, tone_gen.Init(fs, event, 64));
+ EXPECT_FALSE(tone_gen.initialized()); // Should still be uninitialized.
+
+ // Initialize with valid parameters.
+ ASSERT_EQ(0, tone_gen.Init(fs, event, attenuation));
+ EXPECT_TRUE(tone_gen.initialized());
+ // NULL pointer to destination.
+ EXPECT_EQ(DtmfToneGenerator::kParameterError,
+ tone_gen.Generate(kNumSamples, NULL));
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/expand.cc b/third_party/libwebrtc/modules/audio_coding/neteq/expand.cc
new file mode 100644
index 0000000000..9c3274609f
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/expand.cc
@@ -0,0 +1,888 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/expand.h"
+
+#include <string.h> // memset
+
+#include <algorithm> // min, max
+#include <limits> // numeric_limits<T>
+
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+#include "modules/audio_coding/neteq/audio_multi_vector.h"
+#include "modules/audio_coding/neteq/background_noise.h"
+#include "modules/audio_coding/neteq/cross_correlation.h"
+#include "modules/audio_coding/neteq/dsp_helper.h"
+#include "modules/audio_coding/neteq/random_vector.h"
+#include "modules/audio_coding/neteq/statistics_calculator.h"
+#include "modules/audio_coding/neteq/sync_buffer.h"
+#include "rtc_base/numerics/safe_conversions.h"
+
+namespace webrtc {
+
+Expand::Expand(BackgroundNoise* background_noise,
+ SyncBuffer* sync_buffer,
+ RandomVector* random_vector,
+ StatisticsCalculator* statistics,
+ int fs,
+ size_t num_channels)
+ : random_vector_(random_vector),
+ sync_buffer_(sync_buffer),
+ first_expand_(true),
+ fs_hz_(fs),
+ num_channels_(num_channels),
+ consecutive_expands_(0),
+ background_noise_(background_noise),
+ statistics_(statistics),
+ overlap_length_(5 * fs / 8000),
+ lag_index_direction_(0),
+ current_lag_index_(0),
+ stop_muting_(false),
+ expand_duration_samples_(0),
+ channel_parameters_(new ChannelParameters[num_channels_]) {
+ RTC_DCHECK(fs == 8000 || fs == 16000 || fs == 32000 || fs == 48000);
+ RTC_DCHECK_LE(fs,
+ static_cast<int>(kMaxSampleRate)); // Should not be possible.
+ RTC_DCHECK_GT(num_channels_, 0);
+ memset(expand_lags_, 0, sizeof(expand_lags_));
+ Reset();
+}
+
+Expand::~Expand() = default;
+
+void Expand::Reset() {
+ first_expand_ = true;
+ consecutive_expands_ = 0;
+ max_lag_ = 0;
+ for (size_t ix = 0; ix < num_channels_; ++ix) {
+ channel_parameters_[ix].expand_vector0.Clear();
+ channel_parameters_[ix].expand_vector1.Clear();
+ }
+}
+
+int Expand::Process(AudioMultiVector* output) {
+ int16_t random_vector[kMaxSampleRate / 8000 * 120 + 30];
+ int16_t scaled_random_vector[kMaxSampleRate / 8000 * 125];
+ static const int kTempDataSize = 3600;
+ int16_t temp_data[kTempDataSize]; // TODO(hlundin) Remove this.
+ int16_t* voiced_vector_storage = temp_data;
+ int16_t* voiced_vector = &voiced_vector_storage[overlap_length_];
+ static const size_t kNoiseLpcOrder = BackgroundNoise::kMaxLpcOrder;
+ int16_t unvoiced_array_memory[kNoiseLpcOrder + kMaxSampleRate / 8000 * 125];
+ int16_t* unvoiced_vector = unvoiced_array_memory + kUnvoicedLpcOrder;
+ int16_t* noise_vector = unvoiced_array_memory + kNoiseLpcOrder;
+
+ int fs_mult = fs_hz_ / 8000;
+
+ if (first_expand_) {
+ // Perform initial setup if this is the first expansion since last reset.
+ AnalyzeSignal(random_vector);
+ first_expand_ = false;
+ expand_duration_samples_ = 0;
+ } else {
+ // This is not the first expansion, parameters are already estimated.
+ // Extract a noise segment.
+ size_t rand_length = max_lag_;
+ // This only applies to SWB where length could be larger than 256.
+ RTC_DCHECK_LE(rand_length, kMaxSampleRate / 8000 * 120 + 30);
+ GenerateRandomVector(2, rand_length, random_vector);
+ }
+
+ // Generate signal.
+ UpdateLagIndex();
+
+ // Voiced part.
+ // Generate a weighted vector with the current lag.
+ size_t expansion_vector_length = max_lag_ + overlap_length_;
+ size_t current_lag = expand_lags_[current_lag_index_];
+ // Copy lag+overlap data.
+ size_t expansion_vector_position =
+ expansion_vector_length - current_lag - overlap_length_;
+ size_t temp_length = current_lag + overlap_length_;
+ for (size_t channel_ix = 0; channel_ix < num_channels_; ++channel_ix) {
+ ChannelParameters& parameters = channel_parameters_[channel_ix];
+ if (current_lag_index_ == 0) {
+ // Use only expand_vector0.
+ RTC_DCHECK_LE(expansion_vector_position + temp_length,
+ parameters.expand_vector0.Size());
+ parameters.expand_vector0.CopyTo(temp_length, expansion_vector_position,
+ voiced_vector_storage);
+ } else if (current_lag_index_ == 1) {
+ std::unique_ptr<int16_t[]> temp_0(new int16_t[temp_length]);
+ parameters.expand_vector0.CopyTo(temp_length, expansion_vector_position,
+ temp_0.get());
+ std::unique_ptr<int16_t[]> temp_1(new int16_t[temp_length]);
+ parameters.expand_vector1.CopyTo(temp_length, expansion_vector_position,
+ temp_1.get());
+ // Mix 3/4 of expand_vector0 with 1/4 of expand_vector1.
+ WebRtcSpl_ScaleAndAddVectorsWithRound(temp_0.get(), 3, temp_1.get(), 1, 2,
+ voiced_vector_storage, temp_length);
+ } else if (current_lag_index_ == 2) {
+ // Mix 1/2 of expand_vector0 with 1/2 of expand_vector1.
+ RTC_DCHECK_LE(expansion_vector_position + temp_length,
+ parameters.expand_vector0.Size());
+ RTC_DCHECK_LE(expansion_vector_position + temp_length,
+ parameters.expand_vector1.Size());
+
+ std::unique_ptr<int16_t[]> temp_0(new int16_t[temp_length]);
+ parameters.expand_vector0.CopyTo(temp_length, expansion_vector_position,
+ temp_0.get());
+ std::unique_ptr<int16_t[]> temp_1(new int16_t[temp_length]);
+ parameters.expand_vector1.CopyTo(temp_length, expansion_vector_position,
+ temp_1.get());
+ WebRtcSpl_ScaleAndAddVectorsWithRound(temp_0.get(), 1, temp_1.get(), 1, 1,
+ voiced_vector_storage, temp_length);
+ }
+
+ // Get tapering window parameters. Values are in Q15.
+ int16_t muting_window, muting_window_increment;
+ int16_t unmuting_window, unmuting_window_increment;
+ if (fs_hz_ == 8000) {
+ muting_window = DspHelper::kMuteFactorStart8kHz;
+ muting_window_increment = DspHelper::kMuteFactorIncrement8kHz;
+ unmuting_window = DspHelper::kUnmuteFactorStart8kHz;
+ unmuting_window_increment = DspHelper::kUnmuteFactorIncrement8kHz;
+ } else if (fs_hz_ == 16000) {
+ muting_window = DspHelper::kMuteFactorStart16kHz;
+ muting_window_increment = DspHelper::kMuteFactorIncrement16kHz;
+ unmuting_window = DspHelper::kUnmuteFactorStart16kHz;
+ unmuting_window_increment = DspHelper::kUnmuteFactorIncrement16kHz;
+ } else if (fs_hz_ == 32000) {
+ muting_window = DspHelper::kMuteFactorStart32kHz;
+ muting_window_increment = DspHelper::kMuteFactorIncrement32kHz;
+ unmuting_window = DspHelper::kUnmuteFactorStart32kHz;
+ unmuting_window_increment = DspHelper::kUnmuteFactorIncrement32kHz;
+ } else { // fs_ == 48000
+ muting_window = DspHelper::kMuteFactorStart48kHz;
+ muting_window_increment = DspHelper::kMuteFactorIncrement48kHz;
+ unmuting_window = DspHelper::kUnmuteFactorStart48kHz;
+ unmuting_window_increment = DspHelper::kUnmuteFactorIncrement48kHz;
+ }
+
+ // Smooth the expanded signal if it has not been muted to a low amplitude
+ // and `current_voice_mix_factor` is larger than 0.5.
+ if ((parameters.mute_factor > 819) &&
+ (parameters.current_voice_mix_factor > 8192)) {
+ size_t start_ix = sync_buffer_->Size() - overlap_length_;
+ for (size_t i = 0; i < overlap_length_; i++) {
+ // Do overlap add between new vector and overlap.
+ (*sync_buffer_)[channel_ix][start_ix + i] =
+ (((*sync_buffer_)[channel_ix][start_ix + i] * muting_window) +
+ (((parameters.mute_factor * voiced_vector_storage[i]) >> 14) *
+ unmuting_window) +
+ 16384) >>
+ 15;
+ muting_window += muting_window_increment;
+ unmuting_window += unmuting_window_increment;
+ }
+ } else if (parameters.mute_factor == 0) {
+ // The expanded signal will consist of only comfort noise if
+ // mute_factor = 0. Set the output length to 15 ms for best noise
+ // production.
+ // TODO(hlundin): This has been disabled since the length of
+ // parameters.expand_vector0 and parameters.expand_vector1 no longer
+ // match with expand_lags_, causing invalid reads and writes. Is it a good
+ // idea to enable this again, and solve the vector size problem?
+ // max_lag_ = fs_mult * 120;
+ // expand_lags_[0] = fs_mult * 120;
+ // expand_lags_[1] = fs_mult * 120;
+ // expand_lags_[2] = fs_mult * 120;
+ }
+
+ // Unvoiced part.
+ // Filter `scaled_random_vector` through `parameters.ar_filter`.
+ memcpy(unvoiced_vector - kUnvoicedLpcOrder, parameters.ar_filter_state,
+ sizeof(int16_t) * kUnvoicedLpcOrder);
+ int32_t add_constant = 0;
+ if (parameters.ar_gain_scale > 0) {
+ add_constant = 1 << (parameters.ar_gain_scale - 1);
+ }
+ WebRtcSpl_AffineTransformVector(scaled_random_vector, random_vector,
+ parameters.ar_gain, add_constant,
+ parameters.ar_gain_scale, current_lag);
+ WebRtcSpl_FilterARFastQ12(scaled_random_vector, unvoiced_vector,
+ parameters.ar_filter, kUnvoicedLpcOrder + 1,
+ current_lag);
+ memcpy(parameters.ar_filter_state,
+ &(unvoiced_vector[current_lag - kUnvoicedLpcOrder]),
+ sizeof(int16_t) * kUnvoicedLpcOrder);
+
+ // Combine voiced and unvoiced contributions.
+
+ // Set a suitable cross-fading slope.
+ // For lag =
+ // <= 31 * fs_mult => go from 1 to 0 in about 8 ms;
+ // (>= 31 .. <= 63) * fs_mult => go from 1 to 0 in about 16 ms;
+ // >= 64 * fs_mult => go from 1 to 0 in about 32 ms.
+ // temp_shift = getbits(max_lag_) - 5.
+ int temp_shift =
+ (31 - WebRtcSpl_NormW32(rtc::dchecked_cast<int32_t>(max_lag_))) - 5;
+ int16_t mix_factor_increment = 256 >> temp_shift;
+ if (stop_muting_) {
+ mix_factor_increment = 0;
+ }
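+ // Worked example (illustrative numbers): with fs_hz_ = 8000 (fs_mult = 1)
+ // and max_lag_ = 40, getbits(40) = 6, so temp_shift = 1 and
+ // mix_factor_increment = 128; the Q14 mix factor then ramps from 16384 to
+ // 0 in 16384 / 128 = 128 samples, i.e. about 16 ms, matching the table
+ // above.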
+
+ // Create combined signal by shifting in more and more of unvoiced part.
+ temp_shift = 8 - temp_shift; // = getbits(mix_factor_increment).
+ size_t temp_length =
+ (parameters.current_voice_mix_factor - parameters.voice_mix_factor) >>
+ temp_shift;
+ temp_length = std::min(temp_length, current_lag);
+ DspHelper::CrossFade(voiced_vector, unvoiced_vector, temp_length,
+ &parameters.current_voice_mix_factor,
+ mix_factor_increment, temp_data);
+
+ // End of cross-fading period was reached before end of expanded signal
+ // path. Mix the rest with a fixed mixing factor.
+ if (temp_length < current_lag) {
+ if (mix_factor_increment != 0) {
+ parameters.current_voice_mix_factor = parameters.voice_mix_factor;
+ }
+ int16_t temp_scale = 16384 - parameters.current_voice_mix_factor;
+ WebRtcSpl_ScaleAndAddVectorsWithRound(
+ voiced_vector + temp_length, parameters.current_voice_mix_factor,
+ unvoiced_vector + temp_length, temp_scale, 14,
+ temp_data + temp_length, current_lag - temp_length);
+ }
+
+ // Select muting slope depending on how many consecutive expands we have
+ // done.
+ if (consecutive_expands_ == 3) {
+ // Let the mute factor decrease from 1.0 to 0.95 in 6.25 ms.
+ // mute_slope = 0.0010 / fs_mult in Q20.
+ parameters.mute_slope = std::max(parameters.mute_slope, 1049 / fs_mult);
+ }
+ if (consecutive_expands_ == 7) {
+ // Let the mute factor decrease from 1.0 to 0.90 in 6.25 ms.
+ // mute_slope = 0.0020 / fs_mult in Q20.
+ parameters.mute_slope = std::max(parameters.mute_slope, 2097 / fs_mult);
+ }
+
+ // Mute segment according to slope value.
+ if ((consecutive_expands_ != 0) || !parameters.onset) {
+ // Mute to the previous level, then continue with the muting.
+ WebRtcSpl_AffineTransformVector(
+ temp_data, temp_data, parameters.mute_factor, 8192, 14, current_lag);
+
+ if (!stop_muting_) {
+ DspHelper::MuteSignal(temp_data, parameters.mute_slope, current_lag);
+
+ // Shift by 6 to go from Q20 to Q14.
+ // TODO(hlundin): Adding 8192 before shifting 6 steps seems wrong.
+ // Legacy.
+ int16_t gain = static_cast<int16_t>(
+ 16384 - (((current_lag * parameters.mute_slope) + 8192) >> 6));
+ gain = ((gain * parameters.mute_factor) + 8192) >> 14;
+
+ // Guard against getting stuck with very small (but sometimes audible)
+ // gain.
+ if ((consecutive_expands_ > 3) && (gain >= parameters.mute_factor)) {
+ parameters.mute_factor = 0;
+ } else {
+ parameters.mute_factor = gain;
+ }
+ }
+ }
+
+ // Background noise part.
+ background_noise_->GenerateBackgroundNoise(
+ random_vector, channel_ix, channel_parameters_[channel_ix].mute_slope,
+ TooManyExpands(), current_lag, unvoiced_array_memory);
+
+ // Add background noise to the combined voiced-unvoiced signal.
+ for (size_t i = 0; i < current_lag; i++) {
+ temp_data[i] = temp_data[i] + noise_vector[i];
+ }
+ if (channel_ix == 0) {
+ output->AssertSize(current_lag);
+ } else {
+ RTC_DCHECK_EQ(output->Size(), current_lag);
+ }
+ (*output)[channel_ix].OverwriteAt(temp_data, current_lag, 0);
+ }
+
+ // Increase the consecutive-expand counter and cap it.
+ consecutive_expands_ = consecutive_expands_ >= kMaxConsecutiveExpands
+ ? kMaxConsecutiveExpands
+ : consecutive_expands_ + 1;
+ expand_duration_samples_ += output->Size();
+ // Clamp the duration counter at 2 seconds.
+ expand_duration_samples_ = std::min(expand_duration_samples_,
+ rtc::dchecked_cast<size_t>(fs_hz_ * 2));
+ return 0;
+}
+
+void Expand::SetParametersForNormalAfterExpand() {
+ current_lag_index_ = 0;
+ lag_index_direction_ = 0;
+ stop_muting_ = true; // Do not mute signal any more.
+ statistics_->LogDelayedPacketOutageEvent(expand_duration_samples_, fs_hz_);
+ statistics_->EndExpandEvent(fs_hz_);
+}
+
+void Expand::SetParametersForMergeAfterExpand() {
+ current_lag_index_ = -1; /* out of the 3 possible ones */
+ lag_index_direction_ = 1; /* make sure we get the "optimal" lag */
+ stop_muting_ = true;
+ statistics_->EndExpandEvent(fs_hz_);
+}
+
+bool Expand::Muted() const {
+ if (first_expand_ || stop_muting_)
+ return false;
+ RTC_DCHECK(channel_parameters_);
+ for (size_t ch = 0; ch < num_channels_; ++ch) {
+ if (channel_parameters_[ch].mute_factor != 0)
+ return false;
+ }
+ return true;
+}
+
+size_t Expand::overlap_length() const {
+ return overlap_length_;
+}
+
+void Expand::InitializeForAnExpandPeriod() {
+ lag_index_direction_ = 1;
+ current_lag_index_ = -1;
+ stop_muting_ = false;
+ random_vector_->set_seed_increment(1);
+ consecutive_expands_ = 0;
+ for (size_t ix = 0; ix < num_channels_; ++ix) {
+ channel_parameters_[ix].current_voice_mix_factor = 16384; // 1.0 in Q14.
+ channel_parameters_[ix].mute_factor = 16384; // 1.0 in Q14.
+ // Start with 0 gain for background noise.
+ background_noise_->SetMuteFactor(ix, 0);
+ }
+}
+
+bool Expand::TooManyExpands() {
+ return consecutive_expands_ >= kMaxConsecutiveExpands;
+}
+
+void Expand::AnalyzeSignal(int16_t* random_vector) {
+ int32_t auto_correlation[kUnvoicedLpcOrder + 1];
+ int16_t reflection_coeff[kUnvoicedLpcOrder];
+ int16_t correlation_vector[kMaxSampleRate / 8000 * 102];
+ size_t best_correlation_index[kNumCorrelationCandidates];
+ int16_t best_correlation[kNumCorrelationCandidates];
+ size_t best_distortion_index[kNumCorrelationCandidates];
+ int16_t best_distortion[kNumCorrelationCandidates];
+ int32_t correlation_vector2[(99 * kMaxSampleRate / 8000) + 1];
+ int32_t best_distortion_w32[kNumCorrelationCandidates];
+ static const size_t kNoiseLpcOrder = BackgroundNoise::kMaxLpcOrder;
+ int16_t unvoiced_array_memory[kNoiseLpcOrder + kMaxSampleRate / 8000 * 125];
+ int16_t* unvoiced_vector = unvoiced_array_memory + kUnvoicedLpcOrder;
+
+ int fs_mult = fs_hz_ / 8000;
+
+ // Pre-calculate common multiplications with fs_mult.
+ size_t fs_mult_4 = static_cast<size_t>(fs_mult * 4);
+ size_t fs_mult_20 = static_cast<size_t>(fs_mult * 20);
+ size_t fs_mult_120 = static_cast<size_t>(fs_mult * 120);
+ size_t fs_mult_dist_len = fs_mult * kDistortionLength;
+ size_t fs_mult_lpc_analysis_len = fs_mult * kLpcAnalysisLength;
+
+ const size_t signal_length = static_cast<size_t>(256 * fs_mult);
+
+ const size_t audio_history_position = sync_buffer_->Size() - signal_length;
+ std::unique_ptr<int16_t[]> audio_history(new int16_t[signal_length]);
+ (*sync_buffer_)[0].CopyTo(signal_length, audio_history_position,
+ audio_history.get());
+
+ // Initialize.
+ InitializeForAnExpandPeriod();
+
+ // Calculate correlation in downsampled domain (4 kHz sample rate).
+ size_t correlation_length = 51; // TODO(hlundin): Legacy bit-exactness.
+ // If it is decided to break bit-exactness `correlation_length` should be
+ // initialized to the return value of Correlation().
+ Correlation(audio_history.get(), signal_length, correlation_vector);
+
+ // Find peaks in correlation vector.
+ DspHelper::PeakDetection(correlation_vector, correlation_length,
+ kNumCorrelationCandidates, fs_mult,
+ best_correlation_index, best_correlation);
+
+ // Adjust peak locations; cross-correlation lags start at 2.5 ms
+ // (20 * fs_mult samples).
+ best_correlation_index[0] += fs_mult_20;
+ best_correlation_index[1] += fs_mult_20;
+ best_correlation_index[2] += fs_mult_20;
+
+ // Calculate distortion around the `kNumCorrelationCandidates` best lags.
+ int distortion_scale = 0;
+ for (size_t i = 0; i < kNumCorrelationCandidates; i++) {
+ size_t min_index =
+ std::max(fs_mult_20, best_correlation_index[i] - fs_mult_4);
+ size_t max_index =
+ std::min(fs_mult_120 - 1, best_correlation_index[i] + fs_mult_4);
+ best_distortion_index[i] = DspHelper::MinDistortion(
+ &(audio_history[signal_length - fs_mult_dist_len]), min_index,
+ max_index, fs_mult_dist_len, &best_distortion_w32[i]);
+ distortion_scale = std::max(16 - WebRtcSpl_NormW32(best_distortion_w32[i]),
+ distortion_scale);
+ }
+ // Shift the distortion values to fit in 16 bits.
+ WebRtcSpl_VectorBitShiftW32ToW16(best_distortion, kNumCorrelationCandidates,
+ best_distortion_w32, distortion_scale);
+
+ // Find the maximizing index `i` of the cost function
+ // f[i] = best_correlation[i] / best_distortion[i].
+ int32_t best_ratio = std::numeric_limits<int32_t>::min();
+ size_t best_index = std::numeric_limits<size_t>::max();
+ for (size_t i = 0; i < kNumCorrelationCandidates; ++i) {
+ int32_t ratio;
+ if (best_distortion[i] > 0) {
+ ratio = (best_correlation[i] * (1 << 16)) / best_distortion[i];
+ } else if (best_correlation[i] == 0) {
+ ratio = 0; // No correlation; set result to zero.
+ } else {
+ ratio = std::numeric_limits<int32_t>::max(); // Denominator is zero.
+ }
+ if (ratio > best_ratio) {
+ best_index = i;
+ best_ratio = ratio;
+ }
+ }
+
+ size_t distortion_lag = best_distortion_index[best_index];
+ size_t correlation_lag = best_correlation_index[best_index];
+ max_lag_ = std::max(distortion_lag, correlation_lag);
+
+ // Calculate the exact best correlation in the range between
+ // `correlation_lag` and `distortion_lag`.
+ correlation_length = std::max(std::min(distortion_lag + 10, fs_mult_120),
+ static_cast<size_t>(60 * fs_mult));
+
+ size_t start_index = std::min(distortion_lag, correlation_lag);
+ size_t correlation_lags = static_cast<size_t>(
+ WEBRTC_SPL_ABS_W16((distortion_lag - correlation_lag)) + 1);
+ RTC_DCHECK_LE(correlation_lags, static_cast<size_t>(99 * fs_mult + 1));
+
+ for (size_t channel_ix = 0; channel_ix < num_channels_; ++channel_ix) {
+ ChannelParameters& parameters = channel_parameters_[channel_ix];
+ if (channel_ix > 0) {
+ // When channel_ix == 0, audio_history contains the correct audio. For the
+ // other cases, we will have to copy the correct channel into
+ // audio_history.
+ (*sync_buffer_)[channel_ix].CopyTo(signal_length, audio_history_position,
+ audio_history.get());
+ }
+
+ // Calculate suitable scaling.
+ int16_t signal_max = WebRtcSpl_MaxAbsValueW16(
+ &audio_history[signal_length - correlation_length - start_index -
+ correlation_lags],
+ correlation_length + start_index + correlation_lags - 1);
+ int correlation_scale =
+ (31 - WebRtcSpl_NormW32(signal_max * signal_max)) +
+ (31 - WebRtcSpl_NormW32(static_cast<int32_t>(correlation_length))) - 31;
+ correlation_scale = std::max(0, correlation_scale);
+
+ // Calculate the correlation, store in `correlation_vector2`.
+ WebRtcSpl_CrossCorrelation(
+ correlation_vector2,
+ &(audio_history[signal_length - correlation_length]),
+ &(audio_history[signal_length - correlation_length - start_index]),
+ correlation_length, correlation_lags, correlation_scale, -1);
+
+ // Find maximizing index.
+ best_index = WebRtcSpl_MaxIndexW32(correlation_vector2, correlation_lags);
+ int32_t max_correlation = correlation_vector2[best_index];
+ // Compensate index with start offset.
+ best_index = best_index + start_index;
+
+ // Calculate energies.
+ int32_t energy1 = WebRtcSpl_DotProductWithScale(
+ &(audio_history[signal_length - correlation_length]),
+ &(audio_history[signal_length - correlation_length]),
+ correlation_length, correlation_scale);
+ int32_t energy2 = WebRtcSpl_DotProductWithScale(
+ &(audio_history[signal_length - correlation_length - best_index]),
+ &(audio_history[signal_length - correlation_length - best_index]),
+ correlation_length, correlation_scale);
+
+ // Calculate the correlation coefficient between the two portions of the
+ // signal.
+ int32_t corr_coefficient;
+ if ((energy1 > 0) && (energy2 > 0)) {
+ int energy1_scale = std::max(16 - WebRtcSpl_NormW32(energy1), 0);
+ int energy2_scale = std::max(16 - WebRtcSpl_NormW32(energy2), 0);
+ // Make sure total scaling is even (to simplify scale factor after sqrt).
+ if ((energy1_scale + energy2_scale) & 1) {
+ // If sum is odd, add 1 to make it even.
+ energy1_scale += 1;
+ }
+ int32_t scaled_energy1 = energy1 >> energy1_scale;
+ int32_t scaled_energy2 = energy2 >> energy2_scale;
+ int16_t sqrt_energy_product = static_cast<int16_t>(
+ WebRtcSpl_SqrtFloor(scaled_energy1 * scaled_energy2));
+ // Calculate max_correlation / sqrt(energy1 * energy2) in Q14.
+ int cc_shift = 14 - (energy1_scale + energy2_scale) / 2;
+ max_correlation = WEBRTC_SPL_SHIFT_W32(max_correlation, cc_shift);
+ corr_coefficient =
+ WebRtcSpl_DivW32W16(max_correlation, sqrt_energy_product);
+ // Cap at 1.0 in Q14.
+ corr_coefficient = std::min(16384, corr_coefficient);
+ } else {
+ corr_coefficient = 0;
+ }
+
+ // Extract the two vectors expand_vector0 and expand_vector1 from
+ // `audio_history`.
+ size_t expansion_length = max_lag_ + overlap_length_;
+ const int16_t* vector1 = &(audio_history[signal_length - expansion_length]);
+ const int16_t* vector2 = vector1 - distortion_lag;
+ // Normalize the second vector to the same energy as the first.
+ energy1 = WebRtcSpl_DotProductWithScale(vector1, vector1, expansion_length,
+ correlation_scale);
+ energy2 = WebRtcSpl_DotProductWithScale(vector2, vector2, expansion_length,
+ correlation_scale);
+ // Confirm that amplitude ratio sqrt(energy1 / energy2) is within 0.5 - 2.0,
+ // i.e., energy1 / energy2 is within 0.25 - 4.
+ int16_t amplitude_ratio;
+ if ((energy1 / 4 < energy2) && (energy1 > energy2 / 4)) {
+ // Energy constraint fulfilled. Use both vectors and scale them
+ // accordingly.
+ int32_t scaled_energy2 = std::max(16 - WebRtcSpl_NormW32(energy2), 0);
+ int32_t scaled_energy1 = scaled_energy2 - 13;
+ // Calculate scaled_energy1 / scaled_energy2 in Q13.
+ int32_t energy_ratio =
+ WebRtcSpl_DivW32W16(WEBRTC_SPL_SHIFT_W32(energy1, -scaled_energy1),
+ static_cast<int16_t>(energy2 >> scaled_energy2));
+ // Calculate sqrt ratio in Q13 (sqrt of en1/en2 in Q26).
+ amplitude_ratio =
+ static_cast<int16_t>(WebRtcSpl_SqrtFloor(energy_ratio << 13));
+ // Copy the two vectors and give them the same energy.
+ parameters.expand_vector0.Clear();
+ parameters.expand_vector0.PushBack(vector1, expansion_length);
+ parameters.expand_vector1.Clear();
+ if (parameters.expand_vector1.Size() < expansion_length) {
+ parameters.expand_vector1.Extend(expansion_length -
+ parameters.expand_vector1.Size());
+ }
+ std::unique_ptr<int16_t[]> temp_1(new int16_t[expansion_length]);
+ WebRtcSpl_AffineTransformVector(
+ temp_1.get(), const_cast<int16_t*>(vector2), amplitude_ratio, 4096,
+ 13, expansion_length);
+ parameters.expand_vector1.OverwriteAt(temp_1.get(), expansion_length, 0);
+ } else {
+ // Energy change constraint not fulfilled. Only use last vector.
+ parameters.expand_vector0.Clear();
+ parameters.expand_vector0.PushBack(vector1, expansion_length);
+ // Copy from expand_vector0 to expand_vector1.
+ parameters.expand_vector0.CopyTo(&parameters.expand_vector1);
+ // Set the energy_ratio since it is used by muting slope.
+ if ((energy1 / 4 < energy2) || (energy2 == 0)) {
+ amplitude_ratio = 4096; // 0.5 in Q13.
+ } else {
+ amplitude_ratio = 16384; // 2.0 in Q13.
+ }
+ }
+
+ // Set the 3 lag values.
+ if (distortion_lag == correlation_lag) {
+ expand_lags_[0] = distortion_lag;
+ expand_lags_[1] = distortion_lag;
+ expand_lags_[2] = distortion_lag;
+ } else {
+ // `distortion_lag` and `correlation_lag` are not equal; use different
+ // combinations of the two.
+ // First lag is `distortion_lag` only.
+ expand_lags_[0] = distortion_lag;
+ // Second lag is the average of the two.
+ expand_lags_[1] = (distortion_lag + correlation_lag) / 2;
+ // Third lag is the average again, but rounding towards `correlation_lag`.
+ if (distortion_lag > correlation_lag) {
+ expand_lags_[2] = (distortion_lag + correlation_lag - 1) / 2;
+ } else {
+ expand_lags_[2] = (distortion_lag + correlation_lag + 1) / 2;
+ }
+ }
+
+ // Calculate the LPC and the gain of the filters.
+
+ // Calculate kUnvoicedLpcOrder + 1 lags of the auto-correlation function.
+ size_t temp_index =
+ signal_length - fs_mult_lpc_analysis_len - kUnvoicedLpcOrder;
+ // Copy signal to temporary vector to be able to pad with leading zeros.
+ int16_t* temp_signal =
+ new int16_t[fs_mult_lpc_analysis_len + kUnvoicedLpcOrder];
+ memset(temp_signal, 0,
+ sizeof(int16_t) * (fs_mult_lpc_analysis_len + kUnvoicedLpcOrder));
+ memcpy(&temp_signal[kUnvoicedLpcOrder],
+ &audio_history[temp_index + kUnvoicedLpcOrder],
+ sizeof(int16_t) * fs_mult_lpc_analysis_len);
+ CrossCorrelationWithAutoShift(
+ &temp_signal[kUnvoicedLpcOrder], &temp_signal[kUnvoicedLpcOrder],
+ fs_mult_lpc_analysis_len, kUnvoicedLpcOrder + 1, -1, auto_correlation);
+ delete[] temp_signal;
+
+ // Verify that variance is positive.
+ if (auto_correlation[0] > 0) {
+ // Estimate AR filter parameters using Levinson-Durbin algorithm;
+ // kUnvoicedLpcOrder + 1 filter coefficients.
+ int16_t stability =
+ WebRtcSpl_LevinsonDurbin(auto_correlation, parameters.ar_filter,
+ reflection_coeff, kUnvoicedLpcOrder);
+
+ // Keep filter parameters only if filter is stable.
+ if (stability != 1) {
+ // Set first coefficient to 4096 (1.0 in Q12).
+ parameters.ar_filter[0] = 4096;
+ // Set remaining `kUnvoicedLpcOrder` coefficients to zero.
+ WebRtcSpl_MemSetW16(parameters.ar_filter + 1, 0, kUnvoicedLpcOrder);
+ }
+ }
+
+ if (channel_ix == 0) {
+ // Extract a noise segment.
+ size_t noise_length;
+ if (distortion_lag < 40) {
+ noise_length = 2 * distortion_lag + 30;
+ } else {
+ noise_length = distortion_lag + 30;
+ }
+ if (noise_length <= RandomVector::kRandomTableSize) {
+ memcpy(random_vector, RandomVector::kRandomTable,
+ sizeof(int16_t) * noise_length);
+ } else {
+ // Only applies to SWB where length could be larger than
+ // `kRandomTableSize`.
+ memcpy(random_vector, RandomVector::kRandomTable,
+ sizeof(int16_t) * RandomVector::kRandomTableSize);
+ RTC_DCHECK_LE(noise_length, kMaxSampleRate / 8000 * 120 + 30);
+ random_vector_->IncreaseSeedIncrement(2);
+ random_vector_->Generate(
+ noise_length - RandomVector::kRandomTableSize,
+ &random_vector[RandomVector::kRandomTableSize]);
+ }
+ }
+
+ // Set up state vector and calculate scale factor for unvoiced filtering.
+ memcpy(parameters.ar_filter_state,
+ &(audio_history[signal_length - kUnvoicedLpcOrder]),
+ sizeof(int16_t) * kUnvoicedLpcOrder);
+ memcpy(unvoiced_vector - kUnvoicedLpcOrder,
+ &(audio_history[signal_length - 128 - kUnvoicedLpcOrder]),
+ sizeof(int16_t) * kUnvoicedLpcOrder);
+ WebRtcSpl_FilterMAFastQ12(&audio_history[signal_length - 128],
+ unvoiced_vector, parameters.ar_filter,
+ kUnvoicedLpcOrder + 1, 128);
+ const int unvoiced_max_abs = [&] {
+ const int16_t max_abs = WebRtcSpl_MaxAbsValueW16(unvoiced_vector, 128);
+ // Since WebRtcSpl_MaxAbsValueW16 returns 2^15 - 1 when the input contains
+ // -2^15, we have to conservatively bump the return value by 1
+ // if it is 2^15 - 1.
+ return max_abs == WEBRTC_SPL_WORD16_MAX ? max_abs + 1 : max_abs;
+ }();
+ // Pick the smallest n such that 2^n > unvoiced_max_abs; then the maximum
+ // value of the dot product is less than 2^7 * 2^(2*n) = 2^(2*n + 7), so to
+ // prevent overflows we want 2n + 7 <= 31, which means we should shift by
+ // 2n + 7 - 31 bits, if this value is greater than zero.
+ int unvoiced_prescale =
+ std::max(0, 2 * WebRtcSpl_GetSizeInBits(unvoiced_max_abs) - 24);
+
+ int32_t unvoiced_energy = WebRtcSpl_DotProductWithScale(
+ unvoiced_vector, unvoiced_vector, 128, unvoiced_prescale);
+
+ // Normalize `unvoiced_energy` to 28 or 29 bits to preserve sqrt() accuracy.
+ int16_t unvoiced_scale = WebRtcSpl_NormW32(unvoiced_energy) - 3;
+ // Make sure we do an odd number of shifts since we already have 7 shifts
+ // from dividing by 128 earlier. This will make the total scale factor
+ // even, which is suitable for the sqrt.
+ unvoiced_scale += ((unvoiced_scale & 0x1) ^ 0x1);
+ unvoiced_energy = WEBRTC_SPL_SHIFT_W32(unvoiced_energy, unvoiced_scale);
+ int16_t unvoiced_gain =
+ static_cast<int16_t>(WebRtcSpl_SqrtFloor(unvoiced_energy));
+ parameters.ar_gain_scale =
+ 13 + (unvoiced_scale + 7 - unvoiced_prescale) / 2;
+ parameters.ar_gain = unvoiced_gain;
+
+ // Calculate voice_mix_factor from corr_coefficient.
+ // Let x = corr_coefficient. Then, we compute:
+ // if (x > 0.48)
+ // voice_mix_factor = (-5179 + 19931x - 16422x^2 + 5776x^3) / 4096;
+ // else
+ // voice_mix_factor = 0;
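+ // For example, a perfect correlation (corr_coefficient = 16384, i.e. 1.0
+ // in Q14) gives temp_sum = (-5179 + 19931 - 16422 + 5776) * 16384, so
+ // temp_sum / 4096 = 16424, which is capped to 16384 and yields a fully
+ // voiced mix. (Illustrative numbers only.)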
+ if (corr_coefficient > 7875) {
+ int16_t x1, x2, x3;
+ // `corr_coefficient` is in Q14.
+ x1 = static_cast<int16_t>(corr_coefficient);
+ x2 = (x1 * x1) >> 14; // Shift 14 to keep result in Q14.
+ x3 = (x1 * x2) >> 14;
+ static const int kCoefficients[4] = {-5179, 19931, -16422, 5776};
+ int32_t temp_sum = kCoefficients[0] * 16384;
+ temp_sum += kCoefficients[1] * x1;
+ temp_sum += kCoefficients[2] * x2;
+ temp_sum += kCoefficients[3] * x3;
+ parameters.voice_mix_factor =
+ static_cast<int16_t>(std::min(temp_sum / 4096, 16384));
+ parameters.voice_mix_factor =
+ std::max(parameters.voice_mix_factor, static_cast<int16_t>(0));
+ } else {
+ parameters.voice_mix_factor = 0;
+ }
+
+ // Calculate muting slope. Reuse value from earlier scaling of
+ // `expand_vector0` and `expand_vector1`.
+ int16_t slope = amplitude_ratio;
+ if (slope > 12288) {
+ // slope > 1.5.
+ // Calculate (1 - (1 / slope)) / distortion_lag =
+ // (slope - 1) / (distortion_lag * slope).
+ // `slope` is in Q13, so 1 corresponds to 8192. Shift up to Q25 before
+ // the division.
+ // Shift the denominator from Q13 to Q5 before the division. The result of
+ // the division will then be in Q20.
+ int16_t denom =
+ rtc::saturated_cast<int16_t>((distortion_lag * slope) >> 8);
+ int temp_ratio = WebRtcSpl_DivW32W16((slope - 8192) << 12, denom);
+ if (slope > 14746) {
+ // slope > 1.8.
+ // Divide by 2, with proper rounding.
+ parameters.mute_slope = (temp_ratio + 1) / 2;
+ } else {
+ // Divide by 8, with proper rounding.
+ parameters.mute_slope = (temp_ratio + 4) / 8;
+ }
+ parameters.onset = true;
+ } else {
+ // Calculate (1 - slope) / distortion_lag.
+ // Shift `slope` by 7 to Q20 before the division. The result is in Q20.
+ parameters.mute_slope = WebRtcSpl_DivW32W16(
+ (8192 - slope) * 128, static_cast<int16_t>(distortion_lag));
+ if (parameters.voice_mix_factor <= 13107) {
+ // Make sure the mute factor decreases from 1.0 to 0.9 in no more than
+ // 6.25 ms.
+ // mute_slope >= 0.005 / fs_mult in Q20.
+ parameters.mute_slope = std::max(5243 / fs_mult, parameters.mute_slope);
+ } else if (slope > 8028) {
+ parameters.mute_slope = 0;
+ }
+ parameters.onset = false;
+ }
+ }
+}
+
+Expand::ChannelParameters::ChannelParameters()
+ : mute_factor(16384),
+ ar_gain(0),
+ ar_gain_scale(0),
+ voice_mix_factor(0),
+ current_voice_mix_factor(0),
+ onset(false),
+ mute_slope(0) {
+ memset(ar_filter, 0, sizeof(ar_filter));
+ memset(ar_filter_state, 0, sizeof(ar_filter_state));
+}
+
+void Expand::Correlation(const int16_t* input,
+ size_t input_length,
+ int16_t* output) const {
+ // Set parameters depending on sample rate.
+ const int16_t* filter_coefficients;
+ size_t num_coefficients;
+ int16_t downsampling_factor;
+ if (fs_hz_ == 8000) {
+ num_coefficients = 3;
+ downsampling_factor = 2;
+ filter_coefficients = DspHelper::kDownsample8kHzTbl;
+ } else if (fs_hz_ == 16000) {
+ num_coefficients = 5;
+ downsampling_factor = 4;
+ filter_coefficients = DspHelper::kDownsample16kHzTbl;
+ } else if (fs_hz_ == 32000) {
+ num_coefficients = 7;
+ downsampling_factor = 8;
+ filter_coefficients = DspHelper::kDownsample32kHzTbl;
+ } else { // fs_hz_ == 48000.
+ num_coefficients = 7;
+ downsampling_factor = 12;
+ filter_coefficients = DspHelper::kDownsample48kHzTbl;
+ }
+
+ // Correlate from lag 10 to lag 60 in downsampled domain.
+ // (Corresponds to 20-120 for narrow-band, 40-240 for wide-band, and so on.)
+ static const size_t kCorrelationStartLag = 10;
+ static const size_t kNumCorrelationLags = 54;
+ static const size_t kCorrelationLength = 60;
+ // Downsample to 4 kHz sample rate.
+ static const size_t kDownsampledLength =
+ kCorrelationStartLag + kNumCorrelationLags + kCorrelationLength;
+ int16_t downsampled_input[kDownsampledLength];
+ static const size_t kFilterDelay = 0;
+ WebRtcSpl_DownsampleFast(
+ input + input_length - kDownsampledLength * downsampling_factor,
+ kDownsampledLength * downsampling_factor, downsampled_input,
+ kDownsampledLength, filter_coefficients, num_coefficients,
+ downsampling_factor, kFilterDelay);
+
+ // Normalize `downsampled_input` to use all 16 bits.
+ int16_t max_value =
+ WebRtcSpl_MaxAbsValueW16(downsampled_input, kDownsampledLength);
+ int16_t norm_shift = 16 - WebRtcSpl_NormW32(max_value);
+ WebRtcSpl_VectorBitShiftW16(downsampled_input, kDownsampledLength,
+ downsampled_input, norm_shift);
+
+ int32_t correlation[kNumCorrelationLags];
+ CrossCorrelationWithAutoShift(
+ &downsampled_input[kDownsampledLength - kCorrelationLength],
+ &downsampled_input[kDownsampledLength - kCorrelationLength -
+ kCorrelationStartLag],
+ kCorrelationLength, kNumCorrelationLags, -1, correlation);
+
+ // Normalize and move data from 32-bit to 16-bit vector.
+ int32_t max_correlation =
+ WebRtcSpl_MaxAbsValueW32(correlation, kNumCorrelationLags);
+ int16_t norm_shift2 = static_cast<int16_t>(
+ std::max(18 - WebRtcSpl_NormW32(max_correlation), 0));
+ WebRtcSpl_VectorBitShiftW32ToW16(output, kNumCorrelationLags, correlation,
+ norm_shift2);
+}
+
+void Expand::UpdateLagIndex() {
+ current_lag_index_ = current_lag_index_ + lag_index_direction_;
+ // Change direction if needed.
+ if (current_lag_index_ <= 0) {
+ lag_index_direction_ = 1;
+ }
+ if (current_lag_index_ >= kNumLags - 1) {
+ lag_index_direction_ = -1;
+ }
+}
+
+Expand* ExpandFactory::Create(BackgroundNoise* background_noise,
+ SyncBuffer* sync_buffer,
+ RandomVector* random_vector,
+ StatisticsCalculator* statistics,
+ int fs,
+ size_t num_channels) const {
+ return new Expand(background_noise, sync_buffer, random_vector, statistics,
+ fs, num_channels);
+}
+
+void Expand::GenerateRandomVector(int16_t seed_increment,
+ size_t length,
+ int16_t* random_vector) {
+ // TODO(turajs): According to hlundin, the loop should not be needed; it
+ // should be just as good to generate the whole vector in one call.
+ size_t samples_generated = 0;
+ const size_t kMaxRandSamples = RandomVector::kRandomTableSize;
+ while (samples_generated < length) {
+ size_t rand_length = std::min(length - samples_generated, kMaxRandSamples);
+ random_vector_->IncreaseSeedIncrement(seed_increment);
+ random_vector_->Generate(rand_length, &random_vector[samples_generated]);
+ samples_generated += rand_length;
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/expand.h b/third_party/libwebrtc/modules/audio_coding/neteq/expand.h
new file mode 100644
index 0000000000..2e64583ec2
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/expand.h
@@ -0,0 +1,154 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_EXPAND_H_
+#define MODULES_AUDIO_CODING_NETEQ_EXPAND_H_
+
+
+#include <memory>
+
+#include "modules/audio_coding/neteq/audio_vector.h"
+
+namespace webrtc {
+
+// Forward declarations.
+class AudioMultiVector;
+class BackgroundNoise;
+class RandomVector;
+class StatisticsCalculator;
+class SyncBuffer;
+
+// This class handles extrapolation of audio data from the sync_buffer to
+// produce packet-loss concealment.
+// TODO(hlundin): Refactor this class to divide the long methods into shorter
+// ones.
+class Expand {
+ public:
+ Expand(BackgroundNoise* background_noise,
+ SyncBuffer* sync_buffer,
+ RandomVector* random_vector,
+ StatisticsCalculator* statistics,
+ int fs,
+ size_t num_channels);
+
+ virtual ~Expand();
+
+ Expand(const Expand&) = delete;
+ Expand& operator=(const Expand&) = delete;
+
+ // Resets the object.
+ virtual void Reset();
+
+ // The main method to produce concealment data. The data is appended to the
+ // end of `output`.
+ virtual int Process(AudioMultiVector* output);
+
+ // Prepare the object to do extra expansion during normal operation following
+ // a period of expands.
+ virtual void SetParametersForNormalAfterExpand();
+
+ // Prepare the object to do extra expansion during merge operation following
+ // a period of expands.
+ virtual void SetParametersForMergeAfterExpand();
+
+ // Returns the mute factor for `channel`.
+ int16_t MuteFactor(size_t channel) const {
+ RTC_DCHECK_LT(channel, num_channels_);
+ return channel_parameters_[channel].mute_factor;
+ }
+
+ // Returns true if expansion has been faded down to zero amplitude (for all
+ // channels); false otherwise.
+ bool Muted() const;
+
+ // Accessors and mutators.
+ virtual size_t overlap_length() const;
+ size_t max_lag() const { return max_lag_; }
+
+ protected:
+ static const int kMaxConsecutiveExpands = 200;
+ void GenerateRandomVector(int16_t seed_increment,
+ size_t length,
+ int16_t* random_vector);
+
+ // Initializes member variables at the beginning of an expand period.
+ void InitializeForAnExpandPeriod();
+
+ bool TooManyExpands();
+
+ // Analyzes the signal history in `sync_buffer_`, and set up all parameters
+ // necessary to produce concealment data.
+ void AnalyzeSignal(int16_t* random_vector);
+
+ RandomVector* const random_vector_;
+ SyncBuffer* const sync_buffer_;
+ bool first_expand_;
+ const int fs_hz_;
+ const size_t num_channels_;
+ int consecutive_expands_;
+
+ private:
+ static const size_t kUnvoicedLpcOrder = 6;
+ static const size_t kNumCorrelationCandidates = 3;
+ static const size_t kDistortionLength = 20;
+ static const size_t kLpcAnalysisLength = 160;
+ static const size_t kMaxSampleRate = 48000;
+ static const int kNumLags = 3;
+
+ struct ChannelParameters {
+ ChannelParameters();
+ int16_t mute_factor;
+ int16_t ar_filter[kUnvoicedLpcOrder + 1];
+ int16_t ar_filter_state[kUnvoicedLpcOrder];
+ int16_t ar_gain;
+ int16_t ar_gain_scale;
+ int16_t voice_mix_factor; /* Q14 */
+ int16_t current_voice_mix_factor; /* Q14 */
+ AudioVector expand_vector0;
+ AudioVector expand_vector1;
+ bool onset;
+ int mute_slope; /* Q20 */
+ };
+
+ // Calculate the auto-correlation of `input`, with length `input_length`
+ // samples. The correlation is calculated from a downsampled version of
+ // `input`, and is written to `output`.
+ void Correlation(const int16_t* input,
+ size_t input_length,
+ int16_t* output) const;
+
+ void UpdateLagIndex();
+
+ BackgroundNoise* const background_noise_;
+ StatisticsCalculator* const statistics_;
+ const size_t overlap_length_;
+ size_t max_lag_;
+ size_t expand_lags_[kNumLags];
+ int lag_index_direction_;
+ int current_lag_index_;
+ bool stop_muting_;
+ size_t expand_duration_samples_;
+ std::unique_ptr<ChannelParameters[]> channel_parameters_;
+};
+
+struct ExpandFactory {
+ ExpandFactory() {}
+ virtual ~ExpandFactory() {}
+
+ virtual Expand* Create(BackgroundNoise* background_noise,
+ SyncBuffer* sync_buffer,
+ RandomVector* random_vector,
+ StatisticsCalculator* statistics,
+ int fs,
+ size_t num_channels) const;
+};
+
+} // namespace webrtc
+#endif // MODULES_AUDIO_CODING_NETEQ_EXPAND_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/expand_uma_logger.cc b/third_party/libwebrtc/modules/audio_coding/neteq/expand_uma_logger.cc
new file mode 100644
index 0000000000..a91358b489
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/expand_uma_logger.cc
@@ -0,0 +1,71 @@
+/* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/expand_uma_logger.h"
+
+#include "absl/strings/string_view.h"
+#include "rtc_base/checks.h"
+#include "system_wrappers/include/metrics.h"
+
+namespace webrtc {
+namespace {
+std::unique_ptr<TickTimer::Countdown> GetNewCountdown(
+ const TickTimer& tick_timer,
+ int logging_period_s) {
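+ // Convert the logging period to timer ticks; e.g. a 10 s period with the
+ // default 10 ms tick gives (10 * 1000) / 10 = 1000 ticks. (Illustrative
+ // numbers; the tick length is defined by the TickTimer instance.)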
+ return tick_timer.GetNewCountdown((logging_period_s * 1000) /
+ tick_timer.ms_per_tick());
+}
+} // namespace
+
+ExpandUmaLogger::ExpandUmaLogger(absl::string_view uma_name,
+ int logging_period_s,
+ const TickTimer* tick_timer)
+ : uma_name_(uma_name),
+ logging_period_s_(logging_period_s),
+ tick_timer_(*tick_timer),
+ timer_(GetNewCountdown(tick_timer_, logging_period_s_)) {
+ RTC_DCHECK(tick_timer);
+ RTC_DCHECK_GT(logging_period_s_, 0);
+}
+
+ExpandUmaLogger::~ExpandUmaLogger() = default;
+
+void ExpandUmaLogger::UpdateSampleCounter(uint64_t samples,
+ int sample_rate_hz) {
+ if ((last_logged_value_ && *last_logged_value_ > samples) ||
+ sample_rate_hz_ != sample_rate_hz) {
+ // Sanity checks. The incremental counter moved backwards, or sample rate
+ // changed.
+ last_logged_value_.reset();
+ }
+ last_value_ = samples;
+ sample_rate_hz_ = sample_rate_hz;
+ if (!last_logged_value_) {
+ last_logged_value_ = absl::optional<uint64_t>(samples);
+ }
+
+ if (!timer_->Finished()) {
+ // Not yet time to log.
+ return;
+ }
+
+ RTC_DCHECK(last_logged_value_);
+ RTC_DCHECK_GE(last_value_, *last_logged_value_);
+ const uint64_t diff = last_value_ - *last_logged_value_;
+ last_logged_value_ = absl::optional<uint64_t>(last_value_);
+ // Calculate rate in percent.
+ RTC_DCHECK_GT(sample_rate_hz, 0);
+ const int rate = (100 * diff) / (sample_rate_hz * logging_period_s_);
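+ // E.g. diff = 4800 expand samples at 16000 Hz over a 10 s period gives
+ // rate = (100 * 4800) / 160000 = 3 percent. (Illustrative numbers.)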
+ RTC_DCHECK_GE(rate, 0);
+ RTC_DCHECK_LE(rate, 100);
+ RTC_HISTOGRAM_PERCENTAGE_SPARSE(uma_name_, rate);
+ timer_ = GetNewCountdown(tick_timer_, logging_period_s_);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/expand_uma_logger.h b/third_party/libwebrtc/modules/audio_coding/neteq/expand_uma_logger.h
new file mode 100644
index 0000000000..cc5c20a886
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/expand_uma_logger.h
@@ -0,0 +1,57 @@
+/* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_EXPAND_UMA_LOGGER_H_
+#define MODULES_AUDIO_CODING_NETEQ_EXPAND_UMA_LOGGER_H_
+
+#include <stdint.h>
+
+#include <memory>
+#include <string>
+
+#include "absl/strings/string_view.h"
+#include "absl/types/optional.h"
+#include "api/neteq/tick_timer.h"
+
+namespace webrtc {
+
+// This class is used to periodically log values to a UMA histogram. The caller
+// is expected to update this class with an incremental sample counter which
+// counts expand samples. At the end of each logging period, the class will
+// calculate the fraction of samples that were expand samples during that period
+// and report that in percent. The logging period must be strictly positive.
+// This class does not take ownership of `tick_timer`; the pointer must refer
+// to a valid object that outlives the ExpandUmaLogger instance.
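+//
+// Usage sketch (illustrative; NetEq internally creates loggers for histograms
+// such as "WebRTC.Audio.ExpandRatePercent"):
+//
+//   ExpandUmaLogger logger("WebRTC.Audio.ExpandRatePercent",
+//                          /*logging_period_s=*/10, &tick_timer);
+//   // On every output block:
+//   logger.UpdateSampleCounter(cumulative_expand_samples, sample_rate_hz);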
+class ExpandUmaLogger {
+ public:
+ ExpandUmaLogger(absl::string_view uma_name,
+ int logging_period_s,
+ const TickTimer* tick_timer);
+
+ ~ExpandUmaLogger();
+
+ ExpandUmaLogger(const ExpandUmaLogger&) = delete;
+ ExpandUmaLogger& operator=(const ExpandUmaLogger&) = delete;
+
+ // In this call, value should be an incremental sample counter. The sample
+ // rate must be strictly positive.
+ void UpdateSampleCounter(uint64_t value, int sample_rate_hz);
+
+ private:
+ const std::string uma_name_;
+ const int logging_period_s_;
+ const TickTimer& tick_timer_;
+ std::unique_ptr<TickTimer::Countdown> timer_;
+ absl::optional<uint64_t> last_logged_value_;
+ uint64_t last_value_ = 0;
+ int sample_rate_hz_ = 0;
+};
+
+} // namespace webrtc
+#endif // MODULES_AUDIO_CODING_NETEQ_EXPAND_UMA_LOGGER_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/expand_unittest.cc b/third_party/libwebrtc/modules/audio_coding/neteq/expand_unittest.cc
new file mode 100644
index 0000000000..9355fce5e1
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/expand_unittest.cc
@@ -0,0 +1,203 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+// Unit tests for Expand class.
+
+#include "modules/audio_coding/neteq/expand.h"
+
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+#include "modules/audio_coding/neteq/background_noise.h"
+#include "modules/audio_coding/neteq/random_vector.h"
+#include "modules/audio_coding/neteq/statistics_calculator.h"
+#include "modules/audio_coding/neteq/sync_buffer.h"
+#include "modules/audio_coding/neteq/tools/resample_input_audio_file.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "test/gtest.h"
+#include "test/testsupport/file_utils.h"
+
+namespace webrtc {
+
+TEST(Expand, CreateAndDestroy) {
+ int fs = 8000;
+ size_t channels = 1;
+ BackgroundNoise bgn(channels);
+ SyncBuffer sync_buffer(1, 1000);
+ RandomVector random_vector;
+ StatisticsCalculator statistics;
+ Expand expand(&bgn, &sync_buffer, &random_vector, &statistics, fs, channels);
+}
+
+TEST(Expand, CreateUsingFactory) {
+ int fs = 8000;
+ size_t channels = 1;
+ BackgroundNoise bgn(channels);
+ SyncBuffer sync_buffer(1, 1000);
+ RandomVector random_vector;
+ StatisticsCalculator statistics;
+ ExpandFactory expand_factory;
+ Expand* expand = expand_factory.Create(&bgn, &sync_buffer, &random_vector,
+ &statistics, fs, channels);
+ EXPECT_TRUE(expand != NULL);
+ delete expand;
+}
+
+namespace {
+class FakeStatisticsCalculator : public StatisticsCalculator {
+ public:
+ void LogDelayedPacketOutageEvent(int num_samples, int fs_hz) override {
+ last_outage_duration_samples_ = num_samples;
+ }
+
+ int last_outage_duration_samples() const {
+ return last_outage_duration_samples_;
+ }
+
+ private:
+ int last_outage_duration_samples_ = 0;
+};
+
+// This is the same size that is given to the SyncBuffer object in NetEq.
+const size_t kNetEqSyncBufferLengthMs = 720;
+} // namespace
+
+class ExpandTest : public ::testing::Test {
+ protected:
+ ExpandTest()
+ : input_file_(test::ResourcePath("audio_coding/testfile32kHz", "pcm"),
+ 32000),
+ test_sample_rate_hz_(32000),
+ num_channels_(1),
+ background_noise_(num_channels_),
+ sync_buffer_(num_channels_,
+ kNetEqSyncBufferLengthMs * test_sample_rate_hz_ / 1000),
+ expand_(&background_noise_,
+ &sync_buffer_,
+ &random_vector_,
+ &statistics_,
+ test_sample_rate_hz_,
+ num_channels_) {
+ input_file_.set_output_rate_hz(test_sample_rate_hz_);
+ }
+
+ void SetUp() override {
+ // Fast-forward the input file until there is speech (about 1.1 second into
+ // the file).
+ const int speech_start_samples =
+ static_cast<int>(test_sample_rate_hz_ * 1.1f);
+ ASSERT_TRUE(input_file_.Seek(speech_start_samples));
+
+ // Pre-load the sync buffer with speech data.
+ std::unique_ptr<int16_t[]> temp(new int16_t[sync_buffer_.Size()]);
+ ASSERT_TRUE(input_file_.Read(sync_buffer_.Size(), temp.get()));
+ sync_buffer_.Channel(0).OverwriteAt(temp.get(), sync_buffer_.Size(), 0);
+ ASSERT_EQ(1u, num_channels_) << "Fix: Must populate all channels.";
+ }
+
+ test::ResampleInputAudioFile input_file_;
+ int test_sample_rate_hz_;
+ size_t num_channels_;
+ BackgroundNoise background_noise_;
+ SyncBuffer sync_buffer_;
+ RandomVector random_vector_;
+ FakeStatisticsCalculator statistics_;
+ Expand expand_;
+};
+
+// This test calls the expand object to produce concealment data a few times,
+// and then ends by calling SetParametersForNormalAfterExpand. This simulates
+// the situation where the packet next up for decoding was just delayed, not
+// lost.
+TEST_F(ExpandTest, DelayedPacketOutage) {
+ AudioMultiVector output(num_channels_);
+ size_t sum_output_len_samples = 0;
+ for (int i = 0; i < 10; ++i) {
+ EXPECT_EQ(0, expand_.Process(&output));
+ EXPECT_GT(output.Size(), 0u);
+ sum_output_len_samples += output.Size();
+ EXPECT_EQ(0, statistics_.last_outage_duration_samples());
+ }
+ expand_.SetParametersForNormalAfterExpand();
+ // The outage duration is reported in samples and should equal the total
+ // number of samples produced while expanding.
+ EXPECT_EQ(rtc::checked_cast<int>(sum_output_len_samples),
+ statistics_.last_outage_duration_samples());
+}
+
+// This test is similar to DelayedPacketOutage, but ends by calling
+// SetParametersForMergeAfterExpand. This simulates the situation where the
+// packet next up for decoding was actually lost (or at least a later packet
+// arrived before it).
+TEST_F(ExpandTest, LostPacketOutage) {
+ AudioMultiVector output(num_channels_);
+ for (int i = 0; i < 10; ++i) {
+ EXPECT_EQ(0, expand_.Process(&output));
+ EXPECT_GT(output.Size(), 0u);
+ EXPECT_EQ(0, statistics_.last_outage_duration_samples());
+ }
+ expand_.SetParametersForMergeAfterExpand();
+ EXPECT_EQ(0, statistics_.last_outage_duration_samples());
+}
+
+// This test is similar to the DelayedPacketOutage test above, but with the
+// difference that Expand::Reset() is called part-way through the loop (when
+// i == 5). This resets the statistics, so the reported outage covers only the
+// four Process() calls made after the reset, instead of all ten.
+TEST_F(ExpandTest, CheckOutageStatsAfterReset) {
+ AudioMultiVector output(num_channels_);
+ size_t sum_output_len_samples = 0;
+ for (int i = 0; i < 10; ++i) {
+ EXPECT_EQ(0, expand_.Process(&output));
+ EXPECT_GT(output.Size(), 0u);
+ sum_output_len_samples += output.Size();
+ if (i == 5) {
+ expand_.Reset();
+ sum_output_len_samples = 0;
+ }
+ EXPECT_EQ(0, statistics_.last_outage_duration_samples());
+ }
+ expand_.SetParametersForNormalAfterExpand();
+ // The outage duration is reported in samples and should equal the total
+ // number of samples produced since the reset.
+ EXPECT_EQ(rtc::checked_cast<int>(sum_output_len_samples),
+ statistics_.last_outage_duration_samples());
+}
+
+namespace {
+// Runs expand until Muted() returns true. Times out after 1000 calls.
+void ExpandUntilMuted(size_t num_channels, Expand* expand) {
+ EXPECT_FALSE(expand->Muted()) << "Instance is muted from the start";
+ AudioMultiVector output(num_channels);
+ int num_calls = 0;
+ while (!expand->Muted()) {
+ ASSERT_LT(num_calls++, 1000) << "Test timed out";
+ EXPECT_EQ(0, expand->Process(&output));
+ }
+}
+} // namespace
+
+// Verifies that Muted() returns true after a long expand period. Also verifies
+// that Muted() is reset to false after calling Reset(),
+// SetParametersForMergeAfterExpand() and SetParametersForNormalAfterExpand().
+TEST_F(ExpandTest, Muted) {
+ ExpandUntilMuted(num_channels_, &expand_);
+ expand_.Reset();
+ EXPECT_FALSE(expand_.Muted()); // Should be back to unmuted.
+
+ ExpandUntilMuted(num_channels_, &expand_);
+ expand_.SetParametersForMergeAfterExpand();
+ EXPECT_FALSE(expand_.Muted()); // Should be back to unmuted.
+
+ expand_.Reset(); // Must reset in order to start a new expand period.
+ ExpandUntilMuted(num_channels_, &expand_);
+ expand_.SetParametersForNormalAfterExpand();
+ EXPECT_FALSE(expand_.Muted()); // Should be back to unmuted.
+}
+
+// TODO(hlundin): Write more tests.
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/g3doc/index.md b/third_party/libwebrtc/modules/audio_coding/neteq/g3doc/index.md
new file mode 100644
index 0000000000..e97324d89d
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/g3doc/index.md
@@ -0,0 +1,102 @@
+<?% config.freshness.reviewed = '2021-04-13' %?>
+<?% config.freshness.owner = 'jakobi' %?>
+
+# NetEq
+
+NetEq is the audio jitter buffer and packet loss concealer. The jitter buffer
+is adaptive, meaning that the buffering delay is continuously optimized based
+on the network conditions. Its main goal is to ensure smooth playout of
+incoming audio packets from the network with a low amount of audio artifacts
+(alterations to the original content of the packets) while at the same time
+keeping the delay as low as possible.
+
+## API
+
+At a high level, the NetEq API has two main functions:
+[`InsertPacket`](https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/api/neteq/neteq.h;l=198;drc=4461f059d180fe8c2886d422ebd1cb55b5c83e72)
+and
+[`GetAudio`](https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/api/neteq/neteq.h;l=219;drc=4461f059d180fe8c2886d422ebd1cb55b5c83e72).
+
+### InsertPacket
+
+[`InsertPacket`](https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/api/neteq/neteq.h;l=198;drc=4461f059d180fe8c2886d422ebd1cb55b5c83e72)
+delivers an RTP packet from the network to NetEq where the following happens:
+
+1. The packet is discarded if it is too late for playout (for example if it
+ was reordered). Otherwise it is put into the packet buffer, where it is
+ stored until it is time for playout. If the buffer is full, all existing
+ packets are discarded (this should be rare).
+2. The interarrival time between packets is analyzed and statistics are
+ updated, which are used to derive a new target playout delay. The
+ interarrival time is measured in the number of GetAudio ‘ticks’, and thus
+ clock drift between the sender and receiver can be accounted for.
+
+### GetAudio
+
+[`GetAudio`](https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/api/neteq/neteq.h;l=219;drc=4461f059d180fe8c2886d422ebd1cb55b5c83e72)
+pulls 10 ms of audio from NetEq for playout. A much simplified decision logic is
+as follows:
+
+1. If there is 10 ms of audio in the sync buffer, then return that.
+2. If the next packet is available (based on RTP timestamp) in the packet
+ buffer then decode it and append the result to the sync buffer.
+ 1. Compare the current delay estimate (filtered buffer level) with the
+ target delay and time stretch (accelerate or decelerate) the contents of
+ the sync buffer if the buffer level is too high or too low.
+ 2. Return 10 ms of audio from the sync buffer.
+3. If the last decoded packet was a discontinuous transmission (DTX) packet
+ then generate comfort noise.
+4. If no packet is available for decoding, because the next packet has not
+ arrived or was lost, then generate packet loss concealment by extrapolating
+ the remaining audio in the sync buffer or by asking the decoder to produce
+ it.
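+
+The two calls are typically driven as in the following sketch (method
+signatures vary between WebRTC versions, so treat this as illustrative
+pseudocode rather than a drop-in snippet):
+
+```c++
+// Network thread: hand each received RTP packet to NetEq.
+neteq->InsertPacket(rtp_header, payload);
+
+// Playout thread: pull exactly 10 ms of audio per callback.
+AudioFrame audio_frame;
+bool muted;
+if (neteq->GetAudio(&audio_frame, &muted) == NetEq::kOK) {
+  // Hand `audio_frame` to the audio device (hypothetical helper).
+  PlayFrame(audio_frame);
+}
+```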
+
+In summary, the output is the result of one of the following operations:
+
+* Normal: audio decoded from a packet.
+* Acceleration: accelerated playout of a decoded packet.
+* Preemptive expand: decelerated playout of a decoded packet.
+* Expand: packet loss concealment generated by NetEq or the decoder.
+* Merge: audio stitched together from packet loss concealment to decoded data
+ in case of a loss.
+* Comfort noise (CNG): comfort noise generated by NetEq or the decoder between
+ talk spurts due to discontinuous transmission of packets (DTX).
+
+## Statistics
+
+There are a number of functions that can be used to query the internal state
+of NetEq, statistics about the type of audio output, and latency metrics such
+as how long packets have waited in the buffer.
+
+* [`NetworkStatistics`](https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/api/neteq/neteq.h;l=273;drc=4461f059d180fe8c2886d422ebd1cb55b5c83e72):
+ instantaneous values or stats averaged over the duration since last call to
+ this function.
+* [`GetLifetimeStatistics`](https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/api/neteq/neteq.h;l=280;drc=4461f059d180fe8c2886d422ebd1cb55b5c83e72):
+ cumulative stats that persist over the lifetime of the class.
+* [`GetOperationsAndState`](https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/api/neteq/neteq.h;l=284;drc=4461f059d180fe8c2886d422ebd1cb55b5c83e72):
+ information about the internal state of NetEq (only intended to be used
+ for testing and debugging).
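+
+A minimal sketch of polling the first of these (field names follow
+`NetEqNetworkStatistics` in api/neteq/neteq.h; note that rate fields such as
+`expand_rate` are reported in Q14, where 16384 corresponds to 100%):
+
+```c++
+NetEqNetworkStatistics stats;
+if (neteq->NetworkStatistics(&stats) == 0) {
+  RTC_LOG(LS_INFO) << "buffer=" << stats.current_buffer_size_ms
+                   << " ms, expand_rate(Q14)=" << stats.expand_rate;
+}
+```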
+
+## Tests and tools
+
+* [`neteq_rtpplay`](https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_coding/neteq/tools/neteq_rtpplay.cc;drc=cee751abff598fc19506f77de08bea7c61b9dcca):
+ Simulate NetEq behavior based on either an RTP dump, a PCAP file or an RTC
+ event log. A replacement audio file can also be used instead of the original
+ payload. Outputs aggregated statistics and optionally an audio file to
+ listen to.
+* [`neteq_speed_test`](https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_coding/neteq/test/neteq_speed_test.cc;drc=2ab97f6f8e27b47c0d9beeb8b6ca5387bda9f55c):
+ Measure performance of NetEq, used on perf bots.
+* Unit tests, including bit-exactness tests where an RTP file is used as
+ input to NetEq; the output is concatenated, a checksum is calculated, and
+ the result is compared against a reference.
+
+## Other responsibilities
+
+* Dual-tone multi-frequency signaling (DTMF): receive telephone events and
+ produce dual tone waveforms.
+* Forward error correction (RED or codec inband FEC): split inserted packets
+ and prioritize the payloads.
+* NACK (negative acknowledgement): keep track of lost packets and generate a
+ list of packets to NACK.
+* Audio/video sync: NetEq can be instructed to increase the latency in order
+ to keep audio and video in sync.
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/histogram.cc b/third_party/libwebrtc/modules/audio_coding/neteq/histogram.cc
new file mode 100644
index 0000000000..e4b7f10379
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/histogram.cc
@@ -0,0 +1,149 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/histogram.h"
+
+#include <algorithm>
+#include <cstdlib>
+#include <numeric>
+
+#include "absl/types/optional.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/numerics/safe_conversions.h"
+
+namespace webrtc {
+
+Histogram::Histogram(size_t num_buckets,
+ int forget_factor,
+ absl::optional<double> start_forget_weight)
+ : buckets_(num_buckets, 0),
+ forget_factor_(0),
+ base_forget_factor_(forget_factor),
+ add_count_(0),
+ start_forget_weight_(start_forget_weight) {
+ RTC_DCHECK_LT(base_forget_factor_, 1 << 15);
+}
+
+Histogram::~Histogram() {}
+
+// Each element in the vector is first multiplied by the forgetting factor
+// `forget_factor_`. Then the vector element indicated by `value` is increased
+// (additively) by 1 - `forget_factor_`. This way, the probability of `value`
+// is slightly increased, while the sum of the histogram remains
+// constant (=1).
+// Due to inaccuracies in the fixed-point arithmetic, the histogram may no
+// longer sum up to 1 (in Q30) after the update. To correct this, a correction
+// term is added or subtracted from the first element (or elements) of the
+// vector.
+// The forgetting factor `forget_factor_` is also updated. When the DelayManager
+// is reset, the factor is set to 0 to facilitate rapid convergence in the
+// beginning. With each update of the histogram, the factor is increased towards
+// the steady-state value `base_forget_factor_`.
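+//
+// Example: if `forget_factor_` were 16384 (0.5 in Q15), every bucket would be
+// halved and the bucket at `value` would gain (32768 - 16384) << 15 = 2^29,
+// i.e. 0.5 in Q30, so the histogram still sums to (approximately) 1.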
+void Histogram::Add(int value) {
+ RTC_DCHECK(value >= 0);
+ RTC_DCHECK(value < static_cast<int>(buckets_.size()));
+ int vector_sum = 0; // Sum up the vector elements as they are processed.
+ // Multiply each element in `buckets_` with `forget_factor_`.
+ for (int& bucket : buckets_) {
+ bucket = (static_cast<int64_t>(bucket) * forget_factor_) >> 15;
+ vector_sum += bucket;
+ }
+
+ // Increase the probability for the currently observed inter-arrival time
+ // by 1 - `forget_factor_`. The factor is in Q15, `buckets_` in Q30.
+ // Thus, left-shift 15 steps to obtain result in Q30.
+ buckets_[value] += (32768 - forget_factor_) << 15;
+ vector_sum += (32768 - forget_factor_) << 15; // Add to vector sum.
+
+ // `buckets_` should sum up to 1 (in Q30), but it may not due to
+ // fixed-point rounding errors.
+ vector_sum -= 1 << 30; // Should be zero. Compensate if not.
+ if (vector_sum != 0) {
+ // Modify a few values early in `buckets_`.
+ int flip_sign = vector_sum > 0 ? -1 : 1;
+ for (int& bucket : buckets_) {
+ // Add/subtract 1/16 of the element, but not more than `vector_sum`.
+ int correction = flip_sign * std::min(std::abs(vector_sum), bucket >> 4);
+ bucket += correction;
+ vector_sum += correction;
+      if (vector_sum == 0) {
+ break;
+ }
+ }
+ }
+  RTC_DCHECK_EQ(vector_sum, 0);  // Verify that the above is correct.
+
+ ++add_count_;
+
+ // Update `forget_factor_` (changes only during the first seconds after a
+ // reset). The factor converges to `base_forget_factor_`.
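+  // Example (with `start_forget_weight_` = 1.0): the n:th call to Add() sets
+  // `forget_factor_` to roughly (1 - 1/(n + 1)) in Q15, i.e. ~0.5 after the
+  // first sample and ~0.67 after the second, approaching
+  // `base_forget_factor_` from below.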
+ if (start_forget_weight_) {
+ if (forget_factor_ != base_forget_factor_) {
+ int old_forget_factor = forget_factor_;
+ int forget_factor =
+ (1 << 15) * (1 - start_forget_weight_.value() / (add_count_ + 1));
+ forget_factor_ =
+ std::max(0, std::min(base_forget_factor_, forget_factor));
+ // The histogram is updated recursively by forgetting the old histogram
+ // with `forget_factor_` and adding a new sample multiplied by |1 -
+ // forget_factor_|. We need to make sure that the effective weight on the
+ // new sample is no smaller than those on the old samples, i.e., to
+ // satisfy the following DCHECK.
+ RTC_DCHECK_GE((1 << 15) - forget_factor_,
+ ((1 << 15) - old_forget_factor) * forget_factor_ >> 15);
+ }
+ } else {
+ forget_factor_ += (base_forget_factor_ - forget_factor_ + 3) >> 2;
+ }
+}
+
+int Histogram::Quantile(int probability) {
+  // Find the bucket for which the probability of observing an
+  // inter-arrival time larger than or equal to `index` is larger than or
+  // equal to `probability`. The sought probability is estimated using
+  // the histogram as the complementary cumulative distribution function,
+  // i.e., the sum of elements from the end up until `index`. Now, since the
+  // sum of all elements is 1 (in Q30) by definition, and since the solution
+  // is often a low value for `index`, it is more efficient to start with
+  // `sum` = 1 and subtract elements from the start of the histogram.
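+  // Worked example (illustrative): with buckets {0.5, 0.25, 0.125, ...} in
+  // Q30 and `probability` corresponding to 0.8, the result is index 2, since
+  // 0.5 + 0.25 = 0.75 < 0.8 <= 0.75 + 0.125.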
+ int inverse_probability = (1 << 30) - probability;
+ size_t index = 0; // Start from the beginning of `buckets_`.
+ int sum = 1 << 30; // Assign to 1 in Q30.
+ sum -= buckets_[index];
+
+ while ((sum > inverse_probability) && (index < buckets_.size() - 1)) {
+ // Subtract the probabilities one by one until the sum is no longer greater
+ // than `inverse_probability`.
+ ++index;
+ sum -= buckets_[index];
+ }
+ return static_cast<int>(index);
+}
+
+// Set the histogram vector to an exponentially decaying distribution
+// buckets_[i] = 0.5^(i+1), i = 0, 1, 2, ...
+// buckets_ is in Q30.
+void Histogram::Reset() {
+ // Set temp_prob to (slightly more than) 1 in Q14. This ensures that the sum
+ // of buckets_ is 1.
+ uint16_t temp_prob = 0x4002; // 16384 + 2 = 100000000000010 binary.
+ for (int& bucket : buckets_) {
+ temp_prob >>= 1;
+ bucket = temp_prob << 16;
+ }
+ forget_factor_ = 0; // Adapt the histogram faster for the first few packets.
+ add_count_ = 0;
+}
+
+int Histogram::NumBuckets() const {
+ return buckets_.size();
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/histogram.h b/third_party/libwebrtc/modules/audio_coding/neteq/histogram.h
new file mode 100644
index 0000000000..265a10e00a
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/histogram.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_HISTOGRAM_H_
+#define MODULES_AUDIO_CODING_NETEQ_HISTOGRAM_H_
+
+#include <string.h> // Provide access to size_t.
+
+#include <vector>
+
+#include "absl/types/optional.h"
+
+namespace webrtc {
+
+class Histogram {
+ public:
+ // Creates histogram with capacity `num_buckets` and `forget_factor` in Q15.
+ Histogram(size_t num_buckets,
+ int forget_factor,
+ absl::optional<double> start_forget_weight = absl::nullopt);
+
+ virtual ~Histogram();
+
+ // Resets the histogram to the default start distribution.
+ virtual void Reset();
+
+ // Add entry in bucket `index`.
+ virtual void Add(int index);
+
+ // Calculates the quantile at `probability` (in Q30) of the histogram
+ // distribution.
+ virtual int Quantile(int probability);
+
+ // Returns the number of buckets in the histogram.
+ virtual int NumBuckets() const;
+
+ // Returns the probability for each bucket in Q30.
+ const std::vector<int>& buckets() const { return buckets_; }
+
+ // Accessors only intended for testing purposes.
+ int base_forget_factor_for_testing() const { return base_forget_factor_; }
+ int forget_factor_for_testing() const { return forget_factor_; }
+ absl::optional<double> start_forget_weight_for_testing() const {
+ return start_forget_weight_;
+ }
+
+ private:
+ std::vector<int> buckets_;
+ int forget_factor_; // Q15
+ const int base_forget_factor_;
+ int add_count_;
+ const absl::optional<double> start_forget_weight_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_CODING_NETEQ_HISTOGRAM_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/histogram_unittest.cc b/third_party/libwebrtc/modules/audio_coding/neteq/histogram_unittest.cc
new file mode 100644
index 0000000000..e30a2956dc
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/histogram_unittest.cc
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/histogram.h"
+
+#include <cmath>
+
+#include "test/gtest.h"
+
+namespace webrtc {
+
+TEST(HistogramTest, Initialization) {
+ Histogram histogram(65, 32440);
+ histogram.Reset();
+ const auto& buckets = histogram.buckets();
+ double sum = 0.0;
+ for (size_t i = 0; i < buckets.size(); i++) {
+ EXPECT_NEAR(ldexp(std::pow(0.5, static_cast<int>(i + 1)), 30), buckets[i],
+ 65537);
+ // Tolerance 65537 in Q30 corresponds to a delta of approximately 0.00006.
+ sum += buckets[i];
+ }
+ EXPECT_EQ(1 << 30, static_cast<int>(sum)); // Should be 1 in Q30.
+}
+
+TEST(HistogramTest, Add) {
+ Histogram histogram(10, 32440);
+ histogram.Reset();
+ const std::vector<int> before = histogram.buckets();
+ const int index = 5;
+ histogram.Add(index);
+ const std::vector<int> after = histogram.buckets();
+ EXPECT_GT(after[index], before[index]);
+ int sum = 0;
+ for (int bucket : after) {
+ sum += bucket;
+ }
+ EXPECT_EQ(1 << 30, sum);
+}
+
+TEST(HistogramTest, ForgetFactor) {
+ Histogram histogram(10, 32440);
+ histogram.Reset();
+ const std::vector<int> before = histogram.buckets();
+ const int index = 4;
+ histogram.Add(index);
+ const std::vector<int> after = histogram.buckets();
+ for (int i = 0; i < histogram.NumBuckets(); ++i) {
+ if (i != index) {
+ EXPECT_LT(after[i], before[i]);
+ }
+ }
+}
+
+TEST(HistogramTest, ReachSteadyStateForgetFactor) {
+ static constexpr int kSteadyStateForgetFactor = (1 << 15) * 0.9993;
+ Histogram histogram(100, kSteadyStateForgetFactor, 1.0);
+ histogram.Reset();
+ int n = (1 << 15) / ((1 << 15) - kSteadyStateForgetFactor);
+ for (int i = 0; i < n; ++i) {
+ histogram.Add(0);
+ }
+ EXPECT_EQ(histogram.forget_factor_for_testing(), kSteadyStateForgetFactor);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/merge.cc b/third_party/libwebrtc/modules/audio_coding/neteq/merge.cc
new file mode 100644
index 0000000000..0aec6d2597
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/merge.cc
@@ -0,0 +1,391 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/merge.h"
+
+#include <string.h> // memmove, memcpy, memset, size_t
+
+#include <algorithm>  // min, max
+#include <limits>     // numeric_limits
+#include <memory>
+
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+#include "modules/audio_coding/neteq/audio_multi_vector.h"
+#include "modules/audio_coding/neteq/cross_correlation.h"
+#include "modules/audio_coding/neteq/dsp_helper.h"
+#include "modules/audio_coding/neteq/expand.h"
+#include "modules/audio_coding/neteq/sync_buffer.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "rtc_base/numerics/safe_minmax.h"
+
+namespace webrtc {
+
+Merge::Merge(int fs_hz,
+ size_t num_channels,
+ Expand* expand,
+ SyncBuffer* sync_buffer)
+ : fs_hz_(fs_hz),
+ num_channels_(num_channels),
+ fs_mult_(fs_hz_ / 8000),
+ timestamps_per_call_(static_cast<size_t>(fs_hz_ / 100)),
+ expand_(expand),
+ sync_buffer_(sync_buffer),
+ expanded_(num_channels_) {
+ RTC_DCHECK_GT(num_channels_, 0);
+}
+
+Merge::~Merge() = default;
+
+size_t Merge::Process(int16_t* input,
+ size_t input_length,
+ AudioMultiVector* output) {
+ // TODO(hlundin): Change to an enumerator and skip assert.
+ RTC_DCHECK(fs_hz_ == 8000 || fs_hz_ == 16000 || fs_hz_ == 32000 ||
+ fs_hz_ == 48000);
+ RTC_DCHECK_LE(fs_hz_, kMaxSampleRate); // Should not be possible.
+ if (input_length == 0) {
+ return 0;
+ }
+
+ size_t old_length;
+ size_t expand_period;
+ // Get expansion data to overlap and mix with.
+ size_t expanded_length = GetExpandedSignal(&old_length, &expand_period);
+
+ // Transfer input signal to an AudioMultiVector.
+ AudioMultiVector input_vector(num_channels_);
+ input_vector.PushBackInterleaved(
+ rtc::ArrayView<const int16_t>(input, input_length));
+ size_t input_length_per_channel = input_vector.Size();
+ RTC_DCHECK_EQ(input_length_per_channel, input_length / num_channels_);
+
+ size_t best_correlation_index = 0;
+ size_t output_length = 0;
+
+ std::unique_ptr<int16_t[]> input_channel(
+ new int16_t[input_length_per_channel]);
+ std::unique_ptr<int16_t[]> expanded_channel(new int16_t[expanded_length]);
+ for (size_t channel = 0; channel < num_channels_; ++channel) {
+ input_vector[channel].CopyTo(input_length_per_channel, 0,
+ input_channel.get());
+ expanded_[channel].CopyTo(expanded_length, 0, expanded_channel.get());
+
+ const int16_t new_mute_factor = std::min<int16_t>(
+ 16384, SignalScaling(input_channel.get(), input_length_per_channel,
+ expanded_channel.get()));
+
+ if (channel == 0) {
+ // Downsample, correlate, and find strongest correlation period for the
+ // reference (i.e., first) channel only.
+ // Downsample to 4kHz sample rate.
+ Downsample(input_channel.get(), input_length_per_channel,
+ expanded_channel.get(), expanded_length);
+
+ // Calculate the lag of the strongest correlation period.
+ best_correlation_index = CorrelateAndPeakSearch(
+ old_length, input_length_per_channel, expand_period);
+ }
+
+ temp_data_.resize(input_length_per_channel + best_correlation_index);
+ int16_t* decoded_output = temp_data_.data() + best_correlation_index;
+
+ // Mute the new decoded data if needed (and unmute it linearly).
+ // This is the overlapping part of expanded_signal.
+ size_t interpolation_length =
+ std::min(kMaxCorrelationLength * fs_mult_,
+ expanded_length - best_correlation_index);
+ interpolation_length =
+ std::min(interpolation_length, input_length_per_channel);
+
+ RTC_DCHECK_LE(new_mute_factor, 16384);
+ int16_t mute_factor =
+ std::max(expand_->MuteFactor(channel), new_mute_factor);
+ RTC_DCHECK_GE(mute_factor, 0);
+
+ if (mute_factor < 16384) {
+ // Set a suitable muting slope (Q20). 0.004 for NB, 0.002 for WB,
+ // and so on, or as fast as it takes to come back to full gain within the
+ // frame length.
+ const int back_to_fullscale_inc = static_cast<int>(
+ ((16384 - mute_factor) << 6) / input_length_per_channel);
+ const int increment = std::max(4194 / fs_mult_, back_to_fullscale_inc);
+ mute_factor = static_cast<int16_t>(DspHelper::RampSignal(
+ input_channel.get(), interpolation_length, mute_factor, increment));
+ DspHelper::UnmuteSignal(&input_channel[interpolation_length],
+ input_length_per_channel - interpolation_length,
+ &mute_factor, increment,
+ &decoded_output[interpolation_length]);
+ } else {
+ // No muting needed.
+ memmove(
+ &decoded_output[interpolation_length],
+ &input_channel[interpolation_length],
+ sizeof(int16_t) * (input_length_per_channel - interpolation_length));
+ }
+
+ // Do overlap and mix linearly.
+ int16_t increment =
+ static_cast<int16_t>(16384 / (interpolation_length + 1)); // In Q14.
+ int16_t local_mute_factor = 16384 - increment;
+ memmove(temp_data_.data(), expanded_channel.get(),
+ sizeof(int16_t) * best_correlation_index);
+ DspHelper::CrossFade(&expanded_channel[best_correlation_index],
+ input_channel.get(), interpolation_length,
+ &local_mute_factor, increment, decoded_output);
+
+ output_length = best_correlation_index + input_length_per_channel;
+ if (channel == 0) {
+ RTC_DCHECK(output->Empty()); // Output should be empty at this point.
+ output->AssertSize(output_length);
+ } else {
+ RTC_DCHECK_EQ(output->Size(), output_length);
+ }
+ (*output)[channel].OverwriteAt(temp_data_.data(), output_length, 0);
+ }
+
+ // Copy back the first part of the data to `sync_buffer_` and remove it from
+ // `output`.
+ sync_buffer_->ReplaceAtIndex(*output, old_length, sync_buffer_->next_index());
+ output->PopFront(old_length);
+
+ // Return new added length. `old_length` samples were borrowed from
+ // `sync_buffer_`.
+ RTC_DCHECK_GE(output_length, old_length);
+ return output_length - old_length;
+}
+
+size_t Merge::GetExpandedSignal(size_t* old_length, size_t* expand_period) {
+  // Check how much data is left from earlier.
+ *old_length = sync_buffer_->FutureLength();
+ // Should never be less than overlap_length.
+ RTC_DCHECK_GE(*old_length, expand_->overlap_length());
+ // Generate data to merge the overlap with using expand.
+ expand_->SetParametersForMergeAfterExpand();
+
+ if (*old_length >= 210 * kMaxSampleRate / 8000) {
+ // TODO(hlundin): Write test case for this.
+ // The number of samples available in the sync buffer is more than what fits
+ // in expanded_signal. Keep the first 210 * kMaxSampleRate / 8000 samples,
+ // but shift them towards the end of the buffer. This is ok, since all of
+ // the buffer will be expand data anyway, so as long as the beginning is
+ // left untouched, we're fine.
+ size_t length_diff = *old_length - 210 * kMaxSampleRate / 8000;
+ sync_buffer_->InsertZerosAtIndex(length_diff, sync_buffer_->next_index());
+ *old_length = 210 * kMaxSampleRate / 8000;
+ // This is the truncated length.
+ }
+ // This assert should always be true thanks to the if statement above.
+ RTC_DCHECK_GE(210 * kMaxSampleRate / 8000, *old_length);
+
+ AudioMultiVector expanded_temp(num_channels_);
+ expand_->Process(&expanded_temp);
+ *expand_period = expanded_temp.Size(); // Samples per channel.
+
+ expanded_.Clear();
+ // Copy what is left since earlier into the expanded vector.
+ expanded_.PushBackFromIndex(*sync_buffer_, sync_buffer_->next_index());
+ RTC_DCHECK_EQ(expanded_.Size(), *old_length);
+ RTC_DCHECK_GT(expanded_temp.Size(), 0);
+  // Do an "ugly" copy and paste from the expanded signal in order to generate
+  // more data to correlate (but not interpolate) with.
+ const size_t required_length = static_cast<size_t>((120 + 80 + 2) * fs_mult_);
+ if (expanded_.Size() < required_length) {
+ while (expanded_.Size() < required_length) {
+ // Append one more pitch period each time.
+ expanded_.PushBack(expanded_temp);
+ }
+ // Trim the length to exactly `required_length`.
+ expanded_.PopBack(expanded_.Size() - required_length);
+ }
+ RTC_DCHECK_GE(expanded_.Size(), required_length);
+ return required_length;
+}
+
+int16_t Merge::SignalScaling(const int16_t* input,
+ size_t input_length,
+ const int16_t* expanded_signal) const {
+  // Adjust the muting factor if the new vector has more or less energy than
+  // the expanded (background noise) signal.
+ const auto mod_input_length = rtc::SafeMin<size_t>(
+ 64 * rtc::dchecked_cast<size_t>(fs_mult_), input_length);
+
+  // Missing input; do not mute.
+ if (mod_input_length == 0) {
+ return 16384;
+ }
+
+ const int16_t expanded_max =
+ WebRtcSpl_MaxAbsValueW16(expanded_signal, mod_input_length);
+ int32_t factor =
+ (expanded_max * expanded_max) / (std::numeric_limits<int32_t>::max() /
+ static_cast<int32_t>(mod_input_length));
+ const int expanded_shift = factor == 0 ? 0 : 31 - WebRtcSpl_NormW32(factor);
+ int32_t energy_expanded = WebRtcSpl_DotProductWithScale(
+ expanded_signal, expanded_signal, mod_input_length, expanded_shift);
+
+ // Calculate energy of input signal.
+ const int16_t input_max = WebRtcSpl_MaxAbsValueW16(input, mod_input_length);
+ factor = (input_max * input_max) / (std::numeric_limits<int32_t>::max() /
+ static_cast<int32_t>(mod_input_length));
+ const int input_shift = factor == 0 ? 0 : 31 - WebRtcSpl_NormW32(factor);
+ int32_t energy_input = WebRtcSpl_DotProductWithScale(
+ input, input, mod_input_length, input_shift);
+
+ // Align to the same Q-domain.
+ if (input_shift > expanded_shift) {
+ energy_expanded = energy_expanded >> (input_shift - expanded_shift);
+ } else {
+ energy_input = energy_input >> (expanded_shift - input_shift);
+ }
+
+ // Calculate muting factor to use for new frame.
+ int16_t mute_factor;
+ if (energy_input > energy_expanded) {
+ // Normalize `energy_input` to 14 bits.
+ int16_t temp_shift = WebRtcSpl_NormW32(energy_input) - 17;
+ energy_input = WEBRTC_SPL_SHIFT_W32(energy_input, temp_shift);
+ // Put `energy_expanded` in a domain 14 higher, so that
+ // energy_expanded / energy_input is in Q14.
+ energy_expanded = WEBRTC_SPL_SHIFT_W32(energy_expanded, temp_shift + 14);
+ // Calculate sqrt(energy_expanded / energy_input) in Q14.
+ mute_factor = static_cast<int16_t>(
+ WebRtcSpl_SqrtFloor((energy_expanded / energy_input) << 14));
+ } else {
+ // Set to 1 (in Q14) when `expanded` has higher energy than `input`.
+ mute_factor = 16384;
+ }
+
+ return mute_factor;
+}
+
+// TODO(hlundin): There are some parameter values in this method that seem
+// strange. Compare with Expand::Correlation.
+void Merge::Downsample(const int16_t* input,
+ size_t input_length,
+ const int16_t* expanded_signal,
+ size_t expanded_length) {
+ const int16_t* filter_coefficients;
+ size_t num_coefficients;
+ int decimation_factor = fs_hz_ / 4000;
+ static const size_t kCompensateDelay = 0;
+ size_t length_limit = static_cast<size_t>(fs_hz_ / 100); // 10 ms in samples.
+ if (fs_hz_ == 8000) {
+ filter_coefficients = DspHelper::kDownsample8kHzTbl;
+ num_coefficients = 3;
+ } else if (fs_hz_ == 16000) {
+ filter_coefficients = DspHelper::kDownsample16kHzTbl;
+ num_coefficients = 5;
+ } else if (fs_hz_ == 32000) {
+ filter_coefficients = DspHelper::kDownsample32kHzTbl;
+ num_coefficients = 7;
+ } else { // fs_hz_ == 48000
+ filter_coefficients = DspHelper::kDownsample48kHzTbl;
+ num_coefficients = 7;
+ }
+ size_t signal_offset = num_coefficients - 1;
+ WebRtcSpl_DownsampleFast(
+ &expanded_signal[signal_offset], expanded_length - signal_offset,
+ expanded_downsampled_, kExpandDownsampLength, filter_coefficients,
+ num_coefficients, decimation_factor, kCompensateDelay);
+ if (input_length <= length_limit) {
+ // Not quite long enough, so we have to cheat a bit.
+ // If the input is shorter than the offset, we consider the input to be 0
+ // length. This will cause us to skip the downsampling since it makes no
+ // sense anyway, and input_downsampled_ will be filled with zeros. This is
+ // clearly a pathological case, and the signal quality will suffer, but
+ // there is not much we can do.
+ const size_t temp_len =
+ input_length > signal_offset ? input_length - signal_offset : 0;
+ // TODO(hlundin): Should `downsamp_temp_len` be corrected for round-off
+ // errors? I.e., (temp_len + decimation_factor - 1) / decimation_factor?
+ size_t downsamp_temp_len = temp_len / decimation_factor;
+ if (downsamp_temp_len > 0) {
+ WebRtcSpl_DownsampleFast(&input[signal_offset], temp_len,
+ input_downsampled_, downsamp_temp_len,
+ filter_coefficients, num_coefficients,
+ decimation_factor, kCompensateDelay);
+ }
+ memset(&input_downsampled_[downsamp_temp_len], 0,
+ sizeof(int16_t) * (kInputDownsampLength - downsamp_temp_len));
+ } else {
+ WebRtcSpl_DownsampleFast(
+ &input[signal_offset], input_length - signal_offset, input_downsampled_,
+ kInputDownsampLength, filter_coefficients, num_coefficients,
+ decimation_factor, kCompensateDelay);
+ }
+}
+
+size_t Merge::CorrelateAndPeakSearch(size_t start_position,
+ size_t input_length,
+ size_t expand_period) const {
+ // Calculate correlation without any normalization.
+ const size_t max_corr_length = kMaxCorrelationLength;
+ size_t stop_position_downsamp =
+ std::min(max_corr_length, expand_->max_lag() / (fs_mult_ * 2) + 1);
+
+ int32_t correlation[kMaxCorrelationLength];
+ CrossCorrelationWithAutoShift(input_downsampled_, expanded_downsampled_,
+ kInputDownsampLength, stop_position_downsamp, 1,
+ correlation);
+
+ // Normalize correlation to 14 bits and copy to a 16-bit array.
+ const size_t pad_length = expand_->overlap_length() - 1;
+ const size_t correlation_buffer_size = 2 * pad_length + kMaxCorrelationLength;
+ std::unique_ptr<int16_t[]> correlation16(
+ new int16_t[correlation_buffer_size]);
+ memset(correlation16.get(), 0, correlation_buffer_size * sizeof(int16_t));
+ int16_t* correlation_ptr = &correlation16[pad_length];
+ int32_t max_correlation =
+ WebRtcSpl_MaxAbsValueW32(correlation, stop_position_downsamp);
+ int norm_shift = std::max(0, 17 - WebRtcSpl_NormW32(max_correlation));
+ WebRtcSpl_VectorBitShiftW32ToW16(correlation_ptr, stop_position_downsamp,
+ correlation, norm_shift);
+
+  // Calculate allowed starting point for peak finding.
+  // The peak location `best_correlation_index` must fulfill two criteria:
+  // (1) best_correlation_index + input_length >=
+  //     timestamps_per_call_ + expand_->overlap_length();
+  // (2) best_correlation_index + input_length >= start_position.
+ size_t start_index = timestamps_per_call_ + expand_->overlap_length();
+ start_index = std::max(start_position, start_index);
+ start_index = (input_length > start_index) ? 0 : (start_index - input_length);
+ // Downscale starting index to 4kHz domain. (fs_mult_ * 2 = fs_hz_ / 4000.)
+ size_t start_index_downsamp = start_index / (fs_mult_ * 2);
+
+ // Calculate a modified `stop_position_downsamp` to account for the increased
+ // start index `start_index_downsamp` and the effective array length.
+ size_t modified_stop_pos =
+ std::min(stop_position_downsamp,
+ kMaxCorrelationLength + pad_length - start_index_downsamp);
+ size_t best_correlation_index;
+ int16_t best_correlation;
+ static const size_t kNumCorrelationCandidates = 1;
+ DspHelper::PeakDetection(&correlation_ptr[start_index_downsamp],
+ modified_stop_pos, kNumCorrelationCandidates,
+ fs_mult_, &best_correlation_index,
+ &best_correlation);
+ // Compensate for modified start index.
+ best_correlation_index += start_index;
+
+  // Ensure that an underrun does not occur in the 10 ms case, i.e., that we
+  // get at least 10 ms + overlap. (The loop body should never execute, thanks
+  // to the above modification of the peak-finding starting point.)
+ while (((best_correlation_index + input_length) <
+ (timestamps_per_call_ + expand_->overlap_length())) ||
+ ((best_correlation_index + input_length) < start_position)) {
+ RTC_DCHECK_NOTREACHED(); // Should never happen.
+ best_correlation_index += expand_period; // Jump one lag ahead.
+ }
+ return best_correlation_index;
+}
+
+size_t Merge::RequiredFutureSamples() {
+ return fs_hz_ / 100 * num_channels_; // 10 ms.
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/merge.h b/third_party/libwebrtc/modules/audio_coding/neteq/merge.h
new file mode 100644
index 0000000000..2f27106bfe
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/merge.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_MERGE_H_
+#define MODULES_AUDIO_CODING_NETEQ_MERGE_H_
+
+#include "modules/audio_coding/neteq/audio_multi_vector.h"
+
+namespace webrtc {
+
+// Forward declarations.
+class Expand;
+class SyncBuffer;
+
+// This class handles the transition from expansion to normal operation.
+// When a packet is not available for decoding when needed, the expand operation
+// is called to generate extrapolation data. If the missing packet arrives,
+// i.e., it was just delayed, it can be decoded and appended directly to the
+// end of the expanded data (thanks to how the Expand class operates). However,
+// if a later packet arrives instead, the loss is a fact, and the new data must
+// be stitched together with the end of the expanded data. This stitching is
+// what the Merge class does.
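+//
+// Rough usage sketch (see merge_unittest.cc for a complete setup; the Expand
+// and SyncBuffer plumbing is assumed to already exist):
+//
+//   Merge merge(fs_hz, num_channels, &expand, &sync_buffer);
+//   size_t new_samples = merge.Process(decoded, decoded_length, &output);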
+class Merge {
+ public:
+ Merge(int fs_hz,
+ size_t num_channels,
+ Expand* expand,
+ SyncBuffer* sync_buffer);
+ virtual ~Merge();
+
+ Merge(const Merge&) = delete;
+ Merge& operator=(const Merge&) = delete;
+
+ // The main method to produce the audio data. The decoded data is supplied in
+ // `input`, having `input_length` samples in total for all channels
+ // (interleaved). The result is written to `output`. The number of channels
+ // allocated in `output` defines the number of channels that will be used when
+ // de-interleaving `input`.
+ virtual size_t Process(int16_t* input,
+ size_t input_length,
+ AudioMultiVector* output);
+
+ virtual size_t RequiredFutureSamples();
+
+ protected:
+ const int fs_hz_;
+ const size_t num_channels_;
+
+ private:
+ static const int kMaxSampleRate = 48000;
+ static const size_t kExpandDownsampLength = 100;
+ static const size_t kInputDownsampLength = 40;
+ static const size_t kMaxCorrelationLength = 60;
+
+  // Calls `expand_` to get more expansion data to merge with. The data is
+  // written to `expanded_`. Returns the length of the expanded data,
+ // while `expand_period` will be the number of samples in one expansion period
+ // (typically one pitch period). The value of `old_length` will be the number
+ // of samples that were taken from the `sync_buffer_`.
+ size_t GetExpandedSignal(size_t* old_length, size_t* expand_period);
+
+ // Analyzes `input` and `expanded_signal` and returns muting factor (Q14) to
+ // be used on the new data.
+ int16_t SignalScaling(const int16_t* input,
+ size_t input_length,
+ const int16_t* expanded_signal) const;
+
+ // Downsamples `input` (`input_length` samples) and `expanded_signal` to
+ // 4 kHz sample rate. The downsampled signals are written to
+ // `input_downsampled_` and `expanded_downsampled_`, respectively.
+ void Downsample(const int16_t* input,
+ size_t input_length,
+ const int16_t* expanded_signal,
+ size_t expanded_length);
+
+ // Calculates cross-correlation between `input_downsampled_` and
+ // `expanded_downsampled_`, and finds the correlation maximum. The maximizing
+ // lag is returned.
+ size_t CorrelateAndPeakSearch(size_t start_position,
+ size_t input_length,
+ size_t expand_period) const;
+
+ const int fs_mult_; // fs_hz_ / 8000.
+ const size_t timestamps_per_call_;
+ Expand* expand_;
+ SyncBuffer* sync_buffer_;
+ int16_t expanded_downsampled_[kExpandDownsampLength];
+ int16_t input_downsampled_[kInputDownsampLength];
+ AudioMultiVector expanded_;
+ std::vector<int16_t> temp_data_;
+};
+
+} // namespace webrtc
+#endif // MODULES_AUDIO_CODING_NETEQ_MERGE_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/merge_unittest.cc b/third_party/libwebrtc/modules/audio_coding/neteq/merge_unittest.cc
new file mode 100644
index 0000000000..d5a55eb056
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/merge_unittest.cc
@@ -0,0 +1,121 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+// Unit tests for Merge class.
+
+#include "modules/audio_coding/neteq/merge.h"
+
+#include <algorithm>
+#include <vector>
+
+#include "modules/audio_coding/neteq/background_noise.h"
+#include "modules/audio_coding/neteq/expand.h"
+#include "modules/audio_coding/neteq/random_vector.h"
+#include "modules/audio_coding/neteq/statistics_calculator.h"
+#include "modules/audio_coding/neteq/sync_buffer.h"
+#include "modules/audio_coding/neteq/tools/resample_input_audio_file.h"
+#include "test/gtest.h"
+#include "test/testsupport/file_utils.h"
+
+namespace webrtc {
+
+TEST(Merge, CreateAndDestroy) {
+ int fs = 8000;
+ size_t channels = 1;
+ BackgroundNoise bgn(channels);
+ SyncBuffer sync_buffer(1, 1000);
+ RandomVector random_vector;
+ StatisticsCalculator statistics;
+ Expand expand(&bgn, &sync_buffer, &random_vector, &statistics, fs, channels);
+ Merge merge(fs, channels, &expand, &sync_buffer);
+}
+
+namespace {
+// This is the same size that is given to the SyncBuffer object in NetEq.
+const size_t kNetEqSyncBufferLengthMs = 720;
+} // namespace
+
+class MergeTest : public testing::TestWithParam<size_t> {
+ protected:
+ MergeTest()
+ : input_file_(test::ResourcePath("audio_coding/testfile32kHz", "pcm"),
+ 32000),
+ test_sample_rate_hz_(8000),
+ num_channels_(1),
+ background_noise_(num_channels_),
+ sync_buffer_(num_channels_,
+ kNetEqSyncBufferLengthMs * test_sample_rate_hz_ / 1000),
+ expand_(&background_noise_,
+ &sync_buffer_,
+ &random_vector_,
+ &statistics_,
+ test_sample_rate_hz_,
+ num_channels_),
+ merge_(test_sample_rate_hz_, num_channels_, &expand_, &sync_buffer_) {
+ input_file_.set_output_rate_hz(test_sample_rate_hz_);
+ }
+
+ void SetUp() override {
+    // Fast-forward the input file until there is speech (about 1.1 seconds
+    // into the file).
+ const int speech_start_samples =
+ static_cast<int>(test_sample_rate_hz_ * 1.1f);
+ ASSERT_TRUE(input_file_.Seek(speech_start_samples));
+
+ // Pre-load the sync buffer with speech data.
+ std::unique_ptr<int16_t[]> temp(new int16_t[sync_buffer_.Size()]);
+ ASSERT_TRUE(input_file_.Read(sync_buffer_.Size(), temp.get()));
+ sync_buffer_.Channel(0).OverwriteAt(temp.get(), sync_buffer_.Size(), 0);
+ // Move index such that the sync buffer appears to have 5 ms left to play.
+ sync_buffer_.set_next_index(sync_buffer_.next_index() -
+ test_sample_rate_hz_ * 5 / 1000);
+ ASSERT_EQ(1u, num_channels_) << "Fix: Must populate all channels.";
+ ASSERT_GT(sync_buffer_.FutureLength(), 0u);
+ }
+
+ test::ResampleInputAudioFile input_file_;
+ int test_sample_rate_hz_;
+ size_t num_channels_;
+ BackgroundNoise background_noise_;
+ SyncBuffer sync_buffer_;
+ RandomVector random_vector_;
+ StatisticsCalculator statistics_;
+ Expand expand_;
+ Merge merge_;
+};
+
+TEST_P(MergeTest, Process) {
+ AudioMultiVector output(num_channels_);
+ // Start by calling Expand once, to prime the state.
+ EXPECT_EQ(0, expand_.Process(&output));
+ EXPECT_GT(output.Size(), 0u);
+ output.Clear();
+  // Now call Merge, but with a very short decoded input. Try different
+  // lengths of the input.
+ const size_t input_len = GetParam();
+ std::vector<int16_t> input(input_len, 17);
+ merge_.Process(input.data(), input_len, &output);
+ EXPECT_GT(output.Size(), 0u);
+}
+
+// Instantiate with values for the input length that are interesting in
+// Merge::Downsample. Why are these values interesting?
+// - In 8000 Hz sample rate, signal_offset in Merge::Downsample will be 2, so
+// the values 1, 2, 3 are just around that value.
+// - Also in 8000 Hz, the variable length_limit in the same method will be 80,
+// so values 80 and 81 will be on either side of the branch point
+// "input_length <= length_limit".
+// - Finally, 160 is simply 20 ms in 8000 Hz, which is a common packet size.
+INSTANTIATE_TEST_SUITE_P(DifferentInputLengths,
+ MergeTest,
+ testing::Values(1, 2, 3, 80, 81, 160));
+// TODO(hlundin): Write more tests.
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/mock/mock_buffer_level_filter.h b/third_party/libwebrtc/modules/audio_coding/neteq/mock/mock_buffer_level_filter.h
new file mode 100644
index 0000000000..503f6ac6bd
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/mock/mock_buffer_level_filter.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_MOCK_MOCK_BUFFER_LEVEL_FILTER_H_
+#define MODULES_AUDIO_CODING_NETEQ_MOCK_MOCK_BUFFER_LEVEL_FILTER_H_
+
+#include "modules/audio_coding/neteq/buffer_level_filter.h"
+#include "test/gmock.h"
+
+namespace webrtc {
+
+class MockBufferLevelFilter : public BufferLevelFilter {
+ public:
+ MOCK_METHOD(void,
+ Update,
+ (size_t buffer_size_samples, int time_stretched_samples));
+ MOCK_METHOD(int, filtered_current_level, (), (const));
+};
+
+} // namespace webrtc
+#endif // MODULES_AUDIO_CODING_NETEQ_MOCK_MOCK_BUFFER_LEVEL_FILTER_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/mock/mock_decoder_database.h b/third_party/libwebrtc/modules/audio_coding/neteq/mock/mock_decoder_database.h
new file mode 100644
index 0000000000..2394120e99
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/mock/mock_decoder_database.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_MOCK_MOCK_DECODER_DATABASE_H_
+#define MODULES_AUDIO_CODING_NETEQ_MOCK_MOCK_DECODER_DATABASE_H_
+
+#include <string>
+
+#include "modules/audio_coding/neteq/decoder_database.h"
+#include "test/gmock.h"
+
+namespace webrtc {
+
+class MockDecoderDatabase : public DecoderDatabase {
+ public:
+ explicit MockDecoderDatabase(
+ rtc::scoped_refptr<AudioDecoderFactory> factory = nullptr)
+ : DecoderDatabase(factory, absl::nullopt) {}
+ ~MockDecoderDatabase() override { Die(); }
+ MOCK_METHOD(void, Die, ());
+ MOCK_METHOD(bool, Empty, (), (const, override));
+ MOCK_METHOD(int, Size, (), (const, override));
+ MOCK_METHOD(int,
+ RegisterPayload,
+ (int rtp_payload_type, const SdpAudioFormat& audio_format),
+ (override));
+ MOCK_METHOD(int, Remove, (uint8_t rtp_payload_type), (override));
+ MOCK_METHOD(void, RemoveAll, (), (override));
+ MOCK_METHOD(const DecoderInfo*,
+ GetDecoderInfo,
+ (uint8_t rtp_payload_type),
+ (const, override));
+ MOCK_METHOD(int,
+ SetActiveDecoder,
+ (uint8_t rtp_payload_type, bool* new_decoder),
+ (override));
+ MOCK_METHOD(AudioDecoder*, GetActiveDecoder, (), (const, override));
+ MOCK_METHOD(int, SetActiveCngDecoder, (uint8_t rtp_payload_type), (override));
+ MOCK_METHOD(ComfortNoiseDecoder*, GetActiveCngDecoder, (), (const, override));
+};
+
+} // namespace webrtc
+#endif // MODULES_AUDIO_CODING_NETEQ_MOCK_MOCK_DECODER_DATABASE_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/mock/mock_delay_manager.h b/third_party/libwebrtc/modules/audio_coding/neteq/mock/mock_delay_manager.h
new file mode 100644
index 0000000000..d783f8743b
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/mock/mock_delay_manager.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_MOCK_MOCK_DELAY_MANAGER_H_
+#define MODULES_AUDIO_CODING_NETEQ_MOCK_MOCK_DELAY_MANAGER_H_
+
+#include "api/neteq/tick_timer.h"
+#include "modules/audio_coding/neteq/delay_manager.h"
+#include "test/gmock.h"
+
+namespace webrtc {
+
+class MockDelayManager : public DelayManager {
+ public:
+ MockDelayManager(const MockDelayManager::Config& config,
+ const TickTimer* tick_timer)
+ : DelayManager(config, tick_timer) {}
+ MOCK_METHOD(int, TargetDelayMs, (), (const));
+};
+
+} // namespace webrtc
+#endif // MODULES_AUDIO_CODING_NETEQ_MOCK_MOCK_DELAY_MANAGER_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/mock/mock_dtmf_buffer.h b/third_party/libwebrtc/modules/audio_coding/neteq/mock/mock_dtmf_buffer.h
new file mode 100644
index 0000000000..c60c56d36b
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/mock/mock_dtmf_buffer.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_MOCK_MOCK_DTMF_BUFFER_H_
+#define MODULES_AUDIO_CODING_NETEQ_MOCK_MOCK_DTMF_BUFFER_H_
+
+#include "modules/audio_coding/neteq/dtmf_buffer.h"
+#include "test/gmock.h"
+
+namespace webrtc {
+
+class MockDtmfBuffer : public DtmfBuffer {
+ public:
+ MockDtmfBuffer(int fs) : DtmfBuffer(fs) {}
+ ~MockDtmfBuffer() override { Die(); }
+ MOCK_METHOD(void, Die, ());
+ MOCK_METHOD(void, Flush, (), (override));
+ MOCK_METHOD(int, InsertEvent, (const DtmfEvent& event), (override));
+ MOCK_METHOD(bool,
+ GetEvent,
+ (uint32_t current_timestamp, DtmfEvent* event),
+ (override));
+ MOCK_METHOD(size_t, Length, (), (const, override));
+ MOCK_METHOD(bool, Empty, (), (const, override));
+};
+
+} // namespace webrtc
+#endif // MODULES_AUDIO_CODING_NETEQ_MOCK_MOCK_DTMF_BUFFER_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/mock/mock_dtmf_tone_generator.h b/third_party/libwebrtc/modules/audio_coding/neteq/mock/mock_dtmf_tone_generator.h
new file mode 100644
index 0000000000..60de167c29
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/mock/mock_dtmf_tone_generator.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_MOCK_MOCK_DTMF_TONE_GENERATOR_H_
+#define MODULES_AUDIO_CODING_NETEQ_MOCK_MOCK_DTMF_TONE_GENERATOR_H_
+
+#include "modules/audio_coding/neteq/dtmf_tone_generator.h"
+#include "test/gmock.h"
+
+namespace webrtc {
+
+class MockDtmfToneGenerator : public DtmfToneGenerator {
+ public:
+ ~MockDtmfToneGenerator() override { Die(); }
+ MOCK_METHOD(void, Die, ());
+ MOCK_METHOD(int, Init, (int fs, int event, int attenuation), (override));
+ MOCK_METHOD(void, Reset, (), (override));
+ MOCK_METHOD(int,
+ Generate,
+ (size_t num_samples, AudioMultiVector* output),
+ (override));
+ MOCK_METHOD(bool, initialized, (), (const, override));
+};
+
+} // namespace webrtc
+#endif // MODULES_AUDIO_CODING_NETEQ_MOCK_MOCK_DTMF_TONE_GENERATOR_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/mock/mock_expand.h b/third_party/libwebrtc/modules/audio_coding/neteq/mock/mock_expand.h
new file mode 100644
index 0000000000..9d66779021
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/mock/mock_expand.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_MOCK_MOCK_EXPAND_H_
+#define MODULES_AUDIO_CODING_NETEQ_MOCK_MOCK_EXPAND_H_
+
+#include "modules/audio_coding/neteq/expand.h"
+#include "test/gmock.h"
+
+namespace webrtc {
+
+class MockExpand : public Expand {
+ public:
+ MockExpand(BackgroundNoise* background_noise,
+ SyncBuffer* sync_buffer,
+ RandomVector* random_vector,
+ StatisticsCalculator* statistics,
+ int fs,
+ size_t num_channels)
+ : Expand(background_noise,
+ sync_buffer,
+ random_vector,
+ statistics,
+ fs,
+ num_channels) {}
+ ~MockExpand() override { Die(); }
+ MOCK_METHOD(void, Die, ());
+ MOCK_METHOD(void, Reset, (), (override));
+ MOCK_METHOD(int, Process, (AudioMultiVector * output), (override));
+ MOCK_METHOD(void, SetParametersForNormalAfterExpand, (), (override));
+ MOCK_METHOD(void, SetParametersForMergeAfterExpand, (), (override));
+ MOCK_METHOD(size_t, overlap_length, (), (const, override));
+};
+
+} // namespace webrtc
+
+namespace webrtc {
+
+class MockExpandFactory : public ExpandFactory {
+ public:
+ MOCK_METHOD(Expand*,
+ Create,
+ (BackgroundNoise * background_noise,
+ SyncBuffer* sync_buffer,
+ RandomVector* random_vector,
+ StatisticsCalculator* statistics,
+ int fs,
+ size_t num_channels),
+ (const, override));
+};
+
+} // namespace webrtc
+#endif // MODULES_AUDIO_CODING_NETEQ_MOCK_MOCK_EXPAND_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/mock/mock_histogram.h b/third_party/libwebrtc/modules/audio_coding/neteq/mock/mock_histogram.h
new file mode 100644
index 0000000000..03abbc1d4b
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/mock/mock_histogram.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_MOCK_MOCK_HISTOGRAM_H_
+#define MODULES_AUDIO_CODING_NETEQ_MOCK_MOCK_HISTOGRAM_H_
+
+#include "modules/audio_coding/neteq/histogram.h"
+#include "test/gmock.h"
+
+namespace webrtc {
+
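+// Example test usage (sketch):
+//   MockHistogram histogram(/*num_buckets=*/100, /*forget_factor=*/32440);
+//   EXPECT_CALL(histogram, Quantile(::testing::_))
+//       .WillOnce(::testing::Return(20));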
+class MockHistogram : public Histogram {
+ public:
+ MockHistogram(size_t num_buckets, int forget_factor)
+ : Histogram(num_buckets, forget_factor) {}
+ virtual ~MockHistogram() {}
+
+ MOCK_METHOD(void, Add, (int), (override));
+ MOCK_METHOD(int, Quantile, (int), (override));
+};
+
+} // namespace webrtc
+#endif // MODULES_AUDIO_CODING_NETEQ_MOCK_MOCK_HISTOGRAM_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/mock/mock_neteq_controller.h b/third_party/libwebrtc/modules/audio_coding/neteq/mock/mock_neteq_controller.h
new file mode 100644
index 0000000000..6d88e09216
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/mock/mock_neteq_controller.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_MOCK_MOCK_NETEQ_CONTROLLER_H_
+#define MODULES_AUDIO_CODING_NETEQ_MOCK_MOCK_NETEQ_CONTROLLER_H_
+
+#include "api/neteq/neteq_controller.h"
+#include "test/gmock.h"
+
+namespace webrtc {
+
+class MockNetEqController : public NetEqController {
+ public:
+ MockNetEqController() = default;
+ ~MockNetEqController() override { Die(); }
+ MOCK_METHOD(void, Die, ());
+ MOCK_METHOD(void, Reset, (), (override));
+ MOCK_METHOD(void, SoftReset, (), (override));
+ MOCK_METHOD(NetEq::Operation,
+ GetDecision,
+ (const NetEqStatus& neteq_status, bool* reset_decoder),
+ (override));
+ MOCK_METHOD(void, RegisterEmptyPacket, (), (override));
+ MOCK_METHOD(void,
+ SetSampleRate,
+ (int fs_hz, size_t output_size_samples),
+ (override));
+ MOCK_METHOD(bool, SetMaximumDelay, (int delay_ms), (override));
+ MOCK_METHOD(bool, SetMinimumDelay, (int delay_ms), (override));
+ MOCK_METHOD(bool, SetBaseMinimumDelay, (int delay_ms), (override));
+ MOCK_METHOD(int, GetBaseMinimumDelay, (), (const, override));
+ MOCK_METHOD(bool, CngRfc3389On, (), (const, override));
+ MOCK_METHOD(bool, CngOff, (), (const, override));
+ MOCK_METHOD(void, SetCngOff, (), (override));
+ MOCK_METHOD(void, ExpandDecision, (NetEq::Operation operation), (override));
+ MOCK_METHOD(void, AddSampleMemory, (int32_t value), (override));
+ MOCK_METHOD(int, TargetLevelMs, (), (const, override));
+ MOCK_METHOD(absl::optional<int>,
+ PacketArrived,
+ (int fs_hz,
+ bool should_update_stats,
+ const PacketArrivedInfo& info),
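+  // With start_forget_weight = 1.0, `forget_factor_` reaches the steady state
+  // once 1 - 1/(add_count + 1) >= 0.9993, i.e. after roughly
+  // 1 / (1 - 0.9993) ~= 1430 additions; `n` computes that bound in Q15.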
+ (override));
+ MOCK_METHOD(void, NotifyMutedState, (), (override));
+ MOCK_METHOD(bool, PeakFound, (), (const, override));
+ MOCK_METHOD(int, GetFilteredBufferLevel, (), (const, override));
+ MOCK_METHOD(void, set_sample_memory, (int32_t value), (override));
+ MOCK_METHOD(size_t, noise_fast_forward, (), (const, override));
+ MOCK_METHOD(size_t, packet_length_samples, (), (const, override));
+ MOCK_METHOD(void, set_packet_length_samples, (size_t value), (override));
+ MOCK_METHOD(void, set_prev_time_scale, (bool value), (override));
+};
+
+} // namespace webrtc
+#endif // MODULES_AUDIO_CODING_NETEQ_MOCK_MOCK_NETEQ_CONTROLLER_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/mock/mock_packet_buffer.h b/third_party/libwebrtc/modules/audio_coding/neteq/mock/mock_packet_buffer.h
new file mode 100644
index 0000000000..48357ea466
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/mock/mock_packet_buffer.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_MOCK_MOCK_PACKET_BUFFER_H_
+#define MODULES_AUDIO_CODING_NETEQ_MOCK_MOCK_PACKET_BUFFER_H_
+
+#include "modules/audio_coding/neteq/packet_buffer.h"
+#include "test/gmock.h"
+
+namespace webrtc {
+
+class MockPacketBuffer : public PacketBuffer {
+ public:
+ MockPacketBuffer(size_t max_number_of_packets, const TickTimer* tick_timer)
+ : PacketBuffer(max_number_of_packets, tick_timer) {}
+ ~MockPacketBuffer() override { Die(); }
+ MOCK_METHOD(void, Die, ());
+ MOCK_METHOD(void, Flush, (StatisticsCalculator * stats), (override));
+ MOCK_METHOD(void,
+ PartialFlush,
+ (int target_level_ms,
+ size_t sample_rate,
+ size_t last_decoded_length,
+ StatisticsCalculator* stats),
+ (override));
+ MOCK_METHOD(bool, Empty, (), (const, override));
+ MOCK_METHOD(int,
+ InsertPacket,
+ (Packet && packet,
+ StatisticsCalculator* stats,
+ size_t last_decoded_length,
+ size_t sample_rate,
+ int target_level_ms,
+ const DecoderDatabase& decoder_database),
+ (override));
+ MOCK_METHOD(int,
+ InsertPacketList,
+ (PacketList * packet_list,
+ const DecoderDatabase& decoder_database,
+ absl::optional<uint8_t>* current_rtp_payload_type,
+ absl::optional<uint8_t>* current_cng_rtp_payload_type,
+ StatisticsCalculator* stats,
+ size_t last_decoded_length,
+ size_t sample_rate,
+ int target_level_ms),
+ (override));
+ MOCK_METHOD(int,
+ NextTimestamp,
+ (uint32_t * next_timestamp),
+ (const, override));
+ MOCK_METHOD(int,
+ NextHigherTimestamp,
+ (uint32_t timestamp, uint32_t* next_timestamp),
+ (const, override));
+ MOCK_METHOD(const Packet*, PeekNextPacket, (), (const, override));
+ MOCK_METHOD(absl::optional<Packet>, GetNextPacket, (), (override));
+ MOCK_METHOD(int,
+ DiscardNextPacket,
+ (StatisticsCalculator * stats),
+ (override));
+ MOCK_METHOD(void,
+ DiscardOldPackets,
+ (uint32_t timestamp_limit,
+ uint32_t horizon_samples,
+ StatisticsCalculator* stats),
+ (override));
+ MOCK_METHOD(void,
+ DiscardAllOldPackets,
+ (uint32_t timestamp_limit, StatisticsCalculator* stats),
+ (override));
+ MOCK_METHOD(size_t, NumPacketsInBuffer, (), (const, override));
+};
+
+} // namespace webrtc
+#endif // MODULES_AUDIO_CODING_NETEQ_MOCK_MOCK_PACKET_BUFFER_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/mock/mock_red_payload_splitter.h b/third_party/libwebrtc/modules/audio_coding/neteq/mock/mock_red_payload_splitter.h
new file mode 100644
index 0000000000..9daf571a80
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/mock/mock_red_payload_splitter.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_MOCK_MOCK_RED_PAYLOAD_SPLITTER_H_
+#define MODULES_AUDIO_CODING_NETEQ_MOCK_MOCK_RED_PAYLOAD_SPLITTER_H_
+
+#include "modules/audio_coding/neteq/red_payload_splitter.h"
+#include "test/gmock.h"
+
+namespace webrtc {
+
+class MockRedPayloadSplitter : public RedPayloadSplitter {
+ public:
+ MOCK_METHOD(bool, SplitRed, (PacketList * packet_list), (override));
+ MOCK_METHOD(void,
+ CheckRedPayloads,
+ (PacketList * packet_list,
+ const DecoderDatabase& decoder_database),
+ (override));
+};
+
+} // namespace webrtc
+#endif // MODULES_AUDIO_CODING_NETEQ_MOCK_MOCK_RED_PAYLOAD_SPLITTER_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/mock/mock_statistics_calculator.h b/third_party/libwebrtc/modules/audio_coding/neteq/mock/mock_statistics_calculator.h
new file mode 100644
index 0000000000..f8812478d6
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/mock/mock_statistics_calculator.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_MOCK_MOCK_STATISTICS_CALCULATOR_H_
+#define MODULES_AUDIO_CODING_NETEQ_MOCK_MOCK_STATISTICS_CALCULATOR_H_
+
+#include "modules/audio_coding/neteq/statistics_calculator.h"
+#include "test/gmock.h"
+
+namespace webrtc {
+
+class MockStatisticsCalculator : public StatisticsCalculator {
+ public:
+ MOCK_METHOD(void, PacketsDiscarded, (size_t num_packets), (override));
+ MOCK_METHOD(void,
+ SecondaryPacketsDiscarded,
+ (size_t num_packets),
+ (override));
+ MOCK_METHOD(void, RelativePacketArrivalDelay, (size_t delay_ms), (override));
+};
+
+} // namespace webrtc
+#endif // MODULES_AUDIO_CODING_NETEQ_MOCK_MOCK_STATISTICS_CALCULATOR_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/nack_tracker.cc b/third_party/libwebrtc/modules/audio_coding/neteq/nack_tracker.cc
new file mode 100644
index 0000000000..04cc5b52e8
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/nack_tracker.cc
@@ -0,0 +1,267 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/nack_tracker.h"
+
+#include <cstdint>
+#include <utility>
+
+#include "rtc_base/checks.h"
+#include "rtc_base/experiments/struct_parameters_parser.h"
+#include "rtc_base/logging.h"
+#include "system_wrappers/include/field_trial.h"
+
+namespace webrtc {
+namespace {
+
+const int kDefaultSampleRateKhz = 48;
+const int kMaxPacketSizeMs = 120;
+constexpr char kNackTrackerConfigFieldTrial[] =
+ "WebRTC-Audio-NetEqNackTrackerConfig";
+
+} // namespace
+
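+// The field trial group string is parsed as comma-separated `key:value`
+// pairs, e.g. "ms_per_loss_percent:20,max_loss_rate:0.5" (illustrative
+// values; the keys are the ones registered with StructParametersParser
+// below).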
+NackTracker::Config::Config() {
+ auto parser = StructParametersParser::Create(
+ "packet_loss_forget_factor", &packet_loss_forget_factor,
+ "ms_per_loss_percent", &ms_per_loss_percent, "never_nack_multiple_times",
+ &never_nack_multiple_times, "require_valid_rtt", &require_valid_rtt,
+ "max_loss_rate", &max_loss_rate);
+ parser->Parse(
+ webrtc::field_trial::FindFullName(kNackTrackerConfigFieldTrial));
+ RTC_LOG(LS_INFO) << "Nack tracker config:"
+ " packet_loss_forget_factor="
+ << packet_loss_forget_factor
+ << " ms_per_loss_percent=" << ms_per_loss_percent
+ << " never_nack_multiple_times=" << never_nack_multiple_times
+ << " require_valid_rtt=" << require_valid_rtt
+ << " max_loss_rate=" << max_loss_rate;
+}
+
+NackTracker::NackTracker()
+ : sequence_num_last_received_rtp_(0),
+ timestamp_last_received_rtp_(0),
+ any_rtp_received_(false),
+ sequence_num_last_decoded_rtp_(0),
+ timestamp_last_decoded_rtp_(0),
+ any_rtp_decoded_(false),
+ sample_rate_khz_(kDefaultSampleRateKhz),
+ max_nack_list_size_(kNackListSizeLimit) {}
+
+NackTracker::~NackTracker() = default;
+
+void NackTracker::UpdateSampleRate(int sample_rate_hz) {
+ RTC_DCHECK_GT(sample_rate_hz, 0);
+ sample_rate_khz_ = sample_rate_hz / 1000;
+}
+
+void NackTracker::UpdateLastReceivedPacket(uint16_t sequence_number,
+ uint32_t timestamp) {
+ // Just record the value of sequence number and timestamp if this is the
+ // first packet.
+ if (!any_rtp_received_) {
+ sequence_num_last_received_rtp_ = sequence_number;
+ timestamp_last_received_rtp_ = timestamp;
+ any_rtp_received_ = true;
+    // If no packet has been decoded yet, use the given values to get a
+    // reasonable estimate of the time-to-play.
+ if (!any_rtp_decoded_) {
+ sequence_num_last_decoded_rtp_ = sequence_number;
+ timestamp_last_decoded_rtp_ = timestamp;
+ }
+ return;
+ }
+
+ if (sequence_number == sequence_num_last_received_rtp_)
+ return;
+
+ // Received RTP should not be in the list.
+ nack_list_.erase(sequence_number);
+
+  // If this is an old sequence number, no further action is required; return.
+ if (IsNewerSequenceNumber(sequence_num_last_received_rtp_, sequence_number))
+ return;
+
+ UpdatePacketLossRate(sequence_number - sequence_num_last_received_rtp_ - 1);
+
+ UpdateList(sequence_number, timestamp);
+
+ sequence_num_last_received_rtp_ = sequence_number;
+ timestamp_last_received_rtp_ = timestamp;
+ LimitNackListSize();
+}
+
+absl::optional<int> NackTracker::GetSamplesPerPacket(
+ uint16_t sequence_number_current_received_rtp,
+ uint32_t timestamp_current_received_rtp) const {
+ uint32_t timestamp_increase =
+ timestamp_current_received_rtp - timestamp_last_received_rtp_;
+ uint16_t sequence_num_increase =
+ sequence_number_current_received_rtp - sequence_num_last_received_rtp_;
+
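+  // Worked example (hypothetical numbers), assuming 20 ms packets at 48 kHz:
+  // adjacent packets give timestamp_increase = 960 and
+  // sequence_num_increase = 1; a one-packet gap gives 1920 and 2. Either way
+  // samples_per_packet = 960.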
+ int samples_per_packet = timestamp_increase / sequence_num_increase;
+ if (samples_per_packet == 0 ||
+ samples_per_packet > kMaxPacketSizeMs * sample_rate_khz_) {
+ // Not a valid samples per packet.
+ return absl::nullopt;
+ }
+ return samples_per_packet;
+}
+
+void NackTracker::UpdateList(uint16_t sequence_number_current_received_rtp,
+ uint32_t timestamp_current_received_rtp) {
+ if (!IsNewerSequenceNumber(sequence_number_current_received_rtp,
+ sequence_num_last_received_rtp_ + 1)) {
+ return;
+ }
+ RTC_DCHECK(!any_rtp_decoded_ ||
+ IsNewerSequenceNumber(sequence_number_current_received_rtp,
+ sequence_num_last_decoded_rtp_));
+
+ absl::optional<int> samples_per_packet = GetSamplesPerPacket(
+ sequence_number_current_received_rtp, timestamp_current_received_rtp);
+ if (!samples_per_packet) {
+ return;
+ }
+
+ for (uint16_t n = sequence_num_last_received_rtp_ + 1;
+ IsNewerSequenceNumber(sequence_number_current_received_rtp, n); ++n) {
+ uint32_t timestamp = EstimateTimestamp(n, *samples_per_packet);
+ NackElement nack_element(TimeToPlay(timestamp), timestamp);
+ nack_list_.insert(nack_list_.end(), std::make_pair(n, nack_element));
+ }
+}
+
+uint32_t NackTracker::EstimateTimestamp(uint16_t sequence_num,
+ int samples_per_packet) {
+ uint16_t sequence_num_diff = sequence_num - sequence_num_last_received_rtp_;
+ return sequence_num_diff * samples_per_packet + timestamp_last_received_rtp_;
+}
+
+void NackTracker::UpdateEstimatedPlayoutTimeBy10ms() {
+ while (!nack_list_.empty() &&
+ nack_list_.begin()->second.time_to_play_ms <= 10)
+ nack_list_.erase(nack_list_.begin());
+
+ for (NackList::iterator it = nack_list_.begin(); it != nack_list_.end(); ++it)
+ it->second.time_to_play_ms -= 10;
+}
+
+void NackTracker::UpdateLastDecodedPacket(uint16_t sequence_number,
+ uint32_t timestamp) {
+ if (IsNewerSequenceNumber(sequence_number, sequence_num_last_decoded_rtp_) ||
+ !any_rtp_decoded_) {
+ sequence_num_last_decoded_rtp_ = sequence_number;
+ timestamp_last_decoded_rtp_ = timestamp;
+    // Packets in the list with sequence numbers less than the sequence number
+    // of the decoded RTP packet should be removed from the list. They will be
+    // discarded by the jitter buffer if they arrive.
+ nack_list_.erase(nack_list_.begin(),
+ nack_list_.upper_bound(sequence_num_last_decoded_rtp_));
+
+ // Update estimated time-to-play.
+ for (NackList::iterator it = nack_list_.begin(); it != nack_list_.end();
+ ++it)
+ it->second.time_to_play_ms = TimeToPlay(it->second.estimated_timestamp);
+ } else {
+ RTC_DCHECK_EQ(sequence_number, sequence_num_last_decoded_rtp_);
+
+    // Same sequence number as before. 10 ms have elapsed; update the
+    // time-to-play estimates.
+ UpdateEstimatedPlayoutTimeBy10ms();
+
+    // Update the timestamp for a better time-to-play estimate for packets
+    // that are added to the NACK list later on.
+ timestamp_last_decoded_rtp_ += sample_rate_khz_ * 10;
+ }
+ any_rtp_decoded_ = true;
+}
+
+NackTracker::NackList NackTracker::GetNackList() const {
+ return nack_list_;
+}
+
+void NackTracker::Reset() {
+ nack_list_.clear();
+
+ sequence_num_last_received_rtp_ = 0;
+ timestamp_last_received_rtp_ = 0;
+ any_rtp_received_ = false;
+ sequence_num_last_decoded_rtp_ = 0;
+ timestamp_last_decoded_rtp_ = 0;
+ any_rtp_decoded_ = false;
+ sample_rate_khz_ = kDefaultSampleRateKhz;
+}
+
+void NackTracker::SetMaxNackListSize(size_t max_nack_list_size) {
+ RTC_CHECK_GT(max_nack_list_size, 0);
+ // Ugly hack to get around the problem of passing static consts by reference.
+ const size_t kNackListSizeLimitLocal = NackTracker::kNackListSizeLimit;
+ RTC_CHECK_LE(max_nack_list_size, kNackListSizeLimitLocal);
+
+ max_nack_list_size_ = max_nack_list_size;
+ LimitNackListSize();
+}
+
+void NackTracker::LimitNackListSize() {
+ uint16_t limit = sequence_num_last_received_rtp_ -
+ static_cast<uint16_t>(max_nack_list_size_) - 1;
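+  // For example, with sequence_num_last_received_rtp_ == 1000 and
+  // max_nack_list_size_ == 250, `limit` is 749 and the erase below drops all
+  // entries up to and including sequence number 749.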
+ nack_list_.erase(nack_list_.begin(), nack_list_.upper_bound(limit));
+}
+
+int64_t NackTracker::TimeToPlay(uint32_t timestamp) const {
+ uint32_t timestamp_increase = timestamp - timestamp_last_decoded_rtp_;
+ return timestamp_increase / sample_rate_khz_;
+}
+
+// We don't erase elements with time-to-play shorter than round-trip-time.
+std::vector<uint16_t> NackTracker::GetNackList(int64_t round_trip_time_ms) {
+ RTC_DCHECK_GE(round_trip_time_ms, 0);
+ std::vector<uint16_t> sequence_numbers;
+ if (round_trip_time_ms == 0) {
+ if (config_.require_valid_rtt) {
+ return sequence_numbers;
+ } else {
+ round_trip_time_ms = config_.default_rtt_ms;
+ }
+ }
+ if (packet_loss_rate_ >
+ static_cast<uint32_t>(config_.max_loss_rate * (1 << 30))) {
+ return sequence_numbers;
+ }
+  // The estimated packet loss rate is a Q30 fraction in [0, 1], so multiply by
+  // 100 to convert it to a percentage before scaling by ms_per_loss_percent.
+ int max_wait_ms =
+ 100.0 * config_.ms_per_loss_percent * packet_loss_rate_ / (1 << 30);
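+  // Hypothetical numbers: with ms_per_loss_percent == 20 and a 10% loss rate
+  // (packet_loss_rate_ ~= 0.1 * (1 << 30)), max_wait_ms evaluates to
+  // 100 * 20 * 0.1 = 200 ms.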
+ for (NackList::const_iterator it = nack_list_.begin(); it != nack_list_.end();
+ ++it) {
+ int64_t time_since_packet_ms =
+ (timestamp_last_received_rtp_ - it->second.estimated_timestamp) /
+ sample_rate_khz_;
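+    // Our reading of the condition below: NACK the packet if a retransmission
+    // could still arrive before its playout time (time_to_play_ms > RTT), or
+    // if the packet is late but still within the max_wait_ms budget.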
+ if (it->second.time_to_play_ms > round_trip_time_ms ||
+ time_since_packet_ms + round_trip_time_ms < max_wait_ms)
+ sequence_numbers.push_back(it->first);
+ }
+ if (config_.never_nack_multiple_times) {
+ nack_list_.clear();
+ }
+ return sequence_numbers;
+}
+
+void NackTracker::UpdatePacketLossRate(int packets_lost) {
+ const uint64_t alpha_q30 = (1 << 30) * config_.packet_loss_forget_factor;
+ // Exponential filter.
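+  // In floating point this is rate = alpha * rate for the received packet,
+  // then rate = alpha * rate + (1 - alpha) for each lost packet; the Q30
+  // fixed-point form below avoids floating-point state.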
+ packet_loss_rate_ = (alpha_q30 * packet_loss_rate_) >> 30;
+ for (int i = 0; i < packets_lost; ++i) {
+ packet_loss_rate_ =
+ ((alpha_q30 * packet_loss_rate_) >> 30) + ((1 << 30) - alpha_q30);
+ }
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/nack_tracker.h b/third_party/libwebrtc/modules/audio_coding/neteq/nack_tracker.h
new file mode 100644
index 0000000000..14ba2166d1
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/nack_tracker.h
@@ -0,0 +1,211 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_NACK_TRACKER_H_
+#define MODULES_AUDIO_CODING_NETEQ_NACK_TRACKER_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <map>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "modules/include/module_common_types_public.h"
+#include "rtc_base/gtest_prod_util.h"
+
+//
+// The NackTracker class keeps track of lost packets and also provides an
+// estimate of time-to-play for each packet.
+//
+// Every time a packet is pushed into NetEq, UpdateLastReceivedPacket() has to
+// be called to update the NACK list.
+//
+// Every time 10 ms of audio is pulled from NetEq, UpdateLastDecodedPacket()
+// should be called, and time-to-play is updated at that moment.
+//
+// If packet N is received, any packet prior to N which has not arrived is
+// considered lost and should be labeled as "missing" (the size of the list
+// might be limited, with older packets eliminated from the list).
+//
+// The NackTracker class has to know the sample rate of the packets to compute
+// time-to-play, so the sample rate should be set as soon as the first packet
+// is received. If the receive codec changes (the sender changes codec), then
+// NackTracker should be reset, because NetEq flushes its buffer and
+// retransmission is meaningless for old packets. Therefore, in that case, the
+// sampling rate has to be updated after the reset.
+//
+// Thread Safety
+// =============
+// Please note that this class is not thread safe. The class must be protected
+// if different APIs are called from different threads.
+//
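+// A minimal usage sketch (illustrative only; `header`, `seq_num`, `timestamp`
+// and `rtt_ms` are assumed to come from the surrounding receive path):
+//
+//   NackTracker nack;
+//   nack.UpdateSampleRate(48000);
+//   // On every packet pushed into NetEq:
+//   nack.UpdateLastReceivedPacket(header.sequenceNumber, header.timestamp);
+//   // On every 10 ms of audio pulled from NetEq:
+//   nack.UpdateLastDecodedPacket(seq_num, timestamp);
+//   // Periodically, fetch the sequence numbers to request again:
+//   std::vector<uint16_t> nacks = nack.GetNackList(rtt_ms);
+//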
+namespace webrtc {
+
+class NackTracker {
+ public:
+ // A limit for the size of the NACK list.
+ static const size_t kNackListSizeLimit = 500; // 10 seconds for 20 ms frame
+ // packets.
+ NackTracker();
+ ~NackTracker();
+
+  // Set a maximum for the size of the NACK list. If the last received packet
+  // has sequence number N, then the NACK list will not contain any element
+  // with a sequence number earlier than N - `max_nack_list_size`.
+  //
+  // The largest maximum size is defined by `kNackListSizeLimit`.
+ void SetMaxNackListSize(size_t max_nack_list_size);
+
+  // Set the sampling rate.
+  //
+  // If the sampling rate associated with the received packets changes, call
+  // this function to update it. Note that if the received codec changes, NetEq
+  // will flush its buffer and NACK has to be reset. After Reset() is called,
+  // the sampling rate has to be set again.
+ void UpdateSampleRate(int sample_rate_hz);
+
+  // Update the sequence number and the timestamp of the last decoded RTP
+  // packet. This API should be called every time 10 ms of audio is pulled
+  // from NetEq.
+ void UpdateLastDecodedPacket(uint16_t sequence_number, uint32_t timestamp);
+
+  // Update the sequence number and the timestamp of the last received RTP
+  // packet. This API should be called every time a packet is pushed into ACM.
+ void UpdateLastReceivedPacket(uint16_t sequence_number, uint32_t timestamp);
+
+ // Get a list of "missing" packets which have expected time-to-play larger
+ // than the given round-trip-time (in milliseconds).
+ // Note: Late packets are not included.
+ // Calling this method multiple times may give different results, since the
+ // internal nack list may get flushed if never_nack_multiple_times_ is true.
+ std::vector<uint16_t> GetNackList(int64_t round_trip_time_ms);
+
+ // Reset to default values. The NACK list is cleared.
+ // `max_nack_list_size_` preserves its value.
+ void Reset();
+
+ // Returns the estimated packet loss rate in Q30, for testing only.
+ uint32_t GetPacketLossRateForTest() { return packet_loss_rate_; }
+
+ private:
+  // This test needs to access the private method GetNackList().
+ FRIEND_TEST_ALL_PREFIXES(NackTrackerTest, EstimateTimestampAndTimeToPlay);
+
+ // Options that can be configured via field trial.
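+  // For example (using the parser keys from nack_tracker.cc), a field trial
+  // string like "WebRTC-Audio-NetEqNackTrackerConfig/ms_per_loss_percent:40,"
+  // "never_nack_multiple_times:true/" would override those two fields.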
+ struct Config {
+ Config();
+
+ // The exponential decay factor used to estimate the packet loss rate.
+ double packet_loss_forget_factor = 0.996;
+ // How many additional ms we are willing to wait (at most) for nacked
+ // packets for each additional percentage of packet loss.
+ int ms_per_loss_percent = 20;
+ // If true, never nack packets more than once.
+ bool never_nack_multiple_times = false;
+ // Only nack if the RTT is valid.
+ bool require_valid_rtt = false;
+ // Default RTT to use unless `require_valid_rtt` is set.
+ int default_rtt_ms = 100;
+ // Do not nack if the loss rate is above this value.
+ double max_loss_rate = 1.0;
+ };
+
+ struct NackElement {
+ NackElement(int64_t initial_time_to_play_ms, uint32_t initial_timestamp)
+ : time_to_play_ms(initial_time_to_play_ms),
+ estimated_timestamp(initial_timestamp) {}
+
+ // Estimated time (ms) left for this packet to be decoded. This estimate is
+ // updated every time jitter buffer decodes a packet.
+ int64_t time_to_play_ms;
+
+ // A guess about the timestamp of the missing packet, it is used for
+ // estimation of `time_to_play_ms`. The estimate might be slightly wrong if
+ // there has been frame-size change since the last received packet and the
+ // missing packet. However, the risk of this is low, and in case of such
+ // errors, there will be a minor misestimation in time-to-play of missing
+ // packets. This will have a very minor effect on NACK performance.
+ uint32_t estimated_timestamp;
+ };
+
+ class NackListCompare {
+ public:
+ bool operator()(uint16_t sequence_number_old,
+ uint16_t sequence_number_new) const {
+ return IsNewerSequenceNumber(sequence_number_new, sequence_number_old);
+ }
+ };
+
+ typedef std::map<uint16_t, NackElement, NackListCompare> NackList;
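+  // With `NackListCompare`, keys are ordered with sequence-number wrap-around
+  // taken into account: after a wrap, e.g. 2 sorts after 65534, so the map
+  // stays sorted across the wrap.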
+
+ // This API is used only for testing to assess whether time-to-play is
+ // computed correctly.
+ NackList GetNackList() const;
+
+  // This function subtracts 10 ms from the time-to-play of all packets in the
+  // NACK list. It is called when 10 ms have elapsed with no new RTP packet
+  // decoded.
+ void UpdateEstimatedPlayoutTimeBy10ms();
+
+  // Returns a valid number of samples per packet given the current received
+  // sequence number and timestamp, or nullopt if none could be computed.
+ absl::optional<int> GetSamplesPerPacket(
+ uint16_t sequence_number_current_received_rtp,
+ uint32_t timestamp_current_received_rtp) const;
+
+  // Given the `sequence_number_current_received_rtp` of the currently received
+  // RTP packet, update the list. Missing packets older than the received
+  // packet are added to the NACK list.
+ void UpdateList(uint16_t sequence_number_current_received_rtp,
+ uint32_t timestamp_current_received_rtp);
+
+  // Packets with a sequence number older than
+  // `sequence_num_last_received_rtp_` - `max_nack_list_size_` are removed
+  // from the NACK list.
+ void LimitNackListSize();
+
+ // Estimate timestamp of a missing packet given its sequence number.
+ uint32_t EstimateTimestamp(uint16_t sequence_number, int samples_per_packet);
+
+ // Compute time-to-play given a timestamp.
+ int64_t TimeToPlay(uint32_t timestamp) const;
+
+  // Updates the estimated packet loss rate.
+ void UpdatePacketLossRate(int packets_lost);
+
+ const Config config_;
+
+ // Valid if a packet is received.
+ uint16_t sequence_num_last_received_rtp_;
+ uint32_t timestamp_last_received_rtp_;
+ bool any_rtp_received_; // If any packet received.
+
+ // Valid if a packet is decoded.
+ uint16_t sequence_num_last_decoded_rtp_;
+ uint32_t timestamp_last_decoded_rtp_;
+ bool any_rtp_decoded_; // If any packet decoded.
+
+ int sample_rate_khz_; // Sample rate in kHz.
+
+  // A list of missing packets to be retransmitted. Each element of the list
+  // contains the sequence number of a missing packet and the estimated time
+  // at which that packet is going to be played out.
+ NackList nack_list_;
+
+ // NACK list will not keep track of missing packets prior to
+ // `sequence_num_last_received_rtp_` - `max_nack_list_size_`.
+ size_t max_nack_list_size_;
+
+ // Current estimate of the packet loss rate in Q30.
+ uint32_t packet_loss_rate_ = 0;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_CODING_NETEQ_NACK_TRACKER_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/nack_tracker_unittest.cc b/third_party/libwebrtc/modules/audio_coding/neteq/nack_tracker_unittest.cc
new file mode 100644
index 0000000000..bcc5120ff3
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/nack_tracker_unittest.cc
@@ -0,0 +1,565 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/nack_tracker.h"
+
+#include <stdint.h>
+
+#include <algorithm>
+#include <memory>
+
+#include "modules/audio_coding/include/audio_coding_module_typedefs.h"
+#include "test/field_trial.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+const int kSampleRateHz = 16000;
+const int kPacketSizeMs = 30;
+const uint32_t kTimestampIncrement = 480; // 30 ms.
+const int64_t kShortRoundTripTimeMs = 1;
+
+bool IsNackListCorrect(const std::vector<uint16_t>& nack_list,
+ const uint16_t* lost_sequence_numbers,
+ size_t num_lost_packets) {
+ if (nack_list.size() != num_lost_packets)
+ return false;
+
+ if (num_lost_packets == 0)
+ return true;
+
+ for (size_t k = 0; k < nack_list.size(); ++k) {
+ int seq_num = nack_list[k];
+ bool seq_num_matched = false;
+ for (size_t n = 0; n < num_lost_packets; ++n) {
+ if (seq_num == lost_sequence_numbers[n]) {
+ seq_num_matched = true;
+ break;
+ }
+ }
+ if (!seq_num_matched)
+ return false;
+ }
+ return true;
+}
+
+} // namespace
+
+TEST(NackTrackerTest, EmptyListWhenNoPacketLoss) {
+ NackTracker nack;
+ nack.UpdateSampleRate(kSampleRateHz);
+
+ int seq_num = 1;
+ uint32_t timestamp = 0;
+
+ std::vector<uint16_t> nack_list;
+ for (int n = 0; n < 100; n++) {
+ nack.UpdateLastReceivedPacket(seq_num, timestamp);
+ nack_list = nack.GetNackList(kShortRoundTripTimeMs);
+ seq_num++;
+ timestamp += kTimestampIncrement;
+ nack_list = nack.GetNackList(kShortRoundTripTimeMs);
+ EXPECT_TRUE(nack_list.empty());
+ }
+}
+
+TEST(NackTrackerTest, LatePacketsMovedToNackThenNackListDoesNotChange) {
+ const uint16_t kSequenceNumberLostPackets[] = {2, 3, 4, 5, 6, 7, 8, 9};
+ static const int kNumAllLostPackets = sizeof(kSequenceNumberLostPackets) /
+ sizeof(kSequenceNumberLostPackets[0]);
+
+  for (int k = 0; k < 2; k++) {  // Two iterations, without and with
+                                 // wrap-around.
+ NackTracker nack;
+ nack.UpdateSampleRate(kSampleRateHz);
+
+ uint16_t sequence_num_lost_packets[kNumAllLostPackets];
+ for (int n = 0; n < kNumAllLostPackets; n++) {
+ sequence_num_lost_packets[n] =
+ kSequenceNumberLostPackets[n] +
+          k * 65531;  // Create a wrap-around in sequence numbers for |k == 1|.
+ }
+ uint16_t seq_num = sequence_num_lost_packets[0] - 1;
+
+ uint32_t timestamp = 0;
+ std::vector<uint16_t> nack_list;
+
+ nack.UpdateLastReceivedPacket(seq_num, timestamp);
+ nack_list = nack.GetNackList(kShortRoundTripTimeMs);
+ EXPECT_TRUE(nack_list.empty());
+
+ seq_num = sequence_num_lost_packets[kNumAllLostPackets - 1] + 1;
+ timestamp += kTimestampIncrement * (kNumAllLostPackets + 1);
+ int num_lost_packets = std::max(0, kNumAllLostPackets);
+
+ nack.UpdateLastReceivedPacket(seq_num, timestamp);
+ nack_list = nack.GetNackList(kShortRoundTripTimeMs);
+ EXPECT_TRUE(IsNackListCorrect(nack_list, sequence_num_lost_packets,
+ num_lost_packets));
+ seq_num++;
+ timestamp += kTimestampIncrement;
+ num_lost_packets++;
+
+ for (int n = 0; n < 100; ++n) {
+ nack.UpdateLastReceivedPacket(seq_num, timestamp);
+ nack_list = nack.GetNackList(kShortRoundTripTimeMs);
+ EXPECT_TRUE(IsNackListCorrect(nack_list, sequence_num_lost_packets,
+ kNumAllLostPackets));
+ seq_num++;
+ timestamp += kTimestampIncrement;
+ }
+ }
+}
+
+TEST(NackTrackerTest, ArrivedPacketsAreRemovedFromNackList) {
+ const uint16_t kSequenceNumberLostPackets[] = {2, 3, 4, 5, 6, 7, 8, 9};
+ static const int kNumAllLostPackets = sizeof(kSequenceNumberLostPackets) /
+ sizeof(kSequenceNumberLostPackets[0]);
+
+  for (int k = 0; k < 2; ++k) {  // Two iterations, without and with
+                                 // wrap-around.
+ NackTracker nack;
+ nack.UpdateSampleRate(kSampleRateHz);
+
+ uint16_t sequence_num_lost_packets[kNumAllLostPackets];
+ for (int n = 0; n < kNumAllLostPackets; ++n) {
+ sequence_num_lost_packets[n] = kSequenceNumberLostPackets[n] +
+ k * 65531; // Wrap around for |k == 1|.
+ }
+
+ uint16_t seq_num = sequence_num_lost_packets[0] - 1;
+ uint32_t timestamp = 0;
+
+ nack.UpdateLastReceivedPacket(seq_num, timestamp);
+ std::vector<uint16_t> nack_list = nack.GetNackList(kShortRoundTripTimeMs);
+ EXPECT_TRUE(nack_list.empty());
+
+ size_t index_retransmitted_rtp = 0;
+ uint32_t timestamp_retransmitted_rtp = timestamp + kTimestampIncrement;
+
+ seq_num = sequence_num_lost_packets[kNumAllLostPackets - 1] + 1;
+ timestamp += kTimestampIncrement * (kNumAllLostPackets + 1);
+ size_t num_lost_packets = kNumAllLostPackets;
+ for (int n = 0; n < kNumAllLostPackets; ++n) {
+      // The first in-order arrival creates the full NACK list. Thereafter,
+      // each iteration receives one in-order packet (adding no losses) and
+      // one retransmitted packet (removing one entry), so the expected list
+      // size shrinks by one per iteration after the first.
+ if (n >= 1)
+ num_lost_packets--;
+
+ nack.UpdateLastReceivedPacket(seq_num, timestamp);
+ nack_list = nack.GetNackList(kShortRoundTripTimeMs);
+ EXPECT_TRUE(IsNackListCorrect(
+ nack_list, &sequence_num_lost_packets[index_retransmitted_rtp],
+ num_lost_packets));
+ seq_num++;
+ timestamp += kTimestampIncrement;
+
+ // Retransmission of a lost RTP.
+ nack.UpdateLastReceivedPacket(
+ sequence_num_lost_packets[index_retransmitted_rtp],
+ timestamp_retransmitted_rtp);
+ index_retransmitted_rtp++;
+ timestamp_retransmitted_rtp += kTimestampIncrement;
+
+ nack_list = nack.GetNackList(kShortRoundTripTimeMs);
+ EXPECT_TRUE(IsNackListCorrect(
+ nack_list, &sequence_num_lost_packets[index_retransmitted_rtp],
+ num_lost_packets - 1)); // One less lost packet in the list.
+ }
+ ASSERT_TRUE(nack_list.empty());
+ }
+}
+
+// Assess whether the estimation of timestamps and time-to-play is correct.
+// Cover all combinations of timestamp and sequence-number wrap-around.
+TEST(NackTrackerTest, EstimateTimestampAndTimeToPlay) {
+ const uint16_t kLostPackets[] = {2, 3, 4, 5, 6, 7, 8,
+ 9, 10, 11, 12, 13, 14, 15};
+ static const int kNumAllLostPackets =
+ sizeof(kLostPackets) / sizeof(kLostPackets[0]);
+
+ for (int k = 0; k < 4; ++k) {
+ NackTracker nack;
+ nack.UpdateSampleRate(kSampleRateHz);
+
+    // Sequence number wrap-around if `k` is 2 or 3.
+ int seq_num_offset = (k < 2) ? 0 : 65531;
+
+ // Timestamp wrap around if `k` is 1 or 3.
+ uint32_t timestamp_offset =
+ (k & 0x1) ? static_cast<uint32_t>(0xffffffff) - 6 : 0;
+
+ uint32_t timestamp_lost_packets[kNumAllLostPackets];
+ uint16_t seq_num_lost_packets[kNumAllLostPackets];
+ for (int n = 0; n < kNumAllLostPackets; ++n) {
+ timestamp_lost_packets[n] =
+ timestamp_offset + kLostPackets[n] * kTimestampIncrement;
+ seq_num_lost_packets[n] = seq_num_offset + kLostPackets[n];
+ }
+
+    // We want to push two packets before the lost burst starts.
+ uint16_t seq_num = seq_num_lost_packets[0] - 2;
+ uint32_t timestamp = timestamp_lost_packets[0] - 2 * kTimestampIncrement;
+
+ const uint16_t first_seq_num = seq_num;
+ const uint32_t first_timestamp = timestamp;
+
+    // Push two consecutive packets to get a correct estimate of the timestamp
+    // increase.
+ nack.UpdateLastReceivedPacket(seq_num, timestamp);
+ seq_num++;
+ timestamp += kTimestampIncrement;
+ nack.UpdateLastReceivedPacket(seq_num, timestamp);
+
+ // A packet after the last one which is supposed to be lost.
+ seq_num = seq_num_lost_packets[kNumAllLostPackets - 1] + 1;
+ timestamp =
+ timestamp_lost_packets[kNumAllLostPackets - 1] + kTimestampIncrement;
+ nack.UpdateLastReceivedPacket(seq_num, timestamp);
+
+ NackTracker::NackList nack_list = nack.GetNackList();
+ EXPECT_EQ(static_cast<size_t>(kNumAllLostPackets), nack_list.size());
+
+ // Pretend the first packet is decoded.
+ nack.UpdateLastDecodedPacket(first_seq_num, first_timestamp);
+ nack_list = nack.GetNackList();
+
+ NackTracker::NackList::iterator it = nack_list.begin();
+ while (it != nack_list.end()) {
+ seq_num = it->first - seq_num_offset;
+ int index = seq_num - kLostPackets[0];
+ EXPECT_EQ(timestamp_lost_packets[index], it->second.estimated_timestamp);
+ EXPECT_EQ((index + 2) * kPacketSizeMs, it->second.time_to_play_ms);
+ ++it;
+ }
+
+    // Pretend 10 ms have passed and we have pulled audio from NetEq, but it
+    // still reports the same sequence number as decoded. Time-to-play should
+    // then be updated by 10 ms.
+ nack.UpdateLastDecodedPacket(first_seq_num, first_timestamp);
+ nack_list = nack.GetNackList();
+ it = nack_list.begin();
+ while (it != nack_list.end()) {
+ seq_num = it->first - seq_num_offset;
+ int index = seq_num - kLostPackets[0];
+ EXPECT_EQ((index + 2) * kPacketSizeMs - 10, it->second.time_to_play_ms);
+ ++it;
+ }
+ }
+}
+
+TEST(NackTrackerTest,
+ MissingPacketsPriorToLastDecodedRtpShouldNotBeInNackList) {
+ for (int m = 0; m < 2; ++m) {
+ uint16_t seq_num_offset = (m == 0) ? 0 : 65531; // Wrap around if `m` is 1.
+ NackTracker nack;
+ nack.UpdateSampleRate(kSampleRateHz);
+
+    // Push two consecutive packets to get a correct estimate of the timestamp
+    // increase.
+ uint16_t seq_num = 0;
+ nack.UpdateLastReceivedPacket(seq_num_offset + seq_num,
+ seq_num * kTimestampIncrement);
+ seq_num++;
+ nack.UpdateLastReceivedPacket(seq_num_offset + seq_num,
+ seq_num * kTimestampIncrement);
+
+    // Skip 10 packets (create a gap of 10 lost packets).
+ const int kNumLostPackets = 10;
+ seq_num += kNumLostPackets + 1;
+ nack.UpdateLastReceivedPacket(seq_num_offset + seq_num,
+ seq_num * kTimestampIncrement);
+
+ const size_t kExpectedListSize = kNumLostPackets;
+ std::vector<uint16_t> nack_list = nack.GetNackList(kShortRoundTripTimeMs);
+ EXPECT_EQ(kExpectedListSize, nack_list.size());
+
+ for (int k = 0; k < 2; ++k) {
+ // Decoding of the first and the second arrived packets.
+ for (int n = 0; n < kPacketSizeMs / 10; ++n) {
+ nack.UpdateLastDecodedPacket(seq_num_offset + k,
+ k * kTimestampIncrement);
+ nack_list = nack.GetNackList(kShortRoundTripTimeMs);
+ EXPECT_EQ(kExpectedListSize, nack_list.size());
+ }
+ }
+
+ // Decoding of the last received packet.
+ nack.UpdateLastDecodedPacket(seq_num + seq_num_offset,
+ seq_num * kTimestampIncrement);
+ nack_list = nack.GetNackList(kShortRoundTripTimeMs);
+ EXPECT_TRUE(nack_list.empty());
+
+    // Make sure the list of late packets is also empty. To check that, push a
+    // few packets; if the late list is not empty, its contents will show up
+    // in the NACK list.
+ for (int n = 0; n < 10; ++n) {
+ seq_num++;
+ nack.UpdateLastReceivedPacket(seq_num_offset + seq_num,
+ seq_num * kTimestampIncrement);
+ nack_list = nack.GetNackList(kShortRoundTripTimeMs);
+ EXPECT_TRUE(nack_list.empty());
+ }
+ }
+}
+
+TEST(NackTrackerTest, Reset) {
+ NackTracker nack;
+ nack.UpdateSampleRate(kSampleRateHz);
+
+  // Push two consecutive packets to get a correct estimate of the timestamp
+  // increase.
+ uint16_t seq_num = 0;
+ nack.UpdateLastReceivedPacket(seq_num, seq_num * kTimestampIncrement);
+ seq_num++;
+ nack.UpdateLastReceivedPacket(seq_num, seq_num * kTimestampIncrement);
+
+  // Skip 10 packets (create a gap of 10 lost packets).
+ const int kNumLostPackets = 10;
+ seq_num += kNumLostPackets + 1;
+ nack.UpdateLastReceivedPacket(seq_num, seq_num * kTimestampIncrement);
+
+ const size_t kExpectedListSize = kNumLostPackets;
+ std::vector<uint16_t> nack_list = nack.GetNackList(kShortRoundTripTimeMs);
+ EXPECT_EQ(kExpectedListSize, nack_list.size());
+
+ nack.Reset();
+ nack_list = nack.GetNackList(kShortRoundTripTimeMs);
+ EXPECT_TRUE(nack_list.empty());
+}
+
+TEST(NackTrackerTest, ListSizeAppliedFromBeginning) {
+ const size_t kNackListSize = 10;
+ for (int m = 0; m < 2; ++m) {
+ uint16_t seq_num_offset = (m == 0) ? 0 : 65525; // Wrap around if `m` is 1.
+ NackTracker nack;
+ nack.UpdateSampleRate(kSampleRateHz);
+ nack.SetMaxNackListSize(kNackListSize);
+
+ uint16_t seq_num = seq_num_offset;
+ uint32_t timestamp = 0x12345678;
+ nack.UpdateLastReceivedPacket(seq_num, timestamp);
+
+    // More packets are lost than the NACK-list size limit.
+ uint16_t num_lost_packets = kNackListSize + 5;
+
+ seq_num += num_lost_packets + 1;
+ timestamp += (num_lost_packets + 1) * kTimestampIncrement;
+ nack.UpdateLastReceivedPacket(seq_num, timestamp);
+
+ std::vector<uint16_t> nack_list = nack.GetNackList(kShortRoundTripTimeMs);
+ EXPECT_EQ(kNackListSize, nack_list.size());
+ }
+}
+
+TEST(NackTrackerTest, ChangeOfListSizeAppliedAndOldElementsRemoved) {
+ const size_t kNackListSize = 10;
+ for (int m = 0; m < 2; ++m) {
+ uint16_t seq_num_offset = (m == 0) ? 0 : 65525; // Wrap around if `m` is 1.
+ NackTracker nack;
+ nack.UpdateSampleRate(kSampleRateHz);
+
+ uint16_t seq_num = seq_num_offset;
+ uint32_t timestamp = 0x87654321;
+ nack.UpdateLastReceivedPacket(seq_num, timestamp);
+
+    // More packets are lost than the NACK-list size limit.
+ uint16_t num_lost_packets = kNackListSize + 5;
+
+ std::unique_ptr<uint16_t[]> seq_num_lost(new uint16_t[num_lost_packets]);
+ for (int n = 0; n < num_lost_packets; ++n) {
+ seq_num_lost[n] = ++seq_num;
+ }
+
+ ++seq_num;
+ timestamp += (num_lost_packets + 1) * kTimestampIncrement;
+ nack.UpdateLastReceivedPacket(seq_num, timestamp);
+ size_t expected_size = num_lost_packets;
+
+ std::vector<uint16_t> nack_list = nack.GetNackList(kShortRoundTripTimeMs);
+ EXPECT_EQ(expected_size, nack_list.size());
+
+ nack.SetMaxNackListSize(kNackListSize);
+ expected_size = kNackListSize;
+ nack_list = nack.GetNackList(kShortRoundTripTimeMs);
+ EXPECT_TRUE(IsNackListCorrect(
+ nack_list, &seq_num_lost[num_lost_packets - kNackListSize],
+ expected_size));
+
+ // NACK list should shrink.
+ for (size_t n = 1; n < kNackListSize; ++n) {
+ ++seq_num;
+ timestamp += kTimestampIncrement;
+ nack.UpdateLastReceivedPacket(seq_num, timestamp);
+ --expected_size;
+ nack_list = nack.GetNackList(kShortRoundTripTimeMs);
+ EXPECT_TRUE(IsNackListCorrect(
+ nack_list, &seq_num_lost[num_lost_packets - kNackListSize + n],
+ expected_size));
+ }
+
+ // After this packet, NACK list should be empty.
+ ++seq_num;
+ timestamp += kTimestampIncrement;
+ nack.UpdateLastReceivedPacket(seq_num, timestamp);
+ nack_list = nack.GetNackList(kShortRoundTripTimeMs);
+ EXPECT_TRUE(nack_list.empty());
+ }
+}
+
+TEST(NackTrackerTest, RoundTripTimeIsApplied) {
+ const int kNackListSize = 200;
+ NackTracker nack;
+ nack.UpdateSampleRate(kSampleRateHz);
+ nack.SetMaxNackListSize(kNackListSize);
+
+ uint16_t seq_num = 0;
+ uint32_t timestamp = 0x87654321;
+ nack.UpdateLastReceivedPacket(seq_num, timestamp);
+
+  // Five packets are lost.
+ uint16_t kNumLostPackets = 5;
+
+ seq_num += (1 + kNumLostPackets);
+ timestamp += (1 + kNumLostPackets) * kTimestampIncrement;
+ nack.UpdateLastReceivedPacket(seq_num, timestamp);
+
+  // The expected time-to-play values are:
+ // kPacketSizeMs - 10, 2*kPacketSizeMs - 10, 3*kPacketSizeMs - 10, ...
+ //
+ // sequence number: 1, 2, 3, 4, 5
+ // time-to-play: 20, 50, 80, 110, 140
+ //
+ std::vector<uint16_t> nack_list = nack.GetNackList(100);
+ ASSERT_EQ(2u, nack_list.size());
+ EXPECT_EQ(4, nack_list[0]);
+ EXPECT_EQ(5, nack_list[1]);
+}
+
+// Set never_nack_multiple_times to true with a field trial and verify that
+// packets are not nacked multiple times.
+TEST(NackTrackerTest, DoNotNackMultipleTimes) {
+ test::ScopedFieldTrials field_trials(
+ "WebRTC-Audio-NetEqNackTrackerConfig/"
+ "packet_loss_forget_factor:0.996,ms_per_loss_percent:20,"
+ "never_nack_multiple_times:true/");
+ const int kNackListSize = 200;
+ NackTracker nack;
+ nack.UpdateSampleRate(kSampleRateHz);
+ nack.SetMaxNackListSize(kNackListSize);
+
+ uint16_t seq_num = 0;
+ uint32_t timestamp = 0x87654321;
+ nack.UpdateLastReceivedPacket(seq_num, timestamp);
+
+ uint16_t kNumLostPackets = 3;
+
+ seq_num += (1 + kNumLostPackets);
+ timestamp += (1 + kNumLostPackets) * kTimestampIncrement;
+ nack.UpdateLastReceivedPacket(seq_num, timestamp);
+
+ std::vector<uint16_t> nack_list = nack.GetNackList(10);
+ ASSERT_EQ(3u, nack_list.size());
+ EXPECT_EQ(1, nack_list[0]);
+ EXPECT_EQ(2, nack_list[1]);
+ EXPECT_EQ(3, nack_list[2]);
+ // When we get the nack list again, it should be empty.
+ std::vector<uint16_t> nack_list2 = nack.GetNackList(10);
+ EXPECT_TRUE(nack_list2.empty());
+}
+
+// Test if estimated packet loss rate is correct.
+TEST(NackTrackerTest, PacketLossRateCorrect) {
+ const int kNackListSize = 200;
+ NackTracker nack;
+ nack.UpdateSampleRate(kSampleRateHz);
+ nack.SetMaxNackListSize(kNackListSize);
+ uint16_t seq_num = 0;
+ uint32_t timestamp = 0x87654321;
+ auto add_packet = [&nack, &seq_num, &timestamp](bool received) {
+ if (received) {
+ nack.UpdateLastReceivedPacket(seq_num, timestamp);
+ }
+ seq_num++;
+ timestamp += kTimestampIncrement;
+ };
+ // Add some packets, but every fourth packet is lost.
+ for (int i = 0; i < 300; i++) {
+ add_packet(true);
+ add_packet(true);
+ add_packet(true);
+ add_packet(false);
+ }
+ // 1 << 28 is 0.25 in Q30. We expect the packet loss estimate to be within
+ // 0.01 of that.
+ EXPECT_NEAR(nack.GetPacketLossRateForTest(), 1 << 28, (1 << 30) / 100);
+}
+
+TEST(NackTrackerTest, DoNotNackAfterDtx) {
+ const int kNackListSize = 200;
+ NackTracker nack;
+ nack.UpdateSampleRate(kSampleRateHz);
+ nack.SetMaxNackListSize(kNackListSize);
+ uint16_t seq_num = 0;
+ uint32_t timestamp = 0x87654321;
+ nack.UpdateLastReceivedPacket(seq_num, timestamp);
+ EXPECT_TRUE(nack.GetNackList(0).empty());
+ constexpr int kDtxPeriod = 400;
+ nack.UpdateLastReceivedPacket(seq_num + 2,
+ timestamp + kDtxPeriod * kSampleRateHz / 1000);
+ EXPECT_TRUE(nack.GetNackList(0).empty());
+}
+
+TEST(NackTrackerTest, DoNotNackIfLossRateIsTooHigh) {
+ test::ScopedFieldTrials field_trials(
+ "WebRTC-Audio-NetEqNackTrackerConfig/max_loss_rate:0.4/");
+ const int kNackListSize = 200;
+ NackTracker nack;
+ nack.UpdateSampleRate(kSampleRateHz);
+ nack.SetMaxNackListSize(kNackListSize);
+ uint16_t seq_num = 0;
+ uint32_t timestamp = 0x87654321;
+ auto add_packet = [&nack, &seq_num, &timestamp](bool received) {
+ if (received) {
+ nack.UpdateLastReceivedPacket(seq_num, timestamp);
+ }
+ seq_num++;
+ timestamp += kTimestampIncrement;
+ };
+ for (int i = 0; i < 500; i++) {
+ add_packet(true);
+ add_packet(false);
+ }
+  // Expect a 50% loss rate, which is higher than the configured maximum of
+  // 40%.
+ EXPECT_NEAR(nack.GetPacketLossRateForTest(), 1 << 29, (1 << 30) / 100);
+ EXPECT_TRUE(nack.GetNackList(0).empty());
+}
+
+TEST(NackTrackerTest, OnlyNackIfRttIsValid) {
+ test::ScopedFieldTrials field_trials(
+ "WebRTC-Audio-NetEqNackTrackerConfig/require_valid_rtt:true/");
+ const int kNackListSize = 200;
+ NackTracker nack;
+ nack.UpdateSampleRate(kSampleRateHz);
+ nack.SetMaxNackListSize(kNackListSize);
+ uint16_t seq_num = 0;
+ uint32_t timestamp = 0x87654321;
+ auto add_packet = [&nack, &seq_num, &timestamp](bool received) {
+ if (received) {
+ nack.UpdateLastReceivedPacket(seq_num, timestamp);
+ }
+ seq_num++;
+ timestamp += kTimestampIncrement;
+ };
+ add_packet(true);
+ add_packet(false);
+ add_packet(true);
+ EXPECT_TRUE(nack.GetNackList(0).empty());
+ EXPECT_FALSE(nack.GetNackList(10).empty());
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/neteq_decoder_plc_unittest.cc b/third_party/libwebrtc/modules/audio_coding/neteq/neteq_decoder_plc_unittest.cc
new file mode 100644
index 0000000000..cf310d1efb
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/neteq_decoder_plc_unittest.cc
@@ -0,0 +1,313 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+// Test to verify correct operation when using the decoder-internal PLC.
+
+#include <memory>
+#include <utility>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "modules/audio_coding/codecs/pcm16b/audio_encoder_pcm16b.h"
+#include "modules/audio_coding/neteq/tools/audio_checksum.h"
+#include "modules/audio_coding/neteq/tools/audio_sink.h"
+#include "modules/audio_coding/neteq/tools/encode_neteq_input.h"
+#include "modules/audio_coding/neteq/tools/fake_decode_from_file.h"
+#include "modules/audio_coding/neteq/tools/input_audio_file.h"
+#include "modules/audio_coding/neteq/tools/neteq_test.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "test/audio_decoder_proxy_factory.h"
+#include "test/gtest.h"
+#include "test/testsupport/file_utils.h"
+
+namespace webrtc {
+namespace test {
+namespace {
+
+constexpr int kSampleRateHz = 32000;
+constexpr int kRunTimeMs = 10000;
+
+// This class implements a fake decoder. The decoder reads audio from a file
+// and presents it as output, both for regular decoding and for PLC.
+class AudioDecoderPlc : public AudioDecoder {
+ public:
+ AudioDecoderPlc(std::unique_ptr<InputAudioFile> input, int sample_rate_hz)
+ : input_(std::move(input)), sample_rate_hz_(sample_rate_hz) {}
+
+ void Reset() override {}
+ int SampleRateHz() const override { return sample_rate_hz_; }
+ size_t Channels() const override { return 1; }
+ int DecodeInternal(const uint8_t* /*encoded*/,
+ size_t encoded_len,
+ int sample_rate_hz,
+ int16_t* decoded,
+ SpeechType* speech_type) override {
+ RTC_CHECK_GE(encoded_len / 2, 10 * sample_rate_hz_ / 1000);
+ RTC_CHECK_LE(encoded_len / 2, 2 * 10 * sample_rate_hz_ / 1000);
+ RTC_CHECK_EQ(sample_rate_hz, sample_rate_hz_);
+ RTC_CHECK(decoded);
+ RTC_CHECK(speech_type);
+ RTC_CHECK(input_->Read(encoded_len / 2, decoded));
+ *speech_type = kSpeech;
+ last_was_plc_ = false;
+ return encoded_len / 2;
+ }
+
+ void GeneratePlc(size_t requested_samples_per_channel,
+ rtc::BufferT<int16_t>* concealment_audio) override {
+ // Instead of generating random data for GeneratePlc we use the same data as
+ // the input, so we can check that we produce the same result independently
+ // of the losses.
+ RTC_DCHECK_EQ(requested_samples_per_channel, 10 * sample_rate_hz_ / 1000);
+
+ // Must keep a local copy of this since DecodeInternal sets it to false.
+ const bool last_was_plc = last_was_plc_;
+
+ std::vector<int16_t> decoded(5760);
+ SpeechType speech_type;
+ int dec_len = DecodeInternal(nullptr, 2 * 10 * sample_rate_hz_ / 1000,
+ sample_rate_hz_, decoded.data(), &speech_type);
+ concealment_audio->AppendData(decoded.data(), dec_len);
+ concealed_samples_ += rtc::checked_cast<size_t>(dec_len);
+
+ if (!last_was_plc) {
+ ++concealment_events_;
+ }
+ last_was_plc_ = true;
+ }
+
+ size_t concealed_samples() { return concealed_samples_; }
+ size_t concealment_events() { return concealment_events_; }
+
+ private:
+ const std::unique_ptr<InputAudioFile> input_;
+ const int sample_rate_hz_;
+ size_t concealed_samples_ = 0;
+ size_t concealment_events_ = 0;
+ bool last_was_plc_ = false;
+};
+
+// An input sample generator which generates only zero-samples.
+class ZeroSampleGenerator : public EncodeNetEqInput::Generator {
+ public:
+ rtc::ArrayView<const int16_t> Generate(size_t num_samples) override {
+ vec.resize(num_samples, 0);
+ rtc::ArrayView<const int16_t> view(vec);
+ RTC_DCHECK_EQ(view.size(), num_samples);
+ return view;
+ }
+
+ private:
+ std::vector<int16_t> vec;
+};
+
+// A NetEqInput which connects to another NetEqInput, but drops a number of
+// consecutive packets on the way.
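+// For example, LossyInput(1, 1, ...) drops every other packet, while
+// LossyInput(200, 250, ...) delivers 200 packets and then drops the next 250
+// in a single burst (cf. RunTest below).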
+class LossyInput : public NetEqInput {
+ public:
+ LossyInput(int loss_cadence,
+ int burst_length,
+ std::unique_ptr<NetEqInput> input)
+ : loss_cadence_(loss_cadence),
+ burst_length_(burst_length),
+ input_(std::move(input)) {}
+
+ absl::optional<int64_t> NextPacketTime() const override {
+ return input_->NextPacketTime();
+ }
+
+ absl::optional<int64_t> NextOutputEventTime() const override {
+ return input_->NextOutputEventTime();
+ }
+
+ std::unique_ptr<PacketData> PopPacket() override {
+ if (loss_cadence_ != 0 && (++count_ % loss_cadence_) == 0) {
+ // Pop `burst_length_` packets to create the loss.
+ auto packet_to_return = input_->PopPacket();
+ for (int i = 0; i < burst_length_; i++) {
+ input_->PopPacket();
+ }
+ return packet_to_return;
+ }
+ return input_->PopPacket();
+ }
+
+ void AdvanceOutputEvent() override { return input_->AdvanceOutputEvent(); }
+
+ bool ended() const override { return input_->ended(); }
+
+ absl::optional<RTPHeader> NextHeader() const override {
+ return input_->NextHeader();
+ }
+
+ private:
+ const int loss_cadence_;
+ const int burst_length_;
+ int count_ = 0;
+ const std::unique_ptr<NetEqInput> input_;
+};
+
+class AudioChecksumWithOutput : public AudioChecksum {
+ public:
+ explicit AudioChecksumWithOutput(std::string* output_str)
+ : output_str_(*output_str) {}
+ ~AudioChecksumWithOutput() { output_str_ = Finish(); }
+
+ private:
+ std::string& output_str_;
+};
+
+struct TestStatistics {
+ NetEqNetworkStatistics network;
+ NetEqLifetimeStatistics lifetime;
+};
+
+TestStatistics RunTest(int loss_cadence,
+ int burst_length,
+ std::string* checksum) {
+ NetEq::Config config;
+ config.for_test_no_time_stretching = true;
+
+ // The input is mostly useless. It sends zero-samples to a PCM16b encoder,
+ // but the actual encoded samples will never be used by the decoder in the
+ // test. See below about the decoder.
+ auto generator = std::make_unique<ZeroSampleGenerator>();
+ constexpr int kPayloadType = 100;
+ AudioEncoderPcm16B::Config encoder_config;
+ encoder_config.sample_rate_hz = kSampleRateHz;
+ encoder_config.payload_type = kPayloadType;
+ auto encoder = std::make_unique<AudioEncoderPcm16B>(encoder_config);
+ auto input = std::make_unique<EncodeNetEqInput>(
+ std::move(generator), std::move(encoder), kRunTimeMs);
+ // Wrap the input in a loss function.
+ auto lossy_input = std::make_unique<LossyInput>(loss_cadence, burst_length,
+ std::move(input));
+
+ // Setting up decoders.
+ NetEqTest::DecoderMap decoders;
+ // Using a fake decoder which simply reads the output audio from a file.
+ auto input_file = std::make_unique<InputAudioFile>(
+ webrtc::test::ResourcePath("audio_coding/testfile32kHz", "pcm"));
+ AudioDecoderPlc dec(std::move(input_file), kSampleRateHz);
+ // Masquerading as a PCM16b decoder.
+ decoders.emplace(kPayloadType, SdpAudioFormat("l16", 32000, 1));
+
+ // Output is simply a checksum calculator.
+ auto output = std::make_unique<AudioChecksumWithOutput>(checksum);
+
+ // No callback objects.
+ NetEqTest::Callbacks callbacks;
+
+ NetEqTest neteq_test(
+ config, /*decoder_factory=*/
+ rtc::make_ref_counted<test::AudioDecoderProxyFactory>(&dec),
+ /*codecs=*/decoders, /*text_log=*/nullptr, /*neteq_factory=*/nullptr,
+ /*input=*/std::move(lossy_input), std::move(output), callbacks);
+ EXPECT_LE(kRunTimeMs, neteq_test.Run());
+
+ auto lifetime_stats = neteq_test.LifetimeStats();
+ EXPECT_EQ(dec.concealed_samples(), lifetime_stats.concealed_samples);
+ EXPECT_EQ(dec.concealment_events(), lifetime_stats.concealment_events);
+ return {neteq_test.SimulationStats(), neteq_test.LifetimeStats()};
+}
+} // namespace
+
+// Check that some basic metrics move in the right direction. In particular,
+// expand_rate should only increase if losses are present. Our dummy decoder is
+// designed such that the checksum should always be the same regardless of the
+// losses, given that calls are executed in the right order.
+TEST(NetEqDecoderPlc, BasicMetrics) {
+ std::string checksum;
+
+ // Drop 1 packet every 10 packets.
+ auto stats = RunTest(10, 1, &checksum);
+
+ std::string checksum_no_loss;
+ auto stats_no_loss = RunTest(0, 0, &checksum_no_loss);
+
+ EXPECT_EQ(checksum, checksum_no_loss);
+
+ EXPECT_EQ(stats.network.preemptive_rate,
+ stats_no_loss.network.preemptive_rate);
+ EXPECT_EQ(stats.network.accelerate_rate,
+ stats_no_loss.network.accelerate_rate);
+ EXPECT_EQ(0, stats_no_loss.network.expand_rate);
+ EXPECT_GT(stats.network.expand_rate, 0);
+}
+
+// Checks that short losses are not counted as interruptions, while long
+// losses are counted correctly.
+TEST(NetEqDecoderPlc, CountInterruptions) {
+ std::string checksum;
+ std::string checksum_2;
+ std::string checksum_3;
+
+  // Half of the packets are lost, but in short bursts.
+  auto stats_no_interruptions = RunTest(1, 1, &checksum);
+  // One loss of 5000 ms (250 packets of 20 ms each).
+  auto stats_one_interruption = RunTest(200, 250, &checksum_2);
+  // Two losses of 2500 ms each (125 packets of 20 ms each).
+  auto stats_two_interruptions = RunTest(125, 125, &checksum_3);
+
+ EXPECT_EQ(checksum, checksum_2);
+ EXPECT_EQ(checksum, checksum_3);
+ EXPECT_GT(stats_no_interruptions.network.expand_rate, 0);
+ EXPECT_EQ(stats_no_interruptions.lifetime.total_interruption_duration_ms, 0);
+ EXPECT_EQ(stats_no_interruptions.lifetime.interruption_count, 0);
+
+ EXPECT_GT(stats_one_interruption.network.expand_rate, 0);
+ EXPECT_EQ(stats_one_interruption.lifetime.total_interruption_duration_ms,
+ 5000);
+ EXPECT_EQ(stats_one_interruption.lifetime.interruption_count, 1);
+
+ EXPECT_GT(stats_two_interruptions.network.expand_rate, 0);
+ EXPECT_EQ(stats_two_interruptions.lifetime.total_interruption_duration_ms,
+ 5000);
+ EXPECT_EQ(stats_two_interruptions.lifetime.interruption_count, 2);
+}
+
+// Checks that small losses do not produce interruptions.
+TEST(NetEqDecoderPlc, NoInterruptionsInSmallLosses) {
+ std::string checksum_1;
+ std::string checksum_4;
+
+ auto stats_1 = RunTest(300, 1, &checksum_1);
+ auto stats_4 = RunTest(300, 4, &checksum_4);
+
+ EXPECT_EQ(checksum_1, checksum_4);
+
+ EXPECT_EQ(stats_1.lifetime.interruption_count, 0);
+ EXPECT_EQ(stats_1.lifetime.total_interruption_duration_ms, 0);
+  EXPECT_EQ(stats_1.lifetime.concealed_samples, 640u);  // 20 ms of concealment.
+  EXPECT_EQ(stats_1.lifetime.concealment_events, 1u);   // In just one event.
+
+  EXPECT_EQ(stats_4.lifetime.interruption_count, 0);
+  EXPECT_EQ(stats_4.lifetime.total_interruption_duration_ms, 0);
+  EXPECT_EQ(stats_4.lifetime.concealed_samples, 2560u);  // 80 ms of concealment.
+  EXPECT_EQ(stats_4.lifetime.concealment_events, 1u);    // In just one event.
+}
+
+// Checks that interruptions of different sizes report correct duration.
+TEST(NetEqDecoderPlc, InterruptionsReportCorrectSize) {
+ std::string checksum;
+
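+  // With 20 ms packets, a burst of 7 gives 140 ms of concealment and a burst
+  // of 8 gives 160 ms; the expectations below are thus consistent with an
+  // interruption threshold of 150 ms (our reading of the expected values).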
+ for (int burst_length = 5; burst_length < 10; burst_length++) {
+ auto stats = RunTest(300, burst_length, &checksum);
+ auto duration = stats.lifetime.total_interruption_duration_ms;
+ if (burst_length < 8) {
+ EXPECT_EQ(duration, 0);
+ } else {
+ EXPECT_EQ(duration, burst_length * 20);
+ }
+ }
+}
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/neteq_impl.cc b/third_party/libwebrtc/modules/audio_coding/neteq/neteq_impl.cc
new file mode 100644
index 0000000000..6a6367d045
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/neteq_impl.cc
@@ -0,0 +1,2141 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/neteq_impl.h"
+
+#include <algorithm>
+#include <cstdint>
+#include <cstring>
+#include <list>
+#include <map>
+#include <memory>
+#include <utility>
+#include <vector>
+
+#include "api/audio_codecs/audio_decoder.h"
+#include "api/neteq/tick_timer.h"
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+#include "modules/audio_coding/codecs/cng/webrtc_cng.h"
+#include "modules/audio_coding/neteq/accelerate.h"
+#include "modules/audio_coding/neteq/background_noise.h"
+#include "modules/audio_coding/neteq/comfort_noise.h"
+#include "modules/audio_coding/neteq/decision_logic.h"
+#include "modules/audio_coding/neteq/decoder_database.h"
+#include "modules/audio_coding/neteq/dtmf_buffer.h"
+#include "modules/audio_coding/neteq/dtmf_tone_generator.h"
+#include "modules/audio_coding/neteq/expand.h"
+#include "modules/audio_coding/neteq/merge.h"
+#include "modules/audio_coding/neteq/nack_tracker.h"
+#include "modules/audio_coding/neteq/normal.h"
+#include "modules/audio_coding/neteq/packet.h"
+#include "modules/audio_coding/neteq/packet_buffer.h"
+#include "modules/audio_coding/neteq/post_decode_vad.h"
+#include "modules/audio_coding/neteq/preemptive_expand.h"
+#include "modules/audio_coding/neteq/red_payload_splitter.h"
+#include "modules/audio_coding/neteq/statistics_calculator.h"
+#include "modules/audio_coding/neteq/sync_buffer.h"
+#include "modules/audio_coding/neteq/time_stretch.h"
+#include "modules/audio_coding/neteq/timestamp_scaler.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "rtc_base/sanitizer.h"
+#include "rtc_base/strings/audio_format_to_string.h"
+#include "rtc_base/trace_event.h"
+#include "system_wrappers/include/clock.h"
+
+namespace webrtc {
+namespace {
+
+std::unique_ptr<NetEqController> CreateNetEqController(
+ const NetEqControllerFactory& controller_factory,
+ int base_min_delay,
+ int max_packets_in_buffer,
+ bool allow_time_stretching,
+ TickTimer* tick_timer,
+ webrtc::Clock* clock) {
+ NetEqController::Config config;
+ config.base_min_delay_ms = base_min_delay;
+ config.max_packets_in_buffer = max_packets_in_buffer;
+ config.allow_time_stretching = allow_time_stretching;
+ config.tick_timer = tick_timer;
+ config.clock = clock;
+ return controller_factory.CreateNetEqController(config);
+}
+
+} // namespace
+
+NetEqImpl::Dependencies::Dependencies(
+ const NetEq::Config& config,
+ Clock* clock,
+ const rtc::scoped_refptr<AudioDecoderFactory>& decoder_factory,
+ const NetEqControllerFactory& controller_factory)
+ : clock(clock),
+ tick_timer(new TickTimer),
+ stats(new StatisticsCalculator),
+ decoder_database(
+ new DecoderDatabase(decoder_factory, config.codec_pair_id)),
+ dtmf_buffer(new DtmfBuffer(config.sample_rate_hz)),
+ dtmf_tone_generator(new DtmfToneGenerator),
+ packet_buffer(
+ new PacketBuffer(config.max_packets_in_buffer, tick_timer.get())),
+ neteq_controller(
+ CreateNetEqController(controller_factory,
+ config.min_delay_ms,
+ config.max_packets_in_buffer,
+ !config.for_test_no_time_stretching,
+ tick_timer.get(),
+ clock)),
+ red_payload_splitter(new RedPayloadSplitter),
+ timestamp_scaler(new TimestampScaler(*decoder_database)),
+ accelerate_factory(new AccelerateFactory),
+ expand_factory(new ExpandFactory),
+ preemptive_expand_factory(new PreemptiveExpandFactory) {}
+
+NetEqImpl::Dependencies::~Dependencies() = default;
+
+NetEqImpl::NetEqImpl(const NetEq::Config& config,
+ Dependencies&& deps,
+ bool create_components)
+ : clock_(deps.clock),
+ tick_timer_(std::move(deps.tick_timer)),
+ decoder_database_(std::move(deps.decoder_database)),
+ dtmf_buffer_(std::move(deps.dtmf_buffer)),
+ dtmf_tone_generator_(std::move(deps.dtmf_tone_generator)),
+ packet_buffer_(std::move(deps.packet_buffer)),
+ red_payload_splitter_(std::move(deps.red_payload_splitter)),
+ timestamp_scaler_(std::move(deps.timestamp_scaler)),
+ vad_(new PostDecodeVad()),
+ expand_factory_(std::move(deps.expand_factory)),
+ accelerate_factory_(std::move(deps.accelerate_factory)),
+ preemptive_expand_factory_(std::move(deps.preemptive_expand_factory)),
+ stats_(std::move(deps.stats)),
+ controller_(std::move(deps.neteq_controller)),
+ last_mode_(Mode::kNormal),
+ decoded_buffer_length_(kMaxFrameSize),
+ decoded_buffer_(new int16_t[decoded_buffer_length_]),
+ playout_timestamp_(0),
+ new_codec_(false),
+ timestamp_(0),
+ reset_decoder_(false),
+ first_packet_(true),
+ enable_fast_accelerate_(config.enable_fast_accelerate),
+ nack_enabled_(false),
+ enable_muted_state_(config.enable_muted_state),
+ expand_uma_logger_("WebRTC.Audio.ExpandRatePercent",
+ 10, // Report once every 10 s.
+ tick_timer_.get()),
+ speech_expand_uma_logger_("WebRTC.Audio.SpeechExpandRatePercent",
+ 10, // Report once every 10 s.
+ tick_timer_.get()),
+ no_time_stretching_(config.for_test_no_time_stretching) {
+ RTC_LOG(LS_INFO) << "NetEq config: " << config.ToString();
+ int fs = config.sample_rate_hz;
+ if (fs != 8000 && fs != 16000 && fs != 32000 && fs != 48000) {
+ RTC_LOG(LS_ERROR) << "Sample rate " << fs
+ << " Hz not supported. "
+ "Changing to 8000 Hz.";
+ fs = 8000;
+ }
+ controller_->SetMaximumDelay(config.max_delay_ms);
+ fs_hz_ = fs;
+ fs_mult_ = fs / 8000;
+ last_output_sample_rate_hz_ = fs;
+ output_size_samples_ = static_cast<size_t>(kOutputSizeMs * 8 * fs_mult_);
+ controller_->SetSampleRate(fs_hz_, output_size_samples_);
+ decoder_frame_length_ = 2 * output_size_samples_; // 20 ms.
+ if (create_components) {
+ SetSampleRateAndChannels(fs, 1); // Default is 1 channel.
+ }
+ RTC_DCHECK(!vad_->enabled());
+ if (config.enable_post_decode_vad) {
+ vad_->Enable();
+ }
+}
+
+NetEqImpl::~NetEqImpl() = default;
+
+int NetEqImpl::InsertPacket(const RTPHeader& rtp_header,
+ rtc::ArrayView<const uint8_t> payload) {
+ rtc::MsanCheckInitialized(payload);
+ TRACE_EVENT0("webrtc", "NetEqImpl::InsertPacket");
+ MutexLock lock(&mutex_);
+ if (InsertPacketInternal(rtp_header, payload) != 0) {
+ return kFail;
+ }
+ return kOK;
+}
+
+void NetEqImpl::InsertEmptyPacket(const RTPHeader& rtp_header) {
+ MutexLock lock(&mutex_);
+ if (nack_enabled_) {
+ nack_->UpdateLastReceivedPacket(rtp_header.sequenceNumber,
+ rtp_header.timestamp);
+ }
+ controller_->RegisterEmptyPacket();
+}
+
+namespace {
+void SetAudioFrameActivityAndType(bool vad_enabled,
+ NetEqImpl::OutputType type,
+ AudioFrame::VADActivity last_vad_activity,
+ AudioFrame* audio_frame) {
+ switch (type) {
+ case NetEqImpl::OutputType::kNormalSpeech: {
+ audio_frame->speech_type_ = AudioFrame::kNormalSpeech;
+ audio_frame->vad_activity_ = AudioFrame::kVadActive;
+ break;
+ }
+ case NetEqImpl::OutputType::kVadPassive: {
+ // This should only be reached if the VAD is enabled.
+ RTC_DCHECK(vad_enabled);
+ audio_frame->speech_type_ = AudioFrame::kNormalSpeech;
+ audio_frame->vad_activity_ = AudioFrame::kVadPassive;
+ break;
+ }
+ case NetEqImpl::OutputType::kCNG: {
+ audio_frame->speech_type_ = AudioFrame::kCNG;
+ audio_frame->vad_activity_ = AudioFrame::kVadPassive;
+ break;
+ }
+ case NetEqImpl::OutputType::kPLC: {
+ audio_frame->speech_type_ = AudioFrame::kPLC;
+ audio_frame->vad_activity_ = last_vad_activity;
+ break;
+ }
+ case NetEqImpl::OutputType::kPLCCNG: {
+ audio_frame->speech_type_ = AudioFrame::kPLCCNG;
+ audio_frame->vad_activity_ = AudioFrame::kVadPassive;
+ break;
+ }
+ case NetEqImpl::OutputType::kCodecPLC: {
+ audio_frame->speech_type_ = AudioFrame::kCodecPLC;
+ audio_frame->vad_activity_ = last_vad_activity;
+ break;
+ }
+ default:
+ RTC_DCHECK_NOTREACHED();
+ }
+ if (!vad_enabled) {
+ // Always set kVadUnknown when receive VAD is inactive.
+ audio_frame->vad_activity_ = AudioFrame::kVadUnknown;
+ }
+}
+} // namespace
+
+int NetEqImpl::GetAudio(AudioFrame* audio_frame,
+ bool* muted,
+ int* current_sample_rate_hz,
+ absl::optional<Operation> action_override) {
+ TRACE_EVENT0("webrtc", "NetEqImpl::GetAudio");
+ MutexLock lock(&mutex_);
+ if (GetAudioInternal(audio_frame, muted, action_override) != 0) {
+ return kFail;
+ }
+ RTC_DCHECK_EQ(
+ audio_frame->sample_rate_hz_,
+ rtc::dchecked_cast<int>(audio_frame->samples_per_channel_ * 100));
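+  // (Each GetAudio() call produces a 10 ms frame, i.e. sample_rate / 100
+  // samples per channel, hence the check above.)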
+ RTC_DCHECK_EQ(*muted, audio_frame->muted());
+ SetAudioFrameActivityAndType(vad_->enabled(), LastOutputType(),
+ last_vad_activity_, audio_frame);
+ last_vad_activity_ = audio_frame->vad_activity_;
+ last_output_sample_rate_hz_ = audio_frame->sample_rate_hz_;
+ RTC_DCHECK(last_output_sample_rate_hz_ == 8000 ||
+ last_output_sample_rate_hz_ == 16000 ||
+ last_output_sample_rate_hz_ == 32000 ||
+ last_output_sample_rate_hz_ == 48000)
+ << "Unexpected sample rate " << last_output_sample_rate_hz_;
+
+ if (current_sample_rate_hz) {
+ *current_sample_rate_hz = last_output_sample_rate_hz_;
+ }
+
+ return kOK;
+}
+
+void NetEqImpl::SetCodecs(const std::map<int, SdpAudioFormat>& codecs) {
+ MutexLock lock(&mutex_);
+ const std::vector<int> changed_payload_types =
+ decoder_database_->SetCodecs(codecs);
+ for (const int pt : changed_payload_types) {
+ packet_buffer_->DiscardPacketsWithPayloadType(pt, stats_.get());
+ }
+}
+
+bool NetEqImpl::RegisterPayloadType(int rtp_payload_type,
+ const SdpAudioFormat& audio_format) {
+ RTC_LOG(LS_VERBOSE) << "NetEqImpl::RegisterPayloadType: payload type "
+ << rtp_payload_type << ", codec "
+ << rtc::ToString(audio_format);
+ MutexLock lock(&mutex_);
+ return decoder_database_->RegisterPayload(rtp_payload_type, audio_format) ==
+ DecoderDatabase::kOK;
+}
+
+int NetEqImpl::RemovePayloadType(uint8_t rtp_payload_type) {
+ MutexLock lock(&mutex_);
+ int ret = decoder_database_->Remove(rtp_payload_type);
+ if (ret == DecoderDatabase::kOK || ret == DecoderDatabase::kDecoderNotFound) {
+ packet_buffer_->DiscardPacketsWithPayloadType(rtp_payload_type,
+ stats_.get());
+ return kOK;
+ }
+ return kFail;
+}
+
+void NetEqImpl::RemoveAllPayloadTypes() {
+ MutexLock lock(&mutex_);
+ decoder_database_->RemoveAll();
+}
+
+bool NetEqImpl::SetMinimumDelay(int delay_ms) {
+ MutexLock lock(&mutex_);
+ if (delay_ms >= 0 && delay_ms <= 10000) {
+ RTC_DCHECK(controller_.get());
+ return controller_->SetMinimumDelay(delay_ms);
+ }
+ return false;
+}
+
+bool NetEqImpl::SetMaximumDelay(int delay_ms) {
+ MutexLock lock(&mutex_);
+ if (delay_ms >= 0 && delay_ms <= 10000) {
+ RTC_DCHECK(controller_.get());
+ return controller_->SetMaximumDelay(delay_ms);
+ }
+ return false;
+}
+
+bool NetEqImpl::SetBaseMinimumDelayMs(int delay_ms) {
+ MutexLock lock(&mutex_);
+ if (delay_ms >= 0 && delay_ms <= 10000) {
+ return controller_->SetBaseMinimumDelay(delay_ms);
+ }
+ return false;
+}
+
+int NetEqImpl::GetBaseMinimumDelayMs() const {
+ MutexLock lock(&mutex_);
+ return controller_->GetBaseMinimumDelay();
+}
+
+int NetEqImpl::TargetDelayMs() const {
+ MutexLock lock(&mutex_);
+ RTC_DCHECK(controller_.get());
+ return controller_->TargetLevelMs();
+}
+
+int NetEqImpl::FilteredCurrentDelayMs() const {
+ MutexLock lock(&mutex_);
+ // Sum up the filtered packet buffer level with the future length of the sync
+ // buffer.
+ const int delay_samples =
+ controller_->GetFilteredBufferLevel() + sync_buffer_->FutureLength();
+ // The division below will truncate. The return value is in ms.
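+  // For example, at fs_hz_ = 48000, a total of 960 delay samples corresponds
+  // to 960 / 48 = 20 ms.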
+ return delay_samples / rtc::CheckedDivExact(fs_hz_, 1000);
+}
+
+int NetEqImpl::NetworkStatistics(NetEqNetworkStatistics* stats) {
+ MutexLock lock(&mutex_);
+ RTC_DCHECK(decoder_database_.get());
+ *stats = CurrentNetworkStatisticsInternal();
+ stats_->GetNetworkStatistics(decoder_frame_length_, stats);
+ return 0;
+}
+
+NetEqNetworkStatistics NetEqImpl::CurrentNetworkStatistics() const {
+ MutexLock lock(&mutex_);
+ return CurrentNetworkStatisticsInternal();
+}
+
+NetEqNetworkStatistics NetEqImpl::CurrentNetworkStatisticsInternal() const {
+ RTC_DCHECK(decoder_database_.get());
+ NetEqNetworkStatistics stats;
+ const size_t total_samples_in_buffers =
+ packet_buffer_->NumSamplesInBuffer(decoder_frame_length_) +
+ sync_buffer_->FutureLength();
+
+ RTC_DCHECK(controller_.get());
+ stats.preferred_buffer_size_ms = controller_->TargetLevelMs();
+ stats.jitter_peaks_found = controller_->PeakFound();
+ RTC_DCHECK_GT(fs_hz_, 0);
+ stats.current_buffer_size_ms =
+ static_cast<uint16_t>(total_samples_in_buffers * 1000 / fs_hz_);
+ return stats;
+}
+
+NetEqLifetimeStatistics NetEqImpl::GetLifetimeStatistics() const {
+ MutexLock lock(&mutex_);
+ return stats_->GetLifetimeStatistics();
+}
+
+NetEqOperationsAndState NetEqImpl::GetOperationsAndState() const {
+ MutexLock lock(&mutex_);
+ auto result = stats_->GetOperationsAndState();
+ result.current_buffer_size_ms =
+ (packet_buffer_->NumSamplesInBuffer(decoder_frame_length_) +
+ sync_buffer_->FutureLength()) *
+ 1000 / fs_hz_;
+ result.current_frame_size_ms = decoder_frame_length_ * 1000 / fs_hz_;
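+  // The next packet is considered available only if it is contiguous with the
+  // audio already in the sync buffer, i.e., its timestamp matches the sync
+  // buffer's end timestamp.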
+ result.next_packet_available = packet_buffer_->PeekNextPacket() &&
+ packet_buffer_->PeekNextPacket()->timestamp ==
+ sync_buffer_->end_timestamp();
+ return result;
+}
+
+void NetEqImpl::EnableVad() {
+ MutexLock lock(&mutex_);
+ RTC_DCHECK(vad_.get());
+ vad_->Enable();
+}
+
+void NetEqImpl::DisableVad() {
+ MutexLock lock(&mutex_);
+ RTC_DCHECK(vad_.get());
+ vad_->Disable();
+}
+
+absl::optional<uint32_t> NetEqImpl::GetPlayoutTimestamp() const {
+ MutexLock lock(&mutex_);
+ if (first_packet_ || last_mode_ == Mode::kRfc3389Cng ||
+ last_mode_ == Mode::kCodecInternalCng) {
+ // We don't have a valid RTP timestamp until we have decoded our first
+ // RTP packet. Also, the RTP timestamp is not accurate while playing CNG,
+ // which is indicated by returning an empty value.
+ return absl::nullopt;
+ }
+ return timestamp_scaler_->ToExternal(playout_timestamp_);
+}
+
+int NetEqImpl::last_output_sample_rate_hz() const {
+ MutexLock lock(&mutex_);
+ return last_output_sample_rate_hz_;
+}
+
+absl::optional<NetEq::DecoderFormat> NetEqImpl::GetDecoderFormat(
+ int payload_type) const {
+ MutexLock lock(&mutex_);
+ const DecoderDatabase::DecoderInfo* const di =
+ decoder_database_->GetDecoderInfo(payload_type);
+ if (di) {
+ const AudioDecoder* const decoder = di->GetDecoder();
+ // TODO(kwiberg): Why the special case for RED?
+ return DecoderFormat{
+ /*sample_rate_hz=*/di->IsRed() ? 8000 : di->SampleRateHz(),
+ /*num_channels=*/
+ decoder ? rtc::dchecked_cast<int>(decoder->Channels()) : 1,
+ /*sdp_format=*/di->GetFormat()};
+ } else {
+ // Payload type not registered.
+ return absl::nullopt;
+ }
+}
+
+void NetEqImpl::FlushBuffers() {
+ MutexLock lock(&mutex_);
+ RTC_LOG(LS_VERBOSE) << "FlushBuffers";
+ packet_buffer_->Flush(stats_.get());
+ RTC_DCHECK(sync_buffer_.get());
+ RTC_DCHECK(expand_.get());
+ sync_buffer_->Flush();
+ sync_buffer_->set_next_index(sync_buffer_->next_index() -
+ expand_->overlap_length());
+ // Set to wait for new codec.
+ first_packet_ = true;
+}
+
+void NetEqImpl::EnableNack(size_t max_nack_list_size) {
+ MutexLock lock(&mutex_);
+ if (!nack_enabled_) {
+ nack_ = std::make_unique<NackTracker>();
+ nack_enabled_ = true;
+ nack_->UpdateSampleRate(fs_hz_);
+ }
+ nack_->SetMaxNackListSize(max_nack_list_size);
+}
+
+void NetEqImpl::DisableNack() {
+ MutexLock lock(&mutex_);
+ nack_.reset();
+ nack_enabled_ = false;
+}
+
+std::vector<uint16_t> NetEqImpl::GetNackList(int64_t round_trip_time_ms) const {
+ MutexLock lock(&mutex_);
+ if (!nack_enabled_) {
+ return std::vector<uint16_t>();
+ }
+ RTC_DCHECK(nack_.get());
+ return nack_->GetNackList(round_trip_time_ms);
+}
+
+int NetEqImpl::SyncBufferSizeMs() const {
+ MutexLock lock(&mutex_);
+ return rtc::dchecked_cast<int>(sync_buffer_->FutureLength() /
+ rtc::CheckedDivExact(fs_hz_, 1000));
+}
+
+const SyncBuffer* NetEqImpl::sync_buffer_for_test() const {
+ MutexLock lock(&mutex_);
+ return sync_buffer_.get();
+}
+
+NetEq::Operation NetEqImpl::last_operation_for_test() const {
+ MutexLock lock(&mutex_);
+ return last_operation_;
+}
+
+// Methods below this line are private.
+
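+// Packet-insertion pipeline, in order: wrap the payload in a Packet, rescale
+// timestamps to the internal domain, split RED payloads, divert DTMF events
+// to `dtmf_buffer_`, parse the remaining payloads into decodable frames, and
+// insert those into `packet_buffer_`.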
+int NetEqImpl::InsertPacketInternal(const RTPHeader& rtp_header,
+ rtc::ArrayView<const uint8_t> payload) {
+ if (payload.empty()) {
+ RTC_LOG_F(LS_ERROR) << "payload is empty";
+ return kInvalidPointer;
+ }
+
+ Timestamp receive_time = clock_->CurrentTime();
+ stats_->ReceivedPacket();
+
+ PacketList packet_list;
+ // Insert packet in a packet list.
+ packet_list.push_back([&rtp_header, &payload, &receive_time] {
+ // Convert to Packet.
+ Packet packet;
+ packet.payload_type = rtp_header.payloadType;
+ packet.sequence_number = rtp_header.sequenceNumber;
+ packet.timestamp = rtp_header.timestamp;
+ packet.payload.SetData(payload.data(), payload.size());
+ packet.packet_info = RtpPacketInfo(rtp_header, receive_time);
+ // Waiting time will be set upon inserting the packet in the buffer.
+ RTC_DCHECK(!packet.waiting_time);
+ return packet;
+ }());
+
+ bool update_sample_rate_and_channels = first_packet_;
+
+ if (update_sample_rate_and_channels) {
+ // Reset timestamp scaling.
+ timestamp_scaler_->Reset();
+ }
+
+ if (!decoder_database_->IsRed(rtp_header.payloadType)) {
+ // Scale timestamp to internal domain (only for some codecs).
+ timestamp_scaler_->ToInternal(&packet_list);
+ }
+
+ // Store these for later use, since the first packet may very well disappear
+ // before we need these values.
+ uint32_t main_timestamp = packet_list.front().timestamp;
+ uint8_t main_payload_type = packet_list.front().payload_type;
+ uint16_t main_sequence_number = packet_list.front().sequence_number;
+
+ // Reinitialize NetEq if it's needed (changed SSRC or first call).
+ if (update_sample_rate_and_channels) {
+ // Note: `first_packet_` will be cleared further down in this method, once
+ // the packet has been successfully inserted into the packet buffer.
+
+ // Flush the packet buffer and DTMF buffer.
+ packet_buffer_->Flush(stats_.get());
+ dtmf_buffer_->Flush();
+
+ // Update audio buffer timestamp.
+ sync_buffer_->IncreaseEndTimestamp(main_timestamp - timestamp_);
+
+ // Update codecs.
+ timestamp_ = main_timestamp;
+ }
+
+ if (nack_enabled_) {
+ RTC_DCHECK(nack_);
+ if (update_sample_rate_and_channels) {
+ nack_->Reset();
+ }
+ nack_->UpdateLastReceivedPacket(main_sequence_number, main_timestamp);
+ }
+
+ // Check for RED payload type, and separate payloads into several packets.
+ if (decoder_database_->IsRed(rtp_header.payloadType)) {
+ if (!red_payload_splitter_->SplitRed(&packet_list)) {
+ return kRedundancySplitError;
+ }
+ // Only accept a few RED payloads of the same type as the main data,
+ // DTMF events and CNG.
+ red_payload_splitter_->CheckRedPayloads(&packet_list, *decoder_database_);
+ if (packet_list.empty()) {
+ return kRedundancySplitError;
+ }
+ }
+
+ // Check payload types.
+ if (decoder_database_->CheckPayloadTypes(packet_list) ==
+ DecoderDatabase::kDecoderNotFound) {
+ return kUnknownRtpPayloadType;
+ }
+
+ RTC_DCHECK(!packet_list.empty());
+
+ // Update main_timestamp, if new packets appear in the list
+ // after RED splitting.
+ if (decoder_database_->IsRed(rtp_header.payloadType)) {
+ timestamp_scaler_->ToInternal(&packet_list);
+ main_timestamp = packet_list.front().timestamp;
+ main_payload_type = packet_list.front().payload_type;
+ main_sequence_number = packet_list.front().sequence_number;
+ }
+
+ // Process DTMF payloads. Cycle through the list of packets, and pick out any
+ // DTMF payloads found.
+ PacketList::iterator it = packet_list.begin();
+ while (it != packet_list.end()) {
+ const Packet& current_packet = (*it);
+ RTC_DCHECK(!current_packet.payload.empty());
+ if (decoder_database_->IsDtmf(current_packet.payload_type)) {
+ DtmfEvent event;
+ int ret = DtmfBuffer::ParseEvent(current_packet.timestamp,
+ current_packet.payload.data(),
+ current_packet.payload.size(), &event);
+ if (ret != DtmfBuffer::kOK) {
+ return kDtmfParsingError;
+ }
+ if (dtmf_buffer_->InsertEvent(event) != DtmfBuffer::kOK) {
+ return kDtmfInsertError;
+ }
+ it = packet_list.erase(it);
+ } else {
+ ++it;
+ }
+ }
+
+ PacketList parsed_packet_list;
+ bool is_dtx = false;
+ while (!packet_list.empty()) {
+ Packet& packet = packet_list.front();
+ const DecoderDatabase::DecoderInfo* info =
+ decoder_database_->GetDecoderInfo(packet.payload_type);
+ if (!info) {
+ RTC_LOG(LS_WARNING) << "SplitAudio unknown payload type";
+ return kUnknownRtpPayloadType;
+ }
+
+ if (info->IsComfortNoise()) {
+ // Carry comfort noise packets along.
+ parsed_packet_list.splice(parsed_packet_list.end(), packet_list,
+ packet_list.begin());
+ } else {
+ const auto sequence_number = packet.sequence_number;
+ const auto payload_type = packet.payload_type;
+ const Packet::Priority original_priority = packet.priority;
+ const auto& packet_info = packet.packet_info;
+ auto packet_from_result = [&](AudioDecoder::ParseResult& result) {
+ Packet new_packet;
+ new_packet.sequence_number = sequence_number;
+ new_packet.payload_type = payload_type;
+ new_packet.timestamp = result.timestamp;
+ new_packet.priority.codec_level = result.priority;
+ new_packet.priority.red_level = original_priority.red_level;
+ new_packet.packet_info = packet_info;
+ new_packet.frame = std::move(result.frame);
+ return new_packet;
+ };
+
+ std::vector<AudioDecoder::ParseResult> results =
+ info->GetDecoder()->ParsePayload(std::move(packet.payload),
+ packet.timestamp);
+ if (results.empty()) {
+ packet_list.pop_front();
+ } else {
+ bool first = true;
+ for (auto& result : results) {
+ RTC_DCHECK(result.frame);
+ RTC_DCHECK_GE(result.priority, 0);
+ is_dtx = is_dtx || result.frame->IsDtxPacket();
+ if (first) {
+ // Re-use the node and move it to parsed_packet_list.
+ packet_list.front() = packet_from_result(result);
+ parsed_packet_list.splice(parsed_packet_list.end(), packet_list,
+ packet_list.begin());
+ first = false;
+ } else {
+ parsed_packet_list.push_back(packet_from_result(result));
+ }
+ }
+ }
+ }
+ }
+
+ // Calculate the number of primary (non-FEC/RED) packets.
+ const size_t number_of_primary_packets = std::count_if(
+ parsed_packet_list.begin(), parsed_packet_list.end(),
+ [](const Packet& in) { return in.priority.codec_level == 0; });
+ if (number_of_primary_packets < parsed_packet_list.size()) {
+ stats_->SecondaryPacketsReceived(parsed_packet_list.size() -
+ number_of_primary_packets);
+ }
+
+ // Insert packets in buffer.
+ const int target_level_ms = controller_->TargetLevelMs();
+ const int ret = packet_buffer_->InsertPacketList(
+ &parsed_packet_list, *decoder_database_, &current_rtp_payload_type_,
+ &current_cng_rtp_payload_type_, stats_.get(), decoder_frame_length_,
+ last_output_sample_rate_hz_, target_level_ms);
+  bool buffer_flush_occurred = false;
+  if (ret == PacketBuffer::kFlushed) {
+    // Reset DSP timestamp etc. if packet buffer flushed.
+    new_codec_ = true;
+    update_sample_rate_and_channels = true;
+    buffer_flush_occurred = true;
+  } else if (ret == PacketBuffer::kPartialFlush) {
+    // Forward the sync buffer timestamp.
+    timestamp_ = packet_buffer_->PeekNextPacket()->timestamp;
+    sync_buffer_->IncreaseEndTimestamp(timestamp_ -
+                                       sync_buffer_->end_timestamp());
+    buffer_flush_occurred = true;
+  } else if (ret != PacketBuffer::kOK) {
+    return kOtherError;
+  }
+
+ if (first_packet_) {
+ first_packet_ = false;
+ // Update the codec on the next GetAudio call.
+ new_codec_ = true;
+ }
+
+ if (current_rtp_payload_type_) {
+ RTC_DCHECK(decoder_database_->GetDecoderInfo(*current_rtp_payload_type_))
+ << "Payload type " << static_cast<int>(*current_rtp_payload_type_)
+ << " is unknown where it shouldn't be";
+ }
+
+ if (update_sample_rate_and_channels && !packet_buffer_->Empty()) {
+    // We do not use `current_rtp_payload_type_` to set the payload type, but
+    // instead get the next RTP header from `packet_buffer_` to obtain it. The
+    // reason is the following corner case: if NetEq receives a CNG packet
+    // with a sample rate different from the current CNG, it flushes its
+    // buffer, assuming the send codec must have changed. However, the payload
+    // type of the hypothetically new send codec is not known.
+ const Packet* next_packet = packet_buffer_->PeekNextPacket();
+ RTC_DCHECK(next_packet);
+ const int payload_type = next_packet->payload_type;
+ size_t channels = 1;
+ if (!decoder_database_->IsComfortNoise(payload_type)) {
+ AudioDecoder* decoder = decoder_database_->GetDecoder(payload_type);
+ RTC_DCHECK(decoder); // Payloads are already checked to be valid.
+ channels = decoder->Channels();
+ }
+ const DecoderDatabase::DecoderInfo* decoder_info =
+ decoder_database_->GetDecoderInfo(payload_type);
+ RTC_DCHECK(decoder_info);
+ if (decoder_info->SampleRateHz() != fs_hz_ ||
+ channels != algorithm_buffer_->Channels()) {
+ SetSampleRateAndChannels(decoder_info->SampleRateHz(), channels);
+ }
+ if (nack_enabled_) {
+ RTC_DCHECK(nack_);
+ // Update the sample rate even if the rate is not new, because of Reset().
+ nack_->UpdateSampleRate(fs_hz_);
+ }
+ }
+
+ const DecoderDatabase::DecoderInfo* dec_info =
+ decoder_database_->GetDecoderInfo(main_payload_type);
+ RTC_DCHECK(dec_info); // Already checked that the payload type is known.
+
+ NetEqController::PacketArrivedInfo info;
+ info.is_cng_or_dtmf = dec_info->IsComfortNoise() || dec_info->IsDtmf();
+ info.packet_length_samples =
+ number_of_primary_packets * decoder_frame_length_;
+ info.main_timestamp = main_timestamp;
+ info.main_sequence_number = main_sequence_number;
+ info.is_dtx = is_dtx;
+  info.buffer_flush = buffer_flush_occurred;
+
+ const bool should_update_stats = !new_codec_;
+ auto relative_delay =
+ controller_->PacketArrived(fs_hz_, should_update_stats, info);
+ if (relative_delay) {
+ stats_->RelativePacketArrivalDelay(relative_delay.value());
+ }
+ return 0;
+}
+
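+// Produces one 10 ms output frame: check the muted-expand shortcut, ask
+// GetDecision() for an operation, decode any extracted packets, run the
+// selected DSP operation into `algorithm_buffer_`, and then pull the frame
+// out of `sync_buffer_`.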
+int NetEqImpl::GetAudioInternal(AudioFrame* audio_frame,
+ bool* muted,
+ absl::optional<Operation> action_override) {
+ PacketList packet_list;
+ DtmfEvent dtmf_event;
+ Operation operation;
+ bool play_dtmf;
+ *muted = false;
+ last_decoded_packet_infos_.clear();
+ tick_timer_->Increment();
+ stats_->IncreaseCounter(output_size_samples_, fs_hz_);
+ const auto lifetime_stats = stats_->GetLifetimeStatistics();
+ expand_uma_logger_.UpdateSampleCounter(lifetime_stats.concealed_samples,
+ fs_hz_);
+ speech_expand_uma_logger_.UpdateSampleCounter(
+ lifetime_stats.concealed_samples -
+ lifetime_stats.silent_concealed_samples,
+ fs_hz_);
+
+ // Check for muted state.
+ if (enable_muted_state_ && expand_->Muted() && packet_buffer_->Empty()) {
+ RTC_DCHECK_EQ(last_mode_, Mode::kExpand);
+ audio_frame->Reset();
+ RTC_DCHECK(audio_frame->muted()); // Reset() should mute the frame.
+ playout_timestamp_ += static_cast<uint32_t>(output_size_samples_);
+ audio_frame->sample_rate_hz_ = fs_hz_;
+ // Make sure the total number of samples fits in the AudioFrame.
+ if (output_size_samples_ * sync_buffer_->Channels() >
+ AudioFrame::kMaxDataSizeSamples) {
+ return kSampleUnderrun;
+ }
+ audio_frame->samples_per_channel_ = output_size_samples_;
+ audio_frame->timestamp_ =
+ first_packet_
+ ? 0
+ : timestamp_scaler_->ToExternal(playout_timestamp_) -
+ static_cast<uint32_t>(audio_frame->samples_per_channel_);
+ audio_frame->num_channels_ = sync_buffer_->Channels();
+ stats_->ExpandedNoiseSamples(output_size_samples_, false);
+ controller_->NotifyMutedState();
+ *muted = true;
+ return 0;
+ }
+ int return_value = GetDecision(&operation, &packet_list, &dtmf_event,
+ &play_dtmf, action_override);
+ if (return_value != 0) {
+ last_mode_ = Mode::kError;
+ return return_value;
+ }
+
+ AudioDecoder::SpeechType speech_type;
+ int length = 0;
+ const size_t start_num_packets = packet_list.size();
+ int decode_return_value =
+ Decode(&packet_list, &operation, &length, &speech_type);
+
+ RTC_DCHECK(vad_.get());
+ bool sid_frame_available =
+ (operation == Operation::kRfc3389Cng && !packet_list.empty());
+ vad_->Update(decoded_buffer_.get(), static_cast<size_t>(length), speech_type,
+ sid_frame_available, fs_hz_);
+
+  // This is the criterion that some data was decoded through the speech
+  // decoder and that the operation resulted in comfort noise.
+ const bool codec_internal_sid_frame =
+ (speech_type == AudioDecoder::kComfortNoise &&
+ start_num_packets > packet_list.size());
+
+ if (sid_frame_available || codec_internal_sid_frame) {
+ // Start a new stopwatch since we are decoding a new CNG packet.
+ generated_noise_stopwatch_ = tick_timer_->GetNewStopwatch();
+ }
+
+ algorithm_buffer_->Clear();
+ switch (operation) {
+ case Operation::kNormal: {
+ DoNormal(decoded_buffer_.get(), length, speech_type, play_dtmf);
+ if (length > 0) {
+ stats_->DecodedOutputPlayed();
+ }
+ break;
+ }
+ case Operation::kMerge: {
+ DoMerge(decoded_buffer_.get(), length, speech_type, play_dtmf);
+ break;
+ }
+ case Operation::kExpand: {
+ RTC_DCHECK_EQ(return_value, 0);
+ if (!current_rtp_payload_type_ || !DoCodecPlc()) {
+ return_value = DoExpand(play_dtmf);
+ }
+ RTC_DCHECK_GE(sync_buffer_->FutureLength() - expand_->overlap_length(),
+ output_size_samples_);
+ break;
+ }
+ case Operation::kAccelerate:
+ case Operation::kFastAccelerate: {
+ const bool fast_accelerate =
+ enable_fast_accelerate_ && (operation == Operation::kFastAccelerate);
+ return_value = DoAccelerate(decoded_buffer_.get(), length, speech_type,
+ play_dtmf, fast_accelerate);
+ break;
+ }
+ case Operation::kPreemptiveExpand: {
+ return_value = DoPreemptiveExpand(decoded_buffer_.get(), length,
+ speech_type, play_dtmf);
+ break;
+ }
+ case Operation::kRfc3389Cng:
+ case Operation::kRfc3389CngNoPacket: {
+ return_value = DoRfc3389Cng(&packet_list, play_dtmf);
+ break;
+ }
+ case Operation::kCodecInternalCng: {
+ // This handles the case when there is no transmission and the decoder
+ // should produce internal comfort noise.
+ // TODO(hlundin): Write test for codec-internal CNG.
+ DoCodecInternalCng(decoded_buffer_.get(), length);
+ break;
+ }
+ case Operation::kDtmf: {
+ // TODO(hlundin): Write test for this.
+ return_value = DoDtmf(dtmf_event, &play_dtmf);
+ break;
+ }
+ case Operation::kUndefined: {
+ RTC_LOG(LS_ERROR) << "Invalid operation kUndefined.";
+ RTC_DCHECK_NOTREACHED(); // This should not happen.
+ last_mode_ = Mode::kError;
+ return kInvalidOperation;
+ }
+ } // End of switch.
+ last_operation_ = operation;
+ if (return_value < 0) {
+ return return_value;
+ }
+
+ if (last_mode_ != Mode::kRfc3389Cng) {
+ comfort_noise_->Reset();
+ }
+
+  // We treat it as if all packets referenced by `last_decoded_packet_infos_`
+  // were mashed together when creating the samples in `algorithm_buffer_`.
+ RtpPacketInfos packet_infos(last_decoded_packet_infos_);
+
+ // Copy samples from `algorithm_buffer_` to `sync_buffer_`.
+ //
+ // TODO(bugs.webrtc.org/10757):
+ // We would in the future also like to pass `packet_infos` so that we can do
+ // sample-perfect tracking of that information across `sync_buffer_`.
+ sync_buffer_->PushBack(*algorithm_buffer_);
+
+ // Extract data from `sync_buffer_` to `output`.
+ size_t num_output_samples_per_channel = output_size_samples_;
+ size_t num_output_samples = output_size_samples_ * sync_buffer_->Channels();
+ if (num_output_samples > AudioFrame::kMaxDataSizeSamples) {
+ RTC_LOG(LS_WARNING) << "Output array is too short. "
+ << AudioFrame::kMaxDataSizeSamples << " < "
+ << output_size_samples_ << " * "
+ << sync_buffer_->Channels();
+ num_output_samples = AudioFrame::kMaxDataSizeSamples;
+ num_output_samples_per_channel =
+ AudioFrame::kMaxDataSizeSamples / sync_buffer_->Channels();
+ }
+ sync_buffer_->GetNextAudioInterleaved(num_output_samples_per_channel,
+ audio_frame);
+ audio_frame->sample_rate_hz_ = fs_hz_;
+ // TODO(bugs.webrtc.org/10757):
+ // We don't have the ability to properly track individual packets once their
+ // audio samples have entered `sync_buffer_`. So for now, treat it as if
+ // `packet_infos` from packets decoded by the current `GetAudioInternal()`
+ // call were all consumed assembling the current audio frame and the current
+ // audio frame only.
+ audio_frame->packet_infos_ = std::move(packet_infos);
+ if (sync_buffer_->FutureLength() < expand_->overlap_length()) {
+ // The sync buffer should always contain `overlap_length` samples, but now
+ // too many samples have been extracted. Reinstall the `overlap_length`
+ // lookahead by moving the index.
+ const size_t missing_lookahead_samples =
+ expand_->overlap_length() - sync_buffer_->FutureLength();
+ RTC_DCHECK_GE(sync_buffer_->next_index(), missing_lookahead_samples);
+ sync_buffer_->set_next_index(sync_buffer_->next_index() -
+ missing_lookahead_samples);
+ }
+ if (audio_frame->samples_per_channel_ != output_size_samples_) {
+ RTC_LOG(LS_ERROR) << "audio_frame->samples_per_channel_ ("
+ << audio_frame->samples_per_channel_
+ << ") != output_size_samples_ (" << output_size_samples_
+ << ")";
+ // TODO(minyue): treatment of under-run, filling zeros
+ audio_frame->Mute();
+ return kSampleUnderrun;
+ }
+
+ // Should always have overlap samples left in the `sync_buffer_`.
+ RTC_DCHECK_GE(sync_buffer_->FutureLength(), expand_->overlap_length());
+
+ // TODO(yujo): For muted frames, this can be a copy rather than an addition.
+ if (play_dtmf) {
+ return_value = DtmfOverdub(dtmf_event, sync_buffer_->Channels(),
+ audio_frame->mutable_data());
+ }
+
+ // Update the background noise parameters if last operation wrote data
+ // straight from the decoder to the `sync_buffer_`. That is, none of the
+ // operations that modify the signal can be followed by a parameter update.
+ if ((last_mode_ == Mode::kNormal) || (last_mode_ == Mode::kAccelerateFail) ||
+ (last_mode_ == Mode::kPreemptiveExpandFail) ||
+ (last_mode_ == Mode::kRfc3389Cng) ||
+ (last_mode_ == Mode::kCodecInternalCng)) {
+ background_noise_->Update(*sync_buffer_, *vad_.get());
+ }
+
+ if (operation == Operation::kDtmf) {
+    // DTMF data was written to the end of `sync_buffer_`.
+    // Update the index to the end of the DTMF data in `sync_buffer_`.
+ sync_buffer_->set_dtmf_index(sync_buffer_->Size());
+ }
+
+ if (last_mode_ != Mode::kExpand && last_mode_ != Mode::kCodecPlc) {
+ // If last operation was not expand, calculate the `playout_timestamp_` from
+ // the `sync_buffer_`. However, do not update the `playout_timestamp_` if it
+ // would be moved "backwards".
+ uint32_t temp_timestamp =
+ sync_buffer_->end_timestamp() -
+ static_cast<uint32_t>(sync_buffer_->FutureLength());
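+    // The subtract-and-cast-to-signed comparison below handles RTP timestamp
+    // wrap-around.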
+ if (static_cast<int32_t>(temp_timestamp - playout_timestamp_) > 0) {
+ playout_timestamp_ = temp_timestamp;
+ }
+ } else {
+ // Use dead reckoning to estimate the `playout_timestamp_`.
+ playout_timestamp_ += static_cast<uint32_t>(output_size_samples_);
+ }
+ // Set the timestamp in the audio frame to zero before the first packet has
+ // been inserted. Otherwise, subtract the frame size in samples to get the
+ // timestamp of the first sample in the frame (playout_timestamp_ is the
+ // last + 1).
+ audio_frame->timestamp_ =
+ first_packet_
+ ? 0
+ : timestamp_scaler_->ToExternal(playout_timestamp_) -
+ static_cast<uint32_t>(audio_frame->samples_per_channel_);
+
+ if (!(last_mode_ == Mode::kRfc3389Cng ||
+ last_mode_ == Mode::kCodecInternalCng || last_mode_ == Mode::kExpand ||
+ last_mode_ == Mode::kCodecPlc)) {
+ generated_noise_stopwatch_.reset();
+ }
+
+ if (decode_return_value)
+ return decode_return_value;
+ return return_value;
+}
+
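+// Decides the next operation: discards stale packets, skips already-played
+// CNG, queries `controller_` (subject to `action_override` and DTX
+// restrictions), handles codec resets, and extracts the packets the chosen
+// operation needs from `packet_buffer_`.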
+int NetEqImpl::GetDecision(Operation* operation,
+ PacketList* packet_list,
+ DtmfEvent* dtmf_event,
+ bool* play_dtmf,
+ absl::optional<Operation> action_override) {
+ // Initialize output variables.
+ *play_dtmf = false;
+ *operation = Operation::kUndefined;
+
+ RTC_DCHECK(sync_buffer_.get());
+ uint32_t end_timestamp = sync_buffer_->end_timestamp();
+ if (!new_codec_) {
+ const uint32_t five_seconds_samples = 5 * fs_hz_;
+ packet_buffer_->DiscardOldPackets(end_timestamp, five_seconds_samples,
+ stats_.get());
+ }
+ const Packet* packet = packet_buffer_->PeekNextPacket();
+
+ RTC_DCHECK(!generated_noise_stopwatch_ ||
+ generated_noise_stopwatch_->ElapsedTicks() >= 1);
+ uint64_t generated_noise_samples =
+ generated_noise_stopwatch_ ? (generated_noise_stopwatch_->ElapsedTicks() -
+ 1) * output_size_samples_ +
+ controller_->noise_fast_forward()
+ : 0;
+
+ if (controller_->CngRfc3389On() || last_mode_ == Mode::kRfc3389Cng) {
+ // Because of timestamp peculiarities, we have to "manually" disallow using
+ // a CNG packet with the same timestamp as the one that was last played.
+ // This can happen when using redundancy and will cause the timing to shift.
+ while (packet && decoder_database_->IsComfortNoise(packet->payload_type) &&
+ (end_timestamp >= packet->timestamp ||
+ end_timestamp + generated_noise_samples > packet->timestamp)) {
+ // Don't use this packet, discard it.
+ if (packet_buffer_->DiscardNextPacket(stats_.get()) !=
+ PacketBuffer::kOK) {
+ RTC_DCHECK_NOTREACHED(); // Must be ok by design.
+ }
+ // Check buffer again.
+ if (!new_codec_) {
+ packet_buffer_->DiscardOldPackets(end_timestamp, 5 * fs_hz_,
+ stats_.get());
+ }
+ packet = packet_buffer_->PeekNextPacket();
+ }
+ }
+
+ RTC_DCHECK(expand_.get());
+ const int samples_left = static_cast<int>(sync_buffer_->FutureLength() -
+ expand_->overlap_length());
+ if (last_mode_ == Mode::kAccelerateSuccess ||
+ last_mode_ == Mode::kAccelerateLowEnergy ||
+ last_mode_ == Mode::kPreemptiveExpandSuccess ||
+ last_mode_ == Mode::kPreemptiveExpandLowEnergy) {
+ // Subtract (samples_left + output_size_samples_) from sampleMemory.
+ controller_->AddSampleMemory(
+ -(samples_left + rtc::dchecked_cast<int>(output_size_samples_)));
+ }
+
+ // Check if it is time to play a DTMF event.
+ if (dtmf_buffer_->GetEvent(
+ static_cast<uint32_t>(end_timestamp + generated_noise_samples),
+ dtmf_event)) {
+ *play_dtmf = true;
+ }
+
+ // Get instruction.
+ RTC_DCHECK(sync_buffer_.get());
+ RTC_DCHECK(expand_.get());
+ generated_noise_samples =
+ generated_noise_stopwatch_
+ ? generated_noise_stopwatch_->ElapsedTicks() * output_size_samples_ +
+ controller_->noise_fast_forward()
+ : 0;
+ NetEqController::NetEqStatus status;
+ status.packet_buffer_info.dtx_or_cng =
+ packet_buffer_->ContainsDtxOrCngPacket(decoder_database_.get());
+ status.packet_buffer_info.num_samples =
+ packet_buffer_->NumSamplesInBuffer(decoder_frame_length_);
+ status.packet_buffer_info.span_samples = packet_buffer_->GetSpanSamples(
+ decoder_frame_length_, last_output_sample_rate_hz_, true);
+ status.packet_buffer_info.span_samples_no_dtx =
+ packet_buffer_->GetSpanSamples(decoder_frame_length_,
+ last_output_sample_rate_hz_, false);
+ status.packet_buffer_info.num_packets = packet_buffer_->NumPacketsInBuffer();
+ status.target_timestamp = sync_buffer_->end_timestamp();
+ status.expand_mutefactor = expand_->MuteFactor(0);
+ status.last_packet_samples = decoder_frame_length_;
+ status.last_mode = last_mode_;
+ status.play_dtmf = *play_dtmf;
+ status.generated_noise_samples = generated_noise_samples;
+ status.sync_buffer_samples = sync_buffer_->FutureLength();
+ if (packet) {
+ status.next_packet = {
+ packet->timestamp, packet->frame && packet->frame->IsDtxPacket(),
+ decoder_database_->IsComfortNoise(packet->payload_type)};
+ }
+ *operation = controller_->GetDecision(status, &reset_decoder_);
+
+  // Disallow time stretching if this packet is DTX, because such a decision
+  // may be based on an earlier buffer level estimate, as we do not update the
+  // buffer level during DTX. When we have a better way to update the buffer
+  // level during DTX, this restriction can be removed.
+ if (packet && packet->frame && packet->frame->IsDtxPacket() &&
+ (*operation == Operation::kMerge ||
+ *operation == Operation::kAccelerate ||
+ *operation == Operation::kFastAccelerate ||
+ *operation == Operation::kPreemptiveExpand)) {
+ *operation = Operation::kNormal;
+ }
+
+ if (action_override) {
+ // Use the provided action instead of the decision NetEq decided on.
+ *operation = *action_override;
+ }
+ // Check if we already have enough samples in the `sync_buffer_`. If so,
+ // change decision to normal, unless the decision was merge, accelerate, or
+ // preemptive expand.
+ if (samples_left >= rtc::dchecked_cast<int>(output_size_samples_) &&
+ *operation != Operation::kMerge && *operation != Operation::kAccelerate &&
+ *operation != Operation::kFastAccelerate &&
+ *operation != Operation::kPreemptiveExpand) {
+ *operation = Operation::kNormal;
+ return 0;
+ }
+
+ controller_->ExpandDecision(*operation);
+ if ((last_mode_ == Mode::kCodecPlc) && (*operation != Operation::kExpand)) {
+    // Getting out of the PLC expand mode; report interruptions.
+    // NetEq PLC reports this metric in expand.cc.
+ stats_->EndExpandEvent(fs_hz_);
+ }
+
+ // Check conditions for reset.
+ if (new_codec_ || *operation == Operation::kUndefined) {
+ // The only valid reason to get kUndefined is that new_codec_ is set.
+ RTC_DCHECK(new_codec_);
+ if (*play_dtmf && !packet) {
+ timestamp_ = dtmf_event->timestamp;
+ } else {
+ if (!packet) {
+        RTC_LOG(LS_ERROR) << "Packet missing where it shouldn't be.";
+ return -1;
+ }
+ timestamp_ = packet->timestamp;
+ if (*operation == Operation::kRfc3389CngNoPacket &&
+ decoder_database_->IsComfortNoise(packet->payload_type)) {
+ // Change decision to CNG packet, since we do have a CNG packet, but it
+ // was considered too early to use. Now, use it anyway.
+ *operation = Operation::kRfc3389Cng;
+ } else if (*operation != Operation::kRfc3389Cng) {
+ *operation = Operation::kNormal;
+ }
+ }
+ // Adjust `sync_buffer_` timestamp before setting `end_timestamp` to the
+ // new value.
+ sync_buffer_->IncreaseEndTimestamp(timestamp_ - end_timestamp);
+ end_timestamp = timestamp_;
+ new_codec_ = false;
+ controller_->SoftReset();
+ stats_->ResetMcu();
+ }
+
+ size_t required_samples = output_size_samples_;
+ const size_t samples_10_ms = static_cast<size_t>(80 * fs_mult_);
+ const size_t samples_20_ms = 2 * samples_10_ms;
+ const size_t samples_30_ms = 3 * samples_10_ms;
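+  // fs_mult_ is fs_hz_ / 8000, so samples_10_ms is 10 ms worth of samples at
+  // the current rate (80 at 8 kHz, 480 at 48 kHz).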
+
+ switch (*operation) {
+ case Operation::kExpand: {
+ timestamp_ = end_timestamp;
+ return 0;
+ }
+ case Operation::kRfc3389CngNoPacket:
+ case Operation::kCodecInternalCng: {
+ return 0;
+ }
+ case Operation::kDtmf: {
+ // TODO(hlundin): Write test for this.
+ // Update timestamp.
+ timestamp_ = end_timestamp;
+ const uint64_t generated_noise_samples =
+ generated_noise_stopwatch_
+ ? generated_noise_stopwatch_->ElapsedTicks() *
+ output_size_samples_ +
+ controller_->noise_fast_forward()
+ : 0;
+ if (generated_noise_samples > 0 && last_mode_ != Mode::kDtmf) {
+ // Make a jump in timestamp due to the recently played comfort noise.
+ uint32_t timestamp_jump =
+ static_cast<uint32_t>(generated_noise_samples);
+ sync_buffer_->IncreaseEndTimestamp(timestamp_jump);
+ timestamp_ += timestamp_jump;
+ }
+ return 0;
+ }
+ case Operation::kAccelerate:
+ case Operation::kFastAccelerate: {
+ // In order to do an accelerate we need at least 30 ms of audio data.
+ if (samples_left >= static_cast<int>(samples_30_ms)) {
+ // Already have enough data, so we do not need to extract any more.
+ controller_->set_sample_memory(samples_left);
+ controller_->set_prev_time_scale(true);
+ return 0;
+ } else if (samples_left >= static_cast<int>(samples_10_ms) &&
+ decoder_frame_length_ >= samples_30_ms) {
+ // Avoid decoding more data as it might overflow the playout buffer.
+ *operation = Operation::kNormal;
+ return 0;
+ } else if (samples_left < static_cast<int>(samples_20_ms) &&
+ decoder_frame_length_ < samples_30_ms) {
+ // Build up decoded data by decoding at least 20 ms of audio data. Do
+ // not perform accelerate yet, but wait until we only need to do one
+ // decoding.
+ required_samples = 2 * output_size_samples_;
+ *operation = Operation::kNormal;
+ }
+ // If none of the above is true, we have one of two possible situations:
+ // (1) 20 ms <= samples_left < 30 ms and decoder_frame_length_ < 30 ms; or
+ // (2) samples_left < 10 ms and decoder_frame_length_ >= 30 ms.
+ // In either case, we move on with the accelerate decision, and decode one
+ // frame now.
+ break;
+ }
+ case Operation::kPreemptiveExpand: {
+ // In order to do a preemptive expand we need at least 30 ms of decoded
+ // audio data.
+ if ((samples_left >= static_cast<int>(samples_30_ms)) ||
+ (samples_left >= static_cast<int>(samples_10_ms) &&
+ decoder_frame_length_ >= samples_30_ms)) {
+ // Already have enough data, so we do not need to extract any more.
+ // Or, avoid decoding more data as it might overflow the playout buffer.
+ // Still try preemptive expand, though.
+ controller_->set_sample_memory(samples_left);
+ controller_->set_prev_time_scale(true);
+ return 0;
+ }
+ if (samples_left < static_cast<int>(samples_20_ms) &&
+ decoder_frame_length_ < samples_30_ms) {
+ // Build up decoded data by decoding at least 20 ms of audio data.
+ // Still try to perform preemptive expand.
+ required_samples = 2 * output_size_samples_;
+ }
+ // Move on with the preemptive expand decision.
+ break;
+ }
+ case Operation::kMerge: {
+ required_samples =
+ std::max(merge_->RequiredFutureSamples(), required_samples);
+ break;
+ }
+ default: {
+ // Do nothing.
+ }
+ }
+
+ // Get packets from buffer.
+ int extracted_samples = 0;
+ if (packet) {
+ sync_buffer_->IncreaseEndTimestamp(packet->timestamp - end_timestamp);
+
+ if (*operation != Operation::kRfc3389Cng) {
+ // We are about to decode and use a non-CNG packet.
+ controller_->SetCngOff();
+ }
+
+ extracted_samples = ExtractPackets(required_samples, packet_list);
+ if (extracted_samples < 0) {
+ return kPacketBufferCorruption;
+ }
+ }
+
+ if (*operation == Operation::kAccelerate ||
+ *operation == Operation::kFastAccelerate ||
+ *operation == Operation::kPreemptiveExpand) {
+ controller_->set_sample_memory(samples_left + extracted_samples);
+ controller_->set_prev_time_scale(true);
+ }
+
+ if (*operation == Operation::kAccelerate ||
+ *operation == Operation::kFastAccelerate) {
+ // Check that we have enough data (30ms) to do accelerate.
+ if (extracted_samples + samples_left < static_cast<int>(samples_30_ms)) {
+ // TODO(hlundin): Write test for this.
+ // Not enough, do normal operation instead.
+ *operation = Operation::kNormal;
+ }
+ }
+
+ timestamp_ = sync_buffer_->end_timestamp();
+ return 0;
+}
+
+int NetEqImpl::Decode(PacketList* packet_list,
+ Operation* operation,
+ int* decoded_length,
+ AudioDecoder::SpeechType* speech_type) {
+ *speech_type = AudioDecoder::kSpeech;
+
+ // When packet_list is empty, we may be in kCodecInternalCng mode, and for
+ // that we use current active decoder.
+ AudioDecoder* decoder = decoder_database_->GetActiveDecoder();
+
+ if (!packet_list->empty()) {
+ const Packet& packet = packet_list->front();
+ uint8_t payload_type = packet.payload_type;
+ if (!decoder_database_->IsComfortNoise(payload_type)) {
+ decoder = decoder_database_->GetDecoder(payload_type);
+ RTC_DCHECK(decoder);
+ if (!decoder) {
+ RTC_LOG(LS_WARNING)
+ << "Unknown payload type " << static_cast<int>(payload_type);
+ packet_list->clear();
+ return kDecoderNotFound;
+ }
+ bool decoder_changed;
+ decoder_database_->SetActiveDecoder(payload_type, &decoder_changed);
+ if (decoder_changed) {
+ // We have a new decoder. Re-init some values.
+ const DecoderDatabase::DecoderInfo* decoder_info =
+ decoder_database_->GetDecoderInfo(payload_type);
+ RTC_DCHECK(decoder_info);
+ if (!decoder_info) {
+ RTC_LOG(LS_WARNING)
+ << "Unknown payload type " << static_cast<int>(payload_type);
+ packet_list->clear();
+ return kDecoderNotFound;
+ }
+ // If sampling rate or number of channels has changed, we need to make
+ // a reset.
+ if (decoder_info->SampleRateHz() != fs_hz_ ||
+ decoder->Channels() != algorithm_buffer_->Channels()) {
+ // TODO(tlegrand): Add unittest to cover this event.
+ SetSampleRateAndChannels(decoder_info->SampleRateHz(),
+ decoder->Channels());
+ }
+ sync_buffer_->set_end_timestamp(timestamp_);
+ playout_timestamp_ = timestamp_;
+ }
+ }
+ }
+
+ if (reset_decoder_) {
+ // TODO(hlundin): Write test for this.
+ if (decoder)
+ decoder->Reset();
+
+ // Reset comfort noise decoder.
+ ComfortNoiseDecoder* cng_decoder = decoder_database_->GetActiveCngDecoder();
+ if (cng_decoder)
+ cng_decoder->Reset();
+
+ reset_decoder_ = false;
+ }
+
+ *decoded_length = 0;
+ // Update codec-internal PLC state.
+ if ((*operation == Operation::kMerge) && decoder && decoder->HasDecodePlc()) {
+ decoder->DecodePlc(1, &decoded_buffer_[*decoded_length]);
+ }
+
+ int return_value;
+ if (*operation == Operation::kCodecInternalCng) {
+ RTC_DCHECK(packet_list->empty());
+ return_value = DecodeCng(decoder, decoded_length, speech_type);
+ } else {
+ return_value = DecodeLoop(packet_list, *operation, decoder, decoded_length,
+ speech_type);
+ }
+
+ if (*decoded_length < 0) {
+ // Error returned from the decoder.
+ *decoded_length = 0;
+ sync_buffer_->IncreaseEndTimestamp(
+ static_cast<uint32_t>(decoder_frame_length_));
+ int error_code = 0;
+ if (decoder)
+ error_code = decoder->ErrorCode();
+ if (error_code != 0) {
+ // Got some error code from the decoder.
+ return_value = kDecoderErrorCode;
+ RTC_LOG(LS_WARNING) << "Decoder returned error code: " << error_code;
+ } else {
+ // Decoder does not implement error codes. Return generic error.
+ return_value = kOtherDecoderError;
+ RTC_LOG(LS_WARNING) << "Decoder error (no error code)";
+ }
+ *operation = Operation::kExpand; // Do expansion to get data instead.
+ }
+ if (*speech_type != AudioDecoder::kComfortNoise) {
+    // Don't increment the timestamp if the codec returned the CNG speech
+    // type, since in that case the CNGplayedTS counter is incremented
+    // instead. Increase by the number of samples per channel.
+ RTC_DCHECK(*decoded_length == 0 ||
+ (decoder && decoder->Channels() == sync_buffer_->Channels()));
+ sync_buffer_->IncreaseEndTimestamp(
+ *decoded_length / static_cast<int>(sync_buffer_->Channels()));
+ }
+ return return_value;
+}
+
+int NetEqImpl::DecodeCng(AudioDecoder* decoder,
+ int* decoded_length,
+ AudioDecoder::SpeechType* speech_type) {
+ if (!decoder) {
+ // This happens when active decoder is not defined.
+ *decoded_length = -1;
+ return 0;
+ }
+
+ while (*decoded_length < rtc::dchecked_cast<int>(output_size_samples_)) {
+ const int length = decoder->Decode(
+ nullptr, 0, fs_hz_,
+ (decoded_buffer_length_ - *decoded_length) * sizeof(int16_t),
+ &decoded_buffer_[*decoded_length], speech_type);
+ if (length > 0) {
+ *decoded_length += length;
+ } else {
+ // Error.
+ RTC_LOG(LS_WARNING) << "Failed to decode CNG";
+ *decoded_length = -1;
+ break;
+ }
+ if (*decoded_length > static_cast<int>(decoded_buffer_length_)) {
+ // Guard against overflow.
+ RTC_LOG(LS_WARNING) << "Decoded too much CNG.";
+ return kDecodedTooMuch;
+ }
+ }
+ stats_->GeneratedNoiseSamples(*decoded_length);
+ return 0;
+}
+
+int NetEqImpl::DecodeLoop(PacketList* packet_list,
+ const Operation& operation,
+ AudioDecoder* decoder,
+ int* decoded_length,
+ AudioDecoder::SpeechType* speech_type) {
+ RTC_DCHECK(last_decoded_packet_infos_.empty());
+
+ // Do decoding.
+ while (!packet_list->empty() && !decoder_database_->IsComfortNoise(
+ packet_list->front().payload_type)) {
+ RTC_DCHECK(decoder); // At this point, we must have a decoder object.
+    // The number of channels in the `sync_buffer_` should be the same as the
+    // number of decoder channels.
+ RTC_DCHECK_EQ(sync_buffer_->Channels(), decoder->Channels());
+ RTC_DCHECK_GE(decoded_buffer_length_, kMaxFrameSize * decoder->Channels());
+ RTC_DCHECK(operation == Operation::kNormal ||
+ operation == Operation::kAccelerate ||
+ operation == Operation::kFastAccelerate ||
+ operation == Operation::kMerge ||
+ operation == Operation::kPreemptiveExpand);
+
+ auto opt_result = packet_list->front().frame->Decode(
+ rtc::ArrayView<int16_t>(&decoded_buffer_[*decoded_length],
+ decoded_buffer_length_ - *decoded_length));
+ last_decoded_packet_infos_.push_back(
+ std::move(packet_list->front().packet_info));
+ packet_list->pop_front();
+ if (opt_result) {
+ const auto& result = *opt_result;
+ *speech_type = result.speech_type;
+ if (result.num_decoded_samples > 0) {
+ *decoded_length += rtc::dchecked_cast<int>(result.num_decoded_samples);
+ // Update `decoder_frame_length_` with number of samples per channel.
+ decoder_frame_length_ =
+ result.num_decoded_samples / decoder->Channels();
+ }
+ } else {
+ // Error.
+ // TODO(ossu): What to put here?
+ RTC_LOG(LS_WARNING) << "Decode error";
+ *decoded_length = -1;
+ last_decoded_packet_infos_.clear();
+ packet_list->clear();
+ break;
+ }
+ if (*decoded_length > rtc::dchecked_cast<int>(decoded_buffer_length_)) {
+ // Guard against overflow.
+ RTC_LOG(LS_WARNING) << "Decoded too much.";
+ packet_list->clear();
+ return kDecodedTooMuch;
+ }
+ } // End of decode loop.
+
+  // If the list is not empty at this point, either a decoding error terminated
+  // the while-loop, or the list holds exactly one CNG packet.
+ RTC_DCHECK(
+ packet_list->empty() || *decoded_length < 0 ||
+ (packet_list->size() == 1 &&
+ decoder_database_->IsComfortNoise(packet_list->front().payload_type)));
+ return 0;
+}
+
+void NetEqImpl::DoNormal(const int16_t* decoded_buffer,
+ size_t decoded_length,
+ AudioDecoder::SpeechType speech_type,
+ bool play_dtmf) {
+ RTC_DCHECK(normal_.get());
+ normal_->Process(decoded_buffer, decoded_length, last_mode_,
+ algorithm_buffer_.get());
+ if (decoded_length != 0) {
+ last_mode_ = Mode::kNormal;
+ }
+
+ // If last packet was decoded as an inband CNG, set mode to CNG instead.
+ if ((speech_type == AudioDecoder::kComfortNoise) ||
+ ((last_mode_ == Mode::kCodecInternalCng) && (decoded_length == 0))) {
+ // TODO(hlundin): Remove second part of || statement above.
+ last_mode_ = Mode::kCodecInternalCng;
+ }
+
+ if (!play_dtmf) {
+ dtmf_tone_generator_->Reset();
+ }
+}
+
+void NetEqImpl::DoMerge(int16_t* decoded_buffer,
+ size_t decoded_length,
+ AudioDecoder::SpeechType speech_type,
+ bool play_dtmf) {
+ RTC_DCHECK(merge_.get());
+ size_t new_length =
+ merge_->Process(decoded_buffer, decoded_length, algorithm_buffer_.get());
+ // Correction can be negative.
+ int expand_length_correction =
+ rtc::dchecked_cast<int>(new_length) -
+ rtc::dchecked_cast<int>(decoded_length / algorithm_buffer_->Channels());
+
+ // Update in-call and post-call statistics.
+ if (expand_->MuteFactor(0) == 0) {
+ // Expand generates only noise.
+ stats_->ExpandedNoiseSamplesCorrection(expand_length_correction);
+ } else {
+ // Expansion generates more than only noise.
+ stats_->ExpandedVoiceSamplesCorrection(expand_length_correction);
+ }
+
+ last_mode_ = Mode::kMerge;
+ // If last packet was decoded as an inband CNG, set mode to CNG instead.
+ if (speech_type == AudioDecoder::kComfortNoise) {
+ last_mode_ = Mode::kCodecInternalCng;
+ }
+ expand_->Reset();
+ if (!play_dtmf) {
+ dtmf_tone_generator_->Reset();
+ }
+}
+
+bool NetEqImpl::DoCodecPlc() {
+ AudioDecoder* decoder = decoder_database_->GetActiveDecoder();
+ if (!decoder) {
+ return false;
+ }
+ const size_t channels = algorithm_buffer_->Channels();
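+  // Request only enough concealment audio to fill one output frame, counting
+  // the lookahead-adjusted audio already available in the sync buffer.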
+ const size_t requested_samples_per_channel =
+ output_size_samples_ -
+ (sync_buffer_->FutureLength() - expand_->overlap_length());
+ concealment_audio_.Clear();
+ decoder->GeneratePlc(requested_samples_per_channel, &concealment_audio_);
+ if (concealment_audio_.empty()) {
+ // Nothing produced. Resort to regular expand.
+ return false;
+ }
+ RTC_CHECK_GE(concealment_audio_.size(),
+ requested_samples_per_channel * channels);
+ sync_buffer_->PushBackInterleaved(concealment_audio_);
+ RTC_DCHECK_NE(algorithm_buffer_->Channels(), 0);
+ const size_t concealed_samples_per_channel =
+ concealment_audio_.size() / channels;
+
+ // Update in-call and post-call statistics.
+ const bool is_new_concealment_event = (last_mode_ != Mode::kCodecPlc);
+ if (std::all_of(concealment_audio_.cbegin(), concealment_audio_.cend(),
+ [](int16_t i) { return i == 0; })) {
+ // Expand operation generates only noise.
+ stats_->ExpandedNoiseSamples(concealed_samples_per_channel,
+ is_new_concealment_event);
+ } else {
+ // Expand operation generates more than only noise.
+ stats_->ExpandedVoiceSamples(concealed_samples_per_channel,
+ is_new_concealment_event);
+ }
+ last_mode_ = Mode::kCodecPlc;
+ if (!generated_noise_stopwatch_) {
+ // Start a new stopwatch since we may be covering for a lost CNG packet.
+ generated_noise_stopwatch_ = tick_timer_->GetNewStopwatch();
+ }
+ return true;
+}
+
+int NetEqImpl::DoExpand(bool play_dtmf) {
+ while ((sync_buffer_->FutureLength() - expand_->overlap_length()) <
+ output_size_samples_) {
+ algorithm_buffer_->Clear();
+ int return_value = expand_->Process(algorithm_buffer_.get());
+ size_t length = algorithm_buffer_->Size();
+ bool is_new_concealment_event = (last_mode_ != Mode::kExpand);
+
+ // Update in-call and post-call statistics.
+ if (expand_->MuteFactor(0) == 0) {
+ // Expand operation generates only noise.
+ stats_->ExpandedNoiseSamples(length, is_new_concealment_event);
+ } else {
+ // Expand operation generates more than only noise.
+ stats_->ExpandedVoiceSamples(length, is_new_concealment_event);
+ }
+
+ last_mode_ = Mode::kExpand;
+
+ if (return_value < 0) {
+ return return_value;
+ }
+
+ sync_buffer_->PushBack(*algorithm_buffer_);
+ algorithm_buffer_->Clear();
+ }
+ if (!play_dtmf) {
+ dtmf_tone_generator_->Reset();
+ }
+
+ if (!generated_noise_stopwatch_) {
+ // Start a new stopwatch since we may be covering for a lost CNG packet.
+ generated_noise_stopwatch_ = tick_timer_->GetNewStopwatch();
+ }
+
+ return 0;
+}
+
+int NetEqImpl::DoAccelerate(int16_t* decoded_buffer,
+ size_t decoded_length,
+ AudioDecoder::SpeechType speech_type,
+ bool play_dtmf,
+ bool fast_accelerate) {
+ const size_t required_samples =
+ static_cast<size_t>(240 * fs_mult_); // Must have 30 ms.
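+  // 240 samples is 30 ms at 8 kHz; fs_mult_ scales this to the current rate
+  // (e.g. 1440 samples at 48 kHz).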
+ size_t borrowed_samples_per_channel = 0;
+ size_t num_channels = algorithm_buffer_->Channels();
+ size_t decoded_length_per_channel = decoded_length / num_channels;
+ if (decoded_length_per_channel < required_samples) {
+ // Must move data from the `sync_buffer_` in order to get 30 ms.
+ borrowed_samples_per_channel =
+ static_cast<int>(required_samples - decoded_length_per_channel);
+ memmove(&decoded_buffer[borrowed_samples_per_channel * num_channels],
+ decoded_buffer, sizeof(int16_t) * decoded_length);
+ sync_buffer_->ReadInterleavedFromEnd(borrowed_samples_per_channel,
+ decoded_buffer);
+ decoded_length = required_samples * num_channels;
+ }
+
+ size_t samples_removed = 0;
+ Accelerate::ReturnCodes return_code =
+ accelerate_->Process(decoded_buffer, decoded_length, fast_accelerate,
+ algorithm_buffer_.get(), &samples_removed);
+ stats_->AcceleratedSamples(samples_removed);
+ switch (return_code) {
+ case Accelerate::kSuccess:
+ last_mode_ = Mode::kAccelerateSuccess;
+ break;
+ case Accelerate::kSuccessLowEnergy:
+ last_mode_ = Mode::kAccelerateLowEnergy;
+ break;
+ case Accelerate::kNoStretch:
+ last_mode_ = Mode::kAccelerateFail;
+ break;
+ case Accelerate::kError:
+ // TODO(hlundin): Map to Modes::kError instead?
+ last_mode_ = Mode::kAccelerateFail;
+ return kAccelerateError;
+ }
+
+ if (borrowed_samples_per_channel > 0) {
+ // Copy borrowed samples back to the `sync_buffer_`.
+ size_t length = algorithm_buffer_->Size();
+ if (length < borrowed_samples_per_channel) {
+ // This destroys the beginning of the buffer, but will not cause any
+ // problems.
+ sync_buffer_->ReplaceAtIndex(
+ *algorithm_buffer_,
+ sync_buffer_->Size() - borrowed_samples_per_channel);
+ sync_buffer_->PushFrontZeros(borrowed_samples_per_channel - length);
+ algorithm_buffer_->PopFront(length);
+ RTC_DCHECK(algorithm_buffer_->Empty());
+ } else {
+ sync_buffer_->ReplaceAtIndex(
+ *algorithm_buffer_, borrowed_samples_per_channel,
+ sync_buffer_->Size() - borrowed_samples_per_channel);
+ algorithm_buffer_->PopFront(borrowed_samples_per_channel);
+ }
+ }
+
+ // If last packet was decoded as an inband CNG, set mode to CNG instead.
+ if (speech_type == AudioDecoder::kComfortNoise) {
+ last_mode_ = Mode::kCodecInternalCng;
+ }
+ if (!play_dtmf) {
+ dtmf_tone_generator_->Reset();
+ }
+ expand_->Reset();
+ return 0;
+}
+
+int NetEqImpl::DoPreemptiveExpand(int16_t* decoded_buffer,
+ size_t decoded_length,
+ AudioDecoder::SpeechType speech_type,
+ bool play_dtmf) {
+ const size_t required_samples =
+ static_cast<size_t>(240 * fs_mult_); // Must have 30 ms.
+ size_t num_channels = algorithm_buffer_->Channels();
+ size_t borrowed_samples_per_channel = 0;
+ size_t old_borrowed_samples_per_channel = 0;
+ size_t decoded_length_per_channel = decoded_length / num_channels;
+ if (decoded_length_per_channel < required_samples) {
+ // Must move data from the `sync_buffer_` in order to get 30 ms.
+ borrowed_samples_per_channel =
+ required_samples - decoded_length_per_channel;
+ // Calculate how many of these were already played out.
+ old_borrowed_samples_per_channel =
+ (borrowed_samples_per_channel > sync_buffer_->FutureLength())
+ ? (borrowed_samples_per_channel - sync_buffer_->FutureLength())
+ : 0;
+ memmove(&decoded_buffer[borrowed_samples_per_channel * num_channels],
+ decoded_buffer, sizeof(int16_t) * decoded_length);
+ sync_buffer_->ReadInterleavedFromEnd(borrowed_samples_per_channel,
+ decoded_buffer);
+ decoded_length = required_samples * num_channels;
+ }
+
+ size_t samples_added = 0;
+ PreemptiveExpand::ReturnCodes return_code = preemptive_expand_->Process(
+ decoded_buffer, decoded_length, old_borrowed_samples_per_channel,
+ algorithm_buffer_.get(), &samples_added);
+ stats_->PreemptiveExpandedSamples(samples_added);
+ switch (return_code) {
+ case PreemptiveExpand::kSuccess:
+ last_mode_ = Mode::kPreemptiveExpandSuccess;
+ break;
+ case PreemptiveExpand::kSuccessLowEnergy:
+ last_mode_ = Mode::kPreemptiveExpandLowEnergy;
+ break;
+ case PreemptiveExpand::kNoStretch:
+ last_mode_ = Mode::kPreemptiveExpandFail;
+ break;
+ case PreemptiveExpand::kError:
+ // TODO(hlundin): Map to Modes::kError instead?
+ last_mode_ = Mode::kPreemptiveExpandFail;
+ return kPreemptiveExpandError;
+ }
+
+ if (borrowed_samples_per_channel > 0) {
+ // Copy borrowed samples back to the `sync_buffer_`.
+ sync_buffer_->ReplaceAtIndex(
+ *algorithm_buffer_, borrowed_samples_per_channel,
+ sync_buffer_->Size() - borrowed_samples_per_channel);
+ algorithm_buffer_->PopFront(borrowed_samples_per_channel);
+ }
+
+ // If last packet was decoded as an inband CNG, set mode to CNG instead.
+ if (speech_type == AudioDecoder::kComfortNoise) {
+ last_mode_ = Mode::kCodecInternalCng;
+ }
+ if (!play_dtmf) {
+ dtmf_tone_generator_->Reset();
+ }
+ expand_->Reset();
+ return 0;
+}
+
+int NetEqImpl::DoRfc3389Cng(PacketList* packet_list, bool play_dtmf) {
+ if (!packet_list->empty()) {
+ // Must have exactly one SID frame at this point.
+ RTC_DCHECK_EQ(packet_list->size(), 1);
+ const Packet& packet = packet_list->front();
+ if (!decoder_database_->IsComfortNoise(packet.payload_type)) {
+ RTC_LOG(LS_ERROR) << "Trying to decode non-CNG payload as CNG.";
+ return kOtherError;
+ }
+ if (comfort_noise_->UpdateParameters(packet) ==
+ ComfortNoise::kInternalError) {
+ algorithm_buffer_->Zeros(output_size_samples_);
+ return -comfort_noise_->internal_error_code();
+ }
+ }
+ int cn_return =
+ comfort_noise_->Generate(output_size_samples_, algorithm_buffer_.get());
+ expand_->Reset();
+ last_mode_ = Mode::kRfc3389Cng;
+ if (!play_dtmf) {
+ dtmf_tone_generator_->Reset();
+ }
+ if (cn_return == ComfortNoise::kInternalError) {
+ RTC_LOG(LS_WARNING) << "Comfort noise generator returned error code: "
+ << comfort_noise_->internal_error_code();
+ return kComfortNoiseErrorCode;
+ } else if (cn_return == ComfortNoise::kUnknownPayloadType) {
+ return kUnknownRtpPayloadType;
+ }
+ return 0;
+}
+
+void NetEqImpl::DoCodecInternalCng(const int16_t* decoded_buffer,
+ size_t decoded_length) {
+ RTC_DCHECK(normal_.get());
+ normal_->Process(decoded_buffer, decoded_length, last_mode_,
+ algorithm_buffer_.get());
+ last_mode_ = Mode::kCodecInternalCng;
+ expand_->Reset();
+}
+
+int NetEqImpl::DoDtmf(const DtmfEvent& dtmf_event, bool* play_dtmf) {
+  // This block of code, and the block further down that handles `dtmf_switch`,
+  // are commented out. Otherwise, playing out-of-band DTMF would fail in the
+  // VoE test DtmfTest.ManualSuccessfullySendsOutOfBandTelephoneEvents. This is
+  // equivalent to `dtmf_switch` always being false.
+  //
+  // See http://webrtc-codereview.appspot.com/1195004/ for a discussion of
+  // this issue. This change might cause some glitches at the point of
+  // switching from audio to DTMF. Issue 1545 is filed to track this.
+ //
+ // bool dtmf_switch = false;
+ // if ((last_mode_ != Modes::kDtmf) &&
+ // dtmf_tone_generator_->initialized()) {
+ // // Special case; see below.
+ // // We must catch this before calling Generate, since `initialized` is
+ // // modified in that call.
+ // dtmf_switch = true;
+ // }
+
+ int dtmf_return_value = 0;
+ if (!dtmf_tone_generator_->initialized()) {
+ // Initialize if not already done.
+ dtmf_return_value = dtmf_tone_generator_->Init(fs_hz_, dtmf_event.event_no,
+ dtmf_event.volume);
+ }
+
+ if (dtmf_return_value == 0) {
+ // Generate DTMF signal.
+ dtmf_return_value = dtmf_tone_generator_->Generate(output_size_samples_,
+ algorithm_buffer_.get());
+ }
+
+ if (dtmf_return_value < 0) {
+ algorithm_buffer_->Zeros(output_size_samples_);
+ return dtmf_return_value;
+ }
+
+ // if (dtmf_switch) {
+ // // This is the special case where the previous operation was DTMF
+ // // overdub, but the current instruction is "regular" DTMF. We must make
+ // // sure that the DTMF does not have any discontinuities. The first DTMF
+ // // sample that we generate now must be played out immediately, therefore
+ // // it must be copied to the speech buffer.
+ // // TODO(hlundin): This code seems incorrect. (Legacy.) Write test and
+ // // verify correct operation.
+ // RTC_DCHECK_NOTREACHED();
+ // // Must generate enough data to replace all of the `sync_buffer_`
+ // // "future".
+ // int required_length = sync_buffer_->FutureLength();
+ // RTC_DCHECK(dtmf_tone_generator_->initialized());
+ // dtmf_return_value = dtmf_tone_generator_->Generate(required_length,
+ // algorithm_buffer_);
+ // RTC_DCHECK((size_t) required_length == algorithm_buffer_->Size());
+ // if (dtmf_return_value < 0) {
+ // algorithm_buffer_->Zeros(output_size_samples_);
+ // return dtmf_return_value;
+ // }
+ //
+ // // Overwrite the "future" part of the speech buffer with the new DTMF
+ // // data.
+ // // TODO(hlundin): It seems that this overwriting has gone lost.
+ // // Not adapted for multi-channel yet.
+ // RTC_DCHECK(algorithm_buffer_->Channels() == 1);
+ // if (algorithm_buffer_->Channels() != 1) {
+ // RTC_LOG(LS_WARNING) << "DTMF not supported for more than one channel";
+ // return kStereoNotSupported;
+ // }
+ // // Shuffle the remaining data to the beginning of algorithm buffer.
+ // algorithm_buffer_->PopFront(sync_buffer_->FutureLength());
+ // }
+
+ sync_buffer_->IncreaseEndTimestamp(
+ static_cast<uint32_t>(output_size_samples_));
+ expand_->Reset();
+ last_mode_ = Mode::kDtmf;
+
+ // Set to false because the DTMF is already in the algorithm buffer.
+ *play_dtmf = false;
+ return 0;
+}
+
+int NetEqImpl::DtmfOverdub(const DtmfEvent& dtmf_event,
+ size_t num_channels,
+ int16_t* output) const {
+ size_t out_index = 0;
+ size_t overdub_length = output_size_samples_; // Default value.
+
+ if (sync_buffer_->dtmf_index() > sync_buffer_->next_index()) {
+ // Special operation for transition from "DTMF only" to "DTMF overdub".
+ out_index =
+ std::min(sync_buffer_->dtmf_index() - sync_buffer_->next_index(),
+ output_size_samples_);
+ overdub_length = output_size_samples_ - out_index;
+ }
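+  // Illustrative example (comment only, with assumed numbers): for 10 ms
+  // output frames at 8 kHz, output_size_samples_ == 80. If dtmf_index() is
+  // 40 samples ahead of next_index(), then out_index == min(40, 80) == 40
+  // and overdub_length == 80 - 40 == 40, so the first 40 output samples are
+  // left untouched and DTMF is overdubbed only on the remaining 40.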
+
+ AudioMultiVector dtmf_output(num_channels);
+ int dtmf_return_value = 0;
+ if (!dtmf_tone_generator_->initialized()) {
+ dtmf_return_value = dtmf_tone_generator_->Init(fs_hz_, dtmf_event.event_no,
+ dtmf_event.volume);
+ }
+ if (dtmf_return_value == 0) {
+ dtmf_return_value =
+ dtmf_tone_generator_->Generate(overdub_length, &dtmf_output);
+ RTC_DCHECK_EQ(overdub_length, dtmf_output.Size());
+ }
+ dtmf_output.ReadInterleaved(overdub_length, &output[out_index]);
+ return dtmf_return_value < 0 ? dtmf_return_value : 0;
+}
+
+int NetEqImpl::ExtractPackets(size_t required_samples,
+ PacketList* packet_list) {
+ bool first_packet = true;
+ uint8_t prev_payload_type = 0;
+ uint32_t prev_timestamp = 0;
+ uint16_t prev_sequence_number = 0;
+ bool next_packet_available = false;
+
+ const Packet* next_packet = packet_buffer_->PeekNextPacket();
+ RTC_DCHECK(next_packet);
+ if (!next_packet) {
+ RTC_LOG(LS_ERROR) << "Packet buffer unexpectedly empty.";
+ return -1;
+ }
+ uint32_t first_timestamp = next_packet->timestamp;
+ size_t extracted_samples = 0;
+
+ // Packet extraction loop.
+ do {
+ timestamp_ = next_packet->timestamp;
+ absl::optional<Packet> packet = packet_buffer_->GetNextPacket();
+ // `next_packet` may be invalid after the `packet_buffer_` operation.
+ next_packet = nullptr;
+ if (!packet) {
+ RTC_LOG(LS_ERROR) << "Should always be able to extract a packet here";
+ RTC_DCHECK_NOTREACHED(); // Should always be able to extract a packet
+ // here.
+ return -1;
+ }
+ const uint64_t waiting_time_ms = packet->waiting_time->ElapsedMs();
+ stats_->StoreWaitingTime(waiting_time_ms);
+ RTC_DCHECK(!packet->empty());
+
+ if (first_packet) {
+ first_packet = false;
+ if (nack_enabled_) {
+ RTC_DCHECK(nack_);
+ // TODO(henrik.lundin): Should we update this for all decoded packets?
+ nack_->UpdateLastDecodedPacket(packet->sequence_number,
+ packet->timestamp);
+ }
+ prev_sequence_number = packet->sequence_number;
+ prev_timestamp = packet->timestamp;
+ prev_payload_type = packet->payload_type;
+ }
+
+ const bool has_cng_packet =
+ decoder_database_->IsComfortNoise(packet->payload_type);
+ // Store number of extracted samples.
+ size_t packet_duration = 0;
+ if (packet->frame) {
+ packet_duration = packet->frame->Duration();
+ // TODO(ossu): Is this the correct way to track Opus FEC packets?
+ if (packet->priority.codec_level > 0) {
+ stats_->SecondaryDecodedSamples(
+ rtc::dchecked_cast<int>(packet_duration));
+ }
+ } else if (!has_cng_packet) {
+ RTC_LOG(LS_WARNING) << "Unknown payload type "
+ << static_cast<int>(packet->payload_type);
+ RTC_DCHECK_NOTREACHED();
+ }
+
+ if (packet_duration == 0) {
+ // Decoder did not return a packet duration. Assume that the packet
+ // contains the same number of samples as the previous one.
+ packet_duration = decoder_frame_length_;
+ }
+ extracted_samples = packet->timestamp - first_timestamp + packet_duration;
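+    // Illustrative numbers (not from the original code): if first_timestamp
+    // == 1000 and this packet has timestamp == 1320 with packet_duration ==
+    // 160, then extracted_samples == 1320 - 1000 + 160 == 480, i.e. the
+    // total sample span covered by the extracted packets so far.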
+
+ RTC_DCHECK(controller_);
+ stats_->JitterBufferDelay(packet_duration, waiting_time_ms,
+ controller_->TargetLevelMs(),
+ controller_->UnlimitedTargetLevelMs());
+
+ packet_list->push_back(std::move(*packet)); // Store packet in list.
+ packet = absl::nullopt; // Ensure it's never used after the move.
+
+ // Check what packet is available next.
+ next_packet = packet_buffer_->PeekNextPacket();
+ next_packet_available = false;
+ if (next_packet && prev_payload_type == next_packet->payload_type &&
+ !has_cng_packet) {
+ int16_t seq_no_diff = next_packet->sequence_number - prev_sequence_number;
+ size_t ts_diff = next_packet->timestamp - prev_timestamp;
+ if ((seq_no_diff == 1 || seq_no_diff == 0) &&
+ ts_diff <= packet_duration) {
+ // The next sequence number is available, or the next part of a packet
+ // that was split into pieces upon insertion.
+ next_packet_available = true;
+ }
+ prev_sequence_number = next_packet->sequence_number;
+ prev_timestamp = next_packet->timestamp;
+ }
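+    // Worked example (illustrative, assumed numbers): an L16 stream at 8 kHz
+    // with 20 ms packets has packet_duration == 160. Pieces of a packet that
+    // was split upon insertion share its sequence number, so each following
+    // piece has seq_no_diff == 0 and ts_diff <= packet_duration, while a
+    // genuinely new packet has seq_no_diff == 1.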
+ } while (extracted_samples < required_samples && next_packet_available);
+
+ if (extracted_samples > 0) {
+    // Delete old packets only when we are going to decode something.
+    // Otherwise, we could end up in a situation where we never decode
+    // anything: all incoming packets would be considered too old, but the
+    // buffer would also never fill up and get flushed.
+ packet_buffer_->DiscardAllOldPackets(timestamp_, stats_.get());
+ }
+
+ return rtc::dchecked_cast<int>(extracted_samples);
+}
+
+void NetEqImpl::UpdatePlcComponents(int fs_hz, size_t channels) {
+ // Delete objects and create new ones.
+ expand_.reset(expand_factory_->Create(background_noise_.get(),
+ sync_buffer_.get(), &random_vector_,
+ stats_.get(), fs_hz, channels));
+ merge_.reset(new Merge(fs_hz, channels, expand_.get(), sync_buffer_.get()));
+}
+
+void NetEqImpl::SetSampleRateAndChannels(int fs_hz, size_t channels) {
+ RTC_LOG(LS_VERBOSE) << "SetSampleRateAndChannels " << fs_hz << " "
+ << channels;
+ // TODO(hlundin): Change to an enumerator and skip assert.
+ RTC_DCHECK(fs_hz == 8000 || fs_hz == 16000 || fs_hz == 32000 ||
+ fs_hz == 48000);
+ RTC_DCHECK_GT(channels, 0);
+
+ // Before changing the sample rate, end and report any ongoing expand event.
+ stats_->EndExpandEvent(fs_hz_);
+ fs_hz_ = fs_hz;
+ fs_mult_ = fs_hz / 8000;
+ output_size_samples_ = static_cast<size_t>(kOutputSizeMs * 8 * fs_mult_);
+ decoder_frame_length_ = 3 * output_size_samples_; // Initialize to 30ms.
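+  // Worked example (illustrative): for fs_hz == 48000, fs_mult_ == 6, so
+  // output_size_samples_ == 10 * 8 * 6 == 480 samples (10 ms) and
+  // decoder_frame_length_ starts at 3 * 480 == 1440 samples (30 ms).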
+
+ last_mode_ = Mode::kNormal;
+
+ ComfortNoiseDecoder* cng_decoder = decoder_database_->GetActiveCngDecoder();
+ if (cng_decoder)
+ cng_decoder->Reset();
+
+ // Reinit post-decode VAD with new sample rate.
+ RTC_DCHECK(vad_.get()); // Cannot be NULL here.
+ vad_->Init();
+
+ // Delete algorithm buffer and create a new one.
+ algorithm_buffer_.reset(new AudioMultiVector(channels));
+
+ // Delete sync buffer and create a new one.
+ sync_buffer_.reset(new SyncBuffer(channels, kSyncBufferSize * fs_mult_));
+
+ // Delete BackgroundNoise object and create a new one.
+ background_noise_.reset(new BackgroundNoise(channels));
+
+ // Reset random vector.
+ random_vector_.Reset();
+
+ UpdatePlcComponents(fs_hz, channels);
+
+ // Move index so that we create a small set of future samples (all 0).
+ sync_buffer_->set_next_index(sync_buffer_->next_index() -
+ expand_->overlap_length());
+
+ normal_.reset(new Normal(fs_hz, decoder_database_.get(), *background_noise_,
+ expand_.get(), stats_.get()));
+ accelerate_.reset(
+ accelerate_factory_->Create(fs_hz, channels, *background_noise_));
+ preemptive_expand_.reset(preemptive_expand_factory_->Create(
+ fs_hz, channels, *background_noise_, expand_->overlap_length()));
+
+ // Delete ComfortNoise object and create a new one.
+ comfort_noise_.reset(
+ new ComfortNoise(fs_hz, decoder_database_.get(), sync_buffer_.get()));
+
+ // Verify that `decoded_buffer_` is long enough.
+ if (decoded_buffer_length_ < kMaxFrameSize * channels) {
+ // Reallocate to larger size.
+ decoded_buffer_length_ = kMaxFrameSize * channels;
+ decoded_buffer_.reset(new int16_t[decoded_buffer_length_]);
+ }
+ RTC_CHECK(controller_) << "Unexpectedly found no NetEqController";
+ controller_->SetSampleRate(fs_hz_, output_size_samples_);
+}
+
+NetEqImpl::OutputType NetEqImpl::LastOutputType() {
+ RTC_DCHECK(vad_.get());
+ RTC_DCHECK(expand_.get());
+ if (last_mode_ == Mode::kCodecInternalCng ||
+ last_mode_ == Mode::kRfc3389Cng) {
+ return OutputType::kCNG;
+ } else if (last_mode_ == Mode::kExpand && expand_->MuteFactor(0) == 0) {
+ // Expand mode has faded down to background noise only (very long expand).
+ return OutputType::kPLCCNG;
+ } else if (last_mode_ == Mode::kExpand) {
+ return OutputType::kPLC;
+ } else if (vad_->running() && !vad_->active_speech()) {
+ return OutputType::kVadPassive;
+ } else if (last_mode_ == Mode::kCodecPlc) {
+ return OutputType::kCodecPLC;
+ } else {
+ return OutputType::kNormalSpeech;
+ }
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/neteq_impl.h b/third_party/libwebrtc/modules/audio_coding/neteq/neteq_impl.h
new file mode 100644
index 0000000000..6120eab5b6
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/neteq_impl.h
@@ -0,0 +1,404 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_NETEQ_IMPL_H_
+#define MODULES_AUDIO_CODING_NETEQ_NETEQ_IMPL_H_
+
+#include <map>
+#include <memory>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/audio/audio_frame.h"
+#include "api/neteq/neteq.h"
+#include "api/neteq/neteq_controller.h"
+#include "api/neteq/neteq_controller_factory.h"
+#include "api/neteq/tick_timer.h"
+#include "api/rtp_packet_info.h"
+#include "modules/audio_coding/neteq/audio_multi_vector.h"
+#include "modules/audio_coding/neteq/expand_uma_logger.h"
+#include "modules/audio_coding/neteq/packet.h"
+#include "modules/audio_coding/neteq/random_vector.h"
+#include "modules/audio_coding/neteq/statistics_calculator.h"
+#include "rtc_base/synchronization/mutex.h"
+#include "rtc_base/thread_annotations.h"
+
+namespace webrtc {
+
+// Forward declarations.
+class Accelerate;
+class BackgroundNoise;
+class Clock;
+class ComfortNoise;
+class DecoderDatabase;
+class DtmfBuffer;
+class DtmfToneGenerator;
+class Expand;
+class Merge;
+class NackTracker;
+class Normal;
+class PacketBuffer;
+class RedPayloadSplitter;
+class PostDecodeVad;
+class PreemptiveExpand;
+class RandomVector;
+class SyncBuffer;
+class TimestampScaler;
+struct AccelerateFactory;
+struct DtmfEvent;
+struct ExpandFactory;
+struct PreemptiveExpandFactory;
+
+class NetEqImpl : public webrtc::NetEq {
+ public:
+ enum class OutputType {
+ kNormalSpeech,
+ kPLC,
+ kCNG,
+ kPLCCNG,
+ kVadPassive,
+ kCodecPLC
+ };
+
+ enum ErrorCodes {
+ kNoError = 0,
+ kOtherError,
+ kUnknownRtpPayloadType,
+ kDecoderNotFound,
+ kInvalidPointer,
+ kAccelerateError,
+ kPreemptiveExpandError,
+ kComfortNoiseErrorCode,
+ kDecoderErrorCode,
+ kOtherDecoderError,
+ kInvalidOperation,
+ kDtmfParsingError,
+ kDtmfInsertError,
+ kSampleUnderrun,
+ kDecodedTooMuch,
+ kRedundancySplitError,
+ kPacketBufferCorruption
+ };
+
+ struct Dependencies {
+ // The constructor populates the Dependencies struct with the default
+ // implementations of the objects. They can all be replaced by the user
+ // before sending the struct to the NetEqImpl constructor. However, there
+ // are dependencies between some of the classes inside the struct, so
+ // swapping out one may make it necessary to re-create another one.
+ Dependencies(const NetEq::Config& config,
+ Clock* clock,
+ const rtc::scoped_refptr<AudioDecoderFactory>& decoder_factory,
+ const NetEqControllerFactory& controller_factory);
+ ~Dependencies();
+
+ Clock* const clock;
+ std::unique_ptr<TickTimer> tick_timer;
+ std::unique_ptr<StatisticsCalculator> stats;
+ std::unique_ptr<DecoderDatabase> decoder_database;
+ std::unique_ptr<DtmfBuffer> dtmf_buffer;
+ std::unique_ptr<DtmfToneGenerator> dtmf_tone_generator;
+ std::unique_ptr<PacketBuffer> packet_buffer;
+ std::unique_ptr<NetEqController> neteq_controller;
+ std::unique_ptr<RedPayloadSplitter> red_payload_splitter;
+ std::unique_ptr<TimestampScaler> timestamp_scaler;
+ std::unique_ptr<AccelerateFactory> accelerate_factory;
+ std::unique_ptr<ExpandFactory> expand_factory;
+ std::unique_ptr<PreemptiveExpandFactory> preemptive_expand_factory;
+ };
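+  // Illustrative usage sketch (comment only, not part of the original
+  // header), assuming only the constructors declared in this file:
+  //   NetEqImpl::Dependencies deps(config, clock, decoder_factory,
+  //                                controller_factory);
+  //   deps.tick_timer = std::make_unique<TickTimer>();  // Swap a default.
+  //   NetEqImpl neteq(config, std::move(deps));
+  // As noted above, swapping out one component may require re-creating
+  // others that were built on top of it.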
+
+ // Creates a new NetEqImpl object.
+ NetEqImpl(const NetEq::Config& config,
+ Dependencies&& deps,
+ bool create_components = true);
+
+ ~NetEqImpl() override;
+
+ NetEqImpl(const NetEqImpl&) = delete;
+ NetEqImpl& operator=(const NetEqImpl&) = delete;
+
+ // Inserts a new packet into NetEq. Returns 0 on success, -1 on failure.
+ int InsertPacket(const RTPHeader& rtp_header,
+ rtc::ArrayView<const uint8_t> payload) override;
+
+ void InsertEmptyPacket(const RTPHeader& rtp_header) override;
+
+ int GetAudio(
+ AudioFrame* audio_frame,
+ bool* muted,
+ int* current_sample_rate_hz = nullptr,
+ absl::optional<Operation> action_override = absl::nullopt) override;
+
+ void SetCodecs(const std::map<int, SdpAudioFormat>& codecs) override;
+
+ bool RegisterPayloadType(int rtp_payload_type,
+ const SdpAudioFormat& audio_format) override;
+
+ // Removes `rtp_payload_type` from the codec database. Returns 0 on success,
+ // -1 on failure.
+ int RemovePayloadType(uint8_t rtp_payload_type) override;
+
+ void RemoveAllPayloadTypes() override;
+
+ bool SetMinimumDelay(int delay_ms) override;
+
+ bool SetMaximumDelay(int delay_ms) override;
+
+ bool SetBaseMinimumDelayMs(int delay_ms) override;
+
+ int GetBaseMinimumDelayMs() const override;
+
+ int TargetDelayMs() const override;
+
+ int FilteredCurrentDelayMs() const override;
+
+ // Writes the current network statistics to `stats`. The statistics are reset
+ // after the call.
+ int NetworkStatistics(NetEqNetworkStatistics* stats) override;
+
+ NetEqNetworkStatistics CurrentNetworkStatistics() const override;
+
+ NetEqLifetimeStatistics GetLifetimeStatistics() const override;
+
+ NetEqOperationsAndState GetOperationsAndState() const override;
+
+  // Enables post-decode VAD. When enabled, GetAudio() reports the output
+  // type kVadPassive when the signal contains no speech.
+ void EnableVad() override;
+
+ // Disables post-decode VAD.
+ void DisableVad() override;
+
+ absl::optional<uint32_t> GetPlayoutTimestamp() const override;
+
+ int last_output_sample_rate_hz() const override;
+
+ absl::optional<DecoderFormat> GetDecoderFormat(
+ int payload_type) const override;
+
+ // Flushes both the packet buffer and the sync buffer.
+ void FlushBuffers() override;
+
+ void EnableNack(size_t max_nack_list_size) override;
+
+ void DisableNack() override;
+
+ std::vector<uint16_t> GetNackList(int64_t round_trip_time_ms) const override;
+
+ int SyncBufferSizeMs() const override;
+
+ // This accessor method is only intended for testing purposes.
+ const SyncBuffer* sync_buffer_for_test() const;
+ Operation last_operation_for_test() const;
+
+ protected:
+ static const int kOutputSizeMs = 10;
+ static const size_t kMaxFrameSize = 5760; // 120 ms @ 48 kHz.
+ // TODO(hlundin): Provide a better value for kSyncBufferSize.
+ // Current value is kMaxFrameSize + 60 ms * 48 kHz, which is enough for
+ // calculating correlations of current frame against history.
+ static const size_t kSyncBufferSize = kMaxFrameSize + 60 * 48;
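+  // Worked numbers (illustrative): kMaxFrameSize == 5760 and 60 * 48 ==
+  // 2880, so kSyncBufferSize == 8640 samples per channel (180 ms at 48 kHz).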
+
+ // Inserts a new packet into NetEq. This is used by the InsertPacket method
+ // above. Returns 0 on success, otherwise an error code.
+ // TODO(hlundin): Merge this with InsertPacket above?
+ int InsertPacketInternal(const RTPHeader& rtp_header,
+ rtc::ArrayView<const uint8_t> payload)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+
+ // Delivers 10 ms of audio data. The data is written to `audio_frame`.
+ // Returns 0 on success, otherwise an error code.
+ int GetAudioInternal(AudioFrame* audio_frame,
+ bool* muted,
+ absl::optional<Operation> action_override)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+
+ // Provides a decision to the GetAudioInternal method. The decision what to
+ // do is written to `operation`. Packets to decode are written to
+ // `packet_list`, and a DTMF event to play is written to `dtmf_event`. When
+ // DTMF should be played, `play_dtmf` is set to true by the method.
+ // Returns 0 on success, otherwise an error code.
+ int GetDecision(Operation* operation,
+ PacketList* packet_list,
+ DtmfEvent* dtmf_event,
+ bool* play_dtmf,
+ absl::optional<Operation> action_override)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+
+ // Decodes the speech packets in `packet_list`, and writes the results to
+ // `decoded_buffer`, which is allocated to hold `decoded_buffer_length`
+ // elements. The length of the decoded data is written to `decoded_length`.
+ // The speech type -- speech or (codec-internal) comfort noise -- is written
+ // to `speech_type`. If `packet_list` contains any SID frames for RFC 3389
+ // comfort noise, those are not decoded.
+ int Decode(PacketList* packet_list,
+ Operation* operation,
+ int* decoded_length,
+ AudioDecoder::SpeechType* speech_type)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+
+ // Sub-method to Decode(). Performs codec internal CNG.
+ int DecodeCng(AudioDecoder* decoder,
+ int* decoded_length,
+ AudioDecoder::SpeechType* speech_type)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+
+ // Sub-method to Decode(). Performs the actual decoding.
+ int DecodeLoop(PacketList* packet_list,
+ const Operation& operation,
+ AudioDecoder* decoder,
+ int* decoded_length,
+ AudioDecoder::SpeechType* speech_type)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+
+ // Sub-method which calls the Normal class to perform the normal operation.
+ void DoNormal(const int16_t* decoded_buffer,
+ size_t decoded_length,
+ AudioDecoder::SpeechType speech_type,
+ bool play_dtmf) RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+
+ // Sub-method which calls the Merge class to perform the merge operation.
+ void DoMerge(int16_t* decoded_buffer,
+ size_t decoded_length,
+ AudioDecoder::SpeechType speech_type,
+ bool play_dtmf) RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+
+ bool DoCodecPlc() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+
+ // Sub-method which calls the Expand class to perform the expand operation.
+ int DoExpand(bool play_dtmf) RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+
+ // Sub-method which calls the Accelerate class to perform the accelerate
+ // operation.
+ int DoAccelerate(int16_t* decoded_buffer,
+ size_t decoded_length,
+ AudioDecoder::SpeechType speech_type,
+ bool play_dtmf,
+ bool fast_accelerate) RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+
+  // Sub-method which calls the PreemptiveExpand class to perform the
+  // preemptive expand operation.
+ int DoPreemptiveExpand(int16_t* decoded_buffer,
+ size_t decoded_length,
+ AudioDecoder::SpeechType speech_type,
+ bool play_dtmf) RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+
+ // Sub-method which calls the ComfortNoise class to generate RFC 3389 comfort
+ // noise. `packet_list` can either contain one SID frame to update the
+ // noise parameters, or no payload at all, in which case the previously
+ // received parameters are used.
+ int DoRfc3389Cng(PacketList* packet_list, bool play_dtmf)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+
+ // Calls the audio decoder to generate codec-internal comfort noise when
+ // no packet was received.
+ void DoCodecInternalCng(const int16_t* decoded_buffer, size_t decoded_length)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+
+ // Calls the DtmfToneGenerator class to generate DTMF tones.
+ int DoDtmf(const DtmfEvent& dtmf_event, bool* play_dtmf)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+
+ // Overdub DTMF on top of `output`.
+ int DtmfOverdub(const DtmfEvent& dtmf_event,
+ size_t num_channels,
+ int16_t* output) const RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+
+ // Extracts packets from `packet_buffer_` to produce at least
+ // `required_samples` samples. The packets are inserted into `packet_list`.
+ // Returns the number of samples that the packets in the list will produce, or
+ // -1 in case of an error.
+ int ExtractPackets(size_t required_samples, PacketList* packet_list)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+
+  // Resets various variables and objects to new values based on the sample
+  // rate `fs_hz` and the number of audio channels `channels`.
+ void SetSampleRateAndChannels(int fs_hz, size_t channels)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+
+ // Returns the output type for the audio produced by the latest call to
+ // GetAudio().
+ OutputType LastOutputType() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+
+ // Updates Expand and Merge.
+ virtual void UpdatePlcComponents(int fs_hz, size_t channels)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+
+ NetEqNetworkStatistics CurrentNetworkStatisticsInternal() const
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+
+ Clock* const clock_;
+
+ mutable Mutex mutex_;
+ const std::unique_ptr<TickTimer> tick_timer_ RTC_GUARDED_BY(mutex_);
+ const std::unique_ptr<DecoderDatabase> decoder_database_
+ RTC_GUARDED_BY(mutex_);
+ const std::unique_ptr<DtmfBuffer> dtmf_buffer_ RTC_GUARDED_BY(mutex_);
+ const std::unique_ptr<DtmfToneGenerator> dtmf_tone_generator_
+ RTC_GUARDED_BY(mutex_);
+ const std::unique_ptr<PacketBuffer> packet_buffer_ RTC_GUARDED_BY(mutex_);
+ const std::unique_ptr<RedPayloadSplitter> red_payload_splitter_
+ RTC_GUARDED_BY(mutex_);
+ const std::unique_ptr<TimestampScaler> timestamp_scaler_
+ RTC_GUARDED_BY(mutex_);
+ const std::unique_ptr<PostDecodeVad> vad_ RTC_GUARDED_BY(mutex_);
+ const std::unique_ptr<ExpandFactory> expand_factory_ RTC_GUARDED_BY(mutex_);
+ const std::unique_ptr<AccelerateFactory> accelerate_factory_
+ RTC_GUARDED_BY(mutex_);
+ const std::unique_ptr<PreemptiveExpandFactory> preemptive_expand_factory_
+ RTC_GUARDED_BY(mutex_);
+ const std::unique_ptr<StatisticsCalculator> stats_ RTC_GUARDED_BY(mutex_);
+
+ std::unique_ptr<BackgroundNoise> background_noise_ RTC_GUARDED_BY(mutex_);
+ std::unique_ptr<NetEqController> controller_ RTC_GUARDED_BY(mutex_);
+ std::unique_ptr<AudioMultiVector> algorithm_buffer_ RTC_GUARDED_BY(mutex_);
+ std::unique_ptr<SyncBuffer> sync_buffer_ RTC_GUARDED_BY(mutex_);
+ std::unique_ptr<Expand> expand_ RTC_GUARDED_BY(mutex_);
+ std::unique_ptr<Normal> normal_ RTC_GUARDED_BY(mutex_);
+ std::unique_ptr<Merge> merge_ RTC_GUARDED_BY(mutex_);
+ std::unique_ptr<Accelerate> accelerate_ RTC_GUARDED_BY(mutex_);
+ std::unique_ptr<PreemptiveExpand> preemptive_expand_ RTC_GUARDED_BY(mutex_);
+ RandomVector random_vector_ RTC_GUARDED_BY(mutex_);
+ std::unique_ptr<ComfortNoise> comfort_noise_ RTC_GUARDED_BY(mutex_);
+ int fs_hz_ RTC_GUARDED_BY(mutex_);
+ int fs_mult_ RTC_GUARDED_BY(mutex_);
+ int last_output_sample_rate_hz_ RTC_GUARDED_BY(mutex_);
+ size_t output_size_samples_ RTC_GUARDED_BY(mutex_);
+ size_t decoder_frame_length_ RTC_GUARDED_BY(mutex_);
+ Mode last_mode_ RTC_GUARDED_BY(mutex_);
+ Operation last_operation_ RTC_GUARDED_BY(mutex_);
+ size_t decoded_buffer_length_ RTC_GUARDED_BY(mutex_);
+ std::unique_ptr<int16_t[]> decoded_buffer_ RTC_GUARDED_BY(mutex_);
+ uint32_t playout_timestamp_ RTC_GUARDED_BY(mutex_);
+ bool new_codec_ RTC_GUARDED_BY(mutex_);
+ uint32_t timestamp_ RTC_GUARDED_BY(mutex_);
+ bool reset_decoder_ RTC_GUARDED_BY(mutex_);
+ absl::optional<uint8_t> current_rtp_payload_type_ RTC_GUARDED_BY(mutex_);
+ absl::optional<uint8_t> current_cng_rtp_payload_type_ RTC_GUARDED_BY(mutex_);
+ bool first_packet_ RTC_GUARDED_BY(mutex_);
+ bool enable_fast_accelerate_ RTC_GUARDED_BY(mutex_);
+ std::unique_ptr<NackTracker> nack_ RTC_GUARDED_BY(mutex_);
+ bool nack_enabled_ RTC_GUARDED_BY(mutex_);
+ const bool enable_muted_state_ RTC_GUARDED_BY(mutex_);
+ AudioFrame::VADActivity last_vad_activity_ RTC_GUARDED_BY(mutex_) =
+ AudioFrame::kVadPassive;
+ std::unique_ptr<TickTimer::Stopwatch> generated_noise_stopwatch_
+ RTC_GUARDED_BY(mutex_);
+ std::vector<RtpPacketInfo> last_decoded_packet_infos_ RTC_GUARDED_BY(mutex_);
+ ExpandUmaLogger expand_uma_logger_ RTC_GUARDED_BY(mutex_);
+ ExpandUmaLogger speech_expand_uma_logger_ RTC_GUARDED_BY(mutex_);
+ bool no_time_stretching_ RTC_GUARDED_BY(mutex_); // Only used for test.
+ rtc::BufferT<int16_t> concealment_audio_ RTC_GUARDED_BY(mutex_);
+};
+
+} // namespace webrtc
+#endif // MODULES_AUDIO_CODING_NETEQ_NETEQ_IMPL_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/neteq_impl_unittest.cc b/third_party/libwebrtc/modules/audio_coding/neteq/neteq_impl_unittest.cc
new file mode 100644
index 0000000000..ce2be656ef
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/neteq_impl_unittest.cc
@@ -0,0 +1,1871 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/neteq_impl.h"
+
+#include <memory>
+#include <utility>
+#include <vector>
+
+#include "api/audio_codecs/builtin_audio_decoder_factory.h"
+#include "api/neteq/default_neteq_controller_factory.h"
+#include "api/neteq/neteq.h"
+#include "api/neteq/neteq_controller.h"
+#include "modules/audio_coding/codecs/g711/audio_decoder_pcm.h"
+#include "modules/audio_coding/neteq/accelerate.h"
+#include "modules/audio_coding/neteq/decision_logic.h"
+#include "modules/audio_coding/neteq/default_neteq_factory.h"
+#include "modules/audio_coding/neteq/expand.h"
+#include "modules/audio_coding/neteq/histogram.h"
+#include "modules/audio_coding/neteq/mock/mock_decoder_database.h"
+#include "modules/audio_coding/neteq/mock/mock_dtmf_buffer.h"
+#include "modules/audio_coding/neteq/mock/mock_dtmf_tone_generator.h"
+#include "modules/audio_coding/neteq/mock/mock_neteq_controller.h"
+#include "modules/audio_coding/neteq/mock/mock_packet_buffer.h"
+#include "modules/audio_coding/neteq/mock/mock_red_payload_splitter.h"
+#include "modules/audio_coding/neteq/preemptive_expand.h"
+#include "modules/audio_coding/neteq/statistics_calculator.h"
+#include "modules/audio_coding/neteq/sync_buffer.h"
+#include "modules/audio_coding/neteq/timestamp_scaler.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "system_wrappers/include/clock.h"
+#include "test/audio_decoder_proxy_factory.h"
+#include "test/function_audio_decoder_factory.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "test/mock_audio_decoder.h"
+#include "test/mock_audio_decoder_factory.h"
+
+using ::testing::_;
+using ::testing::AtLeast;
+using ::testing::DoAll;
+using ::testing::ElementsAre;
+using ::testing::InSequence;
+using ::testing::Invoke;
+using ::testing::IsEmpty;
+using ::testing::IsNull;
+using ::testing::Pointee;
+using ::testing::Return;
+using ::testing::ReturnNull;
+using ::testing::SetArgPointee;
+using ::testing::SetArrayArgument;
+using ::testing::SizeIs;
+using ::testing::WithArg;
+
+namespace webrtc {
+
+// This function is called when inserting a packet list into the mock packet
+// buffer. The purpose is to delete all inserted packets properly, to avoid
+// memory leaks in the test.
+int DeletePacketsAndReturnOk(PacketList* packet_list) {
+ packet_list->clear();
+ return PacketBuffer::kOK;
+}
+
+class NetEqImplTest : public ::testing::Test {
+ protected:
+ NetEqImplTest() : clock_(0) { config_.sample_rate_hz = 8000; }
+
+ void CreateInstance(
+ const rtc::scoped_refptr<AudioDecoderFactory>& decoder_factory) {
+ ASSERT_TRUE(decoder_factory);
+ config_.enable_muted_state = enable_muted_state_;
+ NetEqImpl::Dependencies deps(config_, &clock_, decoder_factory,
+ DefaultNetEqControllerFactory());
+
+ // Get a local pointer to NetEq's TickTimer object.
+ tick_timer_ = deps.tick_timer.get();
+
+ if (use_mock_decoder_database_) {
+ std::unique_ptr<MockDecoderDatabase> mock(new MockDecoderDatabase);
+ mock_decoder_database_ = mock.get();
+ EXPECT_CALL(*mock_decoder_database_, GetActiveCngDecoder())
+ .WillOnce(ReturnNull());
+ deps.decoder_database = std::move(mock);
+ }
+ decoder_database_ = deps.decoder_database.get();
+
+ if (use_mock_dtmf_buffer_) {
+ std::unique_ptr<MockDtmfBuffer> mock(
+ new MockDtmfBuffer(config_.sample_rate_hz));
+ mock_dtmf_buffer_ = mock.get();
+ deps.dtmf_buffer = std::move(mock);
+ }
+ dtmf_buffer_ = deps.dtmf_buffer.get();
+
+ if (use_mock_dtmf_tone_generator_) {
+ std::unique_ptr<MockDtmfToneGenerator> mock(new MockDtmfToneGenerator);
+ mock_dtmf_tone_generator_ = mock.get();
+ deps.dtmf_tone_generator = std::move(mock);
+ }
+ dtmf_tone_generator_ = deps.dtmf_tone_generator.get();
+
+ if (use_mock_packet_buffer_) {
+ std::unique_ptr<MockPacketBuffer> mock(
+ new MockPacketBuffer(config_.max_packets_in_buffer, tick_timer_));
+ mock_packet_buffer_ = mock.get();
+ deps.packet_buffer = std::move(mock);
+ }
+ packet_buffer_ = deps.packet_buffer.get();
+
+ if (use_mock_neteq_controller_) {
+ std::unique_ptr<MockNetEqController> mock(new MockNetEqController());
+ mock_neteq_controller_ = mock.get();
+ deps.neteq_controller = std::move(mock);
+ } else {
+ deps.stats = std::make_unique<StatisticsCalculator>();
+ NetEqController::Config controller_config;
+ controller_config.tick_timer = tick_timer_;
+ controller_config.base_min_delay_ms = config_.min_delay_ms;
+ controller_config.allow_time_stretching = true;
+ controller_config.max_packets_in_buffer = config_.max_packets_in_buffer;
+ controller_config.clock = &clock_;
+ deps.neteq_controller =
+ std::make_unique<DecisionLogic>(std::move(controller_config));
+ }
+ neteq_controller_ = deps.neteq_controller.get();
+
+ if (use_mock_payload_splitter_) {
+ std::unique_ptr<MockRedPayloadSplitter> mock(new MockRedPayloadSplitter);
+ mock_payload_splitter_ = mock.get();
+ deps.red_payload_splitter = std::move(mock);
+ }
+ red_payload_splitter_ = deps.red_payload_splitter.get();
+
+ deps.timestamp_scaler = std::unique_ptr<TimestampScaler>(
+ new TimestampScaler(*deps.decoder_database.get()));
+
+ neteq_.reset(new NetEqImpl(config_, std::move(deps)));
+ ASSERT_TRUE(neteq_ != NULL);
+ }
+
+ void CreateInstance() { CreateInstance(CreateBuiltinAudioDecoderFactory()); }
+
+ void UseNoMocks() {
+ ASSERT_TRUE(neteq_ == NULL) << "Must call UseNoMocks before CreateInstance";
+ use_mock_decoder_database_ = false;
+ use_mock_neteq_controller_ = false;
+ use_mock_dtmf_buffer_ = false;
+ use_mock_dtmf_tone_generator_ = false;
+ use_mock_packet_buffer_ = false;
+ use_mock_payload_splitter_ = false;
+ }
+
+ virtual ~NetEqImplTest() {
+ if (use_mock_decoder_database_) {
+ EXPECT_CALL(*mock_decoder_database_, Die()).Times(1);
+ }
+ if (use_mock_neteq_controller_) {
+ EXPECT_CALL(*mock_neteq_controller_, Die()).Times(1);
+ }
+ if (use_mock_dtmf_buffer_) {
+ EXPECT_CALL(*mock_dtmf_buffer_, Die()).Times(1);
+ }
+ if (use_mock_dtmf_tone_generator_) {
+ EXPECT_CALL(*mock_dtmf_tone_generator_, Die()).Times(1);
+ }
+ if (use_mock_packet_buffer_) {
+ EXPECT_CALL(*mock_packet_buffer_, Die()).Times(1);
+ }
+ }
+
+ void TestDtmfPacket(int sample_rate_hz) {
+ const size_t kPayloadLength = 4;
+ const uint8_t kPayloadType = 110;
+ const int kSampleRateHz = 16000;
+ config_.sample_rate_hz = kSampleRateHz;
+ UseNoMocks();
+ CreateInstance();
+ // Event: 2, E bit, Volume: 17, Length: 4336.
+ uint8_t payload[kPayloadLength] = {0x02, 0x80 + 0x11, 0x10, 0xF0};
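+    // Byte-level breakdown (explanatory comment, following RFC 4733): byte 0
+    // is the event code (2); byte 1 packs the E bit (0x80), the R bit (0)
+    // and the volume (0x11 == 17); bytes 2-3 hold the big-endian duration
+    // 0x10F0 == 4336 timestamp units.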
+ RTPHeader rtp_header;
+ rtp_header.payloadType = kPayloadType;
+ rtp_header.sequenceNumber = 0x1234;
+ rtp_header.timestamp = 0x12345678;
+ rtp_header.ssrc = 0x87654321;
+
+ EXPECT_TRUE(neteq_->RegisterPayloadType(
+ kPayloadType, SdpAudioFormat("telephone-event", sample_rate_hz, 1)));
+
+ // Insert first packet.
+ EXPECT_EQ(NetEq::kOK, neteq_->InsertPacket(rtp_header, payload));
+
+ // Pull audio once.
+ const size_t kMaxOutputSize =
+ static_cast<size_t>(10 * kSampleRateHz / 1000);
+ AudioFrame output;
+ bool muted;
+ EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output, &muted));
+ ASSERT_FALSE(muted);
+ ASSERT_EQ(kMaxOutputSize, output.samples_per_channel_);
+ EXPECT_EQ(1u, output.num_channels_);
+ EXPECT_EQ(AudioFrame::kNormalSpeech, output.speech_type_);
+
+ // DTMF packets are immediately consumed by `InsertPacket()` and won't be
+ // returned by `GetAudio()`.
+ EXPECT_THAT(output.packet_infos_, IsEmpty());
+
+ // Verify first 64 samples of actual output.
+ const std::vector<int16_t> kOutput(
+ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ -1578, -2816, -3460, -3403, -2709, -1594, -363, 671, 1269, 1328,
+ 908, 202, -513, -964, -955, -431, 504, 1617, 2602, 3164,
+ 3101, 2364, 1073, -511, -2047, -3198, -3721, -3525, -2688, -1440,
+ -99, 1015, 1663, 1744, 1319, 588, -171, -680, -747, -315,
+ 515, 1512, 2378, 2828, 2674, 1877, 568, -986, -2446, -3482,
+ -3864, -3516, -2534, -1163});
+ ASSERT_GE(kMaxOutputSize, kOutput.size());
+ EXPECT_TRUE(std::equal(kOutput.begin(), kOutput.end(), output.data()));
+ }
+
+ std::unique_ptr<NetEqImpl> neteq_;
+ NetEq::Config config_;
+ SimulatedClock clock_;
+ TickTimer* tick_timer_ = nullptr;
+ MockDecoderDatabase* mock_decoder_database_ = nullptr;
+ DecoderDatabase* decoder_database_ = nullptr;
+ bool use_mock_decoder_database_ = true;
+ MockNetEqController* mock_neteq_controller_ = nullptr;
+ NetEqController* neteq_controller_ = nullptr;
+ bool use_mock_neteq_controller_ = true;
+ MockDtmfBuffer* mock_dtmf_buffer_ = nullptr;
+ DtmfBuffer* dtmf_buffer_ = nullptr;
+ bool use_mock_dtmf_buffer_ = true;
+ MockDtmfToneGenerator* mock_dtmf_tone_generator_ = nullptr;
+ DtmfToneGenerator* dtmf_tone_generator_ = nullptr;
+ bool use_mock_dtmf_tone_generator_ = true;
+ MockPacketBuffer* mock_packet_buffer_ = nullptr;
+ PacketBuffer* packet_buffer_ = nullptr;
+ bool use_mock_packet_buffer_ = true;
+ MockRedPayloadSplitter* mock_payload_splitter_ = nullptr;
+ RedPayloadSplitter* red_payload_splitter_ = nullptr;
+ bool use_mock_payload_splitter_ = true;
+ bool enable_muted_state_ = false;
+};
+
+// This tests the interface class NetEq.
+// TODO(hlundin): Move to separate file?
+TEST(NetEq, CreateAndDestroy) {
+ NetEq::Config config;
+ SimulatedClock clock(0);
+ auto decoder_factory = CreateBuiltinAudioDecoderFactory();
+ std::unique_ptr<NetEq> neteq =
+ DefaultNetEqFactory().CreateNetEq(config, decoder_factory, &clock);
+}
+
+TEST_F(NetEqImplTest, RegisterPayloadType) {
+ CreateInstance();
+ constexpr int rtp_payload_type = 0;
+ const SdpAudioFormat format("pcmu", 8000, 1);
+ EXPECT_CALL(*mock_decoder_database_,
+ RegisterPayload(rtp_payload_type, format));
+ neteq_->RegisterPayloadType(rtp_payload_type, format);
+}
+
+TEST_F(NetEqImplTest, RemovePayloadType) {
+ CreateInstance();
+ uint8_t rtp_payload_type = 0;
+ EXPECT_CALL(*mock_decoder_database_, Remove(rtp_payload_type))
+ .WillOnce(Return(DecoderDatabase::kDecoderNotFound));
+ // Check that kOK is returned when database returns kDecoderNotFound, because
+ // removing a payload type that was never registered is not an error.
+ EXPECT_EQ(NetEq::kOK, neteq_->RemovePayloadType(rtp_payload_type));
+}
+
+TEST_F(NetEqImplTest, RemoveAllPayloadTypes) {
+ CreateInstance();
+ EXPECT_CALL(*mock_decoder_database_, RemoveAll()).WillOnce(Return());
+ neteq_->RemoveAllPayloadTypes();
+}
+
+TEST_F(NetEqImplTest, InsertPacket) {
+ using ::testing::AllOf;
+ using ::testing::Field;
+ CreateInstance();
+ const size_t kPayloadLength = 100;
+ const uint8_t kPayloadType = 0;
+ const uint16_t kFirstSequenceNumber = 0x1234;
+ const uint32_t kFirstTimestamp = 0x12345678;
+ const uint32_t kSsrc = 0x87654321;
+ uint8_t payload[kPayloadLength] = {0};
+ RTPHeader rtp_header;
+ rtp_header.payloadType = kPayloadType;
+ rtp_header.sequenceNumber = kFirstSequenceNumber;
+ rtp_header.timestamp = kFirstTimestamp;
+ rtp_header.ssrc = kSsrc;
+ Packet fake_packet;
+ fake_packet.payload_type = kPayloadType;
+ fake_packet.sequence_number = kFirstSequenceNumber;
+ fake_packet.timestamp = kFirstTimestamp;
+
+ auto mock_decoder_factory = rtc::make_ref_counted<MockAudioDecoderFactory>();
+ EXPECT_CALL(*mock_decoder_factory, MakeAudioDecoderMock(_, _, _))
+ .WillOnce(Invoke([&](const SdpAudioFormat& format,
+ absl::optional<AudioCodecPairId> codec_pair_id,
+ std::unique_ptr<AudioDecoder>* dec) {
+ EXPECT_EQ("pcmu", format.name);
+
+ std::unique_ptr<MockAudioDecoder> mock_decoder(new MockAudioDecoder);
+ EXPECT_CALL(*mock_decoder, Channels()).WillRepeatedly(Return(1));
+ EXPECT_CALL(*mock_decoder, SampleRateHz()).WillRepeatedly(Return(8000));
+ EXPECT_CALL(*mock_decoder, Die()).Times(1); // Called when deleted.
+
+ *dec = std::move(mock_decoder);
+ }));
+ DecoderDatabase::DecoderInfo info(SdpAudioFormat("pcmu", 8000, 1),
+ absl::nullopt, mock_decoder_factory.get());
+
+ // Expectations for decoder database.
+ EXPECT_CALL(*mock_decoder_database_, GetDecoderInfo(kPayloadType))
+ .WillRepeatedly(Return(&info));
+
+ // Expectations for packet buffer.
+ EXPECT_CALL(*mock_packet_buffer_, Empty())
+ .WillOnce(Return(false)); // Called once after first packet is inserted.
+ EXPECT_CALL(*mock_packet_buffer_, Flush(_)).Times(1);
+ EXPECT_CALL(*mock_packet_buffer_, InsertPacketList(_, _, _, _, _, _, _, _))
+ .Times(2)
+ .WillRepeatedly(DoAll(SetArgPointee<2>(kPayloadType),
+ WithArg<0>(Invoke(DeletePacketsAndReturnOk))));
+ // SetArgPointee<2>(kPayloadType) means that the third argument (zero-based
+ // index) is a pointer, and the variable pointed to is set to kPayloadType.
+ // Also invoke the function DeletePacketsAndReturnOk to properly delete all
+ // packets in the list (to avoid memory leaks in the test).
+ EXPECT_CALL(*mock_packet_buffer_, PeekNextPacket())
+ .Times(1)
+ .WillOnce(Return(&fake_packet));
+
+ // Expectations for DTMF buffer.
+ EXPECT_CALL(*mock_dtmf_buffer_, Flush()).Times(1);
+
+ // Expectations for delay manager.
+ {
+ // All expectations within this block must be called in this specific order.
+ InSequence sequence; // Dummy variable.
+ // Expectations when the first packet is inserted.
+ EXPECT_CALL(
+ *mock_neteq_controller_,
+ PacketArrived(
+ /*fs_hz*/ 8000,
+ /*should_update_stats*/ _,
+ /*info*/
+ AllOf(
+ Field(&NetEqController::PacketArrivedInfo::is_cng_or_dtmf,
+ false),
+ Field(&NetEqController::PacketArrivedInfo::main_sequence_number,
+ kFirstSequenceNumber),
+ Field(&NetEqController::PacketArrivedInfo::main_timestamp,
+ kFirstTimestamp))));
+ EXPECT_CALL(
+ *mock_neteq_controller_,
+ PacketArrived(
+ /*fs_hz*/ 8000,
+ /*should_update_stats*/ _,
+ /*info*/
+ AllOf(
+ Field(&NetEqController::PacketArrivedInfo::is_cng_or_dtmf,
+ false),
+ Field(&NetEqController::PacketArrivedInfo::main_sequence_number,
+ kFirstSequenceNumber + 1),
+ Field(&NetEqController::PacketArrivedInfo::main_timestamp,
+ kFirstTimestamp + 160))));
+ }
+
+ // Insert first packet.
+ neteq_->InsertPacket(rtp_header, payload);
+
+ // Insert second packet.
+ rtp_header.timestamp += 160;
+ rtp_header.sequenceNumber += 1;
+ neteq_->InsertPacket(rtp_header, payload);
+}
+
+TEST_F(NetEqImplTest, InsertPacketsUntilBufferIsFull) {
+ UseNoMocks();
+ CreateInstance();
+
+ const int kPayloadLengthSamples = 80;
+ const size_t kPayloadLengthBytes = 2 * kPayloadLengthSamples; // PCM 16-bit.
+ const uint8_t kPayloadType = 17; // Just an arbitrary number.
+ uint8_t payload[kPayloadLengthBytes] = {0};
+ RTPHeader rtp_header;
+ rtp_header.payloadType = kPayloadType;
+ rtp_header.sequenceNumber = 0x1234;
+ rtp_header.timestamp = 0x12345678;
+ rtp_header.ssrc = 0x87654321;
+
+ EXPECT_TRUE(neteq_->RegisterPayloadType(kPayloadType,
+ SdpAudioFormat("l16", 8000, 1)));
+
+ // Insert packets. The buffer should not flush.
+ for (size_t i = 1; i <= config_.max_packets_in_buffer; ++i) {
+ EXPECT_EQ(NetEq::kOK, neteq_->InsertPacket(rtp_header, payload));
+ rtp_header.timestamp += kPayloadLengthSamples;
+ rtp_header.sequenceNumber += 1;
+ EXPECT_EQ(i, packet_buffer_->NumPacketsInBuffer());
+ }
+
+ // Insert one more packet and make sure the buffer got flushed. That is, it
+ // should only hold one single packet.
+ EXPECT_EQ(NetEq::kOK, neteq_->InsertPacket(rtp_header, payload));
+ EXPECT_EQ(1u, packet_buffer_->NumPacketsInBuffer());
+ const Packet* test_packet = packet_buffer_->PeekNextPacket();
+ EXPECT_EQ(rtp_header.timestamp, test_packet->timestamp);
+ EXPECT_EQ(rtp_header.sequenceNumber, test_packet->sequence_number);
+}
+
+TEST_F(NetEqImplTest, TestDtmfPacketAVT) {
+ TestDtmfPacket(8000);
+}
+
+TEST_F(NetEqImplTest, TestDtmfPacketAVT16kHz) {
+ TestDtmfPacket(16000);
+}
+
+TEST_F(NetEqImplTest, TestDtmfPacketAVT32kHz) {
+ TestDtmfPacket(32000);
+}
+
+TEST_F(NetEqImplTest, TestDtmfPacketAVT48kHz) {
+ TestDtmfPacket(48000);
+}
+
+// This test verifies that timestamps propagate from the incoming packets
+// through to the sync buffer and to the playout timestamp.
+TEST_F(NetEqImplTest, VerifyTimestampPropagation) {
+ const uint8_t kPayloadType = 17; // Just an arbitrary number.
+ const int kSampleRateHz = 8000;
+ const size_t kPayloadLengthSamples =
+ static_cast<size_t>(10 * kSampleRateHz / 1000); // 10 ms.
+ const size_t kPayloadLengthBytes = kPayloadLengthSamples;
+ uint8_t payload[kPayloadLengthBytes] = {0};
+ RTPHeader rtp_header;
+ rtp_header.payloadType = kPayloadType;
+ rtp_header.sequenceNumber = 0x1234;
+ rtp_header.timestamp = 0x12345678;
+ rtp_header.ssrc = 0x87654321;
+ rtp_header.numCSRCs = 3;
+ rtp_header.arrOfCSRCs[0] = 43;
+ rtp_header.arrOfCSRCs[1] = 65;
+ rtp_header.arrOfCSRCs[2] = 17;
+
+ // This is a dummy decoder that produces as many output samples as the input
+ // has bytes. The output is an increasing series, starting at 1 for the first
+ // sample, and then increasing by 1 for each sample.
+ class CountingSamplesDecoder : public AudioDecoder {
+ public:
+ CountingSamplesDecoder() : next_value_(1) {}
+
+ // Produce as many samples as input bytes (`encoded_len`).
+ int DecodeInternal(const uint8_t* encoded,
+ size_t encoded_len,
+ int /* sample_rate_hz */,
+ int16_t* decoded,
+ SpeechType* speech_type) override {
+ for (size_t i = 0; i < encoded_len; ++i) {
+ decoded[i] = next_value_++;
+ }
+ *speech_type = kSpeech;
+ return rtc::checked_cast<int>(encoded_len);
+ }
+
+ void Reset() override { next_value_ = 1; }
+
+ int SampleRateHz() const override { return kSampleRateHz; }
+
+ size_t Channels() const override { return 1; }
+
+ uint16_t next_value() const { return next_value_; }
+
+ private:
+ int16_t next_value_;
+ } decoder_;
+
+ auto decoder_factory =
+ rtc::make_ref_counted<test::AudioDecoderProxyFactory>(&decoder_);
+
+ UseNoMocks();
+ CreateInstance(decoder_factory);
+
+ EXPECT_TRUE(neteq_->RegisterPayloadType(kPayloadType,
+ SdpAudioFormat("L16", 8000, 1)));
+
+ // Insert one packet.
+ clock_.AdvanceTimeMilliseconds(123456);
+ Timestamp expected_receive_time = clock_.CurrentTime();
+ EXPECT_EQ(NetEq::kOK, neteq_->InsertPacket(rtp_header, payload));
+
+ // Pull audio once.
+ const size_t kMaxOutputSize = static_cast<size_t>(10 * kSampleRateHz / 1000);
+ AudioFrame output;
+ bool muted;
+ EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output, &muted));
+ ASSERT_FALSE(muted);
+ ASSERT_EQ(kMaxOutputSize, output.samples_per_channel_);
+ EXPECT_EQ(1u, output.num_channels_);
+ EXPECT_EQ(AudioFrame::kNormalSpeech, output.speech_type_);
+
+ // Verify `output.packet_infos_`.
+ ASSERT_THAT(output.packet_infos_, SizeIs(1));
+ {
+ const auto& packet_info = output.packet_infos_[0];
+ EXPECT_EQ(packet_info.ssrc(), rtp_header.ssrc);
+ EXPECT_THAT(packet_info.csrcs(), ElementsAre(43, 65, 17));
+ EXPECT_EQ(packet_info.rtp_timestamp(), rtp_header.timestamp);
+ EXPECT_FALSE(packet_info.audio_level().has_value());
+ EXPECT_EQ(packet_info.receive_time(), expected_receive_time);
+ }
+
+ // Start with a simple check that the fake decoder is behaving as expected.
+ EXPECT_EQ(kPayloadLengthSamples,
+ static_cast<size_t>(decoder_.next_value() - 1));
+
+ // The value of the last of the output samples is the same as the number of
+ // samples played from the decoded packet. Thus, this number + the RTP
+ // timestamp should match the playout timestamp.
+ // Wrap the expected value in an absl::optional to compare them as such.
+ EXPECT_EQ(
+ absl::optional<uint32_t>(rtp_header.timestamp +
+ output.data()[output.samples_per_channel_ - 1]),
+ neteq_->GetPlayoutTimestamp());
+
+ // Check the timestamp for the last value in the sync buffer. This should
+ // be one full frame length ahead of the RTP timestamp.
+ const SyncBuffer* sync_buffer = neteq_->sync_buffer_for_test();
+ ASSERT_TRUE(sync_buffer != NULL);
+ EXPECT_EQ(rtp_header.timestamp + kPayloadLengthSamples,
+ sync_buffer->end_timestamp());
+
+  // Check that the number of samples still to play from the sync buffer adds
+  // up with what was already played out.
+ EXPECT_EQ(
+ kPayloadLengthSamples - output.data()[output.samples_per_channel_ - 1],
+ sync_buffer->FutureLength());
+}
+
+TEST_F(NetEqImplTest, ReorderedPacket) {
+ UseNoMocks();
+
+ // Create a mock decoder object.
+ MockAudioDecoder mock_decoder;
+
+ CreateInstance(
+ rtc::make_ref_counted<test::AudioDecoderProxyFactory>(&mock_decoder));
+
+ const uint8_t kPayloadType = 17; // Just an arbitrary number.
+ const int kSampleRateHz = 8000;
+ const size_t kPayloadLengthSamples =
+ static_cast<size_t>(10 * kSampleRateHz / 1000); // 10 ms.
+ const size_t kPayloadLengthBytes = kPayloadLengthSamples;
+ uint8_t payload[kPayloadLengthBytes] = {0};
+ RTPHeader rtp_header;
+ rtp_header.payloadType = kPayloadType;
+ rtp_header.sequenceNumber = 0x1234;
+ rtp_header.timestamp = 0x12345678;
+ rtp_header.ssrc = 0x87654321;
+ rtp_header.extension.hasAudioLevel = true;
+ rtp_header.extension.audioLevel = 42;
+
+ EXPECT_CALL(mock_decoder, Reset()).WillRepeatedly(Return());
+ EXPECT_CALL(mock_decoder, SampleRateHz())
+ .WillRepeatedly(Return(kSampleRateHz));
+ EXPECT_CALL(mock_decoder, Channels()).WillRepeatedly(Return(1));
+ EXPECT_CALL(mock_decoder, PacketDuration(_, kPayloadLengthBytes))
+ .WillRepeatedly(Return(rtc::checked_cast<int>(kPayloadLengthSamples)));
+ int16_t dummy_output[kPayloadLengthSamples] = {0};
+ // The below expectation will make the mock decoder write
+ // `kPayloadLengthSamples` zeros to the output array, and mark it as speech.
+ EXPECT_CALL(mock_decoder, DecodeInternal(Pointee(0), kPayloadLengthBytes,
+ kSampleRateHz, _, _))
+ .WillOnce(DoAll(SetArrayArgument<3>(dummy_output,
+ dummy_output + kPayloadLengthSamples),
+ SetArgPointee<4>(AudioDecoder::kSpeech),
+ Return(rtc::checked_cast<int>(kPayloadLengthSamples))));
+ EXPECT_TRUE(neteq_->RegisterPayloadType(kPayloadType,
+ SdpAudioFormat("L16", 8000, 1)));
+
+ // Insert one packet.
+ clock_.AdvanceTimeMilliseconds(123456);
+ Timestamp expected_receive_time = clock_.CurrentTime();
+ EXPECT_EQ(NetEq::kOK, neteq_->InsertPacket(rtp_header, payload));
+
+ // Pull audio once.
+ const size_t kMaxOutputSize = static_cast<size_t>(10 * kSampleRateHz / 1000);
+ AudioFrame output;
+ bool muted;
+ EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output, &muted));
+ ASSERT_EQ(kMaxOutputSize, output.samples_per_channel_);
+ EXPECT_EQ(1u, output.num_channels_);
+ EXPECT_EQ(AudioFrame::kNormalSpeech, output.speech_type_);
+
+ // Verify `output.packet_infos_`.
+ ASSERT_THAT(output.packet_infos_, SizeIs(1));
+ {
+ const auto& packet_info = output.packet_infos_[0];
+ EXPECT_EQ(packet_info.ssrc(), rtp_header.ssrc);
+ EXPECT_THAT(packet_info.csrcs(), IsEmpty());
+ EXPECT_EQ(packet_info.rtp_timestamp(), rtp_header.timestamp);
+ EXPECT_EQ(packet_info.audio_level(), rtp_header.extension.audioLevel);
+ EXPECT_EQ(packet_info.receive_time(), expected_receive_time);
+ }
+
+ // Insert two more packets. The first one is out of order, and is already too
+ // old, the second one is the expected next packet.
+ rtp_header.sequenceNumber -= 1;
+ rtp_header.timestamp -= kPayloadLengthSamples;
+ rtp_header.extension.audioLevel = 1;
+ payload[0] = 1;
+ clock_.AdvanceTimeMilliseconds(1000);
+ EXPECT_EQ(NetEq::kOK, neteq_->InsertPacket(rtp_header, payload));
+ rtp_header.sequenceNumber += 2;
+ rtp_header.timestamp += 2 * kPayloadLengthSamples;
+ rtp_header.extension.audioLevel = 2;
+ payload[0] = 2;
+ clock_.AdvanceTimeMilliseconds(2000);
+ expected_receive_time = clock_.CurrentTime();
+ EXPECT_EQ(NetEq::kOK, neteq_->InsertPacket(rtp_header, payload));
+
+ // Expect only the second packet to be decoded (the one with "2" as the first
+ // payload byte).
+ EXPECT_CALL(mock_decoder, DecodeInternal(Pointee(2), kPayloadLengthBytes,
+ kSampleRateHz, _, _))
+ .WillOnce(DoAll(SetArrayArgument<3>(dummy_output,
+ dummy_output + kPayloadLengthSamples),
+ SetArgPointee<4>(AudioDecoder::kSpeech),
+ Return(rtc::checked_cast<int>(kPayloadLengthSamples))));
+
+ // Pull audio once.
+ EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output, &muted));
+ ASSERT_EQ(kMaxOutputSize, output.samples_per_channel_);
+ EXPECT_EQ(1u, output.num_channels_);
+ EXPECT_EQ(AudioFrame::kNormalSpeech, output.speech_type_);
+
+ // Now check the packet buffer, and make sure it is empty, since the
+ // out-of-order packet should have been discarded.
+ EXPECT_TRUE(packet_buffer_->Empty());
+
+ // NetEq `packets_discarded` should capture this packet discard.
+ EXPECT_EQ(1u, neteq_->GetLifetimeStatistics().packets_discarded);
+
+ // Verify `output.packet_infos_`. Expect to only see the second packet.
+ ASSERT_THAT(output.packet_infos_, SizeIs(1));
+ {
+ const auto& packet_info = output.packet_infos_[0];
+ EXPECT_EQ(packet_info.ssrc(), rtp_header.ssrc);
+ EXPECT_THAT(packet_info.csrcs(), IsEmpty());
+ EXPECT_EQ(packet_info.rtp_timestamp(), rtp_header.timestamp);
+ EXPECT_EQ(packet_info.audio_level(), rtp_header.extension.audioLevel);
+ EXPECT_EQ(packet_info.receive_time(), expected_receive_time);
+ }
+
+ EXPECT_CALL(mock_decoder, Die());
+}
+
+// This test verifies that NetEq can handle the situation where the first
+// incoming packet is rejected.
+TEST_F(NetEqImplTest, FirstPacketUnknown) {
+ UseNoMocks();
+ CreateInstance();
+
+ const uint8_t kPayloadType = 17; // Just an arbitrary number.
+ const int kSampleRateHz = 8000;
+ const size_t kPayloadLengthSamples =
+ static_cast<size_t>(10 * kSampleRateHz / 1000); // 10 ms.
+ const size_t kPayloadLengthBytes = kPayloadLengthSamples * 2;
+ uint8_t payload[kPayloadLengthBytes] = {0};
+ RTPHeader rtp_header;
+ rtp_header.payloadType = kPayloadType;
+ rtp_header.sequenceNumber = 0x1234;
+ rtp_header.timestamp = 0x12345678;
+ rtp_header.ssrc = 0x87654321;
+
+ // Insert one packet. Note that we have not registered any payload type, so
+ // this packet will be rejected.
+ EXPECT_EQ(NetEq::kFail, neteq_->InsertPacket(rtp_header, payload));
+
+ // Pull audio once.
+ const size_t kMaxOutputSize = static_cast<size_t>(10 * kSampleRateHz / 1000);
+ AudioFrame output;
+ bool muted;
+ EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output, &muted));
+ ASSERT_LE(output.samples_per_channel_, kMaxOutputSize);
+ EXPECT_EQ(kMaxOutputSize, output.samples_per_channel_);
+ EXPECT_EQ(1u, output.num_channels_);
+ EXPECT_EQ(AudioFrame::kPLC, output.speech_type_);
+ EXPECT_THAT(output.packet_infos_, IsEmpty());
+
+ // Register the payload type.
+ EXPECT_TRUE(neteq_->RegisterPayloadType(kPayloadType,
+ SdpAudioFormat("l16", 8000, 1)));
+
+ // Insert 10 packets.
+ for (size_t i = 0; i < 10; ++i) {
+ rtp_header.sequenceNumber++;
+ rtp_header.timestamp += kPayloadLengthSamples;
+ EXPECT_EQ(NetEq::kOK, neteq_->InsertPacket(rtp_header, payload));
+ EXPECT_EQ(i + 1, packet_buffer_->NumPacketsInBuffer());
+ }
+
+ // Pull audio repeatedly and make sure we get normal output, that is not PLC.
+ for (size_t i = 0; i < 3; ++i) {
+ EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output, &muted));
+ ASSERT_LE(output.samples_per_channel_, kMaxOutputSize);
+ EXPECT_EQ(kMaxOutputSize, output.samples_per_channel_);
+ EXPECT_EQ(1u, output.num_channels_);
+ EXPECT_EQ(AudioFrame::kNormalSpeech, output.speech_type_)
+ << "NetEq did not decode the packets as expected.";
+ EXPECT_THAT(output.packet_infos_, SizeIs(1));
+ }
+}
+
+// This test verifies that audio interruption is not logged for the initial
+ // PLC period before the first packet is decoded.
+// TODO(henrik.lundin) Maybe move this test to neteq_network_stats_unittest.cc.
+// Make the test parametrized, so that we can test with different initial
+// sample rates in NetEq.
+class NetEqImplTestSampleRateParameter
+ : public NetEqImplTest,
+ public testing::WithParamInterface<int> {
+ protected:
+ NetEqImplTestSampleRateParameter()
+ : NetEqImplTest(), initial_sample_rate_hz_(GetParam()) {
+ config_.sample_rate_hz = initial_sample_rate_hz_;
+ }
+
+ const int initial_sample_rate_hz_;
+};
+
+class NetEqImplTestSdpFormatParameter
+ : public NetEqImplTest,
+ public testing::WithParamInterface<SdpAudioFormat> {
+ protected:
+ NetEqImplTestSdpFormatParameter()
+ : NetEqImplTest(), sdp_format_(GetParam()) {}
+ const SdpAudioFormat sdp_format_;
+};
+
+// This test does the following:
+// 0. Set up NetEq with initial sample rate given by test parameter, and a codec
+// sample rate of 16000.
+// 1. Start calling GetAudio before inserting any encoded audio. The audio
+// produced will be PLC.
+// 2. Insert a number of encoded audio packets.
+// 3. Keep calling GetAudio and verify that no audio interruption was logged.
+// Call GetAudio until NetEq runs out of data again; PLC starts.
+// 4. Insert one more packet.
+// 5. Call GetAudio until that packet is decoded and the PLC ends.
+
+TEST_P(NetEqImplTestSampleRateParameter,
+ NoAudioInterruptionLoggedBeforeFirstDecode) {
+ UseNoMocks();
+ CreateInstance();
+
+ const uint8_t kPayloadType = 17; // Just an arbitrary number.
+ const int kPayloadSampleRateHz = 16000;
+ const size_t kPayloadLengthSamples =
+ static_cast<size_t>(10 * kPayloadSampleRateHz / 1000); // 10 ms.
+ const size_t kPayloadLengthBytes = kPayloadLengthSamples * 2;
+ uint8_t payload[kPayloadLengthBytes] = {0};
+ RTPHeader rtp_header;
+ rtp_header.payloadType = kPayloadType;
+ rtp_header.sequenceNumber = 0x1234;
+ rtp_header.timestamp = 0x12345678;
+ rtp_header.ssrc = 0x87654321;
+
+ // Register the payload type.
+ EXPECT_TRUE(neteq_->RegisterPayloadType(
+ kPayloadType, SdpAudioFormat("l16", kPayloadSampleRateHz, 1)));
+
+ // Pull audio several times. No packets have been inserted yet.
+ const size_t initial_output_size =
+ static_cast<size_t>(10 * initial_sample_rate_hz_ / 1000); // 10 ms
+ AudioFrame output;
+ bool muted;
+ for (int i = 0; i < 100; ++i) {
+ EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output, &muted));
+ EXPECT_EQ(initial_output_size, output.samples_per_channel_);
+ EXPECT_EQ(1u, output.num_channels_);
+ EXPECT_NE(AudioFrame::kNormalSpeech, output.speech_type_);
+ EXPECT_THAT(output.packet_infos_, IsEmpty());
+ }
+
+ // Lambda for inserting packets.
+ auto insert_packet = [&]() {
+ rtp_header.sequenceNumber++;
+ rtp_header.timestamp += kPayloadLengthSamples;
+ EXPECT_EQ(NetEq::kOK, neteq_->InsertPacket(rtp_header, payload));
+ };
+ // Insert 10 packets.
+ for (size_t i = 0; i < 10; ++i) {
+ insert_packet();
+ EXPECT_EQ(i + 1, packet_buffer_->NumPacketsInBuffer());
+ }
+
+ // Pull audio repeatedly and make sure we get normal output, that is not PLC.
+ constexpr size_t kOutputSize =
+ static_cast<size_t>(10 * kPayloadSampleRateHz / 1000); // 10 ms
+ for (size_t i = 0; i < 3; ++i) {
+ EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output, &muted));
+ EXPECT_EQ(kOutputSize, output.samples_per_channel_);
+ EXPECT_EQ(1u, output.num_channels_);
+ EXPECT_EQ(AudioFrame::kNormalSpeech, output.speech_type_)
+ << "NetEq did not decode the packets as expected.";
+ EXPECT_THAT(output.packet_infos_, SizeIs(1));
+ }
+
+ // Verify that no interruption was logged.
+ auto lifetime_stats = neteq_->GetLifetimeStatistics();
+ EXPECT_EQ(0, lifetime_stats.interruption_count);
+
+ // Keep pulling audio data until a new PLC period is started.
+ size_t count_loops = 0;
+ while (output.speech_type_ == AudioFrame::kNormalSpeech) {
+ // Make sure we don't hang the test if we never go to PLC.
+ ASSERT_LT(++count_loops, 100u);
+ EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output, &muted));
+ }
+
+  // Insert a few packets to avoid postponing decoding after expand.
+ for (size_t i = 0; i < 5; ++i) {
+ insert_packet();
+ }
+
+ // Pull audio until the newly inserted packet is decoded and the PLC ends.
+ while (output.speech_type_ != AudioFrame::kNormalSpeech) {
+    // Make sure we don't hang the test if the PLC period never ends.
+ ASSERT_LT(++count_loops, 100u);
+ EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output, &muted));
+ }
+
+ // Verify that no interruption was logged.
+ lifetime_stats = neteq_->GetLifetimeStatistics();
+ EXPECT_EQ(0, lifetime_stats.interruption_count);
+}
+
+// This test does the following:
+// 0. Set up NetEq with initial sample rate given by test parameter, and a codec
+// sample rate of 16000.
+// 1. Insert a number of encoded audio packets.
+// 2. Call GetAudio and verify that decoded audio is produced.
+// 3. Keep calling GetAudio until NetEq runs out of data; PLC starts.
+// 4. Keep calling GetAudio until PLC has been produced for at least 150 ms.
+// 5. Insert one more packet.
+// 6. Call GetAudio until that packet is decoded and the PLC ends.
+// 7. Verify that an interruption was logged.
+
+TEST_P(NetEqImplTestSampleRateParameter, AudioInterruptionLogged) {
+ UseNoMocks();
+ CreateInstance();
+
+ const uint8_t kPayloadType = 17; // Just an arbitrary number.
+ const int kPayloadSampleRateHz = 16000;
+ const size_t kPayloadLengthSamples =
+ static_cast<size_t>(10 * kPayloadSampleRateHz / 1000); // 10 ms.
+ const size_t kPayloadLengthBytes = kPayloadLengthSamples * 2;
+ uint8_t payload[kPayloadLengthBytes] = {0};
+ RTPHeader rtp_header;
+ rtp_header.payloadType = kPayloadType;
+ rtp_header.sequenceNumber = 0x1234;
+ rtp_header.timestamp = 0x12345678;
+ rtp_header.ssrc = 0x87654321;
+
+ // Register the payload type.
+ EXPECT_TRUE(neteq_->RegisterPayloadType(
+ kPayloadType, SdpAudioFormat("l16", kPayloadSampleRateHz, 1)));
+
+ // Lambda for inserting packets.
+ auto insert_packet = [&]() {
+ rtp_header.sequenceNumber++;
+ rtp_header.timestamp += kPayloadLengthSamples;
+ EXPECT_EQ(NetEq::kOK, neteq_->InsertPacket(rtp_header, payload));
+ };
+ // Insert 10 packets.
+ for (size_t i = 0; i < 10; ++i) {
+ insert_packet();
+ EXPECT_EQ(i + 1, packet_buffer_->NumPacketsInBuffer());
+ }
+
+ AudioFrame output;
+ bool muted;
+ // Keep pulling audio data until a new PLC period is started.
+ size_t count_loops = 0;
+ do {
+ // Make sure we don't hang the test if we never go to PLC.
+ ASSERT_LT(++count_loops, 100u);
+ EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output, &muted));
+ } while (output.speech_type_ == AudioFrame::kNormalSpeech);
+
+  // Pull audio 15 times, which produces 150 ms of output audio. This should
+  // all be produced as PLC. The total length of the gap will then be 150 ms,
+  // plus a fraction of a 10 ms frame at the start and at the end of the PLC
+  // period; in total, less than 170 ms.
+ for (size_t i = 0; i < 15; ++i) {
+ EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output, &muted));
+ EXPECT_NE(AudioFrame::kNormalSpeech, output.speech_type_);
+ }
+
+  // Insert a few packets to avoid postponed decoding after expand.
+ for (size_t i = 0; i < 5; ++i) {
+ insert_packet();
+ }
+
+ // Pull audio until the newly inserted packet is decoded and the PLC ends.
+ while (output.speech_type_ != AudioFrame::kNormalSpeech) {
+    // Make sure we don't hang the test if the PLC never ends.
+ ASSERT_LT(++count_loops, 100u);
+ EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output, &muted));
+ }
+
+ // Verify that the interruption was logged.
+ auto lifetime_stats = neteq_->GetLifetimeStatistics();
+ EXPECT_EQ(1, lifetime_stats.interruption_count);
+ EXPECT_GT(lifetime_stats.total_interruption_duration_ms, 150);
+ EXPECT_LT(lifetime_stats.total_interruption_duration_ms, 170);
+}
+
+INSTANTIATE_TEST_SUITE_P(SampleRates,
+ NetEqImplTestSampleRateParameter,
+ testing::Values(8000, 16000, 32000, 48000));
+
+TEST_P(NetEqImplTestSdpFormatParameter, GetNackListScaledTimestamp) {
+ UseNoMocks();
+ CreateInstance();
+
+ neteq_->EnableNack(128);
+
+ const uint8_t kPayloadType = 17; // Just an arbitrary number.
+ const int kPayloadSampleRateHz = sdp_format_.clockrate_hz;
+ const size_t kPayloadLengthSamples =
+ static_cast<size_t>(10 * kPayloadSampleRateHz / 1000); // 10 ms.
+ const size_t kPayloadLengthBytes = kPayloadLengthSamples * 2;
+ std::vector<uint8_t> payload(kPayloadLengthBytes, 0);
+ RTPHeader rtp_header;
+ rtp_header.payloadType = kPayloadType;
+ rtp_header.sequenceNumber = 0x1234;
+ rtp_header.timestamp = 0x12345678;
+ rtp_header.ssrc = 0x87654321;
+
+ EXPECT_TRUE(neteq_->RegisterPayloadType(kPayloadType, sdp_format_));
+
+ auto insert_packet = [&](bool lost = false) {
+ rtp_header.sequenceNumber++;
+ rtp_header.timestamp += kPayloadLengthSamples;
+ if (!lost)
+ EXPECT_EQ(NetEq::kOK, neteq_->InsertPacket(rtp_header, payload));
+ };
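+  // When `lost` is true, the sequence number and timestamp still advance, so
+  // the skipped packet shows up as missing on the receive side.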
+
+ // Insert and decode 10 packets.
+ for (size_t i = 0; i < 10; ++i) {
+ insert_packet();
+ }
+ AudioFrame output;
+ size_t count_loops = 0;
+ do {
+ bool muted;
+ // Make sure we don't hang the test if we never go to PLC.
+ ASSERT_LT(++count_loops, 100u);
+ EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output, &muted));
+ } while (output.speech_type_ == AudioFrame::kNormalSpeech);
+
+ insert_packet();
+
+ insert_packet(/*lost=*/true);
+
+  // Insert a few more packets to ensure the lost packet gets marked as
+  // missing.
+ for (int i = 0; i < 5; ++i) {
+ insert_packet();
+ }
+
+  // The missing packet is still recoverable with a 5 ms RTT.
+ EXPECT_THAT(neteq_->GetNackList(5), Not(IsEmpty()));
+
+  // No packets should have a TimeToPlay larger than 500 ms.
+ EXPECT_THAT(neteq_->GetNackList(500), IsEmpty());
+}
+
+INSTANTIATE_TEST_SUITE_P(GetNackList,
+ NetEqImplTestSdpFormatParameter,
+ testing::Values(SdpAudioFormat("g722", 8000, 1),
+ SdpAudioFormat("opus", 48000, 2)));
+
+// This test verifies that NetEq can handle comfort noise and enters/quits codec
+// internal CNG mode properly.
+TEST_F(NetEqImplTest, CodecInternalCng) {
+ UseNoMocks();
+ // Create a mock decoder object.
+ MockAudioDecoder mock_decoder;
+ CreateInstance(
+ rtc::make_ref_counted<test::AudioDecoderProxyFactory>(&mock_decoder));
+
+ const uint8_t kPayloadType = 17; // Just an arbitrary number.
+ const int kSampleRateKhz = 48;
+ const size_t kPayloadLengthSamples =
+ static_cast<size_t>(20 * kSampleRateKhz); // 20 ms.
+ const size_t kPayloadLengthBytes = 10;
+ uint8_t payload[kPayloadLengthBytes] = {0};
+ int16_t dummy_output[kPayloadLengthSamples] = {0};
+
+ RTPHeader rtp_header;
+ rtp_header.payloadType = kPayloadType;
+ rtp_header.sequenceNumber = 0x1234;
+ rtp_header.timestamp = 0x12345678;
+ rtp_header.ssrc = 0x87654321;
+
+ EXPECT_CALL(mock_decoder, Reset()).WillRepeatedly(Return());
+ EXPECT_CALL(mock_decoder, SampleRateHz())
+ .WillRepeatedly(Return(kSampleRateKhz * 1000));
+ EXPECT_CALL(mock_decoder, Channels()).WillRepeatedly(Return(1));
+ EXPECT_CALL(mock_decoder, PacketDuration(_, kPayloadLengthBytes))
+ .WillRepeatedly(Return(rtc::checked_cast<int>(kPayloadLengthSamples)));
+  // Packet duration when asking the decoder for more CNG data (without a new
+ // packet).
+ EXPECT_CALL(mock_decoder, PacketDuration(nullptr, 0))
+ .WillRepeatedly(Return(rtc::checked_cast<int>(kPayloadLengthSamples)));
+
+ EXPECT_TRUE(neteq_->RegisterPayloadType(kPayloadType,
+ SdpAudioFormat("opus", 48000, 2)));
+
+ struct Packet {
+ int sequence_number_delta;
+ int timestamp_delta;
+ AudioDecoder::SpeechType decoder_output_type;
+ };
+ std::vector<Packet> packets = {
+ {0, 0, AudioDecoder::kSpeech},
+ {1, kPayloadLengthSamples, AudioDecoder::kComfortNoise},
+ {2, 2 * kPayloadLengthSamples, AudioDecoder::kSpeech},
+ {1, kPayloadLengthSamples, AudioDecoder::kSpeech}};
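+  // Note: the third packet advances the sequence number and timestamp by two
+  // frames, leaving a one-frame gap after the comfort-noise packet; this
+  // models a DTX period in which nothing is sent.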
+
+ for (size_t i = 0; i < packets.size(); ++i) {
+ rtp_header.sequenceNumber += packets[i].sequence_number_delta;
+ rtp_header.timestamp += packets[i].timestamp_delta;
+ payload[0] = i;
+ EXPECT_EQ(NetEq::kOK, neteq_->InsertPacket(rtp_header, payload));
+
+ // Pointee(x) verifies that first byte of the payload equals x, this makes
+ // it possible to verify that the correct payload is fed to Decode().
+ EXPECT_CALL(mock_decoder, DecodeInternal(Pointee(i), kPayloadLengthBytes,
+ kSampleRateKhz * 1000, _, _))
+ .WillOnce(DoAll(SetArrayArgument<3>(
+ dummy_output, dummy_output + kPayloadLengthSamples),
+ SetArgPointee<4>(packets[i].decoder_output_type),
+ Return(rtc::checked_cast<int>(kPayloadLengthSamples))));
+ }
+
+ // Expect comfort noise to be returned by the decoder.
+ EXPECT_CALL(mock_decoder,
+ DecodeInternal(IsNull(), 0, kSampleRateKhz * 1000, _, _))
+ .WillOnce(DoAll(SetArrayArgument<3>(dummy_output,
+ dummy_output + kPayloadLengthSamples),
+ SetArgPointee<4>(AudioDecoder::kComfortNoise),
+ Return(rtc::checked_cast<int>(kPayloadLengthSamples))));
+
+ std::vector<AudioFrame::SpeechType> expected_output = {
+ AudioFrame::kNormalSpeech, AudioFrame::kCNG, AudioFrame::kNormalSpeech};
+ size_t output_index = 0;
+
+ int timeout_counter = 0;
+ while (!packet_buffer_->Empty()) {
+ ASSERT_LT(timeout_counter++, 20) << "Test timed out";
+ AudioFrame output;
+ bool muted;
+ EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output, &muted));
+ if (output_index + 1 < expected_output.size() &&
+ output.speech_type_ == expected_output[output_index + 1]) {
+ ++output_index;
+ } else {
+ EXPECT_EQ(output.speech_type_, expected_output[output_index]);
+ }
+ }
+
+ EXPECT_CALL(mock_decoder, Die());
+}
+
+TEST_F(NetEqImplTest, UnsupportedDecoder) {
+ UseNoMocks();
+ ::testing::NiceMock<MockAudioDecoder> decoder;
+
+ CreateInstance(
+ rtc::make_ref_counted<test::AudioDecoderProxyFactory>(&decoder));
+ static const size_t kNetEqMaxFrameSize = 5760; // 120 ms @ 48 kHz.
+ static const size_t kChannels = 2;
+
+ const uint8_t kPayloadType = 17; // Just an arbitrary number.
+ const int kSampleRateHz = 8000;
+
+ const size_t kPayloadLengthSamples =
+ static_cast<size_t>(10 * kSampleRateHz / 1000); // 10 ms.
+ const size_t kPayloadLengthBytes = 1;
+ uint8_t payload[kPayloadLengthBytes] = {0};
+ int16_t dummy_output[kPayloadLengthSamples * kChannels] = {0};
+ RTPHeader rtp_header;
+ rtp_header.payloadType = kPayloadType;
+ rtp_header.sequenceNumber = 0x1234;
+ rtp_header.timestamp = 0x12345678;
+ rtp_header.ssrc = 0x87654321;
+
+ const uint8_t kFirstPayloadValue = 1;
+ const uint8_t kSecondPayloadValue = 2;
+
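+  // Reporting a packet duration larger than kNetEqMaxFrameSize makes the
+  // first payload unsupported; the expectations below verify that it is
+  // never decoded.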
+ EXPECT_CALL(decoder,
+ PacketDuration(Pointee(kFirstPayloadValue), kPayloadLengthBytes))
+ .Times(AtLeast(1))
+ .WillRepeatedly(Return(rtc::checked_cast<int>(kNetEqMaxFrameSize + 1)));
+
+ EXPECT_CALL(decoder, DecodeInternal(Pointee(kFirstPayloadValue), _, _, _, _))
+ .Times(0);
+
+ EXPECT_CALL(decoder, DecodeInternal(Pointee(kSecondPayloadValue),
+ kPayloadLengthBytes, kSampleRateHz, _, _))
+ .Times(1)
+ .WillOnce(DoAll(
+ SetArrayArgument<3>(dummy_output,
+ dummy_output + kPayloadLengthSamples * kChannels),
+ SetArgPointee<4>(AudioDecoder::kSpeech),
+ Return(static_cast<int>(kPayloadLengthSamples * kChannels))));
+
+ EXPECT_CALL(decoder,
+ PacketDuration(Pointee(kSecondPayloadValue), kPayloadLengthBytes))
+ .Times(AtLeast(1))
+ .WillRepeatedly(Return(rtc::checked_cast<int>(kNetEqMaxFrameSize)));
+
+ EXPECT_CALL(decoder, SampleRateHz()).WillRepeatedly(Return(kSampleRateHz));
+
+ EXPECT_CALL(decoder, Channels()).WillRepeatedly(Return(kChannels));
+
+ EXPECT_TRUE(neteq_->RegisterPayloadType(kPayloadType,
+ SdpAudioFormat("L16", 8000, 1)));
+
+ // Insert one packet.
+ payload[0] = kFirstPayloadValue; // This will make Decode() fail.
+ EXPECT_EQ(NetEq::kOK, neteq_->InsertPacket(rtp_header, payload));
+
+ // Insert another packet.
+ payload[0] = kSecondPayloadValue; // This will make Decode() successful.
+ rtp_header.sequenceNumber++;
+ // The second timestamp needs to be at least 30 ms after the first to make
+ // the second packet get decoded.
+ rtp_header.timestamp += 3 * kPayloadLengthSamples;
+ EXPECT_EQ(NetEq::kOK, neteq_->InsertPacket(rtp_header, payload));
+
+ AudioFrame output;
+ bool muted;
+ // First call to GetAudio will try to decode the "faulty" packet.
+ // Expect kFail return value.
+ EXPECT_EQ(NetEq::kFail, neteq_->GetAudio(&output, &muted));
+ // Output size and number of channels should be correct.
+ const size_t kExpectedOutputSize = 10 * (kSampleRateHz / 1000) * kChannels;
+ EXPECT_EQ(kExpectedOutputSize, output.samples_per_channel_ * kChannels);
+ EXPECT_EQ(kChannels, output.num_channels_);
+ EXPECT_THAT(output.packet_infos_, IsEmpty());
+
+ // Second call to GetAudio will decode the packet that is ok. No errors are
+ // expected.
+ EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output, &muted));
+ EXPECT_EQ(kExpectedOutputSize, output.samples_per_channel_ * kChannels);
+ EXPECT_EQ(kChannels, output.num_channels_);
+ EXPECT_THAT(output.packet_infos_, SizeIs(1));
+
+  // Die isn't called through NiceMock (since it's called by the
+  // MockAudioDecoder destructor), so it needs to be mocked explicitly.
+ EXPECT_CALL(decoder, Die());
+}
+
+// This test inserts packets until the buffer is flushed. After that, it asks
+// NetEq for the network statistics. The purpose of the test is to make sure
+// that even though the buffer size increment is negative (which it becomes when
+// the packet causing a flush is inserted), the packet length stored in the
+// decision logic remains valid.
+TEST_F(NetEqImplTest, FloodBufferAndGetNetworkStats) {
+ UseNoMocks();
+ CreateInstance();
+
+ const size_t kPayloadLengthSamples = 80;
+ const size_t kPayloadLengthBytes = 2 * kPayloadLengthSamples; // PCM 16-bit.
+ const uint8_t kPayloadType = 17; // Just an arbitrary number.
+ uint8_t payload[kPayloadLengthBytes] = {0};
+ RTPHeader rtp_header;
+ rtp_header.payloadType = kPayloadType;
+ rtp_header.sequenceNumber = 0x1234;
+ rtp_header.timestamp = 0x12345678;
+ rtp_header.ssrc = 0x87654321;
+
+ EXPECT_TRUE(neteq_->RegisterPayloadType(kPayloadType,
+ SdpAudioFormat("l16", 8000, 1)));
+
+ // Insert packets until the buffer flushes.
+ for (size_t i = 0; i <= config_.max_packets_in_buffer; ++i) {
+ EXPECT_EQ(i, packet_buffer_->NumPacketsInBuffer());
+ EXPECT_EQ(NetEq::kOK, neteq_->InsertPacket(rtp_header, payload));
+ rtp_header.timestamp += rtc::checked_cast<uint32_t>(kPayloadLengthSamples);
+ ++rtp_header.sequenceNumber;
+ }
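+  // The flush drops all buffered packets except the one whose insertion
+  // triggered it.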
+ EXPECT_EQ(1u, packet_buffer_->NumPacketsInBuffer());
+
+ // Ask for network statistics. This should not crash.
+ NetEqNetworkStatistics stats;
+ EXPECT_EQ(NetEq::kOK, neteq_->NetworkStatistics(&stats));
+}
+
+TEST_F(NetEqImplTest, DecodedPayloadTooShort) {
+ UseNoMocks();
+ // Create a mock decoder object.
+ MockAudioDecoder mock_decoder;
+
+ CreateInstance(
+ rtc::make_ref_counted<test::AudioDecoderProxyFactory>(&mock_decoder));
+
+ const uint8_t kPayloadType = 17; // Just an arbitrary number.
+ const int kSampleRateHz = 8000;
+ const size_t kPayloadLengthSamples =
+ static_cast<size_t>(10 * kSampleRateHz / 1000); // 10 ms.
+ const size_t kPayloadLengthBytes = 2 * kPayloadLengthSamples;
+ uint8_t payload[kPayloadLengthBytes] = {0};
+ RTPHeader rtp_header;
+ rtp_header.payloadType = kPayloadType;
+ rtp_header.sequenceNumber = 0x1234;
+ rtp_header.timestamp = 0x12345678;
+ rtp_header.ssrc = 0x87654321;
+
+ EXPECT_CALL(mock_decoder, Reset()).WillRepeatedly(Return());
+ EXPECT_CALL(mock_decoder, SampleRateHz())
+ .WillRepeatedly(Return(kSampleRateHz));
+ EXPECT_CALL(mock_decoder, Channels()).WillRepeatedly(Return(1));
+ EXPECT_CALL(mock_decoder, PacketDuration(_, _))
+ .WillRepeatedly(Return(rtc::checked_cast<int>(kPayloadLengthSamples)));
+ int16_t dummy_output[kPayloadLengthSamples] = {0};
+ // The below expectation will make the mock decoder write
+ // `kPayloadLengthSamples` - 5 zeros to the output array, and mark it as
+  // speech. That is, the decoded length is 5 samples shorter than expected.
+ EXPECT_CALL(mock_decoder,
+ DecodeInternal(_, kPayloadLengthBytes, kSampleRateHz, _, _))
+ .WillOnce(
+ DoAll(SetArrayArgument<3>(dummy_output,
+ dummy_output + kPayloadLengthSamples - 5),
+ SetArgPointee<4>(AudioDecoder::kSpeech),
+ Return(rtc::checked_cast<int>(kPayloadLengthSamples - 5))));
+ EXPECT_TRUE(neteq_->RegisterPayloadType(kPayloadType,
+ SdpAudioFormat("L16", 8000, 1)));
+
+ // Insert one packet.
+ EXPECT_EQ(NetEq::kOK, neteq_->InsertPacket(rtp_header, payload));
+
+ EXPECT_EQ(5u, neteq_->sync_buffer_for_test()->FutureLength());
+
+ // Pull audio once.
+ const size_t kMaxOutputSize = static_cast<size_t>(10 * kSampleRateHz / 1000);
+ AudioFrame output;
+ bool muted;
+ EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output, &muted));
+ ASSERT_EQ(kMaxOutputSize, output.samples_per_channel_);
+ EXPECT_EQ(1u, output.num_channels_);
+ EXPECT_EQ(AudioFrame::kNormalSpeech, output.speech_type_);
+ EXPECT_THAT(output.packet_infos_, SizeIs(1));
+
+ EXPECT_CALL(mock_decoder, Die());
+}
+
+// This test checks the behavior of NetEq when audio decoder fails.
+TEST_F(NetEqImplTest, DecodingError) {
+ UseNoMocks();
+ // Create a mock decoder object.
+ MockAudioDecoder mock_decoder;
+
+ CreateInstance(
+ rtc::make_ref_counted<test::AudioDecoderProxyFactory>(&mock_decoder));
+
+ const uint8_t kPayloadType = 17; // Just an arbitrary number.
+ const int kSampleRateHz = 8000;
+ const int kDecoderErrorCode = -97; // Any negative number.
+
+  // We let the decoder return 5 ms each time; two packets thus make 10 ms.
+ const size_t kFrameLengthSamples =
+ static_cast<size_t>(5 * kSampleRateHz / 1000);
+
+ const size_t kPayloadLengthBytes = 1; // This can be arbitrary.
+
+ uint8_t payload[kPayloadLengthBytes] = {0};
+
+ RTPHeader rtp_header;
+ rtp_header.payloadType = kPayloadType;
+ rtp_header.sequenceNumber = 0x1234;
+ rtp_header.timestamp = 0x12345678;
+ rtp_header.ssrc = 0x87654321;
+
+ EXPECT_CALL(mock_decoder, Reset()).WillRepeatedly(Return());
+ EXPECT_CALL(mock_decoder, SampleRateHz())
+ .WillRepeatedly(Return(kSampleRateHz));
+ EXPECT_CALL(mock_decoder, Channels()).WillRepeatedly(Return(1));
+ EXPECT_CALL(mock_decoder, PacketDuration(_, _))
+ .WillRepeatedly(Return(rtc::checked_cast<int>(kFrameLengthSamples)));
+ EXPECT_CALL(mock_decoder, ErrorCode()).WillOnce(Return(kDecoderErrorCode));
+ EXPECT_CALL(mock_decoder, HasDecodePlc()).WillOnce(Return(false));
+ int16_t dummy_output[kFrameLengthSamples] = {0};
+
+ {
+ InSequence sequence; // Dummy variable.
+ // Mock decoder works normally the first time.
+ EXPECT_CALL(mock_decoder,
+ DecodeInternal(_, kPayloadLengthBytes, kSampleRateHz, _, _))
+ .Times(3)
+ .WillRepeatedly(
+ DoAll(SetArrayArgument<3>(dummy_output,
+ dummy_output + kFrameLengthSamples),
+ SetArgPointee<4>(AudioDecoder::kSpeech),
+ Return(rtc::checked_cast<int>(kFrameLengthSamples))))
+ .RetiresOnSaturation();
+
+    // Then the mock decoder fails. A common reason for failure is a buffer
+    // that is too short.
+ EXPECT_CALL(mock_decoder,
+ DecodeInternal(_, kPayloadLengthBytes, kSampleRateHz, _, _))
+ .WillOnce(Return(-1))
+ .RetiresOnSaturation();
+
+ // Mock decoder finally returns to normal.
+ EXPECT_CALL(mock_decoder,
+ DecodeInternal(_, kPayloadLengthBytes, kSampleRateHz, _, _))
+ .Times(2)
+ .WillRepeatedly(
+ DoAll(SetArrayArgument<3>(dummy_output,
+ dummy_output + kFrameLengthSamples),
+ SetArgPointee<4>(AudioDecoder::kSpeech),
+ Return(rtc::checked_cast<int>(kFrameLengthSamples))));
+ }
+
+ EXPECT_TRUE(neteq_->RegisterPayloadType(kPayloadType,
+ SdpAudioFormat("L16", 8000, 1)));
+
+ // Insert packets.
+ for (int i = 0; i < 20; ++i) {
+ rtp_header.sequenceNumber += 1;
+ rtp_header.timestamp += kFrameLengthSamples;
+ EXPECT_EQ(NetEq::kOK, neteq_->InsertPacket(rtp_header, payload));
+ }
+
+ // Pull audio.
+ const size_t kMaxOutputSize = static_cast<size_t>(10 * kSampleRateHz / 1000);
+ AudioFrame output;
+ bool muted;
+ EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output, &muted));
+ EXPECT_EQ(kMaxOutputSize, output.samples_per_channel_);
+ EXPECT_EQ(1u, output.num_channels_);
+ EXPECT_EQ(AudioFrame::kNormalSpeech, output.speech_type_);
+ EXPECT_THAT(output.packet_infos_, SizeIs(2)); // 5 ms packets vs 10 ms output
+
+ // Pull audio again. Decoder fails.
+ EXPECT_EQ(NetEq::kFail, neteq_->GetAudio(&output, &muted));
+ EXPECT_EQ(kMaxOutputSize, output.samples_per_channel_);
+ EXPECT_EQ(1u, output.num_channels_);
+ // We are not expecting anything for output.speech_type_, since an error was
+ // returned.
+
+ // Pull audio again, should behave normal.
+ EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output, &muted));
+ EXPECT_EQ(kMaxOutputSize, output.samples_per_channel_);
+ EXPECT_EQ(1u, output.num_channels_);
+ EXPECT_EQ(AudioFrame::kNormalSpeech, output.speech_type_);
+ EXPECT_THAT(output.packet_infos_, SizeIs(2)); // 5 ms packets vs 10 ms output
+
+ EXPECT_CALL(mock_decoder, Die());
+}
+
+// This test checks the behavior of NetEq when audio decoder fails during CNG.
+TEST_F(NetEqImplTest, DecodingErrorDuringInternalCng) {
+ UseNoMocks();
+
+ // Create a mock decoder object.
+ MockAudioDecoder mock_decoder;
+ CreateInstance(
+ rtc::make_ref_counted<test::AudioDecoderProxyFactory>(&mock_decoder));
+
+ const uint8_t kPayloadType = 17; // Just an arbitrary number.
+ const int kSampleRateHz = 8000;
+ const int kDecoderErrorCode = -97; // Any negative number.
+
+  // We let the decoder return 5 ms each time; two packets thus make 10 ms.
+ const size_t kFrameLengthSamples =
+ static_cast<size_t>(5 * kSampleRateHz / 1000);
+
+ const size_t kPayloadLengthBytes = 1; // This can be arbitrary.
+
+ uint8_t payload[kPayloadLengthBytes] = {0};
+
+ RTPHeader rtp_header;
+ rtp_header.payloadType = kPayloadType;
+ rtp_header.sequenceNumber = 0x1234;
+ rtp_header.timestamp = 0x12345678;
+ rtp_header.ssrc = 0x87654321;
+
+ EXPECT_CALL(mock_decoder, Reset()).WillRepeatedly(Return());
+ EXPECT_CALL(mock_decoder, SampleRateHz())
+ .WillRepeatedly(Return(kSampleRateHz));
+ EXPECT_CALL(mock_decoder, Channels()).WillRepeatedly(Return(1));
+ EXPECT_CALL(mock_decoder, PacketDuration(_, _))
+ .WillRepeatedly(Return(rtc::checked_cast<int>(kFrameLengthSamples)));
+ EXPECT_CALL(mock_decoder, ErrorCode()).WillOnce(Return(kDecoderErrorCode));
+ int16_t dummy_output[kFrameLengthSamples] = {0};
+
+ {
+ InSequence sequence; // Dummy variable.
+ // Mock decoder works normally the first 2 times.
+ EXPECT_CALL(mock_decoder,
+ DecodeInternal(_, kPayloadLengthBytes, kSampleRateHz, _, _))
+ .Times(2)
+ .WillRepeatedly(
+ DoAll(SetArrayArgument<3>(dummy_output,
+ dummy_output + kFrameLengthSamples),
+ SetArgPointee<4>(AudioDecoder::kComfortNoise),
+ Return(rtc::checked_cast<int>(kFrameLengthSamples))))
+ .RetiresOnSaturation();
+
+    // Then the mock decoder fails. A common reason for failure is a buffer
+    // that is too short.
+ EXPECT_CALL(mock_decoder, DecodeInternal(nullptr, 0, kSampleRateHz, _, _))
+ .WillOnce(Return(-1))
+ .RetiresOnSaturation();
+
+ // Mock decoder finally returns to normal.
+ EXPECT_CALL(mock_decoder, DecodeInternal(nullptr, 0, kSampleRateHz, _, _))
+ .Times(2)
+ .WillRepeatedly(
+ DoAll(SetArrayArgument<3>(dummy_output,
+ dummy_output + kFrameLengthSamples),
+ SetArgPointee<4>(AudioDecoder::kComfortNoise),
+ Return(rtc::checked_cast<int>(kFrameLengthSamples))));
+ }
+
+ EXPECT_TRUE(neteq_->RegisterPayloadType(kPayloadType,
+ SdpAudioFormat("l16", 8000, 1)));
+
+  // Insert 2 packets. This will put NetEq into codec-internal CNG mode.
+ for (int i = 0; i < 2; ++i) {
+ rtp_header.sequenceNumber += 1;
+ rtp_header.timestamp += kFrameLengthSamples;
+ EXPECT_EQ(NetEq::kOK, neteq_->InsertPacket(rtp_header, payload));
+ }
+
+ // Pull audio.
+ const size_t kMaxOutputSize = static_cast<size_t>(10 * kSampleRateHz / 1000);
+ AudioFrame output;
+ bool muted;
+ EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output, &muted));
+ EXPECT_EQ(kMaxOutputSize, output.samples_per_channel_);
+ EXPECT_EQ(1u, output.num_channels_);
+ EXPECT_EQ(AudioFrame::kCNG, output.speech_type_);
+
+ // Pull audio again. Decoder fails.
+ EXPECT_EQ(NetEq::kFail, neteq_->GetAudio(&output, &muted));
+ EXPECT_EQ(kMaxOutputSize, output.samples_per_channel_);
+ EXPECT_EQ(1u, output.num_channels_);
+ // We are not expecting anything for output.speech_type_, since an error was
+ // returned.
+
+ // Pull audio again, should resume codec CNG.
+ EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output, &muted));
+ EXPECT_EQ(kMaxOutputSize, output.samples_per_channel_);
+ EXPECT_EQ(1u, output.num_channels_);
+ EXPECT_EQ(AudioFrame::kCNG, output.speech_type_);
+
+ EXPECT_CALL(mock_decoder, Die());
+}
+
+// Tests that the return value from last_output_sample_rate_hz() is equal to the
+// configured initial sample rate.
+TEST_F(NetEqImplTest, InitialLastOutputSampleRate) {
+ UseNoMocks();
+ config_.sample_rate_hz = 48000;
+ CreateInstance();
+ EXPECT_EQ(48000, neteq_->last_output_sample_rate_hz());
+}
+
+TEST_F(NetEqImplTest, TickTimerIncrement) {
+ UseNoMocks();
+ CreateInstance();
+ ASSERT_TRUE(tick_timer_);
+ EXPECT_EQ(0u, tick_timer_->ticks());
+ AudioFrame output;
+ bool muted;
+ EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output, &muted));
+ EXPECT_EQ(1u, tick_timer_->ticks());
+}
+
+TEST_F(NetEqImplTest, SetBaseMinimumDelay) {
+ UseNoMocks();
+ use_mock_neteq_controller_ = true;
+ CreateInstance();
+
+ EXPECT_CALL(*mock_neteq_controller_, SetBaseMinimumDelay(_))
+ .WillOnce(Return(true))
+ .WillOnce(Return(false));
+
+ const int delay_ms = 200;
+
+ EXPECT_EQ(true, neteq_->SetBaseMinimumDelayMs(delay_ms));
+ EXPECT_EQ(false, neteq_->SetBaseMinimumDelayMs(delay_ms));
+}
+
+TEST_F(NetEqImplTest, GetBaseMinimumDelayMs) {
+ UseNoMocks();
+ use_mock_neteq_controller_ = true;
+ CreateInstance();
+
+ const int delay_ms = 200;
+
+ EXPECT_CALL(*mock_neteq_controller_, GetBaseMinimumDelay())
+ .WillOnce(Return(delay_ms));
+
+ EXPECT_EQ(delay_ms, neteq_->GetBaseMinimumDelayMs());
+}
+
+TEST_F(NetEqImplTest, TargetDelayMs) {
+ UseNoMocks();
+ use_mock_neteq_controller_ = true;
+ CreateInstance();
+ constexpr int kTargetLevelMs = 510;
+ EXPECT_CALL(*mock_neteq_controller_, TargetLevelMs())
+ .WillOnce(Return(kTargetLevelMs));
+ EXPECT_EQ(510, neteq_->TargetDelayMs());
+}
+
+TEST_F(NetEqImplTest, InsertEmptyPacket) {
+ UseNoMocks();
+ use_mock_neteq_controller_ = true;
+ CreateInstance();
+
+ RTPHeader rtp_header;
+ rtp_header.payloadType = 17;
+ rtp_header.sequenceNumber = 0x1234;
+ rtp_header.timestamp = 0x12345678;
+ rtp_header.ssrc = 0x87654321;
+
+ EXPECT_CALL(*mock_neteq_controller_, RegisterEmptyPacket());
+ neteq_->InsertEmptyPacket(rtp_header);
+}
+
+TEST_F(NetEqImplTest, NotifyControllerOfReorderedPacket) {
+ using ::testing::AllOf;
+ using ::testing::Field;
+ UseNoMocks();
+ use_mock_neteq_controller_ = true;
+ CreateInstance();
+ EXPECT_CALL(*mock_neteq_controller_, GetDecision(_, _))
+ .Times(1)
+ .WillOnce(Return(NetEq::Operation::kNormal));
+
+ const int kPayloadLengthSamples = 80;
+ const size_t kPayloadLengthBytes = 2 * kPayloadLengthSamples; // PCM 16-bit.
+ const uint8_t kPayloadType = 17; // Just an arbitrary number.
+ uint8_t payload[kPayloadLengthBytes] = {0};
+ RTPHeader rtp_header;
+ rtp_header.payloadType = kPayloadType;
+ rtp_header.sequenceNumber = 0x1234;
+ rtp_header.timestamp = 0x12345678;
+ rtp_header.ssrc = 0x87654321;
+
+ EXPECT_TRUE(neteq_->RegisterPayloadType(kPayloadType,
+ SdpAudioFormat("l16", 8000, 1)));
+ EXPECT_EQ(NetEq::kOK, neteq_->InsertPacket(rtp_header, payload));
+ AudioFrame output;
+ bool muted;
+ EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output, &muted));
+
+ // Insert second packet that was sent before the first packet.
+ rtp_header.sequenceNumber -= 1;
+ rtp_header.timestamp -= kPayloadLengthSamples;
+ EXPECT_CALL(
+ *mock_neteq_controller_,
+ PacketArrived(
+ /*fs_hz*/ 8000,
+ /*should_update_stats*/ true,
+ /*info*/
+ AllOf(
+ Field(&NetEqController::PacketArrivedInfo::packet_length_samples,
+ kPayloadLengthSamples),
+ Field(&NetEqController::PacketArrivedInfo::main_sequence_number,
+ rtp_header.sequenceNumber),
+ Field(&NetEqController::PacketArrivedInfo::main_timestamp,
+ rtp_header.timestamp))));
+
+ EXPECT_EQ(NetEq::kOK, neteq_->InsertPacket(rtp_header, payload));
+}
+
+// When using a codec with 1000 channels, there should be no crashes.
+TEST_F(NetEqImplTest, NoCrashWith1000Channels) {
+ using ::testing::AllOf;
+ using ::testing::Field;
+ UseNoMocks();
+ use_mock_decoder_database_ = true;
+ enable_muted_state_ = true;
+ CreateInstance();
+ const size_t kPayloadLength = 100;
+ const uint8_t kPayloadType = 0;
+ const uint16_t kFirstSequenceNumber = 0x1234;
+ const uint32_t kFirstTimestamp = 0x12345678;
+ const uint32_t kSsrc = 0x87654321;
+ uint8_t payload[kPayloadLength] = {0};
+ RTPHeader rtp_header;
+ rtp_header.payloadType = kPayloadType;
+ rtp_header.sequenceNumber = kFirstSequenceNumber;
+ rtp_header.timestamp = kFirstTimestamp;
+ rtp_header.ssrc = kSsrc;
+ Packet fake_packet;
+ fake_packet.payload_type = kPayloadType;
+ fake_packet.sequence_number = kFirstSequenceNumber;
+ fake_packet.timestamp = kFirstTimestamp;
+
+ AudioDecoder* decoder = nullptr;
+
+ auto mock_decoder_factory = rtc::make_ref_counted<MockAudioDecoderFactory>();
+ EXPECT_CALL(*mock_decoder_factory, MakeAudioDecoderMock(_, _, _))
+ .WillOnce(Invoke([&](const SdpAudioFormat& format,
+ absl::optional<AudioCodecPairId> codec_pair_id,
+ std::unique_ptr<AudioDecoder>* dec) {
+ EXPECT_EQ("pcmu", format.name);
+ *dec = std::make_unique<AudioDecoderPcmU>(1000);
+ decoder = dec->get();
+ }));
+ DecoderDatabase::DecoderInfo info(SdpAudioFormat("pcmu", 8000, 1),
+ absl::nullopt, mock_decoder_factory.get());
+ // Expectations for decoder database.
+ EXPECT_CALL(*mock_decoder_database_, GetDecoderInfo(kPayloadType))
+ .WillRepeatedly(Return(&info));
+ EXPECT_CALL(*mock_decoder_database_, GetActiveCngDecoder())
+ .WillRepeatedly(ReturnNull());
+ EXPECT_CALL(*mock_decoder_database_, GetActiveDecoder())
+ .WillRepeatedly(Return(decoder));
+ EXPECT_CALL(*mock_decoder_database_, SetActiveDecoder(_, _))
+ .WillOnce(Invoke([](uint8_t rtp_payload_type, bool* new_decoder) {
+ *new_decoder = true;
+ return 0;
+ }));
+
+ // Insert first packet.
+ neteq_->InsertPacket(rtp_header, payload);
+
+ AudioFrame audio_frame;
+ bool muted;
+
+ // Repeat 40 times to ensure we enter muted state.
+ for (int i = 0; i < 40; i++) {
+ // GetAudio should return an error, and not crash, even in muted state.
+ EXPECT_NE(0, neteq_->GetAudio(&audio_frame, &muted));
+ }
+}
+
+class Decoder120ms : public AudioDecoder {
+ public:
+ Decoder120ms(int sample_rate_hz, SpeechType speech_type)
+ : sample_rate_hz_(sample_rate_hz),
+ next_value_(1),
+ speech_type_(speech_type) {}
+
+ int DecodeInternal(const uint8_t* encoded,
+ size_t encoded_len,
+ int sample_rate_hz,
+ int16_t* decoded,
+ SpeechType* speech_type) override {
+ EXPECT_EQ(sample_rate_hz_, sample_rate_hz);
+ size_t decoded_len =
+ rtc::CheckedDivExact(sample_rate_hz, 1000) * 120 * Channels();
+ for (size_t i = 0; i < decoded_len; ++i) {
+ decoded[i] = next_value_++;
+ }
+ *speech_type = speech_type_;
+ return rtc::checked_cast<int>(decoded_len);
+ }
+
+ void Reset() override { next_value_ = 1; }
+ int SampleRateHz() const override { return sample_rate_hz_; }
+ size_t Channels() const override { return 2; }
+
+ private:
+ int sample_rate_hz_;
+ int16_t next_value_;
+ SpeechType speech_type_;
+};
+
+class NetEqImplTest120ms : public NetEqImplTest {
+ protected:
+ NetEqImplTest120ms() : NetEqImplTest() {}
+ virtual ~NetEqImplTest120ms() {}
+
+ void CreateInstanceNoMocks() {
+ UseNoMocks();
+ CreateInstance(decoder_factory_);
+ EXPECT_TRUE(neteq_->RegisterPayloadType(
+ kPayloadType, SdpAudioFormat("opus", 48000, 2, {{"stereo", "1"}})));
+ }
+
+ void CreateInstanceWithDelayManagerMock() {
+ UseNoMocks();
+ use_mock_neteq_controller_ = true;
+ CreateInstance(decoder_factory_);
+ EXPECT_TRUE(neteq_->RegisterPayloadType(
+ kPayloadType, SdpAudioFormat("opus", 48000, 2, {{"stereo", "1"}})));
+ }
+
+ uint32_t timestamp_diff_between_packets() const {
+ return rtc::CheckedDivExact(kSamplingFreq_, 1000u) * 120;
+ }
+
+ uint32_t first_timestamp() const { return 10u; }
+
+ void GetFirstPacket() {
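+    // One 120 ms packet is consumed by twelve 10 ms GetAudio() calls.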
+ bool muted;
+ for (int i = 0; i < 12; i++) {
+ EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output_, &muted));
+ EXPECT_FALSE(muted);
+ }
+ }
+
+ void InsertPacket(uint32_t timestamp) {
+ RTPHeader rtp_header;
+ rtp_header.payloadType = kPayloadType;
+ rtp_header.sequenceNumber = sequence_number_;
+ rtp_header.timestamp = timestamp;
+ rtp_header.ssrc = 15;
+ const size_t kPayloadLengthBytes = 1; // This can be arbitrary.
+ uint8_t payload[kPayloadLengthBytes] = {0};
+ EXPECT_EQ(NetEq::kOK, neteq_->InsertPacket(rtp_header, payload));
+ sequence_number_++;
+ }
+
+ void Register120msCodec(AudioDecoder::SpeechType speech_type) {
+ const uint32_t sampling_freq = kSamplingFreq_;
+ decoder_factory_ = rtc::make_ref_counted<test::FunctionAudioDecoderFactory>(
+ [sampling_freq, speech_type]() {
+ std::unique_ptr<AudioDecoder> decoder =
+ std::make_unique<Decoder120ms>(sampling_freq, speech_type);
+ RTC_CHECK_EQ(2, decoder->Channels());
+ return decoder;
+ });
+ }
+
+ rtc::scoped_refptr<AudioDecoderFactory> decoder_factory_;
+ AudioFrame output_;
+ const uint32_t kPayloadType = 17;
+ const uint32_t kSamplingFreq_ = 48000;
+ uint16_t sequence_number_ = 1;
+};
+
+TEST_F(NetEqImplTest120ms, CodecInternalCng) {
+ Register120msCodec(AudioDecoder::kComfortNoise);
+ CreateInstanceNoMocks();
+
+ InsertPacket(first_timestamp());
+ GetFirstPacket();
+
+ bool muted;
+ EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output_, &muted));
+ EXPECT_EQ(NetEq::Operation::kCodecInternalCng,
+ neteq_->last_operation_for_test());
+}
+
+TEST_F(NetEqImplTest120ms, Normal) {
+ Register120msCodec(AudioDecoder::kSpeech);
+ CreateInstanceNoMocks();
+
+ InsertPacket(first_timestamp());
+ GetFirstPacket();
+
+ EXPECT_EQ(NetEq::Operation::kNormal, neteq_->last_operation_for_test());
+}
+
+TEST_F(NetEqImplTest120ms, Merge) {
+ Register120msCodec(AudioDecoder::kSpeech);
+ CreateInstanceWithDelayManagerMock();
+
+ EXPECT_CALL(*mock_neteq_controller_, CngOff()).WillRepeatedly(Return(true));
+ InsertPacket(first_timestamp());
+
+ GetFirstPacket();
+ bool muted;
+ EXPECT_CALL(*mock_neteq_controller_, GetDecision(_, _))
+ .WillOnce(Return(NetEq::Operation::kExpand));
+ EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output_, &muted));
+
+ InsertPacket(first_timestamp() + 2 * timestamp_diff_between_packets());
+
+ EXPECT_CALL(*mock_neteq_controller_, GetDecision(_, _))
+ .WillOnce(Return(NetEq::Operation::kMerge));
+
+ EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output_, &muted));
+ EXPECT_EQ(NetEq::Operation::kMerge, neteq_->last_operation_for_test());
+}
+
+TEST_F(NetEqImplTest120ms, Expand) {
+ Register120msCodec(AudioDecoder::kSpeech);
+ CreateInstanceNoMocks();
+
+ InsertPacket(first_timestamp());
+ GetFirstPacket();
+
+ bool muted;
+ EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output_, &muted));
+ EXPECT_EQ(NetEq::Operation::kExpand, neteq_->last_operation_for_test());
+}
+
+TEST_F(NetEqImplTest120ms, FastAccelerate) {
+ Register120msCodec(AudioDecoder::kSpeech);
+ CreateInstanceWithDelayManagerMock();
+
+ InsertPacket(first_timestamp());
+ GetFirstPacket();
+ InsertPacket(first_timestamp() + timestamp_diff_between_packets());
+
+ EXPECT_CALL(*mock_neteq_controller_, GetDecision(_, _))
+ .Times(1)
+ .WillOnce(Return(NetEq::Operation::kFastAccelerate));
+
+ bool muted;
+ EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output_, &muted));
+ EXPECT_EQ(NetEq::Operation::kFastAccelerate,
+ neteq_->last_operation_for_test());
+}
+
+TEST_F(NetEqImplTest120ms, PreemptiveExpand) {
+ Register120msCodec(AudioDecoder::kSpeech);
+ CreateInstanceWithDelayManagerMock();
+
+ InsertPacket(first_timestamp());
+ GetFirstPacket();
+
+ InsertPacket(first_timestamp() + timestamp_diff_between_packets());
+
+ EXPECT_CALL(*mock_neteq_controller_, GetDecision(_, _))
+ .Times(1)
+ .WillOnce(Return(NetEq::Operation::kPreemptiveExpand));
+
+ bool muted;
+ EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output_, &muted));
+ EXPECT_EQ(NetEq::Operation::kPreemptiveExpand,
+ neteq_->last_operation_for_test());
+}
+
+TEST_F(NetEqImplTest120ms, Accelerate) {
+ Register120msCodec(AudioDecoder::kSpeech);
+ CreateInstanceWithDelayManagerMock();
+
+ InsertPacket(first_timestamp());
+ GetFirstPacket();
+
+ InsertPacket(first_timestamp() + timestamp_diff_between_packets());
+
+ EXPECT_CALL(*mock_neteq_controller_, GetDecision(_, _))
+ .Times(1)
+ .WillOnce(Return(NetEq::Operation::kAccelerate));
+
+ bool muted;
+ EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output_, &muted));
+ EXPECT_EQ(NetEq::Operation::kAccelerate, neteq_->last_operation_for_test());
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/neteq_network_stats_unittest.cc b/third_party/libwebrtc/modules/audio_coding/neteq/neteq_network_stats_unittest.cc
new file mode 100644
index 0000000000..a669ad727e
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/neteq_network_stats_unittest.cc
@@ -0,0 +1,345 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <memory>
+
+#include "absl/memory/memory.h"
+#include "api/audio/audio_frame.h"
+#include "api/audio_codecs/audio_decoder.h"
+#include "api/audio_codecs/builtin_audio_decoder_factory.h"
+#include "api/neteq/neteq.h"
+#include "modules/audio_coding/neteq/default_neteq_factory.h"
+#include "modules/audio_coding/neteq/tools/rtp_generator.h"
+#include "system_wrappers/include/clock.h"
+#include "test/audio_decoder_proxy_factory.h"
+#include "test/gmock.h"
+
+namespace webrtc {
+namespace test {
+
+namespace {
+
+std::unique_ptr<NetEq> CreateNetEq(
+ const NetEq::Config& config,
+ Clock* clock,
+ const rtc::scoped_refptr<AudioDecoderFactory>& decoder_factory) {
+ return DefaultNetEqFactory().CreateNetEq(config, decoder_factory, clock);
+}
+
+} // namespace
+
+using ::testing::_;
+using ::testing::Return;
+using ::testing::SetArgPointee;
+
+class MockAudioDecoder final : public AudioDecoder {
+ public:
+ static const int kPacketDuration = 960; // 48 kHz * 20 ms
+
+ MockAudioDecoder(int sample_rate_hz, size_t num_channels)
+ : sample_rate_hz_(sample_rate_hz),
+ num_channels_(num_channels),
+ fec_enabled_(false) {}
+ ~MockAudioDecoder() override { Die(); }
+ MOCK_METHOD(void, Die, ());
+
+ MOCK_METHOD(void, Reset, (), (override));
+
+ class MockFrame : public AudioDecoder::EncodedAudioFrame {
+ public:
+ MockFrame(size_t num_channels) : num_channels_(num_channels) {}
+
+ size_t Duration() const override { return kPacketDuration; }
+
+ absl::optional<DecodeResult> Decode(
+ rtc::ArrayView<int16_t> decoded) const override {
+      // `decoded` is an ArrayView<int16_t>, so its size() is a sample count,
+      // not a byte count.
+      const size_t output_size = kPacketDuration * num_channels_;
+      if (decoded.size() >= output_size) {
+        memset(decoded.data(), 0, sizeof(int16_t) * output_size);
+        return DecodeResult{kPacketDuration * num_channels_, kSpeech};
+ } else {
+ ADD_FAILURE() << "Expected decoded.size() to be >= output_size ("
+ << decoded.size() << " vs. " << output_size << ")";
+ return absl::nullopt;
+ }
+ }
+
+ private:
+ const size_t num_channels_;
+ };
+
+ std::vector<ParseResult> ParsePayload(rtc::Buffer&& payload,
+ uint32_t timestamp) override {
+ std::vector<ParseResult> results;
+ if (fec_enabled_) {
+ std::unique_ptr<MockFrame> fec_frame(new MockFrame(num_channels_));
+ results.emplace_back(timestamp - kPacketDuration, 1,
+ std::move(fec_frame));
+ }
+
+ std::unique_ptr<MockFrame> frame(new MockFrame(num_channels_));
+ results.emplace_back(timestamp, 0, std::move(frame));
+ return results;
+ }
+
+ int PacketDuration(const uint8_t* encoded,
+ size_t encoded_len) const override {
+    ADD_FAILURE() << "Since decoding goes through ParsePayload, "
+                     "PacketDuration should never get called.";
+ return kPacketDuration;
+ }
+
+ bool PacketHasFec(const uint8_t* encoded, size_t encoded_len) const override {
+    ADD_FAILURE() << "Since decoding goes through ParsePayload, PacketHasFec "
+                     "should never get called.";
+ return fec_enabled_;
+ }
+
+ int SampleRateHz() const override { return sample_rate_hz_; }
+
+ size_t Channels() const override { return num_channels_; }
+
+ void set_fec_enabled(bool enable_fec) { fec_enabled_ = enable_fec; }
+
+ bool fec_enabled() const { return fec_enabled_; }
+
+ protected:
+ int DecodeInternal(const uint8_t* encoded,
+ size_t encoded_len,
+ int sample_rate_hz,
+ int16_t* decoded,
+ SpeechType* speech_type) override {
+    ADD_FAILURE() << "Since decoding goes through ParsePayload, "
+                     "DecodeInternal should never get called.";
+ return -1;
+ }
+
+ private:
+ const int sample_rate_hz_;
+ const size_t num_channels_;
+ bool fec_enabled_;
+};
+
+class NetEqNetworkStatsTest {
+ public:
+ static const int kPayloadSizeByte = 30;
+ static const int kFrameSizeMs = 20;
+ static const uint8_t kPayloadType = 95;
+ static const int kOutputLengthMs = 10;
+
+ enum logic {
+ kIgnore,
+ kEqual,
+ kSmallerThan,
+ kLargerThan,
+ };
+
+ struct NetEqNetworkStatsCheck {
+ logic current_buffer_size_ms;
+ logic preferred_buffer_size_ms;
+ logic jitter_peaks_found;
+ logic packet_loss_rate;
+ logic expand_rate;
+ logic speech_expand_rate;
+ logic preemptive_rate;
+ logic accelerate_rate;
+ logic secondary_decoded_rate;
+ logic secondary_discarded_rate;
+ logic added_zero_samples;
+ NetEqNetworkStatistics stats_ref;
+ };
+
+ NetEqNetworkStatsTest(const SdpAudioFormat& format, MockAudioDecoder* decoder)
+ : decoder_(decoder),
+ decoder_factory_(
+ rtc::make_ref_counted<AudioDecoderProxyFactory>(decoder)),
+ samples_per_ms_(format.clockrate_hz / 1000),
+ frame_size_samples_(kFrameSizeMs * samples_per_ms_),
+ rtp_generator_(new RtpGenerator(samples_per_ms_)),
+ last_lost_time_(0),
+ packet_loss_interval_(0xffffffff) {
+ NetEq::Config config;
+ config.sample_rate_hz = format.clockrate_hz;
+ neteq_ = CreateNetEq(config, Clock::GetRealTimeClock(), decoder_factory_);
+ neteq_->RegisterPayloadType(kPayloadType, format);
+ }
+
+ bool Lost(uint32_t send_time) {
+ if (send_time - last_lost_time_ >= packet_loss_interval_) {
+ last_lost_time_ = send_time;
+ return true;
+ }
+ return false;
+ }
+
+ void SetPacketLossRate(double loss_rate) {
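+    // Example: loss_rate = 0.1 with 20 ms frames gives a loss interval of
+    // 200 ms, i.e. every tenth packet is dropped.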
+ packet_loss_interval_ =
+ (loss_rate >= 1e-3 ? static_cast<double>(kFrameSizeMs) / loss_rate
+ : 0xffffffff);
+ }
+
+  // `expects` describes how each field of the current network statistics is
+  // checked against the corresponding field in `expects.stats_ref`:
+  //   expects.x == kIgnore:      'x' is not checked
+  //   expects.x == kEqual:       current 'x' should equal `stats_ref.x`
+  //   expects.x == kSmallerThan: current 'x' should be < `stats_ref.x`
+  //   expects.x == kLargerThan:  current 'x' should be > `stats_ref.x`
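+  // For example, `expects.expand_rate = kLargerThan` together with
+  // `expects.stats_ref.expand_rate = 0` asserts that some expansion occurred.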
+ void CheckNetworkStatistics(NetEqNetworkStatsCheck expects) {
+ NetEqNetworkStatistics stats;
+ neteq_->NetworkStatistics(&stats);
+
+#define CHECK_NETEQ_NETWORK_STATS(x) \
+ switch (expects.x) { \
+ case kEqual: \
+ EXPECT_EQ(stats.x, expects.stats_ref.x); \
+ break; \
+ case kSmallerThan: \
+ EXPECT_LT(stats.x, expects.stats_ref.x); \
+ break; \
+ case kLargerThan: \
+ EXPECT_GT(stats.x, expects.stats_ref.x); \
+ break; \
+ default: \
+ break; \
+ }
+
+ CHECK_NETEQ_NETWORK_STATS(current_buffer_size_ms);
+ CHECK_NETEQ_NETWORK_STATS(preferred_buffer_size_ms);
+ CHECK_NETEQ_NETWORK_STATS(jitter_peaks_found);
+ CHECK_NETEQ_NETWORK_STATS(expand_rate);
+ CHECK_NETEQ_NETWORK_STATS(speech_expand_rate);
+ CHECK_NETEQ_NETWORK_STATS(preemptive_rate);
+ CHECK_NETEQ_NETWORK_STATS(accelerate_rate);
+ CHECK_NETEQ_NETWORK_STATS(secondary_decoded_rate);
+ CHECK_NETEQ_NETWORK_STATS(secondary_discarded_rate);
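+    // Note: packet_loss_rate and added_zero_samples have fields in the check
+    // struct but are not verified by this block.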
+
+#undef CHECK_NETEQ_NETWORK_STATS
+ }
+
+ void RunTest(int num_loops, NetEqNetworkStatsCheck expects) {
+ uint32_t time_now;
+ uint32_t next_send_time;
+
+    // Initialize `last_lost_time_`.
+ time_now = next_send_time = last_lost_time_ = rtp_generator_->GetRtpHeader(
+ kPayloadType, frame_size_samples_, &rtp_header_);
+ for (int k = 0; k < num_loops; ++k) {
+ // Delay by one frame such that the FEC can come in.
+ while (time_now + kFrameSizeMs >= next_send_time) {
+ next_send_time = rtp_generator_->GetRtpHeader(
+ kPayloadType, frame_size_samples_, &rtp_header_);
+ if (!Lost(next_send_time)) {
+ static const uint8_t payload[kPayloadSizeByte] = {0};
+ ASSERT_EQ(NetEq::kOK, neteq_->InsertPacket(rtp_header_, payload));
+ }
+ }
+ bool muted = true;
+ EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output_frame_, &muted));
+ ASSERT_FALSE(muted);
+ EXPECT_EQ(decoder_->Channels(), output_frame_.num_channels_);
+ EXPECT_EQ(static_cast<size_t>(kOutputLengthMs * samples_per_ms_),
+ output_frame_.samples_per_channel_);
+ EXPECT_EQ(48000, neteq_->last_output_sample_rate_hz());
+
+ time_now += kOutputLengthMs;
+ }
+ CheckNetworkStatistics(expects);
+ neteq_->FlushBuffers();
+ }
+
+ void DecodeFecTest() {
+ decoder_->set_fec_enabled(false);
+ NetEqNetworkStatsCheck expects = {kIgnore, // current_buffer_size_ms
+ kIgnore, // preferred_buffer_size_ms
+ kIgnore, // jitter_peaks_found
+ kEqual, // packet_loss_rate
+ kEqual, // expand_rate
+                                      kEqual,   // speech_expand_rate
+ kIgnore, // preemptive_rate
+ kEqual, // accelerate_rate
+                                      kEqual,   // secondary_decoded_rate
+                                      kEqual,   // secondary_discarded_rate
+ kEqual, // added_zero_samples
+ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}};
+ RunTest(50, expects);
+
+ // Next we introduce packet losses.
+ SetPacketLossRate(0.1);
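+    // The rates are in Q14, so 16384 corresponds to 100%; 898 is about 5.5%.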
+ expects.stats_ref.expand_rate = expects.stats_ref.speech_expand_rate = 898;
+ RunTest(50, expects);
+
+ // Next we enable FEC.
+ decoder_->set_fec_enabled(true);
+ // If FEC fills in the lost packets, no packet loss will be counted.
+ expects.stats_ref.expand_rate = expects.stats_ref.speech_expand_rate = 0;
+ expects.stats_ref.secondary_decoded_rate = 2006;
+ expects.stats_ref.secondary_discarded_rate = 14336;
+ RunTest(50, expects);
+ }
+
+ void NoiseExpansionTest() {
+ NetEqNetworkStatsCheck expects = {kIgnore, // current_buffer_size_ms
+ kIgnore, // preferred_buffer_size_ms
+ kIgnore, // jitter_peaks_found
+ kEqual, // packet_loss_rate
+ kEqual, // expand_rate
+ kEqual, // speech_expand_rate
+ kIgnore, // preemptive_rate
+ kEqual, // accelerate_rate
+                                      kEqual,   // secondary_decoded_rate
+                                      kEqual,   // secondary_discarded_rate
+ kEqual, // added_zero_samples
+ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}};
+ RunTest(50, expects);
+
+ SetPacketLossRate(1);
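+    // expand_rate counts all synthesized output (16384 in Q14 == 100%),
+    // while speech_expand_rate counts only the speech-like part before the
+    // expansion fades into noise.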
+ expects.stats_ref.expand_rate = 16384;
+ expects.stats_ref.speech_expand_rate = 5324;
+ RunTest(10, expects); // Lost 10 * 20ms in a row.
+ }
+
+ private:
+ MockAudioDecoder* decoder_;
+ rtc::scoped_refptr<AudioDecoderProxyFactory> decoder_factory_;
+ std::unique_ptr<NetEq> neteq_;
+
+ const int samples_per_ms_;
+ const size_t frame_size_samples_;
+ std::unique_ptr<RtpGenerator> rtp_generator_;
+ RTPHeader rtp_header_;
+ uint32_t last_lost_time_;
+ uint32_t packet_loss_interval_;
+ AudioFrame output_frame_;
+};
+
+TEST(NetEqNetworkStatsTest, DecodeFec) {
+ MockAudioDecoder decoder(48000, 1);
+ NetEqNetworkStatsTest test(SdpAudioFormat("opus", 48000, 2), &decoder);
+ test.DecodeFecTest();
+ EXPECT_CALL(decoder, Die()).Times(1);
+}
+
+TEST(NetEqNetworkStatsTest, StereoDecodeFec) {
+ MockAudioDecoder decoder(48000, 2);
+ NetEqNetworkStatsTest test(SdpAudioFormat("opus", 48000, 2), &decoder);
+ test.DecodeFecTest();
+ EXPECT_CALL(decoder, Die()).Times(1);
+}
+
+TEST(NetEqNetworkStatsTest, NoiseExpansionTest) {
+ MockAudioDecoder decoder(48000, 1);
+ NetEqNetworkStatsTest test(SdpAudioFormat("opus", 48000, 2), &decoder);
+ test.NoiseExpansionTest();
+ EXPECT_CALL(decoder, Die()).Times(1);
+}
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/neteq_stereo_unittest.cc b/third_party/libwebrtc/modules/audio_coding/neteq/neteq_stereo_unittest.cc
new file mode 100644
index 0000000000..6fa56fd1c1
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/neteq_stereo_unittest.cc
@@ -0,0 +1,424 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+// Test to verify correct stereo and multi-channel operation.
+
+#include <algorithm>
+#include <list>
+#include <memory>
+#include <string>
+
+#include "api/audio/audio_frame.h"
+#include "api/audio_codecs/builtin_audio_decoder_factory.h"
+#include "api/neteq/neteq.h"
+#include "modules/audio_coding/codecs/pcm16b/pcm16b.h"
+#include "modules/audio_coding/neteq/default_neteq_factory.h"
+#include "modules/audio_coding/neteq/tools/input_audio_file.h"
+#include "modules/audio_coding/neteq/tools/rtp_generator.h"
+#include "rtc_base/strings/string_builder.h"
+#include "system_wrappers/include/clock.h"
+#include "test/gtest.h"
+#include "test/testsupport/file_utils.h"
+
+namespace webrtc {
+
+struct TestParameters {
+ int frame_size;
+ int sample_rate;
+ size_t num_channels;
+};
+
+// This is a parameterized test. The test parameters are supplied through a
+// TestParameters struct, which is obtained through the GetParam() method.
+//
+// The objective of the test is to create a mono input signal and a
+// multi-channel input signal, where each channel is identical to the mono
+// input channel. The two input signals are processed through their respective
+// NetEq instances. After that, the output signals are compared. The expected
+// result is that each channel in the multi-channel output is identical to the
+// mono output.
+class NetEqStereoTest : public ::testing::TestWithParam<TestParameters> {
+ protected:
+ static const int kTimeStepMs = 10;
+ static const size_t kMaxBlockSize = 480; // 10 ms @ 48 kHz.
+ static const uint8_t kPayloadTypeMono = 95;
+ static const uint8_t kPayloadTypeMulti = 96;
+
+ NetEqStereoTest()
+ : num_channels_(GetParam().num_channels),
+ sample_rate_hz_(GetParam().sample_rate),
+ samples_per_ms_(sample_rate_hz_ / 1000),
+ frame_size_ms_(GetParam().frame_size),
+ frame_size_samples_(
+ static_cast<size_t>(frame_size_ms_ * samples_per_ms_)),
+ output_size_samples_(10 * samples_per_ms_),
+ clock_(0),
+ rtp_generator_mono_(samples_per_ms_),
+ rtp_generator_(samples_per_ms_),
+ payload_size_bytes_(0),
+ multi_payload_size_bytes_(0),
+ last_send_time_(0),
+ last_arrival_time_(0) {
+ NetEq::Config config;
+ config.sample_rate_hz = sample_rate_hz_;
+ DefaultNetEqFactory neteq_factory;
+ auto decoder_factory = CreateBuiltinAudioDecoderFactory();
+ neteq_mono_ = neteq_factory.CreateNetEq(config, decoder_factory, &clock_);
+ neteq_ = neteq_factory.CreateNetEq(config, decoder_factory, &clock_);
+ input_ = new int16_t[frame_size_samples_];
+ encoded_ = new uint8_t[2 * frame_size_samples_];
+ input_multi_channel_ = new int16_t[frame_size_samples_ * num_channels_];
+ encoded_multi_channel_ =
+ new uint8_t[frame_size_samples_ * 2 * num_channels_];
+ }
+
+ ~NetEqStereoTest() {
+ delete[] input_;
+ delete[] encoded_;
+ delete[] input_multi_channel_;
+ delete[] encoded_multi_channel_;
+ }
+
+ virtual void SetUp() {
+ const std::string file_name =
+ webrtc::test::ResourcePath("audio_coding/testfile32kHz", "pcm");
+ input_file_.reset(new test::InputAudioFile(file_name));
+ RTC_CHECK_GE(num_channels_, 2);
+ ASSERT_TRUE(neteq_mono_->RegisterPayloadType(
+ kPayloadTypeMono, SdpAudioFormat("l16", sample_rate_hz_, 1)));
+ ASSERT_TRUE(neteq_->RegisterPayloadType(
+ kPayloadTypeMulti,
+ SdpAudioFormat("l16", sample_rate_hz_, num_channels_)));
+ }
+
+ virtual void TearDown() {}
+
+ int GetNewPackets() {
+ if (!input_file_->Read(frame_size_samples_, input_)) {
+ return -1;
+ }
+ payload_size_bytes_ =
+ WebRtcPcm16b_Encode(input_, frame_size_samples_, encoded_);
+ if (frame_size_samples_ * 2 != payload_size_bytes_) {
+ return -1;
+ }
+ int next_send_time = rtp_generator_mono_.GetRtpHeader(
+ kPayloadTypeMono, frame_size_samples_, &rtp_header_mono_);
+ MakeMultiChannelInput();
+ multi_payload_size_bytes_ = WebRtcPcm16b_Encode(
+ input_multi_channel_, frame_size_samples_ * num_channels_,
+ encoded_multi_channel_);
+ if (frame_size_samples_ * 2 * num_channels_ != multi_payload_size_bytes_) {
+ return -1;
+ }
+ rtp_generator_.GetRtpHeader(kPayloadTypeMulti, frame_size_samples_,
+ &rtp_header_);
+ return next_send_time;
+ }
+
+ virtual void MakeMultiChannelInput() {
+ test::InputAudioFile::DuplicateInterleaved(
+ input_, frame_size_samples_, num_channels_, input_multi_channel_);
+ }
+
+ virtual void VerifyOutput(size_t num_samples) {
+ const int16_t* output_data = output_.data();
+ const int16_t* output_multi_channel_data = output_multi_channel_.data();
+ for (size_t i = 0; i < num_samples; ++i) {
+ for (size_t j = 0; j < num_channels_; ++j) {
+ ASSERT_EQ(output_data[i],
+ output_multi_channel_data[i * num_channels_ + j])
+ << "Diff in sample " << i << ", channel " << j << ".";
+ }
+ }
+ }
+
+ virtual int GetArrivalTime(int send_time) {
+ int arrival_time = last_arrival_time_ + (send_time - last_send_time_);
+ last_send_time_ = send_time;
+ last_arrival_time_ = arrival_time;
+ return arrival_time;
+ }
+
+ virtual bool Lost() { return false; }
+
+ void RunTest(int num_loops) {
+ // Get next input packets (mono and multi-channel).
+ int next_send_time;
+ int next_arrival_time;
+ do {
+ next_send_time = GetNewPackets();
+ ASSERT_NE(-1, next_send_time);
+ next_arrival_time = GetArrivalTime(next_send_time);
+ } while (Lost()); // If lost, immediately read the next packet.
+
+ int time_now = 0;
+ for (int k = 0; k < num_loops; ++k) {
+ while (time_now >= next_arrival_time) {
+ // Insert packet in mono instance.
+ ASSERT_EQ(NetEq::kOK,
+ neteq_mono_->InsertPacket(
+ rtp_header_mono_, rtc::ArrayView<const uint8_t>(
+ encoded_, payload_size_bytes_)));
+ // Insert packet in multi-channel instance.
+ ASSERT_EQ(NetEq::kOK, neteq_->InsertPacket(
+ rtp_header_, rtc::ArrayView<const uint8_t>(
+ encoded_multi_channel_,
+ multi_payload_size_bytes_)));
+ // Get next input packets (mono and multi-channel).
+ do {
+ next_send_time = GetNewPackets();
+ ASSERT_NE(-1, next_send_time);
+ next_arrival_time = GetArrivalTime(next_send_time);
+ } while (Lost()); // If lost, immediately read the next packet.
+ }
+ // Get audio from mono instance.
+ bool muted;
+ EXPECT_EQ(NetEq::kOK, neteq_mono_->GetAudio(&output_, &muted));
+ ASSERT_FALSE(muted);
+ EXPECT_EQ(1u, output_.num_channels_);
+ EXPECT_EQ(output_size_samples_, output_.samples_per_channel_);
+ // Get audio from multi-channel instance.
+ ASSERT_EQ(NetEq::kOK, neteq_->GetAudio(&output_multi_channel_, &muted));
+ ASSERT_FALSE(muted);
+ EXPECT_EQ(num_channels_, output_multi_channel_.num_channels_);
+ EXPECT_EQ(output_size_samples_,
+ output_multi_channel_.samples_per_channel_);
+ rtc::StringBuilder ss;
+ ss << "Lap number " << k << ".";
+ SCOPED_TRACE(ss.str()); // Print out the parameter values on failure.
+ // Compare mono and multi-channel.
+ ASSERT_NO_FATAL_FAILURE(VerifyOutput(output_size_samples_));
+
+ time_now += kTimeStepMs;
+ clock_.AdvanceTimeMilliseconds(kTimeStepMs);
+ }
+ }
+
+ const size_t num_channels_;
+ const int sample_rate_hz_;
+ const int samples_per_ms_;
+ const int frame_size_ms_;
+ const size_t frame_size_samples_;
+ const size_t output_size_samples_;
+ SimulatedClock clock_;
+ std::unique_ptr<NetEq> neteq_mono_;
+ std::unique_ptr<NetEq> neteq_;
+ test::RtpGenerator rtp_generator_mono_;
+ test::RtpGenerator rtp_generator_;
+ int16_t* input_;
+ int16_t* input_multi_channel_;
+ uint8_t* encoded_;
+ uint8_t* encoded_multi_channel_;
+ AudioFrame output_;
+ AudioFrame output_multi_channel_;
+ RTPHeader rtp_header_mono_;
+ RTPHeader rtp_header_;
+ size_t payload_size_bytes_;
+ size_t multi_payload_size_bytes_;
+ int last_send_time_;
+ int last_arrival_time_;
+ std::unique_ptr<test::InputAudioFile> input_file_;
+};
+
+class NetEqStereoTestNoJitter : public NetEqStereoTest {
+ protected:
+ NetEqStereoTestNoJitter() : NetEqStereoTest() {
+ // Start the sender 100 ms before the receiver to pre-fill the buffer.
+ // This is to avoid doing preemptive expand early in the test.
+ // TODO(hlundin): Mock the decision making instead to control the modes.
+ last_arrival_time_ = -100;
+ }
+};
+
+TEST_P(NetEqStereoTestNoJitter, RunTest) {
+ RunTest(8);
+}
+
+class NetEqStereoTestPositiveDrift : public NetEqStereoTest {
+ protected:
+ NetEqStereoTestPositiveDrift() : NetEqStereoTest(), drift_factor(0.9) {
+ // Start the sender 100 ms before the receiver to pre-fill the buffer.
+ // This is to avoid doing preemptive expand early in the test.
+ // TODO(hlundin): Mock the decision making instead to control the modes.
+ last_arrival_time_ = -100;
+ }
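+  // With drift_factor = 0.9, inter-arrival times are 90% of inter-send
+  // times: packets arrive faster than they are played out, so the buffer
+  // tends to grow (positive drift).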
+ virtual int GetArrivalTime(int send_time) {
+ int arrival_time =
+ last_arrival_time_ + drift_factor * (send_time - last_send_time_);
+ last_send_time_ = send_time;
+ last_arrival_time_ = arrival_time;
+ return arrival_time;
+ }
+
+ double drift_factor;
+};
+
+TEST_P(NetEqStereoTestPositiveDrift, RunTest) {
+ RunTest(100);
+}
+
+class NetEqStereoTestNegativeDrift : public NetEqStereoTestPositiveDrift {
+ protected:
+ NetEqStereoTestNegativeDrift() : NetEqStereoTestPositiveDrift() {
+ drift_factor = 1.1;
+ last_arrival_time_ = 0;
+ }
+};
+
+TEST_P(NetEqStereoTestNegativeDrift, RunTest) {
+ RunTest(100);
+}
+
+class NetEqStereoTestDelays : public NetEqStereoTest {
+ protected:
+ static const int kDelayInterval = 10;
+ static const int kDelay = 1000;
+ NetEqStereoTestDelays() : NetEqStereoTest(), frame_index_(0) {}
+
+ virtual int GetArrivalTime(int send_time) {
+ // Deliver immediately, unless we have a back-log.
+ int arrival_time = std::min(last_arrival_time_, send_time);
+ if (++frame_index_ % kDelayInterval == 0) {
+ // Delay this packet.
+ arrival_time += kDelay;
+ }
+ last_send_time_ = send_time;
+ last_arrival_time_ = arrival_time;
+ return arrival_time;
+ }
+
+ int frame_index_;
+};
+
+TEST_P(NetEqStereoTestDelays, RunTest) {
+ RunTest(1000);
+}
+
+class NetEqStereoTestLosses : public NetEqStereoTest {
+ protected:
+ static const int kLossInterval = 10;
+ NetEqStereoTestLosses() : NetEqStereoTest(), frame_index_(0) {}
+
+ virtual bool Lost() { return (++frame_index_) % kLossInterval == 0; }
+
+ // TODO(hlundin): NetEq is not giving bitexact results for these cases.
+ virtual void VerifyOutput(size_t num_samples) {
+ for (size_t i = 0; i < num_samples; ++i) {
+ const int16_t* output_data = output_.data();
+ const int16_t* output_multi_channel_data = output_multi_channel_.data();
+ auto first_channel_sample = output_multi_channel_data[i * num_channels_];
+ for (size_t j = 0; j < num_channels_; ++j) {
+ const int kErrorMargin = 200;
+ EXPECT_NEAR(output_data[i],
+ output_multi_channel_data[i * num_channels_ + j],
+ kErrorMargin)
+ << "Diff in sample " << i << ", channel " << j << ".";
+ EXPECT_EQ(first_channel_sample,
+ output_multi_channel_data[i * num_channels_ + j]);
+ }
+ }
+ }
+
+ int frame_index_;
+};
+
+TEST_P(NetEqStereoTestLosses, RunTest) {
+ RunTest(100);
+}
+
+class NetEqStereoTestSingleActiveChannelPlc : public NetEqStereoTestLosses {
+ protected:
+ NetEqStereoTestSingleActiveChannelPlc() : NetEqStereoTestLosses() {}
+
+ void MakeMultiChannelInput() override {
+ // Create a multi-channel input by copying the mono channel from the file
+ // to the first channel, and setting the others to zero.
+ memset(input_multi_channel_, 0,
+ frame_size_samples_ * num_channels_ * sizeof(int16_t));
+ for (size_t i = 0; i < frame_size_samples_; ++i) {
+ input_multi_channel_[i * num_channels_] = input_[i];
+ }
+ }
+
+ void VerifyOutput(size_t num_samples) override {
+ // Verify that all samples in channels other than the first are zero.
+ const int16_t* output_multi_channel_data = output_multi_channel_.data();
+ for (size_t i = 0; i < num_samples; ++i) {
+ for (size_t j = 1; j < num_channels_; ++j) {
+ EXPECT_EQ(0, output_multi_channel_data[i * num_channels_ + j])
+ << "Sample " << i << ", channel " << j << " is non-zero.";
+ }
+ }
+ }
+};
+
+TEST_P(NetEqStereoTestSingleActiveChannelPlc, RunTest) {
+ RunTest(100);
+}
+
+// Creates a list of parameter sets.
+std::list<TestParameters> GetTestParameters() {
+ std::list<TestParameters> l;
+ const int sample_rates[] = {8000, 16000, 32000};
+ const int num_rates = sizeof(sample_rates) / sizeof(sample_rates[0]);
+ // Loop through sample rates.
+ for (int rate_index = 0; rate_index < num_rates; ++rate_index) {
+ int sample_rate = sample_rates[rate_index];
+ // Loop through all frame sizes between 10 and 60 ms.
+ for (int frame_size = 10; frame_size <= 60; frame_size += 10) {
+ TestParameters p;
+ p.frame_size = frame_size;
+ p.sample_rate = sample_rate;
+ p.num_channels = 2;
+ l.push_back(p);
+ if (sample_rate == 8000) {
+ // Add a five-channel test for 8000 Hz.
+ p.num_channels = 5;
+ l.push_back(p);
+ }
+ }
+ }
+ return l;
+}
+
+// Pretty-printing the test parameters in case of an error.
+void PrintTo(const TestParameters& p, ::std::ostream* os) {
+ *os << "{frame_size = " << p.frame_size
+ << ", num_channels = " << p.num_channels
+ << ", sample_rate = " << p.sample_rate << "}";
+}
+
+// Instantiate the tests. Each test is instantiated using the function above,
+// so that all different parameter combinations are tested.
+INSTANTIATE_TEST_SUITE_P(MultiChannel,
+ NetEqStereoTestNoJitter,
+ ::testing::ValuesIn(GetTestParameters()));
+
+INSTANTIATE_TEST_SUITE_P(MultiChannel,
+ NetEqStereoTestPositiveDrift,
+ ::testing::ValuesIn(GetTestParameters()));
+
+INSTANTIATE_TEST_SUITE_P(MultiChannel,
+ NetEqStereoTestNegativeDrift,
+ ::testing::ValuesIn(GetTestParameters()));
+
+INSTANTIATE_TEST_SUITE_P(MultiChannel,
+ NetEqStereoTestDelays,
+ ::testing::ValuesIn(GetTestParameters()));
+
+INSTANTIATE_TEST_SUITE_P(MultiChannel,
+ NetEqStereoTestLosses,
+ ::testing::ValuesIn(GetTestParameters()));
+
+INSTANTIATE_TEST_SUITE_P(MultiChannel,
+ NetEqStereoTestSingleActiveChannelPlc,
+ ::testing::ValuesIn(GetTestParameters()));
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/neteq_unittest.cc b/third_party/libwebrtc/modules/audio_coding/neteq/neteq_unittest.cc
new file mode 100644
index 0000000000..451e0c9587
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/neteq_unittest.cc
@@ -0,0 +1,1013 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "api/neteq/neteq.h"
+
+#include <math.h>
+#include <stdlib.h>
+#include <string.h> // memset
+
+#include <algorithm>
+#include <memory>
+#include <set>
+#include <string>
+#include <vector>
+
+#include "absl/flags/flag.h"
+#include "api/audio/audio_frame.h"
+#include "api/audio_codecs/builtin_audio_decoder_factory.h"
+#include "modules/audio_coding/codecs/pcm16b/pcm16b.h"
+#include "modules/audio_coding/neteq/test/neteq_decoding_test.h"
+#include "modules/audio_coding/neteq/tools/audio_loop.h"
+#include "modules/audio_coding/neteq/tools/neteq_packet_source_input.h"
+#include "modules/audio_coding/neteq/tools/neteq_test.h"
+#include "modules/include/module_common_types_public.h"
+#include "modules/rtp_rtcp/include/rtcp_statistics.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "rtc_base/ignore_wundef.h"
+#include "rtc_base/message_digest.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "rtc_base/strings/string_builder.h"
+#include "rtc_base/system/arch.h"
+#include "test/field_trial.h"
+#include "test/gtest.h"
+#include "test/testsupport/file_utils.h"
+
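+// When --gen_ref is set, the DecodeAndCompare tests below presumably write new
+// reference data instead of comparing against the checked-in checksums (the
+// flag is forwarded as the last argument of DecodeAndCompare).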
+ABSL_FLAG(bool, gen_ref, false, "Generate reference files.");
+
+namespace webrtc {
+
+#if defined(WEBRTC_LINUX) && defined(WEBRTC_ARCH_X86_64) && \
+ defined(WEBRTC_NETEQ_UNITTEST_BITEXACT) && \
+ (defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX)) && \
+ defined(WEBRTC_CODEC_ILBC)
+#define MAYBE_TestBitExactness TestBitExactness
+#else
+#define MAYBE_TestBitExactness DISABLED_TestBitExactness
+#endif
+TEST_F(NetEqDecodingTest, MAYBE_TestBitExactness) {
+ const std::string input_rtp_file =
+ webrtc::test::ResourcePath("audio_coding/neteq_universal_new", "rtp");
+
+ const std::string output_checksum =
+ "dee7a10ab92526876a70a85bc48a4906901af3df";
+
+ const std::string network_stats_checksum =
+ "911dbf5fd97f48d25b8f0967286eb73c9d6f6158";
+
+ DecodeAndCompare(input_rtp_file, output_checksum, network_stats_checksum,
+ absl::GetFlag(FLAGS_gen_ref));
+}
+
+#if defined(WEBRTC_LINUX) && defined(WEBRTC_ARCH_X86_64) && \
+ defined(WEBRTC_NETEQ_UNITTEST_BITEXACT) && defined(WEBRTC_CODEC_OPUS)
+#define MAYBE_TestOpusBitExactness TestOpusBitExactness
+#else
+#define MAYBE_TestOpusBitExactness DISABLED_TestOpusBitExactness
+#endif
+TEST_F(NetEqDecodingTest, MAYBE_TestOpusBitExactness) {
+ const std::string input_rtp_file =
+ webrtc::test::ResourcePath("audio_coding/neteq_opus", "rtp");
+
+ // The checksum depends on whether SSE is enabled; the second part is the
+ // non-SSE checksum.
+ const std::string output_checksum =
+ "fec6827bb9ee0b21770bbbb4a3a6f8823bf537dc|"
+ "c5eb0a8fcf7e8255a40f821cb815e1096619efeb";
+
+ const std::string network_stats_checksum =
+ "3d043e47e5f4bb81d37e7bce8c44bf802965c853|"
+ "076662525572dba753b11578330bd491923f7f5e";
+
+ DecodeAndCompare(input_rtp_file, output_checksum, network_stats_checksum,
+ absl::GetFlag(FLAGS_gen_ref));
+}
+
+#if defined(WEBRTC_LINUX) && defined(WEBRTC_ARCH_X86_64) && \
+ defined(WEBRTC_NETEQ_UNITTEST_BITEXACT) && defined(WEBRTC_CODEC_OPUS)
+#define MAYBE_TestOpusDtxBitExactness TestOpusDtxBitExactness
+#else
+#define MAYBE_TestOpusDtxBitExactness DISABLED_TestOpusDtxBitExactness
+#endif
+TEST_F(NetEqDecodingTest, MAYBE_TestOpusDtxBitExactness) {
+ const std::string input_rtp_file =
+ webrtc::test::ResourcePath("audio_coding/neteq_opus_dtx", "rtp");
+
+ // The checksum depends on whether SSE is enabled; the second part is the
+ // non-SSE checksum.
+ const std::string output_checksum =
+ "b3c4899eab5378ef5e54f2302948872149f6ad5e|"
+ "e97e32a77355e7ce46a2dc2f43bf1c2805530fcb";
+
+ const std::string network_stats_checksum =
+ "dc8447b9fee1a21fd5d1f4045d62b982a3fb0215";
+
+ DecodeAndCompare(input_rtp_file, output_checksum, network_stats_checksum,
+ absl::GetFlag(FLAGS_gen_ref));
+}
+
+// Use fax mode to avoid time-scaling. This is to simplify the testing of
+// packet waiting times in the packet buffer.
+class NetEqDecodingTestFaxMode : public NetEqDecodingTest {
+ protected:
+ NetEqDecodingTestFaxMode() : NetEqDecodingTest() {
+ config_.for_test_no_time_stretching = true;
+ }
+ void TestJitterBufferDelay(bool apply_packet_loss);
+};
+
+TEST_F(NetEqDecodingTestFaxMode, TestFrameWaitingTimeStatistics) {
+ // Insert 30 dummy packets at once. Each packet contains 10 ms 16 kHz audio.
+ size_t num_frames = 30;
+ const size_t kSamples = 10 * 16;
+ const size_t kPayloadBytes = kSamples * 2;
+ for (size_t i = 0; i < num_frames; ++i) {
+ const uint8_t payload[kPayloadBytes] = {0};
+ RTPHeader rtp_info;
+ rtp_info.sequenceNumber = rtc::checked_cast<uint16_t>(i);
+ rtp_info.timestamp = rtc::checked_cast<uint32_t>(i * kSamples);
+ rtp_info.ssrc = 0x1234; // Just an arbitrary SSRC.
+ rtp_info.payloadType = 94; // PCM16b WB codec.
+ rtp_info.markerBit = 0;
+ ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload));
+ }
+ // Pull out all data.
+ for (size_t i = 0; i < num_frames; ++i) {
+ bool muted;
+ ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
+ ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
+ }
+
+ NetEqNetworkStatistics stats;
+ EXPECT_EQ(0, neteq_->NetworkStatistics(&stats));
+ // Since all frames are dumped into NetEQ at once, but pulled out with 10 ms
+ // spacing (per definition), we expect the delay to increase by 10 ms for
+ // each packet. Thus, we are calculating the statistics for a series from 10
+ // to 300, in steps of 10 ms.
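+ // Worked example: for the series 10, 20, ..., 300 ms, the mean is
+ // (10 + 300) / 2 = 155 ms and the median (the average of the 15th and 16th
+ // values) is (150 + 160) / 2 = 155 ms.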
+ EXPECT_EQ(155, stats.mean_waiting_time_ms);
+ EXPECT_EQ(155, stats.median_waiting_time_ms);
+ EXPECT_EQ(10, stats.min_waiting_time_ms);
+ EXPECT_EQ(300, stats.max_waiting_time_ms);
+
+ // Check statistics again and make sure it's been reset.
+ EXPECT_EQ(0, neteq_->NetworkStatistics(&stats));
+ EXPECT_EQ(-1, stats.mean_waiting_time_ms);
+ EXPECT_EQ(-1, stats.median_waiting_time_ms);
+ EXPECT_EQ(-1, stats.min_waiting_time_ms);
+ EXPECT_EQ(-1, stats.max_waiting_time_ms);
+}
+
+TEST_F(NetEqDecodingTest, LongCngWithNegativeClockDrift) {
+ // Apply a clock drift of -25 ms / s (sender faster than receiver).
+ const double kDriftFactor = 1000.0 / (1000.0 + 25.0);
+ const double kNetworkFreezeTimeMs = 0.0;
+ const bool kGetAudioDuringFreezeRecovery = false;
+ const int kDelayToleranceMs = 20;
+ const int kMaxTimeToSpeechMs = 100;
+ LongCngWithClockDrift(kDriftFactor, kNetworkFreezeTimeMs,
+ kGetAudioDuringFreezeRecovery, kDelayToleranceMs,
+ kMaxTimeToSpeechMs);
+}
+
+TEST_F(NetEqDecodingTest, LongCngWithPositiveClockDrift) {
+ // Apply a clock drift of +25 ms / s (sender slower than receiver).
+ const double kDriftFactor = 1000.0 / (1000.0 - 25.0);
+ const double kNetworkFreezeTimeMs = 0.0;
+ const bool kGetAudioDuringFreezeRecovery = false;
+ const int kDelayToleranceMs = 40;
+ const int kMaxTimeToSpeechMs = 100;
+ LongCngWithClockDrift(kDriftFactor, kNetworkFreezeTimeMs,
+ kGetAudioDuringFreezeRecovery, kDelayToleranceMs,
+ kMaxTimeToSpeechMs);
+}
+
+TEST_F(NetEqDecodingTest, LongCngWithNegativeClockDriftNetworkFreeze) {
+ // Apply a clock drift of -25 ms / s (sender faster than receiver).
+ const double kDriftFactor = 1000.0 / (1000.0 + 25.0);
+ const double kNetworkFreezeTimeMs = 5000.0;
+ const bool kGetAudioDuringFreezeRecovery = false;
+ const int kDelayToleranceMs = 60;
+ const int kMaxTimeToSpeechMs = 200;
+ LongCngWithClockDrift(kDriftFactor, kNetworkFreezeTimeMs,
+ kGetAudioDuringFreezeRecovery, kDelayToleranceMs,
+ kMaxTimeToSpeechMs);
+}
+
+TEST_F(NetEqDecodingTest, LongCngWithPositiveClockDriftNetworkFreeze) {
+ // Apply a clock drift of +25 ms / s (sender slower than receiver).
+ const double kDriftFactor = 1000.0 / (1000.0 - 25.0);
+ const double kNetworkFreezeTimeMs = 5000.0;
+ const bool kGetAudioDuringFreezeRecovery = false;
+ const int kDelayToleranceMs = 40;
+ const int kMaxTimeToSpeechMs = 100;
+ LongCngWithClockDrift(kDriftFactor, kNetworkFreezeTimeMs,
+ kGetAudioDuringFreezeRecovery, kDelayToleranceMs,
+ kMaxTimeToSpeechMs);
+}
+
+TEST_F(NetEqDecodingTest, LongCngWithPositiveClockDriftNetworkFreezeExtraPull) {
+ // Apply a clock drift of +25 ms / s (sender slower than receiver).
+ const double kDriftFactor = 1000.0 / (1000.0 - 25.0);
+ const double kNetworkFreezeTimeMs = 5000.0;
+ const bool kGetAudioDuringFreezeRecovery = true;
+ const int kDelayToleranceMs = 40;
+ const int kMaxTimeToSpeechMs = 100;
+ LongCngWithClockDrift(kDriftFactor, kNetworkFreezeTimeMs,
+ kGetAudioDuringFreezeRecovery, kDelayToleranceMs,
+ kMaxTimeToSpeechMs);
+}
+
+TEST_F(NetEqDecodingTest, LongCngWithoutClockDrift) {
+ const double kDriftFactor = 1.0; // No drift.
+ const double kNetworkFreezeTimeMs = 0.0;
+ const bool kGetAudioDuringFreezeRecovery = false;
+ const int kDelayToleranceMs = 10;
+ const int kMaxTimeToSpeechMs = 50;
+ LongCngWithClockDrift(kDriftFactor, kNetworkFreezeTimeMs,
+ kGetAudioDuringFreezeRecovery, kDelayToleranceMs,
+ kMaxTimeToSpeechMs);
+}
+
+TEST_F(NetEqDecodingTest, UnknownPayloadType) {
+ const size_t kPayloadBytes = 100;
+ uint8_t payload[kPayloadBytes] = {0};
+ RTPHeader rtp_info;
+ PopulateRtpInfo(0, 0, &rtp_info);
+ rtp_info.payloadType = 1; // Not registered as a decoder.
+ EXPECT_EQ(NetEq::kFail, neteq_->InsertPacket(rtp_info, payload));
+}
+
+#if defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX)
+#define MAYBE_DecoderError DecoderError
+#else
+#define MAYBE_DecoderError DISABLED_DecoderError
+#endif
+
+TEST_F(NetEqDecodingTest, MAYBE_DecoderError) {
+ const size_t kPayloadBytes = 100;
+ uint8_t payload[kPayloadBytes] = {0};
+ RTPHeader rtp_info;
+ PopulateRtpInfo(0, 0, &rtp_info);
+ rtp_info.payloadType = 103; // iSAC, but the payload is invalid.
+ EXPECT_EQ(0, neteq_->InsertPacket(rtp_info, payload));
+ // Set all of `out_frame_` to 1, and verify that it was set to 0 by the call
+ // to GetAudio.
+ int16_t* out_frame_data = out_frame_.mutable_data();
+ for (size_t i = 0; i < AudioFrame::kMaxDataSizeSamples; ++i) {
+ out_frame_data[i] = 1;
+ }
+ bool muted;
+ EXPECT_EQ(NetEq::kFail, neteq_->GetAudio(&out_frame_, &muted));
+ ASSERT_FALSE(muted);
+
+ // Verify that the first 160 samples are set to 0.
+ static const int kExpectedOutputLength = 160; // 10 ms at 16 kHz sample rate.
+ const int16_t* const_out_frame_data = out_frame_.data();
+ for (int i = 0; i < kExpectedOutputLength; ++i) {
+ rtc::StringBuilder ss;
+ ss << "i = " << i;
+ SCOPED_TRACE(ss.str()); // Print out the parameter values on failure.
+ EXPECT_EQ(0, const_out_frame_data[i]);
+ }
+}
+
+TEST_F(NetEqDecodingTest, GetAudioBeforeInsertPacket) {
+ // Set all of `out_frame_` to 1, and verify that it was set to 0 by the call
+ // to GetAudio.
+ int16_t* out_frame_data = out_frame_.mutable_data();
+ for (size_t i = 0; i < AudioFrame::kMaxDataSizeSamples; ++i) {
+ out_frame_data[i] = 1;
+ }
+ bool muted;
+ EXPECT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
+ ASSERT_FALSE(muted);
+ // Verify that the first block of samples is set to 0.
+ static const int kExpectedOutputLength =
+ kInitSampleRateHz / 100; // 10 ms at initial sample rate.
+ const int16_t* const_out_frame_data = out_frame_.data();
+ for (int i = 0; i < kExpectedOutputLength; ++i) {
+ rtc::StringBuilder ss;
+ ss << "i = " << i;
+ SCOPED_TRACE(ss.str()); // Print out the parameter values on failure.
+ EXPECT_EQ(0, const_out_frame_data[i]);
+ }
+ // Verify that the sample rate did not change from the initial configuration.
+ EXPECT_EQ(config_.sample_rate_hz, neteq_->last_output_sample_rate_hz());
+}
+
+class NetEqBgnTest : public NetEqDecodingTest {
+ protected:
+ void CheckBgn(int sampling_rate_hz) {
+ size_t expected_samples_per_channel = 0;
+ uint8_t payload_type = 0xFF; // Invalid.
+ if (sampling_rate_hz == 8000) {
+ expected_samples_per_channel = kBlockSize8kHz;
+ payload_type = 93; // PCM 16, 8 kHz.
+ } else if (sampling_rate_hz == 16000) {
+ expected_samples_per_channel = kBlockSize16kHz;
+ payload_type = 94; // PCM 16, 16 kHz.
+ } else if (sampling_rate_hz == 32000) {
+ expected_samples_per_channel = kBlockSize32kHz;
+ payload_type = 95; // PCM 16, 32 kHz.
+ } else {
+ ASSERT_TRUE(false); // Unsupported test case.
+ }
+
+ AudioFrame output;
+ test::AudioLoop input;
+ // We are using the same 32 kHz input file for all tests, regardless of
+ // `sampling_rate_hz`. The output may sound weird, but the test is still
+ // valid.
+ ASSERT_TRUE(input.Init(
+ webrtc::test::ResourcePath("audio_coding/testfile32kHz", "pcm"),
+ 10 * sampling_rate_hz, // Max 10 seconds loop length.
+ expected_samples_per_channel));
+
+ // Payload of 10 ms of PCM16 32 kHz.
+ uint8_t payload[kBlockSize32kHz * sizeof(int16_t)];
+ RTPHeader rtp_info;
+ PopulateRtpInfo(0, 0, &rtp_info);
+ rtp_info.payloadType = payload_type;
+
+ bool muted;
+ for (int n = 0; n < 10; ++n) { // Insert few packets and get audio.
+ auto block = input.GetNextBlock();
+ ASSERT_EQ(expected_samples_per_channel, block.size());
+ size_t enc_len_bytes =
+ WebRtcPcm16b_Encode(block.data(), block.size(), payload);
+ ASSERT_EQ(enc_len_bytes, expected_samples_per_channel * 2);
+
+ ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, rtc::ArrayView<const uint8_t>(
+ payload, enc_len_bytes)));
+ output.Reset();
+ ASSERT_EQ(0, neteq_->GetAudio(&output, &muted));
+ ASSERT_EQ(1u, output.num_channels_);
+ ASSERT_EQ(expected_samples_per_channel, output.samples_per_channel_);
+ ASSERT_EQ(AudioFrame::kNormalSpeech, output.speech_type_);
+
+ // Next packet.
+ rtp_info.timestamp +=
+ rtc::checked_cast<uint32_t>(expected_samples_per_channel);
+ rtp_info.sequenceNumber++;
+ }
+
+ output.Reset();
+
+ // Get audio without inserting packets, expecting PLC and PLC-to-CNG. Pull
+ // one frame without checking speech-type. This is the first frame pulled
+ // without inserting any packet, and might not be labeled as PLC.
+ ASSERT_EQ(0, neteq_->GetAudio(&output, &muted));
+ ASSERT_EQ(1u, output.num_channels_);
+ ASSERT_EQ(expected_samples_per_channel, output.samples_per_channel_);
+
+ // To be able to test the fading of background noise, we need to pull at
+ // least 611 frames.
+ const int kFadingThreshold = 611;
+
+ // Test several PLC-to-CNG transitions for the expected behavior. The number
+ // 20 is arbitrary, but large enough to cover a sufficient number of frames.
+ const int kNumPlcToCngTestFrames = 20;
+ bool plc_to_cng = false;
+ for (int n = 0; n < kFadingThreshold + kNumPlcToCngTestFrames; ++n) {
+ output.Reset();
+ // Set to non-zero.
+ memset(output.mutable_data(), 1, AudioFrame::kMaxDataSizeBytes);
+ ASSERT_EQ(0, neteq_->GetAudio(&output, &muted));
+ ASSERT_FALSE(muted);
+ ASSERT_EQ(1u, output.num_channels_);
+ ASSERT_EQ(expected_samples_per_channel, output.samples_per_channel_);
+ if (output.speech_type_ == AudioFrame::kPLCCNG) {
+ plc_to_cng = true;
+ double sum_squared = 0;
+ const int16_t* output_data = output.data();
+ for (size_t k = 0;
+ k < output.num_channels_ * output.samples_per_channel_; ++k)
+ sum_squared += output_data[k] * output_data[k];
+ EXPECT_EQ(0, sum_squared);
+ } else {
+ EXPECT_EQ(AudioFrame::kPLC, output.speech_type_);
+ }
+ }
+ EXPECT_TRUE(plc_to_cng); // Just to be sure that PLC-to-CNG has occurred.
+ }
+};
+
+TEST_F(NetEqBgnTest, RunTest) {
+ CheckBgn(8000);
+ CheckBgn(16000);
+ CheckBgn(32000);
+}
+
+TEST_F(NetEqDecodingTest, SequenceNumberWrap) {
+ // Start with a sequence number that will soon wrap.
+ std::set<uint16_t> drop_seq_numbers; // Don't drop any packets.
+ WrapTest(0xFFFF - 10, 0, drop_seq_numbers, true, false);
+}
+
+TEST_F(NetEqDecodingTest, SequenceNumberWrapAndDrop) {
+ // Start with a sequence number that will soon wrap.
+ std::set<uint16_t> drop_seq_numbers;
+ drop_seq_numbers.insert(0xFFFF);
+ drop_seq_numbers.insert(0x0);
+ WrapTest(0xFFFF - 10, 0, drop_seq_numbers, true, false);
+}
+
+TEST_F(NetEqDecodingTest, TimestampWrap) {
+ // Start with a timestamp that will soon wrap.
+ std::set<uint16_t> drop_seq_numbers;
+ WrapTest(0, 0xFFFFFFFF - 3000, drop_seq_numbers, false, true);
+}
+
+TEST_F(NetEqDecodingTest, TimestampAndSequenceNumberWrap) {
+ // Start with a timestamp and a sequence number that will wrap at the same
+ // time.
+ std::set<uint16_t> drop_seq_numbers;
+ WrapTest(0xFFFF - 10, 0xFFFFFFFF - 5000, drop_seq_numbers, true, true);
+}
+
+TEST_F(NetEqDecodingTest, DiscardDuplicateCng) {
+ uint16_t seq_no = 0;
+ uint32_t timestamp = 0;
+ const int kFrameSizeMs = 10;
+ const int kSampleRateKhz = 16;
+ const int kSamples = kFrameSizeMs * kSampleRateKhz;
+ const size_t kPayloadBytes = kSamples * 2;
+
+ const int algorithmic_delay_samples =
+ std::max(algorithmic_delay_ms_ * kSampleRateKhz, 5 * kSampleRateKhz / 8);
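+ // At 16 kHz this is algorithmic_delay_ms_ * 16 samples, with a floor of
+ // 5 * 16 / 8 = 10 samples.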
+ // Insert three speech packets. Three are needed to get the frame length
+ // correct.
+ uint8_t payload[kPayloadBytes] = {0};
+ RTPHeader rtp_info;
+ bool muted;
+ for (int i = 0; i < 3; ++i) {
+ PopulateRtpInfo(seq_no, timestamp, &rtp_info);
+ ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload));
+ ++seq_no;
+ timestamp += kSamples;
+
+ // Pull audio once.
+ ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
+ ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
+ }
+ // Verify speech output.
+ EXPECT_EQ(AudioFrame::kNormalSpeech, out_frame_.speech_type_);
+
+ // Insert same CNG packet twice.
+ const int kCngPeriodMs = 100;
+ const int kCngPeriodSamples = kCngPeriodMs * kSampleRateKhz;
+ size_t payload_len;
+ PopulateCng(seq_no, timestamp, &rtp_info, payload, &payload_len);
+ // This is the first time this CNG packet is inserted.
+ ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, rtc::ArrayView<const uint8_t>(
+ payload, payload_len)));
+
+ // Pull audio once and make sure CNG is played.
+ ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
+ ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
+ EXPECT_EQ(AudioFrame::kCNG, out_frame_.speech_type_);
+ EXPECT_FALSE(
+ neteq_->GetPlayoutTimestamp()); // Returns empty value during CNG.
+ EXPECT_EQ(timestamp - algorithmic_delay_samples,
+ out_frame_.timestamp_ + out_frame_.samples_per_channel_);
+
+ // Insert the same CNG packet again. Note that at this point it is old, since
+ // we have already decoded the first copy of it.
+ ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, rtc::ArrayView<const uint8_t>(
+ payload, payload_len)));
+
+ // Pull audio until we have played `kCngPeriodMs` of CNG. Start at 10 ms since
+ // we have already pulled out CNG once.
+ for (int cng_time_ms = 10; cng_time_ms < kCngPeriodMs; cng_time_ms += 10) {
+ ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
+ ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
+ EXPECT_EQ(AudioFrame::kCNG, out_frame_.speech_type_);
+ EXPECT_FALSE(
+ neteq_->GetPlayoutTimestamp()); // Returns empty value during CNG.
+ EXPECT_EQ(timestamp - algorithmic_delay_samples,
+ out_frame_.timestamp_ + out_frame_.samples_per_channel_);
+ }
+
+ ++seq_no;
+ timestamp += kCngPeriodSamples;
+ uint32_t first_speech_timestamp = timestamp;
+ // Insert speech again.
+ for (int i = 0; i < 3; ++i) {
+ PopulateRtpInfo(seq_no, timestamp, &rtp_info);
+ ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload));
+ ++seq_no;
+ timestamp += kSamples;
+ }
+
+ // Pull audio once and verify that the output is speech again.
+ ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
+ ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
+ EXPECT_EQ(AudioFrame::kNormalSpeech, out_frame_.speech_type_);
+ absl::optional<uint32_t> playout_timestamp = neteq_->GetPlayoutTimestamp();
+ ASSERT_TRUE(playout_timestamp);
+ EXPECT_EQ(first_speech_timestamp + kSamples - algorithmic_delay_samples,
+ *playout_timestamp);
+}
+
+TEST_F(NetEqDecodingTest, CngFirst) {
+ uint16_t seq_no = 0;
+ uint32_t timestamp = 0;
+ const int kFrameSizeMs = 10;
+ const int kSampleRateKhz = 16;
+ const int kSamples = kFrameSizeMs * kSampleRateKhz;
+ const int kPayloadBytes = kSamples * 2;
+ const int kCngPeriodMs = 100;
+ const int kCngPeriodSamples = kCngPeriodMs * kSampleRateKhz;
+ size_t payload_len;
+
+ uint8_t payload[kPayloadBytes] = {0};
+ RTPHeader rtp_info;
+
+ PopulateCng(seq_no, timestamp, &rtp_info, payload, &payload_len);
+ ASSERT_EQ(NetEq::kOK,
+ neteq_->InsertPacket(
+ rtp_info, rtc::ArrayView<const uint8_t>(payload, payload_len)));
+ ++seq_no;
+ timestamp += kCngPeriodSamples;
+
+ // Pull audio once and make sure CNG is played.
+ bool muted;
+ ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
+ ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
+ EXPECT_EQ(AudioFrame::kCNG, out_frame_.speech_type_);
+
+ // Insert some speech packets.
+ const uint32_t first_speech_timestamp = timestamp;
+ int timeout_counter = 0;
+ do {
+ ASSERT_LT(timeout_counter++, 20) << "Test timed out";
+ PopulateRtpInfo(seq_no, timestamp, &rtp_info);
+ ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload));
+ ++seq_no;
+ timestamp += kSamples;
+
+ // Pull audio once.
+ ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
+ ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
+ } while (!IsNewerTimestamp(out_frame_.timestamp_, first_speech_timestamp));
+ // Verify speech output.
+ EXPECT_EQ(AudioFrame::kNormalSpeech, out_frame_.speech_type_);
+}
+
+class NetEqDecodingTestWithMutedState : public NetEqDecodingTest {
+ public:
+ NetEqDecodingTestWithMutedState() : NetEqDecodingTest() {
+ config_.enable_muted_state = true;
+ }
+
+ protected:
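+ // kSamples is 10 ms of audio at 16 kHz; payloads carry two bytes per sample
+ // (16-bit PCM).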
+ static constexpr size_t kSamples = 10 * 16;
+ static constexpr size_t kPayloadBytes = kSamples * 2;
+
+ void InsertPacket(uint32_t rtp_timestamp) {
+ uint8_t payload[kPayloadBytes] = {0};
+ RTPHeader rtp_info;
+ PopulateRtpInfo(0, rtp_timestamp, &rtp_info);
+ EXPECT_EQ(0, neteq_->InsertPacket(rtp_info, payload));
+ }
+
+ void InsertCngPacket(uint32_t rtp_timestamp) {
+ uint8_t payload[kPayloadBytes] = {0};
+ RTPHeader rtp_info;
+ size_t payload_len;
+ PopulateCng(0, rtp_timestamp, &rtp_info, payload, &payload_len);
+ EXPECT_EQ(NetEq::kOK,
+ neteq_->InsertPacket(rtp_info, rtc::ArrayView<const uint8_t>(
+ payload, payload_len)));
+ }
+
+ bool GetAudioReturnMuted() {
+ bool muted;
+ EXPECT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
+ return muted;
+ }
+
+ void GetAudioUntilMuted() {
+ while (!GetAudioReturnMuted()) {
+ ASSERT_LT(counter_++, 1000) << "Test timed out";
+ }
+ }
+
+ void GetAudioUntilNormal() {
+ bool muted = false;
+ while (out_frame_.speech_type_ != AudioFrame::kNormalSpeech) {
+ EXPECT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
+ ASSERT_LT(counter_++, 1000) << "Test timed out";
+ }
+ EXPECT_FALSE(muted);
+ }
+
+ int counter_ = 0;
+};
+
+// Verifies that NetEq goes in and out of muted state as expected.
+TEST_F(NetEqDecodingTestWithMutedState, MutedState) {
+ // Insert one speech packet.
+ InsertPacket(0);
+ // Pull out audio once and expect it not to be muted.
+ EXPECT_FALSE(GetAudioReturnMuted());
+ // Pull data until faded out.
+ GetAudioUntilMuted();
+ EXPECT_TRUE(out_frame_.muted());
+
+ // Verify that output audio is not written during muted mode. Other parameters
+ // should be correct, though.
+ AudioFrame new_frame;
+ int16_t* frame_data = new_frame.mutable_data();
+ for (size_t i = 0; i < AudioFrame::kMaxDataSizeSamples; i++) {
+ frame_data[i] = 17;
+ }
+ bool muted;
+ EXPECT_EQ(0, neteq_->GetAudio(&new_frame, &muted));
+ EXPECT_TRUE(muted);
+ EXPECT_TRUE(out_frame_.muted());
+ for (size_t i = 0; i < AudioFrame::kMaxDataSizeSamples; i++) {
+ EXPECT_EQ(17, frame_data[i]);
+ }
+ EXPECT_EQ(out_frame_.timestamp_ + out_frame_.samples_per_channel_,
+ new_frame.timestamp_);
+ EXPECT_EQ(out_frame_.samples_per_channel_, new_frame.samples_per_channel_);
+ EXPECT_EQ(out_frame_.sample_rate_hz_, new_frame.sample_rate_hz_);
+ EXPECT_EQ(out_frame_.num_channels_, new_frame.num_channels_);
+ EXPECT_EQ(out_frame_.speech_type_, new_frame.speech_type_);
+ EXPECT_EQ(out_frame_.vad_activity_, new_frame.vad_activity_);
+
+ // Insert new data. Timestamp is corrected for the time elapsed since the last
+ // packet. Verify that normal operation resumes.
+ InsertPacket(kSamples * counter_);
+ GetAudioUntilNormal();
+ EXPECT_FALSE(out_frame_.muted());
+
+ NetEqNetworkStatistics stats;
+ EXPECT_EQ(0, neteq_->NetworkStatistics(&stats));
+ // NetEqNetworkStatistics::expand_rate tells the fraction of samples that
+ // were concealment samples, in Q14 (16384 = 100%). The vast majority should
+ // be concealment samples in this test, hence the threshold of 14000
+ // (roughly 85%).
+ EXPECT_GT(stats.expand_rate, 14000);
+ // And, it should be greater than the speech_expand_rate.
+ EXPECT_GT(stats.expand_rate, stats.speech_expand_rate);
+}
+
+// Verifies that NetEq goes out of muted state when given a delayed packet.
+TEST_F(NetEqDecodingTestWithMutedState, MutedStateDelayedPacket) {
+ // Insert one speech packet.
+ InsertPacket(0);
+ // Pull out audio once and expect it not to be muted.
+ EXPECT_FALSE(GetAudioReturnMuted());
+ // Pull data until faded out.
+ GetAudioUntilMuted();
+ // Insert new data. Timestamp is only corrected for half of the time elapsed
+ // since the last packet. That is, the new packet is delayed. Verify that
+ // normal operation resumes.
+ InsertPacket(kSamples * counter_ / 2);
+ GetAudioUntilNormal();
+}
+
+// Verifies that NetEq goes out of muted state when given a future packet.
+TEST_F(NetEqDecodingTestWithMutedState, MutedStateFuturePacket) {
+ // Insert one speech packet.
+ InsertPacket(0);
+ // Pull out audio once and expect it not to be muted.
+ EXPECT_FALSE(GetAudioReturnMuted());
+ // Pull data until faded out.
+ GetAudioUntilMuted();
+ // Insert new data. Timestamp is over-corrected for the time elapsed since the
+ // last packet. That is, the new packet is too early. Verify that normal
+ // operation resumes.
+ InsertPacket(kSamples * counter_ * 2);
+ GetAudioUntilNormal();
+}
+
+// Verifies that NetEq goes out of muted state when given an old packet.
+TEST_F(NetEqDecodingTestWithMutedState, MutedStateOldPacket) {
+ // Insert one speech packet.
+ InsertPacket(0);
+ // Pull out audio once and expect it not to be muted.
+ EXPECT_FALSE(GetAudioReturnMuted());
+ // Pull data until faded out.
+ GetAudioUntilMuted();
+
+ EXPECT_NE(AudioFrame::kNormalSpeech, out_frame_.speech_type_);
+ // Insert a few packets which are older than the first packet.
+ for (int i = 0; i < 5; ++i) {
+ InsertPacket(kSamples * (i - 1000));
+ }
+ EXPECT_FALSE(GetAudioReturnMuted());
+ EXPECT_EQ(AudioFrame::kNormalSpeech, out_frame_.speech_type_);
+}
+
+// Verifies that NetEq doesn't enter muted state when CNG mode is active and the
+// packet stream is suspended for a long time.
+TEST_F(NetEqDecodingTestWithMutedState, DoNotMuteExtendedCngWithoutPackets) {
+ // Insert one CNG packet.
+ InsertCngPacket(0);
+
+ // Pull 10 seconds of audio (10 ms audio generated per lap).
+ for (int i = 0; i < 1000; ++i) {
+ bool muted;
+ EXPECT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
+ ASSERT_FALSE(muted);
+ }
+ EXPECT_EQ(AudioFrame::kCNG, out_frame_.speech_type_);
+}
+
+// Verifies that NetEq goes back to normal after a long CNG period with the
+// packet stream suspended.
+TEST_F(NetEqDecodingTestWithMutedState, RecoverAfterExtendedCngWithoutPackets) {
+ // Insert one CNG packet.
+ InsertCngPacket(0);
+
+ // Pull 10 seconds of audio (10 ms audio generated per lap).
+ for (int i = 0; i < 1000; ++i) {
+ bool muted;
+ EXPECT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
+ }
+
+ // Insert new data. Timestamp is corrected for the time elapsed since the last
+ // packet. Verify that normal operation resumes.
+ InsertPacket(kSamples * counter_);
+ GetAudioUntilNormal();
+}
+
+namespace {
+::testing::AssertionResult AudioFramesEqualExceptData(const AudioFrame& a,
+ const AudioFrame& b) {
+ if (a.timestamp_ != b.timestamp_)
+ return ::testing::AssertionFailure() << "timestamp_ diff (" << a.timestamp_
+ << " != " << b.timestamp_ << ")";
+ if (a.sample_rate_hz_ != b.sample_rate_hz_)
+ return ::testing::AssertionFailure()
+ << "sample_rate_hz_ diff (" << a.sample_rate_hz_
+ << " != " << b.sample_rate_hz_ << ")";
+ if (a.samples_per_channel_ != b.samples_per_channel_)
+ return ::testing::AssertionFailure()
+ << "samples_per_channel_ diff (" << a.samples_per_channel_
+ << " != " << b.samples_per_channel_ << ")";
+ if (a.num_channels_ != b.num_channels_)
+ return ::testing::AssertionFailure()
+ << "num_channels_ diff (" << a.num_channels_
+ << " != " << b.num_channels_ << ")";
+ if (a.speech_type_ != b.speech_type_)
+ return ::testing::AssertionFailure()
+ << "speech_type_ diff (" << a.speech_type_
+ << " != " << b.speech_type_ << ")";
+ if (a.vad_activity_ != b.vad_activity_)
+ return ::testing::AssertionFailure()
+ << "vad_activity_ diff (" << a.vad_activity_
+ << " != " << b.vad_activity_ << ")";
+ return ::testing::AssertionSuccess();
+}
+
+::testing::AssertionResult AudioFramesEqual(const AudioFrame& a,
+ const AudioFrame& b) {
+ ::testing::AssertionResult res = AudioFramesEqualExceptData(a, b);
+ if (!res)
+ return res;
+ if (memcmp(a.data(), b.data(),
+ a.samples_per_channel_ * a.num_channels_ * sizeof(*a.data())) !=
+ 0) {
+ return ::testing::AssertionFailure() << "data_ diff";
+ }
+ return ::testing::AssertionSuccess();
+}
+
+} // namespace
+
+TEST_F(NetEqDecodingTestTwoInstances, CompareMutedStateOnOff) {
+ ASSERT_FALSE(config_.enable_muted_state);
+ config2_.enable_muted_state = true;
+ CreateSecondInstance();
+
+ // Insert one speech packet into both NetEqs.
+ const size_t kSamples = 10 * 16;
+ const size_t kPayloadBytes = kSamples * 2;
+ uint8_t payload[kPayloadBytes] = {0};
+ RTPHeader rtp_info;
+ PopulateRtpInfo(0, 0, &rtp_info);
+ EXPECT_EQ(0, neteq_->InsertPacket(rtp_info, payload));
+ EXPECT_EQ(0, neteq2_->InsertPacket(rtp_info, payload));
+
+ AudioFrame out_frame1, out_frame2;
+ bool muted;
+ for (int i = 0; i < 1000; ++i) {
+ rtc::StringBuilder ss;
+ ss << "i = " << i;
+ SCOPED_TRACE(ss.str()); // Print out the loop iterator on failure.
+ EXPECT_EQ(0, neteq_->GetAudio(&out_frame1, &muted));
+ EXPECT_FALSE(muted);
+ EXPECT_EQ(0, neteq2_->GetAudio(&out_frame2, &muted));
+ if (muted) {
+ EXPECT_TRUE(AudioFramesEqualExceptData(out_frame1, out_frame2));
+ } else {
+ EXPECT_TRUE(AudioFramesEqual(out_frame1, out_frame2));
+ }
+ }
+ EXPECT_TRUE(muted);
+
+ // Insert new data. Timestamp is corrected for the time elapsed since the last
+ // packet.
+ for (int i = 0; i < 5; ++i) {
+ PopulateRtpInfo(0, kSamples * 1000 + kSamples * i, &rtp_info);
+ EXPECT_EQ(0, neteq_->InsertPacket(rtp_info, payload));
+ EXPECT_EQ(0, neteq2_->InsertPacket(rtp_info, payload));
+ }
+
+ int counter = 0;
+ while (out_frame1.speech_type_ != AudioFrame::kNormalSpeech) {
+ ASSERT_LT(counter++, 1000) << "Test timed out";
+ rtc::StringBuilder ss;
+ ss << "counter = " << counter;
+ SCOPED_TRACE(ss.str()); // Print out the loop iterator on failure.
+ EXPECT_EQ(0, neteq_->GetAudio(&out_frame1, &muted));
+ EXPECT_FALSE(muted);
+ EXPECT_EQ(0, neteq2_->GetAudio(&out_frame2, &muted));
+ if (muted) {
+ EXPECT_TRUE(AudioFramesEqualExceptData(out_frame1, out_frame2));
+ } else {
+ EXPECT_TRUE(AudioFramesEqual(out_frame1, out_frame2));
+ }
+ }
+ EXPECT_FALSE(muted);
+}
+
+TEST_F(NetEqDecodingTest, TestConcealmentEvents) {
+ const int kNumConcealmentEvents = 19;
+ const size_t kSamples = 10 * 16;
+ const size_t kPayloadBytes = kSamples * 2;
+ int seq_no = 0;
+ RTPHeader rtp_info;
+ rtp_info.ssrc = 0x1234; // Just an arbitrary SSRC.
+ rtp_info.payloadType = 94; // PCM16b WB codec.
+ rtp_info.markerBit = 0;
+ const uint8_t payload[kPayloadBytes] = {0};
+ bool muted;
+
+ for (int i = 0; i < kNumConcealmentEvents; i++) {
+ // Insert some packets of 10 ms size.
+ for (int j = 0; j < 10; j++) {
+ rtp_info.sequenceNumber = seq_no++;
+ rtp_info.timestamp = rtp_info.sequenceNumber * kSamples;
+ neteq_->InsertPacket(rtp_info, payload);
+ neteq_->GetAudio(&out_frame_, &muted);
+ }
+
+ // Lose a number of packets.
+ int num_lost = 1 + i;
+ for (int j = 0; j < num_lost; j++) {
+ seq_no++;
+ neteq_->GetAudio(&out_frame_, &muted);
+ }
+ }
+
+ // Check number of concealment events.
+ NetEqLifetimeStatistics stats = neteq_->GetLifetimeStatistics();
+ EXPECT_EQ(kNumConcealmentEvents, static_cast<int>(stats.concealment_events));
+}
+
+// Test that the jitter buffer delay stat is computed correctly.
+void NetEqDecodingTestFaxMode::TestJitterBufferDelay(bool apply_packet_loss) {
+ const int kNumPackets = 10;
+ const int kDelayInNumPackets = 2;
+ const int kPacketLenMs = 10; // All packets are of 10 ms size.
+ const size_t kSamples = kPacketLenMs * 16;
+ const size_t kPayloadBytes = kSamples * 2;
+ RTPHeader rtp_info;
+ rtp_info.ssrc = 0x1234; // Just an arbitrary SSRC.
+ rtp_info.payloadType = 94; // PCM16b WB codec.
+ rtp_info.markerBit = 0;
+ const uint8_t payload[kPayloadBytes] = {0};
+ bool muted;
+ int packets_sent = 0;
+ int packets_received = 0;
+ int expected_delay = 0;
+ int expected_target_delay = 0;
+ uint64_t expected_emitted_count = 0;
+ while (packets_received < kNumPackets) {
+ // Insert packet.
+ if (packets_sent < kNumPackets) {
+ rtp_info.sequenceNumber = packets_sent++;
+ rtp_info.timestamp = rtp_info.sequenceNumber * kSamples;
+ neteq_->InsertPacket(rtp_info, payload);
+ }
+
+ // Get packet.
+ if (packets_sent > kDelayInNumPackets) {
+ neteq_->GetAudio(&out_frame_, &muted);
+ packets_received++;
+
+ // The delay reported by the jitter buffer never exceeds the number of
+ // packets previously fetched with GetAudio (hence the min()).
+ int packets_delay = std::min(packets_received, kDelayInNumPackets + 1);
+
+ // Each emitted frame increases the expected delay by the current jitter
+ // buffer delay in ms multiplied by the number of samples sent for playout.
+ int current_delay_ms = packets_delay * kPacketLenMs;
+ expected_delay += current_delay_ms * kSamples;
+ expected_target_delay += neteq_->TargetDelayMs() * kSamples;
+ expected_emitted_count += kSamples;
+ }
+ }
+
+ if (apply_packet_loss) {
+ // Extra call to GetAudio to cause concealment.
+ neteq_->GetAudio(&out_frame_, &muted);
+ }
+
+ // Check jitter buffer delay.
+ NetEqLifetimeStatistics stats = neteq_->GetLifetimeStatistics();
+ EXPECT_EQ(expected_delay,
+ rtc::checked_cast<int>(stats.jitter_buffer_delay_ms));
+ EXPECT_EQ(expected_emitted_count, stats.jitter_buffer_emitted_count);
+ EXPECT_EQ(expected_target_delay,
+ rtc::checked_cast<int>(stats.jitter_buffer_target_delay_ms));
+}
+
+TEST_F(NetEqDecodingTestFaxMode, TestJitterBufferDelayWithoutLoss) {
+ TestJitterBufferDelay(false);
+}
+
+TEST_F(NetEqDecodingTestFaxMode, TestJitterBufferDelayWithLoss) {
+ TestJitterBufferDelay(true);
+}
+
+TEST_F(NetEqDecodingTestFaxMode, TestJitterBufferDelayWithAcceleration) {
+ const int kPacketLenMs = 10; // All packets are of 10 ms size.
+ const size_t kSamples = kPacketLenMs * 16;
+ const size_t kPayloadBytes = kSamples * 2;
+ RTPHeader rtp_info;
+ rtp_info.ssrc = 0x1234; // Just an arbitrary SSRC.
+ rtp_info.payloadType = 94; // PCM16b WB codec.
+ rtp_info.markerBit = 0;
+ const uint8_t payload[kPayloadBytes] = {0};
+
+ int expected_target_delay = neteq_->TargetDelayMs() * kSamples;
+ neteq_->InsertPacket(rtp_info, payload);
+
+ bool muted;
+ neteq_->GetAudio(&out_frame_, &muted);
+
+ rtp_info.sequenceNumber += 1;
+ rtp_info.timestamp += kSamples;
+ neteq_->InsertPacket(rtp_info, payload);
+ rtp_info.sequenceNumber += 1;
+ rtp_info.timestamp += kSamples;
+ neteq_->InsertPacket(rtp_info, payload);
+
+ expected_target_delay += neteq_->TargetDelayMs() * 2 * kSamples;
+ // We have two packets in the buffer and kAccelerate operation will
+ // extract 20 ms of data.
+ neteq_->GetAudio(&out_frame_, &muted, nullptr, NetEq::Operation::kAccelerate);
+
+ // Check jitter buffer delay.
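+ // Three frames of kSamples samples each have been emitted, each effectively
+ // having spent 10 ms in the buffer, hence the expected 10 * kSamples * 3.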
+ NetEqLifetimeStatistics stats = neteq_->GetLifetimeStatistics();
+ EXPECT_EQ(10 * kSamples * 3, stats.jitter_buffer_delay_ms);
+ EXPECT_EQ(kSamples * 3, stats.jitter_buffer_emitted_count);
+ EXPECT_EQ(expected_target_delay,
+ rtc::checked_cast<int>(stats.jitter_buffer_target_delay_ms));
+}
+
+namespace test {
+TEST(NetEqNoTimeStretchingMode, RunTest) {
+ NetEq::Config config;
+ config.for_test_no_time_stretching = true;
+ auto codecs = NetEqTest::StandardDecoderMap();
+ NetEqPacketSourceInput::RtpHeaderExtensionMap rtp_ext_map = {
+ {1, kRtpExtensionAudioLevel},
+ {3, kRtpExtensionAbsoluteSendTime},
+ {5, kRtpExtensionTransportSequenceNumber},
+ {7, kRtpExtensionVideoContentType},
+ {8, kRtpExtensionVideoTiming}};
+ std::unique_ptr<NetEqInput> input(new NetEqRtpDumpInput(
+ webrtc::test::ResourcePath("audio_coding/neteq_universal_new", "rtp"),
+ rtp_ext_map, absl::nullopt /*No SSRC filter*/));
+ std::unique_ptr<TimeLimitedNetEqInput> input_time_limit(
+ new TimeLimitedNetEqInput(std::move(input), 20000));
+ std::unique_ptr<AudioSink> output(new VoidAudioSink);
+ NetEqTest::Callbacks callbacks;
+ NetEqTest test(config, CreateBuiltinAudioDecoderFactory(), codecs,
+ /*text_log=*/nullptr, /*neteq_factory=*/nullptr,
+ /*input=*/std::move(input_time_limit), std::move(output),
+ callbacks);
+ test.Run();
+ const auto stats = test.SimulationStats();
+ EXPECT_EQ(0, stats.accelerate_rate);
+ EXPECT_EQ(0, stats.preemptive_rate);
+}
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/neteq_unittest.proto b/third_party/libwebrtc/modules/audio_coding/neteq/neteq_unittest.proto
new file mode 100644
index 0000000000..b4b4253c3d
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/neteq_unittest.proto
@@ -0,0 +1,31 @@
+syntax = "proto2";
+option optimize_for = LITE_RUNTIME;
+package webrtc.neteq_unittest;
+
+message NetEqNetworkStatistics {
+ // Next field number 18.
+ optional uint32 current_buffer_size_ms = 1;
+ optional uint32 preferred_buffer_size_ms = 2;
+ optional uint32 jitter_peaks_found = 3;
+ reserved 4; // Was packet_loss_rate.
+ reserved 5; // Was packet_discard_rate.
+ optional uint32 expand_rate = 6;
+ optional uint32 speech_expand_rate = 7;
+ optional uint32 preemptive_rate = 8;
+ optional uint32 accelerate_rate = 9;
+ optional uint32 secondary_decoded_rate = 10;
+ optional uint32 secondary_discarded_rate = 17;
+ optional int32 clockdrift_ppm = 11;
+ reserved 12; // Was added_zero_samples.
+ optional int32 mean_waiting_time_ms = 13;
+ optional int32 median_waiting_time_ms = 14;
+ optional int32 min_waiting_time_ms = 15;
+ optional int32 max_waiting_time_ms = 16;
+}
+
+message RtcpStatistics {
+ optional uint32 fraction_lost = 1;
+ optional uint32 cumulative_lost = 2;
+ optional uint32 extended_max_sequence_number = 3;
+ optional uint32 jitter = 4;
+}
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/normal.cc b/third_party/libwebrtc/modules/audio_coding/neteq/normal.cc
new file mode 100644
index 0000000000..461ee7fa4a
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/normal.cc
@@ -0,0 +1,194 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/normal.h"
+
+#include <string.h> // memset, memcpy
+
+#include <algorithm> // min
+
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+#include "modules/audio_coding/neteq/audio_multi_vector.h"
+#include "modules/audio_coding/neteq/background_noise.h"
+#include "modules/audio_coding/neteq/decoder_database.h"
+#include "modules/audio_coding/neteq/expand.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+int Normal::Process(const int16_t* input,
+ size_t length,
+ NetEq::Mode last_mode,
+ AudioMultiVector* output) {
+ if (length == 0) {
+ // Nothing to process.
+ output->Clear();
+ return static_cast<int>(length);
+ }
+
+ // Output should be empty at this point.
+ RTC_DCHECK(output->Empty());
+ if (length % output->Channels() != 0) {
+ // The length does not match the number of channels.
+ output->Clear();
+ return 0;
+ }
+ output->PushBackInterleaved(rtc::ArrayView<const int16_t>(input, length));
+
+ const int fs_mult = fs_hz_ / 8000;
+ RTC_DCHECK_GT(fs_mult, 0);
+ // fs_shift = log2(fs_mult), rounded down.
+ // Note that `fs_shift` is not "exact" for 48 kHz.
+ // TODO(hlundin): Investigate this further.
+ const int fs_shift = 30 - WebRtcSpl_NormW32(fs_mult);
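+ // Worked example: at 48 kHz, fs_mult = 6 and fs_shift = 30 - 28 = 2,
+ // whereas log2(6) is roughly 2.58; this is the inexactness referred to
+ // above.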
+
+ // If the last call resulted in a kCodecPlc, we don't need to do
+ // cross-fading, but we do need to report the end of the interruption once
+ // we are back to normal operation.
+ if (last_mode == NetEq::Mode::kCodecPlc) {
+ statistics_->EndExpandEvent(fs_hz_);
+ }
+
+ // Check if last RecOut call resulted in an Expand. If so, we have to take
+ // care of some cross-fading and unmuting.
+ if (last_mode == NetEq::Mode::kExpand) {
+ // Generate interpolation data using Expand.
+ // First, set Expand parameters to appropriate values.
+ expand_->SetParametersForNormalAfterExpand();
+
+ // Call Expand.
+ AudioMultiVector expanded(output->Channels());
+ expand_->Process(&expanded);
+ expand_->Reset();
+
+ size_t length_per_channel = length / output->Channels();
+ std::unique_ptr<int16_t[]> signal(new int16_t[length_per_channel]);
+ for (size_t channel_ix = 0; channel_ix < output->Channels(); ++channel_ix) {
+ // Set muting factor to the same as expand muting factor.
+ int16_t mute_factor = expand_->MuteFactor(channel_ix);
+
+ (*output)[channel_ix].CopyTo(length_per_channel, 0, signal.get());
+
+ // Find largest absolute value in new data.
+ int16_t decoded_max =
+ WebRtcSpl_MaxAbsValueW16(signal.get(), length_per_channel);
+ // Adjust muting factor if needed (to BGN level).
+ size_t energy_length =
+ std::min(static_cast<size_t>(fs_mult * 64), length_per_channel);
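+ // Choose `scaling` so that the energy accumulation over up to
+ // 64 * fs_mult squared samples stays (approximately) within a 32-bit
+ // accumulator.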
+ int scaling = 6 + fs_shift - WebRtcSpl_NormW32(decoded_max * decoded_max);
+ scaling = std::max(scaling, 0); // `scaling` should always be >= 0.
+ int32_t energy = WebRtcSpl_DotProductWithScale(signal.get(), signal.get(),
+ energy_length, scaling);
+ int32_t scaled_energy_length =
+ static_cast<int32_t>(energy_length >> scaling);
+ if (scaled_energy_length > 0) {
+ energy = energy / scaled_energy_length;
+ } else {
+ energy = 0;
+ }
+
+ int local_mute_factor = 16384; // 1.0 in Q14.
+ if ((energy != 0) && (energy > background_noise_.Energy(channel_ix))) {
+ // Normalize new frame energy to 15 bits.
+ scaling = WebRtcSpl_NormW32(energy) - 16;
+ // We want background_noise_.energy() / energy in Q14.
+ int32_t bgn_energy = WEBRTC_SPL_SHIFT_W32(
+ background_noise_.Energy(channel_ix), scaling + 14);
+ int16_t energy_scaled =
+ static_cast<int16_t>(WEBRTC_SPL_SHIFT_W32(energy, scaling));
+ int32_t ratio = WebRtcSpl_DivW32W16(bgn_energy, energy_scaled);
+ local_mute_factor =
+ std::min(local_mute_factor, WebRtcSpl_SqrtFloor(ratio << 14));
+ }
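+ // At this point local_mute_factor is approximately
+ // sqrt(background_noise_energy / frame_energy) in Q14, capped at 1.0
+ // (16384): the louder the decoded frame is relative to the background
+ // noise, the lower the initial gain.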
+ mute_factor = std::max<int16_t>(mute_factor, local_mute_factor);
+ RTC_DCHECK_LE(mute_factor, 16384);
+ RTC_DCHECK_GE(mute_factor, 0);
+
+ // If muted, increase by 0.64 for every 20 ms (NB/WB 0.0040/0.0020 in Q14),
+ // or as fast as it takes to come back to full gain within the frame
+ // length.
+ const int back_to_fullscale_inc =
+ static_cast<int>((16384 - mute_factor) / length_per_channel);
+ const int increment = std::max(64 / fs_mult, back_to_fullscale_inc);
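+ // Worked example: at 8 kHz (fs_mult = 1) the per-sample increment is 64, so
+ // a fully muted channel (mute_factor = 0) ramps back to 16384 after
+ // 16384 / 64 = 256 samples, i.e. 32 ms, unless back_to_fullscale_inc is
+ // larger.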
+ for (size_t i = 0; i < length_per_channel; i++) {
+ // Scale with mute factor.
+ RTC_DCHECK_LT(channel_ix, output->Channels());
+ RTC_DCHECK_LT(i, output->Size());
+ int32_t scaled_signal = (*output)[channel_ix][i] * mute_factor;
+ // Shift 14 with proper rounding.
+ (*output)[channel_ix][i] =
+ static_cast<int16_t>((scaled_signal + 8192) >> 14);
+ // Increase mute_factor towards 16384.
+ mute_factor =
+ static_cast<int16_t>(std::min(mute_factor + increment, 16384));
+ }
+
+ // Interpolate the expanded data into the new vector.
+ // (NB/WB/SWB32/SWB48 8/16/32/48 samples.)
+ size_t win_length = samples_per_ms_;
+ int16_t win_slope_Q14 = default_win_slope_Q14_;
+ RTC_DCHECK_LT(channel_ix, output->Channels());
+ if (win_length > output->Size()) {
+ win_length = output->Size();
+ win_slope_Q14 = (1 << 14) / static_cast<int16_t>(win_length);
+ }
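+ // Worked example: at 16 kHz, win_length = samples_per_ms_ = 16 and
+ // win_slope_Q14 = 16384 / 16 = 1024, so win_up_Q14 ramps from 1024 up to
+ // 16384 over a 1 ms cross-fade window.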
+ int16_t win_up_Q14 = 0;
+ for (size_t i = 0; i < win_length; i++) {
+ win_up_Q14 += win_slope_Q14;
+ (*output)[channel_ix][i] =
+ (win_up_Q14 * (*output)[channel_ix][i] +
+ ((1 << 14) - win_up_Q14) * expanded[channel_ix][i] + (1 << 13)) >>
+ 14;
+ }
+ RTC_DCHECK_GT(win_up_Q14,
+ (1 << 14) - 32); // Worst case rounding is a length of 34.
+ }
+ } else if (last_mode == NetEq::Mode::kRfc3389Cng) {
+ RTC_DCHECK_EQ(output->Channels(), 1); // Not adapted for multi-channel yet.
+ static const size_t kCngLength = 48;
+ RTC_DCHECK_LE(8 * fs_mult, kCngLength);
+ int16_t cng_output[kCngLength];
+ ComfortNoiseDecoder* cng_decoder = decoder_database_->GetActiveCngDecoder();
+
+ if (cng_decoder) {
+ // Generate enough samples for 48 kHz.
+ if (!cng_decoder->Generate(cng_output, false)) {
+ // Error returned; set return vector to all zeros.
+ memset(cng_output, 0, sizeof(cng_output));
+ }
+ } else {
+ // If no CNG instance is defined, just copy from the decoded data.
+ // (This will result in interpolating the decoded data with itself.)
+ (*output)[0].CopyTo(fs_mult * 8, 0, cng_output);
+ }
+ // Interpolate the CNG into the new vector.
+ // (NB/WB/SWB32/SWB48 8/16/32/48 samples.)
+ size_t win_length = samples_per_ms_;
+ int16_t win_slope_Q14 = default_win_slope_Q14_;
+ if (win_length > kCngLength) {
+ win_length = kCngLength;
+ win_slope_Q14 = (1 << 14) / static_cast<int16_t>(win_length);
+ }
+ int16_t win_up_Q14 = 0;
+ for (size_t i = 0; i < win_length; i++) {
+ win_up_Q14 += win_slope_Q14;
+ (*output)[0][i] =
+ (win_up_Q14 * (*output)[0][i] +
+ ((1 << 14) - win_up_Q14) * cng_output[i] + (1 << 13)) >>
+ 14;
+ }
+ RTC_DCHECK_GT(win_up_Q14,
+ (1 << 14) - 32); // Worst case rounding is a length of 34.
+ }
+
+ return static_cast<int>(length);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/normal.h b/third_party/libwebrtc/modules/audio_coding/neteq/normal.h
new file mode 100644
index 0000000000..772293b605
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/normal.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_NORMAL_H_
+#define MODULES_AUDIO_CODING_NETEQ_NORMAL_H_
+
+#include <stdint.h>
+#include <string.h> // Access to size_t.
+
+#include "api/neteq/neteq.h"
+#include "modules/audio_coding/neteq/statistics_calculator.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/numerics/safe_conversions.h"
+
+namespace webrtc {
+
+// Forward declarations.
+class AudioMultiVector;
+class BackgroundNoise;
+class DecoderDatabase;
+class Expand;
+
+// This class provides the "Normal" DSP operation, that is performed when
+// there is no data loss, no need to stretch the timing of the signal, and
+// no other "special circumstances" are at hand.
+class Normal {
+ public:
+ Normal(int fs_hz,
+ DecoderDatabase* decoder_database,
+ const BackgroundNoise& background_noise,
+ Expand* expand,
+ StatisticsCalculator* statistics)
+ : fs_hz_(fs_hz),
+ decoder_database_(decoder_database),
+ background_noise_(background_noise),
+ expand_(expand),
+ samples_per_ms_(rtc::CheckedDivExact(fs_hz_, 1000)),
+ default_win_slope_Q14_(
+ rtc::dchecked_cast<uint16_t>((1 << 14) / samples_per_ms_)),
+ statistics_(statistics) {}
+
+ virtual ~Normal() {}
+
+ Normal(const Normal&) = delete;
+ Normal& operator=(const Normal&) = delete;
+
+ // Performs the "Normal" operation. The decoder data is supplied in `input`,
+ // having `length` samples in total for all channels (interleaved). The
+ // result is written to `output`. The number of channels allocated in
+ // `output` defines the number of channels that will be used when
+ // de-interleaving `input`. `last_mode` contains the mode used in the previous
+ // GetAudio call (i.e., not the current one).
+ int Process(const int16_t* input,
+ size_t length,
+ NetEq::Mode last_mode,
+ AudioMultiVector* output);
+
+ private:
+ int fs_hz_;
+ DecoderDatabase* decoder_database_;
+ const BackgroundNoise& background_noise_;
+ Expand* expand_;
+ const size_t samples_per_ms_;
+ const int16_t default_win_slope_Q14_;
+ StatisticsCalculator* const statistics_;
+};
+
+} // namespace webrtc
+#endif // MODULES_AUDIO_CODING_NETEQ_NORMAL_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/normal_unittest.cc b/third_party/libwebrtc/modules/audio_coding/neteq/normal_unittest.cc
new file mode 100644
index 0000000000..4554d79576
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/normal_unittest.cc
@@ -0,0 +1,147 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+// Unit tests for Normal class.
+
+#include "modules/audio_coding/neteq/normal.h"
+
+#include <memory>
+#include <vector>
+
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+#include "modules/audio_coding/neteq/audio_multi_vector.h"
+#include "modules/audio_coding/neteq/background_noise.h"
+#include "modules/audio_coding/neteq/expand.h"
+#include "modules/audio_coding/neteq/mock/mock_decoder_database.h"
+#include "modules/audio_coding/neteq/mock/mock_expand.h"
+#include "modules/audio_coding/neteq/random_vector.h"
+#include "modules/audio_coding/neteq/statistics_calculator.h"
+#include "modules/audio_coding/neteq/sync_buffer.h"
+#include "test/gtest.h"
+
+using ::testing::_;
+using ::testing::Invoke;
+
+namespace webrtc {
+
+namespace {
+
+int ExpandProcess120ms(AudioMultiVector* output) {
+ AudioMultiVector dummy_audio(1, 11520u);
+ dummy_audio.CopyTo(output);
+ return 0;
+}
+
+} // namespace
+
+TEST(Normal, CreateAndDestroy) {
+ MockDecoderDatabase db;
+ int fs = 8000;
+ size_t channels = 1;
+ BackgroundNoise bgn(channels);
+ SyncBuffer sync_buffer(1, 1000);
+ RandomVector random_vector;
+ StatisticsCalculator statistics;
+ Expand expand(&bgn, &sync_buffer, &random_vector, &statistics, fs, channels);
+ Normal normal(fs, &db, bgn, &expand, &statistics);
+ EXPECT_CALL(db, Die()); // Called when `db` goes out of scope.
+}
+
+TEST(Normal, AvoidDivideByZero) {
+ MockDecoderDatabase db;
+ int fs = 8000;
+ size_t channels = 1;
+ BackgroundNoise bgn(channels);
+ SyncBuffer sync_buffer(1, 1000);
+ RandomVector random_vector;
+ StatisticsCalculator statistics;
+ MockExpand expand(&bgn, &sync_buffer, &random_vector, &statistics, fs,
+ channels);
+ Normal normal(fs, &db, bgn, &expand, &statistics);
+
+ int16_t input[1000] = {0};
+ AudioMultiVector output(channels);
+
+ // Zero input length.
+ EXPECT_EQ(0, normal.Process(input, 0, NetEq::Mode::kExpand, &output));
+ EXPECT_EQ(0u, output.Size());
+
+ // Try to make energy_length >> scaling = 0.
+ EXPECT_CALL(expand, SetParametersForNormalAfterExpand());
+ EXPECT_CALL(expand, Process(_));
+ EXPECT_CALL(expand, Reset());
+ // If input_size_samples < 64, then energy_length in Normal::Process() will
+ // be equal to input_size_samples. Since the input is all zeros, decoded_max
+ // will be zero, and scaling will be >= 6. Thus, energy_length >> scaling = 0,
+ // and using this as a denominator would lead to problems.
+ int input_size_samples = 63;
+ EXPECT_EQ(input_size_samples, normal.Process(input, input_size_samples,
+ NetEq::Mode::kExpand, &output));
+
+ EXPECT_CALL(db, Die()); // Called when `db` goes out of scope.
+ EXPECT_CALL(expand, Die()); // Called when `expand` goes out of scope.
+}
+
+TEST(Normal, InputLengthAndChannelsDoNotMatch) {
+ MockDecoderDatabase db;
+ int fs = 8000;
+ size_t channels = 2;
+ BackgroundNoise bgn(channels);
+ SyncBuffer sync_buffer(channels, 1000);
+ RandomVector random_vector;
+ StatisticsCalculator statistics;
+ MockExpand expand(&bgn, &sync_buffer, &random_vector, &statistics, fs,
+ channels);
+ Normal normal(fs, &db, bgn, &expand, &statistics);
+
+ int16_t input[1000] = {0};
+ AudioMultiVector output(channels);
+
+ // Let the number of samples be one sample less than 80 samples per channel.
+ size_t input_len = 80 * channels - 1;
+ EXPECT_EQ(0, normal.Process(input, input_len, NetEq::Mode::kExpand, &output));
+ EXPECT_EQ(0u, output.Size());
+
+ EXPECT_CALL(db, Die()); // Called when `db` goes out of scope.
+ EXPECT_CALL(expand, Die()); // Called when `expand` goes out of scope.
+}
+
+TEST(Normal, LastModeExpand120msPacket) {
+ MockDecoderDatabase db;
+ const int kFs = 48000;
+ const size_t kPacketsizeBytes = 11520u;
+ const size_t kChannels = 1;
+ BackgroundNoise bgn(kChannels);
+ SyncBuffer sync_buffer(kChannels, 1000);
+ RandomVector random_vector;
+ StatisticsCalculator statistics;
+ MockExpand expand(&bgn, &sync_buffer, &random_vector, &statistics, kFs,
+ kChannels);
+ Normal normal(kFs, &db, bgn, &expand, &statistics);
+
+ int16_t input[kPacketsizeBytes] = {0};
+ AudioMultiVector output(kChannels);
+
+ EXPECT_CALL(expand, SetParametersForNormalAfterExpand());
+ EXPECT_CALL(expand, Process(_)).WillOnce(Invoke(ExpandProcess120ms));
+ EXPECT_CALL(expand, Reset());
+ EXPECT_EQ(
+ static_cast<int>(kPacketsizeBytes),
+ normal.Process(input, kPacketsizeBytes, NetEq::Mode::kExpand, &output));
+
+ EXPECT_EQ(kPacketsizeBytes, output.Size());
+
+ EXPECT_CALL(db, Die()); // Called when `db` goes out of scope.
+ EXPECT_CALL(expand, Die()); // Called when `expand` goes out of scope.
+}
+
+// TODO(hlundin): Write more tests.
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/packet.cc b/third_party/libwebrtc/modules/audio_coding/neteq/packet.cc
new file mode 100644
index 0000000000..333f161229
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/packet.cc
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/packet.h"
+
+namespace webrtc {
+
+Packet::Packet() = default;
+Packet::Packet(Packet&& b) = default;
+
+Packet::~Packet() = default;
+
+Packet& Packet::operator=(Packet&& b) = default;
+
+Packet Packet::Clone() const {
+ RTC_CHECK(!frame);
+
+ Packet clone;
+ clone.timestamp = timestamp;
+ clone.sequence_number = sequence_number;
+ clone.payload_type = payload_type;
+ clone.payload.SetData(payload.data(), payload.size());
+ clone.priority = priority;
+ clone.packet_info = packet_info;
+
+ return clone;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/packet.h b/third_party/libwebrtc/modules/audio_coding/neteq/packet.h
new file mode 100644
index 0000000000..0c6f204edb
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/packet.h
@@ -0,0 +1,128 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_PACKET_H_
+#define MODULES_AUDIO_CODING_NETEQ_PACKET_H_
+
+#include <stdint.h>
+
+#include <list>
+#include <memory>
+
+#include "api/audio_codecs/audio_decoder.h"
+#include "api/neteq/tick_timer.h"
+#include "api/rtp_packet_info.h"
+#include "rtc_base/buffer.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+// Struct for holding RTP packets.
+struct Packet {
+ struct Priority {
+ Priority() : codec_level(0), red_level(0) {}
+ Priority(int codec_level, int red_level)
+ : codec_level(codec_level), red_level(red_level) {
+ CheckInvariant();
+ }
+
+ int codec_level;
+ int red_level;
+
+  // Priorities are sorted low-to-high: first on the level at which the codec
+  // prioritizes the packet, then on its level within a RED packet, i.e.,
+  // whether it is the primary or a secondary payload. For example, with Opus,
+  // an FEC packet (which the decoder prioritizes lower than a regular packet)
+  // will not be used if there is _any_ RED payload for the same timeframe.
+  // The highest-priority packet has levels {0, 0}. Negative priorities are
+  // not allowed.
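+  // Illustratively, Priority(0, 0) sorts before Priority(0, 1), which in turn
+  // sorts before Priority(1, 0): codec_level is compared first, and red_level
+  // breaks ties.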
+ bool operator<(const Priority& b) const {
+ CheckInvariant();
+ b.CheckInvariant();
+ if (codec_level == b.codec_level)
+ return red_level < b.red_level;
+
+ return codec_level < b.codec_level;
+ }
+ bool operator==(const Priority& b) const {
+ CheckInvariant();
+ b.CheckInvariant();
+ return codec_level == b.codec_level && red_level == b.red_level;
+ }
+ bool operator!=(const Priority& b) const { return !(*this == b); }
+ bool operator>(const Priority& b) const { return b < *this; }
+ bool operator<=(const Priority& b) const { return !(b > *this); }
+ bool operator>=(const Priority& b) const { return !(b < *this); }
+
+ private:
+ void CheckInvariant() const {
+ RTC_DCHECK_GE(codec_level, 0);
+ RTC_DCHECK_GE(red_level, 0);
+ }
+ };
+
+ uint32_t timestamp;
+ uint16_t sequence_number;
+ uint8_t payload_type;
+ // Datagram excluding RTP header and header extension.
+ rtc::Buffer payload;
+ Priority priority;
+ RtpPacketInfo packet_info;
+ std::unique_ptr<TickTimer::Stopwatch> waiting_time;
+ std::unique_ptr<AudioDecoder::EncodedAudioFrame> frame;
+
+ Packet();
+ Packet(Packet&& b);
+ ~Packet();
+
+  // Packets should generally be moved around, but sometimes it is useful to
+  // make a copy, for example for testing purposes. NOTE: This only works for
+  // un-parsed packets, i.e., `frame` must be unset. The payload is copied;
+  // `waiting_time` is not.
+ Packet Clone() const;
+
+ Packet& operator=(Packet&& b);
+
+  // Comparison operators. Establish a packet ordering based on (1) timestamp,
+  // (2) sequence number, and (3) redundancy.
+  // Timestamps and sequence numbers are compared taking wrap-around into
+  // account. For two packets with the same sequence number and timestamp, a
+  // primary payload is considered "smaller" than a secondary one.
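+  // For example, a packet with timestamp 0xFFFFFF00 is ordered before one
+  // with timestamp 0x00000100, since the wrapped difference between them is
+  // less than half the 32-bit range.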
+ bool operator==(const Packet& rhs) const {
+ return (this->timestamp == rhs.timestamp &&
+ this->sequence_number == rhs.sequence_number &&
+ this->priority == rhs.priority);
+ }
+ bool operator!=(const Packet& rhs) const { return !operator==(rhs); }
+ bool operator<(const Packet& rhs) const {
+ if (this->timestamp == rhs.timestamp) {
+ if (this->sequence_number == rhs.sequence_number) {
+ // Timestamp and sequence numbers are identical - deem the left hand
+ // side to be "smaller" (i.e., "earlier") if it has higher priority.
+ return this->priority < rhs.priority;
+ }
+ return (static_cast<uint16_t>(rhs.sequence_number -
+ this->sequence_number) < 0xFFFF / 2);
+ }
+ return (static_cast<uint32_t>(rhs.timestamp - this->timestamp) <
+ 0xFFFFFFFF / 2);
+ }
+ bool operator>(const Packet& rhs) const { return rhs.operator<(*this); }
+ bool operator<=(const Packet& rhs) const { return !operator>(rhs); }
+ bool operator>=(const Packet& rhs) const { return !operator<(rhs); }
+
+ bool empty() const { return !frame && payload.empty(); }
+};
+
+// A list of packets.
+typedef std::list<Packet> PacketList;
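+// A minimal usage sketch (illustrative values only): Packet is move-only
+// apart from Clone(), so transfers into a PacketList use std::move:
+//   Packet packet;
+//   packet.timestamp = 4711;
+//   packet.sequence_number = 17;
+//   packet.payload_type = 0;
+//   PacketList list;
+//   list.push_back(std::move(packet));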
+
+} // namespace webrtc
+#endif // MODULES_AUDIO_CODING_NETEQ_PACKET_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/packet_arrival_history.cc b/third_party/libwebrtc/modules/audio_coding/neteq/packet_arrival_history.cc
new file mode 100644
index 0000000000..7196a6e393
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/packet_arrival_history.cc
@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/packet_arrival_history.h"
+
+#include <algorithm>
+
+#include "api/neteq/tick_timer.h"
+#include "modules/include/module_common_types_public.h"
+
+namespace webrtc {
+
+PacketArrivalHistory::PacketArrivalHistory(int window_size_ms)
+ : window_size_ms_(window_size_ms) {}
+
+void PacketArrivalHistory::Insert(uint32_t rtp_timestamp,
+ int64_t arrival_time_ms) {
+ RTC_DCHECK(sample_rate_khz_ > 0);
+ int64_t unwrapped_rtp_timestamp = timestamp_unwrapper_.Unwrap(rtp_timestamp);
+ if (!newest_rtp_timestamp_ ||
+ unwrapped_rtp_timestamp > *newest_rtp_timestamp_) {
+ newest_rtp_timestamp_ = unwrapped_rtp_timestamp;
+ }
+ history_.emplace_back(unwrapped_rtp_timestamp / sample_rate_khz_,
+ arrival_time_ms);
+ MaybeUpdateCachedArrivals(history_.back());
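+  // Prune packets that have fallen outside the window, invalidating the
+  // cached min/max pointers if they are about to be removed.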
+ while (history_.front().rtp_timestamp_ms + window_size_ms_ <
+ unwrapped_rtp_timestamp / sample_rate_khz_) {
+ if (&history_.front() == min_packet_arrival_) {
+ min_packet_arrival_ = nullptr;
+ }
+ if (&history_.front() == max_packet_arrival_) {
+ max_packet_arrival_ = nullptr;
+ }
+ history_.pop_front();
+ }
+ if (!min_packet_arrival_ || !max_packet_arrival_) {
+ for (const PacketArrival& packet : history_) {
+ MaybeUpdateCachedArrivals(packet);
+ }
+ }
+}
+
+void PacketArrivalHistory::MaybeUpdateCachedArrivals(
+ const PacketArrival& packet_arrival) {
+ if (!min_packet_arrival_ || packet_arrival <= *min_packet_arrival_) {
+ min_packet_arrival_ = &packet_arrival;
+ }
+ if (!max_packet_arrival_ || packet_arrival >= *max_packet_arrival_) {
+ max_packet_arrival_ = &packet_arrival;
+ }
+}
+
+void PacketArrivalHistory::Reset() {
+ history_.clear();
+ min_packet_arrival_ = nullptr;
+ max_packet_arrival_ = nullptr;
+ timestamp_unwrapper_ = TimestampUnwrapper();
+ newest_rtp_timestamp_ = absl::nullopt;
+}
+
+int PacketArrivalHistory::GetDelayMs(uint32_t rtp_timestamp,
+ int64_t time_ms) const {
+ RTC_DCHECK(sample_rate_khz_ > 0);
+ int64_t unwrapped_rtp_timestamp_ms =
+ timestamp_unwrapper_.UnwrapWithoutUpdate(rtp_timestamp) /
+ sample_rate_khz_;
+ PacketArrival packet(unwrapped_rtp_timestamp_ms, time_ms);
+ return GetPacketArrivalDelayMs(packet);
+}
+
+int PacketArrivalHistory::GetMaxDelayMs() const {
+ if (!max_packet_arrival_) {
+ return 0;
+ }
+ return GetPacketArrivalDelayMs(*max_packet_arrival_);
+}
+
+bool PacketArrivalHistory::IsNewestRtpTimestamp(uint32_t rtp_timestamp) const {
+ if (!newest_rtp_timestamp_) {
+ return false;
+ }
+ int64_t unwrapped_rtp_timestamp =
+ timestamp_unwrapper_.UnwrapWithoutUpdate(rtp_timestamp);
+ return unwrapped_rtp_timestamp == *newest_rtp_timestamp_;
+}
+
+int PacketArrivalHistory::GetPacketArrivalDelayMs(
+ const PacketArrival& packet_arrival) const {
+ if (!min_packet_arrival_) {
+ return 0;
+ }
+ return std::max(static_cast<int>(packet_arrival.arrival_time_ms -
+ min_packet_arrival_->arrival_time_ms -
+ (packet_arrival.rtp_timestamp_ms -
+ min_packet_arrival_->rtp_timestamp_ms)),
+ 0);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/packet_arrival_history.h b/third_party/libwebrtc/modules/audio_coding/neteq/packet_arrival_history.h
new file mode 100644
index 0000000000..79fc9176bc
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/packet_arrival_history.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_PACKET_ARRIVAL_HISTORY_H_
+#define MODULES_AUDIO_CODING_NETEQ_PACKET_ARRIVAL_HISTORY_H_
+
+#include <cstdint>
+#include <deque>
+
+#include "absl/types/optional.h"
+#include "api/neteq/tick_timer.h"
+#include "modules/include/module_common_types_public.h"
+
+namespace webrtc {
+
+// Stores timing information about previously received packets.
+// The history has a fixed window size beyond which old data is automatically
+// pruned.
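+//
+// A minimal usage sketch (illustrative values only; mirrors the unit tests):
+//   PacketArrivalHistory history(/*window_size_ms=*/1000);
+//   history.set_sample_rate(8000);
+//   history.Insert(rtp_timestamp, arrival_time_ms);
+//   int delay_ms = history.GetDelayMs(rtp_timestamp, now_ms);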
+class PacketArrivalHistory {
+ public:
+ explicit PacketArrivalHistory(int window_size_ms);
+
+ // Insert packet with `rtp_timestamp` and `arrival_time_ms` into the history.
+ void Insert(uint32_t rtp_timestamp, int64_t arrival_time_ms);
+
+  // The delay for `rtp_timestamp` at `time_ms` is calculated as
+  // `(time_ms - p.arrival_time_ms) - (rtp_timestamp - p.rtp_timestamp)`,
+  // with the timestamp difference converted to milliseconds, where `p` is
+  // the packet arrival in the history that maximizes the delay.
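+  // For example, if `p` arrived at 100 ms carrying a timestamp equivalent to
+  // 80 ms, a query for a timestamp equivalent to 120 ms at time 160 ms yields
+  // (160 - 100) - (120 - 80) = 20 ms of delay.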
+ int GetDelayMs(uint32_t rtp_timestamp, int64_t time_ms) const;
+
+ // Get the maximum packet arrival delay observed in the history.
+ int GetMaxDelayMs() const;
+
+ bool IsNewestRtpTimestamp(uint32_t rtp_timestamp) const;
+
+ void Reset();
+
+ void set_sample_rate(int sample_rate) {
+ sample_rate_khz_ = sample_rate / 1000;
+ }
+
+ size_t size() const { return history_.size(); }
+
+ private:
+ struct PacketArrival {
+ PacketArrival(int64_t rtp_timestamp_ms, int64_t arrival_time_ms)
+ : rtp_timestamp_ms(rtp_timestamp_ms),
+ arrival_time_ms(arrival_time_ms) {}
+ int64_t rtp_timestamp_ms;
+ int64_t arrival_time_ms;
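+    // Arrivals are ordered by relative delay, i.e., by
+    // arrival_time_ms - rtp_timestamp_ms.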
+ bool operator<=(const PacketArrival& other) const {
+ return arrival_time_ms - rtp_timestamp_ms <=
+ other.arrival_time_ms - other.rtp_timestamp_ms;
+ }
+ bool operator>=(const PacketArrival& other) const {
+ return arrival_time_ms - rtp_timestamp_ms >=
+ other.arrival_time_ms - other.rtp_timestamp_ms;
+ }
+ };
+ std::deque<PacketArrival> history_;
+ int GetPacketArrivalDelayMs(const PacketArrival& packet_arrival) const;
+ // Updates `min_packet_arrival_` and `max_packet_arrival_`.
+ void MaybeUpdateCachedArrivals(const PacketArrival& packet);
+ const PacketArrival* min_packet_arrival_ = nullptr;
+ const PacketArrival* max_packet_arrival_ = nullptr;
+ const int window_size_ms_;
+ TimestampUnwrapper timestamp_unwrapper_;
+ absl::optional<int64_t> newest_rtp_timestamp_;
+ int sample_rate_khz_ = 0;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_CODING_NETEQ_PACKET_ARRIVAL_HISTORY_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/packet_arrival_history_unittest.cc b/third_party/libwebrtc/modules/audio_coding/neteq/packet_arrival_history_unittest.cc
new file mode 100644
index 0000000000..286a7acb2c
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/packet_arrival_history_unittest.cc
@@ -0,0 +1,124 @@
+/*
+ * Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/packet_arrival_history.h"
+
+#include <cstdint>
+#include <limits>
+
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+constexpr int kFs = 8000;
+constexpr int kFsKhz = kFs / 1000;
+constexpr int kFrameSizeMs = 20;
+constexpr int kWindowSizeMs = 1000;
+
+class PacketArrivalHistoryTest : public testing::Test {
+ public:
+ PacketArrivalHistoryTest() : history_(kWindowSizeMs) {
+ history_.set_sample_rate(kFs);
+ }
+ void IncrementTime(int delta_ms) { time_ms_ += delta_ms; }
+ int InsertPacketAndGetDelay(int timestamp_delta_ms) {
+ uint32_t timestamp = timestamp_ + timestamp_delta_ms * kFsKhz;
+ if (timestamp_delta_ms > 0) {
+ timestamp_ = timestamp;
+ }
+ history_.Insert(timestamp, time_ms_);
+ EXPECT_EQ(history_.IsNewestRtpTimestamp(timestamp),
+ timestamp_delta_ms >= 0);
+ return history_.GetDelayMs(timestamp, time_ms_);
+ }
+
+ protected:
+ int64_t time_ms_ = 0;
+ PacketArrivalHistory history_;
+ uint32_t timestamp_ = 0x12345678;
+};
+
+TEST_F(PacketArrivalHistoryTest, RelativeArrivalDelay) {
+ EXPECT_EQ(InsertPacketAndGetDelay(0), 0);
+
+ IncrementTime(kFrameSizeMs);
+ EXPECT_EQ(InsertPacketAndGetDelay(kFrameSizeMs), 0);
+
+ IncrementTime(2 * kFrameSizeMs);
+ EXPECT_EQ(InsertPacketAndGetDelay(kFrameSizeMs), 20);
+
+ // Reordered packet.
+ EXPECT_EQ(InsertPacketAndGetDelay(-2 * kFrameSizeMs), 60);
+
+ IncrementTime(2 * kFrameSizeMs);
+ EXPECT_EQ(InsertPacketAndGetDelay(kFrameSizeMs), 40);
+
+ // Move reference packet forward.
+ EXPECT_EQ(InsertPacketAndGetDelay(4 * kFrameSizeMs), 0);
+
+ IncrementTime(2 * kFrameSizeMs);
+ EXPECT_EQ(InsertPacketAndGetDelay(kFrameSizeMs), 20);
+
+ // Earlier packet is now more delayed due to the new reference packet.
+ EXPECT_EQ(history_.GetMaxDelayMs(), 100);
+}
+
+TEST_F(PacketArrivalHistoryTest, ReorderedPackets) {
+ // Insert first packet.
+ EXPECT_EQ(InsertPacketAndGetDelay(0), 0);
+
+ // Insert reordered packet.
+ EXPECT_EQ(InsertPacketAndGetDelay(-80), 80);
+
+ // Insert another reordered packet.
+ EXPECT_EQ(InsertPacketAndGetDelay(-kFrameSizeMs), 20);
+
+ // Insert the next packet in order and verify that the relative delay is
+ // estimated based on the first inserted packet.
+ IncrementTime(4 * kFrameSizeMs);
+ EXPECT_EQ(InsertPacketAndGetDelay(kFrameSizeMs), 60);
+
+ EXPECT_EQ(history_.GetMaxDelayMs(), 80);
+}
+
+TEST_F(PacketArrivalHistoryTest, MaxHistorySize) {
+ EXPECT_EQ(InsertPacketAndGetDelay(0), 0);
+
+ IncrementTime(2 * kFrameSizeMs);
+ EXPECT_EQ(InsertPacketAndGetDelay(kFrameSizeMs), 20);
+ EXPECT_EQ(history_.GetMaxDelayMs(), 20);
+
+ // Insert next packet with a timestamp difference larger than maximum history
+ // size. This removes the previously inserted packet from the history.
+ IncrementTime(kWindowSizeMs + kFrameSizeMs);
+ EXPECT_EQ(InsertPacketAndGetDelay(kFrameSizeMs + kWindowSizeMs), 0);
+ EXPECT_EQ(history_.GetMaxDelayMs(), 0);
+}
+
+TEST_F(PacketArrivalHistoryTest, TimestampWraparound) {
+ timestamp_ = std::numeric_limits<uint32_t>::max();
+ EXPECT_EQ(InsertPacketAndGetDelay(0), 0);
+
+ IncrementTime(2 * kFrameSizeMs);
+ // Insert timestamp that will wrap around.
+ EXPECT_EQ(InsertPacketAndGetDelay(kFrameSizeMs), kFrameSizeMs);
+
+ // Insert reordered packet before the wraparound.
+ EXPECT_EQ(InsertPacketAndGetDelay(-2 * kFrameSizeMs), 3 * kFrameSizeMs);
+
+ // Insert another in-order packet after the wraparound.
+ EXPECT_EQ(InsertPacketAndGetDelay(kFrameSizeMs), 0);
+
+ EXPECT_EQ(history_.GetMaxDelayMs(), 3 * kFrameSizeMs);
+}
+
+} // namespace
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/packet_buffer.cc b/third_party/libwebrtc/modules/audio_coding/neteq/packet_buffer.cc
new file mode 100644
index 0000000000..f6b5a476c9
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/packet_buffer.cc
@@ -0,0 +1,405 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+// This is the implementation of the PacketBuffer class. It is mostly based on
+// an STL list. The list is kept sorted at all times so that the next packet to
+// decode is at the beginning of the list.
+
+#include "modules/audio_coding/neteq/packet_buffer.h"
+
+#include <algorithm>
+#include <list>
+#include <memory>
+#include <type_traits>
+#include <utility>
+
+#include "api/audio_codecs/audio_decoder.h"
+#include "api/neteq/tick_timer.h"
+#include "modules/audio_coding/neteq/decoder_database.h"
+#include "modules/audio_coding/neteq/statistics_calculator.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/experiments/struct_parameters_parser.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "system_wrappers/include/field_trial.h"
+
+namespace webrtc {
+namespace {
+// Predicate used when inserting packets in the buffer list.
+// Operator() returns true when `packet` goes before, or is equivalent in
+// order to, `new_packet`.
+class NewTimestampIsLarger {
+ public:
+ explicit NewTimestampIsLarger(const Packet& new_packet)
+ : new_packet_(new_packet) {}
+ bool operator()(const Packet& packet) { return (new_packet_ >= packet); }
+
+ private:
+ const Packet& new_packet_;
+};
+
+// Returns true if both payload types are known to the decoder database, and
+// have the same sample rate.
+bool EqualSampleRates(uint8_t pt1,
+ uint8_t pt2,
+ const DecoderDatabase& decoder_database) {
+ auto* di1 = decoder_database.GetDecoderInfo(pt1);
+ auto* di2 = decoder_database.GetDecoderInfo(pt2);
+ return di1 && di2 && di1->SampleRateHz() == di2->SampleRateHz();
+}
+
+void LogPacketDiscarded(int codec_level, StatisticsCalculator* stats) {
+ RTC_CHECK(stats);
+ if (codec_level > 0) {
+ stats->SecondaryPacketsDiscarded(1);
+ } else {
+ stats->PacketsDiscarded(1);
+ }
+}
+
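+// Reads the smart flushing configuration from the
+// "WebRTC-Audio-NetEqSmartFlushing" field trial. An example trial string, as
+// exercised by the unit tests:
+//   "WebRTC-Audio-NetEqSmartFlushing/enabled:true,"
+//   "target_level_threshold_ms:0,target_level_multiplier:2/"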
+absl::optional<SmartFlushingConfig> GetSmartFlushingConfig() {
+ absl::optional<SmartFlushingConfig> result;
+ std::string field_trial_string =
+ field_trial::FindFullName("WebRTC-Audio-NetEqSmartFlushing");
+ result = SmartFlushingConfig();
+ bool enabled = false;
+ auto parser = StructParametersParser::Create(
+ "enabled", &enabled, "target_level_threshold_ms",
+ &result->target_level_threshold_ms, "target_level_multiplier",
+ &result->target_level_multiplier);
+ parser->Parse(field_trial_string);
+ if (!enabled) {
+ return absl::nullopt;
+ }
+ RTC_LOG(LS_INFO) << "Using smart flushing, target_level_threshold_ms: "
+ << result->target_level_threshold_ms
+ << ", target_level_multiplier: "
+ << result->target_level_multiplier;
+ return result;
+}
+
+} // namespace
+
+PacketBuffer::PacketBuffer(size_t max_number_of_packets,
+ const TickTimer* tick_timer)
+    : smart_flushing_config_(GetSmartFlushingConfig()),
+ max_number_of_packets_(max_number_of_packets),
+ tick_timer_(tick_timer) {}
+
+// Destructor. All packets in the buffer will be destroyed.
+PacketBuffer::~PacketBuffer() {
+ buffer_.clear();
+}
+
+// Flush the buffer. All packets in the buffer will be destroyed.
+void PacketBuffer::Flush(StatisticsCalculator* stats) {
+ for (auto& p : buffer_) {
+ LogPacketDiscarded(p.priority.codec_level, stats);
+ }
+ buffer_.clear();
+ stats->FlushedPacketBuffer();
+}
+
+void PacketBuffer::PartialFlush(int target_level_ms,
+ size_t sample_rate,
+ size_t last_decoded_length,
+ StatisticsCalculator* stats) {
+ // Make sure that at least half the packet buffer capacity will be available
+ // after the flush. This is done to avoid getting stuck if the target level is
+ // very high.
+ int target_level_samples =
+ std::min(target_level_ms * sample_rate / 1000,
+ max_number_of_packets_ * last_decoded_length / 2);
+  // We should avoid flushing to very low levels; convert the threshold from
+  // milliseconds to samples before taking the maximum.
+  target_level_samples = std::max(
+      target_level_samples,
+      smart_flushing_config_->target_level_threshold_ms *
+          static_cast<int>(sample_rate / 1000));
+ while (GetSpanSamples(last_decoded_length, sample_rate, true) >
+ static_cast<size_t>(target_level_samples) ||
+ buffer_.size() > max_number_of_packets_ / 2) {
+ LogPacketDiscarded(PeekNextPacket()->priority.codec_level, stats);
+ buffer_.pop_front();
+ }
+}
+
+bool PacketBuffer::Empty() const {
+ return buffer_.empty();
+}
+
+int PacketBuffer::InsertPacket(Packet&& packet,
+ StatisticsCalculator* stats,
+ size_t last_decoded_length,
+ size_t sample_rate,
+ int target_level_ms,
+ const DecoderDatabase& decoder_database) {
+ if (packet.empty()) {
+ RTC_LOG(LS_WARNING) << "InsertPacket invalid packet";
+ return kInvalidPacket;
+ }
+
+ RTC_DCHECK_GE(packet.priority.codec_level, 0);
+ RTC_DCHECK_GE(packet.priority.red_level, 0);
+
+ int return_val = kOK;
+
+ packet.waiting_time = tick_timer_->GetNewStopwatch();
+
+  // Perform a smart flush if the span of the buffer exceeds a multiple of the
+  // target level.
+ const size_t span_threshold =
+ smart_flushing_config_
+ ? smart_flushing_config_->target_level_multiplier *
+ std::max(smart_flushing_config_->target_level_threshold_ms,
+ target_level_ms) *
+ sample_rate / 1000
+ : 0;
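+  // For example, with the default configuration (threshold 500 ms,
+  // multiplier 3), a target level of 600 ms and a 48 kHz sample rate, the
+  // threshold is 3 * 600 * 48000 / 1000 = 86400 samples.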
+ const bool smart_flush =
+ smart_flushing_config_.has_value() &&
+ GetSpanSamples(last_decoded_length, sample_rate, true) >= span_threshold;
+ if (buffer_.size() >= max_number_of_packets_ || smart_flush) {
+ size_t buffer_size_before_flush = buffer_.size();
+ if (smart_flushing_config_.has_value()) {
+ // Flush down to the target level.
+ PartialFlush(target_level_ms, sample_rate, last_decoded_length, stats);
+ return_val = kPartialFlush;
+ } else {
+ // Buffer is full.
+ Flush(stats);
+ return_val = kFlushed;
+ }
+ RTC_LOG(LS_WARNING) << "Packet buffer flushed, "
+ << (buffer_size_before_flush - buffer_.size())
+ << " packets discarded.";
+ }
+
+ // Get an iterator pointing to the place in the buffer where the new packet
+ // should be inserted. The list is searched from the back, since the most
+ // likely case is that the new packet should be near the end of the list.
+ PacketList::reverse_iterator rit = std::find_if(
+ buffer_.rbegin(), buffer_.rend(), NewTimestampIsLarger(packet));
+
+  // The new packet is to be inserted to the right of `rit`. If it has the same
+  // timestamp as `rit`, which has a higher priority, do not insert the new
+  // packet into the list.
+ if (rit != buffer_.rend() && packet.timestamp == rit->timestamp) {
+ LogPacketDiscarded(packet.priority.codec_level, stats);
+ return return_val;
+ }
+
+ // The new packet is to be inserted to the left of `it`. If it has the same
+ // timestamp as `it`, which has a lower priority, replace `it` with the new
+ // packet.
+ PacketList::iterator it = rit.base();
+ if (it != buffer_.end() && packet.timestamp == it->timestamp) {
+ LogPacketDiscarded(it->priority.codec_level, stats);
+ it = buffer_.erase(it);
+ }
+ buffer_.insert(it, std::move(packet)); // Insert the packet at that position.
+
+ return return_val;
+}
+
+int PacketBuffer::InsertPacketList(
+ PacketList* packet_list,
+ const DecoderDatabase& decoder_database,
+ absl::optional<uint8_t>* current_rtp_payload_type,
+ absl::optional<uint8_t>* current_cng_rtp_payload_type,
+ StatisticsCalculator* stats,
+ size_t last_decoded_length,
+ size_t sample_rate,
+ int target_level_ms) {
+ RTC_DCHECK(stats);
+ bool flushed = false;
+ for (auto& packet : *packet_list) {
+ if (decoder_database.IsComfortNoise(packet.payload_type)) {
+ if (*current_cng_rtp_payload_type &&
+ **current_cng_rtp_payload_type != packet.payload_type) {
+ // New CNG payload type implies new codec type.
+ *current_rtp_payload_type = absl::nullopt;
+ Flush(stats);
+ flushed = true;
+ }
+ *current_cng_rtp_payload_type = packet.payload_type;
+ } else if (!decoder_database.IsDtmf(packet.payload_type)) {
+ // This must be speech.
+ if ((*current_rtp_payload_type &&
+ **current_rtp_payload_type != packet.payload_type) ||
+ (*current_cng_rtp_payload_type &&
+ !EqualSampleRates(packet.payload_type,
+ **current_cng_rtp_payload_type,
+ decoder_database))) {
+ *current_cng_rtp_payload_type = absl::nullopt;
+ Flush(stats);
+ flushed = true;
+ }
+ *current_rtp_payload_type = packet.payload_type;
+ }
+ int return_val =
+ InsertPacket(std::move(packet), stats, last_decoded_length, sample_rate,
+ target_level_ms, decoder_database);
+ if (return_val == kFlushed) {
+ // The buffer flushed, but this is not an error. We can still continue.
+ flushed = true;
+ } else if (return_val != kOK) {
+ // An error occurred. Delete remaining packets in list and return.
+ packet_list->clear();
+ return return_val;
+ }
+ }
+ packet_list->clear();
+ return flushed ? kFlushed : kOK;
+}
+
+int PacketBuffer::NextTimestamp(uint32_t* next_timestamp) const {
+ if (Empty()) {
+ return kBufferEmpty;
+ }
+ if (!next_timestamp) {
+ return kInvalidPointer;
+ }
+ *next_timestamp = buffer_.front().timestamp;
+ return kOK;
+}
+
+int PacketBuffer::NextHigherTimestamp(uint32_t timestamp,
+ uint32_t* next_timestamp) const {
+ if (Empty()) {
+ return kBufferEmpty;
+ }
+ if (!next_timestamp) {
+ return kInvalidPointer;
+ }
+ PacketList::const_iterator it;
+ for (it = buffer_.begin(); it != buffer_.end(); ++it) {
+ if (it->timestamp >= timestamp) {
+ // Found a packet matching the search.
+ *next_timestamp = it->timestamp;
+ return kOK;
+ }
+ }
+ return kNotFound;
+}
+
+const Packet* PacketBuffer::PeekNextPacket() const {
+ return buffer_.empty() ? nullptr : &buffer_.front();
+}
+
+absl::optional<Packet> PacketBuffer::GetNextPacket() {
+ if (Empty()) {
+ // Buffer is empty.
+ return absl::nullopt;
+ }
+
+ absl::optional<Packet> packet(std::move(buffer_.front()));
+  // Assert that the packet sanity checks in the InsertPacket method work.
+ RTC_DCHECK(!packet->empty());
+ buffer_.pop_front();
+
+ return packet;
+}
+
+int PacketBuffer::DiscardNextPacket(StatisticsCalculator* stats) {
+ if (Empty()) {
+ return kBufferEmpty;
+ }
+  // Assert that the packet sanity checks in the InsertPacket method work.
+ const Packet& packet = buffer_.front();
+ RTC_DCHECK(!packet.empty());
+ LogPacketDiscarded(packet.priority.codec_level, stats);
+ buffer_.pop_front();
+ return kOK;
+}
+
+void PacketBuffer::DiscardOldPackets(uint32_t timestamp_limit,
+ uint32_t horizon_samples,
+ StatisticsCalculator* stats) {
+ buffer_.remove_if([timestamp_limit, horizon_samples, stats](const Packet& p) {
+ if (timestamp_limit == p.timestamp ||
+ !IsObsoleteTimestamp(p.timestamp, timestamp_limit, horizon_samples)) {
+ return false;
+ }
+ LogPacketDiscarded(p.priority.codec_level, stats);
+ return true;
+ });
+}
+
+void PacketBuffer::DiscardAllOldPackets(uint32_t timestamp_limit,
+ StatisticsCalculator* stats) {
+ DiscardOldPackets(timestamp_limit, 0, stats);
+}
+
+void PacketBuffer::DiscardPacketsWithPayloadType(uint8_t payload_type,
+ StatisticsCalculator* stats) {
+ buffer_.remove_if([payload_type, stats](const Packet& p) {
+ if (p.payload_type != payload_type) {
+ return false;
+ }
+ LogPacketDiscarded(p.priority.codec_level, stats);
+ return true;
+ });
+}
+
+size_t PacketBuffer::NumPacketsInBuffer() const {
+ return buffer_.size();
+}
+
+size_t PacketBuffer::NumSamplesInBuffer(size_t last_decoded_length) const {
+ size_t num_samples = 0;
+ size_t last_duration = last_decoded_length;
+ for (const Packet& packet : buffer_) {
+ if (packet.frame) {
+ // TODO(hlundin): Verify that it's fine to count all packets and remove
+ // this check.
+ if (packet.priority != Packet::Priority(0, 0)) {
+ continue;
+ }
+ size_t duration = packet.frame->Duration();
+ if (duration > 0) {
+ last_duration = duration; // Save the most up-to-date (valid) duration.
+ }
+ }
+ num_samples += last_duration;
+ }
+ return num_samples;
+}
+
+size_t PacketBuffer::GetSpanSamples(size_t last_decoded_length,
+ size_t sample_rate,
+ bool count_dtx_waiting_time) const {
+ if (buffer_.size() == 0) {
+ return 0;
+ }
+
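+  // The span is the timestamp distance from the first to the last packet,
+  // plus the duration of the last packet (or `last_decoded_length` if that
+  // duration is unknown).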
+ size_t span = buffer_.back().timestamp - buffer_.front().timestamp;
+ if (buffer_.back().frame && buffer_.back().frame->Duration() > 0) {
+ size_t duration = buffer_.back().frame->Duration();
+ if (count_dtx_waiting_time && buffer_.back().frame->IsDtxPacket()) {
+ size_t waiting_time_samples = rtc::dchecked_cast<size_t>(
+ buffer_.back().waiting_time->ElapsedMs() * (sample_rate / 1000));
+ duration = std::max(duration, waiting_time_samples);
+ }
+ span += duration;
+ } else {
+ span += last_decoded_length;
+ }
+ return span;
+}
+
+bool PacketBuffer::ContainsDtxOrCngPacket(
+ const DecoderDatabase* decoder_database) const {
+ RTC_DCHECK(decoder_database);
+ for (const Packet& packet : buffer_) {
+ if ((packet.frame && packet.frame->IsDtxPacket()) ||
+ decoder_database->IsComfortNoise(packet.payload_type)) {
+ return true;
+ }
+ }
+ return false;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/packet_buffer.h b/third_party/libwebrtc/modules/audio_coding/neteq/packet_buffer.h
new file mode 100644
index 0000000000..c6fb47ffbf
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/packet_buffer.h
@@ -0,0 +1,181 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_PACKET_BUFFER_H_
+#define MODULES_AUDIO_CODING_NETEQ_PACKET_BUFFER_H_
+
+#include "absl/types/optional.h"
+#include "modules/audio_coding/neteq/decoder_database.h"
+#include "modules/audio_coding/neteq/packet.h"
+#include "modules/include/module_common_types_public.h" // IsNewerTimestamp
+
+namespace webrtc {
+
+class DecoderDatabase;
+class StatisticsCalculator;
+class TickTimer;
+struct SmartFlushingConfig {
+ // When calculating the flushing threshold, the maximum between the target
+ // level and this value is used.
+ int target_level_threshold_ms = 500;
+  // A smart flush is triggered when the span of the packet buffer exceeds
+  // `target_level_multiplier` times the target level.
+ int target_level_multiplier = 3;
+};
+
+// This is the actual buffer holding the packets before decoding.
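+// A minimal usage sketch (illustrative values only; mirrors the unit tests):
+//   TickTimer tick_timer;
+//   PacketBuffer buffer(/*max_number_of_packets=*/10, &tick_timer);
+//   buffer.InsertPacket(std::move(packet), &stats, /*last_decoded_length=*/80,
+//                       /*sample_rate=*/8000, /*target_level_ms=*/60,
+//                       decoder_database);
+//   absl::optional<Packet> next = buffer.GetNextPacket();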
+class PacketBuffer {
+ public:
+ enum BufferReturnCodes {
+ kOK = 0,
+ kFlushed,
+ kPartialFlush,
+ kNotFound,
+ kBufferEmpty,
+ kInvalidPacket,
+ kInvalidPointer
+ };
+
+ // Constructor creates a buffer which can hold a maximum of
+ // `max_number_of_packets` packets.
+ PacketBuffer(size_t max_number_of_packets, const TickTimer* tick_timer);
+
+ // Deletes all packets in the buffer before destroying the buffer.
+ virtual ~PacketBuffer();
+
+ PacketBuffer(const PacketBuffer&) = delete;
+ PacketBuffer& operator=(const PacketBuffer&) = delete;
+
+ // Flushes the buffer and deletes all packets in it.
+ virtual void Flush(StatisticsCalculator* stats);
+
+ // Partial flush. Flush packets but leave some packets behind.
+ virtual void PartialFlush(int target_level_ms,
+ size_t sample_rate,
+ size_t last_decoded_length,
+ StatisticsCalculator* stats);
+
+ // Returns true for an empty buffer.
+ virtual bool Empty() const;
+
+ // Inserts `packet` into the buffer. The buffer will take over ownership of
+ // the packet object.
+ // Returns PacketBuffer::kOK on success, PacketBuffer::kFlushed if the buffer
+ // was flushed due to overfilling.
+ virtual int InsertPacket(Packet&& packet,
+ StatisticsCalculator* stats,
+ size_t last_decoded_length,
+ size_t sample_rate,
+ int target_level_ms,
+ const DecoderDatabase& decoder_database);
+
+ // Inserts a list of packets into the buffer. The buffer will take over
+ // ownership of the packet objects.
+ // Returns PacketBuffer::kOK if all packets were inserted successfully.
+ // If the buffer was flushed due to overfilling, only a subset of the list is
+ // inserted, and PacketBuffer::kFlushed is returned.
+ // The last three parameters are included for legacy compatibility.
+ // TODO(hlundin): Redesign to not use current_*_payload_type and
+ // decoder_database.
+ virtual int InsertPacketList(
+ PacketList* packet_list,
+ const DecoderDatabase& decoder_database,
+ absl::optional<uint8_t>* current_rtp_payload_type,
+ absl::optional<uint8_t>* current_cng_rtp_payload_type,
+ StatisticsCalculator* stats,
+ size_t last_decoded_length,
+ size_t sample_rate,
+ int target_level_ms);
+
+ // Gets the timestamp for the first packet in the buffer and writes it to the
+ // output variable `next_timestamp`.
+ // Returns PacketBuffer::kBufferEmpty if the buffer is empty,
+ // PacketBuffer::kOK otherwise.
+ virtual int NextTimestamp(uint32_t* next_timestamp) const;
+
+ // Gets the timestamp for the first packet in the buffer with a timestamp no
+ // lower than the input limit `timestamp`. The result is written to the output
+ // variable `next_timestamp`.
+ // Returns PacketBuffer::kBufferEmpty if the buffer is empty,
+ // PacketBuffer::kOK otherwise.
+ virtual int NextHigherTimestamp(uint32_t timestamp,
+ uint32_t* next_timestamp) const;
+
+ // Returns a (constant) pointer to the first packet in the buffer. Returns
+ // NULL if the buffer is empty.
+ virtual const Packet* PeekNextPacket() const;
+
+ // Extracts the first packet in the buffer and returns it.
+ // Returns an empty optional if the buffer is empty.
+ virtual absl::optional<Packet> GetNextPacket();
+
+ // Discards the first packet in the buffer. The packet is deleted.
+ // Returns PacketBuffer::kBufferEmpty if the buffer is empty,
+ // PacketBuffer::kOK otherwise.
+ virtual int DiscardNextPacket(StatisticsCalculator* stats);
+
+ // Discards all packets that are (strictly) older than timestamp_limit,
+ // but newer than timestamp_limit - horizon_samples. Setting horizon_samples
+ // to zero implies that the horizon is set to half the timestamp range. That
+ // is, if a packet is more than 2^31 timestamps into the future compared with
+ // timestamp_limit (including wrap-around), it is considered old.
+ virtual void DiscardOldPackets(uint32_t timestamp_limit,
+ uint32_t horizon_samples,
+ StatisticsCalculator* stats);
+
+ // Discards all packets that are (strictly) older than timestamp_limit.
+ virtual void DiscardAllOldPackets(uint32_t timestamp_limit,
+ StatisticsCalculator* stats);
+
+ // Removes all packets with a specific payload type from the buffer.
+ virtual void DiscardPacketsWithPayloadType(uint8_t payload_type,
+ StatisticsCalculator* stats);
+
+ // Returns the number of packets in the buffer, including duplicates and
+ // redundant packets.
+ virtual size_t NumPacketsInBuffer() const;
+
+ // Returns the number of samples in the buffer, including samples carried in
+ // duplicate and redundant packets.
+ virtual size_t NumSamplesInBuffer(size_t last_decoded_length) const;
+
+  // Returns the total duration in samples that the packets in the buffer span
+  // across.
+ virtual size_t GetSpanSamples(size_t last_decoded_length,
+ size_t sample_rate,
+ bool count_dtx_waiting_time) const;
+
+ // Returns true if the packet buffer contains any DTX or CNG packets.
+ virtual bool ContainsDtxOrCngPacket(
+ const DecoderDatabase* decoder_database) const;
+
+ // Static method returning true if `timestamp` is older than `timestamp_limit`
+ // but less than `horizon_samples` behind `timestamp_limit`. For instance,
+ // with timestamp_limit = 100 and horizon_samples = 10, a timestamp in the
+ // range (90, 100) is considered obsolete, and will yield true.
+ // Setting `horizon_samples` to 0 is the same as setting it to 2^31, i.e.,
+ // half the 32-bit timestamp range.
+ static bool IsObsoleteTimestamp(uint32_t timestamp,
+ uint32_t timestamp_limit,
+ uint32_t horizon_samples) {
+ return IsNewerTimestamp(timestamp_limit, timestamp) &&
+ (horizon_samples == 0 ||
+ IsNewerTimestamp(timestamp, timestamp_limit - horizon_samples));
+ }
+
+ private:
+ absl::optional<SmartFlushingConfig> smart_flushing_config_;
+ size_t max_number_of_packets_;
+ PacketList buffer_;
+ const TickTimer* tick_timer_;
+};
+
+} // namespace webrtc
+#endif // MODULES_AUDIO_CODING_NETEQ_PACKET_BUFFER_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/packet_buffer_unittest.cc b/third_party/libwebrtc/modules/audio_coding/neteq/packet_buffer_unittest.cc
new file mode 100644
index 0000000000..1a054daca3
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/packet_buffer_unittest.cc
@@ -0,0 +1,989 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+// Unit tests for PacketBuffer class.
+
+#include "modules/audio_coding/neteq/packet_buffer.h"
+
+#include <memory>
+
+#include "api/audio_codecs/builtin_audio_decoder_factory.h"
+#include "api/neteq/tick_timer.h"
+#include "modules/audio_coding/neteq/mock/mock_decoder_database.h"
+#include "modules/audio_coding/neteq/mock/mock_statistics_calculator.h"
+#include "modules/audio_coding/neteq/packet.h"
+#include "test/field_trial.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+using ::testing::_;
+using ::testing::InSequence;
+using ::testing::MockFunction;
+using ::testing::Return;
+using ::testing::StrictMock;
+
+namespace {
+class MockEncodedAudioFrame : public webrtc::AudioDecoder::EncodedAudioFrame {
+ public:
+ MOCK_METHOD(size_t, Duration, (), (const, override));
+
+ MOCK_METHOD(bool, IsDtxPacket, (), (const, override));
+
+ MOCK_METHOD(absl::optional<DecodeResult>,
+ Decode,
+ (rtc::ArrayView<int16_t> decoded),
+ (const, override));
+};
+
+// Helper class to generate packets. Packets are returned by value.
+class PacketGenerator {
+ public:
+ PacketGenerator(uint16_t seq_no, uint32_t ts, uint8_t pt, int frame_size);
+ virtual ~PacketGenerator() {}
+ void Reset(uint16_t seq_no, uint32_t ts, uint8_t pt, int frame_size);
+ webrtc::Packet NextPacket(
+ int payload_size_bytes,
+ std::unique_ptr<webrtc::AudioDecoder::EncodedAudioFrame> audio_frame);
+
+ uint16_t seq_no_;
+ uint32_t ts_;
+ uint8_t pt_;
+ int frame_size_;
+};
+
+PacketGenerator::PacketGenerator(uint16_t seq_no,
+ uint32_t ts,
+ uint8_t pt,
+ int frame_size) {
+ Reset(seq_no, ts, pt, frame_size);
+}
+
+void PacketGenerator::Reset(uint16_t seq_no,
+ uint32_t ts,
+ uint8_t pt,
+ int frame_size) {
+ seq_no_ = seq_no;
+ ts_ = ts;
+ pt_ = pt;
+ frame_size_ = frame_size;
+}
+
+webrtc::Packet PacketGenerator::NextPacket(
+ int payload_size_bytes,
+ std::unique_ptr<webrtc::AudioDecoder::EncodedAudioFrame> audio_frame) {
+ webrtc::Packet packet;
+ packet.sequence_number = seq_no_;
+ packet.timestamp = ts_;
+ packet.payload_type = pt_;
+ packet.payload.SetSize(payload_size_bytes);
+ ++seq_no_;
+ ts_ += frame_size_;
+ packet.frame = std::move(audio_frame);
+ return packet;
+}
+
+struct PacketsToInsert {
+ uint16_t sequence_number;
+ uint32_t timestamp;
+ uint8_t payload_type;
+ bool primary;
+ // Order of this packet to appear upon extraction, after inserting a series
+ // of packets. A negative number means that it should have been discarded
+ // before extraction.
+ int extract_order;
+};
+
+} // namespace
+
+namespace webrtc {
+
+// Start of test definitions.
+
+TEST(PacketBuffer, CreateAndDestroy) {
+ TickTimer tick_timer;
+ PacketBuffer* buffer = new PacketBuffer(10, &tick_timer); // 10 packets.
+ EXPECT_TRUE(buffer->Empty());
+ delete buffer;
+}
+
+TEST(PacketBuffer, InsertPacket) {
+ TickTimer tick_timer;
+ PacketBuffer buffer(10, &tick_timer); // 10 packets.
+ PacketGenerator gen(17u, 4711u, 0, 10);
+ StrictMock<MockStatisticsCalculator> mock_stats;
+ MockDecoderDatabase decoder_database;
+
+ const int payload_len = 100;
+ const Packet packet = gen.NextPacket(payload_len, nullptr);
+ EXPECT_EQ(0, buffer.InsertPacket(/*packet=*/packet.Clone(),
+ /*stats=*/&mock_stats,
+ /*last_decoded_length=*/payload_len,
+ /*sample_rate=*/10000,
+ /*target_level_ms=*/60,
+ /*decoder_database=*/decoder_database));
+ uint32_t next_ts;
+ EXPECT_EQ(PacketBuffer::kOK, buffer.NextTimestamp(&next_ts));
+ EXPECT_EQ(4711u, next_ts);
+ EXPECT_FALSE(buffer.Empty());
+ EXPECT_EQ(1u, buffer.NumPacketsInBuffer());
+ const Packet* next_packet = buffer.PeekNextPacket();
+ EXPECT_EQ(packet, *next_packet); // Compare contents.
+ EXPECT_CALL(decoder_database, Die()); // Called when object is deleted.
+
+ // Do not explicitly flush buffer or delete packet to test that it is deleted
+ // with the buffer. (Tested with Valgrind or similar tool.)
+}
+
+// Test to flush buffer.
+TEST(PacketBuffer, FlushBuffer) {
+ TickTimer tick_timer;
+ PacketBuffer buffer(10, &tick_timer); // 10 packets.
+ PacketGenerator gen(0, 0, 0, 10);
+ const int payload_len = 10;
+ StrictMock<MockStatisticsCalculator> mock_stats;
+ MockDecoderDatabase decoder_database;
+
+ // Insert 10 small packets; should be ok.
+ for (int i = 0; i < 10; ++i) {
+ EXPECT_EQ(
+ PacketBuffer::kOK,
+ buffer.InsertPacket(/*packet=*/gen.NextPacket(payload_len, nullptr),
+ /*stats=*/&mock_stats,
+ /*last_decoded_length=*/payload_len,
+ /*sample_rate=*/1000,
+ /*target_level_ms=*/60,
+ /*decoder_database=*/decoder_database));
+ }
+ EXPECT_EQ(10u, buffer.NumPacketsInBuffer());
+ EXPECT_FALSE(buffer.Empty());
+
+ EXPECT_CALL(mock_stats, PacketsDiscarded(1)).Times(10);
+ buffer.Flush(&mock_stats);
+ // Buffer should delete the payloads itself.
+ EXPECT_EQ(0u, buffer.NumPacketsInBuffer());
+ EXPECT_TRUE(buffer.Empty());
+ EXPECT_CALL(decoder_database, Die()); // Called when object is deleted.
+}
+
+// Test to fill the buffer over the limits, and verify that it flushes.
+TEST(PacketBuffer, OverfillBuffer) {
+ TickTimer tick_timer;
+ PacketBuffer buffer(10, &tick_timer); // 10 packets.
+ PacketGenerator gen(0, 0, 0, 10);
+ StrictMock<MockStatisticsCalculator> mock_stats;
+ MockDecoderDatabase decoder_database;
+
+ // Insert 10 small packets; should be ok.
+ const int payload_len = 10;
+ int i;
+ for (i = 0; i < 10; ++i) {
+ EXPECT_EQ(
+ PacketBuffer::kOK,
+ buffer.InsertPacket(/*packet=*/gen.NextPacket(payload_len, nullptr),
+ /*stats=*/&mock_stats,
+ /*last_decoded_length=*/payload_len,
+ /*sample_rate=*/1000,
+ /*target_level_ms=*/60,
+ /*decoder_database=*/decoder_database));
+ }
+ EXPECT_EQ(10u, buffer.NumPacketsInBuffer());
+ uint32_t next_ts;
+ EXPECT_EQ(PacketBuffer::kOK, buffer.NextTimestamp(&next_ts));
+ EXPECT_EQ(0u, next_ts); // Expect first inserted packet to be first in line.
+
+ EXPECT_CALL(mock_stats, PacketsDiscarded(1)).Times(10);
+ const Packet packet = gen.NextPacket(payload_len, nullptr);
+ // Insert 11th packet; should flush the buffer and insert it after flushing.
+ EXPECT_EQ(PacketBuffer::kFlushed,
+ buffer.InsertPacket(/*packet=*/packet.Clone(),
+ /*stats=*/&mock_stats,
+ /*last_decoded_length=*/payload_len,
+ /*sample_rate=*/1000,
+ /*target_level_ms=*/60,
+ /*decoder_database=*/decoder_database));
+ EXPECT_EQ(1u, buffer.NumPacketsInBuffer());
+ EXPECT_EQ(PacketBuffer::kOK, buffer.NextTimestamp(&next_ts));
+ // Expect last inserted packet to be first in line.
+ EXPECT_EQ(packet.timestamp, next_ts);
+
+ EXPECT_CALL(decoder_database, Die()); // Called when object is deleted.
+}
+
+// Test a partial buffer flush.
+TEST(PacketBuffer, PartialFlush) {
+ // Use a field trial to configure smart flushing.
+ test::ScopedFieldTrials field_trials(
+ "WebRTC-Audio-NetEqSmartFlushing/enabled:true,"
+ "target_level_threshold_ms:0,target_level_multiplier:2/");
+ TickTimer tick_timer;
+ PacketBuffer buffer(10, &tick_timer); // 10 packets.
+ PacketGenerator gen(0, 0, 0, 10);
+ const int payload_len = 10;
+ StrictMock<MockStatisticsCalculator> mock_stats;
+ MockDecoderDatabase decoder_database;
+
+ // Insert 10 small packets; should be ok.
+ for (int i = 0; i < 10; ++i) {
+ EXPECT_EQ(
+ PacketBuffer::kOK,
+ buffer.InsertPacket(/*packet=*/gen.NextPacket(payload_len, nullptr),
+ /*stats=*/&mock_stats,
+ /*last_decoded_length=*/payload_len,
+ /*sample_rate=*/1000,
+ /*target_level_ms=*/100,
+ /*decoder_database=*/decoder_database));
+ }
+ EXPECT_EQ(10u, buffer.NumPacketsInBuffer());
+ EXPECT_FALSE(buffer.Empty());
+
+ EXPECT_CALL(mock_stats, PacketsDiscarded(1)).Times(7);
+ buffer.PartialFlush(/*target_level_ms=*/30,
+ /*sample_rate=*/1000,
+ /*last_decoded_length=*/payload_len,
+ /*stats=*/&mock_stats);
+ // There should still be some packets left in the buffer.
+ EXPECT_EQ(3u, buffer.NumPacketsInBuffer());
+ EXPECT_FALSE(buffer.Empty());
+ EXPECT_CALL(decoder_database, Die()); // Called when object is deleted.
+}
+
+// Test to fill the buffer over the limits, and verify that the smart flush
+// functionality works as expected.
+TEST(PacketBuffer, SmartFlushOverfillBuffer) {
+ // Use a field trial to configure smart flushing.
+ test::ScopedFieldTrials field_trials(
+ "WebRTC-Audio-NetEqSmartFlushing/enabled:true,"
+ "target_level_threshold_ms:0,target_level_multiplier:2/");
+ TickTimer tick_timer;
+ PacketBuffer buffer(10, &tick_timer); // 10 packets.
+ PacketGenerator gen(0, 0, 0, 10);
+ StrictMock<MockStatisticsCalculator> mock_stats;
+ MockDecoderDatabase decoder_database;
+
+ // Insert 10 small packets; should be ok.
+ const int payload_len = 10;
+ int i;
+ for (i = 0; i < 10; ++i) {
+ EXPECT_EQ(
+ PacketBuffer::kOK,
+ buffer.InsertPacket(/*packet=*/gen.NextPacket(payload_len, nullptr),
+ /*stats=*/&mock_stats,
+ /*last_decoded_length=*/payload_len,
+ /*sample_rate=*/1000,
+ /*target_level_ms=*/100,
+ /*decoder_database=*/decoder_database));
+ }
+ EXPECT_EQ(10u, buffer.NumPacketsInBuffer());
+ uint32_t next_ts;
+ EXPECT_EQ(PacketBuffer::kOK, buffer.NextTimestamp(&next_ts));
+ EXPECT_EQ(0u, next_ts); // Expect first inserted packet to be first in line.
+
+ const Packet packet = gen.NextPacket(payload_len, nullptr);
+ EXPECT_CALL(mock_stats, PacketsDiscarded(1)).Times(6);
+ // Insert 11th packet; should cause a partial flush and insert the packet
+ // after flushing.
+ EXPECT_EQ(PacketBuffer::kPartialFlush,
+ buffer.InsertPacket(/*packet=*/packet.Clone(),
+ /*stats=*/&mock_stats,
+ /*last_decoded_length=*/payload_len,
+ /*sample_rate=*/1000,
+ /*target_level_ms=*/40,
+ /*decoder_database=*/decoder_database));
+ EXPECT_EQ(5u, buffer.NumPacketsInBuffer());
+ EXPECT_CALL(decoder_database, Die()); // Called when object is deleted.
+}
+
+// Test inserting a list of packets.
+TEST(PacketBuffer, InsertPacketList) {
+ TickTimer tick_timer;
+ PacketBuffer buffer(10, &tick_timer); // 10 packets.
+ PacketGenerator gen(0, 0, 0, 10);
+ PacketList list;
+ const int payload_len = 10;
+
+ // Insert 10 small packets.
+ for (int i = 0; i < 10; ++i) {
+ list.push_back(gen.NextPacket(payload_len, nullptr));
+ }
+
+ MockDecoderDatabase decoder_database;
+ auto factory = CreateBuiltinAudioDecoderFactory();
+ const DecoderDatabase::DecoderInfo info(SdpAudioFormat("pcmu", 8000, 1),
+ absl::nullopt, factory.get());
+ EXPECT_CALL(decoder_database, GetDecoderInfo(0))
+ .WillRepeatedly(Return(&info));
+
+ StrictMock<MockStatisticsCalculator> mock_stats;
+
+ absl::optional<uint8_t> current_pt;
+ absl::optional<uint8_t> current_cng_pt;
+ EXPECT_EQ(
+ PacketBuffer::kOK,
+ buffer.InsertPacketList(/*packet_list=*/&list,
+ /*decoder_database=*/decoder_database,
+ /*current_rtp_payload_type=*/&current_pt,
+ /*current_cng_rtp_payload_type=*/&current_cng_pt,
+ /*stats=*/&mock_stats,
+ /*last_decoded_length=*/payload_len,
+ /*sample_rate=*/1000,
+ /*target_level_ms=*/30));
+ EXPECT_TRUE(list.empty()); // The PacketBuffer should have depleted the list.
+ EXPECT_EQ(10u, buffer.NumPacketsInBuffer());
+ EXPECT_EQ(0, current_pt); // Current payload type changed to 0.
+ EXPECT_EQ(absl::nullopt, current_cng_pt); // CNG payload type not changed.
+
+ EXPECT_CALL(decoder_database, Die()); // Called when object is deleted.
+}
+
+// Test inserting a list of packets. Last packet is of a different payload type.
+// Expecting the buffer to flush.
+// TODO(hlundin): Remove this test when legacy operation is no longer needed.
+TEST(PacketBuffer, InsertPacketListChangePayloadType) {
+ TickTimer tick_timer;
+ PacketBuffer buffer(10, &tick_timer); // 10 packets.
+ PacketGenerator gen(0, 0, 0, 10);
+ PacketList list;
+ const int payload_len = 10;
+
+ // Insert 10 small packets.
+ for (int i = 0; i < 10; ++i) {
+ list.push_back(gen.NextPacket(payload_len, nullptr));
+ }
+ // Insert 11th packet of another payload type (not CNG).
+ {
+ Packet packet = gen.NextPacket(payload_len, nullptr);
+ packet.payload_type = 1;
+ list.push_back(std::move(packet));
+ }
+
+ MockDecoderDatabase decoder_database;
+ auto factory = CreateBuiltinAudioDecoderFactory();
+ const DecoderDatabase::DecoderInfo info0(SdpAudioFormat("pcmu", 8000, 1),
+ absl::nullopt, factory.get());
+ EXPECT_CALL(decoder_database, GetDecoderInfo(0))
+ .WillRepeatedly(Return(&info0));
+ const DecoderDatabase::DecoderInfo info1(SdpAudioFormat("pcma", 8000, 1),
+ absl::nullopt, factory.get());
+ EXPECT_CALL(decoder_database, GetDecoderInfo(1))
+ .WillRepeatedly(Return(&info1));
+
+ StrictMock<MockStatisticsCalculator> mock_stats;
+
+ absl::optional<uint8_t> current_pt;
+ absl::optional<uint8_t> current_cng_pt;
+ EXPECT_CALL(mock_stats, PacketsDiscarded(1)).Times(10);
+ EXPECT_EQ(
+ PacketBuffer::kFlushed,
+ buffer.InsertPacketList(/*packet_list=*/&list,
+ /*decoder_database=*/decoder_database,
+ /*current_rtp_payload_type=*/&current_pt,
+ /*current_cng_rtp_payload_type=*/&current_cng_pt,
+ /*stats=*/&mock_stats,
+ /*last_decoded_length=*/payload_len,
+ /*sample_rate=*/1000,
+ /*target_level_ms=*/30));
+ EXPECT_TRUE(list.empty()); // The PacketBuffer should have depleted the list.
+ EXPECT_EQ(1u, buffer.NumPacketsInBuffer()); // Only the last packet.
+ EXPECT_EQ(1, current_pt); // Current payload type changed to 1.
+ EXPECT_EQ(absl::nullopt, current_cng_pt); // CNG payload type not changed.
+
+ EXPECT_CALL(decoder_database, Die()); // Called when object is deleted.
+}
+
+TEST(PacketBuffer, ExtractOrderRedundancy) {
+ TickTimer tick_timer;
+ PacketBuffer buffer(100, &tick_timer); // 100 packets.
+ const int kPackets = 18;
+ const int kFrameSize = 10;
+ const int kPayloadLength = 10;
+
+ PacketsToInsert packet_facts[kPackets] = {
+ {0xFFFD, 0xFFFFFFD7, 0, true, 0}, {0xFFFE, 0xFFFFFFE1, 0, true, 1},
+ {0xFFFE, 0xFFFFFFD7, 1, false, -1}, {0xFFFF, 0xFFFFFFEB, 0, true, 2},
+ {0xFFFF, 0xFFFFFFE1, 1, false, -1}, {0x0000, 0xFFFFFFF5, 0, true, 3},
+ {0x0000, 0xFFFFFFEB, 1, false, -1}, {0x0001, 0xFFFFFFFF, 0, true, 4},
+ {0x0001, 0xFFFFFFF5, 1, false, -1}, {0x0002, 0x0000000A, 0, true, 5},
+ {0x0002, 0xFFFFFFFF, 1, false, -1}, {0x0003, 0x0000000A, 1, false, -1},
+ {0x0004, 0x0000001E, 0, true, 7}, {0x0004, 0x00000014, 1, false, 6},
+ {0x0005, 0x0000001E, 0, true, -1}, {0x0005, 0x00000014, 1, false, -1},
+ {0x0006, 0x00000028, 0, true, 8}, {0x0006, 0x0000001E, 1, false, -1},
+ };
+ MockDecoderDatabase decoder_database;
+
+ const size_t kExpectPacketsInBuffer = 9;
+
+ std::vector<Packet> expect_order(kExpectPacketsInBuffer);
+
+ PacketGenerator gen(0, 0, 0, kFrameSize);
+
+ StrictMock<MockStatisticsCalculator> mock_stats;
+
+ // Interleaving the EXPECT_CALL sequence with expectations on the MockFunction
+ // check ensures that exactly one call to PacketsDiscarded happens in each
+ // DiscardNextPacket call.
+ InSequence s;
+ MockFunction<void(int check_point_id)> check;
+ for (int i = 0; i < kPackets; ++i) {
+ gen.Reset(packet_facts[i].sequence_number, packet_facts[i].timestamp,
+ packet_facts[i].payload_type, kFrameSize);
+ Packet packet = gen.NextPacket(kPayloadLength, nullptr);
+ packet.priority.codec_level = packet_facts[i].primary ? 0 : 1;
+ if (packet_facts[i].extract_order < 0) {
+ if (packet.priority.codec_level > 0) {
+ EXPECT_CALL(mock_stats, SecondaryPacketsDiscarded(1));
+ } else {
+ EXPECT_CALL(mock_stats, PacketsDiscarded(1));
+ }
+ }
+ EXPECT_CALL(check, Call(i));
+ EXPECT_EQ(PacketBuffer::kOK,
+ buffer.InsertPacket(/*packet=*/packet.Clone(),
+ /*stats=*/&mock_stats,
+ /*last_decoded_length=*/kPayloadLength,
+ /*sample_rate=*/1000,
+ /*target_level_ms=*/60,
+ /*decoder_database=*/decoder_database));
+ if (packet_facts[i].extract_order >= 0) {
+ expect_order[packet_facts[i].extract_order] = std::move(packet);
+ }
+ check.Call(i);
+ }
+
+ EXPECT_EQ(kExpectPacketsInBuffer, buffer.NumPacketsInBuffer());
+
+ for (size_t i = 0; i < kExpectPacketsInBuffer; ++i) {
+ const absl::optional<Packet> packet = buffer.GetNextPacket();
+ EXPECT_EQ(packet, expect_order[i]); // Compare contents.
+ }
+ EXPECT_TRUE(buffer.Empty());
+ EXPECT_CALL(decoder_database, Die()); // Called when object is deleted.
+}
+
+TEST(PacketBuffer, DiscardPackets) {
+ TickTimer tick_timer;
+ PacketBuffer buffer(100, &tick_timer); // 100 packets.
+ const uint16_t start_seq_no = 17;
+ const uint32_t start_ts = 4711;
+ const uint32_t ts_increment = 10;
+ PacketGenerator gen(start_seq_no, start_ts, 0, ts_increment);
+ PacketList list;
+ const int payload_len = 10;
+ StrictMock<MockStatisticsCalculator> mock_stats;
+ MockDecoderDatabase decoder_database;
+
+ constexpr int kTotalPackets = 10;
+ // Insert 10 small packets.
+ for (int i = 0; i < kTotalPackets; ++i) {
+ buffer.InsertPacket(/*packet=*/gen.NextPacket(payload_len, nullptr),
+ /*stats=*/&mock_stats,
+ /*last_decoded_length=*/payload_len,
+ /*sample_rate=*/1000,
+ /*target_level_ms=*/60,
+ /*decoder_database=*/decoder_database);
+ }
+ EXPECT_EQ(10u, buffer.NumPacketsInBuffer());
+
+ uint32_t current_ts = start_ts;
+
+ // Discard them one by one and make sure that the right packets are at the
+ // front of the buffer.
+ constexpr int kDiscardPackets = 5;
+
+ // Interleaving the EXPECT_CALL sequence with expectations on the MockFunction
+ // check ensures that exactly one call to PacketsDiscarded happens in each
+ // DiscardNextPacket call.
+ InSequence s;
+ MockFunction<void(int check_point_id)> check;
+ for (int i = 0; i < kDiscardPackets; ++i) {
+ uint32_t ts;
+ EXPECT_EQ(PacketBuffer::kOK, buffer.NextTimestamp(&ts));
+ EXPECT_EQ(current_ts, ts);
+ EXPECT_CALL(mock_stats, PacketsDiscarded(1));
+ EXPECT_CALL(check, Call(i));
+ EXPECT_EQ(PacketBuffer::kOK, buffer.DiscardNextPacket(&mock_stats));
+ current_ts += ts_increment;
+ check.Call(i);
+ }
+
+ constexpr int kRemainingPackets = kTotalPackets - kDiscardPackets;
+ // This will discard all remaining packets but one. The oldest packet is older
+ // than the indicated horizon_samples, and will thus be left in the buffer.
+ constexpr size_t kSkipPackets = 1;
+ EXPECT_CALL(mock_stats, PacketsDiscarded(1))
+ .Times(kRemainingPackets - kSkipPackets);
+ EXPECT_CALL(check, Call(17)); // Arbitrary id number.
+ buffer.DiscardOldPackets(start_ts + kTotalPackets * ts_increment,
+ kRemainingPackets * ts_increment, &mock_stats);
+ check.Call(17); // Same arbitrary id number.
+
+ EXPECT_EQ(kSkipPackets, buffer.NumPacketsInBuffer());
+ uint32_t ts;
+ EXPECT_EQ(PacketBuffer::kOK, buffer.NextTimestamp(&ts));
+ EXPECT_EQ(current_ts, ts);
+
+ // Discard all remaining packets.
+ EXPECT_CALL(mock_stats, PacketsDiscarded(kSkipPackets));
+ buffer.DiscardAllOldPackets(start_ts + kTotalPackets * ts_increment,
+ &mock_stats);
+
+ EXPECT_TRUE(buffer.Empty());
+ EXPECT_CALL(decoder_database, Die()); // Called when object is deleted.
+}
+
+TEST(PacketBuffer, Reordering) {
+ TickTimer tick_timer;
+ PacketBuffer buffer(100, &tick_timer); // 100 packets.
+ const uint16_t start_seq_no = 17;
+ const uint32_t start_ts = 4711;
+ const uint32_t ts_increment = 10;
+ PacketGenerator gen(start_seq_no, start_ts, 0, ts_increment);
+ const int payload_len = 10;
+
+ // Generate 10 small packets and insert them into a PacketList. Insert every
+ // odd packet to the front, and every even packet to the back, thus creating
+ // a (rather strange) reordering.
+ PacketList list;
+ for (int i = 0; i < 10; ++i) {
+ Packet packet = gen.NextPacket(payload_len, nullptr);
+ if (i % 2) {
+ list.push_front(std::move(packet));
+ } else {
+ list.push_back(std::move(packet));
+ }
+ }
+
+ MockDecoderDatabase decoder_database;
+ auto factory = CreateBuiltinAudioDecoderFactory();
+ const DecoderDatabase::DecoderInfo info(SdpAudioFormat("pcmu", 8000, 1),
+ absl::nullopt, factory.get());
+ EXPECT_CALL(decoder_database, GetDecoderInfo(0))
+ .WillRepeatedly(Return(&info));
+ absl::optional<uint8_t> current_pt;
+ absl::optional<uint8_t> current_cng_pt;
+
+ StrictMock<MockStatisticsCalculator> mock_stats;
+
+ EXPECT_EQ(
+ PacketBuffer::kOK,
+ buffer.InsertPacketList(/*packet_list=*/&list,
+ /*decoder_database=*/decoder_database,
+ /*current_rtp_payload_type=*/&current_pt,
+ /*current_cng_rtp_payload_type=*/&current_cng_pt,
+ /*stats=*/&mock_stats,
+ /*last_decoded_length=*/payload_len,
+ /*sample_rate=*/1000,
+ /*target_level_ms=*/30));
+ EXPECT_EQ(10u, buffer.NumPacketsInBuffer());
+
+ // Extract them and make sure that come out in the right order.
+ uint32_t current_ts = start_ts;
+ for (int i = 0; i < 10; ++i) {
+ const absl::optional<Packet> packet = buffer.GetNextPacket();
+ ASSERT_TRUE(packet);
+ EXPECT_EQ(current_ts, packet->timestamp);
+ current_ts += ts_increment;
+ }
+ EXPECT_TRUE(buffer.Empty());
+
+ EXPECT_CALL(decoder_database, Die()); // Called when object is deleted.
+}
+
+// The test first inserts a packet with narrow-band CNG, then a packet with
+// wide-band speech. The expected behavior of the packet buffer is to detect a
+// change in sample rate, even though no speech packet has been inserted before,
+// and flush out the CNG packet.
+TEST(PacketBuffer, CngFirstThenSpeechWithNewSampleRate) {
+ TickTimer tick_timer;
+ PacketBuffer buffer(10, &tick_timer); // 10 packets.
+ const uint8_t kCngPt = 13;
+ const int kPayloadLen = 10;
+ const uint8_t kSpeechPt = 100;
+
+ MockDecoderDatabase decoder_database;
+ auto factory = CreateBuiltinAudioDecoderFactory();
+ const DecoderDatabase::DecoderInfo info_cng(SdpAudioFormat("cn", 8000, 1),
+ absl::nullopt, factory.get());
+ EXPECT_CALL(decoder_database, GetDecoderInfo(kCngPt))
+ .WillRepeatedly(Return(&info_cng));
+ const DecoderDatabase::DecoderInfo info_speech(
+ SdpAudioFormat("l16", 16000, 1), absl::nullopt, factory.get());
+ EXPECT_CALL(decoder_database, GetDecoderInfo(kSpeechPt))
+ .WillRepeatedly(Return(&info_speech));
+
+ // Insert first packet, which is narrow-band CNG.
+ PacketGenerator gen(0, 0, kCngPt, 10);
+ PacketList list;
+ list.push_back(gen.NextPacket(kPayloadLen, nullptr));
+ absl::optional<uint8_t> current_pt;
+ absl::optional<uint8_t> current_cng_pt;
+
+ StrictMock<MockStatisticsCalculator> mock_stats;
+
+ EXPECT_EQ(
+ PacketBuffer::kOK,
+ buffer.InsertPacketList(/*packet_list=*/&list,
+ /*decoder_database=*/decoder_database,
+ /*current_rtp_payload_type=*/&current_pt,
+ /*current_cng_rtp_payload_type=*/&current_cng_pt,
+ /*stats=*/&mock_stats,
+ /*last_decoded_length=*/kPayloadLen,
+ /*sample_rate=*/1000,
+ /*target_level_ms=*/30));
+ EXPECT_TRUE(list.empty());
+ EXPECT_EQ(1u, buffer.NumPacketsInBuffer());
+ ASSERT_TRUE(buffer.PeekNextPacket());
+ EXPECT_EQ(kCngPt, buffer.PeekNextPacket()->payload_type);
+ EXPECT_EQ(current_pt, absl::nullopt); // Current payload type not set.
+ EXPECT_EQ(kCngPt, current_cng_pt); // CNG payload type set.
+
+ // Insert second packet, which is wide-band speech.
+ {
+ Packet packet = gen.NextPacket(kPayloadLen, nullptr);
+ packet.payload_type = kSpeechPt;
+ list.push_back(std::move(packet));
+ }
+ // Expect the buffer to flush out the CNG packet, since it does not match the
+ // new speech sample rate.
+ EXPECT_CALL(mock_stats, PacketsDiscarded(1));
+ EXPECT_EQ(
+ PacketBuffer::kFlushed,
+ buffer.InsertPacketList(/*packet_list=*/&list,
+ /*decoder_database=*/decoder_database,
+ /*current_rtp_payload_type=*/&current_pt,
+ /*current_cng_rtp_payload_type=*/&current_cng_pt,
+ /*stats=*/&mock_stats,
+ /*last_decoded_length=*/kPayloadLen,
+ /*sample_rate=*/1000,
+ /*target_level_ms=*/30));
+ EXPECT_TRUE(list.empty());
+ EXPECT_EQ(1u, buffer.NumPacketsInBuffer());
+ ASSERT_TRUE(buffer.PeekNextPacket());
+ EXPECT_EQ(kSpeechPt, buffer.PeekNextPacket()->payload_type);
+
+ EXPECT_EQ(kSpeechPt, current_pt); // Current payload type set.
+ EXPECT_EQ(absl::nullopt, current_cng_pt); // CNG payload type reset.
+
+ EXPECT_CALL(decoder_database, Die()); // Called when object is deleted.
+}
+
+TEST(PacketBuffer, Failures) {
+ const uint16_t start_seq_no = 17;
+ const uint32_t start_ts = 4711;
+ const uint32_t ts_increment = 10;
+ int payload_len = 100;
+ PacketGenerator gen(start_seq_no, start_ts, 0, ts_increment);
+ TickTimer tick_timer;
+ StrictMock<MockStatisticsCalculator> mock_stats;
+ MockDecoderDatabase decoder_database;
+
+ PacketBuffer* buffer = new PacketBuffer(100, &tick_timer); // 100 packets.
+ {
+ Packet packet = gen.NextPacket(payload_len, nullptr);
+ packet.payload.Clear();
+ EXPECT_EQ(PacketBuffer::kInvalidPacket,
+ buffer->InsertPacket(/*packet=*/std::move(packet),
+ /*stats=*/&mock_stats,
+ /*last_decoded_length=*/payload_len,
+ /*sample_rate=*/1000,
+ /*target_level_ms=*/60,
+ /*decoder_database=*/decoder_database));
+ }
+ // Buffer should still be empty. Test all empty-checks.
+ uint32_t temp_ts;
+ EXPECT_EQ(PacketBuffer::kBufferEmpty, buffer->NextTimestamp(&temp_ts));
+ EXPECT_EQ(PacketBuffer::kBufferEmpty,
+ buffer->NextHigherTimestamp(0, &temp_ts));
+ EXPECT_EQ(NULL, buffer->PeekNextPacket());
+ EXPECT_FALSE(buffer->GetNextPacket());
+
+  // Discarding packets will not invoke mock_stats.PacketsDiscarded() because
+  // the packet buffer is empty.
+ EXPECT_EQ(PacketBuffer::kBufferEmpty, buffer->DiscardNextPacket(&mock_stats));
+ buffer->DiscardAllOldPackets(0, &mock_stats);
+
+ // Insert one packet to make the buffer non-empty.
+ EXPECT_EQ(
+ PacketBuffer::kOK,
+ buffer->InsertPacket(/*packet=*/gen.NextPacket(payload_len, nullptr),
+ /*stats=*/&mock_stats,
+ /*last_decoded_length=*/payload_len,
+ /*sample_rate=*/1000,
+ /*target_level_ms=*/60,
+ /*decoder_database=*/decoder_database));
+ EXPECT_EQ(PacketBuffer::kInvalidPointer, buffer->NextTimestamp(NULL));
+ EXPECT_EQ(PacketBuffer::kInvalidPointer,
+ buffer->NextHigherTimestamp(0, NULL));
+ delete buffer;
+
+ // Insert packet list of three packets, where the second packet has an invalid
+ // payload. Expect first packet to be inserted, and the remaining two to be
+ // discarded.
+ buffer = new PacketBuffer(100, &tick_timer); // 100 packets.
+ PacketList list;
+ list.push_back(gen.NextPacket(payload_len, nullptr)); // Valid packet.
+ {
+ Packet packet = gen.NextPacket(payload_len, nullptr);
+ packet.payload.Clear(); // Invalid.
+ list.push_back(std::move(packet));
+ }
+ list.push_back(gen.NextPacket(payload_len, nullptr)); // Valid packet.
+ auto factory = CreateBuiltinAudioDecoderFactory();
+ const DecoderDatabase::DecoderInfo info(SdpAudioFormat("pcmu", 8000, 1),
+ absl::nullopt, factory.get());
+ EXPECT_CALL(decoder_database, GetDecoderInfo(0))
+ .WillRepeatedly(Return(&info));
+ absl::optional<uint8_t> current_pt;
+ absl::optional<uint8_t> current_cng_pt;
+ EXPECT_EQ(
+ PacketBuffer::kInvalidPacket,
+ buffer->InsertPacketList(/*packet_list=*/&list,
+ /*decoder_database=*/decoder_database,
+ /*current_rtp_payload_type=*/&current_pt,
+ /*current_cng_rtp_payload_type=*/&current_cng_pt,
+ /*stats=*/&mock_stats,
+ /*last_decoded_length=*/payload_len,
+ /*sample_rate=*/1000,
+ /*target_level_ms=*/30));
+ EXPECT_TRUE(list.empty()); // The PacketBuffer should have depleted the list.
+ EXPECT_EQ(1u, buffer->NumPacketsInBuffer());
+ delete buffer;
+ EXPECT_CALL(decoder_database, Die()); // Called when object is deleted.
+}
+
+// Test the packet comparison operators.
+// operator<() should return true if the first packet "goes before" the second.
+TEST(PacketBuffer, ComparePackets) {
+ PacketGenerator gen(0, 0, 0, 10);
+ Packet a(gen.NextPacket(10, nullptr)); // SN = 0, TS = 0.
+ Packet b(gen.NextPacket(10, nullptr)); // SN = 1, TS = 10.
+ EXPECT_FALSE(a == b);
+ EXPECT_TRUE(a != b);
+ EXPECT_TRUE(a < b);
+ EXPECT_FALSE(a > b);
+ EXPECT_TRUE(a <= b);
+ EXPECT_FALSE(a >= b);
+
+ // Testing wrap-around case; 'a' is earlier but has a larger timestamp value.
+ a.timestamp = 0xFFFFFFFF - 10;
+ EXPECT_FALSE(a == b);
+ EXPECT_TRUE(a != b);
+ EXPECT_TRUE(a < b);
+ EXPECT_FALSE(a > b);
+ EXPECT_TRUE(a <= b);
+ EXPECT_FALSE(a >= b);
+
+ // Test equal packets.
+ EXPECT_TRUE(a == a);
+ EXPECT_FALSE(a != a);
+ EXPECT_FALSE(a < a);
+ EXPECT_FALSE(a > a);
+ EXPECT_TRUE(a <= a);
+ EXPECT_TRUE(a >= a);
+
+ // Test equal timestamps but different sequence numbers (0 and 1).
+ a.timestamp = b.timestamp;
+ EXPECT_FALSE(a == b);
+ EXPECT_TRUE(a != b);
+ EXPECT_TRUE(a < b);
+ EXPECT_FALSE(a > b);
+ EXPECT_TRUE(a <= b);
+ EXPECT_FALSE(a >= b);
+
+  // Test equal timestamps but different sequence numbers (65535 and 1).
+ a.sequence_number = 0xFFFF;
+ EXPECT_FALSE(a == b);
+ EXPECT_TRUE(a != b);
+ EXPECT_TRUE(a < b);
+ EXPECT_FALSE(a > b);
+ EXPECT_TRUE(a <= b);
+ EXPECT_FALSE(a >= b);
+
+ // Test equal timestamps and sequence numbers, but differing priorities.
+ a.sequence_number = b.sequence_number;
+ a.priority = {1, 0};
+ b.priority = {0, 0};
+ // a after b
+ EXPECT_FALSE(a == b);
+ EXPECT_TRUE(a != b);
+ EXPECT_FALSE(a < b);
+ EXPECT_TRUE(a > b);
+ EXPECT_FALSE(a <= b);
+ EXPECT_TRUE(a >= b);
+
+ Packet c(gen.NextPacket(0, nullptr)); // SN = 2, TS = 20.
+ Packet d(gen.NextPacket(0, nullptr)); // SN = 3, TS = 20.
+ c.timestamp = b.timestamp;
+ d.timestamp = b.timestamp;
+ c.sequence_number = b.sequence_number;
+ d.sequence_number = b.sequence_number;
+ c.priority = {1, 1};
+ d.priority = {0, 1};
+ // c after d
+ EXPECT_FALSE(c == d);
+ EXPECT_TRUE(c != d);
+ EXPECT_FALSE(c < d);
+ EXPECT_TRUE(c > d);
+ EXPECT_FALSE(c <= d);
+ EXPECT_TRUE(c >= d);
+
+ // c after a
+ EXPECT_FALSE(c == a);
+ EXPECT_TRUE(c != a);
+ EXPECT_FALSE(c < a);
+ EXPECT_TRUE(c > a);
+ EXPECT_FALSE(c <= a);
+ EXPECT_TRUE(c >= a);
+
+ // c after b
+ EXPECT_FALSE(c == b);
+ EXPECT_TRUE(c != b);
+ EXPECT_FALSE(c < b);
+ EXPECT_TRUE(c > b);
+ EXPECT_FALSE(c <= b);
+ EXPECT_TRUE(c >= b);
+
+ // a after d
+ EXPECT_FALSE(a == d);
+ EXPECT_TRUE(a != d);
+ EXPECT_FALSE(a < d);
+ EXPECT_TRUE(a > d);
+ EXPECT_FALSE(a <= d);
+ EXPECT_TRUE(a >= d);
+
+ // d after b
+ EXPECT_FALSE(d == b);
+ EXPECT_TRUE(d != b);
+ EXPECT_FALSE(d < b);
+ EXPECT_TRUE(d > b);
+ EXPECT_FALSE(d <= b);
+ EXPECT_TRUE(d >= b);
+}
+
+TEST(PacketBuffer, GetSpanSamples) {
+ constexpr size_t kFrameSizeSamples = 10;
+  constexpr int kPayloadSizeBytes = 1;  // Does not matter to this test.
+ constexpr uint32_t kStartTimeStamp = 0xFFFFFFFE; // Close to wrap around.
+ constexpr int kSampleRateHz = 48000;
+  constexpr bool kCountDtxWaitingTime = false;
+ TickTimer tick_timer;
+ PacketBuffer buffer(3, &tick_timer);
+ PacketGenerator gen(0, kStartTimeStamp, 0, kFrameSizeSamples);
+ StrictMock<MockStatisticsCalculator> mock_stats;
+ MockDecoderDatabase decoder_database;
+
+ Packet packet_1 = gen.NextPacket(kPayloadSizeBytes, nullptr);
+
+ std::unique_ptr<MockEncodedAudioFrame> mock_audio_frame =
+ std::make_unique<MockEncodedAudioFrame>();
+ EXPECT_CALL(*mock_audio_frame, Duration())
+ .WillRepeatedly(Return(kFrameSizeSamples));
+ Packet packet_2 =
+ gen.NextPacket(kPayloadSizeBytes, std::move(mock_audio_frame));
+
+  RTC_DCHECK_GT(packet_1.timestamp,
+                packet_2.timestamp);  // Timestamp wrapped around.
+
+ EXPECT_EQ(PacketBuffer::kOK,
+ buffer.InsertPacket(/*packet=*/std::move(packet_1),
+ /*stats=*/&mock_stats,
+ /*last_decoded_length=*/kFrameSizeSamples,
+ /*sample_rate=*/1000,
+ /*target_level_ms=*/60,
+ /*decoder_database=*/decoder_database));
+
+ constexpr size_t kLastDecodedSizeSamples = 2;
+  // packet_1 has no access to its duration, so the last decoded duration is
+  // used instead.
+ EXPECT_EQ(kLastDecodedSizeSamples,
+ buffer.GetSpanSamples(kLastDecodedSizeSamples, kSampleRateHz,
+                                  kCountDtxWaitingTime));
+
+ EXPECT_EQ(PacketBuffer::kOK,
+ buffer.InsertPacket(/*packet=*/std::move(packet_2),
+ /*stats=*/&mock_stats,
+ /*last_decoded_length=*/kFrameSizeSamples,
+ /*sample_rate=*/1000,
+ /*target_level_ms=*/60,
+ /*decoder_database=*/decoder_database));
+
+ EXPECT_EQ(kFrameSizeSamples * 2,
+            buffer.GetSpanSamples(0, kSampleRateHz, kCountDtxWaitingTime));
+
+  // packet_2 has access to its duration, so the last decoded duration is
+  // ignored.
+ EXPECT_EQ(kFrameSizeSamples * 2,
+ buffer.GetSpanSamples(kLastDecodedSizeSamples, kSampleRateHz,
+                                  kCountDtxWaitingTime));
+}
+
+namespace {
+void TestIsObsoleteTimestamp(uint32_t limit_timestamp) {
+ // Check with zero horizon, which implies that the horizon is at 2^31, i.e.,
+ // half the timestamp range.
+ static const uint32_t kZeroHorizon = 0;
+ static const uint32_t k2Pow31Minus1 = 0x7FFFFFFF;
+ // Timestamp on the limit is not old.
+ EXPECT_FALSE(PacketBuffer::IsObsoleteTimestamp(
+ limit_timestamp, limit_timestamp, kZeroHorizon));
+ // 1 sample behind is old.
+ EXPECT_TRUE(PacketBuffer::IsObsoleteTimestamp(limit_timestamp - 1,
+ limit_timestamp, kZeroHorizon));
+ // 2^31 - 1 samples behind is old.
+ EXPECT_TRUE(PacketBuffer::IsObsoleteTimestamp(limit_timestamp - k2Pow31Minus1,
+ limit_timestamp, kZeroHorizon));
+ // 1 sample ahead is not old.
+ EXPECT_FALSE(PacketBuffer::IsObsoleteTimestamp(
+ limit_timestamp + 1, limit_timestamp, kZeroHorizon));
+  // If the distance between t1 and t2 is exactly 2^31, the lower timestamp is
+  // considered obsolete relative to the higher one, but not the other way
+  // around.
+ uint32_t other_timestamp = limit_timestamp + (1 << 31);
+ uint32_t lowest_timestamp = std::min(limit_timestamp, other_timestamp);
+ uint32_t highest_timestamp = std::max(limit_timestamp, other_timestamp);
+ EXPECT_TRUE(PacketBuffer::IsObsoleteTimestamp(
+ lowest_timestamp, highest_timestamp, kZeroHorizon));
+ EXPECT_FALSE(PacketBuffer::IsObsoleteTimestamp(
+ highest_timestamp, lowest_timestamp, kZeroHorizon));
+
+ // Fixed horizon at 10 samples.
+ static const uint32_t kHorizon = 10;
+ // Timestamp on the limit is not old.
+ EXPECT_FALSE(PacketBuffer::IsObsoleteTimestamp(limit_timestamp,
+ limit_timestamp, kHorizon));
+ // 1 sample behind is old.
+ EXPECT_TRUE(PacketBuffer::IsObsoleteTimestamp(limit_timestamp - 1,
+ limit_timestamp, kHorizon));
+ // 9 samples behind is old.
+ EXPECT_TRUE(PacketBuffer::IsObsoleteTimestamp(limit_timestamp - 9,
+ limit_timestamp, kHorizon));
+ // 10 samples behind is not old.
+ EXPECT_FALSE(PacketBuffer::IsObsoleteTimestamp(limit_timestamp - 10,
+ limit_timestamp, kHorizon));
+ // 2^31 - 1 samples behind is not old.
+ EXPECT_FALSE(PacketBuffer::IsObsoleteTimestamp(
+ limit_timestamp - k2Pow31Minus1, limit_timestamp, kHorizon));
+ // 1 sample ahead is not old.
+ EXPECT_FALSE(PacketBuffer::IsObsoleteTimestamp(limit_timestamp + 1,
+ limit_timestamp, kHorizon));
+ // 2^31 samples ahead is not old.
+ EXPECT_FALSE(PacketBuffer::IsObsoleteTimestamp(limit_timestamp + (1 << 31),
+ limit_timestamp, kHorizon));
+}
+} // namespace
+
+// Test the IsObsoleteTimestamp method with different limit timestamps.
+TEST(PacketBuffer, IsObsoleteTimestamp) {
+ TestIsObsoleteTimestamp(0);
+ TestIsObsoleteTimestamp(1);
+ TestIsObsoleteTimestamp(0xFFFFFFFF); // -1 in uint32_t.
+ TestIsObsoleteTimestamp(0x80000000); // 2^31.
+ TestIsObsoleteTimestamp(0x80000001); // 2^31 + 1.
+ TestIsObsoleteTimestamp(0x7FFFFFFF); // 2^31 - 1.
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/post_decode_vad.cc b/third_party/libwebrtc/modules/audio_coding/neteq/post_decode_vad.cc
new file mode 100644
index 0000000000..9999d6764b
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/post_decode_vad.cc
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/post_decode_vad.h"
+
+namespace webrtc {
+
+PostDecodeVad::~PostDecodeVad() {
+ if (vad_instance_)
+ WebRtcVad_Free(vad_instance_);
+}
+
+void PostDecodeVad::Enable() {
+ if (!vad_instance_) {
+ // Create the instance.
+ vad_instance_ = WebRtcVad_Create();
+ if (vad_instance_ == nullptr) {
+ // Failed to create instance.
+ Disable();
+ return;
+ }
+ }
+ Init();
+ enabled_ = true;
+}
+
+void PostDecodeVad::Disable() {
+ enabled_ = false;
+ running_ = false;
+}
+
+void PostDecodeVad::Init() {
+ running_ = false;
+ if (vad_instance_) {
+ WebRtcVad_Init(vad_instance_);
+ WebRtcVad_set_mode(vad_instance_, kVadMode);
+ running_ = true;
+ }
+}
+
+void PostDecodeVad::Update(int16_t* signal,
+ size_t length,
+ AudioDecoder::SpeechType speech_type,
+ bool sid_frame,
+ int fs_hz) {
+ if (!vad_instance_ || !enabled_) {
+ return;
+ }
+
+ if (speech_type == AudioDecoder::kComfortNoise || sid_frame ||
+ fs_hz > 16000) {
+ // TODO(hlundin): Remove restriction on fs_hz.
+ running_ = false;
+ active_speech_ = true;
+ sid_interval_counter_ = 0;
+ } else if (!running_) {
+ ++sid_interval_counter_;
+ }
+
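+  // Automatically re-initialize (and thereby restart) the VAD after
+  // kVadAutoEnable consecutive updates without CNG/SID input while it has
+  // been switched off.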
+ if (sid_interval_counter_ >= kVadAutoEnable) {
+ Init();
+ }
+
+ if (length > 0 && running_) {
+ size_t vad_sample_index = 0;
+ active_speech_ = false;
+ // Loop through frame sizes 30, 20, and 10 ms.
+ for (int vad_frame_size_ms = 30; vad_frame_size_ms >= 10;
+ vad_frame_size_ms -= 10) {
+ size_t vad_frame_size_samples =
+ static_cast<size_t>(vad_frame_size_ms * fs_hz / 1000);
+ while (length - vad_sample_index >= vad_frame_size_samples) {
+ int vad_return =
+ WebRtcVad_Process(vad_instance_, fs_hz, &signal[vad_sample_index],
+ vad_frame_size_samples);
+ active_speech_ |= (vad_return == 1);
+ vad_sample_index += vad_frame_size_samples;
+ }
+ }
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/post_decode_vad.h b/third_party/libwebrtc/modules/audio_coding/neteq/post_decode_vad.h
new file mode 100644
index 0000000000..3bd91b9edb
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/post_decode_vad.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_POST_DECODE_VAD_H_
+#define MODULES_AUDIO_CODING_NETEQ_POST_DECODE_VAD_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "api/audio_codecs/audio_decoder.h"
+#include "common_audio/vad/include/webrtc_vad.h"
+
+namespace webrtc {
+
+class PostDecodeVad {
+ public:
+ PostDecodeVad()
+ : enabled_(false),
+ running_(false),
+ active_speech_(true),
+ sid_interval_counter_(0),
+ vad_instance_(NULL) {}
+
+ virtual ~PostDecodeVad();
+
+ PostDecodeVad(const PostDecodeVad&) = delete;
+ PostDecodeVad& operator=(const PostDecodeVad&) = delete;
+
+ // Enables post-decode VAD.
+ void Enable();
+
+ // Disables post-decode VAD.
+ void Disable();
+
+ // Initializes post-decode VAD.
+ void Init();
+
+  // Updates post-decode VAD with the audio data in `signal` having `length`
+  // samples. The data is of type `speech_type`, at the sample rate `fs_hz`.
+  // `sid_frame` must be true if the data stems from a SID (CNG) frame.
+ void Update(int16_t* signal,
+ size_t length,
+ AudioDecoder::SpeechType speech_type,
+ bool sid_frame,
+ int fs_hz);
+
+ // Accessors.
+ bool enabled() const { return enabled_; }
+ bool running() const { return running_; }
+ bool active_speech() const { return active_speech_; }
+
+ private:
+ static const int kVadMode = 0; // Sets aggressiveness to "Normal".
+ // Number of Update() calls without CNG/SID before re-enabling VAD.
+ static const int kVadAutoEnable = 3000;
+
+ bool enabled_;
+ bool running_;
+ bool active_speech_;
+ int sid_interval_counter_;
+ ::VadInst* vad_instance_;
+};
+
+} // namespace webrtc
+#endif // MODULES_AUDIO_CODING_NETEQ_POST_DECODE_VAD_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/post_decode_vad_unittest.cc b/third_party/libwebrtc/modules/audio_coding/neteq/post_decode_vad_unittest.cc
new file mode 100644
index 0000000000..da3e4e864e
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/post_decode_vad_unittest.cc
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+// Unit tests for PostDecodeVad class.
+
+#include "modules/audio_coding/neteq/post_decode_vad.h"
+
+#include "test/gtest.h"
+
+namespace webrtc {
+
+TEST(PostDecodeVad, CreateAndDestroy) {
+ PostDecodeVad vad;
+}
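+
+// An additional test sketch (not part of the original test set): exercises
+// the Enable/Disable state transitions via the public accessors. Assumes that
+// WebRtcVad_Create() succeeds in the test environment.
+TEST(PostDecodeVad, EnableDisable) {
+  PostDecodeVad vad;
+  // The VAD starts out disabled and not running.
+  EXPECT_FALSE(vad.enabled());
+  EXPECT_FALSE(vad.running());
+  // Enable() creates and initializes the VAD instance.
+  vad.Enable();
+  EXPECT_TRUE(vad.enabled());
+  EXPECT_TRUE(vad.running());
+  // Disable() stops the VAD but keeps the instance for later re-use.
+  vad.Disable();
+  EXPECT_FALSE(vad.enabled());
+  EXPECT_FALSE(vad.running());
+}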
+
+// TODO(hlundin): Write more tests.
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/preemptive_expand.cc b/third_party/libwebrtc/modules/audio_coding/neteq/preemptive_expand.cc
new file mode 100644
index 0000000000..232170b177
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/preemptive_expand.cc
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/preemptive_expand.h"
+
+#include <algorithm>
+
+#include "api/array_view.h"
+#include "modules/audio_coding/neteq/audio_multi_vector.h"
+#include "modules/audio_coding/neteq/time_stretch.h"
+
+namespace webrtc {
+
+PreemptiveExpand::ReturnCodes PreemptiveExpand::Process(
+ const int16_t* input,
+ size_t input_length,
+ size_t old_data_length,
+ AudioMultiVector* output,
+ size_t* length_change_samples) {
+ old_data_length_per_channel_ = old_data_length;
+ // Input length must be (almost) 30 ms.
+ // Also, the new part must be at least `overlap_samples_` elements.
+ static const size_t k15ms = 120; // 15 ms = 120 samples at 8 kHz sample rate.
+ if (num_channels_ == 0 ||
+ input_length / num_channels_ < (2 * k15ms - 1) * fs_mult_ ||
+ old_data_length >= input_length / num_channels_ - overlap_samples_) {
+ // Length of input data too short to do preemptive expand. Simply move all
+ // data from input to output.
+ output->PushBackInterleaved(
+ rtc::ArrayView<const int16_t>(input, input_length));
+ return kError;
+ }
+ const bool kFastMode = false; // Fast mode is not available for PE Expand.
+ return TimeStretch::Process(input, input_length, kFastMode, output,
+ length_change_samples);
+}
+
+void PreemptiveExpand::SetParametersForPassiveSpeech(size_t len,
+ int16_t* best_correlation,
+ size_t* peak_index) const {
+ // When the signal does not contain any active speech, the correlation does
+ // not matter. Simply set it to zero.
+ *best_correlation = 0;
+
+  // For low energy expansion, the new data can be less than 15 ms,
+  // but we must ensure that `peak_index` is not larger than the length of the
+  // new data.
+  *peak_index = std::min(*peak_index, len - old_data_length_per_channel_);
+}
+
+PreemptiveExpand::ReturnCodes PreemptiveExpand::CheckCriteriaAndStretch(
+ const int16_t* input,
+ size_t input_length,
+ size_t peak_index,
+ int16_t best_correlation,
+ bool active_speech,
+ bool /*fast_mode*/,
+ AudioMultiVector* output) const {
+ // Pre-calculate common multiplication with `fs_mult_`.
+ // 120 corresponds to 15 ms.
+ size_t fs_mult_120 = static_cast<size_t>(fs_mult_ * 120);
+ // Check for strong correlation (>0.9 in Q14) and at least 15 ms new data,
+ // or passive speech.
+ if (((best_correlation > kCorrelationThreshold) &&
+ (old_data_length_per_channel_ <= fs_mult_120)) ||
+ !active_speech) {
+    // Do the preemptive expand operation by overlap-add.
+
+ // Set length of the first part, not to be modified.
+ size_t unmodified_length =
+ std::max(old_data_length_per_channel_, fs_mult_120);
+ // Copy first part, including cross-fade region.
+ output->PushBackInterleaved(rtc::ArrayView<const int16_t>(
+ input, (unmodified_length + peak_index) * num_channels_));
+ // Copy the last `peak_index` samples up to 15 ms to `temp_vector`.
+ AudioMultiVector temp_vector(num_channels_);
+ temp_vector.PushBackInterleaved(rtc::ArrayView<const int16_t>(
+ &input[(unmodified_length - peak_index) * num_channels_],
+ peak_index * num_channels_));
+ // Cross-fade `temp_vector` onto the end of `output`.
+ output->CrossFade(temp_vector, peak_index);
+ // Copy the last unmodified part, 15 ms + pitch period until the end.
+ output->PushBackInterleaved(rtc::ArrayView<const int16_t>(
+ &input[unmodified_length * num_channels_],
+ input_length - unmodified_length * num_channels_));
+
+ if (active_speech) {
+ return kSuccess;
+ } else {
+ return kSuccessLowEnergy;
+ }
+ } else {
+    // Preemptive expand not allowed. Simply move all data from input to
+    // output.
+ output->PushBackInterleaved(
+ rtc::ArrayView<const int16_t>(input, input_length));
+ return kNoStretch;
+ }
+}
+
+PreemptiveExpand* PreemptiveExpandFactory::Create(
+ int sample_rate_hz,
+ size_t num_channels,
+ const BackgroundNoise& background_noise,
+ size_t overlap_samples) const {
+ return new PreemptiveExpand(sample_rate_hz, num_channels, background_noise,
+ overlap_samples);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/preemptive_expand.h b/third_party/libwebrtc/modules/audio_coding/neteq/preemptive_expand.h
new file mode 100644
index 0000000000..6338b993fd
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/preemptive_expand.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_PREEMPTIVE_EXPAND_H_
+#define MODULES_AUDIO_CODING_NETEQ_PREEMPTIVE_EXPAND_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "modules/audio_coding/neteq/time_stretch.h"
+
+namespace webrtc {
+
+class AudioMultiVector;
+class BackgroundNoise;
+
+// This class implements the PreemptiveExpand operation. Most of the work is
+// done in the base class TimeStretch, which is shared with the Accelerate
+// operation. In the PreemptiveExpand class, the operations that are specific to
+// PreemptiveExpand are implemented.
+class PreemptiveExpand : public TimeStretch {
+ public:
+ PreemptiveExpand(int sample_rate_hz,
+ size_t num_channels,
+ const BackgroundNoise& background_noise,
+ size_t overlap_samples)
+ : TimeStretch(sample_rate_hz, num_channels, background_noise),
+ old_data_length_per_channel_(0),
+ overlap_samples_(overlap_samples) {}
+
+ PreemptiveExpand(const PreemptiveExpand&) = delete;
+ PreemptiveExpand& operator=(const PreemptiveExpand&) = delete;
+
+ // This method performs the actual PreemptiveExpand operation. The samples are
+ // read from `input`, of length `input_length` elements, and are written to
+  // `output`. The number of samples added through time-stretching is provided
+  // in the output parameter `length_change_samples`. The method returns the
+  // outcome of the operation as an enumerator value.
+  ReturnCodes Process(const int16_t* input,
+                      size_t input_length,
+                      size_t old_data_length,
+                      AudioMultiVector* output,
+                      size_t* length_change_samples);
+
+ protected:
+ // Sets the parameters `best_correlation` and `peak_index` to suitable
+ // values when the signal contains no active speech.
+ void SetParametersForPassiveSpeech(size_t input_length,
+ int16_t* best_correlation,
+ size_t* peak_index) const override;
+
+ // Checks the criteria for performing the time-stretching operation and,
+ // if possible, performs the time-stretching.
+ ReturnCodes CheckCriteriaAndStretch(const int16_t* input,
+ size_t input_length,
+ size_t peak_index,
+ int16_t best_correlation,
+ bool active_speech,
+ bool /*fast_mode*/,
+ AudioMultiVector* output) const override;
+
+ private:
+ size_t old_data_length_per_channel_;
+ size_t overlap_samples_;
+};
+
+struct PreemptiveExpandFactory {
+ PreemptiveExpandFactory() {}
+ virtual ~PreemptiveExpandFactory() {}
+
+ virtual PreemptiveExpand* Create(int sample_rate_hz,
+ size_t num_channels,
+ const BackgroundNoise& background_noise,
+ size_t overlap_samples) const;
+};
+
+} // namespace webrtc
+#endif // MODULES_AUDIO_CODING_NETEQ_PREEMPTIVE_EXPAND_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/random_vector.cc b/third_party/libwebrtc/modules/audio_coding/neteq/random_vector.cc
new file mode 100644
index 0000000000..ada175831c
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/random_vector.cc
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/random_vector.h"
+
+namespace webrtc {
+
+const int16_t RandomVector::kRandomTable[RandomVector::kRandomTableSize] = {
+ 2680, 5532, 441, 5520, 16170, -5146, -1024, -8733, 3115,
+ 9598, -10380, -4959, -1280, -21716, 7133, -1522, 13458, -3902,
+ 2789, -675, 3441, 5016, -13599, -4003, -2739, 3922, -7209,
+ 13352, -11617, -7241, 12905, -2314, 5426, 10121, -9702, 11207,
+ -13542, 1373, 816, -5934, -12504, 4798, 1811, 4112, -613,
+ 201, -10367, -2960, -2419, 3442, 4299, -6116, -6092, 1552,
+ -1650, -480, -1237, 18720, -11858, -8303, -8212, 865, -2890,
+ -16968, 12052, -5845, -5912, 9777, -5665, -6294, 5426, -4737,
+ -6335, 1652, 761, 3832, 641, -8552, -9084, -5753, 8146,
+ 12156, -4915, 15086, -1231, -1869, 11749, -9319, -6403, 11407,
+ 6232, -1683, 24340, -11166, 4017, -10448, 3153, -2936, 6212,
+ 2891, -866, -404, -4807, -2324, -1917, -2388, -6470, -3895,
+ -10300, 5323, -5403, 2205, 4640, 7022, -21186, -6244, -882,
+ -10031, -3395, -12885, 7155, -5339, 5079, -2645, -9515, 6622,
+ 14651, 15852, 359, 122, 8246, -3502, -6696, -3679, -13535,
+ -1409, -704, -7403, -4007, 1798, 279, -420, -12796, -14219,
+ 1141, 3359, 11434, 7049, -6684, -7473, 14283, -4115, -9123,
+ -8969, 4152, 4117, 13792, 5742, 16168, 8661, -1609, -6095,
+ 1881, 14380, -5588, 6758, -6425, -22969, -7269, 7031, 1119,
+ -1611, -5850, -11281, 3559, -8952, -10146, -4667, -16251, -1538,
+ 2062, -1012, -13073, 227, -3142, -5265, 20, 5770, -7559,
+ 4740, -4819, 992, -8208, -7130, -4652, 6725, 7369, -1036,
+ 13144, -1588, -5304, -2344, -449, -5705, -8894, 5205, -17904,
+ -11188, -1022, 4852, 10101, -5255, -4200, -752, 7941, -1543,
+ 5959, 14719, 13346, 17045, -15605, -1678, -1600, -9230, 68,
+ 23348, 1172, 7750, 11212, -18227, 9956, 4161, 883, 3947,
+ 4341, 1014, -4889, -2603, 1246, -5630, -3596, -870, -1298,
+ 2784, -3317, -6612, -20541, 4166, 4181, -8625, 3562, 12890,
+ 4761, 3205, -12259, -8579};
+
+void RandomVector::Reset() {
+ seed_ = 777;
+ seed_increment_ = 1;
+}
+
+void RandomVector::Generate(size_t length, int16_t* output) {
+ for (size_t i = 0; i < length; i++) {
+ seed_ += seed_increment_;
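+    // kRandomTableSize is a power of two, so masking with (size - 1) is
+    // equivalent to taking the seed modulo the table size.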
+ size_t position = seed_ & (kRandomTableSize - 1);
+ output[i] = kRandomTable[position];
+ }
+}
+
+void RandomVector::IncreaseSeedIncrement(int16_t increase_by) {
+ seed_increment_ += increase_by;
+ seed_increment_ &= kRandomTableSize - 1;
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/random_vector.h b/third_party/libwebrtc/modules/audio_coding/neteq/random_vector.h
new file mode 100644
index 0000000000..4a782f1116
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/random_vector.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_RANDOM_VECTOR_H_
+#define MODULES_AUDIO_CODING_NETEQ_RANDOM_VECTOR_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+namespace webrtc {
+
+// This class generates pseudo-random samples.
+class RandomVector {
+ public:
+ static const size_t kRandomTableSize = 256;
+ static const int16_t kRandomTable[kRandomTableSize];
+
+ RandomVector() : seed_(777), seed_increment_(1) {}
+
+ RandomVector(const RandomVector&) = delete;
+ RandomVector& operator=(const RandomVector&) = delete;
+
+ void Reset();
+
+ void Generate(size_t length, int16_t* output);
+
+ void IncreaseSeedIncrement(int16_t increase_by);
+
+ // Accessors and mutators.
+ int16_t seed_increment() { return seed_increment_; }
+ void set_seed_increment(int16_t value) { seed_increment_ = value; }
+
+ private:
+ uint32_t seed_;
+ int16_t seed_increment_;
+};
+
+} // namespace webrtc
+#endif // MODULES_AUDIO_CODING_NETEQ_RANDOM_VECTOR_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/random_vector_unittest.cc b/third_party/libwebrtc/modules/audio_coding/neteq/random_vector_unittest.cc
new file mode 100644
index 0000000000..44479a6dd6
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/random_vector_unittest.cc
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+// Unit tests for RandomVector class.
+
+#include "modules/audio_coding/neteq/random_vector.h"
+
+#include "test/gtest.h"
+
+namespace webrtc {
+
+TEST(RandomVector, CreateAndDestroy) {
+ RandomVector random_vector;
+}
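+
+// An additional test sketch (not part of the original test set): verifies
+// that Reset() restores the generator to a known state, so that two runs from
+// the same state produce identical output.
+TEST(RandomVector, ResetGivesDeterministicOutput) {
+  RandomVector random_vector;
+  constexpr size_t kLength = 32;
+  int16_t first[kLength];
+  int16_t second[kLength];
+  // Generate twice from the same (reset) state.
+  random_vector.Reset();
+  random_vector.Generate(kLength, first);
+  random_vector.Reset();
+  random_vector.Generate(kLength, second);
+  for (size_t i = 0; i < kLength; ++i) {
+    EXPECT_EQ(first[i], second[i]);
+  }
+}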
+
+// TODO(hlundin): Write more tests.
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/red_payload_splitter.cc b/third_party/libwebrtc/modules/audio_coding/neteq/red_payload_splitter.cc
new file mode 100644
index 0000000000..7438f25301
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/red_payload_splitter.cc
@@ -0,0 +1,190 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/red_payload_splitter.h"
+
+#include <stddef.h>
+
+#include <cstdint>
+#include <list>
+#include <utility>
+#include <vector>
+
+#include "modules/audio_coding/neteq/decoder_database.h"
+#include "modules/audio_coding/neteq/packet.h"
+#include "rtc_base/buffer.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/numerics/safe_conversions.h"
+
+namespace webrtc {
+
+// The method loops through a list of packets {A, B, C, ...}. Each packet is
+// split into its corresponding RED payloads, {A1, A2, ...}, which is
+// temporarily held in the list `new_packets`.
+// When the first packet in `packet_list` has been processed, the original
+// packet is replaced by the new ones in `new_packets`, so that `packet_list`
+// becomes: {A1, A2, ..., B, C, ...}. The method then continues with B, and C,
+// until all the original packets have been replaced by their split payloads.
+bool RedPayloadSplitter::SplitRed(PacketList* packet_list) {
+ // Too many RED blocks indicates that something is wrong. Clamp it at some
+ // reasonable value.
+ const size_t kMaxRedBlocks = 32;
+ bool ret = true;
+ PacketList::iterator it = packet_list->begin();
+ while (it != packet_list->end()) {
+ const Packet& red_packet = *it;
+ RTC_DCHECK(!red_packet.payload.empty());
+ const uint8_t* payload_ptr = red_packet.payload.data();
+ size_t payload_length = red_packet.payload.size();
+
+ // Read RED headers (according to RFC 2198):
+ //
+ // 0 1 2 3
+ // 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ // |F| block PT | timestamp offset | block length |
+ // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ // Last RED header:
+ // 0 1 2 3 4 5 6 7
+ // +-+-+-+-+-+-+-+-+
+ // |0| Block PT |
+ // +-+-+-+-+-+-+-+-+
+
+ struct RedHeader {
+ uint8_t payload_type;
+ uint32_t timestamp;
+ size_t payload_length;
+ };
+
+ std::vector<RedHeader> new_headers;
+ bool last_block = false;
+ size_t sum_length = 0;
+ while (!last_block) {
+ if (payload_length == 0) {
+ RTC_LOG(LS_WARNING) << "SplitRed header too short";
+ return false;
+ }
+ RedHeader new_header;
+ // Check the F bit. If F == 0, this was the last block.
+ last_block = ((*payload_ptr & 0x80) == 0);
+ // Bits 1 through 7 are payload type.
+ new_header.payload_type = payload_ptr[0] & 0x7F;
+ if (last_block) {
+ // No more header data to read.
+ sum_length += kRedLastHeaderLength; // Account for RED header size.
+ new_header.timestamp = red_packet.timestamp;
+ new_header.payload_length = red_packet.payload.size() - sum_length;
+ payload_ptr += kRedLastHeaderLength; // Advance to first payload byte.
+ payload_length -= kRedLastHeaderLength;
+ } else {
+ if (payload_length < kRedHeaderLength) {
+ RTC_LOG(LS_WARNING) << "SplitRed header too short";
+ return false;
+ }
+ // Bits 8 through 21 are timestamp offset.
+ int timestamp_offset =
+ (payload_ptr[1] << 6) + ((payload_ptr[2] & 0xFC) >> 2);
+ new_header.timestamp = red_packet.timestamp - timestamp_offset;
+ // Bits 22 through 31 are payload length.
+ new_header.payload_length =
+ ((payload_ptr[2] & 0x03) << 8) + payload_ptr[3];
+
+ sum_length += new_header.payload_length;
+ sum_length += kRedHeaderLength; // Account for RED header size.
+
+ payload_ptr += kRedHeaderLength; // Advance to next RED header.
+ payload_length -= kRedHeaderLength;
+ }
+ // Store in new list of packets.
+ if (new_header.payload_length > 0) {
+ new_headers.push_back(new_header);
+ }
+ }
+
+ if (new_headers.size() <= kMaxRedBlocks) {
+ // Populate the new packets with payload data.
+ // `payload_ptr` now points at the first payload byte.
+ PacketList new_packets; // An empty list to store the split packets in.
+ for (size_t i = 0; i != new_headers.size(); ++i) {
+ const auto& new_header = new_headers[i];
+ size_t payload_length = new_header.payload_length;
+ if (payload_ptr + payload_length >
+ red_packet.payload.data() + red_packet.payload.size()) {
+ // The block lengths in the RED headers do not match the overall
+ // packet length. Something is corrupt. Discard this and the remaining
+ // payloads from this packet.
+ RTC_LOG(LS_WARNING) << "SplitRed length mismatch";
+ ret = false;
+ break;
+ }
+
+ Packet new_packet;
+ new_packet.timestamp = new_header.timestamp;
+ new_packet.payload_type = new_header.payload_type;
+ new_packet.sequence_number = red_packet.sequence_number;
+ new_packet.priority.red_level =
+ rtc::dchecked_cast<int>((new_headers.size() - 1) - i);
+ new_packet.payload.SetData(payload_ptr, payload_length);
+ new_packet.packet_info = RtpPacketInfo(
+ /*ssrc=*/red_packet.packet_info.ssrc(),
+ /*csrcs=*/std::vector<uint32_t>(),
+ /*rtp_timestamp=*/new_packet.timestamp,
+ red_packet.packet_info.audio_level(),
+ /*absolute_capture_time=*/absl::nullopt,
+ /*receive_time=*/red_packet.packet_info.receive_time());
+ new_packets.push_front(std::move(new_packet));
+ payload_ptr += payload_length;
+ }
+ // Insert new packets into original list, before the element pointed to by
+ // iterator `it`.
+ packet_list->splice(it, std::move(new_packets));
+ } else {
+ RTC_LOG(LS_WARNING) << "SplitRed too many blocks: " << new_headers.size();
+ ret = false;
+ }
+ // Remove `it` from the packet list. This operation effectively moves the
+ // iterator `it` to the next packet in the list. Thus, we do not have to
+ // increment it manually.
+ it = packet_list->erase(it);
+ }
+ return ret;
+}
+
+void RedPayloadSplitter::CheckRedPayloads(
+ PacketList* packet_list,
+ const DecoderDatabase& decoder_database) {
+ int main_payload_type = -1;
+ for (auto it = packet_list->begin(); it != packet_list->end(); /* */) {
+ uint8_t this_payload_type = it->payload_type;
+ if (decoder_database.IsRed(this_payload_type)) {
+ it = packet_list->erase(it);
+ continue;
+ }
+ if (!decoder_database.IsDtmf(this_payload_type) &&
+ !decoder_database.IsComfortNoise(this_payload_type)) {
+ if (main_payload_type == -1) {
+ // This is the first packet in the list which is non-DTMF non-CNG.
+ main_payload_type = this_payload_type;
+ } else {
+ if (this_payload_type != main_payload_type) {
+ // We do not allow redundant payloads of a different type.
+ // Remove `it` from the packet list. This operation effectively
+ // moves the iterator `it` to the next packet in the list. Thus, we
+ // do not have to increment it manually.
+ it = packet_list->erase(it);
+ continue;
+ }
+ }
+ }
+ ++it;
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/red_payload_splitter.h b/third_party/libwebrtc/modules/audio_coding/neteq/red_payload_splitter.h
new file mode 100644
index 0000000000..2f48e4b7d4
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/red_payload_splitter.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_RED_PAYLOAD_SPLITTER_H_
+#define MODULES_AUDIO_CODING_NETEQ_RED_PAYLOAD_SPLITTER_H_
+
+#include "modules/audio_coding/neteq/packet.h"
+
+namespace webrtc {
+
+class DecoderDatabase;
+
+static const size_t kRedHeaderLength = 4;  // 4-byte RED header.
+// The last RED header in a packet is reduced to a single byte.
+static const size_t kRedLastHeaderLength = 1;
+
+// This class handles splitting of RED payloads into smaller parts.
+// Codec-specific packet splitting can be performed by
+// AudioDecoder::ParsePayload.
+class RedPayloadSplitter {
+ public:
+ RedPayloadSplitter() {}
+
+ virtual ~RedPayloadSplitter() {}
+
+ RedPayloadSplitter(const RedPayloadSplitter&) = delete;
+ RedPayloadSplitter& operator=(const RedPayloadSplitter&) = delete;
+
+ // Splits each packet in `packet_list` into its separate RED payloads. Each
+ // RED payload is packetized into a Packet. The original elements in
+ // `packet_list` are properly deleted, and replaced by the new packets.
+ // Note that all packets in `packet_list` must be RED payloads, i.e., have
+ // RED headers according to RFC 2198 at the very beginning of the payload.
+  // Returns true on success, false if any packet was malformed.
+ virtual bool SplitRed(PacketList* packet_list);
+
+  // Checks all packets in `packet_list`. RED payloads are discarded. Packets
+  // that are DTMF events or comfort noise payloads are kept. Apart from
+  // those, only a single payload type is accepted; any packet with a
+  // different payload type is discarded.
+ virtual void CheckRedPayloads(PacketList* packet_list,
+ const DecoderDatabase& decoder_database);
+};
+
+} // namespace webrtc
+#endif // MODULES_AUDIO_CODING_NETEQ_RED_PAYLOAD_SPLITTER_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/red_payload_splitter_unittest.cc b/third_party/libwebrtc/modules/audio_coding/neteq/red_payload_splitter_unittest.cc
new file mode 100644
index 0000000000..a0ba5414ea
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/red_payload_splitter_unittest.cc
@@ -0,0 +1,390 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+// Unit tests for RedPayloadSplitter class.
+
+#include "modules/audio_coding/neteq/red_payload_splitter.h"
+
+#include <memory>
+#include <utility> // pair
+
+#include "api/audio_codecs/builtin_audio_decoder_factory.h"
+#include "modules/audio_coding/neteq/decoder_database.h"
+#include "modules/audio_coding/neteq/packet.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "test/gtest.h"
+#include "test/mock_audio_decoder_factory.h"
+
+using ::testing::Return;
+using ::testing::ReturnNull;
+
+namespace webrtc {
+
+static const int kRedPayloadType = 100;
+static const size_t kPayloadLength = 10;
+static const uint16_t kSequenceNumber = 0;
+static const uint32_t kBaseTimestamp = 0x12345678;
+
+// A possible Opus packet that contains FEC is the following.
+// The frame is 20 ms in duration.
+//
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |0|0|0|0|1|0|0|0|x|1|x|x|x|x|x|x|x| |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
+// | Compressed frame 1 (N-2 bytes)... :
+// : |
+// | |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+void CreateOpusFecPayload(uint8_t* payload,
+ size_t payload_length,
+ uint8_t payload_value) {
+ if (payload_length < 2) {
+ return;
+ }
+ payload[0] = 0x08;
+ payload[1] = 0x40;
+ memset(&payload[2], payload_value, payload_length - 2);
+}
+
+// RED headers (according to RFC 2198):
+//
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |F| block PT | timestamp offset | block length |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//
+// Last RED header:
+// 0 1 2 3 4 5 6 7
+// +-+-+-+-+-+-+-+-+
+// |0| Block PT |
+// +-+-+-+-+-+-+-+-+
+
+// Creates a RED packet, with `num_payloads` payloads, with payload types given
+// by the values in array `payload_types` (which must be of length
+// `num_payloads`). Each redundant payload is `timestamp_offset` samples
+// "behind" the previous payload.
+Packet CreateRedPayload(size_t num_payloads,
+ uint8_t* payload_types,
+ int timestamp_offset,
+ bool embed_opus_fec = false) {
+ Packet packet;
+ packet.payload_type = kRedPayloadType;
+ packet.timestamp = kBaseTimestamp;
+ packet.sequence_number = kSequenceNumber;
+ packet.payload.SetSize((kPayloadLength + 1) +
+ (num_payloads - 1) *
+ (kPayloadLength + kRedHeaderLength));
+ uint8_t* payload_ptr = packet.payload.data();
+ for (size_t i = 0; i < num_payloads; ++i) {
+ // Write the RED headers.
+ if (i == num_payloads - 1) {
+ // Special case for last payload.
+      *payload_ptr = payload_types[i] & 0x7F;  // F = 0.
+ ++payload_ptr;
+ break;
+ }
+ *payload_ptr = payload_types[i] & 0x7F;
+ // Not the last block; set F = 1.
+ *payload_ptr |= 0x80;
+ ++payload_ptr;
+ int this_offset =
+ rtc::checked_cast<int>((num_payloads - i - 1) * timestamp_offset);
+ *payload_ptr = this_offset >> 6;
+ ++payload_ptr;
+ RTC_DCHECK_LE(kPayloadLength, 1023); // Max length described by 10 bits.
+ *payload_ptr = ((this_offset & 0x3F) << 2) | (kPayloadLength >> 8);
+ ++payload_ptr;
+ *payload_ptr = kPayloadLength & 0xFF;
+ ++payload_ptr;
+ }
+ for (size_t i = 0; i < num_payloads; ++i) {
+ // Write `i` to all bytes in each payload.
+ if (embed_opus_fec) {
+ CreateOpusFecPayload(payload_ptr, kPayloadLength,
+ static_cast<uint8_t>(i));
+ } else {
+ memset(payload_ptr, static_cast<int>(i), kPayloadLength);
+ }
+ payload_ptr += kPayloadLength;
+ }
+ return packet;
+}
+
+// Create a packet with all payload bytes set to `payload_value`.
+Packet CreatePacket(uint8_t payload_type,
+ size_t payload_length,
+ uint8_t payload_value,
+ bool opus_fec = false) {
+ Packet packet;
+ packet.payload_type = payload_type;
+ packet.timestamp = kBaseTimestamp;
+ packet.sequence_number = kSequenceNumber;
+ packet.payload.SetSize(payload_length);
+ if (opus_fec) {
+ CreateOpusFecPayload(packet.payload.data(), packet.payload.size(),
+ payload_value);
+ } else {
+ memset(packet.payload.data(), payload_value, packet.payload.size());
+ }
+ return packet;
+}
+
+// Checks that `packet` has the attributes given in the remaining parameters.
+void VerifyPacket(const Packet& packet,
+ size_t payload_length,
+ uint8_t payload_type,
+ uint16_t sequence_number,
+ uint32_t timestamp,
+ uint8_t payload_value,
+ Packet::Priority priority) {
+ EXPECT_EQ(payload_length, packet.payload.size());
+ EXPECT_EQ(payload_type, packet.payload_type);
+ EXPECT_EQ(sequence_number, packet.sequence_number);
+ EXPECT_EQ(timestamp, packet.timestamp);
+ EXPECT_EQ(priority, packet.priority);
+ ASSERT_FALSE(packet.payload.empty());
+ for (size_t i = 0; i < packet.payload.size(); ++i) {
+ ASSERT_EQ(payload_value, packet.payload.data()[i]);
+ }
+}
+
+void VerifyPacket(const Packet& packet,
+ size_t payload_length,
+ uint8_t payload_type,
+ uint16_t sequence_number,
+ uint32_t timestamp,
+ uint8_t payload_value,
+ bool primary) {
+ return VerifyPacket(packet, payload_length, payload_type, sequence_number,
+ timestamp, payload_value,
+ Packet::Priority{0, primary ? 0 : 1});
+}
+
+// Start of test definitions.
+
+TEST(RedPayloadSplitter, CreateAndDestroy) {
+ RedPayloadSplitter* splitter = new RedPayloadSplitter;
+ delete splitter;
+}
+
+// Packet A is split into A1 and A2.
+TEST(RedPayloadSplitter, OnePacketTwoPayloads) {
+ uint8_t payload_types[] = {0, 0};
+ const int kTimestampOffset = 160;
+ PacketList packet_list;
+ packet_list.push_back(CreateRedPayload(2, payload_types, kTimestampOffset));
+ RedPayloadSplitter splitter;
+ EXPECT_TRUE(splitter.SplitRed(&packet_list));
+ ASSERT_EQ(2u, packet_list.size());
+  // Check the first packet. The first in the list should always be the
+  // primary payload.
+ VerifyPacket(packet_list.front(), kPayloadLength, payload_types[1],
+ kSequenceNumber, kBaseTimestamp, 1, true);
+ packet_list.pop_front();
+ // Check second packet.
+ VerifyPacket(packet_list.front(), kPayloadLength, payload_types[0],
+ kSequenceNumber, kBaseTimestamp - kTimestampOffset, 0, false);
+}
+
+// Packets A and B are not split at all. Only the RED header in each packet is
+// removed.
+TEST(RedPayloadSplitter, TwoPacketsOnePayload) {
+ uint8_t payload_types[] = {0};
+ const int kTimestampOffset = 160;
+ // Create first packet, with a single RED payload.
+ PacketList packet_list;
+ packet_list.push_back(CreateRedPayload(1, payload_types, kTimestampOffset));
+ // Create second packet, with a single RED payload.
+ {
+ Packet packet = CreateRedPayload(1, payload_types, kTimestampOffset);
+ // Manually change timestamp and sequence number of second packet.
+ packet.timestamp += kTimestampOffset;
+ packet.sequence_number++;
+ packet_list.push_back(std::move(packet));
+ }
+ RedPayloadSplitter splitter;
+ EXPECT_TRUE(splitter.SplitRed(&packet_list));
+ ASSERT_EQ(2u, packet_list.size());
+ // Check first packet.
+ VerifyPacket(packet_list.front(), kPayloadLength, payload_types[0],
+ kSequenceNumber, kBaseTimestamp, 0, true);
+ packet_list.pop_front();
+ // Check second packet.
+ VerifyPacket(packet_list.front(), kPayloadLength, payload_types[0],
+ kSequenceNumber + 1, kBaseTimestamp + kTimestampOffset, 0, true);
+}
+
+// Packets A and B are split into packets A1, A2, A3, B1, B2, B3, with
+// attributes as follows:
+//
+// A1* A2 A3 B1* B2 B3
+// Payload type 0 1 2 0 1 2
+// Timestamp b b-o b-2o b+o b b-o
+// Sequence number 0 0 0 1 1 1
+//
+// b = kBaseTimestamp, o = kTimestampOffset, * = primary.
+TEST(RedPayloadSplitter, TwoPacketsThreePayloads) {
+ uint8_t payload_types[] = {2, 1, 0}; // Primary is the last one.
+ const int kTimestampOffset = 160;
+ // Create first packet, with 3 RED payloads.
+ PacketList packet_list;
+ packet_list.push_back(CreateRedPayload(3, payload_types, kTimestampOffset));
+  // Create second packet, also with 3 RED payloads.
+ {
+ Packet packet = CreateRedPayload(3, payload_types, kTimestampOffset);
+ // Manually change timestamp and sequence number of second packet.
+ packet.timestamp += kTimestampOffset;
+ packet.sequence_number++;
+ packet_list.push_back(std::move(packet));
+ }
+ RedPayloadSplitter splitter;
+ EXPECT_TRUE(splitter.SplitRed(&packet_list));
+ ASSERT_EQ(6u, packet_list.size());
+ // Check first packet, A1.
+ VerifyPacket(packet_list.front(), kPayloadLength, payload_types[2],
+ kSequenceNumber, kBaseTimestamp, 2, {0, 0});
+ packet_list.pop_front();
+ // Check second packet, A2.
+ VerifyPacket(packet_list.front(), kPayloadLength, payload_types[1],
+ kSequenceNumber, kBaseTimestamp - kTimestampOffset, 1, {0, 1});
+ packet_list.pop_front();
+ // Check third packet, A3.
+ VerifyPacket(packet_list.front(), kPayloadLength, payload_types[0],
+ kSequenceNumber, kBaseTimestamp - 2 * kTimestampOffset, 0,
+ {0, 2});
+ packet_list.pop_front();
+ // Check fourth packet, B1.
+ VerifyPacket(packet_list.front(), kPayloadLength, payload_types[2],
+ kSequenceNumber + 1, kBaseTimestamp + kTimestampOffset, 2,
+ {0, 0});
+ packet_list.pop_front();
+ // Check fifth packet, B2.
+ VerifyPacket(packet_list.front(), kPayloadLength, payload_types[1],
+ kSequenceNumber + 1, kBaseTimestamp, 1, {0, 1});
+ packet_list.pop_front();
+ // Check sixth packet, B3.
+ VerifyPacket(packet_list.front(), kPayloadLength, payload_types[0],
+ kSequenceNumber + 1, kBaseTimestamp - kTimestampOffset, 0,
+ {0, 2});
+}
+
+// Creates a list with 4 packets with these payload types:
+// 0 = CNGnb
+// 1 = PCMu
+// 2 = DTMF (AVT)
+// 3 = iLBC
+// We expect the method CheckRedPayloads to discard the iLBC packet, since it
+// is a non-CNG, non-DTMF payload of another type than the first speech payload
+// found in the list (which is PCMu).
+TEST(RedPayloadSplitter, CheckRedPayloads) {
+ PacketList packet_list;
+ for (uint8_t i = 0; i <= 3; ++i) {
+ // Create packet with payload type `i`, payload length 10 bytes, all 0.
+ packet_list.push_back(CreatePacket(i, 10, 0));
+ }
+
+ // Use a real DecoderDatabase object here instead of a mock, since it is
+ // easier to just register the payload types and let the actual implementation
+ // do its job.
+ DecoderDatabase decoder_database(
+ rtc::make_ref_counted<MockAudioDecoderFactory>(), absl::nullopt);
+ decoder_database.RegisterPayload(0, SdpAudioFormat("cn", 8000, 1));
+ decoder_database.RegisterPayload(1, SdpAudioFormat("pcmu", 8000, 1));
+ decoder_database.RegisterPayload(2,
+ SdpAudioFormat("telephone-event", 8000, 1));
+ decoder_database.RegisterPayload(3, SdpAudioFormat("ilbc", 8000, 1));
+
+ RedPayloadSplitter splitter;
+ splitter.CheckRedPayloads(&packet_list, decoder_database);
+
+ ASSERT_EQ(3u, packet_list.size()); // Should have dropped the last packet.
+ // Verify packets. The loop verifies that payload types 0, 1, and 2 are in the
+ // list.
+ for (int i = 0; i <= 2; ++i) {
+ VerifyPacket(packet_list.front(), 10, i, kSequenceNumber, kBaseTimestamp, 0,
+ true);
+ packet_list.pop_front();
+ }
+ EXPECT_TRUE(packet_list.empty());
+}
+
+// This test creates a RED packet where the payloads also have the payload type
+// for RED, i.e., a nested RED packet. Nesting is not supported, and the
+// splitter should discard all packets.
+TEST(RedPayloadSplitter, CheckRedPayloadsRecursiveRed) {
+ PacketList packet_list;
+ for (uint8_t i = 0; i <= 3; ++i) {
+ // Create packet with RED payload type, payload length 10 bytes, all 0.
+ packet_list.push_back(CreatePacket(kRedPayloadType, 10, 0));
+ }
+
+ // Use a real DecoderDatabase object here instead of a mock, since it is
+ // easier to just register the payload types and let the actual implementation
+ // do its job.
+ DecoderDatabase decoder_database(
+ rtc::make_ref_counted<MockAudioDecoderFactory>(), absl::nullopt);
+ decoder_database.RegisterPayload(kRedPayloadType,
+ SdpAudioFormat("red", 8000, 1));
+
+ RedPayloadSplitter splitter;
+ splitter.CheckRedPayloads(&packet_list, decoder_database);
+
+ EXPECT_TRUE(packet_list.empty()); // Should have dropped all packets.
+}
+
+// Packet A is split into A1, A2 and A3, but the length parameter is off, so
+// the last two payloads should be discarded.
+TEST(RedPayloadSplitter, WrongPayloadLength) {
+ uint8_t payload_types[] = {0, 0, 0};
+ const int kTimestampOffset = 160;
+ PacketList packet_list;
+ {
+ Packet packet = CreateRedPayload(3, payload_types, kTimestampOffset);
+ // Manually tamper with the payload length of the packet.
+ // This is one byte too short for the second payload (out of three).
+ // We expect only the first payload to be returned.
+ packet.payload.SetSize(packet.payload.size() - (kPayloadLength + 1));
+ packet_list.push_back(std::move(packet));
+ }
+ RedPayloadSplitter splitter;
+ EXPECT_FALSE(splitter.SplitRed(&packet_list));
+ ASSERT_EQ(1u, packet_list.size());
+ // Check first packet.
+ VerifyPacket(packet_list.front(), kPayloadLength, payload_types[0],
+ kSequenceNumber, kBaseTimestamp - 2 * kTimestampOffset, 0,
+ {0, 2});
+ packet_list.pop_front();
+}
+
+// Test that we reject packets too short to contain a RED header.
+TEST(RedPayloadSplitter, RejectsIncompleteHeaders) {
+ RedPayloadSplitter splitter;
+
+ uint8_t payload_types[] = {0, 0};
+ const int kTimestampOffset = 160;
+
+ PacketList packet_list;
+
+ // Truncate the packet such that the first block cannot be parsed.
+ packet_list.push_back(CreateRedPayload(2, payload_types, kTimestampOffset));
+ packet_list.front().payload.SetSize(4);
+ EXPECT_FALSE(splitter.SplitRed(&packet_list));
+ EXPECT_FALSE(packet_list.empty());
+
+ // Truncate the packet further, so that not even the first block header can
+ // be parsed.
+ packet_list.front().payload.SetSize(3);
+ EXPECT_FALSE(splitter.SplitRed(&packet_list));
+ EXPECT_FALSE(packet_list.empty());
+}
+
+} // namespace webrtc
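
A minimal sketch of the RFC 2198 RED block header that SplitRed() parses may
help when reading the byte-level manipulation in the tests above. The struct
and helper below are illustrative only, not part of this patch: each non-final
header is 4 bytes (F bit, 7-bit payload type, 14-bit timestamp offset, 10-bit
block length), while the final header is a single byte, which is why a
two-block payload truncated to 3 bytes cannot even complete its first header.

    struct RedBlockHeader {
      bool final;                 // F bit; 0 marks the last (primary) header.
      uint8_t payload_type;       // 7 bits.
      uint16_t timestamp_offset;  // 14 bits; non-final headers only.
      uint16_t block_length;      // 10 bits; non-final headers only.
    };

    // Returns the number of header bytes consumed, or 0 if `len` is too short.
    size_t ParseRedBlockHeader(const uint8_t* data, size_t len,
                               RedBlockHeader* out) {
      if (len < 1) return 0;
      out->final = (data[0] & 0x80) == 0;
      out->payload_type = data[0] & 0x7F;
      if (out->final) return 1;  // Final header is one byte.
      if (len < 4) return 0;     // A non-final header needs four bytes.
      out->timestamp_offset = (data[1] << 6) | (data[2] >> 2);
      out->block_length = ((data[2] & 0x03) << 8) | data[3];
      return 4;
    }
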
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/reorder_optimizer.cc b/third_party/libwebrtc/modules/audio_coding/neteq/reorder_optimizer.cc
new file mode 100644
index 0000000000..f6e073fc88
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/reorder_optimizer.cc
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/reorder_optimizer.h"
+
+#include <algorithm>
+#include <limits>
+#include <vector>
+
+namespace webrtc {
+
+namespace {
+
+constexpr int kDelayBuckets = 100;
+constexpr int kBucketSizeMs = 20;
+
+} // namespace
+
+ReorderOptimizer::ReorderOptimizer(int forget_factor,
+ int ms_per_loss_percent,
+ absl::optional<int> start_forget_weight)
+ : histogram_(kDelayBuckets, forget_factor, start_forget_weight),
+ ms_per_loss_percent_(ms_per_loss_percent) {}
+
+void ReorderOptimizer::Update(int relative_delay_ms,
+ bool reordered,
+ int base_delay_ms) {
+ const int index = reordered ? relative_delay_ms / kBucketSizeMs : 0;
+ if (index < histogram_.NumBuckets()) {
+ // Maximum delay to register is 2000 ms.
+ histogram_.Add(index);
+ }
+ int bucket_index = MinimizeCostFunction(base_delay_ms);
+ optimal_delay_ms_ = (1 + bucket_index) * kBucketSizeMs;
+}
+
+void ReorderOptimizer::Reset() {
+ histogram_.Reset();
+ optimal_delay_ms_.reset();
+}
+
+int ReorderOptimizer::MinimizeCostFunction(int base_delay_ms) const {
+ const std::vector<int>& buckets = histogram_.buckets();
+
+ // Values are calculated in Q30.
+ int64_t loss_probability = 1 << 30;
+ int64_t min_cost = std::numeric_limits<int64_t>::max();
+ int min_bucket = 0;
+ for (int i = 0; i < static_cast<int>(buckets.size()); ++i) {
+ loss_probability -= buckets[i];
+ int64_t delay_ms =
+ static_cast<int64_t>(std::max(0, i * kBucketSizeMs - base_delay_ms))
+ << 30;
+ int64_t cost = delay_ms + 100 * ms_per_loss_percent_ * loss_probability;
+
+ if (cost < min_cost) {
+ min_cost = cost;
+ min_bucket = i;
+ }
+ if (loss_probability == 0) {
+ break;
+ }
+ }
+
+ return min_bucket;
+}
+
+} // namespace webrtc
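
The loop in MinimizeCostFunction() evaluates, in Q30 fixed point, the cost of
placing the target delay at each histogram bucket: the delay itself plus the
expected loss, where `loss_probability` is the histogram mass remaining above
bucket `i` and the factor 100 converts it to percent before
`ms_per_loss_percent_` prices it in milliseconds. A floating-point
restatement, with an illustrative name, assuming nothing beyond the code
above:

    // Cost of covering reordering up to `delay_ms`, given the probability
    // that a packet arrives even later than that.
    double Cost(double delay_ms, double loss_probability,
                int ms_per_loss_percent) {
      return delay_ms + 100.0 * ms_per_loss_percent * loss_probability;
    }

For example, Update(80, /*reordered=*/true, 0) lands in bucket 80 / 20 = 4,
and a cost minimum at bucket 4 yields an optimal delay of (1 + 4) * 20 =
100 ms.
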
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/reorder_optimizer.h b/third_party/libwebrtc/modules/audio_coding/neteq/reorder_optimizer.h
new file mode 100644
index 0000000000..06f6bc7e50
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/reorder_optimizer.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_REORDER_OPTIMIZER_H_
+#define MODULES_AUDIO_CODING_NETEQ_REORDER_OPTIMIZER_H_
+
+#include "absl/types/optional.h"
+#include "modules/audio_coding/neteq/histogram.h"
+
+namespace webrtc {
+
+// Calculates an optimal delay to reduce the chance of missing reordered
+// packets. The delay/loss trade-off can be tune using the `ms_per_loss_percent`
+// parameter.
+class ReorderOptimizer {
+ public:
+ ReorderOptimizer(int forget_factor,
+ int ms_per_loss_percent,
+ absl::optional<int> start_forget_weight);
+
+ void Update(int relative_delay_ms, bool reordered, int base_delay_ms);
+
+ absl::optional<int> GetOptimalDelayMs() const { return optimal_delay_ms_; }
+
+ void Reset();
+
+ private:
+ int MinimizeCostFunction(int base_delay_ms) const;
+
+ Histogram histogram_;
+ const int ms_per_loss_percent_;
+ absl::optional<int> optimal_delay_ms_;
+};
+
+} // namespace webrtc
+#endif // MODULES_AUDIO_CODING_NETEQ_REORDER_OPTIMIZER_H_
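
A short usage sketch for the class above, borrowing the constructor constants
from the unit test that follows; the variable names are illustrative:

    ReorderOptimizer optimizer(/*forget_factor=*/32745,  // 0.9993 in Q15.
                               /*ms_per_loss_percent=*/20,
                               /*start_forget_weight=*/1);
    // Feed relative arrival delays as packets come in; only reordered
    // packets can grow the histogram beyond bucket 0.
    optimizer.Update(/*relative_delay_ms=*/80, /*reordered=*/true,
                     /*base_delay_ms=*/0);
    // Empty until the first Update(); afterwards (1 + bucket) * 20 ms.
    absl::optional<int> delay_ms = optimizer.GetOptimalDelayMs();
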
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/reorder_optimizer_unittest.cc b/third_party/libwebrtc/modules/audio_coding/neteq/reorder_optimizer_unittest.cc
new file mode 100644
index 0000000000..aaa1062560
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/reorder_optimizer_unittest.cc
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/reorder_optimizer.h"
+
+#include "test/gtest.h"
+
+namespace webrtc {
+
+namespace {
+
+constexpr int kForgetFactor = 32745; // 0.9993 in Q15.
+constexpr int kMsPerLossPercent = 20;
+constexpr int kStartForgetWeight = 1;
+
+} // namespace
+
+TEST(ReorderOptimizerTest, OnlyIncreaseDelayForReorderedPackets) {
+ ReorderOptimizer reorder_optimizer(kForgetFactor, kMsPerLossPercent,
+ kStartForgetWeight);
+ EXPECT_FALSE(reorder_optimizer.GetOptimalDelayMs());
+
+ // Delay should not increase for in-order packets.
+ reorder_optimizer.Update(60, /*reordered=*/false, 0);
+ EXPECT_EQ(reorder_optimizer.GetOptimalDelayMs(), 20);
+
+ reorder_optimizer.Update(100, /*reordered=*/false, 0);
+ EXPECT_EQ(reorder_optimizer.GetOptimalDelayMs(), 20);
+
+ reorder_optimizer.Update(80, /*reordered=*/true, 0);
+ EXPECT_EQ(reorder_optimizer.GetOptimalDelayMs(), 100);
+}
+
+TEST(ReorderOptimizerTest, AvoidIncreasingDelayWhenProbabilityIsLow) {
+ ReorderOptimizer reorder_optimizer(kForgetFactor, kMsPerLossPercent,
+ kStartForgetWeight);
+
+ reorder_optimizer.Update(40, /*reordered=*/true, 0);
+ reorder_optimizer.Update(40, /*reordered=*/true, 0);
+ reorder_optimizer.Update(40, /*reordered=*/true, 0);
+ EXPECT_EQ(reorder_optimizer.GetOptimalDelayMs(), 60);
+
+ // The cost of the delay is too high relative to the probability.
+ reorder_optimizer.Update(600, /*reordered=*/true, 0);
+ EXPECT_EQ(reorder_optimizer.GetOptimalDelayMs(), 60);
+}
+
+TEST(ReorderOptimizerTest, BaseDelayIsSubtractedFromCost) {
+ constexpr int kBaseDelayMs = 200;
+ ReorderOptimizer reorder_optimizer(kForgetFactor, kMsPerLossPercent,
+ kStartForgetWeight);
+
+ reorder_optimizer.Update(40, /*reordered=*/true, kBaseDelayMs);
+ reorder_optimizer.Update(40, /*reordered=*/true, kBaseDelayMs);
+ reorder_optimizer.Update(40, /*reordered=*/true, kBaseDelayMs);
+ EXPECT_EQ(reorder_optimizer.GetOptimalDelayMs(), 60);
+
+ // The cost of the delay is too high relative to the probability.
+ reorder_optimizer.Update(600, /*reordered=*/true, kBaseDelayMs);
+ EXPECT_EQ(reorder_optimizer.GetOptimalDelayMs(), 620);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/statistics_calculator.cc b/third_party/libwebrtc/modules/audio_coding/neteq/statistics_calculator.cc
new file mode 100644
index 0000000000..52d3fa90f1
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/statistics_calculator.cc
@@ -0,0 +1,394 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/statistics_calculator.h"
+
+#include <string.h> // memset
+
+#include <algorithm>
+
+#include "absl/strings/string_view.h"
+#include "modules/audio_coding/neteq/delay_manager.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "system_wrappers/include/metrics.h"
+
+namespace webrtc {
+
+namespace {
+size_t AddIntToSizeTWithLowerCap(int a, size_t b) {
+ const size_t ret = b + a;
+ // If a + b is negative, resulting in a negative wrap, cap it to zero instead.
+ static_assert(sizeof(size_t) >= sizeof(int),
+ "int must not be wider than size_t for this to work");
+ return (a < 0 && ret > b) ? 0 : ret;
+}
+
+constexpr int kInterruptionLenMs = 150;
+} // namespace
+
+// Allocating the static const so that it can be passed by reference to
+// RTC_DCHECK.
+const size_t StatisticsCalculator::kLenWaitingTimes;
+
+StatisticsCalculator::PeriodicUmaLogger::PeriodicUmaLogger(
+ absl::string_view uma_name,
+ int report_interval_ms,
+ int max_value)
+ : uma_name_(uma_name),
+ report_interval_ms_(report_interval_ms),
+ max_value_(max_value),
+ timer_(0) {}
+
+StatisticsCalculator::PeriodicUmaLogger::~PeriodicUmaLogger() = default;
+
+void StatisticsCalculator::PeriodicUmaLogger::AdvanceClock(int step_ms) {
+ timer_ += step_ms;
+ if (timer_ < report_interval_ms_) {
+ return;
+ }
+ LogToUma(Metric());
+ Reset();
+ timer_ -= report_interval_ms_;
+ RTC_DCHECK_GE(timer_, 0);
+}
+
+void StatisticsCalculator::PeriodicUmaLogger::LogToUma(int value) const {
+ RTC_HISTOGRAM_COUNTS_SPARSE(uma_name_, value, 1, max_value_, 50);
+}
+
+StatisticsCalculator::PeriodicUmaCount::PeriodicUmaCount(
+ absl::string_view uma_name,
+ int report_interval_ms,
+ int max_value)
+ : PeriodicUmaLogger(uma_name, report_interval_ms, max_value) {}
+
+StatisticsCalculator::PeriodicUmaCount::~PeriodicUmaCount() {
+ // Log the count for the current (incomplete) interval.
+ LogToUma(Metric());
+}
+
+void StatisticsCalculator::PeriodicUmaCount::RegisterSample() {
+ ++counter_;
+}
+
+int StatisticsCalculator::PeriodicUmaCount::Metric() const {
+ return counter_;
+}
+
+void StatisticsCalculator::PeriodicUmaCount::Reset() {
+ counter_ = 0;
+}
+
+StatisticsCalculator::PeriodicUmaAverage::PeriodicUmaAverage(
+ absl::string_view uma_name,
+ int report_interval_ms,
+ int max_value)
+ : PeriodicUmaLogger(uma_name, report_interval_ms, max_value) {}
+
+StatisticsCalculator::PeriodicUmaAverage::~PeriodicUmaAverage() {
+ // Log the average for the current (incomplete) interval.
+ LogToUma(Metric());
+}
+
+void StatisticsCalculator::PeriodicUmaAverage::RegisterSample(int value) {
+ sum_ += value;
+ ++counter_;
+}
+
+int StatisticsCalculator::PeriodicUmaAverage::Metric() const {
+ return counter_ == 0 ? 0 : static_cast<int>(sum_ / counter_);
+}
+
+void StatisticsCalculator::PeriodicUmaAverage::Reset() {
+ sum_ = 0.0;
+ counter_ = 0;
+}
+
+StatisticsCalculator::StatisticsCalculator()
+ : preemptive_samples_(0),
+ accelerate_samples_(0),
+ expanded_speech_samples_(0),
+ expanded_noise_samples_(0),
+ timestamps_since_last_report_(0),
+ secondary_decoded_samples_(0),
+ discarded_secondary_packets_(0),
+ delayed_packet_outage_counter_(
+ "WebRTC.Audio.DelayedPacketOutageEventsPerMinute",
+ 60000, // 60 seconds report interval.
+ 100),
+ excess_buffer_delay_("WebRTC.Audio.AverageExcessBufferDelayMs",
+ 60000, // 60 seconds report interval.
+ 1000),
+ buffer_full_counter_("WebRTC.Audio.JitterBufferFullPerMinute",
+ 60000, // 60 seconds report interval.
+ 100) {}
+
+StatisticsCalculator::~StatisticsCalculator() = default;
+
+void StatisticsCalculator::Reset() {
+ preemptive_samples_ = 0;
+ accelerate_samples_ = 0;
+ expanded_speech_samples_ = 0;
+ expanded_noise_samples_ = 0;
+ secondary_decoded_samples_ = 0;
+ discarded_secondary_packets_ = 0;
+ waiting_times_.clear();
+}
+
+void StatisticsCalculator::ResetMcu() {
+ timestamps_since_last_report_ = 0;
+}
+
+void StatisticsCalculator::ExpandedVoiceSamples(size_t num_samples,
+ bool is_new_concealment_event) {
+ expanded_speech_samples_ += num_samples;
+ ConcealedSamplesCorrection(rtc::dchecked_cast<int>(num_samples), true);
+ lifetime_stats_.concealment_events += is_new_concealment_event;
+}
+
+void StatisticsCalculator::ExpandedNoiseSamples(size_t num_samples,
+ bool is_new_concealment_event) {
+ expanded_noise_samples_ += num_samples;
+ ConcealedSamplesCorrection(rtc::dchecked_cast<int>(num_samples), false);
+ lifetime_stats_.concealment_events += is_new_concealment_event;
+}
+
+void StatisticsCalculator::ExpandedVoiceSamplesCorrection(int num_samples) {
+ expanded_speech_samples_ =
+ AddIntToSizeTWithLowerCap(num_samples, expanded_speech_samples_);
+ ConcealedSamplesCorrection(num_samples, true);
+}
+
+void StatisticsCalculator::ExpandedNoiseSamplesCorrection(int num_samples) {
+ expanded_noise_samples_ =
+ AddIntToSizeTWithLowerCap(num_samples, expanded_noise_samples_);
+ ConcealedSamplesCorrection(num_samples, false);
+}
+
+void StatisticsCalculator::DecodedOutputPlayed() {
+ decoded_output_played_ = true;
+}
+
+void StatisticsCalculator::EndExpandEvent(int fs_hz) {
+ RTC_DCHECK_GE(lifetime_stats_.concealed_samples,
+ concealed_samples_at_event_end_);
+ const int event_duration_ms =
+ 1000 *
+ (lifetime_stats_.concealed_samples - concealed_samples_at_event_end_) /
+ fs_hz;
+ if (event_duration_ms >= kInterruptionLenMs && decoded_output_played_) {
+ lifetime_stats_.interruption_count++;
+ lifetime_stats_.total_interruption_duration_ms += event_duration_ms;
+ RTC_HISTOGRAM_COUNTS("WebRTC.Audio.AudioInterruptionMs", event_duration_ms,
+ /*min=*/150, /*max=*/5000, /*bucket_count=*/50);
+ }
+ concealed_samples_at_event_end_ = lifetime_stats_.concealed_samples;
+}
+
+void StatisticsCalculator::ConcealedSamplesCorrection(int num_samples,
+ bool is_voice) {
+ if (num_samples < 0) {
+ // Store negative correction to subtract from future positive additions.
+ // See also the function comment in the header file.
+ concealed_samples_correction_ -= num_samples;
+ if (!is_voice) {
+ silent_concealed_samples_correction_ -= num_samples;
+ }
+ return;
+ }
+
+ const size_t canceled_out =
+ std::min(static_cast<size_t>(num_samples), concealed_samples_correction_);
+ concealed_samples_correction_ -= canceled_out;
+ lifetime_stats_.concealed_samples += num_samples - canceled_out;
+
+ if (!is_voice) {
+ const size_t silent_canceled_out = std::min(
+ static_cast<size_t>(num_samples), silent_concealed_samples_correction_);
+ silent_concealed_samples_correction_ -= silent_canceled_out;
+ lifetime_stats_.silent_concealed_samples +=
+ num_samples - silent_canceled_out;
+ }
+}
+
+void StatisticsCalculator::PreemptiveExpandedSamples(size_t num_samples) {
+ preemptive_samples_ += num_samples;
+ operations_and_state_.preemptive_samples += num_samples;
+ lifetime_stats_.inserted_samples_for_deceleration += num_samples;
+}
+
+void StatisticsCalculator::AcceleratedSamples(size_t num_samples) {
+ accelerate_samples_ += num_samples;
+ operations_and_state_.accelerate_samples += num_samples;
+ lifetime_stats_.removed_samples_for_acceleration += num_samples;
+}
+
+void StatisticsCalculator::GeneratedNoiseSamples(size_t num_samples) {
+ lifetime_stats_.generated_noise_samples += num_samples;
+}
+
+void StatisticsCalculator::PacketsDiscarded(size_t num_packets) {
+ lifetime_stats_.packets_discarded += num_packets;
+}
+
+void StatisticsCalculator::SecondaryPacketsDiscarded(size_t num_packets) {
+ discarded_secondary_packets_ += num_packets;
+ lifetime_stats_.fec_packets_discarded += num_packets;
+}
+
+void StatisticsCalculator::SecondaryPacketsReceived(size_t num_packets) {
+ lifetime_stats_.fec_packets_received += num_packets;
+}
+
+void StatisticsCalculator::IncreaseCounter(size_t num_samples, int fs_hz) {
+ const int time_step_ms =
+ rtc::CheckedDivExact(static_cast<int>(1000 * num_samples), fs_hz);
+ delayed_packet_outage_counter_.AdvanceClock(time_step_ms);
+ excess_buffer_delay_.AdvanceClock(time_step_ms);
+ buffer_full_counter_.AdvanceClock(time_step_ms);
+ timestamps_since_last_report_ += static_cast<uint32_t>(num_samples);
+ if (timestamps_since_last_report_ >
+ static_cast<uint32_t>(fs_hz * kMaxReportPeriod)) {
+ timestamps_since_last_report_ = 0;
+ }
+ lifetime_stats_.total_samples_received += num_samples;
+}
+
+void StatisticsCalculator::JitterBufferDelay(
+ size_t num_samples,
+ uint64_t waiting_time_ms,
+ uint64_t target_delay_ms,
+ uint64_t unlimited_target_delay_ms) {
+ lifetime_stats_.jitter_buffer_delay_ms += waiting_time_ms * num_samples;
+ lifetime_stats_.jitter_buffer_target_delay_ms +=
+ target_delay_ms * num_samples;
+ lifetime_stats_.jitter_buffer_minimum_delay_ms +=
+ unlimited_target_delay_ms * num_samples;
+ lifetime_stats_.jitter_buffer_emitted_count += num_samples;
+}
+
+void StatisticsCalculator::SecondaryDecodedSamples(int num_samples) {
+ secondary_decoded_samples_ += num_samples;
+}
+
+void StatisticsCalculator::FlushedPacketBuffer() {
+ operations_and_state_.packet_buffer_flushes++;
+ buffer_full_counter_.RegisterSample();
+}
+
+void StatisticsCalculator::ReceivedPacket() {
+ ++lifetime_stats_.jitter_buffer_packets_received;
+}
+
+void StatisticsCalculator::RelativePacketArrivalDelay(size_t delay_ms) {
+ lifetime_stats_.relative_packet_arrival_delay_ms += delay_ms;
+}
+
+void StatisticsCalculator::LogDelayedPacketOutageEvent(int num_samples,
+ int fs_hz) {
+ int outage_duration_ms = num_samples / (fs_hz / 1000);
+ RTC_HISTOGRAM_COUNTS("WebRTC.Audio.DelayedPacketOutageEventMs",
+ outage_duration_ms, 1 /* min */, 2000 /* max */,
+ 100 /* bucket count */);
+ delayed_packet_outage_counter_.RegisterSample();
+ lifetime_stats_.delayed_packet_outage_samples += num_samples;
+}
+
+void StatisticsCalculator::StoreWaitingTime(int waiting_time_ms) {
+ excess_buffer_delay_.RegisterSample(waiting_time_ms);
+ RTC_DCHECK_LE(waiting_times_.size(), kLenWaitingTimes);
+ if (waiting_times_.size() == kLenWaitingTimes) {
+ // Erase first value.
+ waiting_times_.pop_front();
+ }
+ waiting_times_.push_back(waiting_time_ms);
+ operations_and_state_.last_waiting_time_ms = waiting_time_ms;
+}
+
+void StatisticsCalculator::GetNetworkStatistics(size_t samples_per_packet,
+ NetEqNetworkStatistics* stats) {
+ RTC_DCHECK(stats);
+
+ stats->accelerate_rate =
+ CalculateQ14Ratio(accelerate_samples_, timestamps_since_last_report_);
+
+ stats->preemptive_rate =
+ CalculateQ14Ratio(preemptive_samples_, timestamps_since_last_report_);
+
+ stats->expand_rate =
+ CalculateQ14Ratio(expanded_speech_samples_ + expanded_noise_samples_,
+ timestamps_since_last_report_);
+
+ stats->speech_expand_rate = CalculateQ14Ratio(expanded_speech_samples_,
+ timestamps_since_last_report_);
+
+ stats->secondary_decoded_rate = CalculateQ14Ratio(
+ secondary_decoded_samples_, timestamps_since_last_report_);
+
+ const size_t discarded_secondary_samples =
+ discarded_secondary_packets_ * samples_per_packet;
+ stats->secondary_discarded_rate =
+ CalculateQ14Ratio(discarded_secondary_samples,
+ static_cast<uint32_t>(discarded_secondary_samples +
+ secondary_decoded_samples_));
+
+ if (waiting_times_.size() == 0) {
+ stats->mean_waiting_time_ms = -1;
+ stats->median_waiting_time_ms = -1;
+ stats->min_waiting_time_ms = -1;
+ stats->max_waiting_time_ms = -1;
+ } else {
+ std::sort(waiting_times_.begin(), waiting_times_.end());
+ // Find mid-point elements. If the size is odd, the two values
+ // `middle_left` and `middle_right` will both be the one middle element; if
+ // the size is even, they will be the two neighboring elements at the
+ // middle of the list.
+ const int middle_left = waiting_times_[(waiting_times_.size() - 1) / 2];
+ const int middle_right = waiting_times_[waiting_times_.size() / 2];
+ // Calculate the average of the two. (Works also for odd sizes.)
+ stats->median_waiting_time_ms = (middle_left + middle_right) / 2;
+ stats->min_waiting_time_ms = waiting_times_.front();
+ stats->max_waiting_time_ms = waiting_times_.back();
+ double sum = 0;
+ for (auto time : waiting_times_) {
+ sum += time;
+ }
+ stats->mean_waiting_time_ms = static_cast<int>(sum / waiting_times_.size());
+ }
+
+ // Reset counters.
+ ResetMcu();
+ Reset();
+}
+
+NetEqLifetimeStatistics StatisticsCalculator::GetLifetimeStatistics() const {
+ return lifetime_stats_;
+}
+
+NetEqOperationsAndState StatisticsCalculator::GetOperationsAndState() const {
+ return operations_and_state_;
+}
+
+uint16_t StatisticsCalculator::CalculateQ14Ratio(size_t numerator,
+ uint32_t denominator) {
+ if (numerator == 0) {
+ return 0;
+ } else if (numerator < denominator) {
+ // Ratio must be smaller than 1 in Q14.
+ RTC_DCHECK_LT((numerator << 14) / denominator, (1 << 14));
+ return static_cast<uint16_t>((numerator << 14) / denominator);
+ } else {
+ // Cap the ratio at 1, since a larger value most likely indicates an error.
+ return 1 << 14;
+ }
+}
+
+} // namespace webrtc
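
The Q14 rates returned by GetNetworkStatistics() are fractions scaled by
1 << 14 = 16384. A small worked check, with an illustrative helper for
converting back:

    // 120 expanded samples out of 480 played: (120 << 14) / 480 = 4096 = 0.25.
    static_assert((120u << 14) / 480u == 4096u, "0.25 in Q14");

    // Convert a Q14 rate (e.g. NetEqNetworkStatistics::expand_rate) back to a
    // plain fraction for display.
    double Q14ToFraction(uint16_t q14) {
      return static_cast<double>(q14) / (1 << 14);
    }
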
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/statistics_calculator.h b/third_party/libwebrtc/modules/audio_coding/neteq/statistics_calculator.h
new file mode 100644
index 0000000000..33a22d02dd
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/statistics_calculator.h
@@ -0,0 +1,210 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_STATISTICS_CALCULATOR_H_
+#define MODULES_AUDIO_CODING_NETEQ_STATISTICS_CALCULATOR_H_
+
+#include <deque>
+#include <string>
+
+#include "absl/strings/string_view.h"
+#include "api/neteq/neteq.h"
+
+namespace webrtc {
+
+class DelayManager;
+
+// This class handles various network statistics in NetEq.
+class StatisticsCalculator {
+ public:
+ StatisticsCalculator();
+
+ virtual ~StatisticsCalculator();
+
+ StatisticsCalculator(const StatisticsCalculator&) = delete;
+ StatisticsCalculator& operator=(const StatisticsCalculator&) = delete;
+
+ // Resets most of the counters.
+ void Reset();
+
+ // Resets the counters that are not handled by Reset().
+ void ResetMcu();
+
+ // Reports that `num_samples` samples were produced through expansion, and
+ // that the expansion produced something other than just noise samples.
+ void ExpandedVoiceSamples(size_t num_samples, bool is_new_concealment_event);
+
+ // Reports that `num_samples` samples were produced through expansion, and
+ // that the expansion produced only noise samples.
+ void ExpandedNoiseSamples(size_t num_samples, bool is_new_concealment_event);
+
+ // Corrects the statistics for number of samples produced through non-noise
+ // expansion by adding `num_samples` (negative or positive) to the current
+ // value. The result is capped to zero to avoid negative values.
+ void ExpandedVoiceSamplesCorrection(int num_samples);
+
+ // Same as ExpandedVoiceSamplesCorrection but for noise samples.
+ void ExpandedNoiseSamplesCorrection(int num_samples);
+
+ void DecodedOutputPlayed();
+
+ // Mark end of expand event; triggers some stats to be reported.
+ void EndExpandEvent(int fs_hz);
+
+ // Reports that `num_samples` samples were produced through preemptive
+ // expansion.
+ void PreemptiveExpandedSamples(size_t num_samples);
+
+ // Reports that `num_samples` samples were removed through accelerate.
+ void AcceleratedSamples(size_t num_samples);
+
+ // Reports that `num_samples` comfort noise samples were generated.
+ void GeneratedNoiseSamples(size_t num_samples);
+
+ // Reports that `num_packets` packets were discarded.
+ virtual void PacketsDiscarded(size_t num_packets);
+
+ // Reports that `num_packets` secondary (FEC) packets were discarded.
+ virtual void SecondaryPacketsDiscarded(size_t num_packets);
+
+ // Reports that `num_packets` secondary (FEC) packets were received.
+ virtual void SecondaryPacketsReceived(size_t num_packets);
+
+ // Increases the report interval counter with `num_samples` at a sample rate
+ // of `fs_hz`. This is how the StatisticsCalculator gets notified that current
+ // time is increasing.
+ void IncreaseCounter(size_t num_samples, int fs_hz);
+
+ // Update jitter buffer delay counter.
+ void JitterBufferDelay(size_t num_samples,
+ uint64_t waiting_time_ms,
+ uint64_t target_delay_ms,
+ uint64_t unlimited_target_delay_ms);
+
+ // Stores new packet waiting time in waiting time statistics.
+ void StoreWaitingTime(int waiting_time_ms);
+
+ // Reports that `num_samples` samples were decoded from secondary packets.
+ void SecondaryDecodedSamples(int num_samples);
+
+ // Reports that the packet buffer was flushed.
+ void FlushedPacketBuffer();
+
+ // Reports that the jitter buffer received a packet.
+ void ReceivedPacket();
+
+ // Reports that a received packet was delayed by `delay_ms` milliseconds.
+ virtual void RelativePacketArrivalDelay(size_t delay_ms);
+
+ // Logs a delayed packet outage event of `num_samples` expanded at a sample
+ // rate of `fs_hz`. A delayed packet outage event is defined as an expand
+ // period caused not by an actual packet loss, but by a delayed packet.
+ virtual void LogDelayedPacketOutageEvent(int num_samples, int fs_hz);
+
+ // Returns the current network statistics in `stats`. The number of samples
+ // per packet is `samples_per_packet`. The method does not populate
+ // `preferred_buffer_size_ms`, `jitter_peaks_found` or `clockdrift_ppm`; use
+ // the PopulateDelayManagerStats method for those.
+ void GetNetworkStatistics(size_t samples_per_packet,
+ NetEqNetworkStatistics* stats);
+
+ // Returns a copy of this class's lifetime statistics. These statistics are
+ // never reset.
+ NetEqLifetimeStatistics GetLifetimeStatistics() const;
+
+ NetEqOperationsAndState GetOperationsAndState() const;
+
+ private:
+ static const int kMaxReportPeriod = 60; // Seconds before auto-reset.
+ static const size_t kLenWaitingTimes = 100;
+
+ class PeriodicUmaLogger {
+ public:
+ PeriodicUmaLogger(absl::string_view uma_name,
+ int report_interval_ms,
+ int max_value);
+ virtual ~PeriodicUmaLogger();
+ void AdvanceClock(int step_ms);
+
+ protected:
+ void LogToUma(int value) const;
+ virtual int Metric() const = 0;
+ virtual void Reset() = 0;
+
+ const std::string uma_name_;
+ const int report_interval_ms_;
+ const int max_value_;
+ int timer_ = 0;
+ };
+
+ class PeriodicUmaCount final : public PeriodicUmaLogger {
+ public:
+ PeriodicUmaCount(absl::string_view uma_name,
+ int report_interval_ms,
+ int max_value);
+ ~PeriodicUmaCount() override;
+ void RegisterSample();
+
+ protected:
+ int Metric() const override;
+ void Reset() override;
+
+ private:
+ int counter_ = 0;
+ };
+
+ class PeriodicUmaAverage final : public PeriodicUmaLogger {
+ public:
+ PeriodicUmaAverage(absl::string_view uma_name,
+ int report_interval_ms,
+ int max_value);
+ ~PeriodicUmaAverage() override;
+ void RegisterSample(int value);
+
+ protected:
+ int Metric() const override;
+ void Reset() override;
+
+ private:
+ double sum_ = 0.0;
+ int counter_ = 0;
+ };
+
+ // Corrects the concealed samples counter in lifetime_stats_. The value of
+ // num_samples is added directly to the stat if the correction is positive.
+ // If the correction is negative, it is cached and will be subtracted from
+ // future additions to the counter. This is meant to be called from
+ // Expanded{Voice,Noise}Samples{Correction}.
+ void ConcealedSamplesCorrection(int num_samples, bool is_voice);
+
+ // Calculates numerator / denominator, and returns the value in Q14.
+ static uint16_t CalculateQ14Ratio(size_t numerator, uint32_t denominator);
+
+ NetEqLifetimeStatistics lifetime_stats_;
+ NetEqOperationsAndState operations_and_state_;
+ size_t concealed_samples_correction_ = 0;
+ size_t silent_concealed_samples_correction_ = 0;
+ size_t preemptive_samples_;
+ size_t accelerate_samples_;
+ size_t expanded_speech_samples_;
+ size_t expanded_noise_samples_;
+ size_t concealed_samples_at_event_end_ = 0;
+ uint32_t timestamps_since_last_report_;
+ std::deque<int> waiting_times_;
+ uint32_t secondary_decoded_samples_;
+ size_t discarded_secondary_packets_;
+ PeriodicUmaCount delayed_packet_outage_counter_;
+ PeriodicUmaAverage excess_buffer_delay_;
+ PeriodicUmaCount buffer_full_counter_;
+ bool decoded_output_played_ = false;
+};
+
+} // namespace webrtc
+#endif // MODULES_AUDIO_CODING_NETEQ_STATISTICS_CALCULATOR_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/statistics_calculator_unittest.cc b/third_party/libwebrtc/modules/audio_coding/neteq/statistics_calculator_unittest.cc
new file mode 100644
index 0000000000..491cd83dc4
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/statistics_calculator_unittest.cc
@@ -0,0 +1,206 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/statistics_calculator.h"
+
+#include "test/gtest.h"
+
+namespace webrtc {
+
+TEST(LifetimeStatistics, TotalSamplesReceived) {
+ StatisticsCalculator stats;
+ for (int i = 0; i < 10; ++i) {
+ stats.IncreaseCounter(480, 48000); // 10 ms at 48 kHz.
+ }
+ EXPECT_EQ(10 * 480u, stats.GetLifetimeStatistics().total_samples_received);
+}
+
+TEST(LifetimeStatistics, SamplesConcealed) {
+ StatisticsCalculator stats;
+ stats.ExpandedVoiceSamples(100, false);
+ stats.ExpandedNoiseSamples(17, false);
+ EXPECT_EQ(100u + 17u, stats.GetLifetimeStatistics().concealed_samples);
+}
+
+// This test verifies that a negative correction of concealed_samples does not
+// result in a decrease in the stats value (because stats-consuming applications
+// would not expect the value to decrease). Instead, the correction should be
+// made to future increments to the stat.
+TEST(LifetimeStatistics, SamplesConcealedCorrection) {
+ StatisticsCalculator stats;
+ stats.ExpandedVoiceSamples(100, false);
+ EXPECT_EQ(100u, stats.GetLifetimeStatistics().concealed_samples);
+ stats.ExpandedVoiceSamplesCorrection(-10);
+ // Do not subtract directly, but keep the correction for later.
+ EXPECT_EQ(100u, stats.GetLifetimeStatistics().concealed_samples);
+ stats.ExpandedVoiceSamplesCorrection(20);
+ // The total correction is 20 - 10.
+ EXPECT_EQ(110u, stats.GetLifetimeStatistics().concealed_samples);
+
+ // Also test correction done to the next ExpandedVoiceSamples call.
+ stats.ExpandedVoiceSamplesCorrection(-17);
+ EXPECT_EQ(110u, stats.GetLifetimeStatistics().concealed_samples);
+ stats.ExpandedVoiceSamples(100, false);
+ EXPECT_EQ(110u + 100u - 17u, stats.GetLifetimeStatistics().concealed_samples);
+}
+
+// This test verifies that neither "accelerate" nor "pre-emptive expand" results
+// in a modification to concealed_samples stats. Only PLC operations (i.e.,
+// "expand" and "merge") should affect the stat.
+TEST(LifetimeStatistics, NoUpdateOnTimeStretch) {
+ StatisticsCalculator stats;
+ stats.ExpandedVoiceSamples(100, false);
+ stats.AcceleratedSamples(4711);
+ stats.PreemptiveExpandedSamples(17);
+ stats.ExpandedVoiceSamples(100, false);
+ EXPECT_EQ(200u, stats.GetLifetimeStatistics().concealed_samples);
+}
+
+TEST(StatisticsCalculator, ExpandedSamplesCorrection) {
+ StatisticsCalculator stats;
+ NetEqNetworkStatistics stats_output;
+ constexpr int kSampleRateHz = 48000;
+ constexpr int k10MsSamples = kSampleRateHz / 100;
+ constexpr int kPacketSizeMs = 20;
+ constexpr size_t kSamplesPerPacket = kPacketSizeMs * kSampleRateHz / 1000;
+
+ // Advance time by 10 ms.
+ stats.IncreaseCounter(k10MsSamples, kSampleRateHz);
+
+ stats.GetNetworkStatistics(kSamplesPerPacket, &stats_output);
+
+ EXPECT_EQ(0u, stats_output.expand_rate);
+ EXPECT_EQ(0u, stats_output.speech_expand_rate);
+
+ // Correct with a negative value.
+ stats.ExpandedVoiceSamplesCorrection(-100);
+ stats.ExpandedNoiseSamplesCorrection(-100);
+ stats.IncreaseCounter(k10MsSamples, kSampleRateHz);
+ stats.GetNetworkStatistics(kSamplesPerPacket, &stats_output);
+ // Expect no change, since negative values are disallowed.
+ EXPECT_EQ(0u, stats_output.expand_rate);
+ EXPECT_EQ(0u, stats_output.speech_expand_rate);
+
+ // Correct with a positive value.
+ stats.ExpandedVoiceSamplesCorrection(50);
+ stats.ExpandedNoiseSamplesCorrection(200);
+ stats.IncreaseCounter(k10MsSamples, kSampleRateHz);
+ stats.GetNetworkStatistics(kSamplesPerPacket, &stats_output);
+ // Calculate expected rates in Q14. Expand rate is noise + voice, while
+ // speech expand rate is only voice.
+ EXPECT_EQ(((50u + 200u) << 14) / k10MsSamples, stats_output.expand_rate);
+ EXPECT_EQ((50u << 14) / k10MsSamples, stats_output.speech_expand_rate);
+}
+
+TEST(StatisticsCalculator, RelativePacketArrivalDelay) {
+ StatisticsCalculator stats;
+
+ stats.RelativePacketArrivalDelay(50);
+ NetEqLifetimeStatistics stats_output = stats.GetLifetimeStatistics();
+ EXPECT_EQ(50u, stats_output.relative_packet_arrival_delay_ms);
+
+ stats.RelativePacketArrivalDelay(20);
+ stats_output = stats.GetLifetimeStatistics();
+ EXPECT_EQ(70u, stats_output.relative_packet_arrival_delay_ms);
+}
+
+TEST(StatisticsCalculator, ReceivedPacket) {
+ StatisticsCalculator stats;
+
+ stats.ReceivedPacket();
+ NetEqLifetimeStatistics stats_output = stats.GetLifetimeStatistics();
+ EXPECT_EQ(1u, stats_output.jitter_buffer_packets_received);
+
+ stats.ReceivedPacket();
+ stats_output = stats.GetLifetimeStatistics();
+ EXPECT_EQ(2u, stats_output.jitter_buffer_packets_received);
+}
+
+TEST(StatisticsCalculator, InterruptionCounter) {
+ constexpr int fs_khz = 48;
+ constexpr int fs_hz = fs_khz * 1000;
+ StatisticsCalculator stats;
+ stats.DecodedOutputPlayed();
+ stats.EndExpandEvent(fs_hz);
+ auto lts = stats.GetLifetimeStatistics();
+ EXPECT_EQ(0, lts.interruption_count);
+ EXPECT_EQ(0, lts.total_interruption_duration_ms);
+
+ // Add an event that is shorter than 150 ms. Should not be logged.
+ stats.ExpandedVoiceSamples(10 * fs_khz, false); // 10 ms.
+ stats.ExpandedNoiseSamples(139 * fs_khz, false); // 139 ms.
+ stats.EndExpandEvent(fs_hz);
+ lts = stats.GetLifetimeStatistics();
+ EXPECT_EQ(0, lts.interruption_count);
+
+ // Add an event that is longer than 150 ms. Should be logged.
+ stats.ExpandedVoiceSamples(140 * fs_khz, false); // 140 ms.
+ stats.ExpandedNoiseSamples(11 * fs_khz, false); // 11 ms.
+ stats.EndExpandEvent(fs_hz);
+ lts = stats.GetLifetimeStatistics();
+ EXPECT_EQ(1, lts.interruption_count);
+ EXPECT_EQ(151, lts.total_interruption_duration_ms);
+
+ // Add one more long event.
+ stats.ExpandedVoiceSamples(100 * fs_khz, false); // 100 ms.
+ stats.ExpandedNoiseSamples(5000 * fs_khz, false); // 5000 ms.
+ stats.EndExpandEvent(fs_hz);
+ lts = stats.GetLifetimeStatistics();
+ EXPECT_EQ(2, lts.interruption_count);
+ EXPECT_EQ(5100 + 151, lts.total_interruption_duration_ms);
+}
+
+TEST(StatisticsCalculator, InterruptionCounterDoNotLogBeforeDecoding) {
+ constexpr int fs_khz = 48;
+ constexpr int fs_hz = fs_khz * 1000;
+ StatisticsCalculator stats;
+
+ // Add an event that is longer than 150 ms. Should normally be logged, but we
+ // have not called DecodedOutputPlayed() yet, so it shouldn't this time.
+ stats.ExpandedVoiceSamples(151 * fs_khz, false); // 151 ms.
+ stats.EndExpandEvent(fs_hz);
+ auto lts = stats.GetLifetimeStatistics();
+ EXPECT_EQ(0, lts.interruption_count);
+
+ // Call DecodedOutputPlayed(). Logging should happen after this.
+ stats.DecodedOutputPlayed();
+
+ // Add one more long event.
+ stats.ExpandedVoiceSamples(151 * fs_khz, false); // 151 ms.
+ stats.EndExpandEvent(fs_hz);
+ lts = stats.GetLifetimeStatistics();
+ EXPECT_EQ(1, lts.interruption_count);
+}
+
+TEST(StatisticsCalculator, DiscardedPackets) {
+ StatisticsCalculator statistics_calculator;
+ EXPECT_EQ(0u,
+ statistics_calculator.GetLifetimeStatistics().packets_discarded);
+
+ statistics_calculator.PacketsDiscarded(1);
+ EXPECT_EQ(1u,
+ statistics_calculator.GetLifetimeStatistics().packets_discarded);
+
+ statistics_calculator.PacketsDiscarded(10);
+ EXPECT_EQ(11u,
+ statistics_calculator.GetLifetimeStatistics().packets_discarded);
+
+ // Calling `SecondaryPacketsDiscarded` does not modify `packets_discarded`.
+ statistics_calculator.SecondaryPacketsDiscarded(1);
+ EXPECT_EQ(11u,
+ statistics_calculator.GetLifetimeStatistics().packets_discarded);
+
+ // Calling `FlushedPacketBuffer` does not modify `packets_discarded`.
+ statistics_calculator.FlushedPacketBuffer();
+ EXPECT_EQ(11u,
+ statistics_calculator.GetLifetimeStatistics().packets_discarded);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/sync_buffer.cc b/third_party/libwebrtc/modules/audio_coding/neteq/sync_buffer.cc
new file mode 100644
index 0000000000..7d7cac7157
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/sync_buffer.cc
@@ -0,0 +1,118 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/sync_buffer.h"
+
+#include <algorithm> // Access to min.
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+size_t SyncBuffer::FutureLength() const {
+ return Size() - next_index_;
+}
+
+void SyncBuffer::PushBack(const AudioMultiVector& append_this) {
+ size_t samples_added = append_this.Size();
+ AudioMultiVector::PushBack(append_this);
+ AudioMultiVector::PopFront(samples_added);
+ if (samples_added <= next_index_) {
+ next_index_ -= samples_added;
+ } else {
+ // This means that we are pushing out future data that was never used.
+ // RTC_DCHECK_NOTREACHED();
+ // TODO(hlundin): This assert must be disabled to support 60 ms frames.
+ // This should not happen even for 60 ms frames, but it does. Investigate
+ // why.
+ next_index_ = 0;
+ }
+ dtmf_index_ -= std::min(dtmf_index_, samples_added);
+}
+
+void SyncBuffer::PushBackInterleaved(const rtc::BufferT<int16_t>& append_this) {
+ const size_t size_before_adding = Size();
+ AudioMultiVector::PushBackInterleaved(append_this);
+ const size_t samples_added_per_channel = Size() - size_before_adding;
+ RTC_DCHECK_EQ(samples_added_per_channel * Channels(), append_this.size());
+ AudioMultiVector::PopFront(samples_added_per_channel);
+ next_index_ -= std::min(next_index_, samples_added_per_channel);
+ dtmf_index_ -= std::min(dtmf_index_, samples_added_per_channel);
+}
+
+void SyncBuffer::PushFrontZeros(size_t length) {
+ InsertZerosAtIndex(length, 0);
+}
+
+void SyncBuffer::InsertZerosAtIndex(size_t length, size_t position) {
+ position = std::min(position, Size());
+ length = std::min(length, Size() - position);
+ AudioMultiVector::PopBack(length);
+ for (size_t channel = 0; channel < Channels(); ++channel) {
+ channels_[channel]->InsertZerosAt(length, position);
+ }
+ if (next_index_ >= position) {
+ // We are moving the `next_index_` sample.
+ set_next_index(next_index_ + length); // Overflow handled by subfunction.
+ }
+ if (dtmf_index_ > 0 && dtmf_index_ >= position) {
+ // We are moving the `dtmf_index_` sample.
+ set_dtmf_index(dtmf_index_ + length); // Overflow handled by subfunction.
+ }
+}
+
+void SyncBuffer::ReplaceAtIndex(const AudioMultiVector& insert_this,
+ size_t length,
+ size_t position) {
+ position = std::min(position, Size()); // Cap `position` in the valid range.
+ length = std::min(length, Size() - position);
+ AudioMultiVector::OverwriteAt(insert_this, length, position);
+}
+
+void SyncBuffer::ReplaceAtIndex(const AudioMultiVector& insert_this,
+ size_t position) {
+ ReplaceAtIndex(insert_this, insert_this.Size(), position);
+}
+
+void SyncBuffer::GetNextAudioInterleaved(size_t requested_len,
+ AudioFrame* output) {
+ RTC_DCHECK(output);
+ const size_t samples_to_read = std::min(FutureLength(), requested_len);
+ output->ResetWithoutMuting();
+ const size_t tot_samples_read = ReadInterleavedFromIndex(
+ next_index_, samples_to_read, output->mutable_data());
+ const size_t samples_read_per_channel = tot_samples_read / Channels();
+ next_index_ += samples_read_per_channel;
+ output->num_channels_ = Channels();
+ output->samples_per_channel_ = samples_read_per_channel;
+}
+
+void SyncBuffer::IncreaseEndTimestamp(uint32_t increment) {
+ end_timestamp_ += increment;
+}
+
+void SyncBuffer::Flush() {
+ Zeros(Size());
+ next_index_ = Size();
+ end_timestamp_ = 0;
+ dtmf_index_ = 0;
+}
+
+void SyncBuffer::set_next_index(size_t value) {
+ // Cannot set `next_index_` larger than the size of the buffer.
+ next_index_ = std::min(value, Size());
+}
+
+void SyncBuffer::set_dtmf_index(size_t value) {
+ // Cannot set `dtmf_index_` larger than the size of the buffer.
+ dtmf_index_ = std::min(value, Size());
+}
+
+} // namespace webrtc
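
A sketch of the invariant PushBack() maintains, written against SyncBuffer's
public interface (declared in sync_buffer.h below) with illustrative names and
a single-channel buffer. The buffer length never changes: appending N samples
also pops N from the front, and `next_index_` slides back by N, so the
unplayed "future" region [next_index_, Size()) grows by exactly the amount
appended.

    void SyncBufferInvariantSketch() {
      SyncBuffer buffer(/*channels=*/1, /*length=*/100);
      AudioMultiVector chunk(/*channels=*/1, /*length=*/10);
      buffer.PushBack(chunk);
      RTC_DCHECK_EQ(buffer.Size(), 100);             // Length is constant.
      RTC_DCHECK_EQ(buffer.next_index(), 100 - 10);  // 10 samples of future.
      RTC_DCHECK_EQ(buffer.FutureLength(), 10);
    }
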
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/sync_buffer.h b/third_party/libwebrtc/modules/audio_coding/neteq/sync_buffer.h
new file mode 100644
index 0000000000..cf56c432e3
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/sync_buffer.h
@@ -0,0 +1,110 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_SYNC_BUFFER_H_
+#define MODULES_AUDIO_CODING_NETEQ_SYNC_BUFFER_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <vector>
+
+#include "api/audio/audio_frame.h"
+#include "modules/audio_coding/neteq/audio_multi_vector.h"
+#include "modules/audio_coding/neteq/audio_vector.h"
+#include "rtc_base/buffer.h"
+
+namespace webrtc {
+
+class SyncBuffer : public AudioMultiVector {
+ public:
+ SyncBuffer(size_t channels, size_t length)
+ : AudioMultiVector(channels, length),
+ next_index_(length),
+ end_timestamp_(0),
+ dtmf_index_(0) {}
+
+ SyncBuffer(const SyncBuffer&) = delete;
+ SyncBuffer& operator=(const SyncBuffer&) = delete;
+
+ // Returns the number of samples yet to play out from the buffer.
+ size_t FutureLength() const;
+
+ // Adds the contents of `append_this` to the back of the SyncBuffer. Removes
+ // the same number of samples from the beginning of the SyncBuffer, to
+ // maintain a constant buffer size. The `next_index_` is updated to reflect
+ // the move of the beginning of "future" data.
+ void PushBack(const AudioMultiVector& append_this) override;
+
+ // Like PushBack, but reads the samples channel-interleaved from the input.
+ void PushBackInterleaved(const rtc::BufferT<int16_t>& append_this);
+
+ // Adds `length` zeros to the beginning of each channel. Removes
+ // the same number of samples from the end of the SyncBuffer, to
+ // maintain a constant buffer size. The `next_index_` is updated to reflect
+ // the move of the beginning of "future" data.
+ // Note that this operation may delete future samples that are waiting to
+ // be played.
+ void PushFrontZeros(size_t length);
+
+ // Inserts `length` zeros into each channel at index `position`. The size of
+ // the SyncBuffer is kept constant, which means that the last `length`
+ // elements in each channel will be purged.
+ virtual void InsertZerosAtIndex(size_t length, size_t position);
+
+ // Overwrites each channel in this SyncBuffer with values taken from
+ // `insert_this`. The values are taken from the beginning of `insert_this` and
+ // are inserted starting at `position`. `length` values are written into each
+ // channel. The size of the SyncBuffer is kept constant. That is, if `length`
+ // and `position` are selected such that the new data would extend beyond the
+ // end of the current SyncBuffer, the buffer is not extended.
+ // The `next_index_` is not updated.
+ virtual void ReplaceAtIndex(const AudioMultiVector& insert_this,
+ size_t length,
+ size_t position);
+
+ // Same as the above method, but where all of `insert_this` is written (with
+ // the same constraints as above, that the SyncBuffer is not extended).
+ virtual void ReplaceAtIndex(const AudioMultiVector& insert_this,
+ size_t position);
+
+ // Reads `requested_len` samples from each channel and writes them interleaved
+ // into `output`. The `next_index_` is updated to point to the sample to read
+ // next time. The AudioFrame `output` is first reset, and the `data_`,
+ // `num_channels_`, and `samples_per_channel_` fields are updated.
+ void GetNextAudioInterleaved(size_t requested_len, AudioFrame* output);
+
+ // Adds `increment` to `end_timestamp_`.
+ void IncreaseEndTimestamp(uint32_t increment);
+
+ // Flushes the buffer. The buffer will contain only zeros after the flush, and
+ // `next_index_` will point to the end, like when the buffer was first
+ // created.
+ void Flush();
+
+ const AudioVector& Channel(size_t n) const { return *channels_[n]; }
+ AudioVector& Channel(size_t n) { return *channels_[n]; }
+
+ // Accessors and mutators.
+ size_t next_index() const { return next_index_; }
+ void set_next_index(size_t value);
+ uint32_t end_timestamp() const { return end_timestamp_; }
+ void set_end_timestamp(uint32_t value) { end_timestamp_ = value; }
+ size_t dtmf_index() const { return dtmf_index_; }
+ void set_dtmf_index(size_t value);
+
+ private:
+ size_t next_index_;
+ uint32_t end_timestamp_; // The timestamp of the last sample in the buffer.
+ size_t dtmf_index_; // Index to the first non-DTMF sample in the buffer.
+};
+
+} // namespace webrtc
+#endif // MODULES_AUDIO_CODING_NETEQ_SYNC_BUFFER_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/sync_buffer_unittest.cc b/third_party/libwebrtc/modules/audio_coding/neteq/sync_buffer_unittest.cc
new file mode 100644
index 0000000000..bdcd92446b
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/sync_buffer_unittest.cc
@@ -0,0 +1,174 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/sync_buffer.h"
+
+#include "rtc_base/numerics/safe_conversions.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+TEST(SyncBuffer, CreateAndDestroy) {
+ // Create a SyncBuffer with two channels and 10 samples each.
+ static const size_t kLen = 10;
+ static const size_t kChannels = 2;
+ SyncBuffer sync_buffer(kChannels, kLen);
+ EXPECT_EQ(kChannels, sync_buffer.Channels());
+ EXPECT_EQ(kLen, sync_buffer.Size());
+ // When the buffer is empty, the next index to play out is at the end.
+ EXPECT_EQ(kLen, sync_buffer.next_index());
+ // Verify that all elements are zero.
+ for (size_t channel = 0; channel < kChannels; ++channel) {
+ for (size_t i = 0; i < kLen; ++i) {
+ EXPECT_EQ(0, sync_buffer[channel][i]);
+ }
+ }
+}
+
+TEST(SyncBuffer, SetNextIndex) {
+ // Create a SyncBuffer with two channels and 100 samples each.
+ static const size_t kLen = 100;
+ static const size_t kChannels = 2;
+ SyncBuffer sync_buffer(kChannels, kLen);
+ sync_buffer.set_next_index(0);
+ EXPECT_EQ(0u, sync_buffer.next_index());
+ sync_buffer.set_next_index(kLen / 2);
+ EXPECT_EQ(kLen / 2, sync_buffer.next_index());
+ sync_buffer.set_next_index(kLen);
+ EXPECT_EQ(kLen, sync_buffer.next_index());
+ // Try to set larger than the buffer size; should cap at buffer size.
+ sync_buffer.set_next_index(kLen + 1);
+ EXPECT_EQ(kLen, sync_buffer.next_index());
+}
+
+TEST(SyncBuffer, PushBackAndFlush) {
+ // Create a SyncBuffer with two channels and 100 samples each.
+ static const size_t kLen = 100;
+ static const size_t kChannels = 2;
+ SyncBuffer sync_buffer(kChannels, kLen);
+ static const size_t kNewLen = 10;
+ AudioMultiVector new_data(kChannels, kNewLen);
+ // Populate `new_data`.
+ for (size_t channel = 0; channel < kChannels; ++channel) {
+ for (size_t i = 0; i < kNewLen; ++i) {
+ new_data[channel][i] = rtc::checked_cast<int16_t>(i);
+ }
+ }
+ // Push back `new_data` into `sync_buffer`. This operation should pop out
+ // data from the front of `sync_buffer`, so that the size of the buffer
+ // remains the same. The `next_index_` should also move with the same length.
+ sync_buffer.PushBack(new_data);
+ ASSERT_EQ(kLen, sync_buffer.Size());
+ // Verify that `next_index_` moved accordingly.
+ EXPECT_EQ(kLen - kNewLen, sync_buffer.next_index());
+ // Verify the new contents.
+ for (size_t channel = 0; channel < kChannels; ++channel) {
+ for (size_t i = 0; i < kNewLen; ++i) {
+ EXPECT_EQ(new_data[channel][i],
+ sync_buffer[channel][sync_buffer.next_index() + i]);
+ }
+ }
+
+ // Now flush the buffer, and verify that it is all zeros, and that next_index
+ // points to the end.
+ sync_buffer.Flush();
+ ASSERT_EQ(kLen, sync_buffer.Size());
+ EXPECT_EQ(kLen, sync_buffer.next_index());
+ for (size_t channel = 0; channel < kChannels; ++channel) {
+ for (size_t i = 0; i < kLen; ++i) {
+ EXPECT_EQ(0, sync_buffer[channel][i]);
+ }
+ }
+}
+
+TEST(SyncBuffer, PushFrontZeros) {
+ // Create a SyncBuffer with two channels and 100 samples each.
+ static const size_t kLen = 100;
+ static const size_t kChannels = 2;
+ SyncBuffer sync_buffer(kChannels, kLen);
+ static const size_t kNewLen = 10;
+ AudioMultiVector new_data(kChannels, kNewLen);
+ // Populate `new_data`.
+ for (size_t channel = 0; channel < kChannels; ++channel) {
+ for (size_t i = 0; i < kNewLen; ++i) {
+ new_data[channel][i] = rtc::checked_cast<int16_t>(1000 + i);
+ }
+ }
+ sync_buffer.PushBack(new_data);
+ EXPECT_EQ(kLen, sync_buffer.Size());
+
+ // Push `kNewLen` - 1 zeros into each channel in the front of the SyncBuffer.
+ sync_buffer.PushFrontZeros(kNewLen - 1);
+ EXPECT_EQ(kLen, sync_buffer.Size()); // Size should remain the same.
+ // Verify that `next_index_` moved accordingly. Should be at the end - 1.
+ EXPECT_EQ(kLen - 1, sync_buffer.next_index());
+ // Verify the zeros.
+ for (size_t channel = 0; channel < kChannels; ++channel) {
+ for (size_t i = 0; i < kNewLen - 1; ++i) {
+ EXPECT_EQ(0, sync_buffer[channel][i]);
+ }
+ }
+ // Verify that the correct data is at the end of the SyncBuffer.
+ for (size_t channel = 0; channel < kChannels; ++channel) {
+ EXPECT_EQ(1000, sync_buffer[channel][sync_buffer.next_index()]);
+ }
+}
+
+TEST(SyncBuffer, GetNextAudioInterleaved) {
+ // Create a SyncBuffer with two channels and 100 samples each.
+ static const size_t kLen = 100;
+ static const size_t kChannels = 2;
+ SyncBuffer sync_buffer(kChannels, kLen);
+ static const size_t kNewLen = 10;
+ AudioMultiVector new_data(kChannels, kNewLen);
+ // Populate `new_data`.
+ for (size_t channel = 0; channel < kChannels; ++channel) {
+ for (size_t i = 0; i < kNewLen; ++i) {
+ new_data[channel][i] = rtc::checked_cast<int16_t>(i);
+ }
+ }
+ // Push back `new_data` into `sync_buffer`. This operation should pop out
+ // data from the front of `sync_buffer`, so that the size of the buffer
+ // remains the same. The `next_index_` should also move with the same length.
+ sync_buffer.PushBack(new_data);
+
+ // Read to interleaved output. Read in two batches, where each read operation
+ // should automatically update the `next_index_` in the SyncBuffer.
+ // Note that `samples_read` is the number of samples read from each channel.
+ // That is, the number of samples written to `output` is
+ // `samples_read` * `kChannels`.
+ AudioFrame output1;
+ sync_buffer.GetNextAudioInterleaved(kNewLen / 2, &output1);
+ EXPECT_EQ(kChannels, output1.num_channels_);
+ EXPECT_EQ(kNewLen / 2, output1.samples_per_channel_);
+
+ AudioFrame output2;
+ sync_buffer.GetNextAudioInterleaved(kNewLen / 2, &output2);
+ EXPECT_EQ(kChannels, output2.num_channels_);
+ EXPECT_EQ(kNewLen / 2, output2.samples_per_channel_);
+
+ // Verify the data.
+ const int16_t* output_ptr = output1.data();
+ for (size_t i = 0; i < kNewLen / 2; ++i) {
+ for (size_t channel = 0; channel < kChannels; ++channel) {
+ EXPECT_EQ(new_data[channel][i], *output_ptr);
+ ++output_ptr;
+ }
+ }
+ output_ptr = output2.data();
+ for (size_t i = kNewLen / 2; i < kNewLen; ++i) {
+ for (size_t channel = 0; channel < kChannels; ++channel) {
+ EXPECT_EQ(new_data[channel][i], *output_ptr);
+ ++output_ptr;
+ }
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/test/delay_tool/parse_delay_file.m b/third_party/libwebrtc/modules/audio_coding/neteq/test/delay_tool/parse_delay_file.m
new file mode 100644
index 0000000000..031d8a39ee
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/test/delay_tool/parse_delay_file.m
@@ -0,0 +1,201 @@
+%
+% Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+%
+% Use of this source code is governed by a BSD-style license
+% that can be found in the LICENSE file in the root of the source
+% tree. An additional intellectual property rights grant can be found
+% in the file PATENTS. All contributing project authors may
+% be found in the AUTHORS file in the root of the source tree.
+%
+
+function outStruct = parse_delay_file(file)
+
+fid = fopen(file, 'rb');
+if fid == -1
+ error('Cannot open file %s', file);
+end
+
+textline = fgetl(fid);
+if ~strncmp(textline, '#!NetEQ_Delay_Logging', 21)
+ error('Wrong file format');
+end
+
+ver = sscanf(textline, '#!NetEQ_Delay_Logging%d.%d');
+if ~all(ver == [2; 0])
+ error('Wrong version of delay logging function')
+end
+
+
+start_pos = ftell(fid);
+fseek(fid, -12, 'eof');
+textline = fgetl(fid);
+if ~strncmp(textline, 'End of file', 11)
+ error('File ending is not correct. Seems like the simulation ended abnormally.');
+end
+
+fseek(fid,-12-4, 'eof');
+Npackets = fread(fid, 1, 'int32');
+fseek(fid, start_pos, 'bof');
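+% Assumed file trailer layout, derived from the seeks above:
+%   ... | int32 Npackets (4 bytes) | 'End of file' + newline (12 bytes) | EOF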
+
+rtpts = zeros(Npackets, 1);
+seqno = zeros(Npackets, 1);
+pt = zeros(Npackets, 1);
+plen = zeros(Npackets, 1);
+recin_t = nan*ones(Npackets, 1);
+decode_t = nan*ones(Npackets, 1);
+playout_delay = zeros(Npackets, 1);
+optbuf = zeros(Npackets, 1);
+
+fs_ix = 1;
+clock = 0;
+ts_ix = 1;
+ended = 0;
+late_packets = 0;
+fs_now = 8000;
+last_decode_k = 0;
+tot_expand = 0;
+tot_accelerate = 0;
+tot_preemptive = 0;
+
+while not(ended)
+ signal = fread(fid, 1, '*int32');
+
+ switch signal
+ case 3 % NETEQ_DELAY_LOGGING_SIGNAL_CLOCK
+ clock = fread(fid, 1, '*float32');
+
+      % Keep reading batches of M records for as long as the signal stays 3,
+      % fetching the int32 signal and the float32 clock in a single fread
+      % call to save execution time.
+ temp = [3; 0];
+ M = 120;
+ while all(temp(1,:) == 3)
+ fp = ftell(fid);
+ temp = fread(fid, [2 M], '*int32');
+ end
+
+ % back up to last clock event
+ fseek(fid, fp - ftell(fid) + ...
+ (find(temp(1,:) ~= 3, 1 ) - 2) * 2 * 4 + 4, 'cof');
+ % read the last clock value
+ clock = fread(fid, 1, '*float32');
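+      % A sketch of the rewind arithmetic: column k of the last batch starts
+      % at byte fp + (k-1)*8. If column j is the first non-clock record,
+      % seeking to fp + (j-2)*8 + 4 lands on the float32 clock of column
+      % j-1 (the last clock event); reading it leaves the file position at
+      % column j, which the outer loop processes next.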
+
+ case 1 % NETEQ_DELAY_LOGGING_SIGNAL_RECIN
+ temp_ts = fread(fid, 1, 'uint32');
+
+ if late_packets > 0
+ temp_ix = ts_ix - 1;
+ while (temp_ix >= 1) && (rtpts(temp_ix) ~= temp_ts)
+ % TODO(hlundin): use matlab vector search instead?
+ temp_ix = temp_ix - 1;
+ end
+
+ if temp_ix >= 1
+ % the ts was found in the vector
+ late_packets = late_packets - 1;
+ else
+ temp_ix = ts_ix;
+ ts_ix = ts_ix + 1;
+ end
+ else
+ temp_ix = ts_ix;
+ ts_ix = ts_ix + 1;
+ end
+
+ rtpts(temp_ix) = temp_ts;
+ seqno(temp_ix) = fread(fid, 1, 'uint16');
+ pt(temp_ix) = fread(fid, 1, 'int32');
+ plen(temp_ix) = fread(fid, 1, 'int16');
+ recin_t(temp_ix) = clock;
+
+ case 2 % NETEQ_DELAY_LOGGING_SIGNAL_FLUSH
+ % do nothing
+
+ case 4 % NETEQ_DELAY_LOGGING_SIGNAL_EOF
+ ended = 1;
+
+ case 5 % NETEQ_DELAY_LOGGING_SIGNAL_DECODE
+ last_decode_ts = fread(fid, 1, 'uint32');
+ temp_delay = fread(fid, 1, 'uint16');
+
+ k = find(rtpts(1:(ts_ix - 1))==last_decode_ts,1,'last');
+ if ~isempty(k)
+ decode_t(k) = clock;
+ playout_delay(k) = temp_delay + ...
+ 5 * fs_now / 8000; % add overlap length
+ last_decode_k = k;
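+         % The overlap term above scales 5 samples at 8 kHz to the current
+         % rate, e.g. 10 samples at 16 kHz or 30 samples at 48 kHz.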
+ end
+
+ case 6 % NETEQ_DELAY_LOGGING_SIGNAL_CHANGE_FS
+ fsvec(fs_ix) = fread(fid, 1, 'uint16');
+ fschange_ts(fs_ix) = last_decode_ts;
+ fs_now = fsvec(fs_ix);
+ fs_ix = fs_ix + 1;
+
+ case 7 % NETEQ_DELAY_LOGGING_SIGNAL_MERGE_INFO
+ playout_delay(last_decode_k) = playout_delay(last_decode_k) ...
+ + fread(fid, 1, 'int32');
+
+ case 8 % NETEQ_DELAY_LOGGING_SIGNAL_EXPAND_INFO
+ temp = fread(fid, 1, 'int32');
+ if last_decode_k ~= 0
+ tot_expand = tot_expand + temp / (fs_now / 1000);
+ end
+
+ case 9 % NETEQ_DELAY_LOGGING_SIGNAL_ACCELERATE_INFO
+ temp = fread(fid, 1, 'int32');
+ if last_decode_k ~= 0
+ tot_accelerate = tot_accelerate + temp / (fs_now / 1000);
+ end
+
+ case 10 % NETEQ_DELAY_LOGGING_SIGNAL_PREEMPTIVE_INFO
+ temp = fread(fid, 1, 'int32');
+ if last_decode_k ~= 0
+ tot_preemptive = tot_preemptive + temp / (fs_now / 1000);
+ end
+
+ case 11 % NETEQ_DELAY_LOGGING_SIGNAL_OPTBUF
+ optbuf(last_decode_k) = fread(fid, 1, 'int32');
+
+ case 12 % NETEQ_DELAY_LOGGING_SIGNAL_DECODE_ONE_DESC
+ last_decode_ts = fread(fid, 1, 'uint32');
+ k = ts_ix - 1;
+
+ while (k >= 1) && (rtpts(k) ~= last_decode_ts)
+ % TODO(hlundin): use matlab vector search instead?
+ k = k - 1;
+ end
+
+ if k < 1
+ % packet not received yet
+ k = ts_ix;
+ rtpts(ts_ix) = last_decode_ts;
+ late_packets = late_packets + 1;
+ end
+
+ decode_t(k) = clock;
+ playout_delay(k) = fread(fid, 1, 'uint16') + ...
+ 5 * fs_now / 8000; % add overlap length
+ last_decode_k = k;
+
+ end
+
+end
+
+
+fclose(fid);
+
+outStruct = struct(...
+ 'ts', rtpts, ...
+ 'sn', seqno, ...
+ 'pt', pt,...
+ 'plen', plen,...
+ 'arrival', recin_t,...
+ 'decode', decode_t,...
+ 'fs', fsvec(:),...
+ 'fschange_ts', fschange_ts(:),...
+ 'playout_delay', playout_delay,...
+ 'tot_expand', tot_expand,...
+ 'tot_accelerate', tot_accelerate,...
+ 'tot_preemptive', tot_preemptive,...
+ 'optbuf', optbuf);
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/test/delay_tool/plot_neteq_delay.m b/third_party/libwebrtc/modules/audio_coding/neteq/test/delay_tool/plot_neteq_delay.m
new file mode 100644
index 0000000000..86d533fbeb
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/test/delay_tool/plot_neteq_delay.m
@@ -0,0 +1,197 @@
+%
+% Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+%
+% Use of this source code is governed by a BSD-style license
+% that can be found in the LICENSE file in the root of the source
+% tree. An additional intellectual property rights grant can be found
+% in the file PATENTS. All contributing project authors may
+% be found in the AUTHORS file in the root of the source tree.
+%
+
+function [delay_struct, delayvalues] = plot_neteq_delay(delayfile, varargin)
+
+% InfoStruct = plot_neteq_delay(delayfile)
+% InfoStruct = plot_neteq_delay(delayfile, 'skipdelay', skip_seconds)
+%
+% Henrik Lundin, 2006-11-17
+% Henrik Lundin, 2011-05-17
+%
+
+try
+ s = parse_delay_file(delayfile);
+catch
+ error(lasterr);
+end
+
+delayskip=0;
+noplot=0;
+arg_ptr=1;
+delaypoints=[];
+
+s.sn=unwrap_seqno(s.sn);
+
+while arg_ptr+1 <= nargin
+ switch lower(varargin{arg_ptr})
+ case {'skipdelay', 'delayskip'}
+ % skip a number of seconds in the beginning when calculating delays
+ delayskip = varargin{arg_ptr+1};
+ arg_ptr = arg_ptr + 2;
+ case 'noplot'
+ noplot=1;
+ arg_ptr = arg_ptr + 1;
+ case {'get_delay', 'getdelay'}
+ % return a vector of delay values for the points in the given vector
+ delaypoints = varargin{arg_ptr+1};
+ arg_ptr = arg_ptr + 2;
+ otherwise
+ warning('Unknown switch %s\n', varargin{arg_ptr});
+ arg_ptr = arg_ptr + 1;
+ end
+end
+
+% find lost frames that were covered by one-descriptor decoding
+one_desc_ix=find(isnan(s.arrival));
+for k=1:length(one_desc_ix)
+ ix=find(s.ts==max(s.ts(s.ts(one_desc_ix(k))>s.ts)));
+ s.sn(one_desc_ix(k))=s.sn(ix)+1;
+ s.pt(one_desc_ix(k))=s.pt(ix);
+ s.arrival(one_desc_ix(k))=s.arrival(ix)+s.decode(one_desc_ix(k))-s.decode(ix);
+end
+
+% remove duplicate received frames that were never decoded (RED codec)
+if length(unique(s.ts(isfinite(s.ts)))) < length(s.ts(isfinite(s.ts)))
+ ix=find(isfinite(s.decode));
+ s.sn=s.sn(ix);
+ s.ts=s.ts(ix);
+ s.arrival=s.arrival(ix);
+ s.playout_delay=s.playout_delay(ix);
+ s.pt=s.pt(ix);
+ s.optbuf=s.optbuf(ix);
+  s.plen=s.plen(ix);
+ s.decode=s.decode(ix);
+end
+
+% find non-unique sequence numbers
+[~,un_ix]=unique(s.sn);
+nonun_ix=setdiff(1:length(s.sn),un_ix);
+if ~isempty(nonun_ix)
+ warning('RTP sequence numbers are in error');
+end
+
+% sort vectors
+[s.sn,sort_ix]=sort(s.sn);
+s.ts=s.ts(sort_ix);
+s.arrival=s.arrival(sort_ix);
+s.decode=s.decode(sort_ix);
+s.playout_delay=s.playout_delay(sort_ix);
+s.pt=s.pt(sort_ix);
+
+send_t=s.ts-s.ts(1);
+if length(s.fs)<1
+ warning('No info about sample rate found in file. Using default 8000.');
+ s.fs(1)=8000;
+ s.fschange_ts(1)=min(s.ts);
+elseif s.fschange_ts(1)>min(s.ts)
+ s.fschange_ts(1)=min(s.ts);
+end
+
+end_ix=length(send_t);
+for k=length(s.fs):-1:1
+ start_ix=find(s.ts==s.fschange_ts(k));
+ send_t(start_ix:end_ix)=send_t(start_ix:end_ix)/s.fs(k)*1000;
+ s.playout_delay(start_ix:end_ix)=s.playout_delay(start_ix:end_ix)/s.fs(k)*1000;
+ s.optbuf(start_ix:end_ix)=s.optbuf(start_ix:end_ix)/s.fs(k)*1000;
+ end_ix=start_ix-1;
+end
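+% After this loop, send_t, playout_delay and optbuf are all expressed in ms,
+% with each segment converted using the sample rate that was active there.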
+
+tot_time=max(send_t)-min(send_t);
+
+seq_ix=s.sn-min(s.sn)+1;
+send_t=send_t+max(min(s.arrival-send_t),0);
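+% The shift above aligns the sender and receiver clocks so that the smallest
+% observed one-way delay becomes the zero baseline (when it is positive).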
+
+plot_send_t=nan*ones(max(seq_ix),1);
+plot_send_t(seq_ix)=send_t;
+plot_nw_delay=nan*ones(max(seq_ix),1);
+plot_nw_delay(seq_ix)=s.arrival-send_t;
+
+cng_ix=find(s.pt~=13); % find those packets that are not CNG/SID
+
+if noplot==0
+ h=plot(plot_send_t/1000,plot_nw_delay);
+ set(h,'color',0.75*[1 1 1]);
+ hold on
+ if any(s.optbuf~=0)
+ peak_ix=find(s.optbuf(cng_ix)<0); % peak mode is labeled with negative values
+ no_peak_ix=find(s.optbuf(cng_ix)>0); %setdiff(1:length(cng_ix),peak_ix);
+ h1=plot(send_t(cng_ix(peak_ix))/1000,...
+ s.arrival(cng_ix(peak_ix))+abs(s.optbuf(cng_ix(peak_ix)))-send_t(cng_ix(peak_ix)),...
+ 'r.');
+ h2=plot(send_t(cng_ix(no_peak_ix))/1000,...
+ s.arrival(cng_ix(no_peak_ix))+abs(s.optbuf(cng_ix(no_peak_ix)))-send_t(cng_ix(no_peak_ix)),...
+ 'g.');
+ set([h1, h2],'markersize',1)
+ end
+ %h=plot(send_t(seq_ix)/1000,s.decode+s.playout_delay-send_t(seq_ix));
+ h=plot(send_t(cng_ix)/1000,s.decode(cng_ix)+s.playout_delay(cng_ix)-send_t(cng_ix));
+ set(h,'linew',1.5);
+ hold off
+ ax1=axis;
+ axis tight
+ ax2=axis;
+ axis([ax2(1:3) ax1(4)])
+end
+
+
+% calculate delays and other parameters
+
+delayskip_ix = find(send_t-send_t(1)>=delayskip*1000, 1 );
+
+use_ix = intersect(cng_ix,... % use those that are not CNG/SID frames...
+ intersect(find(isfinite(s.decode)),... % ... that did arrive ...
+ (delayskip_ix:length(s.decode))')); % ... and are sent after delayskip seconds
+
+mean_delay = mean(s.decode(use_ix)+s.playout_delay(use_ix)-send_t(use_ix));
+neteq_delay = mean(s.decode(use_ix)+s.playout_delay(use_ix)-s.arrival(use_ix));
+
+Npack=max(s.sn(delayskip_ix:end))-min(s.sn(delayskip_ix:end))+1;
+nw_lossrate=(Npack-length(s.sn(delayskip_ix:end)))/Npack;
+neteq_lossrate=(length(s.sn(delayskip_ix:end))-length(use_ix))/Npack;
+
+delay_struct=struct('mean_delay',mean_delay,'neteq_delay',neteq_delay,...
+ 'nw_lossrate',nw_lossrate,'neteq_lossrate',neteq_lossrate,...
+ 'tot_expand',round(s.tot_expand),'tot_accelerate',round(s.tot_accelerate),...
+ 'tot_preemptive',round(s.tot_preemptive),'tot_time',tot_time,...
+ 'filename',delayfile,'units','ms','fs',unique(s.fs));
+
+if not(isempty(delaypoints))
+ delayvalues=interp1(send_t(cng_ix),...
+ s.decode(cng_ix)+s.playout_delay(cng_ix)-send_t(cng_ix),...
+ delaypoints,'nearest',NaN);
+else
+ delayvalues=[];
+end
+
+
+
+% SUBFUNCTIONS %
+
+function y=unwrap_seqno(x)
+
+jumps=find(abs((diff(x)-1))>65000);
+
+while ~isempty(jumps)
+ n=jumps(1);
+ if x(n+1)-x(n) < 0
+ % negative jump
+ x(n+1:end)=x(n+1:end)+65536;
+ else
+ % positive jump
+ x(n+1:end)=x(n+1:end)-65536;
+ end
+
+   jumps=n+find(abs((diff(x(n+1:end))-1))>65000); % make indices absolute
+end
+
+y=x;
+
+return;
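+% Example: unwrap_seqno([65534 65535 0 1]) returns [65534 65535 65536 65537],
+% since the 65535 -> 0 step is detected as a negative wrap.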
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/test/neteq_decoding_test.cc b/third_party/libwebrtc/modules/audio_coding/neteq/test/neteq_decoding_test.cc
new file mode 100644
index 0000000000..e6c1809fb6
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/test/neteq_decoding_test.cc
@@ -0,0 +1,423 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/test/neteq_decoding_test.h"
+
+#include "absl/strings/string_view.h"
+#include "api/audio_codecs/builtin_audio_decoder_factory.h"
+#include "api/rtp_headers.h"
+#include "modules/audio_coding/neteq/default_neteq_factory.h"
+#include "modules/audio_coding/neteq/test/result_sink.h"
+#include "rtc_base/strings/string_builder.h"
+#include "test/testsupport/file_utils.h"
+
+#ifdef WEBRTC_NETEQ_UNITTEST_BITEXACT
+RTC_PUSH_IGNORING_WUNDEF()
+#ifdef WEBRTC_ANDROID_PLATFORM_BUILD
+#include "external/webrtc/webrtc/modules/audio_coding/neteq/neteq_unittest.pb.h"
+#else
+#include "modules/audio_coding/neteq/neteq_unittest.pb.h"
+#endif
+RTC_POP_IGNORING_WUNDEF()
+#endif
+
+namespace webrtc {
+
+namespace {
+
+void LoadDecoders(webrtc::NetEq* neteq) {
+ ASSERT_EQ(true,
+ neteq->RegisterPayloadType(0, SdpAudioFormat("pcmu", 8000, 1)));
+ ASSERT_EQ(true,
+ neteq->RegisterPayloadType(8, SdpAudioFormat("pcma", 8000, 1)));
+#ifdef WEBRTC_CODEC_ILBC
+ ASSERT_EQ(true,
+ neteq->RegisterPayloadType(102, SdpAudioFormat("ilbc", 8000, 1)));
+#endif
+#if defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX)
+ ASSERT_EQ(true,
+ neteq->RegisterPayloadType(103, SdpAudioFormat("isac", 16000, 1)));
+#endif
+#ifdef WEBRTC_CODEC_ISAC
+ ASSERT_EQ(true,
+ neteq->RegisterPayloadType(104, SdpAudioFormat("isac", 32000, 1)));
+#endif
+#ifdef WEBRTC_CODEC_OPUS
+ ASSERT_EQ(true,
+ neteq->RegisterPayloadType(
+ 111, SdpAudioFormat("opus", 48000, 2, {{"stereo", "0"}})));
+#endif
+ ASSERT_EQ(true,
+ neteq->RegisterPayloadType(93, SdpAudioFormat("L16", 8000, 1)));
+ ASSERT_EQ(true,
+ neteq->RegisterPayloadType(94, SdpAudioFormat("L16", 16000, 1)));
+ ASSERT_EQ(true,
+ neteq->RegisterPayloadType(95, SdpAudioFormat("L16", 32000, 1)));
+ ASSERT_EQ(true,
+ neteq->RegisterPayloadType(13, SdpAudioFormat("cn", 8000, 1)));
+ ASSERT_EQ(true,
+ neteq->RegisterPayloadType(98, SdpAudioFormat("cn", 16000, 1)));
+}
+
+} // namespace
+
+const int NetEqDecodingTest::kTimeStepMs;
+const size_t NetEqDecodingTest::kBlockSize8kHz;
+const size_t NetEqDecodingTest::kBlockSize16kHz;
+const size_t NetEqDecodingTest::kBlockSize32kHz;
+const int NetEqDecodingTest::kInitSampleRateHz;
+
+NetEqDecodingTest::NetEqDecodingTest()
+ : clock_(0),
+ config_(),
+ output_sample_rate_(kInitSampleRateHz),
+ algorithmic_delay_ms_(0) {
+ config_.sample_rate_hz = kInitSampleRateHz;
+}
+
+void NetEqDecodingTest::SetUp() {
+ auto decoder_factory = CreateBuiltinAudioDecoderFactory();
+ neteq_ = DefaultNetEqFactory().CreateNetEq(config_, decoder_factory, &clock_);
+  ASSERT_TRUE(neteq_);
+  NetEqNetworkStatistics stat;
+  ASSERT_EQ(0, neteq_->NetworkStatistics(&stat));
+  algorithmic_delay_ms_ = stat.current_buffer_size_ms;
+  LoadDecoders(neteq_.get());
+}
+
+void NetEqDecodingTest::TearDown() {}
+
+void NetEqDecodingTest::OpenInputFile(absl::string_view rtp_file) {
+ rtp_source_.reset(test::RtpFileSource::Create(rtp_file));
+}
+
+void NetEqDecodingTest::Process() {
+  // Insert all packets that are due to arrive at or before the current time.
+ while (packet_ && clock_.TimeInMilliseconds() >= packet_->time_ms()) {
+ if (packet_->payload_length_bytes() > 0) {
+#ifndef WEBRTC_CODEC_ISAC
+ // Ignore payload type 104 (iSAC-swb) if ISAC is not supported.
+ if (packet_->header().payloadType != 104)
+#endif
+ ASSERT_EQ(
+ 0, neteq_->InsertPacket(
+ packet_->header(),
+ rtc::ArrayView<const uint8_t>(
+ packet_->payload(), packet_->payload_length_bytes())));
+ }
+ // Get next packet.
+ packet_ = rtp_source_->NextPacket();
+ }
+
+ // Get audio from NetEq.
+ bool muted;
+ ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
+ ASSERT_FALSE(muted);
+ ASSERT_TRUE((out_frame_.samples_per_channel_ == kBlockSize8kHz) ||
+ (out_frame_.samples_per_channel_ == kBlockSize16kHz) ||
+ (out_frame_.samples_per_channel_ == kBlockSize32kHz) ||
+ (out_frame_.samples_per_channel_ == kBlockSize48kHz));
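+  // (80, 160, 320, or 480 samples, i.e. 10 ms at 8, 16, 32, or 48 kHz.)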
+ output_sample_rate_ = out_frame_.sample_rate_hz_;
+ EXPECT_EQ(output_sample_rate_, neteq_->last_output_sample_rate_hz());
+
+ // Increase time.
+ clock_.AdvanceTimeMilliseconds(kTimeStepMs);
+}
+
+void NetEqDecodingTest::DecodeAndCompare(
+ absl::string_view rtp_file,
+ absl::string_view output_checksum,
+ absl::string_view network_stats_checksum,
+ bool gen_ref) {
+ OpenInputFile(rtp_file);
+
+ std::string ref_out_file =
+ gen_ref ? webrtc::test::OutputPath() + "neteq_universal_ref.pcm" : "";
+ ResultSink output(ref_out_file);
+
+ std::string stat_out_file =
+ gen_ref ? webrtc::test::OutputPath() + "neteq_network_stats.dat" : "";
+ ResultSink network_stats(stat_out_file);
+
+ packet_ = rtp_source_->NextPacket();
+ int i = 0;
+ uint64_t last_concealed_samples = 0;
+ uint64_t last_total_samples_received = 0;
+ while (packet_) {
+ rtc::StringBuilder ss;
+ ss << "Lap number " << i++ << " in DecodeAndCompare while loop";
+ SCOPED_TRACE(ss.str()); // Print out the parameter values on failure.
+ ASSERT_NO_FATAL_FAILURE(Process());
+ ASSERT_NO_FATAL_FAILURE(
+ output.AddResult(out_frame_.data(), out_frame_.samples_per_channel_));
+
+ // Query the network statistics API once per second
+ if (clock_.TimeInMilliseconds() % 1000 == 0) {
+ // Process NetworkStatistics.
+ NetEqNetworkStatistics current_network_stats;
+ ASSERT_EQ(0, neteq_->NetworkStatistics(&current_network_stats));
+ ASSERT_NO_FATAL_FAILURE(network_stats.AddResult(current_network_stats));
+
+      // Verify that lifetime stats and network stats report similar loss
+      // concealment rates.
+ auto lifetime_stats = neteq_->GetLifetimeStatistics();
+ const uint64_t delta_concealed_samples =
+ lifetime_stats.concealed_samples - last_concealed_samples;
+ last_concealed_samples = lifetime_stats.concealed_samples;
+ const uint64_t delta_total_samples_received =
+ lifetime_stats.total_samples_received - last_total_samples_received;
+ last_total_samples_received = lifetime_stats.total_samples_received;
+      // The tolerance is 2%, expressed in Q14.
+ EXPECT_NEAR(
+ (delta_concealed_samples << 14) / delta_total_samples_received,
+ current_network_stats.expand_rate, (2 << 14) / 100.0);
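+      // Q14 sketch: a ratio r is stored as r * 2^14, so the 2% tolerance
+      // equals (2 << 14) / 100.0 = 327.68 on the same fixed-point scale as
+      // expand_rate.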
+ }
+ }
+
+ SCOPED_TRACE("Check output audio.");
+ output.VerifyChecksum(output_checksum);
+ SCOPED_TRACE("Check network stats.");
+ network_stats.VerifyChecksum(network_stats_checksum);
+}
+
+void NetEqDecodingTest::PopulateRtpInfo(int frame_index,
+ int timestamp,
+ RTPHeader* rtp_info) {
+ rtp_info->sequenceNumber = frame_index;
+ rtp_info->timestamp = timestamp;
+ rtp_info->ssrc = 0x1234; // Just an arbitrary SSRC.
+ rtp_info->payloadType = 94; // PCM16b WB codec.
+ rtp_info->markerBit = false;
+}
+
+void NetEqDecodingTest::PopulateCng(int frame_index,
+ int timestamp,
+ RTPHeader* rtp_info,
+ uint8_t* payload,
+ size_t* payload_len) {
+ rtp_info->sequenceNumber = frame_index;
+ rtp_info->timestamp = timestamp;
+ rtp_info->ssrc = 0x1234; // Just an arbitrary SSRC.
+ rtp_info->payloadType = 98; // WB CNG.
+ rtp_info->markerBit = false;
+ payload[0] = 64; // Noise level -64 dBov, quite arbitrarily chosen.
+ *payload_len = 1; // Only noise level, no spectral parameters.
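+  // This matches the RFC 3389 comfort-noise payload format: one mandatory
+  // byte giving the noise level in -dBov, optionally followed by spectral
+  // coefficients (omitted here).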
+}
+
+void NetEqDecodingTest::WrapTest(uint16_t start_seq_no,
+ uint32_t start_timestamp,
+ const std::set<uint16_t>& drop_seq_numbers,
+ bool expect_seq_no_wrap,
+ bool expect_timestamp_wrap) {
+ uint16_t seq_no = start_seq_no;
+ uint32_t timestamp = start_timestamp;
+ const int kBlocksPerFrame = 3; // Number of 10 ms blocks per frame.
+ const int kFrameSizeMs = kBlocksPerFrame * kTimeStepMs;
+ const int kSamples = kBlockSize16kHz * kBlocksPerFrame;
+ const size_t kPayloadBytes = kSamples * sizeof(int16_t);
+ double next_input_time_ms = 0.0;
+
+ // Insert speech for 2 seconds.
+ const int kSpeechDurationMs = 2000;
+ uint16_t last_seq_no;
+ uint32_t last_timestamp;
+ bool timestamp_wrapped = false;
+ bool seq_no_wrapped = false;
+ for (double t_ms = 0; t_ms < kSpeechDurationMs; t_ms += 10) {
+ // Each turn in this for loop is 10 ms.
+ while (next_input_time_ms <= t_ms) {
+ // Insert one 30 ms speech frame.
+ uint8_t payload[kPayloadBytes] = {0};
+ RTPHeader rtp_info;
+ PopulateRtpInfo(seq_no, timestamp, &rtp_info);
+ if (drop_seq_numbers.find(seq_no) == drop_seq_numbers.end()) {
+ // This sequence number was not in the set to drop. Insert it.
+ ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload));
+ }
+ NetEqNetworkStatistics network_stats;
+ ASSERT_EQ(0, neteq_->NetworkStatistics(&network_stats));
+
+ EXPECT_LE(network_stats.preferred_buffer_size_ms, 80);
+ EXPECT_LE(network_stats.current_buffer_size_ms,
+ 80 + algorithmic_delay_ms_);
+ last_seq_no = seq_no;
+ last_timestamp = timestamp;
+
+ ++seq_no;
+ timestamp += kSamples;
+ next_input_time_ms += static_cast<double>(kFrameSizeMs);
+
+ seq_no_wrapped |= seq_no < last_seq_no;
+ timestamp_wrapped |= timestamp < last_timestamp;
+ }
+ // Pull out data once.
+ AudioFrame output;
+ bool muted;
+ ASSERT_EQ(0, neteq_->GetAudio(&output, &muted));
+ ASSERT_EQ(kBlockSize16kHz, output.samples_per_channel_);
+ ASSERT_EQ(1u, output.num_channels_);
+
+ // Expect delay (in samples) to be less than 2 packets.
+ absl::optional<uint32_t> playout_timestamp = neteq_->GetPlayoutTimestamp();
+ ASSERT_TRUE(playout_timestamp);
+ EXPECT_LE(timestamp - *playout_timestamp,
+ static_cast<uint32_t>(kSamples * 2));
+ }
+ // Make sure we have actually tested wrap-around.
+ ASSERT_EQ(expect_seq_no_wrap, seq_no_wrapped);
+ ASSERT_EQ(expect_timestamp_wrap, timestamp_wrapped);
+}
+
+void NetEqDecodingTest::LongCngWithClockDrift(double drift_factor,
+ double network_freeze_ms,
+ bool pull_audio_during_freeze,
+ int delay_tolerance_ms,
+ int max_time_to_speech_ms) {
+ uint16_t seq_no = 0;
+ uint32_t timestamp = 0;
+ const int kFrameSizeMs = 30;
+ const size_t kSamples = kFrameSizeMs * 16;
+ const size_t kPayloadBytes = kSamples * 2;
+ double next_input_time_ms = 0.0;
+ double t_ms;
+ bool muted;
+
+ // Insert speech for 5 seconds.
+ const int kSpeechDurationMs = 5000;
+ for (t_ms = 0; t_ms < kSpeechDurationMs; t_ms += 10) {
+ // Each turn in this for loop is 10 ms.
+ while (next_input_time_ms <= t_ms) {
+ // Insert one 30 ms speech frame.
+ uint8_t payload[kPayloadBytes] = {0};
+ RTPHeader rtp_info;
+ PopulateRtpInfo(seq_no, timestamp, &rtp_info);
+ ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload));
+ ++seq_no;
+ timestamp += kSamples;
+ next_input_time_ms += static_cast<double>(kFrameSizeMs) * drift_factor;
+ }
+ // Pull out data once.
+ ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
+ ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
+ }
+
+ EXPECT_EQ(AudioFrame::kNormalSpeech, out_frame_.speech_type_);
+ absl::optional<uint32_t> playout_timestamp = neteq_->GetPlayoutTimestamp();
+ ASSERT_TRUE(playout_timestamp);
+ int32_t delay_before = timestamp - *playout_timestamp;
+
+ // Insert CNG for 1 minute (= 60000 ms).
+ const int kCngPeriodMs = 100;
+ const int kCngPeriodSamples = kCngPeriodMs * 16; // Period in 16 kHz samples.
+ const int kCngDurationMs = 60000;
+ for (; t_ms < kSpeechDurationMs + kCngDurationMs; t_ms += 10) {
+ // Each turn in this for loop is 10 ms.
+ while (next_input_time_ms <= t_ms) {
+ // Insert one CNG frame each 100 ms.
+ uint8_t payload[kPayloadBytes];
+ size_t payload_len;
+ RTPHeader rtp_info;
+ PopulateCng(seq_no, timestamp, &rtp_info, payload, &payload_len);
+ ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, rtc::ArrayView<const uint8_t>(
+ payload, payload_len)));
+ ++seq_no;
+ timestamp += kCngPeriodSamples;
+ next_input_time_ms += static_cast<double>(kCngPeriodMs) * drift_factor;
+ }
+ // Pull out data once.
+ ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
+ ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
+ }
+
+ EXPECT_EQ(AudioFrame::kCNG, out_frame_.speech_type_);
+
+ if (network_freeze_ms > 0) {
+ // First keep pulling audio for `network_freeze_ms` without inserting
+ // any data, then insert CNG data corresponding to `network_freeze_ms`
+ // without pulling any output audio.
+ const double loop_end_time = t_ms + network_freeze_ms;
+ for (; t_ms < loop_end_time; t_ms += 10) {
+ // Pull out data once.
+ ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
+ ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
+ EXPECT_EQ(AudioFrame::kCNG, out_frame_.speech_type_);
+ }
+ bool pull_once = pull_audio_during_freeze;
+ // If `pull_once` is true, GetAudio will be called once half-way through
+ // the network recovery period.
+ double pull_time_ms = (t_ms + next_input_time_ms) / 2;
+ while (next_input_time_ms <= t_ms) {
+ if (pull_once && next_input_time_ms >= pull_time_ms) {
+ pull_once = false;
+ // Pull out data once.
+ ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
+ ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
+ EXPECT_EQ(AudioFrame::kCNG, out_frame_.speech_type_);
+ t_ms += 10;
+ }
+ // Insert one CNG frame each 100 ms.
+ uint8_t payload[kPayloadBytes];
+ size_t payload_len;
+ RTPHeader rtp_info;
+ PopulateCng(seq_no, timestamp, &rtp_info, payload, &payload_len);
+ ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, rtc::ArrayView<const uint8_t>(
+ payload, payload_len)));
+ ++seq_no;
+ timestamp += kCngPeriodSamples;
+ next_input_time_ms += kCngPeriodMs * drift_factor;
+ }
+ }
+
+ // Insert speech again until output type is speech.
+ double speech_restart_time_ms = t_ms;
+ while (out_frame_.speech_type_ != AudioFrame::kNormalSpeech) {
+    // Each turn in this loop is 10 ms.
+ while (next_input_time_ms <= t_ms) {
+ // Insert one 30 ms speech frame.
+ uint8_t payload[kPayloadBytes] = {0};
+ RTPHeader rtp_info;
+ PopulateRtpInfo(seq_no, timestamp, &rtp_info);
+ ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload));
+ ++seq_no;
+ timestamp += kSamples;
+ next_input_time_ms += kFrameSizeMs * drift_factor;
+ }
+ // Pull out data once.
+ ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
+ ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
+ // Increase clock.
+ t_ms += 10;
+ }
+
+ // Check that the speech starts again within reasonable time.
+ double time_until_speech_returns_ms = t_ms - speech_restart_time_ms;
+ EXPECT_LT(time_until_speech_returns_ms, max_time_to_speech_ms);
+ playout_timestamp = neteq_->GetPlayoutTimestamp();
+ ASSERT_TRUE(playout_timestamp);
+ int32_t delay_after = timestamp - *playout_timestamp;
+  // Compare delay before and after, and make sure the difference is within
+  // `delay_tolerance_ms` (the factor 16 converts ms to 16 kHz samples).
+ EXPECT_LE(delay_after, delay_before + delay_tolerance_ms * 16);
+ EXPECT_GE(delay_after, delay_before - delay_tolerance_ms * 16);
+}
+
+void NetEqDecodingTestTwoInstances::SetUp() {
+ NetEqDecodingTest::SetUp();
+ config2_ = config_;
+}
+
+void NetEqDecodingTestTwoInstances::CreateSecondInstance() {
+ auto decoder_factory = CreateBuiltinAudioDecoderFactory();
+ neteq2_ =
+ DefaultNetEqFactory().CreateNetEq(config2_, decoder_factory, &clock_);
+ ASSERT_TRUE(neteq2_);
+ LoadDecoders(neteq2_.get());
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/test/neteq_decoding_test.h b/third_party/libwebrtc/modules/audio_coding/neteq/test/neteq_decoding_test.h
new file mode 100644
index 0000000000..456c397fdd
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/test/neteq_decoding_test.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_TEST_NETEQ_DECODING_TEST_H_
+#define MODULES_AUDIO_CODING_NETEQ_TEST_NETEQ_DECODING_TEST_H_
+
+#include <memory>
+#include <set>
+#include <string>
+
+#include "absl/strings/string_view.h"
+#include "api/audio/audio_frame.h"
+#include "api/neteq/neteq.h"
+#include "api/rtp_headers.h"
+#include "modules/audio_coding/neteq/tools/packet.h"
+#include "modules/audio_coding/neteq/tools/rtp_file_source.h"
+#include "system_wrappers/include/clock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+class NetEqDecodingTest : public ::testing::Test {
+ protected:
+ // NetEQ must be polled for data once every 10 ms.
+ // Thus, none of the constants below can be changed.
+ static constexpr int kTimeStepMs = 10;
+ static constexpr size_t kBlockSize8kHz = kTimeStepMs * 8;
+ static constexpr size_t kBlockSize16kHz = kTimeStepMs * 16;
+ static constexpr size_t kBlockSize32kHz = kTimeStepMs * 32;
+ static constexpr size_t kBlockSize48kHz = kTimeStepMs * 48;
+ static constexpr int kInitSampleRateHz = 8000;
+
+ NetEqDecodingTest();
+ virtual void SetUp();
+ virtual void TearDown();
+ void OpenInputFile(absl::string_view rtp_file);
+ void Process();
+
+ void DecodeAndCompare(absl::string_view rtp_file,
+ absl::string_view output_checksum,
+ absl::string_view network_stats_checksum,
+ bool gen_ref);
+
+ static void PopulateRtpInfo(int frame_index,
+ int timestamp,
+ RTPHeader* rtp_info);
+ static void PopulateCng(int frame_index,
+ int timestamp,
+ RTPHeader* rtp_info,
+ uint8_t* payload,
+ size_t* payload_len);
+
+ void WrapTest(uint16_t start_seq_no,
+ uint32_t start_timestamp,
+ const std::set<uint16_t>& drop_seq_numbers,
+ bool expect_seq_no_wrap,
+ bool expect_timestamp_wrap);
+
+ void LongCngWithClockDrift(double drift_factor,
+ double network_freeze_ms,
+ bool pull_audio_during_freeze,
+ int delay_tolerance_ms,
+ int max_time_to_speech_ms);
+
+ SimulatedClock clock_;
+ std::unique_ptr<NetEq> neteq_;
+ NetEq::Config config_;
+ std::unique_ptr<test::RtpFileSource> rtp_source_;
+ std::unique_ptr<test::Packet> packet_;
+ AudioFrame out_frame_;
+ int output_sample_rate_;
+ int algorithmic_delay_ms_;
+};
+
+class NetEqDecodingTestTwoInstances : public NetEqDecodingTest {
+ public:
+ NetEqDecodingTestTwoInstances() : NetEqDecodingTest() {}
+
+ void SetUp() override;
+
+ void CreateSecondInstance();
+
+ protected:
+ std::unique_ptr<NetEq> neteq2_;
+ NetEq::Config config2_;
+};
+
+} // namespace webrtc
+#endif // MODULES_AUDIO_CODING_NETEQ_TEST_NETEQ_DECODING_TEST_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/test/neteq_ilbc_quality_test.cc b/third_party/libwebrtc/modules/audio_coding/neteq/test/neteq_ilbc_quality_test.cc
new file mode 100644
index 0000000000..1004141f16
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/test/neteq_ilbc_quality_test.cc
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <memory>
+
+#include "absl/flags/flag.h"
+#include "modules/audio_coding/codecs/ilbc/audio_encoder_ilbc.h"
+#include "modules/audio_coding/neteq/tools/neteq_quality_test.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "test/testsupport/file_utils.h"
+
+ABSL_FLAG(int, frame_size_ms, 20, "Codec frame size (milliseconds).");
+
+using ::testing::InitGoogleTest;
+
+namespace webrtc {
+namespace test {
+namespace {
+static const int kInputSampleRateKhz = 8;
+static const int kOutputSampleRateKhz = 8;
+} // namespace
+
+class NetEqIlbcQualityTest : public NetEqQualityTest {
+ protected:
+ NetEqIlbcQualityTest()
+ : NetEqQualityTest(absl::GetFlag(FLAGS_frame_size_ms),
+ kInputSampleRateKhz,
+ kOutputSampleRateKhz,
+ SdpAudioFormat("ilbc", 8000, 1)) {
+ // Flag validation
+ RTC_CHECK(absl::GetFlag(FLAGS_frame_size_ms) == 20 ||
+ absl::GetFlag(FLAGS_frame_size_ms) == 30 ||
+ absl::GetFlag(FLAGS_frame_size_ms) == 40 ||
+ absl::GetFlag(FLAGS_frame_size_ms) == 60)
+ << "Invalid frame size, should be 20, 30, 40, or 60 ms.";
+ }
+
+ void SetUp() override {
+ ASSERT_EQ(1u, channels_) << "iLBC supports only mono audio.";
+ AudioEncoderIlbcConfig config;
+ config.frame_size_ms = absl::GetFlag(FLAGS_frame_size_ms);
+ encoder_.reset(new AudioEncoderIlbcImpl(config, 102));
+ NetEqQualityTest::SetUp();
+ }
+
+ int EncodeBlock(int16_t* in_data,
+ size_t block_size_samples,
+ rtc::Buffer* payload,
+ size_t max_bytes) override {
+ const size_t kFrameSizeSamples = 80; // Samples per 10 ms.
+ size_t encoded_samples = 0;
+ uint32_t dummy_timestamp = 0;
+ AudioEncoder::EncodedInfo info;
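+    // Feed 10 ms sub-blocks until the encoder emits a frame; e.g. with the
+    // default 20 ms iLBC frame, the first call returns encoded_bytes == 0
+    // and the second call produces the packet.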
+ do {
+ info = encoder_->Encode(dummy_timestamp,
+ rtc::ArrayView<const int16_t>(
+ in_data + encoded_samples, kFrameSizeSamples),
+ payload);
+ encoded_samples += kFrameSizeSamples;
+ } while (info.encoded_bytes == 0);
+ return rtc::checked_cast<int>(info.encoded_bytes);
+ }
+
+ private:
+ std::unique_ptr<AudioEncoderIlbcImpl> encoder_;
+};
+
+TEST_F(NetEqIlbcQualityTest, Test) {
+ Simulate();
+}
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/test/neteq_isac_quality_test.cc b/third_party/libwebrtc/modules/audio_coding/neteq/test/neteq_isac_quality_test.cc
new file mode 100644
index 0000000000..6a096c307c
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/test/neteq_isac_quality_test.cc
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "absl/flags/flag.h"
+#include "modules/audio_coding/codecs/isac/fix/include/isacfix.h"
+#include "modules/audio_coding/neteq/tools/neteq_quality_test.h"
+
+ABSL_FLAG(int, bit_rate_kbps, 32, "Target bit rate (kbps).");
+
+using ::testing::InitGoogleTest;
+
+namespace webrtc {
+namespace test {
+namespace {
+static const int kIsacBlockDurationMs = 30;
+static const int kIsacInputSamplingKhz = 16;
+static const int kIsacOutputSamplingKhz = 16;
+} // namespace
+
+class NetEqIsacQualityTest : public NetEqQualityTest {
+ protected:
+ NetEqIsacQualityTest();
+ void SetUp() override;
+ void TearDown() override;
+ int EncodeBlock(int16_t* in_data,
+ size_t block_size_samples,
+ rtc::Buffer* payload,
+ size_t max_bytes) override;
+
+ private:
+ ISACFIX_MainStruct* isac_encoder_;
+ int bit_rate_kbps_;
+};
+
+NetEqIsacQualityTest::NetEqIsacQualityTest()
+ : NetEqQualityTest(kIsacBlockDurationMs,
+ kIsacInputSamplingKhz,
+ kIsacOutputSamplingKhz,
+ SdpAudioFormat("isac", 16000, 1)),
+ isac_encoder_(NULL),
+ bit_rate_kbps_(absl::GetFlag(FLAGS_bit_rate_kbps)) {
+ // Flag validation
+ RTC_CHECK(absl::GetFlag(FLAGS_bit_rate_kbps) >= 10 &&
+ absl::GetFlag(FLAGS_bit_rate_kbps) <= 32)
+ << "Invalid bit rate, should be between 10 and 32 kbps.";
+}
+
+void NetEqIsacQualityTest::SetUp() {
+ ASSERT_EQ(1u, channels_) << "iSAC supports only mono audio.";
+ // Create encoder memory.
+ WebRtcIsacfix_Create(&isac_encoder_);
+ ASSERT_TRUE(isac_encoder_ != NULL);
+ EXPECT_EQ(0, WebRtcIsacfix_EncoderInit(isac_encoder_, 1));
+ // Set bitrate and block length.
+ EXPECT_EQ(0, WebRtcIsacfix_Control(isac_encoder_, bit_rate_kbps_ * 1000,
+ kIsacBlockDurationMs));
+ NetEqQualityTest::SetUp();
+}
+
+void NetEqIsacQualityTest::TearDown() {
+ // Free memory.
+ EXPECT_EQ(0, WebRtcIsacfix_Free(isac_encoder_));
+ NetEqQualityTest::TearDown();
+}
+
+int NetEqIsacQualityTest::EncodeBlock(int16_t* in_data,
+ size_t block_size_samples,
+ rtc::Buffer* payload,
+ size_t max_bytes) {
+  // iSAC consumes 10 ms of input per Encode call.
+ const int subblocks = kIsacBlockDurationMs / 10;
+ const int subblock_length = 10 * kIsacInputSamplingKhz;
+ int value = 0;
+
+ int pointer = 0;
+ for (int idx = 0; idx < subblocks; idx++, pointer += subblock_length) {
+    // The iSAC encoder does not produce output (and returns 0) until it has
+    // received enough 10 ms sub-blocks to cover the full frame duration.
+ EXPECT_EQ(0, value);
+ payload->AppendData(max_bytes, [&](rtc::ArrayView<uint8_t> payload) {
+ value = WebRtcIsacfix_Encode(isac_encoder_, &in_data[pointer],
+ payload.data());
+ return (value >= 0) ? static_cast<size_t>(value) : 0;
+ });
+ }
+ EXPECT_GT(value, 0);
+ return value;
+}
+
+TEST_F(NetEqIsacQualityTest, Test) {
+ Simulate();
+}
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/test/neteq_opus_quality_test.cc b/third_party/libwebrtc/modules/audio_coding/neteq/test/neteq_opus_quality_test.cc
new file mode 100644
index 0000000000..5a2df24ef6
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/test/neteq_opus_quality_test.cc
@@ -0,0 +1,183 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "absl/flags/flag.h"
+#include "modules/audio_coding/codecs/opus/opus_inst.h"
+#include "modules/audio_coding/codecs/opus/opus_interface.h"
+#include "modules/audio_coding/neteq/tools/neteq_quality_test.h"
+
+ABSL_FLAG(int, bit_rate_kbps, 32, "Target bit rate (kbps).");
+
+ABSL_FLAG(int,
+ complexity,
+ 10,
+ "Complexity: 0 ~ 10 -- defined as in Opus"
+ "specification.");
+
+ABSL_FLAG(int, maxplaybackrate, 48000, "Maximum playback rate (Hz).");
+
+ABSL_FLAG(int, application, 0, "Application mode: 0 -- VOIP, 1 -- Audio.");
+
+ABSL_FLAG(int, reported_loss_rate, 10, "Reported percentile of packet loss.");
+
+ABSL_FLAG(bool, fec, false, "Enable FEC for encoding (-nofec to disable).");
+
+ABSL_FLAG(bool, dtx, false, "Enable DTX for encoding (-nodtx to disable).");
+
+ABSL_FLAG(int, sub_packets, 1, "Number of sub packets to repacketize.");
+
+using ::testing::InitGoogleTest;
+
+namespace webrtc {
+namespace test {
+namespace {
+
+static const int kOpusBlockDurationMs = 20;
+static const int kOpusSamplingKhz = 48;
+} // namespace
+
+class NetEqOpusQualityTest : public NetEqQualityTest {
+ protected:
+ NetEqOpusQualityTest();
+ void SetUp() override;
+ void TearDown() override;
+ int EncodeBlock(int16_t* in_data,
+ size_t block_size_samples,
+ rtc::Buffer* payload,
+ size_t max_bytes) override;
+
+ private:
+ WebRtcOpusEncInst* opus_encoder_;
+ OpusRepacketizer* repacketizer_;
+ size_t sub_block_size_samples_;
+ int bit_rate_kbps_;
+ bool fec_;
+ bool dtx_;
+ int complexity_;
+ int maxplaybackrate_;
+ int target_loss_rate_;
+ int sub_packets_;
+ int application_;
+};
+
+NetEqOpusQualityTest::NetEqOpusQualityTest()
+ : NetEqQualityTest(kOpusBlockDurationMs * absl::GetFlag(FLAGS_sub_packets),
+ kOpusSamplingKhz,
+ kOpusSamplingKhz,
+ SdpAudioFormat("opus", 48000, 2)),
+ opus_encoder_(NULL),
+ repacketizer_(NULL),
+ sub_block_size_samples_(
+ static_cast<size_t>(kOpusBlockDurationMs * kOpusSamplingKhz)),
+ bit_rate_kbps_(absl::GetFlag(FLAGS_bit_rate_kbps)),
+ fec_(absl::GetFlag(FLAGS_fec)),
+ dtx_(absl::GetFlag(FLAGS_dtx)),
+ complexity_(absl::GetFlag(FLAGS_complexity)),
+ maxplaybackrate_(absl::GetFlag(FLAGS_maxplaybackrate)),
+ target_loss_rate_(absl::GetFlag(FLAGS_reported_loss_rate)),
+ sub_packets_(absl::GetFlag(FLAGS_sub_packets)) {
+ // Flag validation
+ RTC_CHECK(absl::GetFlag(FLAGS_bit_rate_kbps) >= 6 &&
+ absl::GetFlag(FLAGS_bit_rate_kbps) <= 510)
+ << "Invalid bit rate, should be between 6 and 510 kbps.";
+
+ RTC_CHECK(absl::GetFlag(FLAGS_complexity) >= -1 &&
+ absl::GetFlag(FLAGS_complexity) <= 10)
+ << "Invalid complexity setting, should be between 0 and 10.";
+
+ RTC_CHECK(absl::GetFlag(FLAGS_application) == 0 ||
+ absl::GetFlag(FLAGS_application) == 1)
+ << "Invalid application mode, should be 0 or 1.";
+
+ RTC_CHECK(absl::GetFlag(FLAGS_reported_loss_rate) >= 0 &&
+ absl::GetFlag(FLAGS_reported_loss_rate) <= 100)
+ << "Invalid packet loss percentile, should be between 0 and 100.";
+
+ RTC_CHECK(absl::GetFlag(FLAGS_sub_packets) >= 1 &&
+ absl::GetFlag(FLAGS_sub_packets) <= 3)
+ << "Invalid number of sub packets, should be between 1 and 3.";
+
+ // Redefine decoder type if input is stereo.
+ if (channels_ > 1) {
+ audio_format_ = SdpAudioFormat("opus", 48000, 2,
+ SdpAudioFormat::Parameters{{"stereo", "1"}});
+ }
+ application_ = absl::GetFlag(FLAGS_application);
+}
+
+void NetEqOpusQualityTest::SetUp() {
+ // Create encoder memory.
+ WebRtcOpus_EncoderCreate(&opus_encoder_, channels_, application_, 48000);
+ ASSERT_TRUE(opus_encoder_);
+
+ // Create repacketizer.
+ repacketizer_ = opus_repacketizer_create();
+ ASSERT_TRUE(repacketizer_);
+
+ // Set bitrate.
+ EXPECT_EQ(0, WebRtcOpus_SetBitRate(opus_encoder_, bit_rate_kbps_ * 1000));
+ if (fec_) {
+ EXPECT_EQ(0, WebRtcOpus_EnableFec(opus_encoder_));
+ }
+ if (dtx_) {
+ EXPECT_EQ(0, WebRtcOpus_EnableDtx(opus_encoder_));
+ }
+ EXPECT_EQ(0, WebRtcOpus_SetComplexity(opus_encoder_, complexity_));
+ EXPECT_EQ(0, WebRtcOpus_SetMaxPlaybackRate(opus_encoder_, maxplaybackrate_));
+ EXPECT_EQ(0, WebRtcOpus_SetPacketLossRate(opus_encoder_, target_loss_rate_));
+ NetEqQualityTest::SetUp();
+}
+
+void NetEqOpusQualityTest::TearDown() {
+ // Free memory.
+ EXPECT_EQ(0, WebRtcOpus_EncoderFree(opus_encoder_));
+ opus_repacketizer_destroy(repacketizer_);
+ NetEqQualityTest::TearDown();
+}
+
+int NetEqOpusQualityTest::EncodeBlock(int16_t* in_data,
+ size_t block_size_samples,
+ rtc::Buffer* payload,
+ size_t max_bytes) {
+ EXPECT_EQ(block_size_samples, sub_block_size_samples_ * sub_packets_);
+ int16_t* pointer = in_data;
+ int value;
+ opus_repacketizer_init(repacketizer_);
+ for (int idx = 0; idx < sub_packets_; idx++) {
+ payload->AppendData(max_bytes, [&](rtc::ArrayView<uint8_t> payload) {
+ value = WebRtcOpus_Encode(opus_encoder_, pointer, sub_block_size_samples_,
+ max_bytes, payload.data());
+
+ Log() << "Encoded a frame with Opus mode "
+ << (value == 0 ? 0 : payload[0] >> 3) << std::endl;
+
+ return (value >= 0) ? static_cast<size_t>(value) : 0;
+ });
+
+ if (OPUS_OK !=
+ opus_repacketizer_cat(repacketizer_, payload->data(), value)) {
+ opus_repacketizer_init(repacketizer_);
+ // If the repacketization fails, we discard this frame.
+ return 0;
+ }
+ pointer += sub_block_size_samples_ * channels_;
+ }
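+  // At this point the repacketizer holds `sub_packets_` 20 ms frames; the
+  // call below merges them into a single output payload.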
+ value = opus_repacketizer_out(repacketizer_, payload->data(),
+ static_cast<opus_int32>(max_bytes));
+ EXPECT_GE(value, 0);
+ return value;
+}
+
+TEST_F(NetEqOpusQualityTest, Test) {
+ Simulate();
+}
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/test/neteq_pcm16b_quality_test.cc b/third_party/libwebrtc/modules/audio_coding/neteq/test/neteq_pcm16b_quality_test.cc
new file mode 100644
index 0000000000..c3e160cb66
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/test/neteq_pcm16b_quality_test.cc
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <memory>
+
+#include "absl/flags/flag.h"
+#include "modules/audio_coding/codecs/pcm16b/audio_encoder_pcm16b.h"
+#include "modules/audio_coding/neteq/tools/neteq_quality_test.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "test/testsupport/file_utils.h"
+
+ABSL_FLAG(int, frame_size_ms, 20, "Codec frame size (milliseconds).");
+
+using ::testing::InitGoogleTest;
+
+namespace webrtc {
+namespace test {
+namespace {
+static const int kInputSampleRateKhz = 48;
+static const int kOutputSampleRateKhz = 48;
+} // namespace
+
+class NetEqPcm16bQualityTest : public NetEqQualityTest {
+ protected:
+ NetEqPcm16bQualityTest()
+ : NetEqQualityTest(absl::GetFlag(FLAGS_frame_size_ms),
+ kInputSampleRateKhz,
+ kOutputSampleRateKhz,
+ SdpAudioFormat("l16", 48000, 1)) {
+ // Flag validation
+ RTC_CHECK(absl::GetFlag(FLAGS_frame_size_ms) >= 10 &&
+ absl::GetFlag(FLAGS_frame_size_ms) <= 60 &&
+ (absl::GetFlag(FLAGS_frame_size_ms) % 10) == 0)
+ << "Invalid frame size, should be 10, 20, ..., 60 ms.";
+ }
+
+ void SetUp() override {
+ AudioEncoderPcm16B::Config config;
+ config.frame_size_ms = absl::GetFlag(FLAGS_frame_size_ms);
+ config.sample_rate_hz = 48000;
+ config.num_channels = channels_;
+ encoder_.reset(new AudioEncoderPcm16B(config));
+ NetEqQualityTest::SetUp();
+ }
+
+ int EncodeBlock(int16_t* in_data,
+ size_t block_size_samples,
+ rtc::Buffer* payload,
+ size_t max_bytes) override {
+ const size_t kFrameSizeSamples = 480; // Samples per 10 ms.
+ size_t encoded_samples = 0;
+ uint32_t dummy_timestamp = 0;
+ AudioEncoder::EncodedInfo info;
+ do {
+ info = encoder_->Encode(dummy_timestamp,
+ rtc::ArrayView<const int16_t>(
+ in_data + encoded_samples, kFrameSizeSamples),
+ payload);
+ encoded_samples += kFrameSizeSamples;
+ } while (info.encoded_bytes == 0);
+ return rtc::checked_cast<int>(info.encoded_bytes);
+ }
+
+ private:
+ std::unique_ptr<AudioEncoderPcm16B> encoder_;
+};
+
+TEST_F(NetEqPcm16bQualityTest, Test) {
+ Simulate();
+}
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/test/neteq_pcmu_quality_test.cc b/third_party/libwebrtc/modules/audio_coding/neteq/test/neteq_pcmu_quality_test.cc
new file mode 100644
index 0000000000..d22170c623
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/test/neteq_pcmu_quality_test.cc
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <memory>
+
+#include "absl/flags/flag.h"
+#include "modules/audio_coding/codecs/g711/audio_encoder_pcm.h"
+#include "modules/audio_coding/neteq/tools/neteq_quality_test.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "test/testsupport/file_utils.h"
+
+ABSL_FLAG(int, frame_size_ms, 20, "Codec frame size (milliseconds).");
+
+using ::testing::InitGoogleTest;
+
+namespace webrtc {
+namespace test {
+namespace {
+static const int kInputSampleRateKhz = 8;
+static const int kOutputSampleRateKhz = 8;
+} // namespace
+
+class NetEqPcmuQualityTest : public NetEqQualityTest {
+ protected:
+ NetEqPcmuQualityTest()
+ : NetEqQualityTest(absl::GetFlag(FLAGS_frame_size_ms),
+ kInputSampleRateKhz,
+ kOutputSampleRateKhz,
+ SdpAudioFormat("pcmu", 8000, 1)) {
+ // Flag validation
+ RTC_CHECK(absl::GetFlag(FLAGS_frame_size_ms) >= 10 &&
+ absl::GetFlag(FLAGS_frame_size_ms) <= 60 &&
+ (absl::GetFlag(FLAGS_frame_size_ms) % 10) == 0)
+ << "Invalid frame size, should be 10, 20, ..., 60 ms.";
+ }
+
+ void SetUp() override {
+ ASSERT_EQ(1u, channels_) << "PCMu supports only mono audio.";
+ AudioEncoderPcmU::Config config;
+ config.frame_size_ms = absl::GetFlag(FLAGS_frame_size_ms);
+ encoder_.reset(new AudioEncoderPcmU(config));
+ NetEqQualityTest::SetUp();
+ }
+
+ int EncodeBlock(int16_t* in_data,
+ size_t block_size_samples,
+ rtc::Buffer* payload,
+ size_t max_bytes) override {
+ const size_t kFrameSizeSamples = 80; // Samples per 10 ms.
+ size_t encoded_samples = 0;
+ uint32_t dummy_timestamp = 0;
+ AudioEncoder::EncodedInfo info;
+ do {
+ info = encoder_->Encode(dummy_timestamp,
+ rtc::ArrayView<const int16_t>(
+ in_data + encoded_samples, kFrameSizeSamples),
+ payload);
+ encoded_samples += kFrameSizeSamples;
+ } while (info.encoded_bytes == 0);
+ return rtc::checked_cast<int>(info.encoded_bytes);
+ }
+
+ private:
+ std::unique_ptr<AudioEncoderPcmU> encoder_;
+};
+
+TEST_F(NetEqPcmuQualityTest, Test) {
+ Simulate();
+}
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/test/neteq_performance_unittest.cc b/third_party/libwebrtc/modules/audio_coding/neteq/test/neteq_performance_unittest.cc
new file mode 100644
index 0000000000..c06772af26
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/test/neteq_performance_unittest.cc
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/tools/neteq_performance_test.h"
+#include "system_wrappers/include/field_trial.h"
+#include "test/gtest.h"
+#include "test/testsupport/perf_test.h"
+
+// Runs a test with 10% packet losses and 10% clock drift, to exercise
+// both loss concealment and time-stretching code.
+TEST(NetEqPerformanceTest, 10_Pl_10_Drift) {
+ const int kSimulationTimeMs = 10000000;
+ const int kQuickSimulationTimeMs = 100000;
+ const int kLossPeriod = 10; // Drop every 10th packet.
+ const double kDriftFactor = 0.1;
+ int64_t runtime = webrtc::test::NetEqPerformanceTest::Run(
+ webrtc::field_trial::IsEnabled("WebRTC-QuickPerfTest")
+ ? kQuickSimulationTimeMs
+ : kSimulationTimeMs,
+ kLossPeriod, kDriftFactor);
+ ASSERT_GT(runtime, 0);
+ webrtc::test::PrintResult("neteq_performance", "", "10_pl_10_drift", runtime,
+ "ms", true);
+}
+
+// Runs a test with neither packet losses nor clock drift, to put
+// emphasis on the "good-weather" code path, which is presumably much
+// more lightweight.
+TEST(NetEqPerformanceTest, 0_Pl_0_Drift) {
+ const int kSimulationTimeMs = 10000000;
+ const int kQuickSimulationTimeMs = 100000;
+ const int kLossPeriod = 0; // No losses.
+ const double kDriftFactor = 0.0; // No clock drift.
+ int64_t runtime = webrtc::test::NetEqPerformanceTest::Run(
+ webrtc::field_trial::IsEnabled("WebRTC-QuickPerfTest")
+ ? kQuickSimulationTimeMs
+ : kSimulationTimeMs,
+ kLossPeriod, kDriftFactor);
+ ASSERT_GT(runtime, 0);
+ webrtc::test::PrintResult("neteq_performance", "", "0_pl_0_drift", runtime,
+ "ms", true);
+}
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/test/neteq_speed_test.cc b/third_party/libwebrtc/modules/audio_coding/neteq/test/neteq_speed_test.cc
new file mode 100644
index 0000000000..a72b2009eb
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/test/neteq_speed_test.cc
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stdio.h>
+
+#include <iostream>
+#include <vector>
+
+#include "absl/flags/flag.h"
+#include "absl/flags/parse.h"
+#include "modules/audio_coding/neteq/tools/neteq_performance_test.h"
+#include "rtc_base/checks.h"
+
+// Define command line flags.
+ABSL_FLAG(int, runtime_ms, 10000, "Simulated runtime in ms.");
+ABSL_FLAG(int, lossrate, 10, "Packet lossrate; drop every N packets.");
+ABSL_FLAG(float, drift, 0.1f, "Clockdrift factor.");
+
+int main(int argc, char* argv[]) {
+ std::vector<char*> args = absl::ParseCommandLine(argc, argv);
+ std::string program_name = args[0];
+ std::string usage =
+ "Tool for measuring the speed of NetEq.\n"
+ "Usage: " +
+ program_name +
+ " [options]\n\n"
+ " --runtime_ms=N runtime in ms; default is 10000 ms\n"
+ " --lossrate=N drop every N packets; default is 10\n"
+ " --drift=F clockdrift factor between 0.0 and 1.0; "
+ "default is 0.1\n";
+ if (args.size() != 1) {
+ printf("%s", usage.c_str());
+ return 1;
+ }
+ RTC_CHECK_GT(absl::GetFlag(FLAGS_runtime_ms), 0);
+ RTC_CHECK_GE(absl::GetFlag(FLAGS_lossrate), 0);
+ RTC_CHECK(absl::GetFlag(FLAGS_drift) >= 0.0 &&
+ absl::GetFlag(FLAGS_drift) < 1.0);
+
+ int64_t result = webrtc::test::NetEqPerformanceTest::Run(
+ absl::GetFlag(FLAGS_runtime_ms), absl::GetFlag(FLAGS_lossrate),
+ absl::GetFlag(FLAGS_drift));
+ if (result <= 0) {
+ std::cout << "There was an error" << std::endl;
+ return -1;
+ }
+
+ std::cout << "Simulation done" << std::endl;
+ std::cout << "Runtime = " << result << " ms" << std::endl;
+ return 0;
+}
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/test/result_sink.cc b/third_party/libwebrtc/modules/audio_coding/neteq/test/result_sink.cc
new file mode 100644
index 0000000000..f5d50dc859
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/test/result_sink.cc
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/test/result_sink.h"
+
+#include <string>
+
+#include "absl/strings/string_view.h"
+#include "rtc_base/ignore_wundef.h"
+#include "rtc_base/message_digest.h"
+#include "rtc_base/string_encode.h"
+#include "test/gtest.h"
+
+#ifdef WEBRTC_NETEQ_UNITTEST_BITEXACT
+RTC_PUSH_IGNORING_WUNDEF()
+#ifdef WEBRTC_ANDROID_PLATFORM_BUILD
+#include "external/webrtc/webrtc/modules/audio_coding/neteq/neteq_unittest.pb.h"
+#else
+#include "modules/audio_coding/neteq/neteq_unittest.pb.h"
+#endif
+RTC_POP_IGNORING_WUNDEF()
+#endif
+
+namespace webrtc {
+
+#ifdef WEBRTC_NETEQ_UNITTEST_BITEXACT
+void Convert(const webrtc::NetEqNetworkStatistics& stats_raw,
+ webrtc::neteq_unittest::NetEqNetworkStatistics* stats) {
+ stats->set_current_buffer_size_ms(stats_raw.current_buffer_size_ms);
+ stats->set_preferred_buffer_size_ms(stats_raw.preferred_buffer_size_ms);
+ stats->set_jitter_peaks_found(stats_raw.jitter_peaks_found);
+ stats->set_expand_rate(stats_raw.expand_rate);
+ stats->set_speech_expand_rate(stats_raw.speech_expand_rate);
+ stats->set_preemptive_rate(stats_raw.preemptive_rate);
+ stats->set_accelerate_rate(stats_raw.accelerate_rate);
+ stats->set_secondary_decoded_rate(stats_raw.secondary_decoded_rate);
+ stats->set_secondary_discarded_rate(stats_raw.secondary_discarded_rate);
+ stats->set_mean_waiting_time_ms(stats_raw.mean_waiting_time_ms);
+ stats->set_median_waiting_time_ms(stats_raw.median_waiting_time_ms);
+ stats->set_min_waiting_time_ms(stats_raw.min_waiting_time_ms);
+ stats->set_max_waiting_time_ms(stats_raw.max_waiting_time_ms);
+}
+
+void AddMessage(FILE* file,
+ rtc::MessageDigest* digest,
+ absl::string_view message) {
+ int32_t size = message.length();
+ if (file)
+ ASSERT_EQ(1u, fwrite(&size, sizeof(size), 1, file));
+ digest->Update(&size, sizeof(size));
+
+ if (file)
+ ASSERT_EQ(static_cast<size_t>(size),
+ fwrite(message.data(), sizeof(char), size, file));
+ digest->Update(message.data(), sizeof(char) * size);
+}
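+// AddMessage thus frames each record as a host-endian int32 byte count
+// followed by the raw bytes, mirroring exactly the same bytes into the
+// digest as into the optional reference file.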
+
+#endif // WEBRTC_NETEQ_UNITTEST_BITEXACT
+
+ResultSink::ResultSink(absl::string_view output_file)
+ : output_fp_(nullptr),
+ digest_(rtc::MessageDigestFactory::Create(rtc::DIGEST_SHA_1)) {
+ if (!output_file.empty()) {
+ output_fp_ = fopen(std::string(output_file).c_str(), "wb");
+ EXPECT_TRUE(output_fp_ != NULL);
+ }
+}
+
+ResultSink::~ResultSink() {
+ if (output_fp_)
+ fclose(output_fp_);
+}
+
+void ResultSink::AddResult(const NetEqNetworkStatistics& stats_raw) {
+#ifdef WEBRTC_NETEQ_UNITTEST_BITEXACT
+ neteq_unittest::NetEqNetworkStatistics stats;
+ Convert(stats_raw, &stats);
+
+ std::string stats_string;
+ ASSERT_TRUE(stats.SerializeToString(&stats_string));
+ AddMessage(output_fp_, digest_.get(), stats_string);
+#else
+ FAIL() << "Writing to reference file requires Proto Buffer.";
+#endif // WEBRTC_NETEQ_UNITTEST_BITEXACT
+}
+
+void ResultSink::VerifyChecksum(absl::string_view checksum) {
+ std::string buffer;
+ buffer.resize(digest_->Size());
+ digest_->Finish(buffer.data(), buffer.size());
+ const std::string result = rtc::hex_encode(buffer);
+ if (checksum.size() == result.size()) {
+ EXPECT_EQ(checksum, result);
+ } else {
+ // Check that the result is one of the '|'-separated checksums.
+ EXPECT_NE(checksum.find(result), absl::string_view::npos)
+ << result << " should be one of these:\n"
+ << checksum;
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/test/result_sink.h b/third_party/libwebrtc/modules/audio_coding/neteq/test/result_sink.h
new file mode 100644
index 0000000000..c6923d7a7f
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/test/result_sink.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_TEST_RESULT_SINK_H_
+#define MODULES_AUDIO_CODING_NETEQ_TEST_RESULT_SINK_H_
+
+#include <cstdio>
+#include <memory>
+#include <string>
+
+#include "absl/strings/string_view.h"
+#include "api/neteq/neteq.h"
+#include "rtc_base/message_digest.h"
+
+namespace webrtc {
+
+class ResultSink {
+ public:
+ explicit ResultSink(absl::string_view output_file);
+ ~ResultSink();
+
+ template <typename T>
+ void AddResult(const T* test_results, size_t length);
+
+ void AddResult(const NetEqNetworkStatistics& stats);
+
+ void VerifyChecksum(absl::string_view ref_check_sum);
+
+ private:
+ FILE* output_fp_;
+ std::unique_ptr<rtc::MessageDigest> digest_;
+};
+
+template <typename T>
+void ResultSink::AddResult(const T* test_results, size_t length) {
+ if (output_fp_) {
+ ASSERT_EQ(length, fwrite(test_results, sizeof(T), length, output_fp_));
+ }
+ digest_->Update(test_results, sizeof(T) * length);
+}
+
+} // namespace webrtc
+#endif // MODULES_AUDIO_CODING_NETEQ_TEST_RESULT_SINK_H_
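
For context, a minimal sketch of how a test could drive `ResultSink`, assuming a gtest environment; the sample buffer and the checksum strings below are hypothetical placeholders, not values from this patch:

```
// Hedged usage sketch; the checksums are placeholders.
#include "modules/audio_coding/neteq/test/result_sink.h"

namespace webrtc {

void ExampleChecksumRun(const NetEqNetworkStatistics& stats) {
  ResultSink sink("");  // Empty file name: update the digest only.
  int16_t samples[320] = {0};
  sink.AddResult(samples, 320);  // Templated overload hashes raw samples.
  sink.AddResult(stats);  // Proto-based overload; needs the bitexact build.
  // Several '|'-separated reference checksums may be given, e.g. one per
  // platform; VerifyChecksum() accepts a match against any of them.
  sink.VerifyChecksum("checksum_a|checksum_b");
}

}  // namespace webrtc
```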
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/time_stretch.cc b/third_party/libwebrtc/modules/audio_coding/neteq/time_stretch.cc
new file mode 100644
index 0000000000..b89be0608d
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/time_stretch.cc
@@ -0,0 +1,216 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/time_stretch.h"
+
+#include <algorithm> // min, max
+#include <memory>
+
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+#include "modules/audio_coding/neteq/background_noise.h"
+#include "modules/audio_coding/neteq/cross_correlation.h"
+#include "modules/audio_coding/neteq/dsp_helper.h"
+#include "rtc_base/numerics/safe_conversions.h"
+
+namespace webrtc {
+
+TimeStretch::ReturnCodes TimeStretch::Process(const int16_t* input,
+ size_t input_len,
+ bool fast_mode,
+ AudioMultiVector* output,
+ size_t* length_change_samples) {
+ // Pre-calculate common multiplication with `fs_mult_`.
+ size_t fs_mult_120 =
+ static_cast<size_t>(fs_mult_ * 120); // Corresponds to 15 ms.
+
+ const int16_t* signal;
+ std::unique_ptr<int16_t[]> signal_array;
+ size_t signal_len;
+ if (num_channels_ == 1) {
+ signal = input;
+ signal_len = input_len;
+ } else {
+ // We want `signal` to be only the first channel of `input`, which is
+ // interleaved. Thus, we take the first sample, skip forward `num_channels_`
+ // samples, and continue like that.
+ signal_len = input_len / num_channels_;
+ signal_array.reset(new int16_t[signal_len]);
+ signal = signal_array.get();
+ size_t j = kRefChannel;
+ for (size_t i = 0; i < signal_len; ++i) {
+ signal_array[i] = input[j];
+ j += num_channels_;
+ }
+ }
+
+ // Find maximum absolute value of input signal.
+ max_input_value_ = WebRtcSpl_MaxAbsValueW16(signal, signal_len);
+
+ // Downsample to 4 kHz sample rate and calculate auto-correlation.
+ DspHelper::DownsampleTo4kHz(signal, signal_len, kDownsampledLen,
+ sample_rate_hz_, true /* compensate delay*/,
+ downsampled_input_);
+ AutoCorrelation();
+
+ // Find the strongest correlation peak.
+ static const size_t kNumPeaks = 1;
+ size_t peak_index;
+ int16_t peak_value;
+ DspHelper::PeakDetection(auto_correlation_, kCorrelationLen, kNumPeaks,
+ fs_mult_, &peak_index, &peak_value);
+ // Assert that `peak_index` stays within boundaries.
+ RTC_DCHECK_LE(peak_index, (2 * kCorrelationLen - 1) * fs_mult_);
+
+ // Compensate peak_index for displaced starting position. The displacement
+ // happens in AutoCorrelation(). Here, `kMinLag` is in the down-sampled 4 kHz
+ // domain, while the `peak_index` is in the original sample rate; hence, the
+ // multiplication by fs_mult_ * 2.
+ peak_index += kMinLag * fs_mult_ * 2;
+ // Assert that `peak_index` stays within boundaries.
+ RTC_DCHECK_GE(peak_index, static_cast<size_t>(20 * fs_mult_));
+ RTC_DCHECK_LE(peak_index,
+ 20 * fs_mult_ + (2 * kCorrelationLen - 1) * fs_mult_);
+
+ // Calculate scaling to ensure that `peak_index` samples can be square-summed
+ // without overflowing.
+ int scaling = 31 - WebRtcSpl_NormW32(max_input_value_ * max_input_value_) -
+ WebRtcSpl_NormW32(static_cast<int32_t>(peak_index));
+ scaling = std::max(0, scaling);
+
+ // `vec1` starts at 15 ms minus one pitch period.
+ const int16_t* vec1 = &signal[fs_mult_120 - peak_index];
+ // `vec2` starts at 15 ms.
+ const int16_t* vec2 = &signal[fs_mult_120];
+ // Calculate energies for `vec1` and `vec2`, assuming they both contain
+ // `peak_index` samples.
+ int32_t vec1_energy =
+ WebRtcSpl_DotProductWithScale(vec1, vec1, peak_index, scaling);
+ int32_t vec2_energy =
+ WebRtcSpl_DotProductWithScale(vec2, vec2, peak_index, scaling);
+
+ // Calculate cross-correlation between `vec1` and `vec2`.
+ int32_t cross_corr =
+ WebRtcSpl_DotProductWithScale(vec1, vec2, peak_index, scaling);
+
+ // Check if the signal seems to be active speech or not (simple VAD).
+ bool active_speech =
+ SpeechDetection(vec1_energy, vec2_energy, peak_index, scaling);
+
+ int16_t best_correlation;
+ if (!active_speech) {
+ SetParametersForPassiveSpeech(signal_len, &best_correlation, &peak_index);
+ } else {
+ // Calculate correlation:
+ // cross_corr / sqrt(vec1_energy * vec2_energy).
+
+ // Start with calculating scale values.
+ int energy1_scale = std::max(0, 16 - WebRtcSpl_NormW32(vec1_energy));
+ int energy2_scale = std::max(0, 16 - WebRtcSpl_NormW32(vec2_energy));
+
+ // Make sure total scaling is even (to simplify scale factor after sqrt).
+ if ((energy1_scale + energy2_scale) & 1) {
+ // The sum is odd.
+ energy1_scale += 1;
+ }
+
+ // Scale energies to int16_t.
+ int16_t vec1_energy_int16 =
+ static_cast<int16_t>(vec1_energy >> energy1_scale);
+ int16_t vec2_energy_int16 =
+ static_cast<int16_t>(vec2_energy >> energy2_scale);
+
+ // Calculate square-root of energy product.
+ int16_t sqrt_energy_prod =
+ WebRtcSpl_SqrtFloor(vec1_energy_int16 * vec2_energy_int16);
+
+ // Calculate cross_corr / sqrt(en1*en2) in Q14.
+ int temp_scale = 14 - (energy1_scale + energy2_scale) / 2;
+ cross_corr = WEBRTC_SPL_SHIFT_W32(cross_corr, temp_scale);
+ cross_corr = std::max(0, cross_corr); // Don't use if negative.
+ best_correlation = WebRtcSpl_DivW32W16(cross_corr, sqrt_energy_prod);
+ // Make sure `best_correlation` is no larger than 1 in Q14.
+ best_correlation = std::min(static_cast<int16_t>(16384), best_correlation);
+ }
+
+ // Check accelerate criteria and stretch the signal.
+ ReturnCodes return_value =
+ CheckCriteriaAndStretch(input, input_len, peak_index, best_correlation,
+ active_speech, fast_mode, output);
+ switch (return_value) {
+ case kSuccess:
+ case kSuccessLowEnergy:
+ *length_change_samples = peak_index;
+ break;
+ case kNoStretch:
+ case kError:
+ *length_change_samples = 0;
+ break;
+ }
+ return return_value;
+}
+
+void TimeStretch::AutoCorrelation() {
+ // Calculate correlation from lag kMinLag to lag kMaxLag in 4 kHz domain.
+ int32_t auto_corr[kCorrelationLen];
+ CrossCorrelationWithAutoShift(
+ &downsampled_input_[kMaxLag], &downsampled_input_[kMaxLag - kMinLag],
+ kCorrelationLen, kMaxLag - kMinLag, -1, auto_corr);
+
+ // Normalize correlation to 14 bits and write to `auto_correlation_`.
+ int32_t max_corr = WebRtcSpl_MaxAbsValueW32(auto_corr, kCorrelationLen);
+ int scaling = std::max(0, 17 - WebRtcSpl_NormW32(max_corr));
+ WebRtcSpl_VectorBitShiftW32ToW16(auto_correlation_, kCorrelationLen,
+ auto_corr, scaling);
+}
+
+bool TimeStretch::SpeechDetection(int32_t vec1_energy,
+ int32_t vec2_energy,
+ size_t peak_index,
+ int scaling) const {
+ // Check if the signal seems to be active speech or not (simple VAD).
+ // If (vec1_energy + vec2_energy) / (2 * peak_index) <=
+ // 8 * background_noise_energy, then we say that the signal contains no
+ // active speech.
+ // Rewrite the inequality as:
+ // (vec1_energy + vec2_energy) / 16 <= peak_index * background_noise_energy.
+ // The two sides of the inequality will be denoted `left_side` and
+ // `right_side`.
+ int32_t left_side = rtc::saturated_cast<int32_t>(
+ (static_cast<int64_t>(vec1_energy) + vec2_energy) / 16);
+ int32_t right_side;
+ if (background_noise_.initialized()) {
+ right_side = background_noise_.Energy(kRefChannel);
+ } else {
+ // If noise parameters have not been estimated, use a fixed threshold.
+ right_side = 75000;
+ }
+ int right_scale = 16 - WebRtcSpl_NormW32(right_side);
+ right_scale = std::max(0, right_scale);
+ left_side = left_side >> right_scale;
+ right_side =
+ rtc::dchecked_cast<int32_t>(peak_index) * (right_side >> right_scale);
+
+ // Scale `left_side` properly before comparing with `right_side`.
+ // (`scaling` is the scale factor before energy calculation, thus the scale
+ // factor for the energy is 2 * scaling.)
+ if (WebRtcSpl_NormW32(left_side) < 2 * scaling) {
+ // Cannot scale only `left_side`, must scale `right_side` too.
+ int temp_scale = WebRtcSpl_NormW32(left_side);
+ left_side = left_side << temp_scale;
+ right_side = right_side >> (2 * scaling - temp_scale);
+ } else {
+ left_side = left_side << 2 * scaling;
+ }
+ return left_side > right_side;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/time_stretch.h b/third_party/libwebrtc/modules/audio_coding/neteq/time_stretch.h
new file mode 100644
index 0000000000..f0ddaebeca
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/time_stretch.h
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_TIME_STRETCH_H_
+#define MODULES_AUDIO_CODING_NETEQ_TIME_STRETCH_H_
+
+#include <string.h> // memset, size_t
+
+#include "modules/audio_coding/neteq/audio_multi_vector.h"
+
+namespace webrtc {
+
+// Forward declarations.
+class BackgroundNoise;
+
+// This is the base class for Accelerate and PreemptiveExpand. This class
+// cannot be instantiated, but must be used through either of the derived
+// classes.
+class TimeStretch {
+ public:
+ enum ReturnCodes {
+ kSuccess = 0,
+ kSuccessLowEnergy = 1,
+ kNoStretch = 2,
+ kError = -1
+ };
+
+ TimeStretch(int sample_rate_hz,
+ size_t num_channels,
+ const BackgroundNoise& background_noise)
+ : sample_rate_hz_(sample_rate_hz),
+ fs_mult_(sample_rate_hz / 8000),
+ num_channels_(num_channels),
+ background_noise_(background_noise),
+ max_input_value_(0) {
+ RTC_DCHECK(sample_rate_hz_ == 8000 || sample_rate_hz_ == 16000 ||
+ sample_rate_hz_ == 32000 || sample_rate_hz_ == 48000);
+ RTC_DCHECK_GT(num_channels_, 0);
+ memset(auto_correlation_, 0, sizeof(auto_correlation_));
+ }
+
+ virtual ~TimeStretch() {}
+
+ TimeStretch(const TimeStretch&) = delete;
+ TimeStretch& operator=(const TimeStretch&) = delete;
+
+ // This method performs the processing common to both Accelerate and
+ // PreemptiveExpand.
+ ReturnCodes Process(const int16_t* input,
+ size_t input_len,
+ bool fast_mode,
+ AudioMultiVector* output,
+ size_t* length_change_samples);
+
+ protected:
+ // Sets the parameters `best_correlation` and `peak_index` to suitable
+ // values when the signal contains no active speech. This method must be
+ // implemented by the sub-classes.
+ virtual void SetParametersForPassiveSpeech(size_t input_length,
+ int16_t* best_correlation,
+ size_t* peak_index) const = 0;
+
+ // Checks the criteria for performing the time-stretching operation and,
+ // if possible, performs the time-stretching. This method must be implemented
+ // by the sub-classes.
+ virtual ReturnCodes CheckCriteriaAndStretch(
+ const int16_t* input,
+ size_t input_length,
+ size_t peak_index,
+ int16_t best_correlation,
+ bool active_speech,
+ bool fast_mode,
+ AudioMultiVector* output) const = 0;
+
+ static const size_t kCorrelationLen = 50;
+ static const size_t kLogCorrelationLen = 6; // >= log2(kCorrelationLen).
+ static const size_t kMinLag = 10;
+ static const size_t kMaxLag = 60;
+ static const size_t kDownsampledLen = kCorrelationLen + kMaxLag;
+ static const int kCorrelationThreshold = 14746; // 0.9 in Q14.
+ static constexpr size_t kRefChannel = 0; // First channel is reference.
+
+ const int sample_rate_hz_;
+ const int fs_mult_; // Sample rate multiplier = sample_rate_hz_ / 8000.
+ const size_t num_channels_;
+ const BackgroundNoise& background_noise_;
+ int16_t max_input_value_;
+ int16_t downsampled_input_[kDownsampledLen];
+ // Adding 1 to the size of `auto_correlation_` because of how it is used
+ // by the peak-detection algorithm.
+ int16_t auto_correlation_[kCorrelationLen + 1];
+
+ private:
+ // Calculates the auto-correlation of `downsampled_input_` and writes the
+ // result to `auto_correlation_`.
+ void AutoCorrelation();
+
+ // Performs a simple voice-activity detection based on the input parameters.
+ bool SpeechDetection(int32_t vec1_energy,
+ int32_t vec2_energy,
+ size_t peak_index,
+ int scaling) const;
+};
+
+} // namespace webrtc
+#endif // MODULES_AUDIO_CODING_NETEQ_TIME_STRETCH_H_
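
To make the contract of the two pure virtual methods concrete, here is a hedged, do-nothing subclass sketch; `NoOpStretch` is hypothetical, and it assumes `AudioMultiVector::PushBackInterleaved()` accepts an `rtc::ArrayView<const int16_t>`:

```
// Hypothetical subclass showing the minimal TimeStretch contract.
class NoOpStretch : public webrtc::TimeStretch {
 public:
  using TimeStretch::TimeStretch;

 protected:
  void SetParametersForPassiveSpeech(size_t input_length,
                                     int16_t* best_correlation,
                                     size_t* peak_index) const override {
    *best_correlation = 0;  // Report no usable correlation.
    *peak_index = 0;
  }

  ReturnCodes CheckCriteriaAndStretch(const int16_t* input,
                                      size_t input_length,
                                      size_t peak_index,
                                      int16_t best_correlation,
                                      bool active_speech,
                                      bool fast_mode,
                                      webrtc::AudioMultiVector* output)
      const override {
    // Pass the input through unchanged and never stretch.
    output->PushBackInterleaved(
        rtc::ArrayView<const int16_t>(input, input_length));
    return kNoStretch;  // Process() will then report a zero length change.
  }
};
```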
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/time_stretch_unittest.cc b/third_party/libwebrtc/modules/audio_coding/neteq/time_stretch_unittest.cc
new file mode 100644
index 0000000000..da3a98229a
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/time_stretch_unittest.cc
@@ -0,0 +1,124 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+// Unit tests for Accelerate and PreemptiveExpand classes.
+
+#include <map>
+#include <memory>
+
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+#include "modules/audio_coding/neteq/accelerate.h"
+#include "modules/audio_coding/neteq/background_noise.h"
+#include "modules/audio_coding/neteq/preemptive_expand.h"
+#include "modules/audio_coding/neteq/tools/input_audio_file.h"
+#include "rtc_base/checks.h"
+#include "test/gtest.h"
+#include "test/testsupport/file_utils.h"
+
+namespace webrtc {
+
+namespace {
+const size_t kNumChannels = 1;
+}
+
+TEST(TimeStretch, CreateAndDestroy) {
+ const int kSampleRate = 8000;
+ const int kOverlapSamples = 5 * kSampleRate / 8000;
+ BackgroundNoise bgn(kNumChannels);
+ Accelerate accelerate(kSampleRate, kNumChannels, bgn);
+ PreemptiveExpand preemptive_expand(kSampleRate, kNumChannels, bgn,
+ kOverlapSamples);
+}
+
+TEST(TimeStretch, CreateUsingFactory) {
+ const int kSampleRate = 8000;
+ const int kOverlapSamples = 5 * kSampleRate / 8000;
+ BackgroundNoise bgn(kNumChannels);
+
+ AccelerateFactory accelerate_factory;
+ Accelerate* accelerate =
+ accelerate_factory.Create(kSampleRate, kNumChannels, bgn);
+ EXPECT_TRUE(accelerate != NULL);
+ delete accelerate;
+
+ PreemptiveExpandFactory preemptive_expand_factory;
+ PreemptiveExpand* preemptive_expand = preemptive_expand_factory.Create(
+ kSampleRate, kNumChannels, bgn, kOverlapSamples);
+ EXPECT_TRUE(preemptive_expand != NULL);
+ delete preemptive_expand;
+}
+
+class TimeStretchTest : public ::testing::Test {
+ protected:
+ TimeStretchTest()
+ : input_file_(new test::InputAudioFile(
+ test::ResourcePath("audio_coding/testfile32kHz", "pcm"))),
+ sample_rate_hz_(32000),
+ block_size_(30 * sample_rate_hz_ / 1000), // 30 ms
+ audio_(new int16_t[block_size_]),
+ background_noise_(kNumChannels) {}
+
+ const int16_t* Next30Ms() {
+ RTC_CHECK(input_file_->Read(block_size_, audio_.get()));
+ return audio_.get();
+ }
+
+ // Returns the total length change (in samples) that the accelerate operation
+ // resulted in during the run.
+ size_t TestAccelerate(size_t loops, bool fast_mode) {
+ Accelerate accelerate(sample_rate_hz_, kNumChannels, background_noise_);
+ size_t total_length_change = 0;
+ for (size_t i = 0; i < loops; ++i) {
+ AudioMultiVector output(kNumChannels);
+ size_t length_change;
+ UpdateReturnStats(accelerate.Process(Next30Ms(), block_size_, fast_mode,
+ &output, &length_change));
+ total_length_change += length_change;
+ }
+ return total_length_change;
+ }
+
+ void UpdateReturnStats(TimeStretch::ReturnCodes ret) {
+ switch (ret) {
+ case TimeStretch::kSuccess:
+ case TimeStretch::kSuccessLowEnergy:
+ case TimeStretch::kNoStretch:
+ ++return_stats_[ret];
+ break;
+ case TimeStretch::kError:
+ FAIL() << "Process returned an error";
+ }
+ }
+
+ std::unique_ptr<test::InputAudioFile> input_file_;
+ const int sample_rate_hz_;
+ const size_t block_size_;
+ std::unique_ptr<int16_t[]> audio_;
+ std::map<TimeStretch::ReturnCodes, int> return_stats_;
+ BackgroundNoise background_noise_;
+};
+
+TEST_F(TimeStretchTest, Accelerate) {
+ // TestAccelerate returns the total length change in samples.
+ EXPECT_EQ(15268U, TestAccelerate(100, false));
+ EXPECT_EQ(9, return_stats_[TimeStretch::kSuccess]);
+ EXPECT_EQ(58, return_stats_[TimeStretch::kSuccessLowEnergy]);
+ EXPECT_EQ(33, return_stats_[TimeStretch::kNoStretch]);
+}
+
+TEST_F(TimeStretchTest, AccelerateFastMode) {
+ // TestAccelerate returns the total length change in samples.
+ EXPECT_EQ(21400U, TestAccelerate(100, true));
+ EXPECT_EQ(31, return_stats_[TimeStretch::kSuccess]);
+ EXPECT_EQ(58, return_stats_[TimeStretch::kSuccessLowEnergy]);
+ EXPECT_EQ(11, return_stats_[TimeStretch::kNoStretch]);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/timestamp_scaler.cc b/third_party/libwebrtc/modules/audio_coding/neteq/timestamp_scaler.cc
new file mode 100644
index 0000000000..59177d027f
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/timestamp_scaler.cc
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/timestamp_scaler.h"
+
+#include "api/audio_codecs/audio_format.h"
+#include "modules/audio_coding/neteq/decoder_database.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+void TimestampScaler::Reset() {
+ first_packet_received_ = false;
+}
+
+void TimestampScaler::ToInternal(Packet* packet) {
+ if (!packet) {
+ return;
+ }
+ packet->timestamp = ToInternal(packet->timestamp, packet->payload_type);
+}
+
+void TimestampScaler::ToInternal(PacketList* packet_list) {
+ PacketList::iterator it;
+ for (it = packet_list->begin(); it != packet_list->end(); ++it) {
+ ToInternal(&(*it));
+ }
+}
+
+uint32_t TimestampScaler::ToInternal(uint32_t external_timestamp,
+ uint8_t rtp_payload_type) {
+ const DecoderDatabase::DecoderInfo* info =
+ decoder_database_.GetDecoderInfo(rtp_payload_type);
+ if (!info) {
+ // Payload type is unknown. Do not scale.
+ return external_timestamp;
+ }
+ if (!(info->IsComfortNoise() || info->IsDtmf())) {
+ // CNG and DTMF packets keep the current scaling settings; only update
+ // them for other payload types.
+ numerator_ = info->SampleRateHz();
+ if (info->GetFormat().clockrate_hz == 0) {
+ // If the clockrate is invalid (i.e. with an old-style external codec)
+ // we cannot do any timestamp scaling.
+ denominator_ = numerator_;
+ } else {
+ denominator_ = info->GetFormat().clockrate_hz;
+ }
+ }
+ if (numerator_ != denominator_) {
+ // We have a scale factor != 1.
+ if (!first_packet_received_) {
+ external_ref_ = external_timestamp;
+ internal_ref_ = external_timestamp;
+ first_packet_received_ = true;
+ }
+ const int64_t external_diff = int64_t{external_timestamp} - external_ref_;
+ RTC_DCHECK_GT(denominator_, 0);
+ external_ref_ = external_timestamp;
+ internal_ref_ += (external_diff * numerator_) / denominator_;
+ return internal_ref_;
+ } else {
+ // No scaling.
+ return external_timestamp;
+ }
+}
+
+uint32_t TimestampScaler::ToExternal(uint32_t internal_timestamp) const {
+ if (!first_packet_received_ || (numerator_ == denominator_)) {
+ // Not initialized, or scale factor is 1.
+ return internal_timestamp;
+ } else {
+ const int64_t internal_diff = int64_t{internal_timestamp} - internal_ref_;
+ RTC_DCHECK_GT(numerator_, 0);
+ // Do not update references in this method.
+ // Switch `denominator_` and `numerator_` to convert the other way.
+ return external_ref_ + (internal_diff * denominator_) / numerator_;
+ }
+}
+
+} // namespace webrtc
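
A hedged worked example of the arithmetic above, using G.722: its RTP clock rate is 8000 Hz but it decodes at 16000 Hz, so `numerator_` = 16000 and `denominator_` = 8000 (the timestamp values are illustrative):

```
// First packet:  external = 1000 -> external_ref_ = internal_ref_ = 1000.
// Second packet: external = 1160 (one 20 ms frame = 160 external ticks)
//   external_diff = 1160 - 1000 = 160
//   internal_ref_ += 160 * 16000 / 8000 = 320  -> ToInternal() returns 1320.
// Inverse: ToExternal(1320) = external_ref_ +
//   (1320 - internal_ref_) * denominator_ / numerator_ = 1160 + 0 = 1160.
```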
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/timestamp_scaler.h b/third_party/libwebrtc/modules/audio_coding/neteq/timestamp_scaler.h
new file mode 100644
index 0000000000..f42ce7207a
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/timestamp_scaler.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_TIMESTAMP_SCALER_H_
+#define MODULES_AUDIO_CODING_NETEQ_TIMESTAMP_SCALER_H_
+
+#include "modules/audio_coding/neteq/packet.h"
+
+namespace webrtc {
+
+// Forward declaration.
+class DecoderDatabase;
+
+// This class scales timestamps for codecs that need timestamp scaling.
+// This is done for codecs where one RTP timestamp does not correspond to
+// one sample.
+class TimestampScaler {
+ public:
+ explicit TimestampScaler(const DecoderDatabase& decoder_database)
+ : first_packet_received_(false),
+ numerator_(1),
+ denominator_(1),
+ external_ref_(0),
+ internal_ref_(0),
+ decoder_database_(decoder_database) {}
+
+ virtual ~TimestampScaler() {}
+
+ TimestampScaler(const TimestampScaler&) = delete;
+ TimestampScaler& operator=(const TimestampScaler&) = delete;
+
+ // Start over.
+ virtual void Reset();
+
+ // Scale the timestamp in `packet` from external to internal.
+ virtual void ToInternal(Packet* packet);
+
+ // Scale the timestamp for all packets in `packet_list` from external to
+ // internal.
+ virtual void ToInternal(PacketList* packet_list);
+
+ // Returns the internal equivalent of `external_timestamp`, given the
+ // RTP payload type `rtp_payload_type`.
+ virtual uint32_t ToInternal(uint32_t external_timestamp,
+ uint8_t rtp_payload_type);
+
+ // Scales back to external timestamp. This is the inverse of ToInternal().
+ virtual uint32_t ToExternal(uint32_t internal_timestamp) const;
+
+ private:
+ bool first_packet_received_;
+ int numerator_;
+ int denominator_;
+ uint32_t external_ref_;
+ uint32_t internal_ref_;
+ const DecoderDatabase& decoder_database_;
+};
+
+} // namespace webrtc
+#endif // MODULES_AUDIO_CODING_NETEQ_TIMESTAMP_SCALER_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/timestamp_scaler_unittest.cc b/third_party/libwebrtc/modules/audio_coding/neteq/timestamp_scaler_unittest.cc
new file mode 100644
index 0000000000..c2bb4dd95f
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/timestamp_scaler_unittest.cc
@@ -0,0 +1,324 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/timestamp_scaler.h"
+
+#include "api/audio_codecs/builtin_audio_decoder_factory.h"
+#include "modules/audio_coding/neteq/mock/mock_decoder_database.h"
+#include "modules/audio_coding/neteq/packet.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+using ::testing::_;
+using ::testing::Return;
+using ::testing::ReturnNull;
+
+namespace webrtc {
+
+TEST(TimestampScaler, TestNoScaling) {
+ MockDecoderDatabase db;
+ auto factory = CreateBuiltinAudioDecoderFactory();
+ // Use PCMu, because it doesn't use scaled timestamps.
+ const DecoderDatabase::DecoderInfo info(SdpAudioFormat("pcmu", 8000, 1),
+ absl::nullopt, factory.get());
+ static const uint8_t kRtpPayloadType = 0;
+ EXPECT_CALL(db, GetDecoderInfo(kRtpPayloadType))
+ .WillRepeatedly(Return(&info));
+
+ TimestampScaler scaler(db);
+ // Test both sides of the timestamp wrap-around.
+ for (uint32_t timestamp = 0xFFFFFFFF - 5; timestamp != 5; ++timestamp) {
+ // Scale to internal timestamp.
+ EXPECT_EQ(timestamp, scaler.ToInternal(timestamp, kRtpPayloadType));
+ // Scale back.
+ EXPECT_EQ(timestamp, scaler.ToExternal(timestamp));
+ }
+
+ EXPECT_CALL(db, Die()); // Called when database object is deleted.
+}
+
+TEST(TimestampScaler, TestNoScalingLargeStep) {
+ MockDecoderDatabase db;
+ auto factory = CreateBuiltinAudioDecoderFactory();
+ // Use PCMu, because it doesn't use scaled timestamps.
+ const DecoderDatabase::DecoderInfo info(SdpAudioFormat("pcmu", 8000, 1),
+ absl::nullopt, factory.get());
+ static const uint8_t kRtpPayloadType = 0;
+ EXPECT_CALL(db, GetDecoderInfo(kRtpPayloadType))
+ .WillRepeatedly(Return(&info));
+
+ TimestampScaler scaler(db);
+ // Test both sides of the timestamp wrap-around.
+ static const uint32_t kStep = 160;
+ uint32_t start_timestamp = 0;
+ // `external_timestamp` will be a large positive value.
+ start_timestamp = start_timestamp - 5 * kStep;
+ for (uint32_t timestamp = start_timestamp; timestamp != 5 * kStep;
+ timestamp += kStep) {
+ // Scale to internal timestamp.
+ EXPECT_EQ(timestamp, scaler.ToInternal(timestamp, kRtpPayloadType));
+ // Scale back.
+ EXPECT_EQ(timestamp, scaler.ToExternal(timestamp));
+ }
+
+ EXPECT_CALL(db, Die()); // Called when database object is deleted.
+}
+
+TEST(TimestampScaler, TestG722) {
+ MockDecoderDatabase db;
+ auto factory = CreateBuiltinAudioDecoderFactory();
+ // Use G722, which has a factor 2 scaling.
+ const DecoderDatabase::DecoderInfo info(SdpAudioFormat("g722", 8000, 1),
+ absl::nullopt, factory.get());
+ static const uint8_t kRtpPayloadType = 17;
+ EXPECT_CALL(db, GetDecoderInfo(kRtpPayloadType))
+ .WillRepeatedly(Return(&info));
+
+ TimestampScaler scaler(db);
+ // Test both sides of the timestamp wrap-around.
+ uint32_t external_timestamp = 0xFFFFFFFF - 5;
+ uint32_t internal_timestamp = external_timestamp;
+ for (; external_timestamp != 5; ++external_timestamp) {
+ // Scale to internal timestamp.
+ EXPECT_EQ(internal_timestamp,
+ scaler.ToInternal(external_timestamp, kRtpPayloadType));
+ // Scale back.
+ EXPECT_EQ(external_timestamp, scaler.ToExternal(internal_timestamp));
+ internal_timestamp += 2;
+ }
+
+ EXPECT_CALL(db, Die()); // Called when database object is deleted.
+}
+
+TEST(TimestampScaler, TestG722LargeStep) {
+ MockDecoderDatabase db;
+ auto factory = CreateBuiltinAudioDecoderFactory();
+ // Use G722, which has a factor 2 scaling.
+ const DecoderDatabase::DecoderInfo info(SdpAudioFormat("g722", 8000, 1),
+ absl::nullopt, factory.get());
+ static const uint8_t kRtpPayloadType = 17;
+ EXPECT_CALL(db, GetDecoderInfo(kRtpPayloadType))
+ .WillRepeatedly(Return(&info));
+
+ TimestampScaler scaler(db);
+ // Test both sides of the timestamp wrap-around.
+ static const uint32_t kStep = 320;
+ uint32_t external_timestamp = 0;
+ // `external_timestamp` will be a large positive value.
+ external_timestamp = external_timestamp - 5 * kStep;
+ uint32_t internal_timestamp = external_timestamp;
+ for (; external_timestamp != 5 * kStep; external_timestamp += kStep) {
+ // Scale to internal timestamp.
+ EXPECT_EQ(internal_timestamp,
+ scaler.ToInternal(external_timestamp, kRtpPayloadType));
+ // Scale back.
+ EXPECT_EQ(external_timestamp, scaler.ToExternal(internal_timestamp));
+ // Internal timestamp should be incremented with twice the step.
+ internal_timestamp += 2 * kStep;
+ }
+
+ EXPECT_CALL(db, Die()); // Called when database object is deleted.
+}
+
+TEST(TimestampScaler, TestG722WithCng) {
+ MockDecoderDatabase db;
+ auto factory = CreateBuiltinAudioDecoderFactory();
+ // Use G722, which has a factor 2 scaling.
+ const DecoderDatabase::DecoderInfo info_g722(SdpAudioFormat("g722", 8000, 1),
+ absl::nullopt, factory.get());
+ const DecoderDatabase::DecoderInfo info_cng(SdpAudioFormat("cn", 16000, 1),
+ absl::nullopt, factory.get());
+ static const uint8_t kRtpPayloadTypeG722 = 17;
+ static const uint8_t kRtpPayloadTypeCng = 13;
+ EXPECT_CALL(db, GetDecoderInfo(kRtpPayloadTypeG722))
+ .WillRepeatedly(Return(&info_g722));
+ EXPECT_CALL(db, GetDecoderInfo(kRtpPayloadTypeCng))
+ .WillRepeatedly(Return(&info_cng));
+
+ TimestampScaler scaler(db);
+ // Test both sides of the timestamp wrap-around.
+ uint32_t external_timestamp = 0xFFFFFFFF - 5;
+ uint32_t internal_timestamp = external_timestamp;
+ bool next_is_cng = false;
+ for (; external_timestamp != 5; ++external_timestamp) {
+ // Alternate between G.722 and CNG every other packet.
+ if (next_is_cng) {
+ // Scale to internal timestamp.
+ EXPECT_EQ(internal_timestamp,
+ scaler.ToInternal(external_timestamp, kRtpPayloadTypeCng));
+ next_is_cng = false;
+ } else {
+ // Scale to internal timestamp.
+ EXPECT_EQ(internal_timestamp,
+ scaler.ToInternal(external_timestamp, kRtpPayloadTypeG722));
+ next_is_cng = true;
+ }
+ // Scale back.
+ EXPECT_EQ(external_timestamp, scaler.ToExternal(internal_timestamp));
+ internal_timestamp += 2;
+ }
+
+ EXPECT_CALL(db, Die()); // Called when database object is deleted.
+}
+
+// Make sure that the method ToInternal(Packet* packet) is wired up correctly.
+// Since it is simply calling the other ToInternal method, we are not doing
+// as many tests here.
+TEST(TimestampScaler, TestG722Packet) {
+ MockDecoderDatabase db;
+ auto factory = CreateBuiltinAudioDecoderFactory();
+ // Use G722, which has a factor 2 scaling.
+ const DecoderDatabase::DecoderInfo info(SdpAudioFormat("g722", 8000, 1),
+ absl::nullopt, factory.get());
+ static const uint8_t kRtpPayloadType = 17;
+ EXPECT_CALL(db, GetDecoderInfo(kRtpPayloadType))
+ .WillRepeatedly(Return(&info));
+
+ TimestampScaler scaler(db);
+ // Test both sides of the timestamp wrap-around.
+ uint32_t external_timestamp = 0xFFFFFFFF - 5;
+ uint32_t internal_timestamp = external_timestamp;
+ Packet packet;
+ packet.payload_type = kRtpPayloadType;
+ for (; external_timestamp != 5; ++external_timestamp) {
+ packet.timestamp = external_timestamp;
+ // Scale to internal timestamp.
+ scaler.ToInternal(&packet);
+ EXPECT_EQ(internal_timestamp, packet.timestamp);
+ internal_timestamp += 2;
+ }
+
+ EXPECT_CALL(db, Die()); // Called when database object is deleted.
+}
+
+// Make sure that the method ToInternal(PacketList* packet_list) is wired up
+// correctly. Since it is simply calling the ToInternal(Packet* packet) method,
+// we are not doing as many tests here.
+TEST(TimestampScaler, TestG722PacketList) {
+ MockDecoderDatabase db;
+ auto factory = CreateBuiltinAudioDecoderFactory();
+ // Use G722, which has a factor 2 scaling.
+ const DecoderDatabase::DecoderInfo info(SdpAudioFormat("g722", 8000, 1),
+ absl::nullopt, factory.get());
+ static const uint8_t kRtpPayloadType = 17;
+ EXPECT_CALL(db, GetDecoderInfo(kRtpPayloadType))
+ .WillRepeatedly(Return(&info));
+
+ TimestampScaler scaler(db);
+ // Test both sides of the timestamp wrap-around.
+ uint32_t external_timestamp = 0xFFFFFFFF - 5;
+ uint32_t internal_timestamp = external_timestamp;
+ PacketList packet_list;
+ {
+ Packet packet1;
+ packet1.payload_type = kRtpPayloadType;
+ packet1.timestamp = external_timestamp;
+ Packet packet2;
+ packet2.payload_type = kRtpPayloadType;
+ packet2.timestamp = external_timestamp + 10;
+ packet_list.push_back(std::move(packet1));
+ packet_list.push_back(std::move(packet2));
+ }
+
+ scaler.ToInternal(&packet_list);
+ EXPECT_EQ(internal_timestamp, packet_list.front().timestamp);
+ packet_list.pop_front();
+ EXPECT_EQ(internal_timestamp + 20, packet_list.front().timestamp);
+
+ EXPECT_CALL(db, Die()); // Called when database object is deleted.
+}
+
+TEST(TimestampScaler, TestG722Reset) {
+ MockDecoderDatabase db;
+ auto factory = CreateBuiltinAudioDecoderFactory();
+ // Use G722, which has a factor 2 scaling.
+ const DecoderDatabase::DecoderInfo info(SdpAudioFormat("g722", 8000, 1),
+ absl::nullopt, factory.get());
+ static const uint8_t kRtpPayloadType = 17;
+ EXPECT_CALL(db, GetDecoderInfo(kRtpPayloadType))
+ .WillRepeatedly(Return(&info));
+
+ TimestampScaler scaler(db);
+ // Test both sides of the timestamp wrap-around.
+ uint32_t external_timestamp = 0xFFFFFFFF - 5;
+ uint32_t internal_timestamp = external_timestamp;
+ for (; external_timestamp != 5; ++external_timestamp) {
+ // Scale to internal timestamp.
+ EXPECT_EQ(internal_timestamp,
+ scaler.ToInternal(external_timestamp, kRtpPayloadType));
+ // Scale back.
+ EXPECT_EQ(external_timestamp, scaler.ToExternal(internal_timestamp));
+ internal_timestamp += 2;
+ }
+ // Reset the scaler. After this, we expect the internal and external to start
+ // over at the same value again.
+ scaler.Reset();
+ internal_timestamp = external_timestamp;
+ for (; external_timestamp != 15; ++external_timestamp) {
+ // Scale to internal timestamp.
+ EXPECT_EQ(internal_timestamp,
+ scaler.ToInternal(external_timestamp, kRtpPayloadType));
+ // Scale back.
+ EXPECT_EQ(external_timestamp, scaler.ToExternal(internal_timestamp));
+ internal_timestamp += 2;
+ }
+
+ EXPECT_CALL(db, Die()); // Called when database object is deleted.
+}
+
+// TODO(minyue): This test is trivial since Opus does not need a timestamp
+// scaler, so it may be removed in the future. There is no harm in keeping
+// it, though, as it covers the case of a trivial (1:1) timestamp scaler.
+TEST(TimestampScaler, TestOpusLargeStep) {
+ MockDecoderDatabase db;
+ auto factory = CreateBuiltinAudioDecoderFactory();
+ const DecoderDatabase::DecoderInfo info(SdpAudioFormat("opus", 48000, 2),
+ absl::nullopt, factory.get());
+ static const uint8_t kRtpPayloadType = 17;
+ EXPECT_CALL(db, GetDecoderInfo(kRtpPayloadType))
+ .WillRepeatedly(Return(&info));
+
+ TimestampScaler scaler(db);
+ // Test both sides of the timestamp wrap-around.
+ static const uint32_t kStep = 960;
+ uint32_t external_timestamp = 0;
+ // `external_timestamp` will be a large positive value.
+ external_timestamp = external_timestamp - 5 * kStep;
+ uint32_t internal_timestamp = external_timestamp;
+ for (; external_timestamp != 5 * kStep; external_timestamp += kStep) {
+ // Scale to internal timestamp.
+ EXPECT_EQ(internal_timestamp,
+ scaler.ToInternal(external_timestamp, kRtpPayloadType));
+ // Scale back.
+ EXPECT_EQ(external_timestamp, scaler.ToExternal(internal_timestamp));
+ internal_timestamp += kStep;
+ }
+
+ EXPECT_CALL(db, Die()); // Called when database object is deleted.
+}
+
+TEST(TimestampScaler, Failures) {
+ static const uint8_t kRtpPayloadType = 17;
+ MockDecoderDatabase db;
+ EXPECT_CALL(db, GetDecoderInfo(kRtpPayloadType))
+ .WillOnce(ReturnNull()); // Return NULL to indicate unknown payload type.
+
+ TimestampScaler scaler(db);
+ uint32_t timestamp = 4711; // Some number.
+ EXPECT_EQ(timestamp, scaler.ToInternal(timestamp, kRtpPayloadType));
+
+ Packet* packet = NULL;
+ scaler.ToInternal(packet); // Should not crash. That's all we can test.
+
+ EXPECT_CALL(db, Die()); // Called when database object is deleted.
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/tools/DEPS b/third_party/libwebrtc/modules/audio_coding/neteq/tools/DEPS
new file mode 100644
index 0000000000..4db1e1d6e5
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/tools/DEPS
@@ -0,0 +1,3 @@
+include_rules = [
+ "+logging/rtc_event_log",
+]
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/tools/README.md b/third_party/libwebrtc/modules/audio_coding/neteq/tools/README.md
new file mode 100644
index 0000000000..e7bd95c285
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/tools/README.md
@@ -0,0 +1,17 @@
+# NetEQ RTP Play tool
+
+## Testing of the command line arguments
+The command-line tool `neteq_rtpplay` can be tested by running `neteq_rtpplay_test.sh`. The script is not run on try bots, but it is useful before submitting any CL that may change the behavior of the command-line arguments of `neteq_rtpplay`.
+
+Run `neteq_rtpplay_test.sh` as follows from the `src/` folder:
+```
+src$ ./modules/audio_coding/neteq/tools/neteq_rtpplay_test.sh \
+ out/Default/neteq_rtpplay \
+ resources/audio_coding/neteq_opus.rtp \
+ resources/short_mixed_mono_48.pcm
+```
+
+You can replace the RTP and PCM files with any other compatible files.
+If you get an error using the files indicated above, try running `gclient sync`.
+
+Requirements: `awk` and `md5sum`.
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/tools/audio_checksum.h b/third_party/libwebrtc/modules/audio_coding/neteq/tools/audio_checksum.h
new file mode 100644
index 0000000000..42e3a3a3a0
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/tools/audio_checksum.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_TOOLS_AUDIO_CHECKSUM_H_
+#define MODULES_AUDIO_CODING_NETEQ_TOOLS_AUDIO_CHECKSUM_H_
+
+#include <memory>
+#include <string>
+
+#include "modules/audio_coding/neteq/tools/audio_sink.h"
+#include "rtc_base/buffer.h"
+#include "rtc_base/message_digest.h"
+#include "rtc_base/string_encode.h"
+#include "rtc_base/system/arch.h"
+
+namespace webrtc {
+namespace test {
+
+class AudioChecksum : public AudioSink {
+ public:
+ AudioChecksum()
+ : checksum_(rtc::MessageDigestFactory::Create(rtc::DIGEST_MD5)),
+ checksum_result_(checksum_->Size()),
+ finished_(false) {}
+
+ AudioChecksum(const AudioChecksum&) = delete;
+ AudioChecksum& operator=(const AudioChecksum&) = delete;
+
+ bool WriteArray(const int16_t* audio, size_t num_samples) override {
+ if (finished_)
+ return false;
+
+#ifndef WEBRTC_ARCH_LITTLE_ENDIAN
+#error "Big-endian gives a different checksum"
+#endif
+ checksum_->Update(audio, num_samples * sizeof(*audio));
+ return true;
+ }
+
+ // Finalizes the computations, and returns the checksum.
+ std::string Finish() {
+ if (!finished_) {
+ finished_ = true;
+ checksum_->Finish(checksum_result_.data(), checksum_result_.size());
+ }
+ return rtc::hex_encode(checksum_result_);
+ }
+
+ private:
+ std::unique_ptr<rtc::MessageDigest> checksum_;
+ rtc::Buffer checksum_result_;
+ bool finished_;
+};
+
+} // namespace test
+} // namespace webrtc
+#endif // MODULES_AUDIO_CODING_NETEQ_TOOLS_AUDIO_CHECKSUM_H_
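
A hedged sketch of typical `AudioChecksum` use in a test; `kExpectedMd5` is a hypothetical reference value:

```
// Hash a zero-filled 10 ms block at 48 kHz; values are illustrative.
webrtc::test::AudioChecksum checksum;
int16_t block[480] = {0};
checksum.WriteArray(block, 480);
const std::string digest = checksum.Finish();  // Hex-encoded MD5.
// After Finish(), further WriteArray() calls return false.
EXPECT_EQ(kExpectedMd5, digest);
```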
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/tools/audio_loop.cc b/third_party/libwebrtc/modules/audio_coding/neteq/tools/audio_loop.cc
new file mode 100644
index 0000000000..514e6eb2ee
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/tools/audio_loop.cc
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/tools/audio_loop.h"
+
+#include <stdio.h>
+#include <string.h>
+
+#include "absl/strings/string_view.h"
+
+namespace webrtc {
+namespace test {
+
+bool AudioLoop::Init(absl::string_view file_name,
+ size_t max_loop_length_samples,
+ size_t block_length_samples) {
+ FILE* fp = fopen(std::string(file_name).c_str(), "rb");
+ if (!fp)
+ return false;
+
+ audio_array_.reset(
+ new int16_t[max_loop_length_samples + block_length_samples]);
+ size_t samples_read =
+ fread(audio_array_.get(), sizeof(int16_t), max_loop_length_samples, fp);
+ fclose(fp);
+
+ // The block length must not exceed the loop length.
+ if (block_length_samples > samples_read)
+ return false;
+
+ // Add an extra block length of samples to the end of the array, starting
+ // over again from the beginning of the array. This is done to simplify
+ // the reading process when reading over the end of the loop.
+ memcpy(&audio_array_[samples_read], audio_array_.get(),
+ block_length_samples * sizeof(int16_t));
+
+ loop_length_samples_ = samples_read;
+ block_length_samples_ = block_length_samples;
+ next_index_ = 0;
+ return true;
+}
+
+rtc::ArrayView<const int16_t> AudioLoop::GetNextBlock() {
+ // Check that the AudioLoop is initialized.
+ if (block_length_samples_ == 0)
+ return rtc::ArrayView<const int16_t>();
+
+ const int16_t* output_ptr = &audio_array_[next_index_];
+ next_index_ = (next_index_ + block_length_samples_) % loop_length_samples_;
+ return rtc::ArrayView<const int16_t>(output_ptr, block_length_samples_);
+}
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/tools/audio_loop.h b/third_party/libwebrtc/modules/audio_coding/neteq/tools/audio_loop.h
new file mode 100644
index 0000000000..f5f0b59011
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/tools/audio_loop.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_TOOLS_AUDIO_LOOP_H_
+#define MODULES_AUDIO_CODING_NETEQ_TOOLS_AUDIO_LOOP_H_
+
+#include <memory>
+#include <string>
+
+#include "absl/strings/string_view.h"
+#include "api/array_view.h"
+
+namespace webrtc {
+namespace test {
+
+// Class serving as an infinite source of audio, realized by looping an audio
+// clip.
+class AudioLoop {
+ public:
+ AudioLoop()
+ : next_index_(0), loop_length_samples_(0), block_length_samples_(0) {}
+
+ virtual ~AudioLoop() {}
+
+ AudioLoop(const AudioLoop&) = delete;
+ AudioLoop& operator=(const AudioLoop&) = delete;
+
+ // Initializes the AudioLoop by reading from `file_name`. The loop will be no
+ // longer than `max_loop_length_samples`, if the length of the file is
+ // greater. Otherwise, the loop length is the same as the file length.
+ // The audio will be delivered in blocks of `block_length_samples`.
+ // Returns false if the initialization failed, otherwise true.
+ bool Init(absl::string_view file_name,
+ size_t max_loop_length_samples,
+ size_t block_length_samples);
+
+ // Returns a (pointer,size) pair for the next block of audio. The size is
+ // equal to the `block_length_samples` Init() argument.
+ rtc::ArrayView<const int16_t> GetNextBlock();
+
+ private:
+ size_t next_index_;
+ size_t loop_length_samples_;
+ size_t block_length_samples_;
+ std::unique_ptr<int16_t[]> audio_array_;
+};
+
+} // namespace test
+} // namespace webrtc
+#endif // MODULES_AUDIO_CODING_NETEQ_TOOLS_AUDIO_LOOP_H_
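
The extra block that Init() copies to the end of `audio_array_` guarantees that GetNextBlock() can always return one contiguous view, even when the read position wraps past the loop end. A hedged usage sketch; the file path is a placeholder:

```
// Loop at most 10 s of 48 kHz audio, delivered in 10 ms (480-sample) blocks.
webrtc::test::AudioLoop loop;
RTC_CHECK(loop.Init("/path/to/audio.pcm", 480000, 480));
for (int i = 0; i < 100; ++i) {
  rtc::ArrayView<const int16_t> block = loop.GetNextBlock();
  RTC_CHECK_EQ(480u, block.size());  // Contiguous even across the wrap.
}
```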
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/tools/audio_sink.cc b/third_party/libwebrtc/modules/audio_coding/neteq/tools/audio_sink.cc
new file mode 100644
index 0000000000..7d7af7ef9f
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/tools/audio_sink.cc
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/tools/audio_sink.h"
+
+namespace webrtc {
+namespace test {
+
+bool AudioSinkFork::WriteArray(const int16_t* audio, size_t num_samples) {
+ return left_sink_->WriteArray(audio, num_samples) &&
+ right_sink_->WriteArray(audio, num_samples);
+}
+
+bool VoidAudioSink::WriteArray(const int16_t* audio, size_t num_samples) {
+ return true;
+}
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/tools/audio_sink.h b/third_party/libwebrtc/modules/audio_coding/neteq/tools/audio_sink.h
new file mode 100644
index 0000000000..53729fa920
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/tools/audio_sink.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_TOOLS_AUDIO_SINK_H_
+#define MODULES_AUDIO_CODING_NETEQ_TOOLS_AUDIO_SINK_H_
+
+#include "api/audio/audio_frame.h"
+
+namespace webrtc {
+namespace test {
+
+// Interface class for an object receiving raw output audio from test
+// applications.
+class AudioSink {
+ public:
+ AudioSink() {}
+ virtual ~AudioSink() {}
+
+ AudioSink(const AudioSink&) = delete;
+ AudioSink& operator=(const AudioSink&) = delete;
+
+ // Writes `num_samples` samples from `audio` to the AudioSink. Returns true
+ // if successful, otherwise false.
+ virtual bool WriteArray(const int16_t* audio, size_t num_samples) = 0;
+
+ // Writes `audio_frame` to the AudioSink. Returns true if successful,
+ // otherwise false.
+ bool WriteAudioFrame(const AudioFrame& audio_frame) {
+ return WriteArray(audio_frame.data(), audio_frame.samples_per_channel_ *
+ audio_frame.num_channels_);
+ }
+};
+
+// Forks the output audio to two AudioSink objects.
+class AudioSinkFork : public AudioSink {
+ public:
+ AudioSinkFork(AudioSink* left, AudioSink* right)
+ : left_sink_(left), right_sink_(right) {}
+
+ AudioSinkFork(const AudioSinkFork&) = delete;
+ AudioSinkFork& operator=(const AudioSinkFork&) = delete;
+
+ bool WriteArray(const int16_t* audio, size_t num_samples) override;
+
+ private:
+ AudioSink* left_sink_;
+ AudioSink* right_sink_;
+};
+
+// An AudioSink implementation that does nothing.
+class VoidAudioSink : public AudioSink {
+ public:
+ VoidAudioSink() = default;
+
+ VoidAudioSink(const VoidAudioSink&) = delete;
+ VoidAudioSink& operator=(const VoidAudioSink&) = delete;
+
+ bool WriteArray(const int16_t* audio, size_t num_samples) override;
+};
+
+} // namespace test
+} // namespace webrtc
+#endif // MODULES_AUDIO_CODING_NETEQ_TOOLS_AUDIO_SINK_H_
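
A hedged sketch composing the sinks above: the output is forked so that one branch is checksummed (using `AudioChecksum` from audio_checksum.h) while the other is discarded:

```
webrtc::test::AudioChecksum checksum;
webrtc::test::VoidAudioSink discard;
webrtc::test::AudioSinkFork fork(&checksum, &discard);
int16_t audio[960] = {0};  // One 10 ms stereo block at 48 kHz, for example.
// WriteArray() succeeds only if both branches accept the samples.
bool ok = fork.WriteArray(audio, 960);
```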
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/tools/constant_pcm_packet_source.cc b/third_party/libwebrtc/modules/audio_coding/neteq/tools/constant_pcm_packet_source.cc
new file mode 100644
index 0000000000..18a910365f
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/tools/constant_pcm_packet_source.cc
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/tools/constant_pcm_packet_source.h"
+
+#include <algorithm>
+
+#include "modules/audio_coding/codecs/pcm16b/pcm16b.h"
+#include "modules/audio_coding/neteq/tools/packet.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace test {
+
+ConstantPcmPacketSource::ConstantPcmPacketSource(size_t payload_len_samples,
+ int16_t sample_value,
+ int sample_rate_hz,
+ int payload_type)
+ : payload_len_samples_(payload_len_samples),
+ packet_len_bytes_(2 * payload_len_samples_ + kHeaderLenBytes),
+ samples_per_ms_(sample_rate_hz / 1000),
+ next_arrival_time_ms_(0.0),
+ payload_type_(payload_type),
+ seq_number_(0),
+ timestamp_(0),
+ payload_ssrc_(0xABCD1234) {
+ size_t encoded_len = WebRtcPcm16b_Encode(&sample_value, 1, encoded_sample_);
+ RTC_CHECK_EQ(2U, encoded_len);
+}
+
+std::unique_ptr<Packet> ConstantPcmPacketSource::NextPacket() {
+ RTC_CHECK_GT(packet_len_bytes_, kHeaderLenBytes);
+ rtc::CopyOnWriteBuffer packet_buffer(packet_len_bytes_);
+ uint8_t* packet_memory = packet_buffer.MutableData();
+ // Fill the payload part of the packet memory with the pre-encoded value.
+ for (unsigned i = 0; i < 2 * payload_len_samples_; ++i)
+ packet_memory[kHeaderLenBytes + i] = encoded_sample_[i % 2];
+ WriteHeader(packet_memory);
+ // The filled `packet_buffer` (which owns the memory behind `packet_memory`)
+ // is moved into the new packet.
+ auto packet =
+ std::make_unique<Packet>(std::move(packet_buffer), next_arrival_time_ms_);
+ next_arrival_time_ms_ += payload_len_samples_ / samples_per_ms_;
+ return packet;
+}
+
+void ConstantPcmPacketSource::WriteHeader(uint8_t* packet_memory) {
+ packet_memory[0] = 0x80;
+ packet_memory[1] = static_cast<uint8_t>(payload_type_);
+ packet_memory[2] = seq_number_ >> 8;
+ packet_memory[3] = seq_number_ & 0xFF;
+ packet_memory[4] = timestamp_ >> 24;
+ packet_memory[5] = (timestamp_ >> 16) & 0xFF;
+ packet_memory[6] = (timestamp_ >> 8) & 0xFF;
+ packet_memory[7] = timestamp_ & 0xFF;
+ packet_memory[8] = payload_ssrc_ >> 24;
+ packet_memory[9] = (payload_ssrc_ >> 16) & 0xFF;
+ packet_memory[10] = (payload_ssrc_ >> 8) & 0xFF;
+ packet_memory[11] = payload_ssrc_ & 0xFF;
+ ++seq_number_;
+ timestamp_ += static_cast<uint32_t>(payload_len_samples_);
+}
+
+} // namespace test
+} // namespace webrtc
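
For reference, the 12 bytes written by WriteHeader() follow the fixed RTP header layout (RFC 3550); this annotation restates the code above for the first packet produced (the payload type comes from the constructor argument):

```
// Byte 0:     0x80 -> RTP version 2; no padding, extension, or CSRCs.
// Byte 1:     payload type (marker bit clear).
// Bytes 2-3:  sequence number, big-endian; starts at 0.
// Bytes 4-7:  timestamp, big-endian; starts at 0, advances by
//             payload_len_samples_ per packet.
// Bytes 8-11: SSRC, big-endian; fixed at 0xABCD1234.
```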
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/tools/constant_pcm_packet_source.h b/third_party/libwebrtc/modules/audio_coding/neteq/tools/constant_pcm_packet_source.h
new file mode 100644
index 0000000000..ab4f5c2281
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/tools/constant_pcm_packet_source.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_TOOLS_CONSTANT_PCM_PACKET_SOURCE_H_
+#define MODULES_AUDIO_CODING_NETEQ_TOOLS_CONSTANT_PCM_PACKET_SOURCE_H_
+
+#include <stdio.h>
+
+#include <string>
+
+#include "modules/audio_coding/neteq/tools/packet_source.h"
+
+namespace webrtc {
+namespace test {
+
+// This class implements a packet source that delivers PCM16b encoded packets
+// with a constant sample value. The payload length, constant sample value,
+// sample rate, and payload type are all set in the constructor.
+class ConstantPcmPacketSource : public PacketSource {
+ public:
+ ConstantPcmPacketSource(size_t payload_len_samples,
+ int16_t sample_value,
+ int sample_rate_hz,
+ int payload_type);
+
+ ConstantPcmPacketSource(const ConstantPcmPacketSource&) = delete;
+ ConstantPcmPacketSource& operator=(const ConstantPcmPacketSource&) = delete;
+
+ std::unique_ptr<Packet> NextPacket() override;
+
+ private:
+ void WriteHeader(uint8_t* packet_memory);
+
+ const size_t kHeaderLenBytes = 12;
+ const size_t payload_len_samples_;
+ const size_t packet_len_bytes_;
+ uint8_t encoded_sample_[2];
+ const int samples_per_ms_;
+ double next_arrival_time_ms_;
+ const int payload_type_;
+ uint16_t seq_number_;
+ uint32_t timestamp_;
+ const uint32_t payload_ssrc_;
+};
+
+} // namespace test
+} // namespace webrtc
+#endif // MODULES_AUDIO_CODING_NETEQ_TOOLS_CONSTANT_PCM_PACKET_SOURCE_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/tools/encode_neteq_input.cc b/third_party/libwebrtc/modules/audio_coding/neteq/tools/encode_neteq_input.cc
new file mode 100644
index 0000000000..87b987ddb6
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/tools/encode_neteq_input.cc
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/tools/encode_neteq_input.h"
+
+#include <utility>
+
+#include "rtc_base/checks.h"
+#include "rtc_base/numerics/safe_conversions.h"
+
+namespace webrtc {
+namespace test {
+
+EncodeNetEqInput::EncodeNetEqInput(std::unique_ptr<Generator> generator,
+ std::unique_ptr<AudioEncoder> encoder,
+ int64_t input_duration_ms)
+ : generator_(std::move(generator)),
+ encoder_(std::move(encoder)),
+ input_duration_ms_(input_duration_ms) {
+ CreatePacket();
+}
+
+EncodeNetEqInput::~EncodeNetEqInput() = default;
+
+absl::optional<int64_t> EncodeNetEqInput::NextPacketTime() const {
+ RTC_DCHECK(packet_data_);
+ return static_cast<int64_t>(packet_data_->time_ms);
+}
+
+absl::optional<int64_t> EncodeNetEqInput::NextOutputEventTime() const {
+ return next_output_event_ms_;
+}
+
+std::unique_ptr<NetEqInput::PacketData> EncodeNetEqInput::PopPacket() {
+ RTC_DCHECK(packet_data_);
+ // Grab the packet to return...
+ std::unique_ptr<PacketData> packet_to_return = std::move(packet_data_);
+ // ... and line up the next packet for future use.
+ CreatePacket();
+
+ return packet_to_return;
+}
+
+void EncodeNetEqInput::AdvanceOutputEvent() {
+ next_output_event_ms_ += kOutputPeriodMs;
+}
+
+bool EncodeNetEqInput::ended() const {
+ return next_output_event_ms_ > input_duration_ms_;
+}
+
+absl::optional<RTPHeader> EncodeNetEqInput::NextHeader() const {
+ RTC_DCHECK(packet_data_);
+ return packet_data_->header;
+}
+
+void EncodeNetEqInput::CreatePacket() {
+ // Create a new PacketData object.
+ RTC_DCHECK(!packet_data_);
+ packet_data_.reset(new NetEqInput::PacketData);
+ RTC_DCHECK_EQ(packet_data_->payload.size(), 0);
+
+ // Loop until we get a packet.
+ AudioEncoder::EncodedInfo info;
+ RTC_DCHECK(!info.send_even_if_empty);
+ int num_blocks = 0;
+ while (packet_data_->payload.size() == 0 && !info.send_even_if_empty) {
+ const size_t num_samples = rtc::CheckedDivExact(
+ static_cast<int>(encoder_->SampleRateHz() * kOutputPeriodMs), 1000);
+
+ info = encoder_->Encode(rtp_timestamp_, generator_->Generate(num_samples),
+ &packet_data_->payload);
+
+ rtp_timestamp_ += rtc::dchecked_cast<uint32_t>(
+ num_samples * encoder_->RtpTimestampRateHz() /
+ encoder_->SampleRateHz());
+ ++num_blocks;
+ }
+ packet_data_->header.timestamp = info.encoded_timestamp;
+ packet_data_->header.payloadType = info.payload_type;
+ packet_data_->header.sequenceNumber = sequence_number_++;
+ packet_data_->time_ms = next_packet_time_ms_;
+ next_packet_time_ms_ += num_blocks * kOutputPeriodMs;
+}
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/tools/encode_neteq_input.h b/third_party/libwebrtc/modules/audio_coding/neteq/tools/encode_neteq_input.h
new file mode 100644
index 0000000000..caa9ac76f4
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/tools/encode_neteq_input.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_TOOLS_ENCODE_NETEQ_INPUT_H_
+#define MODULES_AUDIO_CODING_NETEQ_TOOLS_ENCODE_NETEQ_INPUT_H_
+
+#include <memory>
+
+#include "api/audio_codecs/audio_encoder.h"
+#include "modules/audio_coding/neteq/tools/neteq_input.h"
+
+namespace webrtc {
+namespace test {
+
+// This class provides a NetEqInput that takes audio from a generator object and
+// encodes it using a given audio encoder.
+class EncodeNetEqInput : public NetEqInput {
+ public:
+ // Generator class, to be provided to the EncodeNetEqInput constructor.
+ class Generator {
+ public:
+ virtual ~Generator() = default;
+ // Returns the next num_samples values from the signal generator.
+ virtual rtc::ArrayView<const int16_t> Generate(size_t num_samples) = 0;
+ };
+
+ // The source will end after the given input duration.
+ EncodeNetEqInput(std::unique_ptr<Generator> generator,
+ std::unique_ptr<AudioEncoder> encoder,
+ int64_t input_duration_ms);
+ ~EncodeNetEqInput() override;
+
+ absl::optional<int64_t> NextPacketTime() const override;
+
+ absl::optional<int64_t> NextOutputEventTime() const override;
+
+ std::unique_ptr<PacketData> PopPacket() override;
+
+ void AdvanceOutputEvent() override;
+
+ bool ended() const override;
+
+ absl::optional<RTPHeader> NextHeader() const override;
+
+ private:
+ static constexpr int64_t kOutputPeriodMs = 10;
+
+ void CreatePacket();
+
+ std::unique_ptr<Generator> generator_;
+ std::unique_ptr<AudioEncoder> encoder_;
+ std::unique_ptr<PacketData> packet_data_;
+ uint32_t rtp_timestamp_ = 0;
+  uint16_t sequence_number_ = 0;
+ int64_t next_packet_time_ms_ = 0;
+ int64_t next_output_event_ms_ = 0;
+ const int64_t input_duration_ms_;
+};
+
+} // namespace test
+} // namespace webrtc
+#endif // MODULES_AUDIO_CODING_NETEQ_TOOLS_ENCODE_NETEQ_INPUT_H_
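A sketch of how the Generator interface above can be implemented and plugged into EncodeNetEqInput; constructing the AudioEncoder is codec-specific and left to the caller here.

#include <memory>
#include <utility>
#include <vector>

#include "modules/audio_coding/neteq/tools/encode_neteq_input.h"

// A Generator producing silence. The buffer is a member so the returned
// ArrayView stays valid until the next call to Generate().
class ZeroGenerator : public webrtc::test::EncodeNetEqInput::Generator {
 public:
  rtc::ArrayView<const int16_t> Generate(size_t num_samples) override {
    buffer_.assign(num_samples, 0);
    return buffer_;
  }

 private:
  std::vector<int16_t> buffer_;
};

// Wraps 10 seconds of encoded silence as a NetEqInput.
std::unique_ptr<webrtc::test::NetEqInput> MakeSilentInput(
    std::unique_ptr<webrtc::AudioEncoder> encoder) {
  return std::make_unique<webrtc::test::EncodeNetEqInput>(
      std::make_unique<ZeroGenerator>(), std::move(encoder),
      /*input_duration_ms=*/10000);
}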
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/tools/fake_decode_from_file.cc b/third_party/libwebrtc/modules/audio_coding/neteq/tools/fake_decode_from_file.cc
new file mode 100644
index 0000000000..6c5e5ac2e4
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/tools/fake_decode_from_file.cc
@@ -0,0 +1,169 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/tools/fake_decode_from_file.h"
+
+#include "modules/rtp_rtcp/source/byte_io.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/numerics/safe_conversions.h"
+
+namespace webrtc {
+namespace test {
+
+namespace {
+
+class FakeEncodedFrame : public AudioDecoder::EncodedAudioFrame {
+ public:
+ FakeEncodedFrame(AudioDecoder* decoder, rtc::Buffer&& payload)
+ : decoder_(decoder), payload_(std::move(payload)) {}
+
+ size_t Duration() const override {
+ const int ret = decoder_->PacketDuration(payload_.data(), payload_.size());
+ return ret < 0 ? 0 : static_cast<size_t>(ret);
+ }
+
+ absl::optional<DecodeResult> Decode(
+ rtc::ArrayView<int16_t> decoded) const override {
+ auto speech_type = AudioDecoder::kSpeech;
+ const int ret = decoder_->Decode(
+ payload_.data(), payload_.size(), decoder_->SampleRateHz(),
+ decoded.size() * sizeof(int16_t), decoded.data(), &speech_type);
+ return ret < 0 ? absl::nullopt
+ : absl::optional<DecodeResult>(
+ {static_cast<size_t>(ret), speech_type});
+ }
+
+ // This is to mimic OpusFrame.
+ bool IsDtxPacket() const override {
+ uint32_t original_payload_size_bytes =
+ ByteReader<uint32_t>::ReadLittleEndian(&payload_.data()[8]);
+ return original_payload_size_bytes <= 2;
+ }
+
+ private:
+ AudioDecoder* const decoder_;
+ const rtc::Buffer payload_;
+};
+
+} // namespace
+
+std::vector<AudioDecoder::ParseResult> FakeDecodeFromFile::ParsePayload(
+ rtc::Buffer&& payload,
+ uint32_t timestamp) {
+ std::vector<ParseResult> results;
+ std::unique_ptr<EncodedAudioFrame> frame(
+ new FakeEncodedFrame(this, std::move(payload)));
+ results.emplace_back(timestamp, 0, std::move(frame));
+ return results;
+}
+
+int FakeDecodeFromFile::DecodeInternal(const uint8_t* encoded,
+ size_t encoded_len,
+ int sample_rate_hz,
+ int16_t* decoded,
+ SpeechType* speech_type) {
+ RTC_DCHECK_EQ(sample_rate_hz, SampleRateHz());
+
+ const int samples_to_decode = PacketDuration(encoded, encoded_len);
+ const int total_samples_to_decode = samples_to_decode * (stereo_ ? 2 : 1);
+
+ if (encoded_len == 0) {
+ // Decoder is asked to produce codec-internal comfort noise.
+ RTC_DCHECK(!encoded); // NetEq always sends nullptr in this case.
+ RTC_DCHECK(cng_mode_);
+ RTC_DCHECK_GT(total_samples_to_decode, 0);
+ std::fill_n(decoded, total_samples_to_decode, 0);
+ *speech_type = kComfortNoise;
+ return rtc::dchecked_cast<int>(total_samples_to_decode);
+ }
+
+ RTC_CHECK_GE(encoded_len, 12);
+ uint32_t timestamp_to_decode =
+ ByteReader<uint32_t>::ReadLittleEndian(encoded);
+
+ if (next_timestamp_from_input_ &&
+ timestamp_to_decode != *next_timestamp_from_input_) {
+ // A gap in the timestamp sequence is detected. Skip the same number of
+ // samples from the file.
+ uint32_t jump = timestamp_to_decode - *next_timestamp_from_input_;
+ RTC_CHECK(input_->Seek(jump));
+ }
+
+ next_timestamp_from_input_ = timestamp_to_decode + samples_to_decode;
+
+ uint32_t original_payload_size_bytes =
+ ByteReader<uint32_t>::ReadLittleEndian(&encoded[8]);
+ if (original_payload_size_bytes <= 2) {
+ // This is a comfort noise payload.
+ RTC_DCHECK_GT(total_samples_to_decode, 0);
+ std::fill_n(decoded, total_samples_to_decode, 0);
+ *speech_type = kComfortNoise;
+ cng_mode_ = true;
+ return rtc::dchecked_cast<int>(total_samples_to_decode);
+ }
+
+ cng_mode_ = false;
+ RTC_CHECK(input_->Read(static_cast<size_t>(samples_to_decode), decoded));
+
+ if (stereo_) {
+ InputAudioFile::DuplicateInterleaved(decoded, samples_to_decode, 2,
+ decoded);
+ }
+
+ *speech_type = kSpeech;
+ last_decoded_length_ = samples_to_decode;
+ return rtc::dchecked_cast<int>(total_samples_to_decode);
+}
+
+int FakeDecodeFromFile::PacketDuration(const uint8_t* encoded,
+ size_t encoded_len) const {
+ const uint32_t original_payload_size_bytes =
+ encoded_len < 8 + sizeof(uint32_t)
+ ? 0
+ : ByteReader<uint32_t>::ReadLittleEndian(&encoded[8]);
+ const uint32_t samples_to_decode =
+ encoded_len < 4 + sizeof(uint32_t)
+ ? 0
+ : ByteReader<uint32_t>::ReadLittleEndian(&encoded[4]);
+ if (encoded_len == 0) {
+ // Decoder is asked to produce codec-internal comfort noise.
+ return rtc::CheckedDivExact(SampleRateHz(), 100);
+ }
+ bool is_dtx_payload =
+ original_payload_size_bytes <= 2 || samples_to_decode == 0;
+ bool has_error_duration =
+ samples_to_decode % rtc::CheckedDivExact(SampleRateHz(), 100) != 0;
+ if (is_dtx_payload || has_error_duration) {
+ if (last_decoded_length_ > 0) {
+ // Use length of last decoded packet.
+ return rtc::dchecked_cast<int>(last_decoded_length_);
+ } else {
+ // This is the first packet to decode, and we do not know the length of
+ // it. Set it to 10 ms.
+ return rtc::CheckedDivExact(SampleRateHz(), 100);
+ }
+ }
+ return samples_to_decode;
+}
+
+void FakeDecodeFromFile::PrepareEncoded(uint32_t timestamp,
+ size_t samples,
+ size_t original_payload_size_bytes,
+ rtc::ArrayView<uint8_t> encoded) {
+ RTC_CHECK_GE(encoded.size(), 12);
+ ByteWriter<uint32_t>::WriteLittleEndian(&encoded[0], timestamp);
+ ByteWriter<uint32_t>::WriteLittleEndian(&encoded[4],
+ rtc::checked_cast<uint32_t>(samples));
+ ByteWriter<uint32_t>::WriteLittleEndian(
+ &encoded[8], rtc::checked_cast<uint32_t>(original_payload_size_bytes));
+}
+
+} // namespace test
+} // namespace webrtc
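For reference, a sketch of producing the 12-byte "encoded" blob that DecodeInternal() above consumes, using the PrepareEncoded() helper; the sample count and size values are arbitrary.

#include "modules/audio_coding/neteq/tools/fake_decode_from_file.h"
#include "rtc_base/buffer.h"

// Bytes 0-3 hold the RTP timestamp, bytes 4-7 the number of samples to
// decode, and bytes 8-11 the original payload size; a size <= 2 marks the
// packet as DTX/comfort noise.
rtc::Buffer MakeFakePayload(uint32_t rtp_timestamp, size_t samples) {
  rtc::Buffer payload(12);
  webrtc::test::FakeDecodeFromFile::PrepareEncoded(
      rtp_timestamp, samples, /*original_payload_size_bytes=*/100, payload);
  return payload;
}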
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/tools/fake_decode_from_file.h b/third_party/libwebrtc/modules/audio_coding/neteq/tools/fake_decode_from_file.h
new file mode 100644
index 0000000000..7b53653998
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/tools/fake_decode_from_file.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_TOOLS_FAKE_DECODE_FROM_FILE_H_
+#define MODULES_AUDIO_CODING_NETEQ_TOOLS_FAKE_DECODE_FROM_FILE_H_
+
+#include <memory>
+
+#include "absl/types/optional.h"
+#include "api/array_view.h"
+#include "api/audio_codecs/audio_decoder.h"
+#include "modules/audio_coding/neteq/tools/input_audio_file.h"
+
+namespace webrtc {
+namespace test {
+// Provides an AudioDecoder implementation that delivers audio data from a file.
+// The "encoded" input should contain information about what RTP timestamp the
+// encoding represents, and how many samples the decoder should produce for that
+// encoding. A helper method PrepareEncoded is provided to prepare such
+// encodings. If packets are missing, as determined from the timestamps, the
+// file reading will skip forward to match the loss.
+class FakeDecodeFromFile : public AudioDecoder {
+ public:
+ FakeDecodeFromFile(std::unique_ptr<InputAudioFile> input,
+ int sample_rate_hz,
+ bool stereo)
+ : input_(std::move(input)),
+ sample_rate_hz_(sample_rate_hz),
+ stereo_(stereo) {}
+
+ ~FakeDecodeFromFile() = default;
+
+ std::vector<ParseResult> ParsePayload(rtc::Buffer&& payload,
+ uint32_t timestamp) override;
+
+ void Reset() override {}
+
+ int SampleRateHz() const override { return sample_rate_hz_; }
+
+ size_t Channels() const override { return stereo_ ? 2 : 1; }
+
+ int DecodeInternal(const uint8_t* encoded,
+ size_t encoded_len,
+ int sample_rate_hz,
+ int16_t* decoded,
+ SpeechType* speech_type) override;
+
+ int PacketDuration(const uint8_t* encoded, size_t encoded_len) const override;
+
+ // Helper method. Writes `timestamp`, `samples` and
+ // `original_payload_size_bytes` to `encoded` in a format that the
+ // FakeDecodeFromFile decoder will understand. `encoded` must be at least 12
+ // bytes long.
+ static void PrepareEncoded(uint32_t timestamp,
+ size_t samples,
+ size_t original_payload_size_bytes,
+ rtc::ArrayView<uint8_t> encoded);
+
+ private:
+ std::unique_ptr<InputAudioFile> input_;
+ absl::optional<uint32_t> next_timestamp_from_input_;
+ const int sample_rate_hz_;
+ const bool stereo_;
+ size_t last_decoded_length_ = 0;
+ bool cng_mode_ = false;
+};
+
+} // namespace test
+} // namespace webrtc
+#endif // MODULES_AUDIO_CODING_NETEQ_TOOLS_FAKE_DECODE_FROM_FILE_H_
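A construction sketch, assuming a raw 16-bit PCM file; "input.pcm" is a placeholder name.

#include <memory>

#include "modules/audio_coding/neteq/tools/fake_decode_from_file.h"

// A 32 kHz mono fake decoder that reads its output samples from input.pcm.
std::unique_ptr<webrtc::AudioDecoder> MakeFakeDecoder() {
  return std::make_unique<webrtc::test::FakeDecodeFromFile>(
      std::make_unique<webrtc::test::InputAudioFile>("input.pcm"),
      /*sample_rate_hz=*/32000, /*stereo=*/false);
}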
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/tools/initial_packet_inserter_neteq_input.cc b/third_party/libwebrtc/modules/audio_coding/neteq/tools/initial_packet_inserter_neteq_input.cc
new file mode 100644
index 0000000000..3c33aabf1c
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/tools/initial_packet_inserter_neteq_input.cc
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/tools/initial_packet_inserter_neteq_input.h"
+
+#include <limits>
+#include <memory>
+#include <utility>
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace test {
+
+InitialPacketInserterNetEqInput::InitialPacketInserterNetEqInput(
+ std::unique_ptr<NetEqInput> source,
+ int number_of_initial_packets,
+ int sample_rate_hz)
+ : source_(std::move(source)),
+ packets_to_insert_(number_of_initial_packets),
+ sample_rate_hz_(sample_rate_hz) {}
+
+absl::optional<int64_t> InitialPacketInserterNetEqInput::NextPacketTime()
+ const {
+ return source_->NextPacketTime();
+}
+
+absl::optional<int64_t> InitialPacketInserterNetEqInput::NextOutputEventTime()
+ const {
+ return source_->NextOutputEventTime();
+}
+
+std::unique_ptr<InitialPacketInserterNetEqInput::PacketData>
+InitialPacketInserterNetEqInput::PopPacket() {
+ if (!first_packet_) {
+ first_packet_ = source_->PopPacket();
+ if (!first_packet_) {
+ // The source has no packets, so we should not insert any dummy packets.
+ packets_to_insert_ = 0;
+ }
+ }
+ if (packets_to_insert_ > 0) {
+ RTC_CHECK(first_packet_);
+ auto dummy_packet = std::unique_ptr<PacketData>(new PacketData());
+ dummy_packet->header = first_packet_->header;
+ dummy_packet->payload = rtc::Buffer(first_packet_->payload.data(),
+ first_packet_->payload.size());
+ dummy_packet->time_ms = first_packet_->time_ms;
+ dummy_packet->header.sequenceNumber -= packets_to_insert_;
+ // This assumes 20ms per packet.
+ dummy_packet->header.timestamp -=
+ 20 * sample_rate_hz_ * packets_to_insert_ / 1000;
+ packets_to_insert_--;
+ return dummy_packet;
+ }
+ return source_->PopPacket();
+}
+
+void InitialPacketInserterNetEqInput::AdvanceOutputEvent() {
+ source_->AdvanceOutputEvent();
+}
+
+bool InitialPacketInserterNetEqInput::ended() const {
+ return source_->ended();
+}
+
+absl::optional<RTPHeader> InitialPacketInserterNetEqInput::NextHeader() const {
+ return source_->NextHeader();
+}
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/tools/initial_packet_inserter_neteq_input.h b/third_party/libwebrtc/modules/audio_coding/neteq/tools/initial_packet_inserter_neteq_input.h
new file mode 100644
index 0000000000..bd20a7aecf
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/tools/initial_packet_inserter_neteq_input.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_TOOLS_INITIAL_PACKET_INSERTER_NETEQ_INPUT_H_
+#define MODULES_AUDIO_CODING_NETEQ_TOOLS_INITIAL_PACKET_INSERTER_NETEQ_INPUT_H_
+
+#include <map>
+#include <memory>
+#include <string>
+
+#include "modules/audio_coding/neteq/tools/neteq_input.h"
+
+namespace webrtc {
+namespace test {
+
+// Wrapper class that can insert a number of packets at the start of the
+// simulation.
+class InitialPacketInserterNetEqInput final : public NetEqInput {
+ public:
+ InitialPacketInserterNetEqInput(std::unique_ptr<NetEqInput> source,
+ int number_of_initial_packets,
+ int sample_rate_hz);
+ absl::optional<int64_t> NextPacketTime() const override;
+ absl::optional<int64_t> NextOutputEventTime() const override;
+ std::unique_ptr<PacketData> PopPacket() override;
+ void AdvanceOutputEvent() override;
+ bool ended() const override;
+ absl::optional<RTPHeader> NextHeader() const override;
+
+ private:
+ const std::unique_ptr<NetEqInput> source_;
+ int packets_to_insert_;
+ const int sample_rate_hz_;
+ std::unique_ptr<PacketData> first_packet_;
+};
+
+} // namespace test
+} // namespace webrtc
+#endif // MODULES_AUDIO_CODING_NETEQ_TOOLS_INITIAL_PACKET_INSERTER_NETEQ_INPUT_H_
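A usage sketch; the packet count and sample rate are example values, and the 20 ms spacing of the inserted packets is fixed by the class as noted above.

#include <memory>
#include <utility>

#include "modules/audio_coding/neteq/tools/initial_packet_inserter_neteq_input.h"

// Pads the start of an existing input with five back-dated copies of its
// first packet.
std::unique_ptr<webrtc::test::NetEqInput> WithInitialPackets(
    std::unique_ptr<webrtc::test::NetEqInput> source) {
  return std::make_unique<webrtc::test::InitialPacketInserterNetEqInput>(
      std::move(source), /*number_of_initial_packets=*/5,
      /*sample_rate_hz=*/48000);
}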
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/tools/input_audio_file.cc b/third_party/libwebrtc/modules/audio_coding/neteq/tools/input_audio_file.cc
new file mode 100644
index 0000000000..b077dbff21
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/tools/input_audio_file.cc
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/tools/input_audio_file.h"
+
+#include "absl/strings/string_view.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace test {
+
+InputAudioFile::InputAudioFile(absl::string_view file_name, bool loop_at_end)
+ : loop_at_end_(loop_at_end) {
+ fp_ = fopen(std::string(file_name).c_str(), "rb");
+ RTC_DCHECK(fp_) << file_name << " could not be opened.";
+}
+
+InputAudioFile::~InputAudioFile() {
+ RTC_DCHECK(fp_);
+ fclose(fp_);
+}
+
+bool InputAudioFile::Read(size_t samples, int16_t* destination) {
+ if (!fp_) {
+ return false;
+ }
+ size_t samples_read = fread(destination, sizeof(int16_t), samples, fp_);
+ if (samples_read < samples) {
+ if (!loop_at_end_) {
+ return false;
+ }
+ // Rewind and read the missing samples.
+ rewind(fp_);
+ size_t missing_samples = samples - samples_read;
+ if (fread(destination + samples_read, sizeof(int16_t), missing_samples,
+ fp_) < missing_samples) {
+ // Could not read enough even after rewinding the file.
+ return false;
+ }
+ }
+ return true;
+}
+
+bool InputAudioFile::Seek(int samples) {
+ if (!fp_) {
+ return false;
+ }
+ // Find file boundaries.
+ const long current_pos = ftell(fp_);
+ RTC_CHECK_NE(EOF, current_pos)
+ << "Error returned when getting file position.";
+ RTC_CHECK_EQ(0, fseek(fp_, 0, SEEK_END)); // Move to end of file.
+ const long file_size = ftell(fp_);
+ RTC_CHECK_NE(EOF, file_size) << "Error returned when getting file position.";
+ // Find new position.
+ long new_pos = current_pos + sizeof(int16_t) * samples; // Samples to bytes.
+ if (loop_at_end_) {
+ new_pos = new_pos % file_size; // Wrap around the end of the file.
+ if (new_pos < 0) {
+      // For negative values of new_pos, new_pos % file_size will also be
+      // negative. Add file_size to get the correct, non-negative result.
+ new_pos += file_size;
+ }
+ } else {
+ new_pos = new_pos > file_size ? file_size : new_pos; // Don't loop.
+ }
+ RTC_CHECK_GE(new_pos, 0)
+ << "Trying to move to before the beginning of the file";
+ // Move to new position relative to the beginning of the file.
+ RTC_CHECK_EQ(0, fseek(fp_, new_pos, SEEK_SET));
+ return true;
+}
+
+void InputAudioFile::DuplicateInterleaved(const int16_t* source,
+ size_t samples,
+ size_t channels,
+ int16_t* destination) {
+ // Start from the end of `source` and `destination`, and work towards the
+ // beginning. This is to allow in-place interleaving of the same array (i.e.,
+ // `source` and `destination` are the same array).
+ for (int i = static_cast<int>(samples - 1); i >= 0; --i) {
+ for (int j = static_cast<int>(channels - 1); j >= 0; --j) {
+ destination[i * channels + j] = source[i];
+ }
+ }
+}
+
+} // namespace test
+} // namespace webrtc
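A worked mirror of the wrap-around arithmetic in Seek() above, assuming a looping 1000-sample (2000-byte) file; the helper below is illustrative only.

// Mirrors the position computation in InputAudioFile::Seek() for a looping
// file of file_size bytes.
long WrappedPosition(long current_pos, int samples, long file_size) {
  long new_pos = current_pos + 2 * samples;  // 2 bytes per int16_t sample.
  new_pos = new_pos % file_size;  // May be negative for negative new_pos.
  if (new_pos < 0)
    new_pos += file_size;  // Compensate for the sign of the '%' result.
  return new_pos;
}

// WrappedPosition(1800, 200, 2000) == 200: sample 900 + 200 wraps to 100.
// WrappedPosition(0, -950, 2000) == 100: sample 0 - 950 wraps to 50.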
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/tools/input_audio_file.h b/third_party/libwebrtc/modules/audio_coding/neteq/tools/input_audio_file.h
new file mode 100644
index 0000000000..f538b295a3
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/tools/input_audio_file.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_TOOLS_INPUT_AUDIO_FILE_H_
+#define MODULES_AUDIO_CODING_NETEQ_TOOLS_INPUT_AUDIO_FILE_H_
+
+#include <stdio.h>
+
+#include <string>
+
+#include "absl/strings/string_view.h"
+
+namespace webrtc {
+namespace test {
+
+// Class for handling a looping input audio file.
+class InputAudioFile {
+ public:
+ explicit InputAudioFile(absl::string_view file_name, bool loop_at_end = true);
+
+ virtual ~InputAudioFile();
+
+ InputAudioFile(const InputAudioFile&) = delete;
+ InputAudioFile& operator=(const InputAudioFile&) = delete;
+
+ // Reads `samples` elements from source file to `destination`. Returns true
+ // if the read was successful, otherwise false. If the file end is reached,
+ // the file is rewound and reading continues from the beginning.
+ // The output `destination` must have the capacity to hold `samples` elements.
+ virtual bool Read(size_t samples, int16_t* destination);
+
+  // Fast-forwards (`samples` > 0) or rewinds (`samples` < 0) the file by the
+ // indicated number of samples. Just like Read(), Seek() starts over at the
+ // beginning of the file if the end is reached. However, seeking backwards
+ // past the beginning of the file is not possible.
+ virtual bool Seek(int samples);
+
+ // Creates a multi-channel signal from a mono signal. Each sample is repeated
+ // `channels` times to create an interleaved multi-channel signal where all
+ // channels are identical. The output `destination` must have the capacity to
+ // hold samples * channels elements. Note that `source` and `destination` can
+ // be the same array (i.e., point to the same address).
+ static void DuplicateInterleaved(const int16_t* source,
+ size_t samples,
+ size_t channels,
+ int16_t* destination);
+
+ private:
+ FILE* fp_;
+ const bool loop_at_end_;
+};
+
+} // namespace test
+} // namespace webrtc
+#endif // MODULES_AUDIO_CODING_NETEQ_TOOLS_INPUT_AUDIO_FILE_H_
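A read-loop sketch; "speech_16k.pcm" is a placeholder. With the default loop_at_end = true, Read() keeps succeeding indefinitely, so the caller bounds the loop.

#include "modules/audio_coding/neteq/tools/input_audio_file.h"

// Reads ten 10 ms mono blocks at 16 kHz and widens each to stereo in place.
void ReadTenBlocks() {
  webrtc::test::InputAudioFile file("speech_16k.pcm");
  int16_t block[160 * 2];  // Room for the interleaved stereo result.
  for (int i = 0; i < 10 && file.Read(160, block); ++i) {
    webrtc::test::InputAudioFile::DuplicateInterleaved(block, 160, 2, block);
    // block now holds 320 interleaved stereo samples.
  }
}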
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/tools/input_audio_file_unittest.cc b/third_party/libwebrtc/modules/audio_coding/neteq/tools/input_audio_file_unittest.cc
new file mode 100644
index 0000000000..52f7ea82a0
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/tools/input_audio_file_unittest.cc
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+// Unit tests for the InputAudioFile test class.
+
+#include "modules/audio_coding/neteq/tools/input_audio_file.h"
+
+#include "rtc_base/numerics/safe_conversions.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace test {
+
+TEST(TestInputAudioFile, DuplicateInterleaveSeparateSrcDst) {
+ static const size_t kSamples = 10;
+ static const size_t kChannels = 2;
+ int16_t input[kSamples];
+ for (size_t i = 0; i < kSamples; ++i) {
+ input[i] = rtc::checked_cast<int16_t>(i);
+ }
+ int16_t output[kSamples * kChannels];
+ InputAudioFile::DuplicateInterleaved(input, kSamples, kChannels, output);
+
+ // Verify output
+ int16_t* output_ptr = output;
+ for (size_t i = 0; i < kSamples; ++i) {
+ for (size_t j = 0; j < kChannels; ++j) {
+ EXPECT_EQ(static_cast<int16_t>(i), *output_ptr++);
+ }
+ }
+}
+
+TEST(TestInputAudioFile, DuplicateInterleaveSameSrcDst) {
+ static const size_t kSamples = 10;
+ static const size_t kChannels = 5;
+ int16_t input[kSamples * kChannels];
+ for (size_t i = 0; i < kSamples; ++i) {
+ input[i] = rtc::checked_cast<int16_t>(i);
+ }
+ InputAudioFile::DuplicateInterleaved(input, kSamples, kChannels, input);
+
+ // Verify output
+ int16_t* output_ptr = input;
+ for (size_t i = 0; i < kSamples; ++i) {
+ for (size_t j = 0; j < kChannels; ++j) {
+ EXPECT_EQ(static_cast<int16_t>(i), *output_ptr++);
+ }
+ }
+}
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/tools/neteq_delay_analyzer.cc b/third_party/libwebrtc/modules/audio_coding/neteq/tools/neteq_delay_analyzer.cc
new file mode 100644
index 0000000000..91c3a1d96b
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/tools/neteq_delay_analyzer.cc
@@ -0,0 +1,304 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/tools/neteq_delay_analyzer.h"
+
+#include <algorithm>
+#include <fstream>
+#include <ios>
+#include <iterator>
+#include <limits>
+#include <utility>
+
+#include "absl/strings/string_view.h"
+#include "modules/include/module_common_types_public.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace test {
+namespace {
+constexpr char kArrivalDelayX[] = "arrival_delay_x";
+constexpr char kArrivalDelayY[] = "arrival_delay_y";
+constexpr char kTargetDelayX[] = "target_delay_x";
+constexpr char kTargetDelayY[] = "target_delay_y";
+constexpr char kPlayoutDelayX[] = "playout_delay_x";
+constexpr char kPlayoutDelayY[] = "playout_delay_y";
+
+// Helper function for NetEqDelayAnalyzer::CreateGraphs. Returns the
+// interpolated value of a function at the point x. Vector x_vec contains the
+// sample points, and y_vec contains the function values at these points. The
+// return value is a linear interpolation between y_vec values.
+double LinearInterpolate(double x,
+ const std::vector<int64_t>& x_vec,
+ const std::vector<int64_t>& y_vec) {
+ // Find first element which is larger than x.
+ auto it = std::upper_bound(x_vec.begin(), x_vec.end(), x);
+ if (it == x_vec.end()) {
+ --it;
+ }
+ const size_t upper_ix = it - x_vec.begin();
+
+ size_t lower_ix;
+ if (upper_ix == 0 || x_vec[upper_ix] <= x) {
+ lower_ix = upper_ix;
+ } else {
+ lower_ix = upper_ix - 1;
+ }
+ double y;
+ if (lower_ix == upper_ix) {
+ y = y_vec[lower_ix];
+ } else {
+ RTC_DCHECK_NE(x_vec[lower_ix], x_vec[upper_ix]);
+ y = (x - x_vec[lower_ix]) * (y_vec[upper_ix] - y_vec[lower_ix]) /
+ (x_vec[upper_ix] - x_vec[lower_ix]) +
+ y_vec[lower_ix];
+ }
+ return y;
+}
+
+void PrintDelays(const NetEqDelayAnalyzer::Delays& delays,
+ int64_t ref_time_ms,
+ absl::string_view var_name_x,
+ absl::string_view var_name_y,
+ std::ofstream& output,
+ absl::string_view terminator = "") {
+ output << var_name_x << " = [ ";
+ for (const std::pair<int64_t, float>& delay : delays) {
+ output << (delay.first - ref_time_ms) / 1000.f << ", ";
+ }
+ output << "]" << terminator << std::endl;
+
+ output << var_name_y << " = [ ";
+ for (const std::pair<int64_t, float>& delay : delays) {
+ output << delay.second << ", ";
+ }
+ output << "]" << terminator << std::endl;
+}
+
+} // namespace
+
+void NetEqDelayAnalyzer::AfterInsertPacket(
+ const test::NetEqInput::PacketData& packet,
+ NetEq* neteq) {
+ data_.insert(
+ std::make_pair(packet.header.timestamp, TimingData(packet.time_ms)));
+ ssrcs_.insert(packet.header.ssrc);
+ payload_types_.insert(packet.header.payloadType);
+}
+
+void NetEqDelayAnalyzer::BeforeGetAudio(NetEq* neteq) {
+ last_sync_buffer_ms_ = neteq->SyncBufferSizeMs();
+}
+
+void NetEqDelayAnalyzer::AfterGetAudio(int64_t time_now_ms,
+ const AudioFrame& audio_frame,
+ bool /*muted*/,
+ NetEq* neteq) {
+ get_audio_time_ms_.push_back(time_now_ms);
+ for (const RtpPacketInfo& info : audio_frame.packet_infos_) {
+ auto it = data_.find(info.rtp_timestamp());
+ if (it == data_.end()) {
+ // This is a packet that was split out from another packet. Skip it.
+ continue;
+ }
+ auto& it_timing = it->second;
+ RTC_CHECK(!it_timing.decode_get_audio_count)
+ << "Decode time already written";
+ it_timing.decode_get_audio_count = get_audio_count_;
+    RTC_CHECK(!it_timing.sync_delay_ms) << "Sync delay already written";
+ it_timing.sync_delay_ms = last_sync_buffer_ms_;
+ it_timing.target_delay_ms = neteq->TargetDelayMs();
+ it_timing.current_delay_ms = neteq->FilteredCurrentDelayMs();
+ }
+ last_sample_rate_hz_ = audio_frame.sample_rate_hz_;
+ ++get_audio_count_;
+}
+
+void NetEqDelayAnalyzer::CreateGraphs(Delays* arrival_delay_ms,
+ Delays* corrected_arrival_delay_ms,
+ Delays* playout_delay_ms,
+ Delays* target_delay_ms) const {
+ if (get_audio_time_ms_.empty()) {
+ return;
+ }
+ // Create nominal_get_audio_time_ms, a vector starting at
+ // get_audio_time_ms_[0] and increasing by 10 for each element.
+ std::vector<int64_t> nominal_get_audio_time_ms(get_audio_time_ms_.size());
+ nominal_get_audio_time_ms[0] = get_audio_time_ms_[0];
+ std::transform(
+ nominal_get_audio_time_ms.begin(), nominal_get_audio_time_ms.end() - 1,
+ nominal_get_audio_time_ms.begin() + 1, [](int64_t& x) { return x + 10; });
+ RTC_DCHECK(
+ std::is_sorted(get_audio_time_ms_.begin(), get_audio_time_ms_.end()));
+
+ std::vector<double> rtp_timestamps_ms;
+ double offset = std::numeric_limits<double>::max();
+ TimestampUnwrapper unwrapper;
+ // This loop traverses data_ and populates rtp_timestamps_ms as well as
+ // calculates the base offset.
+ for (auto& d : data_) {
+ rtp_timestamps_ms.push_back(
+ static_cast<double>(unwrapper.Unwrap(d.first)) /
+ rtc::CheckedDivExact(last_sample_rate_hz_, 1000));
+ offset =
+ std::min(offset, d.second.arrival_time_ms - rtp_timestamps_ms.back());
+ }
+
+ // This loop traverses the data again and populates the graph vectors. The
+ // reason to have two loops and traverse twice is that the offset cannot be
+ // known until the first traversal is done. Meanwhile, the final offset must
+ // be known already at the start of this second loop.
+ size_t i = 0;
+ for (const auto& data : data_) {
+ const double offset_send_time_ms = rtp_timestamps_ms[i++] + offset;
+ const auto& timing = data.second;
+ corrected_arrival_delay_ms->push_back(std::make_pair(
+ timing.arrival_time_ms,
+ LinearInterpolate(timing.arrival_time_ms, get_audio_time_ms_,
+ nominal_get_audio_time_ms) -
+ offset_send_time_ms));
+ arrival_delay_ms->push_back(std::make_pair(
+ timing.arrival_time_ms, timing.arrival_time_ms - offset_send_time_ms));
+
+ if (timing.decode_get_audio_count) {
+ // This packet was decoded.
+ RTC_DCHECK(timing.sync_delay_ms);
+ const int64_t get_audio_time =
+ *timing.decode_get_audio_count * 10 + get_audio_time_ms_[0];
+ const float playout_ms =
+ get_audio_time + *timing.sync_delay_ms - offset_send_time_ms;
+ playout_delay_ms->push_back(std::make_pair(get_audio_time, playout_ms));
+ RTC_DCHECK(timing.target_delay_ms);
+ RTC_DCHECK(timing.current_delay_ms);
+ const float target =
+ playout_ms - *timing.current_delay_ms + *timing.target_delay_ms;
+ target_delay_ms->push_back(std::make_pair(get_audio_time, target));
+ }
+ }
+}
+
+void NetEqDelayAnalyzer::CreateMatlabScript(
+ absl::string_view script_name) const {
+ Delays arrival_delay_ms;
+ Delays corrected_arrival_delay_ms;
+ Delays playout_delay_ms;
+ Delays target_delay_ms;
+ CreateGraphs(&arrival_delay_ms, &corrected_arrival_delay_ms,
+ &playout_delay_ms, &target_delay_ms);
+
+  // It might be better to find the actual smallest timestamp, to make sure
+  // the x-axis never starts at a negative value.
+ const int64_t ref_time_ms = arrival_delay_ms.front().first;
+
+ // Create an output file stream to Matlab script file.
+ std::ofstream output(std::string{script_name});
+
+ PrintDelays(corrected_arrival_delay_ms, ref_time_ms, kArrivalDelayX,
+ kArrivalDelayY, output, ";");
+
+ PrintDelays(playout_delay_ms, ref_time_ms, kPlayoutDelayX, kPlayoutDelayY,
+ output, ";");
+
+ PrintDelays(target_delay_ms, ref_time_ms, kTargetDelayX, kTargetDelayY,
+ output, ";");
+
+ output << "h=plot(" << kArrivalDelayX << ", " << kArrivalDelayY << ", "
+ << kTargetDelayX << ", " << kTargetDelayY << ", 'g.', "
+ << kPlayoutDelayX << ", " << kPlayoutDelayY << ");" << std::endl;
+ output << "set(h(1),'color',0.75*[1 1 1]);" << std::endl;
+ output << "set(h(2),'markersize',6);" << std::endl;
+ output << "set(h(3),'linew',1.5);" << std::endl;
+ output << "ax1=axis;" << std::endl;
+ output << "axis tight" << std::endl;
+ output << "ax2=axis;" << std::endl;
+ output << "axis([ax2(1:3) ax1(4)])" << std::endl;
+ output << "xlabel('time [s]');" << std::endl;
+ output << "ylabel('relative delay [ms]');" << std::endl;
+ if (!ssrcs_.empty()) {
+ auto ssrc_it = ssrcs_.cbegin();
+ output << "title('SSRC: 0x" << std::hex << static_cast<int64_t>(*ssrc_it++);
+ while (ssrc_it != ssrcs_.end()) {
+ output << ", 0x" << std::hex << static_cast<int64_t>(*ssrc_it++);
+ }
+ output << std::dec;
+ auto pt_it = payload_types_.cbegin();
+ output << "; Payload Types: " << *pt_it++;
+ while (pt_it != payload_types_.end()) {
+ output << ", " << *pt_it++;
+ }
+ output << "');" << std::endl;
+ }
+}
+
+void NetEqDelayAnalyzer::CreatePythonScript(
+ absl::string_view script_name) const {
+ Delays arrival_delay_ms;
+ Delays corrected_arrival_delay_ms;
+ Delays playout_delay_ms;
+ Delays target_delay_ms;
+ CreateGraphs(&arrival_delay_ms, &corrected_arrival_delay_ms,
+ &playout_delay_ms, &target_delay_ms);
+
+  // It might be better to find the actual smallest timestamp, to make sure
+  // the x-axis never starts at a negative value.
+ const int64_t ref_time_ms = arrival_delay_ms.front().first;
+
+ // Create an output file stream to the python script file.
+ std::ofstream output(std::string{script_name});
+
+ // Necessary includes
+ output << "import numpy as np" << std::endl;
+ output << "import matplotlib.pyplot as plt" << std::endl;
+
+ PrintDelays(corrected_arrival_delay_ms, ref_time_ms, kArrivalDelayX,
+ kArrivalDelayY, output);
+
+ PrintDelays(playout_delay_ms, ref_time_ms, kPlayoutDelayX, kPlayoutDelayY,
+ output);
+
+ PrintDelays(target_delay_ms, ref_time_ms, kTargetDelayX, kTargetDelayY,
+ output);
+
+ output << "if __name__ == '__main__':" << std::endl;
+ output << " h=plt.plot(" << kArrivalDelayX << ", " << kArrivalDelayY << ", "
+ << kTargetDelayX << ", " << kTargetDelayY << ", 'g.', "
+ << kPlayoutDelayX << ", " << kPlayoutDelayY << ")" << std::endl;
+ output << " plt.setp(h[0],'color',[.75, .75, .75])" << std::endl;
+ output << " plt.setp(h[1],'markersize',6)" << std::endl;
+ output << " plt.setp(h[2],'linewidth',1.5)" << std::endl;
+ output << " plt.axis('tight')" << std::endl;
+ output << " plt.xlabel('time [s]')" << std::endl;
+ output << " plt.ylabel('relative delay [ms]')" << std::endl;
+ if (!ssrcs_.empty()) {
+ auto ssrc_it = ssrcs_.cbegin();
+ output << " plt.title('SSRC: 0x" << std::hex
+ << static_cast<int64_t>(*ssrc_it++);
+ while (ssrc_it != ssrcs_.end()) {
+ output << ", 0x" << std::hex << static_cast<int64_t>(*ssrc_it++);
+ }
+ output << std::dec;
+ auto pt_it = payload_types_.cbegin();
+ output << "; Payload Types: " << *pt_it++;
+ while (pt_it != payload_types_.end()) {
+ output << ", " << *pt_it++;
+ }
+ output << "')" << std::endl;
+ }
+ output << " plt.show()" << std::endl;
+}
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/tools/neteq_delay_analyzer.h b/third_party/libwebrtc/modules/audio_coding/neteq/tools/neteq_delay_analyzer.h
new file mode 100644
index 0000000000..ffcba5843f
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/tools/neteq_delay_analyzer.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_DELAY_ANALYZER_H_
+#define MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_DELAY_ANALYZER_H_
+
+#include <map>
+#include <set>
+#include <string>
+#include <vector>
+
+#include "absl/strings/string_view.h"
+#include "absl/types/optional.h"
+#include "modules/audio_coding/neteq/tools/neteq_input.h"
+#include "modules/audio_coding/neteq/tools/neteq_test.h"
+
+namespace webrtc {
+namespace test {
+
+class NetEqDelayAnalyzer : public test::NetEqPostInsertPacket,
+ public test::NetEqGetAudioCallback {
+ public:
+ void AfterInsertPacket(const test::NetEqInput::PacketData& packet,
+ NetEq* neteq) override;
+
+ void BeforeGetAudio(NetEq* neteq) override;
+
+ void AfterGetAudio(int64_t time_now_ms,
+ const AudioFrame& audio_frame,
+ bool muted,
+ NetEq* neteq) override;
+
+ using Delays = std::vector<std::pair<int64_t, float>>;
+ void CreateGraphs(Delays* arrival_delay_ms,
+ Delays* corrected_arrival_delay_ms,
+ Delays* playout_delay_ms,
+ Delays* target_delay_ms) const;
+
+ // Creates a matlab script with file name script_name. When executed in
+ // Matlab, the script will generate graphs with the same timing information
+ // as provided by CreateGraphs.
+ void CreateMatlabScript(absl::string_view script_name) const;
+
+ // Creates a python script with file name `script_name`. When executed in
+ // Python, the script will generate graphs with the same timing information
+ // as provided by CreateGraphs.
+ void CreatePythonScript(absl::string_view script_name) const;
+
+ private:
+ struct TimingData {
+ explicit TimingData(int64_t at) : arrival_time_ms(at) {}
+ int64_t arrival_time_ms;
+ absl::optional<int64_t> decode_get_audio_count;
+ absl::optional<int64_t> sync_delay_ms;
+ absl::optional<int> target_delay_ms;
+ absl::optional<int> current_delay_ms;
+ };
+ std::map<uint32_t, TimingData> data_;
+ std::vector<int64_t> get_audio_time_ms_;
+ size_t get_audio_count_ = 0;
+ size_t last_sync_buffer_ms_ = 0;
+ int last_sample_rate_hz_ = 0;
+ std::set<uint32_t> ssrcs_;
+ std::set<int> payload_types_;
+};
+
+} // namespace test
+} // namespace webrtc
+#endif // MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_DELAY_ANALYZER_H_
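A usage sketch: since the analyzer implements both callback interfaces, one instance can observe packet insertion and audio output during a NetEqTest run; the registration step depends on the NetEqTest setup and is only indicated here.

#include "modules/audio_coding/neteq/tools/neteq_delay_analyzer.h"

void WriteDelayPlot() {
  webrtc::test::NetEqDelayAnalyzer analyzer;
  // ... register &analyzer as the post-insert-packet and get-audio callbacks
  // of a NetEqTest, then run the simulation ...
  analyzer.CreatePythonScript("neteq_delays.py");
}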
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/tools/neteq_event_log_input.cc b/third_party/libwebrtc/modules/audio_coding/neteq/tools/neteq_event_log_input.cc
new file mode 100644
index 0000000000..0c1f27799a
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/tools/neteq_event_log_input.cc
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/tools/neteq_event_log_input.h"
+
+#include <limits>
+#include <memory>
+
+#include "absl/strings/string_view.h"
+#include "modules/audio_coding/neteq/tools/rtc_event_log_source.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace test {
+
+NetEqEventLogInput* NetEqEventLogInput::CreateFromFile(
+ absl::string_view file_name,
+ absl::optional<uint32_t> ssrc_filter) {
+ auto event_log_src =
+ RtcEventLogSource::CreateFromFile(file_name, ssrc_filter);
+ if (!event_log_src) {
+ return nullptr;
+ }
+ return new NetEqEventLogInput(std::move(event_log_src));
+}
+
+NetEqEventLogInput* NetEqEventLogInput::CreateFromString(
+ absl::string_view file_contents,
+ absl::optional<uint32_t> ssrc_filter) {
+ auto event_log_src =
+ RtcEventLogSource::CreateFromString(file_contents, ssrc_filter);
+ if (!event_log_src) {
+ return nullptr;
+ }
+ return new NetEqEventLogInput(std::move(event_log_src));
+}
+
+absl::optional<int64_t> NetEqEventLogInput::NextOutputEventTime() const {
+ return next_output_event_ms_;
+}
+
+void NetEqEventLogInput::AdvanceOutputEvent() {
+ next_output_event_ms_ = source_->NextAudioOutputEventMs();
+ if (*next_output_event_ms_ == std::numeric_limits<int64_t>::max()) {
+ next_output_event_ms_ = absl::nullopt;
+ }
+}
+
+PacketSource* NetEqEventLogInput::source() {
+ return source_.get();
+}
+
+NetEqEventLogInput::NetEqEventLogInput(
+ std::unique_ptr<RtcEventLogSource> source)
+ : source_(std::move(source)) {
+ LoadNextPacket();
+ AdvanceOutputEvent();
+}
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/tools/neteq_event_log_input.h b/third_party/libwebrtc/modules/audio_coding/neteq/tools/neteq_event_log_input.h
new file mode 100644
index 0000000000..c947ee1fc0
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/tools/neteq_event_log_input.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_EVENT_LOG_INPUT_H_
+#define MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_EVENT_LOG_INPUT_H_
+
+#include <map>
+#include <memory>
+#include <string>
+
+#include "absl/strings/string_view.h"
+#include "modules/audio_coding/neteq/tools/neteq_packet_source_input.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+
+namespace webrtc {
+namespace test {
+
+class RtcEventLogSource;
+
+// Implementation of NetEqPacketSourceInput to be used with an
+// RtcEventLogSource.
+class NetEqEventLogInput final : public NetEqPacketSourceInput {
+ public:
+ static NetEqEventLogInput* CreateFromFile(
+ absl::string_view file_name,
+ absl::optional<uint32_t> ssrc_filter);
+ static NetEqEventLogInput* CreateFromString(
+ absl::string_view file_contents,
+ absl::optional<uint32_t> ssrc_filter);
+
+ absl::optional<int64_t> NextOutputEventTime() const override;
+ void AdvanceOutputEvent() override;
+
+ protected:
+ PacketSource* source() override;
+
+ private:
+  explicit NetEqEventLogInput(std::unique_ptr<RtcEventLogSource> source);
+ std::unique_ptr<RtcEventLogSource> source_;
+};
+
+} // namespace test
+} // namespace webrtc
+#endif // MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_EVENT_LOG_INPUT_H_
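The factories above return a raw pointer, nullptr on failure, so callers typically take ownership immediately; a sketch with a placeholder file name:

#include <memory>

#include "absl/types/optional.h"
#include "modules/audio_coding/neteq/tools/neteq_event_log_input.h"

std::unique_ptr<webrtc::test::NetEqInput> LoadEventLog() {
  std::unique_ptr<webrtc::test::NetEqInput> input(
      webrtc::test::NetEqEventLogInput::CreateFromFile(
          "rtc_event.log", /*ssrc_filter=*/absl::nullopt));
  // input is null if the file could not be parsed as an RTC event log.
  return input;
}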
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/tools/neteq_input.cc b/third_party/libwebrtc/modules/audio_coding/neteq/tools/neteq_input.cc
new file mode 100644
index 0000000000..de416348f1
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/tools/neteq_input.cc
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/tools/neteq_input.h"
+
+#include "rtc_base/strings/string_builder.h"
+
+namespace webrtc {
+namespace test {
+
+NetEqInput::PacketData::PacketData() = default;
+NetEqInput::PacketData::~PacketData() = default;
+
+std::string NetEqInput::PacketData::ToString() const {
+ rtc::StringBuilder ss;
+ ss << "{"
+ "time_ms: "
+ << static_cast<int64_t>(time_ms)
+ << ", "
+ "header: {"
+ "pt: "
+ << static_cast<int>(header.payloadType)
+ << ", "
+ "sn: "
+ << header.sequenceNumber
+ << ", "
+ "ts: "
+ << header.timestamp
+ << ", "
+ "ssrc: "
+ << header.ssrc
+ << "}, "
+ "payload bytes: "
+ << payload.size() << "}";
+ return ss.Release();
+}
+
+TimeLimitedNetEqInput::TimeLimitedNetEqInput(std::unique_ptr<NetEqInput> input,
+ int64_t duration_ms)
+ : input_(std::move(input)),
+ start_time_ms_(input_->NextEventTime()),
+ duration_ms_(duration_ms) {}
+
+TimeLimitedNetEqInput::~TimeLimitedNetEqInput() = default;
+
+absl::optional<int64_t> TimeLimitedNetEqInput::NextPacketTime() const {
+ return ended_ ? absl::nullopt : input_->NextPacketTime();
+}
+
+absl::optional<int64_t> TimeLimitedNetEqInput::NextOutputEventTime() const {
+ return ended_ ? absl::nullopt : input_->NextOutputEventTime();
+}
+
+std::unique_ptr<NetEqInput::PacketData> TimeLimitedNetEqInput::PopPacket() {
+ if (ended_) {
+ return std::unique_ptr<PacketData>();
+ }
+ auto packet = input_->PopPacket();
+ MaybeSetEnded();
+ return packet;
+}
+
+void TimeLimitedNetEqInput::AdvanceOutputEvent() {
+ if (!ended_) {
+ input_->AdvanceOutputEvent();
+ MaybeSetEnded();
+ }
+}
+
+bool TimeLimitedNetEqInput::ended() const {
+ return ended_ || input_->ended();
+}
+
+absl::optional<RTPHeader> TimeLimitedNetEqInput::NextHeader() const {
+ return ended_ ? absl::nullopt : input_->NextHeader();
+}
+
+void TimeLimitedNetEqInput::MaybeSetEnded() {
+ if (NextEventTime() && start_time_ms_ &&
+ *NextEventTime() - *start_time_ms_ > duration_ms_) {
+ ended_ = true;
+ }
+}
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/tools/neteq_input.h b/third_party/libwebrtc/modules/audio_coding/neteq/tools/neteq_input.h
new file mode 100644
index 0000000000..3a66264043
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/tools/neteq_input.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_INPUT_H_
+#define MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_INPUT_H_
+
+#include <algorithm>
+#include <memory>
+#include <string>
+
+#include "absl/types/optional.h"
+#include "modules/audio_coding/neteq/tools/packet.h"
+#include "modules/audio_coding/neteq/tools/packet_source.h"
+#include "rtc_base/buffer.h"
+
+namespace webrtc {
+namespace test {
+
+// Interface class for input to the NetEqTest class.
+class NetEqInput {
+ public:
+ struct PacketData {
+ PacketData();
+ ~PacketData();
+ std::string ToString() const;
+
+ RTPHeader header;
+ rtc::Buffer payload;
+ int64_t time_ms;
+ };
+
+ virtual ~NetEqInput() = default;
+
+ // Returns at what time (in ms) NetEq::InsertPacket should be called next, or
+ // empty if the source is out of packets.
+ virtual absl::optional<int64_t> NextPacketTime() const = 0;
+
+ // Returns at what time (in ms) NetEq::GetAudio should be called next, or
+ // empty if no more output events are available.
+ virtual absl::optional<int64_t> NextOutputEventTime() const = 0;
+
+ // Returns the time (in ms) for the next event from either NextPacketTime()
+ // or NextOutputEventTime(), or empty if both are out of events.
+ absl::optional<int64_t> NextEventTime() const {
+ const auto a = NextPacketTime();
+ const auto b = NextOutputEventTime();
+ // Return the minimum of non-empty `a` and `b`, or empty if both are empty.
+ if (a) {
+ return b ? std::min(*a, *b) : a;
+ }
+ return b ? b : absl::nullopt;
+ }
+
+ // Returns the next packet to be inserted into NetEq. The packet following the
+ // returned one is pre-fetched in the NetEqInput object, such that future
+ // calls to NextPacketTime() or NextHeader() will return information from that
+ // packet.
+ virtual std::unique_ptr<PacketData> PopPacket() = 0;
+
+ // Move to the next output event. This will make NextOutputEventTime() return
+ // a new value (potentially the same if several output events share the same
+ // time).
+ virtual void AdvanceOutputEvent() = 0;
+
+ // Returns true if the source has come to an end. An implementation must
+ // eventually return true from this method, or the test will end up in an
+ // infinite loop.
+ virtual bool ended() const = 0;
+
+ // Returns the RTP header for the next packet, i.e., the packet that will be
+ // delivered next by PopPacket().
+ virtual absl::optional<RTPHeader> NextHeader() const = 0;
+};
+
+// Wrapper class to impose a time limit on a NetEqInput object, typically
+// another time limit than what the object itself provides. For example, an
+// input taken from a file can be cut shorter by wrapping it in this class.
+class TimeLimitedNetEqInput : public NetEqInput {
+ public:
+ TimeLimitedNetEqInput(std::unique_ptr<NetEqInput> input, int64_t duration_ms);
+ ~TimeLimitedNetEqInput() override;
+ absl::optional<int64_t> NextPacketTime() const override;
+ absl::optional<int64_t> NextOutputEventTime() const override;
+ std::unique_ptr<PacketData> PopPacket() override;
+ void AdvanceOutputEvent() override;
+ bool ended() const override;
+ absl::optional<RTPHeader> NextHeader() const override;
+
+ private:
+ void MaybeSetEnded();
+
+ std::unique_ptr<NetEqInput> input_;
+ const absl::optional<int64_t> start_time_ms_;
+ const int64_t duration_ms_;
+ bool ended_ = false;
+};
+
+} // namespace test
+} // namespace webrtc
+#endif // MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_INPUT_H_
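A sketch of the simulation loop these accessors are designed for: NextEventTime() merges the packet and output-event timelines, and TimeLimitedNetEqInput caps the run; the NetEq calls themselves are only indicated.

#include <memory>
#include <utility>

#include "modules/audio_coding/neteq/tools/neteq_input.h"

void DriveInput(std::unique_ptr<webrtc::test::NetEqInput> input) {
  // Stop after at most one simulated minute.
  webrtc::test::TimeLimitedNetEqInput limited(std::move(input), 60000);
  while (!limited.ended()) {
    const absl::optional<int64_t> now = limited.NextEventTime();
    if (!now)
      break;
    if (limited.NextPacketTime() == now) {
      std::unique_ptr<webrtc::test::NetEqInput::PacketData> packet =
          limited.PopPacket();
      // ... NetEq::InsertPacket(packet->header, packet->payload) ...
    }
    if (limited.NextOutputEventTime() == now) {
      // ... NetEq::GetAudio(...) ...
      limited.AdvanceOutputEvent();
    }
  }
}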
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/tools/neteq_packet_source_input.cc b/third_party/libwebrtc/modules/audio_coding/neteq/tools/neteq_packet_source_input.cc
new file mode 100644
index 0000000000..55a5653238
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/tools/neteq_packet_source_input.cc
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/tools/neteq_packet_source_input.h"
+
+#include <algorithm>
+#include <limits>
+
+#include "absl/strings/string_view.h"
+#include "modules/audio_coding/neteq/tools/rtp_file_source.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace test {
+
+NetEqPacketSourceInput::NetEqPacketSourceInput() : next_output_event_ms_(0) {}
+
+absl::optional<int64_t> NetEqPacketSourceInput::NextPacketTime() const {
+ return packet_
+ ? absl::optional<int64_t>(static_cast<int64_t>(packet_->time_ms()))
+ : absl::nullopt;
+}
+
+absl::optional<RTPHeader> NetEqPacketSourceInput::NextHeader() const {
+ return packet_ ? absl::optional<RTPHeader>(packet_->header()) : absl::nullopt;
+}
+
+void NetEqPacketSourceInput::LoadNextPacket() {
+ packet_ = source()->NextPacket();
+}
+
+std::unique_ptr<NetEqInput::PacketData> NetEqPacketSourceInput::PopPacket() {
+ if (!packet_) {
+ return std::unique_ptr<PacketData>();
+ }
+ std::unique_ptr<PacketData> packet_data(new PacketData);
+ packet_data->header = packet_->header();
+ if (packet_->payload_length_bytes() == 0 &&
+ packet_->virtual_payload_length_bytes() > 0) {
+ // This is a header-only "dummy" packet. Set the payload to all zeros, with
+ // length according to the virtual length.
+ packet_data->payload.SetSize(packet_->virtual_payload_length_bytes());
+ std::fill_n(packet_data->payload.data(), packet_data->payload.size(), 0);
+ } else {
+ packet_data->payload.SetData(packet_->payload(),
+ packet_->payload_length_bytes());
+ }
+ packet_data->time_ms = packet_->time_ms();
+
+ LoadNextPacket();
+
+ return packet_data;
+}
+
+NetEqRtpDumpInput::NetEqRtpDumpInput(absl::string_view file_name,
+ const RtpHeaderExtensionMap& hdr_ext_map,
+ absl::optional<uint32_t> ssrc_filter)
+ : source_(RtpFileSource::Create(file_name, ssrc_filter)) {
+ for (const auto& ext_pair : hdr_ext_map) {
+ source_->RegisterRtpHeaderExtension(ext_pair.second, ext_pair.first);
+ }
+ LoadNextPacket();
+}
+
+absl::optional<int64_t> NetEqRtpDumpInput::NextOutputEventTime() const {
+ return next_output_event_ms_;
+}
+
+void NetEqRtpDumpInput::AdvanceOutputEvent() {
+ if (next_output_event_ms_) {
+ *next_output_event_ms_ += kOutputPeriodMs;
+ }
+ if (!NextPacketTime()) {
+ next_output_event_ms_ = absl::nullopt;
+ }
+}
+
+PacketSource* NetEqRtpDumpInput::source() {
+ return source_.get();
+}
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/tools/neteq_packet_source_input.h b/third_party/libwebrtc/modules/audio_coding/neteq/tools/neteq_packet_source_input.h
new file mode 100644
index 0000000000..407fa491b1
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/tools/neteq_packet_source_input.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_PACKET_SOURCE_INPUT_H_
+#define MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_PACKET_SOURCE_INPUT_H_
+
+#include <map>
+#include <memory>
+#include <string>
+
+#include "absl/strings/string_view.h"
+#include "absl/types/optional.h"
+#include "modules/audio_coding/neteq/tools/neteq_input.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+
+namespace webrtc {
+namespace test {
+
+class RtpFileSource;
+
+// An adapter class to dress up a PacketSource object as a NetEqInput.
+class NetEqPacketSourceInput : public NetEqInput {
+ public:
+ using RtpHeaderExtensionMap = std::map<int, webrtc::RTPExtensionType>;
+
+ NetEqPacketSourceInput();
+ absl::optional<int64_t> NextPacketTime() const override;
+ std::unique_ptr<PacketData> PopPacket() override;
+ absl::optional<RTPHeader> NextHeader() const override;
+ bool ended() const override { return !next_output_event_ms_; }
+
+ protected:
+ virtual PacketSource* source() = 0;
+ void LoadNextPacket();
+
+ absl::optional<int64_t> next_output_event_ms_;
+
+ private:
+ std::unique_ptr<Packet> packet_;
+};
+
+// Implementation of NetEqPacketSourceInput to be used with an RtpFileSource.
+class NetEqRtpDumpInput final : public NetEqPacketSourceInput {
+ public:
+ NetEqRtpDumpInput(absl::string_view file_name,
+ const RtpHeaderExtensionMap& hdr_ext_map,
+ absl::optional<uint32_t> ssrc_filter);
+
+ absl::optional<int64_t> NextOutputEventTime() const override;
+ void AdvanceOutputEvent() override;
+
+ protected:
+ PacketSource* source() override;
+
+ private:
+ static constexpr int64_t kOutputPeriodMs = 10;
+
+ std::unique_ptr<RtpFileSource> source_;
+};
+
+} // namespace test
+} // namespace webrtc
+#endif // MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_PACKET_SOURCE_INPUT_H_
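A construction sketch for the rtpdump flavor; the file name and extension ID 3 are example values, and kRtpExtensionAudioLevel comes from rtp_rtcp_defines.h.

#include "absl/types/optional.h"
#include "modules/audio_coding/neteq/tools/neteq_packet_source_input.h"

void ReadRtpDump() {
  // Map RTP header extension ID 3 to the audio-level extension.
  const webrtc::test::NetEqPacketSourceInput::RtpHeaderExtensionMap ext_map = {
      {3, webrtc::kRtpExtensionAudioLevel}};
  webrtc::test::NetEqRtpDumpInput input("audio.rtp", ext_map,
                                        /*ssrc_filter=*/absl::nullopt);
  // input now delivers packets from the dump plus 10 ms output events.
}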
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/tools/neteq_performance_test.cc b/third_party/libwebrtc/modules/audio_coding/neteq/tools/neteq_performance_test.cc
new file mode 100644
index 0000000000..ccaa87b5e8
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/tools/neteq_performance_test.cc
@@ -0,0 +1,128 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/tools/neteq_performance_test.h"
+
+#include "api/audio/audio_frame.h"
+#include "api/audio_codecs/builtin_audio_decoder_factory.h"
+#include "api/neteq/neteq.h"
+#include "modules/audio_coding/codecs/pcm16b/pcm16b.h"
+#include "modules/audio_coding/neteq/default_neteq_factory.h"
+#include "modules/audio_coding/neteq/tools/audio_loop.h"
+#include "modules/audio_coding/neteq/tools/rtp_generator.h"
+#include "rtc_base/checks.h"
+#include "system_wrappers/include/clock.h"
+#include "test/testsupport/file_utils.h"
+
+using webrtc::NetEq;
+using webrtc::test::AudioLoop;
+using webrtc::test::RtpGenerator;
+
+namespace webrtc {
+namespace test {
+
+int64_t NetEqPerformanceTest::Run(int runtime_ms,
+ int lossrate,
+ double drift_factor) {
+ const std::string kInputFileName =
+ webrtc::test::ResourcePath("audio_coding/testfile32kHz", "pcm");
+ const int kSampRateHz = 32000;
+ const std::string kDecoderName = "pcm16-swb32";
+ const int kPayloadType = 95;
+
+ // Initialize NetEq instance.
+ NetEq::Config config;
+ config.sample_rate_hz = kSampRateHz;
+ webrtc::Clock* clock = webrtc::Clock::GetRealTimeClock();
+ auto audio_decoder_factory = CreateBuiltinAudioDecoderFactory();
+ auto neteq =
+ DefaultNetEqFactory().CreateNetEq(config, audio_decoder_factory, clock);
+ // Register decoder in `neteq`.
+ if (!neteq->RegisterPayloadType(kPayloadType,
+ SdpAudioFormat("l16", kSampRateHz, 1)))
+ return -1;
+
+ // Set up AudioLoop object.
+ AudioLoop audio_loop;
+ const size_t kMaxLoopLengthSamples = kSampRateHz * 10; // 10 second loop.
+ const size_t kInputBlockSizeSamples = 60 * kSampRateHz / 1000; // 60 ms.
+ if (!audio_loop.Init(kInputFileName, kMaxLoopLengthSamples,
+ kInputBlockSizeSamples))
+ return -1;
+
+ int32_t time_now_ms = 0;
+
+ // Get first input packet.
+ RTPHeader rtp_header;
+ RtpGenerator rtp_gen(kSampRateHz / 1000);
+ // Start with positive drift first half of simulation.
+ rtp_gen.set_drift_factor(drift_factor);
+ bool drift_flipped = false;
+ int32_t packet_input_time_ms =
+ rtp_gen.GetRtpHeader(kPayloadType, kInputBlockSizeSamples, &rtp_header);
+ auto input_samples = audio_loop.GetNextBlock();
+ if (input_samples.empty())
+ exit(1);
+ uint8_t input_payload[kInputBlockSizeSamples * sizeof(int16_t)];
+ size_t payload_len = WebRtcPcm16b_Encode(input_samples.data(),
+ input_samples.size(), input_payload);
+ RTC_CHECK_EQ(sizeof(input_payload), payload_len);
+
+ // Main loop.
+ int64_t start_time_ms = clock->TimeInMilliseconds();
+ AudioFrame out_frame;
+ while (time_now_ms < runtime_ms) {
+ while (packet_input_time_ms <= time_now_ms) {
+      // Drop one packet out of every N, where N = lossrate.
+ bool lost = false;
+ if (lossrate > 0) {
+ lost = ((rtp_header.sequenceNumber - 1) % lossrate) == 0;
+ }
+ if (!lost) {
+ // Insert packet.
+ int error = neteq->InsertPacket(rtp_header, input_payload);
+ if (error != NetEq::kOK)
+ return -1;
+ }
+
+ // Get next packet.
+ packet_input_time_ms = rtp_gen.GetRtpHeader(
+ kPayloadType, kInputBlockSizeSamples, &rtp_header);
+ input_samples = audio_loop.GetNextBlock();
+ if (input_samples.empty())
+ return -1;
+ payload_len = WebRtcPcm16b_Encode(input_samples.data(),
+ input_samples.size(), input_payload);
+ RTC_DCHECK_EQ(payload_len, kInputBlockSizeSamples * sizeof(int16_t));
+ }
+
+ // Get output audio, but don't do anything with it.
+ bool muted;
+ int error = neteq->GetAudio(&out_frame, &muted);
+ RTC_CHECK(!muted);
+ if (error != NetEq::kOK)
+ return -1;
+
+ RTC_DCHECK_EQ(out_frame.samples_per_channel_, (kSampRateHz * 10) / 1000);
+
+ static const int kOutputBlockSizeMs = 10;
+ time_now_ms += kOutputBlockSizeMs;
+ if (time_now_ms >= runtime_ms / 2 && !drift_flipped) {
+ // Apply negative drift second half of simulation.
+ rtp_gen.set_drift_factor(-drift_factor);
+ drift_flipped = true;
+ }
+ }
+ int64_t end_time_ms = clock->TimeInMilliseconds();
+ return end_time_ms - start_time_ms;
+}
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/tools/neteq_performance_test.h b/third_party/libwebrtc/modules/audio_coding/neteq/tools/neteq_performance_test.h
new file mode 100644
index 0000000000..b5b4d91577
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/tools/neteq_performance_test.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_PERFORMANCE_TEST_H_
+#define MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_PERFORMANCE_TEST_H_
+
+#include <stdint.h>
+
+namespace webrtc {
+namespace test {
+
+class NetEqPerformanceTest {
+ public:
+ // Runs a performance test with parameters as follows:
+ // `runtime_ms`: the simulation time, i.e., the duration of the audio data.
+ // `lossrate`: drop one out of `lossrate` packets, e.g., one out of 10.
+ // `drift_factor`: clock drift in [0, 1].
+ // Returns the runtime in ms.
+ static int64_t Run(int runtime_ms, int lossrate, double drift_factor);
+};
+
+} // namespace test
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_PERFORMANCE_TEST_H_
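
A minimal invocation sketch: simulate 10 seconds of audio with one out of
every 10 packets lost and a drift factor of 0.1 (the values and the wrapper
function are illustrative):

    #include <cstdio>

    #include "modules/audio_coding/neteq/tools/neteq_performance_test.h"

    int RunPerformanceSketch() {
      const int64_t wallclock_ms = webrtc::test::NetEqPerformanceTest::Run(
          /*runtime_ms=*/10000, /*lossrate=*/10, /*drift_factor=*/0.1);
      if (wallclock_ms < 0)
        return 1;  // Run() returns -1 on failure.
      std::printf("Simulated 10 s in %lld ms of wall-clock time.\n",
                  static_cast<long long>(wallclock_ms));
      return 0;
    }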
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/tools/neteq_quality_test.cc b/third_party/libwebrtc/modules/audio_coding/neteq/tools/neteq_quality_test.cc
new file mode 100644
index 0000000000..1fd82dfcdd
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/tools/neteq_quality_test.cc
@@ -0,0 +1,475 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/tools/neteq_quality_test.h"
+
+#include <stdio.h>
+
+#include <cmath>
+
+#include "absl/flags/flag.h"
+#include "absl/strings/string_view.h"
+#include "modules/audio_coding/neteq/default_neteq_factory.h"
+#include "modules/audio_coding/neteq/tools/neteq_quality_test.h"
+#include "modules/audio_coding/neteq/tools/output_audio_file.h"
+#include "modules/audio_coding/neteq/tools/output_wav_file.h"
+#include "modules/audio_coding/neteq/tools/resample_input_audio_file.h"
+#include "rtc_base/checks.h"
+#include "system_wrappers/include/clock.h"
+#include "test/testsupport/file_utils.h"
+
+const std::string& DefaultInFilename() {
+ static const std::string path =
+ ::webrtc::test::ResourcePath("audio_coding/speech_mono_16kHz", "pcm");
+ return path;
+}
+
+const std::string& DefaultOutFilename() {
+ static const std::string path =
+ ::webrtc::test::OutputPath() + "neteq_quality_test_out.pcm";
+ return path;
+}
+
+ABSL_FLAG(
+ std::string,
+ in_filename,
+ DefaultInFilename(),
+ "Filename for input audio (specify sample rate with --input_sample_rate, "
+ "and channels with --channels).");
+
+ABSL_FLAG(int, input_sample_rate, 16000, "Sample rate of input file in Hz.");
+
+ABSL_FLAG(int, channels, 1, "Number of channels in input audio.");
+
+ABSL_FLAG(std::string,
+ out_filename,
+ DefaultOutFilename(),
+ "Name of output audio file.");
+
+ABSL_FLAG(
+ int,
+ runtime_ms,
+ 10000,
+ "Simulated runtime (milliseconds). -1 will consume the complete file.");
+
+ABSL_FLAG(int, packet_loss_rate, 10, "Percentage of packet loss.");
+
+ABSL_FLAG(int,
+ random_loss_mode,
+ ::webrtc::test::kUniformLoss,
+ "Random loss mode: 0--no loss, 1--uniform loss, 2--Gilbert Elliot "
+ "loss, 3--fixed loss.");
+
+ABSL_FLAG(int,
+ burst_length,
+ 30,
+ "Burst length in milliseconds, only valid for Gilbert Elliot loss.");
+
+ABSL_FLAG(float, drift_factor, 0.0, "Time drift factor.");
+
+ABSL_FLAG(int,
+ preload_packets,
+ 1,
+ "Preload the buffer with this many packets.");
+
+ABSL_FLAG(std::string,
+ loss_events,
+ "",
+ "List of loss events time and duration separated by comma: "
+ "<first_event_time> <first_event_duration>, <second_event_time> "
+ "<second_event_duration>, ...");
+
+namespace webrtc {
+namespace test {
+
+namespace {
+
+std::unique_ptr<NetEq> CreateNetEq(
+ const NetEq::Config& config,
+ Clock* clock,
+ const rtc::scoped_refptr<AudioDecoderFactory>& decoder_factory) {
+ return DefaultNetEqFactory().CreateNetEq(config, decoder_factory, clock);
+}
+
+} // namespace
+
+const uint8_t kPayloadType = 95;
+const int kOutputSizeMs = 10;
+const int kInitSeed = 0x12345678;
+const int kPacketLossTimeUnitMs = 10;
+
+// Common validator for file names.
+static bool ValidateFilename(absl::string_view value, bool is_output) {
+ if (!is_output) {
+ RTC_CHECK_NE(value.substr(value.find_last_of('.') + 1), "wav")
+ << "WAV file input is not supported";
+ }
+ FILE* fid = is_output ? fopen(std::string(value).c_str(), "wb")
+ : fopen(std::string(value).c_str(), "rb");
+ if (fid == nullptr)
+ return false;
+ fclose(fid);
+ return true;
+}
+
+// ProbTrans00Solver() calculates the transition probability from the no-loss
+// state to itself in a modified Gilbert Elliot packet loss model, such that
+// the target packet loss rate `loss_rate` is achieved. A packet is not lost
+// only if all `units` drawings within the duration of the packet result in
+// no-loss.
+static double ProbTrans00Solver(int units,
+ double loss_rate,
+ double prob_trans_10) {
+ if (units == 1)
+ return prob_trans_10 / (1.0f - loss_rate) - prob_trans_10;
+ // 0 == prob_trans_00 ^ (units - 1) + (1 - loss_rate) / prob_trans_10 *
+ // prob_trans_00 - (1 - loss_rate) * (1 + 1 / prob_trans_10).
+  // There is a unique solution between 0.0 and 1.0, since the polynomial is
+  // monotonic and has opposite signs at 0.0 and 1.0.
+ // For simplicity, we reformulate the equation as
+ // f(x) = x ^ (units - 1) + a x + b.
+ // Its derivative is
+ // f'(x) = (units - 1) x ^ (units - 2) + a.
+ // The derivative is strictly greater than 0 when x is between 0 and 1.
+  // We use Newton's method to solve the equation; the iteration is
+  //     x(k+1) = x(k) - f(x(k)) / f'(x(k)).
+ const double kPrecision = 0.001f;
+ const int kIterations = 100;
+ const double a = (1.0f - loss_rate) / prob_trans_10;
+ const double b = (loss_rate - 1.0f) * (1.0f + 1.0f / prob_trans_10);
+  double x = 0.0;  // Starting point.
+ double f = b;
+ double f_p;
+ int iter = 0;
+ while ((f >= kPrecision || f <= -kPrecision) && iter < kIterations) {
+ f_p = (units - 1.0f) * std::pow(x, units - 2) + a;
+ x -= f / f_p;
+ if (x > 1.0f) {
+ x = 1.0f;
+ } else if (x < 0.0f) {
+ x = 0.0f;
+ }
+ f = std::pow(x, units - 1) + a * x + b;
+ iter++;
+ }
+ return x;
+}
+
+NetEqQualityTest::NetEqQualityTest(
+ int block_duration_ms,
+ int in_sampling_khz,
+ int out_sampling_khz,
+ const SdpAudioFormat& format,
+ const rtc::scoped_refptr<AudioDecoderFactory>& decoder_factory)
+ : audio_format_(format),
+ channels_(absl::GetFlag(FLAGS_channels)),
+ decoded_time_ms_(0),
+ decodable_time_ms_(0),
+ drift_factor_(absl::GetFlag(FLAGS_drift_factor)),
+ packet_loss_rate_(absl::GetFlag(FLAGS_packet_loss_rate)),
+ block_duration_ms_(block_duration_ms),
+ in_sampling_khz_(in_sampling_khz),
+ out_sampling_khz_(out_sampling_khz),
+ in_size_samples_(
+ static_cast<size_t>(in_sampling_khz_ * block_duration_ms_)),
+ payload_size_bytes_(0),
+ max_payload_bytes_(0),
+ in_file_(
+ new ResampleInputAudioFile(absl::GetFlag(FLAGS_in_filename),
+ absl::GetFlag(FLAGS_input_sample_rate),
+ in_sampling_khz * 1000,
+ absl::GetFlag(FLAGS_runtime_ms) > 0)),
+ rtp_generator_(
+ new RtpGenerator(in_sampling_khz_, 0, 0, decodable_time_ms_)),
+ total_payload_size_bytes_(0) {
+ // Flag validation
+ RTC_CHECK(ValidateFilename(absl::GetFlag(FLAGS_in_filename), false))
+ << "Invalid input filename.";
+
+ RTC_CHECK(absl::GetFlag(FLAGS_input_sample_rate) == 8000 ||
+ absl::GetFlag(FLAGS_input_sample_rate) == 16000 ||
+ absl::GetFlag(FLAGS_input_sample_rate) == 32000 ||
+ absl::GetFlag(FLAGS_input_sample_rate) == 48000)
+      << "Invalid sample rate; should be 8000, 16000, 32000 or 48000 Hz.";
+
+ RTC_CHECK_EQ(absl::GetFlag(FLAGS_channels), 1)
+      << "Invalid number of channels; currently only 1 channel is supported.";
+
+ RTC_CHECK(ValidateFilename(absl::GetFlag(FLAGS_out_filename), true))
+ << "Invalid output filename.";
+
+ RTC_CHECK(absl::GetFlag(FLAGS_packet_loss_rate) >= 0 &&
+ absl::GetFlag(FLAGS_packet_loss_rate) <= 100)
+      << "Invalid packet loss percentage; should be between 0 and 100.";
+
+ RTC_CHECK(absl::GetFlag(FLAGS_random_loss_mode) >= 0 &&
+ absl::GetFlag(FLAGS_random_loss_mode) < kLastLossMode)
+ << "Invalid random packet loss mode, should be between 0 and "
+ << kLastLossMode - 1 << ".";
+
+ RTC_CHECK_GE(absl::GetFlag(FLAGS_burst_length), kPacketLossTimeUnitMs)
+ << "Invalid burst length, should be greater than or equal to "
+ << kPacketLossTimeUnitMs << " ms.";
+
+ RTC_CHECK_GT(absl::GetFlag(FLAGS_drift_factor), -0.1)
+ << "Invalid drift factor, should be greater than -0.1.";
+
+ RTC_CHECK_GE(absl::GetFlag(FLAGS_preload_packets), 0)
+ << "Invalid number of packets to preload; must be non-negative.";
+
+ const std::string out_filename = absl::GetFlag(FLAGS_out_filename);
+ const std::string log_filename = out_filename + ".log";
+ log_file_.open(log_filename.c_str(), std::ofstream::out);
+ RTC_CHECK(log_file_.is_open());
+
+ if (out_filename.size() >= 4 &&
+ out_filename.substr(out_filename.size() - 4) == ".wav") {
+ // Open a wav file.
+ output_.reset(
+ new webrtc::test::OutputWavFile(out_filename, 1000 * out_sampling_khz));
+ } else {
+ // Open a pcm file.
+ output_.reset(new webrtc::test::OutputAudioFile(out_filename));
+ }
+
+ NetEq::Config config;
+ config.sample_rate_hz = out_sampling_khz_ * 1000;
+ neteq_ = CreateNetEq(config, Clock::GetRealTimeClock(), decoder_factory);
+ max_payload_bytes_ = in_size_samples_ * channels_ * sizeof(int16_t);
+ in_data_.reset(new int16_t[in_size_samples_ * channels_]);
+}
+
+NetEqQualityTest::~NetEqQualityTest() {
+ log_file_.close();
+}
+
+bool NoLoss::Lost(int now_ms) {
+ return false;
+}
+
+UniformLoss::UniformLoss(double loss_rate) : loss_rate_(loss_rate) {}
+
+bool UniformLoss::Lost(int now_ms) {
+ int drop_this = rand();
+ return (drop_this < loss_rate_ * RAND_MAX);
+}
+
+GilbertElliotLoss::GilbertElliotLoss(double prob_trans_11, double prob_trans_01)
+ : prob_trans_11_(prob_trans_11),
+ prob_trans_01_(prob_trans_01),
+ lost_last_(false),
+ uniform_loss_model_(new UniformLoss(0)) {}
+
+GilbertElliotLoss::~GilbertElliotLoss() {}
+
+bool GilbertElliotLoss::Lost(int now_ms) {
+ // Simulate bursty channel (Gilbert model).
+ // (1st order) Markov chain model with memory of the previous/last
+ // packet state (lost or received).
+ if (lost_last_) {
+ // Previous packet was not received.
+ uniform_loss_model_->set_loss_rate(prob_trans_11_);
+ return lost_last_ = uniform_loss_model_->Lost(now_ms);
+ } else {
+ uniform_loss_model_->set_loss_rate(prob_trans_01_);
+ return lost_last_ = uniform_loss_model_->Lost(now_ms);
+ }
+}
+
+FixedLossModel::FixedLossModel(
+ std::set<FixedLossEvent, FixedLossEventCmp> loss_events)
+ : loss_events_(loss_events) {
+ loss_events_it_ = loss_events_.begin();
+}
+
+FixedLossModel::~FixedLossModel() {}
+
+bool FixedLossModel::Lost(int now_ms) {
+ if (loss_events_it_ != loss_events_.end() &&
+ now_ms > loss_events_it_->start_ms) {
+ if (now_ms <= loss_events_it_->start_ms + loss_events_it_->duration_ms) {
+ return true;
+ } else {
+ ++loss_events_it_;
+ return false;
+ }
+ }
+ return false;
+}
+
+void NetEqQualityTest::SetUp() {
+ ASSERT_TRUE(neteq_->RegisterPayloadType(kPayloadType, audio_format_));
+ rtp_generator_->set_drift_factor(drift_factor_);
+
+ int units = block_duration_ms_ / kPacketLossTimeUnitMs;
+ switch (absl::GetFlag(FLAGS_random_loss_mode)) {
+ case kUniformLoss: {
+      // `unit_loss_rate` is the packet loss rate for each unit time interval
+      // (kPacketLossTimeUnitMs). Since a packet loss event is generated if
+      // any of the `block_duration_ms_ / kPacketLossTimeUnitMs` unit time
+      // intervals of a full packet duration is drawn with a loss,
+      // `unit_loss_rate` fulfills
+      // (1 - unit_loss_rate) ^ (block_duration_ms_ / kPacketLossTimeUnitMs)
+      //     == 1 - 0.01 * packet_loss_rate_ (the rate is given in percent).
+ double unit_loss_rate =
+ (1.0 - std::pow(1.0 - 0.01 * packet_loss_rate_, 1.0 / units));
+ loss_model_.reset(new UniformLoss(unit_loss_rate));
+ break;
+ }
+ case kGilbertElliotLoss: {
+      // `FLAGS_burst_length` should be an integer multiple of
+      // kPacketLossTimeUnitMs.
+ ASSERT_EQ(0, absl::GetFlag(FLAGS_burst_length) % kPacketLossTimeUnitMs);
+
+      // We do not allow 100 percent packet loss in the Gilbert Elliot model,
+      // as it would make the model degenerate.
+ ASSERT_GT(100, packet_loss_rate_);
+
+ // To guarantee the overall packet loss rate, transition probabilities
+ // need to satisfy:
+      //   pi_0 * (1 - prob_trans_01_) ^ units +
+      //       pi_1 * prob_trans_10_ * (1 - prob_trans_01_) ^ (units - 1)
+      //           == 1 - loss_rate
+ // pi_0 = prob_trans_10 / (prob_trans_10 + prob_trans_01_)
+ // is the stationary state probability of no-loss
+ // pi_1 = prob_trans_01_ / (prob_trans_10 + prob_trans_01_)
+ // is the stationary state probability of loss
+ // After a derivation prob_trans_00 should satisfy:
+ // prob_trans_00 ^ (units - 1) = (loss_rate - 1) / prob_trans_10 *
+ // prob_trans_00 + (1 - loss_rate) * (1 + 1 / prob_trans_10).
+ double loss_rate = 0.01f * packet_loss_rate_;
+ double prob_trans_10 =
+ 1.0f * kPacketLossTimeUnitMs / absl::GetFlag(FLAGS_burst_length);
+ double prob_trans_00 = ProbTrans00Solver(units, loss_rate, prob_trans_10);
+ loss_model_.reset(
+ new GilbertElliotLoss(1.0f - prob_trans_10, 1.0f - prob_trans_00));
+ break;
+ }
+ case kFixedLoss: {
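+      // Example: --loss_events "1000 100, 4000 50" defines two events that
+      // drop packets sent during 1000-1100 ms and 4000-4050 ms.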
+ std::istringstream loss_events_stream(absl::GetFlag(FLAGS_loss_events));
+ std::string loss_event_string;
+ std::set<FixedLossEvent, FixedLossEventCmp> loss_events;
+ while (std::getline(loss_events_stream, loss_event_string, ',')) {
+ std::vector<int> loss_event_params;
+ std::istringstream loss_event_params_stream(loss_event_string);
+ std::copy(std::istream_iterator<int>(loss_event_params_stream),
+ std::istream_iterator<int>(),
+ std::back_inserter(loss_event_params));
+ RTC_CHECK_EQ(loss_event_params.size(), 2);
+ auto result = loss_events.insert(
+ FixedLossEvent(loss_event_params[0], loss_event_params[1]));
+ RTC_CHECK(result.second);
+ }
+ RTC_CHECK_GT(loss_events.size(), 0);
+ loss_model_.reset(new FixedLossModel(loss_events));
+ break;
+ }
+ default: {
+ loss_model_.reset(new NoLoss);
+ break;
+ }
+ }
+
+  // Make sure that the packet loss profile is the same for all derived tests.
+ srand(kInitSeed);
+}
+
+std::ofstream& NetEqQualityTest::Log() {
+ return log_file_;
+}
+
+bool NetEqQualityTest::PacketLost() {
+ int cycles = block_duration_ms_ / kPacketLossTimeUnitMs;
+
+  // The loop makes sure that codecs with different block lengths share the
+  // same packet loss profile.
+ bool lost = false;
+ for (int idx = 0; idx < cycles; idx++) {
+ if (loss_model_->Lost(decoded_time_ms_)) {
+ // The packet will be lost if any of the drawings indicates a loss, but
+ // the loop has to go on to make sure that codecs with different block
+ // lengths keep the same pace.
+ lost = true;
+ }
+ }
+ return lost;
+}
+
+int NetEqQualityTest::Transmit() {
+ int packet_input_time_ms = rtp_generator_->GetRtpHeader(
+ kPayloadType, in_size_samples_, &rtp_header_);
+ Log() << "Packet of size " << payload_size_bytes_ << " bytes, for frame at "
+ << packet_input_time_ms << " ms ";
+ if (payload_size_bytes_ > 0) {
+ if (!PacketLost()) {
+ int ret = neteq_->InsertPacket(
+ rtp_header_,
+ rtc::ArrayView<const uint8_t>(payload_.data(), payload_size_bytes_));
+ if (ret != NetEq::kOK)
+ return -1;
+ Log() << "was sent.";
+ } else {
+ Log() << "was lost.";
+ }
+ }
+ Log() << std::endl;
+ return packet_input_time_ms;
+}
+
+int NetEqQualityTest::DecodeBlock() {
+ bool muted;
+ int ret = neteq_->GetAudio(&out_frame_, &muted);
+ RTC_CHECK(!muted);
+
+ if (ret != NetEq::kOK) {
+ return -1;
+ } else {
+ RTC_DCHECK_EQ(out_frame_.num_channels_, channels_);
+ RTC_DCHECK_EQ(out_frame_.samples_per_channel_,
+ static_cast<size_t>(kOutputSizeMs * out_sampling_khz_));
+ RTC_CHECK(output_->WriteArray(
+ out_frame_.data(),
+ out_frame_.samples_per_channel_ * out_frame_.num_channels_));
+ return static_cast<int>(out_frame_.samples_per_channel_);
+ }
+}
+
+void NetEqQualityTest::Simulate() {
+ int audio_size_samples;
+ bool end_of_input = false;
+ int runtime_ms = absl::GetFlag(FLAGS_runtime_ms) >= 0
+ ? absl::GetFlag(FLAGS_runtime_ms)
+ : INT_MAX;
+
+ while (!end_of_input && decoded_time_ms_ < runtime_ms) {
+ // Preload the buffer if needed.
+ while (decodable_time_ms_ -
+ absl::GetFlag(FLAGS_preload_packets) * block_duration_ms_ <
+ decoded_time_ms_) {
+ if (!in_file_->Read(in_size_samples_ * channels_, &in_data_[0])) {
+ end_of_input = true;
+ ASSERT_TRUE(end_of_input && absl::GetFlag(FLAGS_runtime_ms) < 0);
+ break;
+ }
+ payload_.Clear();
+ payload_size_bytes_ = EncodeBlock(&in_data_[0], in_size_samples_,
+ &payload_, max_payload_bytes_);
+ total_payload_size_bytes_ += payload_size_bytes_;
+ decodable_time_ms_ = Transmit() + block_duration_ms_;
+ }
+ audio_size_samples = DecodeBlock();
+ if (audio_size_samples > 0) {
+ decoded_time_ms_ += audio_size_samples / out_sampling_khz_;
+ }
+ }
+ Log() << "Average bit rate was "
+ << 8.0f * total_payload_size_bytes_ / absl::GetFlag(FLAGS_runtime_ms)
+ << " kbps" << std::endl;
+}
+
+} // namespace test
+} // namespace webrtc
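
For reference, the Gilbert Elliot algebra sketched in the comments above
compresses to the following, writing x for prob_trans_00, n for the number of
drawing units per packet, L for the target loss rate and p_{10} for
prob_trans_10 (a restatement of the derivation already outlined, not new
behavior):

    % Stationary probabilities of the two-state Markov chain, p_{01} = 1 - x:
    \pi_0 = \frac{p_{10}}{p_{10} + p_{01}}, \qquad
    \pi_1 = \frac{p_{01}}{p_{10} + p_{01}}
    % A packet survives only if all n drawings land in the no-loss state:
    \pi_0 \, x^{n} + \pi_1 \, p_{10} \, x^{n-1} = 1 - L
    % Substituting the stationary probabilities and dividing by p_{10}:
    x^{n-1} + \frac{1 - L}{p_{10}} \, x
        - (1 - L)\left(1 + \frac{1}{p_{10}}\right) = 0

The last line is exactly the f(x) = x^(units-1) + a x + b that
ProbTrans00Solver() drives to zero with Newton's method.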
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/tools/neteq_quality_test.h b/third_party/libwebrtc/modules/audio_coding/neteq/tools/neteq_quality_test.h
new file mode 100644
index 0000000000..edcb117748
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/tools/neteq_quality_test.h
@@ -0,0 +1,176 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_QUALITY_TEST_H_
+#define MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_QUALITY_TEST_H_
+
+#include <fstream>
+#include <memory>
+
+#include "api/audio_codecs/builtin_audio_decoder_factory.h"
+#include "api/neteq/neteq.h"
+#include "modules/audio_coding/neteq/tools/audio_sink.h"
+#include "modules/audio_coding/neteq/tools/input_audio_file.h"
+#include "modules/audio_coding/neteq/tools/rtp_generator.h"
+#include "system_wrappers/include/clock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace test {
+
+enum LossModes {
+ kNoLoss,
+ kUniformLoss,
+ kGilbertElliotLoss,
+ kFixedLoss,
+ kLastLossMode
+};
+
+class LossModel {
+ public:
+ virtual ~LossModel() {}
+ virtual bool Lost(int now_ms) = 0;
+};
+
+class NoLoss : public LossModel {
+ public:
+ bool Lost(int now_ms) override;
+};
+
+class UniformLoss : public LossModel {
+ public:
+ UniformLoss(double loss_rate);
+ bool Lost(int now_ms) override;
+ void set_loss_rate(double loss_rate) { loss_rate_ = loss_rate; }
+
+ private:
+ double loss_rate_;
+};
+
+class GilbertElliotLoss : public LossModel {
+ public:
+ GilbertElliotLoss(double prob_trans_11, double prob_trans_01);
+ ~GilbertElliotLoss() override;
+ bool Lost(int now_ms) override;
+
+ private:
+ // Prob. of losing current packet, when previous packet is lost.
+ double prob_trans_11_;
+ // Prob. of losing current packet, when previous packet is not lost.
+ double prob_trans_01_;
+ bool lost_last_;
+ std::unique_ptr<UniformLoss> uniform_loss_model_;
+};
+
+struct FixedLossEvent {
+ int start_ms;
+ int duration_ms;
+ FixedLossEvent(int start_ms, int duration_ms)
+ : start_ms(start_ms), duration_ms(duration_ms) {}
+};
+
+struct FixedLossEventCmp {
+ bool operator()(const FixedLossEvent& l_event,
+ const FixedLossEvent& r_event) const {
+ return l_event.start_ms < r_event.start_ms;
+ }
+};
+
+class FixedLossModel : public LossModel {
+ public:
+ FixedLossModel(std::set<FixedLossEvent, FixedLossEventCmp> loss_events);
+ ~FixedLossModel() override;
+ bool Lost(int now_ms) override;
+
+ private:
+ std::set<FixedLossEvent, FixedLossEventCmp> loss_events_;
+ std::set<FixedLossEvent, FixedLossEventCmp>::iterator loss_events_it_;
+};
+
+class NetEqQualityTest : public ::testing::Test {
+ protected:
+ NetEqQualityTest(
+ int block_duration_ms,
+ int in_sampling_khz,
+ int out_sampling_khz,
+ const SdpAudioFormat& format,
+ const rtc::scoped_refptr<AudioDecoderFactory>& decoder_factory =
+ webrtc::CreateBuiltinAudioDecoderFactory());
+ ~NetEqQualityTest() override;
+
+ void SetUp() override;
+
+  // EncodeBlock(...) does the following:
+  // 1. encodes a block of audio stored in `in_data`, with a length of
+  //    `block_size_samples` (samples per channel),
+  // 2. saves the bit stream to `payload`, which can hold at most `max_bytes`
+  //    bytes,
+  // 3. returns the length of the payload (in bytes).
+ virtual int EncodeBlock(int16_t* in_data,
+ size_t block_size_samples,
+ rtc::Buffer* payload,
+ size_t max_bytes) = 0;
+
+  // PacketLost(...) determines whether a packet sent at an indicated time
+  // gets lost or not.
+ bool PacketLost();
+
+  // DecodeBlock() decodes a block of audio using the payload stored in
+  // `payload_` with the length of `payload_size_bytes_` (bytes). The decoded
+  // audio is stored in `out_frame_` and written to `output_`.
+ int DecodeBlock();
+
+ // Transmit() uses `rtp_generator_` to generate a packet and passes it to
+ // `neteq_`.
+ int Transmit();
+
+ // Runs encoding / transmitting / decoding.
+ void Simulate();
+
+ // Write to log file. Usage Log() << ...
+ std::ofstream& Log();
+
+ SdpAudioFormat audio_format_;
+ const size_t channels_;
+
+ private:
+ int decoded_time_ms_;
+ int decodable_time_ms_;
+ double drift_factor_;
+ int packet_loss_rate_;
+ const int block_duration_ms_;
+ const int in_sampling_khz_;
+ const int out_sampling_khz_;
+
+ // Number of samples per channel in a frame.
+ const size_t in_size_samples_;
+
+ size_t payload_size_bytes_;
+ size_t max_payload_bytes_;
+
+ std::unique_ptr<InputAudioFile> in_file_;
+ std::unique_ptr<AudioSink> output_;
+ std::ofstream log_file_;
+
+ std::unique_ptr<RtpGenerator> rtp_generator_;
+ std::unique_ptr<NetEq> neteq_;
+ std::unique_ptr<LossModel> loss_model_;
+
+ std::unique_ptr<int16_t[]> in_data_;
+ rtc::Buffer payload_;
+ AudioFrame out_frame_;
+ RTPHeader rtp_header_;
+
+ size_t total_payload_size_bytes_;
+};
+
+} // namespace test
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_QUALITY_TEST_H_
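
Since EncodeBlock() is the only pure virtual member, a derived quality test
can be as small as the following sketch, which pushes linear PCM16 through
NetEq (the class name and parameter choices are illustrative; the tree
contains similar per-codec tests):

    #include "modules/audio_coding/codecs/pcm16b/pcm16b.h"
    #include "modules/audio_coding/neteq/tools/neteq_quality_test.h"
    #include "rtc_base/checks.h"

    namespace webrtc {
    namespace test {

    class NetEqPcm16QualityTestSketch : public NetEqQualityTest {
     protected:
      NetEqPcm16QualityTestSketch()
          : NetEqQualityTest(/*block_duration_ms=*/20,
                             /*in_sampling_khz=*/16,
                             /*out_sampling_khz=*/16,
                             SdpAudioFormat("l16", 16000, 1)) {}

      int EncodeBlock(int16_t* in_data,
                      size_t block_size_samples,
                      rtc::Buffer* payload,
                      size_t max_bytes) override {
        // Linear PCM16 uses 2 bytes per sample; there is no real encoder.
        const size_t bytes = 2 * block_size_samples;
        RTC_CHECK_LE(bytes, max_bytes);
        payload->AppendData(bytes, [&](rtc::ArrayView<uint8_t> data) {
          return WebRtcPcm16b_Encode(in_data, block_size_samples,
                                     data.data());
        });
        return static_cast<int>(bytes);
      }
    };

    TEST_F(NetEqPcm16QualityTestSketch, Simulate) {
      Simulate();
    }

    }  // namespace test
    }  // namespace webrtc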
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/tools/neteq_replacement_input.cc b/third_party/libwebrtc/modules/audio_coding/neteq/tools/neteq_replacement_input.cc
new file mode 100644
index 0000000000..ffd114ae5b
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/tools/neteq_replacement_input.cc
@@ -0,0 +1,116 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/tools/neteq_replacement_input.h"
+
+#include "modules/audio_coding/neteq/tools/fake_decode_from_file.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace test {
+
+NetEqReplacementInput::NetEqReplacementInput(
+ std::unique_ptr<NetEqInput> source,
+ uint8_t replacement_payload_type,
+ const std::set<uint8_t>& comfort_noise_types,
+ const std::set<uint8_t>& forbidden_types)
+ : source_(std::move(source)),
+ replacement_payload_type_(replacement_payload_type),
+ comfort_noise_types_(comfort_noise_types),
+ forbidden_types_(forbidden_types) {
+ RTC_CHECK(source_);
+ packet_ = source_->PopPacket();
+ ReplacePacket();
+}
+
+absl::optional<int64_t> NetEqReplacementInput::NextPacketTime() const {
+ return packet_
+ ? absl::optional<int64_t>(static_cast<int64_t>(packet_->time_ms))
+ : absl::nullopt;
+}
+
+absl::optional<int64_t> NetEqReplacementInput::NextOutputEventTime() const {
+ return source_->NextOutputEventTime();
+}
+
+std::unique_ptr<NetEqInput::PacketData> NetEqReplacementInput::PopPacket() {
+ std::unique_ptr<PacketData> to_return = std::move(packet_);
+ while (true) {
+ packet_ = source_->PopPacket();
+ if (!packet_)
+ break;
+ if (packet_->payload.size() > packet_->header.paddingLength) {
+ // Not padding only. Good to go. Skip this packet otherwise.
+ break;
+ }
+ }
+ ReplacePacket();
+ return to_return;
+}
+
+void NetEqReplacementInput::AdvanceOutputEvent() {
+ source_->AdvanceOutputEvent();
+}
+
+bool NetEqReplacementInput::ended() const {
+ return source_->ended();
+}
+
+absl::optional<RTPHeader> NetEqReplacementInput::NextHeader() const {
+ return source_->NextHeader();
+}
+
+void NetEqReplacementInput::ReplacePacket() {
+ if (!source_->NextPacketTime()) {
+ // End of input. Cannot do proper replacement on the very last packet, so we
+ // delete it instead.
+ packet_.reset();
+ return;
+ }
+
+ RTC_DCHECK(packet_);
+
+ RTC_CHECK_EQ(forbidden_types_.count(packet_->header.payloadType), 0)
+ << "Payload type " << static_cast<int>(packet_->header.payloadType)
+ << " is forbidden.";
+
+ // Check if this packet is comfort noise.
+ if (comfort_noise_types_.count(packet_->header.payloadType) != 0) {
+ // If CNG, simply insert a zero-energy one-byte payload.
+ uint8_t cng_payload[1] = {127}; // Max attenuation of CNG.
+ packet_->payload.SetData(cng_payload);
+ return;
+ }
+
+ absl::optional<RTPHeader> next_hdr = source_->NextHeader();
+ RTC_DCHECK(next_hdr);
+ uint8_t payload[12];
+ RTC_DCHECK_LE(last_frame_size_timestamps_, 120 * 48);
+ uint32_t input_frame_size_timestamps = last_frame_size_timestamps_;
+ const uint32_t timestamp_diff =
+ next_hdr->timestamp - packet_->header.timestamp;
+ if (next_hdr->sequenceNumber == packet_->header.sequenceNumber + 1 &&
+ timestamp_diff <= 120 * 48) {
+ // Packets are in order and the timestamp diff is less than 5760 samples.
+ // Accept the timestamp diff as a valid frame size.
+ input_frame_size_timestamps = timestamp_diff;
+ last_frame_size_timestamps_ = input_frame_size_timestamps;
+ }
+ RTC_DCHECK_LE(input_frame_size_timestamps, 120 * 48);
+ FakeDecodeFromFile::PrepareEncoded(packet_->header.timestamp,
+ input_frame_size_timestamps,
+ packet_->payload.size(), payload);
+ packet_->payload.SetData(payload);
+ packet_->header.payloadType = replacement_payload_type_;
+ return;
+}
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/tools/neteq_replacement_input.h b/third_party/libwebrtc/modules/audio_coding/neteq/tools/neteq_replacement_input.h
new file mode 100644
index 0000000000..9ce9b9dc63
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/tools/neteq_replacement_input.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_REPLACEMENT_INPUT_H_
+#define MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_REPLACEMENT_INPUT_H_
+
+#include <memory>
+#include <set>
+
+#include "modules/audio_coding/neteq/tools/neteq_input.h"
+
+namespace webrtc {
+namespace test {
+
+// This class converts the packets from a NetEqInput to fake encodings to be
+// decoded by a FakeDecodeFromFile decoder.
+class NetEqReplacementInput : public NetEqInput {
+ public:
+ NetEqReplacementInput(std::unique_ptr<NetEqInput> source,
+ uint8_t replacement_payload_type,
+ const std::set<uint8_t>& comfort_noise_types,
+ const std::set<uint8_t>& forbidden_types);
+
+ absl::optional<int64_t> NextPacketTime() const override;
+ absl::optional<int64_t> NextOutputEventTime() const override;
+ std::unique_ptr<PacketData> PopPacket() override;
+ void AdvanceOutputEvent() override;
+ bool ended() const override;
+ absl::optional<RTPHeader> NextHeader() const override;
+
+ private:
+ void ReplacePacket();
+
+ std::unique_ptr<NetEqInput> source_;
+ const uint8_t replacement_payload_type_;
+ const std::set<uint8_t> comfort_noise_types_;
+ const std::set<uint8_t> forbidden_types_;
+ std::unique_ptr<PacketData> packet_; // The next packet to deliver.
+ uint32_t last_frame_size_timestamps_ = 960; // Initial guess: 20 ms @ 48 kHz.
+};
+
+} // namespace test
+} // namespace webrtc
+#endif // MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_REPLACEMENT_INPUT_H_
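
A sketch of how this wrapper composes with the packet-source input defined
earlier (the payload-type values 127 and 13 and the factory function are
illustrative choices, not mandated by the patch):

    #include <memory>
    #include <set>

    #include "modules/audio_coding/neteq/tools/neteq_packet_source_input.h"
    #include "modules/audio_coding/neteq/tools/neteq_replacement_input.h"

    std::unique_ptr<webrtc::test::NetEqInput> MakeReplacementInputSketch() {
      auto rtp_input = std::make_unique<webrtc::test::NetEqRtpDumpInput>(
          "dump.rtp",
          webrtc::test::NetEqPacketSourceInput::RtpHeaderExtensionMap(),
          /*ssrc_filter=*/absl::nullopt);
      // Replace every decodable payload with fake encodings of type 127;
      // treat type 13 as comfort noise, and forbid nothing.
      return std::make_unique<webrtc::test::NetEqReplacementInput>(
          std::move(rtp_input), /*replacement_payload_type=*/127,
          /*comfort_noise_types=*/std::set<uint8_t>{13},
          /*forbidden_types=*/std::set<uint8_t>());
    }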
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/tools/neteq_rtpplay.cc b/third_party/libwebrtc/modules/audio_coding/neteq/tools/neteq_rtpplay.cc
new file mode 100644
index 0000000000..b274069bd4
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/tools/neteq_rtpplay.cc
@@ -0,0 +1,406 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <iostream>
+#include <string>
+
+#include "absl/flags/flag.h"
+#include "absl/flags/parse.h"
+#include "absl/strings/string_view.h"
+#include "absl/types/optional.h"
+#include "modules/audio_coding/neteq/tools/neteq_test.h"
+#include "modules/audio_coding/neteq/tools/neteq_test_factory.h"
+#include "rtc_base/strings/string_builder.h"
+#include "system_wrappers/include/field_trial.h"
+#include "test/field_trial.h"
+
+using TestConfig = webrtc::test::NetEqTestFactory::Config;
+
+ABSL_FLAG(bool,
+ codec_map,
+ false,
+ "Prints the mapping between RTP payload type and "
+ "codec");
+ABSL_FLAG(std::string,
+ force_fieldtrials,
+ "",
+ "Field trials control experimental feature code which can be forced. "
+ "E.g. running with --force_fieldtrials=WebRTC-FooFeature/Enable/"
+ " will assign the group Enable to field trial WebRTC-FooFeature.");
+ABSL_FLAG(int, pcmu, TestConfig::default_pcmu(), "RTP payload type for PCM-u");
+ABSL_FLAG(int, pcma, TestConfig::default_pcma(), "RTP payload type for PCM-a");
+ABSL_FLAG(int, ilbc, TestConfig::default_ilbc(), "RTP payload type for iLBC");
+ABSL_FLAG(int, isac, TestConfig::default_isac(), "RTP payload type for iSAC");
+ABSL_FLAG(int,
+ isac_swb,
+ TestConfig::default_isac_swb(),
+ "RTP payload type for iSAC-swb (32 kHz)");
+ABSL_FLAG(int, opus, TestConfig::default_opus(), "RTP payload type for Opus");
+ABSL_FLAG(int,
+ pcm16b,
+ TestConfig::default_pcm16b(),
+ "RTP payload type for PCM16b-nb (8 kHz)");
+ABSL_FLAG(int,
+ pcm16b_wb,
+ TestConfig::default_pcm16b_wb(),
+ "RTP payload type for PCM16b-wb (16 kHz)");
+ABSL_FLAG(int,
+ pcm16b_swb32,
+ TestConfig::default_pcm16b_swb32(),
+ "RTP payload type for PCM16b-swb32 (32 kHz)");
+ABSL_FLAG(int,
+ pcm16b_swb48,
+ TestConfig::default_pcm16b_swb48(),
+ "RTP payload type for PCM16b-swb48 (48 kHz)");
+ABSL_FLAG(int, g722, TestConfig::default_g722(), "RTP payload type for G.722");
+ABSL_FLAG(int,
+ avt,
+ TestConfig::default_avt(),
+ "RTP payload type for AVT/DTMF (8 kHz)");
+ABSL_FLAG(int,
+ avt_16,
+ TestConfig::default_avt_16(),
+ "RTP payload type for AVT/DTMF (16 kHz)");
+ABSL_FLAG(int,
+ avt_32,
+ TestConfig::default_avt_32(),
+ "RTP payload type for AVT/DTMF (32 kHz)");
+ABSL_FLAG(int,
+ avt_48,
+ TestConfig::default_avt_48(),
+ "RTP payload type for AVT/DTMF (48 kHz)");
+ABSL_FLAG(int,
+ red,
+ TestConfig::default_red(),
+ "RTP payload type for redundant audio (RED)");
+ABSL_FLAG(int,
+ cn_nb,
+ TestConfig::default_cn_nb(),
+ "RTP payload type for comfort noise (8 kHz)");
+ABSL_FLAG(int,
+ cn_wb,
+ TestConfig::default_cn_wb(),
+ "RTP payload type for comfort noise (16 kHz)");
+ABSL_FLAG(int,
+ cn_swb32,
+ TestConfig::default_cn_swb32(),
+ "RTP payload type for comfort noise (32 kHz)");
+ABSL_FLAG(int,
+ cn_swb48,
+ TestConfig::default_cn_swb48(),
+ "RTP payload type for comfort noise (48 kHz)");
+ABSL_FLAG(std::string,
+ replacement_audio_file,
+ "",
+ "A PCM file that will be used to populate dummy"
+ " RTP packets");
+ABSL_FLAG(std::string,
+ ssrc,
+ "",
+ "Only use packets with this SSRC (decimal or hex, the latter "
+ "starting with 0x)");
+ABSL_FLAG(int,
+ audio_level,
+ TestConfig::default_audio_level(),
+ "Extension ID for audio level (RFC 6464)");
+ABSL_FLAG(int,
+ abs_send_time,
+ TestConfig::default_abs_send_time(),
+ "Extension ID for absolute sender time");
+ABSL_FLAG(int,
+ transport_seq_no,
+ TestConfig::default_transport_seq_no(),
+ "Extension ID for transport sequence number");
+ABSL_FLAG(int,
+ video_content_type,
+ TestConfig::default_video_content_type(),
+ "Extension ID for video content type");
+ABSL_FLAG(int,
+ video_timing,
+ TestConfig::default_video_timing(),
+ "Extension ID for video timing");
+ABSL_FLAG(std::string,
+ output_files_base_name,
+ "",
+ "Custom path used as prefix for the output files - i.e., "
+ "matlab plot, python plot, text log.");
+ABSL_FLAG(bool,
+ matlabplot,
+ false,
+ "Generates a matlab script for plotting the delay profile");
+ABSL_FLAG(bool,
+ pythonplot,
+ false,
+ "Generates a python script for plotting the delay profile");
+ABSL_FLAG(bool,
+ textlog,
+ false,
+ "Generates a text log describing the simulation on a "
+ "step-by-step basis.");
+ABSL_FLAG(bool, concealment_events, false, "Prints concealment events");
+ABSL_FLAG(int,
+ max_nr_packets_in_buffer,
+ TestConfig::default_max_nr_packets_in_buffer(),
+ "Maximum allowed number of packets in the buffer");
+ABSL_FLAG(bool,
+ enable_fast_accelerate,
+ false,
+ "Enables jitter buffer fast accelerate");
+
+namespace {
+
+// Parses the input string for a valid SSRC (at the start of the string). If a
+// valid SSRC is found, it is written to the output variable `ssrc`, and true is
+// returned. Otherwise, false is returned.
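+// An empty input string is accepted and leaves `ssrc` untouched (no filter).
+// For example, "0x1234" and "4660" both parse to the same SSRC value.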
+bool ParseSsrc(absl::string_view str, uint32_t* ssrc) {
+ if (str.empty())
+ return true;
+ int base = 10;
+ // Look for "0x" or "0X" at the start and change base to 16 if found.
+ if ((str.compare(0, 2, "0x") == 0) || (str.compare(0, 2, "0X") == 0))
+ base = 16;
+ errno = 0;
+ char* end_ptr;
+ std::string str_str = std::string(str);
+ unsigned long value = strtoul(str_str.c_str(), &end_ptr, base); // NOLINT
+ if (value == ULONG_MAX && errno == ERANGE)
+ return false; // Value out of range for unsigned long.
+ if (sizeof(unsigned long) > sizeof(uint32_t) && value > 0xFFFFFFFF) // NOLINT
+ return false; // Value out of range for uint32_t.
+ if (end_ptr - str_str.c_str() < static_cast<ptrdiff_t>(str.length()))
+ return false; // Part of the string was not parsed.
+ *ssrc = static_cast<uint32_t>(value);
+ return true;
+}
+
+static bool ValidateExtensionId(int value) {
+ if (value > 0 && value <= 255) // Value is ok.
+ return true;
+ printf("Extension ID must be between 1 and 255, not %d\n",
+ static_cast<int>(value));
+ return false;
+}
+
+// Flag validators.
+bool ValidatePayloadType(int value) {
+ if (value >= 0 && value <= 127) // Value is ok.
+ return true;
+ printf("Payload type must be between 0 and 127, not %d\n",
+ static_cast<int>(value));
+ return false;
+}
+
+bool ValidateSsrcValue(absl::string_view str) {
+ uint32_t dummy_ssrc;
+ if (ParseSsrc(str, &dummy_ssrc)) // Value is ok.
+ return true;
+ printf("Invalid SSRC: %.*s\n", static_cast<int>(str.size()), str.data());
+ return false;
+}
+
+void PrintCodecMappingEntry(absl::string_view codec, int flag) {
+ std::cout << codec << ": " << flag << std::endl;
+}
+
+void PrintCodecMapping() {
+ PrintCodecMappingEntry("PCM-u", absl::GetFlag(FLAGS_pcmu));
+ PrintCodecMappingEntry("PCM-a", absl::GetFlag(FLAGS_pcma));
+ PrintCodecMappingEntry("iLBC", absl::GetFlag(FLAGS_ilbc));
+ PrintCodecMappingEntry("iSAC", absl::GetFlag(FLAGS_isac));
+ PrintCodecMappingEntry("iSAC-swb (32 kHz)", absl::GetFlag(FLAGS_isac_swb));
+ PrintCodecMappingEntry("Opus", absl::GetFlag(FLAGS_opus));
+ PrintCodecMappingEntry("PCM16b-nb (8 kHz)", absl::GetFlag(FLAGS_pcm16b));
+ PrintCodecMappingEntry("PCM16b-wb (16 kHz)", absl::GetFlag(FLAGS_pcm16b_wb));
+ PrintCodecMappingEntry("PCM16b-swb32 (32 kHz)",
+ absl::GetFlag(FLAGS_pcm16b_swb32));
+ PrintCodecMappingEntry("PCM16b-swb48 (48 kHz)",
+ absl::GetFlag(FLAGS_pcm16b_swb48));
+ PrintCodecMappingEntry("G.722", absl::GetFlag(FLAGS_g722));
+ PrintCodecMappingEntry("AVT/DTMF (8 kHz)", absl::GetFlag(FLAGS_avt));
+ PrintCodecMappingEntry("AVT/DTMF (16 kHz)", absl::GetFlag(FLAGS_avt_16));
+ PrintCodecMappingEntry("AVT/DTMF (32 kHz)", absl::GetFlag(FLAGS_avt_32));
+ PrintCodecMappingEntry("AVT/DTMF (48 kHz)", absl::GetFlag(FLAGS_avt_48));
+ PrintCodecMappingEntry("redundant audio (RED)", absl::GetFlag(FLAGS_red));
+ PrintCodecMappingEntry("comfort noise (8 kHz)", absl::GetFlag(FLAGS_cn_nb));
+ PrintCodecMappingEntry("comfort noise (16 kHz)", absl::GetFlag(FLAGS_cn_wb));
+ PrintCodecMappingEntry("comfort noise (32 kHz)",
+ absl::GetFlag(FLAGS_cn_swb32));
+ PrintCodecMappingEntry("comfort noise (48 kHz)",
+ absl::GetFlag(FLAGS_cn_swb48));
+}
+
+bool ValidateOutputFilesOptions(bool textlog,
+ bool plotting,
+ absl::string_view output_files_base_name,
+ absl::string_view output_audio_filename) {
+ bool output_files_base_name_specified = !output_files_base_name.empty();
+ if (!textlog && !plotting && output_files_base_name_specified) {
+ std::cout << "Error: --output_files_base_name cannot be used without at "
+ "least one of the following flags: --textlog, --matlabplot, "
+ "--pythonplot."
+ << std::endl;
+ return false;
+ }
+ // Without `output_audio_filename`, `output_files_base_name` is required when
+ // plotting output files must be generated (in order to form a valid output
+ // file name).
+ if (output_audio_filename.empty() && plotting &&
+ !output_files_base_name_specified) {
+ std::cout << "Error: when no output audio file is specified and "
+ "--matlabplot and/or --pythonplot are used, "
+ "--output_files_base_name must be also used."
+ << std::endl;
+ return false;
+ }
+ return true;
+}
+
+absl::optional<std::string> CreateOptionalOutputFileName(
+ bool output_requested,
+ absl::string_view basename,
+ absl::string_view output_audio_filename,
+ absl::string_view suffix) {
+ if (!output_requested) {
+ return absl::nullopt;
+ }
+ if (!basename.empty()) {
+ // Override the automatic assignment.
+ rtc::StringBuilder sb(basename);
+ sb << suffix;
+ return sb.str();
+ }
+ if (!output_audio_filename.empty()) {
+ // Automatically assign name.
+ rtc::StringBuilder sb(output_audio_filename);
+ sb << suffix;
+ return sb.str();
+ }
+ std::cout << "Error: invalid text log file parameters.";
+ return absl::nullopt;
+}
+
+} // namespace
+
+int main(int argc, char* argv[]) {
+ std::vector<char*> args = absl::ParseCommandLine(argc, argv);
+ webrtc::test::NetEqTestFactory factory;
+ std::string usage =
+ "Tool for decoding an RTP dump file using NetEq.\n"
+ "Example usage:\n"
+ "./neteq_rtpplay input.rtp [output.{pcm, wav}]\n";
+ if (absl::GetFlag(FLAGS_codec_map)) {
+ PrintCodecMapping();
+ exit(0);
+ }
+ if (args.size() != 2 &&
+ args.size() != 3) { // The output audio file is optional.
+ // Print usage information.
+ std::cout << usage;
+ exit(0);
+ }
+ const std::string output_audio_filename((args.size() == 3) ? args[2] : "");
+ const std::string output_files_base_name(
+ absl::GetFlag(FLAGS_output_files_base_name));
+ RTC_CHECK(ValidateOutputFilesOptions(
+ absl::GetFlag(FLAGS_textlog),
+ absl::GetFlag(FLAGS_matlabplot) || absl::GetFlag(FLAGS_pythonplot),
+ output_files_base_name, output_audio_filename));
+ RTC_CHECK(ValidatePayloadType(absl::GetFlag(FLAGS_pcmu)));
+ RTC_CHECK(ValidatePayloadType(absl::GetFlag(FLAGS_pcma)));
+ RTC_CHECK(ValidatePayloadType(absl::GetFlag(FLAGS_ilbc)));
+ RTC_CHECK(ValidatePayloadType(absl::GetFlag(FLAGS_isac)));
+ RTC_CHECK(ValidatePayloadType(absl::GetFlag(FLAGS_isac_swb)));
+ RTC_CHECK(ValidatePayloadType(absl::GetFlag(FLAGS_opus)));
+ RTC_CHECK(ValidatePayloadType(absl::GetFlag(FLAGS_pcm16b)));
+ RTC_CHECK(ValidatePayloadType(absl::GetFlag(FLAGS_pcm16b_wb)));
+ RTC_CHECK(ValidatePayloadType(absl::GetFlag(FLAGS_pcm16b_swb32)));
+ RTC_CHECK(ValidatePayloadType(absl::GetFlag(FLAGS_pcm16b_swb48)));
+ RTC_CHECK(ValidatePayloadType(absl::GetFlag(FLAGS_g722)));
+ RTC_CHECK(ValidatePayloadType(absl::GetFlag(FLAGS_avt)));
+ RTC_CHECK(ValidatePayloadType(absl::GetFlag(FLAGS_avt_16)));
+ RTC_CHECK(ValidatePayloadType(absl::GetFlag(FLAGS_avt_32)));
+ RTC_CHECK(ValidatePayloadType(absl::GetFlag(FLAGS_avt_48)));
+ RTC_CHECK(ValidatePayloadType(absl::GetFlag(FLAGS_red)));
+ RTC_CHECK(ValidatePayloadType(absl::GetFlag(FLAGS_cn_nb)));
+ RTC_CHECK(ValidatePayloadType(absl::GetFlag(FLAGS_cn_wb)));
+ RTC_CHECK(ValidatePayloadType(absl::GetFlag(FLAGS_cn_swb32)));
+ RTC_CHECK(ValidatePayloadType(absl::GetFlag(FLAGS_cn_swb48)));
+ RTC_CHECK(ValidateSsrcValue(absl::GetFlag(FLAGS_ssrc)));
+ RTC_CHECK(ValidateExtensionId(absl::GetFlag(FLAGS_audio_level)));
+ RTC_CHECK(ValidateExtensionId(absl::GetFlag(FLAGS_abs_send_time)));
+ RTC_CHECK(ValidateExtensionId(absl::GetFlag(FLAGS_transport_seq_no)));
+ RTC_CHECK(ValidateExtensionId(absl::GetFlag(FLAGS_video_content_type)));
+ RTC_CHECK(ValidateExtensionId(absl::GetFlag(FLAGS_video_timing)));
+
+  // Keep force_fieldtrials alive as a persistent string for the entire
+  // program lifetime, since absl::GetFlag creates a temporary string and
+  // c_str() would otherwise point to a deallocated string.
+ const std::string force_fieldtrials = absl::GetFlag(FLAGS_force_fieldtrials);
+ webrtc::field_trial::InitFieldTrialsFromString(force_fieldtrials.c_str());
+
+ webrtc::test::NetEqTestFactory::Config config;
+ config.pcmu = absl::GetFlag(FLAGS_pcmu);
+ config.pcma = absl::GetFlag(FLAGS_pcma);
+ config.ilbc = absl::GetFlag(FLAGS_ilbc);
+ config.isac = absl::GetFlag(FLAGS_isac);
+ config.isac_swb = absl::GetFlag(FLAGS_isac_swb);
+ config.opus = absl::GetFlag(FLAGS_opus);
+ config.pcm16b = absl::GetFlag(FLAGS_pcm16b);
+ config.pcm16b_wb = absl::GetFlag(FLAGS_pcm16b_wb);
+ config.pcm16b_swb32 = absl::GetFlag(FLAGS_pcm16b_swb32);
+ config.pcm16b_swb48 = absl::GetFlag(FLAGS_pcm16b_swb48);
+ config.g722 = absl::GetFlag(FLAGS_g722);
+ config.avt = absl::GetFlag(FLAGS_avt);
+ config.avt_16 = absl::GetFlag(FLAGS_avt_16);
+ config.avt_32 = absl::GetFlag(FLAGS_avt_32);
+ config.avt_48 = absl::GetFlag(FLAGS_avt_48);
+ config.red = absl::GetFlag(FLAGS_red);
+ config.cn_nb = absl::GetFlag(FLAGS_cn_nb);
+ config.cn_wb = absl::GetFlag(FLAGS_cn_wb);
+ config.cn_swb32 = absl::GetFlag(FLAGS_cn_swb32);
+ config.cn_swb48 = absl::GetFlag(FLAGS_cn_swb48);
+ config.replacement_audio_file = absl::GetFlag(FLAGS_replacement_audio_file);
+ config.audio_level = absl::GetFlag(FLAGS_audio_level);
+ config.abs_send_time = absl::GetFlag(FLAGS_abs_send_time);
+ config.transport_seq_no = absl::GetFlag(FLAGS_transport_seq_no);
+ config.video_content_type = absl::GetFlag(FLAGS_video_content_type);
+ config.video_timing = absl::GetFlag(FLAGS_video_timing);
+ config.matlabplot = absl::GetFlag(FLAGS_matlabplot);
+ config.pythonplot = absl::GetFlag(FLAGS_pythonplot);
+ config.concealment_events = absl::GetFlag(FLAGS_concealment_events);
+ config.max_nr_packets_in_buffer =
+ absl::GetFlag(FLAGS_max_nr_packets_in_buffer);
+ config.enable_fast_accelerate = absl::GetFlag(FLAGS_enable_fast_accelerate);
+ if (!output_audio_filename.empty()) {
+ config.output_audio_filename = output_audio_filename;
+ }
+ config.textlog = absl::GetFlag(FLAGS_textlog);
+ config.textlog_filename = CreateOptionalOutputFileName(
+ absl::GetFlag(FLAGS_textlog), output_files_base_name,
+ output_audio_filename, ".text_log.txt");
+ config.plot_scripts_basename = CreateOptionalOutputFileName(
+ absl::GetFlag(FLAGS_matlabplot) || absl::GetFlag(FLAGS_pythonplot),
+ output_files_base_name, output_audio_filename, "");
+
+ // Check if an SSRC value was provided.
+ if (absl::GetFlag(FLAGS_ssrc).size() > 0) {
+ uint32_t ssrc;
+ RTC_CHECK(ParseSsrc(absl::GetFlag(FLAGS_ssrc), &ssrc))
+ << "Flag verification has failed.";
+ config.ssrc_filter = absl::make_optional(ssrc);
+ }
+
+ std::unique_ptr<webrtc::test::NetEqTest> test =
+ factory.InitializeTestFromFile(/*input_filename=*/args[1],
+ /*factory=*/nullptr, config);
+ RTC_CHECK(test) << "ERROR: Unable to run test";
+ test->Run();
+ return 0;
+}
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/tools/neteq_rtpplay_test.sh b/third_party/libwebrtc/modules/audio_coding/neteq/tools/neteq_rtpplay_test.sh
new file mode 100755
index 0000000000..0a6bf16016
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/tools/neteq_rtpplay_test.sh
@@ -0,0 +1,183 @@
+#!/bin/bash
+#
+# Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+#
+
+# Aliases.
+BIN=$1
+TEST_RTC_EVENT_LOG=$2
+INPUT_PCM_FILE=$3
+
+# Check setup.
+if [ ! -f $BIN ]; then
+ echo "Cannot find neteq_rtpplay binary."
+ exit 99
+fi
+if [ ! -f $TEST_RTC_EVENT_LOG ]; then
+ echo "Cannot find RTC event log file."
+ exit 99
+fi
+if [ ! -f $INPUT_PCM_FILE ]; then
+ echo "Cannot find PCM file."
+ exit 99
+fi
+
+# Defines.
+
+TMP_DIR=$(mktemp -d /tmp/tmp_XXXXXXXXXX)
+PASS=0
+FAIL=1
+TEST_SUITE_RESULT=$PASS
+
+file_hash () {
+ md5sum $1 | awk '{ print $1 }'
+}
+
+test_passed () {
+ echo PASS
+}
+
+test_failed () {
+ echo "FAIL: $1"
+ TEST_SUITE_RESULT=$FAIL
+}
+
+test_file_checksums_match () {
+ if [ ! -f $1 ] || [ ! -f $2 ]; then
+ test_failed "Cannot compare hash values: file(s) not found."
+ return
+ fi
+ HASH1=$(file_hash $1)
+ HASH2=$(file_hash $2)
+ if [ "$HASH1" = "$HASH2" ]; then
+ test_passed
+ else
+ test_failed "$1 differs from $2"
+ fi
+}
+
+test_file_exists () {
+ if [ -f $1 ]; then
+ test_passed
+ else
+ test_failed "$1 does not exist"
+ fi
+}
+
+test_exit_code_0 () {
+ if [ $1 -eq 0 ]; then
+ test_passed
+ else
+    test_failed "exit code $1 is not 0"
+ fi
+}
+
+test_exit_code_not_0 () {
+ if [ $1 -eq 0 ]; then
+    test_failed "exit code $1 is 0, expected non-zero"
+ else
+ test_passed
+ fi
+}
+
+# Generate test data.
+
+# Case 1. Pre-existing way.
+CASE1_WAV=$TMP_DIR/case1.wav
+$BIN $TEST_RTC_EVENT_LOG $CASE1_WAV \
+ --replacement_audio_file $INPUT_PCM_FILE \
+ --textlog --pythonplot --matlabplot \
+ > $TMP_DIR/case1.stdout 2> /dev/null
+CASE1_RETURN_CODE=$?
+CASE1_TEXTLOG=$TMP_DIR/case1.wav.text_log.txt
+CASE1_PYPLOT=$TMP_DIR/case1_wav.py
+CASE1_MATPLOT=$TMP_DIR/case1_wav.m
+
+# Case 2. No output files.
+$BIN $TEST_RTC_EVENT_LOG --replacement_audio_file $INPUT_PCM_FILE \
+ > $TMP_DIR/case2.stdout 2> /dev/null
+CASE2_RETURN_CODE=$?
+
+# Case 3. No output audio file.
+
+# Case 3.1 Without --output_files_base_name (won't run).
+$BIN $TEST_RTC_EVENT_LOG \
+ --replacement_audio_file $INPUT_PCM_FILE \
+ --textlog --pythonplot --matlabplot \
+ &> /dev/null
+CASE3_1_RETURN_CODE=$?
+
+# Case 3.2 With --output_files_base_name (runs).
+$BIN $TEST_RTC_EVENT_LOG \
+ --replacement_audio_file $INPUT_PCM_FILE \
+ --output_files_base_name $TMP_DIR/case3_2 \
+ --textlog --pythonplot --matlabplot \
+ > $TMP_DIR/case3_2.stdout 2> /dev/null
+CASE3_2_RETURN_CODE=$?
+CASE3_2_TEXTLOG=$TMP_DIR/case3_2.text_log.txt
+CASE3_2_PYPLOT=$TMP_DIR/case3_2.py
+CASE3_2_MATPLOT=$TMP_DIR/case3_2.m
+
+# Case 4. With output audio file and --output_files_base_name.
+CASE4_WAV=$TMP_DIR/case4.wav
+$BIN $TEST_RTC_EVENT_LOG $TMP_DIR/case4.wav \
+ --replacement_audio_file $INPUT_PCM_FILE \
+ --output_files_base_name $TMP_DIR/case4 \
+ --textlog --pythonplot --matlabplot \
+ > $TMP_DIR/case4.stdout 2> /dev/null
+CASE4_RETURN_CODE=$?
+CASE4_TEXTLOG=$TMP_DIR/case4.text_log.txt
+CASE4_PYPLOT=$TMP_DIR/case4.py
+CASE4_MATPLOT=$TMP_DIR/case4.m
+
+# Tests.
+
+echo Check exit codes
+test_exit_code_0 $CASE1_RETURN_CODE
+test_exit_code_0 $CASE2_RETURN_CODE
+test_exit_code_not_0 $CASE3_1_RETURN_CODE
+test_exit_code_0 $CASE3_2_RETURN_CODE
+test_exit_code_0 $CASE4_RETURN_CODE
+
+echo Check that the expected output files exist
+test_file_exists $CASE1_TEXTLOG
+test_file_exists $CASE3_2_TEXTLOG
+test_file_exists $CASE4_TEXTLOG
+test_file_exists $CASE1_PYPLOT
+test_file_exists $CASE3_2_PYPLOT
+test_file_exists $CASE4_PYPLOT
+test_file_exists $CASE1_MATPLOT
+test_file_exists $CASE3_2_MATPLOT
+test_file_exists $CASE4_MATPLOT
+
+echo Check that the same WAV file is produced
+test_file_checksums_match $CASE1_WAV $CASE4_WAV
+
+echo Check that the same text log is produced
+test_file_checksums_match $CASE1_TEXTLOG $CASE3_2_TEXTLOG
+test_file_checksums_match $CASE1_TEXTLOG $CASE4_TEXTLOG
+
+echo Check that the same python plot script is produced
+test_file_checksums_match $CASE1_PYPLOT $CASE3_2_PYPLOT
+test_file_checksums_match $CASE1_PYPLOT $CASE4_PYPLOT
+
+echo Check that the same matlab plot script is produced
+test_file_checksums_match $CASE1_MATPLOT $CASE3_2_MATPLOT
+test_file_checksums_match $CASE1_MATPLOT $CASE4_MATPLOT
+
+# Clean up
+rm -fr $TMP_DIR
+
+if [ $TEST_SUITE_RESULT -eq $PASS ]; then
+ echo All tests passed.
+ exit 0
+else
+ echo One or more tests failed.
+ exit 1
+fi
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/tools/neteq_stats_getter.cc b/third_party/libwebrtc/modules/audio_coding/neteq/tools/neteq_stats_getter.cc
new file mode 100644
index 0000000000..6738e494f6
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/tools/neteq_stats_getter.cc
@@ -0,0 +1,139 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/tools/neteq_stats_getter.h"
+
+#include <algorithm>
+#include <numeric>
+#include <utility>
+
+#include "rtc_base/checks.h"
+#include "rtc_base/strings/string_builder.h"
+#include "rtc_base/time_utils.h"
+
+namespace webrtc {
+namespace test {
+
+std::string NetEqStatsGetter::ConcealmentEvent::ToString() const {
+ char ss_buf[256];
+ rtc::SimpleStringBuilder ss(ss_buf);
+ ss << "ConcealmentEvent duration_ms:" << duration_ms
+ << " event_number:" << concealment_event_number
+ << " time_from_previous_event_end_ms:" << time_from_previous_event_end_ms;
+ return ss.str();
+}
+
+NetEqStatsGetter::NetEqStatsGetter(
+ std::unique_ptr<NetEqDelayAnalyzer> delay_analyzer)
+ : delay_analyzer_(std::move(delay_analyzer)) {}
+
+void NetEqStatsGetter::BeforeGetAudio(NetEq* neteq) {
+ if (delay_analyzer_) {
+ delay_analyzer_->BeforeGetAudio(neteq);
+ }
+}
+
+void NetEqStatsGetter::AfterGetAudio(int64_t time_now_ms,
+ const AudioFrame& audio_frame,
+ bool muted,
+ NetEq* neteq) {
+  // TODO(minyue): Getting stats should preferably not be done as a callback
+  // after getting audio; in practice it is called independently.
+ const auto lifetime_stat = neteq->GetLifetimeStatistics();
+ if (last_stats_query_time_ms_ == 0 ||
+ rtc::TimeDiff(time_now_ms, last_stats_query_time_ms_) >=
+ stats_query_interval_ms_) {
+ NetEqNetworkStatistics stats;
+ RTC_CHECK_EQ(neteq->NetworkStatistics(&stats), 0);
+ stats_.push_back(std::make_pair(time_now_ms, stats));
+ lifetime_stats_.push_back(std::make_pair(time_now_ms, lifetime_stat));
+ last_stats_query_time_ms_ = time_now_ms;
+ }
+
+ const auto voice_concealed_samples =
+ lifetime_stat.concealed_samples - lifetime_stat.silent_concealed_samples;
+ if (current_concealment_event_ != lifetime_stat.concealment_events &&
+ voice_concealed_samples_until_last_event_ < voice_concealed_samples) {
+ if (last_event_end_time_ms_ > 0) {
+      // Do not account for the first event, to avoid skewing from the start
+      // of the call.
+ ConcealmentEvent concealment_event;
+ uint64_t last_event_voice_concealed_samples =
+ voice_concealed_samples - voice_concealed_samples_until_last_event_;
+ RTC_CHECK_GT(last_event_voice_concealed_samples, 0);
+ concealment_event.duration_ms = last_event_voice_concealed_samples /
+ (audio_frame.sample_rate_hz_ / 1000);
+ concealment_event.concealment_event_number = current_concealment_event_;
+ concealment_event.time_from_previous_event_end_ms =
+ time_now_ms - last_event_end_time_ms_;
+ concealment_events_.emplace_back(concealment_event);
+ voice_concealed_samples_until_last_event_ = voice_concealed_samples;
+ }
+ last_event_end_time_ms_ = time_now_ms;
+ voice_concealed_samples_until_last_event_ = voice_concealed_samples;
+ current_concealment_event_ = lifetime_stat.concealment_events;
+ }
+
+ if (delay_analyzer_) {
+ delay_analyzer_->AfterGetAudio(time_now_ms, audio_frame, muted, neteq);
+ }
+}
+
+double NetEqStatsGetter::AverageSpeechExpandRate() const {
+ double sum_speech_expand = std::accumulate(
+ stats_.begin(), stats_.end(), double{0.0},
+ [](double a, std::pair<int64_t, NetEqNetworkStatistics> b) {
+ return a + static_cast<double>(b.second.speech_expand_rate);
+ });
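+  // speech_expand_rate is reported in Q14 fixed point; divide by 2^14 = 16384
+  // to convert it to a fraction before averaging.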
+ return sum_speech_expand / 16384.0 / stats_.size();
+}
+
+NetEqStatsGetter::Stats NetEqStatsGetter::AverageStats() const {
+ Stats sum_stats = std::accumulate(
+ stats_.begin(), stats_.end(), Stats(),
+ [](Stats a, std::pair<int64_t, NetEqNetworkStatistics> bb) {
+ const auto& b = bb.second;
+ a.current_buffer_size_ms += b.current_buffer_size_ms;
+ a.preferred_buffer_size_ms += b.preferred_buffer_size_ms;
+ a.jitter_peaks_found += b.jitter_peaks_found;
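+        // The rates below are reported in Q14 fixed point; dividing by
+        // 2^14 = 16384 converts them to fractions in [0, 1].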
+ a.expand_rate += b.expand_rate / 16384.0;
+ a.speech_expand_rate += b.speech_expand_rate / 16384.0;
+ a.preemptive_rate += b.preemptive_rate / 16384.0;
+ a.accelerate_rate += b.accelerate_rate / 16384.0;
+ a.secondary_decoded_rate += b.secondary_decoded_rate / 16384.0;
+ a.secondary_discarded_rate += b.secondary_discarded_rate / 16384.0;
+ a.mean_waiting_time_ms += b.mean_waiting_time_ms;
+ a.median_waiting_time_ms += b.median_waiting_time_ms;
+ a.min_waiting_time_ms = std::min(
+ a.min_waiting_time_ms, static_cast<double>(b.min_waiting_time_ms));
+ a.max_waiting_time_ms = std::max(
+ a.max_waiting_time_ms, static_cast<double>(b.max_waiting_time_ms));
+ return a;
+ });
+
+ sum_stats.current_buffer_size_ms /= stats_.size();
+ sum_stats.preferred_buffer_size_ms /= stats_.size();
+ sum_stats.jitter_peaks_found /= stats_.size();
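+  // Note: packet_loss_rate, clockdrift_ppm and added_zero_samples are never
+  // accumulated in the lambda above, so they remain at zero here. Likewise,
+  // min_waiting_time_ms is initialized to 0.0, so the accumulated minimum
+  // never exceeds zero.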
+ sum_stats.packet_loss_rate /= stats_.size();
+ sum_stats.expand_rate /= stats_.size();
+ sum_stats.speech_expand_rate /= stats_.size();
+ sum_stats.preemptive_rate /= stats_.size();
+ sum_stats.accelerate_rate /= stats_.size();
+ sum_stats.secondary_decoded_rate /= stats_.size();
+ sum_stats.secondary_discarded_rate /= stats_.size();
+ sum_stats.added_zero_samples /= stats_.size();
+ sum_stats.mean_waiting_time_ms /= stats_.size();
+ sum_stats.median_waiting_time_ms /= stats_.size();
+
+ return sum_stats;
+}
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/tools/neteq_stats_getter.h b/third_party/libwebrtc/modules/audio_coding/neteq/tools/neteq_stats_getter.h
new file mode 100644
index 0000000000..b1b12bb1f8
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/tools/neteq_stats_getter.h
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_STATS_GETTER_H_
+#define MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_STATS_GETTER_H_
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "modules/audio_coding/neteq/tools/neteq_delay_analyzer.h"
+#include "modules/audio_coding/neteq/tools/neteq_test.h"
+
+namespace webrtc {
+namespace test {
+
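+// A statistics getter that can be hooked into a NetEqTest as its GetAudio
+// callback. A minimal usage sketch (illustrative only):
+//
+//   NetEqStatsGetter stats_getter(/*delay_analyzer=*/nullptr);
+//   NetEqTest::Callbacks callbacks;
+//   callbacks.get_audio_callback = &stats_getter;
+//   // ... construct and run a NetEqTest with `callbacks` ...
+//   NetEqStatsGetter::Stats averages = stats_getter.AverageStats();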
+class NetEqStatsGetter : public NetEqGetAudioCallback {
+ public:
+ // This struct is a replica of webrtc::NetEqNetworkStatistics, but with all
+ // values stored in double precision.
+ struct Stats {
+ double current_buffer_size_ms = 0.0;
+ double preferred_buffer_size_ms = 0.0;
+ double jitter_peaks_found = 0.0;
+ double packet_loss_rate = 0.0;
+ double expand_rate = 0.0;
+ double speech_expand_rate = 0.0;
+ double preemptive_rate = 0.0;
+ double accelerate_rate = 0.0;
+ double secondary_decoded_rate = 0.0;
+ double secondary_discarded_rate = 0.0;
+ double clockdrift_ppm = 0.0;
+ double added_zero_samples = 0.0;
+ double mean_waiting_time_ms = 0.0;
+ double median_waiting_time_ms = 0.0;
+ double min_waiting_time_ms = 0.0;
+ double max_waiting_time_ms = 0.0;
+ };
+
+ struct ConcealmentEvent {
+ uint64_t duration_ms;
+ size_t concealment_event_number;
+ int64_t time_from_previous_event_end_ms;
+ std::string ToString() const;
+ };
+
+  // Takes ownership of an optional delay analyzer, which will be invoked
+  // after this object has processed each audio callback. Null is a valid
+  // value.
+ explicit NetEqStatsGetter(std::unique_ptr<NetEqDelayAnalyzer> delay_analyzer);
+
+ void set_stats_query_interval_ms(int64_t stats_query_interval_ms) {
+ stats_query_interval_ms_ = stats_query_interval_ms;
+ }
+
+ void BeforeGetAudio(NetEq* neteq) override;
+
+ void AfterGetAudio(int64_t time_now_ms,
+ const AudioFrame& audio_frame,
+ bool muted,
+ NetEq* neteq) override;
+
+ double AverageSpeechExpandRate() const;
+
+ NetEqDelayAnalyzer* delay_analyzer() const { return delay_analyzer_.get(); }
+
+ const std::vector<ConcealmentEvent>& concealment_events() const {
+    // Note: the concealment event that is still ongoing when the simulation
+    // ends is never recorded, to avoid potential skewing at the end of the
+    // call.
+ return concealment_events_;
+ }
+
+ const std::vector<std::pair<int64_t, NetEqNetworkStatistics>>* stats() const {
+ return &stats_;
+ }
+
+ const std::vector<std::pair<int64_t, NetEqLifetimeStatistics>>*
+ lifetime_stats() const {
+ return &lifetime_stats_;
+ }
+
+ Stats AverageStats() const;
+
+ private:
+ std::unique_ptr<NetEqDelayAnalyzer> delay_analyzer_;
+ int64_t stats_query_interval_ms_ = 1000;
+ int64_t last_stats_query_time_ms_ = 0;
+ std::vector<std::pair<int64_t, NetEqNetworkStatistics>> stats_;
+ std::vector<std::pair<int64_t, NetEqLifetimeStatistics>> lifetime_stats_;
+ size_t current_concealment_event_ = 1;
+ uint64_t voice_concealed_samples_until_last_event_ = 0;
+ std::vector<ConcealmentEvent> concealment_events_;
+ int64_t last_event_end_time_ms_ = 0;
+};
+
+} // namespace test
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_STATS_GETTER_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/tools/neteq_stats_plotter.cc b/third_party/libwebrtc/modules/audio_coding/neteq/tools/neteq_stats_plotter.cc
new file mode 100644
index 0000000000..162a4c9300
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/tools/neteq_stats_plotter.cc
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/tools/neteq_stats_plotter.h"
+
+#include <inttypes.h>
+#include <stdio.h>
+
+#include <algorithm>
+#include <utility>
+
+#include "absl/strings/string_view.h"
+
+namespace webrtc {
+namespace test {
+
+NetEqStatsPlotter::NetEqStatsPlotter(bool make_matlab_plot,
+ bool make_python_plot,
+ bool show_concealment_events,
+ absl::string_view base_file_name)
+ : make_matlab_plot_(make_matlab_plot),
+ make_python_plot_(make_python_plot),
+ show_concealment_events_(show_concealment_events),
+ base_file_name_(base_file_name) {
+ std::unique_ptr<NetEqDelayAnalyzer> delay_analyzer;
+ if (make_matlab_plot || make_python_plot) {
+ delay_analyzer.reset(new NetEqDelayAnalyzer);
+ }
+ stats_getter_.reset(new NetEqStatsGetter(std::move(delay_analyzer)));
+}
+
+void NetEqStatsPlotter::SimulationEnded(int64_t simulation_time_ms) {
+ if (make_matlab_plot_) {
+ auto matlab_script_name = base_file_name_;
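+    // Matlab script names may not contain dots, so replace them with
+    // underscores.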
+ std::replace(matlab_script_name.begin(), matlab_script_name.end(), '.',
+ '_');
+ printf("Creating Matlab plot script %s.m\n", matlab_script_name.c_str());
+ stats_getter_->delay_analyzer()->CreateMatlabScript(matlab_script_name +
+ ".m");
+ }
+ if (make_python_plot_) {
+ auto python_script_name = base_file_name_;
+ std::replace(python_script_name.begin(), python_script_name.end(), '.',
+ '_');
+ printf("Creating Python plot script %s.py\n", python_script_name.c_str());
+ stats_getter_->delay_analyzer()->CreatePythonScript(python_script_name +
+ ".py");
+ }
+
+ printf("Simulation statistics:\n");
+ printf(" output duration: %" PRId64 " ms\n", simulation_time_ms);
+ auto stats = stats_getter_->AverageStats();
+ printf(" packet_loss_rate: %f %%\n", 100.0 * stats.packet_loss_rate);
+ printf(" expand_rate: %f %%\n", 100.0 * stats.expand_rate);
+ printf(" speech_expand_rate: %f %%\n", 100.0 * stats.speech_expand_rate);
+ printf(" preemptive_rate: %f %%\n", 100.0 * stats.preemptive_rate);
+ printf(" accelerate_rate: %f %%\n", 100.0 * stats.accelerate_rate);
+ printf(" secondary_decoded_rate: %f %%\n",
+ 100.0 * stats.secondary_decoded_rate);
+ printf(" secondary_discarded_rate: %f %%\n",
+ 100.0 * stats.secondary_discarded_rate);
+ printf(" clockdrift_ppm: %f ppm\n", stats.clockdrift_ppm);
+ printf(" mean_waiting_time_ms: %f ms\n", stats.mean_waiting_time_ms);
+ printf(" median_waiting_time_ms: %f ms\n", stats.median_waiting_time_ms);
+ printf(" min_waiting_time_ms: %f ms\n", stats.min_waiting_time_ms);
+ printf(" max_waiting_time_ms: %f ms\n", stats.max_waiting_time_ms);
+ printf(" current_buffer_size_ms: %f ms\n", stats.current_buffer_size_ms);
+ printf(" preferred_buffer_size_ms: %f ms\n", stats.preferred_buffer_size_ms);
+ if (show_concealment_events_) {
+ printf(" concealment_events_ms:\n");
+ for (auto concealment_event : stats_getter_->concealment_events())
+ printf("%s\n", concealment_event.ToString().c_str());
+ printf(" end of concealment_events_ms\n");
+ }
+
+ const auto lifetime_stats_vector = stats_getter_->lifetime_stats();
+ if (!lifetime_stats_vector->empty()) {
+ auto lifetime_stats = lifetime_stats_vector->back().second;
+ printf(" total_samples_received: %" PRIu64 "\n",
+ lifetime_stats.total_samples_received);
+ printf(" concealed_samples: %" PRIu64 "\n",
+ lifetime_stats.concealed_samples);
+ printf(" concealment_events: %" PRIu64 "\n",
+ lifetime_stats.concealment_events);
+ printf(" delayed_packet_outage_samples: %" PRIu64 "\n",
+ lifetime_stats.delayed_packet_outage_samples);
+ printf(" num_interruptions: %d\n", lifetime_stats.interruption_count);
+ printf(" sum_interruption_length_ms: %d ms\n",
+ lifetime_stats.total_interruption_duration_ms);
+ printf(" interruption_ratio: %f\n",
+ static_cast<double>(lifetime_stats.total_interruption_duration_ms) /
+ simulation_time_ms);
+ printf(" removed_samples_for_acceleration: %" PRIu64 "\n",
+ lifetime_stats.removed_samples_for_acceleration);
+ printf(" inserted_samples_for_deceleration: %" PRIu64 "\n",
+ lifetime_stats.inserted_samples_for_deceleration);
+ printf(" generated_noise_samples: %" PRIu64 "\n",
+ lifetime_stats.generated_noise_samples);
+ printf(" packets_discarded: %" PRIu64 "\n",
+ lifetime_stats.packets_discarded);
+ }
+}
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/tools/neteq_stats_plotter.h b/third_party/libwebrtc/modules/audio_coding/neteq/tools/neteq_stats_plotter.h
new file mode 100644
index 0000000000..11c16da9d1
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/tools/neteq_stats_plotter.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_STATS_PLOTTER_H_
+#define MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_STATS_PLOTTER_H_
+
+#include <memory>
+#include <string>
+
+#include "absl/strings/string_view.h"
+#include "modules/audio_coding/neteq/tools/neteq_delay_analyzer.h"
+#include "modules/audio_coding/neteq/tools/neteq_stats_getter.h"
+#include "modules/audio_coding/neteq/tools/neteq_test.h"
+
+namespace webrtc {
+namespace test {
+
+class NetEqStatsPlotter : public NetEqSimulationEndedCallback {
+ public:
+ NetEqStatsPlotter(bool make_matlab_plot,
+ bool make_python_plot,
+ bool show_concealment_events,
+ absl::string_view base_file_name);
+
+ void SimulationEnded(int64_t simulation_time_ms) override;
+
+ NetEqStatsGetter* stats_getter() { return stats_getter_.get(); }
+
+ private:
+ std::unique_ptr<NetEqStatsGetter> stats_getter_;
+ const bool make_matlab_plot_;
+ const bool make_python_plot_;
+ const bool show_concealment_events_;
+ const std::string base_file_name_;
+};
+
+} // namespace test
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_STATS_PLOTTER_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/tools/neteq_test.cc b/third_party/libwebrtc/modules/audio_coding/neteq/tools/neteq_test.cc
new file mode 100644
index 0000000000..19b1df11a1
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/tools/neteq_test.cc
@@ -0,0 +1,349 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/tools/neteq_test.h"
+
+#include <iomanip>
+#include <iostream>
+
+#include "modules/audio_coding/neteq/default_neteq_factory.h"
+#include "modules/rtp_rtcp/source/byte_io.h"
+#include "system_wrappers/include/clock.h"
+
+namespace webrtc {
+namespace test {
+namespace {
+
+absl::optional<NetEq::Operation> ActionToOperations(
+ absl::optional<NetEqSimulator::Action> a) {
+ if (!a) {
+ return absl::nullopt;
+ }
+ switch (*a) {
+ case NetEqSimulator::Action::kAccelerate:
+ return absl::make_optional(NetEq::Operation::kAccelerate);
+ case NetEqSimulator::Action::kExpand:
+ return absl::make_optional(NetEq::Operation::kExpand);
+ case NetEqSimulator::Action::kNormal:
+ return absl::make_optional(NetEq::Operation::kNormal);
+ case NetEqSimulator::Action::kPreemptiveExpand:
+ return absl::make_optional(NetEq::Operation::kPreemptiveExpand);
+ }
+}
+
+std::unique_ptr<NetEq> CreateNetEq(
+ const NetEq::Config& config,
+ Clock* clock,
+ const rtc::scoped_refptr<AudioDecoderFactory>& decoder_factory) {
+ return DefaultNetEqFactory().CreateNetEq(config, decoder_factory, clock);
+}
+
+} // namespace
+
+void DefaultNetEqTestErrorCallback::OnInsertPacketError(
+ const NetEqInput::PacketData& packet) {
+ std::cerr << "InsertPacket returned an error." << std::endl;
+ std::cerr << "Packet data: " << packet.ToString() << std::endl;
+ RTC_FATAL();
+}
+
+void DefaultNetEqTestErrorCallback::OnGetAudioError() {
+ std::cerr << "GetAudio returned an error." << std::endl;
+ RTC_FATAL();
+}
+
+NetEqTest::NetEqTest(const NetEq::Config& config,
+ rtc::scoped_refptr<AudioDecoderFactory> decoder_factory,
+ const DecoderMap& codecs,
+ std::unique_ptr<std::ofstream> text_log,
+ NetEqFactory* neteq_factory,
+ std::unique_ptr<NetEqInput> input,
+ std::unique_ptr<AudioSink> output,
+ Callbacks callbacks)
+ : clock_(0),
+ neteq_(neteq_factory
+ ? neteq_factory->CreateNetEq(config, decoder_factory, &clock_)
+ : CreateNetEq(config, &clock_, decoder_factory)),
+ input_(std::move(input)),
+ output_(std::move(output)),
+ callbacks_(callbacks),
+ sample_rate_hz_(config.sample_rate_hz),
+ text_log_(std::move(text_log)) {
+ RTC_CHECK(!config.enable_muted_state)
+ << "The code does not handle enable_muted_state";
+ RegisterDecoders(codecs);
+}
+
+NetEqTest::~NetEqTest() = default;
+
+int64_t NetEqTest::Run() {
+ int64_t simulation_time = 0;
+ SimulationStepResult step_result;
+ do {
+ step_result = RunToNextGetAudio();
+ simulation_time += step_result.simulation_step_ms;
+ } while (!step_result.is_simulation_finished);
+ if (callbacks_.simulation_ended_callback) {
+ callbacks_.simulation_ended_callback->SimulationEnded(simulation_time);
+ }
+ return simulation_time;
+}
+
+NetEqTest::SimulationStepResult NetEqTest::RunToNextGetAudio() {
+ SimulationStepResult result;
+ const int64_t start_time_ms = *input_->NextEventTime();
+ int64_t time_now_ms = start_time_ms;
+ current_state_.packet_iat_ms.clear();
+
+ while (!input_->ended()) {
+ // Advance time to next event.
+ RTC_DCHECK(input_->NextEventTime());
+ clock_.AdvanceTimeMilliseconds(*input_->NextEventTime() - time_now_ms);
+ time_now_ms = *input_->NextEventTime();
+ // Check if it is time to insert packet.
+ if (input_->NextPacketTime() && time_now_ms >= *input_->NextPacketTime()) {
+ std::unique_ptr<NetEqInput::PacketData> packet_data = input_->PopPacket();
+ RTC_CHECK(packet_data);
+ const size_t payload_data_length =
+ packet_data->payload.size() - packet_data->header.paddingLength;
+ if (payload_data_length != 0) {
+ int error = neteq_->InsertPacket(
+ packet_data->header,
+ rtc::ArrayView<const uint8_t>(packet_data->payload));
+ if (error != NetEq::kOK && callbacks_.error_callback) {
+ callbacks_.error_callback->OnInsertPacketError(*packet_data);
+ }
+ if (callbacks_.post_insert_packet) {
+ callbacks_.post_insert_packet->AfterInsertPacket(*packet_data,
+ neteq_.get());
+ }
+ } else {
+ neteq_->InsertEmptyPacket(packet_data->header);
+ }
+ if (last_packet_time_ms_) {
+ current_state_.packet_iat_ms.push_back(time_now_ms -
+ *last_packet_time_ms_);
+ }
+ if (text_log_) {
+ const auto ops_state = neteq_->GetOperationsAndState();
+ const auto delta_wallclock =
+ last_packet_time_ms_ ? (time_now_ms - *last_packet_time_ms_) : -1;
+ const auto delta_timestamp =
+ last_packet_timestamp_
+ ? (static_cast<int64_t>(packet_data->header.timestamp) -
+ *last_packet_timestamp_) *
+ 1000 / sample_rate_hz_
+ : -1;
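+        // A 12-byte payload is assumed to be a replacement ("dummy") payload,
+        // in which bytes 8-11 hold the original payload size in little-endian
+        // order (the convention used by FakeDecodeFromFile::PrepareEncoded).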
+ const auto packet_size_bytes =
+ packet_data->payload.size() == 12
+ ? ByteReader<uint32_t>::ReadLittleEndian(
+ &packet_data->payload[8])
+ : -1;
+ *text_log_ << "Packet - wallclock: " << std::setw(5) << time_now_ms
+ << ", delta wc: " << std::setw(4) << delta_wallclock
+ << ", seq_no: " << packet_data->header.sequenceNumber
+ << ", timestamp: " << std::setw(10)
+ << packet_data->header.timestamp
+ << ", delta ts: " << std::setw(4) << delta_timestamp
+ << ", size: " << std::setw(5) << packet_size_bytes
+ << ", frame size: " << std::setw(3)
+ << ops_state.current_frame_size_ms
+ << ", buffer size: " << std::setw(4)
+ << ops_state.current_buffer_size_ms << std::endl;
+ }
+ last_packet_time_ms_ = absl::make_optional<int>(time_now_ms);
+ last_packet_timestamp_ =
+ absl::make_optional<uint32_t>(packet_data->header.timestamp);
+ }
+
+ // Check if it is time to get output audio.
+ if (input_->NextOutputEventTime() &&
+ time_now_ms >= *input_->NextOutputEventTime()) {
+ if (callbacks_.get_audio_callback) {
+ callbacks_.get_audio_callback->BeforeGetAudio(neteq_.get());
+ }
+ AudioFrame out_frame;
+ bool muted;
+ int error = neteq_->GetAudio(&out_frame, &muted, nullptr,
+ ActionToOperations(next_action_));
+ next_action_ = absl::nullopt;
+ RTC_CHECK(!muted) << "The code does not handle enable_muted_state";
+ if (error != NetEq::kOK) {
+ if (callbacks_.error_callback) {
+ callbacks_.error_callback->OnGetAudioError();
+ }
+ } else {
+ sample_rate_hz_ = out_frame.sample_rate_hz_;
+ }
+ if (callbacks_.get_audio_callback) {
+ callbacks_.get_audio_callback->AfterGetAudio(time_now_ms, out_frame,
+ muted, neteq_.get());
+ }
+
+ if (output_) {
+ RTC_CHECK(output_->WriteArray(
+ out_frame.data(),
+ out_frame.samples_per_channel_ * out_frame.num_channels_));
+ }
+
+ input_->AdvanceOutputEvent();
+ result.simulation_step_ms =
+ input_->NextEventTime().value_or(time_now_ms) - start_time_ms;
+ const auto operations_state = neteq_->GetOperationsAndState();
+ current_state_.current_delay_ms = operations_state.current_buffer_size_ms;
+ current_state_.packet_size_ms = operations_state.current_frame_size_ms;
+ current_state_.next_packet_available =
+ operations_state.next_packet_available;
+ current_state_.packet_buffer_flushed =
+ operations_state.packet_buffer_flushes >
+ prev_ops_state_.packet_buffer_flushes;
+ // TODO(ivoc): Add more accurate reporting by tracking the origin of
+ // samples in the sync buffer.
+ result.action_times_ms[Action::kExpand] = 0;
+ result.action_times_ms[Action::kAccelerate] = 0;
+ result.action_times_ms[Action::kPreemptiveExpand] = 0;
+ result.action_times_ms[Action::kNormal] = 0;
+
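+      // Each GetAudio event produces 10 ms of audio, so the full 10 ms is
+      // attributed to exactly one of the actions below.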
+ if (out_frame.speech_type_ == AudioFrame::SpeechType::kPLC ||
+ out_frame.speech_type_ == AudioFrame::SpeechType::kPLCCNG) {
+ // Consider the whole frame to be the result of expansion.
+ result.action_times_ms[Action::kExpand] = 10;
+ } else if (operations_state.accelerate_samples -
+ prev_ops_state_.accelerate_samples >
+ 0) {
+ // Consider the whole frame to be the result of acceleration.
+ result.action_times_ms[Action::kAccelerate] = 10;
+ } else if (operations_state.preemptive_samples -
+ prev_ops_state_.preemptive_samples >
+ 0) {
+ // Consider the whole frame to be the result of preemptive expansion.
+ result.action_times_ms[Action::kPreemptiveExpand] = 10;
+ } else {
+ // Consider the whole frame to be the result of normal playout.
+ result.action_times_ms[Action::kNormal] = 10;
+ }
+ auto lifetime_stats = LifetimeStats();
+ if (text_log_) {
+ const bool plc =
+ (out_frame.speech_type_ == AudioFrame::SpeechType::kPLC) ||
+ (out_frame.speech_type_ == AudioFrame::SpeechType::kPLCCNG);
+ const bool cng = out_frame.speech_type_ == AudioFrame::SpeechType::kCNG;
+ const bool voice_concealed =
+ (lifetime_stats.concealed_samples -
+ lifetime_stats.silent_concealed_samples) >
+ (prev_lifetime_stats_.concealed_samples -
+ prev_lifetime_stats_.silent_concealed_samples);
+ *text_log_ << "GetAudio - wallclock: " << std::setw(5) << time_now_ms
+ << ", delta wc: " << std::setw(4)
+ << (input_->NextEventTime().value_or(time_now_ms) -
+ start_time_ms)
+ << ", CNG: " << cng << ", PLC: " << plc
+ << ", voice concealed: " << voice_concealed
+ << ", buffer size: " << std::setw(4)
+ << current_state_.current_delay_ms << std::endl;
+ if (lifetime_stats.packets_discarded >
+ prev_lifetime_stats_.packets_discarded) {
+ *text_log_ << "Discarded "
+ << (lifetime_stats.packets_discarded -
+ prev_lifetime_stats_.packets_discarded)
+ << " primary packets." << std::endl;
+ }
+ if (operations_state.packet_buffer_flushes >
+ prev_ops_state_.packet_buffer_flushes) {
+ *text_log_ << "Flushed packet buffer "
+ << (operations_state.packet_buffer_flushes -
+ prev_ops_state_.packet_buffer_flushes)
+ << " times." << std::endl;
+ }
+ }
+ prev_lifetime_stats_ = lifetime_stats;
+ const bool no_more_packets_to_decode =
+ !input_->NextPacketTime() && !operations_state.next_packet_available;
+ // End the simulation if the gap is too large. This indicates an issue
+ // with the event log file.
+ const bool simulation_step_too_large = result.simulation_step_ms > 1000;
+ if (simulation_step_too_large) {
+ // If we don't reset the step time, the large gap will be included in
+ // the simulation time, which can be a large distortion.
+ result.simulation_step_ms = 10;
+ }
+ result.is_simulation_finished = simulation_step_too_large ||
+ no_more_packets_to_decode ||
+ input_->ended();
+ prev_ops_state_ = operations_state;
+ return result;
+ }
+ }
+ result.simulation_step_ms =
+ input_->NextEventTime().value_or(time_now_ms) - start_time_ms;
+ result.is_simulation_finished = true;
+ return result;
+}
+
+void NetEqTest::SetNextAction(NetEqTest::Action next_operation) {
+ next_action_ = absl::optional<Action>(next_operation);
+}
+
+NetEqTest::NetEqState NetEqTest::GetNetEqState() {
+ return current_state_;
+}
+
+NetEqNetworkStatistics NetEqTest::SimulationStats() {
+ NetEqNetworkStatistics stats;
+ RTC_CHECK_EQ(neteq_->NetworkStatistics(&stats), 0);
+ return stats;
+}
+
+NetEqLifetimeStatistics NetEqTest::LifetimeStats() const {
+ return neteq_->GetLifetimeStatistics();
+}
+
+NetEqTest::DecoderMap NetEqTest::StandardDecoderMap() {
+ DecoderMap codecs = {
+ {0, SdpAudioFormat("pcmu", 8000, 1)},
+ {8, SdpAudioFormat("pcma", 8000, 1)},
+#ifdef WEBRTC_CODEC_ILBC
+ {102, SdpAudioFormat("ilbc", 8000, 1)},
+#endif
+ {103, SdpAudioFormat("isac", 16000, 1)},
+#if !defined(WEBRTC_ANDROID)
+ {104, SdpAudioFormat("isac", 32000, 1)},
+#endif
+#ifdef WEBRTC_CODEC_OPUS
+ {111, SdpAudioFormat("opus", 48000, 2)},
+#endif
+ {93, SdpAudioFormat("l16", 8000, 1)},
+ {94, SdpAudioFormat("l16", 16000, 1)},
+ {95, SdpAudioFormat("l16", 32000, 1)},
+ {96, SdpAudioFormat("l16", 48000, 1)},
+ {9, SdpAudioFormat("g722", 8000, 1)},
+ {106, SdpAudioFormat("telephone-event", 8000, 1)},
+ {114, SdpAudioFormat("telephone-event", 16000, 1)},
+ {115, SdpAudioFormat("telephone-event", 32000, 1)},
+ {116, SdpAudioFormat("telephone-event", 48000, 1)},
+ {117, SdpAudioFormat("red", 8000, 1)},
+ {13, SdpAudioFormat("cn", 8000, 1)},
+ {98, SdpAudioFormat("cn", 16000, 1)},
+ {99, SdpAudioFormat("cn", 32000, 1)},
+ {100, SdpAudioFormat("cn", 48000, 1)}
+ };
+ return codecs;
+}
+
+void NetEqTest::RegisterDecoders(const DecoderMap& codecs) {
+ for (const auto& c : codecs) {
+ RTC_CHECK(neteq_->RegisterPayloadType(c.first, c.second))
+ << "Cannot register " << c.second.name << " to payload type "
+ << c.first;
+ }
+}
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/tools/neteq_test.h b/third_party/libwebrtc/modules/audio_coding/neteq/tools/neteq_test.h
new file mode 100644
index 0000000000..0a6c24f3d6
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/tools/neteq_test.h
@@ -0,0 +1,129 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_TEST_H_
+#define MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_TEST_H_
+
+#include <fstream>
+#include <map>
+#include <memory>
+#include <string>
+#include <utility>
+
+#include "absl/types/optional.h"
+#include "api/audio_codecs/audio_decoder_factory.h"
+#include "api/neteq/neteq.h"
+#include "api/neteq/neteq_factory.h"
+#include "api/test/neteq_simulator.h"
+#include "modules/audio_coding/neteq/tools/audio_sink.h"
+#include "modules/audio_coding/neteq/tools/neteq_input.h"
+#include "system_wrappers/include/clock.h"
+
+namespace webrtc {
+namespace test {
+
+class NetEqTestErrorCallback {
+ public:
+ virtual ~NetEqTestErrorCallback() = default;
+ virtual void OnInsertPacketError(const NetEqInput::PacketData& packet) {}
+ virtual void OnGetAudioError() {}
+};
+
+class DefaultNetEqTestErrorCallback : public NetEqTestErrorCallback {
+ void OnInsertPacketError(const NetEqInput::PacketData& packet) override;
+ void OnGetAudioError() override;
+};
+
+class NetEqPostInsertPacket {
+ public:
+ virtual ~NetEqPostInsertPacket() = default;
+ virtual void AfterInsertPacket(const NetEqInput::PacketData& packet,
+ NetEq* neteq) = 0;
+};
+
+class NetEqGetAudioCallback {
+ public:
+ virtual ~NetEqGetAudioCallback() = default;
+ virtual void BeforeGetAudio(NetEq* neteq) = 0;
+ virtual void AfterGetAudio(int64_t time_now_ms,
+ const AudioFrame& audio_frame,
+ bool muted,
+ NetEq* neteq) = 0;
+};
+
+class NetEqSimulationEndedCallback {
+ public:
+ virtual ~NetEqSimulationEndedCallback() = default;
+ virtual void SimulationEnded(int64_t simulation_time_ms) = 0;
+};
+
+// Class that provides an input-output test for NetEq. The input (both packets
+// and output events) is provided by a NetEqInput object, while the output is
+// directed to an AudioSink object.
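+//
+// A minimal usage sketch (illustrative only; assumes `input` is a ready-made
+// NetEqInput and uses default values everywhere else):
+//
+//   NetEqTest test(NetEq::Config(), CreateBuiltinAudioDecoderFactory(),
+//                  NetEqTest::StandardDecoderMap(), /*text_log=*/nullptr,
+//                  /*neteq_factory=*/nullptr, std::move(input),
+//                  std::make_unique<VoidAudioSink>(), NetEqTest::Callbacks());
+//   int64_t audio_duration_ms = test.Run();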
+class NetEqTest : public NetEqSimulator {
+ public:
+ using DecoderMap = std::map<int, SdpAudioFormat>;
+
+ struct Callbacks {
+ NetEqTestErrorCallback* error_callback = nullptr;
+ NetEqPostInsertPacket* post_insert_packet = nullptr;
+ NetEqGetAudioCallback* get_audio_callback = nullptr;
+ NetEqSimulationEndedCallback* simulation_ended_callback = nullptr;
+ };
+
+  // Sets up the test with the given configuration, codec mappings, input,
+  // output,
+ // and callback objects for error reporting.
+ NetEqTest(const NetEq::Config& config,
+ rtc::scoped_refptr<AudioDecoderFactory> decoder_factory,
+ const DecoderMap& codecs,
+ std::unique_ptr<std::ofstream> text_log,
+ NetEqFactory* neteq_factory,
+ std::unique_ptr<NetEqInput> input,
+ std::unique_ptr<AudioSink> output,
+ Callbacks callbacks);
+
+ ~NetEqTest() override;
+
+ // Runs the test. Returns the duration of the produced audio in ms.
+ int64_t Run() override;
+ // Runs the simulation until we hit the next GetAudio event. If the simulation
+ // is finished, is_simulation_finished will be set to true in the returned
+ // SimulationStepResult.
+ SimulationStepResult RunToNextGetAudio() override;
+
+ void SetNextAction(Action next_operation) override;
+ NetEqState GetNetEqState() override;
+
+ // Returns the statistics from NetEq.
+ NetEqNetworkStatistics SimulationStats();
+ NetEqLifetimeStatistics LifetimeStats() const;
+
+ static DecoderMap StandardDecoderMap();
+
+ private:
+ void RegisterDecoders(const DecoderMap& codecs);
+ SimulatedClock clock_;
+ absl::optional<Action> next_action_;
+ absl::optional<int> last_packet_time_ms_;
+ std::unique_ptr<NetEq> neteq_;
+ std::unique_ptr<NetEqInput> input_;
+ std::unique_ptr<AudioSink> output_;
+ Callbacks callbacks_;
+ int sample_rate_hz_;
+ NetEqState current_state_;
+ NetEqOperationsAndState prev_ops_state_;
+ NetEqLifetimeStatistics prev_lifetime_stats_;
+ absl::optional<uint32_t> last_packet_timestamp_;
+ std::unique_ptr<std::ofstream> text_log_;
+};
+
+} // namespace test
+} // namespace webrtc
+#endif // MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_TEST_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/tools/neteq_test_factory.cc b/third_party/libwebrtc/modules/audio_coding/neteq/tools/neteq_test_factory.cc
new file mode 100644
index 0000000000..6cd371406c
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/tools/neteq_test_factory.cc
@@ -0,0 +1,342 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/tools/neteq_test_factory.h"
+
+#include <errno.h>
+#include <limits.h> // For ULONG_MAX returned by strtoul.
+#include <stdio.h>
+#include <stdlib.h> // For strtoul.
+
+#include <fstream>
+#include <iostream>
+#include <memory>
+#include <set>
+#include <string>
+#include <utility>
+
+#include "absl/strings/string_view.h"
+#include "api/audio_codecs/builtin_audio_decoder_factory.h"
+#include "api/neteq/neteq.h"
+#include "modules/audio_coding/neteq/tools/audio_sink.h"
+#include "modules/audio_coding/neteq/tools/fake_decode_from_file.h"
+#include "modules/audio_coding/neteq/tools/initial_packet_inserter_neteq_input.h"
+#include "modules/audio_coding/neteq/tools/input_audio_file.h"
+#include "modules/audio_coding/neteq/tools/neteq_delay_analyzer.h"
+#include "modules/audio_coding/neteq/tools/neteq_event_log_input.h"
+#include "modules/audio_coding/neteq/tools/neteq_packet_source_input.h"
+#include "modules/audio_coding/neteq/tools/neteq_replacement_input.h"
+#include "modules/audio_coding/neteq/tools/neteq_stats_getter.h"
+#include "modules/audio_coding/neteq/tools/neteq_stats_plotter.h"
+#include "modules/audio_coding/neteq/tools/neteq_test.h"
+#include "modules/audio_coding/neteq/tools/output_audio_file.h"
+#include "modules/audio_coding/neteq/tools/output_wav_file.h"
+#include "modules/audio_coding/neteq/tools/rtp_file_source.h"
+#include "rtc_base/checks.h"
+#include "test/function_audio_decoder_factory.h"
+#include "test/testsupport/file_utils.h"
+
+namespace webrtc {
+namespace test {
+namespace {
+
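+// Returns the sample rate in Hz corresponding to `payload_type`, according to
+// the payload type mappings in `config`. Returns 0 for RED and nullopt for
+// unknown payload types.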
+absl::optional<int> CodecSampleRate(
+ uint8_t payload_type,
+    const webrtc::test::NetEqTestFactory::Config& config) {
+ if (payload_type == config.pcmu || payload_type == config.pcma ||
+ payload_type == config.ilbc || payload_type == config.pcm16b ||
+ payload_type == config.cn_nb || payload_type == config.avt)
+ return 8000;
+ if (payload_type == config.isac || payload_type == config.pcm16b_wb ||
+ payload_type == config.g722 || payload_type == config.cn_wb ||
+ payload_type == config.avt_16)
+ return 16000;
+ if (payload_type == config.isac_swb || payload_type == config.pcm16b_swb32 ||
+ payload_type == config.cn_swb32 || payload_type == config.avt_32)
+ return 32000;
+ if (payload_type == config.opus || payload_type == config.pcm16b_swb48 ||
+ payload_type == config.cn_swb48 || payload_type == config.avt_48)
+ return 48000;
+ if (payload_type == config.red)
+ return 0;
+ return absl::nullopt;
+}
+
+} // namespace
+
+// A callback class which prints a message whenever the inserted packet stream
+// changes the SSRC.
+class SsrcSwitchDetector : public NetEqPostInsertPacket {
+ public:
+ // Takes a pointer to another callback object, which will be invoked after
+ // this object finishes. This does not transfer ownership, and null is a
+ // valid value.
+ explicit SsrcSwitchDetector(NetEqPostInsertPacket* other_callback)
+ : other_callback_(other_callback) {}
+
+ void AfterInsertPacket(const NetEqInput::PacketData& packet,
+ NetEq* neteq) override {
+ if (last_ssrc_ && packet.header.ssrc != *last_ssrc_) {
+ std::cout << "Changing streams from 0x" << std::hex << *last_ssrc_
+ << " to 0x" << std::hex << packet.header.ssrc << std::dec
+ << " (payload type "
+ << static_cast<int>(packet.header.payloadType) << ")"
+ << std::endl;
+ }
+ last_ssrc_ = packet.header.ssrc;
+ if (other_callback_) {
+ other_callback_->AfterInsertPacket(packet, neteq);
+ }
+ }
+
+ private:
+ NetEqPostInsertPacket* other_callback_;
+ absl::optional<uint32_t> last_ssrc_;
+};
+
+NetEqTestFactory::NetEqTestFactory() = default;
+NetEqTestFactory::~NetEqTestFactory() = default;
+
+NetEqTestFactory::Config::Config() = default;
+NetEqTestFactory::Config::Config(const Config& other) = default;
+NetEqTestFactory::Config::~Config() = default;
+
+std::unique_ptr<NetEqTest> NetEqTestFactory::InitializeTestFromString(
+ absl::string_view input_string,
+ NetEqFactory* factory,
+ const Config& config) {
+ std::unique_ptr<NetEqInput> input(
+ NetEqEventLogInput::CreateFromString(input_string, config.ssrc_filter));
+ if (!input) {
+ std::cerr << "Error: Cannot parse input string" << std::endl;
+ return nullptr;
+ }
+ return InitializeTest(std::move(input), factory, config);
+}
+
+std::unique_ptr<NetEqTest> NetEqTestFactory::InitializeTestFromFile(
+ absl::string_view input_file_name,
+ NetEqFactory* factory,
+ const Config& config) {
+ // Gather RTP header extensions in a map.
+ NetEqPacketSourceInput::RtpHeaderExtensionMap rtp_ext_map = {
+ {config.audio_level, kRtpExtensionAudioLevel},
+ {config.abs_send_time, kRtpExtensionAbsoluteSendTime},
+ {config.transport_seq_no, kRtpExtensionTransportSequenceNumber},
+ {config.video_content_type, kRtpExtensionVideoContentType},
+ {config.video_timing, kRtpExtensionVideoTiming}};
+
+ std::unique_ptr<NetEqInput> input;
+ if (RtpFileSource::ValidRtpDump(input_file_name) ||
+ RtpFileSource::ValidPcap(input_file_name)) {
+ input.reset(new NetEqRtpDumpInput(input_file_name, rtp_ext_map,
+ config.ssrc_filter));
+ } else {
+ input.reset(NetEqEventLogInput::CreateFromFile(input_file_name,
+ config.ssrc_filter));
+ }
+
+ std::cout << "Input file: " << input_file_name << std::endl;
+ if (!input) {
+ std::cerr << "Error: Cannot open input file" << std::endl;
+ return nullptr;
+ }
+ return InitializeTest(std::move(input), factory, config);
+}
+
+std::unique_ptr<NetEqTest> NetEqTestFactory::InitializeTest(
+ std::unique_ptr<NetEqInput> input,
+ NetEqFactory* factory,
+ const Config& config) {
+ if (input->ended()) {
+ std::cerr << "Error: Input is empty" << std::endl;
+ return nullptr;
+ }
+
+ if (!config.field_trial_string.empty()) {
+ field_trials_ =
+ std::make_unique<ScopedFieldTrials>(config.field_trial_string);
+ }
+
+ // Skip some initial events/packets if requested.
+ if (config.skip_get_audio_events > 0) {
+ std::cout << "Skipping " << config.skip_get_audio_events
+ << " get_audio events" << std::endl;
+ if (!input->NextPacketTime() || !input->NextOutputEventTime()) {
+ std::cerr << "No events found" << std::endl;
+ return nullptr;
+ }
+ for (int i = 0; i < config.skip_get_audio_events; i++) {
+ input->AdvanceOutputEvent();
+ if (!input->NextOutputEventTime()) {
+ std::cerr << "Not enough get_audio events found" << std::endl;
+ return nullptr;
+ }
+ }
+ while (*input->NextPacketTime() < *input->NextOutputEventTime()) {
+ input->PopPacket();
+ if (!input->NextPacketTime()) {
+ std::cerr << "Not enough incoming packets found" << std::endl;
+ return nullptr;
+ }
+ }
+ }
+
+ // Check the sample rate.
+ absl::optional<int> sample_rate_hz;
+ std::set<std::pair<int, uint32_t>> discarded_pt_and_ssrc;
+ while (absl::optional<RTPHeader> first_rtp_header = input->NextHeader()) {
+ RTC_DCHECK(first_rtp_header);
+ sample_rate_hz = CodecSampleRate(first_rtp_header->payloadType, config);
+ if (sample_rate_hz) {
+ std::cout << "Found valid packet with payload type "
+ << static_cast<int>(first_rtp_header->payloadType)
+ << " and SSRC 0x" << std::hex << first_rtp_header->ssrc
+ << std::dec << std::endl;
+ if (config.initial_dummy_packets > 0) {
+ std::cout << "Nr of initial dummy packets: "
+ << config.initial_dummy_packets << std::endl;
+ input = std::make_unique<InitialPacketInserterNetEqInput>(
+ std::move(input), config.initial_dummy_packets, *sample_rate_hz);
+ }
+ break;
+ }
+ // Discard this packet and move to the next. Keep track of discarded payload
+ // types and SSRCs.
+ discarded_pt_and_ssrc.emplace(first_rtp_header->payloadType,
+ first_rtp_header->ssrc);
+ input->PopPacket();
+ }
+ if (!discarded_pt_and_ssrc.empty()) {
+ std::cout << "Discarded initial packets with the following payload types "
+ "and SSRCs:"
+ << std::endl;
+ for (const auto& d : discarded_pt_and_ssrc) {
+ std::cout << "PT " << d.first << "; SSRC 0x" << std::hex
+ << static_cast<int>(d.second) << std::dec << std::endl;
+ }
+ }
+ if (!sample_rate_hz) {
+ std::cerr << "Cannot find any packets with known payload types"
+ << std::endl;
+ return nullptr;
+ }
+
+ // If an output file is requested, open it.
+ std::unique_ptr<AudioSink> output;
+ if (!config.output_audio_filename.has_value()) {
+ output = std::make_unique<VoidAudioSink>();
+ std::cout << "No output audio file" << std::endl;
+ } else if (config.output_audio_filename->size() >= 4 &&
+ config.output_audio_filename->substr(
+ config.output_audio_filename->size() - 4) == ".wav") {
+ // Open a wav file with the known sample rate.
+ output = std::make_unique<OutputWavFile>(*config.output_audio_filename,
+ *sample_rate_hz);
+ std::cout << "Output WAV file: " << *config.output_audio_filename
+ << std::endl;
+ } else {
+ // Open a pcm file.
+ output = std::make_unique<OutputAudioFile>(*config.output_audio_filename);
+ std::cout << "Output PCM file: " << *config.output_audio_filename
+ << std::endl;
+ }
+
+ NetEqTest::DecoderMap codecs = NetEqTest::StandardDecoderMap();
+
+ rtc::scoped_refptr<AudioDecoderFactory> decoder_factory =
+ CreateBuiltinAudioDecoderFactory();
+
+ // Check if a replacement audio file was provided.
+ if (config.replacement_audio_file.size() > 0) {
+ // Find largest unused payload type.
+ int replacement_pt = 127;
+ while (codecs.find(replacement_pt) != codecs.end()) {
+ --replacement_pt;
+ if (replacement_pt <= 0) {
+ std::cerr << "Error: Unable to find available replacement payload type"
+ << std::endl;
+ return nullptr;
+ }
+ }
+
+ auto std_set_int32_to_uint8 = [](const std::set<int32_t>& a) {
+ std::set<uint8_t> b;
+ for (auto& x : a) {
+ b.insert(static_cast<uint8_t>(x));
+ }
+ return b;
+ };
+
+ std::set<uint8_t> cn_types = std_set_int32_to_uint8(
+ {config.cn_nb, config.cn_wb, config.cn_swb32, config.cn_swb48});
+ std::set<uint8_t> forbidden_types =
+ std_set_int32_to_uint8({config.g722, config.red, config.avt,
+ config.avt_16, config.avt_32, config.avt_48});
+ input.reset(new NetEqReplacementInput(std::move(input), replacement_pt,
+ cn_types, forbidden_types));
+
+ // Note that capture-by-copy implies that the lambda captures the value of
+ // decoder_factory before it's reassigned on the left-hand side.
+ decoder_factory = rtc::make_ref_counted<FunctionAudioDecoderFactory>(
+ [decoder_factory, config](
+ const SdpAudioFormat& format,
+ absl::optional<AudioCodecPairId> codec_pair_id) {
+ std::unique_ptr<AudioDecoder> decoder =
+ decoder_factory->MakeAudioDecoder(format, codec_pair_id);
+ if (!decoder && format.name == "replacement") {
+ decoder = std::make_unique<FakeDecodeFromFile>(
+ std::make_unique<InputAudioFile>(config.replacement_audio_file),
+ format.clockrate_hz, format.num_channels > 1);
+ }
+ return decoder;
+ });
+
+ if (!codecs
+ .insert({replacement_pt, SdpAudioFormat("replacement", 48000, 1)})
+ .second) {
+ std::cerr << "Error: Unable to insert replacement audio codec"
+ << std::endl;
+ return nullptr;
+ }
+ }
+
+ // Create a text log output stream if needed.
+ std::unique_ptr<std::ofstream> text_log;
+ if (config.textlog && config.textlog_filename.has_value()) {
+ // Write to file.
+ text_log = std::make_unique<std::ofstream>(*config.textlog_filename);
+ } else if (config.textlog) {
+ // Print to stdout.
+ text_log = std::make_unique<std::ofstream>();
+ text_log->basic_ios<char>::rdbuf(std::cout.rdbuf());
+ }
+
+ NetEqTest::Callbacks callbacks;
+ stats_plotter_ = std::make_unique<NetEqStatsPlotter>(
+ config.matlabplot, config.pythonplot, config.concealment_events,
+ config.plot_scripts_basename.value_or(""));
+
+ ssrc_switch_detector_.reset(
+ new SsrcSwitchDetector(stats_plotter_->stats_getter()->delay_analyzer()));
+ callbacks.post_insert_packet = ssrc_switch_detector_.get();
+ callbacks.get_audio_callback = stats_plotter_->stats_getter();
+ callbacks.simulation_ended_callback = stats_plotter_.get();
+ NetEq::Config neteq_config;
+ neteq_config.sample_rate_hz = *sample_rate_hz;
+ neteq_config.max_packets_in_buffer = config.max_nr_packets_in_buffer;
+ neteq_config.enable_fast_accelerate = config.enable_fast_accelerate;
+ return std::make_unique<NetEqTest>(
+ neteq_config, decoder_factory, codecs, std::move(text_log), factory,
+ std::move(input), std::move(output), callbacks);
+}
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/tools/neteq_test_factory.h b/third_party/libwebrtc/modules/audio_coding/neteq/tools/neteq_test_factory.h
new file mode 100644
index 0000000000..96ce0b4334
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/tools/neteq_test_factory.h
@@ -0,0 +1,172 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_TEST_FACTORY_H_
+#define MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_TEST_FACTORY_H_
+
+#include <memory>
+#include <string>
+
+#include "absl/strings/string_view.h"
+#include "absl/types/optional.h"
+#include "modules/audio_coding/neteq/tools/neteq_test.h"
+#include "test/field_trial.h"
+
+namespace webrtc {
+namespace test {
+
+class SsrcSwitchDetector;
+class NetEqStatsGetter;
+class NetEqStatsPlotter;
+
+// Note that the NetEqTestFactory needs to be alive when the NetEqTest object is
+// used for a simulation.
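+//
+// A minimal usage sketch (illustrative only; file names are hypothetical):
+//
+//   NetEqTestFactory factory;
+//   NetEqTestFactory::Config config;
+//   config.output_audio_filename = "out.wav";
+//   std::unique_ptr<NetEqTest> test = factory.InitializeTestFromFile(
+//       "input.rtp", /*neteq_factory=*/nullptr, config);
+//   if (test) test->Run();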
+class NetEqTestFactory {
+ public:
+ NetEqTestFactory();
+ ~NetEqTestFactory();
+ struct Config {
+ Config();
+ Config(const Config& other);
+ ~Config();
+ // RTP payload type for PCM-u.
+ static constexpr int default_pcmu() { return 0; }
+ int pcmu = default_pcmu();
+ // RTP payload type for PCM-a.
+ static constexpr int default_pcma() { return 8; }
+ int pcma = default_pcma();
+ // RTP payload type for iLBC.
+ static constexpr int default_ilbc() { return 102; }
+ int ilbc = default_ilbc();
+ // RTP payload type for iSAC.
+ static constexpr int default_isac() { return 103; }
+ int isac = default_isac();
+ // RTP payload type for iSAC-swb (32 kHz).
+ static constexpr int default_isac_swb() { return 104; }
+ int isac_swb = default_isac_swb();
+ // RTP payload type for Opus.
+ static constexpr int default_opus() { return 111; }
+ int opus = default_opus();
+ // RTP payload type for PCM16b-nb (8 kHz).
+ static constexpr int default_pcm16b() { return 93; }
+ int pcm16b = default_pcm16b();
+ // RTP payload type for PCM16b-wb (16 kHz).
+ static constexpr int default_pcm16b_wb() { return 94; }
+ int pcm16b_wb = default_pcm16b_wb();
+ // RTP payload type for PCM16b-swb32 (32 kHz).
+ static constexpr int default_pcm16b_swb32() { return 95; }
+ int pcm16b_swb32 = default_pcm16b_swb32();
+ // RTP payload type for PCM16b-swb48 (48 kHz).
+ static constexpr int default_pcm16b_swb48() { return 96; }
+ int pcm16b_swb48 = default_pcm16b_swb48();
+ // RTP payload type for G.722.
+ static constexpr int default_g722() { return 9; }
+ int g722 = default_g722();
+ // RTP payload type for AVT/DTMF (8 kHz).
+ static constexpr int default_avt() { return 106; }
+ int avt = default_avt();
+ // RTP payload type for AVT/DTMF (16 kHz).
+ static constexpr int default_avt_16() { return 114; }
+ int avt_16 = default_avt_16();
+ // RTP payload type for AVT/DTMF (32 kHz).
+ static constexpr int default_avt_32() { return 115; }
+ int avt_32 = default_avt_32();
+ // RTP payload type for AVT/DTMF (48 kHz).
+ static constexpr int default_avt_48() { return 116; }
+ int avt_48 = default_avt_48();
+ // RTP payload type for redundant audio (RED).
+ static constexpr int default_red() { return 117; }
+ int red = default_red();
+ // RTP payload type for comfort noise (8 kHz).
+ static constexpr int default_cn_nb() { return 13; }
+ int cn_nb = default_cn_nb();
+ // RTP payload type for comfort noise (16 kHz).
+ static constexpr int default_cn_wb() { return 98; }
+ int cn_wb = default_cn_wb();
+ // RTP payload type for comfort noise (32 kHz).
+ static constexpr int default_cn_swb32() { return 99; }
+ int cn_swb32 = default_cn_swb32();
+ // RTP payload type for comfort noise (48 kHz).
+ static constexpr int default_cn_swb48() { return 100; }
+ int cn_swb48 = default_cn_swb48();
+ // A PCM file that will be used to populate dummy RTP packets.
+ std::string replacement_audio_file;
+ // Only use packets with this SSRC.
+ absl::optional<uint32_t> ssrc_filter;
+ // Extension ID for audio level (RFC 6464).
+ static constexpr int default_audio_level() { return 1; }
+ int audio_level = default_audio_level();
+ // Extension ID for absolute sender time.
+ static constexpr int default_abs_send_time() { return 3; }
+ int abs_send_time = default_abs_send_time();
+ // Extension ID for transport sequence number.
+ static constexpr int default_transport_seq_no() { return 5; }
+ int transport_seq_no = default_transport_seq_no();
+ // Extension ID for video content type.
+ static constexpr int default_video_content_type() { return 7; }
+ int video_content_type = default_video_content_type();
+ // Extension ID for video timing.
+ static constexpr int default_video_timing() { return 8; }
+ int video_timing = default_video_timing();
+ // Generate a matlab script for plotting the delay profile.
+ bool matlabplot = false;
+ // Generates a python script for plotting the delay profile.
+ bool pythonplot = false;
+ // Prints concealment events.
+ bool concealment_events = false;
+ // Maximum allowed number of packets in the buffer.
+ static constexpr int default_max_nr_packets_in_buffer() { return 200; }
+ int max_nr_packets_in_buffer = default_max_nr_packets_in_buffer();
+ // Number of dummy packets to put in the packet buffer at the start of the
+ // simulation.
+ static constexpr int default_initial_dummy_packets() { return 0; }
+ int initial_dummy_packets = default_initial_dummy_packets();
+ // Number of getAudio events to skip at the start of the simulation.
+ static constexpr int default_skip_get_audio_events() { return 0; }
+ int skip_get_audio_events = default_skip_get_audio_events();
+ // Enables jitter buffer fast accelerate.
+ bool enable_fast_accelerate = false;
+ // Dumps events that describes the simulation on a step-by-step basis.
+ bool textlog = false;
+ // If specified and `textlog` is true, the output of `textlog` is written to
+ // the specified file name.
+ absl::optional<std::string> textlog_filename;
+ // Base name for the output script files for plotting the delay profile.
+ absl::optional<std::string> plot_scripts_basename;
+ // Path to the output audio file.
+ absl::optional<std::string> output_audio_filename;
+ // Field trials to use during the simulation.
+ std::string field_trial_string;
+ };
+
+ std::unique_ptr<NetEqTest> InitializeTestFromFile(
+ absl::string_view input_filename,
+ NetEqFactory* neteq_factory,
+ const Config& config);
+ std::unique_ptr<NetEqTest> InitializeTestFromString(
+ absl::string_view input_string,
+ NetEqFactory* neteq_factory,
+ const Config& config);
+
+ private:
+ std::unique_ptr<NetEqTest> InitializeTest(std::unique_ptr<NetEqInput> input,
+ NetEqFactory* neteq_factory,
+ const Config& config);
+ std::unique_ptr<SsrcSwitchDetector> ssrc_switch_detector_;
+ std::unique_ptr<NetEqStatsPlotter> stats_plotter_;
+ // The field trials are stored in the test factory, because neteq_test is not
+ // in a testonly target, and therefore cannot use ScopedFieldTrials.
+ std::unique_ptr<ScopedFieldTrials> field_trials_;
+};
+
+} // namespace test
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_TEST_FACTORY_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/tools/output_audio_file.h b/third_party/libwebrtc/modules/audio_coding/neteq/tools/output_audio_file.h
new file mode 100644
index 0000000000..25577fc882
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/tools/output_audio_file.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_TOOLS_OUTPUT_AUDIO_FILE_H_
+#define MODULES_AUDIO_CODING_NETEQ_TOOLS_OUTPUT_AUDIO_FILE_H_
+
+#include <stdio.h>
+
+#include <string>
+
+#include "absl/strings/string_view.h"
+#include "modules/audio_coding/neteq/tools/audio_sink.h"
+
+namespace webrtc {
+namespace test {
+
+class OutputAudioFile : public AudioSink {
+ public:
+ // Creates an OutputAudioFile, opening a file named `file_name` for writing.
+ // The file format is 16-bit signed host-endian PCM.
+ explicit OutputAudioFile(absl::string_view file_name) {
+ out_file_ = fopen(std::string(file_name).c_str(), "wb");
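+    // If the file cannot be opened, out_file_ is null and WriteArray() will
+    // fail the RTC_DCHECK below (debug builds only).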
+ }
+
+ virtual ~OutputAudioFile() {
+ if (out_file_)
+ fclose(out_file_);
+ }
+
+ OutputAudioFile(const OutputAudioFile&) = delete;
+ OutputAudioFile& operator=(const OutputAudioFile&) = delete;
+
+ bool WriteArray(const int16_t* audio, size_t num_samples) override {
+ RTC_DCHECK(out_file_);
+ return fwrite(audio, sizeof(*audio), num_samples, out_file_) == num_samples;
+ }
+
+ private:
+ FILE* out_file_;
+};
+
+} // namespace test
+} // namespace webrtc
+#endif // MODULES_AUDIO_CODING_NETEQ_TOOLS_OUTPUT_AUDIO_FILE_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/tools/output_wav_file.h b/third_party/libwebrtc/modules/audio_coding/neteq/tools/output_wav_file.h
new file mode 100644
index 0000000000..20eedfb554
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/tools/output_wav_file.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_TOOLS_OUTPUT_WAV_FILE_H_
+#define MODULES_AUDIO_CODING_NETEQ_TOOLS_OUTPUT_WAV_FILE_H_
+
+#include <string>
+
+#include "absl/strings/string_view.h"
+#include "common_audio/wav_file.h"
+#include "modules/audio_coding/neteq/tools/audio_sink.h"
+
+namespace webrtc {
+namespace test {
+
+class OutputWavFile : public AudioSink {
+ public:
+ // Creates an OutputWavFile, opening a file named `file_name` for writing.
+ // The output file is a PCM encoded wav file.
+ OutputWavFile(absl::string_view file_name,
+ int sample_rate_hz,
+ int num_channels = 1)
+ : wav_writer_(file_name, sample_rate_hz, num_channels) {}
+
+ OutputWavFile(const OutputWavFile&) = delete;
+ OutputWavFile& operator=(const OutputWavFile&) = delete;
+
+ bool WriteArray(const int16_t* audio, size_t num_samples) override {
+ wav_writer_.WriteSamples(audio, num_samples);
+ return true;
+ }
+
+ private:
+ WavWriter wav_writer_;
+};
+
+} // namespace test
+} // namespace webrtc
+#endif // MODULES_AUDIO_CODING_NETEQ_TOOLS_OUTPUT_WAV_FILE_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/tools/packet.cc b/third_party/libwebrtc/modules/audio_coding/neteq/tools/packet.cc
new file mode 100644
index 0000000000..e540173f43
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/tools/packet.cc
@@ -0,0 +1,133 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/tools/packet.h"
+
+#include "api/array_view.h"
+#include "modules/rtp_rtcp/source/rtp_packet_received.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/copy_on_write_buffer.h"
+
+namespace webrtc {
+namespace test {
+
+Packet::Packet(rtc::CopyOnWriteBuffer packet,
+ size_t virtual_packet_length_bytes,
+ double time_ms,
+ const RtpHeaderExtensionMap* extension_map)
+ : packet_(std::move(packet)),
+ virtual_packet_length_bytes_(virtual_packet_length_bytes),
+ time_ms_(time_ms),
+ valid_header_(ParseHeader(extension_map)) {}
+
+Packet::Packet(const RTPHeader& header,
+ size_t virtual_packet_length_bytes,
+ size_t virtual_payload_length_bytes,
+ double time_ms)
+ : header_(header),
+ virtual_packet_length_bytes_(virtual_packet_length_bytes),
+ virtual_payload_length_bytes_(virtual_payload_length_bytes),
+ time_ms_(time_ms),
+ valid_header_(true) {}
+
+Packet::~Packet() = default;
+
+bool Packet::ExtractRedHeaders(std::list<RTPHeader*>* headers) const {
+ //
+ // 0 1 2 3
+ // 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ // |1| block PT | timestamp offset | block length |
+ // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ // |1| ... |
+ // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ // |0| block PT |
+ // +-+-+-+-+-+-+-+-+
+ //
+
+ const uint8_t* payload_ptr = payload();
+ const uint8_t* payload_end_ptr = payload_ptr + payload_length_bytes();
+
+ // Find all RED headers with the extension bit set to 1. That is, all headers
+ // but the last one.
+ while ((payload_ptr < payload_end_ptr) && (*payload_ptr & 0x80)) {
+ RTPHeader* header = new RTPHeader;
+ CopyToHeader(header);
+ header->payloadType = payload_ptr[0] & 0x7F;
+ uint32_t offset = (payload_ptr[1] << 6) + ((payload_ptr[2] & 0xFC) >> 2);
+ header->timestamp -= offset;
+ headers->push_front(header);
+ payload_ptr += 4;
+ }
+ // Last header.
+ RTC_DCHECK_LT(payload_ptr, payload_end_ptr);
+ if (payload_ptr >= payload_end_ptr) {
+ return false; // Payload too short.
+ }
+ RTPHeader* header = new RTPHeader;
+ CopyToHeader(header);
+ header->payloadType = payload_ptr[0] & 0x7F;
+ headers->push_front(header);
+ return true;
+}
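+
+// Worked example of the decoding above (illustrative values, not part of the
+// parsing flow): a RED block header {0x83, 0x01, 0x90, 0x14} decodes to
+// payload type 0x83 & 0x7F = 3 and timestamp offset
+// (0x01 << 6) + ((0x90 & 0xFC) >> 2) = 64 + 36 = 100; the block length bits
+// are not needed by ExtractRedHeaders and are skipped over.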
+
+void Packet::DeleteRedHeaders(std::list<RTPHeader*>* headers) {
+ while (!headers->empty()) {
+ delete headers->front();
+ headers->pop_front();
+ }
+}
+
+bool Packet::ParseHeader(const RtpHeaderExtensionMap* extension_map) {
+  // Use RtpPacketReceived instead of RtpPacket because the former already has
+  // a converter to the legacy RTPHeader.
+ webrtc::RtpPacketReceived rtp_packet(extension_map);
+
+  // Handle padding manually, because of the special case of dummy packets
+  // that have the padding bit set in the RTP header but do not carry an RTP
+  // payload containing the padding size. The regular RTP packet parser
+  // reports a failure for such packets, which is fine in this context.
+ bool padding = (packet_[0] & 0b0010'0000);
+ size_t padding_size = 0;
+ if (padding) {
+    // Clear the padding bit to prevent a parse failure when the RTP payload
+    // is omitted.
+ rtc::CopyOnWriteBuffer packet(packet_);
+ packet.MutableData()[0] &= ~0b0010'0000;
+ if (!rtp_packet.Parse(std::move(packet))) {
+ return false;
+ }
+ if (rtp_packet.payload_size() > 0) {
+ padding_size = rtp_packet.data()[rtp_packet.size() - 1];
+ }
+ if (padding_size > rtp_packet.payload_size()) {
+ return false;
+ }
+ } else {
+ if (!rtp_packet.Parse(packet_)) {
+ return false;
+ }
+ }
+ rtp_payload_ = rtc::MakeArrayView(packet_.data() + rtp_packet.headers_size(),
+ rtp_packet.payload_size() - padding_size);
+ rtp_packet.GetHeader(&header_);
+
+ RTC_CHECK_GE(virtual_packet_length_bytes_, rtp_packet.size());
+ RTC_DCHECK_GE(virtual_packet_length_bytes_, rtp_packet.headers_size());
+ virtual_payload_length_bytes_ =
+ virtual_packet_length_bytes_ - rtp_packet.headers_size();
+ return true;
+}
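+
+// Example of the padding path above (a sketch with assumed sizes): a 112-byte
+// packet with a 12-byte header and the padding bit set, whose last byte reads
+// 100, gives payload_size = 100 and padding_size = 100, so `rtp_payload_`
+// ends up as a 100 - 100 = 0 byte view, as expected for a padding-only dummy
+// packet.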
+
+void Packet::CopyToHeader(RTPHeader* destination) const {
+ *destination = header_;
+}
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/tools/packet.h b/third_party/libwebrtc/modules/audio_coding/neteq/tools/packet.h
new file mode 100644
index 0000000000..96710907df
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/tools/packet.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_TOOLS_PACKET_H_
+#define MODULES_AUDIO_CODING_NETEQ_TOOLS_PACKET_H_
+
+#include <list>
+
+#include "api/array_view.h"
+#include "api/rtp_headers.h"
+#include "modules/rtp_rtcp/include/rtp_header_extension_map.h"
+#include "rtc_base/copy_on_write_buffer.h"
+
+namespace webrtc {
+namespace test {
+
+// Class for handling RTP packets in test applications.
+class Packet {
+ public:
+  // Creates a packet, with the packet data (including header bytes) in
+  // `packet`. The `time_ms` is an extra time associated with this packet,
+  // typically used to denote arrival time.
+  // `virtual_packet_length_bytes` is typically used when reading RTP dump
+  // files that only contain the RTP headers and no payload (a.k.a. RTP dummy
+  // files or RTP light). The `virtual_packet_length_bytes` tells what size
+  // the packet had on the wire, including the now discarded payload.
+ Packet(rtc::CopyOnWriteBuffer packet,
+ size_t virtual_packet_length_bytes,
+ double time_ms,
+ const RtpHeaderExtensionMap* extension_map = nullptr);
+
+ Packet(rtc::CopyOnWriteBuffer packet,
+ double time_ms,
+ const RtpHeaderExtensionMap* extension_map = nullptr)
+ : Packet(packet, packet.size(), time_ms, extension_map) {}
+
+ // Same as above, but creates the packet from an already parsed RTPHeader.
+ // This is typically used when reading RTP dump files that only contain the
+ // RTP headers, and no payload. The `virtual_packet_length_bytes` tells what
+  // size the packet had on the wire, including the now discarded payload.
+  // The `virtual_payload_length_bytes` tells the size of the payload.
+ Packet(const RTPHeader& header,
+ size_t virtual_packet_length_bytes,
+ size_t virtual_payload_length_bytes,
+ double time_ms);
+
+ virtual ~Packet();
+
+ Packet(const Packet&) = delete;
+ Packet& operator=(const Packet&) = delete;
+
+ // Parses the first bytes of the RTP payload, interpreting them as RED headers
+ // according to RFC 2198. The headers will be inserted into `headers`. The
+ // caller of the method assumes ownership of the objects in the list, and
+ // must delete them properly.
+ bool ExtractRedHeaders(std::list<RTPHeader*>* headers) const;
+
+ // Deletes all RTPHeader objects in `headers`, but does not delete `headers`
+ // itself.
+ static void DeleteRedHeaders(std::list<RTPHeader*>* headers);
+
+ const uint8_t* payload() const { return rtp_payload_.data(); }
+
+ size_t packet_length_bytes() const { return packet_.size(); }
+
+ size_t payload_length_bytes() const { return rtp_payload_.size(); }
+
+ size_t virtual_packet_length_bytes() const {
+ return virtual_packet_length_bytes_;
+ }
+
+ size_t virtual_payload_length_bytes() const {
+ return virtual_payload_length_bytes_;
+ }
+
+ const RTPHeader& header() const { return header_; }
+
+ double time_ms() const { return time_ms_; }
+ bool valid_header() const { return valid_header_; }
+
+ private:
+ bool ParseHeader(const RtpHeaderExtensionMap* extension_map);
+ void CopyToHeader(RTPHeader* destination) const;
+
+ RTPHeader header_;
+ const rtc::CopyOnWriteBuffer packet_;
+ rtc::ArrayView<const uint8_t> rtp_payload_; // Empty for dummy RTP packets.
+ // Virtual lengths are used when parsing RTP header files (dummy RTP files).
+ const size_t virtual_packet_length_bytes_;
+ size_t virtual_payload_length_bytes_ = 0;
+ const double time_ms_; // Used to denote a packet's arrival time.
+ const bool valid_header_;
+};
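+
+// A minimal usage sketch (assuming `buffer` holds a complete RTP packet):
+//
+//   Packet packet(std::move(buffer), /*time_ms=*/0.0);
+//   if (packet.valid_header()) {
+//     uint8_t pt = packet.header().payloadType;
+//     size_t payload_bytes = packet.payload_length_bytes();
+//   }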
+
+} // namespace test
+} // namespace webrtc
+#endif // MODULES_AUDIO_CODING_NETEQ_TOOLS_PACKET_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/tools/packet_source.cc b/third_party/libwebrtc/modules/audio_coding/neteq/tools/packet_source.cc
new file mode 100644
index 0000000000..598ae6edd4
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/tools/packet_source.cc
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/tools/packet_source.h"
+
+namespace webrtc {
+namespace test {
+
+PacketSource::PacketSource() = default;
+
+PacketSource::~PacketSource() = default;
+
+void PacketSource::FilterOutPayloadType(uint8_t payload_type) {
+ filter_.set(payload_type, true);
+}
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/tools/packet_source.h b/third_party/libwebrtc/modules/audio_coding/neteq/tools/packet_source.h
new file mode 100644
index 0000000000..be1705cae1
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/tools/packet_source.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_TOOLS_PACKET_SOURCE_H_
+#define MODULES_AUDIO_CODING_NETEQ_TOOLS_PACKET_SOURCE_H_
+
+#include <bitset>
+#include <memory>
+
+#include "modules/audio_coding/neteq/tools/packet.h"
+
+namespace webrtc {
+namespace test {
+
+// Interface class for an object delivering RTP packets to test applications.
+class PacketSource {
+ public:
+ PacketSource();
+ virtual ~PacketSource();
+
+ PacketSource(const PacketSource&) = delete;
+ PacketSource& operator=(const PacketSource&) = delete;
+
+  // Returns the next packet. Returns nullptr if the source is depleted, or if
+  // an error occurred.
+ virtual std::unique_ptr<Packet> NextPacket() = 0;
+
+ virtual void FilterOutPayloadType(uint8_t payload_type);
+
+ protected:
+ std::bitset<128> filter_; // Payload type is 7 bits in the RFC.
+};
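+
+// A minimal sketch of a concrete source (hypothetical, for illustration
+// only): a subclass that pops packets from a prefilled queue while honoring
+// `filter_`:
+//
+//   class QueuePacketSource : public PacketSource {
+//    public:
+//     std::unique_ptr<Packet> NextPacket() override {
+//       while (!queue_.empty()) {
+//         std::unique_ptr<Packet> p = std::move(queue_.front());
+//         queue_.pop_front();
+//         if (!filter_.test(p->header().payloadType))
+//           return p;
+//       }
+//       return nullptr;
+//     }
+//
+//    private:
+//     std::deque<std::unique_ptr<Packet>> queue_;
+//   };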
+
+} // namespace test
+} // namespace webrtc
+#endif // MODULES_AUDIO_CODING_NETEQ_TOOLS_PACKET_SOURCE_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/tools/packet_unittest.cc b/third_party/libwebrtc/modules/audio_coding/neteq/tools/packet_unittest.cc
new file mode 100644
index 0000000000..69cf56b529
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/tools/packet_unittest.cc
@@ -0,0 +1,226 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+// Unit tests for the test::Packet class.
+
+#include "modules/audio_coding/neteq/tools/packet.h"
+
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace test {
+
+namespace {
+const int kHeaderLengthBytes = 12;
+
+void MakeRtpHeader(int payload_type,
+ int seq_number,
+ uint32_t timestamp,
+ uint32_t ssrc,
+ uint8_t* rtp_data) {
+ rtp_data[0] = 0x80;
+ rtp_data[1] = static_cast<uint8_t>(payload_type);
+ rtp_data[2] = (seq_number >> 8) & 0xFF;
+  rtp_data[3] = seq_number & 0xFF;
+ rtp_data[4] = timestamp >> 24;
+ rtp_data[5] = (timestamp >> 16) & 0xFF;
+ rtp_data[6] = (timestamp >> 8) & 0xFF;
+ rtp_data[7] = timestamp & 0xFF;
+ rtp_data[8] = ssrc >> 24;
+ rtp_data[9] = (ssrc >> 16) & 0xFF;
+ rtp_data[10] = (ssrc >> 8) & 0xFF;
+ rtp_data[11] = ssrc & 0xFF;
+}
+} // namespace
+
+TEST(TestPacket, RegularPacket) {
+ const size_t kPacketLengthBytes = 100;
+ rtc::CopyOnWriteBuffer packet_memory(kPacketLengthBytes);
+ const uint8_t kPayloadType = 17;
+ const uint16_t kSequenceNumber = 4711;
+ const uint32_t kTimestamp = 47114711;
+ const uint32_t kSsrc = 0x12345678;
+ MakeRtpHeader(kPayloadType, kSequenceNumber, kTimestamp, kSsrc,
+ packet_memory.MutableData());
+ const double kPacketTime = 1.0;
+ Packet packet(std::move(packet_memory), kPacketTime);
+ ASSERT_TRUE(packet.valid_header());
+ EXPECT_EQ(kPayloadType, packet.header().payloadType);
+ EXPECT_EQ(kSequenceNumber, packet.header().sequenceNumber);
+ EXPECT_EQ(kTimestamp, packet.header().timestamp);
+ EXPECT_EQ(kSsrc, packet.header().ssrc);
+ EXPECT_EQ(0, packet.header().numCSRCs);
+ EXPECT_EQ(kPacketLengthBytes, packet.packet_length_bytes());
+ EXPECT_EQ(kPacketLengthBytes - kHeaderLengthBytes,
+ packet.payload_length_bytes());
+ EXPECT_EQ(kPacketLengthBytes, packet.virtual_packet_length_bytes());
+ EXPECT_EQ(kPacketLengthBytes - kHeaderLengthBytes,
+ packet.virtual_payload_length_bytes());
+ EXPECT_EQ(kPacketTime, packet.time_ms());
+}
+
+TEST(TestPacket, DummyPacket) {
+ const size_t kPacketLengthBytes = kHeaderLengthBytes; // Only RTP header.
+ const size_t kVirtualPacketLengthBytes = 100;
+ rtc::CopyOnWriteBuffer packet_memory(kPacketLengthBytes);
+ const uint8_t kPayloadType = 17;
+ const uint16_t kSequenceNumber = 4711;
+ const uint32_t kTimestamp = 47114711;
+ const uint32_t kSsrc = 0x12345678;
+ MakeRtpHeader(kPayloadType, kSequenceNumber, kTimestamp, kSsrc,
+ packet_memory.MutableData());
+ const double kPacketTime = 1.0;
+ Packet packet(std::move(packet_memory), kVirtualPacketLengthBytes,
+ kPacketTime);
+ ASSERT_TRUE(packet.valid_header());
+ EXPECT_EQ(kPayloadType, packet.header().payloadType);
+ EXPECT_EQ(kSequenceNumber, packet.header().sequenceNumber);
+ EXPECT_EQ(kTimestamp, packet.header().timestamp);
+ EXPECT_EQ(kSsrc, packet.header().ssrc);
+ EXPECT_EQ(0, packet.header().numCSRCs);
+ EXPECT_EQ(kPacketLengthBytes, packet.packet_length_bytes());
+ EXPECT_EQ(kPacketLengthBytes - kHeaderLengthBytes,
+ packet.payload_length_bytes());
+ EXPECT_EQ(kVirtualPacketLengthBytes, packet.virtual_packet_length_bytes());
+ EXPECT_EQ(kVirtualPacketLengthBytes - kHeaderLengthBytes,
+ packet.virtual_payload_length_bytes());
+ EXPECT_EQ(kPacketTime, packet.time_ms());
+}
+
+TEST(TestPacket, DummyPaddingPacket) {
+ const size_t kPacketLengthBytes = kHeaderLengthBytes; // Only RTP header.
+ const size_t kVirtualPacketLengthBytes = 100;
+ rtc::CopyOnWriteBuffer packet_memory(kPacketLengthBytes);
+ const uint8_t kPayloadType = 17;
+ const uint16_t kSequenceNumber = 4711;
+ const uint32_t kTimestamp = 47114711;
+ const uint32_t kSsrc = 0x12345678;
+ MakeRtpHeader(kPayloadType, kSequenceNumber, kTimestamp, kSsrc,
+ packet_memory.MutableData());
+ packet_memory.MutableData()[0] |= 0b0010'0000; // Set the padding bit.
+ const double kPacketTime = 1.0;
+ Packet packet(std::move(packet_memory), kVirtualPacketLengthBytes,
+ kPacketTime);
+ ASSERT_TRUE(packet.valid_header());
+ EXPECT_EQ(kPayloadType, packet.header().payloadType);
+ EXPECT_EQ(kSequenceNumber, packet.header().sequenceNumber);
+ EXPECT_EQ(kTimestamp, packet.header().timestamp);
+ EXPECT_EQ(kSsrc, packet.header().ssrc);
+ EXPECT_EQ(0, packet.header().numCSRCs);
+ EXPECT_EQ(kPacketLengthBytes, packet.packet_length_bytes());
+ EXPECT_EQ(kPacketLengthBytes - kHeaderLengthBytes,
+ packet.payload_length_bytes());
+ EXPECT_EQ(kVirtualPacketLengthBytes, packet.virtual_packet_length_bytes());
+ EXPECT_EQ(kVirtualPacketLengthBytes - kHeaderLengthBytes,
+ packet.virtual_payload_length_bytes());
+ EXPECT_EQ(kPacketTime, packet.time_ms());
+}
+
+namespace {
+// Writes one RED block header starting at `rtp_data`, according to RFC 2198.
+// Returns the number of bytes written (1 or 4).
+//
+// Format if `last_payload` is false:
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |1| block PT | timestamp offset | block length |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//
+// Format if `last_payload` is true:
+// 0 1 2 3 4 5 6 7
+// +-+-+-+-+-+-+-+-+
+// |0| Block PT |
+// +-+-+-+-+-+-+-+-+
+
+int MakeRedHeader(int payload_type,
+ uint32_t timestamp_offset,
+ int block_length,
+ bool last_payload,
+ uint8_t* rtp_data) {
+ rtp_data[0] = 0x80 | (payload_type & 0x7F); // Set the first bit to 1.
+ if (last_payload) {
+    rtp_data[0] &= 0x7F;  // Reset the first bit to 0 to indicate last block.
+ return 1;
+ }
+ rtp_data[1] = timestamp_offset >> 6;
+ rtp_data[2] = (timestamp_offset & 0x3F) << 2;
+ rtp_data[2] |= block_length >> 8;
+ rtp_data[3] = block_length & 0xFF;
+ return 4;
+}
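+
+// Worked example (illustrative): MakeRedHeader(2, 200, 20, false, rtp_data)
+// writes {0x82, 0x03, 0x20, 0x14}: payload type 2 with the extension bit set,
+// timestamp offset 200 = (0x03 << 6) + (0x20 >> 2), and block length 20.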
+} // namespace
+
+TEST(TestPacket, RED) {
+ const size_t kPacketLengthBytes = 100;
+ rtc::CopyOnWriteBuffer packet_memory(kPacketLengthBytes);
+ const uint8_t kRedPayloadType = 17;
+ const uint16_t kSequenceNumber = 4711;
+ const uint32_t kTimestamp = 47114711;
+ const uint32_t kSsrc = 0x12345678;
+ MakeRtpHeader(kRedPayloadType, kSequenceNumber, kTimestamp, kSsrc,
+ packet_memory.MutableData());
+ // Create four RED headers.
+  // Payload types are just the same as the block index; the offset is 100
+  // times the block index.
+ const int kRedBlocks = 4;
+ uint8_t* payload_ptr = packet_memory.MutableData() +
+ kHeaderLengthBytes; // First byte after header.
+ for (int i = 0; i < kRedBlocks; ++i) {
+ int payload_type = i;
+ // Offset value is not used for the last block.
+ uint32_t timestamp_offset = 100 * i;
+ int block_length = 10 * i;
+    bool last_block = (i == kRedBlocks - 1);
+ payload_ptr += MakeRedHeader(payload_type, timestamp_offset, block_length,
+ last_block, payload_ptr);
+ }
+ const double kPacketTime = 1.0;
+  // `packet` gets its own reference to the underlying `packet_memory` buffer
+  // (CopyOnWriteBuffer shares the data).
+ Packet packet(packet_memory, kPacketLengthBytes, kPacketTime);
+ ASSERT_TRUE(packet.valid_header());
+ EXPECT_EQ(kRedPayloadType, packet.header().payloadType);
+ EXPECT_EQ(kSequenceNumber, packet.header().sequenceNumber);
+ EXPECT_EQ(kTimestamp, packet.header().timestamp);
+ EXPECT_EQ(kSsrc, packet.header().ssrc);
+ EXPECT_EQ(0, packet.header().numCSRCs);
+ EXPECT_EQ(kPacketLengthBytes, packet.packet_length_bytes());
+ EXPECT_EQ(kPacketLengthBytes - kHeaderLengthBytes,
+ packet.payload_length_bytes());
+ EXPECT_EQ(kPacketLengthBytes, packet.virtual_packet_length_bytes());
+ EXPECT_EQ(kPacketLengthBytes - kHeaderLengthBytes,
+ packet.virtual_payload_length_bytes());
+ EXPECT_EQ(kPacketTime, packet.time_ms());
+ std::list<RTPHeader*> red_headers;
+ EXPECT_TRUE(packet.ExtractRedHeaders(&red_headers));
+ EXPECT_EQ(kRedBlocks, static_cast<int>(red_headers.size()));
+ int block_index = 0;
+ for (std::list<RTPHeader*>::reverse_iterator it = red_headers.rbegin();
+ it != red_headers.rend(); ++it) {
+ // Reading list from the back, since the extraction puts the main payload
+ // (which is the last one on wire) first.
+ RTPHeader* red_block = *it;
+ EXPECT_EQ(block_index, red_block->payloadType);
+ EXPECT_EQ(kSequenceNumber, red_block->sequenceNumber);
+ if (block_index == kRedBlocks - 1) {
+ // Last block has zero offset per definition.
+ EXPECT_EQ(kTimestamp, red_block->timestamp);
+ } else {
+ EXPECT_EQ(kTimestamp - 100 * block_index, red_block->timestamp);
+ }
+ EXPECT_EQ(kSsrc, red_block->ssrc);
+ EXPECT_EQ(0, red_block->numCSRCs);
+ ++block_index;
+ }
+ Packet::DeleteRedHeaders(&red_headers);
+}
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/tools/resample_input_audio_file.cc b/third_party/libwebrtc/modules/audio_coding/neteq/tools/resample_input_audio_file.cc
new file mode 100644
index 0000000000..5050e1fb17
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/tools/resample_input_audio_file.cc
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/tools/resample_input_audio_file.h"
+
+#include <memory>
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace test {
+
+bool ResampleInputAudioFile::Read(size_t samples,
+ int output_rate_hz,
+ int16_t* destination) {
+ const size_t samples_to_read = samples * file_rate_hz_ / output_rate_hz;
+ RTC_CHECK_EQ(samples_to_read * output_rate_hz, samples * file_rate_hz_)
+ << "Frame size and sample rates don't add up to an integer.";
+ std::unique_ptr<int16_t[]> temp_destination(new int16_t[samples_to_read]);
+ if (!InputAudioFile::Read(samples_to_read, temp_destination.get()))
+ return false;
+ resampler_.ResetIfNeeded(file_rate_hz_, output_rate_hz, 1);
+ size_t output_length = 0;
+ RTC_CHECK_EQ(resampler_.Push(temp_destination.get(), samples_to_read,
+ destination, samples, output_length),
+ 0);
+ RTC_CHECK_EQ(samples, output_length);
+ return true;
+}
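+
+// Example of the rate math above: reading 480 samples at 48000 Hz from a
+// 16000 Hz file requires 480 * 16000 / 48000 = 160 file samples, and the
+// check 160 * 48000 == 480 * 16000 confirms that the ratio is exact.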
+
+bool ResampleInputAudioFile::Read(size_t samples, int16_t* destination) {
+ RTC_CHECK_GT(output_rate_hz_, 0) << "Output rate not set.";
+ return Read(samples, output_rate_hz_, destination);
+}
+
+void ResampleInputAudioFile::set_output_rate_hz(int rate_hz) {
+ output_rate_hz_ = rate_hz;
+}
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/tools/resample_input_audio_file.h b/third_party/libwebrtc/modules/audio_coding/neteq/tools/resample_input_audio_file.h
new file mode 100644
index 0000000000..279fece616
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/tools/resample_input_audio_file.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_TOOLS_RESAMPLE_INPUT_AUDIO_FILE_H_
+#define MODULES_AUDIO_CODING_NETEQ_TOOLS_RESAMPLE_INPUT_AUDIO_FILE_H_
+
+#include <string>
+
+#include "absl/strings/string_view.h"
+#include "common_audio/resampler/include/resampler.h"
+#include "modules/audio_coding/neteq/tools/input_audio_file.h"
+
+namespace webrtc {
+namespace test {
+
+// Class for handling a looping input audio file with resampling.
+class ResampleInputAudioFile : public InputAudioFile {
+ public:
+ ResampleInputAudioFile(absl::string_view file_name,
+ int file_rate_hz,
+ bool loop_at_end = true)
+ : InputAudioFile(file_name, loop_at_end),
+ file_rate_hz_(file_rate_hz),
+ output_rate_hz_(-1) {}
+ ResampleInputAudioFile(absl::string_view file_name,
+ int file_rate_hz,
+ int output_rate_hz,
+ bool loop_at_end = true)
+ : InputAudioFile(file_name, loop_at_end),
+ file_rate_hz_(file_rate_hz),
+ output_rate_hz_(output_rate_hz) {}
+
+ ResampleInputAudioFile(const ResampleInputAudioFile&) = delete;
+ ResampleInputAudioFile& operator=(const ResampleInputAudioFile&) = delete;
+
+ bool Read(size_t samples, int output_rate_hz, int16_t* destination);
+ bool Read(size_t samples, int16_t* destination) override;
+ void set_output_rate_hz(int rate_hz);
+
+ private:
+ const int file_rate_hz_;
+ int output_rate_hz_;
+ Resampler resampler_;
+};
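+
+// A minimal usage sketch (the file name is assumed): open an 8 kHz file and
+// read 10 ms frames resampled to 16 kHz:
+//
+//   ResampleInputAudioFile file("speech_8khz.pcm", 8000, 16000);
+//   int16_t frame[160];  // 10 ms at 16 kHz.
+//   while (file.Read(160, frame)) { /* Process `frame`. */ }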
+
+} // namespace test
+} // namespace webrtc
+#endif // MODULES_AUDIO_CODING_NETEQ_TOOLS_RESAMPLE_INPUT_AUDIO_FILE_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/tools/rtc_event_log_source.cc b/third_party/libwebrtc/modules/audio_coding/neteq/tools/rtc_event_log_source.cc
new file mode 100644
index 0000000000..1407aab5f2
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/tools/rtc_event_log_source.cc
@@ -0,0 +1,168 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/tools/rtc_event_log_source.h"
+
+#include <string.h>
+
+#include <iostream>
+#include <limits>
+#include <memory>
+#include <set>
+#include <utility>
+
+#include "absl/strings/string_view.h"
+#include "logging/rtc_event_log/rtc_event_processor.h"
+#include "modules/audio_coding/neteq/tools/packet.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace test {
+
+namespace {
+bool ShouldSkipStream(ParsedRtcEventLog::MediaType media_type,
+ uint32_t ssrc,
+ absl::optional<uint32_t> ssrc_filter) {
+ if (media_type != ParsedRtcEventLog::MediaType::AUDIO)
+ return true;
+ if (ssrc_filter.has_value() && ssrc != *ssrc_filter)
+ return true;
+ return false;
+}
+} // namespace
+
+std::unique_ptr<RtcEventLogSource> RtcEventLogSource::CreateFromFile(
+ absl::string_view file_name,
+ absl::optional<uint32_t> ssrc_filter) {
+ auto source = std::unique_ptr<RtcEventLogSource>(new RtcEventLogSource());
+ ParsedRtcEventLog parsed_log;
+ auto status = parsed_log.ParseFile(file_name);
+ if (!status.ok()) {
+ std::cerr << "Failed to parse event log: " << status.message() << std::endl;
+ std::cerr << "Skipping log." << std::endl;
+ return nullptr;
+ }
+ if (!source->Initialize(parsed_log, ssrc_filter)) {
+ std::cerr << "Failed to initialize source from event log, skipping."
+ << std::endl;
+ return nullptr;
+ }
+ return source;
+}
+
+std::unique_ptr<RtcEventLogSource> RtcEventLogSource::CreateFromString(
+ absl::string_view file_contents,
+ absl::optional<uint32_t> ssrc_filter) {
+ auto source = std::unique_ptr<RtcEventLogSource>(new RtcEventLogSource());
+ ParsedRtcEventLog parsed_log;
+ auto status = parsed_log.ParseString(file_contents);
+ if (!status.ok()) {
+ std::cerr << "Failed to parse event log: " << status.message() << std::endl;
+ std::cerr << "Skipping log." << std::endl;
+ return nullptr;
+ }
+ if (!source->Initialize(parsed_log, ssrc_filter)) {
+ std::cerr << "Failed to initialize source from event log, skipping."
+ << std::endl;
+ return nullptr;
+ }
+ return source;
+}
+
+RtcEventLogSource::~RtcEventLogSource() {}
+
+std::unique_ptr<Packet> RtcEventLogSource::NextPacket() {
+ if (rtp_packet_index_ >= rtp_packets_.size())
+ return nullptr;
+
+ std::unique_ptr<Packet> packet = std::move(rtp_packets_[rtp_packet_index_++]);
+ return packet;
+}
+
+int64_t RtcEventLogSource::NextAudioOutputEventMs() {
+ if (audio_output_index_ >= audio_outputs_.size())
+ return std::numeric_limits<int64_t>::max();
+
+ int64_t output_time_ms = audio_outputs_[audio_output_index_++];
+ return output_time_ms;
+}
+
+RtcEventLogSource::RtcEventLogSource() : PacketSource() {}
+
+bool RtcEventLogSource::Initialize(const ParsedRtcEventLog& parsed_log,
+ absl::optional<uint32_t> ssrc_filter) {
+ const auto first_log_end_time_us =
+ parsed_log.stop_log_events().empty()
+ ? std::numeric_limits<int64_t>::max()
+ : parsed_log.stop_log_events().front().log_time_us();
+
+ std::set<uint32_t> packet_ssrcs;
+ auto handle_rtp_packet =
+ [this, first_log_end_time_us,
+ &packet_ssrcs](const webrtc::LoggedRtpPacketIncoming& incoming) {
+ if (!filter_.test(incoming.rtp.header.payloadType) &&
+ incoming.log_time_us() < first_log_end_time_us) {
+ rtp_packets_.emplace_back(std::make_unique<Packet>(
+ incoming.rtp.header, incoming.rtp.total_length,
+ incoming.rtp.total_length - incoming.rtp.header_length,
+ static_cast<double>(incoming.log_time_ms())));
+ packet_ssrcs.insert(rtp_packets_.back()->header().ssrc);
+ }
+ };
+
+ std::set<uint32_t> ignored_ssrcs;
+ auto handle_audio_playout =
+ [this, first_log_end_time_us, &packet_ssrcs,
+ &ignored_ssrcs](const webrtc::LoggedAudioPlayoutEvent& audio_playout) {
+ if (audio_playout.log_time_us() < first_log_end_time_us) {
+ if (packet_ssrcs.count(audio_playout.ssrc) > 0) {
+ audio_outputs_.emplace_back(audio_playout.log_time_ms());
+ } else {
+ ignored_ssrcs.insert(audio_playout.ssrc);
+ }
+ }
+ };
+
+ // This wouldn't be needed if we knew that there was at most one audio stream.
+ webrtc::RtcEventProcessor event_processor;
+ for (const auto& rtp_packets : parsed_log.incoming_rtp_packets_by_ssrc()) {
+ ParsedRtcEventLog::MediaType media_type =
+ parsed_log.GetMediaType(rtp_packets.ssrc, webrtc::kIncomingPacket);
+ if (ShouldSkipStream(media_type, rtp_packets.ssrc, ssrc_filter)) {
+ continue;
+ }
+ event_processor.AddEvents(rtp_packets.incoming_packets, handle_rtp_packet);
+ // If no SSRC filter has been set, use the first SSRC only. The simulator
+ // does not work properly with interleaved packets from multiple SSRCs.
+ if (!ssrc_filter.has_value()) {
+ ssrc_filter = rtp_packets.ssrc;
+ }
+ }
+
+ for (const auto& audio_playouts : parsed_log.audio_playout_events()) {
+ if (ssrc_filter.has_value() && audio_playouts.first != *ssrc_filter)
+ continue;
+ event_processor.AddEvents(audio_playouts.second, handle_audio_playout);
+ }
+
+ // Fills in rtp_packets_ and audio_outputs_.
+ event_processor.ProcessEventsInOrder();
+
+ for (const auto& ssrc : ignored_ssrcs) {
+ std::cout << "Ignoring GetAudio events from SSRC 0x" << std::hex << ssrc
+ << " because no packets were found with a matching SSRC."
+ << std::endl;
+ }
+
+ return true;
+}
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/tools/rtc_event_log_source.h b/third_party/libwebrtc/modules/audio_coding/neteq/tools/rtc_event_log_source.h
new file mode 100644
index 0000000000..c67912a67d
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/tools/rtc_event_log_source.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_TOOLS_RTC_EVENT_LOG_SOURCE_H_
+#define MODULES_AUDIO_CODING_NETEQ_TOOLS_RTC_EVENT_LOG_SOURCE_H_
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "absl/strings/string_view.h"
+#include "absl/types/optional.h"
+#include "logging/rtc_event_log/rtc_event_log_parser.h"
+#include "modules/audio_coding/neteq/tools/packet_source.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+
+namespace webrtc {
+
+class RtpHeaderParser;
+
+namespace test {
+
+class Packet;
+
+class RtcEventLogSource : public PacketSource {
+ public:
+ // Creates an RtcEventLogSource reading from `file_name`. If the file cannot
+  // be opened, or has the wrong format, nullptr will be returned.
+ static std::unique_ptr<RtcEventLogSource> CreateFromFile(
+ absl::string_view file_name,
+ absl::optional<uint32_t> ssrc_filter);
+ // Same as above, but uses a string with the file contents.
+ static std::unique_ptr<RtcEventLogSource> CreateFromString(
+ absl::string_view file_contents,
+ absl::optional<uint32_t> ssrc_filter);
+
+ virtual ~RtcEventLogSource();
+
+ RtcEventLogSource(const RtcEventLogSource&) = delete;
+ RtcEventLogSource& operator=(const RtcEventLogSource&) = delete;
+
+ std::unique_ptr<Packet> NextPacket() override;
+
+ // Returns the timestamp of the next audio output event, in milliseconds. The
+ // maximum value of int64_t is returned if there are no more audio output
+ // events available.
+ int64_t NextAudioOutputEventMs();
+
+ private:
+ RtcEventLogSource();
+
+ bool Initialize(const ParsedRtcEventLog& parsed_log,
+ absl::optional<uint32_t> ssrc_filter);
+
+ std::vector<std::unique_ptr<Packet>> rtp_packets_;
+ size_t rtp_packet_index_ = 0;
+ std::vector<int64_t> audio_outputs_;
+ size_t audio_output_index_ = 0;
+};
+
+} // namespace test
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_CODING_NETEQ_TOOLS_RTC_EVENT_LOG_SOURCE_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/tools/rtp_analyze.cc b/third_party/libwebrtc/modules/audio_coding/neteq/tools/rtp_analyze.cc
new file mode 100644
index 0000000000..7ecf925ebb
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/tools/rtp_analyze.cc
@@ -0,0 +1,166 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stdio.h>
+
+#include <memory>
+#include <vector>
+
+#include "absl/flags/flag.h"
+#include "absl/flags/parse.h"
+#include "modules/audio_coding/neteq/tools/packet.h"
+#include "modules/audio_coding/neteq/tools/rtp_file_source.h"
+
+ABSL_FLAG(int, red, 117, "RTP payload type for RED");
+ABSL_FLAG(int,
+ audio_level,
+ -1,
+ "Extension ID for audio level (RFC 6464); "
+ "-1 not to print audio level");
+ABSL_FLAG(int,
+ abs_send_time,
+ -1,
+ "Extension ID for absolute sender time; "
+ "-1 not to print absolute send time");
+
+int main(int argc, char* argv[]) {
+ std::vector<char*> args = absl::ParseCommandLine(argc, argv);
+ std::string usage =
+ "Tool for parsing an RTP dump file to text output.\n"
+ "Example usage:\n"
+ "./rtp_analyze input.rtp output.txt\n\n"
+ "Output is sent to stdout if no output file is given. "
+ "Note that this tool can read files with or without payloads.\n";
+ if (args.size() != 2 && args.size() != 3) {
+ printf("%s", usage.c_str());
+ return 1;
+ }
+
+ RTC_CHECK(absl::GetFlag(FLAGS_red) >= 0 &&
+ absl::GetFlag(FLAGS_red) <= 127); // Payload type
+ RTC_CHECK(absl::GetFlag(FLAGS_audio_level) == -1 || // Default
+ (absl::GetFlag(FLAGS_audio_level) > 0 &&
+ absl::GetFlag(FLAGS_audio_level) <= 255)); // Extension ID
+ RTC_CHECK(absl::GetFlag(FLAGS_abs_send_time) == -1 || // Default
+ (absl::GetFlag(FLAGS_abs_send_time) > 0 &&
+ absl::GetFlag(FLAGS_abs_send_time) <= 255)); // Extension ID
+
+ printf("Input file: %s\n", args[1]);
+ std::unique_ptr<webrtc::test::RtpFileSource> file_source(
+ webrtc::test::RtpFileSource::Create(args[1]));
+ RTC_DCHECK(file_source.get());
+ // Set RTP extension IDs.
+ bool print_audio_level = false;
+ if (absl::GetFlag(FLAGS_audio_level) != -1) {
+ print_audio_level = true;
+ file_source->RegisterRtpHeaderExtension(webrtc::kRtpExtensionAudioLevel,
+ absl::GetFlag(FLAGS_audio_level));
+ }
+ bool print_abs_send_time = false;
+ if (absl::GetFlag(FLAGS_abs_send_time) != -1) {
+ print_abs_send_time = true;
+ file_source->RegisterRtpHeaderExtension(
+ webrtc::kRtpExtensionAbsoluteSendTime,
+ absl::GetFlag(FLAGS_abs_send_time));
+ }
+
+ FILE* out_file;
+ if (args.size() == 3) {
+ out_file = fopen(args[2], "wt");
+ if (!out_file) {
+ printf("Cannot open output file %s\n", args[2]);
+ return -1;
+ }
+ printf("Output file: %s\n\n", args[2]);
+ } else {
+ out_file = stdout;
+ }
+
+ // Print file header.
+ fprintf(out_file, "SeqNo TimeStamp SendTime Size PT M SSRC");
+ if (print_audio_level) {
+ fprintf(out_file, " AuLvl (V)");
+ }
+ if (print_abs_send_time) {
+ fprintf(out_file, " AbsSendTime");
+ }
+ fprintf(out_file, "\n");
+
+ uint32_t max_abs_send_time = 0;
+ int cycles = -1;
+ std::unique_ptr<webrtc::test::Packet> packet;
+ while (true) {
+ packet = file_source->NextPacket();
+ if (!packet.get()) {
+ // End of file reached.
+ break;
+ }
+ // Write packet data to file. Use virtual_packet_length_bytes so that the
+ // correct packet sizes are printed also for RTP header-only dumps.
+ fprintf(out_file, "%5u %10u %10u %5i %5i %2i %#08X",
+ packet->header().sequenceNumber, packet->header().timestamp,
+ static_cast<unsigned int>(packet->time_ms()),
+ static_cast<int>(packet->virtual_packet_length_bytes()),
+ packet->header().payloadType, packet->header().markerBit,
+ packet->header().ssrc);
+ if (print_audio_level && packet->header().extension.hasAudioLevel) {
+ fprintf(out_file, " %5u (%1i)", packet->header().extension.audioLevel,
+ packet->header().extension.voiceActivity);
+ }
+ if (print_abs_send_time && packet->header().extension.hasAbsoluteSendTime) {
+ if (cycles == -1) {
+ // Initialize.
+ max_abs_send_time = packet->header().extension.absoluteSendTime;
+ cycles = 0;
+ }
+ // Abs sender time is 24 bit 6.18 fixed point. Shift by 8 to normalize to
+ // 32 bits (unsigned). Calculate the difference between this packet's
+ // send time and the maximum observed. Cast to signed 32-bit to get the
+ // desired wrap-around behavior.
+ if (static_cast<int32_t>(
+ (packet->header().extension.absoluteSendTime << 8) -
+ (max_abs_send_time << 8)) >= 0) {
+ // The difference is non-negative, meaning that this packet is newer
+ // than the previously observed maximum absolute send time.
+ if (packet->header().extension.absoluteSendTime < max_abs_send_time) {
+ // Wrap detected.
+ cycles++;
+ }
+ max_abs_send_time = packet->header().extension.absoluteSendTime;
+ }
+ // Abs sender time is 24 bit 6.18 fixed point. Divide by 2^18 to convert
+ // to floating point representation.
+ double send_time_seconds =
+ static_cast<double>(packet->header().extension.absoluteSendTime) /
+ 262144 +
+ 64.0 * cycles;
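+      // Example: absoluteSendTime = 0x400000 (= 4194304) gives
+      // 4194304 / 262144 = 16.0 seconds into the current 64-second cycle.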
+ fprintf(out_file, " %11f", send_time_seconds);
+ }
+ fprintf(out_file, "\n");
+
+ if (packet->header().payloadType == absl::GetFlag(FLAGS_red)) {
+ std::list<webrtc::RTPHeader*> red_headers;
+ packet->ExtractRedHeaders(&red_headers);
+ while (!red_headers.empty()) {
+ webrtc::RTPHeader* red = red_headers.front();
+ RTC_DCHECK(red);
+ fprintf(out_file, "* %5u %10u %10u %5i\n", red->sequenceNumber,
+ red->timestamp, static_cast<unsigned int>(packet->time_ms()),
+ red->payloadType);
+ red_headers.pop_front();
+ delete red;
+ }
+ }
+ }
+
+ fclose(out_file);
+
+ return 0;
+}
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/tools/rtp_encode.cc b/third_party/libwebrtc/modules/audio_coding/neteq/tools/rtp_encode.cc
new file mode 100644
index 0000000000..6aeeb6d129
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/tools/rtp_encode.cc
@@ -0,0 +1,359 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stdio.h>
+
+#ifdef WIN32
+#include <winsock2.h>
+#endif
+#if defined(WEBRTC_LINUX) || defined(WEBRTC_FUCHSIA)
+#include <netinet/in.h>
+#endif
+
+#include <iostream>
+#include <map>
+#include <string>
+#include <vector>
+
+#include "absl/flags/flag.h"
+#include "absl/flags/parse.h"
+#include "absl/memory/memory.h"
+#include "api/audio/audio_frame.h"
+#include "api/audio_codecs/L16/audio_encoder_L16.h"
+#include "api/audio_codecs/g711/audio_encoder_g711.h"
+#include "api/audio_codecs/g722/audio_encoder_g722.h"
+#include "api/audio_codecs/ilbc/audio_encoder_ilbc.h"
+#include "api/audio_codecs/isac/audio_encoder_isac.h"
+#include "api/audio_codecs/opus/audio_encoder_opus.h"
+#include "modules/audio_coding/codecs/cng/audio_encoder_cng.h"
+#include "modules/audio_coding/include/audio_coding_module.h"
+#include "modules/audio_coding/neteq/tools/input_audio_file.h"
+#include "rtc_base/numerics/safe_conversions.h"
+
+ABSL_FLAG(bool, list_codecs, false, "Enumerate all codecs");
+ABSL_FLAG(std::string, codec, "opus", "Codec to use");
+ABSL_FLAG(int,
+ frame_len,
+ 0,
+ "Frame length in ms; 0 indicates codec default value");
+ABSL_FLAG(int, bitrate, 0, "Bitrate in kbps; 0 indicates codec default value");
+ABSL_FLAG(int,
+ payload_type,
+ -1,
+ "RTP payload type; -1 indicates codec default value");
+ABSL_FLAG(int,
+ cng_payload_type,
+ -1,
+ "RTP payload type for CNG; -1 indicates default value");
+ABSL_FLAG(int, ssrc, 0, "SSRC to write to the RTP header");
+ABSL_FLAG(bool, dtx, false, "Use DTX/CNG");
+ABSL_FLAG(int, sample_rate, 48000, "Sample rate of the input file");
+
+namespace webrtc {
+namespace test {
+namespace {
+
+// Add new codecs here, and to the map below.
+enum class CodecType {
+ kOpus,
+ kPcmU,
+ kPcmA,
+ kG722,
+ kPcm16b8,
+ kPcm16b16,
+ kPcm16b32,
+ kPcm16b48,
+ kIlbc,
+ kIsac
+};
+
+struct CodecTypeAndInfo {
+ CodecType type;
+ int default_payload_type;
+ bool internal_dtx;
+};
+
+// List all supported codecs here. This map defines the command-line parameter
+// value (the key string) for selecting each codec, together with information
+// whether it is using internal or external DTX/CNG.
+const std::map<std::string, CodecTypeAndInfo>& CodecList() {
+ static const auto* const codec_list =
+ new std::map<std::string, CodecTypeAndInfo>{
+ {"opus", {CodecType::kOpus, 111, true}},
+ {"pcmu", {CodecType::kPcmU, 0, false}},
+ {"pcma", {CodecType::kPcmA, 8, false}},
+ {"g722", {CodecType::kG722, 9, false}},
+ {"pcm16b_8", {CodecType::kPcm16b8, 93, false}},
+ {"pcm16b_16", {CodecType::kPcm16b16, 94, false}},
+ {"pcm16b_32", {CodecType::kPcm16b32, 95, false}},
+ {"pcm16b_48", {CodecType::kPcm16b48, 96, false}},
+ {"ilbc", {CodecType::kIlbc, 102, false}},
+ {"isac", {CodecType::kIsac, 103, false}}};
+ return *codec_list;
+}
+
+// This class will receive callbacks from ACM when a packet is ready, and write
+// it to the output file.
+class Packetizer : public AudioPacketizationCallback {
+ public:
+ Packetizer(FILE* out_file, uint32_t ssrc, int timestamp_rate_hz)
+ : out_file_(out_file),
+ ssrc_(ssrc),
+ timestamp_rate_hz_(timestamp_rate_hz) {}
+
+ int32_t SendData(AudioFrameType frame_type,
+ uint8_t payload_type,
+ uint32_t timestamp,
+ const uint8_t* payload_data,
+ size_t payload_len_bytes,
+ int64_t absolute_capture_timestamp_ms) override {
+ if (payload_len_bytes == 0) {
+ return 0;
+ }
+
+ constexpr size_t kRtpHeaderLength = 12;
+ constexpr size_t kRtpDumpHeaderLength = 8;
+ const uint16_t length = htons(rtc::checked_cast<uint16_t>(
+ kRtpHeaderLength + kRtpDumpHeaderLength + payload_len_bytes));
+ const uint16_t plen = htons(
+ rtc::checked_cast<uint16_t>(kRtpHeaderLength + payload_len_bytes));
+ const uint32_t offset = htonl(timestamp / (timestamp_rate_hz_ / 1000));
+ RTC_CHECK_EQ(fwrite(&length, sizeof(uint16_t), 1, out_file_), 1);
+ RTC_CHECK_EQ(fwrite(&plen, sizeof(uint16_t), 1, out_file_), 1);
+ RTC_CHECK_EQ(fwrite(&offset, sizeof(uint32_t), 1, out_file_), 1);
+
+ const uint8_t rtp_header[] = {0x80,
+ static_cast<uint8_t>(payload_type & 0x7F),
+ static_cast<uint8_t>(sequence_number_ >> 8),
+ static_cast<uint8_t>(sequence_number_),
+ static_cast<uint8_t>(timestamp >> 24),
+ static_cast<uint8_t>(timestamp >> 16),
+ static_cast<uint8_t>(timestamp >> 8),
+ static_cast<uint8_t>(timestamp),
+ static_cast<uint8_t>(ssrc_ >> 24),
+ static_cast<uint8_t>(ssrc_ >> 16),
+ static_cast<uint8_t>(ssrc_ >> 8),
+ static_cast<uint8_t>(ssrc_)};
+ static_assert(sizeof(rtp_header) == kRtpHeaderLength, "");
+ RTC_CHECK_EQ(
+ fwrite(rtp_header, sizeof(uint8_t), kRtpHeaderLength, out_file_),
+ kRtpHeaderLength);
+ ++sequence_number_; // Intended to wrap on overflow.
+
+ RTC_CHECK_EQ(
+ fwrite(payload_data, sizeof(uint8_t), payload_len_bytes, out_file_),
+ payload_len_bytes);
+
+ return 0;
+ }
+
+ private:
+ FILE* const out_file_;
+ const uint32_t ssrc_;
+ const int timestamp_rate_hz_;
+ uint16_t sequence_number_ = 0;
+};
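+
+// Worked example of the rtpdump record layout above (illustrative numbers):
+// a 160-byte payload at timestamp 16000 with timestamp_rate_hz_ = 8000 gives
+// length = 8 + 12 + 160 = 180, plen = 12 + 160 = 172, and
+// offset = 16000 / 8 = 2000 ms, each written in network byte order.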
+
+void SetFrameLenIfFlagIsPositive(int* config_frame_len) {
+ if (absl::GetFlag(FLAGS_frame_len) > 0) {
+ *config_frame_len = absl::GetFlag(FLAGS_frame_len);
+ }
+}
+
+template <typename T>
+typename T::Config GetCodecConfig() {
+ typename T::Config config;
+ SetFrameLenIfFlagIsPositive(&config.frame_size_ms);
+ RTC_CHECK(config.IsOk());
+ return config;
+}
+
+AudioEncoderL16::Config Pcm16bConfig(CodecType codec_type) {
+ auto config = GetCodecConfig<AudioEncoderL16>();
+ switch (codec_type) {
+ case CodecType::kPcm16b8:
+ config.sample_rate_hz = 8000;
+ return config;
+ case CodecType::kPcm16b16:
+ config.sample_rate_hz = 16000;
+ return config;
+ case CodecType::kPcm16b32:
+ config.sample_rate_hz = 32000;
+ return config;
+ case CodecType::kPcm16b48:
+ config.sample_rate_hz = 48000;
+ return config;
+ default:
+ RTC_DCHECK_NOTREACHED();
+ return config;
+ }
+}
+
+std::unique_ptr<AudioEncoder> CreateEncoder(CodecType codec_type,
+ int payload_type) {
+ switch (codec_type) {
+ case CodecType::kOpus: {
+ AudioEncoderOpus::Config config = GetCodecConfig<AudioEncoderOpus>();
+ if (absl::GetFlag(FLAGS_bitrate) > 0) {
+ config.bitrate_bps = absl::GetFlag(FLAGS_bitrate);
+ }
+ config.dtx_enabled = absl::GetFlag(FLAGS_dtx);
+ RTC_CHECK(config.IsOk());
+ return AudioEncoderOpus::MakeAudioEncoder(config, payload_type);
+ }
+
+ case CodecType::kPcmU:
+ case CodecType::kPcmA: {
+ AudioEncoderG711::Config config = GetCodecConfig<AudioEncoderG711>();
+ config.type = codec_type == CodecType::kPcmU
+ ? AudioEncoderG711::Config::Type::kPcmU
+ : AudioEncoderG711::Config::Type::kPcmA;
+ RTC_CHECK(config.IsOk());
+ return AudioEncoderG711::MakeAudioEncoder(config, payload_type);
+ }
+
+ case CodecType::kG722: {
+ return AudioEncoderG722::MakeAudioEncoder(
+ GetCodecConfig<AudioEncoderG722>(), payload_type);
+ }
+
+ case CodecType::kPcm16b8:
+ case CodecType::kPcm16b16:
+ case CodecType::kPcm16b32:
+ case CodecType::kPcm16b48: {
+ return AudioEncoderL16::MakeAudioEncoder(Pcm16bConfig(codec_type),
+ payload_type);
+ }
+
+ case CodecType::kIlbc: {
+ return AudioEncoderIlbc::MakeAudioEncoder(
+ GetCodecConfig<AudioEncoderIlbc>(), payload_type);
+ }
+
+ case CodecType::kIsac: {
+ return AudioEncoderIsac::MakeAudioEncoder(
+ GetCodecConfig<AudioEncoderIsac>(), payload_type);
+ }
+ }
+ RTC_DCHECK_NOTREACHED();
+ return nullptr;
+}
+
+AudioEncoderCngConfig GetCngConfig(int sample_rate_hz) {
+ AudioEncoderCngConfig cng_config;
+ const auto default_payload_type = [&] {
+ switch (sample_rate_hz) {
+ case 8000:
+ return 13;
+ case 16000:
+ return 98;
+ case 32000:
+ return 99;
+ case 48000:
+ return 100;
+ default:
+ RTC_DCHECK_NOTREACHED();
+ }
+ return 0;
+ };
+ cng_config.payload_type = absl::GetFlag(FLAGS_cng_payload_type) != -1
+ ? absl::GetFlag(FLAGS_cng_payload_type)
+ : default_payload_type();
+ return cng_config;
+}
+
+int RunRtpEncode(int argc, char* argv[]) {
+ std::vector<char*> args = absl::ParseCommandLine(argc, argv);
+ const std::string usage =
+ "Tool for generating an RTP dump file from audio input.\n"
+ "Example usage:\n"
+ "./rtp_encode input.pcm output.rtp --codec=[codec] "
+ "--frame_len=[frame_len] --bitrate=[bitrate]\n\n";
+ if (!absl::GetFlag(FLAGS_list_codecs) && args.size() != 3) {
+ printf("%s", usage.c_str());
+ return 1;
+ }
+
+ if (absl::GetFlag(FLAGS_list_codecs)) {
+ printf("The following arguments are valid --codec parameters:\n");
+ for (const auto& c : CodecList()) {
+ printf(" %s\n", c.first.c_str());
+ }
+ return 0;
+ }
+
+ const auto codec_it = CodecList().find(absl::GetFlag(FLAGS_codec));
+ if (codec_it == CodecList().end()) {
+ printf("%s is not a valid codec name.\n",
+ absl::GetFlag(FLAGS_codec).c_str());
+ printf("Use argument --list_codecs to see all valid codec names.\n");
+ return 1;
+ }
+
+ // Create the codec.
+ const int payload_type = absl::GetFlag(FLAGS_payload_type) == -1
+ ? codec_it->second.default_payload_type
+ : absl::GetFlag(FLAGS_payload_type);
+ std::unique_ptr<AudioEncoder> codec =
+ CreateEncoder(codec_it->second.type, payload_type);
+
+ // Create an external VAD/CNG encoder if needed.
+ if (absl::GetFlag(FLAGS_dtx) && !codec_it->second.internal_dtx) {
+ AudioEncoderCngConfig cng_config = GetCngConfig(codec->SampleRateHz());
+ RTC_DCHECK(codec);
+ cng_config.speech_encoder = std::move(codec);
+ codec = CreateComfortNoiseEncoder(std::move(cng_config));
+ }
+ RTC_DCHECK(codec);
+
+ // Set up ACM.
+ const int timestamp_rate_hz = codec->RtpTimestampRateHz();
+ AudioCodingModule::Config config;
+ std::unique_ptr<AudioCodingModule> acm(AudioCodingModule::Create(config));
+ acm->SetEncoder(std::move(codec));
+
+ // Open files.
+ printf("Input file: %s\n", args[1]);
+ InputAudioFile input_file(args[1], false); // Open input in non-looping mode.
+ FILE* out_file = fopen(args[2], "wb");
+ RTC_CHECK(out_file) << "Could not open file " << args[2] << " for writing";
+ printf("Output file: %s\n", args[2]);
+  fprintf(out_file, "#!rtpplay1.0 \n");
+ // Write 3 32-bit values followed by 2 16-bit values, all set to 0. This means
+ // a total of 16 bytes.
+ const uint8_t file_header[16] = {0};
+ RTC_CHECK_EQ(fwrite(file_header, sizeof(file_header), 1, out_file), 1);
+
+ // Create and register the packetizer, which will write the packets to file.
+ Packetizer packetizer(out_file, absl::GetFlag(FLAGS_ssrc), timestamp_rate_hz);
+ RTC_DCHECK_EQ(acm->RegisterTransportCallback(&packetizer), 0);
+
+ AudioFrame audio_frame;
+ audio_frame.samples_per_channel_ =
+ absl::GetFlag(FLAGS_sample_rate) / 100; // 10 ms
+ audio_frame.sample_rate_hz_ = absl::GetFlag(FLAGS_sample_rate);
+ audio_frame.num_channels_ = 1;
+
+ while (input_file.Read(audio_frame.samples_per_channel_,
+ audio_frame.mutable_data())) {
+ RTC_CHECK_GE(acm->Add10MsData(audio_frame), 0);
+ audio_frame.timestamp_ += audio_frame.samples_per_channel_;
+ }
+
+ return 0;
+}
+
+} // namespace
+} // namespace test
+} // namespace webrtc
+
+int main(int argc, char* argv[]) {
+ return webrtc::test::RunRtpEncode(argc, argv);
+}
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/tools/rtp_file_source.cc b/third_party/libwebrtc/modules/audio_coding/neteq/tools/rtp_file_source.cc
new file mode 100644
index 0000000000..a43c29638c
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/tools/rtp_file_source.cc
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/tools/rtp_file_source.h"
+
+#include <string.h>
+
+#include "absl/strings/string_view.h"
+#ifndef WIN32
+#include <netinet/in.h>
+#endif
+
+#include <memory>
+
+#include "modules/audio_coding/neteq/tools/packet.h"
+#include "rtc_base/checks.h"
+#include "test/rtp_file_reader.h"
+
+namespace webrtc {
+namespace test {
+
+RtpFileSource* RtpFileSource::Create(absl::string_view file_name,
+ absl::optional<uint32_t> ssrc_filter) {
+ RtpFileSource* source = new RtpFileSource(ssrc_filter);
+ RTC_CHECK(source->OpenFile(file_name));
+ return source;
+}
+
+bool RtpFileSource::ValidRtpDump(absl::string_view file_name) {
+ std::unique_ptr<RtpFileReader> temp_file(
+ RtpFileReader::Create(RtpFileReader::kRtpDump, file_name));
+ return !!temp_file;
+}
+
+bool RtpFileSource::ValidPcap(absl::string_view file_name) {
+ std::unique_ptr<RtpFileReader> temp_file(
+ RtpFileReader::Create(RtpFileReader::kPcap, file_name));
+ return !!temp_file;
+}
+
+RtpFileSource::~RtpFileSource() {}
+
+bool RtpFileSource::RegisterRtpHeaderExtension(RTPExtensionType type,
+ uint8_t id) {
+ return rtp_header_extension_map_.RegisterByType(id, type);
+}
+
+std::unique_ptr<Packet> RtpFileSource::NextPacket() {
+ while (true) {
+ RtpPacket temp_packet;
+ if (!rtp_reader_->NextPacket(&temp_packet)) {
+      return nullptr;
+ }
+ if (temp_packet.original_length == 0) {
+ // May be an RTCP packet.
+ // Read the next one.
+ continue;
+ }
+ auto packet = std::make_unique<Packet>(
+ rtc::CopyOnWriteBuffer(temp_packet.data, temp_packet.length),
+ temp_packet.original_length, temp_packet.time_ms,
+ &rtp_header_extension_map_);
+ if (!packet->valid_header()) {
+ continue;
+ }
+ if (filter_.test(packet->header().payloadType) ||
+ (ssrc_filter_ && packet->header().ssrc != *ssrc_filter_)) {
+      // The payload type or SSRC is filtered out. Continue to the next
+      // packet.
+ continue;
+ }
+ return packet;
+ }
+}
+
+RtpFileSource::RtpFileSource(absl::optional<uint32_t> ssrc_filter)
+ : PacketSource(),
+ ssrc_filter_(ssrc_filter) {}
+
+bool RtpFileSource::OpenFile(absl::string_view file_name) {
+ rtp_reader_.reset(RtpFileReader::Create(RtpFileReader::kRtpDump, file_name));
+ if (rtp_reader_)
+ return true;
+ rtp_reader_.reset(RtpFileReader::Create(RtpFileReader::kPcap, file_name));
+ if (!rtp_reader_) {
+ RTC_FATAL()
+        << "Couldn't open input file as either an rtpdump or a .pcap file. "
+        << "Note that .pcapng is not supported.";
+ }
+ return true;
+}
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/tools/rtp_file_source.h b/third_party/libwebrtc/modules/audio_coding/neteq/tools/rtp_file_source.h
new file mode 100644
index 0000000000..55505be630
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/tools/rtp_file_source.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_TOOLS_RTP_FILE_SOURCE_H_
+#define MODULES_AUDIO_CODING_NETEQ_TOOLS_RTP_FILE_SOURCE_H_
+
+#include <stdio.h>
+
+#include <memory>
+#include <string>
+
+#include "absl/strings/string_view.h"
+#include "absl/types/optional.h"
+#include "modules/audio_coding/neteq/tools/packet_source.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+
+namespace webrtc {
+
+namespace test {
+
+class RtpFileReader;
+
+class RtpFileSource : public PacketSource {
+ public:
+  // Creates an RtpFileSource reading from `file_name`. If the file cannot be
+  // opened, or has the wrong format, the program is terminated with a fatal
+  // error.
+ static RtpFileSource* Create(
+ absl::string_view file_name,
+ absl::optional<uint32_t> ssrc_filter = absl::nullopt);
+
+  // Checks whether a file is a valid RTP dump or PCAP (Wireshark) file.
+ static bool ValidRtpDump(absl::string_view file_name);
+ static bool ValidPcap(absl::string_view file_name);
+
+ ~RtpFileSource() override;
+
+ RtpFileSource(const RtpFileSource&) = delete;
+ RtpFileSource& operator=(const RtpFileSource&) = delete;
+
+ // Registers an RTP header extension and binds it to `id`.
+ virtual bool RegisterRtpHeaderExtension(RTPExtensionType type, uint8_t id);
+
+ std::unique_ptr<Packet> NextPacket() override;
+
+ private:
+ static const int kFirstLineLength = 40;
+ static const int kRtpFileHeaderSize = 4 + 4 + 4 + 2 + 2;
+ static const size_t kPacketHeaderSize = 8;
+
+ explicit RtpFileSource(absl::optional<uint32_t> ssrc_filter);
+
+ bool OpenFile(absl::string_view file_name);
+
+ std::unique_ptr<RtpFileReader> rtp_reader_;
+ const absl::optional<uint32_t> ssrc_filter_;
+ RtpHeaderExtensionMap rtp_header_extension_map_;
+};
+
+} // namespace test
+} // namespace webrtc
+#endif // MODULES_AUDIO_CODING_NETEQ_TOOLS_RTP_FILE_SOURCE_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/tools/rtp_generator.cc b/third_party/libwebrtc/modules/audio_coding/neteq/tools/rtp_generator.cc
new file mode 100644
index 0000000000..e883fc11d6
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/tools/rtp_generator.cc
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/tools/rtp_generator.h"
+
+namespace webrtc {
+namespace test {
+
+uint32_t RtpGenerator::GetRtpHeader(uint8_t payload_type,
+ size_t payload_length_samples,
+ RTPHeader* rtp_header) {
+ RTC_DCHECK(rtp_header);
+ if (!rtp_header) {
+ return 0;
+ }
+ rtp_header->sequenceNumber = seq_number_++;
+ rtp_header->timestamp = timestamp_;
+ timestamp_ += static_cast<uint32_t>(payload_length_samples);
+ rtp_header->payloadType = payload_type;
+ rtp_header->markerBit = false;
+ rtp_header->ssrc = ssrc_;
+ rtp_header->numCSRCs = 0;
+
+ uint32_t this_send_time = next_send_time_ms_;
+ RTC_DCHECK_GT(samples_per_ms_, 0);
+ next_send_time_ms_ +=
+ ((1.0 + drift_factor_) * payload_length_samples) / samples_per_ms_;
+ return this_send_time;
+}
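+
+// Example of the pacing above: with samples_per_ms_ = 48 and 960-sample
+// payloads (20 ms at 48 kHz), each call advances next_send_time_ms_ by
+// (1.0 + drift_factor_) * 960 / 48, i.e. 20 ms without drift or 20.2 ms with
+// drift_factor_ = 0.01.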
+
+void RtpGenerator::set_drift_factor(double factor) {
+ if (factor > -1.0) {
+ drift_factor_ = factor;
+ }
+}
+
+uint32_t TimestampJumpRtpGenerator::GetRtpHeader(uint8_t payload_type,
+ size_t payload_length_samples,
+ RTPHeader* rtp_header) {
+ uint32_t ret = RtpGenerator::GetRtpHeader(payload_type,
+ payload_length_samples, rtp_header);
+ if (timestamp_ - static_cast<uint32_t>(payload_length_samples) <=
+ jump_from_timestamp_ &&
+ timestamp_ > jump_from_timestamp_) {
+ // We just moved across the `jump_from_timestamp_` timestamp. Do the jump.
+ timestamp_ = jump_to_timestamp_;
+ }
+ return ret;
+}
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/tools/rtp_generator.h b/third_party/libwebrtc/modules/audio_coding/neteq/tools/rtp_generator.h
new file mode 100644
index 0000000000..2e615adec5
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/tools/rtp_generator.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_TOOLS_RTP_GENERATOR_H_
+#define MODULES_AUDIO_CODING_NETEQ_TOOLS_RTP_GENERATOR_H_
+
+#include "api/rtp_headers.h"
+
+namespace webrtc {
+namespace test {
+
+// Class for generating RTP headers.
+class RtpGenerator {
+ public:
+ RtpGenerator(int samples_per_ms,
+ uint16_t start_seq_number = 0,
+ uint32_t start_timestamp = 0,
+ uint32_t start_send_time_ms = 0,
+ uint32_t ssrc = 0x12345678)
+ : seq_number_(start_seq_number),
+ timestamp_(start_timestamp),
+ next_send_time_ms_(start_send_time_ms),
+ ssrc_(ssrc),
+ samples_per_ms_(samples_per_ms),
+ drift_factor_(0.0) {}
+
+ virtual ~RtpGenerator() {}
+
+ RtpGenerator(const RtpGenerator&) = delete;
+ RtpGenerator& operator=(const RtpGenerator&) = delete;
+
+ // Writes the next RTP header to `rtp_header`, which will be of type
+ // `payload_type`. Returns the send time for this packet (in ms). The value of
+ // `payload_length_samples` determines the send time for the next packet.
+ virtual uint32_t GetRtpHeader(uint8_t payload_type,
+ size_t payload_length_samples,
+ RTPHeader* rtp_header);
+
+ void set_drift_factor(double factor);
+
+ protected:
+ uint16_t seq_number_;
+ uint32_t timestamp_;
+ uint32_t next_send_time_ms_;
+ const uint32_t ssrc_;
+ const int samples_per_ms_;
+ double drift_factor_;
+};
+
+class TimestampJumpRtpGenerator : public RtpGenerator {
+ public:
+ TimestampJumpRtpGenerator(int samples_per_ms,
+ uint16_t start_seq_number,
+ uint32_t start_timestamp,
+ uint32_t jump_from_timestamp,
+ uint32_t jump_to_timestamp)
+ : RtpGenerator(samples_per_ms, start_seq_number, start_timestamp),
+ jump_from_timestamp_(jump_from_timestamp),
+ jump_to_timestamp_(jump_to_timestamp) {}
+
+ TimestampJumpRtpGenerator(const TimestampJumpRtpGenerator&) = delete;
+ TimestampJumpRtpGenerator& operator=(const TimestampJumpRtpGenerator&) =
+ delete;
+
+ uint32_t GetRtpHeader(uint8_t payload_type,
+ size_t payload_length_samples,
+ RTPHeader* rtp_header) override;
+
+ private:
+ uint32_t jump_from_timestamp_;
+ uint32_t jump_to_timestamp_;
+};
+
+} // namespace test
+} // namespace webrtc
+#endif // MODULES_AUDIO_CODING_NETEQ_TOOLS_RTP_GENERATOR_H_
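A short sketch of the drift arithmetic implemented above, with illustrative values: at 32 samples/ms and a drift factor of 0.1, each 640-sample (20 ms) payload advances the send time by (1 + 0.1) * 640 / 32 = 22 ms.

#include <cstdio>

#include "api/rtp_headers.h"
#include "modules/audio_coding/neteq/tools/rtp_generator.h"

void DriftDemo() {
  webrtc::test::RtpGenerator gen(/*samples_per_ms=*/32);
  gen.set_drift_factor(0.1);  // Packets leave 10% slower than nominal.
  webrtc::RTPHeader header;
  for (int i = 0; i < 3; ++i) {
    // Send times come out as 0, 22, 44 ms instead of 0, 20, 40 ms.
    uint32_t send_time_ms = gen.GetRtpHeader(
        /*payload_type=*/0, /*payload_length_samples=*/640, &header);
    std::printf("seq=%u ts=%u send_time=%u ms\n",
                static_cast<unsigned>(header.sequenceNumber),
                static_cast<unsigned>(header.timestamp),
                static_cast<unsigned>(send_time_ms));
  }
}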
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/tools/rtp_jitter.cc b/third_party/libwebrtc/modules/audio_coding/neteq/tools/rtp_jitter.cc
new file mode 100644
index 0000000000..cccaa9a3bb
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/tools/rtp_jitter.cc
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stdio.h>
+
+#include <algorithm>
+#include <fstream>
+#include <iostream>
+#include <vector>
+
+#include "api/array_view.h"
+#include "modules/rtp_rtcp/source/byte_io.h"
+#include "rtc_base/buffer.h"
+
+namespace webrtc {
+namespace test {
+namespace {
+
+constexpr size_t kRtpDumpHeaderLength = 8;
+
+// Returns the next packet or an empty buffer if end of file was encountered.
+rtc::Buffer ReadNextPacket(FILE* file) {
+ // Read the rtpdump header for the next packet.
+ rtc::Buffer buffer;
+ buffer.SetData(kRtpDumpHeaderLength, [&](rtc::ArrayView<uint8_t> x) {
+ return fread(x.data(), 1, x.size(), file);
+ });
+ if (buffer.size() != kRtpDumpHeaderLength) {
+ return rtc::Buffer();
+ }
+
+ // Get length field. This is the total length for this packet written to file,
+ // including the kRtpDumpHeaderLength bytes already read.
+ const uint16_t len = ByteReader<uint16_t>::ReadBigEndian(buffer.data());
+ RTC_CHECK_GE(len, kRtpDumpHeaderLength);
+
+ // Read remaining data from file directly into buffer.
+ buffer.AppendData(len - kRtpDumpHeaderLength, [&](rtc::ArrayView<uint8_t> x) {
+ return fread(x.data(), 1, x.size(), file);
+ });
+ if (buffer.size() != len) {
+ buffer.Clear();
+ }
+ return buffer;
+}
+
+struct PacketAndTime {
+ rtc::Buffer packet;
+ int time;
+};
+
+void WritePacket(const PacketAndTime& packet, FILE* file) {
+ // Write the first 4 bytes from the original packet.
+ const auto* payload_ptr = packet.packet.data();
+ RTC_CHECK_EQ(fwrite(payload_ptr, 4, 1, file), 1);
+ payload_ptr += 4;
+
+ // Convert the new time offset to network endian, and write to file.
+ uint8_t time[sizeof(uint32_t)];
+ ByteWriter<uint32_t, sizeof(uint32_t)>::WriteBigEndian(time, packet.time);
+ RTC_CHECK_EQ(fwrite(time, sizeof(uint32_t), 1, file), 1);
+ payload_ptr += 4; // Skip the old time in the original payload.
+
+ // Write the remaining part of the payload.
+ RTC_DCHECK_EQ(payload_ptr - packet.packet.data(), kRtpDumpHeaderLength);
+ RTC_CHECK_EQ(
+ fwrite(payload_ptr, packet.packet.size() - kRtpDumpHeaderLength, 1, file),
+ 1);
+}
+
+int RunRtpJitter(int argc, char* argv[]) {
+ const std::string program_name = argv[0];
+ const std::string usage =
+ "Tool for altering the arrival times in an RTP dump file.\n"
+ "Example usage:\n" +
+ program_name + " input.rtp arrival_times_ms.txt output.rtp\n\n";
+ if (argc != 4) {
+ printf("%s", usage.c_str());
+ return 1;
+ }
+
+ printf("Input RTP file: %s\n", argv[1]);
+ FILE* in_file = fopen(argv[1], "rb");
+ RTC_CHECK(in_file) << "Could not open file " << argv[1] << " for reading";
+ printf("Timing file: %s\n", argv[2]);
+ std::ifstream timing_file(argv[2]);
+ printf("Output file: %s\n", argv[3]);
+ FILE* out_file = fopen(argv[3], "wb");
+ RTC_CHECK(out_file) << "Could not open file " << argv[3] << " for writing";
+
+ // Copy the RTP file header to the output file.
+ char header_string[30];
+ RTC_CHECK(fgets(header_string, 30, in_file));
+ fprintf(out_file, "%s", header_string);
+ uint8_t file_header[16];
+ RTC_CHECK_EQ(fread(file_header, sizeof(file_header), 1, in_file), 1);
+ RTC_CHECK_EQ(fwrite(file_header, sizeof(file_header), 1, out_file), 1);
+
+ // Read all time values from the timing file. Store in a vector.
+ std::vector<int> new_arrival_times;
+ int new_time;
+ while (timing_file >> new_time) {
+ new_arrival_times.push_back(new_time);
+ }
+
+ // Read all packets from the input RTP file, but no more than the number of
+ // new time values. Store RTP packets together with new time values.
+ auto time_it = new_arrival_times.begin();
+ std::vector<PacketAndTime> packets;
+ while (1) {
+ auto packet = ReadNextPacket(in_file);
+ if (packet.empty() || time_it == new_arrival_times.end()) {
+ break;
+ }
+ packets.push_back({std::move(packet), *time_it});
+ ++time_it;
+ }
+
+ // Sort on new time values.
+ std::sort(packets.begin(), packets.end(),
+ [](const PacketAndTime& a, const PacketAndTime& b) {
+ return a.time < b.time;
+ });
+
+ // Write packets to output file.
+ for (const auto& p : packets) {
+ WritePacket(p, out_file);
+ }
+
+ fclose(in_file);
+ fclose(out_file);
+ return 0;
+}
+
+} // namespace
+} // namespace test
+} // namespace webrtc
+
+int main(int argc, char* argv[]) {
+ return webrtc::test::RunRtpJitter(argc, argv);
+}
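For reference, the 8-byte per-packet record of the rtpdump format that ReadNextPacket() and WritePacket() above rely on looks roughly as follows (a sketch; the field names are illustrative, and all fields are big-endian on disk). WritePacket() keeps the first four bytes (length and plen) verbatim and rewrites only the four-byte arrival offset.

struct RtpDumpPacketHeader {
  uint16_t length;      // Total record length, including these 8 bytes.
  uint16_t plen;        // Length of the RTP packet as captured.
  uint32_t elapsed_ms;  // Arrival offset in ms; RunRtpJitter rewrites this.
};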
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/tools/rtpcat.cc b/third_party/libwebrtc/modules/audio_coding/neteq/tools/rtpcat.cc
new file mode 100644
index 0000000000..431de553ae
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/tools/rtpcat.cc
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stdio.h>
+
+#include <memory>
+
+#include "rtc_base/checks.h"
+#include "test/rtp_file_reader.h"
+#include "test/rtp_file_writer.h"
+
+using webrtc::test::RtpFileReader;
+using webrtc::test::RtpFileWriter;
+
+int main(int argc, char* argv[]) {
+ if (argc < 3) {
+ printf("Concatenates multiple rtpdump files into one.\n");
+ printf("Usage: rtpcat in1.rtp in2.rtp [...] out.rtp\n");
+ exit(1);
+ }
+
+ std::unique_ptr<RtpFileWriter> output(
+ RtpFileWriter::Create(RtpFileWriter::kRtpDump, argv[argc - 1]));
+ RTC_CHECK(output.get() != NULL) << "Cannot open output file.";
+ printf("Output RTP file: %s\n", argv[argc - 1]);
+
+ for (int i = 1; i < argc - 1; i++) {
+ std::unique_ptr<RtpFileReader> input(
+ RtpFileReader::Create(RtpFileReader::kRtpDump, argv[i]));
+ RTC_CHECK(input.get() != NULL) << "Cannot open input file " << argv[i];
+ printf("Input RTP file: %s\n", argv[i]);
+
+ webrtc::test::RtpPacket packet;
+ while (input->NextPacket(&packet))
+ RTC_CHECK(output->WritePacket(&packet));
+ }
+ return 0;
+}
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/underrun_optimizer.cc b/third_party/libwebrtc/modules/audio_coding/neteq/underrun_optimizer.cc
new file mode 100644
index 0000000000..baed812327
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/underrun_optimizer.cc
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/underrun_optimizer.h"
+
+#include <algorithm>
+
+namespace webrtc {
+
+namespace {
+
+constexpr int kDelayBuckets = 100;
+constexpr int kBucketSizeMs = 20;
+
+} // namespace
+
+UnderrunOptimizer::UnderrunOptimizer(const TickTimer* tick_timer,
+ int histogram_quantile,
+ int forget_factor,
+ absl::optional<int> start_forget_weight,
+ absl::optional<int> resample_interval_ms)
+ : tick_timer_(tick_timer),
+ histogram_(kDelayBuckets, forget_factor, start_forget_weight),
+ histogram_quantile_(histogram_quantile),
+ resample_interval_ms_(resample_interval_ms) {}
+
+void UnderrunOptimizer::Update(int relative_delay_ms) {
+ absl::optional<int> histogram_update;
+ if (resample_interval_ms_) {
+ if (!resample_stopwatch_) {
+ resample_stopwatch_ = tick_timer_->GetNewStopwatch();
+ }
+ if (static_cast<int>(resample_stopwatch_->ElapsedMs()) >
+ *resample_interval_ms_) {
+ histogram_update = max_delay_in_interval_ms_;
+ resample_stopwatch_ = tick_timer_->GetNewStopwatch();
+ max_delay_in_interval_ms_ = 0;
+ }
+ max_delay_in_interval_ms_ =
+ std::max(max_delay_in_interval_ms_, relative_delay_ms);
+ } else {
+ histogram_update = relative_delay_ms;
+ }
+ if (!histogram_update) {
+ return;
+ }
+
+ const int index = *histogram_update / kBucketSizeMs;
+ if (index < histogram_.NumBuckets()) {
+ // Maximum delay to register is 2000 ms.
+ histogram_.Add(index);
+ }
+ int bucket_index = histogram_.Quantile(histogram_quantile_);
+ optimal_delay_ms_ = (1 + bucket_index) * kBucketSizeMs;
+}
+
+void UnderrunOptimizer::Reset() {
+ histogram_.Reset();
+ resample_stopwatch_.reset();
+ max_delay_in_interval_ms_ = 0;
+ optimal_delay_ms_.reset();
+}
+
+} // namespace webrtc
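A minimal sketch of the bucket arithmetic in Update() above, restating the constants defined in this file: delays map to 20 ms buckets, of which there are 100, so only delays below 2000 ms are registered, and the optimal delay lies one bucket boundary above the quantile index.

constexpr int kBucketSizeMs = 20;
constexpr int kDelayBuckets = 100;

// 95 ms -> bucket 4; 1999 ms -> bucket 99; 2000 ms -> bucket 100 (ignored).
int BucketIndex(int relative_delay_ms) {
  return relative_delay_ms / kBucketSizeMs;
}

// Quantile bucket 4 -> optimal delay (1 + 4) * 20 = 100 ms, i.e. every delay
// that still falls inside the quantile bucket is covered.
int OptimalDelayMs(int quantile_bucket_index) {
  return (1 + quantile_bucket_index) * kBucketSizeMs;
}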
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/underrun_optimizer.h b/third_party/libwebrtc/modules/audio_coding/neteq/underrun_optimizer.h
new file mode 100644
index 0000000000..b37ce18795
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/underrun_optimizer.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_UNDERRUN_OPTIMIZER_H_
+#define MODULES_AUDIO_CODING_NETEQ_UNDERRUN_OPTIMIZER_H_
+
+#include <memory>
+
+#include "absl/types/optional.h"
+#include "api/neteq/tick_timer.h"
+#include "modules/audio_coding/neteq/histogram.h"
+
+namespace webrtc {
+
+// Estimates probability of buffer underrun due to late packet arrival.
+// The optimal delay is decided such that the probability of underrun is lower
+// than 1 - `histogram_quantile`.
+class UnderrunOptimizer {
+ public:
+ UnderrunOptimizer(const TickTimer* tick_timer,
+ int histogram_quantile,
+ int forget_factor,
+ absl::optional<int> start_forget_weight,
+ absl::optional<int> resample_interval_ms);
+
+ void Update(int relative_delay_ms);
+
+ absl::optional<int> GetOptimalDelayMs() const { return optimal_delay_ms_; }
+
+ void Reset();
+
+ private:
+ const TickTimer* tick_timer_;
+ Histogram histogram_;
+ const int histogram_quantile_; // In Q30.
+ const absl::optional<int> resample_interval_ms_;
+ std::unique_ptr<TickTimer::Stopwatch> resample_stopwatch_;
+ int max_delay_in_interval_ms_ = 0;
+ absl::optional<int> optimal_delay_ms_;
+};
+
+} // namespace webrtc
+#endif // MODULES_AUDIO_CODING_NETEQ_UNDERRUN_OPTIMIZER_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/underrun_optimizer_unittest.cc b/third_party/libwebrtc/modules/audio_coding/neteq/underrun_optimizer_unittest.cc
new file mode 100644
index 0000000000..a86e9cf107
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/underrun_optimizer_unittest.cc
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/underrun_optimizer.h"
+
+#include "test/gtest.h"
+
+namespace webrtc {
+
+namespace {
+
+constexpr int kDefaultHistogramQuantile = 1020054733; // 0.95 in Q30.
+constexpr int kForgetFactor = 32745; // 0.9993 in Q15.
+
+} // namespace
+
+TEST(UnderrunOptimizerTest, ResamplePacketDelays) {
+ TickTimer tick_timer;
+ constexpr int kResampleIntervalMs = 500;
+ UnderrunOptimizer underrun_optimizer(&tick_timer, kDefaultHistogramQuantile,
+ kForgetFactor, absl::nullopt,
+ kResampleIntervalMs);
+
+ // The histogram should be updated once with the maximum delay observed for
+ // the following sequence of updates.
+ for (int i = 0; i < 500; i += 20) {
+ underrun_optimizer.Update(i);
+ EXPECT_FALSE(underrun_optimizer.GetOptimalDelayMs());
+ }
+ tick_timer.Increment(kResampleIntervalMs / tick_timer.ms_per_tick() + 1);
+ underrun_optimizer.Update(0);
+ EXPECT_EQ(underrun_optimizer.GetOptimalDelayMs(), 500);
+}
+
+} // namespace webrtc
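The fixed-point constants above follow the usual Qn convention, value * 2^n rounded to nearest. A compile-time sketch of the derivation:

// 0.95 * 2^30 = 1020054732.8 -> 1020054733; 0.9993 * 2^15 = 32745.06 -> 32745.
constexpr int ToQ(double x, int n) {
  return static_cast<int>(x * (1 << n) + 0.5);
}
static_assert(ToQ(0.95, 30) == 1020054733, "0.95 in Q30");
static_assert(ToQ(0.9993, 15) == 32745, "0.9993 in Q15");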
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq_gn/moz.build b/third_party/libwebrtc/modules/audio_coding/neteq_gn/moz.build
new file mode 100644
index 0000000000..032a190b0a
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/neteq_gn/moz.build
@@ -0,0 +1,247 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+SOURCES += [
+ "/third_party/libwebrtc/modules/audio_coding/neteq/audio_vector.cc",
+ "/third_party/libwebrtc/modules/audio_coding/neteq/underrun_optimizer.cc"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/audio_coding/neteq/accelerate.cc",
+ "/third_party/libwebrtc/modules/audio_coding/neteq/audio_multi_vector.cc",
+ "/third_party/libwebrtc/modules/audio_coding/neteq/background_noise.cc",
+ "/third_party/libwebrtc/modules/audio_coding/neteq/buffer_level_filter.cc",
+ "/third_party/libwebrtc/modules/audio_coding/neteq/comfort_noise.cc",
+ "/third_party/libwebrtc/modules/audio_coding/neteq/cross_correlation.cc",
+ "/third_party/libwebrtc/modules/audio_coding/neteq/decision_logic.cc",
+ "/third_party/libwebrtc/modules/audio_coding/neteq/decoder_database.cc",
+ "/third_party/libwebrtc/modules/audio_coding/neteq/delay_manager.cc",
+ "/third_party/libwebrtc/modules/audio_coding/neteq/dsp_helper.cc",
+ "/third_party/libwebrtc/modules/audio_coding/neteq/dtmf_buffer.cc",
+ "/third_party/libwebrtc/modules/audio_coding/neteq/dtmf_tone_generator.cc",
+ "/third_party/libwebrtc/modules/audio_coding/neteq/expand.cc",
+ "/third_party/libwebrtc/modules/audio_coding/neteq/expand_uma_logger.cc",
+ "/third_party/libwebrtc/modules/audio_coding/neteq/histogram.cc",
+ "/third_party/libwebrtc/modules/audio_coding/neteq/merge.cc",
+ "/third_party/libwebrtc/modules/audio_coding/neteq/nack_tracker.cc",
+ "/third_party/libwebrtc/modules/audio_coding/neteq/neteq_impl.cc",
+ "/third_party/libwebrtc/modules/audio_coding/neteq/normal.cc",
+ "/third_party/libwebrtc/modules/audio_coding/neteq/packet.cc",
+ "/third_party/libwebrtc/modules/audio_coding/neteq/packet_arrival_history.cc",
+ "/third_party/libwebrtc/modules/audio_coding/neteq/packet_buffer.cc",
+ "/third_party/libwebrtc/modules/audio_coding/neteq/post_decode_vad.cc",
+ "/third_party/libwebrtc/modules/audio_coding/neteq/preemptive_expand.cc",
+ "/third_party/libwebrtc/modules/audio_coding/neteq/random_vector.cc",
+ "/third_party/libwebrtc/modules/audio_coding/neteq/red_payload_splitter.cc",
+ "/third_party/libwebrtc/modules/audio_coding/neteq/reorder_optimizer.cc",
+ "/third_party/libwebrtc/modules/audio_coding/neteq/statistics_calculator.cc",
+ "/third_party/libwebrtc/modules/audio_coding/neteq/sync_buffer.cc",
+ "/third_party/libwebrtc/modules/audio_coding/neteq/time_stretch.cc",
+ "/third_party/libwebrtc/modules/audio_coding/neteq/timestamp_scaler.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("neteq_gn")
diff --git a/third_party/libwebrtc/modules/audio_coding/pcm16b_c_gn/moz.build b/third_party/libwebrtc/modules/audio_coding/pcm16b_c_gn/moz.build
new file mode 100644
index 0000000000..4828d70c9e
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/pcm16b_c_gn/moz.build
@@ -0,0 +1,197 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/audio_coding/codecs/pcm16b/pcm16b.c"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("pcm16b_c_gn")
diff --git a/third_party/libwebrtc/modules/audio_coding/pcm16b_gn/moz.build b/third_party/libwebrtc/modules/audio_coding/pcm16b_gn/moz.build
new file mode 100644
index 0000000000..be33178a5c
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/pcm16b_gn/moz.build
@@ -0,0 +1,207 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/audio_coding/codecs/pcm16b/audio_decoder_pcm16b.cc",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/pcm16b/audio_encoder_pcm16b.cc",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/pcm16b/pcm16b_common.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("pcm16b_gn")
diff --git a/third_party/libwebrtc/modules/audio_coding/red_gn/moz.build b/third_party/libwebrtc/modules/audio_coding/red_gn/moz.build
new file mode 100644
index 0000000000..8b44f5f5ed
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/red_gn/moz.build
@@ -0,0 +1,212 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("red_gn")
diff --git a/third_party/libwebrtc/modules/audio_coding/test/Channel.cc b/third_party/libwebrtc/modules/audio_coding/test/Channel.cc
new file mode 100644
index 0000000000..35aa6cb6b4
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/test/Channel.cc
@@ -0,0 +1,274 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/test/Channel.h"
+
+#include <iostream>
+
+#include "rtc_base/strings/string_builder.h"
+#include "rtc_base/time_utils.h"
+
+namespace webrtc {
+
+int32_t Channel::SendData(AudioFrameType frameType,
+ uint8_t payloadType,
+ uint32_t timeStamp,
+ const uint8_t* payloadData,
+ size_t payloadSize,
+ int64_t absolute_capture_timestamp_ms) {
+ RTPHeader rtp_header;
+ int32_t status;
+ size_t payloadDataSize = payloadSize;
+
+ rtp_header.markerBit = false;
+ rtp_header.ssrc = 0;
+ rtp_header.sequenceNumber =
+ (external_sequence_number_ < 0)
+ ? _seqNo++
+ : static_cast<uint16_t>(external_sequence_number_);
+ rtp_header.payloadType = payloadType;
+ rtp_header.timestamp = (external_send_timestamp_ < 0)
+ ? timeStamp
+ : static_cast<uint32_t>(external_send_timestamp_);
+
+ if (frameType == AudioFrameType::kEmptyFrame) {
+ // When the frame is empty, we should not transmit it. The frame size of
+ // the next non-empty frame will be based on the previous frame size.
+ _useLastFrameSize = _lastFrameSizeSample > 0;
+ return 0;
+ }
+
+ memcpy(_payloadData, payloadData, payloadDataSize);
+ if (_isStereo) {
+ if (_leftChannel) {
+ _rtp_header = rtp_header;
+ _leftChannel = false;
+ } else {
+ rtp_header = _rtp_header;
+ _leftChannel = true;
+ }
+ }
+
+ _channelCritSect.Lock();
+ if (_saveBitStream) {
+ // fwrite(payloadData, sizeof(uint8_t), payloadSize, _bitStreamFile);
+ }
+
+ if (!_isStereo) {
+ CalcStatistics(rtp_header, payloadSize);
+ }
+ _useLastFrameSize = false;
+ _lastInTimestamp = timeStamp;
+ _totalBytes += payloadDataSize;
+ _channelCritSect.Unlock();
+
+ if (_useFECTestWithPacketLoss) {
+ _packetLoss += 1;
+ if (_packetLoss == 3) {
+ _packetLoss = 0;
+ return 0;
+ }
+ }
+
+ if (num_packets_to_drop_ > 0) {
+ num_packets_to_drop_--;
+ return 0;
+ }
+
+ status =
+ _receiverACM->IncomingPacket(_payloadData, payloadDataSize, rtp_header);
+
+ return status;
+}
+
+// TODO(turajs): rewrite this method.
+void Channel::CalcStatistics(const RTPHeader& rtp_header, size_t payloadSize) {
+ int n;
+ if ((rtp_header.payloadType != _lastPayloadType) &&
+ (_lastPayloadType != -1)) {
+ // The payload type has changed. We have to terminate the calculations
+ // on the previous payload type. We ignore the last packet of that
+ // payload type just to make things easier.
+ for (n = 0; n < MAX_NUM_PAYLOADS; n++) {
+ if (_lastPayloadType == _payloadStats[n].payloadType) {
+ _payloadStats[n].newPacket = true;
+ break;
+ }
+ }
+ }
+ _lastPayloadType = rtp_header.payloadType;
+
+ bool newPayload = true;
+ ACMTestPayloadStats* currentPayloadStr = NULL;
+ for (n = 0; n < MAX_NUM_PAYLOADS; n++) {
+ if (rtp_header.payloadType == _payloadStats[n].payloadType) {
+ newPayload = false;
+ currentPayloadStr = &_payloadStats[n];
+ break;
+ }
+ }
+
+ if (!newPayload) {
+ if (!currentPayloadStr->newPacket) {
+ if (!_useLastFrameSize) {
+ _lastFrameSizeSample =
+ (uint32_t)((uint32_t)rtp_header.timestamp -
+ (uint32_t)currentPayloadStr->lastTimestamp);
+ }
+ RTC_DCHECK_GT(_lastFrameSizeSample, 0);
+ int k = 0;
+ for (; k < MAX_NUM_FRAMESIZES; ++k) {
+ if ((currentPayloadStr->frameSizeStats[k].frameSizeSample ==
+ _lastFrameSizeSample) ||
+ (currentPayloadStr->frameSizeStats[k].frameSizeSample == 0)) {
+ break;
+ }
+ }
+ if (k == MAX_NUM_FRAMESIZES) {
+ // New frame size found but no space to count statistics on it. Skip it.
+ printf("No memory to store statistics for payload %d : frame size %d\n",
+ _lastPayloadType, _lastFrameSizeSample);
+ return;
+ }
+ ACMTestFrameSizeStats* currentFrameSizeStats =
+ &(currentPayloadStr->frameSizeStats[k]);
+ currentFrameSizeStats->frameSizeSample = (int16_t)_lastFrameSizeSample;
+
+ // increment the number of encoded samples.
+ currentFrameSizeStats->totalEncodedSamples += _lastFrameSizeSample;
+ // increment the number of received packets.
+ currentFrameSizeStats->numPackets++;
+ // increment the total number of bytes (this is based on the previous
+ // payload since we don't know the frame size of the current payload).
+ currentFrameSizeStats->totalPayloadLenByte +=
+ currentPayloadStr->lastPayloadLenByte;
+ // store the maximum payload size (this is based on the previous
+ // payload since we don't know the frame size of the current payload).
+ if (currentFrameSizeStats->maxPayloadLen <
+ currentPayloadStr->lastPayloadLenByte) {
+ currentFrameSizeStats->maxPayloadLen =
+ currentPayloadStr->lastPayloadLenByte;
+ }
+ // store the current values for the next time
+ currentPayloadStr->lastTimestamp = rtp_header.timestamp;
+ currentPayloadStr->lastPayloadLenByte = payloadSize;
+ } else {
+ currentPayloadStr->newPacket = false;
+ currentPayloadStr->lastPayloadLenByte = payloadSize;
+ currentPayloadStr->lastTimestamp = rtp_header.timestamp;
+ currentPayloadStr->payloadType = rtp_header.payloadType;
+ memset(currentPayloadStr->frameSizeStats, 0,
+ MAX_NUM_FRAMESIZES * sizeof(ACMTestFrameSizeStats));
+ }
+ } else {
+ n = 0;
+ while (_payloadStats[n].payloadType != -1) {
+ n++;
+ }
+ // first packet
+ _payloadStats[n].newPacket = false;
+ _payloadStats[n].lastPayloadLenByte = payloadSize;
+ _payloadStats[n].lastTimestamp = rtp_header.timestamp;
+ _payloadStats[n].payloadType = rtp_header.payloadType;
+ memset(_payloadStats[n].frameSizeStats, 0,
+ MAX_NUM_FRAMESIZES * sizeof(ACMTestFrameSizeStats));
+ }
+}
+
+Channel::Channel(int16_t chID)
+ : _receiverACM(NULL),
+ _seqNo(0),
+ _bitStreamFile(NULL),
+ _saveBitStream(false),
+ _lastPayloadType(-1),
+ _isStereo(false),
+ _leftChannel(true),
+ _lastInTimestamp(0),
+ _useLastFrameSize(false),
+ _lastFrameSizeSample(0),
+ _packetLoss(0),
+ _useFECTestWithPacketLoss(false),
+ _beginTime(rtc::TimeMillis()),
+ _totalBytes(0),
+ external_send_timestamp_(-1),
+ external_sequence_number_(-1),
+ num_packets_to_drop_(0) {
+ int n;
+ int k;
+ for (n = 0; n < MAX_NUM_PAYLOADS; n++) {
+ _payloadStats[n].payloadType = -1;
+ _payloadStats[n].newPacket = true;
+ for (k = 0; k < MAX_NUM_FRAMESIZES; k++) {
+ _payloadStats[n].frameSizeStats[k].frameSizeSample = 0;
+ _payloadStats[n].frameSizeStats[k].maxPayloadLen = 0;
+ _payloadStats[n].frameSizeStats[k].numPackets = 0;
+ _payloadStats[n].frameSizeStats[k].totalPayloadLenByte = 0;
+ _payloadStats[n].frameSizeStats[k].totalEncodedSamples = 0;
+ }
+ }
+ if (chID >= 0) {
+ _saveBitStream = true;
+ rtc::StringBuilder ss;
+ ss.AppendFormat("bitStream_%d.dat", chID);
+ _bitStreamFile = fopen(ss.str().c_str(), "wb");
+ } else {
+ _saveBitStream = false;
+ }
+}
+
+Channel::~Channel() {}
+
+void Channel::RegisterReceiverACM(AudioCodingModule* acm) {
+ _receiverACM = acm;
+}
+
+void Channel::ResetStats() {
+ int n;
+ int k;
+ _channelCritSect.Lock();
+ _lastPayloadType = -1;
+ for (n = 0; n < MAX_NUM_PAYLOADS; n++) {
+ _payloadStats[n].payloadType = -1;
+ _payloadStats[n].newPacket = true;
+ for (k = 0; k < MAX_NUM_FRAMESIZES; k++) {
+ _payloadStats[n].frameSizeStats[k].frameSizeSample = 0;
+ _payloadStats[n].frameSizeStats[k].maxPayloadLen = 0;
+ _payloadStats[n].frameSizeStats[k].numPackets = 0;
+ _payloadStats[n].frameSizeStats[k].totalPayloadLenByte = 0;
+ _payloadStats[n].frameSizeStats[k].totalEncodedSamples = 0;
+ }
+ }
+ _beginTime = rtc::TimeMillis();
+ _totalBytes = 0;
+ _channelCritSect.Unlock();
+}
+
+uint32_t Channel::LastInTimestamp() {
+ uint32_t timestamp;
+ _channelCritSect.Lock();
+ timestamp = _lastInTimestamp;
+ _channelCritSect.Unlock();
+ return timestamp;
+}
+
+double Channel::BitRate() {
+ double rate;
+ uint64_t currTime = rtc::TimeMillis();
+ _channelCritSect.Lock();
+ // bytes * 8 / elapsed ms = bits/ms, i.e. the rate in kbit/s.
+ rate = ((double)_totalBytes * 8.0) / (double)(currTime - _beginTime);
+ _channelCritSect.Unlock();
+ return rate;
+}
+
+} // namespace webrtc
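CalcStatistics() above infers the frame size from RTP timestamps rather than from codec signaling; a sketch of that inference with illustrative numbers:

// The frame size in samples is the RTP timestamp delta between consecutive
// packets of the same payload type; unsigned arithmetic handles wrap-around.
uint32_t InferFrameSizeSamples(uint32_t prev_timestamp, uint32_t timestamp) {
  // E.g. a 48 kHz clock and 20 ms frames: 960960 - 960000 = 960 samples.
  return timestamp - prev_timestamp;
}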
diff --git a/third_party/libwebrtc/modules/audio_coding/test/Channel.h b/third_party/libwebrtc/modules/audio_coding/test/Channel.h
new file mode 100644
index 0000000000..7a8829e1d2
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/test/Channel.h
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_TEST_CHANNEL_H_
+#define MODULES_AUDIO_CODING_TEST_CHANNEL_H_
+
+#include <stdio.h>
+
+#include "modules/audio_coding/include/audio_coding_module.h"
+#include "modules/include/module_common_types.h"
+#include "rtc_base/synchronization/mutex.h"
+
+namespace webrtc {
+
+#define MAX_NUM_PAYLOADS 50
+#define MAX_NUM_FRAMESIZES 6
+
+// TODO(turajs): Write constructor for this structure.
+struct ACMTestFrameSizeStats {
+ uint16_t frameSizeSample;
+ size_t maxPayloadLen;
+ uint32_t numPackets;
+ uint64_t totalPayloadLenByte;
+ uint64_t totalEncodedSamples;
+ double rateBitPerSec;
+ double usageLenSec;
+};
+
+// TODO(turajs): Write constructor for this structure.
+struct ACMTestPayloadStats {
+ bool newPacket;
+ int16_t payloadType;
+ size_t lastPayloadLenByte;
+ uint32_t lastTimestamp;
+ ACMTestFrameSizeStats frameSizeStats[MAX_NUM_FRAMESIZES];
+};
+
+class Channel : public AudioPacketizationCallback {
+ public:
+ Channel(int16_t chID = -1);
+ ~Channel() override;
+
+ int32_t SendData(AudioFrameType frameType,
+ uint8_t payloadType,
+ uint32_t timeStamp,
+ const uint8_t* payloadData,
+ size_t payloadSize,
+ int64_t absolute_capture_timestamp_ms) override;
+
+ void RegisterReceiverACM(AudioCodingModule* acm);
+
+ void ResetStats();
+
+ void SetIsStereo(bool isStereo) { _isStereo = isStereo; }
+
+ uint32_t LastInTimestamp();
+
+ void SetFECTestWithPacketLoss(bool usePacketLoss) {
+ _useFECTestWithPacketLoss = usePacketLoss;
+ }
+
+ double BitRate();
+
+ void set_send_timestamp(uint32_t new_send_ts) {
+ external_send_timestamp_ = new_send_ts;
+ }
+
+ void set_sequence_number(uint16_t new_sequence_number) {
+ external_sequence_number_ = new_sequence_number;
+ }
+
+ void set_num_packets_to_drop(int new_num_packets_to_drop) {
+ num_packets_to_drop_ = new_num_packets_to_drop;
+ }
+
+ private:
+ void CalcStatistics(const RTPHeader& rtp_header, size_t payloadSize);
+
+ AudioCodingModule* _receiverACM;
+ uint16_t _seqNo;
+ // 60 ms * 32 samples (max) per ms * 2 descriptions (maybe) * 2 bytes/sample.
+ uint8_t _payloadData[60 * 32 * 2 * 2];
+
+ Mutex _channelCritSect;
+ FILE* _bitStreamFile;
+ bool _saveBitStream;
+ int16_t _lastPayloadType;
+ ACMTestPayloadStats _payloadStats[MAX_NUM_PAYLOADS];
+ bool _isStereo;
+ RTPHeader _rtp_header;
+ bool _leftChannel;
+ uint32_t _lastInTimestamp;
+ bool _useLastFrameSize;
+ uint32_t _lastFrameSizeSample;
+ // FEC Test variables
+ int16_t _packetLoss;
+ bool _useFECTestWithPacketLoss;
+ uint64_t _beginTime;
+ uint64_t _totalBytes;
+
+ // External timing info, defaulted to -1. Only used if they are
+ // non-negative.
+ int64_t external_send_timestamp_;
+ int32_t external_sequence_number_;
+ int num_packets_to_drop_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_CODING_TEST_CHANNEL_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/test/EncodeDecodeTest.cc b/third_party/libwebrtc/modules/audio_coding/test/EncodeDecodeTest.cc
new file mode 100644
index 0000000000..8d4bcce8df
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/test/EncodeDecodeTest.cc
@@ -0,0 +1,272 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/test/EncodeDecodeTest.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <memory>
+
+#include "absl/strings/string_view.h"
+#include "api/audio_codecs/builtin_audio_decoder_factory.h"
+#include "api/audio_codecs/builtin_audio_encoder_factory.h"
+#include "modules/audio_coding/include/audio_coding_module.h"
+#include "rtc_base/strings/string_builder.h"
+#include "test/gtest.h"
+#include "test/testsupport/file_utils.h"
+
+namespace webrtc {
+
+namespace {
+// Buffer size for stereo 48 kHz audio.
+constexpr size_t kWebRtc10MsPcmAudio = 960;
+
+} // namespace
+
+TestPacketization::TestPacketization(RTPStream* rtpStream, uint16_t frequency)
+ : _rtpStream(rtpStream), _frequency(frequency), _seqNo(0) {}
+
+TestPacketization::~TestPacketization() {}
+
+int32_t TestPacketization::SendData(const AudioFrameType /* frameType */,
+ const uint8_t payloadType,
+ const uint32_t timeStamp,
+ const uint8_t* payloadData,
+ const size_t payloadSize,
+ int64_t absolute_capture_timestamp_ms) {
+ _rtpStream->Write(payloadType, timeStamp, _seqNo++, payloadData, payloadSize,
+ _frequency);
+ return 1;
+}
+
+Sender::Sender()
+ : _acm(NULL), _pcmFile(), _audioFrame(), _packetization(NULL) {}
+
+void Sender::Setup(AudioCodingModule* acm,
+ RTPStream* rtpStream,
+ absl::string_view in_file_name,
+ int in_sample_rate,
+ int payload_type,
+ SdpAudioFormat format) {
+ // Open input file
+ const std::string file_name = webrtc::test::ResourcePath(in_file_name, "pcm");
+ _pcmFile.Open(file_name, in_sample_rate, "rb");
+ if (format.num_channels == 2) {
+ _pcmFile.ReadStereo(true);
+ }
+ // Set test length to 500 ms (50 blocks of 10 ms each).
+ _pcmFile.SetNum10MsBlocksToRead(50);
+ // Fast-forward 1 second (100 blocks) since the file starts with silence.
+ _pcmFile.FastForward(100);
+
+ acm->SetEncoder(CreateBuiltinAudioEncoderFactory()->MakeAudioEncoder(
+ payload_type, format, absl::nullopt));
+ _packetization = new TestPacketization(rtpStream, format.clockrate_hz);
+ EXPECT_EQ(0, acm->RegisterTransportCallback(_packetization));
+
+ _acm = acm;
+}
+
+void Sender::Teardown() {
+ _pcmFile.Close();
+ delete _packetization;
+}
+
+bool Sender::Add10MsData() {
+ if (!_pcmFile.EndOfFile()) {
+ EXPECT_GT(_pcmFile.Read10MsData(_audioFrame), 0);
+ int32_t ok = _acm->Add10MsData(_audioFrame);
+ EXPECT_GE(ok, 0);
+    return ok >= 0;
+ }
+ return false;
+}
+
+void Sender::Run() {
+ while (true) {
+ if (!Add10MsData()) {
+ break;
+ }
+ }
+}
+
+Receiver::Receiver()
+ : _playoutLengthSmpls(kWebRtc10MsPcmAudio),
+ _payloadSizeBytes(MAX_INCOMING_PAYLOAD) {}
+
+void Receiver::Setup(AudioCodingModule* acm,
+ RTPStream* rtpStream,
+ absl::string_view out_file_name,
+ size_t channels,
+ int file_num) {
+ EXPECT_EQ(0, acm->InitializeReceiver());
+
+ if (channels == 1) {
+ acm->SetReceiveCodecs({{103, {"ISAC", 16000, 1}},
+ {104, {"ISAC", 32000, 1}},
+ {107, {"L16", 8000, 1}},
+ {108, {"L16", 16000, 1}},
+ {109, {"L16", 32000, 1}},
+ {0, {"PCMU", 8000, 1}},
+ {8, {"PCMA", 8000, 1}},
+ {102, {"ILBC", 8000, 1}},
+ {9, {"G722", 8000, 1}},
+ {120, {"OPUS", 48000, 2}},
+ {13, {"CN", 8000, 1}},
+ {98, {"CN", 16000, 1}},
+ {99, {"CN", 32000, 1}}});
+ } else {
+ ASSERT_EQ(channels, 2u);
+ acm->SetReceiveCodecs({{111, {"L16", 8000, 2}},
+ {112, {"L16", 16000, 2}},
+ {113, {"L16", 32000, 2}},
+ {110, {"PCMU", 8000, 2}},
+ {118, {"PCMA", 8000, 2}},
+ {119, {"G722", 8000, 2}},
+ {120, {"OPUS", 48000, 2, {{"stereo", "1"}}}}});
+ }
+
+  std::string file_name;
+ rtc::StringBuilder file_stream;
+ file_stream << webrtc::test::OutputPath() << out_file_name << file_num
+ << ".pcm";
+ file_name = file_stream.str();
+ _rtpStream = rtpStream;
+
+  const int playSampFreq = 32000;
+  _pcmFile.Open(file_name, playSampFreq, "wb+");
+
+ _realPayloadSizeBytes = 0;
+ _playoutBuffer = new int16_t[kWebRtc10MsPcmAudio];
+ _frequency = playSampFreq;
+ _acm = acm;
+ _firstTime = true;
+}
+
+void Receiver::Teardown() {
+ delete[] _playoutBuffer;
+ _pcmFile.Close();
+}
+
+bool Receiver::IncomingPacket() {
+ if (!_rtpStream->EndOfFile()) {
+ if (_firstTime) {
+ _firstTime = false;
+ _realPayloadSizeBytes = _rtpStream->Read(&_rtpHeader, _incomingPayload,
+ _payloadSizeBytes, &_nextTime);
+ if (_realPayloadSizeBytes == 0) {
+ if (_rtpStream->EndOfFile()) {
+ _firstTime = true;
+ return true;
+ } else {
+ return false;
+ }
+ }
+ }
+
+ EXPECT_EQ(0, _acm->IncomingPacket(_incomingPayload, _realPayloadSizeBytes,
+ _rtpHeader));
+ _realPayloadSizeBytes = _rtpStream->Read(&_rtpHeader, _incomingPayload,
+ _payloadSizeBytes, &_nextTime);
+ if (_realPayloadSizeBytes == 0 && _rtpStream->EndOfFile()) {
+ _firstTime = true;
+ }
+ }
+ return true;
+}
+
+bool Receiver::PlayoutData() {
+ AudioFrame audioFrame;
+ bool muted;
+ int32_t ok = _acm->PlayoutData10Ms(_frequency, &audioFrame, &muted);
+ if (muted) {
+ ADD_FAILURE();
+ return false;
+ }
+ EXPECT_EQ(0, ok);
+ if (ok < 0) {
+ return false;
+ }
+ if (_playoutLengthSmpls == 0) {
+ return false;
+ }
+ _pcmFile.Write10MsData(audioFrame.data(), audioFrame.samples_per_channel_ *
+ audioFrame.num_channels_);
+ return true;
+}
+
+void Receiver::Run() {
+ uint8_t counter500Ms = 50;
+ uint32_t clock = 0;
+
+ while (counter500Ms > 0) {
+ if (clock == 0 || clock >= _nextTime) {
+ EXPECT_TRUE(IncomingPacket());
+ if (clock == 0) {
+ clock = _nextTime;
+ }
+ }
+ if ((clock % 10) == 0) {
+ if (!PlayoutData()) {
+ clock++;
+ continue;
+ }
+ }
+ if (_rtpStream->EndOfFile()) {
+ counter500Ms--;
+ }
+ clock++;
+ }
+}
+
+EncodeDecodeTest::EncodeDecodeTest() = default;
+
+void EncodeDecodeTest::Perform() {
+ const std::map<int, SdpAudioFormat> send_codecs = {
+ {103, {"ISAC", 16000, 1}}, {104, {"ISAC", 32000, 1}},
+ {107, {"L16", 8000, 1}}, {108, {"L16", 16000, 1}},
+ {109, {"L16", 32000, 1}}, {0, {"PCMU", 8000, 1}},
+ {8, {"PCMA", 8000, 1}},
+#ifdef WEBRTC_CODEC_ILBC
+ {102, {"ILBC", 8000, 1}},
+#endif
+ {9, {"G722", 8000, 1}}};
+ int file_num = 0;
+ for (const auto& send_codec : send_codecs) {
+ RTPFile rtpFile;
+ std::unique_ptr<AudioCodingModule> acm(AudioCodingModule::Create(
+ AudioCodingModule::Config(CreateBuiltinAudioDecoderFactory())));
+
+ std::string fileName = webrtc::test::TempFilename(
+ webrtc::test::OutputPath(), "encode_decode_rtp");
+ rtpFile.Open(fileName.c_str(), "wb+");
+ rtpFile.WriteHeader();
+ Sender sender;
+ sender.Setup(acm.get(), &rtpFile, "audio_coding/testfile32kHz", 32000,
+ send_codec.first, send_codec.second);
+ sender.Run();
+ sender.Teardown();
+ rtpFile.Close();
+
+ rtpFile.Open(fileName.c_str(), "rb");
+ rtpFile.ReadHeader();
+ Receiver receiver;
+ receiver.Setup(acm.get(), &rtpFile, "encodeDecode_out", 1, file_num);
+ receiver.Run();
+ receiver.Teardown();
+ rtpFile.Close();
+
+ file_num++;
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/test/EncodeDecodeTest.h b/third_party/libwebrtc/modules/audio_coding/test/EncodeDecodeTest.h
new file mode 100644
index 0000000000..89b76440ef
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/test/EncodeDecodeTest.h
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_TEST_ENCODEDECODETEST_H_
+#define MODULES_AUDIO_CODING_TEST_ENCODEDECODETEST_H_
+
+#include <stdio.h>
+#include <string.h>
+
+#include "absl/strings/string_view.h"
+#include "modules/audio_coding/include/audio_coding_module.h"
+#include "modules/audio_coding/test/PCMFile.h"
+#include "modules/audio_coding/test/RTPFile.h"
+#include "modules/include/module_common_types.h"
+
+namespace webrtc {
+
+#define MAX_INCOMING_PAYLOAD 8096
+
+// TestPacketization is a callback that writes the encoded payloads to an
+// RTPStream.
+class TestPacketization : public AudioPacketizationCallback {
+ public:
+ TestPacketization(RTPStream* rtpStream, uint16_t frequency);
+ ~TestPacketization();
+ int32_t SendData(AudioFrameType frameType,
+ uint8_t payloadType,
+ uint32_t timeStamp,
+ const uint8_t* payloadData,
+ size_t payloadSize,
+ int64_t absolute_capture_timestamp_ms) override;
+
+ private:
+ static void MakeRTPheader(uint8_t* rtpHeader,
+ uint8_t payloadType,
+ int16_t seqNo,
+ uint32_t timeStamp,
+ uint32_t ssrc);
+ RTPStream* _rtpStream;
+ int32_t _frequency;
+ int16_t _seqNo;
+};
+
+class Sender {
+ public:
+ Sender();
+ void Setup(AudioCodingModule* acm,
+ RTPStream* rtpStream,
+ absl::string_view in_file_name,
+ int in_sample_rate,
+ int payload_type,
+ SdpAudioFormat format);
+ void Teardown();
+ void Run();
+ bool Add10MsData();
+
+ protected:
+ AudioCodingModule* _acm;
+
+ private:
+ PCMFile _pcmFile;
+ AudioFrame _audioFrame;
+ TestPacketization* _packetization;
+};
+
+class Receiver {
+ public:
+ Receiver();
+ virtual ~Receiver() {}
+ void Setup(AudioCodingModule* acm,
+ RTPStream* rtpStream,
+ absl::string_view out_file_name,
+ size_t channels,
+ int file_num);
+ void Teardown();
+ void Run();
+ virtual bool IncomingPacket();
+ bool PlayoutData();
+
+ private:
+ PCMFile _pcmFile;
+ int16_t* _playoutBuffer;
+ uint16_t _playoutLengthSmpls;
+ int32_t _frequency;
+ bool _firstTime;
+
+ protected:
+ AudioCodingModule* _acm;
+ uint8_t _incomingPayload[MAX_INCOMING_PAYLOAD];
+ RTPStream* _rtpStream;
+ RTPHeader _rtpHeader;
+ size_t _realPayloadSizeBytes;
+ size_t _payloadSizeBytes;
+ uint32_t _nextTime;
+};
+
+class EncodeDecodeTest {
+ public:
+ EncodeDecodeTest();
+ void Perform();
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_CODING_TEST_ENCODEDECODETEST_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/test/PCMFile.cc b/third_party/libwebrtc/modules/audio_coding/test/PCMFile.cc
new file mode 100644
index 0000000000..e069a42de1
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/test/PCMFile.cc
@@ -0,0 +1,240 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/test/PCMFile.h"
+
+#include <ctype.h>
+#include <stdio.h>
+#include <string.h>
+
+#include "absl/strings/string_view.h"
+#include "rtc_base/checks.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+#define MAX_FILE_NAME_LENGTH_BYTE 500
+
+PCMFile::PCMFile()
+ : pcm_file_(NULL),
+ samples_10ms_(160),
+ frequency_(16000),
+ end_of_file_(false),
+ auto_rewind_(false),
+ rewinded_(false),
+ read_stereo_(false),
+ save_stereo_(false) {
+ timestamp_ =
+ (((uint32_t)rand() & 0x0000FFFF) << 16) | ((uint32_t)rand() & 0x0000FFFF);
+}
+
+PCMFile::PCMFile(uint32_t timestamp)
+ : pcm_file_(NULL),
+ samples_10ms_(160),
+ frequency_(16000),
+ end_of_file_(false),
+ auto_rewind_(false),
+ rewinded_(false),
+ read_stereo_(false),
+ save_stereo_(false) {
+ timestamp_ = timestamp;
+}
+
+PCMFile::~PCMFile() {
+ if (pcm_file_) {
+ fclose(pcm_file_);
+ }
+}
+
+int16_t PCMFile::ChooseFile(std::string* file_name,
+ int16_t max_len,
+ uint16_t* frequency_hz) {
+ char tmp_name[MAX_FILE_NAME_LENGTH_BYTE];
+
+ EXPECT_TRUE(fgets(tmp_name, MAX_FILE_NAME_LENGTH_BYTE, stdin) != NULL);
+ tmp_name[MAX_FILE_NAME_LENGTH_BYTE - 1] = '\0';
+ int16_t n = 0;
+
+  // Remove leading whitespace and control characters.
+ while ((isspace(static_cast<unsigned char>(tmp_name[n])) ||
+ iscntrl(static_cast<unsigned char>(tmp_name[n]))) &&
+ (static_cast<unsigned char>(tmp_name[n]) != 0) &&
+ (n < MAX_FILE_NAME_LENGTH_BYTE)) {
+ n++;
+ }
+ if (n > 0) {
+ memmove(tmp_name, &tmp_name[n], MAX_FILE_NAME_LENGTH_BYTE - n);
+ }
+
+  // Remove trailing whitespace and control characters.
+ n = (int16_t)(strlen(tmp_name) - 1);
+ if (n >= 0) {
+ while ((isspace(static_cast<unsigned char>(tmp_name[n])) ||
+ iscntrl(static_cast<unsigned char>(tmp_name[n]))) &&
+ (n >= 0)) {
+ n--;
+ }
+ }
+ if (n >= 0) {
+ tmp_name[n + 1] = '\0';
+ }
+
+ int16_t len = (int16_t)strlen(tmp_name);
+ if (len > max_len) {
+ return -1;
+ }
+  if (len > 0) {
+    // Copy exactly `len` characters, excluding the terminating '\0'.
+    *file_name = std::string(tmp_name, len);
+  }
+ printf("Enter the sampling frequency (in Hz) of the above file [%u]: ",
+ *frequency_hz);
+ EXPECT_TRUE(fgets(tmp_name, 10, stdin) != NULL);
+ uint16_t tmp_frequency = (uint16_t)atoi(tmp_name);
+ if (tmp_frequency > 0) {
+ *frequency_hz = tmp_frequency;
+ }
+ return 0;
+}
+
+void PCMFile::Open(absl::string_view file_name,
+ uint16_t frequency,
+ absl::string_view mode,
+ bool auto_rewind) {
+ if ((pcm_file_ = fopen(std::string(file_name).c_str(),
+ std::string(mode).c_str())) == NULL) {
+ printf("Cannot open file %s.\n", std::string(file_name).c_str());
+ ADD_FAILURE() << "Unable to read file";
+ }
+ frequency_ = frequency;
+ samples_10ms_ = (uint16_t)(frequency_ / 100);
+ auto_rewind_ = auto_rewind;
+ end_of_file_ = false;
+ rewinded_ = false;
+}
+
+int32_t PCMFile::SamplingFrequency() const {
+ return frequency_;
+}
+
+uint16_t PCMFile::PayloadLength10Ms() const {
+ return samples_10ms_;
+}
+
+int32_t PCMFile::Read10MsData(AudioFrame& audio_frame) {
+ uint16_t channels = 1;
+ if (read_stereo_) {
+ channels = 2;
+ }
+
+ int32_t payload_size =
+ (int32_t)fread(audio_frame.mutable_data(), sizeof(uint16_t),
+ samples_10ms_ * channels, pcm_file_);
+ if (payload_size < samples_10ms_ * channels) {
+ int16_t* frame_data = audio_frame.mutable_data();
+ for (int k = payload_size; k < samples_10ms_ * channels; k++) {
+ frame_data[k] = 0;
+ }
+ if (auto_rewind_) {
+ rewind(pcm_file_);
+ rewinded_ = true;
+ } else {
+ end_of_file_ = true;
+ }
+ }
+ audio_frame.samples_per_channel_ = samples_10ms_;
+ audio_frame.sample_rate_hz_ = frequency_;
+ audio_frame.num_channels_ = channels;
+ audio_frame.timestamp_ = timestamp_;
+ timestamp_ += samples_10ms_;
+ ++blocks_read_;
+ if (num_10ms_blocks_to_read_ && blocks_read_ >= *num_10ms_blocks_to_read_)
+ end_of_file_ = true;
+ return samples_10ms_;
+}
+
+void PCMFile::Write10MsData(const AudioFrame& audio_frame) {
+ if (audio_frame.num_channels_ == 1) {
+ if (!save_stereo_) {
+      if (fwrite(audio_frame.data(), sizeof(int16_t),
+ audio_frame.samples_per_channel_, pcm_file_) !=
+ static_cast<size_t>(audio_frame.samples_per_channel_)) {
+ return;
+ }
+    } else {
+      // Duplicate the mono samples into an interleaved stereo buffer.
+      const int16_t* frame_data = audio_frame.data();
+      int16_t* stereo_audio = new int16_t[2 * audio_frame.samples_per_channel_];
+      for (size_t k = 0; k < audio_frame.samples_per_channel_; k++) {
+        stereo_audio[k << 1] = frame_data[k];
+        stereo_audio[(k << 1) + 1] = frame_data[k];
+      }
+      const size_t written =
+          fwrite(stereo_audio, sizeof(int16_t),
+                 2 * audio_frame.samples_per_channel_, pcm_file_);
+      // Free the buffer before any early return so a short write cannot
+      // leak it.
+      delete[] stereo_audio;
+      if (written != 2 * audio_frame.samples_per_channel_) {
+        return;
+      }
+    }
+ } else {
+ if (fwrite(audio_frame.data(), sizeof(int16_t),
+ audio_frame.num_channels_ * audio_frame.samples_per_channel_,
+ pcm_file_) !=
+ static_cast<size_t>(audio_frame.num_channels_ *
+ audio_frame.samples_per_channel_)) {
+ return;
+ }
+ }
+}
+
+void PCMFile::Write10MsData(const int16_t* playout_buffer,
+ size_t length_smpls) {
+  if (fwrite(playout_buffer, sizeof(int16_t), length_smpls, pcm_file_) !=
+ length_smpls) {
+ return;
+ }
+}
+
+void PCMFile::Close() {
+ fclose(pcm_file_);
+ pcm_file_ = NULL;
+ blocks_read_ = 0;
+}
+
+void PCMFile::FastForward(int num_10ms_blocks) {
+ const int channels = read_stereo_ ? 2 : 1;
+ long num_bytes_to_move =
+ num_10ms_blocks * sizeof(int16_t) * samples_10ms_ * channels;
+ int error = fseek(pcm_file_, num_bytes_to_move, SEEK_CUR);
+ RTC_DCHECK_EQ(error, 0);
+}
+
+void PCMFile::Rewind() {
+ rewind(pcm_file_);
+ end_of_file_ = false;
+ blocks_read_ = 0;
+}
+
+bool PCMFile::Rewinded() {
+ return rewinded_;
+}
+
+void PCMFile::SaveStereo(bool is_stereo) {
+ save_stereo_ = is_stereo;
+}
+
+void PCMFile::ReadStereo(bool is_stereo) {
+ read_stereo_ = is_stereo;
+}
+
+void PCMFile::SetNum10MsBlocksToRead(int value) {
+ num_10ms_blocks_to_read_ = value;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/test/PCMFile.h b/third_party/libwebrtc/modules/audio_coding/test/PCMFile.h
new file mode 100644
index 0000000000..5320aa63d0
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/test/PCMFile.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_TEST_PCMFILE_H_
+#define MODULES_AUDIO_CODING_TEST_PCMFILE_H_
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <string>
+
+#include "absl/strings/string_view.h"
+#include "absl/types/optional.h"
+#include "api/audio/audio_frame.h"
+
+namespace webrtc {
+
+class PCMFile {
+ public:
+ PCMFile();
+ PCMFile(uint32_t timestamp);
+ ~PCMFile();
+
+ void Open(absl::string_view filename,
+ uint16_t frequency,
+ absl::string_view mode,
+ bool auto_rewind = false);
+
+ int32_t Read10MsData(AudioFrame& audio_frame);
+
+ void Write10MsData(const int16_t* playout_buffer, size_t length_smpls);
+ void Write10MsData(const AudioFrame& audio_frame);
+
+ uint16_t PayloadLength10Ms() const;
+ int32_t SamplingFrequency() const;
+ void Close();
+ bool EndOfFile() const { return end_of_file_; }
+ // Moves forward the specified number of 10 ms blocks. If a limit has been set
+ // with SetNum10MsBlocksToRead, fast-forwarding does not count towards this
+ // limit.
+ void FastForward(int num_10ms_blocks);
+ void Rewind();
+ static int16_t ChooseFile(std::string* file_name,
+ int16_t max_len,
+ uint16_t* frequency_hz);
+ bool Rewinded();
+ void SaveStereo(bool is_stereo = true);
+ void ReadStereo(bool is_stereo = true);
+ // If set, the reading will stop after the specified number of blocks have
+ // been read. When that has happened, EndOfFile() will return true. Calling
+ // Rewind() will reset the counter and start over.
+ void SetNum10MsBlocksToRead(int value);
+
+ private:
+ FILE* pcm_file_;
+ uint16_t samples_10ms_;
+ int32_t frequency_;
+ bool end_of_file_;
+ bool auto_rewind_;
+ bool rewinded_;
+ uint32_t timestamp_;
+ bool read_stereo_;
+ bool save_stereo_;
+ absl::optional<int> num_10ms_blocks_to_read_;
+ int blocks_read_ = 0;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_CODING_TEST_PCMFILE_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/test/PacketLossTest.cc b/third_party/libwebrtc/modules/audio_coding/test/PacketLossTest.cc
new file mode 100644
index 0000000000..799e9c5b1f
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/test/PacketLossTest.cc
@@ -0,0 +1,167 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/test/PacketLossTest.h"
+
+#include <memory>
+
+#include "absl/strings/string_view.h"
+#include "api/audio_codecs/builtin_audio_decoder_factory.h"
+#include "rtc_base/strings/string_builder.h"
+#include "test/gtest.h"
+#include "test/testsupport/file_utils.h"
+
+namespace webrtc {
+
+ReceiverWithPacketLoss::ReceiverWithPacketLoss()
+ : loss_rate_(0),
+ burst_length_(1),
+ packet_counter_(0),
+ lost_packet_counter_(0),
+ burst_lost_counter_(burst_length_) {}
+
+void ReceiverWithPacketLoss::Setup(AudioCodingModule* acm,
+ RTPStream* rtpStream,
+ absl::string_view out_file_name,
+ int channels,
+ int file_num,
+ int loss_rate,
+ int burst_length) {
+ loss_rate_ = loss_rate;
+ burst_length_ = burst_length;
+  // Initialize so that the first packet is never dropped.
+  burst_lost_counter_ = burst_length_;
+ rtc::StringBuilder ss;
+ ss << out_file_name << "_" << loss_rate_ << "_" << burst_length_ << "_";
+ Receiver::Setup(acm, rtpStream, ss.str(), channels, file_num);
+}
+
+bool ReceiverWithPacketLoss::IncomingPacket() {
+ if (!_rtpStream->EndOfFile()) {
+ if (packet_counter_ == 0) {
+ _realPayloadSizeBytes = _rtpStream->Read(&_rtpHeader, _incomingPayload,
+ _payloadSizeBytes, &_nextTime);
+ if (_realPayloadSizeBytes == 0) {
+ if (_rtpStream->EndOfFile()) {
+ packet_counter_ = 0;
+ return true;
+ } else {
+ return false;
+ }
+ }
+ }
+
+ if (!PacketLost()) {
+ _acm->IncomingPacket(_incomingPayload, _realPayloadSizeBytes, _rtpHeader);
+ }
+ packet_counter_++;
+ _realPayloadSizeBytes = _rtpStream->Read(&_rtpHeader, _incomingPayload,
+ _payloadSizeBytes, &_nextTime);
+ if (_realPayloadSizeBytes == 0 && _rtpStream->EndOfFile()) {
+ packet_counter_ = 0;
+ lost_packet_counter_ = 0;
+ }
+ }
+ return true;
+}
+
+bool ReceiverWithPacketLoss::PacketLost() {
+ if (burst_lost_counter_ < burst_length_) {
+ lost_packet_counter_++;
+ burst_lost_counter_++;
+ return true;
+ }
+
+ if (lost_packet_counter_ * 100 < loss_rate_ * packet_counter_) {
+ lost_packet_counter_++;
+ burst_lost_counter_ = 1;
+ return true;
+ }
+ return false;
+}
+
+SenderWithFEC::SenderWithFEC() : expected_loss_rate_(0) {}
+
+void SenderWithFEC::Setup(AudioCodingModule* acm,
+ RTPStream* rtpStream,
+ absl::string_view in_file_name,
+ int payload_type,
+ SdpAudioFormat format,
+ int expected_loss_rate) {
+ Sender::Setup(acm, rtpStream, in_file_name, format.clockrate_hz, payload_type,
+ format);
+ EXPECT_TRUE(SetFEC(true));
+ EXPECT_TRUE(SetPacketLossRate(expected_loss_rate));
+}
+
+bool SenderWithFEC::SetFEC(bool enable_fec) {
+ bool success = false;
+ _acm->ModifyEncoder([&](std::unique_ptr<AudioEncoder>* enc) {
+ if (*enc && (*enc)->SetFec(enable_fec)) {
+ success = true;
+ }
+ });
+ return success;
+}
+
+bool SenderWithFEC::SetPacketLossRate(int expected_loss_rate) {
+ if (_acm->SetPacketLossRate(expected_loss_rate) == 0) {
+ expected_loss_rate_ = expected_loss_rate;
+ return true;
+ }
+ return false;
+}
+
+PacketLossTest::PacketLossTest(int channels,
+ int expected_loss_rate,
+ int actual_loss_rate,
+ int burst_length)
+ : channels_(channels),
+ in_file_name_(channels_ == 1 ? "audio_coding/testfile32kHz"
+ : "audio_coding/teststereo32kHz"),
+ sample_rate_hz_(32000),
+ expected_loss_rate_(expected_loss_rate),
+ actual_loss_rate_(actual_loss_rate),
+ burst_length_(burst_length) {}
+
+void PacketLossTest::Perform() {
+#ifndef WEBRTC_CODEC_OPUS
+ return;
+#else
+ RTPFile rtpFile;
+ std::unique_ptr<AudioCodingModule> acm(AudioCodingModule::Create(
+ AudioCodingModule::Config(CreateBuiltinAudioDecoderFactory())));
+ SdpAudioFormat send_format = SdpAudioFormat("opus", 48000, 2);
+ if (channels_ == 2) {
+ send_format.parameters = {{"stereo", "1"}};
+ }
+
+ std::string fileName = webrtc::test::TempFilename(webrtc::test::OutputPath(),
+ "packet_loss_test");
+ rtpFile.Open(fileName.c_str(), "wb+");
+ rtpFile.WriteHeader();
+ SenderWithFEC sender;
+ sender.Setup(acm.get(), &rtpFile, in_file_name_, 120, send_format,
+ expected_loss_rate_);
+ sender.Run();
+ sender.Teardown();
+ rtpFile.Close();
+
+ rtpFile.Open(fileName.c_str(), "rb");
+ rtpFile.ReadHeader();
+ ReceiverWithPacketLoss receiver;
+ receiver.Setup(acm.get(), &rtpFile, "packetLoss_out", channels_, 15,
+ actual_loss_rate_, burst_length_);
+ receiver.Run();
+ receiver.Teardown();
+ rtpFile.Close();
+#endif
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/test/PacketLossTest.h b/third_party/libwebrtc/modules/audio_coding/test/PacketLossTest.h
new file mode 100644
index 0000000000..d841d65a1b
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/test/PacketLossTest.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_TEST_PACKETLOSSTEST_H_
+#define MODULES_AUDIO_CODING_TEST_PACKETLOSSTEST_H_
+
+#include <string>
+
+#include "absl/strings/string_view.h"
+#include "modules/audio_coding/test/EncodeDecodeTest.h"
+
+namespace webrtc {
+
+class ReceiverWithPacketLoss : public Receiver {
+ public:
+ ReceiverWithPacketLoss();
+ void Setup(AudioCodingModule* acm,
+ RTPStream* rtpStream,
+ absl::string_view out_file_name,
+ int channels,
+ int file_num,
+ int loss_rate,
+ int burst_length);
+ bool IncomingPacket() override;
+
+ protected:
+ bool PacketLost();
+ int loss_rate_;
+ int burst_length_;
+ int packet_counter_;
+ int lost_packet_counter_;
+ int burst_lost_counter_;
+};
+
+class SenderWithFEC : public Sender {
+ public:
+ SenderWithFEC();
+ void Setup(AudioCodingModule* acm,
+ RTPStream* rtpStream,
+ absl::string_view in_file_name,
+ int payload_type,
+ SdpAudioFormat format,
+ int expected_loss_rate);
+ bool SetPacketLossRate(int expected_loss_rate);
+ bool SetFEC(bool enable_fec);
+
+ protected:
+ int expected_loss_rate_;
+};
+
+class PacketLossTest {
+ public:
+ PacketLossTest(int channels,
+                 int expected_loss_rate,
+ int actual_loss_rate,
+ int burst_length);
+ void Perform();
+
+ protected:
+ int channels_;
+ std::string in_file_name_;
+ int sample_rate_hz_;
+ int expected_loss_rate_;
+ int actual_loss_rate_;
+ int burst_length_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_CODING_TEST_PACKETLOSSTEST_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/test/RTPFile.cc b/third_party/libwebrtc/modules/audio_coding/test/RTPFile.cc
new file mode 100644
index 0000000000..0c2ab3c443
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/test/RTPFile.cc
@@ -0,0 +1,235 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "RTPFile.h"
+
+#include <stdlib.h>
+
+#include <limits>
+
+#include "absl/strings/string_view.h"
+
+#ifdef WIN32
+#include <Winsock2.h>
+#else
+#include <arpa/inet.h>
+#endif
+
+// TODO(tlegrand): Consider removing usage of gtest.
+#include "test/gtest.h"
+
+namespace webrtc {
+
+void RTPStream::ParseRTPHeader(RTPHeader* rtp_header,
+ const uint8_t* rtpHeader) {
+ rtp_header->payloadType = rtpHeader[1];
+ rtp_header->sequenceNumber =
+ (static_cast<uint16_t>(rtpHeader[2]) << 8) | rtpHeader[3];
+ rtp_header->timestamp = (static_cast<uint32_t>(rtpHeader[4]) << 24) |
+ (static_cast<uint32_t>(rtpHeader[5]) << 16) |
+ (static_cast<uint32_t>(rtpHeader[6]) << 8) |
+ rtpHeader[7];
+ rtp_header->ssrc = (static_cast<uint32_t>(rtpHeader[8]) << 24) |
+ (static_cast<uint32_t>(rtpHeader[9]) << 16) |
+ (static_cast<uint32_t>(rtpHeader[10]) << 8) |
+ rtpHeader[11];
+}
+
+void RTPStream::MakeRTPheader(uint8_t* rtpHeader,
+ uint8_t payloadType,
+ int16_t seqNo,
+ uint32_t timeStamp,
+ uint32_t ssrc) {
+ rtpHeader[0] = 0x80;
+ rtpHeader[1] = payloadType;
+ rtpHeader[2] = (seqNo >> 8) & 0xFF;
+ rtpHeader[3] = seqNo & 0xFF;
+ rtpHeader[4] = timeStamp >> 24;
+ rtpHeader[5] = (timeStamp >> 16) & 0xFF;
+ rtpHeader[6] = (timeStamp >> 8) & 0xFF;
+ rtpHeader[7] = timeStamp & 0xFF;
+ rtpHeader[8] = ssrc >> 24;
+ rtpHeader[9] = (ssrc >> 16) & 0xFF;
+ rtpHeader[10] = (ssrc >> 8) & 0xFF;
+ rtpHeader[11] = ssrc & 0xFF;
+}
+
+RTPPacket::RTPPacket(uint8_t payloadType,
+ uint32_t timeStamp,
+ int16_t seqNo,
+ const uint8_t* payloadData,
+ size_t payloadSize,
+ uint32_t frequency)
+ : payloadType(payloadType),
+ timeStamp(timeStamp),
+ seqNo(seqNo),
+ payloadSize(payloadSize),
+ frequency(frequency) {
+ if (payloadSize > 0) {
+ this->payloadData = new uint8_t[payloadSize];
+ memcpy(this->payloadData, payloadData, payloadSize);
+ }
+}
+
+RTPPacket::~RTPPacket() {
+ delete[] payloadData;
+}
+
+void RTPBuffer::Write(const uint8_t payloadType,
+ const uint32_t timeStamp,
+ const int16_t seqNo,
+ const uint8_t* payloadData,
+ const size_t payloadSize,
+ uint32_t frequency) {
+ RTPPacket* packet = new RTPPacket(payloadType, timeStamp, seqNo, payloadData,
+ payloadSize, frequency);
+ MutexLock lock(&mutex_);
+ _rtpQueue.push(packet);
+}
+
+size_t RTPBuffer::Read(RTPHeader* rtp_header,
+ uint8_t* payloadData,
+ size_t payloadSize,
+ uint32_t* offset) {
+ RTPPacket* packet;
+ {
+ MutexLock lock(&mutex_);
+ packet = _rtpQueue.front();
+ _rtpQueue.pop();
+ }
+ rtp_header->markerBit = 1;
+ rtp_header->payloadType = packet->payloadType;
+ rtp_header->sequenceNumber = packet->seqNo;
+ rtp_header->ssrc = 0;
+ rtp_header->timestamp = packet->timeStamp;
+ if (packet->payloadSize > 0 && payloadSize >= packet->payloadSize) {
+ memcpy(payloadData, packet->payloadData, packet->payloadSize);
+ } else {
+ return 0;
+ }
+ *offset = (packet->timeStamp / (packet->frequency / 1000));
+
+ return packet->payloadSize;
+}
+
+bool RTPBuffer::EndOfFile() const {
+ MutexLock lock(&mutex_);
+ return _rtpQueue.empty();
+}
+
+void RTPFile::Open(absl::string_view filename, absl::string_view mode) {
+ std::string filename_str = std::string(filename);
+ if ((_rtpFile = fopen(filename_str.c_str(), std::string(mode).c_str())) ==
+ NULL) {
+ printf("Cannot write file %s.\n", filename_str.c_str());
+ ADD_FAILURE() << "Unable to write file";
+ exit(1);
+ }
+}
+
+void RTPFile::Close() {
+ if (_rtpFile != NULL) {
+ fclose(_rtpFile);
+ _rtpFile = NULL;
+ }
+}
+
+void RTPFile::WriteHeader() {
+  // Write a file header in the format that NetEQ and rtpplay can parse.
+ fprintf(_rtpFile, "#!RTPencode%s\n", "1.0");
+ uint32_t dummy_variable = 0;
+  // Should be converted to network byte order, but that does not matter for 0.
+ EXPECT_EQ(1u, fwrite(&dummy_variable, 4, 1, _rtpFile));
+ EXPECT_EQ(1u, fwrite(&dummy_variable, 4, 1, _rtpFile));
+ EXPECT_EQ(1u, fwrite(&dummy_variable, 4, 1, _rtpFile));
+ EXPECT_EQ(1u, fwrite(&dummy_variable, 2, 1, _rtpFile));
+ EXPECT_EQ(1u, fwrite(&dummy_variable, 2, 1, _rtpFile));
+ fflush(_rtpFile);
+}
+
+void RTPFile::ReadHeader() {
+ uint32_t start_sec, start_usec, source;
+ uint16_t port, padding;
+ char fileHeader[40];
+ EXPECT_TRUE(fgets(fileHeader, 40, _rtpFile) != 0);
+ EXPECT_EQ(1u, fread(&start_sec, 4, 1, _rtpFile));
+ start_sec = ntohl(start_sec);
+ EXPECT_EQ(1u, fread(&start_usec, 4, 1, _rtpFile));
+ start_usec = ntohl(start_usec);
+ EXPECT_EQ(1u, fread(&source, 4, 1, _rtpFile));
+ source = ntohl(source);
+ EXPECT_EQ(1u, fread(&port, 2, 1, _rtpFile));
+ port = ntohs(port);
+ EXPECT_EQ(1u, fread(&padding, 2, 1, _rtpFile));
+ padding = ntohs(padding);
+}
+
+void RTPFile::Write(const uint8_t payloadType,
+ const uint32_t timeStamp,
+ const int16_t seqNo,
+ const uint8_t* payloadData,
+ const size_t payloadSize,
+ uint32_t frequency) {
+  // Write the RTP packet to file.
+ uint8_t rtpHeader[12];
+ MakeRTPheader(rtpHeader, payloadType, seqNo, timeStamp, 0);
+  ASSERT_LE(12 + payloadSize + 8, std::numeric_limits<uint16_t>::max());
+  uint16_t lengthBytes = htons(static_cast<uint16_t>(12 + payloadSize + 8));
+  uint16_t plen = htons(static_cast<uint16_t>(12 + payloadSize));
+ uint32_t offsetMs;
+
+ offsetMs = (timeStamp / (frequency / 1000));
+ offsetMs = htonl(offsetMs);
+ EXPECT_EQ(1u, fwrite(&lengthBytes, 2, 1, _rtpFile));
+ EXPECT_EQ(1u, fwrite(&plen, 2, 1, _rtpFile));
+ EXPECT_EQ(1u, fwrite(&offsetMs, 4, 1, _rtpFile));
+ EXPECT_EQ(1u, fwrite(&rtpHeader, 12, 1, _rtpFile));
+ EXPECT_EQ(payloadSize, fwrite(payloadData, 1, payloadSize, _rtpFile));
+}
+
+size_t RTPFile::Read(RTPHeader* rtp_header,
+ uint8_t* payloadData,
+ size_t payloadSize,
+ uint32_t* offset) {
+ uint16_t lengthBytes;
+ uint16_t plen;
+ uint8_t rtpHeader[12];
+ size_t read_len = fread(&lengthBytes, 2, 1, _rtpFile);
+  // Check if we have reached end of file.
+ if ((read_len == 0) && feof(_rtpFile)) {
+ _rtpEOF = true;
+ return 0;
+ }
+ EXPECT_EQ(1u, fread(&plen, 2, 1, _rtpFile));
+ EXPECT_EQ(1u, fread(offset, 4, 1, _rtpFile));
+ lengthBytes = ntohs(lengthBytes);
+ plen = ntohs(plen);
+ *offset = ntohl(*offset);
+ EXPECT_GT(plen, 11);
+
+ EXPECT_EQ(1u, fread(rtpHeader, 12, 1, _rtpFile));
+ ParseRTPHeader(rtp_header, rtpHeader);
+ EXPECT_EQ(lengthBytes, plen + 8);
+
+ if (plen == 0) {
+ return 0;
+ }
+ if (lengthBytes < 20) {
+ return 0;
+ }
+ if (payloadSize < static_cast<size_t>((lengthBytes - 20))) {
+ return 0;
+ }
+ lengthBytes -= 20;
+ EXPECT_EQ(lengthBytes, fread(payloadData, 1, lengthBytes, _rtpFile));
+ return lengthBytes;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/test/RTPFile.h b/third_party/libwebrtc/modules/audio_coding/test/RTPFile.h
new file mode 100644
index 0000000000..b796491da9
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/test/RTPFile.h
@@ -0,0 +1,133 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_TEST_RTPFILE_H_
+#define MODULES_AUDIO_CODING_TEST_RTPFILE_H_
+
+#include <stdio.h>
+
+#include <queue>
+
+#include "absl/strings/string_view.h"
+#include "api/rtp_headers.h"
+#include "rtc_base/synchronization/mutex.h"
+#include "rtc_base/thread_annotations.h"
+
+namespace webrtc {
+
+class RTPStream {
+ public:
+ virtual ~RTPStream() {}
+
+ virtual void Write(uint8_t payloadType,
+ uint32_t timeStamp,
+ int16_t seqNo,
+ const uint8_t* payloadData,
+ size_t payloadSize,
+ uint32_t frequency) = 0;
+
+  // Returns the packet's payload size. A return value of zero means
+  // end-of-stream when EndOfFile() is true, and an error otherwise.
+ virtual size_t Read(RTPHeader* rtp_Header,
+ uint8_t* payloadData,
+ size_t payloadSize,
+ uint32_t* offset) = 0;
+ virtual bool EndOfFile() const = 0;
+
+ protected:
+ void MakeRTPheader(uint8_t* rtpHeader,
+ uint8_t payloadType,
+ int16_t seqNo,
+ uint32_t timeStamp,
+ uint32_t ssrc);
+
+ void ParseRTPHeader(RTPHeader* rtp_header, const uint8_t* rtpHeader);
+};
+
+class RTPPacket {
+ public:
+ RTPPacket(uint8_t payloadType,
+ uint32_t timeStamp,
+ int16_t seqNo,
+ const uint8_t* payloadData,
+ size_t payloadSize,
+ uint32_t frequency);
+
+ ~RTPPacket();
+
+ uint8_t payloadType;
+ uint32_t timeStamp;
+ int16_t seqNo;
+ uint8_t* payloadData;
+ size_t payloadSize;
+ uint32_t frequency;
+};
+
+class RTPBuffer : public RTPStream {
+ public:
+ RTPBuffer() = default;
+
+ ~RTPBuffer() = default;
+
+ void Write(uint8_t payloadType,
+ uint32_t timeStamp,
+ int16_t seqNo,
+ const uint8_t* payloadData,
+ size_t payloadSize,
+ uint32_t frequency) override;
+
+ size_t Read(RTPHeader* rtp_header,
+ uint8_t* payloadData,
+ size_t payloadSize,
+ uint32_t* offset) override;
+
+ bool EndOfFile() const override;
+
+ private:
+ mutable Mutex mutex_;
+ std::queue<RTPPacket*> _rtpQueue RTC_GUARDED_BY(&mutex_);
+};
+
+class RTPFile : public RTPStream {
+ public:
+ ~RTPFile() {}
+
+ RTPFile() : _rtpFile(NULL), _rtpEOF(false) {}
+
+ void Open(absl::string_view outFilename, absl::string_view mode);
+
+ void Close();
+
+ void WriteHeader();
+
+ void ReadHeader();
+
+ void Write(uint8_t payloadType,
+ uint32_t timeStamp,
+ int16_t seqNo,
+ const uint8_t* payloadData,
+ size_t payloadSize,
+ uint32_t frequency) override;
+
+ size_t Read(RTPHeader* rtp_header,
+ uint8_t* payloadData,
+ size_t payloadSize,
+ uint32_t* offset) override;
+
+ bool EndOfFile() const override { return _rtpEOF; }
+
+ private:
+ FILE* _rtpFile;
+ bool _rtpEOF;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_CODING_TEST_RTPFILE_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/test/TestAllCodecs.cc b/third_party/libwebrtc/modules/audio_coding/test/TestAllCodecs.cc
new file mode 100644
index 0000000000..e93df346f1
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/test/TestAllCodecs.cc
@@ -0,0 +1,445 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/test/TestAllCodecs.h"
+
+#include <cstdio>
+#include <limits>
+#include <string>
+
+#include "absl/strings/match.h"
+#include "api/audio_codecs/builtin_audio_decoder_factory.h"
+#include "api/audio_codecs/builtin_audio_encoder_factory.h"
+#include "modules/audio_coding/include/audio_coding_module_typedefs.h"
+#include "modules/include/module_common_types.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/string_encode.h"
+#include "rtc_base/strings/string_builder.h"
+#include "test/gtest.h"
+#include "test/testsupport/file_utils.h"
+
+// Description of the test:
+// In this test we set up a one-way communication channel from a participant
+// called "a" to a participant called "b":
+// a -> channel_a_to_b -> b
+//
+// The test loops through all available mono codecs; each one is encoded at
+// "a", sent over the channel, and decoded at "b".
+
+#define CHECK_ERROR(f) \
+ do { \
+ EXPECT_GE(f, 0) << "Error Calling API"; \
+ } while (0)
+
+namespace {
+const size_t kVariableSize = std::numeric_limits<size_t>::max();
+}  // namespace
+
+namespace webrtc {
+
+// Class for simulating packet handling.
+TestPack::TestPack()
+ : receiver_acm_(NULL),
+ sequence_number_(0),
+ timestamp_diff_(0),
+ last_in_timestamp_(0),
+ total_bytes_(0),
+ payload_size_(0) {}
+
+TestPack::~TestPack() {}
+
+void TestPack::RegisterReceiverACM(AudioCodingModule* acm) {
+  receiver_acm_ = acm;
+}
+
+int32_t TestPack::SendData(AudioFrameType frame_type,
+ uint8_t payload_type,
+ uint32_t timestamp,
+ const uint8_t* payload_data,
+ size_t payload_size,
+ int64_t absolute_capture_timestamp_ms) {
+ RTPHeader rtp_header;
+ int32_t status;
+
+ rtp_header.markerBit = false;
+ rtp_header.ssrc = 0;
+ rtp_header.sequenceNumber = sequence_number_++;
+ rtp_header.payloadType = payload_type;
+ rtp_header.timestamp = timestamp;
+
+ if (frame_type == AudioFrameType::kEmptyFrame) {
+ // Skip this frame.
+ return 0;
+ }
+
+ // Only run mono for all test cases.
+ memcpy(payload_data_, payload_data, payload_size);
+
+ status =
+ receiver_acm_->IncomingPacket(payload_data_, payload_size, rtp_header);
+
+ payload_size_ = payload_size;
+ timestamp_diff_ = timestamp - last_in_timestamp_;
+ last_in_timestamp_ = timestamp;
+ total_bytes_ += payload_size;
+ return status;
+}
+
+size_t TestPack::payload_size() {
+ return payload_size_;
+}
+
+uint32_t TestPack::timestamp_diff() {
+ return timestamp_diff_;
+}
+
+void TestPack::reset_payload_size() {
+ payload_size_ = 0;
+}
+
+TestAllCodecs::TestAllCodecs()
+ : acm_a_(AudioCodingModule::Create(
+ AudioCodingModule::Config(CreateBuiltinAudioDecoderFactory()))),
+ acm_b_(AudioCodingModule::Create(
+ AudioCodingModule::Config(CreateBuiltinAudioDecoderFactory()))),
+ channel_a_to_b_(NULL),
+ test_count_(0),
+ packet_size_samples_(0),
+ packet_size_bytes_(0) {}
+
+TestAllCodecs::~TestAllCodecs() {
+ if (channel_a_to_b_ != NULL) {
+ delete channel_a_to_b_;
+ channel_a_to_b_ = NULL;
+ }
+}
+
+void TestAllCodecs::Perform() {
+ const std::string file_name =
+ webrtc::test::ResourcePath("audio_coding/testfile32kHz", "pcm");
+ infile_a_.Open(file_name, 32000, "rb");
+
+ acm_a_->InitializeReceiver();
+ acm_b_->InitializeReceiver();
+
+ acm_b_->SetReceiveCodecs({{103, {"ISAC", 16000, 1}},
+ {104, {"ISAC", 32000, 1}},
+ {107, {"L16", 8000, 1}},
+ {108, {"L16", 16000, 1}},
+ {109, {"L16", 32000, 1}},
+ {111, {"L16", 8000, 2}},
+ {112, {"L16", 16000, 2}},
+ {113, {"L16", 32000, 2}},
+ {0, {"PCMU", 8000, 1}},
+ {110, {"PCMU", 8000, 2}},
+ {8, {"PCMA", 8000, 1}},
+ {118, {"PCMA", 8000, 2}},
+ {102, {"ILBC", 8000, 1}},
+ {9, {"G722", 8000, 1}},
+ {119, {"G722", 8000, 2}},
+ {120, {"OPUS", 48000, 2, {{"stereo", "1"}}}},
+ {13, {"CN", 8000, 1}},
+ {98, {"CN", 16000, 1}},
+ {99, {"CN", 32000, 1}}});
+
+ // Create and connect the channel
+ channel_a_to_b_ = new TestPack;
+ acm_a_->RegisterTransportCallback(channel_a_to_b_);
+ channel_a_to_b_->RegisterReceiverACM(acm_b_.get());
+
+ // All codecs are tested for all allowed sampling frequencies, rates and
+ // packet sizes.
+ test_count_++;
+ OpenOutFile(test_count_);
+ char codec_g722[] = "G722";
+ RegisterSendCodec('A', codec_g722, 16000, 64000, 160, 0);
+ Run(channel_a_to_b_);
+ RegisterSendCodec('A', codec_g722, 16000, 64000, 320, 0);
+ Run(channel_a_to_b_);
+ RegisterSendCodec('A', codec_g722, 16000, 64000, 480, 0);
+ Run(channel_a_to_b_);
+ RegisterSendCodec('A', codec_g722, 16000, 64000, 640, 0);
+ Run(channel_a_to_b_);
+ RegisterSendCodec('A', codec_g722, 16000, 64000, 800, 0);
+ Run(channel_a_to_b_);
+ RegisterSendCodec('A', codec_g722, 16000, 64000, 960, 0);
+ Run(channel_a_to_b_);
+ outfile_b_.Close();
+#ifdef WEBRTC_CODEC_ILBC
+ test_count_++;
+ OpenOutFile(test_count_);
+ char codec_ilbc[] = "ILBC";
+ RegisterSendCodec('A', codec_ilbc, 8000, 13300, 240, 0);
+ Run(channel_a_to_b_);
+ RegisterSendCodec('A', codec_ilbc, 8000, 13300, 480, 0);
+ Run(channel_a_to_b_);
+ RegisterSendCodec('A', codec_ilbc, 8000, 15200, 160, 0);
+ Run(channel_a_to_b_);
+ RegisterSendCodec('A', codec_ilbc, 8000, 15200, 320, 0);
+ Run(channel_a_to_b_);
+ outfile_b_.Close();
+#endif
+#if (defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX))
+ test_count_++;
+ OpenOutFile(test_count_);
+ char codec_isac[] = "ISAC";
+ RegisterSendCodec('A', codec_isac, 16000, -1, 480, kVariableSize);
+ Run(channel_a_to_b_);
+ RegisterSendCodec('A', codec_isac, 16000, -1, 960, kVariableSize);
+ Run(channel_a_to_b_);
+ RegisterSendCodec('A', codec_isac, 16000, 15000, 480, kVariableSize);
+ Run(channel_a_to_b_);
+ RegisterSendCodec('A', codec_isac, 16000, 32000, 960, kVariableSize);
+ Run(channel_a_to_b_);
+ outfile_b_.Close();
+#endif
+#ifdef WEBRTC_CODEC_ISAC
+ test_count_++;
+ OpenOutFile(test_count_);
+ RegisterSendCodec('A', codec_isac, 32000, -1, 960, kVariableSize);
+ Run(channel_a_to_b_);
+ RegisterSendCodec('A', codec_isac, 32000, 56000, 960, kVariableSize);
+ Run(channel_a_to_b_);
+ RegisterSendCodec('A', codec_isac, 32000, 37000, 960, kVariableSize);
+ Run(channel_a_to_b_);
+ RegisterSendCodec('A', codec_isac, 32000, 32000, 960, kVariableSize);
+ Run(channel_a_to_b_);
+ outfile_b_.Close();
+#endif
+ test_count_++;
+ OpenOutFile(test_count_);
+ char codec_l16[] = "L16";
+ RegisterSendCodec('A', codec_l16, 8000, 128000, 80, 0);
+ Run(channel_a_to_b_);
+ RegisterSendCodec('A', codec_l16, 8000, 128000, 160, 0);
+ Run(channel_a_to_b_);
+ RegisterSendCodec('A', codec_l16, 8000, 128000, 240, 0);
+ Run(channel_a_to_b_);
+ RegisterSendCodec('A', codec_l16, 8000, 128000, 320, 0);
+ Run(channel_a_to_b_);
+ outfile_b_.Close();
+
+ test_count_++;
+ OpenOutFile(test_count_);
+ RegisterSendCodec('A', codec_l16, 16000, 256000, 160, 0);
+ Run(channel_a_to_b_);
+ RegisterSendCodec('A', codec_l16, 16000, 256000, 320, 0);
+ Run(channel_a_to_b_);
+ RegisterSendCodec('A', codec_l16, 16000, 256000, 480, 0);
+ Run(channel_a_to_b_);
+ RegisterSendCodec('A', codec_l16, 16000, 256000, 640, 0);
+ Run(channel_a_to_b_);
+ outfile_b_.Close();
+
+ test_count_++;
+ OpenOutFile(test_count_);
+ RegisterSendCodec('A', codec_l16, 32000, 512000, 320, 0);
+ Run(channel_a_to_b_);
+ RegisterSendCodec('A', codec_l16, 32000, 512000, 640, 0);
+ Run(channel_a_to_b_);
+ outfile_b_.Close();
+
+ test_count_++;
+ OpenOutFile(test_count_);
+ char codec_pcma[] = "PCMA";
+ RegisterSendCodec('A', codec_pcma, 8000, 64000, 80, 0);
+ Run(channel_a_to_b_);
+ RegisterSendCodec('A', codec_pcma, 8000, 64000, 160, 0);
+ Run(channel_a_to_b_);
+ RegisterSendCodec('A', codec_pcma, 8000, 64000, 240, 0);
+ Run(channel_a_to_b_);
+ RegisterSendCodec('A', codec_pcma, 8000, 64000, 320, 0);
+ Run(channel_a_to_b_);
+ RegisterSendCodec('A', codec_pcma, 8000, 64000, 400, 0);
+ Run(channel_a_to_b_);
+ RegisterSendCodec('A', codec_pcma, 8000, 64000, 480, 0);
+ Run(channel_a_to_b_);
+
+ char codec_pcmu[] = "PCMU";
+ RegisterSendCodec('A', codec_pcmu, 8000, 64000, 80, 0);
+ Run(channel_a_to_b_);
+ RegisterSendCodec('A', codec_pcmu, 8000, 64000, 160, 0);
+ Run(channel_a_to_b_);
+ RegisterSendCodec('A', codec_pcmu, 8000, 64000, 240, 0);
+ Run(channel_a_to_b_);
+ RegisterSendCodec('A', codec_pcmu, 8000, 64000, 320, 0);
+ Run(channel_a_to_b_);
+ RegisterSendCodec('A', codec_pcmu, 8000, 64000, 400, 0);
+ Run(channel_a_to_b_);
+ RegisterSendCodec('A', codec_pcmu, 8000, 64000, 480, 0);
+ Run(channel_a_to_b_);
+ outfile_b_.Close();
+#ifdef WEBRTC_CODEC_OPUS
+ test_count_++;
+ OpenOutFile(test_count_);
+ char codec_opus[] = "OPUS";
+ RegisterSendCodec('A', codec_opus, 48000, 6000, 480, kVariableSize);
+ Run(channel_a_to_b_);
+ RegisterSendCodec('A', codec_opus, 48000, 20000, 480 * 2, kVariableSize);
+ Run(channel_a_to_b_);
+ RegisterSendCodec('A', codec_opus, 48000, 32000, 480 * 4, kVariableSize);
+ Run(channel_a_to_b_);
+ RegisterSendCodec('A', codec_opus, 48000, 48000, 480, kVariableSize);
+ Run(channel_a_to_b_);
+ RegisterSendCodec('A', codec_opus, 48000, 64000, 480 * 4, kVariableSize);
+ Run(channel_a_to_b_);
+ RegisterSendCodec('A', codec_opus, 48000, 96000, 480 * 6, kVariableSize);
+ Run(channel_a_to_b_);
+ RegisterSendCodec('A', codec_opus, 48000, 500000, 480 * 2, kVariableSize);
+ Run(channel_a_to_b_);
+ outfile_b_.Close();
+#endif
+}
+
+// Register the codec to use in the test.
+//
+// Input:   side             - which ACM to use, 'A' or 'B'
+//          codec_name       - name to use when registering the codec
+//          sampling_freq_hz - sampling frequency in Hertz
+//          rate             - bitrate in bits per second
+//          packet_size      - packet size in samples
+//          extra_byte       - extra bytes needed on top of the bitrate used
+//                             when registering, e.g. an internal header; set
+//                             to kVariableSize if the codec has a variable
+//                             bitrate
+void TestAllCodecs::RegisterSendCodec(char side,
+ char* codec_name,
+ int32_t sampling_freq_hz,
+ int rate,
+ int packet_size,
+ size_t extra_byte) {
+ // Store packet-size in samples, used to validate the received packet.
+ // If G.722, store half the size to compensate for the timestamp bug in the
+ // RFC for G.722.
+ // If iSAC runs in adaptive mode, packet size in samples can change on the
+ // fly, so we exclude this test by setting `packet_size_samples_` to -1.
+ int clockrate_hz = sampling_freq_hz;
+ size_t num_channels = 1;
+ if (absl::EqualsIgnoreCase(codec_name, "G722")) {
+ packet_size_samples_ = packet_size / 2;
+ clockrate_hz = sampling_freq_hz / 2;
+ } else if (absl::EqualsIgnoreCase(codec_name, "ISAC") && (rate == -1)) {
+ packet_size_samples_ = -1;
+ } else if (absl::EqualsIgnoreCase(codec_name, "OPUS")) {
+ packet_size_samples_ = packet_size;
+ num_channels = 2;
+ } else {
+ packet_size_samples_ = packet_size;
+ }
+
+  // Store the expected packet size in bytes, used to validate the received
+  // packet. For a variable rate codec (extra_byte == kVariableSize), set it
+  // to kVariableSize.
+ if (extra_byte != kVariableSize) {
+ // Add 0.875 to always round up to a whole byte
+ packet_size_bytes_ =
+ static_cast<size_t>(static_cast<float>(packet_size * rate) /
+ static_cast<float>(sampling_freq_hz * 8) +
+ 0.875) +
+ extra_byte;
+ } else {
+ // Packets will have a variable size.
+ packet_size_bytes_ = kVariableSize;
+ }
+
+ // Set pointer to the ACM where to register the codec.
+ AudioCodingModule* my_acm = NULL;
+ switch (side) {
+ case 'A': {
+ my_acm = acm_a_.get();
+ break;
+ }
+ case 'B': {
+ my_acm = acm_b_.get();
+ break;
+ }
+ default: {
+ break;
+ }
+ }
+ ASSERT_TRUE(my_acm != NULL);
+
+ auto factory = CreateBuiltinAudioEncoderFactory();
+ constexpr int payload_type = 17;
+ SdpAudioFormat format = {codec_name, clockrate_hz, num_channels};
+ format.parameters["ptime"] = rtc::ToString(rtc::CheckedDivExact(
+ packet_size, rtc::CheckedDivExact(sampling_freq_hz, 1000)));
+ my_acm->SetEncoder(
+ factory->MakeAudioEncoder(payload_type, format, absl::nullopt));
+}
+
+void TestAllCodecs::Run(TestPack* channel) {
+ AudioFrame audio_frame;
+
+ int32_t out_freq_hz = outfile_b_.SamplingFrequency();
+ size_t receive_size;
+ uint32_t timestamp_diff;
+ channel->reset_payload_size();
+ int error_count = 0;
+ int counter = 0;
+ // Set test length to 500 ms (50 blocks of 10 ms each).
+ infile_a_.SetNum10MsBlocksToRead(50);
+ // Fast-forward 1 second (100 blocks) since the file starts with silence.
+ infile_a_.FastForward(100);
+
+ while (!infile_a_.EndOfFile()) {
+ // Add 10 msec to ACM.
+ infile_a_.Read10MsData(audio_frame);
+ CHECK_ERROR(acm_a_->Add10MsData(audio_frame));
+
+ // Verify that the received packet size matches the settings.
+ receive_size = channel->payload_size();
+ if (receive_size) {
+ if ((receive_size != packet_size_bytes_) &&
+ (packet_size_bytes_ != kVariableSize)) {
+ error_count++;
+ }
+
+ // Verify that the timestamp is updated with expected length. The counter
+ // is used to avoid problems when switching codec or frame size in the
+ // test.
+ timestamp_diff = channel->timestamp_diff();
+ if ((counter > 10) &&
+ (static_cast<int>(timestamp_diff) != packet_size_samples_) &&
+ (packet_size_samples_ > -1))
+ error_count++;
+ }
+
+ // Run received side of ACM.
+ bool muted;
+ CHECK_ERROR(acm_b_->PlayoutData10Ms(out_freq_hz, &audio_frame, &muted));
+ ASSERT_FALSE(muted);
+
+ // Write output speech to file.
+ outfile_b_.Write10MsData(audio_frame.data(),
+ audio_frame.samples_per_channel_);
+
+ // Update loop counter
+ counter++;
+ }
+
+ EXPECT_EQ(0, error_count);
+
+ if (infile_a_.EndOfFile()) {
+ infile_a_.Rewind();
+ }
+}
+
+void TestAllCodecs::OpenOutFile(int test_number) {
+ std::string filename = webrtc::test::OutputPath();
+ rtc::StringBuilder test_number_str;
+ test_number_str << test_number;
+ filename += "testallcodecs_out_";
+ filename += test_number_str.str();
+ filename += ".pcm";
+ outfile_b_.Open(filename, 32000, "wb");
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/test/TestAllCodecs.h b/third_party/libwebrtc/modules/audio_coding/test/TestAllCodecs.h
new file mode 100644
index 0000000000..0c276414e4
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/test/TestAllCodecs.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_TEST_TESTALLCODECS_H_
+#define MODULES_AUDIO_CODING_TEST_TESTALLCODECS_H_
+
+#include <memory>
+
+#include "modules/audio_coding/include/audio_coding_module.h"
+#include "modules/audio_coding/test/PCMFile.h"
+
+namespace webrtc {
+
+class TestPack : public AudioPacketizationCallback {
+ public:
+ TestPack();
+ ~TestPack();
+
+ void RegisterReceiverACM(AudioCodingModule* acm);
+
+ int32_t SendData(AudioFrameType frame_type,
+ uint8_t payload_type,
+ uint32_t timestamp,
+ const uint8_t* payload_data,
+ size_t payload_size,
+ int64_t absolute_capture_timestamp_ms) override;
+
+ size_t payload_size();
+ uint32_t timestamp_diff();
+ void reset_payload_size();
+
+ private:
+ AudioCodingModule* receiver_acm_;
+ uint16_t sequence_number_;
+ uint8_t payload_data_[60 * 32 * 2 * 2];
+ uint32_t timestamp_diff_;
+ uint32_t last_in_timestamp_;
+ uint64_t total_bytes_;
+ size_t payload_size_;
+};
+
+class TestAllCodecs {
+ public:
+ TestAllCodecs();
+ ~TestAllCodecs();
+
+ void Perform();
+
+ private:
+  // The default value of '-1' indicates that the registration is based only on
+  // codec name, and that a matching sampling frequency is not required.
+  // This is useful for codecs that support several sampling frequencies.
+  // Note! Only mono mode is tested in this test.
+ void RegisterSendCodec(char side,
+ char* codec_name,
+ int32_t sampling_freq_hz,
+ int rate,
+ int packet_size,
+ size_t extra_byte);
+
+ void Run(TestPack* channel);
+ void OpenOutFile(int test_number);
+
+ std::unique_ptr<AudioCodingModule> acm_a_;
+ std::unique_ptr<AudioCodingModule> acm_b_;
+ TestPack* channel_a_to_b_;
+ PCMFile infile_a_;
+ PCMFile outfile_b_;
+ int test_count_;
+ int packet_size_samples_;
+ size_t packet_size_bytes_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_CODING_TEST_TESTALLCODECS_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/test/TestRedFec.cc b/third_party/libwebrtc/modules/audio_coding/test/TestRedFec.cc
new file mode 100644
index 0000000000..892fbc83d6
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/test/TestRedFec.cc
@@ -0,0 +1,233 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/test/TestRedFec.h"
+
+#include <memory>
+#include <utility>
+
+#include "absl/strings/match.h"
+#include "api/audio_codecs/L16/audio_decoder_L16.h"
+#include "api/audio_codecs/L16/audio_encoder_L16.h"
+#include "api/audio_codecs/audio_decoder_factory_template.h"
+#include "api/audio_codecs/audio_encoder_factory_template.h"
+#include "api/audio_codecs/g711/audio_decoder_g711.h"
+#include "api/audio_codecs/g711/audio_encoder_g711.h"
+#include "api/audio_codecs/g722/audio_decoder_g722.h"
+#include "api/audio_codecs/g722/audio_encoder_g722.h"
+#include "api/audio_codecs/isac/audio_decoder_isac_float.h"
+#include "api/audio_codecs/isac/audio_encoder_isac_float.h"
+#include "api/audio_codecs/opus/audio_decoder_opus.h"
+#include "api/audio_codecs/opus/audio_encoder_opus.h"
+#include "modules/audio_coding/codecs/cng/audio_encoder_cng.h"
+#include "modules/audio_coding/codecs/red/audio_encoder_copy_red.h"
+#include "modules/audio_coding/include/audio_coding_module_typedefs.h"
+#include "rtc_base/strings/string_builder.h"
+#include "test/gtest.h"
+#include "test/testsupport/file_utils.h"
+
+namespace webrtc {
+
+TestRedFec::TestRedFec()
+ : encoder_factory_(CreateAudioEncoderFactory<AudioEncoderG711,
+ AudioEncoderG722,
+ AudioEncoderIsacFloat,
+ AudioEncoderL16,
+ AudioEncoderOpus>()),
+ decoder_factory_(CreateAudioDecoderFactory<AudioDecoderG711,
+ AudioDecoderG722,
+ AudioDecoderIsacFloat,
+ AudioDecoderL16,
+ AudioDecoderOpus>()),
+ _acmA(AudioCodingModule::Create(
+ AudioCodingModule::Config(decoder_factory_))),
+ _acmB(AudioCodingModule::Create(
+ AudioCodingModule::Config(decoder_factory_))),
+ _channelA2B(NULL),
+ _testCntr(0) {}
+
+TestRedFec::~TestRedFec() {
+ if (_channelA2B != NULL) {
+ delete _channelA2B;
+ _channelA2B = NULL;
+ }
+}
+
+void TestRedFec::Perform() {
+ const std::string file_name =
+ webrtc::test::ResourcePath("audio_coding/testfile32kHz", "pcm");
+ _inFileA.Open(file_name, 32000, "rb");
+
+ ASSERT_EQ(0, _acmA->InitializeReceiver());
+ ASSERT_EQ(0, _acmB->InitializeReceiver());
+
+ // Create and connect the channel
+ _channelA2B = new Channel;
+ _acmA->RegisterTransportCallback(_channelA2B);
+ _channelA2B->RegisterReceiverACM(_acmB.get());
+
+ RegisterSendCodec(_acmA, {"L16", 8000, 1}, Vad::kVadAggressive, true);
+
+ OpenOutFile(_testCntr);
+ Run();
+ _outFileB.Close();
+
+ // Switch to another 8 kHz codec; RED should remain switched on.
+ RegisterSendCodec(_acmA, {"PCMU", 8000, 1}, Vad::kVadAggressive, true);
+ OpenOutFile(_testCntr);
+ Run();
+ _outFileB.Close();
+
+ // Switch to a 16 kHz codec; RED should be switched off.
+ RegisterSendCodec(_acmA, {"G722", 8000, 1}, Vad::kVadAggressive, false);
+
+ OpenOutFile(_testCntr);
+ RegisterSendCodec(_acmA, {"G722", 8000, 1}, Vad::kVadAggressive, false);
+ Run();
+ RegisterSendCodec(_acmA, {"G722", 8000, 1}, Vad::kVadAggressive, false);
+ Run();
+ _outFileB.Close();
+
+ RegisterSendCodec(_acmA, {"ISAC", 16000, 1}, Vad::kVadVeryAggressive, false);
+ OpenOutFile(_testCntr);
+ Run();
+ _outFileB.Close();
+
+ // Switch to a 32 kHz codec; RED should be switched off.
+ RegisterSendCodec(_acmA, {"ISAC", 32000, 1}, Vad::kVadVeryAggressive, false);
+ OpenOutFile(_testCntr);
+ Run();
+ _outFileB.Close();
+
+ RegisterSendCodec(_acmA, {"ISAC", 32000, 1}, absl::nullopt, false);
+
+ _channelA2B->SetFECTestWithPacketLoss(true);
+  // The following tests run with packet losses.
+
+ // Switch to a 16 kHz codec; RED should be switched off.
+ RegisterSendCodec(_acmA, {"G722", 8000, 1}, Vad::kVadAggressive, false);
+
+ OpenOutFile(_testCntr);
+ Run();
+ _outFileB.Close();
+
+  // Switch to a 16 kHz codec; RED should have been switched off.
+ RegisterSendCodec(_acmA, {"ISAC", 16000, 1}, Vad::kVadVeryAggressive, false);
+
+ OpenOutFile(_testCntr);
+ Run();
+ _outFileB.Close();
+
+  // Switch to a 32 kHz codec; RED should have been switched off.
+ RegisterSendCodec(_acmA, {"ISAC", 32000, 1}, Vad::kVadVeryAggressive, false);
+
+ OpenOutFile(_testCntr);
+ Run();
+ _outFileB.Close();
+
+ RegisterSendCodec(_acmA, {"ISAC", 32000, 1}, absl::nullopt, false);
+
+ RegisterSendCodec(_acmA, {"opus", 48000, 2}, absl::nullopt, false);
+
+  // With the lossy channel enabled above, configure the encoder for an
+  // expected 25% packet loss rate.
+ EXPECT_EQ(0, _acmA->SetPacketLossRate(25));
+
+ _acmA->ModifyEncoder([&](std::unique_ptr<AudioEncoder>* enc) {
+ EXPECT_EQ(true, (*enc)->SetFec(true));
+ });
+
+ OpenOutFile(_testCntr);
+ Run();
+
+ // Switch to L16 with RED.
+ RegisterSendCodec(_acmA, {"L16", 8000, 1}, absl::nullopt, true);
+ Run();
+
+ // Switch to Opus again.
+ RegisterSendCodec(_acmA, {"opus", 48000, 2}, absl::nullopt, false);
+ _acmA->ModifyEncoder([&](std::unique_ptr<AudioEncoder>* enc) {
+ EXPECT_EQ(true, (*enc)->SetFec(false));
+ });
+ Run();
+
+ _acmA->ModifyEncoder([&](std::unique_ptr<AudioEncoder>* enc) {
+ EXPECT_EQ(true, (*enc)->SetFec(true));
+ });
+ _outFileB.Close();
+}
+
+void TestRedFec::RegisterSendCodec(
+ const std::unique_ptr<AudioCodingModule>& acm,
+ const SdpAudioFormat& codec_format,
+ absl::optional<Vad::Aggressiveness> vad_mode,
+ bool use_red) {
+ constexpr int payload_type = 17, cn_payload_type = 27, red_payload_type = 37;
+ const auto& other_acm = &acm == &_acmA ? _acmB : _acmA;
+
+ auto encoder = encoder_factory_->MakeAudioEncoder(payload_type, codec_format,
+ absl::nullopt);
+ EXPECT_NE(encoder, nullptr);
+ std::map<int, SdpAudioFormat> receive_codecs = {{payload_type, codec_format}};
+ if (!absl::EqualsIgnoreCase(codec_format.name, "opus")) {
+ if (vad_mode.has_value()) {
+ AudioEncoderCngConfig config;
+ config.speech_encoder = std::move(encoder);
+ config.num_channels = 1;
+ config.payload_type = cn_payload_type;
+ config.vad_mode = vad_mode.value();
+ encoder = CreateComfortNoiseEncoder(std::move(config));
+ receive_codecs.emplace(std::make_pair(
+ cn_payload_type, SdpAudioFormat("CN", codec_format.clockrate_hz, 1)));
+ }
+ if (use_red) {
+ AudioEncoderCopyRed::Config config;
+ config.payload_type = red_payload_type;
+ config.speech_encoder = std::move(encoder);
+ encoder = std::make_unique<AudioEncoderCopyRed>(std::move(config),
+ field_trials_);
+ receive_codecs.emplace(
+ std::make_pair(red_payload_type,
+ SdpAudioFormat("red", codec_format.clockrate_hz, 1)));
+ }
+ }
+ acm->SetEncoder(std::move(encoder));
+ other_acm->SetReceiveCodecs(receive_codecs);
+}
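+
+// Illustrative note on the encoder layering built by RegisterSendCodec():
+// for a call such as
+//   RegisterSendCodec(_acmA, {"L16", 8000, 1}, Vad::kVadAggressive, true);
+// the encoder handed to the ACM is, schematically,
+//   AudioEncoderCopyRed(ComfortNoiseEncoder(L16 encoder))
+// using the payload types defined above: 37 (RED), 27 (CN) and 17 (speech).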
+
+void TestRedFec::Run() {
+ AudioFrame audioFrame;
+ int32_t outFreqHzB = _outFileB.SamplingFrequency();
+ // Set test length to 500 ms (50 blocks of 10 ms each).
+ _inFileA.SetNum10MsBlocksToRead(50);
+ // Fast-forward 1 second (100 blocks) since the file starts with silence.
+ _inFileA.FastForward(100);
+
+ while (!_inFileA.EndOfFile()) {
+ EXPECT_GT(_inFileA.Read10MsData(audioFrame), 0);
+ EXPECT_GE(_acmA->Add10MsData(audioFrame), 0);
+ bool muted;
+ EXPECT_EQ(0, _acmB->PlayoutData10Ms(outFreqHzB, &audioFrame, &muted));
+ ASSERT_FALSE(muted);
+ _outFileB.Write10MsData(audioFrame.data(), audioFrame.samples_per_channel_);
+ }
+ _inFileA.Rewind();
+}
+
+void TestRedFec::OpenOutFile(int16_t test_number) {
+ std::string file_name;
+ rtc::StringBuilder file_stream;
+ file_stream << webrtc::test::OutputPath();
+ file_stream << "TestRedFec_outFile_";
+ file_stream << test_number << ".pcm";
+ file_name = file_stream.str();
+ _outFileB.Open(file_name, 16000, "wb");
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/test/TestRedFec.h b/third_party/libwebrtc/modules/audio_coding/test/TestRedFec.h
new file mode 100644
index 0000000000..dbadd88487
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/test/TestRedFec.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_TEST_TESTREDFEC_H_
+#define MODULES_AUDIO_CODING_TEST_TESTREDFEC_H_
+
+#include <memory>
+#include <string>
+
+#include "api/audio_codecs/audio_decoder_factory.h"
+#include "api/audio_codecs/audio_encoder_factory.h"
+#include "common_audio/vad/include/vad.h"
+#include "modules/audio_coding/test/Channel.h"
+#include "modules/audio_coding/test/PCMFile.h"
+#include "test/scoped_key_value_config.h"
+
+namespace webrtc {
+
+class TestRedFec {
+ public:
+  TestRedFec();
+ ~TestRedFec();
+
+ void Perform();
+
+ private:
+ void RegisterSendCodec(const std::unique_ptr<AudioCodingModule>& acm,
+ const SdpAudioFormat& codec_format,
+ absl::optional<Vad::Aggressiveness> vad_mode,
+ bool use_red);
+ void Run();
+  void OpenOutFile(int16_t test_number);
+
+ test::ScopedKeyValueConfig field_trials_;
+ const rtc::scoped_refptr<AudioEncoderFactory> encoder_factory_;
+ const rtc::scoped_refptr<AudioDecoderFactory> decoder_factory_;
+ std::unique_ptr<AudioCodingModule> _acmA;
+ std::unique_ptr<AudioCodingModule> _acmB;
+
+ Channel* _channelA2B;
+
+ PCMFile _inFileA;
+ PCMFile _outFileB;
+ int16_t _testCntr;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_CODING_TEST_TESTREDFEC_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/test/TestStereo.cc b/third_party/libwebrtc/modules/audio_coding/test/TestStereo.cc
new file mode 100644
index 0000000000..599fafb602
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/test/TestStereo.cc
@@ -0,0 +1,599 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/test/TestStereo.h"
+
+#include <string>
+
+#include "absl/strings/match.h"
+#include "api/audio_codecs/builtin_audio_decoder_factory.h"
+#include "api/audio_codecs/builtin_audio_encoder_factory.h"
+#include "modules/audio_coding/include/audio_coding_module_typedefs.h"
+#include "modules/include/module_common_types.h"
+#include "rtc_base/strings/string_builder.h"
+#include "test/gtest.h"
+#include "test/testsupport/file_utils.h"
+
+namespace webrtc {
+
+// Class for simulating packet handling
+TestPackStereo::TestPackStereo()
+ : receiver_acm_(NULL),
+ seq_no_(0),
+ timestamp_diff_(0),
+ last_in_timestamp_(0),
+ total_bytes_(0),
+ payload_size_(0),
+ lost_packet_(false) {}
+
+TestPackStereo::~TestPackStereo() {}
+
+void TestPackStereo::RegisterReceiverACM(AudioCodingModule* acm) {
+  receiver_acm_ = acm;
+}
+
+int32_t TestPackStereo::SendData(const AudioFrameType frame_type,
+ const uint8_t payload_type,
+ const uint32_t timestamp,
+ const uint8_t* payload_data,
+ const size_t payload_size,
+ int64_t absolute_capture_timestamp_ms) {
+ RTPHeader rtp_header;
+ int32_t status = 0;
+
+ rtp_header.markerBit = false;
+ rtp_header.ssrc = 0;
+ rtp_header.sequenceNumber = seq_no_++;
+ rtp_header.payloadType = payload_type;
+ rtp_header.timestamp = timestamp;
+ if (frame_type == AudioFrameType::kEmptyFrame) {
+ // Skip this frame
+ return 0;
+ }
+
+ if (lost_packet_ == false) {
+ status =
+ receiver_acm_->IncomingPacket(payload_data, payload_size, rtp_header);
+
+ if (frame_type != AudioFrameType::kAudioFrameCN) {
+ payload_size_ = static_cast<int>(payload_size);
+ } else {
+ payload_size_ = -1;
+ }
+
+ timestamp_diff_ = timestamp - last_in_timestamp_;
+ last_in_timestamp_ = timestamp;
+ total_bytes_ += payload_size;
+ }
+ return status;
+}
+
+uint16_t TestPackStereo::payload_size() {
+ return static_cast<uint16_t>(payload_size_);
+}
+
+uint32_t TestPackStereo::timestamp_diff() {
+ return timestamp_diff_;
+}
+
+void TestPackStereo::reset_payload_size() {
+ payload_size_ = 0;
+}
+
+void TestPackStereo::set_codec_mode(enum StereoMonoMode mode) {
+ codec_mode_ = mode;
+}
+
+void TestPackStereo::set_lost_packet(bool lost) {
+ lost_packet_ = lost;
+}
+
+TestStereo::TestStereo()
+ : acm_a_(AudioCodingModule::Create(
+ AudioCodingModule::Config(CreateBuiltinAudioDecoderFactory()))),
+ acm_b_(AudioCodingModule::Create(
+ AudioCodingModule::Config(CreateBuiltinAudioDecoderFactory()))),
+ channel_a2b_(NULL),
+ test_cntr_(0),
+ pack_size_samp_(0),
+ pack_size_bytes_(0),
+ counter_(0) {}
+
+TestStereo::~TestStereo() {
+ if (channel_a2b_ != NULL) {
+ delete channel_a2b_;
+ channel_a2b_ = NULL;
+ }
+}
+
+void TestStereo::Perform() {
+ uint16_t frequency_hz;
+ int audio_channels;
+ int codec_channels;
+
+ // Open both mono and stereo test files in 32 kHz.
+ const std::string file_name_stereo =
+ webrtc::test::ResourcePath("audio_coding/teststereo32kHz", "pcm");
+ const std::string file_name_mono =
+ webrtc::test::ResourcePath("audio_coding/testfile32kHz", "pcm");
+ frequency_hz = 32000;
+ in_file_stereo_ = new PCMFile();
+ in_file_mono_ = new PCMFile();
+ in_file_stereo_->Open(file_name_stereo, frequency_hz, "rb");
+ in_file_stereo_->ReadStereo(true);
+ in_file_mono_->Open(file_name_mono, frequency_hz, "rb");
+ in_file_mono_->ReadStereo(false);
+
+ // Create and initialize two ACMs, one for each side of a one-to-one call.
+ ASSERT_TRUE((acm_a_.get() != NULL) && (acm_b_.get() != NULL));
+ EXPECT_EQ(0, acm_a_->InitializeReceiver());
+ EXPECT_EQ(0, acm_b_->InitializeReceiver());
+
+ acm_b_->SetReceiveCodecs({{103, {"ISAC", 16000, 1}},
+ {104, {"ISAC", 32000, 1}},
+ {107, {"L16", 8000, 1}},
+ {108, {"L16", 16000, 1}},
+ {109, {"L16", 32000, 1}},
+ {111, {"L16", 8000, 2}},
+ {112, {"L16", 16000, 2}},
+ {113, {"L16", 32000, 2}},
+ {0, {"PCMU", 8000, 1}},
+ {110, {"PCMU", 8000, 2}},
+ {8, {"PCMA", 8000, 1}},
+ {118, {"PCMA", 8000, 2}},
+ {102, {"ILBC", 8000, 1}},
+ {9, {"G722", 8000, 1}},
+ {119, {"G722", 8000, 2}},
+ {120, {"OPUS", 48000, 2, {{"stereo", "1"}}}},
+ {13, {"CN", 8000, 1}},
+ {98, {"CN", 16000, 1}},
+ {99, {"CN", 32000, 1}}});
+
+ // Create and connect the channel.
+ channel_a2b_ = new TestPackStereo;
+ EXPECT_EQ(0, acm_a_->RegisterTransportCallback(channel_a2b_));
+ channel_a2b_->RegisterReceiverACM(acm_b_.get());
+
+ char codec_pcma_temp[] = "PCMA";
+ RegisterSendCodec('A', codec_pcma_temp, 8000, 64000, 80, 2);
+
+ //
+ // Test Stereo-To-Stereo for all codecs.
+ //
+ audio_channels = 2;
+ codec_channels = 2;
+
+ // All codecs are tested for all allowed sampling frequencies, rates and
+ // packet sizes.
+ channel_a2b_->set_codec_mode(kStereo);
+ test_cntr_++;
+ OpenOutFile(test_cntr_);
+ char codec_g722[] = "G722";
+ RegisterSendCodec('A', codec_g722, 16000, 64000, 160, codec_channels);
+ Run(channel_a2b_, audio_channels, codec_channels);
+ RegisterSendCodec('A', codec_g722, 16000, 64000, 320, codec_channels);
+ Run(channel_a2b_, audio_channels, codec_channels);
+ RegisterSendCodec('A', codec_g722, 16000, 64000, 480, codec_channels);
+ Run(channel_a2b_, audio_channels, codec_channels);
+ RegisterSendCodec('A', codec_g722, 16000, 64000, 640, codec_channels);
+ Run(channel_a2b_, audio_channels, codec_channels);
+ RegisterSendCodec('A', codec_g722, 16000, 64000, 800, codec_channels);
+ Run(channel_a2b_, audio_channels, codec_channels);
+ RegisterSendCodec('A', codec_g722, 16000, 64000, 960, codec_channels);
+ Run(channel_a2b_, audio_channels, codec_channels);
+ out_file_.Close();
+
+ channel_a2b_->set_codec_mode(kStereo);
+ test_cntr_++;
+ OpenOutFile(test_cntr_);
+ char codec_l16[] = "L16";
+ RegisterSendCodec('A', codec_l16, 8000, 128000, 80, codec_channels);
+ Run(channel_a2b_, audio_channels, codec_channels);
+ RegisterSendCodec('A', codec_l16, 8000, 128000, 160, codec_channels);
+ Run(channel_a2b_, audio_channels, codec_channels);
+ RegisterSendCodec('A', codec_l16, 8000, 128000, 240, codec_channels);
+ Run(channel_a2b_, audio_channels, codec_channels);
+ RegisterSendCodec('A', codec_l16, 8000, 128000, 320, codec_channels);
+ Run(channel_a2b_, audio_channels, codec_channels);
+ out_file_.Close();
+
+ test_cntr_++;
+ OpenOutFile(test_cntr_);
+ RegisterSendCodec('A', codec_l16, 16000, 256000, 160, codec_channels);
+ Run(channel_a2b_, audio_channels, codec_channels);
+ RegisterSendCodec('A', codec_l16, 16000, 256000, 320, codec_channels);
+ Run(channel_a2b_, audio_channels, codec_channels);
+ RegisterSendCodec('A', codec_l16, 16000, 256000, 480, codec_channels);
+ Run(channel_a2b_, audio_channels, codec_channels);
+ RegisterSendCodec('A', codec_l16, 16000, 256000, 640, codec_channels);
+ Run(channel_a2b_, audio_channels, codec_channels);
+ out_file_.Close();
+
+ test_cntr_++;
+ OpenOutFile(test_cntr_);
+ RegisterSendCodec('A', codec_l16, 32000, 512000, 320, codec_channels);
+ Run(channel_a2b_, audio_channels, codec_channels);
+ RegisterSendCodec('A', codec_l16, 32000, 512000, 640, codec_channels);
+ Run(channel_a2b_, audio_channels, codec_channels);
+ out_file_.Close();
+#ifdef PCMA_AND_PCMU
+ channel_a2b_->set_codec_mode(kStereo);
+ audio_channels = 2;
+ codec_channels = 2;
+ test_cntr_++;
+ OpenOutFile(test_cntr_);
+ char codec_pcma[] = "PCMA";
+ RegisterSendCodec('A', codec_pcma, 8000, 64000, 80, codec_channels);
+ Run(channel_a2b_, audio_channels, codec_channels);
+ RegisterSendCodec('A', codec_pcma, 8000, 64000, 160, codec_channels);
+ Run(channel_a2b_, audio_channels, codec_channels);
+ RegisterSendCodec('A', codec_pcma, 8000, 64000, 240, codec_channels);
+ Run(channel_a2b_, audio_channels, codec_channels);
+ RegisterSendCodec('A', codec_pcma, 8000, 64000, 320, codec_channels);
+ Run(channel_a2b_, audio_channels, codec_channels);
+ RegisterSendCodec('A', codec_pcma, 8000, 64000, 400, codec_channels);
+ Run(channel_a2b_, audio_channels, codec_channels);
+ RegisterSendCodec('A', codec_pcma, 8000, 64000, 480, codec_channels);
+ Run(channel_a2b_, audio_channels, codec_channels);
+ out_file_.Close();
+
+ test_cntr_++;
+ OpenOutFile(test_cntr_);
+ char codec_pcmu[] = "PCMU";
+ RegisterSendCodec('A', codec_pcmu, 8000, 64000, 80, codec_channels);
+ Run(channel_a2b_, audio_channels, codec_channels);
+ RegisterSendCodec('A', codec_pcmu, 8000, 64000, 160, codec_channels);
+ Run(channel_a2b_, audio_channels, codec_channels);
+ RegisterSendCodec('A', codec_pcmu, 8000, 64000, 240, codec_channels);
+ Run(channel_a2b_, audio_channels, codec_channels);
+ RegisterSendCodec('A', codec_pcmu, 8000, 64000, 320, codec_channels);
+ Run(channel_a2b_, audio_channels, codec_channels);
+ RegisterSendCodec('A', codec_pcmu, 8000, 64000, 400, codec_channels);
+ Run(channel_a2b_, audio_channels, codec_channels);
+ RegisterSendCodec('A', codec_pcmu, 8000, 64000, 480, codec_channels);
+ Run(channel_a2b_, audio_channels, codec_channels);
+ out_file_.Close();
+#endif
+#ifdef WEBRTC_CODEC_OPUS
+ channel_a2b_->set_codec_mode(kStereo);
+ audio_channels = 2;
+ codec_channels = 2;
+ test_cntr_++;
+ OpenOutFile(test_cntr_);
+
+ char codec_opus[] = "opus";
+ // Run Opus with 10 ms frame size.
+ RegisterSendCodec('A', codec_opus, 48000, 64000, 480, codec_channels);
+ Run(channel_a2b_, audio_channels, codec_channels);
+ // Run Opus with 20 ms frame size.
+ RegisterSendCodec('A', codec_opus, 48000, 64000, 480 * 2, codec_channels);
+ Run(channel_a2b_, audio_channels, codec_channels);
+ // Run Opus with 40 ms frame size.
+ RegisterSendCodec('A', codec_opus, 48000, 64000, 480 * 4, codec_channels);
+ Run(channel_a2b_, audio_channels, codec_channels);
+ // Run Opus with 60 ms frame size.
+ RegisterSendCodec('A', codec_opus, 48000, 64000, 480 * 6, codec_channels);
+ Run(channel_a2b_, audio_channels, codec_channels);
+ // Run Opus with 20 ms frame size and different bitrates.
+ RegisterSendCodec('A', codec_opus, 48000, 40000, 960, codec_channels);
+ Run(channel_a2b_, audio_channels, codec_channels);
+ RegisterSendCodec('A', codec_opus, 48000, 510000, 960, codec_channels);
+ Run(channel_a2b_, audio_channels, codec_channels);
+ out_file_.Close();
+#endif
+ //
+ // Test Mono-To-Stereo for all codecs.
+ //
+ audio_channels = 1;
+ codec_channels = 2;
+
+ test_cntr_++;
+ channel_a2b_->set_codec_mode(kStereo);
+ OpenOutFile(test_cntr_);
+ RegisterSendCodec('A', codec_g722, 16000, 64000, 160, codec_channels);
+ Run(channel_a2b_, audio_channels, codec_channels);
+ out_file_.Close();
+
+ test_cntr_++;
+ channel_a2b_->set_codec_mode(kStereo);
+ OpenOutFile(test_cntr_);
+ RegisterSendCodec('A', codec_l16, 8000, 128000, 80, codec_channels);
+ Run(channel_a2b_, audio_channels, codec_channels);
+ out_file_.Close();
+
+ test_cntr_++;
+ OpenOutFile(test_cntr_);
+ RegisterSendCodec('A', codec_l16, 16000, 256000, 160, codec_channels);
+ Run(channel_a2b_, audio_channels, codec_channels);
+ out_file_.Close();
+
+ test_cntr_++;
+ OpenOutFile(test_cntr_);
+ RegisterSendCodec('A', codec_l16, 32000, 512000, 320, codec_channels);
+ Run(channel_a2b_, audio_channels, codec_channels);
+ out_file_.Close();
+#ifdef PCMA_AND_PCMU
+ test_cntr_++;
+ channel_a2b_->set_codec_mode(kStereo);
+ OpenOutFile(test_cntr_);
+ RegisterSendCodec('A', codec_pcmu, 8000, 64000, 80, codec_channels);
+ Run(channel_a2b_, audio_channels, codec_channels);
+ RegisterSendCodec('A', codec_pcma, 8000, 64000, 80, codec_channels);
+ Run(channel_a2b_, audio_channels, codec_channels);
+ out_file_.Close();
+#endif
+#ifdef WEBRTC_CODEC_OPUS
+ // Keep encode and decode in stereo.
+ test_cntr_++;
+ channel_a2b_->set_codec_mode(kStereo);
+ OpenOutFile(test_cntr_);
+ RegisterSendCodec('A', codec_opus, 48000, 64000, 960, codec_channels);
+ Run(channel_a2b_, audio_channels, codec_channels);
+
+ // Encode in mono, decode in stereo mode.
+ RegisterSendCodec('A', codec_opus, 48000, 64000, 960, 1);
+ Run(channel_a2b_, audio_channels, codec_channels);
+ out_file_.Close();
+#endif
+
+ //
+ // Test Stereo-To-Mono for all codecs.
+ //
+ audio_channels = 2;
+ codec_channels = 1;
+ channel_a2b_->set_codec_mode(kMono);
+
+ // Run stereo audio and mono codec.
+ test_cntr_++;
+ OpenOutFile(test_cntr_);
+ RegisterSendCodec('A', codec_g722, 16000, 64000, 160, codec_channels);
+ Run(channel_a2b_, audio_channels, codec_channels);
+ out_file_.Close();
+
+ test_cntr_++;
+ OpenOutFile(test_cntr_);
+ RegisterSendCodec('A', codec_l16, 8000, 128000, 80, codec_channels);
+ Run(channel_a2b_, audio_channels, codec_channels);
+ out_file_.Close();
+
+ test_cntr_++;
+ OpenOutFile(test_cntr_);
+ RegisterSendCodec('A', codec_l16, 16000, 256000, 160, codec_channels);
+ Run(channel_a2b_, audio_channels, codec_channels);
+ out_file_.Close();
+
+ test_cntr_++;
+ OpenOutFile(test_cntr_);
+ RegisterSendCodec('A', codec_l16, 32000, 512000, 320, codec_channels);
+ Run(channel_a2b_, audio_channels, codec_channels);
+ out_file_.Close();
+#ifdef PCMA_AND_PCMU
+ test_cntr_++;
+ OpenOutFile(test_cntr_);
+ RegisterSendCodec('A', codec_pcmu, 8000, 64000, 80, codec_channels);
+ Run(channel_a2b_, audio_channels, codec_channels);
+ RegisterSendCodec('A', codec_pcma, 8000, 64000, 80, codec_channels);
+ Run(channel_a2b_, audio_channels, codec_channels);
+ out_file_.Close();
+#endif
+#ifdef WEBRTC_CODEC_OPUS
+ test_cntr_++;
+ OpenOutFile(test_cntr_);
+ // Encode and decode in mono.
+ RegisterSendCodec('A', codec_opus, 48000, 32000, 960, codec_channels);
+ acm_b_->SetReceiveCodecs({{120, {"OPUS", 48000, 2}}});
+ Run(channel_a2b_, audio_channels, codec_channels);
+
+ // Encode in stereo, decode in mono.
+ RegisterSendCodec('A', codec_opus, 48000, 32000, 960, 2);
+ Run(channel_a2b_, audio_channels, codec_channels);
+
+ out_file_.Close();
+
+ // Test switching between decoding mono and stereo for Opus.
+
+ // Decode in mono.
+ test_cntr_++;
+ OpenOutFile(test_cntr_);
+ Run(channel_a2b_, audio_channels, codec_channels);
+ out_file_.Close();
+ // Decode in stereo.
+ test_cntr_++;
+ OpenOutFile(test_cntr_);
+ acm_b_->SetReceiveCodecs({{120, {"OPUS", 48000, 2, {{"stereo", "1"}}}}});
+ Run(channel_a2b_, audio_channels, 2);
+ out_file_.Close();
+ // Decode in mono.
+ test_cntr_++;
+ OpenOutFile(test_cntr_);
+ acm_b_->SetReceiveCodecs({{120, {"OPUS", 48000, 2}}});
+ Run(channel_a2b_, audio_channels, codec_channels);
+ out_file_.Close();
+#endif
+
+ // Delete the file pointers.
+ delete in_file_stereo_;
+ delete in_file_mono_;
+}
+
+// Register a codec to use in the test.
+//
+// Input:   side             - which ACM to use, 'A' or 'B'
+//          codec_name       - name to use when registering the codec
+//          sampling_freq_hz - sampling frequency in Hertz
+//          rate             - bitrate in bits per second
+//          pack_size        - packet size in samples
+//          channels         - number of channels; 1 for mono, 2 for stereo
+void TestStereo::RegisterSendCodec(char side,
+ char* codec_name,
+ int32_t sampling_freq_hz,
+ int rate,
+ int pack_size,
+ int channels) {
+ // Store packet size in samples, used to validate the received packet
+ pack_size_samp_ = pack_size;
+
+ // Store the expected packet size in bytes, used to validate the received
+ // packet. Add 0.875 to always round up to a whole byte.
+ pack_size_bytes_ = (uint16_t)(static_cast<float>(pack_size * rate) /
+ static_cast<float>(sampling_freq_hz * 8) +
+ 0.875);
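+  // Worked example (for illustration): G722 at 64000 bit/s with 160-sample
+  // packets at 16 kHz covers 10 ms per packet, giving
+  // 160 * 64000 / (16000 * 8) = 80 bytes; the 0.875 rounds a size with one
+  // extra bit, e.g. 80.125 bytes, up to 81.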
+
+ // Set pointer to the ACM where to register the codec
+ AudioCodingModule* my_acm = NULL;
+ switch (side) {
+ case 'A': {
+ my_acm = acm_a_.get();
+ break;
+ }
+ case 'B': {
+ my_acm = acm_b_.get();
+ break;
+ }
+ default:
+ break;
+ }
+ ASSERT_TRUE(my_acm != NULL);
+
+ auto encoder_factory = CreateBuiltinAudioEncoderFactory();
+ const int clockrate_hz = absl::EqualsIgnoreCase(codec_name, "g722")
+ ? sampling_freq_hz / 2
+ : sampling_freq_hz;
+ const std::string ptime = rtc::ToString(rtc::CheckedDivExact(
+ pack_size, rtc::CheckedDivExact(sampling_freq_hz, 1000)));
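+  // For example, pack_size = 480 samples at 48000 Hz gives
+  // ptime = 480 / 48 = 10 (ms per packet).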
+ SdpAudioFormat::Parameters params = {{"ptime", ptime}};
+ RTC_CHECK(channels == 1 || channels == 2);
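+  // Note: below, Opus is always instantiated as a 2-channel encoder; mono
+  // vs. stereo coding is steered via the "stereo" fmtp parameter rather
+  // than the channel count.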
+ if (absl::EqualsIgnoreCase(codec_name, "opus")) {
+ if (channels == 2) {
+ params["stereo"] = "1";
+ }
+ channels = 2;
+ params["maxaveragebitrate"] = rtc::ToString(rate);
+ }
+ constexpr int payload_type = 17;
+ auto encoder = encoder_factory->MakeAudioEncoder(
+ payload_type, SdpAudioFormat(codec_name, clockrate_hz, channels, params),
+ absl::nullopt);
+ EXPECT_NE(nullptr, encoder);
+ my_acm->SetEncoder(std::move(encoder));
+
+ send_codec_name_ = codec_name;
+}
+
+void TestStereo::Run(TestPackStereo* channel,
+ int in_channels,
+ int out_channels,
+ int percent_loss) {
+ AudioFrame audio_frame;
+
+ int32_t out_freq_hz_b = out_file_.SamplingFrequency();
+ uint16_t rec_size;
+ uint32_t time_stamp_diff;
+ channel->reset_payload_size();
+ int error_count = 0;
+ int variable_bytes = 0;
+ int variable_packets = 0;
+ // Set test length to 500 ms (50 blocks of 10 ms each).
+ in_file_mono_->SetNum10MsBlocksToRead(50);
+ in_file_stereo_->SetNum10MsBlocksToRead(50);
+ // Fast-forward 1 second (100 blocks) since the files start with silence.
+ in_file_stereo_->FastForward(100);
+ in_file_mono_->FastForward(100);
+
+ while (true) {
+    // Simulate packet loss by setting `lost_packet_` to true in
+    // `percent_loss` percent of the loops.
+ if (percent_loss > 0) {
+ if (counter_ == floor((100 / percent_loss) + 0.5)) {
+ counter_ = 0;
+ channel->set_lost_packet(true);
+ } else {
+ channel->set_lost_packet(false);
+ }
+ counter_++;
+ }
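+    // For example, with percent_loss = 10 the branch above marks every 10th
+    // 10 ms block as lost, i.e. roughly one lost packet per 100 ms
+    // (floor(100 / 10 + 0.5) == 10).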
+
+ // Add 10 msec to ACM
+ if (in_channels == 1) {
+ if (in_file_mono_->EndOfFile()) {
+ break;
+ }
+ in_file_mono_->Read10MsData(audio_frame);
+ } else {
+ if (in_file_stereo_->EndOfFile()) {
+ break;
+ }
+ in_file_stereo_->Read10MsData(audio_frame);
+ }
+ EXPECT_GE(acm_a_->Add10MsData(audio_frame), 0);
+
+ // Verify that the received packet size matches the settings.
+ rec_size = channel->payload_size();
+    if ((0 < rec_size) && (rec_size < 65535)) {
+ if (strcmp(send_codec_name_, "opus") == 0) {
+ // Opus is a variable rate codec, hence calculate the average packet
+ // size, and later make sure the average is in the right range.
+ variable_bytes += rec_size;
+ variable_packets++;
+ } else {
+ // For fixed rate codecs, check that packet size is correct.
+ if ((rec_size != pack_size_bytes_ * out_channels) &&
+ (pack_size_bytes_ < 65535)) {
+ error_count++;
+ }
+ }
+      // Verify that the timestamp is advanced by the expected number of
+      // samples.
+ time_stamp_diff = channel->timestamp_diff();
+ if ((counter_ > 10) && (time_stamp_diff != pack_size_samp_)) {
+ error_count++;
+ }
+ }
+
+ // Run receive side of ACM
+ bool muted;
+ EXPECT_EQ(0, acm_b_->PlayoutData10Ms(out_freq_hz_b, &audio_frame, &muted));
+ ASSERT_FALSE(muted);
+
+ // Write output speech to file
+ out_file_.Write10MsData(
+ audio_frame.data(),
+ audio_frame.samples_per_channel_ * audio_frame.num_channels_);
+ }
+
+ EXPECT_EQ(0, error_count);
+
+ // Check that packet size is in the right range for variable rate codecs,
+ // such as Opus.
+ if (variable_packets > 0) {
+ variable_bytes /= variable_packets;
+ EXPECT_NEAR(variable_bytes, pack_size_bytes_, 18);
+ }
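+  // Illustration: for Opus at 64000 bit/s with 960-sample (20 ms) packets,
+  // pack_size_bytes_ = 960 * 64000 / (48000 * 8) = 160, so the average
+  // payload must land within [142, 178] bytes.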
+
+ if (in_file_mono_->EndOfFile()) {
+ in_file_mono_->Rewind();
+ }
+ if (in_file_stereo_->EndOfFile()) {
+ in_file_stereo_->Rewind();
+ }
+ // Reset in case we ended with a lost packet
+ channel->set_lost_packet(false);
+}
+
+void TestStereo::OpenOutFile(int16_t test_number) {
+ std::string file_name;
+ rtc::StringBuilder file_stream;
+ file_stream << webrtc::test::OutputPath() << "teststereo_out_" << test_number
+ << ".pcm";
+ file_name = file_stream.str();
+ out_file_.Open(file_name, 32000, "wb");
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/test/TestStereo.h b/third_party/libwebrtc/modules/audio_coding/test/TestStereo.h
new file mode 100644
index 0000000000..4c50a4b555
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/test/TestStereo.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_TEST_TESTSTEREO_H_
+#define MODULES_AUDIO_CODING_TEST_TESTSTEREO_H_
+
+#include <math.h>
+
+#include <memory>
+
+#include "modules/audio_coding/include/audio_coding_module.h"
+#include "modules/audio_coding/test/PCMFile.h"
+
+#define PCMA_AND_PCMU
+
+namespace webrtc {
+
+enum StereoMonoMode { kNotSet, kMono, kStereo };
+
+class TestPackStereo : public AudioPacketizationCallback {
+ public:
+ TestPackStereo();
+ ~TestPackStereo();
+
+ void RegisterReceiverACM(AudioCodingModule* acm);
+
+ int32_t SendData(AudioFrameType frame_type,
+ uint8_t payload_type,
+ uint32_t timestamp,
+ const uint8_t* payload_data,
+ size_t payload_size,
+ int64_t absolute_capture_timestamp_ms) override;
+
+ uint16_t payload_size();
+ uint32_t timestamp_diff();
+ void reset_payload_size();
+ void set_codec_mode(StereoMonoMode mode);
+ void set_lost_packet(bool lost);
+
+ private:
+ AudioCodingModule* receiver_acm_;
+ int16_t seq_no_;
+ uint32_t timestamp_diff_;
+ uint32_t last_in_timestamp_;
+ uint64_t total_bytes_;
+ int payload_size_;
+ StereoMonoMode codec_mode_;
+ // Simulate packet losses
+ bool lost_packet_;
+};
+
+class TestStereo {
+ public:
+ TestStereo();
+ ~TestStereo();
+
+ void Perform();
+
+ private:
+  // The default value of '-1' indicates that the registration is based only
+  // on the codec name and that a matching sampling frequency is not
+  // required. This is useful for codecs which support several sampling
+  // frequencies.
+ void RegisterSendCodec(char side,
+ char* codec_name,
+ int32_t samp_freq_hz,
+ int rate,
+ int pack_size,
+ int channels);
+
+ void Run(TestPackStereo* channel,
+ int in_channels,
+ int out_channels,
+ int percent_loss = 0);
+ void OpenOutFile(int16_t test_number);
+
+ std::unique_ptr<AudioCodingModule> acm_a_;
+ std::unique_ptr<AudioCodingModule> acm_b_;
+
+ TestPackStereo* channel_a2b_;
+
+ PCMFile* in_file_stereo_;
+ PCMFile* in_file_mono_;
+ PCMFile out_file_;
+ int16_t test_cntr_;
+ uint16_t pack_size_samp_;
+ uint16_t pack_size_bytes_;
+ int counter_;
+ char* send_codec_name_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_CODING_TEST_TESTSTEREO_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/test/TestVADDTX.cc b/third_party/libwebrtc/modules/audio_coding/test/TestVADDTX.cc
new file mode 100644
index 0000000000..cb05deb92a
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/test/TestVADDTX.cc
@@ -0,0 +1,270 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/test/TestVADDTX.h"
+
+#include <string>
+
+#include "absl/strings/match.h"
+#include "absl/strings/string_view.h"
+#include "api/audio_codecs/audio_decoder_factory_template.h"
+#include "api/audio_codecs/audio_encoder_factory_template.h"
+#include "api/audio_codecs/ilbc/audio_decoder_ilbc.h"
+#include "api/audio_codecs/ilbc/audio_encoder_ilbc.h"
+#include "api/audio_codecs/isac/audio_decoder_isac_float.h"
+#include "api/audio_codecs/isac/audio_encoder_isac_float.h"
+#include "api/audio_codecs/opus/audio_decoder_opus.h"
+#include "api/audio_codecs/opus/audio_encoder_opus.h"
+#include "modules/audio_coding/codecs/cng/audio_encoder_cng.h"
+#include "modules/audio_coding/test/PCMFile.h"
+#include "rtc_base/strings/string_builder.h"
+#include "test/gtest.h"
+#include "test/testsupport/file_utils.h"
+
+namespace webrtc {
+
+MonitoringAudioPacketizationCallback::MonitoringAudioPacketizationCallback(
+ AudioPacketizationCallback* next)
+ : next_(next) {
+ ResetStatistics();
+}
+
+int32_t MonitoringAudioPacketizationCallback::SendData(
+ AudioFrameType frame_type,
+ uint8_t payload_type,
+ uint32_t timestamp,
+ const uint8_t* payload_data,
+ size_t payload_len_bytes,
+ int64_t absolute_capture_timestamp_ms) {
+ counter_[static_cast<int>(frame_type)]++;
+ return next_->SendData(frame_type, payload_type, timestamp, payload_data,
+ payload_len_bytes, absolute_capture_timestamp_ms);
+}
+
+void MonitoringAudioPacketizationCallback::PrintStatistics() {
+ printf("\n");
+ printf("kEmptyFrame %u\n",
+ counter_[static_cast<int>(AudioFrameType::kEmptyFrame)]);
+ printf("kAudioFrameSpeech %u\n",
+ counter_[static_cast<int>(AudioFrameType::kAudioFrameSpeech)]);
+ printf("kAudioFrameCN %u\n",
+ counter_[static_cast<int>(AudioFrameType::kAudioFrameCN)]);
+ printf("\n\n");
+}
+
+void MonitoringAudioPacketizationCallback::ResetStatistics() {
+ memset(counter_, 0, sizeof(counter_));
+}
+
+void MonitoringAudioPacketizationCallback::GetStatistics(uint32_t* counter) {
+ memcpy(counter, counter_, sizeof(counter_));
+}
+
+TestVadDtx::TestVadDtx()
+ : encoder_factory_(CreateAudioEncoderFactory<AudioEncoderIlbc,
+ AudioEncoderIsacFloat,
+ AudioEncoderOpus>()),
+ decoder_factory_(CreateAudioDecoderFactory<AudioDecoderIlbc,
+ AudioDecoderIsacFloat,
+ AudioDecoderOpus>()),
+ acm_send_(AudioCodingModule::Create(
+ AudioCodingModule::Config(decoder_factory_))),
+ acm_receive_(AudioCodingModule::Create(
+ AudioCodingModule::Config(decoder_factory_))),
+ channel_(std::make_unique<Channel>()),
+ packetization_callback_(
+ std::make_unique<MonitoringAudioPacketizationCallback>(
+ channel_.get())) {
+ EXPECT_EQ(
+ 0, acm_send_->RegisterTransportCallback(packetization_callback_.get()));
+ channel_->RegisterReceiverACM(acm_receive_.get());
+}
+
+bool TestVadDtx::RegisterCodec(const SdpAudioFormat& codec_format,
+ absl::optional<Vad::Aggressiveness> vad_mode) {
+ constexpr int payload_type = 17, cn_payload_type = 117;
+ bool added_comfort_noise = false;
+
+ auto encoder = encoder_factory_->MakeAudioEncoder(payload_type, codec_format,
+ absl::nullopt);
+ if (vad_mode.has_value() &&
+ !absl::EqualsIgnoreCase(codec_format.name, "opus")) {
+ AudioEncoderCngConfig config;
+ config.speech_encoder = std::move(encoder);
+ config.num_channels = 1;
+ config.payload_type = cn_payload_type;
+ config.vad_mode = vad_mode.value();
+ encoder = CreateComfortNoiseEncoder(std::move(config));
+ added_comfort_noise = true;
+ }
+ channel_->SetIsStereo(encoder->NumChannels() > 1);
+ acm_send_->SetEncoder(std::move(encoder));
+
+ std::map<int, SdpAudioFormat> receive_codecs = {{payload_type, codec_format}};
+ acm_receive_->SetReceiveCodecs(receive_codecs);
+
+ return added_comfort_noise;
+}
+
+// Encodes a file and checks whether the number of times each packet type
+// occurs matches the expectation.
+void TestVadDtx::Run(absl::string_view in_filename,
+ int frequency,
+ int channels,
+ absl::string_view out_filename,
+ bool append,
+ const int* expects) {
+ packetization_callback_->ResetStatistics();
+
+ PCMFile in_file;
+ in_file.Open(in_filename, frequency, "rb");
+ in_file.ReadStereo(channels > 1);
+ // Set test length to 1000 ms (100 blocks of 10 ms each).
+ in_file.SetNum10MsBlocksToRead(100);
+  // Fast-forward the file 500 ms (50 blocks). The first second of the file
+  // is silence, but we keep half of it to test silence periods.
+ in_file.FastForward(50);
+
+ PCMFile out_file;
+ if (append) {
+ out_file.Open(out_filename, kOutputFreqHz, "ab");
+ } else {
+ out_file.Open(out_filename, kOutputFreqHz, "wb");
+ }
+
+ uint16_t frame_size_samples = in_file.PayloadLength10Ms();
+ AudioFrame audio_frame;
+ while (!in_file.EndOfFile()) {
+ in_file.Read10MsData(audio_frame);
+ audio_frame.timestamp_ = time_stamp_;
+ time_stamp_ += frame_size_samples;
+ EXPECT_GE(acm_send_->Add10MsData(audio_frame), 0);
+ bool muted;
+ acm_receive_->PlayoutData10Ms(kOutputFreqHz, &audio_frame, &muted);
+ ASSERT_FALSE(muted);
+ out_file.Write10MsData(audio_frame);
+ }
+
+ in_file.Close();
+ out_file.Close();
+
+#ifdef PRINT_STAT
+ packetization_callback_->PrintStatistics();
+#endif
+
+ uint32_t stats[3];
+ packetization_callback_->GetStatistics(stats);
+ packetization_callback_->ResetStatistics();
+
+ for (const auto& st : stats) {
+ int i = &st - stats; // Calculate the current position in stats.
+ switch (expects[i]) {
+ case 0: {
+ EXPECT_EQ(0u, st) << "stats[" << i << "] error.";
+ break;
+ }
+ case 1: {
+ EXPECT_GT(st, 0u) << "stats[" << i << "] error.";
+ break;
+ }
+ }
+ }
+}
+
+// The following is the implementation of TestWebRtcVadDtx.
+TestWebRtcVadDtx::TestWebRtcVadDtx() : output_file_num_(0) {}
+
+void TestWebRtcVadDtx::Perform() {
+ RunTestCases({"ISAC", 16000, 1});
+ RunTestCases({"ISAC", 32000, 1});
+ RunTestCases({"ILBC", 8000, 1});
+ RunTestCases({"opus", 48000, 2});
+}
+
+// Test various configurations on VAD/DTX.
+void TestWebRtcVadDtx::RunTestCases(const SdpAudioFormat& codec_format) {
+ Test(/*new_outfile=*/true,
+ /*expect_dtx_enabled=*/RegisterCodec(codec_format, absl::nullopt));
+
+ Test(/*new_outfile=*/false,
+ /*expect_dtx_enabled=*/RegisterCodec(codec_format, Vad::kVadAggressive));
+
+ Test(/*new_outfile=*/false,
+ /*expect_dtx_enabled=*/RegisterCodec(codec_format, Vad::kVadLowBitrate));
+
+ Test(/*new_outfile=*/false, /*expect_dtx_enabled=*/RegisterCodec(
+ codec_format, Vad::kVadVeryAggressive));
+
+ Test(/*new_outfile=*/false,
+ /*expect_dtx_enabled=*/RegisterCodec(codec_format, Vad::kVadNormal));
+}
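+
+// Note that RegisterCodec() wraps only non-Opus codecs in a CNG encoder, so
+// the Opus run in Perform() exercises every Test() call above with
+// expect_dtx_enabled == false.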
+
+// Set the expectation and run the test.
+void TestWebRtcVadDtx::Test(bool new_outfile, bool expect_dtx_enabled) {
+ int expects[] = {-1, 1, expect_dtx_enabled, 0, 0};
+ if (new_outfile) {
+ output_file_num_++;
+ }
+ rtc::StringBuilder out_filename;
+ out_filename << webrtc::test::OutputPath() << "testWebRtcVadDtx_outFile_"
+ << output_file_num_ << ".pcm";
+ Run(webrtc::test::ResourcePath("audio_coding/testfile32kHz", "pcm"), 32000, 1,
+ out_filename.str(), !new_outfile, expects);
+}
+
+// The following is the implementation of TestOpusDtx.
+void TestOpusDtx::Perform() {
+ int expects[] = {0, 1, 0, 0, 0};
+
+ // Register Opus as send codec
+ std::string out_filename =
+ webrtc::test::OutputPath() + "testOpusDtx_outFile_mono.pcm";
+ RegisterCodec({"opus", 48000, 2}, absl::nullopt);
+ acm_send_->ModifyEncoder([](std::unique_ptr<AudioEncoder>* encoder_ptr) {
+ (*encoder_ptr)->SetDtx(false);
+ });
+
+ Run(webrtc::test::ResourcePath("audio_coding/testfile32kHz", "pcm"), 32000, 1,
+ out_filename, false, expects);
+
+ acm_send_->ModifyEncoder([](std::unique_ptr<AudioEncoder>* encoder_ptr) {
+ (*encoder_ptr)->SetDtx(true);
+ });
+ expects[static_cast<int>(AudioFrameType::kEmptyFrame)] = 1;
+ expects[static_cast<int>(AudioFrameType::kAudioFrameCN)] = 1;
+ Run(webrtc::test::ResourcePath("audio_coding/testfile32kHz", "pcm"), 32000, 1,
+ out_filename, true, expects);
+
+ // Register stereo Opus as send codec
+ out_filename = webrtc::test::OutputPath() + "testOpusDtx_outFile_stereo.pcm";
+ RegisterCodec({"opus", 48000, 2, {{"stereo", "1"}}}, absl::nullopt);
+ acm_send_->ModifyEncoder([](std::unique_ptr<AudioEncoder>* encoder_ptr) {
+ (*encoder_ptr)->SetDtx(false);
+ });
+ expects[static_cast<int>(AudioFrameType::kEmptyFrame)] = 0;
+ expects[static_cast<int>(AudioFrameType::kAudioFrameCN)] = 0;
+ Run(webrtc::test::ResourcePath("audio_coding/teststereo32kHz", "pcm"), 32000,
+ 2, out_filename, false, expects);
+
+ acm_send_->ModifyEncoder([](std::unique_ptr<AudioEncoder>* encoder_ptr) {
+ (*encoder_ptr)->SetDtx(true);
+ // The default bitrate will not generate frames recognized as CN on desktop
+ // since the frames will be encoded as CELT. Set a low target bitrate to get
+ // consistent behaviour across platforms.
+ (*encoder_ptr)->OnReceivedTargetAudioBitrate(24000);
+ });
+
+ expects[static_cast<int>(AudioFrameType::kEmptyFrame)] = 1;
+ expects[static_cast<int>(AudioFrameType::kAudioFrameCN)] = 1;
+ Run(webrtc::test::ResourcePath("audio_coding/teststereo32kHz", "pcm"), 32000,
+ 2, out_filename, true, expects);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/test/TestVADDTX.h b/third_party/libwebrtc/modules/audio_coding/test/TestVADDTX.h
new file mode 100644
index 0000000000..d81ae28beb
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/test/TestVADDTX.h
@@ -0,0 +1,115 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_TEST_TESTVADDTX_H_
+#define MODULES_AUDIO_CODING_TEST_TESTVADDTX_H_
+
+#include <memory>
+
+#include "absl/strings/string_view.h"
+#include "api/audio_codecs/audio_decoder_factory.h"
+#include "api/audio_codecs/audio_encoder_factory.h"
+#include "common_audio/vad/include/vad.h"
+#include "modules/audio_coding/include/audio_coding_module.h"
+#include "modules/audio_coding/include/audio_coding_module_typedefs.h"
+#include "modules/audio_coding/test/Channel.h"
+
+namespace webrtc {
+
+// This class records the frame type, and delegates actual sending to the
+// `next_` AudioPacketizationCallback.
+class MonitoringAudioPacketizationCallback : public AudioPacketizationCallback {
+ public:
+ explicit MonitoringAudioPacketizationCallback(
+ AudioPacketizationCallback* next);
+
+ int32_t SendData(AudioFrameType frame_type,
+ uint8_t payload_type,
+ uint32_t timestamp,
+ const uint8_t* payload_data,
+ size_t payload_len_bytes,
+ int64_t absolute_capture_timestamp_ms) override;
+
+ void PrintStatistics();
+ void ResetStatistics();
+ void GetStatistics(uint32_t* stats);
+
+ private:
+ // 0 - kEmptyFrame
+ // 1 - kAudioFrameSpeech
+ // 2 - kAudioFrameCN
+ uint32_t counter_[3];
+ AudioPacketizationCallback* const next_;
+};
+
+// TestVadDtx verifies that VAD/DTX perform as they should. It runs through
+// an audio file and checks whether the occurrence of the various packet
+// types follows expectations. Derived classes implement Perform() to put
+// the test together.
+class TestVadDtx {
+ public:
+ static const int kOutputFreqHz = 16000;
+
+ TestVadDtx();
+
+ protected:
+ // Returns true iff CN was added.
+ bool RegisterCodec(const SdpAudioFormat& codec_format,
+ absl::optional<Vad::Aggressiveness> vad_mode);
+
+  // Encodes a file and checks whether the number of times each packet type
+  // occurs matches the expectation. Saves the result to a file.
+  // expects[x] means
+  // -1 : do not care,
+  //  0 : there have been no packets of type `x`,
+  //  1 : there have been packets of type `x`,
+  // where `x` indicates one of the following packet types:
+  // 0 - kEmptyFrame
+  // 1 - kAudioFrameSpeech
+  // 2 - kAudioFrameCN
+ void Run(absl::string_view in_filename,
+ int frequency,
+ int channels,
+ absl::string_view out_filename,
+ bool append,
+ const int* expects);
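+  // For example, expects = {0, 1, 1} would assert that a run produced no
+  // empty frames but did produce both speech and CN packets (illustrative
+  // values; the concrete arrays live in the test implementations).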
+
+ const rtc::scoped_refptr<AudioEncoderFactory> encoder_factory_;
+ const rtc::scoped_refptr<AudioDecoderFactory> decoder_factory_;
+ std::unique_ptr<AudioCodingModule> acm_send_;
+ std::unique_ptr<AudioCodingModule> acm_receive_;
+ std::unique_ptr<Channel> channel_;
+ std::unique_ptr<MonitoringAudioPacketizationCallback> packetization_callback_;
+ uint32_t time_stamp_ = 0x12345678;
+};
+
+// TestWebRtcVadDtx verifies that the WebRTC VAD/DTX perform as they should.
+class TestWebRtcVadDtx final : public TestVadDtx {
+ public:
+ TestWebRtcVadDtx();
+
+ void Perform();
+
+ private:
+ void RunTestCases(const SdpAudioFormat& codec_format);
+ void Test(bool new_outfile, bool expect_dtx_enabled);
+
+ int output_file_num_;
+};
+
+// TestOpusDtx verifies that the Opus DTX performs as it should.
+class TestOpusDtx final : public TestVadDtx {
+ public:
+ void Perform();
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_CODING_TEST_TESTVADDTX_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/test/Tester.cc b/third_party/libwebrtc/modules/audio_coding/test/Tester.cc
new file mode 100644
index 0000000000..113dbe059e
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/test/Tester.cc
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stdio.h>
+
+#include <string>
+#include <vector>
+
+#include "modules/audio_coding/include/audio_coding_module.h"
+#include "modules/audio_coding/test/EncodeDecodeTest.h"
+#include "modules/audio_coding/test/PacketLossTest.h"
+#include "modules/audio_coding/test/TestAllCodecs.h"
+#include "modules/audio_coding/test/TestRedFec.h"
+#include "modules/audio_coding/test/TestStereo.h"
+#include "modules/audio_coding/test/TestVADDTX.h"
+#include "modules/audio_coding/test/TwoWayCommunication.h"
+#include "modules/audio_coding/test/iSACTest.h"
+#include "modules/audio_coding/test/opus_test.h"
+#include "test/gtest.h"
+#include "test/testsupport/file_utils.h"
+
+TEST(AudioCodingModuleTest, TestAllCodecs) {
+ webrtc::TestAllCodecs().Perform();
+}
+
+#if defined(WEBRTC_ANDROID)
+TEST(AudioCodingModuleTest, DISABLED_TestEncodeDecode) {
+#else
+TEST(AudioCodingModuleTest, TestEncodeDecode) {
+#endif
+ webrtc::EncodeDecodeTest().Perform();
+}
+
+TEST(AudioCodingModuleTest, TestRedFec) {
+ webrtc::TestRedFec().Perform();
+}
+
+#if defined(WEBRTC_ANDROID)
+TEST(AudioCodingModuleTest, DISABLED_TestIsac) {
+#else
+TEST(AudioCodingModuleTest, TestIsac) {
+#endif
+ webrtc::ISACTest().Perform();
+}
+
+#if (defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX)) && \
+ defined(WEBRTC_CODEC_ILBC)
+#if defined(WEBRTC_ANDROID)
+TEST(AudioCodingModuleTest, DISABLED_TwoWayCommunication) {
+#else
+TEST(AudioCodingModuleTest, TwoWayCommunication) {
+#endif
+ webrtc::TwoWayCommunication().Perform();
+}
+#endif
+
+// Disabled on iOS as flaky, see https://crbug.com/webrtc/7057
+#if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS)
+TEST(AudioCodingModuleTest, DISABLED_TestStereo) {
+#else
+TEST(AudioCodingModuleTest, TestStereo) {
+#endif
+ webrtc::TestStereo().Perform();
+}
+
+TEST(AudioCodingModuleTest, TestWebRtcVadDtx) {
+ webrtc::TestWebRtcVadDtx().Perform();
+}
+
+TEST(AudioCodingModuleTest, TestOpusDtx) {
+ webrtc::TestOpusDtx().Perform();
+}
+
+// Disabled on ios as flaky, see https://crbug.com/webrtc/7057
+#if defined(WEBRTC_IOS)
+TEST(AudioCodingModuleTest, DISABLED_TestOpus) {
+#else
+TEST(AudioCodingModuleTest, TestOpus) {
+#endif
+ webrtc::OpusTest().Perform();
+}
+
+TEST(AudioCodingModuleTest, TestPacketLoss) {
+ webrtc::PacketLossTest(1, 10, 10, 1).Perform();
+}
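+
+// The PacketLossTest arguments are, in order, the channel count, the
+// expected loss rate, the actual loss rate and the burst length (as
+// suggested by the mono/stereo and burst variants below; see
+// PacketLossTest.h for the authoritative parameter names).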
+
+TEST(AudioCodingModuleTest, TestPacketLossBurst) {
+ webrtc::PacketLossTest(1, 10, 10, 2).Perform();
+}
+
+// Disabled on iOS as flaky, see https://crbug.com/webrtc/7057
+#if defined(WEBRTC_IOS)
+TEST(AudioCodingModuleTest, DISABLED_TestPacketLossStereo) {
+#else
+TEST(AudioCodingModuleTest, TestPacketLossStereo) {
+#endif
+ webrtc::PacketLossTest(2, 10, 10, 1).Perform();
+}
+
+// Disabled on iOS as flaky, see https://crbug.com/webrtc/7057
+#if defined(WEBRTC_IOS)
+TEST(AudioCodingModuleTest, DISABLED_TestPacketLossStereoBurst) {
+#else
+TEST(AudioCodingModuleTest, TestPacketLossStereoBurst) {
+#endif
+ webrtc::PacketLossTest(2, 10, 10, 2).Perform();
+}
+
+// The full API test is too long to run automatically on bots, but can be used
+// for offline testing. User interaction is needed.
+#ifdef ACM_TEST_FULL_API
+TEST(AudioCodingModuleTest, TestAPI) {
+ webrtc::APITest().Perform();
+}
+#endif
diff --git a/third_party/libwebrtc/modules/audio_coding/test/TwoWayCommunication.cc b/third_party/libwebrtc/modules/audio_coding/test/TwoWayCommunication.cc
new file mode 100644
index 0000000000..b42415a21a
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/test/TwoWayCommunication.cc
@@ -0,0 +1,191 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "TwoWayCommunication.h"
+
+#include <stdio.h>
+#include <string.h>
+
+#include <memory>
+
+#include "api/audio_codecs/builtin_audio_decoder_factory.h"
+#include "api/audio_codecs/builtin_audio_encoder_factory.h"
+#include "modules/audio_coding/test/PCMFile.h"
+#include "test/gtest.h"
+#include "test/testsupport/file_utils.h"
+
+namespace webrtc {
+
+#define MAX_FILE_NAME_LENGTH_BYTE 500
+
+TwoWayCommunication::TwoWayCommunication()
+ : _acmA(AudioCodingModule::Create(
+ AudioCodingModule::Config(CreateBuiltinAudioDecoderFactory()))),
+ _acmRefA(AudioCodingModule::Create(
+ AudioCodingModule::Config(CreateBuiltinAudioDecoderFactory()))) {
+ AudioCodingModule::Config config;
+ // The clicks will be more obvious if time-stretching is not allowed.
+ // TODO(henrik.lundin) Really?
+ config.neteq_config.for_test_no_time_stretching = true;
+ config.decoder_factory = CreateBuiltinAudioDecoderFactory();
+ _acmB.reset(AudioCodingModule::Create(config));
+ _acmRefB.reset(AudioCodingModule::Create(config));
+}
+
+TwoWayCommunication::~TwoWayCommunication() {
+ delete _channel_A2B;
+ delete _channel_B2A;
+ delete _channelRef_A2B;
+ delete _channelRef_B2A;
+ _inFileA.Close();
+ _inFileB.Close();
+ _outFileA.Close();
+ _outFileB.Close();
+ _outFileRefA.Close();
+ _outFileRefB.Close();
+}
+
+void TwoWayCommunication::SetUpAutotest(
+ AudioEncoderFactory* const encoder_factory,
+ const SdpAudioFormat& format1,
+ const int payload_type1,
+ const SdpAudioFormat& format2,
+ const int payload_type2) {
+ //--- Set A codecs
+ _acmA->SetEncoder(
+ encoder_factory->MakeAudioEncoder(payload_type1, format1, absl::nullopt));
+ _acmA->SetReceiveCodecs({{payload_type2, format2}});
+
+ //--- Set ref-A codecs
+ _acmRefA->SetEncoder(
+ encoder_factory->MakeAudioEncoder(payload_type1, format1, absl::nullopt));
+ _acmRefA->SetReceiveCodecs({{payload_type2, format2}});
+
+ //--- Set B codecs
+ _acmB->SetEncoder(
+ encoder_factory->MakeAudioEncoder(payload_type2, format2, absl::nullopt));
+ _acmB->SetReceiveCodecs({{payload_type1, format1}});
+
+ //--- Set ref-B codecs
+ _acmRefB->SetEncoder(
+ encoder_factory->MakeAudioEncoder(payload_type2, format2, absl::nullopt));
+ _acmRefB->SetReceiveCodecs({{payload_type1, format1}});
+
+ uint16_t frequencyHz;
+
+ //--- Input A and B
+ std::string in_file_name =
+ webrtc::test::ResourcePath("audio_coding/testfile32kHz", "pcm");
+ frequencyHz = 16000;
+ _inFileA.Open(in_file_name, frequencyHz, "rb");
+ _inFileB.Open(in_file_name, frequencyHz, "rb");
+
+ //--- Output A
+ std::string output_file_a = webrtc::test::OutputPath() + "outAutotestA.pcm";
+ frequencyHz = 16000;
+ _outFileA.Open(output_file_a, frequencyHz, "wb");
+ std::string output_ref_file_a =
+ webrtc::test::OutputPath() + "ref_outAutotestA.pcm";
+ _outFileRefA.Open(output_ref_file_a, frequencyHz, "wb");
+
+ //--- Output B
+ std::string output_file_b = webrtc::test::OutputPath() + "outAutotestB.pcm";
+ frequencyHz = 16000;
+ _outFileB.Open(output_file_b, frequencyHz, "wb");
+ std::string output_ref_file_b =
+ webrtc::test::OutputPath() + "ref_outAutotestB.pcm";
+ _outFileRefB.Open(output_ref_file_b, frequencyHz, "wb");
+
+ //--- Set A-to-B channel
+ _channel_A2B = new Channel;
+ _acmA->RegisterTransportCallback(_channel_A2B);
+ _channel_A2B->RegisterReceiverACM(_acmB.get());
+ //--- Do the same for the reference
+ _channelRef_A2B = new Channel;
+ _acmRefA->RegisterTransportCallback(_channelRef_A2B);
+ _channelRef_A2B->RegisterReceiverACM(_acmRefB.get());
+
+ //--- Set B-to-A channel
+ _channel_B2A = new Channel;
+ _acmB->RegisterTransportCallback(_channel_B2A);
+ _channel_B2A->RegisterReceiverACM(_acmA.get());
+ //--- Do the same for reference
+ _channelRef_B2A = new Channel;
+ _acmRefB->RegisterTransportCallback(_channelRef_B2A);
+ _channelRef_B2A->RegisterReceiverACM(_acmRefA.get());
+}
+
+void TwoWayCommunication::Perform() {
+ const SdpAudioFormat format1("ISAC", 16000, 1);
+ const SdpAudioFormat format2("L16", 8000, 1);
+ constexpr int payload_type1 = 17, payload_type2 = 18;
+
+ auto encoder_factory = CreateBuiltinAudioEncoderFactory();
+
+ SetUpAutotest(encoder_factory.get(), format1, payload_type1, format2,
+ payload_type2);
+
+ unsigned int msecPassed = 0;
+ unsigned int secPassed = 0;
+
+ int32_t outFreqHzA = _outFileA.SamplingFrequency();
+ int32_t outFreqHzB = _outFileB.SamplingFrequency();
+
+ AudioFrame audioFrame;
+
+  // In the following loop we test that the code can handle misuse of the
+  // APIs. In the middle of a session, with data flowing between the two
+  // sides A and B, APIs are called and the code should continue to run and
+  // be able to recover.
+ while (!_inFileA.EndOfFile() && !_inFileB.EndOfFile()) {
+ msecPassed += 10;
+ EXPECT_GT(_inFileA.Read10MsData(audioFrame), 0);
+ EXPECT_GE(_acmA->Add10MsData(audioFrame), 0);
+ EXPECT_GE(_acmRefA->Add10MsData(audioFrame), 0);
+
+ EXPECT_GT(_inFileB.Read10MsData(audioFrame), 0);
+
+ EXPECT_GE(_acmB->Add10MsData(audioFrame), 0);
+ EXPECT_GE(_acmRefB->Add10MsData(audioFrame), 0);
+ bool muted;
+ EXPECT_EQ(0, _acmA->PlayoutData10Ms(outFreqHzA, &audioFrame, &muted));
+ ASSERT_FALSE(muted);
+ _outFileA.Write10MsData(audioFrame);
+ EXPECT_EQ(0, _acmRefA->PlayoutData10Ms(outFreqHzA, &audioFrame, &muted));
+ ASSERT_FALSE(muted);
+ _outFileRefA.Write10MsData(audioFrame);
+ EXPECT_EQ(0, _acmB->PlayoutData10Ms(outFreqHzB, &audioFrame, &muted));
+ ASSERT_FALSE(muted);
+ _outFileB.Write10MsData(audioFrame);
+ EXPECT_EQ(0, _acmRefB->PlayoutData10Ms(outFreqHzB, &audioFrame, &muted));
+ ASSERT_FALSE(muted);
+ _outFileRefB.Write10MsData(audioFrame);
+
+ // Update time counters each time a second of data has passed.
+ if (msecPassed >= 1000) {
+ msecPassed = 0;
+ secPassed++;
+ }
+ // Re-register send codec on side B.
+ if (((secPassed % 5) == 4) && (msecPassed >= 990)) {
+ _acmB->SetEncoder(encoder_factory->MakeAudioEncoder(
+ payload_type2, format2, absl::nullopt));
+ }
+ // Initialize receiver on side A.
+    if (((secPassed % 7) == 6) && (msecPassed == 0)) {
+      EXPECT_EQ(0, _acmA->InitializeReceiver());
+    }
+ // Re-register codec on side A.
+ if (((secPassed % 7) == 6) && (msecPassed >= 990)) {
+ _acmA->SetReceiveCodecs({{payload_type2, format2}});
+ }
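+    // Net effect (illustration): side B re-creates its encoder once every
+    // 5 seconds (at the 990 ms mark of seconds 4, 9, ...), while side A
+    // re-initializes its receiver and then re-registers its receive codec
+    // once every 7 seconds.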
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/test/TwoWayCommunication.h b/third_party/libwebrtc/modules/audio_coding/test/TwoWayCommunication.h
new file mode 100644
index 0000000000..b7eb9e5583
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/test/TwoWayCommunication.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_TEST_TWOWAYCOMMUNICATION_H_
+#define MODULES_AUDIO_CODING_TEST_TWOWAYCOMMUNICATION_H_
+
+#include <memory>
+
+#include "api/audio_codecs/audio_encoder_factory.h"
+#include "api/audio_codecs/audio_format.h"
+#include "modules/audio_coding/include/audio_coding_module.h"
+#include "modules/audio_coding/test/Channel.h"
+#include "modules/audio_coding/test/PCMFile.h"
+
+namespace webrtc {
+
+class TwoWayCommunication {
+ public:
+ TwoWayCommunication();
+ ~TwoWayCommunication();
+
+ void Perform();
+
+ private:
+ void SetUpAutotest(AudioEncoderFactory* const encoder_factory,
+ const SdpAudioFormat& format1,
+ int payload_type1,
+ const SdpAudioFormat& format2,
+ int payload_type2);
+
+ std::unique_ptr<AudioCodingModule> _acmA;
+ std::unique_ptr<AudioCodingModule> _acmB;
+
+ std::unique_ptr<AudioCodingModule> _acmRefA;
+ std::unique_ptr<AudioCodingModule> _acmRefB;
+
+ Channel* _channel_A2B;
+ Channel* _channel_B2A;
+
+ Channel* _channelRef_A2B;
+ Channel* _channelRef_B2A;
+
+ PCMFile _inFileA;
+ PCMFile _inFileB;
+
+ PCMFile _outFileA;
+ PCMFile _outFileB;
+
+ PCMFile _outFileRefA;
+ PCMFile _outFileRefB;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_CODING_TEST_TWOWAYCOMMUNICATION_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/test/iSACTest.cc b/third_party/libwebrtc/modules/audio_coding/test/iSACTest.cc
new file mode 100644
index 0000000000..246c485afe
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/test/iSACTest.cc
@@ -0,0 +1,273 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/test/iSACTest.h"
+
+#include <stdio.h>
+#include <string.h>
+
+#include "absl/strings/match.h"
+#include "api/audio_codecs/builtin_audio_decoder_factory.h"
+#include "api/audio_codecs/isac/audio_encoder_isac_float.h"
+#include "rtc_base/strings/string_builder.h"
+#include "rtc_base/time_utils.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "test/testsupport/file_utils.h"
+
+namespace webrtc {
+
+using ::testing::AnyOf;
+using ::testing::Eq;
+using ::testing::StrCaseEq;
+
+namespace {
+
+constexpr int kISAC16kPayloadType = 103;
+constexpr int kISAC32kPayloadType = 104;
+const SdpAudioFormat kISAC16kFormat = {"ISAC", 16000, 1};
+const SdpAudioFormat kISAC32kFormat = {"ISAC", 32000, 1};
+
+AudioEncoderIsacFloat::Config TweakConfig(
+ AudioEncoderIsacFloat::Config config,
+ const ACMTestISACConfig& test_config) {
+ if (test_config.currentRateBitPerSec > 0) {
+ config.bit_rate = test_config.currentRateBitPerSec;
+ }
+ if (test_config.currentFrameSizeMsec != 0) {
+ config.frame_size_ms = test_config.currentFrameSizeMsec;
+ }
+ EXPECT_THAT(config.IsOk(), Eq(true));
+ return config;
+}
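+
+// For illustration: a test_config with currentRateBitPerSec = 32000 and
+// currentFrameSizeMsec = 30 would turn the default iSAC-wb config into a
+// 32 kbit/s encoder with 30 ms frames (example values only).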
+
+void SetISACConfigDefault(ACMTestISACConfig& isacConfig) {
+ isacConfig.currentRateBitPerSec = 0;
+ isacConfig.currentFrameSizeMsec = 0;
+ isacConfig.encodingMode = -1;
+ isacConfig.initRateBitPerSec = 0;
+ isacConfig.initFrameSizeInMsec = 0;
+ isacConfig.enforceFrameSize = false;
+}
+
+} // namespace
+
+ISACTest::ISACTest()
+ : _acmA(AudioCodingModule::Create(
+ AudioCodingModule::Config(CreateBuiltinAudioDecoderFactory()))),
+ _acmB(AudioCodingModule::Create(
+ AudioCodingModule::Config(CreateBuiltinAudioDecoderFactory()))) {}
+
+ISACTest::~ISACTest() {}
+
+void ISACTest::Setup() {
+  // Register both iSAC-wb and iSAC-swb as receive codecs on both sides.
+ std::map<int, SdpAudioFormat> receive_codecs = {
+ {kISAC16kPayloadType, kISAC16kFormat},
+ {kISAC32kPayloadType, kISAC32kFormat}};
+ _acmA->SetReceiveCodecs(receive_codecs);
+ _acmB->SetReceiveCodecs(receive_codecs);
+
+ //--- Set A-to-B channel
+ _channel_A2B.reset(new Channel);
+ EXPECT_EQ(0, _acmA->RegisterTransportCallback(_channel_A2B.get()));
+ _channel_A2B->RegisterReceiverACM(_acmB.get());
+
+ //--- Set B-to-A channel
+ _channel_B2A.reset(new Channel);
+ EXPECT_EQ(0, _acmB->RegisterTransportCallback(_channel_B2A.get()));
+ _channel_B2A->RegisterReceiverACM(_acmA.get());
+
+ file_name_swb_ =
+ webrtc::test::ResourcePath("audio_coding/testfile32kHz", "pcm");
+
+ _acmB->SetEncoder(AudioEncoderIsacFloat::MakeAudioEncoder(
+ *AudioEncoderIsacFloat::SdpToConfig(kISAC16kFormat),
+ kISAC16kPayloadType));
+ _acmA->SetEncoder(AudioEncoderIsacFloat::MakeAudioEncoder(
+ *AudioEncoderIsacFloat::SdpToConfig(kISAC32kFormat),
+ kISAC32kPayloadType));
+
+ _inFileA.Open(file_name_swb_, 32000, "rb");
+ // Set test length to 500 ms (50 blocks of 10 ms each).
+ _inFileA.SetNum10MsBlocksToRead(50);
+ // Fast-forward 1 second (100 blocks) since the files start with silence.
+ _inFileA.FastForward(100);
+ std::string fileNameA = webrtc::test::OutputPath() + "testisac_a.pcm";
+ std::string fileNameB = webrtc::test::OutputPath() + "testisac_b.pcm";
+ _outFileA.Open(fileNameA, 32000, "wb");
+ _outFileB.Open(fileNameB, 32000, "wb");
+
+ while (!_inFileA.EndOfFile()) {
+ Run10ms();
+ }
+
+ _inFileA.Close();
+ _outFileA.Close();
+ _outFileB.Close();
+}
+
+void ISACTest::Perform() {
+ Setup();
+
+ int16_t testNr = 0;
+ ACMTestISACConfig wbISACConfig;
+ ACMTestISACConfig swbISACConfig;
+
+ SetISACConfigDefault(wbISACConfig);
+ SetISACConfigDefault(swbISACConfig);
+
+ wbISACConfig.currentRateBitPerSec = -1;
+ swbISACConfig.currentRateBitPerSec = -1;
+ testNr++;
+ EncodeDecode(testNr, wbISACConfig, swbISACConfig);
+
+ SetISACConfigDefault(wbISACConfig);
+ SetISACConfigDefault(swbISACConfig);
+ testNr++;
+ EncodeDecode(testNr, wbISACConfig, swbISACConfig);
+
+ testNr++;
+ SwitchingSamplingRate(testNr, 4);
+}
+
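+// Advances the test by 10 ms: the same input frame is fed to both sides'
+// encoders, and 10 ms of decoded playout is pulled and written to file on
+// each side.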
+void ISACTest::Run10ms() {
+ AudioFrame audioFrame;
+ EXPECT_GT(_inFileA.Read10MsData(audioFrame), 0);
+ EXPECT_GE(_acmA->Add10MsData(audioFrame), 0);
+ EXPECT_GE(_acmB->Add10MsData(audioFrame), 0);
+ bool muted;
+ EXPECT_EQ(0, _acmA->PlayoutData10Ms(32000, &audioFrame, &muted));
+ ASSERT_FALSE(muted);
+ _outFileA.Write10MsData(audioFrame);
+ EXPECT_EQ(0, _acmB->PlayoutData10Ms(32000, &audioFrame, &muted));
+ ASSERT_FALSE(muted);
+ _outFileB.Write10MsData(audioFrame);
+}
+
+void ISACTest::EncodeDecode(int testNr,
+ ACMTestISACConfig& wbISACConfig,
+ ACMTestISACConfig& swbISACConfig) {
+ // Open input files for side A and side B.
+ _inFileA.Open(file_name_swb_, 32000, "rb", true);
+ _inFileB.Open(file_name_swb_, 32000, "rb", true);
+
+ std::string file_name_out;
+ rtc::StringBuilder file_stream_a;
+ rtc::StringBuilder file_stream_b;
+ file_stream_a << webrtc::test::OutputPath();
+ file_stream_b << webrtc::test::OutputPath();
+ file_stream_a << "out_iSACTest_A_" << testNr << ".pcm";
+ file_stream_b << "out_iSACTest_B_" << testNr << ".pcm";
+ file_name_out = file_stream_a.str();
+ _outFileA.Open(file_name_out, 32000, "wb");
+ file_name_out = file_stream_b.str();
+ _outFileB.Open(file_name_out, 32000, "wb");
+
+ // Side A is sending super-wideband, and side B is sending wideband.
+ _acmA->SetEncoder(AudioEncoderIsacFloat::MakeAudioEncoder(
+ TweakConfig(*AudioEncoderIsacFloat::SdpToConfig(kISAC32kFormat),
+ swbISACConfig),
+ kISAC32kPayloadType));
+ _acmB->SetEncoder(AudioEncoderIsacFloat::MakeAudioEncoder(
+ TweakConfig(*AudioEncoderIsacFloat::SdpToConfig(kISAC16kFormat),
+ wbISACConfig),
+ kISAC16kPayloadType));
+
+ _channel_A2B->ResetStats();
+ _channel_B2A->ResetStats();
+
+ while (!(_inFileA.EndOfFile() || _inFileA.Rewinded())) {
+ Run10ms();
+ }
+
+ _channel_A2B->ResetStats();
+ _channel_B2A->ResetStats();
+
+ _outFileA.Close();
+ _outFileB.Close();
+ _inFileA.Close();
+ _inFileB.Close();
+}
+
+void ISACTest::SwitchingSamplingRate(int testNr, int maxSampRateChange) {
+ // Open input files for side A and side B.
+ _inFileA.Open(file_name_swb_, 32000, "rb");
+ _inFileB.Open(file_name_swb_, 32000, "rb");
+
+ std::string file_name_out;
+ rtc::StringBuilder file_stream_a;
+ rtc::StringBuilder file_stream_b;
+ file_stream_a << webrtc::test::OutputPath();
+ file_stream_b << webrtc::test::OutputPath();
+ file_stream_a << "out_iSACTest_A_" << testNr << ".pcm";
+ file_stream_b << "out_iSACTest_B_" << testNr << ".pcm";
+ file_name_out = file_stream_a.str();
+ _outFileA.Open(file_name_out, 32000, "wb");
+ file_name_out = file_stream_b.str();
+ _outFileB.Open(file_name_out, 32000, "wb");
+
+ // Start with side A sending super-wideband and side B sending wideband.
+ // This test toggles each side between wideband and super-wideband.
+ _acmA->SetEncoder(AudioEncoderIsacFloat::MakeAudioEncoder(
+ *AudioEncoderIsacFloat::SdpToConfig(kISAC32kFormat),
+ kISAC32kPayloadType));
+ _acmB->SetEncoder(AudioEncoderIsacFloat::MakeAudioEncoder(
+ *AudioEncoderIsacFloat::SdpToConfig(kISAC16kFormat),
+ kISAC16kPayloadType));
+
+ int numSendCodecChanged = 0;
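+ // Each side switches its encoder maxSampRateChange times, so the loop runs
+ // until 2 * maxSampRateChange switches have occurred in total.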
+ while (numSendCodecChanged < (maxSampRateChange << 1)) {
+ Run10ms();
+ if (_inFileA.EndOfFile()) {
+ if (_inFileA.SamplingFrequency() == 16000) {
+ // Switch side A to send super-wideband.
+ _inFileA.Close();
+ _inFileA.Open(file_name_swb_, 32000, "rb");
+ _acmA->SetEncoder(AudioEncoderIsacFloat::MakeAudioEncoder(
+ *AudioEncoderIsacFloat::SdpToConfig(kISAC32kFormat),
+ kISAC32kPayloadType));
+ } else {
+ // Switch side A to send wideband.
+ _inFileA.Close();
+ _inFileA.Open(file_name_swb_, 32000, "rb");
+ _acmA->SetEncoder(AudioEncoderIsacFloat::MakeAudioEncoder(
+ *AudioEncoderIsacFloat::SdpToConfig(kISAC16kFormat),
+ kISAC16kPayloadType));
+ }
+ numSendCodecChanged++;
+ }
+
+ if (_inFileB.EndOfFile()) {
+ if (_inFileB.SamplingFrequency() == 16000) {
+ // Switch side B to send super-wideband.
+ _inFileB.Close();
+ _inFileB.Open(file_name_swb_, 32000, "rb");
+ _acmB->SetEncoder(AudioEncoderIsacFloat::MakeAudioEncoder(
+ *AudioEncoderIsacFloat::SdpToConfig(kISAC32kFormat),
+ kISAC32kPayloadType));
+ } else {
+ // Switch side B to send wideband.
+ _inFileB.Close();
+ _inFileB.Open(file_name_swb_, 32000, "rb");
+ _acmB->SetEncoder(AudioEncoderIsacFloat::MakeAudioEncoder(
+ *AudioEncoderIsacFloat::SdpToConfig(kISAC16kFormat),
+ kISAC16kPayloadType));
+ }
+ numSendCodecChanged++;
+ }
+ }
+ _outFileA.Close();
+ _outFileB.Close();
+ _inFileA.Close();
+ _inFileB.Close();
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/test/iSACTest.h b/third_party/libwebrtc/modules/audio_coding/test/iSACTest.h
new file mode 100644
index 0000000000..f6efeeac1c
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/test/iSACTest.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_TEST_ISACTEST_H_
+#define MODULES_AUDIO_CODING_TEST_ISACTEST_H_
+
+#include <string.h>
+
+#include <memory>
+
+#include "modules/audio_coding/include/audio_coding_module.h"
+#include "modules/audio_coding/test/Channel.h"
+#include "modules/audio_coding/test/PCMFile.h"
+
+namespace webrtc {
+
+struct ACMTestISACConfig {
+ int32_t currentRateBitPerSec;
+ int16_t currentFrameSizeMsec;
+ int16_t encodingMode;
+ uint32_t initRateBitPerSec;
+ int16_t initFrameSizeInMsec;
+ bool enforceFrameSize;
+};
+
+class ISACTest {
+ public:
+ ISACTest();
+ ~ISACTest();
+
+ void Perform();
+
+ private:
+ void Setup();
+
+ void Run10ms();
+
+ void EncodeDecode(int testNr,
+ ACMTestISACConfig& wbISACConfig,
+ ACMTestISACConfig& swbISACConfig);
+
+ void SwitchingSamplingRate(int testNr, int maxSampRateChange);
+
+ std::unique_ptr<AudioCodingModule> _acmA;
+ std::unique_ptr<AudioCodingModule> _acmB;
+
+ std::unique_ptr<Channel> _channel_A2B;
+ std::unique_ptr<Channel> _channel_B2A;
+
+ PCMFile _inFileA;
+ PCMFile _inFileB;
+
+ PCMFile _outFileA;
+ PCMFile _outFileB;
+
+ std::string file_name_swb_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_CODING_TEST_ISACTEST_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/test/opus_test.cc b/third_party/libwebrtc/modules/audio_coding/test/opus_test.cc
new file mode 100644
index 0000000000..6822bc3d72
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/test/opus_test.cc
@@ -0,0 +1,402 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/test/opus_test.h"
+
+#include <string>
+
+#include "api/audio_codecs/builtin_audio_decoder_factory.h"
+#include "modules/audio_coding/codecs/opus/opus_interface.h"
+#include "modules/audio_coding/include/audio_coding_module_typedefs.h"
+#include "modules/audio_coding/test/TestStereo.h"
+#include "test/gtest.h"
+#include "test/testsupport/file_utils.h"
+
+namespace webrtc {
+
+OpusTest::OpusTest()
+ : acm_receiver_(AudioCodingModule::Create(
+ AudioCodingModule::Config(CreateBuiltinAudioDecoderFactory()))),
+ channel_a2b_(NULL),
+ counter_(0),
+ payload_type_(255),
+ rtp_timestamp_(0),
+ // Initialize the codec handles so the destructor's NULL checks are safe
+ // even if Perform() returns early (e.g. WEBRTC_CODEC_OPUS not defined).
+ opus_mono_encoder_(NULL),
+ opus_stereo_encoder_(NULL),
+ opus_mono_decoder_(NULL),
+ opus_stereo_decoder_(NULL) {}
+
+OpusTest::~OpusTest() {
+ if (channel_a2b_ != NULL) {
+ delete channel_a2b_;
+ channel_a2b_ = NULL;
+ }
+ if (opus_mono_encoder_ != NULL) {
+ WebRtcOpus_EncoderFree(opus_mono_encoder_);
+ opus_mono_encoder_ = NULL;
+ }
+ if (opus_stereo_encoder_ != NULL) {
+ WebRtcOpus_EncoderFree(opus_stereo_encoder_);
+ opus_stereo_encoder_ = NULL;
+ }
+ if (opus_mono_decoder_ != NULL) {
+ WebRtcOpus_DecoderFree(opus_mono_decoder_);
+ opus_mono_decoder_ = NULL;
+ }
+ if (opus_stereo_decoder_ != NULL) {
+ WebRtcOpus_DecoderFree(opus_stereo_decoder_);
+ opus_stereo_decoder_ = NULL;
+ }
+}
+
+void OpusTest::Perform() {
+#ifndef WEBRTC_CODEC_OPUS
+ // WEBRTC_CODEC_OPUS is not defined, so Opus is unavailable; exit.
+ return;
+#else
+ uint16_t frequency_hz;
+ size_t audio_channels;
+ int16_t test_cntr = 0;
+
+ // Open both mono and stereo test files in 32 kHz.
+ const std::string file_name_stereo =
+ webrtc::test::ResourcePath("audio_coding/teststereo32kHz", "pcm");
+ const std::string file_name_mono =
+ webrtc::test::ResourcePath("audio_coding/testfile32kHz", "pcm");
+ frequency_hz = 32000;
+ in_file_stereo_.Open(file_name_stereo, frequency_hz, "rb");
+ in_file_stereo_.ReadStereo(true);
+ in_file_mono_.Open(file_name_mono, frequency_hz, "rb");
+ in_file_mono_.ReadStereo(false);
+
+ // Create Opus encoders for mono and stereo.
+ ASSERT_GT(WebRtcOpus_EncoderCreate(&opus_mono_encoder_, 1, 0, 48000), -1);
+ ASSERT_GT(WebRtcOpus_EncoderCreate(&opus_stereo_encoder_, 2, 1, 48000), -1);
+
+ // Create Opus decoders for mono and stereo for stand-alone testing of Opus.
+ ASSERT_GT(WebRtcOpus_DecoderCreate(&opus_mono_decoder_, 1, 48000), -1);
+ ASSERT_GT(WebRtcOpus_DecoderCreate(&opus_stereo_decoder_, 2, 48000), -1);
+ WebRtcOpus_DecoderInit(opus_mono_decoder_);
+ WebRtcOpus_DecoderInit(opus_stereo_decoder_);
+
+ ASSERT_TRUE(acm_receiver_.get() != NULL);
+ EXPECT_EQ(0, acm_receiver_->InitializeReceiver());
+
+ // Register Opus stereo as receiving codec.
+ constexpr int kOpusPayloadType = 120;
+ const SdpAudioFormat kOpusFormatStereo("opus", 48000, 2, {{"stereo", "1"}});
+ payload_type_ = kOpusPayloadType;
+ acm_receiver_->SetReceiveCodecs({{kOpusPayloadType, kOpusFormatStereo}});
+
+ // Create and connect the channel.
+ channel_a2b_ = new TestPackStereo;
+ channel_a2b_->RegisterReceiverACM(acm_receiver_.get());
+
+ //
+ // Test Stereo.
+ //
+
+ channel_a2b_->set_codec_mode(kStereo);
+ audio_channels = 2;
+ test_cntr++;
+ OpenOutFile(test_cntr);
+
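+ // Note: the frame_length argument to Run() is a frame size in samples per
+ // channel at 48 kHz, so frame_ms = frame_length / 48 (e.g. 120 / 48 = 2.5 ms
+ // and 2880 / 48 = 60 ms).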
+ // Run Opus with 2.5 ms frame size.
+ Run(channel_a2b_, audio_channels, 64000, 120);
+
+ // Run Opus with 5 ms frame size.
+ Run(channel_a2b_, audio_channels, 64000, 240);
+
+ // Run Opus with 10 ms frame size.
+ Run(channel_a2b_, audio_channels, 64000, 480);
+
+ // Run Opus with 20 ms frame size.
+ Run(channel_a2b_, audio_channels, 64000, 960);
+
+ // Run Opus with 40 ms frame size.
+ Run(channel_a2b_, audio_channels, 64000, 1920);
+
+ // Run Opus with 60 ms frame size.
+ Run(channel_a2b_, audio_channels, 64000, 2880);
+
+ out_file_.Close();
+ out_file_standalone_.Close();
+
+ //
+ // Test Opus stereo with packet-losses.
+ //
+
+ test_cntr++;
+ OpenOutFile(test_cntr);
+
+ // Run Opus with 20 ms frame size, 1% packet loss.
+ Run(channel_a2b_, audio_channels, 64000, 960, 1);
+
+ // Run Opus with 20 ms frame size, 5% packet loss.
+ Run(channel_a2b_, audio_channels, 64000, 960, 5);
+
+ // Run Opus with 20 ms frame size, 10% packet loss.
+ Run(channel_a2b_, audio_channels, 64000, 960, 10);
+
+ out_file_.Close();
+ out_file_standalone_.Close();
+
+ //
+ // Test Mono.
+ //
+ channel_a2b_->set_codec_mode(kMono);
+ audio_channels = 1;
+ test_cntr++;
+ OpenOutFile(test_cntr);
+
+ // Register Opus mono as receiving codec.
+ const SdpAudioFormat kOpusFormatMono("opus", 48000, 2);
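+ // Per RFC 7587 the Opus SDP format always declares 48000/2; mono playout is
+ // signaled by omitting the "stereo" fmtp parameter, as done here.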
+ acm_receiver_->SetReceiveCodecs({{kOpusPayloadType, kOpusFormatMono}});
+
+ // Run Opus with 2.5 ms frame size.
+ Run(channel_a2b_, audio_channels, 32000, 120);
+
+ // Run Opus with 5 ms frame size.
+ Run(channel_a2b_, audio_channels, 32000, 240);
+
+ // Run Opus with 10 ms frame size.
+ Run(channel_a2b_, audio_channels, 32000, 480);
+
+ // Run Opus with 20 ms frame size.
+ Run(channel_a2b_, audio_channels, 32000, 960);
+
+ // Run Opus with 40 ms frame size.
+ Run(channel_a2b_, audio_channels, 32000, 1920);
+
+ // Run Opus with 60 ms frame size.
+ Run(channel_a2b_, audio_channels, 32000, 2880);
+
+ out_file_.Close();
+ out_file_standalone_.Close();
+
+ //
+ // Test Opus mono with packet-losses.
+ //
+ test_cntr++;
+ OpenOutFile(test_cntr);
+
+ // Run Opus with 20 ms frame size, 1% packet loss.
+ Run(channel_a2b_, audio_channels, 64000, 960, 1);
+
+ // Run Opus with 20 ms frame size, 5% packet loss.
+ Run(channel_a2b_, audio_channels, 64000, 960, 5);
+
+ // Run Opus with 20 ms frame size, 10% packet loss.
+ Run(channel_a2b_, audio_channels, 64000, 960, 10);
+
+ // Close the files.
+ in_file_stereo_.Close();
+ in_file_mono_.Close();
+ out_file_.Close();
+ out_file_standalone_.Close();
+#endif
+}
+
+void OpusTest::Run(TestPackStereo* channel,
+ size_t channels,
+ int bitrate,
+ size_t frame_length,
+ int percent_loss) {
+ AudioFrame audio_frame;
+ int32_t out_freq_hz_b = out_file_.SamplingFrequency();
+ const size_t kBufferSizeSamples = 480 * 12 * 2; // 120 ms stereo audio.
+ int16_t audio[kBufferSizeSamples];
+ int16_t out_audio[kBufferSizeSamples];
+ int16_t audio_type;
+ size_t written_samples = 0;
+ size_t read_samples = 0;
+ size_t decoded_samples = 0;
+ bool first_packet = true;
+ uint32_t start_time_stamp = 0;
+
+ channel->reset_payload_size();
+ counter_ = 0;
+
+ // Set encoder rate.
+ EXPECT_EQ(0, WebRtcOpus_SetBitRate(opus_mono_encoder_, bitrate));
+ EXPECT_EQ(0, WebRtcOpus_SetBitRate(opus_stereo_encoder_, bitrate));
+
+#if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS) || defined(WEBRTC_ARCH_ARM)
+ // On Android, iOS, and/or ARM, use a lower complexity setting by default.
+ const int kOpusComplexity5 = 5;
+ EXPECT_EQ(0, WebRtcOpus_SetComplexity(opus_mono_encoder_, kOpusComplexity5));
+ EXPECT_EQ(0,
+ WebRtcOpus_SetComplexity(opus_stereo_encoder_, kOpusComplexity5));
+#endif
+
+ // Fast-forward 1 second (100 blocks) since the files start with silence.
+ in_file_stereo_.FastForward(100);
+ in_file_mono_.FastForward(100);
+
+ // Limit the runtime to 1000 blocks of 10 ms each.
+ for (size_t audio_length = 0; audio_length < 1000; audio_length += 10) {
+ bool lost_packet = false;
+
+ // Get 10 msec of audio.
+ if (channels == 1) {
+ if (in_file_mono_.EndOfFile()) {
+ break;
+ }
+ in_file_mono_.Read10MsData(audio_frame);
+ } else {
+ if (in_file_stereo_.EndOfFile()) {
+ break;
+ }
+ in_file_stereo_.Read10MsData(audio_frame);
+ }
+
+ // If input audio is sampled at 32 kHz, resampling to 48 kHz is required.
+ EXPECT_EQ(480, resampler_.Resample10Msec(
+ audio_frame.data(), audio_frame.sample_rate_hz_, 48000,
+ channels, kBufferSizeSamples - written_samples,
+ &audio[written_samples]));
+ written_samples += 480 * channels;
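+ // 10 ms at 48 kHz is 480 samples per channel, which is why Resample10Msec
+ // is expected to return 480 and written_samples advances by 480 * channels.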
+
+ // Sometimes we need to loop over the audio vector to produce the right
+ // number of packets.
+ size_t loop_encode =
+ (written_samples - read_samples) / (channels * frame_length);
+
+ if (loop_encode > 0) {
+ const size_t kMaxBytes = 1000; // Maximum number of bytes for one packet.
+ size_t bitstream_len_byte;
+ uint8_t bitstream[kMaxBytes];
+ for (size_t i = 0; i < loop_encode; i++) {
+ int bitstream_len_byte_int = WebRtcOpus_Encode(
+ (channels == 1) ? opus_mono_encoder_ : opus_stereo_encoder_,
+ &audio[read_samples], frame_length, kMaxBytes, bitstream);
+ ASSERT_GE(bitstream_len_byte_int, 0);
+ bitstream_len_byte = static_cast<size_t>(bitstream_len_byte_int);
+
+ // Simulate packet loss by flagging the packet as lost in
+ // `percent_loss` percent of the loops.
+ // TODO(tlegrand): Move handling of loss simulation to TestPackStereo.
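+ // For example, with percent_loss = 10, floor(100 / 10 + 0.5) = 10, so
+ // every 10th encoded packet is flagged as lost, i.e. roughly 10% of all
+ // packets.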
+ if (percent_loss > 0) {
+ if (counter_ == floor((100 / percent_loss) + 0.5)) {
+ counter_ = 0;
+ lost_packet = true;
+ channel->set_lost_packet(true);
+ } else {
+ lost_packet = false;
+ channel->set_lost_packet(false);
+ }
+ counter_++;
+ }
+
+ // Run stand-alone Opus decoder, or decode PLC.
+ if (channels == 1) {
+ if (!lost_packet) {
+ decoded_samples += WebRtcOpus_Decode(
+ opus_mono_decoder_, bitstream, bitstream_len_byte,
+ &out_audio[decoded_samples * channels], &audio_type);
+ } else {
+ // Call decoder PLC.
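+ // Decoding with a NULL payload requests loss concealment; each call
+ // produces 10 ms of audio at 48 kHz (480 samples per channel), so loop
+ // until a full frame_length worth of samples has been concealed.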
+ constexpr int kPlcDurationMs = 10;
+ constexpr int kPlcSamples = 48 * kPlcDurationMs;
+ size_t total_plc_samples = 0;
+ while (total_plc_samples < frame_length) {
+ int ret = WebRtcOpus_Decode(
+ opus_mono_decoder_, NULL, 0,
+ &out_audio[decoded_samples * channels], &audio_type);
+ EXPECT_EQ(ret, kPlcSamples);
+ decoded_samples += ret;
+ total_plc_samples += ret;
+ }
+ EXPECT_EQ(total_plc_samples, frame_length);
+ }
+ } else {
+ if (!lost_packet) {
+ decoded_samples += WebRtcOpus_Decode(
+ opus_stereo_decoder_, bitstream, bitstream_len_byte,
+ &out_audio[decoded_samples * channels], &audio_type);
+ } else {
+ // Call decoder PLC.
+ constexpr int kPlcDurationMs = 10;
+ constexpr int kPlcSamples = 48 * kPlcDurationMs;
+ size_t total_plc_samples = 0;
+ while (total_plc_samples < frame_length) {
+ int ret = WebRtcOpus_Decode(
+ opus_stereo_decoder_, NULL, 0,
+ &out_audio[decoded_samples * channels], &audio_type);
+ EXPECT_EQ(ret, kPlcSamples);
+ decoded_samples += ret;
+ total_plc_samples += ret;
+ }
+ EXPECT_EQ(total_plc_samples, frame_length);
+ }
+ }
+
+ // Send data to the channel. "channel" will handle the loss simulation.
+ channel->SendData(AudioFrameType::kAudioFrameSpeech, payload_type_,
+ rtp_timestamp_, bitstream, bitstream_len_byte, 0);
+ if (first_packet) {
+ first_packet = false;
+ start_time_stamp = rtp_timestamp_;
+ }
+ rtp_timestamp_ += static_cast<uint32_t>(frame_length);
+ read_samples += frame_length * channels;
+ }
+ if (read_samples == written_samples) {
+ read_samples = 0;
+ written_samples = 0;
+ }
+ }
+
+ // Run received side of ACM.
+ bool muted;
+ ASSERT_EQ(
+ 0, acm_receiver_->PlayoutData10Ms(out_freq_hz_b, &audio_frame, &muted));
+ ASSERT_FALSE(muted);
+
+ // Write output speech to file.
+ out_file_.Write10MsData(
+ audio_frame.data(),
+ audio_frame.samples_per_channel_ * audio_frame.num_channels_);
+
+ // Write stand-alone speech to file.
+ out_file_standalone_.Write10MsData(out_audio, decoded_samples * channels);
+
+ if (audio_frame.timestamp_ > start_time_stamp) {
+ // Number of channels should be the same for both stand-alone and
+ // ACM-decoding.
+ EXPECT_EQ(audio_frame.num_channels_, channels);
+ }
+
+ decoded_samples = 0;
+ }
+
+ if (in_file_mono_.EndOfFile()) {
+ in_file_mono_.Rewind();
+ }
+ if (in_file_stereo_.EndOfFile()) {
+ in_file_stereo_.Rewind();
+ }
+ // Reset in case we ended with a lost packet.
+ channel->set_lost_packet(false);
+}
+
+void OpusTest::OpenOutFile(int test_number) {
+ std::string file_name;
+ std::stringstream file_stream;
+ file_stream << webrtc::test::OutputPath() << "opustest_out_" << test_number
+ << ".pcm";
+ file_name = file_stream.str();
+ out_file_.Open(file_name, 48000, "wb");
+ file_stream.str("");
+ file_name = file_stream.str();
+ file_stream << webrtc::test::OutputPath() << "opusstandalone_out_"
+ << test_number << ".pcm";
+ file_name = file_stream.str();
+ out_file_standalone_.Open(file_name, 48000, "wb");
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/test/opus_test.h b/third_party/libwebrtc/modules/audio_coding/test/opus_test.h
new file mode 100644
index 0000000000..c69f922adb
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/test/opus_test.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_TEST_OPUS_TEST_H_
+#define MODULES_AUDIO_CODING_TEST_OPUS_TEST_H_
+
+#include <math.h>
+
+#include <memory>
+
+#include "modules/audio_coding/acm2/acm_resampler.h"
+#include "modules/audio_coding/codecs/opus/opus_interface.h"
+#include "modules/audio_coding/test/PCMFile.h"
+#include "modules/audio_coding/test/TestStereo.h"
+
+namespace webrtc {
+
+class OpusTest {
+ public:
+ OpusTest();
+ ~OpusTest();
+
+ void Perform();
+
+ private:
+ void Run(TestPackStereo* channel,
+ size_t channels,
+ int bitrate,
+ size_t frame_length,
+ int percent_loss = 0);
+
+ void OpenOutFile(int test_number);
+
+ std::unique_ptr<AudioCodingModule> acm_receiver_;
+ TestPackStereo* channel_a2b_;
+ PCMFile in_file_stereo_;
+ PCMFile in_file_mono_;
+ PCMFile out_file_;
+ PCMFile out_file_standalone_;
+ int counter_;
+ uint8_t payload_type_;
+ uint32_t rtp_timestamp_;
+ acm2::ACMResampler resampler_;
+ WebRtcOpusEncInst* opus_mono_encoder_;
+ WebRtcOpusEncInst* opus_stereo_encoder_;
+ WebRtcOpusDecInst* opus_mono_decoder_;
+ WebRtcOpusDecInst* opus_stereo_decoder_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_CODING_TEST_OPUS_TEST_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/test/target_delay_unittest.cc b/third_party/libwebrtc/modules/audio_coding/test/target_delay_unittest.cc
new file mode 100644
index 0000000000..5eccdcf8eb
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/test/target_delay_unittest.cc
@@ -0,0 +1,161 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <memory>
+
+#include "api/audio/audio_frame.h"
+#include "api/audio_codecs/builtin_audio_decoder_factory.h"
+#include "api/rtp_headers.h"
+#include "modules/audio_coding/acm2/acm_receiver.h"
+#include "modules/audio_coding/codecs/pcm16b/pcm16b.h"
+#include "modules/audio_coding/include/audio_coding_module.h"
+#include "test/gtest.h"
+#include "test/testsupport/file_utils.h"
+
+namespace webrtc {
+
+class TargetDelayTest : public ::testing::Test {
+ protected:
+ TargetDelayTest()
+ : receiver_(
+ AudioCodingModule::Config(CreateBuiltinAudioDecoderFactory())) {}
+
+ ~TargetDelayTest() {}
+
+ void SetUp() {
+ constexpr int pltype = 108;
+ std::map<int, SdpAudioFormat> receive_codecs = {
+ {pltype, {"L16", kSampleRateHz, 1}}};
+ receiver_.SetCodecs(receive_codecs);
+
+ rtp_header_.payloadType = pltype;
+ rtp_header_.timestamp = 0;
+ rtp_header_.ssrc = 0x12345678;
+ rtp_header_.markerBit = false;
+ rtp_header_.sequenceNumber = 0;
+
+ int16_t audio[kFrameSizeSamples];
+ const int kRange = 0x7FF; // 2047, easy for masking.
+ for (size_t n = 0; n < kFrameSizeSamples; ++n)
+ audio[n] = (rand() & kRange) - kRange / 2;
+ WebRtcPcm16b_Encode(audio, kFrameSizeSamples, payload_);
+ }
+
+ void OutOfRangeInput() {
+ EXPECT_EQ(-1, SetMinimumDelay(-1));
+ EXPECT_EQ(-1, SetMinimumDelay(10001));
+ }
+
+ void TargetDelayBufferMinMax() {
+ const int kTargetMinDelayMs = kNum10msPerFrame * 10;
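+ // kNum10msPerFrame * 10 = 20 ms, i.e. one packet duration.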
+ ASSERT_EQ(0, SetMinimumDelay(kTargetMinDelayMs));
+ for (int m = 0; m < 30; ++m) // Run enough iterations to fill the buffer.
+ Run(true);
+ int clean_optimal_delay = GetCurrentOptimalDelayMs();
+ EXPECT_EQ(kTargetMinDelayMs, clean_optimal_delay);
+
+ const int kTargetMaxDelayMs = 2 * (kNum10msPerFrame * 10);
+ ASSERT_EQ(0, SetMaximumDelay(kTargetMaxDelayMs));
+ for (int n = 0; n < 30; ++n) // Run enough iterations to fill the buffer.
+ Run(false);
+
+ int capped_optimal_delay = GetCurrentOptimalDelayMs();
+ EXPECT_EQ(kTargetMaxDelayMs, capped_optimal_delay);
+ }
+
+ private:
+ static const int kSampleRateHz = 16000;
+ static const int kNum10msPerFrame = 2;
+ static const size_t kFrameSizeSamples = 320; // 20 ms @ 16 kHz.
+ // payload-len = frame-samples * 2 bytes/sample.
+ static const int kPayloadLenBytes = 320 * 2;
+ // Inter-arrival time in the jittery channel, measured in number of packets.
+ // A value of one means no jitter.
+ static const int kInterarrivalJitterPacket = 2;
+
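+ // Inserts one encoded 20 ms packet (320 samples at 16 kHz) into the
+ // receiver, advancing the RTP timestamp and sequence number first.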
+ void Push() {
+ rtp_header_.timestamp += kFrameSizeSamples;
+ rtp_header_.sequenceNumber++;
+ ASSERT_EQ(0, receiver_.InsertPacket(rtp_header_,
+ rtc::ArrayView<const uint8_t>(
+ payload_, kFrameSizeSamples * 2)));
+ }
+
+ // Pull audio equivalent to the amount of audio in one RTP packet.
+ void Pull() {
+ AudioFrame frame;
+ bool muted;
+ for (int k = 0; k < kNum10msPerFrame; ++k) { // Pull one frame.
+ ASSERT_EQ(0, receiver_.GetAudio(-1, &frame, &muted));
+ ASSERT_FALSE(muted);
+ // ASSERT_EQ does not build for these operands, so use ASSERT_TRUE instead.
+ ASSERT_TRUE(kSampleRateHz == frame.sample_rate_hz_);
+ ASSERT_EQ(1u, frame.num_channels_);
+ ASSERT_TRUE(kSampleRateHz / 100 == frame.samples_per_channel_);
+ }
+ }
+
+ void Run(bool clean) {
+ for (int n = 0; n < 10; ++n) {
+ for (int m = 0; m < 5; ++m) {
+ Push();
+ Pull();
+ }
+
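+ // In the jittery phase, each pushed packet is followed by
+ // kInterarrivalJitterPacket pulls (40 ms pulled per 20 ms pushed),
+ // emulating an inter-arrival time of two packets and driving the
+ // optimal delay up toward the configured maximum.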
+ if (!clean) {
+ for (int m = 0; m < 10; ++m) { // Long enough to trigger delay change.
+ Push();
+ for (int n = 0; n < kInterarrivalJitterPacket; ++n)
+ Pull();
+ }
+ }
+ }
+ }
+
+ int SetMinimumDelay(int delay_ms) {
+ return receiver_.SetMinimumDelay(delay_ms);
+ }
+
+ int SetMaximumDelay(int delay_ms) {
+ return receiver_.SetMaximumDelay(delay_ms);
+ }
+
+ int GetCurrentOptimalDelayMs() {
+ NetworkStatistics stats;
+ receiver_.GetNetworkStatistics(&stats);
+ return stats.preferredBufferSize;
+ }
+
+ acm2::AcmReceiver receiver_;
+ RTPHeader rtp_header_;
+ uint8_t payload_[kPayloadLenBytes];
+};
+
+// Flaky on iOS: webrtc:7057.
+#if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS)
+#define MAYBE_OutOfRangeInput DISABLED_OutOfRangeInput
+#else
+#define MAYBE_OutOfRangeInput OutOfRangeInput
+#endif
+TEST_F(TargetDelayTest, MAYBE_OutOfRangeInput) {
+ OutOfRangeInput();
+}
+
+// Flaky on iOS: webrtc:7057.
+#if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS)
+#define MAYBE_TargetDelayBufferMinMax DISABLED_TargetDelayBufferMinMax
+#else
+#define MAYBE_TargetDelayBufferMinMax TargetDelayBufferMinMax
+#endif
+TEST_F(TargetDelayTest, MAYBE_TargetDelayBufferMinMax) {
+ TargetDelayBufferMinMax();
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/webrtc_cng_gn/moz.build b/third_party/libwebrtc/modules/audio_coding/webrtc_cng_gn/moz.build
new file mode 100644
index 0000000000..8ff29da0a2
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/webrtc_cng_gn/moz.build
@@ -0,0 +1,212 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/audio_coding/codecs/cng/webrtc_cng.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("webrtc_cng_gn")
diff --git a/third_party/libwebrtc/modules/audio_coding/webrtc_multiopus_gn/moz.build b/third_party/libwebrtc/modules/audio_coding/webrtc_multiopus_gn/moz.build
new file mode 100644
index 0000000000..ba2537bee5
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/webrtc_multiopus_gn/moz.build
@@ -0,0 +1,231 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_CODEC_ILBC"] = True
+DEFINES["WEBRTC_CODEC_OPUS"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+DEFINES["WEBRTC_OPUS_SUPPORT_120MS_PTIME"] = "1"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/media/libopus/include/",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/audio_coding/codecs/opus/audio_decoder_multi_channel_opus_impl.cc",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/opus/audio_encoder_multi_channel_opus_impl.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_CODEC_ISAC"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_CODEC_ISAC"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_CODEC_ISAC"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_CODEC_ISACFX"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "ppc64":
+
+ DEFINES["WEBRTC_CODEC_ISAC"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["WEBRTC_CODEC_ISAC"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["WEBRTC_CODEC_ISAC"] = True
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["WEBRTC_CODEC_ISAC"] = True
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_CODEC_ISAC"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_CODEC_ISAC"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_CODEC_ISAC"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("webrtc_multiopus_gn")
diff --git a/third_party/libwebrtc/modules/audio_coding/webrtc_opus_gn/moz.build b/third_party/libwebrtc/modules/audio_coding/webrtc_opus_gn/moz.build
new file mode 100644
index 0000000000..214294646e
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/webrtc_opus_gn/moz.build
@@ -0,0 +1,238 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_CODEC_ILBC"] = True
+DEFINES["WEBRTC_CODEC_OPUS"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+DEFINES["WEBRTC_OPUS_SUPPORT_120MS_PTIME"] = "1"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/media/libopus/include/",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/audio_coding/codecs/opus/audio_decoder_opus.cc",
+ "/third_party/libwebrtc/modules/audio_coding/codecs/opus/audio_encoder_opus.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_CODEC_ISAC"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_CODEC_ISAC"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_CODEC_ISAC"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_CODEC_ISACFX"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "ppc64":
+
+ DEFINES["WEBRTC_CODEC_ISAC"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["WEBRTC_CODEC_ISAC"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["WEBRTC_CODEC_ISAC"] = True
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["WEBRTC_CODEC_ISAC"] = True
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_CODEC_ISAC"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_CODEC_ISAC"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_CODEC_ISAC"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("webrtc_opus_gn")
diff --git a/third_party/libwebrtc/modules/audio_coding/webrtc_opus_wrapper_gn/moz.build b/third_party/libwebrtc/modules/audio_coding/webrtc_opus_wrapper_gn/moz.build
new file mode 100644
index 0000000000..5a26300ac8
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_coding/webrtc_opus_wrapper_gn/moz.build
@@ -0,0 +1,230 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_CODEC_ILBC"] = True
+DEFINES["WEBRTC_CODEC_OPUS"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+DEFINES["WEBRTC_OPUS_SUPPORT_120MS_PTIME"] = "1"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/media/libopus/include/",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/audio_coding/codecs/opus/opus_interface.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_CODEC_ISAC"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_CODEC_ISAC"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_CODEC_ISAC"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_CODEC_ISACFX"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "ppc64":
+
+ DEFINES["WEBRTC_CODEC_ISAC"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["WEBRTC_CODEC_ISAC"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["WEBRTC_CODEC_ISAC"] = True
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["WEBRTC_CODEC_ISAC"] = True
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_CODEC_ISAC"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_CODEC_ISAC"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_CODEC_ISAC"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("webrtc_opus_wrapper_gn")
diff --git a/third_party/libwebrtc/modules/audio_device/BUILD.gn b/third_party/libwebrtc/modules/audio_device/BUILD.gn
new file mode 100644
index 0000000000..6c9d223099
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/BUILD.gn
@@ -0,0 +1,502 @@
+# Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+import("../../webrtc.gni")
+
+if (is_android) {
+ import("//build/config/android/config.gni")
+ import("//build/config/android/rules.gni")
+}
+
+config("audio_device_warnings_config") {
+ if (is_win && is_clang) {
+ cflags = [
+ # Disable warnings failing when compiling with Clang on Windows.
+ # https://bugs.chromium.org/p/webrtc/issues/detail?id=5366
+ "-Wno-microsoft-goto",
+ ]
+ }
+}
+
+rtc_source_set("audio_device_default") {
+ visibility = [ "*" ]
+ sources = [ "include/audio_device_default.h" ]
+ deps = [ ":audio_device_api" ]
+}
+
+rtc_source_set("audio_device") {
+ visibility = [ "*" ]
+ public_deps = [
+ ":audio_device_api",
+
+ # Deprecated.
+ # TODO(webrtc:7452): Remove this public dep. audio_device_impl should
+ # be depended on directly if needed.
+ ":audio_device_impl",
+ ]
+}
+
+rtc_source_set("audio_device_api") {
+ visibility = [ "*" ]
+ sources = [
+ "include/audio_device.h",
+ "include/audio_device_defines.h",
+ ]
+ deps = [
+ "../../api:scoped_refptr",
+ "../../api/task_queue",
+ "../../rtc_base:checks",
+ "../../rtc_base:refcount",
+ "../../rtc_base:stringutils",
+ ]
+}
+
+rtc_library("audio_device_buffer") {
+ sources = [
+ "audio_device_buffer.cc",
+ "audio_device_buffer.h",
+ "audio_device_config.h",
+ "fine_audio_buffer.cc",
+ "fine_audio_buffer.h",
+ ]
+ deps = [
+ ":audio_device_api",
+ "../../api:array_view",
+ "../../api:sequence_checker",
+ "../../api/task_queue",
+ "../../common_audio:common_audio_c",
+ "../../rtc_base:buffer",
+ "../../rtc_base:checks",
+ "../../rtc_base:logging",
+ "../../rtc_base:macromagic",
+ "../../rtc_base:rtc_task_queue",
+ "../../rtc_base:safe_conversions",
+ "../../rtc_base:timestamp_aligner",
+ "../../rtc_base:timeutils",
+ "../../rtc_base/synchronization:mutex",
+ "../../system_wrappers",
+ "../../system_wrappers:metrics",
+ ]
+}
+
+rtc_library("audio_device_generic") {
+ sources = [
+ "audio_device_generic.cc",
+ "audio_device_generic.h",
+ ]
+ deps = [
+ ":audio_device_api",
+ ":audio_device_buffer",
+ "../../rtc_base:logging",
+ ]
+}
+
+rtc_library("audio_device_name") {
+ sources = [
+ "audio_device_name.cc",
+ "audio_device_name.h",
+ ]
+ absl_deps = [ "//third_party/abseil-cpp/absl/strings" ]
+}
+
+rtc_source_set("windows_core_audio_utility") {
+ if (is_win && !build_with_chromium) {
+ sources = [
+ "win/core_audio_utility_win.cc",
+ "win/core_audio_utility_win.h",
+ ]
+
+ deps = [
+ ":audio_device_api",
+ ":audio_device_name",
+ "../../api/units:time_delta",
+ "../../rtc_base:checks",
+ "../../rtc_base:logging",
+ "../../rtc_base:macromagic",
+ "../../rtc_base:platform_thread_types",
+ "../../rtc_base:stringutils",
+ "../../rtc_base/win:windows_version",
+ ]
+ absl_deps = [ "//third_party/abseil-cpp/absl/strings:strings" ]
+
+ libs = [ "oleaut32.lib" ]
+ }
+}
+
+# An ADM with a dedicated factory method which does not depend on the
+# audio_device_impl target. The goal is to use this new structure and
+# gradually phase out the old design.
+# TODO(henrika): currently only supported on Windows.
+rtc_source_set("audio_device_module_from_input_and_output") {
+ visibility = [ "*" ]
+ if (is_win && !build_with_chromium) {
+ sources = [
+ "include/audio_device_factory.cc",
+ "include/audio_device_factory.h",
+ ]
+ sources += [
+ "win/audio_device_module_win.cc",
+ "win/audio_device_module_win.h",
+ "win/core_audio_base_win.cc",
+ "win/core_audio_base_win.h",
+ "win/core_audio_input_win.cc",
+ "win/core_audio_input_win.h",
+ "win/core_audio_output_win.cc",
+ "win/core_audio_output_win.h",
+ ]
+
+ deps = [
+ ":audio_device_api",
+ ":audio_device_buffer",
+ ":windows_core_audio_utility",
+ "../../api:make_ref_counted",
+ "../../api:scoped_refptr",
+ "../../api:sequence_checker",
+ "../../api/task_queue",
+ "../../rtc_base:checks",
+ "../../rtc_base:logging",
+ "../../rtc_base:macromagic",
+ "../../rtc_base:platform_thread",
+ "../../rtc_base:safe_conversions",
+ "../../rtc_base:stringutils",
+ "../../rtc_base:timeutils",
+ "../../rtc_base/win:scoped_com_initializer",
+ "../../rtc_base/win:windows_version",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/strings:strings",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+ }
+}
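
A minimal usage sketch for the Windows-only target above, assuming the factory entry point declared in include/audio_device_factory.h keeps the shape CreateWindowsCoreAudioAudioDeviceModule(TaskQueueFactory*, bool); treat this as illustrative, not a pinned API:

```cpp
#include "api/task_queue/task_queue_factory.h"
#include "modules/audio_device/include/audio_device_factory.h"

rtc::scoped_refptr<webrtc::AudioDeviceModule> CreateWindowsAdm(
    webrtc::TaskQueueFactory* task_queue_factory) {
  // automatic_restart = true lets the ADM recover from device changes.
  return webrtc::CreateWindowsCoreAudioAudioDeviceModule(
      task_queue_factory, /*automatic_restart=*/true);
}
```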
+
+# Contains default implementations of webrtc::AudioDeviceModule for Windows,
+# Linux, Mac, iOS and Android.
+rtc_library("audio_device_impl") {
+ visibility = [ "*" ]
+ deps = [
+ ":audio_device_api",
+ ":audio_device_buffer",
+ ":audio_device_default",
+ ":audio_device_generic",
+ "../../api:array_view",
+ "../../api:make_ref_counted",
+ "../../api:refcountedbase",
+ "../../api:scoped_refptr",
+ "../../api:sequence_checker",
+ "../../api/task_queue",
+ "../../common_audio",
+ "../../common_audio:common_audio_c",
+ "../../rtc_base",
+ "../../rtc_base:buffer",
+ "../../rtc_base:checks",
+ "../../rtc_base:logging",
+ "../../rtc_base:macromagic",
+ "../../rtc_base:platform_thread",
+ "../../rtc_base:random",
+ "../../rtc_base:rtc_event",
+ "../../rtc_base:rtc_task_queue",
+ "../../rtc_base:safe_conversions",
+ "../../rtc_base:stringutils",
+ "../../rtc_base:timeutils",
+ "../../rtc_base/synchronization:mutex",
+ "../../rtc_base/system:arch",
+ "../../rtc_base/system:file_wrapper",
+ "../../rtc_base/task_utils:repeating_task",
+ "../../system_wrappers",
+ "../../system_wrappers:field_trial",
+ "../../system_wrappers:metrics",
+ "../utility",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/base:core_headers",
+ "//third_party/abseil-cpp/absl/strings:strings",
+ ]
+ if (rtc_include_internal_audio_device && is_ios) {
+ deps += [ "../../sdk:audio_device" ]
+ }
+
+ sources = [
+ "dummy/audio_device_dummy.cc",
+ "dummy/audio_device_dummy.h",
+ "dummy/file_audio_device.cc",
+ "dummy/file_audio_device.h",
+ "include/fake_audio_device.h",
+ "include/test_audio_device.cc",
+ "include/test_audio_device.h",
+ ]
+ if (build_with_mozilla) {
+ sources -= [
+ "include/test_audio_device.cc",
+ "include/test_audio_device.h",
+ ]
+ }
+
+ #if (build_with_mozilla) {
+ # sources += [
+ # "opensl/single_rw_fifo.cc",
+ # "opensl/single_rw_fifo.h",
+ # ]
+ #}
+
+ defines = []
+ cflags = []
+ if (rtc_audio_device_plays_sinus_tone) {
+ defines += [ "AUDIO_DEVICE_PLAYS_SINUS_TONE" ]
+ }
+ if (rtc_enable_android_aaudio) {
+ defines += [ "WEBRTC_AUDIO_DEVICE_INCLUDE_ANDROID_AAUDIO" ]
+ }
+ if (rtc_include_internal_audio_device) {
+ sources += [
+ "audio_device_data_observer.cc",
+ "audio_device_impl.cc",
+ "audio_device_impl.h",
+ "include/audio_device_data_observer.h",
+ ]
+ if (is_android) {
+ sources += [
+ "android/audio_common.h",
+ "android/audio_device_template.h",
+ "android/audio_manager.cc",
+ "android/audio_manager.h",
+ "android/audio_record_jni.cc",
+ "android/audio_record_jni.h",
+ "android/audio_track_jni.cc",
+ "android/audio_track_jni.h",
+ "android/build_info.cc",
+ "android/build_info.h",
+ "android/opensles_common.cc",
+ "android/opensles_common.h",
+ "android/opensles_player.cc",
+ "android/opensles_player.h",
+ "android/opensles_recorder.cc",
+ "android/opensles_recorder.h",
+ ]
+ libs = [
+ "log",
+ "OpenSLES",
+ ]
+ if (rtc_enable_android_aaudio) {
+ sources += [
+ "android/aaudio_player.cc",
+ "android/aaudio_player.h",
+ "android/aaudio_recorder.cc",
+ "android/aaudio_recorder.h",
+ "android/aaudio_wrapper.cc",
+ "android/aaudio_wrapper.h",
+ ]
+ libs += [ "aaudio" ]
+ }
+
+ if (build_with_mozilla) {
+ include_dirs += [
+ "/config/external/nspr",
+ "/nsprpub/lib/ds",
+ "/nsprpub/pr/include",
+ ]
+ }
+ }
+ if (rtc_use_dummy_audio_file_devices) {
+ defines += [ "WEBRTC_DUMMY_FILE_DEVICES" ]
+ } else {
+ if (is_linux || is_chromeos) {
+ sources += [
+ "linux/alsasymboltable_linux.cc",
+ "linux/alsasymboltable_linux.h",
+ "linux/audio_device_alsa_linux.cc",
+ "linux/audio_device_alsa_linux.h",
+ "linux/audio_mixer_manager_alsa_linux.cc",
+ "linux/audio_mixer_manager_alsa_linux.h",
+ "linux/latebindingsymboltable_linux.cc",
+ "linux/latebindingsymboltable_linux.h",
+ ]
+ defines += [ "WEBRTC_ENABLE_LINUX_ALSA" ]
+ libs = [ "dl" ]
+ if (rtc_use_x11) {
+ libs += [ "X11" ]
+ defines += [ "WEBRTC_USE_X11" ]
+ }
+ if (rtc_include_pulse_audio) {
+ defines += [ "WEBRTC_ENABLE_LINUX_PULSE" ]
+ }
+ sources += [
+ "linux/audio_device_pulse_linux.cc",
+ "linux/audio_device_pulse_linux.h",
+ "linux/audio_mixer_manager_pulse_linux.cc",
+ "linux/audio_mixer_manager_pulse_linux.h",
+ "linux/pulseaudiosymboltable_linux.cc",
+ "linux/pulseaudiosymboltable_linux.h",
+ ]
+ }
+ if (is_mac) {
+ sources += [
+ "mac/audio_device_mac.cc",
+ "mac/audio_device_mac.h",
+ "mac/audio_mixer_manager_mac.cc",
+ "mac/audio_mixer_manager_mac.h",
+ ]
+ deps += [
+ ":audio_device_impl_frameworks",
+ "../third_party/portaudio:mac_portaudio",
+ ]
+ }
+ if (is_win) {
+ sources += [
+ "win/audio_device_core_win.cc",
+ "win/audio_device_core_win.h",
+ ]
+ libs = [
+ # Required for the built-in WASAPI AEC.
+ "dmoguids.lib",
+ "wmcodecdspuuid.lib",
+ "amstrmid.lib",
+ "msdmo.lib",
+ "oleaut32.lib",
+ ]
+ deps += [
+ "../../rtc_base:win32",
+ "../../rtc_base/win:scoped_com_initializer",
+ ]
+ }
+ configs += [ ":audio_device_warnings_config" ]
+ }
+ } else {
+ defines = [ "WEBRTC_DUMMY_AUDIO_BUILD" ]
+ }
+
+ if (!build_with_chromium) {
+ sources += [
+ # Do not link these into Chrome since they contain static data.
+ "dummy/file_audio_device_factory.cc",
+ "dummy/file_audio_device_factory.h",
+ ]
+ }
+}
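
A hedged sketch of instantiating the default platform ADM built by this target, assuming the AudioDeviceModule::Create(AudioLayer, TaskQueueFactory*) overload declared in include/audio_device.h:

```cpp
#include "modules/audio_device/include/audio_device.h"

rtc::scoped_refptr<webrtc::AudioDeviceModule> CreateDefaultAdm(
    webrtc::TaskQueueFactory* task_queue_factory) {
  // kPlatformDefaultAudio picks the native backend compiled in above
  // (WASAPI, ALSA/Pulse, Core Audio, OpenSL ES/AAudio, or dummy).
  return webrtc::AudioDeviceModule::Create(
      webrtc::AudioDeviceModule::kPlatformDefaultAudio, task_queue_factory);
}
```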
+
+if (is_mac) {
+ rtc_source_set("audio_device_impl_frameworks") {
+ visibility = [ ":*" ]
+ frameworks = [
+ # Needed for CoreGraphics:
+ "ApplicationServices.framework",
+
+ "AudioToolbox.framework",
+ "CoreAudio.framework",
+
+ # Needed for CGEventSourceKeyState in audio_device_mac.cc:
+ "CoreGraphics.framework",
+ ]
+ }
+}
+
+if (rtc_include_tests) {
+rtc_source_set("mock_audio_device") {
+ visibility = [ "*" ]
+ testonly = true
+ sources = [
+ "include/mock_audio_device.h",
+ "include/mock_audio_transport.h",
+ "mock_audio_device_buffer.h",
+ ]
+ deps = [
+ ":audio_device",
+ ":audio_device_buffer",
+ ":audio_device_impl",
+ "../../api:make_ref_counted",
+ "../../test:test_support",
+ ]
+}
+}
+
+if (rtc_include_tests && !build_with_chromium) {
+ rtc_library("audio_device_unittests") {
+ testonly = true
+
+ sources = [
+ "fine_audio_buffer_unittest.cc",
+ "include/test_audio_device_unittest.cc",
+ ]
+ deps = [
+ ":audio_device",
+ ":audio_device_buffer",
+ ":audio_device_impl",
+ ":mock_audio_device",
+ "../../api:array_view",
+ "../../api:scoped_refptr",
+ "../../api:sequence_checker",
+ "../../api/task_queue",
+ "../../api/task_queue:default_task_queue_factory",
+ "../../common_audio",
+ "../../rtc_base:buffer",
+ "../../rtc_base:checks",
+ "../../rtc_base:ignore_wundef",
+ "../../rtc_base:logging",
+ "../../rtc_base:macromagic",
+ "../../rtc_base:race_checker",
+ "../../rtc_base:rtc_event",
+ "../../rtc_base:safe_conversions",
+ "../../rtc_base:timeutils",
+ "../../rtc_base/synchronization:mutex",
+ "../../system_wrappers",
+ "../../test:fileutils",
+ "../../test:test_support",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/strings",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+ if (is_linux || is_chromeos || is_mac || is_win) {
+ sources += [ "audio_device_unittest.cc" ]
+ }
+ if (is_win) {
+ sources += [ "win/core_audio_utility_win_unittest.cc" ]
+ deps += [
+ ":audio_device_module_from_input_and_output",
+ ":windows_core_audio_utility",
+ "../../rtc_base/win:scoped_com_initializer",
+ "../../rtc_base/win:windows_version",
+ ]
+ }
+ if (is_android) {
+ sources += [
+ "android/audio_device_unittest.cc",
+ "android/audio_manager_unittest.cc",
+ "android/ensure_initialized.cc",
+ "android/ensure_initialized.h",
+ ]
+ deps += [
+ "../../sdk/android:internal_jni",
+ "../../sdk/android:libjingle_peerconnection_java",
+ "../../sdk/android:native_api_jni",
+ "../../sdk/android:native_test_jni_onload",
+ "../utility",
+ ]
+ }
+ if (!rtc_include_internal_audio_device) {
+ defines = [ "WEBRTC_DUMMY_AUDIO_BUILD" ]
+ }
+ }
+}
+
+if ((!build_with_chromium && !build_with_mozilla) && is_android) {
+ rtc_android_library("audio_device_java") {
+ sources = [
+ "android/java/src/org/webrtc/voiceengine/BuildInfo.java",
+ "android/java/src/org/webrtc/voiceengine/WebRtcAudioEffects.java",
+ "android/java/src/org/webrtc/voiceengine/WebRtcAudioManager.java",
+ "android/java/src/org/webrtc/voiceengine/WebRtcAudioRecord.java",
+ "android/java/src/org/webrtc/voiceengine/WebRtcAudioTrack.java",
+ "android/java/src/org/webrtc/voiceengine/WebRtcAudioUtils.java",
+ ]
+ deps = [
+ "../../rtc_base:base_java",
+ "//third_party/androidx:androidx_annotation_annotation_java",
+ ]
+ }
+}
diff --git a/third_party/libwebrtc/modules/audio_device/DEPS b/third_party/libwebrtc/modules/audio_device/DEPS
new file mode 100644
index 0000000000..9cc627d330
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/DEPS
@@ -0,0 +1,13 @@
+include_rules = [
+ "+common_audio",
+ "+system_wrappers",
+]
+
+specific_include_rules = {
+ "ensure_initialized\.cc": [
+ "+sdk/android",
+ ],
+ "audio_device_impl\.cc": [
+ "+sdk/objc",
+ ],
+}
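
An illustration of how the include_rules above constrain C++ sources in this module; the header paths are examples only:

```cpp
// Allowed anywhere under modules/audio_device per include_rules:
#include "common_audio/resampler/include/resampler.h"
#include "system_wrappers/include/metrics.h"
// Allowed only from ensure_initialized.cc per specific_include_rules:
// #include "sdk/android/native_api/jni/jvm.h"
```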
diff --git a/third_party/libwebrtc/modules/audio_device/OWNERS b/third_party/libwebrtc/modules/audio_device/OWNERS
new file mode 100644
index 0000000000..22d03d552b
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/OWNERS
@@ -0,0 +1,2 @@
+henrika@webrtc.org
+tkchin@webrtc.org
diff --git a/third_party/libwebrtc/modules/audio_device/android/aaudio_player.cc b/third_party/libwebrtc/modules/audio_device/android/aaudio_player.cc
new file mode 100644
index 0000000000..5257b2ba1b
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/android/aaudio_player.cc
@@ -0,0 +1,228 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_device/android/aaudio_player.h"
+
+#include <memory>
+
+#include "api/array_view.h"
+#include "modules/audio_device/android/audio_manager.h"
+#include "modules/audio_device/fine_audio_buffer.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+enum AudioDeviceMessageType : uint32_t {
+ kMessageOutputStreamDisconnected,
+};
+
+AAudioPlayer::AAudioPlayer(AudioManager* audio_manager)
+ : main_thread_(rtc::Thread::Current()),
+ aaudio_(audio_manager, AAUDIO_DIRECTION_OUTPUT, this) {
+ RTC_LOG(LS_INFO) << "ctor";
+ thread_checker_aaudio_.Detach();
+}
+
+AAudioPlayer::~AAudioPlayer() {
+ RTC_LOG(LS_INFO) << "dtor";
+ RTC_DCHECK_RUN_ON(&main_thread_checker_);
+ Terminate();
+ RTC_LOG(LS_INFO) << "#detected underruns: " << underrun_count_;
+}
+
+int AAudioPlayer::Init() {
+ RTC_LOG(LS_INFO) << "Init";
+ RTC_DCHECK_RUN_ON(&main_thread_checker_);
+ if (aaudio_.audio_parameters().channels() == 2) {
+ RTC_DLOG(LS_WARNING) << "Stereo mode is enabled";
+ }
+ return 0;
+}
+
+int AAudioPlayer::Terminate() {
+ RTC_LOG(LS_INFO) << "Terminate";
+ RTC_DCHECK_RUN_ON(&main_thread_checker_);
+ StopPlayout();
+ return 0;
+}
+
+int AAudioPlayer::InitPlayout() {
+ RTC_LOG(LS_INFO) << "InitPlayout";
+ RTC_DCHECK_RUN_ON(&main_thread_checker_);
+ RTC_DCHECK(!initialized_);
+ RTC_DCHECK(!playing_);
+ if (!aaudio_.Init()) {
+ return -1;
+ }
+ initialized_ = true;
+ return 0;
+}
+
+bool AAudioPlayer::PlayoutIsInitialized() const {
+ RTC_DCHECK_RUN_ON(&main_thread_checker_);
+ return initialized_;
+}
+
+int AAudioPlayer::StartPlayout() {
+ RTC_LOG(LS_INFO) << "StartPlayout";
+ RTC_DCHECK_RUN_ON(&main_thread_checker_);
+ RTC_DCHECK(!playing_);
+ if (!initialized_) {
+ RTC_DLOG(LS_WARNING)
+ << "Playout can not start since InitPlayout must succeed first";
+ return 0;
+ }
+ if (fine_audio_buffer_) {
+ fine_audio_buffer_->ResetPlayout();
+ }
+ if (!aaudio_.Start()) {
+ return -1;
+ }
+ underrun_count_ = aaudio_.xrun_count();
+ first_data_callback_ = true;
+ playing_ = true;
+ return 0;
+}
+
+int AAudioPlayer::StopPlayout() {
+ RTC_LOG(LS_INFO) << "StopPlayout";
+ RTC_DCHECK_RUN_ON(&main_thread_checker_);
+ if (!initialized_ || !playing_) {
+ return 0;
+ }
+ if (!aaudio_.Stop()) {
+ RTC_LOG(LS_ERROR) << "StopPlayout failed";
+ return -1;
+ }
+ thread_checker_aaudio_.Detach();
+ initialized_ = false;
+ playing_ = false;
+ return 0;
+}
+
+bool AAudioPlayer::Playing() const {
+ RTC_DCHECK_RUN_ON(&main_thread_checker_);
+ return playing_;
+}
+
+void AAudioPlayer::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
+ RTC_DLOG(LS_INFO) << "AttachAudioBuffer";
+ RTC_DCHECK_RUN_ON(&main_thread_checker_);
+ audio_device_buffer_ = audioBuffer;
+ const AudioParameters audio_parameters = aaudio_.audio_parameters();
+ audio_device_buffer_->SetPlayoutSampleRate(audio_parameters.sample_rate());
+ audio_device_buffer_->SetPlayoutChannels(audio_parameters.channels());
+ RTC_CHECK(audio_device_buffer_);
+ // Create a modified audio buffer class which allows us to ask for any number
+  // of samples (and not only multiples of 10ms) to match the optimal buffer
+ // size per callback used by AAudio.
+ fine_audio_buffer_ = std::make_unique<FineAudioBuffer>(audio_device_buffer_);
+}
+
+int AAudioPlayer::SpeakerVolumeIsAvailable(bool& available) {
+ available = false;
+ return 0;
+}
+
+void AAudioPlayer::OnErrorCallback(aaudio_result_t error) {
+ RTC_LOG(LS_ERROR) << "OnErrorCallback: " << AAudio_convertResultToText(error);
+ // TODO(henrika): investigate if we can use a thread checker here. Initial
+ // tests shows that this callback can sometimes be called on a unique thread
+ // but according to the documentation it should be on the same thread as the
+ // data callback.
+ // RTC_DCHECK_RUN_ON(&thread_checker_aaudio_);
+ if (aaudio_.stream_state() == AAUDIO_STREAM_STATE_DISCONNECTED) {
+ // The stream is disconnected and any attempt to use it will return
+ // AAUDIO_ERROR_DISCONNECTED.
+ RTC_LOG(LS_WARNING) << "Output stream disconnected";
+ // AAudio documentation states: "You should not close or reopen the stream
+ // from the callback, use another thread instead". A message is therefore
+ // sent to the main thread to do the restart operation.
+ RTC_DCHECK(main_thread_);
+ main_thread_->Post(RTC_FROM_HERE, this, kMessageOutputStreamDisconnected);
+ }
+}
+
+aaudio_data_callback_result_t AAudioPlayer::OnDataCallback(void* audio_data,
+ int32_t num_frames) {
+ RTC_DCHECK_RUN_ON(&thread_checker_aaudio_);
+ // Log device id in first data callback to ensure that a valid device is
+ // utilized.
+ if (first_data_callback_) {
+ RTC_LOG(LS_INFO) << "--- First output data callback: "
+ "device id="
+ << aaudio_.device_id();
+ first_data_callback_ = false;
+ }
+
+ // Check if the underrun count has increased. If it has, increase the buffer
+ // size by adding the size of a burst. It will reduce the risk of underruns
+ // at the expense of an increased latency.
+ // TODO(henrika): enable possibility to disable and/or tune the algorithm.
+ const int32_t underrun_count = aaudio_.xrun_count();
+ if (underrun_count > underrun_count_) {
+ RTC_LOG(LS_ERROR) << "Underrun detected: " << underrun_count;
+ underrun_count_ = underrun_count;
+ aaudio_.IncreaseOutputBufferSize();
+ }
+
+ // Estimate latency between writing an audio frame to the output stream and
+ // the time that same frame is played out on the output audio device.
+ latency_millis_ = aaudio_.EstimateLatencyMillis();
+ // TODO(henrika): use for development only.
+ if (aaudio_.frames_written() % (1000 * aaudio_.frames_per_burst()) == 0) {
+ RTC_DLOG(LS_INFO) << "output latency: " << latency_millis_
+ << ", num_frames: " << num_frames;
+ }
+
+ // Read audio data from the WebRTC source using the FineAudioBuffer object
+ // and write that data into `audio_data` to be played out by AAudio.
+ // Prime output with zeros during a short initial phase to avoid distortion.
+  // TODO(henrika): do more work to figure out if the initial forced silence
+ // period is really needed.
+ if (aaudio_.frames_written() < 50 * aaudio_.frames_per_burst()) {
+ const size_t num_bytes =
+ sizeof(int16_t) * aaudio_.samples_per_frame() * num_frames;
+ memset(audio_data, 0, num_bytes);
+ } else {
+ fine_audio_buffer_->GetPlayoutData(
+ rtc::MakeArrayView(static_cast<int16_t*>(audio_data),
+ aaudio_.samples_per_frame() * num_frames),
+ static_cast<int>(latency_millis_ + 0.5));
+ }
+
+ // TODO(henrika): possibly add trace here to be included in systrace.
+ // See https://developer.android.com/studio/profile/systrace-commandline.html.
+ return AAUDIO_CALLBACK_RESULT_CONTINUE;
+}
+
+void AAudioPlayer::OnMessage(rtc::Message* msg) {
+ RTC_DCHECK_RUN_ON(&main_thread_checker_);
+ switch (msg->message_id) {
+ case kMessageOutputStreamDisconnected:
+ HandleStreamDisconnected();
+ break;
+ }
+}
+
+void AAudioPlayer::HandleStreamDisconnected() {
+ RTC_DCHECK_RUN_ON(&main_thread_checker_);
+ RTC_DLOG(LS_INFO) << "HandleStreamDisconnected";
+ if (!initialized_ || !playing_) {
+ return;
+ }
+  // Perform a restart by first closing the disconnected stream and then
+  // starting a new stream using the new (preferred) audio output device.
+ StopPlayout();
+ InitPlayout();
+ StartPlayout();
+}
+} // namespace webrtc
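
A hedged usage sketch of the lifecycle the class above enforces; the AudioManager and AudioDeviceBuffer setup is elided and assumed valid:

```cpp
#include "modules/audio_device/android/aaudio_player.h"

void RunPlayoutSession(webrtc::AudioManager* audio_manager,
                       webrtc::AudioDeviceBuffer* buffer) {
  webrtc::AAudioPlayer player(audio_manager);  // Same thread for all calls.
  player.Init();
  player.AttachAudioBuffer(buffer);  // Wires up the FineAudioBuffer.
  player.InitPlayout();
  player.StartPlayout();
  // ... audio frames are pulled on the AAudio real-time thread ...
  player.StopPlayout();  // InitPlayout() must be called again before a
  player.Terminate();    // new StartPlayout(), per the header comment.
}
```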
diff --git a/third_party/libwebrtc/modules/audio_device/android/aaudio_player.h b/third_party/libwebrtc/modules/audio_device/android/aaudio_player.h
new file mode 100644
index 0000000000..4bf3ee3bc0
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/android/aaudio_player.h
@@ -0,0 +1,147 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_DEVICE_ANDROID_AAUDIO_PLAYER_H_
+#define MODULES_AUDIO_DEVICE_ANDROID_AAUDIO_PLAYER_H_
+
+#include <aaudio/AAudio.h>
+
+#include <memory>
+
+#include "api/sequence_checker.h"
+#include "modules/audio_device/android/aaudio_wrapper.h"
+#include "modules/audio_device/include/audio_device_defines.h"
+#include "rtc_base/message_handler.h"
+#include "rtc_base/thread.h"
+#include "rtc_base/thread_annotations.h"
+
+namespace webrtc {
+
+class AudioDeviceBuffer;
+class FineAudioBuffer;
+class AudioManager;
+
+// Implements low-latency 16-bit mono PCM audio output support for Android
+// using the C-based AAudio API.
+//
+// An instance must be created and destroyed on one and the same thread.
+// All public methods must also be called on the same thread. A thread checker
+// will DCHECK if any method is called on an invalid thread. Audio buffers
+// are requested on a dedicated high-priority thread owned by AAudio.
+//
+// The existing design forces the user to call InitPlayout() after StopPlayout()
+// to be able to call StartPlayout() again. This is in line with how the Java-
+// based implementation works.
+//
+// An audio stream can be disconnected, e.g. when an audio device is removed.
+// This implementation will restart the audio stream using the new preferred
+// device if such an event happens.
+//
+// Also supports automatic buffer-size adjustment based on underrun detections
+// where the internal AAudio buffer can be increased when needed. It will
+// reduce the risk of underruns (~glitches) at the expense of an increased
+// latency.
+class AAudioPlayer final : public AAudioObserverInterface,
+ public rtc::MessageHandler {
+ public:
+ explicit AAudioPlayer(AudioManager* audio_manager);
+ ~AAudioPlayer();
+
+ int Init();
+ int Terminate();
+
+ int InitPlayout();
+ bool PlayoutIsInitialized() const;
+
+ int StartPlayout();
+ int StopPlayout();
+ bool Playing() const;
+
+ void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer);
+
+ // Not implemented in AAudio.
+ int SpeakerVolumeIsAvailable(bool& available); // NOLINT
+ int SetSpeakerVolume(uint32_t volume) { return -1; }
+ int SpeakerVolume(uint32_t& volume) const { return -1; } // NOLINT
+ int MaxSpeakerVolume(uint32_t& maxVolume) const { return -1; } // NOLINT
+ int MinSpeakerVolume(uint32_t& minVolume) const { return -1; } // NOLINT
+
+ protected:
+ // AAudioObserverInterface implementation.
+
+ // For an output stream, this function should render and write `num_frames`
+  // of data in the stream's current data format to the `audio_data` buffer.
+ // Called on a real-time thread owned by AAudio.
+ aaudio_data_callback_result_t OnDataCallback(void* audio_data,
+ int32_t num_frames) override;
+  // AAudio calls this function if any error occurs on a callback thread.
+ // Called on a real-time thread owned by AAudio.
+ void OnErrorCallback(aaudio_result_t error) override;
+
+ // rtc::MessageHandler used for restart messages from the error-callback
+ // thread to the main (creating) thread.
+ void OnMessage(rtc::Message* msg) override;
+
+ private:
+ // Closes the existing stream and starts a new stream.
+ void HandleStreamDisconnected();
+
+ // Ensures that methods are called from the same thread as this object is
+ // created on.
+ SequenceChecker main_thread_checker_;
+
+ // Stores thread ID in first call to AAudioPlayer::OnDataCallback from a
+ // real-time thread owned by AAudio. Detached during construction of this
+ // object.
+ SequenceChecker thread_checker_aaudio_;
+
+  // The thread on which this object was created.
+ rtc::Thread* main_thread_;
+
+ // Wraps all AAudio resources. Contains an output stream using the default
+ // output audio device. Can be accessed on both the main thread and the
+ // real-time thread owned by AAudio. See separate AAudio documentation about
+ // thread safety.
+ AAudioWrapper aaudio_;
+
+ // FineAudioBuffer takes an AudioDeviceBuffer which delivers audio data
+ // in chunks of 10ms. It then allows for this data to be pulled in
+  // a finer or coarser granularity. I.e., by interacting with this class
+  // instead of directly with the AudioDeviceBuffer, one can ask for any
+  // number of audio samples.
+  // Example: the native buffer size can be 192 audio frames at a 48kHz sample
+  // rate. WebRTC will provide 480 audio frames per 10ms but AAudio asks for
+  // 192 in each callback (once every 4 ms). This class can then ask for 192
+  // and the FineAudioBuffer will ask WebRTC for new data only approximately
+  // every other callback, caching the non-utilized audio in between.
+ std::unique_ptr<FineAudioBuffer> fine_audio_buffer_;
+
+ // Counts number of detected underrun events reported by AAudio.
+ int32_t underrun_count_ = 0;
+
+ // True only for the first data callback in each audio session.
+ bool first_data_callback_ = true;
+
+ // Raw pointer handle provided to us in AttachAudioBuffer(). Owned by the
+ // AudioDeviceModuleImpl class and set by AudioDeviceModule::Create().
+ AudioDeviceBuffer* audio_device_buffer_ RTC_GUARDED_BY(main_thread_checker_) =
+ nullptr;
+
+ bool initialized_ RTC_GUARDED_BY(main_thread_checker_) = false;
+ bool playing_ RTC_GUARDED_BY(main_thread_checker_) = false;
+
+ // Estimated latency between writing an audio frame to the output stream and
+ // the time that same frame is played out on the output audio device.
+ double latency_millis_ RTC_GUARDED_BY(thread_checker_aaudio_) = 0;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_DEVICE_ANDROID_AAUDIO_PLAYER_H_
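
Worked numbers for the FineAudioBuffer comment in the header above, as a compile-time sketch:

```cpp
// At 48 kHz, a 192-frame burst means a 4 ms callback cadence, while WebRTC
// produces audio in 480-frame (10 ms) chunks; so roughly every 2.5 callbacks
// consume one WebRTC chunk and the remainder is cached.
constexpr int kSampleRateHz = 48000;
constexpr int kFramesPerBurst = 192;                        // per AAudio callback
constexpr int kFramesPer10Ms = kSampleRateHz / 100;         // 480
constexpr double kCallbackMs =
    1000.0 * kFramesPerBurst / kSampleRateHz;               // 4.0 ms
constexpr double kCallbacksPerChunk =
    static_cast<double>(kFramesPer10Ms) / kFramesPerBurst;  // 2.5
```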
diff --git a/third_party/libwebrtc/modules/audio_device/android/aaudio_recorder.cc b/third_party/libwebrtc/modules/audio_device/android/aaudio_recorder.cc
new file mode 100644
index 0000000000..4757cf8cf0
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/android/aaudio_recorder.cc
@@ -0,0 +1,220 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_device/android/aaudio_recorder.h"
+
+#include <memory>
+
+#include "api/array_view.h"
+#include "modules/audio_device/android/audio_manager.h"
+#include "modules/audio_device/fine_audio_buffer.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/time_utils.h"
+
+namespace webrtc {
+
+enum AudioDeviceMessageType : uint32_t {
+ kMessageInputStreamDisconnected,
+};
+
+AAudioRecorder::AAudioRecorder(AudioManager* audio_manager)
+ : main_thread_(rtc::Thread::Current()),
+ aaudio_(audio_manager, AAUDIO_DIRECTION_INPUT, this) {
+ RTC_LOG(LS_INFO) << "ctor";
+ thread_checker_aaudio_.Detach();
+}
+
+AAudioRecorder::~AAudioRecorder() {
+ RTC_LOG(LS_INFO) << "dtor";
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ Terminate();
+  RTC_LOG(LS_INFO) << "detected overflows: " << overflow_count_;
+}
+
+int AAudioRecorder::Init() {
+ RTC_LOG(LS_INFO) << "Init";
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ if (aaudio_.audio_parameters().channels() == 2) {
+ RTC_DLOG(LS_WARNING) << "Stereo mode is enabled";
+ }
+ return 0;
+}
+
+int AAudioRecorder::Terminate() {
+ RTC_LOG(LS_INFO) << "Terminate";
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ StopRecording();
+ return 0;
+}
+
+int AAudioRecorder::InitRecording() {
+ RTC_LOG(LS_INFO) << "InitRecording";
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ RTC_DCHECK(!initialized_);
+ RTC_DCHECK(!recording_);
+ if (!aaudio_.Init()) {
+ return -1;
+ }
+ initialized_ = true;
+ return 0;
+}
+
+int AAudioRecorder::StartRecording() {
+ RTC_LOG(LS_INFO) << "StartRecording";
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ RTC_DCHECK(initialized_);
+ RTC_DCHECK(!recording_);
+ if (fine_audio_buffer_) {
+ fine_audio_buffer_->ResetPlayout();
+ }
+ if (!aaudio_.Start()) {
+ return -1;
+ }
+ overflow_count_ = aaudio_.xrun_count();
+ first_data_callback_ = true;
+ recording_ = true;
+ return 0;
+}
+
+int AAudioRecorder::StopRecording() {
+ RTC_LOG(LS_INFO) << "StopRecording";
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ if (!initialized_ || !recording_) {
+ return 0;
+ }
+ if (!aaudio_.Stop()) {
+ return -1;
+ }
+ thread_checker_aaudio_.Detach();
+ initialized_ = false;
+ recording_ = false;
+ return 0;
+}
+
+void AAudioRecorder::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
+ RTC_LOG(LS_INFO) << "AttachAudioBuffer";
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ audio_device_buffer_ = audioBuffer;
+ const AudioParameters audio_parameters = aaudio_.audio_parameters();
+ audio_device_buffer_->SetRecordingSampleRate(audio_parameters.sample_rate());
+ audio_device_buffer_->SetRecordingChannels(audio_parameters.channels());
+ RTC_CHECK(audio_device_buffer_);
+ // Create a modified audio buffer class which allows us to deliver any number
+ // of samples (and not only multiples of 10ms which WebRTC uses) to match the
+ // native AAudio buffer size.
+ fine_audio_buffer_ = std::make_unique<FineAudioBuffer>(audio_device_buffer_);
+}
+
+int AAudioRecorder::EnableBuiltInAEC(bool enable) {
+ RTC_LOG(LS_INFO) << "EnableBuiltInAEC: " << enable;
+ RTC_LOG(LS_ERROR) << "Not implemented";
+ return -1;
+}
+
+int AAudioRecorder::EnableBuiltInAGC(bool enable) {
+ RTC_LOG(LS_INFO) << "EnableBuiltInAGC: " << enable;
+ RTC_LOG(LS_ERROR) << "Not implemented";
+ return -1;
+}
+
+int AAudioRecorder::EnableBuiltInNS(bool enable) {
+ RTC_LOG(LS_INFO) << "EnableBuiltInNS: " << enable;
+ RTC_LOG(LS_ERROR) << "Not implemented";
+ return -1;
+}
+
+void AAudioRecorder::OnErrorCallback(aaudio_result_t error) {
+ RTC_LOG(LS_ERROR) << "OnErrorCallback: " << AAudio_convertResultToText(error);
+ // RTC_DCHECK(thread_checker_aaudio_.IsCurrent());
+ if (aaudio_.stream_state() == AAUDIO_STREAM_STATE_DISCONNECTED) {
+ // The stream is disconnected and any attempt to use it will return
+    // AAUDIO_ERROR_DISCONNECTED.
+ RTC_LOG(LS_WARNING) << "Input stream disconnected => restart is required";
+ // AAudio documentation states: "You should not close or reopen the stream
+ // from the callback, use another thread instead". A message is therefore
+ // sent to the main thread to do the restart operation.
+ RTC_DCHECK(main_thread_);
+ main_thread_->Post(RTC_FROM_HERE, this, kMessageInputStreamDisconnected);
+ }
+}
+
+// Read and process `num_frames` of data from the `audio_data` buffer.
+// TODO(henrika): possibly add trace here to be included in systrace.
+// See https://developer.android.com/studio/profile/systrace-commandline.html.
+aaudio_data_callback_result_t AAudioRecorder::OnDataCallback(
+ void* audio_data,
+ int32_t num_frames) {
+ // TODO(henrika): figure out why we sometimes hit this one.
+ // RTC_DCHECK(thread_checker_aaudio_.IsCurrent());
+ // RTC_LOG(LS_INFO) << "OnDataCallback: " << num_frames;
+ // Drain the input buffer at first callback to ensure that it does not
+ // contain any old data. Will also ensure that the lowest possible latency
+ // is obtained.
+ if (first_data_callback_) {
+ RTC_LOG(LS_INFO) << "--- First input data callback: "
+ "device id="
+ << aaudio_.device_id();
+ aaudio_.ClearInputStream(audio_data, num_frames);
+ first_data_callback_ = false;
+ }
+ // Check if the overflow counter has increased and if so log a warning.
+  // TODO(henrika): possibly add UMA stat or capacity extension.
+ const int32_t overflow_count = aaudio_.xrun_count();
+ if (overflow_count > overflow_count_) {
+ RTC_LOG(LS_ERROR) << "Overflow detected: " << overflow_count;
+ overflow_count_ = overflow_count;
+ }
+  // Estimated time between when an audio frame is recorded by the input
+  // device and when it can be read from the input stream.
+ latency_millis_ = aaudio_.EstimateLatencyMillis();
+ // TODO(henrika): use for development only.
+ if (aaudio_.frames_read() % (1000 * aaudio_.frames_per_burst()) == 0) {
+ RTC_DLOG(LS_INFO) << "input latency: " << latency_millis_
+ << ", num_frames: " << num_frames;
+ }
+ // Copy recorded audio in `audio_data` to the WebRTC sink using the
+ // FineAudioBuffer object.
+ fine_audio_buffer_->DeliverRecordedData(
+ rtc::MakeArrayView(static_cast<const int16_t*>(audio_data),
+ aaudio_.samples_per_frame() * num_frames),
+ static_cast<int>(latency_millis_ + 0.5));
+
+ return AAUDIO_CALLBACK_RESULT_CONTINUE;
+}
+
+void AAudioRecorder::OnMessage(rtc::Message* msg) {
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ switch (msg->message_id) {
+ case kMessageInputStreamDisconnected:
+ HandleStreamDisconnected();
+ break;
+ default:
+ RTC_LOG(LS_ERROR) << "Invalid message id: " << msg->message_id;
+ break;
+ }
+}
+
+void AAudioRecorder::HandleStreamDisconnected() {
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ RTC_LOG(LS_INFO) << "HandleStreamDisconnected";
+ if (!initialized_ || !recording_) {
+ return;
+ }
+  // Perform a restart by first closing the disconnected stream and then
+  // starting a new stream using the new (preferred) audio input device.
+  // TODO(henrika): resolve issue where one restart attempt leads to a long
+ // sequence of new calls to OnErrorCallback().
+ // See b/73148976 for details.
+ StopRecording();
+ InitRecording();
+ StartRecording();
+}
+} // namespace webrtc
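
A minimal sketch of the restart hand-off used above: the AAudio error callback runs on a real-time thread where the stream must not be closed, so the restart is bounced to the owning thread as a message. The message id mirrors kMessageInputStreamDisconnected defined in this file:

```cpp
#include "rtc_base/location.h"
#include "rtc_base/message_handler.h"
#include "rtc_base/thread.h"

void PostRestartRequest(rtc::Thread* main_thread,
                        rtc::MessageHandler* handler,
                        uint32_t message_id) {
  // Never close or reopen the stream here; only schedule the work.
  main_thread->Post(RTC_FROM_HERE, handler, message_id);
}
// The owning thread then dispatches the message in OnMessage() and runs
// StopRecording(); InitRecording(); StartRecording();
```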
diff --git a/third_party/libwebrtc/modules/audio_device/android/aaudio_recorder.h b/third_party/libwebrtc/modules/audio_device/android/aaudio_recorder.h
new file mode 100644
index 0000000000..d0ad6be43d
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/android/aaudio_recorder.h
@@ -0,0 +1,129 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_DEVICE_ANDROID_AAUDIO_RECORDER_H_
+#define MODULES_AUDIO_DEVICE_ANDROID_AAUDIO_RECORDER_H_
+
+#include <aaudio/AAudio.h>
+
+#include <memory>
+
+#include "api/sequence_checker.h"
+#include "modules/audio_device/android/aaudio_wrapper.h"
+#include "modules/audio_device/include/audio_device_defines.h"
+#include "rtc_base/message_handler.h"
+#include "rtc_base/thread.h"
+
+namespace webrtc {
+
+class AudioDeviceBuffer;
+class FineAudioBuffer;
+class AudioManager;
+
+// Implements low-latency 16-bit mono PCM audio input support for Android
+// using the C-based AAudio API.
+//
+// An instance must be created and destroyed on one and the same thread.
+// All public methods must also be called on the same thread. A thread checker
+// will RTC_DCHECK if any method is called on an invalid thread. Audio buffers
+// are delivered on a dedicated high-priority thread owned by AAudio.
+//
+// The existing design forces the user to call InitRecording() after
+// StopRecording() to be able to call StartRecording() again. This is in line
+// with how the Java-based implementation works.
+//
+// TODO(henrika): add comments about device changes and adaptive buffer
+// management.
+class AAudioRecorder : public AAudioObserverInterface,
+ public rtc::MessageHandler {
+ public:
+ explicit AAudioRecorder(AudioManager* audio_manager);
+ ~AAudioRecorder();
+
+ int Init();
+ int Terminate();
+
+ int InitRecording();
+ bool RecordingIsInitialized() const { return initialized_; }
+
+ int StartRecording();
+ int StopRecording();
+ bool Recording() const { return recording_; }
+
+ void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer);
+
+ double latency_millis() const { return latency_millis_; }
+
+ // TODO(henrika): add support using AAudio APIs when available.
+ int EnableBuiltInAEC(bool enable);
+ int EnableBuiltInAGC(bool enable);
+ int EnableBuiltInNS(bool enable);
+
+ protected:
+ // AAudioObserverInterface implementation.
+
+ // For an input stream, this function should read `num_frames` of recorded
+ // data, in the stream's current data format, from the `audio_data` buffer.
+ // Called on a real-time thread owned by AAudio.
+ aaudio_data_callback_result_t OnDataCallback(void* audio_data,
+ int32_t num_frames) override;
+
+ // AAudio calls this function if any error occurs on a callback thread.
+ // Called on a real-time thread owned by AAudio.
+ void OnErrorCallback(aaudio_result_t error) override;
+
+ // rtc::MessageHandler used for restart messages.
+ void OnMessage(rtc::Message* msg) override;
+
+ private:
+ // Closes the existing stream and starts a new stream.
+ void HandleStreamDisconnected();
+
+ // Ensures that methods are called from the same thread as this object is
+ // created on.
+ SequenceChecker thread_checker_;
+
+  // Stores thread ID in first call to AAudioRecorder::OnDataCallback from a
+ // real-time thread owned by AAudio. Detached during construction of this
+ // object.
+ SequenceChecker thread_checker_aaudio_;
+
+  // The thread on which this object was created.
+ rtc::Thread* main_thread_;
+
+ // Wraps all AAudio resources. Contains an input stream using the default
+ // input audio device.
+ AAudioWrapper aaudio_;
+
+ // Raw pointer handle provided to us in AttachAudioBuffer(). Owned by the
+  // AudioDeviceModuleImpl class and set by AudioDeviceModule::Create().
+ AudioDeviceBuffer* audio_device_buffer_ = nullptr;
+
+ bool initialized_ = false;
+ bool recording_ = false;
+
+ // Consumes audio of native buffer size and feeds the WebRTC layer with 10ms
+ // chunks of audio.
+ std::unique_ptr<FineAudioBuffer> fine_audio_buffer_;
+
+ // Counts number of detected overflow events reported by AAudio.
+ int32_t overflow_count_ = 0;
+
+  // Estimated time between when an audio frame is recorded by the input
+  // device and when it can be read from the input stream.
+ double latency_millis_ = 0;
+
+ // True only for the first data callback in each audio session.
+ bool first_data_callback_ = true;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_DEVICE_ANDROID_AAUDIO_RECORDER_H_
diff --git a/third_party/libwebrtc/modules/audio_device/android/aaudio_wrapper.cc b/third_party/libwebrtc/modules/audio_device/android/aaudio_wrapper.cc
new file mode 100644
index 0000000000..3d824b5c57
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/android/aaudio_wrapper.cc
@@ -0,0 +1,499 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_device/android/aaudio_wrapper.h"
+
+#include "modules/audio_device/android/audio_manager.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/strings/string_builder.h"
+#include "rtc_base/time_utils.h"
+
+#define LOG_ON_ERROR(op) \
+ do { \
+ aaudio_result_t result = (op); \
+ if (result != AAUDIO_OK) { \
+ RTC_LOG(LS_ERROR) << #op << ": " << AAudio_convertResultToText(result); \
+ } \
+ } while (0)
+
+#define RETURN_ON_ERROR(op, ...) \
+ do { \
+ aaudio_result_t result = (op); \
+ if (result != AAUDIO_OK) { \
+ RTC_LOG(LS_ERROR) << #op << ": " << AAudio_convertResultToText(result); \
+ return __VA_ARGS__; \
+ } \
+ } while (0)
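
A usage sketch for the two helper macros above: LOG_ON_ERROR() logs the failure and continues, while RETURN_ON_ERROR() logs and early-returns the supplied value. The calls are illustrative:

```cpp
bool StartAfterFlush(AAudioStream* stream) {
  LOG_ON_ERROR(AAudioStream_requestFlush(stream));            // best effort
  RETURN_ON_ERROR(AAudioStream_requestStart(stream), false);  // must succeed
  return true;
}
```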
+
+namespace webrtc {
+
+namespace {
+
+const char* DirectionToString(aaudio_direction_t direction) {
+ switch (direction) {
+ case AAUDIO_DIRECTION_OUTPUT:
+ return "OUTPUT";
+ case AAUDIO_DIRECTION_INPUT:
+ return "INPUT";
+ default:
+ return "UNKNOWN";
+ }
+}
+
+const char* SharingModeToString(aaudio_sharing_mode_t mode) {
+ switch (mode) {
+ case AAUDIO_SHARING_MODE_EXCLUSIVE:
+ return "EXCLUSIVE";
+ case AAUDIO_SHARING_MODE_SHARED:
+ return "SHARED";
+ default:
+ return "UNKNOWN";
+ }
+}
+
+const char* PerformanceModeToString(aaudio_performance_mode_t mode) {
+ switch (mode) {
+ case AAUDIO_PERFORMANCE_MODE_NONE:
+ return "NONE";
+ case AAUDIO_PERFORMANCE_MODE_POWER_SAVING:
+ return "POWER_SAVING";
+ case AAUDIO_PERFORMANCE_MODE_LOW_LATENCY:
+ return "LOW_LATENCY";
+ default:
+ return "UNKNOWN";
+ }
+}
+
+const char* FormatToString(int32_t id) {
+ switch (id) {
+ case AAUDIO_FORMAT_INVALID:
+ return "INVALID";
+ case AAUDIO_FORMAT_UNSPECIFIED:
+ return "UNSPECIFIED";
+ case AAUDIO_FORMAT_PCM_I16:
+ return "PCM_I16";
+ case AAUDIO_FORMAT_PCM_FLOAT:
+ return "FLOAT";
+ default:
+ return "UNKNOWN";
+ }
+}
+
+void ErrorCallback(AAudioStream* stream,
+ void* user_data,
+ aaudio_result_t error) {
+ RTC_DCHECK(user_data);
+ AAudioWrapper* aaudio_wrapper = reinterpret_cast<AAudioWrapper*>(user_data);
+ RTC_LOG(LS_WARNING) << "ErrorCallback: "
+ << DirectionToString(aaudio_wrapper->direction());
+ RTC_DCHECK(aaudio_wrapper->observer());
+ aaudio_wrapper->observer()->OnErrorCallback(error);
+}
+
+aaudio_data_callback_result_t DataCallback(AAudioStream* stream,
+ void* user_data,
+ void* audio_data,
+ int32_t num_frames) {
+ RTC_DCHECK(user_data);
+ RTC_DCHECK(audio_data);
+ AAudioWrapper* aaudio_wrapper = reinterpret_cast<AAudioWrapper*>(user_data);
+ RTC_DCHECK(aaudio_wrapper->observer());
+ return aaudio_wrapper->observer()->OnDataCallback(audio_data, num_frames);
+}
+
+// Wraps the stream builder object to ensure that it is released properly when
+// the stream builder goes out of scope.
+class ScopedStreamBuilder {
+ public:
+ ScopedStreamBuilder() {
+ LOG_ON_ERROR(AAudio_createStreamBuilder(&builder_));
+ RTC_DCHECK(builder_);
+ }
+ ~ScopedStreamBuilder() {
+ if (builder_) {
+ LOG_ON_ERROR(AAudioStreamBuilder_delete(builder_));
+ }
+ }
+
+ AAudioStreamBuilder* get() const { return builder_; }
+
+ private:
+ AAudioStreamBuilder* builder_ = nullptr;
+};
+
+} // namespace
+
+AAudioWrapper::AAudioWrapper(AudioManager* audio_manager,
+ aaudio_direction_t direction,
+ AAudioObserverInterface* observer)
+ : direction_(direction), observer_(observer) {
+ RTC_LOG(LS_INFO) << "ctor";
+ RTC_DCHECK(observer_);
+ direction_ == AAUDIO_DIRECTION_OUTPUT
+ ? audio_parameters_ = audio_manager->GetPlayoutAudioParameters()
+ : audio_parameters_ = audio_manager->GetRecordAudioParameters();
+ aaudio_thread_checker_.Detach();
+ RTC_LOG(LS_INFO) << audio_parameters_.ToString();
+}
+
+AAudioWrapper::~AAudioWrapper() {
+ RTC_LOG(LS_INFO) << "dtor";
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ RTC_DCHECK(!stream_);
+}
+
+bool AAudioWrapper::Init() {
+ RTC_LOG(LS_INFO) << "Init";
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ // Creates a stream builder which can be used to open an audio stream.
+ ScopedStreamBuilder builder;
+ // Configures the stream builder using audio parameters given at construction.
+ SetStreamConfiguration(builder.get());
+ // Opens a stream based on options in the stream builder.
+ if (!OpenStream(builder.get())) {
+ return false;
+ }
+ // Ensures that the opened stream could activate the requested settings.
+ if (!VerifyStreamConfiguration()) {
+ return false;
+ }
+ // Optimizes the buffer scheme for lowest possible latency and creates
+ // additional buffer logic to match the 10ms buffer size used in WebRTC.
+ if (!OptimizeBuffers()) {
+ return false;
+ }
+ LogStreamState();
+ return true;
+}
+
+bool AAudioWrapper::Start() {
+ RTC_LOG(LS_INFO) << "Start";
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ // TODO(henrika): this state check might not be needed.
+ aaudio_stream_state_t current_state = AAudioStream_getState(stream_);
+ if (current_state != AAUDIO_STREAM_STATE_OPEN) {
+ RTC_LOG(LS_ERROR) << "Invalid state: "
+ << AAudio_convertStreamStateToText(current_state);
+ return false;
+ }
+ // Asynchronous request for the stream to start.
+ RETURN_ON_ERROR(AAudioStream_requestStart(stream_), false);
+ LogStreamState();
+ return true;
+}
+
+bool AAudioWrapper::Stop() {
+ RTC_LOG(LS_INFO) << "Stop: " << DirectionToString(direction());
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ // Asynchronous request for the stream to stop.
+ RETURN_ON_ERROR(AAudioStream_requestStop(stream_), false);
+ CloseStream();
+ aaudio_thread_checker_.Detach();
+ return true;
+}
+
+double AAudioWrapper::EstimateLatencyMillis() const {
+ RTC_DCHECK(stream_);
+ double latency_millis = 0.0;
+ if (direction() == AAUDIO_DIRECTION_INPUT) {
+    // For input streams, the best guess we can make is to use the current
+    // burst size as the delay estimate.
+ latency_millis = static_cast<double>(frames_per_burst()) / sample_rate() *
+ rtc::kNumMillisecsPerSec;
+ } else {
+ int64_t existing_frame_index;
+ int64_t existing_frame_presentation_time;
+ // Get the time at which a particular frame was presented to audio hardware.
+ aaudio_result_t result = AAudioStream_getTimestamp(
+ stream_, CLOCK_MONOTONIC, &existing_frame_index,
+ &existing_frame_presentation_time);
+ // Results are only valid when the stream is in AAUDIO_STREAM_STATE_STARTED.
+ if (result == AAUDIO_OK) {
+ // Get write index for next audio frame.
+ int64_t next_frame_index = frames_written();
+ // Number of frames between next frame and the existing frame.
+ int64_t frame_index_delta = next_frame_index - existing_frame_index;
+ // Assume the next frame will be written now.
+ int64_t next_frame_write_time = rtc::TimeNanos();
+ // Calculate time when next frame will be presented to the hardware taking
+ // sample rate into account.
+ int64_t frame_time_delta =
+ (frame_index_delta * rtc::kNumNanosecsPerSec) / sample_rate();
+ int64_t next_frame_presentation_time =
+ existing_frame_presentation_time + frame_time_delta;
+ // Derive a latency estimate given results above.
+ latency_millis = static_cast<double>(next_frame_presentation_time -
+ next_frame_write_time) /
+ rtc::kNumNanosecsPerMillisec;
+ }
+ }
+ return latency_millis;
+}
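
Worked numbers for the output-branch latency math above, with assumed values, as a compile-time sketch:

```cpp
#include <cstdint>

// If AAudio reports frame 10000 presented at t = 100 ms, and we are about to
// write frame 10480 at t = 102 ms on a 48 kHz stream, then the frame delta is
// 480 frames = 10 ms, so the estimated latency is 100 + 10 - 102 = 8 ms.
constexpr int64_t kSampleRate = 48000;
constexpr int64_t kExistingFrameIndex = 10000;            // from getTimestamp()
constexpr int64_t kExistingPresentationNs = 100'000'000;  // t = 100 ms
constexpr int64_t kNextFrameIndex = 10480;                // frames_written()
constexpr int64_t kNowNs = 102'000'000;                   // t = 102 ms
constexpr int64_t kFrameTimeDeltaNs =
    (kNextFrameIndex - kExistingFrameIndex) * 1'000'000'000 / kSampleRate;
constexpr double kLatencyMs =
    (kExistingPresentationNs + kFrameTimeDeltaNs - kNowNs) / 1e6;  // 8.0
```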
+
+// Returns true if the output buffer size was increased by one burst, or
+// false if the new size would exceed max capacity or AAudio rejected it.
+bool AAudioWrapper::IncreaseOutputBufferSize() {
+  RTC_LOG(LS_INFO) << "IncreaseOutputBufferSize";
+ RTC_DCHECK(stream_);
+ RTC_DCHECK(aaudio_thread_checker_.IsCurrent());
+ RTC_DCHECK_EQ(direction(), AAUDIO_DIRECTION_OUTPUT);
+ aaudio_result_t buffer_size = AAudioStream_getBufferSizeInFrames(stream_);
+ // Try to increase size of buffer with one burst to reduce risk of underrun.
+ buffer_size += frames_per_burst();
+ // Verify that the new buffer size is not larger than max capacity.
+ // TODO(henrika): keep track of case when we reach the capacity limit.
+ const int32_t max_buffer_size = buffer_capacity_in_frames();
+ if (buffer_size > max_buffer_size) {
+ RTC_LOG(LS_ERROR) << "Required buffer size (" << buffer_size
+ << ") is higher than max: " << max_buffer_size;
+ return false;
+ }
+ RTC_LOG(LS_INFO) << "Updating buffer size to: " << buffer_size
+ << " (max=" << max_buffer_size << ")";
+ buffer_size = AAudioStream_setBufferSizeInFrames(stream_, buffer_size);
+ if (buffer_size < 0) {
+ RTC_LOG(LS_ERROR) << "Failed to change buffer size: "
+ << AAudio_convertResultToText(buffer_size);
+ return false;
+ }
+ RTC_LOG(LS_INFO) << "Buffer size changed to: " << buffer_size;
+ return true;
+}
+
+void AAudioWrapper::ClearInputStream(void* audio_data, int32_t num_frames) {
+ RTC_LOG(LS_INFO) << "ClearInputStream";
+ RTC_DCHECK(stream_);
+ RTC_DCHECK(aaudio_thread_checker_.IsCurrent());
+ RTC_DCHECK_EQ(direction(), AAUDIO_DIRECTION_INPUT);
+ aaudio_result_t cleared_frames = 0;
+ do {
+ cleared_frames = AAudioStream_read(stream_, audio_data, num_frames, 0);
+ } while (cleared_frames > 0);
+}
+
+AAudioObserverInterface* AAudioWrapper::observer() const {
+ return observer_;
+}
+
+AudioParameters AAudioWrapper::audio_parameters() const {
+ return audio_parameters_;
+}
+
+int32_t AAudioWrapper::samples_per_frame() const {
+ RTC_DCHECK(stream_);
+ return AAudioStream_getSamplesPerFrame(stream_);
+}
+
+int32_t AAudioWrapper::buffer_size_in_frames() const {
+ RTC_DCHECK(stream_);
+ return AAudioStream_getBufferSizeInFrames(stream_);
+}
+
+int32_t AAudioWrapper::buffer_capacity_in_frames() const {
+ RTC_DCHECK(stream_);
+ return AAudioStream_getBufferCapacityInFrames(stream_);
+}
+
+int32_t AAudioWrapper::device_id() const {
+ RTC_DCHECK(stream_);
+ return AAudioStream_getDeviceId(stream_);
+}
+
+int32_t AAudioWrapper::xrun_count() const {
+ RTC_DCHECK(stream_);
+ return AAudioStream_getXRunCount(stream_);
+}
+
+int32_t AAudioWrapper::format() const {
+ RTC_DCHECK(stream_);
+ return AAudioStream_getFormat(stream_);
+}
+
+int32_t AAudioWrapper::sample_rate() const {
+ RTC_DCHECK(stream_);
+ return AAudioStream_getSampleRate(stream_);
+}
+
+int32_t AAudioWrapper::channel_count() const {
+ RTC_DCHECK(stream_);
+ return AAudioStream_getChannelCount(stream_);
+}
+
+int32_t AAudioWrapper::frames_per_callback() const {
+ RTC_DCHECK(stream_);
+ return AAudioStream_getFramesPerDataCallback(stream_);
+}
+
+aaudio_sharing_mode_t AAudioWrapper::sharing_mode() const {
+ RTC_DCHECK(stream_);
+ return AAudioStream_getSharingMode(stream_);
+}
+
+aaudio_performance_mode_t AAudioWrapper::performance_mode() const {
+ RTC_DCHECK(stream_);
+ return AAudioStream_getPerformanceMode(stream_);
+}
+
+aaudio_stream_state_t AAudioWrapper::stream_state() const {
+ RTC_DCHECK(stream_);
+ return AAudioStream_getState(stream_);
+}
+
+int64_t AAudioWrapper::frames_written() const {
+ RTC_DCHECK(stream_);
+ return AAudioStream_getFramesWritten(stream_);
+}
+
+int64_t AAudioWrapper::frames_read() const {
+ RTC_DCHECK(stream_);
+ return AAudioStream_getFramesRead(stream_);
+}
+
+void AAudioWrapper::SetStreamConfiguration(AAudioStreamBuilder* builder) {
+ RTC_LOG(LS_INFO) << "SetStreamConfiguration";
+ RTC_DCHECK(builder);
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ // Request usage of default primary output/input device.
+ // TODO(henrika): verify that default device follows Java APIs.
+ // https://developer.android.com/reference/android/media/AudioDeviceInfo.html.
+ AAudioStreamBuilder_setDeviceId(builder, AAUDIO_UNSPECIFIED);
+ // Use preferred sample rate given by the audio parameters.
+ AAudioStreamBuilder_setSampleRate(builder, audio_parameters().sample_rate());
+ // Use preferred channel configuration given by the audio parameters.
+ AAudioStreamBuilder_setChannelCount(builder, audio_parameters().channels());
+ // Always use 16-bit PCM audio sample format.
+ AAudioStreamBuilder_setFormat(builder, AAUDIO_FORMAT_PCM_I16);
+ // TODO(henrika): investigate effect of using AAUDIO_SHARING_MODE_EXCLUSIVE.
+  // Shared mode is requested here; exclusive mode would give the lowest
+  // latency but its effect is still under investigation (see TODO above).
+ AAudioStreamBuilder_setSharingMode(builder, AAUDIO_SHARING_MODE_SHARED);
+ // Use the direction that was given at construction.
+ AAudioStreamBuilder_setDirection(builder, direction_);
+ // TODO(henrika): investigate performance using different performance modes.
+ AAudioStreamBuilder_setPerformanceMode(builder,
+ AAUDIO_PERFORMANCE_MODE_LOW_LATENCY);
+ // Given that WebRTC applications require low latency, our audio stream uses
+ // an asynchronous callback function to transfer data to and from the
+ // application. AAudio executes the callback in a higher-priority thread that
+ // has better performance.
+ AAudioStreamBuilder_setDataCallback(builder, DataCallback, this);
+  // Request that AAudio calls this function if any error occurs on a callback
+ // thread.
+ AAudioStreamBuilder_setErrorCallback(builder, ErrorCallback, this);
+}
+
+bool AAudioWrapper::OpenStream(AAudioStreamBuilder* builder) {
+ RTC_LOG(LS_INFO) << "OpenStream";
+ RTC_DCHECK(builder);
+ AAudioStream* stream = nullptr;
+ RETURN_ON_ERROR(AAudioStreamBuilder_openStream(builder, &stream), false);
+ stream_ = stream;
+ LogStreamConfiguration();
+ return true;
+}
+
+void AAudioWrapper::CloseStream() {
+ RTC_LOG(LS_INFO) << "CloseStream";
+ RTC_DCHECK(stream_);
+ LOG_ON_ERROR(AAudioStream_close(stream_));
+ stream_ = nullptr;
+}
+
+void AAudioWrapper::LogStreamConfiguration() {
+ RTC_DCHECK(stream_);
+ char ss_buf[1024];
+ rtc::SimpleStringBuilder ss(ss_buf);
+ ss << "Stream Configuration: ";
+ ss << "sample rate=" << sample_rate() << ", channels=" << channel_count();
+ ss << ", samples per frame=" << samples_per_frame();
+ ss << ", format=" << FormatToString(format());
+ ss << ", sharing mode=" << SharingModeToString(sharing_mode());
+ ss << ", performance mode=" << PerformanceModeToString(performance_mode());
+ ss << ", direction=" << DirectionToString(direction());
+ ss << ", device id=" << AAudioStream_getDeviceId(stream_);
+ ss << ", frames per callback=" << frames_per_callback();
+ RTC_LOG(LS_INFO) << ss.str();
+}
+
+void AAudioWrapper::LogStreamState() {
+ RTC_LOG(LS_INFO) << "AAudio stream state: "
+ << AAudio_convertStreamStateToText(stream_state());
+}
+
+bool AAudioWrapper::VerifyStreamConfiguration() {
+ RTC_LOG(LS_INFO) << "VerifyStreamConfiguration";
+ RTC_DCHECK(stream_);
+ // TODO(henrika): should we verify device ID as well?
+ if (AAudioStream_getSampleRate(stream_) != audio_parameters().sample_rate()) {
+ RTC_LOG(LS_ERROR) << "Stream unable to use requested sample rate";
+ return false;
+ }
+ if (AAudioStream_getChannelCount(stream_) !=
+ static_cast<int32_t>(audio_parameters().channels())) {
+ RTC_LOG(LS_ERROR) << "Stream unable to use requested channel count";
+ return false;
+ }
+ if (AAudioStream_getFormat(stream_) != AAUDIO_FORMAT_PCM_I16) {
+ RTC_LOG(LS_ERROR) << "Stream unable to use requested format";
+ return false;
+ }
+ if (AAudioStream_getSharingMode(stream_) != AAUDIO_SHARING_MODE_SHARED) {
+ RTC_LOG(LS_ERROR) << "Stream unable to use requested sharing mode";
+ return false;
+ }
+ if (AAudioStream_getPerformanceMode(stream_) !=
+ AAUDIO_PERFORMANCE_MODE_LOW_LATENCY) {
+ RTC_LOG(LS_ERROR) << "Stream unable to use requested performance mode";
+ return false;
+ }
+ if (AAudioStream_getDirection(stream_) != direction()) {
+ RTC_LOG(LS_ERROR) << "Stream direction could not be set";
+ return false;
+ }
+ if (AAudioStream_getSamplesPerFrame(stream_) !=
+ static_cast<int32_t>(audio_parameters().channels())) {
+ RTC_LOG(LS_ERROR) << "Invalid number of samples per frame";
+ return false;
+ }
+ return true;
+}
+
+bool AAudioWrapper::OptimizeBuffers() {
+ RTC_LOG(LS_INFO) << "OptimizeBuffers";
+ RTC_DCHECK(stream_);
+ // Maximum number of frames that can be filled without blocking.
+ RTC_LOG(LS_INFO) << "max buffer capacity in frames: "
+ << buffer_capacity_in_frames();
+ // Query the number of frames that the application should read or write at
+ // one time for optimal performance.
+ int32_t frames_per_burst = AAudioStream_getFramesPerBurst(stream_);
+ RTC_LOG(LS_INFO) << "frames per burst for optimal performance: "
+ << frames_per_burst;
+ frames_per_burst_ = frames_per_burst;
+ if (direction() == AAUDIO_DIRECTION_INPUT) {
+ // There is no point in calling setBufferSizeInFrames() for input streams
+ // since it has no effect on the performance (latency in this case).
+ return true;
+ }
+ // Set buffer size to same as burst size to guarantee lowest possible latency.
+ // This size might change for output streams if underruns are detected and
+ // automatic buffer adjustment is enabled.
+ AAudioStream_setBufferSizeInFrames(stream_, frames_per_burst);
+ int32_t buffer_size = AAudioStream_getBufferSizeInFrames(stream_);
+ if (buffer_size != frames_per_burst) {
+ RTC_LOG(LS_ERROR) << "Failed to use optimal buffer burst size";
+ return false;
+ }
+ // Maximum number of frames that can be filled without blocking.
+ RTC_LOG(LS_INFO) << "buffer burst size in frames: " << buffer_size;
+ return true;
+}
+
+} // namespace webrtc
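
A back-of-envelope sketch for the buffer tuning in OptimizeBuffers() above: keeping the output buffer at one burst minimizes buffering latency, and each detected underrun then grows it by one burst (see IncreaseOutputBufferSize()), trading latency for glitch safety. The burst size and sample rate are assumed values:

```cpp
constexpr int kSampleRateHz = 48000;
constexpr int kFramesPerBurst = 192;

// Latency contributed by N queued bursts, in milliseconds.
constexpr double BufferLatencyMs(int num_bursts) {
  return 1000.0 * num_bursts * kFramesPerBurst / kSampleRateHz;
}
static_assert(BufferLatencyMs(1) == 4.0, "one burst = 4 ms at 48 kHz");
```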
diff --git a/third_party/libwebrtc/modules/audio_device/android/aaudio_wrapper.h b/third_party/libwebrtc/modules/audio_device/android/aaudio_wrapper.h
new file mode 100644
index 0000000000..1f925b96d3
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/android/aaudio_wrapper.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_DEVICE_ANDROID_AAUDIO_WRAPPER_H_
+#define MODULES_AUDIO_DEVICE_ANDROID_AAUDIO_WRAPPER_H_
+
+#include <aaudio/AAudio.h>
+
+#include "api/sequence_checker.h"
+#include "modules/audio_device/include/audio_device_defines.h"
+
+namespace webrtc {
+
+class AudioManager;
+
+// AAudio callback interface for audio transport to/from the AAudio stream.
+// The interface also contains an error callback method for notifications of
+// e.g. device changes.
+class AAudioObserverInterface {
+ public:
+  // Audio data will be passed in or out of this function depending on the
+ // direction of the audio stream. This callback function will be called on a
+ // real-time thread owned by AAudio.
+ virtual aaudio_data_callback_result_t OnDataCallback(void* audio_data,
+ int32_t num_frames) = 0;
+  // AAudio will call this function if any error occurs on a callback thread.
+ // In response, this function could signal or launch another thread to reopen
+ // a stream on another device. Do not reopen the stream in this callback.
+ virtual void OnErrorCallback(aaudio_result_t error) = 0;
+
+ protected:
+ virtual ~AAudioObserverInterface() {}
+};
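+
+// Example observer (an illustrative sketch only; SilentObserver is a
+// hypothetical name, not part of this API):
+//
+//   class SilentObserver : public AAudioObserverInterface {
+//    public:
+//     aaudio_data_callback_result_t OnDataCallback(void* audio_data,
+//                                                  int32_t num_frames) override {
+//       // Render silence; 2 bytes per frame assumes 16-bit mono PCM.
+//       memset(audio_data, 0, 2 * num_frames);
+//       return AAUDIO_CALLBACK_RESULT_CONTINUE;
+//     }
+//     void OnErrorCallback(aaudio_result_t error) override {
+//       RTC_LOG(LS_ERROR) << AAudio_convertResultToText(error);
+//     }
+//   };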
+
+// Utility class which wraps the C-based AAudio API into a more handy C++ class
+// where the underlying resources (AAudioStreamBuilder and AAudioStream) are
+// encapsulated. User must set the direction (in or out) at construction since
+// it defines the stream type and the direction of the data flow in the
+// AAudioObserverInterface.
+//
+// AAudio is a new Android C API introduced in the Android O (26) release.
+// It is designed for high-performance audio applications that require low
+// latency. Applications communicate with AAudio by reading and writing data
+// to streams.
+//
+// Each stream is attached to a single audio device, where each audio device
+// has a unique ID. The ID can be used to bind an audio stream to a specific
+// audio device but this implementation lets AAudio choose the default primary
+// device instead (device selection takes place in Java). A stream can only
+// move data in one direction. When a stream is opened, Android checks to
+// ensure that the audio device and stream direction agree.
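+//
+// Usage sketch (illustrative only; assumes an existing AudioManager and an
+// AAudioObserverInterface implementation named observer):
+//
+//   AAudioWrapper aaudio(audio_manager, AAUDIO_DIRECTION_OUTPUT, &observer);
+//   if (aaudio.Init() && aaudio.Start()) {
+//     // OnDataCallback() now runs on a real-time thread owned by AAudio.
+//   }
+//   aaudio.Stop();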
+class AAudioWrapper {
+ public:
+ AAudioWrapper(AudioManager* audio_manager,
+ aaudio_direction_t direction,
+ AAudioObserverInterface* observer);
+ ~AAudioWrapper();
+
+ bool Init();
+ bool Start();
+ bool Stop();
+
+ // For output streams: estimates latency between writing an audio frame to
+ // the output stream and the time that same frame is played out on the output
+ // audio device.
+ // For input streams: estimates latency between reading an audio frame from
+ // the input stream and the time that same frame was recorded on the input
+ // audio device.
+ double EstimateLatencyMillis() const;
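+  // (Illustrative sketch only, not necessarily the actual implementation:
+  // AAudio exposes matched frame/time pairs via AAudioStream_getTimestamp(),
+  // so an output-side estimate can be derived roughly as
+  //   int64_t frame_index = 0, frame_time_ns = 0;
+  //   AAudioStream_getTimestamp(stream_, CLOCK_MONOTONIC, &frame_index,
+  //                             &frame_time_ns);
+  //   double latency_ms = 1e3 *
+  //       (AAudioStream_getFramesWritten(stream_) - frame_index) /
+  //       sample_rate();
+  // with the symmetric calculation based on frames read for input streams.)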
+
+ // Increases the internal buffer size for output streams by one burst size to
+ // reduce the risk of underruns. Can be used while a stream is active.
+ bool IncreaseOutputBufferSize();
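+  // (Sketch of the underlying idea, assuming the standard AAudio setter:
+  //   AAudioStream_setBufferSizeInFrames(
+  //       stream_, AAudioStream_getBufferSizeInFrames(stream_) +
+  //                    frames_per_burst_);)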
+
+ // Drains the recording stream of any existing data by reading from it until
+ // it's empty. Can be used to clear out old data before starting a new audio
+ // session.
+ void ClearInputStream(void* audio_data, int32_t num_frames);
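+  // (Illustrative sketch of the draining idea, using a zero timeout so the
+  // read never blocks:
+  //   while (AAudioStream_read(stream_, audio_data, num_frames, 0) > 0) {})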
+
+ AAudioObserverInterface* observer() const;
+ AudioParameters audio_parameters() const;
+ int32_t samples_per_frame() const;
+ int32_t buffer_size_in_frames() const;
+ int32_t buffer_capacity_in_frames() const;
+ int32_t device_id() const;
+ int32_t xrun_count() const;
+ int32_t format() const;
+ int32_t sample_rate() const;
+ int32_t channel_count() const;
+ int32_t frames_per_callback() const;
+ aaudio_sharing_mode_t sharing_mode() const;
+ aaudio_performance_mode_t performance_mode() const;
+ aaudio_stream_state_t stream_state() const;
+ int64_t frames_written() const;
+ int64_t frames_read() const;
+ aaudio_direction_t direction() const { return direction_; }
+ AAudioStream* stream() const { return stream_; }
+ int32_t frames_per_burst() const { return frames_per_burst_; }
+
+ private:
+ void SetStreamConfiguration(AAudioStreamBuilder* builder);
+ bool OpenStream(AAudioStreamBuilder* builder);
+ void CloseStream();
+ void LogStreamConfiguration();
+ void LogStreamState();
+ bool VerifyStreamConfiguration();
+ bool OptimizeBuffers();
+
+ SequenceChecker thread_checker_;
+ SequenceChecker aaudio_thread_checker_;
+ AudioParameters audio_parameters_;
+ const aaudio_direction_t direction_;
+ AAudioObserverInterface* observer_ = nullptr;
+ AAudioStream* stream_ = nullptr;
+ int32_t frames_per_burst_ = 0;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_DEVICE_ANDROID_AAUDIO_WRAPPER_H_
diff --git a/third_party/libwebrtc/modules/audio_device/android/audio_common.h b/third_party/libwebrtc/modules/audio_device/android/audio_common.h
new file mode 100644
index 0000000000..81ea733aa4
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/android/audio_common.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_DEVICE_ANDROID_AUDIO_COMMON_H_
+#define MODULES_AUDIO_DEVICE_ANDROID_AUDIO_COMMON_H_
+
+namespace webrtc {
+
+const int kDefaultSampleRate = 44100;
+// Delay estimates for the two different supported modes. These values are based
+// on real-time round-trip delay estimates on a large set of devices and they
+// are lower bounds since the filter length is 128 ms, so the AEC works for
+// delays in the range [50, ~170] ms and [150, ~270] ms. Note that, in most
+// cases, the lowest delay estimate will not be utilized since devices that
+// support low-latency output audio often support HW AEC as well.
+const int kLowLatencyModeDelayEstimateInMilliseconds = 50;
+const int kHighLatencyModeDelayEstimateInMilliseconds = 150;
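+// For reference, these constants feed the audio manager's total delay
+// estimate, and AudioDeviceTemplate::PlayoutDelay() reports half of the
+// selected value, e.g. kHighLatencyModeDelayEstimateInMilliseconds / 2 = 75.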
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_DEVICE_ANDROID_AUDIO_COMMON_H_
diff --git a/third_party/libwebrtc/modules/audio_device/android/audio_device_template.h b/third_party/libwebrtc/modules/audio_device/android/audio_device_template.h
new file mode 100644
index 0000000000..999c5878c6
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/android/audio_device_template.h
@@ -0,0 +1,435 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_DEVICE_ANDROID_AUDIO_DEVICE_TEMPLATE_H_
+#define MODULES_AUDIO_DEVICE_ANDROID_AUDIO_DEVICE_TEMPLATE_H_
+
+#include "api/sequence_checker.h"
+#include "modules/audio_device/android/audio_manager.h"
+#include "modules/audio_device/audio_device_generic.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+// InputType/OutputType can be any class that implements the capturing/rendering
+// part of the AudioDeviceGeneric API.
+// Construction and destruction must be done on one and the same thread. Each
+// internal implementation of InputType and OutputType will RTC_DCHECK if that
+// is not the case. All implemented methods must also be called on the same
+// thread. See comments in each InputType/OutputType class for more info.
+// It is possible to call the two static methods (SetAndroidAudioDeviceObjects
+// and ClearAndroidAudioDeviceObjects) from a different thread but both will
+// RTC_CHECK that the calling thread is attached to a Java VM.
+
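+// A typical instantiation looks like this (illustrative; AudioRecordJni and
+// AudioTrackJni are the Java-based input/output backends in this module):
+//
+//   AudioDeviceTemplate<AudioRecordJni, AudioTrackJni> adm(
+//       AudioDeviceModule::kAndroidJavaAudio, audio_manager);
+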
+template <class InputType, class OutputType>
+class AudioDeviceTemplate : public AudioDeviceGeneric {
+ public:
+ AudioDeviceTemplate(AudioDeviceModule::AudioLayer audio_layer,
+ AudioManager* audio_manager)
+ : audio_layer_(audio_layer),
+ audio_manager_(audio_manager),
+ output_(audio_manager_),
+ input_(audio_manager_),
+ initialized_(false) {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_CHECK(audio_manager);
+ audio_manager_->SetActiveAudioLayer(audio_layer);
+ }
+
+ virtual ~AudioDeviceTemplate() { RTC_LOG(LS_INFO) << __FUNCTION__; }
+
+ int32_t ActiveAudioLayer(
+ AudioDeviceModule::AudioLayer& audioLayer) const override {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ audioLayer = audio_layer_;
+ return 0;
+ }
+
+ InitStatus Init() override {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ RTC_DCHECK(!initialized_);
+ if (!audio_manager_->Init()) {
+ return InitStatus::OTHER_ERROR;
+ }
+ if (output_.Init() != 0) {
+ audio_manager_->Close();
+ return InitStatus::PLAYOUT_ERROR;
+ }
+ if (input_.Init() != 0) {
+ output_.Terminate();
+ audio_manager_->Close();
+ return InitStatus::RECORDING_ERROR;
+ }
+ initialized_ = true;
+ return InitStatus::OK;
+ }
+
+ int32_t Terminate() override {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ int32_t err = input_.Terminate();
+ err |= output_.Terminate();
+ err |= !audio_manager_->Close();
+ initialized_ = false;
+ RTC_DCHECK_EQ(err, 0);
+ return err;
+ }
+
+ bool Initialized() const override {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ return initialized_;
+ }
+
+ int16_t PlayoutDevices() override {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ return 1;
+ }
+
+ int16_t RecordingDevices() override {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ return 1;
+ }
+
+ int32_t PlayoutDeviceName(uint16_t index,
+ char name[kAdmMaxDeviceNameSize],
+ char guid[kAdmMaxGuidSize]) override {
+ RTC_CHECK_NOTREACHED();
+ }
+
+ int32_t RecordingDeviceName(uint16_t index,
+ char name[kAdmMaxDeviceNameSize],
+ char guid[kAdmMaxGuidSize]) override {
+ RTC_CHECK_NOTREACHED();
+ }
+
+ int32_t SetPlayoutDevice(uint16_t index) override {
+ // OK to use but it has no effect currently since device selection is
+    // done using Android APIs instead.
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ return 0;
+ }
+
+ int32_t SetPlayoutDevice(
+ AudioDeviceModule::WindowsDeviceType device) override {
+ RTC_CHECK_NOTREACHED();
+ }
+
+ int32_t SetRecordingDevice(uint16_t index) override {
+ // OK to use but it has no effect currently since device selection is
+    // done using Android APIs instead.
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ return 0;
+ }
+
+ int32_t SetRecordingDevice(
+ AudioDeviceModule::WindowsDeviceType device) override {
+ RTC_CHECK_NOTREACHED();
+ }
+
+ int32_t PlayoutIsAvailable(bool& available) override {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ available = true;
+ return 0;
+ }
+
+ int32_t InitPlayout() override {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ return output_.InitPlayout();
+ }
+
+ bool PlayoutIsInitialized() const override {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ return output_.PlayoutIsInitialized();
+ }
+
+ int32_t RecordingIsAvailable(bool& available) override {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ available = true;
+ return 0;
+ }
+
+ int32_t InitRecording() override {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ return input_.InitRecording();
+ }
+
+ bool RecordingIsInitialized() const override {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ return input_.RecordingIsInitialized();
+ }
+
+ int32_t StartPlayout() override {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ if (!audio_manager_->IsCommunicationModeEnabled()) {
+ RTC_LOG(LS_WARNING)
+ << "The application should use MODE_IN_COMMUNICATION audio mode!";
+ }
+ return output_.StartPlayout();
+ }
+
+ int32_t StopPlayout() override {
+    // Avoid using the audio manager (JNI/Java cost) if playout was inactive.
+ if (!Playing())
+ return 0;
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ int32_t err = output_.StopPlayout();
+ return err;
+ }
+
+ bool Playing() const override {
+ RTC_LOG(LS_INFO) << __FUNCTION__;
+ return output_.Playing();
+ }
+
+ int32_t StartRecording() override {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ if (!audio_manager_->IsCommunicationModeEnabled()) {
+ RTC_LOG(LS_WARNING)
+ << "The application should use MODE_IN_COMMUNICATION audio mode!";
+ }
+ return input_.StartRecording();
+ }
+
+ int32_t StopRecording() override {
+    // Avoid using the audio manager (JNI/Java cost) if recording was inactive.
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ if (!Recording())
+ return 0;
+ int32_t err = input_.StopRecording();
+ return err;
+ }
+
+ bool Recording() const override { return input_.Recording(); }
+
+ int32_t InitSpeaker() override {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ return 0;
+ }
+
+ bool SpeakerIsInitialized() const override {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ return true;
+ }
+
+ int32_t InitMicrophone() override {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ return 0;
+ }
+
+ bool MicrophoneIsInitialized() const override {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ return true;
+ }
+
+ int32_t SpeakerVolumeIsAvailable(bool& available) override {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ return output_.SpeakerVolumeIsAvailable(available);
+ }
+
+ int32_t SetSpeakerVolume(uint32_t volume) override {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ return output_.SetSpeakerVolume(volume);
+ }
+
+ int32_t SpeakerVolume(uint32_t& volume) const override {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ return output_.SpeakerVolume(volume);
+ }
+
+ int32_t MaxSpeakerVolume(uint32_t& maxVolume) const override {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ return output_.MaxSpeakerVolume(maxVolume);
+ }
+
+ int32_t MinSpeakerVolume(uint32_t& minVolume) const override {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ return output_.MinSpeakerVolume(minVolume);
+ }
+
+ int32_t MicrophoneVolumeIsAvailable(bool& available) override {
+ available = false;
+ return -1;
+ }
+
+ int32_t SetMicrophoneVolume(uint32_t volume) override {
+ RTC_CHECK_NOTREACHED();
+ }
+
+ int32_t MicrophoneVolume(uint32_t& volume) const override {
+ RTC_CHECK_NOTREACHED();
+ return -1;
+ }
+
+ int32_t MaxMicrophoneVolume(uint32_t& maxVolume) const override {
+ RTC_CHECK_NOTREACHED();
+ }
+
+ int32_t MinMicrophoneVolume(uint32_t& minVolume) const override {
+ RTC_CHECK_NOTREACHED();
+ }
+
+ int32_t SpeakerMuteIsAvailable(bool& available) override {
+ RTC_CHECK_NOTREACHED();
+ }
+
+ int32_t SetSpeakerMute(bool enable) override { RTC_CHECK_NOTREACHED(); }
+
+ int32_t SpeakerMute(bool& enabled) const override { RTC_CHECK_NOTREACHED(); }
+
+ int32_t MicrophoneMuteIsAvailable(bool& available) override {
+ RTC_CHECK_NOTREACHED();
+ }
+
+ int32_t SetMicrophoneMute(bool enable) override { RTC_CHECK_NOTREACHED(); }
+
+ int32_t MicrophoneMute(bool& enabled) const override {
+ RTC_CHECK_NOTREACHED();
+ }
+
+ // Returns true if the audio manager has been configured to support stereo
+  // and false otherwise. Default is mono.
+ int32_t StereoPlayoutIsAvailable(bool& available) override {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ available = audio_manager_->IsStereoPlayoutSupported();
+ return 0;
+ }
+
+ int32_t SetStereoPlayout(bool enable) override {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ bool available = audio_manager_->IsStereoPlayoutSupported();
+    // Android does not support switching between mono and stereo on the fly.
+    // Instead, the native audio layer is configured via the audio manager
+    // to support either mono or stereo. Calling this method is allowed only
+    // if it does not modify the current state.
+ return (enable == available) ? 0 : -1;
+ }
+
+ int32_t StereoPlayout(bool& enabled) const override {
+ enabled = audio_manager_->IsStereoPlayoutSupported();
+ return 0;
+ }
+
+ int32_t StereoRecordingIsAvailable(bool& available) override {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ available = audio_manager_->IsStereoRecordSupported();
+ return 0;
+ }
+
+ int32_t SetStereoRecording(bool enable) override {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ bool available = audio_manager_->IsStereoRecordSupported();
+    // Android does not support switching between mono and stereo on the fly.
+    // Instead, the native audio layer is configured via the audio manager
+    // to support either mono or stereo. Calling this method is allowed only
+    // if it does not modify the current state.
+ return (enable == available) ? 0 : -1;
+ }
+
+ int32_t StereoRecording(bool& enabled) const override {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ enabled = audio_manager_->IsStereoRecordSupported();
+ return 0;
+ }
+
+ int32_t PlayoutDelay(uint16_t& delay_ms) const override {
+ // Best guess we can do is to use half of the estimated total delay.
+ delay_ms = audio_manager_->GetDelayEstimateInMilliseconds() / 2;
+ RTC_DCHECK_GT(delay_ms, 0);
+ return 0;
+ }
+
+ void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) override {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ output_.AttachAudioBuffer(audioBuffer);
+ input_.AttachAudioBuffer(audioBuffer);
+ }
+
+  // Returns true if the device supports built-in AEC and the device is not
+  // blacklisted.
+ // Currently, if OpenSL ES is used in both directions, this method will still
+ // report the correct value and it has the correct effect. As an example:
+ // a device supports built in AEC and this method returns true. Libjingle
+ // will then disable the WebRTC based AEC and that will work for all devices
+ // (mainly Nexus) even when OpenSL ES is used for input since our current
+ // implementation will enable built-in AEC by default also for OpenSL ES.
+ // The only "bad" thing that happens today is that when Libjingle calls
+ // OpenSLESRecorder::EnableBuiltInAEC() it will not have any real effect and
+  // a "Not Implemented" message will be logged. This non-perfect state remains
+ // until I have added full support for audio effects based on OpenSL ES APIs.
+ bool BuiltInAECIsAvailable() const override {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ return audio_manager_->IsAcousticEchoCancelerSupported();
+ }
+
+ // TODO(henrika): add implementation for OpenSL ES based audio as well.
+ int32_t EnableBuiltInAEC(bool enable) override {
+ RTC_DLOG(LS_INFO) << __FUNCTION__ << "(" << enable << ")";
+ RTC_CHECK(BuiltInAECIsAvailable()) << "HW AEC is not available";
+ return input_.EnableBuiltInAEC(enable);
+ }
+
+  // Returns true if the device supports built-in AGC and the device is not
+  // blacklisted.
+ // TODO(henrika): add implementation for OpenSL ES based audio as well.
+ // In addition, see comments for BuiltInAECIsAvailable().
+ bool BuiltInAGCIsAvailable() const override {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ return audio_manager_->IsAutomaticGainControlSupported();
+ }
+
+ // TODO(henrika): add implementation for OpenSL ES based audio as well.
+ int32_t EnableBuiltInAGC(bool enable) override {
+ RTC_DLOG(LS_INFO) << __FUNCTION__ << "(" << enable << ")";
+ RTC_CHECK(BuiltInAGCIsAvailable()) << "HW AGC is not available";
+ return input_.EnableBuiltInAGC(enable);
+ }
+
+  // Returns true if the device supports built-in NS and the device is not
+  // blacklisted.
+ // TODO(henrika): add implementation for OpenSL ES based audio as well.
+ // In addition, see comments for BuiltInAECIsAvailable().
+ bool BuiltInNSIsAvailable() const override {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ return audio_manager_->IsNoiseSuppressorSupported();
+ }
+
+ // TODO(henrika): add implementation for OpenSL ES based audio as well.
+ int32_t EnableBuiltInNS(bool enable) override {
+ RTC_DLOG(LS_INFO) << __FUNCTION__ << "(" << enable << ")";
+ RTC_CHECK(BuiltInNSIsAvailable()) << "HW NS is not available";
+ return input_.EnableBuiltInNS(enable);
+ }
+
+ private:
+ SequenceChecker thread_checker_;
+
+ // Local copy of the audio layer set during construction of the
+ // AudioDeviceModuleImpl instance. Read only value.
+ const AudioDeviceModule::AudioLayer audio_layer_;
+
+  // Non-owning raw pointer to the AudioManager instance given to us at
+  // construction. The real object is owned by AudioDeviceModuleImpl and its
+  // lifetime is the same as that of the AudioDeviceModuleImpl, hence there
+  // is no risk of reading a null pointer at any time in this class.
+ AudioManager* const audio_manager_;
+
+ OutputType output_;
+
+ InputType input_;
+
+ bool initialized_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_DEVICE_ANDROID_AUDIO_DEVICE_TEMPLATE_H_
diff --git a/third_party/libwebrtc/modules/audio_device/android/audio_device_unittest.cc b/third_party/libwebrtc/modules/audio_device/android/audio_device_unittest.cc
new file mode 100644
index 0000000000..35ed5da1b7
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/android/audio_device_unittest.cc
@@ -0,0 +1,1020 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_device/include/audio_device.h"
+
+#include <algorithm>
+#include <limits>
+#include <list>
+#include <memory>
+#include <numeric>
+#include <string>
+#include <vector>
+
+#include "absl/strings/string_view.h"
+#include "api/scoped_refptr.h"
+#include "api/task_queue/default_task_queue_factory.h"
+#include "api/task_queue/task_queue_factory.h"
+#include "modules/audio_device/android/audio_common.h"
+#include "modules/audio_device/android/audio_manager.h"
+#include "modules/audio_device/android/build_info.h"
+#include "modules/audio_device/android/ensure_initialized.h"
+#include "modules/audio_device/audio_device_impl.h"
+#include "modules/audio_device/include/mock_audio_transport.h"
+#include "rtc_base/arraysize.h"
+#include "rtc_base/event.h"
+#include "rtc_base/synchronization/mutex.h"
+#include "rtc_base/time_utils.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "test/testsupport/file_utils.h"
+
+using std::cout;
+using std::endl;
+using ::testing::_;
+using ::testing::AtLeast;
+using ::testing::Gt;
+using ::testing::Invoke;
+using ::testing::NiceMock;
+using ::testing::NotNull;
+using ::testing::Return;
+
+// #define ENABLE_DEBUG_PRINTF
+#ifdef ENABLE_DEBUG_PRINTF
+#define PRINTD(...) fprintf(stderr, __VA_ARGS__);
+#else
+#define PRINTD(...) ((void)0)
+#endif
+#define PRINT(...) fprintf(stderr, __VA_ARGS__);
+
+namespace webrtc {
+
+// Number of callbacks (input or output) the test waits for before we set
+// an event indicating that the test was OK.
+static const size_t kNumCallbacks = 10;
+// Max amount of time we wait for an event to be set while counting callbacks.
+static const int kTestTimeOutInMilliseconds = 10 * 1000;
+// Average number of audio callbacks per second assuming 10ms packet size.
+static const size_t kNumCallbacksPerSecond = 100;
+// Play out a test file during this time (unit is in seconds).
+static const int kFilePlayTimeInSec = 5;
+static const size_t kBitsPerSample = 16;
+static const size_t kBytesPerSample = kBitsPerSample / 8;
+// Run the full-duplex test during this time (unit is in seconds).
+// Note that the first `kNumIgnoreFirstCallbacks` callbacks are ignored.
+static const int kFullDuplexTimeInSec = 5;
+// Wait for the callback sequence to stabilize by ignoring this number of
+// initial callbacks (avoids initial FIFO access).
+// Only used in the RunPlayoutAndRecordingInFullDuplex test.
+static const size_t kNumIgnoreFirstCallbacks = 50;
+// Sets the number of impulses per second in the latency test.
+static const int kImpulseFrequencyInHz = 1;
+// Length of round-trip latency measurements. Number of transmitted impulses
+// is kImpulseFrequencyInHz * kMeasureLatencyTimeInSec - 1.
+static const int kMeasureLatencyTimeInSec = 11;
+// Utilized in round-trip latency measurements to avoid capturing noise samples.
+static const int kImpulseThreshold = 1000;
+static const char kTag[] = "[..........] ";
+
+enum TransportType {
+ kPlayout = 0x1,
+ kRecording = 0x2,
+};
+
+// Interface for processing the audio stream. Real implementations can e.g.
+// run audio in loopback, read audio from a file or perform latency
+// measurements.
+class AudioStreamInterface {
+ public:
+ virtual void Write(const void* source, size_t num_frames) = 0;
+ virtual void Read(void* destination, size_t num_frames) = 0;
+
+ protected:
+ virtual ~AudioStreamInterface() {}
+};
+
+// Reads audio samples from a PCM file where the file is stored in memory at
+// construction.
+class FileAudioStream : public AudioStreamInterface {
+ public:
+ FileAudioStream(size_t num_callbacks,
+ absl::string_view file_name,
+ int sample_rate)
+ : file_size_in_bytes_(0), sample_rate_(sample_rate), file_pos_(0) {
+ file_size_in_bytes_ = test::GetFileSize(file_name);
+ sample_rate_ = sample_rate;
+ EXPECT_GE(file_size_in_callbacks(), num_callbacks)
+ << "Size of test file is not large enough to last during the test.";
+ const size_t num_16bit_samples =
+ test::GetFileSize(file_name) / kBytesPerSample;
+ file_.reset(new int16_t[num_16bit_samples]);
+ FILE* audio_file = fopen(std::string(file_name).c_str(), "rb");
+ EXPECT_NE(audio_file, nullptr);
+ size_t num_samples_read =
+ fread(file_.get(), sizeof(int16_t), num_16bit_samples, audio_file);
+ EXPECT_EQ(num_samples_read, num_16bit_samples);
+ fclose(audio_file);
+ }
+
+ // AudioStreamInterface::Write() is not implemented.
+ void Write(const void* source, size_t num_frames) override {}
+
+ // Read samples from file stored in memory (at construction) and copy
+ // `num_frames` (<=> 10ms) to the `destination` byte buffer.
+ void Read(void* destination, size_t num_frames) override {
+ memcpy(destination, static_cast<int16_t*>(&file_[file_pos_]),
+ num_frames * sizeof(int16_t));
+ file_pos_ += num_frames;
+ }
+
+ int file_size_in_seconds() const {
+ return static_cast<int>(file_size_in_bytes_ /
+ (kBytesPerSample * sample_rate_));
+ }
+ size_t file_size_in_callbacks() const {
+ return file_size_in_seconds() * kNumCallbacksPerSecond;
+ }
+
+ private:
+ size_t file_size_in_bytes_;
+ int sample_rate_;
+ std::unique_ptr<int16_t[]> file_;
+ size_t file_pos_;
+};
+
+// Simple first in first out (FIFO) class that wraps a list of 16-bit audio
+// buffers of fixed size and allows Write and Read operations. The idea is to
+// store recorded audio buffers (using Write) and then read (using Read) these
+// stored buffers with as short a delay as possible when the audio layer needs
+// data to play out. The number of buffers in the FIFO will stabilize under
+// normal conditions since there will be a balance between Write and Read calls.
+// The container is a std::list and access is protected with a lock since the
+// two sides (playout and recording) are each driven by their own thread.
+class FifoAudioStream : public AudioStreamInterface {
+ public:
+ explicit FifoAudioStream(size_t frames_per_buffer)
+ : frames_per_buffer_(frames_per_buffer),
+ bytes_per_buffer_(frames_per_buffer_ * sizeof(int16_t)),
+ fifo_(new AudioBufferList),
+ largest_size_(0),
+ total_written_elements_(0),
+ write_count_(0) {
+ EXPECT_NE(fifo_.get(), nullptr);
+ }
+
+ ~FifoAudioStream() { Flush(); }
+
+ // Allocate new memory, copy `num_frames` samples from `source` into memory
+ // and add pointer to the memory location to end of the list.
+ // Increases the size of the FIFO by one element.
+ void Write(const void* source, size_t num_frames) override {
+ ASSERT_EQ(num_frames, frames_per_buffer_);
+ PRINTD("+");
+ if (write_count_++ < kNumIgnoreFirstCallbacks) {
+ return;
+ }
+ int16_t* memory = new int16_t[frames_per_buffer_];
+ memcpy(static_cast<int16_t*>(&memory[0]), source, bytes_per_buffer_);
+ MutexLock lock(&lock_);
+ fifo_->push_back(memory);
+ const size_t size = fifo_->size();
+ if (size > largest_size_) {
+ largest_size_ = size;
+ PRINTD("(%zu)", largest_size_);
+ }
+ total_written_elements_ += size;
+ }
+
+ // Read pointer to data buffer from front of list, copy `num_frames` of stored
+ // data into `destination` and delete the utilized memory allocation.
+ // Decreases the size of the FIFO by one element.
+ void Read(void* destination, size_t num_frames) override {
+ ASSERT_EQ(num_frames, frames_per_buffer_);
+ PRINTD("-");
+ MutexLock lock(&lock_);
+ if (fifo_->empty()) {
+ memset(destination, 0, bytes_per_buffer_);
+ } else {
+ int16_t* memory = fifo_->front();
+ fifo_->pop_front();
+ memcpy(destination, static_cast<int16_t*>(&memory[0]), bytes_per_buffer_);
+ delete memory;
+ }
+ }
+
+ size_t size() const { return fifo_->size(); }
+
+ size_t largest_size() const { return largest_size_; }
+
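+  // Average FIFO size over the counted writes (the first
+  // kNumIgnoreFirstCallbacks writes are ignored); adding 0.5 rounds the
+  // result to the nearest integer.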
+ size_t average_size() const {
+ return (total_written_elements_ == 0)
+ ? 0.0
+ : 0.5 + static_cast<float>(total_written_elements_) /
+ (write_count_ - kNumIgnoreFirstCallbacks);
+ }
+
+ private:
+ void Flush() {
+ for (auto it = fifo_->begin(); it != fifo_->end(); ++it) {
+ delete *it;
+ }
+ fifo_->clear();
+ }
+
+ using AudioBufferList = std::list<int16_t*>;
+ Mutex lock_;
+ const size_t frames_per_buffer_;
+ const size_t bytes_per_buffer_;
+ std::unique_ptr<AudioBufferList> fifo_;
+ size_t largest_size_;
+ size_t total_written_elements_;
+ size_t write_count_;
+};
+
+// Inserts periodic impulses and measures the latency between the time of
+// transmission and time of receiving the same impulse.
+// Usage requires a special hardware called Audio Loopback Dongle.
+// See http://source.android.com/devices/audio/loopback.html for details.
+class LatencyMeasuringAudioStream : public AudioStreamInterface {
+ public:
+ explicit LatencyMeasuringAudioStream(size_t frames_per_buffer)
+ : frames_per_buffer_(frames_per_buffer),
+ bytes_per_buffer_(frames_per_buffer_ * sizeof(int16_t)),
+ play_count_(0),
+ rec_count_(0),
+ pulse_time_(0) {}
+
+ // Insert periodic impulses in first two samples of `destination`.
+ void Read(void* destination, size_t num_frames) override {
+ ASSERT_EQ(num_frames, frames_per_buffer_);
+ if (play_count_ == 0) {
+ PRINT("[");
+ }
+ play_count_++;
+ memset(destination, 0, bytes_per_buffer_);
+ if (play_count_ % (kNumCallbacksPerSecond / kImpulseFrequencyInHz) == 0) {
+ if (pulse_time_ == 0) {
+ pulse_time_ = rtc::TimeMillis();
+ }
+ PRINT(".");
+ const int16_t impulse = std::numeric_limits<int16_t>::max();
+ int16_t* ptr16 = static_cast<int16_t*>(destination);
+ for (size_t i = 0; i < 2; ++i) {
+ ptr16[i] = impulse;
+ }
+ }
+ }
+
+ // Detect received impulses in `source`, derive time between transmission and
+ // detection and add the calculated delay to list of latencies.
+ void Write(const void* source, size_t num_frames) override {
+ ASSERT_EQ(num_frames, frames_per_buffer_);
+ rec_count_++;
+ if (pulse_time_ == 0) {
+ // Avoid detection of new impulse response until a new impulse has
+      // been transmitted (sets `pulse_time_` to a value larger than zero).
+ return;
+ }
+ const int16_t* ptr16 = static_cast<const int16_t*>(source);
+ std::vector<int16_t> vec(ptr16, ptr16 + num_frames);
+ // Find max value in the audio buffer.
+ int max = *std::max_element(vec.begin(), vec.end());
+ // Find index (element position in vector) of the max element.
+ int index_of_max =
+ std::distance(vec.begin(), std::find(vec.begin(), vec.end(), max));
+ if (max > kImpulseThreshold) {
+ PRINTD("(%d,%d)", max, index_of_max);
+ int64_t now_time = rtc::TimeMillis();
+ int extra_delay = IndexToMilliseconds(static_cast<double>(index_of_max));
+ PRINTD("[%d]", static_cast<int>(now_time - pulse_time_));
+ PRINTD("[%d]", extra_delay);
+ // Total latency is the difference between transmit time and detection
+      // time plus the extra delay within the buffer in which we detected the
+ // received impulse. It is transmitted at sample 0 but can be received
+ // at sample N where N > 0. The term `extra_delay` accounts for N and it
+ // is a value between 0 and 10ms.
+ latencies_.push_back(now_time - pulse_time_ + extra_delay);
+ pulse_time_ = 0;
+ } else {
+ PRINTD("-");
+ }
+ }
+
+ size_t num_latency_values() const { return latencies_.size(); }
+
+ int min_latency() const {
+ if (latencies_.empty())
+ return 0;
+ return *std::min_element(latencies_.begin(), latencies_.end());
+ }
+
+ int max_latency() const {
+ if (latencies_.empty())
+ return 0;
+ return *std::max_element(latencies_.begin(), latencies_.end());
+ }
+
+ int average_latency() const {
+ if (latencies_.empty())
+ return 0;
+ return 0.5 + static_cast<double>(
+ std::accumulate(latencies_.begin(), latencies_.end(), 0)) /
+ latencies_.size();
+ }
+
+ void PrintResults() const {
+ PRINT("] ");
+ for (auto it = latencies_.begin(); it != latencies_.end(); ++it) {
+ PRINT("%d ", *it);
+ }
+ PRINT("\n");
+ PRINT("%s[min, max, avg]=[%d, %d, %d] ms\n", kTag, min_latency(),
+ max_latency(), average_latency());
+ }
+
+ int IndexToMilliseconds(double index) const {
+ return static_cast<int>(10.0 * (index / frames_per_buffer_) + 0.5);
+ }
+
+ private:
+ const size_t frames_per_buffer_;
+ const size_t bytes_per_buffer_;
+ size_t play_count_;
+ size_t rec_count_;
+ int64_t pulse_time_;
+ std::vector<int> latencies_;
+};
+
+// Mocks the AudioTransport object and proxies actions for the two callbacks
+// (RecordedDataIsAvailable and NeedMorePlayData) to different implementations
+// of AudioStreamInterface.
+class MockAudioTransportAndroid : public test::MockAudioTransport {
+ public:
+ explicit MockAudioTransportAndroid(int type)
+ : num_callbacks_(0),
+ type_(type),
+ play_count_(0),
+ rec_count_(0),
+ audio_stream_(nullptr) {}
+
+ virtual ~MockAudioTransportAndroid() {}
+
+ // Set default actions of the mock object. We are delegating to fake
+ // implementations (of AudioStreamInterface) here.
+ void HandleCallbacks(rtc::Event* test_is_done,
+ AudioStreamInterface* audio_stream,
+ int num_callbacks) {
+ test_is_done_ = test_is_done;
+ audio_stream_ = audio_stream;
+ num_callbacks_ = num_callbacks;
+ if (play_mode()) {
+ ON_CALL(*this, NeedMorePlayData(_, _, _, _, _, _, _, _))
+ .WillByDefault(
+ Invoke(this, &MockAudioTransportAndroid::RealNeedMorePlayData));
+ }
+ if (rec_mode()) {
+ ON_CALL(*this, RecordedDataIsAvailable(_, _, _, _, _, _, _, _, _, _))
+ .WillByDefault(Invoke(
+ this, &MockAudioTransportAndroid::RealRecordedDataIsAvailable));
+ }
+ }
+
+ int32_t RealRecordedDataIsAvailable(const void* audioSamples,
+ const size_t nSamples,
+ const size_t nBytesPerSample,
+ const size_t nChannels,
+ const uint32_t samplesPerSec,
+ const uint32_t totalDelayMS,
+ const int32_t clockDrift,
+ const uint32_t currentMicLevel,
+ const bool keyPressed,
+ uint32_t& newMicLevel) { // NOLINT
+ EXPECT_TRUE(rec_mode()) << "No test is expecting these callbacks.";
+ rec_count_++;
+ // Process the recorded audio stream if an AudioStreamInterface
+ // implementation exists.
+ if (audio_stream_) {
+ audio_stream_->Write(audioSamples, nSamples);
+ }
+ if (ReceivedEnoughCallbacks()) {
+ test_is_done_->Set();
+ }
+ return 0;
+ }
+
+ int32_t RealNeedMorePlayData(const size_t nSamples,
+ const size_t nBytesPerSample,
+ const size_t nChannels,
+ const uint32_t samplesPerSec,
+ void* audioSamples,
+ size_t& nSamplesOut, // NOLINT
+ int64_t* elapsed_time_ms,
+ int64_t* ntp_time_ms) {
+ EXPECT_TRUE(play_mode()) << "No test is expecting these callbacks.";
+ play_count_++;
+ nSamplesOut = nSamples;
+ // Read (possibly processed) audio stream samples to be played out if an
+ // AudioStreamInterface implementation exists.
+ if (audio_stream_) {
+ audio_stream_->Read(audioSamples, nSamples);
+ }
+ if (ReceivedEnoughCallbacks()) {
+ test_is_done_->Set();
+ }
+ return 0;
+ }
+
+ bool ReceivedEnoughCallbacks() {
+ bool recording_done = false;
+ if (rec_mode())
+ recording_done = rec_count_ >= num_callbacks_;
+ else
+ recording_done = true;
+
+ bool playout_done = false;
+ if (play_mode())
+ playout_done = play_count_ >= num_callbacks_;
+ else
+ playout_done = true;
+
+ return recording_done && playout_done;
+ }
+
+ bool play_mode() const { return type_ & kPlayout; }
+ bool rec_mode() const { return type_ & kRecording; }
+
+ private:
+ rtc::Event* test_is_done_;
+ size_t num_callbacks_;
+ int type_;
+ size_t play_count_;
+ size_t rec_count_;
+ AudioStreamInterface* audio_stream_;
+ std::unique_ptr<LatencyMeasuringAudioStream> latency_audio_stream_;
+};
+
+// AudioDeviceTest test fixture.
+class AudioDeviceTest : public ::testing::Test {
+ protected:
+ AudioDeviceTest() : task_queue_factory_(CreateDefaultTaskQueueFactory()) {
+ // One-time initialization of JVM and application context. Ensures that we
+ // can do calls between C++ and Java. Initializes both Java and OpenSL ES
+ // implementations.
+ webrtc::audiodevicemodule::EnsureInitialized();
+ // Creates an audio device using a default audio layer.
+ audio_device_ = CreateAudioDevice(AudioDeviceModule::kPlatformDefaultAudio);
+ EXPECT_NE(audio_device_.get(), nullptr);
+ EXPECT_EQ(0, audio_device_->Init());
+ playout_parameters_ = audio_manager()->GetPlayoutAudioParameters();
+ record_parameters_ = audio_manager()->GetRecordAudioParameters();
+ build_info_.reset(new BuildInfo());
+ }
+ virtual ~AudioDeviceTest() { EXPECT_EQ(0, audio_device_->Terminate()); }
+
+ int playout_sample_rate() const { return playout_parameters_.sample_rate(); }
+ int record_sample_rate() const { return record_parameters_.sample_rate(); }
+ size_t playout_channels() const { return playout_parameters_.channels(); }
+ size_t record_channels() const { return record_parameters_.channels(); }
+ size_t playout_frames_per_10ms_buffer() const {
+ return playout_parameters_.frames_per_10ms_buffer();
+ }
+ size_t record_frames_per_10ms_buffer() const {
+ return record_parameters_.frames_per_10ms_buffer();
+ }
+
+ int total_delay_ms() const {
+ return audio_manager()->GetDelayEstimateInMilliseconds();
+ }
+
+ rtc::scoped_refptr<AudioDeviceModule> audio_device() const {
+ return audio_device_;
+ }
+
+ AudioDeviceModuleImpl* audio_device_impl() const {
+ return static_cast<AudioDeviceModuleImpl*>(audio_device_.get());
+ }
+
+ AudioManager* audio_manager() const {
+ return audio_device_impl()->GetAndroidAudioManagerForTest();
+ }
+
+ AudioManager* GetAudioManager(AudioDeviceModule* adm) const {
+ return static_cast<AudioDeviceModuleImpl*>(adm)
+ ->GetAndroidAudioManagerForTest();
+ }
+
+ AudioDeviceBuffer* audio_device_buffer() const {
+ return audio_device_impl()->GetAudioDeviceBuffer();
+ }
+
+ rtc::scoped_refptr<AudioDeviceModule> CreateAudioDevice(
+ AudioDeviceModule::AudioLayer audio_layer) {
+ rtc::scoped_refptr<AudioDeviceModule> module(
+ AudioDeviceModule::Create(audio_layer, task_queue_factory_.get()));
+ return module;
+ }
+
+ // Returns file name relative to the resource root given a sample rate.
+ std::string GetFileName(int sample_rate) {
+ EXPECT_TRUE(sample_rate == 48000 || sample_rate == 44100);
+ char fname[64];
+ snprintf(fname, sizeof(fname), "audio_device/audio_short%d",
+ sample_rate / 1000);
+ std::string file_name(webrtc::test::ResourcePath(fname, "pcm"));
+ EXPECT_TRUE(test::FileExists(file_name));
+#ifdef ENABLE_PRINTF
+ PRINT("file name: %s\n", file_name.c_str());
+ const size_t bytes = test::GetFileSize(file_name);
+ PRINT("file size: %zu [bytes]\n", bytes);
+ PRINT("file size: %zu [samples]\n", bytes / kBytesPerSample);
+ const int seconds =
+ static_cast<int>(bytes / (sample_rate * kBytesPerSample));
+ PRINT("file size: %d [secs]\n", seconds);
+ PRINT("file size: %zu [callbacks]\n", seconds * kNumCallbacksPerSecond);
+#endif
+ return file_name;
+ }
+
+ AudioDeviceModule::AudioLayer GetActiveAudioLayer() const {
+ AudioDeviceModule::AudioLayer audio_layer;
+ EXPECT_EQ(0, audio_device()->ActiveAudioLayer(&audio_layer));
+ return audio_layer;
+ }
+
+ int TestDelayOnAudioLayer(
+ const AudioDeviceModule::AudioLayer& layer_to_test) {
+ rtc::scoped_refptr<AudioDeviceModule> audio_device;
+ audio_device = CreateAudioDevice(layer_to_test);
+ EXPECT_NE(audio_device.get(), nullptr);
+ AudioManager* audio_manager = GetAudioManager(audio_device.get());
+ EXPECT_NE(audio_manager, nullptr);
+ return audio_manager->GetDelayEstimateInMilliseconds();
+ }
+
+ AudioDeviceModule::AudioLayer TestActiveAudioLayer(
+ const AudioDeviceModule::AudioLayer& layer_to_test) {
+ rtc::scoped_refptr<AudioDeviceModule> audio_device;
+ audio_device = CreateAudioDevice(layer_to_test);
+ EXPECT_NE(audio_device.get(), nullptr);
+ AudioDeviceModule::AudioLayer active;
+ EXPECT_EQ(0, audio_device->ActiveAudioLayer(&active));
+ return active;
+ }
+
+ bool DisableTestForThisDevice(absl::string_view model) {
+ return (build_info_->GetDeviceModel() == model);
+ }
+
+ // Volume control is currently only supported for the Java output audio layer.
+  // For OpenSL ES, the internal stream volume is always at max level and there
+ // is no need for this test to set it to max.
+ bool AudioLayerSupportsVolumeControl() const {
+ return GetActiveAudioLayer() == AudioDeviceModule::kAndroidJavaAudio;
+ }
+
+ void SetMaxPlayoutVolume() {
+ if (!AudioLayerSupportsVolumeControl())
+ return;
+ uint32_t max_volume;
+ EXPECT_EQ(0, audio_device()->MaxSpeakerVolume(&max_volume));
+ EXPECT_EQ(0, audio_device()->SetSpeakerVolume(max_volume));
+ }
+
+ void DisableBuiltInAECIfAvailable() {
+ if (audio_device()->BuiltInAECIsAvailable()) {
+ EXPECT_EQ(0, audio_device()->EnableBuiltInAEC(false));
+ }
+ }
+
+ void StartPlayout() {
+ EXPECT_FALSE(audio_device()->PlayoutIsInitialized());
+ EXPECT_FALSE(audio_device()->Playing());
+ EXPECT_EQ(0, audio_device()->InitPlayout());
+ EXPECT_TRUE(audio_device()->PlayoutIsInitialized());
+ EXPECT_EQ(0, audio_device()->StartPlayout());
+ EXPECT_TRUE(audio_device()->Playing());
+ }
+
+ void StopPlayout() {
+ EXPECT_EQ(0, audio_device()->StopPlayout());
+ EXPECT_FALSE(audio_device()->Playing());
+ EXPECT_FALSE(audio_device()->PlayoutIsInitialized());
+ }
+
+ void StartRecording() {
+ EXPECT_FALSE(audio_device()->RecordingIsInitialized());
+ EXPECT_FALSE(audio_device()->Recording());
+ EXPECT_EQ(0, audio_device()->InitRecording());
+ EXPECT_TRUE(audio_device()->RecordingIsInitialized());
+ EXPECT_EQ(0, audio_device()->StartRecording());
+ EXPECT_TRUE(audio_device()->Recording());
+ }
+
+ void StopRecording() {
+ EXPECT_EQ(0, audio_device()->StopRecording());
+ EXPECT_FALSE(audio_device()->Recording());
+ }
+
+ int GetMaxSpeakerVolume() const {
+ uint32_t max_volume(0);
+ EXPECT_EQ(0, audio_device()->MaxSpeakerVolume(&max_volume));
+ return max_volume;
+ }
+
+ int GetMinSpeakerVolume() const {
+ uint32_t min_volume(0);
+ EXPECT_EQ(0, audio_device()->MinSpeakerVolume(&min_volume));
+ return min_volume;
+ }
+
+ int GetSpeakerVolume() const {
+ uint32_t volume(0);
+ EXPECT_EQ(0, audio_device()->SpeakerVolume(&volume));
+ return volume;
+ }
+
+ rtc::Event test_is_done_;
+ std::unique_ptr<TaskQueueFactory> task_queue_factory_;
+ rtc::scoped_refptr<AudioDeviceModule> audio_device_;
+ AudioParameters playout_parameters_;
+ AudioParameters record_parameters_;
+ std::unique_ptr<BuildInfo> build_info_;
+};
+
+TEST_F(AudioDeviceTest, ConstructDestruct) {
+ // Using the test fixture to create and destruct the audio device module.
+}
+
+// We always ask for a default audio layer when the ADM is constructed. But the
+// ADM then internally selects the most suitable combination of audio layers
+// for input and output, based on whether low-latency output and/or input in
+// combination with OpenSL ES is supported. This test ensures that the correct
+// selection is done.
+TEST_F(AudioDeviceTest, VerifyDefaultAudioLayer) {
+ const AudioDeviceModule::AudioLayer audio_layer = GetActiveAudioLayer();
+ bool low_latency_output = audio_manager()->IsLowLatencyPlayoutSupported();
+ bool low_latency_input = audio_manager()->IsLowLatencyRecordSupported();
+ bool aaudio = audio_manager()->IsAAudioSupported();
+ AudioDeviceModule::AudioLayer expected_audio_layer;
+ if (aaudio) {
+ expected_audio_layer = AudioDeviceModule::kAndroidAAudioAudio;
+ } else if (low_latency_output && low_latency_input) {
+ expected_audio_layer = AudioDeviceModule::kAndroidOpenSLESAudio;
+ } else if (low_latency_output && !low_latency_input) {
+ expected_audio_layer =
+ AudioDeviceModule::kAndroidJavaInputAndOpenSLESOutputAudio;
+ } else {
+ expected_audio_layer = AudioDeviceModule::kAndroidJavaAudio;
+ }
+ EXPECT_EQ(expected_audio_layer, audio_layer);
+}
+
+// Verify that it is possible to explicitly create the two types of supported
+// ADMs. These tests override the default selection of the native audio layer
+// by ignoring whether the device supports low-latency output.
+TEST_F(AudioDeviceTest, CorrectAudioLayerIsUsedForCombinedJavaOpenSLCombo) {
+ AudioDeviceModule::AudioLayer expected_layer =
+ AudioDeviceModule::kAndroidJavaInputAndOpenSLESOutputAudio;
+ AudioDeviceModule::AudioLayer active_layer =
+ TestActiveAudioLayer(expected_layer);
+ EXPECT_EQ(expected_layer, active_layer);
+}
+
+TEST_F(AudioDeviceTest, CorrectAudioLayerIsUsedForJavaInBothDirections) {
+ AudioDeviceModule::AudioLayer expected_layer =
+ AudioDeviceModule::kAndroidJavaAudio;
+ AudioDeviceModule::AudioLayer active_layer =
+ TestActiveAudioLayer(expected_layer);
+ EXPECT_EQ(expected_layer, active_layer);
+}
+
+TEST_F(AudioDeviceTest, CorrectAudioLayerIsUsedForOpenSLInBothDirections) {
+ AudioDeviceModule::AudioLayer expected_layer =
+ AudioDeviceModule::kAndroidOpenSLESAudio;
+ AudioDeviceModule::AudioLayer active_layer =
+ TestActiveAudioLayer(expected_layer);
+ EXPECT_EQ(expected_layer, active_layer);
+}
+
+// TODO(bugs.webrtc.org/8914)
+#if !defined(WEBRTC_AUDIO_DEVICE_INCLUDE_ANDROID_AAUDIO)
+#define MAYBE_CorrectAudioLayerIsUsedForAAudioInBothDirections \
+ DISABLED_CorrectAudioLayerIsUsedForAAudioInBothDirections
+#else
+#define MAYBE_CorrectAudioLayerIsUsedForAAudioInBothDirections \
+ CorrectAudioLayerIsUsedForAAudioInBothDirections
+#endif
+TEST_F(AudioDeviceTest,
+ MAYBE_CorrectAudioLayerIsUsedForAAudioInBothDirections) {
+ AudioDeviceModule::AudioLayer expected_layer =
+ AudioDeviceModule::kAndroidAAudioAudio;
+ AudioDeviceModule::AudioLayer active_layer =
+ TestActiveAudioLayer(expected_layer);
+ EXPECT_EQ(expected_layer, active_layer);
+}
+
+// TODO(bugs.webrtc.org/8914)
+#if !defined(WEBRTC_AUDIO_DEVICE_INCLUDE_ANDROID_AAUDIO)
+#define MAYBE_CorrectAudioLayerIsUsedForCombinedJavaAAudioCombo \
+ DISABLED_CorrectAudioLayerIsUsedForCombinedJavaAAudioCombo
+#else
+#define MAYBE_CorrectAudioLayerIsUsedForCombinedJavaAAudioCombo \
+ CorrectAudioLayerIsUsedForCombinedJavaAAudioCombo
+#endif
+TEST_F(AudioDeviceTest,
+ MAYBE_CorrectAudioLayerIsUsedForCombinedJavaAAudioCombo) {
+ AudioDeviceModule::AudioLayer expected_layer =
+ AudioDeviceModule::kAndroidJavaInputAndAAudioOutputAudio;
+ AudioDeviceModule::AudioLayer active_layer =
+ TestActiveAudioLayer(expected_layer);
+ EXPECT_EQ(expected_layer, active_layer);
+}
+
+// The Android ADM supports two different delay reporting modes. One for the
+// low-latency output path (in combination with OpenSL ES), and one for the
+// high-latency output path (Java backends in both directions). These two tests
+// verify that the audio manager reports the correct delay estimate given the
+// selected audio layer. Note that this delay estimate will only be utilized
+// if the HW AEC is disabled.
+TEST_F(AudioDeviceTest, UsesCorrectDelayEstimateForHighLatencyOutputPath) {
+ EXPECT_EQ(kHighLatencyModeDelayEstimateInMilliseconds,
+ TestDelayOnAudioLayer(AudioDeviceModule::kAndroidJavaAudio));
+}
+
+TEST_F(AudioDeviceTest, UsesCorrectDelayEstimateForLowLatencyOutputPath) {
+ EXPECT_EQ(kLowLatencyModeDelayEstimateInMilliseconds,
+ TestDelayOnAudioLayer(
+ AudioDeviceModule::kAndroidJavaInputAndOpenSLESOutputAudio));
+}
+
+// Ensure that the ADM internal audio device buffer is configured to use the
+// correct set of parameters.
+TEST_F(AudioDeviceTest, VerifyAudioDeviceBufferParameters) {
+ EXPECT_EQ(playout_parameters_.sample_rate(),
+ static_cast<int>(audio_device_buffer()->PlayoutSampleRate()));
+ EXPECT_EQ(record_parameters_.sample_rate(),
+ static_cast<int>(audio_device_buffer()->RecordingSampleRate()));
+ EXPECT_EQ(playout_parameters_.channels(),
+ audio_device_buffer()->PlayoutChannels());
+ EXPECT_EQ(record_parameters_.channels(),
+ audio_device_buffer()->RecordingChannels());
+}
+
+TEST_F(AudioDeviceTest, InitTerminate) {
+ // Initialization is part of the test fixture.
+ EXPECT_TRUE(audio_device()->Initialized());
+ EXPECT_EQ(0, audio_device()->Terminate());
+ EXPECT_FALSE(audio_device()->Initialized());
+}
+
+TEST_F(AudioDeviceTest, Devices) {
+ // Device enumeration is not supported. Verify fixed values only.
+ EXPECT_EQ(1, audio_device()->PlayoutDevices());
+ EXPECT_EQ(1, audio_device()->RecordingDevices());
+}
+
+TEST_F(AudioDeviceTest, SpeakerVolumeShouldBeAvailable) {
+ // The OpenSL ES output audio path does not support volume control.
+ if (!AudioLayerSupportsVolumeControl())
+ return;
+ bool available;
+ EXPECT_EQ(0, audio_device()->SpeakerVolumeIsAvailable(&available));
+ EXPECT_TRUE(available);
+}
+
+TEST_F(AudioDeviceTest, MaxSpeakerVolumeIsPositive) {
+ // The OpenSL ES output audio path does not support volume control.
+ if (!AudioLayerSupportsVolumeControl())
+ return;
+ StartPlayout();
+ EXPECT_GT(GetMaxSpeakerVolume(), 0);
+ StopPlayout();
+}
+
+TEST_F(AudioDeviceTest, MinSpeakerVolumeIsZero) {
+ // The OpenSL ES output audio path does not support volume control.
+ if (!AudioLayerSupportsVolumeControl())
+ return;
+ EXPECT_EQ(GetMinSpeakerVolume(), 0);
+}
+
+TEST_F(AudioDeviceTest, DefaultSpeakerVolumeIsWithinMinMax) {
+ // The OpenSL ES output audio path does not support volume control.
+ if (!AudioLayerSupportsVolumeControl())
+ return;
+ const int default_volume = GetSpeakerVolume();
+ EXPECT_GE(default_volume, GetMinSpeakerVolume());
+ EXPECT_LE(default_volume, GetMaxSpeakerVolume());
+}
+
+TEST_F(AudioDeviceTest, SetSpeakerVolumeActuallySetsVolume) {
+ // The OpenSL ES output audio path does not support volume control.
+ if (!AudioLayerSupportsVolumeControl())
+ return;
+ const int default_volume = GetSpeakerVolume();
+ const int max_volume = GetMaxSpeakerVolume();
+ EXPECT_EQ(0, audio_device()->SetSpeakerVolume(max_volume));
+ int new_volume = GetSpeakerVolume();
+ EXPECT_EQ(new_volume, max_volume);
+ EXPECT_EQ(0, audio_device()->SetSpeakerVolume(default_volume));
+}
+
+// Tests that playout can be initiated, started and stopped. No audio callback
+// is registered in this test.
+TEST_F(AudioDeviceTest, StartStopPlayout) {
+ StartPlayout();
+ StopPlayout();
+ StartPlayout();
+ StopPlayout();
+}
+
+// Tests that recording can be initiated, started and stopped. No audio callback
+// is registered in this test.
+TEST_F(AudioDeviceTest, StartStopRecording) {
+ StartRecording();
+ StopRecording();
+ StartRecording();
+ StopRecording();
+}
+
+// Verify that calling StopPlayout() will leave us in an uninitialized state
+// which will require a new call to InitPlayout(). This test does not call
+// StartPlayout() while being uninitialized since doing so will hit a
+// RTC_DCHECK and death tests are not supported on Android.
+TEST_F(AudioDeviceTest, StopPlayoutRequiresInitToRestart) {
+ EXPECT_EQ(0, audio_device()->InitPlayout());
+ EXPECT_EQ(0, audio_device()->StartPlayout());
+ EXPECT_EQ(0, audio_device()->StopPlayout());
+ EXPECT_FALSE(audio_device()->PlayoutIsInitialized());
+}
+
+// Verify that calling StopRecording() will leave us in an uninitialized state
+// which will require a new call to InitRecording(). This test does not call
+// StartRecording() while being uninitialized since doing so will hit a
+// RTC_DCHECK and death tests are not supported on Android.
+TEST_F(AudioDeviceTest, StopRecordingRequiresInitToRestart) {
+ EXPECT_EQ(0, audio_device()->InitRecording());
+ EXPECT_EQ(0, audio_device()->StartRecording());
+ EXPECT_EQ(0, audio_device()->StopRecording());
+ EXPECT_FALSE(audio_device()->RecordingIsInitialized());
+}
+
+// Start playout and verify that the native audio layer starts asking for real
+// audio samples to play out using the NeedMorePlayData callback.
+TEST_F(AudioDeviceTest, StartPlayoutVerifyCallbacks) {
+ MockAudioTransportAndroid mock(kPlayout);
+ mock.HandleCallbacks(&test_is_done_, nullptr, kNumCallbacks);
+ EXPECT_CALL(mock, NeedMorePlayData(playout_frames_per_10ms_buffer(),
+ kBytesPerSample, playout_channels(),
+ playout_sample_rate(), NotNull(), _, _, _))
+ .Times(AtLeast(kNumCallbacks));
+ EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
+ StartPlayout();
+ test_is_done_.Wait(kTestTimeOutInMilliseconds);
+ StopPlayout();
+}
+
+// Start recording and verify that the native audio layer starts feeding real
+// audio samples via the RecordedDataIsAvailable callback.
+// TODO(henrika): investigate if it is possible to perform a sanity check of
+// delay estimates as well (argument #6).
+TEST_F(AudioDeviceTest, StartRecordingVerifyCallbacks) {
+ MockAudioTransportAndroid mock(kRecording);
+ mock.HandleCallbacks(&test_is_done_, nullptr, kNumCallbacks);
+ EXPECT_CALL(
+ mock, RecordedDataIsAvailable(NotNull(), record_frames_per_10ms_buffer(),
+ kBytesPerSample, record_channels(),
+ record_sample_rate(), _, 0, 0, false, _, _))
+ .Times(AtLeast(kNumCallbacks));
+
+ EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
+ StartRecording();
+ test_is_done_.Wait(kTestTimeOutInMilliseconds);
+ StopRecording();
+}
+
+// Start playout and recording (full-duplex audio) and verify that audio is
+// active in both directions.
+TEST_F(AudioDeviceTest, StartPlayoutAndRecordingVerifyCallbacks) {
+ MockAudioTransportAndroid mock(kPlayout | kRecording);
+ mock.HandleCallbacks(&test_is_done_, nullptr, kNumCallbacks);
+ EXPECT_CALL(mock, NeedMorePlayData(playout_frames_per_10ms_buffer(),
+ kBytesPerSample, playout_channels(),
+ playout_sample_rate(), NotNull(), _, _, _))
+ .Times(AtLeast(kNumCallbacks));
+ EXPECT_CALL(
+ mock, RecordedDataIsAvailable(NotNull(), record_frames_per_10ms_buffer(),
+ kBytesPerSample, record_channels(),
+ record_sample_rate(), _, 0, 0, false, _, _))
+ .Times(AtLeast(kNumCallbacks));
+ EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
+ StartPlayout();
+ StartRecording();
+ test_is_done_.Wait(kTestTimeOutInMilliseconds);
+ StopRecording();
+ StopPlayout();
+}
+
+// Start playout and read audio from an external PCM file when the audio layer
+// asks for data to play out. Real audio is played out in this test but it does
+// not contain any explicit verification that the audio quality is perfect.
+TEST_F(AudioDeviceTest, RunPlayoutWithFileAsSource) {
+ // TODO(henrika): extend test when mono output is supported.
+ EXPECT_EQ(1u, playout_channels());
+ NiceMock<MockAudioTransportAndroid> mock(kPlayout);
+ const int num_callbacks = kFilePlayTimeInSec * kNumCallbacksPerSecond;
+ std::string file_name = GetFileName(playout_sample_rate());
+ std::unique_ptr<FileAudioStream> file_audio_stream(
+ new FileAudioStream(num_callbacks, file_name, playout_sample_rate()));
+ mock.HandleCallbacks(&test_is_done_, file_audio_stream.get(), num_callbacks);
+ // SetMaxPlayoutVolume();
+ EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
+ StartPlayout();
+ test_is_done_.Wait(kTestTimeOutInMilliseconds);
+ StopPlayout();
+}
+
+// Start playout and recording and store recorded data in an intermediate FIFO
+// buffer from which the playout side then reads its samples in the same order
+// as they were stored. Under ideal circumstances, a callback sequence would
+// look like: ...+-+-+-+-+-+-+-..., where '+' means 'packet recorded' and '-'
+// means 'packet played'. Under such conditions, the FIFO would only contain
+// one packet on average. However, under more realistic conditions, the size
+// of the FIFO will vary more due to an imbalance between the two sides.
+// This test tries to verify that the device maintains a balanced callback-
+// sequence by running in loopback for ten seconds while measuring the size
+// (max and average) of the FIFO. The size of the FIFO is increased by the
+// recording side and decreased by the playout side.
+// TODO(henrika): tune the final test parameters after running tests on several
+// different devices.
+// Disabling this test on bots since it is difficult to come up with a robust
+// test condition that works as intended on all devices. The main issue is
+// that, when swarming is used, an initial latency can build up when the two
+// sides start at different times. Hence, the test can fail even if audio works
+// as intended. Keeping the test so it can be enabled manually.
+// http://bugs.webrtc.org/7744
+TEST_F(AudioDeviceTest, DISABLED_RunPlayoutAndRecordingInFullDuplex) {
+ EXPECT_EQ(record_channels(), playout_channels());
+ EXPECT_EQ(record_sample_rate(), playout_sample_rate());
+ NiceMock<MockAudioTransportAndroid> mock(kPlayout | kRecording);
+ std::unique_ptr<FifoAudioStream> fifo_audio_stream(
+ new FifoAudioStream(playout_frames_per_10ms_buffer()));
+ mock.HandleCallbacks(&test_is_done_, fifo_audio_stream.get(),
+ kFullDuplexTimeInSec * kNumCallbacksPerSecond);
+ SetMaxPlayoutVolume();
+ EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
+ StartRecording();
+ StartPlayout();
+ test_is_done_.Wait(
+ std::max(kTestTimeOutInMilliseconds, 1000 * kFullDuplexTimeInSec));
+ StopPlayout();
+ StopRecording();
+
+  // These thresholds are set rather high to accommodate hardware differences
+  // across several devices, so this test can be used in swarming.
+ // See http://bugs.webrtc.org/6464
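+  // Each FIFO entry holds one 10 ms buffer, so the limits below correspond
+  // to roughly 600 ms (average) and 700 ms (peak) of buffered audio.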
+ EXPECT_LE(fifo_audio_stream->average_size(), 60u);
+ EXPECT_LE(fifo_audio_stream->largest_size(), 70u);
+}
+
+// Measures loopback latency and reports the min, max and average values for
+// a full duplex audio session.
+// The latency is measured like so:
+// - Insert impulses periodically on the output side.
+// - Detect the impulses on the input side.
+// - Measure the time difference between the transmit time and receive time.
+// - Store time differences in a vector and calculate min, max and average.
+// This test requires special hardware called an Audio Loopback Dongle.
+// See http://source.android.com/devices/audio/loopback.html for details.
+TEST_F(AudioDeviceTest, DISABLED_MeasureLoopbackLatency) {
+ EXPECT_EQ(record_channels(), playout_channels());
+ EXPECT_EQ(record_sample_rate(), playout_sample_rate());
+ NiceMock<MockAudioTransportAndroid> mock(kPlayout | kRecording);
+ std::unique_ptr<LatencyMeasuringAudioStream> latency_audio_stream(
+ new LatencyMeasuringAudioStream(playout_frames_per_10ms_buffer()));
+ mock.HandleCallbacks(&test_is_done_, latency_audio_stream.get(),
+ kMeasureLatencyTimeInSec * kNumCallbacksPerSecond);
+ EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
+ SetMaxPlayoutVolume();
+ DisableBuiltInAECIfAvailable();
+ StartRecording();
+ StartPlayout();
+ test_is_done_.Wait(
+ std::max(kTestTimeOutInMilliseconds, 1000 * kMeasureLatencyTimeInSec));
+ StopPlayout();
+ StopRecording();
+ // Verify that the correct number of transmitted impulses are detected.
+ EXPECT_EQ(latency_audio_stream->num_latency_values(),
+ static_cast<size_t>(
+ kImpulseFrequencyInHz * kMeasureLatencyTimeInSec - 1));
+ latency_audio_stream->PrintResults();
+}
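+
+// For reference, a minimal sketch of how the reported statistics can be
+// derived from the stored time differences (the names below are hypothetical;
+// LatencyMeasuringAudioStream implements its own version of this logic):
+//
+//   std::vector<int> latencies_ms = ...;  // One entry per detected impulse.
+//   int min_ms = latencies_ms[0], max_ms = latencies_ms[0];
+//   int64_t sum_ms = 0;
+//   for (int latency_ms : latencies_ms) {
+//     min_ms = std::min(min_ms, latency_ms);
+//     max_ms = std::max(max_ms, latency_ms);
+//     sum_ms += latency_ms;
+//   }
+//   const double avg_ms = static_cast<double>(sum_ms) / latencies_ms.size();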
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_device/android/audio_manager.cc b/third_party/libwebrtc/modules/audio_device/android/audio_manager.cc
new file mode 100644
index 0000000000..0b55496619
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/android/audio_manager.cc
@@ -0,0 +1,318 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_device/android/audio_manager.h"
+
+#include <utility>
+
+#include "modules/audio_device/android/audio_common.h"
+#include "modules/utility/include/helpers_android.h"
+#include "rtc_base/arraysize.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/platform_thread.h"
+
+namespace webrtc {
+
+// AudioManager::JavaAudioManager implementation
+AudioManager::JavaAudioManager::JavaAudioManager(
+ NativeRegistration* native_reg,
+ std::unique_ptr<GlobalRef> audio_manager)
+ : audio_manager_(std::move(audio_manager)),
+ init_(native_reg->GetMethodId("init", "()Z")),
+ dispose_(native_reg->GetMethodId("dispose", "()V")),
+ is_communication_mode_enabled_(
+ native_reg->GetMethodId("isCommunicationModeEnabled", "()Z")),
+ is_device_blacklisted_for_open_sles_usage_(
+ native_reg->GetMethodId("isDeviceBlacklistedForOpenSLESUsage",
+ "()Z")) {
+ RTC_LOG(LS_INFO) << "JavaAudioManager::ctor";
+}
+
+AudioManager::JavaAudioManager::~JavaAudioManager() {
+ RTC_LOG(LS_INFO) << "JavaAudioManager::~dtor";
+}
+
+bool AudioManager::JavaAudioManager::Init() {
+ return audio_manager_->CallBooleanMethod(init_);
+}
+
+void AudioManager::JavaAudioManager::Close() {
+ audio_manager_->CallVoidMethod(dispose_);
+}
+
+bool AudioManager::JavaAudioManager::IsCommunicationModeEnabled() {
+ return audio_manager_->CallBooleanMethod(is_communication_mode_enabled_);
+}
+
+bool AudioManager::JavaAudioManager::IsDeviceBlacklistedForOpenSLESUsage() {
+ return audio_manager_->CallBooleanMethod(
+ is_device_blacklisted_for_open_sles_usage_);
+}
+
+// AudioManager implementation
+AudioManager::AudioManager()
+ : j_environment_(JVM::GetInstance()->environment()),
+ audio_layer_(AudioDeviceModule::kPlatformDefaultAudio),
+ initialized_(false),
+ hardware_aec_(false),
+ hardware_agc_(false),
+ hardware_ns_(false),
+ low_latency_playout_(false),
+ low_latency_record_(false),
+ delay_estimate_in_milliseconds_(0) {
+ RTC_LOG(LS_INFO) << "ctor";
+ RTC_CHECK(j_environment_);
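+  // In the JNI signature below, "I" denotes jint, "Z" jboolean and "J" jlong,
+  // so "(IIIZZZZZZZIIJ)V" matches the parameter list of CacheAudioParameters:
+  // three ints, seven booleans, two ints and one long, returning void.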
+ JNINativeMethod native_methods[] = {
+ {"nativeCacheAudioParameters", "(IIIZZZZZZZIIJ)V",
+ reinterpret_cast<void*>(&webrtc::AudioManager::CacheAudioParameters)}};
+ j_native_registration_ = j_environment_->RegisterNatives(
+ "org/webrtc/voiceengine/WebRtcAudioManager", native_methods,
+ arraysize(native_methods));
+ j_audio_manager_.reset(
+ new JavaAudioManager(j_native_registration_.get(),
+ j_native_registration_->NewObject(
+ "<init>", "(J)V", PointerTojlong(this))));
+}
+
+AudioManager::~AudioManager() {
+ RTC_LOG(LS_INFO) << "dtor";
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ Close();
+}
+
+void AudioManager::SetActiveAudioLayer(
+ AudioDeviceModule::AudioLayer audio_layer) {
+ RTC_LOG(LS_INFO) << "SetActiveAudioLayer: " << audio_layer;
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ RTC_DCHECK(!initialized_);
+ // Store the currently utilized audio layer.
+ audio_layer_ = audio_layer;
+  // The delay estimate can take one of two fixed values depending on whether
+  // the device supports low-latency output or not. However, it is also possible
+ // that the user explicitly selects the high-latency audio path, hence we use
+ // the selected `audio_layer` here to set the delay estimate.
+ delay_estimate_in_milliseconds_ =
+ (audio_layer == AudioDeviceModule::kAndroidJavaAudio)
+ ? kHighLatencyModeDelayEstimateInMilliseconds
+ : kLowLatencyModeDelayEstimateInMilliseconds;
+ RTC_LOG(LS_INFO) << "delay_estimate_in_milliseconds: "
+ << delay_estimate_in_milliseconds_;
+}
+
+SLObjectItf AudioManager::GetOpenSLEngine() {
+ RTC_LOG(LS_INFO) << "GetOpenSLEngine";
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ // Only allow usage of OpenSL ES if such an audio layer has been specified.
+ if (audio_layer_ != AudioDeviceModule::kAndroidOpenSLESAudio &&
+ audio_layer_ !=
+ AudioDeviceModule::kAndroidJavaInputAndOpenSLESOutputAudio) {
+ RTC_LOG(LS_INFO)
+ << "Unable to create OpenSL engine for the current audio layer: "
+ << audio_layer_;
+ return nullptr;
+ }
+ // OpenSL ES for Android only supports a single engine per application.
+  // If one has already been created, return the existing object instead of
+  // creating a new one.
+ if (engine_object_.Get() != nullptr) {
+ RTC_LOG(LS_WARNING)
+ << "The OpenSL ES engine object has already been created";
+ return engine_object_.Get();
+ }
+  // Create the engine object in thread-safe mode.
+ const SLEngineOption option[] = {
+ {SL_ENGINEOPTION_THREADSAFE, static_cast<SLuint32>(SL_BOOLEAN_TRUE)}};
+ SLresult result =
+ slCreateEngine(engine_object_.Receive(), 1, option, 0, NULL, NULL);
+ if (result != SL_RESULT_SUCCESS) {
+ RTC_LOG(LS_ERROR) << "slCreateEngine() failed: "
+ << GetSLErrorString(result);
+ engine_object_.Reset();
+ return nullptr;
+ }
+ // Realize the SL Engine in synchronous mode.
+ result = engine_object_->Realize(engine_object_.Get(), SL_BOOLEAN_FALSE);
+ if (result != SL_RESULT_SUCCESS) {
+ RTC_LOG(LS_ERROR) << "Realize() failed: " << GetSLErrorString(result);
+ engine_object_.Reset();
+ return nullptr;
+ }
+ // Finally return the SLObjectItf interface of the engine object.
+ return engine_object_.Get();
+}
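+
+// For reference, a caller-side sketch of how the returned engine object is
+// typically used to acquire the SLEngineItf interface (`audio_manager` is
+// assumed to be configured with an OpenSL ES based audio layer):
+//
+//   SLObjectItf engine_object = audio_manager->GetOpenSLEngine();
+//   RTC_CHECK(engine_object);
+//   SLEngineItf engine = nullptr;
+//   SLresult result =
+//       (*engine_object)->GetInterface(engine_object, SL_IID_ENGINE, &engine);
+//   RTC_CHECK_EQ(result, SL_RESULT_SUCCESS);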
+
+bool AudioManager::Init() {
+ RTC_LOG(LS_INFO) << "Init";
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ RTC_DCHECK(!initialized_);
+ RTC_DCHECK_NE(audio_layer_, AudioDeviceModule::kPlatformDefaultAudio);
+ if (!j_audio_manager_->Init()) {
+ RTC_LOG(LS_ERROR) << "Init() failed";
+ return false;
+ }
+ initialized_ = true;
+ return true;
+}
+
+bool AudioManager::Close() {
+ RTC_LOG(LS_INFO) << "Close";
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ if (!initialized_)
+ return true;
+ j_audio_manager_->Close();
+ initialized_ = false;
+ return true;
+}
+
+bool AudioManager::IsCommunicationModeEnabled() const {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ return j_audio_manager_->IsCommunicationModeEnabled();
+}
+
+bool AudioManager::IsAcousticEchoCancelerSupported() const {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ return hardware_aec_;
+}
+
+bool AudioManager::IsAutomaticGainControlSupported() const {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ return hardware_agc_;
+}
+
+bool AudioManager::IsNoiseSuppressorSupported() const {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ return hardware_ns_;
+}
+
+bool AudioManager::IsLowLatencyPlayoutSupported() const {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ // Some devices are blacklisted for usage of OpenSL ES even if they report
+ // that low-latency playout is supported. See b/21485703 for details.
+ return j_audio_manager_->IsDeviceBlacklistedForOpenSLESUsage()
+ ? false
+ : low_latency_playout_;
+}
+
+bool AudioManager::IsLowLatencyRecordSupported() const {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ return low_latency_record_;
+}
+
+bool AudioManager::IsProAudioSupported() const {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+  // TODO(henrika): for now, return the state independently of whether OpenSL
+  // ES is blacklisted or not. We could use the same approach as in
+ // IsLowLatencyPlayoutSupported() but I can't see the need for it yet.
+ return pro_audio_;
+}
+
+// TODO(henrika): improve comments...
+bool AudioManager::IsAAudioSupported() const {
+#if defined(WEBRTC_AUDIO_DEVICE_INCLUDE_ANDROID_AAUDIO)
+ return a_audio_;
+#else
+ return false;
+#endif
+}
+
+bool AudioManager::IsStereoPlayoutSupported() const {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ return (playout_parameters_.channels() == 2);
+}
+
+bool AudioManager::IsStereoRecordSupported() const {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ return (record_parameters_.channels() == 2);
+}
+
+int AudioManager::GetDelayEstimateInMilliseconds() const {
+ return delay_estimate_in_milliseconds_;
+}
+
+JNI_FUNCTION_ALIGN
+void JNICALL AudioManager::CacheAudioParameters(JNIEnv* env,
+ jobject obj,
+ jint sample_rate,
+ jint output_channels,
+ jint input_channels,
+ jboolean hardware_aec,
+ jboolean hardware_agc,
+ jboolean hardware_ns,
+ jboolean low_latency_output,
+ jboolean low_latency_input,
+ jboolean pro_audio,
+ jboolean a_audio,
+ jint output_buffer_size,
+ jint input_buffer_size,
+ jlong native_audio_manager) {
+ webrtc::AudioManager* this_object =
+ reinterpret_cast<webrtc::AudioManager*>(native_audio_manager);
+ this_object->OnCacheAudioParameters(
+ env, sample_rate, output_channels, input_channels, hardware_aec,
+ hardware_agc, hardware_ns, low_latency_output, low_latency_input,
+ pro_audio, a_audio, output_buffer_size, input_buffer_size);
+}
+
+void AudioManager::OnCacheAudioParameters(JNIEnv* env,
+ jint sample_rate,
+ jint output_channels,
+ jint input_channels,
+ jboolean hardware_aec,
+ jboolean hardware_agc,
+ jboolean hardware_ns,
+ jboolean low_latency_output,
+ jboolean low_latency_input,
+ jboolean pro_audio,
+ jboolean a_audio,
+ jint output_buffer_size,
+ jint input_buffer_size) {
+ RTC_LOG(LS_INFO)
+ << "OnCacheAudioParameters: "
+ "hardware_aec: "
+ << static_cast<bool>(hardware_aec)
+ << ", hardware_agc: " << static_cast<bool>(hardware_agc)
+ << ", hardware_ns: " << static_cast<bool>(hardware_ns)
+ << ", low_latency_output: " << static_cast<bool>(low_latency_output)
+ << ", low_latency_input: " << static_cast<bool>(low_latency_input)
+ << ", pro_audio: " << static_cast<bool>(pro_audio)
+ << ", a_audio: " << static_cast<bool>(a_audio)
+ << ", sample_rate: " << static_cast<int>(sample_rate)
+ << ", output_channels: " << static_cast<int>(output_channels)
+ << ", input_channels: " << static_cast<int>(input_channels)
+ << ", output_buffer_size: " << static_cast<int>(output_buffer_size)
+ << ", input_buffer_size: " << static_cast<int>(input_buffer_size);
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ hardware_aec_ = hardware_aec;
+ hardware_agc_ = hardware_agc;
+ hardware_ns_ = hardware_ns;
+ low_latency_playout_ = low_latency_output;
+ low_latency_record_ = low_latency_input;
+ pro_audio_ = pro_audio;
+ a_audio_ = a_audio;
+ playout_parameters_.reset(sample_rate, static_cast<size_t>(output_channels),
+ static_cast<size_t>(output_buffer_size));
+ record_parameters_.reset(sample_rate, static_cast<size_t>(input_channels),
+ static_cast<size_t>(input_buffer_size));
+}
+
+const AudioParameters& AudioManager::GetPlayoutAudioParameters() {
+ RTC_CHECK(playout_parameters_.is_valid());
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ return playout_parameters_;
+}
+
+const AudioParameters& AudioManager::GetRecordAudioParameters() {
+ RTC_CHECK(record_parameters_.is_valid());
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ return record_parameters_;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_device/android/audio_manager.h b/third_party/libwebrtc/modules/audio_device/android/audio_manager.h
new file mode 100644
index 0000000000..900fc78a68
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/android/audio_manager.h
@@ -0,0 +1,225 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_DEVICE_ANDROID_AUDIO_MANAGER_H_
+#define MODULES_AUDIO_DEVICE_ANDROID_AUDIO_MANAGER_H_
+
+#include <SLES/OpenSLES.h>
+#include <jni.h>
+
+#include <memory>
+
+#include "api/sequence_checker.h"
+#include "modules/audio_device/android/audio_common.h"
+#include "modules/audio_device/android/opensles_common.h"
+#include "modules/audio_device/audio_device_config.h"
+#include "modules/audio_device/audio_device_generic.h"
+#include "modules/audio_device/include/audio_device_defines.h"
+#include "modules/utility/include/helpers_android.h"
+#include "modules/utility/include/jvm_android.h"
+
+namespace webrtc {
+
+// Implements support for functions in the WebRTC audio stack for Android that
+// rely on the AudioManager in android.media. It also populates an
+// AudioParameters structure with native audio parameters detected at
+// construction. This class does not make any audio-related modifications
+// unless Init() is called. Caching the audio parameters makes no changes; it
+// only reads data from the Java side.
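+//
+// A minimal usage sketch (illustrative only; error handling omitted):
+//
+//   AudioManager manager;
+//   manager.SetActiveAudioLayer(AudioDeviceModule::kAndroidJavaAudio);
+//   manager.Init();
+//   const AudioParameters& playout = manager.GetPlayoutAudioParameters();
+//   ...
+//   manager.Close();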
+class AudioManager {
+ public:
+ // Wraps the Java specific parts of the AudioManager into one helper class.
+ // Stores method IDs for all supported methods at construction and then
+ // allows calls like JavaAudioManager::Close() while hiding the Java/JNI
+ // parts that are associated with this call.
+ class JavaAudioManager {
+ public:
+ JavaAudioManager(NativeRegistration* native_registration,
+ std::unique_ptr<GlobalRef> audio_manager);
+ ~JavaAudioManager();
+
+ bool Init();
+ void Close();
+ bool IsCommunicationModeEnabled();
+ bool IsDeviceBlacklistedForOpenSLESUsage();
+
+ private:
+ std::unique_ptr<GlobalRef> audio_manager_;
+ jmethodID init_;
+ jmethodID dispose_;
+ jmethodID is_communication_mode_enabled_;
+ jmethodID is_device_blacklisted_for_open_sles_usage_;
+ };
+
+ AudioManager();
+ ~AudioManager();
+
+ // Sets the currently active audio layer combination. Must be called before
+ // Init().
+ void SetActiveAudioLayer(AudioDeviceModule::AudioLayer audio_layer);
+
+  // Creates and realizes the main (global) OpenSL engine object and returns
+  // a reference to it. The engine object is only created at the first call
+  // since OpenSL ES for Android only supports a single engine per application.
+  // Subsequent calls return the already created engine. The SL engine object
+  // is destroyed when the AudioManager object is deleted. This means that the
+  // engine object will be the first OpenSL ES object to be created and the
+  // last object to be destroyed.
+ // Note that NULL will be returned unless the audio layer is specified as
+ // AudioDeviceModule::kAndroidOpenSLESAudio or
+ // AudioDeviceModule::kAndroidJavaInputAndOpenSLESOutputAudio.
+ SLObjectItf GetOpenSLEngine();
+
+ // Initializes the audio manager and stores the current audio mode.
+ bool Init();
+ // Revert any setting done by Init().
+ bool Close();
+
+ // Returns true if current audio mode is AudioManager.MODE_IN_COMMUNICATION.
+ bool IsCommunicationModeEnabled() const;
+
+ // Native audio parameters stored during construction.
+ const AudioParameters& GetPlayoutAudioParameters();
+ const AudioParameters& GetRecordAudioParameters();
+
+ // Returns true if the device supports built-in audio effects for AEC, AGC
+ // and NS. Some devices can also be blacklisted for use in combination with
+ // platform effects and these devices will return false.
+  // Can currently only be used in combination with a Java based audio backend
+  // for the recording side (i.e., using the android.media.AudioRecord API).
+ bool IsAcousticEchoCancelerSupported() const;
+ bool IsAutomaticGainControlSupported() const;
+ bool IsNoiseSuppressorSupported() const;
+
+ // Returns true if the device supports the low-latency audio paths in
+ // combination with OpenSL ES.
+ bool IsLowLatencyPlayoutSupported() const;
+ bool IsLowLatencyRecordSupported() const;
+
+ // Returns true if the device supports (and has been configured for) stereo.
+ // Call the Java API WebRtcAudioManager.setStereoOutput/Input() with true as
+  // parameter to enable stereo. Default is mono in both directions and the
+ // setting is set once and for all when the audio manager object is created.
+ // TODO(henrika): stereo is not supported in combination with OpenSL ES.
+ bool IsStereoPlayoutSupported() const;
+ bool IsStereoRecordSupported() const;
+
+ // Returns true if the device supports pro-audio features in combination with
+ // OpenSL ES.
+ bool IsProAudioSupported() const;
+
+ // Returns true if the device supports AAudio.
+ bool IsAAudioSupported() const;
+
+ // Returns the estimated total delay of this device. Unit is in milliseconds.
+  // The value is set once at construction and never changes after that.
+ // Possible values are webrtc::kLowLatencyModeDelayEstimateInMilliseconds and
+ // webrtc::kHighLatencyModeDelayEstimateInMilliseconds.
+ int GetDelayEstimateInMilliseconds() const;
+
+ private:
+ // Called from Java side so we can cache the native audio parameters.
+ // This method will be called by the WebRtcAudioManager constructor, i.e.
+ // on the same thread that this object is created on.
+ static void JNICALL CacheAudioParameters(JNIEnv* env,
+ jobject obj,
+ jint sample_rate,
+ jint output_channels,
+ jint input_channels,
+ jboolean hardware_aec,
+ jboolean hardware_agc,
+ jboolean hardware_ns,
+ jboolean low_latency_output,
+ jboolean low_latency_input,
+ jboolean pro_audio,
+ jboolean a_audio,
+ jint output_buffer_size,
+ jint input_buffer_size,
+ jlong native_audio_manager);
+ void OnCacheAudioParameters(JNIEnv* env,
+ jint sample_rate,
+ jint output_channels,
+ jint input_channels,
+ jboolean hardware_aec,
+ jboolean hardware_agc,
+ jboolean hardware_ns,
+ jboolean low_latency_output,
+ jboolean low_latency_input,
+ jboolean pro_audio,
+ jboolean a_audio,
+ jint output_buffer_size,
+ jint input_buffer_size);
+
+ // Stores thread ID in the constructor.
+ // We can then use RTC_DCHECK_RUN_ON(&thread_checker_) to ensure that
+ // other methods are called from the same thread.
+ SequenceChecker thread_checker_;
+
+ // Calls JavaVM::AttachCurrentThread() if this thread is not attached at
+ // construction.
+ // Also ensures that DetachCurrentThread() is called at destruction.
+ JvmThreadConnector attach_thread_if_needed_;
+
+ // Wraps the JNI interface pointer and methods associated with it.
+ std::unique_ptr<JNIEnvironment> j_environment_;
+
+ // Contains factory method for creating the Java object.
+ std::unique_ptr<NativeRegistration> j_native_registration_;
+
+ // Wraps the Java specific parts of the AudioManager.
+ std::unique_ptr<AudioManager::JavaAudioManager> j_audio_manager_;
+
+ // Contains the selected audio layer specified by the AudioLayer enumerator
+ // in the AudioDeviceModule class.
+ AudioDeviceModule::AudioLayer audio_layer_;
+
+ // This object is the global entry point of the OpenSL ES API.
+  // After creating the engine object, the application can obtain this object's
+  // SLEngineItf interface. This interface contains creation methods for all
+  // the other object types in the API. None of these interfaces are realized
+ // by this class. It only provides access to the global engine object.
+ webrtc::ScopedSLObjectItf engine_object_;
+
+ // Set to true by Init() and false by Close().
+ bool initialized_;
+
+ // True if device supports hardware (or built-in) AEC.
+ bool hardware_aec_;
+ // True if device supports hardware (or built-in) AGC.
+ bool hardware_agc_;
+ // True if device supports hardware (or built-in) NS.
+ bool hardware_ns_;
+
+ // True if device supports the low-latency OpenSL ES audio path for output.
+ bool low_latency_playout_;
+
+ // True if device supports the low-latency OpenSL ES audio path for input.
+ bool low_latency_record_;
+
+ // True if device supports the low-latency OpenSL ES pro-audio path.
+ bool pro_audio_;
+
+ // True if device supports the low-latency AAudio audio path.
+ bool a_audio_;
+
+  // The delay estimate can take one of two fixed values depending on whether
+  // the device supports low-latency output or not.
+ int delay_estimate_in_milliseconds_;
+
+ // Contains native parameters (e.g. sample rate, channel configuration).
+ // Set at construction in OnCacheAudioParameters() which is called from
+ // Java on the same thread as this object is created on.
+ AudioParameters playout_parameters_;
+ AudioParameters record_parameters_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_DEVICE_ANDROID_AUDIO_MANAGER_H_
diff --git a/third_party/libwebrtc/modules/audio_device/android/audio_manager_unittest.cc b/third_party/libwebrtc/modules/audio_device/android/audio_manager_unittest.cc
new file mode 100644
index 0000000000..093eddd2e8
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/android/audio_manager_unittest.cc
@@ -0,0 +1,239 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_device/android/audio_manager.h"
+
+#include <SLES/OpenSLES_Android.h>
+
+#include "modules/audio_device/android/build_info.h"
+#include "modules/audio_device/android/ensure_initialized.h"
+#include "rtc_base/arraysize.h"
+#include "test/gtest.h"
+
+#define PRINT(...) fprintf(stderr, __VA_ARGS__);
+
+namespace webrtc {
+
+static const char kTag[] = " ";
+
+class AudioManagerTest : public ::testing::Test {
+ protected:
+ AudioManagerTest() {
+ // One-time initialization of JVM and application context. Ensures that we
+    // can make calls between C++ and Java.
+ webrtc::audiodevicemodule::EnsureInitialized();
+ audio_manager_.reset(new AudioManager());
+ SetActiveAudioLayer();
+ playout_parameters_ = audio_manager()->GetPlayoutAudioParameters();
+ record_parameters_ = audio_manager()->GetRecordAudioParameters();
+ }
+
+ AudioManager* audio_manager() const { return audio_manager_.get(); }
+
+ // A valid audio layer must always be set before calling Init(), hence we
+ // might as well make it a part of the test fixture.
+ void SetActiveAudioLayer() {
+ EXPECT_EQ(0, audio_manager()->GetDelayEstimateInMilliseconds());
+ audio_manager()->SetActiveAudioLayer(AudioDeviceModule::kAndroidJavaAudio);
+ EXPECT_NE(0, audio_manager()->GetDelayEstimateInMilliseconds());
+ }
+
+  // One way to verify that the engine object is valid is to obtain the
+  // SL Engine interface, since it exposes creation methods for all the OpenSL
+  // ES object types and is only supported on the engine object. This method
+  // also verifies that the engine object supports at least one interface.
+  // Note that the test below is not a full test of the SLEngineItf object,
+  // only a simple sanity check that the global engine object is OK.
+ void ValidateSLEngine(SLObjectItf engine_object) {
+ EXPECT_NE(nullptr, engine_object);
+ // Get the SL Engine interface which is exposed by the engine object.
+ SLEngineItf engine;
+ SLresult result =
+ (*engine_object)->GetInterface(engine_object, SL_IID_ENGINE, &engine);
+ EXPECT_EQ(result, SL_RESULT_SUCCESS) << "GetInterface() on engine failed";
+ // Ensure that the SL Engine interface exposes at least one interface.
+ SLuint32 object_id = SL_OBJECTID_ENGINE;
+ SLuint32 num_supported_interfaces = 0;
+ result = (*engine)->QueryNumSupportedInterfaces(engine, object_id,
+ &num_supported_interfaces);
+ EXPECT_EQ(result, SL_RESULT_SUCCESS)
+ << "QueryNumSupportedInterfaces() failed";
+ EXPECT_GE(num_supported_interfaces, 1u);
+ }
+
+ std::unique_ptr<AudioManager> audio_manager_;
+ AudioParameters playout_parameters_;
+ AudioParameters record_parameters_;
+};
+
+TEST_F(AudioManagerTest, ConstructDestruct) {}
+
+// It should not be possible to create an OpenSL engine object if Java based
+// audio is requested in both directions.
+TEST_F(AudioManagerTest, GetOpenSLEngineShouldFailForJavaAudioLayer) {
+ audio_manager()->SetActiveAudioLayer(AudioDeviceModule::kAndroidJavaAudio);
+ SLObjectItf engine_object = audio_manager()->GetOpenSLEngine();
+ EXPECT_EQ(nullptr, engine_object);
+}
+
+// It should be possible to create an OpenSL engine object if OpenSL ES based
+// audio is requested in any direction.
+TEST_F(AudioManagerTest, GetOpenSLEngineShouldSucceedForOpenSLESAudioLayer) {
+  // List of supported audio layers that use OpenSL ES audio.
+ const AudioDeviceModule::AudioLayer opensles_audio[] = {
+ AudioDeviceModule::kAndroidOpenSLESAudio,
+ AudioDeviceModule::kAndroidJavaInputAndOpenSLESOutputAudio};
+ // Verify that the global (singleton) OpenSL Engine can be acquired for all
+  // audio layers that use OpenSL ES. Note that the engine is only created once.
+ for (const AudioDeviceModule::AudioLayer audio_layer : opensles_audio) {
+ audio_manager()->SetActiveAudioLayer(audio_layer);
+ SLObjectItf engine_object = audio_manager()->GetOpenSLEngine();
+ EXPECT_NE(nullptr, engine_object);
+ // Perform a simple sanity check of the created engine object.
+ ValidateSLEngine(engine_object);
+ }
+}
+
+TEST_F(AudioManagerTest, InitClose) {
+ EXPECT_TRUE(audio_manager()->Init());
+ EXPECT_TRUE(audio_manager()->Close());
+}
+
+TEST_F(AudioManagerTest, IsAcousticEchoCancelerSupported) {
+ PRINT("%sAcoustic Echo Canceler support: %s\n", kTag,
+ audio_manager()->IsAcousticEchoCancelerSupported() ? "Yes" : "No");
+}
+
+TEST_F(AudioManagerTest, IsAutomaticGainControlSupported) {
+ EXPECT_FALSE(audio_manager()->IsAutomaticGainControlSupported());
+}
+
+TEST_F(AudioManagerTest, IsNoiseSuppressorSupported) {
+ PRINT("%sNoise Suppressor support: %s\n", kTag,
+ audio_manager()->IsNoiseSuppressorSupported() ? "Yes" : "No");
+}
+
+TEST_F(AudioManagerTest, IsLowLatencyPlayoutSupported) {
+ PRINT("%sLow latency output support: %s\n", kTag,
+ audio_manager()->IsLowLatencyPlayoutSupported() ? "Yes" : "No");
+}
+
+TEST_F(AudioManagerTest, IsLowLatencyRecordSupported) {
+ PRINT("%sLow latency input support: %s\n", kTag,
+ audio_manager()->IsLowLatencyRecordSupported() ? "Yes" : "No");
+}
+
+TEST_F(AudioManagerTest, IsProAudioSupported) {
+ PRINT("%sPro audio support: %s\n", kTag,
+ audio_manager()->IsProAudioSupported() ? "Yes" : "No");
+}
+
+// Verify that playout side is configured for mono by default.
+TEST_F(AudioManagerTest, IsStereoPlayoutSupported) {
+ EXPECT_FALSE(audio_manager()->IsStereoPlayoutSupported());
+}
+
+// Verify that recording side is configured for mono by default.
+TEST_F(AudioManagerTest, IsStereoRecordSupported) {
+ EXPECT_FALSE(audio_manager()->IsStereoRecordSupported());
+}
+
+TEST_F(AudioManagerTest, ShowAudioParameterInfo) {
+ const bool low_latency_out = audio_manager()->IsLowLatencyPlayoutSupported();
+ const bool low_latency_in = audio_manager()->IsLowLatencyRecordSupported();
+ PRINT("PLAYOUT:\n");
+ PRINT("%saudio layer: %s\n", kTag,
+ low_latency_out ? "Low latency OpenSL" : "Java/JNI based AudioTrack");
+ PRINT("%ssample rate: %d Hz\n", kTag, playout_parameters_.sample_rate());
+ PRINT("%schannels: %zu\n", kTag, playout_parameters_.channels());
+ PRINT("%sframes per buffer: %zu <=> %.2f ms\n", kTag,
+ playout_parameters_.frames_per_buffer(),
+ playout_parameters_.GetBufferSizeInMilliseconds());
+ PRINT("RECORD: \n");
+ PRINT("%saudio layer: %s\n", kTag,
+ low_latency_in ? "Low latency OpenSL" : "Java/JNI based AudioRecord");
+ PRINT("%ssample rate: %d Hz\n", kTag, record_parameters_.sample_rate());
+ PRINT("%schannels: %zu\n", kTag, record_parameters_.channels());
+ PRINT("%sframes per buffer: %zu <=> %.2f ms\n", kTag,
+ record_parameters_.frames_per_buffer(),
+ record_parameters_.GetBufferSizeInMilliseconds());
+}
+
+// The audio device module only supports the same sample rate in both directions.
+// In addition, in full-duplex low-latency mode (OpenSL ES), both input and
+// output must use the same native buffer size to allow for usage of the fast
+// audio track in Android.
+TEST_F(AudioManagerTest, VerifyAudioParameters) {
+ const bool low_latency_out = audio_manager()->IsLowLatencyPlayoutSupported();
+ const bool low_latency_in = audio_manager()->IsLowLatencyRecordSupported();
+ EXPECT_EQ(playout_parameters_.sample_rate(),
+ record_parameters_.sample_rate());
+ if (low_latency_out && low_latency_in) {
+ EXPECT_EQ(playout_parameters_.frames_per_buffer(),
+ record_parameters_.frames_per_buffer());
+ }
+}
+
+// Add device-specific information to the test for logging purposes.
+TEST_F(AudioManagerTest, ShowDeviceInfo) {
+ BuildInfo build_info;
+ PRINT("%smodel: %s\n", kTag, build_info.GetDeviceModel().c_str());
+ PRINT("%sbrand: %s\n", kTag, build_info.GetBrand().c_str());
+ PRINT("%smanufacturer: %s\n", kTag,
+ build_info.GetDeviceManufacturer().c_str());
+}
+
+// Add Android build information to the test for logging purposes.
+TEST_F(AudioManagerTest, ShowBuildInfo) {
+ BuildInfo build_info;
+ PRINT("%sbuild release: %s\n", kTag, build_info.GetBuildRelease().c_str());
+ PRINT("%sbuild id: %s\n", kTag, build_info.GetAndroidBuildId().c_str());
+ PRINT("%sbuild type: %s\n", kTag, build_info.GetBuildType().c_str());
+ PRINT("%sSDK version: %d\n", kTag, build_info.GetSdkVersion());
+}
+
+// Basic test of the AudioParameters class using default construction where
+// all members are set to zero.
+TEST_F(AudioManagerTest, AudioParametersWithDefaultConstruction) {
+ AudioParameters params;
+ EXPECT_FALSE(params.is_valid());
+ EXPECT_EQ(0, params.sample_rate());
+ EXPECT_EQ(0U, params.channels());
+ EXPECT_EQ(0U, params.frames_per_buffer());
+ EXPECT_EQ(0U, params.frames_per_10ms_buffer());
+ EXPECT_EQ(0U, params.GetBytesPerFrame());
+ EXPECT_EQ(0U, params.GetBytesPerBuffer());
+ EXPECT_EQ(0U, params.GetBytesPer10msBuffer());
+ EXPECT_EQ(0.0f, params.GetBufferSizeInMilliseconds());
+}
+
+// Basic test of the AudioParameters class using non default construction.
+TEST_F(AudioManagerTest, AudioParametersWithNonDefaultConstruction) {
+ const int kSampleRate = 48000;
+ const size_t kChannels = 1;
+ const size_t kFramesPerBuffer = 480;
+ const size_t kFramesPer10msBuffer = 480;
+ const size_t kBytesPerFrame = 2;
+ const float kBufferSizeInMs = 10.0f;
+ AudioParameters params(kSampleRate, kChannels, kFramesPerBuffer);
+ EXPECT_TRUE(params.is_valid());
+ EXPECT_EQ(kSampleRate, params.sample_rate());
+ EXPECT_EQ(kChannels, params.channels());
+ EXPECT_EQ(kFramesPerBuffer, params.frames_per_buffer());
+ EXPECT_EQ(static_cast<size_t>(kSampleRate / 100),
+ params.frames_per_10ms_buffer());
+ EXPECT_EQ(kBytesPerFrame, params.GetBytesPerFrame());
+ EXPECT_EQ(kBytesPerFrame * kFramesPerBuffer, params.GetBytesPerBuffer());
+ EXPECT_EQ(kBytesPerFrame * kFramesPer10msBuffer,
+ params.GetBytesPer10msBuffer());
+ EXPECT_EQ(kBufferSizeInMs, params.GetBufferSizeInMilliseconds());
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_device/android/audio_record_jni.cc b/third_party/libwebrtc/modules/audio_device/android/audio_record_jni.cc
new file mode 100644
index 0000000000..919eabb983
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/android/audio_record_jni.cc
@@ -0,0 +1,280 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_device/android/audio_record_jni.h"
+
+#include <string>
+#include <utility>
+
+#include "modules/audio_device/android/audio_common.h"
+#include "rtc_base/arraysize.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/platform_thread.h"
+#include "rtc_base/time_utils.h"
+#include "system_wrappers/include/metrics.h"
+
+namespace webrtc {
+
+namespace {
+// Scoped class which logs its lifetime as a UMA statistic. It generates
+// a histogram which measures the time it takes for a method/scope to execute.
+class ScopedHistogramTimer {
+ public:
+ explicit ScopedHistogramTimer(const std::string& name)
+ : histogram_name_(name), start_time_ms_(rtc::TimeMillis()) {}
+ ~ScopedHistogramTimer() {
+ const int64_t life_time_ms = rtc::TimeSince(start_time_ms_);
+ RTC_HISTOGRAM_COUNTS_1000(histogram_name_, life_time_ms);
+ RTC_LOG(LS_INFO) << histogram_name_ << ": " << life_time_ms;
+ }
+
+ private:
+ const std::string histogram_name_;
+ int64_t start_time_ms_;
+};
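+
+// Typical usage (see InitRecording() below): declaring
+//   ScopedHistogramTimer timer("WebRTC.Audio.InitRecordingDurationMs");
+// at the top of a scope records the scope's execution time in the named UMA
+// histogram when the scope exits.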
+} // namespace
+
+// AudioRecordJni::JavaAudioRecord implementation.
+AudioRecordJni::JavaAudioRecord::JavaAudioRecord(
+ NativeRegistration* native_reg,
+ std::unique_ptr<GlobalRef> audio_record)
+ : audio_record_(std::move(audio_record)),
+ init_recording_(native_reg->GetMethodId("initRecording", "(II)I")),
+ start_recording_(native_reg->GetMethodId("startRecording", "()Z")),
+ stop_recording_(native_reg->GetMethodId("stopRecording", "()Z")),
+ enable_built_in_aec_(native_reg->GetMethodId("enableBuiltInAEC", "(Z)Z")),
+ enable_built_in_ns_(native_reg->GetMethodId("enableBuiltInNS", "(Z)Z")) {}
+
+AudioRecordJni::JavaAudioRecord::~JavaAudioRecord() {}
+
+int AudioRecordJni::JavaAudioRecord::InitRecording(int sample_rate,
+ size_t channels) {
+ return audio_record_->CallIntMethod(init_recording_,
+ static_cast<jint>(sample_rate),
+ static_cast<jint>(channels));
+}
+
+bool AudioRecordJni::JavaAudioRecord::StartRecording() {
+ return audio_record_->CallBooleanMethod(start_recording_);
+}
+
+bool AudioRecordJni::JavaAudioRecord::StopRecording() {
+ return audio_record_->CallBooleanMethod(stop_recording_);
+}
+
+bool AudioRecordJni::JavaAudioRecord::EnableBuiltInAEC(bool enable) {
+ return audio_record_->CallBooleanMethod(enable_built_in_aec_,
+ static_cast<jboolean>(enable));
+}
+
+bool AudioRecordJni::JavaAudioRecord::EnableBuiltInNS(bool enable) {
+ return audio_record_->CallBooleanMethod(enable_built_in_ns_,
+ static_cast<jboolean>(enable));
+}
+
+// AudioRecordJni implementation.
+AudioRecordJni::AudioRecordJni(AudioManager* audio_manager)
+ : j_environment_(JVM::GetInstance()->environment()),
+ audio_manager_(audio_manager),
+ audio_parameters_(audio_manager->GetRecordAudioParameters()),
+ total_delay_in_milliseconds_(0),
+ direct_buffer_address_(nullptr),
+ direct_buffer_capacity_in_bytes_(0),
+ frames_per_buffer_(0),
+ initialized_(false),
+ recording_(false),
+ audio_device_buffer_(nullptr) {
+ RTC_LOG(LS_INFO) << "ctor";
+ RTC_DCHECK(audio_parameters_.is_valid());
+ RTC_CHECK(j_environment_);
+ JNINativeMethod native_methods[] = {
+ {"nativeCacheDirectBufferAddress", "(Ljava/nio/ByteBuffer;J)V",
+ reinterpret_cast<void*>(
+ &webrtc::AudioRecordJni::CacheDirectBufferAddress)},
+ {"nativeDataIsRecorded", "(IJ)V",
+ reinterpret_cast<void*>(&webrtc::AudioRecordJni::DataIsRecorded)}};
+ j_native_registration_ = j_environment_->RegisterNatives(
+ "org/webrtc/voiceengine/WebRtcAudioRecord", native_methods,
+ arraysize(native_methods));
+ j_audio_record_.reset(
+ new JavaAudioRecord(j_native_registration_.get(),
+ j_native_registration_->NewObject(
+ "<init>", "(J)V", PointerTojlong(this))));
+ // Detach from this thread since we want to use the checker to verify calls
+ // from the Java based audio thread.
+ thread_checker_java_.Detach();
+}
+
+AudioRecordJni::~AudioRecordJni() {
+ RTC_LOG(LS_INFO) << "dtor";
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ Terminate();
+}
+
+int32_t AudioRecordJni::Init() {
+ RTC_LOG(LS_INFO) << "Init";
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ return 0;
+}
+
+int32_t AudioRecordJni::Terminate() {
+ RTC_LOG(LS_INFO) << "Terminate";
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ StopRecording();
+ return 0;
+}
+
+int32_t AudioRecordJni::InitRecording() {
+ RTC_LOG(LS_INFO) << "InitRecording";
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ RTC_DCHECK(!initialized_);
+ RTC_DCHECK(!recording_);
+ ScopedHistogramTimer timer("WebRTC.Audio.InitRecordingDurationMs");
+ int frames_per_buffer = j_audio_record_->InitRecording(
+ audio_parameters_.sample_rate(), audio_parameters_.channels());
+ if (frames_per_buffer < 0) {
+ direct_buffer_address_ = nullptr;
+ RTC_LOG(LS_ERROR) << "InitRecording failed";
+ return -1;
+ }
+ frames_per_buffer_ = static_cast<size_t>(frames_per_buffer);
+ RTC_LOG(LS_INFO) << "frames_per_buffer: " << frames_per_buffer_;
+ const size_t bytes_per_frame = audio_parameters_.channels() * sizeof(int16_t);
+ RTC_CHECK_EQ(direct_buffer_capacity_in_bytes_,
+ frames_per_buffer_ * bytes_per_frame);
+ RTC_CHECK_EQ(frames_per_buffer_, audio_parameters_.frames_per_10ms_buffer());
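+  // Example: at 48000 Hz mono, frames_per_buffer_ is 480 (one 10 ms buffer)
+  // and the Java side is expected to allocate a 960 byte direct buffer.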
+ initialized_ = true;
+ return 0;
+}
+
+int32_t AudioRecordJni::StartRecording() {
+ RTC_LOG(LS_INFO) << "StartRecording";
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ RTC_DCHECK(!recording_);
+ if (!initialized_) {
+ RTC_DLOG(LS_WARNING)
+ << "Recording can not start since InitRecording must succeed first";
+ return 0;
+ }
+ ScopedHistogramTimer timer("WebRTC.Audio.StartRecordingDurationMs");
+ if (!j_audio_record_->StartRecording()) {
+ RTC_LOG(LS_ERROR) << "StartRecording failed";
+ return -1;
+ }
+ recording_ = true;
+ return 0;
+}
+
+int32_t AudioRecordJni::StopRecording() {
+ RTC_LOG(LS_INFO) << "StopRecording";
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ if (!initialized_ || !recording_) {
+ return 0;
+ }
+ if (!j_audio_record_->StopRecording()) {
+ RTC_LOG(LS_ERROR) << "StopRecording failed";
+ return -1;
+ }
+ // If we don't detach here, we will hit a RTC_DCHECK in OnDataIsRecorded()
+ // next time StartRecording() is called since it will create a new Java
+ // thread.
+ thread_checker_java_.Detach();
+ initialized_ = false;
+ recording_ = false;
+ direct_buffer_address_ = nullptr;
+ return 0;
+}
+
+void AudioRecordJni::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
+ RTC_LOG(LS_INFO) << "AttachAudioBuffer";
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ audio_device_buffer_ = audioBuffer;
+ const int sample_rate_hz = audio_parameters_.sample_rate();
+ RTC_LOG(LS_INFO) << "SetRecordingSampleRate(" << sample_rate_hz << ")";
+ audio_device_buffer_->SetRecordingSampleRate(sample_rate_hz);
+ const size_t channels = audio_parameters_.channels();
+ RTC_LOG(LS_INFO) << "SetRecordingChannels(" << channels << ")";
+ audio_device_buffer_->SetRecordingChannels(channels);
+ total_delay_in_milliseconds_ =
+ audio_manager_->GetDelayEstimateInMilliseconds();
+ RTC_DCHECK_GT(total_delay_in_milliseconds_, 0);
+ RTC_LOG(LS_INFO) << "total_delay_in_milliseconds: "
+ << total_delay_in_milliseconds_;
+}
+
+int32_t AudioRecordJni::EnableBuiltInAEC(bool enable) {
+ RTC_LOG(LS_INFO) << "EnableBuiltInAEC(" << enable << ")";
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ return j_audio_record_->EnableBuiltInAEC(enable) ? 0 : -1;
+}
+
+int32_t AudioRecordJni::EnableBuiltInAGC(bool enable) {
+ // TODO(henrika): possibly remove when no longer used by any client.
+ RTC_CHECK_NOTREACHED();
+}
+
+int32_t AudioRecordJni::EnableBuiltInNS(bool enable) {
+ RTC_LOG(LS_INFO) << "EnableBuiltInNS(" << enable << ")";
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ return j_audio_record_->EnableBuiltInNS(enable) ? 0 : -1;
+}
+
+JNI_FUNCTION_ALIGN
+void JNICALL AudioRecordJni::CacheDirectBufferAddress(JNIEnv* env,
+ jobject obj,
+ jobject byte_buffer,
+ jlong nativeAudioRecord) {
+ webrtc::AudioRecordJni* this_object =
+ reinterpret_cast<webrtc::AudioRecordJni*>(nativeAudioRecord);
+ this_object->OnCacheDirectBufferAddress(env, byte_buffer);
+}
+
+void AudioRecordJni::OnCacheDirectBufferAddress(JNIEnv* env,
+ jobject byte_buffer) {
+ RTC_LOG(LS_INFO) << "OnCacheDirectBufferAddress";
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ RTC_DCHECK(!direct_buffer_address_);
+ direct_buffer_address_ = env->GetDirectBufferAddress(byte_buffer);
+ jlong capacity = env->GetDirectBufferCapacity(byte_buffer);
+ RTC_LOG(LS_INFO) << "direct buffer capacity: " << capacity;
+ direct_buffer_capacity_in_bytes_ = static_cast<size_t>(capacity);
+}
+
+JNI_FUNCTION_ALIGN
+void JNICALL AudioRecordJni::DataIsRecorded(JNIEnv* env,
+ jobject obj,
+ jint length,
+ jlong nativeAudioRecord) {
+ webrtc::AudioRecordJni* this_object =
+ reinterpret_cast<webrtc::AudioRecordJni*>(nativeAudioRecord);
+ this_object->OnDataIsRecorded(length);
+}
+
+// This method is called on a high-priority thread from Java. The name of
+// the thread is 'AudioRecordThread'.
+void AudioRecordJni::OnDataIsRecorded(int length) {
+ RTC_DCHECK(thread_checker_java_.IsCurrent());
+ if (!audio_device_buffer_) {
+ RTC_LOG(LS_ERROR) << "AttachAudioBuffer has not been called";
+ return;
+ }
+ audio_device_buffer_->SetRecordedBuffer(direct_buffer_address_,
+ frames_per_buffer_);
+ // We provide one (combined) fixed delay estimate for the APM and use the
+  // `playDelayMs` parameter only. Components like the AEC only see the sum
+  // of `playDelayMs` and `recDelayMs`, hence the distribution does not matter.
+ audio_device_buffer_->SetVQEData(total_delay_in_milliseconds_, 0);
+ if (audio_device_buffer_->DeliverRecordedData() == -1) {
+ RTC_LOG(LS_INFO) << "AudioDeviceBuffer::DeliverRecordedData failed";
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_device/android/audio_record_jni.h b/third_party/libwebrtc/modules/audio_device/android/audio_record_jni.h
new file mode 100644
index 0000000000..66a6a89f41
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/android/audio_record_jni.h
@@ -0,0 +1,168 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_DEVICE_ANDROID_AUDIO_RECORD_JNI_H_
+#define MODULES_AUDIO_DEVICE_ANDROID_AUDIO_RECORD_JNI_H_
+
+#include <jni.h>
+
+#include <memory>
+
+#include "api/sequence_checker.h"
+#include "modules/audio_device/android/audio_manager.h"
+#include "modules/audio_device/audio_device_generic.h"
+#include "modules/audio_device/include/audio_device_defines.h"
+#include "modules/utility/include/helpers_android.h"
+#include "modules/utility/include/jvm_android.h"
+
+namespace webrtc {
+
+// Implements 16-bit mono PCM audio input support for Android using the Java
+// AudioRecord interface. Most of the work is done by its Java counterpart in
+// WebRtcAudioRecord.java. This class is created and lives on a thread in
+// C++-land, but recorded audio buffers are delivered on a high-priority
+// thread managed by the Java class.
+//
+// The Java class makes use of AudioEffect features (mainly AEC) which are
+// first available in Jelly Bean. If it is instantiated on earlier SDKs,
+// the AEC provided by the APM in WebRTC must be used and enabled
+// separately instead.
+//
+// An instance must be created and destroyed on one and the same thread.
+// All public methods must also be called on the same thread. A thread checker
+// will RTC_DCHECK if any method is called on an invalid thread.
+//
+// This class uses JvmThreadConnector to attach to a Java VM if needed
+// and detach when the object goes out of scope. Additional thread checking
+// guarantees that no other (possibly non attached) thread is used.
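+//
+// A typical call sequence (illustrative only; the exact order is driven by
+// the owning audio device module, and `audio_manager` and
+// `audio_device_buffer` are assumed to exist):
+//
+//   AudioRecordJni record(audio_manager);
+//   record.Init();
+//   record.AttachAudioBuffer(audio_device_buffer);
+//   record.InitRecording();
+//   record.StartRecording();  // Audio now arrives via OnDataIsRecorded().
+//   ...
+//   record.StopRecording();
+//   record.Terminate();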
+class AudioRecordJni {
+ public:
+ // Wraps the Java specific parts of the AudioRecordJni into one helper class.
+ class JavaAudioRecord {
+ public:
+ JavaAudioRecord(NativeRegistration* native_registration,
+                    std::unique_ptr<GlobalRef> audio_record);
+ ~JavaAudioRecord();
+
+ int InitRecording(int sample_rate, size_t channels);
+ bool StartRecording();
+ bool StopRecording();
+ bool EnableBuiltInAEC(bool enable);
+ bool EnableBuiltInNS(bool enable);
+
+ private:
+ std::unique_ptr<GlobalRef> audio_record_;
+ jmethodID init_recording_;
+ jmethodID start_recording_;
+ jmethodID stop_recording_;
+ jmethodID enable_built_in_aec_;
+ jmethodID enable_built_in_ns_;
+ };
+
+ explicit AudioRecordJni(AudioManager* audio_manager);
+ ~AudioRecordJni();
+
+ int32_t Init();
+ int32_t Terminate();
+
+ int32_t InitRecording();
+ bool RecordingIsInitialized() const { return initialized_; }
+
+ int32_t StartRecording();
+ int32_t StopRecording();
+ bool Recording() const { return recording_; }
+
+ void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer);
+
+ int32_t EnableBuiltInAEC(bool enable);
+ int32_t EnableBuiltInAGC(bool enable);
+ int32_t EnableBuiltInNS(bool enable);
+
+ private:
+  // Called from the Java side so we can cache the address of the Java-managed
+ // `byte_buffer` in `direct_buffer_address_`. The size of the buffer
+ // is also stored in `direct_buffer_capacity_in_bytes_`.
+ // This method will be called by the WebRtcAudioRecord constructor, i.e.,
+ // on the same thread that this object is created on.
+ static void JNICALL CacheDirectBufferAddress(JNIEnv* env,
+ jobject obj,
+ jobject byte_buffer,
+ jlong nativeAudioRecord);
+ void OnCacheDirectBufferAddress(JNIEnv* env, jobject byte_buffer);
+
+ // Called periodically by the Java based WebRtcAudioRecord object when
+ // recording has started. Each call indicates that there are `length` new
+ // bytes recorded in the memory area `direct_buffer_address_` and it is
+ // now time to send these to the consumer.
+ // This method is called on a high-priority thread from Java. The name of
+ // the thread is 'AudioRecordThread'.
+ static void JNICALL DataIsRecorded(JNIEnv* env,
+ jobject obj,
+ jint length,
+ jlong nativeAudioRecord);
+ void OnDataIsRecorded(int length);
+
+ // Stores thread ID in constructor.
+ SequenceChecker thread_checker_;
+
+ // Stores thread ID in first call to OnDataIsRecorded() from high-priority
+ // thread in Java. Detached during construction of this object.
+ SequenceChecker thread_checker_java_;
+
+ // Calls JavaVM::AttachCurrentThread() if this thread is not attached at
+ // construction.
+ // Also ensures that DetachCurrentThread() is called at destruction.
+ JvmThreadConnector attach_thread_if_needed_;
+
+ // Wraps the JNI interface pointer and methods associated with it.
+ std::unique_ptr<JNIEnvironment> j_environment_;
+
+ // Contains factory method for creating the Java object.
+ std::unique_ptr<NativeRegistration> j_native_registration_;
+
+ // Wraps the Java specific parts of the AudioRecordJni class.
+ std::unique_ptr<AudioRecordJni::JavaAudioRecord> j_audio_record_;
+
+  // Raw pointer to the audio manager.
+ const AudioManager* audio_manager_;
+
+ // Contains audio parameters provided to this class at construction by the
+ // AudioManager.
+ const AudioParameters audio_parameters_;
+
+ // Delay estimate of the total round-trip delay (input + output).
+  // Fixed value set once in AttachAudioBuffer() and it can take one of two
+ // possible values. See audio_common.h for details.
+ int total_delay_in_milliseconds_;
+
+ // Cached copy of address to direct audio buffer owned by `j_audio_record_`.
+ void* direct_buffer_address_;
+
+ // Number of bytes in the direct audio buffer owned by `j_audio_record_`.
+ size_t direct_buffer_capacity_in_bytes_;
+
+  // Number of audio frames per audio buffer. Each audio frame corresponds to
+ // one sample of PCM mono data at 16 bits per sample. Hence, each audio
+ // frame contains 2 bytes (given that the Java layer only supports mono).
+ // Example: 480 for 48000 Hz or 441 for 44100 Hz.
+ size_t frames_per_buffer_;
+
+ bool initialized_;
+
+ bool recording_;
+
+  // Raw pointer handle provided to us in AttachAudioBuffer(). Owned by the
+  // AudioDeviceModuleImpl class, which is created via
+  // AudioDeviceModule::Create().
+ AudioDeviceBuffer* audio_device_buffer_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_DEVICE_ANDROID_AUDIO_RECORD_JNI_H_
diff --git a/third_party/libwebrtc/modules/audio_device/android/audio_track_jni.cc b/third_party/libwebrtc/modules/audio_device/android/audio_track_jni.cc
new file mode 100644
index 0000000000..5afa1ec252
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/android/audio_track_jni.cc
@@ -0,0 +1,296 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_device/android/audio_track_jni.h"
+
+#include <utility>
+
+#include "modules/audio_device/android/audio_manager.h"
+#include "rtc_base/arraysize.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/platform_thread.h"
+#include "system_wrappers/include/field_trial.h"
+#include "system_wrappers/include/metrics.h"
+
+namespace webrtc {
+
+// AudioTrackJni::JavaAudioTrack implementation.
+AudioTrackJni::JavaAudioTrack::JavaAudioTrack(
+ NativeRegistration* native_reg,
+ std::unique_ptr<GlobalRef> audio_track)
+ : audio_track_(std::move(audio_track)),
+ init_playout_(native_reg->GetMethodId("initPlayout", "(IID)I")),
+ start_playout_(native_reg->GetMethodId("startPlayout", "()Z")),
+ stop_playout_(native_reg->GetMethodId("stopPlayout", "()Z")),
+ set_stream_volume_(native_reg->GetMethodId("setStreamVolume", "(I)Z")),
+ get_stream_max_volume_(
+ native_reg->GetMethodId("getStreamMaxVolume", "()I")),
+ get_stream_volume_(native_reg->GetMethodId("getStreamVolume", "()I")),
+ get_buffer_size_in_frames_(
+ native_reg->GetMethodId("getBufferSizeInFrames", "()I")) {}
+
+AudioTrackJni::JavaAudioTrack::~JavaAudioTrack() {}
+
+bool AudioTrackJni::JavaAudioTrack::InitPlayout(int sample_rate, int channels) {
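+  // The field trial string is expected to carry the factor as a decimal
+  // value; strtod() returns 0 when parsing fails, in which case the factor
+  // falls back to 1.0 below.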
+ double buffer_size_factor =
+ strtod(webrtc::field_trial::FindFullName(
+ "WebRTC-AudioDevicePlayoutBufferSizeFactor")
+ .c_str(),
+ nullptr);
+ if (buffer_size_factor == 0)
+ buffer_size_factor = 1.0;
+ int requested_buffer_size_bytes = audio_track_->CallIntMethod(
+ init_playout_, sample_rate, channels, buffer_size_factor);
+ // Update UMA histograms for both the requested and actual buffer size.
+ if (requested_buffer_size_bytes >= 0) {
+ // To avoid division by zero, we assume the sample rate is 48k if an invalid
+ // value is found.
+ sample_rate = sample_rate <= 0 ? 48000 : sample_rate;
+ // This calculation assumes that audio is mono.
+ const int requested_buffer_size_ms =
+ (requested_buffer_size_bytes * 1000) / (2 * sample_rate);
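+    // Example: 7680 bytes at 48000 Hz corresponds to
+    // 7680 * 1000 / (2 * 48000) = 80 ms.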
+ RTC_HISTOGRAM_COUNTS("WebRTC.Audio.AndroidNativeRequestedAudioBufferSizeMs",
+ requested_buffer_size_ms, 0, 1000, 100);
+ int actual_buffer_size_frames =
+ audio_track_->CallIntMethod(get_buffer_size_in_frames_);
+ if (actual_buffer_size_frames >= 0) {
+ const int actual_buffer_size_ms =
+ actual_buffer_size_frames * 1000 / sample_rate;
+ RTC_HISTOGRAM_COUNTS("WebRTC.Audio.AndroidNativeAudioBufferSizeMs",
+ actual_buffer_size_ms, 0, 1000, 100);
+ }
+ return true;
+ }
+ return false;
+}
+
+bool AudioTrackJni::JavaAudioTrack::StartPlayout() {
+ return audio_track_->CallBooleanMethod(start_playout_);
+}
+
+bool AudioTrackJni::JavaAudioTrack::StopPlayout() {
+ return audio_track_->CallBooleanMethod(stop_playout_);
+}
+
+bool AudioTrackJni::JavaAudioTrack::SetStreamVolume(int volume) {
+ return audio_track_->CallBooleanMethod(set_stream_volume_, volume);
+}
+
+int AudioTrackJni::JavaAudioTrack::GetStreamMaxVolume() {
+ return audio_track_->CallIntMethod(get_stream_max_volume_);
+}
+
+int AudioTrackJni::JavaAudioTrack::GetStreamVolume() {
+ return audio_track_->CallIntMethod(get_stream_volume_);
+}
+
+// TODO(henrika): possibly extend usage of AudioManager and add it as a member.
+AudioTrackJni::AudioTrackJni(AudioManager* audio_manager)
+ : j_environment_(JVM::GetInstance()->environment()),
+ audio_parameters_(audio_manager->GetPlayoutAudioParameters()),
+ direct_buffer_address_(nullptr),
+ direct_buffer_capacity_in_bytes_(0),
+ frames_per_buffer_(0),
+ initialized_(false),
+ playing_(false),
+ audio_device_buffer_(nullptr) {
+ RTC_LOG(LS_INFO) << "ctor";
+ RTC_DCHECK(audio_parameters_.is_valid());
+ RTC_CHECK(j_environment_);
+ JNINativeMethod native_methods[] = {
+ {"nativeCacheDirectBufferAddress", "(Ljava/nio/ByteBuffer;J)V",
+ reinterpret_cast<void*>(
+ &webrtc::AudioTrackJni::CacheDirectBufferAddress)},
+ {"nativeGetPlayoutData", "(IJ)V",
+ reinterpret_cast<void*>(&webrtc::AudioTrackJni::GetPlayoutData)}};
+ j_native_registration_ = j_environment_->RegisterNatives(
+ "org/webrtc/voiceengine/WebRtcAudioTrack", native_methods,
+ arraysize(native_methods));
+ j_audio_track_.reset(
+ new JavaAudioTrack(j_native_registration_.get(),
+ j_native_registration_->NewObject(
+ "<init>", "(J)V", PointerTojlong(this))));
+ // Detach from this thread since we want to use the checker to verify calls
+ // from the Java based audio thread.
+ thread_checker_java_.Detach();
+}
+
+AudioTrackJni::~AudioTrackJni() {
+ RTC_LOG(LS_INFO) << "dtor";
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ Terminate();
+}
+
+int32_t AudioTrackJni::Init() {
+ RTC_LOG(LS_INFO) << "Init";
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ return 0;
+}
+
+int32_t AudioTrackJni::Terminate() {
+ RTC_LOG(LS_INFO) << "Terminate";
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ StopPlayout();
+ return 0;
+}
+
+int32_t AudioTrackJni::InitPlayout() {
+ RTC_LOG(LS_INFO) << "InitPlayout";
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ RTC_DCHECK(!initialized_);
+ RTC_DCHECK(!playing_);
+ if (!j_audio_track_->InitPlayout(audio_parameters_.sample_rate(),
+ audio_parameters_.channels())) {
+ RTC_LOG(LS_ERROR) << "InitPlayout failed";
+ return -1;
+ }
+ initialized_ = true;
+ return 0;
+}
+
+int32_t AudioTrackJni::StartPlayout() {
+ RTC_LOG(LS_INFO) << "StartPlayout";
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ RTC_DCHECK(!playing_);
+ if (!initialized_) {
+ RTC_DLOG(LS_WARNING)
+ << "Playout can not start since InitPlayout must succeed first";
+ return 0;
+ }
+ if (!j_audio_track_->StartPlayout()) {
+ RTC_LOG(LS_ERROR) << "StartPlayout failed";
+ return -1;
+ }
+ playing_ = true;
+ return 0;
+}
+
+int32_t AudioTrackJni::StopPlayout() {
+ RTC_LOG(LS_INFO) << "StopPlayout";
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ if (!initialized_ || !playing_) {
+ return 0;
+ }
+ if (!j_audio_track_->StopPlayout()) {
+ RTC_LOG(LS_ERROR) << "StopPlayout failed";
+ return -1;
+ }
+  // If we don't detach here, we will hit a RTC_DCHECK in OnGetPlayoutData()
+  // next time StartPlayout() is called since it will create a new Java
+ // thread.
+ thread_checker_java_.Detach();
+ initialized_ = false;
+ playing_ = false;
+ direct_buffer_address_ = nullptr;
+ return 0;
+}
+
+int AudioTrackJni::SpeakerVolumeIsAvailable(bool& available) {
+ available = true;
+ return 0;
+}
+
+int AudioTrackJni::SetSpeakerVolume(uint32_t volume) {
+ RTC_LOG(LS_INFO) << "SetSpeakerVolume(" << volume << ")";
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ return j_audio_track_->SetStreamVolume(volume) ? 0 : -1;
+}
+
+int AudioTrackJni::MaxSpeakerVolume(uint32_t& max_volume) const {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ max_volume = j_audio_track_->GetStreamMaxVolume();
+ return 0;
+}
+
+int AudioTrackJni::MinSpeakerVolume(uint32_t& min_volume) const {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ min_volume = 0;
+ return 0;
+}
+
+int AudioTrackJni::SpeakerVolume(uint32_t& volume) const {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ volume = j_audio_track_->GetStreamVolume();
+ RTC_LOG(LS_INFO) << "SpeakerVolume: " << volume;
+ return 0;
+}
+
+// TODO(henrika): possibly add stereo support.
+void AudioTrackJni::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
+ RTC_LOG(LS_INFO) << "AttachAudioBuffer";
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ audio_device_buffer_ = audioBuffer;
+ const int sample_rate_hz = audio_parameters_.sample_rate();
+ RTC_LOG(LS_INFO) << "SetPlayoutSampleRate(" << sample_rate_hz << ")";
+ audio_device_buffer_->SetPlayoutSampleRate(sample_rate_hz);
+ const size_t channels = audio_parameters_.channels();
+ RTC_LOG(LS_INFO) << "SetPlayoutChannels(" << channels << ")";
+ audio_device_buffer_->SetPlayoutChannels(channels);
+}
+
+JNI_FUNCTION_ALIGN
+void JNICALL AudioTrackJni::CacheDirectBufferAddress(JNIEnv* env,
+ jobject obj,
+ jobject byte_buffer,
+ jlong nativeAudioTrack) {
+ webrtc::AudioTrackJni* this_object =
+ reinterpret_cast<webrtc::AudioTrackJni*>(nativeAudioTrack);
+ this_object->OnCacheDirectBufferAddress(env, byte_buffer);
+}
+
+void AudioTrackJni::OnCacheDirectBufferAddress(JNIEnv* env,
+ jobject byte_buffer) {
+ RTC_LOG(LS_INFO) << "OnCacheDirectBufferAddress";
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ RTC_DCHECK(!direct_buffer_address_);
+ direct_buffer_address_ = env->GetDirectBufferAddress(byte_buffer);
+ jlong capacity = env->GetDirectBufferCapacity(byte_buffer);
+ RTC_LOG(LS_INFO) << "direct buffer capacity: " << capacity;
+ direct_buffer_capacity_in_bytes_ = static_cast<size_t>(capacity);
+ const size_t bytes_per_frame = audio_parameters_.channels() * sizeof(int16_t);
+ frames_per_buffer_ = direct_buffer_capacity_in_bytes_ / bytes_per_frame;
+ RTC_LOG(LS_INFO) << "frames_per_buffer: " << frames_per_buffer_;
+}
+
+JNI_FUNCTION_ALIGN
+void JNICALL AudioTrackJni::GetPlayoutData(JNIEnv* env,
+ jobject obj,
+ jint length,
+ jlong nativeAudioTrack) {
+ webrtc::AudioTrackJni* this_object =
+ reinterpret_cast<webrtc::AudioTrackJni*>(nativeAudioTrack);
+ this_object->OnGetPlayoutData(static_cast<size_t>(length));
+}
+
+// This method is called on a high-priority thread from Java. The name of
+// the thread is 'AudioTrackThread'.
+void AudioTrackJni::OnGetPlayoutData(size_t length) {
+ RTC_DCHECK(thread_checker_java_.IsCurrent());
+ const size_t bytes_per_frame = audio_parameters_.channels() * sizeof(int16_t);
+ RTC_DCHECK_EQ(frames_per_buffer_, length / bytes_per_frame);
+ if (!audio_device_buffer_) {
+ RTC_LOG(LS_ERROR) << "AttachAudioBuffer has not been called";
+ return;
+ }
+ // Pull decoded data (in 16-bit PCM format) from jitter buffer.
+ int samples = audio_device_buffer_->RequestPlayoutData(frames_per_buffer_);
+ if (samples <= 0) {
+ RTC_LOG(LS_ERROR) << "AudioDeviceBuffer::RequestPlayoutData failed";
+ return;
+ }
+ RTC_DCHECK_EQ(samples, frames_per_buffer_);
+ // Copy decoded data into the common byte buffer to ensure that it can be
+ // written to the Java-based audio track.
+ samples = audio_device_buffer_->GetPlayoutData(direct_buffer_address_);
+ RTC_DCHECK_EQ(length, bytes_per_frame * samples);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_device/android/audio_track_jni.h b/third_party/libwebrtc/modules/audio_device/android/audio_track_jni.h
new file mode 100644
index 0000000000..7eb69082b4
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/android/audio_track_jni.h
@@ -0,0 +1,161 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_DEVICE_ANDROID_AUDIO_TRACK_JNI_H_
+#define MODULES_AUDIO_DEVICE_ANDROID_AUDIO_TRACK_JNI_H_
+
+#include <jni.h>
+
+#include <memory>
+
+#include "api/sequence_checker.h"
+#include "modules/audio_device/android/audio_common.h"
+#include "modules/audio_device/android/audio_manager.h"
+#include "modules/audio_device/audio_device_generic.h"
+#include "modules/audio_device/include/audio_device_defines.h"
+#include "modules/utility/include/helpers_android.h"
+#include "modules/utility/include/jvm_android.h"
+
+namespace webrtc {
+
+// Implements 16-bit mono PCM audio output support for Android using the Java
+// AudioTrack interface. Most of the work is done by its Java counterpart in
+// WebRtcAudioTrack.java. This class is created and lives on a thread in
+// C++-land, but decoded audio buffers are requested on a high-priority
+// thread managed by the Java class.
+//
+// An instance must be created and destroyed on one and the same thread.
+// All public methods must also be called on the same thread. A thread checker
+// will RTC_DCHECK if any method is called on an invalid thread.
+//
+// This class uses JvmThreadConnector to attach to a Java VM if needed
+// and detach when the object goes out of scope. Additional thread checking
+// guarantees that no other (possibly non-attached) thread is used.
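+//
+// Illustrative call sequence only (a sketch, not mandated by this patch);
+// assumes an AudioManager `audio_manager` and an AudioDeviceBuffer
+// `audio_device_buffer` that both outlive the track:
+//
+//   AudioTrackJni audio_track(audio_manager);
+//   audio_track.Init();
+//   audio_track.AttachAudioBuffer(audio_device_buffer);
+//   audio_track.InitPlayout();
+//   audio_track.StartPlayout();  // Java thread starts pulling PCM data.
+//   audio_track.StopPlayout();
+//   audio_track.Terminate();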
+class AudioTrackJni {
+ public:
+ // Wraps the Java-specific parts of AudioTrackJni into one helper class.
+ class JavaAudioTrack {
+ public:
+ JavaAudioTrack(NativeRegistration* native_registration,
+ std::unique_ptr<GlobalRef> audio_track);
+ ~JavaAudioTrack();
+
+ bool InitPlayout(int sample_rate, int channels);
+ bool StartPlayout();
+ bool StopPlayout();
+ bool SetStreamVolume(int volume);
+ int GetStreamMaxVolume();
+ int GetStreamVolume();
+
+ private:
+ std::unique_ptr<GlobalRef> audio_track_;
+ jmethodID init_playout_;
+ jmethodID start_playout_;
+ jmethodID stop_playout_;
+ jmethodID set_stream_volume_;
+ jmethodID get_stream_max_volume_;
+ jmethodID get_stream_volume_;
+ jmethodID get_buffer_size_in_frames_;
+ };
+
+ explicit AudioTrackJni(AudioManager* audio_manager);
+ ~AudioTrackJni();
+
+ int32_t Init();
+ int32_t Terminate();
+
+ int32_t InitPlayout();
+ bool PlayoutIsInitialized() const { return initialized_; }
+
+ int32_t StartPlayout();
+ int32_t StopPlayout();
+ bool Playing() const { return playing_; }
+
+ int SpeakerVolumeIsAvailable(bool& available);
+ int SetSpeakerVolume(uint32_t volume);
+ int SpeakerVolume(uint32_t& volume) const;
+ int MaxSpeakerVolume(uint32_t& max_volume) const;
+ int MinSpeakerVolume(uint32_t& min_volume) const;
+
+ void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer);
+
+ private:
+ // Called from the Java side so we can cache the address of the Java-managed
+ // `byte_buffer` in `direct_buffer_address_`. The size of the buffer
+ // is also stored in `direct_buffer_capacity_in_bytes_`.
+ // Called on the same thread as the creating thread.
+ static void JNICALL CacheDirectBufferAddress(JNIEnv* env,
+ jobject obj,
+ jobject byte_buffer,
+ jlong nativeAudioTrack);
+ void OnCacheDirectBufferAddress(JNIEnv* env, jobject byte_buffer);
+
+ // Called periodically by the Java based WebRtcAudioTrack object when
+ // playout has started. Each call indicates that `length` new bytes should
+ // be written to the memory area `direct_buffer_address_` for playout.
+ // This method is called on a high-priority thread from Java. The name of
+ // the thread is 'AudioTrackThread'.
+ static void JNICALL GetPlayoutData(JNIEnv* env,
+ jobject obj,
+ jint length,
+ jlong nativeAudioTrack);
+ void OnGetPlayoutData(size_t length);
+
+ // Stores thread ID in constructor.
+ SequenceChecker thread_checker_;
+
+ // Stores thread ID in first call to OnGetPlayoutData() from high-priority
+ // thread in Java. Detached during construction of this object.
+ SequenceChecker thread_checker_java_;
+
+ // Calls JavaVM::AttachCurrentThread() if this thread is not attached at
+ // construction.
+ // Also ensures that DetachCurrentThread() is called at destruction.
+ JvmThreadConnector attach_thread_if_needed_;
+
+ // Wraps the JNI interface pointer and methods associated with it.
+ std::unique_ptr<JNIEnvironment> j_environment_;
+
+ // Contains factory method for creating the Java object.
+ std::unique_ptr<NativeRegistration> j_native_registration_;
+
+ // Wraps the Java-specific parts of the AudioTrackJni class.
+ std::unique_ptr<AudioTrackJni::JavaAudioTrack> j_audio_track_;
+
+ // Contains audio parameters provided to this class at construction by the
+ // AudioManager.
+ const AudioParameters audio_parameters_;
+
+ // Cached copy of address to direct audio buffer owned by `j_audio_track_`.
+ void* direct_buffer_address_;
+
+ // Number of bytes in the direct audio buffer owned by `j_audio_track_`.
+ size_t direct_buffer_capacity_in_bytes_;
+
+ // Number of audio frames per audio buffer. Each audio frame corresponds to
+ // one sample of PCM mono data at 16 bits per sample. Hence, each audio
+ // frame contains 2 bytes (given that the Java layer only supports mono).
+ // Example: 480 for 48000 Hz or 441 for 44100 Hz.
+ size_t frames_per_buffer_;
+
+ bool initialized_;
+
+ bool playing_;
+
+ // Raw pointer handle provided to us in AttachAudioBuffer(). Owned by the
+ // AudioDeviceModuleImpl class, which is created by AudioDeviceModule::Create().
+ // The AudioDeviceBuffer is a member of the AudioDeviceModuleImpl instance
+ // and therefore outlives this object.
+ AudioDeviceBuffer* audio_device_buffer_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_DEVICE_ANDROID_AUDIO_TRACK_JNI_H_
diff --git a/third_party/libwebrtc/modules/audio_device/android/build_info.cc b/third_party/libwebrtc/modules/audio_device/android/build_info.cc
new file mode 100644
index 0000000000..916be8244e
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/android/build_info.cc
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_device/android/build_info.h"
+
+#include "modules/utility/include/helpers_android.h"
+
+namespace webrtc {
+
+BuildInfo::BuildInfo()
+ : j_environment_(JVM::GetInstance()->environment()),
+ j_build_info_(
+ JVM::GetInstance()->GetClass("org/webrtc/voiceengine/BuildInfo")) {}
+
+std::string BuildInfo::GetStringFromJava(const char* name) {
+ jmethodID id = j_build_info_.GetStaticMethodId(name, "()Ljava/lang/String;");
+ jstring j_string =
+ static_cast<jstring>(j_build_info_.CallStaticObjectMethod(id));
+ return j_environment_->JavaToStdString(j_string);
+}
+
+std::string BuildInfo::GetDeviceModel() {
+ return GetStringFromJava("getDeviceModel");
+}
+
+std::string BuildInfo::GetBrand() {
+ return GetStringFromJava("getBrand");
+}
+
+std::string BuildInfo::GetDeviceManufacturer() {
+ return GetStringFromJava("getDeviceManufacturer");
+}
+
+std::string BuildInfo::GetAndroidBuildId() {
+ return GetStringFromJava("getAndroidBuildId");
+}
+
+std::string BuildInfo::GetBuildType() {
+ return GetStringFromJava("getBuildType");
+}
+
+std::string BuildInfo::GetBuildRelease() {
+ return GetStringFromJava("getBuildRelease");
+}
+
+SdkCode BuildInfo::GetSdkVersion() {
+ jmethodID id = j_build_info_.GetStaticMethodId("getSdkVersion", "()I");
+ jint j_version = j_build_info_.CallStaticIntMethod(id);
+ return static_cast<SdkCode>(j_version);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_device/android/build_info.h b/third_party/libwebrtc/modules/audio_device/android/build_info.h
new file mode 100644
index 0000000000..3647e56649
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/android/build_info.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_DEVICE_ANDROID_BUILD_INFO_H_
+#define MODULES_AUDIO_DEVICE_ANDROID_BUILD_INFO_H_
+
+#include <jni.h>
+
+#include <memory>
+#include <string>
+
+#include "modules/utility/include/jvm_android.h"
+
+namespace webrtc {
+
+// This enumeration maps to the values returned by BuildInfo::GetSdkVersion(),
+// indicating the Android release associated with a given SDK version.
+// See https://developer.android.com/guide/topics/manifest/uses-sdk-element.html
+// for details.
+enum SdkCode {
+ SDK_CODE_JELLY_BEAN = 16, // Android 4.1
+ SDK_CODE_JELLY_BEAN_MR1 = 17, // Android 4.2
+ SDK_CODE_JELLY_BEAN_MR2 = 18, // Android 4.3
+ SDK_CODE_KITKAT = 19, // Android 4.4
+ SDK_CODE_WATCH = 20, // Android 4.4W
+ SDK_CODE_LOLLIPOP = 21, // Android 5.0
+ SDK_CODE_LOLLIPOP_MR1 = 22, // Android 5.1
+ SDK_CODE_MARSHMALLOW = 23, // Android 6.0
+ SDK_CODE_N = 24, // Android 7.0
+};
+
+// Utility class used to query the Java class (org/webrtc/voiceengine/BuildInfo)
+// for device and Android build information.
+// The calling thread is attached to the JVM at construction if needed and a
+// valid Java environment object is also created.
+// All Get methods must be called on the creating thread. If not, the code will
+// hit RTC_DCHECKs when calling JNIEnvironment::JavaToStdString().
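+//
+// Minimal usage sketch (illustrative only, single-threaded):
+//
+//   BuildInfo build_info;
+//   RTC_LOG(LS_INFO) << "Model: " << build_info.GetDeviceModel();
+//   if (build_info.GetSdkVersion() >= SDK_CODE_LOLLIPOP) {
+//     // Use APIs available on Android 5.0 and higher.
+//   }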
+class BuildInfo {
+ public:
+ BuildInfo();
+ ~BuildInfo() {}
+
+ // End-user-visible name for the end product (e.g. "Nexus 6").
+ std::string GetDeviceModel();
+ // Consumer-visible brand (e.g. "google").
+ std::string GetBrand();
+ // Manufacturer of the product/hardware (e.g. "motorola").
+ std::string GetDeviceManufacturer();
+ // Android build ID (e.g. LMY47D).
+ std::string GetAndroidBuildId();
+ // The type of build (e.g. "user" or "eng").
+ std::string GetBuildType();
+ // The user-visible version string (e.g. "5.1").
+ std::string GetBuildRelease();
+ // The user-visible SDK version of the framework (e.g. 21). See SdkCode enum
+ // for translation.
+ SdkCode GetSdkVersion();
+
+ private:
+ // Helper method which calls a static getter method with `name` and returns
+ // a string from Java.
+ std::string GetStringFromJava(const char* name);
+
+ // Ensures that this class can access a valid JNI interface pointer even
+ // if the creating thread was not attached to the JVM.
+ JvmThreadConnector attach_thread_if_needed_;
+
+ // Provides access to the JNIEnv interface pointer and the JavaToStdString()
+ // method which is used to translate Java strings to std strings.
+ std::unique_ptr<JNIEnvironment> j_environment_;
+
+ // Holds the jclass object and provides access to CallStaticObjectMethod().
+ // Used by GetStringFromJava() during construction only.
+ JavaClass j_build_info_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_DEVICE_ANDROID_BUILD_INFO_H_
diff --git a/third_party/libwebrtc/modules/audio_device/android/ensure_initialized.cc b/third_party/libwebrtc/modules/audio_device/android/ensure_initialized.cc
new file mode 100644
index 0000000000..59e9c8f7a6
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/android/ensure_initialized.cc
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_device/android/ensure_initialized.h"
+
+#include <jni.h>
+#include <pthread.h>
+#include <stddef.h>
+
+#include "modules/utility/include/jvm_android.h"
+#include "rtc_base/checks.h"
+#include "sdk/android/src/jni/jvm.h"
+
+namespace webrtc {
+namespace audiodevicemodule {
+
+static pthread_once_t g_initialize_once = PTHREAD_ONCE_INIT;
+
+void EnsureInitializedOnce() {
+ RTC_CHECK(::webrtc::jni::GetJVM() != nullptr);
+
+ JNIEnv* jni = ::webrtc::jni::AttachCurrentThreadIfNeeded();
+ JavaVM* jvm = NULL;
+ RTC_CHECK_EQ(0, jni->GetJavaVM(&jvm));
+
+ // Initialize the Java environment (currently only used by the audio manager).
+ webrtc::JVM::Initialize(jvm);
+}
+
+void EnsureInitialized() {
+ RTC_CHECK_EQ(0, pthread_once(&g_initialize_once, &EnsureInitializedOnce));
+}
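+
+// Note (illustrative, not part of the original sources): callers invoke
+// EnsureInitialized() before creating Android audio device objects; thanks
+// to pthread_once(), EnsureInitializedOnce() runs at most once even when
+// EnsureInitialized() is called concurrently from multiple threads.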
+
+} // namespace audiodevicemodule
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_device/android/ensure_initialized.h b/third_party/libwebrtc/modules/audio_device/android/ensure_initialized.h
new file mode 100644
index 0000000000..c1997b4acd
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/android/ensure_initialized.h
@@ -0,0 +1,17 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+namespace webrtc {
+namespace audiodevicemodule {
+
+void EnsureInitialized();
+
+} // namespace audiodevicemodule
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/BuildInfo.java b/third_party/libwebrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/BuildInfo.java
new file mode 100644
index 0000000000..aed8a06454
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/BuildInfo.java
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+package org.webrtc.voiceengine;
+
+import android.os.Build;
+
+public final class BuildInfo {
+ public static String getDevice() {
+ return Build.DEVICE;
+ }
+
+ public static String getDeviceModel() {
+ return Build.MODEL;
+ }
+
+ public static String getProduct() {
+ return Build.PRODUCT;
+ }
+
+ public static String getBrand() {
+ return Build.BRAND;
+ }
+
+ public static String getDeviceManufacturer() {
+ return Build.MANUFACTURER;
+ }
+
+ public static String getAndroidBuildId() {
+ return Build.ID;
+ }
+
+ public static String getBuildType() {
+ return Build.TYPE;
+ }
+
+ public static String getBuildRelease() {
+ return Build.VERSION.RELEASE;
+ }
+
+ public static int getSdkVersion() {
+ return Build.VERSION.SDK_INT;
+ }
+}
diff --git a/third_party/libwebrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioEffects.java b/third_party/libwebrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioEffects.java
new file mode 100644
index 0000000000..92f1c93524
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioEffects.java
@@ -0,0 +1,312 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+package org.webrtc.voiceengine;
+
+import android.media.audiofx.AcousticEchoCanceler;
+import android.media.audiofx.AudioEffect;
+import android.media.audiofx.AudioEffect.Descriptor;
+import android.media.audiofx.NoiseSuppressor;
+import android.os.Build;
+import androidx.annotation.Nullable;
+import java.util.List;
+import java.util.UUID;
+import org.webrtc.Logging;
+
+// This class wraps control of two different platform effects:
+// AcousticEchoCanceler (AEC) and NoiseSuppressor (NS).
+// Calling enable() will activate all effects that are supported by the
+// device if the corresponding `shouldEnableXXX` member is set.
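+//
+// Illustrative usage only (a sketch, not mandated by this patch); assumes an
+// existing AudioRecord instance `record`:
+//
+//   WebRtcAudioEffects effects = WebRtcAudioEffects.create();
+//   effects.setAEC(true);  // Request platform AEC if it can be used.
+//   effects.setNS(true);   // Request platform NS if it can be used.
+//   effects.enable(record.getAudioSessionId());
+//   // ... record audio ...
+//   effects.release();     // Free native effect resources when done.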
+public class WebRtcAudioEffects {
+ private static final boolean DEBUG = false;
+
+ private static final String TAG = "WebRtcAudioEffects";
+
+ // UUIDs for Software Audio Effects that we want to avoid using.
+ // The implementor field will be set to "The Android Open Source Project".
+ private static final UUID AOSP_ACOUSTIC_ECHO_CANCELER =
+ UUID.fromString("bb392ec0-8d4d-11e0-a896-0002a5d5c51b");
+ private static final UUID AOSP_NOISE_SUPPRESSOR =
+ UUID.fromString("c06c8400-8e06-11e0-9cb6-0002a5d5c51b");
+
+ // Contains the available effect descriptors returned from the
+ // AudioEffect.getEffects() call. This result is cached to avoid doing the
+ // slow OS call multiple times.
+ private static @Nullable Descriptor[] cachedEffects;
+
+ // Contains the audio effect objects. Created in enable() and destroyed
+ // in release().
+ private @Nullable AcousticEchoCanceler aec;
+ private @Nullable NoiseSuppressor ns;
+
+ // Affects the final state given to the setEnabled() method on each effect.
+ // The default state is set to "disabled" but each effect can also be enabled
+ // by calling setAEC() and setNS().
+ // To enable an effect, both the shouldEnableXXX member and the static
+ // canUseXXX() must be true.
+ private boolean shouldEnableAec;
+ private boolean shouldEnableNs;
+
+ // Checks if the device implements Acoustic Echo Cancellation (AEC).
+ // Returns true if the device implements AEC, false otherwise.
+ public static boolean isAcousticEchoCancelerSupported() {
+ // Note: we're using isAcousticEchoCancelerEffectAvailable() instead of
+ // AcousticEchoCanceler.isAvailable() to avoid the expensive getEffects()
+ // OS API call.
+ return isAcousticEchoCancelerEffectAvailable();
+ }
+
+ // Checks if the device implements Noise Suppression (NS).
+ // Returns true if the device implements NS, false otherwise.
+ public static boolean isNoiseSuppressorSupported() {
+ // Note: we're using isNoiseSuppressorEffectAvailable() instead of
+ // NoiseSuppressor.isAvailable() to avoid the expensive getEffects()
+ // OS API call.
+ return isNoiseSuppressorEffectAvailable();
+ }
+
+ // Returns true if the device is blacklisted for HW AEC usage.
+ public static boolean isAcousticEchoCancelerBlacklisted() {
+ List<String> blackListedModels = WebRtcAudioUtils.getBlackListedModelsForAecUsage();
+ boolean isBlacklisted = blackListedModels.contains(Build.MODEL);
+ if (isBlacklisted) {
+ Logging.w(TAG, Build.MODEL + " is blacklisted for HW AEC usage!");
+ }
+ return isBlacklisted;
+ }
+
+ // Returns true if the device is blacklisted for HW NS usage.
+ public static boolean isNoiseSuppressorBlacklisted() {
+ List<String> blackListedModels = WebRtcAudioUtils.getBlackListedModelsForNsUsage();
+ boolean isBlacklisted = blackListedModels.contains(Build.MODEL);
+ if (isBlacklisted) {
+ Logging.w(TAG, Build.MODEL + " is blacklisted for HW NS usage!");
+ }
+ return isBlacklisted;
+ }
+
+ // Returns true if the platform AEC should be excluded based on its UUID.
+ // AudioEffect.queryEffects() can throw IllegalStateException.
+ private static boolean isAcousticEchoCancelerExcludedByUUID() {
+ for (Descriptor d : getAvailableEffects()) {
+ if (d.type.equals(AudioEffect.EFFECT_TYPE_AEC)
+ && d.uuid.equals(AOSP_ACOUSTIC_ECHO_CANCELER)) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ // Returns true if the platform NS should be excluded based on its UUID.
+ // AudioEffect.queryEffects() can throw IllegalStateException.
+ private static boolean isNoiseSuppressorExcludedByUUID() {
+ for (Descriptor d : getAvailableEffects()) {
+ if (d.type.equals(AudioEffect.EFFECT_TYPE_NS) && d.uuid.equals(AOSP_NOISE_SUPPRESSOR)) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ // Returns true if the device supports Acoustic Echo Cancellation (AEC).
+ private static boolean isAcousticEchoCancelerEffectAvailable() {
+ return isEffectTypeAvailable(AudioEffect.EFFECT_TYPE_AEC);
+ }
+
+ // Returns true if the device supports Noise Suppression (NS).
+ private static boolean isNoiseSuppressorEffectAvailable() {
+ return isEffectTypeAvailable(AudioEffect.EFFECT_TYPE_NS);
+ }
+
+ // Returns true if all conditions for supporting the HW AEC are fulfilled.
+ // It will not be possible to enable the HW AEC if this method returns false.
+ public static boolean canUseAcousticEchoCanceler() {
+ boolean canUseAcousticEchoCanceler = isAcousticEchoCancelerSupported()
+ && !WebRtcAudioUtils.useWebRtcBasedAcousticEchoCanceler()
+ && !isAcousticEchoCancelerBlacklisted() && !isAcousticEchoCancelerExcludedByUUID();
+ Logging.d(TAG, "canUseAcousticEchoCanceler: " + canUseAcousticEchoCanceler);
+ return canUseAcousticEchoCanceler;
+ }
+
+ // Returns true if all conditions for supporting the HW NS are fulfilled.
+ // It will not be possible to enable the HW NS if this method returns false.
+ public static boolean canUseNoiseSuppressor() {
+ boolean canUseNoiseSuppressor = isNoiseSuppressorSupported()
+ && !WebRtcAudioUtils.useWebRtcBasedNoiseSuppressor() && !isNoiseSuppressorBlacklisted()
+ && !isNoiseSuppressorExcludedByUUID();
+ Logging.d(TAG, "canUseNoiseSuppressor: " + canUseNoiseSuppressor);
+ return canUseNoiseSuppressor;
+ }
+
+ public static WebRtcAudioEffects create() {
+ return new WebRtcAudioEffects();
+ }
+
+ private WebRtcAudioEffects() {
+ Logging.d(TAG, "ctor" + WebRtcAudioUtils.getThreadInfo());
+ }
+
+ // Call this method to enable or disable the platform AEC. It modifies
+ // `shouldEnableAec` which is used in enable() where the actual state
+ // of the AEC effect is modified. Returns true if HW AEC is supported and
+ // false otherwise.
+ public boolean setAEC(boolean enable) {
+ Logging.d(TAG, "setAEC(" + enable + ")");
+ if (!canUseAcousticEchoCanceler()) {
+ Logging.w(TAG, "Platform AEC is not supported");
+ shouldEnableAec = false;
+ return false;
+ }
+ if (aec != null && (enable != shouldEnableAec)) {
+ Logging.e(TAG, "Platform AEC state can't be modified while recording");
+ return false;
+ }
+ shouldEnableAec = enable;
+ return true;
+ }
+
+ // Call this method to enable or disable the platform NS. It modifies
+ // `shouldEnableNs` which is used in enable() where the actual state
+ // of the NS effect is modified. Returns true if HW NS is supported and
+ // false otherwise.
+ public boolean setNS(boolean enable) {
+ Logging.d(TAG, "setNS(" + enable + ")");
+ if (!canUseNoiseSuppressor()) {
+ Logging.w(TAG, "Platform NS is not supported");
+ shouldEnableNs = false;
+ return false;
+ }
+ if (ns != null && (enable != shouldEnableNs)) {
+ Logging.e(TAG, "Platform NS state can't be modified while recording");
+ return false;
+ }
+ shouldEnableNs = enable;
+ return true;
+ }
+
+ public void enable(int audioSession) {
+ Logging.d(TAG, "enable(audioSession=" + audioSession + ")");
+ assertTrue(aec == null);
+ assertTrue(ns == null);
+
+ if (DEBUG) {
+ // Add logging of supported effects but filter out "VoIP effects", i.e.,
+ // AEC and NS. Avoid calling AudioEffect.queryEffects() unless the
+ // DEBUG flag is set since we have seen crashes in this API.
+ for (Descriptor d : AudioEffect.queryEffects()) {
+ if (effectTypeIsVoIP(d.type)) {
+ Logging.d(TAG, "name: " + d.name + ", "
+ + "mode: " + d.connectMode + ", "
+ + "implementor: " + d.implementor + ", "
+ + "UUID: " + d.uuid);
+ }
+ }
+ }
+
+ if (isAcousticEchoCancelerSupported()) {
+ // Create an AcousticEchoCanceler and attach it to the AudioRecord on
+ // the specified audio session.
+ aec = AcousticEchoCanceler.create(audioSession);
+ if (aec != null) {
+ boolean enabled = aec.getEnabled();
+ boolean enable = shouldEnableAec && canUseAcousticEchoCanceler();
+ if (aec.setEnabled(enable) != AudioEffect.SUCCESS) {
+ Logging.e(TAG, "Failed to set the AcousticEchoCanceler state");
+ }
+ Logging.d(TAG, "AcousticEchoCanceler: was " + (enabled ? "enabled" : "disabled")
+ + ", enable: " + enable + ", is now: "
+ + (aec.getEnabled() ? "enabled" : "disabled"));
+ } else {
+ Logging.e(TAG, "Failed to create the AcousticEchoCanceler instance");
+ }
+ }
+
+ if (isNoiseSuppressorSupported()) {
+ // Create a NoiseSuppressor and attach it to the AudioRecord on the
+ // specified audio session.
+ ns = NoiseSuppressor.create(audioSession);
+ if (ns != null) {
+ boolean enabled = ns.getEnabled();
+ boolean enable = shouldEnableNs && canUseNoiseSuppressor();
+ if (ns.setEnabled(enable) != AudioEffect.SUCCESS) {
+ Logging.e(TAG, "Failed to set the NoiseSuppressor state");
+ }
+ Logging.d(TAG, "NoiseSuppressor: was " + (enabled ? "enabled" : "disabled") + ", enable: "
+ + enable + ", is now: " + (ns.getEnabled() ? "enabled" : "disabled"));
+ } else {
+ Logging.e(TAG, "Failed to create the NoiseSuppressor instance");
+ }
+ }
+ }
+
+ // Releases all native audio effect resources. It is a good practice to
+ // release the effect engine when not in use as control can be returned
+ // to other applications or the native resources released.
+ public void release() {
+ Logging.d(TAG, "release");
+ if (aec != null) {
+ aec.release();
+ aec = null;
+ }
+ if (ns != null) {
+ ns.release();
+ ns = null;
+ }
+ }
+
+ // Returns true for effect types in `type` that are of "VoIP" types:
+ // Acoustic Echo Canceler (AEC) or Noise Suppressor (NS). Note that an
+ // extra check for support is needed in each comparison since some devices
+ // include effects in the AudioEffect.Descriptor array that are actually
+ // not available on the device.
+ // As an example: Samsung Galaxy S6 includes an AGC in the descriptor but
+ // AutomaticGainControl.isAvailable() returns false.
+ private boolean effectTypeIsVoIP(UUID type) {
+ return (AudioEffect.EFFECT_TYPE_AEC.equals(type) && isAcousticEchoCancelerSupported())
+ || (AudioEffect.EFFECT_TYPE_NS.equals(type) && isNoiseSuppressorSupported());
+ }
+
+ // Helper method which throws an exception when an assertion has failed.
+ private static void assertTrue(boolean condition) {
+ if (!condition) {
+ throw new AssertionError("Expected condition to be true");
+ }
+ }
+
+ // Returns the cached copy of the audio effects array, if available, or
+ // queries the operating system for the list of effects.
+ private static @Nullable Descriptor[] getAvailableEffects() {
+ if (cachedEffects != null) {
+ return cachedEffects;
+ }
+ // The caching is best effort only - if this method is called from several
+ // threads in parallel, they may end up doing the underlying OS call
+ // multiple times. It's normally only called on one thread so there's no
+ // real need to optimize for the multiple threads case.
+ cachedEffects = AudioEffect.queryEffects();
+ return cachedEffects;
+ }
+
+ // Returns true if an effect of the specified type is available. Functionally
+ // equivalent to (NoiseSuppressor|AutomaticGainControl|...).isAvailable(), but
+ // faster as it avoids the expensive OS call to enumerate effects.
+ private static boolean isEffectTypeAvailable(UUID effectType) {
+ Descriptor[] effects = getAvailableEffects();
+ if (effects == null) {
+ return false;
+ }
+ for (Descriptor d : effects) {
+ if (d.type.equals(effectType)) {
+ return true;
+ }
+ }
+ return false;
+ }
+}
diff --git a/third_party/libwebrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioManager.java b/third_party/libwebrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioManager.java
new file mode 100644
index 0000000000..43c416f5b1
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioManager.java
@@ -0,0 +1,371 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+package org.webrtc.voiceengine;
+
+import android.content.Context;
+import android.content.pm.PackageManager;
+import android.media.AudioFormat;
+import android.media.AudioManager;
+import android.media.AudioRecord;
+import android.media.AudioTrack;
+import android.os.Build;
+import androidx.annotation.Nullable;
+import java.util.Timer;
+import java.util.TimerTask;
+import org.webrtc.ContextUtils;
+import org.webrtc.Logging;
+
+// WebRtcAudioManager handles tasks that use android.media.AudioManager.
+// At construction, storeAudioParameters() is called and it retrieves
+// fundamental audio parameters like native sample rate and number of channels.
+// The result is then provided to the caller by nativeCacheAudioParameters().
+// It is also possible to call init() to set up the audio environment for best
+// possible "VoIP performance". All settings done in init() are reverted by
+// dispose(). This class can also be used without calling init() if the user
+// prefers to set up the audio environment separately. However, it is
+// recommended to always use AudioManager.MODE_IN_COMMUNICATION.
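+//
+// Illustrative note (not part of this patch): since storeAudioParameters()
+// reads the static stereo flags at construction time, any overrides must be
+// applied before the native side creates this class, e.g.
+//
+//   WebRtcAudioManager.setStereoOutput(true);
+//   WebRtcAudioManager.setStereoInput(true);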
+public class WebRtcAudioManager {
+ private static final boolean DEBUG = false;
+
+ private static final String TAG = "WebRtcAudioManager";
+
+ // TODO(bugs.webrtc.org/8914): disabled by default until AAudio support has
+ // been completed. Goal is to always return false on Android O MR1 and higher.
+ private static final boolean blacklistDeviceForAAudioUsage = true;
+
+ // Use mono as default for both audio directions.
+ private static boolean useStereoOutput;
+ private static boolean useStereoInput;
+
+ private static boolean blacklistDeviceForOpenSLESUsage;
+ private static boolean blacklistDeviceForOpenSLESUsageIsOverridden;
+
+ // Call this method to override the default list of blacklisted devices
+ // specified in WebRtcAudioUtils.BLACKLISTED_OPEN_SL_ES_MODELS.
+ // Allows an app to take control over which devices to exclude from using
+ // the OpenSL ES audio output path.
+ // TODO(bugs.webrtc.org/8491): Remove NoSynchronizedMethodCheck suppression.
+ @SuppressWarnings("NoSynchronizedMethodCheck")
+ public static synchronized void setBlacklistDeviceForOpenSLESUsage(boolean enable) {
+ blacklistDeviceForOpenSLESUsageIsOverridden = true;
+ blacklistDeviceForOpenSLESUsage = enable;
+ }
+
+ // Call these methods to override the default mono audio modes for the specified direction(s)
+ // (input and/or output).
+ // TODO(bugs.webrtc.org/8491): Remove NoSynchronizedMethodCheck suppression.
+ @SuppressWarnings("NoSynchronizedMethodCheck")
+ public static synchronized void setStereoOutput(boolean enable) {
+ Logging.w(TAG, "Overriding default output behavior: setStereoOutput(" + enable + ')');
+ useStereoOutput = enable;
+ }
+
+ // TODO(bugs.webrtc.org/8491): Remove NoSynchronizedMethodCheck suppression.
+ @SuppressWarnings("NoSynchronizedMethodCheck")
+ public static synchronized void setStereoInput(boolean enable) {
+ Logging.w(TAG, "Overriding default input behavior: setStereoInput(" + enable + ')');
+ useStereoInput = enable;
+ }
+
+ // TODO(bugs.webrtc.org/8491): Remove NoSynchronizedMethodCheck suppression.
+ @SuppressWarnings("NoSynchronizedMethodCheck")
+ public static synchronized boolean getStereoOutput() {
+ return useStereoOutput;
+ }
+
+ // TODO(bugs.webrtc.org/8491): Remove NoSynchronizedMethodCheck suppression.
+ @SuppressWarnings("NoSynchronizedMethodCheck")
+ public static synchronized boolean getStereoInput() {
+ return useStereoInput;
+ }
+
+ // Default audio data format is PCM 16 bit per sample.
+ // Guaranteed to be supported by all devices.
+ private static final int BITS_PER_SAMPLE = 16;
+
+ private static final int DEFAULT_FRAME_PER_BUFFER = 256;
+
+ // Private utility class that periodically checks and logs the volume level
+ // of the audio stream that is currently controlled by the volume control.
+ // A timer triggers logs once every 30 seconds and the timer's associated
+ // thread is named "WebRtcVolumeLevelLoggerThread".
+ private static class VolumeLogger {
+ private static final String THREAD_NAME = "WebRtcVolumeLevelLoggerThread";
+ private static final int TIMER_PERIOD_IN_SECONDS = 30;
+
+ private final AudioManager audioManager;
+ private @Nullable Timer timer;
+
+ public VolumeLogger(AudioManager audioManager) {
+ this.audioManager = audioManager;
+ }
+
+ public void start() {
+ timer = new Timer(THREAD_NAME);
+ timer.schedule(new LogVolumeTask(audioManager.getStreamMaxVolume(AudioManager.STREAM_RING),
+ audioManager.getStreamMaxVolume(AudioManager.STREAM_VOICE_CALL)),
+ 0, TIMER_PERIOD_IN_SECONDS * 1000);
+ }
+
+ private class LogVolumeTask extends TimerTask {
+ private final int maxRingVolume;
+ private final int maxVoiceCallVolume;
+
+ LogVolumeTask(int maxRingVolume, int maxVoiceCallVolume) {
+ this.maxRingVolume = maxRingVolume;
+ this.maxVoiceCallVolume = maxVoiceCallVolume;
+ }
+
+ @Override
+ public void run() {
+ final int mode = audioManager.getMode();
+ if (mode == AudioManager.MODE_RINGTONE) {
+ Logging.d(TAG, "STREAM_RING stream volume: "
+ + audioManager.getStreamVolume(AudioManager.STREAM_RING) + " (max="
+ + maxRingVolume + ")");
+ } else if (mode == AudioManager.MODE_IN_COMMUNICATION) {
+ Logging.d(TAG, "VOICE_CALL stream volume: "
+ + audioManager.getStreamVolume(AudioManager.STREAM_VOICE_CALL) + " (max="
+ + maxVoiceCallVolume + ")");
+ }
+ }
+ }
+
+ private void stop() {
+ if (timer != null) {
+ timer.cancel();
+ timer = null;
+ }
+ }
+ }
+
+ private final long nativeAudioManager;
+ private final AudioManager audioManager;
+
+ private boolean initialized;
+ private int nativeSampleRate;
+ private int nativeChannels;
+
+ private boolean hardwareAEC;
+ private boolean hardwareAGC;
+ private boolean hardwareNS;
+ private boolean lowLatencyOutput;
+ private boolean lowLatencyInput;
+ private boolean proAudio;
+ private boolean aAudio;
+ private int sampleRate;
+ private int outputChannels;
+ private int inputChannels;
+ private int outputBufferSize;
+ private int inputBufferSize;
+
+ private final VolumeLogger volumeLogger;
+
+ WebRtcAudioManager(long nativeAudioManager) {
+ Logging.d(TAG, "ctor" + WebRtcAudioUtils.getThreadInfo());
+ this.nativeAudioManager = nativeAudioManager;
+ audioManager =
+ (AudioManager) ContextUtils.getApplicationContext().getSystemService(Context.AUDIO_SERVICE);
+ if (DEBUG) {
+ WebRtcAudioUtils.logDeviceInfo(TAG);
+ }
+ volumeLogger = new VolumeLogger(audioManager);
+ storeAudioParameters();
+ nativeCacheAudioParameters(sampleRate, outputChannels, inputChannels, hardwareAEC, hardwareAGC,
+ hardwareNS, lowLatencyOutput, lowLatencyInput, proAudio, aAudio, outputBufferSize,
+ inputBufferSize, nativeAudioManager);
+ WebRtcAudioUtils.logAudioState(TAG);
+ }
+
+ private boolean init() {
+ Logging.d(TAG, "init" + WebRtcAudioUtils.getThreadInfo());
+ if (initialized) {
+ return true;
+ }
+ Logging.d(TAG, "audio mode is: "
+ + WebRtcAudioUtils.modeToString(audioManager.getMode()));
+ initialized = true;
+ volumeLogger.start();
+ return true;
+ }
+
+ private void dispose() {
+ Logging.d(TAG, "dispose" + WebRtcAudioUtils.getThreadInfo());
+ if (!initialized) {
+ return;
+ }
+ volumeLogger.stop();
+ }
+
+ private boolean isCommunicationModeEnabled() {
+ return (audioManager.getMode() == AudioManager.MODE_IN_COMMUNICATION);
+ }
+
+ private boolean isDeviceBlacklistedForOpenSLESUsage() {
+ boolean blacklisted = blacklistDeviceForOpenSLESUsageIsOverridden
+ ? blacklistDeviceForOpenSLESUsage
+ : WebRtcAudioUtils.deviceIsBlacklistedForOpenSLESUsage();
+ if (blacklisted) {
+ Logging.d(TAG, Build.MODEL + " is blacklisted for OpenSL ES usage!");
+ }
+ return blacklisted;
+ }
+
+ private void storeAudioParameters() {
+ outputChannels = getStereoOutput() ? 2 : 1;
+ inputChannels = getStereoInput() ? 2 : 1;
+ sampleRate = getNativeOutputSampleRate();
+ hardwareAEC = isAcousticEchoCancelerSupported();
+ // TODO(henrika): use of hardware AGC is no longer supported. Currently
+ // hardcoded to false. To be removed.
+ hardwareAGC = false;
+ hardwareNS = isNoiseSuppressorSupported();
+ lowLatencyOutput = isLowLatencyOutputSupported();
+ lowLatencyInput = isLowLatencyInputSupported();
+ proAudio = isProAudioSupported();
+ aAudio = isAAudioSupported();
+ outputBufferSize = lowLatencyOutput ? getLowLatencyOutputFramesPerBuffer()
+ : getMinOutputFrameSize(sampleRate, outputChannels);
+ inputBufferSize = lowLatencyInput ? getLowLatencyInputFramesPerBuffer()
+ : getMinInputFrameSize(sampleRate, inputChannels);
+ }
+
+ // Returns true if the device supports telephony and hence has an earpiece.
+ private boolean hasEarpiece() {
+ return ContextUtils.getApplicationContext().getPackageManager().hasSystemFeature(
+ PackageManager.FEATURE_TELEPHONY);
+ }
+
+ // Returns true if low-latency audio output is supported.
+ private boolean isLowLatencyOutputSupported() {
+ return ContextUtils.getApplicationContext().getPackageManager().hasSystemFeature(
+ PackageManager.FEATURE_AUDIO_LOW_LATENCY);
+ }
+
+ // Returns true if low-latency audio input is supported.
+ // TODO(henrika): remove the hardcoded false return value when OpenSL ES
+ // input performance has been evaluated and tested more.
+ public boolean isLowLatencyInputSupported() {
+ // TODO(henrika): investigate if some sort of device list is needed here
+ // as well. The NDK doc states that: "As of API level 21, lower latency
+ // audio input is supported on select devices. To take advantage of this
+ // feature, first confirm that lower latency output is available".
+ return isLowLatencyOutputSupported();
+ }
+
+ // Returns true if the device has professional audio level of functionality
+ // and therefore supports the lowest possible round-trip latency.
+ private boolean isProAudioSupported() {
+ return Build.VERSION.SDK_INT >= 23
+ && ContextUtils.getApplicationContext().getPackageManager().hasSystemFeature(
+ PackageManager.FEATURE_AUDIO_PRO);
+ }
+
+ // AAudio is supported on Android Oreo MR1 (API 27) and higher.
+ // TODO(bugs.webrtc.org/8914): currently disabled by default.
+ private boolean isAAudioSupported() {
+ if (blacklistDeviceForAAudioUsage) {
+ Logging.w(TAG, "AAudio support is currently disabled on all devices!");
+ }
+ return !blacklistDeviceForAAudioUsage && Build.VERSION.SDK_INT >= 27;
+ }
+
+ // Returns the native output sample rate for this device's output stream.
+ private int getNativeOutputSampleRate() {
+ // Override this if we're running on an old emulator image which only
+ // supports 8 kHz and doesn't support PROPERTY_OUTPUT_SAMPLE_RATE.
+ if (WebRtcAudioUtils.runningOnEmulator()) {
+ Logging.d(TAG, "Running emulator, overriding sample rate to 8 kHz.");
+ return 8000;
+ }
+ // The default can be overridden by WebRtcAudioUtils.setDefaultSampleRateHz().
+ // If so, use that value and return here.
+ if (WebRtcAudioUtils.isDefaultSampleRateOverridden()) {
+ Logging.d(TAG, "Default sample rate is overridden to "
+ + WebRtcAudioUtils.getDefaultSampleRateHz() + " Hz");
+ return WebRtcAudioUtils.getDefaultSampleRateHz();
+ }
+ // No overrides available. Deliver best possible estimate based on default
+ // Android AudioManager APIs.
+ final int sampleRateHz = getSampleRateForApiLevel();
+ Logging.d(TAG, "Sample rate is set to " + sampleRateHz + " Hz");
+ return sampleRateHz;
+ }
+
+ private int getSampleRateForApiLevel() {
+ String sampleRateString = audioManager.getProperty(AudioManager.PROPERTY_OUTPUT_SAMPLE_RATE);
+ return (sampleRateString == null) ? WebRtcAudioUtils.getDefaultSampleRateHz()
+ : Integer.parseInt(sampleRateString);
+ }
+
+ // Returns the native output buffer size for low-latency output streams.
+ private int getLowLatencyOutputFramesPerBuffer() {
+ assertTrue(isLowLatencyOutputSupported());
+ String framesPerBuffer =
+ audioManager.getProperty(AudioManager.PROPERTY_OUTPUT_FRAMES_PER_BUFFER);
+ return framesPerBuffer == null ? DEFAULT_FRAME_PER_BUFFER : Integer.parseInt(framesPerBuffer);
+ }
+
+ // Returns true if the device supports an audio effect (AEC or NS).
+ // Four conditions must be fulfilled for these functions to return true:
+ // 1) the platform must support the built-in (HW) effect,
+ // 2) explicit use (override) of a WebRTC based version must not be set,
+ // 3) the device must not be blacklisted for use of the effect, and
+ // 4) the UUID of the effect must be approved (some UUIDs can be excluded).
+ private static boolean isAcousticEchoCancelerSupported() {
+ return WebRtcAudioEffects.canUseAcousticEchoCanceler();
+ }
+ private static boolean isNoiseSuppressorSupported() {
+ return WebRtcAudioEffects.canUseNoiseSuppressor();
+ }
+
+ // Returns the minimum output buffer size for Java-based audio (AudioTrack).
+ // This size can also be used for OpenSL ES implementations on devices that
+ // lack support for low-latency output.
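+ // Worked example (illustrative): at 48000 Hz stereo, bytesPerFrame is
+ // 2 * (16 / 8) = 4, so a hypothetical 15360-byte minimum buffer maps to
+ // 15360 / 4 = 3840 frames.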
+ private static int getMinOutputFrameSize(int sampleRateInHz, int numChannels) {
+ final int bytesPerFrame = numChannels * (BITS_PER_SAMPLE / 8);
+ final int channelConfig =
+ (numChannels == 1 ? AudioFormat.CHANNEL_OUT_MONO : AudioFormat.CHANNEL_OUT_STEREO);
+ return AudioTrack.getMinBufferSize(
+ sampleRateInHz, channelConfig, AudioFormat.ENCODING_PCM_16BIT)
+ / bytesPerFrame;
+ }
+
+ // Returns the native buffer size for low-latency input streams.
+ private int getLowLatencyInputFramesPerBuffer() {
+ assertTrue(isLowLatencyInputSupported());
+ return getLowLatencyOutputFramesPerBuffer();
+ }
+
+ // Returns the minimum input buffer size for Java-based audio (AudioRecord).
+ // This size can also be used for OpenSL ES implementations on devices that
+ // lack support for low-latency input.
+ private static int getMinInputFrameSize(int sampleRateInHz, int numChannels) {
+ final int bytesPerFrame = numChannels * (BITS_PER_SAMPLE / 8);
+ final int channelConfig =
+ (numChannels == 1 ? AudioFormat.CHANNEL_IN_MONO : AudioFormat.CHANNEL_IN_STEREO);
+ return AudioRecord.getMinBufferSize(
+ sampleRateInHz, channelConfig, AudioFormat.ENCODING_PCM_16BIT)
+ / bytesPerFrame;
+ }
+
+ // Helper method which throws an exception when an assertion has failed.
+ private static void assertTrue(boolean condition) {
+ if (!condition) {
+ throw new AssertionError("Expected condition to be true");
+ }
+ }
+
+ private native void nativeCacheAudioParameters(int sampleRate, int outputChannels,
+ int inputChannels, boolean hardwareAEC, boolean hardwareAGC, boolean hardwareNS,
+ boolean lowLatencyOutput, boolean lowLatencyInput, boolean proAudio, boolean aAudio,
+ int outputBufferSize, int inputBufferSize, long nativeAudioManager);
+}
diff --git a/third_party/libwebrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioRecord.java b/third_party/libwebrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioRecord.java
new file mode 100644
index 0000000000..8eab01cd69
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioRecord.java
@@ -0,0 +1,409 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+package org.webrtc.voiceengine;
+
+import android.media.AudioFormat;
+import android.media.AudioRecord;
+import android.media.MediaRecorder.AudioSource;
+import android.os.Build;
+import android.os.Process;
+import androidx.annotation.Nullable;
+import java.lang.System;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import java.util.concurrent.TimeUnit;
+import org.webrtc.Logging;
+import org.webrtc.ThreadUtils;
+
+public class WebRtcAudioRecord {
+ private static final boolean DEBUG = false;
+
+ private static final String TAG = "WebRtcAudioRecord";
+
+ // Default audio data format is PCM 16 bit per sample.
+ // Guaranteed to be supported by all devices.
+ private static final int BITS_PER_SAMPLE = 16;
+
+ // Requested size of each recorded buffer provided to the client.
+ private static final int CALLBACK_BUFFER_SIZE_MS = 10;
+
+ // Average number of callbacks per second.
+ private static final int BUFFERS_PER_SECOND = 1000 / CALLBACK_BUFFER_SIZE_MS;
+
+ // We ask for a native buffer size of BUFFER_SIZE_FACTOR * (minimum required
+ // buffer size). The extra space is allocated to guard against glitches under
+ // high load.
+ private static final int BUFFER_SIZE_FACTOR = 2;
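+
+ // Worked example (illustrative): at 48000 Hz mono, each 10 ms callback
+ // buffer holds 48000 / 100 = 480 frames of 2 bytes each, i.e. 960 bytes,
+ // while the AudioRecord itself is created with at least
+ // BUFFER_SIZE_FACTOR * minBufferSize bytes.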
+
+ // The AudioRecordJavaThread is allowed to wait for a successful call to
+ // join(), but the wait times out after this amount of time.
+ private static final long AUDIO_RECORD_THREAD_JOIN_TIMEOUT_MS = 2000;
+
+ private static final int DEFAULT_AUDIO_SOURCE = getDefaultAudioSource();
+ private static int audioSource = DEFAULT_AUDIO_SOURCE;
+
+ private final long nativeAudioRecord;
+
+ private @Nullable WebRtcAudioEffects effects;
+
+ private ByteBuffer byteBuffer;
+
+ private @Nullable AudioRecord audioRecord;
+ private @Nullable AudioRecordThread audioThread;
+
+ private static volatile boolean microphoneMute;
+ private byte[] emptyBytes;
+
+ // Audio recording error handler functions.
+ public enum AudioRecordStartErrorCode {
+ AUDIO_RECORD_START_EXCEPTION,
+ AUDIO_RECORD_START_STATE_MISMATCH,
+ }
+
+ public static interface WebRtcAudioRecordErrorCallback {
+ void onWebRtcAudioRecordInitError(String errorMessage);
+ void onWebRtcAudioRecordStartError(AudioRecordStartErrorCode errorCode, String errorMessage);
+ void onWebRtcAudioRecordError(String errorMessage);
+ }
+
+ private static @Nullable WebRtcAudioRecordErrorCallback errorCallback;
+
+ public static void setErrorCallback(WebRtcAudioRecordErrorCallback errorCallback) {
+ Logging.d(TAG, "Set error callback");
+ WebRtcAudioRecord.errorCallback = errorCallback;
+ }
+
+ /**
+ * Contains audio sample information. Object is passed using {@link
+ * WebRtcAudioRecord.WebRtcAudioRecordSamplesReadyCallback}
+ */
+ public static class AudioSamples {
+ /** See {@link AudioRecord#getAudioFormat()} */
+ private final int audioFormat;
+ /** See {@link AudioRecord#getChannelCount()} */
+ private final int channelCount;
+ /** See {@link AudioRecord#getSampleRate()} */
+ private final int sampleRate;
+
+ private final byte[] data;
+
+ private AudioSamples(AudioRecord audioRecord, byte[] data) {
+ this.audioFormat = audioRecord.getAudioFormat();
+ this.channelCount = audioRecord.getChannelCount();
+ this.sampleRate = audioRecord.getSampleRate();
+ this.data = data;
+ }
+
+ public int getAudioFormat() {
+ return audioFormat;
+ }
+
+ public int getChannelCount() {
+ return channelCount;
+ }
+
+ public int getSampleRate() {
+ return sampleRate;
+ }
+
+ public byte[] getData() {
+ return data;
+ }
+ }
+
+ /** Called when new audio samples are ready. This should only be set for debug purposes. */
+ public static interface WebRtcAudioRecordSamplesReadyCallback {
+ void onWebRtcAudioRecordSamplesReady(AudioSamples samples);
+ }
+
+ private static @Nullable WebRtcAudioRecordSamplesReadyCallback audioSamplesReadyCallback;
+
+ public static void setOnAudioSamplesReady(WebRtcAudioRecordSamplesReadyCallback callback) {
+ audioSamplesReadyCallback = callback;
+ }
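+
+ // Illustrative usage only (hypothetical tag and callback, not part of this
+ // patch); the interface has a single method, so a lambda works on Java 8+:
+ //
+ //   WebRtcAudioRecord.setOnAudioSamplesReady(
+ //       samples -> Logging.d("Demo", "bytes: " + samples.getData().length));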
+
+ /**
+ * Audio thread which keeps calling AudioRecord.read() waiting for audio
+ * to be recorded. Feeds recorded data to the native counterpart as a
+ * periodic sequence of callbacks using nativeDataIsRecorded().
+ * This thread uses a Process.THREAD_PRIORITY_URGENT_AUDIO priority.
+ */
+ private class AudioRecordThread extends Thread {
+ private volatile boolean keepAlive = true;
+
+ public AudioRecordThread(String name) {
+ super(name);
+ }
+
+ // TODO(titovartem) make correct fix during webrtc:9175
+ @SuppressWarnings("ByteBufferBackingArray")
+ @Override
+ public void run() {
+ Process.setThreadPriority(Process.THREAD_PRIORITY_URGENT_AUDIO);
+ Logging.d(TAG, "AudioRecordThread" + WebRtcAudioUtils.getThreadInfo());
+ assertTrue(audioRecord.getRecordingState() == AudioRecord.RECORDSTATE_RECORDING);
+
+ long lastTime = System.nanoTime();
+ while (keepAlive) {
+ int bytesRead = audioRecord.read(byteBuffer, byteBuffer.capacity());
+ if (bytesRead == byteBuffer.capacity()) {
+ if (microphoneMute) {
+ byteBuffer.clear();
+ byteBuffer.put(emptyBytes);
+ }
+ // It's possible we've been shut down during the read, and stopRecording() tried and
+ // failed to join this thread. To be a bit safer, try to avoid calling any native methods
+ // in case they've been unregistered after stopRecording() returned.
+ if (keepAlive) {
+ nativeDataIsRecorded(bytesRead, nativeAudioRecord);
+ }
+ if (audioSamplesReadyCallback != null) {
+ // Copy the entire byte buffer array. Assume that the start of the byteBuffer is
+ // at index 0.
+ byte[] data = Arrays.copyOf(byteBuffer.array(), byteBuffer.capacity());
+ audioSamplesReadyCallback.onWebRtcAudioRecordSamplesReady(
+ new AudioSamples(audioRecord, data));
+ }
+ } else {
+ String errorMessage = "AudioRecord.read failed: " + bytesRead;
+ Logging.e(TAG, errorMessage);
+ if (bytesRead == AudioRecord.ERROR_INVALID_OPERATION) {
+ keepAlive = false;
+ reportWebRtcAudioRecordError(errorMessage);
+ }
+ }
+ if (DEBUG) {
+ long nowTime = System.nanoTime();
+ long durationInMs = TimeUnit.NANOSECONDS.toMillis((nowTime - lastTime));
+ lastTime = nowTime;
+ Logging.d(TAG, "bytesRead[" + durationInMs + "] " + bytesRead);
+ }
+ }
+
+ try {
+ if (audioRecord != null) {
+ audioRecord.stop();
+ }
+ } catch (IllegalStateException e) {
+ Logging.e(TAG, "AudioRecord.stop failed: " + e.getMessage());
+ }
+ }
+
+ // Stops the inner thread loop and also calls AudioRecord.stop().
+ // Does not block the calling thread.
+ public void stopThread() {
+ Logging.d(TAG, "stopThread");
+ keepAlive = false;
+ }
+ }
+
+ WebRtcAudioRecord(long nativeAudioRecord) {
+ Logging.d(TAG, "ctor" + WebRtcAudioUtils.getThreadInfo());
+ this.nativeAudioRecord = nativeAudioRecord;
+ if (DEBUG) {
+ WebRtcAudioUtils.logDeviceInfo(TAG);
+ }
+ effects = WebRtcAudioEffects.create();
+ }
+
+ private boolean enableBuiltInAEC(boolean enable) {
+ Logging.d(TAG, "enableBuiltInAEC(" + enable + ')');
+ if (effects == null) {
+ Logging.e(TAG, "Built-in AEC is not supported on this platform");
+ return false;
+ }
+ return effects.setAEC(enable);
+ }
+
+ private boolean enableBuiltInNS(boolean enable) {
+ Logging.d(TAG, "enableBuiltInNS(" + enable + ')');
+ if (effects == null) {
+ Logging.e(TAG, "Built-in NS is not supported on this platform");
+ return false;
+ }
+ return effects.setNS(enable);
+ }
+
+ private int initRecording(int sampleRate, int channels) {
+ Logging.d(TAG, "initRecording(sampleRate=" + sampleRate + ", channels=" + channels + ")");
+ if (audioRecord != null) {
+ reportWebRtcAudioRecordInitError("InitRecording called twice without StopRecording.");
+ return -1;
+ }
+ final int bytesPerFrame = channels * (BITS_PER_SAMPLE / 8);
+ final int framesPerBuffer = sampleRate / BUFFERS_PER_SECOND;
+ byteBuffer = ByteBuffer.allocateDirect(bytesPerFrame * framesPerBuffer);
+ Logging.d(TAG, "byteBuffer.capacity: " + byteBuffer.capacity());
+ emptyBytes = new byte[byteBuffer.capacity()];
+ // Rather than passing the ByteBuffer with every callback (requiring
+ // the potentially expensive GetDirectBufferAddress), we simply have the
+ // native class cache the address of the memory once.
+ nativeCacheDirectBufferAddress(byteBuffer, nativeAudioRecord);
+
+ // Get the minimum buffer size required for the successful creation of
+ // an AudioRecord object, in byte units.
+ // Note that this size doesn't guarantee a smooth recording under load.
+ final int channelConfig = channelCountToConfiguration(channels);
+ int minBufferSize =
+ AudioRecord.getMinBufferSize(sampleRate, channelConfig, AudioFormat.ENCODING_PCM_16BIT);
+ if (minBufferSize == AudioRecord.ERROR || minBufferSize == AudioRecord.ERROR_BAD_VALUE) {
+ reportWebRtcAudioRecordInitError("AudioRecord.getMinBufferSize failed: " + minBufferSize);
+ return -1;
+ }
+ Logging.d(TAG, "AudioRecord.getMinBufferSize: " + minBufferSize);
+
+ // Use a larger buffer size than the minimum required when creating the
+ // AudioRecord instance to ensure smooth recording under load. It has been
+ // verified that it does not increase the actual recording latency.
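+    // Illustrative arithmetic, assuming BUFFER_SIZE_FACTOR == 2: with
+    // minBufferSize == 1280 and a 960-byte byteBuffer, bufferSizeInBytes
+    // becomes max(2 * 1280, 960) == 2560.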
+ int bufferSizeInBytes = Math.max(BUFFER_SIZE_FACTOR * minBufferSize, byteBuffer.capacity());
+ Logging.d(TAG, "bufferSizeInBytes: " + bufferSizeInBytes);
+ try {
+ audioRecord = new AudioRecord(audioSource, sampleRate, channelConfig,
+ AudioFormat.ENCODING_PCM_16BIT, bufferSizeInBytes);
+ } catch (IllegalArgumentException e) {
+ reportWebRtcAudioRecordInitError("AudioRecord ctor error: " + e.getMessage());
+ releaseAudioResources();
+ return -1;
+ }
+ if (audioRecord == null || audioRecord.getState() != AudioRecord.STATE_INITIALIZED) {
+ reportWebRtcAudioRecordInitError("Failed to create a new AudioRecord instance");
+ releaseAudioResources();
+ return -1;
+ }
+ if (effects != null) {
+ effects.enable(audioRecord.getAudioSessionId());
+ }
+ logMainParameters();
+ logMainParametersExtended();
+ return framesPerBuffer;
+ }
+
+ private boolean startRecording() {
+ Logging.d(TAG, "startRecording");
+ assertTrue(audioRecord != null);
+ assertTrue(audioThread == null);
+ try {
+ audioRecord.startRecording();
+ } catch (IllegalStateException e) {
+ reportWebRtcAudioRecordStartError(AudioRecordStartErrorCode.AUDIO_RECORD_START_EXCEPTION,
+ "AudioRecord.startRecording failed: " + e.getMessage());
+ return false;
+ }
+ if (audioRecord.getRecordingState() != AudioRecord.RECORDSTATE_RECORDING) {
+ reportWebRtcAudioRecordStartError(
+ AudioRecordStartErrorCode.AUDIO_RECORD_START_STATE_MISMATCH,
+ "AudioRecord.startRecording failed - incorrect state :"
+ + audioRecord.getRecordingState());
+ return false;
+ }
+ audioThread = new AudioRecordThread("AudioRecordJavaThread");
+ audioThread.start();
+ return true;
+ }
+
+ private boolean stopRecording() {
+ Logging.d(TAG, "stopRecording");
+ assertTrue(audioThread != null);
+ audioThread.stopThread();
+ if (!ThreadUtils.joinUninterruptibly(audioThread, AUDIO_RECORD_THREAD_JOIN_TIMEOUT_MS)) {
+ Logging.e(TAG, "Join of AudioRecordJavaThread timed out");
+ WebRtcAudioUtils.logAudioState(TAG);
+ }
+ audioThread = null;
+ if (effects != null) {
+ effects.release();
+ }
+ releaseAudioResources();
+ return true;
+ }
+
+ private void logMainParameters() {
+ Logging.d(TAG, "AudioRecord: "
+ + "session ID: " + audioRecord.getAudioSessionId() + ", "
+ + "channels: " + audioRecord.getChannelCount() + ", "
+ + "sample rate: " + audioRecord.getSampleRate());
+ }
+
+ private void logMainParametersExtended() {
+ if (Build.VERSION.SDK_INT >= 23) {
+ Logging.d(TAG, "AudioRecord: "
+ // The frame count of the native AudioRecord buffer.
+ + "buffer size in frames: " + audioRecord.getBufferSizeInFrames());
+ }
+ }
+
+ // Helper method which throws an exception when an assertion has failed.
+ private static void assertTrue(boolean condition) {
+ if (!condition) {
+ throw new AssertionError("Expected condition to be true");
+ }
+ }
+
+ private int channelCountToConfiguration(int channels) {
+ return (channels == 1 ? AudioFormat.CHANNEL_IN_MONO : AudioFormat.CHANNEL_IN_STEREO);
+ }
+
+ private native void nativeCacheDirectBufferAddress(ByteBuffer byteBuffer, long nativeAudioRecord);
+
+ private native void nativeDataIsRecorded(int bytes, long nativeAudioRecord);
+
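+  // Example (illustrative client call): an app that needs raw, unprocessed
+  // microphone input could call
+  // WebRtcAudioRecord.setAudioSource(MediaRecorder.AudioSource.MIC) before
+  // recording is initialized.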
+ @SuppressWarnings("NoSynchronizedMethodCheck")
+ public static synchronized void setAudioSource(int source) {
+ Logging.w(TAG, "Audio source is changed from: " + audioSource
+ + " to " + source);
+ audioSource = source;
+ }
+
+ private static int getDefaultAudioSource() {
+ return AudioSource.VOICE_COMMUNICATION;
+ }
+
+ // Sets all recorded samples to zero if `mute` is true, i.e., ensures that
+ // the microphone is muted.
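+  // Example (illustrative): WebRtcAudioRecord.setMicrophoneMute(true) makes
+  // the capture thread overwrite each recorded buffer with zeros (emptyBytes).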
+ public static void setMicrophoneMute(boolean mute) {
+ Logging.w(TAG, "setMicrophoneMute(" + mute + ")");
+ microphoneMute = mute;
+ }
+
+ // Releases the native AudioRecord resources.
+ private void releaseAudioResources() {
+ Logging.d(TAG, "releaseAudioResources");
+ if (audioRecord != null) {
+ audioRecord.release();
+ audioRecord = null;
+ }
+ }
+
+ private void reportWebRtcAudioRecordInitError(String errorMessage) {
+ Logging.e(TAG, "Init recording error: " + errorMessage);
+ WebRtcAudioUtils.logAudioState(TAG);
+ if (errorCallback != null) {
+ errorCallback.onWebRtcAudioRecordInitError(errorMessage);
+ }
+ }
+
+ private void reportWebRtcAudioRecordStartError(
+ AudioRecordStartErrorCode errorCode, String errorMessage) {
+ Logging.e(TAG, "Start recording error: " + errorCode + ". " + errorMessage);
+ WebRtcAudioUtils.logAudioState(TAG);
+ if (errorCallback != null) {
+ errorCallback.onWebRtcAudioRecordStartError(errorCode, errorMessage);
+ }
+ }
+
+ private void reportWebRtcAudioRecordError(String errorMessage) {
+ Logging.e(TAG, "Run-time recording error: " + errorMessage);
+ WebRtcAudioUtils.logAudioState(TAG);
+ if (errorCallback != null) {
+ errorCallback.onWebRtcAudioRecordError(errorMessage);
+ }
+ }
+}
diff --git a/third_party/libwebrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioTrack.java b/third_party/libwebrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioTrack.java
new file mode 100644
index 0000000000..3e1875c3d6
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioTrack.java
@@ -0,0 +1,494 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+package org.webrtc.voiceengine;
+
+import android.content.Context;
+import android.media.AudioAttributes;
+import android.media.AudioFormat;
+import android.media.AudioManager;
+import android.media.AudioTrack;
+import android.os.Build;
+import android.os.Process;
+import androidx.annotation.Nullable;
+import java.lang.Thread;
+import java.nio.ByteBuffer;
+import org.webrtc.ContextUtils;
+import org.webrtc.Logging;
+import org.webrtc.ThreadUtils;
+
+public class WebRtcAudioTrack {
+ private static final boolean DEBUG = false;
+
+ private static final String TAG = "WebRtcAudioTrack";
+
+ // Default audio data format is PCM 16 bit per sample.
+ // Guaranteed to be supported by all devices.
+ private static final int BITS_PER_SAMPLE = 16;
+
+  // Requested size of each playout buffer acquired from the native client.
+ private static final int CALLBACK_BUFFER_SIZE_MS = 10;
+
+ // Average number of callbacks per second.
+ private static final int BUFFERS_PER_SECOND = 1000 / CALLBACK_BUFFER_SIZE_MS;
+
+  // The AudioTrackThread is allowed to wait for a successful call to join(),
+  // but the wait times out after this amount of time.
+ private static final long AUDIO_TRACK_THREAD_JOIN_TIMEOUT_MS = 2000;
+
+ // By default, WebRTC creates audio tracks with a usage attribute
+ // corresponding to voice communications, such as telephony or VoIP.
+ private static final int DEFAULT_USAGE = AudioAttributes.USAGE_VOICE_COMMUNICATION;
+ private static int usageAttribute = DEFAULT_USAGE;
+
+  // This method overrides the default usage attribute and allows the user to
+  // set it to something other than AudioAttributes.USAGE_VOICE_COMMUNICATION.
+ // NOTE: calling this method will most likely break existing VoIP tuning.
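+  // Example (illustrative):
+  //   WebRtcAudioTrack.setAudioTrackUsageAttribute(AudioAttributes.USAGE_MEDIA);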
+ // TODO(bugs.webrtc.org/8491): Remove NoSynchronizedMethodCheck suppression.
+ @SuppressWarnings("NoSynchronizedMethodCheck")
+ public static synchronized void setAudioTrackUsageAttribute(int usage) {
+ Logging.w(TAG, "Default usage attribute is changed from: "
+ + DEFAULT_USAGE + " to " + usage);
+ usageAttribute = usage;
+ }
+
+ private final long nativeAudioTrack;
+ private final AudioManager audioManager;
+ private final ThreadUtils.ThreadChecker threadChecker = new ThreadUtils.ThreadChecker();
+
+ private ByteBuffer byteBuffer;
+
+ private @Nullable AudioTrack audioTrack;
+ private @Nullable AudioTrackThread audioThread;
+
+ // Samples to be played are replaced by zeros if `speakerMute` is set to true.
+ // Can be used to ensure that the speaker is fully muted.
+ private static volatile boolean speakerMute;
+ private byte[] emptyBytes;
+
+ // Audio playout/track error handler functions.
+ public enum AudioTrackStartErrorCode {
+ AUDIO_TRACK_START_EXCEPTION,
+ AUDIO_TRACK_START_STATE_MISMATCH,
+ }
+
+ @Deprecated
+ public static interface WebRtcAudioTrackErrorCallback {
+ void onWebRtcAudioTrackInitError(String errorMessage);
+ void onWebRtcAudioTrackStartError(String errorMessage);
+ void onWebRtcAudioTrackError(String errorMessage);
+ }
+
+ // TODO(henrika): upgrade all clients to use this new interface instead.
+ public static interface ErrorCallback {
+ void onWebRtcAudioTrackInitError(String errorMessage);
+ void onWebRtcAudioTrackStartError(AudioTrackStartErrorCode errorCode, String errorMessage);
+ void onWebRtcAudioTrackError(String errorMessage);
+ }
+
+ private static @Nullable WebRtcAudioTrackErrorCallback errorCallbackOld;
+ private static @Nullable ErrorCallback errorCallback;
+
+ @Deprecated
+ public static void setErrorCallback(WebRtcAudioTrackErrorCallback errorCallback) {
+ Logging.d(TAG, "Set error callback (deprecated");
+ WebRtcAudioTrack.errorCallbackOld = errorCallback;
+ }
+
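+  // Example (illustrative sketch) of installing the extended callback:
+  //   WebRtcAudioTrack.setErrorCallback(new WebRtcAudioTrack.ErrorCallback() {
+  //     @Override
+  //     public void onWebRtcAudioTrackInitError(String msg) { /* log/report */ }
+  //     @Override
+  //     public void onWebRtcAudioTrackStartError(
+  //         WebRtcAudioTrack.AudioTrackStartErrorCode code, String msg) { /* ... */ }
+  //     @Override
+  //     public void onWebRtcAudioTrackError(String msg) { /* ... */ }
+  //   });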
+ public static void setErrorCallback(ErrorCallback errorCallback) {
+ Logging.d(TAG, "Set extended error callback");
+ WebRtcAudioTrack.errorCallback = errorCallback;
+ }
+
+ /**
+ * Audio thread which keeps calling AudioTrack.write() to stream audio.
+ * Data is periodically acquired from the native WebRTC layer using the
+ * nativeGetPlayoutData callback function.
+ * This thread uses a Process.THREAD_PRIORITY_URGENT_AUDIO priority.
+ */
+ private class AudioTrackThread extends Thread {
+ private volatile boolean keepAlive = true;
+
+ public AudioTrackThread(String name) {
+ super(name);
+ }
+
+ @Override
+ public void run() {
+ Process.setThreadPriority(Process.THREAD_PRIORITY_URGENT_AUDIO);
+ Logging.d(TAG, "AudioTrackThread" + WebRtcAudioUtils.getThreadInfo());
+ assertTrue(audioTrack.getPlayState() == AudioTrack.PLAYSTATE_PLAYING);
+
+ // Fixed size in bytes of each 10ms block of audio data that we ask for
+ // using callbacks to the native WebRTC client.
+ final int sizeInBytes = byteBuffer.capacity();
+
+ while (keepAlive) {
+ // Get 10ms of PCM data from the native WebRTC client. Audio data is
+ // written into the common ByteBuffer using the address that was
+ // cached at construction.
+ nativeGetPlayoutData(sizeInBytes, nativeAudioTrack);
+ // Write data until all data has been written to the audio sink.
+ // Upon return, the buffer position will have been advanced to reflect
+ // the amount of data that was successfully written to the AudioTrack.
+ assertTrue(sizeInBytes <= byteBuffer.remaining());
+ if (speakerMute) {
+ byteBuffer.clear();
+ byteBuffer.put(emptyBytes);
+ byteBuffer.position(0);
+ }
+ int bytesWritten = audioTrack.write(byteBuffer, sizeInBytes, AudioTrack.WRITE_BLOCKING);
+ if (bytesWritten != sizeInBytes) {
+ Logging.e(TAG, "AudioTrack.write played invalid number of bytes: " + bytesWritten);
+ // If a write() returns a negative value, an error has occurred.
+ // Stop playing and report an error in this case.
+ if (bytesWritten < 0) {
+ keepAlive = false;
+ reportWebRtcAudioTrackError("AudioTrack.write failed: " + bytesWritten);
+ }
+ }
+        // The byte buffer must be rewound since byteBuffer.position() is
+        // increased at each call to AudioTrack.write(). If we don't do this,
+        // the next call to AudioTrack.write() will fail.
+ byteBuffer.rewind();
+
+ // TODO(henrika): it is possible to create a delay estimate here by
+        // counting the number of written frames and subtracting the result from
+ // audioTrack.getPlaybackHeadPosition().
+ }
+
+ // Stops playing the audio data. Since the instance was created in
+ // MODE_STREAM mode, audio will stop playing after the last buffer that
+ // was written has been played.
+ if (audioTrack != null) {
+ Logging.d(TAG, "Calling AudioTrack.stop...");
+ try {
+ audioTrack.stop();
+ Logging.d(TAG, "AudioTrack.stop is done.");
+ } catch (IllegalStateException e) {
+ Logging.e(TAG, "AudioTrack.stop failed: " + e.getMessage());
+ }
+ }
+ }
+
+ // Stops the inner thread loop which results in calling AudioTrack.stop().
+ // Does not block the calling thread.
+ public void stopThread() {
+ Logging.d(TAG, "stopThread");
+ keepAlive = false;
+ }
+ }
+
+ WebRtcAudioTrack(long nativeAudioTrack) {
+ threadChecker.checkIsOnValidThread();
+ Logging.d(TAG, "ctor" + WebRtcAudioUtils.getThreadInfo());
+ this.nativeAudioTrack = nativeAudioTrack;
+ audioManager =
+ (AudioManager) ContextUtils.getApplicationContext().getSystemService(Context.AUDIO_SERVICE);
+ if (DEBUG) {
+ WebRtcAudioUtils.logDeviceInfo(TAG);
+ }
+ }
+
+ private int initPlayout(int sampleRate, int channels, double bufferSizeFactor) {
+ threadChecker.checkIsOnValidThread();
+ Logging.d(TAG,
+ "initPlayout(sampleRate=" + sampleRate + ", channels=" + channels
+ + ", bufferSizeFactor=" + bufferSizeFactor + ")");
+ final int bytesPerFrame = channels * (BITS_PER_SAMPLE / 8);
+ byteBuffer = ByteBuffer.allocateDirect(bytesPerFrame * (sampleRate / BUFFERS_PER_SECOND));
+ Logging.d(TAG, "byteBuffer.capacity: " + byteBuffer.capacity());
+ emptyBytes = new byte[byteBuffer.capacity()];
+    // Rather than passing the ByteBuffer with every callback (requiring
+    // the potentially expensive GetDirectBufferAddress), we simply have the
+    // native class cache the address to the memory once.
+ nativeCacheDirectBufferAddress(byteBuffer, nativeAudioTrack);
+
+    // Get the minimum buffer size required to successfully create an
+    // AudioTrack object in MODE_STREAM mode.
+ // Note that this size doesn't guarantee a smooth playback under load.
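+    // Illustrative arithmetic (actual values vary per device): if
+    // getMinBufferSize() returns 3840 bytes and bufferSizeFactor is 1.0,
+    // minBufferSizeInBytes becomes 3840, which is then checked against
+    // byteBuffer.capacity() below.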
+ final int channelConfig = channelCountToConfiguration(channels);
+ final int minBufferSizeInBytes = (int) (AudioTrack.getMinBufferSize(sampleRate, channelConfig,
+ AudioFormat.ENCODING_PCM_16BIT)
+ * bufferSizeFactor);
+ Logging.d(TAG, "minBufferSizeInBytes: " + minBufferSizeInBytes);
+ // For the streaming mode, data must be written to the audio sink in
+ // chunks of size (given by byteBuffer.capacity()) less than or equal
+  // to the total buffer size `minBufferSizeInBytes`. However, we have seen
+ // reports of "getMinBufferSize(): error querying hardware". Hence, it
+ // can happen that `minBufferSizeInBytes` contains an invalid value.
+ if (minBufferSizeInBytes < byteBuffer.capacity()) {
+ reportWebRtcAudioTrackInitError("AudioTrack.getMinBufferSize returns an invalid value.");
+ return -1;
+ }
+
+    // Ensure that the previous audio session was stopped correctly before
+    // trying to create a new AudioTrack.
+ if (audioTrack != null) {
+ reportWebRtcAudioTrackInitError("Conflict with existing AudioTrack.");
+ return -1;
+ }
+ try {
+ // Create an AudioTrack object and initialize its associated audio buffer.
+ // The size of this buffer determines how long an AudioTrack can play
+ // before running out of data.
+ // As we are on API level 21 or higher, it is possible to use a special AudioTrack
+ // constructor that uses AudioAttributes and AudioFormat as input. It allows us to
+ // supersede the notion of stream types for defining the behavior of audio playback,
+ // and to allow certain platforms or routing policies to use this information for more
+ // refined volume or routing decisions.
+ audioTrack = createAudioTrack(sampleRate, channelConfig, minBufferSizeInBytes);
+ } catch (IllegalArgumentException e) {
+ reportWebRtcAudioTrackInitError(e.getMessage());
+ releaseAudioResources();
+ return -1;
+ }
+
+ // It can happen that an AudioTrack is created but it was not successfully
+    // initialized upon creation. This seems to be the case e.g. when the maximum
+ // number of globally available audio tracks is exceeded.
+ if (audioTrack == null || audioTrack.getState() != AudioTrack.STATE_INITIALIZED) {
+ reportWebRtcAudioTrackInitError("Initialization of audio track failed.");
+ releaseAudioResources();
+ return -1;
+ }
+ logMainParameters();
+ logMainParametersExtended();
+ return minBufferSizeInBytes;
+ }
+
+ private boolean startPlayout() {
+ threadChecker.checkIsOnValidThread();
+ Logging.d(TAG, "startPlayout");
+ assertTrue(audioTrack != null);
+ assertTrue(audioThread == null);
+
+ // Starts playing an audio track.
+ try {
+ audioTrack.play();
+ } catch (IllegalStateException e) {
+ reportWebRtcAudioTrackStartError(AudioTrackStartErrorCode.AUDIO_TRACK_START_EXCEPTION,
+ "AudioTrack.play failed: " + e.getMessage());
+ releaseAudioResources();
+ return false;
+ }
+ if (audioTrack.getPlayState() != AudioTrack.PLAYSTATE_PLAYING) {
+ reportWebRtcAudioTrackStartError(
+ AudioTrackStartErrorCode.AUDIO_TRACK_START_STATE_MISMATCH,
+ "AudioTrack.play failed - incorrect state :"
+ + audioTrack.getPlayState());
+ releaseAudioResources();
+ return false;
+ }
+
+ // Create and start new high-priority thread which calls AudioTrack.write()
+ // and where we also call the native nativeGetPlayoutData() callback to
+ // request decoded audio from WebRTC.
+ audioThread = new AudioTrackThread("AudioTrackJavaThread");
+ audioThread.start();
+ return true;
+ }
+
+ private boolean stopPlayout() {
+ threadChecker.checkIsOnValidThread();
+ Logging.d(TAG, "stopPlayout");
+ assertTrue(audioThread != null);
+ logUnderrunCount();
+ audioThread.stopThread();
+
+ Logging.d(TAG, "Stopping the AudioTrackThread...");
+ audioThread.interrupt();
+ if (!ThreadUtils.joinUninterruptibly(audioThread, AUDIO_TRACK_THREAD_JOIN_TIMEOUT_MS)) {
+ Logging.e(TAG, "Join of AudioTrackThread timed out.");
+ WebRtcAudioUtils.logAudioState(TAG);
+ }
+ Logging.d(TAG, "AudioTrackThread has now been stopped.");
+ audioThread = null;
+ releaseAudioResources();
+ return true;
+ }
+
+ // Get max possible volume index for a phone call audio stream.
+ private int getStreamMaxVolume() {
+ threadChecker.checkIsOnValidThread();
+ Logging.d(TAG, "getStreamMaxVolume");
+ assertTrue(audioManager != null);
+ return audioManager.getStreamMaxVolume(AudioManager.STREAM_VOICE_CALL);
+ }
+
+ // Set current volume level for a phone call audio stream.
+ private boolean setStreamVolume(int volume) {
+ threadChecker.checkIsOnValidThread();
+ Logging.d(TAG, "setStreamVolume(" + volume + ")");
+ assertTrue(audioManager != null);
+ if (audioManager.isVolumeFixed()) {
+ Logging.e(TAG, "The device implements a fixed volume policy.");
+ return false;
+ }
+ audioManager.setStreamVolume(AudioManager.STREAM_VOICE_CALL, volume, 0);
+ return true;
+ }
+
+ /** Get current volume level for a phone call audio stream. */
+ private int getStreamVolume() {
+ threadChecker.checkIsOnValidThread();
+ Logging.d(TAG, "getStreamVolume");
+ assertTrue(audioManager != null);
+ return audioManager.getStreamVolume(AudioManager.STREAM_VOICE_CALL);
+ }
+
+ private void logMainParameters() {
+ Logging.d(TAG, "AudioTrack: "
+ + "session ID: " + audioTrack.getAudioSessionId() + ", "
+ + "channels: " + audioTrack.getChannelCount() + ", "
+ + "sample rate: " + audioTrack.getSampleRate() + ", "
+ // Gain (>=1.0) expressed as linear multiplier on sample values.
+ + "max gain: " + AudioTrack.getMaxVolume());
+ }
+
+  // Creates an AudioTrack instance using AudioAttributes and AudioFormat as input.
+ // It allows certain platforms or routing policies to use this information for more
+ // refined volume or routing decisions.
+ private static AudioTrack createAudioTrack(
+ int sampleRateInHz, int channelConfig, int bufferSizeInBytes) {
+ Logging.d(TAG, "createAudioTrack");
+ // TODO(henrika): use setPerformanceMode(int) with PERFORMANCE_MODE_LOW_LATENCY to control
+  // performance when Android O is supported. Add some logging in the meantime.
+ final int nativeOutputSampleRate =
+ AudioTrack.getNativeOutputSampleRate(AudioManager.STREAM_VOICE_CALL);
+ Logging.d(TAG, "nativeOutputSampleRate: " + nativeOutputSampleRate);
+ if (sampleRateInHz != nativeOutputSampleRate) {
+ Logging.w(TAG, "Unable to use fast mode since requested sample rate is not native");
+ }
+ if (usageAttribute != DEFAULT_USAGE) {
+ Logging.w(TAG, "A non default usage attribute is used: " + usageAttribute);
+ }
+ // Create an audio track where the audio usage is for VoIP and the content type is speech.
+ return new AudioTrack(
+ new AudioAttributes.Builder()
+ .setUsage(usageAttribute)
+ .setContentType(AudioAttributes.CONTENT_TYPE_SPEECH)
+ .build(),
+ new AudioFormat.Builder()
+ .setEncoding(AudioFormat.ENCODING_PCM_16BIT)
+ .setSampleRate(sampleRateInHz)
+ .setChannelMask(channelConfig)
+ .build(),
+ bufferSizeInBytes,
+ AudioTrack.MODE_STREAM,
+ AudioManager.AUDIO_SESSION_ID_GENERATE);
+ }
+
+ private void logBufferSizeInFrames() {
+ if (Build.VERSION.SDK_INT >= 23) {
+ Logging.d(TAG, "AudioTrack: "
+ // The effective size of the AudioTrack buffer that the app writes to.
+ + "buffer size in frames: " + audioTrack.getBufferSizeInFrames());
+ }
+ }
+
+ private int getBufferSizeInFrames() {
+ if (Build.VERSION.SDK_INT >= 23) {
+ return audioTrack.getBufferSizeInFrames();
+ }
+ return -1;
+ }
+
+ private void logBufferCapacityInFrames() {
+ if (Build.VERSION.SDK_INT >= 24) {
+ Logging.d(TAG,
+ "AudioTrack: "
+ // Maximum size of the AudioTrack buffer in frames.
+ + "buffer capacity in frames: " + audioTrack.getBufferCapacityInFrames());
+ }
+ }
+
+ private void logMainParametersExtended() {
+ logBufferSizeInFrames();
+ logBufferCapacityInFrames();
+ }
+
+ // Prints the number of underrun occurrences in the application-level write
+ // buffer since the AudioTrack was created. An underrun occurs if the app does
+ // not write audio data quickly enough, causing the buffer to underflow and a
+ // potential audio glitch.
+ // TODO(henrika): keep track of this value in the field and possibly add new
+ // UMA stat if needed.
+ private void logUnderrunCount() {
+ if (Build.VERSION.SDK_INT >= 24) {
+ Logging.d(TAG, "underrun count: " + audioTrack.getUnderrunCount());
+ }
+ }
+
+ // Helper method which throws an exception when an assertion has failed.
+ private static void assertTrue(boolean condition) {
+ if (!condition) {
+ throw new AssertionError("Expected condition to be true");
+ }
+ }
+
+ private int channelCountToConfiguration(int channels) {
+ return (channels == 1 ? AudioFormat.CHANNEL_OUT_MONO : AudioFormat.CHANNEL_OUT_STEREO);
+ }
+
+  private native void nativeCacheDirectBufferAddress(ByteBuffer byteBuffer, long nativeAudioTrack);
+
+  private native void nativeGetPlayoutData(int bytes, long nativeAudioTrack);
+
+ // Sets all samples to be played out to zero if `mute` is true, i.e.,
+ // ensures that the speaker is muted.
+ public static void setSpeakerMute(boolean mute) {
+ Logging.w(TAG, "setSpeakerMute(" + mute + ")");
+ speakerMute = mute;
+ }
+
+ // Releases the native AudioTrack resources.
+ private void releaseAudioResources() {
+ Logging.d(TAG, "releaseAudioResources");
+ if (audioTrack != null) {
+ audioTrack.release();
+ audioTrack = null;
+ }
+ }
+
+ private void reportWebRtcAudioTrackInitError(String errorMessage) {
+ Logging.e(TAG, "Init playout error: " + errorMessage);
+ WebRtcAudioUtils.logAudioState(TAG);
+ if (errorCallbackOld != null) {
+ errorCallbackOld.onWebRtcAudioTrackInitError(errorMessage);
+ }
+ if (errorCallback != null) {
+ errorCallback.onWebRtcAudioTrackInitError(errorMessage);
+ }
+ }
+
+ private void reportWebRtcAudioTrackStartError(
+ AudioTrackStartErrorCode errorCode, String errorMessage) {
+ Logging.e(TAG, "Start playout error: " + errorCode + ". " + errorMessage);
+ WebRtcAudioUtils.logAudioState(TAG);
+ if (errorCallbackOld != null) {
+ errorCallbackOld.onWebRtcAudioTrackStartError(errorMessage);
+ }
+ if (errorCallback != null) {
+ errorCallback.onWebRtcAudioTrackStartError(errorCode, errorMessage);
+ }
+ }
+
+ private void reportWebRtcAudioTrackError(String errorMessage) {
+ Logging.e(TAG, "Run-time playback error: " + errorMessage);
+ WebRtcAudioUtils.logAudioState(TAG);
+ if (errorCallbackOld != null) {
+ errorCallbackOld.onWebRtcAudioTrackError(errorMessage);
+ }
+ if (errorCallback != null) {
+ errorCallback.onWebRtcAudioTrackError(errorMessage);
+ }
+ }
+}
diff --git a/third_party/libwebrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioUtils.java b/third_party/libwebrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioUtils.java
new file mode 100644
index 0000000000..afd3d429af
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioUtils.java
@@ -0,0 +1,382 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+package org.webrtc.voiceengine;
+
+import static android.media.AudioManager.MODE_IN_CALL;
+import static android.media.AudioManager.MODE_IN_COMMUNICATION;
+import static android.media.AudioManager.MODE_NORMAL;
+import static android.media.AudioManager.MODE_RINGTONE;
+
+import android.annotation.SuppressLint;
+import android.content.Context;
+import android.content.pm.PackageManager;
+import android.media.AudioDeviceInfo;
+import android.media.AudioManager;
+import android.os.Build;
+import java.lang.Thread;
+import java.util.Arrays;
+import java.util.List;
+import org.webrtc.ContextUtils;
+import org.webrtc.Logging;
+
+public final class WebRtcAudioUtils {
+ private static final String TAG = "WebRtcAudioUtils";
+
+ // List of devices where we have seen issues (e.g. bad audio quality) using
+ // the low latency output mode in combination with OpenSL ES.
+ // The device name is given by Build.MODEL.
+ private static final String[] BLACKLISTED_OPEN_SL_ES_MODELS = new String[] {
+ // It is recommended to maintain a list of blacklisted models outside
+ // this package and instead call
+ // WebRtcAudioManager.setBlacklistDeviceForOpenSLESUsage(true)
+ // from the client for devices where OpenSL ES shall be disabled.
+ };
+
+  // List of devices where it has been verified that the built-in AEC effect
+  // is bad and where it makes sense to avoid using it and rely on the
+  // native WebRTC version instead. The device name is given by Build.MODEL.
+ private static final String[] BLACKLISTED_AEC_MODELS = new String[] {
+ // It is recommended to maintain a list of blacklisted models outside
+ // this package and instead call setWebRtcBasedAcousticEchoCanceler(true)
+ // from the client for devices where the built-in AEC shall be disabled.
+ };
+ private static final String[] BLACKLISTED_NS_MODELS = new String[] {
+ // It is recommended to maintain a list of blacklisted models outside
+ // this package and instead call setWebRtcBasedNoiseSuppressor(true)
+ // from the client for devices where the built-in NS shall be disabled.
+ };
+
+ // Use 16kHz as the default sample rate. A higher sample rate might prevent
+ // us from supporting communication mode on some older (e.g. ICS) devices.
+ private static final int DEFAULT_SAMPLE_RATE_HZ = 16000;
+ private static int defaultSampleRateHz = DEFAULT_SAMPLE_RATE_HZ;
+ // Set to true if setDefaultSampleRateHz() has been called.
+ private static boolean isDefaultSampleRateOverridden;
+
+  // By default, utilize hardware-based audio effects for AEC and NS when
+  // available.
+ private static boolean useWebRtcBasedAcousticEchoCanceler;
+ private static boolean useWebRtcBasedNoiseSuppressor;
+
+  // Call these methods if any hardware-based effect shall be replaced by a
+  // software-based version provided by the WebRTC stack.
+ // TODO(bugs.webrtc.org/8491): Remove NoSynchronizedMethodCheck suppression.
+ @SuppressWarnings("NoSynchronizedMethodCheck")
+ public static synchronized void setWebRtcBasedAcousticEchoCanceler(boolean enable) {
+ useWebRtcBasedAcousticEchoCanceler = enable;
+ }
+
+ // TODO(bugs.webrtc.org/8491): Remove NoSynchronizedMethodCheck suppression.
+ @SuppressWarnings("NoSynchronizedMethodCheck")
+ public static synchronized void setWebRtcBasedNoiseSuppressor(boolean enable) {
+ useWebRtcBasedNoiseSuppressor = enable;
+ }
+
+ // TODO(bugs.webrtc.org/8491): Remove NoSynchronizedMethodCheck suppression.
+ @SuppressWarnings("NoSynchronizedMethodCheck")
+ public static synchronized void setWebRtcBasedAutomaticGainControl(boolean enable) {
+ // TODO(henrika): deprecated; remove when no longer used by any client.
+ Logging.w(TAG, "setWebRtcBasedAutomaticGainControl() is deprecated");
+ }
+
+ // TODO(bugs.webrtc.org/8491): Remove NoSynchronizedMethodCheck suppression.
+ @SuppressWarnings("NoSynchronizedMethodCheck")
+ public static synchronized boolean useWebRtcBasedAcousticEchoCanceler() {
+ if (useWebRtcBasedAcousticEchoCanceler) {
+ Logging.w(TAG, "Overriding default behavior; now using WebRTC AEC!");
+ }
+ return useWebRtcBasedAcousticEchoCanceler;
+ }
+
+ // TODO(bugs.webrtc.org/8491): Remove NoSynchronizedMethodCheck suppression.
+ @SuppressWarnings("NoSynchronizedMethodCheck")
+ public static synchronized boolean useWebRtcBasedNoiseSuppressor() {
+ if (useWebRtcBasedNoiseSuppressor) {
+ Logging.w(TAG, "Overriding default behavior; now using WebRTC NS!");
+ }
+ return useWebRtcBasedNoiseSuppressor;
+ }
+
+ // TODO(henrika): deprecated; remove when no longer used by any client.
+ // TODO(bugs.webrtc.org/8491): Remove NoSynchronizedMethodCheck suppression.
+ @SuppressWarnings("NoSynchronizedMethodCheck")
+ public static synchronized boolean useWebRtcBasedAutomaticGainControl() {
+ // Always return true here to avoid trying to use any built-in AGC.
+ return true;
+ }
+
+ // Returns true if the device supports an audio effect (AEC or NS).
+  // Four conditions must be fulfilled for these functions to return true:
+ // 1) the platform must support the built-in (HW) effect,
+ // 2) explicit use (override) of a WebRTC based version must not be set,
+ // 3) the device must not be blacklisted for use of the effect, and
+ // 4) the UUID of the effect must be approved (some UUIDs can be excluded).
+ public static boolean isAcousticEchoCancelerSupported() {
+ return WebRtcAudioEffects.canUseAcousticEchoCanceler();
+ }
+ public static boolean isNoiseSuppressorSupported() {
+ return WebRtcAudioEffects.canUseNoiseSuppressor();
+ }
+ // TODO(henrika): deprecated; remove when no longer used by any client.
+ public static boolean isAutomaticGainControlSupported() {
+ // Always return false here to avoid trying to use any built-in AGC.
+ return false;
+ }
+
+ // Call this method if the default handling of querying the native sample
+ // rate shall be overridden. Can be useful on some devices where the
+ // available Android APIs are known to return invalid results.
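+  // Example (illustrative): WebRtcAudioUtils.setDefaultSampleRateHz(48000);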
+ // TODO(bugs.webrtc.org/8491): Remove NoSynchronizedMethodCheck suppression.
+ @SuppressWarnings("NoSynchronizedMethodCheck")
+ public static synchronized void setDefaultSampleRateHz(int sampleRateHz) {
+ isDefaultSampleRateOverridden = true;
+ defaultSampleRateHz = sampleRateHz;
+ }
+
+ // TODO(bugs.webrtc.org/8491): Remove NoSynchronizedMethodCheck suppression.
+ @SuppressWarnings("NoSynchronizedMethodCheck")
+ public static synchronized boolean isDefaultSampleRateOverridden() {
+ return isDefaultSampleRateOverridden;
+ }
+
+ // TODO(bugs.webrtc.org/8491): Remove NoSynchronizedMethodCheck suppression.
+ @SuppressWarnings("NoSynchronizedMethodCheck")
+ public static synchronized int getDefaultSampleRateHz() {
+ return defaultSampleRateHz;
+ }
+
+ public static List<String> getBlackListedModelsForAecUsage() {
+ return Arrays.asList(WebRtcAudioUtils.BLACKLISTED_AEC_MODELS);
+ }
+
+ public static List<String> getBlackListedModelsForNsUsage() {
+ return Arrays.asList(WebRtcAudioUtils.BLACKLISTED_NS_MODELS);
+ }
+
+ // Helper method for building a string of thread information.
+ public static String getThreadInfo() {
+ return "@[name=" + Thread.currentThread().getName() + ", id=" + Thread.currentThread().getId()
+ + "]";
+ }
+
+  // Returns true if we're running on an emulator.
+ public static boolean runningOnEmulator() {
+ return Build.HARDWARE.equals("goldfish") && Build.BRAND.startsWith("generic_");
+ }
+
+ // Returns true if the device is blacklisted for OpenSL ES usage.
+ public static boolean deviceIsBlacklistedForOpenSLESUsage() {
+ List<String> blackListedModels = Arrays.asList(BLACKLISTED_OPEN_SL_ES_MODELS);
+ return blackListedModels.contains(Build.MODEL);
+ }
+
+ // Information about the current build, taken from system properties.
+ static void logDeviceInfo(String tag) {
+ Logging.d(tag, "Android SDK: " + Build.VERSION.SDK_INT + ", "
+ + "Release: " + Build.VERSION.RELEASE + ", "
+ + "Brand: " + Build.BRAND + ", "
+ + "Device: " + Build.DEVICE + ", "
+ + "Id: " + Build.ID + ", "
+ + "Hardware: " + Build.HARDWARE + ", "
+ + "Manufacturer: " + Build.MANUFACTURER + ", "
+ + "Model: " + Build.MODEL + ", "
+ + "Product: " + Build.PRODUCT);
+ }
+
+ // Logs information about the current audio state. The idea is to call this
+ // method when errors are detected to log under what conditions the error
+  // occurred. Hopefully it will provide clues about what the root cause might be.
+ static void logAudioState(String tag) {
+ logDeviceInfo(tag);
+ final Context context = ContextUtils.getApplicationContext();
+ final AudioManager audioManager =
+ (AudioManager) context.getSystemService(Context.AUDIO_SERVICE);
+ logAudioStateBasic(tag, audioManager);
+ logAudioStateVolume(tag, audioManager);
+ logAudioDeviceInfo(tag, audioManager);
+ }
+
+ // Reports basic audio statistics.
+ private static void logAudioStateBasic(String tag, AudioManager audioManager) {
+ Logging.d(tag, "Audio State: "
+ + "audio mode: " + modeToString(audioManager.getMode()) + ", "
+ + "has mic: " + hasMicrophone() + ", "
+ + "mic muted: " + audioManager.isMicrophoneMute() + ", "
+ + "music active: " + audioManager.isMusicActive() + ", "
+ + "speakerphone: " + audioManager.isSpeakerphoneOn() + ", "
+ + "BT SCO: " + audioManager.isBluetoothScoOn());
+ }
+
+ // Adds volume information for all possible stream types.
+ private static void logAudioStateVolume(String tag, AudioManager audioManager) {
+ final int[] streams = {
+ AudioManager.STREAM_VOICE_CALL,
+ AudioManager.STREAM_MUSIC,
+ AudioManager.STREAM_RING,
+ AudioManager.STREAM_ALARM,
+ AudioManager.STREAM_NOTIFICATION,
+ AudioManager.STREAM_SYSTEM
+ };
+ Logging.d(tag, "Audio State: ");
+ // Some devices may not have volume controls and might use a fixed volume.
+ boolean fixedVolume = audioManager.isVolumeFixed();
+ Logging.d(tag, " fixed volume=" + fixedVolume);
+ if (!fixedVolume) {
+ for (int stream : streams) {
+ StringBuilder info = new StringBuilder();
+ info.append(" " + streamTypeToString(stream) + ": ");
+ info.append("volume=").append(audioManager.getStreamVolume(stream));
+ info.append(", max=").append(audioManager.getStreamMaxVolume(stream));
+ logIsStreamMute(tag, audioManager, stream, info);
+ Logging.d(tag, info.toString());
+ }
+ }
+ }
+
+ private static void logIsStreamMute(
+ String tag, AudioManager audioManager, int stream, StringBuilder info) {
+ if (Build.VERSION.SDK_INT >= 23) {
+ info.append(", muted=").append(audioManager.isStreamMute(stream));
+ }
+ }
+
+ // Moz linting complains even though AudioManager.GET_DEVICES_ALL is
+ // listed in the docs here:
+ // https://developer.android.com/reference/android/media/AudioManager#GET_DEVICES_ALL
+ @SuppressLint("WrongConstant")
+ private static void logAudioDeviceInfo(String tag, AudioManager audioManager) {
+ if (Build.VERSION.SDK_INT < 23) {
+ return;
+ }
+ final AudioDeviceInfo[] devices =
+ audioManager.getDevices(AudioManager.GET_DEVICES_ALL);
+ if (devices.length == 0) {
+ return;
+ }
+ Logging.d(tag, "Audio Devices: ");
+ for (AudioDeviceInfo device : devices) {
+ StringBuilder info = new StringBuilder();
+ info.append(" ").append(deviceTypeToString(device.getType()));
+ info.append(device.isSource() ? "(in): " : "(out): ");
+ // An empty array indicates that the device supports arbitrary channel counts.
+ if (device.getChannelCounts().length > 0) {
+ info.append("channels=").append(Arrays.toString(device.getChannelCounts()));
+ info.append(", ");
+ }
+ if (device.getEncodings().length > 0) {
+ // Examples: ENCODING_PCM_16BIT = 2, ENCODING_PCM_FLOAT = 4.
+ info.append("encodings=").append(Arrays.toString(device.getEncodings()));
+ info.append(", ");
+ }
+ if (device.getSampleRates().length > 0) {
+ info.append("sample rates=").append(Arrays.toString(device.getSampleRates()));
+ info.append(", ");
+ }
+ info.append("id=").append(device.getId());
+ Logging.d(tag, info.toString());
+ }
+ }
+
+ // Converts media.AudioManager modes into local string representation.
+ static String modeToString(int mode) {
+ switch (mode) {
+ case MODE_IN_CALL:
+ return "MODE_IN_CALL";
+ case MODE_IN_COMMUNICATION:
+ return "MODE_IN_COMMUNICATION";
+ case MODE_NORMAL:
+ return "MODE_NORMAL";
+ case MODE_RINGTONE:
+ return "MODE_RINGTONE";
+ default:
+ return "MODE_INVALID";
+ }
+ }
+
+ private static String streamTypeToString(int stream) {
+ switch(stream) {
+ case AudioManager.STREAM_VOICE_CALL:
+ return "STREAM_VOICE_CALL";
+ case AudioManager.STREAM_MUSIC:
+ return "STREAM_MUSIC";
+ case AudioManager.STREAM_RING:
+ return "STREAM_RING";
+ case AudioManager.STREAM_ALARM:
+ return "STREAM_ALARM";
+ case AudioManager.STREAM_NOTIFICATION:
+ return "STREAM_NOTIFICATION";
+ case AudioManager.STREAM_SYSTEM:
+ return "STREAM_SYSTEM";
+ default:
+ return "STREAM_INVALID";
+ }
+ }
+
+ // Converts AudioDeviceInfo types to local string representation.
+ private static String deviceTypeToString(int type) {
+ switch (type) {
+ case AudioDeviceInfo.TYPE_UNKNOWN:
+ return "TYPE_UNKNOWN";
+ case AudioDeviceInfo.TYPE_BUILTIN_EARPIECE:
+ return "TYPE_BUILTIN_EARPIECE";
+ case AudioDeviceInfo.TYPE_BUILTIN_SPEAKER:
+ return "TYPE_BUILTIN_SPEAKER";
+ case AudioDeviceInfo.TYPE_WIRED_HEADSET:
+ return "TYPE_WIRED_HEADSET";
+ case AudioDeviceInfo.TYPE_WIRED_HEADPHONES:
+ return "TYPE_WIRED_HEADPHONES";
+ case AudioDeviceInfo.TYPE_LINE_ANALOG:
+ return "TYPE_LINE_ANALOG";
+ case AudioDeviceInfo.TYPE_LINE_DIGITAL:
+ return "TYPE_LINE_DIGITAL";
+ case AudioDeviceInfo.TYPE_BLUETOOTH_SCO:
+ return "TYPE_BLUETOOTH_SCO";
+ case AudioDeviceInfo.TYPE_BLUETOOTH_A2DP:
+ return "TYPE_BLUETOOTH_A2DP";
+ case AudioDeviceInfo.TYPE_HDMI:
+ return "TYPE_HDMI";
+ case AudioDeviceInfo.TYPE_HDMI_ARC:
+ return "TYPE_HDMI_ARC";
+ case AudioDeviceInfo.TYPE_USB_DEVICE:
+ return "TYPE_USB_DEVICE";
+ case AudioDeviceInfo.TYPE_USB_ACCESSORY:
+ return "TYPE_USB_ACCESSORY";
+ case AudioDeviceInfo.TYPE_DOCK:
+ return "TYPE_DOCK";
+ case AudioDeviceInfo.TYPE_FM:
+ return "TYPE_FM";
+ case AudioDeviceInfo.TYPE_BUILTIN_MIC:
+ return "TYPE_BUILTIN_MIC";
+ case AudioDeviceInfo.TYPE_FM_TUNER:
+ return "TYPE_FM_TUNER";
+ case AudioDeviceInfo.TYPE_TV_TUNER:
+ return "TYPE_TV_TUNER";
+ case AudioDeviceInfo.TYPE_TELEPHONY:
+ return "TYPE_TELEPHONY";
+ case AudioDeviceInfo.TYPE_AUX_LINE:
+ return "TYPE_AUX_LINE";
+ case AudioDeviceInfo.TYPE_IP:
+ return "TYPE_IP";
+ case AudioDeviceInfo.TYPE_BUS:
+ return "TYPE_BUS";
+ case AudioDeviceInfo.TYPE_USB_HEADSET:
+ return "TYPE_USB_HEADSET";
+ default:
+ return "TYPE_UNKNOWN";
+ }
+ }
+
+ // Returns true if the device can record audio via a microphone.
+ private static boolean hasMicrophone() {
+ return ContextUtils.getApplicationContext().getPackageManager().hasSystemFeature(
+ PackageManager.FEATURE_MICROPHONE);
+ }
+}
diff --git a/third_party/libwebrtc/modules/audio_device/android/opensles_common.cc b/third_party/libwebrtc/modules/audio_device/android/opensles_common.cc
new file mode 100644
index 0000000000..019714dae4
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/android/opensles_common.cc
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_device/android/opensles_common.h"
+
+#include <SLES/OpenSLES.h>
+
+#include "rtc_base/arraysize.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+// Returns a string representation given an integer SL_RESULT_XXX code.
+// The mapping can be found in <SLES/OpenSLES.h>.
+const char* GetSLErrorString(size_t code) {
+ static const char* sl_error_strings[] = {
+ "SL_RESULT_SUCCESS", // 0
+ "SL_RESULT_PRECONDITIONS_VIOLATED", // 1
+ "SL_RESULT_PARAMETER_INVALID", // 2
+ "SL_RESULT_MEMORY_FAILURE", // 3
+ "SL_RESULT_RESOURCE_ERROR", // 4
+ "SL_RESULT_RESOURCE_LOST", // 5
+ "SL_RESULT_IO_ERROR", // 6
+ "SL_RESULT_BUFFER_INSUFFICIENT", // 7
+ "SL_RESULT_CONTENT_CORRUPTED", // 8
+ "SL_RESULT_CONTENT_UNSUPPORTED", // 9
+ "SL_RESULT_CONTENT_NOT_FOUND", // 10
+ "SL_RESULT_PERMISSION_DENIED", // 11
+ "SL_RESULT_FEATURE_UNSUPPORTED", // 12
+ "SL_RESULT_INTERNAL_ERROR", // 13
+ "SL_RESULT_UNKNOWN_ERROR", // 14
+ "SL_RESULT_OPERATION_ABORTED", // 15
+ "SL_RESULT_CONTROL_LOST", // 16
+ };
+
+ if (code >= arraysize(sl_error_strings)) {
+ return "SL_RESULT_UNKNOWN_ERROR";
+ }
+ return sl_error_strings[code];
+}
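+// Example: GetSLErrorString(SL_RESULT_PARAMETER_INVALID) returns
+// "SL_RESULT_PARAMETER_INVALID", since that code maps to index 2 in the
+// table above.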
+
+SLDataFormat_PCM CreatePCMConfiguration(size_t channels,
+ int sample_rate,
+ size_t bits_per_sample) {
+ RTC_CHECK_EQ(bits_per_sample, SL_PCMSAMPLEFORMAT_FIXED_16);
+ SLDataFormat_PCM format;
+ format.formatType = SL_DATAFORMAT_PCM;
+ format.numChannels = static_cast<SLuint32>(channels);
+  // Note that the sample rate unit is actually milliHertz, not Hertz.
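+  // For example, SL_SAMPLINGRATE_48 is defined as 48000000, i.e. 48 kHz
+  // expressed in milliHertz.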
+ switch (sample_rate) {
+ case 8000:
+ format.samplesPerSec = SL_SAMPLINGRATE_8;
+ break;
+ case 16000:
+ format.samplesPerSec = SL_SAMPLINGRATE_16;
+ break;
+ case 22050:
+ format.samplesPerSec = SL_SAMPLINGRATE_22_05;
+ break;
+ case 32000:
+ format.samplesPerSec = SL_SAMPLINGRATE_32;
+ break;
+ case 44100:
+ format.samplesPerSec = SL_SAMPLINGRATE_44_1;
+ break;
+ case 48000:
+ format.samplesPerSec = SL_SAMPLINGRATE_48;
+ break;
+ case 64000:
+ format.samplesPerSec = SL_SAMPLINGRATE_64;
+ break;
+ case 88200:
+ format.samplesPerSec = SL_SAMPLINGRATE_88_2;
+ break;
+ case 96000:
+ format.samplesPerSec = SL_SAMPLINGRATE_96;
+ break;
+ default:
+ RTC_CHECK(false) << "Unsupported sample rate: " << sample_rate;
+ break;
+ }
+ format.bitsPerSample = SL_PCMSAMPLEFORMAT_FIXED_16;
+ format.containerSize = SL_PCMSAMPLEFORMAT_FIXED_16;
+ format.endianness = SL_BYTEORDER_LITTLEENDIAN;
+ if (format.numChannels == 1) {
+ format.channelMask = SL_SPEAKER_FRONT_CENTER;
+ } else if (format.numChannels == 2) {
+ format.channelMask = SL_SPEAKER_FRONT_LEFT | SL_SPEAKER_FRONT_RIGHT;
+ } else {
+ RTC_CHECK(false) << "Unsupported number of channels: "
+ << format.numChannels;
+ }
+ return format;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_device/android/opensles_common.h b/third_party/libwebrtc/modules/audio_device/android/opensles_common.h
new file mode 100644
index 0000000000..438c522072
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/android/opensles_common.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_DEVICE_ANDROID_OPENSLES_COMMON_H_
+#define MODULES_AUDIO_DEVICE_ANDROID_OPENSLES_COMMON_H_
+
+#include <SLES/OpenSLES.h>
+#include <stddef.h>
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+// Returns a string representation given an integer SL_RESULT_XXX code.
+// The mapping can be found in <SLES/OpenSLES.h>.
+const char* GetSLErrorString(size_t code);
+
+// Configures an SL_DATAFORMAT_PCM structure based on native audio parameters.
+SLDataFormat_PCM CreatePCMConfiguration(size_t channels,
+ int sample_rate,
+ size_t bits_per_sample);
+
+// Helper class for using SLObjectItf interfaces.
+template <typename SLType, typename SLDerefType>
+class ScopedSLObject {
+ public:
+ ScopedSLObject() : obj_(nullptr) {}
+
+ ~ScopedSLObject() { Reset(); }
+
+ SLType* Receive() {
+ RTC_DCHECK(!obj_);
+ return &obj_;
+ }
+
+ SLDerefType operator->() { return *obj_; }
+
+ SLType Get() const { return obj_; }
+
+ void Reset() {
+ if (obj_) {
+ (*obj_)->Destroy(obj_);
+ obj_ = nullptr;
+ }
+ }
+
+ private:
+ SLType obj_;
+};
+
+typedef ScopedSLObject<SLObjectItf, const SLObjectItf_*> ScopedSLObjectItf;
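+
+// Example usage (illustrative sketch):
+//   ScopedSLObjectItf engine;
+//   if (slCreateEngine(engine.Receive(), 0, nullptr, 0, nullptr, nullptr) ==
+//       SL_RESULT_SUCCESS) {
+//     engine->Realize(engine.Get(), SL_BOOLEAN_FALSE);
+//   }
+//   // The underlying object is destroyed when `engine` goes out of scope.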
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_DEVICE_ANDROID_OPENSLES_COMMON_H_
diff --git a/third_party/libwebrtc/modules/audio_device/android/opensles_player.cc b/third_party/libwebrtc/modules/audio_device/android/opensles_player.cc
new file mode 100644
index 0000000000..f2b3a37194
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/android/opensles_player.cc
@@ -0,0 +1,434 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_device/android/opensles_player.h"
+
+#include <android/log.h>
+
+#include <memory>
+
+#include "api/array_view.h"
+#include "modules/audio_device/android/audio_common.h"
+#include "modules/audio_device/android/audio_manager.h"
+#include "modules/audio_device/fine_audio_buffer.h"
+#include "rtc_base/arraysize.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/platform_thread.h"
+#include "rtc_base/time_utils.h"
+
+#define TAG "OpenSLESPlayer"
+#define ALOGV(...) __android_log_print(ANDROID_LOG_VERBOSE, TAG, __VA_ARGS__)
+#define ALOGD(...) __android_log_print(ANDROID_LOG_DEBUG, TAG, __VA_ARGS__)
+#define ALOGE(...) __android_log_print(ANDROID_LOG_ERROR, TAG, __VA_ARGS__)
+#define ALOGW(...) __android_log_print(ANDROID_LOG_WARN, TAG, __VA_ARGS__)
+#define ALOGI(...) __android_log_print(ANDROID_LOG_INFO, TAG, __VA_ARGS__)
+
+#define RETURN_ON_ERROR(op, ...) \
+ do { \
+ SLresult err = (op); \
+ if (err != SL_RESULT_SUCCESS) { \
+ ALOGE("%s failed: %s", #op, GetSLErrorString(err)); \
+ return __VA_ARGS__; \
+ } \
+ } while (0)
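+
+// Example (illustrative): RETURN_ON_ERROR((*player_)->SetPlayState(player_,
+// SL_PLAYSTATE_PLAYING), -1) logs the failing expression and returns -1 if
+// the call does not succeed.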
+
+namespace webrtc {
+
+OpenSLESPlayer::OpenSLESPlayer(AudioManager* audio_manager)
+ : audio_manager_(audio_manager),
+ audio_parameters_(audio_manager->GetPlayoutAudioParameters()),
+ audio_device_buffer_(nullptr),
+ initialized_(false),
+ playing_(false),
+ buffer_index_(0),
+ engine_(nullptr),
+ player_(nullptr),
+ simple_buffer_queue_(nullptr),
+ volume_(nullptr),
+ last_play_time_(0) {
+ ALOGD("ctor[tid=%d]", rtc::CurrentThreadId());
+ // Use native audio output parameters provided by the audio manager and
+ // define the PCM format structure.
+ pcm_format_ = CreatePCMConfiguration(audio_parameters_.channels(),
+ audio_parameters_.sample_rate(),
+ audio_parameters_.bits_per_sample());
+ // Detach from this thread since we want to use the checker to verify calls
+ // from the internal audio thread.
+ thread_checker_opensles_.Detach();
+}
+
+OpenSLESPlayer::~OpenSLESPlayer() {
+ ALOGD("dtor[tid=%d]", rtc::CurrentThreadId());
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ Terminate();
+ DestroyAudioPlayer();
+ DestroyMix();
+ engine_ = nullptr;
+ RTC_DCHECK(!engine_);
+ RTC_DCHECK(!output_mix_.Get());
+ RTC_DCHECK(!player_);
+ RTC_DCHECK(!simple_buffer_queue_);
+ RTC_DCHECK(!volume_);
+}
+
+int OpenSLESPlayer::Init() {
+ ALOGD("Init[tid=%d]", rtc::CurrentThreadId());
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ if (audio_parameters_.channels() == 2) {
+ ALOGW("Stereo mode is enabled");
+ }
+ return 0;
+}
+
+int OpenSLESPlayer::Terminate() {
+ ALOGD("Terminate[tid=%d]", rtc::CurrentThreadId());
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ StopPlayout();
+ return 0;
+}
+
+int OpenSLESPlayer::InitPlayout() {
+ ALOGD("InitPlayout[tid=%d]", rtc::CurrentThreadId());
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ RTC_DCHECK(!initialized_);
+ RTC_DCHECK(!playing_);
+ if (!ObtainEngineInterface()) {
+ ALOGE("Failed to obtain SL Engine interface");
+ return -1;
+ }
+ CreateMix();
+ initialized_ = true;
+ buffer_index_ = 0;
+ return 0;
+}
+
+int OpenSLESPlayer::StartPlayout() {
+ ALOGD("StartPlayout[tid=%d]", rtc::CurrentThreadId());
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ RTC_DCHECK(initialized_);
+ RTC_DCHECK(!playing_);
+ if (fine_audio_buffer_) {
+ fine_audio_buffer_->ResetPlayout();
+ }
+ // The number of lower latency audio players is limited, hence we create the
+ // audio player in Start() and destroy it in Stop().
+ CreateAudioPlayer();
+ // Fill up audio buffers to avoid initial glitch and to ensure that playback
+ // starts when mode is later changed to SL_PLAYSTATE_PLAYING.
+ // TODO(henrika): we can save some delay by only making one call to
+ // EnqueuePlayoutData. Most likely not worth the risk of adding a glitch.
+ last_play_time_ = rtc::Time();
+ for (int i = 0; i < kNumOfOpenSLESBuffers; ++i) {
+ EnqueuePlayoutData(true);
+ }
+ // Start streaming data by setting the play state to SL_PLAYSTATE_PLAYING.
+ // For a player object, when the object is in the SL_PLAYSTATE_PLAYING
+ // state, adding buffers will implicitly start playback.
+ RETURN_ON_ERROR((*player_)->SetPlayState(player_, SL_PLAYSTATE_PLAYING), -1);
+ playing_ = (GetPlayState() == SL_PLAYSTATE_PLAYING);
+ RTC_DCHECK(playing_);
+ return 0;
+}
+
+int OpenSLESPlayer::StopPlayout() {
+ ALOGD("StopPlayout[tid=%d]", rtc::CurrentThreadId());
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ if (!initialized_ || !playing_) {
+ return 0;
+ }
+ // Stop playing by setting the play state to SL_PLAYSTATE_STOPPED.
+ RETURN_ON_ERROR((*player_)->SetPlayState(player_, SL_PLAYSTATE_STOPPED), -1);
+ // Clear the buffer queue to flush out any remaining data.
+ RETURN_ON_ERROR((*simple_buffer_queue_)->Clear(simple_buffer_queue_), -1);
+#if RTC_DCHECK_IS_ON
+ // Verify that the buffer queue is in fact cleared as it should.
+ SLAndroidSimpleBufferQueueState buffer_queue_state;
+ (*simple_buffer_queue_)->GetState(simple_buffer_queue_, &buffer_queue_state);
+ RTC_DCHECK_EQ(0, buffer_queue_state.count);
+ RTC_DCHECK_EQ(0, buffer_queue_state.index);
+#endif
+ // The number of lower latency audio players is limited, hence we create the
+ // audio player in Start() and destroy it in Stop().
+ DestroyAudioPlayer();
+ thread_checker_opensles_.Detach();
+ initialized_ = false;
+ playing_ = false;
+ return 0;
+}
+
+int OpenSLESPlayer::SpeakerVolumeIsAvailable(bool& available) {
+ available = false;
+ return 0;
+}
+
+int OpenSLESPlayer::MaxSpeakerVolume(uint32_t& maxVolume) const {
+ return -1;
+}
+
+int OpenSLESPlayer::MinSpeakerVolume(uint32_t& minVolume) const {
+ return -1;
+}
+
+int OpenSLESPlayer::SetSpeakerVolume(uint32_t volume) {
+ return -1;
+}
+
+int OpenSLESPlayer::SpeakerVolume(uint32_t& volume) const {
+ return -1;
+}
+
+void OpenSLESPlayer::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
+ ALOGD("AttachAudioBuffer");
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ audio_device_buffer_ = audioBuffer;
+ const int sample_rate_hz = audio_parameters_.sample_rate();
+ ALOGD("SetPlayoutSampleRate(%d)", sample_rate_hz);
+ audio_device_buffer_->SetPlayoutSampleRate(sample_rate_hz);
+ const size_t channels = audio_parameters_.channels();
+ ALOGD("SetPlayoutChannels(%zu)", channels);
+ audio_device_buffer_->SetPlayoutChannels(channels);
+ RTC_CHECK(audio_device_buffer_);
+ AllocateDataBuffers();
+}
+
+void OpenSLESPlayer::AllocateDataBuffers() {
+ ALOGD("AllocateDataBuffers");
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ RTC_DCHECK(!simple_buffer_queue_);
+ RTC_CHECK(audio_device_buffer_);
+ // Create a modified audio buffer class which allows us to ask for any number
+  // of samples (and not only multiples of 10 ms) to match the native OpenSL ES
+ // buffer size. The native buffer size corresponds to the
+ // PROPERTY_OUTPUT_FRAMES_PER_BUFFER property which is the number of audio
+ // frames that the HAL (Hardware Abstraction Layer) buffer can hold. It is
+ // recommended to construct audio buffers so that they contain an exact
+ // multiple of this number. If so, callbacks will occur at regular intervals,
+ // which reduces jitter.
+ const size_t buffer_size_in_samples =
+ audio_parameters_.frames_per_buffer() * audio_parameters_.channels();
+ ALOGD("native buffer size: %zu", buffer_size_in_samples);
+ ALOGD("native buffer size in ms: %.2f",
+ audio_parameters_.GetBufferSizeInMilliseconds());
+ fine_audio_buffer_ = std::make_unique<FineAudioBuffer>(audio_device_buffer_);
+  // Allocate memory for the audio buffers.
+ for (int i = 0; i < kNumOfOpenSLESBuffers; ++i) {
+ audio_buffers_[i].reset(new SLint16[buffer_size_in_samples]);
+ }
+}
+
+bool OpenSLESPlayer::ObtainEngineInterface() {
+ ALOGD("ObtainEngineInterface");
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ if (engine_)
+ return true;
+ // Get access to (or create if not already existing) the global OpenSL Engine
+ // object.
+ SLObjectItf engine_object = audio_manager_->GetOpenSLEngine();
+ if (engine_object == nullptr) {
+ ALOGE("Failed to access the global OpenSL engine");
+ return false;
+ }
+ // Get the SL Engine Interface which is implicit.
+ RETURN_ON_ERROR(
+ (*engine_object)->GetInterface(engine_object, SL_IID_ENGINE, &engine_),
+ false);
+ return true;
+}
+
+bool OpenSLESPlayer::CreateMix() {
+ ALOGD("CreateMix");
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ RTC_DCHECK(engine_);
+ if (output_mix_.Get())
+ return true;
+
+  // Create the output mix on the engine object. No interfaces will be used.
+ RETURN_ON_ERROR((*engine_)->CreateOutputMix(engine_, output_mix_.Receive(), 0,
+ nullptr, nullptr),
+ false);
+ RETURN_ON_ERROR(output_mix_->Realize(output_mix_.Get(), SL_BOOLEAN_FALSE),
+ false);
+ return true;
+}
+
+void OpenSLESPlayer::DestroyMix() {
+ ALOGD("DestroyMix");
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ if (!output_mix_.Get())
+ return;
+ output_mix_.Reset();
+}
+
+bool OpenSLESPlayer::CreateAudioPlayer() {
+ ALOGD("CreateAudioPlayer");
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ RTC_DCHECK(output_mix_.Get());
+ if (player_object_.Get())
+ return true;
+ RTC_DCHECK(!player_);
+ RTC_DCHECK(!simple_buffer_queue_);
+ RTC_DCHECK(!volume_);
+
+  // Source: an Android simple buffer queue locator acts as the data source.
+ SLDataLocator_AndroidSimpleBufferQueue simple_buffer_queue = {
+ SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE,
+ static_cast<SLuint32>(kNumOfOpenSLESBuffers)};
+ SLDataSource audio_source = {&simple_buffer_queue, &pcm_format_};
+
+  // Sink: the output mix acts as the data sink.
+ SLDataLocator_OutputMix locator_output_mix = {SL_DATALOCATOR_OUTPUTMIX,
+ output_mix_.Get()};
+ SLDataSink audio_sink = {&locator_output_mix, nullptr};
+
+  // Define the interfaces that we intend to use and realize.
+ const SLInterfaceID interface_ids[] = {SL_IID_ANDROIDCONFIGURATION,
+ SL_IID_BUFFERQUEUE, SL_IID_VOLUME};
+ const SLboolean interface_required[] = {SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE,
+ SL_BOOLEAN_TRUE};
+
+ // Create the audio player on the engine interface.
+ RETURN_ON_ERROR(
+ (*engine_)->CreateAudioPlayer(
+ engine_, player_object_.Receive(), &audio_source, &audio_sink,
+ arraysize(interface_ids), interface_ids, interface_required),
+ false);
+
+ // Use the Android configuration interface to set platform-specific
+ // parameters. Should be done before player is realized.
+ SLAndroidConfigurationItf player_config;
+ RETURN_ON_ERROR(
+ player_object_->GetInterface(player_object_.Get(),
+ SL_IID_ANDROIDCONFIGURATION, &player_config),
+ false);
+ // Set audio player configuration to SL_ANDROID_STREAM_VOICE which
+ // corresponds to android.media.AudioManager.STREAM_VOICE_CALL.
+ SLint32 stream_type = SL_ANDROID_STREAM_VOICE;
+ RETURN_ON_ERROR(
+ (*player_config)
+ ->SetConfiguration(player_config, SL_ANDROID_KEY_STREAM_TYPE,
+ &stream_type, sizeof(SLint32)),
+ false);
+
+ // Realize the audio player object after configuration has been set.
+ RETURN_ON_ERROR(
+ player_object_->Realize(player_object_.Get(), SL_BOOLEAN_FALSE), false);
+
+ // Get the SLPlayItf interface on the audio player.
+ RETURN_ON_ERROR(
+ player_object_->GetInterface(player_object_.Get(), SL_IID_PLAY, &player_),
+ false);
+
+ // Get the SLAndroidSimpleBufferQueueItf interface on the audio player.
+ RETURN_ON_ERROR(
+ player_object_->GetInterface(player_object_.Get(), SL_IID_BUFFERQUEUE,
+ &simple_buffer_queue_),
+ false);
+
+ // Register callback method for the Android Simple Buffer Queue interface.
+ // This method will be called when the native audio layer needs audio data.
+ RETURN_ON_ERROR((*simple_buffer_queue_)
+ ->RegisterCallback(simple_buffer_queue_,
+ SimpleBufferQueueCallback, this),
+ false);
+
+ // Get the SLVolumeItf interface on the audio player.
+ RETURN_ON_ERROR(player_object_->GetInterface(player_object_.Get(),
+ SL_IID_VOLUME, &volume_),
+ false);
+
+ // TODO(henrika): might not be required to set volume to max here since it
+ // seems to be default on most devices. Might be required for unit tests.
+ // RETURN_ON_ERROR((*volume_)->SetVolumeLevel(volume_, 0), false);
+
+ return true;
+}
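+// For reference, CreateAudioPlayer() above follows the canonical OpenSL ES
+// object lifecycle (sketch only, not executable as-is):
+//   (*engine_)->CreateAudioPlayer(...);         // object starts unrealized
+//   GetInterface(SL_IID_ANDROIDCONFIGURATION);  // configure before Realize()
+//   (*player_config)->SetConfiguration(...);
+//   Realize(SL_BOOLEAN_FALSE);                  // synchronous realization
+//   GetInterface(SL_IID_PLAY), GetInterface(SL_IID_BUFFERQUEUE), ...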
+
+void OpenSLESPlayer::DestroyAudioPlayer() {
+ ALOGD("DestroyAudioPlayer");
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ if (!player_object_.Get())
+ return;
+ (*simple_buffer_queue_)
+ ->RegisterCallback(simple_buffer_queue_, nullptr, nullptr);
+ player_object_.Reset();
+ player_ = nullptr;
+ simple_buffer_queue_ = nullptr;
+ volume_ = nullptr;
+}
+
+// static
+void OpenSLESPlayer::SimpleBufferQueueCallback(
+ SLAndroidSimpleBufferQueueItf caller,
+ void* context) {
+ OpenSLESPlayer* stream = reinterpret_cast<OpenSLESPlayer*>(context);
+ stream->FillBufferQueue();
+}
+
+void OpenSLESPlayer::FillBufferQueue() {
+ RTC_DCHECK(thread_checker_opensles_.IsCurrent());
+ SLuint32 state = GetPlayState();
+ if (state != SL_PLAYSTATE_PLAYING) {
+ ALOGW("Buffer callback in non-playing state!");
+ return;
+ }
+ EnqueuePlayoutData(false);
+}
+
+void OpenSLESPlayer::EnqueuePlayoutData(bool silence) {
+ // Check delta time between two successive callbacks and provide a warning
+ // if it becomes very large.
+ // TODO(henrika): using 150ms as upper limit but this value is rather random.
+ const uint32_t current_time = rtc::Time();
+ const uint32_t diff = current_time - last_play_time_;
+ if (diff > 150) {
+ ALOGW("Bad OpenSL ES playout timing, dT=%u [ms]", diff);
+ }
+ last_play_time_ = current_time;
+ SLint8* audio_ptr8 =
+ reinterpret_cast<SLint8*>(audio_buffers_[buffer_index_].get());
+ if (silence) {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ // Avoid acquiring real audio data from WebRTC and fill the buffer with
+ // zeros instead. Used to prime the buffer with silence and to avoid asking
+ // for audio data from two different threads.
+ memset(audio_ptr8, 0, audio_parameters_.GetBytesPerBuffer());
+ } else {
+ RTC_DCHECK(thread_checker_opensles_.IsCurrent());
+ // Read audio data from the WebRTC source using the FineAudioBuffer object
+ // to adjust for differences in buffer size between WebRTC (10ms) and native
+    // OpenSL ES. Use a hardcoded delay estimate since OpenSL ES does not
+    // support delay estimation.
+ fine_audio_buffer_->GetPlayoutData(
+ rtc::ArrayView<int16_t>(audio_buffers_[buffer_index_].get(),
+ audio_parameters_.frames_per_buffer() *
+ audio_parameters_.channels()),
+ 25);
+ }
+ // Enqueue the decoded audio buffer for playback.
+ SLresult err = (*simple_buffer_queue_)
+ ->Enqueue(simple_buffer_queue_, audio_ptr8,
+ audio_parameters_.GetBytesPerBuffer());
+ if (SL_RESULT_SUCCESS != err) {
+ ALOGE("Enqueue failed: %d", err);
+ }
+ buffer_index_ = (buffer_index_ + 1) % kNumOfOpenSLESBuffers;
+}
+
+SLuint32 OpenSLESPlayer::GetPlayState() const {
+ RTC_DCHECK(player_);
+ SLuint32 state;
+ SLresult err = (*player_)->GetPlayState(player_, &state);
+ if (SL_RESULT_SUCCESS != err) {
+ ALOGE("GetPlayState failed: %d", err);
+ }
+ return state;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_device/android/opensles_player.h b/third_party/libwebrtc/modules/audio_device/android/opensles_player.h
new file mode 100644
index 0000000000..41593a448f
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/android/opensles_player.h
@@ -0,0 +1,195 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_DEVICE_ANDROID_OPENSLES_PLAYER_H_
+#define MODULES_AUDIO_DEVICE_ANDROID_OPENSLES_PLAYER_H_
+
+#include <SLES/OpenSLES.h>
+#include <SLES/OpenSLES_Android.h>
+#include <SLES/OpenSLES_AndroidConfiguration.h>
+
+#include "api/sequence_checker.h"
+#include "modules/audio_device/android/audio_common.h"
+#include "modules/audio_device/android/audio_manager.h"
+#include "modules/audio_device/android/opensles_common.h"
+#include "modules/audio_device/audio_device_generic.h"
+#include "modules/audio_device/include/audio_device_defines.h"
+#include "modules/utility/include/helpers_android.h"
+
+namespace webrtc {
+
+class FineAudioBuffer;
+
+// Implements 16-bit mono PCM audio output support for Android using the
+// C-based OpenSL ES API. No calls from C/C++ to Java using JNI are made.
+//
+// An instance must be created and destroyed on one and the same thread.
+// All public methods must also be called on the same thread. A thread checker
+// will RTC_DCHECK if any method is called on an invalid thread. Decoded audio
+// buffers are requested on a dedicated internal thread managed by the OpenSL
+// ES layer.
+//
+// The existing design forces the user to call InitPlayout() after
+// StopPlayout() to be able to call StartPlayout() again. This is in line
+// with how the Java-based implementation works.
+//
+// OpenSL ES is a native C API which has no Dalvik-related overhead such as
+// garbage collection pauses and it supports reduced audio output latency.
+// If the device doesn't claim this feature but supports API level 9 (Android
+// platform version 2.3) or later, then we can still use the OpenSL ES APIs but
+// the output latency may be higher.
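+//
+// A minimal usage sketch (assuming a valid AudioManager* audio_manager and
+// that AttachAudioBuffer() has been called by the owning ADM):
+//
+//   OpenSLESPlayer player(audio_manager);
+//   player.Init();
+//   player.InitPlayout();
+//   player.StartPlayout();
+//   ...
+//   player.StopPlayout();
+//   player.InitPlayout();  // required again before the next StartPlayout().
+//   player.StartPlayout();
+//   ...
+//   player.StopPlayout();
+//   player.Terminate();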
+class OpenSLESPlayer {
+ public:
+ // Beginning with API level 17 (Android 4.2), a buffer count of 2 or more is
+ // required for lower latency. Beginning with API level 18 (Android 4.3), a
+ // buffer count of 1 is sufficient for lower latency. In addition, the buffer
+ // size and sample rate must be compatible with the device's native output
+ // configuration provided via the audio manager at construction.
+ // TODO(henrika): perhaps set this value dynamically based on OS version.
+ static const int kNumOfOpenSLESBuffers = 2;
+
+ explicit OpenSLESPlayer(AudioManager* audio_manager);
+ ~OpenSLESPlayer();
+
+ int Init();
+ int Terminate();
+
+ int InitPlayout();
+ bool PlayoutIsInitialized() const { return initialized_; }
+
+ int StartPlayout();
+ int StopPlayout();
+ bool Playing() const { return playing_; }
+
+ int SpeakerVolumeIsAvailable(bool& available);
+ int SetSpeakerVolume(uint32_t volume);
+ int SpeakerVolume(uint32_t& volume) const;
+ int MaxSpeakerVolume(uint32_t& maxVolume) const;
+ int MinSpeakerVolume(uint32_t& minVolume) const;
+
+ void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer);
+
+ private:
+ // These callback methods are called when data is required for playout.
+ // They are both called from an internal "OpenSL ES thread" which is not
+ // attached to the Dalvik VM.
+ static void SimpleBufferQueueCallback(SLAndroidSimpleBufferQueueItf caller,
+ void* context);
+ void FillBufferQueue();
+ // Reads audio data in PCM format using the AudioDeviceBuffer.
+  // Can be called both on the main thread (during StartPlayout()) and from
+ // internal audio thread while output streaming is active.
+ // If the `silence` flag is set, the audio is filled with zeros instead of
+ // asking the WebRTC layer for real audio data. This procedure is also known
+ // as audio priming.
+ void EnqueuePlayoutData(bool silence);
+
+ // Allocate memory for audio buffers which will be used to render audio
+ // via the SLAndroidSimpleBufferQueueItf interface.
+ void AllocateDataBuffers();
+
+  // Obtains the SL Engine Interface from the existing global Engine object.
+ // The interface exposes creation methods of all the OpenSL ES object types.
+ // This method defines the `engine_` member variable.
+ bool ObtainEngineInterface();
+
+ // Creates/destroys the output mix object.
+ bool CreateMix();
+ void DestroyMix();
+
+ // Creates/destroys the audio player and the simple-buffer object.
+ // Also creates the volume object.
+ bool CreateAudioPlayer();
+ void DestroyAudioPlayer();
+
+ SLuint32 GetPlayState() const;
+
+ // Ensures that methods are called from the same thread as this object is
+ // created on.
+ SequenceChecker thread_checker_;
+
+  // Stores the thread ID in the first call to SimpleBufferQueueCallback() from
+  // the internal non-application thread, which is not attached to the Dalvik
+  // JVM. Detached during construction of this object.
+ SequenceChecker thread_checker_opensles_;
+
+ // Raw pointer to the audio manager injected at construction. Used to cache
+ // audio parameters and to access the global SL engine object needed by the
+ // ObtainEngineInterface() method. The audio manager outlives any instance of
+ // this class.
+ AudioManager* audio_manager_;
+
+ // Contains audio parameters provided to this class at construction by the
+ // AudioManager.
+ const AudioParameters audio_parameters_;
+
+ // Raw pointer handle provided to us in AttachAudioBuffer(). Owned by the
+ // AudioDeviceModuleImpl class and called by AudioDeviceModule::Create().
+ AudioDeviceBuffer* audio_device_buffer_;
+
+ bool initialized_;
+ bool playing_;
+
+ // PCM-type format definition.
+ // TODO(henrika): add support for SLAndroidDataFormat_PCM_EX (android-21) if
+ // 32-bit float representation is needed.
+ SLDataFormat_PCM pcm_format_;
+
+ // Queue of audio buffers to be used by the player object for rendering
+ // audio.
+ std::unique_ptr<SLint16[]> audio_buffers_[kNumOfOpenSLESBuffers];
+
+ // FineAudioBuffer takes an AudioDeviceBuffer which delivers audio data
+  // in chunks of 10ms. It then allows for this data to be pulled at a finer
+  // or coarser granularity. That is, by interacting with this class instead
+  // of directly with the AudioDeviceBuffer, one can ask for any number of
+  // audio data samples.
+ // Example: native buffer size can be 192 audio frames at 48kHz sample rate.
+ // WebRTC will provide 480 audio frames per 10ms but OpenSL ES asks for 192
+ // in each callback (one every 4th ms). This class can then ask for 192 and
+ // the FineAudioBuffer will ask WebRTC for new data approximately only every
+ // second callback and also cache non-utilized audio.
+ std::unique_ptr<FineAudioBuffer> fine_audio_buffer_;
+
+ // Keeps track of active audio buffer 'n' in the audio_buffers_[n] queue.
+ // Example (kNumOfOpenSLESBuffers = 2): counts 0, 1, 0, 1, ...
+ int buffer_index_;
+
+ // This interface exposes creation methods for all the OpenSL ES object types.
+ // It is the OpenSL ES API entry point.
+ SLEngineItf engine_;
+
+ // Output mix object to be used by the player object.
+ webrtc::ScopedSLObjectItf output_mix_;
+
+ // The audio player media object plays out audio to the speakers. It also
+ // supports volume control.
+ webrtc::ScopedSLObjectItf player_object_;
+
+ // This interface is supported on the audio player and it controls the state
+ // of the audio player.
+ SLPlayItf player_;
+
+ // The Android Simple Buffer Queue interface is supported on the audio player
+ // and it provides methods to send audio data from the source to the audio
+ // player for rendering.
+ SLAndroidSimpleBufferQueueItf simple_buffer_queue_;
+
+  // This interface exposes controls for manipulating the object's audio volume
+ // properties. This interface is supported on the Audio Player object.
+ SLVolumeItf volume_;
+
+ // Last time the OpenSL ES layer asked for audio data to play out.
+ uint32_t last_play_time_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_DEVICE_ANDROID_OPENSLES_PLAYER_H_
diff --git a/third_party/libwebrtc/modules/audio_device/android/opensles_recorder.cc b/third_party/libwebrtc/modules/audio_device/android/opensles_recorder.cc
new file mode 100644
index 0000000000..4e0c26dbf0
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/android/opensles_recorder.cc
@@ -0,0 +1,431 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_device/android/opensles_recorder.h"
+
+#include <android/log.h>
+
+#include <memory>
+
+#include "api/array_view.h"
+#include "modules/audio_device/android/audio_common.h"
+#include "modules/audio_device/android/audio_manager.h"
+#include "modules/audio_device/fine_audio_buffer.h"
+#include "rtc_base/arraysize.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/platform_thread.h"
+#include "rtc_base/time_utils.h"
+
+#define TAG "OpenSLESRecorder"
+#define ALOGV(...) __android_log_print(ANDROID_LOG_VERBOSE, TAG, __VA_ARGS__)
+#define ALOGD(...) __android_log_print(ANDROID_LOG_DEBUG, TAG, __VA_ARGS__)
+#define ALOGE(...) __android_log_print(ANDROID_LOG_ERROR, TAG, __VA_ARGS__)
+#define ALOGW(...) __android_log_print(ANDROID_LOG_WARN, TAG, __VA_ARGS__)
+#define ALOGI(...) __android_log_print(ANDROID_LOG_INFO, TAG, __VA_ARGS__)
+
+#define LOG_ON_ERROR(op) \
+ [](SLresult err) { \
+ if (err != SL_RESULT_SUCCESS) { \
+ ALOGE("%s:%d %s failed: %s", __FILE__, __LINE__, #op, \
+ GetSLErrorString(err)); \
+ return true; \
+ } \
+ return false; \
+ }(op)
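+// Note: the macro wraps `op` in an immediately-invoked lambda so that `op` is
+// evaluated exactly once and the whole expression yields a bool. Typical use
+// (as seen below):
+//   if (LOG_ON_ERROR((*recorder_)->SetRecordState(recorder_, state))) {
+//     return -1;
+//   }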
+
+namespace webrtc {
+
+OpenSLESRecorder::OpenSLESRecorder(AudioManager* audio_manager)
+ : audio_manager_(audio_manager),
+ audio_parameters_(audio_manager->GetRecordAudioParameters()),
+ audio_device_buffer_(nullptr),
+ initialized_(false),
+ recording_(false),
+ engine_(nullptr),
+ recorder_(nullptr),
+ simple_buffer_queue_(nullptr),
+ buffer_index_(0),
+ last_rec_time_(0) {
+ ALOGD("ctor[tid=%d]", rtc::CurrentThreadId());
+ // Detach from this thread since we want to use the checker to verify calls
+ // from the internal audio thread.
+ thread_checker_opensles_.Detach();
+ // Use native audio output parameters provided by the audio manager and
+ // define the PCM format structure.
+ pcm_format_ = CreatePCMConfiguration(audio_parameters_.channels(),
+ audio_parameters_.sample_rate(),
+ audio_parameters_.bits_per_sample());
+}
+
+OpenSLESRecorder::~OpenSLESRecorder() {
+ ALOGD("dtor[tid=%d]", rtc::CurrentThreadId());
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ Terminate();
+ DestroyAudioRecorder();
+ engine_ = nullptr;
+ RTC_DCHECK(!engine_);
+ RTC_DCHECK(!recorder_);
+ RTC_DCHECK(!simple_buffer_queue_);
+}
+
+int OpenSLESRecorder::Init() {
+ ALOGD("Init[tid=%d]", rtc::CurrentThreadId());
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ if (audio_parameters_.channels() == 2) {
+ ALOGD("Stereo mode is enabled");
+ }
+ return 0;
+}
+
+int OpenSLESRecorder::Terminate() {
+ ALOGD("Terminate[tid=%d]", rtc::CurrentThreadId());
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ StopRecording();
+ return 0;
+}
+
+int OpenSLESRecorder::InitRecording() {
+ ALOGD("InitRecording[tid=%d]", rtc::CurrentThreadId());
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ RTC_DCHECK(!initialized_);
+ RTC_DCHECK(!recording_);
+ if (!ObtainEngineInterface()) {
+ ALOGE("Failed to obtain SL Engine interface");
+ return -1;
+ }
+ CreateAudioRecorder();
+ initialized_ = true;
+ buffer_index_ = 0;
+ return 0;
+}
+
+int OpenSLESRecorder::StartRecording() {
+ ALOGD("StartRecording[tid=%d]", rtc::CurrentThreadId());
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ RTC_DCHECK(initialized_);
+ RTC_DCHECK(!recording_);
+ if (fine_audio_buffer_) {
+ fine_audio_buffer_->ResetRecord();
+ }
+ // Add buffers to the queue before changing state to SL_RECORDSTATE_RECORDING
+ // to ensure that recording starts as soon as the state is modified. On some
+ // devices, SLAndroidSimpleBufferQueue::Clear() used in Stop() does not flush
+ // the buffers as intended and we therefore check the number of buffers
+ // already queued first. Enqueue() can return SL_RESULT_BUFFER_INSUFFICIENT
+ // otherwise.
+ int num_buffers_in_queue = GetBufferCount();
+ for (int i = 0; i < kNumOfOpenSLESBuffers - num_buffers_in_queue; ++i) {
+ if (!EnqueueAudioBuffer()) {
+ recording_ = false;
+ return -1;
+ }
+ }
+ num_buffers_in_queue = GetBufferCount();
+ RTC_DCHECK_EQ(num_buffers_in_queue, kNumOfOpenSLESBuffers);
+ LogBufferState();
+ // Start audio recording by changing the state to SL_RECORDSTATE_RECORDING.
+ // Given that buffers are already enqueued, recording should start at once.
+  // -1 is returned if recording fails to start.
+ last_rec_time_ = rtc::Time();
+ if (LOG_ON_ERROR(
+ (*recorder_)->SetRecordState(recorder_, SL_RECORDSTATE_RECORDING))) {
+ return -1;
+ }
+ recording_ = (GetRecordState() == SL_RECORDSTATE_RECORDING);
+ RTC_DCHECK(recording_);
+ return 0;
+}
+
+int OpenSLESRecorder::StopRecording() {
+ ALOGD("StopRecording[tid=%d]", rtc::CurrentThreadId());
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ if (!initialized_ || !recording_) {
+ return 0;
+ }
+ // Stop recording by setting the record state to SL_RECORDSTATE_STOPPED.
+ if (LOG_ON_ERROR(
+ (*recorder_)->SetRecordState(recorder_, SL_RECORDSTATE_STOPPED))) {
+ return -1;
+ }
+ // Clear the buffer queue to get rid of old data when resuming recording.
+ if (LOG_ON_ERROR((*simple_buffer_queue_)->Clear(simple_buffer_queue_))) {
+ return -1;
+ }
+ thread_checker_opensles_.Detach();
+ initialized_ = false;
+ recording_ = false;
+ return 0;
+}
+
+void OpenSLESRecorder::AttachAudioBuffer(AudioDeviceBuffer* audio_buffer) {
+ ALOGD("AttachAudioBuffer");
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ RTC_CHECK(audio_buffer);
+ audio_device_buffer_ = audio_buffer;
+ // Ensure that the audio device buffer is informed about the native sample
+ // rate used on the recording side.
+ const int sample_rate_hz = audio_parameters_.sample_rate();
+ ALOGD("SetRecordingSampleRate(%d)", sample_rate_hz);
+ audio_device_buffer_->SetRecordingSampleRate(sample_rate_hz);
+ // Ensure that the audio device buffer is informed about the number of
+ // channels preferred by the OS on the recording side.
+ const size_t channels = audio_parameters_.channels();
+ ALOGD("SetRecordingChannels(%zu)", channels);
+ audio_device_buffer_->SetRecordingChannels(channels);
+  // Allocate memory for internal data buffers given the existing audio
+  // parameters.
+ AllocateDataBuffers();
+}
+
+int OpenSLESRecorder::EnableBuiltInAEC(bool enable) {
+ ALOGD("EnableBuiltInAEC(%d)", enable);
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ ALOGE("Not implemented");
+ return 0;
+}
+
+int OpenSLESRecorder::EnableBuiltInAGC(bool enable) {
+ ALOGD("EnableBuiltInAGC(%d)", enable);
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ ALOGE("Not implemented");
+ return 0;
+}
+
+int OpenSLESRecorder::EnableBuiltInNS(bool enable) {
+ ALOGD("EnableBuiltInNS(%d)", enable);
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ ALOGE("Not implemented");
+ return 0;
+}
+
+bool OpenSLESRecorder::ObtainEngineInterface() {
+ ALOGD("ObtainEngineInterface");
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ if (engine_)
+ return true;
+ // Get access to (or create if not already existing) the global OpenSL Engine
+ // object.
+ SLObjectItf engine_object = audio_manager_->GetOpenSLEngine();
+ if (engine_object == nullptr) {
+ ALOGE("Failed to access the global OpenSL engine");
+ return false;
+ }
+ // Get the SL Engine Interface which is implicit.
+ if (LOG_ON_ERROR(
+ (*engine_object)
+ ->GetInterface(engine_object, SL_IID_ENGINE, &engine_))) {
+ return false;
+ }
+ return true;
+}
+
+bool OpenSLESRecorder::CreateAudioRecorder() {
+ ALOGD("CreateAudioRecorder");
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ if (recorder_object_.Get())
+ return true;
+ RTC_DCHECK(!recorder_);
+ RTC_DCHECK(!simple_buffer_queue_);
+
+ // Audio source configuration.
+ SLDataLocator_IODevice mic_locator = {SL_DATALOCATOR_IODEVICE,
+ SL_IODEVICE_AUDIOINPUT,
+ SL_DEFAULTDEVICEID_AUDIOINPUT, NULL};
+ SLDataSource audio_source = {&mic_locator, NULL};
+
+ // Audio sink configuration.
+ SLDataLocator_AndroidSimpleBufferQueue buffer_queue = {
+ SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE,
+ static_cast<SLuint32>(kNumOfOpenSLESBuffers)};
+ SLDataSink audio_sink = {&buffer_queue, &pcm_format_};
+
+ // Create the audio recorder object (requires the RECORD_AUDIO permission).
+ // Do not realize the recorder yet. Set the configuration first.
+ const SLInterfaceID interface_id[] = {SL_IID_ANDROIDSIMPLEBUFFERQUEUE,
+ SL_IID_ANDROIDCONFIGURATION};
+ const SLboolean interface_required[] = {SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE};
+ if (LOG_ON_ERROR((*engine_)->CreateAudioRecorder(
+ engine_, recorder_object_.Receive(), &audio_source, &audio_sink,
+ arraysize(interface_id), interface_id, interface_required))) {
+ return false;
+ }
+
+ // Configure the audio recorder (before it is realized).
+ SLAndroidConfigurationItf recorder_config;
+ if (LOG_ON_ERROR((recorder_object_->GetInterface(recorder_object_.Get(),
+ SL_IID_ANDROIDCONFIGURATION,
+ &recorder_config)))) {
+ return false;
+ }
+
+  // Use the default microphone tuned for audio communication
+  // (SL_ANDROID_RECORDING_PRESET_VOICE_COMMUNICATION). Note that
+  // SL_ANDROID_RECORDING_PRESET_VOICE_RECOGNITION leads to a fast track but
+  // also excludes usage of required effects like AEC, AGC, and NS.
+ SLint32 stream_type = SL_ANDROID_RECORDING_PRESET_VOICE_COMMUNICATION;
+ if (LOG_ON_ERROR(((*recorder_config)
+ ->SetConfiguration(recorder_config,
+ SL_ANDROID_KEY_RECORDING_PRESET,
+ &stream_type, sizeof(SLint32))))) {
+ return false;
+ }
+
+ // The audio recorder can now be realized (in synchronous mode).
+ if (LOG_ON_ERROR((recorder_object_->Realize(recorder_object_.Get(),
+ SL_BOOLEAN_FALSE)))) {
+ return false;
+ }
+
+ // Get the implicit recorder interface (SL_IID_RECORD).
+ if (LOG_ON_ERROR((recorder_object_->GetInterface(
+ recorder_object_.Get(), SL_IID_RECORD, &recorder_)))) {
+ return false;
+ }
+
+ // Get the simple buffer queue interface (SL_IID_ANDROIDSIMPLEBUFFERQUEUE).
+ // It was explicitly requested.
+ if (LOG_ON_ERROR((recorder_object_->GetInterface(
+ recorder_object_.Get(), SL_IID_ANDROIDSIMPLEBUFFERQUEUE,
+ &simple_buffer_queue_)))) {
+ return false;
+ }
+
+ // Register the input callback for the simple buffer queue.
+ // This callback will be called when receiving new data from the device.
+ if (LOG_ON_ERROR(((*simple_buffer_queue_)
+ ->RegisterCallback(simple_buffer_queue_,
+ SimpleBufferQueueCallback, this)))) {
+ return false;
+ }
+ return true;
+}
+
+void OpenSLESRecorder::DestroyAudioRecorder() {
+ ALOGD("DestroyAudioRecorder");
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ if (!recorder_object_.Get())
+ return;
+ (*simple_buffer_queue_)
+ ->RegisterCallback(simple_buffer_queue_, nullptr, nullptr);
+ recorder_object_.Reset();
+ recorder_ = nullptr;
+ simple_buffer_queue_ = nullptr;
+}
+
+void OpenSLESRecorder::SimpleBufferQueueCallback(
+ SLAndroidSimpleBufferQueueItf buffer_queue,
+ void* context) {
+ OpenSLESRecorder* stream = static_cast<OpenSLESRecorder*>(context);
+ stream->ReadBufferQueue();
+}
+
+void OpenSLESRecorder::AllocateDataBuffers() {
+ ALOGD("AllocateDataBuffers");
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ RTC_DCHECK(!simple_buffer_queue_);
+ RTC_CHECK(audio_device_buffer_);
+ // Create a modified audio buffer class which allows us to deliver any number
+  // of samples (and not only multiples of 10ms) to match the native audio unit
+ // buffer size.
+ ALOGD("frames per native buffer: %zu", audio_parameters_.frames_per_buffer());
+ ALOGD("frames per 10ms buffer: %zu",
+ audio_parameters_.frames_per_10ms_buffer());
+ ALOGD("bytes per native buffer: %zu", audio_parameters_.GetBytesPerBuffer());
+ ALOGD("native sample rate: %d", audio_parameters_.sample_rate());
+ RTC_DCHECK(audio_device_buffer_);
+ fine_audio_buffer_ = std::make_unique<FineAudioBuffer>(audio_device_buffer_);
+ // Allocate queue of audio buffers that stores recorded audio samples.
+ const int buffer_size_samples =
+ audio_parameters_.frames_per_buffer() * audio_parameters_.channels();
+ audio_buffers_.reset(new std::unique_ptr<SLint16[]>[kNumOfOpenSLESBuffers]);
+ for (int i = 0; i < kNumOfOpenSLESBuffers; ++i) {
+ audio_buffers_[i].reset(new SLint16[buffer_size_samples]);
+ }
+}
+
+void OpenSLESRecorder::ReadBufferQueue() {
+ RTC_DCHECK(thread_checker_opensles_.IsCurrent());
+ SLuint32 state = GetRecordState();
+ if (state != SL_RECORDSTATE_RECORDING) {
+ ALOGW("Buffer callback in non-recording state!");
+ return;
+ }
+ // Check delta time between two successive callbacks and provide a warning
+ // if it becomes very large.
+ // TODO(henrika): using 150ms as upper limit but this value is rather random.
+ const uint32_t current_time = rtc::Time();
+ const uint32_t diff = current_time - last_rec_time_;
+ if (diff > 150) {
+ ALOGW("Bad OpenSL ES record timing, dT=%u [ms]", diff);
+ }
+ last_rec_time_ = current_time;
+ // Send recorded audio data to the WebRTC sink.
+ // TODO(henrika): fix delay estimates. It is OK to use fixed values for now
+ // since there is no support to turn off built-in EC in combination with
+ // OpenSL ES anyhow. Hence, as is, the WebRTC based AEC (which would use
+ // these estimates) will never be active.
+ fine_audio_buffer_->DeliverRecordedData(
+ rtc::ArrayView<const int16_t>(
+ audio_buffers_[buffer_index_].get(),
+ audio_parameters_.frames_per_buffer() * audio_parameters_.channels()),
+ 25);
+  // Enqueue the utilized audio buffer and use it for recording again.
+ EnqueueAudioBuffer();
+}
+
+bool OpenSLESRecorder::EnqueueAudioBuffer() {
+ SLresult err =
+ (*simple_buffer_queue_)
+ ->Enqueue(
+ simple_buffer_queue_,
+ reinterpret_cast<SLint8*>(audio_buffers_[buffer_index_].get()),
+ audio_parameters_.GetBytesPerBuffer());
+ if (SL_RESULT_SUCCESS != err) {
+ ALOGE("Enqueue failed: %s", GetSLErrorString(err));
+ return false;
+ }
+ buffer_index_ = (buffer_index_ + 1) % kNumOfOpenSLESBuffers;
+ return true;
+}
+
+SLuint32 OpenSLESRecorder::GetRecordState() const {
+ RTC_DCHECK(recorder_);
+ SLuint32 state;
+ SLresult err = (*recorder_)->GetRecordState(recorder_, &state);
+ if (SL_RESULT_SUCCESS != err) {
+ ALOGE("GetRecordState failed: %s", GetSLErrorString(err));
+ }
+ return state;
+}
+
+SLAndroidSimpleBufferQueueState OpenSLESRecorder::GetBufferQueueState() const {
+ RTC_DCHECK(simple_buffer_queue_);
+ // state.count: Number of buffers currently in the queue.
+ // state.index: Index of the currently filling buffer. This is a linear index
+ // that keeps a cumulative count of the number of buffers recorded.
+ SLAndroidSimpleBufferQueueState state;
+ SLresult err =
+ (*simple_buffer_queue_)->GetState(simple_buffer_queue_, &state);
+ if (SL_RESULT_SUCCESS != err) {
+ ALOGE("GetState failed: %s", GetSLErrorString(err));
+ }
+ return state;
+}
+
+void OpenSLESRecorder::LogBufferState() const {
+ SLAndroidSimpleBufferQueueState state = GetBufferQueueState();
+ ALOGD("state.count:%d state.index:%d", state.count, state.index);
+}
+
+SLuint32 OpenSLESRecorder::GetBufferCount() {
+ SLAndroidSimpleBufferQueueState state = GetBufferQueueState();
+ return state.count;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_device/android/opensles_recorder.h b/third_party/libwebrtc/modules/audio_device/android/opensles_recorder.h
new file mode 100644
index 0000000000..e659c3c157
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/android/opensles_recorder.h
@@ -0,0 +1,193 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_DEVICE_ANDROID_OPENSLES_RECORDER_H_
+#define MODULES_AUDIO_DEVICE_ANDROID_OPENSLES_RECORDER_H_
+
+#include <SLES/OpenSLES.h>
+#include <SLES/OpenSLES_Android.h>
+#include <SLES/OpenSLES_AndroidConfiguration.h>
+
+#include <memory>
+
+#include "api/sequence_checker.h"
+#include "modules/audio_device/android/audio_common.h"
+#include "modules/audio_device/android/audio_manager.h"
+#include "modules/audio_device/android/opensles_common.h"
+#include "modules/audio_device/audio_device_generic.h"
+#include "modules/audio_device/include/audio_device_defines.h"
+#include "modules/utility/include/helpers_android.h"
+
+namespace webrtc {
+
+class FineAudioBuffer;
+
+// Implements 16-bit mono PCM audio input support for Android using the
+// C-based OpenSL ES API. No calls from C/C++ to Java using JNI are made.
+//
+// An instance must be created and destroyed on one and the same thread.
+// All public methods must also be called on the same thread. A thread checker
+// will RTC_DCHECK if any method is called on an invalid thread. Recorded audio
+// buffers are provided on a dedicated internal thread managed by the OpenSL
+// ES layer.
+//
+// The existing design forces the user to call InitRecording() after
+// StopRecording() to be able to call StartRecording() again. This is in line
+// with how the Java-based implementation works.
+//
+// As of API level 21, lower latency audio input is supported on select devices.
+// To take advantage of this feature, first confirm that lower latency output is
+// available. The capability for lower latency output is a prerequisite for the
+// lower latency input feature. Then, create an AudioRecorder with the same
+// sample rate and buffer size as would be used for output. OpenSL ES interfaces
+// for input effects preclude the lower latency path.
+// See https://developer.android.com/ndk/guides/audio/opensl-prog-notes.html
+// for more details.
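+//
+// A minimal usage sketch (assuming a valid AudioManager* audio_manager and
+// that AttachAudioBuffer() has been called by the owning ADM):
+//
+//   OpenSLESRecorder recorder(audio_manager);
+//   recorder.Init();
+//   recorder.InitRecording();
+//   recorder.StartRecording();
+//   ...
+//   recorder.StopRecording();
+//   recorder.InitRecording();  // required again before StartRecording().
+//   recorder.StartRecording();
+//   ...
+//   recorder.StopRecording();
+//   recorder.Terminate();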
+class OpenSLESRecorder {
+ public:
+ // Beginning with API level 17 (Android 4.2), a buffer count of 2 or more is
+ // required for lower latency. Beginning with API level 18 (Android 4.3), a
+ // buffer count of 1 is sufficient for lower latency. In addition, the buffer
+ // size and sample rate must be compatible with the device's native input
+ // configuration provided via the audio manager at construction.
+ // TODO(henrika): perhaps set this value dynamically based on OS version.
+ static const int kNumOfOpenSLESBuffers = 2;
+
+ explicit OpenSLESRecorder(AudioManager* audio_manager);
+ ~OpenSLESRecorder();
+
+ int Init();
+ int Terminate();
+
+ int InitRecording();
+ bool RecordingIsInitialized() const { return initialized_; }
+
+ int StartRecording();
+ int StopRecording();
+ bool Recording() const { return recording_; }
+
+ void AttachAudioBuffer(AudioDeviceBuffer* audio_buffer);
+
+ // TODO(henrika): add support using OpenSL ES APIs when available.
+ int EnableBuiltInAEC(bool enable);
+ int EnableBuiltInAGC(bool enable);
+ int EnableBuiltInNS(bool enable);
+
+ private:
+  // Obtains the SL Engine Interface from the existing global Engine object.
+ // The interface exposes creation methods of all the OpenSL ES object types.
+ // This method defines the `engine_` member variable.
+ bool ObtainEngineInterface();
+
+ // Creates/destroys the audio recorder and the simple-buffer queue object.
+ bool CreateAudioRecorder();
+ void DestroyAudioRecorder();
+
+ // Allocate memory for audio buffers which will be used to capture audio
+ // via the SLAndroidSimpleBufferQueueItf interface.
+ void AllocateDataBuffers();
+
+ // These callback methods are called when data has been written to the input
+ // buffer queue. They are both called from an internal "OpenSL ES thread"
+ // which is not attached to the Dalvik VM.
+ static void SimpleBufferQueueCallback(SLAndroidSimpleBufferQueueItf caller,
+ void* context);
+ void ReadBufferQueue();
+
+  // Wraps calls to SLAndroidSimpleBufferQueueItf::Enqueue(). It can be
+ // called both on the main thread (but before recording has started) and from
+ // the internal audio thread while input streaming is active. It uses
+ // `simple_buffer_queue_` but no lock is needed since the initial calls from
+ // the main thread and the native callback thread are mutually exclusive.
+ bool EnqueueAudioBuffer();
+
+ // Returns the current recorder state.
+ SLuint32 GetRecordState() const;
+
+ // Returns the current buffer queue state.
+ SLAndroidSimpleBufferQueueState GetBufferQueueState() const;
+
+  // Returns the number of buffers currently in the queue.
+ SLuint32 GetBufferCount();
+
+ // Prints a log message of the current queue state. Can be used for debugging
+ // purposes.
+ void LogBufferState() const;
+
+ // Ensures that methods are called from the same thread as this object is
+ // created on.
+ SequenceChecker thread_checker_;
+
+  // Stores the thread ID in the first call to SimpleBufferQueueCallback() from
+  // the internal non-application thread, which is not attached to the Dalvik
+  // JVM. Detached during construction of this object.
+ SequenceChecker thread_checker_opensles_;
+
+ // Raw pointer to the audio manager injected at construction. Used to cache
+ // audio parameters and to access the global SL engine object needed by the
+ // ObtainEngineInterface() method. The audio manager outlives any instance of
+ // this class.
+ AudioManager* const audio_manager_;
+
+ // Contains audio parameters provided to this class at construction by the
+ // AudioManager.
+ const AudioParameters audio_parameters_;
+
+ // Raw pointer handle provided to us in AttachAudioBuffer(). Owned by the
+ // AudioDeviceModuleImpl class and called by AudioDeviceModule::Create().
+ AudioDeviceBuffer* audio_device_buffer_;
+
+ // PCM-type format definition.
+ // TODO(henrika): add support for SLAndroidDataFormat_PCM_EX (android-21) if
+ // 32-bit float representation is needed.
+ SLDataFormat_PCM pcm_format_;
+
+ bool initialized_;
+ bool recording_;
+
+ // This interface exposes creation methods for all the OpenSL ES object types.
+ // It is the OpenSL ES API entry point.
+ SLEngineItf engine_;
+
+  // The audio recorder media object records audio to the destination specified
+  // by the data sink, capturing it from the input specified by the data source.
+ webrtc::ScopedSLObjectItf recorder_object_;
+
+ // This interface is supported on the audio recorder object and it controls
+ // the state of the audio recorder.
+ SLRecordItf recorder_;
+
+ // The Android Simple Buffer Queue interface is supported on the audio
+ // recorder. For recording, an app should enqueue empty buffers. When a
+ // registered callback sends notification that the system has finished writing
+ // data to the buffer, the app can read the buffer.
+ SLAndroidSimpleBufferQueueItf simple_buffer_queue_;
+
+ // Consumes audio of native buffer size and feeds the WebRTC layer with 10ms
+ // chunks of audio.
+ std::unique_ptr<FineAudioBuffer> fine_audio_buffer_;
+
+ // Queue of audio buffers to be used by the recorder object for capturing
+  // audio. They will be used in a round-robin fashion, and the size of each
+  // buffer is given by AudioParameters::frames_per_buffer(), i.e., it
+  // corresponds to the native OpenSL ES buffer size.
+ std::unique_ptr<std::unique_ptr<SLint16[]>[]> audio_buffers_;
+
+ // Keeps track of active audio buffer 'n' in the audio_buffers_[n] queue.
+ // Example (kNumOfOpenSLESBuffers = 2): counts 0, 1, 0, 1, ...
+ int buffer_index_;
+
+ // Last time the OpenSL ES layer delivered recorded audio data.
+ uint32_t last_rec_time_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_DEVICE_ANDROID_OPENSLES_RECORDER_H_
diff --git a/third_party/libwebrtc/modules/audio_device/audio_device_api_gn/moz.build b/third_party/libwebrtc/modules/audio_device/audio_device_api_gn/moz.build
new file mode 100644
index 0000000000..5daa3d2133
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/audio_device_api_gn/moz.build
@@ -0,0 +1,193 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("audio_device_api_gn")
diff --git a/third_party/libwebrtc/modules/audio_device/audio_device_buffer.cc b/third_party/libwebrtc/modules/audio_device/audio_device_buffer.cc
new file mode 100644
index 0000000000..8cc11debf4
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/audio_device_buffer.cc
@@ -0,0 +1,518 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_device/audio_device_buffer.h"
+
+#include <string.h>
+
+#include <cmath>
+#include <cstddef>
+#include <cstdint>
+
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/time_utils.h"
+#include "system_wrappers/include/metrics.h"
+
+namespace webrtc {
+
+static const char kTimerQueueName[] = "AudioDeviceBufferTimer";
+
+// Time between two successive calls to LogStats().
+static const size_t kTimerIntervalInSeconds = 10;
+static const size_t kTimerIntervalInMilliseconds =
+ kTimerIntervalInSeconds * rtc::kNumMillisecsPerSec;
+// Min time required to qualify an audio session as a "call". If playout or
+// recording has been active for less than this time, we will not store any
+// logs or UMA stats but instead consider the call too short.
+static const size_t kMinValidCallTimeTimeInSeconds = 10;
+static const size_t kMinValidCallTimeTimeInMilliseconds =
+ kMinValidCallTimeTimeInSeconds * rtc::kNumMillisecsPerSec;
+#ifdef AUDIO_DEVICE_PLAYS_SINUS_TONE
+static const double k2Pi = 6.28318530717959;
+#endif
+
+AudioDeviceBuffer::AudioDeviceBuffer(TaskQueueFactory* task_queue_factory)
+ : task_queue_(task_queue_factory->CreateTaskQueue(
+ kTimerQueueName,
+ TaskQueueFactory::Priority::NORMAL)),
+ audio_transport_cb_(nullptr),
+ rec_sample_rate_(0),
+ play_sample_rate_(0),
+ rec_channels_(0),
+ play_channels_(0),
+ playing_(false),
+ recording_(false),
+ typing_status_(false),
+ play_delay_ms_(0),
+ rec_delay_ms_(0),
+ capture_timestamp_ns_(0),
+ num_stat_reports_(0),
+ last_timer_task_time_(0),
+ rec_stat_count_(0),
+ play_stat_count_(0),
+ play_start_time_(0),
+ only_silence_recorded_(true),
+ log_stats_(false) {
+ RTC_LOG(LS_INFO) << "AudioDeviceBuffer::ctor";
+#ifdef AUDIO_DEVICE_PLAYS_SINUS_TONE
+ phase_ = 0.0;
+ RTC_LOG(LS_WARNING) << "AUDIO_DEVICE_PLAYS_SINUS_TONE is defined!";
+#endif
+}
+
+AudioDeviceBuffer::~AudioDeviceBuffer() {
+ RTC_DCHECK_RUN_ON(&main_thread_checker_);
+ RTC_DCHECK(!playing_);
+ RTC_DCHECK(!recording_);
+ RTC_LOG(LS_INFO) << "AudioDeviceBuffer::~dtor";
+}
+
+int32_t AudioDeviceBuffer::RegisterAudioCallback(
+ AudioTransport* audio_callback) {
+ RTC_DCHECK_RUN_ON(&main_thread_checker_);
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ if (playing_ || recording_) {
+ RTC_LOG(LS_ERROR) << "Failed to set audio transport since media was active";
+ return -1;
+ }
+ audio_transport_cb_ = audio_callback;
+ return 0;
+}
+
+void AudioDeviceBuffer::StartPlayout() {
+ RTC_DCHECK_RUN_ON(&main_thread_checker_);
+  // TODO(henrika): allow for usage of DCHECK(!playing_) here instead. Today
+  // the ADM allows calling Start() twice and ignores the second call, but it
+  // makes more sense to only allow one call.
+ if (playing_) {
+ return;
+ }
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ // Clear members tracking playout stats and do it on the task queue.
+ task_queue_.PostTask([this] { ResetPlayStats(); });
+ // Start a periodic timer based on task queue if not already done by the
+ // recording side.
+ if (!recording_) {
+ StartPeriodicLogging();
+ }
+ const int64_t now_time = rtc::TimeMillis();
+ // Clear members that are only touched on the main (creating) thread.
+ play_start_time_ = now_time;
+ playing_ = true;
+}
+
+void AudioDeviceBuffer::StartRecording() {
+ RTC_DCHECK_RUN_ON(&main_thread_checker_);
+ if (recording_) {
+ return;
+ }
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ // Clear members tracking recording stats and do it on the task queue.
+ task_queue_.PostTask([this] { ResetRecStats(); });
+ // Start a periodic timer based on task queue if not already done by the
+ // playout side.
+ if (!playing_) {
+ StartPeriodicLogging();
+ }
+ // Clear members that will be touched on the main (creating) thread.
+ rec_start_time_ = rtc::TimeMillis();
+ recording_ = true;
+ // And finally a member which can be modified on the native audio thread.
+ // It is safe to do so since we know by design that the owning ADM has not
+ // yet started the native audio recording.
+ only_silence_recorded_ = true;
+}
+
+void AudioDeviceBuffer::StopPlayout() {
+ RTC_DCHECK_RUN_ON(&main_thread_checker_);
+ if (!playing_) {
+ return;
+ }
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ playing_ = false;
+ // Stop periodic logging if no more media is active.
+ if (!recording_) {
+ StopPeriodicLogging();
+ }
+ RTC_LOG(LS_INFO) << "total playout time: "
+ << rtc::TimeSince(play_start_time_);
+}
+
+void AudioDeviceBuffer::StopRecording() {
+ RTC_DCHECK_RUN_ON(&main_thread_checker_);
+ if (!recording_) {
+ return;
+ }
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ recording_ = false;
+ // Stop periodic logging if no more media is active.
+ if (!playing_) {
+ StopPeriodicLogging();
+ }
+ // Add UMA histogram to keep track of the case when only zeros have been
+ // recorded. Measurements (max of absolute level) are taken twice per second,
+  // which means that if e.g. 10 seconds of audio has been recorded, a total of
+ // 20 level estimates must all be identical to zero to trigger the histogram.
+ // `only_silence_recorded_` can only be cleared on the native audio thread
+ // that drives audio capture but we know by design that the audio has stopped
+  // when this method is called, hence there should not be any conflicts. Also,
+ // the fact that `only_silence_recorded_` can be affected during the complete
+ // call makes chances of conflicts with potentially one last callback very
+ // small.
+ const size_t time_since_start = rtc::TimeSince(rec_start_time_);
+ if (time_since_start > kMinValidCallTimeTimeInMilliseconds) {
+ const int only_zeros = static_cast<int>(only_silence_recorded_);
+ RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.RecordedOnlyZeros", only_zeros);
+ RTC_LOG(LS_INFO) << "HISTOGRAM(WebRTC.Audio.RecordedOnlyZeros): "
+ << only_zeros;
+ }
+ RTC_LOG(LS_INFO) << "total recording time: " << time_since_start;
+}
+
+int32_t AudioDeviceBuffer::SetRecordingSampleRate(uint32_t fsHz) {
+ RTC_LOG(LS_INFO) << "SetRecordingSampleRate(" << fsHz << ")";
+ rec_sample_rate_ = fsHz;
+ return 0;
+}
+
+int32_t AudioDeviceBuffer::SetPlayoutSampleRate(uint32_t fsHz) {
+ RTC_LOG(LS_INFO) << "SetPlayoutSampleRate(" << fsHz << ")";
+ play_sample_rate_ = fsHz;
+ return 0;
+}
+
+uint32_t AudioDeviceBuffer::RecordingSampleRate() const {
+ return rec_sample_rate_;
+}
+
+uint32_t AudioDeviceBuffer::PlayoutSampleRate() const {
+ return play_sample_rate_;
+}
+
+int32_t AudioDeviceBuffer::SetRecordingChannels(size_t channels) {
+ RTC_LOG(LS_INFO) << "SetRecordingChannels(" << channels << ")";
+ rec_channels_ = channels;
+ return 0;
+}
+
+int32_t AudioDeviceBuffer::SetPlayoutChannels(size_t channels) {
+ RTC_LOG(LS_INFO) << "SetPlayoutChannels(" << channels << ")";
+ play_channels_ = channels;
+ return 0;
+}
+
+size_t AudioDeviceBuffer::RecordingChannels() const {
+ return rec_channels_;
+}
+
+size_t AudioDeviceBuffer::PlayoutChannels() const {
+ return play_channels_;
+}
+
+int32_t AudioDeviceBuffer::SetTypingStatus(bool typing_status) {
+ typing_status_ = typing_status;
+ return 0;
+}
+
+void AudioDeviceBuffer::SetVQEData(int play_delay_ms, int rec_delay_ms) {
+ play_delay_ms_ = play_delay_ms;
+ rec_delay_ms_ = rec_delay_ms;
+}
+
+int32_t AudioDeviceBuffer::SetRecordedBuffer(const void* audio_buffer,
+ size_t samples_per_channel) {
+ return SetRecordedBuffer(audio_buffer, samples_per_channel, 0);
+}
+
+int32_t AudioDeviceBuffer::SetRecordedBuffer(const void* audio_buffer,
+ size_t samples_per_channel,
+ int64_t capture_timestamp_ns) {
+ // Copy the complete input buffer to the local buffer.
+ const size_t old_size = rec_buffer_.size();
+ rec_buffer_.SetData(static_cast<const int16_t*>(audio_buffer),
+ rec_channels_ * samples_per_channel);
+ // Keep track of the size of the recording buffer. Only updated when the
+ // size changes, which is a rare event.
+ if (old_size != rec_buffer_.size()) {
+ RTC_LOG(LS_INFO) << "Size of recording buffer: " << rec_buffer_.size();
+ }
+
+  // If the timestamp is less than or equal to zero, it is not valid and is
+  // ignored. If we did timestamp alignment on such values they might
+  // accidentally become greater than zero and would then be handled as if
+  // they were correct timestamps.
+  capture_timestamp_ns_ =
+      (capture_timestamp_ns > 0)
+          ? rtc::kNumNanosecsPerMicrosec *
+                timestamp_aligner_.TranslateTimestamp(
+                    capture_timestamp_ns / rtc::kNumNanosecsPerMicrosec,
+                    rtc::TimeMicros())
+          : capture_timestamp_ns;
+ // Derive a new level value twice per second and check if it is non-zero.
+ int16_t max_abs = 0;
+ RTC_DCHECK_LT(rec_stat_count_, 50);
+ if (++rec_stat_count_ >= 50) {
+ // Returns the largest absolute value in a signed 16-bit vector.
+ max_abs = WebRtcSpl_MaxAbsValueW16(rec_buffer_.data(), rec_buffer_.size());
+ rec_stat_count_ = 0;
+ // Set `only_silence_recorded_` to false as soon as at least one detection
+ // of a non-zero audio packet is found. It can only be restored to true
+ // again by restarting the call.
+ if (max_abs > 0) {
+ only_silence_recorded_ = false;
+ }
+ }
+  // Update recording stats, which are used as the basis for periodic logging
+  // of the audio input state.
+ UpdateRecStats(max_abs, samples_per_channel);
+ return 0;
+}
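+// Illustrative example (numbers are assumptions, not taken from this file):
+// a capture timestamp of 1234567890 ns is first converted to microseconds
+// (divided by kNumNanosecsPerMicrosec == 1000) before being handed to the
+// aligner, and the translated value is scaled back by the same factor, so
+// capture_timestamp_ns_ stays in nanoseconds on the rtc::TimeMicros() clock.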
+
+int32_t AudioDeviceBuffer::DeliverRecordedData() {
+ if (!audio_transport_cb_) {
+ RTC_LOG(LS_WARNING) << "Invalid audio transport";
+ return 0;
+ }
+ const size_t frames = rec_buffer_.size() / rec_channels_;
+ const size_t bytes_per_frame = rec_channels_ * sizeof(int16_t);
+ uint32_t new_mic_level_dummy = 0;
+ uint32_t total_delay_ms = play_delay_ms_ + rec_delay_ms_;
+ int32_t res = audio_transport_cb_->RecordedDataIsAvailable(
+ rec_buffer_.data(), frames, bytes_per_frame, rec_channels_,
+ rec_sample_rate_, total_delay_ms, 0, 0, typing_status_,
+ new_mic_level_dummy, capture_timestamp_ns_);
+ if (res == -1) {
+ RTC_LOG(LS_ERROR) << "RecordedDataIsAvailable() failed";
+ }
+ return 0;
+}
+
+int32_t AudioDeviceBuffer::RequestPlayoutData(size_t samples_per_channel) {
+ // The consumer can change the requested size on the fly and we therefore
+  // resize the buffer accordingly. This also takes place at the first call to
+  // this method.
+ const size_t total_samples = play_channels_ * samples_per_channel;
+ if (play_buffer_.size() != total_samples) {
+ play_buffer_.SetSize(total_samples);
+ RTC_LOG(LS_INFO) << "Size of playout buffer: " << play_buffer_.size();
+ }
+
+ size_t num_samples_out(0);
+  // Starting playout without a valid audio transport object is currently
+  // supported; it leads to a warning and silence.
+ if (!audio_transport_cb_) {
+ RTC_LOG(LS_WARNING) << "Invalid audio transport";
+ return 0;
+ }
+
+ // Retrieve new 16-bit PCM audio data using the audio transport instance.
+ int64_t elapsed_time_ms = -1;
+ int64_t ntp_time_ms = -1;
+ const size_t bytes_per_frame = play_channels_ * sizeof(int16_t);
+ uint32_t res = audio_transport_cb_->NeedMorePlayData(
+ samples_per_channel, bytes_per_frame, play_channels_, play_sample_rate_,
+ play_buffer_.data(), num_samples_out, &elapsed_time_ms, &ntp_time_ms);
+ if (res != 0) {
+ RTC_LOG(LS_ERROR) << "NeedMorePlayData() failed";
+ }
+
+ // Derive a new level value twice per second.
+ int16_t max_abs = 0;
+ RTC_DCHECK_LT(play_stat_count_, 50);
+ if (++play_stat_count_ >= 50) {
+ // Returns the largest absolute value in a signed 16-bit vector.
+ max_abs =
+ WebRtcSpl_MaxAbsValueW16(play_buffer_.data(), play_buffer_.size());
+ play_stat_count_ = 0;
+ }
+  // Update playout stats, which are used as the basis for periodic logging
+  // of the audio output state.
+ UpdatePlayStats(max_abs, num_samples_out / play_channels_);
+ return static_cast<int32_t>(num_samples_out / play_channels_);
+}
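+// Illustrative example (values are assumptions): for 48 kHz stereo playout, a
+// 10 ms request gives samples_per_channel = 480, hence
+//   total_samples   = 2 * 480 = 960;           // int16_t samples
+//   bytes_per_frame = 2 * sizeof(int16_t) = 4;
+// and the method returns 480 when the transport delivers a full buffer.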
+
+int32_t AudioDeviceBuffer::GetPlayoutData(void* audio_buffer) {
+ RTC_DCHECK_GT(play_buffer_.size(), 0);
+#ifdef AUDIO_DEVICE_PLAYS_SINUS_TONE
+ const double phase_increment =
+ k2Pi * 440.0 / static_cast<double>(play_sample_rate_);
+ int16_t* destination_r = reinterpret_cast<int16_t*>(audio_buffer);
+ if (play_channels_ == 1) {
+ for (size_t i = 0; i < play_buffer_.size(); ++i) {
+ destination_r[i] = static_cast<int16_t>((sin(phase_) * (1 << 14)));
+ phase_ += phase_increment;
+ }
+ } else if (play_channels_ == 2) {
+ for (size_t i = 0; i < play_buffer_.size() / 2; ++i) {
+ destination_r[2 * i] = destination_r[2 * i + 1] =
+ static_cast<int16_t>((sin(phase_) * (1 << 14)));
+ phase_ += phase_increment;
+ }
+ }
+#else
+ memcpy(audio_buffer, play_buffer_.data(),
+ play_buffer_.size() * sizeof(int16_t));
+#endif
+ // Return samples per channel or number of frames.
+ return static_cast<int32_t>(play_buffer_.size() / play_channels_);
+}
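+// Illustrative example (values are assumptions): with play_sample_rate_ at
+// 48000 Hz, the 440 Hz test tone above advances the phase by
+//   phase_increment = 2 * pi * 440 / 48000 ~= 0.0576 radians per sample,
+// and the amplitude factor (1 << 14) = 16384 keeps the tone at half of the
+// int16_t full scale (32767).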
+
+void AudioDeviceBuffer::StartPeriodicLogging() {
+ task_queue_.PostTask([this] { LogStats(AudioDeviceBuffer::LOG_START); });
+}
+
+void AudioDeviceBuffer::StopPeriodicLogging() {
+ task_queue_.PostTask([this] { LogStats(AudioDeviceBuffer::LOG_STOP); });
+}
+
+void AudioDeviceBuffer::LogStats(LogState state) {
+ RTC_DCHECK_RUN_ON(&task_queue_);
+ int64_t now_time = rtc::TimeMillis();
+
+ if (state == AudioDeviceBuffer::LOG_START) {
+    // Reset counters at start. We will not add any logging in this state but
+    // the timer will be started by posting a new (delayed) task.
+ num_stat_reports_ = 0;
+ last_timer_task_time_ = now_time;
+ log_stats_ = true;
+ } else if (state == AudioDeviceBuffer::LOG_STOP) {
+ // Stop logging and posting new tasks.
+ log_stats_ = false;
+ } else if (state == AudioDeviceBuffer::LOG_ACTIVE) {
+ // Keep logging unless logging was disabled while task was posted.
+ }
+
+ // Avoid adding more logs since we are in STOP mode.
+ if (!log_stats_) {
+ return;
+ }
+
+ int64_t next_callback_time = now_time + kTimerIntervalInMilliseconds;
+ int64_t time_since_last = rtc::TimeDiff(now_time, last_timer_task_time_);
+ last_timer_task_time_ = now_time;
+
+ Stats stats;
+ {
+ MutexLock lock(&lock_);
+ stats = stats_;
+ stats_.max_rec_level = 0;
+ stats_.max_play_level = 0;
+ }
+
+  // Cache current sample rates from the atomic members.
+ const uint32_t rec_sample_rate = rec_sample_rate_;
+ const uint32_t play_sample_rate = play_sample_rate_;
+
+ // Log the latest statistics but skip the first two rounds just after state
+ // was set to LOG_START to ensure that we have at least one full stable
+ // 10-second interval for sample-rate estimation. Hence, first printed log
+ // will be after ~20 seconds.
+ if (++num_stat_reports_ > 2 &&
+ static_cast<size_t>(time_since_last) > kTimerIntervalInMilliseconds / 2) {
+ uint32_t diff_samples = stats.rec_samples - last_stats_.rec_samples;
+ float rate = diff_samples / (static_cast<float>(time_since_last) / 1000.0);
+ uint32_t abs_diff_rate_in_percent = 0;
+ if (rec_sample_rate > 0 && rate > 0) {
+ abs_diff_rate_in_percent = static_cast<uint32_t>(
+ 0.5f +
+ ((100.0f * std::abs(rate - rec_sample_rate)) / rec_sample_rate));
+ RTC_HISTOGRAM_PERCENTAGE("WebRTC.Audio.RecordSampleRateOffsetInPercent",
+ abs_diff_rate_in_percent);
+ RTC_LOG(LS_INFO) << "[REC : " << time_since_last << "msec, "
+ << rec_sample_rate / 1000 << "kHz] callbacks: "
+ << stats.rec_callbacks - last_stats_.rec_callbacks
+ << ", "
+ "samples: "
+ << diff_samples
+ << ", "
+ "rate: "
+ << static_cast<int>(rate + 0.5)
+ << ", "
+ "rate diff: "
+ << abs_diff_rate_in_percent
+ << "%, "
+ "level: "
+ << stats.max_rec_level;
+ }
+
+ diff_samples = stats.play_samples - last_stats_.play_samples;
+ rate = diff_samples / (static_cast<float>(time_since_last) / 1000.0);
+ abs_diff_rate_in_percent = 0;
+ if (play_sample_rate > 0 && rate > 0) {
+ abs_diff_rate_in_percent = static_cast<uint32_t>(
+ 0.5f +
+ ((100.0f * std::abs(rate - play_sample_rate)) / play_sample_rate));
+ RTC_HISTOGRAM_PERCENTAGE("WebRTC.Audio.PlayoutSampleRateOffsetInPercent",
+ abs_diff_rate_in_percent);
+ RTC_LOG(LS_INFO) << "[PLAY: " << time_since_last << "msec, "
+ << play_sample_rate / 1000 << "kHz] callbacks: "
+ << stats.play_callbacks - last_stats_.play_callbacks
+ << ", "
+ "samples: "
+ << diff_samples
+ << ", "
+ "rate: "
+ << static_cast<int>(rate + 0.5)
+ << ", "
+ "rate diff: "
+ << abs_diff_rate_in_percent
+ << "%, "
+ "level: "
+ << stats.max_play_level;
+ }
+ }
+ last_stats_ = stats;
+
+ int64_t time_to_wait_ms = next_callback_time - rtc::TimeMillis();
+ RTC_DCHECK_GT(time_to_wait_ms, 0) << "Invalid timer interval";
+
+ // Keep posting new (delayed) tasks until the state is changed to LOG_STOP.
+ task_queue_.PostDelayedTask(
+ [this] { AudioDeviceBuffer::LogStats(AudioDeviceBuffer::LOG_ACTIVE); },
+ TimeDelta::Millis(time_to_wait_ms));
+}
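+
+// Worked example of the offset computation above (values assumed for
+// illustration): with a nominal playout rate of 48000 Hz and 480240 samples
+// delivered over a 10000 ms interval,
+//   rate = 480240 / 10.0 = 48024,
+//   abs_diff_rate_in_percent = round(100 * |48024 - 48000| / 48000)
+//                            = round(0.05) = 0,
+// i.e. drifts below 0.5% are reported as 0% in the histogram.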
+
+void AudioDeviceBuffer::ResetRecStats() {
+ RTC_DCHECK_RUN_ON(&task_queue_);
+ last_stats_.ResetRecStats();
+ MutexLock lock(&lock_);
+ stats_.ResetRecStats();
+}
+
+void AudioDeviceBuffer::ResetPlayStats() {
+ RTC_DCHECK_RUN_ON(&task_queue_);
+ last_stats_.ResetPlayStats();
+ MutexLock lock(&lock_);
+ stats_.ResetPlayStats();
+}
+
+void AudioDeviceBuffer::UpdateRecStats(int16_t max_abs,
+ size_t samples_per_channel) {
+ MutexLock lock(&lock_);
+ ++stats_.rec_callbacks;
+ stats_.rec_samples += samples_per_channel;
+ if (max_abs > stats_.max_rec_level) {
+ stats_.max_rec_level = max_abs;
+ }
+}
+
+void AudioDeviceBuffer::UpdatePlayStats(int16_t max_abs,
+ size_t samples_per_channel) {
+ MutexLock lock(&lock_);
+ ++stats_.play_callbacks;
+ stats_.play_samples += samples_per_channel;
+ if (max_abs > stats_.max_play_level) {
+ stats_.max_play_level = max_abs;
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_device/audio_device_buffer.h b/third_party/libwebrtc/modules/audio_device/audio_device_buffer.h
new file mode 100644
index 0000000000..9a6a88a1be
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/audio_device_buffer.h
@@ -0,0 +1,244 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_DEVICE_AUDIO_DEVICE_BUFFER_H_
+#define MODULES_AUDIO_DEVICE_AUDIO_DEVICE_BUFFER_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <atomic>
+
+#include "api/sequence_checker.h"
+#include "api/task_queue/task_queue_factory.h"
+#include "modules/audio_device/include/audio_device_defines.h"
+#include "rtc_base/buffer.h"
+#include "rtc_base/synchronization/mutex.h"
+#include "rtc_base/task_queue.h"
+#include "rtc_base/thread_annotations.h"
+#include "rtc_base/timestamp_aligner.h"
+
+namespace webrtc {
+
+ // Delta times between two successive playout callbacks are limited to this
+ // value before being added to an internal array.
+const size_t kMaxDeltaTimeInMs = 500;
+// TODO(henrika): remove when no longer used by external client.
+const size_t kMaxBufferSizeBytes = 3840; // 10ms in stereo @ 96kHz
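+// (That is 96000 samples/s * 0.010 s * 2 channels * 2 bytes per sample.)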
+
+class AudioDeviceBuffer {
+ public:
+ enum LogState {
+ LOG_START = 0,
+ LOG_STOP,
+ LOG_ACTIVE,
+ };
+
+ struct Stats {
+ void ResetRecStats() {
+ rec_callbacks = 0;
+ rec_samples = 0;
+ max_rec_level = 0;
+ }
+
+ void ResetPlayStats() {
+ play_callbacks = 0;
+ play_samples = 0;
+ max_play_level = 0;
+ }
+
+ // Total number of recording callbacks where the source provides 10ms audio
+ // data each time.
+ uint64_t rec_callbacks = 0;
+
+ // Total number of playback callbacks where the sink asks for 10ms audio
+ // data each time.
+ uint64_t play_callbacks = 0;
+
+ // Total number of recorded audio samples.
+ uint64_t rec_samples = 0;
+
+ // Total number of played audio samples.
+ uint64_t play_samples = 0;
+
+ // Contains max level (max(abs(x))) of recorded audio packets over the last
+ // 10 seconds where a new measurement is done twice per second. The level
+ // is reset to zero at each call to LogStats().
+ int16_t max_rec_level = 0;
+
+ // Contains max level of played audio packets over the last 10 seconds where
+ // a new measurement is done twice per second. The level is reset to zero at
+ // each call to LogStats().
+ int16_t max_play_level = 0;
+ };
+
+ explicit AudioDeviceBuffer(TaskQueueFactory* task_queue_factory);
+ virtual ~AudioDeviceBuffer();
+
+ int32_t RegisterAudioCallback(AudioTransport* audio_callback);
+
+ void StartPlayout();
+ void StartRecording();
+ void StopPlayout();
+ void StopRecording();
+
+ int32_t SetRecordingSampleRate(uint32_t fsHz);
+ int32_t SetPlayoutSampleRate(uint32_t fsHz);
+ uint32_t RecordingSampleRate() const;
+ uint32_t PlayoutSampleRate() const;
+
+ int32_t SetRecordingChannels(size_t channels);
+ int32_t SetPlayoutChannels(size_t channels);
+ size_t RecordingChannels() const;
+ size_t PlayoutChannels() const;
+
+ // TODO(bugs.webrtc.org/13621) Deprecate this function
+ virtual int32_t SetRecordedBuffer(const void* audio_buffer,
+ size_t samples_per_channel);
+
+ virtual int32_t SetRecordedBuffer(const void* audio_buffer,
+ size_t samples_per_channel,
+ int64_t capture_timestamp_ns);
+ virtual void SetVQEData(int play_delay_ms, int rec_delay_ms);
+ virtual int32_t DeliverRecordedData();
+ uint32_t NewMicLevel() const;
+
+ virtual int32_t RequestPlayoutData(size_t samples_per_channel);
+ virtual int32_t GetPlayoutData(void* audio_buffer);
+
+ int32_t SetTypingStatus(bool typing_status);
+
+ private:
+ // Starts/stops periodic logging of audio stats.
+ void StartPeriodicLogging();
+ void StopPeriodicLogging();
+
+ // Called periodically on the internal thread created by the TaskQueue.
+ // Updates some stats but does so on the task queue to ensure that access to
+ // members is serialized, hence avoiding the use of locks.
+ // state = LOG_START => members are initialized and the timer starts.
+ // state = LOG_STOP => no logs are printed and the timer stops.
+ // state = LOG_ACTIVE => logs are printed and the timer is kept alive.
+ void LogStats(LogState state);
+
+ // Updates counters in each play/record callback. These counters are later
+ // (periodically) read by LogStats() using a lock.
+ void UpdateRecStats(int16_t max_abs, size_t samples_per_channel);
+ void UpdatePlayStats(int16_t max_abs, size_t samples_per_channel);
+
+ // Clears all members tracking stats for recording and playout.
+ // These methods both run on the task queue.
+ void ResetRecStats();
+ void ResetPlayStats();
+
+ // This object lives on the main (creating) thread and most methods are
+ // called on that same thread. When audio has started, some methods will be
+ // called on either a native audio thread for playout or a native thread for
+ // recording. Some members are not annotated since they are "protected by
+ // design" and adding e.g. a race checker can cause failures for very few
+ // edge cases, and it is IMHO not worth the risk to use them in this class.
+ // TODO(henrika): see if it is possible to refactor and annotate all members.
+
+ // Main thread on which this object is created.
+ SequenceChecker main_thread_checker_;
+
+ Mutex lock_;
+
+ // Task queue used to invoke LogStats() periodically. Tasks are executed on a
+ // worker thread but it does not necessarily have to be the same thread for
+ // each task.
+ rtc::TaskQueue task_queue_;
+
+ // Raw pointer to the AudioTransport instance. Supplied to
+ // RegisterAudioCallback(); it must outlive this object. This member cannot be
+ // changed while any media is active. It is possible to start media without
+ // calling RegisterAudioCallback(), but that leads to ignored audio callbacks
+ // in both directions: native audio will be active but no audio samples will
+ // be transported.
+ AudioTransport* audio_transport_cb_;
+
+ // Sample rate in Hertz. Accessed atomically.
+ std::atomic<uint32_t> rec_sample_rate_;
+ std::atomic<uint32_t> play_sample_rate_;
+
+ // Number of audio channels. Accessed atomically.
+ std::atomic<size_t> rec_channels_;
+ std::atomic<size_t> play_channels_;
+
+ // Keeps track of whether playout and recording are active. A combination of
+ // these states is used to determine when to start and stop the timer. Only
+ // used on the creating thread and not used to control any media flow.
+ bool playing_ RTC_GUARDED_BY(main_thread_checker_);
+ bool recording_ RTC_GUARDED_BY(main_thread_checker_);
+
+ // Buffer used for audio samples to be played out. Size can be changed
+ // dynamically. The 16-bit samples are interleaved, hence the size is
+ // proportional to the number of channels.
+ rtc::BufferT<int16_t> play_buffer_;
+
+ // Byte buffer used for recorded audio samples. Size can be changed
+ // dynamically.
+ rtc::BufferT<int16_t> rec_buffer_;
+
+ // Contains true if a key-press has been detected.
+ bool typing_status_;
+
+ // Delay values used by the AEC.
+ int play_delay_ms_;
+ int rec_delay_ms_;
+
+ // Capture timestamp.
+ int64_t capture_timestamp_ns_;
+
+ // Counts number of times LogStats() has been called.
+ size_t num_stat_reports_ RTC_GUARDED_BY(task_queue_);
+
+ // Time stamp of last timer task (drives logging).
+ int64_t last_timer_task_time_ RTC_GUARDED_BY(task_queue_);
+
+ // Counts audio callbacks modulo 50 to signal when a new measurement of
+ // audio stats shall be stored.
+ int16_t rec_stat_count_;
+ int16_t play_stat_count_;
+
+ // Time stamps of when playout and recording starts.
+ int64_t play_start_time_ RTC_GUARDED_BY(main_thread_checker_);
+ int64_t rec_start_time_ RTC_GUARDED_BY(main_thread_checker_);
+
+ // Contains counters for playout and recording statistics.
+ Stats stats_ RTC_GUARDED_BY(lock_);
+
+ // Stores current stats at each timer task. Used to calculate differences
+ // between two successive timer events.
+ Stats last_stats_ RTC_GUARDED_BY(task_queue_);
+
+ // Set to true at construction and modified to false as soon as one audio-
+ // level estimate larger than zero is detected.
+ bool only_silence_recorded_;
+
+ // Set to true when logging of audio stats is enabled for the first time in
+ // StartPeriodicLogging() and set to false by StopPeriodicLogging().
+ // Setting this member to false prevents (possibly invalid) log messages from
+ // being printed in the LogStats() task.
+ bool log_stats_ RTC_GUARDED_BY(task_queue_);
+
+ // Used for converting capture timestamps (received from AudioRecordThread
+ // via AudioRecordJni::DataIsRecorded) to the RTC clock.
+ rtc::TimestampAligner timestamp_aligner_;
+
+// Should *never* be defined in production builds. Only used for testing.
+// When defined, the output signal will be replaced by a sine tone at 440 Hz.
+#ifdef AUDIO_DEVICE_PLAYS_SINUS_TONE
+ double phase_;
+#endif
+};
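+
+// A minimal lifecycle sketch, assuming an owning audio layer; the |transport|
+// pointer and the parameter values below are illustrative only:
+//
+//   AudioDeviceBuffer adb(task_queue_factory);
+//   adb.RegisterAudioCallback(transport);  // |transport| must outlive |adb|.
+//   adb.SetPlayoutSampleRate(48000);
+//   adb.SetPlayoutChannels(2);
+//   adb.StartPlayout();  // Also starts the periodic stats logging timer.
+//   ...
+//   adb.StopPlayout();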
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_DEVICE_AUDIO_DEVICE_BUFFER_H_
diff --git a/third_party/libwebrtc/modules/audio_device/audio_device_buffer_gn/moz.build b/third_party/libwebrtc/modules/audio_device/audio_device_buffer_gn/moz.build
new file mode 100644
index 0000000000..30a6f5a256
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/audio_device_buffer_gn/moz.build
@@ -0,0 +1,213 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/audio_device/audio_device_buffer.cc",
+ "/third_party/libwebrtc/modules/audio_device/fine_audio_buffer.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("audio_device_buffer_gn")
diff --git a/third_party/libwebrtc/modules/audio_device/audio_device_config.h b/third_party/libwebrtc/modules/audio_device/audio_device_config.h
new file mode 100644
index 0000000000..fa51747b67
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/audio_device_config.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef AUDIO_DEVICE_AUDIO_DEVICE_CONFIG_H_
+#define AUDIO_DEVICE_AUDIO_DEVICE_CONFIG_H_
+
+// Enumerators
+//
+enum { GET_MIC_VOLUME_INTERVAL_MS = 1000 };
+
+// Platform specifics
+//
+#if defined(_WIN32)
+#if (_MSC_VER >= 1400)
+#if !defined(WEBRTC_DUMMY_FILE_DEVICES)
+// Windows Core Audio is the default audio layer in Windows.
+// Only supported for VS 2005 and higher.
+#define WEBRTC_WINDOWS_CORE_AUDIO_BUILD
+#endif
+#endif
+#endif
+
+#endif // AUDIO_DEVICE_AUDIO_DEVICE_CONFIG_H_
diff --git a/third_party/libwebrtc/modules/audio_device/audio_device_data_observer.cc b/third_party/libwebrtc/modules/audio_device/audio_device_data_observer.cc
new file mode 100644
index 0000000000..3775e7ce6d
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/audio_device_data_observer.cc
@@ -0,0 +1,372 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_device/include/audio_device_data_observer.h"
+
+#include "api/make_ref_counted.h"
+#include "modules/audio_device/include/audio_device_defines.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+namespace {
+
+// A wrapper over AudioDeviceModule that registers itself as the AudioTransport
+// callback and redirects the PCM data to the AudioDeviceDataObserver callback.
+class ADMWrapper : public AudioDeviceModule, public AudioTransport {
+ public:
+ ADMWrapper(rtc::scoped_refptr<AudioDeviceModule> impl,
+ AudioDeviceDataObserver* legacy_observer,
+ std::unique_ptr<AudioDeviceDataObserver> observer)
+ : impl_(impl),
+ legacy_observer_(legacy_observer),
+ observer_(std::move(observer)) {
+ is_valid_ = impl_.get() != nullptr;
+ }
+ ADMWrapper(AudioLayer audio_layer,
+ TaskQueueFactory* task_queue_factory,
+ AudioDeviceDataObserver* legacy_observer,
+ std::unique_ptr<AudioDeviceDataObserver> observer)
+ : ADMWrapper(AudioDeviceModule::Create(audio_layer, task_queue_factory),
+ legacy_observer,
+ std::move(observer)) {}
+ ~ADMWrapper() override {
+ audio_transport_ = nullptr;
+ observer_ = nullptr;
+ }
+
+ // Make sure we have a valid ADM before returning it to the user.
+ bool IsValid() { return is_valid_; }
+
+ int32_t RecordedDataIsAvailable(const void* audioSamples,
+ size_t nSamples,
+ size_t nBytesPerSample,
+ size_t nChannels,
+ uint32_t samples_per_sec,
+ uint32_t total_delay_ms,
+ int32_t clockDrift,
+ uint32_t currentMicLevel,
+ bool keyPressed,
+ uint32_t& newMicLevel) override {
+ return RecordedDataIsAvailable(audioSamples, nSamples, nBytesPerSample,
+ nChannels, samples_per_sec, total_delay_ms,
+ clockDrift, currentMicLevel, keyPressed,
+ newMicLevel, /*capture_timestamp_ns*/ 0);
+ }
+
+ // AudioTransport methods overrides.
+ int32_t RecordedDataIsAvailable(const void* audioSamples,
+ size_t nSamples,
+ size_t nBytesPerSample,
+ size_t nChannels,
+ uint32_t samples_per_sec,
+ uint32_t total_delay_ms,
+ int32_t clockDrift,
+ uint32_t currentMicLevel,
+ bool keyPressed,
+ uint32_t& newMicLevel,
+ int64_t capture_timestamp_ns) override {
+ int32_t res = 0;
+ // Forward PCM data of the locally captured audio to the observer.
+ if (observer_) {
+ observer_->OnCaptureData(audioSamples, nSamples, nBytesPerSample,
+ nChannels, samples_per_sec);
+ }
+
+ // Send to the actual audio transport.
+ if (audio_transport_) {
+ res = audio_transport_->RecordedDataIsAvailable(
+ audioSamples, nSamples, nBytesPerSample, nChannels, samples_per_sec,
+ total_delay_ms, clockDrift, currentMicLevel, keyPressed, newMicLevel,
+ capture_timestamp_ns);
+ }
+
+ return res;
+ }
+
+ int32_t NeedMorePlayData(const size_t nSamples,
+ const size_t nBytesPerSample,
+ const size_t nChannels,
+ const uint32_t samples_per_sec,
+ void* audioSamples,
+ size_t& nSamplesOut,
+ int64_t* elapsed_time_ms,
+ int64_t* ntp_time_ms) override {
+ int32_t res = 0;
+ // Set out parameters to safe values to be sure not to return corrupted
+ // data.
+ nSamplesOut = 0;
+ *elapsed_time_ms = -1;
+ *ntp_time_ms = -1;
+ // Request data from audio transport.
+ if (audio_transport_) {
+ res = audio_transport_->NeedMorePlayData(
+ nSamples, nBytesPerSample, nChannels, samples_per_sec, audioSamples,
+ nSamplesOut, elapsed_time_ms, ntp_time_ms);
+ }
+
+ // Capture rendered data.
+ if (observer_) {
+ observer_->OnRenderData(audioSamples, nSamples, nBytesPerSample,
+ nChannels, samples_per_sec);
+ }
+
+ return res;
+ }
+
+ void PullRenderData(int bits_per_sample,
+ int sample_rate,
+ size_t number_of_channels,
+ size_t number_of_frames,
+ void* audio_data,
+ int64_t* elapsed_time_ms,
+ int64_t* ntp_time_ms) override {
+ RTC_DCHECK_NOTREACHED();
+ }
+
+ // Override AudioDeviceModule's RegisterAudioCallback method to remember the
+ // actual audio transport (e.g. the voice engine).
+ int32_t RegisterAudioCallback(AudioTransport* audio_callback) override {
+ // Remember the audio callback so that PCM data can be forwarded to it.
+ audio_transport_ = audio_callback;
+ return 0;
+ }
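+ // Resulting call chain (for illustration): the wrapped ADM invokes this
+ // wrapper as its AudioTransport (registered in Init()); the wrapper taps the
+ // PCM for the observer and forwards it to the audio_transport_ stored here.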
+
+ // AudioDeviceModule pass-through method overrides.
+ int32_t ActiveAudioLayer(AudioLayer* audio_layer) const override {
+ return impl_->ActiveAudioLayer(audio_layer);
+ }
+ int32_t Init() override {
+ int res = impl_->Init();
+ if (res != 0) {
+ return res;
+ }
+ // Register self as the audio transport callback for underlying ADM impl.
+ impl_->RegisterAudioCallback(this);
+ return res;
+ }
+ int32_t Terminate() override { return impl_->Terminate(); }
+ bool Initialized() const override { return impl_->Initialized(); }
+ int16_t PlayoutDevices() override { return impl_->PlayoutDevices(); }
+ int16_t RecordingDevices() override { return impl_->RecordingDevices(); }
+ int32_t PlayoutDeviceName(uint16_t index,
+ char name[kAdmMaxDeviceNameSize],
+ char guid[kAdmMaxGuidSize]) override {
+ return impl_->PlayoutDeviceName(index, name, guid);
+ }
+ int32_t RecordingDeviceName(uint16_t index,
+ char name[kAdmMaxDeviceNameSize],
+ char guid[kAdmMaxGuidSize]) override {
+ return impl_->RecordingDeviceName(index, name, guid);
+ }
+ int32_t SetPlayoutDevice(uint16_t index) override {
+ return impl_->SetPlayoutDevice(index);
+ }
+ int32_t SetPlayoutDevice(WindowsDeviceType device) override {
+ return impl_->SetPlayoutDevice(device);
+ }
+ int32_t SetRecordingDevice(uint16_t index) override {
+ return impl_->SetRecordingDevice(index);
+ }
+ int32_t SetRecordingDevice(WindowsDeviceType device) override {
+ return impl_->SetRecordingDevice(device);
+ }
+ int32_t PlayoutIsAvailable(bool* available) override {
+ return impl_->PlayoutIsAvailable(available);
+ }
+ int32_t InitPlayout() override { return impl_->InitPlayout(); }
+ bool PlayoutIsInitialized() const override {
+ return impl_->PlayoutIsInitialized();
+ }
+ int32_t RecordingIsAvailable(bool* available) override {
+ return impl_->RecordingIsAvailable(available);
+ }
+ int32_t InitRecording() override { return impl_->InitRecording(); }
+ bool RecordingIsInitialized() const override {
+ return impl_->RecordingIsInitialized();
+ }
+ int32_t StartPlayout() override { return impl_->StartPlayout(); }
+ int32_t StopPlayout() override { return impl_->StopPlayout(); }
+ bool Playing() const override { return impl_->Playing(); }
+ int32_t StartRecording() override { return impl_->StartRecording(); }
+ int32_t StopRecording() override { return impl_->StopRecording(); }
+ bool Recording() const override { return impl_->Recording(); }
+ int32_t InitSpeaker() override { return impl_->InitSpeaker(); }
+ bool SpeakerIsInitialized() const override {
+ return impl_->SpeakerIsInitialized();
+ }
+ int32_t InitMicrophone() override { return impl_->InitMicrophone(); }
+ bool MicrophoneIsInitialized() const override {
+ return impl_->MicrophoneIsInitialized();
+ }
+ int32_t SpeakerVolumeIsAvailable(bool* available) override {
+ return impl_->SpeakerVolumeIsAvailable(available);
+ }
+ int32_t SetSpeakerVolume(uint32_t volume) override {
+ return impl_->SetSpeakerVolume(volume);
+ }
+ int32_t SpeakerVolume(uint32_t* volume) const override {
+ return impl_->SpeakerVolume(volume);
+ }
+ int32_t MaxSpeakerVolume(uint32_t* max_volume) const override {
+ return impl_->MaxSpeakerVolume(max_volume);
+ }
+ int32_t MinSpeakerVolume(uint32_t* min_volume) const override {
+ return impl_->MinSpeakerVolume(min_volume);
+ }
+ int32_t MicrophoneVolumeIsAvailable(bool* available) override {
+ return impl_->MicrophoneVolumeIsAvailable(available);
+ }
+ int32_t SetMicrophoneVolume(uint32_t volume) override {
+ return impl_->SetMicrophoneVolume(volume);
+ }
+ int32_t MicrophoneVolume(uint32_t* volume) const override {
+ return impl_->MicrophoneVolume(volume);
+ }
+ int32_t MaxMicrophoneVolume(uint32_t* max_volume) const override {
+ return impl_->MaxMicrophoneVolume(max_volume);
+ }
+ int32_t MinMicrophoneVolume(uint32_t* min_volume) const override {
+ return impl_->MinMicrophoneVolume(min_volume);
+ }
+ int32_t SpeakerMuteIsAvailable(bool* available) override {
+ return impl_->SpeakerMuteIsAvailable(available);
+ }
+ int32_t SetSpeakerMute(bool enable) override {
+ return impl_->SetSpeakerMute(enable);
+ }
+ int32_t SpeakerMute(bool* enabled) const override {
+ return impl_->SpeakerMute(enabled);
+ }
+ int32_t MicrophoneMuteIsAvailable(bool* available) override {
+ return impl_->MicrophoneMuteIsAvailable(available);
+ }
+ int32_t SetMicrophoneMute(bool enable) override {
+ return impl_->SetMicrophoneMute(enable);
+ }
+ int32_t MicrophoneMute(bool* enabled) const override {
+ return impl_->MicrophoneMute(enabled);
+ }
+ int32_t StereoPlayoutIsAvailable(bool* available) const override {
+ return impl_->StereoPlayoutIsAvailable(available);
+ }
+ int32_t SetStereoPlayout(bool enable) override {
+ return impl_->SetStereoPlayout(enable);
+ }
+ int32_t StereoPlayout(bool* enabled) const override {
+ return impl_->StereoPlayout(enabled);
+ }
+ int32_t StereoRecordingIsAvailable(bool* available) const override {
+ return impl_->StereoRecordingIsAvailable(available);
+ }
+ int32_t SetStereoRecording(bool enable) override {
+ return impl_->SetStereoRecording(enable);
+ }
+ int32_t StereoRecording(bool* enabled) const override {
+ return impl_->StereoRecording(enabled);
+ }
+ int32_t PlayoutDelay(uint16_t* delay_ms) const override {
+ return impl_->PlayoutDelay(delay_ms);
+ }
+ bool BuiltInAECIsAvailable() const override {
+ return impl_->BuiltInAECIsAvailable();
+ }
+ bool BuiltInAGCIsAvailable() const override {
+ return impl_->BuiltInAGCIsAvailable();
+ }
+ bool BuiltInNSIsAvailable() const override {
+ return impl_->BuiltInNSIsAvailable();
+ }
+ int32_t EnableBuiltInAEC(bool enable) override {
+ return impl_->EnableBuiltInAEC(enable);
+ }
+ int32_t EnableBuiltInAGC(bool enable) override {
+ return impl_->EnableBuiltInAGC(enable);
+ }
+ int32_t EnableBuiltInNS(bool enable) override {
+ return impl_->EnableBuiltInNS(enable);
+ }
+ int32_t GetPlayoutUnderrunCount() const override {
+ return impl_->GetPlayoutUnderrunCount();
+ }
+// Only supported on iOS.
+#if defined(WEBRTC_IOS)
+ int GetPlayoutAudioParameters(AudioParameters* params) const override {
+ return impl_->GetPlayoutAudioParameters(params);
+ }
+ int GetRecordAudioParameters(AudioParameters* params) const override {
+ return impl_->GetRecordAudioParameters(params);
+ }
+#endif // WEBRTC_IOS
+
+ protected:
+ rtc::scoped_refptr<AudioDeviceModule> impl_;
+ AudioDeviceDataObserver* legacy_observer_ = nullptr;
+ std::unique_ptr<AudioDeviceDataObserver> observer_;
+ AudioTransport* audio_transport_ = nullptr;
+ bool is_valid_ = false;
+};
+
+} // namespace
+
+rtc::scoped_refptr<AudioDeviceModule> CreateAudioDeviceWithDataObserver(
+ rtc::scoped_refptr<AudioDeviceModule> impl,
+ std::unique_ptr<AudioDeviceDataObserver> observer) {
+ auto audio_device = rtc::make_ref_counted<ADMWrapper>(impl, observer.get(),
+ std::move(observer));
+
+ if (!audio_device->IsValid()) {
+ return nullptr;
+ }
+
+ return audio_device;
+}
+
+rtc::scoped_refptr<AudioDeviceModule> CreateAudioDeviceWithDataObserver(
+ rtc::scoped_refptr<AudioDeviceModule> impl,
+ AudioDeviceDataObserver* legacy_observer) {
+ auto audio_device =
+ rtc::make_ref_counted<ADMWrapper>(impl, legacy_observer, nullptr);
+
+ if (!audio_device->IsValid()) {
+ return nullptr;
+ }
+
+ return audio_device;
+}
+
+rtc::scoped_refptr<AudioDeviceModule> CreateAudioDeviceWithDataObserver(
+ AudioDeviceModule::AudioLayer audio_layer,
+ TaskQueueFactory* task_queue_factory,
+ std::unique_ptr<AudioDeviceDataObserver> observer) {
+ auto audio_device = rtc::make_ref_counted<ADMWrapper>(
+ audio_layer, task_queue_factory, observer.get(), std::move(observer));
+
+ if (!audio_device->IsValid()) {
+ return nullptr;
+ }
+
+ return audio_device;
+}
+
+rtc::scoped_refptr<AudioDeviceModule> CreateAudioDeviceWithDataObserver(
+ AudioDeviceModule::AudioLayer audio_layer,
+ TaskQueueFactory* task_queue_factory,
+ AudioDeviceDataObserver* legacy_observer) {
+ auto audio_device = rtc::make_ref_counted<ADMWrapper>(
+ audio_layer, task_queue_factory, legacy_observer, nullptr);
+
+ if (!audio_device->IsValid()) {
+ return nullptr;
+ }
+
+ return audio_device;
+}
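+
+// A minimal usage sketch, assuming a caller that owns a TaskQueueFactory; the
+// |task_queue_factory| pointer and MyObserver type are hypothetical:
+//
+//   auto adm = CreateAudioDeviceWithDataObserver(
+//       AudioDeviceModule::kPlatformDefaultAudio, task_queue_factory,
+//       std::make_unique<MyObserver>());
+//   if (adm) {
+//     adm->Init();  // The wrapper registers itself on the wrapped ADM here.
+//   }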
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_device/audio_device_default_gn/moz.build b/third_party/libwebrtc/modules/audio_device/audio_device_default_gn/moz.build
new file mode 100644
index 0000000000..c8db0272c6
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/audio_device_default_gn/moz.build
@@ -0,0 +1,193 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("audio_device_default_gn")
diff --git a/third_party/libwebrtc/modules/audio_device/audio_device_generic.cc b/third_party/libwebrtc/modules/audio_device/audio_device_generic.cc
new file mode 100644
index 0000000000..7b8cfd1734
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/audio_device_generic.cc
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_device/audio_device_generic.h"
+
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+bool AudioDeviceGeneric::BuiltInAECIsAvailable() const {
+ RTC_LOG_F(LS_ERROR) << "Not supported on this platform";
+ return false;
+}
+
+int32_t AudioDeviceGeneric::EnableBuiltInAEC(bool enable) {
+ RTC_LOG_F(LS_ERROR) << "Not supported on this platform";
+ return -1;
+}
+
+bool AudioDeviceGeneric::BuiltInAGCIsAvailable() const {
+ RTC_LOG_F(LS_ERROR) << "Not supported on this platform";
+ return false;
+}
+
+int32_t AudioDeviceGeneric::EnableBuiltInAGC(bool enable) {
+ RTC_LOG_F(LS_ERROR) << "Not supported on this platform";
+ return -1;
+}
+
+bool AudioDeviceGeneric::BuiltInNSIsAvailable() const {
+ RTC_LOG_F(LS_ERROR) << "Not supported on this platform";
+ return false;
+}
+
+int32_t AudioDeviceGeneric::EnableBuiltInNS(bool enable) {
+ RTC_LOG_F(LS_ERROR) << "Not supported on this platform";
+ return -1;
+}
+
+int32_t AudioDeviceGeneric::GetPlayoutUnderrunCount() const {
+ RTC_LOG_F(LS_ERROR) << "Not supported on this platform";
+ return -1;
+}
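+
+// Note: these defaults make the features opt-in; a platform backend that does
+// support e.g. built-in AEC overrides both BuiltInAECIsAvailable() and
+// EnableBuiltInAEC() instead of inheriting the stubs above.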
+
+#if defined(WEBRTC_IOS)
+int AudioDeviceGeneric::GetPlayoutAudioParameters(
+ AudioParameters* params) const {
+ RTC_LOG_F(LS_ERROR) << "Not supported on this platform";
+ return -1;
+}
+
+int AudioDeviceGeneric::GetRecordAudioParameters(
+ AudioParameters* params) const {
+ RTC_LOG_F(LS_ERROR) << "Not supported on this platform";
+ return -1;
+}
+#endif // WEBRTC_IOS
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_device/audio_device_generic.h b/third_party/libwebrtc/modules/audio_device/audio_device_generic.h
new file mode 100644
index 0000000000..41e24eb3b0
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/audio_device_generic.h
@@ -0,0 +1,145 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef AUDIO_DEVICE_AUDIO_DEVICE_GENERIC_H_
+#define AUDIO_DEVICE_AUDIO_DEVICE_GENERIC_H_
+
+#include <stdint.h>
+
+#include "modules/audio_device/audio_device_buffer.h"
+#include "modules/audio_device/include/audio_device.h"
+#include "modules/audio_device/include/audio_device_defines.h"
+
+namespace webrtc {
+
+class AudioDeviceGeneric {
+ public:
+ // For use with UMA logging. Must be kept in sync with histograms.xml in
+ // Chrome, located at
+ // https://cs.chromium.org/chromium/src/tools/metrics/histograms/histograms.xml
+ enum class InitStatus {
+ OK = 0,
+ PLAYOUT_ERROR = 1,
+ RECORDING_ERROR = 2,
+ OTHER_ERROR = 3,
+ NUM_STATUSES = 4
+ };
+ // Retrieve the currently utilized audio layer
+ virtual int32_t ActiveAudioLayer(
+ AudioDeviceModule::AudioLayer& audioLayer) const = 0;
+
+ // Main initialization and termination
+ virtual InitStatus Init() = 0;
+ virtual int32_t Terminate() = 0;
+ virtual bool Initialized() const = 0;
+
+ // Device enumeration
+ virtual int16_t PlayoutDevices() = 0;
+ virtual int16_t RecordingDevices() = 0;
+ virtual int32_t PlayoutDeviceName(uint16_t index,
+ char name[kAdmMaxDeviceNameSize],
+ char guid[kAdmMaxGuidSize]) = 0;
+ virtual int32_t RecordingDeviceName(uint16_t index,
+ char name[kAdmMaxDeviceNameSize],
+ char guid[kAdmMaxGuidSize]) = 0;
+
+ // Device selection
+ virtual int32_t SetPlayoutDevice(uint16_t index) = 0;
+ virtual int32_t SetPlayoutDevice(
+ AudioDeviceModule::WindowsDeviceType device) = 0;
+ virtual int32_t SetRecordingDevice(uint16_t index) = 0;
+ virtual int32_t SetRecordingDevice(
+ AudioDeviceModule::WindowsDeviceType device) = 0;
+
+ // Audio transport initialization
+ virtual int32_t PlayoutIsAvailable(bool& available) = 0;
+ virtual int32_t InitPlayout() = 0;
+ virtual bool PlayoutIsInitialized() const = 0;
+ virtual int32_t RecordingIsAvailable(bool& available) = 0;
+ virtual int32_t InitRecording() = 0;
+ virtual bool RecordingIsInitialized() const = 0;
+
+ // Audio transport control
+ virtual int32_t StartPlayout() = 0;
+ virtual int32_t StopPlayout() = 0;
+ virtual bool Playing() const = 0;
+ virtual int32_t StartRecording() = 0;
+ virtual int32_t StopRecording() = 0;
+ virtual bool Recording() const = 0;
+
+ // Audio mixer initialization
+ virtual int32_t InitSpeaker() = 0;
+ virtual bool SpeakerIsInitialized() const = 0;
+ virtual int32_t InitMicrophone() = 0;
+ virtual bool MicrophoneIsInitialized() const = 0;
+
+ // Speaker volume controls
+ virtual int32_t SpeakerVolumeIsAvailable(bool& available) = 0;
+ virtual int32_t SetSpeakerVolume(uint32_t volume) = 0;
+ virtual int32_t SpeakerVolume(uint32_t& volume) const = 0;
+ virtual int32_t MaxSpeakerVolume(uint32_t& maxVolume) const = 0;
+ virtual int32_t MinSpeakerVolume(uint32_t& minVolume) const = 0;
+
+ // Microphone volume controls
+ virtual int32_t MicrophoneVolumeIsAvailable(bool& available) = 0;
+ virtual int32_t SetMicrophoneVolume(uint32_t volume) = 0;
+ virtual int32_t MicrophoneVolume(uint32_t& volume) const = 0;
+ virtual int32_t MaxMicrophoneVolume(uint32_t& maxVolume) const = 0;
+ virtual int32_t MinMicrophoneVolume(uint32_t& minVolume) const = 0;
+
+ // Speaker mute control
+ virtual int32_t SpeakerMuteIsAvailable(bool& available) = 0;
+ virtual int32_t SetSpeakerMute(bool enable) = 0;
+ virtual int32_t SpeakerMute(bool& enabled) const = 0;
+
+ // Microphone mute control
+ virtual int32_t MicrophoneMuteIsAvailable(bool& available) = 0;
+ virtual int32_t SetMicrophoneMute(bool enable) = 0;
+ virtual int32_t MicrophoneMute(bool& enabled) const = 0;
+
+ // Stereo support
+ virtual int32_t StereoPlayoutIsAvailable(bool& available) = 0;
+ virtual int32_t SetStereoPlayout(bool enable) = 0;
+ virtual int32_t StereoPlayout(bool& enabled) const = 0;
+ virtual int32_t StereoRecordingIsAvailable(bool& available) = 0;
+ virtual int32_t SetStereoRecording(bool enable) = 0;
+ virtual int32_t StereoRecording(bool& enabled) const = 0;
+
+ // Delay information and control
+ virtual int32_t PlayoutDelay(uint16_t& delayMS) const = 0;
+
+ // Android only
+ virtual bool BuiltInAECIsAvailable() const;
+ virtual bool BuiltInAGCIsAvailable() const;
+ virtual bool BuiltInNSIsAvailable() const;
+
+ // Windows Core Audio and Android only.
+ virtual int32_t EnableBuiltInAEC(bool enable);
+ virtual int32_t EnableBuiltInAGC(bool enable);
+ virtual int32_t EnableBuiltInNS(bool enable);
+
+ // Play underrun count.
+ virtual int32_t GetPlayoutUnderrunCount() const;
+
+// iOS only.
+// TODO(henrika): add Android support.
+#if defined(WEBRTC_IOS)
+ virtual int GetPlayoutAudioParameters(AudioParameters* params) const;
+ virtual int GetRecordAudioParameters(AudioParameters* params) const;
+#endif // WEBRTC_IOS
+
+ virtual void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) = 0;
+
+ virtual ~AudioDeviceGeneric() {}
+};
+
+} // namespace webrtc
+
+#endif // AUDIO_DEVICE_AUDIO_DEVICE_GENERIC_H_
diff --git a/third_party/libwebrtc/modules/audio_device/audio_device_generic_gn/moz.build b/third_party/libwebrtc/modules/audio_device/audio_device_generic_gn/moz.build
new file mode 100644
index 0000000000..cb0ddf1b05
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/audio_device_generic_gn/moz.build
@@ -0,0 +1,212 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/audio_device/audio_device_generic.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("audio_device_generic_gn")
diff --git a/third_party/libwebrtc/modules/audio_device/audio_device_gn/moz.build b/third_party/libwebrtc/modules/audio_device/audio_device_gn/moz.build
new file mode 100644
index 0000000000..b4d2828819
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/audio_device_gn/moz.build
@@ -0,0 +1,206 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "GLESv2",
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "dl",
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("audio_device_gn")
diff --git a/third_party/libwebrtc/modules/audio_device/audio_device_impl.cc b/third_party/libwebrtc/modules/audio_device/audio_device_impl.cc
new file mode 100644
index 0000000000..092b98f2bf
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/audio_device_impl.cc
@@ -0,0 +1,951 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_device/audio_device_impl.h"
+
+#include <stddef.h>
+
+#include "api/make_ref_counted.h"
+#include "api/scoped_refptr.h"
+#include "modules/audio_device/audio_device_config.h" // IWYU pragma: keep
+#include "modules/audio_device/audio_device_generic.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "system_wrappers/include/metrics.h"
+
+#if defined(_WIN32)
+#if defined(WEBRTC_WINDOWS_CORE_AUDIO_BUILD)
+#include "modules/audio_device/win/audio_device_core_win.h"
+#endif
+#elif defined(WEBRTC_ANDROID)
+#include <stdlib.h>
+#if defined(WEBRTC_AUDIO_DEVICE_INCLUDE_ANDROID_AAUDIO)
+#include "modules/audio_device/android/aaudio_player.h"
+#include "modules/audio_device/android/aaudio_recorder.h"
+#endif
+#include "modules/audio_device/android/audio_device_template.h"
+#include "modules/audio_device/android/audio_manager.h"
+#include "modules/audio_device/android/audio_record_jni.h"
+#include "modules/audio_device/android/audio_track_jni.h"
+#include "modules/audio_device/android/opensles_player.h"
+#include "modules/audio_device/android/opensles_recorder.h"
+#elif defined(WEBRTC_LINUX)
+#if defined(WEBRTC_ENABLE_LINUX_ALSA)
+#include "modules/audio_device/linux/audio_device_alsa_linux.h"
+#endif
+#if defined(WEBRTC_ENABLE_LINUX_PULSE)
+#include "modules/audio_device/linux/audio_device_pulse_linux.h"
+#endif
+#elif defined(WEBRTC_IOS)
+#include "sdk/objc/native/src/audio/audio_device_ios.h"
+#elif defined(WEBRTC_MAC)
+#include "modules/audio_device/mac/audio_device_mac.h"
+#endif
+#if defined(WEBRTC_DUMMY_FILE_DEVICES)
+#include "modules/audio_device/dummy/file_audio_device.h"
+#include "modules/audio_device/dummy/file_audio_device_factory.h"
+#endif
+#include "modules/audio_device/dummy/audio_device_dummy.h"
+
+#define CHECKinitialized_() \
+ { \
+ if (!initialized_) { \
+ return -1; \
+ } \
+ }
+
+#define CHECKinitialized__BOOL() \
+ { \
+ if (!initialized_) { \
+ return false; \
+ } \
+ }
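+
+// Both guard macros are used at the top of nearly every method below; a
+// sketch of what a guarded method effectively expands to (illustrative only):
+//
+//   int32_t AudioDeviceModuleImpl::InitSpeaker() {
+//     if (!initialized_) return -1;  // CHECKinitialized_()
+//     return audio_device_->InitSpeaker();
+//   }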
+
+namespace webrtc {
+
+rtc::scoped_refptr<AudioDeviceModule> AudioDeviceModule::Create(
+ AudioLayer audio_layer,
+ TaskQueueFactory* task_queue_factory) {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ return AudioDeviceModule::CreateForTest(audio_layer, task_queue_factory);
+}
+
+// static
+rtc::scoped_refptr<AudioDeviceModuleForTest> AudioDeviceModule::CreateForTest(
+ AudioLayer audio_layer,
+ TaskQueueFactory* task_queue_factory) {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+
+ // The "AudioDeviceModule::kWindowsCoreAudio2" audio layer has its own
+ // dedicated factory method which should be used instead.
+ if (audio_layer == AudioDeviceModule::kWindowsCoreAudio2) {
+ RTC_LOG(LS_ERROR) << "Use the CreateWindowsCoreAudioAudioDeviceModule() "
+ "factory method instead for this option.";
+ return nullptr;
+ }
+
+  // Create the generic reference-counted (platform-independent) implementation.
+ auto audio_device = rtc::make_ref_counted<AudioDeviceModuleImpl>(
+ audio_layer, task_queue_factory);
+
+ // Ensure that the current platform is supported.
+ if (audio_device->CheckPlatform() == -1) {
+ return nullptr;
+ }
+
+ // Create the platform-dependent implementation.
+ if (audio_device->CreatePlatformSpecificObjects() == -1) {
+ return nullptr;
+ }
+
+ // Ensure that the generic audio buffer can communicate with the platform
+ // specific parts.
+ if (audio_device->AttachAudioBuffer() == -1) {
+ return nullptr;
+ }
+
+ return audio_device;
+}
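+
+// Minimal caller sketch (hypothetical code, for context only): create the
+// module with the platform default layer and initialize it before first use.
+//
+//   auto task_queue_factory = CreateDefaultTaskQueueFactory();
+//   rtc::scoped_refptr<AudioDeviceModule> adm = AudioDeviceModule::Create(
+//       AudioDeviceModule::kPlatformDefaultAudio, task_queue_factory.get());
+//   if (!adm || adm->Init() != 0) {
+//     // Creation or platform initialization failed.
+//   }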
+
+AudioDeviceModuleImpl::AudioDeviceModuleImpl(
+ AudioLayer audio_layer,
+ TaskQueueFactory* task_queue_factory)
+ : audio_layer_(audio_layer), audio_device_buffer_(task_queue_factory) {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+}
+
+int32_t AudioDeviceModuleImpl::CheckPlatform() {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ // Ensure that the current platform is supported
+ PlatformType platform(kPlatformNotSupported);
+#if defined(_WIN32)
+ platform = kPlatformWin32;
+ RTC_LOG(LS_INFO) << "current platform is Win32";
+#elif defined(WEBRTC_ANDROID)
+ platform = kPlatformAndroid;
+ RTC_LOG(LS_INFO) << "current platform is Android";
+#elif defined(WEBRTC_LINUX)
+ platform = kPlatformLinux;
+ RTC_LOG(LS_INFO) << "current platform is Linux";
+#elif defined(WEBRTC_IOS)
+ platform = kPlatformIOS;
+ RTC_LOG(LS_INFO) << "current platform is IOS";
+#elif defined(WEBRTC_MAC)
+ platform = kPlatformMac;
+ RTC_LOG(LS_INFO) << "current platform is Mac";
+#endif
+ if (platform == kPlatformNotSupported) {
+ RTC_LOG(LS_ERROR)
+ << "current platform is not supported => this module will self "
+ "destruct!";
+ return -1;
+ }
+ platform_type_ = platform;
+ return 0;
+}
+
+int32_t AudioDeviceModuleImpl::CreatePlatformSpecificObjects() {
+ RTC_LOG(LS_INFO) << __FUNCTION__;
+// Dummy ADM implementations if build flags are set.
+#if defined(WEBRTC_DUMMY_AUDIO_BUILD)
+ audio_device_.reset(new AudioDeviceDummy());
+ RTC_LOG(LS_INFO) << "Dummy Audio APIs will be utilized";
+#elif defined(WEBRTC_DUMMY_FILE_DEVICES)
+ audio_device_.reset(FileAudioDeviceFactory::CreateFileAudioDevice());
+ if (audio_device_) {
+ RTC_LOG(LS_INFO) << "Will use file-playing dummy device.";
+ } else {
+ // Create a dummy device instead.
+ audio_device_.reset(new AudioDeviceDummy());
+ RTC_LOG(LS_INFO) << "Dummy Audio APIs will be utilized";
+ }
+
+// Real (non-dummy) ADM implementations.
+#else
+ AudioLayer audio_layer(PlatformAudioLayer());
+// Windows ADM implementation.
+#if defined(WEBRTC_WINDOWS_CORE_AUDIO_BUILD)
+ if ((audio_layer == kWindowsCoreAudio) ||
+ (audio_layer == kPlatformDefaultAudio)) {
+ RTC_LOG(LS_INFO) << "Attempting to use the Windows Core Audio APIs...";
+ if (AudioDeviceWindowsCore::CoreAudioIsSupported()) {
+ audio_device_.reset(new AudioDeviceWindowsCore());
+ RTC_LOG(LS_INFO) << "Windows Core Audio APIs will be utilized";
+ }
+ }
+#endif // defined(WEBRTC_WINDOWS_CORE_AUDIO_BUILD)
+
+#if defined(WEBRTC_ANDROID)
+ // Create an Android audio manager.
+ audio_manager_android_.reset(new AudioManager());
+ // Select best possible combination of audio layers.
+ if (audio_layer == kPlatformDefaultAudio) {
+ if (audio_manager_android_->IsAAudioSupported()) {
+ // Use of AAudio for both playout and recording has highest priority.
+ audio_layer = kAndroidAAudioAudio;
+ } else if (audio_manager_android_->IsLowLatencyPlayoutSupported() &&
+ audio_manager_android_->IsLowLatencyRecordSupported()) {
+ // Use OpenSL ES for both playout and recording.
+ audio_layer = kAndroidOpenSLESAudio;
+ } else if (audio_manager_android_->IsLowLatencyPlayoutSupported() &&
+ !audio_manager_android_->IsLowLatencyRecordSupported()) {
+      // Use OpenSL ES for output on devices that only support the
+ // low-latency output audio path.
+ audio_layer = kAndroidJavaInputAndOpenSLESOutputAudio;
+ } else {
+ // Use Java-based audio in both directions when low-latency output is
+ // not supported.
+ audio_layer = kAndroidJavaAudio;
+ }
+ }
+ AudioManager* audio_manager = audio_manager_android_.get();
+ if (audio_layer == kAndroidJavaAudio) {
+ // Java audio for both input and output audio.
+ audio_device_.reset(new AudioDeviceTemplate<AudioRecordJni, AudioTrackJni>(
+ audio_layer, audio_manager));
+ } else if (audio_layer == kAndroidOpenSLESAudio) {
+ // OpenSL ES based audio for both input and output audio.
+ audio_device_.reset(
+ new AudioDeviceTemplate<OpenSLESRecorder, OpenSLESPlayer>(
+ audio_layer, audio_manager));
+ } else if (audio_layer == kAndroidJavaInputAndOpenSLESOutputAudio) {
+ // Java audio for input and OpenSL ES for output audio (i.e. mixed APIs).
+ // This combination provides low-latency output audio and at the same
+ // time support for HW AEC using the AudioRecord Java API.
+ audio_device_.reset(new AudioDeviceTemplate<AudioRecordJni, OpenSLESPlayer>(
+ audio_layer, audio_manager));
+ } else if (audio_layer == kAndroidAAudioAudio) {
+#if defined(WEBRTC_AUDIO_DEVICE_INCLUDE_ANDROID_AAUDIO)
+ // AAudio based audio for both input and output.
+ audio_device_.reset(new AudioDeviceTemplate<AAudioRecorder, AAudioPlayer>(
+ audio_layer, audio_manager));
+#endif
+ } else if (audio_layer == kAndroidJavaInputAndAAudioOutputAudio) {
+#if defined(WEBRTC_AUDIO_DEVICE_INCLUDE_ANDROID_AAUDIO)
+ // Java audio for input and AAudio for output audio (i.e. mixed APIs).
+ audio_device_.reset(new AudioDeviceTemplate<AudioRecordJni, AAudioPlayer>(
+ audio_layer, audio_manager));
+#endif
+ } else {
+ RTC_LOG(LS_ERROR) << "The requested audio layer is not supported";
+ audio_device_.reset(nullptr);
+ }
+// END #if defined(WEBRTC_ANDROID)
+
+// Linux ADM implementation.
+// Note that WEBRTC_ENABLE_LINUX_ALSA is always defined by default when
+// WEBRTC_LINUX is defined. WEBRTC_ENABLE_LINUX_PULSE depends on the
+// 'rtc_include_pulse_audio' build flag.
+// TODO(bugs.webrtc.org/9127): improve support and make it more clear that
+// PulseAudio is the default selection.
+#elif defined(WEBRTC_LINUX)
+#if !defined(WEBRTC_ENABLE_LINUX_PULSE)
+ // Build flag 'rtc_include_pulse_audio' is set to false. In this mode:
+ // - kPlatformDefaultAudio => ALSA, and
+ // - kLinuxAlsaAudio => ALSA, and
+ // - kLinuxPulseAudio => Invalid selection.
+ RTC_LOG(LS_WARNING) << "PulseAudio is disabled using build flag.";
+ if ((audio_layer == kLinuxAlsaAudio) ||
+ (audio_layer == kPlatformDefaultAudio)) {
+ audio_device_.reset(new AudioDeviceLinuxALSA());
+ RTC_LOG(LS_INFO) << "Linux ALSA APIs will be utilized.";
+ }
+#else
+ // Build flag 'rtc_include_pulse_audio' is set to true (default). In this
+ // mode:
+ // - kPlatformDefaultAudio => PulseAudio, and
+ // - kLinuxPulseAudio => PulseAudio, and
+ // - kLinuxAlsaAudio => ALSA (supported but not default).
+ RTC_LOG(LS_INFO) << "PulseAudio support is enabled.";
+ if ((audio_layer == kLinuxPulseAudio) ||
+ (audio_layer == kPlatformDefaultAudio)) {
+ // Linux PulseAudio implementation is default.
+ audio_device_.reset(new AudioDeviceLinuxPulse());
+ RTC_LOG(LS_INFO) << "Linux PulseAudio APIs will be utilized";
+ } else if (audio_layer == kLinuxAlsaAudio) {
+ audio_device_.reset(new AudioDeviceLinuxALSA());
+ RTC_LOG(LS_WARNING) << "Linux ALSA APIs will be utilized.";
+ }
+#endif // #if !defined(WEBRTC_ENABLE_LINUX_PULSE)
+#endif // #if defined(WEBRTC_LINUX)
+
+// iOS ADM implementation.
+#if defined(WEBRTC_IOS)
+ if (audio_layer == kPlatformDefaultAudio) {
+ audio_device_.reset(
+ new ios_adm::AudioDeviceIOS(/*bypass_voice_processing=*/false));
+ RTC_LOG(LS_INFO) << "iPhone Audio APIs will be utilized.";
+ }
+// END #if defined(WEBRTC_IOS)
+
+// Mac OS X ADM implementation.
+#elif defined(WEBRTC_MAC)
+ if (audio_layer == kPlatformDefaultAudio) {
+ audio_device_.reset(new AudioDeviceMac());
+ RTC_LOG(LS_INFO) << "Mac OS X Audio APIs will be utilized.";
+ }
+#endif // WEBRTC_MAC
+
+ // Dummy ADM implementation.
+ if (audio_layer == kDummyAudio) {
+ audio_device_.reset(new AudioDeviceDummy());
+ RTC_LOG(LS_INFO) << "Dummy Audio APIs will be utilized.";
+ }
+#endif // if defined(WEBRTC_DUMMY_AUDIO_BUILD)
+
+ if (!audio_device_) {
+ RTC_LOG(LS_ERROR)
+ << "Failed to create the platform specific ADM implementation.";
+ return -1;
+ }
+ return 0;
+}
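+
+// The AudioLayer passed at creation steers the branches above. For example,
+// a caller that wants ALSA even when PulseAudio support is compiled in could
+// request it explicitly (sketch, assuming a Linux build):
+//
+//   auto adm = AudioDeviceModule::Create(AudioDeviceModule::kLinuxAlsaAudio,
+//                                        task_queue_factory.get());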
+
+int32_t AudioDeviceModuleImpl::AttachAudioBuffer() {
+ RTC_LOG(LS_INFO) << __FUNCTION__;
+ audio_device_->AttachAudioBuffer(&audio_device_buffer_);
+ return 0;
+}
+
+AudioDeviceModuleImpl::~AudioDeviceModuleImpl() {
+ RTC_LOG(LS_INFO) << __FUNCTION__;
+}
+
+int32_t AudioDeviceModuleImpl::ActiveAudioLayer(AudioLayer* audioLayer) const {
+ RTC_LOG(LS_INFO) << __FUNCTION__;
+ AudioLayer activeAudio;
+ if (audio_device_->ActiveAudioLayer(activeAudio) == -1) {
+ return -1;
+ }
+ *audioLayer = activeAudio;
+ return 0;
+}
+
+int32_t AudioDeviceModuleImpl::Init() {
+ RTC_LOG(LS_INFO) << __FUNCTION__;
+ if (initialized_)
+ return 0;
+ RTC_CHECK(audio_device_);
+ AudioDeviceGeneric::InitStatus status = audio_device_->Init();
+ RTC_HISTOGRAM_ENUMERATION(
+ "WebRTC.Audio.InitializationResult", static_cast<int>(status),
+ static_cast<int>(AudioDeviceGeneric::InitStatus::NUM_STATUSES));
+ if (status != AudioDeviceGeneric::InitStatus::OK) {
+ RTC_LOG(LS_ERROR) << "Audio device initialization failed.";
+ return -1;
+ }
+ initialized_ = true;
+ return 0;
+}
+
+int32_t AudioDeviceModuleImpl::Terminate() {
+ RTC_LOG(LS_INFO) << __FUNCTION__;
+ if (!initialized_)
+ return 0;
+ if (audio_device_->Terminate() == -1) {
+ return -1;
+ }
+ initialized_ = false;
+ return 0;
+}
+
+bool AudioDeviceModuleImpl::Initialized() const {
+ RTC_LOG(LS_INFO) << __FUNCTION__ << ": " << initialized_;
+ return initialized_;
+}
+
+int32_t AudioDeviceModuleImpl::InitSpeaker() {
+ RTC_LOG(LS_INFO) << __FUNCTION__;
+ CHECKinitialized_();
+ return audio_device_->InitSpeaker();
+}
+
+int32_t AudioDeviceModuleImpl::InitMicrophone() {
+ RTC_LOG(LS_INFO) << __FUNCTION__;
+ CHECKinitialized_();
+ return audio_device_->InitMicrophone();
+}
+
+int32_t AudioDeviceModuleImpl::SpeakerVolumeIsAvailable(bool* available) {
+ RTC_LOG(LS_INFO) << __FUNCTION__;
+ CHECKinitialized_();
+ bool isAvailable = false;
+ if (audio_device_->SpeakerVolumeIsAvailable(isAvailable) == -1) {
+ return -1;
+ }
+ *available = isAvailable;
+ RTC_LOG(LS_INFO) << "output: " << isAvailable;
+ return 0;
+}
+
+int32_t AudioDeviceModuleImpl::SetSpeakerVolume(uint32_t volume) {
+ RTC_LOG(LS_INFO) << __FUNCTION__ << "(" << volume << ")";
+ CHECKinitialized_();
+ return audio_device_->SetSpeakerVolume(volume);
+}
+
+int32_t AudioDeviceModuleImpl::SpeakerVolume(uint32_t* volume) const {
+ RTC_LOG(LS_INFO) << __FUNCTION__;
+ CHECKinitialized_();
+ uint32_t level = 0;
+ if (audio_device_->SpeakerVolume(level) == -1) {
+ return -1;
+ }
+ *volume = level;
+ RTC_LOG(LS_INFO) << "output: " << *volume;
+ return 0;
+}
+
+bool AudioDeviceModuleImpl::SpeakerIsInitialized() const {
+ RTC_LOG(LS_INFO) << __FUNCTION__;
+ CHECKinitialized__BOOL();
+ bool isInitialized = audio_device_->SpeakerIsInitialized();
+ RTC_LOG(LS_INFO) << "output: " << isInitialized;
+ return isInitialized;
+}
+
+bool AudioDeviceModuleImpl::MicrophoneIsInitialized() const {
+ RTC_LOG(LS_INFO) << __FUNCTION__;
+ CHECKinitialized__BOOL();
+ bool isInitialized = audio_device_->MicrophoneIsInitialized();
+ RTC_LOG(LS_INFO) << "output: " << isInitialized;
+ return isInitialized;
+}
+
+int32_t AudioDeviceModuleImpl::MaxSpeakerVolume(uint32_t* maxVolume) const {
+ CHECKinitialized_();
+ uint32_t maxVol = 0;
+ if (audio_device_->MaxSpeakerVolume(maxVol) == -1) {
+ return -1;
+ }
+ *maxVolume = maxVol;
+ return 0;
+}
+
+int32_t AudioDeviceModuleImpl::MinSpeakerVolume(uint32_t* minVolume) const {
+ CHECKinitialized_();
+ uint32_t minVol = 0;
+ if (audio_device_->MinSpeakerVolume(minVol) == -1) {
+ return -1;
+ }
+ *minVolume = minVol;
+ return 0;
+}
+
+int32_t AudioDeviceModuleImpl::SpeakerMuteIsAvailable(bool* available) {
+ RTC_LOG(LS_INFO) << __FUNCTION__;
+ CHECKinitialized_();
+ bool isAvailable = false;
+ if (audio_device_->SpeakerMuteIsAvailable(isAvailable) == -1) {
+ return -1;
+ }
+ *available = isAvailable;
+ RTC_LOG(LS_INFO) << "output: " << isAvailable;
+ return 0;
+}
+
+int32_t AudioDeviceModuleImpl::SetSpeakerMute(bool enable) {
+ RTC_LOG(LS_INFO) << __FUNCTION__ << "(" << enable << ")";
+ CHECKinitialized_();
+ return audio_device_->SetSpeakerMute(enable);
+}
+
+int32_t AudioDeviceModuleImpl::SpeakerMute(bool* enabled) const {
+ RTC_LOG(LS_INFO) << __FUNCTION__;
+ CHECKinitialized_();
+ bool muted = false;
+ if (audio_device_->SpeakerMute(muted) == -1) {
+ return -1;
+ }
+ *enabled = muted;
+ RTC_LOG(LS_INFO) << "output: " << muted;
+ return 0;
+}
+
+int32_t AudioDeviceModuleImpl::MicrophoneMuteIsAvailable(bool* available) {
+ RTC_LOG(LS_INFO) << __FUNCTION__;
+ CHECKinitialized_();
+ bool isAvailable = false;
+ if (audio_device_->MicrophoneMuteIsAvailable(isAvailable) == -1) {
+ return -1;
+ }
+ *available = isAvailable;
+ RTC_LOG(LS_INFO) << "output: " << isAvailable;
+ return 0;
+}
+
+int32_t AudioDeviceModuleImpl::SetMicrophoneMute(bool enable) {
+ RTC_LOG(LS_INFO) << __FUNCTION__ << "(" << enable << ")";
+ CHECKinitialized_();
+ return (audio_device_->SetMicrophoneMute(enable));
+}
+
+int32_t AudioDeviceModuleImpl::MicrophoneMute(bool* enabled) const {
+ RTC_LOG(LS_INFO) << __FUNCTION__;
+ CHECKinitialized_();
+ bool muted = false;
+ if (audio_device_->MicrophoneMute(muted) == -1) {
+ return -1;
+ }
+ *enabled = muted;
+ RTC_LOG(LS_INFO) << "output: " << muted;
+ return 0;
+}
+
+int32_t AudioDeviceModuleImpl::MicrophoneVolumeIsAvailable(bool* available) {
+ RTC_LOG(LS_INFO) << __FUNCTION__;
+ CHECKinitialized_();
+ bool isAvailable = false;
+ if (audio_device_->MicrophoneVolumeIsAvailable(isAvailable) == -1) {
+ return -1;
+ }
+ *available = isAvailable;
+ RTC_LOG(LS_INFO) << "output: " << isAvailable;
+ return 0;
+}
+
+int32_t AudioDeviceModuleImpl::SetMicrophoneVolume(uint32_t volume) {
+ RTC_LOG(LS_INFO) << __FUNCTION__ << "(" << volume << ")";
+ CHECKinitialized_();
+ return (audio_device_->SetMicrophoneVolume(volume));
+}
+
+int32_t AudioDeviceModuleImpl::MicrophoneVolume(uint32_t* volume) const {
+ RTC_LOG(LS_INFO) << __FUNCTION__;
+ CHECKinitialized_();
+ uint32_t level = 0;
+ if (audio_device_->MicrophoneVolume(level) == -1) {
+ return -1;
+ }
+ *volume = level;
+ RTC_LOG(LS_INFO) << "output: " << *volume;
+ return 0;
+}
+
+int32_t AudioDeviceModuleImpl::StereoRecordingIsAvailable(
+ bool* available) const {
+ RTC_LOG(LS_INFO) << __FUNCTION__;
+ CHECKinitialized_();
+ bool isAvailable = false;
+ if (audio_device_->StereoRecordingIsAvailable(isAvailable) == -1) {
+ return -1;
+ }
+ *available = isAvailable;
+ RTC_LOG(LS_INFO) << "output: " << isAvailable;
+ return 0;
+}
+
+int32_t AudioDeviceModuleImpl::SetStereoRecording(bool enable) {
+ RTC_LOG(LS_INFO) << __FUNCTION__ << "(" << enable << ")";
+ CHECKinitialized_();
+ if (audio_device_->RecordingIsInitialized()) {
+ RTC_LOG(LS_ERROR)
+ << "unable to set stereo mode after recording is initialized";
+ return -1;
+ }
+ if (audio_device_->SetStereoRecording(enable) == -1) {
+ if (enable) {
+ RTC_LOG(LS_WARNING) << "failed to enable stereo recording";
+ }
+ return -1;
+ }
+ int8_t nChannels(1);
+ if (enable) {
+ nChannels = 2;
+ }
+ audio_device_buffer_.SetRecordingChannels(nChannels);
+ return 0;
+}
+
+int32_t AudioDeviceModuleImpl::StereoRecording(bool* enabled) const {
+ RTC_LOG(LS_INFO) << __FUNCTION__;
+ CHECKinitialized_();
+ bool stereo = false;
+ if (audio_device_->StereoRecording(stereo) == -1) {
+ return -1;
+ }
+ *enabled = stereo;
+ RTC_LOG(LS_INFO) << "output: " << stereo;
+ return 0;
+}
+
+int32_t AudioDeviceModuleImpl::StereoPlayoutIsAvailable(bool* available) const {
+ RTC_LOG(LS_INFO) << __FUNCTION__;
+ CHECKinitialized_();
+ bool isAvailable = false;
+ if (audio_device_->StereoPlayoutIsAvailable(isAvailable) == -1) {
+ return -1;
+ }
+ *available = isAvailable;
+ RTC_LOG(LS_INFO) << "output: " << isAvailable;
+ return 0;
+}
+
+int32_t AudioDeviceModuleImpl::SetStereoPlayout(bool enable) {
+ RTC_LOG(LS_INFO) << __FUNCTION__ << "(" << enable << ")";
+ CHECKinitialized_();
+ if (audio_device_->PlayoutIsInitialized()) {
+ RTC_LOG(LS_ERROR)
+ << "unable to set stereo mode while playing side is initialized";
+ return -1;
+ }
+ if (audio_device_->SetStereoPlayout(enable)) {
+ RTC_LOG(LS_WARNING) << "stereo playout is not supported";
+ return -1;
+ }
+ int8_t nChannels(1);
+ if (enable) {
+ nChannels = 2;
+ }
+ audio_device_buffer_.SetPlayoutChannels(nChannels);
+ return 0;
+}
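+
+// Stereo must be configured before the playout side is initialized; a sketch
+// of the required call order (illustrative only):
+//
+//   adm->SetStereoPlayout(true);  // must precede InitPlayout()
+//   adm->InitPlayout();
+//   adm->StartPlayout();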
+
+int32_t AudioDeviceModuleImpl::StereoPlayout(bool* enabled) const {
+ RTC_LOG(LS_INFO) << __FUNCTION__;
+ CHECKinitialized_();
+ bool stereo = false;
+ if (audio_device_->StereoPlayout(stereo) == -1) {
+ return -1;
+ }
+ *enabled = stereo;
+ RTC_LOG(LS_INFO) << "output: " << stereo;
+ return 0;
+}
+
+int32_t AudioDeviceModuleImpl::PlayoutIsAvailable(bool* available) {
+ RTC_LOG(LS_INFO) << __FUNCTION__;
+ CHECKinitialized_();
+ bool isAvailable = false;
+ if (audio_device_->PlayoutIsAvailable(isAvailable) == -1) {
+ return -1;
+ }
+ *available = isAvailable;
+ RTC_LOG(LS_INFO) << "output: " << isAvailable;
+ return 0;
+}
+
+int32_t AudioDeviceModuleImpl::RecordingIsAvailable(bool* available) {
+ RTC_LOG(LS_INFO) << __FUNCTION__;
+ CHECKinitialized_();
+ bool isAvailable = false;
+ if (audio_device_->RecordingIsAvailable(isAvailable) == -1) {
+ return -1;
+ }
+ *available = isAvailable;
+ RTC_LOG(LS_INFO) << "output: " << isAvailable;
+ return 0;
+}
+
+int32_t AudioDeviceModuleImpl::MaxMicrophoneVolume(uint32_t* maxVolume) const {
+ CHECKinitialized_();
+ uint32_t maxVol(0);
+ if (audio_device_->MaxMicrophoneVolume(maxVol) == -1) {
+ return -1;
+ }
+ *maxVolume = maxVol;
+ return 0;
+}
+
+int32_t AudioDeviceModuleImpl::MinMicrophoneVolume(uint32_t* minVolume) const {
+ CHECKinitialized_();
+ uint32_t minVol(0);
+ if (audio_device_->MinMicrophoneVolume(minVol) == -1) {
+ return -1;
+ }
+ *minVolume = minVol;
+ return 0;
+}
+
+int16_t AudioDeviceModuleImpl::PlayoutDevices() {
+ RTC_LOG(LS_INFO) << __FUNCTION__;
+ CHECKinitialized_();
+ uint16_t nPlayoutDevices = audio_device_->PlayoutDevices();
+ RTC_LOG(LS_INFO) << "output: " << nPlayoutDevices;
+ return (int16_t)(nPlayoutDevices);
+}
+
+int32_t AudioDeviceModuleImpl::SetPlayoutDevice(uint16_t index) {
+ RTC_LOG(LS_INFO) << __FUNCTION__ << "(" << index << ")";
+ CHECKinitialized_();
+ return audio_device_->SetPlayoutDevice(index);
+}
+
+int32_t AudioDeviceModuleImpl::SetPlayoutDevice(WindowsDeviceType device) {
+ RTC_LOG(LS_INFO) << __FUNCTION__;
+ CHECKinitialized_();
+ return audio_device_->SetPlayoutDevice(device);
+}
+
+int32_t AudioDeviceModuleImpl::PlayoutDeviceName(
+ uint16_t index,
+ char name[kAdmMaxDeviceNameSize],
+ char guid[kAdmMaxGuidSize]) {
+ RTC_LOG(LS_INFO) << __FUNCTION__ << "(" << index << ", ...)";
+ CHECKinitialized_();
+ if (name == NULL) {
+ return -1;
+ }
+ if (audio_device_->PlayoutDeviceName(index, name, guid) == -1) {
+ return -1;
+ }
+ if (name != NULL) {
+ RTC_LOG(LS_INFO) << "output: name = " << name;
+ }
+ if (guid != NULL) {
+ RTC_LOG(LS_INFO) << "output: guid = " << guid;
+ }
+ return 0;
+}
+
+int32_t AudioDeviceModuleImpl::RecordingDeviceName(
+ uint16_t index,
+ char name[kAdmMaxDeviceNameSize],
+ char guid[kAdmMaxGuidSize]) {
+ RTC_LOG(LS_INFO) << __FUNCTION__ << "(" << index << ", ...)";
+ CHECKinitialized_();
+ if (name == NULL) {
+ return -1;
+ }
+ if (audio_device_->RecordingDeviceName(index, name, guid) == -1) {
+ return -1;
+ }
+ if (name != NULL) {
+ RTC_LOG(LS_INFO) << "output: name = " << name;
+ }
+ if (guid != NULL) {
+ RTC_LOG(LS_INFO) << "output: guid = " << guid;
+ }
+ return 0;
+}
+
+int16_t AudioDeviceModuleImpl::RecordingDevices() {
+ RTC_LOG(LS_INFO) << __FUNCTION__;
+ CHECKinitialized_();
+ uint16_t nRecordingDevices = audio_device_->RecordingDevices();
+ RTC_LOG(LS_INFO) << "output: " << nRecordingDevices;
+ return (int16_t)nRecordingDevices;
+}
+
+int32_t AudioDeviceModuleImpl::SetRecordingDevice(uint16_t index) {
+ RTC_LOG(LS_INFO) << __FUNCTION__ << "(" << index << ")";
+ CHECKinitialized_();
+ return audio_device_->SetRecordingDevice(index);
+}
+
+int32_t AudioDeviceModuleImpl::SetRecordingDevice(WindowsDeviceType device) {
+ RTC_LOG(LS_INFO) << __FUNCTION__;
+ CHECKinitialized_();
+ return audio_device_->SetRecordingDevice(device);
+}
+
+int32_t AudioDeviceModuleImpl::InitPlayout() {
+ RTC_LOG(LS_INFO) << __FUNCTION__;
+ CHECKinitialized_();
+ if (PlayoutIsInitialized()) {
+ return 0;
+ }
+ int32_t result = audio_device_->InitPlayout();
+ RTC_LOG(LS_INFO) << "output: " << result;
+ RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.InitPlayoutSuccess",
+ static_cast<int>(result == 0));
+ return result;
+}
+
+int32_t AudioDeviceModuleImpl::InitRecording() {
+ RTC_LOG(LS_INFO) << __FUNCTION__;
+ CHECKinitialized_();
+ if (RecordingIsInitialized()) {
+ return 0;
+ }
+ int32_t result = audio_device_->InitRecording();
+ RTC_LOG(LS_INFO) << "output: " << result;
+ RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.InitRecordingSuccess",
+ static_cast<int>(result == 0));
+ return result;
+}
+
+bool AudioDeviceModuleImpl::PlayoutIsInitialized() const {
+ RTC_LOG(LS_INFO) << __FUNCTION__;
+ CHECKinitialized__BOOL();
+ return audio_device_->PlayoutIsInitialized();
+}
+
+bool AudioDeviceModuleImpl::RecordingIsInitialized() const {
+ RTC_LOG(LS_INFO) << __FUNCTION__;
+ CHECKinitialized__BOOL();
+ return audio_device_->RecordingIsInitialized();
+}
+
+int32_t AudioDeviceModuleImpl::StartPlayout() {
+ RTC_LOG(LS_INFO) << __FUNCTION__;
+ CHECKinitialized_();
+ if (Playing()) {
+ return 0;
+ }
+ audio_device_buffer_.StartPlayout();
+ int32_t result = audio_device_->StartPlayout();
+ RTC_LOG(LS_INFO) << "output: " << result;
+ RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.StartPlayoutSuccess",
+ static_cast<int>(result == 0));
+ return result;
+}
+
+int32_t AudioDeviceModuleImpl::StopPlayout() {
+ RTC_LOG(LS_INFO) << __FUNCTION__;
+ CHECKinitialized_();
+ int32_t result = audio_device_->StopPlayout();
+ audio_device_buffer_.StopPlayout();
+ RTC_LOG(LS_INFO) << "output: " << result;
+ RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.StopPlayoutSuccess",
+ static_cast<int>(result == 0));
+ return result;
+}
+
+bool AudioDeviceModuleImpl::Playing() const {
+ RTC_LOG(LS_INFO) << __FUNCTION__;
+ CHECKinitialized__BOOL();
+ return audio_device_->Playing();
+}
+
+int32_t AudioDeviceModuleImpl::StartRecording() {
+ RTC_LOG(LS_INFO) << __FUNCTION__;
+ CHECKinitialized_();
+ if (Recording()) {
+ return 0;
+ }
+ audio_device_buffer_.StartRecording();
+ int32_t result = audio_device_->StartRecording();
+ RTC_LOG(LS_INFO) << "output: " << result;
+ RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.StartRecordingSuccess",
+ static_cast<int>(result == 0));
+ return result;
+}
+
+int32_t AudioDeviceModuleImpl::StopRecording() {
+ RTC_LOG(LS_INFO) << __FUNCTION__;
+ CHECKinitialized_();
+ int32_t result = audio_device_->StopRecording();
+ audio_device_buffer_.StopRecording();
+ RTC_LOG(LS_INFO) << "output: " << result;
+ RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.StopRecordingSuccess",
+ static_cast<int>(result == 0));
+ return result;
+}
+
+bool AudioDeviceModuleImpl::Recording() const {
+ RTC_LOG(LS_INFO) << __FUNCTION__;
+ CHECKinitialized__BOOL();
+ return audio_device_->Recording();
+}
+
+int32_t AudioDeviceModuleImpl::RegisterAudioCallback(
+ AudioTransport* audioCallback) {
+ RTC_LOG(LS_INFO) << __FUNCTION__;
+ return audio_device_buffer_.RegisterAudioCallback(audioCallback);
+}
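+
+// Typical full-duplex bring-up sketch (hypothetical caller code): register
+// the transport before starting the streams so the device buffer has a sink
+// for recorded audio and a source for playout audio.
+//
+//   adm->RegisterAudioCallback(&my_transport);
+//   adm->InitRecording();
+//   adm->StartRecording();
+//   adm->InitPlayout();
+//   adm->StartPlayout();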
+
+int32_t AudioDeviceModuleImpl::PlayoutDelay(uint16_t* delayMS) const {
+ CHECKinitialized_();
+ uint16_t delay = 0;
+ if (audio_device_->PlayoutDelay(delay) == -1) {
+ RTC_LOG(LS_ERROR) << "failed to retrieve the playout delay";
+ return -1;
+ }
+ *delayMS = delay;
+ return 0;
+}
+
+bool AudioDeviceModuleImpl::BuiltInAECIsAvailable() const {
+ RTC_LOG(LS_INFO) << __FUNCTION__;
+ CHECKinitialized__BOOL();
+ bool isAvailable = audio_device_->BuiltInAECIsAvailable();
+ RTC_LOG(LS_INFO) << "output: " << isAvailable;
+ return isAvailable;
+}
+
+int32_t AudioDeviceModuleImpl::EnableBuiltInAEC(bool enable) {
+ RTC_LOG(LS_INFO) << __FUNCTION__ << "(" << enable << ")";
+ CHECKinitialized_();
+ int32_t ok = audio_device_->EnableBuiltInAEC(enable);
+ RTC_LOG(LS_INFO) << "output: " << ok;
+ return ok;
+}
+
+bool AudioDeviceModuleImpl::BuiltInAGCIsAvailable() const {
+ RTC_LOG(LS_INFO) << __FUNCTION__;
+ CHECKinitialized__BOOL();
+ bool isAvailable = audio_device_->BuiltInAGCIsAvailable();
+ RTC_LOG(LS_INFO) << "output: " << isAvailable;
+ return isAvailable;
+}
+
+int32_t AudioDeviceModuleImpl::EnableBuiltInAGC(bool enable) {
+ RTC_LOG(LS_INFO) << __FUNCTION__ << "(" << enable << ")";
+ CHECKinitialized_();
+ int32_t ok = audio_device_->EnableBuiltInAGC(enable);
+ RTC_LOG(LS_INFO) << "output: " << ok;
+ return ok;
+}
+
+bool AudioDeviceModuleImpl::BuiltInNSIsAvailable() const {
+ RTC_LOG(LS_INFO) << __FUNCTION__;
+ CHECKinitialized__BOOL();
+ bool isAvailable = audio_device_->BuiltInNSIsAvailable();
+ RTC_LOG(LS_INFO) << "output: " << isAvailable;
+ return isAvailable;
+}
+
+int32_t AudioDeviceModuleImpl::EnableBuiltInNS(bool enable) {
+ RTC_LOG(LS_INFO) << __FUNCTION__ << "(" << enable << ")";
+ CHECKinitialized_();
+ int32_t ok = audio_device_->EnableBuiltInNS(enable);
+ RTC_LOG(LS_INFO) << "output: " << ok;
+ return ok;
+}
+
+int32_t AudioDeviceModuleImpl::GetPlayoutUnderrunCount() const {
+ RTC_LOG(LS_INFO) << __FUNCTION__;
+ CHECKinitialized_();
+ int32_t underrunCount = audio_device_->GetPlayoutUnderrunCount();
+ RTC_LOG(LS_INFO) << "output: " << underrunCount;
+ return underrunCount;
+}
+
+#if defined(WEBRTC_IOS)
+int AudioDeviceModuleImpl::GetPlayoutAudioParameters(
+ AudioParameters* params) const {
+ RTC_LOG(LS_INFO) << __FUNCTION__;
+ int r = audio_device_->GetPlayoutAudioParameters(params);
+ RTC_LOG(LS_INFO) << "output: " << r;
+ return r;
+}
+
+int AudioDeviceModuleImpl::GetRecordAudioParameters(
+ AudioParameters* params) const {
+ RTC_LOG(LS_INFO) << __FUNCTION__;
+ int r = audio_device_->GetRecordAudioParameters(params);
+ RTC_LOG(LS_INFO) << "output: " << r;
+ return r;
+}
+#endif // WEBRTC_IOS
+
+AudioDeviceModuleImpl::PlatformType AudioDeviceModuleImpl::Platform() const {
+ RTC_LOG(LS_INFO) << __FUNCTION__;
+ return platform_type_;
+}
+
+AudioDeviceModule::AudioLayer AudioDeviceModuleImpl::PlatformAudioLayer()
+ const {
+ RTC_LOG(LS_INFO) << __FUNCTION__;
+ return audio_layer_;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_device/audio_device_impl.h b/third_party/libwebrtc/modules/audio_device/audio_device_impl.h
new file mode 100644
index 0000000000..45f73dcd65
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/audio_device_impl.h
@@ -0,0 +1,180 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_DEVICE_AUDIO_DEVICE_IMPL_H_
+#define MODULES_AUDIO_DEVICE_AUDIO_DEVICE_IMPL_H_
+
+#if defined(WEBRTC_INCLUDE_INTERNAL_AUDIO_DEVICE)
+
+#include <stdint.h>
+
+#include <memory>
+
+#include "api/task_queue/task_queue_factory.h"
+#include "modules/audio_device/audio_device_buffer.h"
+#include "modules/audio_device/include/audio_device.h"
+
+namespace webrtc {
+
+class AudioDeviceGeneric;
+class AudioManager;
+
+class AudioDeviceModuleImpl : public AudioDeviceModuleForTest {
+ public:
+ enum PlatformType {
+ kPlatformNotSupported = 0,
+ kPlatformWin32 = 1,
+ kPlatformWinCe = 2,
+ kPlatformLinux = 3,
+ kPlatformMac = 4,
+ kPlatformAndroid = 5,
+ kPlatformIOS = 6
+ };
+
+ int32_t CheckPlatform();
+ int32_t CreatePlatformSpecificObjects();
+ int32_t AttachAudioBuffer();
+
+ AudioDeviceModuleImpl(AudioLayer audio_layer,
+ TaskQueueFactory* task_queue_factory);
+ ~AudioDeviceModuleImpl() override;
+
+ // Retrieve the currently utilized audio layer
+ int32_t ActiveAudioLayer(AudioLayer* audioLayer) const override;
+
+ // Full-duplex transportation of PCM audio
+ int32_t RegisterAudioCallback(AudioTransport* audioCallback) override;
+
+  // Main initialization and termination
+ int32_t Init() override;
+ int32_t Terminate() override;
+ bool Initialized() const override;
+
+ // Device enumeration
+ int16_t PlayoutDevices() override;
+ int16_t RecordingDevices() override;
+ int32_t PlayoutDeviceName(uint16_t index,
+ char name[kAdmMaxDeviceNameSize],
+ char guid[kAdmMaxGuidSize]) override;
+ int32_t RecordingDeviceName(uint16_t index,
+ char name[kAdmMaxDeviceNameSize],
+ char guid[kAdmMaxGuidSize]) override;
+
+ // Device selection
+ int32_t SetPlayoutDevice(uint16_t index) override;
+ int32_t SetPlayoutDevice(WindowsDeviceType device) override;
+ int32_t SetRecordingDevice(uint16_t index) override;
+ int32_t SetRecordingDevice(WindowsDeviceType device) override;
+
+ // Audio transport initialization
+ int32_t PlayoutIsAvailable(bool* available) override;
+ int32_t InitPlayout() override;
+ bool PlayoutIsInitialized() const override;
+ int32_t RecordingIsAvailable(bool* available) override;
+ int32_t InitRecording() override;
+ bool RecordingIsInitialized() const override;
+
+ // Audio transport control
+ int32_t StartPlayout() override;
+ int32_t StopPlayout() override;
+ bool Playing() const override;
+ int32_t StartRecording() override;
+ int32_t StopRecording() override;
+ bool Recording() const override;
+
+ // Audio mixer initialization
+ int32_t InitSpeaker() override;
+ bool SpeakerIsInitialized() const override;
+ int32_t InitMicrophone() override;
+ bool MicrophoneIsInitialized() const override;
+
+ // Speaker volume controls
+ int32_t SpeakerVolumeIsAvailable(bool* available) override;
+ int32_t SetSpeakerVolume(uint32_t volume) override;
+ int32_t SpeakerVolume(uint32_t* volume) const override;
+ int32_t MaxSpeakerVolume(uint32_t* maxVolume) const override;
+ int32_t MinSpeakerVolume(uint32_t* minVolume) const override;
+
+ // Microphone volume controls
+ int32_t MicrophoneVolumeIsAvailable(bool* available) override;
+ int32_t SetMicrophoneVolume(uint32_t volume) override;
+ int32_t MicrophoneVolume(uint32_t* volume) const override;
+ int32_t MaxMicrophoneVolume(uint32_t* maxVolume) const override;
+ int32_t MinMicrophoneVolume(uint32_t* minVolume) const override;
+
+ // Speaker mute control
+ int32_t SpeakerMuteIsAvailable(bool* available) override;
+ int32_t SetSpeakerMute(bool enable) override;
+ int32_t SpeakerMute(bool* enabled) const override;
+
+ // Microphone mute control
+ int32_t MicrophoneMuteIsAvailable(bool* available) override;
+ int32_t SetMicrophoneMute(bool enable) override;
+ int32_t MicrophoneMute(bool* enabled) const override;
+
+ // Stereo support
+ int32_t StereoPlayoutIsAvailable(bool* available) const override;
+ int32_t SetStereoPlayout(bool enable) override;
+ int32_t StereoPlayout(bool* enabled) const override;
+ int32_t StereoRecordingIsAvailable(bool* available) const override;
+ int32_t SetStereoRecording(bool enable) override;
+ int32_t StereoRecording(bool* enabled) const override;
+
+ // Delay information and control
+ int32_t PlayoutDelay(uint16_t* delayMS) const override;
+
+ bool BuiltInAECIsAvailable() const override;
+ int32_t EnableBuiltInAEC(bool enable) override;
+ bool BuiltInAGCIsAvailable() const override;
+ int32_t EnableBuiltInAGC(bool enable) override;
+ bool BuiltInNSIsAvailable() const override;
+ int32_t EnableBuiltInNS(bool enable) override;
+
+ // Play underrun count.
+ int32_t GetPlayoutUnderrunCount() const override;
+
+#if defined(WEBRTC_IOS)
+ int GetPlayoutAudioParameters(AudioParameters* params) const override;
+ int GetRecordAudioParameters(AudioParameters* params) const override;
+#endif // WEBRTC_IOS
+
+#if defined(WEBRTC_ANDROID)
+  // Only use this accessor for test purposes on Android.
+ AudioManager* GetAndroidAudioManagerForTest() {
+ return audio_manager_android_.get();
+ }
+#endif
+ AudioDeviceBuffer* GetAudioDeviceBuffer() { return &audio_device_buffer_; }
+
+ int RestartPlayoutInternally() override { return -1; }
+ int RestartRecordingInternally() override { return -1; }
+ int SetPlayoutSampleRate(uint32_t sample_rate) override { return -1; }
+ int SetRecordingSampleRate(uint32_t sample_rate) override { return -1; }
+
+ private:
+ PlatformType Platform() const;
+ AudioLayer PlatformAudioLayer() const;
+
+ AudioLayer audio_layer_;
+ PlatformType platform_type_ = kPlatformNotSupported;
+ bool initialized_ = false;
+#if defined(WEBRTC_ANDROID)
+ // Should be declared first to ensure that it outlives other resources.
+ std::unique_ptr<AudioManager> audio_manager_android_;
+#endif
+ AudioDeviceBuffer audio_device_buffer_;
+ std::unique_ptr<AudioDeviceGeneric> audio_device_;
+};
+
+} // namespace webrtc
+
+#endif // defined(WEBRTC_INCLUDE_INTERNAL_AUDIO_DEVICE)
+
+#endif // MODULES_AUDIO_DEVICE_AUDIO_DEVICE_IMPL_H_
diff --git a/third_party/libwebrtc/modules/audio_device/audio_device_impl_gn/moz.build b/third_party/libwebrtc/modules/audio_device/audio_device_impl_gn/moz.build
new file mode 100644
index 0000000000..0d8c6863e4
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/audio_device_impl_gn/moz.build
@@ -0,0 +1,217 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_DUMMY_AUDIO_BUILD"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/audio_device/dummy/audio_device_dummy.cc",
+ "/third_party/libwebrtc/modules/audio_device/dummy/file_audio_device.cc",
+ "/third_party/libwebrtc/modules/audio_device/dummy/file_audio_device_factory.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "GLESv2",
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "dl",
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("audio_device_impl_gn")
diff --git a/third_party/libwebrtc/modules/audio_device/audio_device_name.cc b/third_party/libwebrtc/modules/audio_device/audio_device_name.cc
new file mode 100644
index 0000000000..5318496768
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/audio_device_name.cc
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_device/audio_device_name.h"
+
+#include "absl/strings/string_view.h"
+
+namespace webrtc {
+
+const char AudioDeviceName::kDefaultDeviceId[] = "default";
+
+AudioDeviceName::AudioDeviceName(absl::string_view device_name,
+ absl::string_view unique_id)
+ : device_name(device_name), unique_id(unique_id) {}
+
+bool AudioDeviceName::IsValid() {
+ return !device_name.empty() && !unique_id.empty();
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_device/audio_device_name.h b/third_party/libwebrtc/modules/audio_device/audio_device_name.h
new file mode 100644
index 0000000000..db37852e9a
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/audio_device_name.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_DEVICE_AUDIO_DEVICE_NAME_H_
+#define MODULES_AUDIO_DEVICE_AUDIO_DEVICE_NAME_H_
+
+#include <deque>
+#include <string>
+
+#include "absl/strings/string_view.h"
+
+namespace webrtc {
+
+struct AudioDeviceName {
+  // Represents a default device. Note that on Windows there are two different
+  // types of default devices (Default and Default Communication). They can
+  // either be two different physical devices or two different roles for a
+  // single device. Hence, this id must be combined with a "role parameter" on
+ // Windows to uniquely identify a default device.
+ static const char kDefaultDeviceId[];
+
+ AudioDeviceName() = default;
+ AudioDeviceName(absl::string_view device_name, absl::string_view unique_id);
+
+ ~AudioDeviceName() = default;
+
+ // Support copy and move.
+ AudioDeviceName(const AudioDeviceName& other) = default;
+ AudioDeviceName(AudioDeviceName&&) = default;
+ AudioDeviceName& operator=(const AudioDeviceName&) = default;
+ AudioDeviceName& operator=(AudioDeviceName&&) = default;
+
+ bool IsValid();
+
+ std::string device_name; // Friendly name of the device.
+ std::string unique_id; // Unique identifier for the device.
+};
+
+typedef std::deque<AudioDeviceName> AudioDeviceNames;
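+
+// Usage sketch (hypothetical): collecting playout devices from an ADM into an
+// AudioDeviceNames list via the char-array based enumeration API.
+//
+//   AudioDeviceNames names;
+//   for (int16_t i = 0; i < adm->PlayoutDevices(); ++i) {
+//     char name[kAdmMaxDeviceNameSize];
+//     char guid[kAdmMaxGuidSize];
+//     if (adm->PlayoutDeviceName(i, name, guid) == 0)
+//       names.emplace_back(name, guid);
+//   }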
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_DEVICE_AUDIO_DEVICE_NAME_H_
diff --git a/third_party/libwebrtc/modules/audio_device/audio_device_unittest.cc b/third_party/libwebrtc/modules/audio_device/audio_device_unittest.cc
new file mode 100644
index 0000000000..51a8575b66
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/audio_device_unittest.cc
@@ -0,0 +1,1243 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_device/include/audio_device.h"
+
+#include <algorithm>
+#include <cstring>
+#include <list>
+#include <memory>
+#include <numeric>
+
+#include "absl/types/optional.h"
+#include "api/array_view.h"
+#include "api/scoped_refptr.h"
+#include "api/sequence_checker.h"
+#include "api/task_queue/default_task_queue_factory.h"
+#include "api/task_queue/task_queue_factory.h"
+#include "modules/audio_device/audio_device_impl.h"
+#include "modules/audio_device/include/mock_audio_transport.h"
+#include "rtc_base/arraysize.h"
+#include "rtc_base/buffer.h"
+#include "rtc_base/event.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "rtc_base/race_checker.h"
+#include "rtc_base/synchronization/mutex.h"
+#include "rtc_base/thread_annotations.h"
+#include "rtc_base/time_utils.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+#ifdef WEBRTC_WIN
+#include "modules/audio_device/include/audio_device_factory.h"
+#include "modules/audio_device/win/core_audio_utility_win.h"
+#include "rtc_base/win/scoped_com_initializer.h"
+#endif // WEBRTC_WIN
+
+using ::testing::_;
+using ::testing::AtLeast;
+using ::testing::Ge;
+using ::testing::Invoke;
+using ::testing::Mock;
+using ::testing::NiceMock;
+using ::testing::NotNull;
+
+namespace webrtc {
+namespace {
+
+// Using a #define for AUDIO_DEVICE_ID since we will call *different* versions
+// the ADM functions, depending on the ID type.
+#if defined(WEBRTC_WIN)
+#define AUDIO_DEVICE_ID (AudioDeviceModule::WindowsDeviceType::kDefaultDevice)
+#else
+#define AUDIO_DEVICE_ID (0u)
+#endif // defined(WEBRTC_WIN)
+
+// #define ENABLE_DEBUG_PRINTF
+#ifdef ENABLE_DEBUG_PRINTF
+#define PRINTD(...) fprintf(stderr, __VA_ARGS__);
+#else
+#define PRINTD(...) ((void)0)
+#endif
+#define PRINT(...) fprintf(stderr, __VA_ARGS__);
+
+// Don't run these tests if audio-related requirements are not met.
+#define SKIP_TEST_IF_NOT(requirements_satisfied) \
+ do { \
+ if (!requirements_satisfied) { \
+ GTEST_SKIP() << "Skipped. No audio device found."; \
+ } \
+ } while (false)
+
+// Number of callbacks (input or output) the test waits for before we set
+// an event indicating that the test was OK.
+static constexpr size_t kNumCallbacks = 10;
+// Max amount of time we wait for an event to be set while counting callbacks.
+static constexpr size_t kTestTimeOutInMilliseconds = 10 * 1000;
+// Average number of audio callbacks per second assuming 10ms packet size.
+static constexpr size_t kNumCallbacksPerSecond = 100;
+// Run the full-duplex test during this time (unit is in seconds).
+static constexpr size_t kFullDuplexTimeInSec = 5;
+// Length of round-trip latency measurements. Number of detected impulses
+// shall be kImpulseFrequencyInHz * kMeasureLatencyTimeInSec - 1 since the
+// last transmitted pulse is not used.
+static constexpr size_t kMeasureLatencyTimeInSec = 10;
+// Sets the number of impulses per second in the latency test.
+static constexpr size_t kImpulseFrequencyInHz = 1;
+// Utilized in round-trip latency measurements to avoid capturing noise samples.
+static constexpr int kImpulseThreshold = 1000;
+
+enum class TransportType {
+ kInvalid,
+ kPlay,
+ kRecord,
+ kPlayAndRecord,
+};
+
+// Interface for processing the audio stream. Real implementations can e.g.
+// run audio in loopback, read audio from a file or perform latency
+// measurements.
+class AudioStream {
+ public:
+ virtual void Write(rtc::ArrayView<const int16_t> source) = 0;
+ virtual void Read(rtc::ArrayView<int16_t> destination) = 0;
+
+ virtual ~AudioStream() = default;
+};
+
+// Converts index corresponding to position within a 10ms buffer into a
+// delay value in milliseconds.
+// Example: index=240, frames_per_10ms_buffer=480 => 5ms as output.
+int IndexToMilliseconds(size_t index, size_t frames_per_10ms_buffer) {
+ return rtc::checked_cast<int>(
+ 10.0 * (static_cast<double>(index) / frames_per_10ms_buffer) + 0.5);
+}
+
+} // namespace
+
+// Simple first-in, first-out (FIFO) class that wraps a list of 16-bit audio
+// buffers of fixed size and allows Write and Read operations. The idea is to
+// store recorded audio buffers (using Write) and then read (using Read) these
+// stored buffers with as short delay as possible when the audio layer needs
+// data to play out. The number of buffers in the FIFO will stabilize under
+// normal conditions since there will be a balance between Write and Read calls.
+// The container is a std::list and access is protected with a lock since the
+// two sides (playout and recording) are each driven by their own thread.
+// Note that we know by design that the size of the audio buffer will not
+// change over time and that both sides will in most cases use the same size.
+class FifoAudioStream : public AudioStream {
+ public:
+ void Write(rtc::ArrayView<const int16_t> source) override {
+ RTC_DCHECK_RUNS_SERIALIZED(&race_checker_);
+ const size_t size = [&] {
+ MutexLock lock(&lock_);
+ fifo_.push_back(Buffer16(source.data(), source.size()));
+ return fifo_.size();
+ }();
+ if (size > max_size_) {
+ max_size_ = size;
+ }
+ // Add marker once per second to signal that audio is active.
+ if (write_count_++ % 100 == 0) {
+ PRINTD(".");
+ }
+ written_elements_ += size;
+ }
+
+ void Read(rtc::ArrayView<int16_t> destination) override {
+ MutexLock lock(&lock_);
+ if (fifo_.empty()) {
+ std::fill(destination.begin(), destination.end(), 0);
+ } else {
+ const Buffer16& buffer = fifo_.front();
+ if (buffer.size() == destination.size()) {
+ // Default case where input and output uses same sample rate and
+ // channel configuration. No conversion is needed.
+ std::copy(buffer.begin(), buffer.end(), destination.begin());
+ } else if (destination.size() == 2 * buffer.size()) {
+ // Recorded input signal in `buffer` is in mono. Do channel upmix to
+ // match stereo output (1 -> 2).
+ for (size_t i = 0; i < buffer.size(); ++i) {
+ destination[2 * i] = buffer[i];
+ destination[2 * i + 1] = buffer[i];
+ }
+ } else if (buffer.size() == 2 * destination.size()) {
+ // Recorded input signal in `buffer` is in stereo. Do channel downmix
+ // to match mono output (2 -> 1).
+ for (size_t i = 0; i < destination.size(); ++i) {
+ destination[i] =
+ (static_cast<int32_t>(buffer[2 * i]) + buffer[2 * i + 1]) / 2;
+ }
+ } else {
+        RTC_DCHECK_NOTREACHED() << "Required conversion is not supported";
+ }
+ fifo_.pop_front();
+ }
+ }
+
+ size_t size() const {
+ MutexLock lock(&lock_);
+ return fifo_.size();
+ }
+
+ size_t max_size() const {
+ RTC_DCHECK_RUNS_SERIALIZED(&race_checker_);
+ return max_size_;
+ }
+
+ size_t average_size() const {
+ RTC_DCHECK_RUNS_SERIALIZED(&race_checker_);
+    return 0.5 + static_cast<float>(written_elements_) / write_count_;
+ }
+
+ using Buffer16 = rtc::BufferT<int16_t>;
+
+ mutable Mutex lock_;
+ rtc::RaceChecker race_checker_;
+
+ std::list<Buffer16> fifo_ RTC_GUARDED_BY(lock_);
+ size_t write_count_ RTC_GUARDED_BY(race_checker_) = 0;
+ size_t max_size_ RTC_GUARDED_BY(race_checker_) = 0;
+ size_t written_elements_ RTC_GUARDED_BY(race_checker_) = 0;
+};
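+
+// Loopback sketch (illustrative): one FifoAudioStream shared by a transport
+// in kPlayAndRecord mode makes each recorded buffer reappear on the playout
+// side with as little delay as possible.
+//
+//   FifoAudioStream audio_stream;
+//   MockAudioTransport transport(TransportType::kPlayAndRecord);
+//   rtc::Event event;
+//   transport.HandleCallbacks(&event, &audio_stream,
+//                             kFullDuplexTimeInSec * kNumCallbacksPerSecond);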
+
+// Inserts periodic impulses and measures the latency between the time of
+// transmission and time of receiving the same impulse.
+class LatencyAudioStream : public AudioStream {
+ public:
+ LatencyAudioStream() {
+ // Delay thread checkers from being initialized until first callback from
+ // respective thread.
+ read_thread_checker_.Detach();
+ write_thread_checker_.Detach();
+ }
+
+ // Insert periodic impulses in first two samples of `destination`.
+ void Read(rtc::ArrayView<int16_t> destination) override {
+ RTC_DCHECK_RUN_ON(&read_thread_checker_);
+ if (read_count_ == 0) {
+ PRINT("[");
+ }
+ read_count_++;
+ std::fill(destination.begin(), destination.end(), 0);
+ if (read_count_ % (kNumCallbacksPerSecond / kImpulseFrequencyInHz) == 0) {
+ PRINT(".");
+ {
+ MutexLock lock(&lock_);
+ if (!pulse_time_) {
+ pulse_time_ = rtc::TimeMillis();
+ }
+ }
+ constexpr int16_t impulse = std::numeric_limits<int16_t>::max();
+ std::fill_n(destination.begin(), 2, impulse);
+ }
+ }
+
+ // Detect received impulses in `source`, derive time between transmission and
+  // detection, and add the calculated delay to the list of latencies.
+ void Write(rtc::ArrayView<const int16_t> source) override {
+ RTC_DCHECK_RUN_ON(&write_thread_checker_);
+ RTC_DCHECK_RUNS_SERIALIZED(&race_checker_);
+ MutexLock lock(&lock_);
+ write_count_++;
+ if (!pulse_time_) {
+ // Avoid detection of new impulse response until a new impulse has
+      // been transmitted (sets `pulse_time_` to a value larger than zero).
+ return;
+ }
+ // Find index (element position in vector) of the max element.
+ const size_t index_of_max =
+ std::max_element(source.begin(), source.end()) - source.begin();
+    // Derive the time between the transmitted and the received pulse if the
+    // level is high enough (filters out noise).
+ const size_t max = source[index_of_max];
+ if (max > kImpulseThreshold) {
+ PRINTD("(%zu, %zu)", max, index_of_max);
+ int64_t now_time = rtc::TimeMillis();
+ int extra_delay = IndexToMilliseconds(index_of_max, source.size());
+ PRINTD("[%d]", rtc::checked_cast<int>(now_time - pulse_time_));
+ PRINTD("[%d]", extra_delay);
+      // Total latency is the difference between the transmit time and the
+      // detection time, plus the extra delay within the buffer in which the
+      // impulse was detected. The impulse is transmitted at sample 0 but can
+      // be received at sample N where N > 0. The term `extra_delay` accounts
+      // for N and is a value between 0 and 10 ms.
+ latencies_.push_back(now_time - *pulse_time_ + extra_delay);
+ pulse_time_.reset();
+ } else {
+ PRINTD("-");
+ }
+ }
+
+ size_t num_latency_values() const {
+ RTC_DCHECK_RUNS_SERIALIZED(&race_checker_);
+ return latencies_.size();
+ }
+
+ int min_latency() const {
+ RTC_DCHECK_RUNS_SERIALIZED(&race_checker_);
+ if (latencies_.empty())
+ return 0;
+ return *std::min_element(latencies_.begin(), latencies_.end());
+ }
+
+ int max_latency() const {
+ RTC_DCHECK_RUNS_SERIALIZED(&race_checker_);
+ if (latencies_.empty())
+ return 0;
+ return *std::max_element(latencies_.begin(), latencies_.end());
+ }
+
+ int average_latency() const {
+ RTC_DCHECK_RUNS_SERIALIZED(&race_checker_);
+ if (latencies_.empty())
+ return 0;
+ return 0.5 + static_cast<double>(
+ std::accumulate(latencies_.begin(), latencies_.end(), 0)) /
+ latencies_.size();
+ }
+
+ void PrintResults() const {
+ RTC_DCHECK_RUNS_SERIALIZED(&race_checker_);
+ PRINT("] ");
+ for (auto it = latencies_.begin(); it != latencies_.end(); ++it) {
+ PRINTD("%d ", *it);
+ }
+ PRINT("\n");
+ PRINT("[..........] [min, max, avg]=[%d, %d, %d] ms\n", min_latency(),
+ max_latency(), average_latency());
+ }
+
+ Mutex lock_;
+ rtc::RaceChecker race_checker_;
+ SequenceChecker read_thread_checker_;
+ SequenceChecker write_thread_checker_;
+
+ absl::optional<int64_t> pulse_time_ RTC_GUARDED_BY(lock_);
+ std::vector<int> latencies_ RTC_GUARDED_BY(race_checker_);
+ size_t read_count_ RTC_GUARDED_BY(read_thread_checker_) = 0;
+ size_t write_count_ RTC_GUARDED_BY(write_thread_checker_) = 0;
+};
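+
+// The `extra_delay` term used in Write() above converts a sample index within
+// one 10 ms buffer into milliseconds. A minimal sketch of such a conversion
+// (the actual helper, IndexToMilliseconds(), is defined earlier in this file
+// and may differ in detail):
+//
+//   int IndexToMillisecondsSketch(size_t index, size_t frames_per_buffer) {
+//     return static_cast<int>(10.0 * index / frames_per_buffer + 0.5);
+//   }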
+
+// Mocks the AudioTransport object and proxies actions for the two callbacks
+// (RecordedDataIsAvailable and NeedMorePlayData) to different implementations
+// of AudioStreamInterface.
+class MockAudioTransport : public test::MockAudioTransport {
+ public:
+ explicit MockAudioTransport(TransportType type) : type_(type) {}
+ ~MockAudioTransport() {}
+
+  // Set default actions of the mock object. We delegate to a fake
+  // implementation where the number of callbacks is counted and an event
+ // is set after a certain number of callbacks. Audio parameters are also
+ // checked.
+ void HandleCallbacks(rtc::Event* event,
+ AudioStream* audio_stream,
+ int num_callbacks) {
+ event_ = event;
+ audio_stream_ = audio_stream;
+ num_callbacks_ = num_callbacks;
+ if (play_mode()) {
+ ON_CALL(*this, NeedMorePlayData(_, _, _, _, _, _, _, _))
+ .WillByDefault(
+ Invoke(this, &MockAudioTransport::RealNeedMorePlayData));
+ }
+ if (rec_mode()) {
+ ON_CALL(*this, RecordedDataIsAvailable(_, _, _, _, _, _, _, _, _, _))
+ .WillByDefault(
+ Invoke(this, &MockAudioTransport::RealRecordedDataIsAvailable));
+ }
+ }
+
+  // Special overload used in manual tests where the user wants to run audio
+ // until e.g. a keyboard key is pressed. The event flag is set to nullptr by
+ // default since it is up to the user to stop the test. See e.g.
+ // DISABLED_RunPlayoutAndRecordingInFullDuplexAndWaitForEnterKey().
+ void HandleCallbacks(AudioStream* audio_stream) {
+ HandleCallbacks(nullptr, audio_stream, 0);
+ }
+
+ int32_t RealRecordedDataIsAvailable(const void* audio_buffer,
+ const size_t samples_per_channel,
+ const size_t bytes_per_frame,
+ const size_t channels,
+ const uint32_t sample_rate,
+ const uint32_t total_delay_ms,
+ const int32_t clock_drift,
+ const uint32_t current_mic_level,
+ const bool typing_status,
+ uint32_t& new_mic_level) {
+ EXPECT_TRUE(rec_mode()) << "No test is expecting these callbacks.";
+ // Store audio parameters once in the first callback. For all other
+ // callbacks, verify that the provided audio parameters are maintained and
+ // that each callback corresponds to 10ms for any given sample rate.
+ if (!record_parameters_.is_complete()) {
+ record_parameters_.reset(sample_rate, channels, samples_per_channel);
+ } else {
+ EXPECT_EQ(samples_per_channel, record_parameters_.frames_per_buffer());
+ EXPECT_EQ(bytes_per_frame, record_parameters_.GetBytesPerFrame());
+ EXPECT_EQ(channels, record_parameters_.channels());
+ EXPECT_EQ(static_cast<int>(sample_rate),
+ record_parameters_.sample_rate());
+ EXPECT_EQ(samples_per_channel,
+ record_parameters_.frames_per_10ms_buffer());
+ }
+ {
+ MutexLock lock(&lock_);
+ rec_count_++;
+ }
+ // Write audio data to audio stream object if one has been injected.
+ if (audio_stream_) {
+ audio_stream_->Write(
+ rtc::MakeArrayView(static_cast<const int16_t*>(audio_buffer),
+ samples_per_channel * channels));
+ }
+    // Signal the event after the given number of callbacks.
+ if (event_ && ReceivedEnoughCallbacks()) {
+ event_->Set();
+ }
+ return 0;
+ }
+
+ int32_t RealNeedMorePlayData(const size_t samples_per_channel,
+ const size_t bytes_per_frame,
+ const size_t channels,
+ const uint32_t sample_rate,
+ void* audio_buffer,
+ size_t& samples_out,
+ int64_t* elapsed_time_ms,
+ int64_t* ntp_time_ms) {
+ EXPECT_TRUE(play_mode()) << "No test is expecting these callbacks.";
+ // Store audio parameters once in the first callback. For all other
+ // callbacks, verify that the provided audio parameters are maintained and
+ // that each callback corresponds to 10ms for any given sample rate.
+ if (!playout_parameters_.is_complete()) {
+ playout_parameters_.reset(sample_rate, channels, samples_per_channel);
+ } else {
+ EXPECT_EQ(samples_per_channel, playout_parameters_.frames_per_buffer());
+ EXPECT_EQ(bytes_per_frame, playout_parameters_.GetBytesPerFrame());
+ EXPECT_EQ(channels, playout_parameters_.channels());
+ EXPECT_EQ(static_cast<int>(sample_rate),
+ playout_parameters_.sample_rate());
+ EXPECT_EQ(samples_per_channel,
+ playout_parameters_.frames_per_10ms_buffer());
+ }
+ {
+ MutexLock lock(&lock_);
+ play_count_++;
+ }
+ samples_out = samples_per_channel * channels;
+ // Read audio data from audio stream object if one has been injected.
+ if (audio_stream_) {
+ audio_stream_->Read(rtc::MakeArrayView(
+ static_cast<int16_t*>(audio_buffer), samples_per_channel * channels));
+ } else {
+ // Fill the audio buffer with zeros to avoid disturbing audio.
+ const size_t num_bytes = samples_per_channel * bytes_per_frame;
+ std::memset(audio_buffer, 0, num_bytes);
+ }
+    // Signal the event after the given number of callbacks.
+ if (event_ && ReceivedEnoughCallbacks()) {
+ event_->Set();
+ }
+ return 0;
+ }
+
+ bool ReceivedEnoughCallbacks() {
+ bool recording_done = false;
+ if (rec_mode()) {
+ MutexLock lock(&lock_);
+ recording_done = rec_count_ >= num_callbacks_;
+ } else {
+ recording_done = true;
+ }
+ bool playout_done = false;
+ if (play_mode()) {
+ MutexLock lock(&lock_);
+ playout_done = play_count_ >= num_callbacks_;
+ } else {
+ playout_done = true;
+ }
+ return recording_done && playout_done;
+ }
+
+ bool play_mode() const {
+ return type_ == TransportType::kPlay ||
+ type_ == TransportType::kPlayAndRecord;
+ }
+
+ bool rec_mode() const {
+ return type_ == TransportType::kRecord ||
+ type_ == TransportType::kPlayAndRecord;
+ }
+
+ void ResetCallbackCounters() {
+ MutexLock lock(&lock_);
+ if (play_mode()) {
+ play_count_ = 0;
+ }
+ if (rec_mode()) {
+ rec_count_ = 0;
+ }
+ }
+
+ private:
+ Mutex lock_;
+ TransportType type_ = TransportType::kInvalid;
+ rtc::Event* event_ = nullptr;
+ AudioStream* audio_stream_ = nullptr;
+ size_t num_callbacks_ = 0;
+ size_t play_count_ RTC_GUARDED_BY(lock_) = 0;
+ size_t rec_count_ RTC_GUARDED_BY(lock_) = 0;
+ AudioParameters playout_parameters_;
+ AudioParameters record_parameters_;
+};
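+
+// Typical wiring of the mock in the tests that follow (sketch of the pattern
+// used by e.g. StartPlayoutVerifyCallbacks below):
+//
+//   MockAudioTransport mock(TransportType::kPlay);
+//   mock.HandleCallbacks(event(), nullptr, kNumCallbacks);
+//   EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
+//   StartPlayout();
+//   event()->Wait(kTestTimeOutInMilliseconds);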
+
+// AudioDeviceTest test fixture.
+
+// bugs.webrtc.org/9808
+// Both the tests and the code under test are very old, unstaffed and not
+// part of the WebRTC stack.
+// Here, sanitizers make the tests hang without providing a useful report.
+// So we are just disabling them, with no intention to re-enable them.
+#if defined(ADDRESS_SANITIZER) || defined(MEMORY_SANITIZER) || \
+ defined(THREAD_SANITIZER) || defined(UNDEFINED_SANITIZER)
+#define MAYBE_AudioDeviceTest DISABLED_AudioDeviceTest
+#else
+#define MAYBE_AudioDeviceTest AudioDeviceTest
+#endif
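+
+// gtest treats tests whose names start with "DISABLED_" as disabled, so the
+// MAYBE_AudioDeviceTest macro above disables the whole fixture in sanitizer
+// builds.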
+
+class MAYBE_AudioDeviceTest
+ : public ::testing::TestWithParam<webrtc::AudioDeviceModule::AudioLayer> {
+ protected:
+ MAYBE_AudioDeviceTest()
+ : audio_layer_(GetParam()),
+ task_queue_factory_(CreateDefaultTaskQueueFactory()) {
+ rtc::LogMessage::LogToDebug(rtc::LS_INFO);
+ // Add extra logging fields here if needed for debugging.
+ rtc::LogMessage::LogTimestamps();
+ rtc::LogMessage::LogThreads();
+ audio_device_ = CreateAudioDevice();
+ EXPECT_NE(audio_device_.get(), nullptr);
+ AudioDeviceModule::AudioLayer audio_layer;
+ int got_platform_audio_layer =
+ audio_device_->ActiveAudioLayer(&audio_layer);
+ // First, ensure that a valid audio layer can be activated.
+ if (got_platform_audio_layer != 0) {
+ requirements_satisfied_ = false;
+ }
+ // Next, verify that the ADM can be initialized.
+ if (requirements_satisfied_) {
+ requirements_satisfied_ = (audio_device_->Init() == 0);
+ }
+ // Finally, ensure that at least one valid device exists in each direction.
+ if (requirements_satisfied_) {
+ const int16_t num_playout_devices = audio_device_->PlayoutDevices();
+ const int16_t num_record_devices = audio_device_->RecordingDevices();
+ requirements_satisfied_ =
+ num_playout_devices > 0 && num_record_devices > 0;
+ }
+ if (requirements_satisfied_) {
+ EXPECT_EQ(0, audio_device_->SetPlayoutDevice(AUDIO_DEVICE_ID));
+ EXPECT_EQ(0, audio_device_->InitSpeaker());
+ EXPECT_EQ(0, audio_device_->StereoPlayoutIsAvailable(&stereo_playout_));
+ EXPECT_EQ(0, audio_device_->SetStereoPlayout(stereo_playout_));
+ EXPECT_EQ(0, audio_device_->SetRecordingDevice(AUDIO_DEVICE_ID));
+ EXPECT_EQ(0, audio_device_->InitMicrophone());
+ // Avoid asking for input stereo support and always record in mono
+ // since asking can cause issues in combination with remote desktop.
+ // See https://bugs.chromium.org/p/webrtc/issues/detail?id=7397 for
+ // details.
+ EXPECT_EQ(0, audio_device_->SetStereoRecording(false));
+ }
+ }
+
+ // This is needed by all tests using MockAudioTransport,
+ // since there is no way to unregister it.
+  // Without Terminate(), audio_device would still access
+ // the destructed mock via "webrtc_audio_module_rec_thread".
+ // An alternative would be for the mock to outlive audio_device.
+ void PreTearDown() { EXPECT_EQ(0, audio_device_->Terminate()); }
+
+ virtual ~MAYBE_AudioDeviceTest() {
+ if (audio_device_) {
+ EXPECT_EQ(0, audio_device_->Terminate());
+ }
+ }
+
+ bool requirements_satisfied() const { return requirements_satisfied_; }
+ rtc::Event* event() { return &event_; }
+ AudioDeviceModule::AudioLayer audio_layer() const { return audio_layer_; }
+
+ // AudioDeviceModuleForTest extends the default ADM interface with some extra
+ // test methods. Intended for usage in tests only and requires a unique
+ // factory method. See CreateAudioDevice() for details.
+ const rtc::scoped_refptr<AudioDeviceModuleForTest>& audio_device() const {
+ return audio_device_;
+ }
+
+ rtc::scoped_refptr<AudioDeviceModuleForTest> CreateAudioDevice() {
+ // Use the default factory for kPlatformDefaultAudio and a special factory
+ // CreateWindowsCoreAudioAudioDeviceModuleForTest() for kWindowsCoreAudio2.
+ // The value of `audio_layer_` is set at construction by GetParam() and two
+ // different layers are tested on Windows only.
+ if (audio_layer_ == AudioDeviceModule::kPlatformDefaultAudio) {
+ return AudioDeviceModule::CreateForTest(audio_layer_,
+ task_queue_factory_.get());
+ } else if (audio_layer_ == AudioDeviceModule::kWindowsCoreAudio2) {
+#ifdef WEBRTC_WIN
+      // We must initialize the COM library on a thread before calling any of
+ // the library functions. All COM functions in the ADM will return
+ // CO_E_NOTINITIALIZED otherwise.
+ com_initializer_ =
+ std::make_unique<ScopedCOMInitializer>(ScopedCOMInitializer::kMTA);
+ EXPECT_TRUE(com_initializer_->Succeeded());
+ EXPECT_TRUE(webrtc_win::core_audio_utility::IsSupported());
+ EXPECT_TRUE(webrtc_win::core_audio_utility::IsMMCSSSupported());
+ return CreateWindowsCoreAudioAudioDeviceModuleForTest(
+ task_queue_factory_.get(), true);
+#else
+ return nullptr;
+#endif
+ } else {
+ return nullptr;
+ }
+ }
+
+ void StartPlayout() {
+ EXPECT_FALSE(audio_device()->Playing());
+ EXPECT_EQ(0, audio_device()->InitPlayout());
+ EXPECT_TRUE(audio_device()->PlayoutIsInitialized());
+ EXPECT_EQ(0, audio_device()->StartPlayout());
+ EXPECT_TRUE(audio_device()->Playing());
+ }
+
+ void StopPlayout() {
+ EXPECT_EQ(0, audio_device()->StopPlayout());
+ EXPECT_FALSE(audio_device()->Playing());
+ EXPECT_FALSE(audio_device()->PlayoutIsInitialized());
+ }
+
+ void StartRecording() {
+ EXPECT_FALSE(audio_device()->Recording());
+ EXPECT_EQ(0, audio_device()->InitRecording());
+ EXPECT_TRUE(audio_device()->RecordingIsInitialized());
+ EXPECT_EQ(0, audio_device()->StartRecording());
+ EXPECT_TRUE(audio_device()->Recording());
+ }
+
+ void StopRecording() {
+ EXPECT_EQ(0, audio_device()->StopRecording());
+ EXPECT_FALSE(audio_device()->Recording());
+ EXPECT_FALSE(audio_device()->RecordingIsInitialized());
+ }
+
+ bool NewWindowsAudioDeviceModuleIsUsed() {
+#ifdef WEBRTC_WIN
+ AudioDeviceModule::AudioLayer audio_layer;
+ EXPECT_EQ(0, audio_device()->ActiveAudioLayer(&audio_layer));
+ if (audio_layer == AudioDeviceModule::kWindowsCoreAudio2) {
+      // The default device is always added as the first element in the list
+      // and the default communication device as the second element. Hence,
+      // the list contains two extra elements in this case.
+ return true;
+ }
+#endif
+ return false;
+ }
+
+ private:
+#ifdef WEBRTC_WIN
+ // Windows Core Audio based ADM needs to run on a COM initialized thread.
+ std::unique_ptr<ScopedCOMInitializer> com_initializer_;
+#endif
+ AudioDeviceModule::AudioLayer audio_layer_;
+ std::unique_ptr<TaskQueueFactory> task_queue_factory_;
+ bool requirements_satisfied_ = true;
+ rtc::Event event_;
+ rtc::scoped_refptr<AudioDeviceModuleForTest> audio_device_;
+ bool stereo_playout_ = false;
+};
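+
+// In the tests below, SKIP_TEST_IF_NOT(requirements_satisfied()) turns a test
+// into a no-op when the fixture failed to activate an audio layer, initialize
+// the ADM, or find at least one playout and one recording device.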
+
+// Instead of using the test fixture, verify that the different factory methods
+// work as intended.
+TEST(MAYBE_AudioDeviceTestWin, ConstructDestructWithFactory) {
+ std::unique_ptr<TaskQueueFactory> task_queue_factory =
+ CreateDefaultTaskQueueFactory();
+ rtc::scoped_refptr<AudioDeviceModule> audio_device;
+ // The default factory should work for all platforms when a default ADM is
+ // requested.
+ audio_device = AudioDeviceModule::Create(
+ AudioDeviceModule::kPlatformDefaultAudio, task_queue_factory.get());
+ EXPECT_TRUE(audio_device);
+ audio_device = nullptr;
+#ifdef WEBRTC_WIN
+ // For Windows, the old factory method creates an ADM where the platform-
+ // specific parts are implemented by an AudioDeviceGeneric object. Verify
+ // that the old factory can't be used in combination with the latest audio
+ // layer AudioDeviceModule::kWindowsCoreAudio2.
+ audio_device = AudioDeviceModule::Create(
+ AudioDeviceModule::kWindowsCoreAudio2, task_queue_factory.get());
+ EXPECT_FALSE(audio_device);
+ audio_device = nullptr;
+ // Instead, ensure that the new dedicated factory method called
+ // CreateWindowsCoreAudioAudioDeviceModule() can be used on Windows and that
+  // it sets the audio layer to kWindowsCoreAudio2 implicitly. Note that the
+  // new ADM for Windows must be created on a COM thread.
+ ScopedCOMInitializer com_initializer(ScopedCOMInitializer::kMTA);
+ EXPECT_TRUE(com_initializer.Succeeded());
+ audio_device =
+ CreateWindowsCoreAudioAudioDeviceModule(task_queue_factory.get());
+ EXPECT_TRUE(audio_device);
+ AudioDeviceModule::AudioLayer audio_layer;
+ EXPECT_EQ(0, audio_device->ActiveAudioLayer(&audio_layer));
+ EXPECT_EQ(audio_layer, AudioDeviceModule::kWindowsCoreAudio2);
+#endif
+}
+
+// Uses the test fixture to create, initialize and destruct the ADM.
+TEST_P(MAYBE_AudioDeviceTest, ConstructDestructDefault) {}
+
+TEST_P(MAYBE_AudioDeviceTest, InitTerminate) {
+ SKIP_TEST_IF_NOT(requirements_satisfied());
+ // Initialization is part of the test fixture.
+ EXPECT_TRUE(audio_device()->Initialized());
+ EXPECT_EQ(0, audio_device()->Terminate());
+ EXPECT_FALSE(audio_device()->Initialized());
+}
+
+// Enumerate all available and active output devices.
+TEST_P(MAYBE_AudioDeviceTest, PlayoutDeviceNames) {
+ SKIP_TEST_IF_NOT(requirements_satisfied());
+ char device_name[kAdmMaxDeviceNameSize];
+ char unique_id[kAdmMaxGuidSize];
+ int num_devices = audio_device()->PlayoutDevices();
+ if (NewWindowsAudioDeviceModuleIsUsed()) {
+ num_devices += 2;
+ }
+ EXPECT_GT(num_devices, 0);
+ for (int i = 0; i < num_devices; ++i) {
+ EXPECT_EQ(0, audio_device()->PlayoutDeviceName(i, device_name, unique_id));
+ }
+ EXPECT_EQ(-1, audio_device()->PlayoutDeviceName(num_devices, device_name,
+ unique_id));
+}
+
+// Enumerate all available and active input devices.
+TEST_P(MAYBE_AudioDeviceTest, RecordingDeviceNames) {
+ SKIP_TEST_IF_NOT(requirements_satisfied());
+ char device_name[kAdmMaxDeviceNameSize];
+ char unique_id[kAdmMaxGuidSize];
+ int num_devices = audio_device()->RecordingDevices();
+ if (NewWindowsAudioDeviceModuleIsUsed()) {
+ num_devices += 2;
+ }
+ EXPECT_GT(num_devices, 0);
+ for (int i = 0; i < num_devices; ++i) {
+ EXPECT_EQ(0,
+ audio_device()->RecordingDeviceName(i, device_name, unique_id));
+ }
+ EXPECT_EQ(-1, audio_device()->RecordingDeviceName(num_devices, device_name,
+ unique_id));
+}
+
+// Counts the number of active output devices and ensures that all can be
+// selected.
+TEST_P(MAYBE_AudioDeviceTest, SetPlayoutDevice) {
+ SKIP_TEST_IF_NOT(requirements_satisfied());
+ int num_devices = audio_device()->PlayoutDevices();
+ if (NewWindowsAudioDeviceModuleIsUsed()) {
+ num_devices += 2;
+ }
+ EXPECT_GT(num_devices, 0);
+ // Verify that all available playout devices can be set (not enabled yet).
+ for (int i = 0; i < num_devices; ++i) {
+ EXPECT_EQ(0, audio_device()->SetPlayoutDevice(i));
+ }
+ EXPECT_EQ(-1, audio_device()->SetPlayoutDevice(num_devices));
+#ifdef WEBRTC_WIN
+ // On Windows, verify the alternative method where the user can select device
+ // by role.
+ EXPECT_EQ(
+ 0, audio_device()->SetPlayoutDevice(AudioDeviceModule::kDefaultDevice));
+ EXPECT_EQ(0, audio_device()->SetPlayoutDevice(
+ AudioDeviceModule::kDefaultCommunicationDevice));
+#endif
+}
+
+// Counts the number of active input devices and ensures that all can be
+// selected.
+TEST_P(MAYBE_AudioDeviceTest, SetRecordingDevice) {
+ SKIP_TEST_IF_NOT(requirements_satisfied());
+ int num_devices = audio_device()->RecordingDevices();
+ if (NewWindowsAudioDeviceModuleIsUsed()) {
+ num_devices += 2;
+ }
+ EXPECT_GT(num_devices, 0);
+ // Verify that all available recording devices can be set (not enabled yet).
+ for (int i = 0; i < num_devices; ++i) {
+ EXPECT_EQ(0, audio_device()->SetRecordingDevice(i));
+ }
+ EXPECT_EQ(-1, audio_device()->SetRecordingDevice(num_devices));
+#ifdef WEBRTC_WIN
+ // On Windows, verify the alternative method where the user can select device
+ // by role.
+ EXPECT_EQ(
+ 0, audio_device()->SetRecordingDevice(AudioDeviceModule::kDefaultDevice));
+ EXPECT_EQ(0, audio_device()->SetRecordingDevice(
+ AudioDeviceModule::kDefaultCommunicationDevice));
+#endif
+}
+
+// Tests Start/Stop playout without any registered audio callback.
+TEST_P(MAYBE_AudioDeviceTest, StartStopPlayout) {
+ SKIP_TEST_IF_NOT(requirements_satisfied());
+ StartPlayout();
+ StopPlayout();
+}
+
+// Tests Start/Stop recording without any registered audio callback.
+TEST_P(MAYBE_AudioDeviceTest, StartStopRecording) {
+ SKIP_TEST_IF_NOT(requirements_satisfied());
+ StartRecording();
+ StopRecording();
+}
+
+// Tests Start/Stop playout for all available output devices to ensure that
+// the selected device can be created and used as intended.
+TEST_P(MAYBE_AudioDeviceTest, StartStopPlayoutWithRealDevice) {
+ SKIP_TEST_IF_NOT(requirements_satisfied());
+ int num_devices = audio_device()->PlayoutDevices();
+ if (NewWindowsAudioDeviceModuleIsUsed()) {
+ num_devices += 2;
+ }
+ EXPECT_GT(num_devices, 0);
+ // Verify that all available playout devices can be set and used.
+ for (int i = 0; i < num_devices; ++i) {
+ EXPECT_EQ(0, audio_device()->SetPlayoutDevice(i));
+ StartPlayout();
+ StopPlayout();
+ }
+#ifdef WEBRTC_WIN
+ AudioDeviceModule::WindowsDeviceType device_role[] = {
+ AudioDeviceModule::kDefaultDevice,
+ AudioDeviceModule::kDefaultCommunicationDevice};
+ for (size_t i = 0; i < arraysize(device_role); ++i) {
+ EXPECT_EQ(0, audio_device()->SetPlayoutDevice(device_role[i]));
+ StartPlayout();
+ StopPlayout();
+ }
+#endif
+}
+
+// Tests Start/Stop recording for all available input devices to ensure that
+// the selected device can be created and used as intended.
+TEST_P(MAYBE_AudioDeviceTest, StartStopRecordingWithRealDevice) {
+ SKIP_TEST_IF_NOT(requirements_satisfied());
+ int num_devices = audio_device()->RecordingDevices();
+ if (NewWindowsAudioDeviceModuleIsUsed()) {
+ num_devices += 2;
+ }
+ EXPECT_GT(num_devices, 0);
+ // Verify that all available recording devices can be set and used.
+ for (int i = 0; i < num_devices; ++i) {
+ EXPECT_EQ(0, audio_device()->SetRecordingDevice(i));
+ StartRecording();
+ StopRecording();
+ }
+#ifdef WEBRTC_WIN
+ AudioDeviceModule::WindowsDeviceType device_role[] = {
+ AudioDeviceModule::kDefaultDevice,
+ AudioDeviceModule::kDefaultCommunicationDevice};
+ for (size_t i = 0; i < arraysize(device_role); ++i) {
+ EXPECT_EQ(0, audio_device()->SetRecordingDevice(device_role[i]));
+ StartRecording();
+ StopRecording();
+ }
+#endif
+}
+
+// Tests Init/Stop/Init recording without any registered audio callback.
+// See https://bugs.chromium.org/p/webrtc/issues/detail?id=8041 for details
+// on why this test is useful.
+TEST_P(MAYBE_AudioDeviceTest, InitStopInitRecording) {
+ SKIP_TEST_IF_NOT(requirements_satisfied());
+ EXPECT_EQ(0, audio_device()->InitRecording());
+ EXPECT_TRUE(audio_device()->RecordingIsInitialized());
+ StopRecording();
+ EXPECT_EQ(0, audio_device()->InitRecording());
+ StopRecording();
+}
+
+// Verify that additional attempts to initialize or start recording while
+// already active work. Additional calls should just be ignored.
+TEST_P(MAYBE_AudioDeviceTest, StartInitRecording) {
+ SKIP_TEST_IF_NOT(requirements_satisfied());
+ StartRecording();
+ // An additional attempt to initialize at this stage should be ignored.
+ EXPECT_EQ(0, audio_device()->InitRecording());
+ // Same for additional request to start recording while already active.
+ EXPECT_EQ(0, audio_device()->StartRecording());
+ StopRecording();
+}
+
+// Verify that additional attempts to initialize or start playout while
+// already active work. Additional calls should just be ignored.
+TEST_P(MAYBE_AudioDeviceTest, StartInitPlayout) {
+ SKIP_TEST_IF_NOT(requirements_satisfied());
+ StartPlayout();
+ // An additional attempt to initialize at this stage should be ignored.
+ EXPECT_EQ(0, audio_device()->InitPlayout());
+ // Same for additional request to start playout while already active.
+ EXPECT_EQ(0, audio_device()->StartPlayout());
+ StopPlayout();
+}
+
+// Tests Init/Stop/Init recording while playout is active.
+TEST_P(MAYBE_AudioDeviceTest, InitStopInitRecordingWhilePlaying) {
+ SKIP_TEST_IF_NOT(requirements_satisfied());
+ StartPlayout();
+ EXPECT_EQ(0, audio_device()->InitRecording());
+ EXPECT_TRUE(audio_device()->RecordingIsInitialized());
+ StopRecording();
+ EXPECT_EQ(0, audio_device()->InitRecording());
+ StopRecording();
+ StopPlayout();
+}
+
+// Tests Init/Stop/Init playout without any registered audio callback.
+TEST_P(MAYBE_AudioDeviceTest, InitStopInitPlayout) {
+ SKIP_TEST_IF_NOT(requirements_satisfied());
+ EXPECT_EQ(0, audio_device()->InitPlayout());
+ EXPECT_TRUE(audio_device()->PlayoutIsInitialized());
+ StopPlayout();
+ EXPECT_EQ(0, audio_device()->InitPlayout());
+ StopPlayout();
+}
+
+// Tests Init/Stop/Init playout while recording is active.
+TEST_P(MAYBE_AudioDeviceTest, InitStopInitPlayoutWhileRecording) {
+ SKIP_TEST_IF_NOT(requirements_satisfied());
+ StartRecording();
+ EXPECT_EQ(0, audio_device()->InitPlayout());
+ EXPECT_TRUE(audio_device()->PlayoutIsInitialized());
+ StopPlayout();
+ EXPECT_EQ(0, audio_device()->InitPlayout());
+ StopPlayout();
+ StopRecording();
+}
+
+// TODO(henrika): restart without intermediate destruction is currently only
+// supported on Windows.
+#ifdef WEBRTC_WIN
+// Tests Start/Stop playout followed by a second session (emulates a restart
+// triggered by a user using public APIs).
+TEST_P(MAYBE_AudioDeviceTest, StartStopPlayoutWithExternalRestart) {
+ SKIP_TEST_IF_NOT(requirements_satisfied());
+ StartPlayout();
+ StopPlayout();
+ // Restart playout without destroying the ADM in between. Ensures that we
+ // support: Init(), Start(), Stop(), Init(), Start(), Stop().
+ StartPlayout();
+ StopPlayout();
+}
+
+// Tests Start/Stop recording followed by a second session (emulates a restart
+// triggered by a user using public APIs).
+TEST_P(MAYBE_AudioDeviceTest, StartStopRecordingWithExternalRestart) {
+ SKIP_TEST_IF_NOT(requirements_satisfied());
+ StartRecording();
+ StopRecording();
+ // Restart recording without destroying the ADM in between. Ensures that we
+ // support: Init(), Start(), Stop(), Init(), Start(), Stop().
+ StartRecording();
+ StopRecording();
+}
+
+// Tests Start/Stop playout followed by a second session (emulates a restart
+// triggered by an internal callback e.g. corresponding to a device switch).
+// Note that internal restart is only supported in combination with the latest
+// Windows ADM.
+TEST_P(MAYBE_AudioDeviceTest, StartStopPlayoutWithInternalRestart) {
+ SKIP_TEST_IF_NOT(requirements_satisfied());
+ if (audio_layer() != AudioDeviceModule::kWindowsCoreAudio2) {
+ return;
+ }
+ MockAudioTransport mock(TransportType::kPlay);
+ mock.HandleCallbacks(event(), nullptr, kNumCallbacks);
+ EXPECT_CALL(mock, NeedMorePlayData(_, _, _, _, NotNull(), _, _, _))
+ .Times(AtLeast(kNumCallbacks));
+ EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
+ StartPlayout();
+ event()->Wait(kTestTimeOutInMilliseconds);
+ EXPECT_TRUE(audio_device()->Playing());
+ // Restart playout but without stopping the internal audio thread.
+ // This procedure uses a non-public test API and it emulates what happens
+ // inside the ADM when e.g. a device is removed.
+ EXPECT_EQ(0, audio_device()->RestartPlayoutInternally());
+
+ // Run basic tests of public APIs while a restart attempt is active.
+ // These calls should now be very thin and not trigger any new actions.
+ EXPECT_EQ(-1, audio_device()->StopPlayout());
+ EXPECT_TRUE(audio_device()->Playing());
+ EXPECT_TRUE(audio_device()->PlayoutIsInitialized());
+ EXPECT_EQ(0, audio_device()->InitPlayout());
+ EXPECT_EQ(0, audio_device()->StartPlayout());
+
+ // Wait until audio has restarted and a new sequence of audio callbacks
+ // becomes active.
+ // TODO(henrika): is it possible to verify that the internal state transition
+ // is Stop->Init->Start?
+ ASSERT_TRUE(Mock::VerifyAndClearExpectations(&mock));
+ mock.ResetCallbackCounters();
+ EXPECT_CALL(mock, NeedMorePlayData(_, _, _, _, NotNull(), _, _, _))
+ .Times(AtLeast(kNumCallbacks));
+ event()->Wait(kTestTimeOutInMilliseconds);
+ EXPECT_TRUE(audio_device()->Playing());
+ // Stop playout and the audio thread after successful internal restart.
+ StopPlayout();
+ PreTearDown();
+}
+
+// Tests Start/Stop recording followed by a second session (emulates a restart
+// triggered by an internal callback e.g. corresponding to a device switch).
+// Note that internal restart is only supported in combination with the latest
+// Windows ADM.
+TEST_P(MAYBE_AudioDeviceTest, StartStopRecordingWithInternalRestart) {
+ SKIP_TEST_IF_NOT(requirements_satisfied());
+ if (audio_layer() != AudioDeviceModule::kWindowsCoreAudio2) {
+ return;
+ }
+ MockAudioTransport mock(TransportType::kRecord);
+ mock.HandleCallbacks(event(), nullptr, kNumCallbacks);
+ EXPECT_CALL(mock, RecordedDataIsAvailable(NotNull(), _, _, _, _, Ge(0u), 0, _,
+ false, _))
+ .Times(AtLeast(kNumCallbacks));
+ EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
+ StartRecording();
+ event()->Wait(kTestTimeOutInMilliseconds);
+ EXPECT_TRUE(audio_device()->Recording());
+ // Restart recording but without stopping the internal audio thread.
+ // This procedure uses a non-public test API and it emulates what happens
+ // inside the ADM when e.g. a device is removed.
+ EXPECT_EQ(0, audio_device()->RestartRecordingInternally());
+
+ // Run basic tests of public APIs while a restart attempt is active.
+ // These calls should now be very thin and not trigger any new actions.
+ EXPECT_EQ(-1, audio_device()->StopRecording());
+ EXPECT_TRUE(audio_device()->Recording());
+ EXPECT_TRUE(audio_device()->RecordingIsInitialized());
+ EXPECT_EQ(0, audio_device()->InitRecording());
+ EXPECT_EQ(0, audio_device()->StartRecording());
+
+ // Wait until audio has restarted and a new sequence of audio callbacks
+ // becomes active.
+ // TODO(henrika): is it possible to verify that the internal state transition
+ // is Stop->Init->Start?
+ ASSERT_TRUE(Mock::VerifyAndClearExpectations(&mock));
+ mock.ResetCallbackCounters();
+ EXPECT_CALL(mock, RecordedDataIsAvailable(NotNull(), _, _, _, _, Ge(0u), 0, _,
+ false, _))
+ .Times(AtLeast(kNumCallbacks));
+ event()->Wait(kTestTimeOutInMilliseconds);
+ EXPECT_TRUE(audio_device()->Recording());
+ // Stop recording and the audio thread after successful internal restart.
+ StopRecording();
+ PreTearDown();
+}
+#endif // #ifdef WEBRTC_WIN
+
+// Start playout and verify that the native audio layer starts asking for real
+// audio samples to play out using the NeedMorePlayData() callback.
+// Note that we can't add expectations on audio parameters in EXPECT_CALL
+// since parameters are not provided in each callback. We therefore test and
+// verify the parameters in the fake audio transport implementation instead.
+TEST_P(MAYBE_AudioDeviceTest, StartPlayoutVerifyCallbacks) {
+ SKIP_TEST_IF_NOT(requirements_satisfied());
+ MockAudioTransport mock(TransportType::kPlay);
+ mock.HandleCallbacks(event(), nullptr, kNumCallbacks);
+ EXPECT_CALL(mock, NeedMorePlayData(_, _, _, _, NotNull(), _, _, _))
+ .Times(AtLeast(kNumCallbacks));
+ EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
+ StartPlayout();
+ event()->Wait(kTestTimeOutInMilliseconds);
+ StopPlayout();
+ PreTearDown();
+}
+
+// Don't run these tests in combination with sanitizers.
+// They are already flaky *without* sanitizers.
+// Sanitizers seem to increase flakiness (which adds noise)
+// without reporting anything.
+// TODO(webrtc:10867): Re-enable when flakiness fixed.
+#if defined(ADDRESS_SANITIZER) || defined(MEMORY_SANITIZER) || \
+ defined(THREAD_SANITIZER)
+#define MAYBE_StartRecordingVerifyCallbacks \
+ DISABLED_StartRecordingVerifyCallbacks
+#define MAYBE_StartPlayoutAndRecordingVerifyCallbacks \
+ DISABLED_StartPlayoutAndRecordingVerifyCallbacks
+#else
+#define MAYBE_StartRecordingVerifyCallbacks StartRecordingVerifyCallbacks
+#define MAYBE_StartPlayoutAndRecordingVerifyCallbacks \
+ StartPlayoutAndRecordingVerifyCallbacks
+#endif
+
+// Start recording and verify that the native audio layer starts providing real
+// audio samples using the RecordedDataIsAvailable() callback.
+TEST_P(MAYBE_AudioDeviceTest, MAYBE_StartRecordingVerifyCallbacks) {
+ SKIP_TEST_IF_NOT(requirements_satisfied());
+ MockAudioTransport mock(TransportType::kRecord);
+ mock.HandleCallbacks(event(), nullptr, kNumCallbacks);
+ EXPECT_CALL(mock, RecordedDataIsAvailable(NotNull(), _, _, _, _, Ge(0u), 0, _,
+ false, _))
+ .Times(AtLeast(kNumCallbacks));
+ EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
+ StartRecording();
+ event()->Wait(kTestTimeOutInMilliseconds);
+ StopRecording();
+ PreTearDown();
+}
+
+// Start playout and recording (full-duplex audio) and verify that audio is
+// active in both directions.
+TEST_P(MAYBE_AudioDeviceTest, MAYBE_StartPlayoutAndRecordingVerifyCallbacks) {
+ SKIP_TEST_IF_NOT(requirements_satisfied());
+ MockAudioTransport mock(TransportType::kPlayAndRecord);
+ mock.HandleCallbacks(event(), nullptr, kNumCallbacks);
+ EXPECT_CALL(mock, NeedMorePlayData(_, _, _, _, NotNull(), _, _, _))
+ .Times(AtLeast(kNumCallbacks));
+ EXPECT_CALL(mock, RecordedDataIsAvailable(NotNull(), _, _, _, _, Ge(0u), 0, _,
+ false, _))
+ .Times(AtLeast(kNumCallbacks));
+ EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
+ StartPlayout();
+ StartRecording();
+ event()->Wait(kTestTimeOutInMilliseconds);
+ StopRecording();
+ StopPlayout();
+ PreTearDown();
+}
+
+// Start playout and recording and store recorded data in an intermediate FIFO
+// buffer from which the playout side then reads its samples in the same order
+// as they were stored. Under ideal circumstances, a callback sequence would
+// look like: ...+-+-+-+-+-+-+-..., where '+' means 'packet recorded' and '-'
+// means 'packet played'. Under such conditions, the FIFO would contain at
+// most one element, with an average somewhere in (0,1) depending on how long
+// the packets are buffered. However, under more realistic conditions, the
+// size of the FIFO will vary more due to an imbalance between the two sides.
+// This test tries to verify that the device maintains a balanced callback
+// sequence by running in loopback for a few seconds while measuring the size
+// (max and average) of the FIFO. The size of the FIFO is increased by the
+// recording side and decreased by the playout side.
+TEST_P(MAYBE_AudioDeviceTest, RunPlayoutAndRecordingInFullDuplex) {
+ SKIP_TEST_IF_NOT(requirements_satisfied());
+ NiceMock<MockAudioTransport> mock(TransportType::kPlayAndRecord);
+ FifoAudioStream audio_stream;
+ mock.HandleCallbacks(event(), &audio_stream,
+ kFullDuplexTimeInSec * kNumCallbacksPerSecond);
+ EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
+ // Run both sides using the same channel configuration to avoid conversions
+ // between mono/stereo while running in full duplex mode. Also, some devices
+ // (mainly on Windows) do not support mono.
+ EXPECT_EQ(0, audio_device()->SetStereoPlayout(true));
+ EXPECT_EQ(0, audio_device()->SetStereoRecording(true));
+ // Mute speakers to prevent howling.
+ EXPECT_EQ(0, audio_device()->SetSpeakerVolume(0));
+ StartPlayout();
+ StartRecording();
+ event()->Wait(static_cast<int>(
+ std::max(kTestTimeOutInMilliseconds, 1000 * kFullDuplexTimeInSec)));
+ StopRecording();
+ StopPlayout();
+ PreTearDown();
+}
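+
+// If the FIFO statistics are of interest, the stream can be inspected after
+// the session has stopped (illustrative sketch; this test does not assert on
+// these values):
+//
+//   PRINT("fifo size: %zu, max: %zu, average: %zu\n", audio_stream.size(),
+//         audio_stream.max_size(), audio_stream.average_size());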
+
+// Runs audio in full duplex until user hits Enter. Intended as a manual test
+// to ensure that the audio quality is good and that real device switches work
+// as intended.
+TEST_P(MAYBE_AudioDeviceTest,
+ DISABLED_RunPlayoutAndRecordingInFullDuplexAndWaitForEnterKey) {
+ SKIP_TEST_IF_NOT(requirements_satisfied());
+ if (audio_layer() != AudioDeviceModule::kWindowsCoreAudio2) {
+ return;
+ }
+ NiceMock<MockAudioTransport> mock(TransportType::kPlayAndRecord);
+ FifoAudioStream audio_stream;
+ mock.HandleCallbacks(&audio_stream);
+ EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
+ EXPECT_EQ(0, audio_device()->SetStereoPlayout(true));
+ EXPECT_EQ(0, audio_device()->SetStereoRecording(true));
+  // Ensure that the sample rates for both directions are identical so that
+  // we can always listen to our own voice. This will lead to rate conversion
+  // (and higher latency) if the native sample rate is not 48 kHz.
+ EXPECT_EQ(0, audio_device()->SetPlayoutSampleRate(48000));
+ EXPECT_EQ(0, audio_device()->SetRecordingSampleRate(48000));
+ StartPlayout();
+ StartRecording();
+ do {
+ PRINT("Loopback audio is active at 48kHz. Press Enter to stop.\n");
+ } while (getchar() != '\n');
+ StopRecording();
+ StopPlayout();
+ PreTearDown();
+}
+
+// Measures loopback latency and reports the min, max and average values for
+// a full duplex audio session.
+// The latency is measured like so:
+// - Insert impulses periodically on the output side.
+// - Detect the impulses on the input side.
+// - Measure the time difference between the transmit time and receive time.
+// - Store time differences in a vector and calculate min, max and average.
+// This test needs the '--gtest_also_run_disabled_tests' flag to run and also
+// some sort of audio feedback loop, e.g. a headset where the mic is placed
+// close to the speaker to ensure the strongest possible echo. It is also
+// recommended to run the test at the highest possible output volume.
+TEST_P(MAYBE_AudioDeviceTest, DISABLED_MeasureLoopbackLatency) {
+ SKIP_TEST_IF_NOT(requirements_satisfied());
+ NiceMock<MockAudioTransport> mock(TransportType::kPlayAndRecord);
+ LatencyAudioStream audio_stream;
+ mock.HandleCallbacks(event(), &audio_stream,
+ kMeasureLatencyTimeInSec * kNumCallbacksPerSecond);
+ EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
+ EXPECT_EQ(0, audio_device()->SetStereoPlayout(true));
+ EXPECT_EQ(0, audio_device()->SetStereoRecording(true));
+ StartPlayout();
+ StartRecording();
+ event()->Wait(static_cast<int>(
+ std::max(kTestTimeOutInMilliseconds, 1000 * kMeasureLatencyTimeInSec)));
+ StopRecording();
+ StopPlayout();
+ // Avoid concurrent access to audio_stream.
+ PreTearDown();
+ // Verify that a sufficient number of transmitted impulses are detected.
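+  // Impulses are inserted at a rate of kImpulseFrequencyInHz, so roughly
+  // kImpulseFrequencyInHz * kMeasureLatencyTimeInSec impulses are expected in
+  // total; the margin of two tolerates impulses lost at session start/stop.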
+ EXPECT_GE(audio_stream.num_latency_values(),
+ static_cast<size_t>(
+ kImpulseFrequencyInHz * kMeasureLatencyTimeInSec - 2));
+ // Print out min, max and average delay values for debugging purposes.
+ audio_stream.PrintResults();
+}
+
+#ifdef WEBRTC_WIN
+// Test two different audio layers (or rather two different Core Audio
+// implementations) for Windows.
+INSTANTIATE_TEST_SUITE_P(
+ AudioLayerWin,
+ MAYBE_AudioDeviceTest,
+ ::testing::Values(AudioDeviceModule::kPlatformDefaultAudio,
+ AudioDeviceModule::kWindowsCoreAudio2));
+#else
+// For all platforms but Windows, only test the default audio layer.
+INSTANTIATE_TEST_SUITE_P(
+ AudioLayer,
+ MAYBE_AudioDeviceTest,
+ ::testing::Values(AudioDeviceModule::kPlatformDefaultAudio));
+#endif
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_device/dummy/audio_device_dummy.cc b/third_party/libwebrtc/modules/audio_device/dummy/audio_device_dummy.cc
new file mode 100644
index 0000000000..b8fd837038
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/dummy/audio_device_dummy.cc
@@ -0,0 +1,226 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_device/dummy/audio_device_dummy.h"
+
+namespace webrtc {
+
+int32_t AudioDeviceDummy::ActiveAudioLayer(
+ AudioDeviceModule::AudioLayer& audioLayer) const {
+ return -1;
+}
+
+AudioDeviceGeneric::InitStatus AudioDeviceDummy::Init() {
+ return InitStatus::OK;
+}
+
+int32_t AudioDeviceDummy::Terminate() {
+ return 0;
+}
+
+bool AudioDeviceDummy::Initialized() const {
+ return true;
+}
+
+int16_t AudioDeviceDummy::PlayoutDevices() {
+ return -1;
+}
+
+int16_t AudioDeviceDummy::RecordingDevices() {
+ return -1;
+}
+
+int32_t AudioDeviceDummy::PlayoutDeviceName(uint16_t index,
+ char name[kAdmMaxDeviceNameSize],
+ char guid[kAdmMaxGuidSize]) {
+ return -1;
+}
+
+int32_t AudioDeviceDummy::RecordingDeviceName(uint16_t index,
+ char name[kAdmMaxDeviceNameSize],
+ char guid[kAdmMaxGuidSize]) {
+ return -1;
+}
+
+int32_t AudioDeviceDummy::SetPlayoutDevice(uint16_t index) {
+ return -1;
+}
+
+int32_t AudioDeviceDummy::SetPlayoutDevice(
+ AudioDeviceModule::WindowsDeviceType device) {
+ return -1;
+}
+
+int32_t AudioDeviceDummy::SetRecordingDevice(uint16_t index) {
+ return -1;
+}
+
+int32_t AudioDeviceDummy::SetRecordingDevice(
+ AudioDeviceModule::WindowsDeviceType device) {
+ return -1;
+}
+
+int32_t AudioDeviceDummy::PlayoutIsAvailable(bool& available) {
+ return -1;
+}
+
+int32_t AudioDeviceDummy::InitPlayout() {
+ return -1;
+}
+
+bool AudioDeviceDummy::PlayoutIsInitialized() const {
+ return false;
+}
+
+int32_t AudioDeviceDummy::RecordingIsAvailable(bool& available) {
+ return -1;
+}
+
+int32_t AudioDeviceDummy::InitRecording() {
+ return -1;
+}
+
+bool AudioDeviceDummy::RecordingIsInitialized() const {
+ return false;
+}
+
+int32_t AudioDeviceDummy::StartPlayout() {
+ return -1;
+}
+
+int32_t AudioDeviceDummy::StopPlayout() {
+ return 0;
+}
+
+bool AudioDeviceDummy::Playing() const {
+ return false;
+}
+
+int32_t AudioDeviceDummy::StartRecording() {
+ return -1;
+}
+
+int32_t AudioDeviceDummy::StopRecording() {
+ return 0;
+}
+
+bool AudioDeviceDummy::Recording() const {
+ return false;
+}
+
+int32_t AudioDeviceDummy::InitSpeaker() {
+ return -1;
+}
+
+bool AudioDeviceDummy::SpeakerIsInitialized() const {
+ return false;
+}
+
+int32_t AudioDeviceDummy::InitMicrophone() {
+ return -1;
+}
+
+bool AudioDeviceDummy::MicrophoneIsInitialized() const {
+ return false;
+}
+
+int32_t AudioDeviceDummy::SpeakerVolumeIsAvailable(bool& available) {
+ return -1;
+}
+
+int32_t AudioDeviceDummy::SetSpeakerVolume(uint32_t volume) {
+ return -1;
+}
+
+int32_t AudioDeviceDummy::SpeakerVolume(uint32_t& volume) const {
+ return -1;
+}
+
+int32_t AudioDeviceDummy::MaxSpeakerVolume(uint32_t& maxVolume) const {
+ return -1;
+}
+
+int32_t AudioDeviceDummy::MinSpeakerVolume(uint32_t& minVolume) const {
+ return -1;
+}
+
+int32_t AudioDeviceDummy::MicrophoneVolumeIsAvailable(bool& available) {
+ return -1;
+}
+
+int32_t AudioDeviceDummy::SetMicrophoneVolume(uint32_t volume) {
+ return -1;
+}
+
+int32_t AudioDeviceDummy::MicrophoneVolume(uint32_t& volume) const {
+ return -1;
+}
+
+int32_t AudioDeviceDummy::MaxMicrophoneVolume(uint32_t& maxVolume) const {
+ return -1;
+}
+
+int32_t AudioDeviceDummy::MinMicrophoneVolume(uint32_t& minVolume) const {
+ return -1;
+}
+
+int32_t AudioDeviceDummy::SpeakerMuteIsAvailable(bool& available) {
+ return -1;
+}
+
+int32_t AudioDeviceDummy::SetSpeakerMute(bool enable) {
+ return -1;
+}
+
+int32_t AudioDeviceDummy::SpeakerMute(bool& enabled) const {
+ return -1;
+}
+
+int32_t AudioDeviceDummy::MicrophoneMuteIsAvailable(bool& available) {
+ return -1;
+}
+
+int32_t AudioDeviceDummy::SetMicrophoneMute(bool enable) {
+ return -1;
+}
+
+int32_t AudioDeviceDummy::MicrophoneMute(bool& enabled) const {
+ return -1;
+}
+
+int32_t AudioDeviceDummy::StereoPlayoutIsAvailable(bool& available) {
+ return -1;
+}
+int32_t AudioDeviceDummy::SetStereoPlayout(bool enable) {
+ return -1;
+}
+
+int32_t AudioDeviceDummy::StereoPlayout(bool& enabled) const {
+ return -1;
+}
+
+int32_t AudioDeviceDummy::StereoRecordingIsAvailable(bool& available) {
+ return -1;
+}
+
+int32_t AudioDeviceDummy::SetStereoRecording(bool enable) {
+ return -1;
+}
+
+int32_t AudioDeviceDummy::StereoRecording(bool& enabled) const {
+ return -1;
+}
+
+int32_t AudioDeviceDummy::PlayoutDelay(uint16_t& delayMS) const {
+ return -1;
+}
+
+void AudioDeviceDummy::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_device/dummy/audio_device_dummy.h b/third_party/libwebrtc/modules/audio_device/dummy/audio_device_dummy.h
new file mode 100644
index 0000000000..2a2541098e
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/dummy/audio_device_dummy.h
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef AUDIO_DEVICE_AUDIO_DEVICE_DUMMY_H_
+#define AUDIO_DEVICE_AUDIO_DEVICE_DUMMY_H_
+
+#include <stdint.h>
+
+#include "modules/audio_device/audio_device_buffer.h"
+#include "modules/audio_device/audio_device_generic.h"
+#include "modules/audio_device/include/audio_device.h"
+#include "modules/audio_device/include/audio_device_defines.h"
+
+namespace webrtc {
+
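+// A largely no-op AudioDeviceGeneric implementation: Init(), Terminate() and
+// the Stop* calls succeed, but device enumeration, transport and mixer
+// operations fail (return -1) or report an unavailable state, so no audio
+// ever flows.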
+class AudioDeviceDummy : public AudioDeviceGeneric {
+ public:
+ AudioDeviceDummy() {}
+ virtual ~AudioDeviceDummy() {}
+
+ // Retrieve the currently utilized audio layer
+ int32_t ActiveAudioLayer(
+ AudioDeviceModule::AudioLayer& audioLayer) const override;
+
+  // Main initialization and termination
+ InitStatus Init() override;
+ int32_t Terminate() override;
+ bool Initialized() const override;
+
+ // Device enumeration
+ int16_t PlayoutDevices() override;
+ int16_t RecordingDevices() override;
+ int32_t PlayoutDeviceName(uint16_t index,
+ char name[kAdmMaxDeviceNameSize],
+ char guid[kAdmMaxGuidSize]) override;
+ int32_t RecordingDeviceName(uint16_t index,
+ char name[kAdmMaxDeviceNameSize],
+ char guid[kAdmMaxGuidSize]) override;
+
+ // Device selection
+ int32_t SetPlayoutDevice(uint16_t index) override;
+ int32_t SetPlayoutDevice(
+ AudioDeviceModule::WindowsDeviceType device) override;
+ int32_t SetRecordingDevice(uint16_t index) override;
+ int32_t SetRecordingDevice(
+ AudioDeviceModule::WindowsDeviceType device) override;
+
+ // Audio transport initialization
+ int32_t PlayoutIsAvailable(bool& available) override;
+ int32_t InitPlayout() override;
+ bool PlayoutIsInitialized() const override;
+ int32_t RecordingIsAvailable(bool& available) override;
+ int32_t InitRecording() override;
+ bool RecordingIsInitialized() const override;
+
+ // Audio transport control
+ int32_t StartPlayout() override;
+ int32_t StopPlayout() override;
+ bool Playing() const override;
+ int32_t StartRecording() override;
+ int32_t StopRecording() override;
+ bool Recording() const override;
+
+ // Audio mixer initialization
+ int32_t InitSpeaker() override;
+ bool SpeakerIsInitialized() const override;
+ int32_t InitMicrophone() override;
+ bool MicrophoneIsInitialized() const override;
+
+ // Speaker volume controls
+ int32_t SpeakerVolumeIsAvailable(bool& available) override;
+ int32_t SetSpeakerVolume(uint32_t volume) override;
+ int32_t SpeakerVolume(uint32_t& volume) const override;
+ int32_t MaxSpeakerVolume(uint32_t& maxVolume) const override;
+ int32_t MinSpeakerVolume(uint32_t& minVolume) const override;
+
+ // Microphone volume controls
+ int32_t MicrophoneVolumeIsAvailable(bool& available) override;
+ int32_t SetMicrophoneVolume(uint32_t volume) override;
+ int32_t MicrophoneVolume(uint32_t& volume) const override;
+ int32_t MaxMicrophoneVolume(uint32_t& maxVolume) const override;
+ int32_t MinMicrophoneVolume(uint32_t& minVolume) const override;
+
+ // Speaker mute control
+ int32_t SpeakerMuteIsAvailable(bool& available) override;
+ int32_t SetSpeakerMute(bool enable) override;
+ int32_t SpeakerMute(bool& enabled) const override;
+
+ // Microphone mute control
+ int32_t MicrophoneMuteIsAvailable(bool& available) override;
+ int32_t SetMicrophoneMute(bool enable) override;
+ int32_t MicrophoneMute(bool& enabled) const override;
+
+ // Stereo support
+ int32_t StereoPlayoutIsAvailable(bool& available) override;
+ int32_t SetStereoPlayout(bool enable) override;
+ int32_t StereoPlayout(bool& enabled) const override;
+ int32_t StereoRecordingIsAvailable(bool& available) override;
+ int32_t SetStereoRecording(bool enable) override;
+ int32_t StereoRecording(bool& enabled) const override;
+
+ // Delay information and control
+ int32_t PlayoutDelay(uint16_t& delayMS) const override;
+
+ void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) override;
+};
+
+} // namespace webrtc
+
+#endif // AUDIO_DEVICE_AUDIO_DEVICE_DUMMY_H_
diff --git a/third_party/libwebrtc/modules/audio_device/dummy/file_audio_device.cc b/third_party/libwebrtc/modules/audio_device/dummy/file_audio_device.cc
new file mode 100644
index 0000000000..8c10ae4186
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/dummy/file_audio_device.cc
@@ -0,0 +1,508 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_device/dummy/file_audio_device.h"
+
+#include <string.h>
+
+#include "absl/strings/string_view.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/platform_thread.h"
+#include "rtc_base/time_utils.h"
+#include "system_wrappers/include/sleep.h"
+
+namespace webrtc {
+
+const int kRecordingFixedSampleRate = 48000;
+const size_t kRecordingNumChannels = 2;
+const int kPlayoutFixedSampleRate = 48000;
+const size_t kPlayoutNumChannels = 2;
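+// Each buffer below holds 10 ms of raw PCM audio: (sample rate / 100) frames,
+// times the channel count, times two bytes per 16-bit sample. At 48 kHz
+// stereo this is 480 * 2 * 2 = 1920 bytes. The input and output files use the
+// same raw format with no header.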
+const size_t kPlayoutBufferSize =
+ kPlayoutFixedSampleRate / 100 * kPlayoutNumChannels * 2;
+const size_t kRecordingBufferSize =
+ kRecordingFixedSampleRate / 100 * kRecordingNumChannels * 2;
+
+FileAudioDevice::FileAudioDevice(absl::string_view inputFilename,
+ absl::string_view outputFilename)
+ : _ptrAudioBuffer(NULL),
+ _recordingBuffer(NULL),
+ _playoutBuffer(NULL),
+ _recordingFramesLeft(0),
+ _playoutFramesLeft(0),
+ _recordingBufferSizeIn10MS(0),
+ _recordingFramesIn10MS(0),
+ _playoutFramesIn10MS(0),
+ _playing(false),
+ _recording(false),
+ _lastCallPlayoutMillis(0),
+ _lastCallRecordMillis(0),
+ _outputFilename(outputFilename),
+ _inputFilename(inputFilename) {}
+
+FileAudioDevice::~FileAudioDevice() {}
+
+int32_t FileAudioDevice::ActiveAudioLayer(
+ AudioDeviceModule::AudioLayer& audioLayer) const {
+ return -1;
+}
+
+AudioDeviceGeneric::InitStatus FileAudioDevice::Init() {
+ return InitStatus::OK;
+}
+
+int32_t FileAudioDevice::Terminate() {
+ return 0;
+}
+
+bool FileAudioDevice::Initialized() const {
+ return true;
+}
+
+int16_t FileAudioDevice::PlayoutDevices() {
+ return 1;
+}
+
+int16_t FileAudioDevice::RecordingDevices() {
+ return 1;
+}
+
+int32_t FileAudioDevice::PlayoutDeviceName(uint16_t index,
+ char name[kAdmMaxDeviceNameSize],
+ char guid[kAdmMaxGuidSize]) {
+ const char* kName = "dummy_device";
+ const char* kGuid = "dummy_device_unique_id";
+ if (index < 1) {
+ memset(name, 0, kAdmMaxDeviceNameSize);
+ memset(guid, 0, kAdmMaxGuidSize);
+ memcpy(name, kName, strlen(kName));
+    memcpy(guid, kGuid, strlen(kGuid));
+ return 0;
+ }
+ return -1;
+}
+
+int32_t FileAudioDevice::RecordingDeviceName(uint16_t index,
+ char name[kAdmMaxDeviceNameSize],
+ char guid[kAdmMaxGuidSize]) {
+ const char* kName = "dummy_device";
+ const char* kGuid = "dummy_device_unique_id";
+ if (index < 1) {
+ memset(name, 0, kAdmMaxDeviceNameSize);
+ memset(guid, 0, kAdmMaxGuidSize);
+ memcpy(name, kName, strlen(kName));
+ memcpy(guid, kGuid, strlen(guid));
+ return 0;
+ }
+ return -1;
+}
+
+int32_t FileAudioDevice::SetPlayoutDevice(uint16_t index) {
+ if (index == 0) {
+ _playout_index = index;
+ return 0;
+ }
+ return -1;
+}
+
+int32_t FileAudioDevice::SetPlayoutDevice(
+ AudioDeviceModule::WindowsDeviceType device) {
+ return -1;
+}
+
+int32_t FileAudioDevice::SetRecordingDevice(uint16_t index) {
+ if (index == 0) {
+ _record_index = index;
+ return _record_index;
+ }
+ return -1;
+}
+
+int32_t FileAudioDevice::SetRecordingDevice(
+ AudioDeviceModule::WindowsDeviceType device) {
+ return -1;
+}
+
+int32_t FileAudioDevice::PlayoutIsAvailable(bool& available) {
+ if (_playout_index == 0) {
+ available = true;
+ return _playout_index;
+ }
+ available = false;
+ return -1;
+}
+
+int32_t FileAudioDevice::InitPlayout() {
+ MutexLock lock(&mutex_);
+
+ if (_playing) {
+ return -1;
+ }
+
+ _playoutFramesIn10MS = static_cast<size_t>(kPlayoutFixedSampleRate / 100);
+
+ if (_ptrAudioBuffer) {
+ // Update webrtc audio buffer with the selected parameters
+ _ptrAudioBuffer->SetPlayoutSampleRate(kPlayoutFixedSampleRate);
+ _ptrAudioBuffer->SetPlayoutChannels(kPlayoutNumChannels);
+ }
+ return 0;
+}
+
+bool FileAudioDevice::PlayoutIsInitialized() const {
+ return _playoutFramesIn10MS != 0;
+}
+
+int32_t FileAudioDevice::RecordingIsAvailable(bool& available) {
+ if (_record_index == 0) {
+ available = true;
+ return _record_index;
+ }
+ available = false;
+ return -1;
+}
+
+int32_t FileAudioDevice::InitRecording() {
+ MutexLock lock(&mutex_);
+
+ if (_recording) {
+ return -1;
+ }
+
+ _recordingFramesIn10MS = static_cast<size_t>(kRecordingFixedSampleRate / 100);
+
+ if (_ptrAudioBuffer) {
+ _ptrAudioBuffer->SetRecordingSampleRate(kRecordingFixedSampleRate);
+ _ptrAudioBuffer->SetRecordingChannels(kRecordingNumChannels);
+ }
+ return 0;
+}
+
+bool FileAudioDevice::RecordingIsInitialized() const {
+ return _recordingFramesIn10MS != 0;
+}
+
+int32_t FileAudioDevice::StartPlayout() {
+ if (_playing) {
+ return 0;
+ }
+
+ _playing = true;
+ _playoutFramesLeft = 0;
+
+ if (!_playoutBuffer) {
+ _playoutBuffer = new int8_t[kPlayoutBufferSize];
+ }
+ if (!_playoutBuffer) {
+ _playing = false;
+ return -1;
+ }
+
+ // PLAYOUT
+ if (!_outputFilename.empty()) {
+ _outputFile = FileWrapper::OpenWriteOnly(_outputFilename);
+ if (!_outputFile.is_open()) {
+ RTC_LOG(LS_ERROR) << "Failed to open playout file: " << _outputFilename;
+ _playing = false;
+ delete[] _playoutBuffer;
+ _playoutBuffer = NULL;
+ return -1;
+ }
+ }
+
+ _ptrThreadPlay = rtc::PlatformThread::SpawnJoinable(
+ [this] {
+ while (PlayThreadProcess()) {
+ }
+ },
+ "webrtc_audio_module_play_thread",
+ rtc::ThreadAttributes().SetPriority(rtc::ThreadPriority::kRealtime));
+
+ RTC_LOG(LS_INFO) << "Started playout capture to output file: "
+ << _outputFilename;
+ return 0;
+}
+
+int32_t FileAudioDevice::StopPlayout() {
+ {
+ MutexLock lock(&mutex_);
+ _playing = false;
+ }
+
+  // Stop the playout thread first; `_playing` was cleared under the lock
+  // above, so the thread's next PlayThreadProcess() iteration will exit.
+ if (!_ptrThreadPlay.empty())
+ _ptrThreadPlay.Finalize();
+
+ MutexLock lock(&mutex_);
+
+ _playoutFramesLeft = 0;
+ delete[] _playoutBuffer;
+ _playoutBuffer = NULL;
+ _outputFile.Close();
+
+ RTC_LOG(LS_INFO) << "Stopped playout capture to output file: "
+ << _outputFilename;
+ return 0;
+}
+
+bool FileAudioDevice::Playing() const {
+ return _playing;
+}
+
+int32_t FileAudioDevice::StartRecording() {
+ _recording = true;
+
+ // Make sure we only create the buffer once.
+ _recordingBufferSizeIn10MS =
+ _recordingFramesIn10MS * kRecordingNumChannels * 2;
+ if (!_recordingBuffer) {
+ _recordingBuffer = new int8_t[_recordingBufferSizeIn10MS];
+ }
+
+ if (!_inputFilename.empty()) {
+ _inputFile = FileWrapper::OpenReadOnly(_inputFilename);
+ if (!_inputFile.is_open()) {
+ RTC_LOG(LS_ERROR) << "Failed to open audio input file: "
+ << _inputFilename;
+ _recording = false;
+ delete[] _recordingBuffer;
+ _recordingBuffer = NULL;
+ return -1;
+ }
+ }
+
+ _ptrThreadRec = rtc::PlatformThread::SpawnJoinable(
+ [this] {
+ while (RecThreadProcess()) {
+ }
+ },
+ "webrtc_audio_module_capture_thread",
+ rtc::ThreadAttributes().SetPriority(rtc::ThreadPriority::kRealtime));
+
+ RTC_LOG(LS_INFO) << "Started recording from input file: " << _inputFilename;
+
+ return 0;
+}
+
+int32_t FileAudioDevice::StopRecording() {
+ {
+ MutexLock lock(&mutex_);
+ _recording = false;
+ }
+
+ if (!_ptrThreadRec.empty())
+ _ptrThreadRec.Finalize();
+
+ MutexLock lock(&mutex_);
+ _recordingFramesLeft = 0;
+ if (_recordingBuffer) {
+ delete[] _recordingBuffer;
+ _recordingBuffer = NULL;
+ }
+ _inputFile.Close();
+
+ RTC_LOG(LS_INFO) << "Stopped recording from input file: " << _inputFilename;
+ return 0;
+}
+
+bool FileAudioDevice::Recording() const {
+ return _recording;
+}
+
+int32_t FileAudioDevice::InitSpeaker() {
+ return -1;
+}
+
+bool FileAudioDevice::SpeakerIsInitialized() const {
+ return false;
+}
+
+int32_t FileAudioDevice::InitMicrophone() {
+ return 0;
+}
+
+bool FileAudioDevice::MicrophoneIsInitialized() const {
+ return true;
+}
+
+int32_t FileAudioDevice::SpeakerVolumeIsAvailable(bool& available) {
+ return -1;
+}
+
+int32_t FileAudioDevice::SetSpeakerVolume(uint32_t volume) {
+ return -1;
+}
+
+int32_t FileAudioDevice::SpeakerVolume(uint32_t& volume) const {
+ return -1;
+}
+
+int32_t FileAudioDevice::MaxSpeakerVolume(uint32_t& maxVolume) const {
+ return -1;
+}
+
+int32_t FileAudioDevice::MinSpeakerVolume(uint32_t& minVolume) const {
+ return -1;
+}
+
+int32_t FileAudioDevice::MicrophoneVolumeIsAvailable(bool& available) {
+ return -1;
+}
+
+int32_t FileAudioDevice::SetMicrophoneVolume(uint32_t volume) {
+ return -1;
+}
+
+int32_t FileAudioDevice::MicrophoneVolume(uint32_t& volume) const {
+ return -1;
+}
+
+int32_t FileAudioDevice::MaxMicrophoneVolume(uint32_t& maxVolume) const {
+ return -1;
+}
+
+int32_t FileAudioDevice::MinMicrophoneVolume(uint32_t& minVolume) const {
+ return -1;
+}
+
+int32_t FileAudioDevice::SpeakerMuteIsAvailable(bool& available) {
+ return -1;
+}
+
+int32_t FileAudioDevice::SetSpeakerMute(bool enable) {
+ return -1;
+}
+
+int32_t FileAudioDevice::SpeakerMute(bool& enabled) const {
+ return -1;
+}
+
+int32_t FileAudioDevice::MicrophoneMuteIsAvailable(bool& available) {
+ return -1;
+}
+
+int32_t FileAudioDevice::SetMicrophoneMute(bool enable) {
+ return -1;
+}
+
+int32_t FileAudioDevice::MicrophoneMute(bool& enabled) const {
+ return -1;
+}
+
+int32_t FileAudioDevice::StereoPlayoutIsAvailable(bool& available) {
+ available = true;
+ return 0;
+}
+int32_t FileAudioDevice::SetStereoPlayout(bool enable) {
+ return 0;
+}
+
+int32_t FileAudioDevice::StereoPlayout(bool& enabled) const {
+ enabled = true;
+ return 0;
+}
+
+int32_t FileAudioDevice::StereoRecordingIsAvailable(bool& available) {
+ available = true;
+ return 0;
+}
+
+int32_t FileAudioDevice::SetStereoRecording(bool enable) {
+ return 0;
+}
+
+int32_t FileAudioDevice::StereoRecording(bool& enabled) const {
+ enabled = true;
+ return 0;
+}
+
+int32_t FileAudioDevice::PlayoutDelay(uint16_t& delayMS) const {
+ return 0;
+}
+
+void FileAudioDevice::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
+ MutexLock lock(&mutex_);
+
+ _ptrAudioBuffer = audioBuffer;
+
+ // Inform the AudioBuffer about default settings for this implementation.
+ // Set all values to zero here since the actual settings will be done by
+ // InitPlayout and InitRecording later.
+ _ptrAudioBuffer->SetRecordingSampleRate(0);
+ _ptrAudioBuffer->SetPlayoutSampleRate(0);
+ _ptrAudioBuffer->SetRecordingChannels(0);
+ _ptrAudioBuffer->SetPlayoutChannels(0);
+}
+
+bool FileAudioDevice::PlayThreadProcess() {
+ if (!_playing) {
+ return false;
+ }
+ int64_t currentTime = rtc::TimeMillis();
+ mutex_.Lock();
+
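+  // Pull a new 10 ms chunk of playout audio at most once per 10 ms interval.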
+ if (_lastCallPlayoutMillis == 0 ||
+ currentTime - _lastCallPlayoutMillis >= 10) {
+ mutex_.Unlock();
+ _ptrAudioBuffer->RequestPlayoutData(_playoutFramesIn10MS);
+ mutex_.Lock();
+
+ _playoutFramesLeft = _ptrAudioBuffer->GetPlayoutData(_playoutBuffer);
+ RTC_DCHECK_EQ(_playoutFramesIn10MS, _playoutFramesLeft);
+ if (_outputFile.is_open()) {
+ _outputFile.Write(_playoutBuffer, kPlayoutBufferSize);
+ }
+ _lastCallPlayoutMillis = currentTime;
+ }
+ _playoutFramesLeft = 0;
+ mutex_.Unlock();
+
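+  // Sleep away the remainder of the 10 ms interval before the next iteration.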
+ int64_t deltaTimeMillis = rtc::TimeMillis() - currentTime;
+ if (deltaTimeMillis < 10) {
+ SleepMs(10 - deltaTimeMillis);
+ }
+
+ return true;
+}
+
+bool FileAudioDevice::RecThreadProcess() {
+ if (!_recording) {
+ return false;
+ }
+
+ int64_t currentTime = rtc::TimeMillis();
+ mutex_.Lock();
+
+ if (_lastCallRecordMillis == 0 || currentTime - _lastCallRecordMillis >= 10) {
+ if (_inputFile.is_open()) {
+ if (_inputFile.Read(_recordingBuffer, kRecordingBufferSize) > 0) {
+ _ptrAudioBuffer->SetRecordedBuffer(_recordingBuffer,
+ _recordingFramesIn10MS);
+ } else {
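+        // End of input file reached; rewind so the input loops indefinitely.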
+ _inputFile.Rewind();
+ }
+ _lastCallRecordMillis = currentTime;
+ mutex_.Unlock();
+ _ptrAudioBuffer->DeliverRecordedData();
+ mutex_.Lock();
+ }
+ }
+
+ mutex_.Unlock();
+
+ int64_t deltaTimeMillis = rtc::TimeMillis() - currentTime;
+ if (deltaTimeMillis < 10) {
+ SleepMs(10 - deltaTimeMillis);
+ }
+
+ return true;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_device/dummy/file_audio_device.h b/third_party/libwebrtc/modules/audio_device/dummy/file_audio_device.h
new file mode 100644
index 0000000000..27979933f2
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/dummy/file_audio_device.h
@@ -0,0 +1,163 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef AUDIO_DEVICE_FILE_AUDIO_DEVICE_H_
+#define AUDIO_DEVICE_FILE_AUDIO_DEVICE_H_
+
+#include <stdio.h>
+
+#include <memory>
+#include <string>
+
+#include "absl/strings/string_view.h"
+#include "modules/audio_device/audio_device_generic.h"
+#include "rtc_base/platform_thread.h"
+#include "rtc_base/synchronization/mutex.h"
+#include "rtc_base/system/file_wrapper.h"
+#include "rtc_base/time_utils.h"
+
+namespace webrtc {
+
+// This is a fake audio device which reads its microphone input from a file
+// and writes its playout audio to another file.
+class FileAudioDevice : public AudioDeviceGeneric {
+ public:
+  // Constructs a file audio device that reads audio from `inputFilename` and
+  // records output audio to `outputFilename`.
+ //
+ // The input file should be a readable 48k stereo raw file, and the output
+ // file should point to a writable location. The output format will also be
+ // 48k stereo raw audio.
+ FileAudioDevice(absl::string_view inputFilename,
+ absl::string_view outputFilename);
+ virtual ~FileAudioDevice();
+
+ // Retrieve the currently utilized audio layer
+ int32_t ActiveAudioLayer(
+ AudioDeviceModule::AudioLayer& audioLayer) const override;
+
+  // Main initialization and termination
+ InitStatus Init() override;
+ int32_t Terminate() override;
+ bool Initialized() const override;
+
+ // Device enumeration
+ int16_t PlayoutDevices() override;
+ int16_t RecordingDevices() override;
+ int32_t PlayoutDeviceName(uint16_t index,
+ char name[kAdmMaxDeviceNameSize],
+ char guid[kAdmMaxGuidSize]) override;
+ int32_t RecordingDeviceName(uint16_t index,
+ char name[kAdmMaxDeviceNameSize],
+ char guid[kAdmMaxGuidSize]) override;
+
+ // Device selection
+ int32_t SetPlayoutDevice(uint16_t index) override;
+ int32_t SetPlayoutDevice(
+ AudioDeviceModule::WindowsDeviceType device) override;
+ int32_t SetRecordingDevice(uint16_t index) override;
+ int32_t SetRecordingDevice(
+ AudioDeviceModule::WindowsDeviceType device) override;
+
+ // Audio transport initialization
+ int32_t PlayoutIsAvailable(bool& available) override;
+ int32_t InitPlayout() override;
+ bool PlayoutIsInitialized() const override;
+ int32_t RecordingIsAvailable(bool& available) override;
+ int32_t InitRecording() override;
+ bool RecordingIsInitialized() const override;
+
+ // Audio transport control
+ int32_t StartPlayout() override;
+ int32_t StopPlayout() override;
+ bool Playing() const override;
+ int32_t StartRecording() override;
+ int32_t StopRecording() override;
+ bool Recording() const override;
+
+ // Audio mixer initialization
+ int32_t InitSpeaker() override;
+ bool SpeakerIsInitialized() const override;
+ int32_t InitMicrophone() override;
+ bool MicrophoneIsInitialized() const override;
+
+ // Speaker volume controls
+ int32_t SpeakerVolumeIsAvailable(bool& available) override;
+ int32_t SetSpeakerVolume(uint32_t volume) override;
+ int32_t SpeakerVolume(uint32_t& volume) const override;
+ int32_t MaxSpeakerVolume(uint32_t& maxVolume) const override;
+ int32_t MinSpeakerVolume(uint32_t& minVolume) const override;
+
+ // Microphone volume controls
+ int32_t MicrophoneVolumeIsAvailable(bool& available) override;
+ int32_t SetMicrophoneVolume(uint32_t volume) override;
+ int32_t MicrophoneVolume(uint32_t& volume) const override;
+ int32_t MaxMicrophoneVolume(uint32_t& maxVolume) const override;
+ int32_t MinMicrophoneVolume(uint32_t& minVolume) const override;
+
+ // Speaker mute control
+ int32_t SpeakerMuteIsAvailable(bool& available) override;
+ int32_t SetSpeakerMute(bool enable) override;
+ int32_t SpeakerMute(bool& enabled) const override;
+
+ // Microphone mute control
+ int32_t MicrophoneMuteIsAvailable(bool& available) override;
+ int32_t SetMicrophoneMute(bool enable) override;
+ int32_t MicrophoneMute(bool& enabled) const override;
+
+ // Stereo support
+ int32_t StereoPlayoutIsAvailable(bool& available) override;
+ int32_t SetStereoPlayout(bool enable) override;
+ int32_t StereoPlayout(bool& enabled) const override;
+ int32_t StereoRecordingIsAvailable(bool& available) override;
+ int32_t SetStereoRecording(bool enable) override;
+ int32_t StereoRecording(bool& enabled) const override;
+
+ // Delay information and control
+ int32_t PlayoutDelay(uint16_t& delayMS) const override;
+
+ void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) override;
+
+ private:
+ static void RecThreadFunc(void*);
+ static void PlayThreadFunc(void*);
+ bool RecThreadProcess();
+ bool PlayThreadProcess();
+
+ int32_t _playout_index;
+ int32_t _record_index;
+ AudioDeviceBuffer* _ptrAudioBuffer;
+ int8_t* _recordingBuffer; // In bytes.
+ int8_t* _playoutBuffer; // In bytes.
+ uint32_t _recordingFramesLeft;
+ uint32_t _playoutFramesLeft;
+ Mutex mutex_;
+
+ size_t _recordingBufferSizeIn10MS;
+ size_t _recordingFramesIn10MS;
+ size_t _playoutFramesIn10MS;
+
+ rtc::PlatformThread _ptrThreadRec;
+ rtc::PlatformThread _ptrThreadPlay;
+
+ bool _playing;
+ bool _recording;
+ int64_t _lastCallPlayoutMillis;
+ int64_t _lastCallRecordMillis;
+
+ FileWrapper _outputFile;
+ FileWrapper _inputFile;
+ std::string _outputFilename;
+ std::string _inputFilename;
+};
+
+} // namespace webrtc
+
+#endif // AUDIO_DEVICE_FILE_AUDIO_DEVICE_H_
diff --git a/third_party/libwebrtc/modules/audio_device/dummy/file_audio_device_factory.cc b/third_party/libwebrtc/modules/audio_device/dummy/file_audio_device_factory.cc
new file mode 100644
index 0000000000..8c41111478
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/dummy/file_audio_device_factory.cc
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_device/dummy/file_audio_device_factory.h"
+
+#include <stdio.h>
+
+#include <cstdlib>
+
+#include "absl/strings/string_view.h"
+#include "modules/audio_device/dummy/file_audio_device.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/string_utils.h"
+
+namespace webrtc {
+
+bool FileAudioDeviceFactory::_isConfigured = false;
+char FileAudioDeviceFactory::_inputAudioFilename[MAX_FILENAME_LEN] = "";
+char FileAudioDeviceFactory::_outputAudioFilename[MAX_FILENAME_LEN] = "";
+
+FileAudioDevice* FileAudioDeviceFactory::CreateFileAudioDevice() {
+ // Bail out here if the files haven't been set explicitly.
+ // audio_device_impl.cc should then fall back to dummy audio.
+ if (!_isConfigured) {
+ RTC_LOG(LS_WARNING)
+ << "WebRTC configured with WEBRTC_DUMMY_FILE_DEVICES but "
+ "no device files supplied. Will fall back to dummy "
+ "audio.";
+
+ return nullptr;
+ }
+ return new FileAudioDevice(_inputAudioFilename, _outputAudioFilename);
+}
+
+void FileAudioDeviceFactory::SetFilenamesToUse(
+ absl::string_view inputAudioFilename,
+ absl::string_view outputAudioFilename) {
+#ifdef WEBRTC_DUMMY_FILE_DEVICES
+ RTC_DCHECK_LT(inputAudioFilename.size(), MAX_FILENAME_LEN);
+ RTC_DCHECK_LT(outputAudioFilename.size(), MAX_FILENAME_LEN);
+
+ // Copy the strings since we don't know the lifetime of the input pointers.
+ rtc::strcpyn(_inputAudioFilename, MAX_FILENAME_LEN, inputAudioFilename);
+ rtc::strcpyn(_outputAudioFilename, MAX_FILENAME_LEN, outputAudioFilename);
+ _isConfigured = true;
+#else
+ // Sanity: must be compiled with the right define to run this.
+ printf(
+ "Trying to use dummy file devices, but is not compiled "
+ "with WEBRTC_DUMMY_FILE_DEVICES. Bailing out.\n");
+ std::exit(1);
+#endif
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_device/dummy/file_audio_device_factory.h b/third_party/libwebrtc/modules/audio_device/dummy/file_audio_device_factory.h
new file mode 100644
index 0000000000..18f9388f21
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/dummy/file_audio_device_factory.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef AUDIO_DEVICE_FILE_AUDIO_DEVICE_FACTORY_H_
+#define AUDIO_DEVICE_FILE_AUDIO_DEVICE_FACTORY_H_
+
+#include <stdint.h>
+
+#include "absl/strings/string_view.h"
+
+namespace webrtc {
+
+class FileAudioDevice;
+
+// This class is used by audio_device_impl.cc when WebRTC is compiled with
+// WEBRTC_DUMMY_FILE_DEVICES. The application must include this file and set the
+// filenames to use before the audio device module is initialized. This is
+// intended for test tools which use the audio device module.
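+//
+// Example (hypothetical file paths, shown only as a usage sketch):
+//
+//   FileAudioDeviceFactory::SetFilenamesToUse("/tmp/in.pcm", "/tmp/out.pcm");
+//   // ...then create the audio device module as usual.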
+class FileAudioDeviceFactory {
+ public:
+ static FileAudioDevice* CreateFileAudioDevice();
+
+ // The input file must be a readable 48k stereo raw file. The output
+ // file must be writable. The strings will be copied.
+ static void SetFilenamesToUse(absl::string_view inputAudioFilename,
+ absl::string_view outputAudioFilename);
+
+ private:
+ enum : uint32_t { MAX_FILENAME_LEN = 512 };
+ static bool _isConfigured;
+ static char _inputAudioFilename[MAX_FILENAME_LEN];
+ static char _outputAudioFilename[MAX_FILENAME_LEN];
+};
+
+} // namespace webrtc
+
+#endif // AUDIO_DEVICE_FILE_AUDIO_DEVICE_FACTORY_H_
diff --git a/third_party/libwebrtc/modules/audio_device/fine_audio_buffer.cc b/third_party/libwebrtc/modules/audio_device/fine_audio_buffer.cc
new file mode 100644
index 0000000000..86240da196
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/fine_audio_buffer.cc
@@ -0,0 +1,130 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_device/fine_audio_buffer.h"
+
+#include <cstdint>
+#include <cstring>
+
+#include "modules/audio_device/audio_device_buffer.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/numerics/safe_conversions.h"
+
+namespace webrtc {
+
+FineAudioBuffer::FineAudioBuffer(AudioDeviceBuffer* audio_device_buffer)
+ : audio_device_buffer_(audio_device_buffer),
+ playout_samples_per_channel_10ms_(rtc::dchecked_cast<size_t>(
+ audio_device_buffer->PlayoutSampleRate() * 10 / 1000)),
+ record_samples_per_channel_10ms_(rtc::dchecked_cast<size_t>(
+ audio_device_buffer->RecordingSampleRate() * 10 / 1000)),
+ playout_channels_(audio_device_buffer->PlayoutChannels()),
+ record_channels_(audio_device_buffer->RecordingChannels()) {
+ RTC_DCHECK(audio_device_buffer_);
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ if (IsReadyForPlayout()) {
+ RTC_DLOG(LS_INFO) << "playout_samples_per_channel_10ms: "
+ << playout_samples_per_channel_10ms_;
+ RTC_DLOG(LS_INFO) << "playout_channels: " << playout_channels_;
+ }
+ if (IsReadyForRecord()) {
+ RTC_DLOG(LS_INFO) << "record_samples_per_channel_10ms: "
+ << record_samples_per_channel_10ms_;
+ RTC_DLOG(LS_INFO) << "record_channels: " << record_channels_;
+ }
+}
+
+FineAudioBuffer::~FineAudioBuffer() {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+}
+
+void FineAudioBuffer::ResetPlayout() {
+ playout_buffer_.Clear();
+}
+
+void FineAudioBuffer::ResetRecord() {
+ record_buffer_.Clear();
+}
+
+bool FineAudioBuffer::IsReadyForPlayout() const {
+ return playout_samples_per_channel_10ms_ > 0 && playout_channels_ > 0;
+}
+
+bool FineAudioBuffer::IsReadyForRecord() const {
+ return record_samples_per_channel_10ms_ > 0 && record_channels_ > 0;
+}
+
+void FineAudioBuffer::GetPlayoutData(rtc::ArrayView<int16_t> audio_buffer,
+ int playout_delay_ms) {
+ RTC_DCHECK(IsReadyForPlayout());
+ // Ask WebRTC for new data in chunks of 10ms until we have enough to
+ // fulfill the request. It is possible that the buffer already contains
+ // enough samples from the last round.
+ while (playout_buffer_.size() < audio_buffer.size()) {
+    // Get 10ms of decoded audio from WebRTC. The ADB knows the number of
+    // channels, so we can ask for the number of samples per channel here.
+ if (audio_device_buffer_->RequestPlayoutData(
+ playout_samples_per_channel_10ms_) ==
+ static_cast<int32_t>(playout_samples_per_channel_10ms_)) {
+ // Append 10ms to the end of the local buffer taking number of channels
+ // into account.
+ const size_t num_elements_10ms =
+ playout_channels_ * playout_samples_per_channel_10ms_;
+ const size_t written_elements = playout_buffer_.AppendData(
+ num_elements_10ms, [&](rtc::ArrayView<int16_t> buf) {
+ const size_t samples_per_channel_10ms =
+ audio_device_buffer_->GetPlayoutData(buf.data());
+ return playout_channels_ * samples_per_channel_10ms;
+ });
+ RTC_DCHECK_EQ(num_elements_10ms, written_elements);
+ } else {
+ // Provide silence if AudioDeviceBuffer::RequestPlayoutData() fails.
+ // Can e.g. happen when an AudioTransport has not been registered.
+ const size_t num_bytes = audio_buffer.size() * sizeof(int16_t);
+ std::memset(audio_buffer.data(), 0, num_bytes);
+ return;
+ }
+ }
+
+ // Provide the requested number of bytes to the consumer.
+ const size_t num_bytes = audio_buffer.size() * sizeof(int16_t);
+ memcpy(audio_buffer.data(), playout_buffer_.data(), num_bytes);
+ // Move remaining samples to start of buffer to prepare for next round.
+ memmove(playout_buffer_.data(), playout_buffer_.data() + audio_buffer.size(),
+ (playout_buffer_.size() - audio_buffer.size()) * sizeof(int16_t));
+ playout_buffer_.SetSize(playout_buffer_.size() - audio_buffer.size());
+  // Cache the playout latency for use in DeliverRecordedData().
+ playout_delay_ms_ = playout_delay_ms;
+}
+
+void FineAudioBuffer::DeliverRecordedData(
+ rtc::ArrayView<const int16_t> audio_buffer,
+ int record_delay_ms) {
+ RTC_DCHECK(IsReadyForRecord());
+ // Always append new data and grow the buffer when needed.
+ record_buffer_.AppendData(audio_buffer.data(), audio_buffer.size());
+ // Consume samples from buffer in chunks of 10ms until there is not
+ // enough data left. The number of remaining samples in the cache is given by
+ // the new size of the internal `record_buffer_`.
+ const size_t num_elements_10ms =
+ record_channels_ * record_samples_per_channel_10ms_;
+ while (record_buffer_.size() >= num_elements_10ms) {
+ audio_device_buffer_->SetRecordedBuffer(record_buffer_.data(),
+ record_samples_per_channel_10ms_);
+ audio_device_buffer_->SetVQEData(playout_delay_ms_, record_delay_ms);
+ audio_device_buffer_->DeliverRecordedData();
+ memmove(record_buffer_.data(), record_buffer_.data() + num_elements_10ms,
+ (record_buffer_.size() - num_elements_10ms) * sizeof(int16_t));
+ record_buffer_.SetSize(record_buffer_.size() - num_elements_10ms);
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_device/fine_audio_buffer.h b/third_party/libwebrtc/modules/audio_device/fine_audio_buffer.h
new file mode 100644
index 0000000000..a6c3042bb2
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/fine_audio_buffer.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_DEVICE_FINE_AUDIO_BUFFER_H_
+#define MODULES_AUDIO_DEVICE_FINE_AUDIO_BUFFER_H_
+
+#include "api/array_view.h"
+#include "rtc_base/buffer.h"
+
+namespace webrtc {
+
+class AudioDeviceBuffer;
+
+// FineAudioBuffer takes an AudioDeviceBuffer (ADB) which deals with 16-bit PCM
+// audio samples corresponding to 10ms of data. It then allows this data to be
+// pulled at a finer or coarser granularity; i.e., by interacting with this
+// class instead of directly with the AudioDeviceBuffer, one can ask for any
+// number of audio data samples. This class also ensures that audio data can be
+// delivered to the ADB in 10ms chunks when the size of the provided audio
+// buffers differs from 10ms.
+// As an example: calling DeliverRecordedData() with 5ms buffers will deliver
+// accumulated 10ms worth of data to the ADB every second call.
+class FineAudioBuffer {
+ public:
+  // `audio_device_buffer` is a buffer that provides 10ms of audio data.
+ FineAudioBuffer(AudioDeviceBuffer* audio_device_buffer);
+ ~FineAudioBuffer();
+
+ // Clears buffers and counters dealing with playout and/or recording.
+ void ResetPlayout();
+ void ResetRecord();
+
+  // Utility methods that return true if valid parameters were acquired at
+  // construction.
+ bool IsReadyForPlayout() const;
+ bool IsReadyForRecord() const;
+
+  // Copies audio samples into `audio_buffer`, where the number of requested
+  // elements is specified by `audio_buffer.size()`. The producer will always
+  // fill up the audio buffer and, if no audio exists, the buffer will contain
+ // silence instead. The provided delay estimate in `playout_delay_ms` should
+ // contain an estimate of the latency between when an audio frame is read from
+ // WebRTC and when it is played out on the speaker.
+ void GetPlayoutData(rtc::ArrayView<int16_t> audio_buffer,
+ int playout_delay_ms);
+
+ // Consumes the audio data in `audio_buffer` and sends it to the WebRTC layer
+ // in chunks of 10ms. The sum of the provided delay estimate in
+ // `record_delay_ms` and the latest `playout_delay_ms` in GetPlayoutData()
+ // are given to the AEC in the audio processing module.
+ // They can be fixed values on most platforms and they are ignored if an
+ // external (hardware/built-in) AEC is used.
+ // Example: buffer size is 5ms => call #1 stores 5ms of data, call #2 stores
+ // 5ms of data and sends a total of 10ms to WebRTC and clears the internal
+ // cache. Call #3 restarts the scheme above.
+ void DeliverRecordedData(rtc::ArrayView<const int16_t> audio_buffer,
+ int record_delay_ms);
+
+ private:
+ // Device buffer that works with 10ms chunks of data both for playout and
+ // for recording. I.e., the WebRTC side will always be asked for audio to be
+ // played out in 10ms chunks and recorded audio will be sent to WebRTC in
+ // 10ms chunks as well. This raw pointer is owned by the constructor of this
+ // class and the owner must ensure that the pointer is valid during the life-
+ // time of this object.
+ AudioDeviceBuffer* const audio_device_buffer_;
+ // Number of audio samples per channel per 10ms. Set once at construction
+ // based on parameters in `audio_device_buffer`.
+ const size_t playout_samples_per_channel_10ms_;
+ const size_t record_samples_per_channel_10ms_;
+ // Number of audio channels. Set once at construction based on parameters in
+ // `audio_device_buffer`.
+ const size_t playout_channels_;
+ const size_t record_channels_;
+ // Storage for output samples from which a consumer can read audio buffers
+ // in any size using GetPlayoutData().
+ rtc::BufferT<int16_t> playout_buffer_;
+  // Storage for input samples that are about to be delivered to the WebRTC
+  // ADB or that remain from the last successful delivery of a 10ms audio
+  // buffer.
+ rtc::BufferT<int16_t> record_buffer_;
+ // Contains latest delay estimate given to GetPlayoutData().
+ int playout_delay_ms_ = 0;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_DEVICE_FINE_AUDIO_BUFFER_H_
diff --git a/third_party/libwebrtc/modules/audio_device/fine_audio_buffer_unittest.cc b/third_party/libwebrtc/modules/audio_device/fine_audio_buffer_unittest.cc
new file mode 100644
index 0000000000..36ea85f7dd
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/fine_audio_buffer_unittest.cc
@@ -0,0 +1,158 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_device/fine_audio_buffer.h"
+
+#include <limits.h>
+
+#include <memory>
+
+#include "api/array_view.h"
+#include "api/task_queue/default_task_queue_factory.h"
+#include "modules/audio_device/mock_audio_device_buffer.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+using ::testing::_;
+using ::testing::AtLeast;
+using ::testing::InSequence;
+using ::testing::Return;
+
+namespace webrtc {
+
+const int kSampleRate = 44100;
+const int kChannels = 2;
+const int kSamplesPer10Ms = kSampleRate * 10 / 1000;
+
+// The fake audio data is 0,1,..SCHAR_MAX-1,0,1,... This is to make it easy
+// to detect errors. This function verifies that the buffers contain such data.
+// E.g. if there are two buffers of size 3, buffer 1 would contain 0,1,2 and
+// buffer 2 would contain 3,4,5. Note that SCHAR_MAX is 127 so wrap-around
+// will happen.
+// `buffer` is the audio buffer to verify.
+bool VerifyBuffer(const int16_t* buffer, int buffer_number, int size) {
+ int start_value = (buffer_number * size) % SCHAR_MAX;
+ for (int i = 0; i < size; ++i) {
+ if (buffer[i] != (i + start_value) % SCHAR_MAX) {
+ return false;
+ }
+ }
+ return true;
+}
+
+// This function replaces the real AudioDeviceBuffer::GetPlayoutData when it's
+// called (which happens implicitly when FineAudioBuffer::GetPlayoutData is
+// called). It writes the sequence 0,1,..SCHAR_MAX-1,0,1,... to the buffer.
+// Note that this is likely a buffer of a different size than the one
+// VerifyBuffer verifies.
+// `iteration` is the number of calls made to UpdateBuffer prior to this call.
+// `samples_per_10_ms` is the number of samples that should be written to the
+// buffer (`arg0`).
+ACTION_P2(UpdateBuffer, iteration, samples_per_10_ms) {
+ int16_t* buffer = static_cast<int16_t*>(arg0);
+ int start_value = (iteration * samples_per_10_ms) % SCHAR_MAX;
+ for (int i = 0; i < samples_per_10_ms; ++i) {
+ buffer[i] = (i + start_value) % SCHAR_MAX;
+ }
+ // Should return samples per channel.
+ return samples_per_10_ms / kChannels;
+}
+
+// Writes a periodic ramp pattern to the supplied `buffer`. See UpdateBuffer()
+// for details.
+void UpdateInputBuffer(int16_t* buffer, int iteration, int size) {
+ int start_value = (iteration * size) % SCHAR_MAX;
+ for (int i = 0; i < size; ++i) {
+ buffer[i] = (i + start_value) % SCHAR_MAX;
+ }
+}
+
+// Action macro which verifies that the recorded 10ms chunk of audio data
+// (in `arg0`) contains the correct reference values even if they have been
+// supplied using a buffer size that is smaller or larger than 10ms.
+// See VerifyBuffer() for details.
+ACTION_P2(VerifyInputBuffer, iteration, samples_per_10_ms) {
+ const int16_t* buffer = static_cast<const int16_t*>(arg0);
+ int start_value = (iteration * samples_per_10_ms) % SCHAR_MAX;
+ for (int i = 0; i < samples_per_10_ms; ++i) {
+ EXPECT_EQ(buffer[i], (i + start_value) % SCHAR_MAX);
+ }
+ return 0;
+}
+
+void RunFineBufferTest(int frame_size_in_samples) {
+ const int kFrameSizeSamples = frame_size_in_samples;
+ const int kNumberOfFrames = 5;
+ // Ceiling of integer division: 1 + ((x - 1) / y)
+ const int kNumberOfUpdateBufferCalls =
+ 1 + ((kNumberOfFrames * frame_size_in_samples - 1) / kSamplesPer10Ms);
+
+ auto task_queue_factory = CreateDefaultTaskQueueFactory();
+ MockAudioDeviceBuffer audio_device_buffer(task_queue_factory.get());
+ audio_device_buffer.SetPlayoutSampleRate(kSampleRate);
+ audio_device_buffer.SetPlayoutChannels(kChannels);
+ audio_device_buffer.SetRecordingSampleRate(kSampleRate);
+ audio_device_buffer.SetRecordingChannels(kChannels);
+
+ EXPECT_CALL(audio_device_buffer, RequestPlayoutData(_))
+ .WillRepeatedly(Return(kSamplesPer10Ms));
+ {
+ InSequence s;
+ for (int i = 0; i < kNumberOfUpdateBufferCalls; ++i) {
+ EXPECT_CALL(audio_device_buffer, GetPlayoutData(_))
+ .WillOnce(UpdateBuffer(i, kChannels * kSamplesPer10Ms))
+ .RetiresOnSaturation();
+ }
+ }
+ {
+ InSequence s;
+ for (int j = 0; j < kNumberOfUpdateBufferCalls - 1; ++j) {
+ EXPECT_CALL(audio_device_buffer, SetRecordedBuffer(_, kSamplesPer10Ms))
+ .WillOnce(VerifyInputBuffer(j, kChannels * kSamplesPer10Ms))
+ .RetiresOnSaturation();
+ }
+ }
+ EXPECT_CALL(audio_device_buffer, SetVQEData(_, _))
+ .Times(kNumberOfUpdateBufferCalls - 1);
+ EXPECT_CALL(audio_device_buffer, DeliverRecordedData())
+ .Times(kNumberOfUpdateBufferCalls - 1)
+ .WillRepeatedly(Return(0));
+
+ FineAudioBuffer fine_buffer(&audio_device_buffer);
+ std::unique_ptr<int16_t[]> out_buffer(
+ new int16_t[kChannels * kFrameSizeSamples]);
+ std::unique_ptr<int16_t[]> in_buffer(
+ new int16_t[kChannels * kFrameSizeSamples]);
+
+ for (int i = 0; i < kNumberOfFrames; ++i) {
+ fine_buffer.GetPlayoutData(
+ rtc::ArrayView<int16_t>(out_buffer.get(),
+ kChannels * kFrameSizeSamples),
+ 0);
+ EXPECT_TRUE(
+ VerifyBuffer(out_buffer.get(), i, kChannels * kFrameSizeSamples));
+ UpdateInputBuffer(in_buffer.get(), i, kChannels * kFrameSizeSamples);
+ fine_buffer.DeliverRecordedData(
+ rtc::ArrayView<const int16_t>(in_buffer.get(),
+ kChannels * kFrameSizeSamples),
+ 0);
+ }
+}
+
+TEST(FineBufferTest, BufferLessThan10ms) {
+ const int kFrameSizeSamples = kSamplesPer10Ms - 50;
+ RunFineBufferTest(kFrameSizeSamples);
+}
+
+TEST(FineBufferTest, GreaterThan10ms) {
+ const int kFrameSizeSamples = kSamplesPer10Ms + 50;
+ RunFineBufferTest(kFrameSizeSamples);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_device/g3doc/audio_device_module.md b/third_party/libwebrtc/modules/audio_device/g3doc/audio_device_module.md
new file mode 100644
index 0000000000..101b2e4cc8
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/g3doc/audio_device_module.md
@@ -0,0 +1,171 @@
+# Audio Device Module (ADM)
+
+<?% config.freshness.owner = 'henrika' %?>
+<?% config.freshness.reviewed = '2021-04-12' %?>
+
+## Overview
+
+The ADM is responsible for driving input (microphone) and output (speaker)
+audio in WebRTC; its API is defined in [audio_device.h][19].
+
+Main functions of the ADM are:
+
+* Initialization and termination of native audio libraries.
+* Registration of an [AudioTransport object][16] which handles audio callbacks
+ for audio in both directions.
+* Device enumeration and selection (only for Linux, Windows and Mac OSX).
+* Start/Stop physical audio streams:
+ * Recording audio from the selected microphone, and
+ * playing out audio on the selected speaker.
+* Level control of the active audio streams.
+* Control of built-in audio effects (Acoustic Echo Cancellation (AEC),
+  Automatic Gain Control (AGC) and Noise Suppression (NS)) for Android and
+  iOS.
+
+ADM implementations reside at two different locations in the WebRTC repository:
+`/modules/audio_device/` and `/sdk/`. The latest implementations for [iOS][20]
+and [Android][21] can be found under `/sdk/`. `/modules/audio_device/` contains
+older versions for mobile platforms and also implementations for desktop
+platforms such as [Linux][22], [Windows][23] and [Mac OSX][24]. This document
+focuses on the parts in `/modules/audio_device/`, but implementation-specific
+details such as threading models are omitted to keep the descriptions as
+simple as possible.
+
+By default, the ADM in WebRTC is created in [`WebRtcVoiceEngine::Init`][1] but
+an external implementation can also be injected using
+[`rtc::CreatePeerConnectionFactory`][25]. An example of where an external ADM is
+injected can be found in [PeerConnectionInterfaceTest][26] where a so-called
+[fake ADM][29] is utilized to avoid hardware dependency in a gtest. Clients can
+also inject their own ADMs in situations where functionality is needed that is
+not provided by the default implementations.
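+
+As a rough sketch (assuming the `AudioDeviceModule::Create` factory declared in
+[audio_device.h][19] and a default task queue factory), an internal ADM can
+also be created explicitly:
+
+```
+auto task_queue_factory = webrtc::CreateDefaultTaskQueueFactory();
+rtc::scoped_refptr<webrtc::AudioDeviceModule> adm =
+    webrtc::AudioDeviceModule::Create(
+        webrtc::AudioDeviceModule::kPlatformDefaultAudio,
+        task_queue_factory.get());
+```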
+
+## Background
+
+This section contains a historical background of the ADM API.
+
+The ADM interface is old and has undergone many changes over the years. It used
+to be much more granular but it still contains more than 50 methods and is
+implemented on several different hardware platforms.
+
+Some APIs are not implemented on all platforms, and the same functionality can
+be spread differently across methods from one platform to another.
+
+The most up-to-date implementations of the ADM interface are for [iOS][27] and
+for [Android][28].
+
+The desktop versions have not been updated to comply with the latest
+[C++ style guide](https://chromium.googlesource.com/chromium/src/+/main/styleguide/c++/c++.md),
+and more work is also needed to improve their performance and stability.
+
+## WebRtcVoiceEngine
+
+[`WebRtcVoiceEngine`][2] does not utilize all methods of the ADM but it still
+serves as the best example of its architecture and how to use it. For a more
+detailed view of all methods in the ADM interface, see [ADM unit tests][3].
+
+Assuming that an external ADM implementation is not injected, a default - or
+internal - ADM is created in [`WebRtcVoiceEngine::Init`][1] using
+[`AudioDeviceModule::Create`][4].
+
+Basic initialization is done using a utility method called
+[`adm_helpers::Init`][5] which calls fundamental ADM APIs like the following
+(a rough sketch of the sequence is shown after the list):
+
+* [`AudioDeviceModule::Init`][6] - initializes the native audio parts required
+  for each platform.
+* [`AudioDeviceModule::SetPlayoutDevice`][7] - specifies which speaker to use
+  for playing out audio using an `index` retrieved by the corresponding
+  enumeration method [`AudioDeviceModule::PlayoutDeviceName`][8].
+* [`AudioDeviceModule::SetRecordingDevice`][9] - specifies which microphone to
+  use for recording audio using an `index` retrieved by the corresponding
+  enumeration method [`AudioDeviceModule::RecordingDeviceName`][10].
+* [`AudioDeviceModule::InitSpeaker`][11] - sets up the parts of the ADM needed
+  to use the selected output device.
+* [`AudioDeviceModule::InitMicrophone`][12] - sets up the parts of the ADM
+  needed to use the selected input device.
+* [`AudioDeviceModule::SetStereoPlayout`][13] - enables playout in stereo if
+  the selected audio device supports it.
+* [`AudioDeviceModule::SetStereoRecording`][14] - enables recording in stereo
+  if the selected audio device supports it.
+
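+A rough sketch of that sequence (device index `0` is an arbitrary example, and
+real code should check every return value):
+
+```
+adm->Init();
+adm->SetPlayoutDevice(0);     // index from PlayoutDeviceName() enumeration
+adm->SetRecordingDevice(0);   // index from RecordingDeviceName() enumeration
+adm->InitSpeaker();
+adm->InitMicrophone();
+adm->SetStereoPlayout(true);  // only if the selected device supports stereo
+adm->SetStereoRecording(true);
+```
+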
+[`WebRtcVoiceEngine::Init`][1] also calls
+[`AudioDeviceModule::RegisterAudioCallback`][15] to register an existing
+[AudioTransport][16] implementation which handles audio callbacks in both
+directions and therefore serves as the bridge between the native ADM and the
+upper WebRTC layers.
+
+Recorded audio samples are delivered from the ADM to the `WebRtcVoiceEngine`
+(which owns the `AudioTransport` object) via
+[`AudioTransport::RecordedDataIsAvailable`][17]:
+
+```
+int32_t RecordedDataIsAvailable(const void* audioSamples, size_t nSamples, size_t nBytesPerSample,
+ size_t nChannels, uint32_t samplesPerSec, uint32_t totalDelayMS,
+ int32_t clockDrift, uint32_t currentMicLevel, bool keyPressed,
+ uint32_t& newMicLevel)
+```
+
+Decoded audio samples ready to be played out are delivered by the
+`WebRtcVoiceEngine` to the ADM via [`AudioTransport::NeedMorePlayData`][18]:
+
+```
+int32_t NeedMorePlayData(size_t nSamples, size_t nBytesPerSample, size_t nChannels, int32_t samplesPerSec,
+ void* audioSamples, size_t& nSamplesOut,
+ int64_t* elapsed_time_ms, int64_t* ntp_time_ms)
+```
+
+Audio samples are 16-bit [linear PCM](https://wiki.multimedia.cx/index.php/PCM)
+with the channels interleaved (for stereo: L0, R0, L1, R1, ...).
+
+`WebRtcVoiceEngine` also owns an [`AudioState`][30] member, and this class is
+used as a helper to start and stop audio to and from the ADM. To initialize
+and start recording, it calls:
+
+* [`AudioDeviceModule::InitRecording`][31]
+* [`AudioDeviceModule::StartRecording`][32]
+
+and to initialize and start playout:
+
+* [`AudioDeviceModule::InitPlayout`][33]
+* [`AudioDeviceModule::StartPlayout`][34]
+
+Finally, the corresponding stop methods [`AudioDeviceModule::StopRecording`][35]
+and [`AudioDeviceModule::StopPlayout`][36] are called, followed by
+[`AudioDeviceModule::Terminate`][37].
+
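+Putting it together, a typical start/stop lifecycle looks roughly like this (a
+sketch, not a complete program):
+
+```
+adm->InitRecording();
+adm->StartRecording();
+adm->InitPlayout();
+adm->StartPlayout();
+// ... audio flows through the registered AudioTransport ...
+adm->StopRecording();
+adm->StopPlayout();
+adm->Terminate();
+```
+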
+[1]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/media/engine/webrtc_voice_engine.cc;l=314;drc=f7b1b95f11c74cb5369fdd528b73c70a50f2e206
+[2]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/media/engine/webrtc_voice_engine.h;l=48;drc=d15a575ec3528c252419149d35977e55269d8a41
+[3]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_device/audio_device_unittest.cc;l=1;drc=d15a575ec3528c252419149d35977e55269d8a41
+[4]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_device/include/audio_device.h;l=46;drc=eb8c4ca608486add9800f6bfb7a8ba3cf23e738e
+[5]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/media/engine/adm_helpers.h;drc=2222a80e79ae1ef5cb9510ec51d3868be75f47a2
+[6]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_device/include/audio_device.h;l=62;drc=9438fb3fff97c803d1ead34c0e4f223db168526f
+[7]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_device/include/audio_device.h;l=77;drc=9438fb3fff97c803d1ead34c0e4f223db168526f
+[8]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_device/include/audio_device.h;l=69;drc=9438fb3fff97c803d1ead34c0e4f223db168526f
+[9]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_device/include/audio_device.h;l=79;drc=9438fb3fff97c803d1ead34c0e4f223db168526f
+[10]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_device/include/audio_device.h;l=72;drc=9438fb3fff97c803d1ead34c0e4f223db168526f
+[11]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_device/include/audio_device.h;l=99;drc=9438fb3fff97c803d1ead34c0e4f223db168526f
+[12]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_device/include/audio_device.h;l=101;drc=9438fb3fff97c803d1ead34c0e4f223db168526f
+[13]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_device/include/audio_device.h;l=130;drc=9438fb3fff97c803d1ead34c0e4f223db168526f
+[14]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_device/include/audio_device.h;l=133;drc=9438fb3fff97c803d1ead34c0e4f223db168526f
+[15]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_device/include/audio_device.h;l=59;drc=9438fb3fff97c803d1ead34c0e4f223db168526f
+[16]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_device/include/audio_device_defines.h;l=34;drc=9438fb3fff97c803d1ead34c0e4f223db168526f
+[17]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_device/include/audio_device_defines.h;l=36;drc=9438fb3fff97c803d1ead34c0e4f223db168526f
+[18]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_device/include/audio_device_defines.h;l=48;drc=9438fb3fff97c803d1ead34c0e4f223db168526f
+[19]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_device/include/audio_device.h;drc=eb8c4ca608486add9800f6bfb7a8ba3cf23e738e
+[20]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/sdk/objc/native/api/audio_device_module.h;drc=76443eafa9375374d9f1d23da2b913f2acac6ac2
+[21]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/sdk/android/src/jni/audio_device/audio_device_module.h;drc=bbeb10925eb106eeed6143ccf571bc438ec22ce1
+[22]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_device/linux/;drc=d15a575ec3528c252419149d35977e55269d8a41
+[23]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_device/win/;drc=d15a575ec3528c252419149d35977e55269d8a41
+[24]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_device/mac/;drc=3b68aa346a5d3483c3448852d19d91723846825c
+[25]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/api/create_peerconnection_factory.h;l=45;drc=09ceed2165137c4bea4e02e8d3db31970d0bf273
+[26]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/pc/peer_connection_interface_unittest.cc;l=692;drc=2efb8a5ec61b1b87475d046c03d20244f53b14b6
+[27]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/sdk/objc/native/api/audio_device_module.h;drc=76443eafa9375374d9f1d23da2b913f2acac6ac2
+[28]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/sdk/android/src/jni/audio_device/audio_device_module.h;drc=bbeb10925eb106eeed6143ccf571bc438ec22ce1
+[29]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/pc/test/fake_audio_capture_module.h;l=42;drc=d15a575ec3528c252419149d35977e55269d8a41
+[30]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/audio/audio_state.h;drc=d15a575ec3528c252419149d35977e55269d8a41
+[31]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_device/include/audio_device.h;l=87;drc=eb8c4ca608486add9800f6bfb7a8ba3cf23e738e
+[32]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_device/include/audio_device.h;l=94;drc=eb8c4ca608486add9800f6bfb7a8ba3cf23e738e
+[33]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_device/include/audio_device.h;l=84;drc=eb8c4ca608486add9800f6bfb7a8ba3cf23e738e
+[34]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_device/include/audio_device.h;l=91;drc=eb8c4ca608486add9800f6bfb7a8ba3cf23e738e
+[35]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_device/include/audio_device.h;l=95;drc=eb8c4ca608486add9800f6bfb7a8ba3cf23e738e
+[36]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_device/include/audio_device.h;l=92;drc=eb8c4ca608486add9800f6bfb7a8ba3cf23e738e
+[37]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_device/include/audio_device.h;l=63;drc=eb8c4ca608486add9800f6bfb7a8ba3cf23e738e
diff --git a/third_party/libwebrtc/modules/audio_device/include/audio_device.h b/third_party/libwebrtc/modules/audio_device/include/audio_device.h
new file mode 100644
index 0000000000..f82029eb51
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/include/audio_device.h
@@ -0,0 +1,179 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_DEVICE_INCLUDE_AUDIO_DEVICE_H_
+#define MODULES_AUDIO_DEVICE_INCLUDE_AUDIO_DEVICE_H_
+
+#include "api/scoped_refptr.h"
+#include "api/task_queue/task_queue_factory.h"
+#include "modules/audio_device/include/audio_device_defines.h"
+#include "rtc_base/ref_count.h"
+
+namespace webrtc {
+
+class AudioDeviceModuleForTest;
+
+class AudioDeviceModule : public rtc::RefCountInterface {
+ public:
+ enum AudioLayer {
+ kPlatformDefaultAudio = 0,
+ kWindowsCoreAudio,
+ kWindowsCoreAudio2,
+ kLinuxAlsaAudio,
+ kLinuxPulseAudio,
+ kAndroidJavaAudio,
+ kAndroidOpenSLESAudio,
+ kAndroidJavaInputAndOpenSLESOutputAudio,
+ kAndroidAAudioAudio,
+ kAndroidJavaInputAndAAudioOutputAudio,
+ kDummyAudio,
+ };
+
+ enum WindowsDeviceType {
+ kDefaultCommunicationDevice = -1,
+ kDefaultDevice = -2
+ };
+
+ public:
+ // Creates a default ADM for usage in production code.
+ static rtc::scoped_refptr<AudioDeviceModule> Create(
+ AudioLayer audio_layer,
+ TaskQueueFactory* task_queue_factory);
+ // Creates an ADM with support for extra test methods. Don't use this factory
+ // in production code.
+ static rtc::scoped_refptr<AudioDeviceModuleForTest> CreateForTest(
+ AudioLayer audio_layer,
+ TaskQueueFactory* task_queue_factory);
+
+ // Retrieve the currently utilized audio layer
+ virtual int32_t ActiveAudioLayer(AudioLayer* audioLayer) const = 0;
+
+ // Full-duplex transportation of PCM audio
+ virtual int32_t RegisterAudioCallback(AudioTransport* audioCallback) = 0;
+
+ // Main initialization and termination
+ virtual int32_t Init() = 0;
+ virtual int32_t Terminate() = 0;
+ virtual bool Initialized() const = 0;
+
+ // Device enumeration
+ virtual int16_t PlayoutDevices() = 0;
+ virtual int16_t RecordingDevices() = 0;
+ virtual int32_t PlayoutDeviceName(uint16_t index,
+ char name[kAdmMaxDeviceNameSize],
+ char guid[kAdmMaxGuidSize]) = 0;
+ virtual int32_t RecordingDeviceName(uint16_t index,
+ char name[kAdmMaxDeviceNameSize],
+ char guid[kAdmMaxGuidSize]) = 0;
+
+ // Device selection
+ virtual int32_t SetPlayoutDevice(uint16_t index) = 0;
+ virtual int32_t SetPlayoutDevice(WindowsDeviceType device) = 0;
+ virtual int32_t SetRecordingDevice(uint16_t index) = 0;
+ virtual int32_t SetRecordingDevice(WindowsDeviceType device) = 0;
+
+ // Audio transport initialization
+ virtual int32_t PlayoutIsAvailable(bool* available) = 0;
+ virtual int32_t InitPlayout() = 0;
+ virtual bool PlayoutIsInitialized() const = 0;
+ virtual int32_t RecordingIsAvailable(bool* available) = 0;
+ virtual int32_t InitRecording() = 0;
+ virtual bool RecordingIsInitialized() const = 0;
+
+ // Audio transport control
+ virtual int32_t StartPlayout() = 0;
+ virtual int32_t StopPlayout() = 0;
+ virtual bool Playing() const = 0;
+ virtual int32_t StartRecording() = 0;
+ virtual int32_t StopRecording() = 0;
+ virtual bool Recording() const = 0;
+
+ // Audio mixer initialization
+ virtual int32_t InitSpeaker() = 0;
+ virtual bool SpeakerIsInitialized() const = 0;
+ virtual int32_t InitMicrophone() = 0;
+ virtual bool MicrophoneIsInitialized() const = 0;
+
+ // Speaker volume controls
+ virtual int32_t SpeakerVolumeIsAvailable(bool* available) = 0;
+ virtual int32_t SetSpeakerVolume(uint32_t volume) = 0;
+ virtual int32_t SpeakerVolume(uint32_t* volume) const = 0;
+ virtual int32_t MaxSpeakerVolume(uint32_t* maxVolume) const = 0;
+ virtual int32_t MinSpeakerVolume(uint32_t* minVolume) const = 0;
+
+ // Microphone volume controls
+ virtual int32_t MicrophoneVolumeIsAvailable(bool* available) = 0;
+ virtual int32_t SetMicrophoneVolume(uint32_t volume) = 0;
+ virtual int32_t MicrophoneVolume(uint32_t* volume) const = 0;
+ virtual int32_t MaxMicrophoneVolume(uint32_t* maxVolume) const = 0;
+ virtual int32_t MinMicrophoneVolume(uint32_t* minVolume) const = 0;
+
+ // Speaker mute control
+ virtual int32_t SpeakerMuteIsAvailable(bool* available) = 0;
+ virtual int32_t SetSpeakerMute(bool enable) = 0;
+ virtual int32_t SpeakerMute(bool* enabled) const = 0;
+
+ // Microphone mute control
+ virtual int32_t MicrophoneMuteIsAvailable(bool* available) = 0;
+ virtual int32_t SetMicrophoneMute(bool enable) = 0;
+ virtual int32_t MicrophoneMute(bool* enabled) const = 0;
+
+ // Stereo support
+ virtual int32_t StereoPlayoutIsAvailable(bool* available) const = 0;
+ virtual int32_t SetStereoPlayout(bool enable) = 0;
+ virtual int32_t StereoPlayout(bool* enabled) const = 0;
+ virtual int32_t StereoRecordingIsAvailable(bool* available) const = 0;
+ virtual int32_t SetStereoRecording(bool enable) = 0;
+ virtual int32_t StereoRecording(bool* enabled) const = 0;
+
+ // Playout delay
+ virtual int32_t PlayoutDelay(uint16_t* delayMS) const = 0;
+
+ // Only supported on Android.
+ virtual bool BuiltInAECIsAvailable() const = 0;
+ virtual bool BuiltInAGCIsAvailable() const = 0;
+ virtual bool BuiltInNSIsAvailable() const = 0;
+
+ // Enables the built-in audio effects. Only supported on Android.
+ virtual int32_t EnableBuiltInAEC(bool enable) = 0;
+ virtual int32_t EnableBuiltInAGC(bool enable) = 0;
+ virtual int32_t EnableBuiltInNS(bool enable) = 0;
+
+ // Play underrun count. Only supported on Android.
+ // TODO(alexnarest): Make it abstract after upstream projects support it.
+ virtual int32_t GetPlayoutUnderrunCount() const { return -1; }
+
+// Only supported on iOS.
+#if defined(WEBRTC_IOS)
+ virtual int GetPlayoutAudioParameters(AudioParameters* params) const = 0;
+ virtual int GetRecordAudioParameters(AudioParameters* params) const = 0;
+#endif // WEBRTC_IOS
+
+ protected:
+ ~AudioDeviceModule() override {}
+};
+
+// Extends the default ADM interface with some extra test methods.
+// Intended for usage in tests only and requires a unique factory method.
+class AudioDeviceModuleForTest : public AudioDeviceModule {
+ public:
+  // Triggers internal restart sequences of audio streaming. Can be used by
+  // tests to emulate events corresponding to e.g. removal of an active audio
+  // device or other actions which cause the stream to be disconnected.
+ virtual int RestartPlayoutInternally() = 0;
+ virtual int RestartRecordingInternally() = 0;
+
+ virtual int SetPlayoutSampleRate(uint32_t sample_rate) = 0;
+ virtual int SetRecordingSampleRate(uint32_t sample_rate) = 0;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_DEVICE_INCLUDE_AUDIO_DEVICE_H_
diff --git a/third_party/libwebrtc/modules/audio_device/include/audio_device_data_observer.h b/third_party/libwebrtc/modules/audio_device/include/audio_device_data_observer.h
new file mode 100644
index 0000000000..36dc45f19e
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/include/audio_device_data_observer.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_DEVICE_INCLUDE_AUDIO_DEVICE_DATA_OBSERVER_H_
+#define MODULES_AUDIO_DEVICE_INCLUDE_AUDIO_DEVICE_DATA_OBSERVER_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <memory>
+
+#include "absl/base/attributes.h"
+#include "api/scoped_refptr.h"
+#include "api/task_queue/task_queue_factory.h"
+#include "modules/audio_device/include/audio_device.h"
+
+namespace webrtc {
+
+// This interface captures the raw PCM data of both the locally captured audio
+// and the mixed/rendered remote audio.
+class AudioDeviceDataObserver {
+ public:
+ virtual void OnCaptureData(const void* audio_samples,
+ size_t num_samples,
+ size_t bytes_per_sample,
+ size_t num_channels,
+ uint32_t samples_per_sec) = 0;
+
+ virtual void OnRenderData(const void* audio_samples,
+ size_t num_samples,
+ size_t bytes_per_sample,
+ size_t num_channels,
+ uint32_t samples_per_sec) = 0;
+
+ AudioDeviceDataObserver() = default;
+ virtual ~AudioDeviceDataObserver() = default;
+};
+
+// Creates an ADMWrapper around an ADM instance that registers
+// the provided AudioDeviceDataObserver.
+rtc::scoped_refptr<AudioDeviceModule> CreateAudioDeviceWithDataObserver(
+ rtc::scoped_refptr<AudioDeviceModule> impl,
+ std::unique_ptr<AudioDeviceDataObserver> observer);
+
+// Creates an ADMWrapper around an ADM instance that registers
+// the provided AudioDeviceDataObserver.
+ABSL_DEPRECATED("")
+rtc::scoped_refptr<AudioDeviceModule> CreateAudioDeviceWithDataObserver(
+ rtc::scoped_refptr<AudioDeviceModule> impl,
+ AudioDeviceDataObserver* observer);
+
+// Creates an ADM instance with AudioDeviceDataObserver registered.
+rtc::scoped_refptr<AudioDeviceModule> CreateAudioDeviceWithDataObserver(
+ AudioDeviceModule::AudioLayer audio_layer,
+ TaskQueueFactory* task_queue_factory,
+ std::unique_ptr<AudioDeviceDataObserver> observer);
+
+// Creates an ADM instance with AudioDeviceDataObserver registered.
+ABSL_DEPRECATED("")
+rtc::scoped_refptr<AudioDeviceModule> CreateAudioDeviceWithDataObserver(
+ AudioDeviceModule::AudioLayer audio_layer,
+ TaskQueueFactory* task_queue_factory,
+ AudioDeviceDataObserver* observer);
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_DEVICE_INCLUDE_AUDIO_DEVICE_DATA_OBSERVER_H_
diff --git a/third_party/libwebrtc/modules/audio_device/include/audio_device_default.h b/third_party/libwebrtc/modules/audio_device/include/audio_device_default.h
new file mode 100644
index 0000000000..3779d6fb3b
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/include/audio_device_default.h
@@ -0,0 +1,132 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_DEVICE_INCLUDE_AUDIO_DEVICE_DEFAULT_H_
+#define MODULES_AUDIO_DEVICE_INCLUDE_AUDIO_DEVICE_DEFAULT_H_
+
+#include "modules/audio_device/include/audio_device.h"
+
+namespace webrtc {
+namespace webrtc_impl {
+
+// The AudioDeviceModuleDefault template adds default implementations for all
+// AudioDeviceModule methods to the class which inherits from
+// AudioDeviceModuleDefault<T>.
+template <typename T>
+class AudioDeviceModuleDefault : public T {
+ public:
+ AudioDeviceModuleDefault() {}
+ virtual ~AudioDeviceModuleDefault() {}
+
+ int32_t RegisterAudioCallback(AudioTransport* audioCallback) override {
+ return 0;
+ }
+ int32_t Init() override { return 0; }
+ int32_t InitSpeaker() override { return 0; }
+ int32_t SetPlayoutDevice(uint16_t index) override { return 0; }
+ int32_t SetPlayoutDevice(
+ AudioDeviceModule::WindowsDeviceType device) override {
+ return 0;
+ }
+ int32_t SetStereoPlayout(bool enable) override { return 0; }
+ int32_t StopPlayout() override { return 0; }
+ int32_t InitMicrophone() override { return 0; }
+ int32_t SetRecordingDevice(uint16_t index) override { return 0; }
+ int32_t SetRecordingDevice(
+ AudioDeviceModule::WindowsDeviceType device) override {
+ return 0;
+ }
+ int32_t SetStereoRecording(bool enable) override { return 0; }
+ int32_t StopRecording() override { return 0; }
+
+ int32_t Terminate() override { return 0; }
+
+ int32_t ActiveAudioLayer(
+ AudioDeviceModule::AudioLayer* audioLayer) const override {
+ return 0;
+ }
+ bool Initialized() const override { return true; }
+ int16_t PlayoutDevices() override { return 0; }
+ int16_t RecordingDevices() override { return 0; }
+ int32_t PlayoutDeviceName(uint16_t index,
+ char name[kAdmMaxDeviceNameSize],
+ char guid[kAdmMaxGuidSize]) override {
+ return 0;
+ }
+ int32_t RecordingDeviceName(uint16_t index,
+ char name[kAdmMaxDeviceNameSize],
+ char guid[kAdmMaxGuidSize]) override {
+ return 0;
+ }
+ int32_t PlayoutIsAvailable(bool* available) override { return 0; }
+ int32_t InitPlayout() override { return 0; }
+ bool PlayoutIsInitialized() const override { return true; }
+ int32_t RecordingIsAvailable(bool* available) override { return 0; }
+ int32_t InitRecording() override { return 0; }
+ bool RecordingIsInitialized() const override { return true; }
+ int32_t StartPlayout() override { return 0; }
+ bool Playing() const override { return false; }
+ int32_t StartRecording() override { return 0; }
+ bool Recording() const override { return false; }
+ bool SpeakerIsInitialized() const override { return true; }
+ bool MicrophoneIsInitialized() const override { return true; }
+ int32_t SpeakerVolumeIsAvailable(bool* available) override { return 0; }
+ int32_t SetSpeakerVolume(uint32_t volume) override { return 0; }
+ int32_t SpeakerVolume(uint32_t* volume) const override { return 0; }
+ int32_t MaxSpeakerVolume(uint32_t* maxVolume) const override { return 0; }
+ int32_t MinSpeakerVolume(uint32_t* minVolume) const override { return 0; }
+ int32_t MicrophoneVolumeIsAvailable(bool* available) override { return 0; }
+ int32_t SetMicrophoneVolume(uint32_t volume) override { return 0; }
+ int32_t MicrophoneVolume(uint32_t* volume) const override { return 0; }
+ int32_t MaxMicrophoneVolume(uint32_t* maxVolume) const override { return 0; }
+ int32_t MinMicrophoneVolume(uint32_t* minVolume) const override { return 0; }
+ int32_t SpeakerMuteIsAvailable(bool* available) override { return 0; }
+ int32_t SetSpeakerMute(bool enable) override { return 0; }
+ int32_t SpeakerMute(bool* enabled) const override { return 0; }
+ int32_t MicrophoneMuteIsAvailable(bool* available) override { return 0; }
+ int32_t SetMicrophoneMute(bool enable) override { return 0; }
+ int32_t MicrophoneMute(bool* enabled) const override { return 0; }
+ int32_t StereoPlayoutIsAvailable(bool* available) const override {
+ *available = false;
+ return 0;
+ }
+ int32_t StereoPlayout(bool* enabled) const override { return 0; }
+ int32_t StereoRecordingIsAvailable(bool* available) const override {
+ *available = false;
+ return 0;
+ }
+ int32_t StereoRecording(bool* enabled) const override { return 0; }
+ int32_t PlayoutDelay(uint16_t* delayMS) const override {
+ *delayMS = 0;
+ return 0;
+ }
+ bool BuiltInAECIsAvailable() const override { return false; }
+ int32_t EnableBuiltInAEC(bool enable) override { return -1; }
+ bool BuiltInAGCIsAvailable() const override { return false; }
+ int32_t EnableBuiltInAGC(bool enable) override { return -1; }
+ bool BuiltInNSIsAvailable() const override { return false; }
+ int32_t EnableBuiltInNS(bool enable) override { return -1; }
+
+ int32_t GetPlayoutUnderrunCount() const override { return -1; }
+
+#if defined(WEBRTC_IOS)
+ int GetPlayoutAudioParameters(AudioParameters* params) const override {
+ return -1;
+ }
+ int GetRecordAudioParameters(AudioParameters* params) const override {
+ return -1;
+ }
+#endif // WEBRTC_IOS
+};
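+
+// Example usage (a minimal sketch; `MyFakeAudioDevice` is hypothetical):
+// inherit the defaults and override only the methods the fake needs.
+//
+//   class MyFakeAudioDevice
+//       : public AudioDeviceModuleDefault<AudioDeviceModule> {
+//    public:
+//     int32_t StartPlayout() override { playing_ = true; return 0; }
+//     bool Playing() const override { return playing_; }
+//
+//    private:
+//     bool playing_ = false;
+//   };
+//
+//   // rtc::make_ref_counted supplies the AddRef()/Release() that the
+//   // ref-counted AudioDeviceModule interface requires.
+//   rtc::scoped_refptr<AudioDeviceModule> adm =
+//       rtc::make_ref_counted<MyFakeAudioDevice>();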
+
+} // namespace webrtc_impl
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_DEVICE_INCLUDE_AUDIO_DEVICE_DEFAULT_H_
diff --git a/third_party/libwebrtc/modules/audio_device/include/audio_device_defines.h b/third_party/libwebrtc/modules/audio_device/include/audio_device_defines.h
new file mode 100644
index 0000000000..89d33f8538
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/include/audio_device_defines.h
@@ -0,0 +1,177 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_DEVICE_INCLUDE_AUDIO_DEVICE_DEFINES_H_
+#define MODULES_AUDIO_DEVICE_INCLUDE_AUDIO_DEVICE_DEFINES_H_
+
+#include <stddef.h>
+
+#include <string>
+
+#include "rtc_base/checks.h"
+#include "rtc_base/strings/string_builder.h"
+
+namespace webrtc {
+
+static const int kAdmMaxDeviceNameSize = 128;
+static const int kAdmMaxFileNameSize = 512;
+static const int kAdmMaxGuidSize = 128;
+
+static const int kAdmMinPlayoutBufferSizeMs = 10;
+static const int kAdmMaxPlayoutBufferSizeMs = 250;
+
+// ----------------------------------------------------------------------------
+// AudioTransport
+// ----------------------------------------------------------------------------
+
+class AudioTransport {
+ public:
+ // TODO(bugs.webrtc.org/13620) Deprecate this function
+ virtual int32_t RecordedDataIsAvailable(const void* audioSamples,
+ size_t nSamples,
+ size_t nBytesPerSample,
+ size_t nChannels,
+ uint32_t samplesPerSec,
+ uint32_t totalDelayMS,
+ int32_t clockDrift,
+ uint32_t currentMicLevel,
+ bool keyPressed,
+ uint32_t& newMicLevel) = 0; // NOLINT
+
+ virtual int32_t RecordedDataIsAvailable(
+ const void* audioSamples,
+ size_t nSamples,
+ size_t nBytesPerSample,
+ size_t nChannels,
+ uint32_t samplesPerSec,
+ uint32_t totalDelayMS,
+ int32_t clockDrift,
+ uint32_t currentMicLevel,
+ bool keyPressed,
+ uint32_t& newMicLevel,
+ int64_t estimatedCaptureTimeNS) { // NOLINT
+    // TODO(webrtc:13620) Make the default behavior of the new API match that
+    // of the old API. This can be made pure virtual once all uses of the old
+    // API are removed.
+ return RecordedDataIsAvailable(
+ audioSamples, nSamples, nBytesPerSample, nChannels, samplesPerSec,
+ totalDelayMS, clockDrift, currentMicLevel, keyPressed, newMicLevel);
+ }
+
+ // Implementation has to setup safe values for all specified out parameters.
+ virtual int32_t NeedMorePlayData(size_t nSamples,
+ size_t nBytesPerSample,
+ size_t nChannels,
+ uint32_t samplesPerSec,
+ void* audioSamples,
+ size_t& nSamplesOut, // NOLINT
+ int64_t* elapsed_time_ms,
+ int64_t* ntp_time_ms) = 0; // NOLINT
+
+  // Method to pull mixed render audio data from all active VoE channels.
+  // The data will not be passed as a reference for audio processing
+  // internally.
+ virtual void PullRenderData(int bits_per_sample,
+ int sample_rate,
+ size_t number_of_channels,
+ size_t number_of_frames,
+ void* audio_data,
+ int64_t* elapsed_time_ms,
+ int64_t* ntp_time_ms) = 0;
+
+ protected:
+ virtual ~AudioTransport() {}
+};
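+
+// Example of the "safe out parameters" contract on NeedMorePlayData (a
+// minimal sketch; `SilentTransport` is hypothetical and elides the other
+// pure-virtual methods): even with no audio to deliver, every out parameter
+// is given a safe value before returning.
+//
+//   class SilentTransport : public AudioTransport {
+//     int32_t NeedMorePlayData(size_t nSamples, size_t nBytesPerSample,
+//                              size_t nChannels, uint32_t samplesPerSec,
+//                              void* audioSamples, size_t& nSamplesOut,
+//                              int64_t* elapsed_time_ms,
+//                              int64_t* ntp_time_ms) override {
+//       memset(audioSamples, 0, nSamples * nBytesPerSample);
+//       nSamplesOut = nSamples;
+//       *elapsed_time_ms = 0;
+//       *ntp_time_ms = 0;
+//       return 0;
+//     }
+//     // ... RecordedDataIsAvailable and PullRenderData elided ...
+//   };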
+
+// Helper class that stores fundamental audio parameters such as the sample
+// rate, the number of channels and the native buffer size.
+// Note that one audio frame can contain more than one channel sample and each
+// sample is assumed to be a 16-bit PCM sample. Hence, one audio frame in
+// stereo contains 2 * (16/8) = 4 bytes of data.
+class AudioParameters {
+ public:
+  // This implementation only supports 16-bit PCM samples.
+ static const size_t kBitsPerSample = 16;
+ AudioParameters()
+ : sample_rate_(0),
+ channels_(0),
+ frames_per_buffer_(0),
+ frames_per_10ms_buffer_(0) {}
+ AudioParameters(int sample_rate, size_t channels, size_t frames_per_buffer)
+ : sample_rate_(sample_rate),
+ channels_(channels),
+ frames_per_buffer_(frames_per_buffer),
+ frames_per_10ms_buffer_(static_cast<size_t>(sample_rate / 100)) {}
+ void reset(int sample_rate, size_t channels, size_t frames_per_buffer) {
+ sample_rate_ = sample_rate;
+ channels_ = channels;
+ frames_per_buffer_ = frames_per_buffer;
+ frames_per_10ms_buffer_ = static_cast<size_t>(sample_rate / 100);
+ }
+ size_t bits_per_sample() const { return kBitsPerSample; }
+ void reset(int sample_rate, size_t channels, double buffer_duration) {
+ reset(sample_rate, channels,
+ static_cast<size_t>(sample_rate * buffer_duration + 0.5));
+ }
+ void reset(int sample_rate, size_t channels) {
+ reset(sample_rate, channels, static_cast<size_t>(0));
+ }
+ int sample_rate() const { return sample_rate_; }
+ size_t channels() const { return channels_; }
+ size_t frames_per_buffer() const { return frames_per_buffer_; }
+ size_t frames_per_10ms_buffer() const { return frames_per_10ms_buffer_; }
+ size_t GetBytesPerFrame() const { return channels_ * kBitsPerSample / 8; }
+ size_t GetBytesPerBuffer() const {
+ return frames_per_buffer_ * GetBytesPerFrame();
+ }
+ // The WebRTC audio device buffer (ADB) only requires that the sample rate
+ // and number of channels are configured. Hence, to be "valid", only these
+ // two attributes must be set.
+ bool is_valid() const { return ((sample_rate_ > 0) && (channels_ > 0)); }
+ // Most platforms also require that a native buffer size is defined.
+ // An audio parameter instance is considered to be "complete" if it is both
+ // "valid" (can be used by the ADB) and also has a native frame size.
+ bool is_complete() const { return (is_valid() && (frames_per_buffer_ > 0)); }
+ size_t GetBytesPer10msBuffer() const {
+ return frames_per_10ms_buffer_ * GetBytesPerFrame();
+ }
+ double GetBufferSizeInMilliseconds() const {
+ if (sample_rate_ == 0)
+ return 0.0;
+ return frames_per_buffer_ / (sample_rate_ / 1000.0);
+ }
+ double GetBufferSizeInSeconds() const {
+ if (sample_rate_ == 0)
+ return 0.0;
+ return static_cast<double>(frames_per_buffer_) / (sample_rate_);
+ }
+ std::string ToString() const {
+ char ss_buf[1024];
+ rtc::SimpleStringBuilder ss(ss_buf);
+ ss << "AudioParameters: ";
+ ss << "sample_rate=" << sample_rate() << ", channels=" << channels();
+ ss << ", frames_per_buffer=" << frames_per_buffer();
+ ss << ", frames_per_10ms_buffer=" << frames_per_10ms_buffer();
+ ss << ", bytes_per_frame=" << GetBytesPerFrame();
+ ss << ", bytes_per_buffer=" << GetBytesPerBuffer();
+ ss << ", bytes_per_10ms_buffer=" << GetBytesPer10msBuffer();
+ ss << ", size_in_ms=" << GetBufferSizeInMilliseconds();
+ return ss.str();
+ }
+
+ private:
+ int sample_rate_;
+ size_t channels_;
+ size_t frames_per_buffer_;
+ size_t frames_per_10ms_buffer_;
+};
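+
+// Worked example of the arithmetic above (a sketch): 48 kHz stereo with a
+// 480-frame native buffer gives frames_per_10ms_buffer() = 48000 / 100 = 480,
+// GetBytesPerFrame() = 2 * (16 / 8) = 4 and GetBytesPerBuffer() =
+// 480 * 4 = 1920, i.e. the native buffer spans exactly 10 ms:
+//
+//   AudioParameters params(48000, 2, 480);
+//   RTC_DCHECK(params.is_complete());
+//   RTC_DCHECK_EQ(params.GetBytesPerBuffer(), 1920u);
+//   RTC_DCHECK_EQ(params.GetBufferSizeInMilliseconds(), 10.0);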
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_DEVICE_INCLUDE_AUDIO_DEVICE_DEFINES_H_
diff --git a/third_party/libwebrtc/modules/audio_device/include/audio_device_factory.cc b/third_party/libwebrtc/modules/audio_device/include/audio_device_factory.cc
new file mode 100644
index 0000000000..130e096e6d
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/include/audio_device_factory.cc
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_device/include/audio_device_factory.h"
+
+#include <memory>
+
+#if defined(WEBRTC_WIN)
+#include "modules/audio_device/win/audio_device_module_win.h"
+#include "modules/audio_device/win/core_audio_input_win.h"
+#include "modules/audio_device/win/core_audio_output_win.h"
+#include "modules/audio_device/win/core_audio_utility_win.h"
+#endif
+
+#include "api/task_queue/task_queue_factory.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+rtc::scoped_refptr<AudioDeviceModule> CreateWindowsCoreAudioAudioDeviceModule(
+ TaskQueueFactory* task_queue_factory,
+ bool automatic_restart) {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ return CreateWindowsCoreAudioAudioDeviceModuleForTest(task_queue_factory,
+ automatic_restart);
+}
+
+rtc::scoped_refptr<AudioDeviceModuleForTest>
+CreateWindowsCoreAudioAudioDeviceModuleForTest(
+ TaskQueueFactory* task_queue_factory,
+ bool automatic_restart) {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+  // Returns nullptr if Core Audio is not supported or if COM has not been
+  // initialized correctly using ScopedCOMInitializer.
+ if (!webrtc_win::core_audio_utility::IsSupported()) {
+ RTC_LOG(LS_ERROR)
+ << "Unable to create ADM since Core Audio is not supported";
+ return nullptr;
+ }
+ return CreateWindowsCoreAudioAudioDeviceModuleFromInputAndOutput(
+ std::make_unique<webrtc_win::CoreAudioInput>(automatic_restart),
+ std::make_unique<webrtc_win::CoreAudioOutput>(automatic_restart),
+ task_queue_factory);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_device/include/audio_device_factory.h b/third_party/libwebrtc/modules/audio_device/include/audio_device_factory.h
new file mode 100644
index 0000000000..edd7686b8e
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/include/audio_device_factory.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_DEVICE_INCLUDE_AUDIO_DEVICE_FACTORY_H_
+#define MODULES_AUDIO_DEVICE_INCLUDE_AUDIO_DEVICE_FACTORY_H_
+
+#include <memory>
+
+#include "api/task_queue/task_queue_factory.h"
+#include "modules/audio_device/include/audio_device.h"
+
+namespace webrtc {
+
+// Creates an AudioDeviceModule (ADM) for Windows based on the Core Audio API.
+// The creating thread must be a COM thread; otherwise nullptr will be returned.
+// By default `automatic_restart` is set to true and it results in support for
+// automatic restart of audio if e.g. the existing device is removed. If set to
+// false, no attempt to restart audio is performed under these conditions.
+//
+// Example (assuming webrtc namespace):
+//
+// public:
+// rtc::scoped_refptr<AudioDeviceModule> CreateAudioDevice() {
+// task_queue_factory_ = CreateDefaultTaskQueueFactory();
+// // Tell COM that this thread shall live in the MTA.
+// com_initializer_ = std::make_unique<ScopedCOMInitializer>(
+// ScopedCOMInitializer::kMTA);
+// if (!com_initializer_->Succeeded()) {
+// return nullptr;
+// }
+// // Create the ADM with support for automatic restart if devices are
+// // unplugged.
+// return CreateWindowsCoreAudioAudioDeviceModule(
+// task_queue_factory_.get());
+// }
+//
+// private:
+// std::unique_ptr<ScopedCOMInitializer> com_initializer_;
+// std::unique_ptr<TaskQueueFactory> task_queue_factory_;
+//
+rtc::scoped_refptr<AudioDeviceModule> CreateWindowsCoreAudioAudioDeviceModule(
+ TaskQueueFactory* task_queue_factory,
+ bool automatic_restart = true);
+
+rtc::scoped_refptr<AudioDeviceModuleForTest>
+CreateWindowsCoreAudioAudioDeviceModuleForTest(
+ TaskQueueFactory* task_queue_factory,
+ bool automatic_restart = true);
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_DEVICE_INCLUDE_AUDIO_DEVICE_FACTORY_H_
diff --git a/third_party/libwebrtc/modules/audio_device/include/fake_audio_device.h b/third_party/libwebrtc/modules/audio_device/include/fake_audio_device.h
new file mode 100644
index 0000000000..2322ce0263
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/include/fake_audio_device.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_DEVICE_INCLUDE_FAKE_AUDIO_DEVICE_H_
+#define MODULES_AUDIO_DEVICE_INCLUDE_FAKE_AUDIO_DEVICE_H_
+
+#include "modules/audio_device/include/audio_device.h"
+#include "modules/audio_device/include/audio_device_default.h"
+
+namespace webrtc {
+
+class FakeAudioDeviceModule
+ : public webrtc_impl::AudioDeviceModuleDefault<AudioDeviceModule> {
+ public:
+  // TODO(bugs.webrtc.org/12701): Fix all users of this class to manage
+  // references using scoped_refptr. Current code doesn't always use
+  // refcounting for this class.
+ void AddRef() const override {}
+ rtc::RefCountReleaseStatus Release() const override {
+ return rtc::RefCountReleaseStatus::kDroppedLastRef;
+ }
+};
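+
+// Because AddRef() and Release() are no-ops here, a FakeAudioDeviceModule
+// does not have to be heap-allocated behind rtc::scoped_refptr; a sketch
+// (the consuming function is hypothetical):
+//
+//   FakeAudioDeviceModule fake_adm;
+//   UseAudioDeviceModule(&fake_adm);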
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_DEVICE_INCLUDE_FAKE_AUDIO_DEVICE_H_
diff --git a/third_party/libwebrtc/modules/audio_device/include/mock_audio_device.h b/third_party/libwebrtc/modules/audio_device/include/mock_audio_device.h
new file mode 100644
index 0000000000..73fbdd547d
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/include/mock_audio_device.h
@@ -0,0 +1,156 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_DEVICE_INCLUDE_MOCK_AUDIO_DEVICE_H_
+#define MODULES_AUDIO_DEVICE_INCLUDE_MOCK_AUDIO_DEVICE_H_
+
+#include <string>
+
+#include "api/make_ref_counted.h"
+#include "modules/audio_device/include/audio_device.h"
+#include "test/gmock.h"
+
+namespace webrtc {
+namespace test {
+
+class MockAudioDeviceModule : public AudioDeviceModule {
+ public:
+ static rtc::scoped_refptr<MockAudioDeviceModule> CreateNice() {
+ return rtc::make_ref_counted<::testing::NiceMock<MockAudioDeviceModule>>();
+ }
+ static rtc::scoped_refptr<MockAudioDeviceModule> CreateStrict() {
+ return rtc::make_ref_counted<
+ ::testing::StrictMock<MockAudioDeviceModule>>();
+ }
+
+ // AudioDeviceModule.
+ MOCK_METHOD(int32_t,
+ ActiveAudioLayer,
+ (AudioLayer * audioLayer),
+ (const, override));
+ MOCK_METHOD(int32_t,
+ RegisterAudioCallback,
+ (AudioTransport * audioCallback),
+ (override));
+ MOCK_METHOD(int32_t, Init, (), (override));
+ MOCK_METHOD(int32_t, Terminate, (), (override));
+ MOCK_METHOD(bool, Initialized, (), (const, override));
+ MOCK_METHOD(int16_t, PlayoutDevices, (), (override));
+ MOCK_METHOD(int16_t, RecordingDevices, (), (override));
+ MOCK_METHOD(int32_t,
+ PlayoutDeviceName,
+ (uint16_t index,
+ char name[kAdmMaxDeviceNameSize],
+ char guid[kAdmMaxGuidSize]),
+ (override));
+ MOCK_METHOD(int32_t,
+ RecordingDeviceName,
+ (uint16_t index,
+ char name[kAdmMaxDeviceNameSize],
+ char guid[kAdmMaxGuidSize]),
+ (override));
+ MOCK_METHOD(int32_t, SetPlayoutDevice, (uint16_t index), (override));
+ MOCK_METHOD(int32_t,
+ SetPlayoutDevice,
+ (WindowsDeviceType device),
+ (override));
+ MOCK_METHOD(int32_t, SetRecordingDevice, (uint16_t index), (override));
+ MOCK_METHOD(int32_t,
+ SetRecordingDevice,
+ (WindowsDeviceType device),
+ (override));
+ MOCK_METHOD(int32_t, PlayoutIsAvailable, (bool* available), (override));
+ MOCK_METHOD(int32_t, InitPlayout, (), (override));
+ MOCK_METHOD(bool, PlayoutIsInitialized, (), (const, override));
+ MOCK_METHOD(int32_t, RecordingIsAvailable, (bool* available), (override));
+ MOCK_METHOD(int32_t, InitRecording, (), (override));
+ MOCK_METHOD(bool, RecordingIsInitialized, (), (const, override));
+ MOCK_METHOD(int32_t, StartPlayout, (), (override));
+ MOCK_METHOD(int32_t, StopPlayout, (), (override));
+ MOCK_METHOD(bool, Playing, (), (const, override));
+ MOCK_METHOD(int32_t, StartRecording, (), (override));
+ MOCK_METHOD(int32_t, StopRecording, (), (override));
+ MOCK_METHOD(bool, Recording, (), (const, override));
+ MOCK_METHOD(int32_t, InitSpeaker, (), (override));
+ MOCK_METHOD(bool, SpeakerIsInitialized, (), (const, override));
+ MOCK_METHOD(int32_t, InitMicrophone, (), (override));
+ MOCK_METHOD(bool, MicrophoneIsInitialized, (), (const, override));
+ MOCK_METHOD(int32_t, SpeakerVolumeIsAvailable, (bool* available), (override));
+ MOCK_METHOD(int32_t, SetSpeakerVolume, (uint32_t volume), (override));
+ MOCK_METHOD(int32_t, SpeakerVolume, (uint32_t * volume), (const, override));
+ MOCK_METHOD(int32_t,
+ MaxSpeakerVolume,
+ (uint32_t * maxVolume),
+ (const, override));
+ MOCK_METHOD(int32_t,
+ MinSpeakerVolume,
+ (uint32_t * minVolume),
+ (const, override));
+ MOCK_METHOD(int32_t,
+ MicrophoneVolumeIsAvailable,
+ (bool* available),
+ (override));
+ MOCK_METHOD(int32_t, SetMicrophoneVolume, (uint32_t volume), (override));
+ MOCK_METHOD(int32_t,
+ MicrophoneVolume,
+ (uint32_t * volume),
+ (const, override));
+ MOCK_METHOD(int32_t,
+ MaxMicrophoneVolume,
+ (uint32_t * maxVolume),
+ (const, override));
+ MOCK_METHOD(int32_t,
+ MinMicrophoneVolume,
+ (uint32_t * minVolume),
+ (const, override));
+ MOCK_METHOD(int32_t, SpeakerMuteIsAvailable, (bool* available), (override));
+ MOCK_METHOD(int32_t, SetSpeakerMute, (bool enable), (override));
+ MOCK_METHOD(int32_t, SpeakerMute, (bool* enabled), (const, override));
+ MOCK_METHOD(int32_t,
+ MicrophoneMuteIsAvailable,
+ (bool* available),
+ (override));
+ MOCK_METHOD(int32_t, SetMicrophoneMute, (bool enable), (override));
+ MOCK_METHOD(int32_t, MicrophoneMute, (bool* enabled), (const, override));
+ MOCK_METHOD(int32_t,
+ StereoPlayoutIsAvailable,
+ (bool* available),
+ (const, override));
+ MOCK_METHOD(int32_t, SetStereoPlayout, (bool enable), (override));
+ MOCK_METHOD(int32_t, StereoPlayout, (bool* enabled), (const, override));
+ MOCK_METHOD(int32_t,
+ StereoRecordingIsAvailable,
+ (bool* available),
+ (const, override));
+ MOCK_METHOD(int32_t, SetStereoRecording, (bool enable), (override));
+ MOCK_METHOD(int32_t, StereoRecording, (bool* enabled), (const, override));
+ MOCK_METHOD(int32_t, PlayoutDelay, (uint16_t * delayMS), (const, override));
+ MOCK_METHOD(bool, BuiltInAECIsAvailable, (), (const, override));
+ MOCK_METHOD(bool, BuiltInAGCIsAvailable, (), (const, override));
+ MOCK_METHOD(bool, BuiltInNSIsAvailable, (), (const, override));
+ MOCK_METHOD(int32_t, EnableBuiltInAEC, (bool enable), (override));
+ MOCK_METHOD(int32_t, EnableBuiltInAGC, (bool enable), (override));
+ MOCK_METHOD(int32_t, EnableBuiltInNS, (bool enable), (override));
+ MOCK_METHOD(int32_t, GetPlayoutUnderrunCount, (), (const, override));
+#if defined(WEBRTC_IOS)
+ MOCK_METHOD(int,
+ GetPlayoutAudioParameters,
+ (AudioParameters * params),
+ (const, override));
+ MOCK_METHOD(int,
+ GetRecordAudioParameters,
+ (AudioParameters * params),
+ (const, override));
+#endif // WEBRTC_IOS
+};
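+
+// Example usage in a test (a minimal sketch):
+//
+//   auto adm = MockAudioDeviceModule::CreateNice();
+//   EXPECT_CALL(*adm, Init()).WillOnce(::testing::Return(0));
+//   EXPECT_CALL(*adm, Playing()).WillRepeatedly(::testing::Return(false));
+//   EXPECT_EQ(0, adm->Init());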
+} // namespace test
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_DEVICE_INCLUDE_MOCK_AUDIO_DEVICE_H_
diff --git a/third_party/libwebrtc/modules/audio_device/include/mock_audio_transport.h b/third_party/libwebrtc/modules/audio_device/include/mock_audio_transport.h
new file mode 100644
index 0000000000..e1be5f422f
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/include/mock_audio_transport.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_DEVICE_INCLUDE_MOCK_AUDIO_TRANSPORT_H_
+#define MODULES_AUDIO_DEVICE_INCLUDE_MOCK_AUDIO_TRANSPORT_H_
+
+#include "modules/audio_device/include/audio_device_defines.h"
+#include "test/gmock.h"
+
+namespace webrtc {
+namespace test {
+
+class MockAudioTransport : public AudioTransport {
+ public:
+ MockAudioTransport() {}
+ ~MockAudioTransport() {}
+
+ MOCK_METHOD(int32_t,
+ RecordedDataIsAvailable,
+ (const void* audioSamples,
+ size_t nSamples,
+ size_t nBytesPerSample,
+ size_t nChannels,
+ uint32_t samplesPerSec,
+ uint32_t totalDelayMS,
+ int32_t clockDrift,
+ uint32_t currentMicLevel,
+ bool keyPressed,
+ uint32_t& newMicLevel),
+ (override));
+
+ MOCK_METHOD(int32_t,
+ RecordedDataIsAvailable,
+ (const void* audioSamples,
+ size_t nSamples,
+ size_t nBytesPerSample,
+ size_t nChannels,
+ uint32_t samplesPerSec,
+ uint32_t totalDelayMS,
+ int32_t clockDrift,
+ uint32_t currentMicLevel,
+ bool keyPressed,
+ uint32_t& newMicLevel,
+ int64_t estimated_capture_time_ns),
+ (override));
+
+ MOCK_METHOD(int32_t,
+ NeedMorePlayData,
+ (size_t nSamples,
+ size_t nBytesPerSample,
+ size_t nChannels,
+ uint32_t samplesPerSec,
+ void* audioSamples,
+ size_t& nSamplesOut,
+ int64_t* elapsed_time_ms,
+ int64_t* ntp_time_ms),
+ (override));
+
+ MOCK_METHOD(void,
+ PullRenderData,
+ (int bits_per_sample,
+ int sample_rate,
+ size_t number_of_channels,
+ size_t number_of_frames,
+ void* audio_data,
+ int64_t* elapsed_time_ms,
+ int64_t* ntp_time_ms),
+ (override));
+};
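+
+// Example usage in a test (a minimal sketch; `adm` is assumed to be an
+// existing AudioDeviceModule*):
+//
+//   ::testing::NiceMock<MockAudioTransport> transport;
+//   EXPECT_CALL(transport, NeedMorePlayData).Times(::testing::AtLeast(1));
+//   adm->RegisterAudioCallback(&transport);
+//   adm->StartPlayout();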
+
+} // namespace test
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_DEVICE_INCLUDE_MOCK_AUDIO_TRANSPORT_H_
diff --git a/third_party/libwebrtc/modules/audio_device/include/test_audio_device.cc b/third_party/libwebrtc/modules/audio_device/include/test_audio_device.cc
new file mode 100644
index 0000000000..7153cc58b3
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/include/test_audio_device.cc
@@ -0,0 +1,498 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/audio_device/include/test_audio_device.h"
+
+#include <algorithm>
+#include <cstdint>
+#include <cstdlib>
+#include <memory>
+#include <string>
+#include <type_traits>
+#include <utility>
+#include <vector>
+
+#include "absl/strings/string_view.h"
+#include "api/array_view.h"
+#include "api/make_ref_counted.h"
+#include "common_audio/wav_file.h"
+#include "modules/audio_device/include/audio_device_default.h"
+#include "rtc_base/buffer.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/event.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "rtc_base/platform_thread.h"
+#include "rtc_base/random.h"
+#include "rtc_base/synchronization/mutex.h"
+#include "rtc_base/task_queue.h"
+#include "rtc_base/task_utils/repeating_task.h"
+#include "rtc_base/thread_annotations.h"
+#include "rtc_base/time_utils.h"
+
+namespace webrtc {
+
+namespace {
+
+constexpr int kFrameLengthUs = 10000;
+constexpr int kFramesPerSecond = rtc::kNumMicrosecsPerSec / kFrameLengthUs;
+
+// TestAudioDeviceModule implements an AudioDevice module that can act both as a
+// capturer and a renderer. It will use 10ms audio frames.
+class TestAudioDeviceModuleImpl
+ : public webrtc_impl::AudioDeviceModuleDefault<TestAudioDeviceModule> {
+ public:
+ // Creates a new TestAudioDeviceModule. When capturing or playing, 10 ms audio
+ // frames will be processed every 10ms / `speed`.
+ // `capturer` is an object that produces audio data. Can be nullptr if this
+ // device is never used for recording.
+ // `renderer` is an object that receives audio data that would have been
+ // played out. Can be nullptr if this device is never used for playing.
+ // Use one of the Create... functions to get these instances.
+ TestAudioDeviceModuleImpl(TaskQueueFactory* task_queue_factory,
+ std::unique_ptr<Capturer> capturer,
+ std::unique_ptr<Renderer> renderer,
+ float speed = 1)
+ : task_queue_factory_(task_queue_factory),
+ capturer_(std::move(capturer)),
+ renderer_(std::move(renderer)),
+ process_interval_us_(kFrameLengthUs / speed),
+ audio_callback_(nullptr),
+ rendering_(false),
+ capturing_(false) {
+ auto good_sample_rate = [](int sr) {
+ return sr == 8000 || sr == 16000 || sr == 32000 || sr == 44100 ||
+ sr == 48000;
+ };
+
+ if (renderer_) {
+ const int sample_rate = renderer_->SamplingFrequency();
+ playout_buffer_.resize(
+ SamplesPerFrame(sample_rate) * renderer_->NumChannels(), 0);
+ RTC_CHECK(good_sample_rate(sample_rate));
+ }
+ if (capturer_) {
+ RTC_CHECK(good_sample_rate(capturer_->SamplingFrequency()));
+ }
+ }
+
+ ~TestAudioDeviceModuleImpl() override {
+ StopPlayout();
+ StopRecording();
+ }
+
+ int32_t Init() override {
+ task_queue_ =
+ std::make_unique<rtc::TaskQueue>(task_queue_factory_->CreateTaskQueue(
+ "TestAudioDeviceModuleImpl", TaskQueueFactory::Priority::NORMAL));
+
+ RepeatingTaskHandle::Start(task_queue_->Get(), [this]() {
+ ProcessAudio();
+ return TimeDelta::Micros(process_interval_us_);
+ });
+ return 0;
+ }
+
+ int32_t RegisterAudioCallback(AudioTransport* callback) override {
+ MutexLock lock(&lock_);
+ RTC_DCHECK(callback || audio_callback_);
+ audio_callback_ = callback;
+ return 0;
+ }
+
+ int32_t StartPlayout() override {
+ MutexLock lock(&lock_);
+ RTC_CHECK(renderer_);
+ rendering_ = true;
+ return 0;
+ }
+
+ int32_t StopPlayout() override {
+ MutexLock lock(&lock_);
+ rendering_ = false;
+ return 0;
+ }
+
+ int32_t StartRecording() override {
+ MutexLock lock(&lock_);
+ RTC_CHECK(capturer_);
+ capturing_ = true;
+ return 0;
+ }
+
+ int32_t StopRecording() override {
+ MutexLock lock(&lock_);
+ capturing_ = false;
+ return 0;
+ }
+
+ bool Playing() const override {
+ MutexLock lock(&lock_);
+ return rendering_;
+ }
+
+ bool Recording() const override {
+ MutexLock lock(&lock_);
+ return capturing_;
+ }
+
+  // Blocks until the Recorder stops producing data; the wait is unbounded,
+  // so there is no timeout parameter.
+ bool WaitForRecordingEnd() override {
+ return done_capturing_.Wait(rtc::Event::kForever);
+ }
+
+ private:
+ void ProcessAudio() {
+ MutexLock lock(&lock_);
+ if (capturing_) {
+      // Capture 10 ms of audio: 2 bytes per sample, i.e. 2 * channels bytes
+      // per frame.
+ const bool keep_capturing = capturer_->Capture(&recording_buffer_);
+ uint32_t new_mic_level = 0;
+ if (recording_buffer_.size() > 0) {
+ audio_callback_->RecordedDataIsAvailable(
+ recording_buffer_.data(),
+ recording_buffer_.size() / capturer_->NumChannels(),
+ 2 * capturer_->NumChannels(), capturer_->NumChannels(),
+ capturer_->SamplingFrequency(), 0, 0, 0, false, new_mic_level);
+ }
+ if (!keep_capturing) {
+ capturing_ = false;
+ done_capturing_.Set();
+ }
+ }
+ if (rendering_) {
+ size_t samples_out = 0;
+ int64_t elapsed_time_ms = -1;
+ int64_t ntp_time_ms = -1;
+ const int sampling_frequency = renderer_->SamplingFrequency();
+ audio_callback_->NeedMorePlayData(
+ SamplesPerFrame(sampling_frequency), 2 * renderer_->NumChannels(),
+ renderer_->NumChannels(), sampling_frequency, playout_buffer_.data(),
+ samples_out, &elapsed_time_ms, &ntp_time_ms);
+ const bool keep_rendering = renderer_->Render(
+ rtc::ArrayView<const int16_t>(playout_buffer_.data(), samples_out));
+ if (!keep_rendering) {
+ rendering_ = false;
+ done_rendering_.Set();
+ }
+ }
+ }
+ TaskQueueFactory* const task_queue_factory_;
+ const std::unique_ptr<Capturer> capturer_ RTC_GUARDED_BY(lock_);
+ const std::unique_ptr<Renderer> renderer_ RTC_GUARDED_BY(lock_);
+ const int64_t process_interval_us_;
+
+ mutable Mutex lock_;
+ AudioTransport* audio_callback_ RTC_GUARDED_BY(lock_);
+ bool rendering_ RTC_GUARDED_BY(lock_);
+ bool capturing_ RTC_GUARDED_BY(lock_);
+ rtc::Event done_rendering_;
+ rtc::Event done_capturing_;
+
+ std::vector<int16_t> playout_buffer_ RTC_GUARDED_BY(lock_);
+ rtc::BufferT<int16_t> recording_buffer_ RTC_GUARDED_BY(lock_);
+ std::unique_ptr<rtc::TaskQueue> task_queue_;
+};
+
+// A fake capturer that generates pulses with random samples between
+// -max_amplitude and +max_amplitude.
+class PulsedNoiseCapturerImpl final
+ : public TestAudioDeviceModule::PulsedNoiseCapturer {
+ public:
+ // Assuming 10ms audio packets.
+ PulsedNoiseCapturerImpl(int16_t max_amplitude,
+ int sampling_frequency_in_hz,
+ int num_channels)
+ : sampling_frequency_in_hz_(sampling_frequency_in_hz),
+ fill_with_zero_(false),
+ random_generator_(1),
+ max_amplitude_(max_amplitude),
+ num_channels_(num_channels) {
+ RTC_DCHECK_GT(max_amplitude, 0);
+ }
+
+ int SamplingFrequency() const override { return sampling_frequency_in_hz_; }
+
+ int NumChannels() const override { return num_channels_; }
+
+ bool Capture(rtc::BufferT<int16_t>* buffer) override {
+ fill_with_zero_ = !fill_with_zero_;
+ int16_t max_amplitude;
+ {
+ MutexLock lock(&lock_);
+ max_amplitude = max_amplitude_;
+ }
+ buffer->SetData(
+ TestAudioDeviceModule::SamplesPerFrame(sampling_frequency_in_hz_) *
+ num_channels_,
+ [&](rtc::ArrayView<int16_t> data) {
+ if (fill_with_zero_) {
+ std::fill(data.begin(), data.end(), 0);
+ } else {
+ std::generate(data.begin(), data.end(), [&]() {
+ return random_generator_.Rand(-max_amplitude, max_amplitude);
+ });
+ }
+ return data.size();
+ });
+ return true;
+ }
+
+ void SetMaxAmplitude(int16_t amplitude) override {
+ MutexLock lock(&lock_);
+ max_amplitude_ = amplitude;
+ }
+
+ private:
+ int sampling_frequency_in_hz_;
+ bool fill_with_zero_;
+ Random random_generator_;
+ Mutex lock_;
+ int16_t max_amplitude_ RTC_GUARDED_BY(lock_);
+ const int num_channels_;
+};
+
+class WavFileReader final : public TestAudioDeviceModule::Capturer {
+ public:
+ WavFileReader(absl::string_view filename,
+ int sampling_frequency_in_hz,
+ int num_channels,
+ bool repeat)
+ : WavFileReader(std::make_unique<WavReader>(filename),
+ sampling_frequency_in_hz,
+ num_channels,
+ repeat) {}
+
+ int SamplingFrequency() const override { return sampling_frequency_in_hz_; }
+
+ int NumChannels() const override { return num_channels_; }
+
+ bool Capture(rtc::BufferT<int16_t>* buffer) override {
+ buffer->SetData(
+ TestAudioDeviceModule::SamplesPerFrame(sampling_frequency_in_hz_) *
+ num_channels_,
+ [&](rtc::ArrayView<int16_t> data) {
+ size_t read = wav_reader_->ReadSamples(data.size(), data.data());
+ if (read < data.size() && repeat_) {
+ do {
+ wav_reader_->Reset();
+ size_t delta = wav_reader_->ReadSamples(
+ data.size() - read, data.subview(read).data());
+ RTC_CHECK_GT(delta, 0) << "No new data read from file";
+ read += delta;
+ } while (read < data.size());
+ }
+ return read;
+ });
+ return buffer->size() > 0;
+ }
+
+ private:
+ WavFileReader(std::unique_ptr<WavReader> wav_reader,
+ int sampling_frequency_in_hz,
+ int num_channels,
+ bool repeat)
+ : sampling_frequency_in_hz_(sampling_frequency_in_hz),
+ num_channels_(num_channels),
+ wav_reader_(std::move(wav_reader)),
+ repeat_(repeat) {
+ RTC_CHECK_EQ(wav_reader_->sample_rate(), sampling_frequency_in_hz);
+ RTC_CHECK_EQ(wav_reader_->num_channels(), num_channels);
+ }
+
+ const int sampling_frequency_in_hz_;
+ const int num_channels_;
+ std::unique_ptr<WavReader> wav_reader_;
+ const bool repeat_;
+};
+
+class WavFileWriter final : public TestAudioDeviceModule::Renderer {
+ public:
+ WavFileWriter(absl::string_view filename,
+ int sampling_frequency_in_hz,
+ int num_channels)
+ : WavFileWriter(std::make_unique<WavWriter>(filename,
+ sampling_frequency_in_hz,
+ num_channels),
+ sampling_frequency_in_hz,
+ num_channels) {}
+
+ int SamplingFrequency() const override { return sampling_frequency_in_hz_; }
+
+ int NumChannels() const override { return num_channels_; }
+
+ bool Render(rtc::ArrayView<const int16_t> data) override {
+ wav_writer_->WriteSamples(data.data(), data.size());
+ return true;
+ }
+
+ private:
+ WavFileWriter(std::unique_ptr<WavWriter> wav_writer,
+ int sampling_frequency_in_hz,
+ int num_channels)
+ : sampling_frequency_in_hz_(sampling_frequency_in_hz),
+ wav_writer_(std::move(wav_writer)),
+ num_channels_(num_channels) {}
+
+ int sampling_frequency_in_hz_;
+ std::unique_ptr<WavWriter> wav_writer_;
+ const int num_channels_;
+};
+
+class BoundedWavFileWriter : public TestAudioDeviceModule::Renderer {
+ public:
+ BoundedWavFileWriter(absl::string_view filename,
+ int sampling_frequency_in_hz,
+ int num_channels)
+ : sampling_frequency_in_hz_(sampling_frequency_in_hz),
+ wav_writer_(filename, sampling_frequency_in_hz, num_channels),
+ num_channels_(num_channels),
+ silent_audio_(
+ TestAudioDeviceModule::SamplesPerFrame(sampling_frequency_in_hz) *
+ num_channels,
+ 0),
+ started_writing_(false),
+ trailing_zeros_(0) {}
+
+ int SamplingFrequency() const override { return sampling_frequency_in_hz_; }
+
+ int NumChannels() const override { return num_channels_; }
+
+ bool Render(rtc::ArrayView<const int16_t> data) override {
+ const int16_t kAmplitudeThreshold = 5;
+
+ const int16_t* begin = data.begin();
+ const int16_t* end = data.end();
+ if (!started_writing_) {
+ // Cut off silence at the beginning.
+ while (begin < end) {
+ if (std::abs(*begin) > kAmplitudeThreshold) {
+ started_writing_ = true;
+ break;
+ }
+ ++begin;
+ }
+ }
+ if (started_writing_) {
+ // Cut off silence at the end.
+ while (begin < end) {
+ if (*(end - 1) != 0) {
+ break;
+ }
+ --end;
+ }
+ if (begin < end) {
+        // If it turns out that the silence was not final, we need to write
+        // all the skipped zeros and continue writing audio.
+ while (trailing_zeros_ > 0) {
+ const size_t zeros_to_write =
+ std::min(trailing_zeros_, silent_audio_.size());
+ wav_writer_.WriteSamples(silent_audio_.data(), zeros_to_write);
+ trailing_zeros_ -= zeros_to_write;
+ }
+ wav_writer_.WriteSamples(begin, end - begin);
+ }
+      // Remember how many trailing zeros we skipped in case they need to be
+      // written out later.
+ trailing_zeros_ += data.end() - end;
+ }
+ return true;
+ }
+
+ private:
+ int sampling_frequency_in_hz_;
+ WavWriter wav_writer_;
+ const int num_channels_;
+ std::vector<int16_t> silent_audio_;
+ bool started_writing_;
+ size_t trailing_zeros_;
+};
+
+class DiscardRenderer final : public TestAudioDeviceModule::Renderer {
+ public:
+ explicit DiscardRenderer(int sampling_frequency_in_hz, int num_channels)
+ : sampling_frequency_in_hz_(sampling_frequency_in_hz),
+ num_channels_(num_channels) {}
+
+ int SamplingFrequency() const override { return sampling_frequency_in_hz_; }
+
+ int NumChannels() const override { return num_channels_; }
+
+ bool Render(rtc::ArrayView<const int16_t> data) override { return true; }
+
+ private:
+ int sampling_frequency_in_hz_;
+ const int num_channels_;
+};
+
+} // namespace
+
+size_t TestAudioDeviceModule::SamplesPerFrame(int sampling_frequency_in_hz) {
+ return rtc::CheckedDivExact(sampling_frequency_in_hz, kFramesPerSecond);
+}
+
+rtc::scoped_refptr<TestAudioDeviceModule> TestAudioDeviceModule::Create(
+ TaskQueueFactory* task_queue_factory,
+ std::unique_ptr<TestAudioDeviceModule::Capturer> capturer,
+ std::unique_ptr<TestAudioDeviceModule::Renderer> renderer,
+ float speed) {
+ return rtc::make_ref_counted<TestAudioDeviceModuleImpl>(
+ task_queue_factory, std::move(capturer), std::move(renderer), speed);
+}
+
+std::unique_ptr<TestAudioDeviceModule::PulsedNoiseCapturer>
+TestAudioDeviceModule::CreatePulsedNoiseCapturer(int16_t max_amplitude,
+ int sampling_frequency_in_hz,
+ int num_channels) {
+ return std::make_unique<PulsedNoiseCapturerImpl>(
+ max_amplitude, sampling_frequency_in_hz, num_channels);
+}
+
+std::unique_ptr<TestAudioDeviceModule::Renderer>
+TestAudioDeviceModule::CreateDiscardRenderer(int sampling_frequency_in_hz,
+ int num_channels) {
+ return std::make_unique<DiscardRenderer>(sampling_frequency_in_hz,
+ num_channels);
+}
+
+std::unique_ptr<TestAudioDeviceModule::Capturer>
+TestAudioDeviceModule::CreateWavFileReader(absl::string_view filename,
+ int sampling_frequency_in_hz,
+ int num_channels) {
+ return std::make_unique<WavFileReader>(filename, sampling_frequency_in_hz,
+ num_channels, false);
+}
+
+std::unique_ptr<TestAudioDeviceModule::Capturer>
+TestAudioDeviceModule::CreateWavFileReader(absl::string_view filename,
+ bool repeat) {
+ WavReader reader(filename);
+ int sampling_frequency_in_hz = reader.sample_rate();
+ int num_channels = rtc::checked_cast<int>(reader.num_channels());
+ return std::make_unique<WavFileReader>(filename, sampling_frequency_in_hz,
+ num_channels, repeat);
+}
+
+std::unique_ptr<TestAudioDeviceModule::Renderer>
+TestAudioDeviceModule::CreateWavFileWriter(absl::string_view filename,
+ int sampling_frequency_in_hz,
+ int num_channels) {
+ return std::make_unique<WavFileWriter>(filename, sampling_frequency_in_hz,
+ num_channels);
+}
+
+std::unique_ptr<TestAudioDeviceModule::Renderer>
+TestAudioDeviceModule::CreateBoundedWavFileWriter(absl::string_view filename,
+ int sampling_frequency_in_hz,
+ int num_channels) {
+ return std::make_unique<BoundedWavFileWriter>(
+ filename, sampling_frequency_in_hz, num_channels);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_device/include/test_audio_device.h b/third_party/libwebrtc/modules/audio_device/include/test_audio_device.h
new file mode 100644
index 0000000000..8d9ad2cf4b
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/include/test_audio_device.h
@@ -0,0 +1,150 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef MODULES_AUDIO_DEVICE_INCLUDE_TEST_AUDIO_DEVICE_H_
+#define MODULES_AUDIO_DEVICE_INCLUDE_TEST_AUDIO_DEVICE_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <memory>
+#include <string>
+
+#include "absl/strings/string_view.h"
+#include "api/array_view.h"
+#include "api/scoped_refptr.h"
+#include "api/task_queue/task_queue_factory.h"
+#include "modules/audio_device/include/audio_device.h"
+#include "modules/audio_device/include/audio_device_defines.h"
+#include "rtc_base/buffer.h"
+
+namespace webrtc {
+
+// TestAudioDeviceModule implements an AudioDevice module that can act both as a
+// capturer and a renderer. It will use 10ms audio frames.
+class TestAudioDeviceModule : public AudioDeviceModule {
+ public:
+ // Returns the number of samples that Capturers and Renderers with this
+ // sampling frequency will work with every time Capture or Render is called.
+ static size_t SamplesPerFrame(int sampling_frequency_in_hz);
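+  // For example, a 48000 Hz stream yields 48000 / 100 = 480 samples per
+  // 10 ms frame (per channel).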
+
+ class Capturer {
+ public:
+ virtual ~Capturer() {}
+ // Returns the sampling frequency in Hz of the audio data that this
+ // capturer produces.
+ virtual int SamplingFrequency() const = 0;
+ // Returns the number of channels of captured audio data.
+ virtual int NumChannels() const = 0;
+ // Replaces the contents of `buffer` with 10ms of captured audio data
+ // (see TestAudioDeviceModule::SamplesPerFrame). Returns true if the
+ // capturer can keep producing data, or false when the capture finishes.
+ virtual bool Capture(rtc::BufferT<int16_t>* buffer) = 0;
+ };
+
+ class Renderer {
+ public:
+ virtual ~Renderer() {}
+ // Returns the sampling frequency in Hz of the audio data that this
+ // renderer receives.
+ virtual int SamplingFrequency() const = 0;
+    // Returns the number of channels of audio data that this renderer
+    // expects to receive.
+ virtual int NumChannels() const = 0;
+ // Renders the passed audio data and returns true if the renderer wants
+ // to keep receiving data, or false otherwise.
+ virtual bool Render(rtc::ArrayView<const int16_t> data) = 0;
+ };
+
+ // A fake capturer that generates pulses with random samples between
+ // -max_amplitude and +max_amplitude.
+ class PulsedNoiseCapturer : public Capturer {
+ public:
+ ~PulsedNoiseCapturer() override {}
+
+ virtual void SetMaxAmplitude(int16_t amplitude) = 0;
+ };
+
+ ~TestAudioDeviceModule() override {}
+
+ // Creates a new TestAudioDeviceModule. When capturing or playing, 10 ms audio
+ // frames will be processed every 10ms / `speed`.
+ // `capturer` is an object that produces audio data. Can be nullptr if this
+ // device is never used for recording.
+ // `renderer` is an object that receives audio data that would have been
+ // played out. Can be nullptr if this device is never used for playing.
+ // Use one of the Create... functions to get these instances.
+ static rtc::scoped_refptr<TestAudioDeviceModule> Create(
+ TaskQueueFactory* task_queue_factory,
+ std::unique_ptr<Capturer> capturer,
+ std::unique_ptr<Renderer> renderer,
+ float speed = 1);
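+  //
+  // Example of creating a module that captures pulsed noise and discards
+  // playout (a minimal sketch; `task_queue_factory` and `transport` are
+  // assumed to be a valid TaskQueueFactory* and AudioTransport*):
+  //
+  //   auto adm = TestAudioDeviceModule::Create(
+  //       task_queue_factory,
+  //       TestAudioDeviceModule::CreatePulsedNoiseCapturer(
+  //           /*max_amplitude=*/1000, /*sampling_frequency_in_hz=*/48000),
+  //       TestAudioDeviceModule::CreateDiscardRenderer(48000));
+  //   adm->Init();
+  //   adm->RegisterAudioCallback(transport);
+  //   adm->StartRecording();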
+
+  // Returns a Capturer instance that generates a signal of `num_channels`
+  // channels where every other frame is zero and the remaining frames are
+  // evenly distributed random noise with max amplitude `max_amplitude`.
+ static std::unique_ptr<PulsedNoiseCapturer> CreatePulsedNoiseCapturer(
+ int16_t max_amplitude,
+ int sampling_frequency_in_hz,
+ int num_channels = 1);
+
+ // Returns a Renderer instance that does nothing with the audio data.
+ static std::unique_ptr<Renderer> CreateDiscardRenderer(
+ int sampling_frequency_in_hz,
+ int num_channels = 1);
+
+ // WavReader and WavWriter creation based on file name.
+
+ // Returns a Capturer instance that gets its data from a file. The sample rate
+ // and channels will be checked against the Wav file.
+ static std::unique_ptr<Capturer> CreateWavFileReader(
+ absl::string_view filename,
+ int sampling_frequency_in_hz,
+ int num_channels = 1);
+
+ // Returns a Capturer instance that gets its data from a file.
+  // Automatically detects the sample rate and the number of channels.
+  // `repeat` - if true, the file will be replayed from the start when the
+  // end of the file is reached.
+ static std::unique_ptr<Capturer> CreateWavFileReader(
+ absl::string_view filename,
+ bool repeat = false);
+
+ // Returns a Renderer instance that writes its data to a file.
+ static std::unique_ptr<Renderer> CreateWavFileWriter(
+ absl::string_view filename,
+ int sampling_frequency_in_hz,
+ int num_channels = 1);
+
+ // Returns a Renderer instance that writes its data to a WAV file, cutting
+ // off silence at the beginning (not necessarily perfect silence, see
+ // kAmplitudeThreshold) and at the end (only actual 0 samples in this case).
+ static std::unique_ptr<Renderer> CreateBoundedWavFileWriter(
+ absl::string_view filename,
+ int sampling_frequency_in_hz,
+ int num_channels = 1);
+
+ int32_t Init() override = 0;
+ int32_t RegisterAudioCallback(AudioTransport* callback) override = 0;
+
+ int32_t StartPlayout() override = 0;
+ int32_t StopPlayout() override = 0;
+ int32_t StartRecording() override = 0;
+ int32_t StopRecording() override = 0;
+
+ bool Playing() const override = 0;
+ bool Recording() const override = 0;
+
+  // Blocks until the Recorder stops producing data; the wait is unbounded,
+  // so there is no timeout parameter.
+ virtual bool WaitForRecordingEnd() = 0;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_DEVICE_INCLUDE_TEST_AUDIO_DEVICE_H_
diff --git a/third_party/libwebrtc/modules/audio_device/include/test_audio_device_unittest.cc b/third_party/libwebrtc/modules/audio_device/include/test_audio_device_unittest.cc
new file mode 100644
index 0000000000..2975b11325
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/include/test_audio_device_unittest.cc
@@ -0,0 +1,192 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_device/include/test_audio_device.h"
+
+#include <algorithm>
+#include <array>
+
+#include "api/array_view.h"
+#include "common_audio/wav_file.h"
+#include "common_audio/wav_header.h"
+#include "rtc_base/logging.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "test/testsupport/file_utils.h"
+
+namespace webrtc {
+
+namespace {
+
+void RunTest(const std::vector<int16_t>& input_samples,
+ const std::vector<int16_t>& expected_samples,
+ size_t samples_per_frame) {
+ const ::testing::TestInfo* const test_info =
+ ::testing::UnitTest::GetInstance()->current_test_info();
+
+ const std::string output_filename =
+ test::OutputPath() + "BoundedWavFileWriterTest_" + test_info->name() +
+ "_" + std::to_string(std::rand()) + ".wav";
+
+ static const size_t kSamplesPerFrame = 8;
+ static const int kSampleRate = kSamplesPerFrame * 100;
+ EXPECT_EQ(TestAudioDeviceModule::SamplesPerFrame(kSampleRate),
+ kSamplesPerFrame);
+
+ // Test through file name API.
+ {
+ std::unique_ptr<TestAudioDeviceModule::Renderer> writer =
+ TestAudioDeviceModule::CreateBoundedWavFileWriter(output_filename, 800);
+
+ for (size_t i = 0; i < input_samples.size(); i += kSamplesPerFrame) {
+ EXPECT_TRUE(writer->Render(rtc::ArrayView<const int16_t>(
+ &input_samples[i],
+ std::min(kSamplesPerFrame, input_samples.size() - i))));
+ }
+ }
+
+ {
+ WavReader reader(output_filename);
+ std::vector<int16_t> read_samples(expected_samples.size());
+ EXPECT_EQ(expected_samples.size(),
+ reader.ReadSamples(read_samples.size(), read_samples.data()));
+ EXPECT_EQ(expected_samples, read_samples);
+
+ EXPECT_EQ(0u, reader.ReadSamples(read_samples.size(), read_samples.data()));
+ }
+
+ remove(output_filename.c_str());
+}
+} // namespace
+
+TEST(BoundedWavFileWriterTest, NoSilence) {
+ static const std::vector<int16_t> kInputSamples = {
+ 75, 1234, 243, -1231, -22222, 0, 3, 88,
+ 1222, -1213, -13222, -7, -3525, 5787, -25247, 8};
+ static const std::vector<int16_t> kExpectedSamples = kInputSamples;
+ RunTest(kInputSamples, kExpectedSamples, 8);
+}
+
+TEST(BoundedWavFileWriterTest, SomeStartSilence) {
+ static const std::vector<int16_t> kInputSamples = {
+ 0, 0, 0, 0, 3, 0, 0, 0, 0, 3, -13222, -7, -3525, 5787, -25247, 8};
+ static const std::vector<int16_t> kExpectedSamples(kInputSamples.begin() + 10,
+ kInputSamples.end());
+ RunTest(kInputSamples, kExpectedSamples, 8);
+}
+
+TEST(BoundedWavFileWriterTest, NegativeStartSilence) {
+ static const std::vector<int16_t> kInputSamples = {
+ 0, -4, -6, 0, 3, 0, 0, 0, 0, 3, -13222, -7, -3525, 5787, -25247, 8};
+ static const std::vector<int16_t> kExpectedSamples(kInputSamples.begin() + 2,
+ kInputSamples.end());
+ RunTest(kInputSamples, kExpectedSamples, 8);
+}
+
+TEST(BoundedWavFileWriterTest, SomeEndSilence) {
+ static const std::vector<int16_t> kInputSamples = {
+ 75, 1234, 243, -1231, -22222, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+ static const std::vector<int16_t> kExpectedSamples(kInputSamples.begin(),
+ kInputSamples.end() - 9);
+ RunTest(kInputSamples, kExpectedSamples, 8);
+}
+
+TEST(BoundedWavFileWriterTest, DoubleEndSilence) {
+ static const std::vector<int16_t> kInputSamples = {
+ 75, 1234, 243, -1231, -22222, 0, 0, 0,
+ 0, -1213, -13222, -7, -3525, 5787, 0, 0};
+ static const std::vector<int16_t> kExpectedSamples(kInputSamples.begin(),
+ kInputSamples.end() - 2);
+ RunTest(kInputSamples, kExpectedSamples, 8);
+}
+
+TEST(BoundedWavFileWriterTest, DoubleSilence) {
+ static const std::vector<int16_t> kInputSamples = {0, -1213, -13222, -7,
+ -3525, 5787, 0, 0};
+ static const std::vector<int16_t> kExpectedSamples(kInputSamples.begin() + 1,
+ kInputSamples.end() - 2);
+ RunTest(kInputSamples, kExpectedSamples, 8);
+}
+
+TEST(BoundedWavFileWriterTest, EndSilenceCutoff) {
+ static const std::vector<int16_t> kInputSamples = {
+ 75, 1234, 243, -1231, -22222, 0, 1, 0, 0, 0, 0};
+ static const std::vector<int16_t> kExpectedSamples(kInputSamples.begin(),
+ kInputSamples.end() - 4);
+ RunTest(kInputSamples, kExpectedSamples, 8);
+}
+
+TEST(WavFileReaderTest, RepeatedTrueWithSingleFrameFileReadTwice) {
+ static const std::vector<int16_t> kInputSamples = {75, 1234, 243, -1231,
+ -22222, 0, 3, 88};
+ static const rtc::BufferT<int16_t> kExpectedSamples(kInputSamples.data(),
+ kInputSamples.size());
+
+ const std::string output_filename = test::OutputPath() +
+ "WavFileReaderTest_RepeatedTrue_" +
+ std::to_string(std::rand()) + ".wav";
+
+ static const size_t kSamplesPerFrame = 8;
+ static const int kSampleRate = kSamplesPerFrame * 100;
+ EXPECT_EQ(TestAudioDeviceModule::SamplesPerFrame(kSampleRate),
+ kSamplesPerFrame);
+
+ // Create wav file to read.
+ {
+ std::unique_ptr<TestAudioDeviceModule::Renderer> writer =
+ TestAudioDeviceModule::CreateWavFileWriter(output_filename, 800);
+
+ for (size_t i = 0; i < kInputSamples.size(); i += kSamplesPerFrame) {
+ EXPECT_TRUE(writer->Render(rtc::ArrayView<const int16_t>(
+ &kInputSamples[i],
+ std::min(kSamplesPerFrame, kInputSamples.size() - i))));
+ }
+ }
+
+ {
+ std::unique_ptr<TestAudioDeviceModule::Capturer> reader =
+ TestAudioDeviceModule::CreateWavFileReader(output_filename, true);
+ rtc::BufferT<int16_t> buffer(kExpectedSamples.size());
+ EXPECT_TRUE(reader->Capture(&buffer));
+ EXPECT_EQ(kExpectedSamples, buffer);
+ EXPECT_TRUE(reader->Capture(&buffer));
+ EXPECT_EQ(kExpectedSamples, buffer);
+ }
+
+ remove(output_filename.c_str());
+}
+
+TEST(PulsedNoiseCapturerTest, SetMaxAmplitude) {
+ const int16_t kAmplitude = 50;
+ std::unique_ptr<TestAudioDeviceModule::PulsedNoiseCapturer> capturer =
+ TestAudioDeviceModule::CreatePulsedNoiseCapturer(
+ kAmplitude, /*sampling_frequency_in_hz=*/8000);
+ rtc::BufferT<int16_t> recording_buffer;
+
+  // Verify that the capturer doesn't create entries louder than
+ // kAmplitude. Since the pulse generator alternates between writing
+ // zeroes and actual entries, we need to do the capturing twice.
+ capturer->Capture(&recording_buffer);
+ capturer->Capture(&recording_buffer);
+ int16_t max_sample =
+ *std::max_element(recording_buffer.begin(), recording_buffer.end());
+ EXPECT_LE(max_sample, kAmplitude);
+
+ // Increase the amplitude and verify that the samples can now be louder
+ // than the previous max.
+ capturer->SetMaxAmplitude(kAmplitude * 2);
+ capturer->Capture(&recording_buffer);
+ capturer->Capture(&recording_buffer);
+ max_sample =
+ *std::max_element(recording_buffer.begin(), recording_buffer.end());
+ EXPECT_GT(max_sample, kAmplitude);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_device/linux/alsasymboltable_linux.cc b/third_party/libwebrtc/modules/audio_device/linux/alsasymboltable_linux.cc
new file mode 100644
index 0000000000..5dfb91d6f4
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/linux/alsasymboltable_linux.cc
@@ -0,0 +1,40 @@
+/*
+ * libjingle
+ * Copyright 2004--2010, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "modules/audio_device/linux/alsasymboltable_linux.h"
+
+namespace webrtc {
+namespace adm_linux_alsa {
+
+LATE_BINDING_SYMBOL_TABLE_DEFINE_BEGIN(AlsaSymbolTable, "libasound.so.2")
+#define X(sym) LATE_BINDING_SYMBOL_TABLE_DEFINE_ENTRY(AlsaSymbolTable, sym)
+ALSA_SYMBOLS_LIST
+#undef X
+LATE_BINDING_SYMBOL_TABLE_DEFINE_END(AlsaSymbolTable)
+
+} // namespace adm_linux_alsa
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_device/linux/alsasymboltable_linux.h b/third_party/libwebrtc/modules/audio_device/linux/alsasymboltable_linux.h
new file mode 100644
index 0000000000..c9970b02bc
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/linux/alsasymboltable_linux.h
@@ -0,0 +1,148 @@
+/*
+ * libjingle
+ * Copyright 2004--2010, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef AUDIO_DEVICE_ALSASYMBOLTABLE_LINUX_H_
+#define AUDIO_DEVICE_ALSASYMBOLTABLE_LINUX_H_
+
+#include "modules/audio_device/linux/latebindingsymboltable_linux.h"
+
+namespace webrtc {
+namespace adm_linux_alsa {
+
+// The ALSA symbols we need, as an X-Macro list.
+// This list must contain precisely every libasound function that is used in
+// alsasoundsystem.cc.
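+// For illustration: with X() defined as the DECLARE_ENTRY macro below, this
+// list expands to one LATE_BINDING_SYMBOL_TABLE_DECLARE_ENTRY(AlsaSymbolTable,
+// sym) per symbol, so adding or removing a libasound function only requires
+// editing this single list.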
+#define ALSA_SYMBOLS_LIST \
+ X(snd_device_name_free_hint) \
+ X(snd_device_name_get_hint) \
+ X(snd_device_name_hint) \
+ X(snd_pcm_avail_update) \
+ X(snd_pcm_close) \
+ X(snd_pcm_delay) \
+ X(snd_pcm_drop) \
+ X(snd_pcm_open) \
+ X(snd_pcm_prepare) \
+ X(snd_pcm_readi) \
+ X(snd_pcm_recover) \
+ X(snd_pcm_resume) \
+ X(snd_pcm_reset) \
+ X(snd_pcm_state) \
+ X(snd_pcm_set_params) \
+ X(snd_pcm_get_params) \
+ X(snd_pcm_start) \
+ X(snd_pcm_stream) \
+ X(snd_pcm_frames_to_bytes) \
+ X(snd_pcm_bytes_to_frames) \
+ X(snd_pcm_wait) \
+ X(snd_pcm_writei) \
+ X(snd_pcm_info_get_class) \
+ X(snd_pcm_info_get_subdevices_avail) \
+ X(snd_pcm_info_get_subdevice_name) \
+ X(snd_pcm_info_set_subdevice) \
+ X(snd_pcm_info_get_id) \
+ X(snd_pcm_info_set_device) \
+ X(snd_pcm_info_set_stream) \
+ X(snd_pcm_info_get_name) \
+ X(snd_pcm_info_get_subdevices_count) \
+ X(snd_pcm_info_sizeof) \
+ X(snd_pcm_hw_params) \
+ X(snd_pcm_hw_params_malloc) \
+ X(snd_pcm_hw_params_free) \
+ X(snd_pcm_hw_params_any) \
+ X(snd_pcm_hw_params_set_access) \
+ X(snd_pcm_hw_params_set_format) \
+ X(snd_pcm_hw_params_set_channels) \
+ X(snd_pcm_hw_params_set_rate_near) \
+ X(snd_pcm_hw_params_set_buffer_size_near) \
+ X(snd_card_next) \
+ X(snd_card_get_name) \
+ X(snd_config_update) \
+ X(snd_config_copy) \
+ X(snd_config_get_id) \
+ X(snd_ctl_open) \
+ X(snd_ctl_close) \
+ X(snd_ctl_card_info) \
+ X(snd_ctl_card_info_sizeof) \
+ X(snd_ctl_card_info_get_id) \
+ X(snd_ctl_card_info_get_name) \
+ X(snd_ctl_pcm_next_device) \
+ X(snd_ctl_pcm_info) \
+ X(snd_mixer_load) \
+ X(snd_mixer_free) \
+ X(snd_mixer_detach) \
+ X(snd_mixer_close) \
+ X(snd_mixer_open) \
+ X(snd_mixer_attach) \
+ X(snd_mixer_first_elem) \
+ X(snd_mixer_elem_next) \
+ X(snd_mixer_selem_get_name) \
+ X(snd_mixer_selem_is_active) \
+ X(snd_mixer_selem_register) \
+ X(snd_mixer_selem_set_playback_volume_all) \
+ X(snd_mixer_selem_get_playback_volume) \
+ X(snd_mixer_selem_has_playback_volume) \
+ X(snd_mixer_selem_get_playback_volume_range) \
+ X(snd_mixer_selem_has_playback_switch) \
+ X(snd_mixer_selem_get_playback_switch) \
+ X(snd_mixer_selem_set_playback_switch_all) \
+ X(snd_mixer_selem_has_capture_switch) \
+ X(snd_mixer_selem_get_capture_switch) \
+ X(snd_mixer_selem_set_capture_switch_all) \
+ X(snd_mixer_selem_has_capture_volume) \
+ X(snd_mixer_selem_set_capture_volume_all) \
+ X(snd_mixer_selem_get_capture_volume) \
+ X(snd_mixer_selem_get_capture_volume_range) \
+ X(snd_dlopen) \
+ X(snd_dlclose) \
+ X(snd_config) \
+ X(snd_config_search) \
+ X(snd_config_get_string) \
+ X(snd_config_search_definition) \
+ X(snd_config_get_type) \
+ X(snd_config_delete) \
+ X(snd_config_iterator_entry) \
+ X(snd_config_iterator_first) \
+ X(snd_config_iterator_next) \
+ X(snd_config_iterator_end) \
+ X(snd_config_delete_compound_members) \
+ X(snd_config_get_integer) \
+ X(snd_config_get_bool) \
+ X(snd_dlsym) \
+ X(snd_strerror) \
+ X(snd_lib_error) \
+ X(snd_lib_error_set_handler)
+
+LATE_BINDING_SYMBOL_TABLE_DECLARE_BEGIN(AlsaSymbolTable)
+#define X(sym) LATE_BINDING_SYMBOL_TABLE_DECLARE_ENTRY(AlsaSymbolTable, sym)
+ALSA_SYMBOLS_LIST
+#undef X
+LATE_BINDING_SYMBOL_TABLE_DECLARE_END(AlsaSymbolTable)
+
+} // namespace adm_linux_alsa
+} // namespace webrtc
+
+#endif // AUDIO_DEVICE_ALSASYMBOLTABLE_LINUX_H_
diff --git a/third_party/libwebrtc/modules/audio_device/linux/audio_device_alsa_linux.cc b/third_party/libwebrtc/modules/audio_device/linux/audio_device_alsa_linux.cc
new file mode 100644
index 0000000000..50cf3beb6c
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/linux/audio_device_alsa_linux.cc
@@ -0,0 +1,1637 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_device/linux/audio_device_alsa_linux.h"
+
+#include "modules/audio_device/audio_device_config.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/system/arch.h"
+#include "system_wrappers/include/sleep.h"
+
+WebRTCAlsaSymbolTable* GetAlsaSymbolTable() {
+ static WebRTCAlsaSymbolTable* alsa_symbol_table = new WebRTCAlsaSymbolTable();
+ return alsa_symbol_table;
+}
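+// The symbol table is created on first use and intentionally never freed, a
+// leaky-singleton pattern that keeps the table valid for the whole process
+// lifetime and avoids shutdown destruction-order issues.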
+
+// Accesses ALSA functions through our late-binding symbol table instead of
+// directly. This way we don't have to link to libasound, which means our binary
+// will work on systems that don't have it.
+#define LATE(sym) \
+ LATESYM_GET(webrtc::adm_linux_alsa::AlsaSymbolTable, GetAlsaSymbolTable(), \
+ sym)
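+// For example, a hypothetical call LATE(snd_pcm_open)(&handle, "default",
+// SND_PCM_STREAM_PLAYBACK, 0) resolves snd_pcm_open through the dynamically
+// loaded libasound.so.2 at run time rather than at link time.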
+
+// Redefine these here to be able to do late-binding
+#undef snd_ctl_card_info_alloca
+#define snd_ctl_card_info_alloca(ptr) \
+ do { \
+ *ptr = (snd_ctl_card_info_t*)__builtin_alloca( \
+ LATE(snd_ctl_card_info_sizeof)()); \
+ memset(*ptr, 0, LATE(snd_ctl_card_info_sizeof)()); \
+ } while (0)
+
+#undef snd_pcm_info_alloca
+#define snd_pcm_info_alloca(pInfo) \
+ do { \
+ *pInfo = (snd_pcm_info_t*)__builtin_alloca(LATE(snd_pcm_info_sizeof)()); \
+ memset(*pInfo, 0, LATE(snd_pcm_info_sizeof)()); \
+ } while (0)
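+// The stock asoundlib.h macros call snd_ctl_card_info_sizeof() and
+// snd_pcm_info_sizeof() directly, which would reintroduce a link-time
+// dependency on libasound; the redefinitions above route those calls through
+// LATE() instead.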
+
+// snd_lib_error_handler_t
+void WebrtcAlsaErrorHandler(const char* file,
+ int line,
+ const char* function,
+ int err,
+ const char* fmt,
+ ...) {}
+
+namespace webrtc {
+static const unsigned int ALSA_PLAYOUT_FREQ = 48000;
+static const unsigned int ALSA_PLAYOUT_CH = 2;
+static const unsigned int ALSA_PLAYOUT_LATENCY = 40 * 1000; // in us
+static const unsigned int ALSA_CAPTURE_FREQ = 48000;
+static const unsigned int ALSA_CAPTURE_CH = 2;
+static const unsigned int ALSA_CAPTURE_LATENCY = 40 * 1000; // in us
+static const unsigned int ALSA_CAPTURE_WAIT_TIMEOUT = 5; // in ms
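+// At 48 kHz, one 10 ms chunk corresponds to 48000 / 100 = 480 frames; the
+// *FramesIn10MS and *BufferSizeIn10MS members are derived from these
+// constants in InitPlayout() and InitRecording().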
+
+#define FUNC_GET_NUM_OF_DEVICE 0
+#define FUNC_GET_DEVICE_NAME 1
+#define FUNC_GET_DEVICE_NAME_FOR_AN_ENUM 2
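+// GetDevicesInfo() multiplexes on these codes: 0 returns the number of
+// devices, 1 copies a human-readable device description, and 2 copies the raw
+// ALSA device name (e.g. "default") that is passed on to snd_pcm_open().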
+
+AudioDeviceLinuxALSA::AudioDeviceLinuxALSA()
+ : _ptrAudioBuffer(NULL),
+ _inputDeviceIndex(0),
+ _outputDeviceIndex(0),
+ _inputDeviceIsSpecified(false),
+ _outputDeviceIsSpecified(false),
+ _handleRecord(NULL),
+ _handlePlayout(NULL),
+ _recordingBuffersizeInFrame(0),
+ _recordingPeriodSizeInFrame(0),
+ _playoutBufferSizeInFrame(0),
+ _playoutPeriodSizeInFrame(0),
+ _recordingBufferSizeIn10MS(0),
+ _playoutBufferSizeIn10MS(0),
+ _recordingFramesIn10MS(0),
+ _playoutFramesIn10MS(0),
+ _recordingFreq(ALSA_CAPTURE_FREQ),
+ _playoutFreq(ALSA_PLAYOUT_FREQ),
+ _recChannels(ALSA_CAPTURE_CH),
+ _playChannels(ALSA_PLAYOUT_CH),
+ _recordingBuffer(NULL),
+ _playoutBuffer(NULL),
+ _recordingFramesLeft(0),
+ _playoutFramesLeft(0),
+ _initialized(false),
+ _recording(false),
+ _playing(false),
+ _recIsInitialized(false),
+ _playIsInitialized(false),
+ _recordingDelay(0),
+ _playoutDelay(0) {
+ memset(_oldKeyState, 0, sizeof(_oldKeyState));
+ RTC_DLOG(LS_INFO) << __FUNCTION__ << " created";
+}
+
+// ----------------------------------------------------------------------------
+// AudioDeviceLinuxALSA - dtor
+// ----------------------------------------------------------------------------
+
+AudioDeviceLinuxALSA::~AudioDeviceLinuxALSA() {
+ RTC_DLOG(LS_INFO) << __FUNCTION__ << " destroyed";
+
+ Terminate();
+
+ // Clean up the recording buffer and playout buffer.
+ if (_recordingBuffer) {
+ delete[] _recordingBuffer;
+ _recordingBuffer = NULL;
+ }
+ if (_playoutBuffer) {
+ delete[] _playoutBuffer;
+ _playoutBuffer = NULL;
+ }
+}
+
+void AudioDeviceLinuxALSA::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
+ MutexLock lock(&mutex_);
+
+ _ptrAudioBuffer = audioBuffer;
+
+ // Inform the AudioBuffer about default settings for this implementation.
+ // Set all values to zero here since the actual settings will be done by
+ // InitPlayout and InitRecording later.
+ _ptrAudioBuffer->SetRecordingSampleRate(0);
+ _ptrAudioBuffer->SetPlayoutSampleRate(0);
+ _ptrAudioBuffer->SetRecordingChannels(0);
+ _ptrAudioBuffer->SetPlayoutChannels(0);
+}
+
+int32_t AudioDeviceLinuxALSA::ActiveAudioLayer(
+ AudioDeviceModule::AudioLayer& audioLayer) const {
+ audioLayer = AudioDeviceModule::kLinuxAlsaAudio;
+ return 0;
+}
+
+AudioDeviceGeneric::InitStatus AudioDeviceLinuxALSA::Init() {
+ MutexLock lock(&mutex_);
+
+ // Load libasound
+ if (!GetAlsaSymbolTable()->Load()) {
+    // ALSA is not installed on this system.
+ RTC_LOG(LS_ERROR) << "failed to load symbol table";
+ return InitStatus::OTHER_ERROR;
+ }
+
+ if (_initialized) {
+ return InitStatus::OK;
+ }
+#if defined(WEBRTC_USE_X11)
+ // Get X display handle for typing detection
+ _XDisplay = XOpenDisplay(NULL);
+ if (!_XDisplay) {
+ RTC_LOG(LS_WARNING)
+ << "failed to open X display, typing detection will not work";
+ }
+#endif
+
+ _initialized = true;
+
+ return InitStatus::OK;
+}
+
+int32_t AudioDeviceLinuxALSA::Terminate() {
+ if (!_initialized) {
+ return 0;
+ }
+
+ MutexLock lock(&mutex_);
+
+ _mixerManager.Close();
+
+ // RECORDING
+ mutex_.Unlock();
+ _ptrThreadRec.Finalize();
+
+ // PLAYOUT
+ _ptrThreadPlay.Finalize();
+ mutex_.Lock();
+
+#if defined(WEBRTC_USE_X11)
+ if (_XDisplay) {
+ XCloseDisplay(_XDisplay);
+ _XDisplay = NULL;
+ }
+#endif
+ _initialized = false;
+ _outputDeviceIsSpecified = false;
+ _inputDeviceIsSpecified = false;
+
+ return 0;
+}
+
+bool AudioDeviceLinuxALSA::Initialized() const {
+ return (_initialized);
+}
+
+int32_t AudioDeviceLinuxALSA::InitSpeaker() {
+ MutexLock lock(&mutex_);
+ return InitSpeakerLocked();
+}
+
+int32_t AudioDeviceLinuxALSA::InitSpeakerLocked() {
+ if (_playing) {
+ return -1;
+ }
+
+ char devName[kAdmMaxDeviceNameSize] = {0};
+ GetDevicesInfo(2, true, _outputDeviceIndex, devName, kAdmMaxDeviceNameSize);
+ return _mixerManager.OpenSpeaker(devName);
+}
+
+int32_t AudioDeviceLinuxALSA::InitMicrophone() {
+ MutexLock lock(&mutex_);
+ return InitMicrophoneLocked();
+}
+
+int32_t AudioDeviceLinuxALSA::InitMicrophoneLocked() {
+ if (_recording) {
+ return -1;
+ }
+
+ char devName[kAdmMaxDeviceNameSize] = {0};
+ GetDevicesInfo(2, false, _inputDeviceIndex, devName, kAdmMaxDeviceNameSize);
+ return _mixerManager.OpenMicrophone(devName);
+}
+
+bool AudioDeviceLinuxALSA::SpeakerIsInitialized() const {
+ return (_mixerManager.SpeakerIsInitialized());
+}
+
+bool AudioDeviceLinuxALSA::MicrophoneIsInitialized() const {
+ return (_mixerManager.MicrophoneIsInitialized());
+}
+
+int32_t AudioDeviceLinuxALSA::SpeakerVolumeIsAvailable(bool& available) {
+ bool wasInitialized = _mixerManager.SpeakerIsInitialized();
+
+ // Make an attempt to open up the
+ // output mixer corresponding to the currently selected output device.
+ if (!wasInitialized && InitSpeaker() == -1) {
+ // If we end up here it means that the selected speaker has no volume
+ // control.
+ available = false;
+ return 0;
+ }
+
+ // Given that InitSpeaker was successful, we know that a volume control
+ // exists
+ available = true;
+
+ // Close the initialized output mixer
+ if (!wasInitialized) {
+ _mixerManager.CloseSpeaker();
+ }
+
+ return 0;
+}
+
+int32_t AudioDeviceLinuxALSA::SetSpeakerVolume(uint32_t volume) {
+ return (_mixerManager.SetSpeakerVolume(volume));
+}
+
+int32_t AudioDeviceLinuxALSA::SpeakerVolume(uint32_t& volume) const {
+ uint32_t level(0);
+
+ if (_mixerManager.SpeakerVolume(level) == -1) {
+ return -1;
+ }
+
+ volume = level;
+
+ return 0;
+}
+
+int32_t AudioDeviceLinuxALSA::MaxSpeakerVolume(uint32_t& maxVolume) const {
+ uint32_t maxVol(0);
+
+ if (_mixerManager.MaxSpeakerVolume(maxVol) == -1) {
+ return -1;
+ }
+
+ maxVolume = maxVol;
+
+ return 0;
+}
+
+int32_t AudioDeviceLinuxALSA::MinSpeakerVolume(uint32_t& minVolume) const {
+ uint32_t minVol(0);
+
+ if (_mixerManager.MinSpeakerVolume(minVol) == -1) {
+ return -1;
+ }
+
+ minVolume = minVol;
+
+ return 0;
+}
+
+int32_t AudioDeviceLinuxALSA::SpeakerMuteIsAvailable(bool& available) {
+ bool isAvailable(false);
+ bool wasInitialized = _mixerManager.SpeakerIsInitialized();
+
+ // Make an attempt to open up the
+ // output mixer corresponding to the currently selected output device.
+ //
+ if (!wasInitialized && InitSpeaker() == -1) {
+ // If we end up here it means that the selected speaker has no volume
+ // control, hence it is safe to state that there is no mute control
+ // already at this stage.
+ available = false;
+ return 0;
+ }
+
+ // Check if the selected speaker has a mute control
+ _mixerManager.SpeakerMuteIsAvailable(isAvailable);
+
+ available = isAvailable;
+
+ // Close the initialized output mixer
+ if (!wasInitialized) {
+ _mixerManager.CloseSpeaker();
+ }
+
+ return 0;
+}
+
+int32_t AudioDeviceLinuxALSA::SetSpeakerMute(bool enable) {
+ return (_mixerManager.SetSpeakerMute(enable));
+}
+
+int32_t AudioDeviceLinuxALSA::SpeakerMute(bool& enabled) const {
+ bool muted(0);
+
+ if (_mixerManager.SpeakerMute(muted) == -1) {
+ return -1;
+ }
+
+ enabled = muted;
+
+ return 0;
+}
+
+int32_t AudioDeviceLinuxALSA::MicrophoneMuteIsAvailable(bool& available) {
+ bool isAvailable(false);
+ bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
+
+ // Make an attempt to open up the
+ // input mixer corresponding to the currently selected input device.
+ //
+ if (!wasInitialized && InitMicrophone() == -1) {
+ // If we end up here it means that the selected microphone has no volume
+ // control, hence it is safe to state that there is no mute control
+ // already at this stage.
+ available = false;
+ return 0;
+ }
+
+ // Check if the selected microphone has a mute control
+ //
+ _mixerManager.MicrophoneMuteIsAvailable(isAvailable);
+ available = isAvailable;
+
+ // Close the initialized input mixer
+ //
+ if (!wasInitialized) {
+ _mixerManager.CloseMicrophone();
+ }
+
+ return 0;
+}
+
+int32_t AudioDeviceLinuxALSA::SetMicrophoneMute(bool enable) {
+ return (_mixerManager.SetMicrophoneMute(enable));
+}
+
+// ----------------------------------------------------------------------------
+// MicrophoneMute
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceLinuxALSA::MicrophoneMute(bool& enabled) const {
+ bool muted(0);
+
+ if (_mixerManager.MicrophoneMute(muted) == -1) {
+ return -1;
+ }
+
+ enabled = muted;
+ return 0;
+}
+
+int32_t AudioDeviceLinuxALSA::StereoRecordingIsAvailable(bool& available) {
+ MutexLock lock(&mutex_);
+
+  // If we have already initialized in stereo, it's obviously available.
+ if (_recIsInitialized && (2 == _recChannels)) {
+ available = true;
+ return 0;
+ }
+
+ // Save rec states and the number of rec channels
+ bool recIsInitialized = _recIsInitialized;
+ bool recording = _recording;
+ int recChannels = _recChannels;
+
+ available = false;
+
+ // Stop/uninitialize recording if initialized (and possibly started)
+ if (_recIsInitialized) {
+ StopRecordingLocked();
+ }
+
+  // Try to initialize in stereo.
+ _recChannels = 2;
+ if (InitRecordingLocked() == 0) {
+ available = true;
+ }
+
+ // Stop/uninitialize recording
+ StopRecordingLocked();
+
+ // Recover previous states
+ _recChannels = recChannels;
+ if (recIsInitialized) {
+ InitRecordingLocked();
+ }
+ if (recording) {
+ StartRecording();
+ }
+
+ return 0;
+}
+
+int32_t AudioDeviceLinuxALSA::SetStereoRecording(bool enable) {
+ if (enable)
+ _recChannels = 2;
+ else
+ _recChannels = 1;
+
+ return 0;
+}
+
+int32_t AudioDeviceLinuxALSA::StereoRecording(bool& enabled) const {
+ if (_recChannels == 2)
+ enabled = true;
+ else
+ enabled = false;
+
+ return 0;
+}
+
+int32_t AudioDeviceLinuxALSA::StereoPlayoutIsAvailable(bool& available) {
+ MutexLock lock(&mutex_);
+
+  // If we have already initialized in stereo, it's obviously available.
+ if (_playIsInitialized && (2 == _playChannels)) {
+ available = true;
+ return 0;
+ }
+
+  // Save playout states and the number of playout channels
+ bool playIsInitialized = _playIsInitialized;
+ bool playing = _playing;
+ int playChannels = _playChannels;
+
+ available = false;
+
+  // Stop/uninitialize playout if initialized (and possibly started)
+ if (_playIsInitialized) {
+ StopPlayoutLocked();
+ }
+
+  // Try to initialize in stereo.
+ _playChannels = 2;
+ if (InitPlayoutLocked() == 0) {
+ available = true;
+ }
+
+  // Stop/uninitialize playout
+ StopPlayoutLocked();
+
+ // Recover previous states
+ _playChannels = playChannels;
+ if (playIsInitialized) {
+ InitPlayoutLocked();
+ }
+ if (playing) {
+ StartPlayout();
+ }
+
+ return 0;
+}
+
+int32_t AudioDeviceLinuxALSA::SetStereoPlayout(bool enable) {
+ if (enable)
+ _playChannels = 2;
+ else
+ _playChannels = 1;
+
+ return 0;
+}
+
+int32_t AudioDeviceLinuxALSA::StereoPlayout(bool& enabled) const {
+ if (_playChannels == 2)
+ enabled = true;
+ else
+ enabled = false;
+
+ return 0;
+}
+
+int32_t AudioDeviceLinuxALSA::MicrophoneVolumeIsAvailable(bool& available) {
+ bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
+
+ // Make an attempt to open up the
+ // input mixer corresponding to the currently selected output device.
+ if (!wasInitialized && InitMicrophone() == -1) {
+ // If we end up here it means that the selected microphone has no volume
+ // control.
+ available = false;
+ return 0;
+ }
+
+ // Given that InitMicrophone was successful, we know that a volume control
+ // exists
+ available = true;
+
+ // Close the initialized input mixer
+ if (!wasInitialized) {
+ _mixerManager.CloseMicrophone();
+ }
+
+ return 0;
+}
+
+int32_t AudioDeviceLinuxALSA::SetMicrophoneVolume(uint32_t volume) {
+ return (_mixerManager.SetMicrophoneVolume(volume));
+}
+
+int32_t AudioDeviceLinuxALSA::MicrophoneVolume(uint32_t& volume) const {
+ uint32_t level(0);
+
+ if (_mixerManager.MicrophoneVolume(level) == -1) {
+ RTC_LOG(LS_WARNING) << "failed to retrive current microphone level";
+ return -1;
+ }
+
+ volume = level;
+
+ return 0;
+}
+
+int32_t AudioDeviceLinuxALSA::MaxMicrophoneVolume(uint32_t& maxVolume) const {
+ uint32_t maxVol(0);
+
+ if (_mixerManager.MaxMicrophoneVolume(maxVol) == -1) {
+ return -1;
+ }
+
+ maxVolume = maxVol;
+
+ return 0;
+}
+
+int32_t AudioDeviceLinuxALSA::MinMicrophoneVolume(uint32_t& minVolume) const {
+ uint32_t minVol(0);
+
+ if (_mixerManager.MinMicrophoneVolume(minVol) == -1) {
+ return -1;
+ }
+
+ minVolume = minVol;
+
+ return 0;
+}
+
+int16_t AudioDeviceLinuxALSA::PlayoutDevices() {
+ return (int16_t)GetDevicesInfo(0, true);
+}
+
+int32_t AudioDeviceLinuxALSA::SetPlayoutDevice(uint16_t index) {
+ if (_playIsInitialized) {
+ return -1;
+ }
+
+ uint32_t nDevices = GetDevicesInfo(0, true);
+ RTC_LOG(LS_VERBOSE) << "number of available audio output devices is "
+ << nDevices;
+
+ if (index > (nDevices - 1)) {
+ RTC_LOG(LS_ERROR) << "device index is out of range [0," << (nDevices - 1)
+ << "]";
+ return -1;
+ }
+
+ _outputDeviceIndex = index;
+ _outputDeviceIsSpecified = true;
+
+ return 0;
+}
+
+int32_t AudioDeviceLinuxALSA::SetPlayoutDevice(
+ AudioDeviceModule::WindowsDeviceType /*device*/) {
+ RTC_LOG(LS_ERROR) << "WindowsDeviceType not supported";
+ return -1;
+}
+
+int32_t AudioDeviceLinuxALSA::PlayoutDeviceName(
+ uint16_t index,
+ char name[kAdmMaxDeviceNameSize],
+ char guid[kAdmMaxGuidSize]) {
+ const uint16_t nDevices(PlayoutDevices());
+
+ if ((index > (nDevices - 1)) || (name == NULL)) {
+ return -1;
+ }
+
+ memset(name, 0, kAdmMaxDeviceNameSize);
+
+ if (guid != NULL) {
+ memset(guid, 0, kAdmMaxGuidSize);
+ }
+
+ return GetDevicesInfo(1, true, index, name, kAdmMaxDeviceNameSize);
+}
+
+int32_t AudioDeviceLinuxALSA::RecordingDeviceName(
+ uint16_t index,
+ char name[kAdmMaxDeviceNameSize],
+ char guid[kAdmMaxGuidSize]) {
+ const uint16_t nDevices(RecordingDevices());
+
+ if ((index > (nDevices - 1)) || (name == NULL)) {
+ return -1;
+ }
+
+ memset(name, 0, kAdmMaxDeviceNameSize);
+
+ if (guid != NULL) {
+ memset(guid, 0, kAdmMaxGuidSize);
+ }
+
+ return GetDevicesInfo(1, false, index, name, kAdmMaxDeviceNameSize);
+}
+
+int16_t AudioDeviceLinuxALSA::RecordingDevices() {
+ return (int16_t)GetDevicesInfo(0, false);
+}
+
+int32_t AudioDeviceLinuxALSA::SetRecordingDevice(uint16_t index) {
+ if (_recIsInitialized) {
+ return -1;
+ }
+
+ uint32_t nDevices = GetDevicesInfo(0, false);
+ RTC_LOG(LS_VERBOSE) << "number of availiable audio input devices is "
+ << nDevices;
+
+ if (index > (nDevices - 1)) {
+ RTC_LOG(LS_ERROR) << "device index is out of range [0," << (nDevices - 1)
+ << "]";
+ return -1;
+ }
+
+ _inputDeviceIndex = index;
+ _inputDeviceIsSpecified = true;
+
+ return 0;
+}
+
+// ----------------------------------------------------------------------------
+// SetRecordingDevice II (II)
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceLinuxALSA::SetRecordingDevice(
+ AudioDeviceModule::WindowsDeviceType /*device*/) {
+ RTC_LOG(LS_ERROR) << "WindowsDeviceType not supported";
+ return -1;
+}
+
+int32_t AudioDeviceLinuxALSA::PlayoutIsAvailable(bool& available) {
+ available = false;
+
+  // Try to initialize the playout side in mono.
+  // Assumes that the user will set the number of channels after calling this
+  // function.
+ _playChannels = 1;
+ int32_t res = InitPlayout();
+
+ // Cancel effect of initialization
+ StopPlayout();
+
+ if (res != -1) {
+ available = true;
+ } else {
+ // It may be possible to play out in stereo
+ res = StereoPlayoutIsAvailable(available);
+ if (available) {
+ // Then set channels to 2 so InitPlayout doesn't fail
+ _playChannels = 2;
+ }
+ }
+
+ return res;
+}
+
+int32_t AudioDeviceLinuxALSA::RecordingIsAvailable(bool& available) {
+ available = false;
+
+  // Try to initialize the recording side in mono.
+  // Assumes that the user will set the number of channels after calling this
+  // function.
+ _recChannels = 1;
+ int32_t res = InitRecording();
+
+ // Cancel effect of initialization
+ StopRecording();
+
+ if (res != -1) {
+ available = true;
+ } else {
+ // It may be possible to record in stereo
+ res = StereoRecordingIsAvailable(available);
+ if (available) {
+      // Then set channels to 2 so InitRecording doesn't fail
+ _recChannels = 2;
+ }
+ }
+
+ return res;
+}
+
+int32_t AudioDeviceLinuxALSA::InitPlayout() {
+ MutexLock lock(&mutex_);
+ return InitPlayoutLocked();
+}
+
+int32_t AudioDeviceLinuxALSA::InitPlayoutLocked() {
+ int errVal = 0;
+
+ if (_playing) {
+ return -1;
+ }
+
+ if (!_outputDeviceIsSpecified) {
+ return -1;
+ }
+
+ if (_playIsInitialized) {
+ return 0;
+ }
+ // Initialize the speaker (devices might have been added or removed)
+ if (InitSpeakerLocked() == -1) {
+ RTC_LOG(LS_WARNING) << "InitSpeaker() failed";
+ }
+
+ // Start by closing any existing wave-output devices
+ //
+ if (_handlePlayout != NULL) {
+    errVal = LATE(snd_pcm_close)(_handlePlayout);
+ _handlePlayout = NULL;
+ _playIsInitialized = false;
+ if (errVal < 0) {
+ RTC_LOG(LS_ERROR) << "Error closing current playout sound device, error: "
+ << LATE(snd_strerror)(errVal);
+ }
+ }
+
+ // Open PCM device for playout
+ char deviceName[kAdmMaxDeviceNameSize] = {0};
+ GetDevicesInfo(2, true, _outputDeviceIndex, deviceName,
+ kAdmMaxDeviceNameSize);
+
+ RTC_LOG(LS_VERBOSE) << "InitPlayout open (" << deviceName << ")";
+
+ errVal = LATE(snd_pcm_open)(&_handlePlayout, deviceName,
+ SND_PCM_STREAM_PLAYBACK, SND_PCM_NONBLOCK);
+
+ if (errVal == -EBUSY) // Device busy - try some more!
+ {
+ for (int i = 0; i < 5; i++) {
+ SleepMs(1000);
+ errVal = LATE(snd_pcm_open)(&_handlePlayout, deviceName,
+ SND_PCM_STREAM_PLAYBACK, SND_PCM_NONBLOCK);
+ if (errVal == 0) {
+ break;
+ }
+ }
+ }
+ if (errVal < 0) {
+ RTC_LOG(LS_ERROR) << "unable to open playback device: "
+ << LATE(snd_strerror)(errVal) << " (" << errVal << ")";
+ _handlePlayout = NULL;
+ return -1;
+ }
+
+ _playoutFramesIn10MS = _playoutFreq / 100;
+ if ((errVal = LATE(snd_pcm_set_params)(
+ _handlePlayout,
+#if defined(WEBRTC_ARCH_BIG_ENDIAN)
+ SND_PCM_FORMAT_S16_BE,
+#else
+ SND_PCM_FORMAT_S16_LE, // format
+#endif
+ SND_PCM_ACCESS_RW_INTERLEAVED, // access
+ _playChannels, // channels
+ _playoutFreq, // rate
+ 1, // soft_resample
+          ALSA_PLAYOUT_LATENCY  // required overall latency in us
+          )) < 0) {
+ _playoutFramesIn10MS = 0;
+ RTC_LOG(LS_ERROR) << "unable to set playback device: "
+ << LATE(snd_strerror)(errVal) << " (" << errVal << ")";
+ ErrorRecovery(errVal, _handlePlayout);
+ errVal = LATE(snd_pcm_close)(_handlePlayout);
+ _handlePlayout = NULL;
+ return -1;
+ }
+
+ errVal = LATE(snd_pcm_get_params)(_handlePlayout, &_playoutBufferSizeInFrame,
+ &_playoutPeriodSizeInFrame);
+ if (errVal < 0) {
+ RTC_LOG(LS_ERROR) << "snd_pcm_get_params: " << LATE(snd_strerror)(errVal)
+ << " (" << errVal << ")";
+ _playoutBufferSizeInFrame = 0;
+ _playoutPeriodSizeInFrame = 0;
+ } else {
+ RTC_LOG(LS_VERBOSE) << "playout snd_pcm_get_params buffer_size:"
+ << _playoutBufferSizeInFrame
+ << " period_size :" << _playoutPeriodSizeInFrame;
+ }
+
+ if (_ptrAudioBuffer) {
+ // Update webrtc audio buffer with the selected parameters
+ _ptrAudioBuffer->SetPlayoutSampleRate(_playoutFreq);
+ _ptrAudioBuffer->SetPlayoutChannels(_playChannels);
+ }
+
+ // Set play buffer size
+ _playoutBufferSizeIn10MS =
+ LATE(snd_pcm_frames_to_bytes)(_handlePlayout, _playoutFramesIn10MS);
+
+  // Init variables used for playout.
+
+ if (_handlePlayout != NULL) {
+ _playIsInitialized = true;
+ return 0;
+ } else {
+ return -1;
+ }
+}
+
+int32_t AudioDeviceLinuxALSA::InitRecording() {
+ MutexLock lock(&mutex_);
+ return InitRecordingLocked();
+}
+
+int32_t AudioDeviceLinuxALSA::InitRecordingLocked() {
+ int errVal = 0;
+
+ if (_recording) {
+ return -1;
+ }
+
+ if (!_inputDeviceIsSpecified) {
+ return -1;
+ }
+
+ if (_recIsInitialized) {
+ return 0;
+ }
+
+ // Initialize the microphone (devices might have been added or removed)
+ if (InitMicrophoneLocked() == -1) {
+ RTC_LOG(LS_WARNING) << "InitMicrophone() failed";
+ }
+
+ // Start by closing any existing pcm-input devices
+ //
+ if (_handleRecord != NULL) {
+ int errVal = LATE(snd_pcm_close)(_handleRecord);
+ _handleRecord = NULL;
+ _recIsInitialized = false;
+ if (errVal < 0) {
+ RTC_LOG(LS_ERROR)
+ << "Error closing current recording sound device, error: "
+ << LATE(snd_strerror)(errVal);
+ }
+ }
+
+ // Open PCM device for recording
+ // The corresponding settings for playout are made after the record settings
+ char deviceName[kAdmMaxDeviceNameSize] = {0};
+ GetDevicesInfo(2, false, _inputDeviceIndex, deviceName,
+ kAdmMaxDeviceNameSize);
+
+ RTC_LOG(LS_VERBOSE) << "InitRecording open (" << deviceName << ")";
+ errVal = LATE(snd_pcm_open)(&_handleRecord, deviceName,
+ SND_PCM_STREAM_CAPTURE, SND_PCM_NONBLOCK);
+
+ // Available modes: 0 = blocking, SND_PCM_NONBLOCK, SND_PCM_ASYNC
+ if (errVal == -EBUSY) // Device busy - try some more!
+ {
+ for (int i = 0; i < 5; i++) {
+ SleepMs(1000);
+ errVal = LATE(snd_pcm_open)(&_handleRecord, deviceName,
+ SND_PCM_STREAM_CAPTURE, SND_PCM_NONBLOCK);
+ if (errVal == 0) {
+ break;
+ }
+ }
+ }
+ if (errVal < 0) {
+ RTC_LOG(LS_ERROR) << "unable to open record device: "
+ << LATE(snd_strerror)(errVal);
+ _handleRecord = NULL;
+ return -1;
+ }
+
+ _recordingFramesIn10MS = _recordingFreq / 100;
+ if ((errVal =
+ LATE(snd_pcm_set_params)(_handleRecord,
+#if defined(WEBRTC_ARCH_BIG_ENDIAN)
+ SND_PCM_FORMAT_S16_BE, // format
+#else
+ SND_PCM_FORMAT_S16_LE, // format
+#endif
+ SND_PCM_ACCESS_RW_INTERLEAVED, // access
+ _recChannels, // channels
+ _recordingFreq, // rate
+ 1, // soft_resample
+ ALSA_CAPTURE_LATENCY // latency in us
+ )) < 0) {
+ // Fall back to another mode then.
+ if (_recChannels == 1)
+ _recChannels = 2;
+ else
+ _recChannels = 1;
+
+ if ((errVal =
+ LATE(snd_pcm_set_params)(_handleRecord,
+#if defined(WEBRTC_ARCH_BIG_ENDIAN)
+ SND_PCM_FORMAT_S16_BE, // format
+#else
+ SND_PCM_FORMAT_S16_LE, // format
+#endif
+ SND_PCM_ACCESS_RW_INTERLEAVED, // access
+ _recChannels, // channels
+ _recordingFreq, // rate
+ 1, // soft_resample
+ ALSA_CAPTURE_LATENCY // latency in us
+ )) < 0) {
+ _recordingFramesIn10MS = 0;
+ RTC_LOG(LS_ERROR) << "unable to set record settings: "
+ << LATE(snd_strerror)(errVal) << " (" << errVal << ")";
+ ErrorRecovery(errVal, _handleRecord);
+ errVal = LATE(snd_pcm_close)(_handleRecord);
+ _handleRecord = NULL;
+ return -1;
+ }
+ }
+
+ errVal = LATE(snd_pcm_get_params)(_handleRecord, &_recordingBuffersizeInFrame,
+ &_recordingPeriodSizeInFrame);
+ if (errVal < 0) {
+ RTC_LOG(LS_ERROR) << "snd_pcm_get_params " << LATE(snd_strerror)(errVal)
+ << " (" << errVal << ")";
+ _recordingBuffersizeInFrame = 0;
+ _recordingPeriodSizeInFrame = 0;
+ } else {
+ RTC_LOG(LS_VERBOSE) << "capture snd_pcm_get_params, buffer_size:"
+ << _recordingBuffersizeInFrame
+ << ", period_size:" << _recordingPeriodSizeInFrame;
+ }
+
+ if (_ptrAudioBuffer) {
+ // Update webrtc audio buffer with the selected parameters
+ _ptrAudioBuffer->SetRecordingSampleRate(_recordingFreq);
+ _ptrAudioBuffer->SetRecordingChannels(_recChannels);
+ }
+
+ // Set rec buffer size and create buffer
+ _recordingBufferSizeIn10MS =
+ LATE(snd_pcm_frames_to_bytes)(_handleRecord, _recordingFramesIn10MS);
+
+ if (_handleRecord != NULL) {
+ // Mark recording side as initialized
+ _recIsInitialized = true;
+ return 0;
+ } else {
+ return -1;
+ }
+}
+
+int32_t AudioDeviceLinuxALSA::StartRecording() {
+ if (!_recIsInitialized) {
+ return -1;
+ }
+
+ if (_recording) {
+ return 0;
+ }
+
+ _recording = true;
+
+ int errVal = 0;
+ _recordingFramesLeft = _recordingFramesIn10MS;
+
+ // Make sure we only create the buffer once.
+ if (!_recordingBuffer)
+ _recordingBuffer = new int8_t[_recordingBufferSizeIn10MS];
+ if (!_recordingBuffer) {
+ RTC_LOG(LS_ERROR) << "failed to alloc recording buffer";
+ _recording = false;
+ return -1;
+ }
+ // RECORDING
+ _ptrThreadRec = rtc::PlatformThread::SpawnJoinable(
+ [this] {
+ while (RecThreadProcess()) {
+ }
+ },
+ "webrtc_audio_module_capture_thread",
+ rtc::ThreadAttributes().SetPriority(rtc::ThreadPriority::kRealtime));
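+  // The capture thread keeps invoking RecThreadProcess() until it returns
+  // false, i.e. until _recording is cleared by StopRecording().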
+
+ errVal = LATE(snd_pcm_prepare)(_handleRecord);
+ if (errVal < 0) {
+ RTC_LOG(LS_ERROR) << "capture snd_pcm_prepare failed ("
+ << LATE(snd_strerror)(errVal) << ")\n";
+    // Just log the error here; if the snd_pcm_start call below also fails,
+    // we return -1.
+ }
+
+ errVal = LATE(snd_pcm_start)(_handleRecord);
+ if (errVal < 0) {
+ RTC_LOG(LS_ERROR) << "capture snd_pcm_start err: "
+ << LATE(snd_strerror)(errVal);
+ errVal = LATE(snd_pcm_start)(_handleRecord);
+ if (errVal < 0) {
+ RTC_LOG(LS_ERROR) << "capture snd_pcm_start 2nd try err: "
+ << LATE(snd_strerror)(errVal);
+ StopRecording();
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+int32_t AudioDeviceLinuxALSA::StopRecording() {
+ MutexLock lock(&mutex_);
+ return StopRecordingLocked();
+}
+
+int32_t AudioDeviceLinuxALSA::StopRecordingLocked() {
+ if (!_recIsInitialized) {
+ return 0;
+ }
+
+ if (_handleRecord == NULL) {
+ return -1;
+ }
+
+ // Make sure we don't start recording (it's asynchronous).
+ _recIsInitialized = false;
+ _recording = false;
+
+ _ptrThreadRec.Finalize();
+
+ _recordingFramesLeft = 0;
+ if (_recordingBuffer) {
+ delete[] _recordingBuffer;
+ _recordingBuffer = NULL;
+ }
+
+ // Stop and close pcm recording device.
+ int errVal = LATE(snd_pcm_drop)(_handleRecord);
+ if (errVal < 0) {
+ RTC_LOG(LS_ERROR) << "Error stop recording: " << LATE(snd_strerror)(errVal);
+ return -1;
+ }
+
+ errVal = LATE(snd_pcm_close)(_handleRecord);
+ if (errVal < 0) {
+ RTC_LOG(LS_ERROR) << "Error closing record sound device, error: "
+ << LATE(snd_strerror)(errVal);
+ return -1;
+ }
+
+ // Check if we have muted and unmute if so.
+ bool muteEnabled = false;
+ MicrophoneMute(muteEnabled);
+ if (muteEnabled) {
+ SetMicrophoneMute(false);
+ }
+
+ // set the pcm input handle to NULL
+ _handleRecord = NULL;
+ return 0;
+}
+
+bool AudioDeviceLinuxALSA::RecordingIsInitialized() const {
+ return (_recIsInitialized);
+}
+
+bool AudioDeviceLinuxALSA::Recording() const {
+ return (_recording);
+}
+
+bool AudioDeviceLinuxALSA::PlayoutIsInitialized() const {
+ return (_playIsInitialized);
+}
+
+int32_t AudioDeviceLinuxALSA::StartPlayout() {
+ if (!_playIsInitialized) {
+ return -1;
+ }
+
+ if (_playing) {
+ return 0;
+ }
+
+ _playing = true;
+
+ _playoutFramesLeft = 0;
+ if (!_playoutBuffer)
+ _playoutBuffer = new int8_t[_playoutBufferSizeIn10MS];
+ if (!_playoutBuffer) {
+ RTC_LOG(LS_ERROR) << "failed to alloc playout buf";
+ _playing = false;
+ return -1;
+ }
+
+ // PLAYOUT
+ _ptrThreadPlay = rtc::PlatformThread::SpawnJoinable(
+ [this] {
+ while (PlayThreadProcess()) {
+ }
+ },
+ "webrtc_audio_module_play_thread",
+ rtc::ThreadAttributes().SetPriority(rtc::ThreadPriority::kRealtime));
+
+ int errVal = LATE(snd_pcm_prepare)(_handlePlayout);
+ if (errVal < 0) {
+ RTC_LOG(LS_ERROR) << "playout snd_pcm_prepare failed ("
+ << LATE(snd_strerror)(errVal) << ")\n";
+    // Just log the error here; write failures in the playout thread will
+    // trigger ErrorRecovery().
+ }
+
+ return 0;
+}
+
+int32_t AudioDeviceLinuxALSA::StopPlayout() {
+ MutexLock lock(&mutex_);
+ return StopPlayoutLocked();
+}
+
+int32_t AudioDeviceLinuxALSA::StopPlayoutLocked() {
+ if (!_playIsInitialized) {
+ return 0;
+ }
+
+ if (_handlePlayout == NULL) {
+ return -1;
+ }
+
+ _playing = false;
+
+ // stop playout thread first
+ _ptrThreadPlay.Finalize();
+
+ _playoutFramesLeft = 0;
+ delete[] _playoutBuffer;
+ _playoutBuffer = NULL;
+
+ // stop and close pcm playout device
+ int errVal = LATE(snd_pcm_drop)(_handlePlayout);
+ if (errVal < 0) {
+ RTC_LOG(LS_ERROR) << "Error stop playing: " << LATE(snd_strerror)(errVal);
+ }
+
+ errVal = LATE(snd_pcm_close)(_handlePlayout);
+ if (errVal < 0)
+ RTC_LOG(LS_ERROR) << "Error closing playout sound device, error: "
+ << LATE(snd_strerror)(errVal);
+
+ // set the pcm input handle to NULL
+ _playIsInitialized = false;
+ _handlePlayout = NULL;
+ RTC_LOG(LS_VERBOSE) << "handle_playout is now set to NULL";
+
+ return 0;
+}
+
+int32_t AudioDeviceLinuxALSA::PlayoutDelay(uint16_t& delayMS) const {
+  delayMS = (uint16_t)(_playoutDelay * 1000 / _playoutFreq);  // frames -> ms
+ return 0;
+}
+
+bool AudioDeviceLinuxALSA::Playing() const {
+ return (_playing);
+}
+
+// ============================================================================
+// Private Methods
+// ============================================================================
+
+int32_t AudioDeviceLinuxALSA::GetDevicesInfo(const int32_t function,
+ const bool playback,
+ const int32_t enumDeviceNo,
+ char* enumDeviceName,
+ const int32_t ednLen) const {
+ // Device enumeration based on libjingle implementation
+ // by Tristan Schmelcher at Google Inc.
+
+ const char* type = playback ? "Output" : "Input";
+ // dmix and dsnoop are only for playback and capture, respectively, but ALSA
+ // stupidly includes them in both lists.
+ const char* ignorePrefix = playback ? "dsnoop:" : "dmix:";
+ // (ALSA lists many more "devices" of questionable interest, but we show them
+ // just in case the weird devices may actually be desirable for some
+ // users/systems.)
+
+ int err;
+ int enumCount(0);
+ bool keepSearching(true);
+
+ // From Chromium issue 95797
+  // Loop through the sound cards to get ALSA device hints.
+  // Don't use snd_device_name_hint(-1,..) since there is an access violation
+  // inside this ALSA API with libasound.so.2.0.0.
+ int card = -1;
+ while (!(LATE(snd_card_next)(&card)) && (card >= 0) && keepSearching) {
+ void** hints;
+ err = LATE(snd_device_name_hint)(card, "pcm", &hints);
+ if (err != 0) {
+ RTC_LOG(LS_ERROR) << "GetDevicesInfo - device name hint error: "
+ << LATE(snd_strerror)(err);
+ return -1;
+ }
+
+    enumCount++;  // the "default" device is counted as index 0
+ if ((function == FUNC_GET_DEVICE_NAME ||
+ function == FUNC_GET_DEVICE_NAME_FOR_AN_ENUM) &&
+ enumDeviceNo == 0) {
+ strcpy(enumDeviceName, "default");
+
+ err = LATE(snd_device_name_free_hint)(hints);
+ if (err != 0) {
+ RTC_LOG(LS_ERROR) << "GetDevicesInfo - device name free hint error: "
+ << LATE(snd_strerror)(err);
+ }
+
+ return 0;
+ }
+
+ for (void** list = hints; *list != NULL; ++list) {
+ char* actualType = LATE(snd_device_name_get_hint)(*list, "IOID");
+ if (actualType) { // NULL means it's both.
+ bool wrongType = (strcmp(actualType, type) != 0);
+ free(actualType);
+ if (wrongType) {
+ // Wrong type of device (i.e., input vs. output).
+ continue;
+ }
+ }
+
+ char* name = LATE(snd_device_name_get_hint)(*list, "NAME");
+ if (!name) {
+ RTC_LOG(LS_ERROR) << "Device has no name";
+ // Skip it.
+ continue;
+ }
+
+ // Now check if we actually want to show this device.
+ if (strcmp(name, "default") != 0 && strcmp(name, "null") != 0 &&
+ strcmp(name, "pulse") != 0 &&
+ strncmp(name, ignorePrefix, strlen(ignorePrefix)) != 0) {
+ // Yes, we do.
+ char* desc = LATE(snd_device_name_get_hint)(*list, "DESC");
+ if (!desc) {
+ // Virtual devices don't necessarily have descriptions.
+ // Use their names instead.
+ desc = name;
+ }
+
+ if (FUNC_GET_NUM_OF_DEVICE == function) {
+ RTC_LOG(LS_VERBOSE) << "Enum device " << enumCount << " - " << name;
+ }
+ if ((FUNC_GET_DEVICE_NAME == function) && (enumDeviceNo == enumCount)) {
+ // We have found the enum device, copy the name to buffer.
+ strncpy(enumDeviceName, desc, ednLen);
+ enumDeviceName[ednLen - 1] = '\0';
+ keepSearching = false;
+ // Replace '\n' with '-'.
+ char* pret = strchr(enumDeviceName, '\n' /*0xa*/); // LF
+ if (pret)
+ *pret = '-';
+ }
+ if ((FUNC_GET_DEVICE_NAME_FOR_AN_ENUM == function) &&
+ (enumDeviceNo == enumCount)) {
+ // We have found the enum device, copy the name to buffer.
+ strncpy(enumDeviceName, name, ednLen);
+ enumDeviceName[ednLen - 1] = '\0';
+ keepSearching = false;
+ }
+
+ if (keepSearching)
+ ++enumCount;
+
+ if (desc != name)
+ free(desc);
+ }
+
+ free(name);
+
+ if (!keepSearching)
+ break;
+ }
+
+ err = LATE(snd_device_name_free_hint)(hints);
+ if (err != 0) {
+ RTC_LOG(LS_ERROR) << "GetDevicesInfo - device name free hint error: "
+ << LATE(snd_strerror)(err);
+      // Continue anyway, since we did get the whole list.
+ }
+ }
+
+ if (FUNC_GET_NUM_OF_DEVICE == function) {
+ if (enumCount == 1) // only default?
+ enumCount = 0;
+ return enumCount; // Normal return point for function 0
+ }
+
+ if (keepSearching) {
+ // If we get here for function 1 and 2, we didn't find the specified
+ // enum device.
+ RTC_LOG(LS_ERROR)
+ << "GetDevicesInfo - Could not find device name or numbers";
+ return -1;
+ }
+
+ return 0;
+}
+
+int32_t AudioDeviceLinuxALSA::InputSanityCheckAfterUnlockedPeriod() const {
+ if (_handleRecord == NULL) {
+ RTC_LOG(LS_ERROR) << "input state has been modified during unlocked period";
+ return -1;
+ }
+ return 0;
+}
+
+int32_t AudioDeviceLinuxALSA::OutputSanityCheckAfterUnlockedPeriod() const {
+ if (_handlePlayout == NULL) {
+ RTC_LOG(LS_ERROR)
+ << "output state has been modified during unlocked period";
+ return -1;
+ }
+ return 0;
+}
+
+int32_t AudioDeviceLinuxALSA::ErrorRecovery(int32_t error,
+ snd_pcm_t* deviceHandle) {
+ int st = LATE(snd_pcm_state)(deviceHandle);
+ RTC_LOG(LS_VERBOSE) << "Trying to recover from "
+ << ((LATE(snd_pcm_stream)(deviceHandle) ==
+ SND_PCM_STREAM_CAPTURE)
+ ? "capture"
+ : "playout")
+ << " error: " << LATE(snd_strerror)(error) << " ("
+ << error << ") (state " << st << ")";
+
+ // It is recommended to use snd_pcm_recover for all errors. If that function
+ // cannot handle the error, the input error code will be returned, otherwise
+ // 0 is returned. From snd_pcm_recover API doc: "This functions handles
+ // -EINTR (4) (interrupted system call), -EPIPE (32) (playout overrun or
+ // capture underrun) and -ESTRPIPE (86) (stream is suspended) error codes
+ // trying to prepare given stream for next I/O."
+
+ /** Open */
+ // SND_PCM_STATE_OPEN = 0,
+ /** Setup installed */
+ // SND_PCM_STATE_SETUP,
+ /** Ready to start */
+ // SND_PCM_STATE_PREPARED,
+ /** Running */
+ // SND_PCM_STATE_RUNNING,
+ /** Stopped: underrun (playback) or overrun (capture) detected */
+ // SND_PCM_STATE_XRUN,= 4
+ /** Draining: running (playback) or stopped (capture) */
+ // SND_PCM_STATE_DRAINING,
+ /** Paused */
+ // SND_PCM_STATE_PAUSED,
+ /** Hardware is suspended */
+ // SND_PCM_STATE_SUSPENDED,
+  /** Hardware is disconnected */
+ // SND_PCM_STATE_DISCONNECTED,
+ // SND_PCM_STATE_LAST = SND_PCM_STATE_DISCONNECTED
+
+ // snd_pcm_recover isn't available in older alsa, e.g. on the FC4 machine
+ // in Sthlm lab.
+
+ int res = LATE(snd_pcm_recover)(deviceHandle, error, 1);
+ if (0 == res) {
+ RTC_LOG(LS_VERBOSE) << "Recovery - snd_pcm_recover OK";
+
+ if ((error == -EPIPE || error == -ESTRPIPE) && // Buf underrun/overrun.
+ _recording &&
+ LATE(snd_pcm_stream)(deviceHandle) == SND_PCM_STREAM_CAPTURE) {
+ // For capture streams we also have to repeat the explicit start()
+ // to get data flowing again.
+ int err = LATE(snd_pcm_start)(deviceHandle);
+ if (err != 0) {
+ RTC_LOG(LS_ERROR) << "Recovery - snd_pcm_start error: " << err;
+ return -1;
+ }
+ }
+
+ if ((error == -EPIPE || error == -ESTRPIPE) && // Buf underrun/overrun.
+ _playing &&
+ LATE(snd_pcm_stream)(deviceHandle) == SND_PCM_STREAM_PLAYBACK) {
+      // For playback streams we likewise have to repeat the explicit start()
+      // to get data flowing again.
+ int err = LATE(snd_pcm_start)(deviceHandle);
+ if (err != 0) {
+ RTC_LOG(LS_ERROR) << "Recovery - snd_pcm_start error: "
+ << LATE(snd_strerror)(err);
+ return -1;
+ }
+ }
+
+ return -EPIPE == error ? 1 : 0;
+ } else {
+ RTC_LOG(LS_ERROR) << "Unrecoverable alsa stream error: " << res;
+ }
+
+ return res;
+}
+
+// ============================================================================
+// Thread Methods
+// ============================================================================
+
+bool AudioDeviceLinuxALSA::PlayThreadProcess() {
+ if (!_playing)
+ return false;
+
+ int err;
+ snd_pcm_sframes_t frames;
+ snd_pcm_sframes_t avail_frames;
+
+ Lock();
+  // snd_pcm_avail_update() returns the number of frames ready, otherwise a
+  // negative error code.
+ avail_frames = LATE(snd_pcm_avail_update)(_handlePlayout);
+ if (avail_frames < 0) {
+ RTC_LOG(LS_ERROR) << "playout snd_pcm_avail_update error: "
+ << LATE(snd_strerror)(avail_frames);
+ ErrorRecovery(avail_frames, _handlePlayout);
+ UnLock();
+ return true;
+ } else if (avail_frames == 0) {
+ UnLock();
+
+    // Maximum time in milliseconds to wait; a negative value means infinity.
+    err = LATE(snd_pcm_wait)(_handlePlayout, 2);
+    if (err == 0) { // timeout occurred
+ RTC_LOG(LS_VERBOSE) << "playout snd_pcm_wait timeout";
+ }
+
+ return true;
+ }
+
+ if (_playoutFramesLeft <= 0) {
+ UnLock();
+ _ptrAudioBuffer->RequestPlayoutData(_playoutFramesIn10MS);
+ Lock();
+
+ _playoutFramesLeft = _ptrAudioBuffer->GetPlayoutData(_playoutBuffer);
+ RTC_DCHECK_EQ(_playoutFramesLeft, _playoutFramesIn10MS);
+ }
+
+ if (static_cast<uint32_t>(avail_frames) > _playoutFramesLeft)
+ avail_frames = _playoutFramesLeft;
+
+ int size = LATE(snd_pcm_frames_to_bytes)(_handlePlayout, _playoutFramesLeft);
+ frames = LATE(snd_pcm_writei)(
+ _handlePlayout, &_playoutBuffer[_playoutBufferSizeIn10MS - size],
+ avail_frames);
+
+ if (frames < 0) {
+ RTC_LOG(LS_VERBOSE) << "playout snd_pcm_writei error: "
+ << LATE(snd_strerror)(frames);
+ _playoutFramesLeft = 0;
+ ErrorRecovery(frames, _handlePlayout);
+ UnLock();
+ return true;
+ } else {
+ RTC_DCHECK_EQ(frames, avail_frames);
+ _playoutFramesLeft -= frames;
+ }
+
+ UnLock();
+ return true;
+}
+
+bool AudioDeviceLinuxALSA::RecThreadProcess() {
+ if (!_recording)
+ return false;
+
+ int err;
+ snd_pcm_sframes_t frames;
+ snd_pcm_sframes_t avail_frames;
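+  // Note: the buffer below is a variable-length array (a GCC/Clang extension
+  // in C++); its size is one 10 ms chunk of bytes, fixed at InitRecording()
+  // time.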
+ int8_t buffer[_recordingBufferSizeIn10MS];
+
+ Lock();
+
+  // snd_pcm_avail_update() returns the number of frames ready, otherwise a
+  // negative error code.
+ avail_frames = LATE(snd_pcm_avail_update)(_handleRecord);
+ if (avail_frames < 0) {
+ RTC_LOG(LS_ERROR) << "capture snd_pcm_avail_update error: "
+ << LATE(snd_strerror)(avail_frames);
+ ErrorRecovery(avail_frames, _handleRecord);
+ UnLock();
+ return true;
+ } else if (avail_frames == 0) { // no frame is available now
+ UnLock();
+
+ // maximum time in milliseconds to wait, a negative value means infinity
+ err = LATE(snd_pcm_wait)(_handleRecord, ALSA_CAPTURE_WAIT_TIMEOUT);
+    if (err == 0) // timeout occurred
+ RTC_LOG(LS_VERBOSE) << "capture snd_pcm_wait timeout";
+
+ return true;
+ }
+
+ if (static_cast<uint32_t>(avail_frames) > _recordingFramesLeft)
+ avail_frames = _recordingFramesLeft;
+
+  frames = LATE(snd_pcm_readi)(_handleRecord, buffer,
+                               avail_frames); // returns frames actually read
+ if (frames < 0) {
+ RTC_LOG(LS_ERROR) << "capture snd_pcm_readi error: "
+ << LATE(snd_strerror)(frames);
+ ErrorRecovery(frames, _handleRecord);
+ UnLock();
+ return true;
+ } else if (frames > 0) {
+ RTC_DCHECK_EQ(frames, avail_frames);
+
+ int left_size =
+ LATE(snd_pcm_frames_to_bytes)(_handleRecord, _recordingFramesLeft);
+ int size = LATE(snd_pcm_frames_to_bytes)(_handleRecord, frames);
+
+ memcpy(&_recordingBuffer[_recordingBufferSizeIn10MS - left_size], buffer,
+ size);
+ _recordingFramesLeft -= frames;
+
+ if (!_recordingFramesLeft) { // buf is full
+ _recordingFramesLeft = _recordingFramesIn10MS;
+
+      // Store the recorded buffer (no action will be taken if the number of
+      // recorded samples does not make up a full buffer).
+ _ptrAudioBuffer->SetRecordedBuffer(_recordingBuffer,
+ _recordingFramesIn10MS);
+
+ // calculate delay
+ _playoutDelay = 0;
+ _recordingDelay = 0;
+ if (_handlePlayout) {
+ err = LATE(snd_pcm_delay)(_handlePlayout,
+ &_playoutDelay); // returned delay in frames
+ if (err < 0) {
+ // TODO(xians): Shall we call ErrorRecovery() here?
+ _playoutDelay = 0;
+ RTC_LOG(LS_ERROR)
+ << "playout snd_pcm_delay: " << LATE(snd_strerror)(err);
+ }
+ }
+
+ err = LATE(snd_pcm_delay)(_handleRecord,
+ &_recordingDelay); // returned delay in frames
+ if (err < 0) {
+ // TODO(xians): Shall we call ErrorRecovery() here?
+ _recordingDelay = 0;
+ RTC_LOG(LS_ERROR) << "capture snd_pcm_delay: "
+ << LATE(snd_strerror)(err);
+ }
+
+ // TODO(xians): Shall we add 10ms buffer delay to the record delay?
+ _ptrAudioBuffer->SetVQEData(_playoutDelay * 1000 / _playoutFreq,
+ _recordingDelay * 1000 / _recordingFreq);
+
+ _ptrAudioBuffer->SetTypingStatus(KeyPressed());
+
+ // Deliver recorded samples at specified sample rate, mic level etc.
+ // to the observer using callback.
+ UnLock();
+ _ptrAudioBuffer->DeliverRecordedData();
+ Lock();
+ }
+ }
+
+ UnLock();
+ return true;
+}
+
+bool AudioDeviceLinuxALSA::KeyPressed() const {
+#if defined(WEBRTC_USE_X11)
+ char szKey[32];
+ unsigned int i = 0;
+ char state = 0;
+
+ if (!_XDisplay)
+ return false;
+
+ // Check key map status
+ XQueryKeymap(_XDisplay, szKey);
+
+ // A bit change in keymap means a key is pressed
+ for (i = 0; i < sizeof(szKey); i++)
+ state |= (szKey[i] ^ _oldKeyState[i]) & szKey[i];
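+  // Example: a key whose bit was 0 in _oldKeyState and is 1 in szKey sets its
+  // bit in `state`, so only 0 -> 1 transitions (new key presses) are counted.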
+
+ // Save old state
+ memcpy((char*)_oldKeyState, (char*)szKey, sizeof(_oldKeyState));
+ return (state != 0);
+#else
+ return false;
+#endif
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_device/linux/audio_device_alsa_linux.h b/third_party/libwebrtc/modules/audio_device/linux/audio_device_alsa_linux.h
new file mode 100644
index 0000000000..23e21d3ce9
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/linux/audio_device_alsa_linux.h
@@ -0,0 +1,208 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef AUDIO_DEVICE_AUDIO_DEVICE_ALSA_LINUX_H_
+#define AUDIO_DEVICE_AUDIO_DEVICE_ALSA_LINUX_H_
+
+#include <memory>
+
+#include "modules/audio_device/audio_device_generic.h"
+#include "modules/audio_device/linux/audio_mixer_manager_alsa_linux.h"
+#include "rtc_base/platform_thread.h"
+#include "rtc_base/synchronization/mutex.h"
+
+#if defined(WEBRTC_USE_X11)
+#include <X11/Xlib.h>
+#endif
+#include <alsa/asoundlib.h>
+#include <sys/ioctl.h>
+#include <sys/soundcard.h>
+
+typedef webrtc::adm_linux_alsa::AlsaSymbolTable WebRTCAlsaSymbolTable;
+WebRTCAlsaSymbolTable* GetAlsaSymbolTable();
+
+namespace webrtc {
+
+class AudioDeviceLinuxALSA : public AudioDeviceGeneric {
+ public:
+ AudioDeviceLinuxALSA();
+ virtual ~AudioDeviceLinuxALSA();
+
+ // Retrieve the currently utilized audio layer
+ int32_t ActiveAudioLayer(
+ AudioDeviceModule::AudioLayer& audioLayer) const override;
+
+ // Main initializaton and termination
+ InitStatus Init() RTC_LOCKS_EXCLUDED(mutex_) override;
+ int32_t Terminate() RTC_LOCKS_EXCLUDED(mutex_) override;
+ bool Initialized() const override;
+
+ // Device enumeration
+ int16_t PlayoutDevices() override;
+ int16_t RecordingDevices() override;
+ int32_t PlayoutDeviceName(uint16_t index,
+ char name[kAdmMaxDeviceNameSize],
+ char guid[kAdmMaxGuidSize]) override;
+ int32_t RecordingDeviceName(uint16_t index,
+ char name[kAdmMaxDeviceNameSize],
+ char guid[kAdmMaxGuidSize]) override;
+
+ // Device selection
+ int32_t SetPlayoutDevice(uint16_t index) override;
+ int32_t SetPlayoutDevice(
+ AudioDeviceModule::WindowsDeviceType device) override;
+ int32_t SetRecordingDevice(uint16_t index) override;
+ int32_t SetRecordingDevice(
+ AudioDeviceModule::WindowsDeviceType device) override;
+
+ // Audio transport initialization
+ int32_t PlayoutIsAvailable(bool& available) override;
+ int32_t InitPlayout() RTC_LOCKS_EXCLUDED(mutex_) override;
+ bool PlayoutIsInitialized() const override;
+ int32_t RecordingIsAvailable(bool& available) override;
+ int32_t InitRecording() RTC_LOCKS_EXCLUDED(mutex_) override;
+ bool RecordingIsInitialized() const override;
+
+ // Audio transport control
+ int32_t StartPlayout() override;
+ int32_t StopPlayout() RTC_LOCKS_EXCLUDED(mutex_) override;
+ bool Playing() const override;
+ int32_t StartRecording() override;
+ int32_t StopRecording() RTC_LOCKS_EXCLUDED(mutex_) override;
+ bool Recording() const override;
+
+ // Audio mixer initialization
+ int32_t InitSpeaker() RTC_LOCKS_EXCLUDED(mutex_) override;
+ bool SpeakerIsInitialized() const override;
+ int32_t InitMicrophone() RTC_LOCKS_EXCLUDED(mutex_) override;
+ bool MicrophoneIsInitialized() const override;
+
+ // Speaker volume controls
+ int32_t SpeakerVolumeIsAvailable(bool& available) override;
+ int32_t SetSpeakerVolume(uint32_t volume) override;
+ int32_t SpeakerVolume(uint32_t& volume) const override;
+ int32_t MaxSpeakerVolume(uint32_t& maxVolume) const override;
+ int32_t MinSpeakerVolume(uint32_t& minVolume) const override;
+
+ // Microphone volume controls
+ int32_t MicrophoneVolumeIsAvailable(bool& available) override;
+ int32_t SetMicrophoneVolume(uint32_t volume) override;
+ int32_t MicrophoneVolume(uint32_t& volume) const override;
+ int32_t MaxMicrophoneVolume(uint32_t& maxVolume) const override;
+ int32_t MinMicrophoneVolume(uint32_t& minVolume) const override;
+
+ // Speaker mute control
+ int32_t SpeakerMuteIsAvailable(bool& available) override;
+ int32_t SetSpeakerMute(bool enable) override;
+ int32_t SpeakerMute(bool& enabled) const override;
+
+ // Microphone mute control
+ int32_t MicrophoneMuteIsAvailable(bool& available) override;
+ int32_t SetMicrophoneMute(bool enable) override;
+ int32_t MicrophoneMute(bool& enabled) const override;
+
+ // Stereo support
+ int32_t StereoPlayoutIsAvailable(bool& available)
+ RTC_LOCKS_EXCLUDED(mutex_) override;
+ int32_t SetStereoPlayout(bool enable) override;
+ int32_t StereoPlayout(bool& enabled) const override;
+ int32_t StereoRecordingIsAvailable(bool& available)
+ RTC_LOCKS_EXCLUDED(mutex_) override;
+ int32_t SetStereoRecording(bool enable) override;
+ int32_t StereoRecording(bool& enabled) const override;
+
+ // Delay information and control
+ int32_t PlayoutDelay(uint16_t& delayMS) const override;
+
+ void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer)
+ RTC_LOCKS_EXCLUDED(mutex_) override;
+
+ private:
+ int32_t InitRecordingLocked() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+ int32_t StopRecordingLocked() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+ int32_t StopPlayoutLocked() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+ int32_t InitPlayoutLocked() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+ int32_t InitSpeakerLocked() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+ int32_t InitMicrophoneLocked() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+ int32_t GetDevicesInfo(int32_t function,
+ bool playback,
+ int32_t enumDeviceNo = 0,
+ char* enumDeviceName = NULL,
+ int32_t ednLen = 0) const;
+ int32_t ErrorRecovery(int32_t error, snd_pcm_t* deviceHandle);
+
+ bool KeyPressed() const;
+
+ void Lock() RTC_EXCLUSIVE_LOCK_FUNCTION(mutex_) { mutex_.Lock(); }
+ void UnLock() RTC_UNLOCK_FUNCTION(mutex_) { mutex_.Unlock(); }
+
+ inline int32_t InputSanityCheckAfterUnlockedPeriod() const;
+ inline int32_t OutputSanityCheckAfterUnlockedPeriod() const;
+
+ static void RecThreadFunc(void*);
+ static void PlayThreadFunc(void*);
+ bool RecThreadProcess();
+ bool PlayThreadProcess();
+
+ AudioDeviceBuffer* _ptrAudioBuffer;
+
+ Mutex mutex_;
+
+ rtc::PlatformThread _ptrThreadRec;
+ rtc::PlatformThread _ptrThreadPlay;
+
+ AudioMixerManagerLinuxALSA _mixerManager;
+
+ uint16_t _inputDeviceIndex;
+ uint16_t _outputDeviceIndex;
+ bool _inputDeviceIsSpecified;
+ bool _outputDeviceIsSpecified;
+
+ snd_pcm_t* _handleRecord;
+ snd_pcm_t* _handlePlayout;
+
+ snd_pcm_uframes_t _recordingBuffersizeInFrame;
+ snd_pcm_uframes_t _recordingPeriodSizeInFrame;
+ snd_pcm_uframes_t _playoutBufferSizeInFrame;
+ snd_pcm_uframes_t _playoutPeriodSizeInFrame;
+
+ ssize_t _recordingBufferSizeIn10MS;
+ ssize_t _playoutBufferSizeIn10MS;
+ uint32_t _recordingFramesIn10MS;
+ uint32_t _playoutFramesIn10MS;
+
+ uint32_t _recordingFreq;
+ uint32_t _playoutFreq;
+ uint8_t _recChannels;
+ uint8_t _playChannels;
+
+ int8_t* _recordingBuffer; // in byte
+ int8_t* _playoutBuffer; // in byte
+ uint32_t _recordingFramesLeft;
+ uint32_t _playoutFramesLeft;
+
+ bool _initialized;
+ bool _recording;
+ bool _playing;
+ bool _recIsInitialized;
+ bool _playIsInitialized;
+
+ snd_pcm_sframes_t _recordingDelay;
+ snd_pcm_sframes_t _playoutDelay;
+
+ char _oldKeyState[32];
+#if defined(WEBRTC_USE_X11)
+ Display* _XDisplay;
+#endif
+};
+
+} // namespace webrtc
+
+#endif  // AUDIO_DEVICE_AUDIO_DEVICE_ALSA_LINUX_H_
diff --git a/third_party/libwebrtc/modules/audio_device/linux/audio_device_pulse_linux.cc b/third_party/libwebrtc/modules/audio_device/linux/audio_device_pulse_linux.cc
new file mode 100644
index 0000000000..4876c0fb91
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/linux/audio_device_pulse_linux.cc
@@ -0,0 +1,2286 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_device/linux/audio_device_pulse_linux.h"
+
+#include <string.h>
+
+#include "modules/audio_device/linux/latebindingsymboltable_linux.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/platform_thread.h"
+
+WebRTCPulseSymbolTable* GetPulseSymbolTable() {
+ static WebRTCPulseSymbolTable* pulse_symbol_table =
+ new WebRTCPulseSymbolTable();
+ return pulse_symbol_table;
+}
+
+// Accesses Pulse functions through our late-binding symbol table instead of
+// directly. This way we don't have to link to libpulse, which means our binary
+// will work on systems that don't have it.
+#define LATE(sym) \
+ LATESYM_GET(webrtc::adm_linux_pulse::PulseAudioSymbolTable, \
+ GetPulseSymbolTable(), sym)
+
+namespace webrtc {
+
+AudioDeviceLinuxPulse::AudioDeviceLinuxPulse()
+ : _ptrAudioBuffer(NULL),
+ _inputDeviceIndex(0),
+ _outputDeviceIndex(0),
+ _inputDeviceIsSpecified(false),
+ _outputDeviceIsSpecified(false),
+ sample_rate_hz_(0),
+ _recChannels(1),
+ _playChannels(1),
+ _initialized(false),
+ _recording(false),
+ _playing(false),
+ _recIsInitialized(false),
+ _playIsInitialized(false),
+ _startRec(false),
+ _startPlay(false),
+ update_speaker_volume_at_startup_(false),
+ quit_(false),
+ _sndCardPlayDelay(0),
+ _writeErrors(0),
+ _deviceIndex(-1),
+ _numPlayDevices(0),
+ _numRecDevices(0),
+ _playDeviceName(NULL),
+ _recDeviceName(NULL),
+ _playDisplayDeviceName(NULL),
+ _recDisplayDeviceName(NULL),
+ _playBuffer(NULL),
+ _playbackBufferSize(0),
+ _playbackBufferUnused(0),
+ _tempBufferSpace(0),
+ _recBuffer(NULL),
+ _recordBufferSize(0),
+ _recordBufferUsed(0),
+ _tempSampleData(NULL),
+ _tempSampleDataSize(0),
+ _configuredLatencyPlay(0),
+ _configuredLatencyRec(0),
+ _paDeviceIndex(-1),
+ _paStateChanged(false),
+ _paMainloop(NULL),
+ _paMainloopApi(NULL),
+ _paContext(NULL),
+ _recStream(NULL),
+ _playStream(NULL),
+ _recStreamFlags(0),
+ _playStreamFlags(0) {
+ RTC_DLOG(LS_INFO) << __FUNCTION__ << " created";
+
+ memset(_paServerVersion, 0, sizeof(_paServerVersion));
+ memset(&_playBufferAttr, 0, sizeof(_playBufferAttr));
+ memset(&_recBufferAttr, 0, sizeof(_recBufferAttr));
+ memset(_oldKeyState, 0, sizeof(_oldKeyState));
+}
+
+AudioDeviceLinuxPulse::~AudioDeviceLinuxPulse() {
+ RTC_DLOG(LS_INFO) << __FUNCTION__ << " destroyed";
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ Terminate();
+
+ if (_recBuffer) {
+ delete[] _recBuffer;
+ _recBuffer = NULL;
+ }
+ if (_playBuffer) {
+ delete[] _playBuffer;
+ _playBuffer = NULL;
+ }
+ if (_playDeviceName) {
+ delete[] _playDeviceName;
+ _playDeviceName = NULL;
+ }
+ if (_recDeviceName) {
+ delete[] _recDeviceName;
+ _recDeviceName = NULL;
+ }
+}
+
+void AudioDeviceLinuxPulse::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+
+ _ptrAudioBuffer = audioBuffer;
+
+ // Inform the AudioBuffer about default settings for this implementation.
+ // Set all values to zero here since the actual settings will be done by
+ // InitPlayout and InitRecording later.
+ _ptrAudioBuffer->SetRecordingSampleRate(0);
+ _ptrAudioBuffer->SetPlayoutSampleRate(0);
+ _ptrAudioBuffer->SetRecordingChannels(0);
+ _ptrAudioBuffer->SetPlayoutChannels(0);
+}
+
+// ----------------------------------------------------------------------------
+// ActiveAudioLayer
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceLinuxPulse::ActiveAudioLayer(
+ AudioDeviceModule::AudioLayer& audioLayer) const {
+ audioLayer = AudioDeviceModule::kLinuxPulseAudio;
+ return 0;
+}
+
+AudioDeviceGeneric::InitStatus AudioDeviceLinuxPulse::Init() {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ if (_initialized) {
+ return InitStatus::OK;
+ }
+
+ // Initialize PulseAudio
+ if (InitPulseAudio() < 0) {
+ RTC_LOG(LS_ERROR) << "failed to initialize PulseAudio";
+ if (TerminatePulseAudio() < 0) {
+ RTC_LOG(LS_ERROR) << "failed to terminate PulseAudio";
+ }
+ return InitStatus::OTHER_ERROR;
+ }
+
+#if defined(WEBRTC_USE_X11)
+ // Get X display handle for typing detection
+ _XDisplay = XOpenDisplay(NULL);
+ if (!_XDisplay) {
+ RTC_LOG(LS_WARNING)
+ << "failed to open X display, typing detection will not work";
+ }
+#endif
+
+ // RECORDING
+ const auto attributes =
+ rtc::ThreadAttributes().SetPriority(rtc::ThreadPriority::kRealtime);
+ _ptrThreadRec = rtc::PlatformThread::SpawnJoinable(
+ [this] {
+ while (RecThreadProcess()) {
+ }
+ },
+ "webrtc_audio_module_rec_thread", attributes);
+
+ // PLAYOUT
+ _ptrThreadPlay = rtc::PlatformThread::SpawnJoinable(
+ [this] {
+ while (PlayThreadProcess()) {
+ }
+ },
+ "webrtc_audio_module_play_thread", attributes);
+ _initialized = true;
+
+ return InitStatus::OK;
+}
+
+int32_t AudioDeviceLinuxPulse::Terminate() {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ if (!_initialized) {
+ return 0;
+ }
+ {
+ MutexLock lock(&mutex_);
+ quit_ = true;
+ }
+ _mixerManager.Close();
+
+ // RECORDING
+ _timeEventRec.Set();
+ _ptrThreadRec.Finalize();
+
+ // PLAYOUT
+ _timeEventPlay.Set();
+ _ptrThreadPlay.Finalize();
+
+ // Terminate PulseAudio
+ if (TerminatePulseAudio() < 0) {
+ RTC_LOG(LS_ERROR) << "failed to terminate PulseAudio";
+ return -1;
+ }
+
+#if defined(WEBRTC_USE_X11)
+ if (_XDisplay) {
+ XCloseDisplay(_XDisplay);
+ _XDisplay = NULL;
+ }
+#endif
+
+ _initialized = false;
+ _outputDeviceIsSpecified = false;
+ _inputDeviceIsSpecified = false;
+
+ return 0;
+}
+
+bool AudioDeviceLinuxPulse::Initialized() const {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ return (_initialized);
+}
+
+int32_t AudioDeviceLinuxPulse::InitSpeaker() {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+
+ if (_playing) {
+ return -1;
+ }
+
+ if (!_outputDeviceIsSpecified) {
+ return -1;
+ }
+
+ // check if default device
+ if (_outputDeviceIndex == 0) {
+ uint16_t deviceIndex = 0;
+ GetDefaultDeviceInfo(false, NULL, deviceIndex);
+ _paDeviceIndex = deviceIndex;
+ } else {
+ // get the PA device index from
+ // the callback
+ _deviceIndex = _outputDeviceIndex;
+
+ // get playout devices
+ PlayoutDevices();
+ }
+
+ // the callback has now set the _paDeviceIndex to
+ // the PulseAudio index of the device
+ if (_mixerManager.OpenSpeaker(_paDeviceIndex) == -1) {
+ return -1;
+ }
+
+ // clear _deviceIndex
+ _deviceIndex = -1;
+ _paDeviceIndex = -1;
+
+ return 0;
+}
+
+int32_t AudioDeviceLinuxPulse::InitMicrophone() {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ if (_recording) {
+ return -1;
+ }
+
+ if (!_inputDeviceIsSpecified) {
+ return -1;
+ }
+
+ // Check if default device
+ if (_inputDeviceIndex == 0) {
+ uint16_t deviceIndex = 0;
+ GetDefaultDeviceInfo(true, NULL, deviceIndex);
+ _paDeviceIndex = deviceIndex;
+ } else {
+ // Get the PA device index from
+ // the callback
+ _deviceIndex = _inputDeviceIndex;
+
+ // get recording devices
+ RecordingDevices();
+ }
+
+ // The callback has now set the _paDeviceIndex to
+ // the PulseAudio index of the device
+ if (_mixerManager.OpenMicrophone(_paDeviceIndex) == -1) {
+ return -1;
+ }
+
+ // Clear _deviceIndex
+ _deviceIndex = -1;
+ _paDeviceIndex = -1;
+
+ return 0;
+}
+
+bool AudioDeviceLinuxPulse::SpeakerIsInitialized() const {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ return (_mixerManager.SpeakerIsInitialized());
+}
+
+bool AudioDeviceLinuxPulse::MicrophoneIsInitialized() const {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ return (_mixerManager.MicrophoneIsInitialized());
+}
+
+int32_t AudioDeviceLinuxPulse::SpeakerVolumeIsAvailable(bool& available) {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ bool wasInitialized = _mixerManager.SpeakerIsInitialized();
+
+ // Make an attempt to open up the
+ // output mixer corresponding to the currently selected output device.
+ if (!wasInitialized && InitSpeaker() == -1) {
+ // If we end up here it means that the selected speaker has no volume
+ // control.
+ available = false;
+ return 0;
+ }
+
+ // Given that InitSpeaker was successful, we know volume control exists.
+ available = true;
+
+ // Close the initialized output mixer
+ if (!wasInitialized) {
+ _mixerManager.CloseSpeaker();
+ }
+
+ return 0;
+}
+
+int32_t AudioDeviceLinuxPulse::SetSpeakerVolume(uint32_t volume) {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ if (!_playing) {
+    // Remember to apply this volume at the start of playout if it is set
+    // while playout is stopped.
+ update_speaker_volume_at_startup_ = true;
+ }
+ return (_mixerManager.SetSpeakerVolume(volume));
+}
+
+int32_t AudioDeviceLinuxPulse::SpeakerVolume(uint32_t& volume) const {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ uint32_t level(0);
+
+ if (_mixerManager.SpeakerVolume(level) == -1) {
+ return -1;
+ }
+
+ volume = level;
+
+ return 0;
+}
+
+int32_t AudioDeviceLinuxPulse::MaxSpeakerVolume(uint32_t& maxVolume) const {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ uint32_t maxVol(0);
+
+ if (_mixerManager.MaxSpeakerVolume(maxVol) == -1) {
+ return -1;
+ }
+
+ maxVolume = maxVol;
+
+ return 0;
+}
+
+int32_t AudioDeviceLinuxPulse::MinSpeakerVolume(uint32_t& minVolume) const {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ uint32_t minVol(0);
+
+ if (_mixerManager.MinSpeakerVolume(minVol) == -1) {
+ return -1;
+ }
+
+ minVolume = minVol;
+
+ return 0;
+}
+
+int32_t AudioDeviceLinuxPulse::SpeakerMuteIsAvailable(bool& available) {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ bool isAvailable(false);
+ bool wasInitialized = _mixerManager.SpeakerIsInitialized();
+
+ // Make an attempt to open up the
+ // output mixer corresponding to the currently selected output device.
+ //
+ if (!wasInitialized && InitSpeaker() == -1) {
+ // If we end up here it means that the selected speaker has no volume
+    // control; hence it is also safe to conclude at this stage that it has
+    // no mute control.
+ available = false;
+ return 0;
+ }
+
+ // Check if the selected speaker has a mute control
+ _mixerManager.SpeakerMuteIsAvailable(isAvailable);
+
+ available = isAvailable;
+
+ // Close the initialized output mixer
+ if (!wasInitialized) {
+ _mixerManager.CloseSpeaker();
+ }
+
+ return 0;
+}
+
+int32_t AudioDeviceLinuxPulse::SetSpeakerMute(bool enable) {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ return (_mixerManager.SetSpeakerMute(enable));
+}
+
+int32_t AudioDeviceLinuxPulse::SpeakerMute(bool& enabled) const {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+  bool muted(false);
+ if (_mixerManager.SpeakerMute(muted) == -1) {
+ return -1;
+ }
+
+ enabled = muted;
+ return 0;
+}
+
+int32_t AudioDeviceLinuxPulse::MicrophoneMuteIsAvailable(bool& available) {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ bool isAvailable(false);
+ bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
+
+ // Make an attempt to open up the
+ // input mixer corresponding to the currently selected input device.
+ //
+ if (!wasInitialized && InitMicrophone() == -1) {
+ // If we end up here it means that the selected microphone has no
+    // volume control; hence it is also safe to conclude at this stage that
+    // it has no mute control.
+ available = false;
+ return 0;
+ }
+
+ // Check if the selected microphone has a mute control
+ //
+ _mixerManager.MicrophoneMuteIsAvailable(isAvailable);
+ available = isAvailable;
+
+ // Close the initialized input mixer
+ //
+ if (!wasInitialized) {
+ _mixerManager.CloseMicrophone();
+ }
+
+ return 0;
+}
+
+int32_t AudioDeviceLinuxPulse::SetMicrophoneMute(bool enable) {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ return (_mixerManager.SetMicrophoneMute(enable));
+}
+
+int32_t AudioDeviceLinuxPulse::MicrophoneMute(bool& enabled) const {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+  bool muted(false);
+ if (_mixerManager.MicrophoneMute(muted) == -1) {
+ return -1;
+ }
+
+ enabled = muted;
+ return 0;
+}
+
+int32_t AudioDeviceLinuxPulse::StereoRecordingIsAvailable(bool& available) {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ if (_recChannels == 2 && _recording) {
+ available = true;
+ return 0;
+ }
+
+ available = false;
+ bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
+ int error = 0;
+
+ if (!wasInitialized && InitMicrophone() == -1) {
+ // Cannot open the specified device
+ available = false;
+ return 0;
+ }
+
+ // Check if the selected microphone can record stereo.
+ bool isAvailable(false);
+ error = _mixerManager.StereoRecordingIsAvailable(isAvailable);
+ if (!error)
+ available = isAvailable;
+
+ // Close the initialized input mixer
+ if (!wasInitialized) {
+ _mixerManager.CloseMicrophone();
+ }
+
+ return error;
+}
+
+int32_t AudioDeviceLinuxPulse::SetStereoRecording(bool enable) {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ if (enable)
+ _recChannels = 2;
+ else
+ _recChannels = 1;
+
+ return 0;
+}
+
+int32_t AudioDeviceLinuxPulse::StereoRecording(bool& enabled) const {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ if (_recChannels == 2)
+ enabled = true;
+ else
+ enabled = false;
+
+ return 0;
+}
+
+int32_t AudioDeviceLinuxPulse::StereoPlayoutIsAvailable(bool& available) {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ if (_playChannels == 2 && _playing) {
+ available = true;
+ return 0;
+ }
+
+ available = false;
+ bool wasInitialized = _mixerManager.SpeakerIsInitialized();
+ int error = 0;
+
+ if (!wasInitialized && InitSpeaker() == -1) {
+ // Cannot open the specified device.
+ return -1;
+ }
+
+ // Check if the selected speaker can play stereo.
+ bool isAvailable(false);
+ error = _mixerManager.StereoPlayoutIsAvailable(isAvailable);
+ if (!error)
+ available = isAvailable;
+
+  // Close the initialized output mixer
+ if (!wasInitialized) {
+ _mixerManager.CloseSpeaker();
+ }
+
+ return error;
+}
+
+int32_t AudioDeviceLinuxPulse::SetStereoPlayout(bool enable) {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ if (enable)
+ _playChannels = 2;
+ else
+ _playChannels = 1;
+
+ return 0;
+}
+
+int32_t AudioDeviceLinuxPulse::StereoPlayout(bool& enabled) const {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ if (_playChannels == 2)
+ enabled = true;
+ else
+ enabled = false;
+
+ return 0;
+}
+
+int32_t AudioDeviceLinuxPulse::MicrophoneVolumeIsAvailable(bool& available) {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
+
+ // Make an attempt to open up the
+  // input mixer corresponding to the currently selected input device.
+ if (!wasInitialized && InitMicrophone() == -1) {
+ // If we end up here it means that the selected microphone has no
+ // volume control.
+ available = false;
+ return 0;
+ }
+
+ // Given that InitMicrophone was successful, we know that a volume control
+ // exists.
+ available = true;
+
+ // Close the initialized input mixer
+ if (!wasInitialized) {
+ _mixerManager.CloseMicrophone();
+ }
+
+ return 0;
+}
+
+int32_t AudioDeviceLinuxPulse::SetMicrophoneVolume(uint32_t volume) {
+ return (_mixerManager.SetMicrophoneVolume(volume));
+}
+
+int32_t AudioDeviceLinuxPulse::MicrophoneVolume(uint32_t& volume) const {
+ uint32_t level(0);
+
+ if (_mixerManager.MicrophoneVolume(level) == -1) {
+ RTC_LOG(LS_WARNING) << "failed to retrieve current microphone level";
+ return -1;
+ }
+
+ volume = level;
+
+ return 0;
+}
+
+int32_t AudioDeviceLinuxPulse::MaxMicrophoneVolume(uint32_t& maxVolume) const {
+ uint32_t maxVol(0);
+
+ if (_mixerManager.MaxMicrophoneVolume(maxVol) == -1) {
+ return -1;
+ }
+
+ maxVolume = maxVol;
+
+ return 0;
+}
+
+int32_t AudioDeviceLinuxPulse::MinMicrophoneVolume(uint32_t& minVolume) const {
+ uint32_t minVol(0);
+
+ if (_mixerManager.MinMicrophoneVolume(minVol) == -1) {
+ return -1;
+ }
+
+ minVolume = minVol;
+
+ return 0;
+}
+
+int16_t AudioDeviceLinuxPulse::PlayoutDevices() {
+ PaLock();
+
+ pa_operation* paOperation = NULL;
+ _numPlayDevices = 1; // init to 1 to account for "default"
+
+ // get the whole list of devices and update _numPlayDevices
+ paOperation =
+ LATE(pa_context_get_sink_info_list)(_paContext, PaSinkInfoCallback, this);
+
+ WaitForOperationCompletion(paOperation);
+
+ PaUnLock();
+
+ return _numPlayDevices;
+}
+
+int32_t AudioDeviceLinuxPulse::SetPlayoutDevice(uint16_t index) {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ if (_playIsInitialized) {
+ return -1;
+ }
+
+ const uint16_t nDevices = PlayoutDevices();
+
+ RTC_LOG(LS_VERBOSE) << "number of availiable output devices is " << nDevices;
+
+ if (index > (nDevices - 1)) {
+ RTC_LOG(LS_ERROR) << "device index is out of range [0," << (nDevices - 1)
+ << "]";
+ return -1;
+ }
+
+ _outputDeviceIndex = index;
+ _outputDeviceIsSpecified = true;
+
+ return 0;
+}
+
+int32_t AudioDeviceLinuxPulse::SetPlayoutDevice(
+ AudioDeviceModule::WindowsDeviceType /*device*/) {
+ RTC_LOG(LS_ERROR) << "WindowsDeviceType not supported";
+ return -1;
+}
+
+int32_t AudioDeviceLinuxPulse::PlayoutDeviceName(
+ uint16_t index,
+ char name[kAdmMaxDeviceNameSize],
+ char guid[kAdmMaxGuidSize]) {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ const uint16_t nDevices = PlayoutDevices();
+
+ if ((index > (nDevices - 1)) || (name == NULL)) {
+ return -1;
+ }
+
+ memset(name, 0, kAdmMaxDeviceNameSize);
+
+ if (guid != NULL) {
+ memset(guid, 0, kAdmMaxGuidSize);
+ }
+
+ // Check if default device
+ if (index == 0) {
+ uint16_t deviceIndex = 0;
+ return GetDefaultDeviceInfo(false, name, deviceIndex);
+ }
+
+ // Tell the callback that we want
+  // the name for this device
+ _playDisplayDeviceName = name;
+ _deviceIndex = index;
+
+ // get playout devices
+ PlayoutDevices();
+
+ // clear device name and index
+ _playDisplayDeviceName = NULL;
+ _deviceIndex = -1;
+
+ return 0;
+}
+
+int32_t AudioDeviceLinuxPulse::RecordingDeviceName(
+ uint16_t index,
+ char name[kAdmMaxDeviceNameSize],
+ char guid[kAdmMaxGuidSize]) {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ const uint16_t nDevices(RecordingDevices());
+
+ if ((index > (nDevices - 1)) || (name == NULL)) {
+ return -1;
+ }
+
+ memset(name, 0, kAdmMaxDeviceNameSize);
+
+ if (guid != NULL) {
+ memset(guid, 0, kAdmMaxGuidSize);
+ }
+
+ // Check if default device
+ if (index == 0) {
+ uint16_t deviceIndex = 0;
+ return GetDefaultDeviceInfo(true, name, deviceIndex);
+ }
+
+ // Tell the callback that we want
+ // the name for this device
+ _recDisplayDeviceName = name;
+ _deviceIndex = index;
+
+ // Get recording devices
+ RecordingDevices();
+
+ // Clear device name and index
+ _recDisplayDeviceName = NULL;
+ _deviceIndex = -1;
+
+ return 0;
+}
+
+int16_t AudioDeviceLinuxPulse::RecordingDevices() {
+ PaLock();
+
+ pa_operation* paOperation = NULL;
+ _numRecDevices = 1; // Init to 1 to account for "default"
+
+ // Get the whole list of devices and update _numRecDevices
+ paOperation = LATE(pa_context_get_source_info_list)(
+ _paContext, PaSourceInfoCallback, this);
+
+ WaitForOperationCompletion(paOperation);
+
+ PaUnLock();
+
+ return _numRecDevices;
+}
+
+int32_t AudioDeviceLinuxPulse::SetRecordingDevice(uint16_t index) {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ if (_recIsInitialized) {
+ return -1;
+ }
+
+ const uint16_t nDevices(RecordingDevices());
+
+ RTC_LOG(LS_VERBOSE) << "number of availiable input devices is " << nDevices;
+
+ if (index > (nDevices - 1)) {
+ RTC_LOG(LS_ERROR) << "device index is out of range [0," << (nDevices - 1)
+ << "]";
+ return -1;
+ }
+
+ _inputDeviceIndex = index;
+ _inputDeviceIsSpecified = true;
+
+ return 0;
+}
+
+int32_t AudioDeviceLinuxPulse::SetRecordingDevice(
+ AudioDeviceModule::WindowsDeviceType /*device*/) {
+ RTC_LOG(LS_ERROR) << "WindowsDeviceType not supported";
+ return -1;
+}
+
+int32_t AudioDeviceLinuxPulse::PlayoutIsAvailable(bool& available) {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ available = false;
+
+ // Try to initialize the playout side
+ int32_t res = InitPlayout();
+
+ // Cancel effect of initialization
+ StopPlayout();
+
+ if (res != -1) {
+ available = true;
+ }
+
+ return res;
+}
+
+int32_t AudioDeviceLinuxPulse::RecordingIsAvailable(bool& available) {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ available = false;
+
+  // Try to initialize the recording side
+ int32_t res = InitRecording();
+
+ // Cancel effect of initialization
+ StopRecording();
+
+ if (res != -1) {
+ available = true;
+ }
+
+ return res;
+}
+
+int32_t AudioDeviceLinuxPulse::InitPlayout() {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+
+ if (_playing) {
+ return -1;
+ }
+
+ if (!_outputDeviceIsSpecified) {
+ return -1;
+ }
+
+ if (_playIsInitialized) {
+ return 0;
+ }
+
+ // Initialize the speaker (devices might have been added or removed)
+ if (InitSpeaker() == -1) {
+ RTC_LOG(LS_WARNING) << "InitSpeaker() failed";
+ }
+
+ // Set the play sample specification
+ pa_sample_spec playSampleSpec;
+ playSampleSpec.channels = _playChannels;
+ playSampleSpec.format = PA_SAMPLE_S16LE;
+ playSampleSpec.rate = sample_rate_hz_;
+
+ // Create a new play stream
+ {
+ MutexLock lock(&mutex_);
+ _playStream =
+ LATE(pa_stream_new)(_paContext, "playStream", &playSampleSpec, NULL);
+ }
+
+ if (!_playStream) {
+ RTC_LOG(LS_ERROR) << "failed to create play stream, err="
+ << LATE(pa_context_errno)(_paContext);
+ return -1;
+ }
+
+ // Provide the playStream to the mixer
+ _mixerManager.SetPlayStream(_playStream);
+
+ if (_ptrAudioBuffer) {
+ // Update audio buffer with the selected parameters
+ _ptrAudioBuffer->SetPlayoutSampleRate(sample_rate_hz_);
+ _ptrAudioBuffer->SetPlayoutChannels((uint8_t)_playChannels);
+ }
+
+ RTC_LOG(LS_VERBOSE) << "stream state "
+ << LATE(pa_stream_get_state)(_playStream);
+
+ // Set stream flags
+ _playStreamFlags = (pa_stream_flags_t)(PA_STREAM_AUTO_TIMING_UPDATE |
+ PA_STREAM_INTERPOLATE_TIMING);
+
+ if (_configuredLatencyPlay != WEBRTC_PA_NO_LATENCY_REQUIREMENTS) {
+ // If configuring a specific latency then we want to specify
+ // PA_STREAM_ADJUST_LATENCY to make the server adjust parameters
+ // automatically to reach that target latency. However, that flag
+ // doesn't exist in Ubuntu 8.04 and many people still use that,
+ // so we have to check the protocol version of libpulse.
+ if (LATE(pa_context_get_protocol_version)(_paContext) >=
+ WEBRTC_PA_ADJUST_LATENCY_PROTOCOL_VERSION) {
+ _playStreamFlags |= PA_STREAM_ADJUST_LATENCY;
+ }
+
+ const pa_sample_spec* spec = LATE(pa_stream_get_sample_spec)(_playStream);
+ if (!spec) {
+ RTC_LOG(LS_ERROR) << "pa_stream_get_sample_spec()";
+ return -1;
+ }
+
+ size_t bytesPerSec = LATE(pa_bytes_per_second)(spec);
+ uint32_t latency = bytesPerSec * WEBRTC_PA_PLAYBACK_LATENCY_MINIMUM_MSECS /
+ WEBRTC_PA_MSECS_PER_SEC;
+
+ // Set the play buffer attributes
+ _playBufferAttr.maxlength = latency; // num bytes stored in the buffer
+ _playBufferAttr.tlength = latency; // target fill level of play buffer
+ // minimum free num bytes before server request more data
+ _playBufferAttr.minreq = latency / WEBRTC_PA_PLAYBACK_REQUEST_FACTOR;
+ // prebuffer tlength before starting playout
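+    // (i.e. playback starts once the buffer is within one minreq of its
+    // target fill level)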
+ _playBufferAttr.prebuf = _playBufferAttr.tlength - _playBufferAttr.minreq;
+
+ _configuredLatencyPlay = latency;
+ }
+
+  // One 10 ms chunk: (rate / 100) samples * 2 bytes per sample * channels.
+ _playbackBufferSize = sample_rate_hz_ / 100 * 2 * _playChannels;
+ _playbackBufferUnused = _playbackBufferSize;
+ _playBuffer = new int8_t[_playbackBufferSize];
+
+ // Enable underflow callback
+ LATE(pa_stream_set_underflow_callback)
+ (_playStream, PaStreamUnderflowCallback, this);
+
+ // Set the state callback function for the stream
+ LATE(pa_stream_set_state_callback)(_playStream, PaStreamStateCallback, this);
+
+ // Mark playout side as initialized
+ {
+ MutexLock lock(&mutex_);
+ _playIsInitialized = true;
+ _sndCardPlayDelay = 0;
+ }
+
+ return 0;
+}
+
+int32_t AudioDeviceLinuxPulse::InitRecording() {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+
+ if (_recording) {
+ return -1;
+ }
+
+ if (!_inputDeviceIsSpecified) {
+ return -1;
+ }
+
+ if (_recIsInitialized) {
+ return 0;
+ }
+
+ // Initialize the microphone (devices might have been added or removed)
+ if (InitMicrophone() == -1) {
+ RTC_LOG(LS_WARNING) << "InitMicrophone() failed";
+ }
+
+ // Set the rec sample specification
+ pa_sample_spec recSampleSpec;
+ recSampleSpec.channels = _recChannels;
+ recSampleSpec.format = PA_SAMPLE_S16LE;
+ recSampleSpec.rate = sample_rate_hz_;
+
+ // Create a new rec stream
+ _recStream =
+ LATE(pa_stream_new)(_paContext, "recStream", &recSampleSpec, NULL);
+ if (!_recStream) {
+ RTC_LOG(LS_ERROR) << "failed to create rec stream, err="
+ << LATE(pa_context_errno)(_paContext);
+ return -1;
+ }
+
+ // Provide the recStream to the mixer
+ _mixerManager.SetRecStream(_recStream);
+
+ if (_ptrAudioBuffer) {
+ // Update audio buffer with the selected parameters
+ _ptrAudioBuffer->SetRecordingSampleRate(sample_rate_hz_);
+ _ptrAudioBuffer->SetRecordingChannels((uint8_t)_recChannels);
+ }
+
+ if (_configuredLatencyRec != WEBRTC_PA_NO_LATENCY_REQUIREMENTS) {
+ _recStreamFlags = (pa_stream_flags_t)(PA_STREAM_AUTO_TIMING_UPDATE |
+ PA_STREAM_INTERPOLATE_TIMING);
+
+ // If configuring a specific latency then we want to specify
+ // PA_STREAM_ADJUST_LATENCY to make the server adjust parameters
+ // automatically to reach that target latency. However, that flag
+ // doesn't exist in Ubuntu 8.04 and many people still use that,
+ // so we have to check the protocol version of libpulse.
+ if (LATE(pa_context_get_protocol_version)(_paContext) >=
+ WEBRTC_PA_ADJUST_LATENCY_PROTOCOL_VERSION) {
+ _recStreamFlags |= PA_STREAM_ADJUST_LATENCY;
+ }
+
+ const pa_sample_spec* spec = LATE(pa_stream_get_sample_spec)(_recStream);
+ if (!spec) {
+ RTC_LOG(LS_ERROR) << "pa_stream_get_sample_spec(rec)";
+ return -1;
+ }
+
+ size_t bytesPerSec = LATE(pa_bytes_per_second)(spec);
+ uint32_t latency = bytesPerSec * WEBRTC_PA_LOW_CAPTURE_LATENCY_MSECS /
+ WEBRTC_PA_MSECS_PER_SEC;
+
+ // Set the rec buffer attributes
+ // Note: fragsize specifies a maximum transfer size, not a minimum, so
+ // it is not possible to force a high latency setting, only a low one.
+ _recBufferAttr.fragsize = latency; // size of fragment
+ _recBufferAttr.maxlength =
+ latency + bytesPerSec * WEBRTC_PA_CAPTURE_BUFFER_EXTRA_MSECS /
+ WEBRTC_PA_MSECS_PER_SEC;
+
+ _configuredLatencyRec = latency;
+ }
+
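+  // One 10 ms chunk of 16-bit audio: (rate / 100) samples * 2 bytes * channels.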
+ _recordBufferSize = sample_rate_hz_ / 100 * 2 * _recChannels;
+ _recordBufferUsed = 0;
+ _recBuffer = new int8_t[_recordBufferSize];
+
+ // Enable overflow callback
+ LATE(pa_stream_set_overflow_callback)
+ (_recStream, PaStreamOverflowCallback, this);
+
+ // Set the state callback function for the stream
+ LATE(pa_stream_set_state_callback)(_recStream, PaStreamStateCallback, this);
+
+ // Mark recording side as initialized
+ _recIsInitialized = true;
+
+ return 0;
+}
+
+int32_t AudioDeviceLinuxPulse::StartRecording() {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ if (!_recIsInitialized) {
+ return -1;
+ }
+
+ if (_recording) {
+ return 0;
+ }
+
+ // Set state to ensure that the recording starts from the audio thread.
+ _startRec = true;
+
+ // The audio thread will signal when recording has started.
+ _timeEventRec.Set();
+ if (!_recStartEvent.Wait(10000)) {
+ {
+ MutexLock lock(&mutex_);
+ _startRec = false;
+ }
+ StopRecording();
+ RTC_LOG(LS_ERROR) << "failed to activate recording";
+ return -1;
+ }
+
+ {
+ MutexLock lock(&mutex_);
+ if (_recording) {
+ // The recording state is set by the audio thread after recording
+ // has started.
+ } else {
+ RTC_LOG(LS_ERROR) << "failed to activate recording";
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+int32_t AudioDeviceLinuxPulse::StopRecording() {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ MutexLock lock(&mutex_);
+
+ if (!_recIsInitialized) {
+ return 0;
+ }
+
+ if (_recStream == NULL) {
+ return -1;
+ }
+
+ _recIsInitialized = false;
+ _recording = false;
+
+ RTC_LOG(LS_VERBOSE) << "stopping recording";
+
+ // Stop Recording
+ PaLock();
+
+ DisableReadCallback();
+ LATE(pa_stream_set_overflow_callback)(_recStream, NULL, NULL);
+
+ // Unset this here so that we don't get a TERMINATED callback
+ LATE(pa_stream_set_state_callback)(_recStream, NULL, NULL);
+
+ if (LATE(pa_stream_get_state)(_recStream) != PA_STREAM_UNCONNECTED) {
+ // Disconnect the stream
+ if (LATE(pa_stream_disconnect)(_recStream) != PA_OK) {
+ RTC_LOG(LS_ERROR) << "failed to disconnect rec stream, err="
+ << LATE(pa_context_errno)(_paContext);
+ PaUnLock();
+ return -1;
+ }
+
+ RTC_LOG(LS_VERBOSE) << "disconnected recording";
+ }
+
+ LATE(pa_stream_unref)(_recStream);
+ _recStream = NULL;
+
+ PaUnLock();
+
+ // Provide the recStream to the mixer
+ _mixerManager.SetRecStream(_recStream);
+
+ if (_recBuffer) {
+ delete[] _recBuffer;
+ _recBuffer = NULL;
+ }
+
+ return 0;
+}
+
+bool AudioDeviceLinuxPulse::RecordingIsInitialized() const {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ return (_recIsInitialized);
+}
+
+bool AudioDeviceLinuxPulse::Recording() const {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ return (_recording);
+}
+
+bool AudioDeviceLinuxPulse::PlayoutIsInitialized() const {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ return (_playIsInitialized);
+}
+
+int32_t AudioDeviceLinuxPulse::StartPlayout() {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+
+ if (!_playIsInitialized) {
+ return -1;
+ }
+
+ if (_playing) {
+ return 0;
+ }
+
+ // Set state to ensure that playout starts from the audio thread.
+ {
+ MutexLock lock(&mutex_);
+ _startPlay = true;
+ }
+
+  // Both `_startPlay` and `_playing` need protection since they are also
+ // accessed on the playout thread.
+
+ // The audio thread will signal when playout has started.
+ _timeEventPlay.Set();
+ if (!_playStartEvent.Wait(10000)) {
+ {
+ MutexLock lock(&mutex_);
+ _startPlay = false;
+ }
+ StopPlayout();
+ RTC_LOG(LS_ERROR) << "failed to activate playout";
+ return -1;
+ }
+
+ {
+ MutexLock lock(&mutex_);
+ if (_playing) {
+ // The playing state is set by the audio thread after playout
+ // has started.
+ } else {
+ RTC_LOG(LS_ERROR) << "failed to activate playing";
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+int32_t AudioDeviceLinuxPulse::StopPlayout() {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ MutexLock lock(&mutex_);
+
+ if (!_playIsInitialized) {
+ return 0;
+ }
+
+ if (_playStream == NULL) {
+ return -1;
+ }
+
+ _playIsInitialized = false;
+ _playing = false;
+ _sndCardPlayDelay = 0;
+
+ RTC_LOG(LS_VERBOSE) << "stopping playback";
+
+ // Stop Playout
+ PaLock();
+
+ DisableWriteCallback();
+ LATE(pa_stream_set_underflow_callback)(_playStream, NULL, NULL);
+
+ // Unset this here so that we don't get a TERMINATED callback
+ LATE(pa_stream_set_state_callback)(_playStream, NULL, NULL);
+
+ if (LATE(pa_stream_get_state)(_playStream) != PA_STREAM_UNCONNECTED) {
+ // Disconnect the stream
+ if (LATE(pa_stream_disconnect)(_playStream) != PA_OK) {
+ RTC_LOG(LS_ERROR) << "failed to disconnect play stream, err="
+ << LATE(pa_context_errno)(_paContext);
+ PaUnLock();
+ return -1;
+ }
+
+ RTC_LOG(LS_VERBOSE) << "disconnected playback";
+ }
+
+ LATE(pa_stream_unref)(_playStream);
+ _playStream = NULL;
+
+ PaUnLock();
+
+ // Provide the playStream to the mixer
+ _mixerManager.SetPlayStream(_playStream);
+
+ if (_playBuffer) {
+ delete[] _playBuffer;
+ _playBuffer = NULL;
+ }
+
+ return 0;
+}
+
+int32_t AudioDeviceLinuxPulse::PlayoutDelay(uint16_t& delayMS) const {
+ MutexLock lock(&mutex_);
+ delayMS = (uint16_t)_sndCardPlayDelay;
+ return 0;
+}
+
+bool AudioDeviceLinuxPulse::Playing() const {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ return (_playing);
+}
+
+// ============================================================================
+// Private Methods
+// ============================================================================
+
+void AudioDeviceLinuxPulse::PaContextStateCallback(pa_context* c, void* pThis) {
+ static_cast<AudioDeviceLinuxPulse*>(pThis)->PaContextStateCallbackHandler(c);
+}
+
+// ----------------------------------------------------------------------------
+// PaSinkInfoCallback
+// ----------------------------------------------------------------------------
+
+void AudioDeviceLinuxPulse::PaSinkInfoCallback(pa_context* /*c*/,
+ const pa_sink_info* i,
+ int eol,
+ void* pThis) {
+ static_cast<AudioDeviceLinuxPulse*>(pThis)->PaSinkInfoCallbackHandler(i, eol);
+}
+
+void AudioDeviceLinuxPulse::PaSourceInfoCallback(pa_context* /*c*/,
+ const pa_source_info* i,
+ int eol,
+ void* pThis) {
+ static_cast<AudioDeviceLinuxPulse*>(pThis)->PaSourceInfoCallbackHandler(i,
+ eol);
+}
+
+void AudioDeviceLinuxPulse::PaServerInfoCallback(pa_context* /*c*/,
+ const pa_server_info* i,
+ void* pThis) {
+ static_cast<AudioDeviceLinuxPulse*>(pThis)->PaServerInfoCallbackHandler(i);
+}
+
+void AudioDeviceLinuxPulse::PaStreamStateCallback(pa_stream* p, void* pThis) {
+ static_cast<AudioDeviceLinuxPulse*>(pThis)->PaStreamStateCallbackHandler(p);
+}
+
+void AudioDeviceLinuxPulse::PaContextStateCallbackHandler(pa_context* c) {
+ RTC_LOG(LS_VERBOSE) << "context state cb";
+
+ pa_context_state_t state = LATE(pa_context_get_state)(c);
+ switch (state) {
+ case PA_CONTEXT_UNCONNECTED:
+ RTC_LOG(LS_VERBOSE) << "unconnected";
+ break;
+ case PA_CONTEXT_CONNECTING:
+ case PA_CONTEXT_AUTHORIZING:
+ case PA_CONTEXT_SETTING_NAME:
+ RTC_LOG(LS_VERBOSE) << "no state";
+ break;
+ case PA_CONTEXT_FAILED:
+ case PA_CONTEXT_TERMINATED:
+ RTC_LOG(LS_VERBOSE) << "failed";
+ _paStateChanged = true;
+ LATE(pa_threaded_mainloop_signal)(_paMainloop, 0);
+ break;
+ case PA_CONTEXT_READY:
+ RTC_LOG(LS_VERBOSE) << "ready";
+ _paStateChanged = true;
+ LATE(pa_threaded_mainloop_signal)(_paMainloop, 0);
+ break;
+ }
+}
+
+void AudioDeviceLinuxPulse::PaSinkInfoCallbackHandler(const pa_sink_info* i,
+ int eol) {
+ if (eol) {
+ // Signal that we are done
+ LATE(pa_threaded_mainloop_signal)(_paMainloop, 0);
+ return;
+ }
+
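+  // The running sink count is compared against the index requested through
+  // _deviceIndex; on a match, this sink's PA index and names are captured.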
+ if (_numPlayDevices == _deviceIndex) {
+ // Convert the device index to the one of the sink
+ _paDeviceIndex = i->index;
+
+ if (_playDeviceName) {
+ // Copy the sink name
+ strncpy(_playDeviceName, i->name, kAdmMaxDeviceNameSize);
+ _playDeviceName[kAdmMaxDeviceNameSize - 1] = '\0';
+ }
+ if (_playDisplayDeviceName) {
+ // Copy the sink display name
+ strncpy(_playDisplayDeviceName, i->description, kAdmMaxDeviceNameSize);
+ _playDisplayDeviceName[kAdmMaxDeviceNameSize - 1] = '\0';
+ }
+ }
+
+ _numPlayDevices++;
+}
+
+void AudioDeviceLinuxPulse::PaSourceInfoCallbackHandler(const pa_source_info* i,
+ int eol) {
+ if (eol) {
+ // Signal that we are done
+ LATE(pa_threaded_mainloop_signal)(_paMainloop, 0);
+ return;
+ }
+
+  // Skip monitor sources (they mirror output sinks); only list real capture
+  // devices.
+ if (i->monitor_of_sink == PA_INVALID_INDEX) {
+ if (_numRecDevices == _deviceIndex) {
+ // Convert the device index to the one of the source
+ _paDeviceIndex = i->index;
+
+ if (_recDeviceName) {
+ // copy the source name
+ strncpy(_recDeviceName, i->name, kAdmMaxDeviceNameSize);
+ _recDeviceName[kAdmMaxDeviceNameSize - 1] = '\0';
+ }
+ if (_recDisplayDeviceName) {
+ // Copy the source display name
+ strncpy(_recDisplayDeviceName, i->description, kAdmMaxDeviceNameSize);
+ _recDisplayDeviceName[kAdmMaxDeviceNameSize - 1] = '\0';
+ }
+ }
+
+ _numRecDevices++;
+ }
+}
+
+void AudioDeviceLinuxPulse::PaServerInfoCallbackHandler(
+ const pa_server_info* i) {
+ // Use PA native sampling rate
+ sample_rate_hz_ = i->sample_spec.rate;
+
+ // Copy the PA server version
+ strncpy(_paServerVersion, i->server_version, 31);
+ _paServerVersion[31] = '\0';
+
+ if (_recDisplayDeviceName) {
+ // Copy the source name
+ strncpy(_recDisplayDeviceName, i->default_source_name,
+ kAdmMaxDeviceNameSize);
+ _recDisplayDeviceName[kAdmMaxDeviceNameSize - 1] = '\0';
+ }
+
+ if (_playDisplayDeviceName) {
+ // Copy the sink name
+ strncpy(_playDisplayDeviceName, i->default_sink_name,
+ kAdmMaxDeviceNameSize);
+ _playDisplayDeviceName[kAdmMaxDeviceNameSize - 1] = '\0';
+ }
+
+ LATE(pa_threaded_mainloop_signal)(_paMainloop, 0);
+}
+
+void AudioDeviceLinuxPulse::PaStreamStateCallbackHandler(pa_stream* p) {
+ RTC_LOG(LS_VERBOSE) << "stream state cb";
+
+ pa_stream_state_t state = LATE(pa_stream_get_state)(p);
+ switch (state) {
+ case PA_STREAM_UNCONNECTED:
+ RTC_LOG(LS_VERBOSE) << "unconnected";
+ break;
+ case PA_STREAM_CREATING:
+ RTC_LOG(LS_VERBOSE) << "creating";
+ break;
+ case PA_STREAM_FAILED:
+ case PA_STREAM_TERMINATED:
+ RTC_LOG(LS_VERBOSE) << "failed";
+ break;
+ case PA_STREAM_READY:
+ RTC_LOG(LS_VERBOSE) << "ready";
+ break;
+ }
+
+ LATE(pa_threaded_mainloop_signal)(_paMainloop, 0);
+}
+
+int32_t AudioDeviceLinuxPulse::CheckPulseAudioVersion() {
+ PaLock();
+
+ pa_operation* paOperation = NULL;
+
+ // get the server info and update deviceName
+ paOperation =
+ LATE(pa_context_get_server_info)(_paContext, PaServerInfoCallback, this);
+
+ WaitForOperationCompletion(paOperation);
+
+ PaUnLock();
+
+ RTC_LOG(LS_VERBOSE) << "checking PulseAudio version: " << _paServerVersion;
+
+ return 0;
+}
+
+int32_t AudioDeviceLinuxPulse::InitSamplingFrequency() {
+ PaLock();
+
+ pa_operation* paOperation = NULL;
+
+ // Get the server info and update sample_rate_hz_
+ paOperation =
+ LATE(pa_context_get_server_info)(_paContext, PaServerInfoCallback, this);
+
+ WaitForOperationCompletion(paOperation);
+
+ PaUnLock();
+
+ return 0;
+}
+
+int32_t AudioDeviceLinuxPulse::GetDefaultDeviceInfo(bool recDevice,
+ char* name,
+ uint16_t& index) {
+ char tmpName[kAdmMaxDeviceNameSize] = {0};
+ // subtract length of "default: "
+ uint16_t nameLen = kAdmMaxDeviceNameSize - 9;
+ char* pName = NULL;
+
+ if (name) {
+ // Add "default: "
+ strcpy(name, "default: ");
+ pName = &name[9];
+ }
+
+ // Tell the callback that we want
+ // the name for this device
+ if (recDevice) {
+ _recDisplayDeviceName = tmpName;
+ } else {
+ _playDisplayDeviceName = tmpName;
+ }
+
+ // Set members
+ _paDeviceIndex = -1;
+ _deviceIndex = 0;
+ _numPlayDevices = 0;
+ _numRecDevices = 0;
+
+ PaLock();
+
+ pa_operation* paOperation = NULL;
+
+ // Get the server info and update deviceName
+ paOperation =
+ LATE(pa_context_get_server_info)(_paContext, PaServerInfoCallback, this);
+
+ WaitForOperationCompletion(paOperation);
+
+ // Get the device index
+ if (recDevice) {
+ paOperation = LATE(pa_context_get_source_info_by_name)(
+ _paContext, (char*)tmpName, PaSourceInfoCallback, this);
+ } else {
+ paOperation = LATE(pa_context_get_sink_info_by_name)(
+ _paContext, (char*)tmpName, PaSinkInfoCallback, this);
+ }
+
+ WaitForOperationCompletion(paOperation);
+
+ PaUnLock();
+
+ // Set the index
+ index = _paDeviceIndex;
+
+ if (name) {
+ // Copy to name string
+ strncpy(pName, tmpName, nameLen);
+ }
+
+ // Clear members
+ _playDisplayDeviceName = NULL;
+ _recDisplayDeviceName = NULL;
+ _paDeviceIndex = -1;
+ _deviceIndex = -1;
+ _numPlayDevices = 0;
+ _numRecDevices = 0;
+
+ return 0;
+}
+
+int32_t AudioDeviceLinuxPulse::InitPulseAudio() {
+ int retVal = 0;
+
+ // Load libpulse
+ if (!GetPulseSymbolTable()->Load()) {
+ // Most likely the Pulse library and sound server are not installed on
+ // this system
+ RTC_LOG(LS_ERROR) << "failed to load symbol table";
+ return -1;
+ }
+
+ // Create a mainloop API and connection to the default server
+ // the mainloop is the internal asynchronous API event loop
+ if (_paMainloop) {
+ RTC_LOG(LS_ERROR) << "PA mainloop has already existed";
+ return -1;
+ }
+ _paMainloop = LATE(pa_threaded_mainloop_new)();
+ if (!_paMainloop) {
+ RTC_LOG(LS_ERROR) << "could not create mainloop";
+ return -1;
+ }
+
+ // Start the threaded main loop
+ retVal = LATE(pa_threaded_mainloop_start)(_paMainloop);
+ if (retVal != PA_OK) {
+ RTC_LOG(LS_ERROR) << "failed to start main loop, error=" << retVal;
+ return -1;
+ }
+
+ RTC_LOG(LS_VERBOSE) << "mainloop running!";
+
+ PaLock();
+
+ _paMainloopApi = LATE(pa_threaded_mainloop_get_api)(_paMainloop);
+ if (!_paMainloopApi) {
+ RTC_LOG(LS_ERROR) << "could not create mainloop API";
+ PaUnLock();
+ return -1;
+ }
+
+ // Create a new PulseAudio context
+ if (_paContext) {
+ RTC_LOG(LS_ERROR) << "PA context has already existed";
+ PaUnLock();
+ return -1;
+ }
+ _paContext = LATE(pa_context_new)(_paMainloopApi, "WEBRTC VoiceEngine");
+
+ if (!_paContext) {
+ RTC_LOG(LS_ERROR) << "could not create context";
+ PaUnLock();
+ return -1;
+ }
+
+ // Set state callback function
+ LATE(pa_context_set_state_callback)(_paContext, PaContextStateCallback, this);
+
+ // Connect the context to a server (default)
+ _paStateChanged = false;
+ retVal =
+ LATE(pa_context_connect)(_paContext, NULL, PA_CONTEXT_NOAUTOSPAWN, NULL);
+
+ if (retVal != PA_OK) {
+ RTC_LOG(LS_ERROR) << "failed to connect context, error=" << retVal;
+ PaUnLock();
+ return -1;
+ }
+
+ // Wait for state change
+ while (!_paStateChanged) {
+ LATE(pa_threaded_mainloop_wait)(_paMainloop);
+ }
+
+ // Now check to see what final state we reached.
+ pa_context_state_t state = LATE(pa_context_get_state)(_paContext);
+
+ if (state != PA_CONTEXT_READY) {
+ if (state == PA_CONTEXT_FAILED) {
+ RTC_LOG(LS_ERROR) << "failed to connect to PulseAudio sound server";
+ } else if (state == PA_CONTEXT_TERMINATED) {
+ RTC_LOG(LS_ERROR) << "PulseAudio connection terminated early";
+ } else {
+ // Shouldn't happen, because we only signal on one of those three
+ // states
+ RTC_LOG(LS_ERROR) << "unknown problem connecting to PulseAudio";
+ }
+ PaUnLock();
+ return -1;
+ }
+
+ PaUnLock();
+
+ // Give the objects to the mixer manager
+ _mixerManager.SetPulseAudioObjects(_paMainloop, _paContext);
+
+ // Check the version
+ if (CheckPulseAudioVersion() < 0) {
+ RTC_LOG(LS_ERROR) << "PulseAudio version " << _paServerVersion
+ << " not supported";
+ return -1;
+ }
+
+ // Initialize sampling frequency
+ if (InitSamplingFrequency() < 0 || sample_rate_hz_ == 0) {
+ RTC_LOG(LS_ERROR) << "failed to initialize sampling frequency, set to "
+ << sample_rate_hz_ << " Hz";
+ return -1;
+ }
+
+ return 0;
+}
+
+int32_t AudioDeviceLinuxPulse::TerminatePulseAudio() {
+  // Do nothing if the instance doesn't exist, which is likely the case when
+  // GetPulseSymbolTable()->Load() failed.
+ if (!_paMainloop) {
+ return 0;
+ }
+
+ PaLock();
+
+ // Disconnect the context
+ if (_paContext) {
+ LATE(pa_context_disconnect)(_paContext);
+ }
+
+ // Unreference the context
+ if (_paContext) {
+ LATE(pa_context_unref)(_paContext);
+ }
+
+ PaUnLock();
+ _paContext = NULL;
+
+ // Stop the threaded main loop
+ if (_paMainloop) {
+ LATE(pa_threaded_mainloop_stop)(_paMainloop);
+ }
+
+ // Free the mainloop
+ if (_paMainloop) {
+ LATE(pa_threaded_mainloop_free)(_paMainloop);
+ }
+
+ _paMainloop = NULL;
+
+ RTC_LOG(LS_VERBOSE) << "PulseAudio terminated";
+
+ return 0;
+}
+
+void AudioDeviceLinuxPulse::PaLock() {
+ LATE(pa_threaded_mainloop_lock)(_paMainloop);
+}
+
+void AudioDeviceLinuxPulse::PaUnLock() {
+ LATE(pa_threaded_mainloop_unlock)(_paMainloop);
+}
+
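+// Waits for `paOperation` to leave the PA_OPERATION_RUNNING state and then
+// unrefs it. The mainloop lock must be held (see PaLock()), since
+// pa_threaded_mainloop_wait() releases and re-acquires it while waiting.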
+void AudioDeviceLinuxPulse::WaitForOperationCompletion(
+ pa_operation* paOperation) const {
+ if (!paOperation) {
+ RTC_LOG(LS_ERROR) << "paOperation NULL in WaitForOperationCompletion";
+ return;
+ }
+
+ while (LATE(pa_operation_get_state)(paOperation) == PA_OPERATION_RUNNING) {
+ LATE(pa_threaded_mainloop_wait)(_paMainloop);
+ }
+
+ LATE(pa_operation_unref)(paOperation);
+}
+
+// ============================================================================
+// Thread Methods
+// ============================================================================
+
+void AudioDeviceLinuxPulse::EnableWriteCallback() {
+ if (LATE(pa_stream_get_state)(_playStream) == PA_STREAM_READY) {
+ // May already have available space. Must check.
+ _tempBufferSpace = LATE(pa_stream_writable_size)(_playStream);
+ if (_tempBufferSpace > 0) {
+ // Yup, there is already space available, so if we register a
+ // write callback then it will not receive any event. So dispatch
+ // one ourself instead.
+ _timeEventPlay.Set();
+ return;
+ }
+ }
+
+ LATE(pa_stream_set_write_callback)(_playStream, &PaStreamWriteCallback, this);
+}
+
+void AudioDeviceLinuxPulse::DisableWriteCallback() {
+ LATE(pa_stream_set_write_callback)(_playStream, NULL, NULL);
+}
+
+void AudioDeviceLinuxPulse::PaStreamWriteCallback(pa_stream* /*unused*/,
+ size_t buffer_space,
+ void* pThis) {
+ static_cast<AudioDeviceLinuxPulse*>(pThis)->PaStreamWriteCallbackHandler(
+ buffer_space);
+}
+
+void AudioDeviceLinuxPulse::PaStreamWriteCallbackHandler(size_t bufferSpace) {
+ _tempBufferSpace = bufferSpace;
+
+ // Since we write the data asynchronously on a different thread, we have
+ // to temporarily disable the write callback or else Pulse will call it
+ // continuously until we write the data. We re-enable it below.
+ DisableWriteCallback();
+ _timeEventPlay.Set();
+}
+
+void AudioDeviceLinuxPulse::PaStreamUnderflowCallback(pa_stream* /*unused*/,
+ void* pThis) {
+ static_cast<AudioDeviceLinuxPulse*>(pThis)
+ ->PaStreamUnderflowCallbackHandler();
+}
+
+void AudioDeviceLinuxPulse::PaStreamUnderflowCallbackHandler() {
+ RTC_LOG(LS_WARNING) << "Playout underflow";
+
+ if (_configuredLatencyPlay == WEBRTC_PA_NO_LATENCY_REQUIREMENTS) {
+ // We didn't configure a pa_buffer_attr before, so switching to
+ // one now would be questionable.
+ return;
+ }
+
+ // Otherwise reconfigure the stream with a higher target latency.
+
+ const pa_sample_spec* spec = LATE(pa_stream_get_sample_spec)(_playStream);
+ if (!spec) {
+ RTC_LOG(LS_ERROR) << "pa_stream_get_sample_spec()";
+ return;
+ }
+
+ size_t bytesPerSec = LATE(pa_bytes_per_second)(spec);
+ uint32_t newLatency =
+ _configuredLatencyPlay + bytesPerSec *
+ WEBRTC_PA_PLAYBACK_LATENCY_INCREMENT_MSECS /
+ WEBRTC_PA_MSECS_PER_SEC;
+
+ // Set the play buffer attributes
+ _playBufferAttr.maxlength = newLatency;
+ _playBufferAttr.tlength = newLatency;
+ _playBufferAttr.minreq = newLatency / WEBRTC_PA_PLAYBACK_REQUEST_FACTOR;
+ _playBufferAttr.prebuf = _playBufferAttr.tlength - _playBufferAttr.minreq;
+
+ pa_operation* op = LATE(pa_stream_set_buffer_attr)(
+ _playStream, &_playBufferAttr, NULL, NULL);
+ if (!op) {
+ RTC_LOG(LS_ERROR) << "pa_stream_set_buffer_attr()";
+ return;
+ }
+
+ // Don't need to wait for this to complete.
+ LATE(pa_operation_unref)(op);
+
+ // Save the new latency in case we underflow again.
+ _configuredLatencyPlay = newLatency;
+}
+
+void AudioDeviceLinuxPulse::EnableReadCallback() {
+ LATE(pa_stream_set_read_callback)(_recStream, &PaStreamReadCallback, this);
+}
+
+void AudioDeviceLinuxPulse::DisableReadCallback() {
+ LATE(pa_stream_set_read_callback)(_recStream, NULL, NULL);
+}
+
+void AudioDeviceLinuxPulse::PaStreamReadCallback(pa_stream* /*unused1*/,
+ size_t /*unused2*/,
+ void* pThis) {
+ static_cast<AudioDeviceLinuxPulse*>(pThis)->PaStreamReadCallbackHandler();
+}
+
+void AudioDeviceLinuxPulse::PaStreamReadCallbackHandler() {
+ // We get the data pointer and size now in order to save one Lock/Unlock
+ // in the worker thread.
+ if (LATE(pa_stream_peek)(_recStream, &_tempSampleData,
+ &_tempSampleDataSize) != 0) {
+ RTC_LOG(LS_ERROR) << "Can't read data!";
+ return;
+ }
+
+ // Since we consume the data asynchronously on a different thread, we have
+ // to temporarily disable the read callback or else Pulse will call it
+ // continuously until we consume the data. We re-enable it below.
+ DisableReadCallback();
+ _timeEventRec.Set();
+}
+
+void AudioDeviceLinuxPulse::PaStreamOverflowCallback(pa_stream* /*unused*/,
+ void* pThis) {
+ static_cast<AudioDeviceLinuxPulse*>(pThis)->PaStreamOverflowCallbackHandler();
+}
+
+void AudioDeviceLinuxPulse::PaStreamOverflowCallbackHandler() {
+ RTC_LOG(LS_WARNING) << "Recording overflow";
+}
+
+int32_t AudioDeviceLinuxPulse::LatencyUsecs(pa_stream* stream) {
+ if (!WEBRTC_PA_REPORT_LATENCY) {
+ return 0;
+ }
+
+ if (!stream) {
+ return 0;
+ }
+
+ pa_usec_t latency;
+ int negative;
+ if (LATE(pa_stream_get_latency)(stream, &latency, &negative) != 0) {
+ RTC_LOG(LS_ERROR) << "Can't query latency";
+ // We'd rather continue playout/capture with an incorrect delay than
+ // stop it altogether, so return a valid value.
+ return 0;
+ }
+
+ if (negative) {
+ RTC_LOG(LS_VERBOSE)
+ << "warning: pa_stream_get_latency reported negative delay";
+
+ // The delay can be negative for monitoring streams if the captured
+ // samples haven't been played yet. In such a case, "latency"
+ // contains the magnitude, so we must negate it to get the real value.
+ int32_t tmpLatency = (int32_t)-latency;
+ if (tmpLatency < 0) {
+ // Make sure that we don't use a negative delay.
+ tmpLatency = 0;
+ }
+
+ return tmpLatency;
+ } else {
+ return (int32_t)latency;
+ }
+}
+
+int32_t AudioDeviceLinuxPulse::ReadRecordedData(const void* bufferData,
+ size_t bufferSize)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_) {
+ size_t size = bufferSize;
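+  // Samples per channel in one 10 ms chunk (2 bytes per 16-bit sample).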
+ uint32_t numRecSamples = _recordBufferSize / (2 * _recChannels);
+
+ // Account for the peeked data and the used data.
+ uint32_t recDelay =
+ (uint32_t)((LatencyUsecs(_recStream) / 1000) +
+ 10 * ((size + _recordBufferUsed) / _recordBufferSize));
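+  // (Each full _recordBufferSize chunk of pending data represents 10 ms of
+  // audio, hence the "10 *" term; LatencyUsecs() is converted to ms.)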
+
+ if (_playStream) {
+ // Get the playout delay.
+ _sndCardPlayDelay = (uint32_t)(LatencyUsecs(_playStream) / 1000);
+ }
+
+ if (_recordBufferUsed > 0) {
+ // Have to copy to the buffer until it is full.
+ size_t copy = _recordBufferSize - _recordBufferUsed;
+ if (size < copy) {
+ copy = size;
+ }
+
+ memcpy(&_recBuffer[_recordBufferUsed], bufferData, copy);
+ _recordBufferUsed += copy;
+ bufferData = static_cast<const char*>(bufferData) + copy;
+ size -= copy;
+
+ if (_recordBufferUsed != _recordBufferSize) {
+ // Not enough data yet to pass to VoE.
+ return 0;
+ }
+
+ // Provide data to VoiceEngine.
+ if (ProcessRecordedData(_recBuffer, numRecSamples, recDelay) == -1) {
+ // We have stopped recording.
+ return -1;
+ }
+
+ _recordBufferUsed = 0;
+ }
+
+ // Now process full 10ms sample sets directly from the input.
+ while (size >= _recordBufferSize) {
+ // Provide data to VoiceEngine.
+ if (ProcessRecordedData(static_cast<int8_t*>(const_cast<void*>(bufferData)),
+ numRecSamples, recDelay) == -1) {
+ // We have stopped recording.
+ return -1;
+ }
+
+ bufferData = static_cast<const char*>(bufferData) + _recordBufferSize;
+ size -= _recordBufferSize;
+
+ // We have consumed 10ms of data.
+ recDelay -= 10;
+ }
+
+ // Now save any leftovers for later.
+ if (size > 0) {
+ memcpy(_recBuffer, bufferData, size);
+ _recordBufferUsed = size;
+ }
+
+ return 0;
+}
+
+int32_t AudioDeviceLinuxPulse::ProcessRecordedData(int8_t* bufferData,
+ uint32_t bufferSizeInSamples,
+ uint32_t recDelay)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_) {
+ _ptrAudioBuffer->SetRecordedBuffer(bufferData, bufferSizeInSamples);
+
+ // TODO(andrew): this is a temporary hack, to avoid non-causal far- and
+ // near-end signals at the AEC for PulseAudio. I think the system delay is
+ // being correctly calculated here, but for legacy reasons we add +10 ms
+ // to the value in the AEC. The real fix will be part of a larger
+ // investigation into managing system delay in the AEC.
+ if (recDelay > 10)
+ recDelay -= 10;
+ else
+ recDelay = 0;
+ _ptrAudioBuffer->SetVQEData(_sndCardPlayDelay, recDelay);
+ _ptrAudioBuffer->SetTypingStatus(KeyPressed());
+ // Deliver recorded samples at specified sample rate,
+ // mic level etc. to the observer using callback.
+ UnLock();
+ _ptrAudioBuffer->DeliverRecordedData();
+ Lock();
+
+ // We have been unlocked - check the flag again.
+ if (!_recording) {
+ return -1;
+ }
+
+ return 0;
+}
+
+bool AudioDeviceLinuxPulse::PlayThreadProcess() {
+ if (!_timeEventPlay.Wait(1000)) {
+ return true;
+ }
+
+ MutexLock lock(&mutex_);
+
+ if (quit_) {
+ return false;
+ }
+
+ if (_startPlay) {
+ RTC_LOG(LS_VERBOSE) << "_startPlay true, performing initial actions";
+
+ _startPlay = false;
+ _playDeviceName = NULL;
+
+ // Set if not default device
+ if (_outputDeviceIndex > 0) {
+ // Get the playout device name
+ _playDeviceName = new char[kAdmMaxDeviceNameSize];
+ _deviceIndex = _outputDeviceIndex;
+ PlayoutDevices();
+ }
+
+ // Start muted only supported on 0.9.11 and up
+ if (LATE(pa_context_get_protocol_version)(_paContext) >=
+ WEBRTC_PA_ADJUST_LATENCY_PROTOCOL_VERSION) {
+ // Get the currently saved speaker mute status
+ // and set the initial mute status accordingly
+ bool enabled(false);
+ _mixerManager.SpeakerMute(enabled);
+ if (enabled) {
+ _playStreamFlags |= PA_STREAM_START_MUTED;
+ }
+ }
+
+ // Get the currently saved speaker volume
+ uint32_t volume = 0;
+ if (update_speaker_volume_at_startup_)
+ _mixerManager.SpeakerVolume(volume);
+
+ PaLock();
+
+ // NULL gives PA the choice of startup volume.
+ pa_cvolume* ptr_cvolume = NULL;
+ if (update_speaker_volume_at_startup_) {
+ pa_cvolume cVolumes;
+ ptr_cvolume = &cVolumes;
+
+ // Set the same volume for all channels
+ const pa_sample_spec* spec = LATE(pa_stream_get_sample_spec)(_playStream);
+ LATE(pa_cvolume_set)(&cVolumes, spec->channels, volume);
+ update_speaker_volume_at_startup_ = false;
+ }
+
+ // Connect the stream to a sink
+ if (LATE(pa_stream_connect_playback)(
+ _playStream, _playDeviceName, &_playBufferAttr,
+ (pa_stream_flags_t)_playStreamFlags, ptr_cvolume, NULL) != PA_OK) {
+ RTC_LOG(LS_ERROR) << "failed to connect play stream, err="
+ << LATE(pa_context_errno)(_paContext);
+ }
+
+ RTC_LOG(LS_VERBOSE) << "play stream connected";
+
+ // Wait for state change
+ while (LATE(pa_stream_get_state)(_playStream) != PA_STREAM_READY) {
+ LATE(pa_threaded_mainloop_wait)(_paMainloop);
+ }
+
+ RTC_LOG(LS_VERBOSE) << "play stream ready";
+
+ // We can now handle write callbacks
+ EnableWriteCallback();
+
+ PaUnLock();
+
+ // Clear device name
+ if (_playDeviceName) {
+ delete[] _playDeviceName;
+ _playDeviceName = NULL;
+ }
+
+ _playing = true;
+ _playStartEvent.Set();
+
+ return true;
+ }
+
+ if (_playing) {
+ if (!_recording) {
+ // Update the playout delay
+ _sndCardPlayDelay = (uint32_t)(LatencyUsecs(_playStream) / 1000);
+ }
+
+ if (_playbackBufferUnused < _playbackBufferSize) {
+ size_t write = _playbackBufferSize - _playbackBufferUnused;
+ if (_tempBufferSpace < write) {
+ write = _tempBufferSpace;
+ }
+
+ PaLock();
+ if (LATE(pa_stream_write)(
+ _playStream, (void*)&_playBuffer[_playbackBufferUnused], write,
+ NULL, (int64_t)0, PA_SEEK_RELATIVE) != PA_OK) {
+ _writeErrors++;
+ if (_writeErrors > 10) {
+ RTC_LOG(LS_ERROR) << "Playout error: _writeErrors=" << _writeErrors
+ << ", error=" << LATE(pa_context_errno)(_paContext);
+ _writeErrors = 0;
+ }
+ }
+ PaUnLock();
+
+ _playbackBufferUnused += write;
+ _tempBufferSpace -= write;
+ }
+
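+    // Samples per channel in one 10 ms chunk (2 bytes per 16-bit sample).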
+ uint32_t numPlaySamples = _playbackBufferSize / (2 * _playChannels);
+ // Might have been reduced to zero by the above.
+ if (_tempBufferSpace > 0) {
+      // Ask for new PCM data to be played out via the AudioDeviceBuffer;
+      // ensure that this callback is executed without holding the
+      // audio-thread lock.
+ UnLock();
+ RTC_LOG(LS_VERBOSE) << "requesting data";
+ uint32_t nSamples = _ptrAudioBuffer->RequestPlayoutData(numPlaySamples);
+ Lock();
+
+ // We have been unlocked - check the flag again.
+ if (!_playing) {
+ return true;
+ }
+
+ nSamples = _ptrAudioBuffer->GetPlayoutData(_playBuffer);
+ if (nSamples != numPlaySamples) {
+ RTC_LOG(LS_ERROR) << "invalid number of output samples(" << nSamples
+ << ")";
+ }
+
+ size_t write = _playbackBufferSize;
+ if (_tempBufferSpace < write) {
+ write = _tempBufferSpace;
+ }
+
+ RTC_LOG(LS_VERBOSE) << "will write";
+ PaLock();
+ if (LATE(pa_stream_write)(_playStream, (void*)&_playBuffer[0], write,
+ NULL, (int64_t)0, PA_SEEK_RELATIVE) != PA_OK) {
+ _writeErrors++;
+ if (_writeErrors > 10) {
+ RTC_LOG(LS_ERROR) << "Playout error: _writeErrors=" << _writeErrors
+ << ", error=" << LATE(pa_context_errno)(_paContext);
+ _writeErrors = 0;
+ }
+ }
+ PaUnLock();
+
+ _playbackBufferUnused = write;
+ }
+
+ _tempBufferSpace = 0;
+ PaLock();
+ EnableWriteCallback();
+ PaUnLock();
+
+ } // _playing
+
+ return true;
+}
+
+bool AudioDeviceLinuxPulse::RecThreadProcess() {
+ if (!_timeEventRec.Wait(1000)) {
+ return true;
+ }
+
+ MutexLock lock(&mutex_);
+ if (quit_) {
+ return false;
+ }
+ if (_startRec) {
+ RTC_LOG(LS_VERBOSE) << "_startRec true, performing initial actions";
+
+ _recDeviceName = NULL;
+
+ // Set if not default device
+ if (_inputDeviceIndex > 0) {
+ // Get the recording device name
+ _recDeviceName = new char[kAdmMaxDeviceNameSize];
+ _deviceIndex = _inputDeviceIndex;
+ RecordingDevices();
+ }
+
+ PaLock();
+
+ RTC_LOG(LS_VERBOSE) << "connecting stream";
+
+ // Connect the stream to a source
+ if (LATE(pa_stream_connect_record)(
+ _recStream, _recDeviceName, &_recBufferAttr,
+ (pa_stream_flags_t)_recStreamFlags) != PA_OK) {
+ RTC_LOG(LS_ERROR) << "failed to connect rec stream, err="
+ << LATE(pa_context_errno)(_paContext);
+ }
+
+ RTC_LOG(LS_VERBOSE) << "connected";
+
+ // Wait for state change
+ while (LATE(pa_stream_get_state)(_recStream) != PA_STREAM_READY) {
+ LATE(pa_threaded_mainloop_wait)(_paMainloop);
+ }
+
+ RTC_LOG(LS_VERBOSE) << "done";
+
+ // We can now handle read callbacks
+ EnableReadCallback();
+
+ PaUnLock();
+
+ // Clear device name
+ if (_recDeviceName) {
+ delete[] _recDeviceName;
+ _recDeviceName = NULL;
+ }
+
+ _startRec = false;
+ _recording = true;
+ _recStartEvent.Set();
+
+ return true;
+ }
+
+ if (_recording) {
+ // Read data and provide it to VoiceEngine
+ if (ReadRecordedData(_tempSampleData, _tempSampleDataSize) == -1) {
+ return true;
+ }
+
+ _tempSampleData = NULL;
+ _tempSampleDataSize = 0;
+
+ PaLock();
+ while (true) {
+ // Ack the last thing we read
+ if (LATE(pa_stream_drop)(_recStream) != 0) {
+ RTC_LOG(LS_WARNING)
+ << "failed to drop, err=" << LATE(pa_context_errno)(_paContext);
+ }
+
+ if (LATE(pa_stream_readable_size)(_recStream) <= 0) {
+ // Then that was all the data
+ break;
+ }
+
+ // Else more data.
+ const void* sampleData;
+ size_t sampleDataSize;
+
+ if (LATE(pa_stream_peek)(_recStream, &sampleData, &sampleDataSize) != 0) {
+ RTC_LOG(LS_ERROR) << "RECORD_ERROR, error = "
+ << LATE(pa_context_errno)(_paContext);
+ break;
+ }
+
+ // Drop lock for sigslot dispatch, which could take a while.
+ PaUnLock();
+ // Read data and provide it to VoiceEngine
+ if (ReadRecordedData(sampleData, sampleDataSize) == -1) {
+ return true;
+ }
+ PaLock();
+
+ // Return to top of loop for the ack and the check for more data.
+ }
+
+ EnableReadCallback();
+ PaUnLock();
+
+ } // _recording
+
+ return true;
+}
+
+bool AudioDeviceLinuxPulse::KeyPressed() const {
+#if defined(WEBRTC_USE_X11)
+ char szKey[32];
+ unsigned int i = 0;
+ char state = 0;
+
+ if (!_XDisplay)
+ return false;
+
+ // Check key map status
+ XQueryKeymap(_XDisplay, szKey);
+
+ // A bit change in keymap means a key is pressed
+ for (i = 0; i < sizeof(szKey); i++)
+ state |= (szKey[i] ^ _oldKeyState[i]) & szKey[i];
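+  // XOR exposes bits that changed since the last poll; ANDing with the
+  // current map keeps only 0 -> 1 transitions, i.e. new key presses rather
+  // than releases.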
+
+ // Save old state
+ memcpy((char*)_oldKeyState, (char*)szKey, sizeof(_oldKeyState));
+ return (state != 0);
+#else
+ return false;
+#endif
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_device/linux/audio_device_pulse_linux.h b/third_party/libwebrtc/modules/audio_device/linux/audio_device_pulse_linux.h
new file mode 100644
index 0000000000..0cf89ef011
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/linux/audio_device_pulse_linux.h
@@ -0,0 +1,349 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef AUDIO_DEVICE_AUDIO_DEVICE_PULSE_LINUX_H_
+#define AUDIO_DEVICE_AUDIO_DEVICE_PULSE_LINUX_H_
+
+#include <memory>
+
+#include "api/sequence_checker.h"
+#include "modules/audio_device/audio_device_buffer.h"
+#include "modules/audio_device/audio_device_generic.h"
+#include "modules/audio_device/include/audio_device.h"
+#include "modules/audio_device/include/audio_device_defines.h"
+#include "modules/audio_device/linux/audio_mixer_manager_pulse_linux.h"
+#include "modules/audio_device/linux/pulseaudiosymboltable_linux.h"
+#include "rtc_base/event.h"
+#include "rtc_base/platform_thread.h"
+#include "rtc_base/synchronization/mutex.h"
+#include "rtc_base/thread_annotations.h"
+
+#if defined(WEBRTC_USE_X11)
+#include <X11/Xlib.h>
+#endif
+
+#include <pulse/pulseaudio.h>
+#include <stddef.h>
+#include <stdint.h>
+
+// We define this flag if it's missing from our headers, because we want to be
+// able to compile against old headers but still use PA_STREAM_ADJUST_LATENCY
+// if run against a recent version of the library.
+#ifndef PA_STREAM_ADJUST_LATENCY
+#define PA_STREAM_ADJUST_LATENCY 0x2000U
+#endif
+#ifndef PA_STREAM_START_MUTED
+#define PA_STREAM_START_MUTED 0x1000U
+#endif
+
+// Set this constant to 0 to disable latency reading
+const uint32_t WEBRTC_PA_REPORT_LATENCY = 1;
+
+// Constants from implementation by Tristan Schmelcher [tschmelcher@google.com]
+
+// First PulseAudio protocol version that supports PA_STREAM_ADJUST_LATENCY.
+const uint32_t WEBRTC_PA_ADJUST_LATENCY_PROTOCOL_VERSION = 13;
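+//
+// Illustrative sketch (hypothetical `paContext` and `streamFlags` names):
+// the flag is typically gated on the protocol version at stream-setup time,
+// along the lines of
+//
+//   if (LATE(pa_context_get_protocol_version)(paContext) >=
+//       WEBRTC_PA_ADJUST_LATENCY_PROTOCOL_VERSION) {
+//     streamFlags |= PA_STREAM_ADJUST_LATENCY;
+//   }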
+
+// Some timing constants for optimal operation. See
+// https://tango.0pointer.de/pipermail/pulseaudio-discuss/2008-January/001170.html
+// for a good explanation of some of the factors that go into this.
+
+// Playback.
+
+// For playback, there is a round-trip delay to fill the server-side playback
+// buffer, so setting the latency too low risks a buffer underflow. We will
+// automatically increase the latency if a buffer underflow does occur, but we
+// also enforce a sane minimum at start-up time. Anything lower would be
+// virtually guaranteed to underflow at least once, so there's no point in
+// allowing lower latencies.
+const uint32_t WEBRTC_PA_PLAYBACK_LATENCY_MINIMUM_MSECS = 20;
+
+// Every time a playback stream underflows, we will reconfigure it with target
+// latency that is greater by this amount.
+const uint32_t WEBRTC_PA_PLAYBACK_LATENCY_INCREMENT_MSECS = 20;
+
+// We also need to configure a suitable request size. Too small and we'd burn
+// CPU from the overhead of transferring small amounts of data at once. Too
+// large
+// and the amount of data remaining in the buffer right before refilling it
+// would be a buffer underflow risk. We set it to half of the buffer size.
+const uint32_t WEBRTC_PA_PLAYBACK_REQUEST_FACTOR = 2;
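+//
+// Illustrative sketch only (assumes a filled-in pa_sample_spec `spec`):
+// these constants map onto a playback pa_buffer_attr roughly as
+//
+//   pa_buffer_attr attr;
+//   attr.maxlength = (uint32_t)-1;  // Accept the server's default cap.
+//   attr.tlength = static_cast<uint32_t>(pa_usec_to_bytes(
+//       WEBRTC_PA_PLAYBACK_LATENCY_MINIMUM_MSECS * PA_USEC_PER_MSEC, &spec));
+//   attr.minreq = attr.tlength / WEBRTC_PA_PLAYBACK_REQUEST_FACTOR;
+//   attr.prebuf = (uint32_t)-1;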
+
+// Capture.
+
+// For capture, low latency is not a buffer overflow risk, but it makes us burn
+// CPU from the overhead of transferring small amounts of data at once, so we
+// set
+// a recommended value that we use for the kLowLatency constant (but if the user
+// explicitly requests something lower then we will honour it).
+// 1ms takes about 6-7% CPU. 5ms takes about 5%. 10ms takes about 4.x%.
+const uint32_t WEBRTC_PA_LOW_CAPTURE_LATENCY_MSECS = 10;
+
+// There is a round-trip delay to ack the data to the server, so the
+// server-side buffer needs extra space to prevent buffer overflow. 20ms is
+// sufficient, but there is no penalty to making it bigger, so we make it huge.
+// (750ms is libpulse's default value for the _total_ buffer size in the
+// kNoLatencyRequirements case.)
+const uint32_t WEBRTC_PA_CAPTURE_BUFFER_EXTRA_MSECS = 750;
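+//
+// Illustrative capture-side counterpart (again only a sketch, assuming a
+// filled-in pa_sample_spec `spec`):
+//
+//   pa_buffer_attr attr;
+//   attr.fragsize = static_cast<uint32_t>(pa_usec_to_bytes(
+//       WEBRTC_PA_LOW_CAPTURE_LATENCY_MSECS * PA_USEC_PER_MSEC, &spec));
+//   attr.maxlength =
+//       attr.fragsize + static_cast<uint32_t>(pa_usec_to_bytes(
+//           WEBRTC_PA_CAPTURE_BUFFER_EXTRA_MSECS * PA_USEC_PER_MSEC, &spec));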
+
+const uint32_t WEBRTC_PA_MSECS_PER_SEC = 1000;
+
+// Init _configuredLatencyRec/Play to this value to disable latency requirements
+const int32_t WEBRTC_PA_NO_LATENCY_REQUIREMENTS = -1;
+
+// Set this const to 1 to account for peeked and used data in latency
+// calculation
+const uint32_t WEBRTC_PA_CAPTURE_BUFFER_LATENCY_ADJUSTMENT = 0;
+
+typedef webrtc::adm_linux_pulse::PulseAudioSymbolTable WebRTCPulseSymbolTable;
+WebRTCPulseSymbolTable* GetPulseSymbolTable();
+
+namespace webrtc {
+
+class AudioDeviceLinuxPulse : public AudioDeviceGeneric {
+ public:
+ AudioDeviceLinuxPulse();
+ virtual ~AudioDeviceLinuxPulse();
+
+ // Retrieve the currently utilized audio layer
+ int32_t ActiveAudioLayer(
+ AudioDeviceModule::AudioLayer& audioLayer) const override;
+
+  // Main initialization and termination
+ InitStatus Init() override;
+ int32_t Terminate() RTC_LOCKS_EXCLUDED(mutex_) override;
+ bool Initialized() const override;
+
+ // Device enumeration
+ int16_t PlayoutDevices() override;
+ int16_t RecordingDevices() override;
+ int32_t PlayoutDeviceName(uint16_t index,
+ char name[kAdmMaxDeviceNameSize],
+ char guid[kAdmMaxGuidSize]) override;
+ int32_t RecordingDeviceName(uint16_t index,
+ char name[kAdmMaxDeviceNameSize],
+ char guid[kAdmMaxGuidSize]) override;
+
+ // Device selection
+ int32_t SetPlayoutDevice(uint16_t index) override;
+ int32_t SetPlayoutDevice(
+ AudioDeviceModule::WindowsDeviceType device) override;
+ int32_t SetRecordingDevice(uint16_t index) override;
+ int32_t SetRecordingDevice(
+ AudioDeviceModule::WindowsDeviceType device) override;
+
+ // Audio transport initialization
+ int32_t PlayoutIsAvailable(bool& available) override;
+ int32_t InitPlayout() RTC_LOCKS_EXCLUDED(mutex_) override;
+ bool PlayoutIsInitialized() const override;
+ int32_t RecordingIsAvailable(bool& available) override;
+ int32_t InitRecording() override;
+ bool RecordingIsInitialized() const override;
+
+ // Audio transport control
+ int32_t StartPlayout() RTC_LOCKS_EXCLUDED(mutex_) override;
+ int32_t StopPlayout() RTC_LOCKS_EXCLUDED(mutex_) override;
+ bool Playing() const override;
+ int32_t StartRecording() RTC_LOCKS_EXCLUDED(mutex_) override;
+ int32_t StopRecording() RTC_LOCKS_EXCLUDED(mutex_) override;
+ bool Recording() const override;
+
+ // Audio mixer initialization
+ int32_t InitSpeaker() override;
+ bool SpeakerIsInitialized() const override;
+ int32_t InitMicrophone() override;
+ bool MicrophoneIsInitialized() const override;
+
+ // Speaker volume controls
+ int32_t SpeakerVolumeIsAvailable(bool& available) override;
+ int32_t SetSpeakerVolume(uint32_t volume) override;
+ int32_t SpeakerVolume(uint32_t& volume) const override;
+ int32_t MaxSpeakerVolume(uint32_t& maxVolume) const override;
+ int32_t MinSpeakerVolume(uint32_t& minVolume) const override;
+
+ // Microphone volume controls
+ int32_t MicrophoneVolumeIsAvailable(bool& available) override;
+ int32_t SetMicrophoneVolume(uint32_t volume) override;
+ int32_t MicrophoneVolume(uint32_t& volume) const override;
+ int32_t MaxMicrophoneVolume(uint32_t& maxVolume) const override;
+ int32_t MinMicrophoneVolume(uint32_t& minVolume) const override;
+
+ // Speaker mute control
+ int32_t SpeakerMuteIsAvailable(bool& available) override;
+ int32_t SetSpeakerMute(bool enable) override;
+ int32_t SpeakerMute(bool& enabled) const override;
+
+ // Microphone mute control
+ int32_t MicrophoneMuteIsAvailable(bool& available) override;
+ int32_t SetMicrophoneMute(bool enable) override;
+ int32_t MicrophoneMute(bool& enabled) const override;
+
+ // Stereo support
+ int32_t StereoPlayoutIsAvailable(bool& available) override;
+ int32_t SetStereoPlayout(bool enable) override;
+ int32_t StereoPlayout(bool& enabled) const override;
+ int32_t StereoRecordingIsAvailable(bool& available) override;
+ int32_t SetStereoRecording(bool enable) override;
+ int32_t StereoRecording(bool& enabled) const override;
+
+ // Delay information and control
+ int32_t PlayoutDelay(uint16_t& delayMS) const
+ RTC_LOCKS_EXCLUDED(mutex_) override;
+
+ void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) override;
+
+ private:
+ void Lock() RTC_EXCLUSIVE_LOCK_FUNCTION(mutex_) { mutex_.Lock(); }
+ void UnLock() RTC_UNLOCK_FUNCTION(mutex_) { mutex_.Unlock(); }
+ void WaitForOperationCompletion(pa_operation* paOperation) const;
+ void WaitForSuccess(pa_operation* paOperation) const;
+
+ bool KeyPressed() const;
+
+ static void PaContextStateCallback(pa_context* c, void* pThis);
+ static void PaSinkInfoCallback(pa_context* c,
+ const pa_sink_info* i,
+ int eol,
+ void* pThis);
+ static void PaSourceInfoCallback(pa_context* c,
+ const pa_source_info* i,
+ int eol,
+ void* pThis);
+ static void PaServerInfoCallback(pa_context* c,
+ const pa_server_info* i,
+ void* pThis);
+ static void PaStreamStateCallback(pa_stream* p, void* pThis);
+ void PaContextStateCallbackHandler(pa_context* c);
+ void PaSinkInfoCallbackHandler(const pa_sink_info* i, int eol);
+ void PaSourceInfoCallbackHandler(const pa_source_info* i, int eol);
+ void PaServerInfoCallbackHandler(const pa_server_info* i);
+ void PaStreamStateCallbackHandler(pa_stream* p);
+
+ void EnableWriteCallback();
+ void DisableWriteCallback();
+ static void PaStreamWriteCallback(pa_stream* unused,
+ size_t buffer_space,
+ void* pThis);
+ void PaStreamWriteCallbackHandler(size_t buffer_space);
+ static void PaStreamUnderflowCallback(pa_stream* unused, void* pThis);
+ void PaStreamUnderflowCallbackHandler();
+ void EnableReadCallback();
+ void DisableReadCallback();
+ static void PaStreamReadCallback(pa_stream* unused1,
+ size_t unused2,
+ void* pThis);
+ void PaStreamReadCallbackHandler();
+ static void PaStreamOverflowCallback(pa_stream* unused, void* pThis);
+ void PaStreamOverflowCallbackHandler();
+ int32_t LatencyUsecs(pa_stream* stream);
+ int32_t ReadRecordedData(const void* bufferData, size_t bufferSize);
+ int32_t ProcessRecordedData(int8_t* bufferData,
+ uint32_t bufferSizeInSamples,
+ uint32_t recDelay);
+
+ int32_t CheckPulseAudioVersion();
+ int32_t InitSamplingFrequency();
+ int32_t GetDefaultDeviceInfo(bool recDevice, char* name, uint16_t& index);
+ int32_t InitPulseAudio();
+ int32_t TerminatePulseAudio();
+
+ void PaLock();
+ void PaUnLock();
+
+ static void RecThreadFunc(void*);
+ static void PlayThreadFunc(void*);
+ bool RecThreadProcess() RTC_LOCKS_EXCLUDED(mutex_);
+ bool PlayThreadProcess() RTC_LOCKS_EXCLUDED(mutex_);
+
+ AudioDeviceBuffer* _ptrAudioBuffer;
+
+ mutable Mutex mutex_;
+ rtc::Event _timeEventRec;
+ rtc::Event _timeEventPlay;
+ rtc::Event _recStartEvent;
+ rtc::Event _playStartEvent;
+
+ rtc::PlatformThread _ptrThreadPlay;
+ rtc::PlatformThread _ptrThreadRec;
+
+ AudioMixerManagerLinuxPulse _mixerManager;
+
+ uint16_t _inputDeviceIndex;
+ uint16_t _outputDeviceIndex;
+ bool _inputDeviceIsSpecified;
+ bool _outputDeviceIsSpecified;
+
+ int sample_rate_hz_;
+ uint8_t _recChannels;
+ uint8_t _playChannels;
+
+ // Stores thread ID in constructor.
+  // We can then use RTC_DCHECK_RUN_ON(&thread_checker_) to ensure that
+ // other methods are called from the same thread.
+ // Currently only does RTC_DCHECK(thread_checker_.IsCurrent()).
+ SequenceChecker thread_checker_;
+
+ bool _initialized;
+ bool _recording;
+ bool _playing;
+ bool _recIsInitialized;
+ bool _playIsInitialized;
+ bool _startRec;
+ bool _startPlay;
+ bool update_speaker_volume_at_startup_;
+ bool quit_ RTC_GUARDED_BY(&mutex_);
+
+ uint32_t _sndCardPlayDelay RTC_GUARDED_BY(&mutex_);
+
+ int32_t _writeErrors;
+
+ uint16_t _deviceIndex;
+ int16_t _numPlayDevices;
+ int16_t _numRecDevices;
+ char* _playDeviceName;
+ char* _recDeviceName;
+ char* _playDisplayDeviceName;
+ char* _recDisplayDeviceName;
+ char _paServerVersion[32];
+
+ int8_t* _playBuffer;
+ size_t _playbackBufferSize;
+ size_t _playbackBufferUnused;
+ size_t _tempBufferSpace;
+ int8_t* _recBuffer;
+ size_t _recordBufferSize;
+ size_t _recordBufferUsed;
+ const void* _tempSampleData;
+ size_t _tempSampleDataSize;
+ int32_t _configuredLatencyPlay;
+ int32_t _configuredLatencyRec;
+
+ // PulseAudio
+ uint16_t _paDeviceIndex;
+ bool _paStateChanged;
+
+ pa_threaded_mainloop* _paMainloop;
+ pa_mainloop_api* _paMainloopApi;
+ pa_context* _paContext;
+
+ pa_stream* _recStream;
+ pa_stream* _playStream;
+ uint32_t _recStreamFlags;
+ uint32_t _playStreamFlags;
+ pa_buffer_attr _playBufferAttr;
+ pa_buffer_attr _recBufferAttr;
+
+ char _oldKeyState[32];
+#if defined(WEBRTC_USE_X11)
+ Display* _XDisplay;
+#endif
+};
+
+} // namespace webrtc
+
+#endif  // AUDIO_DEVICE_AUDIO_DEVICE_PULSE_LINUX_H_
diff --git a/third_party/libwebrtc/modules/audio_device/linux/audio_mixer_manager_alsa_linux.cc b/third_party/libwebrtc/modules/audio_device/linux/audio_mixer_manager_alsa_linux.cc
new file mode 100644
index 0000000000..e7e7033173
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/linux/audio_mixer_manager_alsa_linux.cc
@@ -0,0 +1,979 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_device/linux/audio_mixer_manager_alsa_linux.h"
+
+#include "modules/audio_device/linux/audio_device_alsa_linux.h"
+#include "rtc_base/logging.h"
+
+// Accesses ALSA functions through our late-binding symbol table instead of
+// directly. This way we don't have to link to libasound, which means our binary
+// will work on systems that don't have it.
+#define LATE(sym) \
+ LATESYM_GET(webrtc::adm_linux_alsa::AlsaSymbolTable, GetAlsaSymbolTable(), \
+ sym)
+
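+// For example, a late-bound call reads just like a direct one:
+//
+//   int err = LATE(snd_mixer_open)(&handle, 0);
+//
+// except that `snd_mixer_open` is resolved from the dynamically loaded
+// libasound symbol table at runtime rather than at link time.
+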
+namespace webrtc {
+
+AudioMixerManagerLinuxALSA::AudioMixerManagerLinuxALSA()
+ : _outputMixerHandle(NULL),
+ _inputMixerHandle(NULL),
+ _outputMixerElement(NULL),
+ _inputMixerElement(NULL) {
+ RTC_DLOG(LS_INFO) << __FUNCTION__ << " created";
+
+ memset(_outputMixerStr, 0, kAdmMaxDeviceNameSize);
+ memset(_inputMixerStr, 0, kAdmMaxDeviceNameSize);
+}
+
+AudioMixerManagerLinuxALSA::~AudioMixerManagerLinuxALSA() {
+ RTC_DLOG(LS_INFO) << __FUNCTION__ << " destroyed";
+ Close();
+}
+
+// ============================================================================
+// PUBLIC METHODS
+// ============================================================================
+
+int32_t AudioMixerManagerLinuxALSA::Close() {
+ RTC_DLOG(LS_VERBOSE) << __FUNCTION__;
+
+ MutexLock lock(&mutex_);
+
+ CloseSpeakerLocked();
+ CloseMicrophoneLocked();
+
+ return 0;
+}
+
+int32_t AudioMixerManagerLinuxALSA::CloseSpeaker() {
+ MutexLock lock(&mutex_);
+ return CloseSpeakerLocked();
+}
+
+int32_t AudioMixerManagerLinuxALSA::CloseSpeakerLocked() {
+ RTC_DLOG(LS_VERBOSE) << __FUNCTION__;
+
+ int errVal = 0;
+
+ if (_outputMixerHandle != NULL) {
+ RTC_LOG(LS_VERBOSE) << "Closing playout mixer";
+ LATE(snd_mixer_free)(_outputMixerHandle);
+ if (errVal < 0) {
+ RTC_LOG(LS_ERROR) << "Error freeing playout mixer: "
+ << LATE(snd_strerror)(errVal);
+ }
+ errVal = LATE(snd_mixer_detach)(_outputMixerHandle, _outputMixerStr);
+ if (errVal < 0) {
+ RTC_LOG(LS_ERROR) << "Error detaching playout mixer: "
+ << LATE(snd_strerror)(errVal);
+ }
+ errVal = LATE(snd_mixer_close)(_outputMixerHandle);
+ if (errVal < 0) {
+ RTC_LOG(LS_ERROR) << "Error snd_mixer_close(handleMixer) errVal="
+ << errVal;
+ }
+ _outputMixerHandle = NULL;
+ _outputMixerElement = NULL;
+ }
+ memset(_outputMixerStr, 0, kAdmMaxDeviceNameSize);
+
+ return 0;
+}
+
+int32_t AudioMixerManagerLinuxALSA::CloseMicrophone() {
+ MutexLock lock(&mutex_);
+ return CloseMicrophoneLocked();
+}
+
+int32_t AudioMixerManagerLinuxALSA::CloseMicrophoneLocked() {
+ RTC_DLOG(LS_VERBOSE) << __FUNCTION__;
+
+ int errVal = 0;
+
+ if (_inputMixerHandle != NULL) {
+ RTC_LOG(LS_VERBOSE) << "Closing record mixer";
+
+ LATE(snd_mixer_free)(_inputMixerHandle);
+ if (errVal < 0) {
+ RTC_LOG(LS_ERROR) << "Error freeing record mixer: "
+ << LATE(snd_strerror)(errVal);
+ }
+ RTC_LOG(LS_VERBOSE) << "Closing record mixer 2";
+
+ errVal = LATE(snd_mixer_detach)(_inputMixerHandle, _inputMixerStr);
+ if (errVal < 0) {
+ RTC_LOG(LS_ERROR) << "Error detaching record mixer: "
+ << LATE(snd_strerror)(errVal);
+ }
+ RTC_LOG(LS_VERBOSE) << "Closing record mixer 3";
+
+ errVal = LATE(snd_mixer_close)(_inputMixerHandle);
+ if (errVal < 0) {
+ RTC_LOG(LS_ERROR) << "Error snd_mixer_close(handleMixer) errVal="
+ << errVal;
+ }
+
+ RTC_LOG(LS_VERBOSE) << "Closing record mixer 4";
+ _inputMixerHandle = NULL;
+ _inputMixerElement = NULL;
+ }
+ memset(_inputMixerStr, 0, kAdmMaxDeviceNameSize);
+
+ return 0;
+}
+
+int32_t AudioMixerManagerLinuxALSA::OpenSpeaker(char* deviceName) {
+ RTC_LOG(LS_VERBOSE) << "AudioMixerManagerLinuxALSA::OpenSpeaker(name="
+ << deviceName << ")";
+
+ MutexLock lock(&mutex_);
+
+ int errVal = 0;
+
+ // Close any existing output mixer handle
+ //
+ if (_outputMixerHandle != NULL) {
+ RTC_LOG(LS_VERBOSE) << "Closing playout mixer";
+
+ LATE(snd_mixer_free)(_outputMixerHandle);
+ if (errVal < 0) {
+ RTC_LOG(LS_ERROR) << "Error freeing playout mixer: "
+ << LATE(snd_strerror)(errVal);
+ }
+ errVal = LATE(snd_mixer_detach)(_outputMixerHandle, _outputMixerStr);
+ if (errVal < 0) {
+ RTC_LOG(LS_ERROR) << "Error detaching playout mixer: "
+ << LATE(snd_strerror)(errVal);
+ }
+ errVal = LATE(snd_mixer_close)(_outputMixerHandle);
+ if (errVal < 0) {
+ RTC_LOG(LS_ERROR) << "Error snd_mixer_close(handleMixer) errVal="
+ << errVal;
+ }
+ }
+ _outputMixerHandle = NULL;
+ _outputMixerElement = NULL;
+
+ errVal = LATE(snd_mixer_open)(&_outputMixerHandle, 0);
+ if (errVal < 0) {
+ RTC_LOG(LS_ERROR) << "snd_mixer_open(&_outputMixerHandle, 0) - error";
+ return -1;
+ }
+
+ char controlName[kAdmMaxDeviceNameSize] = {0};
+ GetControlName(controlName, deviceName);
+
+ RTC_LOG(LS_VERBOSE) << "snd_mixer_attach(_outputMixerHandle, " << controlName
+ << ")";
+
+ errVal = LATE(snd_mixer_attach)(_outputMixerHandle, controlName);
+ if (errVal < 0) {
+ RTC_LOG(LS_ERROR) << "snd_mixer_attach(_outputMixerHandle, " << controlName
+ << ") error: " << LATE(snd_strerror)(errVal);
+ _outputMixerHandle = NULL;
+ return -1;
+ }
+ strcpy(_outputMixerStr, controlName);
+
+ errVal = LATE(snd_mixer_selem_register)(_outputMixerHandle, NULL, NULL);
+ if (errVal < 0) {
+ RTC_LOG(LS_ERROR)
+ << "snd_mixer_selem_register(_outputMixerHandle, NULL, NULL), "
+ "error: "
+ << LATE(snd_strerror)(errVal);
+ _outputMixerHandle = NULL;
+ return -1;
+ }
+
+ // Load and find the proper mixer element
+ if (LoadSpeakerMixerElement() < 0) {
+ return -1;
+ }
+
+ if (_outputMixerHandle != NULL) {
+ RTC_LOG(LS_VERBOSE) << "the output mixer device is now open ("
+ << _outputMixerHandle << ")";
+ }
+
+ return 0;
+}
+
+int32_t AudioMixerManagerLinuxALSA::OpenMicrophone(char* deviceName) {
+ RTC_LOG(LS_VERBOSE) << "AudioMixerManagerLinuxALSA::OpenMicrophone(name="
+ << deviceName << ")";
+
+ MutexLock lock(&mutex_);
+
+ int errVal = 0;
+
+ // Close any existing input mixer handle
+ //
+ if (_inputMixerHandle != NULL) {
+ RTC_LOG(LS_VERBOSE) << "Closing record mixer";
+
+ LATE(snd_mixer_free)(_inputMixerHandle);
+ if (errVal < 0) {
+ RTC_LOG(LS_ERROR) << "Error freeing record mixer: "
+ << LATE(snd_strerror)(errVal);
+ }
+ RTC_LOG(LS_VERBOSE) << "Closing record mixer";
+
+ errVal = LATE(snd_mixer_detach)(_inputMixerHandle, _inputMixerStr);
+ if (errVal < 0) {
+ RTC_LOG(LS_ERROR) << "Error detaching record mixer: "
+ << LATE(snd_strerror)(errVal);
+ }
+ RTC_LOG(LS_VERBOSE) << "Closing record mixer";
+
+ errVal = LATE(snd_mixer_close)(_inputMixerHandle);
+ if (errVal < 0) {
+ RTC_LOG(LS_ERROR) << "Error snd_mixer_close(handleMixer) errVal="
+ << errVal;
+ }
+ RTC_LOG(LS_VERBOSE) << "Closing record mixer";
+ }
+ _inputMixerHandle = NULL;
+ _inputMixerElement = NULL;
+
+ errVal = LATE(snd_mixer_open)(&_inputMixerHandle, 0);
+ if (errVal < 0) {
+ RTC_LOG(LS_ERROR) << "snd_mixer_open(&_inputMixerHandle, 0) - error";
+ return -1;
+ }
+
+ char controlName[kAdmMaxDeviceNameSize] = {0};
+ GetControlName(controlName, deviceName);
+
+ RTC_LOG(LS_VERBOSE) << "snd_mixer_attach(_inputMixerHandle, " << controlName
+ << ")";
+
+ errVal = LATE(snd_mixer_attach)(_inputMixerHandle, controlName);
+ if (errVal < 0) {
+ RTC_LOG(LS_ERROR) << "snd_mixer_attach(_inputMixerHandle, " << controlName
+ << ") error: " << LATE(snd_strerror)(errVal);
+
+ _inputMixerHandle = NULL;
+ return -1;
+ }
+ strcpy(_inputMixerStr, controlName);
+
+ errVal = LATE(snd_mixer_selem_register)(_inputMixerHandle, NULL, NULL);
+ if (errVal < 0) {
+ RTC_LOG(LS_ERROR)
+ << "snd_mixer_selem_register(_inputMixerHandle, NULL, NULL), "
+ "error: "
+ << LATE(snd_strerror)(errVal);
+
+ _inputMixerHandle = NULL;
+ return -1;
+ }
+ // Load and find the proper mixer element
+ if (LoadMicMixerElement() < 0) {
+ return -1;
+ }
+
+ if (_inputMixerHandle != NULL) {
+ RTC_LOG(LS_VERBOSE) << "the input mixer device is now open ("
+ << _inputMixerHandle << ")";
+ }
+
+ return 0;
+}
+
+bool AudioMixerManagerLinuxALSA::SpeakerIsInitialized() const {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+
+ return (_outputMixerHandle != NULL);
+}
+
+bool AudioMixerManagerLinuxALSA::MicrophoneIsInitialized() const {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+
+ return (_inputMixerHandle != NULL);
+}
+
+int32_t AudioMixerManagerLinuxALSA::SetSpeakerVolume(uint32_t volume) {
+ RTC_LOG(LS_VERBOSE) << "AudioMixerManagerLinuxALSA::SetSpeakerVolume(volume="
+ << volume << ")";
+
+ MutexLock lock(&mutex_);
+
+ if (_outputMixerElement == NULL) {
+ RTC_LOG(LS_WARNING) << "no avaliable output mixer element exists";
+ return -1;
+ }
+
+ int errVal = LATE(snd_mixer_selem_set_playback_volume_all)(
+ _outputMixerElement, volume);
+ if (errVal < 0) {
+ RTC_LOG(LS_ERROR) << "Error changing master volume: "
+ << LATE(snd_strerror)(errVal);
+ return -1;
+ }
+
+ return (0);
+}
+
+int32_t AudioMixerManagerLinuxALSA::SpeakerVolume(uint32_t& volume) const {
+ if (_outputMixerElement == NULL) {
+ RTC_LOG(LS_WARNING) << "no avaliable output mixer element exists";
+ return -1;
+ }
+
+ long int vol(0);
+
+ int errVal = LATE(snd_mixer_selem_get_playback_volume)(
+ _outputMixerElement, (snd_mixer_selem_channel_id_t)0, &vol);
+ if (errVal < 0) {
+ RTC_LOG(LS_ERROR) << "Error getting outputvolume: "
+ << LATE(snd_strerror)(errVal);
+ return -1;
+ }
+ RTC_LOG(LS_VERBOSE) << "AudioMixerManagerLinuxALSA::SpeakerVolume() => vol="
+ << vol;
+
+ volume = static_cast<uint32_t>(vol);
+
+ return 0;
+}
+
+int32_t AudioMixerManagerLinuxALSA::MaxSpeakerVolume(
+ uint32_t& maxVolume) const {
+ if (_outputMixerElement == NULL) {
+ RTC_LOG(LS_WARNING) << "no avilable output mixer element exists";
+ return -1;
+ }
+
+ long int minVol(0);
+ long int maxVol(0);
+
+ int errVal = LATE(snd_mixer_selem_get_playback_volume_range)(
+ _outputMixerElement, &minVol, &maxVol);
+
+ RTC_LOG(LS_VERBOSE) << "Playout hardware volume range, min: " << minVol
+ << ", max: " << maxVol;
+
+ if (maxVol <= minVol) {
+ RTC_LOG(LS_ERROR) << "Error getting get_playback_volume_range: "
+ << LATE(snd_strerror)(errVal);
+ }
+
+ maxVolume = static_cast<uint32_t>(maxVol);
+
+ return 0;
+}
+
+int32_t AudioMixerManagerLinuxALSA::MinSpeakerVolume(
+ uint32_t& minVolume) const {
+ if (_outputMixerElement == NULL) {
+ RTC_LOG(LS_WARNING) << "no avaliable output mixer element exists";
+ return -1;
+ }
+
+ long int minVol(0);
+ long int maxVol(0);
+
+ int errVal = LATE(snd_mixer_selem_get_playback_volume_range)(
+ _outputMixerElement, &minVol, &maxVol);
+
+ RTC_LOG(LS_VERBOSE) << "Playout hardware volume range, min: " << minVol
+ << ", max: " << maxVol;
+
+ if (maxVol <= minVol) {
+ RTC_LOG(LS_ERROR) << "Error getting get_playback_volume_range: "
+ << LATE(snd_strerror)(errVal);
+ }
+
+ minVolume = static_cast<uint32_t>(minVol);
+
+ return 0;
+}
+
+// TL: Have done testing with these but they don't seem reliable and
+// they were therefore not added
+/*
+ // ----------------------------------------------------------------------------
+ // SetMaxSpeakerVolume
+ // ----------------------------------------------------------------------------
+
+ int32_t AudioMixerManagerLinuxALSA::SetMaxSpeakerVolume(
+ uint32_t maxVolume)
+ {
+
+ if (_outputMixerElement == NULL)
+ {
+ RTC_LOG(LS_WARNING) << "no avaliable output mixer element exists";
+ return -1;
+ }
+
+ long int minVol(0);
+ long int maxVol(0);
+
+ int errVal = snd_mixer_selem_get_playback_volume_range(
+ _outputMixerElement, &minVol, &maxVol);
+ if ((maxVol <= minVol) || (errVal != 0))
+ {
+ RTC_LOG(LS_WARNING) << "Error getting playback volume range: "
+ << snd_strerror(errVal);
+ }
+
+ maxVol = maxVolume;
+ errVal = snd_mixer_selem_set_playback_volume_range(
+ _outputMixerElement, minVol, maxVol);
+ RTC_LOG(LS_VERBOSE) << "Playout hardware volume range, min: " << minVol
+ << ", max: " << maxVol;
+ if (errVal != 0)
+ {
+ RTC_LOG(LS_ERROR) << "Error setting playback volume range: "
+ << snd_strerror(errVal);
+ return -1;
+ }
+
+ return 0;
+ }
+
+ // ----------------------------------------------------------------------------
+ // SetMinSpeakerVolume
+ // ----------------------------------------------------------------------------
+
+ int32_t AudioMixerManagerLinuxALSA::SetMinSpeakerVolume(
+ uint32_t minVolume)
+ {
+
+ if (_outputMixerElement == NULL)
+ {
+ RTC_LOG(LS_WARNING) << "no avaliable output mixer element exists";
+ return -1;
+ }
+
+ long int minVol(0);
+ long int maxVol(0);
+
+ int errVal = snd_mixer_selem_get_playback_volume_range(
+ _outputMixerElement, &minVol, &maxVol);
+ if ((maxVol <= minVol) || (errVal != 0))
+ {
+ RTC_LOG(LS_WARNING) << "Error getting playback volume range: "
+ << snd_strerror(errVal);
+ }
+
+ minVol = minVolume;
+ errVal = snd_mixer_selem_set_playback_volume_range(
+ _outputMixerElement, minVol, maxVol);
+ RTC_LOG(LS_VERBOSE) << "Playout hardware volume range, min: " << minVol
+ << ", max: " << maxVol;
+ if (errVal != 0)
+ {
+ RTC_LOG(LS_ERROR) << "Error setting playback volume range: "
+ << snd_strerror(errVal);
+ return -1;
+ }
+
+ return 0;
+ }
+ */
+
+int32_t AudioMixerManagerLinuxALSA::SpeakerVolumeIsAvailable(bool& available) {
+ if (_outputMixerElement == NULL) {
+ RTC_LOG(LS_WARNING) << "no avaliable output mixer element exists";
+ return -1;
+ }
+
+ available = LATE(snd_mixer_selem_has_playback_volume)(_outputMixerElement);
+
+ return 0;
+}
+
+int32_t AudioMixerManagerLinuxALSA::SpeakerMuteIsAvailable(bool& available) {
+ if (_outputMixerElement == NULL) {
+ RTC_LOG(LS_WARNING) << "no avaliable output mixer element exists";
+ return -1;
+ }
+
+ available = LATE(snd_mixer_selem_has_playback_switch)(_outputMixerElement);
+
+ return 0;
+}
+
+int32_t AudioMixerManagerLinuxALSA::SetSpeakerMute(bool enable) {
+ RTC_LOG(LS_VERBOSE) << "AudioMixerManagerLinuxALSA::SetSpeakerMute(enable="
+ << enable << ")";
+
+ MutexLock lock(&mutex_);
+
+ if (_outputMixerElement == NULL) {
+ RTC_LOG(LS_WARNING) << "no avaliable output mixer element exists";
+ return -1;
+ }
+
+ // Ensure that the selected speaker destination has a valid mute control.
+ bool available(false);
+ SpeakerMuteIsAvailable(available);
+ if (!available) {
+ RTC_LOG(LS_WARNING) << "it is not possible to mute the speaker";
+ return -1;
+ }
+
+ // Note value = 0 (off) means muted
+ int errVal = LATE(snd_mixer_selem_set_playback_switch_all)(
+ _outputMixerElement, !enable);
+ if (errVal < 0) {
+ RTC_LOG(LS_ERROR) << "Error setting playback switch: "
+ << LATE(snd_strerror)(errVal);
+ return -1;
+ }
+
+ return (0);
+}
+
+int32_t AudioMixerManagerLinuxALSA::SpeakerMute(bool& enabled) const {
+ if (_outputMixerElement == NULL) {
+ RTC_LOG(LS_WARNING) << "no avaliable output mixer exists";
+ return -1;
+ }
+
+ // Ensure that the selected speaker destination has a valid mute control.
+ bool available =
+ LATE(snd_mixer_selem_has_playback_switch)(_outputMixerElement);
+ if (!available) {
+ RTC_LOG(LS_WARNING) << "it is not possible to mute the speaker";
+ return -1;
+ }
+
+ int value(false);
+
+ // Retrieve one boolean control value for a specified mute-control
+ //
+ int errVal = LATE(snd_mixer_selem_get_playback_switch)(
+ _outputMixerElement, (snd_mixer_selem_channel_id_t)0, &value);
+ if (errVal < 0) {
+ RTC_LOG(LS_ERROR) << "Error getting playback switch: "
+ << LATE(snd_strerror)(errVal);
+ return -1;
+ }
+
+ // Note value = 0 (off) means muted
+ enabled = (bool)!value;
+
+ return 0;
+}
+
+int32_t AudioMixerManagerLinuxALSA::MicrophoneMuteIsAvailable(bool& available) {
+ if (_inputMixerElement == NULL) {
+ RTC_LOG(LS_WARNING) << "no avaliable input mixer element exists";
+ return -1;
+ }
+
+ available = LATE(snd_mixer_selem_has_capture_switch)(_inputMixerElement);
+ return 0;
+}
+
+int32_t AudioMixerManagerLinuxALSA::SetMicrophoneMute(bool enable) {
+ RTC_LOG(LS_VERBOSE) << "AudioMixerManagerLinuxALSA::SetMicrophoneMute(enable="
+ << enable << ")";
+
+ MutexLock lock(&mutex_);
+
+ if (_inputMixerElement == NULL) {
+ RTC_LOG(LS_WARNING) << "no avaliable input mixer element exists";
+ return -1;
+ }
+
+ // Ensure that the selected microphone destination has a valid mute control.
+ bool available(false);
+ MicrophoneMuteIsAvailable(available);
+ if (!available) {
+ RTC_LOG(LS_WARNING) << "it is not possible to mute the microphone";
+ return -1;
+ }
+
+ // Note value = 0 (off) means muted
+ int errVal =
+ LATE(snd_mixer_selem_set_capture_switch_all)(_inputMixerElement, !enable);
+ if (errVal < 0) {
+ RTC_LOG(LS_ERROR) << "Error setting capture switch: "
+ << LATE(snd_strerror)(errVal);
+ return -1;
+ }
+
+ return (0);
+}
+
+int32_t AudioMixerManagerLinuxALSA::MicrophoneMute(bool& enabled) const {
+ if (_inputMixerElement == NULL) {
+ RTC_LOG(LS_WARNING) << "no avaliable input mixer exists";
+ return -1;
+ }
+
+ // Ensure that the selected microphone destination has a valid mute control.
+ bool available = LATE(snd_mixer_selem_has_capture_switch)(_inputMixerElement);
+ if (!available) {
+ RTC_LOG(LS_WARNING) << "it is not possible to mute the microphone";
+ return -1;
+ }
+
+ int value(false);
+
+ // Retrieve one boolean control value for a specified mute-control
+ //
+ int errVal = LATE(snd_mixer_selem_get_capture_switch)(
+ _inputMixerElement, (snd_mixer_selem_channel_id_t)0, &value);
+ if (errVal < 0) {
+ RTC_LOG(LS_ERROR) << "Error getting capture switch: "
+ << LATE(snd_strerror)(errVal);
+ return -1;
+ }
+
+ // Note value = 0 (off) means muted
+ enabled = (bool)!value;
+
+ return 0;
+}
+
+int32_t AudioMixerManagerLinuxALSA::MicrophoneVolumeIsAvailable(
+ bool& available) {
+ if (_inputMixerElement == NULL) {
+ RTC_LOG(LS_WARNING) << "no avaliable input mixer element exists";
+ return -1;
+ }
+
+ available = LATE(snd_mixer_selem_has_capture_volume)(_inputMixerElement);
+
+ return 0;
+}
+
+int32_t AudioMixerManagerLinuxALSA::SetMicrophoneVolume(uint32_t volume) {
+ RTC_LOG(LS_VERBOSE)
+ << "AudioMixerManagerLinuxALSA::SetMicrophoneVolume(volume=" << volume
+ << ")";
+
+ MutexLock lock(&mutex_);
+
+ if (_inputMixerElement == NULL) {
+ RTC_LOG(LS_WARNING) << "no avaliable input mixer element exists";
+ return -1;
+ }
+
+ int errVal =
+ LATE(snd_mixer_selem_set_capture_volume_all)(_inputMixerElement, volume);
+ if (errVal < 0) {
+ RTC_LOG(LS_ERROR) << "Error changing microphone volume: "
+ << LATE(snd_strerror)(errVal);
+ return -1;
+ }
+
+ return (0);
+}
+
+// TL: Have done testing with these but they don't seem reliable and
+// they were therefore not added
+/*
+ // ----------------------------------------------------------------------------
+ // SetMaxMicrophoneVolume
+ // ----------------------------------------------------------------------------
+
+ int32_t AudioMixerManagerLinuxALSA::SetMaxMicrophoneVolume(
+ uint32_t maxVolume)
+ {
+
+ if (_inputMixerElement == NULL)
+ {
+ RTC_LOG(LS_WARNING) << "no avaliable output mixer element exists";
+ return -1;
+ }
+
+ long int minVol(0);
+ long int maxVol(0);
+
+ int errVal = snd_mixer_selem_get_capture_volume_range(_inputMixerElement,
+ &minVol, &maxVol);
+ if ((maxVol <= minVol) || (errVal != 0))
+ {
+ RTC_LOG(LS_WARNING) << "Error getting capture volume range: "
+ << snd_strerror(errVal);
+ }
+
+ maxVol = (long int)maxVolume;
+ printf("min %d max %d", minVol, maxVol);
+ errVal = snd_mixer_selem_set_capture_volume_range(_inputMixerElement, minVol,
+ maxVol); RTC_LOG(LS_VERBOSE) << "Capture hardware volume range, min: " <<
+ minVol
+ << ", max: " << maxVol;
+ if (errVal != 0)
+ {
+ RTC_LOG(LS_ERROR) << "Error setting capture volume range: "
+ << snd_strerror(errVal);
+ return -1;
+ }
+
+ return 0;
+ }
+
+ // ----------------------------------------------------------------------------
+ // SetMinMicrophoneVolume
+ // ----------------------------------------------------------------------------
+
+ int32_t AudioMixerManagerLinuxALSA::SetMinMicrophoneVolume(
+ uint32_t minVolume)
+ {
+
+ if (_inputMixerElement == NULL)
+ {
+ RTC_LOG(LS_WARNING) << "no avaliable output mixer element exists";
+ return -1;
+ }
+
+ long int minVol(0);
+ long int maxVol(0);
+
+ int errVal = snd_mixer_selem_get_capture_volume_range(
+ _inputMixerElement, &minVol, &maxVol);
+ if (maxVol <= minVol)
+ {
+ //maxVol = 255;
+ RTC_LOG(LS_WARNING) << "Error getting capture volume range: "
+ << snd_strerror(errVal);
+ }
+
+ printf("min %d max %d", minVol, maxVol);
+ minVol = (long int)minVolume;
+ errVal = snd_mixer_selem_set_capture_volume_range(
+ _inputMixerElement, minVol, maxVol);
+ RTC_LOG(LS_VERBOSE) << "Capture hardware volume range, min: " << minVol
+ << ", max: " << maxVol;
+ if (errVal != 0)
+ {
+ RTC_LOG(LS_ERROR) << "Error setting capture volume range: "
+ << snd_strerror(errVal);
+ return -1;
+ }
+
+ return 0;
+ }
+ */
+
+int32_t AudioMixerManagerLinuxALSA::MicrophoneVolume(uint32_t& volume) const {
+ if (_inputMixerElement == NULL) {
+ RTC_LOG(LS_WARNING) << "no avaliable input mixer element exists";
+ return -1;
+ }
+
+ long int vol(0);
+
+ int errVal = LATE(snd_mixer_selem_get_capture_volume)(
+ _inputMixerElement, (snd_mixer_selem_channel_id_t)0, &vol);
+ if (errVal < 0) {
+ RTC_LOG(LS_ERROR) << "Error getting inputvolume: "
+ << LATE(snd_strerror)(errVal);
+ return -1;
+ }
+ RTC_LOG(LS_VERBOSE)
+ << "AudioMixerManagerLinuxALSA::MicrophoneVolume() => vol=" << vol;
+
+ volume = static_cast<uint32_t>(vol);
+
+ return 0;
+}
+
+int32_t AudioMixerManagerLinuxALSA::MaxMicrophoneVolume(
+ uint32_t& maxVolume) const {
+ if (_inputMixerElement == NULL) {
+ RTC_LOG(LS_WARNING) << "no avaliable input mixer element exists";
+ return -1;
+ }
+
+ long int minVol(0);
+ long int maxVol(0);
+
+ // check if we have mic volume at all
+ if (!LATE(snd_mixer_selem_has_capture_volume)(_inputMixerElement)) {
+ RTC_LOG(LS_ERROR) << "No microphone volume available";
+ return -1;
+ }
+
+ int errVal = LATE(snd_mixer_selem_get_capture_volume_range)(
+ _inputMixerElement, &minVol, &maxVol);
+
+ RTC_LOG(LS_VERBOSE) << "Microphone hardware volume range, min: " << minVol
+ << ", max: " << maxVol;
+ if (maxVol <= minVol) {
+ RTC_LOG(LS_ERROR) << "Error getting microphone volume range: "
+ << LATE(snd_strerror)(errVal);
+ }
+
+ maxVolume = static_cast<uint32_t>(maxVol);
+
+ return 0;
+}
+
+int32_t AudioMixerManagerLinuxALSA::MinMicrophoneVolume(
+ uint32_t& minVolume) const {
+ if (_inputMixerElement == NULL) {
+ RTC_LOG(LS_WARNING) << "no avaliable input mixer element exists";
+ return -1;
+ }
+
+ long int minVol(0);
+ long int maxVol(0);
+
+ int errVal = LATE(snd_mixer_selem_get_capture_volume_range)(
+ _inputMixerElement, &minVol, &maxVol);
+
+ RTC_LOG(LS_VERBOSE) << "Microphone hardware volume range, min: " << minVol
+ << ", max: " << maxVol;
+ if (maxVol <= minVol) {
+ RTC_LOG(LS_ERROR) << "Error getting microphone volume range: "
+ << LATE(snd_strerror)(errVal);
+ }
+
+ minVolume = static_cast<uint32_t>(minVol);
+
+ return 0;
+}
+
+// ============================================================================
+// Private Methods
+// ============================================================================
+
+int32_t AudioMixerManagerLinuxALSA::LoadMicMixerElement() const {
+ int errVal = LATE(snd_mixer_load)(_inputMixerHandle);
+ if (errVal < 0) {
+ RTC_LOG(LS_ERROR) << "snd_mixer_load(_inputMixerHandle), error: "
+ << LATE(snd_strerror)(errVal);
+ _inputMixerHandle = NULL;
+ return -1;
+ }
+
+ snd_mixer_elem_t* elem = NULL;
+ snd_mixer_elem_t* micElem = NULL;
+ unsigned mixerIdx = 0;
+ const char* selemName = NULL;
+
+ // Find and store handles to the right mixer elements
+ for (elem = LATE(snd_mixer_first_elem)(_inputMixerHandle); elem;
+ elem = LATE(snd_mixer_elem_next)(elem), mixerIdx++) {
+ if (LATE(snd_mixer_selem_is_active)(elem)) {
+ selemName = LATE(snd_mixer_selem_get_name)(elem);
+ if (strcmp(selemName, "Capture") == 0) // "Capture", "Mic"
+ {
+ _inputMixerElement = elem;
+ RTC_LOG(LS_VERBOSE) << "Capture element set";
+ } else if (strcmp(selemName, "Mic") == 0) {
+ micElem = elem;
+ RTC_LOG(LS_VERBOSE) << "Mic element found";
+ }
+ }
+
+ if (_inputMixerElement) {
+ // Use the first Capture element that is found
+ // The second one may not work
+ break;
+ }
+ }
+
+ if (_inputMixerElement == NULL) {
+ // We didn't find a Capture handle, use Mic.
+ if (micElem != NULL) {
+ _inputMixerElement = micElem;
+ RTC_LOG(LS_VERBOSE) << "Using Mic as capture volume.";
+ } else {
+ _inputMixerElement = NULL;
+ RTC_LOG(LS_ERROR) << "Could not find capture volume on the mixer.";
+
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+int32_t AudioMixerManagerLinuxALSA::LoadSpeakerMixerElement() const {
+ int errVal = LATE(snd_mixer_load)(_outputMixerHandle);
+ if (errVal < 0) {
+ RTC_LOG(LS_ERROR) << "snd_mixer_load(_outputMixerHandle), error: "
+ << LATE(snd_strerror)(errVal);
+ _outputMixerHandle = NULL;
+ return -1;
+ }
+
+ snd_mixer_elem_t* elem = NULL;
+ snd_mixer_elem_t* masterElem = NULL;
+ snd_mixer_elem_t* speakerElem = NULL;
+ unsigned mixerIdx = 0;
+ const char* selemName = NULL;
+
+ // Find and store handles to the right mixer elements
+ for (elem = LATE(snd_mixer_first_elem)(_outputMixerHandle); elem;
+ elem = LATE(snd_mixer_elem_next)(elem), mixerIdx++) {
+ if (LATE(snd_mixer_selem_is_active)(elem)) {
+ selemName = LATE(snd_mixer_selem_get_name)(elem);
+ RTC_LOG(LS_VERBOSE) << "snd_mixer_selem_get_name " << mixerIdx << ": "
+ << selemName << " =" << elem;
+
+ // "Master", "PCM", "Wave", "Master Mono", "PC Speaker", "PCM", "Wave"
+ if (strcmp(selemName, "PCM") == 0) {
+ _outputMixerElement = elem;
+ RTC_LOG(LS_VERBOSE) << "PCM element set";
+ } else if (strcmp(selemName, "Master") == 0) {
+ masterElem = elem;
+ RTC_LOG(LS_VERBOSE) << "Master element found";
+ } else if (strcmp(selemName, "Speaker") == 0) {
+ speakerElem = elem;
+ RTC_LOG(LS_VERBOSE) << "Speaker element found";
+ }
+ }
+
+ if (_outputMixerElement) {
+ // We have found the element we want
+ break;
+ }
+ }
+
+ // If we didn't find a PCM Handle, use Master or Speaker
+ if (_outputMixerElement == NULL) {
+ if (masterElem != NULL) {
+ _outputMixerElement = masterElem;
+ RTC_LOG(LS_VERBOSE) << "Using Master as output volume.";
+ } else if (speakerElem != NULL) {
+ _outputMixerElement = speakerElem;
+ RTC_LOG(LS_VERBOSE) << "Using Speaker as output volume.";
+ } else {
+ _outputMixerElement = NULL;
+ RTC_LOG(LS_ERROR) << "Could not find output volume in the mixer.";
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+void AudioMixerManagerLinuxALSA::GetControlName(char* controlName,
+ char* deviceName) const {
+ // Example
+ // deviceName: "front:CARD=Intel,DEV=0"
+ // controlName: "hw:CARD=Intel"
+ char* pos1 = strchr(deviceName, ':');
+ char* pos2 = strchr(deviceName, ',');
+ if (!pos2) {
+ // Can also be default:CARD=Intel
+ pos2 = &deviceName[strlen(deviceName)];
+ }
+ if (pos1 && pos2) {
+ strcpy(controlName, "hw");
+ int nChar = (int)(pos2 - pos1);
+ strncpy(&controlName[2], pos1, nChar);
+ controlName[2 + nChar] = '\0';
+ } else {
+ strcpy(controlName, deviceName);
+ }
+}
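+
+// Further illustrative mappings (hypothetical device strings):
+//   "front:CARD=Intel,DEV=0" -> "hw:CARD=Intel"
+//   "default:CARD=PCH"       -> "hw:CARD=PCH"  (no ',': copy to end of string)
+//   "default"                -> "default"      (no ':': copied unchanged)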
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_device/linux/audio_mixer_manager_alsa_linux.h b/third_party/libwebrtc/modules/audio_device/linux/audio_mixer_manager_alsa_linux.h
new file mode 100644
index 0000000000..d98287822d
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/linux/audio_mixer_manager_alsa_linux.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef AUDIO_DEVICE_AUDIO_MIXER_MANAGER_ALSA_LINUX_H_
+#define AUDIO_DEVICE_AUDIO_MIXER_MANAGER_ALSA_LINUX_H_
+
+#include <alsa/asoundlib.h>
+
+#include "modules/audio_device/include/audio_device.h"
+#include "modules/audio_device/linux/alsasymboltable_linux.h"
+#include "rtc_base/synchronization/mutex.h"
+
+namespace webrtc {
+
+class AudioMixerManagerLinuxALSA {
+ public:
+ int32_t OpenSpeaker(char* deviceName) RTC_LOCKS_EXCLUDED(mutex_);
+ int32_t OpenMicrophone(char* deviceName) RTC_LOCKS_EXCLUDED(mutex_);
+ int32_t SetSpeakerVolume(uint32_t volume) RTC_LOCKS_EXCLUDED(mutex_);
+ int32_t SpeakerVolume(uint32_t& volume) const;
+ int32_t MaxSpeakerVolume(uint32_t& maxVolume) const;
+ int32_t MinSpeakerVolume(uint32_t& minVolume) const;
+ int32_t SpeakerVolumeIsAvailable(bool& available);
+ int32_t SpeakerMuteIsAvailable(bool& available);
+ int32_t SetSpeakerMute(bool enable) RTC_LOCKS_EXCLUDED(mutex_);
+ int32_t SpeakerMute(bool& enabled) const;
+ int32_t MicrophoneMuteIsAvailable(bool& available);
+ int32_t SetMicrophoneMute(bool enable) RTC_LOCKS_EXCLUDED(mutex_);
+ int32_t MicrophoneMute(bool& enabled) const;
+ int32_t MicrophoneVolumeIsAvailable(bool& available);
+ int32_t SetMicrophoneVolume(uint32_t volume) RTC_LOCKS_EXCLUDED(mutex_);
+ int32_t MicrophoneVolume(uint32_t& volume) const;
+ int32_t MaxMicrophoneVolume(uint32_t& maxVolume) const;
+ int32_t MinMicrophoneVolume(uint32_t& minVolume) const;
+ int32_t Close() RTC_LOCKS_EXCLUDED(mutex_);
+ int32_t CloseSpeaker() RTC_LOCKS_EXCLUDED(mutex_);
+ int32_t CloseMicrophone() RTC_LOCKS_EXCLUDED(mutex_);
+ bool SpeakerIsInitialized() const;
+ bool MicrophoneIsInitialized() const;
+
+ public:
+ AudioMixerManagerLinuxALSA();
+ ~AudioMixerManagerLinuxALSA();
+
+ private:
+ int32_t CloseSpeakerLocked() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+ int32_t CloseMicrophoneLocked() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+ int32_t LoadMicMixerElement() const;
+ int32_t LoadSpeakerMixerElement() const;
+ void GetControlName(char* controlName, char* deviceName) const;
+
+ private:
+ Mutex mutex_;
+ mutable snd_mixer_t* _outputMixerHandle;
+ char _outputMixerStr[kAdmMaxDeviceNameSize];
+ mutable snd_mixer_t* _inputMixerHandle;
+ char _inputMixerStr[kAdmMaxDeviceNameSize];
+ mutable snd_mixer_elem_t* _outputMixerElement;
+ mutable snd_mixer_elem_t* _inputMixerElement;
+};
+
+} // namespace webrtc
+
+#endif  // AUDIO_DEVICE_AUDIO_MIXER_MANAGER_ALSA_LINUX_H_
diff --git a/third_party/libwebrtc/modules/audio_device/linux/audio_mixer_manager_pulse_linux.cc b/third_party/libwebrtc/modules/audio_device/linux/audio_mixer_manager_pulse_linux.cc
new file mode 100644
index 0000000000..91beee3c87
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/linux/audio_mixer_manager_pulse_linux.cc
@@ -0,0 +1,844 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_device/linux/audio_mixer_manager_pulse_linux.h"
+
+#include <stddef.h>
+
+#include "modules/audio_device/linux/audio_device_pulse_linux.h"
+#include "modules/audio_device/linux/latebindingsymboltable_linux.h"
+#include "modules/audio_device/linux/pulseaudiosymboltable_linux.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+// Accesses Pulse functions through our late-binding symbol table instead of
+// directly. This way we don't have to link to libpulse, which means our binary
+// will work on systems that don't have it.
+#define LATE(sym) \
+ LATESYM_GET(webrtc::adm_linux_pulse::PulseAudioSymbolTable, \
+ GetPulseSymbolTable(), sym)
+
+namespace webrtc {
+
+class AutoPulseLock {
+ public:
+ explicit AutoPulseLock(pa_threaded_mainloop* pa_mainloop)
+ : pa_mainloop_(pa_mainloop) {
+ LATE(pa_threaded_mainloop_lock)(pa_mainloop_);
+ }
+
+ ~AutoPulseLock() { LATE(pa_threaded_mainloop_unlock)(pa_mainloop_); }
+
+ private:
+ pa_threaded_mainloop* const pa_mainloop_;
+};
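+
+// Typical scoped usage (as in the methods below):
+//
+//   {
+//     AutoPulseLock auto_lock(_paMainloop);
+//     // ... calls that require the PulseAudio mainloop lock ...
+//   }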
+
+AudioMixerManagerLinuxPulse::AudioMixerManagerLinuxPulse()
+ : _paOutputDeviceIndex(-1),
+ _paInputDeviceIndex(-1),
+ _paPlayStream(NULL),
+ _paRecStream(NULL),
+ _paMainloop(NULL),
+ _paContext(NULL),
+ _paVolume(0),
+ _paMute(0),
+ _paVolSteps(0),
+ _paSpeakerMute(false),
+ _paSpeakerVolume(PA_VOLUME_NORM),
+ _paChannels(0),
+ _paObjectsSet(false) {
+ RTC_DLOG(LS_INFO) << __FUNCTION__ << " created";
+}
+
+AudioMixerManagerLinuxPulse::~AudioMixerManagerLinuxPulse() {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ RTC_DLOG(LS_INFO) << __FUNCTION__ << " destroyed";
+
+ Close();
+}
+
+// ===========================================================================
+// PUBLIC METHODS
+// ===========================================================================
+
+int32_t AudioMixerManagerLinuxPulse::SetPulseAudioObjects(
+ pa_threaded_mainloop* mainloop,
+ pa_context* context) {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ RTC_DLOG(LS_VERBOSE) << __FUNCTION__;
+
+ if (!mainloop || !context) {
+ RTC_LOG(LS_ERROR) << "could not set PulseAudio objects for mixer";
+ return -1;
+ }
+
+ _paMainloop = mainloop;
+ _paContext = context;
+ _paObjectsSet = true;
+
+ RTC_LOG(LS_VERBOSE) << "the PulseAudio objects for the mixer has been set";
+
+ return 0;
+}
+
+int32_t AudioMixerManagerLinuxPulse::Close() {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ RTC_DLOG(LS_VERBOSE) << __FUNCTION__;
+
+ CloseSpeaker();
+ CloseMicrophone();
+
+ _paMainloop = NULL;
+ _paContext = NULL;
+ _paObjectsSet = false;
+
+ return 0;
+}
+
+int32_t AudioMixerManagerLinuxPulse::CloseSpeaker() {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ RTC_DLOG(LS_VERBOSE) << __FUNCTION__;
+
+ // Reset the index to -1
+ _paOutputDeviceIndex = -1;
+ _paPlayStream = NULL;
+
+ return 0;
+}
+
+int32_t AudioMixerManagerLinuxPulse::CloseMicrophone() {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ RTC_DLOG(LS_VERBOSE) << __FUNCTION__;
+
+ // Reset the index to -1
+ _paInputDeviceIndex = -1;
+ _paRecStream = NULL;
+
+ return 0;
+}
+
+int32_t AudioMixerManagerLinuxPulse::SetPlayStream(pa_stream* playStream) {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ RTC_LOG(LS_VERBOSE)
+ << "AudioMixerManagerLinuxPulse::SetPlayStream(playStream)";
+
+ _paPlayStream = playStream;
+ return 0;
+}
+
+int32_t AudioMixerManagerLinuxPulse::SetRecStream(pa_stream* recStream) {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ RTC_LOG(LS_VERBOSE) << "AudioMixerManagerLinuxPulse::SetRecStream(recStream)";
+
+ _paRecStream = recStream;
+ return 0;
+}
+
+int32_t AudioMixerManagerLinuxPulse::OpenSpeaker(uint16_t deviceIndex) {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ RTC_LOG(LS_VERBOSE) << "AudioMixerManagerLinuxPulse::OpenSpeaker(deviceIndex="
+ << deviceIndex << ")";
+
+ // No point in opening the speaker
+ // if PA objects have not been set
+ if (!_paObjectsSet) {
+ RTC_LOG(LS_ERROR) << "PulseAudio objects has not been set";
+ return -1;
+ }
+
+ // Set the index for the PulseAudio
+ // output device to control
+ _paOutputDeviceIndex = deviceIndex;
+
+ RTC_LOG(LS_VERBOSE) << "the output mixer device is now open";
+
+ return 0;
+}
+
+int32_t AudioMixerManagerLinuxPulse::OpenMicrophone(uint16_t deviceIndex) {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ RTC_LOG(LS_VERBOSE)
+ << "AudioMixerManagerLinuxPulse::OpenMicrophone(deviceIndex="
+ << deviceIndex << ")";
+
+ // No point in opening the microphone
+ // if PA objects have not been set
+ if (!_paObjectsSet) {
+ RTC_LOG(LS_ERROR) << "PulseAudio objects have not been set";
+ return -1;
+ }
+
+ // Set the index for the PulseAudio
+ // input device to control
+ _paInputDeviceIndex = deviceIndex;
+
+ RTC_LOG(LS_VERBOSE) << "the input mixer device is now open";
+
+ return 0;
+}
+
+bool AudioMixerManagerLinuxPulse::SpeakerIsInitialized() const {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+
+ return (_paOutputDeviceIndex != -1);
+}
+
+bool AudioMixerManagerLinuxPulse::MicrophoneIsInitialized() const {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+
+ return (_paInputDeviceIndex != -1);
+}
+
+int32_t AudioMixerManagerLinuxPulse::SetSpeakerVolume(uint32_t volume) {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ RTC_LOG(LS_VERBOSE) << "AudioMixerManagerLinuxPulse::SetSpeakerVolume(volume="
+ << volume << ")";
+
+ if (_paOutputDeviceIndex == -1) {
+ RTC_LOG(LS_WARNING) << "output device index has not been set";
+ return -1;
+ }
+
+ bool setFailed(false);
+
+ if (_paPlayStream &&
+ (LATE(pa_stream_get_state)(_paPlayStream) != PA_STREAM_UNCONNECTED)) {
+ // We can only really set the volume if we have a connected stream
+ AutoPulseLock auto_lock(_paMainloop);
+
+ // Get the number of channels from the sample specification
+ const pa_sample_spec* spec = LATE(pa_stream_get_sample_spec)(_paPlayStream);
+ if (!spec) {
+ RTC_LOG(LS_ERROR) << "could not get sample specification";
+ return -1;
+ }
+
+ // Set the same volume for all channels
+ pa_cvolume cVolumes;
+ LATE(pa_cvolume_set)(&cVolumes, spec->channels, volume);
+
+ pa_operation* paOperation = NULL;
+ paOperation = LATE(pa_context_set_sink_input_volume)(
+ _paContext, LATE(pa_stream_get_index)(_paPlayStream), &cVolumes,
+ PaSetVolumeCallback, NULL);
+ if (!paOperation) {
+ setFailed = true;
+ }
+
+ // Don't need to wait for the completion
+ LATE(pa_operation_unref)(paOperation);
+ } else {
+ // We have not created a stream or it's not connected to the sink
+ // Save the volume to be set at connection
+ _paSpeakerVolume = volume;
+ }
+
+ if (setFailed) {
+ RTC_LOG(LS_WARNING) << "could not set speaker volume, error="
+ << LATE(pa_context_errno)(_paContext);
+
+ return -1;
+ }
+
+ return 0;
+}
+
+int32_t AudioMixerManagerLinuxPulse::SpeakerVolume(uint32_t& volume) const {
+ if (_paOutputDeviceIndex == -1) {
+ RTC_LOG(LS_WARNING) << "output device index has not been set";
+ return -1;
+ }
+
+ if (_paPlayStream &&
+ (LATE(pa_stream_get_state)(_paPlayStream) != PA_STREAM_UNCONNECTED)) {
+ // We can only get the volume if we have a connected stream
+ if (!GetSinkInputInfo())
+ return -1;
+
+ AutoPulseLock auto_lock(_paMainloop);
+ volume = static_cast<uint32_t>(_paVolume);
+ } else {
+ AutoPulseLock auto_lock(_paMainloop);
+ volume = _paSpeakerVolume;
+ }
+
+ RTC_LOG(LS_VERBOSE) << "AudioMixerManagerLinuxPulse::SpeakerVolume() => vol="
+ << volume;
+
+ return 0;
+}
+
+int32_t AudioMixerManagerLinuxPulse::MaxSpeakerVolume(
+ uint32_t& maxVolume) const {
+ if (_paOutputDeviceIndex == -1) {
+ RTC_LOG(LS_WARNING) << "output device index has not been set";
+ return -1;
+ }
+
+  // PA_VOLUME_NORM corresponds to 100% (0 dB), but PA allows amplification
+  // above that (clients commonly expose up to 150%)
+ maxVolume = static_cast<uint32_t>(PA_VOLUME_NORM);
+
+ return 0;
+}
+
+int32_t AudioMixerManagerLinuxPulse::MinSpeakerVolume(
+ uint32_t& minVolume) const {
+ if (_paOutputDeviceIndex == -1) {
+ RTC_LOG(LS_WARNING) << "output device index has not been set";
+ return -1;
+ }
+
+ minVolume = static_cast<uint32_t>(PA_VOLUME_MUTED);
+
+ return 0;
+}
+
+int32_t AudioMixerManagerLinuxPulse::SpeakerVolumeIsAvailable(bool& available) {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ if (_paOutputDeviceIndex == -1) {
+ RTC_LOG(LS_WARNING) << "output device index has not been set";
+ return -1;
+ }
+
+  // Always available in PulseAudio
+ available = true;
+
+ return 0;
+}
+
+int32_t AudioMixerManagerLinuxPulse::SpeakerMuteIsAvailable(bool& available) {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ if (_paOutputDeviceIndex == -1) {
+ RTC_LOG(LS_WARNING) << "output device index has not been set";
+ return -1;
+ }
+
+  // Always available in PulseAudio
+ available = true;
+
+ return 0;
+}
+
+int32_t AudioMixerManagerLinuxPulse::SetSpeakerMute(bool enable) {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ RTC_LOG(LS_VERBOSE) << "AudioMixerManagerLinuxPulse::SetSpeakerMute(enable="
+ << enable << ")";
+
+ if (_paOutputDeviceIndex == -1) {
+ RTC_LOG(LS_WARNING) << "output device index has not been set";
+ return -1;
+ }
+
+ bool setFailed(false);
+
+ if (_paPlayStream &&
+ (LATE(pa_stream_get_state)(_paPlayStream) != PA_STREAM_UNCONNECTED)) {
+ // We can only really mute if we have a connected stream
+ AutoPulseLock auto_lock(_paMainloop);
+
+ pa_operation* paOperation = NULL;
+ paOperation = LATE(pa_context_set_sink_input_mute)(
+ _paContext, LATE(pa_stream_get_index)(_paPlayStream), (int)enable,
+ PaSetVolumeCallback, NULL);
+ if (!paOperation) {
+ setFailed = true;
+ }
+
+ // Don't need to wait for the completion
+ LATE(pa_operation_unref)(paOperation);
+ } else {
+ // We have not created a stream or it's not connected to the sink
+ // Save the mute status to be set at connection
+ _paSpeakerMute = enable;
+ }
+
+ if (setFailed) {
+ RTC_LOG(LS_WARNING) << "could not mute speaker, error="
+ << LATE(pa_context_errno)(_paContext);
+ return -1;
+ }
+
+ return 0;
+}
+
+int32_t AudioMixerManagerLinuxPulse::SpeakerMute(bool& enabled) const {
+ if (_paOutputDeviceIndex == -1) {
+ RTC_LOG(LS_WARNING) << "output device index has not been set";
+ return -1;
+ }
+
+ if (_paPlayStream &&
+ (LATE(pa_stream_get_state)(_paPlayStream) != PA_STREAM_UNCONNECTED)) {
+ // We can only get the mute status if we have a connected stream
+ if (!GetSinkInputInfo())
+ return -1;
+
+ enabled = static_cast<bool>(_paMute);
+ } else {
+ enabled = _paSpeakerMute;
+ }
+ RTC_LOG(LS_VERBOSE)
+ << "AudioMixerManagerLinuxPulse::SpeakerMute() => enabled=" << enabled;
+
+ return 0;
+}
+
+int32_t AudioMixerManagerLinuxPulse::StereoPlayoutIsAvailable(bool& available) {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ if (_paOutputDeviceIndex == -1) {
+ RTC_LOG(LS_WARNING) << "output device index has not been set";
+ return -1;
+ }
+
+ uint32_t deviceIndex = (uint32_t)_paOutputDeviceIndex;
+
+ {
+ AutoPulseLock auto_lock(_paMainloop);
+
+ // Get the actual stream device index if we have a connected stream
+ // The device used by the stream can be changed
+ // during the call
+ if (_paPlayStream &&
+ (LATE(pa_stream_get_state)(_paPlayStream) != PA_STREAM_UNCONNECTED)) {
+ deviceIndex = LATE(pa_stream_get_device_index)(_paPlayStream);
+ }
+ }
+
+ if (!GetSinkInfoByIndex(deviceIndex))
+ return -1;
+
+ available = static_cast<bool>(_paChannels == 2);
+
+ return 0;
+}
+
+int32_t AudioMixerManagerLinuxPulse::StereoRecordingIsAvailable(
+ bool& available) {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ if (_paInputDeviceIndex == -1) {
+ RTC_LOG(LS_WARNING) << "input device index has not been set";
+ return -1;
+ }
+
+ uint32_t deviceIndex = (uint32_t)_paInputDeviceIndex;
+
+ AutoPulseLock auto_lock(_paMainloop);
+
+ // Get the actual stream device index if we have a connected stream
+ // The device used by the stream can be changed
+ // during the call
+ if (_paRecStream &&
+ (LATE(pa_stream_get_state)(_paRecStream) != PA_STREAM_UNCONNECTED)) {
+ deviceIndex = LATE(pa_stream_get_device_index)(_paRecStream);
+ }
+
+ pa_operation* paOperation = NULL;
+
+ // Get info for this source
+ // We want to know if the actual device can record in stereo
+ paOperation = LATE(pa_context_get_source_info_by_index)(
+ _paContext, deviceIndex, PaSourceInfoCallback, (void*)this);
+
+ WaitForOperationCompletion(paOperation);
+
+ available = static_cast<bool>(_paChannels == 2);
+
+ RTC_LOG(LS_VERBOSE)
+ << "AudioMixerManagerLinuxPulse::StereoRecordingIsAvailable()"
+ " => available="
+ << available;
+
+ return 0;
+}
+
+int32_t AudioMixerManagerLinuxPulse::MicrophoneMuteIsAvailable(
+ bool& available) {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ if (_paInputDeviceIndex == -1) {
+ RTC_LOG(LS_WARNING) << "input device index has not been set";
+ return -1;
+ }
+
+  // Always available in PulseAudio
+ available = true;
+
+ return 0;
+}
+
+int32_t AudioMixerManagerLinuxPulse::SetMicrophoneMute(bool enable) {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ RTC_LOG(LS_VERBOSE)
+ << "AudioMixerManagerLinuxPulse::SetMicrophoneMute(enable=" << enable
+ << ")";
+
+ if (_paInputDeviceIndex == -1) {
+ RTC_LOG(LS_WARNING) << "input device index has not been set";
+ return -1;
+ }
+
+ bool setFailed(false);
+ pa_operation* paOperation = NULL;
+
+ uint32_t deviceIndex = (uint32_t)_paInputDeviceIndex;
+
+ AutoPulseLock auto_lock(_paMainloop);
+
+ // Get the actual stream device index if we have a connected stream
+ // The device used by the stream can be changed
+ // during the call
+ if (_paRecStream &&
+ (LATE(pa_stream_get_state)(_paRecStream) != PA_STREAM_UNCONNECTED)) {
+ deviceIndex = LATE(pa_stream_get_device_index)(_paRecStream);
+ }
+
+ // Set mute switch for the source
+ paOperation = LATE(pa_context_set_source_mute_by_index)(
+ _paContext, deviceIndex, enable, PaSetVolumeCallback, NULL);
+
+  if (!paOperation) {
+    setFailed = true;
+  } else {
+    // Don't need to wait for this to complete, so release the handle right
+    // away; pa_operation_unref() must not be given a null pointer.
+    LATE(pa_operation_unref)(paOperation);
+  }
+
+ if (setFailed) {
+ RTC_LOG(LS_WARNING) << "could not mute microphone, error="
+ << LATE(pa_context_errno)(_paContext);
+ return -1;
+ }
+
+ return 0;
+}
+
+int32_t AudioMixerManagerLinuxPulse::MicrophoneMute(bool& enabled) const {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ if (_paInputDeviceIndex == -1) {
+ RTC_LOG(LS_WARNING) << "input device index has not been set";
+ return -1;
+ }
+
+ uint32_t deviceIndex = (uint32_t)_paInputDeviceIndex;
+
+ {
+ AutoPulseLock auto_lock(_paMainloop);
+ // Get the actual stream device index if we have a connected stream
+ // The device used by the stream can be changed
+ // during the call
+ if (_paRecStream &&
+ (LATE(pa_stream_get_state)(_paRecStream) != PA_STREAM_UNCONNECTED)) {
+ deviceIndex = LATE(pa_stream_get_device_index)(_paRecStream);
+ }
+ }
+
+ if (!GetSourceInfoByIndex(deviceIndex))
+ return -1;
+
+ enabled = static_cast<bool>(_paMute);
+
+ RTC_LOG(LS_VERBOSE)
+ << "AudioMixerManagerLinuxPulse::MicrophoneMute() => enabled=" << enabled;
+
+ return 0;
+}
+
+int32_t AudioMixerManagerLinuxPulse::MicrophoneVolumeIsAvailable(
+ bool& available) {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ if (_paInputDeviceIndex == -1) {
+ RTC_LOG(LS_WARNING) << "input device index has not been set";
+ return -1;
+ }
+
+ // Always available in Pulse Audio
+ available = true;
+
+ return 0;
+}
+
+int32_t AudioMixerManagerLinuxPulse::SetMicrophoneVolume(uint32_t volume) {
+ RTC_LOG(LS_VERBOSE)
+ << "AudioMixerManagerLinuxPulse::SetMicrophoneVolume(volume=" << volume
+ << ")";
+
+ if (_paInputDeviceIndex == -1) {
+ RTC_LOG(LS_WARNING) << "input device index has not been set";
+ return -1;
+ }
+
+ // Unlike output streams, input streams have no concept of a stream
+ // volume, only a device volume. So we have to change the volume of the
+ // device itself.
+
+ // The device may have a different number of channels than the stream and
+ // their mapping may be different, so we don't want to use the channel
+ // count from our sample spec. We could use PA_CHANNELS_MAX to cover our
+ // bases, and the server allows that even if the device's channel count
+ // is lower, but some buggy PA clients don't like that (the pavucontrol
+ // on Hardy dies in an assert if the channel count is different). So
+ // instead we look up the actual number of channels that the device has.
+ AutoPulseLock auto_lock(_paMainloop);
+ uint32_t deviceIndex = (uint32_t)_paInputDeviceIndex;
+
+ // Get the actual stream device index if we have a connected stream
+ // The device used by the stream can be changed
+ // during the call
+ if (_paRecStream &&
+ (LATE(pa_stream_get_state)(_paRecStream) != PA_STREAM_UNCONNECTED)) {
+ deviceIndex = LATE(pa_stream_get_device_index)(_paRecStream);
+ }
+
+ bool setFailed(false);
+ pa_operation* paOperation = NULL;
+
+ // Get the number of channels for this source
+ paOperation = LATE(pa_context_get_source_info_by_index)(
+ _paContext, deviceIndex, PaSourceInfoCallback, (void*)this);
+
+ WaitForOperationCompletion(paOperation);
+
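+  // pa_cvolume_set() assigns the same volume to every channel, so use the
+  // device's actual channel count that the source-info callback just cached.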
+ uint8_t channels = _paChannels;
+ pa_cvolume cVolumes;
+ LATE(pa_cvolume_set)(&cVolumes, channels, volume);
+
+ // Set the volume for the source
+ paOperation = LATE(pa_context_set_source_volume_by_index)(
+ _paContext, deviceIndex, &cVolumes, PaSetVolumeCallback, NULL);
+
+  if (!paOperation) {
+    setFailed = true;
+  } else {
+    // Don't need to wait for this to complete, so release the handle right
+    // away; pa_operation_unref() must not be given a null pointer.
+    LATE(pa_operation_unref)(paOperation);
+  }
+
+ if (setFailed) {
+ RTC_LOG(LS_WARNING) << "could not set microphone volume, error="
+ << LATE(pa_context_errno)(_paContext);
+ return -1;
+ }
+
+ return 0;
+}
+
+int32_t AudioMixerManagerLinuxPulse::MicrophoneVolume(uint32_t& volume) const {
+ if (_paInputDeviceIndex == -1) {
+ RTC_LOG(LS_WARNING) << "input device index has not been set";
+ return -1;
+ }
+
+ uint32_t deviceIndex = (uint32_t)_paInputDeviceIndex;
+
+ {
+ AutoPulseLock auto_lock(_paMainloop);
+ // Get the actual stream device index if we have a connected stream.
+ // The device used by the stream can be changed during the call.
+ if (_paRecStream &&
+ (LATE(pa_stream_get_state)(_paRecStream) != PA_STREAM_UNCONNECTED)) {
+ deviceIndex = LATE(pa_stream_get_device_index)(_paRecStream);
+ }
+ }
+
+ if (!GetSourceInfoByIndex(deviceIndex))
+ return -1;
+
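+  // GetSourceInfoByIndex() held the mainloop lock while the callback cached
+  // _paVolume; re-acquire it briefly to read the cached value.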
+ {
+ AutoPulseLock auto_lock(_paMainloop);
+ volume = static_cast<uint32_t>(_paVolume);
+ }
+
+ RTC_LOG(LS_VERBOSE)
+ << "AudioMixerManagerLinuxPulse::MicrophoneVolume() => vol=" << volume;
+
+ return 0;
+}
+
+int32_t AudioMixerManagerLinuxPulse::MaxMicrophoneVolume(
+ uint32_t& maxVolume) const {
+ if (_paInputDeviceIndex == -1) {
+ RTC_LOG(LS_WARNING) << "input device index has not been set";
+ return -1;
+ }
+
+  // PA_VOLUME_NORM corresponds to 100% (0 dB). PA allows amplification
+  // above that (up to PA_VOLUME_MAX), but that doesn't work well for all
+  // sound cards, so cap the reported maximum at the norm.
+ maxVolume = static_cast<uint32_t>(PA_VOLUME_NORM);
+
+ return 0;
+}
+
+int32_t AudioMixerManagerLinuxPulse::MinMicrophoneVolume(
+ uint32_t& minVolume) const {
+ if (_paInputDeviceIndex == -1) {
+ RTC_LOG(LS_WARNING) << "input device index has not been set";
+ return -1;
+ }
+
+ minVolume = static_cast<uint32_t>(PA_VOLUME_MUTED);
+
+ return 0;
+}
+
+// ===========================================================================
+// Private Methods
+// ===========================================================================
+
+void AudioMixerManagerLinuxPulse::PaSinkInfoCallback(pa_context* /*c*/,
+ const pa_sink_info* i,
+ int eol,
+ void* pThis) {
+ static_cast<AudioMixerManagerLinuxPulse*>(pThis)->PaSinkInfoCallbackHandler(
+ i, eol);
+}
+
+void AudioMixerManagerLinuxPulse::PaSinkInputInfoCallback(
+ pa_context* /*c*/,
+ const pa_sink_input_info* i,
+ int eol,
+ void* pThis) {
+ static_cast<AudioMixerManagerLinuxPulse*>(pThis)
+ ->PaSinkInputInfoCallbackHandler(i, eol);
+}
+
+void AudioMixerManagerLinuxPulse::PaSourceInfoCallback(pa_context* /*c*/,
+ const pa_source_info* i,
+ int eol,
+ void* pThis) {
+ static_cast<AudioMixerManagerLinuxPulse*>(pThis)->PaSourceInfoCallbackHandler(
+ i, eol);
+}
+
+void AudioMixerManagerLinuxPulse::PaSetVolumeCallback(pa_context* c,
+ int success,
+ void* /*pThis*/) {
+ if (!success) {
+ RTC_LOG(LS_ERROR) << "failed to set volume";
+ }
+}
+
+void AudioMixerManagerLinuxPulse::PaSinkInfoCallbackHandler(
+ const pa_sink_info* i,
+ int eol) {
+ if (eol) {
+ // Signal that we are done
+ LATE(pa_threaded_mainloop_signal)(_paMainloop, 0);
+ return;
+ }
+
+ _paChannels = i->channel_map.channels; // Get number of channels
+ pa_volume_t paVolume = PA_VOLUME_MUTED; // Minimum possible value.
+ for (int j = 0; j < _paChannels; ++j) {
+ if (paVolume < i->volume.values[j]) {
+ paVolume = i->volume.values[j];
+ }
+ }
+ _paVolume = paVolume; // get the max volume for any channel
+ _paMute = i->mute; // get mute status
+
+ // supported since PA 0.9.15
+ //_paVolSteps = i->n_volume_steps; // get the number of volume steps
+ // default value is PA_VOLUME_NORM+1
+ _paVolSteps = PA_VOLUME_NORM + 1;
+}
+
+void AudioMixerManagerLinuxPulse::PaSinkInputInfoCallbackHandler(
+ const pa_sink_input_info* i,
+ int eol) {
+ if (eol) {
+ // Signal that we are done
+ LATE(pa_threaded_mainloop_signal)(_paMainloop, 0);
+ return;
+ }
+
+ _paChannels = i->channel_map.channels; // Get number of channels
+ pa_volume_t paVolume = PA_VOLUME_MUTED; // Minimum possible value.
+ for (int j = 0; j < _paChannels; ++j) {
+ if (paVolume < i->volume.values[j]) {
+ paVolume = i->volume.values[j];
+ }
+ }
+ _paVolume = paVolume; // Get the max volume for any channel
+ _paMute = i->mute; // Get mute status
+}
+
+void AudioMixerManagerLinuxPulse::PaSourceInfoCallbackHandler(
+ const pa_source_info* i,
+ int eol) {
+ if (eol) {
+ // Signal that we are done
+ LATE(pa_threaded_mainloop_signal)(_paMainloop, 0);
+ return;
+ }
+
+ _paChannels = i->channel_map.channels; // Get number of channels
+ pa_volume_t paVolume = PA_VOLUME_MUTED; // Minimum possible value.
+ for (int j = 0; j < _paChannels; ++j) {
+ if (paVolume < i->volume.values[j]) {
+ paVolume = i->volume.values[j];
+ }
+ }
+ _paVolume = paVolume; // Get the max volume for any channel
+ _paMute = i->mute; // Get mute status
+
+ // supported since PA 0.9.15
+ //_paVolSteps = i->n_volume_steps; // Get the number of volume steps
+ // default value is PA_VOLUME_NORM+1
+ _paVolSteps = PA_VOLUME_NORM + 1;
+}
+
+void AudioMixerManagerLinuxPulse::WaitForOperationCompletion(
+ pa_operation* paOperation) const {
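+  // pa_threaded_mainloop_wait() releases the mainloop lock and blocks until
+  // one of the info callbacks calls pa_threaded_mainloop_signal() on eol,
+  // at which point the operation state is re-checked.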
+ while (LATE(pa_operation_get_state)(paOperation) == PA_OPERATION_RUNNING) {
+ LATE(pa_threaded_mainloop_wait)(_paMainloop);
+ }
+
+ LATE(pa_operation_unref)(paOperation);
+}
+
+bool AudioMixerManagerLinuxPulse::GetSinkInputInfo() const {
+ pa_operation* paOperation = NULL;
+
+ AutoPulseLock auto_lock(_paMainloop);
+ // Get info for this stream (sink input).
+ paOperation = LATE(pa_context_get_sink_input_info)(
+ _paContext, LATE(pa_stream_get_index)(_paPlayStream),
+ PaSinkInputInfoCallback, (void*)this);
+
+ WaitForOperationCompletion(paOperation);
+ return true;
+}
+
+bool AudioMixerManagerLinuxPulse::GetSinkInfoByIndex(int device_index) const {
+ pa_operation* paOperation = NULL;
+
+ AutoPulseLock auto_lock(_paMainloop);
+ paOperation = LATE(pa_context_get_sink_info_by_index)(
+ _paContext, device_index, PaSinkInfoCallback, (void*)this);
+
+ WaitForOperationCompletion(paOperation);
+ return true;
+}
+
+bool AudioMixerManagerLinuxPulse::GetSourceInfoByIndex(int device_index) const {
+ pa_operation* paOperation = NULL;
+
+ AutoPulseLock auto_lock(_paMainloop);
+ paOperation = LATE(pa_context_get_source_info_by_index)(
+ _paContext, device_index, PaSourceInfoCallback, (void*)this);
+
+ WaitForOperationCompletion(paOperation);
+ return true;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_device/linux/audio_mixer_manager_pulse_linux.h b/third_party/libwebrtc/modules/audio_device/linux/audio_mixer_manager_pulse_linux.h
new file mode 100644
index 0000000000..546440c4a6
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/linux/audio_mixer_manager_pulse_linux.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef AUDIO_DEVICE_AUDIO_MIXER_MANAGER_PULSE_LINUX_H_
+#define AUDIO_DEVICE_AUDIO_MIXER_MANAGER_PULSE_LINUX_H_
+
+#include <pulse/pulseaudio.h>
+#include <stdint.h>
+
+#include "api/sequence_checker.h"
+
+#ifndef UINT32_MAX
+#define UINT32_MAX ((uint32_t)-1)
+#endif
+
+namespace webrtc {
+
+class AudioMixerManagerLinuxPulse {
+ public:
+ int32_t SetPlayStream(pa_stream* playStream);
+ int32_t SetRecStream(pa_stream* recStream);
+ int32_t OpenSpeaker(uint16_t deviceIndex);
+ int32_t OpenMicrophone(uint16_t deviceIndex);
+ int32_t SetSpeakerVolume(uint32_t volume);
+ int32_t SpeakerVolume(uint32_t& volume) const;
+ int32_t MaxSpeakerVolume(uint32_t& maxVolume) const;
+ int32_t MinSpeakerVolume(uint32_t& minVolume) const;
+ int32_t SpeakerVolumeIsAvailable(bool& available);
+ int32_t SpeakerMuteIsAvailable(bool& available);
+ int32_t SetSpeakerMute(bool enable);
+ int32_t StereoPlayoutIsAvailable(bool& available);
+ int32_t StereoRecordingIsAvailable(bool& available);
+ int32_t SpeakerMute(bool& enabled) const;
+ int32_t MicrophoneMuteIsAvailable(bool& available);
+ int32_t SetMicrophoneMute(bool enable);
+ int32_t MicrophoneMute(bool& enabled) const;
+ int32_t MicrophoneVolumeIsAvailable(bool& available);
+ int32_t SetMicrophoneVolume(uint32_t volume);
+ int32_t MicrophoneVolume(uint32_t& volume) const;
+ int32_t MaxMicrophoneVolume(uint32_t& maxVolume) const;
+ int32_t MinMicrophoneVolume(uint32_t& minVolume) const;
+ int32_t SetPulseAudioObjects(pa_threaded_mainloop* mainloop,
+ pa_context* context);
+ int32_t Close();
+ int32_t CloseSpeaker();
+ int32_t CloseMicrophone();
+ bool SpeakerIsInitialized() const;
+ bool MicrophoneIsInitialized() const;
+
+ public:
+ AudioMixerManagerLinuxPulse();
+ ~AudioMixerManagerLinuxPulse();
+
+ private:
+ static void PaSinkInfoCallback(pa_context* c,
+ const pa_sink_info* i,
+ int eol,
+ void* pThis);
+ static void PaSinkInputInfoCallback(pa_context* c,
+ const pa_sink_input_info* i,
+ int eol,
+ void* pThis);
+ static void PaSourceInfoCallback(pa_context* c,
+ const pa_source_info* i,
+ int eol,
+ void* pThis);
+ static void PaSetVolumeCallback(pa_context* /*c*/,
+ int success,
+ void* /*pThis*/);
+ void PaSinkInfoCallbackHandler(const pa_sink_info* i, int eol);
+ void PaSinkInputInfoCallbackHandler(const pa_sink_input_info* i, int eol);
+ void PaSourceInfoCallbackHandler(const pa_source_info* i, int eol);
+
+ void WaitForOperationCompletion(pa_operation* paOperation) const;
+
+ bool GetSinkInputInfo() const;
+ bool GetSinkInfoByIndex(int device_index) const;
+ bool GetSourceInfoByIndex(int device_index) const;
+
+ private:
+ int16_t _paOutputDeviceIndex;
+ int16_t _paInputDeviceIndex;
+
+ pa_stream* _paPlayStream;
+ pa_stream* _paRecStream;
+
+ pa_threaded_mainloop* _paMainloop;
+ pa_context* _paContext;
+
+ mutable uint32_t _paVolume;
+ mutable uint32_t _paMute;
+ mutable uint32_t _paVolSteps;
+ bool _paSpeakerMute;
+ mutable uint32_t _paSpeakerVolume;
+ mutable uint8_t _paChannels;
+ bool _paObjectsSet;
+
+ // Stores thread ID in constructor.
+  // We can then use RTC_DCHECK_RUN_ON(&thread_checker_) to ensure that
+ // other methods are called from the same thread.
+ // Currently only does RTC_DCHECK(thread_checker_.IsCurrent()).
+ SequenceChecker thread_checker_;
+};
+
+} // namespace webrtc
+
+#endif // AUDIO_DEVICE_AUDIO_MIXER_MANAGER_PULSE_LINUX_H_
diff --git a/third_party/libwebrtc/modules/audio_device/linux/latebindingsymboltable_linux.cc b/third_party/libwebrtc/modules/audio_device/linux/latebindingsymboltable_linux.cc
new file mode 100644
index 0000000000..751edafd8b
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/linux/latebindingsymboltable_linux.cc
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) 2010 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_device/linux/latebindingsymboltable_linux.h"
+
+#include "absl/strings/string_view.h"
+#include "rtc_base/logging.h"
+
+#ifdef WEBRTC_LINUX
+#include <dlfcn.h>
+#endif
+
+namespace webrtc {
+namespace adm_linux {
+
+inline static const char* GetDllError() {
+#ifdef WEBRTC_LINUX
+ char* err = dlerror();
+ if (err) {
+ return err;
+ } else {
+ return "No error";
+ }
+#else
+#error Not implemented
+#endif
+}
+
+DllHandle InternalLoadDll(absl::string_view dll_name) {
+#ifdef WEBRTC_LINUX
+ DllHandle handle = dlopen(std::string(dll_name).c_str(), RTLD_NOW);
+#else
+#error Not implemented
+#endif
+ if (handle == kInvalidDllHandle) {
+ RTC_LOG(LS_WARNING) << "Can't load " << dll_name << " : " << GetDllError();
+ }
+ return handle;
+}
+
+void InternalUnloadDll(DllHandle handle) {
+#ifdef WEBRTC_LINUX
+// TODO(pbos): Remove this dlclose() exclusion when leaks and suppressions from
+// here are gone (or AddressSanitizer can display them properly).
+//
+// Skip dlclose() on AddressSanitizer as leaks including this module in the
+// stack trace gets displayed as <unknown module> instead of the actual library
+// -> it can not be suppressed.
+// https://code.google.com/p/address-sanitizer/issues/detail?id=89
+#if !defined(ADDRESS_SANITIZER)
+ if (dlclose(handle) != 0) {
+ RTC_LOG(LS_ERROR) << GetDllError();
+ }
+#endif // !defined(ADDRESS_SANITIZER)
+#else
+#error Not implemented
+#endif
+}
+
+static bool LoadSymbol(DllHandle handle,
+ absl::string_view symbol_name,
+ void** symbol) {
+#ifdef WEBRTC_LINUX
+ *symbol = dlsym(handle, std::string(symbol_name).c_str());
+ char* err = dlerror();
+ if (err) {
+ RTC_LOG(LS_ERROR) << "Error loading symbol " << symbol_name << " : " << err;
+ return false;
+ } else if (!*symbol) {
+ RTC_LOG(LS_ERROR) << "Symbol " << symbol_name << " is NULL";
+ return false;
+ }
+ return true;
+#else
+#error Not implemented
+#endif
+}
+
+// This routine MUST assign SOME value for every symbol, even if that value is
+// NULL, or else some symbols may be left with uninitialized data that the
+// caller may later interpret as a valid address.
+bool InternalLoadSymbols(DllHandle handle,
+ int num_symbols,
+ const char* const symbol_names[],
+ void* symbols[]) {
+#ifdef WEBRTC_LINUX
+ // Clear any old errors.
+ dlerror();
+#endif
+ for (int i = 0; i < num_symbols; ++i) {
+ if (!LoadSymbol(handle, symbol_names[i], &symbols[i])) {
+ return false;
+ }
+ }
+ return true;
+}
+
+} // namespace adm_linux
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_device/linux/latebindingsymboltable_linux.h b/third_party/libwebrtc/modules/audio_device/linux/latebindingsymboltable_linux.h
new file mode 100644
index 0000000000..00f3c5a449
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/linux/latebindingsymboltable_linux.h
@@ -0,0 +1,168 @@
+/*
+ * Copyright (c) 2010 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef AUDIO_DEVICE_LATEBINDINGSYMBOLTABLE_LINUX_H_
+#define AUDIO_DEVICE_LATEBINDINGSYMBOLTABLE_LINUX_H_
+
+#include <stddef.h> // for NULL
+#include <string.h>
+
+#include "absl/strings/string_view.h"
+#include "rtc_base/checks.h"
+
+// This file provides macros for creating "symbol table" classes to simplify the
+// dynamic loading of symbols from DLLs. Currently the implementation only
+// supports Linux and pure C symbols.
+// See modules/audio_device/linux/pulseaudiosymboltable_linux.(h|cc) for an
+// example.
+
+namespace webrtc {
+namespace adm_linux {
+
+#ifdef WEBRTC_LINUX
+typedef void* DllHandle;
+
+const DllHandle kInvalidDllHandle = NULL;
+#else
+#error Not implemented
+#endif
+
+// These are helpers for use only by the class below.
+DllHandle InternalLoadDll(absl::string_view);
+
+void InternalUnloadDll(DllHandle handle);
+
+bool InternalLoadSymbols(DllHandle handle,
+ int num_symbols,
+ const char* const symbol_names[],
+ void* symbols[]);
+
+template <int SYMBOL_TABLE_SIZE,
+ const char kDllName[],
+ const char* const kSymbolNames[]>
+class LateBindingSymbolTable {
+ public:
+ LateBindingSymbolTable()
+ : handle_(kInvalidDllHandle), undefined_symbols_(false) {
+ memset(symbols_, 0, sizeof(symbols_));
+ }
+
+ ~LateBindingSymbolTable() { Unload(); }
+
+ LateBindingSymbolTable(const LateBindingSymbolTable&) = delete;
+  LateBindingSymbolTable& operator=(const LateBindingSymbolTable&) = delete;
+
+ static int NumSymbols() { return SYMBOL_TABLE_SIZE; }
+
+ // We do not use this, but we offer it for theoretical convenience.
+ static const char* GetSymbolName(int index) {
+ RTC_DCHECK_LT(index, NumSymbols());
+ return kSymbolNames[index];
+ }
+
+ bool IsLoaded() const { return handle_ != kInvalidDllHandle; }
+
+ // Loads the DLL and the symbol table. Returns true iff the DLL and symbol
+ // table loaded successfully.
+ bool Load() {
+ if (IsLoaded()) {
+ return true;
+ }
+ if (undefined_symbols_) {
+ // We do not attempt to load again because repeated attempts are not
+ // likely to succeed and DLL loading is costly.
+ return false;
+ }
+ handle_ = InternalLoadDll(kDllName);
+ if (!IsLoaded()) {
+ return false;
+ }
+ if (!InternalLoadSymbols(handle_, NumSymbols(), kSymbolNames, symbols_)) {
+ undefined_symbols_ = true;
+ Unload();
+ return false;
+ }
+ return true;
+ }
+
+ void Unload() {
+ if (!IsLoaded()) {
+ return;
+ }
+ InternalUnloadDll(handle_);
+ handle_ = kInvalidDllHandle;
+ memset(symbols_, 0, sizeof(symbols_));
+ }
+
+ // Retrieves the given symbol. NOTE: Recommended to use LATESYM_GET below
+ // instead of this.
+ void* GetSymbol(int index) const {
+ RTC_DCHECK(IsLoaded());
+ RTC_DCHECK_LT(index, NumSymbols());
+ return symbols_[index];
+ }
+
+ private:
+ DllHandle handle_;
+ bool undefined_symbols_;
+ void* symbols_[SYMBOL_TABLE_SIZE];
+};
+
+// This macro must be invoked in a header to declare a symbol table class.
+#define LATE_BINDING_SYMBOL_TABLE_DECLARE_BEGIN(ClassName) enum {
+// This macro must be invoked in the header declaration once for each symbol
+// (recommended to use an X-Macro to avoid duplication).
+// This macro defines an enum with names built from the symbols, which
+// essentially gives the compiler a lookup table from symbol names to their
+// indices in the symbol table class.
+#define LATE_BINDING_SYMBOL_TABLE_DECLARE_ENTRY(ClassName, sym) \
+ ClassName##_SYMBOL_TABLE_INDEX_##sym,
+
+// This macro completes the header declaration.
+#define LATE_BINDING_SYMBOL_TABLE_DECLARE_END(ClassName) \
+ ClassName##_SYMBOL_TABLE_SIZE \
+ } \
+ ; \
+ \
+ extern const char ClassName##_kDllName[]; \
+ extern const char* const \
+ ClassName##_kSymbolNames[ClassName##_SYMBOL_TABLE_SIZE]; \
+ \
+ typedef ::webrtc::adm_linux::LateBindingSymbolTable< \
+ ClassName##_SYMBOL_TABLE_SIZE, ClassName##_kDllName, \
+ ClassName##_kSymbolNames> \
+ ClassName;
+
+// This macro must be invoked in a .cc file to define a previously-declared
+// symbol table class.
+#define LATE_BINDING_SYMBOL_TABLE_DEFINE_BEGIN(ClassName, dllName) \
+ const char ClassName##_kDllName[] = dllName; \
+ const char* const ClassName##_kSymbolNames[ClassName##_SYMBOL_TABLE_SIZE] = {
+// This macro must be invoked in the .cc definition once for each symbol
+// (recommended to use an X-Macro to avoid duplication).
+// This would have to use the mangled name if we were to ever support C++
+// symbols.
+#define LATE_BINDING_SYMBOL_TABLE_DEFINE_ENTRY(ClassName, sym) #sym,
+
+#define LATE_BINDING_SYMBOL_TABLE_DEFINE_END(ClassName) \
+ } \
+ ;
+
+// Index of a given symbol in the given symbol table class.
+#define LATESYM_INDEXOF(ClassName, sym) (ClassName##_SYMBOL_TABLE_INDEX_##sym)
+
+// Returns a reference to the given late-binded symbol, with the correct type.
+#define LATESYM_GET(ClassName, inst, sym) \
+ (*reinterpret_cast<__typeof__(&sym)>( \
+ (inst)->GetSymbol(LATESYM_INDEXOF(ClassName, sym))))
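+// Illustrative usage (the LATE wrapper name is an assumption mirroring the
+// ADM sources, not part of this header):
+//   #define LATE(sym) LATESYM_GET(PulseAudioSymbolTable, &table, sym)
+//   int err = LATE(pa_context_errno)(context);
+// The cast gives the loaded symbol the exact type of a direct call to `sym`.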
+
+} // namespace adm_linux
+} // namespace webrtc
+
+#endif // AUDIO_DEVICE_LATEBINDINGSYMBOLTABLE_LINUX_H_
diff --git a/third_party/libwebrtc/modules/audio_device/linux/pulseaudiosymboltable_linux.cc b/third_party/libwebrtc/modules/audio_device/linux/pulseaudiosymboltable_linux.cc
new file mode 100644
index 0000000000..e0759e6ca3
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/linux/pulseaudiosymboltable_linux.cc
@@ -0,0 +1,41 @@
+/*
+ * libjingle
+ * Copyright 2004--2010, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "modules/audio_device/linux/pulseaudiosymboltable_linux.h"
+
+namespace webrtc {
+namespace adm_linux_pulse {
+
+LATE_BINDING_SYMBOL_TABLE_DEFINE_BEGIN(PulseAudioSymbolTable, "libpulse.so.0")
+#define X(sym) \
+ LATE_BINDING_SYMBOL_TABLE_DEFINE_ENTRY(PulseAudioSymbolTable, sym)
+PULSE_AUDIO_SYMBOLS_LIST
+#undef X
+LATE_BINDING_SYMBOL_TABLE_DEFINE_END(PulseAudioSymbolTable)
+
+} // namespace adm_linux_pulse
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_device/linux/pulseaudiosymboltable_linux.h b/third_party/libwebrtc/modules/audio_device/linux/pulseaudiosymboltable_linux.h
new file mode 100644
index 0000000000..2f6a9510d8
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/linux/pulseaudiosymboltable_linux.h
@@ -0,0 +1,106 @@
+/*
+ * libjingle
+ * Copyright 2004--2010, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef AUDIO_DEVICE_PULSEAUDIOSYMBOLTABLE_LINUX_H_
+#define AUDIO_DEVICE_PULSEAUDIOSYMBOLTABLE_LINUX_H_
+
+#include "modules/audio_device/linux/latebindingsymboltable_linux.h"
+
+namespace webrtc {
+namespace adm_linux_pulse {
+
+// The PulseAudio symbols we need, as an X-Macro list.
+// This list must contain precisely every libpulse function that is used in
+// the ADM Linux Pulse device and mixer classes.
+#define PULSE_AUDIO_SYMBOLS_LIST \
+ X(pa_bytes_per_second) \
+ X(pa_context_connect) \
+ X(pa_context_disconnect) \
+ X(pa_context_errno) \
+ X(pa_context_get_protocol_version) \
+ X(pa_context_get_server_info) \
+ X(pa_context_get_sink_info_list) \
+ X(pa_context_get_sink_info_by_index) \
+ X(pa_context_get_sink_info_by_name) \
+ X(pa_context_get_sink_input_info) \
+ X(pa_context_get_source_info_by_index) \
+ X(pa_context_get_source_info_by_name) \
+ X(pa_context_get_source_info_list) \
+ X(pa_context_get_state) \
+ X(pa_context_new) \
+ X(pa_context_set_sink_input_volume) \
+ X(pa_context_set_sink_input_mute) \
+ X(pa_context_set_source_volume_by_index) \
+ X(pa_context_set_source_mute_by_index) \
+ X(pa_context_set_state_callback) \
+ X(pa_context_unref) \
+ X(pa_cvolume_set) \
+ X(pa_operation_get_state) \
+ X(pa_operation_unref) \
+ X(pa_stream_connect_playback) \
+ X(pa_stream_connect_record) \
+ X(pa_stream_disconnect) \
+ X(pa_stream_drop) \
+ X(pa_stream_get_device_index) \
+ X(pa_stream_get_index) \
+ X(pa_stream_get_latency) \
+ X(pa_stream_get_sample_spec) \
+ X(pa_stream_get_state) \
+ X(pa_stream_new) \
+ X(pa_stream_peek) \
+ X(pa_stream_readable_size) \
+ X(pa_stream_set_buffer_attr) \
+ X(pa_stream_set_overflow_callback) \
+ X(pa_stream_set_read_callback) \
+ X(pa_stream_set_state_callback) \
+ X(pa_stream_set_underflow_callback) \
+ X(pa_stream_set_write_callback) \
+ X(pa_stream_unref) \
+ X(pa_stream_writable_size) \
+ X(pa_stream_write) \
+ X(pa_strerror) \
+ X(pa_threaded_mainloop_free) \
+ X(pa_threaded_mainloop_get_api) \
+ X(pa_threaded_mainloop_lock) \
+ X(pa_threaded_mainloop_new) \
+ X(pa_threaded_mainloop_signal) \
+ X(pa_threaded_mainloop_start) \
+ X(pa_threaded_mainloop_stop) \
+ X(pa_threaded_mainloop_unlock) \
+ X(pa_threaded_mainloop_wait)
+
+LATE_BINDING_SYMBOL_TABLE_DECLARE_BEGIN(PulseAudioSymbolTable)
+#define X(sym) \
+ LATE_BINDING_SYMBOL_TABLE_DECLARE_ENTRY(PulseAudioSymbolTable, sym)
+PULSE_AUDIO_SYMBOLS_LIST
+#undef X
+LATE_BINDING_SYMBOL_TABLE_DECLARE_END(PulseAudioSymbolTable)
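+
+// For each X(sym) above, these DECLARE macros expand to an enum index
+// PulseAudioSymbolTable_SYMBOL_TABLE_INDEX_<sym>, and the matching DEFINE
+// macros in the .cc file emit the "<sym>" string, together yielding the
+// PulseAudioSymbolTable typedef sized to this list.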
+
+} // namespace adm_linux_pulse
+} // namespace webrtc
+
+#endif // AUDIO_DEVICE_PULSEAUDIOSYMBOLTABLE_LINUX_H_
diff --git a/third_party/libwebrtc/modules/audio_device/mac/audio_device_mac.cc b/third_party/libwebrtc/modules/audio_device/mac/audio_device_mac.cc
new file mode 100644
index 0000000000..462287a27d
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/mac/audio_device_mac.cc
@@ -0,0 +1,2500 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_device/mac/audio_device_mac.h"
+
+#include <ApplicationServices/ApplicationServices.h>
+#include <mach/mach.h> // mach_task_self()
+#include <sys/sysctl.h> // sysctlbyname()
+
+#include <memory>
+
+#include "modules/audio_device/audio_device_config.h"
+#include "modules/third_party/portaudio/pa_ringbuffer.h"
+#include "rtc_base/arraysize.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/platform_thread.h"
+#include "rtc_base/system/arch.h"
+
+namespace webrtc {
+
+#define WEBRTC_CA_RETURN_ON_ERR(expr) \
+ do { \
+ err = expr; \
+ if (err != noErr) { \
+ logCAMsg(rtc::LS_ERROR, "Error in " #expr, (const char*)&err); \
+ return -1; \
+ } \
+ } while (0)
+
+#define WEBRTC_CA_LOG_ERR(expr) \
+ do { \
+ err = expr; \
+ if (err != noErr) { \
+ logCAMsg(rtc::LS_ERROR, "Error in " #expr, (const char*)&err); \
+ } \
+ } while (0)
+
+#define WEBRTC_CA_LOG_WARN(expr) \
+ do { \
+ err = expr; \
+ if (err != noErr) { \
+ logCAMsg(rtc::LS_WARNING, "Error in " #expr, (const char*)&err); \
+ } \
+ } while (0)
+
+enum { MaxNumberDevices = 64 };
+
+// CoreAudio errors are best interpreted as four character strings.
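+// A four-char-code OSStatus packs its characters into the four bytes of the
+// integer; on little-endian hosts they are stored reversed, hence the
+// flipped indexing in the #else branch below.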
+void AudioDeviceMac::logCAMsg(const rtc::LoggingSeverity sev,
+ const char* msg,
+ const char* err) {
+ RTC_DCHECK(msg != NULL);
+ RTC_DCHECK(err != NULL);
+
+#ifdef WEBRTC_ARCH_BIG_ENDIAN
+ switch (sev) {
+ case rtc::LS_ERROR:
+ RTC_LOG(LS_ERROR) << msg << ": " << err[0] << err[1] << err[2] << err[3];
+ break;
+ case rtc::LS_WARNING:
+ RTC_LOG(LS_WARNING) << msg << ": " << err[0] << err[1] << err[2]
+ << err[3];
+ break;
+ case rtc::LS_VERBOSE:
+ RTC_LOG(LS_VERBOSE) << msg << ": " << err[0] << err[1] << err[2]
+ << err[3];
+ break;
+ default:
+ break;
+ }
+#else
+ // We need to flip the characters in this case.
+ switch (sev) {
+ case rtc::LS_ERROR:
+ RTC_LOG(LS_ERROR) << msg << ": " << err[3] << err[2] << err[1] << err[0];
+ break;
+ case rtc::LS_WARNING:
+ RTC_LOG(LS_WARNING) << msg << ": " << err[3] << err[2] << err[1]
+ << err[0];
+ break;
+ case rtc::LS_VERBOSE:
+ RTC_LOG(LS_VERBOSE) << msg << ": " << err[3] << err[2] << err[1]
+ << err[0];
+ break;
+ default:
+ break;
+ }
+#endif
+}
+
+AudioDeviceMac::AudioDeviceMac()
+ : _ptrAudioBuffer(NULL),
+ _mixerManager(),
+ _inputDeviceIndex(0),
+ _outputDeviceIndex(0),
+ _inputDeviceID(kAudioObjectUnknown),
+ _outputDeviceID(kAudioObjectUnknown),
+ _inputDeviceIsSpecified(false),
+ _outputDeviceIsSpecified(false),
+ _recChannels(N_REC_CHANNELS),
+ _playChannels(N_PLAY_CHANNELS),
+ _captureBufData(NULL),
+ _renderBufData(NULL),
+ _initialized(false),
+ _isShutDown(false),
+ _recording(false),
+ _playing(false),
+ _recIsInitialized(false),
+ _playIsInitialized(false),
+ _renderDeviceIsAlive(1),
+ _captureDeviceIsAlive(1),
+ _twoDevices(true),
+ _doStop(false),
+ _doStopRec(false),
+ _macBookPro(false),
+ _macBookProPanRight(false),
+ _captureLatencyUs(0),
+ _renderLatencyUs(0),
+ _captureDelayUs(0),
+ _renderDelayUs(0),
+ _renderDelayOffsetSamples(0),
+ _paCaptureBuffer(NULL),
+ _paRenderBuffer(NULL),
+ _captureBufSizeSamples(0),
+ _renderBufSizeSamples(0),
+ prev_key_state_() {
+ RTC_DLOG(LS_INFO) << __FUNCTION__ << " created";
+
+ memset(_renderConvertData, 0, sizeof(_renderConvertData));
+ memset(&_outStreamFormat, 0, sizeof(AudioStreamBasicDescription));
+ memset(&_outDesiredFormat, 0, sizeof(AudioStreamBasicDescription));
+ memset(&_inStreamFormat, 0, sizeof(AudioStreamBasicDescription));
+ memset(&_inDesiredFormat, 0, sizeof(AudioStreamBasicDescription));
+}
+
+AudioDeviceMac::~AudioDeviceMac() {
+ RTC_DLOG(LS_INFO) << __FUNCTION__ << " destroyed";
+
+ if (!_isShutDown) {
+ Terminate();
+ }
+
+ RTC_DCHECK(capture_worker_thread_.empty());
+ RTC_DCHECK(render_worker_thread_.empty());
+
+ if (_paRenderBuffer) {
+ delete _paRenderBuffer;
+ _paRenderBuffer = NULL;
+ }
+
+ if (_paCaptureBuffer) {
+ delete _paCaptureBuffer;
+ _paCaptureBuffer = NULL;
+ }
+
+ if (_renderBufData) {
+ delete[] _renderBufData;
+ _renderBufData = NULL;
+ }
+
+ if (_captureBufData) {
+ delete[] _captureBufData;
+ _captureBufData = NULL;
+ }
+
+ kern_return_t kernErr = KERN_SUCCESS;
+ kernErr = semaphore_destroy(mach_task_self(), _renderSemaphore);
+ if (kernErr != KERN_SUCCESS) {
+ RTC_LOG(LS_ERROR) << "semaphore_destroy() error: " << kernErr;
+ }
+
+ kernErr = semaphore_destroy(mach_task_self(), _captureSemaphore);
+ if (kernErr != KERN_SUCCESS) {
+ RTC_LOG(LS_ERROR) << "semaphore_destroy() error: " << kernErr;
+ }
+}
+
+// ============================================================================
+// API
+// ============================================================================
+
+void AudioDeviceMac::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
+ MutexLock lock(&mutex_);
+
+ _ptrAudioBuffer = audioBuffer;
+
+ // inform the AudioBuffer about default settings for this implementation
+ _ptrAudioBuffer->SetRecordingSampleRate(N_REC_SAMPLES_PER_SEC);
+ _ptrAudioBuffer->SetPlayoutSampleRate(N_PLAY_SAMPLES_PER_SEC);
+ _ptrAudioBuffer->SetRecordingChannels(N_REC_CHANNELS);
+ _ptrAudioBuffer->SetPlayoutChannels(N_PLAY_CHANNELS);
+}
+
+int32_t AudioDeviceMac::ActiveAudioLayer(
+ AudioDeviceModule::AudioLayer& audioLayer) const {
+ audioLayer = AudioDeviceModule::kPlatformDefaultAudio;
+ return 0;
+}
+
+AudioDeviceGeneric::InitStatus AudioDeviceMac::Init() {
+ MutexLock lock(&mutex_);
+
+ if (_initialized) {
+ return InitStatus::OK;
+ }
+
+ OSStatus err = noErr;
+
+ _isShutDown = false;
+
+ // PortAudio ring buffers require an elementCount which is a power of two.
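+  // The loops below round the requested sizes up to the next power of two;
+  // e.g. a hypothetical 4410-sample request would yield 8192 elements.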
+ if (_renderBufData == NULL) {
+ UInt32 powerOfTwo = 1;
+ while (powerOfTwo < PLAY_BUF_SIZE_IN_SAMPLES) {
+ powerOfTwo <<= 1;
+ }
+ _renderBufSizeSamples = powerOfTwo;
+ _renderBufData = new SInt16[_renderBufSizeSamples];
+ }
+
+ if (_paRenderBuffer == NULL) {
+ _paRenderBuffer = new PaUtilRingBuffer;
+ ring_buffer_size_t bufSize = -1;
+ bufSize = PaUtil_InitializeRingBuffer(
+ _paRenderBuffer, sizeof(SInt16), _renderBufSizeSamples, _renderBufData);
+ if (bufSize == -1) {
+ RTC_LOG(LS_ERROR) << "PaUtil_InitializeRingBuffer() error";
+ return InitStatus::PLAYOUT_ERROR;
+ }
+ }
+
+ if (_captureBufData == NULL) {
+ UInt32 powerOfTwo = 1;
+ while (powerOfTwo < REC_BUF_SIZE_IN_SAMPLES) {
+ powerOfTwo <<= 1;
+ }
+ _captureBufSizeSamples = powerOfTwo;
+ _captureBufData = new Float32[_captureBufSizeSamples];
+ }
+
+ if (_paCaptureBuffer == NULL) {
+ _paCaptureBuffer = new PaUtilRingBuffer;
+ ring_buffer_size_t bufSize = -1;
+ bufSize =
+ PaUtil_InitializeRingBuffer(_paCaptureBuffer, sizeof(Float32),
+ _captureBufSizeSamples, _captureBufData);
+ if (bufSize == -1) {
+ RTC_LOG(LS_ERROR) << "PaUtil_InitializeRingBuffer() error";
+ return InitStatus::RECORDING_ERROR;
+ }
+ }
+
+ kern_return_t kernErr = KERN_SUCCESS;
+ kernErr = semaphore_create(mach_task_self(), &_renderSemaphore,
+ SYNC_POLICY_FIFO, 0);
+ if (kernErr != KERN_SUCCESS) {
+ RTC_LOG(LS_ERROR) << "semaphore_create() error: " << kernErr;
+ return InitStatus::OTHER_ERROR;
+ }
+
+ kernErr = semaphore_create(mach_task_self(), &_captureSemaphore,
+ SYNC_POLICY_FIFO, 0);
+ if (kernErr != KERN_SUCCESS) {
+ RTC_LOG(LS_ERROR) << "semaphore_create() error: " << kernErr;
+ return InitStatus::OTHER_ERROR;
+ }
+
+ // Setting RunLoop to NULL here instructs HAL to manage its own thread for
+ // notifications. This was the default behaviour on OS X 10.5 and earlier,
+ // but now must be explicitly specified. HAL would otherwise try to use the
+ // main thread to issue notifications.
+ AudioObjectPropertyAddress propertyAddress = {
+ kAudioHardwarePropertyRunLoop, kAudioObjectPropertyScopeGlobal,
+ kAudioObjectPropertyElementMaster};
+ CFRunLoopRef runLoop = NULL;
+ UInt32 size = sizeof(CFRunLoopRef);
+ int aoerr = AudioObjectSetPropertyData(
+ kAudioObjectSystemObject, &propertyAddress, 0, NULL, size, &runLoop);
+ if (aoerr != noErr) {
+ RTC_LOG(LS_ERROR) << "Error in AudioObjectSetPropertyData: "
+ << (const char*)&aoerr;
+ return InitStatus::OTHER_ERROR;
+ }
+
+ // Listen for any device changes.
+ propertyAddress.mSelector = kAudioHardwarePropertyDevices;
+ WEBRTC_CA_LOG_ERR(AudioObjectAddPropertyListener(
+ kAudioObjectSystemObject, &propertyAddress, &objectListenerProc, this));
+
+ // Determine if this is a MacBook Pro
+ _macBookPro = false;
+ _macBookProPanRight = false;
+ char buf[128];
+ size_t length = sizeof(buf);
+ memset(buf, 0, length);
+
+ int intErr = sysctlbyname("hw.model", buf, &length, NULL, 0);
+ if (intErr != 0) {
+ RTC_LOG(LS_ERROR) << "Error in sysctlbyname(): " << err;
+ } else {
+ RTC_LOG(LS_VERBOSE) << "Hardware model: " << buf;
+ if (strncmp(buf, "MacBookPro", 10) == 0) {
+ _macBookPro = true;
+ }
+ }
+
+ _initialized = true;
+
+ return InitStatus::OK;
+}
+
+int32_t AudioDeviceMac::Terminate() {
+ if (!_initialized) {
+ return 0;
+ }
+
+ if (_recording) {
+ RTC_LOG(LS_ERROR) << "Recording must be stopped";
+ return -1;
+ }
+
+ if (_playing) {
+ RTC_LOG(LS_ERROR) << "Playback must be stopped";
+ return -1;
+ }
+
+ MutexLock lock(&mutex_);
+ _mixerManager.Close();
+
+ OSStatus err = noErr;
+ int retVal = 0;
+
+ AudioObjectPropertyAddress propertyAddress = {
+ kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal,
+ kAudioObjectPropertyElementMaster};
+ WEBRTC_CA_LOG_WARN(AudioObjectRemovePropertyListener(
+ kAudioObjectSystemObject, &propertyAddress, &objectListenerProc, this));
+
+ err = AudioHardwareUnload();
+ if (err != noErr) {
+ logCAMsg(rtc::LS_ERROR, "Error in AudioHardwareUnload()",
+ (const char*)&err);
+ retVal = -1;
+ }
+
+ _isShutDown = true;
+ _initialized = false;
+ _outputDeviceIsSpecified = false;
+ _inputDeviceIsSpecified = false;
+
+ return retVal;
+}
+
+bool AudioDeviceMac::Initialized() const {
+ return (_initialized);
+}
+
+int32_t AudioDeviceMac::SpeakerIsAvailable(bool& available) {
+ MutexLock lock(&mutex_);
+ return SpeakerIsAvailableLocked(available);
+}
+
+int32_t AudioDeviceMac::SpeakerIsAvailableLocked(bool& available) {
+ bool wasInitialized = _mixerManager.SpeakerIsInitialized();
+
+ // Make an attempt to open up the
+ // output mixer corresponding to the currently selected output device.
+ //
+ if (!wasInitialized && InitSpeakerLocked() == -1) {
+ available = false;
+ return 0;
+ }
+
+ // Given that InitSpeaker was successful, we know that a valid speaker
+ // exists.
+ available = true;
+
+ // Close the initialized output mixer
+ //
+ if (!wasInitialized) {
+ _mixerManager.CloseSpeaker();
+ }
+
+ return 0;
+}
+
+int32_t AudioDeviceMac::InitSpeaker() {
+ MutexLock lock(&mutex_);
+ return InitSpeakerLocked();
+}
+
+int32_t AudioDeviceMac::InitSpeakerLocked() {
+ if (_playing) {
+ return -1;
+ }
+
+ if (InitDevice(_outputDeviceIndex, _outputDeviceID, false) == -1) {
+ return -1;
+ }
+
+ if (_inputDeviceID == _outputDeviceID) {
+ _twoDevices = false;
+ } else {
+ _twoDevices = true;
+ }
+
+ if (_mixerManager.OpenSpeaker(_outputDeviceID) == -1) {
+ return -1;
+ }
+
+ return 0;
+}
+
+int32_t AudioDeviceMac::MicrophoneIsAvailable(bool& available) {
+ MutexLock lock(&mutex_);
+ return MicrophoneIsAvailableLocked(available);
+}
+
+int32_t AudioDeviceMac::MicrophoneIsAvailableLocked(bool& available) {
+ bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
+
+ // Make an attempt to open up the
+  // input mixer corresponding to the currently selected input device.
+ //
+ if (!wasInitialized && InitMicrophoneLocked() == -1) {
+ available = false;
+ return 0;
+ }
+
+ // Given that InitMicrophone was successful, we know that a valid microphone
+ // exists.
+ available = true;
+
+ // Close the initialized input mixer
+ //
+ if (!wasInitialized) {
+ _mixerManager.CloseMicrophone();
+ }
+
+ return 0;
+}
+
+int32_t AudioDeviceMac::InitMicrophone() {
+ MutexLock lock(&mutex_);
+ return InitMicrophoneLocked();
+}
+
+int32_t AudioDeviceMac::InitMicrophoneLocked() {
+ if (_recording) {
+ return -1;
+ }
+
+ if (InitDevice(_inputDeviceIndex, _inputDeviceID, true) == -1) {
+ return -1;
+ }
+
+ if (_inputDeviceID == _outputDeviceID) {
+ _twoDevices = false;
+ } else {
+ _twoDevices = true;
+ }
+
+ if (_mixerManager.OpenMicrophone(_inputDeviceID) == -1) {
+ return -1;
+ }
+
+ return 0;
+}
+
+bool AudioDeviceMac::SpeakerIsInitialized() const {
+ return (_mixerManager.SpeakerIsInitialized());
+}
+
+bool AudioDeviceMac::MicrophoneIsInitialized() const {
+ return (_mixerManager.MicrophoneIsInitialized());
+}
+
+int32_t AudioDeviceMac::SpeakerVolumeIsAvailable(bool& available) {
+ bool wasInitialized = _mixerManager.SpeakerIsInitialized();
+
+ // Make an attempt to open up the
+ // output mixer corresponding to the currently selected output device.
+ //
+ if (!wasInitialized && InitSpeaker() == -1) {
+ // If we end up here it means that the selected speaker has no volume
+ // control.
+ available = false;
+ return 0;
+ }
+
+ // Given that InitSpeaker was successful, we know that a volume control exists
+ //
+ available = true;
+
+ // Close the initialized output mixer
+ //
+ if (!wasInitialized) {
+ _mixerManager.CloseSpeaker();
+ }
+
+ return 0;
+}
+
+int32_t AudioDeviceMac::SetSpeakerVolume(uint32_t volume) {
+ return (_mixerManager.SetSpeakerVolume(volume));
+}
+
+int32_t AudioDeviceMac::SpeakerVolume(uint32_t& volume) const {
+ uint32_t level(0);
+
+ if (_mixerManager.SpeakerVolume(level) == -1) {
+ return -1;
+ }
+
+ volume = level;
+ return 0;
+}
+
+int32_t AudioDeviceMac::MaxSpeakerVolume(uint32_t& maxVolume) const {
+ uint32_t maxVol(0);
+
+ if (_mixerManager.MaxSpeakerVolume(maxVol) == -1) {
+ return -1;
+ }
+
+ maxVolume = maxVol;
+ return 0;
+}
+
+int32_t AudioDeviceMac::MinSpeakerVolume(uint32_t& minVolume) const {
+ uint32_t minVol(0);
+
+ if (_mixerManager.MinSpeakerVolume(minVol) == -1) {
+ return -1;
+ }
+
+ minVolume = minVol;
+ return 0;
+}
+
+int32_t AudioDeviceMac::SpeakerMuteIsAvailable(bool& available) {
+ bool isAvailable(false);
+ bool wasInitialized = _mixerManager.SpeakerIsInitialized();
+
+ // Make an attempt to open up the
+ // output mixer corresponding to the currently selected output device.
+ //
+ if (!wasInitialized && InitSpeaker() == -1) {
+    // If we end up here it means that the selected speaker could not be
+    // initialized, hence it is safe to state that there is no mute control
+    // at this stage.
+ available = false;
+ return 0;
+ }
+
+ // Check if the selected speaker has a mute control
+ //
+ _mixerManager.SpeakerMuteIsAvailable(isAvailable);
+
+ available = isAvailable;
+
+ // Close the initialized output mixer
+ //
+ if (!wasInitialized) {
+ _mixerManager.CloseSpeaker();
+ }
+
+ return 0;
+}
+
+int32_t AudioDeviceMac::SetSpeakerMute(bool enable) {
+ return (_mixerManager.SetSpeakerMute(enable));
+}
+
+int32_t AudioDeviceMac::SpeakerMute(bool& enabled) const {
+  bool muted(false);
+
+ if (_mixerManager.SpeakerMute(muted) == -1) {
+ return -1;
+ }
+
+ enabled = muted;
+ return 0;
+}
+
+int32_t AudioDeviceMac::MicrophoneMuteIsAvailable(bool& available) {
+ bool isAvailable(false);
+ bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
+
+ // Make an attempt to open up the
+ // input mixer corresponding to the currently selected input device.
+ //
+ if (!wasInitialized && InitMicrophone() == -1) {
+    // If we end up here it means that the selected microphone could not be
+    // initialized, hence it is safe to state that there is no mute control
+    // at this stage.
+ available = false;
+ return 0;
+ }
+
+ // Check if the selected microphone has a mute control
+ //
+ _mixerManager.MicrophoneMuteIsAvailable(isAvailable);
+ available = isAvailable;
+
+ // Close the initialized input mixer
+ //
+ if (!wasInitialized) {
+ _mixerManager.CloseMicrophone();
+ }
+
+ return 0;
+}
+
+int32_t AudioDeviceMac::SetMicrophoneMute(bool enable) {
+ return (_mixerManager.SetMicrophoneMute(enable));
+}
+
+int32_t AudioDeviceMac::MicrophoneMute(bool& enabled) const {
+  bool muted(false);
+
+ if (_mixerManager.MicrophoneMute(muted) == -1) {
+ return -1;
+ }
+
+ enabled = muted;
+ return 0;
+}
+
+int32_t AudioDeviceMac::StereoRecordingIsAvailable(bool& available) {
+ bool isAvailable(false);
+ bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
+
+ if (!wasInitialized && InitMicrophone() == -1) {
+ // Cannot open the specified device
+ available = false;
+ return 0;
+ }
+
+ // Check if the selected microphone can record stereo
+ //
+ _mixerManager.StereoRecordingIsAvailable(isAvailable);
+ available = isAvailable;
+
+ // Close the initialized input mixer
+ //
+ if (!wasInitialized) {
+ _mixerManager.CloseMicrophone();
+ }
+
+ return 0;
+}
+
+int32_t AudioDeviceMac::SetStereoRecording(bool enable) {
+ if (enable)
+ _recChannels = 2;
+ else
+ _recChannels = 1;
+
+ return 0;
+}
+
+int32_t AudioDeviceMac::StereoRecording(bool& enabled) const {
+ if (_recChannels == 2)
+ enabled = true;
+ else
+ enabled = false;
+
+ return 0;
+}
+
+int32_t AudioDeviceMac::StereoPlayoutIsAvailable(bool& available) {
+ bool isAvailable(false);
+ bool wasInitialized = _mixerManager.SpeakerIsInitialized();
+
+ if (!wasInitialized && InitSpeaker() == -1) {
+ // Cannot open the specified device
+ available = false;
+ return 0;
+ }
+
+  // Check if the selected speaker can play out in stereo
+ //
+ _mixerManager.StereoPlayoutIsAvailable(isAvailable);
+ available = isAvailable;
+
+  // Close the initialized output mixer
+ //
+ if (!wasInitialized) {
+ _mixerManager.CloseSpeaker();
+ }
+
+ return 0;
+}
+
+int32_t AudioDeviceMac::SetStereoPlayout(bool enable) {
+ if (enable)
+ _playChannels = 2;
+ else
+ _playChannels = 1;
+
+ return 0;
+}
+
+int32_t AudioDeviceMac::StereoPlayout(bool& enabled) const {
+ if (_playChannels == 2)
+ enabled = true;
+ else
+ enabled = false;
+
+ return 0;
+}
+
+int32_t AudioDeviceMac::MicrophoneVolumeIsAvailable(bool& available) {
+ bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
+
+ // Make an attempt to open up the
+  // input mixer corresponding to the currently selected input device.
+ //
+ if (!wasInitialized && InitMicrophone() == -1) {
+ // If we end up here it means that the selected microphone has no volume
+ // control.
+ available = false;
+ return 0;
+ }
+
+ // Given that InitMicrophone was successful, we know that a volume control
+ // exists
+ //
+ available = true;
+
+ // Close the initialized input mixer
+ //
+ if (!wasInitialized) {
+ _mixerManager.CloseMicrophone();
+ }
+
+ return 0;
+}
+
+int32_t AudioDeviceMac::SetMicrophoneVolume(uint32_t volume) {
+ return (_mixerManager.SetMicrophoneVolume(volume));
+}
+
+int32_t AudioDeviceMac::MicrophoneVolume(uint32_t& volume) const {
+ uint32_t level(0);
+
+ if (_mixerManager.MicrophoneVolume(level) == -1) {
+ RTC_LOG(LS_WARNING) << "failed to retrieve current microphone level";
+ return -1;
+ }
+
+ volume = level;
+ return 0;
+}
+
+int32_t AudioDeviceMac::MaxMicrophoneVolume(uint32_t& maxVolume) const {
+ uint32_t maxVol(0);
+
+ if (_mixerManager.MaxMicrophoneVolume(maxVol) == -1) {
+ return -1;
+ }
+
+ maxVolume = maxVol;
+ return 0;
+}
+
+int32_t AudioDeviceMac::MinMicrophoneVolume(uint32_t& minVolume) const {
+ uint32_t minVol(0);
+
+ if (_mixerManager.MinMicrophoneVolume(minVol) == -1) {
+ return -1;
+ }
+
+ minVolume = minVol;
+ return 0;
+}
+
+int16_t AudioDeviceMac::PlayoutDevices() {
+ AudioDeviceID playDevices[MaxNumberDevices];
+ return GetNumberDevices(kAudioDevicePropertyScopeOutput, playDevices,
+ MaxNumberDevices);
+}
+
+int32_t AudioDeviceMac::SetPlayoutDevice(uint16_t index) {
+ MutexLock lock(&mutex_);
+
+ if (_playIsInitialized) {
+ return -1;
+ }
+
+ AudioDeviceID playDevices[MaxNumberDevices];
+ uint32_t nDevices = GetNumberDevices(kAudioDevicePropertyScopeOutput,
+ playDevices, MaxNumberDevices);
+ RTC_LOG(LS_VERBOSE) << "number of available waveform-audio output devices is "
+ << nDevices;
+
+ if (index > (nDevices - 1)) {
+ RTC_LOG(LS_ERROR) << "device index is out of range [0," << (nDevices - 1)
+ << "]";
+ return -1;
+ }
+
+ _outputDeviceIndex = index;
+ _outputDeviceIsSpecified = true;
+
+ return 0;
+}
+
+int32_t AudioDeviceMac::SetPlayoutDevice(
+ AudioDeviceModule::WindowsDeviceType /*device*/) {
+ RTC_LOG(LS_ERROR) << "WindowsDeviceType not supported";
+ return -1;
+}
+
+int32_t AudioDeviceMac::PlayoutDeviceName(uint16_t index,
+ char name[kAdmMaxDeviceNameSize],
+ char guid[kAdmMaxGuidSize]) {
+ const uint16_t nDevices(PlayoutDevices());
+
+ if ((index > (nDevices - 1)) || (name == NULL)) {
+ return -1;
+ }
+
+ memset(name, 0, kAdmMaxDeviceNameSize);
+
+ if (guid != NULL) {
+ memset(guid, 0, kAdmMaxGuidSize);
+ }
+
+ return GetDeviceName(kAudioDevicePropertyScopeOutput, index,
+ rtc::ArrayView<char>(name, kAdmMaxDeviceNameSize));
+}
+
+int32_t AudioDeviceMac::RecordingDeviceName(uint16_t index,
+ char name[kAdmMaxDeviceNameSize],
+ char guid[kAdmMaxGuidSize]) {
+ const uint16_t nDevices(RecordingDevices());
+
+ if ((index > (nDevices - 1)) || (name == NULL)) {
+ return -1;
+ }
+
+ memset(name, 0, kAdmMaxDeviceNameSize);
+
+ if (guid != NULL) {
+ memset(guid, 0, kAdmMaxGuidSize);
+ }
+
+ return GetDeviceName(kAudioDevicePropertyScopeInput, index,
+ rtc::ArrayView<char>(name, kAdmMaxDeviceNameSize));
+}
+
+int16_t AudioDeviceMac::RecordingDevices() {
+ AudioDeviceID recDevices[MaxNumberDevices];
+ return GetNumberDevices(kAudioDevicePropertyScopeInput, recDevices,
+ MaxNumberDevices);
+}
+
+int32_t AudioDeviceMac::SetRecordingDevice(uint16_t index) {
+ if (_recIsInitialized) {
+ return -1;
+ }
+
+ AudioDeviceID recDevices[MaxNumberDevices];
+ uint32_t nDevices = GetNumberDevices(kAudioDevicePropertyScopeInput,
+ recDevices, MaxNumberDevices);
+ RTC_LOG(LS_VERBOSE) << "number of available waveform-audio input devices is "
+ << nDevices;
+
+ if (index > (nDevices - 1)) {
+ RTC_LOG(LS_ERROR) << "device index is out of range [0," << (nDevices - 1)
+ << "]";
+ return -1;
+ }
+
+ _inputDeviceIndex = index;
+ _inputDeviceIsSpecified = true;
+
+ return 0;
+}
+
+int32_t AudioDeviceMac::SetRecordingDevice(
+ AudioDeviceModule::WindowsDeviceType /*device*/) {
+ RTC_LOG(LS_ERROR) << "WindowsDeviceType not supported";
+ return -1;
+}
+
+int32_t AudioDeviceMac::PlayoutIsAvailable(bool& available) {
+ available = true;
+
+ // Try to initialize the playout side
+ if (InitPlayout() == -1) {
+ available = false;
+ }
+
+ // We destroy the IOProc created by InitPlayout() in implDeviceIOProc().
+ // We must actually start playout here in order to have the IOProc
+ // deleted by calling StopPlayout().
+ if (StartPlayout() == -1) {
+ available = false;
+ }
+
+ // Cancel effect of initialization
+ if (StopPlayout() == -1) {
+ available = false;
+ }
+
+ return 0;
+}
+
+int32_t AudioDeviceMac::RecordingIsAvailable(bool& available) {
+ available = true;
+
+ // Try to initialize the recording side
+ if (InitRecording() == -1) {
+ available = false;
+ }
+
+ // We destroy the IOProc created by InitRecording() in implInDeviceIOProc().
+ // We must actually start recording here in order to have the IOProc
+ // deleted by calling StopRecording().
+ if (StartRecording() == -1) {
+ available = false;
+ }
+
+ // Cancel effect of initialization
+ if (StopRecording() == -1) {
+ available = false;
+ }
+
+ return 0;
+}
+
+int32_t AudioDeviceMac::InitPlayout() {
+ RTC_LOG(LS_INFO) << "InitPlayout";
+ MutexLock lock(&mutex_);
+
+ if (_playing) {
+ return -1;
+ }
+
+ if (!_outputDeviceIsSpecified) {
+ return -1;
+ }
+
+ if (_playIsInitialized) {
+ return 0;
+ }
+
+ // Initialize the speaker (devices might have been added or removed)
+ if (InitSpeakerLocked() == -1) {
+ RTC_LOG(LS_WARNING) << "InitSpeaker() failed";
+ }
+
+ if (!MicrophoneIsInitialized()) {
+ // Make this call to check if we are using
+ // one or two devices (_twoDevices)
+ bool available = false;
+ if (MicrophoneIsAvailableLocked(available) == -1) {
+ RTC_LOG(LS_WARNING) << "MicrophoneIsAvailable() failed";
+ }
+ }
+
+ PaUtil_FlushRingBuffer(_paRenderBuffer);
+
+ OSStatus err = noErr;
+ UInt32 size = 0;
+ _renderDelayOffsetSamples = 0;
+ _renderDelayUs = 0;
+ _renderLatencyUs = 0;
+ _renderDeviceIsAlive = 1;
+ _doStop = false;
+
+ // The internal microphone of a MacBook Pro is located under the left speaker
+ // grille. When the internal speakers are in use, we want to fully stereo
+ // pan to the right.
+ AudioObjectPropertyAddress propertyAddress = {
+ kAudioDevicePropertyDataSource, kAudioDevicePropertyScopeOutput, 0};
+ if (_macBookPro) {
+ _macBookProPanRight = false;
+ Boolean hasProperty =
+ AudioObjectHasProperty(_outputDeviceID, &propertyAddress);
+ if (hasProperty) {
+ UInt32 dataSource = 0;
+ size = sizeof(dataSource);
+ WEBRTC_CA_LOG_WARN(AudioObjectGetPropertyData(
+ _outputDeviceID, &propertyAddress, 0, NULL, &size, &dataSource));
+
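+      // 'ispk' is the CoreAudio four-char data-source code for the built-in
+      // internal speaker.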
+ if (dataSource == 'ispk') {
+ _macBookProPanRight = true;
+ RTC_LOG(LS_VERBOSE)
+ << "MacBook Pro using internal speakers; stereo panning right";
+ } else {
+ RTC_LOG(LS_VERBOSE) << "MacBook Pro not using internal speakers";
+ }
+
+ // Add a listener to determine if the status changes.
+ WEBRTC_CA_LOG_WARN(AudioObjectAddPropertyListener(
+ _outputDeviceID, &propertyAddress, &objectListenerProc, this));
+ }
+ }
+
+ // Get current stream description
+ propertyAddress.mSelector = kAudioDevicePropertyStreamFormat;
+ memset(&_outStreamFormat, 0, sizeof(_outStreamFormat));
+ size = sizeof(_outStreamFormat);
+ WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
+ _outputDeviceID, &propertyAddress, 0, NULL, &size, &_outStreamFormat));
+
+ if (_outStreamFormat.mFormatID != kAudioFormatLinearPCM) {
+ logCAMsg(rtc::LS_ERROR, "Unacceptable output stream format -> mFormatID",
+ (const char*)&_outStreamFormat.mFormatID);
+ return -1;
+ }
+
+ if (_outStreamFormat.mChannelsPerFrame > N_DEVICE_CHANNELS) {
+ RTC_LOG(LS_ERROR)
+ << "Too many channels on output device (mChannelsPerFrame = "
+ << _outStreamFormat.mChannelsPerFrame << ")";
+ return -1;
+ }
+
+ if (_outStreamFormat.mFormatFlags & kAudioFormatFlagIsNonInterleaved) {
+ RTC_LOG(LS_ERROR) << "Non-interleaved audio data is not supported."
+ "AudioHardware streams should not have this format.";
+ return -1;
+ }
+
+ RTC_LOG(LS_VERBOSE) << "Ouput stream format:";
+ RTC_LOG(LS_VERBOSE) << "mSampleRate = " << _outStreamFormat.mSampleRate
+ << ", mChannelsPerFrame = "
+ << _outStreamFormat.mChannelsPerFrame;
+ RTC_LOG(LS_VERBOSE) << "mBytesPerPacket = "
+ << _outStreamFormat.mBytesPerPacket
+ << ", mFramesPerPacket = "
+ << _outStreamFormat.mFramesPerPacket;
+ RTC_LOG(LS_VERBOSE) << "mBytesPerFrame = " << _outStreamFormat.mBytesPerFrame
+ << ", mBitsPerChannel = "
+ << _outStreamFormat.mBitsPerChannel;
+ RTC_LOG(LS_VERBOSE) << "mFormatFlags = " << _outStreamFormat.mFormatFlags;
+ logCAMsg(rtc::LS_VERBOSE, "mFormatID",
+ (const char*)&_outStreamFormat.mFormatID);
+
+ // Our preferred format to work with.
+ if (_outStreamFormat.mChannelsPerFrame < 2) {
+ // Disable stereo playout when we only have one channel on the device.
+ _playChannels = 1;
+ RTC_LOG(LS_VERBOSE) << "Stereo playout unavailable on this device";
+ }
+ WEBRTC_CA_RETURN_ON_ERR(SetDesiredPlayoutFormat());
+
+ // Listen for format changes.
+ propertyAddress.mSelector = kAudioDevicePropertyStreamFormat;
+ WEBRTC_CA_LOG_WARN(AudioObjectAddPropertyListener(
+ _outputDeviceID, &propertyAddress, &objectListenerProc, this));
+
+ // Listen for processor overloads.
+ propertyAddress.mSelector = kAudioDeviceProcessorOverload;
+ WEBRTC_CA_LOG_WARN(AudioObjectAddPropertyListener(
+ _outputDeviceID, &propertyAddress, &objectListenerProc, this));
+
+ if (_twoDevices || !_recIsInitialized) {
+ WEBRTC_CA_RETURN_ON_ERR(AudioDeviceCreateIOProcID(
+ _outputDeviceID, deviceIOProc, this, &_deviceIOProcID));
+ }
+
+ _playIsInitialized = true;
+
+ return 0;
+}
+
+int32_t AudioDeviceMac::InitRecording() {
+ RTC_LOG(LS_INFO) << "InitRecording";
+ MutexLock lock(&mutex_);
+
+ if (_recording) {
+ return -1;
+ }
+
+ if (!_inputDeviceIsSpecified) {
+ return -1;
+ }
+
+ if (_recIsInitialized) {
+ return 0;
+ }
+
+ // Initialize the microphone (devices might have been added or removed)
+ if (InitMicrophoneLocked() == -1) {
+ RTC_LOG(LS_WARNING) << "InitMicrophone() failed";
+ }
+
+ if (!SpeakerIsInitialized()) {
+ // Make this call to check if we are using
+ // one or two devices (_twoDevices)
+ bool available = false;
+ if (SpeakerIsAvailableLocked(available) == -1) {
+ RTC_LOG(LS_WARNING) << "SpeakerIsAvailable() failed";
+ }
+ }
+
+ OSStatus err = noErr;
+ UInt32 size = 0;
+
+ PaUtil_FlushRingBuffer(_paCaptureBuffer);
+
+ _captureDelayUs = 0;
+ _captureLatencyUs = 0;
+ _captureDeviceIsAlive = 1;
+ _doStopRec = false;
+
+ // Get current stream description
+ AudioObjectPropertyAddress propertyAddress = {
+ kAudioDevicePropertyStreamFormat, kAudioDevicePropertyScopeInput, 0};
+ memset(&_inStreamFormat, 0, sizeof(_inStreamFormat));
+ size = sizeof(_inStreamFormat);
+ WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
+ _inputDeviceID, &propertyAddress, 0, NULL, &size, &_inStreamFormat));
+
+ if (_inStreamFormat.mFormatID != kAudioFormatLinearPCM) {
+ logCAMsg(rtc::LS_ERROR, "Unacceptable input stream format -> mFormatID",
+ (const char*)&_inStreamFormat.mFormatID);
+ return -1;
+ }
+
+ if (_inStreamFormat.mChannelsPerFrame > N_DEVICE_CHANNELS) {
+ RTC_LOG(LS_ERROR)
+ << "Too many channels on input device (mChannelsPerFrame = "
+ << _inStreamFormat.mChannelsPerFrame << ")";
+ return -1;
+ }
+
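+  // The HAL delivers N_BLOCKS_IO blocks per IO cycle, each holding 10 ms of
+  // audio (mSampleRate / 100 samples per channel), so the ring buffer must
+  // hold at least one full IO cycle.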
+ const int io_block_size_samples = _inStreamFormat.mChannelsPerFrame *
+ _inStreamFormat.mSampleRate / 100 *
+ N_BLOCKS_IO;
+ if (io_block_size_samples > _captureBufSizeSamples) {
+ RTC_LOG(LS_ERROR) << "Input IO block size (" << io_block_size_samples
+ << ") is larger than ring buffer ("
+ << _captureBufSizeSamples << ")";
+ return -1;
+ }
+
+ RTC_LOG(LS_VERBOSE) << "Input stream format:";
+ RTC_LOG(LS_VERBOSE) << "mSampleRate = " << _inStreamFormat.mSampleRate
+ << ", mChannelsPerFrame = "
+ << _inStreamFormat.mChannelsPerFrame;
+ RTC_LOG(LS_VERBOSE) << "mBytesPerPacket = " << _inStreamFormat.mBytesPerPacket
+ << ", mFramesPerPacket = "
+ << _inStreamFormat.mFramesPerPacket;
+ RTC_LOG(LS_VERBOSE) << "mBytesPerFrame = " << _inStreamFormat.mBytesPerFrame
+ << ", mBitsPerChannel = "
+ << _inStreamFormat.mBitsPerChannel;
+ RTC_LOG(LS_VERBOSE) << "mFormatFlags = " << _inStreamFormat.mFormatFlags;
+ logCAMsg(rtc::LS_VERBOSE, "mFormatID",
+ (const char*)&_inStreamFormat.mFormatID);
+
+ // Our preferred format to work with
+ if (_inStreamFormat.mChannelsPerFrame >= 2 && (_recChannels == 2)) {
+ _inDesiredFormat.mChannelsPerFrame = 2;
+ } else {
+ // Disable stereo recording when we only have one channel on the device.
+ _inDesiredFormat.mChannelsPerFrame = 1;
+ _recChannels = 1;
+ RTC_LOG(LS_VERBOSE) << "Stereo recording unavailable on this device";
+ }
+
+ if (_ptrAudioBuffer) {
+ // Update audio buffer with the selected parameters
+ _ptrAudioBuffer->SetRecordingSampleRate(N_REC_SAMPLES_PER_SEC);
+ _ptrAudioBuffer->SetRecordingChannels((uint8_t)_recChannels);
+ }
+
+ _inDesiredFormat.mSampleRate = N_REC_SAMPLES_PER_SEC;
+ _inDesiredFormat.mBytesPerPacket =
+ _inDesiredFormat.mChannelsPerFrame * sizeof(SInt16);
+ _inDesiredFormat.mFramesPerPacket = 1;
+ _inDesiredFormat.mBytesPerFrame =
+ _inDesiredFormat.mChannelsPerFrame * sizeof(SInt16);
+ _inDesiredFormat.mBitsPerChannel = sizeof(SInt16) * 8;
+
+ _inDesiredFormat.mFormatFlags =
+ kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
+#ifdef WEBRTC_ARCH_BIG_ENDIAN
+ _inDesiredFormat.mFormatFlags |= kLinearPCMFormatFlagIsBigEndian;
+#endif
+ _inDesiredFormat.mFormatID = kAudioFormatLinearPCM;
+
+ WEBRTC_CA_RETURN_ON_ERR(AudioConverterNew(&_inStreamFormat, &_inDesiredFormat,
+ &_captureConverter));
+
+  // First try to set the buffer size to the desired value (10 ms * N_BLOCKS_IO)
+ // TODO(xians): investigate this block.
+ UInt32 bufByteCount =
+ (UInt32)((_inStreamFormat.mSampleRate / 1000.0) * 10.0 * N_BLOCKS_IO *
+ _inStreamFormat.mChannelsPerFrame * sizeof(Float32));
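+  // Round the byte count up to a whole multiple of mFramesPerPacket,
+  // presumably to keep the buffer aligned with the device's packet size.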
+ if (_inStreamFormat.mFramesPerPacket != 0) {
+ if (bufByteCount % _inStreamFormat.mFramesPerPacket != 0) {
+ bufByteCount =
+ ((UInt32)(bufByteCount / _inStreamFormat.mFramesPerPacket) + 1) *
+ _inStreamFormat.mFramesPerPacket;
+ }
+ }
+
+ // Ensure the buffer size is within the acceptable range provided by the
+ // device.
+ propertyAddress.mSelector = kAudioDevicePropertyBufferSizeRange;
+ AudioValueRange range;
+ size = sizeof(range);
+ WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
+ _inputDeviceID, &propertyAddress, 0, NULL, &size, &range));
+ if (range.mMinimum > bufByteCount) {
+ bufByteCount = range.mMinimum;
+ } else if (range.mMaximum < bufByteCount) {
+ bufByteCount = range.mMaximum;
+ }
+
+ propertyAddress.mSelector = kAudioDevicePropertyBufferSize;
+ size = sizeof(bufByteCount);
+ WEBRTC_CA_RETURN_ON_ERR(AudioObjectSetPropertyData(
+ _inputDeviceID, &propertyAddress, 0, NULL, size, &bufByteCount));
+
+ // Get capture device latency
+ propertyAddress.mSelector = kAudioDevicePropertyLatency;
+ UInt32 latency = 0;
+ size = sizeof(UInt32);
+ WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
+ _inputDeviceID, &propertyAddress, 0, NULL, &size, &latency));
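+  // kAudioDevicePropertyLatency is reported in frames; convert it to
+  // microseconds using the stream sample rate.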
+ _captureLatencyUs = (UInt32)((1.0e6 * latency) / _inStreamFormat.mSampleRate);
+
+ // Get capture stream latency
+ propertyAddress.mSelector = kAudioDevicePropertyStreams;
+ AudioStreamID stream = 0;
+ size = sizeof(AudioStreamID);
+ WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
+ _inputDeviceID, &propertyAddress, 0, NULL, &size, &stream));
+ propertyAddress.mSelector = kAudioStreamPropertyLatency;
+ size = sizeof(UInt32);
+ latency = 0;
+ WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
+ _inputDeviceID, &propertyAddress, 0, NULL, &size, &latency));
+ _captureLatencyUs +=
+ (UInt32)((1.0e6 * latency) / _inStreamFormat.mSampleRate);
+
+ // Listen for format changes
+ // TODO(xians): should we be using kAudioDevicePropertyDeviceHasChanged?
+ propertyAddress.mSelector = kAudioDevicePropertyStreamFormat;
+ WEBRTC_CA_LOG_WARN(AudioObjectAddPropertyListener(
+ _inputDeviceID, &propertyAddress, &objectListenerProc, this));
+
+ // Listen for processor overloads
+ propertyAddress.mSelector = kAudioDeviceProcessorOverload;
+ WEBRTC_CA_LOG_WARN(AudioObjectAddPropertyListener(
+ _inputDeviceID, &propertyAddress, &objectListenerProc, this));
+
+ if (_twoDevices) {
+ WEBRTC_CA_RETURN_ON_ERR(AudioDeviceCreateIOProcID(
+ _inputDeviceID, inDeviceIOProc, this, &_inDeviceIOProcID));
+ } else if (!_playIsInitialized) {
+ WEBRTC_CA_RETURN_ON_ERR(AudioDeviceCreateIOProcID(
+ _inputDeviceID, deviceIOProc, this, &_deviceIOProcID));
+ }
+
+ // Mark recording side as initialized
+ _recIsInitialized = true;
+
+ return 0;
+}
+
+int32_t AudioDeviceMac::StartRecording() {
+ RTC_LOG(LS_INFO) << "StartRecording";
+ MutexLock lock(&mutex_);
+
+ if (!_recIsInitialized) {
+ return -1;
+ }
+
+ if (_recording) {
+ return 0;
+ }
+
+ if (!_initialized) {
+ RTC_LOG(LS_ERROR) << "Recording worker thread has not been started";
+ return -1;
+ }
+
+ RTC_DCHECK(capture_worker_thread_.empty());
+ capture_worker_thread_ = rtc::PlatformThread::SpawnJoinable(
+ [this] {
+ while (CaptureWorkerThread()) {
+ }
+ },
+ "CaptureWorkerThread",
+ rtc::ThreadAttributes().SetPriority(rtc::ThreadPriority::kRealtime));
+
+ OSStatus err = noErr;
+ if (_twoDevices) {
+ WEBRTC_CA_RETURN_ON_ERR(
+ AudioDeviceStart(_inputDeviceID, _inDeviceIOProcID));
+ } else if (!_playing) {
+ WEBRTC_CA_RETURN_ON_ERR(AudioDeviceStart(_inputDeviceID, _deviceIOProcID));
+ }
+
+ _recording = true;
+
+ return 0;
+}
+
+int32_t AudioDeviceMac::StopRecording() {
+ RTC_LOG(LS_INFO) << "StopRecording";
+ MutexLock lock(&mutex_);
+
+ if (!_recIsInitialized) {
+ return 0;
+ }
+
+ OSStatus err = noErr;
+ int32_t captureDeviceIsAlive = _captureDeviceIsAlive;
+ if (_twoDevices && captureDeviceIsAlive == 1) {
+ // Recording side uses its own dedicated device and IOProc.
+ if (_recording) {
+ _recording = false;
+ _doStopRec = true; // Signal to io proc to stop audio device
+ mutex_.Unlock(); // Cannot be under lock, risk of deadlock
+ if (!_stopEventRec.Wait(2000)) {
+ MutexLock lockScoped(&mutex_);
+ RTC_LOG(LS_WARNING) << "Timed out stopping the capture IOProc."
+ "We may have failed to detect a device removal.";
+ WEBRTC_CA_LOG_WARN(AudioDeviceStop(_inputDeviceID, _inDeviceIOProcID));
+ WEBRTC_CA_LOG_WARN(
+ AudioDeviceDestroyIOProcID(_inputDeviceID, _inDeviceIOProcID));
+ }
+ mutex_.Lock();
+ _doStopRec = false;
+ RTC_LOG(LS_INFO) << "Recording stopped (input device)";
+ } else if (_recIsInitialized) {
+ WEBRTC_CA_LOG_WARN(
+ AudioDeviceDestroyIOProcID(_inputDeviceID, _inDeviceIOProcID));
+ RTC_LOG(LS_INFO) << "Recording uninitialized (input device)";
+ }
+ } else {
+ // We signal a stop for a shared device even when rendering has
+ // not yet ended. This is to ensure the IOProc will return early as
+ // intended (by checking `_recording`) before accessing
+ // resources we free below (e.g. the capture converter).
+ //
+    // In the case of a shared device, the IOProc will verify
+ // rendering has ended before stopping itself.
+ if (_recording && captureDeviceIsAlive == 1) {
+ _recording = false;
+ _doStop = true; // Signal to io proc to stop audio device
+ mutex_.Unlock(); // Cannot be under lock, risk of deadlock
+ if (!_stopEvent.Wait(2000)) {
+ MutexLock lockScoped(&mutex_);
+        RTC_LOG(LS_WARNING) << "Timed out stopping the shared IOProc. "
+                               "We may have failed to detect a device removal.";
+ // We assume rendering on a shared device has stopped as well if
+ // the IOProc times out.
+ WEBRTC_CA_LOG_WARN(AudioDeviceStop(_outputDeviceID, _deviceIOProcID));
+ WEBRTC_CA_LOG_WARN(
+ AudioDeviceDestroyIOProcID(_outputDeviceID, _deviceIOProcID));
+ }
+ mutex_.Lock();
+ _doStop = false;
+ RTC_LOG(LS_INFO) << "Recording stopped (shared device)";
+ } else if (_recIsInitialized && !_playing && !_playIsInitialized) {
+ WEBRTC_CA_LOG_WARN(
+ AudioDeviceDestroyIOProcID(_outputDeviceID, _deviceIOProcID));
+ RTC_LOG(LS_INFO) << "Recording uninitialized (shared device)";
+ }
+ }
+
+ // Setting this signal will allow the worker thread to be stopped.
+ _captureDeviceIsAlive = 0;
+
+ if (!capture_worker_thread_.empty()) {
+ mutex_.Unlock();
+ capture_worker_thread_.Finalize();
+ mutex_.Lock();
+ }
+
+ WEBRTC_CA_LOG_WARN(AudioConverterDispose(_captureConverter));
+
+ // Remove listeners.
+ AudioObjectPropertyAddress propertyAddress = {
+ kAudioDevicePropertyStreamFormat, kAudioDevicePropertyScopeInput, 0};
+ WEBRTC_CA_LOG_WARN(AudioObjectRemovePropertyListener(
+ _inputDeviceID, &propertyAddress, &objectListenerProc, this));
+
+ propertyAddress.mSelector = kAudioDeviceProcessorOverload;
+ WEBRTC_CA_LOG_WARN(AudioObjectRemovePropertyListener(
+ _inputDeviceID, &propertyAddress, &objectListenerProc, this));
+
+ _recIsInitialized = false;
+ _recording = false;
+
+ return 0;
+}
+
+bool AudioDeviceMac::RecordingIsInitialized() const {
+ return (_recIsInitialized);
+}
+
+bool AudioDeviceMac::Recording() const {
+ return (_recording);
+}
+
+bool AudioDeviceMac::PlayoutIsInitialized() const {
+ return (_playIsInitialized);
+}
+
+int32_t AudioDeviceMac::StartPlayout() {
+ RTC_LOG(LS_INFO) << "StartPlayout";
+ MutexLock lock(&mutex_);
+
+ if (!_playIsInitialized) {
+ return -1;
+ }
+
+ if (_playing) {
+ return 0;
+ }
+
+ RTC_DCHECK(render_worker_thread_.empty());
+ render_worker_thread_ = rtc::PlatformThread::SpawnJoinable(
+ [this] {
+ while (RenderWorkerThread()) {
+ }
+ },
+ "RenderWorkerThread",
+ rtc::ThreadAttributes().SetPriority(rtc::ThreadPriority::kRealtime));
+
+ if (_twoDevices || !_recording) {
+ OSStatus err = noErr;
+ WEBRTC_CA_RETURN_ON_ERR(AudioDeviceStart(_outputDeviceID, _deviceIOProcID));
+ }
+ _playing = true;
+
+ return 0;
+}
+
+int32_t AudioDeviceMac::StopPlayout() {
+ RTC_LOG(LS_INFO) << "StopPlayout";
+ MutexLock lock(&mutex_);
+
+ if (!_playIsInitialized) {
+ return 0;
+ }
+
+ OSStatus err = noErr;
+ int32_t renderDeviceIsAlive = _renderDeviceIsAlive;
+ if (_playing && renderDeviceIsAlive == 1) {
+ // We signal a stop for a shared device even when capturing has not
+ // yet ended. This is to ensure the IOProc will return early as
+ // intended (by checking `_playing`) before accessing resources we
+ // free below (e.g. the render converter).
+ //
+ // In the case of a shared device, the IOProc will verify capturing
+ // has ended before stopping itself.
+ _playing = false;
+ _doStop = true; // Signal to io proc to stop audio device
+ mutex_.Unlock(); // Cannot be under lock, risk of deadlock
+ if (!_stopEvent.Wait(2000)) {
+ MutexLock lockScoped(&mutex_);
+      RTC_LOG(LS_WARNING) << "Timed out stopping the render IOProc. "
+                             "We may have failed to detect a device removal.";
+
+ // We assume capturing on a shared device has stopped as well if the
+ // IOProc times out.
+ WEBRTC_CA_LOG_WARN(AudioDeviceStop(_outputDeviceID, _deviceIOProcID));
+ WEBRTC_CA_LOG_WARN(
+ AudioDeviceDestroyIOProcID(_outputDeviceID, _deviceIOProcID));
+ }
+ mutex_.Lock();
+ _doStop = false;
+ RTC_LOG(LS_INFO) << "Playout stopped";
+ } else if (_twoDevices && _playIsInitialized) {
+ WEBRTC_CA_LOG_WARN(
+ AudioDeviceDestroyIOProcID(_outputDeviceID, _deviceIOProcID));
+ RTC_LOG(LS_INFO) << "Playout uninitialized (output device)";
+ } else if (!_twoDevices && _playIsInitialized && !_recIsInitialized) {
+ WEBRTC_CA_LOG_WARN(
+ AudioDeviceDestroyIOProcID(_outputDeviceID, _deviceIOProcID));
+ RTC_LOG(LS_INFO) << "Playout uninitialized (shared device)";
+ }
+
+ // Setting this signal will allow the worker thread to be stopped.
+ _renderDeviceIsAlive = 0;
+ if (!render_worker_thread_.empty()) {
+ mutex_.Unlock();
+ render_worker_thread_.Finalize();
+ mutex_.Lock();
+ }
+
+ WEBRTC_CA_LOG_WARN(AudioConverterDispose(_renderConverter));
+
+ // Remove listeners.
+ AudioObjectPropertyAddress propertyAddress = {
+ kAudioDevicePropertyStreamFormat, kAudioDevicePropertyScopeOutput, 0};
+ WEBRTC_CA_LOG_WARN(AudioObjectRemovePropertyListener(
+ _outputDeviceID, &propertyAddress, &objectListenerProc, this));
+
+ propertyAddress.mSelector = kAudioDeviceProcessorOverload;
+ WEBRTC_CA_LOG_WARN(AudioObjectRemovePropertyListener(
+ _outputDeviceID, &propertyAddress, &objectListenerProc, this));
+
+ if (_macBookPro) {
+ Boolean hasProperty =
+ AudioObjectHasProperty(_outputDeviceID, &propertyAddress);
+ if (hasProperty) {
+ propertyAddress.mSelector = kAudioDevicePropertyDataSource;
+ WEBRTC_CA_LOG_WARN(AudioObjectRemovePropertyListener(
+ _outputDeviceID, &propertyAddress, &objectListenerProc, this));
+ }
+ }
+
+ _playIsInitialized = false;
+ _playing = false;
+
+ return 0;
+}
+
+int32_t AudioDeviceMac::PlayoutDelay(uint16_t& delayMS) const {
+ int32_t renderDelayUs = _renderDelayUs;
+ delayMS =
+ static_cast<uint16_t>(1e-3 * (renderDelayUs + _renderLatencyUs) + 0.5);
+ return 0;
+}
+
+bool AudioDeviceMac::Playing() const {
+ return (_playing);
+}
+
+// ============================================================================
+// Private Methods
+// ============================================================================
+
+int32_t AudioDeviceMac::GetNumberDevices(const AudioObjectPropertyScope scope,
+ AudioDeviceID scopedDeviceIds[],
+ const uint32_t deviceListLength) {
+ OSStatus err = noErr;
+
+ AudioObjectPropertyAddress propertyAddress = {
+ kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal,
+ kAudioObjectPropertyElementMaster};
+ UInt32 size = 0;
+ WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyDataSize(
+ kAudioObjectSystemObject, &propertyAddress, 0, NULL, &size));
+ if (size == 0) {
+ RTC_LOG(LS_WARNING) << "No devices";
+ return 0;
+ }
+
+ UInt32 numberDevices = size / sizeof(AudioDeviceID);
+ const auto deviceIds = std::make_unique<AudioDeviceID[]>(numberDevices);
+ AudioBufferList* bufferList = NULL;
+ UInt32 numberScopedDevices = 0;
+
+ // First check if there is a default device and list it
+ UInt32 hardwareProperty = 0;
+ if (scope == kAudioDevicePropertyScopeOutput) {
+ hardwareProperty = kAudioHardwarePropertyDefaultOutputDevice;
+ } else {
+ hardwareProperty = kAudioHardwarePropertyDefaultInputDevice;
+ }
+
+ AudioObjectPropertyAddress propertyAddressDefault = {
+ hardwareProperty, kAudioObjectPropertyScopeGlobal,
+ kAudioObjectPropertyElementMaster};
+
+ AudioDeviceID usedID;
+ UInt32 uintSize = sizeof(UInt32);
+ WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(kAudioObjectSystemObject,
+ &propertyAddressDefault, 0,
+ NULL, &uintSize, &usedID));
+ if (usedID != kAudioDeviceUnknown) {
+ scopedDeviceIds[numberScopedDevices] = usedID;
+ numberScopedDevices++;
+ } else {
+ RTC_LOG(LS_WARNING) << "GetNumberDevices(): Default device unknown";
+ }
+
+ // Then list the rest of the devices
+ bool listOK = true;
+
+ WEBRTC_CA_LOG_ERR(AudioObjectGetPropertyData(kAudioObjectSystemObject,
+ &propertyAddress, 0, NULL, &size,
+ deviceIds.get()));
+ if (err != noErr) {
+ listOK = false;
+ } else {
+ propertyAddress.mSelector = kAudioDevicePropertyStreamConfiguration;
+ propertyAddress.mScope = scope;
+ propertyAddress.mElement = 0;
+ for (UInt32 i = 0; i < numberDevices; i++) {
+ // Check for input channels
+ WEBRTC_CA_LOG_ERR(AudioObjectGetPropertyDataSize(
+ deviceIds[i], &propertyAddress, 0, NULL, &size));
+ if (err == kAudioHardwareBadDeviceError) {
+ // This device doesn't actually exist; continue iterating.
+ continue;
+ } else if (err != noErr) {
+ listOK = false;
+ break;
+ }
+
+ bufferList = (AudioBufferList*)malloc(size);
+ WEBRTC_CA_LOG_ERR(AudioObjectGetPropertyData(
+ deviceIds[i], &propertyAddress, 0, NULL, &size, bufferList));
+ if (err != noErr) {
+ listOK = false;
+ break;
+ }
+
+ if (bufferList->mNumberBuffers > 0) {
+ if (numberScopedDevices >= deviceListLength) {
+ RTC_LOG(LS_ERROR) << "Device list is not long enough";
+ listOK = false;
+ break;
+ }
+
+ scopedDeviceIds[numberScopedDevices] = deviceIds[i];
+ numberScopedDevices++;
+ }
+
+ free(bufferList);
+ bufferList = NULL;
+ } // for
+ }
+
+ if (!listOK) {
+ if (bufferList) {
+ free(bufferList);
+ bufferList = NULL;
+ }
+ return -1;
+ }
+
+ return numberScopedDevices;
+}
+
+int32_t AudioDeviceMac::GetDeviceName(const AudioObjectPropertyScope scope,
+ const uint16_t index,
+ rtc::ArrayView<char> name) {
+ OSStatus err = noErr;
+ AudioDeviceID deviceIds[MaxNumberDevices];
+
+ int numberDevices = GetNumberDevices(scope, deviceIds, MaxNumberDevices);
+ if (numberDevices < 0) {
+ return -1;
+ } else if (numberDevices == 0) {
+ RTC_LOG(LS_ERROR) << "No devices";
+ return -1;
+ }
+
+  // If the index is below the number of devices, assume it is a WebRTC device
+  // index; otherwise assume it is a CoreAudio device ID.
+ AudioDeviceID usedID;
+
+ // Check if there is a default device
+ bool isDefaultDevice = false;
+ if (index == 0) {
+ UInt32 hardwareProperty = 0;
+ if (scope == kAudioDevicePropertyScopeOutput) {
+ hardwareProperty = kAudioHardwarePropertyDefaultOutputDevice;
+ } else {
+ hardwareProperty = kAudioHardwarePropertyDefaultInputDevice;
+ }
+ AudioObjectPropertyAddress propertyAddress = {
+ hardwareProperty, kAudioObjectPropertyScopeGlobal,
+ kAudioObjectPropertyElementMaster};
+ UInt32 size = sizeof(UInt32);
+ WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
+ kAudioObjectSystemObject, &propertyAddress, 0, NULL, &size, &usedID));
+ if (usedID == kAudioDeviceUnknown) {
+ RTC_LOG(LS_WARNING) << "GetDeviceName(): Default device unknown";
+ } else {
+ isDefaultDevice = true;
+ }
+ }
+
+ AudioObjectPropertyAddress propertyAddress = {kAudioDevicePropertyDeviceName,
+ scope, 0};
+
+ if (isDefaultDevice) {
+ std::array<char, kAdmMaxDeviceNameSize> devName;
+ UInt32 len = devName.size();
+
+ WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
+ usedID, &propertyAddress, 0, NULL, &len, devName.data()));
+
+ rtc::SimpleStringBuilder ss(name);
+ ss.AppendFormat("default (%s)", devName.data());
+ } else {
+ if (index < numberDevices) {
+ usedID = deviceIds[index];
+ } else {
+ usedID = index;
+ }
+ UInt32 len = name.size();
+
+ WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
+ usedID, &propertyAddress, 0, NULL, &len, name.data()));
+ }
+
+ return 0;
+}
+
+int32_t AudioDeviceMac::InitDevice(const uint16_t userDeviceIndex,
+ AudioDeviceID& deviceId,
+ const bool isInput) {
+ OSStatus err = noErr;
+ UInt32 size = 0;
+ AudioObjectPropertyScope deviceScope;
+ AudioObjectPropertySelector defaultDeviceSelector;
+ AudioDeviceID deviceIds[MaxNumberDevices];
+
+ if (isInput) {
+ deviceScope = kAudioDevicePropertyScopeInput;
+ defaultDeviceSelector = kAudioHardwarePropertyDefaultInputDevice;
+ } else {
+ deviceScope = kAudioDevicePropertyScopeOutput;
+ defaultDeviceSelector = kAudioHardwarePropertyDefaultOutputDevice;
+ }
+
+ AudioObjectPropertyAddress propertyAddress = {
+ defaultDeviceSelector, kAudioObjectPropertyScopeGlobal,
+ kAudioObjectPropertyElementMaster};
+
+ // Get the actual device IDs
+ int numberDevices =
+ GetNumberDevices(deviceScope, deviceIds, MaxNumberDevices);
+ if (numberDevices < 0) {
+ return -1;
+ } else if (numberDevices == 0) {
+ RTC_LOG(LS_ERROR) << "InitDevice(): No devices";
+ return -1;
+ }
+
+ bool isDefaultDevice = false;
+ deviceId = kAudioDeviceUnknown;
+ if (userDeviceIndex == 0) {
+ // Try to use default system device
+ size = sizeof(AudioDeviceID);
+ WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
+ kAudioObjectSystemObject, &propertyAddress, 0, NULL, &size, &deviceId));
+ if (deviceId == kAudioDeviceUnknown) {
+ RTC_LOG(LS_WARNING) << "No default device exists";
+ } else {
+ isDefaultDevice = true;
+ }
+ }
+
+ if (!isDefaultDevice) {
+ deviceId = deviceIds[userDeviceIndex];
+ }
+
+ // Obtain device name and manufacturer for logging.
+ // Also use this as a test to ensure a user-set device ID is valid.
+ char devName[128];
+ char devManf[128];
+ memset(devName, 0, sizeof(devName));
+ memset(devManf, 0, sizeof(devManf));
+
+ propertyAddress.mSelector = kAudioDevicePropertyDeviceName;
+ propertyAddress.mScope = deviceScope;
+ propertyAddress.mElement = 0;
+ size = sizeof(devName);
+ WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(deviceId, &propertyAddress,
+ 0, NULL, &size, devName));
+
+ propertyAddress.mSelector = kAudioDevicePropertyDeviceManufacturer;
+ size = sizeof(devManf);
+ WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(deviceId, &propertyAddress,
+ 0, NULL, &size, devManf));
+
+ if (isInput) {
+ RTC_LOG(LS_INFO) << "Input device: " << devManf << " " << devName;
+ } else {
+ RTC_LOG(LS_INFO) << "Output device: " << devManf << " " << devName;
+ }
+
+ return 0;
+}
+
+OSStatus AudioDeviceMac::SetDesiredPlayoutFormat() {
+ // Our preferred format to work with.
+ _outDesiredFormat.mSampleRate = N_PLAY_SAMPLES_PER_SEC;
+ _outDesiredFormat.mChannelsPerFrame = _playChannels;
+
+ if (_ptrAudioBuffer) {
+ // Update audio buffer with the selected parameters.
+ _ptrAudioBuffer->SetPlayoutSampleRate(N_PLAY_SAMPLES_PER_SEC);
+ _ptrAudioBuffer->SetPlayoutChannels((uint8_t)_playChannels);
+ }
+
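+  // Headroom kept in the render ring buffer beyond N_BUFFERS_OUT 10 ms
+  // engine buffers; RenderWorkerThread() subtracts it when checking the
+  // space available for writing.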
+ _renderDelayOffsetSamples =
+ _renderBufSizeSamples - N_BUFFERS_OUT * ENGINE_PLAY_BUF_SIZE_IN_SAMPLES *
+ _outDesiredFormat.mChannelsPerFrame;
+
+ _outDesiredFormat.mBytesPerPacket =
+ _outDesiredFormat.mChannelsPerFrame * sizeof(SInt16);
+ // In uncompressed audio, a packet is one frame.
+ _outDesiredFormat.mFramesPerPacket = 1;
+ _outDesiredFormat.mBytesPerFrame =
+ _outDesiredFormat.mChannelsPerFrame * sizeof(SInt16);
+ _outDesiredFormat.mBitsPerChannel = sizeof(SInt16) * 8;
+
+ _outDesiredFormat.mFormatFlags =
+ kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
+#ifdef WEBRTC_ARCH_BIG_ENDIAN
+ _outDesiredFormat.mFormatFlags |= kLinearPCMFormatFlagIsBigEndian;
+#endif
+ _outDesiredFormat.mFormatID = kAudioFormatLinearPCM;
+
+ OSStatus err = noErr;
+ WEBRTC_CA_RETURN_ON_ERR(AudioConverterNew(
+ &_outDesiredFormat, &_outStreamFormat, &_renderConverter));
+
+  // Try to set the buffer size to the desired value, fixed at 20 ms.
+ const uint16_t kPlayBufDelayFixed = 20;
+ UInt32 bufByteCount = static_cast<UInt32>(
+ (_outStreamFormat.mSampleRate / 1000.0) * kPlayBufDelayFixed *
+ _outStreamFormat.mChannelsPerFrame * sizeof(Float32));
+ if (_outStreamFormat.mFramesPerPacket != 0) {
+ if (bufByteCount % _outStreamFormat.mFramesPerPacket != 0) {
+ bufByteCount = (static_cast<UInt32>(bufByteCount /
+ _outStreamFormat.mFramesPerPacket) +
+ 1) *
+ _outStreamFormat.mFramesPerPacket;
+ }
+ }
+
+ // Ensure the buffer size is within the range provided by the device.
+ AudioObjectPropertyAddress propertyAddress = {
+ kAudioDevicePropertyDataSource, kAudioDevicePropertyScopeOutput, 0};
+ propertyAddress.mSelector = kAudioDevicePropertyBufferSizeRange;
+ AudioValueRange range;
+ UInt32 size = sizeof(range);
+ WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
+ _outputDeviceID, &propertyAddress, 0, NULL, &size, &range));
+ if (range.mMinimum > bufByteCount) {
+ bufByteCount = range.mMinimum;
+ } else if (range.mMaximum < bufByteCount) {
+ bufByteCount = range.mMaximum;
+ }
+
+ propertyAddress.mSelector = kAudioDevicePropertyBufferSize;
+ size = sizeof(bufByteCount);
+ WEBRTC_CA_RETURN_ON_ERR(AudioObjectSetPropertyData(
+ _outputDeviceID, &propertyAddress, 0, NULL, size, &bufByteCount));
+
+ // Get render device latency.
+ propertyAddress.mSelector = kAudioDevicePropertyLatency;
+ UInt32 latency = 0;
+ size = sizeof(UInt32);
+ WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
+ _outputDeviceID, &propertyAddress, 0, NULL, &size, &latency));
+ _renderLatencyUs =
+ static_cast<uint32_t>((1.0e6 * latency) / _outStreamFormat.mSampleRate);
+
+ // Get render stream latency.
+ propertyAddress.mSelector = kAudioDevicePropertyStreams;
+ AudioStreamID stream = 0;
+ size = sizeof(AudioStreamID);
+ WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
+ _outputDeviceID, &propertyAddress, 0, NULL, &size, &stream));
+ propertyAddress.mSelector = kAudioStreamPropertyLatency;
+ size = sizeof(UInt32);
+ latency = 0;
+ WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
+ _outputDeviceID, &propertyAddress, 0, NULL, &size, &latency));
+ _renderLatencyUs +=
+ static_cast<uint32_t>((1.0e6 * latency) / _outStreamFormat.mSampleRate);
+
+ RTC_LOG(LS_VERBOSE) << "initial playout status: _renderDelayOffsetSamples="
+ << _renderDelayOffsetSamples
+ << ", _renderDelayUs=" << _renderDelayUs
+ << ", _renderLatencyUs=" << _renderLatencyUs;
+ return 0;
+}
+
+OSStatus AudioDeviceMac::objectListenerProc(
+ AudioObjectID objectId,
+ UInt32 numberAddresses,
+ const AudioObjectPropertyAddress addresses[],
+ void* clientData) {
+ AudioDeviceMac* ptrThis = (AudioDeviceMac*)clientData;
+ RTC_DCHECK(ptrThis != NULL);
+
+ ptrThis->implObjectListenerProc(objectId, numberAddresses, addresses);
+
+ // AudioObjectPropertyListenerProc functions are supposed to return 0
+ return 0;
+}
+
+OSStatus AudioDeviceMac::implObjectListenerProc(
+ const AudioObjectID objectId,
+ const UInt32 numberAddresses,
+ const AudioObjectPropertyAddress addresses[]) {
+ RTC_LOG(LS_VERBOSE) << "AudioDeviceMac::implObjectListenerProc()";
+
+ for (UInt32 i = 0; i < numberAddresses; i++) {
+ if (addresses[i].mSelector == kAudioHardwarePropertyDevices) {
+ HandleDeviceChange();
+ } else if (addresses[i].mSelector == kAudioDevicePropertyStreamFormat) {
+ HandleStreamFormatChange(objectId, addresses[i]);
+ } else if (addresses[i].mSelector == kAudioDevicePropertyDataSource) {
+ HandleDataSourceChange(objectId, addresses[i]);
+ } else if (addresses[i].mSelector == kAudioDeviceProcessorOverload) {
+ HandleProcessorOverload(addresses[i]);
+ }
+ }
+
+ return 0;
+}
+
+int32_t AudioDeviceMac::HandleDeviceChange() {
+ OSStatus err = noErr;
+
+ RTC_LOG(LS_VERBOSE) << "kAudioHardwarePropertyDevices";
+
+ // A device has changed. Check if our registered devices have been removed.
+ // Ensure the devices have been initialized, meaning the IDs are valid.
+ if (MicrophoneIsInitialized()) {
+ AudioObjectPropertyAddress propertyAddress = {
+ kAudioDevicePropertyDeviceIsAlive, kAudioDevicePropertyScopeInput, 0};
+ UInt32 deviceIsAlive = 1;
+ UInt32 size = sizeof(UInt32);
+ err = AudioObjectGetPropertyData(_inputDeviceID, &propertyAddress, 0, NULL,
+ &size, &deviceIsAlive);
+
+ if (err == kAudioHardwareBadDeviceError || deviceIsAlive == 0) {
+ RTC_LOG(LS_WARNING) << "Capture device is not alive (probably removed)";
+ _captureDeviceIsAlive = 0;
+ _mixerManager.CloseMicrophone();
+ } else if (err != noErr) {
+ logCAMsg(rtc::LS_ERROR, "Error in AudioDeviceGetProperty()",
+ (const char*)&err);
+ return -1;
+ }
+ }
+
+ if (SpeakerIsInitialized()) {
+ AudioObjectPropertyAddress propertyAddress = {
+ kAudioDevicePropertyDeviceIsAlive, kAudioDevicePropertyScopeOutput, 0};
+ UInt32 deviceIsAlive = 1;
+ UInt32 size = sizeof(UInt32);
+ err = AudioObjectGetPropertyData(_outputDeviceID, &propertyAddress, 0, NULL,
+ &size, &deviceIsAlive);
+
+ if (err == kAudioHardwareBadDeviceError || deviceIsAlive == 0) {
+ RTC_LOG(LS_WARNING) << "Render device is not alive (probably removed)";
+ _renderDeviceIsAlive = 0;
+ _mixerManager.CloseSpeaker();
+ } else if (err != noErr) {
+ logCAMsg(rtc::LS_ERROR, "Error in AudioDeviceGetProperty()",
+ (const char*)&err);
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+int32_t AudioDeviceMac::HandleStreamFormatChange(
+ const AudioObjectID objectId,
+ const AudioObjectPropertyAddress propertyAddress) {
+ OSStatus err = noErr;
+
+ RTC_LOG(LS_VERBOSE) << "Stream format changed";
+
+ if (objectId != _inputDeviceID && objectId != _outputDeviceID) {
+ return 0;
+ }
+
+ // Get the new device format
+ AudioStreamBasicDescription streamFormat;
+ UInt32 size = sizeof(streamFormat);
+ WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
+ objectId, &propertyAddress, 0, NULL, &size, &streamFormat));
+
+ if (streamFormat.mFormatID != kAudioFormatLinearPCM) {
+ logCAMsg(rtc::LS_ERROR, "Unacceptable input stream format -> mFormatID",
+ (const char*)&streamFormat.mFormatID);
+ return -1;
+ }
+
+ if (streamFormat.mChannelsPerFrame > N_DEVICE_CHANNELS) {
+ RTC_LOG(LS_ERROR) << "Too many channels on device (mChannelsPerFrame = "
+ << streamFormat.mChannelsPerFrame << ")";
+ return -1;
+ }
+
+ if (_ptrAudioBuffer && streamFormat.mChannelsPerFrame != _recChannels) {
+ RTC_LOG(LS_ERROR) << "Changing channels not supported (mChannelsPerFrame = "
+ << streamFormat.mChannelsPerFrame << ")";
+ return -1;
+ }
+
+ RTC_LOG(LS_VERBOSE) << "Stream format:";
+ RTC_LOG(LS_VERBOSE) << "mSampleRate = " << streamFormat.mSampleRate
+ << ", mChannelsPerFrame = "
+ << streamFormat.mChannelsPerFrame;
+ RTC_LOG(LS_VERBOSE) << "mBytesPerPacket = " << streamFormat.mBytesPerPacket
+ << ", mFramesPerPacket = "
+ << streamFormat.mFramesPerPacket;
+ RTC_LOG(LS_VERBOSE) << "mBytesPerFrame = " << streamFormat.mBytesPerFrame
+ << ", mBitsPerChannel = " << streamFormat.mBitsPerChannel;
+ RTC_LOG(LS_VERBOSE) << "mFormatFlags = " << streamFormat.mFormatFlags;
+ logCAMsg(rtc::LS_VERBOSE, "mFormatID", (const char*)&streamFormat.mFormatID);
+
+ if (propertyAddress.mScope == kAudioDevicePropertyScopeInput) {
+ const int io_block_size_samples = streamFormat.mChannelsPerFrame *
+ streamFormat.mSampleRate / 100 *
+ N_BLOCKS_IO;
+ if (io_block_size_samples > _captureBufSizeSamples) {
+ RTC_LOG(LS_ERROR) << "Input IO block size (" << io_block_size_samples
+ << ") is larger than ring buffer ("
+ << _captureBufSizeSamples << ")";
+ return -1;
+ }
+
+ memcpy(&_inStreamFormat, &streamFormat, sizeof(streamFormat));
+
+ if (_inStreamFormat.mChannelsPerFrame >= 2 && (_recChannels == 2)) {
+ _inDesiredFormat.mChannelsPerFrame = 2;
+ } else {
+ // Disable stereo recording when we only have one channel on the device.
+ _inDesiredFormat.mChannelsPerFrame = 1;
+ _recChannels = 1;
+ RTC_LOG(LS_VERBOSE) << "Stereo recording unavailable on this device";
+ }
+
+ // Recreate the converter with the new format
+ // TODO(xians): make this thread safe
+ WEBRTC_CA_RETURN_ON_ERR(AudioConverterDispose(_captureConverter));
+
+ WEBRTC_CA_RETURN_ON_ERR(AudioConverterNew(&streamFormat, &_inDesiredFormat,
+ &_captureConverter));
+ } else {
+ memcpy(&_outStreamFormat, &streamFormat, sizeof(streamFormat));
+
+ // Our preferred format to work with
+ if (_outStreamFormat.mChannelsPerFrame < 2) {
+ _playChannels = 1;
+ RTC_LOG(LS_VERBOSE) << "Stereo playout unavailable on this device";
+ }
+ WEBRTC_CA_RETURN_ON_ERR(SetDesiredPlayoutFormat());
+ }
+ return 0;
+}
+
+int32_t AudioDeviceMac::HandleDataSourceChange(
+ const AudioObjectID objectId,
+ const AudioObjectPropertyAddress propertyAddress) {
+ OSStatus err = noErr;
+
+ if (_macBookPro &&
+ propertyAddress.mScope == kAudioDevicePropertyScopeOutput) {
+ RTC_LOG(LS_VERBOSE) << "Data source changed";
+
+ _macBookProPanRight = false;
+ UInt32 dataSource = 0;
+ UInt32 size = sizeof(UInt32);
+ WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
+ objectId, &propertyAddress, 0, NULL, &size, &dataSource));
+ if (dataSource == 'ispk') {
+ _macBookProPanRight = true;
+ RTC_LOG(LS_VERBOSE)
+ << "MacBook Pro using internal speakers; stereo panning right";
+ } else {
+ RTC_LOG(LS_VERBOSE) << "MacBook Pro not using internal speakers";
+ }
+ }
+
+ return 0;
+}
+
+int32_t AudioDeviceMac::HandleProcessorOverload(
+ const AudioObjectPropertyAddress propertyAddress) {
+ // TODO(xians): we probably want to notify the user in some way of the
+ // overload. However, the Windows interpretations of these errors seem to
+ // be more severe than what ProcessorOverload is thrown for.
+ //
+ // We don't log the notification, as it's sent from the HAL's IO thread. We
+ // don't want to slow it down even further.
+ if (propertyAddress.mScope == kAudioDevicePropertyScopeInput) {
+ // RTC_LOG(LS_WARNING) << "Capture processor // overload";
+ //_callback->ProblemIsReported(
+ // SndCardStreamObserver::ERecordingProblem);
+ } else {
+ // RTC_LOG(LS_WARNING) << "Render processor overload";
+ //_callback->ProblemIsReported(
+ // SndCardStreamObserver::EPlaybackProblem);
+ }
+
+ return 0;
+}
+
+// ============================================================================
+// Thread Methods
+// ============================================================================
+
+OSStatus AudioDeviceMac::deviceIOProc(AudioDeviceID,
+ const AudioTimeStamp*,
+ const AudioBufferList* inputData,
+ const AudioTimeStamp* inputTime,
+ AudioBufferList* outputData,
+ const AudioTimeStamp* outputTime,
+ void* clientData) {
+ AudioDeviceMac* ptrThis = (AudioDeviceMac*)clientData;
+ RTC_DCHECK(ptrThis != NULL);
+
+ ptrThis->implDeviceIOProc(inputData, inputTime, outputData, outputTime);
+
+ // AudioDeviceIOProc functions are supposed to return 0
+ return 0;
+}
+
+OSStatus AudioDeviceMac::outConverterProc(AudioConverterRef,
+ UInt32* numberDataPackets,
+ AudioBufferList* data,
+ AudioStreamPacketDescription**,
+ void* userData) {
+ AudioDeviceMac* ptrThis = (AudioDeviceMac*)userData;
+ RTC_DCHECK(ptrThis != NULL);
+
+ return ptrThis->implOutConverterProc(numberDataPackets, data);
+}
+
+OSStatus AudioDeviceMac::inDeviceIOProc(AudioDeviceID,
+ const AudioTimeStamp*,
+ const AudioBufferList* inputData,
+ const AudioTimeStamp* inputTime,
+ AudioBufferList*,
+ const AudioTimeStamp*,
+ void* clientData) {
+ AudioDeviceMac* ptrThis = (AudioDeviceMac*)clientData;
+ RTC_DCHECK(ptrThis != NULL);
+
+ ptrThis->implInDeviceIOProc(inputData, inputTime);
+
+ // AudioDeviceIOProc functions are supposed to return 0
+ return 0;
+}
+
+OSStatus AudioDeviceMac::inConverterProc(
+ AudioConverterRef,
+ UInt32* numberDataPackets,
+ AudioBufferList* data,
+ AudioStreamPacketDescription** /*dataPacketDescription*/,
+ void* userData) {
+ AudioDeviceMac* ptrThis = static_cast<AudioDeviceMac*>(userData);
+ RTC_DCHECK(ptrThis != NULL);
+
+ return ptrThis->implInConverterProc(numberDataPackets, data);
+}
+
+OSStatus AudioDeviceMac::implDeviceIOProc(const AudioBufferList* inputData,
+ const AudioTimeStamp* inputTime,
+ AudioBufferList* outputData,
+ const AudioTimeStamp* outputTime) {
+ OSStatus err = noErr;
+ UInt64 outputTimeNs = AudioConvertHostTimeToNanos(outputTime->mHostTime);
+ UInt64 nowNs = AudioConvertHostTimeToNanos(AudioGetCurrentHostTime());
+
+ if (!_twoDevices && _recording) {
+ implInDeviceIOProc(inputData, inputTime);
+ }
+
+ // Check if we should close down audio device
+ // Double-checked locking optimization to remove locking overhead
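+  // The flag is re-checked under the lock so that only one IOProc invocation
+  // performs the stop sequence.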
+ if (_doStop) {
+ MutexLock lock(&mutex_);
+ if (_doStop) {
+ if (_twoDevices || (!_recording && !_playing)) {
+ // In the case of a shared device, the single driving ioProc
+ // is stopped here
+ WEBRTC_CA_LOG_ERR(AudioDeviceStop(_outputDeviceID, _deviceIOProcID));
+ WEBRTC_CA_LOG_WARN(
+ AudioDeviceDestroyIOProcID(_outputDeviceID, _deviceIOProcID));
+ if (err == noErr) {
+ RTC_LOG(LS_VERBOSE) << "Playout or shared device stopped";
+ }
+ }
+
+ _doStop = false;
+ _stopEvent.Set();
+ return 0;
+ }
+ }
+
+ if (!_playing) {
+ // This can be the case when a shared device is capturing but not
+ // rendering. We allow the checks above before returning to avoid a
+ // timeout when capturing is stopped.
+ return 0;
+ }
+
+ RTC_DCHECK(_outStreamFormat.mBytesPerFrame != 0);
+ UInt32 size =
+ outputData->mBuffers->mDataByteSize / _outStreamFormat.mBytesPerFrame;
+
+ // TODO(xians): signal an error somehow?
+ err = AudioConverterFillComplexBuffer(_renderConverter, outConverterProc,
+ this, &size, outputData, NULL);
+ if (err != noErr) {
+ if (err == 1) {
+ // This is our own error.
+ RTC_LOG(LS_ERROR) << "Error in AudioConverterFillComplexBuffer()";
+ return 1;
+ } else {
+ logCAMsg(rtc::LS_ERROR, "Error in AudioConverterFillComplexBuffer()",
+ (const char*)&err);
+ return 1;
+ }
+ }
+
+ ring_buffer_size_t bufSizeSamples =
+ PaUtil_GetRingBufferReadAvailable(_paRenderBuffer);
+
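+  // The render delay is the time until this buffer reaches the output
+  // (host-time delta) plus the audio already queued in the ring buffer,
+  // expressed in microseconds.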
+ int32_t renderDelayUs =
+ static_cast<int32_t>(1e-3 * (outputTimeNs - nowNs) + 0.5);
+ renderDelayUs += static_cast<int32_t>(
+ (1.0e6 * bufSizeSamples) / _outDesiredFormat.mChannelsPerFrame /
+ _outDesiredFormat.mSampleRate +
+ 0.5);
+
+ _renderDelayUs = renderDelayUs;
+
+ return 0;
+}
+
+OSStatus AudioDeviceMac::implOutConverterProc(UInt32* numberDataPackets,
+ AudioBufferList* data) {
+ RTC_DCHECK(data->mNumberBuffers == 1);
+ ring_buffer_size_t numSamples =
+ *numberDataPackets * _outDesiredFormat.mChannelsPerFrame;
+
+ data->mBuffers->mNumberChannels = _outDesiredFormat.mChannelsPerFrame;
+ // Always give the converter as much as it wants, zero padding as required.
+ data->mBuffers->mDataByteSize =
+ *numberDataPackets * _outDesiredFormat.mBytesPerPacket;
+ data->mBuffers->mData = _renderConvertData;
+ memset(_renderConvertData, 0, sizeof(_renderConvertData));
+
+ PaUtil_ReadRingBuffer(_paRenderBuffer, _renderConvertData, numSamples);
+
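+  // Wake the render worker thread so it can refill the ring buffer.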
+ kern_return_t kernErr = semaphore_signal_all(_renderSemaphore);
+ if (kernErr != KERN_SUCCESS) {
+ RTC_LOG(LS_ERROR) << "semaphore_signal_all() error: " << kernErr;
+ return 1;
+ }
+
+ return 0;
+}
+
+OSStatus AudioDeviceMac::implInDeviceIOProc(const AudioBufferList* inputData,
+ const AudioTimeStamp* inputTime) {
+ OSStatus err = noErr;
+ UInt64 inputTimeNs = AudioConvertHostTimeToNanos(inputTime->mHostTime);
+ UInt64 nowNs = AudioConvertHostTimeToNanos(AudioGetCurrentHostTime());
+
+ // Check if we should close down audio device
+ // Double-checked locking optimization to remove locking overhead
+ if (_doStopRec) {
+ MutexLock lock(&mutex_);
+ if (_doStopRec) {
+ // This will be signalled only when a shared device is not in use.
+ WEBRTC_CA_LOG_ERR(AudioDeviceStop(_inputDeviceID, _inDeviceIOProcID));
+ WEBRTC_CA_LOG_WARN(
+ AudioDeviceDestroyIOProcID(_inputDeviceID, _inDeviceIOProcID));
+ if (err == noErr) {
+ RTC_LOG(LS_VERBOSE) << "Recording device stopped";
+ }
+
+ _doStopRec = false;
+ _stopEventRec.Set();
+ return 0;
+ }
+ }
+
+ if (!_recording) {
+    // The checks above must run even when not recording, to avoid a
+    // timeout when capture is being stopped.
+ return 0;
+ }
+
+ ring_buffer_size_t bufSizeSamples =
+ PaUtil_GetRingBufferReadAvailable(_paCaptureBuffer);
+
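+  // The capture delay is the age of this input buffer (now minus its host
+  // timestamp) plus the audio already queued in the ring buffer, expressed
+  // in microseconds.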
+ int32_t captureDelayUs =
+ static_cast<int32_t>(1e-3 * (nowNs - inputTimeNs) + 0.5);
+ captureDelayUs += static_cast<int32_t>((1.0e6 * bufSizeSamples) /
+ _inStreamFormat.mChannelsPerFrame /
+ _inStreamFormat.mSampleRate +
+ 0.5);
+
+ _captureDelayUs = captureDelayUs;
+
+ RTC_DCHECK(inputData->mNumberBuffers == 1);
+ ring_buffer_size_t numSamples = inputData->mBuffers->mDataByteSize *
+ _inStreamFormat.mChannelsPerFrame /
+ _inStreamFormat.mBytesPerPacket;
+ PaUtil_WriteRingBuffer(_paCaptureBuffer, inputData->mBuffers->mData,
+ numSamples);
+
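+  // Wake the capture worker thread so it can drain the ring buffer.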
+ kern_return_t kernErr = semaphore_signal_all(_captureSemaphore);
+ if (kernErr != KERN_SUCCESS) {
+ RTC_LOG(LS_ERROR) << "semaphore_signal_all() error: " << kernErr;
+ }
+
+ return err;
+}
+
+OSStatus AudioDeviceMac::implInConverterProc(UInt32* numberDataPackets,
+ AudioBufferList* data) {
+ RTC_DCHECK(data->mNumberBuffers == 1);
+ ring_buffer_size_t numSamples =
+ *numberDataPackets * _inStreamFormat.mChannelsPerFrame;
+
+ while (PaUtil_GetRingBufferReadAvailable(_paCaptureBuffer) < numSamples) {
+ mach_timespec_t timeout;
+ timeout.tv_sec = 0;
+ timeout.tv_nsec = TIMER_PERIOD_MS;
+
+ kern_return_t kernErr = semaphore_timedwait(_captureSemaphore, timeout);
+ if (kernErr == KERN_OPERATION_TIMED_OUT) {
+ int32_t signal = _captureDeviceIsAlive;
+ if (signal == 0) {
+ // The capture device is no longer alive; stop the worker thread.
+ *numberDataPackets = 0;
+ return 1;
+ }
+ } else if (kernErr != KERN_SUCCESS) {
+ RTC_LOG(LS_ERROR) << "semaphore_wait() error: " << kernErr;
+ }
+ }
+
+ // Pass the read pointer directly to the converter to avoid a memcpy.
+ void* dummyPtr;
+ ring_buffer_size_t dummySize;
+ PaUtil_GetRingBufferReadRegions(_paCaptureBuffer, numSamples,
+ &data->mBuffers->mData, &numSamples,
+ &dummyPtr, &dummySize);
+ PaUtil_AdvanceRingBufferReadIndex(_paCaptureBuffer, numSamples);
+
+ data->mBuffers->mNumberChannels = _inStreamFormat.mChannelsPerFrame;
+ *numberDataPackets = numSamples / _inStreamFormat.mChannelsPerFrame;
+ data->mBuffers->mDataByteSize =
+ *numberDataPackets * _inStreamFormat.mBytesPerPacket;
+
+ return 0;
+}
+
+bool AudioDeviceMac::RenderWorkerThread() {
+ ring_buffer_size_t numSamples =
+ ENGINE_PLAY_BUF_SIZE_IN_SAMPLES * _outDesiredFormat.mChannelsPerFrame;
+ while (PaUtil_GetRingBufferWriteAvailable(_paRenderBuffer) -
+ _renderDelayOffsetSamples <
+ numSamples) {
+ mach_timespec_t timeout;
+ timeout.tv_sec = 0;
+ timeout.tv_nsec = TIMER_PERIOD_MS;
+
+ kern_return_t kernErr = semaphore_timedwait(_renderSemaphore, timeout);
+ if (kernErr == KERN_OPERATION_TIMED_OUT) {
+ int32_t signal = _renderDeviceIsAlive;
+ if (signal == 0) {
+ // The render device is no longer alive; stop the worker thread.
+ return false;
+ }
+ } else if (kernErr != KERN_SUCCESS) {
+ RTC_LOG(LS_ERROR) << "semaphore_timedwait() error: " << kernErr;
+ }
+ }
+
+ int8_t playBuffer[4 * ENGINE_PLAY_BUF_SIZE_IN_SAMPLES];
+
+ if (!_ptrAudioBuffer) {
+ RTC_LOG(LS_ERROR) << "capture AudioBuffer is invalid";
+ return false;
+ }
+
+ // Ask for new PCM data to be played out using the AudioDeviceBuffer.
+ uint32_t nSamples =
+ _ptrAudioBuffer->RequestPlayoutData(ENGINE_PLAY_BUF_SIZE_IN_SAMPLES);
+
+ nSamples = _ptrAudioBuffer->GetPlayoutData(playBuffer);
+ if (nSamples != ENGINE_PLAY_BUF_SIZE_IN_SAMPLES) {
+ RTC_LOG(LS_ERROR) << "invalid number of output samples(" << nSamples << ")";
+ }
+
+ uint32_t nOutSamples = nSamples * _outDesiredFormat.mChannelsPerFrame;
+
+ SInt16* pPlayBuffer = (SInt16*)&playBuffer;
+ if (_macBookProPanRight && (_playChannels == 2)) {
+ // Mix entirely into the right channel and zero the left channel.
+ SInt32 sampleInt32 = 0;
+ for (uint32_t sampleIdx = 0; sampleIdx < nOutSamples; sampleIdx += 2) {
+ sampleInt32 = pPlayBuffer[sampleIdx];
+ sampleInt32 += pPlayBuffer[sampleIdx + 1];
+ sampleInt32 /= 2;
+
+ if (sampleInt32 > 32767) {
+ sampleInt32 = 32767;
+ } else if (sampleInt32 < -32768) {
+ sampleInt32 = -32768;
+ }
+
+ pPlayBuffer[sampleIdx] = 0;
+ pPlayBuffer[sampleIdx + 1] = static_cast<SInt16>(sampleInt32);
+ }
+ }
+
+ PaUtil_WriteRingBuffer(_paRenderBuffer, pPlayBuffer, nOutSamples);
+
+ return true;
+}
+
+bool AudioDeviceMac::CaptureWorkerThread() {
+ OSStatus err = noErr;
+ UInt32 noRecSamples =
+ ENGINE_REC_BUF_SIZE_IN_SAMPLES * _inDesiredFormat.mChannelsPerFrame;
+ SInt16 recordBuffer[noRecSamples];
+ UInt32 size = ENGINE_REC_BUF_SIZE_IN_SAMPLES;
+
+ AudioBufferList engineBuffer;
+ engineBuffer.mNumberBuffers = 1; // Interleaved channels.
+ engineBuffer.mBuffers->mNumberChannels = _inDesiredFormat.mChannelsPerFrame;
+ engineBuffer.mBuffers->mDataByteSize =
+ _inDesiredFormat.mBytesPerPacket * noRecSamples;
+ engineBuffer.mBuffers->mData = recordBuffer;
+
+ err = AudioConverterFillComplexBuffer(_captureConverter, inConverterProc,
+ this, &size, &engineBuffer, NULL);
+ if (err != noErr) {
+ if (err == 1) {
+ // This is our own error.
+ return false;
+ } else {
+ logCAMsg(rtc::LS_ERROR, "Error in AudioConverterFillComplexBuffer()",
+ (const char*)&err);
+ return false;
+ }
+ }
+
+ // TODO(xians): what if the returned size is incorrect?
+ if (size == ENGINE_REC_BUF_SIZE_IN_SAMPLES) {
+ int32_t msecOnPlaySide;
+ int32_t msecOnRecordSide;
+
+ int32_t captureDelayUs = _captureDelayUs;
+ int32_t renderDelayUs = _renderDelayUs;
+
+ msecOnPlaySide =
+ static_cast<int32_t>(1e-3 * (renderDelayUs + _renderLatencyUs) + 0.5);
+ msecOnRecordSide =
+ static_cast<int32_t>(1e-3 * (captureDelayUs + _captureLatencyUs) + 0.5);
+
+ if (!_ptrAudioBuffer) {
+ RTC_LOG(LS_ERROR) << "capture AudioBuffer is invalid";
+ return false;
+ }
+
+    // Store the recorded buffer (no action is taken if the number of
+    // recorded samples does not fill a complete buffer).
+ _ptrAudioBuffer->SetRecordedBuffer((int8_t*)&recordBuffer, (uint32_t)size);
+ _ptrAudioBuffer->SetVQEData(msecOnPlaySide, msecOnRecordSide);
+ _ptrAudioBuffer->SetTypingStatus(KeyPressed());
+
+    // Deliver the recorded samples, at the specified sample rate, mic level,
+    // etc., to the observer via the registered callback.
+ _ptrAudioBuffer->DeliverRecordedData();
+ }
+
+ return true;
+}
+
+bool AudioDeviceMac::KeyPressed() {
+ bool key_down = false;
+ // Loop through all Mac virtual key constant values.
+ for (unsigned int key_index = 0; key_index < arraysize(prev_key_state_);
+ ++key_index) {
+ bool keyState =
+ CGEventSourceKeyState(kCGEventSourceStateHIDSystemState, key_index);
+ // A false -> true change in keymap means a key is pressed.
+ key_down |= (keyState && !prev_key_state_[key_index]);
+ // Save current state.
+ prev_key_state_[key_index] = keyState;
+ }
+ return key_down;
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_device/mac/audio_device_mac.h b/third_party/libwebrtc/modules/audio_device/mac/audio_device_mac.h
new file mode 100644
index 0000000000..bb06395d03
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/mac/audio_device_mac.h
@@ -0,0 +1,350 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef AUDIO_DEVICE_AUDIO_DEVICE_MAC_H_
+#define AUDIO_DEVICE_AUDIO_DEVICE_MAC_H_
+
+#include <AudioToolbox/AudioConverter.h>
+#include <CoreAudio/CoreAudio.h>
+#include <mach/semaphore.h>
+
+#include <atomic>
+#include <memory>
+
+#include "absl/strings/string_view.h"
+#include "modules/audio_device/audio_device_generic.h"
+#include "modules/audio_device/mac/audio_mixer_manager_mac.h"
+#include "rtc_base/event.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/platform_thread.h"
+#include "rtc_base/synchronization/mutex.h"
+#include "rtc_base/thread_annotations.h"
+
+struct PaUtilRingBuffer;
+
+namespace webrtc {
+
+const uint32_t N_REC_SAMPLES_PER_SEC = 48000;
+const uint32_t N_PLAY_SAMPLES_PER_SEC = 48000;
+
+const uint32_t N_REC_CHANNELS = 1; // default is mono recording
+const uint32_t N_PLAY_CHANNELS = 2; // default is stereo playout
+const uint32_t N_DEVICE_CHANNELS = 64;
+
+const int kBufferSizeMs = 10;
+
+const uint32_t ENGINE_REC_BUF_SIZE_IN_SAMPLES =
+ N_REC_SAMPLES_PER_SEC * kBufferSizeMs / 1000;
+const uint32_t ENGINE_PLAY_BUF_SIZE_IN_SAMPLES =
+ N_PLAY_SAMPLES_PER_SEC * kBufferSizeMs / 1000;
+
+const int N_BLOCKS_IO = 2;
+const int N_BUFFERS_IN = 2; // Must be at least N_BLOCKS_IO.
+const int N_BUFFERS_OUT = 3; // Must be at least N_BLOCKS_IO.
+
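+// Despite the "MS" suffix, this value is expressed in nanoseconds (40 ms for
+// N_BLOCKS_IO == 2); it is used as the mach semaphore_timedwait() timeout.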
+const uint32_t TIMER_PERIOD_MS = 2 * 10 * N_BLOCKS_IO * 1000000;
+
+const uint32_t REC_BUF_SIZE_IN_SAMPLES =
+ ENGINE_REC_BUF_SIZE_IN_SAMPLES * N_DEVICE_CHANNELS * N_BUFFERS_IN;
+const uint32_t PLAY_BUF_SIZE_IN_SAMPLES =
+ ENGINE_PLAY_BUF_SIZE_IN_SAMPLES * N_PLAY_CHANNELS * N_BUFFERS_OUT;
+
+const int kGetMicVolumeIntervalMs = 1000;
+
+class AudioDeviceMac : public AudioDeviceGeneric {
+ public:
+ AudioDeviceMac();
+ ~AudioDeviceMac();
+
+ // Retrieve the currently utilized audio layer
+ virtual int32_t ActiveAudioLayer(
+ AudioDeviceModule::AudioLayer& audioLayer) const;
+
+  // Main initialization and termination
+ virtual InitStatus Init() RTC_LOCKS_EXCLUDED(mutex_);
+ virtual int32_t Terminate() RTC_LOCKS_EXCLUDED(mutex_);
+ virtual bool Initialized() const;
+
+ // Device enumeration
+ virtual int16_t PlayoutDevices();
+ virtual int16_t RecordingDevices();
+ virtual int32_t PlayoutDeviceName(uint16_t index,
+ char name[kAdmMaxDeviceNameSize],
+ char guid[kAdmMaxGuidSize]);
+ virtual int32_t RecordingDeviceName(uint16_t index,
+ char name[kAdmMaxDeviceNameSize],
+ char guid[kAdmMaxGuidSize]);
+
+ // Device selection
+ virtual int32_t SetPlayoutDevice(uint16_t index) RTC_LOCKS_EXCLUDED(mutex_);
+ virtual int32_t SetPlayoutDevice(AudioDeviceModule::WindowsDeviceType device);
+ virtual int32_t SetRecordingDevice(uint16_t index);
+ virtual int32_t SetRecordingDevice(
+ AudioDeviceModule::WindowsDeviceType device);
+
+ // Audio transport initialization
+ virtual int32_t PlayoutIsAvailable(bool& available);
+ virtual int32_t InitPlayout() RTC_LOCKS_EXCLUDED(mutex_);
+ virtual bool PlayoutIsInitialized() const;
+ virtual int32_t RecordingIsAvailable(bool& available);
+ virtual int32_t InitRecording() RTC_LOCKS_EXCLUDED(mutex_);
+ virtual bool RecordingIsInitialized() const;
+
+ // Audio transport control
+ virtual int32_t StartPlayout() RTC_LOCKS_EXCLUDED(mutex_);
+ virtual int32_t StopPlayout() RTC_LOCKS_EXCLUDED(mutex_);
+ virtual bool Playing() const;
+ virtual int32_t StartRecording() RTC_LOCKS_EXCLUDED(mutex_);
+ virtual int32_t StopRecording() RTC_LOCKS_EXCLUDED(mutex_);
+ virtual bool Recording() const;
+
+ // Audio mixer initialization
+ virtual int32_t InitSpeaker() RTC_LOCKS_EXCLUDED(mutex_);
+ virtual bool SpeakerIsInitialized() const;
+ virtual int32_t InitMicrophone() RTC_LOCKS_EXCLUDED(mutex_);
+ virtual bool MicrophoneIsInitialized() const;
+
+ // Speaker volume controls
+ virtual int32_t SpeakerVolumeIsAvailable(bool& available)
+ RTC_LOCKS_EXCLUDED(mutex_);
+ virtual int32_t SetSpeakerVolume(uint32_t volume);
+ virtual int32_t SpeakerVolume(uint32_t& volume) const;
+ virtual int32_t MaxSpeakerVolume(uint32_t& maxVolume) const;
+ virtual int32_t MinSpeakerVolume(uint32_t& minVolume) const;
+
+ // Microphone volume controls
+ virtual int32_t MicrophoneVolumeIsAvailable(bool& available)
+ RTC_LOCKS_EXCLUDED(mutex_);
+ virtual int32_t SetMicrophoneVolume(uint32_t volume);
+ virtual int32_t MicrophoneVolume(uint32_t& volume) const;
+ virtual int32_t MaxMicrophoneVolume(uint32_t& maxVolume) const;
+ virtual int32_t MinMicrophoneVolume(uint32_t& minVolume) const;
+
+ // Microphone mute control
+ virtual int32_t MicrophoneMuteIsAvailable(bool& available)
+ RTC_LOCKS_EXCLUDED(mutex_);
+ virtual int32_t SetMicrophoneMute(bool enable);
+ virtual int32_t MicrophoneMute(bool& enabled) const;
+
+ // Speaker mute control
+ virtual int32_t SpeakerMuteIsAvailable(bool& available)
+ RTC_LOCKS_EXCLUDED(mutex_);
+ virtual int32_t SetSpeakerMute(bool enable);
+ virtual int32_t SpeakerMute(bool& enabled) const;
+
+ // Stereo support
+ virtual int32_t StereoPlayoutIsAvailable(bool& available)
+ RTC_LOCKS_EXCLUDED(mutex_);
+ virtual int32_t SetStereoPlayout(bool enable);
+ virtual int32_t StereoPlayout(bool& enabled) const;
+ virtual int32_t StereoRecordingIsAvailable(bool& available);
+ virtual int32_t SetStereoRecording(bool enable);
+ virtual int32_t StereoRecording(bool& enabled) const;
+
+ // Delay information and control
+ virtual int32_t PlayoutDelay(uint16_t& delayMS) const;
+
+ virtual void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer)
+ RTC_LOCKS_EXCLUDED(mutex_);
+
+ private:
+ int32_t InitSpeakerLocked() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+ int32_t InitMicrophoneLocked() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+
+ virtual int32_t MicrophoneIsAvailable(bool& available)
+ RTC_LOCKS_EXCLUDED(mutex_);
+ virtual int32_t MicrophoneIsAvailableLocked(bool& available)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+ virtual int32_t SpeakerIsAvailable(bool& available)
+ RTC_LOCKS_EXCLUDED(mutex_);
+ virtual int32_t SpeakerIsAvailableLocked(bool& available)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+
+ static void AtomicSet32(int32_t* theValue, int32_t newValue);
+ static int32_t AtomicGet32(int32_t* theValue);
+
+ static void logCAMsg(rtc::LoggingSeverity sev,
+ const char* msg,
+ const char* err);
+
+ int32_t GetNumberDevices(AudioObjectPropertyScope scope,
+ AudioDeviceID scopedDeviceIds[],
+ uint32_t deviceListLength);
+
+ int32_t GetDeviceName(AudioObjectPropertyScope scope,
+ uint16_t index,
+ rtc::ArrayView<char> name);
+
+ int32_t InitDevice(uint16_t userDeviceIndex,
+ AudioDeviceID& deviceId,
+ bool isInput);
+
+ // Always work with our preferred playout format inside VoE.
+ // Then convert the output to the OS setting using an AudioConverter.
+ OSStatus SetDesiredPlayoutFormat();
+
+ static OSStatus objectListenerProc(
+ AudioObjectID objectId,
+ UInt32 numberAddresses,
+ const AudioObjectPropertyAddress addresses[],
+ void* clientData);
+
+ OSStatus implObjectListenerProc(AudioObjectID objectId,
+ UInt32 numberAddresses,
+ const AudioObjectPropertyAddress addresses[]);
+
+ int32_t HandleDeviceChange();
+
+ int32_t HandleStreamFormatChange(AudioObjectID objectId,
+ AudioObjectPropertyAddress propertyAddress);
+
+ int32_t HandleDataSourceChange(AudioObjectID objectId,
+ AudioObjectPropertyAddress propertyAddress);
+
+ int32_t HandleProcessorOverload(AudioObjectPropertyAddress propertyAddress);
+
+ static OSStatus deviceIOProc(AudioDeviceID device,
+ const AudioTimeStamp* now,
+ const AudioBufferList* inputData,
+ const AudioTimeStamp* inputTime,
+ AudioBufferList* outputData,
+ const AudioTimeStamp* outputTime,
+ void* clientData);
+
+ static OSStatus outConverterProc(
+ AudioConverterRef audioConverter,
+ UInt32* numberDataPackets,
+ AudioBufferList* data,
+ AudioStreamPacketDescription** dataPacketDescription,
+ void* userData);
+
+ static OSStatus inDeviceIOProc(AudioDeviceID device,
+ const AudioTimeStamp* now,
+ const AudioBufferList* inputData,
+ const AudioTimeStamp* inputTime,
+ AudioBufferList* outputData,
+ const AudioTimeStamp* outputTime,
+ void* clientData);
+
+ static OSStatus inConverterProc(
+ AudioConverterRef audioConverter,
+ UInt32* numberDataPackets,
+ AudioBufferList* data,
+ AudioStreamPacketDescription** dataPacketDescription,
+ void* inUserData);
+
+ OSStatus implDeviceIOProc(const AudioBufferList* inputData,
+ const AudioTimeStamp* inputTime,
+ AudioBufferList* outputData,
+ const AudioTimeStamp* outputTime)
+ RTC_LOCKS_EXCLUDED(mutex_);
+
+ OSStatus implOutConverterProc(UInt32* numberDataPackets,
+ AudioBufferList* data);
+
+ OSStatus implInDeviceIOProc(const AudioBufferList* inputData,
+ const AudioTimeStamp* inputTime)
+ RTC_LOCKS_EXCLUDED(mutex_);
+
+ OSStatus implInConverterProc(UInt32* numberDataPackets,
+ AudioBufferList* data);
+
+ static void RunCapture(void*);
+ static void RunRender(void*);
+ bool CaptureWorkerThread();
+ bool RenderWorkerThread();
+
+ bool KeyPressed();
+
+ AudioDeviceBuffer* _ptrAudioBuffer;
+
+ Mutex mutex_;
+
+ rtc::Event _stopEventRec;
+ rtc::Event _stopEvent;
+
+ // Only valid/running between calls to StartRecording and StopRecording.
+ rtc::PlatformThread capture_worker_thread_;
+
+ // Only valid/running between calls to StartPlayout and StopPlayout.
+ rtc::PlatformThread render_worker_thread_;
+
+ AudioMixerManagerMac _mixerManager;
+
+ uint16_t _inputDeviceIndex;
+ uint16_t _outputDeviceIndex;
+ AudioDeviceID _inputDeviceID;
+ AudioDeviceID _outputDeviceID;
+#if __MAC_OS_X_VERSION_MAX_ALLOWED >= 1050
+ AudioDeviceIOProcID _inDeviceIOProcID;
+ AudioDeviceIOProcID _deviceIOProcID;
+#endif
+ bool _inputDeviceIsSpecified;
+ bool _outputDeviceIsSpecified;
+
+ uint8_t _recChannels;
+ uint8_t _playChannels;
+
+ Float32* _captureBufData;
+ SInt16* _renderBufData;
+
+ SInt16 _renderConvertData[PLAY_BUF_SIZE_IN_SAMPLES];
+
+ bool _initialized;
+ bool _isShutDown;
+ bool _recording;
+ bool _playing;
+ bool _recIsInitialized;
+ bool _playIsInitialized;
+
+  // Atomically set variables
+ std::atomic<int32_t> _renderDeviceIsAlive;
+ std::atomic<int32_t> _captureDeviceIsAlive;
+
+ bool _twoDevices;
+ bool _doStop; // For play if not shared device or play+rec if shared device
+ bool _doStopRec; // For rec if not shared device
+ bool _macBookPro;
+ bool _macBookProPanRight;
+
+ AudioConverterRef _captureConverter;
+ AudioConverterRef _renderConverter;
+
+ AudioStreamBasicDescription _outStreamFormat;
+ AudioStreamBasicDescription _outDesiredFormat;
+ AudioStreamBasicDescription _inStreamFormat;
+ AudioStreamBasicDescription _inDesiredFormat;
+
+ uint32_t _captureLatencyUs;
+ uint32_t _renderLatencyUs;
+
+ // Atomically set variables
+ mutable std::atomic<int32_t> _captureDelayUs;
+ mutable std::atomic<int32_t> _renderDelayUs;
+
+ int32_t _renderDelayOffsetSamples;
+
+ PaUtilRingBuffer* _paCaptureBuffer;
+ PaUtilRingBuffer* _paRenderBuffer;
+
+ semaphore_t _renderSemaphore;
+ semaphore_t _captureSemaphore;
+
+ int _captureBufSizeSamples;
+ int _renderBufSizeSamples;
+
+ // Typing detection
+  // 0x5c is key "9"; after that come the function keys.
+ bool prev_key_state_[0x5d];
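+  // KeyPressed() presumably polls each virtual keycode below 0x5d and
+  // diffs it against prev_key_state_; on macOS this is typically done
+  // with the Quartz call (an assumption about the .cc implementation):
+  //   bool down = CGEventSourceKeyState(kCGEventSourceStateHIDSystemState,
+  //                                     keycode);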
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_DEVICE_MAIN_SOURCE_MAC_AUDIO_DEVICE_MAC_H_
diff --git a/third_party/libwebrtc/modules/audio_device/mac/audio_mixer_manager_mac.cc b/third_party/libwebrtc/modules/audio_device/mac/audio_mixer_manager_mac.cc
new file mode 100644
index 0000000000..942e7db3b3
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/mac/audio_mixer_manager_mac.cc
@@ -0,0 +1,924 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_device/mac/audio_mixer_manager_mac.h"
+
+#include <unistd.h> // getpid()
+
+#include "rtc_base/system/arch.h"
+
+namespace webrtc {
+
+#define WEBRTC_CA_RETURN_ON_ERR(expr) \
+ do { \
+ err = expr; \
+ if (err != noErr) { \
+ logCAMsg(rtc::LS_ERROR, "Error in " #expr, (const char*)&err); \
+ return -1; \
+ } \
+ } while (0)
+
+#define WEBRTC_CA_LOG_ERR(expr) \
+ do { \
+ err = expr; \
+ if (err != noErr) { \
+ logCAMsg(rtc::LS_ERROR, "Error in " #expr, (const char*)&err); \
+ } \
+ } while (0)
+
+#define WEBRTC_CA_LOG_WARN(expr) \
+ do { \
+ err = expr; \
+ if (err != noErr) { \
+ logCAMsg(rtc::LS_WARNING, "Error in " #expr, (const char*)&err); \
+ } \
+ } while (0)
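+
+// Usage pattern for the three macros above: an OSStatus named `err` must
+// already be in scope, and WEBRTC_CA_RETURN_ON_ERR additionally requires
+// the enclosing function to return a type to which -1 converts, e.g.
+//   OSStatus err = noErr;
+//   WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
+//       deviceID, &propertyAddress, 0, NULL, &size, &value));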
+
+AudioMixerManagerMac::AudioMixerManagerMac()
+ : _inputDeviceID(kAudioObjectUnknown),
+ _outputDeviceID(kAudioObjectUnknown),
+ _noInputChannels(0),
+ _noOutputChannels(0) {
+ RTC_DLOG(LS_INFO) << __FUNCTION__ << " created";
+}
+
+AudioMixerManagerMac::~AudioMixerManagerMac() {
+ RTC_DLOG(LS_INFO) << __FUNCTION__ << " destroyed";
+ Close();
+}
+
+// ============================================================================
+// PUBLIC METHODS
+// ============================================================================
+
+int32_t AudioMixerManagerMac::Close() {
+ RTC_DLOG(LS_VERBOSE) << __FUNCTION__;
+
+ MutexLock lock(&mutex_);
+
+ CloseSpeakerLocked();
+ CloseMicrophoneLocked();
+
+ return 0;
+}
+
+int32_t AudioMixerManagerMac::CloseSpeaker() {
+ MutexLock lock(&mutex_);
+ return CloseSpeakerLocked();
+}
+
+int32_t AudioMixerManagerMac::CloseSpeakerLocked() {
+ RTC_DLOG(LS_VERBOSE) << __FUNCTION__;
+
+ _outputDeviceID = kAudioObjectUnknown;
+ _noOutputChannels = 0;
+
+ return 0;
+}
+
+int32_t AudioMixerManagerMac::CloseMicrophone() {
+ MutexLock lock(&mutex_);
+ return CloseMicrophoneLocked();
+}
+
+int32_t AudioMixerManagerMac::CloseMicrophoneLocked() {
+ RTC_DLOG(LS_VERBOSE) << __FUNCTION__;
+
+ _inputDeviceID = kAudioObjectUnknown;
+ _noInputChannels = 0;
+
+ return 0;
+}
+
+int32_t AudioMixerManagerMac::OpenSpeaker(AudioDeviceID deviceID) {
+ RTC_LOG(LS_VERBOSE) << "AudioMixerManagerMac::OpenSpeaker(id=" << deviceID
+ << ")";
+
+ MutexLock lock(&mutex_);
+
+ OSStatus err = noErr;
+ UInt32 size = 0;
+ pid_t hogPid = -1;
+
+ _outputDeviceID = deviceID;
+
+ // Check which process, if any, has hogged the device.
+ AudioObjectPropertyAddress propertyAddress = {
+ kAudioDevicePropertyHogMode, kAudioDevicePropertyScopeOutput, 0};
+
+ // First, does it have the property? Aggregate devices don't.
+ if (AudioObjectHasProperty(_outputDeviceID, &propertyAddress)) {
+ size = sizeof(hogPid);
+ WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
+ _outputDeviceID, &propertyAddress, 0, NULL, &size, &hogPid));
+
+ if (hogPid == -1) {
+ RTC_LOG(LS_VERBOSE) << "No process has hogged the output device";
+ }
+ // getpid() is apparently "always successful"
+ else if (hogPid == getpid()) {
+ RTC_LOG(LS_VERBOSE) << "Our process has hogged the output device";
+ } else {
+ RTC_LOG(LS_WARNING) << "Another process (pid = "
+ << static_cast<int>(hogPid)
+ << ") has hogged the output device";
+
+ return -1;
+ }
+ }
+
+ // get number of channels from stream format
+ propertyAddress.mSelector = kAudioDevicePropertyStreamFormat;
+
+ // Get the stream format, to be able to read the number of channels.
+ AudioStreamBasicDescription streamFormat;
+ size = sizeof(AudioStreamBasicDescription);
+ memset(&streamFormat, 0, size);
+ WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
+ _outputDeviceID, &propertyAddress, 0, NULL, &size, &streamFormat));
+
+ _noOutputChannels = streamFormat.mChannelsPerFrame;
+
+ return 0;
+}
+
+int32_t AudioMixerManagerMac::OpenMicrophone(AudioDeviceID deviceID) {
+ RTC_LOG(LS_VERBOSE) << "AudioMixerManagerMac::OpenMicrophone(id=" << deviceID
+ << ")";
+
+ MutexLock lock(&mutex_);
+
+ OSStatus err = noErr;
+ UInt32 size = 0;
+ pid_t hogPid = -1;
+
+ _inputDeviceID = deviceID;
+
+ // Check which process, if any, has hogged the device.
+ AudioObjectPropertyAddress propertyAddress = {
+ kAudioDevicePropertyHogMode, kAudioDevicePropertyScopeInput, 0};
+ size = sizeof(hogPid);
+ WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
+ _inputDeviceID, &propertyAddress, 0, NULL, &size, &hogPid));
+ if (hogPid == -1) {
+ RTC_LOG(LS_VERBOSE) << "No process has hogged the input device";
+ }
+ // getpid() is apparently "always successful"
+ else if (hogPid == getpid()) {
+ RTC_LOG(LS_VERBOSE) << "Our process has hogged the input device";
+ } else {
+ RTC_LOG(LS_WARNING) << "Another process (pid = " << static_cast<int>(hogPid)
+ << ") has hogged the input device";
+
+ return -1;
+ }
+
+ // get number of channels from stream format
+ propertyAddress.mSelector = kAudioDevicePropertyStreamFormat;
+
+ // Get the stream format, to be able to read the number of channels.
+ AudioStreamBasicDescription streamFormat;
+ size = sizeof(AudioStreamBasicDescription);
+ memset(&streamFormat, 0, size);
+ WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
+ _inputDeviceID, &propertyAddress, 0, NULL, &size, &streamFormat));
+
+ _noInputChannels = streamFormat.mChannelsPerFrame;
+
+ return 0;
+}
+
+bool AudioMixerManagerMac::SpeakerIsInitialized() const {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+
+ return (_outputDeviceID != kAudioObjectUnknown);
+}
+
+bool AudioMixerManagerMac::MicrophoneIsInitialized() const {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+
+ return (_inputDeviceID != kAudioObjectUnknown);
+}
+
+int32_t AudioMixerManagerMac::SetSpeakerVolume(uint32_t volume) {
+ RTC_LOG(LS_VERBOSE) << "AudioMixerManagerMac::SetSpeakerVolume(volume="
+ << volume << ")";
+
+ MutexLock lock(&mutex_);
+
+ if (_outputDeviceID == kAudioObjectUnknown) {
+ RTC_LOG(LS_WARNING) << "device ID has not been set";
+ return -1;
+ }
+
+ OSStatus err = noErr;
+ UInt32 size = 0;
+ bool success = false;
+
+  // volume range is 0.0 - 1.0, convert from 0 - 255
+ const Float32 vol = (Float32)(volume / 255.0);
+
+ RTC_DCHECK(vol <= 1.0 && vol >= 0.0);
+
+  // Does the render device have a master volume control?
+ // If so, use it exclusively.
+ AudioObjectPropertyAddress propertyAddress = {
+ kAudioDevicePropertyVolumeScalar, kAudioDevicePropertyScopeOutput, 0};
+ Boolean isSettable = false;
+ err = AudioObjectIsPropertySettable(_outputDeviceID, &propertyAddress,
+ &isSettable);
+ if (err == noErr && isSettable) {
+ size = sizeof(vol);
+ WEBRTC_CA_RETURN_ON_ERR(AudioObjectSetPropertyData(
+ _outputDeviceID, &propertyAddress, 0, NULL, size, &vol));
+
+ return 0;
+ }
+
+ // Otherwise try to set each channel.
+ for (UInt32 i = 1; i <= _noOutputChannels; i++) {
+ propertyAddress.mElement = i;
+ isSettable = false;
+ err = AudioObjectIsPropertySettable(_outputDeviceID, &propertyAddress,
+ &isSettable);
+    if (err == noErr && isSettable) {
+      size = sizeof(vol);
+      WEBRTC_CA_RETURN_ON_ERR(AudioObjectSetPropertyData(
+          _outputDeviceID, &propertyAddress, 0, NULL, size, &vol));
+      // Only report success if at least one channel volume was set.
+      success = true;
+    }
+ }
+
+ if (!success) {
+ RTC_LOG(LS_WARNING) << "Unable to set a volume on any output channel";
+ return -1;
+ }
+
+ return 0;
+}
+
+int32_t AudioMixerManagerMac::SpeakerVolume(uint32_t& volume) const {
+ if (_outputDeviceID == kAudioObjectUnknown) {
+ RTC_LOG(LS_WARNING) << "device ID has not been set";
+ return -1;
+ }
+
+ OSStatus err = noErr;
+ UInt32 size = 0;
+ unsigned int channels = 0;
+ Float32 channelVol = 0;
+ Float32 vol = 0;
+
+ // Does the device have a master volume control?
+ // If so, use it exclusively.
+ AudioObjectPropertyAddress propertyAddress = {
+ kAudioDevicePropertyVolumeScalar, kAudioDevicePropertyScopeOutput, 0};
+ Boolean hasProperty =
+ AudioObjectHasProperty(_outputDeviceID, &propertyAddress);
+ if (hasProperty) {
+ size = sizeof(vol);
+ WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
+ _outputDeviceID, &propertyAddress, 0, NULL, &size, &vol));
+
+ // vol 0.0 to 1.0 -> convert to 0 - 255
+ volume = static_cast<uint32_t>(vol * 255 + 0.5);
+ } else {
+ // Otherwise get the average volume across channels.
+ vol = 0;
+ for (UInt32 i = 1; i <= _noOutputChannels; i++) {
+ channelVol = 0;
+ propertyAddress.mElement = i;
+ hasProperty = AudioObjectHasProperty(_outputDeviceID, &propertyAddress);
+ if (hasProperty) {
+ size = sizeof(channelVol);
+ WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
+ _outputDeviceID, &propertyAddress, 0, NULL, &size, &channelVol));
+
+ vol += channelVol;
+ channels++;
+ }
+ }
+
+ if (channels == 0) {
+ RTC_LOG(LS_WARNING) << "Unable to get a volume on any channel";
+ return -1;
+ }
+
+ RTC_DCHECK_GT(channels, 0);
+ // vol 0.0 to 1.0 -> convert to 0 - 255
+ volume = static_cast<uint32_t>(255 * vol / channels + 0.5);
+ }
+
+ RTC_LOG(LS_VERBOSE) << "AudioMixerManagerMac::SpeakerVolume() => vol=" << vol;
+
+ return 0;
+}
+
+int32_t AudioMixerManagerMac::MaxSpeakerVolume(uint32_t& maxVolume) const {
+ if (_outputDeviceID == kAudioObjectUnknown) {
+ RTC_LOG(LS_WARNING) << "device ID has not been set";
+ return -1;
+ }
+
+ // volume range is 0.0 to 1.0
+ // we convert that to 0 - 255
+ maxVolume = 255;
+
+ return 0;
+}
+
+int32_t AudioMixerManagerMac::MinSpeakerVolume(uint32_t& minVolume) const {
+ if (_outputDeviceID == kAudioObjectUnknown) {
+ RTC_LOG(LS_WARNING) << "device ID has not been set";
+ return -1;
+ }
+
+ // volume range is 0.0 to 1.0
+ // we convert that to 0 - 255
+ minVolume = 0;
+
+ return 0;
+}
+
+int32_t AudioMixerManagerMac::SpeakerVolumeIsAvailable(bool& available) {
+ if (_outputDeviceID == kAudioObjectUnknown) {
+ RTC_LOG(LS_WARNING) << "device ID has not been set";
+ return -1;
+ }
+
+ OSStatus err = noErr;
+
+  // Does the render device have a master volume control?
+ // If so, use it exclusively.
+ AudioObjectPropertyAddress propertyAddress = {
+ kAudioDevicePropertyVolumeScalar, kAudioDevicePropertyScopeOutput, 0};
+ Boolean isSettable = false;
+ err = AudioObjectIsPropertySettable(_outputDeviceID, &propertyAddress,
+ &isSettable);
+ if (err == noErr && isSettable) {
+ available = true;
+ return 0;
+ }
+
+  // Otherwise check whether each channel's volume is settable.
+ for (UInt32 i = 1; i <= _noOutputChannels; i++) {
+ propertyAddress.mElement = i;
+ isSettable = false;
+ err = AudioObjectIsPropertySettable(_outputDeviceID, &propertyAddress,
+ &isSettable);
+ if (err != noErr || !isSettable) {
+ available = false;
+ RTC_LOG(LS_WARNING) << "Volume cannot be set for output channel " << i
+ << ", err=" << err;
+ return -1;
+ }
+ }
+
+ available = true;
+ return 0;
+}
+
+int32_t AudioMixerManagerMac::SpeakerMuteIsAvailable(bool& available) {
+ if (_outputDeviceID == kAudioObjectUnknown) {
+ RTC_LOG(LS_WARNING) << "device ID has not been set";
+ return -1;
+ }
+
+ OSStatus err = noErr;
+
+  // Does the render device have a master mute control?
+ // If so, use it exclusively.
+ AudioObjectPropertyAddress propertyAddress = {
+ kAudioDevicePropertyMute, kAudioDevicePropertyScopeOutput, 0};
+ Boolean isSettable = false;
+ err = AudioObjectIsPropertySettable(_outputDeviceID, &propertyAddress,
+ &isSettable);
+ if (err == noErr && isSettable) {
+ available = true;
+ return 0;
+ }
+
+  // Otherwise check whether each channel's mute is settable.
+ for (UInt32 i = 1; i <= _noOutputChannels; i++) {
+ propertyAddress.mElement = i;
+ isSettable = false;
+ err = AudioObjectIsPropertySettable(_outputDeviceID, &propertyAddress,
+ &isSettable);
+ if (err != noErr || !isSettable) {
+ available = false;
+ RTC_LOG(LS_WARNING) << "Mute cannot be set for output channel " << i
+ << ", err=" << err;
+ return -1;
+ }
+ }
+
+ available = true;
+ return 0;
+}
+
+int32_t AudioMixerManagerMac::SetSpeakerMute(bool enable) {
+ RTC_LOG(LS_VERBOSE) << "AudioMixerManagerMac::SetSpeakerMute(enable="
+ << enable << ")";
+
+ MutexLock lock(&mutex_);
+
+ if (_outputDeviceID == kAudioObjectUnknown) {
+ RTC_LOG(LS_WARNING) << "device ID has not been set";
+ return -1;
+ }
+
+ OSStatus err = noErr;
+ UInt32 size = 0;
+ UInt32 mute = enable ? 1 : 0;
+ bool success = false;
+
+ // Does the render device have a master mute control?
+ // If so, use it exclusively.
+ AudioObjectPropertyAddress propertyAddress = {
+ kAudioDevicePropertyMute, kAudioDevicePropertyScopeOutput, 0};
+ Boolean isSettable = false;
+ err = AudioObjectIsPropertySettable(_outputDeviceID, &propertyAddress,
+ &isSettable);
+ if (err == noErr && isSettable) {
+ size = sizeof(mute);
+ WEBRTC_CA_RETURN_ON_ERR(AudioObjectSetPropertyData(
+ _outputDeviceID, &propertyAddress, 0, NULL, size, &mute));
+
+ return 0;
+ }
+
+ // Otherwise try to set each channel.
+ for (UInt32 i = 1; i <= _noOutputChannels; i++) {
+ propertyAddress.mElement = i;
+ isSettable = false;
+ err = AudioObjectIsPropertySettable(_outputDeviceID, &propertyAddress,
+ &isSettable);
+    if (err == noErr && isSettable) {
+      size = sizeof(mute);
+      WEBRTC_CA_RETURN_ON_ERR(AudioObjectSetPropertyData(
+          _outputDeviceID, &propertyAddress, 0, NULL, size, &mute));
+      // Only report success if at least one channel mute was set.
+      success = true;
+    }
+ }
+
+ if (!success) {
+ RTC_LOG(LS_WARNING) << "Unable to set mute on any input channel";
+ return -1;
+ }
+
+ return 0;
+}
+
+int32_t AudioMixerManagerMac::SpeakerMute(bool& enabled) const {
+ if (_outputDeviceID == kAudioObjectUnknown) {
+ RTC_LOG(LS_WARNING) << "device ID has not been set";
+ return -1;
+ }
+
+ OSStatus err = noErr;
+ UInt32 size = 0;
+ unsigned int channels = 0;
+ UInt32 channelMuted = 0;
+ UInt32 muted = 0;
+
+  // Does the device have a master mute control?
+ // If so, use it exclusively.
+ AudioObjectPropertyAddress propertyAddress = {
+ kAudioDevicePropertyMute, kAudioDevicePropertyScopeOutput, 0};
+ Boolean hasProperty =
+ AudioObjectHasProperty(_outputDeviceID, &propertyAddress);
+ if (hasProperty) {
+ size = sizeof(muted);
+ WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
+ _outputDeviceID, &propertyAddress, 0, NULL, &size, &muted));
+
+ // 1 means muted
+ enabled = static_cast<bool>(muted);
+ } else {
+    // Otherwise check if all channels are muted. Start with "muted" set
+    // and AND in each channel; resetting `muted` inside the loop would
+    // force the accumulated result to stay 0.
+    muted = 1;
+    for (UInt32 i = 1; i <= _noOutputChannels; i++) {
+      propertyAddress.mElement = i;
+ hasProperty = AudioObjectHasProperty(_outputDeviceID, &propertyAddress);
+ if (hasProperty) {
+ size = sizeof(channelMuted);
+ WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
+ _outputDeviceID, &propertyAddress, 0, NULL, &size, &channelMuted));
+
+ muted = (muted && channelMuted);
+ channels++;
+ }
+ }
+
+ if (channels == 0) {
+ RTC_LOG(LS_WARNING) << "Unable to get mute for any channel";
+ return -1;
+ }
+
+ RTC_DCHECK_GT(channels, 0);
+ // 1 means muted
+ enabled = static_cast<bool>(muted);
+ }
+
+ RTC_LOG(LS_VERBOSE) << "AudioMixerManagerMac::SpeakerMute() => enabled="
+ << enabled;
+
+ return 0;
+}
+
+int32_t AudioMixerManagerMac::StereoPlayoutIsAvailable(bool& available) {
+ if (_outputDeviceID == kAudioObjectUnknown) {
+ RTC_LOG(LS_WARNING) << "device ID has not been set";
+ return -1;
+ }
+
+ available = (_noOutputChannels == 2);
+ return 0;
+}
+
+int32_t AudioMixerManagerMac::StereoRecordingIsAvailable(bool& available) {
+ if (_inputDeviceID == kAudioObjectUnknown) {
+ RTC_LOG(LS_WARNING) << "device ID has not been set";
+ return -1;
+ }
+
+ available = (_noInputChannels == 2);
+ return 0;
+}
+
+int32_t AudioMixerManagerMac::MicrophoneMuteIsAvailable(bool& available) {
+ if (_inputDeviceID == kAudioObjectUnknown) {
+ RTC_LOG(LS_WARNING) << "device ID has not been set";
+ return -1;
+ }
+
+ OSStatus err = noErr;
+
+ // Does the capture device have a master mute control?
+ // If so, use it exclusively.
+ AudioObjectPropertyAddress propertyAddress = {
+ kAudioDevicePropertyMute, kAudioDevicePropertyScopeInput, 0};
+ Boolean isSettable = false;
+ err = AudioObjectIsPropertySettable(_inputDeviceID, &propertyAddress,
+ &isSettable);
+ if (err == noErr && isSettable) {
+ available = true;
+ return 0;
+ }
+
+  // Otherwise check whether each channel's mute is settable.
+ for (UInt32 i = 1; i <= _noInputChannels; i++) {
+ propertyAddress.mElement = i;
+ isSettable = false;
+ err = AudioObjectIsPropertySettable(_inputDeviceID, &propertyAddress,
+ &isSettable);
+ if (err != noErr || !isSettable) {
+ available = false;
+ RTC_LOG(LS_WARNING) << "Mute cannot be set for output channel " << i
+ << ", err=" << err;
+ return -1;
+ }
+ }
+
+ available = true;
+ return 0;
+}
+
+int32_t AudioMixerManagerMac::SetMicrophoneMute(bool enable) {
+ RTC_LOG(LS_VERBOSE) << "AudioMixerManagerMac::SetMicrophoneMute(enable="
+ << enable << ")";
+
+ MutexLock lock(&mutex_);
+
+ if (_inputDeviceID == kAudioObjectUnknown) {
+ RTC_LOG(LS_WARNING) << "device ID has not been set";
+ return -1;
+ }
+
+ OSStatus err = noErr;
+ UInt32 size = 0;
+ UInt32 mute = enable ? 1 : 0;
+ bool success = false;
+
+ // Does the capture device have a master mute control?
+ // If so, use it exclusively.
+ AudioObjectPropertyAddress propertyAddress = {
+ kAudioDevicePropertyMute, kAudioDevicePropertyScopeInput, 0};
+ Boolean isSettable = false;
+ err = AudioObjectIsPropertySettable(_inputDeviceID, &propertyAddress,
+ &isSettable);
+ if (err == noErr && isSettable) {
+ size = sizeof(mute);
+ WEBRTC_CA_RETURN_ON_ERR(AudioObjectSetPropertyData(
+ _inputDeviceID, &propertyAddress, 0, NULL, size, &mute));
+
+ return 0;
+ }
+
+ // Otherwise try to set each channel.
+ for (UInt32 i = 1; i <= _noInputChannels; i++) {
+ propertyAddress.mElement = i;
+ isSettable = false;
+ err = AudioObjectIsPropertySettable(_inputDeviceID, &propertyAddress,
+ &isSettable);
+    if (err == noErr && isSettable) {
+      size = sizeof(mute);
+      WEBRTC_CA_RETURN_ON_ERR(AudioObjectSetPropertyData(
+          _inputDeviceID, &propertyAddress, 0, NULL, size, &mute));
+      // Only report success if at least one channel mute was set.
+      success = true;
+    }
+ }
+
+ if (!success) {
+ RTC_LOG(LS_WARNING) << "Unable to set mute on any input channel";
+ return -1;
+ }
+
+ return 0;
+}
+
+int32_t AudioMixerManagerMac::MicrophoneMute(bool& enabled) const {
+ if (_inputDeviceID == kAudioObjectUnknown) {
+ RTC_LOG(LS_WARNING) << "device ID has not been set";
+ return -1;
+ }
+
+ OSStatus err = noErr;
+ UInt32 size = 0;
+ unsigned int channels = 0;
+ UInt32 channelMuted = 0;
+ UInt32 muted = 0;
+
+  // Does the device have a master mute control?
+ // If so, use it exclusively.
+ AudioObjectPropertyAddress propertyAddress = {
+ kAudioDevicePropertyMute, kAudioDevicePropertyScopeInput, 0};
+ Boolean hasProperty =
+ AudioObjectHasProperty(_inputDeviceID, &propertyAddress);
+ if (hasProperty) {
+ size = sizeof(muted);
+ WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
+ _inputDeviceID, &propertyAddress, 0, NULL, &size, &muted));
+
+ // 1 means muted
+ enabled = static_cast<bool>(muted);
+ } else {
+    // Otherwise check if all channels are muted. Start with "muted" set
+    // and AND in each channel; resetting `muted` inside the loop would
+    // force the accumulated result to stay 0.
+    muted = 1;
+    for (UInt32 i = 1; i <= _noInputChannels; i++) {
+      propertyAddress.mElement = i;
+ hasProperty = AudioObjectHasProperty(_inputDeviceID, &propertyAddress);
+ if (hasProperty) {
+ size = sizeof(channelMuted);
+ WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
+ _inputDeviceID, &propertyAddress, 0, NULL, &size, &channelMuted));
+
+ muted = (muted && channelMuted);
+ channels++;
+ }
+ }
+
+ if (channels == 0) {
+ RTC_LOG(LS_WARNING) << "Unable to get mute for any channel";
+ return -1;
+ }
+
+ RTC_DCHECK_GT(channels, 0);
+ // 1 means muted
+ enabled = static_cast<bool>(muted);
+ }
+
+ RTC_LOG(LS_VERBOSE) << "AudioMixerManagerMac::MicrophoneMute() => enabled="
+ << enabled;
+
+ return 0;
+}
+
+int32_t AudioMixerManagerMac::MicrophoneVolumeIsAvailable(bool& available) {
+ if (_inputDeviceID == kAudioObjectUnknown) {
+ RTC_LOG(LS_WARNING) << "device ID has not been set";
+ return -1;
+ }
+
+ OSStatus err = noErr;
+
+ // Does the capture device have a master volume control?
+ // If so, use it exclusively.
+ AudioObjectPropertyAddress propertyAddress = {
+ kAudioDevicePropertyVolumeScalar, kAudioDevicePropertyScopeInput, 0};
+ Boolean isSettable = false;
+ err = AudioObjectIsPropertySettable(_inputDeviceID, &propertyAddress,
+ &isSettable);
+ if (err == noErr && isSettable) {
+ available = true;
+ return 0;
+ }
+
+  // Otherwise check whether each channel's volume is settable.
+ for (UInt32 i = 1; i <= _noInputChannels; i++) {
+ propertyAddress.mElement = i;
+ isSettable = false;
+ err = AudioObjectIsPropertySettable(_inputDeviceID, &propertyAddress,
+ &isSettable);
+ if (err != noErr || !isSettable) {
+ available = false;
+ RTC_LOG(LS_WARNING) << "Volume cannot be set for input channel " << i
+ << ", err=" << err;
+ return -1;
+ }
+ }
+
+ available = true;
+ return 0;
+}
+
+int32_t AudioMixerManagerMac::SetMicrophoneVolume(uint32_t volume) {
+ RTC_LOG(LS_VERBOSE) << "AudioMixerManagerMac::SetMicrophoneVolume(volume="
+ << volume << ")";
+
+ MutexLock lock(&mutex_);
+
+ if (_inputDeviceID == kAudioObjectUnknown) {
+ RTC_LOG(LS_WARNING) << "device ID has not been set";
+ return -1;
+ }
+
+ OSStatus err = noErr;
+ UInt32 size = 0;
+ bool success = false;
+
+ // volume range is 0.0 - 1.0, convert from 0 - 255
+ const Float32 vol = (Float32)(volume / 255.0);
+
+ RTC_DCHECK(vol <= 1.0 && vol >= 0.0);
+
+ // Does the capture device have a master volume control?
+ // If so, use it exclusively.
+ AudioObjectPropertyAddress propertyAddress = {
+ kAudioDevicePropertyVolumeScalar, kAudioDevicePropertyScopeInput, 0};
+ Boolean isSettable = false;
+ err = AudioObjectIsPropertySettable(_inputDeviceID, &propertyAddress,
+ &isSettable);
+ if (err == noErr && isSettable) {
+ size = sizeof(vol);
+ WEBRTC_CA_RETURN_ON_ERR(AudioObjectSetPropertyData(
+ _inputDeviceID, &propertyAddress, 0, NULL, size, &vol));
+
+ return 0;
+ }
+
+ // Otherwise try to set each channel.
+ for (UInt32 i = 1; i <= _noInputChannels; i++) {
+ propertyAddress.mElement = i;
+ isSettable = false;
+ err = AudioObjectIsPropertySettable(_inputDeviceID, &propertyAddress,
+ &isSettable);
+    if (err == noErr && isSettable) {
+      size = sizeof(vol);
+      WEBRTC_CA_RETURN_ON_ERR(AudioObjectSetPropertyData(
+          _inputDeviceID, &propertyAddress, 0, NULL, size, &vol));
+      // Only report success if at least one channel volume was set.
+      success = true;
+    }
+ }
+
+ if (!success) {
+ RTC_LOG(LS_WARNING) << "Unable to set a level on any input channel";
+ return -1;
+ }
+
+ return 0;
+}
+
+int32_t AudioMixerManagerMac::MicrophoneVolume(uint32_t& volume) const {
+ if (_inputDeviceID == kAudioObjectUnknown) {
+ RTC_LOG(LS_WARNING) << "device ID has not been set";
+ return -1;
+ }
+
+ OSStatus err = noErr;
+ UInt32 size = 0;
+ unsigned int channels = 0;
+ Float32 channelVol = 0;
+ Float32 volFloat32 = 0;
+
+ // Does the device have a master volume control?
+ // If so, use it exclusively.
+ AudioObjectPropertyAddress propertyAddress = {
+ kAudioDevicePropertyVolumeScalar, kAudioDevicePropertyScopeInput, 0};
+ Boolean hasProperty =
+ AudioObjectHasProperty(_inputDeviceID, &propertyAddress);
+ if (hasProperty) {
+ size = sizeof(volFloat32);
+ WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
+ _inputDeviceID, &propertyAddress, 0, NULL, &size, &volFloat32));
+
+ // vol 0.0 to 1.0 -> convert to 0 - 255
+ volume = static_cast<uint32_t>(volFloat32 * 255 + 0.5);
+ } else {
+ // Otherwise get the average volume across channels.
+ volFloat32 = 0;
+ for (UInt32 i = 1; i <= _noInputChannels; i++) {
+ channelVol = 0;
+ propertyAddress.mElement = i;
+ hasProperty = AudioObjectHasProperty(_inputDeviceID, &propertyAddress);
+ if (hasProperty) {
+ size = sizeof(channelVol);
+ WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
+ _inputDeviceID, &propertyAddress, 0, NULL, &size, &channelVol));
+
+ volFloat32 += channelVol;
+ channels++;
+ }
+ }
+
+ if (channels == 0) {
+ RTC_LOG(LS_WARNING) << "Unable to get a level on any channel";
+ return -1;
+ }
+
+ RTC_DCHECK_GT(channels, 0);
+ // vol 0.0 to 1.0 -> convert to 0 - 255
+ volume = static_cast<uint32_t>(255 * volFloat32 / channels + 0.5);
+ }
+
+ RTC_LOG(LS_VERBOSE) << "AudioMixerManagerMac::MicrophoneVolume() => vol="
+ << volume;
+
+ return 0;
+}
+
+int32_t AudioMixerManagerMac::MaxMicrophoneVolume(uint32_t& maxVolume) const {
+ if (_inputDeviceID == kAudioObjectUnknown) {
+ RTC_LOG(LS_WARNING) << "device ID has not been set";
+ return -1;
+ }
+
+ // volume range is 0.0 to 1.0
+ // we convert that to 0 - 255
+ maxVolume = 255;
+
+ return 0;
+}
+
+int32_t AudioMixerManagerMac::MinMicrophoneVolume(uint32_t& minVolume) const {
+ if (_inputDeviceID == kAudioObjectUnknown) {
+ RTC_LOG(LS_WARNING) << "device ID has not been set";
+ return -1;
+ }
+
+ // volume range is 0.0 to 1.0
+  // we convert that to 0 - 255
+ minVolume = 0;
+
+ return 0;
+}
+
+// ============================================================================
+// Private Methods
+// ============================================================================
+
+// CoreAudio errors are best interpreted as four character strings.
+void AudioMixerManagerMac::logCAMsg(const rtc::LoggingSeverity sev,
+ const char* msg,
+ const char* err) {
+ RTC_DCHECK(msg != NULL);
+ RTC_DCHECK(err != NULL);
+ RTC_DCHECK(sev == rtc::LS_ERROR || sev == rtc::LS_WARNING);
+
+#ifdef WEBRTC_ARCH_BIG_ENDIAN
+ switch (sev) {
+ case rtc::LS_ERROR:
+ RTC_LOG(LS_ERROR) << msg << ": " << err[0] << err[1] << err[2] << err[3];
+ break;
+ case rtc::LS_WARNING:
+ RTC_LOG(LS_WARNING) << msg << ": " << err[0] << err[1] << err[2]
+ << err[3];
+ break;
+ default:
+ break;
+ }
+#else
+ // We need to flip the characters in this case.
+ switch (sev) {
+ case rtc::LS_ERROR:
+ RTC_LOG(LS_ERROR) << msg << ": " << err[3] << err[2] << err[1] << err[0];
+ break;
+ case rtc::LS_WARNING:
+ RTC_LOG(LS_WARNING) << msg << ": " << err[3] << err[2] << err[1]
+ << err[0];
+ break;
+ default:
+ break;
+ }
+#endif
+}
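+
+// For reference, the same decoding as a standalone helper (illustrative
+// sketch only; `FourCCToString` is our name, not part of WebRTC):
+//   std::string FourCCToString(int32_t code) {
+//     const char* b = reinterpret_cast<const char*>(&code);
+//   #ifdef WEBRTC_ARCH_BIG_ENDIAN
+//     return std::string{b[0], b[1], b[2], b[3]};
+//   #else
+//     return std::string{b[3], b[2], b[1], b[0]};  // flip byte order
+//   #endif
+//   }
+// e.g. kAudioDeviceUnsupportedFormatError ('!dat') logs as "!dat".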
+
+} // namespace webrtc
+// EOF
diff --git a/third_party/libwebrtc/modules/audio_device/mac/audio_mixer_manager_mac.h b/third_party/libwebrtc/modules/audio_device/mac/audio_mixer_manager_mac.h
new file mode 100644
index 0000000000..0ccab4879b
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/mac/audio_mixer_manager_mac.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef AUDIO_DEVICE_AUDIO_MIXER_MANAGER_MAC_H_
+#define AUDIO_DEVICE_AUDIO_MIXER_MANAGER_MAC_H_
+
+#include <CoreAudio/CoreAudio.h>
+
+#include "modules/audio_device/include/audio_device.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/synchronization/mutex.h"
+
+namespace webrtc {
+
+class AudioMixerManagerMac {
+ public:
+ int32_t OpenSpeaker(AudioDeviceID deviceID) RTC_LOCKS_EXCLUDED(mutex_);
+ int32_t OpenMicrophone(AudioDeviceID deviceID) RTC_LOCKS_EXCLUDED(mutex_);
+ int32_t SetSpeakerVolume(uint32_t volume) RTC_LOCKS_EXCLUDED(mutex_);
+ int32_t SpeakerVolume(uint32_t& volume) const;
+ int32_t MaxSpeakerVolume(uint32_t& maxVolume) const;
+ int32_t MinSpeakerVolume(uint32_t& minVolume) const;
+ int32_t SpeakerVolumeIsAvailable(bool& available);
+ int32_t SpeakerMuteIsAvailable(bool& available);
+ int32_t SetSpeakerMute(bool enable) RTC_LOCKS_EXCLUDED(mutex_);
+ int32_t SpeakerMute(bool& enabled) const;
+ int32_t StereoPlayoutIsAvailable(bool& available);
+ int32_t StereoRecordingIsAvailable(bool& available);
+ int32_t MicrophoneMuteIsAvailable(bool& available);
+ int32_t SetMicrophoneMute(bool enable) RTC_LOCKS_EXCLUDED(mutex_);
+ int32_t MicrophoneMute(bool& enabled) const;
+ int32_t MicrophoneVolumeIsAvailable(bool& available);
+ int32_t SetMicrophoneVolume(uint32_t volume) RTC_LOCKS_EXCLUDED(mutex_);
+ int32_t MicrophoneVolume(uint32_t& volume) const;
+ int32_t MaxMicrophoneVolume(uint32_t& maxVolume) const;
+ int32_t MinMicrophoneVolume(uint32_t& minVolume) const;
+ int32_t Close() RTC_LOCKS_EXCLUDED(mutex_);
+ int32_t CloseSpeaker() RTC_LOCKS_EXCLUDED(mutex_);
+ int32_t CloseMicrophone() RTC_LOCKS_EXCLUDED(mutex_);
+ bool SpeakerIsInitialized() const;
+ bool MicrophoneIsInitialized() const;
+
+ public:
+ AudioMixerManagerMac();
+ ~AudioMixerManagerMac();
+
+ private:
+ int32_t CloseSpeakerLocked() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+ int32_t CloseMicrophoneLocked() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+ static void logCAMsg(rtc::LoggingSeverity sev,
+ const char* msg,
+ const char* err);
+
+ private:
+ Mutex mutex_;
+
+ AudioDeviceID _inputDeviceID;
+ AudioDeviceID _outputDeviceID;
+
+ uint16_t _noInputChannels;
+ uint16_t _noOutputChannels;
+};
+
+} // namespace webrtc
+
+#endif  // AUDIO_DEVICE_AUDIO_MIXER_MANAGER_MAC_H_
diff --git a/third_party/libwebrtc/modules/audio_device/mock_audio_device_buffer.h b/third_party/libwebrtc/modules/audio_device/mock_audio_device_buffer.h
new file mode 100644
index 0000000000..b0f54c20ff
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/mock_audio_device_buffer.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_DEVICE_MOCK_AUDIO_DEVICE_BUFFER_H_
+#define MODULES_AUDIO_DEVICE_MOCK_AUDIO_DEVICE_BUFFER_H_
+
+#include "modules/audio_device/audio_device_buffer.h"
+#include "test/gmock.h"
+
+namespace webrtc {
+
+class MockAudioDeviceBuffer : public AudioDeviceBuffer {
+ public:
+ using AudioDeviceBuffer::AudioDeviceBuffer;
+ virtual ~MockAudioDeviceBuffer() {}
+ MOCK_METHOD(int32_t, RequestPlayoutData, (size_t nSamples), (override));
+ MOCK_METHOD(int32_t, GetPlayoutData, (void* audioBuffer), (override));
+ MOCK_METHOD(int32_t,
+ SetRecordedBuffer,
+ (const void* audioBuffer, size_t nSamples),
+ (override));
+ MOCK_METHOD(void, SetVQEData, (int playDelayMS, int recDelayMS), (override));
+ MOCK_METHOD(int32_t, DeliverRecordedData, (), (override));
+};
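+
+// Typical gmock usage sketch (test-side illustration, not part of this
+// header; constructor arguments are inherited from AudioDeviceBuffer and
+// elided here):
+//   MockAudioDeviceBuffer audio_buffer(...);
+//   EXPECT_CALL(audio_buffer, RequestPlayoutData(::testing::_))
+//       .WillRepeatedly(::testing::Return(0));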
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_DEVICE_MOCK_AUDIO_DEVICE_BUFFER_H_
diff --git a/third_party/libwebrtc/modules/audio_device/opensl/single_rw_fifo.cc b/third_party/libwebrtc/modules/audio_device/opensl/single_rw_fifo.cc
new file mode 100644
index 0000000000..a7bad3af6e
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/opensl/single_rw_fifo.cc
@@ -0,0 +1,123 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#if defined(_MSC_VER)
+#include <windows.h>
+#endif
+
+#include "single_rw_fifo.h"
+
+#include <assert.h>
+
+static int UpdatePos(int pos, int capacity) {
+ return (pos + 1) % capacity;
+}
+
+namespace webrtc {
+
+namespace subtle {
+
+// Start with compiler support, then processor-specific hacks
+#if defined(__GNUC__) || defined(__clang__)
+// Available on GCC and clang - others?
+inline void MemoryBarrier() {
+ __sync_synchronize();
+}
+
+#elif defined(_MSC_VER)
+inline void MemoryBarrier() {
+ ::MemoryBarrier();
+}
+
+#elif defined(__aarch64__)
+// From http://src.chromium.org/viewvc/chrome/trunk/src/base/atomicops_internals_arm64_gcc.h
+inline void MemoryBarrier() {
+ __asm__ __volatile__ ("dmb ish" ::: "memory");
+}
+
+#elif defined(__ARMEL__)
+// From http://src.chromium.org/viewvc/chrome/trunk/src/base/atomicops_internals_arm_gcc.h
+inline void MemoryBarrier() {
+ // Note: This is a function call, which is also an implicit compiler barrier.
+ typedef void (*KernelMemoryBarrierFunc)();
+ ((KernelMemoryBarrierFunc)0xffff0fa0)();
+}
+
+#elif defined(__x86_64__) || defined (__i386__)
+// From http://src.chromium.org/viewvc/chrome/trunk/src/base/atomicops_internals_x86_gcc.h
+// mfence exists on x64 and x86 platforms containing SSE2.
+// x86 platforms that don't have SSE2 will crash with SIGILL.
+// If this code needs to run on such platforms in the future,
+// add runtime CPU detection here.
+inline void MemoryBarrier() {
+ __asm__ __volatile__("mfence" : : : "memory");
+}
+
+#elif defined(__MIPSEL__)
+// From http://src.chromium.org/viewvc/chrome/trunk/src/base/atomicops_internals_mips_gcc.h
+inline void MemoryBarrier() {
+ __asm__ __volatile__("sync" : : : "memory");
+}
+
+#else
+#error Add an implementation of MemoryBarrier() for this platform!
+#endif
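+
+// Note: on C++11 and later, all of the variants above could be expressed
+// portably (our observation, not a change to this file) as:
+//   #include <atomic>
+//   inline void MemoryBarrier() {
+//     std::atomic_thread_fence(std::memory_order_seq_cst);
+//   }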
+
+} // namespace subtle
+
+SingleRwFifo::SingleRwFifo(int capacity)
+ : queue_(new int8_t*[capacity]),
+ capacity_(capacity),
+ size_(0),
+ read_pos_(0),
+      write_pos_(0) {}
+
+SingleRwFifo::~SingleRwFifo() {
+}
+
+void SingleRwFifo::Push(int8_t* mem) {
+ assert(mem);
+
+ // Ensure that there is space for the new data in the FIFO.
+ // Note there is only one writer meaning that the other thread is guaranteed
+ // only to decrease the size.
+ const int free_slots = capacity() - size();
+ if (free_slots <= 0) {
+ // Size can be queried outside of the Push function. The caller is assumed
+ // to ensure that Push will be successful before calling it.
+ assert(false);
+ return;
+ }
+ queue_.get()[write_pos_] = mem;
+  // Memory barrier ensures the slot write above is visible before |size_|
+  // is incremented.
+ subtle::MemoryBarrier();
+ ++size_;
+ write_pos_ = UpdatePos(write_pos_, capacity());
+}
+
+int8_t* SingleRwFifo::Pop() {
+ int8_t* ret_val = NULL;
+ if (size() <= 0) {
+ // Size can be queried outside of the Pop function. The caller is assumed
+    // to ensure that Pop will be successful before calling it.
+ assert(false);
+ return ret_val;
+ }
+ ret_val = queue_.get()[read_pos_];
+  // Memory barrier ensures the slot read above completes before |size_|
+  // is decremented.
+ subtle::MemoryBarrier();
+ --size_;
+ read_pos_ = UpdatePos(read_pos_, capacity());
+ return ret_val;
+}
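+
+// The barrier pairing in Push() and Pop() amounts to release/acquire
+// publication. A restatement with explicit memory orders (sketch, not
+// the actual code):
+//   queue_.get()[write_pos_] = mem;                            // writer
+//   size_.fetch_add(1, std::memory_order_release);             // writer
+//   if (size_.load(std::memory_order_acquire) > 0) { Pop(); }  // reader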
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_device/opensl/single_rw_fifo.h b/third_party/libwebrtc/modules/audio_device/opensl/single_rw_fifo.h
new file mode 100644
index 0000000000..0db4815fa5
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/opensl/single_rw_fifo.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_SINGLE_RW_FIFO_H_
+#define WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_SINGLE_RW_FIFO_H_
+
+#include <stdint.h>
+
+#include <atomic>  // std::atomic
+#include <memory>  // std::unique_ptr
+
+namespace webrtc {
+
+// Implements a lock-free FIFO loosely based on
+// http://src.chromium.org/viewvc/chrome/trunk/src/media/base/audio_fifo.cc
+// Note that this class assumes there is one producer (writer) and one
+// consumer (reader) thread.
+class SingleRwFifo {
+ public:
+ explicit SingleRwFifo(int capacity);
+ ~SingleRwFifo();
+
+ void Push(int8_t* mem);
+ int8_t* Pop();
+
+ void Clear();
+
+ int size() { return size_; }
+ int capacity() const { return capacity_; }
+
+ private:
+ std::unique_ptr<int8_t* []> queue_;
+ int capacity_;
+
+ std::atomic<int32_t> size_;
+
+ int read_pos_;
+ int write_pos_;
+};
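+
+// Single-producer/single-consumer usage sketch (illustrative only; the
+// FIFO stores raw pointers, so buffer ownership stays with the caller):
+//   SingleRwFifo fifo(/*capacity=*/8);
+//   // Writer thread:
+//   if (fifo.size() < fifo.capacity()) fifo.Push(buffer);
+//   // Reader thread:
+//   if (fifo.size() > 0) { int8_t* data = fifo.Pop(); /* consume */ }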
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_SINGLE_RW_FIFO_H_
diff --git a/third_party/libwebrtc/modules/audio_device/win/audio_device_core_win.cc b/third_party/libwebrtc/modules/audio_device/win/audio_device_core_win.cc
new file mode 100644
index 0000000000..1e3a94edf6
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/win/audio_device_core_win.cc
@@ -0,0 +1,4178 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#pragma warning(disable : 4995) // name was marked as #pragma deprecated
+
+#if (_MSC_VER >= 1310) && (_MSC_VER < 1400)
+// Reports the major and minor versions of the compiler.
+// For example, 1310 for Microsoft Visual C++ .NET 2003. 1310 represents version
+// 13 and a 1.0 point release. The Visual C++ 2005 compiler version is 1400.
+// Type cl /? at the command line to see the major and minor versions of your
+// compiler along with the build number.
+#pragma message(">> INFO: Windows Core Audio is not supported in VS 2003")
+#endif
+
+#include "modules/audio_device/audio_device_config.h"
+
+#ifdef WEBRTC_WINDOWS_CORE_AUDIO_BUILD
+
+// clang-format off
+// To get Windows includes in the right order, this must come before the Windows
+// includes below.
+#include "modules/audio_device/win/audio_device_core_win.h"
+// clang-format on
+
+#include <string.h>
+
+#include <comdef.h>
+#include <dmo.h>
+#include <functiondiscoverykeys_devpkey.h>
+#include <mmsystem.h>
+#include <strsafe.h>
+#include <uuids.h>
+#include <windows.h>
+
+#include <iomanip>
+
+#include "api/make_ref_counted.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/platform_thread.h"
+#include "rtc_base/string_utils.h"
+#include "rtc_base/thread_annotations.h"
+#include "system_wrappers/include/sleep.h"
+
+// Macro that jumps to Exit when a COM call returns a failed HRESULT.
+#define EXIT_ON_ERROR(hres) \
+ do { \
+ if (FAILED(hres)) \
+ goto Exit; \
+ } while (0)
+
+// Macro that jumps to Next when a COM call returns a failed HRESULT.
+#define CONTINUE_ON_ERROR(hres) \
+ do { \
+ if (FAILED(hres)) \
+ goto Next; \
+ } while (0)
+
+// Macro that releases a COM object if not NULL.
+#define SAFE_RELEASE(p) \
+ do { \
+ if ((p)) { \
+ (p)->Release(); \
+ (p) = NULL; \
+ } \
+ } while (0)
+
+#define ROUND(x) ((x) >= 0 ? (int)((x) + 0.5) : (int)((x)-0.5))
+
+// REFERENCE_TIME time units per millisecond
+#define REFTIMES_PER_MILLISEC 10000
+
+typedef struct tagTHREADNAME_INFO {
+ DWORD dwType; // must be 0x1000
+ LPCSTR szName; // pointer to name (in user addr space)
+ DWORD dwThreadID; // thread ID (-1=caller thread)
+ DWORD dwFlags; // reserved for future use, must be zero
+} THREADNAME_INFO;
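+
+// THREADNAME_INFO supports the canonical MSVC thread-naming idiom: the
+// struct is handed to an attached debugger by raising the documented
+// magic exception 0x406D1388, roughly (sketch of the usual pattern,
+// presumably matching the thread setup later in this file):
+//   THREADNAME_INFO info = {0x1000, "webrtc_core_audio_thread",
+//                           static_cast<DWORD>(-1), 0};
+//   __try {
+//     RaiseException(0x406D1388, 0, sizeof(info) / sizeof(ULONG_PTR),
+//                    reinterpret_cast<ULONG_PTR*>(&info));
+//   } __except (EXCEPTION_EXECUTE_HANDLER) {}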
+
+namespace webrtc {
+namespace {
+
+enum { COM_THREADING_MODEL = COINIT_MULTITHREADED };
+
+enum { kAecCaptureStreamIndex = 0, kAecRenderStreamIndex = 1 };
+
+// An implementation of IMediaBuffer, as required for
+// IMediaObject::ProcessOutput(). After consuming data provided by
+// ProcessOutput(), call SetLength() to update the buffer availability.
+//
+// Example implementation:
+// http://msdn.microsoft.com/en-us/library/dd376684(v=vs.85).aspx
+class MediaBufferImpl final : public IMediaBuffer {
+ public:
+ explicit MediaBufferImpl(DWORD maxLength)
+ : _data(new BYTE[maxLength]),
+ _length(0),
+ _maxLength(maxLength),
+ _refCount(0) {}
+
+ // IMediaBuffer methods.
+ STDMETHOD(GetBufferAndLength(BYTE** ppBuffer, DWORD* pcbLength)) {
+ if (!ppBuffer || !pcbLength) {
+ return E_POINTER;
+ }
+
+ *ppBuffer = _data;
+ *pcbLength = _length;
+
+ return S_OK;
+ }
+
+ STDMETHOD(GetMaxLength(DWORD* pcbMaxLength)) {
+ if (!pcbMaxLength) {
+ return E_POINTER;
+ }
+
+ *pcbMaxLength = _maxLength;
+ return S_OK;
+ }
+
+ STDMETHOD(SetLength(DWORD cbLength)) {
+ if (cbLength > _maxLength) {
+ return E_INVALIDARG;
+ }
+
+ _length = cbLength;
+ return S_OK;
+ }
+
+ // IUnknown methods.
+ STDMETHOD_(ULONG, AddRef()) { return InterlockedIncrement(&_refCount); }
+
+ STDMETHOD(QueryInterface(REFIID riid, void** ppv)) {
+ if (!ppv) {
+ return E_POINTER;
+ } else if (riid != IID_IMediaBuffer && riid != IID_IUnknown) {
+ return E_NOINTERFACE;
+ }
+
+ *ppv = static_cast<IMediaBuffer*>(this);
+ AddRef();
+ return S_OK;
+ }
+
+ STDMETHOD_(ULONG, Release()) {
+ LONG refCount = InterlockedDecrement(&_refCount);
+ if (refCount == 0) {
+ delete this;
+ }
+
+ return refCount;
+ }
+
+ private:
+ ~MediaBufferImpl() { delete[] _data; }
+
+ BYTE* _data;
+ DWORD _length;
+ const DWORD _maxLength;
+ LONG _refCount;
+};
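+
+// Hedged usage sketch with IMediaObject::ProcessOutput() (parameter
+// layout per the DMO API; `dmo` and `kMaxBytes` are illustrative):
+//   DMO_OUTPUT_DATA_BUFFER out = {};
+//   out.pBuffer = new MediaBufferImpl(kMaxBytes);
+//   out.pBuffer->AddRef();  // _refCount starts at 0 in the ctor above
+//   DWORD status = 0;
+//   HRESULT hr = dmo->ProcessOutput(0, 1, &out, &status);
+//   // ...consume via GetBufferAndLength(), then SetLength(0) to reuse...
+//   SAFE_RELEASE(out.pBuffer);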
+} // namespace
+
+// ============================================================================
+// Static Methods
+// ============================================================================
+
+// ----------------------------------------------------------------------------
+// CoreAudioIsSupported
+// ----------------------------------------------------------------------------
+
+bool AudioDeviceWindowsCore::CoreAudioIsSupported() {
+ RTC_DLOG(LS_VERBOSE) << __FUNCTION__;
+
+ bool MMDeviceIsAvailable(false);
+ bool coreAudioIsSupported(false);
+
+ HRESULT hr(S_OK);
+ wchar_t buf[MAXERRORLENGTH];
+ wchar_t errorText[MAXERRORLENGTH];
+
+ // 1) Check if Windows version is Vista SP1 or later.
+ //
+ // CoreAudio is only available on Vista SP1 and later.
+ //
+ OSVERSIONINFOEX osvi;
+ DWORDLONG dwlConditionMask = 0;
+ int op = VER_LESS_EQUAL;
+
+ // Initialize the OSVERSIONINFOEX structure.
+ ZeroMemory(&osvi, sizeof(OSVERSIONINFOEX));
+ osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
+ osvi.dwMajorVersion = 6;
+ osvi.dwMinorVersion = 0;
+ osvi.wServicePackMajor = 0;
+ osvi.wServicePackMinor = 0;
+ osvi.wProductType = VER_NT_WORKSTATION;
+
+ // Initialize the condition mask.
+ VER_SET_CONDITION(dwlConditionMask, VER_MAJORVERSION, op);
+ VER_SET_CONDITION(dwlConditionMask, VER_MINORVERSION, op);
+ VER_SET_CONDITION(dwlConditionMask, VER_SERVICEPACKMAJOR, op);
+ VER_SET_CONDITION(dwlConditionMask, VER_SERVICEPACKMINOR, op);
+ VER_SET_CONDITION(dwlConditionMask, VER_PRODUCT_TYPE, VER_EQUAL);
+
+ DWORD dwTypeMask = VER_MAJORVERSION | VER_MINORVERSION |
+ VER_SERVICEPACKMAJOR | VER_SERVICEPACKMINOR |
+ VER_PRODUCT_TYPE;
+
+ // Perform the test.
+ BOOL isVistaRTMorXP = VerifyVersionInfo(&osvi, dwTypeMask, dwlConditionMask);
+ if (isVistaRTMorXP != 0) {
+ RTC_LOG(LS_VERBOSE)
+ << "*** Windows Core Audio is only supported on Vista SP1 or later";
+ return false;
+ }
+
+  // 2) Initialize the COM library for use by the calling thread.
+
+ // The COM init wrapper sets the thread's concurrency model to MTA,
+ // and creates a new apartment for the thread if one is required. The
+ // wrapper also ensures that each call to CoInitializeEx is balanced
+ // by a corresponding call to CoUninitialize.
+ //
+ ScopedCOMInitializer comInit(ScopedCOMInitializer::kMTA);
+ if (!comInit.Succeeded()) {
+ // Things will work even if an STA thread is calling this method but we
+ // want to ensure that MTA is used and therefore return false here.
+ return false;
+ }
+
+ // 3) Check if the MMDevice API is available.
+ //
+ // The Windows Multimedia Device (MMDevice) API enables audio clients to
+ // discover audio endpoint devices, determine their capabilities, and create
+ // driver instances for those devices.
+ // Header file Mmdeviceapi.h defines the interfaces in the MMDevice API.
+ // The MMDevice API consists of several interfaces. The first of these is the
+ // IMMDeviceEnumerator interface. To access the interfaces in the MMDevice
+ // API, a client obtains a reference to the IMMDeviceEnumerator interface of a
+ // device-enumerator object by calling the CoCreateInstance function.
+ //
+ // Through the IMMDeviceEnumerator interface, the client can obtain references
+ // to the other interfaces in the MMDevice API. The MMDevice API implements
+ // the following interfaces:
+ //
+ // IMMDevice Represents an audio device.
+ // IMMDeviceCollection Represents a collection of audio devices.
+ // IMMDeviceEnumerator Provides methods for enumerating audio devices.
+ // IMMEndpoint Represents an audio endpoint device.
+ //
+ IMMDeviceEnumerator* pIMMD(NULL);
+ const CLSID CLSID_MMDeviceEnumerator = __uuidof(MMDeviceEnumerator);
+ const IID IID_IMMDeviceEnumerator = __uuidof(IMMDeviceEnumerator);
+
+ hr = CoCreateInstance(
+ CLSID_MMDeviceEnumerator, // GUID value of MMDeviceEnumerator coclass
+ NULL, CLSCTX_ALL,
+ IID_IMMDeviceEnumerator, // GUID value of the IMMDeviceEnumerator
+ // interface
+ (void**)&pIMMD);
+
+ if (FAILED(hr)) {
+ RTC_LOG(LS_ERROR) << "AudioDeviceWindowsCore::CoreAudioIsSupported()"
+ " Failed to create the required COM object (hr="
+ << hr << ")";
+ RTC_LOG(LS_VERBOSE) << "AudioDeviceWindowsCore::CoreAudioIsSupported()"
+ " CoCreateInstance(MMDeviceEnumerator) failed (hr="
+ << hr << ")";
+
+ const DWORD dwFlags =
+ FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS;
+ const DWORD dwLangID = MAKELANGID(LANG_ENGLISH, SUBLANG_ENGLISH_US);
+
+    // Gets the system's human-readable message string for this HRESULT.
+    // All error messages are in English by default.
+ DWORD messageLength = ::FormatMessageW(dwFlags, 0, hr, dwLangID, errorText,
+ MAXERRORLENGTH, NULL);
+
+ RTC_DCHECK_LE(messageLength, MAXERRORLENGTH);
+
+    // Trims trailing white space (FormatMessage() leaves a trailing cr-lf).
+ for (; messageLength && ::isspace(errorText[messageLength - 1]);
+ --messageLength) {
+ errorText[messageLength - 1] = '\0';
+ }
+
+ StringCchPrintfW(buf, MAXERRORLENGTH, L"Error details: ");
+ StringCchCatW(buf, MAXERRORLENGTH, errorText);
+ RTC_LOG(LS_VERBOSE) << buf;
+ } else {
+ MMDeviceIsAvailable = true;
+ RTC_LOG(LS_VERBOSE)
+ << "AudioDeviceWindowsCore::CoreAudioIsSupported()"
+ " CoCreateInstance(MMDeviceEnumerator) succeeded (hr="
+ << hr << ")";
+ SAFE_RELEASE(pIMMD);
+ }
+
+ // 4) Verify that we can create and initialize our Core Audio class.
+ //
+ if (MMDeviceIsAvailable) {
+ coreAudioIsSupported = false;
+
+ AudioDeviceWindowsCore* p = new (std::nothrow) AudioDeviceWindowsCore();
+ if (p == NULL) {
+ return false;
+ }
+
+ int ok(0);
+
+ if (p->Init() != InitStatus::OK) {
+ ok |= -1;
+ }
+
+ ok |= p->Terminate();
+
+ if (ok == 0) {
+ coreAudioIsSupported = true;
+ }
+
+ delete p;
+ }
+
+ if (coreAudioIsSupported) {
+ RTC_LOG(LS_VERBOSE) << "*** Windows Core Audio is supported ***";
+ } else {
+ RTC_LOG(LS_VERBOSE) << "*** Windows Core Audio is NOT supported";
+ }
+
+ return (coreAudioIsSupported);
+}
+
+// ============================================================================
+// Construction & Destruction
+// ============================================================================
+
+// ----------------------------------------------------------------------------
+// AudioDeviceWindowsCore() - ctor
+// ----------------------------------------------------------------------------
+
+AudioDeviceWindowsCore::AudioDeviceWindowsCore()
+ : _avrtLibrary(nullptr),
+ _winSupportAvrt(false),
+ _comInit(ScopedCOMInitializer::kMTA),
+ _ptrAudioBuffer(nullptr),
+ _ptrEnumerator(nullptr),
+ _ptrRenderCollection(nullptr),
+ _ptrCaptureCollection(nullptr),
+ _ptrDeviceOut(nullptr),
+ _ptrDeviceIn(nullptr),
+ _ptrClientOut(nullptr),
+ _ptrClientIn(nullptr),
+ _ptrRenderClient(nullptr),
+ _ptrCaptureClient(nullptr),
+ _ptrCaptureVolume(nullptr),
+ _ptrRenderSimpleVolume(nullptr),
+ _dmo(nullptr),
+ _mediaBuffer(nullptr),
+ _builtInAecEnabled(false),
+ _hRenderSamplesReadyEvent(nullptr),
+ _hPlayThread(nullptr),
+ _hRenderStartedEvent(nullptr),
+ _hShutdownRenderEvent(nullptr),
+ _hCaptureSamplesReadyEvent(nullptr),
+ _hRecThread(nullptr),
+ _hCaptureStartedEvent(nullptr),
+ _hShutdownCaptureEvent(nullptr),
+ _hMmTask(nullptr),
+ _playAudioFrameSize(0),
+ _playSampleRate(0),
+ _playBlockSize(0),
+ _playChannels(2),
+ _sndCardPlayDelay(0),
+ _writtenSamples(0),
+ _readSamples(0),
+ _recAudioFrameSize(0),
+ _recSampleRate(0),
+ _recBlockSize(0),
+ _recChannels(2),
+ _initialized(false),
+ _recording(false),
+ _playing(false),
+ _recIsInitialized(false),
+ _playIsInitialized(false),
+ _speakerIsInitialized(false),
+ _microphoneIsInitialized(false),
+ _usingInputDeviceIndex(false),
+ _usingOutputDeviceIndex(false),
+ _inputDevice(AudioDeviceModule::kDefaultCommunicationDevice),
+ _outputDevice(AudioDeviceModule::kDefaultCommunicationDevice),
+ _inputDeviceIndex(0),
+ _outputDeviceIndex(0) {
+ RTC_DLOG(LS_INFO) << __FUNCTION__ << " created";
+ RTC_DCHECK(_comInit.Succeeded());
+
+ // Try to load the Avrt DLL
+ if (!_avrtLibrary) {
+ // Get handle to the Avrt DLL module.
+ _avrtLibrary = LoadLibrary(TEXT("Avrt.dll"));
+ if (_avrtLibrary) {
+      // Handle is valid (Avrt.dll is available on Vista and later).
+ // Try to get the function addresses.
+ RTC_LOG(LS_VERBOSE) << "AudioDeviceWindowsCore::AudioDeviceWindowsCore()"
+ " The Avrt DLL module is now loaded";
+
+ _PAvRevertMmThreadCharacteristics =
+ (PAvRevertMmThreadCharacteristics)GetProcAddress(
+ _avrtLibrary, "AvRevertMmThreadCharacteristics");
+ _PAvSetMmThreadCharacteristicsA =
+ (PAvSetMmThreadCharacteristicsA)GetProcAddress(
+ _avrtLibrary, "AvSetMmThreadCharacteristicsA");
+ _PAvSetMmThreadPriority = (PAvSetMmThreadPriority)GetProcAddress(
+ _avrtLibrary, "AvSetMmThreadPriority");
+
+ if (_PAvRevertMmThreadCharacteristics &&
+ _PAvSetMmThreadCharacteristicsA && _PAvSetMmThreadPriority) {
+ RTC_LOG(LS_VERBOSE)
+ << "AudioDeviceWindowsCore::AudioDeviceWindowsCore()"
+ " AvRevertMmThreadCharacteristics() is OK";
+ RTC_LOG(LS_VERBOSE)
+ << "AudioDeviceWindowsCore::AudioDeviceWindowsCore()"
+ " AvSetMmThreadCharacteristicsA() is OK";
+ RTC_LOG(LS_VERBOSE)
+ << "AudioDeviceWindowsCore::AudioDeviceWindowsCore()"
+ " AvSetMmThreadPriority() is OK";
+ _winSupportAvrt = true;
+ }
+ }
+ }
+
+ // Create our samples ready events - we want auto reset events that start in
+ // the not-signaled state. The state of an auto-reset event object remains
+ // signaled until a single waiting thread is released, at which time the
+ // system automatically sets the state to nonsignaled. If no threads are
+ // waiting, the event object's state remains signaled. (Except for
+  // _hShutdownCaptureEvent, which is used to shut down multiple threads).
+ _hRenderSamplesReadyEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
+ _hCaptureSamplesReadyEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
+ _hShutdownRenderEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
+ _hShutdownCaptureEvent = CreateEvent(NULL, TRUE, FALSE, NULL);
+ _hRenderStartedEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
+ _hCaptureStartedEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
+
+ _perfCounterFreq.QuadPart = 1;
+ _perfCounterFactor = 0.0;
+
+ // list of number of channels to use on recording side
+ _recChannelsPrioList[0] = 2; // stereo is prio 1
+ _recChannelsPrioList[1] = 1; // mono is prio 2
+ _recChannelsPrioList[2] = 4; // quad is prio 3
+
+ // list of number of channels to use on playout side
+ _playChannelsPrioList[0] = 2; // stereo is prio 1
+ _playChannelsPrioList[1] = 1; // mono is prio 2
+
+ HRESULT hr;
+
+ // We know that this API will work since it has already been verified in
+ // CoreAudioIsSupported, hence no need to check for errors here as well.
+
+  // Retrieve the IMMDeviceEnumerator API (should load the MMDevAPI.dll)
+ // TODO(henrika): we should probably move this allocation to Init() instead
+ // and deallocate in Terminate() to make the implementation more symmetric.
+ CoCreateInstance(__uuidof(MMDeviceEnumerator), NULL, CLSCTX_ALL,
+ __uuidof(IMMDeviceEnumerator),
+ reinterpret_cast<void**>(&_ptrEnumerator));
+ RTC_DCHECK(_ptrEnumerator);
+
+ // DMO initialization for built-in WASAPI AEC.
+ {
+ IMediaObject* ptrDMO = NULL;
+ hr = CoCreateInstance(CLSID_CWMAudioAEC, NULL, CLSCTX_INPROC_SERVER,
+ IID_IMediaObject, reinterpret_cast<void**>(&ptrDMO));
+ if (FAILED(hr) || ptrDMO == NULL) {
+ // Since we check that _dmo is non-NULL in EnableBuiltInAEC(), the
+ // feature is prevented from being enabled.
+ _builtInAecEnabled = false;
+ _TraceCOMError(hr);
+ }
+ _dmo = ptrDMO;
+ SAFE_RELEASE(ptrDMO);
+ }
+}
+
+// ----------------------------------------------------------------------------
+// AudioDeviceWindowsCore() - dtor
+// ----------------------------------------------------------------------------
+
+AudioDeviceWindowsCore::~AudioDeviceWindowsCore() {
+ RTC_DLOG(LS_INFO) << __FUNCTION__ << " destroyed";
+
+ Terminate();
+
+ // The IMMDeviceEnumerator is created during construction. Must release
+ // it here and not in Terminate() since we don't recreate it in Init().
+ SAFE_RELEASE(_ptrEnumerator);
+
+ _ptrAudioBuffer = NULL;
+
+ if (NULL != _hRenderSamplesReadyEvent) {
+ CloseHandle(_hRenderSamplesReadyEvent);
+ _hRenderSamplesReadyEvent = NULL;
+ }
+
+ if (NULL != _hCaptureSamplesReadyEvent) {
+ CloseHandle(_hCaptureSamplesReadyEvent);
+ _hCaptureSamplesReadyEvent = NULL;
+ }
+
+ if (NULL != _hRenderStartedEvent) {
+ CloseHandle(_hRenderStartedEvent);
+ _hRenderStartedEvent = NULL;
+ }
+
+ if (NULL != _hCaptureStartedEvent) {
+ CloseHandle(_hCaptureStartedEvent);
+ _hCaptureStartedEvent = NULL;
+ }
+
+ if (NULL != _hShutdownRenderEvent) {
+ CloseHandle(_hShutdownRenderEvent);
+ _hShutdownRenderEvent = NULL;
+ }
+
+ if (NULL != _hShutdownCaptureEvent) {
+ CloseHandle(_hShutdownCaptureEvent);
+ _hShutdownCaptureEvent = NULL;
+ }
+
+ if (_avrtLibrary) {
+ BOOL freeOK = FreeLibrary(_avrtLibrary);
+ if (!freeOK) {
+ RTC_LOG(LS_WARNING)
+ << "AudioDeviceWindowsCore::~AudioDeviceWindowsCore()"
+ " failed to free the loaded Avrt DLL module correctly";
+ } else {
+ RTC_LOG(LS_WARNING) << "AudioDeviceWindowsCore::~AudioDeviceWindowsCore()"
+ " the Avrt DLL module is now unloaded";
+ }
+ }
+}
+
+// ============================================================================
+// API
+// ============================================================================
+
+// ----------------------------------------------------------------------------
+// AttachAudioBuffer
+// ----------------------------------------------------------------------------
+
+void AudioDeviceWindowsCore::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
+ _ptrAudioBuffer = audioBuffer;
+
+ // Inform the AudioBuffer about default settings for this implementation.
+ // Set all values to zero here since the actual settings will be done by
+ // InitPlayout and InitRecording later.
+ _ptrAudioBuffer->SetRecordingSampleRate(0);
+ _ptrAudioBuffer->SetPlayoutSampleRate(0);
+ _ptrAudioBuffer->SetRecordingChannels(0);
+ _ptrAudioBuffer->SetPlayoutChannels(0);
+}
+
+// ----------------------------------------------------------------------------
+// ActiveAudioLayer
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::ActiveAudioLayer(
+ AudioDeviceModule::AudioLayer& audioLayer) const {
+ audioLayer = AudioDeviceModule::kWindowsCoreAudio;
+ return 0;
+}
+
+// ----------------------------------------------------------------------------
+// Init
+// ----------------------------------------------------------------------------
+
+AudioDeviceGeneric::InitStatus AudioDeviceWindowsCore::Init() {
+ MutexLock lock(&mutex_);
+
+ if (_initialized) {
+ return InitStatus::OK;
+ }
+
+ // Enumerate all audio rendering and capturing endpoint devices.
+ // Note that some of these may not be selectable by the user.
+ // The complete collection is for internal use only.
+ _EnumerateEndpointDevicesAll(eRender);
+ _EnumerateEndpointDevicesAll(eCapture);
+
+ _initialized = true;
+
+ return InitStatus::OK;
+}
+
+// ----------------------------------------------------------------------------
+// Terminate
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::Terminate() {
+ MutexLock lock(&mutex_);
+
+ if (!_initialized) {
+ return 0;
+ }
+
+ _initialized = false;
+ _speakerIsInitialized = false;
+ _microphoneIsInitialized = false;
+ _playing = false;
+ _recording = false;
+
+ SAFE_RELEASE(_ptrRenderCollection);
+ SAFE_RELEASE(_ptrCaptureCollection);
+ SAFE_RELEASE(_ptrDeviceOut);
+ SAFE_RELEASE(_ptrDeviceIn);
+ SAFE_RELEASE(_ptrClientOut);
+ SAFE_RELEASE(_ptrClientIn);
+ SAFE_RELEASE(_ptrRenderClient);
+ SAFE_RELEASE(_ptrCaptureClient);
+ SAFE_RELEASE(_ptrCaptureVolume);
+ SAFE_RELEASE(_ptrRenderSimpleVolume);
+
+ return 0;
+}
+
+// ----------------------------------------------------------------------------
+// Initialized
+// ----------------------------------------------------------------------------
+
+bool AudioDeviceWindowsCore::Initialized() const {
+ return (_initialized);
+}
+
+// ----------------------------------------------------------------------------
+// InitSpeaker
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::InitSpeaker() {
+ MutexLock lock(&mutex_);
+ return InitSpeakerLocked();
+}
+
+int32_t AudioDeviceWindowsCore::InitSpeakerLocked() {
+ if (_playing) {
+ return -1;
+ }
+
+ if (_ptrDeviceOut == NULL) {
+ return -1;
+ }
+
+ if (_usingOutputDeviceIndex) {
+ int16_t nDevices = PlayoutDevicesLocked();
+ if (_outputDeviceIndex > (nDevices - 1)) {
+ RTC_LOG(LS_ERROR) << "current device selection is invalid => unable to"
+ " initialize";
+ return -1;
+ }
+ }
+
+ int32_t ret(0);
+
+ SAFE_RELEASE(_ptrDeviceOut);
+ if (_usingOutputDeviceIndex) {
+ // Refresh the selected rendering endpoint device using current index
+ ret = _GetListDevice(eRender, _outputDeviceIndex, &_ptrDeviceOut);
+ } else {
+ const ERole role = (_outputDevice == AudioDeviceModule::kDefaultDevice)
+ ? eConsole
+ : eCommunications;
+ // Refresh the selected rendering endpoint device using role
+ ret = _GetDefaultDevice(eRender, role, &_ptrDeviceOut);
+ }
+
+ if (ret != 0 || (_ptrDeviceOut == NULL)) {
+ RTC_LOG(LS_ERROR) << "failed to initialize the rendering enpoint device";
+ SAFE_RELEASE(_ptrDeviceOut);
+ return -1;
+ }
+
+ IAudioSessionManager* pManager = NULL;
+ ret = _ptrDeviceOut->Activate(__uuidof(IAudioSessionManager), CLSCTX_ALL,
+ NULL, (void**)&pManager);
+ if (ret != 0 || pManager == NULL) {
+ RTC_LOG(LS_ERROR) << "failed to initialize the render manager";
+ SAFE_RELEASE(pManager);
+ return -1;
+ }
+
+ SAFE_RELEASE(_ptrRenderSimpleVolume);
+ ret = pManager->GetSimpleAudioVolume(NULL, FALSE, &_ptrRenderSimpleVolume);
+ if (ret != 0 || _ptrRenderSimpleVolume == NULL) {
+ RTC_LOG(LS_ERROR) << "failed to initialize the render simple volume";
+ SAFE_RELEASE(pManager);
+ SAFE_RELEASE(_ptrRenderSimpleVolume);
+ return -1;
+ }
+ SAFE_RELEASE(pManager);
+
+ _speakerIsInitialized = true;
+
+ return 0;
+}
+
+// ----------------------------------------------------------------------------
+// InitMicrophone
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::InitMicrophone() {
+ MutexLock lock(&mutex_);
+ return InitMicrophoneLocked();
+}
+
+int32_t AudioDeviceWindowsCore::InitMicrophoneLocked() {
+ if (_recording) {
+ return -1;
+ }
+
+ if (_ptrDeviceIn == NULL) {
+ return -1;
+ }
+
+ if (_usingInputDeviceIndex) {
+ int16_t nDevices = RecordingDevicesLocked();
+ if (_inputDeviceIndex > (nDevices - 1)) {
+ RTC_LOG(LS_ERROR) << "current device selection is invalid => unable to"
+ " initialize";
+ return -1;
+ }
+ }
+
+ int32_t ret(0);
+
+ SAFE_RELEASE(_ptrDeviceIn);
+ if (_usingInputDeviceIndex) {
+ // Refresh the selected capture endpoint device using current index
+ ret = _GetListDevice(eCapture, _inputDeviceIndex, &_ptrDeviceIn);
+ } else {
+ const ERole role = (_inputDevice == AudioDeviceModule::kDefaultDevice)
+ ? eConsole
+ : eCommunications;
+ // Refresh the selected capture endpoint device using role
+ ret = _GetDefaultDevice(eCapture, role, &_ptrDeviceIn);
+ }
+
+ if (ret != 0 || (_ptrDeviceIn == NULL)) {
+ RTC_LOG(LS_ERROR) << "failed to initialize the capturing enpoint device";
+ SAFE_RELEASE(_ptrDeviceIn);
+ return -1;
+ }
+
+ SAFE_RELEASE(_ptrCaptureVolume);
+ ret = _ptrDeviceIn->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL, NULL,
+ reinterpret_cast<void**>(&_ptrCaptureVolume));
+ if (ret != 0 || _ptrCaptureVolume == NULL) {
+ RTC_LOG(LS_ERROR) << "failed to initialize the capture volume";
+ SAFE_RELEASE(_ptrCaptureVolume);
+ return -1;
+ }
+
+ _microphoneIsInitialized = true;
+
+ return 0;
+}
+
+// ----------------------------------------------------------------------------
+// SpeakerIsInitialized
+// ----------------------------------------------------------------------------
+
+bool AudioDeviceWindowsCore::SpeakerIsInitialized() const {
+ return (_speakerIsInitialized);
+}
+
+// ----------------------------------------------------------------------------
+// MicrophoneIsInitialized
+// ----------------------------------------------------------------------------
+
+bool AudioDeviceWindowsCore::MicrophoneIsInitialized() const {
+ return (_microphoneIsInitialized);
+}
+
+// ----------------------------------------------------------------------------
+// SpeakerVolumeIsAvailable
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::SpeakerVolumeIsAvailable(bool& available) {
+ MutexLock lock(&mutex_);
+
+ if (_ptrDeviceOut == NULL) {
+ return -1;
+ }
+
+ HRESULT hr = S_OK;
+ IAudioSessionManager* pManager = NULL;
+ ISimpleAudioVolume* pVolume = NULL;
+
+ hr = _ptrDeviceOut->Activate(__uuidof(IAudioSessionManager), CLSCTX_ALL, NULL,
+ (void**)&pManager);
+ EXIT_ON_ERROR(hr);
+
+ hr = pManager->GetSimpleAudioVolume(NULL, FALSE, &pVolume);
+ EXIT_ON_ERROR(hr);
+
+ float volume(0.0f);
+ hr = pVolume->GetMasterVolume(&volume);
+ available = SUCCEEDED(hr);
+
+ SAFE_RELEASE(pManager);
+ SAFE_RELEASE(pVolume);
+
+ return 0;
+
+Exit:
+ _TraceCOMError(hr);
+ SAFE_RELEASE(pManager);
+ SAFE_RELEASE(pVolume);
+ return -1;
+}
+
+// ----------------------------------------------------------------------------
+// SetSpeakerVolume
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::SetSpeakerVolume(uint32_t volume) {
+ {
+ MutexLock lock(&mutex_);
+
+ if (!_speakerIsInitialized) {
+ return -1;
+ }
+
+ if (_ptrDeviceOut == NULL) {
+ return -1;
+ }
+ }
+
+ if (volume < static_cast<uint32_t>(MIN_CORE_SPEAKER_VOLUME) ||
+ volume > static_cast<uint32_t>(MAX_CORE_SPEAKER_VOLUME)) {
+ return -1;
+ }
+
+ HRESULT hr = S_OK;
+
+ // scale input volume to valid range (0.0 to 1.0)
+ const float fLevel = static_cast<float>(volume) / MAX_CORE_SPEAKER_VOLUME;
+ volume_mutex_.Lock();
+ hr = _ptrRenderSimpleVolume->SetMasterVolume(fLevel, NULL);
+ volume_mutex_.Unlock();
+ EXIT_ON_ERROR(hr);
+
+ return 0;
+
+Exit:
+ _TraceCOMError(hr);
+ return -1;
+}
+
+// ----------------------------------------------------------------------------
+// SpeakerVolume
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::SpeakerVolume(uint32_t& volume) const {
+ {
+ MutexLock lock(&mutex_);
+
+ if (!_speakerIsInitialized) {
+ return -1;
+ }
+
+ if (_ptrDeviceOut == NULL) {
+ return -1;
+ }
+ }
+
+ HRESULT hr = S_OK;
+ float fLevel(0.0f);
+
+ volume_mutex_.Lock();
+ hr = _ptrRenderSimpleVolume->GetMasterVolume(&fLevel);
+ volume_mutex_.Unlock();
+ EXIT_ON_ERROR(hr);
+
+ // scale input volume range [0.0,1.0] to valid output range
+ volume = static_cast<uint32_t>(fLevel * MAX_CORE_SPEAKER_VOLUME);
+
+ return 0;
+
+Exit:
+ _TraceCOMError(hr);
+ return -1;
+}
+
+// ----------------------------------------------------------------------------
+// MaxSpeakerVolume
+//
+// The internal range for Core Audio is 0.0 to 1.0, where 0.0 indicates
+// silence and 1.0 indicates full volume (no attenuation).
+// We add our (webrtc-internal) own max level to match the Wave API and
+// how it is used today in VoE.
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::MaxSpeakerVolume(uint32_t& maxVolume) const {
+ if (!_speakerIsInitialized) {
+ return -1;
+ }
+
+ maxVolume = static_cast<uint32_t>(MAX_CORE_SPEAKER_VOLUME);
+
+ return 0;
+}
+
+// ----------------------------------------------------------------------------
+// MinSpeakerVolume
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::MinSpeakerVolume(uint32_t& minVolume) const {
+ if (!_speakerIsInitialized) {
+ return -1;
+ }
+
+ minVolume = static_cast<uint32_t>(MIN_CORE_SPEAKER_VOLUME);
+
+ return 0;
+}
+
+// ----------------------------------------------------------------------------
+// SpeakerMuteIsAvailable
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::SpeakerMuteIsAvailable(bool& available) {
+ MutexLock lock(&mutex_);
+
+ if (_ptrDeviceOut == NULL) {
+ return -1;
+ }
+
+ HRESULT hr = S_OK;
+ IAudioEndpointVolume* pVolume = NULL;
+
+ // Query the speaker system mute state.
+ hr = _ptrDeviceOut->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL, NULL,
+ reinterpret_cast<void**>(&pVolume));
+ EXIT_ON_ERROR(hr);
+
+ BOOL mute;
+ hr = pVolume->GetMute(&mute);
+ available = SUCCEEDED(hr);
+
+ SAFE_RELEASE(pVolume);
+
+ return 0;
+
+Exit:
+ _TraceCOMError(hr);
+ SAFE_RELEASE(pVolume);
+ return -1;
+}
+
+// ----------------------------------------------------------------------------
+// SetSpeakerMute
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::SetSpeakerMute(bool enable) {
+ MutexLock lock(&mutex_);
+
+ if (!_speakerIsInitialized) {
+ return -1;
+ }
+
+ if (_ptrDeviceOut == NULL) {
+ return -1;
+ }
+
+ HRESULT hr = S_OK;
+ IAudioEndpointVolume* pVolume = NULL;
+
+ // Set the speaker system mute state.
+ hr = _ptrDeviceOut->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL, NULL,
+ reinterpret_cast<void**>(&pVolume));
+ EXIT_ON_ERROR(hr);
+
+ const BOOL mute(enable);
+ hr = pVolume->SetMute(mute, NULL);
+ EXIT_ON_ERROR(hr);
+
+ SAFE_RELEASE(pVolume);
+
+ return 0;
+
+Exit:
+ _TraceCOMError(hr);
+ SAFE_RELEASE(pVolume);
+ return -1;
+}
+
+// ----------------------------------------------------------------------------
+// SpeakerMute
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::SpeakerMute(bool& enabled) const {
+ if (!_speakerIsInitialized) {
+ return -1;
+ }
+
+ if (_ptrDeviceOut == NULL) {
+ return -1;
+ }
+
+ HRESULT hr = S_OK;
+ IAudioEndpointVolume* pVolume = NULL;
+
+ // Query the speaker system mute state.
+ hr = _ptrDeviceOut->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL, NULL,
+ reinterpret_cast<void**>(&pVolume));
+ EXIT_ON_ERROR(hr);
+
+ BOOL mute;
+ hr = pVolume->GetMute(&mute);
+ EXIT_ON_ERROR(hr);
+
+ enabled = (mute == TRUE);
+
+ SAFE_RELEASE(pVolume);
+
+ return 0;
+
+Exit:
+ _TraceCOMError(hr);
+ SAFE_RELEASE(pVolume);
+ return -1;
+}
+
+// ----------------------------------------------------------------------------
+// MicrophoneMuteIsAvailable
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::MicrophoneMuteIsAvailable(bool& available) {
+ MutexLock lock(&mutex_);
+
+ if (_ptrDeviceIn == NULL) {
+ return -1;
+ }
+
+ HRESULT hr = S_OK;
+ IAudioEndpointVolume* pVolume = NULL;
+
+ // Query the microphone system mute state.
+ hr = _ptrDeviceIn->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL, NULL,
+ reinterpret_cast<void**>(&pVolume));
+ EXIT_ON_ERROR(hr);
+
+ BOOL mute;
+ hr = pVolume->GetMute(&mute);
+ available = SUCCEEDED(hr);
+
+ SAFE_RELEASE(pVolume);
+ return 0;
+
+Exit:
+ _TraceCOMError(hr);
+ SAFE_RELEASE(pVolume);
+ return -1;
+}
+
+// ----------------------------------------------------------------------------
+// SetMicrophoneMute
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::SetMicrophoneMute(bool enable) {
+ if (!_microphoneIsInitialized) {
+ return -1;
+ }
+
+ if (_ptrDeviceIn == NULL) {
+ return -1;
+ }
+
+ HRESULT hr = S_OK;
+ IAudioEndpointVolume* pVolume = NULL;
+
+ // Set the microphone system mute state.
+ hr = _ptrDeviceIn->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL, NULL,
+ reinterpret_cast<void**>(&pVolume));
+ EXIT_ON_ERROR(hr);
+
+ const BOOL mute(enable);
+ hr = pVolume->SetMute(mute, NULL);
+ EXIT_ON_ERROR(hr);
+
+ SAFE_RELEASE(pVolume);
+ return 0;
+
+Exit:
+ _TraceCOMError(hr);
+ SAFE_RELEASE(pVolume);
+ return -1;
+}
+
+// ----------------------------------------------------------------------------
+// MicrophoneMute
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::MicrophoneMute(bool& enabled) const {
+ if (!_microphoneIsInitialized) {
+ return -1;
+ }
+
+ HRESULT hr = S_OK;
+ IAudioEndpointVolume* pVolume = NULL;
+
+ // Query the microphone system mute state.
+ hr = _ptrDeviceIn->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL, NULL,
+ reinterpret_cast<void**>(&pVolume));
+ EXIT_ON_ERROR(hr);
+
+ BOOL mute;
+ hr = pVolume->GetMute(&mute);
+ EXIT_ON_ERROR(hr);
+
+ enabled = (mute == TRUE);
+
+ SAFE_RELEASE(pVolume);
+ return 0;
+
+Exit:
+ _TraceCOMError(hr);
+ SAFE_RELEASE(pVolume);
+ return -1;
+}
+
+// ----------------------------------------------------------------------------
+// StereoRecordingIsAvailable
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::StereoRecordingIsAvailable(bool& available) {
+ available = true;
+ return 0;
+}
+
+// ----------------------------------------------------------------------------
+// SetStereoRecording
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::SetStereoRecording(bool enable) {
+ MutexLock lock(&mutex_);
+
+ if (enable) {
+ _recChannelsPrioList[0] = 2; // try stereo first
+ _recChannelsPrioList[1] = 1;
+ _recChannels = 2;
+ } else {
+ _recChannelsPrioList[0] = 1; // try mono first
+ _recChannelsPrioList[1] = 2;
+ _recChannels = 1;
+ }
+
+ return 0;
+}
+
+// ----------------------------------------------------------------------------
+// StereoRecording
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::StereoRecording(bool& enabled) const {
+ enabled = (_recChannels == 2);
+
+ return 0;
+}
+
+// ----------------------------------------------------------------------------
+// StereoPlayoutIsAvailable
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::StereoPlayoutIsAvailable(bool& available) {
+ available = true;
+ return 0;
+}
+
+// ----------------------------------------------------------------------------
+// SetStereoPlayout
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::SetStereoPlayout(bool enable) {
+ MutexLock lock(&mutex_);
+
+ if (enable) {
+ _playChannelsPrioList[0] = 2; // try stereo first
+ _playChannelsPrioList[1] = 1;
+ _playChannels = 2;
+ } else {
+ _playChannelsPrioList[0] = 1; // try mono first
+ _playChannelsPrioList[1] = 2;
+ _playChannels = 1;
+ }
+
+ return 0;
+}
+
+// ----------------------------------------------------------------------------
+// StereoPlayout
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::StereoPlayout(bool& enabled) const {
+ enabled = (_playChannels == 2);
+
+ return 0;
+}
+
+// ----------------------------------------------------------------------------
+// MicrophoneVolumeIsAvailable
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::MicrophoneVolumeIsAvailable(bool& available) {
+ MutexLock lock(&mutex_);
+
+ if (_ptrDeviceIn == NULL) {
+ return -1;
+ }
+
+ HRESULT hr = S_OK;
+ IAudioEndpointVolume* pVolume = NULL;
+
+ hr = _ptrDeviceIn->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL, NULL,
+ reinterpret_cast<void**>(&pVolume));
+ EXIT_ON_ERROR(hr);
+
+ float volume(0.0f);
+ hr = pVolume->GetMasterVolumeLevelScalar(&volume);
+ available = SUCCEEDED(hr);
+
+ SAFE_RELEASE(pVolume);
+ return 0;
+
+Exit:
+ _TraceCOMError(hr);
+ SAFE_RELEASE(pVolume);
+ return -1;
+}
+
+// ----------------------------------------------------------------------------
+// SetMicrophoneVolume
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::SetMicrophoneVolume(uint32_t volume) {
+ RTC_LOG(LS_VERBOSE) << "AudioDeviceWindowsCore::SetMicrophoneVolume(volume="
+ << volume << ")";
+
+ {
+ MutexLock lock(&mutex_);
+
+ if (!_microphoneIsInitialized) {
+ return -1;
+ }
+
+ if (_ptrDeviceIn == NULL) {
+ return -1;
+ }
+ }
+
+ if (volume < static_cast<uint32_t>(MIN_CORE_MICROPHONE_VOLUME) ||
+ volume > static_cast<uint32_t>(MAX_CORE_MICROPHONE_VOLUME)) {
+ return -1;
+ }
+
+ HRESULT hr = S_OK;
+ // scale input volume to valid range (0.0 to 1.0)
+ const float fLevel = static_cast<float>(volume) / MAX_CORE_MICROPHONE_VOLUME;
+ volume_mutex_.Lock();
+ hr = _ptrCaptureVolume->SetMasterVolumeLevelScalar(fLevel, NULL);
+ volume_mutex_.Unlock();
+ EXIT_ON_ERROR(hr);
+
+ return 0;
+
+Exit:
+ _TraceCOMError(hr);
+ return -1;
+}
+
+// ----------------------------------------------------------------------------
+// MicrophoneVolume
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::MicrophoneVolume(uint32_t& volume) const {
+ {
+ MutexLock lock(&mutex_);
+
+ if (!_microphoneIsInitialized) {
+ return -1;
+ }
+
+ if (_ptrDeviceIn == NULL) {
+ return -1;
+ }
+ }
+
+ HRESULT hr = S_OK;
+ float fLevel(0.0f);
+ volume = 0;
+ volume_mutex_.Lock();
+ hr = _ptrCaptureVolume->GetMasterVolumeLevelScalar(&fLevel);
+ volume_mutex_.Unlock();
+ EXIT_ON_ERROR(hr);
+
+ // scale input volume range [0.0,1.0] to valid output range
+ volume = static_cast<uint32_t>(fLevel * MAX_CORE_MICROPHONE_VOLUME);
+
+ return 0;
+
+Exit:
+ _TraceCOMError(hr);
+ return -1;
+}
+
+// ----------------------------------------------------------------------------
+// MaxMicrophoneVolume
+//
+// The internal range for Core Audio is 0.0 to 1.0, where 0.0 indicates
+// silence and 1.0 indicates full volume (no attenuation).
+// We add our (webrtc-internal) own max level to match the Wave API and
+// how it is used today in VoE.
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::MaxMicrophoneVolume(uint32_t& maxVolume) const {
+ RTC_DLOG(LS_VERBOSE) << __FUNCTION__;
+
+ if (!_microphoneIsInitialized) {
+ return -1;
+ }
+
+ maxVolume = static_cast<uint32_t>(MAX_CORE_MICROPHONE_VOLUME);
+
+ return 0;
+}
+
+// ----------------------------------------------------------------------------
+// MinMicrophoneVolume
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::MinMicrophoneVolume(uint32_t& minVolume) const {
+ if (!_microphoneIsInitialized) {
+ return -1;
+ }
+
+ minVolume = static_cast<uint32_t>(MIN_CORE_MICROPHONE_VOLUME);
+
+ return 0;
+}
+
+// ----------------------------------------------------------------------------
+// PlayoutDevices
+// ----------------------------------------------------------------------------
+int16_t AudioDeviceWindowsCore::PlayoutDevices() {
+ MutexLock lock(&mutex_);
+ return PlayoutDevicesLocked();
+}
+
+int16_t AudioDeviceWindowsCore::PlayoutDevicesLocked() {
+ if (_RefreshDeviceList(eRender) != -1) {
+ return (_DeviceListCount(eRender));
+ }
+
+ return -1;
+}
+
+// ----------------------------------------------------------------------------
+// SetPlayoutDevice I (II)
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::SetPlayoutDevice(uint16_t index) {
+ if (_playIsInitialized) {
+ return -1;
+ }
+
+ // Get current number of available rendering endpoint devices and refresh the
+ // rendering collection.
+ UINT nDevices = PlayoutDevices();
+
+ if (index >= nDevices) {
+ RTC_LOG(LS_ERROR) << "device index is out of range [0," << (nDevices - 1)
+ << "]";
+ return -1;
+ }
+
+ MutexLock lock(&mutex_);
+
+ HRESULT hr(S_OK);
+
+ RTC_DCHECK(_ptrRenderCollection);
+
+ // Select an endpoint rendering device given the specified index
+ SAFE_RELEASE(_ptrDeviceOut);
+ hr = _ptrRenderCollection->Item(index, &_ptrDeviceOut);
+ if (FAILED(hr)) {
+ _TraceCOMError(hr);
+ SAFE_RELEASE(_ptrDeviceOut);
+ return -1;
+ }
+
+ WCHAR szDeviceName[MAX_PATH];
+ const int bufferLen = sizeof(szDeviceName) / sizeof(szDeviceName[0]);
+
+ // Get the endpoint device's friendly-name
+ if (_GetDeviceName(_ptrDeviceOut, szDeviceName, bufferLen) == 0) {
+ RTC_LOG(LS_VERBOSE) << "friendly name: \"" << szDeviceName << "\"";
+ }
+
+ _usingOutputDeviceIndex = true;
+ _outputDeviceIndex = index;
+
+ return 0;
+}
+
+// ----------------------------------------------------------------------------
+// SetPlayoutDevice II (II)
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::SetPlayoutDevice(
+ AudioDeviceModule::WindowsDeviceType device) {
+ if (_playIsInitialized) {
+ return -1;
+ }
+
+ ERole role(eCommunications);
+
+ if (device == AudioDeviceModule::kDefaultDevice) {
+ role = eConsole;
+ } else if (device == AudioDeviceModule::kDefaultCommunicationDevice) {
+ role = eCommunications;
+ }
+
+ MutexLock lock(&mutex_);
+
+ // Refresh the list of rendering endpoint devices
+ _RefreshDeviceList(eRender);
+
+ HRESULT hr(S_OK);
+
+ RTC_DCHECK(_ptrEnumerator);
+
+ // Select an endpoint rendering device given the specified role
+ SAFE_RELEASE(_ptrDeviceOut);
+ hr = _ptrEnumerator->GetDefaultAudioEndpoint(eRender, role, &_ptrDeviceOut);
+ if (FAILED(hr)) {
+ _TraceCOMError(hr);
+ SAFE_RELEASE(_ptrDeviceOut);
+ return -1;
+ }
+
+ WCHAR szDeviceName[MAX_PATH];
+ const int bufferLen = sizeof(szDeviceName) / sizeof(szDeviceName[0]);
+
+ // Get the endpoint device's friendly-name
+ if (_GetDeviceName(_ptrDeviceOut, szDeviceName, bufferLen) == 0) {
+ RTC_LOG(LS_VERBOSE) << "friendly name: \"" << szDeviceName << "\"";
+ }
+
+ _usingOutputDeviceIndex = false;
+ _outputDevice = device;
+
+ return 0;
+}
+
+// ----------------------------------------------------------------------------
+// PlayoutDeviceName
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::PlayoutDeviceName(
+ uint16_t index,
+ char name[kAdmMaxDeviceNameSize],
+ char guid[kAdmMaxGuidSize]) {
+ bool defaultCommunicationDevice(false);
+ const int16_t nDevices(PlayoutDevices()); // also updates the list of devices
+
+ // Special case: an index of -1 selects the Default Communication Device.
+ if (index == (uint16_t)(-1)) {
+ defaultCommunicationDevice = true;
+ index = 0;
+ RTC_LOG(LS_VERBOSE) << "Default Communication endpoint device will be used";
+ }
+
+ if ((index > (nDevices - 1)) || (name == NULL)) {
+ return -1;
+ }
+
+ memset(name, 0, kAdmMaxDeviceNameSize);
+
+ if (guid != NULL) {
+ memset(guid, 0, kAdmMaxGuidSize);
+ }
+
+ MutexLock lock(&mutex_);
+
+ int32_t ret(-1);
+ WCHAR szDeviceName[MAX_PATH];
+ const int bufferLen = sizeof(szDeviceName) / sizeof(szDeviceName[0]);
+
+ // Get the endpoint device's friendly-name
+ if (defaultCommunicationDevice) {
+ ret = _GetDefaultDeviceName(eRender, eCommunications, szDeviceName,
+ bufferLen);
+ } else {
+ ret = _GetListDeviceName(eRender, index, szDeviceName, bufferLen);
+ }
+
+ if (ret == 0) {
+ // Convert the endpoint device's friendly-name to UTF-8
+ if (WideCharToMultiByte(CP_UTF8, 0, szDeviceName, -1, name,
+ kAdmMaxDeviceNameSize, NULL, NULL) == 0) {
+ RTC_LOG(LS_ERROR)
+ << "WideCharToMultiByte(CP_UTF8) failed with error code "
+ << GetLastError();
+ }
+ }
+
+ // Get the endpoint ID string (uniquely identifies the device among all audio
+ // endpoint devices)
+ if (defaultCommunicationDevice) {
+ ret =
+ _GetDefaultDeviceID(eRender, eCommunications, szDeviceName, bufferLen);
+ } else {
+ ret = _GetListDeviceID(eRender, index, szDeviceName, bufferLen);
+ }
+
+ if (guid != NULL && ret == 0) {
+ // Convert the endpoint device's ID string to UTF-8
+ if (WideCharToMultiByte(CP_UTF8, 0, szDeviceName, -1, guid, kAdmMaxGuidSize,
+ NULL, NULL) == 0) {
+ RTC_LOG(LS_ERROR)
+ << "WideCharToMultiByte(CP_UTF8) failed with error code "
+ << GetLastError();
+ }
+ }
+
+ return ret;
+}
+
+// ----------------------------------------------------------------------------
+// RecordingDeviceName
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::RecordingDeviceName(
+ uint16_t index,
+ char name[kAdmMaxDeviceNameSize],
+ char guid[kAdmMaxGuidSize]) {
+ bool defaultCommunicationDevice(false);
+ const int16_t nDevices(
+ RecordingDevices()); // also updates the list of devices
+
+ // Special case: an index of -1 selects the Default Communication Device.
+ if (index == (uint16_t)(-1)) {
+ defaultCommunicationDevice = true;
+ index = 0;
+ RTC_LOG(LS_VERBOSE) << "Default Communication endpoint device will be used";
+ }
+
+ if ((index > (nDevices - 1)) || (name == NULL)) {
+ return -1;
+ }
+
+ memset(name, 0, kAdmMaxDeviceNameSize);
+
+ if (guid != NULL) {
+ memset(guid, 0, kAdmMaxGuidSize);
+ }
+
+ MutexLock lock(&mutex_);
+
+ int32_t ret(-1);
+ WCHAR szDeviceName[MAX_PATH];
+ const int bufferLen = sizeof(szDeviceName) / sizeof(szDeviceName[0]);
+
+ // Get the endpoint device's friendly-name
+ if (defaultCommunicationDevice) {
+ ret = _GetDefaultDeviceName(eCapture, eCommunications, szDeviceName,
+ bufferLen);
+ } else {
+ ret = _GetListDeviceName(eCapture, index, szDeviceName, bufferLen);
+ }
+
+ if (ret == 0) {
+ // Convert the endpoint device's friendly-name to UTF-8
+ if (WideCharToMultiByte(CP_UTF8, 0, szDeviceName, -1, name,
+ kAdmMaxDeviceNameSize, NULL, NULL) == 0) {
+ RTC_LOG(LS_ERROR)
+ << "WideCharToMultiByte(CP_UTF8) failed with error code "
+ << GetLastError();
+ }
+ }
+
+ // Get the endpoint ID string (uniquely identifies the device among all audio
+ // endpoint devices)
+ if (defaultCommunicationDevice) {
+ ret =
+ _GetDefaultDeviceID(eCapture, eCommunications, szDeviceName, bufferLen);
+ } else {
+ ret = _GetListDeviceID(eCapture, index, szDeviceName, bufferLen);
+ }
+
+ if (guid != NULL && ret == 0) {
+ // Convert the endpoint device's ID string to UTF-8
+ if (WideCharToMultiByte(CP_UTF8, 0, szDeviceName, -1, guid, kAdmMaxGuidSize,
+ NULL, NULL) == 0) {
+ RTC_LOG(LS_ERROR)
+ << "WideCharToMultiByte(CP_UTF8) failed with error code "
+ << GetLastError();
+ }
+ }
+
+ return ret;
+}
+
+// ----------------------------------------------------------------------------
+// RecordingDevices
+// ----------------------------------------------------------------------------
+
+int16_t AudioDeviceWindowsCore::RecordingDevices() {
+ MutexLock lock(&mutex_);
+ return RecordingDevicesLocked();
+}
+
+int16_t AudioDeviceWindowsCore::RecordingDevicesLocked() {
+ if (_RefreshDeviceList(eCapture) != -1) {
+ return (_DeviceListCount(eCapture));
+ }
+
+ return -1;
+}
+
+// ----------------------------------------------------------------------------
+// SetRecordingDevice I (II)
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::SetRecordingDevice(uint16_t index) {
+ if (_recIsInitialized) {
+ return -1;
+ }
+
+ // Get current number of available capture endpoint devices and refresh the
+ // capture collection.
+ UINT nDevices = RecordingDevices();
+
+ if (index >= nDevices) {
+ RTC_LOG(LS_ERROR) << "device index is out of range [0," << (nDevices - 1)
+ << "]";
+ return -1;
+ }
+
+ MutexLock lock(&mutex_);
+
+ HRESULT hr(S_OK);
+
+ RTC_DCHECK(_ptrCaptureCollection);
+
+ // Select an endpoint capture device given the specified index
+ SAFE_RELEASE(_ptrDeviceIn);
+ hr = _ptrCaptureCollection->Item(index, &_ptrDeviceIn);
+ if (FAILED(hr)) {
+ _TraceCOMError(hr);
+ SAFE_RELEASE(_ptrDeviceIn);
+ return -1;
+ }
+
+ WCHAR szDeviceName[MAX_PATH];
+ const int bufferLen = sizeof(szDeviceName) / sizeof(szDeviceName[0]);
+
+ // Get the endpoint device's friendly-name
+ if (_GetDeviceName(_ptrDeviceIn, szDeviceName, bufferLen) == 0) {
+ RTC_LOG(LS_VERBOSE) << "friendly name: \"" << szDeviceName << "\"";
+ }
+
+ _usingInputDeviceIndex = true;
+ _inputDeviceIndex = index;
+
+ return 0;
+}
+
+// ----------------------------------------------------------------------------
+// SetRecordingDevice II (II)
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::SetRecordingDevice(
+ AudioDeviceModule::WindowsDeviceType device) {
+ if (_recIsInitialized) {
+ return -1;
+ }
+
+ ERole role(eCommunications);
+
+ if (device == AudioDeviceModule::kDefaultDevice) {
+ role = eConsole;
+ } else if (device == AudioDeviceModule::kDefaultCommunicationDevice) {
+ role = eCommunications;
+ }
+
+ MutexLock lock(&mutex_);
+
+ // Refresh the list of capture endpoint devices
+ _RefreshDeviceList(eCapture);
+
+ HRESULT hr(S_OK);
+
+ RTC_DCHECK(_ptrEnumerator);
+
+ // Select an endpoint capture device given the specified role
+ SAFE_RELEASE(_ptrDeviceIn);
+ hr = _ptrEnumerator->GetDefaultAudioEndpoint(eCapture, role, &_ptrDeviceIn);
+ if (FAILED(hr)) {
+ _TraceCOMError(hr);
+ SAFE_RELEASE(_ptrDeviceIn);
+ return -1;
+ }
+
+ WCHAR szDeviceName[MAX_PATH];
+ const int bufferLen = sizeof(szDeviceName) / sizeof(szDeviceName[0]);
+
+ // Get the endpoint device's friendly-name
+ if (_GetDeviceName(_ptrDeviceIn, szDeviceName, bufferLen) == 0) {
+ RTC_LOG(LS_VERBOSE) << "friendly name: \"" << szDeviceName << "\"";
+ }
+
+ _usingInputDeviceIndex = false;
+ _inputDevice = device;
+
+ return 0;
+}
+
+// ----------------------------------------------------------------------------
+// PlayoutIsAvailable
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::PlayoutIsAvailable(bool& available) {
+ available = false;
+
+ // Try to initialize the playout side
+ int32_t res = InitPlayout();
+
+ // Cancel effect of initialization
+ StopPlayout();
+
+ if (res != -1) {
+ available = true;
+ }
+
+ return 0;
+}
+
+// ----------------------------------------------------------------------------
+// RecordingIsAvailable
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::RecordingIsAvailable(bool& available) {
+ available = false;
+
+ // Try to initialize the recording side
+ int32_t res = InitRecording();
+
+ // Cancel effect of initialization
+ StopRecording();
+
+ if (res != -1) {
+ available = true;
+ }
+
+ return 0;
+}
+
+// ----------------------------------------------------------------------------
+// InitPlayout
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::InitPlayout() {
+ MutexLock lock(&mutex_);
+
+ if (_playing) {
+ return -1;
+ }
+
+ if (_playIsInitialized) {
+ return 0;
+ }
+
+ if (_ptrDeviceOut == NULL) {
+ return -1;
+ }
+
+ // Initialize the speaker (devices might have been added or removed)
+ if (InitSpeakerLocked() == -1) {
+ RTC_LOG(LS_WARNING) << "InitSpeaker() failed";
+ }
+
+ // Ensure that the updated rendering endpoint device is valid
+ if (_ptrDeviceOut == NULL) {
+ return -1;
+ }
+
+ if (_builtInAecEnabled && _recIsInitialized) {
+ // Ensure the correct render device is configured in case
+ // InitRecording() was called before InitPlayout().
+ if (SetDMOProperties() == -1) {
+ return -1;
+ }
+ }
+
+ HRESULT hr = S_OK;
+ WAVEFORMATEX* pWfxOut = NULL;
+ WAVEFORMATEX Wfx = WAVEFORMATEX();
+ WAVEFORMATEX* pWfxClosestMatch = NULL;
+
+ // Create COM object with IAudioClient interface.
+ SAFE_RELEASE(_ptrClientOut);
+ hr = _ptrDeviceOut->Activate(__uuidof(IAudioClient), CLSCTX_ALL, NULL,
+ (void**)&_ptrClientOut);
+ EXIT_ON_ERROR(hr);
+
+ // Retrieve the stream format that the audio engine uses for its internal
+ // processing (mixing) of shared-mode streams.
+ hr = _ptrClientOut->GetMixFormat(&pWfxOut);
+ if (SUCCEEDED(hr)) {
+ RTC_LOG(LS_VERBOSE) << "Audio Engine's current rendering mix format:";
+ // format type
+ RTC_LOG(LS_VERBOSE) << "wFormatTag : 0x"
+ << rtc::ToHex(pWfxOut->wFormatTag) << " ("
+ << pWfxOut->wFormatTag << ")";
+ // number of channels (i.e. mono, stereo...)
+ RTC_LOG(LS_VERBOSE) << "nChannels : " << pWfxOut->nChannels;
+ // sample rate
+ RTC_LOG(LS_VERBOSE) << "nSamplesPerSec : " << pWfxOut->nSamplesPerSec;
+ // for buffer estimation
+ RTC_LOG(LS_VERBOSE) << "nAvgBytesPerSec: " << pWfxOut->nAvgBytesPerSec;
+ // block size of data
+ RTC_LOG(LS_VERBOSE) << "nBlockAlign : " << pWfxOut->nBlockAlign;
+ // number of bits per sample of mono data
+ RTC_LOG(LS_VERBOSE) << "wBitsPerSample : " << pWfxOut->wBitsPerSample;
+ RTC_LOG(LS_VERBOSE) << "cbSize : " << pWfxOut->cbSize;
+ }
+
+ // Set wave format
+ Wfx.wFormatTag = WAVE_FORMAT_PCM;
+ Wfx.wBitsPerSample = 16;
+ Wfx.cbSize = 0;
+
+ const int freqs[] = {48000, 44100, 16000, 96000, 32000, 8000};
+ hr = S_FALSE;
+
+ // Iterate over frequencies and channels, in order of priority
+ for (unsigned int freq = 0; freq < sizeof(freqs) / sizeof(freqs[0]); freq++) {
+ for (unsigned int chan = 0; chan < sizeof(_playChannelsPrioList) /
+ sizeof(_playChannelsPrioList[0]);
+ chan++) {
+ Wfx.nChannels = _playChannelsPrioList[chan];
+ Wfx.nSamplesPerSec = freqs[freq];
+ Wfx.nBlockAlign = Wfx.nChannels * Wfx.wBitsPerSample / 8;
+ Wfx.nAvgBytesPerSec = Wfx.nSamplesPerSec * Wfx.nBlockAlign;
+ // If the method succeeds and the audio endpoint device supports the
+ // specified stream format, it returns S_OK. If the method succeeds and
+ // provides a closest match to the specified format, it returns S_FALSE.
+ hr = _ptrClientOut->IsFormatSupported(AUDCLNT_SHAREMODE_SHARED, &Wfx,
+ &pWfxClosestMatch);
+ if (hr == S_OK) {
+ break;
+ } else {
+ if (pWfxClosestMatch) {
+ RTC_LOG(LS_INFO) << "nChannels=" << Wfx.nChannels
+ << ", nSamplesPerSec=" << Wfx.nSamplesPerSec
+ << " is not supported. Closest match: "
+ "nChannels="
+ << pWfxClosestMatch->nChannels << ", nSamplesPerSec="
+ << pWfxClosestMatch->nSamplesPerSec;
+ CoTaskMemFree(pWfxClosestMatch);
+ pWfxClosestMatch = NULL;
+ } else {
+ RTC_LOG(LS_INFO) << "nChannels=" << Wfx.nChannels
+ << ", nSamplesPerSec=" << Wfx.nSamplesPerSec
+ << " is not supported. No closest match.";
+ }
+ }
+ }
+ if (hr == S_OK)
+ break;
+ }
+
+ // TODO(andrew): what happens in the event of failure in the above loop?
+ // Is _ptrClientOut->Initialize expected to fail?
+ // Same in InitRecording().
+ if (hr == S_OK) {
+ _playAudioFrameSize = Wfx.nBlockAlign;
+ // Block size is the number of samples per channel in 10 ms.
+ _playBlockSize = Wfx.nSamplesPerSec / 100;
+ _playSampleRate = Wfx.nSamplesPerSec;
+ // Store the sample rate the render stream will actually use.
+ _devicePlaySampleRate = Wfx.nSamplesPerSec;
+ _devicePlayBlockSize = Wfx.nSamplesPerSec / 100;
+ _playChannels = Wfx.nChannels;
+
+ RTC_LOG(LS_VERBOSE) << "VoE selected this rendering format:";
+ RTC_LOG(LS_VERBOSE) << "wFormatTag : 0x"
+ << rtc::ToHex(Wfx.wFormatTag) << " (" << Wfx.wFormatTag
+ << ")";
+ RTC_LOG(LS_VERBOSE) << "nChannels : " << Wfx.nChannels;
+ RTC_LOG(LS_VERBOSE) << "nSamplesPerSec : " << Wfx.nSamplesPerSec;
+ RTC_LOG(LS_VERBOSE) << "nAvgBytesPerSec : " << Wfx.nAvgBytesPerSec;
+ RTC_LOG(LS_VERBOSE) << "nBlockAlign : " << Wfx.nBlockAlign;
+ RTC_LOG(LS_VERBOSE) << "wBitsPerSample : " << Wfx.wBitsPerSample;
+ RTC_LOG(LS_VERBOSE) << "cbSize : " << Wfx.cbSize;
+ RTC_LOG(LS_VERBOSE) << "Additional settings:";
+ RTC_LOG(LS_VERBOSE) << "_playAudioFrameSize: " << _playAudioFrameSize;
+ RTC_LOG(LS_VERBOSE) << "_playBlockSize : " << _playBlockSize;
+ RTC_LOG(LS_VERBOSE) << "_playChannels : " << _playChannels;
+ }
+
+ // Create a rendering stream.
+ //
+ // ****************************************************************************
+ // For a shared-mode stream that uses event-driven buffering, the caller must
+ // set both hnsPeriodicity and hnsBufferDuration to 0. The Initialize method
+ // determines how large a buffer to allocate based on the scheduling period
+ // of the audio engine. Although the client's buffer processing thread is
+ // event driven, the basic buffer management process, as described previously,
+ // is unaltered.
+ // Each time the thread awakens, it should call
+ // IAudioClient::GetCurrentPadding to determine how much data to write to a
+ // rendering buffer or read from a capture buffer. In contrast to the two
+ // buffers that the Initialize method allocates for an exclusive-mode stream
+ // that uses event-driven buffering, a shared-mode stream requires a single
+ // buffer.
+ // ****************************************************************************
+ //
+ REFERENCE_TIME hnsBufferDuration =
+ 0; // ask for minimum buffer size (default)
+ if (_devicePlaySampleRate == 44100) {
+ // Ask for a larger buffer size (30ms) when using 44.1kHz as render rate.
+ // There seems to be a larger risk of underruns for 44.1 compared
+ // with the default rate (48kHz). When using default, we set the requested
+ // buffer duration to 0, which sets the buffer to the minimum size
+ // required by the engine thread. The actual buffer size can then be
+ // read by GetBufferSize() and it is 20ms on most machines.
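+ // 30 ms expressed in 100-nanosecond REFERENCE_TIME units.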
+ hnsBufferDuration = 30 * 10000;
+ }
+ hr = _ptrClientOut->Initialize(
+ AUDCLNT_SHAREMODE_SHARED, // share Audio Engine with other applications
+ AUDCLNT_STREAMFLAGS_EVENTCALLBACK, // processing of the audio buffer by
+ // the client will be event driven
+ hnsBufferDuration, // requested buffer capacity as a time value (in
+ // 100-nanosecond units)
+ 0, // periodicity
+ &Wfx, // selected wave format
+ NULL); // session GUID
+
+ if (FAILED(hr)) {
+ RTC_LOG(LS_ERROR) << "IAudioClient::Initialize() failed:";
+ }
+ EXIT_ON_ERROR(hr);
+
+ if (_ptrAudioBuffer) {
+ // Update the audio buffer with the selected parameters
+ _ptrAudioBuffer->SetPlayoutSampleRate(_playSampleRate);
+ _ptrAudioBuffer->SetPlayoutChannels((uint8_t)_playChannels);
+ } else {
+ // We can enter this state during CoreAudioIsSupported() when no
+ // AudioDeviceImplementation has been created, hence the AudioDeviceBuffer
+ // does not exist. It is OK to end up here since we don't initiate any media
+ // in CoreAudioIsSupported().
+ RTC_LOG(LS_VERBOSE)
+ << "AudioDeviceBuffer must be attached before streaming can start";
+ }
+
+ // Get the actual size of the shared (endpoint buffer).
+ // Typical value is 960 audio frames <=> 20ms @ 48kHz sample rate.
+ UINT bufferFrameCount(0);
+ hr = _ptrClientOut->GetBufferSize(&bufferFrameCount);
+ if (SUCCEEDED(hr)) {
+ RTC_LOG(LS_VERBOSE) << "IAudioClient::GetBufferSize() => "
+ << bufferFrameCount << " (<=> "
+ << bufferFrameCount * _playAudioFrameSize << " bytes)";
+ }
+
+ // Set the event handle that the system signals when an audio buffer is ready
+ // to be processed by the client.
+ hr = _ptrClientOut->SetEventHandle(_hRenderSamplesReadyEvent);
+ EXIT_ON_ERROR(hr);
+
+ // Get an IAudioRenderClient interface.
+ SAFE_RELEASE(_ptrRenderClient);
+ hr = _ptrClientOut->GetService(__uuidof(IAudioRenderClient),
+ (void**)&_ptrRenderClient);
+ EXIT_ON_ERROR(hr);
+
+ // Mark playout side as initialized
+ _playIsInitialized = true;
+
+ CoTaskMemFree(pWfxOut);
+ CoTaskMemFree(pWfxClosestMatch);
+
+ RTC_LOG(LS_VERBOSE) << "render side is now initialized";
+ return 0;
+
+Exit:
+ _TraceCOMError(hr);
+ CoTaskMemFree(pWfxOut);
+ CoTaskMemFree(pWfxClosestMatch);
+ SAFE_RELEASE(_ptrClientOut);
+ SAFE_RELEASE(_ptrRenderClient);
+ return -1;
+}
+
+// Capture initialization when the built-in AEC DirectX Media Object (DMO) is
+// used. Called from InitRecording(), most of which is skipped over. The DMO
+// handles device initialization itself.
+// Reference: http://msdn.microsoft.com/en-us/library/ff819492(v=vs.85).aspx
+int32_t AudioDeviceWindowsCore::InitRecordingDMO() {
+ RTC_DCHECK(_builtInAecEnabled);
+ RTC_DCHECK(_dmo);
+
+ if (SetDMOProperties() == -1) {
+ return -1;
+ }
+
+ DMO_MEDIA_TYPE mt = {};
+ HRESULT hr = MoInitMediaType(&mt, sizeof(WAVEFORMATEX));
+ if (FAILED(hr)) {
+ MoFreeMediaType(&mt);
+ _TraceCOMError(hr);
+ return -1;
+ }
+ mt.majortype = MEDIATYPE_Audio;
+ mt.subtype = MEDIASUBTYPE_PCM;
+ mt.formattype = FORMAT_WaveFormatEx;
+
+ // Supported formats
+ // nChannels: 1 (in AEC-only mode)
+ // nSamplesPerSec: 8000, 11025, 16000, 22050
+ // wBitsPerSample: 16
+ WAVEFORMATEX* ptrWav = reinterpret_cast<WAVEFORMATEX*>(mt.pbFormat);
+ ptrWav->wFormatTag = WAVE_FORMAT_PCM;
+ ptrWav->nChannels = 1;
+ // 16000 is the highest we can support with our resampler.
+ ptrWav->nSamplesPerSec = 16000;
+ ptrWav->nAvgBytesPerSec = 32000;
+ ptrWav->nBlockAlign = 2;
+ ptrWav->wBitsPerSample = 16;
+ ptrWav->cbSize = 0;
+
+ // Set the VoE format equal to the AEC output format.
+ _recAudioFrameSize = ptrWav->nBlockAlign;
+ _recSampleRate = ptrWav->nSamplesPerSec;
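+ // One block corresponds to 10 ms of audio.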
+ _recBlockSize = ptrWav->nSamplesPerSec / 100;
+ _recChannels = ptrWav->nChannels;
+
+ // Set the DMO output format parameters.
+ hr = _dmo->SetOutputType(kAecCaptureStreamIndex, &mt, 0);
+ MoFreeMediaType(&mt);
+ if (FAILED(hr)) {
+ _TraceCOMError(hr);
+ return -1;
+ }
+
+ if (_ptrAudioBuffer) {
+ _ptrAudioBuffer->SetRecordingSampleRate(_recSampleRate);
+ _ptrAudioBuffer->SetRecordingChannels(_recChannels);
+ } else {
+ // Refer to InitRecording() for comments.
+ RTC_LOG(LS_VERBOSE)
+ << "AudioDeviceBuffer must be attached before streaming can start";
+ }
+
+ _mediaBuffer = rtc::make_ref_counted<MediaBufferImpl>(_recBlockSize *
+ _recAudioFrameSize);
+
+ // Optional, but if called, must be after media types are set.
+ hr = _dmo->AllocateStreamingResources();
+ if (FAILED(hr)) {
+ _TraceCOMError(hr);
+ return -1;
+ }
+
+ _recIsInitialized = true;
+ RTC_LOG(LS_VERBOSE) << "Capture side is now initialized";
+
+ return 0;
+}
+
+// ----------------------------------------------------------------------------
+// InitRecording
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::InitRecording() {
+ MutexLock lock(&mutex_);
+
+ if (_recording) {
+ return -1;
+ }
+
+ if (_recIsInitialized) {
+ return 0;
+ }
+
+ if (QueryPerformanceFrequency(&_perfCounterFreq) == 0) {
+ return -1;
+ }
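+ // Factor used to convert performance-counter ticks into 100-nanosecond
+ // units (the resolution of REFERENCE_TIME).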
+ _perfCounterFactor = 10000000.0 / (double)_perfCounterFreq.QuadPart;
+
+ if (_ptrDeviceIn == NULL) {
+ return -1;
+ }
+
+ // Initialize the microphone (devices might have been added or removed)
+ if (InitMicrophoneLocked() == -1) {
+ RTC_LOG(LS_WARNING) << "InitMicrophone() failed";
+ }
+
+ // Ensure that the updated capturing endpoint device is valid
+ if (_ptrDeviceIn == NULL) {
+ return -1;
+ }
+
+ if (_builtInAecEnabled) {
+ // The DMO will configure the capture device.
+ return InitRecordingDMO();
+ }
+
+ HRESULT hr = S_OK;
+ WAVEFORMATEX* pWfxIn = NULL;
+ WAVEFORMATEXTENSIBLE Wfx = WAVEFORMATEXTENSIBLE();
+ WAVEFORMATEX* pWfxClosestMatch = NULL;
+
+ // Create COM object with IAudioClient interface.
+ SAFE_RELEASE(_ptrClientIn);
+ hr = _ptrDeviceIn->Activate(__uuidof(IAudioClient), CLSCTX_ALL, NULL,
+ (void**)&_ptrClientIn);
+ EXIT_ON_ERROR(hr);
+
+ // Retrieve the stream format that the audio engine uses for its internal
+ // processing (mixing) of shared-mode streams.
+ hr = _ptrClientIn->GetMixFormat(&pWfxIn);
+ if (SUCCEEDED(hr)) {
+ RTC_LOG(LS_VERBOSE) << "Audio Engine's current capturing mix format:";
+ // format type
+ RTC_LOG(LS_VERBOSE) << "wFormatTag : 0x"
+ << rtc::ToHex(pWfxIn->wFormatTag) << " ("
+ << pWfxIn->wFormatTag << ")";
+ // number of channels (i.e. mono, stereo...)
+ RTC_LOG(LS_VERBOSE) << "nChannels : " << pWfxIn->nChannels;
+ // sample rate
+ RTC_LOG(LS_VERBOSE) << "nSamplesPerSec : " << pWfxIn->nSamplesPerSec;
+ // for buffer estimation
+ RTC_LOG(LS_VERBOSE) << "nAvgBytesPerSec: " << pWfxIn->nAvgBytesPerSec;
+ // block size of data
+ RTC_LOG(LS_VERBOSE) << "nBlockAlign : " << pWfxIn->nBlockAlign;
+ // number of bits per sample of mono data
+ RTC_LOG(LS_VERBOSE) << "wBitsPerSample : " << pWfxIn->wBitsPerSample;
+ RTC_LOG(LS_VERBOSE) << "cbSize : " << pWfxIn->cbSize;
+ }
+
+ // Set wave format
+ Wfx.Format.wFormatTag = WAVE_FORMAT_EXTENSIBLE;
+ Wfx.Format.wBitsPerSample = 16;
+ Wfx.Format.cbSize = 22;
+ Wfx.dwChannelMask = 0;
+ Wfx.Samples.wValidBitsPerSample = Wfx.Format.wBitsPerSample;
+ Wfx.SubFormat = KSDATAFORMAT_SUBTYPE_PCM;
+
+ const int freqs[6] = {48000, 44100, 16000, 96000, 32000, 8000};
+ hr = S_FALSE;
+
+ // Iterate over frequencies and channels, in order of priority
+ for (unsigned int freq = 0; freq < sizeof(freqs) / sizeof(freqs[0]); freq++) {
+ for (unsigned int chan = 0;
+ chan < sizeof(_recChannelsPrioList) / sizeof(_recChannelsPrioList[0]);
+ chan++) {
+ Wfx.Format.nChannels = _recChannelsPrioList[chan];
+ Wfx.Format.nSamplesPerSec = freqs[freq];
+ Wfx.Format.nBlockAlign =
+ Wfx.Format.nChannels * Wfx.Format.wBitsPerSample / 8;
+ Wfx.Format.nAvgBytesPerSec =
+ Wfx.Format.nSamplesPerSec * Wfx.Format.nBlockAlign;
+ // If the method succeeds and the audio endpoint device supports the
+ // specified stream format, it returns S_OK. If the method succeeds and
+ // provides a closest match to the specified format, it returns S_FALSE.
+ hr = _ptrClientIn->IsFormatSupported(
+ AUDCLNT_SHAREMODE_SHARED, (WAVEFORMATEX*)&Wfx, &pWfxClosestMatch);
+ if (hr == S_OK) {
+ break;
+ } else {
+ if (pWfxClosestMatch) {
+ RTC_LOG(LS_INFO) << "nChannels=" << Wfx.Format.nChannels
+ << ", nSamplesPerSec=" << Wfx.Format.nSamplesPerSec
+ << " is not supported. Closest match: "
+ "nChannels="
+ << pWfxClosestMatch->nChannels << ", nSamplesPerSec="
+ << pWfxClosestMatch->nSamplesPerSec;
+ CoTaskMemFree(pWfxClosestMatch);
+ pWfxClosestMatch = NULL;
+ } else {
+ RTC_LOG(LS_INFO) << "nChannels=" << Wfx.Format.nChannels
+ << ", nSamplesPerSec=" << Wfx.Format.nSamplesPerSec
+ << " is not supported. No closest match.";
+ }
+ }
+ }
+ if (hr == S_OK)
+ break;
+ }
+
+ if (hr == S_OK) {
+ _recAudioFrameSize = Wfx.Format.nBlockAlign;
+ _recSampleRate = Wfx.Format.nSamplesPerSec;
+ _recBlockSize = Wfx.Format.nSamplesPerSec / 100;
+ _recChannels = Wfx.Format.nChannels;
+
+ RTC_LOG(LS_VERBOSE) << "VoE selected this capturing format:";
+ RTC_LOG(LS_VERBOSE) << "wFormatTag : 0x"
+ << rtc::ToHex(Wfx.Format.wFormatTag) << " ("
+ << Wfx.Format.wFormatTag << ")";
+ RTC_LOG(LS_VERBOSE) << "nChannels : " << Wfx.Format.nChannels;
+ RTC_LOG(LS_VERBOSE) << "nSamplesPerSec : " << Wfx.Format.nSamplesPerSec;
+ RTC_LOG(LS_VERBOSE) << "nAvgBytesPerSec : " << Wfx.Format.nAvgBytesPerSec;
+ RTC_LOG(LS_VERBOSE) << "nBlockAlign : " << Wfx.Format.nBlockAlign;
+ RTC_LOG(LS_VERBOSE) << "wBitsPerSample : " << Wfx.Format.wBitsPerSample;
+ RTC_LOG(LS_VERBOSE) << "cbSize : " << Wfx.Format.cbSize;
+ RTC_LOG(LS_VERBOSE) << "Additional settings:";
+ RTC_LOG(LS_VERBOSE) << "_recAudioFrameSize: " << _recAudioFrameSize;
+ RTC_LOG(LS_VERBOSE) << "_recBlockSize : " << _recBlockSize;
+ RTC_LOG(LS_VERBOSE) << "_recChannels : " << _recChannels;
+ }
+
+ // Create a capturing stream.
+ hr = _ptrClientIn->Initialize(
+ AUDCLNT_SHAREMODE_SHARED, // share Audio Engine with other applications
+ AUDCLNT_STREAMFLAGS_EVENTCALLBACK | // processing of the audio buffer by
+ // the client will be event driven
+ AUDCLNT_STREAMFLAGS_NOPERSIST, // volume and mute settings for an
+ // audio session will not persist
+ // across system restarts
+ 0, // required for event-driven shared mode
+ 0, // periodicity
+ (WAVEFORMATEX*)&Wfx, // selected wave format
+ NULL); // session GUID
+
+ if (hr != S_OK) {
+ RTC_LOG(LS_ERROR) << "IAudioClient::Initialize() failed:";
+ }
+ EXIT_ON_ERROR(hr);
+
+ if (_ptrAudioBuffer) {
+ // Update the audio buffer with the selected parameters
+ _ptrAudioBuffer->SetRecordingSampleRate(_recSampleRate);
+ _ptrAudioBuffer->SetRecordingChannels((uint8_t)_recChannels);
+ } else {
+ // We can enter this state during CoreAudioIsSupported() when no
+ // AudioDeviceImplementation has been created, hence the AudioDeviceBuffer
+ // does not exist. It is OK to end up here since we don't initiate any media
+ // in CoreAudioIsSupported().
+ RTC_LOG(LS_VERBOSE)
+ << "AudioDeviceBuffer must be attached before streaming can start";
+ }
+
+ // Get the actual size of the shared (endpoint buffer).
+ // Typical value is 960 audio frames <=> 20ms @ 48kHz sample rate.
+ UINT bufferFrameCount(0);
+ hr = _ptrClientIn->GetBufferSize(&bufferFrameCount);
+ if (SUCCEEDED(hr)) {
+ RTC_LOG(LS_VERBOSE) << "IAudioClient::GetBufferSize() => "
+ << bufferFrameCount << " (<=> "
+ << bufferFrameCount * _recAudioFrameSize << " bytes)";
+ }
+
+ // Set the event handle that the system signals when an audio buffer is ready
+ // to be processed by the client.
+ hr = _ptrClientIn->SetEventHandle(_hCaptureSamplesReadyEvent);
+ EXIT_ON_ERROR(hr);
+
+ // Get an IAudioCaptureClient interface.
+ SAFE_RELEASE(_ptrCaptureClient);
+ hr = _ptrClientIn->GetService(__uuidof(IAudioCaptureClient),
+ (void**)&_ptrCaptureClient);
+ EXIT_ON_ERROR(hr);
+
+ // Mark capture side as initialized
+ _recIsInitialized = true;
+
+ CoTaskMemFree(pWfxIn);
+ CoTaskMemFree(pWfxClosestMatch);
+
+ RTC_LOG(LS_VERBOSE) << "capture side is now initialized";
+ return 0;
+
+Exit:
+ _TraceCOMError(hr);
+ CoTaskMemFree(pWfxIn);
+ CoTaskMemFree(pWfxClosestMatch);
+ SAFE_RELEASE(_ptrClientIn);
+ SAFE_RELEASE(_ptrCaptureClient);
+ return -1;
+}
+
+// ----------------------------------------------------------------------------
+// StartRecording
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::StartRecording() {
+ if (!_recIsInitialized) {
+ return -1;
+ }
+
+ if (_hRecThread != NULL) {
+ return 0;
+ }
+
+ if (_recording) {
+ return 0;
+ }
+
+ {
+ MutexLock lockScoped(&mutex_);
+
+ // Create thread which will drive the capturing
+ LPTHREAD_START_ROUTINE lpStartAddress = WSAPICaptureThread;
+ if (_builtInAecEnabled) {
+ // Redirect to the DMO polling method.
+ lpStartAddress = WSAPICaptureThreadPollDMO;
+
+ if (!_playing) {
+ // The DMO won't provide us captured output data unless we
+ // give it render data to process.
+ RTC_LOG(LS_ERROR)
+ << "Playout must be started before recording when using"
+ " the built-in AEC";
+ return -1;
+ }
+ }
+
+ RTC_DCHECK(_hRecThread == NULL);
+ _hRecThread = CreateThread(NULL, 0, lpStartAddress, this, 0, NULL);
+ if (_hRecThread == NULL) {
+ RTC_LOG(LS_ERROR) << "failed to create the recording thread";
+ return -1;
+ }
+
+ // Set thread priority to highest possible
+ SetThreadPriority(_hRecThread, THREAD_PRIORITY_TIME_CRITICAL);
+ } // critScoped
+
+ DWORD ret = WaitForSingleObject(_hCaptureStartedEvent, 1000);
+ if (ret != WAIT_OBJECT_0) {
+ RTC_LOG(LS_VERBOSE) << "capturing did not start up properly";
+ return -1;
+ }
+ RTC_LOG(LS_VERBOSE) << "capture audio stream has now started...";
+
+ _recording = true;
+
+ return 0;
+}
+
+// ----------------------------------------------------------------------------
+// StopRecording
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::StopRecording() {
+ int32_t err = 0;
+
+ if (!_recIsInitialized) {
+ return 0;
+ }
+
+ _Lock();
+
+ if (_hRecThread == NULL) {
+ RTC_LOG(LS_VERBOSE)
+ << "no capturing stream is active => close down WASAPI only";
+ SAFE_RELEASE(_ptrClientIn);
+ SAFE_RELEASE(_ptrCaptureClient);
+ _recIsInitialized = false;
+ _recording = false;
+ _UnLock();
+ return 0;
+ }
+
+ // Stop the driving thread...
+ RTC_LOG(LS_VERBOSE) << "closing down the webrtc_core_audio_capture_thread...";
+ // Manual-reset event; it will remain signaled to stop all capture threads.
+ SetEvent(_hShutdownCaptureEvent);
+
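+ // Release the lock while waiting for the thread to exit, so the capture
+ // thread is not blocked while shutting down.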
+ _UnLock();
+ DWORD ret = WaitForSingleObject(_hRecThread, 2000);
+ if (ret != WAIT_OBJECT_0) {
+ RTC_LOG(LS_ERROR)
+ << "failed to close down webrtc_core_audio_capture_thread";
+ err = -1;
+ } else {
+ RTC_LOG(LS_VERBOSE) << "webrtc_core_audio_capture_thread is now closed";
+ }
+ _Lock();
+
+ ResetEvent(_hShutdownCaptureEvent); // Must be manually reset.
+ // Ensure that the thread has released these interfaces properly.
+ RTC_DCHECK(err == -1 || _ptrClientIn == NULL);
+ RTC_DCHECK(err == -1 || _ptrCaptureClient == NULL);
+
+ _recIsInitialized = false;
+ _recording = false;
+
+ // If an error occurred above, closing the handle here leaks the thread,
+ // but we can at least resume the call.
+ CloseHandle(_hRecThread);
+ _hRecThread = NULL;
+
+ if (_builtInAecEnabled) {
+ RTC_DCHECK(_dmo);
+ // This is necessary. Otherwise the DMO can generate garbage render
+ // audio even after rendering has stopped.
+ HRESULT hr = _dmo->FreeStreamingResources();
+ if (FAILED(hr)) {
+ _TraceCOMError(hr);
+ err = -1;
+ }
+ }
+
+ _UnLock();
+
+ return err;
+}
+
+// ----------------------------------------------------------------------------
+// RecordingIsInitialized
+// ----------------------------------------------------------------------------
+
+bool AudioDeviceWindowsCore::RecordingIsInitialized() const {
+ return (_recIsInitialized);
+}
+
+// ----------------------------------------------------------------------------
+// Recording
+// ----------------------------------------------------------------------------
+
+bool AudioDeviceWindowsCore::Recording() const {
+ return (_recording);
+}
+
+// ----------------------------------------------------------------------------
+// PlayoutIsInitialized
+// ----------------------------------------------------------------------------
+
+bool AudioDeviceWindowsCore::PlayoutIsInitialized() const {
+ return (_playIsInitialized);
+}
+
+// ----------------------------------------------------------------------------
+// StartPlayout
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::StartPlayout() {
+ if (!_playIsInitialized) {
+ return -1;
+ }
+
+ if (_hPlayThread != NULL) {
+ return 0;
+ }
+
+ if (_playing) {
+ return 0;
+ }
+
+ {
+ MutexLock lockScoped(&mutex_);
+
+ // Create thread which will drive the rendering.
+ RTC_DCHECK(_hPlayThread == NULL);
+ _hPlayThread = CreateThread(NULL, 0, WSAPIRenderThread, this, 0, NULL);
+ if (_hPlayThread == NULL) {
+ RTC_LOG(LS_ERROR) << "failed to create the playout thread";
+ return -1;
+ }
+
+ // Set thread priority to highest possible.
+ SetThreadPriority(_hPlayThread, THREAD_PRIORITY_TIME_CRITICAL);
+  }  // lockScoped
+
+ DWORD ret = WaitForSingleObject(_hRenderStartedEvent, 1000);
+ if (ret != WAIT_OBJECT_0) {
+ RTC_LOG(LS_VERBOSE) << "rendering did not start up properly";
+ return -1;
+ }
+
+ _playing = true;
+ RTC_LOG(LS_VERBOSE) << "rendering audio stream has now started...";
+
+ return 0;
+}
+
+// ----------------------------------------------------------------------------
+// StopPlayout
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::StopPlayout() {
+ if (!_playIsInitialized) {
+ return 0;
+ }
+
+ {
+ MutexLock lockScoped(&mutex_);
+
+ if (_hPlayThread == NULL) {
+ RTC_LOG(LS_VERBOSE)
+ << "no rendering stream is active => close down WASAPI only";
+ SAFE_RELEASE(_ptrClientOut);
+ SAFE_RELEASE(_ptrRenderClient);
+ _playIsInitialized = false;
+ _playing = false;
+ return 0;
+ }
+
+ // stop the driving thread...
+ RTC_LOG(LS_VERBOSE)
+ << "closing down the webrtc_core_audio_render_thread...";
+ SetEvent(_hShutdownRenderEvent);
+  }  // lockScoped
+
+ DWORD ret = WaitForSingleObject(_hPlayThread, 2000);
+ if (ret != WAIT_OBJECT_0) {
+ // the thread did not stop as it should
+ RTC_LOG(LS_ERROR) << "failed to close down webrtc_core_audio_render_thread";
+ CloseHandle(_hPlayThread);
+ _hPlayThread = NULL;
+ _playIsInitialized = false;
+ _playing = false;
+ return -1;
+ }
+
+ {
+ MutexLock lockScoped(&mutex_);
+ RTC_LOG(LS_VERBOSE) << "webrtc_core_audio_render_thread is now closed";
+
+    // Reset this event manually each time we are done with it. If the
+    // render thread exited before StopPlayout() was called, the event would
+    // otherwise remain signalled and could be caught by a new render thread
+    // within the same VoE instance.
+ ResetEvent(_hShutdownRenderEvent);
+
+ SAFE_RELEASE(_ptrClientOut);
+ SAFE_RELEASE(_ptrRenderClient);
+
+ _playIsInitialized = false;
+ _playing = false;
+
+ CloseHandle(_hPlayThread);
+ _hPlayThread = NULL;
+
+ if (_builtInAecEnabled && _recording) {
+ // The DMO won't provide us captured output data unless we
+ // give it render data to process.
+ //
+ // We still permit the playout to shutdown, and trace a warning.
+ // Otherwise, VoE can get into a state which will never permit
+ // playout to stop properly.
+ RTC_LOG(LS_WARNING)
+ << "Recording should be stopped before playout when using the"
+ " built-in AEC";
+ }
+
+ // Reset the playout delay value.
+ _sndCardPlayDelay = 0;
+  }  // lockScoped
+
+ return 0;
+}
+
+// ----------------------------------------------------------------------------
+// PlayoutDelay
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::PlayoutDelay(uint16_t& delayMS) const {
+ MutexLock lockScoped(&mutex_);
+ delayMS = static_cast<uint16_t>(_sndCardPlayDelay);
+ return 0;
+}
+
+bool AudioDeviceWindowsCore::BuiltInAECIsAvailable() const {
+ return _dmo != nullptr;
+}
+
+// ----------------------------------------------------------------------------
+// Playing
+// ----------------------------------------------------------------------------
+
+bool AudioDeviceWindowsCore::Playing() const {
+ return (_playing);
+}
+
+// ============================================================================
+// Private Methods
+// ============================================================================
+
+// ----------------------------------------------------------------------------
+// [static] WSAPIRenderThread
+// ----------------------------------------------------------------------------
+
+DWORD WINAPI AudioDeviceWindowsCore::WSAPIRenderThread(LPVOID context) {
+ return reinterpret_cast<AudioDeviceWindowsCore*>(context)->DoRenderThread();
+}
+
+// ----------------------------------------------------------------------------
+// [static] WSAPICaptureThread
+// ----------------------------------------------------------------------------
+
+DWORD WINAPI AudioDeviceWindowsCore::WSAPICaptureThread(LPVOID context) {
+ return reinterpret_cast<AudioDeviceWindowsCore*>(context)->DoCaptureThread();
+}
+
+DWORD WINAPI AudioDeviceWindowsCore::WSAPICaptureThreadPollDMO(LPVOID context) {
+ return reinterpret_cast<AudioDeviceWindowsCore*>(context)
+ ->DoCaptureThreadPollDMO();
+}
+
+// ----------------------------------------------------------------------------
+// DoRenderThread
+// ----------------------------------------------------------------------------
+
+DWORD AudioDeviceWindowsCore::DoRenderThread() {
+ bool keepPlaying = true;
+ HANDLE waitArray[2] = {_hShutdownRenderEvent, _hRenderSamplesReadyEvent};
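+  // Index 0 is the shutdown event: WaitForMultipleObjects() returns the
+  // lowest signalled index, so shutdown takes precedence over pending
+  // samples when both events are set.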
+ HRESULT hr = S_OK;
+ HANDLE hMmTask = NULL;
+
+ // Initialize COM as MTA in this thread.
+ ScopedCOMInitializer comInit(ScopedCOMInitializer::kMTA);
+ if (!comInit.Succeeded()) {
+ RTC_LOG(LS_ERROR) << "failed to initialize COM in render thread";
+ return 1;
+ }
+
+ rtc::SetCurrentThreadName("webrtc_core_audio_render_thread");
+
+ // Use Multimedia Class Scheduler Service (MMCSS) to boost the thread
+ // priority.
+ //
+ if (_winSupportAvrt) {
+ DWORD taskIndex(0);
+ hMmTask = _PAvSetMmThreadCharacteristicsA("Pro Audio", &taskIndex);
+ if (hMmTask) {
+ if (FALSE == _PAvSetMmThreadPriority(hMmTask, AVRT_PRIORITY_CRITICAL)) {
+ RTC_LOG(LS_WARNING) << "failed to boost play-thread using MMCSS";
+ }
+ RTC_LOG(LS_VERBOSE)
+ << "render thread is now registered with MMCSS (taskIndex="
+ << taskIndex << ")";
+ } else {
+ RTC_LOG(LS_WARNING) << "failed to enable MMCSS on render thread (err="
+ << GetLastError() << ")";
+ _TraceCOMError(GetLastError());
+ }
+ }
+
+ _Lock();
+
+ IAudioClock* clock = NULL;
+
+ // Get size of rendering buffer (length is expressed as the number of audio
+ // frames the buffer can hold). This value is fixed during the rendering
+ // session.
+ //
+ UINT32 bufferLength = 0;
+ hr = _ptrClientOut->GetBufferSize(&bufferLength);
+ EXIT_ON_ERROR(hr);
+ RTC_LOG(LS_VERBOSE) << "[REND] size of buffer : " << bufferLength;
+
+ // Get maximum latency for the current stream (will not change for the
+ // lifetime of the IAudioClient object).
+ //
+ REFERENCE_TIME latency;
+ _ptrClientOut->GetStreamLatency(&latency);
+ RTC_LOG(LS_VERBOSE) << "[REND] max stream latency : " << (DWORD)latency
+ << " (" << (double)(latency / 10000.0) << " ms)";
+
+ // Get the length of the periodic interval separating successive processing
+ // passes by the audio engine on the data in the endpoint buffer.
+ //
+ // The period between processing passes by the audio engine is fixed for a
+ // particular audio endpoint device and represents the smallest processing
+ // quantum for the audio engine. This period plus the stream latency between
+ // the buffer and endpoint device represents the minimum possible latency that
+ // an audio application can achieve. Typical value: 100000 <=> 0.01 sec =
+ // 10ms.
+ //
+ REFERENCE_TIME devPeriod = 0;
+ REFERENCE_TIME devPeriodMin = 0;
+ _ptrClientOut->GetDevicePeriod(&devPeriod, &devPeriodMin);
+ RTC_LOG(LS_VERBOSE) << "[REND] device period : " << (DWORD)devPeriod
+ << " (" << (double)(devPeriod / 10000.0) << " ms)";
+
+ // Derive initial rendering delay.
+ // Example: 10*(960/480) + 15 = 20 + 15 = 35ms
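+  // The first term converts the endpoint buffer length into milliseconds
+  // (each block of _playBlockSize frames holds 10 ms of audio); the second
+  // term converts the REFERENCE_TIME values, which are expressed in 100-ns
+  // units, into milliseconds.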
+ //
+ int playout_delay = 10 * (bufferLength / _playBlockSize) +
+ (int)((latency + devPeriod) / 10000);
+ _sndCardPlayDelay = playout_delay;
+ _writtenSamples = 0;
+ RTC_LOG(LS_VERBOSE) << "[REND] initial delay : " << playout_delay;
+
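+  // Total capacity of the endpoint buffer expressed in milliseconds; used
+  // at the end of the thread loop to sleep long enough for queued audio to
+  // drain before the stream is stopped.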
+ double endpointBufferSizeMS =
+ 10.0 * ((double)bufferLength / (double)_devicePlayBlockSize);
+ RTC_LOG(LS_VERBOSE) << "[REND] endpointBufferSizeMS : "
+ << endpointBufferSizeMS;
+
+ // Before starting the stream, fill the rendering buffer with silence.
+ //
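+  // Writing one full buffer of silence (via AUDCLNT_BUFFERFLAGS_SILENT)
+  // gives the engine data to play as soon as Start() is called and avoids
+  // an underrun before the first real 10 ms packet is delivered.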
+ BYTE* pData = NULL;
+ hr = _ptrRenderClient->GetBuffer(bufferLength, &pData);
+ EXIT_ON_ERROR(hr);
+
+ hr =
+ _ptrRenderClient->ReleaseBuffer(bufferLength, AUDCLNT_BUFFERFLAGS_SILENT);
+ EXIT_ON_ERROR(hr);
+
+ _writtenSamples += bufferLength;
+
+ hr = _ptrClientOut->GetService(__uuidof(IAudioClock), (void**)&clock);
+ if (FAILED(hr)) {
+ RTC_LOG(LS_WARNING)
+ << "failed to get IAudioClock interface from the IAudioClient";
+ }
+
+ // Start up the rendering audio stream.
+ hr = _ptrClientOut->Start();
+ EXIT_ON_ERROR(hr);
+
+ _UnLock();
+
+ // Set event which will ensure that the calling thread modifies the playing
+ // state to true.
+ //
+ SetEvent(_hRenderStartedEvent);
+
+ // >> ------------------ THREAD LOOP ------------------
+
+ while (keepPlaying) {
+ // Wait for a render notification event or a shutdown event
+ DWORD waitResult = WaitForMultipleObjects(2, waitArray, FALSE, 500);
+ switch (waitResult) {
+ case WAIT_OBJECT_0 + 0: // _hShutdownRenderEvent
+ keepPlaying = false;
+ break;
+ case WAIT_OBJECT_0 + 1: // _hRenderSamplesReadyEvent
+ break;
+ case WAIT_TIMEOUT: // timeout notification
+ RTC_LOG(LS_WARNING) << "render event timed out after 0.5 seconds";
+ goto Exit;
+ default: // unexpected error
+ RTC_LOG(LS_WARNING) << "unknown wait termination on render side";
+ goto Exit;
+ }
+
+ while (keepPlaying) {
+ _Lock();
+
+ // Sanity check to ensure that essential states are not modified
+ // during the unlocked period.
+ if (_ptrRenderClient == NULL || _ptrClientOut == NULL) {
+ _UnLock();
+ RTC_LOG(LS_ERROR)
+ << "output state has been modified during unlocked period";
+ goto Exit;
+ }
+
+ // Get the number of frames of padding (queued up to play) in the endpoint
+ // buffer.
+ UINT32 padding = 0;
+ hr = _ptrClientOut->GetCurrentPadding(&padding);
+ EXIT_ON_ERROR(hr);
+
+ // Derive the amount of available space in the output buffer
+ uint32_t framesAvailable = bufferLength - padding;
+
+ // Do we have 10 ms available in the render buffer?
+ if (framesAvailable < _playBlockSize) {
+ // Not enough space in render buffer to store next render packet.
+ _UnLock();
+ break;
+ }
+
+ // Write n*10ms buffers to the render buffer
+ const uint32_t n10msBuffers = (framesAvailable / _playBlockSize);
+ for (uint32_t n = 0; n < n10msBuffers; n++) {
+ // Get pointer (i.e., grab the buffer) to next space in the shared
+ // render buffer.
+ hr = _ptrRenderClient->GetBuffer(_playBlockSize, &pData);
+ EXIT_ON_ERROR(hr);
+
+ if (_ptrAudioBuffer) {
+ // Request data to be played out (#bytes =
+ // _playBlockSize*_audioFrameSize)
+ _UnLock();
+ int32_t nSamples =
+ _ptrAudioBuffer->RequestPlayoutData(_playBlockSize);
+ _Lock();
+
+ if (nSamples == -1) {
+ _UnLock();
+ RTC_LOG(LS_ERROR) << "failed to read data from render client";
+ goto Exit;
+ }
+
+ // Sanity check to ensure that essential states are not modified
+ // during the unlocked period
+ if (_ptrRenderClient == NULL || _ptrClientOut == NULL) {
+ _UnLock();
+ RTC_LOG(LS_ERROR)
+ << "output state has been modified during unlocked"
+ " period";
+ goto Exit;
+ }
+ if (nSamples != static_cast<int32_t>(_playBlockSize)) {
+ RTC_LOG(LS_WARNING)
+ << "nSamples(" << nSamples << ") != _playBlockSize"
+ << _playBlockSize << ")";
+ }
+
+ // Get the actual (stored) data
+ nSamples = _ptrAudioBuffer->GetPlayoutData((int8_t*)pData);
+ }
+
+ DWORD dwFlags(0);
+ hr = _ptrRenderClient->ReleaseBuffer(_playBlockSize, dwFlags);
+ // See http://msdn.microsoft.com/en-us/library/dd316605(VS.85).aspx
+ // for more details regarding AUDCLNT_E_DEVICE_INVALIDATED.
+ EXIT_ON_ERROR(hr);
+
+ _writtenSamples += _playBlockSize;
+ }
+
+ // Check the current delay on the playout side.
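+      // The delay is the audio written to the endpoint buffer
+      // (_writtenSamples, in seconds at the device sample rate) that the
+      // device has not yet played out (pos/freq is the IAudioClock position
+      // in seconds), converted to milliseconds.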
+ if (clock) {
+ UINT64 pos = 0;
+ UINT64 freq = 1;
+ clock->GetPosition(&pos, NULL);
+ clock->GetFrequency(&freq);
+ playout_delay = ROUND((double(_writtenSamples) / _devicePlaySampleRate -
+ double(pos) / freq) *
+ 1000.0);
+ _sndCardPlayDelay = playout_delay;
+ }
+
+ _UnLock();
+ }
+ }
+
+ // ------------------ THREAD LOOP ------------------ <<
+
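+  // Sleep for the duration of one full endpoint buffer so that any audio
+  // still queued in the buffer can drain before the stream is stopped.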
+ SleepMs(static_cast<DWORD>(endpointBufferSizeMS + 0.5));
+ hr = _ptrClientOut->Stop();
+
+Exit:
+ SAFE_RELEASE(clock);
+
+ if (FAILED(hr)) {
+ _ptrClientOut->Stop();
+ _UnLock();
+ _TraceCOMError(hr);
+ }
+
+ if (_winSupportAvrt) {
+ if (NULL != hMmTask) {
+ _PAvRevertMmThreadCharacteristics(hMmTask);
+ }
+ }
+
+ _Lock();
+
+ if (keepPlaying) {
+ if (_ptrClientOut != NULL) {
+ hr = _ptrClientOut->Stop();
+ if (FAILED(hr)) {
+ _TraceCOMError(hr);
+ }
+ hr = _ptrClientOut->Reset();
+ if (FAILED(hr)) {
+ _TraceCOMError(hr);
+ }
+ }
+    RTC_LOG(LS_ERROR)
+        << "Playout error: rendering thread has ended prematurely";
+ } else {
+ RTC_LOG(LS_VERBOSE) << "_Rendering thread is now terminated properly";
+ }
+
+ _UnLock();
+
+ return (DWORD)hr;
+}
+
+DWORD AudioDeviceWindowsCore::InitCaptureThreadPriority() {
+ _hMmTask = NULL;
+
+ rtc::SetCurrentThreadName("webrtc_core_audio_capture_thread");
+
+ // Use Multimedia Class Scheduler Service (MMCSS) to boost the thread
+ // priority.
+ if (_winSupportAvrt) {
+ DWORD taskIndex(0);
+ _hMmTask = _PAvSetMmThreadCharacteristicsA("Pro Audio", &taskIndex);
+ if (_hMmTask) {
+ if (!_PAvSetMmThreadPriority(_hMmTask, AVRT_PRIORITY_CRITICAL)) {
+ RTC_LOG(LS_WARNING) << "failed to boost rec-thread using MMCSS";
+ }
+ RTC_LOG(LS_VERBOSE)
+ << "capture thread is now registered with MMCSS (taskIndex="
+ << taskIndex << ")";
+ } else {
+ RTC_LOG(LS_WARNING) << "failed to enable MMCSS on capture thread (err="
+ << GetLastError() << ")";
+ _TraceCOMError(GetLastError());
+ }
+ }
+
+ return S_OK;
+}
+
+void AudioDeviceWindowsCore::RevertCaptureThreadPriority() {
+ if (_winSupportAvrt) {
+ if (NULL != _hMmTask) {
+ _PAvRevertMmThreadCharacteristics(_hMmTask);
+ }
+ }
+
+ _hMmTask = NULL;
+}
+
+DWORD AudioDeviceWindowsCore::DoCaptureThreadPollDMO() {
+ RTC_DCHECK(_mediaBuffer);
+ bool keepRecording = true;
+
+ // Initialize COM as MTA in this thread.
+ ScopedCOMInitializer comInit(ScopedCOMInitializer::kMTA);
+ if (!comInit.Succeeded()) {
+ RTC_LOG(LS_ERROR) << "failed to initialize COM in polling DMO thread";
+ return 1;
+ }
+
+ HRESULT hr = InitCaptureThreadPriority();
+ if (FAILED(hr)) {
+ return hr;
+ }
+
+ // Set event which will ensure that the calling thread modifies the
+ // recording state to true.
+ SetEvent(_hCaptureStartedEvent);
+
+ // >> ---------------------------- THREAD LOOP ----------------------------
+ while (keepRecording) {
+ // Poll the DMO every 5 ms.
+ // (The same interval used in the Wave implementation.)
+ DWORD waitResult = WaitForSingleObject(_hShutdownCaptureEvent, 5);
+ switch (waitResult) {
+ case WAIT_OBJECT_0: // _hShutdownCaptureEvent
+ keepRecording = false;
+ break;
+ case WAIT_TIMEOUT: // timeout notification
+ break;
+ default: // unexpected error
+ RTC_LOG(LS_WARNING) << "Unknown wait termination on capture side";
+ hr = -1; // To signal an error callback.
+ keepRecording = false;
+ break;
+ }
+
+ while (keepRecording) {
+ MutexLock lockScoped(&mutex_);
+
+ DWORD dwStatus = 0;
+ {
+ DMO_OUTPUT_DATA_BUFFER dmoBuffer = {0};
+ dmoBuffer.pBuffer = _mediaBuffer.get();
+ dmoBuffer.pBuffer->AddRef();
+
+ // Poll the DMO for AEC processed capture data. The DMO will
+ // copy available data to `dmoBuffer`, and should only return
+ // 10 ms frames. The value of `dwStatus` should be ignored.
+ hr = _dmo->ProcessOutput(0, 1, &dmoBuffer, &dwStatus);
+ SAFE_RELEASE(dmoBuffer.pBuffer);
+ dwStatus = dmoBuffer.dwStatus;
+ }
+ if (FAILED(hr)) {
+ _TraceCOMError(hr);
+ keepRecording = false;
+ RTC_DCHECK_NOTREACHED();
+ break;
+ }
+
+ ULONG bytesProduced = 0;
+ BYTE* data;
+ // Get a pointer to the data buffer. This should be valid until
+ // the next call to ProcessOutput.
+ hr = _mediaBuffer->GetBufferAndLength(&data, &bytesProduced);
+ if (FAILED(hr)) {
+ _TraceCOMError(hr);
+ keepRecording = false;
+ RTC_DCHECK_NOTREACHED();
+ break;
+ }
+
+ if (bytesProduced > 0) {
+ const int kSamplesProduced = bytesProduced / _recAudioFrameSize;
+ // TODO(andrew): verify that this is always satisfied. It might
+ // be that ProcessOutput will try to return more than 10 ms if
+ // we fail to call it frequently enough.
+ RTC_DCHECK_EQ(kSamplesProduced, static_cast<int>(_recBlockSize));
+ RTC_DCHECK_EQ(sizeof(BYTE), sizeof(int8_t));
+ _ptrAudioBuffer->SetRecordedBuffer(reinterpret_cast<int8_t*>(data),
+ kSamplesProduced);
+ _ptrAudioBuffer->SetVQEData(0, 0);
+
+ _UnLock(); // Release lock while making the callback.
+ _ptrAudioBuffer->DeliverRecordedData();
+ _Lock();
+ }
+
+ // Reset length to indicate buffer availability.
+ hr = _mediaBuffer->SetLength(0);
+ if (FAILED(hr)) {
+ _TraceCOMError(hr);
+ keepRecording = false;
+ RTC_DCHECK_NOTREACHED();
+ break;
+ }
+
+ if (!(dwStatus & DMO_OUTPUT_DATA_BUFFERF_INCOMPLETE)) {
+ // The DMO cannot currently produce more data. This is the
+ // normal case; otherwise it means the DMO had more than 10 ms
+ // of data available and ProcessOutput should be called again.
+ break;
+ }
+ }
+ }
+ // ---------------------------- THREAD LOOP ---------------------------- <<
+
+ RevertCaptureThreadPriority();
+
+ if (FAILED(hr)) {
+ RTC_LOG(LS_ERROR)
+ << "Recording error: capturing thread has ended prematurely";
+ } else {
+ RTC_LOG(LS_VERBOSE) << "Capturing thread is now terminated properly";
+ }
+
+ return hr;
+}
+
+// ----------------------------------------------------------------------------
+// DoCaptureThread
+// ----------------------------------------------------------------------------
+
+DWORD AudioDeviceWindowsCore::DoCaptureThread() {
+ bool keepRecording = true;
+ HANDLE waitArray[2] = {_hShutdownCaptureEvent, _hCaptureSamplesReadyEvent};
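+  // As in the render thread, the shutdown event is first in the array so
+  // that it takes precedence when both events are signalled.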
+ HRESULT hr = S_OK;
+
+ LARGE_INTEGER t1;
+
+ BYTE* syncBuffer = NULL;
+ UINT32 syncBufIndex = 0;
+
+ _readSamples = 0;
+
+ // Initialize COM as MTA in this thread.
+ ScopedCOMInitializer comInit(ScopedCOMInitializer::kMTA);
+ if (!comInit.Succeeded()) {
+ RTC_LOG(LS_ERROR) << "failed to initialize COM in capture thread";
+ return 1;
+ }
+
+ hr = InitCaptureThreadPriority();
+ if (FAILED(hr)) {
+ return hr;
+ }
+
+ _Lock();
+
+ // Get size of capturing buffer (length is expressed as the number of audio
+ // frames the buffer can hold). This value is fixed during the capturing
+ // session.
+ //
+ UINT32 bufferLength = 0;
+  if (_ptrClientIn == NULL) {
+    RTC_LOG(LS_ERROR)
+        << "input state has been modified before capture loop starts.";
+    _UnLock();
+    return 1;
+  }
+ hr = _ptrClientIn->GetBufferSize(&bufferLength);
+ EXIT_ON_ERROR(hr);
+ RTC_LOG(LS_VERBOSE) << "[CAPT] size of buffer : " << bufferLength;
+
+ // Allocate memory for sync buffer.
+  // It is used to compensate between the native 44.1 kHz and the internal
+  // 44.0 kHz sample rates, and for cases when the capture buffer is larger
+  // than 10 ms.
+ //
+ const UINT32 syncBufferSize = 2 * (bufferLength * _recAudioFrameSize);
+ syncBuffer = new BYTE[syncBufferSize];
+  if (syncBuffer == NULL) {
+    _UnLock();
+    return (DWORD)E_POINTER;
+  }
+ RTC_LOG(LS_VERBOSE) << "[CAPT] size of sync buffer : " << syncBufferSize
+ << " [bytes]";
+
+ // Get maximum latency for the current stream (will not change for the
+ // lifetime of the IAudioClient object).
+ //
+ REFERENCE_TIME latency;
+ _ptrClientIn->GetStreamLatency(&latency);
+ RTC_LOG(LS_VERBOSE) << "[CAPT] max stream latency : " << (DWORD)latency
+ << " (" << (double)(latency / 10000.0) << " ms)";
+
+ // Get the length of the periodic interval separating successive processing
+ // passes by the audio engine on the data in the endpoint buffer.
+ //
+ REFERENCE_TIME devPeriod = 0;
+ REFERENCE_TIME devPeriodMin = 0;
+ _ptrClientIn->GetDevicePeriod(&devPeriod, &devPeriodMin);
+ RTC_LOG(LS_VERBOSE) << "[CAPT] device period : " << (DWORD)devPeriod
+ << " (" << (double)(devPeriod / 10000.0) << " ms)";
+
+ double extraDelayMS = (double)((latency + devPeriod) / 10000.0);
+ RTC_LOG(LS_VERBOSE) << "[CAPT] extraDelayMS : " << extraDelayMS;
+
+ double endpointBufferSizeMS =
+ 10.0 * ((double)bufferLength / (double)_recBlockSize);
+ RTC_LOG(LS_VERBOSE) << "[CAPT] endpointBufferSizeMS : "
+ << endpointBufferSizeMS;
+
+ // Start up the capturing stream.
+ //
+ hr = _ptrClientIn->Start();
+ EXIT_ON_ERROR(hr);
+
+ _UnLock();
+
+ // Set event which will ensure that the calling thread modifies the recording
+ // state to true.
+ //
+ SetEvent(_hCaptureStartedEvent);
+
+ // >> ---------------------------- THREAD LOOP ----------------------------
+
+ while (keepRecording) {
+ // Wait for a capture notification event or a shutdown event
+ DWORD waitResult = WaitForMultipleObjects(2, waitArray, FALSE, 500);
+ switch (waitResult) {
+ case WAIT_OBJECT_0 + 0: // _hShutdownCaptureEvent
+ keepRecording = false;
+ break;
+ case WAIT_OBJECT_0 + 1: // _hCaptureSamplesReadyEvent
+ break;
+ case WAIT_TIMEOUT: // timeout notification
+ RTC_LOG(LS_WARNING) << "capture event timed out after 0.5 seconds";
+ goto Exit;
+ default: // unexpected error
+ RTC_LOG(LS_WARNING) << "unknown wait termination on capture side";
+ goto Exit;
+ }
+
+ while (keepRecording) {
+ BYTE* pData = 0;
+ UINT32 framesAvailable = 0;
+ DWORD flags = 0;
+ UINT64 recTime = 0;
+ UINT64 recPos = 0;
+
+ _Lock();
+
+ // Sanity check to ensure that essential states are not modified
+ // during the unlocked period.
+ if (_ptrCaptureClient == NULL || _ptrClientIn == NULL) {
+ _UnLock();
+ RTC_LOG(LS_ERROR)
+ << "input state has been modified during unlocked period";
+ goto Exit;
+ }
+
+ // Find out how much capture data is available
+ //
+ hr = _ptrCaptureClient->GetBuffer(
+ &pData, // packet which is ready to be read by used
+ &framesAvailable, // #frames in the captured packet (can be zero)
+ &flags, // support flags (check)
+ &recPos, // device position of first audio frame in data packet
+ &recTime); // value of performance counter at the time of recording
+ // the first audio frame
+
+ if (SUCCEEDED(hr)) {
+ if (AUDCLNT_S_BUFFER_EMPTY == hr) {
+ // Buffer was empty => start waiting for a new capture notification
+ // event
+ _UnLock();
+ break;
+ }
+
+ if (flags & AUDCLNT_BUFFERFLAGS_SILENT) {
+ // Treat all of the data in the packet as silence and ignore the
+ // actual data values.
+ RTC_LOG(LS_WARNING) << "AUDCLNT_BUFFERFLAGS_SILENT";
+ pData = NULL;
+ }
+
+ RTC_DCHECK_NE(framesAvailable, 0);
+
+ if (pData) {
+ CopyMemory(&syncBuffer[syncBufIndex * _recAudioFrameSize], pData,
+ framesAvailable * _recAudioFrameSize);
+ } else {
+ ZeroMemory(&syncBuffer[syncBufIndex * _recAudioFrameSize],
+ framesAvailable * _recAudioFrameSize);
+ }
+ RTC_DCHECK_GE(syncBufferSize, (syncBufIndex * _recAudioFrameSize) +
+ framesAvailable * _recAudioFrameSize);
+
+ // Release the capture buffer
+ //
+ hr = _ptrCaptureClient->ReleaseBuffer(framesAvailable);
+ EXIT_ON_ERROR(hr);
+
+ _readSamples += framesAvailable;
+ syncBufIndex += framesAvailable;
+
+ QueryPerformanceCounter(&t1);
+
+ // Get the current recording and playout delay.
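+      // The capture delay is the age of the oldest frame in this packet
+      // (the QPC timestamp scaled by _perfCounterFactor into the same
+      // 100-ns units as recTime, converted to ms) plus the audio already
+      // held in syncBuffer (10 ms per _recBlockSize frames), minus the
+      // 10 ms block that is about to be delivered.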
+ uint32_t sndCardRecDelay = (uint32_t)(
+ ((((UINT64)t1.QuadPart * _perfCounterFactor) - recTime) / 10000) +
+ (10 * syncBufIndex) / _recBlockSize - 10);
+ uint32_t sndCardPlayDelay = static_cast<uint32_t>(_sndCardPlayDelay);
+
+ while (syncBufIndex >= _recBlockSize) {
+ if (_ptrAudioBuffer) {
+ _ptrAudioBuffer->SetRecordedBuffer((const int8_t*)syncBuffer,
+ _recBlockSize);
+ _ptrAudioBuffer->SetVQEData(sndCardPlayDelay, sndCardRecDelay);
+
+ _ptrAudioBuffer->SetTypingStatus(KeyPressed());
+
+ _UnLock(); // release lock while making the callback
+ _ptrAudioBuffer->DeliverRecordedData();
+ _Lock(); // restore the lock
+
+ // Sanity check to ensure that essential states are not modified
+ // during the unlocked period
+ if (_ptrCaptureClient == NULL || _ptrClientIn == NULL) {
+ _UnLock();
+ RTC_LOG(LS_ERROR) << "input state has been modified during"
+ " unlocked period";
+ goto Exit;
+ }
+ }
+
+          // Store the remaining data, which could not be delivered as a
+          // 10 ms segment.
+ MoveMemory(&syncBuffer[0],
+ &syncBuffer[_recBlockSize * _recAudioFrameSize],
+ (syncBufIndex - _recBlockSize) * _recAudioFrameSize);
+ syncBufIndex -= _recBlockSize;
+ sndCardRecDelay -= 10;
+ }
+ } else {
+ // If GetBuffer returns AUDCLNT_E_BUFFER_ERROR, the thread consuming the
+ // audio samples must wait for the next processing pass. The client
+ // might benefit from keeping a count of the failed GetBuffer calls. If
+ // GetBuffer returns this error repeatedly, the client can start a new
+ // processing loop after shutting down the current client by calling
+ // IAudioClient::Stop, IAudioClient::Reset, and releasing the audio
+ // client.
+ RTC_LOG(LS_ERROR) << "IAudioCaptureClient::GetBuffer returned"
+ " AUDCLNT_E_BUFFER_ERROR, hr = 0x"
+ << rtc::ToHex(hr);
+ goto Exit;
+ }
+
+ _UnLock();
+ }
+ }
+
+ // ---------------------------- THREAD LOOP ---------------------------- <<
+
+ if (_ptrClientIn) {
+ hr = _ptrClientIn->Stop();
+ }
+
+Exit:
+ if (FAILED(hr)) {
+ _ptrClientIn->Stop();
+ _UnLock();
+ _TraceCOMError(hr);
+ }
+
+ RevertCaptureThreadPriority();
+
+ _Lock();
+
+ if (keepRecording) {
+ if (_ptrClientIn != NULL) {
+ hr = _ptrClientIn->Stop();
+ if (FAILED(hr)) {
+ _TraceCOMError(hr);
+ }
+ hr = _ptrClientIn->Reset();
+ if (FAILED(hr)) {
+ _TraceCOMError(hr);
+ }
+ }
+
+    RTC_LOG(LS_ERROR)
+        << "Recording error: capturing thread has ended prematurely";
+ } else {
+ RTC_LOG(LS_VERBOSE) << "_Capturing thread is now terminated properly";
+ }
+
+ SAFE_RELEASE(_ptrClientIn);
+ SAFE_RELEASE(_ptrCaptureClient);
+
+ _UnLock();
+
+ if (syncBuffer) {
+ delete[] syncBuffer;
+ }
+
+ return (DWORD)hr;
+}
+
+int32_t AudioDeviceWindowsCore::EnableBuiltInAEC(bool enable) {
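+  // Must be called before InitRecording(); once recording has been
+  // initialized, the built-in AEC mode can no longer be changed.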
+ if (_recIsInitialized) {
+ RTC_LOG(LS_ERROR)
+ << "Attempt to set Windows AEC with recording already initialized";
+ return -1;
+ }
+
+ if (_dmo == NULL) {
+ RTC_LOG(LS_ERROR)
+ << "Built-in AEC DMO was not initialized properly at create time";
+ return -1;
+ }
+
+ _builtInAecEnabled = enable;
+ return 0;
+}
+
+void AudioDeviceWindowsCore::_Lock() RTC_NO_THREAD_SAFETY_ANALYSIS {
+ mutex_.Lock();
+}
+
+void AudioDeviceWindowsCore::_UnLock() RTC_NO_THREAD_SAFETY_ANALYSIS {
+ mutex_.Unlock();
+}
+
+int AudioDeviceWindowsCore::SetDMOProperties() {
+ HRESULT hr = S_OK;
+ RTC_DCHECK(_dmo);
+
+ rtc::scoped_refptr<IPropertyStore> ps;
+ {
+ IPropertyStore* ptrPS = NULL;
+ hr = _dmo->QueryInterface(IID_IPropertyStore,
+ reinterpret_cast<void**>(&ptrPS));
+ if (FAILED(hr) || ptrPS == NULL) {
+ _TraceCOMError(hr);
+ return -1;
+ }
+ ps = ptrPS;
+ SAFE_RELEASE(ptrPS);
+ }
+
+ // Set the AEC system mode.
+ // SINGLE_CHANNEL_AEC - AEC processing only.
+ if (SetVtI4Property(ps.get(), MFPKEY_WMAAECMA_SYSTEM_MODE,
+ SINGLE_CHANNEL_AEC)) {
+ return -1;
+ }
+
+ // Set the AEC source mode.
+ // VARIANT_TRUE - Source mode (we poll the AEC for captured data).
+ if (SetBoolProperty(ps.get(), MFPKEY_WMAAECMA_DMO_SOURCE_MODE,
+ VARIANT_TRUE) == -1) {
+ return -1;
+ }
+
+ // Enable the feature mode.
+ // This lets us override all the default processing settings below.
+ if (SetBoolProperty(ps.get(), MFPKEY_WMAAECMA_FEATURE_MODE, VARIANT_TRUE) ==
+ -1) {
+ return -1;
+ }
+
+ // Disable analog AGC (default enabled).
+ if (SetBoolProperty(ps.get(), MFPKEY_WMAAECMA_MIC_GAIN_BOUNDER,
+ VARIANT_FALSE) == -1) {
+ return -1;
+ }
+
+ // Disable noise suppression (default enabled).
+ // 0 - Disabled, 1 - Enabled
+ if (SetVtI4Property(ps.get(), MFPKEY_WMAAECMA_FEATR_NS, 0) == -1) {
+ return -1;
+ }
+
+ // Relevant parameters to leave at default settings:
+ // MFPKEY_WMAAECMA_FEATR_AGC - Digital AGC (disabled).
+ // MFPKEY_WMAAECMA_FEATR_CENTER_CLIP - AEC center clipping (enabled).
+ // MFPKEY_WMAAECMA_FEATR_ECHO_LENGTH - Filter length (256 ms).
+  //     TODO(andrew): investigate decreasing the length to 128 ms.
+ // MFPKEY_WMAAECMA_FEATR_FRAME_SIZE - Frame size (0).
+ // 0 is automatic; defaults to 160 samples (or 10 ms frames at the
+ // selected 16 kHz) as long as mic array processing is disabled.
+ // MFPKEY_WMAAECMA_FEATR_NOISE_FILL - Comfort noise (enabled).
+ // MFPKEY_WMAAECMA_FEATR_VAD - VAD (disabled).
+
+ // Set the devices selected by VoE. If using a default device, we need to
+ // search for the device index.
+ int inDevIndex = _inputDeviceIndex;
+ int outDevIndex = _outputDeviceIndex;
+ if (!_usingInputDeviceIndex) {
+ ERole role = eCommunications;
+ if (_inputDevice == AudioDeviceModule::kDefaultDevice) {
+ role = eConsole;
+ }
+
+ if (_GetDefaultDeviceIndex(eCapture, role, &inDevIndex) == -1) {
+ return -1;
+ }
+ }
+
+ if (!_usingOutputDeviceIndex) {
+ ERole role = eCommunications;
+ if (_outputDevice == AudioDeviceModule::kDefaultDevice) {
+ role = eConsole;
+ }
+
+ if (_GetDefaultDeviceIndex(eRender, role, &outDevIndex) == -1) {
+ return -1;
+ }
+ }
+
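+  // MFPKEY_WMAAECMA_DEVICE_INDEXES packs both endpoints into a single
+  // DWORD: the render (speaker) device index in the upper 16 bits and the
+  // capture (microphone) device index in the lower 16 bits.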
+ DWORD devIndex = static_cast<uint32_t>(outDevIndex << 16) +
+ static_cast<uint32_t>(0x0000ffff & inDevIndex);
+ RTC_LOG(LS_VERBOSE) << "Capture device index: " << inDevIndex
+ << ", render device index: " << outDevIndex;
+ if (SetVtI4Property(ps.get(), MFPKEY_WMAAECMA_DEVICE_INDEXES, devIndex) ==
+ -1) {
+ return -1;
+ }
+
+ return 0;
+}
+
+int AudioDeviceWindowsCore::SetBoolProperty(IPropertyStore* ptrPS,
+ REFPROPERTYKEY key,
+ VARIANT_BOOL value) {
+ PROPVARIANT pv;
+ PropVariantInit(&pv);
+ pv.vt = VT_BOOL;
+ pv.boolVal = value;
+ HRESULT hr = ptrPS->SetValue(key, pv);
+ PropVariantClear(&pv);
+ if (FAILED(hr)) {
+ _TraceCOMError(hr);
+ return -1;
+ }
+ return 0;
+}
+
+int AudioDeviceWindowsCore::SetVtI4Property(IPropertyStore* ptrPS,
+ REFPROPERTYKEY key,
+ LONG value) {
+ PROPVARIANT pv;
+ PropVariantInit(&pv);
+ pv.vt = VT_I4;
+ pv.lVal = value;
+ HRESULT hr = ptrPS->SetValue(key, pv);
+ PropVariantClear(&pv);
+ if (FAILED(hr)) {
+ _TraceCOMError(hr);
+ return -1;
+ }
+ return 0;
+}
+
+// ----------------------------------------------------------------------------
+// _RefreshDeviceList
+//
+// Creates a new list of endpoint rendering or capture devices after
+// deleting any previously created (and possibly out-of-date) list of
+// such devices.
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::_RefreshDeviceList(EDataFlow dir) {
+ RTC_DLOG(LS_VERBOSE) << __FUNCTION__;
+
+ HRESULT hr = S_OK;
+ IMMDeviceCollection* pCollection = NULL;
+
+ RTC_DCHECK(dir == eRender || dir == eCapture);
+ RTC_DCHECK(_ptrEnumerator);
+
+ // Create a fresh list of devices using the specified direction
+ hr = _ptrEnumerator->EnumAudioEndpoints(dir, DEVICE_STATE_ACTIVE,
+ &pCollection);
+ if (FAILED(hr)) {
+ _TraceCOMError(hr);
+ SAFE_RELEASE(pCollection);
+ return -1;
+ }
+
+ if (dir == eRender) {
+ SAFE_RELEASE(_ptrRenderCollection);
+ _ptrRenderCollection = pCollection;
+ } else {
+ SAFE_RELEASE(_ptrCaptureCollection);
+ _ptrCaptureCollection = pCollection;
+ }
+
+ return 0;
+}
+
+// ----------------------------------------------------------------------------
+// _DeviceListCount
+//
+// Gets a count of the endpoint rendering or capture devices in the
+// current list of such devices.
+// ----------------------------------------------------------------------------
+
+int16_t AudioDeviceWindowsCore::_DeviceListCount(EDataFlow dir) {
+ RTC_DLOG(LS_VERBOSE) << __FUNCTION__;
+
+ HRESULT hr = S_OK;
+ UINT count = 0;
+
+ RTC_DCHECK(eRender == dir || eCapture == dir);
+
+ if (eRender == dir && NULL != _ptrRenderCollection) {
+ hr = _ptrRenderCollection->GetCount(&count);
+ } else if (NULL != _ptrCaptureCollection) {
+ hr = _ptrCaptureCollection->GetCount(&count);
+ }
+
+ if (FAILED(hr)) {
+ _TraceCOMError(hr);
+ return -1;
+ }
+
+ return static_cast<int16_t>(count);
+}
+
+// ----------------------------------------------------------------------------
+// _GetListDeviceName
+//
+// Gets the friendly name of an endpoint rendering or capture device
+// from the current list of such devices. The caller uses an index
+// into the list to identify the device.
+//
+// Uses: _ptrRenderCollection or _ptrCaptureCollection which is updated
+// in _RefreshDeviceList().
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::_GetListDeviceName(EDataFlow dir,
+ int index,
+ LPWSTR szBuffer,
+ int bufferLen) {
+ RTC_DLOG(LS_VERBOSE) << __FUNCTION__;
+
+ HRESULT hr = S_OK;
+ IMMDevice* pDevice = NULL;
+
+ RTC_DCHECK(dir == eRender || dir == eCapture);
+
+ if (eRender == dir && NULL != _ptrRenderCollection) {
+ hr = _ptrRenderCollection->Item(index, &pDevice);
+ } else if (NULL != _ptrCaptureCollection) {
+ hr = _ptrCaptureCollection->Item(index, &pDevice);
+ }
+
+ if (FAILED(hr)) {
+ _TraceCOMError(hr);
+ SAFE_RELEASE(pDevice);
+ return -1;
+ }
+
+ int32_t res = _GetDeviceName(pDevice, szBuffer, bufferLen);
+ SAFE_RELEASE(pDevice);
+ return res;
+}
+
+// ----------------------------------------------------------------------------
+// _GetDefaultDeviceName
+//
+// Gets the friendly name of an endpoint rendering or capture device
+// given a specified device role.
+//
+// Uses: _ptrEnumerator
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::_GetDefaultDeviceName(EDataFlow dir,
+ ERole role,
+ LPWSTR szBuffer,
+ int bufferLen) {
+ RTC_DLOG(LS_VERBOSE) << __FUNCTION__;
+
+ HRESULT hr = S_OK;
+ IMMDevice* pDevice = NULL;
+
+ RTC_DCHECK(dir == eRender || dir == eCapture);
+ RTC_DCHECK(role == eConsole || role == eCommunications);
+ RTC_DCHECK(_ptrEnumerator);
+
+ hr = _ptrEnumerator->GetDefaultAudioEndpoint(dir, role, &pDevice);
+
+ if (FAILED(hr)) {
+ _TraceCOMError(hr);
+ SAFE_RELEASE(pDevice);
+ return -1;
+ }
+
+ int32_t res = _GetDeviceName(pDevice, szBuffer, bufferLen);
+ SAFE_RELEASE(pDevice);
+ return res;
+}
+
+// ----------------------------------------------------------------------------
+// _GetListDeviceID
+//
+// Gets the unique ID string of an endpoint rendering or capture device
+// from the current list of such devices. The caller uses an index
+// into the list to identify the device.
+//
+// Uses: _ptrRenderCollection or _ptrCaptureCollection which is updated
+// in _RefreshDeviceList().
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::_GetListDeviceID(EDataFlow dir,
+ int index,
+ LPWSTR szBuffer,
+ int bufferLen) {
+ RTC_DLOG(LS_VERBOSE) << __FUNCTION__;
+
+ HRESULT hr = S_OK;
+ IMMDevice* pDevice = NULL;
+
+ RTC_DCHECK(dir == eRender || dir == eCapture);
+
+ if (eRender == dir && NULL != _ptrRenderCollection) {
+ hr = _ptrRenderCollection->Item(index, &pDevice);
+ } else if (NULL != _ptrCaptureCollection) {
+ hr = _ptrCaptureCollection->Item(index, &pDevice);
+ }
+
+ if (FAILED(hr)) {
+ _TraceCOMError(hr);
+ SAFE_RELEASE(pDevice);
+ return -1;
+ }
+
+ int32_t res = _GetDeviceID(pDevice, szBuffer, bufferLen);
+ SAFE_RELEASE(pDevice);
+ return res;
+}
+
+// ----------------------------------------------------------------------------
+// _GetDefaultDeviceID
+//
+// Gets the unique device ID of an endpoint rendering or capture device
+// given a specified device role.
+//
+// Uses: _ptrEnumerator
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::_GetDefaultDeviceID(EDataFlow dir,
+ ERole role,
+ LPWSTR szBuffer,
+ int bufferLen) {
+ RTC_DLOG(LS_VERBOSE) << __FUNCTION__;
+
+ HRESULT hr = S_OK;
+ IMMDevice* pDevice = NULL;
+
+ RTC_DCHECK(dir == eRender || dir == eCapture);
+ RTC_DCHECK(role == eConsole || role == eCommunications);
+ RTC_DCHECK(_ptrEnumerator);
+
+ hr = _ptrEnumerator->GetDefaultAudioEndpoint(dir, role, &pDevice);
+
+ if (FAILED(hr)) {
+ _TraceCOMError(hr);
+ SAFE_RELEASE(pDevice);
+ return -1;
+ }
+
+ int32_t res = _GetDeviceID(pDevice, szBuffer, bufferLen);
+ SAFE_RELEASE(pDevice);
+ return res;
+}
+
+int32_t AudioDeviceWindowsCore::_GetDefaultDeviceIndex(EDataFlow dir,
+ ERole role,
+ int* index) {
+ RTC_DLOG(LS_VERBOSE) << __FUNCTION__;
+
+ HRESULT hr = S_OK;
+ WCHAR szDefaultDeviceID[MAX_PATH] = {0};
+ WCHAR szDeviceID[MAX_PATH] = {0};
+
+ const size_t kDeviceIDLength = sizeof(szDeviceID) / sizeof(szDeviceID[0]);
+ RTC_DCHECK_EQ(kDeviceIDLength,
+ sizeof(szDefaultDeviceID) / sizeof(szDefaultDeviceID[0]));
+
+ if (_GetDefaultDeviceID(dir, role, szDefaultDeviceID, kDeviceIDLength) ==
+ -1) {
+ return -1;
+ }
+
+ IMMDeviceCollection* collection = _ptrCaptureCollection;
+ if (dir == eRender) {
+ collection = _ptrRenderCollection;
+ }
+
+ if (!collection) {
+ RTC_LOG(LS_ERROR) << "Device collection not valid";
+ return -1;
+ }
+
+ UINT count = 0;
+ hr = collection->GetCount(&count);
+ if (FAILED(hr)) {
+ _TraceCOMError(hr);
+ return -1;
+ }
+
+ *index = -1;
+ for (UINT i = 0; i < count; i++) {
+ memset(szDeviceID, 0, sizeof(szDeviceID));
+ rtc::scoped_refptr<IMMDevice> device;
+ {
+ IMMDevice* ptrDevice = NULL;
+ hr = collection->Item(i, &ptrDevice);
+ if (FAILED(hr) || ptrDevice == NULL) {
+ _TraceCOMError(hr);
+ return -1;
+ }
+ device = ptrDevice;
+ SAFE_RELEASE(ptrDevice);
+ }
+
+ if (_GetDeviceID(device.get(), szDeviceID, kDeviceIDLength) == -1) {
+ return -1;
+ }
+
+ if (wcsncmp(szDefaultDeviceID, szDeviceID, kDeviceIDLength) == 0) {
+ // Found a match.
+ *index = i;
+ break;
+ }
+ }
+
+ if (*index == -1) {
+ RTC_LOG(LS_ERROR) << "Unable to find collection index for default device";
+ return -1;
+ }
+
+ return 0;
+}
+
+// ----------------------------------------------------------------------------
+// _GetDeviceName
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::_GetDeviceName(IMMDevice* pDevice,
+ LPWSTR pszBuffer,
+ int bufferLen) {
+ RTC_DLOG(LS_VERBOSE) << __FUNCTION__;
+
+ static const WCHAR szDefault[] = L"<Device not available>";
+
+ HRESULT hr = E_FAIL;
+ IPropertyStore* pProps = NULL;
+ PROPVARIANT varName;
+
+ RTC_DCHECK(pszBuffer);
+ RTC_DCHECK_GT(bufferLen, 0);
+
+ if (pDevice != NULL) {
+ hr = pDevice->OpenPropertyStore(STGM_READ, &pProps);
+ if (FAILED(hr)) {
+ RTC_LOG(LS_ERROR) << "IMMDevice::OpenPropertyStore failed, hr = 0x"
+ << rtc::ToHex(hr);
+ }
+ }
+
+ // Initialize container for property value.
+ PropVariantInit(&varName);
+
+ if (SUCCEEDED(hr)) {
+ // Get the endpoint device's friendly-name property.
+ hr = pProps->GetValue(PKEY_Device_FriendlyName, &varName);
+ if (FAILED(hr)) {
+ RTC_LOG(LS_ERROR) << "IPropertyStore::GetValue failed, hr = 0x"
+ << rtc::ToHex(hr);
+ }
+ }
+
+ if ((SUCCEEDED(hr)) && (VT_EMPTY == varName.vt)) {
+ hr = E_FAIL;
+ RTC_LOG(LS_ERROR) << "IPropertyStore::GetValue returned no value,"
+ " hr = 0x"
+ << rtc::ToHex(hr);
+ }
+
+ if ((SUCCEEDED(hr)) && (VT_LPWSTR != varName.vt)) {
+ // The returned value is not a wide null terminated string.
+ hr = E_UNEXPECTED;
+ RTC_LOG(LS_ERROR) << "IPropertyStore::GetValue returned unexpected"
+ " type, hr = 0x"
+ << rtc::ToHex(hr);
+ }
+
+ if (SUCCEEDED(hr) && (varName.pwszVal != NULL)) {
+    // Copy the valid device name to the provided output buffer.
+ wcsncpy_s(pszBuffer, bufferLen, varName.pwszVal, _TRUNCATE);
+ } else {
+ // Failed to find the device name.
+ wcsncpy_s(pszBuffer, bufferLen, szDefault, _TRUNCATE);
+ }
+
+ PropVariantClear(&varName);
+ SAFE_RELEASE(pProps);
+
+ return 0;
+}
+
+// ----------------------------------------------------------------------------
+// _GetDeviceID
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::_GetDeviceID(IMMDevice* pDevice,
+ LPWSTR pszBuffer,
+ int bufferLen) {
+ RTC_DLOG(LS_VERBOSE) << __FUNCTION__;
+
+ static const WCHAR szDefault[] = L"<Device not available>";
+
+ HRESULT hr = E_FAIL;
+ LPWSTR pwszID = NULL;
+
+ RTC_DCHECK(pszBuffer);
+ RTC_DCHECK_GT(bufferLen, 0);
+
+ if (pDevice != NULL) {
+ hr = pDevice->GetId(&pwszID);
+ }
+
+ if (hr == S_OK) {
+ // Found the device ID.
+ wcsncpy_s(pszBuffer, bufferLen, pwszID, _TRUNCATE);
+ } else {
+ // Failed to find the device ID.
+ wcsncpy_s(pszBuffer, bufferLen, szDefault, _TRUNCATE);
+ }
+
+ CoTaskMemFree(pwszID);
+ return 0;
+}
+
+// ----------------------------------------------------------------------------
+// _GetDefaultDevice
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::_GetDefaultDevice(EDataFlow dir,
+ ERole role,
+ IMMDevice** ppDevice) {
+ RTC_DLOG(LS_VERBOSE) << __FUNCTION__;
+
+ HRESULT hr(S_OK);
+
+ RTC_DCHECK(_ptrEnumerator);
+
+ hr = _ptrEnumerator->GetDefaultAudioEndpoint(dir, role, ppDevice);
+ if (FAILED(hr)) {
+ _TraceCOMError(hr);
+ return -1;
+ }
+
+ return 0;
+}
+
+// ----------------------------------------------------------------------------
+// _GetListDevice
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::_GetListDevice(EDataFlow dir,
+ int index,
+ IMMDevice** ppDevice) {
+ HRESULT hr(S_OK);
+
+ RTC_DCHECK(_ptrEnumerator);
+
+ IMMDeviceCollection* pCollection = NULL;
+
+ hr = _ptrEnumerator->EnumAudioEndpoints(
+ dir,
+ DEVICE_STATE_ACTIVE, // only active endpoints are OK
+ &pCollection);
+ if (FAILED(hr)) {
+ _TraceCOMError(hr);
+ SAFE_RELEASE(pCollection);
+ return -1;
+ }
+
+ hr = pCollection->Item(index, ppDevice);
+ if (FAILED(hr)) {
+ _TraceCOMError(hr);
+ SAFE_RELEASE(pCollection);
+ return -1;
+ }
+
+ SAFE_RELEASE(pCollection);
+
+ return 0;
+}
+
+// ----------------------------------------------------------------------------
+// _EnumerateEndpointDevicesAll
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::_EnumerateEndpointDevicesAll(
+ EDataFlow dataFlow) const {
+ RTC_DLOG(LS_VERBOSE) << __FUNCTION__;
+
+ RTC_DCHECK(_ptrEnumerator);
+
+ HRESULT hr = S_OK;
+ IMMDeviceCollection* pCollection = NULL;
+ IMMDevice* pEndpoint = NULL;
+ IPropertyStore* pProps = NULL;
+ IAudioEndpointVolume* pEndpointVolume = NULL;
+ LPWSTR pwszID = NULL;
+
+ // Generate a collection of audio endpoint devices in the system.
+ // Get states for *all* endpoint devices.
+ // Output: IMMDeviceCollection interface.
+ hr = _ptrEnumerator->EnumAudioEndpoints(
+ dataFlow, // data-flow direction (input parameter)
+ DEVICE_STATE_ACTIVE | DEVICE_STATE_DISABLED | DEVICE_STATE_UNPLUGGED,
+ &pCollection); // release interface when done
+
+ EXIT_ON_ERROR(hr);
+
+ // use the IMMDeviceCollection interface...
+
+ UINT count = 0;
+
+ // Retrieve a count of the devices in the device collection.
+ hr = pCollection->GetCount(&count);
+ EXIT_ON_ERROR(hr);
+ if (dataFlow == eRender)
+ RTC_LOG(LS_VERBOSE) << "#rendering endpoint devices (counting all): "
+ << count;
+ else if (dataFlow == eCapture)
+ RTC_LOG(LS_VERBOSE) << "#capturing endpoint devices (counting all): "
+ << count;
+
+ if (count == 0) {
+ return 0;
+ }
+
+ // Each loop prints the name of an endpoint device.
+ for (ULONG i = 0; i < count; i++) {
+ RTC_LOG(LS_VERBOSE) << "Endpoint " << i << ":";
+
+ // Get pointer to endpoint number i.
+ // Output: IMMDevice interface.
+ hr = pCollection->Item(i, &pEndpoint);
+ CONTINUE_ON_ERROR(hr);
+
+ // use the IMMDevice interface of the specified endpoint device...
+
+ // Get the endpoint ID string (uniquely identifies the device among all
+ // audio endpoint devices)
+ hr = pEndpoint->GetId(&pwszID);
+ CONTINUE_ON_ERROR(hr);
+ RTC_LOG(LS_VERBOSE) << "ID string : " << pwszID;
+
+ // Retrieve an interface to the device's property store.
+ // Output: IPropertyStore interface.
+ hr = pEndpoint->OpenPropertyStore(STGM_READ, &pProps);
+ CONTINUE_ON_ERROR(hr);
+
+ // use the IPropertyStore interface...
+
+ PROPVARIANT varName;
+ // Initialize container for property value.
+ PropVariantInit(&varName);
+
+ // Get the endpoint's friendly-name property.
+ // Example: "Speakers (Realtek High Definition Audio)"
+ hr = pProps->GetValue(PKEY_Device_FriendlyName, &varName);
+ CONTINUE_ON_ERROR(hr);
+ RTC_LOG(LS_VERBOSE) << "friendly name: \"" << varName.pwszVal << "\"";
+
+ // Get the endpoint's current device state
+ DWORD dwState;
+ hr = pEndpoint->GetState(&dwState);
+ CONTINUE_ON_ERROR(hr);
+ if (dwState & DEVICE_STATE_ACTIVE)
+ RTC_LOG(LS_VERBOSE) << "state (0x" << rtc::ToHex(dwState)
+ << ") : *ACTIVE*";
+ if (dwState & DEVICE_STATE_DISABLED)
+ RTC_LOG(LS_VERBOSE) << "state (0x" << rtc::ToHex(dwState)
+ << ") : DISABLED";
+ if (dwState & DEVICE_STATE_NOTPRESENT)
+ RTC_LOG(LS_VERBOSE) << "state (0x" << rtc::ToHex(dwState)
+ << ") : NOTPRESENT";
+ if (dwState & DEVICE_STATE_UNPLUGGED)
+ RTC_LOG(LS_VERBOSE) << "state (0x" << rtc::ToHex(dwState)
+ << ") : UNPLUGGED";
+
+ // Check the hardware volume capabilities.
+ DWORD dwHwSupportMask = 0;
+ hr = pEndpoint->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL, NULL,
+ (void**)&pEndpointVolume);
+ CONTINUE_ON_ERROR(hr);
+ hr = pEndpointVolume->QueryHardwareSupport(&dwHwSupportMask);
+ CONTINUE_ON_ERROR(hr);
+ if (dwHwSupportMask & ENDPOINT_HARDWARE_SUPPORT_VOLUME)
+ // The audio endpoint device supports a hardware volume control
+ RTC_LOG(LS_VERBOSE) << "hwmask (0x" << rtc::ToHex(dwHwSupportMask)
+ << ") : HARDWARE_SUPPORT_VOLUME";
+ if (dwHwSupportMask & ENDPOINT_HARDWARE_SUPPORT_MUTE)
+ // The audio endpoint device supports a hardware mute control
+ RTC_LOG(LS_VERBOSE) << "hwmask (0x" << rtc::ToHex(dwHwSupportMask)
+ << ") : HARDWARE_SUPPORT_MUTE";
+ if (dwHwSupportMask & ENDPOINT_HARDWARE_SUPPORT_METER)
+ // The audio endpoint device supports a hardware peak meter
+ RTC_LOG(LS_VERBOSE) << "hwmask (0x" << rtc::ToHex(dwHwSupportMask)
+ << ") : HARDWARE_SUPPORT_METER";
+
+ // Check the channel count (#channels in the audio stream that enters or
+ // leaves the audio endpoint device)
+ UINT nChannelCount(0);
+ hr = pEndpointVolume->GetChannelCount(&nChannelCount);
+ CONTINUE_ON_ERROR(hr);
+ RTC_LOG(LS_VERBOSE) << "#channels : " << nChannelCount;
+
+ if (dwHwSupportMask & ENDPOINT_HARDWARE_SUPPORT_VOLUME) {
+ // Get the volume range.
+ float fLevelMinDB(0.0);
+ float fLevelMaxDB(0.0);
+ float fVolumeIncrementDB(0.0);
+ hr = pEndpointVolume->GetVolumeRange(&fLevelMinDB, &fLevelMaxDB,
+ &fVolumeIncrementDB);
+ CONTINUE_ON_ERROR(hr);
+ RTC_LOG(LS_VERBOSE) << "volume range : " << fLevelMinDB << " (min), "
+ << fLevelMaxDB << " (max), " << fVolumeIncrementDB
+ << " (inc) [dB]";
+
+ // The volume range from vmin = fLevelMinDB to vmax = fLevelMaxDB is
+ // divided into n uniform intervals of size vinc = fVolumeIncrementDB,
+      // where n = (vmax - vmin) / vinc. The values vmin, vmax, and vinc are
+ // measured in decibels. The client can set the volume level to one of n +
+ // 1 discrete values in the range from vmin to vmax.
+ int n = (int)((fLevelMaxDB - fLevelMinDB) / fVolumeIncrementDB);
+ RTC_LOG(LS_VERBOSE) << "#intervals : " << n;
+
+ // Get information about the current step in the volume range.
+ // This method represents the volume level of the audio stream that enters
+ // or leaves the audio endpoint device as an index or "step" in a range of
+ // discrete volume levels. Output value nStepCount is the number of steps
+ // in the range. Output value nStep is the step index of the current
+ // volume level. If the number of steps is n = nStepCount, then step index
+      // nStep can assume values from 0 (minimum volume) to n - 1 (maximum
+ // volume).
+ UINT nStep(0);
+ UINT nStepCount(0);
+ hr = pEndpointVolume->GetVolumeStepInfo(&nStep, &nStepCount);
+ CONTINUE_ON_ERROR(hr);
+ RTC_LOG(LS_VERBOSE) << "volume steps : " << nStep << " (nStep), "
+ << nStepCount << " (nStepCount)";
+ }
+ Next:
+ if (FAILED(hr)) {
+ RTC_LOG(LS_VERBOSE) << "Error when logging device information";
+ }
+ CoTaskMemFree(pwszID);
+ pwszID = NULL;
+ PropVariantClear(&varName);
+ SAFE_RELEASE(pProps);
+ SAFE_RELEASE(pEndpoint);
+ SAFE_RELEASE(pEndpointVolume);
+ }
+ SAFE_RELEASE(pCollection);
+ return 0;
+
+Exit:
+ _TraceCOMError(hr);
+ CoTaskMemFree(pwszID);
+ pwszID = NULL;
+ SAFE_RELEASE(pCollection);
+ SAFE_RELEASE(pEndpoint);
+ SAFE_RELEASE(pEndpointVolume);
+ SAFE_RELEASE(pProps);
+ return -1;
+}
+
+// ----------------------------------------------------------------------------
+// _TraceCOMError
+// ----------------------------------------------------------------------------
+
+void AudioDeviceWindowsCore::_TraceCOMError(HRESULT hr) const {
+ wchar_t buf[MAXERRORLENGTH];
+ wchar_t errorText[MAXERRORLENGTH];
+
+ const DWORD dwFlags =
+ FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS;
+ const DWORD dwLangID = MAKELANGID(LANG_ENGLISH, SUBLANG_ENGLISH_US);
+
+  // Gets the system's human-readable message string for this HRESULT.
+  // All error messages are in English by default.
+ DWORD messageLength = ::FormatMessageW(dwFlags, 0, hr, dwLangID, errorText,
+ MAXERRORLENGTH, NULL);
+
+ RTC_DCHECK_LE(messageLength, MAXERRORLENGTH);
+
+  // Trims trailing whitespace (FormatMessage() leaves a trailing CR-LF).
+ for (; messageLength && ::isspace(errorText[messageLength - 1]);
+ --messageLength) {
+ errorText[messageLength - 1] = '\0';
+ }
+
+ RTC_LOG(LS_ERROR) << "Core Audio method failed (hr=" << hr << ")";
+ StringCchPrintfW(buf, MAXERRORLENGTH, L"Error details: ");
+ StringCchCatW(buf, MAXERRORLENGTH, errorText);
+ RTC_LOG(LS_ERROR) << rtc::ToUtf8(buf);
+}
+
+bool AudioDeviceWindowsCore::KeyPressed() const {
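+  // GetAsyncKeyState() sets the least-significant bit of its return value
+  // if the key was pressed since the previous call. OR-ing that bit across
+  // the VK_SPACE..VK_NUMLOCK range gives a coarse "user is typing" signal.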
+ int key_down = 0;
+ for (int key = VK_SPACE; key < VK_NUMLOCK; key++) {
+ short res = GetAsyncKeyState(key);
+ key_down |= res & 0x1; // Get the LSB
+ }
+ return (key_down > 0);
+}
+} // namespace webrtc
+
+#endif // WEBRTC_WINDOWS_CORE_AUDIO_BUILD
diff --git a/third_party/libwebrtc/modules/audio_device/win/audio_device_core_win.h b/third_party/libwebrtc/modules/audio_device/win/audio_device_core_win.h
new file mode 100644
index 0000000000..7e7ef21157
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/win/audio_device_core_win.h
@@ -0,0 +1,300 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_DEVICE_WIN_AUDIO_DEVICE_CORE_WIN_H_
+#define MODULES_AUDIO_DEVICE_WIN_AUDIO_DEVICE_CORE_WIN_H_
+
+#if (_MSC_VER >= 1400) // only include for VS 2005 and higher
+
+#include "rtc_base/win32.h"
+
+#include "modules/audio_device/audio_device_generic.h"
+
+#include <wmcodecdsp.h> // CLSID_CWMAudioAEC
+ // (must be before audioclient.h)
+#include <audioclient.h> // WASAPI
+#include <audiopolicy.h>
+#include <avrt.h> // Avrt
+#include <endpointvolume.h>
+#include <mediaobj.h> // IMediaObject
+#include <mmdeviceapi.h> // MMDevice
+
+#include "api/scoped_refptr.h"
+#include "rtc_base/synchronization/mutex.h"
+#include "rtc_base/win/scoped_com_initializer.h"
+
+// Use Multimedia Class Scheduler Service (MMCSS) to boost the thread priority
+#pragma comment(lib, "avrt.lib")
+// AVRT function pointers
+typedef BOOL(WINAPI* PAvRevertMmThreadCharacteristics)(HANDLE);
+typedef HANDLE(WINAPI* PAvSetMmThreadCharacteristicsA)(LPCSTR, LPDWORD);
+typedef BOOL(WINAPI* PAvSetMmThreadPriority)(HANDLE, AVRT_PRIORITY);
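+// These entry points are resolved at runtime (see _avrtLibrary below), so
+// the module can still be loaded on systems where avrt.dll is unavailable.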
+
+namespace webrtc {
+
+const float MAX_CORE_SPEAKER_VOLUME = 255.0f;
+const float MIN_CORE_SPEAKER_VOLUME = 0.0f;
+const float MAX_CORE_MICROPHONE_VOLUME = 255.0f;
+const float MIN_CORE_MICROPHONE_VOLUME = 0.0f;
+const uint16_t CORE_SPEAKER_VOLUME_STEP_SIZE = 1;
+const uint16_t CORE_MICROPHONE_VOLUME_STEP_SIZE = 1;
+
+class AudioDeviceWindowsCore : public AudioDeviceGeneric {
+ public:
+ AudioDeviceWindowsCore();
+ ~AudioDeviceWindowsCore();
+
+ static bool CoreAudioIsSupported();
+
+ // Retrieve the currently utilized audio layer
+ virtual int32_t ActiveAudioLayer(
+ AudioDeviceModule::AudioLayer& audioLayer) const;
+
+  // Main initialization and termination
+ virtual InitStatus Init() RTC_LOCKS_EXCLUDED(mutex_);
+ virtual int32_t Terminate() RTC_LOCKS_EXCLUDED(mutex_);
+ virtual bool Initialized() const;
+
+ // Device enumeration
+ virtual int16_t PlayoutDevices() RTC_LOCKS_EXCLUDED(mutex_);
+ virtual int16_t RecordingDevices() RTC_LOCKS_EXCLUDED(mutex_);
+ virtual int32_t PlayoutDeviceName(uint16_t index,
+ char name[kAdmMaxDeviceNameSize],
+ char guid[kAdmMaxGuidSize])
+ RTC_LOCKS_EXCLUDED(mutex_);
+ virtual int32_t RecordingDeviceName(uint16_t index,
+ char name[kAdmMaxDeviceNameSize],
+ char guid[kAdmMaxGuidSize])
+ RTC_LOCKS_EXCLUDED(mutex_);
+
+ // Device selection
+ virtual int32_t SetPlayoutDevice(uint16_t index) RTC_LOCKS_EXCLUDED(mutex_);
+ virtual int32_t SetPlayoutDevice(AudioDeviceModule::WindowsDeviceType device);
+ virtual int32_t SetRecordingDevice(uint16_t index) RTC_LOCKS_EXCLUDED(mutex_);
+ virtual int32_t SetRecordingDevice(
+ AudioDeviceModule::WindowsDeviceType device) RTC_LOCKS_EXCLUDED(mutex_);
+
+ // Audio transport initialization
+ virtual int32_t PlayoutIsAvailable(bool& available);
+ virtual int32_t InitPlayout() RTC_LOCKS_EXCLUDED(mutex_);
+ virtual bool PlayoutIsInitialized() const;
+ virtual int32_t RecordingIsAvailable(bool& available);
+ virtual int32_t InitRecording() RTC_LOCKS_EXCLUDED(mutex_);
+ virtual bool RecordingIsInitialized() const;
+
+ // Audio transport control
+ virtual int32_t StartPlayout() RTC_LOCKS_EXCLUDED(mutex_);
+ virtual int32_t StopPlayout() RTC_LOCKS_EXCLUDED(mutex_);
+ virtual bool Playing() const;
+ virtual int32_t StartRecording() RTC_LOCKS_EXCLUDED(mutex_);
+ virtual int32_t StopRecording();
+ virtual bool Recording() const;
+
+ // Audio mixer initialization
+ virtual int32_t InitSpeaker() RTC_LOCKS_EXCLUDED(mutex_);
+ virtual bool SpeakerIsInitialized() const;
+ virtual int32_t InitMicrophone() RTC_LOCKS_EXCLUDED(mutex_);
+ virtual bool MicrophoneIsInitialized() const;
+
+ // Speaker volume controls
+ virtual int32_t SpeakerVolumeIsAvailable(bool& available)
+ RTC_LOCKS_EXCLUDED(mutex_);
+ virtual int32_t SetSpeakerVolume(uint32_t volume) RTC_LOCKS_EXCLUDED(mutex_);
+ virtual int32_t SpeakerVolume(uint32_t& volume) const
+ RTC_LOCKS_EXCLUDED(mutex_);
+ virtual int32_t MaxSpeakerVolume(uint32_t& maxVolume) const;
+ virtual int32_t MinSpeakerVolume(uint32_t& minVolume) const;
+
+ // Microphone volume controls
+ virtual int32_t MicrophoneVolumeIsAvailable(bool& available)
+ RTC_LOCKS_EXCLUDED(mutex_);
+ virtual int32_t SetMicrophoneVolume(uint32_t volume)
+ RTC_LOCKS_EXCLUDED(mutex_, volume_mutex_);
+ virtual int32_t MicrophoneVolume(uint32_t& volume) const
+ RTC_LOCKS_EXCLUDED(mutex_, volume_mutex_);
+ virtual int32_t MaxMicrophoneVolume(uint32_t& maxVolume) const;
+ virtual int32_t MinMicrophoneVolume(uint32_t& minVolume) const;
+
+ // Speaker mute control
+ virtual int32_t SpeakerMuteIsAvailable(bool& available)
+ RTC_LOCKS_EXCLUDED(mutex_);
+ virtual int32_t SetSpeakerMute(bool enable) RTC_LOCKS_EXCLUDED(mutex_);
+ virtual int32_t SpeakerMute(bool& enabled) const;
+
+ // Microphone mute control
+ virtual int32_t MicrophoneMuteIsAvailable(bool& available)
+ RTC_LOCKS_EXCLUDED(mutex_);
+ virtual int32_t SetMicrophoneMute(bool enable);
+ virtual int32_t MicrophoneMute(bool& enabled) const;
+
+ // Stereo support
+ virtual int32_t StereoPlayoutIsAvailable(bool& available);
+ virtual int32_t SetStereoPlayout(bool enable) RTC_LOCKS_EXCLUDED(mutex_);
+ virtual int32_t StereoPlayout(bool& enabled) const;
+ virtual int32_t StereoRecordingIsAvailable(bool& available);
+ virtual int32_t SetStereoRecording(bool enable) RTC_LOCKS_EXCLUDED(mutex_);
+ virtual int32_t StereoRecording(bool& enabled) const
+ RTC_LOCKS_EXCLUDED(mutex_);
+
+ // Delay information and control
+ virtual int32_t PlayoutDelay(uint16_t& delayMS) const
+ RTC_LOCKS_EXCLUDED(mutex_);
+
+ virtual bool BuiltInAECIsAvailable() const;
+
+ virtual int32_t EnableBuiltInAEC(bool enable);
+
+ public:
+ virtual void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer);
+
+ private:
+ bool KeyPressed() const;
+
+ private: // avrt function pointers
+ PAvRevertMmThreadCharacteristics _PAvRevertMmThreadCharacteristics;
+ PAvSetMmThreadCharacteristicsA _PAvSetMmThreadCharacteristicsA;
+ PAvSetMmThreadPriority _PAvSetMmThreadPriority;
+ HMODULE _avrtLibrary;
+ bool _winSupportAvrt;
+
+ private: // thread functions
+ int32_t InitSpeakerLocked() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+ int32_t InitMicrophoneLocked() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+ int16_t PlayoutDevicesLocked() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+ int16_t RecordingDevicesLocked() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+ DWORD InitCaptureThreadPriority();
+ void RevertCaptureThreadPriority();
+ static DWORD WINAPI WSAPICaptureThread(LPVOID context);
+ DWORD DoCaptureThread();
+
+ static DWORD WINAPI WSAPICaptureThreadPollDMO(LPVOID context);
+ DWORD DoCaptureThreadPollDMO() RTC_LOCKS_EXCLUDED(mutex_);
+
+ static DWORD WINAPI WSAPIRenderThread(LPVOID context);
+ DWORD DoRenderThread();
+
+ void _Lock();
+ void _UnLock();
+
+ int SetDMOProperties();
+
+ int SetBoolProperty(IPropertyStore* ptrPS,
+ REFPROPERTYKEY key,
+ VARIANT_BOOL value);
+
+ int SetVtI4Property(IPropertyStore* ptrPS, REFPROPERTYKEY key, LONG value);
+
+ int32_t _EnumerateEndpointDevicesAll(EDataFlow dataFlow) const;
+ void _TraceCOMError(HRESULT hr) const;
+
+ int32_t _RefreshDeviceList(EDataFlow dir);
+ int16_t _DeviceListCount(EDataFlow dir);
+ int32_t _GetDefaultDeviceName(EDataFlow dir,
+ ERole role,
+ LPWSTR szBuffer,
+ int bufferLen);
+ int32_t _GetListDeviceName(EDataFlow dir,
+ int index,
+ LPWSTR szBuffer,
+ int bufferLen);
+ int32_t _GetDeviceName(IMMDevice* pDevice, LPWSTR pszBuffer, int bufferLen);
+ int32_t _GetListDeviceID(EDataFlow dir,
+ int index,
+ LPWSTR szBuffer,
+ int bufferLen);
+ int32_t _GetDefaultDeviceID(EDataFlow dir,
+ ERole role,
+ LPWSTR szBuffer,
+ int bufferLen);
+ int32_t _GetDefaultDeviceIndex(EDataFlow dir, ERole role, int* index);
+ int32_t _GetDeviceID(IMMDevice* pDevice, LPWSTR pszBuffer, int bufferLen);
+ int32_t _GetDefaultDevice(EDataFlow dir, ERole role, IMMDevice** ppDevice);
+ int32_t _GetListDevice(EDataFlow dir, int index, IMMDevice** ppDevice);
+
+ int32_t InitRecordingDMO();
+
+ ScopedCOMInitializer _comInit;
+ AudioDeviceBuffer* _ptrAudioBuffer;
+ mutable Mutex mutex_;
+ mutable Mutex volume_mutex_ RTC_ACQUIRED_AFTER(mutex_);
+
+ IMMDeviceEnumerator* _ptrEnumerator;
+ IMMDeviceCollection* _ptrRenderCollection;
+ IMMDeviceCollection* _ptrCaptureCollection;
+ IMMDevice* _ptrDeviceOut;
+ IMMDevice* _ptrDeviceIn;
+
+ IAudioClient* _ptrClientOut;
+ IAudioClient* _ptrClientIn;
+ IAudioRenderClient* _ptrRenderClient;
+ IAudioCaptureClient* _ptrCaptureClient;
+ IAudioEndpointVolume* _ptrCaptureVolume;
+ ISimpleAudioVolume* _ptrRenderSimpleVolume;
+
+ // DirectX Media Object (DMO) for the built-in AEC.
+ rtc::scoped_refptr<IMediaObject> _dmo;
+ rtc::scoped_refptr<IMediaBuffer> _mediaBuffer;
+ bool _builtInAecEnabled;
+
+ HANDLE _hRenderSamplesReadyEvent;
+ HANDLE _hPlayThread;
+ HANDLE _hRenderStartedEvent;
+ HANDLE _hShutdownRenderEvent;
+
+ HANDLE _hCaptureSamplesReadyEvent;
+ HANDLE _hRecThread;
+ HANDLE _hCaptureStartedEvent;
+ HANDLE _hShutdownCaptureEvent;
+
+ HANDLE _hMmTask;
+
+ UINT _playAudioFrameSize;
+ uint32_t _playSampleRate;
+ uint32_t _devicePlaySampleRate;
+ uint32_t _playBlockSize;
+ uint32_t _devicePlayBlockSize;
+ uint32_t _playChannels;
+ uint32_t _sndCardPlayDelay;
+ UINT64 _writtenSamples;
+ UINT64 _readSamples;
+
+ UINT _recAudioFrameSize;
+ uint32_t _recSampleRate;
+ uint32_t _recBlockSize;
+ uint32_t _recChannels;
+
+ uint16_t _recChannelsPrioList[3];
+ uint16_t _playChannelsPrioList[2];
+
+ LARGE_INTEGER _perfCounterFreq;
+ double _perfCounterFactor;
+
+ private:
+ bool _initialized;
+ bool _recording;
+ bool _playing;
+ bool _recIsInitialized;
+ bool _playIsInitialized;
+ bool _speakerIsInitialized;
+ bool _microphoneIsInitialized;
+
+ bool _usingInputDeviceIndex;
+ bool _usingOutputDeviceIndex;
+ AudioDeviceModule::WindowsDeviceType _inputDevice;
+ AudioDeviceModule::WindowsDeviceType _outputDevice;
+ uint16_t _inputDeviceIndex;
+ uint16_t _outputDeviceIndex;
+};
+
+#endif // #if (_MSC_VER >= 1400)
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_DEVICE_WIN_AUDIO_DEVICE_CORE_WIN_H_
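The header above pairs public methods annotated RTC_LOCKS_EXCLUDED(mutex_) with private *Locked() helpers annotated RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_), and fixes the lock order between its two mutexes with RTC_ACQUIRED_AFTER. A minimal sketch of that pattern, with hypothetical class and member names that are not part of this patch:

// Sketch only: illustrates the lock annotations used by the class above.
#include "rtc_base/synchronization/mutex.h"
#include "rtc_base/thread_annotations.h"

class LockPatternSketch {
 public:
  // Public entry point; must be called without `mutex_` held.
  int32_t InitSpeaker() RTC_LOCKS_EXCLUDED(mutex_) {
    webrtc::MutexLock lock(&mutex_);
    return InitSpeakerLocked();
  }

 private:
  // Assumes the caller already holds `mutex_`; Clang's thread-safety
  // analysis verifies this at compile time.
  int32_t InitSpeakerLocked() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_) {
    speaker_is_initialized_ = true;
    return 0;
  }

  mutable webrtc::Mutex mutex_;
  // Must always be acquired after `mutex_`; taking the locks in the
  // opposite order is flagged as a lock-order inversion.
  mutable webrtc::Mutex volume_mutex_ RTC_ACQUIRED_AFTER(mutex_);
  bool speaker_is_initialized_ RTC_GUARDED_BY(mutex_) = false;
};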
diff --git a/third_party/libwebrtc/modules/audio_device/win/audio_device_module_win.cc b/third_party/libwebrtc/modules/audio_device/win/audio_device_module_win.cc
new file mode 100644
index 0000000000..a36c40735e
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/win/audio_device_module_win.cc
@@ -0,0 +1,522 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_device/win/audio_device_module_win.h"
+
+#include <memory>
+#include <utility>
+
+#include "api/make_ref_counted.h"
+#include "api/sequence_checker.h"
+#include "modules/audio_device/audio_device_buffer.h"
+#include "modules/audio_device/include/audio_device.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/string_utils.h"
+
+namespace webrtc {
+namespace webrtc_win {
+namespace {
+
+#define RETURN_IF_OUTPUT_RESTARTS(...) \
+ do { \
+ if (output_->Restarting()) { \
+ return __VA_ARGS__; \
+ } \
+ } while (0)
+
+#define RETURN_IF_INPUT_RESTARTS(...) \
+ do { \
+ if (input_->Restarting()) { \
+ return __VA_ARGS__; \
+ } \
+ } while (0)
+
+#define RETURN_IF_OUTPUT_IS_INITIALIZED(...) \
+ do { \
+ if (output_->PlayoutIsInitialized()) { \
+ return __VA_ARGS__; \
+ } \
+ } while (0)
+
+#define RETURN_IF_INPUT_IS_INITIALIZED(...) \
+ do { \
+ if (input_->RecordingIsInitialized()) { \
+ return __VA_ARGS__; \
+ } \
+ } while (0)
+
+#define RETURN_IF_OUTPUT_IS_ACTIVE(...) \
+ do { \
+ if (output_->Playing()) { \
+ return __VA_ARGS__; \
+ } \
+ } while (0)
+
+#define RETURN_IF_INPUT_IS_ACTIVE(...) \
+ do { \
+ if (input_->Recording()) { \
+ return __VA_ARGS__; \
+ } \
+ } while (0)
+
+// This class combines a generic instance of an AudioInput and a generic
+// instance of an AudioOutput to create an AudioDeviceModule. This is mostly
+// done by delegating to the audio input/output with some glue code. This class
+// also directly implements some of the AudioDeviceModule methods with dummy
+// implementations.
+//
+// An instance must be created, destroyed and used on one and the same thread,
+// i.e., all public methods must also be called on the same thread. A thread
+// checker will RTC_DCHECK if any method is called on an invalid thread.
+// TODO(henrika): is thread checking needed in AudioInput and AudioOutput?
+class WindowsAudioDeviceModule : public AudioDeviceModuleForTest {
+ public:
+ enum class InitStatus {
+ OK = 0,
+ PLAYOUT_ERROR = 1,
+ RECORDING_ERROR = 2,
+ OTHER_ERROR = 3,
+ NUM_STATUSES = 4
+ };
+
+ WindowsAudioDeviceModule(std::unique_ptr<AudioInput> audio_input,
+ std::unique_ptr<AudioOutput> audio_output,
+ TaskQueueFactory* task_queue_factory)
+ : input_(std::move(audio_input)),
+ output_(std::move(audio_output)),
+ task_queue_factory_(task_queue_factory) {
+ RTC_CHECK(input_);
+ RTC_CHECK(output_);
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ }
+
+ ~WindowsAudioDeviceModule() override {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ Terminate();
+ }
+
+ WindowsAudioDeviceModule(const WindowsAudioDeviceModule&) = delete;
+ WindowsAudioDeviceModule& operator=(const WindowsAudioDeviceModule&) = delete;
+
+ int32_t ActiveAudioLayer(
+ AudioDeviceModule::AudioLayer* audioLayer) const override {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ // TODO(henrika): it might be possible to remove this unique signature.
+ *audioLayer = AudioDeviceModule::kWindowsCoreAudio2;
+ return 0;
+ }
+
+ int32_t RegisterAudioCallback(AudioTransport* audioCallback) override {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK(audio_device_buffer_);
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ return audio_device_buffer_->RegisterAudioCallback(audioCallback);
+ }
+
+ int32_t Init() override {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ RETURN_IF_OUTPUT_RESTARTS(0);
+ RETURN_IF_INPUT_RESTARTS(0);
+ if (initialized_) {
+ return 0;
+ }
+ audio_device_buffer_ =
+ std::make_unique<AudioDeviceBuffer>(task_queue_factory_);
+ AttachAudioBuffer();
+ InitStatus status;
+ if (output_->Init() != 0) {
+ status = InitStatus::PLAYOUT_ERROR;
+ } else if (input_->Init() != 0) {
+ output_->Terminate();
+ status = InitStatus::RECORDING_ERROR;
+ } else {
+ initialized_ = true;
+ status = InitStatus::OK;
+ }
+ if (status != InitStatus::OK) {
+ RTC_LOG(LS_ERROR) << "Audio device initialization failed";
+ return -1;
+ }
+ return 0;
+ }
+
+ int32_t Terminate() override {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ RETURN_IF_OUTPUT_RESTARTS(0);
+ RETURN_IF_INPUT_RESTARTS(0);
+ if (!initialized_)
+ return 0;
+ int32_t err = input_->Terminate();
+ err |= output_->Terminate();
+ initialized_ = false;
+ RTC_DCHECK_EQ(err, 0);
+ return err;
+ }
+
+ bool Initialized() const override {
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ return initialized_;
+ }
+
+ int16_t PlayoutDevices() override {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ RETURN_IF_OUTPUT_RESTARTS(0);
+ return output_->NumDevices();
+ }
+
+ int16_t RecordingDevices() override {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ RETURN_IF_INPUT_RESTARTS(0);
+ return input_->NumDevices();
+ }
+
+ int32_t PlayoutDeviceName(uint16_t index,
+ char name[kAdmMaxDeviceNameSize],
+ char guid[kAdmMaxGuidSize]) override {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ RETURN_IF_OUTPUT_RESTARTS(0);
+ std::string name_str, guid_str;
+ int ret = -1;
+ if (guid != nullptr) {
+ ret = output_->DeviceName(index, &name_str, &guid_str);
+ rtc::strcpyn(guid, kAdmMaxGuidSize, guid_str.c_str());
+ } else {
+ ret = output_->DeviceName(index, &name_str, nullptr);
+ }
+ rtc::strcpyn(name, kAdmMaxDeviceNameSize, name_str.c_str());
+ return ret;
+ }
+ int32_t RecordingDeviceName(uint16_t index,
+ char name[kAdmMaxDeviceNameSize],
+ char guid[kAdmMaxGuidSize]) override {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ RETURN_IF_INPUT_RESTARTS(0);
+ std::string name_str, guid_str;
+ int ret = -1;
+ if (guid != nullptr) {
+ ret = input_->DeviceName(index, &name_str, &guid_str);
+ rtc::strcpyn(guid, kAdmMaxGuidSize, guid_str.c_str());
+ } else {
+ ret = input_->DeviceName(index, &name_str, nullptr);
+ }
+ rtc::strcpyn(name, kAdmMaxDeviceNameSize, name_str.c_str());
+ return ret;
+ }
+
+ int32_t SetPlayoutDevice(uint16_t index) override {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ RETURN_IF_OUTPUT_RESTARTS(0);
+ return output_->SetDevice(index);
+ }
+
+ int32_t SetPlayoutDevice(
+ AudioDeviceModule::WindowsDeviceType device) override {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ RETURN_IF_OUTPUT_RESTARTS(0);
+ return output_->SetDevice(device);
+ }
+ int32_t SetRecordingDevice(uint16_t index) override {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ return input_->SetDevice(index);
+ }
+
+ int32_t SetRecordingDevice(
+ AudioDeviceModule::WindowsDeviceType device) override {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ return input_->SetDevice(device);
+ }
+
+ int32_t PlayoutIsAvailable(bool* available) override {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ *available = true;
+ return 0;
+ }
+
+ int32_t InitPlayout() override {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ RETURN_IF_OUTPUT_RESTARTS(0);
+ RETURN_IF_OUTPUT_IS_INITIALIZED(0);
+ return output_->InitPlayout();
+ }
+
+ bool PlayoutIsInitialized() const override {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ RETURN_IF_OUTPUT_RESTARTS(true);
+ return output_->PlayoutIsInitialized();
+ }
+
+ int32_t RecordingIsAvailable(bool* available) override {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ *available = true;
+ return 0;
+ }
+
+ int32_t InitRecording() override {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ RETURN_IF_INPUT_RESTARTS(0);
+ RETURN_IF_INPUT_IS_INITIALIZED(0);
+ return input_->InitRecording();
+ }
+
+ bool RecordingIsInitialized() const override {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ RETURN_IF_INPUT_RESTARTS(true);
+ return input_->RecordingIsInitialized();
+ }
+
+ int32_t StartPlayout() override {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ RETURN_IF_OUTPUT_RESTARTS(0);
+ RETURN_IF_OUTPUT_IS_ACTIVE(0);
+ return output_->StartPlayout();
+ }
+
+ int32_t StopPlayout() override {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ RETURN_IF_OUTPUT_RESTARTS(-1);
+ return output_->StopPlayout();
+ }
+
+ bool Playing() const override {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ RETURN_IF_OUTPUT_RESTARTS(true);
+ return output_->Playing();
+ }
+
+ int32_t StartRecording() override {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ RETURN_IF_INPUT_RESTARTS(0);
+ RETURN_IF_INPUT_IS_ACTIVE(0);
+ return input_->StartRecording();
+ }
+
+ int32_t StopRecording() override {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ RETURN_IF_INPUT_RESTARTS(-1);
+ return input_->StopRecording();
+ }
+
+ bool Recording() const override {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RETURN_IF_INPUT_RESTARTS(true);
+ return input_->Recording();
+ }
+
+ int32_t InitSpeaker() override {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ RTC_DLOG(LS_WARNING) << "This method has no effect";
+ return initialized_ ? 0 : -1;
+ }
+
+ bool SpeakerIsInitialized() const override {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ RTC_DLOG(LS_WARNING) << "This method has no effect";
+ return initialized_;
+ }
+
+ int32_t InitMicrophone() override {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ RTC_DLOG(LS_WARNING) << "This method has no effect";
+ return initialized_ ? 0 : -1;
+ }
+
+ bool MicrophoneIsInitialized() const override {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ RTC_DLOG(LS_WARNING) << "This method has no effect";
+ return initialized_;
+ }
+
+ int32_t SpeakerVolumeIsAvailable(bool* available) override {
+ // TODO(henrika): improve support.
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ *available = false;
+ return 0;
+ }
+
+ int32_t SetSpeakerVolume(uint32_t volume) override { return 0; }
+ int32_t SpeakerVolume(uint32_t* volume) const override { return 0; }
+ int32_t MaxSpeakerVolume(uint32_t* maxVolume) const override { return 0; }
+ int32_t MinSpeakerVolume(uint32_t* minVolume) const override { return 0; }
+
+ int32_t MicrophoneVolumeIsAvailable(bool* available) override {
+ // TODO(henrika): improve support.
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ *available = false;
+ return 0;
+ }
+
+ int32_t SetMicrophoneVolume(uint32_t volume) override { return 0; }
+ int32_t MicrophoneVolume(uint32_t* volume) const override { return 0; }
+ int32_t MaxMicrophoneVolume(uint32_t* maxVolume) const override { return 0; }
+ int32_t MinMicrophoneVolume(uint32_t* minVolume) const override { return 0; }
+
+ int32_t SpeakerMuteIsAvailable(bool* available) override { return 0; }
+ int32_t SetSpeakerMute(bool enable) override { return 0; }
+ int32_t SpeakerMute(bool* enabled) const override { return 0; }
+
+ int32_t MicrophoneMuteIsAvailable(bool* available) override { return 0; }
+ int32_t SetMicrophoneMute(bool enable) override { return 0; }
+ int32_t MicrophoneMute(bool* enabled) const override { return 0; }
+
+ int32_t StereoPlayoutIsAvailable(bool* available) const override {
+ // TODO(henrika): improve support.
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ *available = true;
+ return 0;
+ }
+
+ int32_t SetStereoPlayout(bool enable) override {
+ // TODO(henrika): improve support.
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ return 0;
+ }
+
+ int32_t StereoPlayout(bool* enabled) const override {
+ // TODO(henrika): improve support.
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ *enabled = true;
+ return 0;
+ }
+
+ int32_t StereoRecordingIsAvailable(bool* available) const override {
+ // TODO(henrika): improve support.
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ *available = true;
+ return 0;
+ }
+
+ int32_t SetStereoRecording(bool enable) override {
+ // TODO(henrika): improve support.
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ return 0;
+ }
+
+ int32_t StereoRecording(bool* enabled) const override {
+ // TODO(henrika): improve support.
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ *enabled = true;
+ return 0;
+ }
+
+ int32_t PlayoutDelay(uint16_t* delayMS) const override { return 0; }
+
+ bool BuiltInAECIsAvailable() const override { return false; }
+ bool BuiltInAGCIsAvailable() const override { return false; }
+ bool BuiltInNSIsAvailable() const override { return false; }
+
+ int32_t EnableBuiltInAEC(bool enable) override { return 0; }
+ int32_t EnableBuiltInAGC(bool enable) override { return 0; }
+ int32_t EnableBuiltInNS(bool enable) override { return 0; }
+
+ int32_t AttachAudioBuffer() {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ output_->AttachAudioBuffer(audio_device_buffer_.get());
+ input_->AttachAudioBuffer(audio_device_buffer_.get());
+ return 0;
+ }
+
+ int RestartPlayoutInternally() override {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ RETURN_IF_OUTPUT_RESTARTS(0);
+ return output_->RestartPlayout();
+ }
+
+ int RestartRecordingInternally() override {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ return input_->RestartRecording();
+ }
+
+ int SetPlayoutSampleRate(uint32_t sample_rate) override {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ return output_->SetSampleRate(sample_rate);
+ }
+
+ int SetRecordingSampleRate(uint32_t sample_rate) override {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ return input_->SetSampleRate(sample_rate);
+ }
+
+ private:
+ // Ensures that the class is used on the same thread as it is constructed
+ // and destroyed on.
+ SequenceChecker thread_checker_;
+
+ // Implements the AudioInput interface and deals with audio capturing parts.
+ const std::unique_ptr<AudioInput> input_;
+
+ // Implements the AudioOutput interface and deals with audio rendering parts.
+ const std::unique_ptr<AudioOutput> output_;
+
+ TaskQueueFactory* const task_queue_factory_;
+
+ // The AudioDeviceBuffer (ADB) instance is needed for sending/receiving audio
+ // to/from the WebRTC layer. Created and owned by this object. Used by
+ // both `input_` and `output_` but they use orthogonal parts of the ADB.
+ std::unique_ptr<AudioDeviceBuffer> audio_device_buffer_;
+
+ // Set to true after a successful call to Init(). Cleared by Terminate().
+ bool initialized_ = false;
+};
+
+} // namespace
+
+rtc::scoped_refptr<AudioDeviceModuleForTest>
+CreateWindowsCoreAudioAudioDeviceModuleFromInputAndOutput(
+ std::unique_ptr<AudioInput> audio_input,
+ std::unique_ptr<AudioOutput> audio_output,
+ TaskQueueFactory* task_queue_factory) {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ return rtc::make_ref_counted<WindowsAudioDeviceModule>(
+ std::move(audio_input), std::move(audio_output), task_queue_factory);
+}
+
+} // namespace webrtc_win
+} // namespace webrtc
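For reference, a hypothetical caller of the factory defined above; MakeInput() and MakeOutput() stand in for whatever concrete AudioInput/AudioOutput implementations are used and are not part of this patch:

#include <memory>

#include "api/scoped_refptr.h"
#include "api/task_queue/task_queue_factory.h"
#include "modules/audio_device/include/audio_device.h"
#include "modules/audio_device/win/audio_device_module_win.h"

// Hypothetical factories for concrete input/output implementations.
std::unique_ptr<webrtc::webrtc_win::AudioInput> MakeInput();
std::unique_ptr<webrtc::webrtc_win::AudioOutput> MakeOutput();

rtc::scoped_refptr<webrtc::AudioDeviceModuleForTest> StartDefaultPlayout(
    webrtc::TaskQueueFactory* task_queue_factory) {
  auto adm = webrtc::webrtc_win::
      CreateWindowsCoreAudioAudioDeviceModuleFromInputAndOutput(
          MakeInput(), MakeOutput(), task_queue_factory);
  // All public methods must run on the thread that created the module;
  // a SequenceChecker enforces this with RTC_DCHECKs.
  if (adm->Init() != 0) {
    return nullptr;
  }
  adm->SetPlayoutDevice(
      webrtc::AudioDeviceModule::kDefaultCommunicationDevice);
  adm->InitPlayout();
  adm->StartPlayout();
  return adm;
}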
diff --git a/third_party/libwebrtc/modules/audio_device/win/audio_device_module_win.h b/third_party/libwebrtc/modules/audio_device/win/audio_device_module_win.h
new file mode 100644
index 0000000000..1ed0b25620
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/win/audio_device_module_win.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_DEVICE_WIN_AUDIO_DEVICE_MODULE_WIN_H_
+#define MODULES_AUDIO_DEVICE_WIN_AUDIO_DEVICE_MODULE_WIN_H_
+
+#include <memory>
+#include <string>
+
+#include "api/scoped_refptr.h"
+#include "api/task_queue/task_queue_factory.h"
+#include "modules/audio_device/include/audio_device.h"
+
+namespace webrtc {
+
+class AudioDeviceBuffer;
+
+namespace webrtc_win {
+
+// This interface represents the main input-related parts of the complete
+// AudioDeviceModule interface.
+class AudioInput {
+ public:
+ virtual ~AudioInput() {}
+
+ virtual int Init() = 0;
+ virtual int Terminate() = 0;
+ virtual int NumDevices() const = 0;
+ virtual int SetDevice(int index) = 0;
+ virtual int SetDevice(AudioDeviceModule::WindowsDeviceType device) = 0;
+ virtual int DeviceName(int index, std::string* name, std::string* guid) = 0;
+ virtual void AttachAudioBuffer(AudioDeviceBuffer* audio_buffer) = 0;
+ virtual bool RecordingIsInitialized() const = 0;
+ virtual int InitRecording() = 0;
+ virtual int StartRecording() = 0;
+ virtual int StopRecording() = 0;
+ virtual bool Recording() = 0;
+ virtual int VolumeIsAvailable(bool* available) = 0;
+ virtual int RestartRecording() = 0;
+ virtual bool Restarting() const = 0;
+ virtual int SetSampleRate(uint32_t sample_rate) = 0;
+};
+
+// This interface represents the main output-related parts of the complete
+// AudioDeviceModule interface.
+class AudioOutput {
+ public:
+ virtual ~AudioOutput() {}
+
+ virtual int Init() = 0;
+ virtual int Terminate() = 0;
+ virtual int NumDevices() const = 0;
+ virtual int SetDevice(int index) = 0;
+ virtual int SetDevice(AudioDeviceModule::WindowsDeviceType device) = 0;
+ virtual int DeviceName(int index, std::string* name, std::string* guid) = 0;
+ virtual void AttachAudioBuffer(AudioDeviceBuffer* audio_buffer) = 0;
+ virtual bool PlayoutIsInitialized() const = 0;
+ virtual int InitPlayout() = 0;
+ virtual int StartPlayout() = 0;
+ virtual int StopPlayout() = 0;
+ virtual bool Playing() = 0;
+ virtual int VolumeIsAvailable(bool* available) = 0;
+ virtual int RestartPlayout() = 0;
+ virtual bool Restarting() const = 0;
+ virtual int SetSampleRate(uint32_t sample_rate) = 0;
+};
+
+// Combines an AudioInput and an AudioOutput implementation to build an
+// AudioDeviceModule. Hides most parts of the full ADM interface.
+rtc::scoped_refptr<AudioDeviceModuleForTest>
+CreateWindowsCoreAudioAudioDeviceModuleFromInputAndOutput(
+ std::unique_ptr<AudioInput> audio_input,
+ std::unique_ptr<AudioOutput> audio_output,
+ TaskQueueFactory* task_queue_factory);
+
+} // namespace webrtc_win
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_DEVICE_WIN_AUDIO_DEVICE_MODULE_WIN_H_
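Any backend handed to the factory must implement every pure-virtual method of the interfaces above. A minimal, non-functional AudioInput stub, e.g. for feeding a fake capturer into tests (the class is illustrative, not part of the patch):

#include <string>

#include "modules/audio_device/win/audio_device_module_win.h"

class FakeAudioInput : public webrtc::webrtc_win::AudioInput {
 public:
  int Init() override { return 0; }
  int Terminate() override { return 0; }
  int NumDevices() const override { return 1; }
  int SetDevice(int index) override { return 0; }
  int SetDevice(webrtc::AudioDeviceModule::WindowsDeviceType device) override {
    return 0;
  }
  int DeviceName(int index, std::string* name, std::string* guid) override {
    *name = "fake device";
    if (guid) *guid = "fake-guid";
    return 0;
  }
  void AttachAudioBuffer(webrtc::AudioDeviceBuffer* audio_buffer) override {}
  bool RecordingIsInitialized() const override { return initialized_; }
  int InitRecording() override {
    initialized_ = true;
    return 0;
  }
  int StartRecording() override {
    recording_ = true;
    return 0;
  }
  int StopRecording() override {
    recording_ = false;
    return 0;
  }
  bool Recording() override { return recording_; }
  int VolumeIsAvailable(bool* available) override {
    *available = false;
    return 0;
  }
  int RestartRecording() override { return 0; }
  bool Restarting() const override { return false; }
  int SetSampleRate(uint32_t sample_rate) override { return 0; }

 private:
  bool initialized_ = false;
  bool recording_ = false;
};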
diff --git a/third_party/libwebrtc/modules/audio_device/win/core_audio_base_win.cc b/third_party/libwebrtc/modules/audio_device/win/core_audio_base_win.cc
new file mode 100644
index 0000000000..dc8526b625
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/win/core_audio_base_win.cc
@@ -0,0 +1,948 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_device/win/core_audio_base_win.h"
+
+#include <memory>
+#include <string>
+
+#include "absl/strings/string_view.h"
+#include "modules/audio_device/audio_device_buffer.h"
+#include "rtc_base/arraysize.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "rtc_base/platform_thread.h"
+#include "rtc_base/time_utils.h"
+#include "rtc_base/win/scoped_com_initializer.h"
+#include "rtc_base/win/windows_version.h"
+
+using Microsoft::WRL::ComPtr;
+
+namespace webrtc {
+namespace webrtc_win {
+namespace {
+
+// Even if the device supports low latency and even if IAudioClient3 can be
+// used (requires Win10 or higher), we currently disable any attempts to
+// initialize the client for low-latency.
+// TODO(henrika): more research is needed before we can enable low-latency.
+const bool kEnableLowLatencyIfSupported = false;
+
+// Each unit of reference time is 100 nanoseconds, hence
+// `kReferenceTimesPerSecond` corresponds to one second.
+// TODO(henrika): possibly add usage in Init().
+// const REFERENCE_TIME kReferenceTimesPerSecond = 10000000;
+
+enum DefaultDeviceType {
+ kUndefined = -1,
+ kDefault = 0,
+ kDefaultCommunications = 1,
+ kDefaultDeviceTypeMaxCount = kDefaultCommunications + 1,
+};
+
+const char* DirectionToString(CoreAudioBase::Direction direction) {
+ switch (direction) {
+ case CoreAudioBase::Direction::kOutput:
+ return "Output";
+ case CoreAudioBase::Direction::kInput:
+ return "Input";
+ default:
+ return "Unkown";
+ }
+}
+
+const char* RoleToString(const ERole role) {
+ switch (role) {
+ case eConsole:
+ return "Console";
+ case eMultimedia:
+ return "Multimedia";
+ case eCommunications:
+ return "Communications";
+ default:
+ return "Unsupported";
+ }
+}
+
+std::string IndexToString(int index) {
+ std::string ss = std::to_string(index);
+ switch (index) {
+ case kDefault:
+ ss += " (Default)";
+ break;
+ case kDefaultCommunications:
+ ss += " (Communications)";
+ break;
+ default:
+ break;
+ }
+ return ss;
+}
+
+const char* SessionStateToString(AudioSessionState state) {
+ switch (state) {
+ case AudioSessionStateActive:
+ return "Active";
+ case AudioSessionStateInactive:
+ return "Inactive";
+ case AudioSessionStateExpired:
+ return "Expired";
+ default:
+ return "Invalid";
+ }
+}
+
+const char* SessionDisconnectReasonToString(
+ AudioSessionDisconnectReason reason) {
+ switch (reason) {
+ case DisconnectReasonDeviceRemoval:
+ return "DeviceRemoval";
+ case DisconnectReasonServerShutdown:
+ return "ServerShutdown";
+ case DisconnectReasonFormatChanged:
+ return "FormatChanged";
+ case DisconnectReasonSessionLogoff:
+ return "SessionLogoff";
+ case DisconnectReasonSessionDisconnected:
+ return "Disconnected";
+ case DisconnectReasonExclusiveModeOverride:
+ return "ExclusiveModeOverride";
+ default:
+ return "Invalid";
+ }
+}
+
+// Returns true if the selected audio device supports low latency, i.e., if it
+// is possible to initialize the engine using periods less than the default
+// period (10ms).
+bool IsLowLatencySupported(IAudioClient3* client3,
+ const WAVEFORMATEXTENSIBLE* format,
+ uint32_t* min_period_in_frames) {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+
+ // Get the range of periodicities supported by the engine for the specified
+ // stream format.
+ uint32_t default_period = 0;
+ uint32_t fundamental_period = 0;
+ uint32_t min_period = 0;
+ uint32_t max_period = 0;
+ if (FAILED(core_audio_utility::GetSharedModeEnginePeriod(
+ client3, format, &default_period, &fundamental_period, &min_period,
+ &max_period))) {
+ return false;
+ }
+
+ // Low latency is supported if the shortest allowed period is less than the
+ // default engine period.
+ // TODO(henrika): verify that this assumption is correct.
+ const bool low_latency = min_period < default_period;
+ RTC_LOG(LS_INFO) << "low_latency: " << low_latency;
+ *min_period_in_frames = low_latency ? min_period : 0;
+ return low_latency;
+}
+
+} // namespace
+
+CoreAudioBase::CoreAudioBase(Direction direction,
+ bool automatic_restart,
+ OnDataCallback data_callback,
+ OnErrorCallback error_callback)
+ : format_(),
+ direction_(direction),
+ automatic_restart_(automatic_restart),
+ on_data_callback_(data_callback),
+ on_error_callback_(error_callback),
+ device_index_(kUndefined),
+ is_restarting_(false) {
+ RTC_DLOG(LS_INFO) << __FUNCTION__ << "[" << DirectionToString(direction)
+ << "]";
+ RTC_DLOG(LS_INFO) << "Automatic restart: " << automatic_restart;
+ RTC_DLOG(LS_INFO) << "Windows version: " << rtc::rtc_win::GetVersion();
+
+ // Create the event which the audio engine will signal each time a buffer
+ // becomes ready to be processed by the client.
+ audio_samples_event_.Set(CreateEvent(nullptr, false, false, nullptr));
+ RTC_DCHECK(audio_samples_event_.IsValid());
+
+ // Event to be set in Stop() when rendering/capturing shall stop.
+ stop_event_.Set(CreateEvent(nullptr, false, false, nullptr));
+ RTC_DCHECK(stop_event_.IsValid());
+
+ // Event to be set when it has been detected that an active device has been
+ // invalidated or the stream format has changed.
+ restart_event_.Set(CreateEvent(nullptr, false, false, nullptr));
+ RTC_DCHECK(restart_event_.IsValid());
+}
+
+CoreAudioBase::~CoreAudioBase() {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_EQ(ref_count_, 1);
+}
+
+EDataFlow CoreAudioBase::GetDataFlow() const {
+ return direction_ == CoreAudioBase::Direction::kOutput ? eRender : eCapture;
+}
+
+bool CoreAudioBase::IsRestarting() const {
+ return is_restarting_;
+}
+
+int64_t CoreAudioBase::TimeSinceStart() const {
+ return rtc::TimeSince(start_time_);
+}
+
+int CoreAudioBase::NumberOfActiveDevices() const {
+ return core_audio_utility::NumberOfActiveDevices(GetDataFlow());
+}
+
+int CoreAudioBase::NumberOfEnumeratedDevices() const {
+ const int num_active = NumberOfActiveDevices();
+ return num_active > 0 ? num_active + kDefaultDeviceTypeMaxCount : 0;
+}
+
+void CoreAudioBase::ReleaseCOMObjects() {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ // ComPtr::Reset() sets the ComPtr to nullptr, releasing any previous
+ // reference.
+ if (audio_client_) {
+ audio_client_.Reset();
+ }
+ if (audio_clock_.Get()) {
+ audio_clock_.Reset();
+ }
+ if (audio_session_control_.Get()) {
+ audio_session_control_.Reset();
+ }
+}
+
+bool CoreAudioBase::IsDefaultDevice(int index) const {
+ return index == kDefault;
+}
+
+bool CoreAudioBase::IsDefaultCommunicationsDevice(int index) const {
+ return index == kDefaultCommunications;
+}
+
+bool CoreAudioBase::IsDefaultDeviceId(absl::string_view device_id) const {
+ // Returns true if `device_id` corresponds to the id of the default
+ // device. Note that if only one device is available (or if the user has not
+ // explicitly set a default device), `device_id` will also match
+ // IsDefaultCommunicationsDeviceId().
+ return (IsInput() &&
+ (device_id == core_audio_utility::GetDefaultInputDeviceID())) ||
+ (IsOutput() &&
+ (device_id == core_audio_utility::GetDefaultOutputDeviceID()));
+}
+
+bool CoreAudioBase::IsDefaultCommunicationsDeviceId(
+ absl::string_view device_id) const {
+ // Returns true if `device_id` corresponds to the id of the default
+ // communication device. Note that if only one device is available (or if
+ // the user has not explicitly set a communication device), `device_id` will
+ // also match IsDefaultDeviceId().
+ return (IsInput() &&
+ (device_id ==
+ core_audio_utility::GetCommunicationsInputDeviceID())) ||
+ (IsOutput() &&
+ (device_id == core_audio_utility::GetCommunicationsOutputDeviceID()));
+}
+
+bool CoreAudioBase::IsInput() const {
+ return direction_ == CoreAudioBase::Direction::kInput;
+}
+
+bool CoreAudioBase::IsOutput() const {
+ return direction_ == CoreAudioBase::Direction::kOutput;
+}
+
+std::string CoreAudioBase::GetDeviceID(int index) const {
+ if (index >= NumberOfEnumeratedDevices()) {
+ RTC_LOG(LS_ERROR) << "Invalid device index";
+ return std::string();
+ }
+
+ std::string device_id;
+ if (IsDefaultDevice(index)) {
+ device_id = IsInput() ? core_audio_utility::GetDefaultInputDeviceID()
+ : core_audio_utility::GetDefaultOutputDeviceID();
+ } else if (IsDefaultCommunicationsDevice(index)) {
+ device_id = IsInput()
+ ? core_audio_utility::GetCommunicationsInputDeviceID()
+ : core_audio_utility::GetCommunicationsOutputDeviceID();
+ } else {
+ AudioDeviceNames device_names;
+ bool ok = IsInput()
+ ? core_audio_utility::GetInputDeviceNames(&device_names)
+ : core_audio_utility::GetOutputDeviceNames(&device_names);
+ if (ok) {
+ device_id = device_names[index].unique_id;
+ }
+ }
+ return device_id;
+}
+
+int CoreAudioBase::SetDevice(int index) {
+ RTC_DLOG(LS_INFO) << __FUNCTION__ << "[" << DirectionToString(direction())
+ << "]: index=" << IndexToString(index);
+ if (initialized_) {
+ return -1;
+ }
+
+ std::string device_id = GetDeviceID(index);
+ RTC_DLOG(LS_INFO) << "index=" << IndexToString(index)
+ << " => device_id: " << device_id;
+ device_index_ = index;
+ device_id_ = device_id;
+
+ return device_id_.empty() ? -1 : 0;
+}
+
+int CoreAudioBase::DeviceName(int index,
+ std::string* name,
+ std::string* guid) const {
+ RTC_DLOG(LS_INFO) << __FUNCTION__ << "[" << DirectionToString(direction())
+ << "]: index=" << IndexToString(index);
+ if (index > NumberOfEnumeratedDevices() - 1) {
+ RTC_LOG(LS_ERROR) << "Invalid device index";
+ return -1;
+ }
+
+ AudioDeviceNames device_names;
+ bool ok = IsInput() ? core_audio_utility::GetInputDeviceNames(&device_names)
+ : core_audio_utility::GetOutputDeviceNames(&device_names);
+ // Validate the index one extra time in case the size of the generated list
+ // did not match NumberOfEnumeratedDevices().
+ if (!ok || static_cast<int>(device_names.size()) <= index) {
+ RTC_LOG(LS_ERROR) << "Failed to get the device name";
+ return -1;
+ }
+
+ *name = device_names[index].device_name;
+ RTC_DLOG(LS_INFO) << "name: " << *name;
+ if (guid != nullptr) {
+ *guid = device_names[index].unique_id;
+ RTC_DLOG(LS_INFO) << "guid: " << *guid;
+ }
+ return 0;
+}
+
+bool CoreAudioBase::Init() {
+ RTC_DLOG(LS_INFO) << __FUNCTION__ << "[" << DirectionToString(direction())
+ << "]";
+ RTC_DCHECK_GE(device_index_, 0);
+ RTC_DCHECK(!device_id_.empty());
+ RTC_DCHECK(audio_device_buffer_);
+ RTC_DCHECK(!audio_client_);
+ RTC_DCHECK(!audio_session_control_.Get());
+
+ // Use an existing combination of `device_index_` and `device_id_` to set
+ // parameters which are required to create an audio client. It is up to the
+ // parent class to set `device_index_` and `device_id_`.
+ std::string device_id = AudioDeviceName::kDefaultDeviceId;
+ ERole role = ERole();
+ if (IsDefaultDevice(device_index_)) {
+ role = eConsole;
+ } else if (IsDefaultCommunicationsDevice(device_index_)) {
+ role = eCommunications;
+ } else {
+ device_id = device_id_;
+ }
+ RTC_LOG(LS_INFO) << "Unique device identifier: device_id=" << device_id
+ << ", role=" << RoleToString(role);
+
+ // Create an IAudioClient interface which enables us to create and initialize
+ // an audio stream between an audio application and the audio engine.
+ ComPtr<IAudioClient> audio_client;
+ if (core_audio_utility::GetAudioClientVersion() == 3) {
+ RTC_DLOG(LS_INFO) << "Using IAudioClient3";
+ audio_client =
+ core_audio_utility::CreateClient3(device_id, GetDataFlow(), role);
+ } else if (core_audio_utility::GetAudioClientVersion() == 2) {
+ RTC_DLOG(LS_INFO) << "Using IAudioClient2";
+ audio_client =
+ core_audio_utility::CreateClient2(device_id, GetDataFlow(), role);
+ } else {
+ RTC_DLOG(LS_INFO) << "Using IAudioClient";
+ audio_client =
+ core_audio_utility::CreateClient(device_id, GetDataFlow(), role);
+ }
+ if (!audio_client) {
+ return false;
+ }
+
+ // Set extra client properties before initialization if the audio client
+ // supports it.
+ // TODO(henrika): evaluate effect(s) of making these changes. Also, perhaps
+ // these types of settings belong to the client and not the utility parts.
+ if (core_audio_utility::GetAudioClientVersion() >= 2) {
+ if (FAILED(core_audio_utility::SetClientProperties(
+ static_cast<IAudioClient2*>(audio_client.Get())))) {
+ return false;
+ }
+ }
+
+ // Retrieve preferred audio input or output parameters for the given client
+ // and the specified client properties. Override the preferred rate if sample
+ // rate has been defined by the user. Rate conversion will be performed by
+ // the audio engine to match the client if needed.
+ AudioParameters params;
+ HRESULT res = sample_rate_ ? core_audio_utility::GetPreferredAudioParameters(
+ audio_client.Get(), &params, *sample_rate_)
+ : core_audio_utility::GetPreferredAudioParameters(
+ audio_client.Get(), &params);
+ if (FAILED(res)) {
+ return false;
+ }
+
+ // Define the output WAVEFORMATEXTENSIBLE format in `format_`.
+ WAVEFORMATEX* format = &format_.Format;
+ format->wFormatTag = WAVE_FORMAT_EXTENSIBLE;
+ // Check the preferred channel configuration and request implicit channel
+ // upmixing (audio engine extends from 2 to N channels internally) if the
+ // preferred number of channels is larger than two; i.e., initialize the
+ // stream in stereo even if the preferred configuration is multi-channel.
+ if (params.channels() <= 2) {
+ format->nChannels = rtc::dchecked_cast<WORD>(params.channels());
+ } else {
+ // TODO(henrika): ensure that this approach works on different multi-channel
+ // devices. Verified on:
+ // - Corsair VOID PRO Surround USB Adapter (supports 7.1)
+ RTC_LOG(LS_WARNING)
+ << "Using channel upmixing in WASAPI audio engine (2 => "
+ << params.channels() << ")";
+ format->nChannels = 2;
+ }
+ format->nSamplesPerSec = params.sample_rate();
+ format->wBitsPerSample = rtc::dchecked_cast<WORD>(params.bits_per_sample());
+ format->nBlockAlign = (format->wBitsPerSample / 8) * format->nChannels;
+ format->nAvgBytesPerSec = format->nSamplesPerSec * format->nBlockAlign;
+ format->cbSize = sizeof(WAVEFORMATEXTENSIBLE) - sizeof(WAVEFORMATEX);
+ // Add the parts which are unique for the WAVE_FORMAT_EXTENSIBLE structure.
+ format_.Samples.wValidBitsPerSample =
+ rtc::dchecked_cast<WORD>(params.bits_per_sample());
+ format_.dwChannelMask =
+ format->nChannels == 1 ? KSAUDIO_SPEAKER_MONO : KSAUDIO_SPEAKER_STEREO;
+ format_.SubFormat = KSDATAFORMAT_SUBTYPE_PCM;
+ RTC_DLOG(LS_INFO) << core_audio_utility::WaveFormatToString(&format_);
+
+ // Verify that the format is supported but exclude the test if the default
+ // sample rate has been overridden. If so, the WASAPI audio engine will do
+ // any necessary conversions between the client format we have given it and
+ // the playback mix format or recording split format.
+ if (!sample_rate_) {
+ if (!core_audio_utility::IsFormatSupported(
+ audio_client.Get(), AUDCLNT_SHAREMODE_SHARED, &format_)) {
+ return false;
+ }
+ }
+
+ // Check if low-latency is supported and use special initialization if it is.
+ // Low-latency initialization requires these things:
+ // - IAudioClient3 (>= Win10)
+ // - HDAudio driver
+ // - kEnableLowLatencyIfSupported changed from false (default) to true.
+ // TODO(henrika): IsLowLatencySupported() returns AUDCLNT_E_UNSUPPORTED_FORMAT
+ // when `sample_rate_.has_value()` returns true if rate conversion is
+ // actually required (i.e., client asks for other than the default rate).
+ bool low_latency_support = false;
+ uint32_t min_period_in_frames = 0;
+ if (kEnableLowLatencyIfSupported &&
+ core_audio_utility::GetAudioClientVersion() >= 3) {
+ low_latency_support =
+ IsLowLatencySupported(static_cast<IAudioClient3*>(audio_client.Get()),
+ &format_, &min_period_in_frames);
+ }
+
+ if (low_latency_support) {
+ RTC_DCHECK_GE(core_audio_utility::GetAudioClientVersion(), 3);
+ // Use IAudioClient3::InitializeSharedAudioStream() API to initialize a
+ // low-latency event-driven client. Request the smallest possible
+ // periodicity.
+ // TODO(henrika): evaluate this scheme in terms of CPU etc.
+ if (FAILED(core_audio_utility::SharedModeInitializeLowLatency(
+ static_cast<IAudioClient3*>(audio_client.Get()), &format_,
+ audio_samples_event_, min_period_in_frames,
+ sample_rate_.has_value(), &endpoint_buffer_size_frames_))) {
+ return false;
+ }
+ } else {
+ // Initialize the audio stream between the client and the device in shared
+ // mode using event-driven buffer handling. Also, using 0 as requested
+ // buffer size results in a default (minimum) endpoint buffer size.
+ // TODO(henrika): possibly increase `requested_buffer_size` to add
+ // robustness.
+ const REFERENCE_TIME requested_buffer_size = 0;
+ if (FAILED(core_audio_utility::SharedModeInitialize(
+ audio_client.Get(), &format_, audio_samples_event_,
+ requested_buffer_size, sample_rate_.has_value(),
+ &endpoint_buffer_size_frames_))) {
+ return false;
+ }
+ }
+
+ // Check device period and the preferred buffer size and log a warning if
+ // WebRTC's buffer size is not an even divisor of the preferred buffer size
+ // in Core Audio.
+ // TODO(henrika): sort out if a non-perfect match really is an issue.
+ // TODO(henrika): compare with IAudioClient3::GetSharedModeEnginePeriod().
+ REFERENCE_TIME device_period;
+ if (FAILED(core_audio_utility::GetDevicePeriod(
+ audio_client.Get(), AUDCLNT_SHAREMODE_SHARED, &device_period))) {
+ return false;
+ }
+ const double device_period_in_seconds =
+ static_cast<double>(
+ core_audio_utility::ReferenceTimeToTimeDelta(device_period).ms()) /
+ 1000.0L;
+ const int preferred_frames_per_buffer =
+ static_cast<int>(params.sample_rate() * device_period_in_seconds + 0.5);
+ RTC_DLOG(LS_INFO) << "preferred_frames_per_buffer: "
+ << preferred_frames_per_buffer;
+ if (preferred_frames_per_buffer % params.frames_per_buffer()) {
+ RTC_LOG(LS_WARNING) << "Buffer size of " << params.frames_per_buffer()
+ << " is not an even divisor of "
+ << preferred_frames_per_buffer;
+ }
+
+ // Create an AudioSessionControl interface given the initialized client.
+ // The IAudioSessionControl interface enables a client to configure the control
+ // parameters for an audio session and to monitor events in the session.
+ ComPtr<IAudioSessionControl> audio_session_control =
+ core_audio_utility::CreateAudioSessionControl(audio_client.Get());
+ if (!audio_session_control.Get()) {
+ return false;
+ }
+
+ // The Sndvol program displays volume and mute controls for sessions that
+ // are in the active and inactive states.
+ AudioSessionState state;
+ if (FAILED(audio_session_control->GetState(&state))) {
+ return false;
+ }
+ RTC_DLOG(LS_INFO) << "audio session state: " << SessionStateToString(state);
+ RTC_DCHECK_EQ(state, AudioSessionStateInactive);
+
+ // Register the client to receive notifications of session events, including
+ // changes in the stream state.
+ if (FAILED(audio_session_control->RegisterAudioSessionNotification(this))) {
+ return false;
+ }
+
+ // Store valid COM interfaces.
+ audio_client_ = audio_client;
+ audio_session_control_ = audio_session_control;
+
+ return true;
+}
+
+bool CoreAudioBase::Start() {
+ RTC_DLOG(LS_INFO) << __FUNCTION__ << "[" << DirectionToString(direction())
+ << "]";
+ if (IsRestarting()) {
+ // The audio thread should stay alive during an internal restart since the
+ // restart callback is triggered on that thread; keeping it alive also makes
+ // the restart sequence less complex.
+ RTC_DCHECK(!audio_thread_.empty());
+ }
+
+ // Start an audio thread but only if one does not already exist (which is the
+ // case during restart).
+ if (audio_thread_.empty()) {
+ const absl::string_view name =
+ IsInput() ? "wasapi_capture_thread" : "wasapi_render_thread";
+ audio_thread_ = rtc::PlatformThread::SpawnJoinable(
+ [this] { ThreadRun(); }, name,
+ rtc::ThreadAttributes().SetPriority(rtc::ThreadPriority::kRealtime));
+ RTC_DLOG(LS_INFO) << "Started thread with name: " << name
+ << " and handle: " << *audio_thread_.GetHandle();
+ }
+
+ // Start streaming data between the endpoint buffer and the audio engine.
+ _com_error error = audio_client_->Start();
+ if (FAILED(error.Error())) {
+ StopThread();
+ RTC_LOG(LS_ERROR) << "IAudioClient::Start failed: "
+ << core_audio_utility::ErrorToString(error);
+ return false;
+ }
+
+ start_time_ = rtc::TimeMillis();
+ num_data_callbacks_ = 0;
+
+ return true;
+}
+
+bool CoreAudioBase::Stop() {
+ RTC_DLOG(LS_INFO) << __FUNCTION__ << "[" << DirectionToString(direction())
+ << "]";
+ RTC_DLOG(LS_INFO) << "total activity time: " << TimeSinceStart();
+
+ // Stop audio streaming.
+ _com_error error = audio_client_->Stop();
+ if (FAILED(error.Error())) {
+ RTC_LOG(LS_ERROR) << "IAudioClient::Stop failed: "
+ << core_audio_utility::ErrorToString(error);
+ }
+ // Stop and destroy the audio thread but only when a restart attempt is not
+ // ongoing.
+ if (!IsRestarting()) {
+ StopThread();
+ }
+
+ // Flush all pending data and reset the audio clock stream position to 0.
+ error = audio_client_->Reset();
+ if (FAILED(error.Error())) {
+ RTC_LOG(LS_ERROR) << "IAudioClient::Reset failed: "
+ << core_audio_utility::ErrorToString(error);
+ }
+
+ if (IsOutput()) {
+ // Extra safety check to ensure that the buffers are cleared.
+ // If the buffers are not cleared correctly, the next call to Start()
+ // would fail with AUDCLNT_E_BUFFER_ERROR at
+ // IAudioRenderClient::GetBuffer().
+ UINT32 num_queued_frames = 0;
+ audio_client_->GetCurrentPadding(&num_queued_frames);
+ RTC_DCHECK_EQ(0u, num_queued_frames);
+ }
+
+ // Delete the previous registration by the client to receive notifications
+ // about audio session events.
+ RTC_DLOG(LS_INFO) << "audio session state: "
+ << SessionStateToString(GetAudioSessionState());
+ error = audio_session_control_->UnregisterAudioSessionNotification(this);
+ if (FAILED(error.Error())) {
+ RTC_LOG(LS_ERROR)
+ << "IAudioSessionControl::UnregisterAudioSessionNotification failed: "
+ << core_audio_utility::ErrorToString(error);
+ }
+
+ // To ensure that the restart process is as simple as possible, the audio
+ // thread is not destroyed during restart attempts triggered by internal
+ // error callbacks.
+ if (!IsRestarting()) {
+ thread_checker_audio_.Detach();
+ }
+
+ // Release all allocated COM interfaces to allow for a restart without
+ // intermediate destruction.
+ ReleaseCOMObjects();
+
+ return true;
+}
+
+bool CoreAudioBase::IsVolumeControlAvailable(bool* available) const {
+ // A valid IAudioClient is required to access the ISimpleAudioVolume interface
+ // properly. It is possible to use IAudioSessionManager::GetSimpleAudioVolume
+ // as well but we use the audio client here to ensure that the initialized
+ // audio session is visible under the group box labeled "Applications" in
+ // Sndvol.exe.
+ if (!audio_client_) {
+ return false;
+ }
+
+ // Try to create an ISimpleAudioVolume instance.
+ ComPtr<ISimpleAudioVolume> audio_volume =
+ core_audio_utility::CreateSimpleAudioVolume(audio_client_.Get());
+ if (!audio_volume.Get()) {
+ RTC_DLOG(LS_ERROR) << "Volume control is not supported";
+ return false;
+ }
+
+ // Try to use the valid volume control.
+ float volume = 0.0;
+ _com_error error = audio_volume->GetMasterVolume(&volume);
+ if (error.Error() != S_OK) {
+ RTC_LOG(LS_ERROR) << "ISimpleAudioVolume::GetMasterVolume failed: "
+ << core_audio_utility::ErrorToString(error);
+ *available = false;
+ return false;
+ }
+ RTC_DLOG(LS_INFO) << "master volume for output audio session: " << volume;
+
+ *available = true;
+ return true;
+}
+
+// Internal test method which can be used in tests to emulate a restart signal.
+// It simply sets the same event which is normally triggered by session and
+// device notifications. Hence, the emulated restart sequence covers most parts
+// of a real sequence except the actual device switch.
+bool CoreAudioBase::Restart() {
+ RTC_DLOG(LS_INFO) << __FUNCTION__ << "[" << DirectionToString(direction())
+ << "]";
+ if (!automatic_restart()) {
+ return false;
+ }
+ is_restarting_ = true;
+ SetEvent(restart_event_.Get());
+ return true;
+}
+
+void CoreAudioBase::StopThread() {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK(!IsRestarting());
+ if (!audio_thread_.empty()) {
+ RTC_DLOG(LS_INFO) << "Sets stop_event...";
+ SetEvent(stop_event_.Get());
+ RTC_DLOG(LS_INFO) << "PlatformThread::Finalize...";
+ audio_thread_.Finalize();
+
+ // Ensure that we don't quit the main thread loop immediately next
+ // time Start() is called.
+ ResetEvent(stop_event_.Get());
+ ResetEvent(restart_event_.Get());
+ }
+}
+
+bool CoreAudioBase::HandleRestartEvent() {
+ RTC_DLOG(LS_INFO) << __FUNCTION__ << "[" << DirectionToString(direction())
+ << "]";
+ RTC_DCHECK_RUN_ON(&thread_checker_audio_);
+ RTC_DCHECK(!audio_thread_.empty());
+ RTC_DCHECK(IsRestarting());
+ // Let each client (input and/or output) take care of its own restart
+ // sequence since each side might need unique actions.
+ // TODO(henrika): revisit and investigate if one common base implementation
+ // is possible.
+ bool restart_ok = on_error_callback_(ErrorType::kStreamDisconnected);
+ is_restarting_ = false;
+ return restart_ok;
+}
+
+bool CoreAudioBase::SwitchDeviceIfNeeded() {
+ RTC_DLOG(LS_INFO) << __FUNCTION__ << "[" << DirectionToString(direction())
+ << "]";
+ RTC_DCHECK_RUN_ON(&thread_checker_audio_);
+ RTC_DCHECK(IsRestarting());
+
+ RTC_DLOG(LS_INFO) << "device_index=" << device_index_
+ << " => device_id: " << device_id_;
+
+ // Ensure that at least one device exists and can be utilized. The most
+ // probable cause for ending up here is that a device has been removed.
+ if (core_audio_utility::NumberOfActiveDevices(IsInput() ? eCapture
+ : eRender) < 1) {
+ RTC_DLOG(LS_ERROR) << "All devices are disabled or removed";
+ return false;
+ }
+
+ // Get the unique device ID for the index which is currently used. It seems
+ // safe to assume that if the ID is the same as the existing device ID, then
+ // the device configuration is the same as before.
+ std::string device_id = GetDeviceID(device_index_);
+ if (device_id != device_id_) {
+ RTC_LOG(LS_WARNING)
+ << "Device configuration has changed => changing device selection...";
+ // TODO(henrika): depending on the current state and how we got here, we
+ // must select a new device here.
+ if (SetDevice(kDefault) == -1) {
+ RTC_LOG(LS_WARNING) << "Failed to set new audio device";
+ return false;
+ }
+ } else {
+ RTC_LOG(LS_INFO)
+ << "Device configuration has not changed => keeping selected device";
+ }
+ return true;
+}
+
+AudioSessionState CoreAudioBase::GetAudioSessionState() const {
+ AudioSessionState state = AudioSessionStateInactive;
+ RTC_DCHECK(audio_session_control_.Get());
+ _com_error error = audio_session_control_->GetState(&state);
+ if (FAILED(error.Error())) {
+ RTC_DLOG(LS_ERROR) << "IAudioSessionControl::GetState failed: "
+ << core_audio_utility::ErrorToString(error);
+ }
+ return state;
+}
+
+// TODO(henrika): only used for debugging purposes currently.
+ULONG CoreAudioBase::AddRef() {
+ ULONG new_ref = InterlockedIncrement(&ref_count_);
+ // RTC_DLOG(LS_INFO) << "__AddRef => " << new_ref;
+ return new_ref;
+}
+
+// TODO(henrika): does not call delete this.
+ULONG CoreAudioBase::Release() {
+ ULONG new_ref = InterlockedDecrement(&ref_count_);
+ // RTC_DLOG(LS_INFO) << "__Release => " << new_ref;
+ return new_ref;
+}
+
+// TODO(henrika): can probably be replaced by "return S_OK" only.
+HRESULT CoreAudioBase::QueryInterface(REFIID iid, void** object) {
+ if (object == nullptr) {
+ return E_POINTER;
+ }
+ if (iid == IID_IUnknown || iid == __uuidof(IAudioSessionEvents)) {
+ *object = static_cast<IAudioSessionEvents*>(this);
+ return S_OK;
+ }
+ *object = nullptr;
+ return E_NOINTERFACE;
+}
+
+// IAudioSessionEvents::OnStateChanged.
+HRESULT CoreAudioBase::OnStateChanged(AudioSessionState new_state) {
+ RTC_DLOG(LS_INFO) << "___" << __FUNCTION__ << "["
+ << DirectionToString(direction())
+ << "] new_state: " << SessionStateToString(new_state);
+ return S_OK;
+}
+
+// When a session is disconnected because of a device removal or format change
+// event, we want to inform the audio thread about the lost audio session and
+// trigger an attempt to restart audio using a new (default) device.
+// This method is called on separate threads owned by the session manager and
+// it can happen that the same type of callback is called more than once for the
+// same event.
+HRESULT CoreAudioBase::OnSessionDisconnected(
+ AudioSessionDisconnectReason disconnect_reason) {
+ RTC_DLOG(LS_INFO) << "___" << __FUNCTION__ << "["
+ << DirectionToString(direction()) << "] reason: "
+ << SessionDisconnectReasonToString(disconnect_reason);
+ // Ignore changes in the audio session (don't try to restart) if the user
+ // has explicitly asked for this type of ADM during construction.
+ if (!automatic_restart()) {
+ RTC_DLOG(LS_WARNING) << "___Automatic restart is disabled";
+ return S_OK;
+ }
+
+ if (IsRestarting()) {
+ RTC_DLOG(LS_WARNING) << "___Ignoring since restart is already active";
+ return S_OK;
+ }
+
+ // By default, automatic restart is enabled and the restart event will be set
+ // below if the device was removed or the format was changed.
+ if (disconnect_reason == DisconnectReasonDeviceRemoval ||
+ disconnect_reason == DisconnectReasonFormatChanged) {
+ is_restarting_ = true;
+ SetEvent(restart_event_.Get());
+ }
+ return S_OK;
+}
+
+// IAudioSessionEvents::OnDisplayNameChanged
+HRESULT CoreAudioBase::OnDisplayNameChanged(LPCWSTR new_display_name,
+ LPCGUID event_context) {
+ return S_OK;
+}
+
+// IAudioSessionEvents::OnIconPathChanged
+HRESULT CoreAudioBase::OnIconPathChanged(LPCWSTR new_icon_path,
+ LPCGUID event_context) {
+ return S_OK;
+}
+
+// IAudioSessionEvents::OnSimpleVolumeChanged
+HRESULT CoreAudioBase::OnSimpleVolumeChanged(float new_simple_volume,
+ BOOL new_mute,
+ LPCGUID event_context) {
+ return S_OK;
+}
+
+// IAudioSessionEvents::OnChannelVolumeChanged
+HRESULT CoreAudioBase::OnChannelVolumeChanged(DWORD channel_count,
+ float new_channel_volumes[],
+ DWORD changed_channel,
+ LPCGUID event_context) {
+ return S_OK;
+}
+
+// IAudioSessionEvents::OnGroupingParamChanged
+HRESULT CoreAudioBase::OnGroupingParamChanged(LPCGUID new_grouping_param,
+ LPCGUID event_context) {
+ return S_OK;
+}
+
+void CoreAudioBase::ThreadRun() {
+ if (!core_audio_utility::IsMMCSSSupported()) {
+ RTC_LOG(LS_ERROR) << "MMCSS is not supported";
+ return;
+ }
+ RTC_DLOG(LS_INFO) << "[" << DirectionToString(direction())
+ << "] ThreadRun starts...";
+ // TODO(henrika): difference between "Pro Audio" and "Audio"?
+ ScopedMMCSSRegistration mmcss_registration(L"Pro Audio");
+ ScopedCOMInitializer com_initializer(ScopedCOMInitializer::kMTA);
+ RTC_DCHECK(mmcss_registration.Succeeded());
+ RTC_DCHECK(com_initializer.Succeeded());
+ RTC_DCHECK(stop_event_.IsValid());
+ RTC_DCHECK(audio_samples_event_.IsValid());
+
+ bool streaming = true;
+ bool error = false;
+ HANDLE wait_array[] = {stop_event_.Get(), restart_event_.Get(),
+ audio_samples_event_.Get()};
+
+ // The device frequency is the frequency generated by the hardware clock in
+ // the audio device. The GetFrequency() method reports a constant frequency.
+ UINT64 device_frequency = 0;
+ _com_error result(S_FALSE);
+ if (audio_clock_) {
+ RTC_DCHECK(IsOutput());
+ result = audio_clock_->GetFrequency(&device_frequency);
+ if (FAILED(result.Error())) {
+ RTC_LOG(LS_ERROR) << "IAudioClock::GetFrequency failed: "
+ << core_audio_utility::ErrorToString(result);
+ }
+ }
+
+ // Keep streaming audio until the stop event or the stream-switch event
+ // is signaled. An error event can also break the main thread loop.
+ while (streaming && !error) {
+ // Wait for a close-down event, a stream-switch event, or a new audio samples event.
+ DWORD wait_result = WaitForMultipleObjects(arraysize(wait_array),
+ wait_array, false, INFINITE);
+ switch (wait_result) {
+ case WAIT_OBJECT_0 + 0:
+ // `stop_event_` has been set.
+ streaming = false;
+ break;
+ case WAIT_OBJECT_0 + 1:
+ // `restart_event_` has been set.
+ error = !HandleRestartEvent();
+ break;
+ case WAIT_OBJECT_0 + 2:
+ // `audio_samples_event_` has been set.
+ error = !on_data_callback_(device_frequency);
+ break;
+ default:
+ error = true;
+ break;
+ }
+ }
+
+ if (streaming && error) {
+ RTC_LOG(LS_ERROR) << "[" << DirectionToString(direction())
+ << "] WASAPI streaming failed.";
+ // Stop audio streaming since something has gone wrong in our main thread
+ // loop. Note that we are still in a "started" state, hence a Stop() call
+ // is required to join the thread properly.
+ result = audio_client_->Stop();
+ if (FAILED(result.Error())) {
+ RTC_LOG(LS_ERROR) << "IAudioClient::Stop failed: "
+ << core_audio_utility::ErrorToString(result);
+ }
+
+ // TODO(henrika): notify clients that something has gone wrong and that
+ // this stream should be destroyed instead of reused in the future.
+ }
+
+ RTC_DLOG(LS_INFO) << "[" << DirectionToString(direction())
+ << "] ...ThreadRun stops";
+}
+
+} // namespace webrtc_win
+} // namespace webrtc
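The heart of the file above is ThreadRun(): an event-driven loop that blocks in WaitForMultipleObjects() on three auto-reset events, giving stop and restart priority over the engine's "samples ready" signal. A stripped-down sketch of that loop shape; HandleRestart() and ProcessAudioBuffer() are hypothetical stand-ins for the restart and data callbacks:

#include <windows.h>

bool HandleRestart();       // stands in for HandleRestartEvent()
bool ProcessAudioBuffer();  // stands in for on_data_callback_

void RunAudioEventLoop(HANDLE stop_event, HANDLE restart_event,
                       HANDLE samples_event) {
  HANDLE wait_array[] = {stop_event, restart_event, samples_event};
  bool streaming = true;
  bool error = false;
  while (streaming && !error) {
    DWORD wait_result =
        WaitForMultipleObjects(3, wait_array, /*bWaitAll=*/FALSE, INFINITE);
    switch (wait_result) {
      case WAIT_OBJECT_0 + 0:  // stop requested
        streaming = false;
        break;
      case WAIT_OBJECT_0 + 1:  // device removed or format changed
        error = !HandleRestart();
        break;
      case WAIT_OBJECT_0 + 2:  // the engine filled/emptied a buffer
        error = !ProcessAudioBuffer();
        break;
      default:  // WAIT_FAILED or an abandoned handle
        error = true;
        break;
    }
  }
}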
diff --git a/third_party/libwebrtc/modules/audio_device/win/core_audio_base_win.h b/third_party/libwebrtc/modules/audio_device/win/core_audio_base_win.h
new file mode 100644
index 0000000000..6c1357e059
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/win/core_audio_base_win.h
@@ -0,0 +1,203 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_DEVICE_WIN_CORE_AUDIO_BASE_WIN_H_
+#define MODULES_AUDIO_DEVICE_WIN_CORE_AUDIO_BASE_WIN_H_
+
+#include <atomic>
+#include <functional>
+#include <memory>
+#include <string>
+
+#include "absl/strings/string_view.h"
+#include "absl/types/optional.h"
+#include "api/sequence_checker.h"
+#include "modules/audio_device/win/core_audio_utility_win.h"
+#include "rtc_base/platform_thread.h"
+
+namespace webrtc {
+
+class AudioDeviceBuffer;
+class FineAudioBuffer;
+
+namespace webrtc_win {
+
+// Serves as base class for CoreAudioInput and CoreAudioOutput and supports
+// device handling and audio streaming where the direction (input or output)
+// is set at construction by the parent.
+// The IAudioSessionEvents interface provides notifications of session-related
+// events such as changes in the volume level, display name, and session state.
+// This class does not use the default ref-counting memory management method
+// provided by IUnknown: calling CoreAudioBase::Release() will not delete the
+// object. The client will receive notification from the session manager on
+// a separate thread owned and controlled by the manager.
+// TODO(henrika): investigate if CoreAudioBase should implement
+// IMMNotificationClient as well (might improve support for device changes).
+class CoreAudioBase : public IAudioSessionEvents {
+ public:
+ enum class Direction {
+ kInput,
+ kOutput,
+ };
+
+ // TODO(henrika): add more error types.
+ enum class ErrorType {
+ kStreamDisconnected,
+ };
+
+ template <typename T>
+ auto as_integer(T const value) -> typename std::underlying_type<T>::type {
+ return static_cast<typename std::underlying_type<T>::type>(value);
+ }
+
+ // Callback definition for notifications of new audio data. For input clients,
+ // it means that "new audio data has now been captured", and for output
+ // clients, "the output layer now needs new audio data".
+ typedef std::function<bool(uint64_t device_frequency)> OnDataCallback;
+
+ // Callback definition for notifications of run-time error messages. It can
+ // be called e.g. when an active audio device is removed and an audio stream
+ // is disconnected (`error` is then set to kStreamDisconnected). Both input
+// and output clients implement OnErrorCallback() and will trigger an
+ // internal restart sequence for kStreamDisconnected.
+ // This method is currently always called on the audio thread.
+ // TODO(henrika): add support for more error types.
+ typedef std::function<bool(ErrorType error)> OnErrorCallback;
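+
+  // A minimal wiring sketch (illustrative only): a derived class forwards
+  // both callbacks into member functions via lambdas, as CoreAudioInput and
+  // CoreAudioOutput do, e.g.
+  //   CoreAudioBase(Direction::kInput, automatic_restart,
+  //                 [this](uint64_t freq) { return OnDataCallback(freq); },
+  //                 [this](ErrorType err) { return OnErrorCallback(err); });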
+
+ void ThreadRun();
+
+ CoreAudioBase(const CoreAudioBase&) = delete;
+ CoreAudioBase& operator=(const CoreAudioBase&) = delete;
+
+ protected:
+ explicit CoreAudioBase(Direction direction,
+ bool automatic_restart,
+ OnDataCallback data_callback,
+ OnErrorCallback error_callback);
+ ~CoreAudioBase();
+
+ std::string GetDeviceID(int index) const;
+ int SetDevice(int index);
+ int DeviceName(int index, std::string* name, std::string* guid) const;
+
+ // Checks if the current device ID is no longer in use (e.g. due to a
+  // disconnected stream), and if so, switches to the default audio device.
+  // Called on the audio thread during restart attempts.
+ bool SwitchDeviceIfNeeded();
+
+ bool Init();
+ bool Start();
+ bool Stop();
+ bool IsVolumeControlAvailable(bool* available) const;
+ bool Restart();
+
+ Direction direction() const { return direction_; }
+ bool automatic_restart() const { return automatic_restart_; }
+
+ // Releases all allocated COM resources in the base class.
+ void ReleaseCOMObjects();
+
+  // Returns the number of active devices for the specified `direction_` set
+  // by the parent (input or output).
+ int NumberOfActiveDevices() const;
+
+  // Returns the total number of enumerated audio devices, which is the sum
+  // of all active devices plus two extra (one default and one default
+ // communications). The value in `direction_` determines if capture or
+ // render devices are counted.
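+  // For example, three active render devices enumerate as five: index 0 is
+  // the default device, index 1 the default communications device, and
+  // indices 2-4 the individual devices.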
+ int NumberOfEnumeratedDevices() const;
+
+ bool IsInput() const;
+ bool IsOutput() const;
+ bool IsDefaultDevice(int index) const;
+ bool IsDefaultCommunicationsDevice(int index) const;
+ bool IsDefaultDeviceId(absl::string_view device_id) const;
+ bool IsDefaultCommunicationsDeviceId(absl::string_view device_id) const;
+ EDataFlow GetDataFlow() const;
+ bool IsRestarting() const;
+ int64_t TimeSinceStart() const;
+
+ // TODO(henrika): is the existing thread checker in WindowsAudioDeviceModule
+ // sufficient? As is, we have one top-level protection and then a second
+ // level here. In addition, calls to Init(), Start() and Stop() are not
+ // included to allow for support of internal restart (where these methods are
+ // called on the audio thread).
+ SequenceChecker thread_checker_;
+ SequenceChecker thread_checker_audio_;
+ AudioDeviceBuffer* audio_device_buffer_ = nullptr;
+ bool initialized_ = false;
+ WAVEFORMATEXTENSIBLE format_ = {};
+ uint32_t endpoint_buffer_size_frames_ = 0;
+ Microsoft::WRL::ComPtr<IAudioClock> audio_clock_;
+ Microsoft::WRL::ComPtr<IAudioClient> audio_client_;
+ bool is_active_ = false;
+ int64_t num_data_callbacks_ = 0;
+ int latency_ms_ = 0;
+ absl::optional<uint32_t> sample_rate_;
+
+ private:
+ const Direction direction_;
+ const bool automatic_restart_;
+ const OnDataCallback on_data_callback_;
+ const OnErrorCallback on_error_callback_;
+ ScopedHandle audio_samples_event_;
+ ScopedHandle stop_event_;
+ ScopedHandle restart_event_;
+ int64_t start_time_ = 0;
+ std::string device_id_;
+ int device_index_ = -1;
+ // Used by the IAudioSessionEvents implementations. Currently only utilized
+ // for debugging purposes.
+ LONG ref_count_ = 1;
+ // Set when restart process starts and cleared when restart stops
+ // successfully. Accessed atomically.
+ std::atomic<bool> is_restarting_;
+ rtc::PlatformThread audio_thread_;
+ Microsoft::WRL::ComPtr<IAudioSessionControl> audio_session_control_;
+
+ void StopThread();
+ AudioSessionState GetAudioSessionState() const;
+
+ // Called on the audio thread when a restart event has been set.
+ // It will then trigger calls to the installed error callbacks with error
+ // type set to kStreamDisconnected.
+ bool HandleRestartEvent();
+
+ // IUnknown (required by IAudioSessionEvents and IMMNotificationClient).
+ ULONG __stdcall AddRef() override;
+ ULONG __stdcall Release() override;
+ HRESULT __stdcall QueryInterface(REFIID iid, void** object) override;
+
+ // IAudioSessionEvents implementation.
+ // These methods are called on separate threads owned by the session manager.
+ // More than one thread can be involved depending on the type of callback
+ // and audio session.
+ HRESULT __stdcall OnStateChanged(AudioSessionState new_state) override;
+ HRESULT __stdcall OnSessionDisconnected(
+ AudioSessionDisconnectReason disconnect_reason) override;
+ HRESULT __stdcall OnDisplayNameChanged(LPCWSTR new_display_name,
+ LPCGUID event_context) override;
+ HRESULT __stdcall OnIconPathChanged(LPCWSTR new_icon_path,
+ LPCGUID event_context) override;
+ HRESULT __stdcall OnSimpleVolumeChanged(float new_simple_volume,
+ BOOL new_mute,
+ LPCGUID event_context) override;
+ HRESULT __stdcall OnChannelVolumeChanged(DWORD channel_count,
+ float new_channel_volumes[],
+ DWORD changed_channel,
+ LPCGUID event_context) override;
+ HRESULT __stdcall OnGroupingParamChanged(LPCGUID new_grouping_param,
+ LPCGUID event_context) override;
+};
+
+} // namespace webrtc_win
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_DEVICE_WIN_CORE_AUDIO_BASE_WIN_H_
diff --git a/third_party/libwebrtc/modules/audio_device/win/core_audio_input_win.cc b/third_party/libwebrtc/modules/audio_device/win/core_audio_input_win.cc
new file mode 100644
index 0000000000..17790dafc4
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/win/core_audio_input_win.cc
@@ -0,0 +1,453 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_device/win/core_audio_input_win.h"
+
+#include <memory>
+
+#include "modules/audio_device/audio_device_buffer.h"
+#include "modules/audio_device/fine_audio_buffer.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/numerics/safe_conversions.h"
+
+using Microsoft::WRL::ComPtr;
+
+namespace webrtc {
+namespace webrtc_win {
+
+enum AudioDeviceMessageType : uint32_t {
+ kMessageInputStreamDisconnected,
+};
+
+CoreAudioInput::CoreAudioInput(bool automatic_restart)
+ : CoreAudioBase(
+ CoreAudioBase::Direction::kInput,
+ automatic_restart,
+ [this](uint64_t freq) { return OnDataCallback(freq); },
+ [this](ErrorType err) { return OnErrorCallback(err); }) {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ thread_checker_audio_.Detach();
+}
+
+CoreAudioInput::~CoreAudioInput() {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+}
+
+int CoreAudioInput::Init() {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ return 0;
+}
+
+int CoreAudioInput::Terminate() {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ StopRecording();
+ return 0;
+}
+
+int CoreAudioInput::NumDevices() const {
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ return core_audio_utility::NumberOfActiveDevices(eCapture);
+}
+
+int CoreAudioInput::SetDevice(int index) {
+ RTC_DLOG(LS_INFO) << __FUNCTION__ << ": " << index;
+ RTC_DCHECK_GE(index, 0);
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ return CoreAudioBase::SetDevice(index);
+}
+
+int CoreAudioInput::SetDevice(AudioDeviceModule::WindowsDeviceType device) {
+ RTC_DLOG(LS_INFO) << __FUNCTION__ << ": "
+ << ((device == AudioDeviceModule::kDefaultDevice)
+ ? "Default"
+ : "DefaultCommunication");
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ return SetDevice((device == AudioDeviceModule::kDefaultDevice) ? 0 : 1);
+}
+
+int CoreAudioInput::DeviceName(int index,
+ std::string* name,
+ std::string* guid) {
+ RTC_DLOG(LS_INFO) << __FUNCTION__ << ": " << index;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ RTC_DCHECK(name);
+ return CoreAudioBase::DeviceName(index, name, guid);
+}
+
+void CoreAudioInput::AttachAudioBuffer(AudioDeviceBuffer* audio_buffer) {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ audio_device_buffer_ = audio_buffer;
+}
+
+bool CoreAudioInput::RecordingIsInitialized() const {
+ RTC_DLOG(LS_INFO) << __FUNCTION__ << ": " << initialized_;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ return initialized_;
+}
+
+int CoreAudioInput::InitRecording() {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK(!initialized_);
+ RTC_DCHECK(!Recording());
+ RTC_DCHECK(!audio_capture_client_);
+
+ // Creates an IAudioClient instance and stores the valid interface pointer in
+ // `audio_client3_`, `audio_client2_`, or `audio_client_` depending on
+ // platform support. The base class will use optimal input parameters and do
+ // an event driven shared mode initialization. The utilized format will be
+ // stored in `format_` and can be used for configuration and allocation of
+ // audio buffers.
+ if (!CoreAudioBase::Init()) {
+ return -1;
+ }
+ RTC_DCHECK(audio_client_);
+
+ // Configure the recording side of the audio device buffer using `format_`
+ // after a trivial sanity check of the format structure.
+ RTC_DCHECK(audio_device_buffer_);
+ WAVEFORMATEX* format = &format_.Format;
+ RTC_DCHECK_EQ(format->wFormatTag, WAVE_FORMAT_EXTENSIBLE);
+ audio_device_buffer_->SetRecordingSampleRate(format->nSamplesPerSec);
+ audio_device_buffer_->SetRecordingChannels(format->nChannels);
+
+ // Create a modified audio buffer class which allows us to supply any number
+  // of samples (and not only multiples of 10 ms) to match the optimal buffer
+ // size per callback used by Core Audio.
+ // TODO(henrika): can we share one FineAudioBuffer with the output side?
+ fine_audio_buffer_ = std::make_unique<FineAudioBuffer>(audio_device_buffer_);
+
+ // Create an IAudioCaptureClient for an initialized IAudioClient.
+ // The IAudioCaptureClient interface enables a client to read input data from
+ // a capture endpoint buffer.
+ ComPtr<IAudioCaptureClient> audio_capture_client =
+ core_audio_utility::CreateCaptureClient(audio_client_.Get());
+ if (!audio_capture_client) {
+ return -1;
+ }
+
+ // Query performance frequency.
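+  // The cached factor converts QPC ticks into 100ns units; since one second
+  // contains 1e7 units of 100ns, qpc_to_100ns_ = 1e7 / ticks-per-second.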
+ LARGE_INTEGER ticks_per_sec = {};
+ qpc_to_100ns_.reset();
+ if (::QueryPerformanceFrequency(&ticks_per_sec)) {
+ double qpc_ticks_per_second =
+ rtc::dchecked_cast<double>(ticks_per_sec.QuadPart);
+ qpc_to_100ns_ = 10000000.0 / qpc_ticks_per_second;
+ }
+
+ // Store valid COM interfaces.
+ audio_capture_client_ = audio_capture_client;
+
+ initialized_ = true;
+ return 0;
+}
+
+int CoreAudioInput::StartRecording() {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK(!Recording());
+ RTC_DCHECK(fine_audio_buffer_);
+ RTC_DCHECK(audio_device_buffer_);
+ if (!initialized_) {
+ RTC_DLOG(LS_WARNING)
+ << "Recording can not start since InitRecording must succeed first";
+ return 0;
+ }
+
+ fine_audio_buffer_->ResetRecord();
+ if (!IsRestarting()) {
+ audio_device_buffer_->StartRecording();
+ }
+
+ if (!Start()) {
+ return -1;
+ }
+
+ is_active_ = true;
+ return 0;
+}
+
+int CoreAudioInput::StopRecording() {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ if (!initialized_) {
+ return 0;
+ }
+
+ // Release resources allocated in InitRecording() and then return if this
+ // method is called without any active input audio.
+ if (!Recording()) {
+ RTC_DLOG(LS_WARNING) << "No input stream is active";
+ ReleaseCOMObjects();
+ initialized_ = false;
+ return 0;
+ }
+
+ if (!Stop()) {
+ RTC_LOG(LS_ERROR) << "StopRecording failed";
+ return -1;
+ }
+
+ if (!IsRestarting()) {
+ RTC_DCHECK(audio_device_buffer_);
+ audio_device_buffer_->StopRecording();
+ }
+
+ // Release all allocated resources to allow for a restart without
+ // intermediate destruction.
+ ReleaseCOMObjects();
+ qpc_to_100ns_.reset();
+
+ initialized_ = false;
+ is_active_ = false;
+ return 0;
+}
+
+bool CoreAudioInput::Recording() {
+ RTC_DLOG(LS_INFO) << __FUNCTION__ << ": " << is_active_;
+ return is_active_;
+}
+
+// TODO(henrika): finalize support of audio session volume control. As is, we
+// are not compatible with the old ADM implementation since it allows accessing
+// the volume control with any active audio output stream.
+int CoreAudioInput::VolumeIsAvailable(bool* available) {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ return IsVolumeControlAvailable(available) ? 0 : -1;
+}
+
+// Triggers the restart sequence. Only used for testing purposes to emulate
+// a real event where e.g. an active input device is removed.
+int CoreAudioInput::RestartRecording() {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ if (!Recording()) {
+ return 0;
+ }
+
+ if (!Restart()) {
+ RTC_LOG(LS_ERROR) << "RestartRecording failed";
+ return -1;
+ }
+ return 0;
+}
+
+bool CoreAudioInput::Restarting() const {
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ return IsRestarting();
+}
+
+int CoreAudioInput::SetSampleRate(uint32_t sample_rate) {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ sample_rate_ = sample_rate;
+ return 0;
+}
+
+void CoreAudioInput::ReleaseCOMObjects() {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ CoreAudioBase::ReleaseCOMObjects();
+ if (audio_capture_client_.Get()) {
+ audio_capture_client_.Reset();
+ }
+}
+
+bool CoreAudioInput::OnDataCallback(uint64_t device_frequency) {
+ RTC_DCHECK_RUN_ON(&thread_checker_audio_);
+
+ if (!initialized_ || !is_active_) {
+    // This is a concurrent examination of state across multiple threads, so
+    // it will be somewhat error prone, but we should still be defensive and
+    // not use audio_capture_client_ if we know it's not there.
+ return false;
+ }
+ if (num_data_callbacks_ == 0) {
+ RTC_LOG(LS_INFO) << "--- Input audio stream is alive ---";
+ }
+ UINT32 num_frames_in_next_packet = 0;
+ _com_error error =
+ audio_capture_client_->GetNextPacketSize(&num_frames_in_next_packet);
+ if (error.Error() == AUDCLNT_E_DEVICE_INVALIDATED) {
+    // Return true instead of false for AUDCLNT_E_DEVICE_INVALIDATED, even
+    // though it is a valid error, to avoid breaking the thread loop
+    // implicitly. We will use notifications about device changes instead to
+    // stop data callbacks and attempt to restart streaming.
+ RTC_DLOG(LS_ERROR) << "AUDCLNT_E_DEVICE_INVALIDATED";
+ return true;
+ }
+ if (FAILED(error.Error())) {
+ RTC_LOG(LS_ERROR) << "IAudioCaptureClient::GetNextPacketSize failed: "
+ << core_audio_utility::ErrorToString(error);
+ return false;
+ }
+
+ // Drain the WASAPI capture buffer fully if audio has been recorded.
+ while (num_frames_in_next_packet > 0) {
+ uint8_t* audio_data;
+ UINT32 num_frames_to_read = 0;
+ DWORD flags = 0;
+ UINT64 device_position_frames = 0;
+ UINT64 capture_time_100ns = 0;
+ error = audio_capture_client_->GetBuffer(&audio_data, &num_frames_to_read,
+ &flags, &device_position_frames,
+ &capture_time_100ns);
+ if (error.Error() == AUDCLNT_S_BUFFER_EMPTY) {
+ // The call succeeded but no capture data is available to be read.
+      // Return and start waiting for a new capture event.
+ RTC_DCHECK_EQ(num_frames_to_read, 0u);
+ return true;
+ }
+ if (FAILED(error.Error())) {
+ RTC_LOG(LS_ERROR) << "IAudioCaptureClient::GetBuffer failed: "
+ << core_audio_utility::ErrorToString(error);
+ return false;
+ }
+
+ // Update input delay estimate but only about once per second to save
+ // resources. The estimate is usually stable.
+ if (num_data_callbacks_ % 100 == 0) {
+ absl::optional<int> opt_record_delay_ms;
+ // TODO(henrika): note that FineAudioBuffer adds latency as well.
+ opt_record_delay_ms = EstimateLatencyMillis(capture_time_100ns);
+ if (opt_record_delay_ms) {
+ latency_ms_ = *opt_record_delay_ms;
+ } else {
+ RTC_DLOG(LS_WARNING) << "Input latency is set to fixed value";
+ latency_ms_ = 20;
+ }
+ }
+ if (num_data_callbacks_ % 500 == 0) {
+ RTC_DLOG(LS_INFO) << "latency: " << latency_ms_;
+ }
+
+ // The data in the packet is not correlated with the previous packet's
+ // device position; possibly due to a stream state transition or timing
+ // glitch. The behavior of the AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY flag
+ // is undefined on the application's first call to GetBuffer after Start.
+ if (device_position_frames != 0 &&
+ flags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY) {
+ RTC_DLOG(LS_WARNING) << "AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY";
+ }
+ // The time at which the device's stream position was recorded is uncertain.
+ // Thus, the client might be unable to accurately set a time stamp for the
+ // current data packet.
+ if (flags & AUDCLNT_BUFFERFLAGS_TIMESTAMP_ERROR) {
+ RTC_DLOG(LS_WARNING) << "AUDCLNT_BUFFERFLAGS_TIMESTAMP_ERROR";
+ }
+
+ // Treat all of the data in the packet as silence and ignore the actual
+ // data values when AUDCLNT_BUFFERFLAGS_SILENT is set.
+ if (flags & AUDCLNT_BUFFERFLAGS_SILENT) {
+ rtc::ExplicitZeroMemory(audio_data,
+ format_.Format.nBlockAlign * num_frames_to_read);
+ RTC_DLOG(LS_WARNING) << "Captured audio is replaced by silence";
+ } else {
+ // Copy recorded audio in `audio_data` to the WebRTC sink using the
+ // FineAudioBuffer object.
+ fine_audio_buffer_->DeliverRecordedData(
+ rtc::MakeArrayView(reinterpret_cast<const int16_t*>(audio_data),
+ format_.Format.nChannels * num_frames_to_read),
+          latency_ms_);
+ }
+
+ error = audio_capture_client_->ReleaseBuffer(num_frames_to_read);
+ if (FAILED(error.Error())) {
+ RTC_LOG(LS_ERROR) << "IAudioCaptureClient::ReleaseBuffer failed: "
+ << core_audio_utility::ErrorToString(error);
+ return false;
+ }
+
+ error =
+ audio_capture_client_->GetNextPacketSize(&num_frames_in_next_packet);
+ if (FAILED(error.Error())) {
+ RTC_LOG(LS_ERROR) << "IAudioCaptureClient::GetNextPacketSize failed: "
+ << core_audio_utility::ErrorToString(error);
+ return false;
+ }
+ }
+ ++num_data_callbacks_;
+ return true;
+}
+
+bool CoreAudioInput::OnErrorCallback(ErrorType error) {
+ RTC_DLOG(LS_INFO) << __FUNCTION__ << ": " << as_integer(error);
+ RTC_DCHECK_RUN_ON(&thread_checker_audio_);
+ if (error == CoreAudioBase::ErrorType::kStreamDisconnected) {
+ HandleStreamDisconnected();
+ } else {
+ RTC_DLOG(LS_WARNING) << "Unsupported error type";
+ }
+ return true;
+}
+
+absl::optional<int> CoreAudioInput::EstimateLatencyMillis(
+ uint64_t capture_time_100ns) {
+ if (!qpc_to_100ns_) {
+ return absl::nullopt;
+ }
+ // Input parameter `capture_time_100ns` contains the performance counter at
+ // the time that the audio endpoint device recorded the device position of
+ // the first audio frame in the data packet converted into 100ns units.
+ // We derive a delay estimate by:
+ // - sampling the current performance counter (qpc_now_raw),
+ // - converting it into 100ns time units (now_time_100ns), and
+ // - subtracting `capture_time_100ns` from now_time_100ns.
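+  // In effect: delay_us ~= 0.1 * (now_time_100ns - capture_time_100ns) + 0.5,
+  // where the factor 0.1 converts 100ns units into microseconds and the +0.5
+  // rounds to the nearest integer microsecond.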
+ LARGE_INTEGER perf_counter_now = {};
+ if (!::QueryPerformanceCounter(&perf_counter_now)) {
+ return absl::nullopt;
+ }
+ uint64_t qpc_now_raw = perf_counter_now.QuadPart;
+ uint64_t now_time_100ns = qpc_now_raw * (*qpc_to_100ns_);
+ webrtc::TimeDelta delay_us = webrtc::TimeDelta::Micros(
+ 0.1 * (now_time_100ns - capture_time_100ns) + 0.5);
+ return delay_us.ms();
+}
+
+// Called from OnErrorCallback() when error type is kStreamDisconnected.
+// Note that this method is called on the audio thread and the internal restart
+// sequence is also executed on that same thread. The audio thread is therefore
+// not stopped during restart. Such a scheme also makes the restart process less
+// complex.
+// Note that none of the called methods are thread checked since they can also
+// be called on the main thread. Thread checkers are instead added on one layer
+// above (in audio_device_module.cc) which ensures that the public API is thread
+// safe.
+// TODO(henrika): add more details.
+bool CoreAudioInput::HandleStreamDisconnected() {
+ RTC_DLOG(LS_INFO) << "<<<--- " << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_audio_);
+ RTC_DCHECK(automatic_restart());
+
+ if (StopRecording() != 0) {
+ return false;
+ }
+
+ if (!SwitchDeviceIfNeeded()) {
+ return false;
+ }
+
+ if (InitRecording() != 0) {
+ return false;
+ }
+ if (StartRecording() != 0) {
+ return false;
+ }
+
+ RTC_DLOG(LS_INFO) << __FUNCTION__ << " --->>>";
+ return true;
+}
+
+} // namespace webrtc_win
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_device/win/core_audio_input_win.h b/third_party/libwebrtc/modules/audio_device/win/core_audio_input_win.h
new file mode 100644
index 0000000000..be290f9f4e
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/win/core_audio_input_win.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_DEVICE_WIN_CORE_AUDIO_INPUT_WIN_H_
+#define MODULES_AUDIO_DEVICE_WIN_CORE_AUDIO_INPUT_WIN_H_
+
+#include <memory>
+#include <string>
+
+#include "absl/types/optional.h"
+#include "modules/audio_device/win/audio_device_module_win.h"
+#include "modules/audio_device/win/core_audio_base_win.h"
+
+namespace webrtc {
+
+class AudioDeviceBuffer;
+class FineAudioBuffer;
+
+namespace webrtc_win {
+
+// Windows specific AudioInput implementation using a CoreAudioBase class where
+// an input direction is set at construction. Supports capture device handling
+// and streaming of captured audio to a WebRTC client.
+class CoreAudioInput final : public CoreAudioBase, public AudioInput {
+ public:
+ CoreAudioInput(bool automatic_restart);
+ ~CoreAudioInput() override;
+
+ // AudioInput implementation.
+ int Init() override;
+ int Terminate() override;
+ int NumDevices() const override;
+ int SetDevice(int index) override;
+ int SetDevice(AudioDeviceModule::WindowsDeviceType device) override;
+ int DeviceName(int index, std::string* name, std::string* guid) override;
+ void AttachAudioBuffer(AudioDeviceBuffer* audio_buffer) override;
+ bool RecordingIsInitialized() const override;
+ int InitRecording() override;
+ int StartRecording() override;
+ int StopRecording() override;
+ bool Recording() override;
+ int VolumeIsAvailable(bool* available) override;
+ int RestartRecording() override;
+ bool Restarting() const override;
+ int SetSampleRate(uint32_t sample_rate) override;
+
+ CoreAudioInput(const CoreAudioInput&) = delete;
+ CoreAudioInput& operator=(const CoreAudioInput&) = delete;
+
+ private:
+ void ReleaseCOMObjects();
+ bool OnDataCallback(uint64_t device_frequency);
+ bool OnErrorCallback(ErrorType error);
+ absl::optional<int> EstimateLatencyMillis(uint64_t capture_time_100ns);
+ bool HandleStreamDisconnected();
+
+ std::unique_ptr<FineAudioBuffer> fine_audio_buffer_;
+ Microsoft::WRL::ComPtr<IAudioCaptureClient> audio_capture_client_;
+ absl::optional<double> qpc_to_100ns_;
+};
+
+} // namespace webrtc_win
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_DEVICE_WIN_CORE_AUDIO_INPUT_WIN_H_
diff --git a/third_party/libwebrtc/modules/audio_device/win/core_audio_output_win.cc b/third_party/libwebrtc/modules/audio_device/win/core_audio_output_win.cc
new file mode 100644
index 0000000000..c92fedf0e9
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/win/core_audio_output_win.cc
@@ -0,0 +1,422 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_device/win/core_audio_output_win.h"
+
+#include <memory>
+
+#include "modules/audio_device/audio_device_buffer.h"
+#include "modules/audio_device/fine_audio_buffer.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/time_utils.h"
+
+using Microsoft::WRL::ComPtr;
+
+namespace webrtc {
+namespace webrtc_win {
+
+CoreAudioOutput::CoreAudioOutput(bool automatic_restart)
+ : CoreAudioBase(
+ CoreAudioBase::Direction::kOutput,
+ automatic_restart,
+ [this](uint64_t freq) { return OnDataCallback(freq); },
+ [this](ErrorType err) { return OnErrorCallback(err); }) {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ thread_checker_audio_.Detach();
+}
+
+CoreAudioOutput::~CoreAudioOutput() {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ Terminate();
+}
+
+int CoreAudioOutput::Init() {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ return 0;
+}
+
+int CoreAudioOutput::Terminate() {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ StopPlayout();
+ return 0;
+}
+
+int CoreAudioOutput::NumDevices() const {
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ return core_audio_utility::NumberOfActiveDevices(eRender);
+}
+
+int CoreAudioOutput::SetDevice(int index) {
+ RTC_DLOG(LS_INFO) << __FUNCTION__ << ": " << index;
+ RTC_DCHECK_GE(index, 0);
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ return CoreAudioBase::SetDevice(index);
+}
+
+int CoreAudioOutput::SetDevice(AudioDeviceModule::WindowsDeviceType device) {
+ RTC_DLOG(LS_INFO) << __FUNCTION__ << ": "
+ << ((device == AudioDeviceModule::kDefaultDevice)
+ ? "Default"
+ : "DefaultCommunication");
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ return SetDevice((device == AudioDeviceModule::kDefaultDevice) ? 0 : 1);
+}
+
+int CoreAudioOutput::DeviceName(int index,
+ std::string* name,
+ std::string* guid) {
+ RTC_DLOG(LS_INFO) << __FUNCTION__ << ": " << index;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ RTC_DCHECK(name);
+ return CoreAudioBase::DeviceName(index, name, guid);
+}
+
+void CoreAudioOutput::AttachAudioBuffer(AudioDeviceBuffer* audio_buffer) {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ audio_device_buffer_ = audio_buffer;
+}
+
+bool CoreAudioOutput::PlayoutIsInitialized() const {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ return initialized_;
+}
+
+int CoreAudioOutput::InitPlayout() {
+ RTC_DLOG(LS_INFO) << __FUNCTION__ << ": " << IsRestarting();
+ RTC_DCHECK(!initialized_);
+ RTC_DCHECK(!Playing());
+ RTC_DCHECK(!audio_render_client_);
+
+ // Creates an IAudioClient instance and stores the valid interface pointer in
+ // `audio_client3_`, `audio_client2_`, or `audio_client_` depending on
+ // platform support. The base class will use optimal output parameters and do
+ // an event driven shared mode initialization. The utilized format will be
+ // stored in `format_` and can be used for configuration and allocation of
+ // audio buffers.
+ if (!CoreAudioBase::Init()) {
+ return -1;
+ }
+ RTC_DCHECK(audio_client_);
+
+ // Configure the playout side of the audio device buffer using `format_`
+ // after a trivial sanity check of the format structure.
+ RTC_DCHECK(audio_device_buffer_);
+ WAVEFORMATEX* format = &format_.Format;
+ RTC_DCHECK_EQ(format->wFormatTag, WAVE_FORMAT_EXTENSIBLE);
+ audio_device_buffer_->SetPlayoutSampleRate(format->nSamplesPerSec);
+ audio_device_buffer_->SetPlayoutChannels(format->nChannels);
+
+ // Create a modified audio buffer class which allows us to ask for any number
+  // of samples (and not only multiples of 10 ms) to match the optimal
+ // buffer size per callback used by Core Audio.
+ // TODO(henrika): can we share one FineAudioBuffer with the input side?
+ fine_audio_buffer_ = std::make_unique<FineAudioBuffer>(audio_device_buffer_);
+
+ // Create an IAudioRenderClient for an initialized IAudioClient.
+ // The IAudioRenderClient interface enables us to write output data to
+ // a rendering endpoint buffer.
+ ComPtr<IAudioRenderClient> audio_render_client =
+ core_audio_utility::CreateRenderClient(audio_client_.Get());
+ if (!audio_render_client.Get()) {
+ return -1;
+ }
+
+ ComPtr<IAudioClock> audio_clock =
+ core_audio_utility::CreateAudioClock(audio_client_.Get());
+ if (!audio_clock.Get()) {
+ return -1;
+ }
+
+ // Store valid COM interfaces.
+ audio_render_client_ = audio_render_client;
+ audio_clock_ = audio_clock;
+
+ initialized_ = true;
+ return 0;
+}
+
+int CoreAudioOutput::StartPlayout() {
+ RTC_DLOG(LS_INFO) << __FUNCTION__ << ": " << IsRestarting();
+ RTC_DCHECK(!Playing());
+ RTC_DCHECK(fine_audio_buffer_);
+ RTC_DCHECK(audio_device_buffer_);
+ if (!initialized_) {
+    RTC_DLOG(LS_WARNING)
+        << "Playout cannot start since InitPlayout must succeed first";
+    return 0;
+  }
+
+ fine_audio_buffer_->ResetPlayout();
+ if (!IsRestarting()) {
+ audio_device_buffer_->StartPlayout();
+ }
+
+ if (!core_audio_utility::FillRenderEndpointBufferWithSilence(
+ audio_client_.Get(), audio_render_client_.Get())) {
+ RTC_LOG(LS_WARNING) << "Failed to prepare output endpoint with silence";
+ }
+
+ num_frames_written_ = endpoint_buffer_size_frames_;
+
+ if (!Start()) {
+ return -1;
+ }
+
+ is_active_ = true;
+ return 0;
+}
+
+int CoreAudioOutput::StopPlayout() {
+ RTC_DLOG(LS_INFO) << __FUNCTION__ << ": " << IsRestarting();
+ if (!initialized_) {
+ return 0;
+ }
+
+ // Release resources allocated in InitPlayout() and then return if this
+ // method is called without any active output audio.
+ if (!Playing()) {
+ RTC_DLOG(LS_WARNING) << "No output stream is active";
+ ReleaseCOMObjects();
+ initialized_ = false;
+ return 0;
+ }
+
+ if (!Stop()) {
+ RTC_LOG(LS_ERROR) << "StopPlayout failed";
+ return -1;
+ }
+
+ if (!IsRestarting()) {
+ RTC_DCHECK(audio_device_buffer_);
+ audio_device_buffer_->StopPlayout();
+ }
+
+ // Release all allocated resources to allow for a restart without
+ // intermediate destruction.
+ ReleaseCOMObjects();
+
+ initialized_ = false;
+ is_active_ = false;
+ return 0;
+}
+
+bool CoreAudioOutput::Playing() {
+ RTC_DLOG(LS_INFO) << __FUNCTION__ << ": " << is_active_;
+ return is_active_;
+}
+
+// TODO(henrika): finalize support of audio session volume control. As is, we
+// are not compatible with the old ADM implementation since it allows accessing
+// the volume control with any active audio output stream.
+int CoreAudioOutput::VolumeIsAvailable(bool* available) {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ return IsVolumeControlAvailable(available) ? 0 : -1;
+}
+
+// Triggers the restart sequence. Only used for testing purposes to emulate
+// a real event where e.g. an active output device is removed.
+int CoreAudioOutput::RestartPlayout() {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ if (!Playing()) {
+ return 0;
+ }
+ if (!Restart()) {
+ RTC_LOG(LS_ERROR) << "RestartPlayout failed";
+ return -1;
+ }
+ return 0;
+}
+
+bool CoreAudioOutput::Restarting() const {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ return IsRestarting();
+}
+
+int CoreAudioOutput::SetSampleRate(uint32_t sample_rate) {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ sample_rate_ = sample_rate;
+ return 0;
+}
+
+void CoreAudioOutput::ReleaseCOMObjects() {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ CoreAudioBase::ReleaseCOMObjects();
+ if (audio_render_client_.Get()) {
+ audio_render_client_.Reset();
+ }
+}
+
+bool CoreAudioOutput::OnErrorCallback(ErrorType error) {
+ RTC_DLOG(LS_INFO) << __FUNCTION__ << ": " << as_integer(error);
+ RTC_DCHECK_RUN_ON(&thread_checker_audio_);
+ if (!initialized_ || !Playing()) {
+ return true;
+ }
+
+ if (error == CoreAudioBase::ErrorType::kStreamDisconnected) {
+ HandleStreamDisconnected();
+ } else {
+ RTC_DLOG(LS_WARNING) << "Unsupported error type";
+ }
+ return true;
+}
+
+bool CoreAudioOutput::OnDataCallback(uint64_t device_frequency) {
+ RTC_DCHECK_RUN_ON(&thread_checker_audio_);
+ if (num_data_callbacks_ == 0) {
+ RTC_LOG(LS_INFO) << "--- Output audio stream is alive ---";
+ }
+ // Get the padding value which indicates the amount of valid unread data that
+ // the endpoint buffer currently contains.
+ UINT32 num_unread_frames = 0;
+ _com_error error = audio_client_->GetCurrentPadding(&num_unread_frames);
+ if (error.Error() == AUDCLNT_E_DEVICE_INVALIDATED) {
+    // Return true instead of false for AUDCLNT_E_DEVICE_INVALIDATED, even
+    // though it is a valid error, to avoid breaking the thread loop
+    // implicitly. We will use notifications about device changes instead to
+    // stop data callbacks and attempt to restart streaming.
+ RTC_DLOG(LS_ERROR) << "AUDCLNT_E_DEVICE_INVALIDATED";
+ return true;
+ }
+ if (FAILED(error.Error())) {
+ RTC_LOG(LS_ERROR) << "IAudioClient::GetCurrentPadding failed: "
+ << core_audio_utility::ErrorToString(error);
+ return false;
+ }
+
+ // Contains how much new data we can write to the buffer without the risk of
+ // overwriting previously written data that the audio engine has not yet read
+ // from the buffer. I.e., it is the maximum buffer size we can request when
+ // calling IAudioRenderClient::GetBuffer().
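+  // For example, with a 480-frame endpoint buffer (10 ms at 48 kHz) and 96
+  // unread frames, up to 384 new frames can be requested.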
+ UINT32 num_requested_frames =
+ endpoint_buffer_size_frames_ - num_unread_frames;
+ if (num_requested_frames == 0) {
+ RTC_DLOG(LS_WARNING)
+ << "Audio thread is signaled but no new audio samples are needed";
+ return true;
+ }
+
+ // Request all available space in the rendering endpoint buffer into which the
+ // client can later write an audio packet.
+ uint8_t* audio_data;
+ error = audio_render_client_->GetBuffer(num_requested_frames, &audio_data);
+ if (FAILED(error.Error())) {
+ RTC_LOG(LS_ERROR) << "IAudioRenderClient::GetBuffer failed: "
+ << core_audio_utility::ErrorToString(error);
+ return false;
+ }
+
+ // Update output delay estimate but only about once per second to save
+ // resources. The estimate is usually stable.
+ if (num_data_callbacks_ % 100 == 0) {
+ // TODO(henrika): note that FineAudioBuffer adds latency as well.
+ latency_ms_ = EstimateOutputLatencyMillis(device_frequency);
+ if (num_data_callbacks_ % 500 == 0) {
+ RTC_DLOG(LS_INFO) << "latency: " << latency_ms_;
+ }
+ }
+
+ // Get audio data from WebRTC and write it to the allocated buffer in
+ // `audio_data`. The playout latency is not updated for each callback.
+ fine_audio_buffer_->GetPlayoutData(
+ rtc::MakeArrayView(reinterpret_cast<int16_t*>(audio_data),
+ num_requested_frames * format_.Format.nChannels),
+ latency_ms_);
+
+ // Release the buffer space acquired in IAudioRenderClient::GetBuffer.
+ error = audio_render_client_->ReleaseBuffer(num_requested_frames, 0);
+ if (FAILED(error.Error())) {
+ RTC_LOG(LS_ERROR) << "IAudioRenderClient::ReleaseBuffer failed: "
+ << core_audio_utility::ErrorToString(error);
+ return false;
+ }
+
+ num_frames_written_ += num_requested_frames;
+ ++num_data_callbacks_;
+
+ return true;
+}
+
+// TODO(henrika): IAudioClock2::GetDevicePosition could perhaps be used here
+// instead. Tried it once, but it crashed for capture devices.
+int CoreAudioOutput::EstimateOutputLatencyMillis(uint64_t device_frequency) {
+ UINT64 position = 0;
+ UINT64 qpc_position = 0;
+ int delay_ms = 0;
+ // Get the device position through output parameter `position`. This is the
+ // stream position of the sample that is currently playing through the
+ // speakers.
+ _com_error error = audio_clock_->GetPosition(&position, &qpc_position);
+ if (error.Error() == S_OK) {
+ // Number of frames already played out through the speaker.
+ const uint64_t num_played_out_frames =
+ format_.Format.nSamplesPerSec * position / device_frequency;
+
+    // Number of frames that have been written to the buffer but not yet
+    // played out; this corresponds to the estimated latency measured in
+    // audio frames.
+ const uint64_t delay_frames = num_frames_written_ - num_played_out_frames;
+
+ // Convert latency in number of frames into milliseconds.
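+    // (Equivalently: delay_ms ~= 1000 * delay_frames / nSamplesPerSec.)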
+ webrtc::TimeDelta delay =
+ webrtc::TimeDelta::Micros(delay_frames * rtc::kNumMicrosecsPerSec /
+ format_.Format.nSamplesPerSec);
+ delay_ms = delay.ms();
+ }
+ return delay_ms;
+}
+
+// Called from OnErrorCallback() when error type is kStreamDisconnected.
+// Note that this method is called on the audio thread and the internal restart
+// sequence is also executed on that same thread. The audio thread is therefore
+// not stopped during restart. Such a scheme also makes the restart process less
+// complex.
+// Note that, none of the called methods are thread checked since they can also
+// be called on the main thread. Thread checkers are instead added on one layer
+// above (in audio_device_module.cc) which ensures that the public API is thread
+// safe.
+// TODO(henrika): add more details.
+bool CoreAudioOutput::HandleStreamDisconnected() {
+ RTC_DLOG(LS_INFO) << "<<<--- " << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_audio_);
+ RTC_DCHECK(automatic_restart());
+
+ if (StopPlayout() != 0) {
+ return false;
+ }
+
+ if (!SwitchDeviceIfNeeded()) {
+ return false;
+ }
+
+ if (InitPlayout() != 0) {
+ return false;
+ }
+ if (StartPlayout() != 0) {
+ return false;
+ }
+
+ RTC_DLOG(LS_INFO) << __FUNCTION__ << " --->>>";
+ return true;
+}
+
+} // namespace webrtc_win
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_device/win/core_audio_output_win.h b/third_party/libwebrtc/modules/audio_device/win/core_audio_output_win.h
new file mode 100644
index 0000000000..5a547498a3
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/win/core_audio_output_win.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_DEVICE_WIN_CORE_AUDIO_OUTPUT_WIN_H_
+#define MODULES_AUDIO_DEVICE_WIN_CORE_AUDIO_OUTPUT_WIN_H_
+
+#include <memory>
+#include <string>
+
+#include "modules/audio_device/win/audio_device_module_win.h"
+#include "modules/audio_device/win/core_audio_base_win.h"
+
+namespace webrtc {
+
+class AudioDeviceBuffer;
+class FineAudioBuffer;
+
+namespace webrtc_win {
+
+// Windows specific AudioOutput implementation using a CoreAudioBase class where
+// an output direction is set at construction. Supports render device handling
+// and streaming of decoded audio from a WebRTC client to the native audio
+// layer.
+class CoreAudioOutput final : public CoreAudioBase, public AudioOutput {
+ public:
+ CoreAudioOutput(bool automatic_restart);
+ ~CoreAudioOutput() override;
+
+ // AudioOutput implementation.
+ int Init() override;
+ int Terminate() override;
+ int NumDevices() const override;
+ int SetDevice(int index) override;
+ int SetDevice(AudioDeviceModule::WindowsDeviceType device) override;
+ int DeviceName(int index, std::string* name, std::string* guid) override;
+ void AttachAudioBuffer(AudioDeviceBuffer* audio_buffer) override;
+ bool PlayoutIsInitialized() const override;
+ int InitPlayout() override;
+ int StartPlayout() override;
+ int StopPlayout() override;
+ bool Playing() override;
+ int VolumeIsAvailable(bool* available) override;
+ int RestartPlayout() override;
+ bool Restarting() const override;
+ int SetSampleRate(uint32_t sample_rate) override;
+
+ CoreAudioOutput(const CoreAudioOutput&) = delete;
+ CoreAudioOutput& operator=(const CoreAudioOutput&) = delete;
+
+ private:
+ void ReleaseCOMObjects();
+ bool OnDataCallback(uint64_t device_frequency);
+ bool OnErrorCallback(ErrorType error);
+ int EstimateOutputLatencyMillis(uint64_t device_frequency);
+ bool HandleStreamDisconnected();
+
+ std::unique_ptr<FineAudioBuffer> fine_audio_buffer_;
+ Microsoft::WRL::ComPtr<IAudioRenderClient> audio_render_client_;
+ uint64_t num_frames_written_ = 0;
+};
+
+} // namespace webrtc_win
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_DEVICE_WIN_CORE_AUDIO_OUTPUT_WIN_H_
diff --git a/third_party/libwebrtc/modules/audio_device/win/core_audio_utility_win.cc b/third_party/libwebrtc/modules/audio_device/win/core_audio_utility_win.cc
new file mode 100644
index 0000000000..e4e2864db5
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/win/core_audio_utility_win.cc
@@ -0,0 +1,1529 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_device/win/core_audio_utility_win.h"
+
+#include <functiondiscoverykeys_devpkey.h>
+#include <stdio.h>
+#include <tchar.h>
+
+#include <iomanip>
+#include <string>
+#include <utility>
+
+#include "absl/strings/string_view.h"
+#include "rtc_base/arraysize.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/platform_thread_types.h"
+#include "rtc_base/string_utils.h"
+#include "rtc_base/strings/string_builder.h"
+#include "rtc_base/win/windows_version.h"
+
+using Microsoft::WRL::ComPtr;
+using webrtc::AudioDeviceName;
+using webrtc::AudioParameters;
+
+namespace webrtc {
+namespace webrtc_win {
+namespace {
+
+using core_audio_utility::ErrorToString;
+
+// Converts from channel mask to list of included channels.
+// Each audio data format contains channels for one or more of the positions
+// listed below. The number of channels simply equals the number of nonzero
+// flag bits in the `channel_mask`. The relative positions of the channels
+// within each block of audio data always follow the same relative ordering
+// as the flag bits in the table below. For example, if `channel_mask` contains
+// the value 0x00000033, the format defines four audio channels that are
+// assigned for playback to the front-left, front-right, back-left,
+// and back-right speakers, respectively. The channel data should be interleaved
+// in that order within each block.
+std::string ChannelMaskToString(DWORD channel_mask) {
+ std::string ss;
+ int n = 0;
+ if (channel_mask & SPEAKER_FRONT_LEFT) {
+ ss += "FRONT_LEFT | ";
+ ++n;
+ }
+ if (channel_mask & SPEAKER_FRONT_RIGHT) {
+ ss += "FRONT_RIGHT | ";
+ ++n;
+ }
+ if (channel_mask & SPEAKER_FRONT_CENTER) {
+ ss += "FRONT_CENTER | ";
+ ++n;
+ }
+ if (channel_mask & SPEAKER_LOW_FREQUENCY) {
+ ss += "LOW_FREQUENCY | ";
+ ++n;
+ }
+ if (channel_mask & SPEAKER_BACK_LEFT) {
+ ss += "BACK_LEFT | ";
+ ++n;
+ }
+ if (channel_mask & SPEAKER_BACK_RIGHT) {
+ ss += "BACK_RIGHT | ";
+ ++n;
+ }
+ if (channel_mask & SPEAKER_FRONT_LEFT_OF_CENTER) {
+ ss += "FRONT_LEFT_OF_CENTER | ";
+ ++n;
+ }
+ if (channel_mask & SPEAKER_FRONT_RIGHT_OF_CENTER) {
+ ss += "RIGHT_OF_CENTER | ";
+ ++n;
+ }
+ if (channel_mask & SPEAKER_BACK_CENTER) {
+ ss += "BACK_CENTER | ";
+ ++n;
+ }
+ if (channel_mask & SPEAKER_SIDE_LEFT) {
+ ss += "SIDE_LEFT | ";
+ ++n;
+ }
+ if (channel_mask & SPEAKER_SIDE_RIGHT) {
+ ss += "SIDE_RIGHT | ";
+ ++n;
+ }
+ if (channel_mask & SPEAKER_TOP_CENTER) {
+ ss += "TOP_CENTER | ";
+ ++n;
+ }
+ if (channel_mask & SPEAKER_TOP_FRONT_LEFT) {
+ ss += "TOP_FRONT_LEFT | ";
+ ++n;
+ }
+ if (channel_mask & SPEAKER_TOP_FRONT_CENTER) {
+ ss += "TOP_FRONT_CENTER | ";
+ ++n;
+ }
+ if (channel_mask & SPEAKER_TOP_FRONT_RIGHT) {
+ ss += "TOP_FRONT_RIGHT | ";
+ ++n;
+ }
+ if (channel_mask & SPEAKER_TOP_BACK_LEFT) {
+ ss += "TOP_BACK_LEFT | ";
+ ++n;
+ }
+ if (channel_mask & SPEAKER_TOP_BACK_CENTER) {
+ ss += "TOP_BACK_CENTER | ";
+ ++n;
+ }
+ if (channel_mask & SPEAKER_TOP_BACK_RIGHT) {
+ ss += "TOP_BACK_RIGHT | ";
+ ++n;
+ }
+
+ if (!ss.empty()) {
+ // Delete last appended " | " substring.
+ ss.erase(ss.end() - 3, ss.end());
+ }
+ ss += " (";
+ ss += std::to_string(n);
+ ss += ")";
+ return ss;
+}
+
+#if !defined(KSAUDIO_SPEAKER_1POINT1)
+// These values are only defined in ksmedia.h after a certain version; to
+// build cleanly for older Windows versions, this defines the ones that are
+// missing.
+#define KSAUDIO_SPEAKER_1POINT1 (SPEAKER_FRONT_CENTER | SPEAKER_LOW_FREQUENCY)
+#define KSAUDIO_SPEAKER_2POINT1 \
+ (SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT | SPEAKER_LOW_FREQUENCY)
+#define KSAUDIO_SPEAKER_3POINT0 \
+ (SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT | SPEAKER_FRONT_CENTER)
+#define KSAUDIO_SPEAKER_3POINT1 \
+ (SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT | SPEAKER_FRONT_CENTER | \
+ SPEAKER_LOW_FREQUENCY)
+#define KSAUDIO_SPEAKER_5POINT0 \
+ (SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT | SPEAKER_FRONT_CENTER | \
+ SPEAKER_SIDE_LEFT | SPEAKER_SIDE_RIGHT)
+#define KSAUDIO_SPEAKER_7POINT0 \
+ (SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT | SPEAKER_FRONT_CENTER | \
+ SPEAKER_BACK_LEFT | SPEAKER_BACK_RIGHT | SPEAKER_SIDE_LEFT | \
+ SPEAKER_SIDE_RIGHT)
+#endif
+
+#if !defined(AUDCLNT_STREAMFLAGS_SRC_DEFAULT_QUALITY)
+#define AUDCLNT_STREAMFLAGS_SRC_DEFAULT_QUALITY 0x08000000
+#define AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM 0x80000000
+#endif
+
+// Converts the most common format tags defined in mmreg.h into string
+// equivalents. Mainly intended for log messages.
+const char* WaveFormatTagToString(WORD format_tag) {
+ switch (format_tag) {
+ case WAVE_FORMAT_UNKNOWN:
+ return "WAVE_FORMAT_UNKNOWN";
+ case WAVE_FORMAT_PCM:
+ return "WAVE_FORMAT_PCM";
+ case WAVE_FORMAT_IEEE_FLOAT:
+ return "WAVE_FORMAT_IEEE_FLOAT";
+ case WAVE_FORMAT_EXTENSIBLE:
+ return "WAVE_FORMAT_EXTENSIBLE";
+ default:
+ return "UNKNOWN";
+ }
+}
+
+const char* RoleToString(const ERole role) {
+ switch (role) {
+ case eConsole:
+ return "Console";
+ case eMultimedia:
+ return "Multimedia";
+ case eCommunications:
+ return "Communications";
+ default:
+ return "Unsupported";
+ }
+}
+
+const char* FlowToString(const EDataFlow flow) {
+ switch (flow) {
+ case eRender:
+ return "Render";
+ case eCapture:
+ return "Capture";
+ case eAll:
+ return "Render or Capture";
+ default:
+ return "Unsupported";
+ }
+}
+
+bool LoadAudiosesDll() {
+ static const wchar_t* const kAudiosesDLL =
+ L"%WINDIR%\\system32\\audioses.dll";
+ wchar_t path[MAX_PATH] = {0};
+ ExpandEnvironmentStringsW(kAudiosesDLL, path, arraysize(path));
+ RTC_DLOG(LS_INFO) << rtc::ToUtf8(path);
+ return (LoadLibraryExW(path, nullptr, LOAD_WITH_ALTERED_SEARCH_PATH) !=
+ nullptr);
+}
+
+bool LoadAvrtDll() {
+ static const wchar_t* const kAvrtDLL = L"%WINDIR%\\system32\\Avrt.dll";
+ wchar_t path[MAX_PATH] = {0};
+ ExpandEnvironmentStringsW(kAvrtDLL, path, arraysize(path));
+ RTC_DLOG(LS_INFO) << rtc::ToUtf8(path);
+ return (LoadLibraryExW(path, nullptr, LOAD_WITH_ALTERED_SEARCH_PATH) !=
+ nullptr);
+}
+
+ComPtr<IMMDeviceEnumerator> CreateDeviceEnumeratorInternal(
+ bool allow_reinitialize) {
+ ComPtr<IMMDeviceEnumerator> device_enumerator;
+ _com_error error =
+ ::CoCreateInstance(__uuidof(MMDeviceEnumerator), nullptr, CLSCTX_ALL,
+ IID_PPV_ARGS(&device_enumerator));
+ if (FAILED(error.Error())) {
+ RTC_LOG(LS_ERROR) << "CoCreateInstance failed: " << ErrorToString(error);
+ }
+
+ if (error.Error() == CO_E_NOTINITIALIZED && allow_reinitialize) {
+ RTC_LOG(LS_ERROR) << "CoCreateInstance failed with CO_E_NOTINITIALIZED";
+ // We have seen crashes which indicates that this method can in fact
+ // fail with CO_E_NOTINITIALIZED in combination with certain 3rd party
+ // modules. Calling CoInitializeEx() is an attempt to resolve the reported
+ // issues. See http://crbug.com/378465 for details.
+ error = CoInitializeEx(nullptr, COINIT_MULTITHREADED);
+ if (FAILED(error.Error())) {
+ error = ::CoCreateInstance(__uuidof(MMDeviceEnumerator), nullptr,
+ CLSCTX_ALL, IID_PPV_ARGS(&device_enumerator));
+ if (FAILED(error.Error())) {
+ RTC_LOG(LS_ERROR) << "CoCreateInstance failed: "
+ << ErrorToString(error);
+ }
+ }
+ }
+ return device_enumerator;
+}
+
+bool IsSupportedInternal() {
+ // The Core Audio APIs are implemented in the user-mode system components
+ // Audioses.dll and Mmdevapi.dll. Dependency Walker shows that it is
+  // enough to verify that the Audioses DLL can be loaded since it depends
+  // on Mmdevapi.dll. See http://crbug.com/166397 for why this extra step is
+  // required to guarantee Core Audio support.
+ if (!LoadAudiosesDll())
+ return false;
+
+ // Being able to load the Audioses.dll does not seem to be sufficient for
+  // all devices to guarantee Core Audio support. To be 100% sure, we also
+  // verify that it is possible to create the IMMDeviceEnumerator interface.
+  // If this works as well, we should be home free.
+ ComPtr<IMMDeviceEnumerator> device_enumerator =
+ CreateDeviceEnumeratorInternal(false);
+ if (!device_enumerator) {
+ RTC_LOG(LS_ERROR)
+ << "Failed to create Core Audio device enumerator on thread with ID "
+ << rtc::CurrentThreadId();
+ return false;
+ }
+
+ return true;
+}
+
+bool IsDeviceActive(IMMDevice* device) {
+ DWORD state = DEVICE_STATE_DISABLED;
+ return SUCCEEDED(device->GetState(&state)) && (state & DEVICE_STATE_ACTIVE);
+}
+
+// Retrieve an audio device specified by `device_id` or a default device
+// specified by data-flow direction and role if `device_id` is the default ID.
+ComPtr<IMMDevice> CreateDeviceInternal(absl::string_view device_id,
+ EDataFlow data_flow,
+ ERole role) {
+ RTC_DLOG(LS_INFO) << "CreateDeviceInternal: "
+ "id="
+ << device_id << ", flow=" << FlowToString(data_flow)
+ << ", role=" << RoleToString(role);
+ ComPtr<IMMDevice> audio_endpoint_device;
+
+ // Create the IMMDeviceEnumerator interface.
+ ComPtr<IMMDeviceEnumerator> device_enum(CreateDeviceEnumeratorInternal(true));
+ if (!device_enum.Get())
+ return audio_endpoint_device;
+
+ _com_error error(S_FALSE);
+ if (device_id == AudioDeviceName::kDefaultDeviceId) {
+ // Get the default audio endpoint for the specified data-flow direction and
+ // role. Note that, if only a single rendering or capture device is
+ // available, the system always assigns all three rendering or capture roles
+ // to that device. If the method fails to find a rendering or capture device
+ // for the specified role, this means that no rendering or capture device is
+ // available at all. If no device is available, the method sets the output
+ // pointer to NULL and returns ERROR_NOT_FOUND.
+ error = device_enum->GetDefaultAudioEndpoint(
+ data_flow, role, audio_endpoint_device.GetAddressOf());
+ if (FAILED(error.Error())) {
+ RTC_LOG(LS_ERROR)
+ << "IMMDeviceEnumerator::GetDefaultAudioEndpoint failed: "
+ << ErrorToString(error);
+ }
+ } else {
+ // Ask for an audio endpoint device that is identified by an endpoint ID
+ // string.
+ error = device_enum->GetDevice(rtc::ToUtf16(device_id).c_str(),
+ audio_endpoint_device.GetAddressOf());
+ if (FAILED(error.Error())) {
+ RTC_LOG(LS_ERROR) << "IMMDeviceEnumerator::GetDevice failed: "
+ << ErrorToString(error);
+ }
+ }
+
+ // Verify that the audio endpoint device is active, i.e., that the audio
+ // adapter that connects to the endpoint device is present and enabled.
+ if (SUCCEEDED(error.Error()) && audio_endpoint_device.Get() &&
+ !IsDeviceActive(audio_endpoint_device.Get())) {
+ RTC_LOG(LS_WARNING) << "Selected endpoint device is not active";
+ audio_endpoint_device.Reset();
+ }
+
+ return audio_endpoint_device;
+}
+
+std::string GetDeviceIdInternal(IMMDevice* device) {
+ // Retrieve unique name of endpoint device.
+ // Example: "{0.0.1.00000000}.{8db6020f-18e3-4f25-b6f5-7726c9122574}".
+ LPWSTR device_id;
+ if (SUCCEEDED(device->GetId(&device_id))) {
+ std::string device_id_utf8 = rtc::ToUtf8(device_id, wcslen(device_id));
+ CoTaskMemFree(device_id);
+ return device_id_utf8;
+ } else {
+ return std::string();
+ }
+}
+
+std::string GetDeviceFriendlyNameInternal(IMMDevice* device) {
+ // Retrieve user-friendly name of endpoint device.
+ // Example: "Microphone (Realtek High Definition Audio)".
+ ComPtr<IPropertyStore> properties;
+ HRESULT hr = device->OpenPropertyStore(STGM_READ, properties.GetAddressOf());
+ if (FAILED(hr))
+ return std::string();
+
+ ScopedPropVariant friendly_name_pv;
+ hr = properties->GetValue(PKEY_Device_FriendlyName,
+ friendly_name_pv.Receive());
+ if (FAILED(hr))
+ return std::string();
+
+ if (friendly_name_pv.get().vt == VT_LPWSTR &&
+ friendly_name_pv.get().pwszVal) {
+ return rtc::ToUtf8(friendly_name_pv.get().pwszVal,
+ wcslen(friendly_name_pv.get().pwszVal));
+ } else {
+ return std::string();
+ }
+}
+
+ComPtr<IAudioSessionManager2> CreateSessionManager2Internal(
+ IMMDevice* audio_device) {
+ if (!audio_device)
+ return ComPtr<IAudioSessionManager2>();
+
+ ComPtr<IAudioSessionManager2> audio_session_manager;
+ _com_error error =
+ audio_device->Activate(__uuidof(IAudioSessionManager2), CLSCTX_ALL,
+ nullptr, &audio_session_manager);
+ if (FAILED(error.Error())) {
+ RTC_LOG(LS_ERROR) << "IMMDevice::Activate(IAudioSessionManager2) failed: "
+ << ErrorToString(error);
+ }
+ return audio_session_manager;
+}
+
+ComPtr<IAudioSessionEnumerator> CreateSessionEnumeratorInternal(
+ IMMDevice* audio_device) {
+ if (!audio_device) {
+ return ComPtr<IAudioSessionEnumerator>();
+ }
+
+ ComPtr<IAudioSessionEnumerator> audio_session_enumerator;
+ ComPtr<IAudioSessionManager2> audio_session_manager =
+ CreateSessionManager2Internal(audio_device);
+ if (!audio_session_manager.Get()) {
+ return audio_session_enumerator;
+ }
+ _com_error error =
+ audio_session_manager->GetSessionEnumerator(&audio_session_enumerator);
+ if (FAILED(error.Error())) {
+ RTC_LOG(LS_ERROR)
+ << "IAudioSessionEnumerator::IAudioSessionEnumerator failed: "
+ << ErrorToString(error);
+ return ComPtr<IAudioSessionEnumerator>();
+ }
+ return audio_session_enumerator;
+}
+
+// Creates and activates an IAudioClient COM object given the selected
+// endpoint device.
+ComPtr<IAudioClient> CreateClientInternal(IMMDevice* audio_device) {
+ if (!audio_device)
+ return ComPtr<IAudioClient>();
+
+ ComPtr<IAudioClient> audio_client;
+ _com_error error = audio_device->Activate(__uuidof(IAudioClient), CLSCTX_ALL,
+ nullptr, &audio_client);
+ if (FAILED(error.Error())) {
+ RTC_LOG(LS_ERROR) << "IMMDevice::Activate(IAudioClient) failed: "
+ << ErrorToString(error);
+ }
+ return audio_client;
+}
+
+ComPtr<IAudioClient2> CreateClient2Internal(IMMDevice* audio_device) {
+ if (!audio_device)
+ return ComPtr<IAudioClient2>();
+
+ ComPtr<IAudioClient2> audio_client;
+ _com_error error = audio_device->Activate(__uuidof(IAudioClient2), CLSCTX_ALL,
+ nullptr, &audio_client);
+ if (FAILED(error.Error())) {
+ RTC_LOG(LS_ERROR) << "IMMDevice::Activate(IAudioClient2) failed: "
+ << ErrorToString(error);
+ }
+ return audio_client;
+}
+
+ComPtr<IAudioClient3> CreateClient3Internal(IMMDevice* audio_device) {
+ if (!audio_device)
+ return ComPtr<IAudioClient3>();
+
+ ComPtr<IAudioClient3> audio_client;
+ _com_error error = audio_device->Activate(__uuidof(IAudioClient3), CLSCTX_ALL,
+ nullptr, &audio_client);
+ if (FAILED(error.Error())) {
+ RTC_LOG(LS_ERROR) << "IMMDevice::Activate(IAudioClient3) failed: "
+ << ErrorToString(error);
+ }
+ return audio_client;
+}
+
+ComPtr<IMMDeviceCollection> CreateCollectionInternal(EDataFlow data_flow) {
+ ComPtr<IMMDeviceEnumerator> device_enumerator(
+ CreateDeviceEnumeratorInternal(true));
+ if (!device_enumerator) {
+ return ComPtr<IMMDeviceCollection>();
+ }
+
+ // Generate a collection of active (present and not disabled) audio endpoint
+ // devices for the specified data-flow direction.
+  // The call succeeds, with an empty collection, even if no devices are active.
+ ComPtr<IMMDeviceCollection> collection;
+ _com_error error = device_enumerator->EnumAudioEndpoints(
+ data_flow, DEVICE_STATE_ACTIVE, collection.GetAddressOf());
+ if (FAILED(error.Error())) {
+ RTC_LOG(LS_ERROR) << "IMMDeviceCollection::EnumAudioEndpoints failed: "
+ << ErrorToString(error);
+ }
+ return collection;
+}
+
+bool GetDeviceNamesInternal(EDataFlow data_flow,
+ webrtc::AudioDeviceNames* device_names) {
+ RTC_DLOG(LS_INFO) << "GetDeviceNamesInternal: flow="
+ << FlowToString(data_flow);
+
+ // Generate a collection of active audio endpoint devices for the specified
+ // direction.
+ ComPtr<IMMDeviceCollection> collection = CreateCollectionInternal(data_flow);
+ if (!collection.Get()) {
+ RTC_LOG(LS_ERROR) << "Failed to create a collection of active devices";
+ return false;
+ }
+
+ // Retrieve the number of active (present, not disabled and plugged in) audio
+ // devices for the specified direction.
+ UINT number_of_active_devices = 0;
+ _com_error error = collection->GetCount(&number_of_active_devices);
+ if (FAILED(error.Error())) {
+ RTC_LOG(LS_ERROR) << "IMMDeviceCollection::GetCount failed: "
+ << ErrorToString(error);
+ return false;
+ }
+
+ if (number_of_active_devices == 0) {
+ RTC_DLOG(LS_WARNING) << "Found no active devices";
+ return false;
+ }
+
+ // Loop over all active devices and add friendly name and unique id to the
+ // `device_names` queue. For now, devices are added at indexes 0, 1, ..., N-1
+  // but they will be moved to 2, 3, ..., N+1 at the next stage when default
+  // and default communication devices are added at indexes 0 and 1.
+ ComPtr<IMMDevice> audio_device;
+ for (UINT i = 0; i < number_of_active_devices; ++i) {
+ // Retrieve a pointer to the specified item in the device collection.
+ error = collection->Item(i, audio_device.GetAddressOf());
+ if (FAILED(error.Error())) {
+ // Skip this item and try to get the next item instead; will result in an
+ // incomplete list of devices.
+ RTC_LOG(LS_WARNING) << "IMMDeviceCollection::Item failed: "
+ << ErrorToString(error);
+ continue;
+ }
+ if (!audio_device.Get()) {
+ RTC_LOG(LS_WARNING) << "Invalid audio device";
+ continue;
+ }
+
+ // Retrieve the complete device name for the given audio device endpoint.
+ AudioDeviceName device_name(
+ GetDeviceFriendlyNameInternal(audio_device.Get()),
+ GetDeviceIdInternal(audio_device.Get()));
+ // Add combination of user-friendly and unique name to the output list.
+ device_names->push_back(device_name);
+ }
+
+  // Log a warning if the list of devices is not complete, but keep trying to
+  // add the default and default communications devices at the front.
+ if (device_names->size() != number_of_active_devices) {
+ RTC_DLOG(LS_WARNING)
+ << "List of device names does not contain all active devices";
+ }
+
+  // Skip adding the default and default communication devices if no active
+  // device could be added to the queue; return false since no active devices
+  // were identified.
+ if (device_names->empty()) {
+ RTC_DLOG(LS_ERROR) << "List of active devices is empty";
+ return false;
+ }
+
+ // Prepend the queue with two more elements: one for the default device and
+ // one for the default communication device (can correspond to the same unique
+ // id if only one active device exists). The first element (index 0) is the
+ // default device and the second element (index 1) is the default
+ // communication device.
+ ERole role[] = {eCommunications, eConsole};
+ ComPtr<IMMDevice> default_device;
+ AudioDeviceName default_device_name;
+ for (size_t i = 0; i < arraysize(role); ++i) {
+ default_device = CreateDeviceInternal(AudioDeviceName::kDefaultDeviceId,
+ data_flow, role[i]);
+ if (!default_device.Get()) {
+ // Add empty strings to device name if the device could not be created.
+ RTC_DLOG(LS_WARNING) << "Failed to add device with role: "
+ << RoleToString(role[i]);
+ default_device_name.device_name = std::string();
+ default_device_name.unique_id = std::string();
+ } else {
+ // Populate the device name with friendly name and unique id.
+ std::string device_name;
+ device_name += (role[i] == eConsole ? "Default - " : "Communication - ");
+ device_name += GetDeviceFriendlyNameInternal(default_device.Get());
+ std::string unique_id = GetDeviceIdInternal(default_device.Get());
+ default_device_name.device_name = std::move(device_name);
+ default_device_name.unique_id = std::move(unique_id);
+ }
+
+ // Add combination of user-friendly and unique name to the output queue.
+ // The last element (<=> eConsole) will be at the front of the queue, hence
+ // at index 0. Empty strings will be added for cases where no default
+ // devices were found.
+ device_names->push_front(default_device_name);
+ }
+
+ // Example of log output when only one device is active. Note that the queue
+ // contains two extra elements at index 0 (Default) and 1 (Communication) to
+  // allow selection of a device by role instead of id. All elements correspond
+  // to the same unique id.
+ // [0] friendly name: Default - Headset Microphone (2- Arctis 7 Chat)
+ // [0] unique id : {0.0.1.00000000}.{ff9eed76-196e-467a-b295-26986e69451c}
+ // [1] friendly name: Communication - Headset Microphone (2- Arctis 7 Chat)
+ // [1] unique id : {0.0.1.00000000}.{ff9eed76-196e-467a-b295-26986e69451c}
+ // [2] friendly name: Headset Microphone (2- Arctis 7 Chat)
+ // [2] unique id : {0.0.1.00000000}.{ff9eed76-196e-467a-b295-26986e69451c}
+ for (size_t i = 0; i < device_names->size(); ++i) {
+ RTC_DLOG(LS_INFO) << "[" << i
+ << "] friendly name: " << (*device_names)[i].device_name;
+ RTC_DLOG(LS_INFO) << "[" << i
+ << "] unique id : " << (*device_names)[i].unique_id;
+ }
+
+ return true;
+}
+
+HRESULT GetPreferredAudioParametersInternal(IAudioClient* client,
+ AudioParameters* params,
+ int fixed_sample_rate) {
+ WAVEFORMATPCMEX mix_format;
+ HRESULT hr = core_audio_utility::GetSharedModeMixFormat(client, &mix_format);
+ if (FAILED(hr))
+ return hr;
+
+ REFERENCE_TIME default_period = 0;
+ hr = core_audio_utility::GetDevicePeriod(client, AUDCLNT_SHAREMODE_SHARED,
+ &default_period);
+ if (FAILED(hr))
+ return hr;
+
+ int sample_rate = mix_format.Format.nSamplesPerSec;
+ // Override default sample rate if `fixed_sample_rate` is set and different
+ // from the default rate.
+ if (fixed_sample_rate > 0 && fixed_sample_rate != sample_rate) {
+ RTC_DLOG(LS_INFO) << "Using fixed sample rate instead of the preferred: "
+ << sample_rate << " is replaced by " << fixed_sample_rate;
+ sample_rate = fixed_sample_rate;
+ }
+ // TODO(henrika): utilize full mix_format.Format.wBitsPerSample.
+ // const size_t bits_per_sample = AudioParameters::kBitsPerSample;
+ // TODO(henrika): improve channel layout support.
+ const size_t channels = mix_format.Format.nChannels;
+
+ // Use the native device period to derive the smallest possible buffer size
+ // in shared mode.
+ double device_period_in_seconds =
+ static_cast<double>(
+ core_audio_utility::ReferenceTimeToTimeDelta(default_period).ms()) /
+ 1000.0L;
+ const size_t frames_per_buffer =
+ static_cast<size_t>(sample_rate * device_period_in_seconds + 0.5);
+
+ AudioParameters audio_params(sample_rate, channels, frames_per_buffer);
+ *params = audio_params;
+ RTC_DLOG(LS_INFO) << audio_params.ToString();
+
+ return hr;
+}
+
+} // namespace
+
+namespace core_audio_utility {
+
+// core_audio_utility::WaveFormatWrapper implementation.
+WAVEFORMATEXTENSIBLE* WaveFormatWrapper::GetExtensible() const {
+ RTC_CHECK(IsExtensible());
+ return reinterpret_cast<WAVEFORMATEXTENSIBLE*>(ptr_);
+}
+
+bool WaveFormatWrapper::IsExtensible() const {
+ return ptr_->wFormatTag == WAVE_FORMAT_EXTENSIBLE && ptr_->cbSize >= 22;
+}
+
+bool WaveFormatWrapper::IsPcm() const {
+ return IsExtensible() ? GetExtensible()->SubFormat == KSDATAFORMAT_SUBTYPE_PCM
+ : ptr_->wFormatTag == WAVE_FORMAT_PCM;
+}
+
+bool WaveFormatWrapper::IsFloat() const {
+ return IsExtensible()
+ ? GetExtensible()->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT
+ : ptr_->wFormatTag == WAVE_FORMAT_IEEE_FLOAT;
+}
+
+size_t WaveFormatWrapper::size() const {
+ return sizeof(*ptr_) + ptr_->cbSize;
+}
+
+bool IsSupported() {
+ RTC_DLOG(LS_INFO) << "IsSupported";
+ static bool g_is_supported = IsSupportedInternal();
+ return g_is_supported;
+}
+
+bool IsMMCSSSupported() {
+ RTC_DLOG(LS_INFO) << "IsMMCSSSupported";
+ return LoadAvrtDll();
+}
+
+int NumberOfActiveDevices(EDataFlow data_flow) {
+ // Generate a collection of active audio endpoint devices for the specified
+ // data-flow direction.
+ ComPtr<IMMDeviceCollection> collection = CreateCollectionInternal(data_flow);
+ if (!collection.Get()) {
+ return 0;
+ }
+
+ // Retrieve the number of active audio devices for the specified direction.
+ UINT number_of_active_devices = 0;
+ collection->GetCount(&number_of_active_devices);
+ std::string str;
+ if (data_flow == eCapture) {
+ str = "Number of capture devices: ";
+ } else if (data_flow == eRender) {
+ str = "Number of render devices: ";
+ } else if (data_flow == eAll) {
+ str = "Total number of devices: ";
+ }
+ RTC_DLOG(LS_INFO) << str << number_of_active_devices;
+ return static_cast<int>(number_of_active_devices);
+}
+
+uint32_t GetAudioClientVersion() {
+ uint32_t version = 1;
+ if (rtc::rtc_win::GetVersion() >= rtc::rtc_win::VERSION_WIN10) {
+ version = 3;
+ } else if (rtc::rtc_win::GetVersion() >= rtc::rtc_win::VERSION_WIN8) {
+ version = 2;
+ }
+ return version;
+}
+
+ComPtr<IMMDeviceEnumerator> CreateDeviceEnumerator() {
+ RTC_DLOG(LS_INFO) << "CreateDeviceEnumerator";
+ return CreateDeviceEnumeratorInternal(true);
+}
+
+std::string GetDefaultInputDeviceID() {
+ RTC_DLOG(LS_INFO) << "GetDefaultInputDeviceID";
+ ComPtr<IMMDevice> device(
+ CreateDevice(AudioDeviceName::kDefaultDeviceId, eCapture, eConsole));
+ return device.Get() ? GetDeviceIdInternal(device.Get()) : std::string();
+}
+
+std::string GetDefaultOutputDeviceID() {
+ RTC_DLOG(LS_INFO) << "GetDefaultOutputDeviceID";
+ ComPtr<IMMDevice> device(
+ CreateDevice(AudioDeviceName::kDefaultDeviceId, eRender, eConsole));
+ return device.Get() ? GetDeviceIdInternal(device.Get()) : std::string();
+}
+
+std::string GetCommunicationsInputDeviceID() {
+ RTC_DLOG(LS_INFO) << "GetCommunicationsInputDeviceID";
+ ComPtr<IMMDevice> device(CreateDevice(AudioDeviceName::kDefaultDeviceId,
+ eCapture, eCommunications));
+ return device.Get() ? GetDeviceIdInternal(device.Get()) : std::string();
+}
+
+std::string GetCommunicationsOutputDeviceID() {
+ RTC_DLOG(LS_INFO) << "GetCommunicationsOutputDeviceID";
+ ComPtr<IMMDevice> device(CreateDevice(AudioDeviceName::kDefaultDeviceId,
+ eRender, eCommunications));
+ return device.Get() ? GetDeviceIdInternal(device.Get()) : std::string();
+}
+
+ComPtr<IMMDevice> CreateDevice(absl::string_view device_id,
+ EDataFlow data_flow,
+ ERole role) {
+ RTC_DLOG(LS_INFO) << "CreateDevice";
+ return CreateDeviceInternal(device_id, data_flow, role);
+}
+
+AudioDeviceName GetDeviceName(IMMDevice* device) {
+ RTC_DLOG(LS_INFO) << "GetDeviceName";
+ RTC_DCHECK(device);
+ AudioDeviceName device_name(GetDeviceFriendlyNameInternal(device),
+ GetDeviceIdInternal(device));
+ RTC_DLOG(LS_INFO) << "friendly name: " << device_name.device_name;
+ RTC_DLOG(LS_INFO) << "unique id : " << device_name.unique_id;
+ return device_name;
+}
+
+std::string GetFriendlyName(absl::string_view device_id,
+ EDataFlow data_flow,
+ ERole role) {
+ RTC_DLOG(LS_INFO) << "GetFriendlyName";
+ ComPtr<IMMDevice> audio_device = CreateDevice(device_id, data_flow, role);
+ if (!audio_device.Get())
+ return std::string();
+
+ AudioDeviceName device_name = GetDeviceName(audio_device.Get());
+ return device_name.device_name;
+}
+
+EDataFlow GetDataFlow(IMMDevice* device) {
+ RTC_DLOG(LS_INFO) << "GetDataFlow";
+ RTC_DCHECK(device);
+ ComPtr<IMMEndpoint> endpoint;
+ _com_error error = device->QueryInterface(endpoint.GetAddressOf());
+ if (FAILED(error.Error())) {
+ RTC_LOG(LS_ERROR) << "IMMDevice::QueryInterface failed: "
+ << ErrorToString(error);
+ return eAll;
+ }
+
+ EDataFlow data_flow;
+ error = endpoint->GetDataFlow(&data_flow);
+ if (FAILED(error.Error())) {
+ RTC_LOG(LS_ERROR) << "IMMEndpoint::GetDataFlow failed: "
+ << ErrorToString(error);
+ return eAll;
+ }
+ return data_flow;
+}
+
+bool GetInputDeviceNames(webrtc::AudioDeviceNames* device_names) {
+ RTC_DLOG(LS_INFO) << "GetInputDeviceNames";
+ RTC_DCHECK(device_names);
+ RTC_DCHECK(device_names->empty());
+ return GetDeviceNamesInternal(eCapture, device_names);
+}
+
+bool GetOutputDeviceNames(webrtc::AudioDeviceNames* device_names) {
+ RTC_DLOG(LS_INFO) << "GetOutputDeviceNames";
+ RTC_DCHECK(device_names);
+ RTC_DCHECK(device_names->empty());
+ return GetDeviceNamesInternal(eRender, device_names);
+}
+
+ComPtr<IAudioSessionManager2> CreateSessionManager2(IMMDevice* device) {
+ RTC_DLOG(LS_INFO) << "CreateSessionManager2";
+ return CreateSessionManager2Internal(device);
+}
+
+Microsoft::WRL::ComPtr<IAudioSessionEnumerator> CreateSessionEnumerator(
+ IMMDevice* device) {
+ RTC_DLOG(LS_INFO) << "CreateSessionEnumerator";
+ return CreateSessionEnumeratorInternal(device);
+}
+
+int NumberOfActiveSessions(IMMDevice* device) {
+ RTC_DLOG(LS_INFO) << "NumberOfActiveSessions";
+  ComPtr<IAudioSessionEnumerator> session_enumerator =
+      CreateSessionEnumerator(device);
+  if (!session_enumerator.Get()) {
+    return 0;
+  }
+
+ // Iterate over all audio sessions for the given device.
+ int session_count = 0;
+ _com_error error = session_enumerator->GetCount(&session_count);
+ if (FAILED(error.Error())) {
+ RTC_LOG(LS_ERROR) << "IAudioSessionEnumerator::GetCount failed: "
+ << ErrorToString(error);
+ return 0;
+ }
+ RTC_DLOG(LS_INFO) << "Total number of audio sessions: " << session_count;
+
+ int num_active = 0;
+ for (int session = 0; session < session_count; session++) {
+ // Acquire the session control interface.
+ ComPtr<IAudioSessionControl> session_control;
+ error = session_enumerator->GetSession(session, &session_control);
+ if (FAILED(error.Error())) {
+ RTC_LOG(LS_ERROR) << "IAudioSessionEnumerator::GetSession failed: "
+ << ErrorToString(error);
+ return 0;
+ }
+
+ // Log the display name of the audio session for debugging purposes.
+ LPWSTR display_name;
+ if (SUCCEEDED(session_control->GetDisplayName(&display_name))) {
+ RTC_DLOG(LS_INFO) << "display name: "
+ << rtc::ToUtf8(display_name, wcslen(display_name));
+ CoTaskMemFree(display_name);
+ }
+
+ // Get the current state and check if the state is active or not.
+ AudioSessionState state;
+ error = session_control->GetState(&state);
+ if (FAILED(error.Error())) {
+ RTC_LOG(LS_ERROR) << "IAudioSessionControl::GetState failed: "
+ << ErrorToString(error);
+ return 0;
+ }
+ if (state == AudioSessionStateActive) {
+ ++num_active;
+ }
+ }
+
+ RTC_DLOG(LS_INFO) << "Number of active audio sessions: " << num_active;
+ return num_active;
+}
+
+ComPtr<IAudioClient> CreateClient(absl::string_view device_id,
+ EDataFlow data_flow,
+ ERole role) {
+ RTC_DLOG(LS_INFO) << "CreateClient";
+ ComPtr<IMMDevice> device(CreateDevice(device_id, data_flow, role));
+ return CreateClientInternal(device.Get());
+}
+
+ComPtr<IAudioClient2> CreateClient2(absl::string_view device_id,
+ EDataFlow data_flow,
+ ERole role) {
+ RTC_DLOG(LS_INFO) << "CreateClient2";
+ ComPtr<IMMDevice> device(CreateDevice(device_id, data_flow, role));
+ return CreateClient2Internal(device.Get());
+}
+
+ComPtr<IAudioClient3> CreateClient3(absl::string_view device_id,
+ EDataFlow data_flow,
+ ERole role) {
+ RTC_DLOG(LS_INFO) << "CreateClient3";
+ ComPtr<IMMDevice> device(CreateDevice(device_id, data_flow, role));
+ return CreateClient3Internal(device.Get());
+}
+
+HRESULT SetClientProperties(IAudioClient2* client) {
+ RTC_DLOG(LS_INFO) << "SetClientProperties";
+ RTC_DCHECK(client);
+ if (GetAudioClientVersion() < 2) {
+ RTC_LOG(LS_WARNING) << "Requires IAudioClient2 or higher";
+ return AUDCLNT_E_UNSUPPORTED_FORMAT;
+ }
+ AudioClientProperties props = {0};
+ props.cbSize = sizeof(AudioClientProperties);
+ // Real-time VoIP communication.
+ // TODO(henrika): other categories?
+ props.eCategory = AudioCategory_Communications;
+ // Hardware-offloaded audio processing allows the main audio processing tasks
+ // to be performed outside the computer's main CPU. Check support and log the
+ // result but hard-code `bIsOffload` to FALSE for now.
+ // TODO(henrika): evaluate hardware-offloading. Might complicate usage of
+ // IAudioClient::GetMixFormat().
+ BOOL supports_offload = FALSE;
+ _com_error error =
+ client->IsOffloadCapable(props.eCategory, &supports_offload);
+ if (FAILED(error.Error())) {
+ RTC_LOG(LS_ERROR) << "IAudioClient2::IsOffloadCapable failed: "
+ << ErrorToString(error);
+ }
+ RTC_DLOG(LS_INFO) << "supports_offload: " << supports_offload;
+ props.bIsOffload = false;
+#if (NTDDI_VERSION < NTDDI_WINBLUE)
+ RTC_DLOG(LS_INFO) << "options: Not supported in this build";
+#else
+ // TODO(henrika): pros and cons compared with AUDCLNT_STREAMOPTIONS_NONE?
+ props.Options |= AUDCLNT_STREAMOPTIONS_NONE;
+ // Requires System.Devices.AudioDevice.RawProcessingSupported.
+ // The application can choose to *always ignore* the OEM AEC/AGC by setting
+ // the AUDCLNT_STREAMOPTIONS_RAW flag in the call to SetClientProperties.
+ // This flag will preserve the user experience aspect of Communications
+ // streams, but will not insert any OEM provided communications specific
+ // processing in the audio signal path.
+ // props.Options |= AUDCLNT_STREAMOPTIONS_RAW;
+
+ // If it is important to avoid resampling in the audio engine, set this flag.
+ // AUDCLNT_STREAMOPTIONS_MATCH_FORMAT (or anything in IAudioClient3) is not
+ // an appropriate interface to use for communications scenarios.
+ // This interface is mainly meant for pro audio scenarios.
+ // props.Options |= AUDCLNT_STREAMOPTIONS_MATCH_FORMAT;
+ RTC_DLOG(LS_INFO) << "options: 0x" << rtc::ToHex(props.Options);
+#endif
+ error = client->SetClientProperties(&props);
+ if (FAILED(error.Error())) {
+ RTC_LOG(LS_ERROR) << "IAudioClient2::SetClientProperties failed: "
+ << ErrorToString(error);
+ }
+ return error.Error();
+}
+
+HRESULT GetBufferSizeLimits(IAudioClient2* client,
+ const WAVEFORMATEXTENSIBLE* format,
+ REFERENCE_TIME* min_buffer_duration,
+ REFERENCE_TIME* max_buffer_duration) {
+ RTC_DLOG(LS_INFO) << "GetBufferSizeLimits";
+ RTC_DCHECK(client);
+ if (GetAudioClientVersion() < 2) {
+ RTC_LOG(LS_WARNING) << "Requires IAudioClient2 or higher";
+ return AUDCLNT_E_UNSUPPORTED_FORMAT;
+ }
+ REFERENCE_TIME min_duration = 0;
+ REFERENCE_TIME max_duration = 0;
+ _com_error error =
+ client->GetBufferSizeLimits(reinterpret_cast<const WAVEFORMATEX*>(format),
+ TRUE, &min_duration, &max_duration);
+ if (error.Error() == AUDCLNT_E_OFFLOAD_MODE_ONLY) {
+    // This API seems to be supported in offload mode only, but
+    // AUDCLNT_E_OFFLOAD_MODE_ONLY is not documented as a valid error code.
+    // Making a special note about it here.
+ RTC_LOG(LS_ERROR) << "IAudioClient2::GetBufferSizeLimits failed: "
+ "AUDCLNT_E_OFFLOAD_MODE_ONLY";
+ } else if (FAILED(error.Error())) {
+ RTC_LOG(LS_ERROR) << "IAudioClient2::GetBufferSizeLimits failed: "
+ << ErrorToString(error);
+ } else {
+ *min_buffer_duration = min_duration;
+ *max_buffer_duration = max_duration;
+ RTC_DLOG(LS_INFO) << "min_buffer_duration: " << min_buffer_duration;
+ RTC_DLOG(LS_INFO) << "max_buffer_duration: " << max_buffer_duration;
+ }
+ return error.Error();
+}
+
+HRESULT GetSharedModeMixFormat(IAudioClient* client,
+ WAVEFORMATEXTENSIBLE* format) {
+ RTC_DLOG(LS_INFO) << "GetSharedModeMixFormat";
+ RTC_DCHECK(client);
+
+ // The GetMixFormat method retrieves the stream format that the audio engine
+ // uses for its internal processing of shared-mode streams. The method
+ // allocates the storage for the structure and this memory will be released
+ // when `mix_format` goes out of scope. The GetMixFormat method retrieves a
+ // format descriptor that is in the form of a WAVEFORMATEXTENSIBLE structure
+ // instead of a standalone WAVEFORMATEX structure. The method outputs a
+ // pointer to the WAVEFORMATEX structure that is embedded at the start of
+ // this WAVEFORMATEXTENSIBLE structure.
+  // Note that crbug/803056 indicates that some devices can return a format
+  // where only the WAVEFORMATEX part is initialized, and we must be able to
+  // account for that.
+ ScopedCoMem<WAVEFORMATEXTENSIBLE> mix_format;
+ _com_error error =
+ client->GetMixFormat(reinterpret_cast<WAVEFORMATEX**>(&mix_format));
+ if (FAILED(error.Error())) {
+ RTC_LOG(LS_ERROR) << "IAudioClient::GetMixFormat failed: "
+ << ErrorToString(error);
+ return error.Error();
+ }
+
+ // Use a wave format wrapper to make things simpler.
+ WaveFormatWrapper wrapped_format(mix_format.Get());
+
+ // Verify that the reported format can be mixed by the audio engine in
+ // shared mode.
+ if (!wrapped_format.IsPcm() && !wrapped_format.IsFloat()) {
+ RTC_DLOG(LS_ERROR)
+ << "Only pure PCM or float audio streams can be mixed in shared mode";
+ return AUDCLNT_E_UNSUPPORTED_FORMAT;
+ }
+
+  // Log a warning for the rare case where `mix_format` only contains a
+  // stand-alone WAVEFORMATEX structure, but do not return early.
+ if (!wrapped_format.IsExtensible()) {
+ RTC_DLOG(LS_WARNING)
+ << "The returned format contains no extended information. "
+ "The size is "
+ << wrapped_format.size() << " bytes.";
+ }
+
+  // Copy the correct number of bytes into `*format` taking into account if
+  // the returned structure is correctly extended or not.
+ RTC_CHECK_LE(wrapped_format.size(), sizeof(WAVEFORMATEXTENSIBLE));
+ memcpy(format, wrapped_format.get(), wrapped_format.size());
+ RTC_DLOG(LS_INFO) << WaveFormatToString(format);
+
+ return error.Error();
+}
+
+bool IsFormatSupported(IAudioClient* client,
+ AUDCLNT_SHAREMODE share_mode,
+ const WAVEFORMATEXTENSIBLE* format) {
+ RTC_DLOG(LS_INFO) << "IsFormatSupported";
+ RTC_DCHECK(client);
+ ScopedCoMem<WAVEFORMATEX> closest_match;
+ // This method provides a way for a client to determine, before calling
+ // IAudioClient::Initialize, whether the audio engine supports a particular
+ // stream format or not. In shared mode, the audio engine always supports
+ // the mix format (see GetSharedModeMixFormat).
+ // TODO(henrika): verify support for exclusive mode as well?
+ _com_error error = client->IsFormatSupported(
+ share_mode, reinterpret_cast<const WAVEFORMATEX*>(format),
+ &closest_match);
+ RTC_LOG(LS_INFO) << WaveFormatToString(
+ const_cast<WAVEFORMATEXTENSIBLE*>(format));
+ if ((error.Error() == S_OK) && (closest_match == nullptr)) {
+ RTC_DLOG(LS_INFO)
+ << "The audio endpoint device supports the specified stream format";
+ } else if ((error.Error() == S_FALSE) && (closest_match != nullptr)) {
+ // Call succeeded with a closest match to the specified format. This log can
+ // only be triggered for shared mode.
+ RTC_LOG(LS_WARNING)
+ << "Exact format is not supported, but a closest match exists";
+ RTC_LOG(LS_INFO) << WaveFormatToString(closest_match.Get());
+ } else if ((error.Error() == AUDCLNT_E_UNSUPPORTED_FORMAT) &&
+ (closest_match == nullptr)) {
+ // The audio engine does not support the caller-specified format or any
+ // similar format.
+ RTC_DLOG(LS_INFO) << "The audio endpoint device does not support the "
+ "specified stream format";
+ } else {
+ RTC_LOG(LS_ERROR) << "IAudioClient::IsFormatSupported failed: "
+ << ErrorToString(error);
+ }
+
+ return (error.Error() == S_OK);
+}
+
+HRESULT GetDevicePeriod(IAudioClient* client,
+ AUDCLNT_SHAREMODE share_mode,
+ REFERENCE_TIME* device_period) {
+ RTC_DLOG(LS_INFO) << "GetDevicePeriod";
+ RTC_DCHECK(client);
+ // The `default_period` parameter specifies the default scheduling period
+ // for a shared-mode stream. The `minimum_period` parameter specifies the
+ // minimum scheduling period for an exclusive-mode stream.
+ // The time is expressed in 100-nanosecond units.
+ REFERENCE_TIME default_period = 0;
+ REFERENCE_TIME minimum_period = 0;
+ _com_error error = client->GetDevicePeriod(&default_period, &minimum_period);
+ if (FAILED(error.Error())) {
+ RTC_LOG(LS_ERROR) << "IAudioClient::GetDevicePeriod failed: "
+ << ErrorToString(error);
+ return error.Error();
+ }
+
+ *device_period = (share_mode == AUDCLNT_SHAREMODE_SHARED) ? default_period
+ : minimum_period;
+ RTC_LOG(LS_INFO) << "device_period: "
+ << ReferenceTimeToTimeDelta(*device_period).ms() << " [ms]";
+ RTC_LOG(LS_INFO) << "minimum_period: "
+ << ReferenceTimeToTimeDelta(minimum_period).ms() << " [ms]";
+ return error.Error();
+}
+
+HRESULT GetSharedModeEnginePeriod(IAudioClient3* client3,
+ const WAVEFORMATEXTENSIBLE* format,
+ uint32_t* default_period_in_frames,
+ uint32_t* fundamental_period_in_frames,
+ uint32_t* min_period_in_frames,
+ uint32_t* max_period_in_frames) {
+ RTC_DLOG(LS_INFO) << "GetSharedModeEnginePeriod";
+ RTC_DCHECK(client3);
+
+ UINT32 default_period = 0;
+ UINT32 fundamental_period = 0;
+ UINT32 min_period = 0;
+ UINT32 max_period = 0;
+ _com_error error = client3->GetSharedModeEnginePeriod(
+ reinterpret_cast<const WAVEFORMATEX*>(format), &default_period,
+ &fundamental_period, &min_period, &max_period);
+ if (FAILED(error.Error())) {
+ RTC_LOG(LS_ERROR) << "IAudioClient3::GetSharedModeEnginePeriod failed: "
+ << ErrorToString(error);
+ return error.Error();
+ }
+
+ WAVEFORMATEX format_ex = format->Format;
+ const WORD sample_rate = format_ex.nSamplesPerSec;
+ RTC_LOG(LS_INFO) << "default_period_in_frames: " << default_period << " ("
+ << FramesToMilliseconds(default_period, sample_rate)
+ << " ms)";
+ RTC_LOG(LS_INFO) << "fundamental_period_in_frames: " << fundamental_period
+ << " ("
+ << FramesToMilliseconds(fundamental_period, sample_rate)
+ << " ms)";
+ RTC_LOG(LS_INFO) << "min_period_in_frames: " << min_period << " ("
+ << FramesToMilliseconds(min_period, sample_rate) << " ms)";
+ RTC_LOG(LS_INFO) << "max_period_in_frames: " << max_period << " ("
+ << FramesToMilliseconds(max_period, sample_rate) << " ms)";
+ *default_period_in_frames = default_period;
+ *fundamental_period_in_frames = fundamental_period;
+ *min_period_in_frames = min_period;
+ *max_period_in_frames = max_period;
+ return error.Error();
+}
+
+HRESULT GetPreferredAudioParameters(IAudioClient* client,
+ AudioParameters* params) {
+ RTC_DLOG(LS_INFO) << "GetPreferredAudioParameters";
+ RTC_DCHECK(client);
+ return GetPreferredAudioParametersInternal(client, params, -1);
+}
+
+HRESULT GetPreferredAudioParameters(IAudioClient* client,
+ webrtc::AudioParameters* params,
+ uint32_t sample_rate) {
+ RTC_DLOG(LS_INFO) << "GetPreferredAudioParameters: " << sample_rate;
+ RTC_DCHECK(client);
+ return GetPreferredAudioParametersInternal(client, params, sample_rate);
+}
+
+HRESULT SharedModeInitialize(IAudioClient* client,
+ const WAVEFORMATEXTENSIBLE* format,
+ HANDLE event_handle,
+ REFERENCE_TIME buffer_duration,
+ bool auto_convert_pcm,
+ uint32_t* endpoint_buffer_size) {
+ RTC_DLOG(LS_INFO) << "SharedModeInitialize: buffer_duration="
+ << buffer_duration
+ << ", auto_convert_pcm=" << auto_convert_pcm;
+ RTC_DCHECK(client);
+ RTC_DCHECK_GE(buffer_duration, 0);
+ if (buffer_duration != 0) {
+ RTC_DLOG(LS_WARNING) << "Non-default buffer size is used";
+ }
+ if (auto_convert_pcm) {
+ RTC_DLOG(LS_WARNING) << "Sample rate converter can be utilized";
+ }
+ // The AUDCLNT_STREAMFLAGS_NOPERSIST flag disables persistence of the volume
+ // and mute settings for a session that contains rendering streams.
+ // By default, the volume level and muting state for a rendering session are
+ // persistent across system restarts. The volume level and muting state for a
+ // capture session are never persistent.
+ DWORD stream_flags = AUDCLNT_STREAMFLAGS_NOPERSIST;
+
+ // Enable event-driven streaming if a valid event handle is provided.
+ // After the stream starts, the audio engine will signal the event handle
+ // to notify the client each time a buffer becomes ready to process.
+ // Event-driven buffering is supported for both rendering and capturing.
+ // Both shared-mode and exclusive-mode streams can use event-driven buffering.
+ bool use_event =
+ (event_handle != nullptr && event_handle != INVALID_HANDLE_VALUE);
+ if (use_event) {
+ stream_flags |= AUDCLNT_STREAMFLAGS_EVENTCALLBACK;
+ RTC_DLOG(LS_INFO) << "The stream is initialized to be event driven";
+ }
+
+ // Check if sample-rate conversion is requested.
+ if (auto_convert_pcm) {
+ // Add channel matrixer (not utilized here) and rate converter to convert
+ // from our (the client's) format to the audio engine mix format.
+ // Currently only supported for testing, i.e., not possible to enable using
+ // public APIs.
+ RTC_DLOG(LS_INFO) << "The stream is initialized to support rate conversion";
+ stream_flags |= AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM;
+ stream_flags |= AUDCLNT_STREAMFLAGS_SRC_DEFAULT_QUALITY;
+ }
+ RTC_DLOG(LS_INFO) << "stream_flags: 0x" << rtc::ToHex(stream_flags);
+
+ // Initialize the shared mode client for minimal delay if `buffer_duration`
+ // is 0 or possibly a higher delay (more robust) if `buffer_duration` is
+ // larger than 0. The actual size is given by IAudioClient::GetBufferSize().
+ _com_error error = client->Initialize(
+ AUDCLNT_SHAREMODE_SHARED, stream_flags, buffer_duration, 0,
+ reinterpret_cast<const WAVEFORMATEX*>(format), nullptr);
+ if (FAILED(error.Error())) {
+ RTC_LOG(LS_ERROR) << "IAudioClient::Initialize failed: "
+ << ErrorToString(error);
+ return error.Error();
+ }
+
+ // If a stream is initialized to be event driven and in shared mode, the
+ // associated application must also obtain a handle by making a call to
+ // IAudioClient::SetEventHandle.
+ if (use_event) {
+ error = client->SetEventHandle(event_handle);
+ if (FAILED(error.Error())) {
+ RTC_LOG(LS_ERROR) << "IAudioClient::SetEventHandle failed: "
+ << ErrorToString(error);
+ return error.Error();
+ }
+ }
+
+ UINT32 buffer_size_in_frames = 0;
+ // Retrieves the size (maximum capacity) of the endpoint buffer. The size is
+ // expressed as the number of audio frames the buffer can hold.
+ // For rendering clients, the buffer length determines the maximum amount of
+ // rendering data that the application can write to the endpoint buffer
+ // during a single processing pass. For capture clients, the buffer length
+ // determines the maximum amount of capture data that the audio engine can
+ // read from the endpoint buffer during a single processing pass.
+ error = client->GetBufferSize(&buffer_size_in_frames);
+ if (FAILED(error.Error())) {
+ RTC_LOG(LS_ERROR) << "IAudioClient::GetBufferSize failed: "
+ << ErrorToString(error);
+ return error.Error();
+ }
+
+ *endpoint_buffer_size = buffer_size_in_frames;
+ RTC_DLOG(LS_INFO) << "endpoint buffer size: " << buffer_size_in_frames
+ << " [audio frames]";
+ const double size_in_ms = static_cast<double>(buffer_size_in_frames) /
+ (format->Format.nSamplesPerSec / 1000.0);
+ RTC_DLOG(LS_INFO) << "endpoint buffer size: "
+ << static_cast<int>(size_in_ms + 0.5) << " [ms]";
+ RTC_DLOG(LS_INFO) << "bytes per audio frame: " << format->Format.nBlockAlign;
+ RTC_DLOG(LS_INFO) << "endpoint buffer size: "
+ << buffer_size_in_frames * format->Format.nChannels *
+ (format->Format.wBitsPerSample / 8)
+ << " [bytes]";
+
+ // TODO(henrika): utilize when delay measurements are added.
+ REFERENCE_TIME latency = 0;
+ error = client->GetStreamLatency(&latency);
+ RTC_DLOG(LS_INFO) << "stream latency: "
+ << ReferenceTimeToTimeDelta(latency).ms() << " [ms]";
+ return error.Error();
+}
+
+HRESULT SharedModeInitializeLowLatency(IAudioClient3* client,
+ const WAVEFORMATEXTENSIBLE* format,
+ HANDLE event_handle,
+ uint32_t period_in_frames,
+ bool auto_convert_pcm,
+ uint32_t* endpoint_buffer_size) {
+ RTC_DLOG(LS_INFO) << "SharedModeInitializeLowLatency: period_in_frames="
+ << period_in_frames
+ << ", auto_convert_pcm=" << auto_convert_pcm;
+ RTC_DCHECK(client);
+ RTC_DCHECK_GT(period_in_frames, 0);
+ if (auto_convert_pcm) {
+ RTC_DLOG(LS_WARNING) << "Sample rate converter is enabled";
+ }
+
+ // Define stream flags.
+ DWORD stream_flags = AUDCLNT_STREAMFLAGS_NOPERSIST;
+ bool use_event =
+ (event_handle != nullptr && event_handle != INVALID_HANDLE_VALUE);
+ if (use_event) {
+ stream_flags |= AUDCLNT_STREAMFLAGS_EVENTCALLBACK;
+ RTC_DLOG(LS_INFO) << "The stream is initialized to be event driven";
+ }
+ if (auto_convert_pcm) {
+ stream_flags |= AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM;
+ stream_flags |= AUDCLNT_STREAMFLAGS_SRC_DEFAULT_QUALITY;
+ }
+ RTC_DLOG(LS_INFO) << "stream_flags: 0x" << rtc::ToHex(stream_flags);
+
+ // Initialize the shared mode client for lowest possible latency.
+ // It is assumed that GetSharedModeEnginePeriod() has been used to query the
+ // smallest possible engine period and that it is given by `period_in_frames`.
+ _com_error error = client->InitializeSharedAudioStream(
+ stream_flags, period_in_frames,
+ reinterpret_cast<const WAVEFORMATEX*>(format), nullptr);
+ if (FAILED(error.Error())) {
+ RTC_LOG(LS_ERROR) << "IAudioClient3::InitializeSharedAudioStream failed: "
+ << ErrorToString(error);
+ return error.Error();
+ }
+
+ // Set the event handle.
+ if (use_event) {
+ error = client->SetEventHandle(event_handle);
+ if (FAILED(error.Error())) {
+ RTC_LOG(LS_ERROR) << "IAudioClient::SetEventHandle failed: "
+ << ErrorToString(error);
+ return error.Error();
+ }
+ }
+
+ UINT32 buffer_size_in_frames = 0;
+ // Retrieve the size (maximum capacity) of the endpoint buffer.
+ error = client->GetBufferSize(&buffer_size_in_frames);
+ if (FAILED(error.Error())) {
+ RTC_LOG(LS_ERROR) << "IAudioClient::GetBufferSize failed: "
+ << ErrorToString(error);
+ return error.Error();
+ }
+
+ *endpoint_buffer_size = buffer_size_in_frames;
+ RTC_DLOG(LS_INFO) << "endpoint buffer size: " << buffer_size_in_frames
+ << " [audio frames]";
+ const double size_in_ms = static_cast<double>(buffer_size_in_frames) /
+ (format->Format.nSamplesPerSec / 1000.0);
+ RTC_DLOG(LS_INFO) << "endpoint buffer size: "
+ << static_cast<int>(size_in_ms + 0.5) << " [ms]";
+ RTC_DLOG(LS_INFO) << "bytes per audio frame: " << format->Format.nBlockAlign;
+ RTC_DLOG(LS_INFO) << "endpoint buffer size: "
+ << buffer_size_in_frames * format->Format.nChannels *
+ (format->Format.wBitsPerSample / 8)
+ << " [bytes]";
+
+ // TODO(henrika): utilize when delay measurements are added.
+ REFERENCE_TIME latency = 0;
+ error = client->GetStreamLatency(&latency);
+ if (FAILED(error.Error())) {
+ RTC_LOG(LS_WARNING) << "IAudioClient::GetStreamLatency failed: "
+ << ErrorToString(error);
+ } else {
+ RTC_DLOG(LS_INFO) << "stream latency: "
+ << ReferenceTimeToTimeDelta(latency).ms() << " [ms]";
+ }
+ return error.Error();
+}
+
+ComPtr<IAudioRenderClient> CreateRenderClient(IAudioClient* client) {
+ RTC_DLOG(LS_INFO) << "CreateRenderClient";
+ RTC_DCHECK(client);
+ // Get access to the IAudioRenderClient interface. This interface
+ // enables us to write output data to a rendering endpoint buffer.
+ ComPtr<IAudioRenderClient> audio_render_client;
+ _com_error error = client->GetService(IID_PPV_ARGS(&audio_render_client));
+ if (FAILED(error.Error())) {
+ RTC_LOG(LS_ERROR)
+ << "IAudioClient::GetService(IID_IAudioRenderClient) failed: "
+ << ErrorToString(error);
+ return ComPtr<IAudioRenderClient>();
+ }
+ return audio_render_client;
+}
+
+ComPtr<IAudioCaptureClient> CreateCaptureClient(IAudioClient* client) {
+ RTC_DLOG(LS_INFO) << "CreateCaptureClient";
+ RTC_DCHECK(client);
+ // Get access to the IAudioCaptureClient interface. This interface
+ // enables us to read input data from a capturing endpoint buffer.
+ ComPtr<IAudioCaptureClient> audio_capture_client;
+ _com_error error = client->GetService(IID_PPV_ARGS(&audio_capture_client));
+ if (FAILED(error.Error())) {
+ RTC_LOG(LS_ERROR)
+ << "IAudioClient::GetService(IID_IAudioCaptureClient) failed: "
+ << ErrorToString(error);
+ return ComPtr<IAudioCaptureClient>();
+ }
+ return audio_capture_client;
+}
+
+ComPtr<IAudioClock> CreateAudioClock(IAudioClient* client) {
+ RTC_DLOG(LS_INFO) << "CreateAudioClock";
+ RTC_DCHECK(client);
+ // Get access to the IAudioClock interface. This interface enables us to
+ // monitor a stream's data rate and the current position in the stream.
+ ComPtr<IAudioClock> audio_clock;
+ _com_error error = client->GetService(IID_PPV_ARGS(&audio_clock));
+ if (FAILED(error.Error())) {
+ RTC_LOG(LS_ERROR) << "IAudioClient::GetService(IID_IAudioClock) failed: "
+ << ErrorToString(error);
+ return ComPtr<IAudioClock>();
+ }
+ return audio_clock;
+}
+
+ComPtr<IAudioSessionControl> CreateAudioSessionControl(IAudioClient* client) {
+ RTC_DLOG(LS_INFO) << "CreateAudioSessionControl";
+ RTC_DCHECK(client);
+ ComPtr<IAudioSessionControl> audio_session_control;
+ _com_error error = client->GetService(IID_PPV_ARGS(&audio_session_control));
+ if (FAILED(error.Error())) {
+ RTC_LOG(LS_ERROR) << "IAudioClient::GetService(IID_IAudioControl) failed: "
+ << ErrorToString(error);
+ return ComPtr<IAudioSessionControl>();
+ }
+ return audio_session_control;
+}
+
+ComPtr<ISimpleAudioVolume> CreateSimpleAudioVolume(IAudioClient* client) {
+ RTC_DLOG(LS_INFO) << "CreateSimpleAudioVolume";
+ RTC_DCHECK(client);
+ // Get access to the ISimpleAudioVolume interface. This interface enables a
+ // client to control the master volume level of an audio session.
+ ComPtr<ISimpleAudioVolume> simple_audio_volume;
+ _com_error error = client->GetService(IID_PPV_ARGS(&simple_audio_volume));
+ if (FAILED(error.Error())) {
+ RTC_LOG(LS_ERROR)
+ << "IAudioClient::GetService(IID_ISimpleAudioVolume) failed: "
+ << ErrorToString(error);
+ return ComPtr<ISimpleAudioVolume>();
+ }
+ return simple_audio_volume;
+}
+
+bool FillRenderEndpointBufferWithSilence(IAudioClient* client,
+ IAudioRenderClient* render_client) {
+ RTC_DLOG(LS_INFO) << "FillRenderEndpointBufferWithSilence";
+ RTC_DCHECK(client);
+ RTC_DCHECK(render_client);
+ UINT32 endpoint_buffer_size = 0;
+ _com_error error = client->GetBufferSize(&endpoint_buffer_size);
+ if (FAILED(error.Error())) {
+ RTC_LOG(LS_ERROR) << "IAudioClient::GetBufferSize failed: "
+ << ErrorToString(error);
+ return false;
+ }
+
+ UINT32 num_queued_frames = 0;
+ // Get number of audio frames that are queued up to play in the endpoint
+ // buffer.
+ error = client->GetCurrentPadding(&num_queued_frames);
+ if (FAILED(error.Error())) {
+ RTC_LOG(LS_ERROR) << "IAudioClient::GetCurrentPadding failed: "
+ << ErrorToString(error);
+ return false;
+ }
+ RTC_DLOG(LS_INFO) << "num_queued_frames: " << num_queued_frames;
+
+ BYTE* data = nullptr;
+ int num_frames_to_fill = endpoint_buffer_size - num_queued_frames;
+ RTC_DLOG(LS_INFO) << "num_frames_to_fill: " << num_frames_to_fill;
+ error = render_client->GetBuffer(num_frames_to_fill, &data);
+ if (FAILED(error.Error())) {
+ RTC_LOG(LS_ERROR) << "IAudioRenderClient::GetBuffer failed: "
+ << ErrorToString(error);
+ return false;
+ }
+
+ // Using the AUDCLNT_BUFFERFLAGS_SILENT flag eliminates the need to
+ // explicitly write silence data to the rendering buffer.
+ error = render_client->ReleaseBuffer(num_frames_to_fill,
+ AUDCLNT_BUFFERFLAGS_SILENT);
+ if (FAILED(error.Error())) {
+ RTC_LOG(LS_ERROR) << "IAudioRenderClient::ReleaseBuffer failed: "
+ << ErrorToString(error);
+ return false;
+ }
+
+ return true;
+}
+
+std::string WaveFormatToString(const WaveFormatWrapper format) {
+ char ss_buf[1024];
+ rtc::SimpleStringBuilder ss(ss_buf);
+ // Start with the WAVEFORMATEX part (which always exists).
+ ss.AppendFormat("wFormatTag: %s (0x%X)",
+ WaveFormatTagToString(format->wFormatTag),
+ format->wFormatTag);
+ ss.AppendFormat(", nChannels: %d", format->nChannels);
+ ss.AppendFormat(", nSamplesPerSec: %d", format->nSamplesPerSec);
+ ss.AppendFormat(", nAvgBytesPerSec: %d", format->nAvgBytesPerSec);
+ ss.AppendFormat(", nBlockAlign: %d", format->nBlockAlign);
+ ss.AppendFormat(", wBitsPerSample: %d", format->wBitsPerSample);
+ ss.AppendFormat(", cbSize: %d", format->cbSize);
+ if (!format.IsExtensible())
+ return ss.str();
+
+ // Append the WAVEFORMATEXTENSIBLE part (which we know exists).
+ ss.AppendFormat(
+ " [+] wValidBitsPerSample: %d, dwChannelMask: %s",
+ format.GetExtensible()->Samples.wValidBitsPerSample,
+ ChannelMaskToString(format.GetExtensible()->dwChannelMask).c_str());
+ if (format.IsPcm()) {
+ ss.AppendFormat("%s", ", SubFormat: KSDATAFORMAT_SUBTYPE_PCM");
+ } else if (format.IsFloat()) {
+ ss.AppendFormat("%s", ", SubFormat: KSDATAFORMAT_SUBTYPE_IEEE_FLOAT");
+ } else {
+ ss.AppendFormat("%s", ", SubFormat: NOT_SUPPORTED");
+ }
+ return ss.str();
+}
+
+webrtc::TimeDelta ReferenceTimeToTimeDelta(REFERENCE_TIME time) {
+ // Each unit of reference time is 100 nanoseconds <=> 0.1 microsecond.
+ return webrtc::TimeDelta::Micros(0.1 * time + 0.5);
+}
+
+double FramesToMilliseconds(uint32_t num_frames, uint16_t sample_rate) {
+ // Convert the current period in frames into milliseconds.
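+  // Example (illustrative): 480 frames at 48000 Hz corresponds to
+  // 480 / (48000 / 1000.0) = 10.0 milliseconds.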
+ return static_cast<double>(num_frames) / (sample_rate / 1000.0);
+}
+
+std::string ErrorToString(const _com_error& error) {
+ char ss_buf[1024];
+ rtc::SimpleStringBuilder ss(ss_buf);
+ ss.AppendFormat("(HRESULT: 0x%08X)", error.Error());
+ return ss.str();
+}
+
+} // namespace core_audio_utility
+} // namespace webrtc_win
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_device/win/core_audio_utility_win.h b/third_party/libwebrtc/modules/audio_device/win/core_audio_utility_win.h
new file mode 100644
index 0000000000..454e60bf31
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/win/core_audio_utility_win.h
@@ -0,0 +1,560 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_DEVICE_WIN_CORE_AUDIO_UTILITY_WIN_H_
+#define MODULES_AUDIO_DEVICE_WIN_CORE_AUDIO_UTILITY_WIN_H_
+
+#include <audioclient.h>
+#include <audiopolicy.h>
+#include <avrt.h>
+#include <comdef.h>
+#include <mmdeviceapi.h>
+#include <objbase.h>
+#include <propidl.h>
+#include <wrl/client.h>
+
+#include <string>
+
+#include "absl/strings/string_view.h"
+#include "api/units/time_delta.h"
+#include "modules/audio_device/audio_device_name.h"
+#include "modules/audio_device/include/audio_device_defines.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/string_utils.h"
+
+#pragma comment(lib, "Avrt.lib")
+
+namespace webrtc {
+namespace webrtc_win {
+
+// Utility class which registers a thread with MMCSS in the constructor and
+// deregisters MMCSS in the destructor. The task name is given by `task_name`.
+// The Multimedia Class Scheduler service (MMCSS) enables multimedia
+// applications to ensure that their time-sensitive processing receives
+// prioritized access to CPU resources without denying CPU resources to
+// lower-priority applications.
+class ScopedMMCSSRegistration {
+ public:
+ const char* PriorityClassToString(DWORD priority_class) {
+ switch (priority_class) {
+ case ABOVE_NORMAL_PRIORITY_CLASS:
+ return "ABOVE_NORMAL";
+ case BELOW_NORMAL_PRIORITY_CLASS:
+ return "BELOW_NORMAL";
+ case HIGH_PRIORITY_CLASS:
+ return "HIGH";
+ case IDLE_PRIORITY_CLASS:
+ return "IDLE";
+ case NORMAL_PRIORITY_CLASS:
+ return "NORMAL";
+ case REALTIME_PRIORITY_CLASS:
+ return "REALTIME";
+ default:
+ return "INVALID";
+ }
+ }
+
+ const char* PriorityToString(int priority) {
+ switch (priority) {
+ case THREAD_PRIORITY_ABOVE_NORMAL:
+ return "ABOVE_NORMAL";
+ case THREAD_PRIORITY_BELOW_NORMAL:
+ return "BELOW_NORMAL";
+ case THREAD_PRIORITY_HIGHEST:
+ return "HIGHEST";
+ case THREAD_PRIORITY_IDLE:
+ return "IDLE";
+ case THREAD_PRIORITY_LOWEST:
+ return "LOWEST";
+ case THREAD_PRIORITY_NORMAL:
+ return "NORMAL";
+ case THREAD_PRIORITY_TIME_CRITICAL:
+ return "TIME_CRITICAL";
+ default:
+ // Can happen in combination with REALTIME_PRIORITY_CLASS.
+ return "INVALID";
+ }
+ }
+
+ explicit ScopedMMCSSRegistration(const wchar_t* task_name) {
+ RTC_DLOG(LS_INFO) << "ScopedMMCSSRegistration: " << rtc::ToUtf8(task_name);
+ // Register the calling thread with MMCSS for the supplied `task_name`.
+ DWORD mmcss_task_index = 0;
+ mmcss_handle_ = AvSetMmThreadCharacteristicsW(task_name, &mmcss_task_index);
+ if (mmcss_handle_ == nullptr) {
+ RTC_LOG(LS_ERROR) << "Failed to enable MMCSS on this thread: "
+ << GetLastError();
+ } else {
+ const DWORD priority_class = GetPriorityClass(GetCurrentProcess());
+ const int priority = GetThreadPriority(GetCurrentThread());
+ RTC_DLOG(LS_INFO) << "priority class: "
+ << PriorityClassToString(priority_class) << "("
+ << priority_class << ")";
+ RTC_DLOG(LS_INFO) << "priority: " << PriorityToString(priority) << "("
+ << priority << ")";
+ }
+ }
+
+ ~ScopedMMCSSRegistration() {
+ if (Succeeded()) {
+ // Deregister with MMCSS.
+ RTC_DLOG(LS_INFO) << "~ScopedMMCSSRegistration";
+ AvRevertMmThreadCharacteristics(mmcss_handle_);
+ }
+ }
+
+ ScopedMMCSSRegistration(const ScopedMMCSSRegistration&) = delete;
+ ScopedMMCSSRegistration& operator=(const ScopedMMCSSRegistration&) = delete;
+
+ bool Succeeded() const { return mmcss_handle_ != nullptr; }
+
+ private:
+ HANDLE mmcss_handle_ = nullptr;
+};
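+
+// Example usage (an illustrative sketch only; "Pro Audio" is one of the task
+// names which MMCSS supports by default):
+//
+//   void RenderThreadRun() {
+//     ScopedMMCSSRegistration mmcss(L"Pro Audio");
+//     if (!mmcss.Succeeded()) {
+//       // Keep running, but without boosted thread priority.
+//     }
+//     // ...time-sensitive audio processing...
+//   }  // The MMCSS registration is reverted automatically here.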
+
+// A PROPVARIANT that is automatically initialized and cleared upon respective
+// construction and destruction of this class.
+class ScopedPropVariant {
+ public:
+ ScopedPropVariant() { PropVariantInit(&pv_); }
+
+ ~ScopedPropVariant() { Reset(); }
+
+ ScopedPropVariant(const ScopedPropVariant&) = delete;
+ ScopedPropVariant& operator=(const ScopedPropVariant&) = delete;
+ bool operator==(const ScopedPropVariant&) const = delete;
+ bool operator!=(const ScopedPropVariant&) const = delete;
+
+ // Returns a pointer to the underlying PROPVARIANT for use as an out param in
+ // a function call.
+ PROPVARIANT* Receive() {
+ RTC_DCHECK_EQ(pv_.vt, VT_EMPTY);
+ return &pv_;
+ }
+
+ // Clears the instance to prepare it for re-use (e.g., via Receive).
+ void Reset() {
+ if (pv_.vt != VT_EMPTY) {
+ HRESULT result = PropVariantClear(&pv_);
+ RTC_DCHECK_EQ(result, S_OK);
+ }
+ }
+
+ const PROPVARIANT& get() const { return pv_; }
+ const PROPVARIANT* ptr() const { return &pv_; }
+
+ private:
+ PROPVARIANT pv_;
+};
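+
+// Example usage (an illustrative sketch; `properties` is assumed to be a
+// valid IPropertyStore pointer):
+//
+//   ScopedPropVariant friendly_name_pv;
+//   if (SUCCEEDED(properties->GetValue(PKEY_Device_FriendlyName,
+//                                      friendly_name_pv.Receive())) &&
+//       friendly_name_pv.get().vt == VT_LPWSTR) {
+//     // Use friendly_name_pv.get().pwszVal here; PropVariantClear() runs
+//     // automatically when the wrapper goes out of scope.
+//   }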
+
+// Simple scoped memory releaser class for COM allocated memory.
+template <typename T>
+class ScopedCoMem {
+ public:
+ ScopedCoMem() : mem_ptr_(nullptr) {}
+
+ ~ScopedCoMem() { Reset(nullptr); }
+
+ ScopedCoMem(const ScopedCoMem&) = delete;
+ ScopedCoMem& operator=(const ScopedCoMem&) = delete;
+
+ T** operator&() { // NOLINT
+ RTC_DCHECK(mem_ptr_ == nullptr); // To catch memory leaks.
+ return &mem_ptr_;
+ }
+
+ operator T*() { return mem_ptr_; }
+
+ T* operator->() {
+ RTC_DCHECK(mem_ptr_ != nullptr);
+ return mem_ptr_;
+ }
+
+ const T* operator->() const {
+ RTC_DCHECK(mem_ptr_ != nullptr);
+ return mem_ptr_;
+ }
+
+ explicit operator bool() const { return mem_ptr_; }
+
+ friend bool operator==(const ScopedCoMem& lhs, std::nullptr_t) {
+ return lhs.Get() == nullptr;
+ }
+
+ friend bool operator==(std::nullptr_t, const ScopedCoMem& rhs) {
+ return rhs.Get() == nullptr;
+ }
+
+ friend bool operator!=(const ScopedCoMem& lhs, std::nullptr_t) {
+ return lhs.Get() != nullptr;
+ }
+
+ friend bool operator!=(std::nullptr_t, const ScopedCoMem& rhs) {
+ return rhs.Get() != nullptr;
+ }
+
+ void Reset(T* ptr) {
+ if (mem_ptr_)
+ CoTaskMemFree(mem_ptr_);
+ mem_ptr_ = ptr;
+ }
+
+ T* Get() const { return mem_ptr_; }
+
+ private:
+ T* mem_ptr_;
+};
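+
+// Example usage (an illustrative sketch; `client` is assumed to be a valid
+// IAudioClient pointer):
+//
+//   ScopedCoMem<WAVEFORMATEX> format;
+//   if (SUCCEEDED(client->GetMixFormat(&format))) {
+//     // Use e.g. format->nSamplesPerSec here; CoTaskMemFree() runs
+//     // automatically when `format` goes out of scope.
+//   }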
+
+// A HANDLE that is automatically initialized and closed upon respective
+// construction and destruction of this class.
+class ScopedHandle {
+ public:
+ ScopedHandle() : handle_(nullptr) {}
+ explicit ScopedHandle(HANDLE h) : handle_(nullptr) { Set(h); }
+
+ ~ScopedHandle() { Close(); }
+
+ ScopedHandle& operator=(const ScopedHandle&) = delete;
+ bool operator==(const ScopedHandle&) const = delete;
+ bool operator!=(const ScopedHandle&) const = delete;
+
+ // Use this instead of comparing to INVALID_HANDLE_VALUE.
+ bool IsValid() const { return handle_ != nullptr; }
+
+ void Set(HANDLE new_handle) {
+ Close();
+ // Windows is inconsistent about invalid handles.
+ // See https://blogs.msdn.microsoft.com/oldnewthing/20040302-00/?p=40443
+ // for details.
+ if (new_handle != INVALID_HANDLE_VALUE) {
+ handle_ = new_handle;
+ }
+ }
+
+ HANDLE Get() const { return handle_; }
+
+ operator HANDLE() const { return handle_; }
+
+ void Close() {
+ if (handle_) {
+ if (!::CloseHandle(handle_)) {
+ RTC_DCHECK_NOTREACHED();
+ }
+ handle_ = nullptr;
+ }
+ }
+
+ private:
+ HANDLE handle_;
+};
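+
+// Example usage (an illustrative sketch):
+//
+//   ScopedHandle audio_samples_event(
+//       CreateEvent(nullptr, false, false, nullptr));
+//   if (audio_samples_event.IsValid()) {
+//     // Pass audio_samples_event.Get() to e.g. IAudioClient::SetEventHandle;
+//     // CloseHandle() runs automatically when the wrapper goes out of scope.
+//   }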
+
+// Utility methods for the Core Audio API on Windows.
+// Always ensure that Core Audio is supported before using these methods.
+// Use webrtc_win::core_audio_utility::IsSupported() for this purpose.
+// Also, all methods must be called on a valid COM thread. This can be done
+// by using the ScopedCOMInitializer helper class.
+// These methods are based on media::CoreAudioUtil in Chrome.
+namespace core_audio_utility {
+
+// Helper class which automates casting between WAVEFORMATEX and
+// WAVEFORMATEXTENSIBLE raw pointers using implicit constructors and
+// operator overloading. Note that no memory is allocated by this utility
+// structure. It only serves as a handle (or a wrapper) for the structure
+// provided to it at construction.
+class WaveFormatWrapper {
+ public:
+ WaveFormatWrapper(WAVEFORMATEXTENSIBLE* p)
+ : ptr_(reinterpret_cast<WAVEFORMATEX*>(p)) {}
+ WaveFormatWrapper(WAVEFORMATEX* p) : ptr_(p) {}
+ ~WaveFormatWrapper() = default;
+
+ operator WAVEFORMATEX*() const { return ptr_; }
+ WAVEFORMATEX* operator->() const { return ptr_; }
+ WAVEFORMATEX* get() const { return ptr_; }
+ WAVEFORMATEXTENSIBLE* GetExtensible() const;
+
+ bool IsExtensible() const;
+ bool IsPcm() const;
+ bool IsFloat() const;
+ size_t size() const;
+
+ private:
+ WAVEFORMATEX* ptr_;
+};
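+
+// Example usage (an illustrative sketch; `mix_format` is assumed to point at
+// a structure returned by IAudioClient::GetMixFormat):
+//
+//   WaveFormatWrapper wrapped_format(mix_format);
+//   if (wrapped_format.IsExtensible() && wrapped_format.IsFloat()) {
+//     // Safe to read wrapped_format.GetExtensible()->SubFormat.
+//   }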
+
+// Returns true if Windows Core Audio is supported.
+// Always verify that this method returns true before using any of the
+// other methods in this class.
+bool IsSupported();
+
+// Returns true if Multimedia Class Scheduler service (MMCSS) is supported.
+// The MMCSS enables multimedia applications to ensure that their time-sensitive
+// processing receives prioritized access to CPU resources without denying CPU
+// resources to lower-priority applications.
+bool IsMMCSSSupported();
+
+// The MMDevice API lets clients discover the audio endpoint devices in the
+// system and determine which devices are suitable for the application to use.
+// Header file Mmdeviceapi.h defines the interfaces in the MMDevice API.
+
+// Number of active audio devices in the specified data flow direction.
+// Set `data_flow` to eAll to retrieve the total number of active audio
+// devices.
+int NumberOfActiveDevices(EDataFlow data_flow);
+
+// Returns 1, 2, or 3 depending on what version of IAudioClient the platform
+// supports.
+// Example: IAudioClient2 is supported on Windows 8 and higher => 2 is returned.
+uint32_t GetAudioClientVersion();
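+
+// Example (an illustrative sketch): gate usage of the newer interfaces on the
+// detected version.
+//
+//   if (GetAudioClientVersion() >= 3) {
+//     // IAudioClient3 and its low-latency shared-mode API may be used.
+//   }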
+
+// Creates an IMMDeviceEnumerator interface which provides methods for
+// enumerating audio endpoint devices.
+// TODO(henrika): IMMDeviceEnumerator::RegisterEndpointNotificationCallback.
+Microsoft::WRL::ComPtr<IMMDeviceEnumerator> CreateDeviceEnumerator();
+
+// These functions return the unique device id of the default or
+// communications input/output device, or an empty string if no such device
+// exists or if the device has been disabled.
+std::string GetDefaultInputDeviceID();
+std::string GetDefaultOutputDeviceID();
+std::string GetCommunicationsInputDeviceID();
+std::string GetCommunicationsOutputDeviceID();
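+
+// Example (an illustrative sketch): the returned ids can be passed on to
+// e.g. CreateDevice().
+//
+//   std::string unique_id = GetDefaultOutputDeviceID();
+//   if (unique_id.empty()) {
+//     // No active default render device exists.
+//   }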
+
+// Creates an IMMDevice interface corresponding to the unique device id in
+// `device_id`, or by data-flow direction and role if `device_id` is set to
+// AudioDeviceName::kDefaultDeviceId.
+Microsoft::WRL::ComPtr<IMMDevice> CreateDevice(absl::string_view device_id,
+ EDataFlow data_flow,
+ ERole role);
+
+// Returns the unique ID and user-friendly name of a given endpoint device.
+// Example: "{0.0.1.00000000}.{8db6020f-18e3-4f25-b6f5-7726c9122574}", and
+// "Microphone (Realtek High Definition Audio)".
+webrtc::AudioDeviceName GetDeviceName(IMMDevice* device);
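+
+// Example (an illustrative sketch): combine CreateDevice() and GetDeviceName()
+// to resolve the name of the default capture device.
+//
+//   Microsoft::WRL::ComPtr<IMMDevice> device =
+//       CreateDevice(AudioDeviceName::kDefaultDeviceId, eCapture, eConsole);
+//   if (device.Get()) {
+//     webrtc::AudioDeviceName name = GetDeviceName(device.Get());
+//   }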
+
+// Gets the user-friendly name of the endpoint device which is represented
+// by a unique id in `device_id`, or by data-flow direction and role if
+// `device_id` is set to AudioDeviceName::kDefaultDeviceId.
+std::string GetFriendlyName(absl::string_view device_id,
+ EDataFlow data_flow,
+ ERole role);
+
+// Query if the audio device is a rendering device or a capture device.
+EDataFlow GetDataFlow(IMMDevice* device);
+
+// Enumerates all input devices and adds the names (friendly name and unique
+// device id) to the list in `device_names`.
+bool GetInputDeviceNames(webrtc::AudioDeviceNames* device_names);
+
+// Enumerates all output devices and adds the names (friendly name and unique
+// device id) to the list in `device_names`.
+bool GetOutputDeviceNames(webrtc::AudioDeviceNames* device_names);
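+
+// Example (an illustrative sketch; must be called on a valid COM thread):
+//
+//   webrtc::AudioDeviceNames device_names;
+//   if (GetOutputDeviceNames(&device_names)) {
+//     // device_names[0] is the default device and device_names[1] is the
+//     // default communications device; the remaining entries correspond to
+//     // the active render devices.
+//   }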
+
+// The Windows Audio Session API (WASAPI) enables client applications to
+// manage the flow of audio data between the application and an audio endpoint
+// device. Header files Audioclient.h and Audiopolicy.h define the WASAPI
+// interfaces.
+
+// Creates an IAudioSessionManager2 interface for the specified `device`.
+// This interface provides access to e.g. the IAudioSessionEnumerator.
+Microsoft::WRL::ComPtr<IAudioSessionManager2> CreateSessionManager2(
+ IMMDevice* device);
+
+// Creates an IAudioSessionEnumerator interface for the specified `device`.
+// The client can use the interface to enumerate audio sessions on the audio
+// device.
+Microsoft::WRL::ComPtr<IAudioSessionEnumerator> CreateSessionEnumerator(
+ IMMDevice* device);
+
+// Number of active audio sessions for the given `device`. Expired or inactive
+// sessions are not included.
+int NumberOfActiveSessions(IMMDevice* device);
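+
+// Example (an illustrative sketch; `device` is assumed to be a valid
+// IMMDevice pointer):
+//
+//   if (NumberOfActiveSessions(device.Get()) > 0) {
+//     // At least one audio session is active on the given device.
+//   }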
+
+// Creates an IAudioClient instance for a specific device or the default
+// device specified by data-flow direction and role.
+Microsoft::WRL::ComPtr<IAudioClient> CreateClient(absl::string_view device_id,
+ EDataFlow data_flow,
+ ERole role);
+Microsoft::WRL::ComPtr<IAudioClient2> CreateClient2(absl::string_view device_id,
+ EDataFlow data_flow,
+ ERole role);
+Microsoft::WRL::ComPtr<IAudioClient3> CreateClient3(absl::string_view device_id,
+ EDataFlow data_flow,
+ ERole role);
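+
+// Example (an illustrative sketch): create an audio client for the default
+// communications render device.
+//
+//   Microsoft::WRL::ComPtr<IAudioClient> client = CreateClient(
+//       AudioDeviceName::kDefaultDeviceId, eRender, eCommunications);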
+
+// Sets the AudioCategory_Communications category. Should be called before
+// GetSharedModeMixFormat() and IsFormatSupported(). The `client` argument must
+// be an IAudioClient2 or IAudioClient3 interface pointer, hence only supported
+// on Windows 8 and above.
+// TODO(henrika): evaluate effect (if any).
+HRESULT SetClientProperties(IAudioClient2* client);
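+
+// Example (an illustrative sketch; `client2` is assumed to be created by
+// CreateClient2() or CreateClient3()):
+//
+//   if (GetAudioClientVersion() >= 2) {
+//     SetClientProperties(client2.Get());
+//   }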
+
+// Returns the buffer size limits of the hardware audio engine in
+// 100-nanosecond units given a specified `format`. Does not require prior
+// audio stream initialization. The `client` argument must be an IAudioClient2
+// or IAudioClient3 interface pointer, hence only supported on Windows 8 and
+// above.
+// TODO(henrika): always fails with AUDCLNT_E_OFFLOAD_MODE_ONLY.
+HRESULT GetBufferSizeLimits(IAudioClient2* client,
+ const WAVEFORMATEXTENSIBLE* format,
+ REFERENCE_TIME* min_buffer_duration,
+ REFERENCE_TIME* max_buffer_duration);
+
+// Get the mix format that the audio engine uses internally for processing
+// of shared-mode streams. The client can call this method before calling
+// IAudioClient::Initialize. When creating a shared-mode stream for an audio
+// endpoint device, the Initialize method always accepts the stream format
+// obtained by this method.
+HRESULT GetSharedModeMixFormat(IAudioClient* client,
+ WAVEFORMATEXTENSIBLE* format);
+
+// Returns true if the specified `client` supports the format in `format`
+// for the given `share_mode` (shared or exclusive). The client can call this
+// method before calling IAudioClient::Initialize.
+bool IsFormatSupported(IAudioClient* client,
+ AUDCLNT_SHAREMODE share_mode,
+ const WAVEFORMATEXTENSIBLE* format);
+
+// For a shared-mode stream, the audio engine periodically processes the
+// data in the endpoint buffer at the period obtained in `device_period`.
+// For an exclusive mode stream, `device_period` corresponds to the minimum
+// time interval between successive processing by the endpoint device.
+// This period plus the stream latency between the buffer and endpoint device
+// represents the minimum possible latency that an audio application can
+// achieve. The time in `device_period` is expressed in 100-nanosecond units.
+HRESULT GetDevicePeriod(IAudioClient* client,
+ AUDCLNT_SHAREMODE share_mode,
+ REFERENCE_TIME* device_period);
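+
+// For example (sketch; assumes a valid `client`), the shared-mode period can
+// be logged in milliseconds via ReferenceTimeToTimeDelta() declared below:
+//
+//   REFERENCE_TIME period = 0;
+//   if (SUCCEEDED(GetDevicePeriod(client.Get(), AUDCLNT_SHAREMODE_SHARED,
+//                                 &period))) {
+//     RTC_LOG(LS_INFO) << ReferenceTimeToTimeDelta(period).ms() << " ms";
+//   }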
+
+// Returns the range of periodicities supported by the engine for the specified
+// stream `format`. The periodicity of the engine is the rate at which the
+// engine wakes an event-driven audio client to transfer audio data to or from
+// the engine. Can be used for low-latency support on some devices.
+// The `client` argument must be an IAudioClient3 interface pointer, hence only
+// supported on Windows 10 and above.
+HRESULT GetSharedModeEnginePeriod(IAudioClient3* client3,
+ const WAVEFORMATEXTENSIBLE* format,
+ uint32_t* default_period_in_frames,
+ uint32_t* fundamental_period_in_frames,
+ uint32_t* min_period_in_frames,
+ uint32_t* max_period_in_frames);
+
+// Get the preferred audio parameters for the given `client` corresponding to
+// the stream format that the audio engine uses for its internal processing of
+// shared-mode streams. The acquired values should only be utilized for
+// shared-mode streams since there are no preferred settings for an exclusive
+// mode stream.
+HRESULT GetPreferredAudioParameters(IAudioClient* client,
+ webrtc::AudioParameters* params);
+// As above but override the preferred sample rate and use `sample_rate`
+// instead. Intended mainly for testing purposes and in combination with rate
+// conversion.
+HRESULT GetPreferredAudioParameters(IAudioClient* client,
+ webrtc::AudioParameters* params,
+ uint32_t sample_rate);
+
+// After activating an IAudioClient interface on an audio endpoint device,
+// the client must initialize it once, and only once, to set up the audio
+// stream between the client and the device. In shared mode, the client
+// connects indirectly through the audio engine which does the mixing.
+// If a valid event is provided in `event_handle`, the client will be
+// initialized for event-driven buffer handling. If `event_handle` is set to
+// nullptr, event-driven buffer handling is not utilized. To achieve the
+// minimum stream latency between the client application and audio endpoint
+// device, set `buffer_duration` to 0. A client has the option of requesting a
+// buffer size that is larger than what is strictly necessary to make timing
+// glitches rare or nonexistent. Increasing the buffer size does not necessarily
+// increase the stream latency. Each unit of reference time is 100 nanoseconds.
+// The `auto_convert_pcm` parameter can be used for testing purposes to ensure
+// that the sample rate of the client side does not have to match the audio
+// engine mix format. If `auto_convert_pcm` is set to true, a rate converter
+// will be inserted to convert between the sample rate in `format` and the
+// preferred rate given by GetPreferredAudioParameters().
+// The output parameter `endpoint_buffer_size` contains the size of the
+// endpoint buffer and it is expressed as the number of audio frames the
+// buffer can hold.
+HRESULT SharedModeInitialize(IAudioClient* client,
+ const WAVEFORMATEXTENSIBLE* format,
+ HANDLE event_handle,
+ REFERENCE_TIME buffer_duration,
+ bool auto_convert_pcm,
+ uint32_t* endpoint_buffer_size);
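+
+// A typical shared-mode setup sequence (illustrative sketch; error handling
+// omitted):
+//
+//   ComPtr<IAudioClient> client = CreateClient(
+//       AudioDeviceName::kDefaultDeviceId, eRender, eConsole);
+//   WAVEFORMATEXTENSIBLE format;
+//   GetSharedModeMixFormat(client.Get(), &format);
+//   uint32_t endpoint_buffer_size = 0;
+//   SharedModeInitialize(client.Get(), &format, nullptr, 0, false,
+//                        &endpoint_buffer_size);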
+
+// Works as SharedModeInitialize() but adds support for using smaller engine
+// periods than the default period.
+// The `client` argument must be an IAudioClient3 interface pointer, hence only
+// supported on Windows 10 and above.
+// TODO(henrika): can probably be merged into SharedModeInitialize() to avoid
+// duplicating code. Keeping as separate method for now until decided if we
+// need low-latency support.
+HRESULT SharedModeInitializeLowLatency(IAudioClient3* client,
+ const WAVEFORMATEXTENSIBLE* format,
+ HANDLE event_handle,
+ uint32_t period_in_frames,
+ bool auto_convert_pcm,
+ uint32_t* endpoint_buffer_size);
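+
+// Illustrative low-latency sketch (assumes `client3` and `format` have been
+// created as described above):
+//
+//   uint32_t default_period = 0, fundamental_period = 0;
+//   uint32_t min_period = 0, max_period = 0;
+//   GetSharedModeEnginePeriod(client3.Get(), &format, &default_period,
+//                             &fundamental_period, &min_period, &max_period);
+//   uint32_t endpoint_buffer_size = 0;
+//   SharedModeInitializeLowLatency(client3.Get(), &format, nullptr,
+//                                  min_period, false, &endpoint_buffer_size);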
+
+// Creates an IAudioRenderClient client for an existing IAudioClient given by
+// `client`. The IAudioRenderClient interface enables a client to write
+// output data to a rendering endpoint buffer. The methods in this interface
+// manage the movement of data packets that contain audio-rendering data.
+Microsoft::WRL::ComPtr<IAudioRenderClient> CreateRenderClient(
+ IAudioClient* client);
+
+// Creates an IAudioCaptureClient client for an existing IAudioClient given by
+// `client`. The IAudioCaptureClient interface enables a client to read
+// input data from a capture endpoint buffer. The methods in this interface
+// manage the movement of data packets that contain capture data.
+Microsoft::WRL::ComPtr<IAudioCaptureClient> CreateCaptureClient(
+ IAudioClient* client);
+
+// Creates an IAudioClock interface for an existing IAudioClient given by
+// `client`. The IAudioClock interface enables a client to monitor a stream's
+// data rate and the current position in the stream.
+Microsoft::WRL::ComPtr<IAudioClock> CreateAudioClock(IAudioClient* client);
+
+// Creates an IAudioSessionControl interface for an existing IAudioClient given
+// by `client`. The IAudioSessionControl interface enables a client to
+// configure the control parameters for an audio session and to monitor events
+// in the session.
+Microsoft::WRL::ComPtr<IAudioSessionControl> CreateAudioSessionControl(
+ IAudioClient* client);
+
+// Creates an ISimpleAudioVolume interface for an existing IAudioClient given by
+// `client`. This interface enables a client to control the master volume level
+// of an active audio session.
+Microsoft::WRL::ComPtr<ISimpleAudioVolume> CreateSimpleAudioVolume(
+ IAudioClient* client);
+
+// Fills up the endpoint rendering buffer with silence for an existing
+// IAudioClient given by `client` and a corresponding IAudioRenderClient
+// given by `render_client`.
+bool FillRenderEndpointBufferWithSilence(IAudioClient* client,
+ IAudioRenderClient* render_client);
+
+// Returns a textual representation of all fields of the format structure in
+// `format`. Also supports extended versions (WAVEFORMATEXTENSIBLE).
+std::string WaveFormatToString(WaveFormatWrapper format);
+
+// Converts Windows internal REFERENCE_TIME (100 nanosecond units) into
+// generic webrtc::TimeDelta which then can be converted to any time unit.
+webrtc::TimeDelta ReferenceTimeToTimeDelta(REFERENCE_TIME time);
+
+// Converts size expressed in number of audio frames, `num_frames`, into
+// milliseconds given a specified `sample_rate`.
+double FramesToMilliseconds(uint32_t num_frames, uint16_t sample_rate);
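+// For instance, FramesToMilliseconds(480, 48000) corresponds to 10.0 ms.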
+
+// Converts a COM error into a human-readable string.
+std::string ErrorToString(const _com_error& error);
+
+} // namespace core_audio_utility
+} // namespace webrtc_win
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_DEVICE_WIN_CORE_AUDIO_UTILITY_WIN_H_
diff --git a/third_party/libwebrtc/modules/audio_device/win/core_audio_utility_win_unittest.cc b/third_party/libwebrtc/modules/audio_device/win/core_audio_utility_win_unittest.cc
new file mode 100644
index 0000000000..277f54eb35
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/win/core_audio_utility_win_unittest.cc
@@ -0,0 +1,876 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_device/win/core_audio_utility_win.h"
+#include "rtc_base/arraysize.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/win/scoped_com_initializer.h"
+#include "rtc_base/win/windows_version.h"
+#include "test/gtest.h"
+
+using Microsoft::WRL::ComPtr;
+using webrtc::AudioDeviceName;
+
+namespace webrtc {
+namespace webrtc_win {
+namespace {
+
+#define ABORT_TEST_IF_NOT(requirements_satisfied) \
+ do { \
+ bool fail = false; \
+ if (ShouldAbortTest(requirements_satisfied, #requirements_satisfied, \
+ &fail)) { \
+ if (fail) \
+ FAIL(); \
+ else \
+ return; \
+ } \
+ } while (false)
+
+bool ShouldAbortTest(bool requirements_satisfied,
+ const char* requirements_expression,
+ bool* should_fail) {
+ if (!requirements_satisfied) {
+ RTC_LOG(LS_ERROR) << "Requirement(s) not satisfied ("
+ << requirements_expression << ")";
+    // TODO(henrika): improve hard-coded condition to determine if the test
+    // should fail or be ignored. Could use e.g. a command-line argument to
+    // determine if the test should fail or not.
+ *should_fail = false;
+ return true;
+ }
+ *should_fail = false;
+ return false;
+}
+
+} // namespace
+
+// CoreAudioUtilityWinTest test fixture.
+class CoreAudioUtilityWinTest : public ::testing::Test {
+ protected:
+ CoreAudioUtilityWinTest() : com_init_(ScopedCOMInitializer::kMTA) {
+    // We must initialize the COM library on a thread before calling any of
+    // the library functions. All COM functions will return CO_E_NOTINITIALIZED
+ // otherwise.
+ EXPECT_TRUE(com_init_.Succeeded());
+
+ // Configure logging.
+ rtc::LogMessage::LogToDebug(rtc::LS_INFO);
+ rtc::LogMessage::LogTimestamps();
+ rtc::LogMessage::LogThreads();
+ }
+
+ virtual ~CoreAudioUtilityWinTest() {}
+
+ bool DevicesAvailable() {
+ return core_audio_utility::IsSupported() &&
+ core_audio_utility::NumberOfActiveDevices(eCapture) > 0 &&
+ core_audio_utility::NumberOfActiveDevices(eRender) > 0;
+ }
+
+ private:
+ ScopedCOMInitializer com_init_;
+};
+
+TEST_F(CoreAudioUtilityWinTest, WaveFormatWrapper) {
+ // Use default constructor for WAVEFORMATEX and verify its size.
+ WAVEFORMATEX format = {};
+ core_audio_utility::WaveFormatWrapper wave_format(&format);
+ EXPECT_FALSE(wave_format.IsExtensible());
+ EXPECT_EQ(wave_format.size(), sizeof(WAVEFORMATEX));
+ EXPECT_EQ(wave_format->cbSize, 0);
+
+ // Ensure that the stand-alone WAVEFORMATEX structure has a valid format tag
+ // and that all accessors work.
+ format.wFormatTag = WAVE_FORMAT_PCM;
+ EXPECT_FALSE(wave_format.IsExtensible());
+ EXPECT_EQ(wave_format.size(), sizeof(WAVEFORMATEX));
+ EXPECT_EQ(wave_format.get()->wFormatTag, WAVE_FORMAT_PCM);
+ EXPECT_EQ(wave_format->wFormatTag, WAVE_FORMAT_PCM);
+
+ // Next, ensure that the size is valid. Stand-alone is not extended.
+ EXPECT_EQ(wave_format.size(), sizeof(WAVEFORMATEX));
+
+ // Verify format types for the stand-alone version.
+ EXPECT_TRUE(wave_format.IsPcm());
+ EXPECT_FALSE(wave_format.IsFloat());
+ format.wFormatTag = WAVE_FORMAT_IEEE_FLOAT;
+ EXPECT_TRUE(wave_format.IsFloat());
+}
+
+TEST_F(CoreAudioUtilityWinTest, WaveFormatWrapperExtended) {
+  // Use the default constructor for WAVEFORMATEXTENSIBLE and verify that it
+  // results in the same size as for WAVEFORMATEX even though the size of
+  // `format_ex` equals the size of WAVEFORMATEXTENSIBLE.
+ WAVEFORMATEXTENSIBLE format_ex = {};
+ core_audio_utility::WaveFormatWrapper wave_format_ex(&format_ex);
+ EXPECT_FALSE(wave_format_ex.IsExtensible());
+ EXPECT_EQ(wave_format_ex.size(), sizeof(WAVEFORMATEX));
+ EXPECT_EQ(wave_format_ex->cbSize, 0);
+
+ // Ensure that the extended structure has a valid format tag and that all
+ // accessors work.
+ format_ex.Format.wFormatTag = WAVE_FORMAT_EXTENSIBLE;
+ EXPECT_FALSE(wave_format_ex.IsExtensible());
+ EXPECT_EQ(wave_format_ex.size(), sizeof(WAVEFORMATEX));
+ EXPECT_EQ(wave_format_ex->wFormatTag, WAVE_FORMAT_EXTENSIBLE);
+ EXPECT_EQ(wave_format_ex.get()->wFormatTag, WAVE_FORMAT_EXTENSIBLE);
+
+ // Next, ensure that the size is valid (sum of stand-alone and extended).
+ // Now the structure qualifies as extended.
+ format_ex.Format.cbSize = sizeof(WAVEFORMATEXTENSIBLE) - sizeof(WAVEFORMATEX);
+ EXPECT_TRUE(wave_format_ex.IsExtensible());
+ EXPECT_EQ(wave_format_ex.size(), sizeof(WAVEFORMATEXTENSIBLE));
+ EXPECT_TRUE(wave_format_ex.GetExtensible());
+ EXPECT_EQ(wave_format_ex.GetExtensible()->Format.wFormatTag,
+ WAVE_FORMAT_EXTENSIBLE);
+
+ // Verify format types for the extended version.
+ EXPECT_FALSE(wave_format_ex.IsPcm());
+ format_ex.SubFormat = KSDATAFORMAT_SUBTYPE_PCM;
+ EXPECT_TRUE(wave_format_ex.IsPcm());
+ EXPECT_FALSE(wave_format_ex.IsFloat());
+ format_ex.SubFormat = KSDATAFORMAT_SUBTYPE_IEEE_FLOAT;
+ EXPECT_TRUE(wave_format_ex.IsFloat());
+}
+
+TEST_F(CoreAudioUtilityWinTest, NumberOfActiveDevices) {
+ ABORT_TEST_IF_NOT(DevicesAvailable());
+ int render_devices = core_audio_utility::NumberOfActiveDevices(eRender);
+ EXPECT_GT(render_devices, 0);
+ int capture_devices = core_audio_utility::NumberOfActiveDevices(eCapture);
+ EXPECT_GT(capture_devices, 0);
+ int total_devices = core_audio_utility::NumberOfActiveDevices(eAll);
+ EXPECT_EQ(total_devices, render_devices + capture_devices);
+}
+
+TEST_F(CoreAudioUtilityWinTest, GetAudioClientVersion) {
+ uint32_t client_version = core_audio_utility::GetAudioClientVersion();
+ EXPECT_GE(client_version, 1u);
+ EXPECT_LE(client_version, 3u);
+}
+
+TEST_F(CoreAudioUtilityWinTest, CreateDeviceEnumerator) {
+ ABORT_TEST_IF_NOT(DevicesAvailable());
+ ComPtr<IMMDeviceEnumerator> enumerator =
+ core_audio_utility::CreateDeviceEnumerator();
+ EXPECT_TRUE(enumerator.Get());
+}
+
+TEST_F(CoreAudioUtilityWinTest, GetDefaultInputDeviceID) {
+ ABORT_TEST_IF_NOT(DevicesAvailable());
+ std::string default_device_id = core_audio_utility::GetDefaultInputDeviceID();
+ EXPECT_FALSE(default_device_id.empty());
+}
+
+TEST_F(CoreAudioUtilityWinTest, GetDefaultOutputDeviceID) {
+ ABORT_TEST_IF_NOT(DevicesAvailable());
+ std::string default_device_id =
+ core_audio_utility::GetDefaultOutputDeviceID();
+ EXPECT_FALSE(default_device_id.empty());
+}
+
+TEST_F(CoreAudioUtilityWinTest, GetCommunicationsInputDeviceID) {
+ ABORT_TEST_IF_NOT(DevicesAvailable());
+ std::string default_device_id =
+ core_audio_utility::GetCommunicationsInputDeviceID();
+ EXPECT_FALSE(default_device_id.empty());
+}
+
+TEST_F(CoreAudioUtilityWinTest, GetCommunicationsOutputDeviceID) {
+ ABORT_TEST_IF_NOT(DevicesAvailable());
+ std::string default_device_id =
+ core_audio_utility::GetCommunicationsOutputDeviceID();
+ EXPECT_FALSE(default_device_id.empty());
+}
+
+TEST_F(CoreAudioUtilityWinTest, CreateDefaultDevice) {
+ ABORT_TEST_IF_NOT(DevicesAvailable());
+
+ struct {
+ EDataFlow flow;
+ ERole role;
+ } data[] = {{eRender, eConsole}, {eRender, eCommunications},
+ {eRender, eMultimedia}, {eCapture, eConsole},
+ {eCapture, eCommunications}, {eCapture, eMultimedia}};
+
+ // Create default devices for all flow/role combinations above.
+ ComPtr<IMMDevice> audio_device;
+ for (size_t i = 0; i < arraysize(data); ++i) {
+ audio_device = core_audio_utility::CreateDevice(
+ AudioDeviceName::kDefaultDeviceId, data[i].flow, data[i].role);
+ EXPECT_TRUE(audio_device.Get());
+ EXPECT_EQ(data[i].flow,
+ core_audio_utility::GetDataFlow(audio_device.Get()));
+ }
+
+ // Only eRender and eCapture are allowed as flow parameter.
+ audio_device = core_audio_utility::CreateDevice(
+ AudioDeviceName::kDefaultDeviceId, eAll, eConsole);
+ EXPECT_FALSE(audio_device.Get());
+}
+
+TEST_F(CoreAudioUtilityWinTest, CreateDevice) {
+ ABORT_TEST_IF_NOT(DevicesAvailable());
+
+ // Get name and ID of default device used for playback.
+ ComPtr<IMMDevice> default_render_device = core_audio_utility::CreateDevice(
+ AudioDeviceName::kDefaultDeviceId, eRender, eConsole);
+ AudioDeviceName default_render_name =
+ core_audio_utility::GetDeviceName(default_render_device.Get());
+ EXPECT_TRUE(default_render_name.IsValid());
+
+ // Use the unique ID as input to CreateDevice() and create a corresponding
+ // IMMDevice. The data-flow direction and role parameters are ignored for
+ // this scenario.
+ ComPtr<IMMDevice> audio_device = core_audio_utility::CreateDevice(
+ default_render_name.unique_id, EDataFlow(), ERole());
+ EXPECT_TRUE(audio_device.Get());
+
+  // Verify that the two IMMDevice interfaces represent the same endpoint
+ // by comparing their unique IDs.
+ AudioDeviceName device_name =
+ core_audio_utility::GetDeviceName(audio_device.Get());
+ EXPECT_EQ(default_render_name.unique_id, device_name.unique_id);
+}
+
+TEST_F(CoreAudioUtilityWinTest, GetDefaultDeviceName) {
+ ABORT_TEST_IF_NOT(DevicesAvailable());
+
+ struct {
+ EDataFlow flow;
+ ERole role;
+ } data[] = {{eRender, eConsole},
+ {eRender, eCommunications},
+ {eCapture, eConsole},
+ {eCapture, eCommunications}};
+
+ // Get name and ID of default devices for all flow/role combinations above.
+ ComPtr<IMMDevice> audio_device;
+ AudioDeviceName device_name;
+ for (size_t i = 0; i < arraysize(data); ++i) {
+ audio_device = core_audio_utility::CreateDevice(
+ AudioDeviceName::kDefaultDeviceId, data[i].flow, data[i].role);
+ device_name = core_audio_utility::GetDeviceName(audio_device.Get());
+ EXPECT_TRUE(device_name.IsValid());
+ }
+}
+
+TEST_F(CoreAudioUtilityWinTest, GetFriendlyName) {
+ ABORT_TEST_IF_NOT(DevicesAvailable());
+
+ // Get name and ID of default device used for recording.
+ ComPtr<IMMDevice> audio_device = core_audio_utility::CreateDevice(
+ AudioDeviceName::kDefaultDeviceId, eCapture, eConsole);
+ AudioDeviceName device_name =
+ core_audio_utility::GetDeviceName(audio_device.Get());
+ EXPECT_TRUE(device_name.IsValid());
+
+ // Use unique ID as input to GetFriendlyName() and compare the result
+ // with the already obtained friendly name for the default capture device.
+ std::string friendly_name = core_audio_utility::GetFriendlyName(
+ device_name.unique_id, eCapture, eConsole);
+ EXPECT_EQ(friendly_name, device_name.device_name);
+
+ // Same test as above but for playback.
+ audio_device = core_audio_utility::CreateDevice(
+ AudioDeviceName::kDefaultDeviceId, eRender, eConsole);
+ device_name = core_audio_utility::GetDeviceName(audio_device.Get());
+ friendly_name = core_audio_utility::GetFriendlyName(device_name.unique_id,
+ eRender, eConsole);
+ EXPECT_EQ(friendly_name, device_name.device_name);
+}
+
+TEST_F(CoreAudioUtilityWinTest, GetInputDeviceNames) {
+ ABORT_TEST_IF_NOT(DevicesAvailable());
+
+ webrtc::AudioDeviceNames device_names;
+ EXPECT_TRUE(core_audio_utility::GetInputDeviceNames(&device_names));
+  // The number of elements in the list should be two more than the number of
+  // active devices since we always add the default and the default
+  // communication devices at indices 0 and 1.
+ EXPECT_EQ(static_cast<int>(device_names.size()),
+ 2 + core_audio_utility::NumberOfActiveDevices(eCapture));
+}
+
+TEST_F(CoreAudioUtilityWinTest, GetOutputDeviceNames) {
+ ABORT_TEST_IF_NOT(DevicesAvailable());
+
+ webrtc::AudioDeviceNames device_names;
+ EXPECT_TRUE(core_audio_utility::GetOutputDeviceNames(&device_names));
+  // The number of elements in the list should be two more than the number of
+  // active devices since we always add the default and the default
+  // communication devices at indices 0 and 1.
+ EXPECT_EQ(static_cast<int>(device_names.size()),
+ 2 + core_audio_utility::NumberOfActiveDevices(eRender));
+}
+
+TEST_F(CoreAudioUtilityWinTest, CreateSessionManager2) {
+ ABORT_TEST_IF_NOT(DevicesAvailable() &&
+ rtc::rtc_win::GetVersion() >= rtc::rtc_win::VERSION_WIN7);
+
+ EDataFlow data_flow[] = {eRender, eCapture};
+
+ // Obtain reference to an IAudioSessionManager2 interface for a default audio
+ // endpoint device specified by two different data flows and the `eConsole`
+ // role.
+ for (size_t i = 0; i < arraysize(data_flow); ++i) {
+ ComPtr<IMMDevice> device(core_audio_utility::CreateDevice(
+ AudioDeviceName::kDefaultDeviceId, data_flow[i], eConsole));
+ EXPECT_TRUE(device.Get());
+ ComPtr<IAudioSessionManager2> session_manager =
+ core_audio_utility::CreateSessionManager2(device.Get());
+ EXPECT_TRUE(session_manager.Get());
+ }
+}
+
+TEST_F(CoreAudioUtilityWinTest, CreateSessionEnumerator) {
+ ABORT_TEST_IF_NOT(DevicesAvailable() &&
+ rtc::rtc_win::GetVersion() >= rtc::rtc_win::VERSION_WIN7);
+
+ EDataFlow data_flow[] = {eRender, eCapture};
+
+ // Obtain reference to an IAudioSessionEnumerator interface for a default
+ // audio endpoint device specified by two different data flows and the
+ // `eConsole` role.
+ for (size_t i = 0; i < arraysize(data_flow); ++i) {
+ ComPtr<IMMDevice> device(core_audio_utility::CreateDevice(
+ AudioDeviceName::kDefaultDeviceId, data_flow[i], eConsole));
+ EXPECT_TRUE(device.Get());
+ ComPtr<IAudioSessionEnumerator> session_enumerator =
+ core_audio_utility::CreateSessionEnumerator(device.Get());
+ EXPECT_TRUE(session_enumerator.Get());
+
+    // Perform a sanity test of the interface by asking for the total number
+    // of audio sessions that are open on the audio device. Note that we do
+    // not check whether the sessions are active.
+ int session_count = 0;
+ EXPECT_TRUE(SUCCEEDED(session_enumerator->GetCount(&session_count)));
+ EXPECT_GE(session_count, 0);
+ }
+}
+
+TEST_F(CoreAudioUtilityWinTest, NumberOfActiveSessions) {
+ ABORT_TEST_IF_NOT(DevicesAvailable() &&
+ rtc::rtc_win::GetVersion() >= rtc::rtc_win::VERSION_WIN7);
+
+ EDataFlow data_flow[] = {eRender, eCapture};
+
+  // Count the number of active audio sessions for a default audio endpoint
+  // device specified by two different data flows and the `eConsole` role.
+ // Ensure that the number of active audio sessions is less than or equal to
+ // the total number of audio sessions on that same device.
+ for (size_t i = 0; i < arraysize(data_flow); ++i) {
+ // Create an audio endpoint device.
+ ComPtr<IMMDevice> device(core_audio_utility::CreateDevice(
+ AudioDeviceName::kDefaultDeviceId, data_flow[i], eConsole));
+ EXPECT_TRUE(device.Get());
+
+ // Ask for total number of audio sessions on the created device.
+ ComPtr<IAudioSessionEnumerator> session_enumerator =
+ core_audio_utility::CreateSessionEnumerator(device.Get());
+ EXPECT_TRUE(session_enumerator.Get());
+ int total_session_count = 0;
+ EXPECT_TRUE(SUCCEEDED(session_enumerator->GetCount(&total_session_count)));
+ EXPECT_GE(total_session_count, 0);
+
+ // Use NumberOfActiveSessions and get number of active audio sessions.
+ int active_session_count =
+ core_audio_utility::NumberOfActiveSessions(device.Get());
+ EXPECT_LE(active_session_count, total_session_count);
+ }
+}
+
+TEST_F(CoreAudioUtilityWinTest, CreateClient) {
+ ABORT_TEST_IF_NOT(DevicesAvailable());
+
+ EDataFlow data_flow[] = {eRender, eCapture};
+
+ // Obtain reference to an IAudioClient interface for a default audio endpoint
+ // device specified by two different data flows and the `eConsole` role.
+ for (size_t i = 0; i < arraysize(data_flow); ++i) {
+ ComPtr<IAudioClient> client = core_audio_utility::CreateClient(
+ AudioDeviceName::kDefaultDeviceId, data_flow[i], eConsole);
+ EXPECT_TRUE(client.Get());
+ }
+}
+
+TEST_F(CoreAudioUtilityWinTest, CreateClient2) {
+ ABORT_TEST_IF_NOT(DevicesAvailable() &&
+ core_audio_utility::GetAudioClientVersion() >= 2);
+
+ EDataFlow data_flow[] = {eRender, eCapture};
+
+ // Obtain reference to an IAudioClient2 interface for a default audio endpoint
+ // device specified by two different data flows and the `eConsole` role.
+ for (size_t i = 0; i < arraysize(data_flow); ++i) {
+ ComPtr<IAudioClient2> client2 = core_audio_utility::CreateClient2(
+ AudioDeviceName::kDefaultDeviceId, data_flow[i], eConsole);
+ EXPECT_TRUE(client2.Get());
+ }
+}
+
+TEST_F(CoreAudioUtilityWinTest, CreateClient3) {
+ ABORT_TEST_IF_NOT(DevicesAvailable() &&
+ core_audio_utility::GetAudioClientVersion() >= 3);
+
+ EDataFlow data_flow[] = {eRender, eCapture};
+
+ // Obtain reference to an IAudioClient3 interface for a default audio endpoint
+ // device specified by two different data flows and the `eConsole` role.
+ for (size_t i = 0; i < arraysize(data_flow); ++i) {
+ ComPtr<IAudioClient3> client3 = core_audio_utility::CreateClient3(
+ AudioDeviceName::kDefaultDeviceId, data_flow[i], eConsole);
+ EXPECT_TRUE(client3.Get());
+ }
+}
+
+TEST_F(CoreAudioUtilityWinTest, SetClientProperties) {
+ ABORT_TEST_IF_NOT(DevicesAvailable() &&
+ core_audio_utility::GetAudioClientVersion() >= 2);
+
+ ComPtr<IAudioClient2> client2 = core_audio_utility::CreateClient2(
+ AudioDeviceName::kDefaultDeviceId, eRender, eConsole);
+ EXPECT_TRUE(client2.Get());
+ EXPECT_TRUE(
+ SUCCEEDED(core_audio_utility::SetClientProperties(client2.Get())));
+
+ ComPtr<IAudioClient3> client3 = core_audio_utility::CreateClient3(
+ AudioDeviceName::kDefaultDeviceId, eRender, eConsole);
+ EXPECT_TRUE(client3.Get());
+ EXPECT_TRUE(
+ SUCCEEDED(core_audio_utility::SetClientProperties(client3.Get())));
+}
+
+TEST_F(CoreAudioUtilityWinTest, GetSharedModeEnginePeriod) {
+ ABORT_TEST_IF_NOT(DevicesAvailable() &&
+ core_audio_utility::GetAudioClientVersion() >= 3);
+
+ ComPtr<IAudioClient3> client3 = core_audio_utility::CreateClient3(
+ AudioDeviceName::kDefaultDeviceId, eRender, eConsole);
+ EXPECT_TRUE(client3.Get());
+
+ WAVEFORMATPCMEX format;
+ EXPECT_TRUE(SUCCEEDED(
+ core_audio_utility::GetSharedModeMixFormat(client3.Get(), &format)));
+
+ uint32_t default_period = 0;
+ uint32_t fundamental_period = 0;
+ uint32_t min_period = 0;
+ uint32_t max_period = 0;
+ EXPECT_TRUE(SUCCEEDED(core_audio_utility::GetSharedModeEnginePeriod(
+ client3.Get(), &format, &default_period, &fundamental_period, &min_period,
+ &max_period)));
+}
+
+// TODO(henrika): figure out why usage of this API always reports
+// AUDCLNT_E_OFFLOAD_MODE_ONLY.
+TEST_F(CoreAudioUtilityWinTest, DISABLED_GetBufferSizeLimits) {
+ ABORT_TEST_IF_NOT(DevicesAvailable() &&
+ core_audio_utility::GetAudioClientVersion() >= 2);
+
+ ComPtr<IAudioClient2> client2 = core_audio_utility::CreateClient2(
+ AudioDeviceName::kDefaultDeviceId, eRender, eConsole);
+ EXPECT_TRUE(client2.Get());
+
+ WAVEFORMATPCMEX format;
+ EXPECT_TRUE(SUCCEEDED(
+ core_audio_utility::GetSharedModeMixFormat(client2.Get(), &format)));
+
+ REFERENCE_TIME min_buffer_duration = 0;
+ REFERENCE_TIME max_buffer_duration = 0;
+ EXPECT_TRUE(SUCCEEDED(core_audio_utility::GetBufferSizeLimits(
+ client2.Get(), &format, &min_buffer_duration, &max_buffer_duration)));
+}
+
+TEST_F(CoreAudioUtilityWinTest, GetSharedModeMixFormat) {
+ ABORT_TEST_IF_NOT(DevicesAvailable());
+
+ ComPtr<IAudioClient> client = core_audio_utility::CreateClient(
+ AudioDeviceName::kDefaultDeviceId, eRender, eConsole);
+ EXPECT_TRUE(client.Get());
+
+ // Perform a simple sanity test of the acquired format structure.
+ WAVEFORMATEXTENSIBLE format;
+ EXPECT_TRUE(SUCCEEDED(
+ core_audio_utility::GetSharedModeMixFormat(client.Get(), &format)));
+ core_audio_utility::WaveFormatWrapper wformat(&format);
+ EXPECT_GE(wformat->nChannels, 1);
+ EXPECT_GE(wformat->nSamplesPerSec, 8000u);
+ EXPECT_GE(wformat->wBitsPerSample, 16);
+ if (wformat.IsExtensible()) {
+ EXPECT_EQ(wformat->wFormatTag, WAVE_FORMAT_EXTENSIBLE);
+ EXPECT_GE(wformat->cbSize, 22);
+ EXPECT_GE(wformat.GetExtensible()->Samples.wValidBitsPerSample, 16);
+ } else {
+ EXPECT_EQ(wformat->cbSize, 0);
+ }
+}
+
+TEST_F(CoreAudioUtilityWinTest, IsFormatSupported) {
+ ABORT_TEST_IF_NOT(DevicesAvailable());
+
+ // Create a default render client.
+ ComPtr<IAudioClient> client = core_audio_utility::CreateClient(
+ AudioDeviceName::kDefaultDeviceId, eRender, eConsole);
+ EXPECT_TRUE(client.Get());
+
+ // Get the default, shared mode, mixing format.
+ WAVEFORMATEXTENSIBLE format;
+ EXPECT_TRUE(SUCCEEDED(
+ core_audio_utility::GetSharedModeMixFormat(client.Get(), &format)));
+
+ // In shared mode, the audio engine always supports the mix format.
+ EXPECT_TRUE(core_audio_utility::IsFormatSupported(
+ client.Get(), AUDCLNT_SHAREMODE_SHARED, &format));
+
+ // Use an invalid format and verify that it is not supported.
+ format.Format.nSamplesPerSec += 1;
+ EXPECT_FALSE(core_audio_utility::IsFormatSupported(
+ client.Get(), AUDCLNT_SHAREMODE_SHARED, &format));
+}
+
+TEST_F(CoreAudioUtilityWinTest, GetDevicePeriod) {
+ ABORT_TEST_IF_NOT(DevicesAvailable());
+
+ EDataFlow data_flow[] = {eRender, eCapture};
+
+ // Verify that the device periods are valid for the default render and
+ // capture devices.
+ ComPtr<IAudioClient> client;
+ for (size_t i = 0; i < arraysize(data_flow); ++i) {
+ REFERENCE_TIME shared_time_period = 0;
+ REFERENCE_TIME exclusive_time_period = 0;
+ client = core_audio_utility::CreateClient(AudioDeviceName::kDefaultDeviceId,
+ data_flow[i], eConsole);
+ EXPECT_TRUE(client.Get());
+ EXPECT_TRUE(SUCCEEDED(core_audio_utility::GetDevicePeriod(
+ client.Get(), AUDCLNT_SHAREMODE_SHARED, &shared_time_period)));
+ EXPECT_GT(shared_time_period, 0);
+ EXPECT_TRUE(SUCCEEDED(core_audio_utility::GetDevicePeriod(
+ client.Get(), AUDCLNT_SHAREMODE_EXCLUSIVE, &exclusive_time_period)));
+ EXPECT_GT(exclusive_time_period, 0);
+ EXPECT_LE(exclusive_time_period, shared_time_period);
+ }
+}
+
+TEST_F(CoreAudioUtilityWinTest, GetPreferredAudioParameters) {
+ ABORT_TEST_IF_NOT(DevicesAvailable());
+
+ struct {
+ EDataFlow flow;
+ ERole role;
+ } data[] = {{eRender, eConsole},
+ {eRender, eCommunications},
+ {eCapture, eConsole},
+ {eCapture, eCommunications}};
+
+ // Verify that the preferred audio parameters are OK for all flow/role
+ // combinations above.
+ ComPtr<IAudioClient> client;
+ webrtc::AudioParameters params;
+ for (size_t i = 0; i < arraysize(data); ++i) {
+ client = core_audio_utility::CreateClient(AudioDeviceName::kDefaultDeviceId,
+ data[i].flow, data[i].role);
+ EXPECT_TRUE(client.Get());
+ EXPECT_TRUE(SUCCEEDED(core_audio_utility::GetPreferredAudioParameters(
+ client.Get(), &params)));
+ EXPECT_TRUE(params.is_valid());
+ EXPECT_TRUE(params.is_complete());
+ }
+}
+
+TEST_F(CoreAudioUtilityWinTest, SharedModeInitialize) {
+ ABORT_TEST_IF_NOT(DevicesAvailable());
+
+ ComPtr<IAudioClient> client;
+ client = core_audio_utility::CreateClient(AudioDeviceName::kDefaultDeviceId,
+ eRender, eConsole);
+ EXPECT_TRUE(client.Get());
+
+ WAVEFORMATPCMEX format;
+ EXPECT_TRUE(SUCCEEDED(
+ core_audio_utility::GetSharedModeMixFormat(client.Get(), &format)));
+
+ // Perform a shared-mode initialization without event-driven buffer handling.
+ uint32_t endpoint_buffer_size = 0;
+ HRESULT hr = core_audio_utility::SharedModeInitialize(
+ client.Get(), &format, nullptr, 0, false, &endpoint_buffer_size);
+ EXPECT_TRUE(SUCCEEDED(hr));
+ EXPECT_GT(endpoint_buffer_size, 0u);
+
+  // It is only possible to initialize a client once.
+ hr = core_audio_utility::SharedModeInitialize(
+ client.Get(), &format, nullptr, 0, false, &endpoint_buffer_size);
+ EXPECT_FALSE(SUCCEEDED(hr));
+ EXPECT_EQ(hr, AUDCLNT_E_ALREADY_INITIALIZED);
+
+ // Verify that it is possible to reinitialize the client after releasing it
+ // and then creating a new client.
+ client = core_audio_utility::CreateClient(AudioDeviceName::kDefaultDeviceId,
+ eRender, eConsole);
+ EXPECT_TRUE(client.Get());
+ hr = core_audio_utility::SharedModeInitialize(
+ client.Get(), &format, nullptr, 0, false, &endpoint_buffer_size);
+ EXPECT_TRUE(SUCCEEDED(hr));
+ EXPECT_GT(endpoint_buffer_size, 0u);
+
+ // Use a non-supported format and verify that initialization fails.
+ // A simple way to emulate an invalid format is to use the shared-mode
+ // mixing format and modify the preferred sample rate.
+ client = core_audio_utility::CreateClient(AudioDeviceName::kDefaultDeviceId,
+ eRender, eConsole);
+ EXPECT_TRUE(client.Get());
+ format.Format.nSamplesPerSec = format.Format.nSamplesPerSec + 1;
+ EXPECT_FALSE(core_audio_utility::IsFormatSupported(
+ client.Get(), AUDCLNT_SHAREMODE_SHARED, &format));
+ hr = core_audio_utility::SharedModeInitialize(
+ client.Get(), &format, nullptr, 0, false, &endpoint_buffer_size);
+ EXPECT_TRUE(FAILED(hr));
+ EXPECT_EQ(hr, E_INVALIDARG);
+
+ // Finally, perform a shared-mode initialization using event-driven buffer
+ // handling. The event handle will be signaled when an audio buffer is ready
+ // to be processed by the client (not verified here). The event handle should
+ // be in the non-signaled state.
+ ScopedHandle event_handle(::CreateEvent(nullptr, TRUE, FALSE, nullptr));
+ client = core_audio_utility::CreateClient(AudioDeviceName::kDefaultDeviceId,
+ eRender, eConsole);
+ EXPECT_TRUE(client.Get());
+ EXPECT_TRUE(SUCCEEDED(
+ core_audio_utility::GetSharedModeMixFormat(client.Get(), &format)));
+ EXPECT_TRUE(core_audio_utility::IsFormatSupported(
+ client.Get(), AUDCLNT_SHAREMODE_SHARED, &format));
+ hr = core_audio_utility::SharedModeInitialize(
+ client.Get(), &format, event_handle, 0, false, &endpoint_buffer_size);
+ EXPECT_TRUE(SUCCEEDED(hr));
+ EXPECT_GT(endpoint_buffer_size, 0u);
+
+ // TODO(henrika): possibly add test for signature which overrides the default
+ // sample rate.
+}
+
+TEST_F(CoreAudioUtilityWinTest, CreateRenderAndCaptureClients) {
+ ABORT_TEST_IF_NOT(DevicesAvailable());
+
+ EDataFlow data_flow[] = {eRender, eCapture};
+
+ WAVEFORMATPCMEX format;
+ uint32_t endpoint_buffer_size = 0;
+
+ for (size_t i = 0; i < arraysize(data_flow); ++i) {
+ ComPtr<IAudioClient> client;
+ ComPtr<IAudioRenderClient> render_client;
+ ComPtr<IAudioCaptureClient> capture_client;
+
+ // Create a default client for the given data-flow direction.
+ client = core_audio_utility::CreateClient(AudioDeviceName::kDefaultDeviceId,
+ data_flow[i], eConsole);
+ EXPECT_TRUE(client.Get());
+ EXPECT_TRUE(SUCCEEDED(
+ core_audio_utility::GetSharedModeMixFormat(client.Get(), &format)));
+ if (data_flow[i] == eRender) {
+      // It is not possible to create a render client using an uninitialized
+      // client interface.
+ render_client = core_audio_utility::CreateRenderClient(client.Get());
+ EXPECT_FALSE(render_client.Get());
+
+ // Do a proper initialization and verify that it works this time.
+ core_audio_utility::SharedModeInitialize(client.Get(), &format, nullptr,
+ 0, false, &endpoint_buffer_size);
+ render_client = core_audio_utility::CreateRenderClient(client.Get());
+ EXPECT_TRUE(render_client.Get());
+ EXPECT_GT(endpoint_buffer_size, 0u);
+ } else if (data_flow[i] == eCapture) {
+      // It is not possible to create a capture client using an uninitialized
+      // client interface.
+ capture_client = core_audio_utility::CreateCaptureClient(client.Get());
+ EXPECT_FALSE(capture_client.Get());
+
+ // Do a proper initialization and verify that it works this time.
+ core_audio_utility::SharedModeInitialize(client.Get(), &format, nullptr,
+ 0, false, &endpoint_buffer_size);
+ capture_client = core_audio_utility::CreateCaptureClient(client.Get());
+ EXPECT_TRUE(capture_client.Get());
+ EXPECT_GT(endpoint_buffer_size, 0u);
+ }
+ }
+}
+
+TEST_F(CoreAudioUtilityWinTest, CreateAudioClock) {
+ ABORT_TEST_IF_NOT(DevicesAvailable());
+
+ EDataFlow data_flow[] = {eRender, eCapture};
+
+ WAVEFORMATPCMEX format;
+ uint32_t endpoint_buffer_size = 0;
+
+ for (size_t i = 0; i < arraysize(data_flow); ++i) {
+ ComPtr<IAudioClient> client;
+ ComPtr<IAudioClock> audio_clock;
+
+ // Create a default client for the given data-flow direction.
+ client = core_audio_utility::CreateClient(AudioDeviceName::kDefaultDeviceId,
+ data_flow[i], eConsole);
+ EXPECT_TRUE(client.Get());
+ EXPECT_TRUE(SUCCEEDED(
+ core_audio_utility::GetSharedModeMixFormat(client.Get(), &format)));
+
+    // It is not possible to create an audio clock using an uninitialized
+    // client interface.
+ audio_clock = core_audio_utility::CreateAudioClock(client.Get());
+ EXPECT_FALSE(audio_clock.Get());
+
+ // Do a proper initialization and verify that it works this time.
+ core_audio_utility::SharedModeInitialize(client.Get(), &format, nullptr, 0,
+ false, &endpoint_buffer_size);
+ audio_clock = core_audio_utility::CreateAudioClock(client.Get());
+ EXPECT_TRUE(audio_clock.Get());
+ EXPECT_GT(endpoint_buffer_size, 0u);
+
+ // Use the audio clock and verify that querying the device frequency works.
+ UINT64 frequency = 0;
+ EXPECT_TRUE(SUCCEEDED(audio_clock->GetFrequency(&frequency)));
+ EXPECT_GT(frequency, 0u);
+ }
+}
+
+TEST_F(CoreAudioUtilityWinTest, CreateAudioSessionControl) {
+ ABORT_TEST_IF_NOT(DevicesAvailable());
+
+ EDataFlow data_flow[] = {eRender, eCapture};
+
+ WAVEFORMATPCMEX format;
+ uint32_t endpoint_buffer_size = 0;
+
+ for (size_t i = 0; i < arraysize(data_flow); ++i) {
+ ComPtr<IAudioClient> client;
+ ComPtr<IAudioSessionControl> audio_session_control;
+
+ // Create a default client for the given data-flow direction.
+ client = core_audio_utility::CreateClient(AudioDeviceName::kDefaultDeviceId,
+ data_flow[i], eConsole);
+ EXPECT_TRUE(client.Get());
+ EXPECT_TRUE(SUCCEEDED(
+ core_audio_utility::GetSharedModeMixFormat(client.Get(), &format)));
+
+    // It is not possible to create an audio session control using an
+    // uninitialized client interface.
+ audio_session_control =
+ core_audio_utility::CreateAudioSessionControl(client.Get());
+ EXPECT_FALSE(audio_session_control.Get());
+
+ // Do a proper initialization and verify that it works this time.
+ core_audio_utility::SharedModeInitialize(client.Get(), &format, nullptr, 0,
+ false, &endpoint_buffer_size);
+ audio_session_control =
+ core_audio_utility::CreateAudioSessionControl(client.Get());
+ EXPECT_TRUE(audio_session_control.Get());
+ EXPECT_GT(endpoint_buffer_size, 0u);
+
+ // Use the audio session control and verify that the session state can be
+ // queried. When a client opens a session by assigning the first stream to
+ // the session (by calling the IAudioClient::Initialize method), the initial
+ // session state is inactive. The session state changes from inactive to
+ // active when a stream in the session begins running (because the client
+ // has called the IAudioClient::Start method).
+ AudioSessionState state;
+ EXPECT_TRUE(SUCCEEDED(audio_session_control->GetState(&state)));
+ EXPECT_EQ(state, AudioSessionStateInactive);
+ }
+}
+
+TEST_F(CoreAudioUtilityWinTest, CreateSimpleAudioVolume) {
+ ABORT_TEST_IF_NOT(DevicesAvailable());
+
+ EDataFlow data_flow[] = {eRender, eCapture};
+
+ WAVEFORMATPCMEX format;
+ uint32_t endpoint_buffer_size = 0;
+
+ for (size_t i = 0; i < arraysize(data_flow); ++i) {
+ ComPtr<IAudioClient> client;
+ ComPtr<ISimpleAudioVolume> simple_audio_volume;
+
+ // Create a default client for the given data-flow direction.
+ client = core_audio_utility::CreateClient(AudioDeviceName::kDefaultDeviceId,
+ data_flow[i], eConsole);
+ EXPECT_TRUE(client.Get());
+ EXPECT_TRUE(SUCCEEDED(
+ core_audio_utility::GetSharedModeMixFormat(client.Get(), &format)));
+
+ // It is not possible to create an audio volume using an uninitialized
+ // client interface.
+ simple_audio_volume =
+ core_audio_utility::CreateSimpleAudioVolume(client.Get());
+ EXPECT_FALSE(simple_audio_volume.Get());
+
+ // Do a proper initialization and verify that it works this time.
+ core_audio_utility::SharedModeInitialize(client.Get(), &format, nullptr, 0,
+ false, &endpoint_buffer_size);
+ simple_audio_volume =
+ core_audio_utility::CreateSimpleAudioVolume(client.Get());
+ EXPECT_TRUE(simple_audio_volume.Get());
+ EXPECT_GT(endpoint_buffer_size, 0u);
+
+    // Use the audio volume interface and validate that it works. The volume
+    // level should be a value in the range 0.0 to 1.0 at the first call.
+ float volume = 0.0;
+ EXPECT_TRUE(SUCCEEDED(simple_audio_volume->GetMasterVolume(&volume)));
+ EXPECT_GE(volume, 0.0);
+ EXPECT_LE(volume, 1.0);
+
+ // Next, set a new volume and verify that the setter does its job.
+ const float target_volume = 0.5;
+ EXPECT_TRUE(SUCCEEDED(
+ simple_audio_volume->SetMasterVolume(target_volume, nullptr)));
+ EXPECT_TRUE(SUCCEEDED(simple_audio_volume->GetMasterVolume(&volume)));
+ EXPECT_EQ(volume, target_volume);
+ }
+}
+
+TEST_F(CoreAudioUtilityWinTest, FillRenderEndpointBufferWithSilence) {
+ ABORT_TEST_IF_NOT(DevicesAvailable());
+
+  // Create a default render client using the default mixing format for
+  // shared mode.
+ ComPtr<IAudioClient> client(core_audio_utility::CreateClient(
+ AudioDeviceName::kDefaultDeviceId, eRender, eConsole));
+ EXPECT_TRUE(client.Get());
+
+ WAVEFORMATPCMEX format;
+ uint32_t endpoint_buffer_size = 0;
+ EXPECT_TRUE(SUCCEEDED(
+ core_audio_utility::GetSharedModeMixFormat(client.Get(), &format)));
+ core_audio_utility::SharedModeInitialize(client.Get(), &format, nullptr, 0,
+ false, &endpoint_buffer_size);
+ EXPECT_GT(endpoint_buffer_size, 0u);
+
+ ComPtr<IAudioRenderClient> render_client(
+ core_audio_utility::CreateRenderClient(client.Get()));
+ EXPECT_TRUE(render_client.Get());
+
+ // The endpoint audio buffer should not be filled up by default after being
+ // created.
+ UINT32 num_queued_frames = 0;
+ client->GetCurrentPadding(&num_queued_frames);
+ EXPECT_EQ(num_queued_frames, 0u);
+
+ // Fill it up with zeros and verify that the buffer is full.
+ // It is not possible to verify that the actual data consists of zeros
+ // since we can't access data that has already been sent to the endpoint
+ // buffer.
+ EXPECT_TRUE(core_audio_utility::FillRenderEndpointBufferWithSilence(
+ client.Get(), render_client.Get()));
+ client->GetCurrentPadding(&num_queued_frames);
+ EXPECT_EQ(num_queued_frames, endpoint_buffer_size);
+}
+
+} // namespace webrtc_win
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_mixer/BUILD.gn b/third_party/libwebrtc/modules/audio_mixer/BUILD.gn
new file mode 100644
index 0000000000..1196835fec
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_mixer/BUILD.gn
@@ -0,0 +1,143 @@
+# Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+import("../../webrtc.gni")
+
+group("audio_mixer") {
+ deps = [
+ ":audio_frame_manipulator",
+ ":audio_mixer_impl",
+ ]
+}
+
+rtc_library("audio_mixer_impl") {
+ visibility = [ "*" ]
+ sources = [
+ "audio_mixer_impl.cc",
+ "audio_mixer_impl.h",
+ "default_output_rate_calculator.cc",
+ "default_output_rate_calculator.h",
+ "frame_combiner.cc",
+ "frame_combiner.h",
+ "output_rate_calculator.h",
+ ]
+
+ public = [
+ "audio_mixer_impl.h",
+ "default_output_rate_calculator.h", # For creating a mixer with limiter
+ # disabled.
+ "frame_combiner.h",
+ ]
+
+ configs += [ "../audio_processing:apm_debug_dump" ]
+
+ deps = [
+ ":audio_frame_manipulator",
+ "../../api:array_view",
+ "../../api:rtp_packet_info",
+ "../../api:scoped_refptr",
+ "../../api/audio:audio_frame_api",
+ "../../api/audio:audio_mixer_api",
+ "../../audio/utility:audio_frame_operations",
+ "../../common_audio",
+ "../../rtc_base:checks",
+ "../../rtc_base:logging",
+ "../../rtc_base:macromagic",
+ "../../rtc_base:race_checker",
+ "../../rtc_base:refcount",
+ "../../rtc_base:safe_conversions",
+ "../../rtc_base/synchronization:mutex",
+ "../../system_wrappers",
+ "../../system_wrappers:metrics",
+ "../audio_processing:api",
+ "../audio_processing:apm_logging",
+ "../audio_processing:audio_frame_view",
+ "../audio_processing/agc2:fixed_digital",
+ ]
+}
+
+rtc_library("audio_frame_manipulator") {
+ visibility = [
+ ":*",
+ "../../modules:*",
+ ]
+
+ sources = [
+ "audio_frame_manipulator.cc",
+ "audio_frame_manipulator.h",
+ ]
+
+ deps = [
+ "../../api/audio:audio_frame_api",
+ "../../audio/utility:audio_frame_operations",
+ "../../rtc_base:checks",
+ ]
+}
+
+if (rtc_include_tests) {
+ rtc_library("audio_mixer_test_utils") {
+ testonly = true
+
+ sources = [
+ "gain_change_calculator.cc",
+ "gain_change_calculator.h",
+ "sine_wave_generator.cc",
+ "sine_wave_generator.h",
+ ]
+
+ deps = [
+ ":audio_frame_manipulator",
+ ":audio_mixer_impl",
+ "../../api:array_view",
+ "../../api/audio:audio_frame_api",
+ "../../rtc_base:checks",
+ "../../rtc_base:safe_conversions",
+ ]
+ }
+
+ rtc_library("audio_mixer_unittests") {
+ testonly = true
+
+ sources = [
+ "audio_frame_manipulator_unittest.cc",
+ "audio_mixer_impl_unittest.cc",
+ "frame_combiner_unittest.cc",
+ ]
+ absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ]
+ deps = [
+ ":audio_frame_manipulator",
+ ":audio_mixer_impl",
+ ":audio_mixer_test_utils",
+ "../../api:array_view",
+ "../../api:rtp_packet_info",
+ "../../api/audio:audio_mixer_api",
+ "../../api/units:timestamp",
+ "../../audio/utility:audio_frame_operations",
+ "../../rtc_base:checks",
+ "../../rtc_base:stringutils",
+ "../../rtc_base:task_queue_for_test",
+ "../../test:test_support",
+ ]
+ }
+
+ if (!build_with_chromium) {
+ rtc_executable("audio_mixer_test") {
+ testonly = true
+ sources = [ "audio_mixer_test.cc" ]
+
+ deps = [
+ ":audio_mixer_impl",
+ "../../api/audio:audio_mixer_api",
+ "../../common_audio",
+ "../../rtc_base:stringutils",
+ "//third_party/abseil-cpp/absl/flags:flag",
+ "//third_party/abseil-cpp/absl/flags:parse",
+ ]
+ }
+ }
+}
diff --git a/third_party/libwebrtc/modules/audio_mixer/DEPS b/third_party/libwebrtc/modules/audio_mixer/DEPS
new file mode 100644
index 0000000000..46f29bccf8
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_mixer/DEPS
@@ -0,0 +1,13 @@
+include_rules = [
+ "+audio/utility/audio_frame_operations.h",
+ "+audio/utility/channel_mixer.h",
+ "+call",
+ "+common_audio",
+ "+modules/audio_coding",
+ "+modules/audio_device",
+ "+modules/audio_processing",
+ "+modules/pacing",
+ "+modules/rtp_rtcp",
+ "+modules/utility",
+ "+system_wrappers",
+]
diff --git a/third_party/libwebrtc/modules/audio_mixer/OWNERS b/third_party/libwebrtc/modules/audio_mixer/OWNERS
new file mode 100644
index 0000000000..5edc304ab3
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_mixer/OWNERS
@@ -0,0 +1,2 @@
+alessiob@webrtc.org
+henrik.lundin@webrtc.org
diff --git a/third_party/libwebrtc/modules/audio_mixer/audio_frame_manipulator.cc b/third_party/libwebrtc/modules/audio_mixer/audio_frame_manipulator.cc
new file mode 100644
index 0000000000..3100271cfb
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_mixer/audio_frame_manipulator.cc
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_mixer/audio_frame_manipulator.h"
+
+#include "audio/utility/audio_frame_operations.h"
+#include "audio/utility/channel_mixer.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+uint32_t AudioMixerCalculateEnergy(const AudioFrame& audio_frame) {
+ if (audio_frame.muted()) {
+ return 0;
+ }
+
+ uint32_t energy = 0;
+ const int16_t* frame_data = audio_frame.data();
+ for (size_t position = 0;
+ position < audio_frame.samples_per_channel_ * audio_frame.num_channels_;
+ position++) {
+ // TODO(aleloi): This can overflow. Convert to floats.
+ energy += frame_data[position] * frame_data[position];
+ }
+ return energy;
+}
+
+void Ramp(float start_gain, float target_gain, AudioFrame* audio_frame) {
+ RTC_DCHECK(audio_frame);
+ RTC_DCHECK_GE(start_gain, 0.0f);
+ RTC_DCHECK_GE(target_gain, 0.0f);
+ if (start_gain == target_gain || audio_frame->muted()) {
+ return;
+ }
+
+ size_t samples = audio_frame->samples_per_channel_;
+ RTC_DCHECK_LT(0, samples);
+ float increment = (target_gain - start_gain) / samples;
+ float gain = start_gain;
+ int16_t* frame_data = audio_frame->mutable_data();
+ for (size_t i = 0; i < samples; ++i) {
+    // If the audio consists of several interleaved channels, we want to
+    // apply the same gain change to the i-th sample of every channel.
+ for (size_t ch = 0; ch < audio_frame->num_channels_; ++ch) {
+ frame_data[audio_frame->num_channels_ * i + ch] *= gain;
+ }
+ gain += increment;
+ }
+}
+
+void RemixFrame(size_t target_number_of_channels, AudioFrame* frame) {
+ RTC_DCHECK_GE(target_number_of_channels, 1);
+ // TODO(bugs.webrtc.org/10783): take channel layout into account as well.
+ if (frame->num_channels() == target_number_of_channels) {
+ return;
+ }
+
+  // Use legacy components for the simplest cases (mono <-> stereo) to ensure
+  // that native WebRTC clients are not affected when support for multi-channel
+  // audio is added to Chrome.
+ // TODO(bugs.webrtc.org/10783): utilize channel mixer for mono/stereo as well.
+ if (target_number_of_channels < 3 && frame->num_channels() < 3) {
+ if (frame->num_channels() > target_number_of_channels) {
+ AudioFrameOperations::DownmixChannels(target_number_of_channels, frame);
+ } else {
+ AudioFrameOperations::UpmixChannels(target_number_of_channels, frame);
+ }
+ } else {
+    // Use the generic channel mixer when the number of channels for input or
+    // output is larger than two, e.g. stereo -> 5.1 channel up-mixing.
+ // TODO(bugs.webrtc.org/10783): ensure that actual channel layouts are used
+ // instead of guessing based on number of channels.
+ const ChannelLayout output_layout(
+ GuessChannelLayout(target_number_of_channels));
+ ChannelMixer mixer(GuessChannelLayout(frame->num_channels()),
+ output_layout);
+ mixer.Transform(frame);
+ RTC_DCHECK_EQ(frame->channel_layout(), output_layout);
+ }
+ RTC_DCHECK_EQ(frame->num_channels(), target_number_of_channels)
+ << "Wrong number of channels, " << frame->num_channels() << " vs "
+ << target_number_of_channels;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_mixer/audio_frame_manipulator.h b/third_party/libwebrtc/modules/audio_mixer/audio_frame_manipulator.h
new file mode 100644
index 0000000000..ab3633d266
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_mixer/audio_frame_manipulator.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_MIXER_AUDIO_FRAME_MANIPULATOR_H_
+#define MODULES_AUDIO_MIXER_AUDIO_FRAME_MANIPULATOR_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "api/audio/audio_frame.h"
+
+namespace webrtc {
+
+// Returns the energy of `audio_frame`, computed from its samples.
+uint32_t AudioMixerCalculateEnergy(const AudioFrame& audio_frame);
+
+// Ramps up or down the provided audio frame. Ramp(0, 1, frame) will
+// linearly increase the samples in the frame from 0 to full volume.
+void Ramp(float start_gain, float target_gain, AudioFrame* audio_frame);
+
+// Downmixes or upmixes a frame to `target_number_of_channels`. The simple
+// mono <-> stereo cases use dedicated operations; other channel counts are
+// handled by a generic channel mixer.
+void RemixFrame(size_t target_number_of_channels, AudioFrame* frame);
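+// E.g. (sketch): RemixFrame(1, &frame) downmixes an interleaved stereo frame
+// to mono in place.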
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_MIXER_AUDIO_FRAME_MANIPULATOR_H_
diff --git a/third_party/libwebrtc/modules/audio_mixer/audio_frame_manipulator_gn/moz.build b/third_party/libwebrtc/modules/audio_mixer/audio_frame_manipulator_gn/moz.build
new file mode 100644
index 0000000000..79dbb7b153
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_mixer/audio_frame_manipulator_gn/moz.build
@@ -0,0 +1,212 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/audio_mixer/audio_frame_manipulator.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("audio_frame_manipulator_gn")
diff --git a/third_party/libwebrtc/modules/audio_mixer/audio_frame_manipulator_unittest.cc b/third_party/libwebrtc/modules/audio_mixer/audio_frame_manipulator_unittest.cc
new file mode 100644
index 0000000000..cfb3f2c230
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_mixer/audio_frame_manipulator_unittest.cc
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_mixer/audio_frame_manipulator.h"
+
+#include <algorithm>
+
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+void FillFrameWithConstants(size_t samples_per_channel,
+ size_t number_of_channels,
+ int16_t value,
+ AudioFrame* frame) {
+ frame->num_channels_ = number_of_channels;
+ frame->samples_per_channel_ = samples_per_channel;
+ int16_t* frame_data = frame->mutable_data();
+ std::fill(frame_data, frame_data + samples_per_channel * number_of_channels,
+ value);
+}
+} // namespace
+
+TEST(AudioFrameManipulator, CompareForwardRampWithExpectedResultStereo) {
+ constexpr int kSamplesPerChannel = 5;
+ constexpr int kNumberOfChannels = 2;
+
+ // Create a frame with values 5, 5, 5, ... and channels & samples as above.
+ AudioFrame frame;
+ FillFrameWithConstants(kSamplesPerChannel, kNumberOfChannels, 5, &frame);
+
+ Ramp(0.0f, 1.0f, &frame);
+
+ const int total_samples = kSamplesPerChannel * kNumberOfChannels;
+ const int16_t expected_result[total_samples] = {0, 0, 1, 1, 2, 2, 3, 3, 4, 4};
+ const int16_t* frame_data = frame.data();
+ EXPECT_TRUE(
+ std::equal(frame_data, frame_data + total_samples, expected_result));
+}
+
+TEST(AudioFrameManipulator, CompareBackwardRampWithExpectedResultMono) {
+ constexpr int kSamplesPerChannel = 5;
+ constexpr int kNumberOfChannels = 1;
+
+ // Create a frame with values 5, 5, 5, ... and channels & samples as above.
+ AudioFrame frame;
+ FillFrameWithConstants(kSamplesPerChannel, kNumberOfChannels, 5, &frame);
+
+ Ramp(1.0f, 0.0f, &frame);
+
+ const int total_samples = kSamplesPerChannel * kNumberOfChannels;
+ const int16_t expected_result[total_samples] = {5, 4, 3, 2, 1};
+ const int16_t* frame_data = frame.data();
+ EXPECT_TRUE(
+ std::equal(frame_data, frame_data + total_samples, expected_result));
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_mixer/audio_mixer_impl.cc b/third_party/libwebrtc/modules/audio_mixer/audio_mixer_impl.cc
new file mode 100644
index 0000000000..73a6e3a8a4
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_mixer/audio_mixer_impl.cc
@@ -0,0 +1,264 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_mixer/audio_mixer_impl.h"
+
+#include <stdint.h>
+
+#include <algorithm>
+#include <iterator>
+#include <type_traits>
+#include <utility>
+
+#include "modules/audio_mixer/audio_frame_manipulator.h"
+#include "modules/audio_mixer/default_output_rate_calculator.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
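+// Tracks one source's mixing state between Mix() calls: whether it was mixed
+// in the previous round and the gain it was last ramped to.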
+struct AudioMixerImpl::SourceStatus {
+ SourceStatus(Source* audio_source, bool is_mixed, float gain)
+ : audio_source(audio_source), is_mixed(is_mixed), gain(gain) {}
+ Source* audio_source = nullptr;
+ bool is_mixed = false;
+ float gain = 0.0f;
+
+ // A frame that will be passed to audio_source->GetAudioFrameWithInfo.
+ AudioFrame audio_frame;
+};
+
+namespace {
+
+struct SourceFrame {
+ SourceFrame() = default;
+
+ SourceFrame(AudioMixerImpl::SourceStatus* source_status,
+ AudioFrame* audio_frame,
+ bool muted)
+ : source_status(source_status), audio_frame(audio_frame), muted(muted) {
+ RTC_DCHECK(source_status);
+ RTC_DCHECK(audio_frame);
+ if (!muted) {
+ energy = AudioMixerCalculateEnergy(*audio_frame);
+ }
+ }
+
+ SourceFrame(AudioMixerImpl::SourceStatus* source_status,
+ AudioFrame* audio_frame,
+ bool muted,
+ uint32_t energy)
+ : source_status(source_status),
+ audio_frame(audio_frame),
+ muted(muted),
+ energy(energy) {
+ RTC_DCHECK(source_status);
+ RTC_DCHECK(audio_frame);
+ }
+
+ AudioMixerImpl::SourceStatus* source_status = nullptr;
+ AudioFrame* audio_frame = nullptr;
+ bool muted = true;
+ uint32_t energy = 0;
+};
+
+// ShouldMixBefore(a, b) is used to select mixer sources.
+// Returns true if `a` is preferred over `b` as a source to be mixed.
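+// Unmuted frames are preferred over muted ones, VAD-active frames over
+// passive ones, and among frames of equal status the higher-energy frame
+// wins.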
+bool ShouldMixBefore(const SourceFrame& a, const SourceFrame& b) {
+ if (a.muted != b.muted) {
+ return b.muted;
+ }
+
+ const auto a_activity = a.audio_frame->vad_activity_;
+ const auto b_activity = b.audio_frame->vad_activity_;
+
+ if (a_activity != b_activity) {
+ return a_activity == AudioFrame::kVadActive;
+ }
+
+ return a.energy > b.energy;
+}
+
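+// Ramps each frame's gain from its previous value toward 1 (if the source is
+// now mixed) or 0 (if it is not), and stores the new gain in the source
+// status.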
+void RampAndUpdateGain(
+ rtc::ArrayView<const SourceFrame> mixed_sources_and_frames) {
+ for (const auto& source_frame : mixed_sources_and_frames) {
+ float target_gain = source_frame.source_status->is_mixed ? 1.0f : 0.0f;
+ Ramp(source_frame.source_status->gain, target_gain,
+ source_frame.audio_frame);
+ source_frame.source_status->gain = target_gain;
+ }
+}
+
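+// Returns an iterator to the entry in `audio_source_list` whose audio_source
+// member equals `audio_source`, or end() if no such entry exists.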
+std::vector<std::unique_ptr<AudioMixerImpl::SourceStatus>>::const_iterator
+FindSourceInList(
+ AudioMixerImpl::Source const* audio_source,
+ std::vector<std::unique_ptr<AudioMixerImpl::SourceStatus>> const*
+ audio_source_list) {
+ return std::find_if(
+ audio_source_list->begin(), audio_source_list->end(),
+ [audio_source](const std::unique_ptr<AudioMixerImpl::SourceStatus>& p) {
+ return p->audio_source == audio_source;
+ });
+}
+} // namespace
+
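+// Scratch vectors reused across Mix() calls so that steady-state mixing does
+// not allocate.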
+struct AudioMixerImpl::HelperContainers {
+ void resize(size_t size) {
+ audio_to_mix.resize(size);
+ audio_source_mixing_data_list.resize(size);
+ ramp_list.resize(size);
+ preferred_rates.resize(size);
+ }
+
+ std::vector<AudioFrame*> audio_to_mix;
+ std::vector<SourceFrame> audio_source_mixing_data_list;
+ std::vector<SourceFrame> ramp_list;
+ std::vector<int> preferred_rates;
+};
+
+AudioMixerImpl::AudioMixerImpl(
+ std::unique_ptr<OutputRateCalculator> output_rate_calculator,
+ bool use_limiter,
+ int max_sources_to_mix)
+ : max_sources_to_mix_(max_sources_to_mix),
+ output_rate_calculator_(std::move(output_rate_calculator)),
+ audio_source_list_(),
+ helper_containers_(std::make_unique<HelperContainers>()),
+ frame_combiner_(use_limiter) {
+ RTC_CHECK_GE(max_sources_to_mix, 1) << "At least one source must be mixed";
+ audio_source_list_.reserve(max_sources_to_mix);
+ helper_containers_->resize(max_sources_to_mix);
+}
+
+AudioMixerImpl::~AudioMixerImpl() {}
+
+rtc::scoped_refptr<AudioMixerImpl> AudioMixerImpl::Create(
+ int max_sources_to_mix) {
+ return Create(std::unique_ptr<DefaultOutputRateCalculator>(
+ new DefaultOutputRateCalculator()),
+ /*use_limiter=*/true, max_sources_to_mix);
+}
+
+rtc::scoped_refptr<AudioMixerImpl> AudioMixerImpl::Create(
+ std::unique_ptr<OutputRateCalculator> output_rate_calculator,
+ bool use_limiter,
+ int max_sources_to_mix) {
+ return rtc::make_ref_counted<AudioMixerImpl>(
+ std::move(output_rate_calculator), use_limiter, max_sources_to_mix);
+}
+
+void AudioMixerImpl::Mix(size_t number_of_channels,
+ AudioFrame* audio_frame_for_mixing) {
+  RTC_DCHECK_GE(number_of_channels, 1);
+ MutexLock lock(&mutex_);
+
+ size_t number_of_streams = audio_source_list_.size();
+
+ std::transform(audio_source_list_.begin(), audio_source_list_.end(),
+ helper_containers_->preferred_rates.begin(),
+ [&](std::unique_ptr<SourceStatus>& a) {
+ return a->audio_source->PreferredSampleRate();
+ });
+
+ int output_frequency = output_rate_calculator_->CalculateOutputRateFromRange(
+ rtc::ArrayView<const int>(helper_containers_->preferred_rates.data(),
+ number_of_streams));
+
+ frame_combiner_.Combine(GetAudioFromSources(output_frequency),
+ number_of_channels, output_frequency,
+ number_of_streams, audio_frame_for_mixing);
+}
+
+bool AudioMixerImpl::AddSource(Source* audio_source) {
+ RTC_DCHECK(audio_source);
+ MutexLock lock(&mutex_);
+ RTC_DCHECK(FindSourceInList(audio_source, &audio_source_list_) ==
+ audio_source_list_.end())
+ << "Source already added to mixer";
+ audio_source_list_.emplace_back(new SourceStatus(audio_source, false, 0));
+ helper_containers_->resize(audio_source_list_.size());
+ return true;
+}
+
+void AudioMixerImpl::RemoveSource(Source* audio_source) {
+ RTC_DCHECK(audio_source);
+ MutexLock lock(&mutex_);
+ const auto iter = FindSourceInList(audio_source, &audio_source_list_);
+ RTC_DCHECK(iter != audio_source_list_.end()) << "Source not present in mixer";
+ audio_source_list_.erase(iter);
+}
+
+rtc::ArrayView<AudioFrame* const> AudioMixerImpl::GetAudioFromSources(
+ int output_frequency) {
+ // Get audio from the audio sources and put it in the SourceFrame vector.
+ int audio_source_mixing_data_count = 0;
+ for (auto& source_and_status : audio_source_list_) {
+ const auto audio_frame_info =
+ source_and_status->audio_source->GetAudioFrameWithInfo(
+ output_frequency, &source_and_status->audio_frame);
+
+ if (audio_frame_info == Source::AudioFrameInfo::kError) {
+ RTC_LOG_F(LS_WARNING) << "failed to GetAudioFrameWithInfo() from source";
+ continue;
+ }
+ helper_containers_
+ ->audio_source_mixing_data_list[audio_source_mixing_data_count++] =
+ SourceFrame(source_and_status.get(), &source_and_status->audio_frame,
+ audio_frame_info == Source::AudioFrameInfo::kMuted);
+ }
+ rtc::ArrayView<SourceFrame> audio_source_mixing_data_view(
+ helper_containers_->audio_source_mixing_data_list.data(),
+ audio_source_mixing_data_count);
+
+  // Sort the frames into mixing-preference order (see ShouldMixBefore).
+ std::sort(audio_source_mixing_data_view.begin(),
+ audio_source_mixing_data_view.end(), ShouldMixBefore);
+
+ int max_audio_frame_counter = max_sources_to_mix_;
+  int ramp_list_length = 0;
+ int audio_to_mix_count = 0;
+ // Go through list in order and put unmuted frames in result list.
+ for (const auto& p : audio_source_mixing_data_view) {
+ // Filter muted.
+ if (p.muted) {
+ p.source_status->is_mixed = false;
+ continue;
+ }
+
+ // Add frame to result vector for mixing.
+ bool is_mixed = false;
+ if (max_audio_frame_counter > 0) {
+ --max_audio_frame_counter;
+ helper_containers_->audio_to_mix[audio_to_mix_count++] = p.audio_frame;
+      helper_containers_->ramp_list[ramp_list_length++] =
+ SourceFrame(p.source_status, p.audio_frame, false, -1);
+ is_mixed = true;
+ }
+ p.source_status->is_mixed = is_mixed;
+ }
+ RampAndUpdateGain(rtc::ArrayView<SourceFrame>(
+      helper_containers_->ramp_list.data(), ramp_list_length));
+ return rtc::ArrayView<AudioFrame* const>(
+ helper_containers_->audio_to_mix.data(), audio_to_mix_count);
+}
+
+bool AudioMixerImpl::GetAudioSourceMixabilityStatusForTest(
+ AudioMixerImpl::Source* audio_source) const {
+ MutexLock lock(&mutex_);
+
+ const auto iter = FindSourceInList(audio_source, &audio_source_list_);
+ if (iter != audio_source_list_.end()) {
+ return (*iter)->is_mixed;
+ }
+
+ RTC_LOG(LS_ERROR) << "Audio source unknown";
+ return false;
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_mixer/audio_mixer_impl.h b/third_party/libwebrtc/modules/audio_mixer/audio_mixer_impl.h
new file mode 100644
index 0000000000..76b1131777
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_mixer/audio_mixer_impl.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_MIXER_AUDIO_MIXER_IMPL_H_
+#define MODULES_AUDIO_MIXER_AUDIO_MIXER_IMPL_H_
+
+#include <stddef.h>
+
+#include <memory>
+#include <vector>
+
+#include "api/array_view.h"
+#include "api/audio/audio_frame.h"
+#include "api/audio/audio_mixer.h"
+#include "api/scoped_refptr.h"
+#include "modules/audio_mixer/frame_combiner.h"
+#include "modules/audio_mixer/output_rate_calculator.h"
+#include "rtc_base/race_checker.h"
+#include "rtc_base/synchronization/mutex.h"
+#include "rtc_base/thread_annotations.h"
+
+namespace webrtc {
+
+class AudioMixerImpl : public AudioMixer {
+ public:
+ struct SourceStatus;
+
+ // AudioProcessing only accepts 10 ms frames.
+ static const int kFrameDurationInMs = 10;
+
+ static const int kDefaultNumberOfMixedAudioSources = 3;
+
+ static rtc::scoped_refptr<AudioMixerImpl> Create(
+ int max_sources_to_mix = kDefaultNumberOfMixedAudioSources);
+
+ static rtc::scoped_refptr<AudioMixerImpl> Create(
+ std::unique_ptr<OutputRateCalculator> output_rate_calculator,
+ bool use_limiter,
+ int max_sources_to_mix = kDefaultNumberOfMixedAudioSources);
+
+ ~AudioMixerImpl() override;
+
+ AudioMixerImpl(const AudioMixerImpl&) = delete;
+ AudioMixerImpl& operator=(const AudioMixerImpl&) = delete;
+
+ // AudioMixer functions
+ bool AddSource(Source* audio_source) override;
+ void RemoveSource(Source* audio_source) override;
+
+ void Mix(size_t number_of_channels,
+ AudioFrame* audio_frame_for_mixing) override
+ RTC_LOCKS_EXCLUDED(mutex_);
+
+ // Returns true if the source was mixed last round. Returns
+ // false and logs an error if the source was never added to the
+ // mixer.
+ bool GetAudioSourceMixabilityStatusForTest(Source* audio_source) const;
+
+ protected:
+ AudioMixerImpl(std::unique_ptr<OutputRateCalculator> output_rate_calculator,
+ bool use_limiter,
+ int max_sources_to_mix);
+
+ private:
+ struct HelperContainers;
+
+  // Computes which audio sources to mix from audio_source_list_, ramps them
+  // in and out, and updates their mixed status. Mixes up to
+  // max_sources_to_mix_ audio sources.
+ rtc::ArrayView<AudioFrame* const> GetAudioFromSources(int output_frequency)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+
+ // The critical section lock guards audio source insertion and
+ // removal, which can be done from any thread. The race checker
+ // checks that mixing is done sequentially.
+ mutable Mutex mutex_;
+
+ const int max_sources_to_mix_;
+
+ std::unique_ptr<OutputRateCalculator> output_rate_calculator_;
+
+ // List of all audio sources.
+ std::vector<std::unique_ptr<SourceStatus>> audio_source_list_
+ RTC_GUARDED_BY(mutex_);
+ const std::unique_ptr<HelperContainers> helper_containers_
+ RTC_GUARDED_BY(mutex_);
+
+ // Component that handles actual adding of audio frames.
+ FrameCombiner frame_combiner_;
+};
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_MIXER_AUDIO_MIXER_IMPL_H_
diff --git a/third_party/libwebrtc/modules/audio_mixer/audio_mixer_impl_gn/moz.build b/third_party/libwebrtc/modules/audio_mixer/audio_mixer_impl_gn/moz.build
new file mode 100644
index 0000000000..1e88321d53
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_mixer/audio_mixer_impl_gn/moz.build
@@ -0,0 +1,215 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_APM_DEBUG_DUMP"] = "0"
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/audio_mixer/audio_mixer_impl.cc",
+ "/third_party/libwebrtc/modules/audio_mixer/default_output_rate_calculator.cc",
+ "/third_party/libwebrtc/modules/audio_mixer/frame_combiner.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("audio_mixer_impl_gn")
diff --git a/third_party/libwebrtc/modules/audio_mixer/audio_mixer_impl_unittest.cc b/third_party/libwebrtc/modules/audio_mixer/audio_mixer_impl_unittest.cc
new file mode 100644
index 0000000000..20b7d299f2
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_mixer/audio_mixer_impl_unittest.cc
@@ -0,0 +1,794 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_mixer/audio_mixer_impl.h"
+
+#include <string.h>
+
+#include <cstdint>
+#include <limits>
+#include <memory>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/audio/audio_mixer.h"
+#include "api/rtp_packet_info.h"
+#include "api/rtp_packet_infos.h"
+#include "api/units/timestamp.h"
+#include "modules/audio_mixer/default_output_rate_calculator.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/strings/string_builder.h"
+#include "rtc_base/task_queue_for_test.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+using ::testing::_;
+using ::testing::Exactly;
+using ::testing::Invoke;
+using ::testing::Return;
+using ::testing::UnorderedElementsAre;
+
+namespace webrtc {
+
+namespace {
+
+constexpr int kDefaultSampleRateHz = 48000;
+
+// Utility function that resets the frame member variables with
+// sensible defaults.
+void ResetFrame(AudioFrame* frame) {
+ frame->sample_rate_hz_ = kDefaultSampleRateHz;
+ frame->num_channels_ = 1;
+
+ // Frame duration 10ms.
+ frame->samples_per_channel_ = kDefaultSampleRateHz / 100;
+ frame->vad_activity_ = AudioFrame::kVadActive;
+ frame->speech_type_ = AudioFrame::kNormalSpeech;
+}
+
+std::string ProduceDebugText(int sample_rate_hz,
+ int number_of_channels,
+ int number_of_sources) {
+ rtc::StringBuilder ss;
+ ss << "Sample rate: " << sample_rate_hz << " ";
+ ss << "Number of channels: " << number_of_channels << " ";
+ ss << "Number of sources: " << number_of_sources;
+ return ss.Release();
+}
+
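+// Output frame shared by the tests below as the destination of Mix() calls.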
+AudioFrame frame_for_mixing;
+
+} // namespace
+
+class MockMixerAudioSource : public ::testing::NiceMock<AudioMixer::Source> {
+ public:
+ MockMixerAudioSource()
+ : fake_audio_frame_info_(AudioMixer::Source::AudioFrameInfo::kNormal) {
+ ON_CALL(*this, GetAudioFrameWithInfo(_, _))
+ .WillByDefault(
+ Invoke(this, &MockMixerAudioSource::FakeAudioFrameWithInfo));
+ ON_CALL(*this, PreferredSampleRate())
+ .WillByDefault(Return(kDefaultSampleRateHz));
+ }
+
+ MOCK_METHOD(AudioFrameInfo,
+ GetAudioFrameWithInfo,
+ (int sample_rate_hz, AudioFrame* audio_frame),
+ (override));
+
+ MOCK_METHOD(int, PreferredSampleRate, (), (const, override));
+ MOCK_METHOD(int, Ssrc, (), (const, override));
+
+ AudioFrame* fake_frame() { return &fake_frame_; }
+ AudioFrameInfo fake_info() { return fake_audio_frame_info_; }
+ void set_fake_info(const AudioFrameInfo audio_frame_info) {
+ fake_audio_frame_info_ = audio_frame_info;
+ }
+
+ void set_packet_infos(const RtpPacketInfos& packet_infos) {
+ packet_infos_ = packet_infos;
+ }
+
+ private:
+ AudioFrameInfo FakeAudioFrameWithInfo(int sample_rate_hz,
+ AudioFrame* audio_frame) {
+ audio_frame->CopyFrom(fake_frame_);
+ audio_frame->sample_rate_hz_ = sample_rate_hz;
+ audio_frame->samples_per_channel_ =
+ rtc::CheckedDivExact(sample_rate_hz, 100);
+ audio_frame->packet_infos_ = packet_infos_;
+ return fake_info();
+ }
+
+ AudioFrame fake_frame_;
+ AudioFrameInfo fake_audio_frame_info_;
+ RtpPacketInfos packet_infos_;
+};
+
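+// Test-only rate calculator that ignores the preferred rates and always
+// returns the rate given at construction.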
+class CustomRateCalculator : public OutputRateCalculator {
+ public:
+ explicit CustomRateCalculator(int rate) : rate_(rate) {}
+ int CalculateOutputRateFromRange(
+ rtc::ArrayView<const int> preferred_rates) override {
+ return rate_;
+ }
+
+ private:
+ const int rate_;
+};
+
+// Creates participants from `frames` and `frame_info` and adds them
+// to the mixer. Compares mixed status with `expected_status`.
+void MixAndCompare(
+ const std::vector<AudioFrame>& frames,
+ const std::vector<AudioMixer::Source::AudioFrameInfo>& frame_info,
+ const std::vector<bool>& expected_status) {
+ const size_t num_audio_sources = frames.size();
+ RTC_DCHECK(frames.size() == frame_info.size());
+ RTC_DCHECK(frame_info.size() == expected_status.size());
+
+ const auto mixer = AudioMixerImpl::Create();
+ std::vector<MockMixerAudioSource> participants(num_audio_sources);
+
+ for (size_t i = 0; i < num_audio_sources; ++i) {
+ participants[i].fake_frame()->CopyFrom(frames[i]);
+ participants[i].set_fake_info(frame_info[i]);
+ }
+
+ for (size_t i = 0; i < num_audio_sources; ++i) {
+ EXPECT_TRUE(mixer->AddSource(&participants[i]));
+ EXPECT_CALL(participants[i], GetAudioFrameWithInfo(kDefaultSampleRateHz, _))
+ .Times(Exactly(1));
+ }
+
+ mixer->Mix(1, &frame_for_mixing);
+
+ for (size_t i = 0; i < num_audio_sources; ++i) {
+ EXPECT_EQ(expected_status[i],
+ mixer->GetAudioSourceMixabilityStatusForTest(&participants[i]))
+ << "Mixed status of AudioSource #" << i << " wrong.";
+ }
+}
+
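+// Configures `audio_source` to prefer and produce frames at
+// `native_sample_rate`, then runs one mono mix iteration into `mix_frame`.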
+void MixMonoAtGivenNativeRate(int native_sample_rate,
+ AudioFrame* mix_frame,
+ rtc::scoped_refptr<AudioMixer> mixer,
+ MockMixerAudioSource* audio_source) {
+ ON_CALL(*audio_source, PreferredSampleRate())
+ .WillByDefault(Return(native_sample_rate));
+ audio_source->fake_frame()->sample_rate_hz_ = native_sample_rate;
+ audio_source->fake_frame()->samples_per_channel_ = native_sample_rate / 100;
+
+ mixer->Mix(1, mix_frame);
+}
+
+TEST(AudioMixer, LargestEnergyVadActiveMixed) {
+ constexpr int kAudioSources =
+ AudioMixerImpl::kDefaultNumberOfMixedAudioSources + 3;
+
+ const auto mixer = AudioMixerImpl::Create();
+
+ MockMixerAudioSource participants[kAudioSources];
+
+ for (int i = 0; i < kAudioSources; ++i) {
+ ResetFrame(participants[i].fake_frame());
+
+    // We set the 80th sample value since the first 80 samples may be
+    // modified by a ramped-in window.
+ participants[i].fake_frame()->mutable_data()[80] = i;
+
+ EXPECT_TRUE(mixer->AddSource(&participants[i]));
+ EXPECT_CALL(participants[i], GetAudioFrameWithInfo(_, _)).Times(Exactly(1));
+ }
+
+ // Last participant gives audio frame with passive VAD, although it has the
+ // largest energy.
+ participants[kAudioSources - 1].fake_frame()->vad_activity_ =
+ AudioFrame::kVadPassive;
+
+ AudioFrame audio_frame;
+ mixer->Mix(1, // number of channels
+ &audio_frame);
+
+ for (int i = 0; i < kAudioSources; ++i) {
+ bool is_mixed =
+ mixer->GetAudioSourceMixabilityStatusForTest(&participants[i]);
+ if (i == kAudioSources - 1 ||
+ i < kAudioSources - 1 -
+ AudioMixerImpl::kDefaultNumberOfMixedAudioSources) {
+ EXPECT_FALSE(is_mixed)
+ << "Mixing status of AudioSource #" << i << " wrong.";
+ } else {
+ EXPECT_TRUE(is_mixed)
+ << "Mixing status of AudioSource #" << i << " wrong.";
+ }
+ }
+}
+
+TEST(AudioMixer, FrameNotModifiedForSingleParticipant) {
+ const auto mixer = AudioMixerImpl::Create();
+
+ MockMixerAudioSource participant;
+
+ ResetFrame(participant.fake_frame());
+ const size_t n_samples = participant.fake_frame()->samples_per_channel_;
+
+ // Modify the frame so that it's not zero.
+ int16_t* fake_frame_data = participant.fake_frame()->mutable_data();
+ for (size_t j = 0; j < n_samples; ++j) {
+ fake_frame_data[j] = static_cast<int16_t>(j);
+ }
+
+ EXPECT_TRUE(mixer->AddSource(&participant));
+ EXPECT_CALL(participant, GetAudioFrameWithInfo(_, _)).Times(Exactly(2));
+
+ AudioFrame audio_frame;
+ // Two mix iteration to compare after the ramp-up step.
+ for (int i = 0; i < 2; ++i) {
+ mixer->Mix(1, // number of channels
+ &audio_frame);
+ }
+
+ EXPECT_EQ(0, memcmp(participant.fake_frame()->data(), audio_frame.data(),
+ n_samples));
+}
+
+TEST(AudioMixer, SourceAtNativeRateShouldNeverResample) {
+ const auto mixer = AudioMixerImpl::Create();
+
+ MockMixerAudioSource audio_source;
+ ResetFrame(audio_source.fake_frame());
+
+ mixer->AddSource(&audio_source);
+
+ for (auto frequency : {8000, 16000, 32000, 48000}) {
+ EXPECT_CALL(audio_source, GetAudioFrameWithInfo(frequency, _))
+ .Times(Exactly(1));
+
+ MixMonoAtGivenNativeRate(frequency, &frame_for_mixing, mixer,
+ &audio_source);
+ }
+}
+
+TEST(AudioMixer, MixerShouldMixAtNativeSourceRate) {
+ const auto mixer = AudioMixerImpl::Create();
+
+ MockMixerAudioSource audio_source;
+ ResetFrame(audio_source.fake_frame());
+
+ mixer->AddSource(&audio_source);
+
+ for (auto frequency : {8000, 16000, 32000, 48000}) {
+ MixMonoAtGivenNativeRate(frequency, &frame_for_mixing, mixer,
+ &audio_source);
+
+ EXPECT_EQ(frequency, frame_for_mixing.sample_rate_hz_);
+ }
+}
+
+TEST(AudioMixer, MixerShouldAlwaysMixAtNativeRate) {
+ const auto mixer = AudioMixerImpl::Create();
+
+ MockMixerAudioSource participant;
+ ResetFrame(participant.fake_frame());
+ mixer->AddSource(&participant);
+
+ const int needed_frequency = 44100;
+ ON_CALL(participant, PreferredSampleRate())
+ .WillByDefault(Return(needed_frequency));
+
+ // We expect mixing frequency to be native and >= needed_frequency.
+ const int expected_mix_frequency = 48000;
+ EXPECT_CALL(participant, GetAudioFrameWithInfo(expected_mix_frequency, _))
+ .Times(Exactly(1));
+ participant.fake_frame()->sample_rate_hz_ = expected_mix_frequency;
+ participant.fake_frame()->samples_per_channel_ = expected_mix_frequency / 100;
+
+ mixer->Mix(1, &frame_for_mixing);
+
+ EXPECT_EQ(48000, frame_for_mixing.sample_rate_hz_);
+}
+
+// Check that the mixing rate is always >= each participant's preferred rate.
+TEST(AudioMixer, ShouldNotCauseQualityLossForMultipleSources) {
+ const auto mixer = AudioMixerImpl::Create();
+
+ std::vector<MockMixerAudioSource> audio_sources(2);
+ const std::vector<int> source_sample_rates = {8000, 16000};
+ for (int i = 0; i < 2; ++i) {
+ auto& source = audio_sources[i];
+ ResetFrame(source.fake_frame());
+ mixer->AddSource(&source);
+ const auto sample_rate = source_sample_rates[i];
+ EXPECT_CALL(source, PreferredSampleRate()).WillOnce(Return(sample_rate));
+
+ EXPECT_CALL(source, GetAudioFrameWithInfo(::testing::Ge(sample_rate), _));
+ }
+ mixer->Mix(1, &frame_for_mixing);
+}
+
+TEST(AudioMixer, ParticipantNumberOfChannels) {
+ const auto mixer = AudioMixerImpl::Create();
+
+ MockMixerAudioSource participant;
+ ResetFrame(participant.fake_frame());
+
+ EXPECT_TRUE(mixer->AddSource(&participant));
+ for (size_t number_of_channels : {1, 2}) {
+ EXPECT_CALL(participant, GetAudioFrameWithInfo(kDefaultSampleRateHz, _))
+ .Times(Exactly(1));
+ mixer->Mix(number_of_channels, &frame_for_mixing);
+ EXPECT_EQ(number_of_channels, frame_for_mixing.num_channels_);
+ }
+}
+
+// The maximal number of participants is mixed in one iteration; then another
+// participant with higher energy is added.
+TEST(AudioMixer, RampedOutSourcesShouldNotBeMarkedMixed) {
+ constexpr int kAudioSources =
+ AudioMixerImpl::kDefaultNumberOfMixedAudioSources + 1;
+
+ const auto mixer = AudioMixerImpl::Create();
+ MockMixerAudioSource participants[kAudioSources];
+
+ for (int i = 0; i < kAudioSources; ++i) {
+ ResetFrame(participants[i].fake_frame());
+ // Set the participant audio energy to increase with the index
+ // `i`.
+ participants[i].fake_frame()->mutable_data()[0] = 100 * i;
+ }
+
+ // Add all participants but the loudest for mixing.
+ for (int i = 0; i < kAudioSources - 1; ++i) {
+ EXPECT_TRUE(mixer->AddSource(&participants[i]));
+ EXPECT_CALL(participants[i], GetAudioFrameWithInfo(kDefaultSampleRateHz, _))
+ .Times(Exactly(1));
+ }
+
+ // First mixer iteration
+ mixer->Mix(1, &frame_for_mixing);
+
+ // All participants but the loudest should have been mixed.
+ for (int i = 0; i < kAudioSources - 1; ++i) {
+ EXPECT_TRUE(mixer->GetAudioSourceMixabilityStatusForTest(&participants[i]))
+ << "Mixed status of AudioSource #" << i << " wrong.";
+ }
+
+ // Add new participant with higher energy.
+ EXPECT_TRUE(mixer->AddSource(&participants[kAudioSources - 1]));
+ for (int i = 0; i < kAudioSources; ++i) {
+ EXPECT_CALL(participants[i], GetAudioFrameWithInfo(kDefaultSampleRateHz, _))
+ .Times(Exactly(1));
+ }
+
+ mixer->Mix(1, &frame_for_mixing);
+
+  // The quietest participant should not have been mixed.
+ EXPECT_FALSE(mixer->GetAudioSourceMixabilityStatusForTest(&participants[0]))
+ << "Mixed status of AudioSource #0 wrong.";
+
+ // The loudest participants should have been mixed.
+ for (int i = 1; i < kAudioSources; ++i) {
+ EXPECT_EQ(true,
+ mixer->GetAudioSourceMixabilityStatusForTest(&participants[i]))
+ << "Mixed status of AudioSource #" << i << " wrong.";
+ }
+}
+
+// This test checks that initialization and participant addition
+// can be done on different threads.
+TEST(AudioMixer, ConstructFromOtherThread) {
+ TaskQueueForTest init_queue("init");
+ rtc::scoped_refptr<AudioMixer> mixer;
+ init_queue.SendTask([&mixer]() { mixer = AudioMixerImpl::Create(); });
+
+ MockMixerAudioSource participant;
+ EXPECT_CALL(participant, PreferredSampleRate())
+ .WillRepeatedly(Return(kDefaultSampleRateHz));
+
+ ResetFrame(participant.fake_frame());
+
+ TaskQueueForTest participant_queue("participant");
+ participant_queue.SendTask(
+ [&mixer, &participant]() { mixer->AddSource(&participant); });
+
+ EXPECT_CALL(participant, GetAudioFrameWithInfo(kDefaultSampleRateHz, _))
+ .Times(Exactly(1));
+
+ // Do one mixer iteration
+ mixer->Mix(1, &frame_for_mixing);
+}
+
+TEST(AudioMixer, MutedShouldMixAfterUnmuted) {
+ constexpr int kAudioSources =
+ AudioMixerImpl::kDefaultNumberOfMixedAudioSources + 1;
+
+ std::vector<AudioFrame> frames(kAudioSources);
+ for (auto& frame : frames) {
+ ResetFrame(&frame);
+ }
+
+ std::vector<AudioMixer::Source::AudioFrameInfo> frame_info(
+ kAudioSources, AudioMixer::Source::AudioFrameInfo::kNormal);
+ frame_info[0] = AudioMixer::Source::AudioFrameInfo::kMuted;
+ std::vector<bool> expected_status(kAudioSources, true);
+ expected_status[0] = false;
+
+ MixAndCompare(frames, frame_info, expected_status);
+}
+
+TEST(AudioMixer, PassiveShouldMixAfterNormal) {
+ constexpr int kAudioSources =
+ AudioMixerImpl::kDefaultNumberOfMixedAudioSources + 1;
+
+ std::vector<AudioFrame> frames(kAudioSources);
+ for (auto& frame : frames) {
+ ResetFrame(&frame);
+ }
+
+ std::vector<AudioMixer::Source::AudioFrameInfo> frame_info(
+ kAudioSources, AudioMixer::Source::AudioFrameInfo::kNormal);
+ frames[0].vad_activity_ = AudioFrame::kVadPassive;
+ std::vector<bool> expected_status(kAudioSources, true);
+ expected_status[0] = false;
+
+ MixAndCompare(frames, frame_info, expected_status);
+}
+
+TEST(AudioMixer, ActiveShouldMixBeforeLoud) {
+ constexpr int kAudioSources =
+ AudioMixerImpl::kDefaultNumberOfMixedAudioSources + 1;
+
+ std::vector<AudioFrame> frames(kAudioSources);
+ for (auto& frame : frames) {
+ ResetFrame(&frame);
+ }
+
+ std::vector<AudioMixer::Source::AudioFrameInfo> frame_info(
+ kAudioSources, AudioMixer::Source::AudioFrameInfo::kNormal);
+ frames[0].vad_activity_ = AudioFrame::kVadPassive;
+ int16_t* frame_data = frames[0].mutable_data();
+ std::fill(frame_data, frame_data + kDefaultSampleRateHz / 100,
+ std::numeric_limits<int16_t>::max());
+ std::vector<bool> expected_status(kAudioSources, true);
+ expected_status[0] = false;
+
+ MixAndCompare(frames, frame_info, expected_status);
+}
+
+TEST(AudioMixer, ShouldMixUpToSpecifiedNumberOfSourcesToMix) {
+ constexpr int kAudioSources = 5;
+ constexpr int kSourcesToMix = 2;
+
+ std::vector<AudioFrame> frames(kAudioSources);
+ for (auto& frame : frames) {
+ ResetFrame(&frame);
+ }
+
+ std::vector<AudioMixer::Source::AudioFrameInfo> frame_info(
+ kAudioSources, AudioMixer::Source::AudioFrameInfo::kNormal);
+  // Give the last kSourcesToMix sources kVadActive frames so that they are
+  // the ones mixed.
+ const std::vector<AudioFrame::VADActivity> kVadActivities = {
+ AudioFrame::kVadUnknown, AudioFrame::kVadPassive, AudioFrame::kVadPassive,
+ AudioFrame::kVadActive, AudioFrame::kVadActive};
+ // Populate VAD and frame for all sources.
+ for (int i = 0; i < kAudioSources; i++) {
+ frames[i].vad_activity_ = kVadActivities[i];
+ }
+
+ std::vector<MockMixerAudioSource> participants(kAudioSources);
+ for (int i = 0; i < kAudioSources; ++i) {
+ participants[i].fake_frame()->CopyFrom(frames[i]);
+ participants[i].set_fake_info(frame_info[i]);
+ }
+
+ const auto mixer = AudioMixerImpl::Create(kSourcesToMix);
+ for (int i = 0; i < kAudioSources; ++i) {
+ EXPECT_TRUE(mixer->AddSource(&participants[i]));
+ EXPECT_CALL(participants[i], GetAudioFrameWithInfo(kDefaultSampleRateHz, _))
+ .Times(Exactly(1));
+ }
+
+ mixer->Mix(1, &frame_for_mixing);
+
+ std::vector<bool> expected_status = {false, false, false, true, true};
+ for (int i = 0; i < kAudioSources; ++i) {
+ EXPECT_EQ(expected_status[i],
+ mixer->GetAudioSourceMixabilityStatusForTest(&participants[i]))
+        << "Mix status of source #" << i << " is wrong.";
+ }
+}
+
+TEST(AudioMixer, UnmutedShouldMixBeforeLoud) {
+ constexpr int kAudioSources =
+ AudioMixerImpl::kDefaultNumberOfMixedAudioSources + 1;
+
+ std::vector<AudioFrame> frames(kAudioSources);
+ for (auto& frame : frames) {
+ ResetFrame(&frame);
+ }
+
+ std::vector<AudioMixer::Source::AudioFrameInfo> frame_info(
+ kAudioSources, AudioMixer::Source::AudioFrameInfo::kNormal);
+ frame_info[0] = AudioMixer::Source::AudioFrameInfo::kMuted;
+ int16_t* frame_data = frames[0].mutable_data();
+ std::fill(frame_data, frame_data + kDefaultSampleRateHz / 100,
+ std::numeric_limits<int16_t>::max());
+ std::vector<bool> expected_status(kAudioSources, true);
+ expected_status[0] = false;
+
+ MixAndCompare(frames, frame_info, expected_status);
+}
+
+TEST(AudioMixer, MixingRateShouldBeDecidedByRateCalculator) {
+ constexpr int kOutputRate = 22000;
+ const auto mixer =
+ AudioMixerImpl::Create(std::unique_ptr<OutputRateCalculator>(
+ new CustomRateCalculator(kOutputRate)),
+ true);
+ MockMixerAudioSource audio_source;
+ mixer->AddSource(&audio_source);
+ ResetFrame(audio_source.fake_frame());
+
+ EXPECT_CALL(audio_source, GetAudioFrameWithInfo(kOutputRate, _))
+ .Times(Exactly(1));
+
+ mixer->Mix(1, &frame_for_mixing);
+}
+
+TEST(AudioMixer, ZeroSourceRateShouldBeDecidedByRateCalculator) {
+ constexpr int kOutputRate = 8000;
+ const auto mixer =
+ AudioMixerImpl::Create(std::unique_ptr<OutputRateCalculator>(
+ new CustomRateCalculator(kOutputRate)),
+ true);
+
+ mixer->Mix(1, &frame_for_mixing);
+
+ EXPECT_EQ(kOutputRate, frame_for_mixing.sample_rate_hz_);
+}
+
+TEST(AudioMixer, NoLimiterBasicApiCalls) {
+ const auto mixer = AudioMixerImpl::Create(
+ std::unique_ptr<OutputRateCalculator>(new DefaultOutputRateCalculator()),
+ false);
+ mixer->Mix(1, &frame_for_mixing);
+}
+
+TEST(AudioMixer, AnyRateIsPossibleWithNoLimiter) {
+ // No APM limiter means no AudioProcessing::NativeRate restriction
+ // on mixing rate. The rate has to be divisible by 100 since we use
+ // 10 ms frames, though.
+ for (const auto rate : {8000, 20000, 24000, 32000, 44100}) {
+ for (const size_t number_of_channels : {1, 2}) {
+ for (const auto number_of_sources : {0, 1, 2, 3, 4}) {
+ SCOPED_TRACE(
+            ProduceDebugText(rate, number_of_channels, number_of_sources));
+ const auto mixer =
+ AudioMixerImpl::Create(std::unique_ptr<OutputRateCalculator>(
+ new CustomRateCalculator(rate)),
+ false);
+
+ std::vector<MockMixerAudioSource> sources(number_of_sources);
+ for (auto& source : sources) {
+ ResetFrame(source.fake_frame());
+ mixer->AddSource(&source);
+ }
+
+ mixer->Mix(number_of_channels, &frame_for_mixing);
+ EXPECT_EQ(rate, frame_for_mixing.sample_rate_hz_);
+ EXPECT_EQ(number_of_channels, frame_for_mixing.num_channels_);
+ }
+ }
+ }
+}
+
+TEST(AudioMixer, MultipleChannelsOneParticipant) {
+ // Set up a participant with a 6-channel frame, and make sure a 6-channel
+ // frame with the right sample values comes out from the mixer. There are 2
+ // Mix calls because of ramp-up.
+ constexpr size_t kNumberOfChannels = 6;
+ MockMixerAudioSource source;
+ ResetFrame(source.fake_frame());
+ const auto mixer = AudioMixerImpl::Create();
+ mixer->AddSource(&source);
+ mixer->Mix(1, &frame_for_mixing);
+ auto* frame = source.fake_frame();
+ frame->num_channels_ = kNumberOfChannels;
+ std::fill(frame->mutable_data(),
+ frame->mutable_data() + AudioFrame::kMaxDataSizeSamples, 0);
+ for (size_t i = 0; i < kNumberOfChannels; ++i) {
+ frame->mutable_data()[100 * frame->num_channels_ + i] = 1000 * i;
+ }
+
+ mixer->Mix(kNumberOfChannels, &frame_for_mixing);
+
+ EXPECT_EQ(frame_for_mixing.num_channels_, kNumberOfChannels);
+ for (size_t i = 0; i < kNumberOfChannels; ++i) {
+ EXPECT_EQ(frame_for_mixing.data()[100 * frame_for_mixing.num_channels_ + i],
+ static_cast<int16_t>(1000 * i));
+ }
+}
+
+TEST(AudioMixer, MultipleChannelsManyParticipants) {
+ // Sets up 2 participants. One has a 6-channel frame. Make sure a 6-channel
+ // frame with the right sample values comes out from the mixer. There are 2
+ // Mix calls because of ramp-up.
+ constexpr size_t kNumberOfChannels = 6;
+ MockMixerAudioSource source;
+ const auto mixer = AudioMixerImpl::Create();
+ mixer->AddSource(&source);
+ ResetFrame(source.fake_frame());
+ mixer->Mix(1, &frame_for_mixing);
+ auto* frame = source.fake_frame();
+ frame->num_channels_ = kNumberOfChannels;
+ std::fill(frame->mutable_data(),
+ frame->mutable_data() + AudioFrame::kMaxDataSizeSamples, 0);
+ for (size_t i = 0; i < kNumberOfChannels; ++i) {
+ frame->mutable_data()[100 * frame->num_channels_ + i] = 1000 * i;
+ }
+ MockMixerAudioSource other_source;
+ ResetFrame(other_source.fake_frame());
+ mixer->AddSource(&other_source);
+
+ mixer->Mix(kNumberOfChannels, &frame_for_mixing);
+
+ EXPECT_EQ(frame_for_mixing.num_channels_, kNumberOfChannels);
+ for (size_t i = 0; i < kNumberOfChannels; ++i) {
+ EXPECT_EQ(frame_for_mixing.data()[100 * frame_for_mixing.num_channels_ + i],
+ static_cast<int16_t>(1000 * i));
+ }
+}
+
+TEST(AudioMixer, ShouldIncludeRtpPacketInfoFromAllMixedSources) {
+ const uint32_t kSsrc0 = 10;
+ const uint32_t kSsrc1 = 11;
+ const uint32_t kSsrc2 = 12;
+ const uint32_t kCsrc0 = 20;
+ const uint32_t kCsrc1 = 21;
+ const uint32_t kCsrc2 = 22;
+ const uint32_t kCsrc3 = 23;
+ const int kAudioLevel0 = 10;
+ const int kAudioLevel1 = 40;
+ const absl::optional<uint32_t> kAudioLevel2 = absl::nullopt;
+ const uint32_t kRtpTimestamp0 = 300;
+ const uint32_t kRtpTimestamp1 = 400;
+ const Timestamp kReceiveTime0 = Timestamp::Millis(10);
+ const Timestamp kReceiveTime1 = Timestamp::Millis(20);
+
+ const RtpPacketInfo kPacketInfo0(kSsrc0, {kCsrc0, kCsrc1}, kRtpTimestamp0,
+ kAudioLevel0, absl::nullopt, kReceiveTime0);
+ const RtpPacketInfo kPacketInfo1(kSsrc1, {kCsrc2}, kRtpTimestamp1,
+ kAudioLevel1, absl::nullopt, kReceiveTime1);
+ const RtpPacketInfo kPacketInfo2(kSsrc2, {kCsrc3}, kRtpTimestamp1,
+ kAudioLevel2, absl::nullopt, kReceiveTime1);
+
+ const auto mixer = AudioMixerImpl::Create();
+
+ MockMixerAudioSource source;
+ source.set_packet_infos(RtpPacketInfos({kPacketInfo0}));
+ mixer->AddSource(&source);
+ ResetFrame(source.fake_frame());
+ mixer->Mix(1, &frame_for_mixing);
+
+ MockMixerAudioSource other_source;
+ other_source.set_packet_infos(RtpPacketInfos({kPacketInfo1, kPacketInfo2}));
+ ResetFrame(other_source.fake_frame());
+ mixer->AddSource(&other_source);
+
+ mixer->Mix(/*number_of_channels=*/1, &frame_for_mixing);
+
+ EXPECT_THAT(frame_for_mixing.packet_infos_,
+ UnorderedElementsAre(kPacketInfo0, kPacketInfo1, kPacketInfo2));
+}
+
+TEST(AudioMixer, MixerShouldIncludeRtpPacketInfoFromMixedSourcesOnly) {
+ const uint32_t kSsrc0 = 10;
+ const uint32_t kSsrc1 = 11;
+ const uint32_t kSsrc2 = 21;
+ const uint32_t kCsrc0 = 30;
+ const uint32_t kCsrc1 = 31;
+ const uint32_t kCsrc2 = 32;
+ const uint32_t kCsrc3 = 33;
+ const int kAudioLevel0 = 10;
+ const absl::optional<uint32_t> kAudioLevelMissing = absl::nullopt;
+ const uint32_t kRtpTimestamp0 = 300;
+ const uint32_t kRtpTimestamp1 = 400;
+ const Timestamp kReceiveTime0 = Timestamp::Millis(10);
+ const Timestamp kReceiveTime1 = Timestamp::Millis(20);
+
+ const RtpPacketInfo kPacketInfo0(kSsrc0, {kCsrc0, kCsrc1}, kRtpTimestamp0,
+ kAudioLevel0, absl::nullopt, kReceiveTime0);
+ const RtpPacketInfo kPacketInfo1(kSsrc1, {kCsrc2}, kRtpTimestamp1,
+ kAudioLevelMissing, absl::nullopt,
+ kReceiveTime1);
+ const RtpPacketInfo kPacketInfo2(kSsrc2, {kCsrc3}, kRtpTimestamp1,
+ kAudioLevelMissing, absl::nullopt,
+ kReceiveTime1);
+
+ const auto mixer = AudioMixerImpl::Create(/*max_sources_to_mix=*/2);
+
+ MockMixerAudioSource source1;
+ source1.set_packet_infos(RtpPacketInfos({kPacketInfo0}));
+ mixer->AddSource(&source1);
+ ResetFrame(source1.fake_frame());
+ mixer->Mix(1, &frame_for_mixing);
+
+ MockMixerAudioSource source2;
+ source2.set_packet_infos(RtpPacketInfos({kPacketInfo1}));
+ ResetFrame(source2.fake_frame());
+ mixer->AddSource(&source2);
+
+ // The mixer prioritizes kVadActive over kVadPassive.
+ // We limit the number of sources to mix to 2 and set the third source's VAD
+ // activity to kVadPassive so that it will not be added to the mix.
+ MockMixerAudioSource source3;
+ source3.set_packet_infos(RtpPacketInfos({kPacketInfo2}));
+ ResetFrame(source3.fake_frame());
+ source3.fake_frame()->vad_activity_ = AudioFrame::kVadPassive;
+ mixer->AddSource(&source3);
+
+ mixer->Mix(/*number_of_channels=*/1, &frame_for_mixing);
+
+ EXPECT_THAT(frame_for_mixing.packet_infos_,
+ UnorderedElementsAre(kPacketInfo0, kPacketInfo1));
+}
+
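+// Rate calculator that returns a rate above every native rate, used to push
+// the mixer into the overflow case exercised by the death test below.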
+class HighOutputRateCalculator : public OutputRateCalculator {
+ public:
+ static const int kDefaultFrequency = 76000;
+ int CalculateOutputRateFromRange(
+ rtc::ArrayView<const int> preferred_sample_rates) override {
+ return kDefaultFrequency;
+ }
+ ~HighOutputRateCalculator() override {}
+};
+const int HighOutputRateCalculator::kDefaultFrequency;
+
+TEST(AudioMixerDeathTest, MultipleChannelsAndHighRate) {
+ constexpr size_t kSamplesPerChannel =
+ HighOutputRateCalculator::kDefaultFrequency / 100;
+ // As many channels as an AudioFrame can fit:
+ constexpr size_t kNumberOfChannels =
+ AudioFrame::kMaxDataSizeSamples / kSamplesPerChannel;
+ MockMixerAudioSource source;
+ const auto mixer = AudioMixerImpl::Create(
+ std::make_unique<HighOutputRateCalculator>(), true);
+ mixer->AddSource(&source);
+ ResetFrame(source.fake_frame());
+ mixer->Mix(1, &frame_for_mixing);
+ auto* frame = source.fake_frame();
+ frame->num_channels_ = kNumberOfChannels;
+ frame->sample_rate_hz_ = HighOutputRateCalculator::kDefaultFrequency;
+ frame->samples_per_channel_ = kSamplesPerChannel;
+
+ std::fill(frame->mutable_data(),
+ frame->mutable_data() + AudioFrame::kMaxDataSizeSamples, 0);
+ MockMixerAudioSource other_source;
+ ResetFrame(other_source.fake_frame());
+ auto* other_frame = other_source.fake_frame();
+ other_frame->num_channels_ = kNumberOfChannels;
+ other_frame->sample_rate_hz_ = HighOutputRateCalculator::kDefaultFrequency;
+ other_frame->samples_per_channel_ = kSamplesPerChannel;
+ mixer->AddSource(&other_source);
+
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+ EXPECT_DEATH(mixer->Mix(kNumberOfChannels, &frame_for_mixing), "");
+#elif !RTC_DCHECK_IS_ON
+ mixer->Mix(kNumberOfChannels, &frame_for_mixing);
+ EXPECT_EQ(frame_for_mixing.num_channels_, kNumberOfChannels);
+ EXPECT_EQ(frame_for_mixing.sample_rate_hz_,
+ HighOutputRateCalculator::kDefaultFrequency);
+#endif
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_mixer/audio_mixer_test.cc b/third_party/libwebrtc/modules/audio_mixer/audio_mixer_test.cc
new file mode 100644
index 0000000000..3ee28a7937
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_mixer/audio_mixer_test.cc
@@ -0,0 +1,182 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "api/audio/audio_mixer.h"
+
+#include <cstring>
+#include <iostream>
+#include <vector>
+
+#include "absl/flags/flag.h"
+#include "absl/flags/parse.h"
+#include "common_audio/wav_file.h"
+#include "modules/audio_mixer/audio_mixer_impl.h"
+#include "modules/audio_mixer/default_output_rate_calculator.h"
+#include "rtc_base/strings/string_builder.h"
+
+ABSL_FLAG(int,
+ sampling_rate,
+ 16000,
+ "Rate at which to mix (all input streams must have this rate)");
+
+ABSL_FLAG(bool,
+ stereo,
+ false,
+          "Enable stereo (interleaved). Inputs need not match this parameter.");
+
+ABSL_FLAG(bool, limiter, true, "Enable limiter.");
+ABSL_FLAG(std::string,
+ output_file,
+ "mixed_file.wav",
+ "File in which to store the mixed result.");
+ABSL_FLAG(std::string, input_file_1, "", "First input. Default none.");
+ABSL_FLAG(std::string, input_file_2, "", "Second input. Default none.");
+ABSL_FLAG(std::string, input_file_3, "", "Third input. Default none.");
+ABSL_FLAG(std::string, input_file_4, "", "Fourth input. Default none.");
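+
+// Example invocation (file names are illustrative):
+//   audio_mixer_test --input_file_1=a.wav --input_file_2=b.wav \
+//       --sampling_rate=16000 --output_file=mixed_file.wav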
+
+namespace webrtc {
+namespace test {
+
+class FilePlayingSource : public AudioMixer::Source {
+ public:
+ explicit FilePlayingSource(absl::string_view filename)
+ : wav_reader_(new WavReader(filename)),
+ sample_rate_hz_(wav_reader_->sample_rate()),
+ samples_per_channel_(sample_rate_hz_ / 100),
+ number_of_channels_(wav_reader_->num_channels()) {}
+
+ AudioFrameInfo GetAudioFrameWithInfo(int target_rate_hz,
+ AudioFrame* frame) override {
+ frame->samples_per_channel_ = samples_per_channel_;
+ frame->num_channels_ = number_of_channels_;
+ frame->sample_rate_hz_ = target_rate_hz;
+
+ RTC_CHECK_EQ(target_rate_hz, sample_rate_hz_);
+
+ const size_t num_to_read = number_of_channels_ * samples_per_channel_;
+ const size_t num_read =
+ wav_reader_->ReadSamples(num_to_read, frame->mutable_data());
+
+ file_has_ended_ = num_to_read != num_read;
+ if (file_has_ended_) {
+ frame->Mute();
+ }
+ return file_has_ended_ ? AudioFrameInfo::kMuted : AudioFrameInfo::kNormal;
+ }
+
+ int Ssrc() const override { return 0; }
+
+ int PreferredSampleRate() const override { return sample_rate_hz_; }
+
+ bool FileHasEnded() const { return file_has_ended_; }
+
+ std::string ToString() const {
+ rtc::StringBuilder ss;
+ ss << "{rate: " << sample_rate_hz_ << ", channels: " << number_of_channels_
+ << ", samples_tot: " << wav_reader_->num_samples() << "}";
+ return ss.Release();
+ }
+
+ private:
+ std::unique_ptr<WavReader> wav_reader_;
+ int sample_rate_hz_;
+ int samples_per_channel_;
+ int number_of_channels_;
+ bool file_has_ended_ = false;
+};
+} // namespace test
+} // namespace webrtc
+
+namespace {
+
+const std::vector<std::string> parse_input_files() {
+ std::vector<std::string> result;
+ for (auto& x :
+ {absl::GetFlag(FLAGS_input_file_1), absl::GetFlag(FLAGS_input_file_2),
+ absl::GetFlag(FLAGS_input_file_3), absl::GetFlag(FLAGS_input_file_4)}) {
+ if (!x.empty()) {
+ result.push_back(x);
+ }
+ }
+ return result;
+}
+} // namespace
+
+int main(int argc, char* argv[]) {
+ absl::ParseCommandLine(argc, argv);
+
+ rtc::scoped_refptr<webrtc::AudioMixerImpl> mixer(
+ webrtc::AudioMixerImpl::Create(
+ std::unique_ptr<webrtc::OutputRateCalculator>(
+ new webrtc::DefaultOutputRateCalculator()),
+ absl::GetFlag(FLAGS_limiter)));
+
+ const std::vector<std::string> input_files = parse_input_files();
+ std::vector<webrtc::test::FilePlayingSource> sources;
+ const int num_channels = absl::GetFlag(FLAGS_stereo) ? 2 : 1;
+ sources.reserve(input_files.size());
+ for (const auto& input_file : input_files) {
+ sources.emplace_back(input_file);
+ }
+
+ for (auto& source : sources) {
+ auto error = mixer->AddSource(&source);
+ RTC_CHECK(error);
+ }
+
+ if (sources.empty()) {
+ std::cout << "Need at least one source!\n";
+ return 1;
+ }
+
+ const size_t sample_rate = sources[0].PreferredSampleRate();
+ for (const auto& source : sources) {
+ RTC_CHECK_EQ(sample_rate, source.PreferredSampleRate());
+ }
+
+ // Print stats.
+ std::cout << "Limiting is: " << (absl::GetFlag(FLAGS_limiter) ? "on" : "off")
+ << "\n"
+ "Channels: "
+ << num_channels
+ << "\n"
+ "Rate: "
+ << sample_rate
+ << "\n"
+ "Number of input streams: "
+ << input_files.size() << "\n";
+ for (const auto& source : sources) {
+ std::cout << "\t" << source.ToString() << "\n";
+ }
+ std::cout << "Now mixing\n...\n";
+
+ webrtc::WavWriter wav_writer(absl::GetFlag(FLAGS_output_file), sample_rate,
+ num_channels);
+
+ webrtc::AudioFrame frame;
+
+ bool all_streams_finished = false;
+ while (!all_streams_finished) {
+ mixer->Mix(num_channels, &frame);
+ RTC_CHECK_EQ(sample_rate / 100, frame.samples_per_channel_);
+ RTC_CHECK_EQ(sample_rate, frame.sample_rate_hz_);
+ RTC_CHECK_EQ(num_channels, frame.num_channels_);
+ wav_writer.WriteSamples(frame.data(),
+ num_channels * frame.samples_per_channel_);
+
+ all_streams_finished =
+ std::all_of(sources.begin(), sources.end(),
+ [](const webrtc::test::FilePlayingSource& source) {
+ return source.FileHasEnded();
+ });
+ }
+
+ std::cout << "Done!\n" << std::endl;
+}
diff --git a/third_party/libwebrtc/modules/audio_mixer/default_output_rate_calculator.cc b/third_party/libwebrtc/modules/audio_mixer/default_output_rate_calculator.cc
new file mode 100644
index 0000000000..5f24b653a3
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_mixer/default_output_rate_calculator.cc
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_mixer/default_output_rate_calculator.h"
+
+#include <algorithm>
+#include <iterator>
+
+#include "modules/audio_processing/include/audio_processing.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+int DefaultOutputRateCalculator::CalculateOutputRateFromRange(
+ rtc::ArrayView<const int> preferred_sample_rates) {
+ if (preferred_sample_rates.empty()) {
+ return DefaultOutputRateCalculator::kDefaultFrequency;
+ }
+ using NativeRate = AudioProcessing::NativeRate;
+ const int maximal_frequency = *std::max_element(
+ preferred_sample_rates.cbegin(), preferred_sample_rates.cend());
+
+ RTC_DCHECK_LE(NativeRate::kSampleRate8kHz, maximal_frequency);
+ RTC_DCHECK_GE(NativeRate::kSampleRate48kHz, maximal_frequency);
+
+ static constexpr NativeRate native_rates[] = {
+ NativeRate::kSampleRate8kHz, NativeRate::kSampleRate16kHz,
+ NativeRate::kSampleRate32kHz, NativeRate::kSampleRate48kHz};
+ const auto* rounded_up_index = std::lower_bound(
+ std::begin(native_rates), std::end(native_rates), maximal_frequency);
+ RTC_DCHECK(rounded_up_index != std::end(native_rates));
+ return *rounded_up_index;
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_mixer/default_output_rate_calculator.h b/third_party/libwebrtc/modules/audio_mixer/default_output_rate_calculator.h
new file mode 100644
index 0000000000..02a3b5c37b
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_mixer/default_output_rate_calculator.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_MIXER_DEFAULT_OUTPUT_RATE_CALCULATOR_H_
+#define MODULES_AUDIO_MIXER_DEFAULT_OUTPUT_RATE_CALCULATOR_H_
+
+#include <vector>
+
+#include "api/array_view.h"
+#include "modules/audio_mixer/output_rate_calculator.h"
+
+namespace webrtc {
+
+class DefaultOutputRateCalculator : public OutputRateCalculator {
+ public:
+ static const int kDefaultFrequency = 48000;
+
+ // Returns the least native rate that is greater than or equal to the
+ // largest of the preferred sample rates. A native rate is one in
+ // AudioProcessing::NativeRate. If `preferred_sample_rates` is
+ // empty, returns `kDefaultFrequency`.
+ int CalculateOutputRateFromRange(
+ rtc::ArrayView<const int> preferred_sample_rates) override;
+ ~DefaultOutputRateCalculator() override {}
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_MIXER_DEFAULT_OUTPUT_RATE_CALCULATOR_H_
diff --git a/third_party/libwebrtc/modules/audio_mixer/frame_combiner.cc b/third_party/libwebrtc/modules/audio_mixer/frame_combiner.cc
new file mode 100644
index 0000000000..e31eea595f
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_mixer/frame_combiner.cc
@@ -0,0 +1,245 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_mixer/frame_combiner.h"
+
+#include <algorithm>
+#include <array>
+#include <cstdint>
+#include <iterator>
+#include <memory>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "api/array_view.h"
+#include "api/rtp_packet_info.h"
+#include "api/rtp_packet_infos.h"
+#include "common_audio/include/audio_util.h"
+#include "modules/audio_mixer/audio_frame_manipulator.h"
+#include "modules/audio_mixer/audio_mixer_impl.h"
+#include "modules/audio_processing/include/audio_frame_view.h"
+#include "modules/audio_processing/include/audio_processing.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+#include "rtc_base/arraysize.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "system_wrappers/include/metrics.h"
+
+namespace webrtc {
+namespace {
+
+using MixingBuffer =
+ std::array<std::array<float, FrameCombiner::kMaximumChannelSize>,
+ FrameCombiner::kMaximumNumberOfChannels>;
+
+void SetAudioFrameFields(rtc::ArrayView<const AudioFrame* const> mix_list,
+ size_t number_of_channels,
+ int sample_rate,
+ size_t number_of_streams,
+ AudioFrame* audio_frame_for_mixing) {
+ const size_t samples_per_channel = static_cast<size_t>(
+ (sample_rate * webrtc::AudioMixerImpl::kFrameDurationInMs) / 1000);
+
+ // TODO(minyue): Issue bugs.webrtc.org/3390.
+ // Audio frame timestamp. The 'timestamp_' field is initialized to the
+ // dummy value 0 here; when the mix list is non-empty, it is overwritten
+ // below with the earliest timestamp among the mixed frames.
+ audio_frame_for_mixing->UpdateFrame(
+ 0, nullptr, samples_per_channel, sample_rate, AudioFrame::kUndefined,
+ AudioFrame::kVadUnknown, number_of_channels);
+
+ if (mix_list.empty()) {
+ audio_frame_for_mixing->elapsed_time_ms_ = -1;
+ } else {
+ audio_frame_for_mixing->timestamp_ = mix_list[0]->timestamp_;
+ audio_frame_for_mixing->elapsed_time_ms_ = mix_list[0]->elapsed_time_ms_;
+ audio_frame_for_mixing->ntp_time_ms_ = mix_list[0]->ntp_time_ms_;
+ std::vector<RtpPacketInfo> packet_infos;
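+ // Across the mixed frames, keep the earliest RTP and NTP timestamps, the
+ // largest elapsed time, and the union of all packet infos.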
+ for (const auto& frame : mix_list) {
+ audio_frame_for_mixing->timestamp_ =
+ std::min(audio_frame_for_mixing->timestamp_, frame->timestamp_);
+ audio_frame_for_mixing->ntp_time_ms_ =
+ std::min(audio_frame_for_mixing->ntp_time_ms_, frame->ntp_time_ms_);
+ audio_frame_for_mixing->elapsed_time_ms_ = std::max(
+ audio_frame_for_mixing->elapsed_time_ms_, frame->elapsed_time_ms_);
+ packet_infos.insert(packet_infos.end(), frame->packet_infos_.begin(),
+ frame->packet_infos_.end());
+ }
+ audio_frame_for_mixing->packet_infos_ =
+ RtpPacketInfos(std::move(packet_infos));
+ }
+}
+
+void MixFewFramesWithNoLimiter(rtc::ArrayView<const AudioFrame* const> mix_list,
+ AudioFrame* audio_frame_for_mixing) {
+ if (mix_list.empty()) {
+ audio_frame_for_mixing->Mute();
+ return;
+ }
+ RTC_DCHECK_LE(mix_list.size(), 1);
+ std::copy(mix_list[0]->data(),
+ mix_list[0]->data() +
+ mix_list[0]->num_channels_ * mix_list[0]->samples_per_channel_,
+ audio_frame_for_mixing->mutable_data());
+}
+
+void MixToFloatFrame(rtc::ArrayView<const AudioFrame* const> mix_list,
+ size_t samples_per_channel,
+ size_t number_of_channels,
+ MixingBuffer* mixing_buffer) {
+ RTC_DCHECK_LE(samples_per_channel, FrameCombiner::kMaximumChannelSize);
+ RTC_DCHECK_LE(number_of_channels, FrameCombiner::kMaximumNumberOfChannels);
+ // Clear the mixing buffer.
+ for (auto& one_channel_buffer : *mixing_buffer) {
+ std::fill(one_channel_buffer.begin(), one_channel_buffer.end(), 0.f);
+ }
+
+ // Convert to FloatS16 and mix.
+ for (size_t i = 0; i < mix_list.size(); ++i) {
+ const AudioFrame* const frame = mix_list[i];
+ const int16_t* const frame_data = frame->data();
+ for (size_t j = 0; j < std::min(number_of_channels,
+ FrameCombiner::kMaximumNumberOfChannels);
+ ++j) {
+ for (size_t k = 0; k < std::min(samples_per_channel,
+ FrameCombiner::kMaximumChannelSize);
+ ++k) {
+ (*mixing_buffer)[j][k] += frame_data[number_of_channels * k + j];
+ }
+ }
+ }
+}
+
+void RunLimiter(AudioFrameView<float> mixing_buffer_view, Limiter* limiter) {
+ const size_t sample_rate = mixing_buffer_view.samples_per_channel() * 1000 /
+ AudioMixerImpl::kFrameDurationInMs;
+ // TODO(alessiob): Avoid calling SetSampleRate every time.
+ limiter->SetSampleRate(sample_rate);
+ limiter->Process(mixing_buffer_view);
+}
+
+// Interleaves the channels and rounds each FloatS16 sample back to int16.
+void InterleaveToAudioFrame(AudioFrameView<const float> mixing_buffer_view,
+ AudioFrame* audio_frame_for_mixing) {
+ const size_t number_of_channels = mixing_buffer_view.num_channels();
+ const size_t samples_per_channel = mixing_buffer_view.samples_per_channel();
+ int16_t* const mixing_data = audio_frame_for_mixing->mutable_data();
+ // Put data in the result frame.
+ for (size_t i = 0; i < number_of_channels; ++i) {
+ for (size_t j = 0; j < samples_per_channel; ++j) {
+ mixing_data[number_of_channels * j + i] =
+ FloatS16ToS16(mixing_buffer_view.channel(i)[j]);
+ }
+ }
+}
+} // namespace
+
+constexpr size_t FrameCombiner::kMaximumNumberOfChannels;
+constexpr size_t FrameCombiner::kMaximumChannelSize;
+
+FrameCombiner::FrameCombiner(bool use_limiter)
+ : data_dumper_(new ApmDataDumper(0)),
+ mixing_buffer_(
+ std::make_unique<std::array<std::array<float, kMaximumChannelSize>,
+ kMaximumNumberOfChannels>>()),
+ limiter_(static_cast<size_t>(48000), data_dumper_.get(), "AudioMixer"),
+ use_limiter_(use_limiter) {
+ static_assert(kMaximumChannelSize * kMaximumNumberOfChannels <=
+ AudioFrame::kMaxDataSizeSamples,
+ "");
+}
+
+FrameCombiner::~FrameCombiner() = default;
+
+void FrameCombiner::Combine(rtc::ArrayView<AudioFrame* const> mix_list,
+ size_t number_of_channels,
+ int sample_rate,
+ size_t number_of_streams,
+ AudioFrame* audio_frame_for_mixing) {
+ RTC_DCHECK(audio_frame_for_mixing);
+
+ LogMixingStats(mix_list, sample_rate, number_of_streams);
+
+ SetAudioFrameFields(mix_list, number_of_channels, sample_rate,
+ number_of_streams, audio_frame_for_mixing);
+
+ const size_t samples_per_channel = static_cast<size_t>(
+ (sample_rate * webrtc::AudioMixerImpl::kFrameDurationInMs) / 1000);
+
+ for (const auto* frame : mix_list) {
+ RTC_DCHECK_EQ(samples_per_channel, frame->samples_per_channel_);
+ RTC_DCHECK_EQ(sample_rate, frame->sample_rate_hz_);
+ }
+
+ // The 'num_channels_' field of frames in 'mix_list' could be
+ // different from 'number_of_channels'.
+ for (auto* frame : mix_list) {
+ RemixFrame(number_of_channels, frame);
+ }
+
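+ // With at most one incoming stream, the frame can be passed through as-is;
+ // the limiter only matters when several streams are summed together.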
+ if (number_of_streams <= 1) {
+ MixFewFramesWithNoLimiter(mix_list, audio_frame_for_mixing);
+ return;
+ }
+
+ MixToFloatFrame(mix_list, samples_per_channel, number_of_channels,
+ mixing_buffer_.get());
+
+ const size_t output_number_of_channels =
+ std::min(number_of_channels, kMaximumNumberOfChannels);
+ const size_t output_samples_per_channel =
+ std::min(samples_per_channel, kMaximumChannelSize);
+
+ // Put float data in an AudioFrameView.
+ std::array<float*, kMaximumNumberOfChannels> channel_pointers{};
+ for (size_t i = 0; i < output_number_of_channels; ++i) {
+ channel_pointers[i] = &(*mixing_buffer_.get())[i][0];
+ }
+ AudioFrameView<float> mixing_buffer_view(&channel_pointers[0],
+ output_number_of_channels,
+ output_samples_per_channel);
+
+ if (use_limiter_) {
+ RunLimiter(mixing_buffer_view, &limiter_);
+ }
+
+ InterleaveToAudioFrame(mixing_buffer_view, audio_frame_for_mixing);
+}
+
+void FrameCombiner::LogMixingStats(
+ rtc::ArrayView<const AudioFrame* const> mix_list,
+ int sample_rate,
+ size_t number_of_streams) const {
+ // Log every second.
+ uma_logging_counter_++;
+ if (uma_logging_counter_ > 1000 / AudioMixerImpl::kFrameDurationInMs) {
+ uma_logging_counter_ = 0;
+ RTC_HISTOGRAM_COUNTS_100("WebRTC.Audio.AudioMixer.NumIncomingStreams",
+ static_cast<int>(number_of_streams));
+ RTC_HISTOGRAM_COUNTS_LINEAR(
+ "WebRTC.Audio.AudioMixer.NumIncomingActiveStreams2",
+ rtc::dchecked_cast<int>(mix_list.size()), /*min=*/1, /*max=*/16,
+ /*bucket_count=*/16);
+
+ using NativeRate = AudioProcessing::NativeRate;
+ static constexpr NativeRate native_rates[] = {
+ NativeRate::kSampleRate8kHz, NativeRate::kSampleRate16kHz,
+ NativeRate::kSampleRate32kHz, NativeRate::kSampleRate48kHz};
+ const auto* rate_position = std::lower_bound(
+ std::begin(native_rates), std::end(native_rates), sample_rate);
+ RTC_HISTOGRAM_ENUMERATION(
+ "WebRTC.Audio.AudioMixer.MixingRate",
+ std::distance(std::begin(native_rates), rate_position),
+ arraysize(native_rates));
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_mixer/frame_combiner.h b/third_party/libwebrtc/modules/audio_mixer/frame_combiner.h
new file mode 100644
index 0000000000..9ddf81e41e
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_mixer/frame_combiner.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_MIXER_FRAME_COMBINER_H_
+#define MODULES_AUDIO_MIXER_FRAME_COMBINER_H_
+
+#include <array>
+#include <memory>
+#include <vector>
+
+#include "api/array_view.h"
+#include "api/audio/audio_frame.h"
+#include "modules/audio_processing/agc2/limiter.h"
+
+namespace webrtc {
+class ApmDataDumper;
+
+class FrameCombiner {
+ public:
+ enum class LimiterType { kNoLimiter, kApmAgcLimiter, kApmAgc2Limiter };
+ explicit FrameCombiner(bool use_limiter);
+ ~FrameCombiner();
+
+ // Combine several frames into one. Assumes sample_rate,
+ // samples_per_channel of the input frames match the parameters. The
+ // parameters 'number_of_channels' and 'sample_rate' are needed
+ // because 'mix_list' can be empty. The parameter
+ // 'number_of_streams' is used for determining whether to pass the
+ // data through a limiter.
+ void Combine(rtc::ArrayView<AudioFrame* const> mix_list,
+ size_t number_of_channels,
+ int sample_rate,
+ size_t number_of_streams,
+ AudioFrame* audio_frame_for_mixing);
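+ //
+ // Illustrative usage (assuming `frame_a` and `frame_b` are already-filled
+ // 10 ms frames that match the parameters):
+ //   FrameCombiner combiner(/*use_limiter=*/true);
+ //   AudioFrame* frames[] = {&frame_a, &frame_b};
+ //   AudioFrame mixed;
+ //   combiner.Combine(frames, /*number_of_channels=*/2, /*sample_rate=*/48000,
+ //                    /*number_of_streams=*/2, &mixed);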
+
+ // Up to 8 channels, 48 kHz, 10 ms.
+ static constexpr size_t kMaximumNumberOfChannels = 8;
+ static constexpr size_t kMaximumChannelSize = 48 * 10;
+
+ using MixingBuffer = std::array<std::array<float, kMaximumChannelSize>,
+ kMaximumNumberOfChannels>;
+
+ private:
+ void LogMixingStats(rtc::ArrayView<const AudioFrame* const> mix_list,
+ int sample_rate,
+ size_t number_of_streams) const;
+
+ std::unique_ptr<ApmDataDumper> data_dumper_;
+ std::unique_ptr<MixingBuffer> mixing_buffer_;
+ Limiter limiter_;
+ const bool use_limiter_;
+ mutable int uma_logging_counter_ = 0;
+};
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_MIXER_FRAME_COMBINER_H_
diff --git a/third_party/libwebrtc/modules/audio_mixer/frame_combiner_unittest.cc b/third_party/libwebrtc/modules/audio_mixer/frame_combiner_unittest.cc
new file mode 100644
index 0000000000..fa1fef325c
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_mixer/frame_combiner_unittest.cc
@@ -0,0 +1,341 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_mixer/frame_combiner.h"
+
+#include <cstdint>
+#include <initializer_list>
+#include <numeric>
+#include <string>
+#include <type_traits>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/array_view.h"
+#include "api/rtp_packet_info.h"
+#include "api/rtp_packet_infos.h"
+#include "audio/utility/audio_frame_operations.h"
+#include "modules/audio_mixer/gain_change_calculator.h"
+#include "modules/audio_mixer/sine_wave_generator.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/strings/string_builder.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+namespace {
+
+using ::testing::ElementsAreArray;
+using ::testing::IsEmpty;
+using ::testing::UnorderedElementsAreArray;
+
+using LimiterType = FrameCombiner::LimiterType;
+
+struct FrameCombinerConfig {
+ bool use_limiter;
+ int sample_rate_hz;
+ int number_of_channels;
+ float wave_frequency;
+};
+
+std::string ProduceDebugText(int sample_rate_hz,
+ int number_of_channels,
+ int number_of_sources) {
+ rtc::StringBuilder ss;
+ ss << "Sample rate: " << sample_rate_hz << " ,";
+ ss << "number of channels: " << number_of_channels << " ,";
+ ss << "number of sources: " << number_of_sources;
+ return ss.Release();
+}
+
+std::string ProduceDebugText(const FrameCombinerConfig& config) {
+ rtc::StringBuilder ss;
+ ss << "Sample rate: " << config.sample_rate_hz << " ,";
+ ss << "number of channels: " << config.number_of_channels << " ,";
+ ss << "limiter active: " << (config.use_limiter ? "on" : "off") << " ,";
+ ss << "wave frequency: " << config.wave_frequency << " ,";
+ return ss.Release();
+}
+
+AudioFrame frame1;
+AudioFrame frame2;
+
+void SetUpFrames(int sample_rate_hz, int number_of_channels) {
+ RtpPacketInfo packet_info1(
+ /*ssrc=*/1001, /*csrcs=*/{}, /*rtp_timestamp=*/1000,
+ /*audio_level=*/absl::nullopt, /*absolute_capture_time=*/absl::nullopt,
+ /*receive_time_ms=*/1);
+ RtpPacketInfo packet_info2(
+ /*ssrc=*/4004, /*csrcs=*/{}, /*rtp_timestamp=*/1234,
+ /*audio_level=*/absl::nullopt, /*absolute_capture_time=*/absl::nullopt,
+ /*receive_time_ms=*/2);
+ RtpPacketInfo packet_info3(
+ /*ssrc=*/7007, /*csrcs=*/{}, /*rtp_timestamp=*/1333,
+ /*audio_level=*/absl::nullopt, /*absolute_capture_time=*/absl::nullopt,
+ /*receive_time_ms=*/2);
+
+ frame1.packet_infos_ = RtpPacketInfos({packet_info1});
+ frame2.packet_infos_ = RtpPacketInfos({packet_info2, packet_info3});
+
+ for (auto* frame : {&frame1, &frame2}) {
+ frame->UpdateFrame(0, nullptr, rtc::CheckedDivExact(sample_rate_hz, 100),
+ sample_rate_hz, AudioFrame::kNormalSpeech,
+ AudioFrame::kVadActive, number_of_channels);
+ }
+}
+} // namespace
+
+// The limiter requires the sample rate to be divisible by 2000.
+TEST(FrameCombiner, BasicApiCallsLimiter) {
+ FrameCombiner combiner(true);
+ for (const int rate : {8000, 18000, 34000, 48000}) {
+ for (const int number_of_channels : {1, 2, 4, 8}) {
+ const std::vector<AudioFrame*> all_frames = {&frame1, &frame2};
+ SetUpFrames(rate, number_of_channels);
+
+ for (const int number_of_frames : {0, 1, 2}) {
+ SCOPED_TRACE(
+ ProduceDebugText(rate, number_of_channels, number_of_frames));
+ const std::vector<AudioFrame*> frames_to_combine(
+ all_frames.begin(), all_frames.begin() + number_of_frames);
+ AudioFrame audio_frame_for_mixing;
+ combiner.Combine(frames_to_combine, number_of_channels, rate,
+ frames_to_combine.size(), &audio_frame_for_mixing);
+ }
+ }
+ }
+}
+
+// The RtpPacketInfos field of the mixed frame should contain the union of the
+// RtpPacketInfos from the frames that were actually mixed.
+TEST(FrameCombiner, ContainsAllRtpPacketInfos) {
+ static constexpr int kSampleRateHz = 48000;
+ static constexpr int kNumChannels = 1;
+ FrameCombiner combiner(true);
+ const std::vector<AudioFrame*> all_frames = {&frame1, &frame2};
+ SetUpFrames(kSampleRateHz, kNumChannels);
+
+ for (const int number_of_frames : {0, 1, 2}) {
+ SCOPED_TRACE(
+ ProduceDebugText(kSampleRateHz, kNumChannels, number_of_frames));
+ const std::vector<AudioFrame*> frames_to_combine(
+ all_frames.begin(), all_frames.begin() + number_of_frames);
+
+ std::vector<RtpPacketInfo> packet_infos;
+ for (const auto& frame : frames_to_combine) {
+ packet_infos.insert(packet_infos.end(), frame->packet_infos_.begin(),
+ frame->packet_infos_.end());
+ }
+
+ AudioFrame audio_frame_for_mixing;
+ combiner.Combine(frames_to_combine, kNumChannels, kSampleRateHz,
+ frames_to_combine.size(), &audio_frame_for_mixing);
+ EXPECT_THAT(audio_frame_for_mixing.packet_infos_,
+ UnorderedElementsAreArray(packet_infos));
+ }
+}
+
+// There are DCHECKs in place to check for invalid parameters.
+TEST(FrameCombinerDeathTest, DebugBuildCrashesWithManyChannels) {
+ FrameCombiner combiner(true);
+ for (const int rate : {8000, 18000, 34000, 48000}) {
+ for (const int number_of_channels : {10, 20, 21}) {
+ if (static_cast<size_t>(rate / 100 * number_of_channels) >
+ AudioFrame::kMaxDataSizeSamples) {
+ continue;
+ }
+ const std::vector<AudioFrame*> all_frames = {&frame1, &frame2};
+ SetUpFrames(rate, number_of_channels);
+
+ const int number_of_frames = 2;
+ SCOPED_TRACE(
+ ProduceDebugText(rate, number_of_channels, number_of_frames));
+ const std::vector<AudioFrame*> frames_to_combine(
+ all_frames.begin(), all_frames.begin() + number_of_frames);
+ AudioFrame audio_frame_for_mixing;
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+ EXPECT_DEATH(
+ combiner.Combine(frames_to_combine, number_of_channels, rate,
+ frames_to_combine.size(), &audio_frame_for_mixing),
+ "");
+#elif !RTC_DCHECK_IS_ON
+ combiner.Combine(frames_to_combine, number_of_channels, rate,
+ frames_to_combine.size(), &audio_frame_for_mixing);
+#endif
+ }
+ }
+}
+
+TEST(FrameCombinerDeathTest, DebugBuildCrashesWithHighRate) {
+ FrameCombiner combiner(true);
+ for (const int rate : {50000, 96000, 128000, 196000}) {
+ for (const int number_of_channels : {1, 2, 3}) {
+ if (static_cast<size_t>(rate / 100 * number_of_channels) >
+ AudioFrame::kMaxDataSizeSamples) {
+ continue;
+ }
+ const std::vector<AudioFrame*> all_frames = {&frame1, &frame2};
+ SetUpFrames(rate, number_of_channels);
+
+ const int number_of_frames = 2;
+ SCOPED_TRACE(
+ ProduceDebugText(rate, number_of_channels, number_of_frames));
+ const std::vector<AudioFrame*> frames_to_combine(
+ all_frames.begin(), all_frames.begin() + number_of_frames);
+ AudioFrame audio_frame_for_mixing;
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+ EXPECT_DEATH(
+ combiner.Combine(frames_to_combine, number_of_channels, rate,
+ frames_to_combine.size(), &audio_frame_for_mixing),
+ "");
+#elif !RTC_DCHECK_IS_ON
+ combiner.Combine(frames_to_combine, number_of_channels, rate,
+ frames_to_combine.size(), &audio_frame_for_mixing);
+#endif
+ }
+ }
+}
+
+// With no limiter, the rate has to be divisible by 100 since we use
+// 10 ms frames.
+TEST(FrameCombiner, BasicApiCallsNoLimiter) {
+ FrameCombiner combiner(false);
+ for (const int rate : {8000, 10000, 11000, 32000, 44100}) {
+ for (const int number_of_channels : {1, 2, 4, 8}) {
+ const std::vector<AudioFrame*> all_frames = {&frame1, &frame2};
+ SetUpFrames(rate, number_of_channels);
+
+ for (const int number_of_frames : {0, 1, 2}) {
+ SCOPED_TRACE(
+ ProduceDebugText(rate, number_of_channels, number_of_frames));
+ const std::vector<AudioFrame*> frames_to_combine(
+ all_frames.begin(), all_frames.begin() + number_of_frames);
+ AudioFrame audio_frame_for_mixing;
+ combiner.Combine(frames_to_combine, number_of_channels, rate,
+ frames_to_combine.size(), &audio_frame_for_mixing);
+ }
+ }
+ }
+}
+
+TEST(FrameCombiner, CombiningZeroFramesShouldProduceSilence) {
+ FrameCombiner combiner(false);
+ for (const int rate : {8000, 10000, 11000, 32000, 44100}) {
+ for (const int number_of_channels : {1, 2}) {
+ SCOPED_TRACE(ProduceDebugText(rate, number_of_channels, 0));
+
+ AudioFrame audio_frame_for_mixing;
+
+ const std::vector<AudioFrame*> frames_to_combine;
+ combiner.Combine(frames_to_combine, number_of_channels, rate,
+ frames_to_combine.size(), &audio_frame_for_mixing);
+ const int16_t* audio_frame_for_mixing_data =
+ audio_frame_for_mixing.data();
+ const std::vector<int16_t> mixed_data(
+ audio_frame_for_mixing_data,
+ audio_frame_for_mixing_data + number_of_channels * rate / 100);
+
+ const std::vector<int16_t> expected(number_of_channels * rate / 100, 0);
+ EXPECT_EQ(mixed_data, expected);
+ EXPECT_THAT(audio_frame_for_mixing.packet_infos_, IsEmpty());
+ }
+ }
+}
+
+TEST(FrameCombiner, CombiningOneFrameShouldNotChangeFrame) {
+ FrameCombiner combiner(false);
+ for (const int rate : {8000, 10000, 11000, 32000, 44100}) {
+ for (const int number_of_channels : {1, 2, 4, 8, 10}) {
+ SCOPED_TRACE(ProduceDebugText(rate, number_of_channels, 1));
+
+ AudioFrame audio_frame_for_mixing;
+
+ SetUpFrames(rate, number_of_channels);
+ int16_t* frame1_data = frame1.mutable_data();
+ std::iota(frame1_data, frame1_data + number_of_channels * rate / 100, 0);
+ const std::vector<AudioFrame*> frames_to_combine = {&frame1};
+ combiner.Combine(frames_to_combine, number_of_channels, rate,
+ frames_to_combine.size(), &audio_frame_for_mixing);
+
+ const int16_t* audio_frame_for_mixing_data =
+ audio_frame_for_mixing.data();
+ const std::vector<int16_t> mixed_data(
+ audio_frame_for_mixing_data,
+ audio_frame_for_mixing_data + number_of_channels * rate / 100);
+
+ std::vector<int16_t> expected(number_of_channels * rate / 100);
+ std::iota(expected.begin(), expected.end(), 0);
+ EXPECT_EQ(mixed_data, expected);
+ EXPECT_THAT(audio_frame_for_mixing.packet_infos_,
+ ElementsAreArray(frame1.packet_infos_));
+ }
+ }
+}
+
+// Send a sine wave through the FrameCombiner, and check that the
+// difference between input and output varies smoothly. Also check
+// that it is inside reasonable bounds. This is to catch issues like
+// chromium:695993 and chromium:816875.
+TEST(FrameCombiner, GainCurveIsSmoothForAlternatingNumberOfStreams) {
+ // Rates are divisible by 2000 when limiter is active.
+ std::vector<FrameCombinerConfig> configs = {
+ {false, 30100, 2, 50.f}, {false, 16500, 1, 3200.f},
+ {true, 8000, 1, 3200.f}, {true, 16000, 1, 50.f},
+ {true, 18000, 8, 3200.f}, {true, 10000, 2, 50.f},
+ };
+
+ for (const auto& config : configs) {
+ SCOPED_TRACE(ProduceDebugText(config));
+
+ FrameCombiner combiner(config.use_limiter);
+
+ constexpr int16_t wave_amplitude = 30000;
+ SineWaveGenerator wave_generator(config.wave_frequency, wave_amplitude);
+
+ GainChangeCalculator change_calculator;
+ float cumulative_change = 0.f;
+
+ constexpr size_t iterations = 100;
+
+ for (size_t i = 0; i < iterations; ++i) {
+ SetUpFrames(config.sample_rate_hz, config.number_of_channels);
+ wave_generator.GenerateNextFrame(&frame1);
+ AudioFrameOperations::Mute(&frame2);
+
+ std::vector<AudioFrame*> frames_to_combine = {&frame1};
+ if (i % 2 == 0) {
+ frames_to_combine.push_back(&frame2);
+ }
+ const size_t number_of_samples =
+ frame1.samples_per_channel_ * config.number_of_channels;
+
+ // Ensures limiter is on if 'use_limiter'.
+ constexpr size_t number_of_streams = 2;
+ AudioFrame audio_frame_for_mixing;
+ combiner.Combine(frames_to_combine, config.number_of_channels,
+ config.sample_rate_hz, number_of_streams,
+ &audio_frame_for_mixing);
+ cumulative_change += change_calculator.CalculateGainChange(
+ rtc::ArrayView<const int16_t>(frame1.data(), number_of_samples),
+ rtc::ArrayView<const int16_t>(audio_frame_for_mixing.data(),
+ number_of_samples));
+ }
+
+ // Check that the gain doesn't vary too much.
+ EXPECT_LT(cumulative_change, 10);
+
+ // Check that the latest gain is within reasonable bounds. It
+ // should be slightly less than 1.
+ EXPECT_LT(0.9f, change_calculator.LatestGain());
+ EXPECT_LT(change_calculator.LatestGain(), 1.01f);
+ }
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_mixer/g3doc/index.md b/third_party/libwebrtc/modules/audio_mixer/g3doc/index.md
new file mode 100644
index 0000000000..4ced289bf8
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_mixer/g3doc/index.md
@@ -0,0 +1,54 @@
+<?% config.freshness.owner = 'alessiob' %?>
+<?% config.freshness.reviewed = '2021-04-21' %?>
+
+# The WebRTC Audio Mixer Module
+
+The WebRTC audio mixer module is responsible for mixing multiple incoming audio
+streams (sources) into a single audio stream (mix). It works with 10 ms frames
+and supports sample rates up to 48 kHz and up to 8 audio channels. The API is
+defined in
+[`api/audio/audio_mixer.h`](https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/api/audio/audio_mixer.h)
+and it includes the definition of
+[`AudioMixer::Source`](https://source.chromium.org/search?q=symbol:AudioMixer::Source%20file:third_party%2Fwebrtc%2Fapi%2Faudio%2Faudio_mixer.h),
+which describes an incoming audio stream, and the definition of
+[`AudioMixer`](https://source.chromium.org/search?q=symbol:AudioMixer%20file:third_party%2Fwebrtc%2Fapi%2Faudio%2Faudio_mixer.h),
+which operates on a collection of
+[`AudioMixer::Source`](https://source.chromium.org/search?q=symbol:AudioMixer::Source%20file:third_party%2Fwebrtc%2Fapi%2Faudio%2Faudio_mixer.h)
+objects to produce a mix.
+
+## AudioMixer::Source
+
+Each source has its own characteristics (e.g., sample rate, number of channels,
+muted state) and is identified by an SSRC[^1].
+[`AudioMixer::Source::GetAudioFrameWithInfo()`](https://source.chromium.org/search?q=symbol:AudioMixer::Source::GetAudioFrameWithInfo%20file:third_party%2Fwebrtc%2Fapi%2Faudio%2Faudio_mixer.h)
+is used to retrieve the next 10 ms chunk of audio to be mixed.
+
+[^1]: A synchronization source (SSRC) is the source of a stream of RTP packets,
+ identified by a 32-bit numeric SSRC identifier carried in the RTP header
+ so as not to be dependent upon the network address (see
+ [RFC 3550](https://tools.ietf.org/html/rfc3550#section-3)).
+
+## AudioMixer
+
+The interface allows sources to be added and removed, and the
+[`AudioMixer::Mix()`](https://source.chromium.org/search?q=symbol:AudioMixer::Mix%20file:third_party%2Fwebrtc%2Fapi%2Faudio%2Faudio_mixer.h)
+method generates a mix with the desired number of channels.
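+
+A minimal usage sketch (illustrative; `MySource` stands for any concrete
+`AudioMixer::Source` implementation):
+
+```c++
+rtc::scoped_refptr<webrtc::AudioMixer> mixer =
+    webrtc::AudioMixerImpl::Create();
+MySource source;
+mixer->AddSource(&source);
+webrtc::AudioFrame mix;
+mixer->Mix(/*number_of_channels=*/2, &mix);  // One mixed 10 ms frame.
+mixer->RemoveSource(&source);
+```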
+
+## WebRTC implementation
+
+The interface is implemented in different parts of WebRTC:
+
+* [`AudioMixer::Source`](https://source.chromium.org/search?q=symbol:AudioMixer::Source%20file:third_party%2Fwebrtc%2Fapi%2Faudio%2Faudio_mixer.h):
+ [`audio/audio_receive_stream.h`](https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/audio/audio_receive_stream.h)
+* [`AudioMixer`](https://source.chromium.org/search?q=symbol:AudioMixer%20file:third_party%2Fwebrtc%2Fapi%2Faudio%2Faudio_mixer.h):
+ [`modules/audio_mixer/audio_mixer_impl.h`](https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_mixer/audio_mixer_impl.h)
+
+[`AudioMixer`](https://source.chromium.org/search?q=symbol:AudioMixer%20file:third_party%2Fwebrtc%2Fapi%2Faudio%2Faudio_mixer.h)
+is thread-safe. The output sample rate of the generated mix is chosen
+automatically based on the sample rates of the sources, whereas the number of
+output channels is defined by the caller[^2]. Samples from the non-muted
+sources are summed, and a limiter then applies soft-clipping when needed.
+
+[^2]: [`audio/utility/channel_mixer.h`](https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/audio/utility/channel_mixer.h)
+ is used to mix channels in the non-trivial cases - i.e., if the number of
+ channels for a source or the mix is greater than 3.
diff --git a/third_party/libwebrtc/modules/audio_mixer/gain_change_calculator.cc b/third_party/libwebrtc/modules/audio_mixer/gain_change_calculator.cc
new file mode 100644
index 0000000000..dbd0945239
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_mixer/gain_change_calculator.cc
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_mixer/gain_change_calculator.h"
+
+#include <math.h>
+
+#include <cstdlib>
+#include <vector>
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+namespace {
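+// Samples with magnitude below this threshold are too small to give a
+// reliable out/in gain estimate, so the last reliable gain is reused.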
+constexpr int16_t kReliabilityThreshold = 100;
+} // namespace
+
+float GainChangeCalculator::CalculateGainChange(
+ rtc::ArrayView<const int16_t> in,
+ rtc::ArrayView<const int16_t> out) {
+ RTC_DCHECK_EQ(in.size(), out.size());
+
+ std::vector<float> gain(in.size());
+ CalculateGain(in, out, gain);
+ return CalculateDifferences(gain);
+}
+
+float GainChangeCalculator::LatestGain() const {
+ return last_reliable_gain_;
+}
+
+void GainChangeCalculator::CalculateGain(rtc::ArrayView<const int16_t> in,
+ rtc::ArrayView<const int16_t> out,
+ rtc::ArrayView<float> gain) {
+ RTC_DCHECK_EQ(in.size(), out.size());
+ RTC_DCHECK_EQ(in.size(), gain.size());
+
+ for (size_t i = 0; i < in.size(); ++i) {
+ if (std::abs(in[i]) >= kReliabilityThreshold) {
+ last_reliable_gain_ = out[i] / static_cast<float>(in[i]);
+ }
+ gain[i] = last_reliable_gain_;
+ }
+}
+
+float GainChangeCalculator::CalculateDifferences(
+ rtc::ArrayView<const float> values) {
+ float res = 0;
+ for (float f : values) {
+ res += fabs(f - last_value_);
+ last_value_ = f;
+ }
+ return res;
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_mixer/gain_change_calculator.h b/third_party/libwebrtc/modules/audio_mixer/gain_change_calculator.h
new file mode 100644
index 0000000000..3dde9be61e
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_mixer/gain_change_calculator.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_MIXER_GAIN_CHANGE_CALCULATOR_H_
+#define MODULES_AUDIO_MIXER_GAIN_CHANGE_CALCULATOR_H_
+
+#include <stdint.h>
+
+#include "api/array_view.h"
+
+namespace webrtc {
+
+class GainChangeCalculator {
+ public:
+ // The 'out' signal is assumed to be produced from 'in' by applying a
+ // smoothly varying gain. This method returns the accumulated variation
+ // of that gain, handling the special case of samples too small for a
+ // reliable estimate.
+ float CalculateGainChange(rtc::ArrayView<const int16_t> in,
+ rtc::ArrayView<const int16_t> out);
+
+ float LatestGain() const;
+
+ private:
+ void CalculateGain(rtc::ArrayView<const int16_t> in,
+ rtc::ArrayView<const int16_t> out,
+ rtc::ArrayView<float> gain);
+
+ float CalculateDifferences(rtc::ArrayView<const float> values);
+ float last_value_ = 0.f;
+ float last_reliable_gain_ = 1.0f;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_MIXER_GAIN_CHANGE_CALCULATOR_H_
diff --git a/third_party/libwebrtc/modules/audio_mixer/output_rate_calculator.h b/third_party/libwebrtc/modules/audio_mixer/output_rate_calculator.h
new file mode 100644
index 0000000000..46b65a8b57
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_mixer/output_rate_calculator.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_MIXER_OUTPUT_RATE_CALCULATOR_H_
+#define MODULES_AUDIO_MIXER_OUTPUT_RATE_CALCULATOR_H_
+
+#include <vector>
+
+#include "api/array_view.h"
+
+namespace webrtc {
+
+// Decides the sample rate of a mixing iteration given the preferred
+// sample rates of the sources.
+class OutputRateCalculator {
+ public:
+ virtual int CalculateOutputRateFromRange(
+ rtc::ArrayView<const int> preferred_sample_rates) = 0;
+
+ virtual ~OutputRateCalculator() {}
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_MIXER_OUTPUT_RATE_CALCULATOR_H_
diff --git a/third_party/libwebrtc/modules/audio_mixer/sine_wave_generator.cc b/third_party/libwebrtc/modules/audio_mixer/sine_wave_generator.cc
new file mode 100644
index 0000000000..591fe14e8c
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_mixer/sine_wave_generator.cc
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_mixer/sine_wave_generator.h"
+
+#include <math.h>
+#include <stddef.h>
+
+#include "rtc_base/numerics/safe_conversions.h"
+
+namespace webrtc {
+
+namespace {
+constexpr float kPi = 3.14159265f;
+} // namespace
+
+void SineWaveGenerator::GenerateNextFrame(AudioFrame* frame) {
+ RTC_DCHECK(frame);
+ int16_t* frame_data = frame->mutable_data();
+ for (size_t i = 0; i < frame->samples_per_channel_; ++i) {
+ for (size_t ch = 0; ch < frame->num_channels_; ++ch) {
+ frame_data[frame->num_channels_ * i + ch] =
+ rtc::saturated_cast<int16_t>(amplitude_ * sinf(phase_));
+ }
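+ // Advance the phase by one sample period: 2*pi*f/fs radians per sample.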
+ phase_ += wave_frequency_hz_ * 2 * kPi / frame->sample_rate_hz_;
+ }
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_mixer/sine_wave_generator.h b/third_party/libwebrtc/modules/audio_mixer/sine_wave_generator.h
new file mode 100644
index 0000000000..ec0fcd24bd
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_mixer/sine_wave_generator.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_MIXER_SINE_WAVE_GENERATOR_H_
+#define MODULES_AUDIO_MIXER_SINE_WAVE_GENERATOR_H_
+
+#include <stdint.h>
+
+#include "api/audio/audio_frame.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+class SineWaveGenerator {
+ public:
+ SineWaveGenerator(float wave_frequency_hz, int16_t amplitude)
+ : wave_frequency_hz_(wave_frequency_hz), amplitude_(amplitude) {
+ RTC_DCHECK_GT(wave_frequency_hz, 0);
+ }
+
+ // Fills `frame` with the next chunk of the sine wave, honoring
+ // frame->num_channels_ and frame->sample_rate_hz_.
+ void GenerateNextFrame(AudioFrame* frame);
+
+ private:
+ float phase_ = 0.f;
+ const float wave_frequency_hz_;
+ const int16_t amplitude_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_MIXER_SINE_WAVE_GENERATOR_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/BUILD.gn b/third_party/libwebrtc/modules/audio_processing/BUILD.gn
new file mode 100644
index 0000000000..525707a5d9
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/BUILD.gn
@@ -0,0 +1,664 @@
+# Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+import("../../webrtc.gni")
+if (rtc_enable_protobuf) {
+ import("//third_party/protobuf/proto_library.gni")
+}
+
+config("apm_debug_dump") {
+ if (apm_debug_dump) {
+ defines = [ "WEBRTC_APM_DEBUG_DUMP=1" ]
+ } else {
+ defines = [ "WEBRTC_APM_DEBUG_DUMP=0" ]
+ }
+}
+
+rtc_library("api") {
+ visibility = [ "*" ]
+ sources = [
+ "include/audio_processing.cc",
+ "include/audio_processing.h",
+ ]
+ deps = [
+ ":audio_frame_view",
+ ":audio_processing_statistics",
+ "../../api:array_view",
+ "../../api:scoped_refptr",
+ "../../api/audio:aec3_config",
+ "../../api/audio:audio_frame_api",
+ "../../api/audio:echo_control",
+ "../../rtc_base:macromagic",
+ "../../rtc_base:refcount",
+ "../../rtc_base:stringutils",
+ "../../rtc_base/system:arch",
+ "../../rtc_base/system:file_wrapper",
+ "../../rtc_base/system:rtc_export",
+ "agc:gain_control_interface",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/strings",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+}
+
+rtc_library("audio_frame_proxies") {
+ visibility = [ "*" ]
+ sources = [
+ "include/audio_frame_proxies.cc",
+ "include/audio_frame_proxies.h",
+ ]
+ deps = [
+ ":api",
+ ":audio_frame_view",
+ "../../api/audio:audio_frame_api",
+ ]
+}
+
+rtc_library("audio_buffer") {
+ visibility = [ "*" ]
+
+ configs += [ ":apm_debug_dump" ]
+
+ sources = [
+ "audio_buffer.cc",
+ "audio_buffer.h",
+ "splitting_filter.cc",
+ "splitting_filter.h",
+ "three_band_filter_bank.cc",
+ "three_band_filter_bank.h",
+ ]
+
+ defines = []
+
+ deps = [
+ ":api",
+ "../../api:array_view",
+ "../../common_audio",
+ "../../common_audio:common_audio_c",
+ "../../rtc_base:checks",
+ ]
+}
+
+rtc_library("high_pass_filter") {
+ visibility = [ "*" ]
+
+ sources = [
+ "high_pass_filter.cc",
+ "high_pass_filter.h",
+ ]
+
+ defines = []
+
+ deps = [
+ ":audio_buffer",
+ "../../api:array_view",
+ "../../rtc_base:checks",
+ "utility:cascaded_biquad_filter",
+ ]
+}
+
+rtc_source_set("aec_dump_interface") {
+ visibility = [ "*" ]
+ sources = [
+ "include/aec_dump.cc",
+ "include/aec_dump.h",
+ ]
+
+ deps = [
+ ":api",
+ ":audio_frame_view",
+ ]
+ absl_deps = [ "//third_party/abseil-cpp/absl/base:core_headers" ]
+}
+
+rtc_library("gain_controller2") {
+ configs += [ ":apm_debug_dump" ]
+ sources = [
+ "gain_controller2.cc",
+ "gain_controller2.h",
+ ]
+ defines = []
+ deps = [
+ ":aec_dump_interface",
+ ":api",
+ ":apm_logging",
+ ":audio_buffer",
+ ":audio_frame_view",
+ "../../common_audio",
+ "../../rtc_base:checks",
+ "../../rtc_base:logging",
+ "../../rtc_base:stringutils",
+ "../../system_wrappers:field_trial",
+ "agc2:adaptive_digital",
+ "agc2:cpu_features",
+ "agc2:fixed_digital",
+ "agc2:gain_applier",
+ "agc2:vad_wrapper",
+ ]
+}
+
+rtc_library("audio_processing") {
+ visibility = [ "*" ]
+ configs += [ ":apm_debug_dump" ]
+ sources = [
+ "audio_processing_builder_impl.cc",
+ "audio_processing_impl.cc",
+ "audio_processing_impl.h",
+ "echo_control_mobile_impl.cc",
+ "echo_control_mobile_impl.h",
+ "gain_control_impl.cc",
+ "gain_control_impl.h",
+ "render_queue_item_verifier.h",
+ ]
+
+ defines = []
+ deps = [
+ ":aec_dump_interface",
+ ":api",
+ ":apm_logging",
+ ":audio_buffer",
+ ":audio_frame_proxies",
+ ":audio_frame_view",
+ ":audio_processing_statistics",
+ ":gain_controller2",
+ ":high_pass_filter",
+ ":optionally_built_submodule_creators",
+ ":rms_level",
+ "../../api:array_view",
+ "../../api:function_view",
+ "../../api:make_ref_counted",
+ "../../api/audio:aec3_config",
+ "../../api/audio:audio_frame_api",
+ "../../api/audio:echo_control",
+ "../../audio/utility:audio_frame_operations",
+ "../../common_audio:common_audio_c",
+ "../../common_audio/third_party/ooura:fft_size_256",
+ "../../rtc_base:checks",
+ "../../rtc_base:event_tracer",
+ "../../rtc_base:gtest_prod",
+ "../../rtc_base:ignore_wundef",
+ "../../rtc_base:logging",
+ "../../rtc_base:macromagic",
+ "../../rtc_base:safe_minmax",
+ "../../rtc_base:sanitizer",
+ "../../rtc_base:swap_queue",
+ "../../rtc_base:timeutils",
+ "../../rtc_base/synchronization:mutex",
+ "../../rtc_base/system:rtc_export",
+ "../../system_wrappers",
+ "../../system_wrappers:denormal_disabler",
+ "../../system_wrappers:field_trial",
+ "../../system_wrappers:metrics",
+ "aec3",
+ "aec_dump:aec_dump",
+ "aecm:aecm_core",
+ "agc",
+ "agc:analog_gain_stats_reporter",
+ "agc:gain_control_interface",
+ "agc:legacy_agc",
+ "capture_levels_adjuster",
+ "ns",
+ "transient:transient_suppressor_api",
+ "vad",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/strings",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+
+ deps += [
+ "../../common_audio",
+ "../../common_audio:fir_filter",
+ "../../common_audio:fir_filter_factory",
+ "../../system_wrappers",
+ ]
+
+ if (rtc_enable_protobuf) {
+ deps += [ "aec_dump:aec_dump_impl" ]
+ } else {
+ deps += [ "aec_dump:null_aec_dump_factory" ]
+ }
+}
+
+rtc_library("residual_echo_detector") {
+ poisonous = [ "default_echo_detector" ]
+ configs += [ ":apm_debug_dump" ]
+ sources = [
+ "echo_detector/circular_buffer.cc",
+ "echo_detector/circular_buffer.h",
+ "echo_detector/mean_variance_estimator.cc",
+ "echo_detector/mean_variance_estimator.h",
+ "echo_detector/moving_max.cc",
+ "echo_detector/moving_max.h",
+ "echo_detector/normalized_covariance_estimator.cc",
+ "echo_detector/normalized_covariance_estimator.h",
+ "residual_echo_detector.cc",
+ "residual_echo_detector.h",
+ ]
+ deps = [
+ ":api",
+ ":apm_logging",
+ "../../api:array_view",
+ "../../rtc_base:checks",
+ "../../rtc_base:logging",
+ "../../system_wrappers:metrics",
+ ]
+ absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ]
+}
+
+rtc_library("optionally_built_submodule_creators") {
+ sources = [
+ "optionally_built_submodule_creators.cc",
+ "optionally_built_submodule_creators.h",
+ ]
+ deps = [
+ "transient:transient_suppressor_api",
+ "transient:transient_suppressor_impl",
+ ]
+}
+
+rtc_source_set("rms_level") {
+ visibility = [ "*" ]
+ sources = [
+ "rms_level.cc",
+ "rms_level.h",
+ ]
+ deps = [
+ "../../api:array_view",
+ "../../rtc_base:checks",
+ ]
+ absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ]
+}
+
+rtc_library("audio_processing_statistics") {
+ visibility = [ "*" ]
+ sources = [
+ "include/audio_processing_statistics.cc",
+ "include/audio_processing_statistics.h",
+ ]
+ deps = [ "../../rtc_base/system:rtc_export" ]
+ absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ]
+}
+
+rtc_source_set("audio_frame_view") {
+ sources = [ "include/audio_frame_view.h" ]
+ deps = [ "../../api:array_view" ]
+}
+
+if (rtc_enable_protobuf) {
+ proto_library("audioproc_debug_proto") {
+ sources = [ "debug.proto" ]
+
+ proto_out_dir = "modules/audio_processing"
+ }
+}
+
+rtc_library("apm_logging") {
+ configs += [ ":apm_debug_dump" ]
+ sources = [
+ "logging/apm_data_dumper.cc",
+ "logging/apm_data_dumper.h",
+ ]
+ deps = [
+ "../../api:array_view",
+ "../../common_audio",
+ "../../rtc_base:checks",
+ "../../rtc_base:stringutils",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/strings",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+ defines = []
+}
+
+if (rtc_include_tests) {
+ rtc_source_set("mocks") {
+ testonly = true
+ sources = [ "include/mock_audio_processing.h" ]
+ deps = [
+ ":aec_dump_interface",
+ ":api",
+ ":audio_buffer",
+ ":audio_processing",
+ ":audio_processing_statistics",
+ "../../test:test_support",
+ ]
+ absl_deps = [ "//third_party/abseil-cpp/absl/strings" ]
+ }
+
+ if (!build_with_chromium) {
+ group("audio_processing_tests") {
+ testonly = true
+ deps = [
+ ":audioproc_test_utils",
+ "transient:click_annotate",
+ "transient:transient_suppression_test",
+ ]
+
+ if (rtc_enable_protobuf) {
+ deps += [
+ ":audioproc_unittest_proto",
+ "aec_dump:aec_dump_unittests",
+ "test/conversational_speech",
+ "test/py_quality_assessment",
+ ]
+ }
+ }
+
+ rtc_library("audio_processing_unittests") {
+ testonly = true
+
+ configs += [ ":apm_debug_dump" ]
+ sources = [
+ "audio_buffer_unittest.cc",
+ "audio_frame_view_unittest.cc",
+ "echo_control_mobile_unittest.cc",
+ "gain_controller2_unittest.cc",
+ "splitting_filter_unittest.cc",
+ "test/fake_recording_device_unittest.cc",
+ ]
+
+ deps = [
+ ":analog_mic_simulation",
+ ":api",
+ ":apm_logging",
+ ":audio_buffer",
+ ":audio_frame_view",
+ ":audio_processing",
+ ":audioproc_test_utils",
+ ":gain_controller2",
+ ":high_pass_filter",
+ ":mocks",
+ "../../api:array_view",
+ "../../api:make_ref_counted",
+ "../../api:scoped_refptr",
+ "../../api/audio:aec3_config",
+ "../../api/audio:aec3_factory",
+ "../../api/audio:echo_detector_creator",
+ "../../common_audio",
+ "../../common_audio:common_audio_c",
+ "../../rtc_base",
+ "../../rtc_base:checks",
+ "../../rtc_base:gtest_prod",
+ "../../rtc_base:ignore_wundef",
+ "../../rtc_base:macromagic",
+ "../../rtc_base:platform_thread",
+ "../../rtc_base:protobuf_utils",
+ "../../rtc_base:random",
+ "../../rtc_base:rtc_base_tests_utils",
+ "../../rtc_base:rtc_event",
+ "../../rtc_base:safe_conversions",
+ "../../rtc_base:safe_minmax",
+ "../../rtc_base:stringutils",
+ "../../rtc_base:swap_queue",
+ "../../rtc_base:task_queue_for_test",
+ "../../rtc_base:threading",
+ "../../rtc_base/synchronization:mutex",
+ "../../rtc_base/system:arch",
+ "../../rtc_base/system:file_wrapper",
+ "../../system_wrappers",
+ "../../system_wrappers:denormal_disabler",
+ "../../test:field_trial",
+ "../../test:fileutils",
+ "../../test:rtc_expect_death",
+ "../../test:test_support",
+ "../audio_coding:neteq_input_audio_tools",
+ "aec_dump:mock_aec_dump_unittests",
+ "agc:agc_unittests",
+ "agc2:adaptive_digital_unittests",
+ "agc2:biquad_filter_unittests",
+ "agc2:fixed_digital_unittests",
+ "agc2:noise_estimator_unittests",
+ "agc2:test_utils",
+ "agc2:vad_wrapper_unittests",
+ "agc2/rnn_vad:unittests",
+ "capture_levels_adjuster",
+ "capture_levels_adjuster:capture_levels_adjuster_unittests",
+ "test/conversational_speech:unittest",
+ "transient:transient_suppression_unittests",
+ "utility:legacy_delay_estimator_unittest",
+ "utility:pffft_wrapper_unittest",
+ "vad:vad_unittests",
+ "//testing/gtest",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/strings",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+
+ defines = []
+
+ if (rtc_prefer_fixed_point) {
+ defines += [ "WEBRTC_AUDIOPROC_FIXED_PROFILE" ]
+ } else {
+ defines += [ "WEBRTC_AUDIOPROC_FLOAT_PROFILE" ]
+ }
+
+ if (rtc_enable_protobuf) {
+ defines += [ "WEBRTC_AUDIOPROC_DEBUG_DUMP" ]
+ deps += [
+ ":audioproc_debug_proto",
+ ":audioproc_protobuf_utils",
+ ":audioproc_test_utils",
+ ":audioproc_unittest_proto",
+ ":optionally_built_submodule_creators",
+ ":residual_echo_detector",
+ ":rms_level",
+ ":runtime_settings_protobuf_utils",
+ "../../api/audio:audio_frame_api",
+ "../../api/audio:echo_control",
+ "../../rtc_base:rtc_base_tests_utils",
+ "../../rtc_base:rtc_task_queue",
+ "aec_dump",
+ "aec_dump:aec_dump_unittests",
+ ]
+ absl_deps += [ "//third_party/abseil-cpp/absl/flags:flag" ]
+ sources += [
+ "audio_processing_impl_locking_unittest.cc",
+ "audio_processing_impl_unittest.cc",
+ "audio_processing_unittest.cc",
+ "echo_control_mobile_bit_exact_unittest.cc",
+ "echo_detector/circular_buffer_unittest.cc",
+ "echo_detector/mean_variance_estimator_unittest.cc",
+ "echo_detector/moving_max_unittest.cc",
+ "echo_detector/normalized_covariance_estimator_unittest.cc",
+ "gain_control_unittest.cc",
+ "high_pass_filter_unittest.cc",
+ "residual_echo_detector_unittest.cc",
+ "rms_level_unittest.cc",
+ "test/debug_dump_replayer.cc",
+ "test/debug_dump_replayer.h",
+ "test/debug_dump_test.cc",
+ "test/echo_canceller_test_tools.cc",
+ "test/echo_canceller_test_tools.h",
+ "test/echo_canceller_test_tools_unittest.cc",
+ "test/echo_control_mock.h",
+ "test/test_utils.h",
+ ]
+ }
+ }
+ }
+
+ rtc_library("audio_processing_perf_tests") {
+ testonly = true
+ configs += [ ":apm_debug_dump" ]
+
+ sources = [ "audio_processing_performance_unittest.cc" ]
+ deps = [
+ ":audio_processing",
+ ":audioproc_test_utils",
+ "../../api:array_view",
+ "../../rtc_base:platform_thread",
+ "../../rtc_base:protobuf_utils",
+ "../../rtc_base:random",
+ "../../rtc_base:rtc_event",
+ "../../rtc_base:safe_conversions",
+ "../../system_wrappers",
+ "../../test:perf_test",
+ "../../test:test_support",
+ ]
+ absl_deps = [ "//third_party/abseil-cpp/absl/strings" ]
+ }
+
+ rtc_library("analog_mic_simulation") {
+ sources = [
+ "test/fake_recording_device.cc",
+ "test/fake_recording_device.h",
+ ]
+ deps = [
+ "../../api:array_view",
+ "../../api/audio:audio_frame_api",
+ "../../common_audio",
+ "../../rtc_base:checks",
+ "../../rtc_base:logging",
+ "../../rtc_base:safe_conversions",
+ "../../rtc_base:safe_minmax",
+ "agc:gain_map",
+ ]
+ absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ]
+ }
+
+ if (rtc_enable_protobuf && !build_with_chromium) {
+ rtc_library("audioproc_f_impl") {
+ testonly = true
+ configs += [ ":apm_debug_dump" ]
+ sources = [
+ "test/aec_dump_based_simulator.cc",
+ "test/aec_dump_based_simulator.h",
+ "test/api_call_statistics.cc",
+ "test/api_call_statistics.h",
+ "test/audio_processing_simulator.cc",
+ "test/audio_processing_simulator.h",
+ "test/audioproc_float_impl.cc",
+ "test/audioproc_float_impl.h",
+ "test/wav_based_simulator.cc",
+ "test/wav_based_simulator.h",
+ ]
+
+ deps = [
+ ":analog_mic_simulation",
+ ":api",
+ ":apm_logging",
+ ":audio_processing",
+ ":audioproc_debug_proto",
+ ":audioproc_protobuf_utils",
+ ":audioproc_test_utils",
+ ":runtime_settings_protobuf_utils",
+ "../../api/audio:aec3_config_json",
+ "../../api/audio:aec3_factory",
+ "../../api/audio:echo_detector_creator",
+ "../../common_audio",
+ "../../rtc_base:checks",
+ "../../rtc_base:ignore_wundef",
+ "../../rtc_base:logging",
+ "../../rtc_base:protobuf_utils",
+ "../../rtc_base:rtc_json",
+ "../../rtc_base:safe_conversions",
+ "../../rtc_base:stringutils",
+ "../../rtc_base:task_queue_for_test",
+ "../../rtc_base:timeutils",
+ "../../rtc_base/system:file_wrapper",
+ "../../system_wrappers",
+ "../../system_wrappers:field_trial",
+ "../../test:test_support",
+ "aec_dump",
+ "aec_dump:aec_dump_impl",
+ "//testing/gtest",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/flags:flag",
+ "//third_party/abseil-cpp/absl/flags:parse",
+ "//third_party/abseil-cpp/absl/strings",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+ } # audioproc_f_impl
+ }
+
+ if (rtc_enable_protobuf) {
+ proto_library("audioproc_unittest_proto") {
+ sources = [ "test/unittest.proto" ]
+ proto_out_dir = "modules/audio_processing/test"
+ }
+
+ rtc_library("audioproc_protobuf_utils") {
+ sources = [
+ "test/protobuf_utils.cc",
+ "test/protobuf_utils.h",
+ ]
+
+ deps = [
+ ":audioproc_debug_proto",
+ "../../rtc_base:checks",
+ "../../rtc_base:ignore_wundef",
+ "../../rtc_base:protobuf_utils",
+ "../../rtc_base/system:arch",
+ ]
+ }
+
+ rtc_library("runtime_settings_protobuf_utils") {
+ testonly = true
+ sources = [
+ "test/runtime_setting_util.cc",
+ "test/runtime_setting_util.h",
+ ]
+
+ deps = [
+ ":api",
+ ":audioproc_debug_proto",
+ ":audioproc_protobuf_utils",
+ "../../rtc_base:checks",
+ ]
+ }
+ }
+}
+
+if (rtc_include_tests) {
+rtc_library("audioproc_test_utils") {
+ visibility = [ "*" ]
+ testonly = true
+ sources = [
+ "test/audio_buffer_tools.cc",
+ "test/audio_buffer_tools.h",
+ "test/audio_processing_builder_for_testing.cc",
+ "test/audio_processing_builder_for_testing.h",
+ "test/bitexactness_tools.cc",
+ "test/bitexactness_tools.h",
+ "test/performance_timer.cc",
+ "test/performance_timer.h",
+ "test/simulator_buffers.cc",
+ "test/simulator_buffers.h",
+ "test/test_utils.cc",
+ "test/test_utils.h",
+ ]
+
+ configs += [ ":apm_debug_dump" ]
+
+ deps = [
+ ":api",
+ ":audio_buffer",
+ ":audio_processing",
+ "../../api:array_view",
+ "../../api/audio:audio_frame_api",
+ "../../common_audio",
+ "../../rtc_base:checks",
+ "../../rtc_base:random",
+ "../../rtc_base/system:arch",
+ "../../system_wrappers",
+ "../../test:fileutils",
+ "../../test:test_support",
+ "../audio_coding:neteq_input_audio_tools",
+ "//testing/gtest",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/strings",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+ }
+}
diff --git a/third_party/libwebrtc/modules/audio_processing/DEPS b/third_party/libwebrtc/modules/audio_processing/DEPS
new file mode 100644
index 0000000000..79fd071785
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/DEPS
@@ -0,0 +1,14 @@
+include_rules = [
+ "+audio/utility/audio_frame_operations.h",
+ "+common_audio",
+ "+system_wrappers",
+]
+
+specific_include_rules = {
+ ".*test\.cc": [
+ "+rtc_tools",
+ # Android platform build has different paths.
+ "+gtest",
+ "+external/webrtc",
+ ],
+}
diff --git a/third_party/libwebrtc/modules/audio_processing/OWNERS b/third_party/libwebrtc/modules/audio_processing/OWNERS
new file mode 100644
index 0000000000..ca9bc46323
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/OWNERS
@@ -0,0 +1,8 @@
+aleloi@webrtc.org
+alessiob@webrtc.org
+gustaf@webrtc.org
+henrik.lundin@webrtc.org
+ivoc@webrtc.org
+minyue@webrtc.org
+peah@webrtc.org
+saza@webrtc.org
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/BUILD.gn b/third_party/libwebrtc/modules/audio_processing/aec3/BUILD.gn
new file mode 100644
index 0000000000..7937f77160
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/BUILD.gn
@@ -0,0 +1,383 @@
+# Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+import("../../../webrtc.gni")
+
+rtc_library("aec3") {
+ visibility = [ "*" ]
+ configs += [ "..:apm_debug_dump" ]
+ sources = [
+ "adaptive_fir_filter.cc",
+ "adaptive_fir_filter_erl.cc",
+ "aec3_common.cc",
+ "aec3_fft.cc",
+ "aec_state.cc",
+ "aec_state.h",
+ "alignment_mixer.cc",
+ "alignment_mixer.h",
+ "api_call_jitter_metrics.cc",
+ "api_call_jitter_metrics.h",
+ "block.h",
+ "block_buffer.cc",
+ "block_delay_buffer.cc",
+ "block_delay_buffer.h",
+ "block_framer.cc",
+ "block_framer.h",
+ "block_processor.cc",
+ "block_processor.h",
+ "block_processor_metrics.cc",
+ "block_processor_metrics.h",
+ "clockdrift_detector.cc",
+ "clockdrift_detector.h",
+ "coarse_filter_update_gain.cc",
+ "coarse_filter_update_gain.h",
+ "comfort_noise_generator.cc",
+ "comfort_noise_generator.h",
+ "config_selector.cc",
+ "config_selector.h",
+ "decimator.cc",
+ "decimator.h",
+ "delay_estimate.h",
+ "dominant_nearend_detector.cc",
+ "dominant_nearend_detector.h",
+ "downsampled_render_buffer.cc",
+ "downsampled_render_buffer.h",
+ "echo_audibility.cc",
+ "echo_audibility.h",
+ "echo_canceller3.cc",
+ "echo_canceller3.h",
+ "echo_path_delay_estimator.cc",
+ "echo_path_delay_estimator.h",
+ "echo_path_variability.cc",
+ "echo_path_variability.h",
+ "echo_remover.cc",
+ "echo_remover.h",
+ "echo_remover_metrics.cc",
+ "echo_remover_metrics.h",
+ "erl_estimator.cc",
+ "erl_estimator.h",
+ "erle_estimator.cc",
+ "erle_estimator.h",
+ "fft_buffer.cc",
+ "filter_analyzer.cc",
+ "filter_analyzer.h",
+ "frame_blocker.cc",
+ "frame_blocker.h",
+ "fullband_erle_estimator.cc",
+ "fullband_erle_estimator.h",
+ "matched_filter.cc",
+ "matched_filter_lag_aggregator.cc",
+ "matched_filter_lag_aggregator.h",
+ "moving_average.cc",
+ "moving_average.h",
+ "multi_channel_content_detector.cc",
+ "multi_channel_content_detector.h",
+ "nearend_detector.h",
+ "refined_filter_update_gain.cc",
+ "refined_filter_update_gain.h",
+ "render_buffer.cc",
+ "render_delay_buffer.cc",
+ "render_delay_buffer.h",
+ "render_delay_controller.cc",
+ "render_delay_controller.h",
+ "render_delay_controller_metrics.cc",
+ "render_delay_controller_metrics.h",
+ "render_signal_analyzer.cc",
+ "render_signal_analyzer.h",
+ "residual_echo_estimator.cc",
+ "residual_echo_estimator.h",
+ "reverb_decay_estimator.cc",
+ "reverb_decay_estimator.h",
+ "reverb_frequency_response.cc",
+ "reverb_frequency_response.h",
+ "reverb_model.cc",
+ "reverb_model.h",
+ "reverb_model_estimator.cc",
+ "reverb_model_estimator.h",
+ "signal_dependent_erle_estimator.cc",
+ "signal_dependent_erle_estimator.h",
+ "spectrum_buffer.cc",
+ "stationarity_estimator.cc",
+ "stationarity_estimator.h",
+ "subband_erle_estimator.cc",
+ "subband_erle_estimator.h",
+ "subband_nearend_detector.cc",
+ "subband_nearend_detector.h",
+ "subtractor.cc",
+ "subtractor.h",
+ "subtractor_output.cc",
+ "subtractor_output.h",
+ "subtractor_output_analyzer.cc",
+ "subtractor_output_analyzer.h",
+ "suppression_filter.cc",
+ "suppression_filter.h",
+ "suppression_gain.cc",
+ "suppression_gain.h",
+ "transparent_mode.cc",
+ "transparent_mode.h",
+ ]
+
+ defines = []
+ if (rtc_build_with_neon && target_cpu != "arm64") {
+ suppressed_configs += [ "//build/config/compiler:compiler_arm_fpu" ]
+ cflags = [ "-mfpu=neon" ]
+ }
+
+ deps = [
+ ":adaptive_fir_filter",
+ ":adaptive_fir_filter_erl",
+ ":aec3_common",
+ ":aec3_fft",
+ ":fft_data",
+ ":matched_filter",
+ ":render_buffer",
+ ":vector_math",
+ "..:apm_logging",
+ "..:audio_buffer",
+ "..:high_pass_filter",
+ "../../../api:array_view",
+ "../../../api/audio:aec3_config",
+ "../../../api/audio:echo_control",
+ "../../../common_audio:common_audio_c",
+ "../../../rtc_base:checks",
+ "../../../rtc_base:logging",
+ "../../../rtc_base:macromagic",
+ "../../../rtc_base:race_checker",
+ "../../../rtc_base:safe_minmax",
+ "../../../rtc_base:swap_queue",
+ "../../../rtc_base/experiments:field_trial_parser",
+ "../../../rtc_base/system:arch",
+ "../../../system_wrappers",
+ "../../../system_wrappers:field_trial",
+ "../../../system_wrappers:metrics",
+ "../utility:cascaded_biquad_filter",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/strings",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+
+ if (target_cpu == "x86" || target_cpu == "x64") {
+ deps += [ ":aec3_avx2" ]
+ }
+}
+
+rtc_source_set("aec3_common") {
+ sources = [ "aec3_common.h" ]
+}
+
+rtc_source_set("aec3_fft") {
+ sources = [ "aec3_fft.h" ]
+ deps = [
+ ":aec3_common",
+ ":fft_data",
+ "../../../api:array_view",
+ "../../../common_audio/third_party/ooura:fft_size_128",
+ "../../../rtc_base:checks",
+ "../../../rtc_base/system:arch",
+ ]
+}
+
+rtc_source_set("render_buffer") {
+ sources = [
+ "block.h",
+ "block_buffer.h",
+ "fft_buffer.h",
+ "render_buffer.h",
+ "spectrum_buffer.h",
+ ]
+ deps = [
+ ":aec3_common",
+ ":fft_data",
+ "../../../api:array_view",
+ "../../../rtc_base:checks",
+ "../../../rtc_base/system:arch",
+ ]
+}
+
+rtc_source_set("adaptive_fir_filter") {
+ sources = [ "adaptive_fir_filter.h" ]
+ deps = [
+ ":aec3_common",
+ ":aec3_fft",
+ ":fft_data",
+ ":render_buffer",
+ "..:apm_logging",
+ "../../../api:array_view",
+ "../../../rtc_base/system:arch",
+ ]
+ absl_deps = [ "//third_party/abseil-cpp/absl/strings" ]
+}
+
+rtc_source_set("adaptive_fir_filter_erl") {
+ sources = [ "adaptive_fir_filter_erl.h" ]
+ deps = [
+ ":aec3_common",
+ "../../../api:array_view",
+ "../../../rtc_base/system:arch",
+ ]
+}
+
+rtc_source_set("matched_filter") {
+ sources = [ "matched_filter.h" ]
+ deps = [
+ ":aec3_common",
+ "../../../api:array_view",
+ "../../../rtc_base/system:arch",
+ ]
+ absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ]
+}
+
+rtc_source_set("vector_math") {
+ sources = [ "vector_math.h" ]
+ deps = [
+ ":aec3_common",
+ "../../../api:array_view",
+ "../../../rtc_base:checks",
+ "../../../rtc_base/system:arch",
+ ]
+}
+
+rtc_source_set("fft_data") {
+ sources = [ "fft_data.h" ]
+ deps = [
+ ":aec3_common",
+ "../../../api:array_view",
+ "../../../rtc_base/system:arch",
+ ]
+}
+
+if (target_cpu == "x86" || target_cpu == "x64") {
+ rtc_library("aec3_avx2") {
+ configs += [ "..:apm_debug_dump" ]
+ sources = [
+ "adaptive_fir_filter_avx2.cc",
+ "adaptive_fir_filter_erl_avx2.cc",
+ "fft_data_avx2.cc",
+ "matched_filter_avx2.cc",
+ "vector_math_avx2.cc",
+ ]
+
+ cflags = [
+ "-mavx",
+ "-mavx2",
+ "-mfma",
+ ]
+
+ deps = [
+ ":adaptive_fir_filter",
+ ":adaptive_fir_filter_erl",
+ ":fft_data",
+ ":matched_filter",
+ ":vector_math",
+ "../../../api:array_view",
+ "../../../rtc_base:checks",
+ ]
+ }
+}
+
+if (rtc_include_tests) {
+ rtc_library("aec3_unittests") {
+ testonly = true
+
+ configs += [ "..:apm_debug_dump" ]
+ sources = [
+ "mock/mock_block_processor.cc",
+ "mock/mock_block_processor.h",
+ "mock/mock_echo_remover.cc",
+ "mock/mock_echo_remover.h",
+ "mock/mock_render_delay_buffer.cc",
+ "mock/mock_render_delay_buffer.h",
+ "mock/mock_render_delay_controller.cc",
+ "mock/mock_render_delay_controller.h",
+ ]
+
+ deps = [
+ ":adaptive_fir_filter",
+ ":adaptive_fir_filter_erl",
+ ":aec3",
+ ":aec3_common",
+ ":aec3_fft",
+ ":fft_data",
+ ":matched_filter",
+ ":render_buffer",
+ ":vector_math",
+ "..:apm_logging",
+ "..:audio_buffer",
+ "..:audio_processing",
+ "..:high_pass_filter",
+ "../../../api:array_view",
+ "../../../api/audio:aec3_config",
+ "../../../rtc_base:checks",
+ "../../../rtc_base:macromagic",
+ "../../../rtc_base:random",
+ "../../../rtc_base:safe_minmax",
+ "../../../rtc_base:stringutils",
+ "../../../rtc_base/system:arch",
+ "../../../system_wrappers",
+ "../../../system_wrappers:metrics",
+ "../../../test:field_trial",
+ "../../../test:test_support",
+ "../utility:cascaded_biquad_filter",
+ ]
+ absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ]
+
+ defines = []
+
+ if (rtc_enable_protobuf) {
+ sources += [
+ "adaptive_fir_filter_erl_unittest.cc",
+ "adaptive_fir_filter_unittest.cc",
+ "aec3_fft_unittest.cc",
+ "aec_state_unittest.cc",
+ "alignment_mixer_unittest.cc",
+ "api_call_jitter_metrics_unittest.cc",
+ "block_delay_buffer_unittest.cc",
+ "block_framer_unittest.cc",
+ "block_processor_metrics_unittest.cc",
+ "block_processor_unittest.cc",
+ "clockdrift_detector_unittest.cc",
+ "coarse_filter_update_gain_unittest.cc",
+ "comfort_noise_generator_unittest.cc",
+ "config_selector_unittest.cc",
+ "decimator_unittest.cc",
+ "echo_canceller3_unittest.cc",
+ "echo_path_delay_estimator_unittest.cc",
+ "echo_path_variability_unittest.cc",
+ "echo_remover_metrics_unittest.cc",
+ "echo_remover_unittest.cc",
+ "erl_estimator_unittest.cc",
+ "erle_estimator_unittest.cc",
+ "fft_data_unittest.cc",
+ "filter_analyzer_unittest.cc",
+ "frame_blocker_unittest.cc",
+ "matched_filter_lag_aggregator_unittest.cc",
+ "matched_filter_unittest.cc",
+ "moving_average_unittest.cc",
+ "multi_channel_content_detector_unittest.cc",
+ "refined_filter_update_gain_unittest.cc",
+ "render_buffer_unittest.cc",
+ "render_delay_buffer_unittest.cc",
+ "render_delay_controller_metrics_unittest.cc",
+ "render_delay_controller_unittest.cc",
+ "render_signal_analyzer_unittest.cc",
+ "residual_echo_estimator_unittest.cc",
+ "reverb_model_estimator_unittest.cc",
+ "signal_dependent_erle_estimator_unittest.cc",
+ "subtractor_unittest.cc",
+ "suppression_filter_unittest.cc",
+ "suppression_gain_unittest.cc",
+ "vector_math_unittest.cc",
+ ]
+ }
+
+ if (!build_with_chromium) {
+ deps += [ "..:audio_processing_unittests" ]
+ }
+ }
+}
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/adaptive_fir_filter.cc b/third_party/libwebrtc/modules/audio_processing/aec3/adaptive_fir_filter.cc
new file mode 100644
index 0000000000..917aa951ee
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/adaptive_fir_filter.cc
@@ -0,0 +1,744 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/adaptive_fir_filter.h"
+
+// Defines WEBRTC_ARCH_X86_FAMILY, used below.
+#include "rtc_base/system/arch.h"
+
+#if defined(WEBRTC_HAS_NEON)
+#include <arm_neon.h>
+#endif
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+#include <emmintrin.h>
+#endif
+#include <math.h>
+
+#include <algorithm>
+#include <functional>
+
+#include "modules/audio_processing/aec3/fft_data.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+namespace aec3 {
+
+// Computes and stores the frequency response of the filter.
+void ComputeFrequencyResponse(
+ size_t num_partitions,
+ const std::vector<std::vector<FftData>>& H,
+ std::vector<std::array<float, kFftLengthBy2Plus1>>* H2) {
+ for (auto& H2_ch : *H2) {
+ H2_ch.fill(0.f);
+ }
+
+ const size_t num_render_channels = H[0].size();
+ RTC_DCHECK_EQ(H.size(), H2->capacity());
+ for (size_t p = 0; p < num_partitions; ++p) {
+ RTC_DCHECK_EQ(kFftLengthBy2Plus1, (*H2)[p].size());
+ for (size_t ch = 0; ch < num_render_channels; ++ch) {
+ for (size_t j = 0; j < kFftLengthBy2Plus1; ++j) {
+ float tmp =
+ H[p][ch].re[j] * H[p][ch].re[j] + H[p][ch].im[j] * H[p][ch].im[j];
+ (*H2)[p][j] = std::max((*H2)[p][j], tmp);
+ }
+ }
+ }
+}
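+
+// In scalar form, the per-bin work above is: take the squared magnitude of
+// the complex coefficient and keep the maximum across render channels,
+//
+//   float mag2 = H[p][ch].re[j] * H[p][ch].re[j] +
+//                H[p][ch].im[j] * H[p][ch].im[j];  // |H[p][ch][j]|^2
+//   (*H2)[p][j] = std::max((*H2)[p][j], mag2);
+//
+// The SIMD variants below do the same for four (SSE2/NEON) or eight (AVX2)
+// bins at a time and handle bin kFftLengthBy2 separately, since
+// kFftLengthBy2Plus1 is odd and does not fill a whole vector lane group.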
+
+#if defined(WEBRTC_HAS_NEON)
+// Computes and stores the frequency response of the filter.
+void ComputeFrequencyResponse_Neon(
+ size_t num_partitions,
+ const std::vector<std::vector<FftData>>& H,
+ std::vector<std::array<float, kFftLengthBy2Plus1>>* H2) {
+ for (auto& H2_ch : *H2) {
+ H2_ch.fill(0.f);
+ }
+
+ const size_t num_render_channels = H[0].size();
+ RTC_DCHECK_EQ(H.size(), H2->capacity());
+ for (size_t p = 0; p < num_partitions; ++p) {
+ RTC_DCHECK_EQ(kFftLengthBy2Plus1, (*H2)[p].size());
+ auto& H2_p = (*H2)[p];
+ for (size_t ch = 0; ch < num_render_channels; ++ch) {
+ const FftData& H_p_ch = H[p][ch];
+ for (size_t j = 0; j < kFftLengthBy2; j += 4) {
+ const float32x4_t re = vld1q_f32(&H_p_ch.re[j]);
+ const float32x4_t im = vld1q_f32(&H_p_ch.im[j]);
+ float32x4_t H2_new = vmulq_f32(re, re);
+ H2_new = vmlaq_f32(H2_new, im, im);
+ float32x4_t H2_p_j = vld1q_f32(&H2_p[j]);
+ H2_p_j = vmaxq_f32(H2_p_j, H2_new);
+ vst1q_f32(&H2_p[j], H2_p_j);
+ }
+ float H2_new = H_p_ch.re[kFftLengthBy2] * H_p_ch.re[kFftLengthBy2] +
+ H_p_ch.im[kFftLengthBy2] * H_p_ch.im[kFftLengthBy2];
+ H2_p[kFftLengthBy2] = std::max(H2_p[kFftLengthBy2], H2_new);
+ }
+ }
+}
+#endif
+
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+// Computes and stores the frequency response of the filter.
+void ComputeFrequencyResponse_Sse2(
+ size_t num_partitions,
+ const std::vector<std::vector<FftData>>& H,
+ std::vector<std::array<float, kFftLengthBy2Plus1>>* H2) {
+ for (auto& H2_ch : *H2) {
+ H2_ch.fill(0.f);
+ }
+
+ const size_t num_render_channels = H[0].size();
+ RTC_DCHECK_EQ(H.size(), H2->capacity());
+ for (size_t p = 0; p < num_partitions; ++p) {
+ RTC_DCHECK_EQ(kFftLengthBy2Plus1, (*H2)[p].size());
+ auto& H2_p = (*H2)[p];
+ for (size_t ch = 0; ch < num_render_channels; ++ch) {
+ const FftData& H_p_ch = H[p][ch];
+ for (size_t j = 0; j < kFftLengthBy2; j += 4) {
+ const __m128 re = _mm_loadu_ps(&H_p_ch.re[j]);
+ const __m128 re2 = _mm_mul_ps(re, re);
+ const __m128 im = _mm_loadu_ps(&H_p_ch.im[j]);
+ const __m128 im2 = _mm_mul_ps(im, im);
+ const __m128 H2_new = _mm_add_ps(re2, im2);
+ __m128 H2_k_j = _mm_loadu_ps(&H2_p[j]);
+ H2_k_j = _mm_max_ps(H2_k_j, H2_new);
+ _mm_storeu_ps(&H2_p[j], H2_k_j);
+ }
+ float H2_new = H_p_ch.re[kFftLengthBy2] * H_p_ch.re[kFftLengthBy2] +
+ H_p_ch.im[kFftLengthBy2] * H_p_ch.im[kFftLengthBy2];
+ H2_p[kFftLengthBy2] = std::max(H2_p[kFftLengthBy2], H2_new);
+ }
+ }
+}
+#endif
+
+// Adapts the filter partitions as H(t+1)=H(t)+G(t)*conj(X(t)).
+void AdaptPartitions(const RenderBuffer& render_buffer,
+ const FftData& G,
+ size_t num_partitions,
+ std::vector<std::vector<FftData>>* H) {
+ rtc::ArrayView<const std::vector<FftData>> render_buffer_data =
+ render_buffer.GetFftBuffer();
+ size_t index = render_buffer.Position();
+ const size_t num_render_channels = render_buffer_data[index].size();
+ for (size_t p = 0; p < num_partitions; ++p) {
+ for (size_t ch = 0; ch < num_render_channels; ++ch) {
+ const FftData& X_p_ch = render_buffer_data[index][ch];
+ FftData& H_p_ch = (*H)[p][ch];
+ for (size_t k = 0; k < kFftLengthBy2Plus1; ++k) {
+ H_p_ch.re[k] += X_p_ch.re[k] * G.re[k] + X_p_ch.im[k] * G.im[k];
+ H_p_ch.im[k] += X_p_ch.re[k] * G.im[k] - X_p_ch.im[k] * G.re[k];
+ }
+ }
+ index = index < (render_buffer_data.size() - 1) ? index + 1 : 0;
+ }
+}
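+
+// Written out, the update above is the partitioned-block frequency-domain
+// LMS step H(t+1) = H(t) + conj(X(t)) * G(t). For X = a + ib and G = c + id,
+// conj(X) * G = (ac + bd) + i(ad - bc), which is exactly the pair of
+// accumulations in the inner loop:
+//
+//   H.re += a * c + b * d;
+//   H.im += a * d - b * c;
+//
+// The wrapping index pairs each filter partition with a correspondingly
+// delayed render spectrum from the ring buffer.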
+
+#if defined(WEBRTC_HAS_NEON)
+// Adapts the filter partitions. (Neon variant)
+void AdaptPartitions_Neon(const RenderBuffer& render_buffer,
+ const FftData& G,
+ size_t num_partitions,
+ std::vector<std::vector<FftData>>* H) {
+ rtc::ArrayView<const std::vector<FftData>> render_buffer_data =
+ render_buffer.GetFftBuffer();
+ const size_t num_render_channels = render_buffer_data[0].size();
+ const size_t lim1 = std::min(
+ render_buffer_data.size() - render_buffer.Position(), num_partitions);
+ const size_t lim2 = num_partitions;
+ constexpr size_t kNumFourBinBands = kFftLengthBy2 / 4;
+
+ size_t X_partition = render_buffer.Position();
+ size_t limit = lim1;
+ size_t p = 0;
+ do {
+ for (; p < limit; ++p, ++X_partition) {
+ for (size_t ch = 0; ch < num_render_channels; ++ch) {
+ FftData& H_p_ch = (*H)[p][ch];
+ const FftData& X = render_buffer_data[X_partition][ch];
+ for (size_t k = 0, n = 0; n < kNumFourBinBands; ++n, k += 4) {
+ const float32x4_t G_re = vld1q_f32(&G.re[k]);
+ const float32x4_t G_im = vld1q_f32(&G.im[k]);
+ const float32x4_t X_re = vld1q_f32(&X.re[k]);
+ const float32x4_t X_im = vld1q_f32(&X.im[k]);
+ const float32x4_t H_re = vld1q_f32(&H_p_ch.re[k]);
+ const float32x4_t H_im = vld1q_f32(&H_p_ch.im[k]);
+ const float32x4_t a = vmulq_f32(X_re, G_re);
+ const float32x4_t e = vmlaq_f32(a, X_im, G_im);
+ const float32x4_t c = vmulq_f32(X_re, G_im);
+ const float32x4_t f = vmlsq_f32(c, X_im, G_re);
+ const float32x4_t g = vaddq_f32(H_re, e);
+ const float32x4_t h = vaddq_f32(H_im, f);
+ vst1q_f32(&H_p_ch.re[k], g);
+ vst1q_f32(&H_p_ch.im[k], h);
+ }
+ }
+ }
+
+ X_partition = 0;
+ limit = lim2;
+ } while (p < lim2);
+
+ X_partition = render_buffer.Position();
+ limit = lim1;
+ p = 0;
+ do {
+ for (; p < limit; ++p, ++X_partition) {
+ for (size_t ch = 0; ch < num_render_channels; ++ch) {
+ FftData& H_p_ch = (*H)[p][ch];
+ const FftData& X = render_buffer_data[X_partition][ch];
+
+ H_p_ch.re[kFftLengthBy2] += X.re[kFftLengthBy2] * G.re[kFftLengthBy2] +
+ X.im[kFftLengthBy2] * G.im[kFftLengthBy2];
+ H_p_ch.im[kFftLengthBy2] += X.re[kFftLengthBy2] * G.im[kFftLengthBy2] -
+ X.im[kFftLengthBy2] * G.re[kFftLengthBy2];
+ }
+ }
+ X_partition = 0;
+ limit = lim2;
+ } while (p < lim2);
+}
+#endif
+
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+// Adapts the filter partitions. (SSE2 variant)
+void AdaptPartitions_Sse2(const RenderBuffer& render_buffer,
+ const FftData& G,
+ size_t num_partitions,
+ std::vector<std::vector<FftData>>* H) {
+ rtc::ArrayView<const std::vector<FftData>> render_buffer_data =
+ render_buffer.GetFftBuffer();
+ const size_t num_render_channels = render_buffer_data[0].size();
+ const size_t lim1 = std::min(
+ render_buffer_data.size() - render_buffer.Position(), num_partitions);
+ const size_t lim2 = num_partitions;
+ constexpr size_t kNumFourBinBands = kFftLengthBy2 / 4;
+
+ size_t X_partition = render_buffer.Position();
+ size_t limit = lim1;
+ size_t p = 0;
+ do {
+ for (; p < limit; ++p, ++X_partition) {
+ for (size_t ch = 0; ch < num_render_channels; ++ch) {
+ FftData& H_p_ch = (*H)[p][ch];
+ const FftData& X = render_buffer_data[X_partition][ch];
+
+ for (size_t k = 0, n = 0; n < kNumFourBinBands; ++n, k += 4) {
+ const __m128 G_re = _mm_loadu_ps(&G.re[k]);
+ const __m128 G_im = _mm_loadu_ps(&G.im[k]);
+ const __m128 X_re = _mm_loadu_ps(&X.re[k]);
+ const __m128 X_im = _mm_loadu_ps(&X.im[k]);
+ const __m128 H_re = _mm_loadu_ps(&H_p_ch.re[k]);
+ const __m128 H_im = _mm_loadu_ps(&H_p_ch.im[k]);
+ const __m128 a = _mm_mul_ps(X_re, G_re);
+ const __m128 b = _mm_mul_ps(X_im, G_im);
+ const __m128 c = _mm_mul_ps(X_re, G_im);
+ const __m128 d = _mm_mul_ps(X_im, G_re);
+ const __m128 e = _mm_add_ps(a, b);
+ const __m128 f = _mm_sub_ps(c, d);
+ const __m128 g = _mm_add_ps(H_re, e);
+ const __m128 h = _mm_add_ps(H_im, f);
+ _mm_storeu_ps(&H_p_ch.re[k], g);
+ _mm_storeu_ps(&H_p_ch.im[k], h);
+ }
+ }
+ }
+ X_partition = 0;
+ limit = lim2;
+ } while (p < lim2);
+
+ X_partition = render_buffer.Position();
+ limit = lim1;
+ p = 0;
+ do {
+ for (; p < limit; ++p, ++X_partition) {
+ for (size_t ch = 0; ch < num_render_channels; ++ch) {
+ FftData& H_p_ch = (*H)[p][ch];
+ const FftData& X = render_buffer_data[X_partition][ch];
+
+ H_p_ch.re[kFftLengthBy2] += X.re[kFftLengthBy2] * G.re[kFftLengthBy2] +
+ X.im[kFftLengthBy2] * G.im[kFftLengthBy2];
+ H_p_ch.im[kFftLengthBy2] += X.re[kFftLengthBy2] * G.im[kFftLengthBy2] -
+ X.im[kFftLengthBy2] * G.re[kFftLengthBy2];
+ }
+ }
+
+ X_partition = 0;
+ limit = lim2;
+ } while (p < lim2);
+}
+#endif
+
+// Produces the filter output.
+void ApplyFilter(const RenderBuffer& render_buffer,
+ size_t num_partitions,
+ const std::vector<std::vector<FftData>>& H,
+ FftData* S) {
+ S->re.fill(0.f);
+ S->im.fill(0.f);
+
+ rtc::ArrayView<const std::vector<FftData>> render_buffer_data =
+ render_buffer.GetFftBuffer();
+ size_t index = render_buffer.Position();
+ const size_t num_render_channels = render_buffer_data[index].size();
+ for (size_t p = 0; p < num_partitions; ++p) {
+ RTC_DCHECK_EQ(num_render_channels, H[p].size());
+ for (size_t ch = 0; ch < num_render_channels; ++ch) {
+ const FftData& X_p_ch = render_buffer_data[index][ch];
+ const FftData& H_p_ch = H[p][ch];
+ for (size_t k = 0; k < kFftLengthBy2Plus1; ++k) {
+ S->re[k] += X_p_ch.re[k] * H_p_ch.re[k] - X_p_ch.im[k] * H_p_ch.im[k];
+ S->im[k] += X_p_ch.re[k] * H_p_ch.im[k] + X_p_ch.im[k] * H_p_ch.re[k];
+ }
+ }
+ index = index < (render_buffer_data.size() - 1) ? index + 1 : 0;
+ }
+}
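+
+// The accumulation above is the partitioned frequency-domain convolution
+// S(k) = sum_{p,ch} X_p,ch(k) * H_p,ch(k). For X = a + ib and H = c + id,
+// X * H = (ac - bd) + i(ad + bc), matching the two statements in the loop:
+//
+//   S.re += a * c - b * d;
+//   S.im += a * d + b * c;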
+
+#if defined(WEBRTC_HAS_NEON)
+// Produces the filter output (Neon variant).
+void ApplyFilter_Neon(const RenderBuffer& render_buffer,
+ size_t num_partitions,
+ const std::vector<std::vector<FftData>>& H,
+ FftData* S) {
+  RTC_DCHECK_GE(H.size(), num_partitions);
+ S->Clear();
+
+ rtc::ArrayView<const std::vector<FftData>> render_buffer_data =
+ render_buffer.GetFftBuffer();
+ const size_t num_render_channels = render_buffer_data[0].size();
+ const size_t lim1 = std::min(
+ render_buffer_data.size() - render_buffer.Position(), num_partitions);
+ const size_t lim2 = num_partitions;
+ constexpr size_t kNumFourBinBands = kFftLengthBy2 / 4;
+
+ size_t X_partition = render_buffer.Position();
+ size_t p = 0;
+ size_t limit = lim1;
+ do {
+ for (; p < limit; ++p, ++X_partition) {
+ for (size_t ch = 0; ch < num_render_channels; ++ch) {
+ const FftData& H_p_ch = H[p][ch];
+ const FftData& X = render_buffer_data[X_partition][ch];
+ for (size_t k = 0, n = 0; n < kNumFourBinBands; ++n, k += 4) {
+ const float32x4_t X_re = vld1q_f32(&X.re[k]);
+ const float32x4_t X_im = vld1q_f32(&X.im[k]);
+ const float32x4_t H_re = vld1q_f32(&H_p_ch.re[k]);
+ const float32x4_t H_im = vld1q_f32(&H_p_ch.im[k]);
+ const float32x4_t S_re = vld1q_f32(&S->re[k]);
+ const float32x4_t S_im = vld1q_f32(&S->im[k]);
+ const float32x4_t a = vmulq_f32(X_re, H_re);
+ const float32x4_t e = vmlsq_f32(a, X_im, H_im);
+ const float32x4_t c = vmulq_f32(X_re, H_im);
+ const float32x4_t f = vmlaq_f32(c, X_im, H_re);
+ const float32x4_t g = vaddq_f32(S_re, e);
+ const float32x4_t h = vaddq_f32(S_im, f);
+ vst1q_f32(&S->re[k], g);
+ vst1q_f32(&S->im[k], h);
+ }
+ }
+ }
+ limit = lim2;
+ X_partition = 0;
+ } while (p < lim2);
+
+ X_partition = render_buffer.Position();
+ p = 0;
+ limit = lim1;
+ do {
+ for (; p < limit; ++p, ++X_partition) {
+ for (size_t ch = 0; ch < num_render_channels; ++ch) {
+ const FftData& H_p_ch = H[p][ch];
+ const FftData& X = render_buffer_data[X_partition][ch];
+ S->re[kFftLengthBy2] += X.re[kFftLengthBy2] * H_p_ch.re[kFftLengthBy2] -
+ X.im[kFftLengthBy2] * H_p_ch.im[kFftLengthBy2];
+ S->im[kFftLengthBy2] += X.re[kFftLengthBy2] * H_p_ch.im[kFftLengthBy2] +
+ X.im[kFftLengthBy2] * H_p_ch.re[kFftLengthBy2];
+ }
+ }
+ limit = lim2;
+ X_partition = 0;
+ } while (p < lim2);
+}
+#endif
+
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+// Produces the filter output (SSE2 variant).
+void ApplyFilter_Sse2(const RenderBuffer& render_buffer,
+ size_t num_partitions,
+ const std::vector<std::vector<FftData>>& H,
+ FftData* S) {
+  RTC_DCHECK_GE(H.size(), num_partitions);
+ S->re.fill(0.f);
+ S->im.fill(0.f);
+
+ rtc::ArrayView<const std::vector<FftData>> render_buffer_data =
+ render_buffer.GetFftBuffer();
+ const size_t num_render_channels = render_buffer_data[0].size();
+ const size_t lim1 = std::min(
+ render_buffer_data.size() - render_buffer.Position(), num_partitions);
+ const size_t lim2 = num_partitions;
+ constexpr size_t kNumFourBinBands = kFftLengthBy2 / 4;
+
+ size_t X_partition = render_buffer.Position();
+ size_t p = 0;
+ size_t limit = lim1;
+ do {
+ for (; p < limit; ++p, ++X_partition) {
+ for (size_t ch = 0; ch < num_render_channels; ++ch) {
+ const FftData& H_p_ch = H[p][ch];
+ const FftData& X = render_buffer_data[X_partition][ch];
+ for (size_t k = 0, n = 0; n < kNumFourBinBands; ++n, k += 4) {
+ const __m128 X_re = _mm_loadu_ps(&X.re[k]);
+ const __m128 X_im = _mm_loadu_ps(&X.im[k]);
+ const __m128 H_re = _mm_loadu_ps(&H_p_ch.re[k]);
+ const __m128 H_im = _mm_loadu_ps(&H_p_ch.im[k]);
+ const __m128 S_re = _mm_loadu_ps(&S->re[k]);
+ const __m128 S_im = _mm_loadu_ps(&S->im[k]);
+ const __m128 a = _mm_mul_ps(X_re, H_re);
+ const __m128 b = _mm_mul_ps(X_im, H_im);
+ const __m128 c = _mm_mul_ps(X_re, H_im);
+ const __m128 d = _mm_mul_ps(X_im, H_re);
+ const __m128 e = _mm_sub_ps(a, b);
+ const __m128 f = _mm_add_ps(c, d);
+ const __m128 g = _mm_add_ps(S_re, e);
+ const __m128 h = _mm_add_ps(S_im, f);
+ _mm_storeu_ps(&S->re[k], g);
+ _mm_storeu_ps(&S->im[k], h);
+ }
+ }
+ }
+ limit = lim2;
+ X_partition = 0;
+ } while (p < lim2);
+
+ X_partition = render_buffer.Position();
+ p = 0;
+ limit = lim1;
+ do {
+ for (; p < limit; ++p, ++X_partition) {
+ for (size_t ch = 0; ch < num_render_channels; ++ch) {
+ const FftData& H_p_ch = H[p][ch];
+ const FftData& X = render_buffer_data[X_partition][ch];
+ S->re[kFftLengthBy2] += X.re[kFftLengthBy2] * H_p_ch.re[kFftLengthBy2] -
+ X.im[kFftLengthBy2] * H_p_ch.im[kFftLengthBy2];
+ S->im[kFftLengthBy2] += X.re[kFftLengthBy2] * H_p_ch.im[kFftLengthBy2] +
+ X.im[kFftLengthBy2] * H_p_ch.re[kFftLengthBy2];
+ }
+ }
+ limit = lim2;
+ X_partition = 0;
+ } while (p < lim2);
+}
+#endif
+
+} // namespace aec3
+
+namespace {
+
+// Ensures that the newly added filter partitions after a size increase are set
+// to zero.
+void ZeroFilter(size_t old_size,
+ size_t new_size,
+ std::vector<std::vector<FftData>>* H) {
+ RTC_DCHECK_GE(H->size(), old_size);
+ RTC_DCHECK_GE(H->size(), new_size);
+
+ for (size_t p = old_size; p < new_size; ++p) {
+ RTC_DCHECK_EQ((*H)[p].size(), (*H)[0].size());
+ for (size_t ch = 0; ch < (*H)[0].size(); ++ch) {
+ (*H)[p][ch].Clear();
+ }
+ }
+}
+
+} // namespace
+
+AdaptiveFirFilter::AdaptiveFirFilter(size_t max_size_partitions,
+ size_t initial_size_partitions,
+ size_t size_change_duration_blocks,
+ size_t num_render_channels,
+ Aec3Optimization optimization,
+ ApmDataDumper* data_dumper)
+ : data_dumper_(data_dumper),
+ fft_(),
+ optimization_(optimization),
+ num_render_channels_(num_render_channels),
+ max_size_partitions_(max_size_partitions),
+ size_change_duration_blocks_(
+ static_cast<int>(size_change_duration_blocks)),
+ current_size_partitions_(initial_size_partitions),
+ target_size_partitions_(initial_size_partitions),
+ old_target_size_partitions_(initial_size_partitions),
+ H_(max_size_partitions_, std::vector<FftData>(num_render_channels_)) {
+ RTC_DCHECK(data_dumper_);
+ RTC_DCHECK_GE(max_size_partitions, initial_size_partitions);
+
+ RTC_DCHECK_LT(0, size_change_duration_blocks_);
+ one_by_size_change_duration_blocks_ = 1.f / size_change_duration_blocks_;
+
+ ZeroFilter(0, max_size_partitions_, &H_);
+
+ SetSizePartitions(current_size_partitions_, true);
+}
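+
+// Note that H_ is allocated for max_size_partitions_ up front: later size
+// changes only move current_size_partitions_ and zero the affected
+// partitions, never reallocate, which is what the capacity-based DCHECK in
+// SetSizePartitions() verifies.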
+
+AdaptiveFirFilter::~AdaptiveFirFilter() = default;
+
+void AdaptiveFirFilter::HandleEchoPathChange() {
+ // TODO(peah): Check the value and purpose of the code below.
+ ZeroFilter(current_size_partitions_, max_size_partitions_, &H_);
+}
+
+void AdaptiveFirFilter::SetSizePartitions(size_t size, bool immediate_effect) {
+ RTC_DCHECK_EQ(max_size_partitions_, H_.capacity());
+ RTC_DCHECK_LE(size, max_size_partitions_);
+
+ target_size_partitions_ = std::min(max_size_partitions_, size);
+ if (immediate_effect) {
+ size_t old_size_partitions_ = current_size_partitions_;
+ current_size_partitions_ = old_target_size_partitions_ =
+ target_size_partitions_;
+ ZeroFilter(old_size_partitions_, current_size_partitions_, &H_);
+
+ partition_to_constrain_ =
+ std::min(partition_to_constrain_, current_size_partitions_ - 1);
+ size_change_counter_ = 0;
+ } else {
+ size_change_counter_ = size_change_duration_blocks_;
+ }
+}
+
+void AdaptiveFirFilter::UpdateSize() {
+ RTC_DCHECK_GE(size_change_duration_blocks_, size_change_counter_);
+ size_t old_size_partitions_ = current_size_partitions_;
+ if (size_change_counter_ > 0) {
+ --size_change_counter_;
+
+ auto average = [](float from, float to, float from_weight) {
+ return from * from_weight + to * (1.f - from_weight);
+ };
+
+ float change_factor =
+ size_change_counter_ * one_by_size_change_duration_blocks_;
+
+ current_size_partitions_ = average(old_target_size_partitions_,
+ target_size_partitions_, change_factor);
+
+ partition_to_constrain_ =
+ std::min(partition_to_constrain_, current_size_partitions_ - 1);
+ } else {
+ current_size_partitions_ = old_target_size_partitions_ =
+ target_size_partitions_;
+ }
+ ZeroFilter(old_size_partitions_, current_size_partitions_, &H_);
+ RTC_DCHECK_LE(0, size_change_counter_);
+}
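+
+// A worked example of the ramp above: with old_target_size_partitions_ = 12,
+// target_size_partitions_ = 24 and size_change_duration_blocks_ = 4,
+// successive calls produce current_size_partitions_ = 15, 18, 21 and finally
+// 24 (the float average is truncated on assignment to size_t).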
+
+void AdaptiveFirFilter::Filter(const RenderBuffer& render_buffer,
+ FftData* S) const {
+ RTC_DCHECK(S);
+ switch (optimization_) {
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+ case Aec3Optimization::kSse2:
+ aec3::ApplyFilter_Sse2(render_buffer, current_size_partitions_, H_, S);
+ break;
+ case Aec3Optimization::kAvx2:
+ aec3::ApplyFilter_Avx2(render_buffer, current_size_partitions_, H_, S);
+ break;
+#endif
+#if defined(WEBRTC_HAS_NEON)
+ case Aec3Optimization::kNeon:
+ aec3::ApplyFilter_Neon(render_buffer, current_size_partitions_, H_, S);
+ break;
+#endif
+ default:
+ aec3::ApplyFilter(render_buffer, current_size_partitions_, H_, S);
+ }
+}
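+
+// optimization_ is fixed at construction (AEC3 typically selects it via
+// DetectOptimization() from aec3_common.h), so the dispatch above is a
+// single predictable switch per processed block, not a per-sample decision.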
+
+void AdaptiveFirFilter::Adapt(const RenderBuffer& render_buffer,
+ const FftData& G) {
+ // Adapt the filter and update the filter size.
+ AdaptAndUpdateSize(render_buffer, G);
+
+ // Constrain the filter partitions in a cyclic manner.
+ Constrain();
+}
+
+void AdaptiveFirFilter::Adapt(const RenderBuffer& render_buffer,
+ const FftData& G,
+ std::vector<float>* impulse_response) {
+ // Adapt the filter and update the filter size.
+ AdaptAndUpdateSize(render_buffer, G);
+
+ // Constrain the filter partitions in a cyclic manner.
+ ConstrainAndUpdateImpulseResponse(impulse_response);
+}
+
+void AdaptiveFirFilter::ComputeFrequencyResponse(
+ std::vector<std::array<float, kFftLengthBy2Plus1>>* H2) const {
+ RTC_DCHECK_GE(max_size_partitions_, H2->capacity());
+
+ H2->resize(current_size_partitions_);
+
+ switch (optimization_) {
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+ case Aec3Optimization::kSse2:
+ aec3::ComputeFrequencyResponse_Sse2(current_size_partitions_, H_, H2);
+ break;
+ case Aec3Optimization::kAvx2:
+ aec3::ComputeFrequencyResponse_Avx2(current_size_partitions_, H_, H2);
+ break;
+#endif
+#if defined(WEBRTC_HAS_NEON)
+ case Aec3Optimization::kNeon:
+ aec3::ComputeFrequencyResponse_Neon(current_size_partitions_, H_, H2);
+ break;
+#endif
+ default:
+ aec3::ComputeFrequencyResponse(current_size_partitions_, H_, H2);
+ }
+}
+
+void AdaptiveFirFilter::AdaptAndUpdateSize(const RenderBuffer& render_buffer,
+ const FftData& G) {
+ // Update the filter size if needed.
+ UpdateSize();
+
+ // Adapt the filter.
+ switch (optimization_) {
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+ case Aec3Optimization::kSse2:
+ aec3::AdaptPartitions_Sse2(render_buffer, G, current_size_partitions_,
+ &H_);
+ break;
+ case Aec3Optimization::kAvx2:
+ aec3::AdaptPartitions_Avx2(render_buffer, G, current_size_partitions_,
+ &H_);
+ break;
+#endif
+#if defined(WEBRTC_HAS_NEON)
+ case Aec3Optimization::kNeon:
+ aec3::AdaptPartitions_Neon(render_buffer, G, current_size_partitions_,
+ &H_);
+ break;
+#endif
+ default:
+ aec3::AdaptPartitions(render_buffer, G, current_size_partitions_, &H_);
+ }
+}
+
+// Constrains one partition of the frequency-domain filter to be limited in
+// time by setting the relevant time-domain coefficients to zero, and updates
+// the corresponding values in an externally stored impulse response estimate.
+void AdaptiveFirFilter::ConstrainAndUpdateImpulseResponse(
+ std::vector<float>* impulse_response) {
+ RTC_DCHECK_EQ(GetTimeDomainLength(max_size_partitions_),
+ impulse_response->capacity());
+  impulse_response->resize(GetTimeDomainLength(current_size_partitions_));
+  std::array<float, kFftLength> h;
+ std::fill(
+ impulse_response->begin() + partition_to_constrain_ * kFftLengthBy2,
+ impulse_response->begin() + (partition_to_constrain_ + 1) * kFftLengthBy2,
+ 0.f);
+
+ for (size_t ch = 0; ch < num_render_channels_; ++ch) {
+ fft_.Ifft(H_[partition_to_constrain_][ch], &h);
+
+ static constexpr float kScale = 1.0f / kFftLengthBy2;
+ std::for_each(h.begin(), h.begin() + kFftLengthBy2,
+ [](float& a) { a *= kScale; });
+ std::fill(h.begin() + kFftLengthBy2, h.end(), 0.f);
+
+ if (ch == 0) {
+ std::copy(
+ h.begin(), h.begin() + kFftLengthBy2,
+ impulse_response->begin() + partition_to_constrain_ * kFftLengthBy2);
+ } else {
+ for (size_t k = 0, j = partition_to_constrain_ * kFftLengthBy2;
+ k < kFftLengthBy2; ++k, ++j) {
+ if (fabsf((*impulse_response)[j]) < fabsf(h[k])) {
+ (*impulse_response)[j] = h[k];
+ }
+ }
+ }
+
+ fft_.Fft(&h, &H_[partition_to_constrain_][ch]);
+ }
+
+ partition_to_constrain_ =
+ partition_to_constrain_ < (current_size_partitions_ - 1)
+ ? partition_to_constrain_ + 1
+ : 0;
+}
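+
+// The Ifft/zero/Fft round trip above is the usual gradient constraint of a
+// partitioned-block frequency-domain filter: each partition must represent a
+// causal kFftLengthBy2-tap time-domain segment, so the second half of h is
+// forced to zero, and the 1.f / kFftLengthBy2 factor compensates for the
+// unnormalized inverse transform. Constraining one partition per call, in a
+// cycle over partition_to_constrain_, bounds the cost to one Ifft/Fft pair
+// per channel per block.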
+
+// Constrains a partition of the frequency-domain filter to be limited in
+// time by setting the relevant time-domain coefficients to zero.
+void AdaptiveFirFilter::Constrain() {
+ std::array<float, kFftLength> h;
+ for (size_t ch = 0; ch < num_render_channels_; ++ch) {
+ fft_.Ifft(H_[partition_to_constrain_][ch], &h);
+
+ static constexpr float kScale = 1.0f / kFftLengthBy2;
+ std::for_each(h.begin(), h.begin() + kFftLengthBy2,
+ [](float& a) { a *= kScale; });
+ std::fill(h.begin() + kFftLengthBy2, h.end(), 0.f);
+
+ fft_.Fft(&h, &H_[partition_to_constrain_][ch]);
+ }
+
+ partition_to_constrain_ =
+ partition_to_constrain_ < (current_size_partitions_ - 1)
+ ? partition_to_constrain_ + 1
+ : 0;
+}
+
+void AdaptiveFirFilter::ScaleFilter(float factor) {
+ for (auto& H_p : H_) {
+ for (auto& H_p_ch : H_p) {
+ for (auto& re : H_p_ch.re) {
+ re *= factor;
+ }
+ for (auto& im : H_p_ch.im) {
+ im *= factor;
+ }
+ }
+ }
+}
+
+// Sets the filter coefficients.
+void AdaptiveFirFilter::SetFilter(size_t num_partitions,
+ const std::vector<std::vector<FftData>>& H) {
+ const size_t min_num_partitions =
+ std::min(current_size_partitions_, num_partitions);
+ for (size_t p = 0; p < min_num_partitions; ++p) {
+ RTC_DCHECK_EQ(H_[p].size(), H[p].size());
+ RTC_DCHECK_EQ(num_render_channels_, H_[p].size());
+
+ for (size_t ch = 0; ch < num_render_channels_; ++ch) {
+ std::copy(H[p][ch].re.begin(), H[p][ch].re.end(), H_[p][ch].re.begin());
+ std::copy(H[p][ch].im.begin(), H[p][ch].im.end(), H_[p][ch].im.begin());
+ }
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/adaptive_fir_filter.h b/third_party/libwebrtc/modules/audio_processing/aec3/adaptive_fir_filter.h
new file mode 100644
index 0000000000..34c06f4367
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/adaptive_fir_filter.h
@@ -0,0 +1,192 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_ADAPTIVE_FIR_FILTER_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_ADAPTIVE_FIR_FILTER_H_
+
+#include <stddef.h>
+
+#include <array>
+#include <vector>
+
+#include "absl/strings/string_view.h"
+#include "api/array_view.h"
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "modules/audio_processing/aec3/aec3_fft.h"
+#include "modules/audio_processing/aec3/fft_data.h"
+#include "modules/audio_processing/aec3/render_buffer.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+#include "rtc_base/system/arch.h"
+
+namespace webrtc {
+namespace aec3 {
+// Computes and stores the frequency response of the filter.
+void ComputeFrequencyResponse(
+ size_t num_partitions,
+ const std::vector<std::vector<FftData>>& H,
+ std::vector<std::array<float, kFftLengthBy2Plus1>>* H2);
+#if defined(WEBRTC_HAS_NEON)
+void ComputeFrequencyResponse_Neon(
+ size_t num_partitions,
+ const std::vector<std::vector<FftData>>& H,
+ std::vector<std::array<float, kFftLengthBy2Plus1>>* H2);
+#endif
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+void ComputeFrequencyResponse_Sse2(
+ size_t num_partitions,
+ const std::vector<std::vector<FftData>>& H,
+ std::vector<std::array<float, kFftLengthBy2Plus1>>* H2);
+
+void ComputeFrequencyResponse_Avx2(
+ size_t num_partitions,
+ const std::vector<std::vector<FftData>>& H,
+ std::vector<std::array<float, kFftLengthBy2Plus1>>* H2);
+#endif
+
+// Adapts the filter partitions.
+void AdaptPartitions(const RenderBuffer& render_buffer,
+ const FftData& G,
+ size_t num_partitions,
+ std::vector<std::vector<FftData>>* H);
+#if defined(WEBRTC_HAS_NEON)
+void AdaptPartitions_Neon(const RenderBuffer& render_buffer,
+ const FftData& G,
+ size_t num_partitions,
+ std::vector<std::vector<FftData>>* H);
+#endif
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+void AdaptPartitions_Sse2(const RenderBuffer& render_buffer,
+ const FftData& G,
+ size_t num_partitions,
+ std::vector<std::vector<FftData>>* H);
+
+void AdaptPartitions_Avx2(const RenderBuffer& render_buffer,
+ const FftData& G,
+ size_t num_partitions,
+ std::vector<std::vector<FftData>>* H);
+#endif
+
+// Produces the filter output.
+void ApplyFilter(const RenderBuffer& render_buffer,
+ size_t num_partitions,
+ const std::vector<std::vector<FftData>>& H,
+ FftData* S);
+#if defined(WEBRTC_HAS_NEON)
+void ApplyFilter_Neon(const RenderBuffer& render_buffer,
+ size_t num_partitions,
+ const std::vector<std::vector<FftData>>& H,
+ FftData* S);
+#endif
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+void ApplyFilter_Sse2(const RenderBuffer& render_buffer,
+ size_t num_partitions,
+ const std::vector<std::vector<FftData>>& H,
+ FftData* S);
+
+void ApplyFilter_Avx2(const RenderBuffer& render_buffer,
+ size_t num_partitions,
+ const std::vector<std::vector<FftData>>& H,
+ FftData* S);
+#endif
+
+} // namespace aec3
+
+// Provides frequency-domain adaptive filter functionality.
+class AdaptiveFirFilter {
+ public:
+ AdaptiveFirFilter(size_t max_size_partitions,
+ size_t initial_size_partitions,
+ size_t size_change_duration_blocks,
+ size_t num_render_channels,
+ Aec3Optimization optimization,
+ ApmDataDumper* data_dumper);
+
+ ~AdaptiveFirFilter();
+
+ AdaptiveFirFilter(const AdaptiveFirFilter&) = delete;
+ AdaptiveFirFilter& operator=(const AdaptiveFirFilter&) = delete;
+
+ // Produces the output of the filter.
+ void Filter(const RenderBuffer& render_buffer, FftData* S) const;
+
+ // Adapts the filter and updates an externally stored impulse response
+ // estimate.
+ void Adapt(const RenderBuffer& render_buffer,
+ const FftData& G,
+ std::vector<float>* impulse_response);
+
+ // Adapts the filter.
+ void Adapt(const RenderBuffer& render_buffer, const FftData& G);
+
+  // Receives reports that known echo path changes have occurred and adjusts
+ // the filter adaptation accordingly.
+ void HandleEchoPathChange();
+
+ // Returns the filter size.
+ size_t SizePartitions() const { return current_size_partitions_; }
+
+ // Sets the filter size.
+ void SetSizePartitions(size_t size, bool immediate_effect);
+
+ // Computes the frequency responses for the filter partitions.
+ void ComputeFrequencyResponse(
+ std::vector<std::array<float, kFftLengthBy2Plus1>>* H2) const;
+
+ // Returns the maximum number of partitions for the filter.
+ size_t max_filter_size_partitions() const { return max_size_partitions_; }
+
+ void DumpFilter(absl::string_view name_frequency_domain) {
+ for (size_t p = 0; p < max_size_partitions_; ++p) {
+ data_dumper_->DumpRaw(name_frequency_domain, H_[p][0].re);
+ data_dumper_->DumpRaw(name_frequency_domain, H_[p][0].im);
+ }
+ }
+
+  // Scales the filter impulse response and spectrum by a factor.
+ void ScaleFilter(float factor);
+
+  // Sets the filter coefficients.
+ void SetFilter(size_t num_partitions,
+ const std::vector<std::vector<FftData>>& H);
+
+ // Gets the filter coefficients.
+ const std::vector<std::vector<FftData>>& GetFilter() const { return H_; }
+
+ private:
+ // Adapts the filter and updates the filter size.
+ void AdaptAndUpdateSize(const RenderBuffer& render_buffer, const FftData& G);
+
+  // Constrains the filter partitions in a cyclic manner.
+ void Constrain();
+ // Constrains the filter in a cyclic manner and updates the corresponding
+ // values in the supplied impulse response.
+ void ConstrainAndUpdateImpulseResponse(std::vector<float>* impulse_response);
+
+  // Gradually updates the current filter size towards the target size.
+ void UpdateSize();
+
+ ApmDataDumper* const data_dumper_;
+ const Aec3Fft fft_;
+ const Aec3Optimization optimization_;
+ const size_t num_render_channels_;
+ const size_t max_size_partitions_;
+ const int size_change_duration_blocks_;
+ float one_by_size_change_duration_blocks_;
+ size_t current_size_partitions_;
+ size_t target_size_partitions_;
+ size_t old_target_size_partitions_;
+ int size_change_counter_ = 0;
+ std::vector<std::vector<FftData>> H_;
+ size_t partition_to_constrain_ = 0;
+};
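+
+// A minimal usage sketch (illustrative values; in the real pipeline the
+// subtractor owns these calls, see subtractor.cc):
+//
+//   ApmDataDumper dumper(0);
+//   AdaptiveFirFilter filter(/*max_size_partitions=*/24,
+//                            /*initial_size_partitions=*/12,
+//                            /*size_change_duration_blocks=*/20,
+//                            /*num_render_channels=*/1,
+//                            DetectOptimization(), &dumper);
+//   FftData S;
+//   filter.Filter(render_buffer, &S);  // S: echo estimate spectrum.
+//   filter.Adapt(render_buffer, G);    // G: externally computed filter gain.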
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AEC3_ADAPTIVE_FIR_FILTER_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/adaptive_fir_filter_avx2.cc b/third_party/libwebrtc/modules/audio_processing/aec3/adaptive_fir_filter_avx2.cc
new file mode 100644
index 0000000000..44d4514275
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/adaptive_fir_filter_avx2.cc
@@ -0,0 +1,188 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/adaptive_fir_filter.h"
+
+#include "common_audio/intrin.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+namespace aec3 {
+
+// Computes and stores the frequency response of the filter.
+void ComputeFrequencyResponse_Avx2(
+ size_t num_partitions,
+ const std::vector<std::vector<FftData>>& H,
+ std::vector<std::array<float, kFftLengthBy2Plus1>>* H2) {
+ for (auto& H2_ch : *H2) {
+ H2_ch.fill(0.f);
+ }
+
+ const size_t num_render_channels = H[0].size();
+ RTC_DCHECK_EQ(H.size(), H2->capacity());
+ for (size_t p = 0; p < num_partitions; ++p) {
+ RTC_DCHECK_EQ(kFftLengthBy2Plus1, (*H2)[p].size());
+ auto& H2_p = (*H2)[p];
+ for (size_t ch = 0; ch < num_render_channels; ++ch) {
+ const FftData& H_p_ch = H[p][ch];
+ for (size_t j = 0; j < kFftLengthBy2; j += 8) {
+ __m256 re = _mm256_loadu_ps(&H_p_ch.re[j]);
+ __m256 re2 = _mm256_mul_ps(re, re);
+ __m256 im = _mm256_loadu_ps(&H_p_ch.im[j]);
+ re2 = _mm256_fmadd_ps(im, im, re2);
+ __m256 H2_k_j = _mm256_loadu_ps(&H2_p[j]);
+ H2_k_j = _mm256_max_ps(H2_k_j, re2);
+ _mm256_storeu_ps(&H2_p[j], H2_k_j);
+ }
+ float H2_new = H_p_ch.re[kFftLengthBy2] * H_p_ch.re[kFftLengthBy2] +
+ H_p_ch.im[kFftLengthBy2] * H_p_ch.im[kFftLengthBy2];
+ H2_p[kFftLengthBy2] = std::max(H2_p[kFftLengthBy2], H2_new);
+ }
+ }
+}
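+
+// _mm256_fmadd_ps(im, im, re2) fuses im * im + re2 into one FMA instruction,
+// which is why the aec3_avx2 target in this directory's BUILD.gn adds -mfma
+// to the -mavx/-mavx2 cflags.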
+
+// Adapts the filter partitions.
+void AdaptPartitions_Avx2(const RenderBuffer& render_buffer,
+ const FftData& G,
+ size_t num_partitions,
+ std::vector<std::vector<FftData>>* H) {
+ rtc::ArrayView<const std::vector<FftData>> render_buffer_data =
+ render_buffer.GetFftBuffer();
+ const size_t num_render_channels = render_buffer_data[0].size();
+ const size_t lim1 = std::min(
+ render_buffer_data.size() - render_buffer.Position(), num_partitions);
+ const size_t lim2 = num_partitions;
+ constexpr size_t kNumEightBinBands = kFftLengthBy2 / 8;
+
+ size_t X_partition = render_buffer.Position();
+ size_t limit = lim1;
+ size_t p = 0;
+ do {
+ for (; p < limit; ++p, ++X_partition) {
+ for (size_t ch = 0; ch < num_render_channels; ++ch) {
+ FftData& H_p_ch = (*H)[p][ch];
+ const FftData& X = render_buffer_data[X_partition][ch];
+
+ for (size_t k = 0, n = 0; n < kNumEightBinBands; ++n, k += 8) {
+ const __m256 G_re = _mm256_loadu_ps(&G.re[k]);
+ const __m256 G_im = _mm256_loadu_ps(&G.im[k]);
+ const __m256 X_re = _mm256_loadu_ps(&X.re[k]);
+ const __m256 X_im = _mm256_loadu_ps(&X.im[k]);
+ const __m256 H_re = _mm256_loadu_ps(&H_p_ch.re[k]);
+ const __m256 H_im = _mm256_loadu_ps(&H_p_ch.im[k]);
+ const __m256 a = _mm256_mul_ps(X_re, G_re);
+ const __m256 b = _mm256_mul_ps(X_im, G_im);
+ const __m256 c = _mm256_mul_ps(X_re, G_im);
+ const __m256 d = _mm256_mul_ps(X_im, G_re);
+ const __m256 e = _mm256_add_ps(a, b);
+ const __m256 f = _mm256_sub_ps(c, d);
+ const __m256 g = _mm256_add_ps(H_re, e);
+ const __m256 h = _mm256_add_ps(H_im, f);
+ _mm256_storeu_ps(&H_p_ch.re[k], g);
+ _mm256_storeu_ps(&H_p_ch.im[k], h);
+ }
+ }
+ }
+ X_partition = 0;
+ limit = lim2;
+ } while (p < lim2);
+
+ X_partition = render_buffer.Position();
+ limit = lim1;
+ p = 0;
+ do {
+ for (; p < limit; ++p, ++X_partition) {
+ for (size_t ch = 0; ch < num_render_channels; ++ch) {
+ FftData& H_p_ch = (*H)[p][ch];
+ const FftData& X = render_buffer_data[X_partition][ch];
+
+ H_p_ch.re[kFftLengthBy2] += X.re[kFftLengthBy2] * G.re[kFftLengthBy2] +
+ X.im[kFftLengthBy2] * G.im[kFftLengthBy2];
+ H_p_ch.im[kFftLengthBy2] += X.re[kFftLengthBy2] * G.im[kFftLengthBy2] -
+ X.im[kFftLengthBy2] * G.re[kFftLengthBy2];
+ }
+ }
+
+ X_partition = 0;
+ limit = lim2;
+ } while (p < lim2);
+}
+
+// Produces the filter output (AVX2 variant).
+void ApplyFilter_Avx2(const RenderBuffer& render_buffer,
+ size_t num_partitions,
+ const std::vector<std::vector<FftData>>& H,
+ FftData* S) {
+  RTC_DCHECK_GE(H.size(), num_partitions);
+ S->re.fill(0.f);
+ S->im.fill(0.f);
+
+ rtc::ArrayView<const std::vector<FftData>> render_buffer_data =
+ render_buffer.GetFftBuffer();
+ const size_t num_render_channels = render_buffer_data[0].size();
+ const size_t lim1 = std::min(
+ render_buffer_data.size() - render_buffer.Position(), num_partitions);
+ const size_t lim2 = num_partitions;
+ constexpr size_t kNumEightBinBands = kFftLengthBy2 / 8;
+
+ size_t X_partition = render_buffer.Position();
+ size_t p = 0;
+ size_t limit = lim1;
+ do {
+ for (; p < limit; ++p, ++X_partition) {
+ for (size_t ch = 0; ch < num_render_channels; ++ch) {
+ const FftData& H_p_ch = H[p][ch];
+ const FftData& X = render_buffer_data[X_partition][ch];
+ for (size_t k = 0, n = 0; n < kNumEightBinBands; ++n, k += 8) {
+ const __m256 X_re = _mm256_loadu_ps(&X.re[k]);
+ const __m256 X_im = _mm256_loadu_ps(&X.im[k]);
+ const __m256 H_re = _mm256_loadu_ps(&H_p_ch.re[k]);
+ const __m256 H_im = _mm256_loadu_ps(&H_p_ch.im[k]);
+ const __m256 S_re = _mm256_loadu_ps(&S->re[k]);
+ const __m256 S_im = _mm256_loadu_ps(&S->im[k]);
+ const __m256 a = _mm256_mul_ps(X_re, H_re);
+ const __m256 b = _mm256_mul_ps(X_im, H_im);
+ const __m256 c = _mm256_mul_ps(X_re, H_im);
+ const __m256 d = _mm256_mul_ps(X_im, H_re);
+ const __m256 e = _mm256_sub_ps(a, b);
+ const __m256 f = _mm256_add_ps(c, d);
+ const __m256 g = _mm256_add_ps(S_re, e);
+ const __m256 h = _mm256_add_ps(S_im, f);
+ _mm256_storeu_ps(&S->re[k], g);
+ _mm256_storeu_ps(&S->im[k], h);
+ }
+ }
+ }
+ limit = lim2;
+ X_partition = 0;
+ } while (p < lim2);
+
+ X_partition = render_buffer.Position();
+ p = 0;
+ limit = lim1;
+ do {
+ for (; p < limit; ++p, ++X_partition) {
+ for (size_t ch = 0; ch < num_render_channels; ++ch) {
+ const FftData& H_p_ch = H[p][ch];
+ const FftData& X = render_buffer_data[X_partition][ch];
+ S->re[kFftLengthBy2] += X.re[kFftLengthBy2] * H_p_ch.re[kFftLengthBy2] -
+ X.im[kFftLengthBy2] * H_p_ch.im[kFftLengthBy2];
+ S->im[kFftLengthBy2] += X.re[kFftLengthBy2] * H_p_ch.im[kFftLengthBy2] +
+ X.im[kFftLengthBy2] * H_p_ch.re[kFftLengthBy2];
+ }
+ }
+ limit = lim2;
+ X_partition = 0;
+ } while (p < lim2);
+}
+
+} // namespace aec3
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/adaptive_fir_filter_erl.cc b/third_party/libwebrtc/modules/audio_processing/aec3/adaptive_fir_filter_erl.cc
new file mode 100644
index 0000000000..45b8813979
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/adaptive_fir_filter_erl.cc
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/adaptive_fir_filter_erl.h"
+
+#include <algorithm>
+#include <functional>
+
+#if defined(WEBRTC_HAS_NEON)
+#include <arm_neon.h>
+#endif
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+#include <emmintrin.h>
+#endif
+
+namespace webrtc {
+
+namespace aec3 {
+
+// Computes and stores the echo return loss estimate of the filter, which is the
+// sum of the partition frequency responses.
+void ErlComputer(const std::vector<std::array<float, kFftLengthBy2Plus1>>& H2,
+ rtc::ArrayView<float> erl) {
+ std::fill(erl.begin(), erl.end(), 0.f);
+ for (auto& H2_j : H2) {
+ std::transform(H2_j.begin(), H2_j.end(), erl.begin(), erl.begin(),
+ std::plus<float>());
+ }
+}
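+
+// Equivalently, erl[k] = sum over partitions j of H2[j][k]: the aggregate
+// squared filter gain in bin k, which serves as the estimate of how much
+// echo energy the linear filter attributes to that band.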
+
+#if defined(WEBRTC_HAS_NEON)
+// Computes and stores the echo return loss estimate of the filter, which is the
+// sum of the partition frequency responses.
+void ErlComputer_NEON(
+ const std::vector<std::array<float, kFftLengthBy2Plus1>>& H2,
+ rtc::ArrayView<float> erl) {
+ std::fill(erl.begin(), erl.end(), 0.f);
+ for (auto& H2_j : H2) {
+ for (size_t k = 0; k < kFftLengthBy2; k += 4) {
+ const float32x4_t H2_j_k = vld1q_f32(&H2_j[k]);
+ float32x4_t erl_k = vld1q_f32(&erl[k]);
+ erl_k = vaddq_f32(erl_k, H2_j_k);
+ vst1q_f32(&erl[k], erl_k);
+ }
+ erl[kFftLengthBy2] += H2_j[kFftLengthBy2];
+ }
+}
+#endif
+
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+// Computes and stores the echo return loss estimate of the filter, which is the
+// sum of the partition frequency responses.
+void ErlComputer_SSE2(
+ const std::vector<std::array<float, kFftLengthBy2Plus1>>& H2,
+ rtc::ArrayView<float> erl) {
+ std::fill(erl.begin(), erl.end(), 0.f);
+ for (auto& H2_j : H2) {
+ for (size_t k = 0; k < kFftLengthBy2; k += 4) {
+ const __m128 H2_j_k = _mm_loadu_ps(&H2_j[k]);
+ __m128 erl_k = _mm_loadu_ps(&erl[k]);
+ erl_k = _mm_add_ps(erl_k, H2_j_k);
+ _mm_storeu_ps(&erl[k], erl_k);
+ }
+ erl[kFftLengthBy2] += H2_j[kFftLengthBy2];
+ }
+}
+#endif
+
+} // namespace aec3
+
+void ComputeErl(const Aec3Optimization& optimization,
+ const std::vector<std::array<float, kFftLengthBy2Plus1>>& H2,
+ rtc::ArrayView<float> erl) {
+ RTC_DCHECK_EQ(kFftLengthBy2Plus1, erl.size());
+ // Update the frequency response and echo return loss for the filter.
+ switch (optimization) {
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+ case Aec3Optimization::kSse2:
+ aec3::ErlComputer_SSE2(H2, erl);
+ break;
+ case Aec3Optimization::kAvx2:
+ aec3::ErlComputer_AVX2(H2, erl);
+ break;
+#endif
+#if defined(WEBRTC_HAS_NEON)
+ case Aec3Optimization::kNeon:
+ aec3::ErlComputer_NEON(H2, erl);
+ break;
+#endif
+ default:
+ aec3::ErlComputer(H2, erl);
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/adaptive_fir_filter_erl.h b/third_party/libwebrtc/modules/audio_processing/aec3/adaptive_fir_filter_erl.h
new file mode 100644
index 0000000000..4ac13b1bc3
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/adaptive_fir_filter_erl.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_ADAPTIVE_FIR_FILTER_ERL_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_ADAPTIVE_FIR_FILTER_ERL_H_
+
+#include <stddef.h>
+
+#include <array>
+#include <vector>
+
+#include "api/array_view.h"
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "rtc_base/system/arch.h"
+
+namespace webrtc {
+namespace aec3 {
+
+// Computes and stores the echo return loss estimate of the filter, which is the
+// sum of the partition frequency responses.
+void ErlComputer(const std::vector<std::array<float, kFftLengthBy2Plus1>>& H2,
+ rtc::ArrayView<float> erl);
+#if defined(WEBRTC_HAS_NEON)
+void ErlComputer_NEON(
+ const std::vector<std::array<float, kFftLengthBy2Plus1>>& H2,
+ rtc::ArrayView<float> erl);
+#endif
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+void ErlComputer_SSE2(
+ const std::vector<std::array<float, kFftLengthBy2Plus1>>& H2,
+ rtc::ArrayView<float> erl);
+
+void ErlComputer_AVX2(
+ const std::vector<std::array<float, kFftLengthBy2Plus1>>& H2,
+ rtc::ArrayView<float> erl);
+#endif
+
+} // namespace aec3
+
+// Computes the echo return loss based on a frequency response.
+void ComputeErl(const Aec3Optimization& optimization,
+ const std::vector<std::array<float, kFftLengthBy2Plus1>>& H2,
+ rtc::ArrayView<float> erl);
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AEC3_ADAPTIVE_FIR_FILTER_ERL_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/adaptive_fir_filter_erl_avx2.cc b/third_party/libwebrtc/modules/audio_processing/aec3/adaptive_fir_filter_erl_avx2.cc
new file mode 100644
index 0000000000..5fe7514db1
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/adaptive_fir_filter_erl_avx2.cc
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/adaptive_fir_filter_erl.h"
+
+#include <immintrin.h>
+
+namespace webrtc {
+
+namespace aec3 {
+
+// Computes and stores the echo return loss estimate of the filter, which is the
+// sum of the partition frequency responses.
+void ErlComputer_AVX2(
+ const std::vector<std::array<float, kFftLengthBy2Plus1>>& H2,
+ rtc::ArrayView<float> erl) {
+ std::fill(erl.begin(), erl.end(), 0.f);
+ for (auto& H2_j : H2) {
+ for (size_t k = 0; k < kFftLengthBy2; k += 8) {
+ const __m256 H2_j_k = _mm256_loadu_ps(&H2_j[k]);
+ __m256 erl_k = _mm256_loadu_ps(&erl[k]);
+ erl_k = _mm256_add_ps(erl_k, H2_j_k);
+ _mm256_storeu_ps(&erl[k], erl_k);
+ }
+ erl[kFftLengthBy2] += H2_j[kFftLengthBy2];
+ }
+}
+
+} // namespace aec3
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/adaptive_fir_filter_erl_gn/moz.build b/third_party/libwebrtc/modules/audio_processing/aec3/adaptive_fir_filter_erl_gn/moz.build
new file mode 100644
index 0000000000..c7dd6c5d60
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/adaptive_fir_filter_erl_gn/moz.build
@@ -0,0 +1,193 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("adaptive_fir_filter_erl_gn")
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/adaptive_fir_filter_erl_unittest.cc b/third_party/libwebrtc/modules/audio_processing/aec3/adaptive_fir_filter_erl_unittest.cc
new file mode 100644
index 0000000000..d2af70a9f2
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/adaptive_fir_filter_erl_unittest.cc
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/adaptive_fir_filter_erl.h"
+
+#include <array>
+#include <vector>
+
+#include "rtc_base/system/arch.h"
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+#include <emmintrin.h>
+#endif
+
+#include "system_wrappers/include/cpu_features_wrapper.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace aec3 {
+
+#if defined(WEBRTC_HAS_NEON)
+// Verifies that the optimized method for echo return loss computation is
+// bitexact to the reference counterpart.
+TEST(AdaptiveFirFilter, UpdateErlNeonOptimization) {
+ const size_t kNumPartitions = 12;
+ std::vector<std::array<float, kFftLengthBy2Plus1>> H2(kNumPartitions);
+ std::array<float, kFftLengthBy2Plus1> erl;
+ std::array<float, kFftLengthBy2Plus1> erl_NEON;
+
+ for (size_t j = 0; j < H2.size(); ++j) {
+ for (size_t k = 0; k < H2[j].size(); ++k) {
+ H2[j][k] = k + j / 3.f;
+ }
+ }
+
+ ErlComputer(H2, erl);
+ ErlComputer_NEON(H2, erl_NEON);
+
+ for (size_t j = 0; j < erl.size(); ++j) {
+ EXPECT_FLOAT_EQ(erl[j], erl_NEON[j]);
+ }
+}
+
+#endif
+
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+// Verifies that the optimized method for echo return loss computation is
+// bitexact to the reference counterpart.
+TEST(AdaptiveFirFilter, UpdateErlSse2Optimization) {
+ bool use_sse2 = (GetCPUInfo(kSSE2) != 0);
+ if (use_sse2) {
+ const size_t kNumPartitions = 12;
+ std::vector<std::array<float, kFftLengthBy2Plus1>> H2(kNumPartitions);
+ std::array<float, kFftLengthBy2Plus1> erl;
+ std::array<float, kFftLengthBy2Plus1> erl_SSE2;
+
+ for (size_t j = 0; j < H2.size(); ++j) {
+ for (size_t k = 0; k < H2[j].size(); ++k) {
+ H2[j][k] = k + j / 3.f;
+ }
+ }
+
+ ErlComputer(H2, erl);
+ ErlComputer_SSE2(H2, erl_SSE2);
+
+ for (size_t j = 0; j < erl.size(); ++j) {
+ EXPECT_FLOAT_EQ(erl[j], erl_SSE2[j]);
+ }
+ }
+}
+
+// Verifies that the optimized method for echo return loss computation is
+// bitexact to the reference counterpart.
+TEST(AdaptiveFirFilter, UpdateErlAvx2Optimization) {
+ bool use_avx2 = (GetCPUInfo(kAVX2) != 0);
+ if (use_avx2) {
+ const size_t kNumPartitions = 12;
+ std::vector<std::array<float, kFftLengthBy2Plus1>> H2(kNumPartitions);
+ std::array<float, kFftLengthBy2Plus1> erl;
+ std::array<float, kFftLengthBy2Plus1> erl_AVX2;
+
+ for (size_t j = 0; j < H2.size(); ++j) {
+ for (size_t k = 0; k < H2[j].size(); ++k) {
+ H2[j][k] = k + j / 3.f;
+ }
+ }
+
+ ErlComputer(H2, erl);
+ ErlComputer_AVX2(H2, erl_AVX2);
+
+ for (size_t j = 0; j < erl.size(); ++j) {
+ EXPECT_FLOAT_EQ(erl[j], erl_AVX2[j]);
+ }
+ }
+}
+
+#endif
+
+} // namespace aec3
+} // namespace webrtc
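The tests above pin each SIMD variant against the portable ErlComputer; in production the variant is chosen once via DetectOptimization() and dispatched through ComputeErl (called that way in the FilterStatisticsAccess test further down). A sketch of that dispatch, with argument types taken from the test usage and the exact switch body being an assumption:

    #include <array>
    #include <vector>

    #include "api/array_view.h"
    #include "modules/audio_processing/aec3/adaptive_fir_filter_erl.h"
    #include "modules/audio_processing/aec3/aec3_common.h"

    // Sketch: dispatch to the widest SIMD variant the CPU supports, falling
    // back to the portable reference implementation.
    void ComputeErlSketch(
        webrtc::Aec3Optimization optimization,
        const std::vector<std::array<float, webrtc::kFftLengthBy2Plus1>>& H2,
        rtc::ArrayView<float> erl) {
      switch (optimization) {
    #if defined(WEBRTC_ARCH_X86_FAMILY)
        case webrtc::Aec3Optimization::kAvx2:
          webrtc::aec3::ErlComputer_AVX2(H2, erl);
          break;
        case webrtc::Aec3Optimization::kSse2:
          webrtc::aec3::ErlComputer_SSE2(H2, erl);
          break;
    #endif
    #if defined(WEBRTC_HAS_NEON)
        case webrtc::Aec3Optimization::kNeon:
          webrtc::aec3::ErlComputer_NEON(H2, erl);
          break;
    #endif
        default:
          webrtc::aec3::ErlComputer(H2, erl);
          break;
      }
    }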
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/adaptive_fir_filter_gn/moz.build b/third_party/libwebrtc/modules/audio_processing/aec3/adaptive_fir_filter_gn/moz.build
new file mode 100644
index 0000000000..2129bf1552
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/adaptive_fir_filter_gn/moz.build
@@ -0,0 +1,204 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("adaptive_fir_filter_gn")
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/adaptive_fir_filter_unittest.cc b/third_party/libwebrtc/modules/audio_processing/aec3/adaptive_fir_filter_unittest.cc
new file mode 100644
index 0000000000..a13764c109
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/adaptive_fir_filter_unittest.cc
@@ -0,0 +1,594 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/adaptive_fir_filter.h"
+
+// Defines WEBRTC_ARCH_X86_FAMILY, used below.
+#include <math.h>
+
+#include <algorithm>
+#include <numeric>
+#include <string>
+
+#include "rtc_base/system/arch.h"
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+#include <emmintrin.h>
+#endif
+
+#include "modules/audio_processing/aec3/adaptive_fir_filter_erl.h"
+#include "modules/audio_processing/aec3/aec3_fft.h"
+#include "modules/audio_processing/aec3/aec_state.h"
+#include "modules/audio_processing/aec3/coarse_filter_update_gain.h"
+#include "modules/audio_processing/aec3/render_delay_buffer.h"
+#include "modules/audio_processing/aec3/render_signal_analyzer.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+#include "modules/audio_processing/test/echo_canceller_test_tools.h"
+#include "modules/audio_processing/utility/cascaded_biquad_filter.h"
+#include "rtc_base/arraysize.h"
+#include "rtc_base/numerics/safe_minmax.h"
+#include "rtc_base/random.h"
+#include "rtc_base/strings/string_builder.h"
+#include "system_wrappers/include/cpu_features_wrapper.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace aec3 {
+namespace {
+
+std::string ProduceDebugText(size_t num_render_channels, size_t delay) {
+ rtc::StringBuilder ss;
+ ss << "delay: " << delay << ", ";
+ ss << "num_render_channels:" << num_render_channels;
+ return ss.Release();
+}
+
+} // namespace
+
+class AdaptiveFirFilterOneTwoFourEightRenderChannels
+ : public ::testing::Test,
+ public ::testing::WithParamInterface<size_t> {};
+
+INSTANTIATE_TEST_SUITE_P(MultiChannel,
+ AdaptiveFirFilterOneTwoFourEightRenderChannels,
+ ::testing::Values(1, 2, 4, 8));
+
+#if defined(WEBRTC_HAS_NEON)
+// Verifies that the optimized methods for filter adaptation are similar to
+// their reference counterparts.
+TEST_P(AdaptiveFirFilterOneTwoFourEightRenderChannels,
+ FilterAdaptationNeonOptimizations) {
+ const size_t num_render_channels = GetParam();
+ for (size_t num_partitions : {2, 5, 12, 30, 50}) {
+ constexpr int kSampleRateHz = 48000;
+ constexpr size_t kNumBands = NumBandsForRate(kSampleRateHz);
+
+ std::unique_ptr<RenderDelayBuffer> render_delay_buffer(
+ RenderDelayBuffer::Create(EchoCanceller3Config(), kSampleRateHz,
+ num_render_channels));
+ Random random_generator(42U);
+ Block x(kNumBands, num_render_channels);
+ FftData S_C;
+ FftData S_Neon;
+ FftData G;
+ Aec3Fft fft;
+ std::vector<std::vector<FftData>> H_C(
+ num_partitions, std::vector<FftData>(num_render_channels));
+ std::vector<std::vector<FftData>> H_Neon(
+ num_partitions, std::vector<FftData>(num_render_channels));
+ for (size_t p = 0; p < num_partitions; ++p) {
+ for (size_t ch = 0; ch < num_render_channels; ++ch) {
+ H_C[p][ch].Clear();
+ H_Neon[p][ch].Clear();
+ }
+ }
+
+ for (int k = 0; k < 30; ++k) {
+ for (int band = 0; band < x.NumBands(); ++band) {
+ for (int ch = 0; ch < x.NumChannels(); ++ch) {
+ RandomizeSampleVector(&random_generator, x.View(band, ch));
+ }
+ }
+ render_delay_buffer->Insert(x);
+ if (k == 0) {
+ render_delay_buffer->Reset();
+ }
+ render_delay_buffer->PrepareCaptureProcessing();
+ }
+ auto* const render_buffer = render_delay_buffer->GetRenderBuffer();
+
+ for (size_t j = 0; j < G.re.size(); ++j) {
+ G.re[j] = j / 10001.f;
+ }
+ for (size_t j = 1; j < G.im.size() - 1; ++j) {
+ G.im[j] = j / 20001.f;
+ }
+ G.im[0] = 0.f;
+ G.im[G.im.size() - 1] = 0.f;
+
+ AdaptPartitions_Neon(*render_buffer, G, num_partitions, &H_Neon);
+ AdaptPartitions(*render_buffer, G, num_partitions, &H_C);
+ AdaptPartitions_Neon(*render_buffer, G, num_partitions, &H_Neon);
+ AdaptPartitions(*render_buffer, G, num_partitions, &H_C);
+
+ for (size_t p = 0; p < num_partitions; ++p) {
+ for (size_t ch = 0; ch < num_render_channels; ++ch) {
+ for (size_t j = 0; j < H_C[p][ch].re.size(); ++j) {
+ EXPECT_FLOAT_EQ(H_C[p][ch].re[j], H_Neon[p][ch].re[j]);
+ EXPECT_FLOAT_EQ(H_C[p][ch].im[j], H_Neon[p][ch].im[j]);
+ }
+ }
+ }
+
+ ApplyFilter_Neon(*render_buffer, num_partitions, H_Neon, &S_Neon);
+ ApplyFilter(*render_buffer, num_partitions, H_C, &S_C);
+ for (size_t j = 0; j < S_C.re.size(); ++j) {
+ EXPECT_NEAR(S_C.re[j], S_Neon.re[j], fabs(S_C.re[j] * 0.00001f));
+ EXPECT_NEAR(S_C.im[j], S_Neon.im[j], fabs(S_C.re[j] * 0.00001f));
+ }
+ }
+}
+
+// Verifies that the optimized method for frequency response computation is
+// bitexact to the reference counterpart.
+TEST_P(AdaptiveFirFilterOneTwoFourEightRenderChannels,
+ ComputeFrequencyResponseNeonOptimization) {
+ const size_t num_render_channels = GetParam();
+ for (size_t num_partitions : {2, 5, 12, 30, 50}) {
+ std::vector<std::vector<FftData>> H(
+ num_partitions, std::vector<FftData>(num_render_channels));
+ std::vector<std::array<float, kFftLengthBy2Plus1>> H2(num_partitions);
+ std::vector<std::array<float, kFftLengthBy2Plus1>> H2_Neon(num_partitions);
+
+ for (size_t p = 0; p < num_partitions; ++p) {
+ for (size_t ch = 0; ch < num_render_channels; ++ch) {
+ for (size_t k = 0; k < H[p][ch].re.size(); ++k) {
+ H[p][ch].re[k] = k + p / 3.f + ch;
+ H[p][ch].im[k] = p + k / 7.f - ch;
+ }
+ }
+ }
+
+ ComputeFrequencyResponse(num_partitions, H, &H2);
+ ComputeFrequencyResponse_Neon(num_partitions, H, &H2_Neon);
+
+ for (size_t p = 0; p < num_partitions; ++p) {
+ for (size_t k = 0; k < H2[p].size(); ++k) {
+ EXPECT_FLOAT_EQ(H2[p][k], H2_Neon[p][k]);
+ }
+ }
+ }
+}
+#endif
+
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+// Verifies that the optimized methods for filter adaptation are bitexact to
+// their reference counterparts.
+TEST_P(AdaptiveFirFilterOneTwoFourEightRenderChannels,
+ FilterAdaptationSse2Optimizations) {
+ const size_t num_render_channels = GetParam();
+ constexpr int kSampleRateHz = 48000;
+ constexpr size_t kNumBands = NumBandsForRate(kSampleRateHz);
+
+ bool use_sse2 = (GetCPUInfo(kSSE2) != 0);
+ if (use_sse2) {
+ for (size_t num_partitions : {2, 5, 12, 30, 50}) {
+ std::unique_ptr<RenderDelayBuffer> render_delay_buffer(
+ RenderDelayBuffer::Create(EchoCanceller3Config(), kSampleRateHz,
+ num_render_channels));
+ Random random_generator(42U);
+ Block x(kNumBands, num_render_channels);
+ FftData S_C;
+ FftData S_Sse2;
+ FftData G;
+ Aec3Fft fft;
+ std::vector<std::vector<FftData>> H_C(
+ num_partitions, std::vector<FftData>(num_render_channels));
+ std::vector<std::vector<FftData>> H_Sse2(
+ num_partitions, std::vector<FftData>(num_render_channels));
+ for (size_t p = 0; p < num_partitions; ++p) {
+ for (size_t ch = 0; ch < num_render_channels; ++ch) {
+ H_C[p][ch].Clear();
+ H_Sse2[p][ch].Clear();
+ }
+ }
+
+ for (size_t k = 0; k < 500; ++k) {
+ for (int band = 0; band < x.NumBands(); ++band) {
+ for (int ch = 0; ch < x.NumChannels(); ++ch) {
+ RandomizeSampleVector(&random_generator, x.View(band, ch));
+ }
+ }
+ render_delay_buffer->Insert(x);
+ if (k == 0) {
+ render_delay_buffer->Reset();
+ }
+ render_delay_buffer->PrepareCaptureProcessing();
+ auto* const render_buffer = render_delay_buffer->GetRenderBuffer();
+
+ ApplyFilter_Sse2(*render_buffer, num_partitions, H_Sse2, &S_Sse2);
+ ApplyFilter(*render_buffer, num_partitions, H_C, &S_C);
+ for (size_t j = 0; j < S_C.re.size(); ++j) {
+ EXPECT_FLOAT_EQ(S_C.re[j], S_Sse2.re[j]);
+ EXPECT_FLOAT_EQ(S_C.im[j], S_Sse2.im[j]);
+ }
+
+ std::for_each(G.re.begin(), G.re.end(),
+ [&](float& a) { a = random_generator.Rand<float>(); });
+ std::for_each(G.im.begin(), G.im.end(),
+ [&](float& a) { a = random_generator.Rand<float>(); });
+
+ AdaptPartitions_Sse2(*render_buffer, G, num_partitions, &H_Sse2);
+ AdaptPartitions(*render_buffer, G, num_partitions, &H_C);
+
+ for (size_t p = 0; p < num_partitions; ++p) {
+ for (size_t ch = 0; ch < num_render_channels; ++ch) {
+ for (size_t j = 0; j < H_C[p][ch].re.size(); ++j) {
+ EXPECT_FLOAT_EQ(H_C[p][ch].re[j], H_Sse2[p][ch].re[j]);
+ EXPECT_FLOAT_EQ(H_C[p][ch].im[j], H_Sse2[p][ch].im[j]);
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+// Verifies that the optimized methods for filter adaptation are bitexact to
+// their reference counterparts.
+TEST_P(AdaptiveFirFilterOneTwoFourEightRenderChannels,
+ FilterAdaptationAvx2Optimizations) {
+ const size_t num_render_channels = GetParam();
+ constexpr int kSampleRateHz = 48000;
+ constexpr size_t kNumBands = NumBandsForRate(kSampleRateHz);
+
+ bool use_avx2 = (GetCPUInfo(kAVX2) != 0);
+ if (use_avx2) {
+ for (size_t num_partitions : {2, 5, 12, 30, 50}) {
+ std::unique_ptr<RenderDelayBuffer> render_delay_buffer(
+ RenderDelayBuffer::Create(EchoCanceller3Config(), kSampleRateHz,
+ num_render_channels));
+ Random random_generator(42U);
+ Block x(kNumBands, num_render_channels);
+ FftData S_C;
+ FftData S_Avx2;
+ FftData G;
+ Aec3Fft fft;
+ std::vector<std::vector<FftData>> H_C(
+ num_partitions, std::vector<FftData>(num_render_channels));
+ std::vector<std::vector<FftData>> H_Avx2(
+ num_partitions, std::vector<FftData>(num_render_channels));
+ for (size_t p = 0; p < num_partitions; ++p) {
+ for (size_t ch = 0; ch < num_render_channels; ++ch) {
+ H_C[p][ch].Clear();
+ H_Avx2[p][ch].Clear();
+ }
+ }
+
+ for (size_t k = 0; k < 500; ++k) {
+ for (int band = 0; band < x.NumBands(); ++band) {
+ for (int ch = 0; ch < x.NumChannels(); ++ch) {
+ RandomizeSampleVector(&random_generator, x.View(band, ch));
+ }
+ }
+ render_delay_buffer->Insert(x);
+ if (k == 0) {
+ render_delay_buffer->Reset();
+ }
+ render_delay_buffer->PrepareCaptureProcessing();
+ auto* const render_buffer = render_delay_buffer->GetRenderBuffer();
+
+ ApplyFilter_Avx2(*render_buffer, num_partitions, H_Avx2, &S_Avx2);
+ ApplyFilter(*render_buffer, num_partitions, H_C, &S_C);
+ for (size_t j = 0; j < S_C.re.size(); ++j) {
+ EXPECT_FLOAT_EQ(S_C.re[j], S_Avx2.re[j]);
+ EXPECT_FLOAT_EQ(S_C.im[j], S_Avx2.im[j]);
+ }
+
+ std::for_each(G.re.begin(), G.re.end(),
+ [&](float& a) { a = random_generator.Rand<float>(); });
+ std::for_each(G.im.begin(), G.im.end(),
+ [&](float& a) { a = random_generator.Rand<float>(); });
+
+ AdaptPartitions_Avx2(*render_buffer, G, num_partitions, &H_Avx2);
+ AdaptPartitions(*render_buffer, G, num_partitions, &H_C);
+
+ for (size_t p = 0; p < num_partitions; ++p) {
+ for (size_t ch = 0; ch < num_render_channels; ++ch) {
+ for (size_t j = 0; j < H_C[p][ch].re.size(); ++j) {
+ EXPECT_FLOAT_EQ(H_C[p][ch].re[j], H_Avx2[p][ch].re[j]);
+ EXPECT_FLOAT_EQ(H_C[p][ch].im[j], H_Avx2[p][ch].im[j]);
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+// Verifies that the optimized method for frequency response computation is
+// bitexact to the reference counterpart.
+TEST_P(AdaptiveFirFilterOneTwoFourEightRenderChannels,
+ ComputeFrequencyResponseSse2Optimization) {
+ const size_t num_render_channels = GetParam();
+ bool use_sse2 = (GetCPUInfo(kSSE2) != 0);
+ if (use_sse2) {
+ for (size_t num_partitions : {2, 5, 12, 30, 50}) {
+ std::vector<std::vector<FftData>> H(
+ num_partitions, std::vector<FftData>(num_render_channels));
+ std::vector<std::array<float, kFftLengthBy2Plus1>> H2(num_partitions);
+ std::vector<std::array<float, kFftLengthBy2Plus1>> H2_Sse2(
+ num_partitions);
+
+ for (size_t p = 0; p < num_partitions; ++p) {
+ for (size_t ch = 0; ch < num_render_channels; ++ch) {
+ for (size_t k = 0; k < H[p][ch].re.size(); ++k) {
+ H[p][ch].re[k] = k + p / 3.f + ch;
+ H[p][ch].im[k] = p + k / 7.f - ch;
+ }
+ }
+ }
+
+ ComputeFrequencyResponse(num_partitions, H, &H2);
+ ComputeFrequencyResponse_Sse2(num_partitions, H, &H2_Sse2);
+
+ for (size_t p = 0; p < num_partitions; ++p) {
+ for (size_t k = 0; k < H2[p].size(); ++k) {
+ EXPECT_FLOAT_EQ(H2[p][k], H2_Sse2[p][k]);
+ }
+ }
+ }
+ }
+}
+
+// Verifies that the optimized method for frequency response computation is
+// bitexact to the reference counterpart.
+TEST_P(AdaptiveFirFilterOneTwoFourEightRenderChannels,
+ ComputeFrequencyResponseAvx2Optimization) {
+ const size_t num_render_channels = GetParam();
+ bool use_avx2 = (GetCPUInfo(kAVX2) != 0);
+ if (use_avx2) {
+ for (size_t num_partitions : {2, 5, 12, 30, 50}) {
+ std::vector<std::vector<FftData>> H(
+ num_partitions, std::vector<FftData>(num_render_channels));
+ std::vector<std::array<float, kFftLengthBy2Plus1>> H2(num_partitions);
+ std::vector<std::array<float, kFftLengthBy2Plus1>> H2_Avx2(
+ num_partitions);
+
+ for (size_t p = 0; p < num_partitions; ++p) {
+ for (size_t ch = 0; ch < num_render_channels; ++ch) {
+ for (size_t k = 0; k < H[p][ch].re.size(); ++k) {
+ H[p][ch].re[k] = k + p / 3.f + ch;
+ H[p][ch].im[k] = p + k / 7.f - ch;
+ }
+ }
+ }
+
+ ComputeFrequencyResponse(num_partitions, H, &H2);
+ ComputeFrequencyResponse_Avx2(num_partitions, H, &H2_Avx2);
+
+ for (size_t p = 0; p < num_partitions; ++p) {
+ for (size_t k = 0; k < H2[p].size(); ++k) {
+ EXPECT_FLOAT_EQ(H2[p][k], H2_Avx2[p][k]);
+ }
+ }
+ }
+ }
+}
+
+#endif
+
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+// Verifies that the check for non-null data dumper works.
+TEST(AdaptiveFirFilterDeathTest, NullDataDumper) {
+ EXPECT_DEATH(AdaptiveFirFilter(9, 9, 250, 1, DetectOptimization(), nullptr),
+ "");
+}
+
+// Verifies that the check for non-null filter output works.
+TEST(AdaptiveFirFilterDeathTest, NullFilterOutput) {
+ ApmDataDumper data_dumper(42);
+ AdaptiveFirFilter filter(9, 9, 250, 1, DetectOptimization(), &data_dumper);
+ std::unique_ptr<RenderDelayBuffer> render_delay_buffer(
+ RenderDelayBuffer::Create(EchoCanceller3Config(), 48000, 1));
+ EXPECT_DEATH(filter.Filter(*render_delay_buffer->GetRenderBuffer(), nullptr),
+ "");
+}
+
+#endif
+
+// Verifies that the filter statistics can be accessed when filter statistics
+// are turned on.
+TEST(AdaptiveFirFilterTest, FilterStatisticsAccess) {
+ ApmDataDumper data_dumper(42);
+ Aec3Optimization optimization = DetectOptimization();
+ AdaptiveFirFilter filter(9, 9, 250, 1, optimization, &data_dumper);
+ std::vector<std::array<float, kFftLengthBy2Plus1>> H2(
+ filter.max_filter_size_partitions(),
+ std::array<float, kFftLengthBy2Plus1>());
+ for (auto& H2_k : H2) {
+ H2_k.fill(0.f);
+ }
+
+ std::array<float, kFftLengthBy2Plus1> erl;
+ ComputeErl(optimization, H2, erl);
+ filter.ComputeFrequencyResponse(&H2);
+}
+
+// Verifies that the filter size is correctly reported.
+TEST(AdaptiveFirFilterTest, FilterSize) {
+ ApmDataDumper data_dumper(42);
+ for (size_t filter_size = 1; filter_size < 5; ++filter_size) {
+ AdaptiveFirFilter filter(filter_size, filter_size, 250, 1,
+ DetectOptimization(), &data_dumper);
+ EXPECT_EQ(filter_size, filter.SizePartitions());
+ }
+}
+
+class AdaptiveFirFilterMultiChannel
+ : public ::testing::Test,
+ public ::testing::WithParamInterface<std::tuple<size_t, size_t>> {};
+
+INSTANTIATE_TEST_SUITE_P(MultiChannel,
+ AdaptiveFirFilterMultiChannel,
+ ::testing::Combine(::testing::Values(1, 4),
+ ::testing::Values(1, 8)));
+
+// Verifies that the filter is able to properly filter a signal and to adapt
+// its coefficients.
+TEST_P(AdaptiveFirFilterMultiChannel, FilterAndAdapt) {
+ const size_t num_render_channels = std::get<0>(GetParam());
+ const size_t num_capture_channels = std::get<1>(GetParam());
+
+ constexpr int kSampleRateHz = 48000;
+ constexpr size_t kNumBands = NumBandsForRate(kSampleRateHz);
+ constexpr size_t kNumBlocksToProcessPerRenderChannel = 1000;
+
+ ApmDataDumper data_dumper(42);
+ EchoCanceller3Config config;
+
+ if (num_render_channels == 33) {
+ config.filter.refined = {13, 0.00005f, 0.0005f, 0.0001f, 2.f, 20075344.f};
+ config.filter.coarse = {13, 0.1f, 20075344.f};
+ config.filter.refined_initial = {12, 0.005f, 0.5f, 0.001f, 2.f, 20075344.f};
+ config.filter.coarse_initial = {12, 0.7f, 20075344.f};
+ }
+
+ AdaptiveFirFilter filter(
+ config.filter.refined.length_blocks, config.filter.refined.length_blocks,
+ config.filter.config_change_duration_blocks, num_render_channels,
+ DetectOptimization(), &data_dumper);
+ std::vector<std::vector<std::array<float, kFftLengthBy2Plus1>>> H2(
+ num_capture_channels, std::vector<std::array<float, kFftLengthBy2Plus1>>(
+ filter.max_filter_size_partitions(),
+ std::array<float, kFftLengthBy2Plus1>()));
+ std::vector<std::vector<float>> h(
+ num_capture_channels,
+ std::vector<float>(
+ GetTimeDomainLength(filter.max_filter_size_partitions()), 0.f));
+ Aec3Fft fft;
+ config.delay.default_delay = 1;
+ std::unique_ptr<RenderDelayBuffer> render_delay_buffer(
+ RenderDelayBuffer::Create(config, kSampleRateHz, num_render_channels));
+ CoarseFilterUpdateGain gain(config.filter.coarse,
+ config.filter.config_change_duration_blocks);
+ Random random_generator(42U);
+ Block x(kNumBands, num_render_channels);
+ std::vector<float> n(kBlockSize, 0.f);
+ std::vector<float> y(kBlockSize, 0.f);
+ AecState aec_state(EchoCanceller3Config{}, num_capture_channels);
+ RenderSignalAnalyzer render_signal_analyzer(config);
+ absl::optional<DelayEstimate> delay_estimate;
+ std::vector<float> e(kBlockSize, 0.f);
+ std::array<float, kFftLength> s_scratch;
+ std::vector<SubtractorOutput> output(num_capture_channels);
+ FftData S;
+ FftData G;
+ FftData E;
+ std::vector<std::array<float, kFftLengthBy2Plus1>> Y2(num_capture_channels);
+ std::vector<std::array<float, kFftLengthBy2Plus1>> E2_refined(
+ num_capture_channels);
+ std::array<float, kFftLengthBy2Plus1> E2_coarse;
+ // [B,A] = butter(2,100/8000,'high')
+ constexpr CascadedBiQuadFilter::BiQuadCoefficients
+ kHighPassFilterCoefficients = {{0.97261f, -1.94523f, 0.97261f},
+ {-1.94448f, 0.94598f}};
+ for (auto& Y2_ch : Y2) {
+ Y2_ch.fill(0.f);
+ }
+ for (auto& E2_refined_ch : E2_refined) {
+ E2_refined_ch.fill(0.f);
+ }
+ E2_coarse.fill(0.f);
+ for (auto& subtractor_output : output) {
+ subtractor_output.Reset();
+ }
+
+ constexpr float kScale = 1.0f / kFftLengthBy2;
+
+ for (size_t delay_samples : {0, 64, 150, 200, 301}) {
+ std::vector<DelayBuffer<float>> delay_buffer(
+ num_render_channels, DelayBuffer<float>(delay_samples));
+ std::vector<std::unique_ptr<CascadedBiQuadFilter>> x_hp_filter(
+ num_render_channels);
+ for (size_t ch = 0; ch < num_render_channels; ++ch) {
+ x_hp_filter[ch] = std::make_unique<CascadedBiQuadFilter>(
+ kHighPassFilterCoefficients, 1);
+ }
+ CascadedBiQuadFilter y_hp_filter(kHighPassFilterCoefficients, 1);
+
+ SCOPED_TRACE(ProduceDebugText(num_render_channels, delay_samples));
+ const size_t num_blocks_to_process =
+ kNumBlocksToProcessPerRenderChannel * num_render_channels;
+ for (size_t j = 0; j < num_blocks_to_process; ++j) {
+ std::fill(y.begin(), y.end(), 0.f);
+ for (size_t ch = 0; ch < num_render_channels; ++ch) {
+ RandomizeSampleVector(&random_generator, x.View(/*band=*/0, ch));
+ std::array<float, kBlockSize> y_channel;
+ delay_buffer[ch].Delay(x.View(/*band=*/0, ch), y_channel);
+ for (size_t k = 0; k < y.size(); ++k) {
+ y[k] += y_channel[k] / num_render_channels;
+ }
+ }
+
+ RandomizeSampleVector(&random_generator, n);
+ const float noise_scaling = 1.f / 100.f / num_render_channels;
+ for (size_t k = 0; k < y.size(); ++k) {
+ y[k] += n[k] * noise_scaling;
+ }
+
+ for (size_t ch = 0; ch < num_render_channels; ++ch) {
+ x_hp_filter[ch]->Process(x.View(/*band=*/0, ch));
+ }
+ y_hp_filter.Process(y);
+
+ render_delay_buffer->Insert(x);
+ if (j == 0) {
+ render_delay_buffer->Reset();
+ }
+ render_delay_buffer->PrepareCaptureProcessing();
+ auto* const render_buffer = render_delay_buffer->GetRenderBuffer();
+
+ render_signal_analyzer.Update(*render_buffer,
+ aec_state.MinDirectPathFilterDelay());
+
+ filter.Filter(*render_buffer, &S);
+ fft.Ifft(S, &s_scratch);
+ std::transform(y.begin(), y.end(), s_scratch.begin() + kFftLengthBy2,
+ e.begin(),
+ [&](float a, float b) { return a - b * kScale; });
+ std::for_each(e.begin(), e.end(),
+ [](float& a) { a = rtc::SafeClamp(a, -32768.f, 32767.f); });
+ fft.ZeroPaddedFft(e, Aec3Fft::Window::kRectangular, &E);
+ for (auto& o : output) {
+ for (size_t k = 0; k < kBlockSize; ++k) {
+ o.s_refined[k] = kScale * s_scratch[k + kFftLengthBy2];
+ }
+ }
+
+ std::array<float, kFftLengthBy2Plus1> render_power;
+ render_buffer->SpectralSum(filter.SizePartitions(), &render_power);
+ gain.Compute(render_power, render_signal_analyzer, E,
+ filter.SizePartitions(), false, &G);
+ filter.Adapt(*render_buffer, G, &h[0]);
+ aec_state.HandleEchoPathChange(EchoPathVariability(
+ false, EchoPathVariability::DelayAdjustment::kNone, false));
+
+ filter.ComputeFrequencyResponse(&H2[0]);
+ aec_state.Update(delay_estimate, H2, h, *render_buffer, E2_refined, Y2,
+ output);
+ }
+ // Verify that the filter is able to perform well.
+ EXPECT_LT(1000 * std::inner_product(e.begin(), e.end(), e.begin(), 0.f),
+ std::inner_product(y.begin(), y.end(), y.begin(), 0.f));
+ }
+}
+
+} // namespace aec3
+} // namespace webrtc
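The closing EXPECT_LT requires the residual-echo energy in e to be more than a factor of 1000 below the capture energy in y, i.e. at least 10*log10(1000) = 30 dB of echo attenuation after convergence. A hypothetical helper (not part of the test) that makes the criterion explicit:

    #include <cmath>
    #include <numeric>
    #include <vector>

    // Hypothetical helper: echo return loss enhancement in dB. The test above
    // passes exactly when this exceeds 30 dB (an energy ratio above 1000).
    float ErleDb(const std::vector<float>& y, const std::vector<float>& e) {
      const float y2 = std::inner_product(y.begin(), y.end(), y.begin(), 0.f);
      const float e2 = std::inner_product(e.begin(), e.end(), e.begin(), 0.f);
      return 10.f * std::log10(y2 / e2);
    }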
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/aec3_avx2_gn/moz.build b/third_party/libwebrtc/modules/audio_processing/aec3/aec3_avx2_gn/moz.build
new file mode 100644
index 0000000000..23ed46452f
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/aec3_avx2_gn/moz.build
@@ -0,0 +1,178 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+CXXFLAGS += [
+ "-mavx2",
+ "-mfma"
+]
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_APM_DEBUG_DUMP"] = "0"
+DEFINES["WEBRTC_ENABLE_AVX2"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/audio_processing/aec3/adaptive_fir_filter_avx2.cc",
+ "/third_party/libwebrtc/modules/audio_processing/aec3/adaptive_fir_filter_erl_avx2.cc",
+ "/third_party/libwebrtc/modules/audio_processing/aec3/fft_data_avx2.cc",
+ "/third_party/libwebrtc/modules/audio_processing/aec3/matched_filter_avx2.cc",
+ "/third_party/libwebrtc/modules/audio_processing/aec3/vector_math_avx2.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+Library("aec3_avx2_gn")
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/aec3_common.cc b/third_party/libwebrtc/modules/audio_processing/aec3/aec3_common.cc
new file mode 100644
index 0000000000..3ba10d5baf
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/aec3_common.cc
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/aec3_common.h"
+
+#include <stdint.h>
+
+#include "rtc_base/checks.h"
+#include "rtc_base/system/arch.h"
+#include "system_wrappers/include/cpu_features_wrapper.h"
+
+namespace webrtc {
+
+Aec3Optimization DetectOptimization() {
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+ if (GetCPUInfo(kAVX2) != 0) {
+ return Aec3Optimization::kAvx2;
+ } else if (GetCPUInfo(kSSE2) != 0) {
+ return Aec3Optimization::kSse2;
+ }
+#endif
+
+#if defined(WEBRTC_HAS_NEON)
+ return Aec3Optimization::kNeon;
+#else
+ return Aec3Optimization::kNone;
+#endif
+}
+
+float FastApproxLog2f(const float in) {
+ RTC_DCHECK_GT(in, .0f);
+ // Read and interpret float as uint32_t and then cast to float.
+ // This is done to extract the exponent (bits 30 - 23).
+ // "Right shift" of the exponent is then performed by multiplying
+ // with the constant (1/2^23). Finally, we subtract a constant to
+ // remove the bias (https://en.wikipedia.org/wiki/Exponent_bias).
+ union {
+ float dummy;
+ uint32_t a;
+ } x = {in};
+ float out = x.a;
+ out *= 1.1920929e-7f; // 1/2^23
+ out -= 126.942695f; // Remove bias.
+ return out;
+}
+
+float Log2TodB(const float in_log2) {
+ return 3.0102999566398121 * in_log2;
+}
+
+} // namespace webrtc
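The bit trick above works because a positive IEEE-754 float with unbiased exponent e and mantissa fraction f stores (e + 127 + f) * 2^23 in its raw bits, so scaling the raw bits by 2^-23 gives e + 127 + f, which approximates log2(in) + 127 since log2(1 + f) is close to f. The 126.942695 offset removes that bias together with a small correction that recenters the error of the linear mantissa term. A standalone sketch comparing against the exact log2 (illustration only):

    #include <cmath>
    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Standalone copy of the bit trick, for illustration.
    float ApproxLog2(float in) {
      uint32_t bits;
      std::memcpy(&bits, &in, sizeof(bits));  // Raw IEEE-754 representation.
      float out = static_cast<float>(bits);
      out *= 1.1920929e-7f;   // * 2^-23: exponent plus mantissa fraction.
      return out - 126.942695f;  // Remove the exponent bias.
    }

    int main() {
      for (float v : {0.5f, 1.f, 3.f, 1024.f}) {
        std::printf("log2(%7.1f): approx %8.5f, exact %8.5f\n",
                    static_cast<double>(v), ApproxLog2(v), std::log2(v));
      }
      return 0;
    }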
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/aec3_common.h b/third_party/libwebrtc/modules/audio_processing/aec3/aec3_common.h
new file mode 100644
index 0000000000..32b564f14b
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/aec3_common.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_AEC3_COMMON_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_AEC3_COMMON_H_
+
+#include <stddef.h>
+
+namespace webrtc {
+
+#ifdef _MSC_VER /* visual c++ */
+#define ALIGN16_BEG __declspec(align(16))
+#define ALIGN16_END
+#else /* gcc or icc */
+#define ALIGN16_BEG
+#define ALIGN16_END __attribute__((aligned(16)))
+#endif
+
+enum class Aec3Optimization { kNone, kSse2, kAvx2, kNeon };
+
+constexpr int kNumBlocksPerSecond = 250;
+
+constexpr int kMetricsReportingIntervalBlocks = 10 * kNumBlocksPerSecond;
+constexpr int kMetricsComputationBlocks = 3;
+constexpr int kMetricsCollectionBlocks =
+ kMetricsReportingIntervalBlocks - kMetricsComputationBlocks;
+
+constexpr size_t kFftLengthBy2 = 64;
+constexpr size_t kFftLengthBy2Plus1 = kFftLengthBy2 + 1;
+constexpr size_t kFftLengthBy2Minus1 = kFftLengthBy2 - 1;
+constexpr size_t kFftLength = 2 * kFftLengthBy2;
+constexpr size_t kFftLengthBy2Log2 = 6;
+
+constexpr int kRenderTransferQueueSizeFrames = 100;
+
+constexpr size_t kMaxNumBands = 3;
+constexpr size_t kFrameSize = 160;
+constexpr size_t kSubFrameLength = kFrameSize / 2;
+
+constexpr size_t kBlockSize = kFftLengthBy2;
+constexpr size_t kBlockSizeLog2 = kFftLengthBy2Log2;
+
+constexpr size_t kExtendedBlockSize = 2 * kFftLengthBy2;
+constexpr size_t kMatchedFilterWindowSizeSubBlocks = 32;
+constexpr size_t kMatchedFilterAlignmentShiftSizeSubBlocks =
+ kMatchedFilterWindowSizeSubBlocks * 3 / 4;
+
+// TODO(peah): Integrate this with how it is done inside audio_processing_impl.
+constexpr size_t NumBandsForRate(int sample_rate_hz) {
+ return static_cast<size_t>(sample_rate_hz / 16000);
+}
+
+constexpr bool ValidFullBandRate(int sample_rate_hz) {
+ return sample_rate_hz == 16000 || sample_rate_hz == 32000 ||
+ sample_rate_hz == 48000;
+}
+
+constexpr int GetTimeDomainLength(int filter_length_blocks) {
+ return filter_length_blocks * kFftLengthBy2;
+}
+
+constexpr size_t GetDownSampledBufferSize(size_t down_sampling_factor,
+ size_t num_matched_filters) {
+ return kBlockSize / down_sampling_factor *
+ (kMatchedFilterAlignmentShiftSizeSubBlocks * num_matched_filters +
+ kMatchedFilterWindowSizeSubBlocks + 1);
+}
+
+constexpr size_t GetRenderDelayBufferSize(size_t down_sampling_factor,
+ size_t num_matched_filters,
+ size_t filter_length_blocks) {
+ return GetDownSampledBufferSize(down_sampling_factor, num_matched_filters) /
+ (kBlockSize / down_sampling_factor) +
+ filter_length_blocks + 1;
+}
+
+// Detects what kind of optimizations to use for the code.
+Aec3Optimization DetectOptimization();
+
+// Computes the log2 of the input in a fast and approximate manner.
+float FastApproxLog2f(float in);
+
+// Returns dB from a power quantity expressed in log2.
+float Log2TodB(float in_log2);
+
+static_assert(1 << kBlockSizeLog2 == kBlockSize,
+ "Proper number of shifts for blocksize");
+
+static_assert(1 << kFftLengthBy2Log2 == kFftLengthBy2,
+ "Proper number of shifts for the fft length");
+
+static_assert(1 == NumBandsForRate(16000), "Number of bands for 16 kHz");
+static_assert(2 == NumBandsForRate(32000), "Number of bands for 32 kHz");
+static_assert(3 == NumBandsForRate(48000), "Number of bands for 48 kHz");
+
+static_assert(ValidFullBandRate(16000),
+ "Test that 16 kHz is a valid sample rate");
+static_assert(ValidFullBandRate(32000),
+ "Test that 32 kHz is a valid sample rate");
+static_assert(ValidFullBandRate(48000),
+ "Test that 48 kHz is a valid sample rate");
+static_assert(!ValidFullBandRate(8001),
+ "Test that 8001 Hz is not a valid sample rate");
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AEC3_AEC3_COMMON_H_
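To make the size arithmetic above concrete: with an illustrative down-sampling factor of 4, 5 matched filters and a 13-block filter, the down-sampled buffer holds (64/4) * (24*5 + 32 + 1) = 2448 samples and the render delay buffer 2448/16 + 13 + 1 = 167 blocks. Since the helpers are constexpr, these relations can be verified at compile time (the parameter values are examples, not the production configuration):

    #include "modules/audio_processing/aec3/aec3_common.h"

    static_assert(webrtc::GetTimeDomainLength(13) == 13 * 64,
                  "13 partitions of kFftLengthBy2 samples each");
    static_assert(webrtc::GetDownSampledBufferSize(4, 5) ==
                      (64 / 4) * (24 * 5 + 32 + 1),
                  "16 sub-block samples times 153 sub-blocks = 2448");
    static_assert(webrtc::GetRenderDelayBufferSize(4, 5, 13) ==
                      2448 / 16 + 13 + 1,
                  "153 + 13 + 1 = 167 blocks");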
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/aec3_common_gn/moz.build b/third_party/libwebrtc/modules/audio_processing/aec3/aec3_common_gn/moz.build
new file mode 100644
index 0000000000..4e67aab28b
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/aec3_common_gn/moz.build
@@ -0,0 +1,189 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("aec3_common_gn")
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/aec3_fft.cc b/third_party/libwebrtc/modules/audio_processing/aec3/aec3_fft.cc
new file mode 100644
index 0000000000..9cc8016f0b
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/aec3_fft.cc
@@ -0,0 +1,144 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/aec3_fft.h"
+
+#include <algorithm>
+#include <functional>
+#include <iterator>
+
+#include "rtc_base/checks.h"
+#include "system_wrappers/include/cpu_features_wrapper.h"
+
+namespace webrtc {
+
+namespace {
+
+const float kHanning64[kFftLengthBy2] = {
+ 0.f, 0.00248461f, 0.00991376f, 0.0222136f, 0.03926189f,
+ 0.06088921f, 0.08688061f, 0.11697778f, 0.15088159f, 0.1882551f,
+ 0.22872687f, 0.27189467f, 0.31732949f, 0.36457977f, 0.41317591f,
+ 0.46263495f, 0.51246535f, 0.56217185f, 0.61126047f, 0.65924333f,
+ 0.70564355f, 0.75f, 0.79187184f, 0.83084292f, 0.86652594f,
+ 0.89856625f, 0.92664544f, 0.95048443f, 0.96984631f, 0.98453864f,
+ 0.99441541f, 0.99937846f, 0.99937846f, 0.99441541f, 0.98453864f,
+ 0.96984631f, 0.95048443f, 0.92664544f, 0.89856625f, 0.86652594f,
+ 0.83084292f, 0.79187184f, 0.75f, 0.70564355f, 0.65924333f,
+ 0.61126047f, 0.56217185f, 0.51246535f, 0.46263495f, 0.41317591f,
+ 0.36457977f, 0.31732949f, 0.27189467f, 0.22872687f, 0.1882551f,
+ 0.15088159f, 0.11697778f, 0.08688061f, 0.06088921f, 0.03926189f,
+ 0.0222136f, 0.00991376f, 0.00248461f, 0.f};
+
+// Square root of a Hanning window, as given by the Matlab command
+// win = sqrt(hanning(128)).
+const float kSqrtHanning128[kFftLength] = {
+ 0.00000000000000f, 0.02454122852291f, 0.04906767432742f, 0.07356456359967f,
+ 0.09801714032956f, 0.12241067519922f, 0.14673047445536f, 0.17096188876030f,
+ 0.19509032201613f, 0.21910124015687f, 0.24298017990326f, 0.26671275747490f,
+ 0.29028467725446f, 0.31368174039889f, 0.33688985339222f, 0.35989503653499f,
+ 0.38268343236509f, 0.40524131400499f, 0.42755509343028f, 0.44961132965461f,
+ 0.47139673682600f, 0.49289819222978f, 0.51410274419322f, 0.53499761988710f,
+ 0.55557023301960f, 0.57580819141785f, 0.59569930449243f, 0.61523159058063f,
+ 0.63439328416365f, 0.65317284295378f, 0.67155895484702f, 0.68954054473707f,
+ 0.70710678118655f, 0.72424708295147f, 0.74095112535496f, 0.75720884650648f,
+ 0.77301045336274f, 0.78834642762661f, 0.80320753148064f, 0.81758481315158f,
+ 0.83146961230255f, 0.84485356524971f, 0.85772861000027f, 0.87008699110871f,
+ 0.88192126434835f, 0.89322430119552f, 0.90398929312344f, 0.91420975570353f,
+ 0.92387953251129f, 0.93299279883474f, 0.94154406518302f, 0.94952818059304f,
+ 0.95694033573221f, 0.96377606579544f, 0.97003125319454f, 0.97570213003853f,
+ 0.98078528040323f, 0.98527764238894f, 0.98917650996478f, 0.99247953459871f,
+ 0.99518472667220f, 0.99729045667869f, 0.99879545620517f, 0.99969881869620f,
+ 1.00000000000000f, 0.99969881869620f, 0.99879545620517f, 0.99729045667869f,
+ 0.99518472667220f, 0.99247953459871f, 0.98917650996478f, 0.98527764238894f,
+ 0.98078528040323f, 0.97570213003853f, 0.97003125319454f, 0.96377606579544f,
+ 0.95694033573221f, 0.94952818059304f, 0.94154406518302f, 0.93299279883474f,
+ 0.92387953251129f, 0.91420975570353f, 0.90398929312344f, 0.89322430119552f,
+ 0.88192126434835f, 0.87008699110871f, 0.85772861000027f, 0.84485356524971f,
+ 0.83146961230255f, 0.81758481315158f, 0.80320753148064f, 0.78834642762661f,
+ 0.77301045336274f, 0.75720884650648f, 0.74095112535496f, 0.72424708295147f,
+ 0.70710678118655f, 0.68954054473707f, 0.67155895484702f, 0.65317284295378f,
+ 0.63439328416365f, 0.61523159058063f, 0.59569930449243f, 0.57580819141785f,
+ 0.55557023301960f, 0.53499761988710f, 0.51410274419322f, 0.49289819222978f,
+ 0.47139673682600f, 0.44961132965461f, 0.42755509343028f, 0.40524131400499f,
+ 0.38268343236509f, 0.35989503653499f, 0.33688985339222f, 0.31368174039889f,
+ 0.29028467725446f, 0.26671275747490f, 0.24298017990326f, 0.21910124015687f,
+ 0.19509032201613f, 0.17096188876030f, 0.14673047445536f, 0.12241067519922f,
+ 0.09801714032956f, 0.07356456359967f, 0.04906767432742f, 0.02454122852291f};
+
+bool IsSse2Available() {
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+ return GetCPUInfo(kSSE2) != 0;
+#else
+ return false;
+#endif
+}
+
+} // namespace
+
+Aec3Fft::Aec3Fft() : ooura_fft_(IsSse2Available()) {}
+
+// TODO(peah): Change x to be std::array once the rest of the code allows this.
+void Aec3Fft::ZeroPaddedFft(rtc::ArrayView<const float> x,
+ Window window,
+ FftData* X) const {
+ RTC_DCHECK(X);
+ RTC_DCHECK_EQ(kFftLengthBy2, x.size());
+ std::array<float, kFftLength> fft;
+ std::fill(fft.begin(), fft.begin() + kFftLengthBy2, 0.f);
+ switch (window) {
+ case Window::kRectangular:
+ std::copy(x.begin(), x.end(), fft.begin() + kFftLengthBy2);
+ break;
+ case Window::kHanning:
+ std::transform(x.begin(), x.end(), std::begin(kHanning64),
+ fft.begin() + kFftLengthBy2,
+ [](float a, float b) { return a * b; });
+ break;
+ case Window::kSqrtHanning:
+ RTC_DCHECK_NOTREACHED();
+ break;
+ default:
+ RTC_DCHECK_NOTREACHED();
+ }
+
+ Fft(&fft, X);
+}
+
+void Aec3Fft::PaddedFft(rtc::ArrayView<const float> x,
+ rtc::ArrayView<const float> x_old,
+ Window window,
+ FftData* X) const {
+ RTC_DCHECK(X);
+ RTC_DCHECK_EQ(kFftLengthBy2, x.size());
+ RTC_DCHECK_EQ(kFftLengthBy2, x_old.size());
+ std::array<float, kFftLength> fft;
+
+ switch (window) {
+ case Window::kRectangular:
+ std::copy(x_old.begin(), x_old.end(), fft.begin());
+ std::copy(x.begin(), x.end(), fft.begin() + x_old.size());
+ break;
+ case Window::kHanning:
+ RTC_DCHECK_NOTREACHED();
+ break;
+ case Window::kSqrtHanning:
+ std::transform(x_old.begin(), x_old.end(), std::begin(kSqrtHanning128),
+ fft.begin(), std::multiplies<float>());
+ std::transform(x.begin(), x.end(),
+ std::begin(kSqrtHanning128) + x_old.size(),
+ fft.begin() + x_old.size(), std::multiplies<float>());
+ break;
+ default:
+ RTC_DCHECK_NOTREACHED();
+ }
+
+ Fft(&fft, X);
+}
+
+} // namespace webrtc
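The kSqrtHanning128 table equals sin(pi*k/128), so the two halves of the squared window sum to exactly one (sin^2 + cos^2 = 1); this is the property that lets 50%-overlap analysis/synthesis with this window reconstruct a signal without amplitude modulation. A small standalone check:

    #include <cassert>
    #include <cmath>

    int main() {
      const double kPi = std::acos(-1.0);
      for (int k = 0; k < 64; ++k) {
        const double w0 = std::sin(kPi * k / 128.0);         // kSqrtHanning128[k]
        const double w1 = std::sin(kPi * (k + 64) / 128.0);  // kSqrtHanning128[k+64]
        assert(std::abs(w0 * w0 + w1 * w1 - 1.0) < 1e-12);   // sin^2 + cos^2 = 1.
      }
      return 0;
    }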
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/aec3_fft.h b/third_party/libwebrtc/modules/audio_processing/aec3/aec3_fft.h
new file mode 100644
index 0000000000..c68de53963
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/aec3_fft.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_AEC3_FFT_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_AEC3_FFT_H_
+
+#include <array>
+
+#include "api/array_view.h"
+#include "common_audio/third_party/ooura/fft_size_128/ooura_fft.h"
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "modules/audio_processing/aec3/fft_data.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+// Wrapper class that provides 128-point real-valued FFT functionality through
+// the FftData type.
+class Aec3Fft {
+ public:
+ enum class Window { kRectangular, kHanning, kSqrtHanning };
+
+ Aec3Fft();
+
+ Aec3Fft(const Aec3Fft&) = delete;
+ Aec3Fft& operator=(const Aec3Fft&) = delete;
+
+ // Computes the FFT. Note that both the input and output are modified.
+ void Fft(std::array<float, kFftLength>* x, FftData* X) const {
+ RTC_DCHECK(x);
+ RTC_DCHECK(X);
+ ooura_fft_.Fft(x->data());
+ X->CopyFromPackedArray(*x);
+ }
+ // Computes the inverse Fft.
+ void Ifft(const FftData& X, std::array<float, kFftLength>* x) const {
+ RTC_DCHECK(x);
+ X.CopyToPackedArray(x);
+ ooura_fft_.InverseFft(x->data());
+ }
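+
+ // Usage sketch (informative, based on the round trips exercised in
+ // aec3_fft_unittest.cc): the Ooura transform is unnormalized, so a forward
+ // Fft followed by an Ifft returns the input scaled by kFftLengthBy2 (64):
+ //
+ //   Aec3Fft fft;
+ //   FftData X;
+ //   std::array<float, kFftLength> x_buf;  // time-domain input block
+ //   fft.Fft(&x_buf, &X);   // x_buf is used as scratch and is modified
+ //   fft.Ifft(X, &x_buf);   // x_buf now holds the original input times 64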
+
+ // Windows the input using the specified window, and then prepends
+ // kFftLengthBy2 zeros before computing the Fft.
+ void ZeroPaddedFft(rtc::ArrayView<const float> x,
+ Window window,
+ FftData* X) const;
+
+ // Concatenates the kFftLengthBy2-long x_old and x before computing the Fft.
+ // Note that x_old is passed read-only; the caller is responsible for copying
+ // x into x_old afterwards.
+ void PaddedFft(rtc::ArrayView<const float> x,
+ rtc::ArrayView<const float> x_old,
+ FftData* X) const {
+ PaddedFft(x, x_old, Window::kRectangular, X);
+ }
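+
+ // Caller-side pattern (a sketch mirroring aec3_fft_unittest.cc): since
+ // x_old is read-only here, the caller refreshes it after the call:
+ //
+ //   fft.PaddedFft(x, x_old, &X);
+ //   std::copy(x.begin(), x.end(), x_old.begin());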
+
+ // Padded Fft using a time-domain window.
+ void PaddedFft(rtc::ArrayView<const float> x,
+ rtc::ArrayView<const float> x_old,
+ Window window,
+ FftData* X) const;
+
+ private:
+ const OouraFft ooura_fft_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AEC3_AEC3_FFT_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/aec3_fft_gn/moz.build b/third_party/libwebrtc/modules/audio_processing/aec3/aec3_fft_gn/moz.build
new file mode 100644
index 0000000000..114f488076
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/aec3_fft_gn/moz.build
@@ -0,0 +1,204 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("aec3_fft_gn")
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/aec3_fft_unittest.cc b/third_party/libwebrtc/modules/audio_processing/aec3/aec3_fft_unittest.cc
new file mode 100644
index 0000000000..e60ef5b713
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/aec3_fft_unittest.cc
@@ -0,0 +1,213 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/aec3_fft.h"
+
+#include <algorithm>
+
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+
+// Verifies that the check for non-null input in Fft works.
+TEST(Aec3FftDeathTest, NullFftInput) {
+ Aec3Fft fft;
+ FftData X;
+ EXPECT_DEATH(fft.Fft(nullptr, &X), "");
+}
+
+// Verifies that the check for non-null output in Fft works.
+TEST(Aec3FftDeathTest, NullFftOutput) {
+ Aec3Fft fft;
+ std::array<float, kFftLength> x;
+ EXPECT_DEATH(fft.Fft(&x, nullptr), "");
+}
+
+// Verifies that the check for non-null output in Ifft works.
+TEST(Aec3FftDeathTest, NullIfftOutput) {
+ Aec3Fft fft;
+ FftData X;
+ EXPECT_DEATH(fft.Ifft(X, nullptr), "");
+}
+
+// Verifies that the check for non-null output in ZeroPaddedFft works.
+TEST(Aec3FftDeathTest, NullZeroPaddedFftOutput) {
+ Aec3Fft fft;
+ std::array<float, kFftLengthBy2> x;
+ EXPECT_DEATH(fft.ZeroPaddedFft(x, Aec3Fft::Window::kRectangular, nullptr),
+ "");
+}
+
+// Verifies that the check for input length in ZeroPaddedFft works.
+TEST(Aec3FftDeathTest, ZeroPaddedFftWrongInputLength) {
+ Aec3Fft fft;
+ FftData X;
+ std::array<float, kFftLengthBy2 - 1> x;
+ EXPECT_DEATH(fft.ZeroPaddedFft(x, Aec3Fft::Window::kRectangular, &X), "");
+}
+
+// Verifies that the check for non-null output in PaddedFft works.
+TEST(Aec3FftDeathTest, NullPaddedFftOutput) {
+ Aec3Fft fft;
+ std::array<float, kFftLengthBy2> x;
+ std::array<float, kFftLengthBy2> x_old;
+ EXPECT_DEATH(fft.PaddedFft(x, x_old, nullptr), "");
+}
+
+// Verifies that the check for input length in PaddedFft works.
+TEST(Aec3FftDeathTest, PaddedFftWrongInputLength) {
+ Aec3Fft fft;
+ FftData X;
+ std::array<float, kFftLengthBy2 - 1> x;
+ std::array<float, kFftLengthBy2> x_old;
+ EXPECT_DEATH(fft.PaddedFft(x, x_old, &X), "");
+}
+
+// Verifies that the check for the length of the old values in PaddedFft works.
+TEST(Aec3FftDeathTest, PaddedFftWrongOldValuesLength) {
+ Aec3Fft fft;
+ FftData X;
+ std::array<float, kFftLengthBy2> x;
+ std::array<float, kFftLengthBy2 - 1> x_old;
+ EXPECT_DEATH(fft.PaddedFft(x, x_old, &X), "");
+}
+
+#endif
+
+// Verifies that Fft works as intended.
+TEST(Aec3Fft, Fft) {
+ Aec3Fft fft;
+ FftData X;
+ std::array<float, kFftLength> x;
+ x.fill(0.f);
+ fft.Fft(&x, &X);
+ EXPECT_THAT(X.re, ::testing::Each(0.f));
+ EXPECT_THAT(X.im, ::testing::Each(0.f));
+
+ x.fill(0.f);
+ x[0] = 1.f;
+ fft.Fft(&x, &X);
+ EXPECT_THAT(X.re, ::testing::Each(1.f));
+ EXPECT_THAT(X.im, ::testing::Each(0.f));
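+
+ // These follow from standard DFT identities: a unit impulse maps to a flat
+ // spectrum, and (next) a constant input maps to a single DC bin of
+ // magnitude kFftLength.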
+
+ x.fill(1.f);
+ fft.Fft(&x, &X);
+ EXPECT_EQ(128.f, X.re[0]);
+ std::for_each(X.re.begin() + 1, X.re.end(),
+ [](float a) { EXPECT_EQ(0.f, a); });
+ EXPECT_THAT(X.im, ::testing::Each(0.f));
+}
+
+// Verifies that InverseFft works as intended.
+TEST(Aec3Fft, Ifft) {
+ Aec3Fft fft;
+ FftData X;
+ std::array<float, kFftLength> x;
+
+ X.re.fill(0.f);
+ X.im.fill(0.f);
+ fft.Ifft(X, &x);
+ EXPECT_THAT(x, ::testing::Each(0.f));
+
+ X.re.fill(1.f);
+ X.im.fill(0.f);
+ fft.Ifft(X, &x);
+ EXPECT_EQ(64.f, x[0]);
+ std::for_each(x.begin() + 1, x.end(), [](float a) { EXPECT_EQ(0.f, a); });
+
+ X.re.fill(0.f);
+ X.re[0] = 128;
+ X.im.fill(0.f);
+ fft.Ifft(X, &x);
+ EXPECT_THAT(x, ::testing::Each(64.f));
+}
+
+// Verifies that InverseFft and Fft work as intended.
+TEST(Aec3Fft, FftAndIfft) {
+ Aec3Fft fft;
+ FftData X;
+ std::array<float, kFftLength> x;
+ std::array<float, kFftLength> x_ref;
+
+ int v = 0;
+ for (int k = 0; k < 20; ++k) {
+ for (size_t j = 0; j < x.size(); ++j) {
+ x[j] = v++;
+ x_ref[j] = x[j] * 64.f;
+ }
+ fft.Fft(&x, &X);
+ fft.Ifft(X, &x);
+ for (size_t j = 0; j < x.size(); ++j) {
+ EXPECT_NEAR(x_ref[j], x[j], 0.001f);
+ }
+ }
+}
+
+// Verifies that ZeroPaddedFft works as intended.
+TEST(Aec3Fft, ZeroPaddedFft) {
+ Aec3Fft fft;
+ FftData X;
+ std::array<float, kFftLengthBy2> x_in;
+ std::array<float, kFftLength> x_ref;
+ std::array<float, kFftLength> x_out;
+
+ int v = 0;
+ x_ref.fill(0.f);
+ for (int k = 0; k < 20; ++k) {
+ for (size_t j = 0; j < x_in.size(); ++j) {
+ x_in[j] = v++;
+ x_ref[j + kFftLengthBy2] = x_in[j] * 64.f;
+ }
+ fft.ZeroPaddedFft(x_in, Aec3Fft::Window::kRectangular, &X);
+ fft.Ifft(X, &x_out);
+ for (size_t j = 0; j < x_out.size(); ++j) {
+ EXPECT_NEAR(x_ref[j], x_out[j], 0.1f);
+ }
+ }
+}
+
+// Verifies that PaddedFft works as intended.
+TEST(Aec3Fft, PaddedFft) {
+ Aec3Fft fft;
+ FftData X;
+ std::array<float, kFftLengthBy2> x_in;
+ std::array<float, kFftLength> x_out;
+ std::array<float, kFftLengthBy2> x_old;
+ std::array<float, kFftLengthBy2> x_old_ref;
+ std::array<float, kFftLength> x_ref;
+
+ int v = 0;
+ x_old.fill(0.f);
+ for (int k = 0; k < 20; ++k) {
+ for (size_t j = 0; j < x_in.size(); ++j) {
+ x_in[j] = v++;
+ }
+
+ std::copy(x_old.begin(), x_old.end(), x_ref.begin());
+ std::copy(x_in.begin(), x_in.end(), x_ref.begin() + kFftLengthBy2);
+ std::copy(x_in.begin(), x_in.end(), x_old_ref.begin());
+ std::for_each(x_ref.begin(), x_ref.end(), [](float& a) { a *= 64.f; });
+
+ fft.PaddedFft(x_in, x_old, &X);
+ std::copy(x_in.begin(), x_in.end(), x_old.begin());
+ fft.Ifft(X, &x_out);
+
+ for (size_t j = 0; j < x_out.size(); ++j) {
+ EXPECT_NEAR(x_ref[j], x_out[j], 0.1f);
+ }
+
+ EXPECT_EQ(x_old_ref, x_old);
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/aec3_gn/moz.build b/third_party/libwebrtc/modules/audio_processing/aec3/aec3_gn/moz.build
new file mode 100644
index 0000000000..571aae8a6a
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/aec3_gn/moz.build
@@ -0,0 +1,269 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_APM_DEBUG_DUMP"] = "0"
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/audio_processing/aec3/adaptive_fir_filter.cc",
+ "/third_party/libwebrtc/modules/audio_processing/aec3/adaptive_fir_filter_erl.cc",
+ "/third_party/libwebrtc/modules/audio_processing/aec3/aec3_common.cc",
+ "/third_party/libwebrtc/modules/audio_processing/aec3/aec3_fft.cc",
+ "/third_party/libwebrtc/modules/audio_processing/aec3/aec_state.cc",
+ "/third_party/libwebrtc/modules/audio_processing/aec3/alignment_mixer.cc",
+ "/third_party/libwebrtc/modules/audio_processing/aec3/api_call_jitter_metrics.cc",
+ "/third_party/libwebrtc/modules/audio_processing/aec3/block_buffer.cc",
+ "/third_party/libwebrtc/modules/audio_processing/aec3/block_delay_buffer.cc",
+ "/third_party/libwebrtc/modules/audio_processing/aec3/block_framer.cc",
+ "/third_party/libwebrtc/modules/audio_processing/aec3/block_processor.cc",
+ "/third_party/libwebrtc/modules/audio_processing/aec3/block_processor_metrics.cc",
+ "/third_party/libwebrtc/modules/audio_processing/aec3/clockdrift_detector.cc",
+ "/third_party/libwebrtc/modules/audio_processing/aec3/coarse_filter_update_gain.cc",
+ "/third_party/libwebrtc/modules/audio_processing/aec3/comfort_noise_generator.cc",
+ "/third_party/libwebrtc/modules/audio_processing/aec3/config_selector.cc",
+ "/third_party/libwebrtc/modules/audio_processing/aec3/decimator.cc",
+ "/third_party/libwebrtc/modules/audio_processing/aec3/dominant_nearend_detector.cc",
+ "/third_party/libwebrtc/modules/audio_processing/aec3/downsampled_render_buffer.cc",
+ "/third_party/libwebrtc/modules/audio_processing/aec3/echo_audibility.cc",
+ "/third_party/libwebrtc/modules/audio_processing/aec3/echo_canceller3.cc",
+ "/third_party/libwebrtc/modules/audio_processing/aec3/echo_path_delay_estimator.cc",
+ "/third_party/libwebrtc/modules/audio_processing/aec3/echo_path_variability.cc",
+ "/third_party/libwebrtc/modules/audio_processing/aec3/echo_remover.cc",
+ "/third_party/libwebrtc/modules/audio_processing/aec3/echo_remover_metrics.cc",
+ "/third_party/libwebrtc/modules/audio_processing/aec3/erl_estimator.cc",
+ "/third_party/libwebrtc/modules/audio_processing/aec3/erle_estimator.cc",
+ "/third_party/libwebrtc/modules/audio_processing/aec3/fft_buffer.cc",
+ "/third_party/libwebrtc/modules/audio_processing/aec3/filter_analyzer.cc",
+ "/third_party/libwebrtc/modules/audio_processing/aec3/frame_blocker.cc",
+ "/third_party/libwebrtc/modules/audio_processing/aec3/fullband_erle_estimator.cc",
+ "/third_party/libwebrtc/modules/audio_processing/aec3/matched_filter.cc",
+ "/third_party/libwebrtc/modules/audio_processing/aec3/matched_filter_lag_aggregator.cc",
+ "/third_party/libwebrtc/modules/audio_processing/aec3/moving_average.cc",
+ "/third_party/libwebrtc/modules/audio_processing/aec3/multi_channel_content_detector.cc",
+ "/third_party/libwebrtc/modules/audio_processing/aec3/refined_filter_update_gain.cc",
+ "/third_party/libwebrtc/modules/audio_processing/aec3/render_buffer.cc",
+ "/third_party/libwebrtc/modules/audio_processing/aec3/render_delay_buffer.cc",
+ "/third_party/libwebrtc/modules/audio_processing/aec3/render_delay_controller.cc",
+ "/third_party/libwebrtc/modules/audio_processing/aec3/render_delay_controller_metrics.cc",
+ "/third_party/libwebrtc/modules/audio_processing/aec3/render_signal_analyzer.cc",
+ "/third_party/libwebrtc/modules/audio_processing/aec3/residual_echo_estimator.cc",
+ "/third_party/libwebrtc/modules/audio_processing/aec3/reverb_decay_estimator.cc",
+ "/third_party/libwebrtc/modules/audio_processing/aec3/reverb_frequency_response.cc",
+ "/third_party/libwebrtc/modules/audio_processing/aec3/reverb_model.cc",
+ "/third_party/libwebrtc/modules/audio_processing/aec3/reverb_model_estimator.cc",
+ "/third_party/libwebrtc/modules/audio_processing/aec3/signal_dependent_erle_estimator.cc",
+ "/third_party/libwebrtc/modules/audio_processing/aec3/spectrum_buffer.cc",
+ "/third_party/libwebrtc/modules/audio_processing/aec3/stationarity_estimator.cc",
+ "/third_party/libwebrtc/modules/audio_processing/aec3/subband_erle_estimator.cc",
+ "/third_party/libwebrtc/modules/audio_processing/aec3/subband_nearend_detector.cc",
+ "/third_party/libwebrtc/modules/audio_processing/aec3/subtractor.cc",
+ "/third_party/libwebrtc/modules/audio_processing/aec3/subtractor_output.cc",
+ "/third_party/libwebrtc/modules/audio_processing/aec3/subtractor_output_analyzer.cc",
+ "/third_party/libwebrtc/modules/audio_processing/aec3/suppression_filter.cc",
+ "/third_party/libwebrtc/modules/audio_processing/aec3/suppression_gain.cc",
+ "/third_party/libwebrtc/modules/audio_processing/aec3/transparent_mode.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("aec3_gn")
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/aec_state.cc b/third_party/libwebrtc/modules/audio_processing/aec3/aec_state.cc
new file mode 100644
index 0000000000..81fd91fab9
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/aec_state.cc
@@ -0,0 +1,481 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/aec_state.h"
+
+#include <math.h>
+
+#include <algorithm>
+#include <numeric>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/array_view.h"
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+#include "rtc_base/checks.h"
+#include "system_wrappers/include/field_trial.h"
+
+namespace webrtc {
+namespace {
+
+bool DeactivateInitialStateResetAtEchoPathChange() {
+ return field_trial::IsEnabled(
+ "WebRTC-Aec3DeactivateInitialStateResetKillSwitch");
+}
+
+bool FullResetAtEchoPathChange() {
+ return !field_trial::IsEnabled("WebRTC-Aec3AecStateFullResetKillSwitch");
+}
+
+bool SubtractorAnalyzerResetAtEchoPathChange() {
+ return !field_trial::IsEnabled(
+ "WebRTC-Aec3AecStateSubtractorAnalyzerResetKillSwitch");
+}
+
+void ComputeAvgRenderReverb(
+ const SpectrumBuffer& spectrum_buffer,
+ int delay_blocks,
+ float reverb_decay,
+ ReverbModel* reverb_model,
+ rtc::ArrayView<float, kFftLengthBy2Plus1> reverb_power_spectrum) {
+ RTC_DCHECK(reverb_model);
+ const size_t num_render_channels = spectrum_buffer.buffer[0].size();
+ int idx_at_delay =
+ spectrum_buffer.OffsetIndex(spectrum_buffer.read, delay_blocks);
+ int idx_past = spectrum_buffer.IncIndex(idx_at_delay);
+
+ std::array<float, kFftLengthBy2Plus1> X2_data;
+ rtc::ArrayView<const float> X2;
+ if (num_render_channels > 1) {
+ auto average_channels =
+ [](size_t num_render_channels,
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>>
+ spectrum_band_0,
+ rtc::ArrayView<float, kFftLengthBy2Plus1> render_power) {
+ std::fill(render_power.begin(), render_power.end(), 0.f);
+ for (size_t ch = 0; ch < num_render_channels; ++ch) {
+ for (size_t k = 0; k < kFftLengthBy2Plus1; ++k) {
+ render_power[k] += spectrum_band_0[ch][k];
+ }
+ }
+ const float normalizer = 1.f / num_render_channels;
+ for (size_t k = 0; k < kFftLengthBy2Plus1; ++k) {
+ render_power[k] *= normalizer;
+ }
+ };
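+ // That is, render_power[k] = (1/C) * sum over channels of
+ // spectrum_band_0[ch][k]: the per-bin arithmetic mean across the C render
+ // channels.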
+ average_channels(num_render_channels, spectrum_buffer.buffer[idx_past],
+ X2_data);
+ reverb_model->UpdateReverbNoFreqShaping(
+ X2_data, /*power_spectrum_scaling=*/1.0f, reverb_decay);
+
+ average_channels(num_render_channels, spectrum_buffer.buffer[idx_at_delay],
+ X2_data);
+ X2 = X2_data;
+ } else {
+ reverb_model->UpdateReverbNoFreqShaping(
+ spectrum_buffer.buffer[idx_past][/*channel=*/0],
+ /*power_spectrum_scaling=*/1.0f, reverb_decay);
+
+ X2 = spectrum_buffer.buffer[idx_at_delay][/*channel=*/0];
+ }
+
+ rtc::ArrayView<const float, kFftLengthBy2Plus1> reverb_power =
+ reverb_model->reverb();
+ for (size_t k = 0; k < X2.size(); ++k) {
+ reverb_power_spectrum[k] = X2[k] + reverb_power[k];
+ }
+}
+
+} // namespace
+
+std::atomic<int> AecState::instance_count_(0);
+
+void AecState::GetResidualEchoScaling(
+ rtc::ArrayView<float> residual_scaling) const {
+ bool filter_has_had_time_to_converge;
+ if (config_.filter.conservative_initial_phase) {
+ filter_has_had_time_to_converge =
+ strong_not_saturated_render_blocks_ >= 1.5f * kNumBlocksPerSecond;
+ } else {
+ filter_has_had_time_to_converge =
+ strong_not_saturated_render_blocks_ >= 0.8f * kNumBlocksPerSecond;
+ }
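+ // Since the counters are in blocks, these thresholds correspond to 1.5 s
+ // (conservative initial phase) and 0.8 s of strong, unsaturated render,
+ // respectively.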
+ echo_audibility_.GetResidualEchoScaling(filter_has_had_time_to_converge,
+ residual_scaling);
+}
+
+AecState::AecState(const EchoCanceller3Config& config,
+ size_t num_capture_channels)
+ : data_dumper_(new ApmDataDumper(instance_count_.fetch_add(1) + 1)),
+ config_(config),
+ num_capture_channels_(num_capture_channels),
+ deactivate_initial_state_reset_at_echo_path_change_(
+ DeactivateInitialStateResetAtEchoPathChange()),
+ full_reset_at_echo_path_change_(FullResetAtEchoPathChange()),
+ subtractor_analyzer_reset_at_echo_path_change_(
+ SubtractorAnalyzerResetAtEchoPathChange()),
+ initial_state_(config_),
+ delay_state_(config_, num_capture_channels_),
+ transparent_state_(TransparentMode::Create(config_)),
+ filter_quality_state_(config_, num_capture_channels_),
+ erl_estimator_(2 * kNumBlocksPerSecond),
+ erle_estimator_(2 * kNumBlocksPerSecond, config_, num_capture_channels_),
+ filter_analyzer_(config_, num_capture_channels_),
+ echo_audibility_(
+ config_.echo_audibility.use_stationarity_properties_at_init),
+ reverb_model_estimator_(config_, num_capture_channels_),
+ subtractor_output_analyzer_(num_capture_channels_) {}
+
+AecState::~AecState() = default;
+
+void AecState::HandleEchoPathChange(
+ const EchoPathVariability& echo_path_variability) {
+ const auto full_reset = [&]() {
+ filter_analyzer_.Reset();
+ capture_signal_saturation_ = false;
+ strong_not_saturated_render_blocks_ = 0;
+ blocks_with_active_render_ = 0;
+ if (!deactivate_initial_state_reset_at_echo_path_change_) {
+ initial_state_.Reset();
+ }
+ if (transparent_state_) {
+ transparent_state_->Reset();
+ }
+ erle_estimator_.Reset(true);
+ erl_estimator_.Reset();
+ filter_quality_state_.Reset();
+ };
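+
+ // Reset policy implemented below: a reported delay change triggers a full
+ // reset, whereas a pure gain change only soft-resets the ERLE estimator.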
+
+ // TODO(peah): Refine the reset scheme according to the type of gain and
+ // delay adjustment.
+
+ if (full_reset_at_echo_path_change_ &&
+ echo_path_variability.delay_change !=
+ EchoPathVariability::DelayAdjustment::kNone) {
+ full_reset();
+ } else if (echo_path_variability.gain_change) {
+ erle_estimator_.Reset(false);
+ }
+ if (subtractor_analyzer_reset_at_echo_path_change_) {
+ subtractor_output_analyzer_.HandleEchoPathChange();
+ }
+}
+
+void AecState::Update(
+ const absl::optional<DelayEstimate>& external_delay,
+ rtc::ArrayView<const std::vector<std::array<float, kFftLengthBy2Plus1>>>
+ adaptive_filter_frequency_responses,
+ rtc::ArrayView<const std::vector<float>> adaptive_filter_impulse_responses,
+ const RenderBuffer& render_buffer,
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>> E2_refined,
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>> Y2,
+ rtc::ArrayView<const SubtractorOutput> subtractor_output) {
+ RTC_DCHECK_EQ(num_capture_channels_, Y2.size());
+ RTC_DCHECK_EQ(num_capture_channels_, subtractor_output.size());
+ RTC_DCHECK_EQ(num_capture_channels_,
+ adaptive_filter_frequency_responses.size());
+ RTC_DCHECK_EQ(num_capture_channels_,
+ adaptive_filter_impulse_responses.size());
+
+ // Analyze the filter outputs and filters.
+ bool any_filter_converged;
+ bool any_coarse_filter_converged;
+ bool all_filters_diverged;
+ subtractor_output_analyzer_.Update(subtractor_output, &any_filter_converged,
+ &any_coarse_filter_converged,
+ &all_filters_diverged);
+
+ bool any_filter_consistent;
+ float max_echo_path_gain;
+ filter_analyzer_.Update(adaptive_filter_impulse_responses, render_buffer,
+ &any_filter_consistent, &max_echo_path_gain);
+
+ // Estimate the direct path delay of the filter.
+ if (config_.filter.use_linear_filter) {
+ delay_state_.Update(filter_analyzer_.FilterDelaysBlocks(), external_delay,
+ strong_not_saturated_render_blocks_);
+ }
+
+ const Block& aligned_render_block =
+ render_buffer.GetBlock(-delay_state_.MinDirectPathFilterDelay());
+
+ // Update render counters.
+ bool active_render = false;
+ for (int ch = 0; ch < aligned_render_block.NumChannels(); ++ch) {
+ const float render_energy =
+ std::inner_product(aligned_render_block.begin(/*block=*/0, ch),
+ aligned_render_block.end(/*block=*/0, ch),
+ aligned_render_block.begin(/*block=*/0, ch), 0.f);
+ if (render_energy > (config_.render_levels.active_render_limit *
+ config_.render_levels.active_render_limit) *
+ kFftLengthBy2) {
+ active_render = true;
+ break;
+ }
+ }
+ blocks_with_active_render_ += active_render ? 1 : 0;
+ strong_not_saturated_render_blocks_ +=
+ active_render && !SaturatedCapture() ? 1 : 0;
+
+ std::array<float, kFftLengthBy2Plus1> avg_render_spectrum_with_reverb;
+
+ ComputeAvgRenderReverb(render_buffer.GetSpectrumBuffer(),
+ delay_state_.MinDirectPathFilterDelay(),
+ ReverbDecay(/*mild=*/false), &avg_render_reverb_,
+ avg_render_spectrum_with_reverb);
+
+ if (config_.echo_audibility.use_stationarity_properties) {
+ // Update the echo audibility evaluator.
+ echo_audibility_.Update(render_buffer, avg_render_reverb_.reverb(),
+ delay_state_.MinDirectPathFilterDelay(),
+ delay_state_.ExternalDelayReported());
+ }
+
+ // Update the ERL and ERLE measures.
+ if (initial_state_.TransitionTriggered()) {
+ erle_estimator_.Reset(false);
+ }
+
+ erle_estimator_.Update(render_buffer, adaptive_filter_frequency_responses,
+ avg_render_spectrum_with_reverb, Y2, E2_refined,
+ subtractor_output_analyzer_.ConvergedFilters());
+
+ erl_estimator_.Update(
+ subtractor_output_analyzer_.ConvergedFilters(),
+ render_buffer.Spectrum(delay_state_.MinDirectPathFilterDelay()), Y2);
+
+ // Detect and flag echo saturation.
+ if (config_.ep_strength.echo_can_saturate) {
+ saturation_detector_.Update(aligned_render_block, SaturatedCapture(),
+ UsableLinearEstimate(), subtractor_output,
+ max_echo_path_gain);
+ } else {
+ RTC_DCHECK(!saturation_detector_.SaturatedEcho());
+ }
+
+ // Update the decision on whether to use the initial state parameter set.
+ initial_state_.Update(active_render, SaturatedCapture());
+
+ // Detect whether the transparent mode should be activated.
+ if (transparent_state_) {
+ transparent_state_->Update(
+ delay_state_.MinDirectPathFilterDelay(), any_filter_consistent,
+ any_filter_converged, any_coarse_filter_converged, all_filters_diverged,
+ active_render, SaturatedCapture());
+ }
+
+ // Analyze the quality of the filter.
+ filter_quality_state_.Update(active_render, TransparentModeActive(),
+ SaturatedCapture(), external_delay,
+ any_filter_converged);
+
+ // Update the reverb estimate.
+ const bool stationary_block =
+ config_.echo_audibility.use_stationarity_properties &&
+ echo_audibility_.IsBlockStationary();
+
+ reverb_model_estimator_.Update(
+ filter_analyzer_.GetAdjustedFilters(),
+ adaptive_filter_frequency_responses,
+ erle_estimator_.GetInstLinearQualityEstimates(),
+ delay_state_.DirectPathFilterDelays(),
+ filter_quality_state_.UsableLinearFilterOutputs(), stationary_block);
+
+ erle_estimator_.Dump(data_dumper_);
+ reverb_model_estimator_.Dump(data_dumper_.get());
+ data_dumper_->DumpRaw("aec3_active_render", active_render);
+ data_dumper_->DumpRaw("aec3_erl", Erl());
+ data_dumper_->DumpRaw("aec3_erl_time_domain", ErlTimeDomain());
+ data_dumper_->DumpRaw("aec3_erle", Erle(/*onset_compensated=*/false)[0]);
+ data_dumper_->DumpRaw("aec3_erle_onset_compensated",
+ Erle(/*onset_compensated=*/true)[0]);
+ data_dumper_->DumpRaw("aec3_usable_linear_estimate", UsableLinearEstimate());
+ data_dumper_->DumpRaw("aec3_transparent_mode", TransparentModeActive());
+ data_dumper_->DumpRaw("aec3_filter_delay",
+ filter_analyzer_.MinFilterDelayBlocks());
+
+ data_dumper_->DumpRaw("aec3_any_filter_consistent", any_filter_consistent);
+ data_dumper_->DumpRaw("aec3_initial_state",
+ initial_state_.InitialStateActive());
+ data_dumper_->DumpRaw("aec3_capture_saturation", SaturatedCapture());
+ data_dumper_->DumpRaw("aec3_echo_saturation", SaturatedEcho());
+ data_dumper_->DumpRaw("aec3_any_filter_converged", any_filter_converged);
+ data_dumper_->DumpRaw("aec3_any_coarse_filter_converged",
+ any_coarse_filter_converged);
+ data_dumper_->DumpRaw("aec3_all_filters_diverged", all_filters_diverged);
+
+ data_dumper_->DumpRaw("aec3_external_delay_avaliable",
+ external_delay ? 1 : 0);
+ data_dumper_->DumpRaw("aec3_filter_tail_freq_resp_est",
+ GetReverbFrequencyResponse());
+ data_dumper_->DumpRaw("aec3_subtractor_y2", subtractor_output[0].y2);
+ data_dumper_->DumpRaw("aec3_subtractor_e2_coarse",
+ subtractor_output[0].e2_coarse);
+ data_dumper_->DumpRaw("aec3_subtractor_e2_refined",
+ subtractor_output[0].e2_refined);
+}
+
+AecState::InitialState::InitialState(const EchoCanceller3Config& config)
+ : conservative_initial_phase_(config.filter.conservative_initial_phase),
+ initial_state_seconds_(config.filter.initial_state_seconds) {
+ Reset();
+}
+
+void AecState::InitialState::Reset() {
+ initial_state_ = true;
+ strong_not_saturated_render_blocks_ = 0;
+}
+
+void AecState::InitialState::Update(bool active_render,
+ bool saturated_capture) {
+ strong_not_saturated_render_blocks_ +=
+ active_render && !saturated_capture ? 1 : 0;
+
+ // Flag whether the initial state is still active.
+ bool prev_initial_state = initial_state_;
+ if (conservative_initial_phase_) {
+ initial_state_ =
+ strong_not_saturated_render_blocks_ < 5 * kNumBlocksPerSecond;
+ } else {
+ initial_state_ = strong_not_saturated_render_blocks_ <
+ initial_state_seconds_ * kNumBlocksPerSecond;
+ }
+
+ // Flag whether the transition from the initial state has started.
+ transition_triggered_ = !initial_state_ && prev_initial_state;
+}
+
+AecState::FilterDelay::FilterDelay(const EchoCanceller3Config& config,
+ size_t num_capture_channels)
+ : delay_headroom_blocks_(config.delay.delay_headroom_samples / kBlockSize),
+ filter_delays_blocks_(num_capture_channels, delay_headroom_blocks_),
+ min_filter_delay_(delay_headroom_blocks_) {}
+
+void AecState::FilterDelay::Update(
+ rtc::ArrayView<const int> analyzer_filter_delay_estimates_blocks,
+ const absl::optional<DelayEstimate>& external_delay,
+ size_t blocks_with_proper_filter_adaptation) {
+ // Update the delay based on the external delay.
+ if (external_delay &&
+ (!external_delay_ || external_delay_->delay != external_delay->delay)) {
+ external_delay_ = external_delay;
+ external_delay_reported_ = true;
+ }
+
+ // Override the estimated delay if it is not certain that the filter has had
+ // time to converge.
+ const bool delay_estimator_may_not_have_converged =
+ blocks_with_proper_filter_adaptation < 2 * kNumBlocksPerSecond;
+ if (delay_estimator_may_not_have_converged && external_delay_) {
+ const int delay_guess = delay_headroom_blocks_;
+ std::fill(filter_delays_blocks_.begin(), filter_delays_blocks_.end(),
+ delay_guess);
+ } else {
+ RTC_DCHECK_EQ(filter_delays_blocks_.size(),
+ analyzer_filter_delay_estimates_blocks.size());
+ std::copy(analyzer_filter_delay_estimates_blocks.begin(),
+ analyzer_filter_delay_estimates_blocks.end(),
+ filter_delays_blocks_.begin());
+ }
+
+ min_filter_delay_ = *std::min_element(filter_delays_blocks_.begin(),
+ filter_delays_blocks_.end());
+}
+
+AecState::FilteringQualityAnalyzer::FilteringQualityAnalyzer(
+ const EchoCanceller3Config& config,
+ size_t num_capture_channels)
+ : use_linear_filter_(config.filter.use_linear_filter),
+ usable_linear_filter_estimates_(num_capture_channels, false) {}
+
+void AecState::FilteringQualityAnalyzer::Reset() {
+ std::fill(usable_linear_filter_estimates_.begin(),
+ usable_linear_filter_estimates_.end(), false);
+ overall_usable_linear_estimates_ = false;
+ filter_update_blocks_since_reset_ = 0;
+}
+
+void AecState::FilteringQualityAnalyzer::Update(
+ bool active_render,
+ bool transparent_mode,
+ bool saturated_capture,
+ const absl::optional<DelayEstimate>& external_delay,
+ bool any_filter_converged) {
+ // Update blocks counter.
+ const bool filter_update = active_render && !saturated_capture;
+ filter_update_blocks_since_reset_ += filter_update ? 1 : 0;
+ filter_update_blocks_since_start_ += filter_update ? 1 : 0;
+
+ // Store convergence flag when observed.
+ convergence_seen_ = convergence_seen_ || any_filter_converged;
+
+ // Verify requirements for achieving a decent filter. The requirements for
+ // filter adaptation at call startup are more restrictive than after an
+ // in-call reset.
+ const bool sufficient_data_to_converge_at_startup =
+ filter_update_blocks_since_start_ > kNumBlocksPerSecond * 0.4f;
+ const bool sufficient_data_to_converge_at_reset =
+ sufficient_data_to_converge_at_startup &&
+ filter_update_blocks_since_reset_ > kNumBlocksPerSecond * 0.2f;
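+ // With kNumBlocksPerSecond blocks per second, this amounts to roughly
+ // 0.4 s of filter updates since startup and 0.2 s since the last reset.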
+
+ // The linear filter can only be used if it has had time to converge.
+ overall_usable_linear_estimates_ = sufficient_data_to_converge_at_startup &&
+ sufficient_data_to_converge_at_reset;
+
+ // The linear filter can only be used if an external delay or convergence
+ // has been identified.
+ overall_usable_linear_estimates_ =
+ overall_usable_linear_estimates_ && (external_delay || convergence_seen_);
+
+ // If transparent mode is on, deactivate use of the linear filter.
+ overall_usable_linear_estimates_ =
+ overall_usable_linear_estimates_ && !transparent_mode;
+
+ if (use_linear_filter_) {
+ std::fill(usable_linear_filter_estimates_.begin(),
+ usable_linear_filter_estimates_.end(),
+ overall_usable_linear_estimates_);
+ }
+}
+
+void AecState::SaturationDetector::Update(
+ const Block& x,
+ bool saturated_capture,
+ bool usable_linear_estimate,
+ rtc::ArrayView<const SubtractorOutput> subtractor_output,
+ float echo_path_gain) {
+ saturated_echo_ = false;
+ if (!saturated_capture) {
+ return;
+ }
+
+ if (usable_linear_estimate) {
+ constexpr float kSaturationThreshold = 20000.f;
+ for (size_t ch = 0; ch < subtractor_output.size(); ++ch) {
+ saturated_echo_ =
+ saturated_echo_ ||
+ (subtractor_output[ch].s_refined_max_abs > kSaturationThreshold ||
+ subtractor_output[ch].s_coarse_max_abs > kSaturationThreshold);
+ }
+ } else {
+ float max_sample = 0.f;
+ for (int ch = 0; ch < x.NumChannels(); ++ch) {
+ rtc::ArrayView<const float, kBlockSize> x_ch = x.View(/*band=*/0, ch);
+ for (float sample : x_ch) {
+ max_sample = std::max(max_sample, fabsf(sample));
+ }
+ }
+
+ const float kMargin = 10.f;
+ float peak_echo_amplitude = max_sample * echo_path_gain * kMargin;
+ saturated_echo_ = saturated_echo_ || peak_echo_amplitude > 32000;
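+ // Worked example (illustrative numbers only): with max_sample = 8000 and
+ // echo_path_gain = 0.5, the margin-inflated peak echo estimate is
+ // 8000 * 0.5 * 10 = 40000 > 32000, so the echo is flagged as saturated.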
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/aec_state.h b/third_party/libwebrtc/modules/audio_processing/aec3/aec_state.h
new file mode 100644
index 0000000000..a39325c8b8
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/aec_state.h
@@ -0,0 +1,300 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_AEC_STATE_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_AEC_STATE_H_
+
+#include <stddef.h>
+
+#include <array>
+#include <atomic>
+#include <memory>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/array_view.h"
+#include "api/audio/echo_canceller3_config.h"
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "modules/audio_processing/aec3/delay_estimate.h"
+#include "modules/audio_processing/aec3/echo_audibility.h"
+#include "modules/audio_processing/aec3/echo_path_variability.h"
+#include "modules/audio_processing/aec3/erl_estimator.h"
+#include "modules/audio_processing/aec3/erle_estimator.h"
+#include "modules/audio_processing/aec3/filter_analyzer.h"
+#include "modules/audio_processing/aec3/render_buffer.h"
+#include "modules/audio_processing/aec3/reverb_model_estimator.h"
+#include "modules/audio_processing/aec3/subtractor_output.h"
+#include "modules/audio_processing/aec3/subtractor_output_analyzer.h"
+#include "modules/audio_processing/aec3/transparent_mode.h"
+
+namespace webrtc {
+
+class ApmDataDumper;
+
+// Handles the state and the conditions for the echo removal functionality.
+class AecState {
+ public:
+ AecState(const EchoCanceller3Config& config, size_t num_capture_channels);
+ ~AecState();
+
+ // Returns whether the echo subtractor can be used to determine the residual
+ // echo.
+ bool UsableLinearEstimate() const {
+ return filter_quality_state_.LinearFilterUsable() &&
+ config_.filter.use_linear_filter;
+ }
+
+ // Returns whether the echo subtractor output should be used as output.
+ bool UseLinearFilterOutput() const {
+ return filter_quality_state_.LinearFilterUsable() &&
+ config_.filter.use_linear_filter;
+ }
+
+ // Returns whether the render signal is currently active.
+ bool ActiveRender() const { return blocks_with_active_render_ > 200; }
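+ // (Assuming 4 ms AEC3 blocks, i.e. 250 blocks per second, the 200-block
+ // threshold corresponds to roughly 0.8 s of active render.)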
+
+ // Returns the appropriate scaling of the residual echo to match the
+ // audibility.
+ void GetResidualEchoScaling(rtc::ArrayView<float> residual_scaling) const;
+
+ // Returns whether the stationary properties of the signals are used in the
+ // aec.
+ bool UseStationarityProperties() const {
+ return config_.echo_audibility.use_stationarity_properties;
+ }
+
+ // Returns the ERLE.
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>> Erle(
+ bool onset_compensated) const {
+ return erle_estimator_.Erle(onset_compensated);
+ }
+
+ // Returns the non-capped ERLE.
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>> ErleUnbounded()
+ const {
+ return erle_estimator_.ErleUnbounded();
+ }
+
+ // Returns the fullband ERLE estimate in log2 units.
+ float FullBandErleLog2() const { return erle_estimator_.FullbandErleLog2(); }
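+ // (To express the fullband ERLE in dB, scale the log2 value:
+ // erle_db = 10 * log10(2) * FullBandErleLog2() ~ 3.01 * FullBandErleLog2().)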
+
+ // Returns the ERL.
+ const std::array<float, kFftLengthBy2Plus1>& Erl() const {
+ return erl_estimator_.Erl();
+ }
+
+ // Returns the time-domain ERL.
+ float ErlTimeDomain() const { return erl_estimator_.ErlTimeDomain(); }
+
+ // Returns the delay estimate based on the linear filter.
+ int MinDirectPathFilterDelay() const {
+ return delay_state_.MinDirectPathFilterDelay();
+ }
+
+ // Returns whether the capture signal is saturated.
+ bool SaturatedCapture() const { return capture_signal_saturation_; }
+
+ // Returns whether the echo signal is saturated.
+ bool SaturatedEcho() const { return saturation_detector_.SaturatedEcho(); }
+
+ // Updates the capture signal saturation.
+ void UpdateCaptureSaturation(bool capture_signal_saturation) {
+ capture_signal_saturation_ = capture_signal_saturation;
+ }
+
+ // Returns whether the transparent mode is active.
+ bool TransparentModeActive() const {
+ return transparent_state_ && transparent_state_->Active();
+ }
+
+ // Takes appropriate action at an echo path change.
+ void HandleEchoPathChange(const EchoPathVariability& echo_path_variability);
+
+ // Returns the decay factor for the echo reverberation. The parameter `mild`
+ // indicates which exponential decay to return: the default one, or a milder
+ // one that can be used during nearend regions.
+ float ReverbDecay(bool mild) const {
+ return reverb_model_estimator_.ReverbDecay(mild);
+ }
+
+ // Return the frequency response of the reverberant echo.
+ rtc::ArrayView<const float> GetReverbFrequencyResponse() const {
+ return reverb_model_estimator_.GetReverbFrequencyResponse();
+ }
+
+ // Returns whether the transition for leaving the initial state has been
+ // triggered.
+ bool TransitionTriggered() const {
+ return initial_state_.TransitionTriggered();
+ }
+
+ // Updates the aec state.
+ // TODO(bugs.webrtc.org/10913): Compute multi-channel ERL.
+ void Update(
+ const absl::optional<DelayEstimate>& external_delay,
+ rtc::ArrayView<const std::vector<std::array<float, kFftLengthBy2Plus1>>>
+ adaptive_filter_frequency_responses,
+ rtc::ArrayView<const std::vector<float>>
+ adaptive_filter_impulse_responses,
+ const RenderBuffer& render_buffer,
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>> E2_refined,
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>> Y2,
+ rtc::ArrayView<const SubtractorOutput> subtractor_output);
+
+ // Returns filter length in blocks.
+ int FilterLengthBlocks() const {
+ // All filters have the same length, so arbitrarily return channel 0 length.
+ return filter_analyzer_.FilterLengthBlocks();
+ }
+
+ private:
+ static std::atomic<int> instance_count_;
+ std::unique_ptr<ApmDataDumper> data_dumper_;
+ const EchoCanceller3Config config_;
+ const size_t num_capture_channels_;
+ const bool deactivate_initial_state_reset_at_echo_path_change_;
+ const bool full_reset_at_echo_path_change_;
+ const bool subtractor_analyzer_reset_at_echo_path_change_;
+
+ // Class for controlling the transition from the initial state, which in turn
+ // controls when the filter parameters for the initial state should be used.
+ class InitialState {
+ public:
+ explicit InitialState(const EchoCanceller3Config& config);
+ // Resets the state to again begin in the initial state.
+ void Reset();
+
+ // Updates the state based on new data.
+ void Update(bool active_render, bool saturated_capture);
+
+ // Returns whether the initial state is active or not.
+ bool InitialStateActive() const { return initial_state_; }
+
+ // Returns whether the transition from the initial state has started.
+ bool TransitionTriggered() const { return transition_triggered_; }
+
+ private:
+ const bool conservative_initial_phase_;
+ const float initial_state_seconds_;
+ bool transition_triggered_ = false;
+ bool initial_state_ = true;
+ size_t strong_not_saturated_render_blocks_ = 0;
+ } initial_state_;
+
+ // Class for choosing the direct-path delay relative to the beginning of the
+ // filter, and for holding any other delay-related data used within
+ // AecState.
+ class FilterDelay {
+ public:
+ FilterDelay(const EchoCanceller3Config& config,
+ size_t num_capture_channels);
+
+ // Returns whether an external delay has been reported to the AecState (from
+ // the delay estimator).
+ bool ExternalDelayReported() const { return external_delay_reported_; }
+
+ // Returns the delay in blocks relative to the beginning of the filter that
+ // corresponds to the direct path of the echo.
+ rtc::ArrayView<const int> DirectPathFilterDelays() const {
+ return filter_delays_blocks_;
+ }
+
+ // Returns the minimum delay among the direct path delays relative to the
+ // beginning of the filter.
+ int MinDirectPathFilterDelay() const { return min_filter_delay_; }
+
+ // Updates the delay estimates based on new data.
+ void Update(
+ rtc::ArrayView<const int> analyzer_filter_delay_estimates_blocks,
+ const absl::optional<DelayEstimate>& external_delay,
+ size_t blocks_with_proper_filter_adaptation);
+
+ private:
+ const int delay_headroom_blocks_;
+ bool external_delay_reported_ = false;
+ std::vector<int> filter_delays_blocks_;
+ int min_filter_delay_;
+ absl::optional<DelayEstimate> external_delay_;
+ } delay_state_;
+
+ // Classifier for toggling transparent mode when there is no echo.
+ std::unique_ptr<TransparentMode> transparent_state_;
+
+ // Class for analyzing how well the linear filter is performing, and can be
+ // expected to perform, on the current signals. The purpose of this is to
+ // select the echo suppression functionality as well as the input to the
+ // echo suppressor.
+ class FilteringQualityAnalyzer {
+ public:
+ FilteringQualityAnalyzer(const EchoCanceller3Config& config,
+ size_t num_capture_channels);
+
+ // Returns whether the linear filter can be used for the echo
+ // canceller output.
+ bool LinearFilterUsable() const { return overall_usable_linear_estimates_; }
+
+ // Returns whether an individual filter output can be used for the echo
+ // canceller output.
+ const std::vector<bool>& UsableLinearFilterOutputs() const {
+ return usable_linear_filter_estimates_;
+ }
+
+ // Resets the state of the analyzer.
+ void Reset();
+
+ // Updates the analysis based on new data.
+ void Update(bool active_render,
+ bool transparent_mode,
+ bool saturated_capture,
+ const absl::optional<DelayEstimate>& external_delay,
+ bool any_filter_converged);
+
+ private:
+ const bool use_linear_filter_;
+ bool overall_usable_linear_estimates_ = false;
+ size_t filter_update_blocks_since_reset_ = 0;
+ size_t filter_update_blocks_since_start_ = 0;
+ bool convergence_seen_ = false;
+ std::vector<bool> usable_linear_filter_estimates_;
+ } filter_quality_state_;
+
+ // Class for detecting whether the echo is to be considered saturated.
+ class SaturationDetector {
+ public:
+ // Returns whether the echo is to be considered saturated.
+ bool SaturatedEcho() const { return saturated_echo_; }
+
+ // Updates the detection decision based on new data.
+ void Update(const Block& x,
+ bool saturated_capture,
+ bool usable_linear_estimate,
+ rtc::ArrayView<const SubtractorOutput> subtractor_output,
+ float echo_path_gain);
+
+ private:
+ bool saturated_echo_ = false;
+ } saturation_detector_;
+
+ ErlEstimator erl_estimator_;
+ ErleEstimator erle_estimator_;
+ size_t strong_not_saturated_render_blocks_ = 0;
+ size_t blocks_with_active_render_ = 0;
+ bool capture_signal_saturation_ = false;
+ FilterAnalyzer filter_analyzer_;
+ EchoAudibility echo_audibility_;
+ ReverbModelEstimator reverb_model_estimator_;
+ ReverbModel avg_render_reverb_;
+ SubtractorOutputAnalyzer subtractor_output_analyzer_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AEC3_AEC_STATE_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/aec_state_unittest.cc b/third_party/libwebrtc/modules/audio_processing/aec3/aec_state_unittest.cc
new file mode 100644
index 0000000000..6662c8fb1a
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/aec_state_unittest.cc
@@ -0,0 +1,297 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/aec_state.h"
+
+#include "modules/audio_processing/aec3/aec3_fft.h"
+#include "modules/audio_processing/aec3/render_delay_buffer.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+#include "rtc_base/strings/string_builder.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+void RunNormalUsageTest(size_t num_render_channels,
+ size_t num_capture_channels) {
+ // TODO(bugs.webrtc.org/10913): Test with different content in different
+ // channels.
+ constexpr int kSampleRateHz = 48000;
+ constexpr size_t kNumBands = NumBandsForRate(kSampleRateHz);
+ ApmDataDumper data_dumper(42);
+ EchoCanceller3Config config;
+ AecState state(config, num_capture_channels);
+ absl::optional<DelayEstimate> delay_estimate =
+ DelayEstimate(DelayEstimate::Quality::kRefined, 10);
+ std::unique_ptr<RenderDelayBuffer> render_delay_buffer(
+ RenderDelayBuffer::Create(config, kSampleRateHz, num_render_channels));
+ std::vector<std::array<float, kFftLengthBy2Plus1>> E2_refined(
+ num_capture_channels);
+ std::vector<std::array<float, kFftLengthBy2Plus1>> Y2(num_capture_channels);
+ Block x(kNumBands, num_render_channels);
+ EchoPathVariability echo_path_variability(
+ false, EchoPathVariability::DelayAdjustment::kNone, false);
+ std::vector<std::array<float, kBlockSize>> y(num_capture_channels);
+ std::vector<SubtractorOutput> subtractor_output(num_capture_channels);
+ for (size_t ch = 0; ch < num_capture_channels; ++ch) {
+ subtractor_output[ch].Reset();
+ subtractor_output[ch].s_refined.fill(100.f);
+ subtractor_output[ch].e_refined.fill(100.f);
+ y[ch].fill(1000.f);
+ E2_refined[ch].fill(0.f);
+ Y2[ch].fill(0.f);
+ }
+ Aec3Fft fft;
+ std::vector<std::vector<std::array<float, kFftLengthBy2Plus1>>>
+ converged_filter_frequency_response(
+ num_capture_channels,
+ std::vector<std::array<float, kFftLengthBy2Plus1>>(10));
+ for (auto& v_ch : converged_filter_frequency_response) {
+ for (auto& v : v_ch) {
+ v.fill(0.01f);
+ }
+ }
+ std::vector<std::vector<std::array<float, kFftLengthBy2Plus1>>>
+ diverged_filter_frequency_response = converged_filter_frequency_response;
+ converged_filter_frequency_response[0][2].fill(100.f);
+ converged_filter_frequency_response[0][2][0] = 1.f;
+ std::vector<std::vector<float>> impulse_response(
+ num_capture_channels,
+ std::vector<float>(
+ GetTimeDomainLength(config.filter.refined.length_blocks), 0.f));
+
+ // Verify that linear AEC usability is true when the filter has converged.
+ for (size_t band = 0; band < kNumBands; ++band) {
+ for (size_t ch = 0; ch < num_render_channels; ++ch) {
+ std::fill(x.begin(band, ch), x.end(band, ch), 101.f);
+ }
+ }
+ for (int k = 0; k < 3000; ++k) {
+ render_delay_buffer->Insert(x);
+ for (size_t ch = 0; ch < num_capture_channels; ++ch) {
+ subtractor_output[ch].ComputeMetrics(y[ch]);
+ }
+ state.Update(delay_estimate, converged_filter_frequency_response,
+ impulse_response, *render_delay_buffer->GetRenderBuffer(),
+ E2_refined, Y2, subtractor_output);
+ }
+ EXPECT_TRUE(state.UsableLinearEstimate());
+
+ // Verify that linear AEC usability becomes false after an echo path
+ // change is reported.
+ for (size_t ch = 0; ch < num_capture_channels; ++ch) {
+ subtractor_output[ch].ComputeMetrics(y[ch]);
+ }
+ state.HandleEchoPathChange(EchoPathVariability(
+ false, EchoPathVariability::DelayAdjustment::kNewDetectedDelay, false));
+ state.Update(delay_estimate, converged_filter_frequency_response,
+ impulse_response, *render_delay_buffer->GetRenderBuffer(),
+ E2_refined, Y2, subtractor_output);
+ EXPECT_FALSE(state.UsableLinearEstimate());
+
+ // Verify that the active render detection works as intended.
+ for (size_t ch = 0; ch < num_render_channels; ++ch) {
+ std::fill(x.begin(0, ch), x.end(0, ch), 101.f);
+ }
+ render_delay_buffer->Insert(x);
+ for (size_t ch = 0; ch < num_capture_channels; ++ch) {
+ subtractor_output[ch].ComputeMetrics(y[ch]);
+ }
+ state.HandleEchoPathChange(EchoPathVariability(
+ true, EchoPathVariability::DelayAdjustment::kNewDetectedDelay, false));
+ state.Update(delay_estimate, converged_filter_frequency_response,
+ impulse_response, *render_delay_buffer->GetRenderBuffer(),
+ E2_refined, Y2, subtractor_output);
+ EXPECT_FALSE(state.ActiveRender());
+
+ for (int k = 0; k < 1000; ++k) {
+ render_delay_buffer->Insert(x);
+ for (size_t ch = 0; ch < num_capture_channels; ++ch) {
+ subtractor_output[ch].ComputeMetrics(y[ch]);
+ }
+ state.Update(delay_estimate, converged_filter_frequency_response,
+ impulse_response, *render_delay_buffer->GetRenderBuffer(),
+ E2_refined, Y2, subtractor_output);
+ }
+ EXPECT_TRUE(state.ActiveRender());
+
+ // Verify that the ERL is properly estimated.
+ for (int band = 0; band < x.NumBands(); ++band) {
+ for (int channel = 0; channel < x.NumChannels(); ++channel) {
+ std::fill(x.begin(band, channel), x.end(band, channel), 0.0f);
+ }
+ }
+
+ for (size_t ch = 0; ch < num_render_channels; ++ch) {
+ x.View(/*band=*/0, ch)[0] = 5000.f;
+ }
+ for (size_t k = 0;
+ k < render_delay_buffer->GetRenderBuffer()->GetFftBuffer().size(); ++k) {
+ render_delay_buffer->Insert(x);
+ if (k == 0) {
+ render_delay_buffer->Reset();
+ }
+ render_delay_buffer->PrepareCaptureProcessing();
+ }
+
+ for (auto& Y2_ch : Y2) {
+ Y2_ch.fill(10.f * 10000.f * 10000.f);
+ }
+ for (size_t k = 0; k < 1000; ++k) {
+ for (size_t ch = 0; ch < num_capture_channels; ++ch) {
+ subtractor_output[ch].ComputeMetrics(y[ch]);
+ }
+ state.Update(delay_estimate, converged_filter_frequency_response,
+ impulse_response, *render_delay_buffer->GetRenderBuffer(),
+ E2_refined, Y2, subtractor_output);
+ }
+
+ ASSERT_TRUE(state.UsableLinearEstimate());
+ const std::array<float, kFftLengthBy2Plus1>& erl = state.Erl();
+ EXPECT_EQ(erl[0], erl[1]);
+ for (size_t k = 1; k < erl.size() - 1; ++k) {
+ EXPECT_NEAR(k % 2 == 0 ? 10.f : 1000.f, erl[k], 0.1);
+ }
+ EXPECT_EQ(erl[erl.size() - 2], erl[erl.size() - 1]);
+
+ // Verify that the ERLE is properly estimated.
+ for (auto& E2_refined_ch : E2_refined) {
+ E2_refined_ch.fill(1.f * 10000.f * 10000.f);
+ }
+ for (auto& Y2_ch : Y2) {
+ Y2_ch.fill(10.f * E2_refined[0][0]);
+ }
+ for (size_t k = 0; k < 1000; ++k) {
+ for (size_t ch = 0; ch < num_capture_channels; ++ch) {
+ subtractor_output[ch].ComputeMetrics(y[ch]);
+ }
+ state.Update(delay_estimate, converged_filter_frequency_response,
+ impulse_response, *render_delay_buffer->GetRenderBuffer(),
+ E2_refined, Y2, subtractor_output);
+ }
+ ASSERT_TRUE(state.UsableLinearEstimate());
+ {
+ // Note that the render spectrum is built so that it has energy only in
+ // the even frequency bins, not in the odd ones.
+ const auto& erle = state.Erle(/*onset_compensated=*/true)[0];
+ EXPECT_EQ(erle[0], erle[1]);
+ constexpr size_t kLowFrequencyLimit = 32;
+ for (size_t k = 2; k < kLowFrequencyLimit; k = k + 2) {
+ EXPECT_NEAR(4.f, erle[k], 0.1);
+ }
+ for (size_t k = kLowFrequencyLimit; k < erle.size() - 1; k = k + 2) {
+ EXPECT_NEAR(1.5f, erle[k], 0.1);
+ }
+ EXPECT_EQ(erle[erle.size() - 2], erle[erle.size() - 1]);
+ }
+ for (auto& E2_refined_ch : E2_refined) {
+ E2_refined_ch.fill(1.f * 10000.f * 10000.f);
+ }
+ for (auto& Y2_ch : Y2) {
+ Y2_ch.fill(5.f * E2_refined[0][0]);
+ }
+ for (size_t k = 0; k < 1000; ++k) {
+ for (size_t ch = 0; ch < num_capture_channels; ++ch) {
+ subtractor_output[ch].ComputeMetrics(y[ch]);
+ }
+ state.Update(delay_estimate, converged_filter_frequency_response,
+ impulse_response, *render_delay_buffer->GetRenderBuffer(),
+ E2_refined, Y2, subtractor_output);
+ }
+
+ ASSERT_TRUE(state.UsableLinearEstimate());
+ {
+ const auto& erle = state.Erle(/*onset_compensated=*/true)[0];
+ EXPECT_EQ(erle[0], erle[1]);
+ constexpr size_t kLowFrequencyLimit = 32;
+ for (size_t k = 1; k < kLowFrequencyLimit; ++k) {
+ EXPECT_NEAR(k % 2 == 0 ? 4.f : 1.f, erle[k], 0.1);
+ }
+ for (size_t k = kLowFrequencyLimit; k < erle.size() - 1; ++k) {
+ EXPECT_NEAR(k % 2 == 0 ? 1.5f : 1.f, erle[k], 0.1);
+ }
+ EXPECT_EQ(erle[erle.size() - 2], erle[erle.size() - 1]);
+ }
+}
+
+} // namespace
+
+class AecStateMultiChannel
+ : public ::testing::Test,
+ public ::testing::WithParamInterface<std::tuple<size_t, size_t>> {};
+
+INSTANTIATE_TEST_SUITE_P(MultiChannel,
+ AecStateMultiChannel,
+ ::testing::Combine(::testing::Values(1, 2, 8),
+ ::testing::Values(1, 2, 8)));
+
+// Verify the general functionality of AecState
+TEST_P(AecStateMultiChannel, NormalUsage) {
+ const size_t num_render_channels = std::get<0>(GetParam());
+ const size_t num_capture_channels = std::get<1>(GetParam());
+ RunNormalUsageTest(num_render_channels, num_capture_channels);
+}
+
+// Verifies the delay for a converged filter is correctly identified.
+TEST(AecState, ConvergedFilterDelay) {
+ constexpr int kFilterLengthBlocks = 10;
+ constexpr size_t kNumCaptureChannels = 1;
+ EchoCanceller3Config config;
+ AecState state(config, kNumCaptureChannels);
+ std::unique_ptr<RenderDelayBuffer> render_delay_buffer(
+ RenderDelayBuffer::Create(config, 48000, 1));
+ absl::optional<DelayEstimate> delay_estimate;
+ std::vector<std::array<float, kFftLengthBy2Plus1>> E2_refined(
+ kNumCaptureChannels);
+ std::vector<std::array<float, kFftLengthBy2Plus1>> Y2(kNumCaptureChannels);
+ std::array<float, kBlockSize> x;
+ EchoPathVariability echo_path_variability(
+ false, EchoPathVariability::DelayAdjustment::kNone, false);
+ std::vector<SubtractorOutput> subtractor_output(kNumCaptureChannels);
+ for (auto& output : subtractor_output) {
+ output.Reset();
+ output.s_refined.fill(100.f);
+ }
+ std::array<float, kBlockSize> y;
+ x.fill(0.f);
+ y.fill(0.f);
+
+ std::vector<std::vector<std::array<float, kFftLengthBy2Plus1>>>
+ frequency_response(kNumCaptureChannels,
+ std::vector<std::array<float, kFftLengthBy2Plus1>>(
+ kFilterLengthBlocks));
+ for (auto& v_ch : frequency_response) {
+ for (auto& v : v_ch) {
+ v.fill(0.01f);
+ }
+ }
+
+ std::vector<std::vector<float>> impulse_response(
+ kNumCaptureChannels,
+ std::vector<float>(
+ GetTimeDomainLength(config.filter.refined.length_blocks), 0.f));
+
+ // Verify that the filter delay for a converged filter is properly
+ // identified.
+ for (int k = 0; k < kFilterLengthBlocks; ++k) {
+ for (auto& ir : impulse_response) {
+ std::fill(ir.begin(), ir.end(), 0.f);
+ ir[k * kBlockSize + 1] = 1.f;
+ }
+
+ state.HandleEchoPathChange(echo_path_variability);
+ subtractor_output[0].ComputeMetrics(y);
+ state.Update(delay_estimate, frequency_response, impulse_response,
+ *render_delay_buffer->GetRenderBuffer(), E2_refined, Y2,
+ subtractor_output);
+ }
+}
+
+} // namespace webrtc
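The ERLE expectations in the test above follow from simple ratios: Y2 is set to 10x (and later 5x) E2, but the reported ERLE saturates at config-dependent caps, which the test pins at 4.0 below bin 32 and 1.5 above. A toy model of such a clamped per-bin estimate follows; the caps are taken from the test's expectations and are assumptions about the default config, not the real estimator, which also applies onset compensation and smoothing:

    // Toy model of a clamped per-bin ERLE; caps mirror the test expectations
    // above and are assumptions, not the real AEC3 estimator.
    #include <algorithm>
    #include <array>
    #include <cstddef>

    constexpr std::size_t kNumBins = 65;  // kFftLengthBy2Plus1 for AEC3.
    constexpr std::size_t kLowFrequencyLimit = 32;

    std::array<float, kNumBins> ClampedErle(
        const std::array<float, kNumBins>& Y2,
        const std::array<float, kNumBins>& E2) {
      std::array<float, kNumBins> erle;
      for (std::size_t k = 0; k < kNumBins; ++k) {
        const float raw = E2[k] > 0.f ? Y2[k] / E2[k] : 1.f;
        const float cap = k < kLowFrequencyLimit ? 4.f : 1.5f;
        // Never below 1 (no enhancement) and never above the cap.
        erle[k] = std::min(cap, std::max(1.f, raw));
      }
      return erle;
    }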
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/alignment_mixer.cc b/third_party/libwebrtc/modules/audio_processing/aec3/alignment_mixer.cc
new file mode 100644
index 0000000000..7f076dea8e
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/alignment_mixer.cc
@@ -0,0 +1,163 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/audio_processing/aec3/alignment_mixer.h"
+
+#include <algorithm>
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace {
+
+AlignmentMixer::MixingVariant ChooseMixingVariant(bool downmix,
+ bool adaptive_selection,
+ int num_channels) {
+ RTC_DCHECK(!(adaptive_selection && downmix));
+ RTC_DCHECK_LT(0, num_channels);
+
+ if (num_channels == 1) {
+ return AlignmentMixer::MixingVariant::kFixed;
+ }
+ if (downmix) {
+ return AlignmentMixer::MixingVariant::kDownmix;
+ }
+ if (adaptive_selection) {
+ return AlignmentMixer::MixingVariant::kAdaptive;
+ }
+ return AlignmentMixer::MixingVariant::kFixed;
+}
+
+} // namespace
+
+AlignmentMixer::AlignmentMixer(
+ size_t num_channels,
+ const EchoCanceller3Config::Delay::AlignmentMixing& config)
+ : AlignmentMixer(num_channels,
+ config.downmix,
+ config.adaptive_selection,
+ config.activity_power_threshold,
+ config.prefer_first_two_channels) {}
+
+AlignmentMixer::AlignmentMixer(size_t num_channels,
+ bool downmix,
+ bool adaptive_selection,
+ float activity_power_threshold,
+ bool prefer_first_two_channels)
+ : num_channels_(num_channels),
+ one_by_num_channels_(1.f / num_channels_),
+ excitation_energy_threshold_(kBlockSize * activity_power_threshold),
+ prefer_first_two_channels_(prefer_first_two_channels),
+ selection_variant_(
+ ChooseMixingVariant(downmix, adaptive_selection, num_channels_)) {
+ if (selection_variant_ == MixingVariant::kAdaptive) {
+ std::fill(strong_block_counters_.begin(), strong_block_counters_.end(), 0);
+ cumulative_energies_.resize(num_channels_);
+ std::fill(cumulative_energies_.begin(), cumulative_energies_.end(), 0.f);
+ }
+}
+
+void AlignmentMixer::ProduceOutput(const Block& x,
+ rtc::ArrayView<float, kBlockSize> y) {
+ RTC_DCHECK_EQ(x.NumChannels(), num_channels_);
+
+ if (selection_variant_ == MixingVariant::kDownmix) {
+ Downmix(x, y);
+ return;
+ }
+
+ int ch = selection_variant_ == MixingVariant::kFixed ? 0 : SelectChannel(x);
+
+ RTC_DCHECK_GT(x.NumChannels(), ch);
+ std::copy(x.begin(/*band=*/0, ch), x.end(/*band=*/0, ch), y.begin());
+}
+
+void AlignmentMixer::Downmix(const Block& x,
+ rtc::ArrayView<float, kBlockSize> y) const {
+ RTC_DCHECK_EQ(x.NumChannels(), num_channels_);
+ RTC_DCHECK_GE(num_channels_, 2);
+ std::memcpy(&y[0], x.View(/*band=*/0, /*channel=*/0).data(),
+ kBlockSize * sizeof(y[0]));
+ for (size_t ch = 1; ch < num_channels_; ++ch) {
+ const auto x_ch = x.View(/*band=*/0, ch);
+ for (size_t i = 0; i < kBlockSize; ++i) {
+ y[i] += x_ch[i];
+ }
+ }
+
+ for (size_t i = 0; i < kBlockSize; ++i) {
+ y[i] *= one_by_num_channels_;
+ }
+}
+
+int AlignmentMixer::SelectChannel(const Block& x) {
+ RTC_DCHECK_EQ(x.NumChannels(), num_channels_);
+ RTC_DCHECK_GE(num_channels_, 2);
+ RTC_DCHECK_EQ(cumulative_energies_.size(), num_channels_);
+
+ constexpr size_t kBlocksToChooseLeftOrRight =
+ static_cast<size_t>(0.5f * kNumBlocksPerSecond);
+ const bool good_signal_in_left_or_right =
+ prefer_first_two_channels_ &&
+ (strong_block_counters_[0] > kBlocksToChooseLeftOrRight ||
+ strong_block_counters_[1] > kBlocksToChooseLeftOrRight);
+
+ const int num_ch_to_analyze =
+ good_signal_in_left_or_right ? 2 : num_channels_;
+
+ constexpr int kNumBlocksBeforeEnergySmoothing = 60 * kNumBlocksPerSecond;
+ ++block_counter_;
+
+ for (int ch = 0; ch < num_ch_to_analyze; ++ch) {
+ float x2_sum = 0.f;
+ rtc::ArrayView<const float, kBlockSize> x_ch = x.View(/*band=*/0, ch);
+ for (size_t i = 0; i < kBlockSize; ++i) {
+ x2_sum += x_ch[i] * x_ch[i];
+ }
+
+ if (ch < 2 && x2_sum > excitation_energy_threshold_) {
+ ++strong_block_counters_[ch];
+ }
+
+ if (block_counter_ <= kNumBlocksBeforeEnergySmoothing) {
+ cumulative_energies_[ch] += x2_sum;
+ } else {
+ constexpr float kSmoothing = 1.f / (10 * kNumBlocksPerSecond);
+ cumulative_energies_[ch] +=
+ kSmoothing * (x2_sum - cumulative_energies_[ch]);
+ }
+ }
+
+ // Normalize the energies so that the energy computations can, from this
+ // point on, be based on smoothing.
+ if (block_counter_ == kNumBlocksBeforeEnergySmoothing) {
+ constexpr float kOneByNumBlocksBeforeEnergySmoothing =
+ 1.f / kNumBlocksBeforeEnergySmoothing;
+ for (int ch = 0; ch < num_ch_to_analyze; ++ch) {
+ cumulative_energies_[ch] *= kOneByNumBlocksBeforeEnergySmoothing;
+ }
+ }
+
+ int strongest_ch = 0;
+ for (int ch = 0; ch < num_ch_to_analyze; ++ch) {
+ if (cumulative_energies_[ch] > cumulative_energies_[strongest_ch]) {
+ strongest_ch = ch;
+ }
+ }
+
+ if ((good_signal_in_left_or_right && selected_channel_ > 1) ||
+ cumulative_energies_[strongest_ch] >
+ 2.f * cumulative_energies_[selected_channel_]) {
+ selected_channel_ = strongest_ch;
+ }
+
+ return selected_channel_;
+}
+
+} // namespace webrtc
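A minimal usage sketch for the mixer: construct it for the render channel count and call ProduceOutput() once per 64-sample block to obtain the mono signal used for delay estimation. The config field name render_alignment_mixing is an assumption, inferred from the EchoCanceller3Config::Delay::AlignmentMixing type used in the constructor above:

    // Hedged usage sketch; `render_alignment_mixing` is an assumed field on
    // EchoCanceller3Config::Delay, matching the AlignmentMixing type above.
    #include <array>
    #include <cstddef>

    #include "api/audio/echo_canceller3_config.h"
    #include "modules/audio_processing/aec3/alignment_mixer.h"
    #include "modules/audio_processing/aec3/block.h"

    void MixRenderToMono() {
      constexpr size_t kNumChannels = 2;
      webrtc::EchoCanceller3Config config;
      webrtc::AlignmentMixer mixer(kNumChannels,
                                   config.delay.render_alignment_mixing);
      webrtc::Block x(/*num_bands=*/1, kNumChannels);
      std::array<float, webrtc::kBlockSize> mono;
      // One call per incoming render block; `mono` feeds the delay estimator.
      mixer.ProduceOutput(x, mono);
    }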
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/alignment_mixer.h b/third_party/libwebrtc/modules/audio_processing/aec3/alignment_mixer.h
new file mode 100644
index 0000000000..b3ed04755c
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/alignment_mixer.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_ALIGNMENT_MIXER_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_ALIGNMENT_MIXER_H_
+
+#include <vector>
+
+#include "api/array_view.h"
+#include "api/audio/echo_canceller3_config.h"
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "modules/audio_processing/aec3/block.h"
+
+namespace webrtc {
+
+// Performs channel conversion to mono to provide a decent mono input for
+// delay estimation. This is achieved by analyzing all incoming channels
+// and producing a single-channel output.
+class AlignmentMixer {
+ public:
+ AlignmentMixer(size_t num_channels,
+ const EchoCanceller3Config::Delay::AlignmentMixing& config);
+
+ AlignmentMixer(size_t num_channels,
+ bool downmix,
+ bool adaptive_selection,
+ float excitation_limit,
+ bool prefer_first_two_channels);
+
+ void ProduceOutput(const Block& x, rtc::ArrayView<float, kBlockSize> y);
+
+ enum class MixingVariant { kDownmix, kAdaptive, kFixed };
+
+ private:
+ const size_t num_channels_;
+ const float one_by_num_channels_;
+ const float excitation_energy_threshold_;
+ const bool prefer_first_two_channels_;
+ const MixingVariant selection_variant_;
+ std::array<size_t, 2> strong_block_counters_;
+ std::vector<float> cumulative_energies_;
+ int selected_channel_ = 0;
+ size_t block_counter_ = 0;
+
+ void Downmix(const Block& x, rtc::ArrayView<float, kBlockSize> y) const;
+ int SelectChannel(const Block& x);
+};
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AEC3_ALIGNMENT_MIXER_H_
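Since the header leaves the variant choice to the implementation, the mapping is easy to lose track of. The following standalone restatement mirrors ChooseMixingVariant() from the .cc file above: mono input always yields kFixed, and downmix and adaptive selection are mutually exclusive (enforced by a DCHECK there):

    // Standalone restatement of ChooseMixingVariant() in alignment_mixer.cc.
    #include <cassert>

    enum class MixingVariant { kDownmix, kAdaptive, kFixed };

    MixingVariant Choose(bool downmix, bool adaptive_selection,
                         int num_channels) {
      if (num_channels == 1) {
        return MixingVariant::kFixed;  // Mono: nothing to mix.
      }
      if (downmix) {
        return MixingVariant::kDownmix;
      }
      if (adaptive_selection) {
        return MixingVariant::kAdaptive;
      }
      return MixingVariant::kFixed;  // Multichannel default: channel 0.
    }

    int main() {
      assert(Choose(true, false, 1) == MixingVariant::kFixed);
      assert(Choose(true, false, 2) == MixingVariant::kDownmix);
      assert(Choose(false, true, 2) == MixingVariant::kAdaptive);
      assert(Choose(false, false, 8) == MixingVariant::kFixed);
      return 0;
    }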
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/alignment_mixer_unittest.cc b/third_party/libwebrtc/modules/audio_processing/aec3/alignment_mixer_unittest.cc
new file mode 100644
index 0000000000..eaf6dcb235
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/alignment_mixer_unittest.cc
@@ -0,0 +1,196 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/alignment_mixer.h"
+
+#include <string>
+
+#include "api/array_view.h"
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "rtc_base/strings/string_builder.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+using ::testing::AllOf;
+using ::testing::Each;
+
+namespace webrtc {
+namespace {
+std::string ProduceDebugText(bool initial_silence,
+ bool huge_activity_threshold,
+ bool prefer_first_two_channels,
+ int num_channels,
+ int strongest_ch) {
+ rtc::StringBuilder ss;
+ ss << ", Initial silence: " << initial_silence;
+ ss << ", Huge activity threshold: " << huge_activity_threshold;
+ ss << ", Prefer first two channels: " << prefer_first_two_channels;
+ ss << ", Number of channels: " << num_channels;
+ ss << ", Strongest channel: " << strongest_ch;
+ return ss.Release();
+}
+
+} // namespace
+
+TEST(AlignmentMixer, GeneralAdaptiveMode) {
+ constexpr int kChannelOffset = 100;
+ constexpr int kMaxChannelsToTest = 8;
+ constexpr float kStrongestSignalScaling =
+ kMaxChannelsToTest * kChannelOffset * 100;
+
+ for (bool initial_silence : {false, true}) {
+ for (bool huge_activity_threshold : {false, true}) {
+ for (bool prefer_first_two_channels : {false, true}) {
+ for (int num_channels = 2; num_channels < 8; ++num_channels) {
+ for (int strongest_ch = 0; strongest_ch < num_channels;
+ ++strongest_ch) {
+ SCOPED_TRACE(ProduceDebugText(
+ initial_silence, huge_activity_threshold,
+ prefer_first_two_channels, num_channels, strongest_ch));
+ const float excitation_limit =
+ huge_activity_threshold ? 1000000000.f : 0.001f;
+ AlignmentMixer am(num_channels, /*downmix*/ false,
+ /*adaptive_selection*/ true, excitation_limit,
+ prefer_first_two_channels);
+
+ Block x(
+ /*num_bands=*/1, num_channels);
+ if (initial_silence) {
+ std::array<float, kBlockSize> y;
+ for (int frame = 0; frame < 10 * kNumBlocksPerSecond; ++frame) {
+ am.ProduceOutput(x, y);
+ }
+ }
+
+ for (int frame = 0; frame < 2 * kNumBlocksPerSecond; ++frame) {
+ const auto channel_value = [&](int frame_index,
+ int channel_index) {
+ return static_cast<float>(frame_index +
+ channel_index * kChannelOffset);
+ };
+
+ for (int ch = 0; ch < num_channels; ++ch) {
+ float scaling =
+ ch == strongest_ch ? kStrongestSignalScaling : 1.f;
+ auto x_ch = x.View(/*band=*/0, ch);
+ std::fill(x_ch.begin(), x_ch.end(),
+ channel_value(frame, ch) * scaling);
+ }
+
+ std::array<float, kBlockSize> y;
+ y.fill(-1.f);
+ am.ProduceOutput(x, y);
+
+ if (frame > 1 * kNumBlocksPerSecond) {
+ if (!prefer_first_two_channels || huge_activity_threshold) {
+ EXPECT_THAT(y,
+ AllOf(Each(x.View(/*band=*/0, strongest_ch)[0])));
+ } else {
+ bool left_or_right_chosen;
+ for (int ch = 0; ch < 2; ++ch) {
+ left_or_right_chosen = true;
+ const auto x_ch = x.View(/*band=*/0, ch);
+ for (size_t k = 0; k < kBlockSize; ++k) {
+ if (y[k] != x_ch[k]) {
+ left_or_right_chosen = false;
+ break;
+ }
+ }
+ if (left_or_right_chosen) {
+ break;
+ }
+ }
+ EXPECT_TRUE(left_or_right_chosen);
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+TEST(AlignmentMixer, DownmixMode) {
+ for (int num_channels = 1; num_channels < 8; ++num_channels) {
+ AlignmentMixer am(num_channels, /*downmix*/ true,
+ /*adaptive_selection*/ false, /*excitation_limit*/ 1.f,
+ /*prefer_first_two_channels*/ false);
+
+ Block x(/*num_bands=*/1, num_channels);
+ const auto channel_value = [](int frame_index, int channel_index) {
+ return static_cast<float>(frame_index + channel_index);
+ };
+ for (int frame = 0; frame < 10; ++frame) {
+ for (int ch = 0; ch < num_channels; ++ch) {
+ auto x_ch = x.View(/*band=*/0, ch);
+ std::fill(x_ch.begin(), x_ch.end(), channel_value(frame, ch));
+ }
+
+ std::array<float, kBlockSize> y;
+ y.fill(-1.f);
+ am.ProduceOutput(x, y);
+
+ float expected_mixed_value = 0.f;
+ for (int ch = 0; ch < num_channels; ++ch) {
+ expected_mixed_value += channel_value(frame, ch);
+ }
+ expected_mixed_value *= 1.f / num_channels;
+
+ EXPECT_THAT(y, AllOf(Each(expected_mixed_value)));
+ }
+ }
+}
+
+TEST(AlignmentMixer, FixedMode) {
+ for (int num_channels = 1; num_channels < 8; ++num_channels) {
+ AlignmentMixer am(num_channels, /*downmix*/ false,
+ /*adaptive_selection*/ false, /*excitation_limit*/ 1.f,
+ /*prefer_first_two_channels*/ false);
+
+ Block x(/*num_bands=*/1, num_channels);
+ const auto channel_value = [](int frame_index, int channel_index) {
+ return static_cast<float>(frame_index + channel_index);
+ };
+ for (int frame = 0; frame < 10; ++frame) {
+ for (int ch = 0; ch < num_channels; ++ch) {
+ auto x_ch = x.View(/*band=*/0, ch);
+ std::fill(x_ch.begin(), x_ch.end(), channel_value(frame, ch));
+ }
+
+ std::array<float, kBlockSize> y;
+ y.fill(-1.f);
+ am.ProduceOutput(x, y);
+ EXPECT_THAT(y, AllOf(Each(x.View(/*band=*/0, /*channel=*/0)[0])));
+ }
+ }
+}
+
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+
+TEST(AlignmentMixerDeathTest, ZeroNumChannels) {
+ EXPECT_DEATH(
+ AlignmentMixer(/*num_channels*/ 0, /*downmix*/ false,
+ /*adaptive_selection*/ false, /*excitation_limit*/ 1.f,
+ /*prefer_first_two_channels*/ false);
+ , "");
+}
+
+TEST(AlignmentMixerDeathTest, IncorrectVariant) {
+ EXPECT_DEATH(
+ AlignmentMixer(/*num_channels*/ 1, /*downmix*/ true,
+ /*adaptive_selection*/ true, /*excitation_limit*/ 1.f,
+ /*prefer_first_two_channels*/ false);
+ , "");
+}
+
+#endif
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/api_call_jitter_metrics.cc b/third_party/libwebrtc/modules/audio_processing/aec3/api_call_jitter_metrics.cc
new file mode 100644
index 0000000000..45f56a5dce
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/api_call_jitter_metrics.cc
@@ -0,0 +1,121 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/api_call_jitter_metrics.h"
+
+#include <algorithm>
+#include <limits>
+
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "system_wrappers/include/metrics.h"
+
+namespace webrtc {
+namespace {
+
+bool TimeToReportMetrics(int frames_since_last_report) {
+ constexpr int kNumFramesPerSecond = 100;
+ constexpr int kReportingIntervalFrames = 10 * kNumFramesPerSecond;
+ return frames_since_last_report == kReportingIntervalFrames;
+}
+
+} // namespace
+
+ApiCallJitterMetrics::Jitter::Jitter()
+ : max_(0), min_(std::numeric_limits<int>::max()) {}
+
+void ApiCallJitterMetrics::Jitter::Update(int num_api_calls_in_a_row) {
+ min_ = std::min(min_, num_api_calls_in_a_row);
+ max_ = std::max(max_, num_api_calls_in_a_row);
+}
+
+void ApiCallJitterMetrics::Jitter::Reset() {
+ min_ = std::numeric_limits<int>::max();
+ max_ = 0;
+}
+
+void ApiCallJitterMetrics::Reset() {
+ render_jitter_.Reset();
+ capture_jitter_.Reset();
+ num_api_calls_in_a_row_ = 0;
+ frames_since_last_report_ = 0;
+ last_call_was_render_ = false;
+ proper_call_observed_ = false;
+}
+
+void ApiCallJitterMetrics::ReportRenderCall() {
+ if (!last_call_was_render_) {
+ // If the previous call was a capture and a proper call has been observed
+ // (containing both render and capture data), store the last number of
+ // capture calls in the metrics.
+ if (proper_call_observed_) {
+ capture_jitter_.Update(num_api_calls_in_a_row_);
+ }
+
+ // Reset the call counter to start counting render calls.
+ num_api_calls_in_a_row_ = 0;
+ }
+ ++num_api_calls_in_a_row_;
+ last_call_was_render_ = true;
+}
+
+void ApiCallJitterMetrics::ReportCaptureCall() {
+ if (last_call_was_render_) {
+ // If the previous call was a render and a proper call has been observed
+ // (containing both render and capture data), store the last number of
+ // render calls in the metrics.
+ if (proper_call_observed_) {
+ render_jitter_.Update(num_api_calls_in_a_row_);
+ }
+ // Reset the call counter to start counting capture calls.
+ num_api_calls_in_a_row_ = 0;
+
+ // If this statement is reached, at least one render and one capture call
+ // have been observed.
+ proper_call_observed_ = true;
+ }
+ ++num_api_calls_in_a_row_;
+ last_call_was_render_ = false;
+
+ // Only report and update jitter metrics when a proper call, containing
+ // both render and capture data, has been observed.
+ if (proper_call_observed_ &&
+ TimeToReportMetrics(++frames_since_last_report_)) {
+ // Report jitter, where the basic unit is frames.
+ constexpr int kMaxJitterToReport = 50;
+
+ // Report max and min jitter for render and capture, in units of 20 ms.
+ RTC_HISTOGRAM_COUNTS_LINEAR(
+ "WebRTC.Audio.EchoCanceller.MaxRenderJitter",
+ std::min(kMaxJitterToReport, render_jitter().max()), 1,
+ kMaxJitterToReport, kMaxJitterToReport);
+ RTC_HISTOGRAM_COUNTS_LINEAR(
+ "WebRTC.Audio.EchoCanceller.MinRenderJitter",
+ std::min(kMaxJitterToReport, render_jitter().min()), 1,
+ kMaxJitterToReport, kMaxJitterToReport);
+
+ RTC_HISTOGRAM_COUNTS_LINEAR(
+ "WebRTC.Audio.EchoCanceller.MaxCaptureJitter",
+ std::min(kMaxJitterToReport, capture_jitter().max()), 1,
+ kMaxJitterToReport, kMaxJitterToReport);
+ RTC_HISTOGRAM_COUNTS_LINEAR(
+ "WebRTC.Audio.EchoCanceller.MinCaptureJitter",
+ std::min(kMaxJitterToReport, capture_jitter().min()), 1,
+ kMaxJitterToReport, kMaxJitterToReport);
+
+ frames_since_last_report_ = 0;
+ Reset();
+ }
+}
+
+bool ApiCallJitterMetrics::WillReportMetricsAtNextCapture() const {
+ return TimeToReportMetrics(frames_since_last_report_ + 1);
+}
+
+} // namespace webrtc
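As a usage sketch (the 2-render/2-capture pattern below is an arbitrary example): calls are reported as they arrive, and once a proper call has been observed, every streak length feeds the min/max trackers until the 10-second reporting interval elapses.

    // Sketch: an interleaved call pattern with a constant jitter of 2.
    #include "modules/audio_processing/aec3/api_call_jitter_metrics.h"

    void ReportConstantJitter(webrtc::ApiCallJitterMetrics& metrics) {
      for (int k = 0; k < 1000; ++k) {
        metrics.ReportRenderCall();
        metrics.ReportRenderCall();
        metrics.ReportCaptureCall();  // After warm-up, render jitter is 2.
        metrics.ReportCaptureCall();  // Capture jitter min/max are 2 as well.
      }
    }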
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/api_call_jitter_metrics.h b/third_party/libwebrtc/modules/audio_processing/aec3/api_call_jitter_metrics.h
new file mode 100644
index 0000000000..dd1fa82e93
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/api_call_jitter_metrics.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_API_CALL_JITTER_METRICS_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_API_CALL_JITTER_METRICS_H_
+
+namespace webrtc {
+
+// Stores data for reporting metrics on the API call jitter.
+class ApiCallJitterMetrics {
+ public:
+ class Jitter {
+ public:
+ Jitter();
+ void Update(int num_api_calls_in_a_row);
+ void Reset();
+
+ int min() const { return min_; }
+ int max() const { return max_; }
+
+ private:
+ int max_;
+ int min_;
+ };
+
+ ApiCallJitterMetrics() { Reset(); }
+
+ // Update metrics for render API call.
+ void ReportRenderCall();
+
+ // Update and periodically report metrics for capture API call.
+ void ReportCaptureCall();
+
+ // Methods used only for testing.
+ const Jitter& render_jitter() const { return render_jitter_; }
+ const Jitter& capture_jitter() const { return capture_jitter_; }
+ bool WillReportMetricsAtNextCapture() const;
+
+ private:
+ void Reset();
+
+ Jitter render_jitter_;
+ Jitter capture_jitter_;
+
+ int num_api_calls_in_a_row_ = 0;
+ int frames_since_last_report_ = 0;
+ bool last_call_was_render_ = false;
+ bool proper_call_observed_ = false;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AEC3_API_CALL_JITTER_METRICS_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/api_call_jitter_metrics_unittest.cc b/third_party/libwebrtc/modules/audio_processing/aec3/api_call_jitter_metrics_unittest.cc
new file mode 100644
index 0000000000..b902487152
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/api_call_jitter_metrics_unittest.cc
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/api_call_jitter_metrics.h"
+
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+// Verify constant jitter.
+TEST(ApiCallJitterMetrics, ConstantJitter) {
+ for (int jitter = 1; jitter < 20; ++jitter) {
+ ApiCallJitterMetrics metrics;
+ for (size_t k = 0; k < 30 * kNumBlocksPerSecond; ++k) {
+ for (int j = 0; j < jitter; ++j) {
+ metrics.ReportRenderCall();
+ }
+
+ for (int j = 0; j < jitter; ++j) {
+ metrics.ReportCaptureCall();
+
+ if (metrics.WillReportMetricsAtNextCapture()) {
+ EXPECT_EQ(jitter, metrics.render_jitter().min());
+ EXPECT_EQ(jitter, metrics.render_jitter().max());
+ EXPECT_EQ(jitter, metrics.capture_jitter().min());
+ EXPECT_EQ(jitter, metrics.capture_jitter().max());
+ }
+ }
+ }
+ }
+}
+
+// Verify peaky jitter for the render.
+TEST(ApiCallJitterMetrics, JitterPeakRender) {
+ constexpr int kMinJitter = 2;
+ constexpr int kJitterPeak = 10;
+ constexpr int kPeakInterval = 100;
+
+ ApiCallJitterMetrics metrics;
+ int render_surplus = 0;
+
+ for (size_t k = 0; k < 30 * kNumBlocksPerSecond; ++k) {
+ const int num_render_calls =
+ k % kPeakInterval == 0 ? kJitterPeak : kMinJitter;
+ for (int j = 0; j < num_render_calls; ++j) {
+ metrics.ReportRenderCall();
+ ++render_surplus;
+ }
+
+ ASSERT_LE(kMinJitter, render_surplus);
+ const int num_capture_calls =
+ render_surplus == kMinJitter ? kMinJitter : kMinJitter + 1;
+ for (int j = 0; j < num_capture_calls; ++j) {
+ metrics.ReportCaptureCall();
+
+ if (metrics.WillReportMetricsAtNextCapture()) {
+ EXPECT_EQ(kMinJitter, metrics.render_jitter().min());
+ EXPECT_EQ(kJitterPeak, metrics.render_jitter().max());
+ EXPECT_EQ(kMinJitter, metrics.capture_jitter().min());
+ EXPECT_EQ(kMinJitter + 1, metrics.capture_jitter().max());
+ }
+ --render_surplus;
+ }
+ }
+}
+
+// Verify peaky jitter for the capture.
+TEST(ApiCallJitterMetrics, JitterPeakCapture) {
+ constexpr int kMinJitter = 2;
+ constexpr int kJitterPeak = 10;
+ constexpr int kPeakInterval = 100;
+
+ ApiCallJitterMetrics metrics;
+ int capture_surplus = kMinJitter;
+
+ for (size_t k = 0; k < 30 * kNumBlocksPerSecond; ++k) {
+ ASSERT_LE(kMinJitter, capture_surplus);
+ const int num_render_calls =
+ capture_surplus == kMinJitter ? kMinJitter : kMinJitter + 1;
+ for (int j = 0; j < num_render_calls; ++j) {
+ metrics.ReportRenderCall();
+ --capture_surplus;
+ }
+
+ const int num_capture_calls =
+ k % kPeakInterval == 0 ? kJitterPeak : kMinJitter;
+ for (int j = 0; j < num_capture_calls; ++j) {
+ metrics.ReportCaptureCall();
+
+ if (metrics.WillReportMetricsAtNextCapture()) {
+ EXPECT_EQ(kMinJitter, metrics.render_jitter().min());
+ EXPECT_EQ(kMinJitter + 1, metrics.render_jitter().max());
+ EXPECT_EQ(kMinJitter, metrics.capture_jitter().min());
+ EXPECT_EQ(kJitterPeak, metrics.capture_jitter().max());
+ }
+ ++capture_surplus;
+ }
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/block.h b/third_party/libwebrtc/modules/audio_processing/aec3/block.h
new file mode 100644
index 0000000000..c1fc70722d
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/block.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_BLOCK_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_BLOCK_H_
+
+#include <array>
+#include <vector>
+
+#include "api/array_view.h"
+#include "modules/audio_processing/aec3/aec3_common.h"
+
+namespace webrtc {
+
+// Contains one or more channels of 4 milliseconds of audio data.
+// The audio is split into one or more frequency bands, each with a sampling
+// rate of 16 kHz.
+class Block {
+ public:
+ Block(int num_bands, int num_channels, float default_value = 0.0f)
+ : num_bands_(num_bands),
+ num_channels_(num_channels),
+ data_(num_bands * num_channels * kBlockSize, default_value) {}
+
+ // Returns the number of bands.
+ int NumBands() const { return num_bands_; }
+
+ // Returns the number of channels.
+ int NumChannels() const { return num_channels_; }
+
+ // Modifies the number of channels and sets all samples to zero.
+ void SetNumChannels(int num_channels) {
+ num_channels_ = num_channels;
+ data_.resize(num_bands_ * num_channels_ * kBlockSize);
+ std::fill(data_.begin(), data_.end(), 0.0f);
+ }
+
+ // Iterators for accessing the data.
+ auto begin(int band, int channel) {
+ return data_.begin() + GetIndex(band, channel);
+ }
+
+ auto begin(int band, int channel) const {
+ return data_.begin() + GetIndex(band, channel);
+ }
+
+ auto end(int band, int channel) { return begin(band, channel) + kBlockSize; }
+
+ auto end(int band, int channel) const {
+ return begin(band, channel) + kBlockSize;
+ }
+
+ // Access data via ArrayView.
+ rtc::ArrayView<float, kBlockSize> View(int band, int channel) {
+ return rtc::ArrayView<float, kBlockSize>(&data_[GetIndex(band, channel)],
+ kBlockSize);
+ }
+
+ rtc::ArrayView<const float, kBlockSize> View(int band, int channel) const {
+ return rtc::ArrayView<const float, kBlockSize>(
+ &data_[GetIndex(band, channel)], kBlockSize);
+ }
+
+ // Lets two Blocks swap audio data.
+ void Swap(Block& b) {
+ std::swap(num_bands_, b.num_bands_);
+ std::swap(num_channels_, b.num_channels_);
+ data_.swap(b.data_);
+ }
+
+ private:
+ // Returns the index of the first sample of the requested |band| and
+ // |channel|.
+ int GetIndex(int band, int channel) const {
+ return (band * num_channels_ + channel) * kBlockSize;
+ }
+
+ int num_bands_;
+ int num_channels_;
+ std::vector<float> data_;
+};
+
+} // namespace webrtc
+#endif // MODULES_AUDIO_PROCESSING_AEC3_BLOCK_H_
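A short sketch of the accessor patterns the class supports (the values below are arbitrary):

    // Sketch: writing and reading a Block via its iterator and View accessors.
    #include <algorithm>

    #include "modules/audio_processing/aec3/block.h"

    void FillAndSwap() {
      webrtc::Block a(/*num_bands=*/3, /*num_channels=*/2);
      webrtc::Block b(/*num_bands=*/3, /*num_channels=*/2);
      // Fill band 0 of channel 1 with a constant via the iterator interface.
      std::fill(a.begin(/*band=*/0, /*channel=*/1), a.end(0, 1), 0.5f);
      // Read the same samples back through the fixed-size ArrayView.
      const float first_sample = a.View(/*band=*/0, /*channel=*/1)[0];
      static_cast<void>(first_sample);
      // Exchange the audio of the two blocks without copying sample data.
      a.Swap(b);
    }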
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/block_buffer.cc b/third_party/libwebrtc/modules/audio_processing/aec3/block_buffer.cc
new file mode 100644
index 0000000000..289c3f0d10
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/block_buffer.cc
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/block_buffer.h"
+
+#include <algorithm>
+
+namespace webrtc {
+
+BlockBuffer::BlockBuffer(size_t size, size_t num_bands, size_t num_channels)
+ : size(static_cast<int>(size)),
+ buffer(size, Block(num_bands, num_channels)) {}
+
+BlockBuffer::~BlockBuffer() = default;
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/block_buffer.h b/third_party/libwebrtc/modules/audio_processing/aec3/block_buffer.h
new file mode 100644
index 0000000000..3489d51646
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/block_buffer.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_BLOCK_BUFFER_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_BLOCK_BUFFER_H_
+
+#include <stddef.h>
+
+#include <vector>
+
+#include "modules/audio_processing/aec3/block.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+// Struct for bundling a circular buffer of Block objects together with the
+// read and write indices.
+struct BlockBuffer {
+ BlockBuffer(size_t size, size_t num_bands, size_t num_channels);
+ ~BlockBuffer();
+
+ int IncIndex(int index) const {
+ RTC_DCHECK_EQ(buffer.size(), static_cast<size_t>(size));
+ return index < size - 1 ? index + 1 : 0;
+ }
+
+ int DecIndex(int index) const {
+ RTC_DCHECK_EQ(buffer.size(), static_cast<size_t>(size));
+ return index > 0 ? index - 1 : size - 1;
+ }
+
+ int OffsetIndex(int index, int offset) const {
+ RTC_DCHECK_EQ(buffer.size(), static_cast<size_t>(size));
+ RTC_DCHECK_GE(size, offset);
+ return (size + index + offset) % size;
+ }
+
+ void UpdateWriteIndex(int offset) { write = OffsetIndex(write, offset); }
+ void IncWriteIndex() { write = IncIndex(write); }
+ void DecWriteIndex() { write = DecIndex(write); }
+ void UpdateReadIndex(int offset) { read = OffsetIndex(read, offset); }
+ void IncReadIndex() { read = IncIndex(read); }
+ void DecReadIndex() { read = DecIndex(read); }
+
+ const int size;
+ std::vector<Block> buffer;
+ int write = 0;
+ int read = 0;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AEC3_BLOCK_BUFFER_H_
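The index helpers are plain modular arithmetic; for example, with size 4, OffsetIndex(3, 2) wraps to 1 and OffsetIndex(0, -1) wraps to 3 (the DecIndex case). A standalone restatement of the wrap rule, matching the code above:

    // Standalone restatement of BlockBuffer's circular index arithmetic.
    #include <cassert>

    int OffsetIndex(int size, int index, int offset) {
      // Matches BlockBuffer::OffsetIndex; offset may go down to -size.
      return (size + index + offset) % size;
    }

    int main() {
      assert(OffsetIndex(/*size=*/4, /*index=*/3, /*offset=*/2) == 1);
      assert(OffsetIndex(4, 0, -1) == 3);  // The DecIndex case.
      assert(OffsetIndex(4, 3, 1) == 0);   // The IncIndex case.
      return 0;
    }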
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/block_delay_buffer.cc b/third_party/libwebrtc/modules/audio_processing/aec3/block_delay_buffer.cc
new file mode 100644
index 0000000000..059bbafcdb
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/block_delay_buffer.cc
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/audio_processing/aec3/block_delay_buffer.h"
+
+#include "api/array_view.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+BlockDelayBuffer::BlockDelayBuffer(size_t num_channels,
+ size_t num_bands,
+ size_t frame_length,
+ size_t delay_samples)
+ : frame_length_(frame_length),
+ delay_(delay_samples),
+ buf_(num_channels,
+ std::vector<std::vector<float>>(num_bands,
+ std::vector<float>(delay_, 0.f))) {}
+
+BlockDelayBuffer::~BlockDelayBuffer() = default;
+
+void BlockDelayBuffer::DelaySignal(AudioBuffer* frame) {
+ RTC_DCHECK_EQ(buf_.size(), frame->num_channels());
+ if (delay_ == 0) {
+ return;
+ }
+
+ const size_t num_bands = buf_[0].size();
+ const size_t num_channels = buf_.size();
+
+ const size_t i_start = last_insert_;
+ size_t i = 0;
+ for (size_t ch = 0; ch < num_channels; ++ch) {
+ RTC_DCHECK_EQ(buf_[ch].size(), frame->num_bands());
+ RTC_DCHECK_EQ(buf_[ch].size(), num_bands);
+ rtc::ArrayView<float* const> frame_ch(frame->split_bands(ch), num_bands);
+ const size_t delay = delay_;
+
+ for (size_t band = 0; band < num_bands; ++band) {
+ RTC_DCHECK_EQ(delay_, buf_[ch][band].size());
+ i = i_start;
+
+ // Copying these pointers and class members into local variables allows
+ // the compiler to optimize the loop below when compiling with
+ // '-fno-strict-aliasing'.
+ float* buf_ch_band = buf_[ch][band].data();
+ float* frame_ch_band = frame_ch[band];
+
+ for (size_t k = 0, frame_length = frame_length_; k < frame_length; ++k) {
+ const float tmp = buf_ch_band[i];
+ buf_ch_band[i] = frame_ch_band[k];
+ frame_ch_band[k] = tmp;
+
+ i = i < delay - 1 ? i + 1 : 0;
+ }
+ }
+ }
+
+ last_insert_ = i;
+}
+
+} // namespace webrtc
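The loop above realizes the delay with one circular buffer per band and channel, swapping each stored sample with the incoming one so that the oldest sample is emitted as the newest is stored. A minimal single-channel model of that swap (a sketch; delay > 0 is assumed, since the zero-delay case returns early above):

    // Minimal single-channel model of the swap-based delay line; delay > 0.
    #include <cassert>
    #include <cstddef>
    #include <utility>
    #include <vector>

    class DelayLine {
     public:
      explicit DelayLine(size_t delay) : buf_(delay, 0.f) {}

      // Delays the frame in place by buf_.size() samples.
      void Process(std::vector<float>& frame) {
        for (float& sample : frame) {
          std::swap(sample, buf_[pos_]);  // Emit oldest, store newest.
          pos_ = pos_ + 1 < buf_.size() ? pos_ + 1 : 0;
        }
      }

     private:
      std::vector<float> buf_;
      size_t pos_ = 0;
    };

    int main() {
      DelayLine delay_line(/*delay=*/3);
      std::vector<float> frame = {1.f, 2.f, 3.f, 4.f, 5.f};
      delay_line.Process(frame);
      // The first three outputs are the zeros initially in the buffer.
      assert(frame[0] == 0.f && frame[3] == 1.f && frame[4] == 2.f);
      return 0;
    }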
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/block_delay_buffer.h b/third_party/libwebrtc/modules/audio_processing/aec3/block_delay_buffer.h
new file mode 100644
index 0000000000..711a790bfe
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/block_delay_buffer.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_BLOCK_DELAY_BUFFER_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_BLOCK_DELAY_BUFFER_H_
+
+#include <stddef.h>
+
+#include <vector>
+
+#include "modules/audio_processing/audio_buffer.h"
+
+namespace webrtc {
+
+// Class for applying a fixed delay to the samples in a signal partitioned using
+// the audiobuffer band-splitting scheme.
+class BlockDelayBuffer {
+ public:
+ BlockDelayBuffer(size_t num_channels,
+ size_t num_bands,
+ size_t frame_length,
+ size_t delay_samples);
+ ~BlockDelayBuffer();
+
+ // Delays the samples by the specified delay.
+ void DelaySignal(AudioBuffer* frame);
+
+ private:
+ const size_t frame_length_;
+ const size_t delay_;
+ std::vector<std::vector<std::vector<float>>> buf_;
+ size_t last_insert_ = 0;
+};
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AEC3_BLOCK_DELAY_BUFFER_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/block_delay_buffer_unittest.cc b/third_party/libwebrtc/modules/audio_processing/aec3/block_delay_buffer_unittest.cc
new file mode 100644
index 0000000000..011ab49651
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/block_delay_buffer_unittest.cc
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/block_delay_buffer.h"
+
+#include <string>
+
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "modules/audio_processing/audio_buffer.h"
+#include "rtc_base/strings/string_builder.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+namespace {
+
+float SampleValue(size_t sample_index) {
+ return sample_index % 32768;
+}
+
+// Populates the frame with linearly increasing sample values for each band.
+void PopulateInputFrame(size_t frame_length,
+ size_t num_bands,
+ size_t first_sample_index,
+ float* const* frame) {
+ for (size_t k = 0; k < num_bands; ++k) {
+ for (size_t i = 0; i < frame_length; ++i) {
+ frame[k][i] = SampleValue(first_sample_index + i);
+ }
+ }
+}
+
+std::string ProduceDebugText(int sample_rate_hz, size_t delay) {
+ char log_stream_buffer[8 * 1024];
+ rtc::SimpleStringBuilder ss(log_stream_buffer);
+ ss << "Sample rate: " << sample_rate_hz;
+ ss << ", Delay: " << delay;
+ return ss.str();
+}
+
+} // namespace
+
+class BlockDelayBufferTest
+ : public ::testing::Test,
+ public ::testing::WithParamInterface<std::tuple<size_t, int, size_t>> {};
+
+INSTANTIATE_TEST_SUITE_P(
+ ParameterCombinations,
+ BlockDelayBufferTest,
+ ::testing::Combine(::testing::Values(0, 1, 27, 160, 4321, 7021),
+ ::testing::Values(16000, 32000, 48000),
+ ::testing::Values(1, 2, 4)));
+
+// Verifies that the correct signal delay is achieved.
+TEST_P(BlockDelayBufferTest, CorrectDelayApplied) {
+ const size_t delay = std::get<0>(GetParam());
+ const int rate = std::get<1>(GetParam());
+ const size_t num_channels = std::get<2>(GetParam());
+
+ SCOPED_TRACE(ProduceDebugText(rate, delay));
+ size_t num_bands = NumBandsForRate(rate);
+ size_t subband_frame_length = 160;
+
+ BlockDelayBuffer delay_buffer(num_channels, num_bands, subband_frame_length,
+ delay);
+
+ static constexpr size_t kNumFramesToProcess = 20;
+ for (size_t frame_index = 0; frame_index < kNumFramesToProcess;
+ ++frame_index) {
+ AudioBuffer audio_buffer(rate, num_channels, rate, num_channels, rate,
+ num_channels);
+ if (rate > 16000) {
+ audio_buffer.SplitIntoFrequencyBands();
+ }
+ size_t first_sample_index = frame_index * subband_frame_length;
+ for (size_t ch = 0; ch < num_channels; ++ch) {
+ PopulateInputFrame(subband_frame_length, num_bands, first_sample_index,
+ &audio_buffer.split_bands(ch)[0]);
+ }
+ delay_buffer.DelaySignal(&audio_buffer);
+
+ for (size_t ch = 0; ch < num_channels; ++ch) {
+ for (size_t band = 0; band < num_bands; ++band) {
+ size_t sample_index = first_sample_index;
+ for (size_t i = 0; i < subband_frame_length; ++i, ++sample_index) {
+ if (sample_index < delay) {
+ EXPECT_EQ(0.f, audio_buffer.split_bands(ch)[band][i]);
+ } else {
+ EXPECT_EQ(SampleValue(sample_index - delay),
+ audio_buffer.split_bands(ch)[band][i]);
+ }
+ }
+ }
+ }
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/block_framer.cc b/third_party/libwebrtc/modules/audio_processing/aec3/block_framer.cc
new file mode 100644
index 0000000000..4243ddeba0
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/block_framer.cc
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/block_framer.h"
+
+#include <algorithm>
+
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+BlockFramer::BlockFramer(size_t num_bands, size_t num_channels)
+ : num_bands_(num_bands),
+ num_channels_(num_channels),
+ buffer_(num_bands_,
+ std::vector<std::vector<float>>(
+ num_channels,
+ std::vector<float>(kBlockSize, 0.f))) {
+ RTC_DCHECK_LT(0, num_bands);
+ RTC_DCHECK_LT(0, num_channels);
+}
+
+BlockFramer::~BlockFramer() = default;
+
+// All the constants are chosen so that the buffer is either empty or has enough
+// samples for InsertBlockAndExtractSubFrame to produce a frame. In order to
+// achieve this, the InsertBlockAndExtractSubFrame and InsertBlock methods need
+// to be called in the correct order.
+void BlockFramer::InsertBlock(const Block& block) {
+ RTC_DCHECK_EQ(num_bands_, block.NumBands());
+ RTC_DCHECK_EQ(num_channels_, block.NumChannels());
+ for (size_t band = 0; band < num_bands_; ++band) {
+ for (size_t channel = 0; channel < num_channels_; ++channel) {
+ RTC_DCHECK_EQ(0, buffer_[band][channel].size());
+
+ buffer_[band][channel].insert(buffer_[band][channel].begin(),
+ block.begin(band, channel),
+ block.end(band, channel));
+ }
+ }
+}
+
+void BlockFramer::InsertBlockAndExtractSubFrame(
+ const Block& block,
+ std::vector<std::vector<rtc::ArrayView<float>>>* sub_frame) {
+ RTC_DCHECK(sub_frame);
+ RTC_DCHECK_EQ(num_bands_, block.NumBands());
+ RTC_DCHECK_EQ(num_channels_, block.NumChannels());
+ RTC_DCHECK_EQ(num_bands_, sub_frame->size());
+ for (size_t band = 0; band < num_bands_; ++band) {
+ RTC_DCHECK_EQ(num_channels_, (*sub_frame)[0].size());
+ for (size_t channel = 0; channel < num_channels_; ++channel) {
+ RTC_DCHECK_LE(kSubFrameLength,
+ buffer_[band][channel].size() + kBlockSize);
+ RTC_DCHECK_GE(kBlockSize, buffer_[band][channel].size());
+ RTC_DCHECK_EQ(kSubFrameLength, (*sub_frame)[band][channel].size());
+
+ const int samples_to_frame =
+ kSubFrameLength - buffer_[band][channel].size();
+ std::copy(buffer_[band][channel].begin(), buffer_[band][channel].end(),
+ (*sub_frame)[band][channel].begin());
+ std::copy(
+ block.begin(band, channel),
+ block.begin(band, channel) + samples_to_frame,
+ (*sub_frame)[band][channel].begin() + buffer_[band][channel].size());
+ buffer_[band][channel].clear();
+ buffer_[band][channel].insert(
+ buffer_[band][channel].begin(),
+ block.begin(band, channel) + samples_to_frame,
+ block.end(band, channel));
+ }
+ }
+}
+
+} // namespace webrtc
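The insertion cadence implied by the comment above follows from the sizes: four 80-sample subframes equal five 64-sample blocks, so every fourth InsertBlockAndExtractSubFrame call must be followed by a plain InsertBlock to keep the internal buffer balanced, and the constructor's initial block of zeros provides the 64-sample head start the first extraction needs. The bookkeeping, as a self-checking sketch:

    // Sketch of the block/subframe bookkeeping: 4 subframes of 80 samples
    // equal 5 blocks of 64, so one extra InsertBlock is due every 4th call.
    #include <cassert>

    int main() {
      constexpr int kBlockSize = 64;
      constexpr int kSubFrameLength = 80;
      int buffered = kBlockSize;  // The framer starts with a block of zeros.
      for (int sub_frame = 0; sub_frame < 8; ++sub_frame) {
        buffered += kBlockSize;       // InsertBlockAndExtractSubFrame input.
        buffered -= kSubFrameLength;  // Extracted subframe output.
        assert(buffered >= 0 && buffered < kBlockSize);
        if ((sub_frame + 1) % 4 == 0) {
          buffered += kBlockSize;  // The extra InsertBlock call.
        }
      }
      assert(buffered == kBlockSize);  // The cadence restores the start state.
      return 0;
    }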
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/block_framer.h b/third_party/libwebrtc/modules/audio_processing/aec3/block_framer.h
new file mode 100644
index 0000000000..e2cdd5a17c
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/block_framer.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_BLOCK_FRAMER_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_BLOCK_FRAMER_H_
+
+#include <vector>
+
+#include "api/array_view.h"
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "modules/audio_processing/aec3/block.h"
+
+namespace webrtc {
+
+// Class for producing frames consisting of 2 subframes of 80 samples each
+// from 64 sample blocks. The class is designed to work together with the
+// FrameBlocker class, which performs the reverse conversion. Used together
+// with that class, this BlockFramer produces output frames at the same rate
+// as frames are received by the FrameBlocker. Note that the internal buffers
+// will overrun if any other packet insertion rate is used.
+class BlockFramer {
+ public:
+ BlockFramer(size_t num_bands, size_t num_channels);
+ ~BlockFramer();
+ BlockFramer(const BlockFramer&) = delete;
+ BlockFramer& operator=(const BlockFramer&) = delete;
+
+ // Adds a 64 sample block into the data that will form the next output frame.
+ void InsertBlock(const Block& block);
+ // Adds a 64 sample block and extracts an 80 sample subframe.
+ void InsertBlockAndExtractSubFrame(
+ const Block& block,
+ std::vector<std::vector<rtc::ArrayView<float>>>* sub_frame);
+
+ private:
+ const size_t num_bands_;
+ const size_t num_channels_;
+ std::vector<std::vector<std::vector<float>>> buffer_;
+};
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AEC3_BLOCK_FRAMER_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/block_framer_unittest.cc b/third_party/libwebrtc/modules/audio_processing/aec3/block_framer_unittest.cc
new file mode 100644
index 0000000000..9439623f72
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/block_framer_unittest.cc
@@ -0,0 +1,337 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/block_framer.h"
+
+#include <string>
+#include <vector>
+
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "rtc_base/strings/string_builder.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+void SetupSubFrameView(
+ std::vector<std::vector<std::vector<float>>>* sub_frame,
+ std::vector<std::vector<rtc::ArrayView<float>>>* sub_frame_view) {
+ for (size_t band = 0; band < sub_frame_view->size(); ++band) {
+ for (size_t channel = 0; channel < (*sub_frame_view)[band].size();
+ ++channel) {
+ (*sub_frame_view)[band][channel] =
+ rtc::ArrayView<float>((*sub_frame)[band][channel].data(),
+ (*sub_frame)[band][channel].size());
+ }
+ }
+}
+
+float ComputeSampleValue(size_t chunk_counter,
+ size_t chunk_size,
+ size_t band,
+ size_t channel,
+ size_t sample_index,
+ int offset) {
+ float value = static_cast<int>(100 + chunk_counter * chunk_size +
+ sample_index + channel) +
+ offset;
+ return 5000 * band + value;
+}
+
+bool VerifySubFrame(
+ size_t sub_frame_counter,
+ int offset,
+ const std::vector<std::vector<rtc::ArrayView<float>>>& sub_frame_view) {
+ for (size_t band = 0; band < sub_frame_view.size(); ++band) {
+ for (size_t channel = 0; channel < sub_frame_view[band].size(); ++channel) {
+ for (size_t sample = 0; sample < sub_frame_view[band][channel].size();
+ ++sample) {
+ const float reference_value = ComputeSampleValue(
+ sub_frame_counter, kSubFrameLength, band, channel, sample, offset);
+ if (reference_value != sub_frame_view[band][channel][sample]) {
+ return false;
+ }
+ }
+ }
+ }
+ return true;
+}
+
+void FillBlock(size_t block_counter, Block* block) {
+ for (int band = 0; band < block->NumBands(); ++band) {
+ for (int channel = 0; channel < block->NumChannels(); ++channel) {
+ auto b = block->View(band, channel);
+ for (size_t sample = 0; sample < kBlockSize; ++sample) {
+ b[sample] = ComputeSampleValue(block_counter, kBlockSize, band, channel,
+ sample, 0);
+ }
+ }
+ }
+}
+
+// Verifies that the BlockFramer is able to produce the expected frame content.
+void RunFramerTest(int sample_rate_hz, size_t num_channels) {
+ constexpr size_t kNumSubFramesToProcess = 10;
+ const size_t num_bands = NumBandsForRate(sample_rate_hz);
+
+ Block block(num_bands, num_channels);
+ std::vector<std::vector<std::vector<float>>> output_sub_frame(
+ num_bands, std::vector<std::vector<float>>(
+ num_channels, std::vector<float>(kSubFrameLength, 0.f)));
+ std::vector<std::vector<rtc::ArrayView<float>>> output_sub_frame_view(
+ num_bands, std::vector<rtc::ArrayView<float>>(num_channels));
+ SetupSubFrameView(&output_sub_frame, &output_sub_frame_view);
+ BlockFramer framer(num_bands, num_channels);
+
+ size_t block_index = 0;
+ for (size_t sub_frame_index = 0; sub_frame_index < kNumSubFramesToProcess;
+ ++sub_frame_index) {
+ FillBlock(block_index++, &block);
+ framer.InsertBlockAndExtractSubFrame(block, &output_sub_frame_view);
+ if (sub_frame_index > 1) {
+ EXPECT_TRUE(VerifySubFrame(sub_frame_index, -64, output_sub_frame_view));
+ }
+
+ if ((sub_frame_index + 1) % 4 == 0) {
+ FillBlock(block_index++, &block);
+ framer.InsertBlock(block);
+ }
+ }
+}
+
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+// Verifies that the BlockFramer crashes if the InsertBlockAndExtractSubFrame
+// method is called for inputs with the wrong number of bands or band lengths.
+void RunWronglySizedInsertAndExtractParametersTest(
+ int sample_rate_hz,
+ size_t correct_num_channels,
+ size_t num_block_bands,
+ size_t num_block_channels,
+ size_t num_sub_frame_bands,
+ size_t num_sub_frame_channels,
+ size_t sub_frame_length) {
+ const size_t correct_num_bands = NumBandsForRate(sample_rate_hz);
+
+ Block block(num_block_bands, num_block_channels);
+ std::vector<std::vector<std::vector<float>>> output_sub_frame(
+ num_sub_frame_bands,
+ std::vector<std::vector<float>>(
+ num_sub_frame_channels, std::vector<float>(sub_frame_length, 0.f)));
+ std::vector<std::vector<rtc::ArrayView<float>>> output_sub_frame_view(
+ output_sub_frame.size(),
+ std::vector<rtc::ArrayView<float>>(num_sub_frame_channels));
+ SetupSubFrameView(&output_sub_frame, &output_sub_frame_view);
+ BlockFramer framer(correct_num_bands, correct_num_channels);
+ EXPECT_DEATH(
+ framer.InsertBlockAndExtractSubFrame(block, &output_sub_frame_view), "");
+}
+
+// Verifies that the BlockFramer crashes if the InsertBlock method is called
+// with inputs that have the wrong number of bands or channels.
+void RunWronglySizedInsertParameterTest(int sample_rate_hz,
+ size_t correct_num_channels,
+ size_t num_block_bands,
+ size_t num_block_channels) {
+ const size_t correct_num_bands = NumBandsForRate(sample_rate_hz);
+
+ Block correct_block(correct_num_bands, correct_num_channels);
+ Block wrong_block(num_block_bands, num_block_channels);
+ std::vector<std::vector<std::vector<float>>> output_sub_frame(
+ correct_num_bands,
+ std::vector<std::vector<float>>(
+ correct_num_channels, std::vector<float>(kSubFrameLength, 0.f)));
+ std::vector<std::vector<rtc::ArrayView<float>>> output_sub_frame_view(
+ output_sub_frame.size(),
+ std::vector<rtc::ArrayView<float>>(correct_num_channels));
+ SetupSubFrameView(&output_sub_frame, &output_sub_frame_view);
+ BlockFramer framer(correct_num_bands, correct_num_channels);
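+ // Note (editorial): InsertBlock is only valid after four successive
+ // InsertBlockAndExtractSubFrame calls (see RunWronglyInsertOrderTest
+ // below), so make those calls first; the death is then triggered by the
+ // wrongly sized block alone.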
+ framer.InsertBlockAndExtractSubFrame(correct_block, &output_sub_frame_view);
+ framer.InsertBlockAndExtractSubFrame(correct_block, &output_sub_frame_view);
+ framer.InsertBlockAndExtractSubFrame(correct_block, &output_sub_frame_view);
+ framer.InsertBlockAndExtractSubFrame(correct_block, &output_sub_frame_view);
+
+ EXPECT_DEATH(framer.InsertBlock(wrong_block), "");
+}
+
+// Verifies that the BlockFramer crashes if the InsertBlock method is called
+// after a wrong number of previous InsertBlockAndExtractSubFrame method calls
+// have been made.
+void RunWronglyInsertOrderTest(int sample_rate_hz,
+ size_t num_channels,
+ size_t num_preceding_api_calls) {
+ const size_t correct_num_bands = NumBandsForRate(sample_rate_hz);
+
+ Block block(correct_num_bands, num_channels);
+ std::vector<std::vector<std::vector<float>>> output_sub_frame(
+ correct_num_bands,
+ std::vector<std::vector<float>>(
+ num_channels, std::vector<float>(kSubFrameLength, 0.f)));
+ std::vector<std::vector<rtc::ArrayView<float>>> output_sub_frame_view(
+ output_sub_frame.size(),
+ std::vector<rtc::ArrayView<float>>(num_channels));
+ SetupSubFrameView(&output_sub_frame, &output_sub_frame_view);
+ BlockFramer framer(correct_num_bands, num_channels);
+ for (size_t k = 0; k < num_preceding_api_calls; ++k) {
+ framer.InsertBlockAndExtractSubFrame(block, &output_sub_frame_view);
+ }
+
+ EXPECT_DEATH(framer.InsertBlock(block), "");
+}
+#endif
+
+std::string ProduceDebugText(int sample_rate_hz, size_t num_channels) {
+ rtc::StringBuilder ss;
+ ss << "Sample rate: " << sample_rate_hz;
+ ss << ", number of channels: " << num_channels;
+ return ss.Release();
+}
+
+} // namespace
+
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+TEST(BlockFramerDeathTest,
+ WrongNumberOfBandsInBlockForInsertBlockAndExtractSubFrame) {
+ for (auto rate : {16000, 32000, 48000}) {
+ for (auto correct_num_channels : {1, 2, 8}) {
+ SCOPED_TRACE(ProduceDebugText(rate, correct_num_channels));
+ const size_t correct_num_bands = NumBandsForRate(rate);
+ const size_t wrong_num_bands = (correct_num_bands % 3) + 1;
+ RunWronglySizedInsertAndExtractParametersTest(
+ rate, correct_num_channels, wrong_num_bands, correct_num_channels,
+ correct_num_bands, correct_num_channels, kSubFrameLength);
+ }
+ }
+}
+
+TEST(BlockFramerDeathTest,
+ WrongNumberOfChannelsInBlockForInsertBlockAndExtractSubFrame) {
+ for (auto rate : {16000, 32000, 48000}) {
+ for (auto correct_num_channels : {1, 2, 8}) {
+ SCOPED_TRACE(ProduceDebugText(rate, correct_num_channels));
+ const size_t correct_num_bands = NumBandsForRate(rate);
+ const size_t wrong_num_channels = correct_num_channels + 1;
+ RunWronglySizedInsertAndExtractParametersTest(
+ rate, correct_num_channels, correct_num_bands, wrong_num_channels,
+ correct_num_bands, correct_num_channels, kSubFrameLength);
+ }
+ }
+}
+
+TEST(BlockFramerDeathTest,
+ WrongNumberOfBandsInSubFrameForInsertBlockAndExtractSubFrame) {
+ for (auto rate : {16000, 32000, 48000}) {
+ for (auto correct_num_channels : {1, 2, 8}) {
+ SCOPED_TRACE(ProduceDebugText(rate, correct_num_channels));
+ const size_t correct_num_bands = NumBandsForRate(rate);
+ const size_t wrong_num_bands = (correct_num_bands % 3) + 1;
+ RunWronglySizedInsertAndExtractParametersTest(
+ rate, correct_num_channels, correct_num_bands, correct_num_channels,
+ wrong_num_bands, correct_num_channels, kSubFrameLength);
+ }
+ }
+}
+
+TEST(BlockFramerDeathTest,
+ WrongNumberOfChannelsInSubFrameForInsertBlockAndExtractSubFrame) {
+ for (auto rate : {16000, 32000, 48000}) {
+ for (auto correct_num_channels : {1, 2, 8}) {
+ SCOPED_TRACE(ProduceDebugText(rate, correct_num_channels));
+ const size_t correct_num_bands = NumBandsForRate(rate);
+ const size_t wrong_num_channels = correct_num_channels + 1;
+ RunWronglySizedInsertAndExtractParametersTest(
+ rate, correct_num_channels, correct_num_bands, correct_num_channels,
+ correct_num_bands, wrong_num_channels, kSubFrameLength);
+ }
+ }
+}
+
+TEST(BlockFramerDeathTest,
+ WrongNumberOfSamplesInSubFrameForInsertBlockAndExtractSubFrame) {
+ const size_t correct_num_channels = 1;
+ for (auto rate : {16000, 32000, 48000}) {
+ SCOPED_TRACE(ProduceDebugText(rate, correct_num_channels));
+ const size_t correct_num_bands = NumBandsForRate(rate);
+ RunWronglySizedInsertAndExtractParametersTest(
+ rate, correct_num_channels, correct_num_bands, correct_num_channels,
+ correct_num_bands, correct_num_channels, kSubFrameLength - 1);
+ }
+}
+
+TEST(BlockFramerDeathTest, WrongNumberOfBandsInBlockForInsertBlock) {
+ for (auto rate : {16000, 32000, 48000}) {
+ for (auto correct_num_channels : {1, 2, 8}) {
+ SCOPED_TRACE(ProduceDebugText(rate, correct_num_channels));
+ const size_t correct_num_bands = NumBandsForRate(rate);
+ const size_t wrong_num_bands = (correct_num_bands % 3) + 1;
+ RunWronglySizedInsertParameterTest(rate, correct_num_channels,
+ wrong_num_bands, correct_num_channels);
+ }
+ }
+}
+
+TEST(BlockFramerDeathTest, WrongNumberOfChannelsInBlockForInsertBlock) {
+ for (auto rate : {16000, 32000, 48000}) {
+ for (auto correct_num_channels : {1, 2, 8}) {
+ SCOPED_TRACE(ProduceDebugText(rate, correct_num_channels));
+ const size_t correct_num_bands = NumBandsForRate(rate);
+ const size_t wrong_num_channels = correct_num_channels + 1;
+ RunWronglySizedInsertParameterTest(rate, correct_num_channels,
+ correct_num_bands, wrong_num_channels);
+ }
+ }
+}
+
+TEST(BlockFramerDeathTest, WrongNumberOfPrecedingApiCallsForInsertBlock) {
+ for (size_t num_channels : {1, 2, 8}) {
+ for (auto rate : {16000, 32000, 48000}) {
+ for (size_t num_calls = 0; num_calls < 4; ++num_calls) {
+ rtc::StringBuilder ss;
+ ss << "Sample rate: " << rate;
+ ss << ", Num channels: " << num_channels;
+ ss << ", Num preceeding InsertBlockAndExtractSubFrame calls: "
+ << num_calls;
+
+ SCOPED_TRACE(ss.str());
+ RunWronglyInsertOrderTest(rate, num_channels, num_calls);
+ }
+ }
+ }
+}
+
+// Verifies that the check for a zero number of channels works.
+TEST(BlockFramerDeathTest, ZeroNumberOfChannelsParameter) {
+ EXPECT_DEATH(BlockFramer(16000, 0), "");
+}
+
+// Verifies that the check for a zero number of bands works.
+TEST(BlockFramerDeathTest, ZeroNumberOfBandsParameter) {
+ EXPECT_DEATH(BlockFramer(0, 1), "");
+}
+
+// Verifies that the check for a null sub_frame pointer works.
+TEST(BlockFramerDeathTest, NullSubFrameParameter) {
+ EXPECT_DEATH(
+ BlockFramer(1, 1).InsertBlockAndExtractSubFrame(Block(1, 1), nullptr),
+ "");
+}
+
+#endif
+
+TEST(BlockFramer, FrameBitexactness) {
+ for (auto rate : {16000, 32000, 48000}) {
+ for (auto num_channels : {1, 2, 4, 8}) {
+ SCOPED_TRACE(ProduceDebugText(rate, num_channels));
+ RunFramerTest(rate, num_channels);
+ }
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/block_processor.cc b/third_party/libwebrtc/modules/audio_processing/aec3/block_processor.cc
new file mode 100644
index 0000000000..11b8aa62ee
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/block_processor.cc
@@ -0,0 +1,292 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/audio_processing/aec3/block_processor.h"
+
+#include <stddef.h>
+
+#include <atomic>
+#include <memory>
+#include <utility>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/audio/echo_canceller3_config.h"
+#include "api/audio/echo_control.h"
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "modules/audio_processing/aec3/block_processor_metrics.h"
+#include "modules/audio_processing/aec3/delay_estimate.h"
+#include "modules/audio_processing/aec3/echo_path_variability.h"
+#include "modules/audio_processing/aec3/echo_remover.h"
+#include "modules/audio_processing/aec3/render_delay_buffer.h"
+#include "modules/audio_processing/aec3/render_delay_controller.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+namespace {
+
+enum class BlockProcessorApiCall { kCapture, kRender };
+
+class BlockProcessorImpl final : public BlockProcessor {
+ public:
+ BlockProcessorImpl(const EchoCanceller3Config& config,
+ int sample_rate_hz,
+ size_t num_render_channels,
+ size_t num_capture_channels,
+ std::unique_ptr<RenderDelayBuffer> render_buffer,
+ std::unique_ptr<RenderDelayController> delay_controller,
+ std::unique_ptr<EchoRemover> echo_remover);
+
+ BlockProcessorImpl() = delete;
+
+ ~BlockProcessorImpl() override;
+
+ void ProcessCapture(bool echo_path_gain_change,
+ bool capture_signal_saturation,
+ Block* linear_output,
+ Block* capture_block) override;
+
+ void BufferRender(const Block& block) override;
+
+ void UpdateEchoLeakageStatus(bool leakage_detected) override;
+
+ void GetMetrics(EchoControl::Metrics* metrics) const override;
+
+ void SetAudioBufferDelay(int delay_ms) override;
+ void SetCaptureOutputUsage(bool capture_output_used) override;
+
+ private:
+ static std::atomic<int> instance_count_;
+ std::unique_ptr<ApmDataDumper> data_dumper_;
+ const EchoCanceller3Config config_;
+ bool capture_properly_started_ = false;
+ bool render_properly_started_ = false;
+ const size_t sample_rate_hz_;
+ std::unique_ptr<RenderDelayBuffer> render_buffer_;
+ std::unique_ptr<RenderDelayController> delay_controller_;
+ std::unique_ptr<EchoRemover> echo_remover_;
+ BlockProcessorMetrics metrics_;
+ RenderDelayBuffer::BufferingEvent render_event_;
+ size_t capture_call_counter_ = 0;
+ absl::optional<DelayEstimate> estimated_delay_;
+};
+
+std::atomic<int> BlockProcessorImpl::instance_count_(0);
+
+BlockProcessorImpl::BlockProcessorImpl(
+ const EchoCanceller3Config& config,
+ int sample_rate_hz,
+ size_t num_render_channels,
+ size_t num_capture_channels,
+ std::unique_ptr<RenderDelayBuffer> render_buffer,
+ std::unique_ptr<RenderDelayController> delay_controller,
+ std::unique_ptr<EchoRemover> echo_remover)
+ : data_dumper_(new ApmDataDumper(instance_count_.fetch_add(1) + 1)),
+ config_(config),
+ sample_rate_hz_(sample_rate_hz),
+ render_buffer_(std::move(render_buffer)),
+ delay_controller_(std::move(delay_controller)),
+ echo_remover_(std::move(echo_remover)),
+ render_event_(RenderDelayBuffer::BufferingEvent::kNone) {
+ RTC_DCHECK(ValidFullBandRate(sample_rate_hz_));
+}
+
+BlockProcessorImpl::~BlockProcessorImpl() = default;
+
+void BlockProcessorImpl::ProcessCapture(bool echo_path_gain_change,
+ bool capture_signal_saturation,
+ Block* linear_output,
+ Block* capture_block) {
+ RTC_DCHECK(capture_block);
+ RTC_DCHECK_EQ(NumBandsForRate(sample_rate_hz_), capture_block->NumBands());
+
+ capture_call_counter_++;
+
+ data_dumper_->DumpRaw("aec3_processblock_call_order",
+ static_cast<int>(BlockProcessorApiCall::kCapture));
+ data_dumper_->DumpWav("aec3_processblock_capture_input",
+ capture_block->View(/*band=*/0, /*channel=*/0), 16000,
+ 1);
+
+ if (render_properly_started_) {
+ if (!capture_properly_started_) {
+ capture_properly_started_ = true;
+ render_buffer_->Reset();
+ if (delay_controller_)
+ delay_controller_->Reset(true);
+ }
+ } else {
+ // If no render data has yet arrived, do not process the capture signal.
+ render_buffer_->HandleSkippedCaptureProcessing();
+ return;
+ }
+
+ EchoPathVariability echo_path_variability(
+ echo_path_gain_change, EchoPathVariability::DelayAdjustment::kNone,
+ false);
+
+ if (render_event_ == RenderDelayBuffer::BufferingEvent::kRenderOverrun &&
+ render_properly_started_) {
+ echo_path_variability.delay_change =
+ EchoPathVariability::DelayAdjustment::kBufferFlush;
+ if (delay_controller_)
+ delay_controller_->Reset(true);
+ RTC_LOG(LS_WARNING) << "Reset due to render buffer overrun at block "
+ << capture_call_counter_;
+ }
+ render_event_ = RenderDelayBuffer::BufferingEvent::kNone;
+
+ // Update the render buffers with any newly arrived render blocks and prepare
+ // the render buffers for reading the render data corresponding to the current
+ // capture block.
+ RenderDelayBuffer::BufferingEvent buffer_event =
+ render_buffer_->PrepareCaptureProcessing();
+ // Reset the delay controller at render buffer underrun.
+ if (buffer_event == RenderDelayBuffer::BufferingEvent::kRenderUnderrun) {
+ if (delay_controller_)
+ delay_controller_->Reset(false);
+ }
+
+ data_dumper_->DumpWav("aec3_processblock_capture_input2",
+ capture_block->View(/*band=*/0, /*channel=*/0), 16000,
+ 1);
+
+ bool has_delay_estimator = !config_.delay.use_external_delay_estimator;
+ if (has_delay_estimator) {
+ RTC_DCHECK(delay_controller_);
+ // Compute and apply the render delay required to achieve proper signal
+ // alignment.
+ estimated_delay_ = delay_controller_->GetDelay(
+ render_buffer_->GetDownsampledRenderBuffer(), render_buffer_->Delay(),
+ *capture_block);
+
+ if (estimated_delay_) {
+ bool delay_change =
+ render_buffer_->AlignFromDelay(estimated_delay_->delay);
+ if (delay_change) {
+ rtc::LoggingSeverity log_level =
+ config_.delay.log_warning_on_delay_changes ? rtc::LS_WARNING
+ : rtc::LS_INFO;
+ RTC_LOG_V(log_level) << "Delay changed to " << estimated_delay_->delay
+ << " at block " << capture_call_counter_;
+ echo_path_variability.delay_change =
+ EchoPathVariability::DelayAdjustment::kNewDetectedDelay;
+ }
+ }
+
+ echo_path_variability.clock_drift = delay_controller_->HasClockdrift();
+
+ } else {
+ render_buffer_->AlignFromExternalDelay();
+ }
+
+ // Remove the echo from the capture signal.
+ if (has_delay_estimator || render_buffer_->HasReceivedBufferDelay()) {
+ echo_remover_->ProcessCapture(
+ echo_path_variability, capture_signal_saturation, estimated_delay_,
+ render_buffer_->GetRenderBuffer(), linear_output, capture_block);
+ }
+
+ // Update the metrics.
+ metrics_.UpdateCapture(false);
+}
+
+void BlockProcessorImpl::BufferRender(const Block& block) {
+ RTC_DCHECK_EQ(NumBandsForRate(sample_rate_hz_), block.NumBands());
+ data_dumper_->DumpRaw("aec3_processblock_call_order",
+ static_cast<int>(BlockProcessorApiCall::kRender));
+ data_dumper_->DumpWav("aec3_processblock_render_input",
+ block.View(/*band=*/0, /*channel=*/0), 16000, 1);
+ data_dumper_->DumpWav("aec3_processblock_render_input2",
+ block.View(/*band=*/0, /*channel=*/0), 16000, 1);
+
+ render_event_ = render_buffer_->Insert(block);
+
+ metrics_.UpdateRender(render_event_ !=
+ RenderDelayBuffer::BufferingEvent::kNone);
+
+ render_properly_started_ = true;
+ if (delay_controller_)
+ delay_controller_->LogRenderCall();
+}
+
+void BlockProcessorImpl::UpdateEchoLeakageStatus(bool leakage_detected) {
+ echo_remover_->UpdateEchoLeakageStatus(leakage_detected);
+}
+
+void BlockProcessorImpl::GetMetrics(EchoControl::Metrics* metrics) const {
+ echo_remover_->GetMetrics(metrics);
+ constexpr int block_size_ms = 4;
+ absl::optional<size_t> delay = render_buffer_->Delay();
+ metrics->delay_ms = delay ? static_cast<int>(*delay) * block_size_ms : 0;
+}
+
+void BlockProcessorImpl::SetAudioBufferDelay(int delay_ms) {
+ render_buffer_->SetAudioBufferDelay(delay_ms);
+}
+
+void BlockProcessorImpl::SetCaptureOutputUsage(bool capture_output_used) {
+ echo_remover_->SetCaptureOutputUsage(capture_output_used);
+}
+
+} // namespace
+
+BlockProcessor* BlockProcessor::Create(const EchoCanceller3Config& config,
+ int sample_rate_hz,
+ size_t num_render_channels,
+ size_t num_capture_channels) {
+ std::unique_ptr<RenderDelayBuffer> render_buffer(
+ RenderDelayBuffer::Create(config, sample_rate_hz, num_render_channels));
+ std::unique_ptr<RenderDelayController> delay_controller;
+ if (!config.delay.use_external_delay_estimator) {
+ delay_controller.reset(RenderDelayController::Create(config, sample_rate_hz,
+ num_capture_channels));
+ }
+ std::unique_ptr<EchoRemover> echo_remover(EchoRemover::Create(
+ config, sample_rate_hz, num_render_channels, num_capture_channels));
+ return Create(config, sample_rate_hz, num_render_channels,
+ num_capture_channels, std::move(render_buffer),
+ std::move(delay_controller), std::move(echo_remover));
+}
+
+BlockProcessor* BlockProcessor::Create(
+ const EchoCanceller3Config& config,
+ int sample_rate_hz,
+ size_t num_render_channels,
+ size_t num_capture_channels,
+ std::unique_ptr<RenderDelayBuffer> render_buffer) {
+ std::unique_ptr<RenderDelayController> delay_controller;
+ if (!config.delay.use_external_delay_estimator) {
+ delay_controller.reset(RenderDelayController::Create(config, sample_rate_hz,
+ num_capture_channels));
+ }
+ std::unique_ptr<EchoRemover> echo_remover(EchoRemover::Create(
+ config, sample_rate_hz, num_render_channels, num_capture_channels));
+ return Create(config, sample_rate_hz, num_render_channels,
+ num_capture_channels, std::move(render_buffer),
+ std::move(delay_controller), std::move(echo_remover));
+}
+
+BlockProcessor* BlockProcessor::Create(
+ const EchoCanceller3Config& config,
+ int sample_rate_hz,
+ size_t num_render_channels,
+ size_t num_capture_channels,
+ std::unique_ptr<RenderDelayBuffer> render_buffer,
+ std::unique_ptr<RenderDelayController> delay_controller,
+ std::unique_ptr<EchoRemover> echo_remover) {
+ return new BlockProcessorImpl(config, sample_rate_hz, num_render_channels,
+ num_capture_channels, std::move(render_buffer),
+ std::move(delay_controller),
+ std::move(echo_remover));
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/block_processor.h b/third_party/libwebrtc/modules/audio_processing/aec3/block_processor.h
new file mode 100644
index 0000000000..01a83ae5f7
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/block_processor.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_BLOCK_PROCESSOR_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_BLOCK_PROCESSOR_H_
+
+#include <stddef.h>
+
+#include <memory>
+#include <vector>
+
+#include "api/audio/echo_canceller3_config.h"
+#include "api/audio/echo_control.h"
+#include "modules/audio_processing/aec3/block.h"
+#include "modules/audio_processing/aec3/echo_remover.h"
+#include "modules/audio_processing/aec3/render_delay_buffer.h"
+#include "modules/audio_processing/aec3/render_delay_controller.h"
+
+namespace webrtc {
+
+// Class for performing echo cancellation on 64 sample blocks of audio data.
+class BlockProcessor {
+ public:
+ static BlockProcessor* Create(const EchoCanceller3Config& config,
+ int sample_rate_hz,
+ size_t num_render_channels,
+ size_t num_capture_channels);
+ // Only used for testing purposes.
+ static BlockProcessor* Create(
+ const EchoCanceller3Config& config,
+ int sample_rate_hz,
+ size_t num_render_channels,
+ size_t num_capture_channels,
+ std::unique_ptr<RenderDelayBuffer> render_buffer);
+ static BlockProcessor* Create(
+ const EchoCanceller3Config& config,
+ int sample_rate_hz,
+ size_t num_render_channels,
+ size_t num_capture_channels,
+ std::unique_ptr<RenderDelayBuffer> render_buffer,
+ std::unique_ptr<RenderDelayController> delay_controller,
+ std::unique_ptr<EchoRemover> echo_remover);
+
+ virtual ~BlockProcessor() = default;
+
+ // Get current metrics.
+ virtual void GetMetrics(EchoControl::Metrics* metrics) const = 0;
+
+ // Provides an optional external estimate of the audio buffer delay.
+ virtual void SetAudioBufferDelay(int delay_ms) = 0;
+
+ // Processes a block of capture data.
+ virtual void ProcessCapture(bool echo_path_gain_change,
+ bool capture_signal_saturation,
+ Block* linear_output,
+ Block* capture_block) = 0;
+
+ // Buffers a block of render data supplied by a FrameBlocker object.
+ virtual void BufferRender(const Block& render_block) = 0;
+
+ // Reports whether echo leakage has been detected in the echo canceller
+ // output.
+ virtual void UpdateEchoLeakageStatus(bool leakage_detected) = 0;
+
+ // Specifies whether the capture output will be used. The purpose of this is
+ // to allow the block processor to deactivate some of the processing when the
+ // resulting output is anyway not used, for instance when the endpoint is
+ // muted.
+ virtual void SetCaptureOutputUsage(bool capture_output_used) = 0;
+};
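+
+// A minimal usage sketch, mirroring the accompanying unit tests (the sample
+// rate and channel counts are illustrative only):
+//
+//   std::unique_ptr<BlockProcessor> processor(BlockProcessor::Create(
+//       EchoCanceller3Config(), /*sample_rate_hz=*/16000,
+//       /*num_render_channels=*/1, /*num_capture_channels=*/1));
+//   Block block(NumBandsForRate(16000), /*num_channels=*/1);
+//   processor->BufferRender(block);
+//   processor->ProcessCapture(/*echo_path_gain_change=*/false,
+//                             /*capture_signal_saturation=*/false,
+//                             /*linear_output=*/nullptr, &block);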
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AEC3_BLOCK_PROCESSOR_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/block_processor_metrics.cc b/third_party/libwebrtc/modules/audio_processing/aec3/block_processor_metrics.cc
new file mode 100644
index 0000000000..deac1fcd22
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/block_processor_metrics.cc
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/block_processor_metrics.h"
+
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "rtc_base/checks.h"
+#include "system_wrappers/include/metrics.h"
+
+namespace webrtc {
+
+namespace {
+
+enum class RenderUnderrunCategory {
+ kNone,
+ kFew,
+ kSeveral,
+ kMany,
+ kConstant,
+ kNumCategories
+};
+
+enum class RenderOverrunCategory {
+ kNone,
+ kFew,
+ kSeveral,
+ kMany,
+ kConstant,
+ kNumCategories
+};
+
+} // namespace
+
+void BlockProcessorMetrics::UpdateCapture(bool underrun) {
+ ++capture_block_counter_;
+ if (underrun) {
+ ++render_buffer_underruns_;
+ }
+
+ if (capture_block_counter_ == kMetricsReportingIntervalBlocks) {
+ metrics_reported_ = true;
+
+ RenderUnderrunCategory underrun_category;
+ if (render_buffer_underruns_ == 0) {
+ underrun_category = RenderUnderrunCategory::kNone;
+ } else if (render_buffer_underruns_ > (capture_block_counter_ >> 1)) {
+ underrun_category = RenderUnderrunCategory::kConstant;
+ } else if (render_buffer_underruns_ > 100) {
+ underrun_category = RenderUnderrunCategory::kMany;
+ } else if (render_buffer_underruns_ > 10) {
+ underrun_category = RenderUnderrunCategory::kSeveral;
+ } else {
+ underrun_category = RenderUnderrunCategory::kFew;
+ }
+ RTC_HISTOGRAM_ENUMERATION(
+ "WebRTC.Audio.EchoCanceller.RenderUnderruns",
+ static_cast<int>(underrun_category),
+ static_cast<int>(RenderUnderrunCategory::kNumCategories));
+
+ RenderOverrunCategory overrun_category;
+ if (render_buffer_overruns_ == 0) {
+ overrun_category = RenderOverrunCategory::kNone;
+ } else if (render_buffer_overruns_ > (buffer_render_calls_ >> 1)) {
+ overrun_category = RenderOverrunCategory::kConstant;
+ } else if (render_buffer_overruns_ > 100) {
+ overrun_category = RenderOverrunCategory::kMany;
+ } else if (render_buffer_overruns_ > 10) {
+ overrun_category = RenderOverrunCategory::kSeveral;
+ } else {
+ overrun_category = RenderOverrunCategory::kFew;
+ }
+ RTC_HISTOGRAM_ENUMERATION(
+ "WebRTC.Audio.EchoCanceller.RenderOverruns",
+ static_cast<int>(overrun_category),
+ static_cast<int>(RenderOverrunCategory::kNumCategories));
+
+ ResetMetrics();
+ capture_block_counter_ = 0;
+ } else {
+ metrics_reported_ = false;
+ }
+}
+
+void BlockProcessorMetrics::UpdateRender(bool overrun) {
+ ++buffer_render_calls_;
+ if (overrun) {
+ ++render_buffer_overruns_;
+ }
+}
+
+void BlockProcessorMetrics::ResetMetrics() {
+ render_buffer_underruns_ = 0;
+ render_buffer_overruns_ = 0;
+ buffer_render_calls_ = 0;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/block_processor_metrics.h b/third_party/libwebrtc/modules/audio_processing/aec3/block_processor_metrics.h
new file mode 100644
index 0000000000..a70d0dac5b
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/block_processor_metrics.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_BLOCK_PROCESSOR_METRICS_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_BLOCK_PROCESSOR_METRICS_H_
+
+namespace webrtc {
+
+// Handles the reporting of metrics for the block_processor.
+class BlockProcessorMetrics {
+ public:
+ BlockProcessorMetrics() = default;
+
+ BlockProcessorMetrics(const BlockProcessorMetrics&) = delete;
+ BlockProcessorMetrics& operator=(const BlockProcessorMetrics&) = delete;
+
+ // Updates the metric with new capture data.
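+ // Metrics are reported once every kMetricsReportingIntervalBlocks calls.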
+ void UpdateCapture(bool underrun);
+
+ // Updates the metric with new render data.
+ void UpdateRender(bool overrun);
+
+ // Returns true if the metrics have just been reported, otherwise false.
+ bool MetricsReported() { return metrics_reported_; }
+
+ private:
+ // Resets the metrics.
+ void ResetMetrics();
+
+ int capture_block_counter_ = 0;
+ bool metrics_reported_ = false;
+ int render_buffer_underruns_ = 0;
+ int render_buffer_overruns_ = 0;
+ int buffer_render_calls_ = 0;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AEC3_BLOCK_PROCESSOR_METRICS_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/block_processor_metrics_unittest.cc b/third_party/libwebrtc/modules/audio_processing/aec3/block_processor_metrics_unittest.cc
new file mode 100644
index 0000000000..3e23c2499d
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/block_processor_metrics_unittest.cc
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/block_processor_metrics.h"
+
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+// Verify the general functionality of BlockProcessorMetrics.
+TEST(BlockProcessorMetrics, NormalUsage) {
+ BlockProcessorMetrics metrics;
+
+ for (int j = 0; j < 3; ++j) {
+ for (int k = 0; k < kMetricsReportingIntervalBlocks - 1; ++k) {
+ metrics.UpdateRender(false);
+ metrics.UpdateRender(false);
+ metrics.UpdateCapture(false);
+ EXPECT_FALSE(metrics.MetricsReported());
+ }
+ metrics.UpdateCapture(false);
+ EXPECT_TRUE(metrics.MetricsReported());
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/block_processor_unittest.cc b/third_party/libwebrtc/modules/audio_processing/aec3/block_processor_unittest.cc
new file mode 100644
index 0000000000..aba5c4186d
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/block_processor_unittest.cc
@@ -0,0 +1,341 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/block_processor.h"
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "modules/audio_processing/aec3/mock/mock_echo_remover.h"
+#include "modules/audio_processing/aec3/mock/mock_render_delay_buffer.h"
+#include "modules/audio_processing/aec3/mock/mock_render_delay_controller.h"
+#include "modules/audio_processing/test/echo_canceller_test_tools.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/random.h"
+#include "rtc_base/strings/string_builder.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+using ::testing::_;
+using ::testing::AtLeast;
+using ::testing::NiceMock;
+using ::testing::Return;
+using ::testing::StrictMock;
+
+// Verifies that the basic BlockProcessor functionality works and that the API
+// methods are callable.
+void RunBasicSetupAndApiCallTest(int sample_rate_hz, int num_iterations) {
+ constexpr size_t kNumRenderChannels = 1;
+ constexpr size_t kNumCaptureChannels = 1;
+
+ std::unique_ptr<BlockProcessor> block_processor(
+ BlockProcessor::Create(EchoCanceller3Config(), sample_rate_hz,
+ kNumRenderChannels, kNumCaptureChannels));
+ Block block(NumBandsForRate(sample_rate_hz), kNumRenderChannels, 1000.f);
+ for (int k = 0; k < num_iterations; ++k) {
+ block_processor->BufferRender(block);
+ block_processor->ProcessCapture(false, false, nullptr, &block);
+ block_processor->UpdateEchoLeakageStatus(false);
+ }
+}
+
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+void RunRenderBlockSizeVerificationTest(int sample_rate_hz) {
+ constexpr size_t kNumRenderChannels = 1;
+ constexpr size_t kNumCaptureChannels = 1;
+
+ std::unique_ptr<BlockProcessor> block_processor(
+ BlockProcessor::Create(EchoCanceller3Config(), sample_rate_hz,
+ kNumRenderChannels, kNumCaptureChannels));
+ Block block(NumBandsForRate(sample_rate_hz), kNumRenderChannels);
+
+ EXPECT_DEATH(block_processor->BufferRender(block), "");
+}
+
+void RunRenderNumBandsVerificationTest(int sample_rate_hz) {
+ constexpr size_t kNumRenderChannels = 1;
+ constexpr size_t kNumCaptureChannels = 1;
+
+ const size_t wrong_num_bands = NumBandsForRate(sample_rate_hz) < 3
+ ? NumBandsForRate(sample_rate_hz) + 1
+ : 1;
+ std::unique_ptr<BlockProcessor> block_processor(
+ BlockProcessor::Create(EchoCanceller3Config(), sample_rate_hz,
+ kNumRenderChannels, kNumCaptureChannels));
+ Block block(wrong_num_bands, kNumRenderChannels);
+
+ EXPECT_DEATH(block_processor->BufferRender(block), "");
+}
+
+void RunCaptureNumBandsVerificationTest(int sample_rate_hz) {
+ constexpr size_t kNumRenderChannels = 1;
+ constexpr size_t kNumCaptureChannels = 1;
+
+ const size_t wrong_num_bands = NumBandsForRate(sample_rate_hz) < 3
+ ? NumBandsForRate(sample_rate_hz) + 1
+ : 1;
+ std::unique_ptr<BlockProcessor> block_processor(
+ BlockProcessor::Create(EchoCanceller3Config(), sample_rate_hz,
+ kNumRenderChannels, kNumCaptureChannels));
+ Block block(wrong_num_bands, kNumRenderChannels);
+
+ EXPECT_DEATH(block_processor->ProcessCapture(false, false, nullptr, &block),
+ "");
+}
+#endif
+
+std::string ProduceDebugText(int sample_rate_hz) {
+ rtc::StringBuilder ss;
+ ss << "Sample rate: " << sample_rate_hz;
+ return ss.Release();
+}
+
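+// Fills `samples` with a ramp that encodes (call_counter - delay), so that a
+// block rendered at call N matches, sample for sample, a capture block
+// processed with the given delay.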
+void FillSampleVector(int call_counter,
+ int delay,
+ rtc::ArrayView<float> samples) {
+ for (size_t i = 0; i < samples.size(); ++i) {
+ samples[i] = (call_counter - delay) * 10000.0f + i;
+ }
+}
+
+} // namespace
+
+// Verifies that the delay controller functionality is properly integrated with
+// the render delay buffer inside block processor.
+// TODO(peah): Activate the unittest once the required code has been landed.
+TEST(BlockProcessor, DISABLED_DelayControllerIntegration) {
+ constexpr size_t kNumRenderChannels = 1;
+ constexpr size_t kNumCaptureChannels = 1;
+ constexpr size_t kNumBlocks = 310;
+ constexpr size_t kDelayInSamples = 640;
+ constexpr size_t kDelayHeadroom = 1;
+ constexpr size_t kDelayInBlocks =
+ kDelayInSamples / kBlockSize - kDelayHeadroom;
+ Random random_generator(42U);
+ for (auto rate : {16000, 32000, 48000}) {
+ SCOPED_TRACE(ProduceDebugText(rate));
+ std::unique_ptr<testing::StrictMock<webrtc::test::MockRenderDelayBuffer>>
+ render_delay_buffer_mock(
+ new StrictMock<webrtc::test::MockRenderDelayBuffer>(rate, 1));
+ EXPECT_CALL(*render_delay_buffer_mock, Insert(_))
+ .Times(kNumBlocks)
+ .WillRepeatedly(Return(RenderDelayBuffer::BufferingEvent::kNone));
+ EXPECT_CALL(*render_delay_buffer_mock, AlignFromDelay(kDelayInBlocks))
+ .Times(AtLeast(1));
+ EXPECT_CALL(*render_delay_buffer_mock, MaxDelay()).WillOnce(Return(30));
+ EXPECT_CALL(*render_delay_buffer_mock, Delay())
+ .Times(kNumBlocks + 1)
+ .WillRepeatedly(Return(0));
+ std::unique_ptr<BlockProcessor> block_processor(BlockProcessor::Create(
+ EchoCanceller3Config(), rate, kNumRenderChannels, kNumCaptureChannels,
+ std::move(render_delay_buffer_mock)));
+
+ Block render_block(NumBandsForRate(rate), kNumRenderChannels);
+ Block capture_block(NumBandsForRate(rate), kNumCaptureChannels);
+ DelayBuffer<float> signal_delay_buffer(kDelayInSamples);
+ for (size_t k = 0; k < kNumBlocks; ++k) {
+ RandomizeSampleVector(&random_generator,
+ render_block.View(/*band=*/0, /*capture=*/0));
+ signal_delay_buffer.Delay(render_block.View(/*band=*/0, /*capture=*/0),
+ capture_block.View(/*band=*/0, /*capture=*/0));
+ block_processor->BufferRender(render_block);
+ block_processor->ProcessCapture(false, false, nullptr, &capture_block);
+ }
+ }
+}
+
+// Verifies that BlockProcessor submodules are called in a proper manner.
+TEST(BlockProcessor, DISABLED_SubmoduleIntegration) {
+ constexpr size_t kNumBlocks = 310;
+ constexpr size_t kNumRenderChannels = 1;
+ constexpr size_t kNumCaptureChannels = 1;
+
+ Random random_generator(42U);
+ for (auto rate : {16000, 32000, 48000}) {
+ SCOPED_TRACE(ProduceDebugText(rate));
+ std::unique_ptr<testing::StrictMock<webrtc::test::MockRenderDelayBuffer>>
+ render_delay_buffer_mock(
+ new StrictMock<webrtc::test::MockRenderDelayBuffer>(rate, 1));
+ std::unique_ptr<
+ ::testing::StrictMock<webrtc::test::MockRenderDelayController>>
+ render_delay_controller_mock(
+ new StrictMock<webrtc::test::MockRenderDelayController>());
+ std::unique_ptr<testing::StrictMock<webrtc::test::MockEchoRemover>>
+ echo_remover_mock(new StrictMock<webrtc::test::MockEchoRemover>());
+
+ EXPECT_CALL(*render_delay_buffer_mock, Insert(_))
+ .Times(kNumBlocks - 1)
+ .WillRepeatedly(Return(RenderDelayBuffer::BufferingEvent::kNone));
+ EXPECT_CALL(*render_delay_buffer_mock, PrepareCaptureProcessing())
+ .Times(kNumBlocks);
+ EXPECT_CALL(*render_delay_buffer_mock, AlignFromDelay(9)).Times(AtLeast(1));
+ EXPECT_CALL(*render_delay_buffer_mock, Delay())
+ .Times(kNumBlocks)
+ .WillRepeatedly(Return(0));
+ EXPECT_CALL(*render_delay_controller_mock, GetDelay(_, _, _))
+ .Times(kNumBlocks);
+ EXPECT_CALL(*echo_remover_mock, ProcessCapture(_, _, _, _, _, _))
+ .Times(kNumBlocks);
+ EXPECT_CALL(*echo_remover_mock, UpdateEchoLeakageStatus(_))
+ .Times(kNumBlocks);
+
+ std::unique_ptr<BlockProcessor> block_processor(BlockProcessor::Create(
+ EchoCanceller3Config(), rate, kNumRenderChannels, kNumCaptureChannels,
+ std::move(render_delay_buffer_mock),
+ std::move(render_delay_controller_mock), std::move(echo_remover_mock)));
+
+ Block render_block(NumBandsForRate(rate), kNumRenderChannels);
+ Block capture_block(NumBandsForRate(rate), kNumCaptureChannels);
+ DelayBuffer<float> signal_delay_buffer(640);
+ for (size_t k = 0; k < kNumBlocks; ++k) {
+ RandomizeSampleVector(&random_generator,
+ render_block.View(/*band=*/0, /*capture=*/0));
+ signal_delay_buffer.Delay(render_block.View(/*band=*/0, /*capture=*/0),
+ capture_block.View(/*band=*/0, /*capture=*/0));
+ block_processor->BufferRender(render_block);
+ block_processor->ProcessCapture(false, false, nullptr, &capture_block);
+ block_processor->UpdateEchoLeakageStatus(false);
+ }
+ }
+}
+
+TEST(BlockProcessor, BasicSetupAndApiCalls) {
+ for (auto rate : {16000, 32000, 48000}) {
+ SCOPED_TRACE(ProduceDebugText(rate));
+ RunBasicSetupAndApiCallTest(rate, 1);
+ }
+}
+
+TEST(BlockProcessor, TestLongerCall) {
+ RunBasicSetupAndApiCallTest(16000, 20 * kNumBlocksPerSecond);
+}
+
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+// TODO(gustaf): Re-enable the test once the issue with memory leaks during
+// DEATH tests on test bots has been fixed.
+TEST(BlockProcessorDeathTest, DISABLED_VerifyRenderBlockSizeCheck) {
+ for (auto rate : {16000, 32000, 48000}) {
+ SCOPED_TRACE(ProduceDebugText(rate));
+ RunRenderBlockSizeVerificationTest(rate);
+ }
+}
+
+TEST(BlockProcessorDeathTest, VerifyRenderNumBandsCheck) {
+ for (auto rate : {16000, 32000, 48000}) {
+ SCOPED_TRACE(ProduceDebugText(rate));
+ RunRenderNumBandsVerificationTest(rate);
+ }
+}
+
+// TODO(peah): Verify the check for correct number of bands in the capture
+// signal.
+TEST(BlockProcessorDeathTest, VerifyCaptureNumBandsCheck) {
+ for (auto rate : {16000, 32000, 48000}) {
+ SCOPED_TRACE(ProduceDebugText(rate));
+ RunCaptureNumBandsVerificationTest(rate);
+ }
+}
+
+// Verifies that the check for a null ProcessCapture input works.
+TEST(BlockProcessorDeathTest, NullProcessCaptureParameter) {
+ EXPECT_DEATH(std::unique_ptr<BlockProcessor>(
+ BlockProcessor::Create(EchoCanceller3Config(), 16000, 1, 1))
+ ->ProcessCapture(false, false, nullptr, nullptr),
+ "");
+}
+
+// Verifies the check for correct sample rate.
+// TODO(peah): Re-enable the test once the issue with memory leaks during DEATH
+// tests on test bots has been fixed.
+TEST(BlockProcessor, DISABLED_WrongSampleRate) {
+ EXPECT_DEATH(std::unique_ptr<BlockProcessor>(
+ BlockProcessor::Create(EchoCanceller3Config(), 8001, 1, 1)),
+ "");
+}
+
+#endif
+
+// Verifies that external delay estimator delays are applied correctly when a
+// call begins with a sequence of capture blocks.
+TEST(BlockProcessor, ExternalDelayAppliedCorrectlyWithInitialCaptureCalls) {
+ constexpr int kNumRenderChannels = 1;
+ constexpr int kNumCaptureChannels = 1;
+ constexpr int kSampleRateHz = 16000;
+
+ EchoCanceller3Config config;
+ config.delay.use_external_delay_estimator = true;
+
+ std::unique_ptr<RenderDelayBuffer> delay_buffer(
+ RenderDelayBuffer::Create(config, kSampleRateHz, kNumRenderChannels));
+
+ std::unique_ptr<testing::NiceMock<webrtc::test::MockEchoRemover>>
+ echo_remover_mock(new NiceMock<webrtc::test::MockEchoRemover>());
+ webrtc::test::MockEchoRemover* echo_remover_mock_pointer =
+ echo_remover_mock.get();
+
+ std::unique_ptr<BlockProcessor> block_processor(BlockProcessor::Create(
+ config, kSampleRateHz, kNumRenderChannels, kNumCaptureChannels,
+ std::move(delay_buffer), /*delay_controller=*/nullptr,
+ std::move(echo_remover_mock)));
+
+ Block render_block(NumBandsForRate(kSampleRateHz), kNumRenderChannels);
+ Block capture_block(NumBandsForRate(kSampleRateHz), kNumCaptureChannels);
+
+ // Process...
+ // - 10 capture calls, where no render data is available,
+ // - 10 render calls, populating the buffer,
+ // - 2 capture calls, verifying that the delay was applied correctly.
+ constexpr int kDelayInBlocks = 5;
+ constexpr int kDelayInMs = 20;
+ block_processor->SetAudioBufferDelay(kDelayInMs);
+
+ int capture_call_counter = 0;
+ int render_call_counter = 0;
+ for (size_t k = 0; k < 10; ++k) {
+ FillSampleVector(++capture_call_counter, kDelayInBlocks,
+ capture_block.View(/*band=*/0, /*capture=*/0));
+ block_processor->ProcessCapture(false, false, nullptr, &capture_block);
+ }
+ for (size_t k = 0; k < 10; ++k) {
+ FillSampleVector(++render_call_counter, 0,
+ render_block.View(/*band=*/0, /*capture=*/0));
+ block_processor->BufferRender(render_block);
+ }
+
+ EXPECT_CALL(*echo_remover_mock_pointer, ProcessCapture)
+ .WillRepeatedly(
+ [](EchoPathVariability /*echo_path_variability*/,
+ bool /*capture_signal_saturation*/,
+ const absl::optional<DelayEstimate>& /*external_delay*/,
+ RenderBuffer* render_buffer, Block* /*linear_output*/,
+ Block* capture) {
+ const auto& render = render_buffer->GetBlock(0);
+ const auto render_view = render.View(/*band=*/0, /*channel=*/0);
+ const auto capture_view = capture->View(/*band=*/0, /*channel=*/0);
+ for (size_t i = 0; i < kBlockSize; ++i) {
+ EXPECT_FLOAT_EQ(render_view[i], capture_view[i]);
+ }
+ });
+
+ FillSampleVector(++capture_call_counter, kDelayInBlocks,
+ capture_block.View(/*band=*/0, /*capture=*/0));
+ block_processor->ProcessCapture(false, false, nullptr, &capture_block);
+
+ FillSampleVector(++capture_call_counter, kDelayInBlocks,
+ capture_block.View(/*band=*/0, /*capture=*/0));
+ block_processor->ProcessCapture(false, false, nullptr, &capture_block);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/clockdrift_detector.cc b/third_party/libwebrtc/modules/audio_processing/aec3/clockdrift_detector.cc
new file mode 100644
index 0000000000..2c49b795c4
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/clockdrift_detector.cc
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/audio_processing/aec3/clockdrift_detector.h"
+
+namespace webrtc {
+
+ClockdriftDetector::ClockdriftDetector()
+ : level_(Level::kNone), stability_counter_(0) {
+ delay_history_.fill(0);
+}
+
+ClockdriftDetector::~ClockdriftDetector() = default;
+
+void ClockdriftDetector::Update(int delay_estimate) {
+ if (delay_estimate == delay_history_[0]) {
+ // Reset clockdrift level if delay estimate is stable for 7500 blocks (30
+ // seconds).
+ if (++stability_counter_ > 7500)
+ level_ = Level::kNone;
+ return;
+ }
+
+ stability_counter_ = 0;
+ const int d1 = delay_history_[0] - delay_estimate;
+ const int d2 = delay_history_[1] - delay_estimate;
+ const int d3 = delay_history_[2] - delay_estimate;
+
+ // Patterns recognized as positive clockdrift:
+ // [x-3], x-2, x-1, x.
+ // [x-3], x-1, x-2, x.
+ const bool probable_drift_up =
+ (d1 == -1 && d2 == -2) || (d1 == -2 && d2 == -1);
+ const bool drift_up = probable_drift_up && d3 == -3;
+
+ // Patterns recognized as negative clockdrift:
+ // [x+3], x+2, x+1, x.
+ // [x+3], x+1, x+2, x.
+ const bool probable_drift_down = (d1 == 1 && d2 == 2) || (d1 == 2 && d2 == 1);
+ const bool drift_down = probable_drift_down && d3 == 3;
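+
+  // Worked example (hypothetical values): with delay_history_ = {12, 11, 10}
+  // and a new estimate of 13, d1 = -1, d2 = -2 and d3 = -3, which matches the
+  // first positive-drift pattern above, so the drift is treated as verified.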
+
+ // Set clockdrift level.
+ if (drift_up || drift_down) {
+ level_ = Level::kVerified;
+ } else if ((probable_drift_up || probable_drift_down) &&
+ level_ == Level::kNone) {
+ level_ = Level::kProbable;
+ }
+
+ // Shift delay history one step.
+ delay_history_[2] = delay_history_[1];
+ delay_history_[1] = delay_history_[0];
+ delay_history_[0] = delay_estimate;
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/clockdrift_detector.h b/third_party/libwebrtc/modules/audio_processing/aec3/clockdrift_detector.h
new file mode 100644
index 0000000000..2ba90bb889
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/clockdrift_detector.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_CLOCKDRIFT_DETECTOR_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_CLOCKDRIFT_DETECTOR_H_
+
+#include <stddef.h>
+
+#include <array>
+
+namespace webrtc {
+
+class ApmDataDumper;
+struct DownsampledRenderBuffer;
+struct EchoCanceller3Config;
+
+// Detects clockdrift by analyzing the estimated delay.
+class ClockdriftDetector {
+ public:
+ enum class Level { kNone, kProbable, kVerified, kNumCategories };
+ ClockdriftDetector();
+ ~ClockdriftDetector();
+ void Update(int delay_estimate);
+ Level ClockdriftLevel() const { return level_; }
+
+ private:
+ std::array<int, 3> delay_history_;
+ Level level_;
+ size_t stability_counter_;
+};
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AEC3_CLOCKDRIFT_DETECTOR_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/clockdrift_detector_unittest.cc b/third_party/libwebrtc/modules/audio_processing/aec3/clockdrift_detector_unittest.cc
new file mode 100644
index 0000000000..0f98b01d3a
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/clockdrift_detector_unittest.cc
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/clockdrift_detector.h"
+
+#include "test/gtest.h"
+
+namespace webrtc {
+TEST(ClockdriftDetector, ClockdriftDetector) {
+ ClockdriftDetector c;
+ // No clockdrift at start.
+ EXPECT_TRUE(c.ClockdriftLevel() == ClockdriftDetector::Level::kNone);
+
+ // Monotonically increasing delay.
+ for (int i = 0; i < 100; i++)
+ c.Update(1000);
+ EXPECT_TRUE(c.ClockdriftLevel() == ClockdriftDetector::Level::kNone);
+ for (int i = 0; i < 100; i++)
+ c.Update(1001);
+ EXPECT_TRUE(c.ClockdriftLevel() == ClockdriftDetector::Level::kNone);
+ for (int i = 0; i < 100; i++)
+ c.Update(1002);
+ // Probable clockdrift.
+ EXPECT_TRUE(c.ClockdriftLevel() == ClockdriftDetector::Level::kProbable);
+ for (int i = 0; i < 100; i++)
+ c.Update(1003);
+ // Verified clockdrift.
+ EXPECT_TRUE(c.ClockdriftLevel() == ClockdriftDetector::Level::kVerified);
+
+ // Stable delay.
+ for (int i = 0; i < 10000; i++)
+ c.Update(1003);
+ // No clockdrift.
+ EXPECT_TRUE(c.ClockdriftLevel() == ClockdriftDetector::Level::kNone);
+
+ // Decreasing delay.
+ for (int i = 0; i < 100; i++)
+ c.Update(1001);
+ for (int i = 0; i < 100; i++)
+ c.Update(999);
+ // Probable clockdrift.
+ EXPECT_TRUE(c.ClockdriftLevel() == ClockdriftDetector::Level::kProbable);
+ for (int i = 0; i < 100; i++)
+ c.Update(1000);
+ for (int i = 0; i < 100; i++)
+ c.Update(998);
+ // Verified clockdrift.
+ EXPECT_TRUE(c.ClockdriftLevel() == ClockdriftDetector::Level::kVerified);
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/coarse_filter_update_gain.cc b/third_party/libwebrtc/modules/audio_processing/aec3/coarse_filter_update_gain.cc
new file mode 100644
index 0000000000..f4fb74d20d
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/coarse_filter_update_gain.cc
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/coarse_filter_update_gain.h"
+
+#include <algorithm>
+#include <functional>
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+CoarseFilterUpdateGain::CoarseFilterUpdateGain(
+ const EchoCanceller3Config::Filter::CoarseConfiguration& config,
+ size_t config_change_duration_blocks)
+ : config_change_duration_blocks_(
+ static_cast<int>(config_change_duration_blocks)) {
+ SetConfig(config, true);
+ RTC_DCHECK_LT(0, config_change_duration_blocks_);
+ one_by_config_change_duration_blocks_ = 1.f / config_change_duration_blocks_;
+}
+
+void CoarseFilterUpdateGain::HandleEchoPathChange() {
+ poor_signal_excitation_counter_ = 0;
+ call_counter_ = 0;
+}
+
+void CoarseFilterUpdateGain::Compute(
+ const std::array<float, kFftLengthBy2Plus1>& render_power,
+ const RenderSignalAnalyzer& render_signal_analyzer,
+ const FftData& E_coarse,
+ size_t size_partitions,
+ bool saturated_capture_signal,
+ FftData* G) {
+ RTC_DCHECK(G);
+ ++call_counter_;
+
+ UpdateCurrentConfig();
+
+ if (render_signal_analyzer.PoorSignalExcitation()) {
+ poor_signal_excitation_counter_ = 0;
+ }
+
+ // Do not update the filter if the render is not sufficiently excited.
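+ // Adaptation is also held back while the capture signal saturates and for
+ // the first size_partitions blocks after an echo path change (both counters
+ // are reset in HandleEchoPathChange()).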
+ if (++poor_signal_excitation_counter_ < size_partitions ||
+ saturated_capture_signal || call_counter_ <= size_partitions) {
+ G->re.fill(0.f);
+ G->im.fill(0.f);
+ return;
+ }
+
+ // Compute mu.
+ std::array<float, kFftLengthBy2Plus1> mu;
+ const auto& X2 = render_power;
+ for (size_t k = 0; k < kFftLengthBy2Plus1; ++k) {
+ if (X2[k] > current_config_.noise_gate) {
+ mu[k] = current_config_.rate / X2[k];
+ } else {
+ mu[k] = 0.f;
+ }
+ }
+
+ // Avoid updating the filter close to narrow bands in the render signals.
+ render_signal_analyzer.MaskRegionsAroundNarrowBands(&mu);
+
+ // G = mu * E (the 1/X2 normalization is already folded into mu above).
+ for (size_t k = 0; k < kFftLengthBy2Plus1; ++k) {
+ G->re[k] = mu[k] * E_coarse.re[k];
+ G->im[k] = mu[k] * E_coarse.im[k];
+ }
+}
+
+void CoarseFilterUpdateGain::UpdateCurrentConfig() {
+ RTC_DCHECK_GE(config_change_duration_blocks_, config_change_counter_);
+ if (config_change_counter_ > 0) {
+ if (--config_change_counter_ > 0) {
+ auto average = [](float from, float to, float from_weight) {
+ return from * from_weight + to * (1.f - from_weight);
+ };
+
+ float change_factor =
+ config_change_counter_ * one_by_config_change_duration_blocks_;
+
+ current_config_.rate =
+ average(old_target_config_.rate, target_config_.rate, change_factor);
+ current_config_.noise_gate =
+ average(old_target_config_.noise_gate, target_config_.noise_gate,
+ change_factor);
+ } else {
+ current_config_ = old_target_config_ = target_config_;
+ }
+ }
+ RTC_DCHECK_LE(0, config_change_counter_);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/coarse_filter_update_gain.h b/third_party/libwebrtc/modules/audio_processing/aec3/coarse_filter_update_gain.h
new file mode 100644
index 0000000000..a1a1399b2c
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/coarse_filter_update_gain.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_COARSE_FILTER_UPDATE_GAIN_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_COARSE_FILTER_UPDATE_GAIN_H_
+
+#include <stddef.h>
+
+#include <array>
+
+#include "api/audio/echo_canceller3_config.h"
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "modules/audio_processing/aec3/fft_data.h"
+#include "modules/audio_processing/aec3/render_signal_analyzer.h"
+
+namespace webrtc {
+
+// Provides functionality for computing the fixed gain for the coarse filter.
+class CoarseFilterUpdateGain {
+ public:
+ explicit CoarseFilterUpdateGain(
+ const EchoCanceller3Config::Filter::CoarseConfiguration& config,
+ size_t config_change_duration_blocks);
+
+ // Takes action in the case of a known echo path change.
+ void HandleEchoPathChange();
+
+ // Computes the gain.
+ void Compute(const std::array<float, kFftLengthBy2Plus1>& render_power,
+ const RenderSignalAnalyzer& render_signal_analyzer,
+ const FftData& E_coarse,
+ size_t size_partitions,
+ bool saturated_capture_signal,
+ FftData* G);
+
+ // Sets a new config.
+ void SetConfig(
+ const EchoCanceller3Config::Filter::CoarseConfiguration& config,
+ bool immediate_effect) {
+ if (immediate_effect) {
+ old_target_config_ = current_config_ = target_config_ = config;
+ config_change_counter_ = 0;
+ } else {
+ old_target_config_ = current_config_;
+ target_config_ = config;
+ config_change_counter_ = config_change_duration_blocks_;
+ }
+ }
+
+ private:
+ EchoCanceller3Config::Filter::CoarseConfiguration current_config_;
+ EchoCanceller3Config::Filter::CoarseConfiguration target_config_;
+ EchoCanceller3Config::Filter::CoarseConfiguration old_target_config_;
+ const int config_change_duration_blocks_;
+ float one_by_config_change_duration_blocks_;
+ // TODO(peah): Check whether this counter should instead be initialized to a
+ // large value.
+ size_t poor_signal_excitation_counter_ = 0;
+ size_t call_counter_ = 0;
+ int config_change_counter_ = 0;
+
+ void UpdateCurrentConfig();
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AEC3_COARSE_FILTER_UPDATE_GAIN_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/coarse_filter_update_gain_unittest.cc b/third_party/libwebrtc/modules/audio_processing/aec3/coarse_filter_update_gain_unittest.cc
new file mode 100644
index 0000000000..55b79bb812
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/coarse_filter_update_gain_unittest.cc
@@ -0,0 +1,268 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/coarse_filter_update_gain.h"
+
+#include <algorithm>
+#include <memory>
+#include <numeric>
+#include <string>
+#include <vector>
+
+#include "modules/audio_processing/aec3/adaptive_fir_filter.h"
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "modules/audio_processing/aec3/aec_state.h"
+#include "modules/audio_processing/aec3/render_delay_buffer.h"
+#include "modules/audio_processing/test/echo_canceller_test_tools.h"
+#include "rtc_base/numerics/safe_minmax.h"
+#include "rtc_base/random.h"
+#include "rtc_base/strings/string_builder.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+// Method for performing the simulations needed to test the coarse filter
+// update gain functionality.
+void RunFilterUpdateTest(int num_blocks_to_process,
+ size_t delay_samples,
+ size_t num_render_channels,
+ int filter_length_blocks,
+ const std::vector<int>& blocks_with_saturation,
+ std::array<float, kBlockSize>* e_last_block,
+ std::array<float, kBlockSize>* y_last_block,
+ FftData* G_last_block) {
+ ApmDataDumper data_dumper(42);
+ EchoCanceller3Config config;
+ config.filter.refined.length_blocks = filter_length_blocks;
+ AdaptiveFirFilter refined_filter(
+ config.filter.refined.length_blocks, config.filter.refined.length_blocks,
+ config.filter.config_change_duration_blocks, num_render_channels,
+ DetectOptimization(), &data_dumper);
+ AdaptiveFirFilter coarse_filter(
+ config.filter.coarse.length_blocks, config.filter.coarse.length_blocks,
+ config.filter.config_change_duration_blocks, num_render_channels,
+ DetectOptimization(), &data_dumper);
+ Aec3Fft fft;
+
+ constexpr int kSampleRateHz = 48000;
+ config.delay.default_delay = 1;
+ std::unique_ptr<RenderDelayBuffer> render_delay_buffer(
+ RenderDelayBuffer::Create(config, kSampleRateHz, num_render_channels));
+
+ CoarseFilterUpdateGain coarse_gain(
+ config.filter.coarse, config.filter.config_change_duration_blocks);
+ Random random_generator(42U);
+ Block x(NumBandsForRate(kSampleRateHz), num_render_channels);
+ std::array<float, kBlockSize> y;
+ RenderSignalAnalyzer render_signal_analyzer(config);
+ std::array<float, kFftLength> s;
+ FftData S;
+ FftData G;
+ FftData E_coarse;
+ std::array<float, kBlockSize> e_coarse;
+
+ constexpr float kScale = 1.0f / kFftLengthBy2;
+
+ DelayBuffer<float> delay_buffer(delay_samples);
+ for (int k = 0; k < num_blocks_to_process; ++k) {
+ // Handle saturation.
+ bool saturation =
+ std::find(blocks_with_saturation.begin(), blocks_with_saturation.end(),
+ k) != blocks_with_saturation.end();
+
+ // Create the render signal.
+ for (int band = 0; band < x.NumBands(); ++band) {
+ for (int channel = 0; channel < x.NumChannels(); ++channel) {
+ RandomizeSampleVector(&random_generator, x.View(band, channel));
+ }
+ }
+ delay_buffer.Delay(x.View(/*band=*/0, /*channel=*/0), y);
+
+ render_delay_buffer->Insert(x);
+ if (k == 0) {
+ render_delay_buffer->Reset();
+ }
+ render_delay_buffer->PrepareCaptureProcessing();
+
+ render_signal_analyzer.Update(*render_delay_buffer->GetRenderBuffer(),
+ delay_samples / kBlockSize);
+
+ coarse_filter.Filter(*render_delay_buffer->GetRenderBuffer(), &S);
+ fft.Ifft(S, &s);
+ std::transform(y.begin(), y.end(), s.begin() + kFftLengthBy2,
+ e_coarse.begin(),
+ [&](float a, float b) { return a - b * kScale; });
+ std::for_each(e_coarse.begin(), e_coarse.end(),
+ [](float& a) { a = rtc::SafeClamp(a, -32768.f, 32767.f); });
+ fft.ZeroPaddedFft(e_coarse, Aec3Fft::Window::kRectangular, &E_coarse);
+
+ std::array<float, kFftLengthBy2Plus1> render_power;
+ render_delay_buffer->GetRenderBuffer()->SpectralSum(
+ coarse_filter.SizePartitions(), &render_power);
+ coarse_gain.Compute(render_power, render_signal_analyzer, E_coarse,
+ coarse_filter.SizePartitions(), saturation, &G);
+ coarse_filter.Adapt(*render_delay_buffer->GetRenderBuffer(), G);
+ }
+
+ std::copy(e_coarse.begin(), e_coarse.end(), e_last_block->begin());
+ std::copy(y.begin(), y.end(), y_last_block->begin());
+ std::copy(G.re.begin(), G.re.end(), G_last_block->re.begin());
+ std::copy(G.im.begin(), G.im.end(), G_last_block->im.begin());
+}
+
+std::string ProduceDebugText(int filter_length_blocks) {
+ rtc::StringBuilder ss;
+ ss << "Length: " << filter_length_blocks;
+ return ss.Release();
+}
+
+std::string ProduceDebugText(size_t delay, int filter_length_blocks) {
+ rtc::StringBuilder ss;
+ ss << "Delay: " << delay << ", ";
+ ss << ProduceDebugText(filter_length_blocks);
+ return ss.Release();
+}
+
+} // namespace
+
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+
+// Verifies that the check for non-null output gain parameter works.
+TEST(CoarseFilterUpdateGainDeathTest, NullDataOutputGain) {
+ ApmDataDumper data_dumper(42);
+ FftBuffer fft_buffer(1, 1);
+ RenderSignalAnalyzer analyzer(EchoCanceller3Config{});
+ FftData E;
+ const EchoCanceller3Config::Filter::CoarseConfiguration& config = {
+ 12, 0.5f, 220075344.f};
+ CoarseFilterUpdateGain gain(config, 250);
+ std::array<float, kFftLengthBy2Plus1> render_power;
+ render_power.fill(0.f);
+ EXPECT_DEATH(gain.Compute(render_power, analyzer, E, 1, false, nullptr), "");
+}
+
+#endif
+
+class CoarseFilterUpdateGainOneTwoEightRenderChannels
+ : public ::testing::Test,
+ public ::testing::WithParamInterface<size_t> {};
+
+INSTANTIATE_TEST_SUITE_P(MultiChannel,
+ CoarseFilterUpdateGainOneTwoEightRenderChannels,
+ ::testing::Values(1, 2, 8));
+
+// Verifies that the gain formed causes the filter using it to converge.
+TEST_P(CoarseFilterUpdateGainOneTwoEightRenderChannels,
+ GainCausesFilterToConverge) {
+ const size_t num_render_channels = GetParam();
+ std::vector<int> blocks_with_echo_path_changes;
+ std::vector<int> blocks_with_saturation;
+
+ for (size_t filter_length_blocks : {12, 20, 30}) {
+ for (size_t delay_samples : {0, 64, 150, 200, 301}) {
+ SCOPED_TRACE(ProduceDebugText(delay_samples, filter_length_blocks));
+
+ std::array<float, kBlockSize> e;
+ std::array<float, kBlockSize> y;
+ FftData G;
+
+ RunFilterUpdateTest(5000, delay_samples, num_render_channels,
+ filter_length_blocks, blocks_with_saturation, &e, &y,
+ &G);
+
+ // Verify that the coarse filter is able to perform well.
+ // Use different criteria to take overmodelling into account.
+ if (filter_length_blocks == 12) {
+ EXPECT_LT(1000 * std::inner_product(e.begin(), e.end(), e.begin(), 0.f),
+ std::inner_product(y.begin(), y.end(), y.begin(), 0.f));
+ } else {
+ EXPECT_LT(std::inner_product(e.begin(), e.end(), e.begin(), 0.f),
+ std::inner_product(y.begin(), y.end(), y.begin(), 0.f));
+ }
+ }
+ }
+}
+
+// Verifies that the gain is zero when there is saturation.
+TEST_P(CoarseFilterUpdateGainOneTwoEightRenderChannels, SaturationBehavior) {
+ const size_t num_render_channels = GetParam();
+ std::vector<int> blocks_with_echo_path_changes;
+ std::vector<int> blocks_with_saturation;
+ for (int k = 99; k < 200; ++k) {
+ blocks_with_saturation.push_back(k);
+ }
+ for (size_t filter_length_blocks : {12, 20, 30}) {
+ SCOPED_TRACE(ProduceDebugText(filter_length_blocks));
+
+ std::array<float, kBlockSize> e;
+ std::array<float, kBlockSize> y;
+ FftData G_a;
+ FftData G_a_ref;
+ G_a_ref.re.fill(0.f);
+ G_a_ref.im.fill(0.f);
+
+ RunFilterUpdateTest(100, 65, num_render_channels, filter_length_blocks,
+ blocks_with_saturation, &e, &y, &G_a);
+
+ EXPECT_EQ(G_a_ref.re, G_a.re);
+ EXPECT_EQ(G_a_ref.im, G_a.im);
+ }
+}
+
+class CoarseFilterUpdateGainOneTwoFourRenderChannels
+ : public ::testing::Test,
+ public ::testing::WithParamInterface<size_t> {};
+
+INSTANTIATE_TEST_SUITE_P(
+ MultiChannel,
+ CoarseFilterUpdateGainOneTwoFourRenderChannels,
+ ::testing::Values(1, 2, 4),
+ [](const ::testing::TestParamInfo<
+ CoarseFilterUpdateGainOneTwoFourRenderChannels::ParamType>& info) {
+ return (rtc::StringBuilder() << "Render" << info.param).str();
+ });
+
+// Verifies that the magnitude of the gain on average decreases for a
+// persistently exciting signal.
+TEST_P(CoarseFilterUpdateGainOneTwoFourRenderChannels, DecreasingGain) {
+ const size_t num_render_channels = GetParam();
+ for (size_t filter_length_blocks : {12, 20, 30}) {
+ SCOPED_TRACE(ProduceDebugText(filter_length_blocks));
+ std::vector<int> blocks_with_echo_path_changes;
+ std::vector<int> blocks_with_saturation;
+
+ std::array<float, kBlockSize> e;
+ std::array<float, kBlockSize> y;
+ FftData G_a;
+ FftData G_b;
+ FftData G_c;
+ std::array<float, kFftLengthBy2Plus1> G_a_power;
+ std::array<float, kFftLengthBy2Plus1> G_b_power;
+ std::array<float, kFftLengthBy2Plus1> G_c_power;
+
+ RunFilterUpdateTest(100, 65, num_render_channels, filter_length_blocks,
+ blocks_with_saturation, &e, &y, &G_a);
+ RunFilterUpdateTest(200, 65, num_render_channels, filter_length_blocks,
+ blocks_with_saturation, &e, &y, &G_b);
+ RunFilterUpdateTest(300, 65, num_render_channels, filter_length_blocks,
+ blocks_with_saturation, &e, &y, &G_c);
+
+ G_a.Spectrum(Aec3Optimization::kNone, G_a_power);
+ G_b.Spectrum(Aec3Optimization::kNone, G_b_power);
+ G_c.Spectrum(Aec3Optimization::kNone, G_c_power);
+
+ EXPECT_GT(std::accumulate(G_a_power.begin(), G_a_power.end(), 0.),
+ std::accumulate(G_b_power.begin(), G_b_power.end(), 0.));
+
+ EXPECT_GT(std::accumulate(G_b_power.begin(), G_b_power.end(), 0.),
+ std::accumulate(G_c_power.begin(), G_c_power.end(), 0.));
+ }
+}
+} // namespace webrtc
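The assertions above encode the convergence criterion as a ratio of error power to capture power: EXPECT_LT(1000 * sum(e^2), sum(y^2)) requires roughly 30 dB of echo attenuation for the overmodelled 12-block case, while the plain comparison requires any net attenuation. A standalone sketch expressing the same criterion in dB follows; the ErrorToCaptureDb helper is hypothetical and not part of the test.

    #include <array>
    #include <cmath>
    #include <cstddef>
    #include <cstdio>
    #include <numeric>

    // Error-to-capture power ratio in dB. The convergence tests assert this
    // is below 0 dB, or below -30 dB (factor 1000) for the 12-block filter.
    template <std::size_t N>
    float ErrorToCaptureDb(const std::array<float, N>& e,
                           const std::array<float, N>& y) {
      const float e2 = std::inner_product(e.begin(), e.end(), e.begin(), 0.f);
      const float y2 = std::inner_product(y.begin(), y.end(), y.begin(), 0.f);
      return 10.f * std::log10(e2 / y2);
    }

    int main() {
      const std::array<float, 4> e{0.1f, -0.1f, 0.05f, 0.f};
      const std::array<float, 4> y{1.f, -1.f, 0.5f, 0.2f};
      std::printf("%.1f dB\n", ErrorToCaptureDb(e, y));  // Roughly -20 dB.
    }
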
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/comfort_noise_generator.cc b/third_party/libwebrtc/modules/audio_processing/aec3/comfort_noise_generator.cc
new file mode 100644
index 0000000000..de5227c089
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/comfort_noise_generator.cc
@@ -0,0 +1,186 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/comfort_noise_generator.h"
+
+// Defines WEBRTC_ARCH_X86_FAMILY, used below.
+#include "rtc_base/system/arch.h"
+
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+#include <emmintrin.h>
+#endif
+#include <algorithm>
+#include <array>
+#include <cmath>
+#include <cstdint>
+#include <functional>
+#include <numeric>
+
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+#include "modules/audio_processing/aec3/vector_math.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+namespace {
+
+// Computes the noise floor value that matches a WGN input of noise_floor_dbfs.
+float GetNoiseFloorFactor(float noise_floor_dbfs) {
+ // kdBfsNormalization = 20.f*log10(32768.f).
+ constexpr float kdBfsNormalization = 90.30899869919436f;
+ return 64.f * powf(10.f, (kdBfsNormalization + noise_floor_dbfs) * 0.1f);
+}
+
+// Table of sqrt(2) * sin(2*pi*i/32).
+constexpr float kSqrt2Sin[32] = {
+ +0.0000000f, +0.2758994f, +0.5411961f, +0.7856950f, +1.0000000f,
+ +1.1758756f, +1.3065630f, +1.3870398f, +1.4142136f, +1.3870398f,
+ +1.3065630f, +1.1758756f, +1.0000000f, +0.7856950f, +0.5411961f,
+ +0.2758994f, +0.0000000f, -0.2758994f, -0.5411961f, -0.7856950f,
+ -1.0000000f, -1.1758756f, -1.3065630f, -1.3870398f, -1.4142136f,
+ -1.3870398f, -1.3065630f, -1.1758756f, -1.0000000f, -0.7856950f,
+ -0.5411961f, -0.2758994f};
+
+void GenerateComfortNoise(Aec3Optimization optimization,
+ const std::array<float, kFftLengthBy2Plus1>& N2,
+ uint32_t* seed,
+ FftData* lower_band_noise,
+ FftData* upper_band_noise) {
+ FftData* N_low = lower_band_noise;
+ FftData* N_high = upper_band_noise;
+
+ // Compute square root spectrum.
+ std::array<float, kFftLengthBy2Plus1> N;
+ std::copy(N2.begin(), N2.end(), N.begin());
+ aec3::VectorMath(optimization).Sqrt(N);
+
+ // Compute the noise level for the upper bands.
+ constexpr float kOneByNumBands = 1.f / (kFftLengthBy2Plus1 / 2 + 1);
+ constexpr int kFftLengthBy2Plus1By2 = kFftLengthBy2Plus1 / 2;
+ const float high_band_noise_level =
+ std::accumulate(N.begin() + kFftLengthBy2Plus1By2, N.end(), 0.f) *
+ kOneByNumBands;
+
+ // The analysis and synthesis windowing cause loss of power when
+ // cross-fading the noise where frames are completely uncorrelated
+ // (generated with random phase), hence the factor sqrt(2).
+ // This is not the case for the speech signal where the input is overlapping
+ // (strong correlation).
+ N_low->re[0] = N_low->re[kFftLengthBy2] = N_high->re[0] =
+ N_high->re[kFftLengthBy2] = 0.f;
+ for (size_t k = 1; k < kFftLengthBy2; k++) {
+ constexpr int kIndexMask = 32 - 1;
+ // Generate a random 31-bit integer.
+ seed[0] = (seed[0] * 69069 + 1) & (0x80000000 - 1);
+ // Convert to a 5-bit index.
+ int i = seed[0] >> 26;
+
+ // x = sqrt(2) * sin(a)
+ const float x = kSqrt2Sin[i];
+ // y = sqrt(2) * cos(a) = sqrt(2) * sin(a + pi/2)
+ const float y = kSqrt2Sin[(i + 8) & kIndexMask];
+
+ // Form low-frequency noise via spectral shaping.
+ N_low->re[k] = N[k] * x;
+ N_low->im[k] = N[k] * y;
+
+ // Form the high-frequency noise via simple levelling.
+ N_high->re[k] = high_band_noise_level * x;
+ N_high->im[k] = high_band_noise_level * y;
+ }
+}
+
+} // namespace
+
+ComfortNoiseGenerator::ComfortNoiseGenerator(const EchoCanceller3Config& config,
+ Aec3Optimization optimization,
+ size_t num_capture_channels)
+ : optimization_(optimization),
+ seed_(42),
+ num_capture_channels_(num_capture_channels),
+ noise_floor_(GetNoiseFloorFactor(config.comfort_noise.noise_floor_dbfs)),
+ N2_initial_(
+ std::make_unique<std::vector<std::array<float, kFftLengthBy2Plus1>>>(
+ num_capture_channels_)),
+ Y2_smoothed_(num_capture_channels_),
+ N2_(num_capture_channels_) {
+ for (size_t ch = 0; ch < num_capture_channels_; ++ch) {
+ (*N2_initial_)[ch].fill(0.f);
+ Y2_smoothed_[ch].fill(0.f);
+ N2_[ch].fill(1.0e6f);
+ }
+}
+
+ComfortNoiseGenerator::~ComfortNoiseGenerator() = default;
+
+void ComfortNoiseGenerator::Compute(
+ bool saturated_capture,
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>>
+ capture_spectrum,
+ rtc::ArrayView<FftData> lower_band_noise,
+ rtc::ArrayView<FftData> upper_band_noise) {
+ const auto& Y2 = capture_spectrum;
+
+ if (!saturated_capture) {
+ // Smooth Y2.
+ for (size_t ch = 0; ch < num_capture_channels_; ++ch) {
+ std::transform(Y2_smoothed_[ch].begin(), Y2_smoothed_[ch].end(),
+ Y2[ch].begin(), Y2_smoothed_[ch].begin(),
+ [](float a, float b) { return a + 0.1f * (b - a); });
+ }
+
+ if (N2_counter_ > 50) {
+ // Update N2 from Y2_smoothed.
+ for (size_t ch = 0; ch < num_capture_channels_; ++ch) {
+ std::transform(N2_[ch].begin(), N2_[ch].end(), Y2_smoothed_[ch].begin(),
+ N2_[ch].begin(), [](float a, float b) {
+ return b < a ? (0.9f * b + 0.1f * a) * 1.0002f
+ : a * 1.0002f;
+ });
+ }
+ }
+
+ if (N2_initial_) {
+ if (++N2_counter_ == 1000) {
+ N2_initial_.reset();
+ } else {
+ // Let N2_initial track N2: rise towards it slowly, fall to it instantly.
+ for (size_t ch = 0; ch < num_capture_channels_; ++ch) {
+ std::transform(N2_[ch].begin(), N2_[ch].end(),
+ (*N2_initial_)[ch].begin(), (*N2_initial_)[ch].begin(),
+ [](float a, float b) {
+ return a > b ? b + 0.001f * (a - b) : a;
+ });
+ }
+ }
+ }
+
+ for (size_t ch = 0; ch < num_capture_channels_; ++ch) {
+ for (auto& n : N2_[ch]) {
+ n = std::max(n, noise_floor_);
+ }
+ if (N2_initial_) {
+ for (auto& n : (*N2_initial_)[ch]) {
+ n = std::max(n, noise_floor_);
+ }
+ }
+ }
+ }
+
+ // Choose N2 estimate to use.
+ const auto& N2 = N2_initial_ ? (*N2_initial_) : N2_;
+
+ for (size_t ch = 0; ch < num_capture_channels_; ++ch) {
+ GenerateComfortNoise(optimization_, N2[ch], &seed_, &lower_band_noise[ch],
+ &upper_band_noise[ch]);
+ }
+}
+
+} // namespace webrtc
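The loop above generates comfort noise with random phase: a 31-bit linear congruential generator produces a 5-bit index into the sqrt(2)-scaled sine table, and offsetting that index by 8 (a quarter of the 32-entry period) yields the matching cosine, giving a unit random phasor scaled by sqrt(2) to compensate for the cross-fade power loss. A self-contained sketch of just that phasor scheme, recomputing the table instead of hard-coding it (the loop bounds and printing are for illustration only):

    #include <array>
    #include <cmath>
    #include <cstdint>
    #include <cstdio>

    int main() {
      // Recompute the sqrt(2) * sin(2*pi*i/32) table from the source file.
      std::array<float, 32> sqrt2_sin;
      for (int i = 0; i < 32; ++i) {
        sqrt2_sin[i] = std::sqrt(2.f) * std::sin(2.f * 3.14159265f * i / 32.f);
      }
      uint32_t seed = 42;
      for (int k = 0; k < 4; ++k) {
        seed = (seed * 69069 + 1) & (0x80000000 - 1);  // 31-bit LCG step.
        const int i = seed >> 26;                      // Top 5 bits -> index.
        const float re = sqrt2_sin[i];                 // sqrt(2) * sin(a)
        const float im = sqrt2_sin[(i + 8) & 31];      // sqrt(2) * cos(a)
        // Magnitude squared is always 2, by sin^2 + cos^2 = 1.
        std::printf("bin %d: %+.3f %+.3fi |.|^2=%.3f\n", k, re, im,
                    re * re + im * im);
      }
    }
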
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/comfort_noise_generator.h b/third_party/libwebrtc/modules/audio_processing/aec3/comfort_noise_generator.h
new file mode 100644
index 0000000000..2785b765c5
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/comfort_noise_generator.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_COMFORT_NOISE_GENERATOR_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_COMFORT_NOISE_GENERATOR_H_
+
+#include <stdint.h>
+
+#include <array>
+#include <memory>
+
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "modules/audio_processing/aec3/aec_state.h"
+#include "modules/audio_processing/aec3/fft_data.h"
+#include "rtc_base/system/arch.h"
+
+namespace webrtc {
+namespace aec3 {
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+
+void EstimateComfortNoise_SSE2(const std::array<float, kFftLengthBy2Plus1>& N2,
+ uint32_t* seed,
+ FftData* lower_band_noise,
+ FftData* upper_band_noise);
+#endif
+void EstimateComfortNoise(const std::array<float, kFftLengthBy2Plus1>& N2,
+ uint32_t* seed,
+ FftData* lower_band_noise,
+ FftData* upper_band_noise);
+
+} // namespace aec3
+
+// Generates the comfort noise.
+class ComfortNoiseGenerator {
+ public:
+ ComfortNoiseGenerator(const EchoCanceller3Config& config,
+ Aec3Optimization optimization,
+ size_t num_capture_channels);
+ ComfortNoiseGenerator() = delete;
+ ~ComfortNoiseGenerator();
+ ComfortNoiseGenerator(const ComfortNoiseGenerator&) = delete;
+
+ // Computes the comfort noise.
+ void Compute(bool saturated_capture,
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>>
+ capture_spectrum,
+ rtc::ArrayView<FftData> lower_band_noise,
+ rtc::ArrayView<FftData> upper_band_noise);
+
+ // Returns the estimate of the background noise spectrum.
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>> NoiseSpectrum()
+ const {
+ return N2_;
+ }
+
+ private:
+ const Aec3Optimization optimization_;
+ uint32_t seed_;
+ const size_t num_capture_channels_;
+ const float noise_floor_;
+ std::unique_ptr<std::vector<std::array<float, kFftLengthBy2Plus1>>>
+ N2_initial_;
+ std::vector<std::array<float, kFftLengthBy2Plus1>> Y2_smoothed_;
+ std::vector<std::array<float, kFftLengthBy2Plus1>> N2_;
+ int N2_counter_ = 0;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AEC3_COMFORT_NOISE_GENERATOR_H_
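The noise estimate N2_ declared above is tracked inside Compute() by two recursions: the capture spectrum is first smoothed with a one-pole filter, and the noise estimate then follows the smoothed value quickly downwards but only drifts upwards by a factor 1.0002 per block, a minimum-statistics-style tracker. A standalone single-bin sketch with the constants copied from the implementation (the scalar stands in for the per-bin arrays):

    #include <cstdio>

    int main() {
      float y2_smoothed = 0.f;
      float n2 = 1.0e6f;  // Initial (large) noise estimate, as in the class.
      const float y2_observed = 1000.f;  // Stationary capture power, per bin.
      for (int block = 0; block < 2000; ++block) {
        // One-pole smoothing of the capture spectrum.
        y2_smoothed += 0.1f * (y2_observed - y2_smoothed);
        // Fast decay towards the smoothed spectrum, slow upward drift.
        n2 = y2_smoothed < n2 ? (0.9f * y2_smoothed + 0.1f * n2) * 1.0002f
                              : n2 * 1.0002f;
      }
      // Both converge near 1000 for this stationary input.
      std::printf("smoothed=%.1f noise=%.1f\n", y2_smoothed, n2);
    }
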
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/comfort_noise_generator_unittest.cc b/third_party/libwebrtc/modules/audio_processing/aec3/comfort_noise_generator_unittest.cc
new file mode 100644
index 0000000000..a9da17559a
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/comfort_noise_generator_unittest.cc
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/comfort_noise_generator.h"
+
+#include <algorithm>
+#include <numeric>
+
+#include "api/audio/echo_canceller3_config.h"
+#include "modules/audio_processing/aec3/aec_state.h"
+#include "rtc_base/random.h"
+#include "rtc_base/system/arch.h"
+#include "system_wrappers/include/cpu_features_wrapper.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace aec3 {
+namespace {
+
+float Power(const FftData& N) {
+ std::array<float, kFftLengthBy2Plus1> N2;
+ N.Spectrum(Aec3Optimization::kNone, N2);
+ return std::accumulate(N2.begin(), N2.end(), 0.f) / N2.size();
+}
+
+} // namespace
+
+TEST(ComfortNoiseGenerator, CorrectLevel) {
+ constexpr size_t kNumChannels = 5;
+ EchoCanceller3Config config;
+ ComfortNoiseGenerator cng(config, DetectOptimization(), kNumChannels);
+ AecState aec_state(config, kNumChannels);
+
+ std::vector<std::array<float, kFftLengthBy2Plus1>> N2(kNumChannels);
+ std::vector<FftData> n_lower(kNumChannels);
+ std::vector<FftData> n_upper(kNumChannels);
+
+ for (size_t ch = 0; ch < kNumChannels; ++ch) {
+ N2[ch].fill(1000.f * 1000.f / (ch + 1));
+ n_lower[ch].re.fill(0.f);
+ n_lower[ch].im.fill(0.f);
+ n_upper[ch].re.fill(0.f);
+ n_upper[ch].im.fill(0.f);
+ }
+
+ // Ensure an instantaneous update to nonzero noise.
+ cng.Compute(false, N2, n_lower, n_upper);
+
+ for (size_t ch = 0; ch < kNumChannels; ++ch) {
+ EXPECT_LT(0.f, Power(n_lower[ch]));
+ EXPECT_LT(0.f, Power(n_upper[ch]));
+ }
+
+ for (int k = 0; k < 10000; ++k) {
+ cng.Compute(false, N2, n_lower, n_upper);
+ }
+
+ for (size_t ch = 0; ch < kNumChannels; ++ch) {
+ EXPECT_NEAR(2.f * N2[ch][0], Power(n_lower[ch]), N2[ch][0] / 10.f);
+ EXPECT_NEAR(2.f * N2[ch][0], Power(n_upper[ch]), N2[ch][0] / 10.f);
+ }
+}
+
+} // namespace aec3
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/config_selector.cc b/third_party/libwebrtc/modules/audio_processing/aec3/config_selector.cc
new file mode 100644
index 0000000000..c55344da79
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/config_selector.cc
@@ -0,0 +1,71 @@
+
+/*
+ * Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/config_selector.h"
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace {
+
+// Validates that the mono and the multichannel configs have compatible fields.
+bool CompatibleConfigs(const EchoCanceller3Config& mono_config,
+ const EchoCanceller3Config& multichannel_config) {
+ if (mono_config.delay.fixed_capture_delay_samples !=
+ multichannel_config.delay.fixed_capture_delay_samples) {
+ return false;
+ }
+ if (mono_config.filter.export_linear_aec_output !=
+ multichannel_config.filter.export_linear_aec_output) {
+ return false;
+ }
+ if (mono_config.filter.high_pass_filter_echo_reference !=
+ multichannel_config.filter.high_pass_filter_echo_reference) {
+ return false;
+ }
+ if (mono_config.multi_channel.detect_stereo_content !=
+ multichannel_config.multi_channel.detect_stereo_content) {
+ return false;
+ }
+ if (mono_config.multi_channel.stereo_detection_timeout_threshold_seconds !=
+ multichannel_config.multi_channel
+ .stereo_detection_timeout_threshold_seconds) {
+ return false;
+ }
+ return true;
+}
+
+} // namespace
+
+ConfigSelector::ConfigSelector(
+ const EchoCanceller3Config& config,
+ const absl::optional<EchoCanceller3Config>& multichannel_config,
+ int num_render_input_channels)
+ : config_(config), multichannel_config_(multichannel_config) {
+ if (multichannel_config_.has_value()) {
+ RTC_DCHECK(CompatibleConfigs(config_, *multichannel_config_));
+ }
+
+ Update(!config_.multi_channel.detect_stereo_content &&
+ num_render_input_channels > 1);
+
+ RTC_DCHECK(active_config_);
+}
+
+void ConfigSelector::Update(bool multichannel_content) {
+ if (multichannel_content && multichannel_config_.has_value()) {
+ active_config_ = &(*multichannel_config_);
+ } else {
+ active_config_ = &config_;
+ }
+}
+
+} // namespace webrtc
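The selection rule above reduces to a small truth table: the multichannel config is active only when multichannel content is flagged and a multichannel config was actually supplied; in every other case the mono config wins (including at construction, where Update() is seeded with !detect_stereo_content && num_render_input_channels > 1). A standalone sketch of that rule; the Select helper is a hypothetical stand-in, not the WebRTC API.

    #include <cstdio>

    // Mirrors ConfigSelector::Update(): fall back to the mono config unless
    // multichannel content is flagged and a multichannel config exists.
    const char* Select(bool multichannel_content, bool has_multichannel_config) {
      return (multichannel_content && has_multichannel_config) ? "multichannel"
                                                               : "mono";
    }

    int main() {
      for (bool content : {false, true}) {
        for (bool has_config : {false, true}) {
          std::printf("content=%d has_config=%d -> %s\n", content, has_config,
                      Select(content, has_config));
        }
      }
    }
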
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/config_selector.h b/third_party/libwebrtc/modules/audio_processing/aec3/config_selector.h
new file mode 100644
index 0000000000..3b3f94e5ac
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/config_selector.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_CONFIG_SELECTOR_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_CONFIG_SELECTOR_H_
+
+#include "absl/types/optional.h"
+#include "api/audio/echo_canceller3_config.h"
+
+namespace webrtc {
+
+// Selects the config to use.
+class ConfigSelector {
+ public:
+ ConfigSelector(
+ const EchoCanceller3Config& config,
+ const absl::optional<EchoCanceller3Config>& multichannel_config,
+ int num_render_input_channels);
+
+ // Updates the config selection based on the detection of multichannel
+ // content.
+ void Update(bool multichannel_content);
+
+ const EchoCanceller3Config& active_config() const { return *active_config_; }
+
+ private:
+ const EchoCanceller3Config config_;
+ const absl::optional<EchoCanceller3Config> multichannel_config_;
+ const EchoCanceller3Config* active_config_ = nullptr;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AEC3_CONFIG_SELECTOR_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/config_selector_unittest.cc b/third_party/libwebrtc/modules/audio_processing/aec3/config_selector_unittest.cc
new file mode 100644
index 0000000000..1826bfcace
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/config_selector_unittest.cc
@@ -0,0 +1,116 @@
+/*
+ * Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/config_selector.h"
+
+#include <tuple>
+
+#include "absl/types/optional.h"
+#include "api/audio/echo_canceller3_config.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+class ConfigSelectorChannelsAndContentDetection
+ : public ::testing::Test,
+ public ::testing::WithParamInterface<std::tuple<int, bool>> {};
+
+INSTANTIATE_TEST_SUITE_P(ConfigSelectorMultiParameters,
+ ConfigSelectorChannelsAndContentDetection,
+ ::testing::Combine(::testing::Values(1, 2, 8),
+ ::testing::Values(false, true)));
+
+class ConfigSelectorChannels : public ::testing::Test,
+ public ::testing::WithParamInterface<int> {};
+
+INSTANTIATE_TEST_SUITE_P(ConfigSelectorMultiParameters,
+ ConfigSelectorChannels,
+ ::testing::Values(1, 2, 8));
+
+TEST_P(ConfigSelectorChannelsAndContentDetection,
+ MonoConfigIsSelectedWhenNoMultiChannelConfigPresent) {
+ const auto [num_channels, detect_stereo_content] = GetParam();
+ EchoCanceller3Config config;
+ config.multi_channel.detect_stereo_content = detect_stereo_content;
+ absl::optional<EchoCanceller3Config> multichannel_config;
+
+ config.delay.default_delay += 1;
+ const size_t custom_delay_value_in_config = config.delay.default_delay;
+
+ ConfigSelector cs(config, multichannel_config,
+ /*num_render_input_channels=*/num_channels);
+ EXPECT_EQ(cs.active_config().delay.default_delay,
+ custom_delay_value_in_config);
+
+ cs.Update(/*multichannel_content=*/false);
+ EXPECT_EQ(cs.active_config().delay.default_delay,
+ custom_delay_value_in_config);
+
+ cs.Update(/*multichannel_content=*/true);
+ EXPECT_EQ(cs.active_config().delay.default_delay,
+ custom_delay_value_in_config);
+}
+
+TEST_P(ConfigSelectorChannelsAndContentDetection,
+ CorrectInitialConfigIsSelected) {
+ const auto [num_channels, detect_stereo_content] = GetParam();
+ EchoCanceller3Config config;
+ config.multi_channel.detect_stereo_content = detect_stereo_content;
+ absl::optional<EchoCanceller3Config> multichannel_config = config;
+
+ config.delay.default_delay += 1;
+ const size_t custom_delay_value_in_config = config.delay.default_delay;
+ multichannel_config->delay.default_delay += 2;
+ const size_t custom_delay_value_in_multichannel_config =
+ multichannel_config->delay.default_delay;
+
+ ConfigSelector cs(config, multichannel_config,
+ /*num_render_input_channels=*/num_channels);
+
+ if (num_channels == 1 || detect_stereo_content) {
+ EXPECT_EQ(cs.active_config().delay.default_delay,
+ custom_delay_value_in_config);
+ } else {
+ EXPECT_EQ(cs.active_config().delay.default_delay,
+ custom_delay_value_in_multichannel_config);
+ }
+}
+
+TEST_P(ConfigSelectorChannels, CorrectConfigUpdateBehavior) {
+ const int num_channels = GetParam();
+ EchoCanceller3Config config;
+ config.multi_channel.detect_stereo_content = true;
+ absl::optional<EchoCanceller3Config> multichannel_config = config;
+
+ config.delay.default_delay += 1;
+ const size_t custom_delay_value_in_config = config.delay.default_delay;
+ multichannel_config->delay.default_delay += 2;
+ const size_t custom_delay_value_in_multichannel_config =
+ multichannel_config->delay.default_delay;
+
+ ConfigSelector cs(config, multichannel_config,
+ /*num_render_input_channels=*/num_channels);
+
+ cs.Update(/*multichannel_content=*/false);
+ EXPECT_EQ(cs.active_config().delay.default_delay,
+ custom_delay_value_in_config);
+
+ if (num_channels == 1) {
+ cs.Update(/*multichannel_content=*/false);
+ EXPECT_EQ(cs.active_config().delay.default_delay,
+ custom_delay_value_in_config);
+ } else {
+ cs.Update(/*multichannel_content=*/true);
+ EXPECT_EQ(cs.active_config().delay.default_delay,
+ custom_delay_value_in_multichannel_config);
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/decimator.cc b/third_party/libwebrtc/modules/audio_processing/aec3/decimator.cc
new file mode 100644
index 0000000000..bd03237ca0
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/decimator.cc
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/audio_processing/aec3/decimator.h"
+
+#include <array>
+#include <vector>
+
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace {
+
+// signal.butter(2, 3400/8000.0, 'lowpass', analog=False)
+const std::vector<CascadedBiQuadFilter::BiQuadParam> GetLowPassFilterDS2() {
+ return std::vector<CascadedBiQuadFilter::BiQuadParam>{
+ {{-1.f, 0.f}, {0.13833231f, 0.40743176f}, 0.22711796393486466f},
+ {{-1.f, 0.f}, {0.13833231f, 0.40743176f}, 0.22711796393486466f},
+ {{-1.f, 0.f}, {0.13833231f, 0.40743176f}, 0.22711796393486466f}};
+}
+
+// signal.ellip(6, 1, 40, 1800/8000, btype='lowpass', analog=False)
+const std::vector<CascadedBiQuadFilter::BiQuadParam> GetLowPassFilterDS4() {
+ return std::vector<CascadedBiQuadFilter::BiQuadParam>{
+ {{-0.08873842f, 0.99605496f}, {0.75916227f, 0.23841065f}, 0.26250696827f},
+ {{0.62273832f, 0.78243018f}, {0.74892112f, 0.5410152f}, 0.26250696827f},
+ {{0.71107693f, 0.70311421f}, {0.74895534f, 0.63924616f}, 0.26250696827f}};
+}
+
+// signal.cheby1(1, 6, [1000/8000, 2000/8000], btype='bandpass', analog=False)
+const std::vector<CascadedBiQuadFilter::BiQuadParam> GetBandPassFilterDS8() {
+ return std::vector<CascadedBiQuadFilter::BiQuadParam>{
+ {{1.f, 0.f}, {0.7601815f, 0.46423542f}, 0.10330478266505948f, true},
+ {{1.f, 0.f}, {0.7601815f, 0.46423542f}, 0.10330478266505948f, true},
+ {{1.f, 0.f}, {0.7601815f, 0.46423542f}, 0.10330478266505948f, true},
+ {{1.f, 0.f}, {0.7601815f, 0.46423542f}, 0.10330478266505948f, true},
+ {{1.f, 0.f}, {0.7601815f, 0.46423542f}, 0.10330478266505948f, true}};
+}
+
+// signal.butter(2, 1000/8000.0, 'highpass', analog=False)
+const std::vector<CascadedBiQuadFilter::BiQuadParam> GetHighPassFilter() {
+ return std::vector<CascadedBiQuadFilter::BiQuadParam>{
+ {{1.f, 0.f}, {0.72712179f, 0.21296904f}, 0.7570763753338849f}};
+}
+
+const std::vector<CascadedBiQuadFilter::BiQuadParam> GetPassThroughFilter() {
+ return std::vector<CascadedBiQuadFilter::BiQuadParam>{};
+}
+} // namespace
+
+Decimator::Decimator(size_t down_sampling_factor)
+ : down_sampling_factor_(down_sampling_factor),
+ anti_aliasing_filter_(down_sampling_factor_ == 4
+ ? GetLowPassFilterDS4()
+ : (down_sampling_factor_ == 8
+ ? GetBandPassFilterDS8()
+ : GetLowPassFilterDS2())),
+ noise_reduction_filter_(down_sampling_factor_ == 8
+ ? GetPassThroughFilter()
+ : GetHighPassFilter()) {
+ RTC_DCHECK(down_sampling_factor_ == 2 || down_sampling_factor_ == 4 ||
+ down_sampling_factor_ == 8);
+}
+
+void Decimator::Decimate(rtc::ArrayView<const float> in,
+ rtc::ArrayView<float> out) {
+ RTC_DCHECK_EQ(kBlockSize, in.size());
+ RTC_DCHECK_EQ(kBlockSize / down_sampling_factor_, out.size());
+ std::array<float, kBlockSize> x;
+
+ // Limit the frequency content of the signal to avoid aliasing.
+ anti_aliasing_filter_.Process(in, x);
+
+ // Reduce the impact of near-end noise.
+ noise_reduction_filter_.Process(x);
+
+ // Downsample the signal.
+ for (size_t j = 0, k = 0; j < out.size(); ++j, k += down_sampling_factor_) {
+ RTC_DCHECK_GT(kBlockSize, k);
+ out[j] = x[k];
+ }
+}
+
+} // namespace webrtc
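After the two filtering stages, Decimate() forms the output by keeping every down_sampling_factor-th sample of the 64-sample block. A standalone sketch of that final step alone, with the band-limiting omitted; a real decimator must filter first, exactly as the cascaded biquads above do, and kBlockSize and kFactor here are local stand-ins.

    #include <array>
    #include <cstddef>
    #include <cstdio>

    int main() {
      constexpr std::size_t kBlockSize = 64;
      constexpr std::size_t kFactor = 4;  // One of the supported factors 2/4/8.
      std::array<float, kBlockSize> x{};
      for (std::size_t k = 0; k < kBlockSize; ++k) {
        x[k] = static_cast<float>(k);  // Placeholder for the filtered block.
      }
      // Keep every kFactor-th sample, as in the final loop of Decimate().
      std::array<float, kBlockSize / kFactor> out;
      for (std::size_t j = 0, k = 0; j < out.size(); ++j, k += kFactor) {
        out[j] = x[k];
      }
      std::printf("out[0]=%.0f out[1]=%.0f out[last]=%.0f\n", out[0], out[1],
                  out[out.size() - 1]);
    }
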
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/decimator.h b/third_party/libwebrtc/modules/audio_processing/aec3/decimator.h
new file mode 100644
index 0000000000..dbff3d9fff
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/decimator.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_DECIMATOR_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_DECIMATOR_H_
+
+#include <array>
+#include <vector>
+
+#include "api/array_view.h"
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "modules/audio_processing/utility/cascaded_biquad_filter.h"
+
+namespace webrtc {
+
+// Provides functionality for decimating a signal.
+class Decimator {
+ public:
+ explicit Decimator(size_t down_sampling_factor);
+
+ Decimator(const Decimator&) = delete;
+ Decimator& operator=(const Decimator&) = delete;
+
+ // Downsamples the signal.
+ void Decimate(rtc::ArrayView<const float> in, rtc::ArrayView<float> out);
+
+ private:
+ const size_t down_sampling_factor_;
+ CascadedBiQuadFilter anti_aliasing_filter_;
+ CascadedBiQuadFilter noise_reduction_filter_;
+};
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AEC3_DECIMATOR_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/decimator_unittest.cc b/third_party/libwebrtc/modules/audio_processing/aec3/decimator_unittest.cc
new file mode 100644
index 0000000000..e6f5ea0403
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/decimator_unittest.cc
@@ -0,0 +1,135 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/decimator.h"
+
+#include <math.h>
+
+#include <algorithm>
+#include <array>
+#include <cmath>
+#include <cstring>
+#include <numeric>
+#include <string>
+#include <vector>
+
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "rtc_base/strings/string_builder.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+namespace {
+
+std::string ProduceDebugText(int sample_rate_hz) {
+ rtc::StringBuilder ss;
+ ss << "Sample rate: " << sample_rate_hz;
+ return ss.Release();
+}
+
+constexpr size_t kDownSamplingFactors[] = {2, 4, 8};
+constexpr float kPi = 3.141592f;
+constexpr size_t kNumStartupBlocks = 50;
+constexpr size_t kNumBlocks = 1000;
+
+void ProduceDecimatedSinusoidalOutputPower(int sample_rate_hz,
+ size_t down_sampling_factor,
+ float sinusoidal_frequency_hz,
+ float* input_power,
+ float* output_power) {
+ float input[kBlockSize * kNumBlocks];
+ const size_t sub_block_size = kBlockSize / down_sampling_factor;
+
+ // Produce a sinusoid of the specified frequency.
+ for (size_t k = 0; k < kBlockSize * kNumBlocks; ++k) {
+ input[k] = 32767.f * std::sin(2.f * kPi * sinusoidal_frequency_hz * k /
+ sample_rate_hz);
+ }
+
+ Decimator decimator(down_sampling_factor);
+ std::vector<float> output(sub_block_size * kNumBlocks);
+
+ for (size_t k = 0; k < kNumBlocks; ++k) {
+ std::vector<float> sub_block(sub_block_size);
+ decimator.Decimate(
+ rtc::ArrayView<const float>(&input[k * kBlockSize], kBlockSize),
+ sub_block);
+
+ std::copy(sub_block.begin(), sub_block.end(),
+ output.begin() + k * sub_block_size);
+ }
+
+ ASSERT_GT(kNumBlocks, kNumStartupBlocks);
+ rtc::ArrayView<const float> input_to_evaluate(
+ &input[kNumStartupBlocks * kBlockSize],
+ (kNumBlocks - kNumStartupBlocks) * kBlockSize);
+ rtc::ArrayView<const float> output_to_evaluate(
+ &output[kNumStartupBlocks * sub_block_size],
+ (kNumBlocks - kNumStartupBlocks) * sub_block_size);
+ *input_power =
+ std::inner_product(input_to_evaluate.begin(), input_to_evaluate.end(),
+ input_to_evaluate.begin(), 0.f) /
+ input_to_evaluate.size();
+ *output_power =
+ std::inner_product(output_to_evaluate.begin(), output_to_evaluate.end(),
+ output_to_evaluate.begin(), 0.f) /
+ output_to_evaluate.size();
+}
+
+} // namespace
+
+// Verifies that there is little aliasing from upper frequencies in the
+// downsampling.
+TEST(Decimator, NoLeakageFromUpperFrequencies) {
+ float input_power;
+ float output_power;
+ for (auto rate : {16000, 32000, 48000}) {
+ for (auto down_sampling_factor : kDownSamplingFactors) {
+ SCOPED_TRACE(ProduceDebugText(rate));
+ ProduceDecimatedSinusoidalOutputPower(rate, down_sampling_factor,
+ 3.f / 8.f * rate, &input_power,
+ &output_power);
+ EXPECT_GT(0.0001f * input_power, output_power);
+ }
+ }
+}
+
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+// Verifies the check for the input size.
+TEST(DecimatorDeathTest, WrongInputSize) {
+ Decimator decimator(4);
+ std::vector<float> x(kBlockSize - 1, 0.f);
+ std::array<float, kBlockSize / 4> x_downsampled;
+ EXPECT_DEATH(decimator.Decimate(x, x_downsampled), "");
+}
+
+// Verifies the check for non-null output parameter.
+TEST(DecimatorDeathTest, NullOutput) {
+ Decimator decimator(4);
+ std::vector<float> x(kBlockSize, 0.f);
+ EXPECT_DEATH(decimator.Decimate(x, nullptr), "");
+}
+
+// Verifies the check for the output size.
+TEST(DecimatorDeathTest, WrongOutputSize) {
+ Decimator decimator(4);
+ std::vector<float> x(kBlockSize, 0.f);
+ std::array<float, kBlockSize / 4 - 1> x_downsampled;
+ EXPECT_DEATH(decimator.Decimate(x, x_downsampled), "");
+}
+
+// Verifies the check for the correct downsampling factor.
+TEST(DecimatorDeathTest, CorrectDownSamplingFactor) {
+ EXPECT_DEATH(Decimator(3), "");
+}
+
+#endif
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/delay_estimate.h b/third_party/libwebrtc/modules/audio_processing/aec3/delay_estimate.h
new file mode 100644
index 0000000000..ea5dd27153
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/delay_estimate.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_DELAY_ESTIMATE_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_DELAY_ESTIMATE_H_
+
+namespace webrtc {
+
+// Stores delay_estimates.
+struct DelayEstimate {
+ enum class Quality { kCoarse, kRefined };
+
+ DelayEstimate(Quality quality, size_t delay)
+ : quality(quality), delay(delay) {}
+
+ Quality quality;
+ size_t delay;
+ size_t blocks_since_last_change = 0;
+ size_t blocks_since_last_update = 0;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AEC3_DELAY_ESTIMATE_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/dominant_nearend_detector.cc b/third_party/libwebrtc/modules/audio_processing/aec3/dominant_nearend_detector.cc
new file mode 100644
index 0000000000..40073cf615
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/dominant_nearend_detector.cc
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/dominant_nearend_detector.h"
+
+#include <numeric>
+
+namespace webrtc {
+DominantNearendDetector::DominantNearendDetector(
+ const EchoCanceller3Config::Suppressor::DominantNearendDetection& config,
+ size_t num_capture_channels)
+ : enr_threshold_(config.enr_threshold),
+ enr_exit_threshold_(config.enr_exit_threshold),
+ snr_threshold_(config.snr_threshold),
+ hold_duration_(config.hold_duration),
+ trigger_threshold_(config.trigger_threshold),
+ use_during_initial_phase_(config.use_during_initial_phase),
+ num_capture_channels_(num_capture_channels),
+ trigger_counters_(num_capture_channels_),
+ hold_counters_(num_capture_channels_) {}
+
+void DominantNearendDetector::Update(
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>>
+ nearend_spectrum,
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>>
+ residual_echo_spectrum,
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>>
+ comfort_noise_spectrum,
+ bool initial_state) {
+ nearend_state_ = false;
+
+ auto low_frequency_energy = [](rtc::ArrayView<const float> spectrum) {
+ RTC_DCHECK_LE(16, spectrum.size());
+ return std::accumulate(spectrum.begin() + 1, spectrum.begin() + 16, 0.f);
+ };
+
+ for (size_t ch = 0; ch < num_capture_channels_; ++ch) {
+ const float ne_sum = low_frequency_energy(nearend_spectrum[ch]);
+ const float echo_sum = low_frequency_energy(residual_echo_spectrum[ch]);
+ const float noise_sum = low_frequency_energy(comfort_noise_spectrum[ch]);
+
+ // Detect strong active nearend if the nearend is sufficiently stronger than
+ // the echo and the nearend noise.
+ if ((!initial_state || use_during_initial_phase_) &&
+ echo_sum < enr_threshold_ * ne_sum &&
+ ne_sum > snr_threshold_ * noise_sum) {
+ if (++trigger_counters_[ch] >= trigger_threshold_) {
+ // After a period of strong active nearend activity, flag nearend mode.
+ hold_counters_[ch] = hold_duration_;
+ trigger_counters_[ch] = trigger_threshold_;
+ }
+ } else {
+ // Forget previously detected strong active nearend activity.
+ trigger_counters_[ch] = std::max(0, trigger_counters_[ch] - 1);
+ }
+
+ // Exit nearend-state early at strong echo.
+ if (echo_sum > enr_exit_threshold_ * ne_sum &&
+ echo_sum > snr_threshold_ * noise_sum) {
+ hold_counters_[ch] = 0;
+ }
+
+ // Remain in any nearend mode for a certain duration.
+ hold_counters_[ch] = std::max(0, hold_counters_[ch] - 1);
+ nearend_state_ = nearend_state_ || hold_counters_[ch] > 0;
+ }
+}
+} // namespace webrtc
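The per-channel counters above form a small state machine: trigger_counters_ must reach trigger_threshold_ before hold_counters_ is armed with hold_duration_, after which the nearend flag stays up while the hold counter decays by one per block. A standalone sketch of that counter pair; the threshold values and the strong_nearend stimulus below are hypothetical.

    #include <algorithm>
    #include <cstdio>

    int main() {
      const int trigger_threshold = 12;  // Hypothetical config values.
      const int hold_duration = 50;
      int trigger_counter = 0;
      int hold_counter = 0;
      for (int block = 0; block < 100; ++block) {
        const bool strong_nearend = block < 20;  // Stimulus for the sketch.
        if (strong_nearend) {
          if (++trigger_counter >= trigger_threshold) {
            hold_counter = hold_duration;         // Arm the hold period.
            trigger_counter = trigger_threshold;  // Saturate, as in the source.
          }
        } else {
          // Forget previously detected strong nearend activity.
          trigger_counter = std::max(0, trigger_counter - 1);
        }
        hold_counter = std::max(0, hold_counter - 1);
        if (block % 20 == 0) {
          std::printf("block=%2d nearend=%d\n", block, hold_counter > 0);
        }
      }
    }
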
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/dominant_nearend_detector.h b/third_party/libwebrtc/modules/audio_processing/aec3/dominant_nearend_detector.h
new file mode 100644
index 0000000000..046d1488d6
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/dominant_nearend_detector.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_DOMINANT_NEAREND_DETECTOR_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_DOMINANT_NEAREND_DETECTOR_H_
+
+#include <vector>
+
+#include "api/array_view.h"
+#include "api/audio/echo_canceller3_config.h"
+#include "modules/audio_processing/aec3/nearend_detector.h"
+
+namespace webrtc {
+// Class for selecting whether the suppressor is in the nearend or echo state.
+class DominantNearendDetector : public NearendDetector {
+ public:
+ DominantNearendDetector(
+ const EchoCanceller3Config::Suppressor::DominantNearendDetection& config,
+ size_t num_capture_channels);
+
+ // Returns whether the current state is the nearend state.
+ bool IsNearendState() const override { return nearend_state_; }
+
+ // Updates the state selection based on latest spectral estimates.
+ void Update(rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>>
+ nearend_spectrum,
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>>
+ residual_echo_spectrum,
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>>
+ comfort_noise_spectrum,
+ bool initial_state) override;
+
+ private:
+ const float enr_threshold_;
+ const float enr_exit_threshold_;
+ const float snr_threshold_;
+ const int hold_duration_;
+ const int trigger_threshold_;
+ const bool use_during_initial_phase_;
+ const size_t num_capture_channels_;
+
+ bool nearend_state_ = false;
+ std::vector<int> trigger_counters_;
+ std::vector<int> hold_counters_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AEC3_DOMINANT_NEAREND_DETECTOR_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/downsampled_render_buffer.cc b/third_party/libwebrtc/modules/audio_processing/aec3/downsampled_render_buffer.cc
new file mode 100644
index 0000000000..c105911aa8
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/downsampled_render_buffer.cc
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/downsampled_render_buffer.h"
+
+#include <algorithm>
+
+namespace webrtc {
+
+DownsampledRenderBuffer::DownsampledRenderBuffer(size_t downsampled_buffer_size)
+ : size(static_cast<int>(downsampled_buffer_size)),
+ buffer(downsampled_buffer_size, 0.f) {
+ std::fill(buffer.begin(), buffer.end(), 0.f);
+}
+
+DownsampledRenderBuffer::~DownsampledRenderBuffer() = default;
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/downsampled_render_buffer.h b/third_party/libwebrtc/modules/audio_processing/aec3/downsampled_render_buffer.h
new file mode 100644
index 0000000000..fbdc9b4e93
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/downsampled_render_buffer.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_DOWNSAMPLED_RENDER_BUFFER_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_DOWNSAMPLED_RENDER_BUFFER_H_
+
+#include <stddef.h>
+
+#include <vector>
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+// Holds the circular buffer of the downsampled render data.
+struct DownsampledRenderBuffer {
+ explicit DownsampledRenderBuffer(size_t downsampled_buffer_size);
+ ~DownsampledRenderBuffer();
+
+ int IncIndex(int index) const {
+ RTC_DCHECK_EQ(buffer.size(), static_cast<size_t>(size));
+ return index < size - 1 ? index + 1 : 0;
+ }
+
+ int DecIndex(int index) const {
+ RTC_DCHECK_EQ(buffer.size(), static_cast<size_t>(size));
+ return index > 0 ? index - 1 : size - 1;
+ }
+
+ int OffsetIndex(int index, int offset) const {
+ RTC_DCHECK_GE(buffer.size(), offset);
+ RTC_DCHECK_EQ(buffer.size(), static_cast<size_t>(size));
+ return (size + index + offset) % size;
+ }
+
+ void UpdateWriteIndex(int offset) { write = OffsetIndex(write, offset); }
+ void IncWriteIndex() { write = IncIndex(write); }
+ void DecWriteIndex() { write = DecIndex(write); }
+ void UpdateReadIndex(int offset) { read = OffsetIndex(read, offset); }
+ void IncReadIndex() { read = IncIndex(read); }
+ void DecReadIndex() { read = DecIndex(read); }
+
+ const int size;
+ std::vector<float> buffer;
+ int write = 0;
+ int read = 0;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AEC3_DOWNSAMPLED_RENDER_BUFFER_H_
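The index helpers above are plain modular arithmetic on a fixed-size ring, so the write and read positions chase each other around the buffer with wraparound. A standalone sketch of the three operations (the buffer size of 5 is arbitrary):

    #include <cstdio>

    int main() {
      const int size = 5;
      // Same arithmetic as IncIndex/DecIndex/OffsetIndex above.
      auto inc = [&](int i) { return i < size - 1 ? i + 1 : 0; };
      auto dec = [&](int i) { return i > 0 ? i - 1 : size - 1; };
      auto offset = [&](int i, int off) { return (size + i + off) % size; };
      // Wraps at both ends: 0, 4, 3.
      std::printf("inc(4)=%d dec(0)=%d offset(1,-3)=%d\n", inc(4), dec(0),
                  offset(1, -3));
    }
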
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/echo_audibility.cc b/third_party/libwebrtc/modules/audio_processing/aec3/echo_audibility.cc
new file mode 100644
index 0000000000..142a33d5e0
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/echo_audibility.cc
@@ -0,0 +1,119 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/echo_audibility.h"
+
+#include <algorithm>
+#include <cmath>
+#include <utility>
+#include <vector>
+
+#include "api/array_view.h"
+#include "modules/audio_processing/aec3/block_buffer.h"
+#include "modules/audio_processing/aec3/spectrum_buffer.h"
+#include "modules/audio_processing/aec3/stationarity_estimator.h"
+
+namespace webrtc {
+
+EchoAudibility::EchoAudibility(bool use_render_stationarity_at_init)
+ : use_render_stationarity_at_init_(use_render_stationarity_at_init) {
+ Reset();
+}
+
+EchoAudibility::~EchoAudibility() = default;
+
+void EchoAudibility::Update(const RenderBuffer& render_buffer,
+ rtc::ArrayView<const float> average_reverb,
+ int delay_blocks,
+ bool external_delay_seen) {
+ UpdateRenderNoiseEstimator(render_buffer.GetSpectrumBuffer(),
+ render_buffer.GetBlockBuffer(),
+ external_delay_seen);
+
+ if (external_delay_seen || use_render_stationarity_at_init_) {
+ UpdateRenderStationarityFlags(render_buffer, average_reverb, delay_blocks);
+ }
+}
+
+void EchoAudibility::Reset() {
+ render_stationarity_.Reset();
+ non_zero_render_seen_ = false;
+ render_spectrum_write_prev_ = absl::nullopt;
+}
+
+void EchoAudibility::UpdateRenderStationarityFlags(
+ const RenderBuffer& render_buffer,
+ rtc::ArrayView<const float> average_reverb,
+ int min_channel_delay_blocks) {
+ const SpectrumBuffer& spectrum_buffer = render_buffer.GetSpectrumBuffer();
+ int idx_at_delay = spectrum_buffer.OffsetIndex(spectrum_buffer.read,
+ min_channel_delay_blocks);
+
+ int num_lookahead = render_buffer.Headroom() - min_channel_delay_blocks + 1;
+ num_lookahead = std::max(0, num_lookahead);
+
+ render_stationarity_.UpdateStationarityFlags(spectrum_buffer, average_reverb,
+ idx_at_delay, num_lookahead);
+}
+
+void EchoAudibility::UpdateRenderNoiseEstimator(
+ const SpectrumBuffer& spectrum_buffer,
+ const BlockBuffer& block_buffer,
+ bool external_delay_seen) {
+ if (!render_spectrum_write_prev_) {
+ render_spectrum_write_prev_ = spectrum_buffer.write;
+ render_block_write_prev_ = block_buffer.write;
+ return;
+ }
+ int render_spectrum_write_current = spectrum_buffer.write;
+ if (!non_zero_render_seen_ && !external_delay_seen) {
+ non_zero_render_seen_ = !IsRenderTooLow(block_buffer);
+ }
+ if (non_zero_render_seen_) {
+ for (int idx = render_spectrum_write_prev_.value();
+ idx != render_spectrum_write_current;
+ idx = spectrum_buffer.DecIndex(idx)) {
+ render_stationarity_.UpdateNoiseEstimator(spectrum_buffer.buffer[idx]);
+ }
+ }
+ render_spectrum_write_prev_ = render_spectrum_write_current;
+}
+
+bool EchoAudibility::IsRenderTooLow(const BlockBuffer& block_buffer) {
+ const int num_render_channels =
+ static_cast<int>(block_buffer.buffer[0].NumChannels());
+ bool too_low = false;
+ const int render_block_write_current = block_buffer.write;
+ if (render_block_write_current == render_block_write_prev_) {
+ too_low = true;
+ } else {
+ for (int idx = render_block_write_prev_; idx != render_block_write_current;
+ idx = block_buffer.IncIndex(idx)) {
+ float max_abs_over_channels = 0.f;
+ for (int ch = 0; ch < num_render_channels; ++ch) {
+ rtc::ArrayView<const float, kBlockSize> block =
+ block_buffer.buffer[idx].View(/*band=*/0, /*channel=*/ch);
+ auto r = std::minmax_element(block.cbegin(), block.cend());
+ float max_abs_channel =
+ std::max(std::fabs(*r.first), std::fabs(*r.second));
+ max_abs_over_channels =
+ std::max(max_abs_over_channels, max_abs_channel);
+ }
+ if (max_abs_over_channels < 10.f) {
+ too_low = true; // Discards all blocks if one of them is too low.
+ break;
+ }
+ }
+ }
+ render_block_write_prev_ = render_block_write_current;
+ return too_low;
+}
+
+} // namespace webrtc
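UpdateRenderNoiseEstimator() above walks the spectrum ring buffer from the previous write position to the current one using DecIndex, which assumes the write index decrements as new spectra are inserted; every spectrum added since the last call is thus visited exactly once. A standalone sketch of that traversal (buffer size and indices are arbitrary):

    #include <cstdio>

    int main() {
      const int size = 8;
      auto dec = [&](int i) { return i > 0 ? i - 1 : size - 1; };
      const int write_prev = 6;
      const int write_current = 2;  // Four new spectra since the last call.
      // Visits ring positions 6, 5, 4, 3 -- exactly the new entries.
      for (int idx = write_prev; idx != write_current; idx = dec(idx)) {
        std::printf("feed spectrum at ring position %d\n", idx);
      }
    }
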
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/echo_audibility.h b/third_party/libwebrtc/modules/audio_processing/aec3/echo_audibility.h
new file mode 100644
index 0000000000..b9d6f87d2a
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/echo_audibility.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_ECHO_AUDIBILITY_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_ECHO_AUDIBILITY_H_
+
+#include <stddef.h>
+
+#include "absl/types/optional.h"
+#include "api/array_view.h"
+#include "modules/audio_processing/aec3/block_buffer.h"
+#include "modules/audio_processing/aec3/render_buffer.h"
+#include "modules/audio_processing/aec3/spectrum_buffer.h"
+#include "modules/audio_processing/aec3/stationarity_estimator.h"
+
+namespace webrtc {
+
+class EchoAudibility {
+ public:
+ explicit EchoAudibility(bool use_render_stationarity_at_init);
+ ~EchoAudibility();
+
+ EchoAudibility(const EchoAudibility&) = delete;
+ EchoAudibility& operator=(const EchoAudibility&) = delete;
+
+ // Feed new render data to the echo audibility estimator.
+ void Update(const RenderBuffer& render_buffer,
+ rtc::ArrayView<const float> average_reverb,
+ int min_channel_delay_blocks,
+ bool external_delay_seen);
+ // Get the residual echo scaling.
+ void GetResidualEchoScaling(bool filter_has_had_time_to_converge,
+ rtc::ArrayView<float> residual_scaling) const {
+ for (size_t band = 0; band < residual_scaling.size(); ++band) {
+ if (render_stationarity_.IsBandStationary(band) &&
+ (filter_has_had_time_to_converge ||
+ use_render_stationarity_at_init_)) {
+ residual_scaling[band] = 0.f;
+ } else {
+ residual_scaling[band] = 1.0f;
+ }
+ }
+ }
+
+ // Returns true if the current render block is estimated as stationary.
+ bool IsBlockStationary() const {
+ return render_stationarity_.IsBlockStationary();
+ }
+
+ private:
+ // Reset the EchoAudibility class.
+ void Reset();
+
+ // Updates the render stationarity flags for the current frame.
+ void UpdateRenderStationarityFlags(const RenderBuffer& render_buffer,
+ rtc::ArrayView<const float> average_reverb,
+ int delay_blocks);
+
+ // Updates the noise estimator with the new render data since the previous
+ // call to this method.
+ void UpdateRenderNoiseEstimator(const SpectrumBuffer& spectrum_buffer,
+ const BlockBuffer& block_buffer,
+ bool external_delay_seen);
+
+ // Returns a bool being true if the render signal contains just close to zero
+ // values.
+ bool IsRenderTooLow(const BlockBuffer& block_buffer);
+
+ absl::optional<int> render_spectrum_write_prev_;
+ int render_block_write_prev_;
+ bool non_zero_render_seen_;
+ const bool use_render_stationarity_at_init_;
+ StationarityEstimator render_stationarity_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AEC3_ECHO_AUDIBILITY_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/echo_canceller3.cc b/third_party/libwebrtc/modules/audio_processing/aec3/echo_canceller3.cc
new file mode 100644
index 0000000000..e8e2175994
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/echo_canceller3.cc
@@ -0,0 +1,992 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/audio_processing/aec3/echo_canceller3.h"
+
+#include <algorithm>
+#include <utility>
+
+#include "absl/strings/string_view.h"
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "modules/audio_processing/high_pass_filter.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+#include "rtc_base/experiments/field_trial_parser.h"
+#include "rtc_base/logging.h"
+#include "system_wrappers/include/field_trial.h"
+
+namespace webrtc {
+
+namespace {
+
+enum class EchoCanceller3ApiCall { kCapture, kRender };
+
+bool DetectSaturation(rtc::ArrayView<const float> y) {
+ for (size_t k = 0; k < y.size(); ++k) {
+ if (y[k] >= 32700.0f || y[k] <= -32700.0f) {
+ return true;
+ }
+ }
+ return false;
+}
+
+// Retrieves a value from a field trial, if available, and writes it to
+// *value_to_update. If no value is present, or if the retrieved value lies
+// outside the specified limits, *value_to_update is left unchanged.
+void RetrieveFieldTrialValue(absl::string_view trial_name,
+ float min,
+ float max,
+ float* value_to_update) {
+ const std::string field_trial_str = field_trial::FindFullName(trial_name);
+
+ FieldTrialParameter<double> field_trial_param(/*key=*/"", *value_to_update);
+
+ ParseFieldTrial({&field_trial_param}, field_trial_str);
+ float field_trial_value = static_cast<float>(field_trial_param.Get());
+
+ if (field_trial_value >= min && field_trial_value <= max &&
+ field_trial_value != *value_to_update) {
+ RTC_LOG(LS_INFO) << "Key " << trial_name
+ << " changing AEC3 parameter value from "
+ << *value_to_update << " to " << field_trial_value;
+ *value_to_update = field_trial_value;
+ }
+}
+
+void RetrieveFieldTrialValue(absl::string_view trial_name,
+ int min,
+ int max,
+ int* value_to_update) {
+ const std::string field_trial_str = field_trial::FindFullName(trial_name);
+
+ FieldTrialParameter<int> field_trial_param(/*key=*/"", *value_to_update);
+
+ ParseFieldTrial({&field_trial_param}, field_trial_str);
+  int field_trial_value = field_trial_param.Get();
+
+ if (field_trial_value >= min && field_trial_value <= max &&
+ field_trial_value != *value_to_update) {
+ RTC_LOG(LS_INFO) << "Key " << trial_name
+ << " changing AEC3 parameter value from "
+ << *value_to_update << " to " << field_trial_value;
+ *value_to_update = field_trial_value;
+ }
+}
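+
+// Usage sketch (illustrative, not part of the original source; the trial
+// name is hypothetical): both overloads clamp an override to [min, max] and
+// leave *value_to_update untouched when the trial is unset or out of range.
+//
+//   float gain = 2.f;  // default from the config
+//   RetrieveFieldTrialValue("WebRTC-SomeAec3Override", 0.f, 10.f, &gain);
+//   // gain now holds the trial value iff one was supplied within [0, 10].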
+
+void FillSubFrameView(
+ AudioBuffer* frame,
+ size_t sub_frame_index,
+ std::vector<std::vector<rtc::ArrayView<float>>>* sub_frame_view) {
+ RTC_DCHECK_GE(1, sub_frame_index);
+ RTC_DCHECK_LE(0, sub_frame_index);
+ RTC_DCHECK_EQ(frame->num_bands(), sub_frame_view->size());
+ RTC_DCHECK_EQ(frame->num_channels(), (*sub_frame_view)[0].size());
+ for (size_t band = 0; band < sub_frame_view->size(); ++band) {
+ for (size_t channel = 0; channel < (*sub_frame_view)[0].size(); ++channel) {
+ (*sub_frame_view)[band][channel] = rtc::ArrayView<float>(
+ &frame->split_bands(channel)[band][sub_frame_index * kSubFrameLength],
+ kSubFrameLength);
+ }
+ }
+}
+
+void FillSubFrameView(
+ bool proper_downmix_needed,
+ std::vector<std::vector<std::vector<float>>>* frame,
+ size_t sub_frame_index,
+ std::vector<std::vector<rtc::ArrayView<float>>>* sub_frame_view) {
+ RTC_DCHECK_GE(1, sub_frame_index);
+ RTC_DCHECK_EQ(frame->size(), sub_frame_view->size());
+ const size_t frame_num_channels = (*frame)[0].size();
+ const size_t sub_frame_num_channels = (*sub_frame_view)[0].size();
+ if (frame_num_channels > sub_frame_num_channels) {
+ RTC_DCHECK_EQ(sub_frame_num_channels, 1u);
+ if (proper_downmix_needed) {
+      // When a proper downmix is needed (which is the case when proper
+      // stereo is present in the echo reference signal but the echo canceller
+      // does the processing in mono), downmix the echo reference by averaging
+      // the channel content (otherwise downmixing is done by selecting
+      // channel 0).
+ for (size_t band = 0; band < frame->size(); ++band) {
+ for (size_t ch = 1; ch < frame_num_channels; ++ch) {
+ for (size_t k = 0; k < kSubFrameLength; ++k) {
+ (*frame)[band][/*channel=*/0]
+ [sub_frame_index * kSubFrameLength + k] +=
+ (*frame)[band][ch][sub_frame_index * kSubFrameLength + k];
+ }
+ }
+ const float one_by_num_channels = 1.0f / frame_num_channels;
+ for (size_t k = 0; k < kSubFrameLength; ++k) {
+ (*frame)[band][/*channel=*/0][sub_frame_index * kSubFrameLength +
+ k] *= one_by_num_channels;
+ }
+ }
+ }
+ for (size_t band = 0; band < frame->size(); ++band) {
+ (*sub_frame_view)[band][/*channel=*/0] = rtc::ArrayView<float>(
+ &(*frame)[band][/*channel=*/0][sub_frame_index * kSubFrameLength],
+ kSubFrameLength);
+ }
+ } else {
+ RTC_DCHECK_EQ(frame_num_channels, sub_frame_num_channels);
+ for (size_t band = 0; band < frame->size(); ++band) {
+ for (size_t channel = 0; channel < (*frame)[band].size(); ++channel) {
+ (*sub_frame_view)[band][channel] = rtc::ArrayView<float>(
+ &(*frame)[band][channel][sub_frame_index * kSubFrameLength],
+ kSubFrameLength);
+ }
+ }
+ }
+}
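+
+// Note on the downmix above: with N render channels, channel 0 of each band
+// ends up holding the per-sample channel average, i.e. for each sample k of
+// the sub-frame,
+//   frame[band][0][k] = (1/N) * sum_{ch=0..N-1} frame[band][ch][k].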
+
+void ProcessCaptureFrameContent(
+ AudioBuffer* linear_output,
+ AudioBuffer* capture,
+ bool level_change,
+ bool aec_reference_is_downmixed_stereo,
+ bool saturated_microphone_signal,
+ size_t sub_frame_index,
+ FrameBlocker* capture_blocker,
+ BlockFramer* linear_output_framer,
+ BlockFramer* output_framer,
+ BlockProcessor* block_processor,
+ Block* linear_output_block,
+ std::vector<std::vector<rtc::ArrayView<float>>>*
+ linear_output_sub_frame_view,
+ Block* capture_block,
+ std::vector<std::vector<rtc::ArrayView<float>>>* capture_sub_frame_view) {
+ FillSubFrameView(capture, sub_frame_index, capture_sub_frame_view);
+
+ if (linear_output) {
+ RTC_DCHECK(linear_output_framer);
+ RTC_DCHECK(linear_output_block);
+ RTC_DCHECK(linear_output_sub_frame_view);
+ FillSubFrameView(linear_output, sub_frame_index,
+ linear_output_sub_frame_view);
+ }
+
+ capture_blocker->InsertSubFrameAndExtractBlock(*capture_sub_frame_view,
+ capture_block);
+ block_processor->ProcessCapture(
+ /*echo_path_gain_change=*/level_change ||
+ aec_reference_is_downmixed_stereo,
+ saturated_microphone_signal, linear_output_block, capture_block);
+ output_framer->InsertBlockAndExtractSubFrame(*capture_block,
+ capture_sub_frame_view);
+
+ if (linear_output) {
+ RTC_DCHECK(linear_output_framer);
+ linear_output_framer->InsertBlockAndExtractSubFrame(
+ *linear_output_block, linear_output_sub_frame_view);
+ }
+}
+
+void ProcessRemainingCaptureFrameContent(bool level_change,
+ bool aec_reference_is_downmixed_stereo,
+ bool saturated_microphone_signal,
+ FrameBlocker* capture_blocker,
+ BlockFramer* linear_output_framer,
+ BlockFramer* output_framer,
+ BlockProcessor* block_processor,
+ Block* linear_output_block,
+ Block* block) {
+ if (!capture_blocker->IsBlockAvailable()) {
+ return;
+ }
+
+ capture_blocker->ExtractBlock(block);
+ block_processor->ProcessCapture(
+ /*echo_path_gain_change=*/level_change ||
+ aec_reference_is_downmixed_stereo,
+ saturated_microphone_signal, linear_output_block, block);
+ output_framer->InsertBlock(*block);
+
+ if (linear_output_framer) {
+ RTC_DCHECK(linear_output_block);
+ linear_output_framer->InsertBlock(*linear_output_block);
+ }
+}
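+
+// Together, ProcessCaptureFrameContent() and
+// ProcessRemainingCaptureFrameContent() consume one 160-sample frame per
+// band and channel: two sub-frames of kSubFrameLength (80) samples are
+// blocked into kBlockSize (64) sample blocks, and any extra block completed
+// by the leftover samples is handled by the "remaining" variant.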
+
+void BufferRenderFrameContent(
+ bool proper_downmix_needed,
+ std::vector<std::vector<std::vector<float>>>* render_frame,
+ size_t sub_frame_index,
+ FrameBlocker* render_blocker,
+ BlockProcessor* block_processor,
+ Block* block,
+ std::vector<std::vector<rtc::ArrayView<float>>>* sub_frame_view) {
+ FillSubFrameView(proper_downmix_needed, render_frame, sub_frame_index,
+ sub_frame_view);
+ render_blocker->InsertSubFrameAndExtractBlock(*sub_frame_view, block);
+ block_processor->BufferRender(*block);
+}
+
+void BufferRemainingRenderFrameContent(FrameBlocker* render_blocker,
+ BlockProcessor* block_processor,
+ Block* block) {
+ if (!render_blocker->IsBlockAvailable()) {
+ return;
+ }
+ render_blocker->ExtractBlock(block);
+ block_processor->BufferRender(*block);
+}
+
+void CopyBufferIntoFrame(const AudioBuffer& buffer,
+ size_t num_bands,
+ size_t num_channels,
+ std::vector<std::vector<std::vector<float>>>* frame) {
+ RTC_DCHECK_EQ(num_bands, frame->size());
+ RTC_DCHECK_EQ(num_channels, (*frame)[0].size());
+ RTC_DCHECK_EQ(AudioBuffer::kSplitBandSize, (*frame)[0][0].size());
+ for (size_t band = 0; band < num_bands; ++band) {
+ for (size_t channel = 0; channel < num_channels; ++channel) {
+ rtc::ArrayView<const float> buffer_view(
+ &buffer.split_bands_const(channel)[band][0],
+ AudioBuffer::kSplitBandSize);
+ std::copy(buffer_view.begin(), buffer_view.end(),
+ (*frame)[band][channel].begin());
+ }
+ }
+}
+
+} // namespace
+
+// TODO(webrtc:5298): Move this to a separate file.
+EchoCanceller3Config AdjustConfig(const EchoCanceller3Config& config) {
+ EchoCanceller3Config adjusted_cfg = config;
+
+ if (field_trial::IsEnabled("WebRTC-Aec3StereoContentDetectionKillSwitch")) {
+ adjusted_cfg.multi_channel.detect_stereo_content = false;
+ }
+
+ if (field_trial::IsEnabled("WebRTC-Aec3AntiHowlingMinimizationKillSwitch")) {
+ adjusted_cfg.suppressor.high_bands_suppression
+ .anti_howling_activation_threshold = 25.f;
+ adjusted_cfg.suppressor.high_bands_suppression.anti_howling_gain = 0.01f;
+ }
+
+ if (field_trial::IsEnabled("WebRTC-Aec3UseShortConfigChangeDuration")) {
+ adjusted_cfg.filter.config_change_duration_blocks = 10;
+ }
+
+ if (field_trial::IsEnabled("WebRTC-Aec3UseZeroInitialStateDuration")) {
+ adjusted_cfg.filter.initial_state_seconds = 0.f;
+ } else if (field_trial::IsEnabled(
+ "WebRTC-Aec3UseDot1SecondsInitialStateDuration")) {
+ adjusted_cfg.filter.initial_state_seconds = .1f;
+ } else if (field_trial::IsEnabled(
+ "WebRTC-Aec3UseDot2SecondsInitialStateDuration")) {
+ adjusted_cfg.filter.initial_state_seconds = .2f;
+ } else if (field_trial::IsEnabled(
+ "WebRTC-Aec3UseDot3SecondsInitialStateDuration")) {
+ adjusted_cfg.filter.initial_state_seconds = .3f;
+ } else if (field_trial::IsEnabled(
+ "WebRTC-Aec3UseDot6SecondsInitialStateDuration")) {
+ adjusted_cfg.filter.initial_state_seconds = .6f;
+ } else if (field_trial::IsEnabled(
+ "WebRTC-Aec3UseDot9SecondsInitialStateDuration")) {
+ adjusted_cfg.filter.initial_state_seconds = .9f;
+ } else if (field_trial::IsEnabled(
+ "WebRTC-Aec3Use1Dot2SecondsInitialStateDuration")) {
+ adjusted_cfg.filter.initial_state_seconds = 1.2f;
+ } else if (field_trial::IsEnabled(
+ "WebRTC-Aec3Use1Dot6SecondsInitialStateDuration")) {
+ adjusted_cfg.filter.initial_state_seconds = 1.6f;
+ } else if (field_trial::IsEnabled(
+ "WebRTC-Aec3Use2Dot0SecondsInitialStateDuration")) {
+ adjusted_cfg.filter.initial_state_seconds = 2.0f;
+ }
+
+ if (field_trial::IsEnabled("WebRTC-Aec3HighPassFilterEchoReference")) {
+ adjusted_cfg.filter.high_pass_filter_echo_reference = true;
+ }
+
+ if (field_trial::IsEnabled("WebRTC-Aec3EchoSaturationDetectionKillSwitch")) {
+ adjusted_cfg.ep_strength.echo_can_saturate = false;
+ }
+
+ const std::string use_nearend_reverb_len_tunings =
+ field_trial::FindFullName("WebRTC-Aec3UseNearendReverbLen");
+ FieldTrialParameter<double> nearend_reverb_default_len(
+ "default_len", adjusted_cfg.ep_strength.default_len);
+ FieldTrialParameter<double> nearend_reverb_nearend_len(
+ "nearend_len", adjusted_cfg.ep_strength.nearend_len);
+
+ ParseFieldTrial({&nearend_reverb_default_len, &nearend_reverb_nearend_len},
+ use_nearend_reverb_len_tunings);
+ float default_len = static_cast<float>(nearend_reverb_default_len.Get());
+ float nearend_len = static_cast<float>(nearend_reverb_nearend_len.Get());
+ if (default_len > -1 && default_len < 1 && nearend_len > -1 &&
+ nearend_len < 1) {
+    adjusted_cfg.ep_strength.default_len = default_len;
+    adjusted_cfg.ep_strength.nearend_len = nearend_len;
+ }
+
+ if (field_trial::IsEnabled("WebRTC-Aec3ConservativeTailFreqResponse")) {
+ adjusted_cfg.ep_strength.use_conservative_tail_frequency_response = true;
+ }
+
+ if (field_trial::IsDisabled("WebRTC-Aec3ConservativeTailFreqResponse")) {
+ adjusted_cfg.ep_strength.use_conservative_tail_frequency_response = false;
+ }
+
+ if (field_trial::IsEnabled("WebRTC-Aec3ShortHeadroomKillSwitch")) {
+ // Two blocks headroom.
+ adjusted_cfg.delay.delay_headroom_samples = kBlockSize * 2;
+ }
+
+ if (field_trial::IsEnabled("WebRTC-Aec3ClampInstQualityToZeroKillSwitch")) {
+ adjusted_cfg.erle.clamp_quality_estimate_to_zero = false;
+ }
+
+ if (field_trial::IsEnabled("WebRTC-Aec3ClampInstQualityToOneKillSwitch")) {
+ adjusted_cfg.erle.clamp_quality_estimate_to_one = false;
+ }
+
+ if (field_trial::IsEnabled("WebRTC-Aec3OnsetDetectionKillSwitch")) {
+ adjusted_cfg.erle.onset_detection = false;
+ }
+
+ if (field_trial::IsEnabled(
+ "WebRTC-Aec3EnforceRenderDelayEstimationDownmixing")) {
+ adjusted_cfg.delay.render_alignment_mixing.downmix = true;
+ adjusted_cfg.delay.render_alignment_mixing.adaptive_selection = false;
+ }
+
+ if (field_trial::IsEnabled(
+ "WebRTC-Aec3EnforceCaptureDelayEstimationDownmixing")) {
+ adjusted_cfg.delay.capture_alignment_mixing.downmix = true;
+ adjusted_cfg.delay.capture_alignment_mixing.adaptive_selection = false;
+ }
+
+ if (field_trial::IsEnabled(
+ "WebRTC-Aec3EnforceCaptureDelayEstimationLeftRightPrioritization")) {
+ adjusted_cfg.delay.capture_alignment_mixing.prefer_first_two_channels =
+ true;
+ }
+
+ if (field_trial::IsEnabled(
+ "WebRTC-"
+ "Aec3RenderDelayEstimationLeftRightPrioritizationKillSwitch")) {
+ adjusted_cfg.delay.capture_alignment_mixing.prefer_first_two_channels =
+ false;
+ }
+
+ if (field_trial::IsEnabled("WebRTC-Aec3DelayEstimatorDetectPreEcho")) {
+ adjusted_cfg.delay.detect_pre_echo = true;
+ }
+
+ if (field_trial::IsDisabled("WebRTC-Aec3DelayEstimatorDetectPreEcho")) {
+ adjusted_cfg.delay.detect_pre_echo = false;
+ }
+
+ if (field_trial::IsEnabled("WebRTC-Aec3SensitiveDominantNearendActivation")) {
+ adjusted_cfg.suppressor.dominant_nearend_detection.enr_threshold = 0.5f;
+ } else if (field_trial::IsEnabled(
+ "WebRTC-Aec3VerySensitiveDominantNearendActivation")) {
+ adjusted_cfg.suppressor.dominant_nearend_detection.enr_threshold = 0.75f;
+ }
+
+ if (field_trial::IsEnabled("WebRTC-Aec3TransparentAntiHowlingGain")) {
+ adjusted_cfg.suppressor.high_bands_suppression.anti_howling_gain = 1.f;
+ }
+
+ if (field_trial::IsEnabled(
+ "WebRTC-Aec3EnforceMoreTransparentNormalSuppressorTuning")) {
+ adjusted_cfg.suppressor.normal_tuning.mask_lf.enr_transparent = 0.4f;
+ adjusted_cfg.suppressor.normal_tuning.mask_lf.enr_suppress = 0.5f;
+ }
+
+ if (field_trial::IsEnabled(
+ "WebRTC-Aec3EnforceMoreTransparentNearendSuppressorTuning")) {
+ adjusted_cfg.suppressor.nearend_tuning.mask_lf.enr_transparent = 1.29f;
+ adjusted_cfg.suppressor.nearend_tuning.mask_lf.enr_suppress = 1.3f;
+ }
+
+ if (field_trial::IsEnabled(
+ "WebRTC-Aec3EnforceMoreTransparentNormalSuppressorHfTuning")) {
+ adjusted_cfg.suppressor.normal_tuning.mask_hf.enr_transparent = 0.3f;
+ adjusted_cfg.suppressor.normal_tuning.mask_hf.enr_suppress = 0.4f;
+ }
+
+ if (field_trial::IsEnabled(
+ "WebRTC-Aec3EnforceMoreTransparentNearendSuppressorHfTuning")) {
+ adjusted_cfg.suppressor.nearend_tuning.mask_hf.enr_transparent = 1.09f;
+ adjusted_cfg.suppressor.nearend_tuning.mask_hf.enr_suppress = 1.1f;
+ }
+
+ if (field_trial::IsEnabled(
+ "WebRTC-Aec3EnforceRapidlyAdjustingNormalSuppressorTunings")) {
+ adjusted_cfg.suppressor.normal_tuning.max_inc_factor = 2.5f;
+ }
+
+ if (field_trial::IsEnabled(
+ "WebRTC-Aec3EnforceRapidlyAdjustingNearendSuppressorTunings")) {
+ adjusted_cfg.suppressor.nearend_tuning.max_inc_factor = 2.5f;
+ }
+
+ if (field_trial::IsEnabled(
+ "WebRTC-Aec3EnforceSlowlyAdjustingNormalSuppressorTunings")) {
+ adjusted_cfg.suppressor.normal_tuning.max_dec_factor_lf = .2f;
+ }
+
+ if (field_trial::IsEnabled(
+ "WebRTC-Aec3EnforceSlowlyAdjustingNearendSuppressorTunings")) {
+ adjusted_cfg.suppressor.nearend_tuning.max_dec_factor_lf = .2f;
+ }
+
+ if (field_trial::IsEnabled("WebRTC-Aec3EnforceConservativeHfSuppression")) {
+ adjusted_cfg.suppressor.conservative_hf_suppression = true;
+ }
+
+ if (field_trial::IsEnabled("WebRTC-Aec3EnforceStationarityProperties")) {
+ adjusted_cfg.echo_audibility.use_stationarity_properties = true;
+ }
+
+ if (field_trial::IsEnabled(
+ "WebRTC-Aec3EnforceStationarityPropertiesAtInit")) {
+ adjusted_cfg.echo_audibility.use_stationarity_properties_at_init = true;
+ }
+
+ if (field_trial::IsEnabled("WebRTC-Aec3EnforceLowActiveRenderLimit")) {
+ adjusted_cfg.render_levels.active_render_limit = 50.f;
+ } else if (field_trial::IsEnabled(
+ "WebRTC-Aec3EnforceVeryLowActiveRenderLimit")) {
+ adjusted_cfg.render_levels.active_render_limit = 30.f;
+ }
+
+ if (field_trial::IsEnabled("WebRTC-Aec3NonlinearModeReverbKillSwitch")) {
+ adjusted_cfg.echo_model.model_reverb_in_nonlinear_mode = false;
+ }
+
+ // Field-trial based override for the whole suppressor tuning.
+ const std::string suppressor_tuning_override_trial_name =
+ field_trial::FindFullName("WebRTC-Aec3SuppressorTuningOverride");
+
+ FieldTrialParameter<double> nearend_tuning_mask_lf_enr_transparent(
+ "nearend_tuning_mask_lf_enr_transparent",
+ adjusted_cfg.suppressor.nearend_tuning.mask_lf.enr_transparent);
+ FieldTrialParameter<double> nearend_tuning_mask_lf_enr_suppress(
+ "nearend_tuning_mask_lf_enr_suppress",
+ adjusted_cfg.suppressor.nearend_tuning.mask_lf.enr_suppress);
+ FieldTrialParameter<double> nearend_tuning_mask_hf_enr_transparent(
+ "nearend_tuning_mask_hf_enr_transparent",
+ adjusted_cfg.suppressor.nearend_tuning.mask_hf.enr_transparent);
+ FieldTrialParameter<double> nearend_tuning_mask_hf_enr_suppress(
+ "nearend_tuning_mask_hf_enr_suppress",
+ adjusted_cfg.suppressor.nearend_tuning.mask_hf.enr_suppress);
+ FieldTrialParameter<double> nearend_tuning_max_inc_factor(
+ "nearend_tuning_max_inc_factor",
+ adjusted_cfg.suppressor.nearend_tuning.max_inc_factor);
+ FieldTrialParameter<double> nearend_tuning_max_dec_factor_lf(
+ "nearend_tuning_max_dec_factor_lf",
+ adjusted_cfg.suppressor.nearend_tuning.max_dec_factor_lf);
+ FieldTrialParameter<double> normal_tuning_mask_lf_enr_transparent(
+ "normal_tuning_mask_lf_enr_transparent",
+ adjusted_cfg.suppressor.normal_tuning.mask_lf.enr_transparent);
+ FieldTrialParameter<double> normal_tuning_mask_lf_enr_suppress(
+ "normal_tuning_mask_lf_enr_suppress",
+ adjusted_cfg.suppressor.normal_tuning.mask_lf.enr_suppress);
+ FieldTrialParameter<double> normal_tuning_mask_hf_enr_transparent(
+ "normal_tuning_mask_hf_enr_transparent",
+ adjusted_cfg.suppressor.normal_tuning.mask_hf.enr_transparent);
+ FieldTrialParameter<double> normal_tuning_mask_hf_enr_suppress(
+ "normal_tuning_mask_hf_enr_suppress",
+ adjusted_cfg.suppressor.normal_tuning.mask_hf.enr_suppress);
+ FieldTrialParameter<double> normal_tuning_max_inc_factor(
+ "normal_tuning_max_inc_factor",
+ adjusted_cfg.suppressor.normal_tuning.max_inc_factor);
+ FieldTrialParameter<double> normal_tuning_max_dec_factor_lf(
+ "normal_tuning_max_dec_factor_lf",
+ adjusted_cfg.suppressor.normal_tuning.max_dec_factor_lf);
+ FieldTrialParameter<double> dominant_nearend_detection_enr_threshold(
+ "dominant_nearend_detection_enr_threshold",
+ adjusted_cfg.suppressor.dominant_nearend_detection.enr_threshold);
+ FieldTrialParameter<double> dominant_nearend_detection_enr_exit_threshold(
+ "dominant_nearend_detection_enr_exit_threshold",
+ adjusted_cfg.suppressor.dominant_nearend_detection.enr_exit_threshold);
+ FieldTrialParameter<double> dominant_nearend_detection_snr_threshold(
+ "dominant_nearend_detection_snr_threshold",
+ adjusted_cfg.suppressor.dominant_nearend_detection.snr_threshold);
+ FieldTrialParameter<int> dominant_nearend_detection_hold_duration(
+ "dominant_nearend_detection_hold_duration",
+ adjusted_cfg.suppressor.dominant_nearend_detection.hold_duration);
+ FieldTrialParameter<int> dominant_nearend_detection_trigger_threshold(
+ "dominant_nearend_detection_trigger_threshold",
+ adjusted_cfg.suppressor.dominant_nearend_detection.trigger_threshold);
+
+ ParseFieldTrial(
+ {&nearend_tuning_mask_lf_enr_transparent,
+ &nearend_tuning_mask_lf_enr_suppress,
+ &nearend_tuning_mask_hf_enr_transparent,
+ &nearend_tuning_mask_hf_enr_suppress, &nearend_tuning_max_inc_factor,
+ &nearend_tuning_max_dec_factor_lf,
+ &normal_tuning_mask_lf_enr_transparent,
+ &normal_tuning_mask_lf_enr_suppress,
+ &normal_tuning_mask_hf_enr_transparent,
+ &normal_tuning_mask_hf_enr_suppress, &normal_tuning_max_inc_factor,
+ &normal_tuning_max_dec_factor_lf,
+ &dominant_nearend_detection_enr_threshold,
+ &dominant_nearend_detection_enr_exit_threshold,
+ &dominant_nearend_detection_snr_threshold,
+ &dominant_nearend_detection_hold_duration,
+ &dominant_nearend_detection_trigger_threshold},
+ suppressor_tuning_override_trial_name);
+
+ adjusted_cfg.suppressor.nearend_tuning.mask_lf.enr_transparent =
+ static_cast<float>(nearend_tuning_mask_lf_enr_transparent.Get());
+ adjusted_cfg.suppressor.nearend_tuning.mask_lf.enr_suppress =
+ static_cast<float>(nearend_tuning_mask_lf_enr_suppress.Get());
+ adjusted_cfg.suppressor.nearend_tuning.mask_hf.enr_transparent =
+ static_cast<float>(nearend_tuning_mask_hf_enr_transparent.Get());
+ adjusted_cfg.suppressor.nearend_tuning.mask_hf.enr_suppress =
+ static_cast<float>(nearend_tuning_mask_hf_enr_suppress.Get());
+ adjusted_cfg.suppressor.nearend_tuning.max_inc_factor =
+ static_cast<float>(nearend_tuning_max_inc_factor.Get());
+ adjusted_cfg.suppressor.nearend_tuning.max_dec_factor_lf =
+ static_cast<float>(nearend_tuning_max_dec_factor_lf.Get());
+ adjusted_cfg.suppressor.normal_tuning.mask_lf.enr_transparent =
+ static_cast<float>(normal_tuning_mask_lf_enr_transparent.Get());
+ adjusted_cfg.suppressor.normal_tuning.mask_lf.enr_suppress =
+ static_cast<float>(normal_tuning_mask_lf_enr_suppress.Get());
+ adjusted_cfg.suppressor.normal_tuning.mask_hf.enr_transparent =
+ static_cast<float>(normal_tuning_mask_hf_enr_transparent.Get());
+ adjusted_cfg.suppressor.normal_tuning.mask_hf.enr_suppress =
+ static_cast<float>(normal_tuning_mask_hf_enr_suppress.Get());
+ adjusted_cfg.suppressor.normal_tuning.max_inc_factor =
+ static_cast<float>(normal_tuning_max_inc_factor.Get());
+ adjusted_cfg.suppressor.normal_tuning.max_dec_factor_lf =
+ static_cast<float>(normal_tuning_max_dec_factor_lf.Get());
+ adjusted_cfg.suppressor.dominant_nearend_detection.enr_threshold =
+ static_cast<float>(dominant_nearend_detection_enr_threshold.Get());
+ adjusted_cfg.suppressor.dominant_nearend_detection.enr_exit_threshold =
+ static_cast<float>(dominant_nearend_detection_enr_exit_threshold.Get());
+ adjusted_cfg.suppressor.dominant_nearend_detection.snr_threshold =
+ static_cast<float>(dominant_nearend_detection_snr_threshold.Get());
+ adjusted_cfg.suppressor.dominant_nearend_detection.hold_duration =
+ dominant_nearend_detection_hold_duration.Get();
+ adjusted_cfg.suppressor.dominant_nearend_detection.trigger_threshold =
+ dominant_nearend_detection_trigger_threshold.Get();
+
+ // Field trial-based overrides of individual suppressor parameters.
+ RetrieveFieldTrialValue(
+ "WebRTC-Aec3SuppressorNearendLfMaskTransparentOverride", 0.f, 10.f,
+ &adjusted_cfg.suppressor.nearend_tuning.mask_lf.enr_transparent);
+ RetrieveFieldTrialValue(
+ "WebRTC-Aec3SuppressorNearendLfMaskSuppressOverride", 0.f, 10.f,
+ &adjusted_cfg.suppressor.nearend_tuning.mask_lf.enr_suppress);
+ RetrieveFieldTrialValue(
+ "WebRTC-Aec3SuppressorNearendHfMaskTransparentOverride", 0.f, 10.f,
+ &adjusted_cfg.suppressor.nearend_tuning.mask_hf.enr_transparent);
+ RetrieveFieldTrialValue(
+ "WebRTC-Aec3SuppressorNearendHfMaskSuppressOverride", 0.f, 10.f,
+ &adjusted_cfg.suppressor.nearend_tuning.mask_hf.enr_suppress);
+ RetrieveFieldTrialValue(
+ "WebRTC-Aec3SuppressorNearendMaxIncFactorOverride", 0.f, 10.f,
+ &adjusted_cfg.suppressor.nearend_tuning.max_inc_factor);
+ RetrieveFieldTrialValue(
+ "WebRTC-Aec3SuppressorNearendMaxDecFactorLfOverride", 0.f, 10.f,
+ &adjusted_cfg.suppressor.nearend_tuning.max_dec_factor_lf);
+
+ RetrieveFieldTrialValue(
+ "WebRTC-Aec3SuppressorNormalLfMaskTransparentOverride", 0.f, 10.f,
+ &adjusted_cfg.suppressor.normal_tuning.mask_lf.enr_transparent);
+ RetrieveFieldTrialValue(
+ "WebRTC-Aec3SuppressorNormalLfMaskSuppressOverride", 0.f, 10.f,
+ &adjusted_cfg.suppressor.normal_tuning.mask_lf.enr_suppress);
+ RetrieveFieldTrialValue(
+ "WebRTC-Aec3SuppressorNormalHfMaskTransparentOverride", 0.f, 10.f,
+ &adjusted_cfg.suppressor.normal_tuning.mask_hf.enr_transparent);
+ RetrieveFieldTrialValue(
+ "WebRTC-Aec3SuppressorNormalHfMaskSuppressOverride", 0.f, 10.f,
+ &adjusted_cfg.suppressor.normal_tuning.mask_hf.enr_suppress);
+ RetrieveFieldTrialValue(
+ "WebRTC-Aec3SuppressorNormalMaxIncFactorOverride", 0.f, 10.f,
+ &adjusted_cfg.suppressor.normal_tuning.max_inc_factor);
+ RetrieveFieldTrialValue(
+ "WebRTC-Aec3SuppressorNormalMaxDecFactorLfOverride", 0.f, 10.f,
+ &adjusted_cfg.suppressor.normal_tuning.max_dec_factor_lf);
+
+ RetrieveFieldTrialValue(
+ "WebRTC-Aec3SuppressorDominantNearendEnrThresholdOverride", 0.f, 100.f,
+ &adjusted_cfg.suppressor.dominant_nearend_detection.enr_threshold);
+ RetrieveFieldTrialValue(
+ "WebRTC-Aec3SuppressorDominantNearendEnrExitThresholdOverride", 0.f,
+ 100.f,
+ &adjusted_cfg.suppressor.dominant_nearend_detection.enr_exit_threshold);
+ RetrieveFieldTrialValue(
+ "WebRTC-Aec3SuppressorDominantNearendSnrThresholdOverride", 0.f, 100.f,
+ &adjusted_cfg.suppressor.dominant_nearend_detection.snr_threshold);
+ RetrieveFieldTrialValue(
+ "WebRTC-Aec3SuppressorDominantNearendHoldDurationOverride", 0, 1000,
+ &adjusted_cfg.suppressor.dominant_nearend_detection.hold_duration);
+ RetrieveFieldTrialValue(
+ "WebRTC-Aec3SuppressorDominantNearendTriggerThresholdOverride", 0, 1000,
+ &adjusted_cfg.suppressor.dominant_nearend_detection.trigger_threshold);
+
+ RetrieveFieldTrialValue(
+ "WebRTC-Aec3SuppressorAntiHowlingGainOverride", 0.f, 10.f,
+ &adjusted_cfg.suppressor.high_bands_suppression.anti_howling_gain);
+
+ // Field trial-based overrides of individual delay estimator parameters.
+ RetrieveFieldTrialValue("WebRTC-Aec3DelayEstimateSmoothingOverride", 0.f, 1.f,
+ &adjusted_cfg.delay.delay_estimate_smoothing);
+ RetrieveFieldTrialValue(
+ "WebRTC-Aec3DelayEstimateSmoothingDelayFoundOverride", 0.f, 1.f,
+ &adjusted_cfg.delay.delay_estimate_smoothing_delay_found);
+
+ return adjusted_cfg;
+}
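+
+// Example (hypothetical deployment): enabling the field trial
+// "WebRTC-Aec3UseShortConfigChangeDuration" makes AdjustConfig() return a
+// config with filter.config_change_duration_blocks == 10, while the
+// "...Override" trials above carry their numeric payload in the trial group
+// string, parsed via ParseFieldTrial() with out-of-range values ignored.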
+
+class EchoCanceller3::RenderWriter {
+ public:
+ RenderWriter(ApmDataDumper* data_dumper,
+ const EchoCanceller3Config& config,
+ SwapQueue<std::vector<std::vector<std::vector<float>>>,
+ Aec3RenderQueueItemVerifier>* render_transfer_queue,
+ size_t num_bands,
+ size_t num_channels);
+
+ RenderWriter() = delete;
+ RenderWriter(const RenderWriter&) = delete;
+ RenderWriter& operator=(const RenderWriter&) = delete;
+
+ ~RenderWriter();
+ void Insert(const AudioBuffer& input);
+
+ private:
+ ApmDataDumper* data_dumper_;
+ const size_t num_bands_;
+ const size_t num_channels_;
+ std::unique_ptr<HighPassFilter> high_pass_filter_;
+ std::vector<std::vector<std::vector<float>>> render_queue_input_frame_;
+ SwapQueue<std::vector<std::vector<std::vector<float>>>,
+ Aec3RenderQueueItemVerifier>* render_transfer_queue_;
+};
+
+EchoCanceller3::RenderWriter::RenderWriter(
+ ApmDataDumper* data_dumper,
+ const EchoCanceller3Config& config,
+ SwapQueue<std::vector<std::vector<std::vector<float>>>,
+ Aec3RenderQueueItemVerifier>* render_transfer_queue,
+ size_t num_bands,
+ size_t num_channels)
+ : data_dumper_(data_dumper),
+ num_bands_(num_bands),
+ num_channels_(num_channels),
+ render_queue_input_frame_(
+ num_bands_,
+ std::vector<std::vector<float>>(
+ num_channels_,
+ std::vector<float>(AudioBuffer::kSplitBandSize, 0.f))),
+ render_transfer_queue_(render_transfer_queue) {
+ RTC_DCHECK(data_dumper);
+ if (config.filter.high_pass_filter_echo_reference) {
+ high_pass_filter_ = std::make_unique<HighPassFilter>(16000, num_channels);
+ }
+}
+
+EchoCanceller3::RenderWriter::~RenderWriter() = default;
+
+void EchoCanceller3::RenderWriter::Insert(const AudioBuffer& input) {
+ RTC_DCHECK_EQ(AudioBuffer::kSplitBandSize, input.num_frames_per_band());
+ RTC_DCHECK_EQ(num_bands_, input.num_bands());
+ RTC_DCHECK_EQ(num_channels_, input.num_channels());
+
+ // TODO(bugs.webrtc.org/8759) Temporary work-around.
+ if (num_bands_ != input.num_bands())
+ return;
+
+ data_dumper_->DumpWav("aec3_render_input", AudioBuffer::kSplitBandSize,
+ &input.split_bands_const(0)[0][0], 16000, 1);
+
+ CopyBufferIntoFrame(input, num_bands_, num_channels_,
+ &render_queue_input_frame_);
+ if (high_pass_filter_) {
+ high_pass_filter_->Process(&render_queue_input_frame_[0]);
+ }
+
+ static_cast<void>(render_transfer_queue_->Insert(&render_queue_input_frame_));
+}
+
+std::atomic<int> EchoCanceller3::instance_count_(0);
+
+EchoCanceller3::EchoCanceller3(
+ const EchoCanceller3Config& config,
+ const absl::optional<EchoCanceller3Config>& multichannel_config,
+ int sample_rate_hz,
+ size_t num_render_channels,
+ size_t num_capture_channels)
+ : data_dumper_(new ApmDataDumper(instance_count_.fetch_add(1) + 1)),
+ config_(AdjustConfig(config)),
+ sample_rate_hz_(sample_rate_hz),
+ num_bands_(NumBandsForRate(sample_rate_hz_)),
+ num_render_input_channels_(num_render_channels),
+ num_capture_channels_(num_capture_channels),
+ config_selector_(AdjustConfig(config),
+ multichannel_config,
+ num_render_input_channels_),
+ multichannel_content_detector_(
+ config_selector_.active_config().multi_channel.detect_stereo_content,
+ num_render_input_channels_,
+ config_selector_.active_config()
+ .multi_channel.stereo_detection_threshold,
+ config_selector_.active_config()
+ .multi_channel.stereo_detection_timeout_threshold_seconds,
+ config_selector_.active_config()
+ .multi_channel.stereo_detection_hysteresis_seconds),
+ output_framer_(num_bands_, num_capture_channels_),
+ capture_blocker_(num_bands_, num_capture_channels_),
+ render_transfer_queue_(
+ kRenderTransferQueueSizeFrames,
+ std::vector<std::vector<std::vector<float>>>(
+ num_bands_,
+ std::vector<std::vector<float>>(
+ num_render_input_channels_,
+ std::vector<float>(AudioBuffer::kSplitBandSize, 0.f))),
+ Aec3RenderQueueItemVerifier(num_bands_,
+ num_render_input_channels_,
+ AudioBuffer::kSplitBandSize)),
+ render_queue_output_frame_(
+ num_bands_,
+ std::vector<std::vector<float>>(
+ num_render_input_channels_,
+ std::vector<float>(AudioBuffer::kSplitBandSize, 0.f))),
+ render_block_(num_bands_, num_render_input_channels_),
+ capture_block_(num_bands_, num_capture_channels_),
+ capture_sub_frame_view_(
+ num_bands_,
+ std::vector<rtc::ArrayView<float>>(num_capture_channels_)) {
+ RTC_DCHECK(ValidFullBandRate(sample_rate_hz_));
+
+ if (config_selector_.active_config().delay.fixed_capture_delay_samples > 0) {
+ block_delay_buffer_.reset(new BlockDelayBuffer(
+ num_capture_channels_, num_bands_, AudioBuffer::kSplitBandSize,
+ config_.delay.fixed_capture_delay_samples));
+ }
+
+ render_writer_.reset(new RenderWriter(
+ data_dumper_.get(), config_selector_.active_config(),
+ &render_transfer_queue_, num_bands_, num_render_input_channels_));
+
+ RTC_DCHECK_EQ(num_bands_, std::max(sample_rate_hz_, 16000) / 16000);
+ RTC_DCHECK_GE(kMaxNumBands, num_bands_);
+
+ if (config_selector_.active_config().filter.export_linear_aec_output) {
+ linear_output_framer_.reset(
+ new BlockFramer(/*num_bands=*/1, num_capture_channels_));
+    linear_output_block_ =
+        std::make_unique<Block>(/*num_bands=*/1, num_capture_channels_);
+ linear_output_sub_frame_view_ =
+ std::vector<std::vector<rtc::ArrayView<float>>>(
+ 1, std::vector<rtc::ArrayView<float>>(num_capture_channels_));
+ }
+
+ Initialize();
+
+ RTC_LOG(LS_INFO) << "AEC3 created with sample rate: " << sample_rate_hz_
+ << " Hz, num render channels: " << num_render_input_channels_
+ << ", num capture channels: " << num_capture_channels_;
+}
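+
+// Illustrative usage (not part of the original source; buffer names are
+// hypothetical): a client constructs the canceller once and then, per 10 ms
+// frame, calls AnalyzeRender(), AnalyzeCapture() and ProcessCapture(), in
+// that order:
+//
+//   EchoCanceller3 aec3(EchoCanceller3Config(),
+//                       /*multichannel_config=*/absl::nullopt,
+//                       /*sample_rate_hz=*/48000,
+//                       /*num_render_channels=*/2,
+//                       /*num_capture_channels=*/1);
+//   aec3.AnalyzeRender(&render_buffer);
+//   aec3.AnalyzeCapture(&capture_buffer);
+//   aec3.ProcessCapture(&capture_buffer, /*level_change=*/false);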
+
+EchoCanceller3::~EchoCanceller3() = default;
+
+void EchoCanceller3::Initialize() {
+ RTC_DCHECK_RUNS_SERIALIZED(&capture_race_checker_);
+
+ num_render_channels_to_aec_ =
+ multichannel_content_detector_.IsProperMultiChannelContentDetected()
+ ? num_render_input_channels_
+ : 1;
+
+ config_selector_.Update(
+ multichannel_content_detector_.IsProperMultiChannelContentDetected());
+
+ render_block_.SetNumChannels(num_render_channels_to_aec_);
+
+ render_blocker_.reset(
+ new FrameBlocker(num_bands_, num_render_channels_to_aec_));
+
+ block_processor_.reset(BlockProcessor::Create(
+ config_selector_.active_config(), sample_rate_hz_,
+ num_render_channels_to_aec_, num_capture_channels_));
+
+ render_sub_frame_view_ = std::vector<std::vector<rtc::ArrayView<float>>>(
+ num_bands_,
+ std::vector<rtc::ArrayView<float>>(num_render_channels_to_aec_));
+}
+
+void EchoCanceller3::AnalyzeRender(const AudioBuffer& render) {
+ RTC_DCHECK_RUNS_SERIALIZED(&render_race_checker_);
+
+ RTC_DCHECK_EQ(render.num_channels(), num_render_input_channels_);
+ data_dumper_->DumpRaw("aec3_call_order",
+ static_cast<int>(EchoCanceller3ApiCall::kRender));
+
+ return render_writer_->Insert(render);
+}
+
+void EchoCanceller3::AnalyzeCapture(const AudioBuffer& capture) {
+ RTC_DCHECK_RUNS_SERIALIZED(&capture_race_checker_);
+ data_dumper_->DumpWav("aec3_capture_analyze_input", capture.num_frames(),
+ capture.channels_const()[0], sample_rate_hz_, 1);
+ saturated_microphone_signal_ = false;
+ for (size_t channel = 0; channel < capture.num_channels(); ++channel) {
+ saturated_microphone_signal_ |=
+ DetectSaturation(rtc::ArrayView<const float>(
+ capture.channels_const()[channel], capture.num_frames()));
+ if (saturated_microphone_signal_) {
+ break;
+ }
+ }
+}
+
+void EchoCanceller3::ProcessCapture(AudioBuffer* capture, bool level_change) {
+ ProcessCapture(capture, nullptr, level_change);
+}
+
+void EchoCanceller3::ProcessCapture(AudioBuffer* capture,
+ AudioBuffer* linear_output,
+ bool level_change) {
+ RTC_DCHECK_RUNS_SERIALIZED(&capture_race_checker_);
+ RTC_DCHECK(capture);
+ RTC_DCHECK_EQ(num_bands_, capture->num_bands());
+ RTC_DCHECK_EQ(AudioBuffer::kSplitBandSize, capture->num_frames_per_band());
+ RTC_DCHECK_EQ(capture->num_channels(), num_capture_channels_);
+ data_dumper_->DumpRaw("aec3_call_order",
+ static_cast<int>(EchoCanceller3ApiCall::kCapture));
+
+ if (linear_output && !linear_output_framer_) {
+ RTC_LOG(LS_ERROR) << "Trying to retrieve the linear AEC output without "
+ "properly configuring AEC3.";
+ RTC_DCHECK_NOTREACHED();
+ }
+
+ // Report capture call in the metrics and periodically update API call
+ // metrics.
+ api_call_metrics_.ReportCaptureCall();
+
+ // Optionally delay the capture signal.
+ if (config_selector_.active_config().delay.fixed_capture_delay_samples > 0) {
+ RTC_DCHECK(block_delay_buffer_);
+ block_delay_buffer_->DelaySignal(capture);
+ }
+
+ rtc::ArrayView<float> capture_lower_band = rtc::ArrayView<float>(
+ &capture->split_bands(0)[0][0], AudioBuffer::kSplitBandSize);
+
+ data_dumper_->DumpWav("aec3_capture_input", capture_lower_band, 16000, 1);
+
+ EmptyRenderQueue();
+
+ ProcessCaptureFrameContent(
+ linear_output, capture, level_change,
+ multichannel_content_detector_.IsTemporaryMultiChannelContentDetected(),
+ saturated_microphone_signal_, 0, &capture_blocker_,
+ linear_output_framer_.get(), &output_framer_, block_processor_.get(),
+ linear_output_block_.get(), &linear_output_sub_frame_view_,
+ &capture_block_, &capture_sub_frame_view_);
+
+ ProcessCaptureFrameContent(
+ linear_output, capture, level_change,
+ multichannel_content_detector_.IsTemporaryMultiChannelContentDetected(),
+ saturated_microphone_signal_, 1, &capture_blocker_,
+ linear_output_framer_.get(), &output_framer_, block_processor_.get(),
+ linear_output_block_.get(), &linear_output_sub_frame_view_,
+ &capture_block_, &capture_sub_frame_view_);
+
+ ProcessRemainingCaptureFrameContent(
+ level_change,
+ multichannel_content_detector_.IsTemporaryMultiChannelContentDetected(),
+ saturated_microphone_signal_, &capture_blocker_,
+ linear_output_framer_.get(), &output_framer_, block_processor_.get(),
+ linear_output_block_.get(), &capture_block_);
+
+ data_dumper_->DumpWav("aec3_capture_output", AudioBuffer::kSplitBandSize,
+ &capture->split_bands(0)[0][0], 16000, 1);
+}
+
+EchoControl::Metrics EchoCanceller3::GetMetrics() const {
+ RTC_DCHECK_RUNS_SERIALIZED(&capture_race_checker_);
+ Metrics metrics;
+ block_processor_->GetMetrics(&metrics);
+ return metrics;
+}
+
+void EchoCanceller3::SetAudioBufferDelay(int delay_ms) {
+ RTC_DCHECK_RUNS_SERIALIZED(&capture_race_checker_);
+ block_processor_->SetAudioBufferDelay(delay_ms);
+}
+
+void EchoCanceller3::SetCaptureOutputUsage(bool capture_output_used) {
+ RTC_DCHECK_RUNS_SERIALIZED(&capture_race_checker_);
+ block_processor_->SetCaptureOutputUsage(capture_output_used);
+}
+
+bool EchoCanceller3::ActiveProcessing() const {
+ return true;
+}
+
+EchoCanceller3Config EchoCanceller3::CreateDefaultMultichannelConfig() {
+ EchoCanceller3Config cfg;
+  // Use a shorter and more rapidly adapting coarse filter to compensate for
+  // the increased number of total filter parameters to adapt.
+ cfg.filter.coarse.length_blocks = 11;
+ cfg.filter.coarse.rate = 0.95f;
+ cfg.filter.coarse_initial.length_blocks = 11;
+ cfg.filter.coarse_initial.rate = 0.95f;
+
+  // Use more conservative suppressor behavior for non-nearend speech.
+ cfg.suppressor.normal_tuning.max_dec_factor_lf = 0.35f;
+ cfg.suppressor.normal_tuning.max_inc_factor = 1.5f;
+ return cfg;
+}
+
+void EchoCanceller3::SetBlockProcessorForTesting(
+ std::unique_ptr<BlockProcessor> block_processor) {
+ RTC_DCHECK_RUNS_SERIALIZED(&capture_race_checker_);
+ RTC_DCHECK(block_processor);
+ block_processor_ = std::move(block_processor);
+}
+
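+// AnalyzeRender() only enqueues render frames (possibly from a separate
+// render thread); the queue is drained here on the capture thread, which is
+// why ProcessCapture() calls EmptyRenderQueue() before any capture block is
+// processed.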
+void EchoCanceller3::EmptyRenderQueue() {
+ RTC_DCHECK_RUNS_SERIALIZED(&capture_race_checker_);
+ bool frame_to_buffer =
+ render_transfer_queue_.Remove(&render_queue_output_frame_);
+ while (frame_to_buffer) {
+ // Report render call in the metrics.
+ api_call_metrics_.ReportRenderCall();
+
+ if (multichannel_content_detector_.UpdateDetection(
+ render_queue_output_frame_)) {
+ // Reinitialize the AEC when proper stereo is detected.
+ Initialize();
+ }
+
+ // Buffer frame content.
+ BufferRenderFrameContent(
+ /*proper_downmix_needed=*/multichannel_content_detector_
+ .IsTemporaryMultiChannelContentDetected(),
+ &render_queue_output_frame_, 0, render_blocker_.get(),
+ block_processor_.get(), &render_block_, &render_sub_frame_view_);
+
+ BufferRenderFrameContent(
+ /*proper_downmix_needed=*/multichannel_content_detector_
+ .IsTemporaryMultiChannelContentDetected(),
+ &render_queue_output_frame_, 1, render_blocker_.get(),
+ block_processor_.get(), &render_block_, &render_sub_frame_view_);
+
+ BufferRemainingRenderFrameContent(render_blocker_.get(),
+ block_processor_.get(), &render_block_);
+
+ frame_to_buffer =
+ render_transfer_queue_.Remove(&render_queue_output_frame_);
+ }
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/echo_canceller3.h b/third_party/libwebrtc/modules/audio_processing/aec3/echo_canceller3.h
new file mode 100644
index 0000000000..7bf8e51a4b
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/echo_canceller3.h
@@ -0,0 +1,230 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_ECHO_CANCELLER3_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_ECHO_CANCELLER3_H_
+
+#include <stddef.h>
+
+#include <atomic>
+#include <memory>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/array_view.h"
+#include "api/audio/echo_canceller3_config.h"
+#include "api/audio/echo_control.h"
+#include "modules/audio_processing/aec3/api_call_jitter_metrics.h"
+#include "modules/audio_processing/aec3/block_delay_buffer.h"
+#include "modules/audio_processing/aec3/block_framer.h"
+#include "modules/audio_processing/aec3/block_processor.h"
+#include "modules/audio_processing/aec3/config_selector.h"
+#include "modules/audio_processing/aec3/frame_blocker.h"
+#include "modules/audio_processing/aec3/multi_channel_content_detector.h"
+#include "modules/audio_processing/audio_buffer.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/race_checker.h"
+#include "rtc_base/swap_queue.h"
+#include "rtc_base/thread_annotations.h"
+
+namespace webrtc {
+
+// Method for adjusting config parameter dependencies.
+// Only to be used externally to AEC3 for testing purposes.
+// TODO(webrtc:5298): Move this to a separate file.
+EchoCanceller3Config AdjustConfig(const EchoCanceller3Config& config);
+
+// Functor for verifying the invariants of the frames being put into the
+// render queue.
+class Aec3RenderQueueItemVerifier {
+ public:
+ Aec3RenderQueueItemVerifier(size_t num_bands,
+ size_t num_channels,
+ size_t frame_length)
+ : num_bands_(num_bands),
+ num_channels_(num_channels),
+ frame_length_(frame_length) {}
+
+ bool operator()(const std::vector<std::vector<std::vector<float>>>& v) const {
+ if (v.size() != num_bands_) {
+ return false;
+ }
+ for (const auto& band : v) {
+ if (band.size() != num_channels_) {
+ return false;
+ }
+ for (const auto& channel : band) {
+ if (channel.size() != frame_length_) {
+ return false;
+ }
+ }
+ }
+ return true;
+ }
+
+ private:
+ const size_t num_bands_;
+ const size_t num_channels_;
+ const size_t frame_length_;
+};
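+
+// For reference (shapes stated as an assumption about the surrounding AEC3
+// constants): queued frames are laid out as
+// [num_bands][num_channels][AudioBuffer::kSplitBandSize], so a 48 kHz stereo
+// render frame is verified as [3][2][160].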
+
+// Main class for the echo canceller3.
+// It does three things:
+// -Receives 10 ms frames of band-split audio.
+// -Provides the lower level echo canceller functionality with
+// blocks of 64 samples of audio data.
+// -Partially handles the jitter in the render and capture API
+// call sequence.
+//
+// The class is supposed to be used in a non-concurrent manner, apart from the
+// AnalyzeRender call, which can be called concurrently with the other
+// methods.
+class EchoCanceller3 : public EchoControl {
+ public:
+ EchoCanceller3(
+ const EchoCanceller3Config& config,
+ const absl::optional<EchoCanceller3Config>& multichannel_config,
+ int sample_rate_hz,
+ size_t num_render_channels,
+ size_t num_capture_channels);
+
+ ~EchoCanceller3() override;
+
+ EchoCanceller3(const EchoCanceller3&) = delete;
+ EchoCanceller3& operator=(const EchoCanceller3&) = delete;
+
+ // Analyzes and stores an internal copy of the split-band domain render
+ // signal.
+ void AnalyzeRender(AudioBuffer* render) override { AnalyzeRender(*render); }
+ // Analyzes the full-band domain capture signal to detect signal saturation.
+ void AnalyzeCapture(AudioBuffer* capture) override {
+ AnalyzeCapture(*capture);
+ }
+ // Processes the split-band domain capture signal in order to remove any echo
+ // present in the signal.
+ void ProcessCapture(AudioBuffer* capture, bool level_change) override;
+ // As above, but also returns the linear filter output.
+ void ProcessCapture(AudioBuffer* capture,
+ AudioBuffer* linear_output,
+ bool level_change) override;
+ // Collect current metrics from the echo canceller.
+ Metrics GetMetrics() const override;
+ // Provides an optional external estimate of the audio buffer delay.
+ void SetAudioBufferDelay(int delay_ms) override;
+
+  // Specifies whether the capture output will be used. The purpose of this
+  // is to allow the echo controller to deactivate some of the processing
+  // when the resulting output is not used anyway, for instance when the
+  // endpoint is muted.
+ void SetCaptureOutputUsage(bool capture_output_used) override;
+
+ bool ActiveProcessing() const override;
+
+ // Signals whether an external detector has detected echo leakage from the
+ // echo canceller.
+  // Note that once echo leakage has been flagged, it should be unflagged
+  // when it is no longer occurring.
+ void UpdateEchoLeakageStatus(bool leakage_detected) {
+ RTC_DCHECK_RUNS_SERIALIZED(&capture_race_checker_);
+ block_processor_->UpdateEchoLeakageStatus(leakage_detected);
+ }
+
+ // Produces a default configuration for multichannel.
+ static EchoCanceller3Config CreateDefaultMultichannelConfig();
+
+ private:
+ friend class EchoCanceller3Tester;
+ FRIEND_TEST_ALL_PREFIXES(EchoCanceller3, DetectionOfProperStereo);
+ FRIEND_TEST_ALL_PREFIXES(EchoCanceller3,
+ DetectionOfProperStereoUsingThreshold);
+ FRIEND_TEST_ALL_PREFIXES(EchoCanceller3,
+ DetectionOfProperStereoUsingHysteresis);
+ FRIEND_TEST_ALL_PREFIXES(EchoCanceller3,
+ StereoContentDetectionForMonoSignals);
+
+ class RenderWriter;
+
+ // (Re-)Initializes the selected subset of the EchoCanceller3 fields, at
+ // creation as well as during reconfiguration.
+ void Initialize();
+
+ // Only for testing. Replaces the internal block processor.
+ void SetBlockProcessorForTesting(
+ std::unique_ptr<BlockProcessor> block_processor);
+
+ // Only for testing. Returns whether stereo processing is active.
+ bool StereoRenderProcessingActiveForTesting() const {
+ return multichannel_content_detector_.IsProperMultiChannelContentDetected();
+ }
+
+ // Only for testing.
+ const EchoCanceller3Config& GetActiveConfigForTesting() const {
+ return config_selector_.active_config();
+ }
+
+ // Empties the render SwapQueue.
+ void EmptyRenderQueue();
+
+ // Analyzes and stores an internal copy of the split-band domain render
+ // signal.
+ void AnalyzeRender(const AudioBuffer& render);
+ // Analyzes the full-band domain capture signal to detect signal saturation.
+ void AnalyzeCapture(const AudioBuffer& capture);
+
+ rtc::RaceChecker capture_race_checker_;
+ rtc::RaceChecker render_race_checker_;
+
+ // State that is accessed by the AnalyzeRender call.
+ std::unique_ptr<RenderWriter> render_writer_
+ RTC_GUARDED_BY(render_race_checker_);
+
+ // State that may be accessed by the capture thread.
+ static std::atomic<int> instance_count_;
+ std::unique_ptr<ApmDataDumper> data_dumper_;
+ const EchoCanceller3Config config_;
+ const int sample_rate_hz_;
+ const int num_bands_;
+ const size_t num_render_input_channels_;
+ size_t num_render_channels_to_aec_;
+ const size_t num_capture_channels_;
+ ConfigSelector config_selector_;
+ MultiChannelContentDetector multichannel_content_detector_;
+ std::unique_ptr<BlockFramer> linear_output_framer_
+ RTC_GUARDED_BY(capture_race_checker_);
+ BlockFramer output_framer_ RTC_GUARDED_BY(capture_race_checker_);
+ FrameBlocker capture_blocker_ RTC_GUARDED_BY(capture_race_checker_);
+ std::unique_ptr<FrameBlocker> render_blocker_
+ RTC_GUARDED_BY(capture_race_checker_);
+ SwapQueue<std::vector<std::vector<std::vector<float>>>,
+ Aec3RenderQueueItemVerifier>
+ render_transfer_queue_;
+ std::unique_ptr<BlockProcessor> block_processor_
+ RTC_GUARDED_BY(capture_race_checker_);
+ std::vector<std::vector<std::vector<float>>> render_queue_output_frame_
+ RTC_GUARDED_BY(capture_race_checker_);
+ bool saturated_microphone_signal_ RTC_GUARDED_BY(capture_race_checker_) =
+ false;
+ Block render_block_ RTC_GUARDED_BY(capture_race_checker_);
+ std::unique_ptr<Block> linear_output_block_
+ RTC_GUARDED_BY(capture_race_checker_);
+ Block capture_block_ RTC_GUARDED_BY(capture_race_checker_);
+ std::vector<std::vector<rtc::ArrayView<float>>> render_sub_frame_view_
+ RTC_GUARDED_BY(capture_race_checker_);
+ std::vector<std::vector<rtc::ArrayView<float>>> linear_output_sub_frame_view_
+ RTC_GUARDED_BY(capture_race_checker_);
+ std::vector<std::vector<rtc::ArrayView<float>>> capture_sub_frame_view_
+ RTC_GUARDED_BY(capture_race_checker_);
+ std::unique_ptr<BlockDelayBuffer> block_delay_buffer_
+ RTC_GUARDED_BY(capture_race_checker_);
+ ApiCallJitterMetrics api_call_metrics_ RTC_GUARDED_BY(capture_race_checker_);
+};
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AEC3_ECHO_CANCELLER3_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/echo_canceller3_unittest.cc b/third_party/libwebrtc/modules/audio_processing/aec3/echo_canceller3_unittest.cc
new file mode 100644
index 0000000000..ad126af4d3
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/echo_canceller3_unittest.cc
@@ -0,0 +1,1160 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/echo_canceller3.h"
+
+#include <algorithm>
+#include <deque>
+#include <memory>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "modules/audio_processing/aec3/block_processor.h"
+#include "modules/audio_processing/aec3/frame_blocker.h"
+#include "modules/audio_processing/aec3/mock/mock_block_processor.h"
+#include "modules/audio_processing/audio_buffer.h"
+#include "modules/audio_processing/high_pass_filter.h"
+#include "modules/audio_processing/utility/cascaded_biquad_filter.h"
+#include "rtc_base/strings/string_builder.h"
+#include "test/field_trial.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+using ::testing::_;
+using ::testing::StrictMock;
+
+// Populates the frame with linearly increasing sample values for each band,
+// with a band-specific offset, in order to allow simple bitexactness
+// verification for each band.
+void PopulateInputFrame(size_t frame_length,
+ size_t num_bands,
+ size_t frame_index,
+ float* const* frame,
+ int offset) {
+ for (size_t k = 0; k < num_bands; ++k) {
+ for (size_t i = 0; i < frame_length; ++i) {
+ float value = static_cast<int>(frame_index * frame_length + i) + offset;
+ frame[k][i] = (value > 0 ? 5000 * k + value : 0);
+ }
+ }
+}
+
+// Populates the frame with linearly increasing sample values.
+void PopulateInputFrame(size_t frame_length,
+ size_t frame_index,
+ float* frame,
+ int offset) {
+ for (size_t i = 0; i < frame_length; ++i) {
+ float value = static_cast<int>(frame_index * frame_length + i) + offset;
+ frame[i] = std::max(value, 0.f);
+ }
+}
+
+// Verifies that the samples in the output frame are identical to the samples
+// that were produced for the input frame, with an offset in order to compensate
+// for buffering delays.
+bool VerifyOutputFrameBitexactness(size_t frame_length,
+ size_t num_bands,
+ size_t frame_index,
+ const float* const* frame,
+ int offset) {
+ float reference_frame_data[kMaxNumBands][2 * kSubFrameLength];
+ float* reference_frame[kMaxNumBands];
+ for (size_t k = 0; k < num_bands; ++k) {
+ reference_frame[k] = &reference_frame_data[k][0];
+ }
+
+ PopulateInputFrame(frame_length, num_bands, frame_index, reference_frame,
+ offset);
+ for (size_t k = 0; k < num_bands; ++k) {
+ for (size_t i = 0; i < frame_length; ++i) {
+ if (reference_frame[k][i] != frame[k][i]) {
+ return false;
+ }
+ }
+ }
+
+ return true;
+}
+
+bool VerifyOutputFrameBitexactness(rtc::ArrayView<const float> reference,
+ rtc::ArrayView<const float> frame,
+ int offset) {
+ for (size_t k = 0; k < frame.size(); ++k) {
+ int reference_index = static_cast<int>(k) + offset;
+ if (reference_index >= 0) {
+ if (reference[reference_index] != frame[k]) {
+ return false;
+ }
+ }
+ }
+ return true;
+}
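+
+// The tests below pass offset -64 to these helpers to compensate for the
+// one-block (kBlockSize == 64 samples) delay introduced by the
+// FrameBlocker/BlockFramer buffering.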
+
+// Class for testing that the capture data is properly received by the block
+// processor and that the processor data is properly passed to the
+// EchoCanceller3 output.
+class CaptureTransportVerificationProcessor : public BlockProcessor {
+ public:
+ explicit CaptureTransportVerificationProcessor(size_t num_bands) {}
+
+ CaptureTransportVerificationProcessor() = delete;
+ CaptureTransportVerificationProcessor(
+ const CaptureTransportVerificationProcessor&) = delete;
+ CaptureTransportVerificationProcessor& operator=(
+ const CaptureTransportVerificationProcessor&) = delete;
+
+ ~CaptureTransportVerificationProcessor() override = default;
+
+ void ProcessCapture(bool level_change,
+ bool saturated_microphone_signal,
+ Block* linear_output,
+ Block* capture_block) override {}
+
+ void BufferRender(const Block& block) override {}
+
+ void UpdateEchoLeakageStatus(bool leakage_detected) override {}
+
+ void GetMetrics(EchoControl::Metrics* metrics) const override {}
+
+ void SetAudioBufferDelay(int delay_ms) override {}
+
+  void SetCaptureOutputUsage(bool capture_output_used) override {}
+};
+
+// Class for testing that the render data is properly received by the block
+// processor.
+class RenderTransportVerificationProcessor : public BlockProcessor {
+ public:
+ explicit RenderTransportVerificationProcessor(size_t num_bands) {}
+
+ RenderTransportVerificationProcessor() = delete;
+ RenderTransportVerificationProcessor(
+ const RenderTransportVerificationProcessor&) = delete;
+ RenderTransportVerificationProcessor& operator=(
+ const RenderTransportVerificationProcessor&) = delete;
+
+ ~RenderTransportVerificationProcessor() override = default;
+
+ void ProcessCapture(bool level_change,
+ bool saturated_microphone_signal,
+ Block* linear_output,
+ Block* capture_block) override {
+ Block render_block = received_render_blocks_.front();
+ received_render_blocks_.pop_front();
+ capture_block->Swap(render_block);
+ }
+
+ void BufferRender(const Block& block) override {
+ received_render_blocks_.push_back(block);
+ }
+
+ void UpdateEchoLeakageStatus(bool leakage_detected) override {}
+
+ void GetMetrics(EchoControl::Metrics* metrics) const override {}
+
+ void SetAudioBufferDelay(int delay_ms) override {}
+
+  void SetCaptureOutputUsage(bool capture_output_used) override {}
+
+ private:
+ std::deque<Block> received_render_blocks_;
+};
+
+std::string ProduceDebugText(int sample_rate_hz) {
+ rtc::StringBuilder ss;
+ ss << "Sample rate: " << sample_rate_hz;
+ return ss.Release();
+}
+
+std::string ProduceDebugText(int sample_rate_hz, int variant) {
+ rtc::StringBuilder ss;
+ ss << "Sample rate: " << sample_rate_hz << ", variant: " << variant;
+ return ss.Release();
+}
+
+void RunAecInStereo(AudioBuffer& buffer,
+ EchoCanceller3& aec3,
+ float channel_0_value,
+ float channel_1_value) {
+ rtc::ArrayView<float> data_channel_0(&buffer.channels()[0][0],
+ buffer.num_frames());
+ std::fill(data_channel_0.begin(), data_channel_0.end(), channel_0_value);
+ rtc::ArrayView<float> data_channel_1(&buffer.channels()[1][0],
+ buffer.num_frames());
+ std::fill(data_channel_1.begin(), data_channel_1.end(), channel_1_value);
+ aec3.AnalyzeRender(&buffer);
+ aec3.AnalyzeCapture(&buffer);
+ aec3.ProcessCapture(&buffer, /*level_change=*/false);
+}
+
+void RunAecInSMono(AudioBuffer& buffer,
+ EchoCanceller3& aec3,
+ float channel_0_value) {
+ rtc::ArrayView<float> data_channel_0(&buffer.channels()[0][0],
+ buffer.num_frames());
+ std::fill(data_channel_0.begin(), data_channel_0.end(), channel_0_value);
+ aec3.AnalyzeRender(&buffer);
+ aec3.AnalyzeCapture(&buffer);
+ aec3.ProcessCapture(&buffer, /*level_change=*/false);
+}
+
+} // namespace
+
+class EchoCanceller3Tester {
+ public:
+ explicit EchoCanceller3Tester(int sample_rate_hz)
+ : sample_rate_hz_(sample_rate_hz),
+ num_bands_(NumBandsForRate(sample_rate_hz_)),
+ frame_length_(160),
+ fullband_frame_length_(rtc::CheckedDivExact(sample_rate_hz_, 100)),
+ capture_buffer_(fullband_frame_length_ * 100,
+ 1,
+ fullband_frame_length_ * 100,
+ 1,
+ fullband_frame_length_ * 100,
+ 1),
+ render_buffer_(fullband_frame_length_ * 100,
+ 1,
+ fullband_frame_length_ * 100,
+ 1,
+ fullband_frame_length_ * 100,
+ 1) {}
+
+ EchoCanceller3Tester() = delete;
+ EchoCanceller3Tester(const EchoCanceller3Tester&) = delete;
+ EchoCanceller3Tester& operator=(const EchoCanceller3Tester&) = delete;
+
+ // Verifies that the capture data is properly received by the block processor
+ // and that the processor data is properly passed to the EchoCanceller3
+ // output.
+ void RunCaptureTransportVerificationTest() {
+ EchoCanceller3 aec3(EchoCanceller3Config(),
+ /*multichannel_config=*/absl::nullopt, sample_rate_hz_,
+ 1, 1);
+ aec3.SetBlockProcessorForTesting(
+ std::make_unique<CaptureTransportVerificationProcessor>(num_bands_));
+
+ for (size_t frame_index = 0; frame_index < kNumFramesToProcess;
+ ++frame_index) {
+ aec3.AnalyzeCapture(&capture_buffer_);
+ OptionalBandSplit();
+ PopulateInputFrame(frame_length_, num_bands_, frame_index,
+ &capture_buffer_.split_bands(0)[0], 0);
+ PopulateInputFrame(frame_length_, frame_index,
+ &render_buffer_.channels()[0][0], 0);
+
+ aec3.AnalyzeRender(&render_buffer_);
+ aec3.ProcessCapture(&capture_buffer_, false);
+ EXPECT_TRUE(VerifyOutputFrameBitexactness(
+ frame_length_, num_bands_, frame_index,
+ &capture_buffer_.split_bands(0)[0], -64));
+ }
+ }
+
+ // Test method for testing that the render data is properly received by the
+ // block processor.
+ void RunRenderTransportVerificationTest() {
+ EchoCanceller3 aec3(EchoCanceller3Config(),
+ /*multichannel_config=*/absl::nullopt, sample_rate_hz_,
+ 1, 1);
+ aec3.SetBlockProcessorForTesting(
+ std::make_unique<RenderTransportVerificationProcessor>(num_bands_));
+
+ std::vector<std::vector<float>> render_input(1);
+ std::vector<float> capture_output;
+ for (size_t frame_index = 0; frame_index < kNumFramesToProcess;
+ ++frame_index) {
+ aec3.AnalyzeCapture(&capture_buffer_);
+ OptionalBandSplit();
+ PopulateInputFrame(frame_length_, num_bands_, frame_index,
+ &capture_buffer_.split_bands(0)[0], 100);
+ PopulateInputFrame(frame_length_, num_bands_, frame_index,
+ &render_buffer_.split_bands(0)[0], 0);
+
+ for (size_t k = 0; k < frame_length_; ++k) {
+ render_input[0].push_back(render_buffer_.split_bands(0)[0][k]);
+ }
+ aec3.AnalyzeRender(&render_buffer_);
+ aec3.ProcessCapture(&capture_buffer_, false);
+ for (size_t k = 0; k < frame_length_; ++k) {
+ capture_output.push_back(capture_buffer_.split_bands(0)[0][k]);
+ }
+ }
+
+ EXPECT_TRUE(
+ VerifyOutputFrameBitexactness(render_input[0], capture_output, -64));
+ }
+
+  // Verifies that information about echo path changes is properly propagated
+  // to the block processor.
+  // The cases tested are:
+  // -That no set echo path change flags are received when there is no echo path
+  // change.
+  // -That set echo path change flags are received and continue to be received
+  // as long as echo path changes are flagged.
+  // -That set echo path change flags are no longer received when echo path
+  // change events stop being flagged.
+ enum class EchoPathChangeTestVariant { kNone, kOneSticky, kOneNonSticky };
+
+ void RunEchoPathChangeVerificationTest(
+ EchoPathChangeTestVariant echo_path_change_test_variant) {
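+    // A frame holds 160 samples per band; integer division by kBlockSize
+    // gives the number of complete blocks per frame, with any remainder
+    // block-processed together with samples of subsequent frames.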
+ constexpr size_t kNumFullBlocksPerFrame = 160 / kBlockSize;
+ constexpr size_t kExpectedNumBlocksToProcess =
+ (kNumFramesToProcess * 160) / kBlockSize;
+ std::unique_ptr<testing::StrictMock<webrtc::test::MockBlockProcessor>>
+ block_processor_mock(
+ new StrictMock<webrtc::test::MockBlockProcessor>());
+ EXPECT_CALL(*block_processor_mock, BufferRender(_))
+ .Times(kExpectedNumBlocksToProcess);
+ EXPECT_CALL(*block_processor_mock, UpdateEchoLeakageStatus(_)).Times(0);
+
+ switch (echo_path_change_test_variant) {
+ case EchoPathChangeTestVariant::kNone:
+ EXPECT_CALL(*block_processor_mock, ProcessCapture(false, _, _, _))
+ .Times(kExpectedNumBlocksToProcess);
+ break;
+ case EchoPathChangeTestVariant::kOneSticky:
+ EXPECT_CALL(*block_processor_mock, ProcessCapture(true, _, _, _))
+ .Times(kExpectedNumBlocksToProcess);
+ break;
+ case EchoPathChangeTestVariant::kOneNonSticky:
+ EXPECT_CALL(*block_processor_mock, ProcessCapture(true, _, _, _))
+ .Times(kNumFullBlocksPerFrame);
+ EXPECT_CALL(*block_processor_mock, ProcessCapture(false, _, _, _))
+ .Times(kExpectedNumBlocksToProcess - kNumFullBlocksPerFrame);
+ break;
+ }
+
+ EchoCanceller3 aec3(EchoCanceller3Config(),
+ /*multichannel_config=*/absl::nullopt, sample_rate_hz_,
+ 1, 1);
+ aec3.SetBlockProcessorForTesting(std::move(block_processor_mock));
+
+ for (size_t frame_index = 0; frame_index < kNumFramesToProcess;
+ ++frame_index) {
+ bool echo_path_change = false;
+ switch (echo_path_change_test_variant) {
+ case EchoPathChangeTestVariant::kNone:
+ break;
+ case EchoPathChangeTestVariant::kOneSticky:
+ echo_path_change = true;
+ break;
+ case EchoPathChangeTestVariant::kOneNonSticky:
+ if (frame_index == 0) {
+ echo_path_change = true;
+ }
+ break;
+ }
+
+ aec3.AnalyzeCapture(&capture_buffer_);
+ OptionalBandSplit();
+
+ PopulateInputFrame(frame_length_, num_bands_, frame_index,
+ &capture_buffer_.split_bands(0)[0], 0);
+ PopulateInputFrame(frame_length_, frame_index,
+ &render_buffer_.channels()[0][0], 0);
+
+ aec3.AnalyzeRender(&render_buffer_);
+ aec3.ProcessCapture(&capture_buffer_, echo_path_change);
+ }
+ }
+
+  // Verifies that echo leakage information is properly passed to the
+  // processor.
+ // The cases tested are:
+ // -That no method calls are received when they should not.
+ // -That false values are received each time they are flagged.
+ // -That true values are received each time they are flagged.
+ // -That a false value is received when flagged after a true value has been
+ // flagged.
+ enum class EchoLeakageTestVariant {
+ kNone,
+ kFalseSticky,
+ kTrueSticky,
+ kTrueNonSticky
+ };
+
+ void RunEchoLeakageVerificationTest(
+ EchoLeakageTestVariant leakage_report_variant) {
+ constexpr size_t kExpectedNumBlocksToProcess =
+ (kNumFramesToProcess * 160) / kBlockSize;
+ std::unique_ptr<testing::StrictMock<webrtc::test::MockBlockProcessor>>
+ block_processor_mock(
+ new StrictMock<webrtc::test::MockBlockProcessor>());
+ EXPECT_CALL(*block_processor_mock, BufferRender(_))
+ .Times(kExpectedNumBlocksToProcess);
+ EXPECT_CALL(*block_processor_mock, ProcessCapture(_, _, _, _))
+ .Times(kExpectedNumBlocksToProcess);
+
+ switch (leakage_report_variant) {
+ case EchoLeakageTestVariant::kNone:
+ EXPECT_CALL(*block_processor_mock, UpdateEchoLeakageStatus(_)).Times(0);
+ break;
+ case EchoLeakageTestVariant::kFalseSticky:
+ EXPECT_CALL(*block_processor_mock, UpdateEchoLeakageStatus(false))
+ .Times(1);
+ break;
+ case EchoLeakageTestVariant::kTrueSticky:
+ EXPECT_CALL(*block_processor_mock, UpdateEchoLeakageStatus(true))
+ .Times(1);
+ break;
+ case EchoLeakageTestVariant::kTrueNonSticky: {
+ ::testing::InSequence s;
+ EXPECT_CALL(*block_processor_mock, UpdateEchoLeakageStatus(true))
+ .Times(1);
+ EXPECT_CALL(*block_processor_mock, UpdateEchoLeakageStatus(false))
+ .Times(kNumFramesToProcess - 1);
+ } break;
+ }
+
+ EchoCanceller3 aec3(EchoCanceller3Config(),
+ /*multichannel_config=*/absl::nullopt, sample_rate_hz_,
+ 1, 1);
+ aec3.SetBlockProcessorForTesting(std::move(block_processor_mock));
+
+ for (size_t frame_index = 0; frame_index < kNumFramesToProcess;
+ ++frame_index) {
+ switch (leakage_report_variant) {
+ case EchoLeakageTestVariant::kNone:
+ break;
+ case EchoLeakageTestVariant::kFalseSticky:
+ if (frame_index == 0) {
+ aec3.UpdateEchoLeakageStatus(false);
+ }
+ break;
+ case EchoLeakageTestVariant::kTrueSticky:
+ if (frame_index == 0) {
+ aec3.UpdateEchoLeakageStatus(true);
+ }
+ break;
+ case EchoLeakageTestVariant::kTrueNonSticky:
+ if (frame_index == 0) {
+ aec3.UpdateEchoLeakageStatus(true);
+ } else {
+ aec3.UpdateEchoLeakageStatus(false);
+ }
+ break;
+ }
+
+ aec3.AnalyzeCapture(&capture_buffer_);
+ OptionalBandSplit();
+
+ PopulateInputFrame(frame_length_, num_bands_, frame_index,
+ &capture_buffer_.split_bands(0)[0], 0);
+ PopulateInputFrame(frame_length_, frame_index,
+ &render_buffer_.channels()[0][0], 0);
+
+ aec3.AnalyzeRender(&render_buffer_);
+ aec3.ProcessCapture(&capture_buffer_, false);
+ }
+ }
+
+ // This verifies that saturation information is properly passed to the
+ // BlockProcessor.
+ // The cases tested are:
+ // -That no saturation event is passed to the processor if there is no
+ // saturation.
+ // -That one frame with one negative saturated sample value is reported to be
+ // saturated and that following non-saturated frames are properly reported as
+ // not being saturated.
+ // -That one frame with one positive saturated sample value is reported to be
+ // saturated and that following non-saturated frames are properly reported as
+ // not being saturated.
+ enum class SaturationTestVariant { kNone, kOneNegative, kOnePositive };
+
+ void RunCaptureSaturationVerificationTest(
+ SaturationTestVariant saturation_variant) {
+ const size_t kNumFullBlocksPerFrame = 160 / kBlockSize;
+ const size_t kExpectedNumBlocksToProcess =
+ (kNumFramesToProcess * 160) / kBlockSize;
+ std::unique_ptr<testing::StrictMock<webrtc::test::MockBlockProcessor>>
+ block_processor_mock(
+ new StrictMock<webrtc::test::MockBlockProcessor>());
+ EXPECT_CALL(*block_processor_mock, BufferRender(_))
+ .Times(kExpectedNumBlocksToProcess);
+ EXPECT_CALL(*block_processor_mock, UpdateEchoLeakageStatus(_)).Times(0);
+
+ switch (saturation_variant) {
+ case SaturationTestVariant::kNone:
+ EXPECT_CALL(*block_processor_mock, ProcessCapture(_, false, _, _))
+ .Times(kExpectedNumBlocksToProcess);
+ break;
+ case SaturationTestVariant::kOneNegative: {
+ ::testing::InSequence s;
+ EXPECT_CALL(*block_processor_mock, ProcessCapture(_, true, _, _))
+ .Times(kNumFullBlocksPerFrame);
+ EXPECT_CALL(*block_processor_mock, ProcessCapture(_, false, _, _))
+ .Times(kExpectedNumBlocksToProcess - kNumFullBlocksPerFrame);
+ } break;
+ case SaturationTestVariant::kOnePositive: {
+ ::testing::InSequence s;
+ EXPECT_CALL(*block_processor_mock, ProcessCapture(_, true, _, _))
+ .Times(kNumFullBlocksPerFrame);
+ EXPECT_CALL(*block_processor_mock, ProcessCapture(_, false, _, _))
+ .Times(kExpectedNumBlocksToProcess - kNumFullBlocksPerFrame);
+ } break;
+ }
+
+ EchoCanceller3 aec3(EchoCanceller3Config(),
+ /*multichannel_config=*/absl::nullopt, sample_rate_hz_,
+ 1, 1);
+ aec3.SetBlockProcessorForTesting(std::move(block_processor_mock));
+ for (size_t frame_index = 0; frame_index < kNumFramesToProcess;
+ ++frame_index) {
+ for (int k = 0; k < fullband_frame_length_; ++k) {
+ capture_buffer_.channels()[0][k] = 0.f;
+ }
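+      // A single full-scale sample in the first frame is expected to be
+      // enough for its blocks to be flagged as saturated.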
+ switch (saturation_variant) {
+ case SaturationTestVariant::kNone:
+ break;
+ case SaturationTestVariant::kOneNegative:
+ if (frame_index == 0) {
+ capture_buffer_.channels()[0][10] = -32768.f;
+ }
+ break;
+ case SaturationTestVariant::kOnePositive:
+ if (frame_index == 0) {
+ capture_buffer_.channels()[0][10] = 32767.f;
+ }
+ break;
+ }
+
+ aec3.AnalyzeCapture(&capture_buffer_);
+ OptionalBandSplit();
+
+ PopulateInputFrame(frame_length_, num_bands_, frame_index,
+ &capture_buffer_.split_bands(0)[0], 0);
+ PopulateInputFrame(frame_length_, num_bands_, frame_index,
+ &render_buffer_.split_bands(0)[0], 0);
+
+ aec3.AnalyzeRender(&render_buffer_);
+ aec3.ProcessCapture(&capture_buffer_, false);
+ }
+ }
+
+  // This test verifies that the swap queue is able to handle jitter in the
+  // capture and render API calls.
+ void RunRenderSwapQueueVerificationTest() {
+ const EchoCanceller3Config config;
+ EchoCanceller3 aec3(config, /*multichannel_config=*/absl::nullopt,
+ sample_rate_hz_, 1, 1);
+ aec3.SetBlockProcessorForTesting(
+ std::make_unique<RenderTransportVerificationProcessor>(num_bands_));
+
+ std::vector<std::vector<float>> render_input(1);
+ std::vector<float> capture_output;
+
+ for (size_t frame_index = 0; frame_index < kRenderTransferQueueSizeFrames;
+ ++frame_index) {
+ if (sample_rate_hz_ > 16000) {
+ render_buffer_.SplitIntoFrequencyBands();
+ }
+ PopulateInputFrame(frame_length_, num_bands_, frame_index,
+ &render_buffer_.split_bands(0)[0], 0);
+
+ if (sample_rate_hz_ > 16000) {
+ render_buffer_.SplitIntoFrequencyBands();
+ }
+
+ for (size_t k = 0; k < frame_length_; ++k) {
+ render_input[0].push_back(render_buffer_.split_bands(0)[0][k]);
+ }
+ aec3.AnalyzeRender(&render_buffer_);
+ }
+
+ for (size_t frame_index = 0; frame_index < kRenderTransferQueueSizeFrames;
+ ++frame_index) {
+ aec3.AnalyzeCapture(&capture_buffer_);
+ if (sample_rate_hz_ > 16000) {
+ capture_buffer_.SplitIntoFrequencyBands();
+ }
+
+ PopulateInputFrame(frame_length_, num_bands_, frame_index,
+ &capture_buffer_.split_bands(0)[0], 0);
+
+ aec3.ProcessCapture(&capture_buffer_, false);
+ for (size_t k = 0; k < frame_length_; ++k) {
+ capture_output.push_back(capture_buffer_.split_bands(0)[0][k]);
+ }
+ }
+
+ EXPECT_TRUE(
+ VerifyOutputFrameBitexactness(render_input[0], capture_output, -64));
+ }
+
+  // This test verifies that a buffer overrun in the render swap queue is
+  // properly reported.
+ void RunRenderPipelineSwapQueueOverrunReturnValueTest() {
+ EchoCanceller3 aec3(EchoCanceller3Config(),
+ /*multichannel_config=*/absl::nullopt, sample_rate_hz_,
+ 1, 1);
+
+ constexpr size_t kRenderTransferQueueSize = 30;
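+    // Insert twice the queue capacity of render frames without any capture
+    // processing to force the render transfer queue to overrun.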
+ for (size_t k = 0; k < 2; ++k) {
+ for (size_t frame_index = 0; frame_index < kRenderTransferQueueSize;
+ ++frame_index) {
+ if (sample_rate_hz_ > 16000) {
+ render_buffer_.SplitIntoFrequencyBands();
+ }
+ PopulateInputFrame(frame_length_, frame_index,
+ &render_buffer_.channels()[0][0], 0);
+
+ aec3.AnalyzeRender(&render_buffer_);
+ }
+ }
+ }
+
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+  // Verifies that the check for the number of bands in the AnalyzeRender
+  // input is correct by adjusting the sample rates of EchoCanceller3 and the
+  // input AudioBuffer to have a different number of bands.
+ void RunAnalyzeRenderNumBandsCheckVerification() {
+    // Set aec3_sample_rate_hz to be different from sample_rate_hz_ in such a
+    // way that the number of bands differs between the two rates.
+ const int aec3_sample_rate_hz = sample_rate_hz_ == 48000 ? 32000 : 48000;
+ EchoCanceller3 aec3(EchoCanceller3Config(),
+ /*multichannel_config=*/absl::nullopt,
+ aec3_sample_rate_hz, 1, 1);
+ PopulateInputFrame(frame_length_, 0, &render_buffer_.channels_f()[0][0], 0);
+
+ EXPECT_DEATH(aec3.AnalyzeRender(&render_buffer_), "");
+ }
+
+  // Verifies that the check for the number of bands in the ProcessCapture
+  // input is correct by adjusting the sample rates of EchoCanceller3 and the
+  // input AudioBuffer to have a different number of bands.
+ void RunProcessCaptureNumBandsCheckVerification() {
+    // Set aec3_sample_rate_hz to be different from sample_rate_hz_ in such a
+    // way that the number of bands differs between the two rates.
+ const int aec3_sample_rate_hz = sample_rate_hz_ == 48000 ? 32000 : 48000;
+ EchoCanceller3 aec3(EchoCanceller3Config(),
+ /*multichannel_config=*/absl::nullopt,
+ aec3_sample_rate_hz, 1, 1);
+ PopulateInputFrame(frame_length_, num_bands_, 0,
+ &capture_buffer_.split_bands_f(0)[0], 100);
+ EXPECT_DEATH(aec3.ProcessCapture(&capture_buffer_, false), "");
+ }
+
+#endif
+
+ private:
+ void OptionalBandSplit() {
+ if (sample_rate_hz_ > 16000) {
+ capture_buffer_.SplitIntoFrequencyBands();
+ render_buffer_.SplitIntoFrequencyBands();
+ }
+ }
+
+ static constexpr size_t kNumFramesToProcess = 20;
+ const int sample_rate_hz_;
+ const size_t num_bands_;
+ const size_t frame_length_;
+ const int fullband_frame_length_;
+ AudioBuffer capture_buffer_;
+ AudioBuffer render_buffer_;
+};
+
+TEST(EchoCanceller3Buffering, CaptureBitexactness) {
+ for (auto rate : {16000, 32000, 48000}) {
+ SCOPED_TRACE(ProduceDebugText(rate));
+ EchoCanceller3Tester(rate).RunCaptureTransportVerificationTest();
+ }
+}
+
+TEST(EchoCanceller3Buffering, RenderBitexactness) {
+ for (auto rate : {16000, 32000, 48000}) {
+ SCOPED_TRACE(ProduceDebugText(rate));
+ EchoCanceller3Tester(rate).RunRenderTransportVerificationTest();
+ }
+}
+
+TEST(EchoCanceller3Buffering, RenderSwapQueue) {
+ EchoCanceller3Tester(16000).RunRenderSwapQueueVerificationTest();
+}
+
+TEST(EchoCanceller3Buffering, RenderSwapQueueOverrunReturnValue) {
+ for (auto rate : {16000, 32000, 48000}) {
+ SCOPED_TRACE(ProduceDebugText(rate));
+ EchoCanceller3Tester(rate)
+ .RunRenderPipelineSwapQueueOverrunReturnValueTest();
+ }
+}
+
+TEST(EchoCanceller3Messaging, CaptureSaturation) {
+ auto variants = {EchoCanceller3Tester::SaturationTestVariant::kNone,
+ EchoCanceller3Tester::SaturationTestVariant::kOneNegative,
+ EchoCanceller3Tester::SaturationTestVariant::kOnePositive};
+ for (auto rate : {16000, 32000, 48000}) {
+ for (auto variant : variants) {
+ SCOPED_TRACE(ProduceDebugText(rate, static_cast<int>(variant)));
+ EchoCanceller3Tester(rate).RunCaptureSaturationVerificationTest(variant);
+ }
+ }
+}
+
+TEST(EchoCanceller3Messaging, EchoPathChange) {
+ auto variants = {
+ EchoCanceller3Tester::EchoPathChangeTestVariant::kNone,
+ EchoCanceller3Tester::EchoPathChangeTestVariant::kOneSticky,
+ EchoCanceller3Tester::EchoPathChangeTestVariant::kOneNonSticky};
+ for (auto rate : {16000, 32000, 48000}) {
+ for (auto variant : variants) {
+ SCOPED_TRACE(ProduceDebugText(rate, static_cast<int>(variant)));
+ EchoCanceller3Tester(rate).RunEchoPathChangeVerificationTest(variant);
+ }
+ }
+}
+
+TEST(EchoCanceller3Messaging, EchoLeakage) {
+ auto variants = {
+ EchoCanceller3Tester::EchoLeakageTestVariant::kNone,
+ EchoCanceller3Tester::EchoLeakageTestVariant::kFalseSticky,
+ EchoCanceller3Tester::EchoLeakageTestVariant::kTrueSticky,
+ EchoCanceller3Tester::EchoLeakageTestVariant::kTrueNonSticky};
+ for (auto rate : {16000, 32000, 48000}) {
+ for (auto variant : variants) {
+ SCOPED_TRACE(ProduceDebugText(rate, static_cast<int>(variant)));
+ EchoCanceller3Tester(rate).RunEchoLeakageVerificationTest(variant);
+ }
+ }
+}
+
+// Tests the field trial override for the anti-howling gain.
+TEST(EchoCanceller3FieldTrials, Aec3SuppressorAntiHowlingGainOverride) {
+ EchoCanceller3Config default_config;
+ EchoCanceller3Config adjusted_config = AdjustConfig(default_config);
+ ASSERT_EQ(
+ default_config.suppressor.high_bands_suppression.anti_howling_gain,
+ adjusted_config.suppressor.high_bands_suppression.anti_howling_gain);
+
+ webrtc::test::ScopedFieldTrials field_trials(
+ "WebRTC-Aec3SuppressorAntiHowlingGainOverride/0.02/");
+ adjusted_config = AdjustConfig(default_config);
+
+ ASSERT_NE(
+ default_config.suppressor.high_bands_suppression.anti_howling_gain,
+ adjusted_config.suppressor.high_bands_suppression.anti_howling_gain);
+ EXPECT_FLOAT_EQ(
+ 0.02f,
+ adjusted_config.suppressor.high_bands_suppression.anti_howling_gain);
+}
+
+// Tests the field trial override for the enforcement of a low active render
+// limit.
+TEST(EchoCanceller3FieldTrials, Aec3EnforceLowActiveRenderLimit) {
+ EchoCanceller3Config default_config;
+ EchoCanceller3Config adjusted_config = AdjustConfig(default_config);
+ ASSERT_EQ(default_config.render_levels.active_render_limit,
+ adjusted_config.render_levels.active_render_limit);
+
+ webrtc::test::ScopedFieldTrials field_trials(
+ "WebRTC-Aec3EnforceLowActiveRenderLimit/Enabled/");
+ adjusted_config = AdjustConfig(default_config);
+
+ ASSERT_NE(default_config.render_levels.active_render_limit,
+ adjusted_config.render_levels.active_render_limit);
+ EXPECT_FLOAT_EQ(50.f, adjusted_config.render_levels.active_render_limit);
+}
+
+// Tests the field trial-based override of the suppressor parameters when all
+// parameters are passed jointly.
+TEST(EchoCanceller3FieldTrials, Aec3SuppressorTuningOverrideAllParams) {
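+  // The trial string passes all supported suppressor parameters as a single
+  // comma-separated list of key:value pairs.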
+ webrtc::test::ScopedFieldTrials field_trials(
+ "WebRTC-Aec3SuppressorTuningOverride/"
+ "nearend_tuning_mask_lf_enr_transparent:0.1,nearend_tuning_mask_lf_enr_"
+ "suppress:0.2,nearend_tuning_mask_hf_enr_transparent:0.3,nearend_tuning_"
+ "mask_hf_enr_suppress:0.4,nearend_tuning_max_inc_factor:0.5,nearend_"
+ "tuning_max_dec_factor_lf:0.6,normal_tuning_mask_lf_enr_transparent:0.7,"
+ "normal_tuning_mask_lf_enr_suppress:0.8,normal_tuning_mask_hf_enr_"
+ "transparent:0.9,normal_tuning_mask_hf_enr_suppress:1.0,normal_tuning_"
+ "max_inc_factor:1.1,normal_tuning_max_dec_factor_lf:1.2,dominant_nearend_"
+ "detection_enr_threshold:1.3,dominant_nearend_detection_enr_exit_"
+ "threshold:1.4,dominant_nearend_detection_snr_threshold:1.5,dominant_"
+ "nearend_detection_hold_duration:10,dominant_nearend_detection_trigger_"
+ "threshold:11/");
+
+ EchoCanceller3Config default_config;
+ EchoCanceller3Config adjusted_config = AdjustConfig(default_config);
+
+ ASSERT_NE(adjusted_config.suppressor.nearend_tuning.mask_lf.enr_transparent,
+ default_config.suppressor.nearend_tuning.mask_lf.enr_transparent);
+ ASSERT_NE(adjusted_config.suppressor.nearend_tuning.mask_lf.enr_suppress,
+ default_config.suppressor.nearend_tuning.mask_lf.enr_suppress);
+ ASSERT_NE(adjusted_config.suppressor.nearend_tuning.mask_hf.enr_transparent,
+ default_config.suppressor.nearend_tuning.mask_hf.enr_transparent);
+ ASSERT_NE(adjusted_config.suppressor.nearend_tuning.mask_hf.enr_suppress,
+ default_config.suppressor.nearend_tuning.mask_hf.enr_suppress);
+ ASSERT_NE(adjusted_config.suppressor.nearend_tuning.max_inc_factor,
+ default_config.suppressor.nearend_tuning.max_inc_factor);
+ ASSERT_NE(adjusted_config.suppressor.nearend_tuning.max_dec_factor_lf,
+ default_config.suppressor.nearend_tuning.max_dec_factor_lf);
+ ASSERT_NE(adjusted_config.suppressor.normal_tuning.mask_lf.enr_transparent,
+ default_config.suppressor.normal_tuning.mask_lf.enr_transparent);
+ ASSERT_NE(adjusted_config.suppressor.normal_tuning.mask_lf.enr_suppress,
+ default_config.suppressor.normal_tuning.mask_lf.enr_suppress);
+ ASSERT_NE(adjusted_config.suppressor.normal_tuning.mask_hf.enr_transparent,
+ default_config.suppressor.normal_tuning.mask_hf.enr_transparent);
+ ASSERT_NE(adjusted_config.suppressor.normal_tuning.mask_hf.enr_suppress,
+ default_config.suppressor.normal_tuning.mask_hf.enr_suppress);
+ ASSERT_NE(adjusted_config.suppressor.normal_tuning.max_inc_factor,
+ default_config.suppressor.normal_tuning.max_inc_factor);
+ ASSERT_NE(adjusted_config.suppressor.normal_tuning.max_dec_factor_lf,
+ default_config.suppressor.normal_tuning.max_dec_factor_lf);
+ ASSERT_NE(adjusted_config.suppressor.dominant_nearend_detection.enr_threshold,
+ default_config.suppressor.dominant_nearend_detection.enr_threshold);
+ ASSERT_NE(
+ adjusted_config.suppressor.dominant_nearend_detection.enr_exit_threshold,
+ default_config.suppressor.dominant_nearend_detection.enr_exit_threshold);
+ ASSERT_NE(adjusted_config.suppressor.dominant_nearend_detection.snr_threshold,
+ default_config.suppressor.dominant_nearend_detection.snr_threshold);
+ ASSERT_NE(adjusted_config.suppressor.dominant_nearend_detection.hold_duration,
+ default_config.suppressor.dominant_nearend_detection.hold_duration);
+ ASSERT_NE(
+ adjusted_config.suppressor.dominant_nearend_detection.trigger_threshold,
+ default_config.suppressor.dominant_nearend_detection.trigger_threshold);
+
+ EXPECT_FLOAT_EQ(
+ adjusted_config.suppressor.nearend_tuning.mask_lf.enr_transparent, 0.1);
+ EXPECT_FLOAT_EQ(
+ adjusted_config.suppressor.nearend_tuning.mask_lf.enr_suppress, 0.2);
+ EXPECT_FLOAT_EQ(
+ adjusted_config.suppressor.nearend_tuning.mask_hf.enr_transparent, 0.3);
+ EXPECT_FLOAT_EQ(
+ adjusted_config.suppressor.nearend_tuning.mask_hf.enr_suppress, 0.4);
+ EXPECT_FLOAT_EQ(adjusted_config.suppressor.nearend_tuning.max_inc_factor,
+ 0.5);
+ EXPECT_FLOAT_EQ(adjusted_config.suppressor.nearend_tuning.max_dec_factor_lf,
+ 0.6);
+ EXPECT_FLOAT_EQ(
+ adjusted_config.suppressor.normal_tuning.mask_lf.enr_transparent, 0.7);
+ EXPECT_FLOAT_EQ(adjusted_config.suppressor.normal_tuning.mask_lf.enr_suppress,
+ 0.8);
+ EXPECT_FLOAT_EQ(
+ adjusted_config.suppressor.normal_tuning.mask_hf.enr_transparent, 0.9);
+ EXPECT_FLOAT_EQ(adjusted_config.suppressor.normal_tuning.mask_hf.enr_suppress,
+ 1.0);
+ EXPECT_FLOAT_EQ(adjusted_config.suppressor.normal_tuning.max_inc_factor, 1.1);
+ EXPECT_FLOAT_EQ(adjusted_config.suppressor.normal_tuning.max_dec_factor_lf,
+ 1.2);
+ EXPECT_FLOAT_EQ(
+ adjusted_config.suppressor.dominant_nearend_detection.enr_threshold, 1.3);
+ EXPECT_FLOAT_EQ(
+ adjusted_config.suppressor.dominant_nearend_detection.enr_exit_threshold,
+ 1.4);
+ EXPECT_FLOAT_EQ(
+ adjusted_config.suppressor.dominant_nearend_detection.snr_threshold, 1.5);
+ EXPECT_EQ(adjusted_config.suppressor.dominant_nearend_detection.hold_duration,
+ 10);
+ EXPECT_EQ(
+ adjusted_config.suppressor.dominant_nearend_detection.trigger_threshold,
+ 11);
+}
+
+// Tests the field trial-based override of the suppressor parameters when a
+// single parameter is passed.
+TEST(EchoCanceller3FieldTrials, Aec3SuppressorTuningOverrideOneParam) {
+ webrtc::test::ScopedFieldTrials field_trials(
+ "WebRTC-Aec3SuppressorTuningOverride/nearend_tuning_max_inc_factor:0.5/");
+
+ EchoCanceller3Config default_config;
+ EchoCanceller3Config adjusted_config = AdjustConfig(default_config);
+
+ ASSERT_EQ(adjusted_config.suppressor.nearend_tuning.mask_lf.enr_transparent,
+ default_config.suppressor.nearend_tuning.mask_lf.enr_transparent);
+ ASSERT_EQ(adjusted_config.suppressor.nearend_tuning.mask_lf.enr_suppress,
+ default_config.suppressor.nearend_tuning.mask_lf.enr_suppress);
+ ASSERT_EQ(adjusted_config.suppressor.nearend_tuning.mask_hf.enr_transparent,
+ default_config.suppressor.nearend_tuning.mask_hf.enr_transparent);
+ ASSERT_EQ(adjusted_config.suppressor.nearend_tuning.mask_hf.enr_suppress,
+ default_config.suppressor.nearend_tuning.mask_hf.enr_suppress);
+ ASSERT_EQ(adjusted_config.suppressor.nearend_tuning.max_dec_factor_lf,
+ default_config.suppressor.nearend_tuning.max_dec_factor_lf);
+ ASSERT_EQ(adjusted_config.suppressor.normal_tuning.mask_lf.enr_transparent,
+ default_config.suppressor.normal_tuning.mask_lf.enr_transparent);
+ ASSERT_EQ(adjusted_config.suppressor.normal_tuning.mask_lf.enr_suppress,
+ default_config.suppressor.normal_tuning.mask_lf.enr_suppress);
+ ASSERT_EQ(adjusted_config.suppressor.normal_tuning.mask_hf.enr_transparent,
+ default_config.suppressor.normal_tuning.mask_hf.enr_transparent);
+ ASSERT_EQ(adjusted_config.suppressor.normal_tuning.mask_hf.enr_suppress,
+ default_config.suppressor.normal_tuning.mask_hf.enr_suppress);
+ ASSERT_EQ(adjusted_config.suppressor.normal_tuning.max_inc_factor,
+ default_config.suppressor.normal_tuning.max_inc_factor);
+ ASSERT_EQ(adjusted_config.suppressor.normal_tuning.max_dec_factor_lf,
+ default_config.suppressor.normal_tuning.max_dec_factor_lf);
+ ASSERT_EQ(adjusted_config.suppressor.dominant_nearend_detection.enr_threshold,
+ default_config.suppressor.dominant_nearend_detection.enr_threshold);
+ ASSERT_EQ(
+ adjusted_config.suppressor.dominant_nearend_detection.enr_exit_threshold,
+ default_config.suppressor.dominant_nearend_detection.enr_exit_threshold);
+ ASSERT_EQ(adjusted_config.suppressor.dominant_nearend_detection.snr_threshold,
+ default_config.suppressor.dominant_nearend_detection.snr_threshold);
+ ASSERT_EQ(adjusted_config.suppressor.dominant_nearend_detection.hold_duration,
+ default_config.suppressor.dominant_nearend_detection.hold_duration);
+ ASSERT_EQ(
+ adjusted_config.suppressor.dominant_nearend_detection.trigger_threshold,
+ default_config.suppressor.dominant_nearend_detection.trigger_threshold);
+
+ ASSERT_NE(adjusted_config.suppressor.nearend_tuning.max_inc_factor,
+ default_config.suppressor.nearend_tuning.max_inc_factor);
+
+ EXPECT_FLOAT_EQ(adjusted_config.suppressor.nearend_tuning.max_inc_factor,
+ 0.5);
+}
+
+// Tests the field trial-based override of the exponential decay parameters.
+TEST(EchoCanceller3FieldTrials, Aec3UseNearendReverb) {
+ webrtc::test::ScopedFieldTrials field_trials(
+ "WebRTC-Aec3UseNearendReverbLen/default_len:0.9,nearend_len:0.8/");
+ EchoCanceller3Config default_config;
+ EchoCanceller3Config adjusted_config = AdjustConfig(default_config);
+ EXPECT_FLOAT_EQ(adjusted_config.ep_strength.default_len, 0.9);
+ EXPECT_FLOAT_EQ(adjusted_config.ep_strength.nearend_len, 0.8);
+}
+
+TEST(EchoCanceller3, DetectionOfProperStereo) {
+ constexpr int kSampleRateHz = 16000;
+ constexpr int kNumChannels = 2;
+ AudioBuffer buffer(/*input_rate=*/kSampleRateHz,
+ /*input_num_channels=*/kNumChannels,
+                     /*buffer_rate=*/kSampleRateHz,
+ /*buffer_num_channels=*/kNumChannels,
+ /*output_rate=*/kSampleRateHz,
+ /*output_num_channels=*/kNumChannels);
+
+ constexpr size_t kNumBlocksForMonoConfig = 1;
+ constexpr size_t kNumBlocksForSurroundConfig = 2;
+ EchoCanceller3Config mono_config;
+ absl::optional<EchoCanceller3Config> multichannel_config;
+
+ mono_config.multi_channel.detect_stereo_content = true;
+ mono_config.multi_channel.stereo_detection_threshold = 0.0f;
+ mono_config.multi_channel.stereo_detection_hysteresis_seconds = 0.0f;
+ multichannel_config = mono_config;
+ mono_config.filter.coarse_initial.length_blocks = kNumBlocksForMonoConfig;
+ multichannel_config->filter.coarse_initial.length_blocks =
+ kNumBlocksForSurroundConfig;
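+  // The mono and multichannel configs are made to differ only in the coarse
+  // initial filter length, so the active config reveals which of the two is
+  // in effect.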
+
+ EchoCanceller3 aec3(mono_config, multichannel_config,
+ /*sample_rate_hz=*/kSampleRateHz,
+ /*num_render_channels=*/kNumChannels,
+ /*num_capture_input_channels=*/kNumChannels);
+
+ EXPECT_FALSE(aec3.StereoRenderProcessingActiveForTesting());
+ EXPECT_EQ(
+ aec3.GetActiveConfigForTesting().filter.coarse_initial.length_blocks,
+ kNumBlocksForMonoConfig);
+
+ RunAecInStereo(buffer, aec3, 100.0f, 100.0f);
+ EXPECT_FALSE(aec3.StereoRenderProcessingActiveForTesting());
+ EXPECT_EQ(
+ aec3.GetActiveConfigForTesting().filter.coarse_initial.length_blocks,
+ kNumBlocksForMonoConfig);
+
+ RunAecInStereo(buffer, aec3, 100.0f, 101.0f);
+ EXPECT_TRUE(aec3.StereoRenderProcessingActiveForTesting());
+ EXPECT_EQ(
+ aec3.GetActiveConfigForTesting().filter.coarse_initial.length_blocks,
+ kNumBlocksForSurroundConfig);
+}
+
+TEST(EchoCanceller3, DetectionOfProperStereoUsingThreshold) {
+ constexpr int kSampleRateHz = 16000;
+ constexpr int kNumChannels = 2;
+ AudioBuffer buffer(/*input_rate=*/kSampleRateHz,
+ /*input_num_channels=*/kNumChannels,
+                     /*buffer_rate=*/kSampleRateHz,
+ /*buffer_num_channels=*/kNumChannels,
+ /*output_rate=*/kSampleRateHz,
+ /*output_num_channels=*/kNumChannels);
+
+ constexpr size_t kNumBlocksForMonoConfig = 1;
+ constexpr size_t kNumBlocksForSurroundConfig = 2;
+ EchoCanceller3Config mono_config;
+ absl::optional<EchoCanceller3Config> multichannel_config;
+
+ constexpr float kStereoDetectionThreshold = 2.0f;
+ mono_config.multi_channel.detect_stereo_content = true;
+ mono_config.multi_channel.stereo_detection_threshold =
+ kStereoDetectionThreshold;
+ mono_config.multi_channel.stereo_detection_hysteresis_seconds = 0.0f;
+ multichannel_config = mono_config;
+ mono_config.filter.coarse_initial.length_blocks = kNumBlocksForMonoConfig;
+ multichannel_config->filter.coarse_initial.length_blocks =
+ kNumBlocksForSurroundConfig;
+
+ EchoCanceller3 aec3(mono_config, multichannel_config,
+ /*sample_rate_hz=*/kSampleRateHz,
+ /*num_render_channels=*/kNumChannels,
+ /*num_capture_input_channels=*/kNumChannels);
+
+ EXPECT_FALSE(aec3.StereoRenderProcessingActiveForTesting());
+ EXPECT_EQ(
+ aec3.GetActiveConfigForTesting().filter.coarse_initial.length_blocks,
+ kNumBlocksForMonoConfig);
+
+ RunAecInStereo(buffer, aec3, 100.0f,
+ 100.0f + kStereoDetectionThreshold - 1.0f);
+ EXPECT_FALSE(aec3.StereoRenderProcessingActiveForTesting());
+ EXPECT_EQ(
+ aec3.GetActiveConfigForTesting().filter.coarse_initial.length_blocks,
+ kNumBlocksForMonoConfig);
+
+ RunAecInStereo(buffer, aec3, 100.0f,
+ 100.0f + kStereoDetectionThreshold + 10.0f);
+ EXPECT_TRUE(aec3.StereoRenderProcessingActiveForTesting());
+ EXPECT_EQ(
+ aec3.GetActiveConfigForTesting().filter.coarse_initial.length_blocks,
+ kNumBlocksForSurroundConfig);
+}
+
+TEST(EchoCanceller3, DetectionOfProperStereoUsingHysteresis) {
+ constexpr int kSampleRateHz = 16000;
+ constexpr int kNumChannels = 2;
+ AudioBuffer buffer(/*input_rate=*/kSampleRateHz,
+ /*input_num_channels=*/kNumChannels,
+                     /*buffer_rate=*/kSampleRateHz,
+ /*buffer_num_channels=*/kNumChannels,
+ /*output_rate=*/kSampleRateHz,
+ /*output_num_channels=*/kNumChannels);
+
+ constexpr size_t kNumBlocksForMonoConfig = 1;
+ constexpr size_t kNumBlocksForSurroundConfig = 2;
+ EchoCanceller3Config mono_config;
+ absl::optional<EchoCanceller3Config> surround_config;
+
+ mono_config.multi_channel.detect_stereo_content = true;
+ mono_config.multi_channel.stereo_detection_hysteresis_seconds = 0.5f;
+ surround_config = mono_config;
+ mono_config.filter.coarse_initial.length_blocks = kNumBlocksForMonoConfig;
+ surround_config->filter.coarse_initial.length_blocks =
+ kNumBlocksForSurroundConfig;
+
+ EchoCanceller3 aec3(mono_config, surround_config,
+ /*sample_rate_hz=*/kSampleRateHz,
+ /*num_render_channels=*/kNumChannels,
+ /*num_capture_input_channels=*/kNumChannels);
+
+ EXPECT_FALSE(aec3.StereoRenderProcessingActiveForTesting());
+ EXPECT_EQ(
+ aec3.GetActiveConfigForTesting().filter.coarse_initial.length_blocks,
+ kNumBlocksForMonoConfig);
+
+ RunAecInStereo(buffer, aec3, 100.0f, 100.0f);
+ EXPECT_FALSE(aec3.StereoRenderProcessingActiveForTesting());
+ EXPECT_EQ(
+ aec3.GetActiveConfigForTesting().filter.coarse_initial.length_blocks,
+ kNumBlocksForMonoConfig);
+
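+  // Feed proper stereo content for the duration of the hysteresis period;
+  // stereo processing must not activate until the period has elapsed.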
+ constexpr int kNumFramesPerSecond = 100;
+ for (int k = 0;
+ k < static_cast<int>(
+ kNumFramesPerSecond *
+ mono_config.multi_channel.stereo_detection_hysteresis_seconds);
+ ++k) {
+ RunAecInStereo(buffer, aec3, 100.0f, 101.0f);
+ EXPECT_FALSE(aec3.StereoRenderProcessingActiveForTesting());
+ EXPECT_EQ(
+ aec3.GetActiveConfigForTesting().filter.coarse_initial.length_blocks,
+ kNumBlocksForMonoConfig);
+ }
+
+ RunAecInStereo(buffer, aec3, 100.0f, 101.0f);
+ EXPECT_TRUE(aec3.StereoRenderProcessingActiveForTesting());
+ EXPECT_EQ(
+ aec3.GetActiveConfigForTesting().filter.coarse_initial.length_blocks,
+ kNumBlocksForSurroundConfig);
+}
+
+TEST(EchoCanceller3, StereoContentDetectionForMonoSignals) {
+ constexpr int kSampleRateHz = 16000;
+ constexpr int kNumChannels = 2;
+ AudioBuffer buffer(/*input_rate=*/kSampleRateHz,
+ /*input_num_channels=*/kNumChannels,
+                     /*buffer_rate=*/kSampleRateHz,
+ /*buffer_num_channels=*/kNumChannels,
+ /*output_rate=*/kSampleRateHz,
+ /*output_num_channels=*/kNumChannels);
+
+ constexpr size_t kNumBlocksForMonoConfig = 1;
+ constexpr size_t kNumBlocksForSurroundConfig = 2;
+ EchoCanceller3Config mono_config;
+ absl::optional<EchoCanceller3Config> multichannel_config;
+
+ for (bool detect_stereo_content : {false, true}) {
+ mono_config.multi_channel.detect_stereo_content = detect_stereo_content;
+ multichannel_config = mono_config;
+ mono_config.filter.coarse_initial.length_blocks = kNumBlocksForMonoConfig;
+ multichannel_config->filter.coarse_initial.length_blocks =
+ kNumBlocksForSurroundConfig;
+
+ AudioBuffer mono_buffer(/*input_rate=*/kSampleRateHz,
+ /*input_num_channels=*/1,
+                            /*buffer_rate=*/kSampleRateHz,
+ /*buffer_num_channels=*/1,
+ /*output_rate=*/kSampleRateHz,
+ /*output_num_channels=*/1);
+
+ EchoCanceller3 aec3(mono_config, multichannel_config,
+ /*sample_rate_hz=*/kSampleRateHz,
+ /*num_render_channels=*/1,
+ /*num_capture_input_channels=*/1);
+
+ EXPECT_FALSE(aec3.StereoRenderProcessingActiveForTesting());
+ EXPECT_EQ(
+ aec3.GetActiveConfigForTesting().filter.coarse_initial.length_blocks,
+ kNumBlocksForMonoConfig);
+
+    RunAecInMono(mono_buffer, aec3, 100.0f);
+ EXPECT_FALSE(aec3.StereoRenderProcessingActiveForTesting());
+ EXPECT_EQ(
+ aec3.GetActiveConfigForTesting().filter.coarse_initial.length_blocks,
+ kNumBlocksForMonoConfig);
+ }
+}
+
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+
+TEST(EchoCanceller3InputCheckDeathTest, WrongCaptureNumBandsCheckVerification) {
+ for (auto rate : {16000, 32000, 48000}) {
+ SCOPED_TRACE(ProduceDebugText(rate));
+ EchoCanceller3Tester(rate).RunProcessCaptureNumBandsCheckVerification();
+ }
+}
+
+// Verifies that the check for null input to the capture processing API call
+// works.
+TEST(EchoCanceller3InputCheckDeathTest, NullCaptureProcessingParameter) {
+ EXPECT_DEATH(
+ EchoCanceller3(EchoCanceller3Config(),
+ /*multichannel_config_=*/absl::nullopt, 16000, 1, 1)
+ .ProcessCapture(nullptr, false),
+ "");
+}
+
+// Verifies the check for correct sample rate.
+// TODO(peah): Re-enable the test once the issue with memory leaks during DEATH
+// tests on test bots has been fixed.
+TEST(EchoCanceller3InputCheckDeathTest, DISABLED_WrongSampleRate) {
+ ApmDataDumper data_dumper(0);
+ EXPECT_DEATH(
+ EchoCanceller3(EchoCanceller3Config(),
+ /*multichannel_config_=*/absl::nullopt, 8001, 1, 1),
+ "");
+}
+
+#endif
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/echo_path_delay_estimator.cc b/third_party/libwebrtc/modules/audio_processing/aec3/echo_path_delay_estimator.cc
new file mode 100644
index 0000000000..fc83ca2f89
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/echo_path_delay_estimator.cc
@@ -0,0 +1,127 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/audio_processing/aec3/echo_path_delay_estimator.h"
+
+#include <array>
+
+#include "api/audio/echo_canceller3_config.h"
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "modules/audio_processing/aec3/downsampled_render_buffer.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+EchoPathDelayEstimator::EchoPathDelayEstimator(
+ ApmDataDumper* data_dumper,
+ const EchoCanceller3Config& config,
+ size_t num_capture_channels)
+ : data_dumper_(data_dumper),
+ down_sampling_factor_(config.delay.down_sampling_factor),
+ sub_block_size_(down_sampling_factor_ != 0
+ ? kBlockSize / down_sampling_factor_
+ : kBlockSize),
+ capture_mixer_(num_capture_channels,
+ config.delay.capture_alignment_mixing),
+ capture_decimator_(down_sampling_factor_),
+ matched_filter_(
+ data_dumper_,
+ DetectOptimization(),
+ sub_block_size_,
+ kMatchedFilterWindowSizeSubBlocks,
+ config.delay.num_filters,
+ kMatchedFilterAlignmentShiftSizeSubBlocks,
+ config.delay.down_sampling_factor == 8
+ ? config.render_levels.poor_excitation_render_limit_ds8
+ : config.render_levels.poor_excitation_render_limit,
+ config.delay.delay_estimate_smoothing,
+ config.delay.delay_estimate_smoothing_delay_found,
+ config.delay.delay_candidate_detection_threshold,
+ config.delay.detect_pre_echo),
+ matched_filter_lag_aggregator_(data_dumper_,
+ matched_filter_.GetMaxFilterLag(),
+ config.delay) {
+ RTC_DCHECK(data_dumper);
+ RTC_DCHECK(down_sampling_factor_ > 0);
+}
+
+EchoPathDelayEstimator::~EchoPathDelayEstimator() = default;
+
+void EchoPathDelayEstimator::Reset(bool reset_delay_confidence) {
+ Reset(true, reset_delay_confidence);
+}
+
+absl::optional<DelayEstimate> EchoPathDelayEstimator::EstimateDelay(
+ const DownsampledRenderBuffer& render_buffer,
+ const Block& capture) {
+ std::array<float, kBlockSize> downsampled_capture_data;
+ rtc::ArrayView<float> downsampled_capture(downsampled_capture_data.data(),
+ sub_block_size_);
+
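+  // Downmix the capture channels to a single channel and decimate it to the
+  // sub-block rate used by the matched filter.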
+ std::array<float, kBlockSize> downmixed_capture;
+ capture_mixer_.ProduceOutput(capture, downmixed_capture);
+ capture_decimator_.Decimate(downmixed_capture, downsampled_capture);
+ data_dumper_->DumpWav("aec3_capture_decimator_output",
+ downsampled_capture.size(), downsampled_capture.data(),
+ 16000 / down_sampling_factor_, 1);
+ matched_filter_.Update(render_buffer, downsampled_capture,
+ matched_filter_lag_aggregator_.ReliableDelayFound());
+
+ absl::optional<DelayEstimate> aggregated_matched_filter_lag =
+ matched_filter_lag_aggregator_.Aggregate(
+ matched_filter_.GetBestLagEstimate());
+
+ // Run clockdrift detection.
+ if (aggregated_matched_filter_lag &&
+ (*aggregated_matched_filter_lag).quality ==
+ DelayEstimate::Quality::kRefined)
+ clockdrift_detector_.Update(
+ matched_filter_lag_aggregator_.GetDelayAtHighestPeak());
+
+ // TODO(peah): Move this logging outside of this class once EchoCanceller3
+ // development is done.
+ data_dumper_->DumpRaw(
+ "aec3_echo_path_delay_estimator_delay",
+ aggregated_matched_filter_lag
+ ? static_cast<int>(aggregated_matched_filter_lag->delay *
+ down_sampling_factor_)
+ : -1);
+
+ // Return the detected delay in samples as the aggregated matched filter lag
+ // compensated by the down sampling factor for the signal being correlated.
+ if (aggregated_matched_filter_lag) {
+ aggregated_matched_filter_lag->delay *= down_sampling_factor_;
+ }
+
+ if (old_aggregated_lag_ && aggregated_matched_filter_lag &&
+ old_aggregated_lag_->delay == aggregated_matched_filter_lag->delay) {
+ ++consistent_estimate_counter_;
+ } else {
+ consistent_estimate_counter_ = 0;
+ }
+ old_aggregated_lag_ = aggregated_matched_filter_lag;
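+  // If the lag estimate has been unchanged for more than half a second, reset
+  // the matched filter while keeping the lag aggregator state.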
+ constexpr size_t kNumBlocksPerSecondBy2 = kNumBlocksPerSecond / 2;
+ if (consistent_estimate_counter_ > kNumBlocksPerSecondBy2) {
+ Reset(false, false);
+ }
+
+ return aggregated_matched_filter_lag;
+}
+
+void EchoPathDelayEstimator::Reset(bool reset_lag_aggregator,
+ bool reset_delay_confidence) {
+ if (reset_lag_aggregator) {
+ matched_filter_lag_aggregator_.Reset(reset_delay_confidence);
+ }
+ matched_filter_.Reset();
+ old_aggregated_lag_ = absl::nullopt;
+ consistent_estimate_counter_ = 0;
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/echo_path_delay_estimator.h b/third_party/libwebrtc/modules/audio_processing/aec3/echo_path_delay_estimator.h
new file mode 100644
index 0000000000..b24d0a29ec
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/echo_path_delay_estimator.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_ECHO_PATH_DELAY_ESTIMATOR_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_ECHO_PATH_DELAY_ESTIMATOR_H_
+
+#include <stddef.h>
+
+#include "absl/types/optional.h"
+#include "api/array_view.h"
+#include "modules/audio_processing/aec3/alignment_mixer.h"
+#include "modules/audio_processing/aec3/block.h"
+#include "modules/audio_processing/aec3/clockdrift_detector.h"
+#include "modules/audio_processing/aec3/decimator.h"
+#include "modules/audio_processing/aec3/delay_estimate.h"
+#include "modules/audio_processing/aec3/matched_filter.h"
+#include "modules/audio_processing/aec3/matched_filter_lag_aggregator.h"
+
+namespace webrtc {
+
+class ApmDataDumper;
+struct DownsampledRenderBuffer;
+struct EchoCanceller3Config;
+
+// Estimates the delay of the echo path.
+class EchoPathDelayEstimator {
+ public:
+ EchoPathDelayEstimator(ApmDataDumper* data_dumper,
+ const EchoCanceller3Config& config,
+ size_t num_capture_channels);
+ ~EchoPathDelayEstimator();
+
+ EchoPathDelayEstimator(const EchoPathDelayEstimator&) = delete;
+ EchoPathDelayEstimator& operator=(const EchoPathDelayEstimator&) = delete;
+
+ // Resets the estimation. If the delay confidence is reset, the reset behavior
+ // is as if the call is restarted.
+ void Reset(bool reset_delay_confidence);
+
+  // Produces a delay estimate if one is available.
+ absl::optional<DelayEstimate> EstimateDelay(
+ const DownsampledRenderBuffer& render_buffer,
+ const Block& capture);
+
+ // Log delay estimator properties.
+ void LogDelayEstimationProperties(int sample_rate_hz, size_t shift) const {
+ matched_filter_.LogFilterProperties(sample_rate_hz, shift,
+ down_sampling_factor_);
+ }
+
+ // Returns the level of detected clockdrift.
+ ClockdriftDetector::Level Clockdrift() const {
+ return clockdrift_detector_.ClockdriftLevel();
+ }
+
+ private:
+ ApmDataDumper* const data_dumper_;
+ const size_t down_sampling_factor_;
+ const size_t sub_block_size_;
+ AlignmentMixer capture_mixer_;
+ Decimator capture_decimator_;
+ MatchedFilter matched_filter_;
+ MatchedFilterLagAggregator matched_filter_lag_aggregator_;
+ absl::optional<DelayEstimate> old_aggregated_lag_;
+ size_t consistent_estimate_counter_ = 0;
+ ClockdriftDetector clockdrift_detector_;
+
+ // Internal reset method with more granularity.
+ void Reset(bool reset_lag_aggregator, bool reset_delay_confidence);
+};
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AEC3_ECHO_PATH_DELAY_ESTIMATOR_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/echo_path_delay_estimator_unittest.cc b/third_party/libwebrtc/modules/audio_processing/aec3/echo_path_delay_estimator_unittest.cc
new file mode 100644
index 0000000000..810b0ae185
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/echo_path_delay_estimator_unittest.cc
@@ -0,0 +1,185 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/echo_path_delay_estimator.h"
+
+#include <algorithm>
+#include <string>
+
+#include "api/audio/echo_canceller3_config.h"
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "modules/audio_processing/aec3/render_delay_buffer.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+#include "modules/audio_processing/test/echo_canceller_test_tools.h"
+#include "rtc_base/random.h"
+#include "rtc_base/strings/string_builder.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+std::string ProduceDebugText(size_t delay, size_t down_sampling_factor) {
+ rtc::StringBuilder ss;
+ ss << "Delay: " << delay;
+ ss << ", Down sampling factor: " << down_sampling_factor;
+ return ss.Release();
+}
+
+} // namespace
+
+class EchoPathDelayEstimatorMultiChannel
+ : public ::testing::Test,
+ public ::testing::WithParamInterface<std::tuple<size_t, size_t>> {};
+
+INSTANTIATE_TEST_SUITE_P(MultiChannel,
+ EchoPathDelayEstimatorMultiChannel,
+ ::testing::Combine(::testing::Values(1, 2, 3, 6, 8),
+ ::testing::Values(1, 2, 4)));
+
+// Verifies that the basic API calls work.
+TEST_P(EchoPathDelayEstimatorMultiChannel, BasicApiCalls) {
+ const size_t num_render_channels = std::get<0>(GetParam());
+ const size_t num_capture_channels = std::get<1>(GetParam());
+ constexpr int kSampleRateHz = 48000;
+ constexpr size_t kNumBands = NumBandsForRate(kSampleRateHz);
+ ApmDataDumper data_dumper(0);
+ EchoCanceller3Config config;
+ std::unique_ptr<RenderDelayBuffer> render_delay_buffer(
+ RenderDelayBuffer::Create(config, kSampleRateHz, num_render_channels));
+ EchoPathDelayEstimator estimator(&data_dumper, config, num_capture_channels);
+ Block render(kNumBands, num_render_channels);
+ Block capture(/*num_bands=*/1, num_capture_channels);
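+  // Exercise the render insertion and delay estimation paths; the test only
+  // checks that the calls complete without triggering any DCHECKs.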
+ for (size_t k = 0; k < 100; ++k) {
+ render_delay_buffer->Insert(render);
+ estimator.EstimateDelay(render_delay_buffer->GetDownsampledRenderBuffer(),
+ capture);
+ }
+}
+
+// Verifies that the delay estimator produces the correct delay for
+// artificially delayed signals.
+TEST(EchoPathDelayEstimator, DelayEstimation) {
+ constexpr size_t kNumRenderChannels = 1;
+ constexpr size_t kNumCaptureChannels = 1;
+ constexpr int kSampleRateHz = 48000;
+ constexpr size_t kNumBands = NumBandsForRate(kSampleRateHz);
+
+ Random random_generator(42U);
+ Block render(kNumBands, kNumRenderChannels);
+ Block capture(/*num_bands=*/1, kNumCaptureChannels);
+ ApmDataDumper data_dumper(0);
+ constexpr size_t kDownSamplingFactors[] = {2, 4, 8};
+ for (auto down_sampling_factor : kDownSamplingFactors) {
+ EchoCanceller3Config config;
+ config.delay.delay_headroom_samples = 0;
+ config.delay.down_sampling_factor = down_sampling_factor;
+ config.delay.num_filters = 10;
+ for (size_t delay_samples : {30, 64, 150, 200, 800, 4000}) {
+ SCOPED_TRACE(ProduceDebugText(delay_samples, down_sampling_factor));
+ std::unique_ptr<RenderDelayBuffer> render_delay_buffer(
+ RenderDelayBuffer::Create(config, kSampleRateHz, kNumRenderChannels));
+ DelayBuffer<float> signal_delay_buffer(delay_samples);
+ EchoPathDelayEstimator estimator(&data_dumper, config,
+ kNumCaptureChannels);
+
+ absl::optional<DelayEstimate> estimated_delay_samples;
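+      // Process enough blocks for the estimator to converge, plus extra
+      // blocks to cover the artificially introduced delay.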
+ for (size_t k = 0; k < (500 + (delay_samples) / kBlockSize); ++k) {
+ RandomizeSampleVector(&random_generator,
+ render.View(/*band=*/0, /*channel=*/0));
+ signal_delay_buffer.Delay(render.View(/*band=*/0, /*channel=*/0),
+ capture.View(/*band=*/0, /*channel=*/0));
+ render_delay_buffer->Insert(render);
+
+ if (k == 0) {
+ render_delay_buffer->Reset();
+ }
+
+ render_delay_buffer->PrepareCaptureProcessing();
+
+ auto estimate = estimator.EstimateDelay(
+ render_delay_buffer->GetDownsampledRenderBuffer(), capture);
+
+ if (estimate) {
+ estimated_delay_samples = estimate;
+ }
+ }
+
+ if (estimated_delay_samples) {
+ // Allow estimated delay to be off by a block as internally the delay is
+ // quantized with an error up to a block.
+ size_t delay_ds = delay_samples / down_sampling_factor;
+ size_t estimated_delay_ds =
+ estimated_delay_samples->delay / down_sampling_factor;
+ EXPECT_NEAR(delay_ds, estimated_delay_ds,
+ kBlockSize / down_sampling_factor);
+ } else {
+ ADD_FAILURE();
+ }
+ }
+ }
+}
+
+// Verifies that the delay estimator does not produce delay estimates for
+// low-level render signals.
+TEST(EchoPathDelayEstimator, NoDelayEstimatesForLowLevelRenderSignals) {
+ constexpr size_t kNumRenderChannels = 1;
+ constexpr size_t kNumCaptureChannels = 1;
+ constexpr int kSampleRateHz = 48000;
+ constexpr size_t kNumBands = NumBandsForRate(kSampleRateHz);
+ Random random_generator(42U);
+ EchoCanceller3Config config;
+ Block render(kNumBands, kNumRenderChannels);
+ Block capture(/*num_bands=*/1, kNumCaptureChannels);
+ ApmDataDumper data_dumper(0);
+ EchoPathDelayEstimator estimator(&data_dumper, config, kNumCaptureChannels);
+ std::unique_ptr<RenderDelayBuffer> render_delay_buffer(
+ RenderDelayBuffer::Create(EchoCanceller3Config(), kSampleRateHz,
+ kNumRenderChannels));
+ for (size_t k = 0; k < 100; ++k) {
+ RandomizeSampleVector(&random_generator,
+ render.View(/*band=*/0, /*channel=*/0));
+ for (auto& render_k : render.View(/*band=*/0, /*channel=*/0)) {
+ render_k *= 100.f / 32767.f;
+ }
+ std::copy(render.begin(/*band=*/0, /*channel=*/0),
+ render.end(/*band=*/0, /*channel=*/0),
+ capture.begin(/*band*/ 0, /*channel=*/0));
+ render_delay_buffer->Insert(render);
+ render_delay_buffer->PrepareCaptureProcessing();
+ EXPECT_FALSE(estimator.EstimateDelay(
+ render_delay_buffer->GetDownsampledRenderBuffer(), capture));
+ }
+}
+
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+
+// Verifies the check for the render blocksize.
+// TODO(peah): Re-enable the test once the issue with memory leaks during DEATH
+// tests on test bots has been fixed.
+TEST(EchoPathDelayEstimatorDeathTest, DISABLED_WrongRenderBlockSize) {
+ ApmDataDumper data_dumper(0);
+ EchoCanceller3Config config;
+ EchoPathDelayEstimator estimator(&data_dumper, config, 1);
+ std::unique_ptr<RenderDelayBuffer> render_delay_buffer(
+ RenderDelayBuffer::Create(config, 48000, 1));
+ Block capture(/*num_bands=*/1, /*num_channels=*/1);
+ EXPECT_DEATH(estimator.EstimateDelay(
+ render_delay_buffer->GetDownsampledRenderBuffer(), capture),
+ "");
+}
+
+// Verifies the check for non-null data dumper.
+TEST(EchoPathDelayEstimatorDeathTest, NullDataDumper) {
+ EXPECT_DEATH(EchoPathDelayEstimator(nullptr, EchoCanceller3Config(), 1), "");
+}
+
+#endif
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/echo_path_variability.cc b/third_party/libwebrtc/modules/audio_processing/aec3/echo_path_variability.cc
new file mode 100644
index 0000000000..0ae9cff98e
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/echo_path_variability.cc
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/echo_path_variability.h"
+
+namespace webrtc {
+
+EchoPathVariability::EchoPathVariability(bool gain_change,
+ DelayAdjustment delay_change,
+ bool clock_drift)
+ : gain_change(gain_change),
+ delay_change(delay_change),
+ clock_drift(clock_drift) {}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/echo_path_variability.h b/third_party/libwebrtc/modules/audio_processing/aec3/echo_path_variability.h
new file mode 100644
index 0000000000..78e4f64b2b
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/echo_path_variability.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_ECHO_PATH_VARIABILITY_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_ECHO_PATH_VARIABILITY_H_
+
+namespace webrtc {
+
+struct EchoPathVariability {
+ enum class DelayAdjustment {
+ kNone,
+ kBufferFlush,
+ kNewDetectedDelay
+ };
+
+ EchoPathVariability(bool gain_change,
+ DelayAdjustment delay_change,
+ bool clock_drift);
+
+ bool AudioPathChanged() const {
+ return gain_change || delay_change != DelayAdjustment::kNone;
+ }
+ bool gain_change;
+ DelayAdjustment delay_change;
+ bool clock_drift;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AEC3_ECHO_PATH_VARIABILITY_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/echo_path_variability_unittest.cc b/third_party/libwebrtc/modules/audio_processing/aec3/echo_path_variability_unittest.cc
new file mode 100644
index 0000000000..0f10f95f72
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/echo_path_variability_unittest.cc
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/echo_path_variability.h"
+
+#include "test/gtest.h"
+
+namespace webrtc {
+
+TEST(EchoPathVariability, CorrectBehavior) {
+ // Test correct passing and reporting of the gain change information.
+ EchoPathVariability v(
+ true, EchoPathVariability::DelayAdjustment::kNewDetectedDelay, false);
+ EXPECT_TRUE(v.gain_change);
+ EXPECT_TRUE(v.delay_change ==
+ EchoPathVariability::DelayAdjustment::kNewDetectedDelay);
+ EXPECT_TRUE(v.AudioPathChanged());
+ EXPECT_FALSE(v.clock_drift);
+
+ v = EchoPathVariability(true, EchoPathVariability::DelayAdjustment::kNone,
+ false);
+ EXPECT_TRUE(v.gain_change);
+ EXPECT_TRUE(v.delay_change == EchoPathVariability::DelayAdjustment::kNone);
+ EXPECT_TRUE(v.AudioPathChanged());
+ EXPECT_FALSE(v.clock_drift);
+
+ v = EchoPathVariability(
+ false, EchoPathVariability::DelayAdjustment::kNewDetectedDelay, false);
+ EXPECT_FALSE(v.gain_change);
+ EXPECT_TRUE(v.delay_change ==
+ EchoPathVariability::DelayAdjustment::kNewDetectedDelay);
+ EXPECT_TRUE(v.AudioPathChanged());
+ EXPECT_FALSE(v.clock_drift);
+
+ v = EchoPathVariability(false, EchoPathVariability::DelayAdjustment::kNone,
+ false);
+ EXPECT_FALSE(v.gain_change);
+ EXPECT_TRUE(v.delay_change == EchoPathVariability::DelayAdjustment::kNone);
+ EXPECT_FALSE(v.AudioPathChanged());
+ EXPECT_FALSE(v.clock_drift);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/echo_remover.cc b/third_party/libwebrtc/modules/audio_processing/aec3/echo_remover.cc
new file mode 100644
index 0000000000..673d88af03
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/echo_remover.cc
@@ -0,0 +1,521 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/audio_processing/aec3/echo_remover.h"
+
+#include <math.h>
+#include <stddef.h>
+
+#include <algorithm>
+#include <array>
+#include <atomic>
+#include <cmath>
+#include <memory>
+
+#include "api/array_view.h"
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "modules/audio_processing/aec3/aec3_fft.h"
+#include "modules/audio_processing/aec3/aec_state.h"
+#include "modules/audio_processing/aec3/comfort_noise_generator.h"
+#include "modules/audio_processing/aec3/echo_path_variability.h"
+#include "modules/audio_processing/aec3/echo_remover_metrics.h"
+#include "modules/audio_processing/aec3/fft_data.h"
+#include "modules/audio_processing/aec3/render_buffer.h"
+#include "modules/audio_processing/aec3/render_signal_analyzer.h"
+#include "modules/audio_processing/aec3/residual_echo_estimator.h"
+#include "modules/audio_processing/aec3/subtractor.h"
+#include "modules/audio_processing/aec3/subtractor_output.h"
+#include "modules/audio_processing/aec3/suppression_filter.h"
+#include "modules/audio_processing/aec3/suppression_gain.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+namespace {
+
+// Maximum number of channels for which the capture channel data is stored on
+// the stack. If the number of channels is larger than this, the data is stored
+// using scratch memory that is pre-allocated on the heap. This partitioning
+// avoids wasting heap space for the more common channel counts, while still
+// supporting higher channel counts, which a fixed stack-only maximum would
+// rule out.
+constexpr size_t kMaxNumChannelsOnStack = 2;
+
+// Returns the number of channels to store on the heap; this is non-zero only
+// when the number of capture channels exceeds the predefined number of
+// channels that fit on the stack.
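+// For instance, with kMaxNumChannelsOnStack == 2, hypothetical channel counts
+// map as follows:
+//   NumChannelsOnHeap(1);  // -> 0; mono capture fits on the stack.
+//   NumChannelsOnHeap(8);  // -> 8; all scratch data is placed on the heap.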
+size_t NumChannelsOnHeap(size_t num_capture_channels) {
+ return num_capture_channels > kMaxNumChannelsOnStack ? num_capture_channels
+ : 0;
+}
+
+void LinearEchoPower(const FftData& E,
+ const FftData& Y,
+ std::array<float, kFftLengthBy2Plus1>* S2) {
+ for (size_t k = 0; k < E.re.size(); ++k) {
+ (*S2)[k] = (Y.re[k] - E.re[k]) * (Y.re[k] - E.re[k]) +
+ (Y.im[k] - E.im[k]) * (Y.im[k] - E.im[k]);
+ }
+}
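+// Since the subtractor output is E = Y - S, where S is the linear echo
+// estimate, the quantity computed above is the per-bin power of the linear
+// echo estimate, |S|^2 = (Y.re - E.re)^2 + (Y.im - E.im)^2.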
+
+// Fades between two input signals using a fixed-size transition.
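+// When `from` and `to` differ, the weight on `to` ramps from 1/31 on the
+// first sample to 30/31 on the 30th (kTransitionSize == 30), after which `to`
+// is copied verbatim; illustratively:
+//   out[0]  = (1.f / 31.f) * to[0]  + (30.f / 31.f) * from[0];
+//   out[29] = (30.f / 31.f) * to[29] + (1.f / 31.f) * from[29];
+//   out[30] = to[30];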
+void SignalTransition(rtc::ArrayView<const float> from,
+ rtc::ArrayView<const float> to,
+ rtc::ArrayView<float> out) {
+ if (from == to) {
+ RTC_DCHECK_EQ(to.size(), out.size());
+ std::copy(to.begin(), to.end(), out.begin());
+ } else {
+ constexpr size_t kTransitionSize = 30;
+ constexpr float kOneByTransitionSizePlusOne = 1.f / (kTransitionSize + 1);
+
+ RTC_DCHECK_EQ(from.size(), to.size());
+ RTC_DCHECK_EQ(from.size(), out.size());
+ RTC_DCHECK_LE(kTransitionSize, out.size());
+
+ for (size_t k = 0; k < kTransitionSize; ++k) {
+ float a = (k + 1) * kOneByTransitionSizePlusOne;
+ out[k] = a * to[k] + (1.f - a) * from[k];
+ }
+
+ std::copy(to.begin() + kTransitionSize, to.end(),
+ out.begin() + kTransitionSize);
+ }
+}
+
+// Computes a windowed (square root Hanning) padded FFT and updates the related
+// memory.
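+// The FFT is computed over the concatenation [v_old, v], so copying v into
+// v_old afterwards provides the 50% block overlap for the next call.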
+void WindowedPaddedFft(const Aec3Fft& fft,
+ rtc::ArrayView<const float> v,
+ rtc::ArrayView<float> v_old,
+ FftData* V) {
+ fft.PaddedFft(v, v_old, Aec3Fft::Window::kSqrtHanning, V);
+ std::copy(v.begin(), v.end(), v_old.begin());
+}
+
+// Class for removing the echo from the capture signal.
+class EchoRemoverImpl final : public EchoRemover {
+ public:
+ EchoRemoverImpl(const EchoCanceller3Config& config,
+ int sample_rate_hz,
+ size_t num_render_channels,
+ size_t num_capture_channels);
+ ~EchoRemoverImpl() override;
+ EchoRemoverImpl(const EchoRemoverImpl&) = delete;
+ EchoRemoverImpl& operator=(const EchoRemoverImpl&) = delete;
+
+ void GetMetrics(EchoControl::Metrics* metrics) const override;
+
+ // Removes the echo from a block of samples from the capture signal. The
+ // supplied render signal is assumed to be pre-aligned with the capture
+ // signal.
+ void ProcessCapture(EchoPathVariability echo_path_variability,
+ bool capture_signal_saturation,
+ const absl::optional<DelayEstimate>& external_delay,
+ RenderBuffer* render_buffer,
+ Block* linear_output,
+ Block* capture) override;
+
+ // Updates the status on whether echo leakage is detected in the output of the
+ // echo remover.
+ void UpdateEchoLeakageStatus(bool leakage_detected) override {
+ echo_leakage_detected_ = leakage_detected;
+ }
+
+ void SetCaptureOutputUsage(bool capture_output_used) override {
+ capture_output_used_ = capture_output_used;
+ }
+
+ private:
+ // Selects whichever of the coarse and refined linear filter outputs is most
+ // appropriate to pass to the suppressor, and forms the linear filter output
+ // by smoothly transitioning between them.
+ void FormLinearFilterOutput(const SubtractorOutput& subtractor_output,
+ rtc::ArrayView<float> output);
+
+ static std::atomic<int> instance_count_;
+ const EchoCanceller3Config config_;
+ const Aec3Fft fft_;
+ std::unique_ptr<ApmDataDumper> data_dumper_;
+ const Aec3Optimization optimization_;
+ const int sample_rate_hz_;
+ const size_t num_render_channels_;
+ const size_t num_capture_channels_;
+ const bool use_coarse_filter_output_;
+ Subtractor subtractor_;
+ SuppressionGain suppression_gain_;
+ ComfortNoiseGenerator cng_;
+ SuppressionFilter suppression_filter_;
+ RenderSignalAnalyzer render_signal_analyzer_;
+ ResidualEchoEstimator residual_echo_estimator_;
+ bool echo_leakage_detected_ = false;
+ bool capture_output_used_ = true;
+ AecState aec_state_;
+ EchoRemoverMetrics metrics_;
+ std::vector<std::array<float, kFftLengthBy2>> e_old_;
+ std::vector<std::array<float, kFftLengthBy2>> y_old_;
+ size_t block_counter_ = 0;
+ int gain_change_hangover_ = 0;
+ bool refined_filter_output_last_selected_ = true;
+
+ std::vector<std::array<float, kFftLengthBy2>> e_heap_;
+ std::vector<std::array<float, kFftLengthBy2Plus1>> Y2_heap_;
+ std::vector<std::array<float, kFftLengthBy2Plus1>> E2_heap_;
+ std::vector<std::array<float, kFftLengthBy2Plus1>> R2_heap_;
+ std::vector<std::array<float, kFftLengthBy2Plus1>> R2_unbounded_heap_;
+ std::vector<std::array<float, kFftLengthBy2Plus1>> S2_linear_heap_;
+ std::vector<FftData> Y_heap_;
+ std::vector<FftData> E_heap_;
+ std::vector<FftData> comfort_noise_heap_;
+ std::vector<FftData> high_band_comfort_noise_heap_;
+ std::vector<SubtractorOutput> subtractor_output_heap_;
+};
+
+std::atomic<int> EchoRemoverImpl::instance_count_(0);
+
+EchoRemoverImpl::EchoRemoverImpl(const EchoCanceller3Config& config,
+ int sample_rate_hz,
+ size_t num_render_channels,
+ size_t num_capture_channels)
+ : config_(config),
+ fft_(),
+ data_dumper_(new ApmDataDumper(instance_count_.fetch_add(1) + 1)),
+ optimization_(DetectOptimization()),
+ sample_rate_hz_(sample_rate_hz),
+ num_render_channels_(num_render_channels),
+ num_capture_channels_(num_capture_channels),
+ use_coarse_filter_output_(
+ config_.filter.enable_coarse_filter_output_usage),
+ subtractor_(config,
+ num_render_channels_,
+ num_capture_channels_,
+ data_dumper_.get(),
+ optimization_),
+ suppression_gain_(config_,
+ optimization_,
+ sample_rate_hz,
+ num_capture_channels),
+ cng_(config_, optimization_, num_capture_channels_),
+ suppression_filter_(optimization_,
+ sample_rate_hz_,
+ num_capture_channels_),
+ render_signal_analyzer_(config_),
+ residual_echo_estimator_(config_, num_render_channels),
+ aec_state_(config_, num_capture_channels_),
+ e_old_(num_capture_channels_, {0.f}),
+ y_old_(num_capture_channels_, {0.f}),
+ e_heap_(NumChannelsOnHeap(num_capture_channels_), {0.f}),
+ Y2_heap_(NumChannelsOnHeap(num_capture_channels_)),
+ E2_heap_(NumChannelsOnHeap(num_capture_channels_)),
+ R2_heap_(NumChannelsOnHeap(num_capture_channels_)),
+ R2_unbounded_heap_(NumChannelsOnHeap(num_capture_channels_)),
+ S2_linear_heap_(NumChannelsOnHeap(num_capture_channels_)),
+ Y_heap_(NumChannelsOnHeap(num_capture_channels_)),
+ E_heap_(NumChannelsOnHeap(num_capture_channels_)),
+ comfort_noise_heap_(NumChannelsOnHeap(num_capture_channels_)),
+ high_band_comfort_noise_heap_(NumChannelsOnHeap(num_capture_channels_)),
+ subtractor_output_heap_(NumChannelsOnHeap(num_capture_channels_)) {
+ RTC_DCHECK(ValidFullBandRate(sample_rate_hz));
+}
+
+EchoRemoverImpl::~EchoRemoverImpl() = default;
+
+void EchoRemoverImpl::GetMetrics(EchoControl::Metrics* metrics) const {
+ // Echo return loss (ERL) is inverted to go from gain to attenuation.
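+ // As a worked example, a hypothetical ErlTimeDomain() of 0.1 (the echo power
+ // is attenuated by a factor of 10 along the echo path) maps to
+ // echo_return_loss = -10 * log10(0.1) = 10 dB.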
+ metrics->echo_return_loss = -10.0 * std::log10(aec_state_.ErlTimeDomain());
+ metrics->echo_return_loss_enhancement =
+ Log2TodB(aec_state_.FullBandErleLog2());
+}
+
+void EchoRemoverImpl::ProcessCapture(
+ EchoPathVariability echo_path_variability,
+ bool capture_signal_saturation,
+ const absl::optional<DelayEstimate>& external_delay,
+ RenderBuffer* render_buffer,
+ Block* linear_output,
+ Block* capture) {
+ RTC_DCHECK(render_buffer);
+ RTC_DCHECK(capture);
+ ++block_counter_;
+ const Block& x = render_buffer->GetBlock(0);
+ Block* y = capture;
+ RTC_DCHECK_EQ(x.NumBands(), NumBandsForRate(sample_rate_hz_));
+ RTC_DCHECK_EQ(y->NumBands(), NumBandsForRate(sample_rate_hz_));
+ RTC_DCHECK_EQ(x.NumChannels(), num_render_channels_);
+ RTC_DCHECK_EQ(y->NumChannels(), num_capture_channels_);
+
+ // Stack allocated data to use when the number of channels is low.
+ std::array<std::array<float, kFftLengthBy2>, kMaxNumChannelsOnStack> e_stack;
+ std::array<std::array<float, kFftLengthBy2Plus1>, kMaxNumChannelsOnStack>
+ Y2_stack;
+ std::array<std::array<float, kFftLengthBy2Plus1>, kMaxNumChannelsOnStack>
+ E2_stack;
+ std::array<std::array<float, kFftLengthBy2Plus1>, kMaxNumChannelsOnStack>
+ R2_stack;
+ std::array<std::array<float, kFftLengthBy2Plus1>, kMaxNumChannelsOnStack>
+ R2_unbounded_stack;
+ std::array<std::array<float, kFftLengthBy2Plus1>, kMaxNumChannelsOnStack>
+ S2_linear_stack;
+ std::array<FftData, kMaxNumChannelsOnStack> Y_stack;
+ std::array<FftData, kMaxNumChannelsOnStack> E_stack;
+ std::array<FftData, kMaxNumChannelsOnStack> comfort_noise_stack;
+ std::array<FftData, kMaxNumChannelsOnStack> high_band_comfort_noise_stack;
+ std::array<SubtractorOutput, kMaxNumChannelsOnStack> subtractor_output_stack;
+
+ rtc::ArrayView<std::array<float, kFftLengthBy2>> e(e_stack.data(),
+ num_capture_channels_);
+ rtc::ArrayView<std::array<float, kFftLengthBy2Plus1>> Y2(
+ Y2_stack.data(), num_capture_channels_);
+ rtc::ArrayView<std::array<float, kFftLengthBy2Plus1>> E2(
+ E2_stack.data(), num_capture_channels_);
+ rtc::ArrayView<std::array<float, kFftLengthBy2Plus1>> R2(
+ R2_stack.data(), num_capture_channels_);
+ rtc::ArrayView<std::array<float, kFftLengthBy2Plus1>> R2_unbounded(
+ R2_unbounded_stack.data(), num_capture_channels_);
+ rtc::ArrayView<std::array<float, kFftLengthBy2Plus1>> S2_linear(
+ S2_linear_stack.data(), num_capture_channels_);
+ rtc::ArrayView<FftData> Y(Y_stack.data(), num_capture_channels_);
+ rtc::ArrayView<FftData> E(E_stack.data(), num_capture_channels_);
+ rtc::ArrayView<FftData> comfort_noise(comfort_noise_stack.data(),
+ num_capture_channels_);
+ rtc::ArrayView<FftData> high_band_comfort_noise(
+ high_band_comfort_noise_stack.data(), num_capture_channels_);
+ rtc::ArrayView<SubtractorOutput> subtractor_output(
+ subtractor_output_stack.data(), num_capture_channels_);
+ if (NumChannelsOnHeap(num_capture_channels_) > 0) {
+ // If the stack-allocated space is too small, use the heap for storing the
+ // microphone data.
+ e = rtc::ArrayView<std::array<float, kFftLengthBy2>>(e_heap_.data(),
+ num_capture_channels_);
+ Y2 = rtc::ArrayView<std::array<float, kFftLengthBy2Plus1>>(
+ Y2_heap_.data(), num_capture_channels_);
+ E2 = rtc::ArrayView<std::array<float, kFftLengthBy2Plus1>>(
+ E2_heap_.data(), num_capture_channels_);
+ R2 = rtc::ArrayView<std::array<float, kFftLengthBy2Plus1>>(
+ R2_heap_.data(), num_capture_channels_);
+ R2_unbounded = rtc::ArrayView<std::array<float, kFftLengthBy2Plus1>>(
+ R2_unbounded_heap_.data(), num_capture_channels_);
+ S2_linear = rtc::ArrayView<std::array<float, kFftLengthBy2Plus1>>(
+ S2_linear_heap_.data(), num_capture_channels_);
+ Y = rtc::ArrayView<FftData>(Y_heap_.data(), num_capture_channels_);
+ E = rtc::ArrayView<FftData>(E_heap_.data(), num_capture_channels_);
+ comfort_noise = rtc::ArrayView<FftData>(comfort_noise_heap_.data(),
+ num_capture_channels_);
+ high_band_comfort_noise = rtc::ArrayView<FftData>(
+ high_band_comfort_noise_heap_.data(), num_capture_channels_);
+ subtractor_output = rtc::ArrayView<SubtractorOutput>(
+ subtractor_output_heap_.data(), num_capture_channels_);
+ }
+
+ data_dumper_->DumpWav("aec3_echo_remover_capture_input",
+ y->View(/*band=*/0, /*channel=*/0), 16000, 1);
+ data_dumper_->DumpWav("aec3_echo_remover_render_input",
+ x.View(/*band=*/0, /*channel=*/0), 16000, 1);
+ data_dumper_->DumpRaw("aec3_echo_remover_capture_input",
+ y->View(/*band=*/0, /*channel=*/0));
+ data_dumper_->DumpRaw("aec3_echo_remover_render_input",
+ x.View(/*band=*/0, /*channel=*/0));
+
+ aec_state_.UpdateCaptureSaturation(capture_signal_saturation);
+
+ if (echo_path_variability.AudioPathChanged()) {
+ // Ensure that the gain change is only acted on once per frame.
+ if (echo_path_variability.gain_change) {
+ if (gain_change_hangover_ == 0) {
+ constexpr int kMaxBlocksPerFrame = 3;
+ gain_change_hangover_ = kMaxBlocksPerFrame;
+ rtc::LoggingSeverity log_level =
+ config_.delay.log_warning_on_delay_changes ? rtc::LS_WARNING
+ : rtc::LS_VERBOSE;
+ RTC_LOG_V(log_level)
+ << "Gain change detected at block " << block_counter_;
+ } else {
+ echo_path_variability.gain_change = false;
+ }
+ }
+
+ subtractor_.HandleEchoPathChange(echo_path_variability);
+ aec_state_.HandleEchoPathChange(echo_path_variability);
+
+ if (echo_path_variability.delay_change !=
+ EchoPathVariability::DelayAdjustment::kNone) {
+ suppression_gain_.SetInitialState(true);
+ }
+ }
+ if (gain_change_hangover_ > 0) {
+ --gain_change_hangover_;
+ }
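+ // In effect, a gain change detected in one block suppresses repeated
+ // gain-change handling for the following kMaxBlocksPerFrame - 1 blocks,
+ // i.e. for the remainder of the frame.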
+
+ // Analyze the render signal.
+ render_signal_analyzer_.Update(*render_buffer,
+ aec_state_.MinDirectPathFilterDelay());
+
+ // State transition.
+ if (aec_state_.TransitionTriggered()) {
+ subtractor_.ExitInitialState();
+ suppression_gain_.SetInitialState(false);
+ }
+
+ // Perform linear echo cancellation.
+ subtractor_.Process(*render_buffer, *y, render_signal_analyzer_, aec_state_,
+ subtractor_output);
+
+ // Compute spectra.
+ for (size_t ch = 0; ch < num_capture_channels_; ++ch) {
+ FormLinearFilterOutput(subtractor_output[ch], e[ch]);
+ WindowedPaddedFft(fft_, y->View(/*band=*/0, ch), y_old_[ch], &Y[ch]);
+ WindowedPaddedFft(fft_, e[ch], e_old_[ch], &E[ch]);
+ LinearEchoPower(E[ch], Y[ch], &S2_linear[ch]);
+ Y[ch].Spectrum(optimization_, Y2[ch]);
+ E[ch].Spectrum(optimization_, E2[ch]);
+ }
+
+ // Optionally return the linear filter output.
+ if (linear_output) {
+ RTC_DCHECK_GE(1, linear_output->NumBands());
+ RTC_DCHECK_EQ(num_capture_channels_, linear_output->NumChannels());
+ for (size_t ch = 0; ch < num_capture_channels_; ++ch) {
+ std::copy(e[ch].begin(), e[ch].end(),
+ linear_output->begin(/*band=*/0, ch));
+ }
+ }
+
+ // Update the AEC state information.
+ aec_state_.Update(external_delay, subtractor_.FilterFrequencyResponses(),
+ subtractor_.FilterImpulseResponses(), *render_buffer, E2,
+ Y2, subtractor_output);
+
+ // Choose the linear output.
+ const auto& Y_fft = aec_state_.UseLinearFilterOutput() ? E : Y;
+
+ data_dumper_->DumpWav("aec3_output_linear",
+ y->View(/*band=*/0, /*channel=*/0), 16000, 1);
+ data_dumper_->DumpWav("aec3_output_linear2", kBlockSize, &e[0][0], 16000, 1);
+
+ // Estimate the comfort noise.
+ cng_.Compute(aec_state_.SaturatedCapture(), Y2, comfort_noise,
+ high_band_comfort_noise);
+
+ // Only perform the processing below if the output of the audio processing
+ // module is used.
+ std::array<float, kFftLengthBy2Plus1> G;
+ if (capture_output_used_) {
+ // Estimate the residual echo power.
+ residual_echo_estimator_.Estimate(aec_state_, *render_buffer, S2_linear, Y2,
+ suppression_gain_.IsDominantNearend(), R2,
+ R2_unbounded);
+
+ // Suppressor nearend estimate.
+ if (aec_state_.UsableLinearEstimate()) {
+ // E2 is bounded above by Y2.
+ for (size_t ch = 0; ch < num_capture_channels_; ++ch) {
+ std::transform(E2[ch].begin(), E2[ch].end(), Y2[ch].begin(),
+ E2[ch].begin(),
+ [](float a, float b) { return std::min(a, b); });
+ }
+ }
+ const auto& nearend_spectrum = aec_state_.UsableLinearEstimate() ? E2 : Y2;
+
+ // Suppressor echo estimate.
+ const auto& echo_spectrum =
+ aec_state_.UsableLinearEstimate() ? S2_linear : R2;
+
+ // Determine if the suppressor should assume clock drift.
+ const bool clock_drift = config_.echo_removal_control.has_clock_drift ||
+ echo_path_variability.clock_drift;
+
+ // Compute preferred gains.
+ float high_bands_gain;
+ suppression_gain_.GetGain(nearend_spectrum, echo_spectrum, R2, R2_unbounded,
+ cng_.NoiseSpectrum(), render_signal_analyzer_,
+ aec_state_, x, clock_drift, &high_bands_gain, &G);
+
+ suppression_filter_.ApplyGain(comfort_noise, high_band_comfort_noise, G,
+ high_bands_gain, Y_fft, y);
+
+ } else {
+ G.fill(0.f);
+ }
+
+ // Update the metrics.
+ metrics_.Update(aec_state_, cng_.NoiseSpectrum()[0], G);
+
+ // Debug outputs for the purpose of development and analysis.
+ data_dumper_->DumpWav("aec3_echo_estimate", kBlockSize,
+ &subtractor_output[0].s_refined[0], 16000, 1);
+ data_dumper_->DumpRaw("aec3_output", y->View(/*band=*/0, /*channel=*/0));
+ data_dumper_->DumpRaw("aec3_narrow_render",
+ render_signal_analyzer_.NarrowPeakBand() ? 1 : 0);
+ data_dumper_->DumpRaw("aec3_N2", cng_.NoiseSpectrum()[0]);
+ data_dumper_->DumpRaw("aec3_suppressor_gain", G);
+ data_dumper_->DumpWav("aec3_output", y->View(/*band=*/0, /*channel=*/0),
+ 16000, 1);
+ data_dumper_->DumpRaw("aec3_using_subtractor_output[0]",
+ aec_state_.UseLinearFilterOutput() ? 1 : 0);
+ data_dumper_->DumpRaw("aec3_E2", E2[0]);
+ data_dumper_->DumpRaw("aec3_S2_linear", S2_linear[0]);
+ data_dumper_->DumpRaw("aec3_Y2", Y2[0]);
+ data_dumper_->DumpRaw(
+ "aec3_X2", render_buffer->Spectrum(
+ aec_state_.MinDirectPathFilterDelay())[/*channel=*/0]);
+ data_dumper_->DumpRaw("aec3_R2", R2[0]);
+ data_dumper_->DumpRaw("aec3_filter_delay",
+ aec_state_.MinDirectPathFilterDelay());
+ data_dumper_->DumpRaw("aec3_capture_saturation",
+ aec_state_.SaturatedCapture() ? 1 : 0);
+}
+
+void EchoRemoverImpl::FormLinearFilterOutput(
+ const SubtractorOutput& subtractor_output,
+ rtc::ArrayView<float> output) {
+ RTC_DCHECK_EQ(subtractor_output.e_refined.size(), output.size());
+ RTC_DCHECK_EQ(subtractor_output.e_coarse.size(), output.size());
+ bool use_refined_output = true;
+ if (use_coarse_filter_output_) {
+ // As the output of the refined adaptive filter should generally be better
+ // than the coarse filter output, apply a margin and thresholds before
+ // choosing the coarse filter output.
+ if (subtractor_output.e2_coarse < 0.9f * subtractor_output.e2_refined &&
+ subtractor_output.y2 > 30.f * 30.f * kBlockSize &&
+ (subtractor_output.s2_refined > 60.f * 60.f * kBlockSize ||
+ subtractor_output.s2_coarse > 60.f * 60.f * kBlockSize)) {
+ use_refined_output = false;
+ } else {
+ // If the refined filter has diverged, choose the filter output that has
+ // the lowest power.
+ if (subtractor_output.e2_coarse < subtractor_output.e2_refined &&
+ subtractor_output.y2 < subtractor_output.e2_refined) {
+ use_refined_output = false;
+ }
+ }
+ }
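+ // Roughly speaking, the y2 threshold above corresponds to an RMS capture
+ // level of 30 over the block and the s2 thresholds to an RMS echo estimate
+ // level of 60, since each threshold is the squared level times kBlockSize.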
+
+ SignalTransition(refined_filter_output_last_selected_
+ ? subtractor_output.e_refined
+ : subtractor_output.e_coarse,
+ use_refined_output ? subtractor_output.e_refined
+ : subtractor_output.e_coarse,
+ output);
+ refined_filter_output_last_selected_ = use_refined_output;
+}
+
+} // namespace
+
+EchoRemover* EchoRemover::Create(const EchoCanceller3Config& config,
+ int sample_rate_hz,
+ size_t num_render_channels,
+ size_t num_capture_channels) {
+ return new EchoRemoverImpl(config, sample_rate_hz, num_render_channels,
+ num_capture_channels);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/echo_remover.h b/third_party/libwebrtc/modules/audio_processing/aec3/echo_remover.h
new file mode 100644
index 0000000000..f2f4f5e64d
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/echo_remover.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_ECHO_REMOVER_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_ECHO_REMOVER_H_
+
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/audio/echo_canceller3_config.h"
+#include "api/audio/echo_control.h"
+#include "modules/audio_processing/aec3/block.h"
+#include "modules/audio_processing/aec3/delay_estimate.h"
+#include "modules/audio_processing/aec3/echo_path_variability.h"
+#include "modules/audio_processing/aec3/render_buffer.h"
+
+namespace webrtc {
+
+// Class for removing the echo from the capture signal.
+class EchoRemover {
+ public:
+ static EchoRemover* Create(const EchoCanceller3Config& config,
+ int sample_rate_hz,
+ size_t num_render_channels,
+ size_t num_capture_channels);
+ virtual ~EchoRemover() = default;
+
+ // Get current metrics.
+ virtual void GetMetrics(EchoControl::Metrics* metrics) const = 0;
+
+ // Removes the echo from a block of samples from the capture signal. The
+ // supplied render signal is assumed to be pre-aligned with the capture
+ // signal.
+ virtual void ProcessCapture(
+ EchoPathVariability echo_path_variability,
+ bool capture_signal_saturation,
+ const absl::optional<DelayEstimate>& external_delay,
+ RenderBuffer* render_buffer,
+ Block* linear_output,
+ Block* capture) = 0;
+
+ // Updates the status on whether echo leakage is detected in the output of the
+ // echo remover.
+ virtual void UpdateEchoLeakageStatus(bool leakage_detected) = 0;
+
+ // Specifies whether the capture output will be used. The purpose of this is
+ // to allow the echo remover to deactivate some of the processing when the
+ // resulting output is not used anyway, for instance when the endpoint is
+ // muted.
+ virtual void SetCaptureOutputUsage(bool capture_output_used) = 0;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AEC3_ECHO_REMOVER_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/echo_remover_metrics.cc b/third_party/libwebrtc/modules/audio_processing/aec3/echo_remover_metrics.cc
new file mode 100644
index 0000000000..c3fc80773a
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/echo_remover_metrics.cc
@@ -0,0 +1,157 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/echo_remover_metrics.h"
+
+#include <math.h>
+#include <stddef.h>
+
+#include <algorithm>
+#include <cmath>
+#include <numeric>
+
+#include "rtc_base/checks.h"
+#include "rtc_base/numerics/safe_minmax.h"
+#include "system_wrappers/include/metrics.h"
+
+namespace webrtc {
+
+EchoRemoverMetrics::DbMetric::DbMetric() : DbMetric(0.f, 0.f, 0.f) {}
+EchoRemoverMetrics::DbMetric::DbMetric(float sum_value,
+ float floor_value,
+ float ceil_value)
+ : sum_value(sum_value), floor_value(floor_value), ceil_value(ceil_value) {}
+
+void EchoRemoverMetrics::DbMetric::Update(float value) {
+ sum_value += value;
+ floor_value = std::min(floor_value, value);
+ ceil_value = std::max(ceil_value, value);
+}
+
+void EchoRemoverMetrics::DbMetric::UpdateInstant(float value) {
+ sum_value = value;
+ floor_value = std::min(floor_value, value);
+ ceil_value = std::max(ceil_value, value);
+}
+
+EchoRemoverMetrics::EchoRemoverMetrics() {
+ ResetMetrics();
+}
+
+void EchoRemoverMetrics::ResetMetrics() {
+ erl_time_domain_ = DbMetric(0.f, 10000.f, 0.000f);
+ erle_time_domain_ = DbMetric(0.f, 0.f, 1000.f);
+ saturated_capture_ = false;
+}
+
+void EchoRemoverMetrics::Update(
+ const AecState& aec_state,
+ const std::array<float, kFftLengthBy2Plus1>& comfort_noise_spectrum,
+ const std::array<float, kFftLengthBy2Plus1>& suppressor_gain) {
+ metrics_reported_ = false;
+ if (++block_counter_ <= kMetricsCollectionBlocks) {
+ erl_time_domain_.UpdateInstant(aec_state.ErlTimeDomain());
+ erle_time_domain_.UpdateInstant(aec_state.FullBandErleLog2());
+ saturated_capture_ = saturated_capture_ || aec_state.SaturatedCapture();
+ } else {
+ // Spread the metrics reporting over several blocks in order to limit the
+ // computational impact of the logarithms involved.
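+ // Concretely, the reporting spans the three blocks that follow the
+ // collection interval, so kMetricsReportingIntervalBlocks equals
+ // kMetricsCollectionBlocks + 3 (cf. the RTC_DCHECK_EQ below).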
+ switch (block_counter_) {
+ case kMetricsCollectionBlocks + 1:
+ RTC_HISTOGRAM_BOOLEAN(
+ "WebRTC.Audio.EchoCanceller.UsableLinearEstimate",
+ static_cast<int>(aec_state.UsableLinearEstimate() ? 1 : 0));
+ RTC_HISTOGRAM_COUNTS_LINEAR("WebRTC.Audio.EchoCanceller.FilterDelay",
+ aec_state.MinDirectPathFilterDelay(), 0, 30,
+ 31);
+ RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.EchoCanceller.CaptureSaturation",
+ static_cast<int>(saturated_capture_ ? 1 : 0));
+ break;
+ case kMetricsCollectionBlocks + 2:
+ RTC_HISTOGRAM_COUNTS_LINEAR(
+ "WebRTC.Audio.EchoCanceller.Erl.Value",
+ aec3::TransformDbMetricForReporting(true, 0.f, 59.f, 30.f, 1.f,
+ erl_time_domain_.sum_value),
+ 0, 59, 30);
+ RTC_HISTOGRAM_COUNTS_LINEAR(
+ "WebRTC.Audio.EchoCanceller.Erl.Max",
+ aec3::TransformDbMetricForReporting(true, 0.f, 59.f, 30.f, 1.f,
+ erl_time_domain_.ceil_value),
+ 0, 59, 30);
+ RTC_HISTOGRAM_COUNTS_LINEAR(
+ "WebRTC.Audio.EchoCanceller.Erl.Min",
+ aec3::TransformDbMetricForReporting(true, 0.f, 59.f, 30.f, 1.f,
+ erl_time_domain_.floor_value),
+ 0, 59, 30);
+ break;
+ case kMetricsCollectionBlocks + 3:
+ RTC_HISTOGRAM_COUNTS_LINEAR(
+ "WebRTC.Audio.EchoCanceller.Erle.Value",
+ aec3::TransformDbMetricForReporting(false, 0.f, 19.f, 0.f, 1.f,
+ erle_time_domain_.sum_value),
+ 0, 19, 20);
+ RTC_HISTOGRAM_COUNTS_LINEAR(
+ "WebRTC.Audio.EchoCanceller.Erle.Max",
+ aec3::TransformDbMetricForReporting(false, 0.f, 19.f, 0.f, 1.f,
+ erle_time_domain_.ceil_value),
+ 0, 19, 20);
+ RTC_HISTOGRAM_COUNTS_LINEAR(
+ "WebRTC.Audio.EchoCanceller.Erle.Min",
+ aec3::TransformDbMetricForReporting(false, 0.f, 19.f, 0.f, 1.f,
+ erle_time_domain_.floor_value),
+ 0, 19, 20);
+ metrics_reported_ = true;
+ RTC_DCHECK_EQ(kMetricsReportingIntervalBlocks, block_counter_);
+ block_counter_ = 0;
+ ResetMetrics();
+ break;
+ default:
+ RTC_DCHECK_NOTREACHED();
+ break;
+ }
+ }
+}
+
+namespace aec3 {
+
+void UpdateDbMetric(const std::array<float, kFftLengthBy2Plus1>& value,
+ std::array<EchoRemoverMetrics::DbMetric, 2>* statistic) {
+ RTC_DCHECK(statistic);
+ // Truncation is intended in the band width computation.
+ constexpr int kNumBands = 2;
+ constexpr int kBandWidth = 65 / kNumBands;
+ constexpr float kOneByBandWidth = 1.f / kBandWidth;
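+ // With kNumBands == 2 this gives kBandWidth == 32, so the averages below are
+ // formed over value[0..31] and value[32..63]; the topmost bin, value[64], is
+ // intentionally left out.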
+ RTC_DCHECK_EQ(kNumBands, statistic->size());
+ RTC_DCHECK_EQ(65, value.size());
+ for (size_t k = 0; k < statistic->size(); ++k) {
+ float average_band =
+ std::accumulate(value.begin() + kBandWidth * k,
+ value.begin() + kBandWidth * (k + 1), 0.f) *
+ kOneByBandWidth;
+ (*statistic)[k].Update(average_band);
+ }
+}
+
+int TransformDbMetricForReporting(bool negate,
+ float min_value,
+ float max_value,
+ float offset,
+ float scaling,
+ float value) {
+ float new_value = 10.f * std::log10(value * scaling + 1e-10f) + offset;
+ if (negate) {
+ new_value = -new_value;
+ }
+ return static_cast<int>(rtc::SafeClamp(new_value, min_value, max_value));
+}
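+// As a worked example, TransformDbMetricForReporting(false, 0.f, 10.f, 0.f,
+// 1.f, 100.f) computes 10 * log10(100) = 20 dB, which SafeClamp limits to the
+// maximum, giving a return value of 10.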
+
+} // namespace aec3
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/echo_remover_metrics.h b/third_party/libwebrtc/modules/audio_processing/aec3/echo_remover_metrics.h
new file mode 100644
index 0000000000..aec8084d78
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/echo_remover_metrics.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_ECHO_REMOVER_METRICS_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_ECHO_REMOVER_METRICS_H_
+
+#include <array>
+
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "modules/audio_processing/aec3/aec_state.h"
+
+namespace webrtc {
+
+// Handles the reporting of metrics for the echo remover.
+class EchoRemoverMetrics {
+ public:
+ struct DbMetric {
+ DbMetric();
+ DbMetric(float sum_value, float floor_value, float ceil_value);
+ void Update(float value);
+ void UpdateInstant(float value);
+ float sum_value;
+ float floor_value;
+ float ceil_value;
+ };
+
+ EchoRemoverMetrics();
+
+ EchoRemoverMetrics(const EchoRemoverMetrics&) = delete;
+ EchoRemoverMetrics& operator=(const EchoRemoverMetrics&) = delete;
+
+ // Updates the metric with new data.
+ void Update(
+ const AecState& aec_state,
+ const std::array<float, kFftLengthBy2Plus1>& comfort_noise_spectrum,
+ const std::array<float, kFftLengthBy2Plus1>& suppressor_gain);
+
+ // Returns true if the metrics have just been reported, otherwise false.
+ bool MetricsReported() { return metrics_reported_; }
+
+ private:
+ // Resets the metrics.
+ void ResetMetrics();
+
+ int block_counter_ = 0;
+ DbMetric erl_time_domain_;
+ DbMetric erle_time_domain_;
+ bool saturated_capture_ = false;
+ bool metrics_reported_ = false;
+};
+
+namespace aec3 {
+
+// Updates a banded metric of type DbMetric with the values in the supplied
+// array.
+void UpdateDbMetric(const std::array<float, kFftLengthBy2Plus1>& value,
+ std::array<EchoRemoverMetrics::DbMetric, 2>* statistic);
+
+// Transforms a DbMetric from the linear domain into the logarithmic domain.
+int TransformDbMetricForReporting(bool negate,
+ float min_value,
+ float max_value,
+ float offset,
+ float scaling,
+ float value);
+
+} // namespace aec3
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AEC3_ECHO_REMOVER_METRICS_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/echo_remover_metrics_unittest.cc b/third_party/libwebrtc/modules/audio_processing/aec3/echo_remover_metrics_unittest.cc
new file mode 100644
index 0000000000..45b30a9c74
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/echo_remover_metrics_unittest.cc
@@ -0,0 +1,156 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/echo_remover_metrics.h"
+
+#include <math.h>
+
+#include <cmath>
+
+#include "modules/audio_processing/aec3/aec3_fft.h"
+#include "modules/audio_processing/aec3/aec_state.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+
+// Verifies the check for non-null input.
+TEST(UpdateDbMetricDeathTest, NullValue) {
+ std::array<float, kFftLengthBy2Plus1> value;
+ value.fill(0.f);
+ EXPECT_DEATH(aec3::UpdateDbMetric(value, nullptr), "");
+}
+
+#endif
+
+// Verifies the updating functionality of UpdateDbMetric.
+TEST(UpdateDbMetric, Updating) {
+ std::array<float, kFftLengthBy2Plus1> value;
+ std::array<EchoRemoverMetrics::DbMetric, 2> statistic;
+ statistic.fill(EchoRemoverMetrics::DbMetric(0.f, 100.f, -100.f));
+ constexpr float kValue0 = 10.f;
+ constexpr float kValue1 = 20.f;
+ std::fill(value.begin(), value.begin() + 32, kValue0);
+ std::fill(value.begin() + 32, value.begin() + 64, kValue1);
+
+ aec3::UpdateDbMetric(value, &statistic);
+ EXPECT_FLOAT_EQ(kValue0, statistic[0].sum_value);
+ EXPECT_FLOAT_EQ(kValue0, statistic[0].ceil_value);
+ EXPECT_FLOAT_EQ(kValue0, statistic[0].floor_value);
+ EXPECT_FLOAT_EQ(kValue1, statistic[1].sum_value);
+ EXPECT_FLOAT_EQ(kValue1, statistic[1].ceil_value);
+ EXPECT_FLOAT_EQ(kValue1, statistic[1].floor_value);
+
+ aec3::UpdateDbMetric(value, &statistic);
+ EXPECT_FLOAT_EQ(2.f * kValue0, statistic[0].sum_value);
+ EXPECT_FLOAT_EQ(kValue0, statistic[0].ceil_value);
+ EXPECT_FLOAT_EQ(kValue0, statistic[0].floor_value);
+ EXPECT_FLOAT_EQ(2.f * kValue1, statistic[1].sum_value);
+ EXPECT_FLOAT_EQ(kValue1, statistic[1].ceil_value);
+ EXPECT_FLOAT_EQ(kValue1, statistic[1].floor_value);
+}
+
+// Verifies that the TransformDbMetricForReporting method produces the desired
+// output for dBFS values.
+TEST(TransformDbMetricForReporting, DbFsScaling) {
+ std::array<float, kBlockSize> x;
+ FftData X;
+ std::array<float, kFftLengthBy2Plus1> X2;
+ Aec3Fft fft;
+ x.fill(1000.f);
+ fft.ZeroPaddedFft(x, Aec3Fft::Window::kRectangular, &X);
+ X.Spectrum(Aec3Optimization::kNone, X2);
+
+ float offset = -10.f * std::log10(32768.f * 32768.f);
+ EXPECT_NEAR(offset, -90.3f, 0.1f);
+ EXPECT_EQ(
+ static_cast<int>(30.3f),
+ aec3::TransformDbMetricForReporting(
+ true, 0.f, 90.f, offset, 1.f / (kBlockSize * kBlockSize), X2[0]));
+}
+
+// Verifies that the TransformDbMetricForReporting method is able to properly
+// limit the output.
+TEST(TransformDbMetricForReporting, Limits) {
+ EXPECT_EQ(0, aec3::TransformDbMetricForReporting(false, 0.f, 10.f, 0.f, 1.f,
+ 0.001f));
+ EXPECT_EQ(10, aec3::TransformDbMetricForReporting(false, 0.f, 10.f, 0.f, 1.f,
+ 100.f));
+}
+
+// Verifies that the TransformDbMetricForReporting method is able to properly
+// negate output.
+TEST(TransformDbMetricForReporting, Negate) {
+ EXPECT_EQ(10, aec3::TransformDbMetricForReporting(true, -20.f, 20.f, 0.f, 1.f,
+ 0.1f));
+ EXPECT_EQ(-10, aec3::TransformDbMetricForReporting(true, -20.f, 20.f, 0.f,
+ 1.f, 10.f));
+}
+
+// Verify the Update functionality of DbMetric.
+TEST(DbMetric, Update) {
+ EchoRemoverMetrics::DbMetric metric(0.f, 20.f, -20.f);
+ constexpr int kNumValues = 100;
+ constexpr float kValue = 10.f;
+ for (int k = 0; k < kNumValues; ++k) {
+ metric.Update(kValue);
+ }
+ EXPECT_FLOAT_EQ(kValue * kNumValues, metric.sum_value);
+ EXPECT_FLOAT_EQ(kValue, metric.ceil_value);
+ EXPECT_FLOAT_EQ(kValue, metric.floor_value);
+}
+
+// Verify the Update functionality of DbMetric.
+TEST(DbMetric, UpdateInstant) {
+ EchoRemoverMetrics::DbMetric metric(0.f, 20.f, -20.f);
+ constexpr float kMinValue = -77.f;
+ constexpr float kMaxValue = 33.f;
+ constexpr float kLastValue = (kMinValue + kMaxValue) / 2.0f;
+ for (float value = kMinValue; value <= kMaxValue; value++)
+ metric.UpdateInstant(value);
+ metric.UpdateInstant(kLastValue);
+ EXPECT_FLOAT_EQ(kLastValue, metric.sum_value);
+ EXPECT_FLOAT_EQ(kMaxValue, metric.ceil_value);
+ EXPECT_FLOAT_EQ(kMinValue, metric.floor_value);
+}
+
+// Verify the constructor functionality of DbMetric.
+TEST(DbMetric, Constructor) {
+ EchoRemoverMetrics::DbMetric metric;
+ EXPECT_FLOAT_EQ(0.f, metric.sum_value);
+ EXPECT_FLOAT_EQ(0.f, metric.ceil_value);
+ EXPECT_FLOAT_EQ(0.f, metric.floor_value);
+
+ metric = EchoRemoverMetrics::DbMetric(1.f, 2.f, 3.f);
+ EXPECT_FLOAT_EQ(1.f, metric.sum_value);
+ EXPECT_FLOAT_EQ(2.f, metric.floor_value);
+ EXPECT_FLOAT_EQ(3.f, metric.ceil_value);
+}
+
+// Verify the general functionality of EchoRemoverMetrics.
+TEST(EchoRemoverMetrics, NormalUsage) {
+ EchoRemoverMetrics metrics;
+ AecState aec_state(EchoCanceller3Config{}, 1);
+ std::array<float, kFftLengthBy2Plus1> comfort_noise_spectrum;
+ std::array<float, kFftLengthBy2Plus1> suppressor_gain;
+ comfort_noise_spectrum.fill(10.f);
+ suppressor_gain.fill(1.f);
+ for (int j = 0; j < 3; ++j) {
+ for (int k = 0; k < kMetricsReportingIntervalBlocks - 1; ++k) {
+ metrics.Update(aec_state, comfort_noise_spectrum, suppressor_gain);
+ EXPECT_FALSE(metrics.MetricsReported());
+ }
+ metrics.Update(aec_state, comfort_noise_spectrum, suppressor_gain);
+ EXPECT_TRUE(metrics.MetricsReported());
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/echo_remover_unittest.cc b/third_party/libwebrtc/modules/audio_processing/aec3/echo_remover_unittest.cc
new file mode 100644
index 0000000000..66168ab08d
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/echo_remover_unittest.cc
@@ -0,0 +1,210 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/echo_remover.h"
+
+#include <algorithm>
+#include <memory>
+#include <numeric>
+#include <string>
+
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "modules/audio_processing/aec3/render_buffer.h"
+#include "modules/audio_processing/aec3/render_delay_buffer.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+#include "modules/audio_processing/test/echo_canceller_test_tools.h"
+#include "rtc_base/random.h"
+#include "rtc_base/strings/string_builder.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+std::string ProduceDebugText(int sample_rate_hz) {
+ rtc::StringBuilder ss;
+ ss << "Sample rate: " << sample_rate_hz;
+ return ss.Release();
+}
+
+std::string ProduceDebugText(int sample_rate_hz, int delay) {
+ rtc::StringBuilder ss(ProduceDebugText(sample_rate_hz));
+ ss << ", Delay: " << delay;
+ return ss.Release();
+}
+
+} // namespace
+
+class EchoRemoverMultiChannel
+ : public ::testing::Test,
+ public ::testing::WithParamInterface<std::tuple<size_t, size_t>> {};
+
+INSTANTIATE_TEST_SUITE_P(MultiChannel,
+ EchoRemoverMultiChannel,
+ ::testing::Combine(::testing::Values(1, 2, 8),
+ ::testing::Values(1, 2, 8)));
+
+// Verifies the basic API call sequence.
+TEST_P(EchoRemoverMultiChannel, BasicApiCalls) {
+ const size_t num_render_channels = std::get<0>(GetParam());
+ const size_t num_capture_channels = std::get<1>(GetParam());
+ absl::optional<DelayEstimate> delay_estimate;
+ for (auto rate : {16000, 32000, 48000}) {
+ SCOPED_TRACE(ProduceDebugText(rate));
+ std::unique_ptr<EchoRemover> remover(
+ EchoRemover::Create(EchoCanceller3Config(), rate, num_render_channels,
+ num_capture_channels));
+ std::unique_ptr<RenderDelayBuffer> render_buffer(RenderDelayBuffer::Create(
+ EchoCanceller3Config(), rate, num_render_channels));
+
+ Block render(NumBandsForRate(rate), num_render_channels);
+ Block capture(NumBandsForRate(rate), num_capture_channels);
+ for (size_t k = 0; k < 100; ++k) {
+ EchoPathVariability echo_path_variability(
+ k % 3 == 0 ? true : false,
+ k % 5 == 0 ? EchoPathVariability::DelayAdjustment::kNewDetectedDelay
+ : EchoPathVariability::DelayAdjustment::kNone,
+ false);
+ render_buffer->Insert(render);
+ render_buffer->PrepareCaptureProcessing();
+
+ remover->ProcessCapture(echo_path_variability, k % 2 == 0 ? true : false,
+ delay_estimate, render_buffer->GetRenderBuffer(),
+ nullptr, &capture);
+ }
+ }
+}
+
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+
+// Verifies the check for the sample rate.
+// TODO(peah): Re-enable the test once the issue with memory leaks during DEATH
+// tests on test bots has been fixed.
+TEST(EchoRemoverDeathTest, DISABLED_WrongSampleRate) {
+ EXPECT_DEATH(std::unique_ptr<EchoRemover>(
+ EchoRemover::Create(EchoCanceller3Config(), 8001, 1, 1)),
+ "");
+}
+
+// Verifies the check for the number of capture bands.
+// TODO(peah): Re-enable the test once the issue with memory leaks during DEATH
+// tests on test bots has been fixed.
+TEST(EchoRemoverDeathTest, DISABLED_WrongCaptureNumBands) {
+ absl::optional<DelayEstimate> delay_estimate;
+ for (auto rate : {16000, 32000, 48000}) {
+ SCOPED_TRACE(ProduceDebugText(rate));
+ std::unique_ptr<EchoRemover> remover(
+ EchoRemover::Create(EchoCanceller3Config(), rate, 1, 1));
+ std::unique_ptr<RenderDelayBuffer> render_buffer(
+ RenderDelayBuffer::Create(EchoCanceller3Config(), rate, 1));
+ Block capture(NumBandsForRate(rate == 48000 ? 16000 : rate + 16000), 1);
+ EchoPathVariability echo_path_variability(
+ false, EchoPathVariability::DelayAdjustment::kNone, false);
+ EXPECT_DEATH(remover->ProcessCapture(
+ echo_path_variability, false, delay_estimate,
+ render_buffer->GetRenderBuffer(), nullptr, &capture),
+ "");
+ }
+}
+
+// Verifies the check for non-null capture block.
+TEST(EchoRemoverDeathTest, NullCapture) {
+ absl::optional<DelayEstimate> delay_estimate;
+ std::unique_ptr<EchoRemover> remover(
+ EchoRemover::Create(EchoCanceller3Config(), 16000, 1, 1));
+ std::unique_ptr<RenderDelayBuffer> render_buffer(
+ RenderDelayBuffer::Create(EchoCanceller3Config(), 16000, 1));
+ EchoPathVariability echo_path_variability(
+ false, EchoPathVariability::DelayAdjustment::kNone, false);
+ EXPECT_DEATH(remover->ProcessCapture(
+ echo_path_variability, false, delay_estimate,
+ render_buffer->GetRenderBuffer(), nullptr, nullptr),
+ "");
+}
+
+#endif
+
+// Performs a sanity check that the echo_remover is able to properly
+// remove echoes.
+TEST(EchoRemover, BasicEchoRemoval) {
+ constexpr int kNumBlocksToProcess = 500;
+ Random random_generator(42U);
+ absl::optional<DelayEstimate> delay_estimate;
+ for (size_t num_channels : {1, 2, 4}) {
+ for (auto rate : {16000, 32000, 48000}) {
+ Block x(NumBandsForRate(rate), num_channels);
+ Block y(NumBandsForRate(rate), num_channels);
+ EchoPathVariability echo_path_variability(
+ false, EchoPathVariability::DelayAdjustment::kNone, false);
+ for (size_t delay_samples : {0, 64, 150, 200, 301}) {
+ SCOPED_TRACE(ProduceDebugText(rate, delay_samples));
+ EchoCanceller3Config config;
+ std::unique_ptr<EchoRemover> remover(
+ EchoRemover::Create(config, rate, num_channels, num_channels));
+ std::unique_ptr<RenderDelayBuffer> render_buffer(
+ RenderDelayBuffer::Create(config, rate, num_channels));
+ render_buffer->AlignFromDelay(delay_samples / kBlockSize);
+
+ std::vector<std::vector<std::unique_ptr<DelayBuffer<float>>>>
+ delay_buffers(x.NumBands());
+ for (size_t band = 0; band < delay_buffers.size(); ++band) {
+ delay_buffers[band].resize(x.NumChannels());
+ }
+
+ for (int band = 0; band < x.NumBands(); ++band) {
+ for (int channel = 0; channel < x.NumChannels(); ++channel) {
+ delay_buffers[band][channel].reset(
+ new DelayBuffer<float>(delay_samples));
+ }
+ }
+
+ float input_energy = 0.f;
+ float output_energy = 0.f;
+ for (int k = 0; k < kNumBlocksToProcess; ++k) {
+ const bool silence = k < 100 || (k % 100 >= 10);
+
+ for (int band = 0; band < x.NumBands(); ++band) {
+ for (int channel = 0; channel < x.NumChannels(); ++channel) {
+ if (silence) {
+ std::fill(x.begin(band, channel), x.end(band, channel), 0.f);
+ } else {
+ RandomizeSampleVector(&random_generator, x.View(band, channel));
+ }
+ delay_buffers[band][channel]->Delay(x.View(band, channel),
+ y.View(band, channel));
+ }
+ }
+
+ if (k > kNumBlocksToProcess / 2) {
+ input_energy = std::inner_product(
+ y.begin(/*band=*/0, /*channel=*/0),
+ y.end(/*band=*/0, /*channel=*/0),
+ y.begin(/*band=*/0, /*channel=*/0), input_energy);
+ }
+
+ render_buffer->Insert(x);
+ render_buffer->PrepareCaptureProcessing();
+
+ remover->ProcessCapture(echo_path_variability, false, delay_estimate,
+ render_buffer->GetRenderBuffer(), nullptr,
+ &y);
+
+ if (k > kNumBlocksToProcess / 2) {
+ output_energy = std::inner_product(
+ y.begin(/*band=*/0, /*channel=*/0),
+ y.end(/*band=*/0, /*channel=*/0),
+ y.begin(/*band=*/0, /*channel=*/0), output_energy);
+ }
+ }
+ EXPECT_GT(input_energy, 10.f * output_energy);
+ }
+ }
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/erl_estimator.cc b/third_party/libwebrtc/modules/audio_processing/aec3/erl_estimator.cc
new file mode 100644
index 0000000000..01cc33cb80
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/erl_estimator.cc
@@ -0,0 +1,146 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/erl_estimator.h"
+
+#include <algorithm>
+#include <numeric>
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+namespace {
+
+constexpr float kMinErl = 0.01f;
+constexpr float kMaxErl = 1000.f;
+
+} // namespace
+
+ErlEstimator::ErlEstimator(size_t startup_phase_length_blocks)
+ : startup_phase_length_blocks_(startup_phase_length_blocks) {
+ erl_.fill(kMaxErl);
+ hold_counters_.fill(0);
+ erl_time_domain_ = kMaxErl;
+ hold_counter_time_domain_ = 0;
+}
+
+ErlEstimator::~ErlEstimator() = default;
+
+void ErlEstimator::Reset() {
+ blocks_since_reset_ = 0;
+}
+
+void ErlEstimator::Update(
+ const std::vector<bool>& converged_filters,
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>> render_spectra,
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>>
+ capture_spectra) {
+ const size_t num_capture_channels = converged_filters.size();
+ RTC_DCHECK_EQ(capture_spectra.size(), num_capture_channels);
+
+ // Corresponds to WGN of power -46 dBFS.
+ constexpr float kX2Min = 44015068.0f;
+
+ const auto first_converged_iter =
+ std::find(converged_filters.begin(), converged_filters.end(), true);
+ const bool any_filter_converged =
+ first_converged_iter != converged_filters.end();
+
+ if (++blocks_since_reset_ < startup_phase_length_blocks_ ||
+ !any_filter_converged) {
+ return;
+ }
+
+ // Use the maximum spectrum across capture and the maximum across render.
+ std::array<float, kFftLengthBy2Plus1> max_capture_spectrum_data;
+ std::array<float, kFftLengthBy2Plus1> max_capture_spectrum =
+ capture_spectra[/*channel=*/0];
+ if (num_capture_channels > 1) {
+ // Initialize using the first channel with a converged filter.
+ const size_t first_converged =
+ std::distance(converged_filters.begin(), first_converged_iter);
+ RTC_DCHECK_GE(first_converged, 0);
+ RTC_DCHECK_LT(first_converged, num_capture_channels);
+ max_capture_spectrum_data = capture_spectra[first_converged];
+
+ for (size_t ch = first_converged + 1; ch < num_capture_channels; ++ch) {
+ if (!converged_filters[ch]) {
+ continue;
+ }
+ for (size_t k = 0; k < kFftLengthBy2Plus1; ++k) {
+ max_capture_spectrum_data[k] =
+ std::max(max_capture_spectrum_data[k], capture_spectra[ch][k]);
+ }
+ }
+ max_capture_spectrum = max_capture_spectrum_data;
+ }
+
+ const size_t num_render_channels = render_spectra.size();
+ std::array<float, kFftLengthBy2Plus1> max_render_spectrum_data;
+ rtc::ArrayView<const float, kFftLengthBy2Plus1> max_render_spectrum =
+ render_spectra[/*channel=*/0];
+ if (num_render_channels > 1) {
+ std::copy(render_spectra[0].begin(), render_spectra[0].end(),
+ max_render_spectrum_data.begin());
+ for (size_t ch = 1; ch < num_render_channels; ++ch) {
+ for (size_t k = 0; k < kFftLengthBy2Plus1; ++k) {
+ max_render_spectrum_data[k] =
+ std::max(max_render_spectrum_data[k], render_spectra[ch][k]);
+ }
+ }
+ max_render_spectrum = max_render_spectrum_data;
+ }
+
+ const auto& X2 = max_render_spectrum;
+ const auto& Y2 = max_capture_spectrum;
+
+ // Update the estimates in a maximum statistics manner.
+ for (size_t k = 1; k < kFftLengthBy2; ++k) {
+ if (X2[k] > kX2Min) {
+ const float new_erl = Y2[k] / X2[k];
+ if (new_erl < erl_[k]) {
+ hold_counters_[k - 1] = 1000;
+ erl_[k] += 0.1f * (new_erl - erl_[k]);
+ erl_[k] = std::max(erl_[k], kMinErl);
+ }
+ }
+ }
+
+ std::for_each(hold_counters_.begin(), hold_counters_.end(),
+ [](int& a) { --a; });
+ std::transform(hold_counters_.begin(), hold_counters_.end(), erl_.begin() + 1,
+ erl_.begin() + 1, [](int a, float b) {
+ return a > 0 ? b : std::min(kMaxErl, 2.f * b);
+ });
+
+ erl_[0] = erl_[1];
+ erl_[kFftLengthBy2] = erl_[kFftLengthBy2 - 1];
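+ // The net effect is that a lower instantaneous ERL pulls each bin estimate
+ // down by 10% of the gap and arms a 1000-block hold; once the hold expires
+ // without new low estimates, the estimate doubles (grows by 3 dB) per update
+ // until it saturates at kMaxErl.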
+
+ // Compute ERL over all frequency bins.
+ const float X2_sum = std::accumulate(X2.begin(), X2.end(), 0.0f);
+
+ if (X2_sum > kX2Min * X2.size()) {
+ const float Y2_sum = std::accumulate(Y2.begin(), Y2.end(), 0.0f);
+ const float new_erl = Y2_sum / X2_sum;
+ if (new_erl < erl_time_domain_) {
+ hold_counter_time_domain_ = 1000;
+ erl_time_domain_ += 0.1f * (new_erl - erl_time_domain_);
+ erl_time_domain_ = std::max(erl_time_domain_, kMinErl);
+ }
+ }
+
+ --hold_counter_time_domain_;
+ erl_time_domain_ = (hold_counter_time_domain_ > 0)
+ ? erl_time_domain_
+ : std::min(kMaxErl, 2.f * erl_time_domain_);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/erl_estimator.h b/third_party/libwebrtc/modules/audio_processing/aec3/erl_estimator.h
new file mode 100644
index 0000000000..639a52c561
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/erl_estimator.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_ERL_ESTIMATOR_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_ERL_ESTIMATOR_H_
+
+#include <stddef.h>
+
+#include <array>
+#include <vector>
+
+#include "api/array_view.h"
+#include "modules/audio_processing/aec3/aec3_common.h"
+
+namespace webrtc {
+
+// Estimates the echo return loss based on the signal spectra.
+class ErlEstimator {
+ public:
+ explicit ErlEstimator(size_t startup_phase_length_blocks);
+ ~ErlEstimator();
+
+ ErlEstimator(const ErlEstimator&) = delete;
+ ErlEstimator& operator=(const ErlEstimator&) = delete;
+
+ // Resets the ERL estimation.
+ void Reset();
+
+ // Updates the ERL estimate.
+ void Update(const std::vector<bool>& converged_filters,
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>>
+ render_spectra,
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>>
+ capture_spectra);
+
+ // Returns the most recent ERL estimate.
+ const std::array<float, kFftLengthBy2Plus1>& Erl() const { return erl_; }
+ float ErlTimeDomain() const { return erl_time_domain_; }
+
+ private:
+ const size_t startup_phase_length_blocks_;
+ std::array<float, kFftLengthBy2Plus1> erl_;
+ std::array<int, kFftLengthBy2Minus1> hold_counters_;
+ float erl_time_domain_;
+ int hold_counter_time_domain_;
+ size_t blocks_since_reset_ = 0;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AEC3_ERL_ESTIMATOR_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/erl_estimator_unittest.cc b/third_party/libwebrtc/modules/audio_processing/aec3/erl_estimator_unittest.cc
new file mode 100644
index 0000000000..79e5465e3c
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/erl_estimator_unittest.cc
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/erl_estimator.h"
+
+#include "rtc_base/strings/string_builder.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+namespace {
+std::string ProduceDebugText(size_t num_render_channels,
+ size_t num_capture_channels) {
+ rtc::StringBuilder ss;
+ ss << "Render channels: " << num_render_channels;
+ ss << ", Capture channels: " << num_capture_channels;
+ return ss.Release();
+}
+
+void VerifyErl(const std::array<float, kFftLengthBy2Plus1>& erl,
+ float erl_time_domain,
+ float reference) {
+ std::for_each(erl.begin(), erl.end(),
+ [reference](float a) { EXPECT_NEAR(reference, a, 0.001); });
+ EXPECT_NEAR(reference, erl_time_domain, 0.001);
+}
+
+} // namespace
+
+class ErlEstimatorMultiChannel
+ : public ::testing::Test,
+ public ::testing::WithParamInterface<std::tuple<size_t, size_t>> {};
+
+INSTANTIATE_TEST_SUITE_P(MultiChannel,
+ ErlEstimatorMultiChannel,
+ ::testing::Combine(::testing::Values(1, 2, 8),
+ ::testing::Values(1, 2, 8)));
+
+// Verifies that the correct ERL estimates are achieved.
+TEST_P(ErlEstimatorMultiChannel, Estimates) {
+ const size_t num_render_channels = std::get<0>(GetParam());
+ const size_t num_capture_channels = std::get<1>(GetParam());
+ SCOPED_TRACE(ProduceDebugText(num_render_channels, num_capture_channels));
+ std::vector<std::array<float, kFftLengthBy2Plus1>> X2(num_render_channels);
+ for (auto& X2_ch : X2) {
+ X2_ch.fill(0.f);
+ }
+ std::vector<std::array<float, kFftLengthBy2Plus1>> Y2(num_capture_channels);
+ for (auto& Y2_ch : Y2) {
+ Y2_ch.fill(0.f);
+ }
+ std::vector<bool> converged_filters(num_capture_channels, false);
+ const size_t converged_idx = num_capture_channels - 1;
+ converged_filters[converged_idx] = true;
+
+ ErlEstimator estimator(0);
+
+ // Verifies that the ERL estimate is properly reduced to lower values.
+ for (auto& X2_ch : X2) {
+ X2_ch.fill(500 * 1000.f * 1000.f);
+ }
+ Y2[converged_idx].fill(10 * X2[0][0]);
+ for (size_t k = 0; k < 200; ++k) {
+ estimator.Update(converged_filters, X2, Y2);
+ }
+ VerifyErl(estimator.Erl(), estimator.ErlTimeDomain(), 10.f);
+
+ // Verifies that the ERL is not immediately increased when the ERL in the
+ // data increases.
+ Y2[converged_idx].fill(10000 * X2[0][0]);
+ for (size_t k = 0; k < 998; ++k) {
+ estimator.Update(converged_filters, X2, Y2);
+ }
+ VerifyErl(estimator.Erl(), estimator.ErlTimeDomain(), 10.f);
+
+ // Verifies that the rate of increase is 3 dB.
+ estimator.Update(converged_filters, X2, Y2);
+ VerifyErl(estimator.Erl(), estimator.ErlTimeDomain(), 20.f);
+
+ // Verifies that the maximum ERL is achieved when there are no low ERL
+ // estimates.
+ for (size_t k = 0; k < 1000; ++k) {
+ estimator.Update(converged_filters, X2, Y2);
+ }
+ VerifyErl(estimator.Erl(), estimator.ErlTimeDomain(), 1000.f);
+
+ // Verifies that the ERL estimate is not updated for low-level signals.
+ for (auto& X2_ch : X2) {
+ X2_ch.fill(1000.f * 1000.f);
+ }
+ Y2[converged_idx].fill(10 * X2[0][0]);
+ for (size_t k = 0; k < 200; ++k) {
+ estimator.Update(converged_filters, X2, Y2);
+ }
+ VerifyErl(estimator.Erl(), estimator.ErlTimeDomain(), 1000.f);
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/erle_estimator.cc b/third_party/libwebrtc/modules/audio_processing/aec3/erle_estimator.cc
new file mode 100644
index 0000000000..0e3d715c59
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/erle_estimator.cc
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/erle_estimator.h"
+
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+ErleEstimator::ErleEstimator(size_t startup_phase_length_blocks,
+ const EchoCanceller3Config& config,
+ size_t num_capture_channels)
+ : startup_phase_length_blocks_(startup_phase_length_blocks),
+ fullband_erle_estimator_(config.erle, num_capture_channels),
+ subband_erle_estimator_(config, num_capture_channels) {
+ if (config.erle.num_sections > 1) {
+ signal_dependent_erle_estimator_ =
+ std::make_unique<SignalDependentErleEstimator>(config,
+ num_capture_channels);
+ }
+ Reset(true);
+}
+
+ErleEstimator::~ErleEstimator() = default;
+
+void ErleEstimator::Reset(bool delay_change) {
+ fullband_erle_estimator_.Reset();
+ subband_erle_estimator_.Reset();
+ if (signal_dependent_erle_estimator_) {
+ signal_dependent_erle_estimator_->Reset();
+ }
+ if (delay_change) {
+ blocks_since_reset_ = 0;
+ }
+}
+
+void ErleEstimator::Update(
+ const RenderBuffer& render_buffer,
+ rtc::ArrayView<const std::vector<std::array<float, kFftLengthBy2Plus1>>>
+ filter_frequency_responses,
+ rtc::ArrayView<const float, kFftLengthBy2Plus1>
+ avg_render_spectrum_with_reverb,
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>> capture_spectra,
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>>
+ subtractor_spectra,
+ const std::vector<bool>& converged_filters) {
+ RTC_DCHECK_EQ(subband_erle_estimator_.Erle(/*onset_compensated=*/true).size(),
+ capture_spectra.size());
+ RTC_DCHECK_EQ(subband_erle_estimator_.Erle(/*onset_compensated=*/true).size(),
+ subtractor_spectra.size());
+ const auto& X2_reverb = avg_render_spectrum_with_reverb;
+ const auto& Y2 = capture_spectra;
+ const auto& E2 = subtractor_spectra;
+
+ if (++blocks_since_reset_ < startup_phase_length_blocks_) {
+ return;
+ }
+
+ subband_erle_estimator_.Update(X2_reverb, Y2, E2, converged_filters);
+
+ if (signal_dependent_erle_estimator_) {
+ signal_dependent_erle_estimator_->Update(
+ render_buffer, filter_frequency_responses, X2_reverb, Y2, E2,
+ subband_erle_estimator_.Erle(/*onset_compensated=*/false),
+ subband_erle_estimator_.Erle(/*onset_compensated=*/true),
+ converged_filters);
+ }
+
+ fullband_erle_estimator_.Update(X2_reverb, Y2, E2, converged_filters);
+}
+
+void ErleEstimator::Dump(
+ const std::unique_ptr<ApmDataDumper>& data_dumper) const {
+ fullband_erle_estimator_.Dump(data_dumper);
+ subband_erle_estimator_.Dump(data_dumper);
+ if (signal_dependent_erle_estimator_) {
+ signal_dependent_erle_estimator_->Dump(data_dumper);
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/erle_estimator.h b/third_party/libwebrtc/modules/audio_processing/aec3/erle_estimator.h
new file mode 100644
index 0000000000..55797592a9
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/erle_estimator.h
@@ -0,0 +1,112 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_ERLE_ESTIMATOR_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_ERLE_ESTIMATOR_H_
+
+#include <stddef.h>
+
+#include <array>
+#include <memory>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/array_view.h"
+#include "api/audio/echo_canceller3_config.h"
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "modules/audio_processing/aec3/fullband_erle_estimator.h"
+#include "modules/audio_processing/aec3/render_buffer.h"
+#include "modules/audio_processing/aec3/signal_dependent_erle_estimator.h"
+#include "modules/audio_processing/aec3/subband_erle_estimator.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+
+namespace webrtc {
+
+// Estimates the echo return loss enhancement. One estimate is computed per
+// subband and another one using the aggregated energy over all subbands.
+class ErleEstimator {
+ public:
+ ErleEstimator(size_t startup_phase_length_blocks,
+ const EchoCanceller3Config& config,
+ size_t num_capture_channels);
+ ~ErleEstimator();
+
+  // Resets the fullband and subband ERLE estimators. If `delay_change` is
+  // true, the startup phase counter is restarted as well.
+ void Reset(bool delay_change);
+
+ // Updates the ERLE estimates.
+ void Update(
+ const RenderBuffer& render_buffer,
+ rtc::ArrayView<const std::vector<std::array<float, kFftLengthBy2Plus1>>>
+ filter_frequency_responses,
+ rtc::ArrayView<const float, kFftLengthBy2Plus1>
+ avg_render_spectrum_with_reverb,
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>>
+ capture_spectra,
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>>
+ subtractor_spectra,
+ const std::vector<bool>& converged_filters);
+
+ // Returns the most recent subband ERLE estimates.
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>> Erle(
+ bool onset_compensated) const {
+ return signal_dependent_erle_estimator_
+ ? signal_dependent_erle_estimator_->Erle(onset_compensated)
+ : subband_erle_estimator_.Erle(onset_compensated);
+ }
+
+ // Returns the non-capped subband ERLE.
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>> ErleUnbounded()
+ const {
+    // The unbounded ERLE is only produced by the subband ERLE estimator,
+    // where the ERLE is often capped at low values. When the
+    // signal-dependent ERLE estimator is in use, its capped ERLE is
+    // returned instead.
+ return !signal_dependent_erle_estimator_
+ ? subband_erle_estimator_.ErleUnbounded()
+ : signal_dependent_erle_estimator_->Erle(
+ /*onset_compensated=*/false);
+ }
+
+  // Returns the subband ERLEs estimated during onsets (used for testing
+  // only).
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>> ErleDuringOnsets()
+ const {
+ return subband_erle_estimator_.ErleDuringOnsets();
+ }
+
+  // Returns the fullband ERLE estimate in log2 units.
+ float FullbandErleLog2() const {
+ return fullband_erle_estimator_.FullbandErleLog2();
+ }
+
+  // Returns an estimate of the current linear filter quality based on the
+  // current and past fullband ERLE values. The returned value is a float
+  // vector with entries between 0 and 1, where 1 indicates that the linear
+  // filter is currently reaching its maximum subtraction performance.
+ rtc::ArrayView<const absl::optional<float>> GetInstLinearQualityEstimates()
+ const {
+ return fullband_erle_estimator_.GetInstLinearQualityEstimates();
+ }
+
+ void Dump(const std::unique_ptr<ApmDataDumper>& data_dumper) const;
+
+ private:
+ const size_t startup_phase_length_blocks_;
+ FullBandErleEstimator fullband_erle_estimator_;
+ SubbandErleEstimator subband_erle_estimator_;
+ std::unique_ptr<SignalDependentErleEstimator>
+ signal_dependent_erle_estimator_;
+ size_t blocks_since_reset_ = 0;
+};
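+
+// Illustrative note (not part of the API): the per-bin ERLE values above are
+// linear power ratios, while FullbandErleLog2() is in log2 units. A caller
+// converting to dB might use, e.g.:
+//
+//   float ErleDbFromLinear(float erle) { return 10.f * std::log10(erle); }
+//   float ErleDbFromLog2(float erle_log2) {
+//     return 10.f * erle_log2 * std::log10(2.f);  // ~3.01 dB per log2 step
+//   }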
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AEC3_ERLE_ESTIMATOR_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/erle_estimator_unittest.cc b/third_party/libwebrtc/modules/audio_processing/aec3/erle_estimator_unittest.cc
new file mode 100644
index 0000000000..42be7d9c7d
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/erle_estimator_unittest.cc
@@ -0,0 +1,288 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/erle_estimator.h"
+
+#include <cmath>
+
+#include "api/array_view.h"
+#include "modules/audio_processing/aec3/render_delay_buffer.h"
+#include "modules/audio_processing/aec3/spectrum_buffer.h"
+#include "rtc_base/random.h"
+#include "rtc_base/strings/string_builder.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+namespace {
+constexpr int kLowFrequencyLimit = kFftLengthBy2 / 2;
+constexpr float kTrueErle = 10.f;
+constexpr float kTrueErleOnsets = 1.0f;
+constexpr float kEchoPathGain = 3.f;
+
+void VerifyErleBands(
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>> erle,
+ float reference_lf,
+ float reference_hf) {
+ for (size_t ch = 0; ch < erle.size(); ++ch) {
+ std::for_each(
+ erle[ch].begin(), erle[ch].begin() + kLowFrequencyLimit,
+ [reference_lf](float a) { EXPECT_NEAR(reference_lf, a, 0.001); });
+ std::for_each(
+ erle[ch].begin() + kLowFrequencyLimit, erle[ch].end(),
+ [reference_hf](float a) { EXPECT_NEAR(reference_hf, a, 0.001); });
+ }
+}
+
+void VerifyErle(
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>> erle,
+ float erle_time_domain,
+ float reference_lf,
+ float reference_hf) {
+ VerifyErleBands(erle, reference_lf, reference_hf);
+ EXPECT_NEAR(kTrueErle, erle_time_domain, 0.5);
+}
+
+void VerifyErleGreaterOrEqual(
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>> erle1,
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>> erle2) {
+ for (size_t ch = 0; ch < erle1.size(); ++ch) {
+ for (size_t i = 0; i < kFftLengthBy2Plus1; ++i) {
+ EXPECT_GE(erle1[ch][i], erle2[ch][i]);
+ }
+ }
+}
+
+void FormFarendTimeFrame(Block* x) {
+ const std::array<float, kBlockSize> frame = {
+ 7459.88, 17209.6, 17383, 20768.9, 16816.7, 18386.3, 4492.83, 9675.85,
+ 6665.52, 14808.6, 9342.3, 7483.28, 19261.7, 4145.98, 1622.18, 13475.2,
+ 7166.32, 6856.61, 21937, 7263.14, 9569.07, 14919, 8413.32, 7551.89,
+ 7848.65, 6011.27, 13080.6, 15865.2, 12656, 17459.6, 4263.93, 4503.03,
+ 9311.79, 21095.8, 12657.9, 13906.6, 19267.2, 11338.1, 16828.9, 11501.6,
+ 11405, 15031.4, 14541.6, 19765.5, 18346.3, 19350.2, 3157.47, 18095.8,
+ 1743.68, 21328.2, 19727.5, 7295.16, 10332.4, 11055.5, 20107.4, 14708.4,
+ 12416.2, 16434, 2454.69, 9840.8, 6867.23, 1615.75, 6059.9, 8394.19};
+ for (int band = 0; band < x->NumBands(); ++band) {
+ for (int channel = 0; channel < x->NumChannels(); ++channel) {
+ RTC_DCHECK_GE(kBlockSize, frame.size());
+ std::copy(frame.begin(), frame.end(), x->begin(band, channel));
+ }
+ }
+}
+
+void FormFarendFrame(const RenderBuffer& render_buffer,
+ float erle,
+ std::array<float, kFftLengthBy2Plus1>* X2,
+ rtc::ArrayView<std::array<float, kFftLengthBy2Plus1>> E2,
+ rtc::ArrayView<std::array<float, kFftLengthBy2Plus1>> Y2) {
+ const auto& spectrum_buffer = render_buffer.GetSpectrumBuffer();
+ const int num_render_channels = spectrum_buffer.buffer[0].size();
+ const int num_capture_channels = Y2.size();
+
+ X2->fill(0.f);
+ for (int ch = 0; ch < num_render_channels; ++ch) {
+ for (size_t k = 0; k < kFftLengthBy2Plus1; ++k) {
+ (*X2)[k] += spectrum_buffer.buffer[spectrum_buffer.write][ch][k] /
+ num_render_channels;
+ }
+ }
+
+ for (int ch = 0; ch < num_capture_channels; ++ch) {
+ std::transform(X2->begin(), X2->end(), Y2[ch].begin(),
+ [](float a) { return a * kEchoPathGain * kEchoPathGain; });
+ std::transform(Y2[ch].begin(), Y2[ch].end(), E2[ch].begin(),
+ [erle](float a) { return a / erle; });
+ }
+}
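+
+// The synthetic echo model above, restated per bin k and capture channel ch
+// (Y2 and E2 are constructed so that the true ERLE is known exactly):
+//   Y2[ch][k] = kEchoPathGain^2 * X2[k]   (echo power at the capture side)
+//   E2[ch][k] = Y2[ch][k] / erle          (residual after echo subtraction)
+// Hence the measured ratio Y2 / E2 equals the requested `erle` in each bin.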
+
+void FormNearendFrame(
+ Block* x,
+ std::array<float, kFftLengthBy2Plus1>* X2,
+ rtc::ArrayView<std::array<float, kFftLengthBy2Plus1>> E2,
+ rtc::ArrayView<std::array<float, kFftLengthBy2Plus1>> Y2) {
+ for (int band = 0; band < x->NumBands(); ++band) {
+ for (int ch = 0; ch < x->NumChannels(); ++ch) {
+ std::fill(x->begin(band, ch), x->end(band, ch), 0.f);
+ }
+ }
+
+ X2->fill(0.f);
+ for (size_t ch = 0; ch < Y2.size(); ++ch) {
+ Y2[ch].fill(500.f * 1000.f * 1000.f);
+ E2[ch].fill(Y2[ch][0]);
+ }
+}
+
+void GetFilterFreq(
+ size_t delay_headroom_samples,
+ rtc::ArrayView<std::vector<std::array<float, kFftLengthBy2Plus1>>>
+ filter_frequency_response) {
+ const size_t delay_headroom_blocks = delay_headroom_samples / kBlockSize;
+ for (size_t ch = 0; ch < filter_frequency_response[0].size(); ++ch) {
+ for (auto& block_freq_resp : filter_frequency_response) {
+ block_freq_resp[ch].fill(0.f);
+ }
+
+ for (size_t k = 0; k < kFftLengthBy2Plus1; ++k) {
+ filter_frequency_response[delay_headroom_blocks][ch][k] = kEchoPathGain;
+ }
+ }
+}
+
+} // namespace
+
+class ErleEstimatorMultiChannel
+ : public ::testing::Test,
+ public ::testing::WithParamInterface<std::tuple<size_t, size_t>> {};
+
+INSTANTIATE_TEST_SUITE_P(MultiChannel,
+ ErleEstimatorMultiChannel,
+ ::testing::Combine(::testing::Values(1, 2, 4, 8),
+ ::testing::Values(1, 2, 8)));
+
+TEST_P(ErleEstimatorMultiChannel, VerifyErleIncreaseAndHold) {
+ const size_t num_render_channels = std::get<0>(GetParam());
+ const size_t num_capture_channels = std::get<1>(GetParam());
+ constexpr int kSampleRateHz = 48000;
+ constexpr size_t kNumBands = NumBandsForRate(kSampleRateHz);
+
+ std::array<float, kFftLengthBy2Plus1> X2;
+ std::vector<std::array<float, kFftLengthBy2Plus1>> E2(num_capture_channels);
+ std::vector<std::array<float, kFftLengthBy2Plus1>> Y2(num_capture_channels);
+ std::vector<bool> converged_filters(num_capture_channels, true);
+
+ EchoCanceller3Config config;
+ config.erle.onset_detection = true;
+
+ Block x(kNumBands, num_render_channels);
+ std::vector<std::vector<std::array<float, kFftLengthBy2Plus1>>>
+ filter_frequency_response(
+ config.filter.refined.length_blocks,
+ std::vector<std::array<float, kFftLengthBy2Plus1>>(
+ num_capture_channels));
+ std::unique_ptr<RenderDelayBuffer> render_delay_buffer(
+ RenderDelayBuffer::Create(config, kSampleRateHz, num_render_channels));
+
+ GetFilterFreq(config.delay.delay_headroom_samples, filter_frequency_response);
+
+ ErleEstimator estimator(0, config, num_capture_channels);
+
+ FormFarendTimeFrame(&x);
+ render_delay_buffer->Insert(x);
+ render_delay_buffer->PrepareCaptureProcessing();
+ // Verifies that the ERLE estimate is properly increased to higher values.
+ FormFarendFrame(*render_delay_buffer->GetRenderBuffer(), kTrueErle, &X2, E2,
+ Y2);
+ for (size_t k = 0; k < 1000; ++k) {
+ render_delay_buffer->Insert(x);
+ render_delay_buffer->PrepareCaptureProcessing();
+ estimator.Update(*render_delay_buffer->GetRenderBuffer(),
+ filter_frequency_response, X2, Y2, E2, converged_filters);
+ }
+ VerifyErle(estimator.Erle(/*onset_compensated=*/true),
+ std::pow(2.f, estimator.FullbandErleLog2()), config.erle.max_l,
+ config.erle.max_h);
+ VerifyErleGreaterOrEqual(estimator.Erle(/*onset_compensated=*/false),
+ estimator.Erle(/*onset_compensated=*/true));
+ VerifyErleGreaterOrEqual(estimator.ErleUnbounded(),
+ estimator.Erle(/*onset_compensated=*/false));
+
+ FormNearendFrame(&x, &X2, E2, Y2);
+ // Verifies that the ERLE is not immediately decreased during nearend
+ // activity.
+ for (size_t k = 0; k < 50; ++k) {
+ render_delay_buffer->Insert(x);
+ render_delay_buffer->PrepareCaptureProcessing();
+ estimator.Update(*render_delay_buffer->GetRenderBuffer(),
+ filter_frequency_response, X2, Y2, E2, converged_filters);
+ }
+ VerifyErle(estimator.Erle(/*onset_compensated=*/true),
+ std::pow(2.f, estimator.FullbandErleLog2()), config.erle.max_l,
+ config.erle.max_h);
+ VerifyErleGreaterOrEqual(estimator.Erle(/*onset_compensated=*/false),
+ estimator.Erle(/*onset_compensated=*/true));
+ VerifyErleGreaterOrEqual(estimator.ErleUnbounded(),
+ estimator.Erle(/*onset_compensated=*/false));
+}
+
+TEST_P(ErleEstimatorMultiChannel, VerifyErleTrackingOnOnsets) {
+ const size_t num_render_channels = std::get<0>(GetParam());
+ const size_t num_capture_channels = std::get<1>(GetParam());
+ constexpr int kSampleRateHz = 48000;
+ constexpr size_t kNumBands = NumBandsForRate(kSampleRateHz);
+
+ std::array<float, kFftLengthBy2Plus1> X2;
+ std::vector<std::array<float, kFftLengthBy2Plus1>> E2(num_capture_channels);
+ std::vector<std::array<float, kFftLengthBy2Plus1>> Y2(num_capture_channels);
+ std::vector<bool> converged_filters(num_capture_channels, true);
+ EchoCanceller3Config config;
+ config.erle.onset_detection = true;
+ Block x(kNumBands, num_render_channels);
+ std::vector<std::vector<std::array<float, kFftLengthBy2Plus1>>>
+ filter_frequency_response(
+ config.filter.refined.length_blocks,
+ std::vector<std::array<float, kFftLengthBy2Plus1>>(
+ num_capture_channels));
+ std::unique_ptr<RenderDelayBuffer> render_delay_buffer(
+ RenderDelayBuffer::Create(config, kSampleRateHz, num_render_channels));
+
+ GetFilterFreq(config.delay.delay_headroom_samples, filter_frequency_response);
+
+ ErleEstimator estimator(/*startup_phase_length_blocks=*/0, config,
+ num_capture_channels);
+
+ FormFarendTimeFrame(&x);
+ render_delay_buffer->Insert(x);
+ render_delay_buffer->PrepareCaptureProcessing();
+
+ for (size_t burst = 0; burst < 20; ++burst) {
+ FormFarendFrame(*render_delay_buffer->GetRenderBuffer(), kTrueErleOnsets,
+ &X2, E2, Y2);
+ for (size_t k = 0; k < 10; ++k) {
+ render_delay_buffer->Insert(x);
+ render_delay_buffer->PrepareCaptureProcessing();
+ estimator.Update(*render_delay_buffer->GetRenderBuffer(),
+ filter_frequency_response, X2, Y2, E2,
+ converged_filters);
+ }
+ FormFarendFrame(*render_delay_buffer->GetRenderBuffer(), kTrueErle, &X2, E2,
+ Y2);
+ for (size_t k = 0; k < 1000; ++k) {
+ render_delay_buffer->Insert(x);
+ render_delay_buffer->PrepareCaptureProcessing();
+ estimator.Update(*render_delay_buffer->GetRenderBuffer(),
+ filter_frequency_response, X2, Y2, E2,
+ converged_filters);
+ }
+ FormNearendFrame(&x, &X2, E2, Y2);
+ for (size_t k = 0; k < 300; ++k) {
+ render_delay_buffer->Insert(x);
+ render_delay_buffer->PrepareCaptureProcessing();
+ estimator.Update(*render_delay_buffer->GetRenderBuffer(),
+ filter_frequency_response, X2, Y2, E2,
+ converged_filters);
+ }
+ }
+ VerifyErleBands(estimator.ErleDuringOnsets(), config.erle.min,
+ config.erle.min);
+ FormNearendFrame(&x, &X2, E2, Y2);
+ for (size_t k = 0; k < 1000; k++) {
+ estimator.Update(*render_delay_buffer->GetRenderBuffer(),
+ filter_frequency_response, X2, Y2, E2, converged_filters);
+ }
+  // Verifies that during nearend activity, the ERLE converges to the ERLE
+  // for onsets.
+ VerifyErle(estimator.Erle(/*onset_compensated=*/true),
+ std::pow(2.f, estimator.FullbandErleLog2()), config.erle.min,
+ config.erle.min);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/fft_buffer.cc b/third_party/libwebrtc/modules/audio_processing/aec3/fft_buffer.cc
new file mode 100644
index 0000000000..1ce2d31d8f
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/fft_buffer.cc
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/fft_buffer.h"
+
+namespace webrtc {
+
+FftBuffer::FftBuffer(size_t size, size_t num_channels)
+ : size(static_cast<int>(size)),
+ buffer(size, std::vector<FftData>(num_channels)) {
+ for (auto& block : buffer) {
+ for (auto& channel_fft_data : block) {
+ channel_fft_data.Clear();
+ }
+ }
+}
+
+FftBuffer::~FftBuffer() = default;
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/fft_buffer.h b/third_party/libwebrtc/modules/audio_processing/aec3/fft_buffer.h
new file mode 100644
index 0000000000..4187315863
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/fft_buffer.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_FFT_BUFFER_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_FFT_BUFFER_H_
+
+#include <stddef.h>
+
+#include <vector>
+
+#include "modules/audio_processing/aec3/fft_data.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+// Struct for bundling a circular buffer of FftData objects together with the
+// read and write indices.
+struct FftBuffer {
+ FftBuffer(size_t size, size_t num_channels);
+ ~FftBuffer();
+
+ int IncIndex(int index) const {
+ RTC_DCHECK_EQ(buffer.size(), static_cast<size_t>(size));
+ return index < size - 1 ? index + 1 : 0;
+ }
+
+ int DecIndex(int index) const {
+ RTC_DCHECK_EQ(buffer.size(), static_cast<size_t>(size));
+ return index > 0 ? index - 1 : size - 1;
+ }
+
+ int OffsetIndex(int index, int offset) const {
+ RTC_DCHECK_GE(buffer.size(), offset);
+ RTC_DCHECK_EQ(buffer.size(), static_cast<size_t>(size));
+ return (size + index + offset) % size;
+ }
+
+ void UpdateWriteIndex(int offset) { write = OffsetIndex(write, offset); }
+ void IncWriteIndex() { write = IncIndex(write); }
+ void DecWriteIndex() { write = DecIndex(write); }
+ void UpdateReadIndex(int offset) { read = OffsetIndex(read, offset); }
+ void IncReadIndex() { read = IncIndex(read); }
+ void DecReadIndex() { read = DecIndex(read); }
+
+ const int size;
+ std::vector<std::vector<FftData>> buffer;
+ int write = 0;
+ int read = 0;
+};
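+
+// Usage sketch (illustrative only; the values are placeholders): the indices
+// wrap modulo `size`, so a producer/consumer stepping one block at a time
+// could do:
+//
+//   FftBuffer fft_buffer(/*size=*/10, /*num_channels=*/2);
+//   fft_buffer.buffer[fft_buffer.write][0].Clear();  // write new data here
+//   fft_buffer.IncWriteIndex();
+//   const FftData& oldest = fft_buffer.buffer[fft_buffer.read][0];
+//   fft_buffer.IncReadIndex();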
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AEC3_FFT_BUFFER_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/fft_data.h b/third_party/libwebrtc/modules/audio_processing/aec3/fft_data.h
new file mode 100644
index 0000000000..9c25e784aa
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/fft_data.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_FFT_DATA_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_FFT_DATA_H_
+
+// Defines WEBRTC_ARCH_X86_FAMILY, used below.
+#include "rtc_base/system/arch.h"
+
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+#include <emmintrin.h>
+#endif
+#include <algorithm>
+#include <array>
+
+#include "api/array_view.h"
+#include "modules/audio_processing/aec3/aec3_common.h"
+
+namespace webrtc {
+
+// Struct that holds the complex-valued data produced by 128-point
+// real-valued FFTs, split into real and imaginary parts.
+struct FftData {
+ // Copies the data in src.
+ void Assign(const FftData& src) {
+ std::copy(src.re.begin(), src.re.end(), re.begin());
+ std::copy(src.im.begin(), src.im.end(), im.begin());
+ im[0] = im[kFftLengthBy2] = 0;
+ }
+
+  // Clears all the real and imaginary data.
+ void Clear() {
+ re.fill(0.f);
+ im.fill(0.f);
+ }
+
+ // Computes the power spectrum of the data.
+  // Computes the power spectrum of the data using AVX2 instructions.
+
+ // Computes the power spectrum of the data.
+ void Spectrum(Aec3Optimization optimization,
+ rtc::ArrayView<float> power_spectrum) const {
+ RTC_DCHECK_EQ(kFftLengthBy2Plus1, power_spectrum.size());
+ switch (optimization) {
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+ case Aec3Optimization::kSse2: {
+ constexpr int kNumFourBinBands = kFftLengthBy2 / 4;
+ constexpr int kLimit = kNumFourBinBands * 4;
+ for (size_t k = 0; k < kLimit; k += 4) {
+ const __m128 r = _mm_loadu_ps(&re[k]);
+ const __m128 i = _mm_loadu_ps(&im[k]);
+ const __m128 ii = _mm_mul_ps(i, i);
+ const __m128 rr = _mm_mul_ps(r, r);
+ const __m128 rrii = _mm_add_ps(rr, ii);
+ _mm_storeu_ps(&power_spectrum[k], rrii);
+ }
+ power_spectrum[kFftLengthBy2] = re[kFftLengthBy2] * re[kFftLengthBy2] +
+ im[kFftLengthBy2] * im[kFftLengthBy2];
+ } break;
+ case Aec3Optimization::kAvx2:
+ SpectrumAVX2(power_spectrum);
+ break;
+#endif
+ default:
+ std::transform(re.begin(), re.end(), im.begin(), power_spectrum.begin(),
+ [](float a, float b) { return a * a + b * b; });
+ }
+ }
+
+  // Copies the data from a packed (interleaved) array.
+ void CopyFromPackedArray(const std::array<float, kFftLength>& v) {
+ re[0] = v[0];
+ re[kFftLengthBy2] = v[1];
+ im[0] = im[kFftLengthBy2] = 0;
+ for (size_t k = 1, j = 2; k < kFftLengthBy2; ++k) {
+ re[k] = v[j++];
+ im[k] = v[j++];
+ }
+ }
+
+  // Copies the data into a packed (interleaved) array.
+ void CopyToPackedArray(std::array<float, kFftLength>* v) const {
+ RTC_DCHECK(v);
+ (*v)[0] = re[0];
+ (*v)[1] = re[kFftLengthBy2];
+ for (size_t k = 1, j = 2; k < kFftLengthBy2; ++k) {
+ (*v)[j++] = re[k];
+ (*v)[j++] = im[k];
+ }
+ }
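+
+  // Packed layout used by the two methods above, matching the output of a
+  // 128-point real-valued FFT: v[0] holds the DC real part, v[1] the
+  // Nyquist real part, and v[2k], v[2k + 1] hold re[k], im[k] for
+  // k = 1..kFftLengthBy2 - 1. The DC and Nyquist imaginary parts are
+  // implicitly zero and therefore not stored.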
+
+ std::array<float, kFftLengthBy2Plus1> re;
+ std::array<float, kFftLengthBy2Plus1> im;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AEC3_FFT_DATA_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/fft_data_avx2.cc b/third_party/libwebrtc/modules/audio_processing/aec3/fft_data_avx2.cc
new file mode 100644
index 0000000000..1fe4bd69c6
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/fft_data_avx2.cc
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/fft_data.h"
+
+#include <immintrin.h>
+
+#include "api/array_view.h"
+
+namespace webrtc {
+
+// Computes the power spectrum of the data.
+void FftData::SpectrumAVX2(rtc::ArrayView<float> power_spectrum) const {
+ RTC_DCHECK_EQ(kFftLengthBy2Plus1, power_spectrum.size());
+ for (size_t k = 0; k < kFftLengthBy2; k += 8) {
+ __m256 r = _mm256_loadu_ps(&re[k]);
+ __m256 i = _mm256_loadu_ps(&im[k]);
+ __m256 ii = _mm256_mul_ps(i, i);
+ ii = _mm256_fmadd_ps(r, r, ii);
+ _mm256_storeu_ps(&power_spectrum[k], ii);
+ }
+ power_spectrum[kFftLengthBy2] = re[kFftLengthBy2] * re[kFftLengthBy2] +
+ im[kFftLengthBy2] * im[kFftLengthBy2];
+}
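+
+// Scalar equivalent of the loop above (for reference): for each bin k,
+// power_spectrum[k] = re[k] * re[k] + im[k] * im[k]. The AVX2 path fuses the
+// second multiply into the add via _mm256_fmadd_ps and processes eight bins
+// per iteration; the Nyquist bin (k == kFftLengthBy2) falls outside the
+// stride of eight and is computed separately after the loop.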
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/fft_data_gn/moz.build b/third_party/libwebrtc/modules/audio_processing/aec3/fft_data_gn/moz.build
new file mode 100644
index 0000000000..bf921e36ad
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/fft_data_gn/moz.build
@@ -0,0 +1,193 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("fft_data_gn")
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/fft_data_unittest.cc b/third_party/libwebrtc/modules/audio_processing/aec3/fft_data_unittest.cc
new file mode 100644
index 0000000000..d76fabdbd6
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/fft_data_unittest.cc
@@ -0,0 +1,186 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/fft_data.h"
+
+#include "rtc_base/system/arch.h"
+#include "system_wrappers/include/cpu_features_wrapper.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+// Verifies that the optimized methods are bit-exact with their reference
+// counterparts.
+TEST(FftData, TestSse2Optimizations) {
+ if (GetCPUInfo(kSSE2) != 0) {
+ FftData x;
+
+ for (size_t k = 0; k < x.re.size(); ++k) {
+ x.re[k] = k + 1;
+ }
+
+ x.im[0] = x.im[x.im.size() - 1] = 0.f;
+ for (size_t k = 1; k < x.im.size() - 1; ++k) {
+ x.im[k] = 2.f * (k + 1);
+ }
+
+ std::array<float, kFftLengthBy2Plus1> spectrum;
+ std::array<float, kFftLengthBy2Plus1> spectrum_sse2;
+ x.Spectrum(Aec3Optimization::kNone, spectrum);
+ x.Spectrum(Aec3Optimization::kSse2, spectrum_sse2);
+ EXPECT_EQ(spectrum, spectrum_sse2);
+ }
+}
+
+// Verifies that the optimized methods are bit-exact with their reference
+// counterparts.
+TEST(FftData, TestAvx2Optimizations) {
+ if (GetCPUInfo(kAVX2) != 0) {
+ FftData x;
+
+ for (size_t k = 0; k < x.re.size(); ++k) {
+ x.re[k] = k + 1;
+ }
+
+ x.im[0] = x.im[x.im.size() - 1] = 0.f;
+ for (size_t k = 1; k < x.im.size() - 1; ++k) {
+ x.im[k] = 2.f * (k + 1);
+ }
+
+ std::array<float, kFftLengthBy2Plus1> spectrum;
+ std::array<float, kFftLengthBy2Plus1> spectrum_avx2;
+ x.Spectrum(Aec3Optimization::kNone, spectrum);
+ x.Spectrum(Aec3Optimization::kAvx2, spectrum_avx2);
+ EXPECT_EQ(spectrum, spectrum_avx2);
+ }
+}
+#endif
+
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+
+// Verifies the check for null output in CopyToPackedArray.
+TEST(FftDataDeathTest, NonNullCopyToPackedArrayOutput) {
+ EXPECT_DEATH(FftData().CopyToPackedArray(nullptr), "");
+}
+
+// Verifies the check for null output in Spectrum.
+TEST(FftDataDeathTest, NonNullSpectrumOutput) {
+ EXPECT_DEATH(FftData().Spectrum(Aec3Optimization::kNone, nullptr), "");
+}
+
+#endif
+
+// Verifies that the Assign method properly copies the data from the source and
+// ensures that the imaginary components for the DC and Nyquist bins are 0.
+TEST(FftData, Assign) {
+ FftData x;
+ FftData y;
+
+ x.re.fill(1.f);
+ x.im.fill(2.f);
+ y.Assign(x);
+ EXPECT_EQ(x.re, y.re);
+ EXPECT_EQ(0.f, y.im[0]);
+ EXPECT_EQ(0.f, y.im[x.im.size() - 1]);
+ for (size_t k = 1; k < x.im.size() - 1; ++k) {
+ EXPECT_EQ(x.im[k], y.im[k]);
+ }
+}
+
+// Verifies that the Clear method properly clears all the data.
+TEST(FftData, Clear) {
+ FftData x_ref;
+ FftData x;
+
+ x_ref.re.fill(0.f);
+ x_ref.im.fill(0.f);
+
+ x.re.fill(1.f);
+ x.im.fill(2.f);
+ x.Clear();
+
+ EXPECT_EQ(x_ref.re, x.re);
+ EXPECT_EQ(x_ref.im, x.im);
+}
+
+// Verifies that the spectrum is correctly computed.
+TEST(FftData, Spectrum) {
+ FftData x;
+
+ for (size_t k = 0; k < x.re.size(); ++k) {
+ x.re[k] = k + 1;
+ }
+
+ x.im[0] = x.im[x.im.size() - 1] = 0.f;
+ for (size_t k = 1; k < x.im.size() - 1; ++k) {
+ x.im[k] = 2.f * (k + 1);
+ }
+
+ std::array<float, kFftLengthBy2Plus1> spectrum;
+ x.Spectrum(Aec3Optimization::kNone, spectrum);
+
+ EXPECT_EQ(x.re[0] * x.re[0], spectrum[0]);
+ EXPECT_EQ(x.re[spectrum.size() - 1] * x.re[spectrum.size() - 1],
+ spectrum[spectrum.size() - 1]);
+ for (size_t k = 1; k < spectrum.size() - 1; ++k) {
+ EXPECT_EQ(x.re[k] * x.re[k] + x.im[k] * x.im[k], spectrum[k]);
+ }
+}
+
+// Verifies that the functionality in CopyToPackedArray works as intended.
+TEST(FftData, CopyToPackedArray) {
+ FftData x;
+ std::array<float, kFftLength> x_packed;
+
+ for (size_t k = 0; k < x.re.size(); ++k) {
+ x.re[k] = k + 1;
+ }
+
+ x.im[0] = x.im[x.im.size() - 1] = 0.f;
+ for (size_t k = 1; k < x.im.size() - 1; ++k) {
+ x.im[k] = 2.f * (k + 1);
+ }
+
+ x.CopyToPackedArray(&x_packed);
+
+ EXPECT_EQ(x.re[0], x_packed[0]);
+ EXPECT_EQ(x.re[x.re.size() - 1], x_packed[1]);
+ for (size_t k = 1; k < x_packed.size() / 2; ++k) {
+ EXPECT_EQ(x.re[k], x_packed[2 * k]);
+ EXPECT_EQ(x.im[k], x_packed[2 * k + 1]);
+ }
+}
+
+// Verifies that the functionality in CopyFromPackedArray works as intended
+// (this relies on CopyToPackedArray having been verified in the test
+// above).
+TEST(FftData, CopyFromPackedArray) {
+ FftData x_ref;
+ FftData x;
+ std::array<float, kFftLength> x_packed;
+
+ for (size_t k = 0; k < x_ref.re.size(); ++k) {
+ x_ref.re[k] = k + 1;
+ }
+
+ x_ref.im[0] = x_ref.im[x_ref.im.size() - 1] = 0.f;
+ for (size_t k = 1; k < x_ref.im.size() - 1; ++k) {
+ x_ref.im[k] = 2.f * (k + 1);
+ }
+
+ x_ref.CopyToPackedArray(&x_packed);
+ x.CopyFromPackedArray(x_packed);
+
+ EXPECT_EQ(x_ref.re, x.re);
+ EXPECT_EQ(x_ref.im, x.im);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/filter_analyzer.cc b/third_party/libwebrtc/modules/audio_processing/aec3/filter_analyzer.cc
new file mode 100644
index 0000000000..d8fd3aa275
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/filter_analyzer.cc
@@ -0,0 +1,289 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/filter_analyzer.h"
+
+#include <math.h>
+
+#include <algorithm>
+#include <array>
+#include <numeric>
+
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "modules/audio_processing/aec3/render_buffer.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace {
+
+size_t FindPeakIndex(rtc::ArrayView<const float> filter_time_domain,
+ size_t peak_index_in,
+ size_t start_sample,
+ size_t end_sample) {
+ size_t peak_index_out = peak_index_in;
+ float max_h2 =
+ filter_time_domain[peak_index_out] * filter_time_domain[peak_index_out];
+ for (size_t k = start_sample; k <= end_sample; ++k) {
+ float tmp = filter_time_domain[k] * filter_time_domain[k];
+ if (tmp > max_h2) {
+ peak_index_out = k;
+ max_h2 = tmp;
+ }
+ }
+
+ return peak_index_out;
+}
+
+} // namespace
+
+std::atomic<int> FilterAnalyzer::instance_count_(0);
+
+FilterAnalyzer::FilterAnalyzer(const EchoCanceller3Config& config,
+ size_t num_capture_channels)
+ : data_dumper_(new ApmDataDumper(instance_count_.fetch_add(1) + 1)),
+ bounded_erl_(config.ep_strength.bounded_erl),
+ default_gain_(config.ep_strength.default_gain),
+ h_highpass_(num_capture_channels,
+ std::vector<float>(
+ GetTimeDomainLength(config.filter.refined.length_blocks),
+ 0.f)),
+ filter_analysis_states_(num_capture_channels,
+ FilterAnalysisState(config)),
+ filter_delays_blocks_(num_capture_channels, 0) {
+ Reset();
+}
+
+FilterAnalyzer::~FilterAnalyzer() = default;
+
+void FilterAnalyzer::Reset() {
+ blocks_since_reset_ = 0;
+ ResetRegion();
+ for (auto& state : filter_analysis_states_) {
+ state.Reset(default_gain_);
+ }
+ std::fill(filter_delays_blocks_.begin(), filter_delays_blocks_.end(), 0);
+}
+
+void FilterAnalyzer::Update(
+ rtc::ArrayView<const std::vector<float>> filters_time_domain,
+ const RenderBuffer& render_buffer,
+ bool* any_filter_consistent,
+ float* max_echo_path_gain) {
+ RTC_DCHECK(any_filter_consistent);
+ RTC_DCHECK(max_echo_path_gain);
+ RTC_DCHECK_EQ(filters_time_domain.size(), filter_analysis_states_.size());
+ RTC_DCHECK_EQ(filters_time_domain.size(), h_highpass_.size());
+
+ ++blocks_since_reset_;
+ SetRegionToAnalyze(filters_time_domain[0].size());
+ AnalyzeRegion(filters_time_domain, render_buffer);
+
+ // Aggregate the results for all capture channels.
+ auto& st_ch0 = filter_analysis_states_[0];
+ *any_filter_consistent = st_ch0.consistent_estimate;
+ *max_echo_path_gain = st_ch0.gain;
+ min_filter_delay_blocks_ = filter_delays_blocks_[0];
+ for (size_t ch = 1; ch < filters_time_domain.size(); ++ch) {
+ auto& st_ch = filter_analysis_states_[ch];
+ *any_filter_consistent =
+ *any_filter_consistent || st_ch.consistent_estimate;
+ *max_echo_path_gain = std::max(*max_echo_path_gain, st_ch.gain);
+ min_filter_delay_blocks_ =
+ std::min(min_filter_delay_blocks_, filter_delays_blocks_[ch]);
+ }
+}
+
+void FilterAnalyzer::AnalyzeRegion(
+ rtc::ArrayView<const std::vector<float>> filters_time_domain,
+ const RenderBuffer& render_buffer) {
+  // Preprocess the filters to remove low-frequency components that would
+  // otherwise disturb the analysis.
+ PreProcessFilters(filters_time_domain);
+ data_dumper_->DumpRaw("aec3_linear_filter_processed_td", h_highpass_[0]);
+
+ constexpr float kOneByBlockSize = 1.f / kBlockSize;
+ for (size_t ch = 0; ch < filters_time_domain.size(); ++ch) {
+ RTC_DCHECK_LT(region_.start_sample_, filters_time_domain[ch].size());
+ RTC_DCHECK_LT(region_.end_sample_, filters_time_domain[ch].size());
+
+ auto& st_ch = filter_analysis_states_[ch];
+ RTC_DCHECK_EQ(h_highpass_[ch].size(), filters_time_domain[ch].size());
+ RTC_DCHECK_GT(h_highpass_[ch].size(), 0);
+ st_ch.peak_index = std::min(st_ch.peak_index, h_highpass_[ch].size() - 1);
+
+ st_ch.peak_index =
+ FindPeakIndex(h_highpass_[ch], st_ch.peak_index, region_.start_sample_,
+ region_.end_sample_);
+ filter_delays_blocks_[ch] = st_ch.peak_index >> kBlockSizeLog2;
+ UpdateFilterGain(h_highpass_[ch], &st_ch);
+ st_ch.filter_length_blocks =
+ filters_time_domain[ch].size() * kOneByBlockSize;
+
+ st_ch.consistent_estimate = st_ch.consistent_filter_detector.Detect(
+ h_highpass_[ch], region_,
+ render_buffer.GetBlock(-filter_delays_blocks_[ch]), st_ch.peak_index,
+ filter_delays_blocks_[ch]);
+ }
+}
+
+void FilterAnalyzer::UpdateFilterGain(
+ rtc::ArrayView<const float> filter_time_domain,
+ FilterAnalysisState* st) {
+ bool sufficient_time_to_converge =
+ blocks_since_reset_ > 5 * kNumBlocksPerSecond;
+
+ if (sufficient_time_to_converge && st->consistent_estimate) {
+ st->gain = fabsf(filter_time_domain[st->peak_index]);
+ } else {
+ // TODO(peah): Verify whether this check against a float is ok.
+ if (st->gain) {
+ st->gain = std::max(st->gain, fabsf(filter_time_domain[st->peak_index]));
+ }
+ }
+
+ if (bounded_erl_ && st->gain) {
+ st->gain = std::max(st->gain, 0.01f);
+ }
+}
+
+void FilterAnalyzer::PreProcessFilters(
+ rtc::ArrayView<const std::vector<float>> filters_time_domain) {
+ for (size_t ch = 0; ch < filters_time_domain.size(); ++ch) {
+ RTC_DCHECK_LT(region_.start_sample_, filters_time_domain[ch].size());
+ RTC_DCHECK_LT(region_.end_sample_, filters_time_domain[ch].size());
+
+ RTC_DCHECK_GE(h_highpass_[ch].capacity(), filters_time_domain[ch].size());
+ h_highpass_[ch].resize(filters_time_domain[ch].size());
+ // Minimum phase high-pass filter with cutoff frequency at about 600 Hz.
+ constexpr std::array<float, 3> h = {
+ {0.7929742f, -0.36072128f, -0.47047766f}};
+
+ std::fill(h_highpass_[ch].begin() + region_.start_sample_,
+ h_highpass_[ch].begin() + region_.end_sample_ + 1, 0.f);
+ float* h_highpass_ch = h_highpass_[ch].data();
+ const float* filters_time_domain_ch = filters_time_domain[ch].data();
+ const size_t region_end = region_.end_sample_;
+ for (size_t k = std::max(h.size() - 1, region_.start_sample_);
+ k <= region_end; ++k) {
+ float tmp = h_highpass_ch[k];
+ for (size_t j = 0; j < h.size(); ++j) {
+ tmp += filters_time_domain_ch[k - j] * h[j];
+ }
+ h_highpass_ch[k] = tmp;
+ }
+ }
+}
+
+void FilterAnalyzer::ResetRegion() {
+ region_.start_sample_ = 0;
+ region_.end_sample_ = 0;
+}
+
+void FilterAnalyzer::SetRegionToAnalyze(size_t filter_size) {
+ constexpr size_t kNumberBlocksToUpdate = 1;
+ auto& r = region_;
+ r.start_sample_ = r.end_sample_ >= filter_size - 1 ? 0 : r.end_sample_ + 1;
+ r.end_sample_ =
+ std::min(r.start_sample_ + kNumberBlocksToUpdate * kBlockSize - 1,
+ filter_size - 1);
+
+ // Check range.
+ RTC_DCHECK_LT(r.start_sample_, filter_size);
+ RTC_DCHECK_LT(r.end_sample_, filter_size);
+ RTC_DCHECK_LE(r.start_sample_, r.end_sample_);
+}
+
+FilterAnalyzer::ConsistentFilterDetector::ConsistentFilterDetector(
+ const EchoCanceller3Config& config)
+ : active_render_threshold_(config.render_levels.active_render_limit *
+ config.render_levels.active_render_limit *
+ kFftLengthBy2) {
+ Reset();
+}
+
+void FilterAnalyzer::ConsistentFilterDetector::Reset() {
+ significant_peak_ = false;
+ filter_floor_accum_ = 0.f;
+ filter_secondary_peak_ = 0.f;
+ filter_floor_low_limit_ = 0;
+ filter_floor_high_limit_ = 0;
+ consistent_estimate_counter_ = 0;
+ consistent_delay_reference_ = -10;
+}
+
+bool FilterAnalyzer::ConsistentFilterDetector::Detect(
+ rtc::ArrayView<const float> filter_to_analyze,
+ const FilterRegion& region,
+ const Block& x_block,
+ size_t peak_index,
+ int delay_blocks) {
+ if (region.start_sample_ == 0) {
+ filter_floor_accum_ = 0.f;
+ filter_secondary_peak_ = 0.f;
+ filter_floor_low_limit_ = peak_index < 64 ? 0 : peak_index - 64;
+ filter_floor_high_limit_ =
+ peak_index > filter_to_analyze.size() - 129 ? 0 : peak_index + 128;
+ }
+
+ float filter_floor_accum = filter_floor_accum_;
+ float filter_secondary_peak = filter_secondary_peak_;
+ for (size_t k = region.start_sample_;
+ k < std::min(region.end_sample_ + 1, filter_floor_low_limit_); ++k) {
+ float abs_h = fabsf(filter_to_analyze[k]);
+ filter_floor_accum += abs_h;
+ filter_secondary_peak = std::max(filter_secondary_peak, abs_h);
+ }
+
+ for (size_t k = std::max(filter_floor_high_limit_, region.start_sample_);
+ k <= region.end_sample_; ++k) {
+ float abs_h = fabsf(filter_to_analyze[k]);
+ filter_floor_accum += abs_h;
+ filter_secondary_peak = std::max(filter_secondary_peak, abs_h);
+ }
+ filter_floor_accum_ = filter_floor_accum;
+ filter_secondary_peak_ = filter_secondary_peak;
+
+ if (region.end_sample_ == filter_to_analyze.size() - 1) {
+ float filter_floor = filter_floor_accum_ /
+ (filter_floor_low_limit_ + filter_to_analyze.size() -
+ filter_floor_high_limit_);
+
+ float abs_peak = fabsf(filter_to_analyze[peak_index]);
+ significant_peak_ = abs_peak > 10.f * filter_floor &&
+ abs_peak > 2.f * filter_secondary_peak_;
+ }
+
+ if (significant_peak_) {
+ bool active_render_block = false;
+ for (int ch = 0; ch < x_block.NumChannels(); ++ch) {
+ rtc::ArrayView<const float, kBlockSize> x_channel =
+ x_block.View(/*band=*/0, ch);
+ const float x_energy = std::inner_product(
+ x_channel.begin(), x_channel.end(), x_channel.begin(), 0.f);
+ if (x_energy > active_render_threshold_) {
+ active_render_block = true;
+ break;
+ }
+ }
+
+ if (consistent_delay_reference_ == delay_blocks) {
+ if (active_render_block) {
+ ++consistent_estimate_counter_;
+ }
+ } else {
+ consistent_estimate_counter_ = 0;
+ consistent_delay_reference_ = delay_blocks;
+ }
+ }
+ return consistent_estimate_counter_ > 1.5f * kNumBlocksPerSecond;
+}
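+
+// Summary of the decision logic above: the filter peak is deemed significant
+// once it exceeds 10x the average filter floor and 2x the strongest
+// secondary peak. Blocks with active render and an unchanged delay estimate
+// then accumulate, and the filter is reported consistent once that streak
+// exceeds 1.5 seconds' worth of blocks.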
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/filter_analyzer.h b/third_party/libwebrtc/modules/audio_processing/aec3/filter_analyzer.h
new file mode 100644
index 0000000000..9aec8b14d7
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/filter_analyzer.h
@@ -0,0 +1,150 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_FILTER_ANALYZER_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_FILTER_ANALYZER_H_
+
+#include <stddef.h>
+
+#include <array>
+#include <atomic>
+#include <memory>
+#include <vector>
+
+#include "api/array_view.h"
+#include "api/audio/echo_canceller3_config.h"
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "modules/audio_processing/aec3/block.h"
+
+namespace webrtc {
+
+class ApmDataDumper;
+class RenderBuffer;
+
+// Class for analyzing the properties of an adaptive filter.
+class FilterAnalyzer {
+ public:
+ FilterAnalyzer(const EchoCanceller3Config& config,
+ size_t num_capture_channels);
+ ~FilterAnalyzer();
+
+ FilterAnalyzer(const FilterAnalyzer&) = delete;
+ FilterAnalyzer& operator=(const FilterAnalyzer&) = delete;
+
+ // Resets the analysis.
+ void Reset();
+
+ // Updates the estimates with new input data.
+ void Update(rtc::ArrayView<const std::vector<float>> filters_time_domain,
+ const RenderBuffer& render_buffer,
+ bool* any_filter_consistent,
+ float* max_echo_path_gain);
+
+ // Returns the delay in blocks for each filter.
+ rtc::ArrayView<const int> FilterDelaysBlocks() const {
+ return filter_delays_blocks_;
+ }
+
+ // Returns the minimum delay of all filters in terms of blocks.
+ int MinFilterDelayBlocks() const { return min_filter_delay_blocks_; }
+
+  // Returns the length in blocks of the currently used filter.
+ int FilterLengthBlocks() const {
+ return filter_analysis_states_[0].filter_length_blocks;
+ }
+
+  // Returns the preprocessed filters.
+ rtc::ArrayView<const std::vector<float>> GetAdjustedFilters() const {
+ return h_highpass_;
+ }
+
+ // Public for testing purposes only.
+ void SetRegionToAnalyze(size_t filter_size);
+
+ private:
+ struct FilterAnalysisState;
+
+ void AnalyzeRegion(
+ rtc::ArrayView<const std::vector<float>> filters_time_domain,
+ const RenderBuffer& render_buffer);
+
+ void UpdateFilterGain(rtc::ArrayView<const float> filters_time_domain,
+ FilterAnalysisState* st);
+ void PreProcessFilters(
+ rtc::ArrayView<const std::vector<float>> filters_time_domain);
+
+ void ResetRegion();
+
+ struct FilterRegion {
+ size_t start_sample_;
+ size_t end_sample_;
+ };
+
+ // This class checks whether the shape of the impulse response has been
+ // consistent over time.
+ class ConsistentFilterDetector {
+ public:
+ explicit ConsistentFilterDetector(const EchoCanceller3Config& config);
+ void Reset();
+ bool Detect(rtc::ArrayView<const float> filter_to_analyze,
+ const FilterRegion& region,
+ const Block& x_block,
+ size_t peak_index,
+ int delay_blocks);
+
+ private:
+ bool significant_peak_;
+ float filter_floor_accum_;
+ float filter_secondary_peak_;
+ size_t filter_floor_low_limit_;
+ size_t filter_floor_high_limit_;
+ const float active_render_threshold_;
+ size_t consistent_estimate_counter_ = 0;
+ int consistent_delay_reference_ = -10;
+ };
+
+ struct FilterAnalysisState {
+ explicit FilterAnalysisState(const EchoCanceller3Config& config)
+ : filter_length_blocks(config.filter.refined_initial.length_blocks),
+ consistent_filter_detector(config) {
+ Reset(config.ep_strength.default_gain);
+ }
+
+ void Reset(float default_gain) {
+ peak_index = 0;
+ gain = default_gain;
+ consistent_filter_detector.Reset();
+ }
+
+ float gain;
+ size_t peak_index;
+ int filter_length_blocks;
+ bool consistent_estimate = false;
+ ConsistentFilterDetector consistent_filter_detector;
+ };
+
+ static std::atomic<int> instance_count_;
+ std::unique_ptr<ApmDataDumper> data_dumper_;
+ const bool bounded_erl_;
+ const float default_gain_;
+ std::vector<std::vector<float>> h_highpass_;
+
+ size_t blocks_since_reset_ = 0;
+ FilterRegion region_;
+
+ std::vector<FilterAnalysisState> filter_analysis_states_;
+ std::vector<int> filter_delays_blocks_;
+
+ int min_filter_delay_blocks_ = 0;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AEC3_FILTER_ANALYZER_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/filter_analyzer_unittest.cc b/third_party/libwebrtc/modules/audio_processing/aec3/filter_analyzer_unittest.cc
new file mode 100644
index 0000000000..f1e2e4c188
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/filter_analyzer_unittest.cc
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/filter_analyzer.h"
+
+#include <algorithm>
+
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+// Verifies that the filter analyzer handles filter resizes properly.
+TEST(FilterAnalyzer, FilterResize) {
+ EchoCanceller3Config c;
+ std::vector<float> filter(65, 0.f);
+ for (size_t num_capture_channels : {1, 2, 4}) {
+ FilterAnalyzer fa(c, num_capture_channels);
+ fa.SetRegionToAnalyze(filter.size());
+ fa.SetRegionToAnalyze(filter.size());
+ filter.resize(32);
+ fa.SetRegionToAnalyze(filter.size());
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/frame_blocker.cc b/third_party/libwebrtc/modules/audio_processing/aec3/frame_blocker.cc
new file mode 100644
index 0000000000..3039dcf7f1
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/frame_blocker.cc
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/frame_blocker.h"
+
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+FrameBlocker::FrameBlocker(size_t num_bands, size_t num_channels)
+ : num_bands_(num_bands),
+ num_channels_(num_channels),
+ buffer_(num_bands_, std::vector<std::vector<float>>(num_channels)) {
+ RTC_DCHECK_LT(0, num_bands);
+ RTC_DCHECK_LT(0, num_channels);
+ for (auto& band : buffer_) {
+ for (auto& channel : band) {
+ channel.reserve(kBlockSize);
+ RTC_DCHECK(channel.empty());
+ }
+ }
+}
+
+FrameBlocker::~FrameBlocker() = default;
+
+void FrameBlocker::InsertSubFrameAndExtractBlock(
+ const std::vector<std::vector<rtc::ArrayView<float>>>& sub_frame,
+ Block* block) {
+ RTC_DCHECK(block);
+ RTC_DCHECK_EQ(num_bands_, block->NumBands());
+ RTC_DCHECK_EQ(num_bands_, sub_frame.size());
+ for (size_t band = 0; band < num_bands_; ++band) {
+ RTC_DCHECK_EQ(num_channels_, block->NumChannels());
+ RTC_DCHECK_EQ(num_channels_, sub_frame[band].size());
+ for (size_t channel = 0; channel < num_channels_; ++channel) {
+ RTC_DCHECK_GE(kBlockSize - 16, buffer_[band][channel].size());
+ RTC_DCHECK_EQ(kSubFrameLength, sub_frame[band][channel].size());
+ const int samples_to_block = kBlockSize - buffer_[band][channel].size();
+ std::copy(buffer_[band][channel].begin(), buffer_[band][channel].end(),
+ block->begin(band, channel));
+ std::copy(sub_frame[band][channel].begin(),
+ sub_frame[band][channel].begin() + samples_to_block,
+ block->begin(band, channel) + kBlockSize - samples_to_block);
+ buffer_[band][channel].clear();
+ buffer_[band][channel].insert(
+ buffer_[band][channel].begin(),
+ sub_frame[band][channel].begin() + samples_to_block,
+ sub_frame[band][channel].end());
+ }
+ }
+}
+
+bool FrameBlocker::IsBlockAvailable() const {
+ return kBlockSize == buffer_[0][0].size();
+}
+
+void FrameBlocker::ExtractBlock(Block* block) {
+ RTC_DCHECK(block);
+ RTC_DCHECK_EQ(num_bands_, block->NumBands());
+ RTC_DCHECK_EQ(num_channels_, block->NumChannels());
+ RTC_DCHECK(IsBlockAvailable());
+ for (size_t band = 0; band < num_bands_; ++band) {
+ for (size_t channel = 0; channel < num_channels_; ++channel) {
+ RTC_DCHECK_EQ(kBlockSize, buffer_[band][channel].size());
+ std::copy(buffer_[band][channel].begin(), buffer_[band][channel].end(),
+ block->begin(band, channel));
+ buffer_[band][channel].clear();
+ }
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/frame_blocker.h b/third_party/libwebrtc/modules/audio_processing/aec3/frame_blocker.h
new file mode 100644
index 0000000000..623c812157
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/frame_blocker.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_FRAME_BLOCKER_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_FRAME_BLOCKER_H_
+
+#include <stddef.h>
+
+#include <vector>
+
+#include "api/array_view.h"
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "modules/audio_processing/aec3/block.h"
+
+namespace webrtc {
+
+// Class for producing 64-sample multiband blocks from frames consisting of
+// two 80-sample subframes.
+class FrameBlocker {
+ public:
+ FrameBlocker(size_t num_bands, size_t num_channels);
+ ~FrameBlocker();
+ FrameBlocker(const FrameBlocker&) = delete;
+ FrameBlocker& operator=(const FrameBlocker&) = delete;
+
+  // Inserts one 80-sample multiband subframe from the multiband frame and
+  // extracts one 64-sample multiband block.
+ void InsertSubFrameAndExtractBlock(
+ const std::vector<std::vector<rtc::ArrayView<float>>>& sub_frame,
+ Block* block);
+ // Reports whether a multiband block of 64 samples is available for
+ // extraction.
+ bool IsBlockAvailable() const;
+ // Extracts a multiband block of 64 samples.
+ void ExtractBlock(Block* block);
+
+ private:
+ const size_t num_bands_;
+ const size_t num_channels_;
+ std::vector<std::vector<std::vector<float>>> buffer_;
+};
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AEC3_FRAME_BLOCKER_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/frame_blocker_unittest.cc b/third_party/libwebrtc/modules/audio_processing/aec3/frame_blocker_unittest.cc
new file mode 100644
index 0000000000..92e393023a
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/frame_blocker_unittest.cc
@@ -0,0 +1,425 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/frame_blocker.h"
+
+#include <string>
+#include <vector>
+
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "modules/audio_processing/aec3/block_framer.h"
+#include "rtc_base/strings/string_builder.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+float ComputeSampleValue(size_t chunk_counter,
+ size_t chunk_size,
+ size_t band,
+ size_t channel,
+ size_t sample_index,
+ int offset) {
+ float value =
+ static_cast<int>(chunk_counter * chunk_size + sample_index + channel) +
+ offset;
+ return value > 0 ? 5000 * band + value : 0;
+}
+
+void FillSubFrame(size_t sub_frame_counter,
+ int offset,
+ std::vector<std::vector<std::vector<float>>>* sub_frame) {
+ for (size_t band = 0; band < sub_frame->size(); ++band) {
+ for (size_t channel = 0; channel < (*sub_frame)[band].size(); ++channel) {
+ for (size_t sample = 0; sample < (*sub_frame)[band][channel].size();
+ ++sample) {
+ (*sub_frame)[band][channel][sample] = ComputeSampleValue(
+ sub_frame_counter, kSubFrameLength, band, channel, sample, offset);
+ }
+ }
+ }
+}
+
+void FillSubFrameView(
+ size_t sub_frame_counter,
+ int offset,
+ std::vector<std::vector<std::vector<float>>>* sub_frame,
+ std::vector<std::vector<rtc::ArrayView<float>>>* sub_frame_view) {
+ FillSubFrame(sub_frame_counter, offset, sub_frame);
+ for (size_t band = 0; band < sub_frame_view->size(); ++band) {
+ for (size_t channel = 0; channel < (*sub_frame_view)[band].size();
+ ++channel) {
+ (*sub_frame_view)[band][channel] = rtc::ArrayView<float>(
+ &(*sub_frame)[band][channel][0], (*sub_frame)[band][channel].size());
+ }
+ }
+}
+
+bool VerifySubFrame(
+ size_t sub_frame_counter,
+ int offset,
+ const std::vector<std::vector<rtc::ArrayView<float>>>& sub_frame_view) {
+ std::vector<std::vector<std::vector<float>>> reference_sub_frame(
+ sub_frame_view.size(),
+ std::vector<std::vector<float>>(
+ sub_frame_view[0].size(),
+ std::vector<float>(sub_frame_view[0][0].size(), 0.f)));
+ FillSubFrame(sub_frame_counter, offset, &reference_sub_frame);
+ for (size_t band = 0; band < sub_frame_view.size(); ++band) {
+ for (size_t channel = 0; channel < sub_frame_view[band].size(); ++channel) {
+ for (size_t sample = 0; sample < sub_frame_view[band][channel].size();
+ ++sample) {
+ if (reference_sub_frame[band][channel][sample] !=
+ sub_frame_view[band][channel][sample]) {
+ return false;
+ }
+ }
+ }
+ }
+ return true;
+}
+
+bool VerifyBlock(size_t block_counter, int offset, const Block& block) {
+ for (int band = 0; band < block.NumBands(); ++band) {
+ for (int channel = 0; channel < block.NumChannels(); ++channel) {
+ for (size_t sample = 0; sample < kBlockSize; ++sample) {
+ auto it = block.begin(band, channel) + sample;
+ const float reference_value = ComputeSampleValue(
+ block_counter, kBlockSize, band, channel, sample, offset);
+ if (reference_value != *it) {
+ return false;
+ }
+ }
+ }
+ }
+ return true;
+}
+
+// Verifies that the FrameBlocker properly forms blocks out of the frames.
+void RunBlockerTest(int sample_rate_hz, size_t num_channels) {
+ constexpr size_t kNumSubFramesToProcess = 20;
+ const size_t num_bands = NumBandsForRate(sample_rate_hz);
+
+ Block block(num_bands, num_channels);
+ std::vector<std::vector<std::vector<float>>> input_sub_frame(
+ num_bands, std::vector<std::vector<float>>(
+ num_channels, std::vector<float>(kSubFrameLength, 0.f)));
+ std::vector<std::vector<rtc::ArrayView<float>>> input_sub_frame_view(
+ num_bands, std::vector<rtc::ArrayView<float>>(num_channels));
+ FrameBlocker blocker(num_bands, num_channels);
+
+ size_t block_counter = 0;
+ for (size_t sub_frame_index = 0; sub_frame_index < kNumSubFramesToProcess;
+ ++sub_frame_index) {
+ FillSubFrameView(sub_frame_index, 0, &input_sub_frame,
+ &input_sub_frame_view);
+
+ blocker.InsertSubFrameAndExtractBlock(input_sub_frame_view, &block);
+    EXPECT_TRUE(VerifyBlock(block_counter++, 0, block));
+
+ if ((sub_frame_index + 1) % 4 == 0) {
+ EXPECT_TRUE(blocker.IsBlockAvailable());
+ } else {
+ EXPECT_FALSE(blocker.IsBlockAvailable());
+ }
+ if (blocker.IsBlockAvailable()) {
+ blocker.ExtractBlock(&block);
+      EXPECT_TRUE(VerifyBlock(block_counter++, 0, block));
+ }
+ }
+}
+
+// Verifies that the FrameBlocker and BlockFramer work well together and
+// produce the expected output.
+void RunBlockerAndFramerTest(int sample_rate_hz, size_t num_channels) {
+  constexpr size_t kNumSubFramesToProcess = 20;
+ const size_t num_bands = NumBandsForRate(sample_rate_hz);
+
+ Block block(num_bands, num_channels);
+ std::vector<std::vector<std::vector<float>>> input_sub_frame(
+ num_bands, std::vector<std::vector<float>>(
+ num_channels, std::vector<float>(kSubFrameLength, 0.f)));
+ std::vector<std::vector<std::vector<float>>> output_sub_frame(
+ num_bands, std::vector<std::vector<float>>(
+ num_channels, std::vector<float>(kSubFrameLength, 0.f)));
+ std::vector<std::vector<rtc::ArrayView<float>>> output_sub_frame_view(
+ num_bands, std::vector<rtc::ArrayView<float>>(num_channels));
+ std::vector<std::vector<rtc::ArrayView<float>>> input_sub_frame_view(
+ num_bands, std::vector<rtc::ArrayView<float>>(num_channels));
+ FrameBlocker blocker(num_bands, num_channels);
+ BlockFramer framer(num_bands, num_channels);
+
+ for (size_t sub_frame_index = 0; sub_frame_index < kNumSubFramesToProcess;
+ ++sub_frame_index) {
+ FillSubFrameView(sub_frame_index, 0, &input_sub_frame,
+ &input_sub_frame_view);
+ FillSubFrameView(sub_frame_index, 0, &output_sub_frame,
+ &output_sub_frame_view);
+
+ blocker.InsertSubFrameAndExtractBlock(input_sub_frame_view, &block);
+ framer.InsertBlockAndExtractSubFrame(block, &output_sub_frame_view);
+
+ if ((sub_frame_index + 1) % 4 == 0) {
+ EXPECT_TRUE(blocker.IsBlockAvailable());
+ } else {
+ EXPECT_FALSE(blocker.IsBlockAvailable());
+ }
+ if (blocker.IsBlockAvailable()) {
+ blocker.ExtractBlock(&block);
+ framer.InsertBlock(block);
+ }
+ if (sub_frame_index > 1) {
+ EXPECT_TRUE(VerifySubFrame(sub_frame_index, -64, output_sub_frame_view));
+ }
+ }
+}
+
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+// Verifies that the FrameBlocker crashes if the InsertSubFrameAndExtractBlock
+// method is called for inputs with the wrong number of bands, channels or
+// sub-frame samples.
+void RunWronglySizedInsertAndExtractParametersTest(
+ int sample_rate_hz,
+ size_t correct_num_channels,
+ size_t num_block_bands,
+ size_t num_block_channels,
+ size_t num_sub_frame_bands,
+ size_t num_sub_frame_channels,
+ size_t sub_frame_length) {
+ const size_t correct_num_bands = NumBandsForRate(sample_rate_hz);
+
+ Block block(num_block_bands, num_block_channels);
+ std::vector<std::vector<std::vector<float>>> input_sub_frame(
+ num_sub_frame_bands,
+ std::vector<std::vector<float>>(
+ num_sub_frame_channels, std::vector<float>(sub_frame_length, 0.f)));
+ std::vector<std::vector<rtc::ArrayView<float>>> input_sub_frame_view(
+ input_sub_frame.size(),
+ std::vector<rtc::ArrayView<float>>(num_sub_frame_channels));
+ FillSubFrameView(0, 0, &input_sub_frame, &input_sub_frame_view);
+ FrameBlocker blocker(correct_num_bands, correct_num_channels);
+ EXPECT_DEATH(
+ blocker.InsertSubFrameAndExtractBlock(input_sub_frame_view, &block), "");
+}
+
+// Verifies that the FrameBlocker crashes if the ExtractBlock method is called
+// for blocks with the wrong number of bands or channels.
+void RunWronglySizedExtractParameterTest(int sample_rate_hz,
+ size_t correct_num_channels,
+ size_t num_block_bands,
+ size_t num_block_channels) {
+ const size_t correct_num_bands = NumBandsForRate(sample_rate_hz);
+
+ Block correct_block(correct_num_bands, correct_num_channels);
+ Block wrong_block(num_block_bands, num_block_channels);
+ std::vector<std::vector<std::vector<float>>> input_sub_frame(
+ correct_num_bands,
+ std::vector<std::vector<float>>(
+ correct_num_channels, std::vector<float>(kSubFrameLength, 0.f)));
+ std::vector<std::vector<rtc::ArrayView<float>>> input_sub_frame_view(
+ input_sub_frame.size(),
+ std::vector<rtc::ArrayView<float>>(correct_num_channels));
+ FillSubFrameView(0, 0, &input_sub_frame, &input_sub_frame_view);
+ FrameBlocker blocker(correct_num_bands, correct_num_channels);
+ blocker.InsertSubFrameAndExtractBlock(input_sub_frame_view, &correct_block);
+ blocker.InsertSubFrameAndExtractBlock(input_sub_frame_view, &correct_block);
+ blocker.InsertSubFrameAndExtractBlock(input_sub_frame_view, &correct_block);
+ blocker.InsertSubFrameAndExtractBlock(input_sub_frame_view, &correct_block);
+
+ EXPECT_DEATH(blocker.ExtractBlock(&wrong_block), "");
+}
+
+// Verifies that the FrameBlocker crashes if the ExtractBlock method is called
+// after a wrong number of previous InsertSubFrameAndExtractBlock method calls
+// have been made.
+void RunWrongExtractOrderTest(int sample_rate_hz,
+ size_t num_channels,
+                              size_t num_preceding_api_calls) {
+ const size_t num_bands = NumBandsForRate(sample_rate_hz);
+
+ Block block(num_bands, num_channels);
+ std::vector<std::vector<std::vector<float>>> input_sub_frame(
+ num_bands, std::vector<std::vector<float>>(
+ num_channels, std::vector<float>(kSubFrameLength, 0.f)));
+ std::vector<std::vector<rtc::ArrayView<float>>> input_sub_frame_view(
+ input_sub_frame.size(), std::vector<rtc::ArrayView<float>>(num_channels));
+ FillSubFrameView(0, 0, &input_sub_frame, &input_sub_frame_view);
+ FrameBlocker blocker(num_bands, num_channels);
+  for (size_t k = 0; k < num_preceding_api_calls; ++k) {
+ blocker.InsertSubFrameAndExtractBlock(input_sub_frame_view, &block);
+ }
+
+ EXPECT_DEATH(blocker.ExtractBlock(&block), "");
+}
+#endif
+
+std::string ProduceDebugText(int sample_rate_hz, size_t num_channels) {
+ rtc::StringBuilder ss;
+ ss << "Sample rate: " << sample_rate_hz;
+ ss << ", number of channels: " << num_channels;
+ return ss.Release();
+}
+
+} // namespace
+
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+TEST(FrameBlockerDeathTest,
+ WrongNumberOfBandsInBlockForInsertSubFrameAndExtractBlock) {
+ for (auto rate : {16000, 32000, 48000}) {
+ for (size_t correct_num_channels : {1, 2, 4, 8}) {
+ SCOPED_TRACE(ProduceDebugText(rate, correct_num_channels));
+ const size_t correct_num_bands = NumBandsForRate(rate);
+ const size_t wrong_num_bands = (correct_num_bands % 3) + 1;
+ RunWronglySizedInsertAndExtractParametersTest(
+ rate, correct_num_channels, wrong_num_bands, correct_num_channels,
+ correct_num_bands, correct_num_channels, kSubFrameLength);
+ }
+ }
+}
+
+TEST(FrameBlockerDeathTest,
+ WrongNumberOfChannelsInBlockForInsertSubFrameAndExtractBlock) {
+ for (auto rate : {16000, 32000, 48000}) {
+ for (size_t correct_num_channels : {1, 2, 4, 8}) {
+ SCOPED_TRACE(ProduceDebugText(rate, correct_num_channels));
+ const size_t correct_num_bands = NumBandsForRate(rate);
+ const size_t wrong_num_channels = correct_num_channels + 1;
+ RunWronglySizedInsertAndExtractParametersTest(
+ rate, correct_num_channels, correct_num_bands, wrong_num_channels,
+ correct_num_bands, correct_num_channels, kSubFrameLength);
+ }
+ }
+}
+
+TEST(FrameBlockerDeathTest,
+ WrongNumberOfBandsInSubFrameForInsertSubFrameAndExtractBlock) {
+ for (auto rate : {16000, 32000, 48000}) {
+ for (size_t correct_num_channels : {1, 2, 4, 8}) {
+ SCOPED_TRACE(ProduceDebugText(rate, correct_num_channels));
+ const size_t correct_num_bands = NumBandsForRate(rate);
+ const size_t wrong_num_bands = (correct_num_bands % 3) + 1;
+ RunWronglySizedInsertAndExtractParametersTest(
+ rate, correct_num_channels, correct_num_bands, correct_num_channels,
+ wrong_num_bands, correct_num_channels, kSubFrameLength);
+ }
+ }
+}
+
+TEST(FrameBlockerDeathTest,
+ WrongNumberOfChannelsInSubFrameForInsertSubFrameAndExtractBlock) {
+ for (auto rate : {16000, 32000, 48000}) {
+ for (size_t correct_num_channels : {1, 2, 4, 8}) {
+ SCOPED_TRACE(ProduceDebugText(rate, correct_num_channels));
+ const size_t correct_num_bands = NumBandsForRate(rate);
+ const size_t wrong_num_channels = correct_num_channels + 1;
+ RunWronglySizedInsertAndExtractParametersTest(
+ rate, correct_num_channels, correct_num_bands, wrong_num_channels,
+ correct_num_bands, wrong_num_channels, kSubFrameLength);
+ }
+ }
+}
+
+TEST(FrameBlockerDeathTest,
+ WrongNumberOfSamplesInSubFrameForInsertSubFrameAndExtractBlock) {
+ for (auto rate : {16000, 32000, 48000}) {
+ for (size_t correct_num_channels : {1, 2, 4, 8}) {
+ SCOPED_TRACE(ProduceDebugText(rate, correct_num_channels));
+ const size_t correct_num_bands = NumBandsForRate(rate);
+ RunWronglySizedInsertAndExtractParametersTest(
+ rate, correct_num_channels, correct_num_bands, correct_num_channels,
+ correct_num_bands, correct_num_channels, kSubFrameLength - 1);
+ }
+ }
+}
+
+TEST(FrameBlockerDeathTest, WrongNumberOfBandsInBlockForExtractBlock) {
+ for (auto rate : {16000, 32000, 48000}) {
+ for (size_t correct_num_channels : {1, 2, 4, 8}) {
+ SCOPED_TRACE(ProduceDebugText(rate, correct_num_channels));
+ const size_t correct_num_bands = NumBandsForRate(rate);
+ const size_t wrong_num_bands = (correct_num_bands % 3) + 1;
+ RunWronglySizedExtractParameterTest(
+ rate, correct_num_channels, wrong_num_bands, correct_num_channels);
+ }
+ }
+}
+
+TEST(FrameBlockerDeathTest, WrongNumberOfChannelsInBlockForExtractBlock) {
+ for (auto rate : {16000, 32000, 48000}) {
+ for (size_t correct_num_channels : {1, 2, 4, 8}) {
+ SCOPED_TRACE(ProduceDebugText(rate, correct_num_channels));
+ const size_t correct_num_bands = NumBandsForRate(rate);
+ const size_t wrong_num_channels = correct_num_channels + 1;
+ RunWronglySizedExtractParameterTest(
+ rate, correct_num_channels, correct_num_bands, wrong_num_channels);
+ }
+ }
+}
+
+TEST(FrameBlockerDeathTest, WrongNumberOfPrecedingApiCallsForExtractBlock) {
+ for (auto rate : {16000, 32000, 48000}) {
+ for (size_t num_channels : {1, 2, 4, 8}) {
+ for (size_t num_calls = 0; num_calls < 4; ++num_calls) {
+ rtc::StringBuilder ss;
+ ss << "Sample rate: " << rate;
+        ss << ", num channels: " << num_channels;
+        ss << ", num preceding InsertSubFrameAndExtractBlock calls: "
+ << num_calls;
+
+ SCOPED_TRACE(ss.str());
+ RunWrongExtractOrderTest(rate, num_channels, num_calls);
+ }
+ }
+ }
+}
+
+// Verifies that the check for a zero number of channels works.
+TEST(FrameBlockerDeathTest, ZeroNumberOfChannelsParameter) {
+ EXPECT_DEATH(FrameBlocker(16000, 0), "");
+}
+
+// Verifies that the check for a zero number of bands works.
+TEST(FrameBlockerDeathTest, ZeroNumberOfBandsParameter) {
+ EXPECT_DEATH(FrameBlocker(0, 1), "");
+}
+
+// Verifies that the check for a null block pointer works.
+TEST(FrameBlockerDeathTest, NullBlockParameter) {
+ std::vector<std::vector<std::vector<float>>> sub_frame(
+ 1, std::vector<std::vector<float>>(
+ 1, std::vector<float>(kSubFrameLength, 0.f)));
+ std::vector<std::vector<rtc::ArrayView<float>>> sub_frame_view(
+ sub_frame.size());
+ FillSubFrameView(0, 0, &sub_frame, &sub_frame_view);
+ EXPECT_DEATH(
+ FrameBlocker(1, 1).InsertSubFrameAndExtractBlock(sub_frame_view, nullptr),
+ "");
+}
+
+#endif
+
+TEST(FrameBlocker, BlockBitexactness) {
+ for (auto rate : {16000, 32000, 48000}) {
+ for (size_t num_channels : {1, 2, 4, 8}) {
+ SCOPED_TRACE(ProduceDebugText(rate, num_channels));
+ RunBlockerTest(rate, num_channels);
+ }
+ }
+}
+
+TEST(FrameBlocker, BlockerAndFramer) {
+ for (auto rate : {16000, 32000, 48000}) {
+ for (size_t num_channels : {1, 2, 4, 8}) {
+ SCOPED_TRACE(ProduceDebugText(rate, num_channels));
+ RunBlockerAndFramerTest(rate, num_channels);
+ }
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/fullband_erle_estimator.cc b/third_party/libwebrtc/modules/audio_processing/aec3/fullband_erle_estimator.cc
new file mode 100644
index 0000000000..e56674e4c9
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/fullband_erle_estimator.cc
@@ -0,0 +1,191 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/fullband_erle_estimator.h"
+
+#include <algorithm>
+#include <memory>
+#include <numeric>
+
+#include "absl/types/optional.h"
+#include "api/array_view.h"
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/numerics/safe_minmax.h"
+
+namespace webrtc {
+
+namespace {
+constexpr float kEpsilon = 1e-3f;
+constexpr float kX2BandEnergyThreshold = 44015068.0f;
+constexpr int kBlocksToHoldErle = 100;
+constexpr int kPointsToAccumulate = 6;
+} // namespace
+
+FullBandErleEstimator::FullBandErleEstimator(
+ const EchoCanceller3Config::Erle& config,
+ size_t num_capture_channels)
+ : min_erle_log2_(FastApproxLog2f(config.min + kEpsilon)),
+ max_erle_lf_log2_(FastApproxLog2f(config.max_l + kEpsilon)),
+ hold_counters_instantaneous_erle_(num_capture_channels, 0),
+ erle_time_domain_log2_(num_capture_channels, min_erle_log2_),
+ instantaneous_erle_(num_capture_channels, ErleInstantaneous(config)),
+ linear_filters_qualities_(num_capture_channels) {
+ Reset();
+}
+
+FullBandErleEstimator::~FullBandErleEstimator() = default;
+
+void FullBandErleEstimator::Reset() {
+ for (auto& instantaneous_erle_ch : instantaneous_erle_) {
+ instantaneous_erle_ch.Reset();
+ }
+
+ UpdateQualityEstimates();
+ std::fill(erle_time_domain_log2_.begin(), erle_time_domain_log2_.end(),
+ min_erle_log2_);
+ std::fill(hold_counters_instantaneous_erle_.begin(),
+ hold_counters_instantaneous_erle_.end(), 0);
+}
+
+void FullBandErleEstimator::Update(
+ rtc::ArrayView<const float> X2,
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>> Y2,
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>> E2,
+ const std::vector<bool>& converged_filters) {
+ for (size_t ch = 0; ch < Y2.size(); ++ch) {
+ if (converged_filters[ch]) {
+ // Computes the fullband ERLE.
+ const float X2_sum = std::accumulate(X2.begin(), X2.end(), 0.0f);
+ if (X2_sum > kX2BandEnergyThreshold * X2.size()) {
+ const float Y2_sum =
+ std::accumulate(Y2[ch].begin(), Y2[ch].end(), 0.0f);
+ const float E2_sum =
+ std::accumulate(E2[ch].begin(), E2[ch].end(), 0.0f);
+ if (instantaneous_erle_[ch].Update(Y2_sum, E2_sum)) {
+ hold_counters_instantaneous_erle_[ch] = kBlocksToHoldErle;
+ erle_time_domain_log2_[ch] +=
+ 0.05f * ((instantaneous_erle_[ch].GetInstErleLog2().value()) -
+ erle_time_domain_log2_[ch]);
+ erle_time_domain_log2_[ch] =
+ std::max(erle_time_domain_log2_[ch], min_erle_log2_);
+ }
+ }
+ }
+ --hold_counters_instantaneous_erle_[ch];
+ if (hold_counters_instantaneous_erle_[ch] == 0) {
+ instantaneous_erle_[ch].ResetAccumulators();
+ }
+ }
+
+ UpdateQualityEstimates();
+}
+
+void FullBandErleEstimator::Dump(
+ const std::unique_ptr<ApmDataDumper>& data_dumper) const {
+ data_dumper->DumpRaw("aec3_fullband_erle_log2", FullbandErleLog2());
+ instantaneous_erle_[0].Dump(data_dumper);
+}
+
+void FullBandErleEstimator::UpdateQualityEstimates() {
+ for (size_t ch = 0; ch < instantaneous_erle_.size(); ++ch) {
+ linear_filters_qualities_[ch] =
+ instantaneous_erle_[ch].GetQualityEstimate();
+ }
+}
+
+FullBandErleEstimator::ErleInstantaneous::ErleInstantaneous(
+ const EchoCanceller3Config::Erle& config)
+ : clamp_inst_quality_to_zero_(config.clamp_quality_estimate_to_zero),
+ clamp_inst_quality_to_one_(config.clamp_quality_estimate_to_one) {
+ Reset();
+}
+
+FullBandErleEstimator::ErleInstantaneous::~ErleInstantaneous() = default;
+
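+// Accumulates the echo (Y2) and error (E2) energies over kPointsToAccumulate
+// calls and, once enough points are available, refreshes the instantaneous
+// ERLE estimate as log2(sum(Y2) / sum(E2)).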
+bool FullBandErleEstimator::ErleInstantaneous::Update(const float Y2_sum,
+ const float E2_sum) {
+ bool update_estimates = false;
+ E2_acum_ += E2_sum;
+ Y2_acum_ += Y2_sum;
+ num_points_++;
+ if (num_points_ == kPointsToAccumulate) {
+ if (E2_acum_ > 0.f) {
+ update_estimates = true;
+ erle_log2_ = FastApproxLog2f(Y2_acum_ / E2_acum_ + kEpsilon);
+ }
+ num_points_ = 0;
+ E2_acum_ = 0.f;
+ Y2_acum_ = 0.f;
+ }
+
+ if (update_estimates) {
+ UpdateMaxMin();
+ UpdateQualityEstimate();
+ }
+ return update_estimates;
+}
+
+void FullBandErleEstimator::ErleInstantaneous::Reset() {
+ ResetAccumulators();
+ max_erle_log2_ = -10.f; // -30 dB.
+ min_erle_log2_ = 33.f; // 100 dB.
+ inst_quality_estimate_ = 0.f;
+}
+
+void FullBandErleEstimator::ErleInstantaneous::ResetAccumulators() {
+ erle_log2_ = absl::nullopt;
+ inst_quality_estimate_ = 0.f;
+ num_points_ = 0;
+ E2_acum_ = 0.f;
+ Y2_acum_ = 0.f;
+}
+
+void FullBandErleEstimator::ErleInstantaneous::Dump(
+ const std::unique_ptr<ApmDataDumper>& data_dumper) const {
+ data_dumper->DumpRaw("aec3_fullband_erle_inst_log2",
+ erle_log2_ ? *erle_log2_ : -10.f);
+ data_dumper->DumpRaw(
+ "aec3_erle_instantaneous_quality",
+ GetQualityEstimate() ? GetQualityEstimate().value() : 0.f);
+ data_dumper->DumpRaw("aec3_fullband_erle_max_log2", max_erle_log2_);
+ data_dumper->DumpRaw("aec3_fullband_erle_min_log2", min_erle_log2_);
+}
+
+void FullBandErleEstimator::ErleInstantaneous::UpdateMaxMin() {
+ RTC_DCHECK(erle_log2_);
+  // Apply forgetting factors to the maximum and minimum, and cap the result
+  // at the incoming value.
+ max_erle_log2_ -= 0.0004f; // Forget factor, approx 1dB every 3 sec.
+ max_erle_log2_ = std::max(max_erle_log2_, erle_log2_.value());
+ min_erle_log2_ += 0.0004f; // Forget factor, approx 1dB every 3 sec.
+ min_erle_log2_ = std::min(min_erle_log2_, erle_log2_.value());
+}
+
+void FullBandErleEstimator::ErleInstantaneous::UpdateQualityEstimate() {
+ const float alpha = 0.07f;
+ float quality_estimate = 0.f;
+ RTC_DCHECK(erle_log2_);
+  // TODO(peah): Currently, the estimate can become less than 0; this should
+ // be corrected.
+ if (max_erle_log2_ > min_erle_log2_) {
+ quality_estimate = (erle_log2_.value() - min_erle_log2_) /
+ (max_erle_log2_ - min_erle_log2_);
+ }
+ if (quality_estimate > inst_quality_estimate_) {
+ inst_quality_estimate_ = quality_estimate;
+ } else {
+ inst_quality_estimate_ +=
+ alpha * (quality_estimate - inst_quality_estimate_);
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/fullband_erle_estimator.h b/third_party/libwebrtc/modules/audio_processing/aec3/fullband_erle_estimator.h
new file mode 100644
index 0000000000..7a082176d6
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/fullband_erle_estimator.h
@@ -0,0 +1,118 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_FULLBAND_ERLE_ESTIMATOR_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_FULLBAND_ERLE_ESTIMATOR_H_
+
+#include <memory>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/array_view.h"
+#include "api/audio/echo_canceller3_config.h"
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+
+namespace webrtc {
+
+// Estimates the echo return loss enhancement using the energy of all the
+// frequency bands.
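+//
+// The estimates are kept in log2 units; a log2 value of v corresponds to
+// 10 * log10(2) * v, i.e. roughly 3 * v dB, so for instance an ERLE of 8
+// (log2 value 3) is approximately 9 dB.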
+class FullBandErleEstimator {
+ public:
+ FullBandErleEstimator(const EchoCanceller3Config::Erle& config,
+ size_t num_capture_channels);
+ ~FullBandErleEstimator();
+ // Resets the ERLE estimator.
+ void Reset();
+
+ // Updates the ERLE estimator.
+ void Update(rtc::ArrayView<const float> X2,
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>> Y2,
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>> E2,
+ const std::vector<bool>& converged_filters);
+
+  // Returns the fullband ERLE estimate in log2 units, taken as the minimum
+  // across the capture channels.
+ float FullbandErleLog2() const {
+ float min_erle = erle_time_domain_log2_[0];
+ for (size_t ch = 1; ch < erle_time_domain_log2_.size(); ++ch) {
+ min_erle = std::min(min_erle, erle_time_domain_log2_[ch]);
+ }
+ return min_erle;
+ }
+
+  // Returns an estimate of the current linear filter quality for each capture
+  // channel, as a number between 0 and 1 where 1 is the highest possible
+  // quality.
+ rtc::ArrayView<const absl::optional<float>> GetInstLinearQualityEstimates()
+ const {
+ return linear_filters_qualities_;
+ }
+
+ void Dump(const std::unique_ptr<ApmDataDumper>& data_dumper) const;
+
+ private:
+ void UpdateQualityEstimates();
+
+ class ErleInstantaneous {
+ public:
+ explicit ErleInstantaneous(const EchoCanceller3Config::Erle& config);
+ ~ErleInstantaneous();
+
+    // Updates the estimator with a new point; returns true if enough points
+    // were accumulated to refresh the instantaneous ERLE estimate.
+ bool Update(float Y2_sum, float E2_sum);
+ // Resets the instantaneous ERLE estimator to its initial state.
+ void Reset();
+    // Resets the members related to the instantaneous estimate.
+ void ResetAccumulators();
+ // Returns the instantaneous ERLE in log2 units.
+ absl::optional<float> GetInstErleLog2() const { return erle_log2_; }
+ // Gets an indication between 0 and 1 of the performance of the linear
+ // filter for the current time instant.
+ absl::optional<float> GetQualityEstimate() const {
+ if (erle_log2_) {
+ float value = inst_quality_estimate_;
+ if (clamp_inst_quality_to_zero_) {
+ value = std::max(0.f, value);
+ }
+ if (clamp_inst_quality_to_one_) {
+ value = std::min(1.f, value);
+ }
+ return absl::optional<float>(value);
+ }
+ return absl::nullopt;
+ }
+ void Dump(const std::unique_ptr<ApmDataDumper>& data_dumper) const;
+
+ private:
+ void UpdateMaxMin();
+ void UpdateQualityEstimate();
+ const bool clamp_inst_quality_to_zero_;
+ const bool clamp_inst_quality_to_one_;
+ absl::optional<float> erle_log2_;
+ float inst_quality_estimate_;
+ float max_erle_log2_;
+ float min_erle_log2_;
+ float Y2_acum_;
+ float E2_acum_;
+ int num_points_;
+ };
+
+ const float min_erle_log2_;
+ const float max_erle_lf_log2_;
+ std::vector<int> hold_counters_instantaneous_erle_;
+ std::vector<float> erle_time_domain_log2_;
+ std::vector<ErleInstantaneous> instantaneous_erle_;
+ std::vector<absl::optional<float>> linear_filters_qualities_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AEC3_FULLBAND_ERLE_ESTIMATOR_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/matched_filter.cc b/third_party/libwebrtc/modules/audio_processing/aec3/matched_filter.cc
new file mode 100644
index 0000000000..c5e394ad2f
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/matched_filter.cc
@@ -0,0 +1,807 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/audio_processing/aec3/matched_filter.h"
+
+// Defines WEBRTC_ARCH_X86_FAMILY, used below.
+#include "rtc_base/system/arch.h"
+
+#if defined(WEBRTC_HAS_NEON)
+#include <arm_neon.h>
+#endif
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+#include <emmintrin.h>
+#endif
+#include <algorithm>
+#include <cstddef>
+#include <initializer_list>
+#include <iterator>
+#include <numeric>
+
+#include "absl/types/optional.h"
+#include "api/array_view.h"
+#include "modules/audio_processing/aec3/downsampled_render_buffer.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace {
+
+// Subsample rate used for computing the accumulated error.
+// The implementation of some core functions depends on this constant being
+// equal to 4.
+constexpr int kAccumulatedErrorSubSampleRate = 4;
+
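+// Folds the normalized instantaneous accumulated error into the long-term
+// error profile: decreases are adopted immediately, whereas increases are
+// smoothed with a 0.01 leak so that transient mismatches do not erase an
+// established pre-echo profile.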
+void UpdateAccumulatedError(
+ const rtc::ArrayView<const float> instantaneous_accumulated_error,
+ const rtc::ArrayView<float> accumulated_error,
+ float one_over_error_sum_anchor) {
+ for (size_t k = 0; k < instantaneous_accumulated_error.size(); ++k) {
+ float error_norm =
+ instantaneous_accumulated_error[k] * one_over_error_sum_anchor;
+ if (error_norm < accumulated_error[k]) {
+ accumulated_error[k] = error_norm;
+ } else {
+ accumulated_error[k] += 0.01f * (error_norm - accumulated_error[k]);
+ }
+ }
+}
+
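+// Estimates the pre-echo lag as the first subsampled tap at which the
+// accumulated error falls below half of its predecessor and below 0.5 in
+// absolute terms; if no such tap is found, the winner lag itself is returned.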
+size_t ComputePreEchoLag(const rtc::ArrayView<float> accumulated_error,
+ size_t lag,
+ size_t alignment_shift_winner) {
+ size_t pre_echo_lag_estimate = lag - alignment_shift_winner;
+ size_t maximum_pre_echo_lag =
+ std::min(pre_echo_lag_estimate / kAccumulatedErrorSubSampleRate,
+ accumulated_error.size());
+ for (size_t k = 1; k < maximum_pre_echo_lag; ++k) {
+ if (accumulated_error[k] < 0.5f * accumulated_error[k - 1] &&
+ accumulated_error[k] < 0.5f) {
+ pre_echo_lag_estimate = (k + 1) * kAccumulatedErrorSubSampleRate - 1;
+ break;
+ }
+ }
+ return pre_echo_lag_estimate + alignment_shift_winner;
+}
+
+} // namespace
+
+namespace webrtc {
+namespace aec3 {
+
+#if defined(WEBRTC_HAS_NEON)
+
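+// Sums the four float32 lanes of a NEON vector into a single scalar using two
+// pairwise additions.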
+inline float SumAllElements(float32x4_t elements) {
+ float32x2_t sum = vpadd_f32(vget_low_f32(elements), vget_high_f32(elements));
+ sum = vpadd_f32(sum, sum);
+ return vget_lane_f32(sum, 0);
+}
+
+void MatchedFilterCoreWithAccumulatedError_NEON(
+ size_t x_start_index,
+ float x2_sum_threshold,
+ float smoothing,
+ rtc::ArrayView<const float> x,
+ rtc::ArrayView<const float> y,
+ rtc::ArrayView<float> h,
+ bool* filters_updated,
+ float* error_sum,
+ rtc::ArrayView<float> accumulated_error,
+ rtc::ArrayView<float> scratch_memory) {
+ const int h_size = static_cast<int>(h.size());
+ const int x_size = static_cast<int>(x.size());
+ RTC_DCHECK_EQ(0, h_size % 4);
+ std::fill(accumulated_error.begin(), accumulated_error.end(), 0.0f);
+ // Process for all samples in the sub-block.
+ for (size_t i = 0; i < y.size(); ++i) {
+ // Apply the matched filter as filter * x, and compute x * x.
+ RTC_DCHECK_GT(x_size, x_start_index);
+    // Compute the loop chunk sizes before and after the wraparound of the
+    // circular buffer for x.
+ const int chunk1 =
+ std::min(h_size, static_cast<int>(x_size - x_start_index));
+ if (chunk1 != h_size) {
+ const int chunk2 = h_size - chunk1;
+ std::copy(x.begin() + x_start_index, x.end(), scratch_memory.begin());
+ std::copy(x.begin(), x.begin() + chunk2, scratch_memory.begin() + chunk1);
+ }
+ const float* x_p =
+ chunk1 != h_size ? scratch_memory.data() : &x[x_start_index];
+ const float* h_p = &h[0];
+ float* accumulated_error_p = &accumulated_error[0];
+ // Initialize values for the accumulation.
+ float32x4_t x2_sum_128 = vdupq_n_f32(0);
+ float x2_sum = 0.f;
+ float s = 0;
+ // Perform 128 bit vector operations.
+ const int limit_by_4 = h_size >> 2;
+ for (int k = limit_by_4; k > 0;
+ --k, h_p += 4, x_p += 4, accumulated_error_p++) {
+ // Load the data into 128 bit vectors.
+ const float32x4_t x_k = vld1q_f32(x_p);
+ const float32x4_t h_k = vld1q_f32(h_p);
+ // Compute and accumulate x * x.
+ x2_sum_128 = vmlaq_f32(x2_sum_128, x_k, x_k);
+ // Compute x * h
+ float32x4_t hk_xk_128 = vmulq_f32(h_k, x_k);
+ s += SumAllElements(hk_xk_128);
+ const float e = s - y[i];
+ accumulated_error_p[0] += e * e;
+ }
+ // Combine the accumulated vector and scalar values.
+ x2_sum += SumAllElements(x2_sum_128);
+ // Compute the matched filter error.
+ float e = y[i] - s;
+ const bool saturation = y[i] >= 32000.f || y[i] <= -32000.f;
+ (*error_sum) += e * e;
+ // Update the matched filter estimate in an NLMS manner.
+ if (x2_sum > x2_sum_threshold && !saturation) {
+ RTC_DCHECK_LT(0.f, x2_sum);
+ const float alpha = smoothing * e / x2_sum;
+ const float32x4_t alpha_128 = vmovq_n_f32(alpha);
+      // filter = filter + smoothing * (y - filter * x) * x / (x * x).
+ float* h_p = &h[0];
+ x_p = chunk1 != h_size ? scratch_memory.data() : &x[x_start_index];
+ // Perform 128 bit vector operations.
+ const int limit_by_4 = h_size >> 2;
+ for (int k = limit_by_4; k > 0; --k, h_p += 4, x_p += 4) {
+ // Load the data into 128 bit vectors.
+ float32x4_t h_k = vld1q_f32(h_p);
+ const float32x4_t x_k = vld1q_f32(x_p);
+ // Compute h = h + alpha * x.
+ h_k = vmlaq_f32(h_k, alpha_128, x_k);
+ // Store the result.
+ vst1q_f32(h_p, h_k);
+ }
+ *filters_updated = true;
+ }
+ x_start_index = x_start_index > 0 ? x_start_index - 1 : x_size - 1;
+ }
+}
+
+void MatchedFilterCore_NEON(size_t x_start_index,
+ float x2_sum_threshold,
+ float smoothing,
+ rtc::ArrayView<const float> x,
+ rtc::ArrayView<const float> y,
+ rtc::ArrayView<float> h,
+ bool* filters_updated,
+ float* error_sum,
+ bool compute_accumulated_error,
+ rtc::ArrayView<float> accumulated_error,
+ rtc::ArrayView<float> scratch_memory) {
+ const int h_size = static_cast<int>(h.size());
+ const int x_size = static_cast<int>(x.size());
+ RTC_DCHECK_EQ(0, h_size % 4);
+
+ if (compute_accumulated_error) {
+ return MatchedFilterCoreWithAccumulatedError_NEON(
+ x_start_index, x2_sum_threshold, smoothing, x, y, h, filters_updated,
+ error_sum, accumulated_error, scratch_memory);
+ }
+
+ // Process for all samples in the sub-block.
+ for (size_t i = 0; i < y.size(); ++i) {
+ // Apply the matched filter as filter * x, and compute x * x.
+
+ RTC_DCHECK_GT(x_size, x_start_index);
+ const float* x_p = &x[x_start_index];
+ const float* h_p = &h[0];
+
+ // Initialize values for the accumulation.
+ float32x4_t s_128 = vdupq_n_f32(0);
+ float32x4_t x2_sum_128 = vdupq_n_f32(0);
+ float x2_sum = 0.f;
+ float s = 0;
+
+    // Compute the loop chunk sizes before and after the wraparound of the
+    // circular buffer for x.
+ const int chunk1 =
+ std::min(h_size, static_cast<int>(x_size - x_start_index));
+
+ // Perform the loop in two chunks.
+ const int chunk2 = h_size - chunk1;
+ for (int limit : {chunk1, chunk2}) {
+ // Perform 128 bit vector operations.
+ const int limit_by_4 = limit >> 2;
+ for (int k = limit_by_4; k > 0; --k, h_p += 4, x_p += 4) {
+ // Load the data into 128 bit vectors.
+ const float32x4_t x_k = vld1q_f32(x_p);
+ const float32x4_t h_k = vld1q_f32(h_p);
+ // Compute and accumulate x * x and h * x.
+ x2_sum_128 = vmlaq_f32(x2_sum_128, x_k, x_k);
+ s_128 = vmlaq_f32(s_128, h_k, x_k);
+ }
+
+ // Perform non-vector operations for any remaining items.
+ for (int k = limit - limit_by_4 * 4; k > 0; --k, ++h_p, ++x_p) {
+ const float x_k = *x_p;
+ x2_sum += x_k * x_k;
+ s += *h_p * x_k;
+ }
+
+ x_p = &x[0];
+ }
+
+ // Combine the accumulated vector and scalar values.
+ s += SumAllElements(s_128);
+ x2_sum += SumAllElements(x2_sum_128);
+
+ // Compute the matched filter error.
+ float e = y[i] - s;
+ const bool saturation = y[i] >= 32000.f || y[i] <= -32000.f;
+ (*error_sum) += e * e;
+
+ // Update the matched filter estimate in an NLMS manner.
+ if (x2_sum > x2_sum_threshold && !saturation) {
+ RTC_DCHECK_LT(0.f, x2_sum);
+ const float alpha = smoothing * e / x2_sum;
+ const float32x4_t alpha_128 = vmovq_n_f32(alpha);
+
+      // filter = filter + smoothing * (y - filter * x) * x / (x * x).
+ float* h_p = &h[0];
+ x_p = &x[x_start_index];
+
+ // Perform the loop in two chunks.
+ for (int limit : {chunk1, chunk2}) {
+ // Perform 128 bit vector operations.
+ const int limit_by_4 = limit >> 2;
+ for (int k = limit_by_4; k > 0; --k, h_p += 4, x_p += 4) {
+ // Load the data into 128 bit vectors.
+ float32x4_t h_k = vld1q_f32(h_p);
+ const float32x4_t x_k = vld1q_f32(x_p);
+ // Compute h = h + alpha * x.
+ h_k = vmlaq_f32(h_k, alpha_128, x_k);
+
+ // Store the result.
+ vst1q_f32(h_p, h_k);
+ }
+
+ // Perform non-vector operations for any remaining items.
+ for (int k = limit - limit_by_4 * 4; k > 0; --k, ++h_p, ++x_p) {
+ *h_p += alpha * *x_p;
+ }
+
+ x_p = &x[0];
+ }
+
+ *filters_updated = true;
+ }
+
+ x_start_index = x_start_index > 0 ? x_start_index - 1 : x_size - 1;
+ }
+}
+
+#endif
+
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+
+void MatchedFilterCore_AccumulatedError_SSE2(
+ size_t x_start_index,
+ float x2_sum_threshold,
+ float smoothing,
+ rtc::ArrayView<const float> x,
+ rtc::ArrayView<const float> y,
+ rtc::ArrayView<float> h,
+ bool* filters_updated,
+ float* error_sum,
+ rtc::ArrayView<float> accumulated_error,
+ rtc::ArrayView<float> scratch_memory) {
+ const int h_size = static_cast<int>(h.size());
+ const int x_size = static_cast<int>(x.size());
+ RTC_DCHECK_EQ(0, h_size % 8);
+ std::fill(accumulated_error.begin(), accumulated_error.end(), 0.0f);
+ // Process for all samples in the sub-block.
+ for (size_t i = 0; i < y.size(); ++i) {
+ // Apply the matched filter as filter * x, and compute x * x.
+ RTC_DCHECK_GT(x_size, x_start_index);
+ const int chunk1 =
+ std::min(h_size, static_cast<int>(x_size - x_start_index));
+ if (chunk1 != h_size) {
+ const int chunk2 = h_size - chunk1;
+ std::copy(x.begin() + x_start_index, x.end(), scratch_memory.begin());
+ std::copy(x.begin(), x.begin() + chunk2, scratch_memory.begin() + chunk1);
+ }
+ const float* x_p =
+ chunk1 != h_size ? scratch_memory.data() : &x[x_start_index];
+ const float* h_p = &h[0];
+ float* a_p = &accumulated_error[0];
+ __m128 s_inst_128;
+ __m128 s_inst_128_4;
+ __m128 x2_sum_128 = _mm_set1_ps(0);
+ __m128 x2_sum_128_4 = _mm_set1_ps(0);
+ __m128 e_128;
+ float* const s_p = reinterpret_cast<float*>(&s_inst_128);
+ float* const s_4_p = reinterpret_cast<float*>(&s_inst_128_4);
+ float* const e_p = reinterpret_cast<float*>(&e_128);
+ float x2_sum = 0.0f;
+ float s_acum = 0;
+ // Perform 128 bit vector operations.
+ const int limit_by_8 = h_size >> 3;
+ for (int k = limit_by_8; k > 0; --k, h_p += 8, x_p += 8, a_p += 2) {
+ // Load the data into 128 bit vectors.
+ const __m128 x_k = _mm_loadu_ps(x_p);
+ const __m128 h_k = _mm_loadu_ps(h_p);
+ const __m128 x_k_4 = _mm_loadu_ps(x_p + 4);
+ const __m128 h_k_4 = _mm_loadu_ps(h_p + 4);
+ const __m128 xx = _mm_mul_ps(x_k, x_k);
+ const __m128 xx_4 = _mm_mul_ps(x_k_4, x_k_4);
+ // Compute and accumulate x * x and h * x.
+ x2_sum_128 = _mm_add_ps(x2_sum_128, xx);
+ x2_sum_128_4 = _mm_add_ps(x2_sum_128_4, xx_4);
+ s_inst_128 = _mm_mul_ps(h_k, x_k);
+ s_inst_128_4 = _mm_mul_ps(h_k_4, x_k_4);
+ s_acum += s_p[0] + s_p[1] + s_p[2] + s_p[3];
+ e_p[0] = s_acum - y[i];
+ s_acum += s_4_p[0] + s_4_p[1] + s_4_p[2] + s_4_p[3];
+ e_p[1] = s_acum - y[i];
+ a_p[0] += e_p[0] * e_p[0];
+ a_p[1] += e_p[1] * e_p[1];
+ }
+ // Combine the accumulated vector and scalar values.
+ x2_sum_128 = _mm_add_ps(x2_sum_128, x2_sum_128_4);
+ float* v = reinterpret_cast<float*>(&x2_sum_128);
+ x2_sum += v[0] + v[1] + v[2] + v[3];
+ // Compute the matched filter error.
+ float e = y[i] - s_acum;
+ const bool saturation = y[i] >= 32000.f || y[i] <= -32000.f;
+ (*error_sum) += e * e;
+ // Update the matched filter estimate in an NLMS manner.
+ if (x2_sum > x2_sum_threshold && !saturation) {
+ RTC_DCHECK_LT(0.f, x2_sum);
+ const float alpha = smoothing * e / x2_sum;
+ const __m128 alpha_128 = _mm_set1_ps(alpha);
+      // filter = filter + smoothing * (y - filter * x) * x / (x * x).
+ float* h_p = &h[0];
+ const float* x_p =
+ chunk1 != h_size ? scratch_memory.data() : &x[x_start_index];
+ // Perform 128 bit vector operations.
+ const int limit_by_4 = h_size >> 2;
+ for (int k = limit_by_4; k > 0; --k, h_p += 4, x_p += 4) {
+ // Load the data into 128 bit vectors.
+ __m128 h_k = _mm_loadu_ps(h_p);
+ const __m128 x_k = _mm_loadu_ps(x_p);
+ // Compute h = h + alpha * x.
+ const __m128 alpha_x = _mm_mul_ps(alpha_128, x_k);
+ h_k = _mm_add_ps(h_k, alpha_x);
+ // Store the result.
+ _mm_storeu_ps(h_p, h_k);
+ }
+ *filters_updated = true;
+ }
+ x_start_index = x_start_index > 0 ? x_start_index - 1 : x_size - 1;
+ }
+}
+
+void MatchedFilterCore_SSE2(size_t x_start_index,
+ float x2_sum_threshold,
+ float smoothing,
+ rtc::ArrayView<const float> x,
+ rtc::ArrayView<const float> y,
+ rtc::ArrayView<float> h,
+ bool* filters_updated,
+ float* error_sum,
+ bool compute_accumulated_error,
+ rtc::ArrayView<float> accumulated_error,
+ rtc::ArrayView<float> scratch_memory) {
+ if (compute_accumulated_error) {
+ return MatchedFilterCore_AccumulatedError_SSE2(
+ x_start_index, x2_sum_threshold, smoothing, x, y, h, filters_updated,
+ error_sum, accumulated_error, scratch_memory);
+ }
+ const int h_size = static_cast<int>(h.size());
+ const int x_size = static_cast<int>(x.size());
+ RTC_DCHECK_EQ(0, h_size % 4);
+ // Process for all samples in the sub-block.
+ for (size_t i = 0; i < y.size(); ++i) {
+ // Apply the matched filter as filter * x, and compute x * x.
+ RTC_DCHECK_GT(x_size, x_start_index);
+ const float* x_p = &x[x_start_index];
+ const float* h_p = &h[0];
+ // Initialize values for the accumulation.
+ __m128 s_128 = _mm_set1_ps(0);
+ __m128 s_128_4 = _mm_set1_ps(0);
+ __m128 x2_sum_128 = _mm_set1_ps(0);
+ __m128 x2_sum_128_4 = _mm_set1_ps(0);
+ float x2_sum = 0.f;
+ float s = 0;
+    // Compute the loop chunk sizes before and after the wraparound of the
+    // circular buffer for x.
+ const int chunk1 =
+ std::min(h_size, static_cast<int>(x_size - x_start_index));
+ // Perform the loop in two chunks.
+ const int chunk2 = h_size - chunk1;
+ for (int limit : {chunk1, chunk2}) {
+ // Perform 128 bit vector operations.
+ const int limit_by_8 = limit >> 3;
+ for (int k = limit_by_8; k > 0; --k, h_p += 8, x_p += 8) {
+ // Load the data into 128 bit vectors.
+ const __m128 x_k = _mm_loadu_ps(x_p);
+ const __m128 h_k = _mm_loadu_ps(h_p);
+ const __m128 x_k_4 = _mm_loadu_ps(x_p + 4);
+ const __m128 h_k_4 = _mm_loadu_ps(h_p + 4);
+ const __m128 xx = _mm_mul_ps(x_k, x_k);
+ const __m128 xx_4 = _mm_mul_ps(x_k_4, x_k_4);
+ // Compute and accumulate x * x and h * x.
+ x2_sum_128 = _mm_add_ps(x2_sum_128, xx);
+ x2_sum_128_4 = _mm_add_ps(x2_sum_128_4, xx_4);
+ const __m128 hx = _mm_mul_ps(h_k, x_k);
+ const __m128 hx_4 = _mm_mul_ps(h_k_4, x_k_4);
+ s_128 = _mm_add_ps(s_128, hx);
+ s_128_4 = _mm_add_ps(s_128_4, hx_4);
+ }
+ // Perform non-vector operations for any remaining items.
+ for (int k = limit - limit_by_8 * 8; k > 0; --k, ++h_p, ++x_p) {
+ const float x_k = *x_p;
+ x2_sum += x_k * x_k;
+ s += *h_p * x_k;
+ }
+ x_p = &x[0];
+ }
+ // Combine the accumulated vector and scalar values.
+ x2_sum_128 = _mm_add_ps(x2_sum_128, x2_sum_128_4);
+ float* v = reinterpret_cast<float*>(&x2_sum_128);
+ x2_sum += v[0] + v[1] + v[2] + v[3];
+ s_128 = _mm_add_ps(s_128, s_128_4);
+ v = reinterpret_cast<float*>(&s_128);
+ s += v[0] + v[1] + v[2] + v[3];
+ // Compute the matched filter error.
+ float e = y[i] - s;
+ const bool saturation = y[i] >= 32000.f || y[i] <= -32000.f;
+ (*error_sum) += e * e;
+ // Update the matched filter estimate in an NLMS manner.
+ if (x2_sum > x2_sum_threshold && !saturation) {
+ RTC_DCHECK_LT(0.f, x2_sum);
+ const float alpha = smoothing * e / x2_sum;
+ const __m128 alpha_128 = _mm_set1_ps(alpha);
+      // filter = filter + smoothing * (y - filter * x) * x / (x * x).
+ float* h_p = &h[0];
+ x_p = &x[x_start_index];
+ // Perform the loop in two chunks.
+ for (int limit : {chunk1, chunk2}) {
+ // Perform 128 bit vector operations.
+ const int limit_by_4 = limit >> 2;
+ for (int k = limit_by_4; k > 0; --k, h_p += 4, x_p += 4) {
+ // Load the data into 128 bit vectors.
+ __m128 h_k = _mm_loadu_ps(h_p);
+ const __m128 x_k = _mm_loadu_ps(x_p);
+
+ // Compute h = h + alpha * x.
+ const __m128 alpha_x = _mm_mul_ps(alpha_128, x_k);
+ h_k = _mm_add_ps(h_k, alpha_x);
+ // Store the result.
+ _mm_storeu_ps(h_p, h_k);
+ }
+ // Perform non-vector operations for any remaining items.
+ for (int k = limit - limit_by_4 * 4; k > 0; --k, ++h_p, ++x_p) {
+ *h_p += alpha * *x_p;
+ }
+ x_p = &x[0];
+ }
+ *filters_updated = true;
+ }
+ x_start_index = x_start_index > 0 ? x_start_index - 1 : x_size - 1;
+ }
+}
+#endif
+
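+// Portable reference implementation of the matched filter core. For each
+// capture sample y[i], the filter output s = h * x is computed over the
+// circular render buffer, and h is adapted with the NLMS rule
+// h <- h + smoothing * (y[i] - s) * x / (x * x), applied only when the render
+// energy exceeds x2_sum_threshold and the capture sample is not saturated.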
+void MatchedFilterCore(size_t x_start_index,
+ float x2_sum_threshold,
+ float smoothing,
+ rtc::ArrayView<const float> x,
+ rtc::ArrayView<const float> y,
+ rtc::ArrayView<float> h,
+ bool* filters_updated,
+ float* error_sum,
+ bool compute_accumulated_error,
+ rtc::ArrayView<float> accumulated_error) {
+ if (compute_accumulated_error) {
+ std::fill(accumulated_error.begin(), accumulated_error.end(), 0.0f);
+ }
+
+ // Process for all samples in the sub-block.
+ for (size_t i = 0; i < y.size(); ++i) {
+ // Apply the matched filter as filter * x, and compute x * x.
+ float x2_sum = 0.f;
+ float s = 0;
+ size_t x_index = x_start_index;
+ if (compute_accumulated_error) {
+ for (size_t k = 0; k < h.size(); ++k) {
+ x2_sum += x[x_index] * x[x_index];
+ s += h[k] * x[x_index];
+ x_index = x_index < (x.size() - 1) ? x_index + 1 : 0;
+        if (((k + 1) & 0b11) == 0) {
+ int idx = k >> 2;
+ accumulated_error[idx] += (y[i] - s) * (y[i] - s);
+ }
+ }
+ } else {
+ for (size_t k = 0; k < h.size(); ++k) {
+ x2_sum += x[x_index] * x[x_index];
+ s += h[k] * x[x_index];
+ x_index = x_index < (x.size() - 1) ? x_index + 1 : 0;
+ }
+ }
+
+ // Compute the matched filter error.
+ float e = y[i] - s;
+ const bool saturation = y[i] >= 32000.f || y[i] <= -32000.f;
+ (*error_sum) += e * e;
+
+ // Update the matched filter estimate in an NLMS manner.
+ if (x2_sum > x2_sum_threshold && !saturation) {
+ RTC_DCHECK_LT(0.f, x2_sum);
+ const float alpha = smoothing * e / x2_sum;
+
+      // filter = filter + smoothing * (y - filter * x) * x / (x * x).
+ size_t x_index = x_start_index;
+ for (size_t k = 0; k < h.size(); ++k) {
+ h[k] += alpha * x[x_index];
+ x_index = x_index < (x.size() - 1) ? x_index + 1 : 0;
+ }
+ *filters_updated = true;
+ }
+
+ x_start_index = x_start_index > 0 ? x_start_index - 1 : x.size() - 1;
+ }
+}
+
+size_t MaxSquarePeakIndex(rtc::ArrayView<const float> h) {
+ if (h.size() < 2) {
+ return 0;
+ }
+ float max_element1 = h[0] * h[0];
+ float max_element2 = h[1] * h[1];
+ size_t lag_estimate1 = 0;
+ size_t lag_estimate2 = 1;
+ const size_t last_index = h.size() - 1;
+ // Keeping track of even & odd max elements separately typically allows the
+ // compiler to produce more efficient code.
+ for (size_t k = 2; k < last_index; k += 2) {
+ float element1 = h[k] * h[k];
+ float element2 = h[k + 1] * h[k + 1];
+ if (element1 > max_element1) {
+ max_element1 = element1;
+ lag_estimate1 = k;
+ }
+ if (element2 > max_element2) {
+ max_element2 = element2;
+ lag_estimate2 = k + 1;
+ }
+ }
+ if (max_element2 > max_element1) {
+ max_element1 = max_element2;
+ lag_estimate1 = lag_estimate2;
+ }
+ // In case of odd h size, we have not yet checked the last element.
+ float last_element = h[last_index] * h[last_index];
+ if (last_element > max_element1) {
+ return last_index;
+ }
+ return lag_estimate1;
+}
+
+} // namespace aec3
+
+MatchedFilter::MatchedFilter(ApmDataDumper* data_dumper,
+ Aec3Optimization optimization,
+ size_t sub_block_size,
+ size_t window_size_sub_blocks,
+ int num_matched_filters,
+ size_t alignment_shift_sub_blocks,
+ float excitation_limit,
+ float smoothing_fast,
+ float smoothing_slow,
+ float matching_filter_threshold,
+ bool detect_pre_echo)
+ : data_dumper_(data_dumper),
+ optimization_(optimization),
+ sub_block_size_(sub_block_size),
+ filter_intra_lag_shift_(alignment_shift_sub_blocks * sub_block_size_),
+ filters_(
+ num_matched_filters,
+ std::vector<float>(window_size_sub_blocks * sub_block_size_, 0.f)),
+ filters_offsets_(num_matched_filters, 0),
+ excitation_limit_(excitation_limit),
+ smoothing_fast_(smoothing_fast),
+ smoothing_slow_(smoothing_slow),
+ matching_filter_threshold_(matching_filter_threshold),
+ detect_pre_echo_(detect_pre_echo) {
+ RTC_DCHECK(data_dumper);
+ RTC_DCHECK_LT(0, window_size_sub_blocks);
+ RTC_DCHECK((kBlockSize % sub_block_size) == 0);
+ RTC_DCHECK((sub_block_size % 4) == 0);
+ static_assert(kAccumulatedErrorSubSampleRate == 4);
+ if (detect_pre_echo_) {
+ accumulated_error_ = std::vector<std::vector<float>>(
+ num_matched_filters,
+ std::vector<float>(window_size_sub_blocks * sub_block_size_ /
+ kAccumulatedErrorSubSampleRate,
+ 1.0f));
+
+ instantaneous_accumulated_error_ =
+ std::vector<float>(window_size_sub_blocks * sub_block_size_ /
+ kAccumulatedErrorSubSampleRate,
+ 0.0f);
+ scratch_memory_ =
+ std::vector<float>(window_size_sub_blocks * sub_block_size_);
+ }
+}
+
+MatchedFilter::~MatchedFilter() = default;
+
+void MatchedFilter::Reset() {
+ for (auto& f : filters_) {
+ std::fill(f.begin(), f.end(), 0.f);
+ }
+
+ for (auto& e : accumulated_error_) {
+ std::fill(e.begin(), e.end(), 1.0f);
+ }
+
+ winner_lag_ = absl::nullopt;
+ reported_lag_estimate_ = absl::nullopt;
+}
+
+void MatchedFilter::Update(const DownsampledRenderBuffer& render_buffer,
+ rtc::ArrayView<const float> capture,
+ bool use_slow_smoothing) {
+ RTC_DCHECK_EQ(sub_block_size_, capture.size());
+ auto& y = capture;
+
+ const float smoothing =
+ use_slow_smoothing ? smoothing_slow_ : smoothing_fast_;
+
+ const float x2_sum_threshold =
+ filters_[0].size() * excitation_limit_ * excitation_limit_;
+
+ // Compute anchor for the matched filter error.
+ float error_sum_anchor = 0.0f;
+ for (size_t k = 0; k < y.size(); ++k) {
+ error_sum_anchor += y[k] * y[k];
+ }
+
+ // Apply all matched filters.
+ float winner_error_sum = error_sum_anchor;
+ winner_lag_ = absl::nullopt;
+ reported_lag_estimate_ = absl::nullopt;
+ size_t alignment_shift = 0;
+ absl::optional<size_t> previous_lag_estimate;
+ const int num_filters = static_cast<int>(filters_.size());
+ int winner_index = -1;
+ for (int n = 0; n < num_filters; ++n) {
+ float error_sum = 0.f;
+ bool filters_updated = false;
+ const bool compute_pre_echo =
+ detect_pre_echo_ && n == last_detected_best_lag_filter_;
+
+ size_t x_start_index =
+ (render_buffer.read + alignment_shift + sub_block_size_ - 1) %
+ render_buffer.buffer.size();
+
+ switch (optimization_) {
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+ case Aec3Optimization::kSse2:
+ aec3::MatchedFilterCore_SSE2(
+ x_start_index, x2_sum_threshold, smoothing, render_buffer.buffer, y,
+ filters_[n], &filters_updated, &error_sum, compute_pre_echo,
+ instantaneous_accumulated_error_, scratch_memory_);
+ break;
+ case Aec3Optimization::kAvx2:
+ aec3::MatchedFilterCore_AVX2(
+ x_start_index, x2_sum_threshold, smoothing, render_buffer.buffer, y,
+ filters_[n], &filters_updated, &error_sum, compute_pre_echo,
+ instantaneous_accumulated_error_, scratch_memory_);
+ break;
+#endif
+#if defined(WEBRTC_HAS_NEON)
+ case Aec3Optimization::kNeon:
+ aec3::MatchedFilterCore_NEON(
+ x_start_index, x2_sum_threshold, smoothing, render_buffer.buffer, y,
+ filters_[n], &filters_updated, &error_sum, compute_pre_echo,
+ instantaneous_accumulated_error_, scratch_memory_);
+ break;
+#endif
+ default:
+ aec3::MatchedFilterCore(x_start_index, x2_sum_threshold, smoothing,
+ render_buffer.buffer, y, filters_[n],
+ &filters_updated, &error_sum, compute_pre_echo,
+ instantaneous_accumulated_error_);
+ }
+
+ // Estimate the lag in the matched filter as the distance to the portion in
+ // the filter that contributes the most to the matched filter output. This
+ // is detected as the peak of the matched filter.
+ const size_t lag_estimate = aec3::MaxSquarePeakIndex(filters_[n]);
+ const bool reliable =
+ lag_estimate > 2 && lag_estimate < (filters_[n].size() - 10) &&
+ error_sum < matching_filter_threshold_ * error_sum_anchor;
+
+    // Find the best estimate.
+ const size_t lag = lag_estimate + alignment_shift;
+ if (filters_updated && reliable && error_sum < winner_error_sum) {
+ winner_error_sum = error_sum;
+ winner_index = n;
+      // If two matched filters return the same winner candidate (overlap
+      // region), the one with the smaller index is chosen in order to search
+      // for pre-echoes.
+ if (previous_lag_estimate && previous_lag_estimate == lag) {
+ winner_lag_ = previous_lag_estimate;
+ winner_index = n - 1;
+ } else {
+ winner_lag_ = lag;
+ }
+ }
+ previous_lag_estimate = lag;
+ alignment_shift += filter_intra_lag_shift_;
+ }
+
+ if (winner_index != -1) {
+ RTC_DCHECK(winner_lag_.has_value());
+ reported_lag_estimate_ =
+ LagEstimate(winner_lag_.value(), /*pre_echo_lag=*/winner_lag_.value());
+ if (detect_pre_echo_ && last_detected_best_lag_filter_ == winner_index) {
+ if (error_sum_anchor > 30.0f * 30.0f * y.size()) {
+ UpdateAccumulatedError(instantaneous_accumulated_error_,
+ accumulated_error_[winner_index],
+ 1.0f / error_sum_anchor);
+ }
+ reported_lag_estimate_->pre_echo_lag = ComputePreEchoLag(
+ accumulated_error_[winner_index], winner_lag_.value(),
+ winner_index * filter_intra_lag_shift_ /*alignment_shift_winner*/);
+ }
+ last_detected_best_lag_filter_ = winner_index;
+ }
+ if (ApmDataDumper::IsAvailable()) {
+ Dump();
+ }
+}
+
+void MatchedFilter::LogFilterProperties(int sample_rate_hz,
+ size_t shift,
+ size_t downsampling_factor) const {
+ size_t alignment_shift = 0;
+ constexpr int kFsBy1000 = 16;
+ for (size_t k = 0; k < filters_.size(); ++k) {
+ int start = static_cast<int>(alignment_shift * downsampling_factor);
+ int end = static_cast<int>((alignment_shift + filters_[k].size()) *
+ downsampling_factor);
+ RTC_LOG(LS_VERBOSE) << "Filter " << k << ": start: "
+ << (start - static_cast<int>(shift)) / kFsBy1000
+ << " ms, end: "
+ << (end - static_cast<int>(shift)) / kFsBy1000
+ << " ms.";
+ alignment_shift += filter_intra_lag_shift_;
+ }
+}
+
+void MatchedFilter::Dump() {
+ for (size_t n = 0; n < filters_.size(); ++n) {
+ const size_t lag_estimate = aec3::MaxSquarePeakIndex(filters_[n]);
+ std::string dumper_filter = "aec3_correlator_" + std::to_string(n) + "_h";
+ data_dumper_->DumpRaw(dumper_filter.c_str(), filters_[n]);
+ std::string dumper_lag = "aec3_correlator_lag_" + std::to_string(n);
+ data_dumper_->DumpRaw(dumper_lag.c_str(),
+ lag_estimate + n * filter_intra_lag_shift_);
+ if (detect_pre_echo_) {
+ std::string dumper_error =
+ "aec3_correlator_error_" + std::to_string(n) + "_h";
+ data_dumper_->DumpRaw(dumper_error.c_str(), accumulated_error_[n]);
+
+ size_t pre_echo_lag = ComputePreEchoLag(
+ accumulated_error_[n], lag_estimate + n * filter_intra_lag_shift_,
+ n * filter_intra_lag_shift_);
+ std::string dumper_pre_lag =
+ "aec3_correlator_pre_echo_lag_" + std::to_string(n);
+ data_dumper_->DumpRaw(dumper_pre_lag.c_str(), pre_echo_lag);
+ }
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/matched_filter.h b/third_party/libwebrtc/modules/audio_processing/aec3/matched_filter.h
new file mode 100644
index 0000000000..760d5e39fd
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/matched_filter.h
@@ -0,0 +1,173 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_MATCHED_FILTER_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_MATCHED_FILTER_H_
+
+#include <stddef.h>
+
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/array_view.h"
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "rtc_base/system/arch.h"
+
+namespace webrtc {
+
+class ApmDataDumper;
+struct DownsampledRenderBuffer;
+
+namespace aec3 {
+
+#if defined(WEBRTC_HAS_NEON)
+
+// Filter core for the matched filter that is optimized for NEON.
+void MatchedFilterCore_NEON(size_t x_start_index,
+ float x2_sum_threshold,
+ float smoothing,
+ rtc::ArrayView<const float> x,
+ rtc::ArrayView<const float> y,
+ rtc::ArrayView<float> h,
+ bool* filters_updated,
+ float* error_sum,
+                            bool compute_accumulated_error,
+ rtc::ArrayView<float> accumulated_error,
+ rtc::ArrayView<float> scratch_memory);
+
+#endif
+
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+
+// Filter core for the matched filter that is optimized for SSE2.
+void MatchedFilterCore_SSE2(size_t x_start_index,
+ float x2_sum_threshold,
+ float smoothing,
+ rtc::ArrayView<const float> x,
+ rtc::ArrayView<const float> y,
+ rtc::ArrayView<float> h,
+ bool* filters_updated,
+ float* error_sum,
+ bool compute_accumulated_error,
+ rtc::ArrayView<float> accumulated_error,
+ rtc::ArrayView<float> scratch_memory);
+
+// Filter core for the matched filter that is optimized for AVX2.
+void MatchedFilterCore_AVX2(size_t x_start_index,
+ float x2_sum_threshold,
+ float smoothing,
+ rtc::ArrayView<const float> x,
+ rtc::ArrayView<const float> y,
+ rtc::ArrayView<float> h,
+ bool* filters_updated,
+ float* error_sum,
+ bool compute_accumulated_error,
+ rtc::ArrayView<float> accumulated_error,
+ rtc::ArrayView<float> scratch_memory);
+
+#endif
+
+// Filter core for the matched filter.
+void MatchedFilterCore(size_t x_start_index,
+ float x2_sum_threshold,
+ float smoothing,
+ rtc::ArrayView<const float> x,
+ rtc::ArrayView<const float> y,
+ rtc::ArrayView<float> h,
+ bool* filters_updated,
+ float* error_sum,
+                       bool compute_accumulated_error,
+ rtc::ArrayView<float> accumulated_error);
+
+// Returns the index of the largest peak of the squared values in the array.
+size_t MaxSquarePeakIndex(rtc::ArrayView<const float> h);
+
+} // namespace aec3
+
+// Produces recursively updated cross-correlation estimates for several signal
+// shifts where the intra-shift spacing is uniform.
+class MatchedFilter {
+ public:
+ // Stores properties for the lag estimate corresponding to a particular signal
+ // shift.
+ struct LagEstimate {
+ LagEstimate() = default;
+ LagEstimate(size_t lag, size_t pre_echo_lag)
+ : lag(lag), pre_echo_lag(pre_echo_lag) {}
+ size_t lag = 0;
+ size_t pre_echo_lag = 0;
+ };
+
+ MatchedFilter(ApmDataDumper* data_dumper,
+ Aec3Optimization optimization,
+ size_t sub_block_size,
+ size_t window_size_sub_blocks,
+ int num_matched_filters,
+ size_t alignment_shift_sub_blocks,
+ float excitation_limit,
+ float smoothing_fast,
+ float smoothing_slow,
+ float matching_filter_threshold,
+ bool detect_pre_echo);
+
+ MatchedFilter() = delete;
+ MatchedFilter(const MatchedFilter&) = delete;
+ MatchedFilter& operator=(const MatchedFilter&) = delete;
+
+ ~MatchedFilter();
+
+ // Updates the correlation with the values in the capture buffer.
+ void Update(const DownsampledRenderBuffer& render_buffer,
+ rtc::ArrayView<const float> capture,
+ bool use_slow_smoothing);
+
+ // Resets the matched filter.
+ void Reset();
+
+  // Returns the current best lag estimate.
+ absl::optional<const MatchedFilter::LagEstimate> GetBestLagEstimate() const {
+ return reported_lag_estimate_;
+ }
+
+ // Returns the maximum filter lag.
+ size_t GetMaxFilterLag() const {
+ return filters_.size() * filter_intra_lag_shift_ + filters_[0].size();
+ }
+
+  // Logs the matched filter properties.
+ void LogFilterProperties(int sample_rate_hz,
+ size_t shift,
+ size_t downsampling_factor) const;
+
+ private:
+ void Dump();
+
+ ApmDataDumper* const data_dumper_;
+ const Aec3Optimization optimization_;
+ const size_t sub_block_size_;
+ const size_t filter_intra_lag_shift_;
+ std::vector<std::vector<float>> filters_;
+ std::vector<std::vector<float>> accumulated_error_;
+ std::vector<float> instantaneous_accumulated_error_;
+ std::vector<float> scratch_memory_;
+ absl::optional<MatchedFilter::LagEstimate> reported_lag_estimate_;
+ absl::optional<size_t> winner_lag_;
+ int last_detected_best_lag_filter_ = -1;
+ std::vector<size_t> filters_offsets_;
+ const float excitation_limit_;
+ const float smoothing_fast_;
+ const float smoothing_slow_;
+ const float matching_filter_threshold_;
+ const bool detect_pre_echo_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AEC3_MATCHED_FILTER_H_
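As a worked example of GetMaxFilterLag() above (values assumed for illustration): with 10 filters, filter_intra_lag_shift_ = 24 and 32-tap filters, the maximum representable lag is 10 * 24 + 32 = 272 down-sampled samples.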
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/matched_filter_avx2.cc b/third_party/libwebrtc/modules/audio_processing/aec3/matched_filter_avx2.cc
new file mode 100644
index 0000000000..8c2ffcbd1e
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/matched_filter_avx2.cc
@@ -0,0 +1,261 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <immintrin.h>
+
+#include "modules/audio_processing/aec3/matched_filter.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace aec3 {
+
+// Let ha denote the horizontal sum of a, and hb the horizontal sum of b.
+// Returns [ha, hb, ha, hb].
+inline __m128 hsum_ab(__m256 a, __m256 b) {
+ __m256 s_256 = _mm256_hadd_ps(a, b);
+ const __m256i mask = _mm256_set_epi32(7, 6, 3, 2, 5, 4, 1, 0);
+ s_256 = _mm256_permutevar8x32_ps(s_256, mask);
+ __m128 s = _mm_hadd_ps(_mm256_extractf128_ps(s_256, 0),
+ _mm256_extractf128_ps(s_256, 1));
+ s = _mm_hadd_ps(s, s);
+ return s;
+}
+
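A scalar equivalent of hsum_ab() for reference (a sketch; the intrinsic version interleaves the two reductions to stay inside vector registers):

    #include <array>

    // Computes the horizontal sums ha and hb of two 8-float inputs and returns
    // them replicated as [ha, hb, ha, hb], matching the layout of hsum_ab().
    std::array<float, 4> HsumAbScalar(const float a[8], const float b[8]) {
      float ha = 0.0f;
      float hb = 0.0f;
      for (int i = 0; i < 8; ++i) {
        ha += a[i];
        hb += b[i];
      }
      return {ha, hb, ha, hb};
    }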
+void MatchedFilterCore_AccumulatedError_AVX2(
+ size_t x_start_index,
+ float x2_sum_threshold,
+ float smoothing,
+ rtc::ArrayView<const float> x,
+ rtc::ArrayView<const float> y,
+ rtc::ArrayView<float> h,
+ bool* filters_updated,
+ float* error_sum,
+ rtc::ArrayView<float> accumulated_error,
+ rtc::ArrayView<float> scratch_memory) {
+ const int h_size = static_cast<int>(h.size());
+ const int x_size = static_cast<int>(x.size());
+ RTC_DCHECK_EQ(0, h_size % 16);
+ std::fill(accumulated_error.begin(), accumulated_error.end(), 0.0f);
+
+ // Process for all samples in the sub-block.
+ for (size_t i = 0; i < y.size(); ++i) {
+ // Apply the matched filter as filter * x, and compute x * x.
+ RTC_DCHECK_GT(x_size, x_start_index);
+ const int chunk1 =
+ std::min(h_size, static_cast<int>(x_size - x_start_index));
+ if (chunk1 != h_size) {
+ const int chunk2 = h_size - chunk1;
+ std::copy(x.begin() + x_start_index, x.end(), scratch_memory.begin());
+ std::copy(x.begin(), x.begin() + chunk2, scratch_memory.begin() + chunk1);
+ }
+ const float* x_p =
+ chunk1 != h_size ? scratch_memory.data() : &x[x_start_index];
+ const float* h_p = &h[0];
+ float* a_p = &accumulated_error[0];
+ __m256 s_inst_hadd_256;
+ __m256 s_inst_256;
+ __m256 s_inst_256_8;
+ __m256 x2_sum_256 = _mm256_set1_ps(0);
+ __m256 x2_sum_256_8 = _mm256_set1_ps(0);
+ __m128 e_128;
+ float x2_sum = 0.0f;
+ float s_acum = 0;
+ const int limit_by_16 = h_size >> 4;
+ for (int k = limit_by_16; k > 0; --k, h_p += 16, x_p += 16, a_p += 4) {
+ // Load the data into 256 bit vectors.
+ __m256 x_k = _mm256_loadu_ps(x_p);
+ __m256 h_k = _mm256_loadu_ps(h_p);
+ __m256 x_k_8 = _mm256_loadu_ps(x_p + 8);
+ __m256 h_k_8 = _mm256_loadu_ps(h_p + 8);
+ // Compute and accumulate x * x and h * x.
+ x2_sum_256 = _mm256_fmadd_ps(x_k, x_k, x2_sum_256);
+ x2_sum_256_8 = _mm256_fmadd_ps(x_k_8, x_k_8, x2_sum_256_8);
+ s_inst_256 = _mm256_mul_ps(h_k, x_k);
+ s_inst_256_8 = _mm256_mul_ps(h_k_8, x_k_8);
+ s_inst_hadd_256 = _mm256_hadd_ps(s_inst_256, s_inst_256_8);
+ s_inst_hadd_256 = _mm256_hadd_ps(s_inst_hadd_256, s_inst_hadd_256);
+ s_acum += s_inst_hadd_256[0];
+ e_128[0] = s_acum - y[i];
+ s_acum += s_inst_hadd_256[4];
+ e_128[1] = s_acum - y[i];
+ s_acum += s_inst_hadd_256[1];
+ e_128[2] = s_acum - y[i];
+ s_acum += s_inst_hadd_256[5];
+ e_128[3] = s_acum - y[i];
+
+ __m128 accumulated_error = _mm_load_ps(a_p);
+ accumulated_error = _mm_fmadd_ps(e_128, e_128, accumulated_error);
+ _mm_storeu_ps(a_p, accumulated_error);
+ }
+ // Sum components together.
+ x2_sum_256 = _mm256_add_ps(x2_sum_256, x2_sum_256_8);
+ __m128 x2_sum_128 = _mm_add_ps(_mm256_extractf128_ps(x2_sum_256, 0),
+ _mm256_extractf128_ps(x2_sum_256, 1));
+ // Combine the accumulated vector and scalar values.
+ float* v = reinterpret_cast<float*>(&x2_sum_128);
+ x2_sum += v[0] + v[1] + v[2] + v[3];
+
+ // Compute the matched filter error.
+ float e = y[i] - s_acum;
+ const bool saturation = y[i] >= 32000.f || y[i] <= -32000.f;
+ (*error_sum) += e * e;
+
+ // Update the matched filter estimate in an NLMS manner.
+ if (x2_sum > x2_sum_threshold && !saturation) {
+ RTC_DCHECK_LT(0.f, x2_sum);
+ const float alpha = smoothing * e / x2_sum;
+ const __m256 alpha_256 = _mm256_set1_ps(alpha);
+
+      // filter = filter + smoothing * (y - filter * x) * x / (x * x).
+ float* h_p = &h[0];
+ const float* x_p =
+ chunk1 != h_size ? scratch_memory.data() : &x[x_start_index];
+ // Perform 256 bit vector operations.
+ const int limit_by_8 = h_size >> 3;
+ for (int k = limit_by_8; k > 0; --k, h_p += 8, x_p += 8) {
+ // Load the data into 256 bit vectors.
+ __m256 h_k = _mm256_loadu_ps(h_p);
+ __m256 x_k = _mm256_loadu_ps(x_p);
+ // Compute h = h + alpha * x.
+ h_k = _mm256_fmadd_ps(x_k, alpha_256, h_k);
+
+ // Store the result.
+ _mm256_storeu_ps(h_p, h_k);
+ }
+ *filters_updated = true;
+ }
+
+ x_start_index = x_start_index > 0 ? x_start_index - 1 : x_size - 1;
+ }
+}
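The loop above records, for every capture sample, how the prediction error evolves as groups of four taps are added, which is why accumulated_error holds h.size() / 4 entries. A scalar sketch of that bookkeeping (the function name is invented):

    #include <cstddef>
    #include <vector>

    // For one capture sample y, accumulates the squared errors of the truncated
    // filters h[0..4), h[0..8), ... into accumulated_error (size h.size() / 4).
    void AccumulateTruncationErrors(const std::vector<float>& x,  // Aligned render.
                                    const std::vector<float>& h,
                                    float y,
                                    std::vector<float>& accumulated_error) {
      float s = 0.0f;
      for (size_t k = 0; k < h.size(); k += 4) {
        for (size_t j = k; j < k + 4; ++j) {
          s += h[j] * x[j];
        }
        const float e = s - y;
        accumulated_error[k / 4] += e * e;
      }
    }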
+
+void MatchedFilterCore_AVX2(size_t x_start_index,
+ float x2_sum_threshold,
+ float smoothing,
+ rtc::ArrayView<const float> x,
+ rtc::ArrayView<const float> y,
+ rtc::ArrayView<float> h,
+ bool* filters_updated,
+ float* error_sum,
+ bool compute_accumulated_error,
+ rtc::ArrayView<float> accumulated_error,
+ rtc::ArrayView<float> scratch_memory) {
+ if (compute_accumulated_error) {
+ return MatchedFilterCore_AccumulatedError_AVX2(
+ x_start_index, x2_sum_threshold, smoothing, x, y, h, filters_updated,
+ error_sum, accumulated_error, scratch_memory);
+ }
+ const int h_size = static_cast<int>(h.size());
+ const int x_size = static_cast<int>(x.size());
+ RTC_DCHECK_EQ(0, h_size % 8);
+
+ // Process for all samples in the sub-block.
+ for (size_t i = 0; i < y.size(); ++i) {
+ // Apply the matched filter as filter * x, and compute x * x.
+
+ RTC_DCHECK_GT(x_size, x_start_index);
+ const float* x_p = &x[x_start_index];
+ const float* h_p = &h[0];
+
+ // Initialize values for the accumulation.
+ __m256 s_256 = _mm256_set1_ps(0);
+ __m256 s_256_8 = _mm256_set1_ps(0);
+ __m256 x2_sum_256 = _mm256_set1_ps(0);
+ __m256 x2_sum_256_8 = _mm256_set1_ps(0);
+ float x2_sum = 0.f;
+ float s = 0;
+
+ // Compute loop chunk sizes until, and after, the wraparound of the circular
+ // buffer for x.
+ const int chunk1 =
+ std::min(h_size, static_cast<int>(x_size - x_start_index));
+
+ // Perform the loop in two chunks.
+ const int chunk2 = h_size - chunk1;
+ for (int limit : {chunk1, chunk2}) {
+ // Perform 256 bit vector operations.
+ const int limit_by_16 = limit >> 4;
+ for (int k = limit_by_16; k > 0; --k, h_p += 16, x_p += 16) {
+ // Load the data into 256 bit vectors.
+ __m256 x_k = _mm256_loadu_ps(x_p);
+ __m256 h_k = _mm256_loadu_ps(h_p);
+ __m256 x_k_8 = _mm256_loadu_ps(x_p + 8);
+ __m256 h_k_8 = _mm256_loadu_ps(h_p + 8);
+ // Compute and accumulate x * x and h * x.
+ x2_sum_256 = _mm256_fmadd_ps(x_k, x_k, x2_sum_256);
+ x2_sum_256_8 = _mm256_fmadd_ps(x_k_8, x_k_8, x2_sum_256_8);
+ s_256 = _mm256_fmadd_ps(h_k, x_k, s_256);
+ s_256_8 = _mm256_fmadd_ps(h_k_8, x_k_8, s_256_8);
+ }
+
+ // Perform non-vector operations for any remaining items.
+ for (int k = limit - limit_by_16 * 16; k > 0; --k, ++h_p, ++x_p) {
+ const float x_k = *x_p;
+ x2_sum += x_k * x_k;
+ s += *h_p * x_k;
+ }
+
+ x_p = &x[0];
+ }
+
+ // Sum components together.
+ x2_sum_256 = _mm256_add_ps(x2_sum_256, x2_sum_256_8);
+ s_256 = _mm256_add_ps(s_256, s_256_8);
+ __m128 sum = hsum_ab(x2_sum_256, s_256);
+ x2_sum += sum[0];
+ s += sum[1];
+
+ // Compute the matched filter error.
+ float e = y[i] - s;
+ const bool saturation = y[i] >= 32000.f || y[i] <= -32000.f;
+ (*error_sum) += e * e;
+
+ // Update the matched filter estimate in an NLMS manner.
+ if (x2_sum > x2_sum_threshold && !saturation) {
+ RTC_DCHECK_LT(0.f, x2_sum);
+ const float alpha = smoothing * e / x2_sum;
+ const __m256 alpha_256 = _mm256_set1_ps(alpha);
+
+      // filter = filter + smoothing * (y - filter * x) * x / (x * x).
+ float* h_p = &h[0];
+ x_p = &x[x_start_index];
+
+ // Perform the loop in two chunks.
+ for (int limit : {chunk1, chunk2}) {
+ // Perform 256 bit vector operations.
+ const int limit_by_8 = limit >> 3;
+ for (int k = limit_by_8; k > 0; --k, h_p += 8, x_p += 8) {
+ // Load the data into 256 bit vectors.
+ __m256 h_k = _mm256_loadu_ps(h_p);
+ __m256 x_k = _mm256_loadu_ps(x_p);
+ // Compute h = h + alpha * x.
+ h_k = _mm256_fmadd_ps(x_k, alpha_256, h_k);
+
+ // Store the result.
+ _mm256_storeu_ps(h_p, h_k);
+ }
+
+ // Perform non-vector operations for any remaining items.
+ for (int k = limit - limit_by_8 * 8; k > 0; --k, ++h_p, ++x_p) {
+ *h_p += alpha * *x_p;
+ }
+
+ x_p = &x[0];
+ }
+
+ *filters_updated = true;
+ }
+
+ x_start_index = x_start_index > 0 ? x_start_index - 1 : x_size - 1;
+ }
+}
+
+} // namespace aec3
+} // namespace webrtc
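Both AVX2 cores walk the circular render buffer in the two-chunk pattern seen above: one contiguous run up to the wraparound point, then one from the start of the buffer. In plain scalar form (a sketch; names are assumptions):

    #include <algorithm>
    #include <cstddef>
    #include <vector>

    // Dot product of h with h.size() consecutive samples of the circular buffer
    // x, starting at x_start_index and split at the wraparound, mirroring the
    // chunk1/chunk2 logic of the SIMD cores.
    float DotProductCircular(const std::vector<float>& x,
                             size_t x_start_index,
                             const std::vector<float>& h) {
      const size_t chunk1 = std::min(h.size(), x.size() - x_start_index);
      float s = 0.0f;
      size_t j = 0;
      for (size_t k = x_start_index; j < chunk1; ++k, ++j) {
        s += h[j] * x[k];  // Up to the end of the buffer.
      }
      for (size_t k = 0; j < h.size(); ++k, ++j) {
        s += h[j] * x[k];  // Wrapped part (chunk2).
      }
      return s;
    }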
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/matched_filter_gn/moz.build b/third_party/libwebrtc/modules/audio_processing/aec3/matched_filter_gn/moz.build
new file mode 100644
index 0000000000..0892a4f3f3
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/matched_filter_gn/moz.build
@@ -0,0 +1,193 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("matched_filter_gn")
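The WEBRTC_ENABLE_AVX2 define set above only controls whether the AVX2 core is compiled in; which core runs is still decided at run time (DetectOptimization() feeds the Aec3Optimization switch in matched_filter.cc). A self-contained sketch of that two-level dispatch shape (the define value and the CPU probe below are stand-ins, not code from this tree):

    #include <cstdio>

    #define WEBRTC_ENABLE_AVX2 1  // Normally injected by the build system.

    // Stand-in for the runtime probe; WebRTC uses GetCPUInfo(kAVX2).
    bool CpuSupportsAvx2() { return false; }

    const char* SelectMatchedFilterCore() {
    #if defined(WEBRTC_ENABLE_AVX2)
      if (CpuSupportsAvx2()) {
        return "MatchedFilterCore_AVX2";
      }
    #endif
      return "MatchedFilterCore";  // Generic fallback.
    }

    int main() {
      std::printf("Selected: %s\n", SelectMatchedFilterCore());
    }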
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/matched_filter_lag_aggregator.cc b/third_party/libwebrtc/modules/audio_processing/aec3/matched_filter_lag_aggregator.cc
new file mode 100644
index 0000000000..17f517a001
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/matched_filter_lag_aggregator.cc
@@ -0,0 +1,161 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/audio_processing/aec3/matched_filter_lag_aggregator.h"
+
+#include <algorithm>
+#include <iterator>
+
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/numerics/safe_minmax.h"
+
+namespace webrtc {
+namespace {
+int GetDownSamplingBlockSizeLog2(int down_sampling_factor) {
+ int down_sampling_factor_log2 = 0;
+ down_sampling_factor >>= 1;
+ while (down_sampling_factor > 0) {
+ down_sampling_factor_log2++;
+ down_sampling_factor >>= 1;
+ }
+ return static_cast<int>(kBlockSizeLog2) > down_sampling_factor_log2
+ ? static_cast<int>(kBlockSizeLog2) - down_sampling_factor_log2
+ : 0;
+}
+} // namespace
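For illustration (taking kBlockSizeLog2 = 6, i.e. 64-sample blocks, as in aec3_common.h): with down_sampling_factor = 4, down_sampling_factor_log2 becomes 2 and the function returns 6 - 2 = 4, so pre-echo lags are binned in groups of 2^4 = 16 down-sampled samples, which is exactly one full-rate block.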
+
+MatchedFilterLagAggregator::MatchedFilterLagAggregator(
+ ApmDataDumper* data_dumper,
+ size_t max_filter_lag,
+ const EchoCanceller3Config::Delay& delay_config)
+ : data_dumper_(data_dumper),
+ thresholds_(delay_config.delay_selection_thresholds),
+ headroom_(static_cast<int>(delay_config.delay_headroom_samples /
+ delay_config.down_sampling_factor)),
+ highest_peak_aggregator_(max_filter_lag) {
+ if (delay_config.detect_pre_echo) {
+ pre_echo_lag_aggregator_ = std::make_unique<PreEchoLagAggregator>(
+ max_filter_lag, delay_config.down_sampling_factor);
+ }
+ RTC_DCHECK(data_dumper);
+ RTC_DCHECK_LE(thresholds_.initial, thresholds_.converged);
+}
+
+MatchedFilterLagAggregator::~MatchedFilterLagAggregator() = default;
+
+void MatchedFilterLagAggregator::Reset(bool hard_reset) {
+ highest_peak_aggregator_.Reset();
+ if (pre_echo_lag_aggregator_ != nullptr) {
+ pre_echo_lag_aggregator_->Reset();
+ }
+ if (hard_reset) {
+ significant_candidate_found_ = false;
+ }
+}
+
+absl::optional<DelayEstimate> MatchedFilterLagAggregator::Aggregate(
+ const absl::optional<const MatchedFilter::LagEstimate>& lag_estimate) {
+ if (lag_estimate && pre_echo_lag_aggregator_) {
+ pre_echo_lag_aggregator_->Dump(data_dumper_);
+ pre_echo_lag_aggregator_->Aggregate(
+ std::max(0, static_cast<int>(lag_estimate->pre_echo_lag) - headroom_));
+ }
+
+ if (lag_estimate) {
+ highest_peak_aggregator_.Aggregate(
+ std::max(0, static_cast<int>(lag_estimate->lag) - headroom_));
+ rtc::ArrayView<const int> histogram = highest_peak_aggregator_.histogram();
+ int candidate = highest_peak_aggregator_.candidate();
+ significant_candidate_found_ = significant_candidate_found_ ||
+ histogram[candidate] > thresholds_.converged;
+ if (histogram[candidate] > thresholds_.converged ||
+ (histogram[candidate] > thresholds_.initial &&
+ !significant_candidate_found_)) {
+ DelayEstimate::Quality quality = significant_candidate_found_
+ ? DelayEstimate::Quality::kRefined
+ : DelayEstimate::Quality::kCoarse;
+ int reported_delay = pre_echo_lag_aggregator_ != nullptr
+ ? pre_echo_lag_aggregator_->pre_echo_candidate()
+ : candidate;
+ return DelayEstimate(quality, reported_delay);
+ }
+ }
+
+ return absl::nullopt;
+}
+
+MatchedFilterLagAggregator::HighestPeakAggregator::HighestPeakAggregator(
+ size_t max_filter_lag)
+ : histogram_(max_filter_lag + 1, 0) {
+ histogram_data_.fill(0);
+}
+
+void MatchedFilterLagAggregator::HighestPeakAggregator::Reset() {
+ std::fill(histogram_.begin(), histogram_.end(), 0);
+ histogram_data_.fill(0);
+ histogram_data_index_ = 0;
+}
+
+void MatchedFilterLagAggregator::HighestPeakAggregator::Aggregate(int lag) {
+ RTC_DCHECK_GT(histogram_.size(), histogram_data_[histogram_data_index_]);
+ RTC_DCHECK_LE(0, histogram_data_[histogram_data_index_]);
+ --histogram_[histogram_data_[histogram_data_index_]];
+ histogram_data_[histogram_data_index_] = lag;
+ RTC_DCHECK_GT(histogram_.size(), histogram_data_[histogram_data_index_]);
+ RTC_DCHECK_LE(0, histogram_data_[histogram_data_index_]);
+ ++histogram_[histogram_data_[histogram_data_index_]];
+ histogram_data_index_ = (histogram_data_index_ + 1) % histogram_data_.size();
+ candidate_ =
+ std::distance(histogram_.begin(),
+ std::max_element(histogram_.begin(), histogram_.end()));
+}
+
+MatchedFilterLagAggregator::PreEchoLagAggregator::PreEchoLagAggregator(
+ size_t max_filter_lag,
+ size_t down_sampling_factor)
+ : block_size_log2_(GetDownSamplingBlockSizeLog2(down_sampling_factor)),
+ histogram_(
+ ((max_filter_lag + 1) * down_sampling_factor) >> kBlockSizeLog2,
+ 0) {
+ Reset();
+}
+
+void MatchedFilterLagAggregator::PreEchoLagAggregator::Reset() {
+ std::fill(histogram_.begin(), histogram_.end(), 0);
+ histogram_data_.fill(0);
+ histogram_data_index_ = 0;
+ pre_echo_candidate_ = 0;
+}
+
+void MatchedFilterLagAggregator::PreEchoLagAggregator::Aggregate(
+ int pre_echo_lag) {
+ int pre_echo_block_size = pre_echo_lag >> block_size_log2_;
+ RTC_DCHECK(pre_echo_block_size >= 0 &&
+ pre_echo_block_size < static_cast<int>(histogram_.size()));
+ pre_echo_block_size =
+ rtc::SafeClamp(pre_echo_block_size, 0, histogram_.size() - 1);
+ if (histogram_[histogram_data_[histogram_data_index_]] > 0) {
+ --histogram_[histogram_data_[histogram_data_index_]];
+ }
+ histogram_data_[histogram_data_index_] = pre_echo_block_size;
+ ++histogram_[histogram_data_[histogram_data_index_]];
+ histogram_data_index_ = (histogram_data_index_ + 1) % histogram_data_.size();
+ int pre_echo_candidate_block_size =
+ std::distance(histogram_.begin(),
+ std::max_element(histogram_.begin(), histogram_.end()));
+ pre_echo_candidate_ = (pre_echo_candidate_block_size << block_size_log2_);
+}
+
+void MatchedFilterLagAggregator::PreEchoLagAggregator::Dump(
+ ApmDataDumper* const data_dumper) {
+ data_dumper->DumpRaw("aec3_pre_echo_delay_candidate", pre_echo_candidate_);
+}
+
+} // namespace webrtc
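Both aggregators above follow the same sliding-histogram idea: keep the last 250 estimates in a ring buffer, maintain a count histogram over them, and report the argmax. A standalone sketch (the class name is invented and the eviction guard follows PreEchoLagAggregator; callers must keep values inside [0, num_bins)):

    #include <algorithm>
    #include <array>
    #include <cstddef>
    #include <iterator>
    #include <vector>

    class SlidingHistogram {
     public:
      explicit SlidingHistogram(size_t num_bins) : histogram_(num_bins, 0) {
        data_.fill(0);
      }

      // Inserts a new estimate, evicts the one from 250 calls ago, and returns
      // the current mode of the window.
      int Aggregate(int value) {
        if (histogram_[data_[index_]] > 0) {
          --histogram_[data_[index_]];  // Evict the oldest estimate.
        }
        data_[index_] = value;
        ++histogram_[value];
        index_ = (index_ + 1) % data_.size();
        return std::distance(
            histogram_.begin(),
            std::max_element(histogram_.begin(), histogram_.end()));
      }

     private:
      std::vector<int> histogram_;
      std::array<int, 250> data_;
      size_t index_ = 0;
    };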
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/matched_filter_lag_aggregator.h b/third_party/libwebrtc/modules/audio_processing/aec3/matched_filter_lag_aggregator.h
new file mode 100644
index 0000000000..c0598bf226
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/matched_filter_lag_aggregator.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_MATCHED_FILTER_LAG_AGGREGATOR_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_MATCHED_FILTER_LAG_AGGREGATOR_H_
+
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/audio/echo_canceller3_config.h"
+#include "modules/audio_processing/aec3/delay_estimate.h"
+#include "modules/audio_processing/aec3/matched_filter.h"
+
+namespace webrtc {
+
+class ApmDataDumper;
+
+// Aggregates lag estimates produced by the MatchedFilter class into a single
+// reliable combined lag estimate.
+class MatchedFilterLagAggregator {
+ public:
+ MatchedFilterLagAggregator(ApmDataDumper* data_dumper,
+ size_t max_filter_lag,
+ const EchoCanceller3Config::Delay& delay_config);
+
+ MatchedFilterLagAggregator() = delete;
+ MatchedFilterLagAggregator(const MatchedFilterLagAggregator&) = delete;
+ MatchedFilterLagAggregator& operator=(const MatchedFilterLagAggregator&) =
+ delete;
+
+ ~MatchedFilterLagAggregator();
+
+ // Resets the aggregator.
+ void Reset(bool hard_reset);
+
+ // Aggregates the provided lag estimates.
+ absl::optional<DelayEstimate> Aggregate(
+ const absl::optional<const MatchedFilter::LagEstimate>& lag_estimate);
+
+ // Returns whether a reliable delay estimate has been found.
+ bool ReliableDelayFound() const { return significant_candidate_found_; }
+
+ // Returns the delay candidate that is computed by looking at the highest peak
+ // on the matched filters.
+ int GetDelayAtHighestPeak() const {
+ return highest_peak_aggregator_.candidate();
+ }
+
+ private:
+ class PreEchoLagAggregator {
+ public:
+ PreEchoLagAggregator(size_t max_filter_lag, size_t down_sampling_factor);
+ void Reset();
+ void Aggregate(int pre_echo_lag);
+ int pre_echo_candidate() const { return pre_echo_candidate_; }
+ void Dump(ApmDataDumper* const data_dumper);
+
+ private:
+ const int block_size_log2_;
+ std::array<int, 250> histogram_data_;
+ std::vector<int> histogram_;
+ int histogram_data_index_ = 0;
+ int pre_echo_candidate_ = 0;
+ };
+
+ class HighestPeakAggregator {
+ public:
+ explicit HighestPeakAggregator(size_t max_filter_lag);
+ void Reset();
+ void Aggregate(int lag);
+ int candidate() const { return candidate_; }
+ rtc::ArrayView<const int> histogram() const { return histogram_; }
+
+ private:
+ std::vector<int> histogram_;
+ std::array<int, 250> histogram_data_;
+ int histogram_data_index_ = 0;
+ int candidate_ = -1;
+ };
+
+ ApmDataDumper* const data_dumper_;
+ bool significant_candidate_found_ = false;
+ const EchoCanceller3Config::Delay::DelaySelectionThresholds thresholds_;
+ const int headroom_;
+ HighestPeakAggregator highest_peak_aggregator_;
+ std::unique_ptr<PreEchoLagAggregator> pre_echo_lag_aggregator_;
+};
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AEC3_MATCHED_FILTER_LAG_AGGREGATOR_H_
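To make the thresholds used in Aggregate() concrete (using {initial: 5, converged: 20} as assumed example values): while no candidate has ever exceeded 20 hits in the 250-estimate window, any histogram bin exceeding 5 hits is already reported, but only with Quality::kCoarse; once some candidate has exceeded 20 hits, only bins exceeding 20 hits are reported, with Quality::kRefined, and the lower threshold no longer applies.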
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/matched_filter_lag_aggregator_unittest.cc b/third_party/libwebrtc/modules/audio_processing/aec3/matched_filter_lag_aggregator_unittest.cc
new file mode 100644
index 0000000000..6804102584
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/matched_filter_lag_aggregator_unittest.cc
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/matched_filter_lag_aggregator.h"
+
+#include <sstream>
+#include <string>
+#include <vector>
+
+#include "api/array_view.h"
+#include "api/audio/echo_canceller3_config.h"
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+constexpr size_t kNumLagsBeforeDetection = 26;
+
+} // namespace
+
+// Verifies that varying lag estimates cause the aggregated lag estimate to not
+// be deemed reliable.
+TEST(MatchedFilterLagAggregator,
+ LagEstimateInvarianceRequiredForAggregatedLag) {
+ ApmDataDumper data_dumper(0);
+ EchoCanceller3Config config;
+ MatchedFilterLagAggregator aggregator(&data_dumper, /*max_filter_lag=*/100,
+ config.delay);
+
+ absl::optional<DelayEstimate> aggregated_lag;
+ for (size_t k = 0; k < kNumLagsBeforeDetection; ++k) {
+ aggregated_lag = aggregator.Aggregate(
+ MatchedFilter::LagEstimate(/*lag=*/10, /*pre_echo_lag=*/10));
+ }
+ EXPECT_TRUE(aggregated_lag);
+
+ for (size_t k = 0; k < kNumLagsBeforeDetection * 100; ++k) {
+ aggregated_lag = aggregator.Aggregate(
+ MatchedFilter::LagEstimate(/*lag=*/k % 100, /*pre_echo_lag=*/k % 100));
+ }
+ EXPECT_FALSE(aggregated_lag);
+
+ for (size_t k = 0; k < kNumLagsBeforeDetection * 100; ++k) {
+ aggregated_lag = aggregator.Aggregate(
+ MatchedFilter::LagEstimate(/*lag=*/k % 100, /*pre_echo_lag=*/k % 100));
+ EXPECT_FALSE(aggregated_lag);
+ }
+}
+
+// Verifies that lag estimate updates are required to produce an updated lag
+// aggregate.
+TEST(MatchedFilterLagAggregator,
+ DISABLED_LagEstimateUpdatesRequiredForAggregatedLag) {
+ constexpr size_t kLag = 5;
+ ApmDataDumper data_dumper(0);
+ EchoCanceller3Config config;
+ MatchedFilterLagAggregator aggregator(&data_dumper, /*max_filter_lag=*/kLag,
+ config.delay);
+ for (size_t k = 0; k < kNumLagsBeforeDetection * 10; ++k) {
+ absl::optional<DelayEstimate> aggregated_lag = aggregator.Aggregate(
+ MatchedFilter::LagEstimate(/*lag=*/kLag, /*pre_echo_lag=*/kLag));
+ EXPECT_FALSE(aggregated_lag);
+ EXPECT_EQ(kLag, aggregated_lag->delay);
+ }
+}
+
+// Verifies that an aggregated lag is persistent if the lag estimates do not
+// change and that an aggregated lag is not produced without gaining lag
+// estimate confidence.
+TEST(MatchedFilterLagAggregator, DISABLED_PersistentAggregatedLag) {
+ constexpr size_t kLag1 = 5;
+ constexpr size_t kLag2 = 10;
+ ApmDataDumper data_dumper(0);
+ EchoCanceller3Config config;
+ std::vector<MatchedFilter::LagEstimate> lag_estimates(1);
+ MatchedFilterLagAggregator aggregator(&data_dumper, std::max(kLag1, kLag2),
+ config.delay);
+ absl::optional<DelayEstimate> aggregated_lag;
+ for (size_t k = 0; k < kNumLagsBeforeDetection; ++k) {
+ aggregated_lag = aggregator.Aggregate(
+ MatchedFilter::LagEstimate(/*lag=*/kLag1, /*pre_echo_lag=*/kLag1));
+ }
+ EXPECT_TRUE(aggregated_lag);
+ EXPECT_EQ(kLag1, aggregated_lag->delay);
+
+ for (size_t k = 0; k < kNumLagsBeforeDetection * 40; ++k) {
+ aggregated_lag = aggregator.Aggregate(
+ MatchedFilter::LagEstimate(/*lag=*/kLag2, /*pre_echo_lag=*/kLag2));
+ EXPECT_TRUE(aggregated_lag);
+ EXPECT_EQ(kLag1, aggregated_lag->delay);
+ }
+}
+
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+
+// Verifies the check for a non-null data dumper.
+TEST(MatchedFilterLagAggregatorDeathTest, NullDataDumper) {
+ EchoCanceller3Config config;
+ EXPECT_DEATH(MatchedFilterLagAggregator(nullptr, 10, config.delay), "");
+}
+
+#endif
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/matched_filter_unittest.cc b/third_party/libwebrtc/modules/audio_processing/aec3/matched_filter_unittest.cc
new file mode 100644
index 0000000000..b080308191
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/matched_filter_unittest.cc
@@ -0,0 +1,558 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/matched_filter.h"
+
+// Defines WEBRTC_ARCH_X86_FAMILY, used below.
+#include "rtc_base/system/arch.h"
+
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+#include <emmintrin.h>
+#endif
+#include <algorithm>
+#include <string>
+
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "modules/audio_processing/aec3/decimator.h"
+#include "modules/audio_processing/aec3/render_delay_buffer.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+#include "modules/audio_processing/test/echo_canceller_test_tools.h"
+#include "rtc_base/random.h"
+#include "rtc_base/strings/string_builder.h"
+#include "system_wrappers/include/cpu_features_wrapper.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace aec3 {
+namespace {
+
+std::string ProduceDebugText(size_t delay, size_t down_sampling_factor) {
+ rtc::StringBuilder ss;
+ ss << "Delay: " << delay;
+ ss << ", Down sampling factor: " << down_sampling_factor;
+ return ss.Release();
+}
+
+constexpr size_t kNumMatchedFilters = 10;
+constexpr size_t kDownSamplingFactors[] = {2, 4, 8};
+constexpr size_t kWindowSizeSubBlocks = 32;
+constexpr size_t kAlignmentShiftSubBlocks = kWindowSizeSubBlocks * 3 / 4;
+
+} // namespace
+
+class MatchedFilterTest : public ::testing::TestWithParam<bool> {};
+
+#if defined(WEBRTC_HAS_NEON)
+// Verifies that the optimized methods for NEON are similar to their reference
+// counterparts.
+TEST_P(MatchedFilterTest, TestNeonOptimizations) {
+ Random random_generator(42U);
+ constexpr float kSmoothing = 0.7f;
+ const bool kComputeAccumulatederror = GetParam();
+ for (auto down_sampling_factor : kDownSamplingFactors) {
+ const size_t sub_block_size = kBlockSize / down_sampling_factor;
+
+ std::vector<float> x(2000);
+ RandomizeSampleVector(&random_generator, x);
+ std::vector<float> y(sub_block_size);
+ std::vector<float> h_NEON(512);
+ std::vector<float> h(512);
+ std::vector<float> accumulated_error(512);
+ std::vector<float> accumulated_error_NEON(512);
+ std::vector<float> scratch_memory(512);
+
+ int x_index = 0;
+ for (int k = 0; k < 1000; ++k) {
+ RandomizeSampleVector(&random_generator, y);
+
+ bool filters_updated = false;
+ float error_sum = 0.f;
+ bool filters_updated_NEON = false;
+ float error_sum_NEON = 0.f;
+
+ MatchedFilterCore_NEON(x_index, h.size() * 150.f * 150.f, kSmoothing, x,
+ y, h_NEON, &filters_updated_NEON, &error_sum_NEON,
+ kComputeAccumulatederror, accumulated_error_NEON,
+ scratch_memory);
+
+ MatchedFilterCore(x_index, h.size() * 150.f * 150.f, kSmoothing, x, y, h,
+ &filters_updated, &error_sum, kComputeAccumulatederror,
+ accumulated_error);
+
+ EXPECT_EQ(filters_updated, filters_updated_NEON);
+ EXPECT_NEAR(error_sum, error_sum_NEON, error_sum / 100000.f);
+
+ for (size_t j = 0; j < h.size(); ++j) {
+ EXPECT_NEAR(h[j], h_NEON[j], 0.00001f);
+ }
+
+ if (kComputeAccumulatederror) {
+ for (size_t j = 0; j < accumulated_error.size(); ++j) {
+ float difference =
+ std::abs(accumulated_error[j] - accumulated_error_NEON[j]);
+ float relative_difference = accumulated_error[j] > 0
+ ? difference / accumulated_error[j]
+ : difference;
+ EXPECT_NEAR(relative_difference, 0.0f, 0.02f);
+ }
+ }
+
+ x_index = (x_index + sub_block_size) % x.size();
+ }
+ }
+}
+#endif
+
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+// Verifies that the optimized methods for SSE2 are bit-exact with their
+// reference counterparts.
+TEST_P(MatchedFilterTest, TestSse2Optimizations) {
+ const bool kComputeAccumulatederror = GetParam();
+ bool use_sse2 = (GetCPUInfo(kSSE2) != 0);
+ if (use_sse2) {
+ Random random_generator(42U);
+ constexpr float kSmoothing = 0.7f;
+ for (auto down_sampling_factor : kDownSamplingFactors) {
+ const size_t sub_block_size = kBlockSize / down_sampling_factor;
+ std::vector<float> x(2000);
+ RandomizeSampleVector(&random_generator, x);
+ std::vector<float> y(sub_block_size);
+ std::vector<float> h_SSE2(512);
+ std::vector<float> h(512);
+ std::vector<float> accumulated_error(512 / 4);
+ std::vector<float> accumulated_error_SSE2(512 / 4);
+ std::vector<float> scratch_memory(512);
+ int x_index = 0;
+ for (int k = 0; k < 1000; ++k) {
+ RandomizeSampleVector(&random_generator, y);
+
+ bool filters_updated = false;
+ float error_sum = 0.f;
+ bool filters_updated_SSE2 = false;
+ float error_sum_SSE2 = 0.f;
+
+ MatchedFilterCore_SSE2(x_index, h.size() * 150.f * 150.f, kSmoothing, x,
+ y, h_SSE2, &filters_updated_SSE2,
+ &error_sum_SSE2, kComputeAccumulatederror,
+ accumulated_error_SSE2, scratch_memory);
+
+ MatchedFilterCore(x_index, h.size() * 150.f * 150.f, kSmoothing, x, y,
+ h, &filters_updated, &error_sum,
+ kComputeAccumulatederror, accumulated_error);
+
+ EXPECT_EQ(filters_updated, filters_updated_SSE2);
+ EXPECT_NEAR(error_sum, error_sum_SSE2, error_sum / 100000.f);
+
+ for (size_t j = 0; j < h.size(); ++j) {
+ EXPECT_NEAR(h[j], h_SSE2[j], 0.00001f);
+ }
+
+ for (size_t j = 0; j < accumulated_error.size(); ++j) {
+ float difference =
+ std::abs(accumulated_error[j] - accumulated_error_SSE2[j]);
+ float relative_difference = accumulated_error[j] > 0
+ ? difference / accumulated_error[j]
+ : difference;
+ EXPECT_NEAR(relative_difference, 0.0f, 0.00001f);
+ }
+
+ x_index = (x_index + sub_block_size) % x.size();
+ }
+ }
+ }
+}
+
+TEST_P(MatchedFilterTest, TestAvx2Optimizations) {
+ bool use_avx2 = (GetCPUInfo(kAVX2) != 0);
+ const bool kComputeAccumulatederror = GetParam();
+ if (use_avx2) {
+ Random random_generator(42U);
+ constexpr float kSmoothing = 0.7f;
+ for (auto down_sampling_factor : kDownSamplingFactors) {
+ const size_t sub_block_size = kBlockSize / down_sampling_factor;
+ std::vector<float> x(2000);
+ RandomizeSampleVector(&random_generator, x);
+ std::vector<float> y(sub_block_size);
+ std::vector<float> h_AVX2(512);
+ std::vector<float> h(512);
+ std::vector<float> accumulated_error(512 / 4);
+ std::vector<float> accumulated_error_AVX2(512 / 4);
+ std::vector<float> scratch_memory(512);
+ int x_index = 0;
+ for (int k = 0; k < 1000; ++k) {
+ RandomizeSampleVector(&random_generator, y);
+ bool filters_updated = false;
+ float error_sum = 0.f;
+ bool filters_updated_AVX2 = false;
+ float error_sum_AVX2 = 0.f;
+ MatchedFilterCore_AVX2(x_index, h.size() * 150.f * 150.f, kSmoothing, x,
+ y, h_AVX2, &filters_updated_AVX2,
+ &error_sum_AVX2, kComputeAccumulatederror,
+ accumulated_error_AVX2, scratch_memory);
+ MatchedFilterCore(x_index, h.size() * 150.f * 150.f, kSmoothing, x, y,
+ h, &filters_updated, &error_sum,
+ kComputeAccumulatederror, accumulated_error);
+ EXPECT_EQ(filters_updated, filters_updated_AVX2);
+ EXPECT_NEAR(error_sum, error_sum_AVX2, error_sum / 100000.f);
+ for (size_t j = 0; j < h.size(); ++j) {
+ EXPECT_NEAR(h[j], h_AVX2[j], 0.00001f);
+ }
+ for (size_t j = 0; j < accumulated_error.size(); j += 4) {
+ float difference =
+ std::abs(accumulated_error[j] - accumulated_error_AVX2[j]);
+ float relative_difference = accumulated_error[j] > 0
+ ? difference / accumulated_error[j]
+ : difference;
+ EXPECT_NEAR(relative_difference, 0.0f, 0.00001f);
+ }
+ x_index = (x_index + sub_block_size) % x.size();
+ }
+ }
+ }
+}
+
+#endif
+
+// Verifies that the (optimized) function MaxSquarePeakIndex() produces output
+// equal to that of the corresponding std functions.
+TEST(MatchedFilter, MaxSquarePeakIndex) {
+ Random random_generator(42U);
+ constexpr int kMaxLength = 128;
+ constexpr int kNumIterationsPerLength = 256;
+ for (int length = 1; length < kMaxLength; ++length) {
+ std::vector<float> y(length);
+ for (int i = 0; i < kNumIterationsPerLength; ++i) {
+ RandomizeSampleVector(&random_generator, y);
+
+ size_t lag_from_function = MaxSquarePeakIndex(y);
+ size_t lag_from_std = std::distance(
+ y.begin(),
+ std::max_element(y.begin(), y.end(), [](float a, float b) -> bool {
+ return a * a < b * b;
+ }));
+ EXPECT_EQ(lag_from_function, lag_from_std);
+ }
+ }
+}
+
+// Verifies that the matched filter produces proper lag estimates for
+// artificially delayed signals.
+TEST_P(MatchedFilterTest, LagEstimation) {
+ const bool kDetectPreEcho = GetParam();
+ Random random_generator(42U);
+ constexpr size_t kNumChannels = 1;
+ constexpr int kSampleRateHz = 48000;
+ constexpr size_t kNumBands = NumBandsForRate(kSampleRateHz);
+
+ for (auto down_sampling_factor : kDownSamplingFactors) {
+ const size_t sub_block_size = kBlockSize / down_sampling_factor;
+
+ Block render(kNumBands, kNumChannels);
+ std::vector<std::vector<float>> capture(
+ 1, std::vector<float>(kBlockSize, 0.f));
+ ApmDataDumper data_dumper(0);
+ for (size_t delay_samples : {5, 64, 150, 200, 800, 1000}) {
+ SCOPED_TRACE(ProduceDebugText(delay_samples, down_sampling_factor));
+ EchoCanceller3Config config;
+ config.delay.down_sampling_factor = down_sampling_factor;
+ config.delay.num_filters = kNumMatchedFilters;
+ Decimator capture_decimator(down_sampling_factor);
+ DelayBuffer<float> signal_delay_buffer(down_sampling_factor *
+ delay_samples);
+ MatchedFilter filter(
+ &data_dumper, DetectOptimization(), sub_block_size,
+ kWindowSizeSubBlocks, kNumMatchedFilters, kAlignmentShiftSubBlocks,
+ 150, config.delay.delay_estimate_smoothing,
+ config.delay.delay_estimate_smoothing_delay_found,
+ config.delay.delay_candidate_detection_threshold, kDetectPreEcho);
+
+ std::unique_ptr<RenderDelayBuffer> render_delay_buffer(
+ RenderDelayBuffer::Create(config, kSampleRateHz, kNumChannels));
+
+ // Analyze the correlation between render and capture.
+ for (size_t k = 0; k < (600 + delay_samples / sub_block_size); ++k) {
+ for (size_t band = 0; band < kNumBands; ++band) {
+ for (size_t channel = 0; channel < kNumChannels; ++channel) {
+ RandomizeSampleVector(&random_generator,
+ render.View(band, channel));
+ }
+ }
+ signal_delay_buffer.Delay(render.View(/*band=*/0, /*channel=*/0),
+ capture[0]);
+ render_delay_buffer->Insert(render);
+
+ if (k == 0) {
+ render_delay_buffer->Reset();
+ }
+
+ render_delay_buffer->PrepareCaptureProcessing();
+ std::array<float, kBlockSize> downsampled_capture_data;
+ rtc::ArrayView<float> downsampled_capture(
+ downsampled_capture_data.data(), sub_block_size);
+ capture_decimator.Decimate(capture[0], downsampled_capture);
+ filter.Update(render_delay_buffer->GetDownsampledRenderBuffer(),
+ downsampled_capture, /*use_slow_smoothing=*/false);
+ }
+
+ // Obtain the lag estimates.
+ auto lag_estimate = filter.GetBestLagEstimate();
+ EXPECT_TRUE(lag_estimate.has_value());
+
+ // Verify that the expected most accurate lag estimate is correct.
+ if (lag_estimate.has_value()) {
+ EXPECT_EQ(delay_samples, lag_estimate->lag);
+ EXPECT_EQ(delay_samples, lag_estimate->pre_echo_lag);
+ }
+ }
+ }
+}
+
+// Tests the pre-echo estimation.
+TEST_P(MatchedFilterTest, PreEchoEstimation) {
+ const bool kDetectPreEcho = GetParam();
+ Random random_generator(42U);
+ constexpr size_t kNumChannels = 1;
+ constexpr int kSampleRateHz = 48000;
+ constexpr size_t kNumBands = NumBandsForRate(kSampleRateHz);
+
+ for (auto down_sampling_factor : kDownSamplingFactors) {
+ const size_t sub_block_size = kBlockSize / down_sampling_factor;
+
+ Block render(kNumBands, kNumChannels);
+ std::vector<std::vector<float>> capture(
+ 1, std::vector<float>(kBlockSize, 0.f));
+ std::vector<float> capture_with_pre_echo(kBlockSize, 0.f);
+ ApmDataDumper data_dumper(0);
+ // data_dumper.SetActivated(true);
+ size_t pre_echo_delay_samples = 20e-3 * 16000 / down_sampling_factor;
+ size_t echo_delay_samples = 50e-3 * 16000 / down_sampling_factor;
+ EchoCanceller3Config config;
+ config.delay.down_sampling_factor = down_sampling_factor;
+ config.delay.num_filters = kNumMatchedFilters;
+ Decimator capture_decimator(down_sampling_factor);
+ DelayBuffer<float> signal_echo_delay_buffer(down_sampling_factor *
+ echo_delay_samples);
+ DelayBuffer<float> signal_pre_echo_delay_buffer(down_sampling_factor *
+ pre_echo_delay_samples);
+ MatchedFilter filter(
+ &data_dumper, DetectOptimization(), sub_block_size,
+ kWindowSizeSubBlocks, kNumMatchedFilters, kAlignmentShiftSubBlocks, 150,
+ config.delay.delay_estimate_smoothing,
+ config.delay.delay_estimate_smoothing_delay_found,
+ config.delay.delay_candidate_detection_threshold, kDetectPreEcho);
+ std::unique_ptr<RenderDelayBuffer> render_delay_buffer(
+ RenderDelayBuffer::Create(config, kSampleRateHz, kNumChannels));
+ // Analyze the correlation between render and capture.
+ for (size_t k = 0; k < (600 + echo_delay_samples / sub_block_size); ++k) {
+ for (size_t band = 0; band < kNumBands; ++band) {
+ for (size_t channel = 0; channel < kNumChannels; ++channel) {
+ RandomizeSampleVector(&random_generator, render.View(band, channel));
+ }
+ }
+ signal_echo_delay_buffer.Delay(render.View(0, 0), capture[0]);
+ signal_pre_echo_delay_buffer.Delay(render.View(0, 0),
+ capture_with_pre_echo);
+ for (size_t k = 0; k < capture[0].size(); ++k) {
+ constexpr float gain_pre_echo = 0.8f;
+ capture[0][k] += gain_pre_echo * capture_with_pre_echo[k];
+ }
+ render_delay_buffer->Insert(render);
+ if (k == 0) {
+ render_delay_buffer->Reset();
+ }
+ render_delay_buffer->PrepareCaptureProcessing();
+ std::array<float, kBlockSize> downsampled_capture_data;
+ rtc::ArrayView<float> downsampled_capture(downsampled_capture_data.data(),
+ sub_block_size);
+ capture_decimator.Decimate(capture[0], downsampled_capture);
+ filter.Update(render_delay_buffer->GetDownsampledRenderBuffer(),
+ downsampled_capture, /*use_slow_smoothing=*/false);
+ }
+ // Obtain the lag estimates.
+ auto lag_estimate = filter.GetBestLagEstimate();
+ EXPECT_TRUE(lag_estimate.has_value());
+ // Verify that the expected most accurate lag estimate is correct.
+ if (lag_estimate.has_value()) {
+ EXPECT_EQ(echo_delay_samples, lag_estimate->lag);
+ if (kDetectPreEcho) {
+        // The pre-echo delay is estimated in a subsampled domain, so a larger
+        // error is allowed.
+ EXPECT_NEAR(pre_echo_delay_samples, lag_estimate->pre_echo_lag, 4);
+ } else {
+        // The pre-echo delay falls back to the highest matched filter peak
+        // when its detection is disabled.
+ EXPECT_EQ(echo_delay_samples, lag_estimate->pre_echo_lag);
+ }
+ }
+ }
+}
+
+// Verifies that the matched filter does not produce reliable and accurate
+// estimates for uncorrelated render and capture signals.
+TEST_P(MatchedFilterTest, LagNotReliableForUncorrelatedRenderAndCapture) {
+ const bool kDetectPreEcho = GetParam();
+ constexpr size_t kNumChannels = 1;
+ constexpr int kSampleRateHz = 48000;
+ constexpr size_t kNumBands = NumBandsForRate(kSampleRateHz);
+ Random random_generator(42U);
+ for (auto down_sampling_factor : kDownSamplingFactors) {
+ EchoCanceller3Config config;
+ config.delay.down_sampling_factor = down_sampling_factor;
+ config.delay.num_filters = kNumMatchedFilters;
+ const size_t sub_block_size = kBlockSize / down_sampling_factor;
+
+ Block render(kNumBands, kNumChannels);
+ std::array<float, kBlockSize> capture_data;
+ rtc::ArrayView<float> capture(capture_data.data(), sub_block_size);
+ std::fill(capture.begin(), capture.end(), 0.f);
+ ApmDataDumper data_dumper(0);
+ std::unique_ptr<RenderDelayBuffer> render_delay_buffer(
+ RenderDelayBuffer::Create(config, kSampleRateHz, kNumChannels));
+ MatchedFilter filter(
+ &data_dumper, DetectOptimization(), sub_block_size,
+ kWindowSizeSubBlocks, kNumMatchedFilters, kAlignmentShiftSubBlocks, 150,
+ config.delay.delay_estimate_smoothing,
+ config.delay.delay_estimate_smoothing_delay_found,
+ config.delay.delay_candidate_detection_threshold, kDetectPreEcho);
+
+ // Analyze the correlation between render and capture.
+ for (size_t k = 0; k < 100; ++k) {
+ RandomizeSampleVector(&random_generator,
+ render.View(/*band=*/0, /*channel=*/0));
+ RandomizeSampleVector(&random_generator, capture);
+ render_delay_buffer->Insert(render);
+ filter.Update(render_delay_buffer->GetDownsampledRenderBuffer(), capture,
+ false);
+ }
+
+    // Obtain the best lag estimate and verify that no lag estimate is
+    // reliable.
+ auto best_lag_estimates = filter.GetBestLagEstimate();
+ EXPECT_FALSE(best_lag_estimates.has_value());
+ }
+}
+
+// Verifies that the matched filter does not produce updated lag estimates for
+// render signals of low level.
+TEST_P(MatchedFilterTest, LagNotUpdatedForLowLevelRender) {
+ const bool kDetectPreEcho = GetParam();
+ Random random_generator(42U);
+ constexpr size_t kNumChannels = 1;
+ constexpr int kSampleRateHz = 48000;
+ constexpr size_t kNumBands = NumBandsForRate(kSampleRateHz);
+
+ for (auto down_sampling_factor : kDownSamplingFactors) {
+ const size_t sub_block_size = kBlockSize / down_sampling_factor;
+
+ Block render(kNumBands, kNumChannels);
+ std::vector<std::vector<float>> capture(
+ 1, std::vector<float>(kBlockSize, 0.f));
+ ApmDataDumper data_dumper(0);
+ EchoCanceller3Config config;
+ MatchedFilter filter(
+ &data_dumper, DetectOptimization(), sub_block_size,
+ kWindowSizeSubBlocks, kNumMatchedFilters, kAlignmentShiftSubBlocks, 150,
+ config.delay.delay_estimate_smoothing,
+ config.delay.delay_estimate_smoothing_delay_found,
+ config.delay.delay_candidate_detection_threshold, kDetectPreEcho);
+ std::unique_ptr<RenderDelayBuffer> render_delay_buffer(
+ RenderDelayBuffer::Create(EchoCanceller3Config(), kSampleRateHz,
+ kNumChannels));
+ Decimator capture_decimator(down_sampling_factor);
+
+ // Analyze the correlation between render and capture.
+ for (size_t k = 0; k < 100; ++k) {
+ RandomizeSampleVector(&random_generator, render.View(0, 0));
+ for (auto& render_k : render.View(0, 0)) {
+ render_k *= 149.f / 32767.f;
+ }
+ std::copy(render.begin(0, 0), render.end(0, 0), capture[0].begin());
+ std::array<float, kBlockSize> downsampled_capture_data;
+ rtc::ArrayView<float> downsampled_capture(downsampled_capture_data.data(),
+ sub_block_size);
+ capture_decimator.Decimate(capture[0], downsampled_capture);
+ filter.Update(render_delay_buffer->GetDownsampledRenderBuffer(),
+ downsampled_capture, false);
+ }
+
+ // Verify that no lag estimate has been produced.
+ auto lag_estimate = filter.GetBestLagEstimate();
+ EXPECT_FALSE(lag_estimate.has_value());
+ }
+}
+
+INSTANTIATE_TEST_SUITE_P(_, MatchedFilterTest, testing::Values(true, false));
+
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+
+class MatchedFilterDeathTest : public ::testing::TestWithParam<bool> {};
+
+// Verifies the check for a non-zero window size.
+TEST_P(MatchedFilterDeathTest, ZeroWindowSize) {
+ const bool kDetectPreEcho = GetParam();
+ ApmDataDumper data_dumper(0);
+ EchoCanceller3Config config;
+ EXPECT_DEATH(MatchedFilter(&data_dumper, DetectOptimization(), 16, 0, 1, 1,
+ 150, config.delay.delay_estimate_smoothing,
+ config.delay.delay_estimate_smoothing_delay_found,
+ config.delay.delay_candidate_detection_threshold,
+ kDetectPreEcho),
+ "");
+}
+
+// Verifies the check for a non-null data dumper.
+TEST_P(MatchedFilterDeathTest, NullDataDumper) {
+ const bool kDetectPreEcho = GetParam();
+ EchoCanceller3Config config;
+ EXPECT_DEATH(MatchedFilter(nullptr, DetectOptimization(), 16, 1, 1, 1, 150,
+ config.delay.delay_estimate_smoothing,
+ config.delay.delay_estimate_smoothing_delay_found,
+ config.delay.delay_candidate_detection_threshold,
+ kDetectPreEcho),
+ "");
+}
+
+// Verifies the check that the sub block size is a multiple of 4.
+// TODO(peah): Activate the unittest once the required code has been landed.
+TEST_P(MatchedFilterDeathTest, DISABLED_BlockSizeMultipleOf4) {
+ const bool kDetectPreEcho = GetParam();
+ ApmDataDumper data_dumper(0);
+ EchoCanceller3Config config;
+ EXPECT_DEATH(MatchedFilter(&data_dumper, DetectOptimization(), 15, 1, 1, 1,
+ 150, config.delay.delay_estimate_smoothing,
+ config.delay.delay_estimate_smoothing_delay_found,
+ config.delay.delay_candidate_detection_threshold,
+ kDetectPreEcho),
+ "");
+}
+
+// Verifies the check that an integer number of sub blocks adds up to a block
+// size.
+// TODO(peah): Activate the unittest once the required code has been landed.
+TEST_P(MatchedFilterDeathTest, DISABLED_SubBlockSizeAddsUpToBlockSize) {
+ const bool kDetectPreEcho = GetParam();
+ ApmDataDumper data_dumper(0);
+ EchoCanceller3Config config;
+ EXPECT_DEATH(MatchedFilter(&data_dumper, DetectOptimization(), 12, 1, 1, 1,
+ 150, config.delay.delay_estimate_smoothing,
+ config.delay.delay_estimate_smoothing_delay_found,
+ config.delay.delay_candidate_detection_threshold,
+ kDetectPreEcho),
+ "");
+}
+
+INSTANTIATE_TEST_SUITE_P(_,
+ MatchedFilterDeathTest,
+ testing::Values(true, false));
+
+#endif
+
+} // namespace aec3
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/mock/mock_block_processor.cc b/third_party/libwebrtc/modules/audio_processing/aec3/mock/mock_block_processor.cc
new file mode 100644
index 0000000000..c5c33dbd68
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/mock/mock_block_processor.cc
@@ -0,0 +1,20 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/mock/mock_block_processor.h"
+
+namespace webrtc {
+namespace test {
+
+MockBlockProcessor::MockBlockProcessor() = default;
+MockBlockProcessor::~MockBlockProcessor() = default;
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/mock/mock_block_processor.h b/third_party/libwebrtc/modules/audio_processing/aec3/mock/mock_block_processor.h
new file mode 100644
index 0000000000..c9ae38c4aa
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/mock/mock_block_processor.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_MOCK_MOCK_BLOCK_PROCESSOR_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_MOCK_MOCK_BLOCK_PROCESSOR_H_
+
+#include <vector>
+
+#include "modules/audio_processing/aec3/block_processor.h"
+#include "test/gmock.h"
+
+namespace webrtc {
+namespace test {
+
+class MockBlockProcessor : public BlockProcessor {
+ public:
+ MockBlockProcessor();
+ virtual ~MockBlockProcessor();
+
+ MOCK_METHOD(void,
+ ProcessCapture,
+ (bool level_change,
+ bool saturated_microphone_signal,
+ Block* linear_output,
+ Block* capture_block),
+ (override));
+ MOCK_METHOD(void, BufferRender, (const Block& block), (override));
+ MOCK_METHOD(void,
+ UpdateEchoLeakageStatus,
+ (bool leakage_detected),
+ (override));
+ MOCK_METHOD(void,
+ GetMetrics,
+ (EchoControl::Metrics * metrics),
+ (const, override));
+ MOCK_METHOD(void, SetAudioBufferDelay, (int delay_ms), (override));
+ MOCK_METHOD(void,
+ SetCaptureOutputUsage,
+ (bool capture_output_used),
+ (override));
+};
+
+} // namespace test
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AEC3_MOCK_MOCK_BLOCK_PROCESSOR_H_
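A usage sketch, not part of this patch, of how a test can inject this mock through the BlockProcessor interface and verify an interaction; the test name and expectation values are illustrative:

#include "modules/audio_processing/aec3/mock/mock_block_processor.h"
#include "test/gmock.h"
#include "test/gtest.h"

namespace webrtc {
namespace test {

TEST(MockBlockProcessorUsageSketch, VerifiesLeakageUpdate) {
  ::testing::StrictMock<MockBlockProcessor> processor;

  // With a StrictMock, the test fails unless exactly this call arrives.
  EXPECT_CALL(processor, UpdateEchoLeakageStatus(/*leakage_detected=*/true));

  // Code under test would receive the mock via a BlockProcessor pointer;
  // here the call is made directly for brevity.
  BlockProcessor* block_processor = &processor;
  block_processor->UpdateEchoLeakageStatus(true);
}

}  // namespace test
}  // namespace webrtc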
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/mock/mock_echo_remover.cc b/third_party/libwebrtc/modules/audio_processing/aec3/mock/mock_echo_remover.cc
new file mode 100644
index 0000000000..b903bf0785
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/mock/mock_echo_remover.cc
@@ -0,0 +1,20 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/mock/mock_echo_remover.h"
+
+namespace webrtc {
+namespace test {
+
+MockEchoRemover::MockEchoRemover() = default;
+MockEchoRemover::~MockEchoRemover() = default;
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/mock/mock_echo_remover.h b/third_party/libwebrtc/modules/audio_processing/aec3/mock/mock_echo_remover.h
new file mode 100644
index 0000000000..31f075ef0a
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/mock/mock_echo_remover.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_MOCK_MOCK_ECHO_REMOVER_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_MOCK_MOCK_ECHO_REMOVER_H_
+
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "modules/audio_processing/aec3/echo_path_variability.h"
+#include "modules/audio_processing/aec3/echo_remover.h"
+#include "modules/audio_processing/aec3/render_buffer.h"
+#include "test/gmock.h"
+
+namespace webrtc {
+namespace test {
+
+class MockEchoRemover : public EchoRemover {
+ public:
+ MockEchoRemover();
+ virtual ~MockEchoRemover();
+
+ MOCK_METHOD(void,
+ ProcessCapture,
+ (EchoPathVariability echo_path_variability,
+ bool capture_signal_saturation,
+ const absl::optional<DelayEstimate>& delay_estimate,
+ RenderBuffer* render_buffer,
+ Block* linear_output,
+ Block* capture),
+ (override));
+ MOCK_METHOD(void,
+ UpdateEchoLeakageStatus,
+ (bool leakage_detected),
+ (override));
+ MOCK_METHOD(void,
+ GetMetrics,
+ (EchoControl::Metrics * metrics),
+ (const, override));
+ MOCK_METHOD(void,
+ SetCaptureOutputUsage,
+ (bool capture_output_used),
+ (override));
+};
+
+} // namespace test
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AEC3_MOCK_MOCK_ECHO_REMOVER_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/mock/mock_render_delay_buffer.cc b/third_party/libwebrtc/modules/audio_processing/aec3/mock/mock_render_delay_buffer.cc
new file mode 100644
index 0000000000..d4ad09b4bc
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/mock/mock_render_delay_buffer.cc
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/mock/mock_render_delay_buffer.h"
+
+namespace webrtc {
+namespace test {
+
+MockRenderDelayBuffer::MockRenderDelayBuffer(int sample_rate_hz,
+ size_t num_channels)
+ : block_buffer_(GetRenderDelayBufferSize(4, 4, 12),
+ NumBandsForRate(sample_rate_hz),
+ num_channels),
+ spectrum_buffer_(block_buffer_.buffer.size(), num_channels),
+ fft_buffer_(block_buffer_.buffer.size(), num_channels),
+ render_buffer_(&block_buffer_, &spectrum_buffer_, &fft_buffer_),
+ downsampled_render_buffer_(GetDownSampledBufferSize(4, 4)) {
+ ON_CALL(*this, GetRenderBuffer())
+ .WillByDefault(
+ ::testing::Invoke(this, &MockRenderDelayBuffer::FakeGetRenderBuffer));
+ ON_CALL(*this, GetDownsampledRenderBuffer())
+ .WillByDefault(::testing::Invoke(
+ this, &MockRenderDelayBuffer::FakeGetDownsampledRenderBuffer));
+}
+
+MockRenderDelayBuffer::~MockRenderDelayBuffer() = default;
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/mock/mock_render_delay_buffer.h b/third_party/libwebrtc/modules/audio_processing/aec3/mock/mock_render_delay_buffer.h
new file mode 100644
index 0000000000..c17fd62caa
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/mock/mock_render_delay_buffer.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_MOCK_MOCK_RENDER_DELAY_BUFFER_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_MOCK_MOCK_RENDER_DELAY_BUFFER_H_
+
+#include <vector>
+
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "modules/audio_processing/aec3/downsampled_render_buffer.h"
+#include "modules/audio_processing/aec3/render_buffer.h"
+#include "modules/audio_processing/aec3/render_delay_buffer.h"
+#include "test/gmock.h"
+
+namespace webrtc {
+namespace test {
+
+class MockRenderDelayBuffer : public RenderDelayBuffer {
+ public:
+ MockRenderDelayBuffer(int sample_rate_hz, size_t num_channels);
+ virtual ~MockRenderDelayBuffer();
+
+ MOCK_METHOD(void, Reset, (), (override));
+ MOCK_METHOD(RenderDelayBuffer::BufferingEvent,
+ Insert,
+ (const Block& block),
+ (override));
+ MOCK_METHOD(void, HandleSkippedCaptureProcessing, (), (override));
+ MOCK_METHOD(RenderDelayBuffer::BufferingEvent,
+ PrepareCaptureProcessing,
+ (),
+ (override));
+ MOCK_METHOD(bool, AlignFromDelay, (size_t delay), (override));
+ MOCK_METHOD(void, AlignFromExternalDelay, (), (override));
+ MOCK_METHOD(size_t, Delay, (), (const, override));
+ MOCK_METHOD(size_t, MaxDelay, (), (const, override));
+ MOCK_METHOD(RenderBuffer*, GetRenderBuffer, (), (override));
+ MOCK_METHOD(const DownsampledRenderBuffer&,
+ GetDownsampledRenderBuffer,
+ (),
+ (const, override));
+ MOCK_METHOD(void, SetAudioBufferDelay, (int delay_ms), (override));
+ MOCK_METHOD(bool, HasReceivedBufferDelay, (), (override));
+
+ private:
+ RenderBuffer* FakeGetRenderBuffer() { return &render_buffer_; }
+ const DownsampledRenderBuffer& FakeGetDownsampledRenderBuffer() const {
+ return downsampled_render_buffer_;
+ }
+ BlockBuffer block_buffer_;
+ SpectrumBuffer spectrum_buffer_;
+ FftBuffer fft_buffer_;
+ RenderBuffer render_buffer_;
+ DownsampledRenderBuffer downsampled_render_buffer_;
+};
+
+} // namespace test
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AEC3_MOCK_MOCK_RENDER_DELAY_BUFFER_H_
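Unlike the purely interaction-checking mocks above, this mock uses gmock's delegate-to-fake pattern: the constructor installs ON_CALL defaults so that GetRenderBuffer() and GetDownsampledRenderBuffer() return real, internally owned buffers. A sketch of the effect, with assumed sample rate and channel count:

#include "modules/audio_processing/aec3/mock/mock_render_delay_buffer.h"
#include "test/gmock.h"
#include "test/gtest.h"

namespace webrtc {
namespace test {

TEST(MockRenderDelayBufferUsageSketch, DefaultsReturnRealBuffers) {
  ::testing::NiceMock<MockRenderDelayBuffer> delay_buffer(
      /*sample_rate_hz=*/48000, /*num_channels=*/2);

  // No EXPECT_CALL is needed: the ON_CALL defaults make the mock usable as
  // a working stand-in wherever a RenderDelayBuffer* is consumed.
  RenderBuffer* render_buffer = delay_buffer.GetRenderBuffer();
  EXPECT_NE(nullptr, render_buffer);
}

}  // namespace test
}  // namespace webrtc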
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/mock/mock_render_delay_controller.cc b/third_party/libwebrtc/modules/audio_processing/aec3/mock/mock_render_delay_controller.cc
new file mode 100644
index 0000000000..4ae2af96bf
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/mock/mock_render_delay_controller.cc
@@ -0,0 +1,20 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/mock/mock_render_delay_controller.h"
+
+namespace webrtc {
+namespace test {
+
+MockRenderDelayController::MockRenderDelayController() = default;
+MockRenderDelayController::~MockRenderDelayController() = default;
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/mock/mock_render_delay_controller.h b/third_party/libwebrtc/modules/audio_processing/aec3/mock/mock_render_delay_controller.h
new file mode 100644
index 0000000000..14d499dd28
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/mock/mock_render_delay_controller.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_MOCK_MOCK_RENDER_DELAY_CONTROLLER_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_MOCK_MOCK_RENDER_DELAY_CONTROLLER_H_
+
+#include "absl/types/optional.h"
+#include "api/array_view.h"
+#include "modules/audio_processing/aec3/downsampled_render_buffer.h"
+#include "modules/audio_processing/aec3/render_delay_controller.h"
+#include "test/gmock.h"
+
+namespace webrtc {
+namespace test {
+
+class MockRenderDelayController : public RenderDelayController {
+ public:
+ MockRenderDelayController();
+ virtual ~MockRenderDelayController();
+
+ MOCK_METHOD(void, Reset, (bool reset_delay_statistics), (override));
+ MOCK_METHOD(void, LogRenderCall, (), (override));
+ MOCK_METHOD(absl::optional<DelayEstimate>,
+ GetDelay,
+ (const DownsampledRenderBuffer& render_buffer,
+ size_t render_delay_buffer_delay,
+ const Block& capture),
+ (override));
+ MOCK_METHOD(bool, HasClockdrift, (), (const, override));
+};
+
+} // namespace test
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AEC3_MOCK_MOCK_RENDER_DELAY_CONTROLLER_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/moving_average.cc b/third_party/libwebrtc/modules/audio_processing/aec3/moving_average.cc
new file mode 100644
index 0000000000..7a81ee89ea
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/moving_average.cc
@@ -0,0 +1,60 @@
+
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/moving_average.h"
+
+#include <algorithm>
+#include <functional>
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace aec3 {
+
+MovingAverage::MovingAverage(size_t num_elem, size_t mem_len)
+ : num_elem_(num_elem),
+ mem_len_(mem_len - 1),
+ scaling_(1.0f / static_cast<float>(mem_len)),
+ memory_(num_elem * mem_len_, 0.f),
+ mem_index_(0) {
+ RTC_DCHECK(num_elem_ > 0);
+ RTC_DCHECK(mem_len > 0);
+}
+
+MovingAverage::~MovingAverage() = default;
+
+void MovingAverage::Average(rtc::ArrayView<const float> input,
+ rtc::ArrayView<float> output) {
+ RTC_DCHECK(input.size() == num_elem_);
+ RTC_DCHECK(output.size() == num_elem_);
+
+ // Sum all contributions.
+ std::copy(input.begin(), input.end(), output.begin());
+ for (auto i = memory_.begin(); i < memory_.end(); i += num_elem_) {
+ std::transform(i, i + num_elem_, output.begin(), output.begin(),
+ std::plus<float>());
+ }
+
+ // Divide by mem_len (scaling_ == 1.f / mem_len).
+ for (float& o : output) {
+ o *= scaling_;
+ }
+
+ // Update memory.
+ if (mem_len_ > 0) {
+ std::copy(input.begin(), input.end(),
+ memory_.begin() + mem_index_ * num_elem_);
+ mem_index_ = (mem_index_ + 1) % mem_len_;
+ }
+}
+
+} // namespace aec3
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/moving_average.h b/third_party/libwebrtc/modules/audio_processing/aec3/moving_average.h
new file mode 100644
index 0000000000..913d78519c
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/moving_average.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_MOVING_AVERAGE_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_MOVING_AVERAGE_H_
+
+#include <stddef.h>
+
+#include <vector>
+
+#include "api/array_view.h"
+
+namespace webrtc {
+namespace aec3 {
+
+class MovingAverage {
+ public:
+ // Creates an instance of MovingAverage that accepts inputs of length num_elem
+ // and averages over mem_len inputs.
+ MovingAverage(size_t num_elem, size_t mem_len);
+ ~MovingAverage();
+
+ // Computes the average of input and mem_len-1 previous inputs and stores the
+ // result in output.
+ void Average(rtc::ArrayView<const float> input, rtc::ArrayView<float> output);
+
+ private:
+ const size_t num_elem_;
+ const size_t mem_len_;
+ const float scaling_;
+ std::vector<float> memory_;
+ size_t mem_index_;
+};
+
+} // namespace aec3
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AEC3_MOVING_AVERAGE_H_
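A minimal usage sketch with illustrative values: the memory starts zeroed, so the first call averages the input against mem_len - 1 zero history entries:

#include <array>

#include "modules/audio_processing/aec3/moving_average.h"

namespace webrtc {

void MovingAverageSketch() {
  // Average each element over the current input and the two previous inputs.
  aec3::MovingAverage average(/*num_elem=*/2, /*mem_len=*/3);
  std::array<float, 2> input = {3.f, 6.f};
  std::array<float, 2> output;
  average.Average(input, output);  // History is zero, so output = {1.f, 2.f}.
}

}  // namespace webrtc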
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/moving_average_unittest.cc b/third_party/libwebrtc/modules/audio_processing/aec3/moving_average_unittest.cc
new file mode 100644
index 0000000000..84ba9cbc5b
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/moving_average_unittest.cc
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/moving_average.h"
+
+#include "test/gtest.h"
+
+namespace webrtc {
+
+TEST(MovingAverage, Average) {
+ constexpr size_t num_elem = 4;
+ constexpr size_t mem_len = 3;
+ constexpr float e = 1e-6f;
+ aec3::MovingAverage ma(num_elem, mem_len);
+ std::array<float, num_elem> data1 = {1, 2, 3, 4};
+ std::array<float, num_elem> data2 = {5, 1, 9, 7};
+ std::array<float, num_elem> data3 = {3, 3, 5, 6};
+ std::array<float, num_elem> data4 = {8, 4, 2, 1};
+ std::array<float, num_elem> output;
+
+ ma.Average(data1, output);
+ EXPECT_NEAR(output[0], data1[0] / 3.0f, e);
+ EXPECT_NEAR(output[1], data1[1] / 3.0f, e);
+ EXPECT_NEAR(output[2], data1[2] / 3.0f, e);
+ EXPECT_NEAR(output[3], data1[3] / 3.0f, e);
+
+ ma.Average(data2, output);
+ EXPECT_NEAR(output[0], (data1[0] + data2[0]) / 3.0f, e);
+ EXPECT_NEAR(output[1], (data1[1] + data2[1]) / 3.0f, e);
+ EXPECT_NEAR(output[2], (data1[2] + data2[2]) / 3.0f, e);
+ EXPECT_NEAR(output[3], (data1[3] + data2[3]) / 3.0f, e);
+
+ ma.Average(data3, output);
+ EXPECT_NEAR(output[0], (data1[0] + data2[0] + data3[0]) / 3.0f, e);
+ EXPECT_NEAR(output[1], (data1[1] + data2[1] + data3[1]) / 3.0f, e);
+ EXPECT_NEAR(output[2], (data1[2] + data2[2] + data3[2]) / 3.0f, e);
+ EXPECT_NEAR(output[3], (data1[3] + data2[3] + data3[3]) / 3.0f, e);
+
+ ma.Average(data4, output);
+ EXPECT_NEAR(output[0], (data2[0] + data3[0] + data4[0]) / 3.0f, e);
+ EXPECT_NEAR(output[1], (data2[1] + data3[1] + data4[1]) / 3.0f, e);
+ EXPECT_NEAR(output[2], (data2[2] + data3[2] + data4[2]) / 3.0f, e);
+ EXPECT_NEAR(output[3], (data2[3] + data3[3] + data4[3]) / 3.0f, e);
+}
+
+TEST(MovingAverage, PassThrough) {
+ constexpr size_t num_elem = 4;
+ constexpr size_t mem_len = 1;
+ constexpr float e = 1e-6f;
+ aec3::MovingAverage ma(num_elem, mem_len);
+ std::array<float, num_elem> data1 = {1, 2, 3, 4};
+ std::array<float, num_elem> data2 = {5, 1, 9, 7};
+ std::array<float, num_elem> data3 = {3, 3, 5, 6};
+ std::array<float, num_elem> data4 = {8, 4, 2, 1};
+ std::array<float, num_elem> output;
+
+ ma.Average(data1, output);
+ EXPECT_NEAR(output[0], data1[0], e);
+ EXPECT_NEAR(output[1], data1[1], e);
+ EXPECT_NEAR(output[2], data1[2], e);
+ EXPECT_NEAR(output[3], data1[3], e);
+
+ ma.Average(data2, output);
+ EXPECT_NEAR(output[0], data2[0], e);
+ EXPECT_NEAR(output[1], data2[1], e);
+ EXPECT_NEAR(output[2], data2[2], e);
+ EXPECT_NEAR(output[3], data2[3], e);
+
+ ma.Average(data3, output);
+ EXPECT_NEAR(output[0], data3[0], e);
+ EXPECT_NEAR(output[1], data3[1], e);
+ EXPECT_NEAR(output[2], data3[2], e);
+ EXPECT_NEAR(output[3], data3[3], e);
+
+ ma.Average(data4, output);
+ EXPECT_NEAR(output[0], data4[0], e);
+ EXPECT_NEAR(output[1], data4[1], e);
+ EXPECT_NEAR(output[2], data4[2], e);
+ EXPECT_NEAR(output[3], data4[3], e);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/multi_channel_content_detector.cc b/third_party/libwebrtc/modules/audio_processing/aec3/multi_channel_content_detector.cc
new file mode 100644
index 0000000000..98068964d9
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/multi_channel_content_detector.cc
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/multi_channel_content_detector.h"
+
+#include <cmath>
+
+#include "rtc_base/checks.h"
+#include "system_wrappers/include/metrics.h"
+
+namespace webrtc {
+
+namespace {
+
+constexpr int kNumFramesPerSecond = 100;
+
+// Compares the left and right channels in the render `frame` to determine
+// whether the signal is a proper stereo signal. To allow for differences
+// introduced by hardware drivers, a threshold `detection_threshold` is used for
+// the detection.
+bool HasStereoContent(const std::vector<std::vector<std::vector<float>>>& frame,
+ float detection_threshold) {
+ if (frame[0].size() < 2) {
+ return false;
+ }
+
+ for (size_t band = 0; band < frame.size(); ++band) {
+ for (size_t k = 0; k < frame[band][0].size(); ++k) {
+ if (std::fabs(frame[band][0][k] - frame[band][1][k]) >
+ detection_threshold) {
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
+// In order to avoid logging metrics for very short lifetimes that are unlikely
+// to reflect real calls and that may dilute the "real" data, logging is limited
+// to lifetimes of at least 5 seconds.
+constexpr int kMinNumberOfFramesRequiredToLogMetrics = 500;
+
+// Continuous metrics are logged every 10 seconds.
+constexpr int kFramesPer10Seconds = 1000;
+
+} // namespace
+
+MultiChannelContentDetector::MetricsLogger::MetricsLogger() {}
+
+MultiChannelContentDetector::MetricsLogger::~MetricsLogger() {
+ if (frame_counter_ < kMinNumberOfFramesRequiredToLogMetrics)
+ return;
+
+ RTC_HISTOGRAM_BOOLEAN(
+ "WebRTC.Audio.EchoCanceller.PersistentMultichannelContentEverDetected",
+ any_multichannel_content_detected_ ? 1 : 0);
+}
+
+void MultiChannelContentDetector::MetricsLogger::Update(
+ bool persistent_multichannel_content_detected) {
+ ++frame_counter_;
+ if (persistent_multichannel_content_detected) {
+ any_multichannel_content_detected_ = true;
+ ++persistent_multichannel_frame_counter_;
+ }
+
+ if (frame_counter_ < kMinNumberOfFramesRequiredToLogMetrics)
+ return;
+ if (frame_counter_ % kFramesPer10Seconds != 0)
+ return;
+ const bool mostly_multichannel_last_10_seconds =
+ (persistent_multichannel_frame_counter_ >= kFramesPer10Seconds / 2);
+ RTC_HISTOGRAM_BOOLEAN(
+ "WebRTC.Audio.EchoCanceller.ProcessingPersistentMultichannelContent",
+ mostly_multichannel_last_10_seconds ? 1 : 0);
+
+ persistent_multichannel_frame_counter_ = 0;
+}
+
+MultiChannelContentDetector::MultiChannelContentDetector(
+ bool detect_stereo_content,
+ int num_render_input_channels,
+ float detection_threshold,
+ int stereo_detection_timeout_threshold_seconds,
+ float stereo_detection_hysteresis_seconds)
+ : detect_stereo_content_(detect_stereo_content),
+ detection_threshold_(detection_threshold),
+ detection_timeout_threshold_frames_(
+ stereo_detection_timeout_threshold_seconds > 0
+ ? absl::make_optional(stereo_detection_timeout_threshold_seconds *
+ kNumFramesPerSecond)
+ : absl::nullopt),
+ stereo_detection_hysteresis_frames_(static_cast<int>(
+ stereo_detection_hysteresis_seconds * kNumFramesPerSecond)),
+ metrics_logger_((detect_stereo_content && num_render_input_channels > 1)
+ ? std::make_unique<MetricsLogger>()
+ : nullptr),
+ persistent_multichannel_content_detected_(
+ !detect_stereo_content && num_render_input_channels > 1) {}
+
+bool MultiChannelContentDetector::UpdateDetection(
+ const std::vector<std::vector<std::vector<float>>>& frame) {
+ if (!detect_stereo_content_) {
+ RTC_DCHECK_EQ(frame[0].size() > 1,
+ persistent_multichannel_content_detected_);
+ return false;
+ }
+
+ const bool previous_persistent_multichannel_content_detected =
+ persistent_multichannel_content_detected_;
+ const bool stereo_detected_in_frame =
+ HasStereoContent(frame, detection_threshold_);
+
+ consecutive_frames_with_stereo_ =
+ stereo_detected_in_frame ? consecutive_frames_with_stereo_ + 1 : 0;
+ frames_since_stereo_detected_last_ =
+ stereo_detected_in_frame ? 0 : frames_since_stereo_detected_last_ + 1;
+
+ // Detect persistent multichannel content.
+ if (consecutive_frames_with_stereo_ > stereo_detection_hysteresis_frames_) {
+ persistent_multichannel_content_detected_ = true;
+ }
+ if (detection_timeout_threshold_frames_.has_value() &&
+ frames_since_stereo_detected_last_ >=
+ *detection_timeout_threshold_frames_) {
+ persistent_multichannel_content_detected_ = false;
+ }
+
+ // Detect temporary multichannel content.
+ temporary_multichannel_content_detected_ =
+ persistent_multichannel_content_detected_ ? false
+ : stereo_detected_in_frame;
+
+ if (metrics_logger_)
+ metrics_logger_->Update(persistent_multichannel_content_detected_);
+
+ return previous_persistent_multichannel_content_detected !=
+ persistent_multichannel_content_detected_;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/multi_channel_content_detector.h b/third_party/libwebrtc/modules/audio_processing/aec3/multi_channel_content_detector.h
new file mode 100644
index 0000000000..1742c5fc17
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/multi_channel_content_detector.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_MULTI_CHANNEL_CONTENT_DETECTOR_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_MULTI_CHANNEL_CONTENT_DETECTOR_H_
+
+#include <stddef.h>
+
+#include <memory>
+#include <vector>
+
+#include "absl/types/optional.h"
+
+namespace webrtc {
+
+// Analyzes audio content to determine whether the contained audio is proper
+// multichannel, or only upmixed mono. To allow for differences introduced by
+// hardware drivers, a threshold `detection_threshold` is used for the
+// detection.
+// Logs metrics continuously and upon destruction.
+class MultiChannelContentDetector {
+ public:
+ // If |stereo_detection_timeout_threshold_seconds| <= 0, no timeout is
+ // applied: Once multichannel is detected, the detector remains in that state
+ // for its lifetime.
+ MultiChannelContentDetector(bool detect_stereo_content,
+ int num_render_input_channels,
+ float detection_threshold,
+ int stereo_detection_timeout_threshold_seconds,
+ float stereo_detection_hysteresis_seconds);
+
+ // Compares the left and right channels in the render `frame` to determine
+ // whether the signal is a proper multichannel signal. Returns a bool
+ // indicating whether a change in the proper multichannel content was
+ // detected.
+ bool UpdateDetection(
+ const std::vector<std::vector<std::vector<float>>>& frame);
+
+ bool IsProperMultiChannelContentDetected() const {
+ return persistent_multichannel_content_detected_;
+ }
+
+ bool IsTemporaryMultiChannelContentDetected() const {
+ return temporary_multichannel_content_detected_;
+ }
+
+ private:
+ // Tracks and logs metrics for the amount of multichannel content detected.
+ class MetricsLogger {
+ public:
+ MetricsLogger();
+
+ // The destructor logs call summary statistics.
+ ~MetricsLogger();
+
+ // Updates and logs metrics.
+ void Update(bool persistent_multichannel_content_detected);
+
+ private:
+ int frame_counter_ = 0;
+
+ // Counts the number of frames of persistent multichannel audio observed
+ // during the current metrics collection interval.
+ int persistent_multichannel_frame_counter_ = 0;
+
+ // Indicates whether persistent multichannel content has ever been detected.
+ bool any_multichannel_content_detected_ = false;
+ };
+
+ const bool detect_stereo_content_;
+ const float detection_threshold_;
+ const absl::optional<int> detection_timeout_threshold_frames_;
+ const int stereo_detection_hysteresis_frames_;
+
+ // Collects and reports metrics on the amount of multichannel content
+ // detected. Only created if |num_render_input_channels| > 1 and
+ // |detect_stereo_content_| is true.
+ const std::unique_ptr<MetricsLogger> metrics_logger_;
+
+ bool persistent_multichannel_content_detected_;
+ bool temporary_multichannel_content_detected_ = false;
+ int64_t frames_since_stereo_detected_last_ = 0;
+ int64_t consecutive_frames_with_stereo_ = 0;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AEC3_MULTI_CHANNEL_CONTENT_DETECTOR_H_
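A construction and usage sketch, not part of this patch, with illustrative parameter values. Frames are indexed [band][channel][sample], matching the unit tests below:

#include <vector>

#include "modules/audio_processing/aec3/multi_channel_content_detector.h"

namespace webrtc {

void MultiChannelContentDetectorSketch() {
  MultiChannelContentDetector detector(
      /*detect_stereo_content=*/true,
      /*num_render_input_channels=*/2,
      /*detection_threshold=*/0.0f,
      /*stereo_detection_timeout_threshold_seconds=*/10,
      /*stereo_detection_hysteresis_seconds=*/0.0f);

  // One 10 ms band-split frame where the two channels differ, i.e. proper
  // stereo content.
  std::vector<std::vector<std::vector<float>>> frame(
      1, std::vector<std::vector<float>>(2, std::vector<float>(160, 0.f)));
  frame[0][1].assign(160, 1.f);

  // With zero hysteresis, the first true-stereo frame flips the state.
  const bool changed = detector.UpdateDetection(frame);               // true
  const bool stereo = detector.IsProperMultiChannelContentDetected();  // true
  (void)changed;
  (void)stereo;
}

}  // namespace webrtc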
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/multi_channel_content_detector_unittest.cc b/third_party/libwebrtc/modules/audio_processing/aec3/multi_channel_content_detector_unittest.cc
new file mode 100644
index 0000000000..8d38dd0991
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/multi_channel_content_detector_unittest.cc
@@ -0,0 +1,470 @@
+/*
+ * Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/multi_channel_content_detector.h"
+
+#include "system_wrappers/include/metrics.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+TEST(MultiChannelContentDetector, HandlingOfMono) {
+ MultiChannelContentDetector mc(
+ /*detect_stereo_content=*/true,
+ /*num_render_input_channels=*/1,
+ /*detection_threshold=*/0.0f,
+ /*stereo_detection_timeout_threshold_seconds=*/0,
+ /*stereo_detection_hysteresis_seconds=*/0.0f);
+ EXPECT_FALSE(mc.IsProperMultiChannelContentDetected());
+}
+
+TEST(MultiChannelContentDetector, HandlingOfMonoAndDetectionOff) {
+ MultiChannelContentDetector mc(
+ /*detect_stereo_content=*/false,
+ /*num_render_input_channels=*/1,
+ /*detection_threshold=*/0.0f,
+ /*stereo_detection_timeout_threshold_seconds=*/0,
+ /*stereo_detection_hysteresis_seconds=*/0.0f);
+ EXPECT_FALSE(mc.IsProperMultiChannelContentDetected());
+}
+
+TEST(MultiChannelContentDetector, HandlingOfDetectionOff) {
+ MultiChannelContentDetector mc(
+ /*detect_stereo_content=*/false,
+ /*num_render_input_channels=*/2,
+ /*detection_threshold=*/0.0f,
+ /*stereo_detection_timeout_threshold_seconds=*/0,
+ /*stereo_detection_hysteresis_seconds=*/0.0f);
+ EXPECT_TRUE(mc.IsProperMultiChannelContentDetected());
+
+ std::vector<std::vector<std::vector<float>>> frame(
+ 1, std::vector<std::vector<float>>(2, std::vector<float>(160, 0.0f)));
+ std::fill(frame[0][0].begin(), frame[0][0].end(), 100.0f);
+ std::fill(frame[0][1].begin(), frame[0][1].end(), 101.0f);
+
+ EXPECT_FALSE(mc.UpdateDetection(frame));
+ EXPECT_TRUE(mc.IsProperMultiChannelContentDetected());
+
+ EXPECT_FALSE(mc.UpdateDetection(frame));
+}
+
+TEST(MultiChannelContentDetector, InitialDetectionOfStereo) {
+ MultiChannelContentDetector mc(
+ /*detect_stereo_content=*/true,
+ /*num_render_input_channels=*/2,
+ /*detection_threshold=*/0.0f,
+ /*stereo_detection_timeout_threshold_seconds=*/0,
+ /*stereo_detection_hysteresis_seconds=*/0.0f);
+ EXPECT_FALSE(mc.IsProperMultiChannelContentDetected());
+}
+
+TEST(MultiChannelContentDetector, DetectionWhenFakeStereo) {
+ MultiChannelContentDetector mc(
+ /*detect_stereo_content=*/true,
+ /*num_render_input_channels=*/2,
+ /*detection_threshold=*/0.0f,
+ /*stereo_detection_timeout_threshold_seconds=*/0,
+ /*stereo_detection_hysteresis_seconds=*/0.0f);
+ std::vector<std::vector<std::vector<float>>> frame(
+ 1, std::vector<std::vector<float>>(2, std::vector<float>(160, 0.0f)));
+ std::fill(frame[0][0].begin(), frame[0][0].end(), 100.0f);
+ std::fill(frame[0][1].begin(), frame[0][1].end(), 100.0f);
+ EXPECT_FALSE(mc.UpdateDetection(frame));
+ EXPECT_FALSE(mc.IsProperMultiChannelContentDetected());
+
+ EXPECT_FALSE(mc.UpdateDetection(frame));
+}
+
+TEST(MultiChannelContentDetector, DetectionWhenStereo) {
+ MultiChannelContentDetector mc(
+ /*detect_stereo_content=*/true,
+ /*num_render_input_channels=*/2,
+ /*detection_threshold=*/0.0f,
+ /*stereo_detection_timeout_threshold_seconds=*/0,
+ /*stereo_detection_hysteresis_seconds=*/0.0f);
+ std::vector<std::vector<std::vector<float>>> frame(
+ 1, std::vector<std::vector<float>>(2, std::vector<float>(160, 0.0f)));
+ std::fill(frame[0][0].begin(), frame[0][0].end(), 100.0f);
+ std::fill(frame[0][1].begin(), frame[0][1].end(), 101.0f);
+ EXPECT_TRUE(mc.UpdateDetection(frame));
+ EXPECT_TRUE(mc.IsProperMultiChannelContentDetected());
+
+ EXPECT_FALSE(mc.UpdateDetection(frame));
+}
+
+TEST(MultiChannelContentDetector, DetectionWhenStereoAfterAWhile) {
+ MultiChannelContentDetector mc(
+ /*detect_stereo_content=*/true,
+ /*num_render_input_channels=*/2,
+ /*detection_threshold=*/0.0f,
+ /*stereo_detection_timeout_threshold_seconds=*/0,
+ /*stereo_detection_hysteresis_seconds=*/0.0f);
+ std::vector<std::vector<std::vector<float>>> frame(
+ 1, std::vector<std::vector<float>>(2, std::vector<float>(160, 0.0f)));
+
+ std::fill(frame[0][0].begin(), frame[0][0].end(), 100.0f);
+ std::fill(frame[0][1].begin(), frame[0][1].end(), 100.0f);
+ EXPECT_FALSE(mc.UpdateDetection(frame));
+ EXPECT_FALSE(mc.IsProperMultiChannelContentDetected());
+
+ EXPECT_FALSE(mc.UpdateDetection(frame));
+
+ std::fill(frame[0][0].begin(), frame[0][0].end(), 100.0f);
+ std::fill(frame[0][1].begin(), frame[0][1].end(), 101.0f);
+
+ EXPECT_TRUE(mc.UpdateDetection(frame));
+ EXPECT_TRUE(mc.IsProperMultiChannelContentDetected());
+
+ EXPECT_FALSE(mc.UpdateDetection(frame));
+}
+
+TEST(MultiChannelContentDetector, DetectionWithStereoBelowThreshold) {
+ constexpr float kThreshold = 1.0f;
+ MultiChannelContentDetector mc(
+ /*detect_stereo_content=*/true,
+ /*num_render_input_channels=*/2,
+ /*detection_threshold=*/kThreshold,
+ /*stereo_detection_timeout_threshold_seconds=*/0,
+ /*stereo_detection_hysteresis_seconds=*/0.0f);
+ std::vector<std::vector<std::vector<float>>> frame(
+ 1, std::vector<std::vector<float>>(2, std::vector<float>(160, 0.0f)));
+ std::fill(frame[0][0].begin(), frame[0][0].end(), 100.0f);
+ std::fill(frame[0][1].begin(), frame[0][1].end(), 100.0f + kThreshold);
+
+ EXPECT_FALSE(mc.UpdateDetection(frame));
+ EXPECT_FALSE(mc.IsProperMultiChannelContentDetected());
+
+ EXPECT_FALSE(mc.UpdateDetection(frame));
+}
+
+TEST(MultiChannelContentDetector, DetectionWithStereoAboveThreshold) {
+ constexpr float kThreshold = 1.0f;
+ MultiChannelContentDetector mc(
+ /*detect_stereo_content=*/true,
+ /*num_render_input_channels=*/2,
+ /*detection_threshold=*/kThreshold,
+ /*stereo_detection_timeout_threshold_seconds=*/0,
+ /*stereo_detection_hysteresis_seconds=*/0.0f);
+ std::vector<std::vector<std::vector<float>>> frame(
+ 1, std::vector<std::vector<float>>(2, std::vector<float>(160, 0.0f)));
+ std::fill(frame[0][0].begin(), frame[0][0].end(), 100.0f);
+ std::fill(frame[0][1].begin(), frame[0][1].end(), 100.0f + kThreshold + 0.1f);
+
+ EXPECT_TRUE(mc.UpdateDetection(frame));
+ EXPECT_TRUE(mc.IsProperMultiChannelContentDetected());
+
+ EXPECT_FALSE(mc.UpdateDetection(frame));
+}
+
+class MultiChannelContentDetectorTimeoutBehavior
+ : public ::testing::Test,
+ public ::testing::WithParamInterface<std::tuple<bool, int>> {};
+
+INSTANTIATE_TEST_SUITE_P(MultiChannelContentDetector,
+ MultiChannelContentDetectorTimeoutBehavior,
+ ::testing::Combine(::testing::Values(false, true),
+ ::testing::Values(0, 1, 10)));
+
+TEST_P(MultiChannelContentDetectorTimeoutBehavior,
+ TimeOutBehaviorForNonTrueStereo) {
+ constexpr int kNumFramesPerSecond = 100;
+ const bool detect_stereo_content = std::get<0>(GetParam());
+ const int stereo_detection_timeout_threshold_seconds =
+ std::get<1>(GetParam());
+ const int stereo_detection_timeout_threshold_frames =
+ stereo_detection_timeout_threshold_seconds * kNumFramesPerSecond;
+
+ MultiChannelContentDetector mc(detect_stereo_content,
+ /*num_render_input_channels=*/2,
+ /*detection_threshold=*/0.0f,
+ stereo_detection_timeout_threshold_seconds,
+ /*stereo_detection_hysteresis_seconds=*/0.0f);
+ std::vector<std::vector<std::vector<float>>> true_stereo_frame = {
+ {std::vector<float>(160, 100.0f), std::vector<float>(160, 101.0f)}};
+
+ std::vector<std::vector<std::vector<float>>> fake_stereo_frame = {
+ {std::vector<float>(160, 100.0f), std::vector<float>(160, 100.0f)}};
+
+ // Pass fake stereo frames and verify the content detection.
+ for (int k = 0; k < 10; ++k) {
+ EXPECT_FALSE(mc.UpdateDetection(fake_stereo_frame));
+ if (detect_stereo_content) {
+ EXPECT_FALSE(mc.IsProperMultiChannelContentDetected());
+ } else {
+ EXPECT_TRUE(mc.IsProperMultiChannelContentDetected());
+ }
+ }
+
+ // Pass a true stereo frame and verify that it is properly detected.
+ if (detect_stereo_content) {
+ EXPECT_TRUE(mc.UpdateDetection(true_stereo_frame));
+ } else {
+ EXPECT_FALSE(mc.UpdateDetection(true_stereo_frame));
+ }
+ EXPECT_TRUE(mc.IsProperMultiChannelContentDetected());
+
+ // Pass fake stereo frames until any timeouts are about to occur.
+ for (int k = 0; k < stereo_detection_timeout_threshold_frames - 1; ++k) {
+ EXPECT_FALSE(mc.UpdateDetection(fake_stereo_frame));
+ EXPECT_TRUE(mc.IsProperMultiChannelContentDetected());
+ }
+
+ // Pass a fake stereo frame and verify that any timeouts properly occur.
+ if (detect_stereo_content && stereo_detection_timeout_threshold_frames > 0) {
+ EXPECT_TRUE(mc.UpdateDetection(fake_stereo_frame));
+ EXPECT_FALSE(mc.IsProperMultiChannelContentDetected());
+ } else {
+ EXPECT_FALSE(mc.UpdateDetection(fake_stereo_frame));
+ EXPECT_TRUE(mc.IsProperMultiChannelContentDetected());
+ }
+
+ // Pass fake stereo frames and verify the behavior after any timeout.
+ for (int k = 0; k < 10; ++k) {
+ EXPECT_FALSE(mc.UpdateDetection(fake_stereo_frame));
+ if (detect_stereo_content &&
+ stereo_detection_timeout_threshold_frames > 0) {
+ EXPECT_FALSE(mc.IsProperMultiChannelContentDetected());
+ } else {
+ EXPECT_TRUE(mc.IsProperMultiChannelContentDetected());
+ }
+ }
+}
+
+class MultiChannelContentDetectorHysteresisBehavior
+ : public ::testing::Test,
+ public ::testing::WithParamInterface<std::tuple<bool, float>> {};
+
+INSTANTIATE_TEST_SUITE_P(
+ MultiChannelContentDetector,
+ MultiChannelContentDetectorHysteresisBehavior,
+ ::testing::Combine(::testing::Values(false, true),
+ ::testing::Values(0.0f, 0.1f, 0.2f)));
+
+TEST_P(MultiChannelContentDetectorHysteresisBehavior,
+ PeriodBeforeStereoDetectionIsTriggered) {
+ constexpr int kNumFramesPerSecond = 100;
+ const bool detect_stereo_content = std::get<0>(GetParam());
+ const float stereo_detection_hysteresis_seconds = std::get<1>(GetParam());
+ const int stereo_detection_hysteresis_frames =
+ stereo_detection_hysteresis_seconds * kNumFramesPerSecond;
+
+ MultiChannelContentDetector mc(
+ detect_stereo_content,
+ /*num_render_input_channels=*/2,
+ /*detection_threshold=*/0.0f,
+ /*stereo_detection_timeout_threshold_seconds=*/0,
+ stereo_detection_hysteresis_seconds);
+ std::vector<std::vector<std::vector<float>>> true_stereo_frame = {
+ {std::vector<float>(160, 100.0f), std::vector<float>(160, 101.0f)}};
+
+ std::vector<std::vector<std::vector<float>>> fake_stereo_frame = {
+ {std::vector<float>(160, 100.0f), std::vector<float>(160, 100.0f)}};
+
+ // Pass fake stereo frames and verify the content detection.
+ for (int k = 0; k < 10; ++k) {
+ EXPECT_FALSE(mc.UpdateDetection(fake_stereo_frame));
+ if (detect_stereo_content) {
+ EXPECT_FALSE(mc.IsProperMultiChannelContentDetected());
+ } else {
+ EXPECT_TRUE(mc.IsProperMultiChannelContentDetected());
+ }
+ EXPECT_FALSE(mc.IsTemporaryMultiChannelContentDetected());
+ }
+
+ // Pass two true stereo frames and verify that they are properly detected.
+ ASSERT_TRUE(stereo_detection_hysteresis_frames > 2 ||
+ stereo_detection_hysteresis_frames == 0);
+ for (int k = 0; k < 2; ++k) {
+ if (detect_stereo_content) {
+ if (stereo_detection_hysteresis_seconds == 0.0f) {
+ if (k == 0) {
+ EXPECT_TRUE(mc.UpdateDetection(true_stereo_frame));
+ } else {
+ EXPECT_FALSE(mc.UpdateDetection(true_stereo_frame));
+ }
+ EXPECT_TRUE(mc.IsProperMultiChannelContentDetected());
+ EXPECT_FALSE(mc.IsTemporaryMultiChannelContentDetected());
+ } else {
+ EXPECT_FALSE(mc.UpdateDetection(true_stereo_frame));
+ EXPECT_FALSE(mc.IsProperMultiChannelContentDetected());
+ EXPECT_TRUE(mc.IsTemporaryMultiChannelContentDetected());
+ }
+ } else {
+ EXPECT_FALSE(mc.UpdateDetection(true_stereo_frame));
+ EXPECT_TRUE(mc.IsProperMultiChannelContentDetected());
+ EXPECT_FALSE(mc.IsTemporaryMultiChannelContentDetected());
+ }
+ }
+
+ if (stereo_detection_hysteresis_seconds == 0.0f) {
+ return;
+ }
+
+ // Pass true stereo frames until the hysteresis period is about to end.
+ for (int k = 0; k < stereo_detection_hysteresis_frames - 3; ++k) {
+ if (detect_stereo_content) {
+ EXPECT_FALSE(mc.UpdateDetection(true_stereo_frame));
+ EXPECT_FALSE(mc.IsProperMultiChannelContentDetected());
+ EXPECT_TRUE(mc.IsTemporaryMultiChannelContentDetected());
+ } else {
+ EXPECT_FALSE(mc.UpdateDetection(true_stereo_frame));
+ EXPECT_TRUE(mc.IsProperMultiChannelContentDetected());
+ EXPECT_FALSE(mc.IsTemporaryMultiChannelContentDetected());
+ }
+ }
+
+ // Pass a true stereo frame and verify that it is properly detected.
+ if (detect_stereo_content) {
+ EXPECT_TRUE(mc.UpdateDetection(true_stereo_frame));
+ EXPECT_TRUE(mc.IsProperMultiChannelContentDetected());
+ EXPECT_FALSE(mc.IsTemporaryMultiChannelContentDetected());
+ } else {
+ EXPECT_FALSE(mc.UpdateDetection(true_stereo_frame));
+ EXPECT_TRUE(mc.IsProperMultiChannelContentDetected());
+ EXPECT_FALSE(mc.IsTemporaryMultiChannelContentDetected());
+ }
+
+ // Pass an additional true stereo frame and verify that it is properly
+ // detected.
+ if (detect_stereo_content) {
+ EXPECT_FALSE(mc.UpdateDetection(true_stereo_frame));
+ EXPECT_TRUE(mc.IsProperMultiChannelContentDetected());
+ EXPECT_FALSE(mc.IsTemporaryMultiChannelContentDetected());
+ } else {
+ EXPECT_FALSE(mc.UpdateDetection(true_stereo_frame));
+ EXPECT_TRUE(mc.IsProperMultiChannelContentDetected());
+ EXPECT_FALSE(mc.IsTemporaryMultiChannelContentDetected());
+ }
+
+ // Pass a fake stereo frame and verify that it is properly detected.
+ if (detect_stereo_content) {
+ EXPECT_FALSE(mc.UpdateDetection(fake_stereo_frame));
+ EXPECT_TRUE(mc.IsProperMultiChannelContentDetected());
+ EXPECT_FALSE(mc.IsTemporaryMultiChannelContentDetected());
+ } else {
+ EXPECT_FALSE(mc.UpdateDetection(fake_stereo_frame));
+ EXPECT_TRUE(mc.IsProperMultiChannelContentDetected());
+ EXPECT_FALSE(mc.IsTemporaryMultiChannelContentDetected());
+ }
+}
+
+class MultiChannelContentDetectorMetricsDisabled
+ : public ::testing::Test,
+ public ::testing::WithParamInterface<std::tuple<bool, int>> {};
+
+INSTANTIATE_TEST_SUITE_P(
+ /*no prefix*/,
+ MultiChannelContentDetectorMetricsDisabled,
+ ::testing::Values(std::tuple<bool, int>(false, 2),
+ std::tuple<bool, int>(true, 1)));
+
+// Tests that no metrics are logged when they are clearly uninteresting and would
+// dilute relevant data: when the reference audio is single channel, or when
+// dynamic detection is disabled.
+TEST_P(MultiChannelContentDetectorMetricsDisabled, ReportsNoMetrics) {
+ metrics::Reset();
+ constexpr int kNumFramesPerSecond = 100;
+ const bool detect_stereo_content = std::get<0>(GetParam());
+ const int channel_count = std::get<1>(GetParam());
+ std::vector<std::vector<std::vector<float>>> audio_frame = {
+ std::vector<std::vector<float>>(channel_count,
+ std::vector<float>(160, 100.0f))};
+ {
+ MultiChannelContentDetector mc(
+ /*detect_stereo_content=*/detect_stereo_content,
+ /*num_render_input_channels=*/channel_count,
+ /*detection_threshold=*/0.0f,
+ /*stereo_detection_timeout_threshold_seconds=*/1,
+ /*stereo_detection_hysteresis_seconds=*/0.0f);
+ for (int k = 0; k < 20 * kNumFramesPerSecond; ++k) {
+ mc.UpdateDetection(audio_frame);
+ }
+ }
+ EXPECT_METRIC_EQ(
+ 0, metrics::NumSamples("WebRTC.Audio.EchoCanceller."
+ "ProcessingPersistentMultichannelContent"));
+ EXPECT_METRIC_EQ(
+ 0, metrics::NumSamples("WebRTC.Audio.EchoCanceller."
+ "PersistentMultichannelContentEverDetected"));
+}
+
+// Tests that no metrics are reported when the detector only lives for 3
+// seconds, below the 5 second minimum lifetime required for logging.
+TEST(MultiChannelContentDetectorMetrics, ReportsNoMetricsForShortLifetime) {
+ metrics::Reset();
+ constexpr int kNumFramesPerSecond = 100;
+ constexpr int kTooFewFramesToLogMetrics = 3 * kNumFramesPerSecond;
+ std::vector<std::vector<std::vector<float>>> audio_frame = {
+ std::vector<std::vector<float>>(2, std::vector<float>(160, 100.0f))};
+ {
+ MultiChannelContentDetector mc(
+ /*detect_stereo_content=*/true,
+ /*num_render_input_channels=*/2,
+ /*detection_threshold=*/0.0f,
+ /*stereo_detection_timeout_threshold_seconds=*/1,
+ /*stereo_detection_hysteresis_seconds=*/0.0f);
+ for (int k = 0; k < kTooFewFramesToLogMetrics; ++k) {
+ mc.UpdateDetection(audio_frame);
+ }
+ }
+ EXPECT_METRIC_EQ(
+ 0, metrics::NumSamples("WebRTC.Audio.EchoCanceller."
+ "ProcessingPersistentMultichannelContent"));
+ EXPECT_METRIC_EQ(
+ 0, metrics::NumSamples("WebRTC.Audio.EchoCanceller."
+ "PersistentMultichannelContentEverDetected"));
+}
+
+// Tests that after 25 seconds, metrics are reported.
+TEST(MultiChannelContentDetectorMetrics, ReportsMetrics) {
+ metrics::Reset();
+ constexpr int kNumFramesPerSecond = 100;
+ std::vector<std::vector<std::vector<float>>> true_stereo_frame = {
+ {std::vector<float>(160, 100.0f), std::vector<float>(160, 101.0f)}};
+ std::vector<std::vector<std::vector<float>>> fake_stereo_frame = {
+ {std::vector<float>(160, 100.0f), std::vector<float>(160, 100.0f)}};
+ {
+ MultiChannelContentDetector mc(
+ /*detect_stereo_content=*/true,
+ /*num_render_input_channels=*/2,
+ /*detection_threshold=*/0.0f,
+ /*stereo_detection_timeout_threshold_seconds=*/1,
+ /*stereo_detection_hysteresis_seconds=*/0.0f);
+ for (int k = 0; k < 10 * kNumFramesPerSecond; ++k) {
+ mc.UpdateDetection(true_stereo_frame);
+ }
+ for (int k = 0; k < 15 * kNumFramesPerSecond; ++k) {
+ mc.UpdateDetection(fake_stereo_frame);
+ }
+ }
+ // After 10 seconds of true stereo and the remainder fake stereo, we expect
+ // one lifetime metric sample (multichannel detected) and two periodic samples
+ // (one multichannel, one mono).
+
+ // Check lifetime metric.
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Audio.EchoCanceller."
+ "PersistentMultichannelContentEverDetected"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Audio.EchoCanceller."
+ "PersistentMultichannelContentEverDetected", 1));
+
+ // Check periodic metric.
+ EXPECT_METRIC_EQ(
+ 2, metrics::NumSamples("WebRTC.Audio.EchoCanceller."
+ "ProcessingPersistentMultichannelContent"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Audio.EchoCanceller."
+ "ProcessingPersistentMultichannelContent", 0));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Audio.EchoCanceller."
+ "ProcessingPersistentMultichannelContent", 1));
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/nearend_detector.h b/third_party/libwebrtc/modules/audio_processing/aec3/nearend_detector.h
new file mode 100644
index 0000000000..0d8a06b2cd
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/nearend_detector.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_NEAREND_DETECTOR_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_NEAREND_DETECTOR_H_
+
+#include <vector>
+
+#include "api/array_view.h"
+#include "api/audio/echo_canceller3_config.h"
+#include "modules/audio_processing/aec3/aec3_common.h"
+
+namespace webrtc {
+// Class for selecting whether the suppressor is in the nearend or echo state.
+class NearendDetector {
+ public:
+ virtual ~NearendDetector() {}
+
+ // Returns whether the current state is the nearend state.
+ virtual bool IsNearendState() const = 0;
+
+ // Updates the state selection based on latest spectral estimates.
+ virtual void Update(
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>>
+ nearend_spectrum,
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>>
+ residual_echo_spectrum,
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>>
+ comfort_noise_spectrum,
+ bool initial_state) = 0;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AEC3_NEAREND_DETECTOR_H_
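Since NearendDetector is a pure interface, concrete detectors override both methods. A trivial sketch of the shape of such an implementation; the class is hypothetical and only illustrates the interface contract:

#include <array>

#include "modules/audio_processing/aec3/nearend_detector.h"

namespace webrtc {

// Always reports the echo state, regardless of the spectral estimates.
class NeverNearendDetector : public NearendDetector {
 public:
  bool IsNearendState() const override { return false; }
  void Update(rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>>
                  nearend_spectrum,
              rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>>
                  residual_echo_spectrum,
              rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>>
                  comfort_noise_spectrum,
              bool initial_state) override {}
};

}  // namespace webrtc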
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/refined_filter_update_gain.cc b/third_party/libwebrtc/modules/audio_processing/aec3/refined_filter_update_gain.cc
new file mode 100644
index 0000000000..8e391d6fa6
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/refined_filter_update_gain.cc
@@ -0,0 +1,173 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/refined_filter_update_gain.h"
+
+#include <algorithm>
+#include <functional>
+
+#include "modules/audio_processing/aec3/adaptive_fir_filter.h"
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "modules/audio_processing/aec3/echo_path_variability.h"
+#include "modules/audio_processing/aec3/fft_data.h"
+#include "modules/audio_processing/aec3/render_signal_analyzer.h"
+#include "modules/audio_processing/aec3/subtractor_output.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace {
+
+constexpr float kHErrorInitial = 10000.f;
+constexpr int kPoorExcitationCounterInitial = 1000;
+
+} // namespace
+
+std::atomic<int> RefinedFilterUpdateGain::instance_count_(0);
+
+RefinedFilterUpdateGain::RefinedFilterUpdateGain(
+ const EchoCanceller3Config::Filter::RefinedConfiguration& config,
+ size_t config_change_duration_blocks)
+ : data_dumper_(new ApmDataDumper(instance_count_.fetch_add(1) + 1)),
+ config_change_duration_blocks_(
+ static_cast<int>(config_change_duration_blocks)),
+ poor_excitation_counter_(kPoorExcitationCounterInitial) {
+ SetConfig(config, true);
+ H_error_.fill(kHErrorInitial);
+ RTC_DCHECK_LT(0, config_change_duration_blocks_);
+ one_by_config_change_duration_blocks_ = 1.f / config_change_duration_blocks_;
+}
+
+RefinedFilterUpdateGain::~RefinedFilterUpdateGain() {}
+
+void RefinedFilterUpdateGain::HandleEchoPathChange(
+ const EchoPathVariability& echo_path_variability) {
+ if (echo_path_variability.gain_change) {
+ // TODO(bugs.webrtc.org/9526) Handle gain changes.
+ }
+
+ if (echo_path_variability.delay_change !=
+ EchoPathVariability::DelayAdjustment::kNone) {
+ H_error_.fill(kHErrorInitial);
+ }
+
+ if (!echo_path_variability.gain_change) {
+ poor_excitation_counter_ = kPoorExcitationCounterInitial;
+ call_counter_ = 0;
+ }
+}
+
+void RefinedFilterUpdateGain::Compute(
+ const std::array<float, kFftLengthBy2Plus1>& render_power,
+ const RenderSignalAnalyzer& render_signal_analyzer,
+ const SubtractorOutput& subtractor_output,
+ rtc::ArrayView<const float> erl,
+ size_t size_partitions,
+ bool saturated_capture_signal,
+ bool disallow_leakage_diverged,
+ FftData* gain_fft) {
+ RTC_DCHECK(gain_fft);
+ // Introducing shorter notation to improve readability.
+ const FftData& E_refined = subtractor_output.E_refined;
+ const auto& E2_refined = subtractor_output.E2_refined;
+ const auto& E2_coarse = subtractor_output.E2_coarse;
+ FftData* G = gain_fft;
+ const auto& X2 = render_power;
+
+ ++call_counter_;
+
+ UpdateCurrentConfig();
+
+ if (render_signal_analyzer.PoorSignalExcitation()) {
+ poor_excitation_counter_ = 0;
+ }
+
+ // Do not update the filter if the render is not sufficiently excited.
+ if (++poor_excitation_counter_ < size_partitions ||
+ saturated_capture_signal || call_counter_ <= size_partitions) {
+ G->re.fill(0.f);
+ G->im.fill(0.f);
+ } else {
+ // The noise gate below corresponds to WGN of power -39 dBFS.
+ std::array<float, kFftLengthBy2Plus1> mu;
+ // mu = H_error / (0.5 * H_error * X2 + n * E2).
+ for (size_t k = 0; k < kFftLengthBy2Plus1; ++k) {
+ if (X2[k] >= current_config_.noise_gate) {
+ mu[k] = H_error_[k] /
+ (0.5f * H_error_[k] * X2[k] + size_partitions * E2_refined[k]);
+ } else {
+ mu[k] = 0.f;
+ }
+ }
+
+ // Avoid updating the filter close to narrow bands in the render signals.
+ render_signal_analyzer.MaskRegionsAroundNarrowBands(&mu);
+
+ // H_error = H_error - 0.5 * mu * X2 * H_error.
+ for (size_t k = 0; k < kFftLengthBy2Plus1; ++k) {
+ H_error_[k] -= 0.5f * mu[k] * X2[k] * H_error_[k];
+ }
+
+ // G = mu * E.
+ for (size_t k = 0; k < kFftLengthBy2Plus1; ++k) {
+ G->re[k] = mu[k] * E_refined.re[k];
+ G->im[k] = mu[k] * E_refined.im[k];
+ }
+ }
+
+ // H_error = H_error + factor * erl.
+ for (size_t k = 0; k < kFftLengthBy2Plus1; ++k) {
+ if (E2_refined[k] <= E2_coarse[k] || disallow_leakage_diverged) {
+ H_error_[k] += current_config_.leakage_converged * erl[k];
+ } else {
+ H_error_[k] += current_config_.leakage_diverged * erl[k];
+ }
+
+ H_error_[k] = std::max(H_error_[k], current_config_.error_floor);
+ H_error_[k] = std::min(H_error_[k], current_config_.error_ceil);
+ }
+
+ data_dumper_->DumpRaw("aec3_refined_gain_H_error", H_error_);
+}
+
+void RefinedFilterUpdateGain::UpdateCurrentConfig() {
+ RTC_DCHECK_GE(config_change_duration_blocks_, config_change_counter_);
+ if (config_change_counter_ > 0) {
+ if (--config_change_counter_ > 0) {
+ auto average = [](float from, float to, float from_weight) {
+ return from * from_weight + to * (1.f - from_weight);
+ };
+
+ float change_factor =
+ config_change_counter_ * one_by_config_change_duration_blocks_;
+
+ current_config_.leakage_converged =
+ average(old_target_config_.leakage_converged,
+ target_config_.leakage_converged, change_factor);
+ current_config_.leakage_diverged =
+ average(old_target_config_.leakage_diverged,
+ target_config_.leakage_diverged, change_factor);
+ current_config_.error_floor =
+ average(old_target_config_.error_floor, target_config_.error_floor,
+ change_factor);
+ current_config_.error_ceil =
+ average(old_target_config_.error_ceil, target_config_.error_ceil,
+ change_factor);
+ current_config_.noise_gate =
+ average(old_target_config_.noise_gate, target_config_.noise_gate,
+ change_factor);
+ } else {
+ current_config_ = old_target_config_ = target_config_;
+ }
+ }
+ RTC_DCHECK_LE(0, config_change_counter_);
+}
+
+} // namespace webrtc
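The per-bin step size computed above follows an NLMS-style rule. A standalone sketch of that rule for a single frequency bin; all names are local to this sketch, not part of the patch:

#include <stddef.h>

// Returns the adaptation step size for one bin, as in Compute() above:
// mu = H_error / (0.5 * H_error * X2 + N * E2) when the render power X2
// passes the noise gate, and 0 otherwise (freezing adaptation in the bin).
float StepSizeForBin(float h_error, float x2, float e2_refined,
                     size_t size_partitions, float noise_gate) {
  if (x2 < noise_gate) {
    return 0.f;
  }
  return h_error / (0.5f * h_error * x2 + size_partitions * e2_refined);
}

The step size shrinks when the error power E2 is large relative to the filter uncertainty H_error, which slows adaptation when the error is dominated by nearend signal rather than misadjustment.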
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/refined_filter_update_gain.h b/third_party/libwebrtc/modules/audio_processing/aec3/refined_filter_update_gain.h
new file mode 100644
index 0000000000..1a68ebc296
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/refined_filter_update_gain.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_REFINED_FILTER_UPDATE_GAIN_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_REFINED_FILTER_UPDATE_GAIN_H_
+
+#include <stddef.h>
+
+#include <array>
+#include <atomic>
+#include <memory>
+
+#include "api/array_view.h"
+#include "api/audio/echo_canceller3_config.h"
+#include "modules/audio_processing/aec3/aec3_common.h"
+
+namespace webrtc {
+
+class AdaptiveFirFilter;
+class ApmDataDumper;
+struct EchoPathVariability;
+struct FftData;
+class RenderSignalAnalyzer;
+struct SubtractorOutput;
+
+// Provides functionality for computing the adaptive gain for the refined
+// filter.
+class RefinedFilterUpdateGain {
+ public:
+ RefinedFilterUpdateGain(
+ const EchoCanceller3Config::Filter::RefinedConfiguration& config,
+ size_t config_change_duration_blocks);
+ ~RefinedFilterUpdateGain();
+
+ RefinedFilterUpdateGain(const RefinedFilterUpdateGain&) = delete;
+ RefinedFilterUpdateGain& operator=(const RefinedFilterUpdateGain&) = delete;
+
+ // Takes action in the case of a known echo path change.
+ void HandleEchoPathChange(const EchoPathVariability& echo_path_variability);
+
+ // Computes the gain.
+ void Compute(const std::array<float, kFftLengthBy2Plus1>& render_power,
+ const RenderSignalAnalyzer& render_signal_analyzer,
+ const SubtractorOutput& subtractor_output,
+ rtc::ArrayView<const float> erl,
+ size_t size_partitions,
+ bool saturated_capture_signal,
+ bool disallow_leakage_diverged,
+ FftData* gain_fft);
+
+ // Sets a new config.
+ void SetConfig(
+ const EchoCanceller3Config::Filter::RefinedConfiguration& config,
+ bool immediate_effect) {
+ if (immediate_effect) {
+ old_target_config_ = current_config_ = target_config_ = config;
+ config_change_counter_ = 0;
+ } else {
+ old_target_config_ = current_config_;
+ target_config_ = config;
+ config_change_counter_ = config_change_duration_blocks_;
+ }
+ }
+
+ private:
+ static std::atomic<int> instance_count_;
+ std::unique_ptr<ApmDataDumper> data_dumper_;
+ const int config_change_duration_blocks_;
+ float one_by_config_change_duration_blocks_;
+ EchoCanceller3Config::Filter::RefinedConfiguration current_config_;
+ EchoCanceller3Config::Filter::RefinedConfiguration target_config_;
+ EchoCanceller3Config::Filter::RefinedConfiguration old_target_config_;
+ std::array<float, kFftLengthBy2Plus1> H_error_;
+ size_t poor_excitation_counter_;
+ size_t call_counter_ = 0;
+ int config_change_counter_ = 0;
+
+ // Updates the current config towards the target config.
+ void UpdateCurrentConfig();
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AEC3_REFINED_FILTER_UPDATE_GAIN_H_
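SetConfig() with immediate_effect == false does not switch parameters abruptly; UpdateCurrentConfig() crossfades each parameter linearly over config_change_duration_blocks blocks. A sketch of the interpolation, with names local to this sketch:

// With `counter` counting down from `duration` to 0, the weight on the old
// value decreases linearly, matching average(old, target, change_factor)
// in UpdateCurrentConfig() above.
float Crossfade(float old_value, float target_value, int counter, int duration) {
  const float from_weight = static_cast<float>(counter) / duration;
  return old_value * from_weight + target_value * (1.f - from_weight);
}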
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/refined_filter_update_gain_unittest.cc b/third_party/libwebrtc/modules/audio_processing/aec3/refined_filter_update_gain_unittest.cc
new file mode 100644
index 0000000000..c77c5b53d5
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/refined_filter_update_gain_unittest.cc
@@ -0,0 +1,392 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/refined_filter_update_gain.h"
+
+#include <algorithm>
+#include <numeric>
+#include <string>
+#include <vector>
+
+#include "modules/audio_processing/aec3/adaptive_fir_filter.h"
+#include "modules/audio_processing/aec3/adaptive_fir_filter_erl.h"
+#include "modules/audio_processing/aec3/aec_state.h"
+#include "modules/audio_processing/aec3/coarse_filter_update_gain.h"
+#include "modules/audio_processing/aec3/render_delay_buffer.h"
+#include "modules/audio_processing/aec3/render_signal_analyzer.h"
+#include "modules/audio_processing/aec3/subtractor_output.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+#include "modules/audio_processing/test/echo_canceller_test_tools.h"
+#include "rtc_base/numerics/safe_minmax.h"
+#include "rtc_base/random.h"
+#include "rtc_base/strings/string_builder.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+// Performs the simulations needed to test the refined filter update gain
+// functionality.
+void RunFilterUpdateTest(int num_blocks_to_process,
+ size_t delay_samples,
+ int filter_length_blocks,
+ const std::vector<int>& blocks_with_echo_path_changes,
+ const std::vector<int>& blocks_with_saturation,
+ bool use_silent_render_in_second_half,
+ std::array<float, kBlockSize>* e_last_block,
+ std::array<float, kBlockSize>* y_last_block,
+ FftData* G_last_block) {
+ ApmDataDumper data_dumper(42);
+ Aec3Optimization optimization = DetectOptimization();
+ constexpr size_t kNumRenderChannels = 1;
+ constexpr size_t kNumCaptureChannels = 1;
+ constexpr int kSampleRateHz = 48000;
+ constexpr size_t kNumBands = NumBandsForRate(kSampleRateHz);
+
+ EchoCanceller3Config config;
+ config.filter.refined.length_blocks = filter_length_blocks;
+ config.filter.coarse.length_blocks = filter_length_blocks;
+ AdaptiveFirFilter refined_filter(
+ config.filter.refined.length_blocks, config.filter.refined.length_blocks,
+ config.filter.config_change_duration_blocks, kNumRenderChannels,
+ optimization, &data_dumper);
+ AdaptiveFirFilter coarse_filter(
+ config.filter.coarse.length_blocks, config.filter.coarse.length_blocks,
+ config.filter.config_change_duration_blocks, kNumRenderChannels,
+ optimization, &data_dumper);
+ std::vector<std::vector<std::array<float, kFftLengthBy2Plus1>>> H2(
+ kNumCaptureChannels, std::vector<std::array<float, kFftLengthBy2Plus1>>(
+ refined_filter.max_filter_size_partitions(),
+ std::array<float, kFftLengthBy2Plus1>()));
+ for (auto& H2_ch : H2) {
+ for (auto& H2_k : H2_ch) {
+ H2_k.fill(0.f);
+ }
+ }
+ std::vector<std::vector<float>> h(
+ kNumCaptureChannels,
+ std::vector<float>(
+ GetTimeDomainLength(refined_filter.max_filter_size_partitions()),
+ 0.f));
+
+ Aec3Fft fft;
+ std::array<float, kBlockSize> x_old;
+ x_old.fill(0.f);
+ CoarseFilterUpdateGain coarse_gain(
+ config.filter.coarse, config.filter.config_change_duration_blocks);
+ RefinedFilterUpdateGain refined_gain(
+ config.filter.refined, config.filter.config_change_duration_blocks);
+ Random random_generator(42U);
+ Block x(kNumBands, kNumRenderChannels);
+ std::vector<float> y(kBlockSize, 0.f);
+ config.delay.default_delay = 1;
+ std::unique_ptr<RenderDelayBuffer> render_delay_buffer(
+ RenderDelayBuffer::Create(config, kSampleRateHz, kNumRenderChannels));
+ AecState aec_state(config, kNumCaptureChannels);
+ RenderSignalAnalyzer render_signal_analyzer(config);
+ absl::optional<DelayEstimate> delay_estimate;
+ std::array<float, kFftLength> s_scratch;
+ std::array<float, kBlockSize> s;
+ FftData S;
+ FftData G;
+ std::vector<SubtractorOutput> output(kNumCaptureChannels);
+ for (auto& subtractor_output : output) {
+ subtractor_output.Reset();
+ }
+ FftData& E_refined = output[0].E_refined;
+ FftData E_coarse;
+ std::vector<std::array<float, kFftLengthBy2Plus1>> Y2(kNumCaptureChannels);
+ std::vector<std::array<float, kFftLengthBy2Plus1>> E2_refined(
+ kNumCaptureChannels);
+ std::array<float, kBlockSize>& e_refined = output[0].e_refined;
+ std::array<float, kBlockSize>& e_coarse = output[0].e_coarse;
+ for (auto& Y2_ch : Y2) {
+ Y2_ch.fill(0.f);
+ }
+
+ constexpr float kScale = 1.0f / kFftLengthBy2;
+
+ DelayBuffer<float> delay_buffer(delay_samples);
+ for (int k = 0; k < num_blocks_to_process; ++k) {
+ // Handle echo path changes.
+ if (std::find(blocks_with_echo_path_changes.begin(),
+ blocks_with_echo_path_changes.end(),
+ k) != blocks_with_echo_path_changes.end()) {
+ refined_filter.HandleEchoPathChange();
+ }
+
+ // Handle saturation.
+ const bool saturation =
+ std::find(blocks_with_saturation.begin(), blocks_with_saturation.end(),
+ k) != blocks_with_saturation.end();
+
+ // Create the render signal.
+ if (use_silent_render_in_second_half && k > num_blocks_to_process / 2) {
+ for (int band = 0; band < x.NumBands(); ++band) {
+ for (int channel = 0; channel < x.NumChannels(); ++channel) {
+ std::fill(x.begin(band, channel), x.end(band, channel), 0.f);
+ }
+ }
+ } else {
+ for (int band = 0; band < x.NumBands(); ++band) {
+ for (int channel = 0; channel < x.NumChannels(); ++channel) {
+ RandomizeSampleVector(&random_generator, x.View(band, channel));
+ }
+ }
+ }
+ delay_buffer.Delay(x.View(/*band=*/0, /*channel=*/0), y);
+
+ render_delay_buffer->Insert(x);
+ if (k == 0) {
+ render_delay_buffer->Reset();
+ }
+ render_delay_buffer->PrepareCaptureProcessing();
+
+ render_signal_analyzer.Update(*render_delay_buffer->GetRenderBuffer(),
+ aec_state.MinDirectPathFilterDelay());
+
+ // Apply the refined filter.
+ refined_filter.Filter(*render_delay_buffer->GetRenderBuffer(), &S);
+ fft.Ifft(S, &s_scratch);
+ std::transform(y.begin(), y.end(), s_scratch.begin() + kFftLengthBy2,
+ e_refined.begin(),
+ [&](float a, float b) { return a - b * kScale; });
+ std::for_each(e_refined.begin(), e_refined.end(),
+ [](float& a) { a = rtc::SafeClamp(a, -32768.f, 32767.f); });
+ fft.ZeroPaddedFft(e_refined, Aec3Fft::Window::kRectangular, &E_refined);
+ for (size_t k = 0; k < kBlockSize; ++k) {
+ s[k] = kScale * s_scratch[k + kFftLengthBy2];
+ }
+
+ // Apply the coarse filter.
+ coarse_filter.Filter(*render_delay_buffer->GetRenderBuffer(), &S);
+ fft.Ifft(S, &s_scratch);
+ std::transform(y.begin(), y.end(), s_scratch.begin() + kFftLengthBy2,
+ e_coarse.begin(),
+ [&](float a, float b) { return a - b * kScale; });
+ std::for_each(e_coarse.begin(), e_coarse.end(),
+ [](float& a) { a = rtc::SafeClamp(a, -32768.f, 32767.f); });
+ fft.ZeroPaddedFft(e_coarse, Aec3Fft::Window::kRectangular, &E_coarse);
+
+ // Compute spectra for future use.
+ E_refined.Spectrum(Aec3Optimization::kNone, output[0].E2_refined);
+ E_coarse.Spectrum(Aec3Optimization::kNone, output[0].E2_coarse);
+
+ // Adapt the coarse filter.
+ std::array<float, kFftLengthBy2Plus1> render_power;
+ render_delay_buffer->GetRenderBuffer()->SpectralSum(
+ coarse_filter.SizePartitions(), &render_power);
+ coarse_gain.Compute(render_power, render_signal_analyzer, E_coarse,
+ coarse_filter.SizePartitions(), saturation, &G);
+ coarse_filter.Adapt(*render_delay_buffer->GetRenderBuffer(), G);
+
+ // Adapt the refined filter.
+ render_delay_buffer->GetRenderBuffer()->SpectralSum(
+ refined_filter.SizePartitions(), &render_power);
+
+ std::array<float, kFftLengthBy2Plus1> erl;
+ ComputeErl(optimization, H2[0], erl);
+ refined_gain.Compute(render_power, render_signal_analyzer, output[0], erl,
+ refined_filter.SizePartitions(), saturation, false,
+ &G);
+ refined_filter.Adapt(*render_delay_buffer->GetRenderBuffer(), G, &h[0]);
+
+ // Update the delay.
+ aec_state.HandleEchoPathChange(EchoPathVariability(
+ false, EchoPathVariability::DelayAdjustment::kNone, false));
+ refined_filter.ComputeFrequencyResponse(&H2[0]);
+ std::copy(output[0].E2_refined.begin(), output[0].E2_refined.end(),
+ E2_refined[0].begin());
+ aec_state.Update(delay_estimate, H2, h,
+ *render_delay_buffer->GetRenderBuffer(), E2_refined, Y2,
+ output);
+ }
+
+ std::copy(e_refined.begin(), e_refined.end(), e_last_block->begin());
+ std::copy(y.begin(), y.end(), y_last_block->begin());
+ std::copy(G.re.begin(), G.re.end(), G_last_block->re.begin());
+ std::copy(G.im.begin(), G.im.end(), G_last_block->im.begin());
+}
+
+std::string ProduceDebugText(int filter_length_blocks) {
+ rtc::StringBuilder ss;
+ ss << "Length: " << filter_length_blocks;
+ return ss.Release();
+}
+
+std::string ProduceDebugText(size_t delay, int filter_length_blocks) {
+ rtc::StringBuilder ss;
+ ss << "Delay: " << delay << ", ";
+ ss << ProduceDebugText(filter_length_blocks);
+ return ss.Release();
+}
+
+} // namespace
+
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+
+// Verifies that the check for non-null output gain parameter works.
+TEST(RefinedFilterUpdateGainDeathTest, NullDataOutputGain) {
+ ApmDataDumper data_dumper(42);
+ EchoCanceller3Config config;
+ RenderSignalAnalyzer analyzer(config);
+ SubtractorOutput output;
+ RefinedFilterUpdateGain gain(config.filter.refined,
+ config.filter.config_change_duration_blocks);
+ std::array<float, kFftLengthBy2Plus1> render_power;
+ render_power.fill(0.f);
+ std::array<float, kFftLengthBy2Plus1> erl;
+ erl.fill(0.f);
+ EXPECT_DEATH(
+ gain.Compute(render_power, analyzer, output, erl,
+ config.filter.refined.length_blocks, false, false, nullptr),
+ "");
+}
+
+#endif
+
+// Verifies that the gain formed causes the filter using it to converge.
+TEST(RefinedFilterUpdateGain, GainCausesFilterToConverge) {
+ std::vector<int> blocks_with_echo_path_changes;
+ std::vector<int> blocks_with_saturation;
+ for (size_t filter_length_blocks : {12, 20, 30}) {
+ for (size_t delay_samples : {0, 64, 150, 200, 301}) {
+ SCOPED_TRACE(ProduceDebugText(delay_samples, filter_length_blocks));
+
+ std::array<float, kBlockSize> e;
+ std::array<float, kBlockSize> y;
+ FftData G;
+
+ RunFilterUpdateTest(600, delay_samples, filter_length_blocks,
+ blocks_with_echo_path_changes, blocks_with_saturation,
+ false, &e, &y, &G);
+
+ // Verify that the refined filter is able to perform well.
+ // Use different criteria to take overmodelling into account.
+ if (filter_length_blocks == 12) {
+ EXPECT_LT(1000 * std::inner_product(e.begin(), e.end(), e.begin(), 0.f),
+ std::inner_product(y.begin(), y.end(), y.begin(), 0.f));
+ } else {
+ EXPECT_LT(std::inner_product(e.begin(), e.end(), e.begin(), 0.f),
+ std::inner_product(y.begin(), y.end(), y.begin(), 0.f));
+ }
+ }
+ }
+}
+
+// Verifies that the magnitude of the gain on average decreases for a
+// persistently exciting signal.
+TEST(RefinedFilterUpdateGain, DecreasingGain) {
+ std::vector<int> blocks_with_echo_path_changes;
+ std::vector<int> blocks_with_saturation;
+
+ std::array<float, kBlockSize> e;
+ std::array<float, kBlockSize> y;
+ FftData G_a;
+ FftData G_b;
+ FftData G_c;
+ std::array<float, kFftLengthBy2Plus1> G_a_power;
+ std::array<float, kFftLengthBy2Plus1> G_b_power;
+ std::array<float, kFftLengthBy2Plus1> G_c_power;
+
+ RunFilterUpdateTest(250, 65, 12, blocks_with_echo_path_changes,
+ blocks_with_saturation, false, &e, &y, &G_a);
+ RunFilterUpdateTest(500, 65, 12, blocks_with_echo_path_changes,
+ blocks_with_saturation, false, &e, &y, &G_b);
+ RunFilterUpdateTest(750, 65, 12, blocks_with_echo_path_changes,
+ blocks_with_saturation, false, &e, &y, &G_c);
+
+ G_a.Spectrum(Aec3Optimization::kNone, G_a_power);
+ G_b.Spectrum(Aec3Optimization::kNone, G_b_power);
+ G_c.Spectrum(Aec3Optimization::kNone, G_c_power);
+
+ EXPECT_GT(std::accumulate(G_a_power.begin(), G_a_power.end(), 0.),
+ std::accumulate(G_b_power.begin(), G_b_power.end(), 0.));
+
+ EXPECT_GT(std::accumulate(G_b_power.begin(), G_b_power.end(), 0.),
+ std::accumulate(G_c_power.begin(), G_c_power.end(), 0.));
+}
+
+// Verifies that the gain is zero when there is saturation and that the internal
+// error estimates cause the gain to increase after a period of saturation.
+TEST(RefinedFilterUpdateGain, SaturationBehavior) {
+ std::vector<int> blocks_with_echo_path_changes;
+ std::vector<int> blocks_with_saturation;
+ for (int k = 99; k < 200; ++k) {
+ blocks_with_saturation.push_back(k);
+ }
+
+ for (size_t filter_length_blocks : {12, 20, 30}) {
+ SCOPED_TRACE(ProduceDebugText(filter_length_blocks));
+ std::array<float, kBlockSize> e;
+ std::array<float, kBlockSize> y;
+ FftData G_a;
+ FftData G_b;
+ FftData G_a_ref;
+ G_a_ref.re.fill(0.f);
+ G_a_ref.im.fill(0.f);
+
+ std::array<float, kFftLengthBy2Plus1> G_a_power;
+ std::array<float, kFftLengthBy2Plus1> G_b_power;
+
+ RunFilterUpdateTest(100, 65, filter_length_blocks,
+ blocks_with_echo_path_changes, blocks_with_saturation,
+ false, &e, &y, &G_a);
+
+ EXPECT_EQ(G_a_ref.re, G_a.re);
+ EXPECT_EQ(G_a_ref.im, G_a.im);
+
+ RunFilterUpdateTest(99, 65, filter_length_blocks,
+ blocks_with_echo_path_changes, blocks_with_saturation,
+ false, &e, &y, &G_a);
+ RunFilterUpdateTest(201, 65, filter_length_blocks,
+ blocks_with_echo_path_changes, blocks_with_saturation,
+ false, &e, &y, &G_b);
+
+ G_a.Spectrum(Aec3Optimization::kNone, G_a_power);
+ G_b.Spectrum(Aec3Optimization::kNone, G_b_power);
+
+ EXPECT_LT(std::accumulate(G_a_power.begin(), G_a_power.end(), 0.),
+ std::accumulate(G_b_power.begin(), G_b_power.end(), 0.));
+ }
+}
+
+// Verifies that the gain increases after an echo path change.
+// TODO(peah): Correct and reactivate this test.
+TEST(RefinedFilterUpdateGain, DISABLED_EchoPathChangeBehavior) {
+ for (size_t filter_length_blocks : {12, 20, 30}) {
+ SCOPED_TRACE(ProduceDebugText(filter_length_blocks));
+ std::vector<int> blocks_with_echo_path_changes;
+ std::vector<int> blocks_with_saturation;
+ blocks_with_echo_path_changes.push_back(99);
+
+ std::array<float, kBlockSize> e;
+ std::array<float, kBlockSize> y;
+ FftData G_a;
+ FftData G_b;
+ std::array<float, kFftLengthBy2Plus1> G_a_power;
+ std::array<float, kFftLengthBy2Plus1> G_b_power;
+
+ RunFilterUpdateTest(100, 65, filter_length_blocks,
+ blocks_with_echo_path_changes, blocks_with_saturation,
+ false, &e, &y, &G_a);
+ RunFilterUpdateTest(101, 65, filter_length_blocks,
+ blocks_with_echo_path_changes, blocks_with_saturation,
+ false, &e, &y, &G_b);
+
+ G_a.Spectrum(Aec3Optimization::kNone, G_a_power);
+ G_b.Spectrum(Aec3Optimization::kNone, G_b_power);
+
+ EXPECT_LT(std::accumulate(G_a_power.begin(), G_a_power.end(), 0.),
+ std::accumulate(G_b_power.begin(), G_b_power.end(), 0.));
+ }
+}
+
+} // namespace webrtc
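
The convergence tests above compare residual energy against capture energy via
std::inner_product; the factor 1000 used for the shortest filter corresponds
to demanding roughly 30 dB of echo attenuation. The same criterion expressed
in dB (a sketch, not part of the test suite):

    #include <algorithm>
    #include <cmath>
    #include <numeric>
    #include <vector>

    // Returns the attenuation from y to e in dB; 1000x in energy is ~30 dB.
    float AttenuationDb(const std::vector<float>& e,
                        const std::vector<float>& y) {
      const float e2 = std::inner_product(e.begin(), e.end(), e.begin(), 0.f);
      const float y2 = std::inner_product(y.begin(), y.end(), y.begin(), 0.f);
      return 10.f * std::log10(y2 / std::max(e2, 1e-10f));
    }
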
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/render_buffer.cc b/third_party/libwebrtc/modules/audio_processing/aec3/render_buffer.cc
new file mode 100644
index 0000000000..aa511e2b6b
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/render_buffer.cc
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/render_buffer.h"
+
+#include <algorithm>
+#include <functional>
+
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+RenderBuffer::RenderBuffer(BlockBuffer* block_buffer,
+ SpectrumBuffer* spectrum_buffer,
+ FftBuffer* fft_buffer)
+ : block_buffer_(block_buffer),
+ spectrum_buffer_(spectrum_buffer),
+ fft_buffer_(fft_buffer) {
+ RTC_DCHECK(block_buffer_);
+ RTC_DCHECK(spectrum_buffer_);
+ RTC_DCHECK(fft_buffer_);
+ RTC_DCHECK_EQ(block_buffer_->buffer.size(), fft_buffer_->buffer.size());
+ RTC_DCHECK_EQ(spectrum_buffer_->buffer.size(), fft_buffer_->buffer.size());
+ RTC_DCHECK_EQ(spectrum_buffer_->read, fft_buffer_->read);
+ RTC_DCHECK_EQ(spectrum_buffer_->write, fft_buffer_->write);
+}
+
+RenderBuffer::~RenderBuffer() = default;
+
+void RenderBuffer::SpectralSum(
+ size_t num_spectra,
+ std::array<float, kFftLengthBy2Plus1>* X2) const {
+ X2->fill(0.f);
+ int position = spectrum_buffer_->read;
+ for (size_t j = 0; j < num_spectra; ++j) {
+ for (const auto& channel_spectrum : spectrum_buffer_->buffer[position]) {
+ for (size_t k = 0; k < X2->size(); ++k) {
+ (*X2)[k] += channel_spectrum[k];
+ }
+ }
+ position = spectrum_buffer_->IncIndex(position);
+ }
+}
+
+void RenderBuffer::SpectralSums(
+ size_t num_spectra_shorter,
+ size_t num_spectra_longer,
+ std::array<float, kFftLengthBy2Plus1>* X2_shorter,
+ std::array<float, kFftLengthBy2Plus1>* X2_longer) const {
+ RTC_DCHECK_LE(num_spectra_shorter, num_spectra_longer);
+ X2_shorter->fill(0.f);
+ int position = spectrum_buffer_->read;
+ size_t j = 0;
+ for (; j < num_spectra_shorter; ++j) {
+ for (const auto& channel_spectrum : spectrum_buffer_->buffer[position]) {
+ for (size_t k = 0; k < X2_shorter->size(); ++k) {
+ (*X2_shorter)[k] += channel_spectrum[k];
+ }
+ }
+ position = spectrum_buffer_->IncIndex(position);
+ }
+ std::copy(X2_shorter->begin(), X2_shorter->end(), X2_longer->begin());
+ for (; j < num_spectra_longer; ++j) {
+ for (const auto& channel_spectrum : spectrum_buffer_->buffer[position]) {
+ for (size_t k = 0; k < X2_longer->size(); ++k) {
+ (*X2_longer)[k] += channel_spectrum[k];
+ }
+ }
+ position = spectrum_buffer_->IncIndex(position);
+ }
+}
+
+} // namespace webrtc
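
SpectralSums() above avoids summing the first num_spectra_shorter entries
twice: the shorter sum is accumulated once, copied into the longer output, and
only the remaining spectra are added on top. The same pattern on flat buffers
(a sketch; the real buffer is circular and iterates per channel):

    #include <algorithm>
    #include <cstddef>
    #include <vector>

    void SpectralSumsSketch(const std::vector<std::vector<float>>& spectra,
                            size_t num_shorter, size_t num_longer,
                            std::vector<float>* shorter,
                            std::vector<float>* longer) {
      std::fill(shorter->begin(), shorter->end(), 0.f);
      size_t j = 0;
      for (; j < num_shorter; ++j)
        for (size_t k = 0; k < shorter->size(); ++k)
          (*shorter)[k] += spectra[j][k];
      *longer = *shorter;  // Reuse the partial sum instead of restarting.
      for (; j < num_longer; ++j)
        for (size_t k = 0; k < longer->size(); ++k)
          (*longer)[k] += spectra[j][k];
    }
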
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/render_buffer.h b/third_party/libwebrtc/modules/audio_processing/aec3/render_buffer.h
new file mode 100644
index 0000000000..8adc996087
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/render_buffer.h
@@ -0,0 +1,115 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_RENDER_BUFFER_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_RENDER_BUFFER_H_
+
+#include <stddef.h>
+
+#include <array>
+#include <vector>
+
+#include "api/array_view.h"
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "modules/audio_processing/aec3/block_buffer.h"
+#include "modules/audio_processing/aec3/fft_buffer.h"
+#include "modules/audio_processing/aec3/fft_data.h"
+#include "modules/audio_processing/aec3/spectrum_buffer.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+// Provides a buffer of the render data for the echo remover.
+class RenderBuffer {
+ public:
+ RenderBuffer(BlockBuffer* block_buffer,
+ SpectrumBuffer* spectrum_buffer,
+ FftBuffer* fft_buffer);
+
+ RenderBuffer() = delete;
+ RenderBuffer(const RenderBuffer&) = delete;
+ RenderBuffer& operator=(const RenderBuffer&) = delete;
+
+ ~RenderBuffer();
+
+ // Get a block.
+ const Block& GetBlock(int buffer_offset_blocks) const {
+ int position =
+ block_buffer_->OffsetIndex(block_buffer_->read, buffer_offset_blocks);
+ return block_buffer_->buffer[position];
+ }
+
+ // Get the spectrum from one of the FFTs in the buffer.
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>> Spectrum(
+ int buffer_offset_ffts) const {
+ int position = spectrum_buffer_->OffsetIndex(spectrum_buffer_->read,
+ buffer_offset_ffts);
+ return spectrum_buffer_->buffer[position];
+ }
+
+ // Returns the circular fft buffer.
+ rtc::ArrayView<const std::vector<FftData>> GetFftBuffer() const {
+ return fft_buffer_->buffer;
+ }
+
+ // Returns the current position in the circular buffer.
+ size_t Position() const {
+ RTC_DCHECK_EQ(spectrum_buffer_->read, fft_buffer_->read);
+ RTC_DCHECK_EQ(spectrum_buffer_->write, fft_buffer_->write);
+ return fft_buffer_->read;
+ }
+
+ // Returns the sum of the spectra for a certain number of FFTs.
+ void SpectralSum(size_t num_spectra,
+ std::array<float, kFftLengthBy2Plus1>* X2) const;
+
+ // Returns the sums of the spectra for two numbers of FFTs.
+ void SpectralSums(size_t num_spectra_shorter,
+ size_t num_spectra_longer,
+ std::array<float, kFftLengthBy2Plus1>* X2_shorter,
+ std::array<float, kFftLengthBy2Plus1>* X2_longer) const;
+
+ // Gets the recent activity seen in the render signal.
+ bool GetRenderActivity() const { return render_activity_; }
+
+ // Specifies the recent activity seen in the render signal.
+ void SetRenderActivity(bool activity) { render_activity_ = activity; }
+
+ // Returns the headroom between the write and the read positions in the
+ // buffer.
+ int Headroom() const {
+ // The write and read indices are decreased over time.
+ int headroom =
+ fft_buffer_->write < fft_buffer_->read
+ ? fft_buffer_->read - fft_buffer_->write
+ : fft_buffer_->size - fft_buffer_->write + fft_buffer_->read;
+
+ RTC_DCHECK_LE(0, headroom);
+ RTC_DCHECK_GE(fft_buffer_->size, headroom);
+
+ return headroom;
+ }
+
+ // Returns a reference to the spectrum buffer.
+ const SpectrumBuffer& GetSpectrumBuffer() const { return *spectrum_buffer_; }
+
+ // Returns a reference to the block buffer.
+ const BlockBuffer& GetBlockBuffer() const { return *block_buffer_; }
+
+ private:
+ const BlockBuffer* const block_buffer_;
+ const SpectrumBuffer* const spectrum_buffer_;
+ const FftBuffer* const fft_buffer_;
+ bool render_activity_ = false;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AEC3_RENDER_BUFFER_H_
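
The Headroom() arithmetic above accounts for wrap-around of indices that are
decreased over time. In isolation, with plain ints standing in for the
FftBuffer fields:

    // Distance from the write index to the read index in a circular buffer
    // whose indices decrease; wraps around `size` when write has passed 0.
    int Headroom(int read, int write, int size) {
      return write < read ? read - write : size - write + read;
    }
    // E.g. size = 10: read = 7, write = 3  -> headroom 4;
    //                 read = 2, write = 8  -> headroom 4 (wrapped).
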
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/render_buffer_gn/moz.build b/third_party/libwebrtc/modules/audio_processing/aec3/render_buffer_gn/moz.build
new file mode 100644
index 0000000000..8d679eb2bd
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/render_buffer_gn/moz.build
@@ -0,0 +1,193 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("render_buffer_gn")
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/render_buffer_unittest.cc b/third_party/libwebrtc/modules/audio_processing/aec3/render_buffer_unittest.cc
new file mode 100644
index 0000000000..5d9d646e76
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/render_buffer_unittest.cc
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/render_buffer.h"
+
+#include <algorithm>
+#include <functional>
+#include <vector>
+
+#include "test/gtest.h"
+
+namespace webrtc {
+
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+
+// Verifies the check for non-null fft buffer.
+TEST(RenderBufferDeathTest, NullExternalFftBuffer) {
+ BlockBuffer block_buffer(10, 3, 1);
+ SpectrumBuffer spectrum_buffer(10, 1);
+ EXPECT_DEATH(RenderBuffer(&block_buffer, &spectrum_buffer, nullptr), "");
+}
+
+// Verifies the check for non-null spectrum buffer.
+TEST(RenderBufferDeathTest, NullExternalSpectrumBuffer) {
+ FftBuffer fft_buffer(10, 1);
+ BlockBuffer block_buffer(10, 3, 1);
+ EXPECT_DEATH(RenderBuffer(&block_buffer, nullptr, &fft_buffer), "");
+}
+
+// Verifies the check for non-null block buffer.
+TEST(RenderBufferDeathTest, NullExternalBlockBuffer) {
+ FftBuffer fft_buffer(10, 1);
+ SpectrumBuffer spectrum_buffer(10, 1);
+ EXPECT_DEATH(RenderBuffer(nullptr, &spectrum_buffer, &fft_buffer), "");
+}
+
+#endif
+
+} // namespace webrtc
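
The death tests above hand matching sizes to the three buffers because the
RenderBuffer constructor DCHECKs that the block, spectrum and FFT buffers are
equally long. A minimal valid construction, assuming the constructor
signatures used in the tests ((size, num_bands, num_channels) for BlockBuffer
and (size, num_channels) for the other two):

    #include "modules/audio_processing/aec3/render_buffer.h"

    namespace webrtc {

    void ExampleValidRenderBuffer() {
      BlockBuffer block_buffer(/*size=*/10, /*num_bands=*/3,
                               /*num_channels=*/1);
      SpectrumBuffer spectrum_buffer(/*size=*/10, /*num_channels=*/1);
      FftBuffer fft_buffer(/*size=*/10, /*num_channels=*/1);
      RenderBuffer render_buffer(&block_buffer, &spectrum_buffer, &fft_buffer);
    }

    }  // namespace webrtc
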
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/render_delay_buffer.cc b/third_party/libwebrtc/modules/audio_processing/aec3/render_delay_buffer.cc
new file mode 100644
index 0000000000..ec5d35507e
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/render_delay_buffer.cc
@@ -0,0 +1,519 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/render_delay_buffer.h"
+
+#include <string.h>
+
+#include <algorithm>
+#include <atomic>
+#include <cmath>
+#include <memory>
+#include <numeric>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/array_view.h"
+#include "api/audio/echo_canceller3_config.h"
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "modules/audio_processing/aec3/aec3_fft.h"
+#include "modules/audio_processing/aec3/alignment_mixer.h"
+#include "modules/audio_processing/aec3/block_buffer.h"
+#include "modules/audio_processing/aec3/decimator.h"
+#include "modules/audio_processing/aec3/downsampled_render_buffer.h"
+#include "modules/audio_processing/aec3/fft_buffer.h"
+#include "modules/audio_processing/aec3/fft_data.h"
+#include "modules/audio_processing/aec3/render_buffer.h"
+#include "modules/audio_processing/aec3/spectrum_buffer.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "system_wrappers/include/field_trial.h"
+
+namespace webrtc {
+namespace {
+
+bool UpdateCaptureCallCounterOnSkippedBlocks() {
+ return !field_trial::IsEnabled(
+ "WebRTC-Aec3RenderBufferCallCounterUpdateKillSwitch");
+}
+
+class RenderDelayBufferImpl final : public RenderDelayBuffer {
+ public:
+ RenderDelayBufferImpl(const EchoCanceller3Config& config,
+ int sample_rate_hz,
+ size_t num_render_channels);
+ RenderDelayBufferImpl() = delete;
+ ~RenderDelayBufferImpl() override;
+
+ void Reset() override;
+ BufferingEvent Insert(const Block& block) override;
+ BufferingEvent PrepareCaptureProcessing() override;
+ void HandleSkippedCaptureProcessing() override;
+ bool AlignFromDelay(size_t delay) override;
+ void AlignFromExternalDelay() override;
+ size_t Delay() const override { return ComputeDelay(); }
+ size_t MaxDelay() const override {
+ return blocks_.buffer.size() - 1 - buffer_headroom_;
+ }
+ RenderBuffer* GetRenderBuffer() override { return &echo_remover_buffer_; }
+
+ const DownsampledRenderBuffer& GetDownsampledRenderBuffer() const override {
+ return low_rate_;
+ }
+
+ int BufferLatency() const;
+ void SetAudioBufferDelay(int delay_ms) override;
+ bool HasReceivedBufferDelay() override;
+
+ private:
+ static std::atomic<int> instance_count_;
+ std::unique_ptr<ApmDataDumper> data_dumper_;
+ const Aec3Optimization optimization_;
+ const EchoCanceller3Config config_;
+ const bool update_capture_call_counter_on_skipped_blocks_;
+ const float render_linear_amplitude_gain_;
+ const rtc::LoggingSeverity delay_log_level_;
+ size_t down_sampling_factor_;
+ const int sub_block_size_;
+ BlockBuffer blocks_;
+ SpectrumBuffer spectra_;
+ FftBuffer ffts_;
+ absl::optional<size_t> delay_;
+ RenderBuffer echo_remover_buffer_;
+ DownsampledRenderBuffer low_rate_;
+ AlignmentMixer render_mixer_;
+ Decimator render_decimator_;
+ const Aec3Fft fft_;
+ std::vector<float> render_ds_;
+ const int buffer_headroom_;
+ bool last_call_was_render_ = false;
+ int num_api_calls_in_a_row_ = 0;
+ int max_observed_jitter_ = 1;
+ int64_t capture_call_counter_ = 0;
+ int64_t render_call_counter_ = 0;
+ bool render_activity_ = false;
+ size_t render_activity_counter_ = 0;
+ absl::optional<int> external_audio_buffer_delay_;
+ bool external_audio_buffer_delay_verified_after_reset_ = false;
+ size_t min_latency_blocks_ = 0;
+ size_t excess_render_detection_counter_ = 0;
+
+ int MapDelayToTotalDelay(size_t delay) const;
+ int ComputeDelay() const;
+ void ApplyTotalDelay(int delay);
+ void InsertBlock(const Block& block, int previous_write);
+ bool DetectActiveRender(rtc::ArrayView<const float> x) const;
+ bool DetectExcessRenderBlocks();
+ void IncrementWriteIndices();
+ void IncrementLowRateReadIndices();
+ void IncrementReadIndices();
+ bool RenderOverrun();
+ bool RenderUnderrun();
+};
+
+std::atomic<int> RenderDelayBufferImpl::instance_count_ = 0;
+
+RenderDelayBufferImpl::RenderDelayBufferImpl(const EchoCanceller3Config& config,
+ int sample_rate_hz,
+ size_t num_render_channels)
+ : data_dumper_(new ApmDataDumper(instance_count_.fetch_add(1) + 1)),
+ optimization_(DetectOptimization()),
+ config_(config),
+ update_capture_call_counter_on_skipped_blocks_(
+ UpdateCaptureCallCounterOnSkippedBlocks()),
+ render_linear_amplitude_gain_(
+ std::pow(10.0f, config_.render_levels.render_power_gain_db / 20.f)),
+ delay_log_level_(config_.delay.log_warning_on_delay_changes
+ ? rtc::LS_WARNING
+ : rtc::LS_VERBOSE),
+ down_sampling_factor_(config.delay.down_sampling_factor),
+ sub_block_size_(static_cast<int>(down_sampling_factor_ > 0
+ ? kBlockSize / down_sampling_factor_
+ : kBlockSize)),
+ blocks_(GetRenderDelayBufferSize(down_sampling_factor_,
+ config.delay.num_filters,
+ config.filter.refined.length_blocks),
+ NumBandsForRate(sample_rate_hz),
+ num_render_channels),
+ spectra_(blocks_.buffer.size(), num_render_channels),
+ ffts_(blocks_.buffer.size(), num_render_channels),
+ delay_(config_.delay.default_delay),
+ echo_remover_buffer_(&blocks_, &spectra_, &ffts_),
+ low_rate_(GetDownSampledBufferSize(down_sampling_factor_,
+ config.delay.num_filters)),
+ render_mixer_(num_render_channels, config.delay.render_alignment_mixing),
+ render_decimator_(down_sampling_factor_),
+ fft_(),
+ render_ds_(sub_block_size_, 0.f),
+ buffer_headroom_(config.filter.refined.length_blocks) {
+ RTC_DCHECK_EQ(blocks_.buffer.size(), ffts_.buffer.size());
+ RTC_DCHECK_EQ(spectra_.buffer.size(), ffts_.buffer.size());
+ for (size_t i = 0; i < blocks_.buffer.size(); ++i) {
+ RTC_DCHECK_EQ(blocks_.buffer[i].NumChannels(), ffts_.buffer[i].size());
+ RTC_DCHECK_EQ(spectra_.buffer[i].size(), ffts_.buffer[i].size());
+ }
+
+ Reset();
+}
+
+RenderDelayBufferImpl::~RenderDelayBufferImpl() = default;
+
+// Resets the buffer delays and clears the reported delays.
+void RenderDelayBufferImpl::Reset() {
+ last_call_was_render_ = false;
+ num_api_calls_in_a_row_ = 1;
+ min_latency_blocks_ = 0;
+ excess_render_detection_counter_ = 0;
+
+ // Initialize the read index to one sub-block before the write index.
+ low_rate_.read = low_rate_.OffsetIndex(low_rate_.write, sub_block_size_);
+
+ // Check for any external audio buffer delay and whether it is feasible.
+ if (external_audio_buffer_delay_) {
+ const int headroom = 2;
+ size_t audio_buffer_delay_to_set;
+ // Minimum delay is 1 (like the low-rate render buffer).
+ if (*external_audio_buffer_delay_ <= headroom) {
+ audio_buffer_delay_to_set = 1;
+ } else {
+ audio_buffer_delay_to_set = *external_audio_buffer_delay_ - headroom;
+ }
+
+ audio_buffer_delay_to_set = std::min(audio_buffer_delay_to_set, MaxDelay());
+
+ // When an external delay estimate is available, use that delay as the
+ // initial render buffer delay.
+ ApplyTotalDelay(audio_buffer_delay_to_set);
+ delay_ = ComputeDelay();
+
+ external_audio_buffer_delay_verified_after_reset_ = false;
+ } else {
+ // If no external delay estimate is available, set the render buffer delays
+ // to the default delay.
+ ApplyTotalDelay(config_.delay.default_delay);
+
+ // Unset the delays which are set by AlignFromDelay.
+ delay_ = absl::nullopt;
+ }
+}
+
+// Inserts a new block into the render buffers.
+RenderDelayBuffer::BufferingEvent RenderDelayBufferImpl::Insert(
+ const Block& block) {
+ ++render_call_counter_;
+ if (delay_) {
+ if (!last_call_was_render_) {
+ last_call_was_render_ = true;
+ num_api_calls_in_a_row_ = 1;
+ } else {
+ if (++num_api_calls_in_a_row_ > max_observed_jitter_) {
+ max_observed_jitter_ = num_api_calls_in_a_row_;
+ RTC_LOG_V(delay_log_level_)
+ << "New max number api jitter observed at render block "
+ << render_call_counter_ << ": " << num_api_calls_in_a_row_
+ << " blocks";
+ }
+ }
+ }
+
+ // Increase the write indices to where the new blocks should be written.
+ const int previous_write = blocks_.write;
+ IncrementWriteIndices();
+
+ // Allow overrun and do a reset when a render overrun occurs because more
+ // render data has been inserted than capture data has been received.
+ BufferingEvent event =
+ RenderOverrun() ? BufferingEvent::kRenderOverrun : BufferingEvent::kNone;
+
+ // Detect and update render activity.
+ if (!render_activity_) {
+ render_activity_counter_ +=
+ DetectActiveRender(block.View(/*band=*/0, /*channel=*/0)) ? 1 : 0;
+ render_activity_ = render_activity_counter_ >= 20;
+ }
+
+ // Insert the new render block into the specified position.
+ InsertBlock(block, previous_write);
+
+ if (event != BufferingEvent::kNone) {
+ Reset();
+ }
+
+ return event;
+}
+
+void RenderDelayBufferImpl::HandleSkippedCaptureProcessing() {
+ if (update_capture_call_counter_on_skipped_blocks_) {
+ ++capture_call_counter_;
+ }
+}
+
+// Prepares the render buffers for processing another capture block.
+RenderDelayBuffer::BufferingEvent
+RenderDelayBufferImpl::PrepareCaptureProcessing() {
+ RenderDelayBuffer::BufferingEvent event = BufferingEvent::kNone;
+ ++capture_call_counter_;
+
+ if (delay_) {
+ if (last_call_was_render_) {
+ last_call_was_render_ = false;
+ num_api_calls_in_a_row_ = 1;
+ } else {
+ if (++num_api_calls_in_a_row_ > max_observed_jitter_) {
+ max_observed_jitter_ = num_api_calls_in_a_row_;
+ RTC_LOG_V(delay_log_level_)
+ << "New max number api jitter observed at capture block "
+ << capture_call_counter_ << ": " << num_api_calls_in_a_row_
+ << " blocks";
+ }
+ }
+ }
+
+ if (DetectExcessRenderBlocks()) {
+ // Too many render blocks compared to capture blocks. Risk of delay ending
+ // up before the filter used by the delay estimator.
+ RTC_LOG_V(delay_log_level_)
+ << "Excess render blocks detected at block " << capture_call_counter_;
+ Reset();
+ event = BufferingEvent::kRenderOverrun;
+ } else if (RenderUnderrun()) {
+ // Don't increment the read indices of the low rate buffer if there is a
+ // render underrun.
+ RTC_LOG_V(delay_log_level_)
+ << "Render buffer underrun detected at block " << capture_call_counter_;
+ IncrementReadIndices();
+ // Incrementing the buffer index without increasing the low rate buffer
+ // index means that the delay is reduced by one.
+ if (delay_ && *delay_ > 0)
+ delay_ = *delay_ - 1;
+ event = BufferingEvent::kRenderUnderrun;
+ } else {
+ // Increment the read indices in the render buffers to point to the most
+ // recent block to use in the capture processing.
+ IncrementLowRateReadIndices();
+ IncrementReadIndices();
+ }
+
+ echo_remover_buffer_.SetRenderActivity(render_activity_);
+ if (render_activity_) {
+ render_activity_counter_ = 0;
+ render_activity_ = false;
+ }
+
+ return event;
+}
+
+// Sets the delay and returns a bool indicating whether the delay was changed.
+bool RenderDelayBufferImpl::AlignFromDelay(size_t delay) {
+ RTC_DCHECK(!config_.delay.use_external_delay_estimator);
+ if (!external_audio_buffer_delay_verified_after_reset_ &&
+ external_audio_buffer_delay_ && delay_) {
+ int difference = static_cast<int>(delay) - static_cast<int>(*delay_);
+ RTC_LOG_V(delay_log_level_)
+ << "Mismatch between first estimated delay after reset "
+ "and externally reported audio buffer delay: "
+ << difference << " blocks";
+ external_audio_buffer_delay_verified_after_reset_ = true;
+ }
+ if (delay_ && *delay_ == delay) {
+ return false;
+ }
+ delay_ = delay;
+
+ // Compute the total delay and limit the delay to the allowed range.
+ int total_delay = MapDelayToTotalDelay(*delay_);
+ total_delay =
+ std::min(MaxDelay(), static_cast<size_t>(std::max(total_delay, 0)));
+
+ // Apply the delay to the buffers.
+ ApplyTotalDelay(total_delay);
+ return true;
+}
+
+void RenderDelayBufferImpl::SetAudioBufferDelay(int delay_ms) {
+ if (!external_audio_buffer_delay_) {
+ RTC_LOG_V(delay_log_level_)
+ << "Receiving a first externally reported audio buffer delay of "
+ << delay_ms << " ms.";
+ }
+
+ // Convert delay from milliseconds to blocks (rounded down); one block is 4 ms.
+ external_audio_buffer_delay_ = delay_ms / 4;
+}
+
+bool RenderDelayBufferImpl::HasReceivedBufferDelay() {
+ return external_audio_buffer_delay_.has_value();
+}
+
+// Maps the externally computed delay to the delay used internally.
+int RenderDelayBufferImpl::MapDelayToTotalDelay(
+ size_t external_delay_blocks) const {
+ const int latency_blocks = BufferLatency();
+ return latency_blocks + static_cast<int>(external_delay_blocks);
+}
+
+// Returns the delay (not including call jitter).
+int RenderDelayBufferImpl::ComputeDelay() const {
+ const int latency_blocks = BufferLatency();
+ int internal_delay = spectra_.read >= spectra_.write
+ ? spectra_.read - spectra_.write
+ : spectra_.size + spectra_.read - spectra_.write;
+
+ return internal_delay - latency_blocks;
+}
+
+// Set the read indices according to the delay.
+void RenderDelayBufferImpl::ApplyTotalDelay(int delay) {
+ RTC_LOG_V(delay_log_level_)
+ << "Applying total delay of " << delay << " blocks.";
+ blocks_.read = blocks_.OffsetIndex(blocks_.write, -delay);
+ spectra_.read = spectra_.OffsetIndex(spectra_.write, delay);
+ ffts_.read = ffts_.OffsetIndex(ffts_.write, delay);
+}
+
+void RenderDelayBufferImpl::AlignFromExternalDelay() {
+ RTC_DCHECK(config_.delay.use_external_delay_estimator);
+ if (external_audio_buffer_delay_) {
+ const int64_t delay = render_call_counter_ - capture_call_counter_ +
+ *external_audio_buffer_delay_;
+ const int64_t delay_with_headroom =
+ delay - config_.delay.delay_headroom_samples / kBlockSize;
+ ApplyTotalDelay(delay_with_headroom);
+ }
+}
+
+// Inserts a block into the render buffers.
+void RenderDelayBufferImpl::InsertBlock(const Block& block,
+ int previous_write) {
+ auto& b = blocks_;
+ auto& lr = low_rate_;
+ auto& ds = render_ds_;
+ auto& f = ffts_;
+ auto& s = spectra_;
+ const size_t num_bands = b.buffer[b.write].NumBands();
+ const size_t num_render_channels = b.buffer[b.write].NumChannels();
+ RTC_DCHECK_EQ(block.NumBands(), num_bands);
+ RTC_DCHECK_EQ(block.NumChannels(), num_render_channels);
+ for (size_t band = 0; band < num_bands; ++band) {
+ for (size_t ch = 0; ch < num_render_channels; ++ch) {
+ std::copy(block.begin(band, ch), block.end(band, ch),
+ b.buffer[b.write].begin(band, ch));
+ }
+ }
+
+ if (render_linear_amplitude_gain_ != 1.f) {
+ for (size_t band = 0; band < num_bands; ++band) {
+ for (size_t ch = 0; ch < num_render_channels; ++ch) {
+ rtc::ArrayView<float, kBlockSize> b_view =
+ b.buffer[b.write].View(band, ch);
+ for (float& sample : b_view) {
+ sample *= render_linear_amplitude_gain_;
+ }
+ }
+ }
+ }
+
+ std::array<float, kBlockSize> downmixed_render;
+ render_mixer_.ProduceOutput(b.buffer[b.write], downmixed_render);
+ render_decimator_.Decimate(downmixed_render, ds);
+ data_dumper_->DumpWav("aec3_render_decimator_output", ds.size(), ds.data(),
+ 16000 / down_sampling_factor_, 1);
+ std::copy(ds.rbegin(), ds.rend(), lr.buffer.begin() + lr.write);
+ for (int channel = 0; channel < b.buffer[b.write].NumChannels(); ++channel) {
+ fft_.PaddedFft(b.buffer[b.write].View(/*band=*/0, channel),
+ b.buffer[previous_write].View(/*band=*/0, channel),
+ &f.buffer[f.write][channel]);
+ f.buffer[f.write][channel].Spectrum(optimization_,
+ s.buffer[s.write][channel]);
+ }
+}
+
+bool RenderDelayBufferImpl::DetectActiveRender(
+ rtc::ArrayView<const float> x) const {
+ const float x_energy = std::inner_product(x.begin(), x.end(), x.begin(), 0.f);
+ return x_energy > (config_.render_levels.active_render_limit *
+ config_.render_levels.active_render_limit) *
+ kFftLengthBy2;
+}
+
+bool RenderDelayBufferImpl::DetectExcessRenderBlocks() {
+ bool excess_render_detected = false;
+ const size_t latency_blocks = static_cast<size_t>(BufferLatency());
+ // The recently seen minimum latency in blocks. Should be close to 0.
+ min_latency_blocks_ = std::min(min_latency_blocks_, latency_blocks);
+ // After processing a configurable number of blocks the minimum latency is
+ // checked.
+ if (++excess_render_detection_counter_ >=
+ config_.buffering.excess_render_detection_interval_blocks) {
+ // If the minimum latency is not lower than the threshold there have been
+ // more render than capture frames.
+ excess_render_detected = min_latency_blocks_ >
+ config_.buffering.max_allowed_excess_render_blocks;
+ // Reset the counter and let the minimum latency be the current latency.
+ min_latency_blocks_ = latency_blocks;
+ excess_render_detection_counter_ = 0;
+ }
+
+ data_dumper_->DumpRaw("aec3_latency_blocks", latency_blocks);
+ data_dumper_->DumpRaw("aec3_min_latency_blocks", min_latency_blocks_);
+ data_dumper_->DumpRaw("aec3_excess_render_detected", excess_render_detected);
+ return excess_render_detected;
+}
+
+// Computes the latency in the buffer (the number of unread sub-blocks).
+int RenderDelayBufferImpl::BufferLatency() const {
+ const DownsampledRenderBuffer& l = low_rate_;
+ int latency_samples = (l.buffer.size() + l.read - l.write) % l.buffer.size();
+ int latency_blocks = latency_samples / sub_block_size_;
+ return latency_blocks;
+}
+
+// Increments the write indices for the render buffers.
+void RenderDelayBufferImpl::IncrementWriteIndices() {
+ low_rate_.UpdateWriteIndex(-sub_block_size_);
+ blocks_.IncWriteIndex();
+ spectra_.DecWriteIndex();
+ ffts_.DecWriteIndex();
+}
+
+// Increments the read indices of the low rate render buffers.
+void RenderDelayBufferImpl::IncrementLowRateReadIndices() {
+ low_rate_.UpdateReadIndex(-sub_block_size_);
+}
+
+// Increments the read indices for the render buffers.
+void RenderDelayBufferImpl::IncrementReadIndices() {
+ if (blocks_.read != blocks_.write) {
+ blocks_.IncReadIndex();
+ spectra_.DecReadIndex();
+ ffts_.DecReadIndex();
+ }
+}
+
+// Checks for a render buffer overrun.
+bool RenderDelayBufferImpl::RenderOverrun() {
+ return low_rate_.read == low_rate_.write || blocks_.read == blocks_.write;
+}
+
+// Checks for a render buffer underrun.
+bool RenderDelayBufferImpl::RenderUnderrun() {
+ return low_rate_.read == low_rate_.write;
+}
+
+} // namespace
+
+RenderDelayBuffer* RenderDelayBuffer::Create(const EchoCanceller3Config& config,
+ int sample_rate_hz,
+ size_t num_render_channels) {
+ return new RenderDelayBufferImpl(config, sample_rate_hz, num_render_channels);
+}
+
+} // namespace webrtc
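
BufferLatency() above measures how far the low-rate read index trails the
write index, modulo the buffer size, and converts that sample count into
blocks via the sub-block size. The arithmetic in isolation:

    // Standalone sketch of the latency computation on plain ints.
    int BufferLatencyBlocks(int read, int write, int buffer_size,
                            int sub_block_size) {
      const int latency_samples = (buffer_size + read - write) % buffer_size;
      return latency_samples / sub_block_size;
    }
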
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/render_delay_buffer.h b/third_party/libwebrtc/modules/audio_processing/aec3/render_delay_buffer.h
new file mode 100644
index 0000000000..6dc1aefb85
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/render_delay_buffer.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_RENDER_DELAY_BUFFER_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_RENDER_DELAY_BUFFER_H_
+
+#include <stddef.h>
+
+#include <vector>
+
+#include "api/audio/echo_canceller3_config.h"
+#include "modules/audio_processing/aec3/block.h"
+#include "modules/audio_processing/aec3/downsampled_render_buffer.h"
+#include "modules/audio_processing/aec3/render_buffer.h"
+
+namespace webrtc {
+
+// Class for buffering the incoming render blocks such that these may be
+// extracted with a specified delay.
+class RenderDelayBuffer {
+ public:
+ enum class BufferingEvent {
+ kNone,
+ kRenderUnderrun,
+ kRenderOverrun,
+ kApiCallSkew
+ };
+
+ static RenderDelayBuffer* Create(const EchoCanceller3Config& config,
+ int sample_rate_hz,
+ size_t num_render_channels);
+ virtual ~RenderDelayBuffer() = default;
+
+ // Resets the buffer alignment.
+ virtual void Reset() = 0;
+
+ // Inserts a block into the buffer.
+ virtual BufferingEvent Insert(const Block& block) = 0;
+
+ // Updates the buffers one step based on the specified buffer delay. Returns
+ // an enum indicating whether there was a special event that occurred.
+ virtual BufferingEvent PrepareCaptureProcessing() = 0;
+
+ // Called on capture blocks where PrepareCaptureProcessing is not called.
+ virtual void HandleSkippedCaptureProcessing() = 0;
+
+ // Sets the buffer delay and returns a bool indicating whether the delay
+ // changed.
+ virtual bool AlignFromDelay(size_t delay) = 0;
+
+ // Sets the buffer delay from the most recently reported external delay.
+ virtual void AlignFromExternalDelay() = 0;
+
+ // Gets the buffer delay.
+ virtual size_t Delay() const = 0;
+
+ // Gets the maximum buffer delay.
+ virtual size_t MaxDelay() const = 0;
+
+ // Returns the render buffer for the echo remover.
+ virtual RenderBuffer* GetRenderBuffer() = 0;
+
+ // Returns the downsampled render buffer.
+ virtual const DownsampledRenderBuffer& GetDownsampledRenderBuffer() const = 0;
+
+ // Returns the maximum non-causal offset that can occur in the delay buffer.
+ static int DelayEstimatorOffset(const EchoCanceller3Config& config);
+
+ // Provides an optional external estimate of the audio buffer delay.
+ virtual void SetAudioBufferDelay(int delay_ms) = 0;
+
+ // Returns whether an external delay estimate has been reported via
+ // SetAudioBufferDelay.
+ virtual bool HasReceivedBufferDelay() = 0;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AEC3_RENDER_DELAY_BUFFER_H_
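
A hedged sketch of how this interface is driven, mirroring the unit tests
that follow: render blocks are inserted as they arrive, and each capture
block first advances the buffer, then hands the RenderBuffer to the echo
remover.

    #include <memory>

    #include "modules/audio_processing/aec3/aec3_common.h"
    #include "modules/audio_processing/aec3/render_delay_buffer.h"

    namespace webrtc {

    void ExampleRenderCaptureLoop() {
      std::unique_ptr<RenderDelayBuffer> buffer(RenderDelayBuffer::Create(
          EchoCanceller3Config(), /*sample_rate_hz=*/16000,
          /*num_render_channels=*/1));
      Block render_block(NumBandsForRate(16000), /*num_channels=*/1);

      if (buffer->Insert(render_block) ==
          RenderDelayBuffer::BufferingEvent::kRenderOverrun) {
        // The implementation has already reset itself; alignment restarts
        // from the default delay.
      }
      buffer->PrepareCaptureProcessing();
      RenderBuffer* render_buffer = buffer->GetRenderBuffer();
      (void)render_buffer;  // Passed on to the echo remover in real use.
    }

    }  // namespace webrtc
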
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/render_delay_buffer_unittest.cc b/third_party/libwebrtc/modules/audio_processing/aec3/render_delay_buffer_unittest.cc
new file mode 100644
index 0000000000..d51e06a1ac
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/render_delay_buffer_unittest.cc
@@ -0,0 +1,130 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/render_delay_buffer.h"
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "api/array_view.h"
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+#include "rtc_base/random.h"
+#include "rtc_base/strings/string_builder.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+std::string ProduceDebugText(int sample_rate_hz) {
+ rtc::StringBuilder ss;
+ ss << "Sample rate: " << sample_rate_hz;
+ return ss.Release();
+}
+
+} // namespace
+
+// Verifies that the buffer overflow is correctly reported.
+TEST(RenderDelayBuffer, BufferOverflow) {
+ const EchoCanceller3Config config;
+ for (auto num_channels : {1, 2, 8}) {
+ for (auto rate : {16000, 32000, 48000}) {
+ SCOPED_TRACE(ProduceDebugText(rate));
+ std::unique_ptr<RenderDelayBuffer> delay_buffer(
+ RenderDelayBuffer::Create(config, rate, num_channels));
+ Block block_to_insert(NumBandsForRate(rate), num_channels);
+ for (size_t k = 0; k < 10; ++k) {
+ EXPECT_EQ(RenderDelayBuffer::BufferingEvent::kNone,
+ delay_buffer->Insert(block_to_insert));
+ }
+ bool overrun_occurred = false;
+ for (size_t k = 0; k < 1000; ++k) {
+ RenderDelayBuffer::BufferingEvent event =
+ delay_buffer->Insert(block_to_insert);
+ overrun_occurred =
+ overrun_occurred ||
+ RenderDelayBuffer::BufferingEvent::kRenderOverrun == event;
+ }
+
+ EXPECT_TRUE(overrun_occurred);
+ }
+ }
+}
+
+// Verifies that the check for available block works.
+TEST(RenderDelayBuffer, AvailableBlock) {
+ constexpr size_t kNumChannels = 1;
+ constexpr int kSampleRateHz = 48000;
+ constexpr size_t kNumBands = NumBandsForRate(kSampleRateHz);
+ std::unique_ptr<RenderDelayBuffer> delay_buffer(RenderDelayBuffer::Create(
+ EchoCanceller3Config(), kSampleRateHz, kNumChannels));
+ Block input_block(kNumBands, kNumChannels, 1.0f);
+ EXPECT_EQ(RenderDelayBuffer::BufferingEvent::kNone,
+ delay_buffer->Insert(input_block));
+ delay_buffer->PrepareCaptureProcessing();
+}
+
+// Verifies the AlignFromDelay method.
+TEST(RenderDelayBuffer, AlignFromDelay) {
+ EchoCanceller3Config config;
+ std::unique_ptr<RenderDelayBuffer> delay_buffer(
+ RenderDelayBuffer::Create(config, 16000, 1));
+ ASSERT_TRUE(delay_buffer->Delay());
+ delay_buffer->Reset();
+ size_t initial_internal_delay = 0;
+ for (size_t delay = initial_internal_delay;
+ delay < initial_internal_delay + 20; ++delay) {
+ ASSERT_TRUE(delay_buffer->AlignFromDelay(delay));
+ EXPECT_EQ(delay, delay_buffer->Delay());
+ }
+}
+
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+
+// Verifies the check for feasible delay.
+// TODO(peah): Re-enable the test once the issue with memory leaks during DEATH
+// tests on test bots has been fixed.
+TEST(RenderDelayBufferDeathTest, DISABLED_WrongDelay) {
+ std::unique_ptr<RenderDelayBuffer> delay_buffer(
+ RenderDelayBuffer::Create(EchoCanceller3Config(), 48000, 1));
+ EXPECT_DEATH(delay_buffer->AlignFromDelay(21), "");
+}
+
+// Verifies the check for the number of bands in the inserted blocks.
+TEST(RenderDelayBufferDeathTest, WrongNumberOfBands) {
+ for (auto rate : {16000, 32000, 48000}) {
+ for (size_t num_channels : {1, 2, 8}) {
+ SCOPED_TRACE(ProduceDebugText(rate));
+ std::unique_ptr<RenderDelayBuffer> delay_buffer(RenderDelayBuffer::Create(
+ EchoCanceller3Config(), rate, num_channels));
+ Block block_to_insert(
+ NumBandsForRate(rate < 48000 ? rate + 16000 : 16000), num_channels);
+ EXPECT_DEATH(delay_buffer->Insert(block_to_insert), "");
+ }
+ }
+}
+
+// Verifies the check for the number of channels in the inserted blocks.
+TEST(RenderDelayBufferDeathTest, WrongNumberOfChannels) {
+ for (auto rate : {16000, 32000, 48000}) {
+ for (size_t num_channels : {1, 2, 8}) {
+ SCOPED_TRACE(ProduceDebugText(rate));
+ std::unique_ptr<RenderDelayBuffer> delay_buffer(RenderDelayBuffer::Create(
+ EchoCanceller3Config(), rate, num_channels));
+ Block block_to_insert(NumBandsForRate(rate), num_channels + 1);
+ EXPECT_DEATH(delay_buffer->Insert(block_to_insert), "");
+ }
+ }
+}
+
+#endif
+
+} // namespace webrtc
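
The band-mismatch death test above relies on NumBandsForRate() mapping the
three supported rates onto distinct band counts. Assuming the upstream
definition simply divides by the 16 kHz band width (an assumption; see
aec3_common.h), the mapping is:

    #include <cstddef>

    // Sketch of the assumed mapping; each band covers 16 kHz of sample rate.
    constexpr size_t NumBandsSketch(int sample_rate_hz) {
      return static_cast<size_t>(sample_rate_hz / 16000);
    }
    static_assert(NumBandsSketch(16000) == 1, "one band");
    static_assert(NumBandsSketch(32000) == 2, "two bands");
    static_assert(NumBandsSketch(48000) == 3, "three bands");
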
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/render_delay_controller.cc b/third_party/libwebrtc/modules/audio_processing/aec3/render_delay_controller.cc
new file mode 100644
index 0000000000..7fd2868c38
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/render_delay_controller.cc
@@ -0,0 +1,184 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/audio_processing/aec3/render_delay_controller.h"
+
+#include <stddef.h>
+
+#include <algorithm>
+#include <atomic>
+#include <memory>
+
+#include "absl/types/optional.h"
+#include "api/array_view.h"
+#include "api/audio/echo_canceller3_config.h"
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "modules/audio_processing/aec3/delay_estimate.h"
+#include "modules/audio_processing/aec3/downsampled_render_buffer.h"
+#include "modules/audio_processing/aec3/echo_path_delay_estimator.h"
+#include "modules/audio_processing/aec3/render_delay_controller_metrics.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+namespace {
+
+class RenderDelayControllerImpl final : public RenderDelayController {
+ public:
+ RenderDelayControllerImpl(const EchoCanceller3Config& config,
+ int sample_rate_hz,
+ size_t num_capture_channels);
+
+ RenderDelayControllerImpl() = delete;
+ RenderDelayControllerImpl(const RenderDelayControllerImpl&) = delete;
+ RenderDelayControllerImpl& operator=(const RenderDelayControllerImpl&) =
+ delete;
+
+ ~RenderDelayControllerImpl() override;
+ void Reset(bool reset_delay_confidence) override;
+ void LogRenderCall() override;
+ absl::optional<DelayEstimate> GetDelay(
+ const DownsampledRenderBuffer& render_buffer,
+ size_t render_delay_buffer_delay,
+ const Block& capture) override;
+ bool HasClockdrift() const override;
+
+ private:
+ static std::atomic<int> instance_count_;
+ std::unique_ptr<ApmDataDumper> data_dumper_;
+ const int hysteresis_limit_blocks_;
+ absl::optional<DelayEstimate> delay_;
+ EchoPathDelayEstimator delay_estimator_;
+ RenderDelayControllerMetrics metrics_;
+ absl::optional<DelayEstimate> delay_samples_;
+ size_t capture_call_counter_ = 0;
+ int delay_change_counter_ = 0;
+ DelayEstimate::Quality last_delay_estimate_quality_;
+};
+
+DelayEstimate ComputeBufferDelay(
+ const absl::optional<DelayEstimate>& current_delay,
+ int hysteresis_limit_blocks,
+ DelayEstimate estimated_delay) {
+ // Compute the buffer delay increase required to achieve the desired latency.
+ size_t new_delay_blocks = estimated_delay.delay >> kBlockSizeLog2;
+ // Add hysteresis.
+ if (current_delay) {
+ size_t current_delay_blocks = current_delay->delay;
+ if (new_delay_blocks > current_delay_blocks &&
+ new_delay_blocks <= current_delay_blocks + hysteresis_limit_blocks) {
+ new_delay_blocks = current_delay_blocks;
+ }
+ }
+ DelayEstimate new_delay = estimated_delay;
+ new_delay.delay = new_delay_blocks;
+ return new_delay;
+}
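+
+// Hysteresis sketch (illustrative only; not called by the implementation):
+// with a current delay of 5 blocks and hysteresis_limit_blocks == 1, a
+// one-block increase is absorbed, while a two-block increase takes effect.
+inline void HysteresisSketch() {
+ const absl::optional<DelayEstimate> current(
+ DelayEstimate(DelayEstimate::Quality::kRefined, /*delay=*/5));
+ DelayEstimate estimate(DelayEstimate::Quality::kRefined,
+ /*delay=*/6 << kBlockSizeLog2); // 6 blocks, in samples.
+ // Held back at the current 5 blocks by the hysteresis.
+ RTC_DCHECK_EQ(5u, ComputeBufferDelay(current, 1, estimate).delay);
+ estimate.delay = 7 << kBlockSizeLog2; // 7 blocks: beyond the hysteresis.
+ RTC_DCHECK_EQ(7u, ComputeBufferDelay(current, 1, estimate).delay);
+}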
+
+std::atomic<int> RenderDelayControllerImpl::instance_count_(0);
+
+RenderDelayControllerImpl::RenderDelayControllerImpl(
+ const EchoCanceller3Config& config,
+ int sample_rate_hz,
+ size_t num_capture_channels)
+ : data_dumper_(new ApmDataDumper(instance_count_.fetch_add(1) + 1)),
+ hysteresis_limit_blocks_(
+ static_cast<int>(config.delay.hysteresis_limit_blocks)),
+ delay_estimator_(data_dumper_.get(), config, num_capture_channels),
+ last_delay_estimate_quality_(DelayEstimate::Quality::kCoarse) {
+ RTC_DCHECK(ValidFullBandRate(sample_rate_hz));
+ delay_estimator_.LogDelayEstimationProperties(sample_rate_hz, 0);
+}
+
+RenderDelayControllerImpl::~RenderDelayControllerImpl() = default;
+
+void RenderDelayControllerImpl::Reset(bool reset_delay_confidence) {
+ delay_ = absl::nullopt;
+ delay_samples_ = absl::nullopt;
+ delay_estimator_.Reset(reset_delay_confidence);
+ delay_change_counter_ = 0;
+ if (reset_delay_confidence) {
+ last_delay_estimate_quality_ = DelayEstimate::Quality::kCoarse;
+ }
+}
+
+void RenderDelayControllerImpl::LogRenderCall() {}
+
+absl::optional<DelayEstimate> RenderDelayControllerImpl::GetDelay(
+ const DownsampledRenderBuffer& render_buffer,
+ size_t render_delay_buffer_delay,
+ const Block& capture) {
+ ++capture_call_counter_;
+
+ auto delay_samples = delay_estimator_.EstimateDelay(render_buffer, capture);
+
+ if (delay_samples) {
+ if (!delay_samples_ || delay_samples->delay != delay_samples_->delay) {
+ delay_change_counter_ = 0;
+ }
+ if (delay_samples_) {
+ delay_samples_->blocks_since_last_change =
+ delay_samples_->delay == delay_samples->delay
+ ? delay_samples_->blocks_since_last_change + 1
+ : 0;
+ delay_samples_->blocks_since_last_update = 0;
+ delay_samples_->delay = delay_samples->delay;
+ delay_samples_->quality = delay_samples->quality;
+ } else {
+ delay_samples_ = delay_samples;
+ }
+ } else {
+ if (delay_samples_) {
+ ++delay_samples_->blocks_since_last_change;
+ ++delay_samples_->blocks_since_last_update;
+ }
+ }
+
+ if (delay_change_counter_ < 2 * kNumBlocksPerSecond) {
+ ++delay_change_counter_;
+ }
+
+ if (delay_samples_) {
+ // Compute the render delay buffer delay.
+ const bool use_hysteresis =
+ last_delay_estimate_quality_ == DelayEstimate::Quality::kRefined &&
+ delay_samples_->quality == DelayEstimate::Quality::kRefined;
+ delay_ = ComputeBufferDelay(
+ delay_, use_hysteresis ? hysteresis_limit_blocks_ : 0, *delay_samples_);
+ last_delay_estimate_quality_ = delay_samples_->quality;
+ }
+
+ metrics_.Update(delay_samples_ ? absl::optional<size_t>(delay_samples_->delay)
+ : absl::nullopt,
+ delay_ ? delay_->delay : 0, absl::nullopt,
+ delay_estimator_.Clockdrift());
+
+ data_dumper_->DumpRaw("aec3_render_delay_controller_delay",
+ delay_samples ? delay_samples->delay : 0);
+ data_dumper_->DumpRaw("aec3_render_delay_controller_buffer_delay",
+ delay_ ? delay_->delay : 0);
+
+ return delay_;
+}
+
+bool RenderDelayControllerImpl::HasClockdrift() const {
+ return delay_estimator_.Clockdrift() != ClockdriftDetector::Level::kNone;
+}
+
+} // namespace
+
+RenderDelayController* RenderDelayController::Create(
+ const EchoCanceller3Config& config,
+ int sample_rate_hz,
+ size_t num_capture_channels) {
+ return new RenderDelayControllerImpl(config, sample_rate_hz,
+ num_capture_channels);
+}
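+
+// Minimal call-sequence sketch (mirrors render_delay_controller_unittest.cc;
+// render blocks are inserted on the render side, GetDelay() runs once per
+// capture block):
+//
+// std::unique_ptr<RenderDelayBuffer> buffer(
+// RenderDelayBuffer::Create(config, sample_rate_hz, num_render_channels));
+// std::unique_ptr<RenderDelayController> controller(
+// RenderDelayController::Create(config, sample_rate_hz,
+// num_capture_channels));
+// buffer->Insert(render_block);
+// buffer->PrepareCaptureProcessing();
+// absl::optional<DelayEstimate> delay = controller->GetDelay(
+// buffer->GetDownsampledRenderBuffer(), buffer->Delay(), capture_block);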
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/render_delay_controller.h b/third_party/libwebrtc/modules/audio_processing/aec3/render_delay_controller.h
new file mode 100644
index 0000000000..4a18a11e36
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/render_delay_controller.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_RENDER_DELAY_CONTROLLER_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_RENDER_DELAY_CONTROLLER_H_
+
+#include "absl/types/optional.h"
+#include "api/array_view.h"
+#include "api/audio/echo_canceller3_config.h"
+#include "modules/audio_processing/aec3/block.h"
+#include "modules/audio_processing/aec3/delay_estimate.h"
+#include "modules/audio_processing/aec3/downsampled_render_buffer.h"
+#include "modules/audio_processing/aec3/render_delay_buffer.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+
+namespace webrtc {
+
+// Class for aligning the render and capture signal using a RenderDelayBuffer.
+class RenderDelayController {
+ public:
+ static RenderDelayController* Create(const EchoCanceller3Config& config,
+ int sample_rate_hz,
+ size_t num_capture_channels);
+ virtual ~RenderDelayController() = default;
+
+ // Resets the delay controller. If the delay confidence is reset, the
+ // controller behaves as if the call had just started.
+ virtual void Reset(bool reset_delay_confidence) = 0;
+
+ // Logs a render call.
+ virtual void LogRenderCall() = 0;
+
+ // Aligns the render buffer content with the capture signal.
+ virtual absl::optional<DelayEstimate> GetDelay(
+ const DownsampledRenderBuffer& render_buffer,
+ size_t render_delay_buffer_delay,
+ const Block& capture) = 0;
+
+ // Returns true if clockdrift has been detected.
+ virtual bool HasClockdrift() const = 0;
+};
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AEC3_RENDER_DELAY_CONTROLLER_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/render_delay_controller_metrics.cc b/third_party/libwebrtc/modules/audio_processing/aec3/render_delay_controller_metrics.cc
new file mode 100644
index 0000000000..582e033482
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/render_delay_controller_metrics.cc
@@ -0,0 +1,145 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/render_delay_controller_metrics.h"
+
+#include <algorithm>
+
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "rtc_base/checks.h"
+#include "system_wrappers/include/metrics.h"
+
+namespace webrtc {
+
+namespace {
+
+enum class DelayReliabilityCategory {
+ kNone,
+ kPoor,
+ kMedium,
+ kGood,
+ kExcellent,
+ kNumCategories
+};
+enum class DelayChangesCategory {
+ kNone,
+ kFew,
+ kSeveral,
+ kMany,
+ kConstant,
+ kNumCategories
+};
+
+constexpr int kMaxSkewShiftCount = 20;
+
+} // namespace
+
+RenderDelayControllerMetrics::RenderDelayControllerMetrics() = default;
+
+void RenderDelayControllerMetrics::Update(
+ absl::optional<size_t> delay_samples,
+ size_t buffer_delay_blocks,
+ absl::optional<int> skew_shift_blocks,
+ ClockdriftDetector::Level clockdrift) {
+ ++call_counter_;
+
+ if (!initial_update_) {
+ size_t delay_blocks;
+ if (delay_samples) {
+ ++reliable_delay_estimate_counter_;
+ delay_blocks = (*delay_samples) / kBlockSize + 2;
+ } else {
+ delay_blocks = 0;
+ }
+
+ if (delay_blocks != delay_blocks_) {
+ ++delay_change_counter_;
+ delay_blocks_ = delay_blocks;
+ }
+
+ if (skew_shift_blocks) {
+ skew_shift_count_ = std::min(kMaxSkewShiftCount, skew_shift_count_ + 1);
+ }
+ } else if (++initial_call_counter_ == 5 * kNumBlocksPerSecond) {
+ initial_update_ = false;
+ }
+
+ if (call_counter_ == kMetricsReportingIntervalBlocks) {
+ int value_to_report = static_cast<int>(delay_blocks_);
+ value_to_report = std::min(124, value_to_report >> 1);
+ RTC_HISTOGRAM_COUNTS_LINEAR("WebRTC.Audio.EchoCanceller.EchoPathDelay",
+ value_to_report, 0, 124, 125);
+
+ value_to_report = static_cast<int>(buffer_delay_blocks + 2);
+ value_to_report = std::min(124, value_to_report >> 1);
+ RTC_HISTOGRAM_COUNTS_LINEAR("WebRTC.Audio.EchoCanceller.BufferDelay",
+ value_to_report, 0, 124, 125);
+
+ DelayReliabilityCategory delay_reliability;
+ if (reliable_delay_estimate_counter_ == 0) {
+ delay_reliability = DelayReliabilityCategory::kNone;
+ } else if (reliable_delay_estimate_counter_ > (call_counter_ >> 1)) {
+ delay_reliability = DelayReliabilityCategory::kExcellent;
+ } else if (reliable_delay_estimate_counter_ > 100) {
+ delay_reliability = DelayReliabilityCategory::kGood;
+ } else if (reliable_delay_estimate_counter_ > 10) {
+ delay_reliability = DelayReliabilityCategory::kMedium;
+ } else {
+ delay_reliability = DelayReliabilityCategory::kPoor;
+ }
+ RTC_HISTOGRAM_ENUMERATION(
+ "WebRTC.Audio.EchoCanceller.ReliableDelayEstimates",
+ static_cast<int>(delay_reliability),
+ static_cast<int>(DelayReliabilityCategory::kNumCategories));
+
+ DelayChangesCategory delay_changes;
+ if (delay_change_counter_ == 0) {
+ delay_changes = DelayChangesCategory::kNone;
+ } else if (delay_change_counter_ > 10) {
+ delay_changes = DelayChangesCategory::kConstant;
+ } else if (delay_change_counter_ > 5) {
+ delay_changes = DelayChangesCategory::kMany;
+ } else if (delay_change_counter_ > 2) {
+ delay_changes = DelayChangesCategory::kSeveral;
+ } else {
+ delay_changes = DelayChangesCategory::kFew;
+ }
+ RTC_HISTOGRAM_ENUMERATION(
+ "WebRTC.Audio.EchoCanceller.DelayChanges",
+ static_cast<int>(delay_changes),
+ static_cast<int>(DelayChangesCategory::kNumCategories));
+
+ RTC_HISTOGRAM_ENUMERATION(
+ "WebRTC.Audio.EchoCanceller.Clockdrift", static_cast<int>(clockdrift),
+ static_cast<int>(ClockdriftDetector::Level::kNumCategories));
+
+ metrics_reported_ = true;
+ call_counter_ = 0;
+ ResetMetrics();
+ } else {
+ metrics_reported_ = false;
+ }
+
+ if (!initial_update_ && ++skew_report_timer_ == 60 * kNumBlocksPerSecond) {
+ RTC_HISTOGRAM_COUNTS_LINEAR("WebRTC.Audio.EchoCanceller.MaxSkewShiftCount",
+ skew_shift_count_, 0, kMaxSkewShiftCount,
+ kMaxSkewShiftCount + 1);
+
+ skew_shift_count_ = 0;
+ skew_report_timer_ = 0;
+ }
+}
+
+void RenderDelayControllerMetrics::ResetMetrics() {
+ delay_change_counter_ = 0;
+ reliable_delay_estimate_counter_ = 0;
+}
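+
+// Cadence sketch (mirrors render_delay_controller_metrics_unittest.cc): the
+// histograms above are emitted on every kMetricsReportingIntervalBlocks-th
+// call to Update(), and MetricsReported() is true only for that call.
+inline bool ReportsAfterOneIntervalSketch() {
+ RenderDelayControllerMetrics metrics;
+ for (int k = 0; k < kMetricsReportingIntervalBlocks; ++k) {
+ metrics.Update(absl::nullopt, /*buffer_delay_blocks=*/0, absl::nullopt,
+ ClockdriftDetector::Level::kNone);
+ }
+ return metrics.MetricsReported();
+}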
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/render_delay_controller_metrics.h b/third_party/libwebrtc/modules/audio_processing/aec3/render_delay_controller_metrics.h
new file mode 100644
index 0000000000..309122d80d
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/render_delay_controller_metrics.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_RENDER_DELAY_CONTROLLER_METRICS_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_RENDER_DELAY_CONTROLLER_METRICS_H_
+
+#include <stddef.h>
+
+#include "absl/types/optional.h"
+#include "modules/audio_processing/aec3/clockdrift_detector.h"
+
+namespace webrtc {
+
+// Handles the reporting of metrics for the render delay controller.
+class RenderDelayControllerMetrics {
+ public:
+ RenderDelayControllerMetrics();
+
+ RenderDelayControllerMetrics(const RenderDelayControllerMetrics&) = delete;
+ RenderDelayControllerMetrics& operator=(const RenderDelayControllerMetrics&) =
+ delete;
+
+ // Updates the metrics with new data.
+ void Update(absl::optional<size_t> delay_samples,
+ size_t buffer_delay_blocks,
+ absl::optional<int> skew_shift_blocks,
+ ClockdriftDetector::Level clockdrift);
+
+ // Returns true if the metrics have just been reported, otherwise false.
+ bool MetricsReported() const { return metrics_reported_; }
+
+ private:
+ // Resets the metrics.
+ void ResetMetrics();
+
+ size_t delay_blocks_ = 0;
+ int reliable_delay_estimate_counter_ = 0;
+ int delay_change_counter_ = 0;
+ int call_counter_ = 0;
+ int skew_report_timer_ = 0;
+ int initial_call_counter_ = 0;
+ bool metrics_reported_ = false;
+ bool initial_update_ = true;
+ int skew_shift_count_ = 0;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AEC3_RENDER_DELAY_CONTROLLER_METRICS_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/render_delay_controller_metrics_unittest.cc b/third_party/libwebrtc/modules/audio_processing/aec3/render_delay_controller_metrics_unittest.cc
new file mode 100644
index 0000000000..e7d7703433
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/render_delay_controller_metrics_unittest.cc
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/render_delay_controller_metrics.h"
+
+#include "absl/types/optional.h"
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+// Verify the general functionality of RenderDelayControllerMetrics.
+TEST(RenderDelayControllerMetrics, NormalUsage) {
+ RenderDelayControllerMetrics metrics;
+
+ for (int j = 0; j < 3; ++j) {
+ for (int k = 0; k < kMetricsReportingIntervalBlocks - 1; ++k) {
+ metrics.Update(absl::nullopt, 0, absl::nullopt,
+ ClockdriftDetector::Level::kNone);
+ EXPECT_FALSE(metrics.MetricsReported());
+ }
+ metrics.Update(absl::nullopt, 0, absl::nullopt,
+ ClockdriftDetector::Level::kNone);
+ EXPECT_TRUE(metrics.MetricsReported());
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/render_delay_controller_unittest.cc b/third_party/libwebrtc/modules/audio_processing/aec3/render_delay_controller_unittest.cc
new file mode 100644
index 0000000000..e1a54fca9e
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/render_delay_controller_unittest.cc
@@ -0,0 +1,334 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/render_delay_controller.h"
+
+#include <algorithm>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "modules/audio_processing/aec3/block_processor.h"
+#include "modules/audio_processing/aec3/decimator.h"
+#include "modules/audio_processing/aec3/render_delay_buffer.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+#include "modules/audio_processing/test/echo_canceller_test_tools.h"
+#include "rtc_base/random.h"
+#include "rtc_base/strings/string_builder.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+std::string ProduceDebugText(int sample_rate_hz) {
+ rtc::StringBuilder ss;
+ ss << "Sample rate: " << sample_rate_hz;
+ return ss.Release();
+}
+
+std::string ProduceDebugText(int sample_rate_hz,
+ size_t delay,
+ size_t num_render_channels,
+ size_t num_capture_channels) {
+ rtc::StringBuilder ss;
+ ss << ProduceDebugText(sample_rate_hz) << ", Delay: " << delay
+ << ", Num render channels: " << num_render_channels
+ << ", Num capture channels: " << num_capture_channels;
+ return ss.Release();
+}
+
+constexpr size_t kDownSamplingFactors[] = {2, 4, 8};
+
+} // namespace
+
+// Verifies the output of GetDelay when there are no AnalyzeRender calls.
+// TODO(bugs.webrtc.org/11161): Re-enable tests.
+TEST(RenderDelayController, DISABLED_NoRenderSignal) {
+ for (size_t num_render_channels : {1, 2, 8}) {
+ Block block(/*num_bands=*/1, /*num_channels=*/1);
+ EchoCanceller3Config config;
+ for (size_t num_matched_filters = 4; num_matched_filters <= 10;
+ num_matched_filters++) {
+ for (auto down_sampling_factor : kDownSamplingFactors) {
+ config.delay.down_sampling_factor = down_sampling_factor;
+ config.delay.num_filters = num_matched_filters;
+ for (auto rate : {16000, 32000, 48000}) {
+ SCOPED_TRACE(ProduceDebugText(rate));
+ std::unique_ptr<RenderDelayBuffer> delay_buffer(
+ RenderDelayBuffer::Create(config, rate, num_render_channels));
+ std::unique_ptr<RenderDelayController> delay_controller(
+ RenderDelayController::Create(config, rate,
+ /*num_capture_channels=*/1));
+ for (size_t k = 0; k < 100; ++k) {
+ auto delay = delay_controller->GetDelay(
+ delay_buffer->GetDownsampledRenderBuffer(),
+ delay_buffer->Delay(), block);
+ EXPECT_FALSE(delay->delay);
+ }
+ }
+ }
+ }
+ }
+}
+
+// Verifies the basic API call sequence.
+// TODO(bugs.webrtc.org/11161): Re-enable tests.
+TEST(RenderDelayController, DISABLED_BasicApiCalls) {
+ for (size_t num_capture_channels : {1, 2, 4}) {
+ for (size_t num_render_channels : {1, 2, 8}) {
+ Block capture_block(/*num_bands=*/1, num_capture_channels);
+ absl::optional<DelayEstimate> delay_blocks;
+ for (size_t num_matched_filters = 4; num_matched_filters <= 10;
+ num_matched_filters++) {
+ for (auto down_sampling_factor : kDownSamplingFactors) {
+ EchoCanceller3Config config;
+ config.delay.down_sampling_factor = down_sampling_factor;
+ config.delay.num_filters = num_matched_filters;
+ config.delay.capture_alignment_mixing.downmix = false;
+ config.delay.capture_alignment_mixing.adaptive_selection = false;
+
+ for (auto rate : {16000, 32000, 48000}) {
+ Block render_block(NumBandsForRate(rate), num_render_channels);
+ std::unique_ptr<RenderDelayBuffer> render_delay_buffer(
+ RenderDelayBuffer::Create(config, rate, num_render_channels));
+ std::unique_ptr<RenderDelayController> delay_controller(
+ RenderDelayController::Create(EchoCanceller3Config(), rate,
+ num_capture_channels));
+ for (size_t k = 0; k < 10; ++k) {
+ render_delay_buffer->Insert(render_block);
+ render_delay_buffer->PrepareCaptureProcessing();
+
+ delay_blocks = delay_controller->GetDelay(
+ render_delay_buffer->GetDownsampledRenderBuffer(),
+ render_delay_buffer->Delay(), capture_block);
+ }
+ EXPECT_TRUE(delay_blocks);
+ EXPECT_FALSE(delay_blocks->delay);
+ }
+ }
+ }
+ }
+ }
+}
+
+// Verifies that the RenderDelayController is able to align the signals for
+// simple timeshifts between the signals.
+// TODO(bugs.webrtc.org/11161): Re-enable tests.
+TEST(RenderDelayController, DISABLED_Alignment) {
+ Random random_generator(42U);
+ for (size_t num_capture_channels : {1, 2, 4}) {
+ Block capture_block(/*num_bands=*/1, num_capture_channels);
+ for (size_t num_matched_filters = 4; num_matched_filters <= 10;
+ num_matched_filters++) {
+ for (auto down_sampling_factor : kDownSamplingFactors) {
+ EchoCanceller3Config config;
+ config.delay.down_sampling_factor = down_sampling_factor;
+ config.delay.num_filters = num_matched_filters;
+ config.delay.capture_alignment_mixing.downmix = false;
+ config.delay.capture_alignment_mixing.adaptive_selection = false;
+
+ for (size_t num_render_channels : {1, 2, 8}) {
+ for (auto rate : {16000, 32000, 48000}) {
+ Block render_block(NumBandsForRate(rate), num_render_channels);
+
+ for (size_t delay_samples : {15, 50, 150, 200, 800, 4000}) {
+ absl::optional<DelayEstimate> delay_blocks;
+ SCOPED_TRACE(ProduceDebugText(rate, delay_samples,
+ num_render_channels,
+ num_capture_channels));
+ std::unique_ptr<RenderDelayBuffer> render_delay_buffer(
+ RenderDelayBuffer::Create(config, rate, num_render_channels));
+ std::unique_ptr<RenderDelayController> delay_controller(
+ RenderDelayController::Create(config, rate,
+ num_capture_channels));
+ DelayBuffer<float> signal_delay_buffer(delay_samples);
+ for (size_t k = 0; k < (400 + delay_samples / kBlockSize); ++k) {
+ for (int band = 0; band < render_block.NumBands(); ++band) {
+ for (int channel = 0; channel < render_block.NumChannels();
+ ++channel) {
+ RandomizeSampleVector(&random_generator,
+ render_block.View(band, channel));
+ }
+ }
+ signal_delay_buffer.Delay(
+ render_block.View(/*band=*/0, /*channel=*/0),
+ capture_block.View(/*band=*/0, /*channel=*/0));
+ render_delay_buffer->Insert(render_block);
+ render_delay_buffer->PrepareCaptureProcessing();
+ delay_blocks = delay_controller->GetDelay(
+ render_delay_buffer->GetDownsampledRenderBuffer(),
+ render_delay_buffer->Delay(), capture_block);
+ }
+ ASSERT_TRUE(!!delay_blocks);
+
+ constexpr int kDelayHeadroomBlocks = 1;
+ size_t expected_delay_blocks =
+ std::max(0, static_cast<int>(delay_samples / kBlockSize) -
+ kDelayHeadroomBlocks);
+
+ EXPECT_EQ(expected_delay_blocks, delay_blocks->delay);
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+// Verifies that the RenderDelayController is able to properly handle noncausal
+// delays.
+// TODO(bugs.webrtc.org/11161): Re-enable tests.
+TEST(RenderDelayController, DISABLED_NonCausalAlignment) {
+ Random random_generator(42U);
+ for (size_t num_capture_channels : {1, 2, 4}) {
+ for (size_t num_render_channels : {1, 2, 8}) {
+ for (size_t num_matched_filters = 4; num_matched_filters <= 10;
+ num_matched_filters++) {
+ for (auto down_sampling_factor : kDownSamplingFactors) {
+ EchoCanceller3Config config;
+ config.delay.down_sampling_factor = down_sampling_factor;
+ config.delay.num_filters = num_matched_filters;
+ config.delay.capture_alignment_mixing.downmix = false;
+ config.delay.capture_alignment_mixing.adaptive_selection = false;
+ for (auto rate : {16000, 32000, 48000}) {
+ Block render_block(NumBandsForRate(rate), num_render_channels);
+ Block capture_block(NumBandsForRate(rate), num_capture_channels);
+
+ for (int delay_samples : {-15, -50, -150, -200}) {
+ absl::optional<DelayEstimate> delay_blocks;
+ SCOPED_TRACE(ProduceDebugText(rate, -delay_samples,
+ num_render_channels,
+ num_capture_channels));
+ std::unique_ptr<RenderDelayBuffer> render_delay_buffer(
+ RenderDelayBuffer::Create(config, rate, num_render_channels));
+ std::unique_ptr<RenderDelayController> delay_controller(
+ RenderDelayController::Create(EchoCanceller3Config(), rate,
+ num_capture_channels));
+ DelayBuffer<float> signal_delay_buffer(-delay_samples);
+ for (int k = 0;
+ k < (400 - delay_samples / static_cast<int>(kBlockSize));
+ ++k) {
+ RandomizeSampleVector(
+ &random_generator,
+ capture_block.View(/*band=*/0, /*channel=*/0));
+ signal_delay_buffer.Delay(
+ capture_block.View(/*band=*/0, /*channel=*/0),
+ render_block.View(/*band=*/0, /*channel=*/0));
+ render_delay_buffer->Insert(render_block);
+ render_delay_buffer->PrepareCaptureProcessing();
+ delay_blocks = delay_controller->GetDelay(
+ render_delay_buffer->GetDownsampledRenderBuffer(),
+ render_delay_buffer->Delay(), capture_block);
+ }
+
+ ASSERT_FALSE(delay_blocks);
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+// Verifies that the RenderDelayController is able to align the signals for
+// simple timeshifts between the signals when there is jitter in the API calls.
+// TODO(bugs.webrtc.org/11161): Re-enable tests.
+TEST(RenderDelayController, DISABLED_AlignmentWithJitter) {
+ Random random_generator(42U);
+ for (size_t num_capture_channels : {1, 2, 4}) {
+ for (size_t num_render_channels : {1, 2, 8}) {
+ Block capture_block(
+ /*num_bands=*/1, num_capture_channels);
+ for (size_t num_matched_filters = 4; num_matched_filters <= 10;
+ num_matched_filters++) {
+ for (auto down_sampling_factor : kDownSamplingFactors) {
+ EchoCanceller3Config config;
+ config.delay.down_sampling_factor = down_sampling_factor;
+ config.delay.num_filters = num_matched_filters;
+ config.delay.capture_alignment_mixing.downmix = false;
+ config.delay.capture_alignment_mixing.adaptive_selection = false;
+
+ for (auto rate : {16000, 32000, 48000}) {
+ Block render_block(NumBandsForRate(rate), num_render_channels);
+ for (size_t delay_samples : {15, 50, 300, 800}) {
+ absl::optional<DelayEstimate> delay_blocks;
+ SCOPED_TRACE(ProduceDebugText(rate, delay_samples,
+ num_render_channels,
+ num_capture_channels));
+ std::unique_ptr<RenderDelayBuffer> render_delay_buffer(
+ RenderDelayBuffer::Create(config, rate, num_render_channels));
+ std::unique_ptr<RenderDelayController> delay_controller(
+ RenderDelayController::Create(config, rate,
+ num_capture_channels));
+ DelayBuffer<float> signal_delay_buffer(delay_samples);
+ constexpr size_t kMaxTestJitterBlocks = 26;
+ for (size_t j = 0; j < (1000 + delay_samples / kBlockSize) /
+ kMaxTestJitterBlocks +
+ 1;
+ ++j) {
+ std::vector<Block> capture_block_buffer;
+ for (size_t k = 0; k < (kMaxTestJitterBlocks - 1); ++k) {
+ RandomizeSampleVector(
+ &random_generator,
+ render_block.View(/*band=*/0, /*channel=*/0));
+ signal_delay_buffer.Delay(
+ render_block.View(/*band=*/0, /*channel=*/0),
+ capture_block.View(/*band=*/0, /*channel=*/0));
+ capture_block_buffer.push_back(capture_block);
+ render_delay_buffer->Insert(render_block);
+ }
+ for (size_t k = 0; k < (kMaxTestJitterBlocks - 1); ++k) {
+ render_delay_buffer->PrepareCaptureProcessing();
+ delay_blocks = delay_controller->GetDelay(
+ render_delay_buffer->GetDownsampledRenderBuffer(),
+ render_delay_buffer->Delay(), capture_block_buffer[k]);
+ }
+ }
+
+ constexpr int kDelayHeadroomBlocks = 1;
+ size_t expected_delay_blocks =
+ std::max(0, static_cast<int>(delay_samples / kBlockSize) -
+ kDelayHeadroomBlocks);
+ if (expected_delay_blocks < 2) {
+ expected_delay_blocks = 0;
+ }
+
+ ASSERT_TRUE(delay_blocks);
+ EXPECT_EQ(expected_delay_blocks, delay_blocks->delay);
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+
+// Verifies the check for correct sample rate.
+// TODO(peah): Re-enable the test once the issue with memory leaks during DEATH
+// tests on test bots has been fixed.
+TEST(RenderDelayControllerDeathTest, DISABLED_WrongSampleRate) {
+ for (auto rate : {-1, 0, 8001, 16001}) {
+ SCOPED_TRACE(ProduceDebugText(rate));
+ EchoCanceller3Config config;
+ std::unique_ptr<RenderDelayBuffer> render_delay_buffer(
+ RenderDelayBuffer::Create(config, rate, 1));
+ EXPECT_DEATH(
+ std::unique_ptr<RenderDelayController>(
+ RenderDelayController::Create(EchoCanceller3Config(), rate, 1)),
+ "");
+ }
+}
+
+#endif
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/render_signal_analyzer.cc b/third_party/libwebrtc/modules/audio_processing/aec3/render_signal_analyzer.cc
new file mode 100644
index 0000000000..bfbeb0ec2e
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/render_signal_analyzer.cc
@@ -0,0 +1,156 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/render_signal_analyzer.h"
+
+#include <math.h>
+
+#include <algorithm>
+#include <utility>
+#include <vector>
+
+#include "api/array_view.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+namespace {
+constexpr size_t kCounterThreshold = 5;
+
+// Identifies local bands with narrow characteristics.
+void IdentifySmallNarrowBandRegions(
+ const RenderBuffer& render_buffer,
+ const absl::optional<size_t>& delay_partitions,
+ std::array<size_t, kFftLengthBy2 - 1>* narrow_band_counters) {
+ RTC_DCHECK(narrow_band_counters);
+
+ if (!delay_partitions) {
+ narrow_band_counters->fill(0);
+ return;
+ }
+
+ std::array<size_t, kFftLengthBy2 - 1> channel_counters;
+ channel_counters.fill(0);
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>> X2 =
+ render_buffer.Spectrum(*delay_partitions);
+ for (size_t ch = 0; ch < X2.size(); ++ch) {
+ for (size_t k = 1; k < kFftLengthBy2; ++k) {
+ if (X2[ch][k] > 3 * std::max(X2[ch][k - 1], X2[ch][k + 1])) {
+ ++channel_counters[k - 1];
+ }
+ }
+ }
+ for (size_t k = 1; k < kFftLengthBy2; ++k) {
+ (*narrow_band_counters)[k - 1] =
+ channel_counters[k - 1] > 0 ? (*narrow_band_counters)[k - 1] + 1 : 0;
+ }
+}
+
+// Identifies whether the signal has a single strong narrow-band component.
+void IdentifyStrongNarrowBandComponent(const RenderBuffer& render_buffer,
+ int strong_peak_freeze_duration,
+ absl::optional<int>* narrow_peak_band,
+ size_t* narrow_peak_counter) {
+ RTC_DCHECK(narrow_peak_band);
+ RTC_DCHECK(narrow_peak_counter);
+ if (*narrow_peak_band &&
+ ++(*narrow_peak_counter) >
+ static_cast<size_t>(strong_peak_freeze_duration)) {
+ *narrow_peak_band = absl::nullopt;
+ }
+
+ const Block& x_latest = render_buffer.GetBlock(0);
+ float max_peak_level = 0.f;
+ for (int channel = 0; channel < x_latest.NumChannels(); ++channel) {
+ rtc::ArrayView<const float, kFftLengthBy2Plus1> X2_latest =
+ render_buffer.Spectrum(0)[channel];
+
+ // Identify the spectral peak.
+ const int peak_bin =
+ static_cast<int>(std::max_element(X2_latest.begin(), X2_latest.end()) -
+ X2_latest.begin());
+
+ // Compute the level around the peak.
+ float non_peak_power = 0.f;
+ for (int k = std::max(0, peak_bin - 14); k < peak_bin - 4; ++k) {
+ non_peak_power = std::max(X2_latest[k], non_peak_power);
+ }
+ for (int k = peak_bin + 5;
+ k < std::min(peak_bin + 15, static_cast<int>(kFftLengthBy2Plus1));
+ ++k) {
+ non_peak_power = std::max(X2_latest[k], non_peak_power);
+ }
+
+ // Assess the render signal strength.
+ auto result0 = std::minmax_element(x_latest.begin(/*band=*/0, channel),
+ x_latest.end(/*band=*/0, channel));
+ float max_abs = std::max(fabs(*result0.first), fabs(*result0.second));
+
+ if (x_latest.NumBands() > 1) {
+ const auto result1 =
+ std::minmax_element(x_latest.begin(/*band=*/1, channel),
+ x_latest.end(/*band=*/1, channel));
+ max_abs =
+ std::max(max_abs, static_cast<float>(std::max(
+ fabs(*result1.first), fabs(*result1.second))));
+ }
+
+ // Detect whether the spectral peak has a strong narrowband nature.
+ const float peak_level = X2_latest[peak_bin];
+ if (peak_bin > 0 && max_abs > 100 && peak_level > 100 * non_peak_power) {
+ // Store the strongest peak across channels.
+ if (peak_level > max_peak_level) {
+ max_peak_level = peak_level;
+ *narrow_peak_band = peak_bin;
+ *narrow_peak_counter = 0;
+ }
+ }
+ }
+}
+
+} // namespace
+
+RenderSignalAnalyzer::RenderSignalAnalyzer(const EchoCanceller3Config& config)
+ : strong_peak_freeze_duration_(config.filter.refined.length_blocks) {
+ narrow_band_counters_.fill(0);
+}
+RenderSignalAnalyzer::~RenderSignalAnalyzer() = default;
+
+void RenderSignalAnalyzer::Update(
+ const RenderBuffer& render_buffer,
+ const absl::optional<size_t>& delay_partitions) {
+ // Identify bands of narrow nature.
+ IdentifySmallNarrowBandRegions(render_buffer, delay_partitions,
+ &narrow_band_counters_);
+
+ // Identify the presence of a strong narrow band.
+ IdentifyStrongNarrowBandComponent(render_buffer, strong_peak_freeze_duration_,
+ &narrow_peak_band_, &narrow_peak_counter_);
+}
+
+void RenderSignalAnalyzer::MaskRegionsAroundNarrowBands(
+ std::array<float, kFftLengthBy2Plus1>* v) const {
+ RTC_DCHECK(v);
+
+ // Set v to zero around narrow band signal regions.
+ if (narrow_band_counters_[0] > kCounterThreshold) {
+ (*v)[1] = (*v)[0] = 0.f;
+ }
+ for (size_t k = 2; k < kFftLengthBy2 - 1; ++k) {
+ if (narrow_band_counters_[k - 1] > kCounterThreshold) {
+ (*v)[k - 2] = (*v)[k - 1] = (*v)[k] = (*v)[k + 1] = (*v)[k + 2] = 0.f;
+ }
+ }
+ if (narrow_band_counters_[kFftLengthBy2 - 2] > kCounterThreshold) {
+ (*v)[kFftLengthBy2] = (*v)[kFftLengthBy2 - 1] = 0.f;
+ }
+}
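+
+// Usage sketch (assumes `analyzer` has been Update()d with a known delay):
+// suppression gains start out neutral at 1.0, and bins within +/-2 of a
+// persistently narrow band are zeroed before use.
+inline std::array<float, kFftLengthBy2Plus1> MaskedGainSketch(
+ const RenderSignalAnalyzer& analyzer) {
+ std::array<float, kFftLengthBy2Plus1> gain;
+ gain.fill(1.f);
+ analyzer.MaskRegionsAroundNarrowBands(&gain);
+ return gain;
+}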
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/render_signal_analyzer.h b/third_party/libwebrtc/modules/audio_processing/aec3/render_signal_analyzer.h
new file mode 100644
index 0000000000..2e4aaa4ba7
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/render_signal_analyzer.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_RENDER_SIGNAL_ANALYZER_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_RENDER_SIGNAL_ANALYZER_H_
+
+#include <algorithm>
+#include <array>
+#include <cstddef>
+
+#include "absl/types/optional.h"
+#include "api/audio/echo_canceller3_config.h"
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "modules/audio_processing/aec3/render_buffer.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+// Provides functionality for analyzing the properties of the render signal.
+class RenderSignalAnalyzer {
+ public:
+ explicit RenderSignalAnalyzer(const EchoCanceller3Config& config);
+ ~RenderSignalAnalyzer();
+
+ RenderSignalAnalyzer(const RenderSignalAnalyzer&) = delete;
+ RenderSignalAnalyzer& operator=(const RenderSignalAnalyzer&) = delete;
+
+ // Updates the render signal analysis with the most recent render signal.
+ void Update(const RenderBuffer& render_buffer,
+ const absl::optional<size_t>& delay_partitions);
+
+ // Returns true if the render signal is poorly exciting.
+ bool PoorSignalExcitation() const {
+ RTC_DCHECK_LT(2, narrow_band_counters_.size());
+ return std::any_of(narrow_band_counters_.begin(),
+ narrow_band_counters_.end(),
+ [](size_t a) { return a > 10; });
+ }
+
+ // Zeros the array around regions with narrow band signal characteristics.
+ void MaskRegionsAroundNarrowBands(
+ std::array<float, kFftLengthBy2Plus1>* v) const;
+
+ absl::optional<int> NarrowPeakBand() const { return narrow_peak_band_; }
+
+ private:
+ const int strong_peak_freeze_duration_;
+ std::array<size_t, kFftLengthBy2 - 1> narrow_band_counters_;
+ absl::optional<int> narrow_peak_band_;
+ size_t narrow_peak_counter_ = 0;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AEC3_RENDER_SIGNAL_ANALYZER_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/render_signal_analyzer_unittest.cc b/third_party/libwebrtc/modules/audio_processing/aec3/render_signal_analyzer_unittest.cc
new file mode 100644
index 0000000000..16f6280cb6
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/render_signal_analyzer_unittest.cc
@@ -0,0 +1,171 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/render_signal_analyzer.h"
+
+#include <math.h>
+
+#include <algorithm>
+#include <array>
+#include <cmath>
+#include <vector>
+
+#include "api/array_view.h"
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "modules/audio_processing/aec3/aec3_fft.h"
+#include "modules/audio_processing/aec3/fft_data.h"
+#include "modules/audio_processing/aec3/render_delay_buffer.h"
+#include "modules/audio_processing/test/echo_canceller_test_tools.h"
+#include "rtc_base/random.h"
+#include "rtc_base/strings/string_builder.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+constexpr float kPi = 3.141592f;
+
+void ProduceSinusoidInNoise(int sample_rate_hz,
+ size_t sinusoid_channel,
+ float sinusoidal_frequency_hz,
+ Random* random_generator,
+ size_t* sample_counter,
+ Block* x) {
+ // Fill x with low-amplitude noise.
+ for (int band = 0; band < x->NumBands(); ++band) {
+ for (int channel = 0; channel < x->NumChannels(); ++channel) {
+ RandomizeSampleVector(random_generator, x->View(band, channel),
+ /*amplitude=*/500.f);
+ }
+ }
+ // Produce a sinusoid of the specified frequency in the specified channel.
+ for (size_t k = *sample_counter, j = 0; k < (*sample_counter + kBlockSize);
+ ++k, ++j) {
+ x->View(/*band=*/0, sinusoid_channel)[j] +=
+ 32000.f *
+ std::sin(2.f * kPi * sinusoidal_frequency_hz * k / sample_rate_hz);
+ }
+ *sample_counter = *sample_counter + kBlockSize;
+}
+
+void RunNarrowBandDetectionTest(size_t num_channels) {
+ RenderSignalAnalyzer analyzer(EchoCanceller3Config{});
+ Random random_generator(42U);
+ constexpr int kSampleRateHz = 48000;
+ constexpr size_t kNumBands = NumBandsForRate(kSampleRateHz);
+ Block x(kNumBands, num_channels);
+ std::array<float, kBlockSize> x_old;
+ Aec3Fft fft;
+ EchoCanceller3Config config;
+ std::unique_ptr<RenderDelayBuffer> render_delay_buffer(
+ RenderDelayBuffer::Create(config, kSampleRateHz, num_channels));
+
+ std::array<float, kFftLengthBy2Plus1> mask;
+ x_old.fill(0.f);
+ constexpr int kSinusFrequencyBin = 32;
+
+ auto generate_sinusoid_test = [&](bool known_delay) {
+ size_t sample_counter = 0;
+ for (size_t k = 0; k < 100; ++k) {
+ ProduceSinusoidInNoise(16000, num_channels - 1,
+ 16000 / 2 * kSinusFrequencyBin / kFftLengthBy2,
+ &random_generator, &sample_counter, &x);
+
+ render_delay_buffer->Insert(x);
+ if (k == 0) {
+ render_delay_buffer->Reset();
+ }
+ render_delay_buffer->PrepareCaptureProcessing();
+
+ analyzer.Update(*render_delay_buffer->GetRenderBuffer(),
+ known_delay ? absl::optional<size_t>(0) : absl::nullopt);
+ }
+ };
+
+ generate_sinusoid_test(true);
+ mask.fill(1.f);
+ analyzer.MaskRegionsAroundNarrowBands(&mask);
+ for (int k = 0; k < static_cast<int>(mask.size()); ++k) {
+ EXPECT_EQ(abs(k - kSinusFrequencyBin) <= 2 ? 0.f : 1.f, mask[k]);
+ }
+ EXPECT_TRUE(analyzer.PoorSignalExcitation());
+ EXPECT_TRUE(static_cast<bool>(analyzer.NarrowPeakBand()));
+ EXPECT_EQ(*analyzer.NarrowPeakBand(), 32);
+
+ // Verify that no bands are detected as narrow when the delay is unknown.
+ generate_sinusoid_test(false);
+ mask.fill(1.f);
+ analyzer.MaskRegionsAroundNarrowBands(&mask);
+ std::for_each(mask.begin(), mask.end(), [](float a) { EXPECT_EQ(1.f, a); });
+ EXPECT_FALSE(analyzer.PoorSignalExcitation());
+}
+
+std::string ProduceDebugText(size_t num_channels) {
+ rtc::StringBuilder ss;
+ ss << "number of channels: " << num_channels;
+ return ss.Release();
+}
+} // namespace
+
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+// Verifies that the check for non-null output parameter works.
+TEST(RenderSignalAnalyzerDeathTest, NullMaskOutput) {
+ RenderSignalAnalyzer analyzer(EchoCanceller3Config{});
+ EXPECT_DEATH(analyzer.MaskRegionsAroundNarrowBands(nullptr), "");
+}
+
+#endif
+
+// Verify that no narrow bands are detected in a Gaussian noise signal.
+TEST(RenderSignalAnalyzer, NoFalseDetectionOfNarrowBands) {
+ for (auto num_channels : {1, 2, 8}) {
+ SCOPED_TRACE(ProduceDebugText(num_channels));
+ RenderSignalAnalyzer analyzer(EchoCanceller3Config{});
+ Random random_generator(42U);
+ Block x(3, num_channels);
+ std::array<float, kBlockSize> x_old;
+ std::unique_ptr<RenderDelayBuffer> render_delay_buffer(
+ RenderDelayBuffer::Create(EchoCanceller3Config(), 48000, num_channels));
+ std::array<float, kFftLengthBy2Plus1> mask;
+ x_old.fill(0.f);
+
+ for (int k = 0; k < 100; ++k) {
+ for (int band = 0; band < x.NumBands(); ++band) {
+ for (int channel = 0; channel < x.NumChannels(); ++channel) {
+ RandomizeSampleVector(&random_generator, x.View(band, channel));
+ }
+ }
+
+ render_delay_buffer->Insert(x);
+ if (k == 0) {
+ render_delay_buffer->Reset();
+ }
+ render_delay_buffer->PrepareCaptureProcessing();
+
+ analyzer.Update(*render_delay_buffer->GetRenderBuffer(),
+ absl::optional<size_t>(0));
+ }
+
+ mask.fill(1.f);
+ analyzer.MaskRegionsAroundNarrowBands(&mask);
+ EXPECT_TRUE(std::all_of(mask.begin(), mask.end(),
+ [](float a) { return a == 1.f; }));
+ EXPECT_FALSE(analyzer.PoorSignalExcitation());
+ EXPECT_FALSE(static_cast<bool>(analyzer.NarrowPeakBand()));
+ }
+}
+
+// Verify that a sinusoid signal is detected as narrow bands.
+TEST(RenderSignalAnalyzer, NarrowBandDetection) {
+ for (auto num_channels : {1, 2, 8}) {
+ SCOPED_TRACE(ProduceDebugText(num_channels));
+ RunNarrowBandDetectionTest(num_channels);
+ }
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/residual_echo_estimator.cc b/third_party/libwebrtc/modules/audio_processing/aec3/residual_echo_estimator.cc
new file mode 100644
index 0000000000..640a3e3cb9
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/residual_echo_estimator.cc
@@ -0,0 +1,379 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/residual_echo_estimator.h"
+
+#include <stddef.h>
+
+#include <algorithm>
+#include <array>
+#include <vector>
+
+#include "api/array_view.h"
+#include "modules/audio_processing/aec3/reverb_model.h"
+#include "rtc_base/checks.h"
+#include "system_wrappers/include/field_trial.h"
+
+namespace webrtc {
+namespace {
+
+constexpr float kDefaultTransparentModeGain = 0.01f;
+
+float GetTransparentModeGain() {
+ return kDefaultTransparentModeGain;
+}
+
+float GetEarlyReflectionsDefaultModeGain(
+ const EchoCanceller3Config::EpStrength& config) {
+ if (field_trial::IsEnabled("WebRTC-Aec3UseLowEarlyReflectionsDefaultGain")) {
+ return 0.1f;
+ }
+ return config.default_gain;
+}
+
+float GetLateReflectionsDefaultModeGain(
+ const EchoCanceller3Config::EpStrength& config) {
+ if (field_trial::IsEnabled("WebRTC-Aec3UseLowLateReflectionsDefaultGain")) {
+ return 0.1f;
+ }
+ return config.default_gain;
+}
+
+bool UseErleOnsetCompensationInDominantNearend(
+ const EchoCanceller3Config::EpStrength& config) {
+ return config.erle_onset_compensation_in_dominant_nearend ||
+ field_trial::IsEnabled(
+ "WebRTC-Aec3UseErleOnsetCompensationInDominantNearend");
+}
+
+// Computes the indexes that will be used for computing spectral power over
+// the blocks surrounding the delay.
+void GetRenderIndexesToAnalyze(
+ const SpectrumBuffer& spectrum_buffer,
+ const EchoCanceller3Config::EchoModel& echo_model,
+ int filter_delay_blocks,
+ int* idx_start,
+ int* idx_stop) {
+ RTC_DCHECK(idx_start);
+ RTC_DCHECK(idx_stop);
+ const size_t window_start =
+ std::max(0, filter_delay_blocks -
+ static_cast<int>(echo_model.render_pre_window_size));
+ const size_t window_end =
+ filter_delay_blocks +
+ static_cast<int>(echo_model.render_post_window_size);
+ *idx_start = spectrum_buffer.OffsetIndex(spectrum_buffer.read, window_start);
+ *idx_stop = spectrum_buffer.OffsetIndex(spectrum_buffer.read, window_end + 1);
+}
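+
+// Window sketch: for a filter delay of d blocks, the spectra of the blocks in
+// [d - render_pre_window_size, d + render_post_window_size] (lower edge
+// clamped at 0) around the read position are scanned; OffsetIndex() and
+// IncIndex() handle the wrap-around of the circular spectrum buffer.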
+
+// Estimates the residual echo power based on the echo return loss enhancement
+// (ERLE) and the linear power estimate.
+void LinearEstimate(
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>> S2_linear,
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>> erle,
+ rtc::ArrayView<std::array<float, kFftLengthBy2Plus1>> R2) {
+ RTC_DCHECK_EQ(S2_linear.size(), erle.size());
+ RTC_DCHECK_EQ(S2_linear.size(), R2.size());
+
+ const size_t num_capture_channels = R2.size();
+ for (size_t ch = 0; ch < num_capture_channels; ++ch) {
+ for (size_t k = 0; k < kFftLengthBy2Plus1; ++k) {
+ RTC_DCHECK_LT(0.f, erle[ch][k]);
+ R2[ch][k] = S2_linear[ch][k] / erle[ch][k];
+ }
+ }
+}
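+
+// Single-bin sketch of the estimate above (illustrative; the ERLE is a linear
+// power ratio and must be positive): an ERLE of 8 (~9 dB) maps a linear echo
+// power of 8000 down to a residual echo power of 1000.
+inline float ResidualPowerBinSketch(float s2_linear, float erle) {
+ RTC_DCHECK_LT(0.f, erle);
+ return s2_linear / erle;
+}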
+
+// Estimates the residual echo power based on the estimate of the echo path
+// gain.
+void NonLinearEstimate(
+ float echo_path_gain,
+ const std::array<float, kFftLengthBy2Plus1>& X2,
+ rtc::ArrayView<std::array<float, kFftLengthBy2Plus1>> R2) {
+ const size_t num_capture_channels = R2.size();
+ for (size_t ch = 0; ch < num_capture_channels; ++ch) {
+ for (size_t k = 0; k < kFftLengthBy2Plus1; ++k) {
+ R2[ch][k] = X2[k] * echo_path_gain;
+ }
+ }
+}
+
+// Applies a soft noise gate to the echo generating power.
+void ApplyNoiseGate(const EchoCanceller3Config::EchoModel& config,
+ rtc::ArrayView<float, kFftLengthBy2Plus1> X2) {
+ for (size_t k = 0; k < kFftLengthBy2Plus1; ++k) {
+ if (config.noise_gate_power > X2[k]) {
+ X2[k] = std::max(0.f, X2[k] - config.noise_gate_slope *
+ (config.noise_gate_power - X2[k]));
+ }
+ }
+}
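+
+// Gate sketch (assumes a default-constructed EchoModel; illustrative only):
+// powers below the gate are reduced in proportion to their distance from it,
+// pushing weak render bins further toward zero.
+inline float GatedPowerSketch(float power) {
+ EchoCanceller3Config::EchoModel echo_model; // Default gate parameters.
+ std::array<float, kFftLengthBy2Plus1> X2;
+ X2.fill(power);
+ ApplyNoiseGate(echo_model, X2);
+ return X2[0]; // Smaller than `power` whenever the gate is active.
+}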
+
+// Estimates the echo generating signal power as gated maximal power over a
+// time window.
+void EchoGeneratingPower(size_t num_render_channels,
+ const SpectrumBuffer& spectrum_buffer,
+ const EchoCanceller3Config::EchoModel& echo_model,
+ int filter_delay_blocks,
+ rtc::ArrayView<float, kFftLengthBy2Plus1> X2) {
+ int idx_stop;
+ int idx_start;
+ GetRenderIndexesToAnalyze(spectrum_buffer, echo_model, filter_delay_blocks,
+ &idx_start, &idx_stop);
+
+ std::fill(X2.begin(), X2.end(), 0.f);
+ if (num_render_channels == 1) {
+ for (int k = idx_start; k != idx_stop; k = spectrum_buffer.IncIndex(k)) {
+ for (size_t j = 0; j < kFftLengthBy2Plus1; ++j) {
+ X2[j] = std::max(X2[j], spectrum_buffer.buffer[k][/*channel=*/0][j]);
+ }
+ }
+ } else {
+ for (int k = idx_start; k != idx_stop; k = spectrum_buffer.IncIndex(k)) {
+ std::array<float, kFftLengthBy2Plus1> render_power;
+ render_power.fill(0.f);
+ for (size_t ch = 0; ch < num_render_channels; ++ch) {
+ const auto& channel_power = spectrum_buffer.buffer[k][ch];
+ for (size_t j = 0; j < kFftLengthBy2Plus1; ++j) {
+ render_power[j] += channel_power[j];
+ }
+ }
+ for (size_t j = 0; j < kFftLengthBy2Plus1; ++j) {
+ X2[j] = std::max(X2[j], render_power[j]);
+ }
+ }
+ }
+}
+
+} // namespace
+
+ResidualEchoEstimator::ResidualEchoEstimator(const EchoCanceller3Config& config,
+ size_t num_render_channels)
+ : config_(config),
+ num_render_channels_(num_render_channels),
+ early_reflections_transparent_mode_gain_(GetTransparentModeGain()),
+ late_reflections_transparent_mode_gain_(GetTransparentModeGain()),
+ early_reflections_general_gain_(
+ GetEarlyReflectionsDefaultModeGain(config_.ep_strength)),
+ late_reflections_general_gain_(
+ GetLateReflectionsDefaultModeGain(config_.ep_strength)),
+ erle_onset_compensation_in_dominant_nearend_(
+ UseErleOnsetCompensationInDominantNearend(config_.ep_strength)) {
+ Reset();
+}
+
+ResidualEchoEstimator::~ResidualEchoEstimator() = default;
+
+void ResidualEchoEstimator::Estimate(
+ const AecState& aec_state,
+ const RenderBuffer& render_buffer,
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>> S2_linear,
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>> Y2,
+ bool dominant_nearend,
+ rtc::ArrayView<std::array<float, kFftLengthBy2Plus1>> R2,
+ rtc::ArrayView<std::array<float, kFftLengthBy2Plus1>> R2_unbounded) {
+ RTC_DCHECK_EQ(R2.size(), Y2.size());
+ RTC_DCHECK_EQ(R2.size(), S2_linear.size());
+
+ const size_t num_capture_channels = R2.size();
+
+ // Estimate the power of the stationary noise in the render signal.
+ UpdateRenderNoisePower(render_buffer);
+
+ // Estimate the residual echo power.
+ if (aec_state.UsableLinearEstimate()) {
+ // When there is saturated echo, assume the same spectral content as is
+ // present in the microphone signal.
+ if (aec_state.SaturatedEcho()) {
+ for (size_t ch = 0; ch < num_capture_channels; ++ch) {
+ std::copy(Y2[ch].begin(), Y2[ch].end(), R2[ch].begin());
+ std::copy(Y2[ch].begin(), Y2[ch].end(), R2_unbounded[ch].begin());
+ }
+ } else {
+ const bool onset_compensated =
+ erle_onset_compensation_in_dominant_nearend_ || !dominant_nearend;
+ LinearEstimate(S2_linear, aec_state.Erle(onset_compensated), R2);
+ LinearEstimate(S2_linear, aec_state.ErleUnbounded(), R2_unbounded);
+ }
+
+ UpdateReverb(ReverbType::kLinear, aec_state, render_buffer,
+ dominant_nearend);
+ AddReverb(R2);
+ AddReverb(R2_unbounded);
+ } else {
+ const float echo_path_gain =
+ GetEchoPathGain(aec_state, /*gain_for_early_reflections=*/true);
+
+ // When there is saturated echo, assume the same spectral content as is
+ // present in the microphone signal.
+ if (aec_state.SaturatedEcho()) {
+ for (size_t ch = 0; ch < num_capture_channels; ++ch) {
+ std::copy(Y2[ch].begin(), Y2[ch].end(), R2[ch].begin());
+ std::copy(Y2[ch].begin(), Y2[ch].end(), R2_unbounded[ch].begin());
+ }
+ } else {
+ // Estimate the echo generating signal power.
+ std::array<float, kFftLengthBy2Plus1> X2;
+ EchoGeneratingPower(num_render_channels_,
+ render_buffer.GetSpectrumBuffer(), config_.echo_model,
+ aec_state.MinDirectPathFilterDelay(), X2);
+ if (!aec_state.UseStationarityProperties()) {
+ ApplyNoiseGate(config_.echo_model, X2);
+ }
+
+ // Subtract the stationary noise power to avoid stationary noise causing
+ // excessive echo suppression.
+ for (size_t k = 0; k < kFftLengthBy2Plus1; ++k) {
+ X2[k] -= config_.echo_model.stationary_gate_slope * X2_noise_floor_[k];
+ X2[k] = std::max(0.f, X2[k]);
+ }
+
+ NonLinearEstimate(echo_path_gain, X2, R2);
+ NonLinearEstimate(echo_path_gain, X2, R2_unbounded);
+ }
+
+ if (config_.echo_model.model_reverb_in_nonlinear_mode &&
+ !aec_state.TransparentModeActive()) {
+ UpdateReverb(ReverbType::kNonLinear, aec_state, render_buffer,
+ dominant_nearend);
+ AddReverb(R2);
+ AddReverb(R2_unbounded);
+ }
+ }
+
+ if (aec_state.UseStationarityProperties()) {
+ // Scale the echo according to echo audibility.
+ std::array<float, kFftLengthBy2Plus1> residual_scaling;
+ aec_state.GetResidualEchoScaling(residual_scaling);
+ for (size_t ch = 0; ch < num_capture_channels; ++ch) {
+ for (size_t k = 0; k < kFftLengthBy2Plus1; ++k) {
+ R2[ch][k] *= residual_scaling[k];
+ R2_unbounded[ch][k] *= residual_scaling[k];
+ }
+ }
+ }
+}
+
+void ResidualEchoEstimator::Reset() {
+ echo_reverb_.Reset();
+ X2_noise_floor_counter_.fill(config_.echo_model.noise_floor_hold);
+ X2_noise_floor_.fill(config_.echo_model.min_noise_floor_power);
+}
+
+void ResidualEchoEstimator::UpdateRenderNoisePower(
+ const RenderBuffer& render_buffer) {
+ std::array<float, kFftLengthBy2Plus1> render_power_data;
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>> X2 =
+ render_buffer.Spectrum(0);
+ rtc::ArrayView<const float, kFftLengthBy2Plus1> render_power =
+ X2[/*channel=*/0];
+ if (num_render_channels_ > 1) {
+ render_power_data.fill(0.f);
+ for (size_t ch = 0; ch < num_render_channels_; ++ch) {
+ const auto& channel_power = X2[ch];
+ for (size_t k = 0; k < kFftLengthBy2Plus1; ++k) {
+ render_power_data[k] += channel_power[k];
+ }
+ }
+ render_power = render_power_data;
+ }
+
+ // Estimate the stationary noise power in a minimum statistics manner.
+ for (size_t k = 0; k < kFftLengthBy2Plus1; ++k) {
+ // Decrease rapidly.
+ if (render_power[k] < X2_noise_floor_[k]) {
+ X2_noise_floor_[k] = render_power[k];
+ X2_noise_floor_counter_[k] = 0;
+ } else {
+ // Increase in a delayed, leaky manner.
+ if (X2_noise_floor_counter_[k] >=
+ static_cast<int>(config_.echo_model.noise_floor_hold)) {
+ X2_noise_floor_[k] = std::max(X2_noise_floor_[k] * 1.1f,
+ config_.echo_model.min_noise_floor_power);
+ } else {
+ ++X2_noise_floor_counter_[k];
+ }
+ }
+ }
+}
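+
+// Scalar model of the minimum-statistics update above (illustrative only):
+// the floor follows decreases immediately and, once noise_floor_hold blocks
+// have passed, recovers by 10% per block, so short speech bursts do not
+// inflate the stationary noise estimate.
+inline float UpdatedNoiseFloorSketch(
+ float noise_floor,
+ float power,
+ int& hold_counter,
+ const EchoCanceller3Config::EchoModel& echo_model) {
+ if (power < noise_floor) {
+ hold_counter = 0;
+ return power;
+ }
+ if (hold_counter >= static_cast<int>(echo_model.noise_floor_hold)) {
+ return std::max(noise_floor * 1.1f, echo_model.min_noise_floor_power);
+ }
+ ++hold_counter;
+ return noise_floor;
+}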
+
+// Updates the reverb estimation.
+void ResidualEchoEstimator::UpdateReverb(ReverbType reverb_type,
+ const AecState& aec_state,
+ const RenderBuffer& render_buffer,
+ bool dominant_nearend) {
+ // Choose reverb partition based on what type of echo power model is used.
+ const size_t first_reverb_partition =
+ reverb_type == ReverbType::kLinear
+ ? aec_state.FilterLengthBlocks() + 1
+ : aec_state.MinDirectPathFilterDelay() + 1;
+
+ // Compute render power for the reverb.
+ std::array<float, kFftLengthBy2Plus1> render_power_data;
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>> X2 =
+ render_buffer.Spectrum(first_reverb_partition);
+ rtc::ArrayView<const float, kFftLengthBy2Plus1> render_power =
+ X2[/*channel=*/0];
+ if (num_render_channels_ > 1) {
+ render_power_data.fill(0.f);
+ for (size_t ch = 0; ch < num_render_channels_; ++ch) {
+ const auto& channel_power = X2[ch];
+ for (size_t k = 0; k < kFftLengthBy2Plus1; ++k) {
+ render_power_data[k] += channel_power[k];
+ }
+ }
+ render_power = render_power_data;
+ }
+
+ // Update the reverb estimate.
+ float reverb_decay = aec_state.ReverbDecay(/*mild=*/dominant_nearend);
+ if (reverb_type == ReverbType::kLinear) {
+ echo_reverb_.UpdateReverb(
+ render_power, aec_state.GetReverbFrequencyResponse(), reverb_decay);
+ } else {
+ const float echo_path_gain =
+ GetEchoPathGain(aec_state, /*gain_for_early_reflections=*/false);
+ echo_reverb_.UpdateReverbNoFreqShaping(render_power, echo_path_gain,
+ reverb_decay);
+ }
+}
+
+// Adds the estimated power of the reverb to the residual echo power.
+void ResidualEchoEstimator::AddReverb(
+ rtc::ArrayView<std::array<float, kFftLengthBy2Plus1>> R2) const {
+ const size_t num_capture_channels = R2.size();
+
+ // Add the reverb power.
+ rtc::ArrayView<const float, kFftLengthBy2Plus1> reverb_power =
+ echo_reverb_.reverb();
+ for (size_t ch = 0; ch < num_capture_channels; ++ch) {
+ for (size_t k = 0; k < kFftLengthBy2Plus1; ++k) {
+ R2[ch][k] += reverb_power[k];
+ }
+ }
+}
+
+// Chooses the echo path gain to use.
+float ResidualEchoEstimator::GetEchoPathGain(
+ const AecState& aec_state,
+ bool gain_for_early_reflections) const {
+ float gain_amplitude;
+ if (aec_state.TransparentModeActive()) {
+ gain_amplitude = gain_for_early_reflections
+ ? early_reflections_transparent_mode_gain_
+ : late_reflections_transparent_mode_gain_;
+ } else {
+ gain_amplitude = gain_for_early_reflections
+ ? early_reflections_general_gain_
+ : late_reflections_general_gain_;
+ }
+ return gain_amplitude * gain_amplitude;
+}
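+
+// Note that the configured gains are amplitudes, so the value returned above
+// is a power gain: an amplitude gain of 0.1, for instance, corresponds to a
+// power gain of 0.01 (-20 dB) applied to the render power spectrum.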
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/residual_echo_estimator.h b/third_party/libwebrtc/modules/audio_processing/aec3/residual_echo_estimator.h
new file mode 100644
index 0000000000..c468764002
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/residual_echo_estimator.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_RESIDUAL_ECHO_ESTIMATOR_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_RESIDUAL_ECHO_ESTIMATOR_H_
+
+#include <array>
+#include <memory>
+
+#include "absl/types/optional.h"
+#include "api/audio/echo_canceller3_config.h"
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "modules/audio_processing/aec3/aec_state.h"
+#include "modules/audio_processing/aec3/render_buffer.h"
+#include "modules/audio_processing/aec3/reverb_model.h"
+#include "modules/audio_processing/aec3/spectrum_buffer.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+class ResidualEchoEstimator {
+ public:
+ ResidualEchoEstimator(const EchoCanceller3Config& config,
+ size_t num_render_channels);
+ ~ResidualEchoEstimator();
+
+ ResidualEchoEstimator(const ResidualEchoEstimator&) = delete;
+ ResidualEchoEstimator& operator=(const ResidualEchoEstimator&) = delete;
+
+ void Estimate(
+ const AecState& aec_state,
+ const RenderBuffer& render_buffer,
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>> S2_linear,
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>> Y2,
+ bool dominant_nearend,
+ rtc::ArrayView<std::array<float, kFftLengthBy2Plus1>> R2,
+ rtc::ArrayView<std::array<float, kFftLengthBy2Plus1>> R2_unbounded);
+
+ private:
+ enum class ReverbType { kLinear, kNonLinear };
+
+ // Resets the state.
+ void Reset();
+
+ // Updates estimate for the power of the stationary noise component in the
+ // render signal.
+ void UpdateRenderNoisePower(const RenderBuffer& render_buffer);
+
+ // Updates the reverb estimation.
+ void UpdateReverb(ReverbType reverb_type,
+ const AecState& aec_state,
+ const RenderBuffer& render_buffer,
+ bool dominant_nearend);
+
+ // Adds the estimated unmodelled echo power to the residual echo power
+ // estimate.
+ void AddReverb(
+ rtc::ArrayView<std::array<float, kFftLengthBy2Plus1>> R2) const;
+
+ // Gets the echo path gain to apply.
+ float GetEchoPathGain(const AecState& aec_state,
+ bool gain_for_early_reflections) const;
+
+ const EchoCanceller3Config config_;
+ const size_t num_render_channels_;
+ const float early_reflections_transparent_mode_gain_;
+ const float late_reflections_transparent_mode_gain_;
+ const float early_reflections_general_gain_;
+ const float late_reflections_general_gain_;
+ const bool erle_onset_compensation_in_dominant_nearend_;
+ std::array<float, kFftLengthBy2Plus1> X2_noise_floor_;
+ std::array<int, kFftLengthBy2Plus1> X2_noise_floor_counter_;
+ ReverbModel echo_reverb_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AEC3_RESIDUAL_ECHO_ESTIMATOR_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/residual_echo_estimator_unittest.cc b/third_party/libwebrtc/modules/audio_processing/aec3/residual_echo_estimator_unittest.cc
new file mode 100644
index 0000000000..9a7bf0a89c
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/residual_echo_estimator_unittest.cc
@@ -0,0 +1,199 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/residual_echo_estimator.h"
+
+#include <numeric>
+
+#include "api/audio/echo_canceller3_config.h"
+#include "modules/audio_processing/aec3/aec3_fft.h"
+#include "modules/audio_processing/aec3/aec_state.h"
+#include "modules/audio_processing/aec3/render_delay_buffer.h"
+#include "modules/audio_processing/test/echo_canceller_test_tools.h"
+#include "rtc_base/random.h"
+#include "rtc_base/strings/string_builder.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+namespace {
+constexpr int kSampleRateHz = 48000;
+constexpr size_t kNumBands = NumBandsForRate(kSampleRateHz);
+constexpr float kEpsilon = 1e-4f;
+} // namespace
+
+class ResidualEchoEstimatorTest {
+ public:
+ ResidualEchoEstimatorTest(size_t num_render_channels,
+ size_t num_capture_channels,
+ const EchoCanceller3Config& config)
+ : num_render_channels_(num_render_channels),
+ num_capture_channels_(num_capture_channels),
+ config_(config),
+ estimator_(config_, num_render_channels_),
+ aec_state_(config_, num_capture_channels_),
+ render_delay_buffer_(RenderDelayBuffer::Create(config_,
+ kSampleRateHz,
+ num_render_channels_)),
+ E2_refined_(num_capture_channels_),
+ S2_linear_(num_capture_channels_),
+ Y2_(num_capture_channels_),
+ R2_(num_capture_channels_),
+ R2_unbounded_(num_capture_channels_),
+ x_(kNumBands, num_render_channels_),
+ H2_(num_capture_channels_,
+ std::vector<std::array<float, kFftLengthBy2Plus1>>(10)),
+ h_(num_capture_channels_,
+ std::vector<float>(
+ GetTimeDomainLength(config_.filter.refined.length_blocks),
+ 0.0f)),
+ random_generator_(42U),
+ output_(num_capture_channels_) {
+ for (auto& H2_ch : H2_) {
+ for (auto& H2_k : H2_ch) {
+ H2_k.fill(0.01f);
+ }
+ H2_ch[2].fill(10.f);
+ H2_ch[2][0] = 0.1f;
+ }
+
+ for (auto& subtractor_output : output_) {
+ subtractor_output.Reset();
+ subtractor_output.s_refined.fill(100.f);
+ }
+ y_.fill(0.f);
+
+ constexpr float kLevel = 10.f;
+ for (auto& E2_refined_ch : E2_refined_) {
+ E2_refined_ch.fill(kLevel);
+ }
+ S2_linear_[0].fill(kLevel);
+ for (auto& Y2_ch : Y2_) {
+ Y2_ch.fill(kLevel);
+ }
+ }
+
+ void RunOneFrame(bool dominant_nearend) {
+ RandomizeSampleVector(&random_generator_,
+ x_.View(/*band=*/0, /*channel=*/0));
+ render_delay_buffer_->Insert(x_);
+ if (first_frame_) {
+ render_delay_buffer_->Reset();
+ first_frame_ = false;
+ }
+ render_delay_buffer_->PrepareCaptureProcessing();
+
+ aec_state_.Update(delay_estimate_, H2_, h_,
+ *render_delay_buffer_->GetRenderBuffer(), E2_refined_,
+ Y2_, output_);
+
+ estimator_.Estimate(aec_state_, *render_delay_buffer_->GetRenderBuffer(),
+ S2_linear_, Y2_, dominant_nearend, R2_, R2_unbounded_);
+ }
+
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>> R2() const {
+ return R2_;
+ }
+
+ private:
+ const size_t num_render_channels_;
+ const size_t num_capture_channels_;
+ const EchoCanceller3Config& config_;
+ ResidualEchoEstimator estimator_;
+ AecState aec_state_;
+ std::unique_ptr<RenderDelayBuffer> render_delay_buffer_;
+ std::vector<std::array<float, kFftLengthBy2Plus1>> E2_refined_;
+ std::vector<std::array<float, kFftLengthBy2Plus1>> S2_linear_;
+ std::vector<std::array<float, kFftLengthBy2Plus1>> Y2_;
+ std::vector<std::array<float, kFftLengthBy2Plus1>> R2_;
+ std::vector<std::array<float, kFftLengthBy2Plus1>> R2_unbounded_;
+ Block x_;
+ std::vector<std::vector<std::array<float, kFftLengthBy2Plus1>>> H2_;
+ std::vector<std::vector<float>> h_;
+ Random random_generator_;
+ std::vector<SubtractorOutput> output_;
+ std::array<float, kBlockSize> y_;
+ absl::optional<DelayEstimate> delay_estimate_;
+ bool first_frame_ = true;
+};
+
+class ResidualEchoEstimatorMultiChannel
+ : public ::testing::Test,
+ public ::testing::WithParamInterface<std::tuple<size_t, size_t>> {};
+
+INSTANTIATE_TEST_SUITE_P(MultiChannel,
+ ResidualEchoEstimatorMultiChannel,
+ ::testing::Combine(::testing::Values(1, 2, 4),
+ ::testing::Values(1, 2, 4)));
+
+TEST_P(ResidualEchoEstimatorMultiChannel, BasicTest) {
+ const size_t num_render_channels = std::get<0>(GetParam());
+ const size_t num_capture_channels = std::get<1>(GetParam());
+
+ EchoCanceller3Config config;
+ ResidualEchoEstimatorTest residual_echo_estimator_test(
+ num_render_channels, num_capture_channels, config);
+ for (int k = 0; k < 1993; ++k) {
+ residual_echo_estimator_test.RunOneFrame(/*dominant_nearend=*/false);
+ }
+}
+
+TEST(ResidualEchoEstimatorMultiChannel, ReverbTest) {
+ const size_t num_render_channels = 1;
+ const size_t num_capture_channels = 1;
+ const size_t nFrames = 100;
+
+ EchoCanceller3Config reference_config;
+ reference_config.ep_strength.default_len = 0.95f;
+ reference_config.ep_strength.nearend_len = 0.95f;
+ EchoCanceller3Config config_use_nearend_len = reference_config;
+ config_use_nearend_len.ep_strength.default_len = 0.95f;
+ config_use_nearend_len.ep_strength.nearend_len = 0.83f;
+
+ ResidualEchoEstimatorTest reference_residual_echo_estimator_test(
+ num_render_channels, num_capture_channels, reference_config);
+ ResidualEchoEstimatorTest use_nearend_len_residual_echo_estimator_test(
+ num_render_channels, num_capture_channels, config_use_nearend_len);
+
+ std::vector<float> acum_energy_reference_R2(num_capture_channels, 0.0f);
+ std::vector<float> acum_energy_R2(num_capture_channels, 0.0f);
+ for (size_t frame = 0; frame < nFrames; ++frame) {
+    bool dominant_nearend = frame > nFrames / 2;
+ reference_residual_echo_estimator_test.RunOneFrame(dominant_nearend);
+ use_nearend_len_residual_echo_estimator_test.RunOneFrame(dominant_nearend);
+ const auto& reference_R2 = reference_residual_echo_estimator_test.R2();
+ const auto& R2 = use_nearend_len_residual_echo_estimator_test.R2();
+ ASSERT_EQ(reference_R2.size(), R2.size());
+ for (size_t ch = 0; ch < reference_R2.size(); ++ch) {
+ float energy_reference_R2 = std::accumulate(
+ reference_R2[ch].cbegin(), reference_R2[ch].cend(), 0.0f);
+ float energy_R2 = std::accumulate(R2[ch].cbegin(), R2[ch].cend(), 0.0f);
+ if (dominant_nearend) {
+ EXPECT_GE(energy_reference_R2, energy_R2);
+ } else {
+ EXPECT_NEAR(energy_reference_R2, energy_R2, kEpsilon);
+ }
+ acum_energy_reference_R2[ch] += energy_reference_R2;
+ acum_energy_R2[ch] += energy_R2;
+ }
+ if (frame == nFrames / 2 || frame == nFrames - 1) {
+ for (size_t ch = 0; ch < acum_energy_reference_R2.size(); ch++) {
+ if (dominant_nearend) {
+ EXPECT_GT(acum_energy_reference_R2[ch], acum_energy_R2[ch]);
+ } else {
+ EXPECT_NEAR(acum_energy_reference_R2[ch], acum_energy_R2[ch],
+ kEpsilon);
+ }
+ }
+ }
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/reverb_decay_estimator.cc b/third_party/libwebrtc/modules/audio_processing/aec3/reverb_decay_estimator.cc
new file mode 100644
index 0000000000..2daf376911
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/reverb_decay_estimator.cc
@@ -0,0 +1,410 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/reverb_decay_estimator.h"
+
+#include <stddef.h>
+
+#include <algorithm>
+#include <cmath>
+#include <numeric>
+
+#include "api/array_view.h"
+#include "api/audio/echo_canceller3_config.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+namespace {
+
+constexpr int kEarlyReverbMinSizeBlocks = 3;
+constexpr int kBlocksPerSection = 6;
+// Linear regression approach assumes symmetric index around 0.
+constexpr float kEarlyReverbFirstPointAtLinearRegressors =
+ -0.5f * kBlocksPerSection * kFftLengthBy2 + 0.5f;
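+
+// For example, with kBlocksPerSection = 6 and kFftLengthBy2 = 64, a section
+// spans 384 coefficients and the regressor indices run symmetrically from
+// -191.5 to +191.5 in steps of 1, so they sum to zero by construction.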
+
+// Averages the values in a block of size kFftLengthBy2.
+float BlockAverage(rtc::ArrayView<const float> v, size_t block_index) {
+ constexpr float kOneByFftLengthBy2 = 1.f / kFftLengthBy2;
+ const int i = block_index * kFftLengthBy2;
+ RTC_DCHECK_GE(v.size(), i + kFftLengthBy2);
+ const float sum =
+ std::accumulate(v.begin() + i, v.begin() + i + kFftLengthBy2, 0.f);
+ return sum * kOneByFftLengthBy2;
+}
+
+// Analyzes the gain in a block.
+void AnalyzeBlockGain(const std::array<float, kFftLengthBy2>& h2,
+ float floor_gain,
+ float* previous_gain,
+ bool* block_adapting,
+ bool* decaying_gain) {
+ float gain = std::max(BlockAverage(h2, 0), 1e-32f);
+ *block_adapting =
+ *previous_gain > 1.1f * gain || *previous_gain < 0.9f * gain;
+ *decaying_gain = gain > floor_gain;
+ *previous_gain = gain;
+}
+
+// Arithmetic sum of $2 \sum_{i=0.5}^{(N-1)/2}i^2$ calculated directly.
+constexpr float SymmetricArithmetricSum(int N) {
+ return N * (N * N - 1.0f) * (1.f / 12.f);
+}
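+
+// Worked check: for N = 4 the symmetric indices are {-1.5, -0.5, 0.5, 1.5},
+// so the sum of squares is 2 * (0.25 + 2.25) = 5, which matches
+// N * (N * N - 1) / 12 = 4 * 15 / 12 = 5.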
+
+// Returns the peak energy of an impulse response.
+float BlockEnergyPeak(rtc::ArrayView<const float> h, int peak_block) {
+ RTC_DCHECK_LE((peak_block + 1) * kFftLengthBy2, h.size());
+ RTC_DCHECK_GE(peak_block, 0);
+ float peak_value =
+ *std::max_element(h.begin() + peak_block * kFftLengthBy2,
+ h.begin() + (peak_block + 1) * kFftLengthBy2,
+ [](float a, float b) { return a * a < b * b; });
+ return peak_value * peak_value;
+}
+
+// Returns the average energy of an impulse response block.
+float BlockEnergyAverage(rtc::ArrayView<const float> h, int block_index) {
+ RTC_DCHECK_LE((block_index + 1) * kFftLengthBy2, h.size());
+ RTC_DCHECK_GE(block_index, 0);
+ constexpr float kOneByFftLengthBy2 = 1.f / kFftLengthBy2;
+ const auto sum_of_squares = [](float a, float b) { return a + b * b; };
+ return std::accumulate(h.begin() + block_index * kFftLengthBy2,
+ h.begin() + (block_index + 1) * kFftLengthBy2, 0.f,
+ sum_of_squares) *
+ kOneByFftLengthBy2;
+}
+
+} // namespace
+
+ReverbDecayEstimator::ReverbDecayEstimator(const EchoCanceller3Config& config)
+ : filter_length_blocks_(config.filter.refined.length_blocks),
+ filter_length_coefficients_(GetTimeDomainLength(filter_length_blocks_)),
+ use_adaptive_echo_decay_(config.ep_strength.default_len < 0.f),
+ early_reverb_estimator_(config.filter.refined.length_blocks -
+ kEarlyReverbMinSizeBlocks),
+ late_reverb_start_(kEarlyReverbMinSizeBlocks),
+ late_reverb_end_(kEarlyReverbMinSizeBlocks),
+ previous_gains_(config.filter.refined.length_blocks, 0.f),
+ decay_(std::fabs(config.ep_strength.default_len)),
+ mild_decay_(std::fabs(config.ep_strength.nearend_len)) {
+ RTC_DCHECK_GT(config.filter.refined.length_blocks,
+ static_cast<size_t>(kEarlyReverbMinSizeBlocks));
+}
+
+ReverbDecayEstimator::~ReverbDecayEstimator() = default;
+
+void ReverbDecayEstimator::Update(rtc::ArrayView<const float> filter,
+ const absl::optional<float>& filter_quality,
+ int filter_delay_blocks,
+ bool usable_linear_filter,
+ bool stationary_signal) {
+ const int filter_size = static_cast<int>(filter.size());
+
+ if (stationary_signal) {
+ return;
+ }
+
+ bool estimation_feasible =
+ filter_delay_blocks <=
+ filter_length_blocks_ - kEarlyReverbMinSizeBlocks - 1;
+ estimation_feasible =
+ estimation_feasible && filter_size == filter_length_coefficients_;
+ estimation_feasible = estimation_feasible && filter_delay_blocks > 0;
+ estimation_feasible = estimation_feasible && usable_linear_filter;
+
+ if (!estimation_feasible) {
+ ResetDecayEstimation();
+ return;
+ }
+
+ if (!use_adaptive_echo_decay_) {
+ return;
+ }
+
+ const float new_smoothing = filter_quality ? *filter_quality * 0.2f : 0.f;
+ smoothing_constant_ = std::max(new_smoothing, smoothing_constant_);
+ if (smoothing_constant_ == 0.f) {
+ return;
+ }
+
+ if (block_to_analyze_ < filter_length_blocks_) {
+ // Analyze the filter and accumulate data for reverb estimation.
+ AnalyzeFilter(filter);
+ ++block_to_analyze_;
+ } else {
+ // When the filter is fully analyzed, estimate the reverb decay and reset
+ // the block_to_analyze_ counter.
+ EstimateDecay(filter, filter_delay_blocks);
+ }
+}
+
+void ReverbDecayEstimator::ResetDecayEstimation() {
+ early_reverb_estimator_.Reset();
+ late_reverb_decay_estimator_.Reset(0);
+ block_to_analyze_ = 0;
+ estimation_region_candidate_size_ = 0;
+ estimation_region_identified_ = false;
+ smoothing_constant_ = 0.f;
+ late_reverb_start_ = 0;
+ late_reverb_end_ = 0;
+}
+
+void ReverbDecayEstimator::EstimateDecay(rtc::ArrayView<const float> filter,
+ int peak_block) {
+ auto& h = filter;
+ RTC_DCHECK_EQ(0, h.size() % kFftLengthBy2);
+
+ // Reset the block analysis counter.
+ block_to_analyze_ =
+ std::min(peak_block + kEarlyReverbMinSizeBlocks, filter_length_blocks_);
+
+  // To estimate the reverb decay, the energy of the first filter section must
+  // be substantially larger than that of the last one. Also, the first filter
+  // section energy must not deviate too much from the maximum peak energy.
+ const float first_reverb_gain = BlockEnergyAverage(h, block_to_analyze_);
+ const size_t h_size_blocks = h.size() >> kFftLengthBy2Log2;
+ tail_gain_ = BlockEnergyAverage(h, h_size_blocks - 1);
+ float peak_energy = BlockEnergyPeak(h, peak_block);
+ const bool sufficient_reverb_decay = first_reverb_gain > 4.f * tail_gain_;
+ const bool valid_filter =
+ first_reverb_gain > 2.f * tail_gain_ && peak_energy < 100.f;
+
+ // Estimate the size of the regions with early and late reflections.
+ const int size_early_reverb = early_reverb_estimator_.Estimate();
+ const int size_late_reverb =
+ std::max(estimation_region_candidate_size_ - size_early_reverb, 0);
+
+ // Only update the reverb decay estimate if the size of the identified late
+ // reverb is sufficiently large.
+ if (size_late_reverb >= 5) {
+ if (valid_filter && late_reverb_decay_estimator_.EstimateAvailable()) {
+ float decay = std::pow(
+ 2.0f, late_reverb_decay_estimator_.Estimate() * kFftLengthBy2);
+ constexpr float kMaxDecay = 0.95f; // ~1 sec min RT60.
+ constexpr float kMinDecay = 0.02f; // ~15 ms max RT60.
+ decay = std::max(.97f * decay_, decay);
+ decay = std::min(decay, kMaxDecay);
+ decay = std::max(decay, kMinDecay);
+ decay_ += smoothing_constant_ * (decay - decay_);
+ }
+
+ // Update length of decay. Must have enough data (number of sections) in
+ // order to estimate decay rate.
+ late_reverb_decay_estimator_.Reset(size_late_reverb * kFftLengthBy2);
+ late_reverb_start_ =
+ peak_block + kEarlyReverbMinSizeBlocks + size_early_reverb;
+ late_reverb_end_ =
+ block_to_analyze_ + estimation_region_candidate_size_ - 1;
+ } else {
+ late_reverb_decay_estimator_.Reset(0);
+ late_reverb_start_ = 0;
+ late_reverb_end_ = 0;
+ }
+
+ // Reset variables for the identification of the region for reverb decay
+ // estimation.
+ estimation_region_identified_ = !(valid_filter && sufficient_reverb_decay);
+ estimation_region_candidate_size_ = 0;
+
+ // Stop estimation of the decay until another good filter is received.
+ smoothing_constant_ = 0.f;
+
+ // Reset early reflections detector.
+ early_reverb_estimator_.Reset();
+}
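+
+// Note on the decay mapping above: the regressor estimates the slope of
+// log2(h^2) per filter coefficient, so 2^(slope * kFftLengthBy2) converts it
+// into an energy decay factor per block. Sanity check on the bounds: a decay
+// of 0.95 per 64-sample block loses 10 * log10(0.95) ~= 0.22 dB per block,
+// so 60 dB (RT60) takes ~270 blocks, i.e. roughly one second at the 16 kHz
+// band rate, which matches the "~1 sec min RT60" comment above.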
+
+void ReverbDecayEstimator::AnalyzeFilter(rtc::ArrayView<const float> filter) {
+ auto h = rtc::ArrayView<const float>(
+ filter.begin() + block_to_analyze_ * kFftLengthBy2, kFftLengthBy2);
+
+  // Compute the squared filter coefficients for the block to analyze.
+ std::array<float, kFftLengthBy2> h2;
+ std::transform(h.begin(), h.end(), h2.begin(), [](float a) { return a * a; });
+
+ // Map out the region for estimating the reverb decay.
+ bool adapting;
+ bool above_noise_floor;
+ AnalyzeBlockGain(h2, tail_gain_, &previous_gains_[block_to_analyze_],
+ &adapting, &above_noise_floor);
+
+ // Count consecutive number of "good" filter sections, where "good" means:
+ // 1) energy is above noise floor.
+ // 2) energy of current section has not changed too much from last check.
+ estimation_region_identified_ =
+ estimation_region_identified_ || adapting || !above_noise_floor;
+ if (!estimation_region_identified_) {
+ ++estimation_region_candidate_size_;
+ }
+
+ // Accumulate data for reverb decay estimation and for the estimation of early
+ // reflections.
+ if (block_to_analyze_ <= late_reverb_end_) {
+ if (block_to_analyze_ >= late_reverb_start_) {
+ for (float h2_k : h2) {
+ float h2_log2 = FastApproxLog2f(h2_k + 1e-10);
+ late_reverb_decay_estimator_.Accumulate(h2_log2);
+ early_reverb_estimator_.Accumulate(h2_log2, smoothing_constant_);
+ }
+ } else {
+ for (float h2_k : h2) {
+ float h2_log2 = FastApproxLog2f(h2_k + 1e-10);
+ early_reverb_estimator_.Accumulate(h2_log2, smoothing_constant_);
+ }
+ }
+ }
+}
+
+void ReverbDecayEstimator::Dump(ApmDataDumper* data_dumper) const {
+ data_dumper->DumpRaw("aec3_reverb_decay", decay_);
+ data_dumper->DumpRaw("aec3_reverb_tail_energy", tail_gain_);
+ data_dumper->DumpRaw("aec3_reverb_alpha", smoothing_constant_);
+ data_dumper->DumpRaw("aec3_num_reverb_decay_blocks",
+ late_reverb_end_ - late_reverb_start_);
+ data_dumper->DumpRaw("aec3_late_reverb_start", late_reverb_start_);
+ data_dumper->DumpRaw("aec3_late_reverb_end", late_reverb_end_);
+ early_reverb_estimator_.Dump(data_dumper);
+}
+
+void ReverbDecayEstimator::LateReverbLinearRegressor::Reset(
+ int num_data_points) {
+ RTC_DCHECK_LE(0, num_data_points);
+ RTC_DCHECK_EQ(0, num_data_points % 2);
+ const int N = num_data_points;
+ nz_ = 0.f;
+ // Arithmetic sum of $2 \sum_{i=0.5}^{(N-1)/2}i^2$ calculated directly.
+ nn_ = SymmetricArithmetricSum(N);
+ // The linear regression approach assumes symmetric index around 0.
+ count_ = N > 0 ? -N * 0.5f + 0.5f : 0.f;
+ N_ = N;
+ n_ = 0;
+}
+
+void ReverbDecayEstimator::LateReverbLinearRegressor::Accumulate(float z) {
+ nz_ += count_ * z;
+ ++count_;
+ ++n_;
+}
+
+float ReverbDecayEstimator::LateReverbLinearRegressor::Estimate() {
+ RTC_DCHECK(EstimateAvailable());
+ if (nn_ == 0.f) {
+ RTC_DCHECK_NOTREACHED();
+ return 0.f;
+ }
+ return nz_ / nn_;
+}
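+
+// Background for the regressor: with x-values centered symmetrically around
+// zero, the least-squares slope of z over x reduces to
+//   slope = sum(x * z) / sum(x * x) = nz_ / nn_,
+// since the terms involving the mean of x vanish. Reset() precomputes
+// sum(x * x) for the symmetric index set via SymmetricArithmetricSum().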
+
+ReverbDecayEstimator::EarlyReverbLengthEstimator::EarlyReverbLengthEstimator(
+ int max_blocks)
+ : numerators_smooth_(max_blocks - kBlocksPerSection, 0.f),
+ numerators_(numerators_smooth_.size(), 0.f),
+ coefficients_counter_(0) {
+ RTC_DCHECK_LE(0, max_blocks);
+}
+
+ReverbDecayEstimator::EarlyReverbLengthEstimator::
+ ~EarlyReverbLengthEstimator() = default;
+
+void ReverbDecayEstimator::EarlyReverbLengthEstimator::Reset() {
+ coefficients_counter_ = 0;
+ std::fill(numerators_.begin(), numerators_.end(), 0.f);
+ block_counter_ = 0;
+}
+
+void ReverbDecayEstimator::EarlyReverbLengthEstimator::Accumulate(
+ float value,
+ float smoothing) {
+  // Each section is composed of kBlocksPerSection blocks, and each section
+  // overlaps the next one by (kBlocksPerSection - 1) blocks. For example, the
+  // first section covers blocks [0:5], the second covers blocks [1:6], and so
+  // on. As a result, each value updates up to kBlocksPerSection sections.
+ int first_section_index = std::max(block_counter_ - kBlocksPerSection + 1, 0);
+ int last_section_index =
+ std::min(block_counter_, static_cast<int>(numerators_.size() - 1));
+ float x_value = static_cast<float>(coefficients_counter_) +
+ kEarlyReverbFirstPointAtLinearRegressors;
+ const float value_to_inc = kFftLengthBy2 * value;
+ float value_to_add =
+ x_value * value + (block_counter_ - last_section_index) * value_to_inc;
+ for (int section = last_section_index; section >= first_section_index;
+ --section, value_to_add += value_to_inc) {
+ numerators_[section] += value_to_add;
+ }
+
+  // Check if this update was the last coefficient of the current block. In
+  // that case, check whether a section ends at this block and, if so, update
+  // the smoothed numerator of the linear regressor computed for that section.
+ if (++coefficients_counter_ == kFftLengthBy2) {
+ if (block_counter_ >= (kBlocksPerSection - 1)) {
+ size_t section = block_counter_ - (kBlocksPerSection - 1);
+ RTC_DCHECK_GT(numerators_.size(), section);
+ RTC_DCHECK_GT(numerators_smooth_.size(), section);
+ numerators_smooth_[section] +=
+ smoothing * (numerators_[section] - numerators_smooth_[section]);
+ n_sections_ = section + 1;
+ }
+ ++block_counter_;
+ coefficients_counter_ = 0;
+ }
+}
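+
+// Illustrative example of the sectioning above: with kBlocksPerSection = 6,
+// block 7 belongs to the overlapping sections [2:7], [3:8], ..., [7:12], so
+// each accumulated coefficient updates up to kBlocksPerSection running
+// numerators, and section k is folded into numerators_smooth_ once block
+// k + kBlocksPerSection - 1 has been fully accumulated.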
+
+// Estimates the size in blocks of the early reverb by comparing the tilt
+// estimated in each section. As an optimization, and because all the linear
+// regressors share the same denominator, the tilts are compared through the
+// numerators of the linear regressors alone.
+int ReverbDecayEstimator::EarlyReverbLengthEstimator::Estimate() {
+ constexpr float N = kBlocksPerSection * kFftLengthBy2;
+ constexpr float nn = SymmetricArithmetricSum(N);
+  // numerator_11 is the numerator value for which the linear regressor yields
+  // a per-block factor of 1.1 (i.e., growth rather than decay):
+  // log2(1.1) * nn / kFftLengthBy2.
+ constexpr float numerator_11 = 0.13750352374993502f * nn / kFftLengthBy2;
+ // log2(0.8) * nn / kFftLengthBy2.
+ constexpr float numerator_08 = -0.32192809488736229f * nn / kFftLengthBy2;
+ constexpr int kNumSectionsToAnalyze = 9;
+
+ if (n_sections_ < kNumSectionsToAnalyze) {
+ return 0;
+ }
+
+  // Estimate which blocks correspond to early reverberations by analyzing the
+  // impulse response. Portions of the impulse response whose energy is not
+  // decreasing over its coefficients are considered part of the early
+  // reverberations, as are blocks where the energy decreases faster than it
+  // does at the end of the impulse response. The estimation is limited to the
+  // first kNumSectionsToAnalyze sections.
+
+ RTC_DCHECK_LE(n_sections_, numerators_smooth_.size());
+ const float min_numerator_tail =
+ *std::min_element(numerators_smooth_.begin() + kNumSectionsToAnalyze,
+ numerators_smooth_.begin() + n_sections_);
+ int early_reverb_size_minus_1 = 0;
+ for (int k = 0; k < kNumSectionsToAnalyze; ++k) {
+ if ((numerators_smooth_[k] > numerator_11) ||
+ (numerators_smooth_[k] < numerator_08 &&
+ numerators_smooth_[k] < 0.9f * min_numerator_tail)) {
+ early_reverb_size_minus_1 = k;
+ }
+ }
+
+ return early_reverb_size_minus_1 == 0 ? 0 : early_reverb_size_minus_1 + 1;
+}
+
+void ReverbDecayEstimator::EarlyReverbLengthEstimator::Dump(
+ ApmDataDumper* data_dumper) const {
+ data_dumper->DumpRaw("aec3_er_acum_numerator", numerators_smooth_);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/reverb_decay_estimator.h b/third_party/libwebrtc/modules/audio_processing/aec3/reverb_decay_estimator.h
new file mode 100644
index 0000000000..fee54210e6
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/reverb_decay_estimator.h
@@ -0,0 +1,120 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_REVERB_DECAY_ESTIMATOR_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_REVERB_DECAY_ESTIMATOR_H_
+
+#include <array>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/array_view.h"
+#include "modules/audio_processing/aec3/aec3_common.h" // kMaxAdaptiveFilter...
+
+namespace webrtc {
+
+class ApmDataDumper;
+struct EchoCanceller3Config;
+
+// Class for estimating the decay of the late reverb.
+class ReverbDecayEstimator {
+ public:
+ explicit ReverbDecayEstimator(const EchoCanceller3Config& config);
+ ~ReverbDecayEstimator();
+ // Updates the decay estimate.
+ void Update(rtc::ArrayView<const float> filter,
+ const absl::optional<float>& filter_quality,
+ int filter_delay_blocks,
+ bool usable_linear_filter,
+ bool stationary_signal);
+ // Returns the decay for the exponential model. The parameter `mild` indicates
+ // which exponential decay to return, the default one or a milder one.
+ float Decay(bool mild) const {
+ if (use_adaptive_echo_decay_) {
+ return decay_;
+ } else {
+ return mild ? mild_decay_ : decay_;
+ }
+ }
+ // Dumps debug data.
+ void Dump(ApmDataDumper* data_dumper) const;
+
+ private:
+ void EstimateDecay(rtc::ArrayView<const float> filter, int peak_block);
+ void AnalyzeFilter(rtc::ArrayView<const float> filter);
+
+ void ResetDecayEstimation();
+
+ // Class for estimating the decay of the late reverb from the linear filter.
+ class LateReverbLinearRegressor {
+ public:
+ // Resets the estimator to receive a specified number of data points.
+ void Reset(int num_data_points);
+ // Accumulates estimation data.
+ void Accumulate(float z);
+ // Estimates the decay.
+ float Estimate();
+ // Returns whether an estimate is available.
+ bool EstimateAvailable() const { return n_ == N_ && N_ != 0; }
+
+   private:
+ float nz_ = 0.f;
+ float nn_ = 0.f;
+ float count_ = 0.f;
+ int N_ = 0;
+ int n_ = 0;
+ };
+
+ // Class for identifying the length of the early reverb from the linear
+ // filter. For identifying the early reverberations, the impulse response is
+ // divided in sections and the tilt of each section is computed by a linear
+ // regressor.
+ class EarlyReverbLengthEstimator {
+ public:
+ explicit EarlyReverbLengthEstimator(int max_blocks);
+ ~EarlyReverbLengthEstimator();
+
+ // Resets the estimator.
+ void Reset();
+ // Accumulates estimation data.
+ void Accumulate(float value, float smoothing);
+ // Estimates the size in blocks of the early reverb.
+ int Estimate();
+ // Dumps debug data.
+ void Dump(ApmDataDumper* data_dumper) const;
+
+ private:
+ std::vector<float> numerators_smooth_;
+ std::vector<float> numerators_;
+ int coefficients_counter_;
+ int block_counter_ = 0;
+ int n_sections_ = 0;
+ };
+
+ const int filter_length_blocks_;
+ const int filter_length_coefficients_;
+ const bool use_adaptive_echo_decay_;
+ LateReverbLinearRegressor late_reverb_decay_estimator_;
+ EarlyReverbLengthEstimator early_reverb_estimator_;
+ int late_reverb_start_;
+ int late_reverb_end_;
+ int block_to_analyze_ = 0;
+ int estimation_region_candidate_size_ = 0;
+ bool estimation_region_identified_ = false;
+ std::vector<float> previous_gains_;
+ float decay_;
+ float mild_decay_;
+ float tail_gain_ = 0.f;
+ float smoothing_constant_ = 0.f;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AEC3_REVERB_DECAY_ESTIMATOR_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/reverb_frequency_response.cc b/third_party/libwebrtc/modules/audio_processing/aec3/reverb_frequency_response.cc
new file mode 100644
index 0000000000..6e7282a1fc
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/reverb_frequency_response.cc
@@ -0,0 +1,108 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/reverb_frequency_response.h"
+
+#include <stddef.h>
+
+#include <algorithm>
+#include <array>
+#include <numeric>
+
+#include "api/array_view.h"
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+namespace {
+
+// Computes the ratio of the tail energy to the direct-path energy. The
+// energies are computed in the power spectrum domain, discarding the DC
+// contribution.
+float AverageDecayWithinFilter(
+ rtc::ArrayView<const float> freq_resp_direct_path,
+ rtc::ArrayView<const float> freq_resp_tail) {
+  // Skip the DC bin in the ratio computation.
+ constexpr size_t kSkipBins = 1;
+ RTC_CHECK_EQ(freq_resp_direct_path.size(), freq_resp_tail.size());
+
+ float direct_path_energy =
+ std::accumulate(freq_resp_direct_path.begin() + kSkipBins,
+ freq_resp_direct_path.end(), 0.f);
+
+ if (direct_path_energy == 0.f) {
+ return 0.f;
+ }
+
+ float tail_energy = std::accumulate(freq_resp_tail.begin() + kSkipBins,
+ freq_resp_tail.end(), 0.f);
+ return tail_energy / direct_path_energy;
+}
+
+} // namespace
+
+ReverbFrequencyResponse::ReverbFrequencyResponse(
+ bool use_conservative_tail_frequency_response)
+ : use_conservative_tail_frequency_response_(
+ use_conservative_tail_frequency_response) {
+ tail_response_.fill(0.0f);
+}
+
+ReverbFrequencyResponse::~ReverbFrequencyResponse() = default;
+
+void ReverbFrequencyResponse::Update(
+ const std::vector<std::array<float, kFftLengthBy2Plus1>>&
+ frequency_response,
+ int filter_delay_blocks,
+ const absl::optional<float>& linear_filter_quality,
+ bool stationary_block) {
+ if (stationary_block || !linear_filter_quality) {
+ return;
+ }
+
+ Update(frequency_response, filter_delay_blocks, *linear_filter_quality);
+}
+
+void ReverbFrequencyResponse::Update(
+ const std::vector<std::array<float, kFftLengthBy2Plus1>>&
+ frequency_response,
+ int filter_delay_blocks,
+ float linear_filter_quality) {
+ rtc::ArrayView<const float> freq_resp_tail(
+ frequency_response[frequency_response.size() - 1]);
+
+ rtc::ArrayView<const float> freq_resp_direct_path(
+ frequency_response[filter_delay_blocks]);
+
+ float average_decay =
+ AverageDecayWithinFilter(freq_resp_direct_path, freq_resp_tail);
+
+ const float smoothing = 0.2f * linear_filter_quality;
+ average_decay_ += smoothing * (average_decay - average_decay_);
+
+ for (size_t k = 0; k < kFftLengthBy2Plus1; ++k) {
+ tail_response_[k] = freq_resp_direct_path[k] * average_decay_;
+ }
+
+ if (use_conservative_tail_frequency_response_) {
+ for (size_t k = 0; k < kFftLengthBy2Plus1; ++k) {
+ tail_response_[k] = std::max(freq_resp_tail[k], tail_response_[k]);
+ }
+ }
+
+ for (size_t k = 1; k < kFftLengthBy2; ++k) {
+ const float avg_neighbour =
+ 0.5f * (tail_response_[k - 1] + tail_response_[k + 1]);
+ tail_response_[k] = std::max(tail_response_[k], avg_neighbour);
+ }
+}
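+
+// The update above is a standard leaky integrator: average_decay_ moves
+// towards the new estimate by a step of 0.2 * linear_filter_quality, so a
+// high-quality linear filter adapts the estimate quickly while a quality of
+// zero freezes it. The final loop adds a mild spectral smoothness constraint
+// by never letting a bin fall below the average of its two neighbours.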
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/reverb_frequency_response.h b/third_party/libwebrtc/modules/audio_processing/aec3/reverb_frequency_response.h
new file mode 100644
index 0000000000..69b16b54d0
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/reverb_frequency_response.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_REVERB_FREQUENCY_RESPONSE_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_REVERB_FREQUENCY_RESPONSE_H_
+
+#include <array>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/array_view.h"
+#include "modules/audio_processing/aec3/aec3_common.h"
+
+namespace webrtc {
+
+// Class for updating the frequency response for the reverb.
+class ReverbFrequencyResponse {
+ public:
+ explicit ReverbFrequencyResponse(
+ bool use_conservative_tail_frequency_response);
+ ~ReverbFrequencyResponse();
+
+ // Updates the frequency response estimate of the reverb.
+ void Update(const std::vector<std::array<float, kFftLengthBy2Plus1>>&
+ frequency_response,
+ int filter_delay_blocks,
+ const absl::optional<float>& linear_filter_quality,
+ bool stationary_block);
+
+ // Returns the estimated frequency response for the reverb.
+ rtc::ArrayView<const float> FrequencyResponse() const {
+ return tail_response_;
+ }
+
+ private:
+ void Update(const std::vector<std::array<float, kFftLengthBy2Plus1>>&
+ frequency_response,
+ int filter_delay_blocks,
+ float linear_filter_quality);
+
+ const bool use_conservative_tail_frequency_response_;
+ float average_decay_ = 0.f;
+ std::array<float, kFftLengthBy2Plus1> tail_response_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AEC3_REVERB_FREQUENCY_RESPONSE_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/reverb_model.cc b/third_party/libwebrtc/modules/audio_processing/aec3/reverb_model.cc
new file mode 100644
index 0000000000..e4f3507d31
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/reverb_model.cc
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/reverb_model.h"
+
+#include <stddef.h>
+
+#include <algorithm>
+#include <functional>
+
+#include "api/array_view.h"
+
+namespace webrtc {
+
+ReverbModel::ReverbModel() {
+ Reset();
+}
+
+ReverbModel::~ReverbModel() = default;
+
+void ReverbModel::Reset() {
+ reverb_.fill(0.);
+}
+
+void ReverbModel::UpdateReverbNoFreqShaping(
+ rtc::ArrayView<const float> power_spectrum,
+ float power_spectrum_scaling,
+ float reverb_decay) {
+ if (reverb_decay > 0) {
+ // Update the estimate of the reverberant power.
+ for (size_t k = 0; k < power_spectrum.size(); ++k) {
+ reverb_[k] = (reverb_[k] + power_spectrum[k] * power_spectrum_scaling) *
+ reverb_decay;
+ }
+ }
+}
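+
+// Worked example of the recurrence above (assumed numbers): with a scaling
+// of 0.01, a decay of 0.8 and a constant input power of 100, the reverb
+// power converges to the fixed point r = (r + 1) * 0.8, i.e. r = 4, which
+// equals input * scaling * decay / (1 - decay).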
+
+void ReverbModel::UpdateReverb(
+ rtc::ArrayView<const float> power_spectrum,
+ rtc::ArrayView<const float> power_spectrum_scaling,
+ float reverb_decay) {
+ if (reverb_decay > 0) {
+ // Update the estimate of the reverberant power.
+ for (size_t k = 0; k < power_spectrum.size(); ++k) {
+ reverb_[k] =
+ (reverb_[k] + power_spectrum[k] * power_spectrum_scaling[k]) *
+ reverb_decay;
+ }
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/reverb_model.h b/third_party/libwebrtc/modules/audio_processing/aec3/reverb_model.h
new file mode 100644
index 0000000000..5ba54853da
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/reverb_model.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_REVERB_MODEL_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_REVERB_MODEL_H_
+
+#include <array>
+
+#include "api/array_view.h"
+#include "modules/audio_processing/aec3/aec3_common.h"
+
+namespace webrtc {
+
+// The ReverbModel class describes an exponential reverberant model
+// that can be applied over power spectra.
+class ReverbModel {
+ public:
+ ReverbModel();
+ ~ReverbModel();
+
+ // Resets the state.
+ void Reset();
+
+ // Returns the reverb.
+ rtc::ArrayView<const float, kFftLengthBy2Plus1> reverb() const {
+ return reverb_;
+ }
+
+  // The methods UpdateReverbNoFreqShaping and UpdateReverb update the
+  // estimate of the reverberation contribution to an input/output power
+  // spectrum. Before applying the exponential reverberant model, the input
+  // power spectrum is pre-scaled. Use UpdateReverb when a different scaling
+  // should be applied per frequency and UpdateReverbNoFreqShaping when the
+  // same scaling should be used for all frequencies.
+ void UpdateReverbNoFreqShaping(rtc::ArrayView<const float> power_spectrum,
+ float power_spectrum_scaling,
+ float reverb_decay);
+
+ // Update the reverb based on new data.
+ void UpdateReverb(rtc::ArrayView<const float> power_spectrum,
+ rtc::ArrayView<const float> power_spectrum_scaling,
+ float reverb_decay);
+
+ private:
+  std::array<float, kFftLengthBy2Plus1> reverb_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AEC3_REVERB_MODEL_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/reverb_model_estimator.cc b/third_party/libwebrtc/modules/audio_processing/aec3/reverb_model_estimator.cc
new file mode 100644
index 0000000000..5cd7a7870d
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/reverb_model_estimator.cc
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/reverb_model_estimator.h"
+
+namespace webrtc {
+
+ReverbModelEstimator::ReverbModelEstimator(const EchoCanceller3Config& config,
+ size_t num_capture_channels)
+ : reverb_decay_estimators_(num_capture_channels),
+ reverb_frequency_responses_(
+ num_capture_channels,
+ ReverbFrequencyResponse(
+ config.ep_strength.use_conservative_tail_frequency_response)) {
+ for (size_t ch = 0; ch < reverb_decay_estimators_.size(); ++ch) {
+ reverb_decay_estimators_[ch] =
+ std::make_unique<ReverbDecayEstimator>(config);
+ }
+}
+
+ReverbModelEstimator::~ReverbModelEstimator() = default;
+
+void ReverbModelEstimator::Update(
+ rtc::ArrayView<const std::vector<float>> impulse_responses,
+ rtc::ArrayView<const std::vector<std::array<float, kFftLengthBy2Plus1>>>
+ frequency_responses,
+ rtc::ArrayView<const absl::optional<float>> linear_filter_qualities,
+ rtc::ArrayView<const int> filter_delays_blocks,
+ const std::vector<bool>& usable_linear_estimates,
+ bool stationary_block) {
+ const size_t num_capture_channels = reverb_decay_estimators_.size();
+ RTC_DCHECK_EQ(num_capture_channels, impulse_responses.size());
+ RTC_DCHECK_EQ(num_capture_channels, frequency_responses.size());
+ RTC_DCHECK_EQ(num_capture_channels, usable_linear_estimates.size());
+
+ for (size_t ch = 0; ch < num_capture_channels; ++ch) {
+ // Estimate the frequency response for the reverb.
+ reverb_frequency_responses_[ch].Update(
+ frequency_responses[ch], filter_delays_blocks[ch],
+ linear_filter_qualities[ch], stationary_block);
+
+    // Estimate the reverb decay.
+ reverb_decay_estimators_[ch]->Update(
+ impulse_responses[ch], linear_filter_qualities[ch],
+ filter_delays_blocks[ch], usable_linear_estimates[ch],
+ stationary_block);
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/reverb_model_estimator.h b/third_party/libwebrtc/modules/audio_processing/aec3/reverb_model_estimator.h
new file mode 100644
index 0000000000..63bade977f
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/reverb_model_estimator.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_REVERB_MODEL_ESTIMATOR_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_REVERB_MODEL_ESTIMATOR_H_
+
+#include <array>
+#include <memory>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/array_view.h"
+#include "api/audio/echo_canceller3_config.h"
+#include "modules/audio_processing/aec3/aec3_common.h" // kFftLengthBy2Plus1
+#include "modules/audio_processing/aec3/reverb_decay_estimator.h"
+#include "modules/audio_processing/aec3/reverb_frequency_response.h"
+
+namespace webrtc {
+
+class ApmDataDumper;
+
+// Class for estimating the model parameters for the reverberant echo.
+class ReverbModelEstimator {
+ public:
+ ReverbModelEstimator(const EchoCanceller3Config& config,
+ size_t num_capture_channels);
+ ~ReverbModelEstimator();
+
+ // Updates the estimates based on new data.
+ void Update(
+ rtc::ArrayView<const std::vector<float>> impulse_responses,
+ rtc::ArrayView<const std::vector<std::array<float, kFftLengthBy2Plus1>>>
+ frequency_responses,
+ rtc::ArrayView<const absl::optional<float>> linear_filter_qualities,
+ rtc::ArrayView<const int> filter_delays_blocks,
+ const std::vector<bool>& usable_linear_estimates,
+ bool stationary_block);
+
+ // Returns the exponential decay of the reverberant echo. The parameter `mild`
+ // indicates which exponential decay to return, the default one or a milder
+ // one.
+ // TODO(peah): Correct to properly support multiple channels.
+ float ReverbDecay(bool mild) const {
+ return reverb_decay_estimators_[0]->Decay(mild);
+ }
+
+  // Returns the frequency response of the reverberant echo.
+ // TODO(peah): Correct to properly support multiple channels.
+ rtc::ArrayView<const float> GetReverbFrequencyResponse() const {
+ return reverb_frequency_responses_[0].FrequencyResponse();
+ }
+
+ // Dumps debug data.
+ void Dump(ApmDataDumper* data_dumper) const {
+ reverb_decay_estimators_[0]->Dump(data_dumper);
+ }
+
+ private:
+ std::vector<std::unique_ptr<ReverbDecayEstimator>> reverb_decay_estimators_;
+ std::vector<ReverbFrequencyResponse> reverb_frequency_responses_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AEC3_REVERB_MODEL_ESTIMATOR_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/reverb_model_estimator_unittest.cc b/third_party/libwebrtc/modules/audio_processing/aec3/reverb_model_estimator_unittest.cc
new file mode 100644
index 0000000000..fb7dcef37f
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/reverb_model_estimator_unittest.cc
@@ -0,0 +1,157 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/reverb_model_estimator.h"
+
+#include <algorithm>
+#include <array>
+#include <cmath>
+#include <numeric>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/array_view.h"
+#include "api/audio/echo_canceller3_config.h"
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "modules/audio_processing/aec3/aec3_fft.h"
+#include "modules/audio_processing/aec3/fft_data.h"
+#include "rtc_base/checks.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+namespace {
+
+EchoCanceller3Config CreateConfigForTest(float default_decay) {
+ EchoCanceller3Config cfg;
+ cfg.ep_strength.default_len = default_decay;
+ cfg.filter.refined.length_blocks = 40;
+ return cfg;
+}
+
+constexpr int kFilterDelayBlocks = 2;
+
+} // namespace
+
+class ReverbModelEstimatorTest {
+ public:
+ ReverbModelEstimatorTest(float default_decay, size_t num_capture_channels)
+ : aec3_config_(CreateConfigForTest(default_decay)),
+ estimated_decay_(default_decay),
+ h_(num_capture_channels,
+ std::vector<float>(
+ aec3_config_.filter.refined.length_blocks * kBlockSize,
+ 0.f)),
+ H2_(num_capture_channels,
+ std::vector<std::array<float, kFftLengthBy2Plus1>>(
+ aec3_config_.filter.refined.length_blocks)),
+ quality_linear_(num_capture_channels, 1.0f) {
+ CreateImpulseResponseWithDecay();
+ }
+ void RunEstimator();
+ float GetDecay(bool mild) {
+ return mild ? mild_estimated_decay_ : estimated_decay_;
+ }
+ float GetTrueDecay() { return kTruePowerDecay; }
+ float GetPowerTailDb() { return 10.f * std::log10(estimated_power_tail_); }
+ float GetTruePowerTailDb() { return 10.f * std::log10(true_power_tail_); }
+
+ private:
+ void CreateImpulseResponseWithDecay();
+ static constexpr bool kStationaryBlock = false;
+ static constexpr float kTruePowerDecay = 0.5f;
+ const EchoCanceller3Config aec3_config_;
+ float estimated_decay_;
+ float mild_estimated_decay_;
+ float estimated_power_tail_ = 0.f;
+ float true_power_tail_ = 0.f;
+ std::vector<std::vector<float>> h_;
+ std::vector<std::vector<std::array<float, kFftLengthBy2Plus1>>> H2_;
+ std::vector<absl::optional<float>> quality_linear_;
+};
+
+void ReverbModelEstimatorTest::CreateImpulseResponseWithDecay() {
+ const Aec3Fft fft;
+ for (const auto& h_k : h_) {
+ RTC_DCHECK_EQ(h_k.size(),
+ aec3_config_.filter.refined.length_blocks * kBlockSize);
+ }
+ for (const auto& H2_k : H2_) {
+ RTC_DCHECK_EQ(H2_k.size(), aec3_config_.filter.refined.length_blocks);
+ }
+ RTC_DCHECK_EQ(kFilterDelayBlocks, 2);
+
+ float decay_sample = std::sqrt(powf(kTruePowerDecay, 1.f / kBlockSize));
+ const size_t filter_delay_coefficients = kFilterDelayBlocks * kBlockSize;
+ for (auto& h_i : h_) {
+ std::fill(h_i.begin(), h_i.end(), 0.f);
+ h_i[filter_delay_coefficients] = 1.f;
+ for (size_t k = filter_delay_coefficients + 1; k < h_i.size(); ++k) {
+ h_i[k] = h_i[k - 1] * decay_sample;
+ }
+ }
+
+ for (size_t ch = 0; ch < H2_.size(); ++ch) {
+ for (size_t j = 0, k = 0; j < H2_[ch].size(); ++j, k += kBlockSize) {
+ std::array<float, kFftLength> fft_data;
+ fft_data.fill(0.f);
+ std::copy(h_[ch].begin() + k, h_[ch].begin() + k + kBlockSize,
+ fft_data.begin());
+ FftData H_j;
+ fft.Fft(&fft_data, &H_j);
+ H_j.Spectrum(Aec3Optimization::kNone, H2_[ch][j]);
+ }
+ }
+ rtc::ArrayView<float> H2_tail(H2_[0][H2_[0].size() - 1]);
+ true_power_tail_ = std::accumulate(H2_tail.begin(), H2_tail.end(), 0.f);
+}
+void ReverbModelEstimatorTest::RunEstimator() {
+ const size_t num_capture_channels = H2_.size();
+ constexpr bool kUsableLinearEstimate = true;
+ ReverbModelEstimator estimator(aec3_config_, num_capture_channels);
+ std::vector<bool> usable_linear_estimates(num_capture_channels,
+ kUsableLinearEstimate);
+ std::vector<int> filter_delay_blocks(num_capture_channels,
+ kFilterDelayBlocks);
+ for (size_t k = 0; k < 3000; ++k) {
+ estimator.Update(h_, H2_, quality_linear_, filter_delay_blocks,
+ usable_linear_estimates, kStationaryBlock);
+ }
+ estimated_decay_ = estimator.ReverbDecay(/*mild=*/false);
+ mild_estimated_decay_ = estimator.ReverbDecay(/*mild=*/true);
+ auto freq_resp_tail = estimator.GetReverbFrequencyResponse();
+ estimated_power_tail_ =
+ std::accumulate(freq_resp_tail.begin(), freq_resp_tail.end(), 0.f);
+}
+
+TEST(ReverbModelEstimatorTests, NotChangingDecay) {
+ constexpr float kDefaultDecay = 0.9f;
+ for (size_t num_capture_channels : {1, 2, 4, 8}) {
+ ReverbModelEstimatorTest test(kDefaultDecay, num_capture_channels);
+ test.RunEstimator();
+ EXPECT_EQ(test.GetDecay(/*mild=*/false), kDefaultDecay);
+ EXPECT_EQ(test.GetDecay(/*mild=*/true),
+ EchoCanceller3Config().ep_strength.nearend_len);
+ EXPECT_NEAR(test.GetPowerTailDb(), test.GetTruePowerTailDb(), 5.f);
+ }
+}
+
+TEST(ReverbModelEstimatorTests, ChangingDecay) {
+ constexpr float kDefaultDecay = -0.9f;
+ for (size_t num_capture_channels : {1, 2, 4, 8}) {
+ ReverbModelEstimatorTest test(kDefaultDecay, num_capture_channels);
+ test.RunEstimator();
+ EXPECT_NEAR(test.GetDecay(/*mild=*/false), test.GetTrueDecay(), 0.1f);
+ EXPECT_NEAR(test.GetDecay(/*mild=*/true), test.GetTrueDecay(), 0.1f);
+ EXPECT_NEAR(test.GetPowerTailDb(), test.GetTruePowerTailDb(), 5.f);
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/signal_dependent_erle_estimator.cc b/third_party/libwebrtc/modules/audio_processing/aec3/signal_dependent_erle_estimator.cc
new file mode 100644
index 0000000000..a5e77092a6
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/signal_dependent_erle_estimator.cc
@@ -0,0 +1,416 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/signal_dependent_erle_estimator.h"
+
+#include <algorithm>
+#include <functional>
+#include <numeric>
+
+#include "modules/audio_processing/aec3/spectrum_buffer.h"
+#include "rtc_base/numerics/safe_minmax.h"
+
+namespace webrtc {
+
+namespace {
+
+constexpr std::array<size_t, SignalDependentErleEstimator::kSubbands + 1>
+ kBandBoundaries = {1, 8, 16, 24, 32, 48, kFftLengthBy2Plus1};
+
+std::array<size_t, kFftLengthBy2Plus1> FormSubbandMap() {
+ std::array<size_t, kFftLengthBy2Plus1> map_band_to_subband;
+ size_t subband = 1;
+ for (size_t k = 0; k < map_band_to_subband.size(); ++k) {
+ RTC_DCHECK_LT(subband, kBandBoundaries.size());
+ if (k >= kBandBoundaries[subband]) {
+ subband++;
+ RTC_DCHECK_LT(k, kBandBoundaries[subband]);
+ }
+ map_band_to_subband[k] = subband - 1;
+ }
+ return map_band_to_subband;
+}
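+
+// With the boundaries above, the 65 FFT bins map onto kSubbands = 6 groups:
+// bins [0, 8) -> subband 0, [8, 16) -> 1, [16, 24) -> 2, [24, 32) -> 3,
+// [32, 48) -> 4 and [48, 65) -> 5, giving a finer resolution at the low
+// frequencies where the ERLE typically varies the most.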
+
+// Defines the size in blocks of the sections that are used for dividing the
+// linear filter. The sections are split in a non-linear manner so that the
+// lower sections, which typically represent the direct path, have a finer
+// resolution than the higher sections, which typically represent more
+// reverberant acoustic paths.
+std::vector<size_t> DefineFilterSectionSizes(size_t delay_headroom_blocks,
+ size_t num_blocks,
+ size_t num_sections) {
+ size_t filter_length_blocks = num_blocks - delay_headroom_blocks;
+ std::vector<size_t> section_sizes(num_sections);
+ size_t remaining_blocks = filter_length_blocks;
+ size_t remaining_sections = num_sections;
+ size_t estimator_size = 2;
+ size_t idx = 0;
+ while (remaining_sections > 1 &&
+ remaining_blocks > estimator_size * remaining_sections) {
+ RTC_DCHECK_LT(idx, section_sizes.size());
+ section_sizes[idx] = estimator_size;
+ remaining_blocks -= estimator_size;
+ remaining_sections--;
+ estimator_size *= 2;
+ idx++;
+ }
+
+ size_t last_groups_size = remaining_blocks / remaining_sections;
+ for (; idx < num_sections; idx++) {
+ section_sizes[idx] = last_groups_size;
+ }
+ section_sizes[num_sections - 1] +=
+ remaining_blocks - last_groups_size * remaining_sections;
+ return section_sizes;
+}
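+
+// Illustrative example (assumed values): with delay_headroom_blocks = 2,
+// num_blocks = 20 and num_sections = 3, the 18 usable blocks are split into
+// sections of sizes {2, 4, 12}: the loop assigns the doubling sizes 2 and 4,
+// and the last section absorbs the remaining blocks, so the early,
+// direct-path part of the filter is resolved more finely than the tail.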
+
+// Forms the limits in blocks for each filter section. Those sections
+// are used for analyzing the echo estimates and investigating which
+// linear filter sections contribute most to the echo estimate energy.
+std::vector<size_t> SetSectionsBoundaries(size_t delay_headroom_blocks,
+ size_t num_blocks,
+ size_t num_sections) {
+ std::vector<size_t> estimator_boundaries_blocks(num_sections + 1);
+ if (estimator_boundaries_blocks.size() == 2) {
+ estimator_boundaries_blocks[0] = 0;
+ estimator_boundaries_blocks[1] = num_blocks;
+ return estimator_boundaries_blocks;
+ }
+ RTC_DCHECK_GT(estimator_boundaries_blocks.size(), 2);
+ const std::vector<size_t> section_sizes =
+ DefineFilterSectionSizes(delay_headroom_blocks, num_blocks,
+ estimator_boundaries_blocks.size() - 1);
+
+ size_t idx = 0;
+ size_t current_size_block = 0;
+ RTC_DCHECK_EQ(section_sizes.size() + 1, estimator_boundaries_blocks.size());
+ estimator_boundaries_blocks[0] = delay_headroom_blocks;
+ for (size_t k = delay_headroom_blocks; k < num_blocks; ++k) {
+ current_size_block++;
+ if (current_size_block >= section_sizes[idx]) {
+ idx = idx + 1;
+ if (idx == section_sizes.size()) {
+ break;
+ }
+ estimator_boundaries_blocks[idx] = k + 1;
+ current_size_block = 0;
+ }
+ }
+ estimator_boundaries_blocks[section_sizes.size()] = num_blocks;
+ return estimator_boundaries_blocks;
+}
+
+std::array<float, SignalDependentErleEstimator::kSubbands>
+SetMaxErleSubbands(float max_erle_l, float max_erle_h, size_t limit_subband_l) {
+ std::array<float, SignalDependentErleEstimator::kSubbands> max_erle;
+ std::fill(max_erle.begin(), max_erle.begin() + limit_subband_l, max_erle_l);
+ std::fill(max_erle.begin() + limit_subband_l, max_erle.end(), max_erle_h);
+ return max_erle;
+}
+
+} // namespace
+
+SignalDependentErleEstimator::SignalDependentErleEstimator(
+ const EchoCanceller3Config& config,
+ size_t num_capture_channels)
+ : min_erle_(config.erle.min),
+ num_sections_(config.erle.num_sections),
+ num_blocks_(config.filter.refined.length_blocks),
+ delay_headroom_blocks_(config.delay.delay_headroom_samples / kBlockSize),
+ band_to_subband_(FormSubbandMap()),
+ max_erle_(SetMaxErleSubbands(config.erle.max_l,
+ config.erle.max_h,
+ band_to_subband_[kFftLengthBy2 / 2])),
+ section_boundaries_blocks_(SetSectionsBoundaries(delay_headroom_blocks_,
+ num_blocks_,
+ num_sections_)),
+ use_onset_detection_(config.erle.onset_detection),
+ erle_(num_capture_channels),
+ erle_onset_compensated_(num_capture_channels),
+ S2_section_accum_(
+ num_capture_channels,
+ std::vector<std::array<float, kFftLengthBy2Plus1>>(num_sections_)),
+ erle_estimators_(
+ num_capture_channels,
+ std::vector<std::array<float, kSubbands>>(num_sections_)),
+ erle_ref_(num_capture_channels),
+ correction_factors_(
+ num_capture_channels,
+ std::vector<std::array<float, kSubbands>>(num_sections_)),
+ num_updates_(num_capture_channels),
+ n_active_sections_(num_capture_channels) {
+ RTC_DCHECK_LE(num_sections_, num_blocks_);
+ RTC_DCHECK_GE(num_sections_, 1);
+ Reset();
+}
+
+SignalDependentErleEstimator::~SignalDependentErleEstimator() = default;
+
+void SignalDependentErleEstimator::Reset() {
+ for (size_t ch = 0; ch < erle_.size(); ++ch) {
+ erle_[ch].fill(min_erle_);
+ erle_onset_compensated_[ch].fill(min_erle_);
+ for (auto& erle_estimator : erle_estimators_[ch]) {
+ erle_estimator.fill(min_erle_);
+ }
+ erle_ref_[ch].fill(min_erle_);
+ for (auto& factor : correction_factors_[ch]) {
+ factor.fill(1.0f);
+ }
+ num_updates_[ch].fill(0);
+ n_active_sections_[ch].fill(0);
+ }
+}
+
+// Updates the Erle estimate by analyzing the current input signals. It uses
+// the render buffer and the filter frequency response to estimate the number
+// of linear filter sections that are needed for capturing the majority of the
+// energy in the echo estimate. Based on that number of sections, it refines
+// the erle estimate by applying a correction factor to the erle that is given
+// as an input to this method.
+void SignalDependentErleEstimator::Update(
+ const RenderBuffer& render_buffer,
+ rtc::ArrayView<const std::vector<std::array<float, kFftLengthBy2Plus1>>>
+ filter_frequency_responses,
+ rtc::ArrayView<const float, kFftLengthBy2Plus1> X2,
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>> Y2,
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>> E2,
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>> average_erle,
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>>
+ average_erle_onset_compensated,
+ const std::vector<bool>& converged_filters) {
+ RTC_DCHECK_GT(num_sections_, 1);
+
+  // Gets the number of filter sections that are needed for achieving 90% of
+  // the power spectrum energy of the echo estimate.
+ ComputeNumberOfActiveFilterSections(render_buffer,
+ filter_frequency_responses);
+
+  // Updates the correction factors that are used for correcting the erle and
+  // adapting it to the particular characteristics of the input signal.
+ UpdateCorrectionFactors(X2, Y2, E2, converged_filters);
+
+  // Applies the correction factor to the input erle to obtain a more refined
+  // erle estimate for the current input signal.
+ for (size_t ch = 0; ch < erle_.size(); ++ch) {
+ for (size_t k = 0; k < kFftLengthBy2; ++k) {
+ RTC_DCHECK_GT(correction_factors_[ch].size(), n_active_sections_[ch][k]);
+ float correction_factor =
+ correction_factors_[ch][n_active_sections_[ch][k]]
+ [band_to_subband_[k]];
+ erle_[ch][k] = rtc::SafeClamp(average_erle[ch][k] * correction_factor,
+ min_erle_, max_erle_[band_to_subband_[k]]);
+ if (use_onset_detection_) {
+ erle_onset_compensated_[ch][k] = rtc::SafeClamp(
+ average_erle_onset_compensated[ch][k] * correction_factor,
+ min_erle_, max_erle_[band_to_subband_[k]]);
+ }
+ }
+ }
+}
+
+void SignalDependentErleEstimator::Dump(
+ const std::unique_ptr<ApmDataDumper>& data_dumper) const {
+ for (auto& erle : erle_estimators_[0]) {
+ data_dumper->DumpRaw("aec3_all_erle", erle);
+ }
+ data_dumper->DumpRaw("aec3_ref_erle", erle_ref_[0]);
+ for (auto& factor : correction_factors_[0]) {
+ data_dumper->DumpRaw("aec3_erle_correction_factor", factor);
+ }
+}
+
+// Estimates for each band the smallest number of sections in the filter that
+// together constitute 90% of the estimated echo energy.
+void SignalDependentErleEstimator::ComputeNumberOfActiveFilterSections(
+ const RenderBuffer& render_buffer,
+ rtc::ArrayView<const std::vector<std::array<float, kFftLengthBy2Plus1>>>
+ filter_frequency_responses) {
+ RTC_DCHECK_GT(num_sections_, 1);
+ // Computes an approximation of the power spectrum if the filter would have
+ // been limited to a certain number of filter sections.
+ ComputeEchoEstimatePerFilterSection(render_buffer,
+ filter_frequency_responses);
+  // For each band, computes the number of filter sections that are needed for
+  // achieving 90% of the energy in the echo estimate.
+ ComputeActiveFilterSections();
+}
+
+void SignalDependentErleEstimator::UpdateCorrectionFactors(
+ rtc::ArrayView<const float, kFftLengthBy2Plus1> X2,
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>> Y2,
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>> E2,
+ const std::vector<bool>& converged_filters) {
+ for (size_t ch = 0; ch < converged_filters.size(); ++ch) {
+ if (converged_filters[ch]) {
+ constexpr float kX2BandEnergyThreshold = 44015068.0f;
+ constexpr float kSmthConstantDecreases = 0.1f;
+ constexpr float kSmthConstantIncreases = kSmthConstantDecreases / 2.f;
+ auto subband_powers = [](rtc::ArrayView<const float> power_spectrum,
+ rtc::ArrayView<float> power_spectrum_subbands) {
+ for (size_t subband = 0; subband < kSubbands; ++subband) {
+ RTC_DCHECK_LE(kBandBoundaries[subband + 1], power_spectrum.size());
+ power_spectrum_subbands[subband] = std::accumulate(
+ power_spectrum.begin() + kBandBoundaries[subband],
+ power_spectrum.begin() + kBandBoundaries[subband + 1], 0.f);
+ }
+ };
+
+ std::array<float, kSubbands> X2_subbands, E2_subbands, Y2_subbands;
+ subband_powers(X2, X2_subbands);
+ subband_powers(E2[ch], E2_subbands);
+ subband_powers(Y2[ch], Y2_subbands);
+ std::array<size_t, kSubbands> idx_subbands;
+ for (size_t subband = 0; subband < kSubbands; ++subband) {
+        // When aggregating the number of active sections in the filter for
+        // different bands, the minimum over all of them is taken. As an
+        // example, if the direct path is the main contributor to the echo
+        // estimate for one of the bands, the direct path is considered to be
+        // the main contributor for the whole subband that contains that band.
+        // That aggregate number of sections is later used as the identifier
+        // of the erle estimator that needs to be updated.
+ RTC_DCHECK_LE(kBandBoundaries[subband + 1],
+ n_active_sections_[ch].size());
+ idx_subbands[subband] = *std::min_element(
+ n_active_sections_[ch].begin() + kBandBoundaries[subband],
+ n_active_sections_[ch].begin() + kBandBoundaries[subband + 1]);
+ }
+
+ std::array<float, kSubbands> new_erle;
+ std::array<bool, kSubbands> is_erle_updated;
+ is_erle_updated.fill(false);
+ new_erle.fill(0.f);
+ for (size_t subband = 0; subband < kSubbands; ++subband) {
+ if (X2_subbands[subband] > kX2BandEnergyThreshold &&
+ E2_subbands[subband] > 0) {
+ new_erle[subband] = Y2_subbands[subband] / E2_subbands[subband];
+ RTC_DCHECK_GT(new_erle[subband], 0);
+ is_erle_updated[subband] = true;
+ ++num_updates_[ch][subband];
+ }
+ }
+
+ for (size_t subband = 0; subband < kSubbands; ++subband) {
+ const size_t idx = idx_subbands[subband];
+ RTC_DCHECK_LT(idx, erle_estimators_[ch].size());
+ float alpha = new_erle[subband] > erle_estimators_[ch][idx][subband]
+ ? kSmthConstantIncreases
+ : kSmthConstantDecreases;
+ alpha = static_cast<float>(is_erle_updated[subband]) * alpha;
+ erle_estimators_[ch][idx][subband] +=
+ alpha * (new_erle[subband] - erle_estimators_[ch][idx][subband]);
+ erle_estimators_[ch][idx][subband] = rtc::SafeClamp(
+ erle_estimators_[ch][idx][subband], min_erle_, max_erle_[subband]);
+ }
+
+ for (size_t subband = 0; subband < kSubbands; ++subband) {
+ float alpha = new_erle[subband] > erle_ref_[ch][subband]
+ ? kSmthConstantIncreases
+ : kSmthConstantDecreases;
+ alpha = static_cast<float>(is_erle_updated[subband]) * alpha;
+ erle_ref_[ch][subband] +=
+ alpha * (new_erle[subband] - erle_ref_[ch][subband]);
+ erle_ref_[ch][subband] = rtc::SafeClamp(erle_ref_[ch][subband],
+ min_erle_, max_erle_[subband]);
+ }
+
+ for (size_t subband = 0; subband < kSubbands; ++subband) {
+ constexpr int kNumUpdateThr = 50;
+ if (is_erle_updated[subband] &&
+ num_updates_[ch][subband] > kNumUpdateThr) {
+ const size_t idx = idx_subbands[subband];
+ RTC_DCHECK_GT(erle_ref_[ch][subband], 0.f);
+ // Computes the ratio between the erle that is updated using all the
+ // points and the erle that is updated only on signals that share the
+ // same number of active filter sections.
+ float new_correction_factor =
+ erle_estimators_[ch][idx][subband] / erle_ref_[ch][subband];
+
+ correction_factors_[ch][idx][subband] +=
+ 0.1f *
+ (new_correction_factor - correction_factors_[ch][idx][subband]);
+ }
+ }
+ }
+ }
+}
+
+void SignalDependentErleEstimator::ComputeEchoEstimatePerFilterSection(
+ const RenderBuffer& render_buffer,
+ rtc::ArrayView<const std::vector<std::array<float, kFftLengthBy2Plus1>>>
+ filter_frequency_responses) {
+ const SpectrumBuffer& spectrum_render_buffer =
+ render_buffer.GetSpectrumBuffer();
+ const size_t num_render_channels = spectrum_render_buffer.buffer[0].size();
+ const size_t num_capture_channels = S2_section_accum_.size();
+ const float one_by_num_render_channels = 1.f / num_render_channels;
+
+ RTC_DCHECK_EQ(S2_section_accum_.size(), filter_frequency_responses.size());
+
+ for (size_t capture_ch = 0; capture_ch < num_capture_channels; ++capture_ch) {
+ RTC_DCHECK_EQ(S2_section_accum_[capture_ch].size() + 1,
+ section_boundaries_blocks_.size());
+ size_t idx_render = render_buffer.Position();
+ idx_render = spectrum_render_buffer.OffsetIndex(
+ idx_render, section_boundaries_blocks_[0]);
+
+ for (size_t section = 0; section < num_sections_; ++section) {
+ std::array<float, kFftLengthBy2Plus1> X2_section;
+ std::array<float, kFftLengthBy2Plus1> H2_section;
+ X2_section.fill(0.f);
+ H2_section.fill(0.f);
+ const size_t block_limit =
+ std::min(section_boundaries_blocks_[section + 1],
+ filter_frequency_responses[capture_ch].size());
+ for (size_t block = section_boundaries_blocks_[section];
+ block < block_limit; ++block) {
+ for (size_t render_ch = 0;
+ render_ch < spectrum_render_buffer.buffer[idx_render].size();
+ ++render_ch) {
+ for (size_t k = 0; k < X2_section.size(); ++k) {
+ X2_section[k] +=
+ spectrum_render_buffer.buffer[idx_render][render_ch][k] *
+ one_by_num_render_channels;
+ }
+ }
+ std::transform(H2_section.begin(), H2_section.end(),
+ filter_frequency_responses[capture_ch][block].begin(),
+ H2_section.begin(), std::plus<float>());
+ idx_render = spectrum_render_buffer.IncIndex(idx_render);
+ }
+
+ std::transform(X2_section.begin(), X2_section.end(), H2_section.begin(),
+ S2_section_accum_[capture_ch][section].begin(),
+ std::multiplies<float>());
+ }
+
+ for (size_t section = 1; section < num_sections_; ++section) {
+ std::transform(S2_section_accum_[capture_ch][section - 1].begin(),
+ S2_section_accum_[capture_ch][section - 1].end(),
+ S2_section_accum_[capture_ch][section].begin(),
+ S2_section_accum_[capture_ch][section].begin(),
+ std::plus<float>());
+ }
+ }
+}
+
+void SignalDependentErleEstimator::ComputeActiveFilterSections() {
+ for (size_t ch = 0; ch < n_active_sections_.size(); ++ch) {
+ std::fill(n_active_sections_[ch].begin(), n_active_sections_[ch].end(), 0);
+ for (size_t k = 0; k < kFftLengthBy2Plus1; ++k) {
+ size_t section = num_sections_;
+ float target = 0.9f * S2_section_accum_[ch][num_sections_ - 1][k];
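+      // Walks backwards from the full filter and keeps the smallest number of
+      // sections whose accumulated echo energy still reaches the 90% target.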
+ while (section > 0 && S2_section_accum_[ch][section - 1][k] >= target) {
+ n_active_sections_[ch][k] = --section;
+ }
+ }
+ }
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/signal_dependent_erle_estimator.h b/third_party/libwebrtc/modules/audio_processing/aec3/signal_dependent_erle_estimator.h
new file mode 100644
index 0000000000..6847c1ab13
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/signal_dependent_erle_estimator.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_SIGNAL_DEPENDENT_ERLE_ESTIMATOR_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_SIGNAL_DEPENDENT_ERLE_ESTIMATOR_H_
+
+#include <memory>
+#include <vector>
+
+#include "api/array_view.h"
+#include "api/audio/echo_canceller3_config.h"
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "modules/audio_processing/aec3/render_buffer.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+
+namespace webrtc {
+
+// This class estimates the dependency of the Erle on the input signal. By
+// looking at the input signal, it estimates whether the current echo estimate
+// is due to the direct path or to a more reverberant one. Based on that
+// estimate, it is possible to refine the average Erle that this class
+// receives as an input.
+class SignalDependentErleEstimator {
+ public:
+ SignalDependentErleEstimator(const EchoCanceller3Config& config,
+ size_t num_capture_channels);
+
+ ~SignalDependentErleEstimator();
+
+ void Reset();
+
+  // Returns the Erle estimate per frequency band.
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>> Erle(
+ bool onset_compensated) const {
+ return onset_compensated && use_onset_detection_ ? erle_onset_compensated_
+ : erle_;
+ }
+
+  // Updates the Erle estimate. The Erle that is passed as an input is required
+  // to be an estimate of the average Erle achieved by the linear filter.
+ void Update(
+ const RenderBuffer& render_buffer,
+ rtc::ArrayView<const std::vector<std::array<float, kFftLengthBy2Plus1>>>
+ filter_frequency_response,
+ rtc::ArrayView<const float, kFftLengthBy2Plus1> X2,
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>> Y2,
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>> E2,
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>> average_erle,
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>>
+ average_erle_onset_compensated,
+ const std::vector<bool>& converged_filters);
+
+ void Dump(const std::unique_ptr<ApmDataDumper>& data_dumper) const;
+
+ static constexpr size_t kSubbands = 6;
+
+ private:
+ void ComputeNumberOfActiveFilterSections(
+ const RenderBuffer& render_buffer,
+ rtc::ArrayView<const std::vector<std::array<float, kFftLengthBy2Plus1>>>
+ filter_frequency_responses);
+
+ void UpdateCorrectionFactors(
+ rtc::ArrayView<const float, kFftLengthBy2Plus1> X2,
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>> Y2,
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>> E2,
+ const std::vector<bool>& converged_filters);
+
+ void ComputeEchoEstimatePerFilterSection(
+ const RenderBuffer& render_buffer,
+ rtc::ArrayView<const std::vector<std::array<float, kFftLengthBy2Plus1>>>
+ filter_frequency_responses);
+
+ void ComputeActiveFilterSections();
+
+ const float min_erle_;
+ const size_t num_sections_;
+ const size_t num_blocks_;
+ const size_t delay_headroom_blocks_;
+ const std::array<size_t, kFftLengthBy2Plus1> band_to_subband_;
+ const std::array<float, kSubbands> max_erle_;
+ const std::vector<size_t> section_boundaries_blocks_;
+ const bool use_onset_detection_;
+ std::vector<std::array<float, kFftLengthBy2Plus1>> erle_;
+ std::vector<std::array<float, kFftLengthBy2Plus1>> erle_onset_compensated_;
+ std::vector<std::vector<std::array<float, kFftLengthBy2Plus1>>>
+ S2_section_accum_;
+ std::vector<std::vector<std::array<float, kSubbands>>> erle_estimators_;
+ std::vector<std::array<float, kSubbands>> erle_ref_;
+ std::vector<std::vector<std::array<float, kSubbands>>> correction_factors_;
+ std::vector<std::array<int, kSubbands>> num_updates_;
+ std::vector<std::array<size_t, kFftLengthBy2Plus1>> n_active_sections_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AEC3_SIGNAL_DEPENDENT_ERLE_ESTIMATOR_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/signal_dependent_erle_estimator_unittest.cc b/third_party/libwebrtc/modules/audio_processing/aec3/signal_dependent_erle_estimator_unittest.cc
new file mode 100644
index 0000000000..67927a6c68
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/signal_dependent_erle_estimator_unittest.cc
@@ -0,0 +1,208 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/signal_dependent_erle_estimator.h"
+
+#include <algorithm>
+#include <iostream>
+#include <string>
+
+#include "api/audio/echo_canceller3_config.h"
+#include "modules/audio_processing/aec3/render_buffer.h"
+#include "modules/audio_processing/aec3/render_delay_buffer.h"
+#include "rtc_base/strings/string_builder.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+namespace {
+
+void GetActiveFrame(Block* x) {
+ const std::array<float, kBlockSize> frame = {
+ 7459.88, 17209.6, 17383, 20768.9, 16816.7, 18386.3, 4492.83, 9675.85,
+ 6665.52, 14808.6, 9342.3, 7483.28, 19261.7, 4145.98, 1622.18, 13475.2,
+ 7166.32, 6856.61, 21937, 7263.14, 9569.07, 14919, 8413.32, 7551.89,
+ 7848.65, 6011.27, 13080.6, 15865.2, 12656, 17459.6, 4263.93, 4503.03,
+ 9311.79, 21095.8, 12657.9, 13906.6, 19267.2, 11338.1, 16828.9, 11501.6,
+ 11405, 15031.4, 14541.6, 19765.5, 18346.3, 19350.2, 3157.47, 18095.8,
+ 1743.68, 21328.2, 19727.5, 7295.16, 10332.4, 11055.5, 20107.4, 14708.4,
+ 12416.2, 16434, 2454.69, 9840.8, 6867.23, 1615.75, 6059.9, 8394.19};
+ for (int band = 0; band < x->NumBands(); ++band) {
+ for (int channel = 0; channel < x->NumChannels(); ++channel) {
+ RTC_DCHECK_GE(kBlockSize, frame.size());
+ std::copy(frame.begin(), frame.end(), x->begin(band, channel));
+ }
+ }
+}
+
+class TestInputs {
+ public:
+ TestInputs(const EchoCanceller3Config& cfg,
+ size_t num_render_channels,
+ size_t num_capture_channels);
+ ~TestInputs();
+ const RenderBuffer& GetRenderBuffer() { return *render_buffer_; }
+ rtc::ArrayView<const float, kFftLengthBy2Plus1> GetX2() { return X2_; }
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>> GetY2() const {
+ return Y2_;
+ }
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>> GetE2() const {
+ return E2_;
+ }
+ rtc::ArrayView<const std::vector<std::array<float, kFftLengthBy2Plus1>>>
+ GetH2() const {
+ return H2_;
+ }
+ const std::vector<bool>& GetConvergedFilters() const {
+ return converged_filters_;
+ }
+ void Update();
+
+ private:
+ void UpdateCurrentPowerSpectra();
+ int n_ = 0;
+ std::unique_ptr<RenderDelayBuffer> render_delay_buffer_;
+ RenderBuffer* render_buffer_;
+ std::array<float, kFftLengthBy2Plus1> X2_;
+ std::vector<std::array<float, kFftLengthBy2Plus1>> Y2_;
+ std::vector<std::array<float, kFftLengthBy2Plus1>> E2_;
+ std::vector<std::vector<std::array<float, kFftLengthBy2Plus1>>> H2_;
+ Block x_;
+ std::vector<bool> converged_filters_;
+};
+
+TestInputs::TestInputs(const EchoCanceller3Config& cfg,
+ size_t num_render_channels,
+ size_t num_capture_channels)
+ : render_delay_buffer_(
+ RenderDelayBuffer::Create(cfg, 16000, num_render_channels)),
+ Y2_(num_capture_channels),
+ E2_(num_capture_channels),
+ H2_(num_capture_channels,
+ std::vector<std::array<float, kFftLengthBy2Plus1>>(
+ cfg.filter.refined.length_blocks)),
+ x_(1, num_render_channels),
+ converged_filters_(num_capture_channels, true) {
+ render_delay_buffer_->AlignFromDelay(4);
+ render_buffer_ = render_delay_buffer_->GetRenderBuffer();
+ for (auto& H2_ch : H2_) {
+ for (auto& H2_p : H2_ch) {
+ H2_p.fill(0.f);
+ }
+ }
+ for (auto& H2_p : H2_[0]) {
+ H2_p.fill(1.f);
+ }
+}
+
+TestInputs::~TestInputs() = default;
+
+void TestInputs::Update() {
+ if (n_ % 2 == 0) {
+ std::fill(x_.begin(/*band=*/0, /*channel=*/0),
+ x_.end(/*band=*/0, /*channel=*/0), 0.f);
+ } else {
+ GetActiveFrame(&x_);
+ }
+
+ render_delay_buffer_->Insert(x_);
+ render_delay_buffer_->PrepareCaptureProcessing();
+ UpdateCurrentPowerSpectra();
+ ++n_;
+}
+
+void TestInputs::UpdateCurrentPowerSpectra() {
+ const SpectrumBuffer& spectrum_render_buffer =
+ render_buffer_->GetSpectrumBuffer();
+ size_t idx = render_buffer_->Position();
+ size_t prev_idx = spectrum_render_buffer.OffsetIndex(idx, 1);
+ auto& X2 = spectrum_render_buffer.buffer[idx][/*channel=*/0];
+ auto& X2_prev = spectrum_render_buffer.buffer[prev_idx][/*channel=*/0];
+ std::copy(X2.begin(), X2.end(), X2_.begin());
+ for (size_t ch = 0; ch < Y2_.size(); ++ch) {
+ RTC_DCHECK_EQ(X2.size(), Y2_[ch].size());
+ for (size_t k = 0; k < X2.size(); ++k) {
+ E2_[ch][k] = 0.01f * X2_prev[k];
+ Y2_[ch][k] = X2[k] + E2_[ch][k];
+ }
+ }
+}
+
+} // namespace
+
+class SignalDependentErleEstimatorMultiChannel
+ : public ::testing::Test,
+ public ::testing::WithParamInterface<std::tuple<size_t, size_t>> {};
+
+INSTANTIATE_TEST_SUITE_P(MultiChannel,
+ SignalDependentErleEstimatorMultiChannel,
+ ::testing::Combine(::testing::Values(1, 2, 4),
+ ::testing::Values(1, 2, 4)));
+
+TEST_P(SignalDependentErleEstimatorMultiChannel, SweepSettings) {
+ const size_t num_render_channels = std::get<0>(GetParam());
+ const size_t num_capture_channels = std::get<1>(GetParam());
+ EchoCanceller3Config cfg;
+ size_t max_length_blocks = 50;
+ for (size_t blocks = 1; blocks < max_length_blocks; blocks = blocks + 10) {
+ for (size_t delay_headroom = 0; delay_headroom < 5; ++delay_headroom) {
+ for (size_t num_sections = 2; num_sections < max_length_blocks;
+ ++num_sections) {
+ cfg.filter.refined.length_blocks = blocks;
+ cfg.filter.refined_initial.length_blocks =
+ std::min(cfg.filter.refined_initial.length_blocks, blocks);
+ cfg.delay.delay_headroom_samples = delay_headroom * kBlockSize;
+ cfg.erle.num_sections = num_sections;
+ if (EchoCanceller3Config::Validate(&cfg)) {
+ SignalDependentErleEstimator s(cfg, num_capture_channels);
+ std::vector<std::array<float, kFftLengthBy2Plus1>> average_erle(
+ num_capture_channels);
+ for (auto& e : average_erle) {
+ e.fill(cfg.erle.max_l);
+ }
+ TestInputs inputs(cfg, num_render_channels, num_capture_channels);
+ for (size_t n = 0; n < 10; ++n) {
+ inputs.Update();
+ s.Update(inputs.GetRenderBuffer(), inputs.GetH2(), inputs.GetX2(),
+ inputs.GetY2(), inputs.GetE2(), average_erle, average_erle,
+ inputs.GetConvergedFilters());
+ }
+ }
+ }
+ }
+ }
+}
+
+TEST_P(SignalDependentErleEstimatorMultiChannel, LongerRun) {
+ const size_t num_render_channels = std::get<0>(GetParam());
+ const size_t num_capture_channels = std::get<1>(GetParam());
+ EchoCanceller3Config cfg;
+ cfg.filter.refined.length_blocks = 2;
+ cfg.filter.refined_initial.length_blocks = 1;
+ cfg.delay.delay_headroom_samples = 0;
+ cfg.delay.hysteresis_limit_blocks = 0;
+ cfg.erle.num_sections = 2;
+  EXPECT_TRUE(EchoCanceller3Config::Validate(&cfg));
+ std::vector<std::array<float, kFftLengthBy2Plus1>> average_erle(
+ num_capture_channels);
+ for (auto& e : average_erle) {
+ e.fill(cfg.erle.max_l);
+ }
+ SignalDependentErleEstimator s(cfg, num_capture_channels);
+ TestInputs inputs(cfg, num_render_channels, num_capture_channels);
+ for (size_t n = 0; n < 200; ++n) {
+ inputs.Update();
+ s.Update(inputs.GetRenderBuffer(), inputs.GetH2(), inputs.GetX2(),
+ inputs.GetY2(), inputs.GetE2(), average_erle, average_erle,
+ inputs.GetConvergedFilters());
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/spectrum_buffer.cc b/third_party/libwebrtc/modules/audio_processing/aec3/spectrum_buffer.cc
new file mode 100644
index 0000000000..fe32ece09c
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/spectrum_buffer.cc
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/spectrum_buffer.h"
+
+#include <algorithm>
+
+namespace webrtc {
+
+SpectrumBuffer::SpectrumBuffer(size_t size, size_t num_channels)
+ : size(static_cast<int>(size)),
+ buffer(size,
+ std::vector<std::array<float, kFftLengthBy2Plus1>>(num_channels)) {
+ for (auto& channel : buffer) {
+ for (auto& c : channel) {
+ std::fill(c.begin(), c.end(), 0.f);
+ }
+ }
+}
+
+SpectrumBuffer::~SpectrumBuffer() = default;
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/spectrum_buffer.h b/third_party/libwebrtc/modules/audio_processing/aec3/spectrum_buffer.h
new file mode 100644
index 0000000000..51e1317f55
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/spectrum_buffer.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_SPECTRUM_BUFFER_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_SPECTRUM_BUFFER_H_
+
+#include <stddef.h>
+
+#include <array>
+#include <vector>
+
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+// Struct for bundling a circular buffer of power spectra (one spectrum per
+// channel) together with the read and write indices.
+struct SpectrumBuffer {
+ SpectrumBuffer(size_t size, size_t num_channels);
+ ~SpectrumBuffer();
+
+ int IncIndex(int index) const {
+ RTC_DCHECK_EQ(buffer.size(), static_cast<size_t>(size));
+ return index < size - 1 ? index + 1 : 0;
+ }
+
+ int DecIndex(int index) const {
+ RTC_DCHECK_EQ(buffer.size(), static_cast<size_t>(size));
+ return index > 0 ? index - 1 : size - 1;
+ }
+
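+  // Returns (index + offset) modulo size, e.g. with size = 10, index = 8 and
+  // offset = 3 the result is 1. Negative offsets are allowed as long as
+  // size + index + offset stays non-negative.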
+ int OffsetIndex(int index, int offset) const {
+ RTC_DCHECK_GE(size, offset);
+ RTC_DCHECK_EQ(buffer.size(), static_cast<size_t>(size));
+ RTC_DCHECK_GE(size + index + offset, 0);
+ return (size + index + offset) % size;
+ }
+
+ void UpdateWriteIndex(int offset) { write = OffsetIndex(write, offset); }
+ void IncWriteIndex() { write = IncIndex(write); }
+ void DecWriteIndex() { write = DecIndex(write); }
+ void UpdateReadIndex(int offset) { read = OffsetIndex(read, offset); }
+ void IncReadIndex() { read = IncIndex(read); }
+ void DecReadIndex() { read = DecIndex(read); }
+
+ const int size;
+ std::vector<std::vector<std::array<float, kFftLengthBy2Plus1>>> buffer;
+ int write = 0;
+ int read = 0;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AEC3_SPECTRUM_BUFFER_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/stationarity_estimator.cc b/third_party/libwebrtc/modules/audio_processing/aec3/stationarity_estimator.cc
new file mode 100644
index 0000000000..4d364041b3
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/stationarity_estimator.cc
@@ -0,0 +1,241 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/stationarity_estimator.h"
+
+#include <algorithm>
+#include <array>
+
+#include "api/array_view.h"
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "modules/audio_processing/aec3/spectrum_buffer.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+
+namespace webrtc {
+
+namespace {
+constexpr float kMinNoisePower = 10.f;
+constexpr int kHangoverBlocks = kNumBlocksPerSecond / 20;
+constexpr int kNBlocksAverageInitPhase = 20;
+constexpr int kNBlocksInitialPhase = kNumBlocksPerSecond * 2;
+} // namespace
+
+StationarityEstimator::StationarityEstimator()
+ : data_dumper_(new ApmDataDumper(instance_count_.fetch_add(1) + 1)) {
+ Reset();
+}
+
+StationarityEstimator::~StationarityEstimator() = default;
+
+void StationarityEstimator::Reset() {
+ noise_.Reset();
+ hangovers_.fill(0);
+ stationarity_flags_.fill(false);
+}
+
+// Update just the noise estimator. Useful until the delay is known.
+void StationarityEstimator::UpdateNoiseEstimator(
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>> spectrum) {
+ noise_.Update(spectrum);
+ data_dumper_->DumpRaw("aec3_stationarity_noise_spectrum", noise_.Spectrum());
+ data_dumper_->DumpRaw("aec3_stationarity_is_block_stationary",
+ IsBlockStationary());
+}
+
+void StationarityEstimator::UpdateStationarityFlags(
+ const SpectrumBuffer& spectrum_buffer,
+ rtc::ArrayView<const float> render_reverb_contribution_spectrum,
+ int idx_current,
+ int num_lookahead) {
+ std::array<int, kWindowLength> indexes;
+ int num_lookahead_bounded = std::min(num_lookahead, kWindowLength - 1);
+ int idx = idx_current;
+
+ if (num_lookahead_bounded < kWindowLength - 1) {
+ int num_lookback = (kWindowLength - 1) - num_lookahead_bounded;
+ idx = spectrum_buffer.OffsetIndex(idx_current, num_lookback);
+ }
+  // For estimating the stationarity properties of the current frame, the
+  // power for each band is accumulated over several consecutive spectra in
+  // the method EstimateBandStationarity.
+  // In order to avoid recomputing the indexes of those spectra for every
+  // band, with the associated overhead, the indexes are stored in an array
+  // and reused when the estimation is done.
+ indexes[0] = idx;
+ for (size_t k = 1; k < indexes.size(); ++k) {
+ indexes[k] = spectrum_buffer.DecIndex(indexes[k - 1]);
+ }
+ RTC_DCHECK_EQ(
+ spectrum_buffer.DecIndex(indexes[kWindowLength - 1]),
+ spectrum_buffer.OffsetIndex(idx_current, -(num_lookahead_bounded + 1)));
+
+ for (size_t k = 0; k < stationarity_flags_.size(); ++k) {
+ stationarity_flags_[k] = EstimateBandStationarity(
+ spectrum_buffer, render_reverb_contribution_spectrum, indexes, k);
+ }
+ UpdateHangover();
+ SmoothStationaryPerFreq();
+}
+
+bool StationarityEstimator::IsBlockStationary() const {
+ float acum_stationarity = 0.f;
+ RTC_DCHECK_EQ(stationarity_flags_.size(), kFftLengthBy2Plus1);
+ for (size_t band = 0; band < stationarity_flags_.size(); ++band) {
+ bool st = IsBandStationary(band);
+ acum_stationarity += static_cast<float>(st);
+ }
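+  // The block is declared stationary when more than 75% of its bands are
+  // stationary.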
+ return ((acum_stationarity * (1.f / kFftLengthBy2Plus1)) > 0.75f);
+}
+
+bool StationarityEstimator::EstimateBandStationarity(
+ const SpectrumBuffer& spectrum_buffer,
+ rtc::ArrayView<const float> average_reverb,
+ const std::array<int, kWindowLength>& indexes,
+ size_t band) const {
+ constexpr float kThrStationarity = 10.f;
+ float acum_power = 0.f;
+ const int num_render_channels =
+ static_cast<int>(spectrum_buffer.buffer[0].size());
+ const float one_by_num_channels = 1.f / num_render_channels;
+ for (auto idx : indexes) {
+ for (int ch = 0; ch < num_render_channels; ++ch) {
+ acum_power += spectrum_buffer.buffer[idx][ch][band] * one_by_num_channels;
+ }
+ }
+ acum_power += average_reverb[band];
+ float noise = kWindowLength * GetStationarityPowerBand(band);
+ RTC_CHECK_LT(0.f, noise);
+ bool stationary = acum_power < kThrStationarity * noise;
+ data_dumper_->DumpRaw("aec3_stationarity_long_ratio", acum_power / noise);
+ return stationary;
+}
+
+bool StationarityEstimator::AreAllBandsStationary() {
+ for (auto b : stationarity_flags_) {
+ if (!b)
+ return false;
+ }
+ return true;
+}
+
+void StationarityEstimator::UpdateHangover() {
+ bool reduce_hangover = AreAllBandsStationary();
+ for (size_t k = 0; k < stationarity_flags_.size(); ++k) {
+ if (!stationarity_flags_[k]) {
+ hangovers_[k] = kHangoverBlocks;
+ } else if (reduce_hangover) {
+ hangovers_[k] = std::max(hangovers_[k] - 1, 0);
+ }
+ }
+}
+
+void StationarityEstimator::SmoothStationaryPerFreq() {
+ std::array<bool, kFftLengthBy2Plus1> all_ahead_stationary_smooth;
+ for (size_t k = 1; k < kFftLengthBy2Plus1 - 1; ++k) {
+ all_ahead_stationary_smooth[k] = stationarity_flags_[k - 1] &&
+ stationarity_flags_[k] &&
+ stationarity_flags_[k + 1];
+ }
+
+ all_ahead_stationary_smooth[0] = all_ahead_stationary_smooth[1];
+ all_ahead_stationary_smooth[kFftLengthBy2Plus1 - 1] =
+ all_ahead_stationary_smooth[kFftLengthBy2Plus1 - 2];
+
+ stationarity_flags_ = all_ahead_stationary_smooth;
+}
+
+std::atomic<int> StationarityEstimator::instance_count_(0);
+
+StationarityEstimator::NoiseSpectrum::NoiseSpectrum() {
+ Reset();
+}
+
+StationarityEstimator::NoiseSpectrum::~NoiseSpectrum() = default;
+
+void StationarityEstimator::NoiseSpectrum::Reset() {
+ block_counter_ = 0;
+ noise_spectrum_.fill(kMinNoisePower);
+}
+
+void StationarityEstimator::NoiseSpectrum::Update(
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>> spectrum) {
+ RTC_DCHECK_LE(1, spectrum[0].size());
+ const int num_render_channels = static_cast<int>(spectrum.size());
+
+ std::array<float, kFftLengthBy2Plus1> avg_spectrum_data;
+ rtc::ArrayView<const float> avg_spectrum;
+ if (num_render_channels == 1) {
+ avg_spectrum = spectrum[0];
+ } else {
+ // For multiple channels, average the channel spectra before passing to the
+ // noise spectrum estimator.
+ avg_spectrum = avg_spectrum_data;
+ std::copy(spectrum[0].begin(), spectrum[0].end(),
+ avg_spectrum_data.begin());
+ for (int ch = 1; ch < num_render_channels; ++ch) {
+ for (size_t k = 1; k < kFftLengthBy2Plus1; ++k) {
+ avg_spectrum_data[k] += spectrum[ch][k];
+ }
+ }
+
+ const float one_by_num_channels = 1.f / num_render_channels;
+ for (size_t k = 1; k < kFftLengthBy2Plus1; ++k) {
+ avg_spectrum_data[k] *= one_by_num_channels;
+ }
+ }
+
+ ++block_counter_;
+ float alpha = GetAlpha();
+ for (size_t k = 0; k < kFftLengthBy2Plus1; ++k) {
+ if (block_counter_ <= kNBlocksAverageInitPhase) {
+ noise_spectrum_[k] += (1.f / kNBlocksAverageInitPhase) * avg_spectrum[k];
+ } else {
+ noise_spectrum_[k] =
+ UpdateBandBySmoothing(avg_spectrum[k], noise_spectrum_[k], alpha);
+ }
+ }
+}
+
+float StationarityEstimator::NoiseSpectrum::GetAlpha() const {
+ constexpr float kAlpha = 0.004f;
+ constexpr float kAlphaInit = 0.04f;
+ constexpr float kTiltAlpha = (kAlphaInit - kAlpha) / kNBlocksInitialPhase;
+
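+  // The smoothing constant decays linearly from kAlphaInit to kAlpha during
+  // the initial phase, so the noise estimate adapts quickly at startup and
+  // then settles.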
+ if (block_counter_ > (kNBlocksInitialPhase + kNBlocksAverageInitPhase)) {
+ return kAlpha;
+ } else {
+ return kAlphaInit -
+ kTiltAlpha * (block_counter_ - kNBlocksAverageInitPhase);
+ }
+}
+
+float StationarityEstimator::NoiseSpectrum::UpdateBandBySmoothing(
+ float power_band,
+ float power_band_noise,
+ float alpha) const {
+ float power_band_noise_updated = power_band_noise;
+ if (power_band_noise < power_band) {
+ RTC_DCHECK_GT(power_band, 0.f);
+ float alpha_inc = alpha * (power_band_noise / power_band);
+ if (block_counter_ > kNBlocksInitialPhase) {
+ if (10.f * power_band_noise < power_band) {
+ alpha_inc *= 0.1f;
+ }
+ }
+ power_band_noise_updated += alpha_inc * (power_band - power_band_noise);
+ } else {
+ power_band_noise_updated += alpha * (power_band - power_band_noise);
+ power_band_noise_updated =
+ std::max(power_band_noise_updated, kMinNoisePower);
+ }
+ return power_band_noise_updated;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/stationarity_estimator.h b/third_party/libwebrtc/modules/audio_processing/aec3/stationarity_estimator.h
new file mode 100644
index 0000000000..8bcd3b789e
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/stationarity_estimator.h
@@ -0,0 +1,123 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_STATIONARITY_ESTIMATOR_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_STATIONARITY_ESTIMATOR_H_
+
+#include <stddef.h>
+
+#include <array>
+#include <atomic>
+#include <memory>
+
+#include "api/array_view.h"
+#include "modules/audio_processing/aec3/aec3_common.h" // kFftLengthBy2Plus1...
+#include "modules/audio_processing/aec3/reverb_model.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+class ApmDataDumper;
+struct SpectrumBuffer;
+
+class StationarityEstimator {
+ public:
+ StationarityEstimator();
+ ~StationarityEstimator();
+
+ // Reset the stationarity estimator.
+ void Reset();
+
+  // Update just the noise estimator. Useful until the delay is known.
+ void UpdateNoiseEstimator(
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>> spectrum);
+
+  // Update the flag indicating whether the current frame is stationary. For
+  // a more robust estimate, it looks at future and/or past frames.
+ void UpdateStationarityFlags(
+ const SpectrumBuffer& spectrum_buffer,
+ rtc::ArrayView<const float> render_reverb_contribution_spectrum,
+ int idx_current,
+ int num_lookahead);
+
+ // Returns true if the current band is stationary.
+ bool IsBandStationary(size_t band) const {
+ return stationarity_flags_[band] && (hangovers_[band] == 0);
+ }
+
+ // Returns true if the current block is estimated as stationary.
+ bool IsBlockStationary() const;
+
+ private:
+ static constexpr int kWindowLength = 13;
+ // Returns the power of the stationary noise spectrum at a band.
+ float GetStationarityPowerBand(size_t k) const { return noise_.Power(k); }
+
+  // Get an estimate of the stationarity of the current band by looking at
+  // the available past/present/future data.
+ bool EstimateBandStationarity(const SpectrumBuffer& spectrum_buffer,
+ rtc::ArrayView<const float> average_reverb,
+ const std::array<int, kWindowLength>& indexes,
+ size_t band) const;
+
+ // True if all bands at the current point are stationary.
+ bool AreAllBandsStationary();
+
+ // Update the hangover depending on the stationary status of the current
+ // frame.
+ void UpdateHangover();
+
+ // Smooth the stationarity detection by looking at neighbouring frequency
+ // bands.
+ void SmoothStationaryPerFreq();
+
+ class NoiseSpectrum {
+ public:
+ NoiseSpectrum();
+ ~NoiseSpectrum();
+
+ // Reset the noise power spectrum estimate state.
+ void Reset();
+
+ // Update the noise power spectrum with a new frame.
+ void Update(
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>> spectrum);
+
+ // Get the noise estimation power spectrum.
+ rtc::ArrayView<const float> Spectrum() const { return noise_spectrum_; }
+
+ // Get the noise power spectrum at a certain band.
+ float Power(size_t band) const {
+ RTC_DCHECK_LT(band, noise_spectrum_.size());
+ return noise_spectrum_[band];
+ }
+
+ private:
+ // Get the update coefficient to be used for the current frame.
+ float GetAlpha() const;
+
+ // Update the noise power spectrum at a certain band with a new frame.
+ float UpdateBandBySmoothing(float power_band,
+ float power_band_noise,
+ float alpha) const;
+ std::array<float, kFftLengthBy2Plus1> noise_spectrum_;
+ size_t block_counter_;
+ };
+
+ static std::atomic<int> instance_count_;
+ std::unique_ptr<ApmDataDumper> data_dumper_;
+ NoiseSpectrum noise_;
+ std::array<int, kFftLengthBy2Plus1> hangovers_;
+ std::array<bool, kFftLengthBy2Plus1> stationarity_flags_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AEC3_STATIONARITY_ESTIMATOR_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/subband_erle_estimator.cc b/third_party/libwebrtc/modules/audio_processing/aec3/subband_erle_estimator.cc
new file mode 100644
index 0000000000..dc7f92fd99
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/subband_erle_estimator.cc
@@ -0,0 +1,251 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/subband_erle_estimator.h"
+
+#include <algorithm>
+#include <functional>
+
+#include "rtc_base/checks.h"
+#include "rtc_base/numerics/safe_minmax.h"
+#include "system_wrappers/include/field_trial.h"
+
+namespace webrtc {
+
+namespace {
+
+constexpr float kX2BandEnergyThreshold = 44015068.0f;
+constexpr int kBlocksToHoldErle = 100;
+constexpr int kBlocksForOnsetDetection = kBlocksToHoldErle + 150;
+constexpr int kPointsToAccumulate = 6;
+
+std::array<float, kFftLengthBy2Plus1> SetMaxErleBands(float max_erle_l,
+ float max_erle_h) {
+ std::array<float, kFftLengthBy2Plus1> max_erle;
+ std::fill(max_erle.begin(), max_erle.begin() + kFftLengthBy2 / 2, max_erle_l);
+ std::fill(max_erle.begin() + kFftLengthBy2 / 2, max_erle.end(), max_erle_h);
+ return max_erle;
+}
+
+bool EnableMinErleDuringOnsets() {
+ return !field_trial::IsEnabled("WebRTC-Aec3MinErleDuringOnsetsKillSwitch");
+}
+
+} // namespace
+
+SubbandErleEstimator::SubbandErleEstimator(const EchoCanceller3Config& config,
+ size_t num_capture_channels)
+ : use_onset_detection_(config.erle.onset_detection),
+ min_erle_(config.erle.min),
+ max_erle_(SetMaxErleBands(config.erle.max_l, config.erle.max_h)),
+ use_min_erle_during_onsets_(EnableMinErleDuringOnsets()),
+ accum_spectra_(num_capture_channels),
+ erle_(num_capture_channels),
+ erle_onset_compensated_(num_capture_channels),
+ erle_unbounded_(num_capture_channels),
+ erle_during_onsets_(num_capture_channels),
+ coming_onset_(num_capture_channels),
+ hold_counters_(num_capture_channels) {
+ Reset();
+}
+
+SubbandErleEstimator::~SubbandErleEstimator() = default;
+
+void SubbandErleEstimator::Reset() {
+ const size_t num_capture_channels = erle_.size();
+ for (size_t ch = 0; ch < num_capture_channels; ++ch) {
+ erle_[ch].fill(min_erle_);
+ erle_onset_compensated_[ch].fill(min_erle_);
+ erle_unbounded_[ch].fill(min_erle_);
+ erle_during_onsets_[ch].fill(min_erle_);
+ coming_onset_[ch].fill(true);
+ hold_counters_[ch].fill(0);
+ }
+ ResetAccumulatedSpectra();
+}
+
+void SubbandErleEstimator::Update(
+ rtc::ArrayView<const float, kFftLengthBy2Plus1> X2,
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>> Y2,
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>> E2,
+ const std::vector<bool>& converged_filters) {
+ UpdateAccumulatedSpectra(X2, Y2, E2, converged_filters);
+ UpdateBands(converged_filters);
+
+ if (use_onset_detection_) {
+ DecreaseErlePerBandForLowRenderSignals();
+ }
+
+ const size_t num_capture_channels = erle_.size();
+ for (size_t ch = 0; ch < num_capture_channels; ++ch) {
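+    // The per-band estimators only update bands 1..kFftLengthBy2 - 1; the DC
+    // and Nyquist bins are filled by copying their neighbors.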
+ auto& erle = erle_[ch];
+ erle[0] = erle[1];
+ erle[kFftLengthBy2] = erle[kFftLengthBy2 - 1];
+
+ auto& erle_oc = erle_onset_compensated_[ch];
+ erle_oc[0] = erle_oc[1];
+ erle_oc[kFftLengthBy2] = erle_oc[kFftLengthBy2 - 1];
+
+ auto& erle_u = erle_unbounded_[ch];
+ erle_u[0] = erle_u[1];
+ erle_u[kFftLengthBy2] = erle_u[kFftLengthBy2 - 1];
+ }
+}
+
+void SubbandErleEstimator::Dump(
+ const std::unique_ptr<ApmDataDumper>& data_dumper) const {
+ data_dumper->DumpRaw("aec3_erle_onset", ErleDuringOnsets()[0]);
+}
+
+void SubbandErleEstimator::UpdateBands(
+ const std::vector<bool>& converged_filters) {
+ const int num_capture_channels = static_cast<int>(accum_spectra_.Y2.size());
+ for (int ch = 0; ch < num_capture_channels; ++ch) {
+    // Note that the use of the converged_filter flag already imposes a
+    // minimum on the ERLE that can be estimated, as that flag would be false
+    // if the filter were performing poorly.
+ if (!converged_filters[ch]) {
+ continue;
+ }
+
+ if (accum_spectra_.num_points[ch] != kPointsToAccumulate) {
+ continue;
+ }
+
+ std::array<float, kFftLengthBy2> new_erle;
+ std::array<bool, kFftLengthBy2> is_erle_updated;
+ is_erle_updated.fill(false);
+
+ for (size_t k = 1; k < kFftLengthBy2; ++k) {
+ if (accum_spectra_.E2[ch][k] > 0.f) {
+ new_erle[k] = accum_spectra_.Y2[ch][k] / accum_spectra_.E2[ch][k];
+ is_erle_updated[k] = true;
+ }
+ }
+
+ if (use_onset_detection_) {
+ for (size_t k = 1; k < kFftLengthBy2; ++k) {
+ if (is_erle_updated[k] && !accum_spectra_.low_render_energy[ch][k]) {
+ if (coming_onset_[ch][k]) {
+ coming_onset_[ch][k] = false;
+ if (!use_min_erle_during_onsets_) {
+ float alpha =
+ new_erle[k] < erle_during_onsets_[ch][k] ? 0.3f : 0.15f;
+ erle_during_onsets_[ch][k] = rtc::SafeClamp(
+ erle_during_onsets_[ch][k] +
+ alpha * (new_erle[k] - erle_during_onsets_[ch][k]),
+ min_erle_, max_erle_[k]);
+ }
+ }
+ hold_counters_[ch][k] = kBlocksForOnsetDetection;
+ }
+ }
+ }
+
+ auto update_erle_band = [](float& erle, float new_erle,
+ bool low_render_energy, float min_erle,
+ float max_erle) {
+ float alpha = 0.05f;
+ if (new_erle < erle) {
+ alpha = low_render_energy ? 0.f : 0.1f;
+ }
+ erle =
+ rtc::SafeClamp(erle + alpha * (new_erle - erle), min_erle, max_erle);
+ };
+
+ for (size_t k = 1; k < kFftLengthBy2; ++k) {
+ if (is_erle_updated[k]) {
+ const bool low_render_energy = accum_spectra_.low_render_energy[ch][k];
+ update_erle_band(erle_[ch][k], new_erle[k], low_render_energy,
+ min_erle_, max_erle_[k]);
+ if (use_onset_detection_) {
+ update_erle_band(erle_onset_compensated_[ch][k], new_erle[k],
+ low_render_energy, min_erle_, max_erle_[k]);
+ }
+
+ // Virtually unbounded ERLE.
+ constexpr float kUnboundedErleMax = 100000.0f;
+ update_erle_band(erle_unbounded_[ch][k], new_erle[k], low_render_energy,
+ min_erle_, kUnboundedErleMax);
+ }
+ }
+ }
+}
+
+void SubbandErleEstimator::DecreaseErlePerBandForLowRenderSignals() {
+ const int num_capture_channels = static_cast<int>(accum_spectra_.Y2.size());
+ for (int ch = 0; ch < num_capture_channels; ++ch) {
+ for (size_t k = 1; k < kFftLengthBy2; ++k) {
+ --hold_counters_[ch][k];
+ if (hold_counters_[ch][k] <=
+ (kBlocksForOnsetDetection - kBlocksToHoldErle)) {
+ if (erle_onset_compensated_[ch][k] > erle_during_onsets_[ch][k]) {
+ erle_onset_compensated_[ch][k] =
+ std::max(erle_during_onsets_[ch][k],
+ 0.97f * erle_onset_compensated_[ch][k]);
+ RTC_DCHECK_LE(min_erle_, erle_onset_compensated_[ch][k]);
+ }
+ if (hold_counters_[ch][k] <= 0) {
+ coming_onset_[ch][k] = true;
+ hold_counters_[ch][k] = 0;
+ }
+ }
+ }
+ }
+}
+
+void SubbandErleEstimator::ResetAccumulatedSpectra() {
+ for (size_t ch = 0; ch < erle_during_onsets_.size(); ++ch) {
+ accum_spectra_.Y2[ch].fill(0.f);
+ accum_spectra_.E2[ch].fill(0.f);
+ accum_spectra_.num_points[ch] = 0;
+ accum_spectra_.low_render_energy[ch].fill(false);
+ }
+}
+
+void SubbandErleEstimator::UpdateAccumulatedSpectra(
+ rtc::ArrayView<const float, kFftLengthBy2Plus1> X2,
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>> Y2,
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>> E2,
+ const std::vector<bool>& converged_filters) {
+ auto& st = accum_spectra_;
+  RTC_DCHECK_EQ(st.Y2.size(), Y2.size());
+  RTC_DCHECK_EQ(st.E2.size(), E2.size());
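+  // Y2 and E2 are accumulated over kPointsToAccumulate blocks before a new
+  // set of ERLE points is produced in UpdateBands; the accumulators are
+  // restarted once the accumulated values have been consumed.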
+ const int num_capture_channels = static_cast<int>(Y2.size());
+ for (int ch = 0; ch < num_capture_channels; ++ch) {
+    // Note that the use of the converged_filter flag already imposes a
+    // minimum on the ERLE that can be estimated, as that flag would be false
+    // if the filter were performing poorly.
+ if (!converged_filters[ch]) {
+ continue;
+ }
+
+ if (st.num_points[ch] == kPointsToAccumulate) {
+ st.num_points[ch] = 0;
+ st.Y2[ch].fill(0.f);
+ st.E2[ch].fill(0.f);
+ st.low_render_energy[ch].fill(false);
+ }
+
+ std::transform(Y2[ch].begin(), Y2[ch].end(), st.Y2[ch].begin(),
+ st.Y2[ch].begin(), std::plus<float>());
+ std::transform(E2[ch].begin(), E2[ch].end(), st.E2[ch].begin(),
+ st.E2[ch].begin(), std::plus<float>());
+
+ for (size_t k = 0; k < X2.size(); ++k) {
+ st.low_render_energy[ch][k] =
+ st.low_render_energy[ch][k] || X2[k] < kX2BandEnergyThreshold;
+ }
+
+ ++st.num_points[ch];
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/subband_erle_estimator.h b/third_party/libwebrtc/modules/audio_processing/aec3/subband_erle_estimator.h
new file mode 100644
index 0000000000..8bf9c4d645
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/subband_erle_estimator.h
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_SUBBAND_ERLE_ESTIMATOR_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_SUBBAND_ERLE_ESTIMATOR_H_
+
+#include <stddef.h>
+
+#include <array>
+#include <memory>
+#include <vector>
+
+#include "api/array_view.h"
+#include "api/audio/echo_canceller3_config.h"
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+
+namespace webrtc {
+
+// Estimates the echo return loss enhancement for each frequency subband.
+class SubbandErleEstimator {
+ public:
+ SubbandErleEstimator(const EchoCanceller3Config& config,
+ size_t num_capture_channels);
+ ~SubbandErleEstimator();
+
+ // Resets the ERLE estimator.
+ void Reset();
+
+ // Updates the ERLE estimate.
+ void Update(rtc::ArrayView<const float, kFftLengthBy2Plus1> X2,
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>> Y2,
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>> E2,
+ const std::vector<bool>& converged_filters);
+
+ // Returns the ERLE estimate.
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>> Erle(
+ bool onset_compensated) const {
+ return onset_compensated && use_onset_detection_ ? erle_onset_compensated_
+ : erle_;
+ }
+
+ // Returns the non-capped ERLE estimate.
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>> ErleUnbounded()
+ const {
+ return erle_unbounded_;
+ }
+
+ // Returns the ERLE estimate at onsets (only used for testing).
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>> ErleDuringOnsets()
+ const {
+ return erle_during_onsets_;
+ }
+
+ void Dump(const std::unique_ptr<ApmDataDumper>& data_dumper) const;
+
+ private:
+ struct AccumulatedSpectra {
+ explicit AccumulatedSpectra(size_t num_capture_channels)
+ : Y2(num_capture_channels),
+ E2(num_capture_channels),
+ low_render_energy(num_capture_channels),
+ num_points(num_capture_channels) {}
+ std::vector<std::array<float, kFftLengthBy2Plus1>> Y2;
+ std::vector<std::array<float, kFftLengthBy2Plus1>> E2;
+ std::vector<std::array<bool, kFftLengthBy2Plus1>> low_render_energy;
+ std::vector<int> num_points;
+ };
+
+ void UpdateAccumulatedSpectra(
+ rtc::ArrayView<const float, kFftLengthBy2Plus1> X2,
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>> Y2,
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>> E2,
+ const std::vector<bool>& converged_filters);
+
+ void ResetAccumulatedSpectra();
+
+ void UpdateBands(const std::vector<bool>& converged_filters);
+ void DecreaseErlePerBandForLowRenderSignals();
+
+ const bool use_onset_detection_;
+ const float min_erle_;
+ const std::array<float, kFftLengthBy2Plus1> max_erle_;
+ const bool use_min_erle_during_onsets_;
+ AccumulatedSpectra accum_spectra_;
+ // ERLE without special handling of render onsets.
+ std::vector<std::array<float, kFftLengthBy2Plus1>> erle_;
+ // ERLE lowered during render onsets.
+ std::vector<std::array<float, kFftLengthBy2Plus1>> erle_onset_compensated_;
+ std::vector<std::array<float, kFftLengthBy2Plus1>> erle_unbounded_;
+ // Estimation of ERLE during render onsets.
+ std::vector<std::array<float, kFftLengthBy2Plus1>> erle_during_onsets_;
+ std::vector<std::array<bool, kFftLengthBy2Plus1>> coming_onset_;
+ std::vector<std::array<int, kFftLengthBy2Plus1>> hold_counters_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AEC3_SUBBAND_ERLE_ESTIMATOR_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/subband_nearend_detector.cc b/third_party/libwebrtc/modules/audio_processing/aec3/subband_nearend_detector.cc
new file mode 100644
index 0000000000..2aa400c3af
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/subband_nearend_detector.cc
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/subband_nearend_detector.h"
+
+#include <numeric>
+
+namespace webrtc {
+SubbandNearendDetector::SubbandNearendDetector(
+ const EchoCanceller3Config::Suppressor::SubbandNearendDetection& config,
+ size_t num_capture_channels)
+ : config_(config),
+ num_capture_channels_(num_capture_channels),
+ nearend_smoothers_(num_capture_channels_,
+ aec3::MovingAverage(kFftLengthBy2Plus1,
+ config_.nearend_average_blocks)),
+ one_over_subband_length1_(
+ 1.f / (config_.subband1.high - config_.subband1.low + 1)),
+ one_over_subband_length2_(
+ 1.f / (config_.subband2.high - config_.subband2.low + 1)) {}
+
+void SubbandNearendDetector::Update(
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>>
+ nearend_spectrum,
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>>
+ residual_echo_spectrum,
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>>
+ comfort_noise_spectrum,
+ bool initial_state) {
+ nearend_state_ = false;
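+  // A channel is classified as nearend when the power of the first subband is
+  // small relative to that of the second subband (scaled by nearend_threshold)
+  // while still exceeding the comfort noise power by snr_threshold.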
+ for (size_t ch = 0; ch < num_capture_channels_; ++ch) {
+ const std::array<float, kFftLengthBy2Plus1>& noise =
+ comfort_noise_spectrum[ch];
+ std::array<float, kFftLengthBy2Plus1> nearend;
+ nearend_smoothers_[ch].Average(nearend_spectrum[ch], nearend);
+
+ // Noise power of the first region.
+ float noise_power =
+ std::accumulate(noise.begin() + config_.subband1.low,
+ noise.begin() + config_.subband1.high + 1, 0.f) *
+ one_over_subband_length1_;
+
+ // Nearend power of the first region.
+ float nearend_power_subband1 =
+ std::accumulate(nearend.begin() + config_.subband1.low,
+ nearend.begin() + config_.subband1.high + 1, 0.f) *
+ one_over_subband_length1_;
+
+ // Nearend power of the second region.
+ float nearend_power_subband2 =
+ std::accumulate(nearend.begin() + config_.subband2.low,
+ nearend.begin() + config_.subband2.high + 1, 0.f) *
+ one_over_subband_length2_;
+
+ // One channel is sufficient to trigger nearend state.
+ nearend_state_ =
+ nearend_state_ ||
+ (nearend_power_subband1 <
+ config_.nearend_threshold * nearend_power_subband2 &&
+ (nearend_power_subband1 > config_.snr_threshold * noise_power));
+ }
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/subband_nearend_detector.h b/third_party/libwebrtc/modules/audio_processing/aec3/subband_nearend_detector.h
new file mode 100644
index 0000000000..8357edb65f
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/subband_nearend_detector.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_SUBBAND_NEAREND_DETECTOR_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_SUBBAND_NEAREND_DETECTOR_H_
+
+#include <vector>
+
+#include "api/array_view.h"
+#include "api/audio/echo_canceller3_config.h"
+#include "modules/audio_processing/aec3/moving_average.h"
+#include "modules/audio_processing/aec3/nearend_detector.h"
+
+namespace webrtc {
+// Class for selecting whether the suppressor is in the nearend or echo state.
+class SubbandNearendDetector : public NearendDetector {
+ public:
+ SubbandNearendDetector(
+ const EchoCanceller3Config::Suppressor::SubbandNearendDetection& config,
+ size_t num_capture_channels);
+
+ // Returns whether the current state is the nearend state.
+ bool IsNearendState() const override { return nearend_state_; }
+
+ // Updates the state selection based on latest spectral estimates.
+ void Update(rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>>
+ nearend_spectrum,
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>>
+ residual_echo_spectrum,
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>>
+ comfort_noise_spectrum,
+ bool initial_state) override;
+
+ private:
+ const EchoCanceller3Config::Suppressor::SubbandNearendDetection config_;
+ const size_t num_capture_channels_;
+ std::vector<aec3::MovingAverage> nearend_smoothers_;
+ const float one_over_subband_length1_;
+ const float one_over_subband_length2_;
+ bool nearend_state_ = false;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AEC3_SUBBAND_NEAREND_DETECTOR_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/subtractor.cc b/third_party/libwebrtc/modules/audio_processing/aec3/subtractor.cc
new file mode 100644
index 0000000000..aa36bb272a
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/subtractor.cc
@@ -0,0 +1,364 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/subtractor.h"
+
+#include <algorithm>
+#include <utility>
+
+#include "api/array_view.h"
+#include "modules/audio_processing/aec3/adaptive_fir_filter_erl.h"
+#include "modules/audio_processing/aec3/fft_data.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/numerics/safe_minmax.h"
+#include "system_wrappers/include/field_trial.h"
+
+namespace webrtc {
+
+namespace {
+
+bool UseCoarseFilterResetHangover() {
+ return !field_trial::IsEnabled(
+ "WebRTC-Aec3CoarseFilterResetHangoverKillSwitch");
+}
+
+void PredictionError(const Aec3Fft& fft,
+ const FftData& S,
+ rtc::ArrayView<const float> y,
+ std::array<float, kBlockSize>* e,
+ std::array<float, kBlockSize>* s) {
+ std::array<float, kFftLength> tmp;
+ fft.Ifft(S, &tmp);
+ constexpr float kScale = 1.0f / kFftLengthBy2;
+ std::transform(y.begin(), y.end(), tmp.begin() + kFftLengthBy2, e->begin(),
+ [&](float a, float b) { return a - b * kScale; });
+
+ if (s) {
+ for (size_t k = 0; k < s->size(); ++k) {
+ (*s)[k] = kScale * tmp[k + kFftLengthBy2];
+ }
+ }
+}
+
+void ScaleFilterOutput(rtc::ArrayView<const float> y,
+ float factor,
+ rtc::ArrayView<float> e,
+ rtc::ArrayView<float> s) {
+ RTC_DCHECK_EQ(y.size(), e.size());
+ RTC_DCHECK_EQ(y.size(), s.size());
+ for (size_t k = 0; k < y.size(); ++k) {
+ s[k] *= factor;
+ e[k] = y[k] - s[k];
+ }
+}
+
+} // namespace
+
+Subtractor::Subtractor(const EchoCanceller3Config& config,
+ size_t num_render_channels,
+ size_t num_capture_channels,
+ ApmDataDumper* data_dumper,
+ Aec3Optimization optimization)
+ : fft_(),
+ data_dumper_(data_dumper),
+ optimization_(optimization),
+ config_(config),
+ num_capture_channels_(num_capture_channels),
+ use_coarse_filter_reset_hangover_(UseCoarseFilterResetHangover()),
+ refined_filters_(num_capture_channels_),
+ coarse_filter_(num_capture_channels_),
+ refined_gains_(num_capture_channels_),
+ coarse_gains_(num_capture_channels_),
+ filter_misadjustment_estimators_(num_capture_channels_),
+ poor_coarse_filter_counters_(num_capture_channels_, 0),
+ coarse_filter_reset_hangover_(num_capture_channels_, 0),
+ refined_frequency_responses_(
+ num_capture_channels_,
+ std::vector<std::array<float, kFftLengthBy2Plus1>>(
+ std::max(config_.filter.refined_initial.length_blocks,
+ config_.filter.refined.length_blocks),
+ std::array<float, kFftLengthBy2Plus1>())),
+ refined_impulse_responses_(
+ num_capture_channels_,
+ std::vector<float>(GetTimeDomainLength(std::max(
+ config_.filter.refined_initial.length_blocks,
+ config_.filter.refined.length_blocks)),
+ 0.f)),
+ coarse_impulse_responses_(0) {
+ // Set up the storing of coarse impulse responses if data dumping is
+ // available.
+ if (ApmDataDumper::IsAvailable()) {
+ coarse_impulse_responses_.resize(num_capture_channels_);
+ const size_t filter_size = GetTimeDomainLength(
+ std::max(config_.filter.coarse_initial.length_blocks,
+ config_.filter.coarse.length_blocks));
+ for (std::vector<float>& impulse_response : coarse_impulse_responses_) {
+ impulse_response.resize(filter_size, 0.f);
+ }
+ }
+
+ for (size_t ch = 0; ch < num_capture_channels_; ++ch) {
+ refined_filters_[ch] = std::make_unique<AdaptiveFirFilter>(
+ config_.filter.refined.length_blocks,
+ config_.filter.refined_initial.length_blocks,
+ config.filter.config_change_duration_blocks, num_render_channels,
+ optimization, data_dumper_);
+
+ coarse_filter_[ch] = std::make_unique<AdaptiveFirFilter>(
+ config_.filter.coarse.length_blocks,
+ config_.filter.coarse_initial.length_blocks,
+ config.filter.config_change_duration_blocks, num_render_channels,
+ optimization, data_dumper_);
+ refined_gains_[ch] = std::make_unique<RefinedFilterUpdateGain>(
+ config_.filter.refined_initial,
+ config_.filter.config_change_duration_blocks);
+ coarse_gains_[ch] = std::make_unique<CoarseFilterUpdateGain>(
+ config_.filter.coarse_initial,
+ config.filter.config_change_duration_blocks);
+ }
+
+ RTC_DCHECK(data_dumper_);
+ for (size_t ch = 0; ch < num_capture_channels_; ++ch) {
+ for (auto& H2_k : refined_frequency_responses_[ch]) {
+ H2_k.fill(0.f);
+ }
+ }
+}
+
+Subtractor::~Subtractor() = default;
+
+void Subtractor::HandleEchoPathChange(
+ const EchoPathVariability& echo_path_variability) {
+ const auto full_reset = [&]() {
+ for (size_t ch = 0; ch < num_capture_channels_; ++ch) {
+ refined_filters_[ch]->HandleEchoPathChange();
+ coarse_filter_[ch]->HandleEchoPathChange();
+ refined_gains_[ch]->HandleEchoPathChange(echo_path_variability);
+ coarse_gains_[ch]->HandleEchoPathChange();
+ refined_gains_[ch]->SetConfig(config_.filter.refined_initial, true);
+ coarse_gains_[ch]->SetConfig(config_.filter.coarse_initial, true);
+ refined_filters_[ch]->SetSizePartitions(
+ config_.filter.refined_initial.length_blocks, true);
+ coarse_filter_[ch]->SetSizePartitions(
+ config_.filter.coarse_initial.length_blocks, true);
+ }
+ };
+
+ if (echo_path_variability.delay_change !=
+ EchoPathVariability::DelayAdjustment::kNone) {
+ full_reset();
+ }
+
+ if (echo_path_variability.gain_change) {
+ for (size_t ch = 0; ch < num_capture_channels_; ++ch) {
+ refined_gains_[ch]->HandleEchoPathChange(echo_path_variability);
+ }
+ }
+}
+
+void Subtractor::ExitInitialState() {
+ for (size_t ch = 0; ch < num_capture_channels_; ++ch) {
+ refined_gains_[ch]->SetConfig(config_.filter.refined, false);
+ coarse_gains_[ch]->SetConfig(config_.filter.coarse, false);
+ refined_filters_[ch]->SetSizePartitions(
+ config_.filter.refined.length_blocks, false);
+ coarse_filter_[ch]->SetSizePartitions(config_.filter.coarse.length_blocks,
+ false);
+ }
+}
+
+void Subtractor::Process(const RenderBuffer& render_buffer,
+ const Block& capture,
+ const RenderSignalAnalyzer& render_signal_analyzer,
+ const AecState& aec_state,
+ rtc::ArrayView<SubtractorOutput> outputs) {
+ RTC_DCHECK_EQ(num_capture_channels_, capture.NumChannels());
+
+ // Compute the render powers.
+ const bool same_filter_sizes = refined_filters_[0]->SizePartitions() ==
+ coarse_filter_[0]->SizePartitions();
+ std::array<float, kFftLengthBy2Plus1> X2_refined;
+ std::array<float, kFftLengthBy2Plus1> X2_coarse_data;
+ auto& X2_coarse = same_filter_sizes ? X2_refined : X2_coarse_data;
+ if (same_filter_sizes) {
+ render_buffer.SpectralSum(refined_filters_[0]->SizePartitions(),
+ &X2_refined);
+ } else if (refined_filters_[0]->SizePartitions() >
+ coarse_filter_[0]->SizePartitions()) {
+ render_buffer.SpectralSums(coarse_filter_[0]->SizePartitions(),
+ refined_filters_[0]->SizePartitions(),
+ &X2_coarse, &X2_refined);
+ } else {
+ render_buffer.SpectralSums(refined_filters_[0]->SizePartitions(),
+ coarse_filter_[0]->SizePartitions(), &X2_refined,
+ &X2_coarse);
+ }
+
+ // Process all capture channels
+ for (size_t ch = 0; ch < num_capture_channels_; ++ch) {
+ SubtractorOutput& output = outputs[ch];
+ rtc::ArrayView<const float> y = capture.View(/*band=*/0, ch);
+ FftData& E_refined = output.E_refined;
+ FftData E_coarse;
+ std::array<float, kBlockSize>& e_refined = output.e_refined;
+ std::array<float, kBlockSize>& e_coarse = output.e_coarse;
+
+ FftData S;
+ FftData& G = S;
+
+ // Form the outputs of the refined and coarse filters.
+ refined_filters_[ch]->Filter(render_buffer, &S);
+ PredictionError(fft_, S, y, &e_refined, &output.s_refined);
+
+ coarse_filter_[ch]->Filter(render_buffer, &S);
+ PredictionError(fft_, S, y, &e_coarse, &output.s_coarse);
+
+ // Compute the signal powers in the subtractor output.
+ output.ComputeMetrics(y);
+
+ // Adjust the filter if needed.
+ bool refined_filters_adjusted = false;
+ filter_misadjustment_estimators_[ch].Update(output);
+ if (filter_misadjustment_estimators_[ch].IsAdjustmentNeeded()) {
+ float scale = filter_misadjustment_estimators_[ch].GetMisadjustment();
+ refined_filters_[ch]->ScaleFilter(scale);
+ for (auto& h_k : refined_impulse_responses_[ch]) {
+ h_k *= scale;
+ }
+ ScaleFilterOutput(y, scale, e_refined, output.s_refined);
+ filter_misadjustment_estimators_[ch].Reset();
+ refined_filters_adjusted = true;
+ }
+
+    // Compute the FFTs of the refined and coarse filter outputs.
+ fft_.ZeroPaddedFft(e_refined, Aec3Fft::Window::kHanning, &E_refined);
+ fft_.ZeroPaddedFft(e_coarse, Aec3Fft::Window::kHanning, &E_coarse);
+
+ // Compute spectra for future use.
+ E_coarse.Spectrum(optimization_, output.E2_coarse);
+ E_refined.Spectrum(optimization_, output.E2_refined);
+
+ // Update the refined filter.
+ if (!refined_filters_adjusted) {
+ // Do not allow the performance of the coarse filter to affect the
+ // adaptation speed of the refined filter just after the coarse filter has
+ // been reset.
+ const bool disallow_leakage_diverged =
+ coarse_filter_reset_hangover_[ch] > 0 &&
+ use_coarse_filter_reset_hangover_;
+
+ std::array<float, kFftLengthBy2Plus1> erl;
+ ComputeErl(optimization_, refined_frequency_responses_[ch], erl);
+ refined_gains_[ch]->Compute(X2_refined, render_signal_analyzer, output,
+ erl, refined_filters_[ch]->SizePartitions(),
+ aec_state.SaturatedCapture(),
+ disallow_leakage_diverged, &G);
+ } else {
+ G.re.fill(0.f);
+ G.im.fill(0.f);
+ }
+ refined_filters_[ch]->Adapt(render_buffer, G,
+ &refined_impulse_responses_[ch]);
+ refined_filters_[ch]->ComputeFrequencyResponse(
+ &refined_frequency_responses_[ch]);
+
+ if (ch == 0) {
+ data_dumper_->DumpRaw("aec3_subtractor_G_refined", G.re);
+ data_dumper_->DumpRaw("aec3_subtractor_G_refined", G.im);
+ }
+
+ // Update the coarse filter.
+ poor_coarse_filter_counters_[ch] =
+ output.e2_refined < output.e2_coarse
+ ? poor_coarse_filter_counters_[ch] + 1
+ : 0;
+ if (poor_coarse_filter_counters_[ch] < 5) {
+ coarse_gains_[ch]->Compute(X2_coarse, render_signal_analyzer, E_coarse,
+ coarse_filter_[ch]->SizePartitions(),
+ aec_state.SaturatedCapture(), &G);
+ coarse_filter_reset_hangover_[ch] =
+ std::max(coarse_filter_reset_hangover_[ch] - 1, 0);
+ } else {
+ poor_coarse_filter_counters_[ch] = 0;
+ coarse_filter_[ch]->SetFilter(refined_filters_[ch]->SizePartitions(),
+ refined_filters_[ch]->GetFilter());
+ coarse_gains_[ch]->Compute(X2_coarse, render_signal_analyzer, E_refined,
+ coarse_filter_[ch]->SizePartitions(),
+ aec_state.SaturatedCapture(), &G);
+ coarse_filter_reset_hangover_[ch] =
+ config_.filter.coarse_reset_hangover_blocks;
+ }
+
+ if (ApmDataDumper::IsAvailable()) {
+ RTC_DCHECK_LT(ch, coarse_impulse_responses_.size());
+ coarse_filter_[ch]->Adapt(render_buffer, G,
+ &coarse_impulse_responses_[ch]);
+ } else {
+ coarse_filter_[ch]->Adapt(render_buffer, G);
+ }
+
+ if (ch == 0) {
+ data_dumper_->DumpRaw("aec3_subtractor_G_coarse", G.re);
+ data_dumper_->DumpRaw("aec3_subtractor_G_coarse", G.im);
+ filter_misadjustment_estimators_[ch].Dump(data_dumper_);
+ DumpFilters();
+ }
+
+ std::for_each(e_refined.begin(), e_refined.end(),
+ [](float& a) { a = rtc::SafeClamp(a, -32768.f, 32767.f); });
+
+ if (ch == 0) {
+ data_dumper_->DumpWav("aec3_refined_filters_output", kBlockSize,
+ &e_refined[0], 16000, 1);
+ data_dumper_->DumpWav("aec3_coarse_filter_output", kBlockSize,
+ &e_coarse[0], 16000, 1);
+ }
+ }
+}
+
+void Subtractor::FilterMisadjustmentEstimator::Update(
+ const SubtractorOutput& output) {
+ e2_acum_ += output.e2_refined;
+ y2_acum_ += output.y2;
+ if (++n_blocks_acum_ == n_blocks_) {
+ if (y2_acum_ > n_blocks_ * 200.f * 200.f * kBlockSize) {
+ float update = (e2_acum_ / y2_acum_);
+ if (e2_acum_ > n_blocks_ * 7500.f * 7500.f * kBlockSize) {
+ // Duration equal to blockSizeMs * n_blocks_ * 4.
+ overhang_ = 4;
+ } else {
+ overhang_ = std::max(overhang_ - 1, 0);
+ }
+
+ if ((update < inv_misadjustment_) || (overhang_ > 0)) {
+ inv_misadjustment_ += 0.1f * (update - inv_misadjustment_);
+ }
+ }
+ e2_acum_ = 0.f;
+ y2_acum_ = 0.f;
+ n_blocks_acum_ = 0;
+ }
+}
+
+void Subtractor::FilterMisadjustmentEstimator::Reset() {
+ e2_acum_ = 0.f;
+ y2_acum_ = 0.f;
+ n_blocks_acum_ = 0;
+ inv_misadjustment_ = 0.f;
+  overhang_ = 0;
+}
+
+void Subtractor::FilterMisadjustmentEstimator::Dump(
+ ApmDataDumper* data_dumper) const {
+ data_dumper->DumpRaw("aec3_inv_misadjustment_factor", inv_misadjustment_);
+}
+
+} // namespace webrtc
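PredictionError() above is the overlap-save step of the frequency-domain subtractor: the filter output s is the second half of the inverse FFT of S, rescaled by 1/kFftLengthBy2, and the error is simply e = y - s. A self-contained restatement of that arithmetic, assuming AEC3's 64-sample blocks and 128-point FFT and taking the inverse transform as a precomputed input:

#include <array>
#include <cstddef>

constexpr size_t kBlock = 64;        // kBlockSize == kFftLengthBy2 in AEC3.
constexpr size_t kFft = 2 * kBlock;  // kFftLength.

// e[k] = y[k] - s[k], with s taken from the second half of IFFT(S) and
// rescaled; the first half is discarded (overlap-save).
void ComputeError(const std::array<float, kFft>& ifft_of_S,
                  const std::array<float, kBlock>& y,
                  std::array<float, kBlock>* e,
                  std::array<float, kBlock>* s) {
  constexpr float kScale = 1.0f / kBlock;
  for (size_t k = 0; k < kBlock; ++k) {
    (*s)[k] = kScale * ifft_of_S[kBlock + k];
    (*e)[k] = y[k] - (*s)[k];
  }
}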
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/subtractor.h b/third_party/libwebrtc/modules/audio_processing/aec3/subtractor.h
new file mode 100644
index 0000000000..86159a3442
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/subtractor.h
@@ -0,0 +1,150 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_SUBTRACTOR_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_SUBTRACTOR_H_
+
+#include <math.h>
+#include <stddef.h>
+
+#include <array>
+#include <vector>
+
+#include "api/array_view.h"
+#include "api/audio/echo_canceller3_config.h"
+#include "modules/audio_processing/aec3/adaptive_fir_filter.h"
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "modules/audio_processing/aec3/aec3_fft.h"
+#include "modules/audio_processing/aec3/aec_state.h"
+#include "modules/audio_processing/aec3/block.h"
+#include "modules/audio_processing/aec3/coarse_filter_update_gain.h"
+#include "modules/audio_processing/aec3/echo_path_variability.h"
+#include "modules/audio_processing/aec3/refined_filter_update_gain.h"
+#include "modules/audio_processing/aec3/render_buffer.h"
+#include "modules/audio_processing/aec3/render_signal_analyzer.h"
+#include "modules/audio_processing/aec3/subtractor_output.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+// Provides linear echo cancellation functionality.
+class Subtractor {
+ public:
+ Subtractor(const EchoCanceller3Config& config,
+ size_t num_render_channels,
+ size_t num_capture_channels,
+ ApmDataDumper* data_dumper,
+ Aec3Optimization optimization);
+ ~Subtractor();
+ Subtractor(const Subtractor&) = delete;
+ Subtractor& operator=(const Subtractor&) = delete;
+
+ // Performs the echo subtraction.
+ void Process(const RenderBuffer& render_buffer,
+ const Block& capture,
+ const RenderSignalAnalyzer& render_signal_analyzer,
+ const AecState& aec_state,
+ rtc::ArrayView<SubtractorOutput> outputs);
+
+ void HandleEchoPathChange(const EchoPathVariability& echo_path_variability);
+
+ // Exits the initial state.
+ void ExitInitialState();
+
+ // Returns the block-wise frequency responses for the refined adaptive
+ // filters.
+ const std::vector<std::vector<std::array<float, kFftLengthBy2Plus1>>>&
+ FilterFrequencyResponses() const {
+ return refined_frequency_responses_;
+ }
+
+ // Returns the estimates of the impulse responses for the refined adaptive
+ // filters.
+ const std::vector<std::vector<float>>& FilterImpulseResponses() const {
+ return refined_impulse_responses_;
+ }
+
+ void DumpFilters() {
+ data_dumper_->DumpRaw(
+ "aec3_subtractor_h_refined",
+ rtc::ArrayView<const float>(
+ refined_impulse_responses_[0].data(),
+ GetTimeDomainLength(
+ refined_filters_[0]->max_filter_size_partitions())));
+ if (ApmDataDumper::IsAvailable()) {
+ RTC_DCHECK_GT(coarse_impulse_responses_.size(), 0);
+ data_dumper_->DumpRaw(
+ "aec3_subtractor_h_coarse",
+ rtc::ArrayView<const float>(
+ coarse_impulse_responses_[0].data(),
+ GetTimeDomainLength(
+ coarse_filter_[0]->max_filter_size_partitions())));
+ }
+
+ refined_filters_[0]->DumpFilter("aec3_subtractor_H_refined");
+ coarse_filter_[0]->DumpFilter("aec3_subtractor_H_coarse");
+ }
+
+ private:
+ class FilterMisadjustmentEstimator {
+ public:
+ FilterMisadjustmentEstimator() = default;
+ ~FilterMisadjustmentEstimator() = default;
+    // Updates the misadjustment estimator.
+    void Update(const SubtractorOutput& output);
+    // Returns a recommended scale for the filter so that the prediction
+    // error energy gets closer to the energy that is seen at the
+    // microphone input.
+ float GetMisadjustment() const {
+ RTC_DCHECK_GT(inv_misadjustment_, 0.0f);
+      // The scale is not meant to compensate for the full estimated
+      // mismatch; instead, it adjusts for half of it.
+ return 2.f / sqrtf(inv_misadjustment_);
+ }
+    // Returns true if the prediction error energy is significantly larger
+ // than the microphone signal energy and, therefore, an adjustment is
+ // recommended.
+ bool IsAdjustmentNeeded() const { return inv_misadjustment_ > 10.f; }
+ void Reset();
+ void Dump(ApmDataDumper* data_dumper) const;
+
+ private:
+ const int n_blocks_ = 4;
+ int n_blocks_acum_ = 0;
+ float e2_acum_ = 0.f;
+ float y2_acum_ = 0.f;
+ float inv_misadjustment_ = 0.f;
+    int overhang_ = 0;
+ };
+
+ const Aec3Fft fft_;
+ ApmDataDumper* data_dumper_;
+ const Aec3Optimization optimization_;
+ const EchoCanceller3Config config_;
+ const size_t num_capture_channels_;
+ const bool use_coarse_filter_reset_hangover_;
+
+ std::vector<std::unique_ptr<AdaptiveFirFilter>> refined_filters_;
+ std::vector<std::unique_ptr<AdaptiveFirFilter>> coarse_filter_;
+ std::vector<std::unique_ptr<RefinedFilterUpdateGain>> refined_gains_;
+ std::vector<std::unique_ptr<CoarseFilterUpdateGain>> coarse_gains_;
+ std::vector<FilterMisadjustmentEstimator> filter_misadjustment_estimators_;
+ std::vector<size_t> poor_coarse_filter_counters_;
+ std::vector<int> coarse_filter_reset_hangover_;
+ std::vector<std::vector<std::array<float, kFftLengthBy2Plus1>>>
+ refined_frequency_responses_;
+ std::vector<std::vector<float>> refined_impulse_responses_;
+ std::vector<std::vector<float>> coarse_impulse_responses_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AEC3_SUBTRACTOR_H_
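A numeric illustration of GetMisadjustment(): inv_misadjustment_ tracks the ratio e2/y2, so a value of 16 means the prediction-error energy is 16 times the microphone energy, i.e. the filter output is roughly 4x too large in amplitude. A full correction would scale by 1/sqrt(16) = 0.25; the half correction described in the comment above gives 2/sqrt(16) = 0.5. A throwaway check of that arithmetic (not part of the source):

#include <cmath>
#include <cstdio>

int main() {
  const float inv_misadjustment = 16.f;  // Estimated e2 / y2 ratio.
  const float full_correction = 1.f / std::sqrt(inv_misadjustment);
  const float recommended = 2.f / std::sqrt(inv_misadjustment);
  std::printf("full: %.2f recommended: %.2f\n", full_correction,
              recommended);  // full: 0.25 recommended: 0.50
  return 0;
}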
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/subtractor_output.cc b/third_party/libwebrtc/modules/audio_processing/aec3/subtractor_output.cc
new file mode 100644
index 0000000000..ed80101f06
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/subtractor_output.cc
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/subtractor_output.h"
+
+#include <numeric>
+
+namespace webrtc {
+
+SubtractorOutput::SubtractorOutput() = default;
+SubtractorOutput::~SubtractorOutput() = default;
+
+void SubtractorOutput::Reset() {
+ s_refined.fill(0.f);
+ s_coarse.fill(0.f);
+ e_refined.fill(0.f);
+ e_coarse.fill(0.f);
+ E_refined.re.fill(0.f);
+ E_refined.im.fill(0.f);
+ E2_refined.fill(0.f);
+ E2_coarse.fill(0.f);
+ e2_refined = 0.f;
+ e2_coarse = 0.f;
+ s2_refined = 0.f;
+ s2_coarse = 0.f;
+ y2 = 0.f;
+}
+
+void SubtractorOutput::ComputeMetrics(rtc::ArrayView<const float> y) {
+ const auto sum_of_squares = [](float a, float b) { return a + b * b; };
+ y2 = std::accumulate(y.begin(), y.end(), 0.f, sum_of_squares);
+ e2_refined =
+ std::accumulate(e_refined.begin(), e_refined.end(), 0.f, sum_of_squares);
+ e2_coarse =
+ std::accumulate(e_coarse.begin(), e_coarse.end(), 0.f, sum_of_squares);
+ s2_refined =
+ std::accumulate(s_refined.begin(), s_refined.end(), 0.f, sum_of_squares);
+ s2_coarse =
+ std::accumulate(s_coarse.begin(), s_coarse.end(), 0.f, sum_of_squares);
+
+ s_refined_max_abs = *std::max_element(s_refined.begin(), s_refined.end());
+ s_refined_max_abs =
+ std::max(s_refined_max_abs,
+ -(*std::min_element(s_refined.begin(), s_refined.end())));
+
+ s_coarse_max_abs = *std::max_element(s_coarse.begin(), s_coarse.end());
+ s_coarse_max_abs = std::max(
+ s_coarse_max_abs, -(*std::min_element(s_coarse.begin(), s_coarse.end())));
+}
+
+} // namespace webrtc
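The accumulate-with-lambda pattern above computes per-block signal energies. For reference, the same metrics can be expressed with std::inner_product and std::minmax_element; a sketch assuming AEC3's 64-sample blocks, illustrative only:

#include <algorithm>
#include <array>
#include <cmath>
#include <cstddef>
#include <numeric>

constexpr size_t kBlock = 64;  // kBlockSize in AEC3.

// Block energy: sum of squared samples.
float BlockEnergy(const std::array<float, kBlock>& x) {
  return std::inner_product(x.begin(), x.end(), x.begin(), 0.f);
}

// Peak magnitude, matching the max/-min construction in ComputeMetrics().
float MaxAbs(const std::array<float, kBlock>& x) {
  const auto [mn, mx] = std::minmax_element(x.begin(), x.end());
  return std::max(std::fabs(*mn), std::fabs(*mx));
}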
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/subtractor_output.h b/third_party/libwebrtc/modules/audio_processing/aec3/subtractor_output.h
new file mode 100644
index 0000000000..d2d12082c6
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/subtractor_output.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_SUBTRACTOR_OUTPUT_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_SUBTRACTOR_OUTPUT_H_
+
+#include <array>
+
+#include "api/array_view.h"
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "modules/audio_processing/aec3/fft_data.h"
+
+namespace webrtc {
+
+// Stores the values being returned from the echo subtractor for a single
+// capture channel.
+struct SubtractorOutput {
+ SubtractorOutput();
+ ~SubtractorOutput();
+
+ std::array<float, kBlockSize> s_refined;
+ std::array<float, kBlockSize> s_coarse;
+ std::array<float, kBlockSize> e_refined;
+ std::array<float, kBlockSize> e_coarse;
+ FftData E_refined;
+ std::array<float, kFftLengthBy2Plus1> E2_refined;
+ std::array<float, kFftLengthBy2Plus1> E2_coarse;
+ float s2_refined = 0.f;
+ float s2_coarse = 0.f;
+ float e2_refined = 0.f;
+ float e2_coarse = 0.f;
+ float y2 = 0.f;
+ float s_refined_max_abs = 0.f;
+ float s_coarse_max_abs = 0.f;
+
+ // Reset the struct content.
+ void Reset();
+
+ // Updates the powers of the signals.
+ void ComputeMetrics(rtc::ArrayView<const float> y);
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AEC3_SUBTRACTOR_OUTPUT_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/subtractor_output_analyzer.cc b/third_party/libwebrtc/modules/audio_processing/aec3/subtractor_output_analyzer.cc
new file mode 100644
index 0000000000..baf0600161
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/subtractor_output_analyzer.cc
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/subtractor_output_analyzer.h"
+
+#include <algorithm>
+
+#include "modules/audio_processing/aec3/aec3_common.h"
+
+namespace webrtc {
+
+SubtractorOutputAnalyzer::SubtractorOutputAnalyzer(size_t num_capture_channels)
+ : filters_converged_(num_capture_channels, false) {}
+
+void SubtractorOutputAnalyzer::Update(
+ rtc::ArrayView<const SubtractorOutput> subtractor_output,
+ bool* any_filter_converged,
+ bool* any_coarse_filter_converged,
+ bool* all_filters_diverged) {
+  RTC_DCHECK(any_filter_converged);
+  RTC_DCHECK(any_coarse_filter_converged);
+  RTC_DCHECK(all_filters_diverged);
+ RTC_DCHECK_EQ(subtractor_output.size(), filters_converged_.size());
+
+ *any_filter_converged = false;
+ *any_coarse_filter_converged = false;
+ *all_filters_diverged = true;
+
+ for (size_t ch = 0; ch < subtractor_output.size(); ++ch) {
+ const float y2 = subtractor_output[ch].y2;
+ const float e2_refined = subtractor_output[ch].e2_refined;
+ const float e2_coarse = subtractor_output[ch].e2_coarse;
+
+ constexpr float kConvergenceThreshold = 50 * 50 * kBlockSize;
+ constexpr float kConvergenceThresholdLowLevel = 20 * 20 * kBlockSize;
+ bool refined_filter_converged =
+ e2_refined < 0.5f * y2 && y2 > kConvergenceThreshold;
+ bool coarse_filter_converged_strict =
+ e2_coarse < 0.05f * y2 && y2 > kConvergenceThreshold;
+ bool coarse_filter_converged_relaxed =
+ e2_coarse < 0.2f * y2 && y2 > kConvergenceThresholdLowLevel;
+ float min_e2 = std::min(e2_refined, e2_coarse);
+ bool filter_diverged = min_e2 > 1.5f * y2 && y2 > 30.f * 30.f * kBlockSize;
+ filters_converged_[ch] =
+ refined_filter_converged || coarse_filter_converged_strict;
+
+ *any_filter_converged = *any_filter_converged || filters_converged_[ch];
+ *any_coarse_filter_converged =
+ *any_coarse_filter_converged || coarse_filter_converged_relaxed;
+ *all_filters_diverged = *all_filters_diverged && filter_diverged;
+ }
+}
+
+void SubtractorOutputAnalyzer::HandleEchoPathChange() {
+ std::fill(filters_converged_.begin(), filters_converged_.end(), false);
+}
+
+} // namespace webrtc
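The constants above set absolute energy floors (RMS levels of roughly 50, 20, and 30 per sample, squared and scaled by the block size) plus relative error-to-input ratios. Restated as a standalone classifier with the same thresholds (an illustrative sketch, not the WebRTC API):

#include <algorithm>

struct Verdict {
  bool refined_converged;
  bool coarse_converged_strict;
  bool coarse_converged_relaxed;
  bool diverged;
};

Verdict Classify(float y2, float e2_refined, float e2_coarse) {
  constexpr float kBlock = 64.f;                   // kBlockSize.
  constexpr float kThr = 50.f * 50.f * kBlock;     // Nominal level floor.
  constexpr float kThrLow = 20.f * 20.f * kBlock;  // Low-level floor.
  Verdict v;
  v.refined_converged = e2_refined < 0.5f * y2 && y2 > kThr;
  v.coarse_converged_strict = e2_coarse < 0.05f * y2 && y2 > kThr;
  v.coarse_converged_relaxed = e2_coarse < 0.2f * y2 && y2 > kThrLow;
  const float min_e2 = std::min(e2_refined, e2_coarse);
  v.diverged = min_e2 > 1.5f * y2 && y2 > 30.f * 30.f * kBlock;
  return v;
}

Below the 50^2 * 64 floor no strict convergence is reported regardless of the error ratio, which keeps near-silent input from being mistaken for a converged filter.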
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/subtractor_output_analyzer.h b/third_party/libwebrtc/modules/audio_processing/aec3/subtractor_output_analyzer.h
new file mode 100644
index 0000000000..32707dbb19
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/subtractor_output_analyzer.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_SUBTRACTOR_OUTPUT_ANALYZER_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_SUBTRACTOR_OUTPUT_ANALYZER_H_
+
+#include <vector>
+
+#include "modules/audio_processing/aec3/subtractor_output.h"
+
+namespace webrtc {
+
+// Class for analyzing the properties of the subtractor output.
+class SubtractorOutputAnalyzer {
+ public:
+ explicit SubtractorOutputAnalyzer(size_t num_capture_channels);
+ ~SubtractorOutputAnalyzer() = default;
+
+  // Analyzes the subtractor output.
+ void Update(rtc::ArrayView<const SubtractorOutput> subtractor_output,
+ bool* any_filter_converged,
+ bool* any_coarse_filter_converged,
+ bool* all_filters_diverged);
+
+ const std::vector<bool>& ConvergedFilters() const {
+ return filters_converged_;
+ }
+
+ // Handle echo path change.
+ void HandleEchoPathChange();
+
+ private:
+ std::vector<bool> filters_converged_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AEC3_SUBTRACTOR_OUTPUT_ANALYZER_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/subtractor_unittest.cc b/third_party/libwebrtc/modules/audio_processing/aec3/subtractor_unittest.cc
new file mode 100644
index 0000000000..56b9cec9f1
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/subtractor_unittest.cc
@@ -0,0 +1,320 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/subtractor.h"
+
+#include <algorithm>
+#include <memory>
+#include <numeric>
+#include <string>
+
+#include "modules/audio_processing/aec3/aec_state.h"
+#include "modules/audio_processing/aec3/render_delay_buffer.h"
+#include "modules/audio_processing/test/echo_canceller_test_tools.h"
+#include "modules/audio_processing/utility/cascaded_biquad_filter.h"
+#include "rtc_base/random.h"
+#include "rtc_base/strings/string_builder.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+std::vector<float> RunSubtractorTest(
+ size_t num_render_channels,
+ size_t num_capture_channels,
+ int num_blocks_to_process,
+ int delay_samples,
+ int refined_filter_length_blocks,
+ int coarse_filter_length_blocks,
+ bool uncorrelated_inputs,
+ const std::vector<int>& blocks_with_echo_path_changes) {
+ ApmDataDumper data_dumper(42);
+ constexpr int kSampleRateHz = 48000;
+ constexpr size_t kNumBands = NumBandsForRate(kSampleRateHz);
+ EchoCanceller3Config config;
+ config.filter.refined.length_blocks = refined_filter_length_blocks;
+ config.filter.coarse.length_blocks = coarse_filter_length_blocks;
+
+ Subtractor subtractor(config, num_render_channels, num_capture_channels,
+ &data_dumper, DetectOptimization());
+ absl::optional<DelayEstimate> delay_estimate;
+ Block x(kNumBands, num_render_channels);
+ Block y(/*num_bands=*/1, num_capture_channels);
+ std::array<float, kBlockSize> x_old;
+ std::vector<SubtractorOutput> output(num_capture_channels);
+ config.delay.default_delay = 1;
+ std::unique_ptr<RenderDelayBuffer> render_delay_buffer(
+ RenderDelayBuffer::Create(config, kSampleRateHz, num_render_channels));
+ RenderSignalAnalyzer render_signal_analyzer(config);
+ Random random_generator(42U);
+ Aec3Fft fft;
+ std::vector<std::array<float, kFftLengthBy2Plus1>> Y2(num_capture_channels);
+ std::vector<std::array<float, kFftLengthBy2Plus1>> E2_refined(
+ num_capture_channels);
+ std::array<float, kFftLengthBy2Plus1> E2_coarse;
+ AecState aec_state(config, num_capture_channels);
+ x_old.fill(0.f);
+ for (auto& Y2_ch : Y2) {
+ Y2_ch.fill(0.f);
+ }
+ for (auto& E2_refined_ch : E2_refined) {
+ E2_refined_ch.fill(0.f);
+ }
+ E2_coarse.fill(0.f);
+
+ std::vector<std::vector<std::unique_ptr<DelayBuffer<float>>>> delay_buffer(
+ num_capture_channels);
+  for (size_t capture_ch = 0; capture_ch < num_capture_channels;
+       ++capture_ch) {
+ delay_buffer[capture_ch].resize(num_render_channels);
+ for (size_t render_ch = 0; render_ch < num_render_channels; ++render_ch) {
+ delay_buffer[capture_ch][render_ch] =
+ std::make_unique<DelayBuffer<float>>(delay_samples);
+ }
+ }
+
+ // [B,A] = butter(2,100/8000,'high')
+ constexpr CascadedBiQuadFilter::BiQuadCoefficients
+ kHighPassFilterCoefficients = {{0.97261f, -1.94523f, 0.97261f},
+ {-1.94448f, 0.94598f}};
+ std::vector<std::unique_ptr<CascadedBiQuadFilter>> x_hp_filter(
+ num_render_channels);
+ for (size_t ch = 0; ch < num_render_channels; ++ch) {
+ x_hp_filter[ch] =
+ std::make_unique<CascadedBiQuadFilter>(kHighPassFilterCoefficients, 1);
+ }
+ std::vector<std::unique_ptr<CascadedBiQuadFilter>> y_hp_filter(
+ num_capture_channels);
+ for (size_t ch = 0; ch < num_capture_channels; ++ch) {
+ y_hp_filter[ch] =
+ std::make_unique<CascadedBiQuadFilter>(kHighPassFilterCoefficients, 1);
+ }
+
+ for (int k = 0; k < num_blocks_to_process; ++k) {
+ for (size_t render_ch = 0; render_ch < num_render_channels; ++render_ch) {
+ RandomizeSampleVector(&random_generator, x.View(/*band=*/0, render_ch));
+ }
+ if (uncorrelated_inputs) {
+ for (size_t capture_ch = 0; capture_ch < num_capture_channels;
+ ++capture_ch) {
+ RandomizeSampleVector(&random_generator,
+ y.View(/*band=*/0, capture_ch));
+ }
+ } else {
+ for (size_t capture_ch = 0; capture_ch < num_capture_channels;
+ ++capture_ch) {
+ rtc::ArrayView<float> y_view = y.View(/*band=*/0, capture_ch);
+ for (size_t render_ch = 0; render_ch < num_render_channels;
+ ++render_ch) {
+ std::array<float, kBlockSize> y_channel;
+ delay_buffer[capture_ch][render_ch]->Delay(
+ x.View(/*band=*/0, render_ch), y_channel);
+          for (size_t sample = 0; sample < kBlockSize; ++sample) {
+            y_view[sample] += y_channel[sample] / num_render_channels;
+          }
+ }
+ }
+ }
+ for (size_t ch = 0; ch < num_render_channels; ++ch) {
+ x_hp_filter[ch]->Process(x.View(/*band=*/0, ch));
+ }
+ for (size_t ch = 0; ch < num_capture_channels; ++ch) {
+ y_hp_filter[ch]->Process(y.View(/*band=*/0, ch));
+ }
+
+ render_delay_buffer->Insert(x);
+ if (k == 0) {
+ render_delay_buffer->Reset();
+ }
+ render_delay_buffer->PrepareCaptureProcessing();
+ render_signal_analyzer.Update(*render_delay_buffer->GetRenderBuffer(),
+ aec_state.MinDirectPathFilterDelay());
+
+ // Handle echo path changes.
+ if (std::find(blocks_with_echo_path_changes.begin(),
+ blocks_with_echo_path_changes.end(),
+ k) != blocks_with_echo_path_changes.end()) {
+ subtractor.HandleEchoPathChange(EchoPathVariability(
+ true, EchoPathVariability::DelayAdjustment::kNewDetectedDelay,
+ false));
+ }
+ subtractor.Process(*render_delay_buffer->GetRenderBuffer(), y,
+ render_signal_analyzer, aec_state, output);
+
+ aec_state.HandleEchoPathChange(EchoPathVariability(
+ false, EchoPathVariability::DelayAdjustment::kNone, false));
+ aec_state.Update(delay_estimate, subtractor.FilterFrequencyResponses(),
+ subtractor.FilterImpulseResponses(),
+ *render_delay_buffer->GetRenderBuffer(), E2_refined, Y2,
+ output);
+ }
+
+ std::vector<float> results(num_capture_channels);
+ for (size_t ch = 0; ch < num_capture_channels; ++ch) {
+ const float output_power = std::inner_product(
+ output[ch].e_refined.begin(), output[ch].e_refined.end(),
+ output[ch].e_refined.begin(), 0.f);
+ const float y_power =
+ std::inner_product(y.begin(/*band=*/0, ch), y.end(/*band=*/0, ch),
+ y.begin(/*band=*/0, ch), 0.f);
+    if (y_power == 0.f) {
+      ADD_FAILURE();
+      results[ch] = -1.f;
+      continue;
+    }
+    results[ch] = output_power / y_power;
+ }
+ return results;
+}
+
+std::string ProduceDebugText(size_t num_render_channels,
+ size_t num_capture_channels,
+ size_t delay,
+ int filter_length_blocks) {
+ rtc::StringBuilder ss;
+ ss << "delay: " << delay << ", ";
+ ss << "filter_length_blocks:" << filter_length_blocks << ", ";
+ ss << "num_render_channels:" << num_render_channels << ", ";
+ ss << "num_capture_channels:" << num_capture_channels;
+ return ss.Release();
+}
+
+} // namespace
+
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+
+// Verifies that the check for a null data dumper works.
+TEST(SubtractorDeathTest, NullDataDumper) {
+ EXPECT_DEATH(
+ Subtractor(EchoCanceller3Config(), 1, 1, nullptr, DetectOptimization()),
+ "");
+}
+
+#endif
+
+// Verifies that the subtractor is able to converge on correlated data.
+TEST(Subtractor, Convergence) {
+ std::vector<int> blocks_with_echo_path_changes;
+ for (size_t filter_length_blocks : {12, 20, 30}) {
+ for (size_t delay_samples : {0, 64, 150, 200, 301}) {
+ SCOPED_TRACE(ProduceDebugText(1, 1, delay_samples, filter_length_blocks));
+ std::vector<float> echo_to_nearend_powers = RunSubtractorTest(
+ 1, 1, 2500, delay_samples, filter_length_blocks, filter_length_blocks,
+ false, blocks_with_echo_path_changes);
+
+ for (float echo_to_nearend_power : echo_to_nearend_powers) {
+ EXPECT_GT(0.1f, echo_to_nearend_power);
+ }
+ }
+ }
+}
+
+// Verifies that the subtractor is able to handle the case when the refined
+// filter is longer than the coarse filter.
+TEST(Subtractor, RefinedFilterLongerThanCoarseFilter) {
+ std::vector<int> blocks_with_echo_path_changes;
+ std::vector<float> echo_to_nearend_powers = RunSubtractorTest(
+ 1, 1, 400, 64, 20, 15, false, blocks_with_echo_path_changes);
+ for (float echo_to_nearend_power : echo_to_nearend_powers) {
+ EXPECT_GT(0.5f, echo_to_nearend_power);
+ }
+}
+
+// Verifies that the subtractor is able to handle the case when the coarse
+// filter is longer than the refined filter.
+TEST(Subtractor, CoarseFilterLongerThanRefinedFilter) {
+ std::vector<int> blocks_with_echo_path_changes;
+ std::vector<float> echo_to_nearend_powers = RunSubtractorTest(
+ 1, 1, 400, 64, 15, 20, false, blocks_with_echo_path_changes);
+ for (float echo_to_nearend_power : echo_to_nearend_powers) {
+ EXPECT_GT(0.5f, echo_to_nearend_power);
+ }
+}
+
+// Verifies that the subtractor does not converge on uncorrelated signals.
+TEST(Subtractor, NonConvergenceOnUncorrelatedSignals) {
+ std::vector<int> blocks_with_echo_path_changes;
+ for (size_t filter_length_blocks : {12, 20, 30}) {
+ for (size_t delay_samples : {0, 64, 150, 200, 301}) {
+ SCOPED_TRACE(ProduceDebugText(1, 1, delay_samples, filter_length_blocks));
+
+ std::vector<float> echo_to_nearend_powers = RunSubtractorTest(
+ 1, 1, 3000, delay_samples, filter_length_blocks, filter_length_blocks,
+ true, blocks_with_echo_path_changes);
+ for (float echo_to_nearend_power : echo_to_nearend_powers) {
+ EXPECT_NEAR(1.f, echo_to_nearend_power, 0.1);
+ }
+ }
+ }
+}
+
+class SubtractorMultiChannelUpToEightRender
+ : public ::testing::Test,
+ public ::testing::WithParamInterface<std::tuple<size_t, size_t>> {};
+
+#if defined(NDEBUG)
+INSTANTIATE_TEST_SUITE_P(NonDebugMultiChannel,
+ SubtractorMultiChannelUpToEightRender,
+ ::testing::Combine(::testing::Values(1, 2, 8),
+ ::testing::Values(1, 2, 4)));
+#else
+INSTANTIATE_TEST_SUITE_P(DebugMultiChannel,
+ SubtractorMultiChannelUpToEightRender,
+ ::testing::Combine(::testing::Values(1, 2),
+ ::testing::Values(1, 2)));
+#endif
+
+// Verifies that the subtractor is able to converge on correlated data.
+TEST_P(SubtractorMultiChannelUpToEightRender, Convergence) {
+ const size_t num_render_channels = std::get<0>(GetParam());
+ const size_t num_capture_channels = std::get<1>(GetParam());
+
+ std::vector<int> blocks_with_echo_path_changes;
+ size_t num_blocks_to_process = 2500 * num_render_channels;
+ std::vector<float> echo_to_nearend_powers = RunSubtractorTest(
+ num_render_channels, num_capture_channels, num_blocks_to_process, 64, 20,
+ 20, false, blocks_with_echo_path_changes);
+
+ for (float echo_to_nearend_power : echo_to_nearend_powers) {
+ EXPECT_GT(0.1f, echo_to_nearend_power);
+ }
+}
+
+class SubtractorMultiChannelUpToFourRender
+ : public ::testing::Test,
+ public ::testing::WithParamInterface<std::tuple<size_t, size_t>> {};
+
+#if defined(NDEBUG)
+INSTANTIATE_TEST_SUITE_P(NonDebugMultiChannel,
+ SubtractorMultiChannelUpToFourRender,
+ ::testing::Combine(::testing::Values(1, 2, 4),
+ ::testing::Values(1, 2, 4)));
+#else
+INSTANTIATE_TEST_SUITE_P(DebugMultiChannel,
+ SubtractorMultiChannelUpToFourRender,
+ ::testing::Combine(::testing::Values(1, 2),
+ ::testing::Values(1, 2)));
+#endif
+
+// Verifies that the subtractor does not converge on uncorrelated signals.
+TEST_P(SubtractorMultiChannelUpToFourRender,
+ NonConvergenceOnUncorrelatedSignals) {
+ const size_t num_render_channels = std::get<0>(GetParam());
+ const size_t num_capture_channels = std::get<1>(GetParam());
+
+ std::vector<int> blocks_with_echo_path_changes;
+ size_t num_blocks_to_process = 5000 * num_render_channels;
+ std::vector<float> echo_to_nearend_powers = RunSubtractorTest(
+ num_render_channels, num_capture_channels, num_blocks_to_process, 64, 20,
+ 20, true, blocks_with_echo_path_changes);
+ for (float echo_to_nearend_power : echo_to_nearend_powers) {
+ EXPECT_LT(.8f, echo_to_nearend_power);
+ EXPECT_NEAR(1.f, echo_to_nearend_power, 0.25f);
+ }
+}
+} // namespace webrtc
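In the correlated case, the synthetic echo path used by RunSubtractorTest() is a pure per-channel delay followed by averaging across render channels, so a sufficiently long adaptive filter can model it exactly, which is why convergence is expected. A condensed sketch of that mixing step; DelayLine here is a hypothetical stand-in for the DelayBuffer test utility:

#include <cstddef>
#include <deque>
#include <vector>

// Hypothetical fixed-delay line (stand-in for DelayBuffer<float>).
class DelayLine {
 public:
  explicit DelayLine(size_t delay) : buffer_(delay, 0.f) {}
  float Process(float x) {
    buffer_.push_back(x);
    const float out = buffer_.front();
    buffer_.pop_front();
    return out;
  }

 private:
  std::deque<float> buffer_;
};

// y[k] = (1/N) * sum_ch x_ch[k - delay_ch], as in the test's capture setup.
void MixEcho(const std::vector<std::vector<float>>& x,
             std::vector<DelayLine>& delays,
             std::vector<float>* y) {
  for (size_t k = 0; k < y->size(); ++k) {
    float sum = 0.f;
    for (size_t ch = 0; ch < x.size(); ++ch) {
      sum += delays[ch].Process(x[ch][k]);
    }
    (*y)[k] = sum / x.size();
  }
}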
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/suppression_filter.cc b/third_party/libwebrtc/modules/audio_processing/aec3/suppression_filter.cc
new file mode 100644
index 0000000000..83ded425d5
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/suppression_filter.cc
@@ -0,0 +1,180 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/suppression_filter.h"
+
+#include <algorithm>
+#include <cmath>
+#include <cstring>
+#include <functional>
+#include <iterator>
+
+#include "modules/audio_processing/aec3/vector_math.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/numerics/safe_minmax.h"
+
+namespace webrtc {
+namespace {
+
+// Square-root Hanning window, generated with the Matlab command
+// win = sqrt(hanning(128)).
+const float kSqrtHanning[kFftLength] = {
+ 0.00000000000000f, 0.02454122852291f, 0.04906767432742f, 0.07356456359967f,
+ 0.09801714032956f, 0.12241067519922f, 0.14673047445536f, 0.17096188876030f,
+ 0.19509032201613f, 0.21910124015687f, 0.24298017990326f, 0.26671275747490f,
+ 0.29028467725446f, 0.31368174039889f, 0.33688985339222f, 0.35989503653499f,
+ 0.38268343236509f, 0.40524131400499f, 0.42755509343028f, 0.44961132965461f,
+ 0.47139673682600f, 0.49289819222978f, 0.51410274419322f, 0.53499761988710f,
+ 0.55557023301960f, 0.57580819141785f, 0.59569930449243f, 0.61523159058063f,
+ 0.63439328416365f, 0.65317284295378f, 0.67155895484702f, 0.68954054473707f,
+ 0.70710678118655f, 0.72424708295147f, 0.74095112535496f, 0.75720884650648f,
+ 0.77301045336274f, 0.78834642762661f, 0.80320753148064f, 0.81758481315158f,
+ 0.83146961230255f, 0.84485356524971f, 0.85772861000027f, 0.87008699110871f,
+ 0.88192126434835f, 0.89322430119552f, 0.90398929312344f, 0.91420975570353f,
+ 0.92387953251129f, 0.93299279883474f, 0.94154406518302f, 0.94952818059304f,
+ 0.95694033573221f, 0.96377606579544f, 0.97003125319454f, 0.97570213003853f,
+ 0.98078528040323f, 0.98527764238894f, 0.98917650996478f, 0.99247953459871f,
+ 0.99518472667220f, 0.99729045667869f, 0.99879545620517f, 0.99969881869620f,
+ 1.00000000000000f, 0.99969881869620f, 0.99879545620517f, 0.99729045667869f,
+ 0.99518472667220f, 0.99247953459871f, 0.98917650996478f, 0.98527764238894f,
+ 0.98078528040323f, 0.97570213003853f, 0.97003125319454f, 0.96377606579544f,
+ 0.95694033573221f, 0.94952818059304f, 0.94154406518302f, 0.93299279883474f,
+ 0.92387953251129f, 0.91420975570353f, 0.90398929312344f, 0.89322430119552f,
+ 0.88192126434835f, 0.87008699110871f, 0.85772861000027f, 0.84485356524971f,
+ 0.83146961230255f, 0.81758481315158f, 0.80320753148064f, 0.78834642762661f,
+ 0.77301045336274f, 0.75720884650648f, 0.74095112535496f, 0.72424708295147f,
+ 0.70710678118655f, 0.68954054473707f, 0.67155895484702f, 0.65317284295378f,
+ 0.63439328416365f, 0.61523159058063f, 0.59569930449243f, 0.57580819141785f,
+ 0.55557023301960f, 0.53499761988710f, 0.51410274419322f, 0.49289819222978f,
+ 0.47139673682600f, 0.44961132965461f, 0.42755509343028f, 0.40524131400499f,
+ 0.38268343236509f, 0.35989503653499f, 0.33688985339222f, 0.31368174039889f,
+ 0.29028467725446f, 0.26671275747490f, 0.24298017990326f, 0.21910124015687f,
+ 0.19509032201613f, 0.17096188876030f, 0.14673047445536f, 0.12241067519922f,
+ 0.09801714032956f, 0.07356456359967f, 0.04906767432742f, 0.02454122852291f};
+
+} // namespace
+
+SuppressionFilter::SuppressionFilter(Aec3Optimization optimization,
+ int sample_rate_hz,
+ size_t num_capture_channels)
+ : optimization_(optimization),
+ sample_rate_hz_(sample_rate_hz),
+ num_capture_channels_(num_capture_channels),
+ fft_(),
+ e_output_old_(NumBandsForRate(sample_rate_hz_),
+ std::vector<std::array<float, kFftLengthBy2>>(
+ num_capture_channels_)) {
+ RTC_DCHECK(ValidFullBandRate(sample_rate_hz_));
+ for (size_t b = 0; b < e_output_old_.size(); ++b) {
+ for (size_t ch = 0; ch < e_output_old_[b].size(); ++ch) {
+ e_output_old_[b][ch].fill(0.f);
+ }
+ }
+}
+
+SuppressionFilter::~SuppressionFilter() = default;
+
+void SuppressionFilter::ApplyGain(
+ rtc::ArrayView<const FftData> comfort_noise,
+ rtc::ArrayView<const FftData> comfort_noise_high_band,
+ const std::array<float, kFftLengthBy2Plus1>& suppression_gain,
+ float high_bands_gain,
+ rtc::ArrayView<const FftData> E_lowest_band,
+ Block* e) {
+ RTC_DCHECK(e);
+ RTC_DCHECK_EQ(e->NumBands(), NumBandsForRate(sample_rate_hz_));
+
+ // Comfort noise gain is sqrt(1-g^2), where g is the suppression gain.
+ std::array<float, kFftLengthBy2Plus1> noise_gain;
+ for (size_t i = 0; i < kFftLengthBy2Plus1; ++i) {
+ noise_gain[i] = 1.f - suppression_gain[i] * suppression_gain[i];
+ }
+ aec3::VectorMath(optimization_).Sqrt(noise_gain);
+
+ const float high_bands_noise_scaling =
+ 0.4f * std::sqrt(1.f - high_bands_gain * high_bands_gain);
+
+ for (size_t ch = 0; ch < num_capture_channels_; ++ch) {
+ FftData E;
+
+ // Analysis filterbank.
+ E.Assign(E_lowest_band[ch]);
+
+ for (size_t i = 0; i < kFftLengthBy2Plus1; ++i) {
+ // Apply suppression gains.
+ float E_real = E.re[i] * suppression_gain[i];
+ float E_imag = E.im[i] * suppression_gain[i];
+
+ // Scale and add the comfort noise.
+ E.re[i] = E_real + noise_gain[i] * comfort_noise[ch].re[i];
+ E.im[i] = E_imag + noise_gain[i] * comfort_noise[ch].im[i];
+ }
+
+ // Synthesis filterbank.
+ std::array<float, kFftLength> e_extended;
+ constexpr float kIfftNormalization = 2.f / kFftLength;
+ fft_.Ifft(E, &e_extended);
+
+ auto e0 = e->View(/*band=*/0, ch);
+ float* e0_old = e_output_old_[0][ch].data();
+
+ // Window and add the first half of e_extended with the second half of
+ // e_extended from the previous block.
+ for (size_t i = 0; i < kFftLengthBy2; ++i) {
+ float e0_i = e0_old[i] * kSqrtHanning[kFftLengthBy2 + i];
+ e0_i += e_extended[i] * kSqrtHanning[i];
+ e0[i] = e0_i * kIfftNormalization;
+ }
+
+ // The second half of e_extended is stored for the succeeding frame.
+ std::copy(e_extended.begin() + kFftLengthBy2,
+ e_extended.begin() + kFftLength,
+ std::begin(e_output_old_[0][ch]));
+
+ // Apply suppression gain to upper bands.
+ for (int b = 1; b < e->NumBands(); ++b) {
+ auto e_band = e->View(b, ch);
+ for (size_t i = 0; i < kFftLengthBy2; ++i) {
+ e_band[i] *= high_bands_gain;
+ }
+ }
+
+ // Add comfort noise to band 1.
+ if (e->NumBands() > 1) {
+ E.Assign(comfort_noise_high_band[ch]);
+ std::array<float, kFftLength> time_domain_high_band_noise;
+ fft_.Ifft(E, &time_domain_high_band_noise);
+
+ auto e1 = e->View(/*band=*/1, ch);
+ const float gain = high_bands_noise_scaling * kIfftNormalization;
+ for (size_t i = 0; i < kFftLengthBy2; ++i) {
+ e1[i] += time_domain_high_band_noise[i] * gain;
+ }
+ }
+
+ // Delay upper bands to match the delay of the filter bank.
+ for (int b = 1; b < e->NumBands(); ++b) {
+ auto e_band = e->View(b, ch);
+ float* e_band_old = e_output_old_[b][ch].data();
+ for (size_t i = 0; i < kFftLengthBy2; ++i) {
+ std::swap(e_band[i], e_band_old[i]);
+ }
+ }
+
+ // Clamp output of all bands.
+ for (int b = 0; b < e->NumBands(); ++b) {
+ auto e_band = e->View(b, ch);
+ for (size_t i = 0; i < kFftLengthBy2; ++i) {
+ e_band[i] = rtc::SafeClamp(e_band[i], -32768.f, 32767.f);
+ }
+ }
+ }
+}
+
+} // namespace webrtc
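Two properties of the code above are worth spelling out. First, the comfort-noise gain sqrt(1 - g^2) is power-complementary to the suppression gain g, so per bin the combined power g^2 + (1 - g^2) equals one and the output level stays roughly constant as suppression deepens. Second, both analysis and synthesis use the square-root Hanning window; with 50% overlap the product of the two windows is a Hann window whose overlapped sum is constant, which makes the overlap-add reconstruction exact up to the IFFT normalization applied in the code. A per-bin sketch of the noise gain (illustrative; kBins equals kFftLengthBy2Plus1 for a 128-point FFT):

#include <array>
#include <cmath>
#include <cstddef>

constexpr size_t kBins = 65;  // kFftLengthBy2Plus1.

// Power-complementary comfort-noise gain: g_cn = sqrt(1 - g^2).
std::array<float, kBins> ComfortNoiseGain(
    const std::array<float, kBins>& suppression_gain) {
  std::array<float, kBins> noise_gain;
  for (size_t i = 0; i < kBins; ++i) {
    const float g = suppression_gain[i];
    noise_gain[i] = std::sqrt(1.f - g * g);
  }
  return noise_gain;
}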
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/suppression_filter.h b/third_party/libwebrtc/modules/audio_processing/aec3/suppression_filter.h
new file mode 100644
index 0000000000..c18b2334bf
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/suppression_filter.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_SUPPRESSION_FILTER_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_SUPPRESSION_FILTER_H_
+
+#include <array>
+#include <vector>
+
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "modules/audio_processing/aec3/aec3_fft.h"
+#include "modules/audio_processing/aec3/block.h"
+#include "modules/audio_processing/aec3/fft_data.h"
+
+namespace webrtc {
+
+class SuppressionFilter {
+ public:
+ SuppressionFilter(Aec3Optimization optimization,
+ int sample_rate_hz,
+                    size_t num_capture_channels);
+ ~SuppressionFilter();
+
+ SuppressionFilter(const SuppressionFilter&) = delete;
+ SuppressionFilter& operator=(const SuppressionFilter&) = delete;
+
+ void ApplyGain(rtc::ArrayView<const FftData> comfort_noise,
+                 rtc::ArrayView<const FftData> comfort_noise_high_band,
+ const std::array<float, kFftLengthBy2Plus1>& suppression_gain,
+ float high_bands_gain,
+ rtc::ArrayView<const FftData> E_lowest_band,
+ Block* e);
+
+ private:
+ const Aec3Optimization optimization_;
+ const int sample_rate_hz_;
+ const size_t num_capture_channels_;
+ const Aec3Fft fft_;
+ std::vector<std::vector<std::array<float, kFftLengthBy2>>> e_output_old_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AEC3_SUPPRESSION_FILTER_H_
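The e_output_old_ member serves two roles: for band 0 it holds the synthesis overlap from the previous block, and for the upper bands it acts as a one-block delay line, since the windowed analysis/synthesis gives band 0 a latency of kFftLengthBy2 samples and the other bands must be delayed to match (the Delay unit test checks exactly this). A sketch of the swap-based delay used in ApplyGain(), assuming 64-sample half-blocks:

#include <algorithm>
#include <array>
#include <cstddef>

constexpr size_t kHalf = 64;  // kFftLengthBy2 (== kBlockSize).

// Emits the previous block for this band while storing the current one:
// a one-block delay implemented with std::swap.
void DelayOneBlock(std::array<float, kHalf>& band,
                   std::array<float, kHalf>& memory) {
  for (size_t i = 0; i < kHalf; ++i) {
    std::swap(band[i], memory[i]);
  }
}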
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/suppression_filter_unittest.cc b/third_party/libwebrtc/modules/audio_processing/aec3/suppression_filter_unittest.cc
new file mode 100644
index 0000000000..464f5cfed2
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/suppression_filter_unittest.cc
@@ -0,0 +1,257 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/suppression_filter.h"
+
+#include <math.h>
+
+#include <algorithm>
+#include <cmath>
+#include <numeric>
+
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+constexpr float kPi = 3.141592f;
+
+void ProduceSinusoid(int sample_rate_hz,
+ float sinusoidal_frequency_hz,
+ size_t* sample_counter,
+ Block* x) {
+ // Produce a sinusoid of the specified frequency.
+ for (size_t k = *sample_counter, j = 0; k < (*sample_counter + kBlockSize);
+ ++k, ++j) {
+ for (int channel = 0; channel < x->NumChannels(); ++channel) {
+ x->View(/*band=*/0, channel)[j] =
+ 32767.f *
+ std::sin(2.f * kPi * sinusoidal_frequency_hz * k / sample_rate_hz);
+ }
+ }
+ *sample_counter = *sample_counter + kBlockSize;
+
+ for (int band = 1; band < x->NumBands(); ++band) {
+ for (int channel = 0; channel < x->NumChannels(); ++channel) {
+ std::fill(x->begin(band, channel), x->end(band, channel), 0.f);
+ }
+ }
+}
+
+} // namespace
+
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+
+// Verifies the check for null suppressor output.
+TEST(SuppressionFilterDeathTest, NullOutput) {
+ std::vector<FftData> cn(1);
+ std::vector<FftData> cn_high_bands(1);
+ std::vector<FftData> E(1);
+ std::array<float, kFftLengthBy2Plus1> gain;
+
+ EXPECT_DEATH(SuppressionFilter(Aec3Optimization::kNone, 16000, 1)
+ .ApplyGain(cn, cn_high_bands, gain, 1.0f, E, nullptr),
+ "");
+}
+
+// Verifies the check for allowed sample rate.
+TEST(SuppressionFilterDeathTest, ProperSampleRate) {
+ EXPECT_DEATH(SuppressionFilter(Aec3Optimization::kNone, 16001, 1), "");
+}
+
+#endif
+
+// Verifies that no comfort noise is added when the gain is 1.
+TEST(SuppressionFilter, ComfortNoiseInUnityGain) {
+ SuppressionFilter filter(Aec3Optimization::kNone, 48000, 1);
+ std::vector<FftData> cn(1);
+ std::vector<FftData> cn_high_bands(1);
+ std::array<float, kFftLengthBy2Plus1> gain;
+ std::array<float, kFftLengthBy2> e_old_;
+ Aec3Fft fft;
+
+ e_old_.fill(0.f);
+ gain.fill(1.f);
+ cn[0].re.fill(1.f);
+ cn[0].im.fill(1.f);
+ cn_high_bands[0].re.fill(1.f);
+ cn_high_bands[0].im.fill(1.f);
+
+ Block e(3, kBlockSize);
+ Block e_ref = e;
+
+ std::vector<FftData> E(1);
+ fft.PaddedFft(e.View(/*band=*/0, /*channel=*/0), e_old_,
+ Aec3Fft::Window::kSqrtHanning, &E[0]);
+ std::copy(e.begin(/*band=*/0, /*channel=*/0),
+ e.end(/*band=*/0, /*channel=*/0), e_old_.begin());
+
+ filter.ApplyGain(cn, cn_high_bands, gain, 1.f, E, &e);
+
+ for (int band = 0; band < e.NumBands(); ++band) {
+ for (int channel = 0; channel < e.NumChannels(); ++channel) {
+ const auto e_view = e.View(band, channel);
+ const auto e_ref_view = e_ref.View(band, channel);
+ for (size_t sample = 0; sample < e_view.size(); ++sample) {
+ EXPECT_EQ(e_ref_view[sample], e_view[sample]);
+ }
+ }
+ }
+}
+
+// Verifies that the suppressor is able to suppress a signal.
+TEST(SuppressionFilter, SignalSuppression) {
+ constexpr int kSampleRateHz = 48000;
+ constexpr size_t kNumBands = NumBandsForRate(kSampleRateHz);
+ constexpr size_t kNumChannels = 1;
+
+ SuppressionFilter filter(Aec3Optimization::kNone, kSampleRateHz, 1);
+ std::vector<FftData> cn(1);
+ std::vector<FftData> cn_high_bands(1);
+ std::array<float, kFftLengthBy2> e_old_;
+ Aec3Fft fft;
+ std::array<float, kFftLengthBy2Plus1> gain;
+ Block e(kNumBands, kNumChannels);
+ e_old_.fill(0.f);
+
+ gain.fill(1.f);
+ std::for_each(gain.begin() + 10, gain.end(), [](float& a) { a = 0.f; });
+
+ cn[0].re.fill(0.f);
+ cn[0].im.fill(0.f);
+ cn_high_bands[0].re.fill(0.f);
+ cn_high_bands[0].im.fill(0.f);
+
+ size_t sample_counter = 0;
+
+ float e0_input = 0.f;
+ float e0_output = 0.f;
+ for (size_t k = 0; k < 100; ++k) {
+ ProduceSinusoid(16000, 16000 * 40 / kFftLengthBy2 / 2, &sample_counter, &e);
+ e0_input = std::inner_product(e.begin(/*band=*/0, /*channel=*/0),
+ e.end(/*band=*/0, /*channel=*/0),
+ e.begin(/*band=*/0, /*channel=*/0), e0_input);
+
+ std::vector<FftData> E(1);
+ fft.PaddedFft(e.View(/*band=*/0, /*channel=*/0), e_old_,
+ Aec3Fft::Window::kSqrtHanning, &E[0]);
+ std::copy(e.begin(/*band=*/0, /*channel=*/0),
+ e.end(/*band=*/0, /*channel=*/0), e_old_.begin());
+
+ filter.ApplyGain(cn, cn_high_bands, gain, 1.f, E, &e);
+ e0_output = std::inner_product(
+ e.begin(/*band=*/0, /*channel=*/0), e.end(/*band=*/0, /*channel=*/0),
+ e.begin(/*band=*/0, /*channel=*/0), e0_output);
+ }
+
+ EXPECT_LT(e0_output, e0_input / 1000.f);
+}
+
+// Verifies that the suppressor is able to pass a desired signal through
+// while applying suppression to some frequencies.
+TEST(SuppressionFilter, SignalTransparency) {
+ constexpr size_t kNumChannels = 1;
+ constexpr int kSampleRateHz = 48000;
+ constexpr size_t kNumBands = NumBandsForRate(kSampleRateHz);
+
+ SuppressionFilter filter(Aec3Optimization::kNone, kSampleRateHz, 1);
+ std::vector<FftData> cn(1);
+ std::array<float, kFftLengthBy2> e_old_;
+ Aec3Fft fft;
+ std::vector<FftData> cn_high_bands(1);
+ std::array<float, kFftLengthBy2Plus1> gain;
+ Block e(kNumBands, kNumChannels);
+ e_old_.fill(0.f);
+ gain.fill(1.f);
+ std::for_each(gain.begin() + 30, gain.end(), [](float& a) { a = 0.f; });
+
+ cn[0].re.fill(0.f);
+ cn[0].im.fill(0.f);
+ cn_high_bands[0].re.fill(0.f);
+ cn_high_bands[0].im.fill(0.f);
+
+ size_t sample_counter = 0;
+
+ float e0_input = 0.f;
+ float e0_output = 0.f;
+ for (size_t k = 0; k < 100; ++k) {
+ ProduceSinusoid(16000, 16000 * 10 / kFftLengthBy2 / 2, &sample_counter, &e);
+ e0_input = std::inner_product(e.begin(/*band=*/0, /*channel=*/0),
+ e.end(/*band=*/0, /*channel=*/0),
+ e.begin(/*band=*/0, /*channel=*/0), e0_input);
+
+ std::vector<FftData> E(1);
+ fft.PaddedFft(e.View(/*band=*/0, /*channel=*/0), e_old_,
+ Aec3Fft::Window::kSqrtHanning, &E[0]);
+ std::copy(e.begin(/*band=*/0, /*channel=*/0),
+ e.end(/*band=*/0, /*channel=*/0), e_old_.begin());
+
+ filter.ApplyGain(cn, cn_high_bands, gain, 1.f, E, &e);
+ e0_output = std::inner_product(
+ e.begin(/*band=*/0, /*channel=*/0), e.end(/*band=*/0, /*channel=*/0),
+ e.begin(/*band=*/0, /*channel=*/0), e0_output);
+ }
+
+ EXPECT_LT(0.9f * e0_input, e0_output);
+}
+
+// Verifies the delay that the suppressor introduces.
+TEST(SuppressionFilter, Delay) {
+ constexpr size_t kNumChannels = 1;
+ constexpr int kSampleRateHz = 48000;
+ constexpr size_t kNumBands = NumBandsForRate(kSampleRateHz);
+
+ SuppressionFilter filter(Aec3Optimization::kNone, kSampleRateHz, 1);
+ std::vector<FftData> cn(1);
+ std::vector<FftData> cn_high_bands(1);
+ std::array<float, kFftLengthBy2> e_old_;
+ Aec3Fft fft;
+ std::array<float, kFftLengthBy2Plus1> gain;
+ Block e(kNumBands, kNumChannels);
+
+ e_old_.fill(0.f);  // Avoid reading uninitialized data in the first block.
+ gain.fill(1.f);
+
+ cn[0].re.fill(0.f);
+ cn[0].im.fill(0.f);
+ cn_high_bands[0].re.fill(0.f);
+ cn_high_bands[0].im.fill(0.f);
+
+ for (size_t k = 0; k < 100; ++k) {
+ for (size_t band = 0; band < kNumBands; ++band) {
+ for (size_t channel = 0; channel < kNumChannels; ++channel) {
+ auto e_view = e.View(band, channel);
+ for (size_t sample = 0; sample < kBlockSize; ++sample) {
+ e_view[sample] = k * kBlockSize + sample + channel;
+ }
+ }
+ }
+
+ std::vector<FftData> E(1);
+ fft.PaddedFft(e.View(/*band=*/0, /*channel=*/0), e_old_,
+ Aec3Fft::Window::kSqrtHanning, &E[0]);
+ std::copy(e.begin(/*band=*/0, /*channel=*/0),
+ e.end(/*band=*/0, /*channel=*/0), e_old_.begin());
+
+ filter.ApplyGain(cn, cn_high_bands, gain, 1.f, E, &e);
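+ // After the initial transient (k <= 2), the output should equal the input
+ // delayed by one block (kBlockSize samples).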
+ if (k > 2) {
+ for (size_t band = 0; band < kNumBands; ++band) {
+ for (size_t channel = 0; channel < kNumChannels; ++channel) {
+ const auto e_view = e.View(band, channel);
+ for (size_t sample = 0; sample < kBlockSize; ++sample) {
+ EXPECT_NEAR(k * kBlockSize + sample - kBlockSize + channel,
+ e_view[sample], 0.01);
+ }
+ }
+ }
+ }
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/suppression_gain.cc b/third_party/libwebrtc/modules/audio_processing/aec3/suppression_gain.cc
new file mode 100644
index 0000000000..037dabaabe
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/suppression_gain.cc
@@ -0,0 +1,465 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/suppression_gain.h"
+
+#include <math.h>
+#include <stddef.h>
+
+#include <algorithm>
+#include <numeric>
+
+#include "modules/audio_processing/aec3/dominant_nearend_detector.h"
+#include "modules/audio_processing/aec3/moving_average.h"
+#include "modules/audio_processing/aec3/subband_nearend_detector.h"
+#include "modules/audio_processing/aec3/vector_math.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+#include "rtc_base/checks.h"
+#include "system_wrappers/include/field_trial.h"
+
+namespace webrtc {
+namespace {
+
+void LimitLowFrequencyGains(std::array<float, kFftLengthBy2Plus1>* gain) {
+ // Limit the low-frequency gains so that the high-pass filter's effect on the
+ // lowest-frequency gains does not influence the overall achieved gain.
+ (*gain)[0] = (*gain)[1] = std::min((*gain)[1], (*gain)[2]);
+}
+
+void LimitHighFrequencyGains(bool conservative_hf_suppression,
+ std::array<float, kFftLengthBy2Plus1>* gain) {
+ // Limit the high-frequency gains to avoid echo leakage due to an imperfect
+ // filter.
+ constexpr size_t kFirstBandToLimit = (64 * 2000) / 8000;
+ const float min_upper_gain = (*gain)[kFirstBandToLimit];
+ std::for_each(
+ gain->begin() + kFirstBandToLimit + 1, gain->end(),
+ [min_upper_gain](float& a) { a = std::min(a, min_upper_gain); });
+ (*gain)[kFftLengthBy2] = (*gain)[kFftLengthBy2Minus1];
+
+ if (conservative_hf_suppression) {
+ // Limits the gain in the frequencies for which the adaptive filter has not
+ // converged.
+ // TODO(peah): Make adaptive to take the actual filter error into account.
+ constexpr size_t kUpperAccurateBandPlus1 = 29;
+
+ constexpr float oneByBandsInSum =
+ 1 / static_cast<float>(kUpperAccurateBandPlus1 - 20);
+ const float hf_gain_bound =
+ std::accumulate(gain->begin() + 20,
+ gain->begin() + kUpperAccurateBandPlus1, 0.f) *
+ oneByBandsInSum;
+
+ std::for_each(
+ gain->begin() + kUpperAccurateBandPlus1, gain->end(),
+ [hf_gain_bound](float& a) { a = std::min(a, hf_gain_bound); });
+ }
+}
+
+// Scales the echo according to assessed audibility at the other end.
+void WeightEchoForAudibility(const EchoCanceller3Config& config,
+ rtc::ArrayView<const float> echo,
+ rtc::ArrayView<float> weighted_echo) {
+ RTC_DCHECK_EQ(kFftLengthBy2Plus1, echo.size());
+ RTC_DCHECK_EQ(kFftLengthBy2Plus1, weighted_echo.size());
+
+ auto weigh = [](float threshold, float normalizer, size_t begin, size_t end,
+ rtc::ArrayView<const float> echo,
+ rtc::ArrayView<float> weighted_echo) {
+ for (size_t k = begin; k < end; ++k) {
+ if (echo[k] < threshold) {
+ float tmp = (threshold - echo[k]) * normalizer;
+ weighted_echo[k] = echo[k] * std::max(0.f, 1.f - tmp * tmp);
+ } else {
+ weighted_echo[k] = echo[k];
+ }
+ }
+ };
+
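+ // Apply band-specific audibility thresholds: bins [0, 3) are treated as low
+ // frequencies, bins [3, 7) as mid frequencies, and the remaining bins as
+ // high frequencies.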
+ float threshold = config.echo_audibility.floor_power *
+ config.echo_audibility.audibility_threshold_lf;
+ float normalizer = 1.f / (threshold - config.echo_audibility.floor_power);
+ weigh(threshold, normalizer, 0, 3, echo, weighted_echo);
+
+ threshold = config.echo_audibility.floor_power *
+ config.echo_audibility.audibility_threshold_mf;
+ normalizer = 1.f / (threshold - config.echo_audibility.floor_power);
+ weigh(threshold, normalizer, 3, 7, echo, weighted_echo);
+
+ threshold = config.echo_audibility.floor_power *
+ config.echo_audibility.audibility_threshold_hf;
+ normalizer = 1.f / (threshold - config.echo_audibility.floor_power);
+ weigh(threshold, normalizer, 7, kFftLengthBy2Plus1, echo, weighted_echo);
+}
+
+} // namespace
+
+std::atomic<int> SuppressionGain::instance_count_(0);
+
+float SuppressionGain::UpperBandsGain(
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>> echo_spectrum,
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>>
+ comfort_noise_spectrum,
+ const absl::optional<int>& narrow_peak_band,
+ bool saturated_echo,
+ const Block& render,
+ const std::array<float, kFftLengthBy2Plus1>& low_band_gain) const {
+ RTC_DCHECK_LT(0, render.NumBands());
+ if (render.NumBands() == 1) {
+ return 1.f;
+ }
+ const int num_render_channels = render.NumChannels();
+
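+ // A narrowband render peak near the top of the lower band suggests a risk of
+ // echo leakage into the upper bands, so attenuate them heavily.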
+ if (narrow_peak_band &&
+ (*narrow_peak_band > static_cast<int>(kFftLengthBy2Plus1 - 10))) {
+ return 0.001f;
+ }
+
+ constexpr size_t kLowBandGainLimit = kFftLengthBy2 / 2;
+ const float gain_below_8_khz = *std::min_element(
+ low_band_gain.begin() + kLowBandGainLimit, low_band_gain.end());
+
+ // Always attenuate the upper bands when there is saturated echo.
+ if (saturated_echo) {
+ return std::min(0.001f, gain_below_8_khz);
+ }
+
+ // Compute the upper and lower band energies.
+ const auto sum_of_squares = [](float a, float b) { return a + b * b; };
+ float low_band_energy = 0.f;
+ for (int ch = 0; ch < num_render_channels; ++ch) {
+ const float channel_energy =
+ std::accumulate(render.begin(/*band=*/0, ch),
+ render.end(/*band=*/0, ch), 0.0f, sum_of_squares);
+ low_band_energy = std::max(low_band_energy, channel_energy);
+ }
+ float high_band_energy = 0.f;
+ for (int k = 1; k < render.NumBands(); ++k) {
+ for (int ch = 0; ch < num_render_channels; ++ch) {
+ const float energy = std::accumulate(
+ render.begin(k, ch), render.end(k, ch), 0.f, sum_of_squares);
+ high_band_energy = std::max(high_band_energy, energy);
+ }
+ }
+
+ // If there is more power in the lower frequencies than in the upper
+ // frequencies, or if the power in the upper frequencies is low, do not bound
+ // the gain in the upper bands.
+ float anti_howling_gain;
+ const float activation_threshold =
+ kBlockSize * config_.suppressor.high_bands_suppression
+ .anti_howling_activation_threshold;
+ if (high_band_energy < std::max(low_band_energy, activation_threshold)) {
+ anti_howling_gain = 1.f;
+ } else {
+ // In all other cases, bound the gain for upper frequencies.
+ RTC_DCHECK_LE(low_band_energy, high_band_energy);
+ RTC_DCHECK_NE(0.f, high_band_energy);
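+ // Scale the upper-band energy down towards the low-band energy; the square
+ // root converts the power ratio into a gain.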
+ anti_howling_gain =
+ config_.suppressor.high_bands_suppression.anti_howling_gain *
+ sqrtf(low_band_energy / high_band_energy);
+ }
+
+ float gain_bound = 1.f;
+ if (!dominant_nearend_detector_->IsNearendState()) {
+ // Bound the upper gain during significant echo activity.
+ const auto& cfg = config_.suppressor.high_bands_suppression;
+ auto low_frequency_energy = [](rtc::ArrayView<const float> spectrum) {
+ RTC_DCHECK_LE(16, spectrum.size());
+ return std::accumulate(spectrum.begin() + 1, spectrum.begin() + 16, 0.f);
+ };
+ for (size_t ch = 0; ch < num_capture_channels_; ++ch) {
+ const float echo_sum = low_frequency_energy(echo_spectrum[ch]);
+ const float noise_sum = low_frequency_energy(comfort_noise_spectrum[ch]);
+ if (echo_sum > cfg.enr_threshold * noise_sum) {
+ gain_bound = cfg.max_gain_during_echo;
+ break;
+ }
+ }
+ }
+
+ // Choose the upper-band gain as the minimum of the lower-band gain, the
+ // anti-howling gain, and the echo-activity bound.
+ return std::min(std::min(gain_below_8_khz, anti_howling_gain), gain_bound);
+}
+
+// Computes the gain needed to reduce the echo to an inaudible level.
+void SuppressionGain::GainToNoAudibleEcho(
+ const std::array<float, kFftLengthBy2Plus1>& nearend,
+ const std::array<float, kFftLengthBy2Plus1>& echo,
+ const std::array<float, kFftLengthBy2Plus1>& masker,
+ std::array<float, kFftLengthBy2Plus1>* gain) const {
+ const auto& p = dominant_nearend_detector_->IsNearendState() ? nearend_params_
+ : normal_params_;
+ for (size_t k = 0; k < gain->size(); ++k) {
+ float enr = echo[k] / (nearend[k] + 1.f); // Echo-to-nearend ratio.
+ float emr = echo[k] / (masker[k] + 1.f); // Echo-to-masker (noise) ratio.
+ float g = 1.0f;
+ if (enr > p.enr_transparent_[k] && emr > p.emr_transparent_[k]) {
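+ // Let the gain decrease linearly from 1 at the transparency threshold to 0
+ // at the suppression threshold, floored by the gain that brings the
+ // echo-to-masker ratio down to its transparency level.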
+ g = (p.enr_suppress_[k] - enr) /
+ (p.enr_suppress_[k] - p.enr_transparent_[k]);
+ g = std::max(g, p.emr_transparent_[k] / emr);
+ }
+ (*gain)[k] = g;
+ }
+}
+
+// Computes the minimum gain as the attenuation needed to bring the signal
+// just above the level of the zero sample values.
+void SuppressionGain::GetMinGain(
+ rtc::ArrayView<const float> weighted_residual_echo,
+ rtc::ArrayView<const float> last_nearend,
+ rtc::ArrayView<const float> last_echo,
+ bool low_noise_render,
+ bool saturated_echo,
+ rtc::ArrayView<float> min_gain) const {
+ if (!saturated_echo) {
+ const float min_echo_power =
+ low_noise_render ? config_.echo_audibility.low_render_limit
+ : config_.echo_audibility.normal_render_limit;
+
+ for (size_t k = 0; k < min_gain.size(); ++k) {
+ min_gain[k] = weighted_residual_echo[k] > 0.f
+ ? min_echo_power / weighted_residual_echo[k]
+ : 1.f;
+ min_gain[k] = std::min(min_gain[k], 1.f);
+ }
+
+ if (!initial_state_ ||
+ config_.suppressor.lf_smoothing_during_initial_phase) {
+ const float& dec = dominant_nearend_detector_->IsNearendState()
+ ? nearend_params_.max_dec_factor_lf
+ : normal_params_.max_dec_factor_lf;
+
+ for (int k = 0; k <= config_.suppressor.last_lf_smoothing_band; ++k) {
+ // Make sure the gains of the low frequencies do not decrease too
+ // quickly after strong nearend.
+ if (last_nearend[k] > last_echo[k] ||
+ k <= config_.suppressor.last_permanent_lf_smoothing_band) {
+ min_gain[k] = std::max(min_gain[k], last_gain_[k] * dec);
+ min_gain[k] = std::min(min_gain[k], 1.f);
+ }
+ }
+ }
+ } else {
+ std::fill(min_gain.begin(), min_gain.end(), 0.f);
+ }
+}
+
+// Compute the maximum gain by limiting the gain increase from the previous
+// gain.
+void SuppressionGain::GetMaxGain(rtc::ArrayView<float> max_gain) const {
+ const auto& inc = dominant_nearend_detector_->IsNearendState()
+ ? nearend_params_.max_inc_factor
+ : normal_params_.max_inc_factor;
+ const auto& floor = config_.suppressor.floor_first_increase;
+ for (size_t k = 0; k < max_gain.size(); ++k) {
+ max_gain[k] = std::min(std::max(last_gain_[k] * inc, floor), 1.f);
+ }
+}
+
+void SuppressionGain::LowerBandGain(
+ bool low_noise_render,
+ const AecState& aec_state,
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>>
+ suppressor_input,
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>> residual_echo,
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>> comfort_noise,
+ bool clock_drift,
+ std::array<float, kFftLengthBy2Plus1>* gain) {
+ gain->fill(1.f);
+ const bool saturated_echo = aec_state.SaturatedEcho();
+ std::array<float, kFftLengthBy2Plus1> max_gain;
+ GetMaxGain(max_gain);
+
+ for (size_t ch = 0; ch < num_capture_channels_; ++ch) {
+ std::array<float, kFftLengthBy2Plus1> G;
+ std::array<float, kFftLengthBy2Plus1> nearend;
+ nearend_smoothers_[ch].Average(suppressor_input[ch], nearend);
+
+ // Weight echo power in terms of audibility.
+ std::array<float, kFftLengthBy2Plus1> weighted_residual_echo;
+ WeightEchoForAudibility(config_, residual_echo[ch], weighted_residual_echo);
+
+ std::array<float, kFftLengthBy2Plus1> min_gain;
+ GetMinGain(weighted_residual_echo, last_nearend_[ch], last_echo_[ch],
+ low_noise_render, saturated_echo, min_gain);
+
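+ // The comfort noise of the first channel is used as the masker estimate for
+ // all capture channels.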
+ GainToNoAudibleEcho(nearend, weighted_residual_echo, comfort_noise[0], &G);
+
+ // Clamp gains.
+ for (size_t k = 0; k < gain->size(); ++k) {
+ G[k] = std::max(std::min(G[k], max_gain[k]), min_gain[k]);
+ (*gain)[k] = std::min((*gain)[k], G[k]);
+ }
+
+ // Store data required for the gain computation of the next block.
+ std::copy(nearend.begin(), nearend.end(), last_nearend_[ch].begin());
+ std::copy(weighted_residual_echo.begin(), weighted_residual_echo.end(),
+ last_echo_[ch].begin());
+ }
+
+ LimitLowFrequencyGains(gain);
+ // Use conservative high-frequency gains during clock-drift or when not in
+ // dominant nearend.
+ if (!dominant_nearend_detector_->IsNearendState() || clock_drift ||
+ config_.suppressor.conservative_hf_suppression) {
+ LimitHighFrequencyGains(config_.suppressor.conservative_hf_suppression,
+ gain);
+ }
+
+ // Store computed gains.
+ std::copy(gain->begin(), gain->end(), last_gain_.begin());
+
+ // Transform gains to amplitude domain.
+ aec3::VectorMath(optimization_).Sqrt(*gain);
+}
+
+SuppressionGain::SuppressionGain(const EchoCanceller3Config& config,
+ Aec3Optimization optimization,
+ int sample_rate_hz,
+ size_t num_capture_channels)
+ : data_dumper_(new ApmDataDumper(instance_count_.fetch_add(1) + 1)),
+ optimization_(optimization),
+ config_(config),
+ num_capture_channels_(num_capture_channels),
+ state_change_duration_blocks_(
+ static_cast<int>(config_.filter.config_change_duration_blocks)),
+ last_nearend_(num_capture_channels_, {0}),
+ last_echo_(num_capture_channels_, {0}),
+ nearend_smoothers_(
+ num_capture_channels_,
+ aec3::MovingAverage(kFftLengthBy2Plus1,
+ config.suppressor.nearend_average_blocks)),
+ nearend_params_(config_.suppressor.last_lf_band,
+ config_.suppressor.first_hf_band,
+ config_.suppressor.nearend_tuning),
+ normal_params_(config_.suppressor.last_lf_band,
+ config_.suppressor.first_hf_band,
+ config_.suppressor.normal_tuning),
+ use_unbounded_echo_spectrum_(config.suppressor.dominant_nearend_detection
+ .use_unbounded_echo_spectrum) {
+ RTC_DCHECK_LT(0, state_change_duration_blocks_);
+ last_gain_.fill(1.f);
+ if (config_.suppressor.use_subband_nearend_detection) {
+ dominant_nearend_detector_ = std::make_unique<SubbandNearendDetector>(
+ config_.suppressor.subband_nearend_detection, num_capture_channels_);
+ } else {
+ dominant_nearend_detector_ = std::make_unique<DominantNearendDetector>(
+ config_.suppressor.dominant_nearend_detection, num_capture_channels_);
+ }
+ RTC_DCHECK(dominant_nearend_detector_);
+}
+
+SuppressionGain::~SuppressionGain() = default;
+
+void SuppressionGain::GetGain(
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>>
+ nearend_spectrum,
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>> echo_spectrum,
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>>
+ residual_echo_spectrum,
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>>
+ residual_echo_spectrum_unbounded,
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>>
+ comfort_noise_spectrum,
+ const RenderSignalAnalyzer& render_signal_analyzer,
+ const AecState& aec_state,
+ const Block& render,
+ bool clock_drift,
+ float* high_bands_gain,
+ std::array<float, kFftLengthBy2Plus1>* low_band_gain) {
+ RTC_DCHECK(high_bands_gain);
+ RTC_DCHECK(low_band_gain);
+
+ // Choose residual echo spectrum for dominant nearend detection.
+ const auto echo = use_unbounded_echo_spectrum_
+ ? residual_echo_spectrum_unbounded
+ : residual_echo_spectrum;
+
+ // Update the nearend state selection.
+ dominant_nearend_detector_->Update(nearend_spectrum, echo,
+ comfort_noise_spectrum, initial_state_);
+
+ // Compute gain for the lower band.
+ bool low_noise_render = low_render_detector_.Detect(render);
+ LowerBandGain(low_noise_render, aec_state, nearend_spectrum,
+ residual_echo_spectrum, comfort_noise_spectrum, clock_drift,
+ low_band_gain);
+
+ // Compute the gain for the upper bands.
+ const absl::optional<int> narrow_peak_band =
+ render_signal_analyzer.NarrowPeakBand();
+
+ *high_bands_gain =
+ UpperBandsGain(echo_spectrum, comfort_noise_spectrum, narrow_peak_band,
+ aec_state.SaturatedEcho(), render, *low_band_gain);
+
+ data_dumper_->DumpRaw("aec3_dominant_nearend",
+ dominant_nearend_detector_->IsNearendState());
+}
+
+void SuppressionGain::SetInitialState(bool state) {
+ initial_state_ = state;
+ if (state) {
+ initial_state_change_counter_ = state_change_duration_blocks_;
+ } else {
+ initial_state_change_counter_ = 0;
+ }
+}
+
+// Detects when the render signal can be considered to have low power and
+// consist of stationary noise.
+bool SuppressionGain::LowNoiseRenderDetector::Detect(const Block& render) {
+ float x2_sum = 0.f;
+ float x2_max = 0.f;
+ for (int ch = 0; ch < render.NumChannels(); ++ch) {
+ for (float x_k : render.View(/*band=*/0, ch)) {
+ const float x2 = x_k * x_k;
+ x2_sum += x2;
+ x2_max = std::max(x2_max, x2);
+ }
+ }
+ x2_sum = x2_sum / render.NumChannels();
+
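+ // The threshold corresponds to a 64-sample block with an RMS amplitude of
+ // roughly 50 on the +/-32768 full-scale range.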
+ constexpr float kThreshold = 50.f * 50.f * 64.f;
+ const bool low_noise_render =
+ average_power_ < kThreshold && x2_max < 3 * average_power_;
+ average_power_ = average_power_ * 0.9f + x2_sum * 0.1f;
+ return low_noise_render;
+}
+
+SuppressionGain::GainParameters::GainParameters(
+ int last_lf_band,
+ int first_hf_band,
+ const EchoCanceller3Config::Suppressor::Tuning& tuning)
+ : max_inc_factor(tuning.max_inc_factor),
+ max_dec_factor_lf(tuning.max_dec_factor_lf) {
+ // Compute per-band masking thresholds.
+ RTC_DCHECK_LT(last_lf_band, first_hf_band);
+ auto& lf = tuning.mask_lf;
+ auto& hf = tuning.mask_hf;
+ RTC_DCHECK_LT(lf.enr_transparent, lf.enr_suppress);
+ RTC_DCHECK_LT(hf.enr_transparent, hf.enr_suppress);
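+ // The tuning parameters are crossfaded linearly from their low-frequency
+ // values to their high-frequency values over the bands between last_lf_band
+ // and first_hf_band.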
+ for (int k = 0; k < static_cast<int>(kFftLengthBy2Plus1); k++) {
+ float a;
+ if (k <= last_lf_band) {
+ a = 0.f;
+ } else if (k < first_hf_band) {
+ a = (k - last_lf_band) / static_cast<float>(first_hf_band - last_lf_band);
+ } else {
+ a = 1.f;
+ }
+ enr_transparent_[k] = (1 - a) * lf.enr_transparent + a * hf.enr_transparent;
+ enr_suppress_[k] = (1 - a) * lf.enr_suppress + a * hf.enr_suppress;
+ emr_transparent_[k] = (1 - a) * lf.emr_transparent + a * hf.emr_transparent;
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/suppression_gain.h b/third_party/libwebrtc/modules/audio_processing/aec3/suppression_gain.h
new file mode 100644
index 0000000000..c19ddd7e30
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/suppression_gain.h
@@ -0,0 +1,145 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_SUPPRESSION_GAIN_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_SUPPRESSION_GAIN_H_
+
+#include <array>
+#include <atomic>
+#include <memory>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/array_view.h"
+#include "api/audio/echo_canceller3_config.h"
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "modules/audio_processing/aec3/aec_state.h"
+#include "modules/audio_processing/aec3/fft_data.h"
+#include "modules/audio_processing/aec3/moving_average.h"
+#include "modules/audio_processing/aec3/nearend_detector.h"
+#include "modules/audio_processing/aec3/render_signal_analyzer.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+
+namespace webrtc {
+
+class SuppressionGain {
+ public:
+ SuppressionGain(const EchoCanceller3Config& config,
+ Aec3Optimization optimization,
+ int sample_rate_hz,
+ size_t num_capture_channels);
+ ~SuppressionGain();
+
+ SuppressionGain(const SuppressionGain&) = delete;
+ SuppressionGain& operator=(const SuppressionGain&) = delete;
+
+ void GetGain(
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>>
+ nearend_spectrum,
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>> echo_spectrum,
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>>
+ residual_echo_spectrum,
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>>
+ residual_echo_spectrum_unbounded,
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>>
+ comfort_noise_spectrum,
+ const RenderSignalAnalyzer& render_signal_analyzer,
+ const AecState& aec_state,
+ const Block& render,
+ bool clock_drift,
+ float* high_bands_gain,
+ std::array<float, kFftLengthBy2Plus1>* low_band_gain);
+
+ bool IsDominantNearend() {
+ return dominant_nearend_detector_->IsNearendState();
+ }
+
+ // Toggles the usage of the initial state.
+ void SetInitialState(bool state);
+
+ private:
+ // Computes the gain to apply for the bands beyond the first band.
+ float UpperBandsGain(
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>> echo_spectrum,
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>>
+ comfort_noise_spectrum,
+ const absl::optional<int>& narrow_peak_band,
+ bool saturated_echo,
+ const Block& render,
+ const std::array<float, kFftLengthBy2Plus1>& low_band_gain) const;
+
+ void GainToNoAudibleEcho(const std::array<float, kFftLengthBy2Plus1>& nearend,
+ const std::array<float, kFftLengthBy2Plus1>& echo,
+ const std::array<float, kFftLengthBy2Plus1>& masker,
+ std::array<float, kFftLengthBy2Plus1>* gain) const;
+
+ void LowerBandGain(
+ bool stationary_with_low_power,
+ const AecState& aec_state,
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>>
+ suppressor_input,
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>> residual_echo,
+ rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>> comfort_noise,
+ bool clock_drift,
+ std::array<float, kFftLengthBy2Plus1>* gain);
+
+ void GetMinGain(rtc::ArrayView<const float> weighted_residual_echo,
+ rtc::ArrayView<const float> last_nearend,
+ rtc::ArrayView<const float> last_echo,
+ bool low_noise_render,
+ bool saturated_echo,
+ rtc::ArrayView<float> min_gain) const;
+
+ void GetMaxGain(rtc::ArrayView<float> max_gain) const;
+
+ class LowNoiseRenderDetector {
+ public:
+ bool Detect(const Block& render);
+
+ private:
+ float average_power_ = 32768.f * 32768.f;
+ };
+
+ struct GainParameters {
+ explicit GainParameters(
+ int last_lf_band,
+ int first_hf_band,
+ const EchoCanceller3Config::Suppressor::Tuning& tuning);
+ const float max_inc_factor;
+ const float max_dec_factor_lf;
+ std::array<float, kFftLengthBy2Plus1> enr_transparent_;
+ std::array<float, kFftLengthBy2Plus1> enr_suppress_;
+ std::array<float, kFftLengthBy2Plus1> emr_transparent_;
+ };
+
+ static std::atomic<int> instance_count_;
+ std::unique_ptr<ApmDataDumper> data_dumper_;
+ const Aec3Optimization optimization_;
+ const EchoCanceller3Config config_;
+ const size_t num_capture_channels_;
+ const int state_change_duration_blocks_;
+ std::array<float, kFftLengthBy2Plus1> last_gain_;
+ std::vector<std::array<float, kFftLengthBy2Plus1>> last_nearend_;
+ std::vector<std::array<float, kFftLengthBy2Plus1>> last_echo_;
+ LowNoiseRenderDetector low_render_detector_;
+ bool initial_state_ = true;
+ int initial_state_change_counter_ = 0;
+ std::vector<aec3::MovingAverage> nearend_smoothers_;
+ const GainParameters nearend_params_;
+ const GainParameters normal_params_;
+ // Determines if the dominant nearend detector uses the unbounded residual
+ // echo spectrum.
+ const bool use_unbounded_echo_spectrum_;
+ std::unique_ptr<NearendDetector> dominant_nearend_detector_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AEC3_SUPPRESSION_GAIN_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/suppression_gain_unittest.cc b/third_party/libwebrtc/modules/audio_processing/aec3/suppression_gain_unittest.cc
new file mode 100644
index 0000000000..02de706c77
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/suppression_gain_unittest.cc
@@ -0,0 +1,149 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/suppression_gain.h"
+
+#include "modules/audio_processing/aec3/aec_state.h"
+#include "modules/audio_processing/aec3/render_delay_buffer.h"
+#include "modules/audio_processing/aec3/subtractor.h"
+#include "modules/audio_processing/aec3/subtractor_output.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+#include "rtc_base/checks.h"
+#include "system_wrappers/include/cpu_features_wrapper.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace aec3 {
+
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+
+// Verifies that the check for non-null output gains works.
+TEST(SuppressionGainDeathTest, NullOutputGains) {
+ std::vector<std::array<float, kFftLengthBy2Plus1>> E2(1, {0.0f});
+ std::vector<std::array<float, kFftLengthBy2Plus1>> R2(1, {0.0f});
+ std::vector<std::array<float, kFftLengthBy2Plus1>> R2_unbounded(1, {0.0f});
+ std::vector<std::array<float, kFftLengthBy2Plus1>> S2(1);
+ std::vector<std::array<float, kFftLengthBy2Plus1>> N2(1, {0.0f});
+ for (auto& S2_k : S2) {
+ S2_k.fill(0.1f);
+ }
+ FftData E;
+ FftData Y;
+ E.re.fill(0.0f);
+ E.im.fill(0.0f);
+ Y.re.fill(0.0f);
+ Y.im.fill(0.0f);
+
+ float high_bands_gain;
+ AecState aec_state(EchoCanceller3Config{}, 1);
+ EXPECT_DEATH(
+ SuppressionGain(EchoCanceller3Config{}, DetectOptimization(), 16000, 1)
+ .GetGain(E2, S2, R2, R2_unbounded, N2,
+ RenderSignalAnalyzer((EchoCanceller3Config{})), aec_state,
+ Block(3, 1), false, &high_bands_gain, nullptr),
+ "");
+}
+
+#endif
+
+// Does a sanity check that the gains are correctly computed.
+TEST(SuppressionGain, BasicGainComputation) {
+ constexpr size_t kNumRenderChannels = 1;
+ constexpr size_t kNumCaptureChannels = 2;
+ constexpr int kSampleRateHz = 16000;
+ constexpr size_t kNumBands = NumBandsForRate(kSampleRateHz);
+ SuppressionGain suppression_gain(EchoCanceller3Config(), DetectOptimization(),
+ kSampleRateHz, kNumCaptureChannels);
+ RenderSignalAnalyzer analyzer(EchoCanceller3Config{});
+ float high_bands_gain;
+ std::vector<std::array<float, kFftLengthBy2Plus1>> E2(kNumCaptureChannels);
+ std::vector<std::array<float, kFftLengthBy2Plus1>> S2(kNumCaptureChannels,
+ {0.0f});
+ std::vector<std::array<float, kFftLengthBy2Plus1>> Y2(kNumCaptureChannels);
+ std::vector<std::array<float, kFftLengthBy2Plus1>> R2(kNumCaptureChannels);
+ std::vector<std::array<float, kFftLengthBy2Plus1>> R2_unbounded(
+ kNumCaptureChannels);
+ std::vector<std::array<float, kFftLengthBy2Plus1>> N2(kNumCaptureChannels);
+ std::array<float, kFftLengthBy2Plus1> g;
+ std::vector<SubtractorOutput> output(kNumCaptureChannels);
+ Block x(kNumBands, kNumRenderChannels);
+ EchoCanceller3Config config;
+ AecState aec_state(config, kNumCaptureChannels);
+ ApmDataDumper data_dumper(42);
+ Subtractor subtractor(config, kNumRenderChannels, kNumCaptureChannels,
+ &data_dumper, DetectOptimization());
+ std::unique_ptr<RenderDelayBuffer> render_delay_buffer(
+ RenderDelayBuffer::Create(config, kSampleRateHz, kNumRenderChannels));
+ absl::optional<DelayEstimate> delay_estimate;
+
+ // Ensure that a strong noise is detected to mask any echoes.
+ for (size_t ch = 0; ch < kNumCaptureChannels; ++ch) {
+ E2[ch].fill(10.f);
+ Y2[ch].fill(10.f);
+ R2[ch].fill(0.1f);
+ R2_unbounded[ch].fill(0.1f);
+ N2[ch].fill(100.0f);
+ }
+ for (auto& subtractor_output : output) {
+ subtractor_output.Reset();
+ }
+
+ // Ensure that the gain is no longer forced to zero.
+ for (int k = 0; k <= kNumBlocksPerSecond / 5 + 1; ++k) {
+ aec_state.Update(delay_estimate, subtractor.FilterFrequencyResponses(),
+ subtractor.FilterImpulseResponses(),
+ *render_delay_buffer->GetRenderBuffer(), E2, Y2, output);
+ }
+
+ for (int k = 0; k < 100; ++k) {
+ aec_state.Update(delay_estimate, subtractor.FilterFrequencyResponses(),
+ subtractor.FilterImpulseResponses(),
+ *render_delay_buffer->GetRenderBuffer(), E2, Y2, output);
+ suppression_gain.GetGain(E2, S2, R2, R2_unbounded, N2, analyzer, aec_state,
+ x, false, &high_bands_gain, &g);
+ }
+ std::for_each(g.begin(), g.end(),
+ [](float a) { EXPECT_NEAR(1.0f, a, 0.001f); });
+
+ // Ensure that a strong nearend is detected to mask any echoes.
+ for (size_t ch = 0; ch < kNumCaptureChannels; ++ch) {
+ E2[ch].fill(100.f);
+ Y2[ch].fill(100.f);
+ R2[ch].fill(0.1f);
+ R2_unbounded[ch].fill(0.1f);
+ S2[ch].fill(0.1f);
+ N2[ch].fill(0.f);
+ }
+
+ for (int k = 0; k < 100; ++k) {
+ aec_state.Update(delay_estimate, subtractor.FilterFrequencyResponses(),
+ subtractor.FilterImpulseResponses(),
+ *render_delay_buffer->GetRenderBuffer(), E2, Y2, output);
+ suppression_gain.GetGain(E2, S2, R2, R2_unbounded, N2, analyzer, aec_state,
+ x, false, &high_bands_gain, &g);
+ }
+ std::for_each(g.begin(), g.end(),
+ [](float a) { EXPECT_NEAR(1.0f, a, 0.001f); });
+
+ // Add a strong echo to one of the channels and ensure that it is suppressed.
+ E2[1].fill(1000000000.0f);
+ R2[1].fill(10000000000000.0f);
+ R2_unbounded[1].fill(10000000000000.0f);
+
+ for (int k = 0; k < 10; ++k) {
+ suppression_gain.GetGain(E2, S2, R2, R2_unbounded, N2, analyzer, aec_state,
+ x, false, &high_bands_gain, &g);
+ }
+ std::for_each(g.begin(), g.end(),
+ [](float a) { EXPECT_NEAR(0.0f, a, 0.001f); });
+}
+
+} // namespace aec3
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/transparent_mode.cc b/third_party/libwebrtc/modules/audio_processing/aec3/transparent_mode.cc
new file mode 100644
index 0000000000..489f53f4f1
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/transparent_mode.cc
@@ -0,0 +1,243 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/transparent_mode.h"
+
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "system_wrappers/include/field_trial.h"
+
+namespace webrtc {
+namespace {
+
+constexpr size_t kBlocksSinceConvergencedFilterInit = 10000;
+constexpr size_t kBlocksSinceConsistentEstimateInit = 10000;
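+// Initialization values for the convergence counters; large enough to count
+// as "no recent convergence" at startup.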
+
+bool DeactivateTransparentMode() {
+ return field_trial::IsEnabled("WebRTC-Aec3TransparentModeKillSwitch");
+}
+
+bool ActivateTransparentModeHmm() {
+ return field_trial::IsEnabled("WebRTC-Aec3TransparentModeHmm");
+}
+
+} // namespace
+
+// Classifier that toggles transparent mode which reduces echo suppression when
+// headsets are used.
+class TransparentModeImpl : public TransparentMode {
+ public:
+ bool Active() const override { return transparency_activated_; }
+
+ void Reset() override {
+ // Determines if transparent mode is used.
+ transparency_activated_ = false;
+
+ // The estimated probability of being transparent mode.
+ prob_transparent_state_ = 0.f;
+ }
+
+ void Update(int filter_delay_blocks,
+ bool any_filter_consistent,
+ bool any_filter_converged,
+ bool any_coarse_filter_converged,
+ bool all_filters_diverged,
+ bool active_render,
+ bool saturated_capture) override {
+ // The classifier is implemented as a Hidden Markov Model (HMM) with two
+ // hidden states: "normal" and "transparent". The estimated probabilities of
+ // the two states are updated by observing filter convergence during active
+ // render. The filters are less likely to be reported as converged when
+ // there is no echo present in the microphone signal.
+
+ // The constants have been obtained by observing active_render and
+ // any_coarse_filter_converged under varying call scenarios. They
+ // have further been hand tuned to prefer normal state during uncertain
+ // regions (to avoid echo leaks).
+
+ // The model is only updated during active render.
+ if (!active_render)
+ return;
+
+ // Probability of switching from one state to the other.
+ constexpr float kSwitch = 0.000001f;
+
+ // Probability of observing converged filters in states "normal" and
+ // "transparent" during active render.
+ constexpr float kConvergedNormal = 0.01f;
+ constexpr float kConvergedTransparent = 0.001f;
+
+ // Probability of transitioning to transparent state from normal state and
+ // transparent state respectively.
+ constexpr float kA[2] = {kSwitch, 1.f - kSwitch};
+
+ // Probability of the two observations (converged filter or not converged
+ // filter) in normal state and transparent state respectively.
+ constexpr float kB[2][2] = {
+ {1.f - kConvergedNormal, kConvergedNormal},
+ {1.f - kConvergedTransparent, kConvergedTransparent}};
+
+ // Probability of the two states before the update.
+ const float prob_transparent = prob_transparent_state_;
+ const float prob_normal = 1.f - prob_transparent;
+
+ // Probability of transitioning to transparent state.
+ const float prob_transition_transparent =
+ prob_normal * kA[0] + prob_transparent * kA[1];
+ const float prob_transition_normal = 1.f - prob_transition_transparent;
+
+ // Observed output.
+ const int out = static_cast<int>(any_coarse_filter_converged);
+
+ // Joint probabilities of the observed output and the respective states.
+ const float prob_joint_normal = prob_transition_normal * kB[0][out];
+ const float prob_joint_transparent =
+ prob_transition_transparent * kB[1][out];
+
+ // Conditional probability of the transparent state given the observed output.
+ RTC_DCHECK_GT(prob_joint_normal + prob_joint_transparent, 0.f);
+ prob_transparent_state_ =
+ prob_joint_transparent / (prob_joint_normal + prob_joint_transparent);
+
+ // Transparent mode is only activated when its state probability is high.
+ // Dead zone between activation/deactivation thresholds to avoid switching
+ // back and forth.
+ if (prob_transparent_state_ > 0.95f) {
+ transparency_activated_ = true;
+ } else if (prob_transparent_state_ < 0.5f) {
+ transparency_activated_ = false;
+ }
+ }
+
+ private:
+ bool transparency_activated_ = false;
+ float prob_transparent_state_ = 0.f;
+};
+
+// Legacy classifier for toggling transparent mode.
+class LegacyTransparentModeImpl : public TransparentMode {
+ public:
+ explicit LegacyTransparentModeImpl(const EchoCanceller3Config& config)
+ : linear_and_stable_echo_path_(
+ config.echo_removal_control.linear_and_stable_echo_path),
+ active_blocks_since_sane_filter_(kBlocksSinceConsistentEstimateInit),
+ non_converged_sequence_size_(kBlocksSinceConvergencedFilterInit) {}
+
+ bool Active() const override { return transparency_activated_; }
+
+ void Reset() override {
+ non_converged_sequence_size_ = kBlocksSinceConvergencedFilterInit;
+ diverged_sequence_size_ = 0;
+ strong_not_saturated_render_blocks_ = 0;
+ if (linear_and_stable_echo_path_) {
+ recent_convergence_during_activity_ = false;
+ }
+ }
+
+ void Update(int filter_delay_blocks,
+ bool any_filter_consistent,
+ bool any_filter_converged,
+ bool any_coarse_filter_converged,
+ bool all_filters_diverged,
+ bool active_render,
+ bool saturated_capture) override {
+ ++capture_block_counter_;
+ strong_not_saturated_render_blocks_ +=
+ active_render && !saturated_capture ? 1 : 0;
+
+ if (any_filter_consistent && filter_delay_blocks < 5) {
+ sane_filter_observed_ = true;
+ active_blocks_since_sane_filter_ = 0;
+ } else if (active_render) {
+ ++active_blocks_since_sane_filter_;
+ }
+
+ bool sane_filter_recently_seen;
+ if (!sane_filter_observed_) {
+ sane_filter_recently_seen =
+ capture_block_counter_ <= 5 * kNumBlocksPerSecond;
+ } else {
+ sane_filter_recently_seen =
+ active_blocks_since_sane_filter_ <= 30 * kNumBlocksPerSecond;
+ }
+
+ if (any_filter_converged) {
+ recent_convergence_during_activity_ = true;
+ active_non_converged_sequence_size_ = 0;
+ non_converged_sequence_size_ = 0;
+ ++num_converged_blocks_;
+ } else {
+ if (++non_converged_sequence_size_ > 20 * kNumBlocksPerSecond) {
+ num_converged_blocks_ = 0;
+ }
+
+ if (active_render &&
+ ++active_non_converged_sequence_size_ > 60 * kNumBlocksPerSecond) {
+ recent_convergence_during_activity_ = false;
+ }
+ }
+
+ if (!all_filters_diverged) {
+ diverged_sequence_size_ = 0;
+ } else if (++diverged_sequence_size_ >= 60) {
+ // TODO(peah): Change these lines to ensure proper triggering of usable
+ // filter.
+ non_converged_sequence_size_ = kBlocksSinceConvergencedFilterInit;
+ }
+
+ if (active_non_converged_sequence_size_ > 60 * kNumBlocksPerSecond) {
+ finite_erl_recently_detected_ = false;
+ }
+ if (num_converged_blocks_ > 50) {
+ finite_erl_recently_detected_ = true;
+ }
+
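+ // Activate transparency only when no finite ERL has been detected recently,
+ // when no recently seen sane filter with recent convergence exists, and when
+ // the render has been strong for long enough that the filter should have
+ // converged.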
+ if (finite_erl_recently_detected_) {
+ transparency_activated_ = false;
+ } else if (sane_filter_recently_seen &&
+ recent_convergence_during_activity_) {
+ transparency_activated_ = false;
+ } else {
+ const bool filter_should_have_converged =
+ strong_not_saturated_render_blocks_ > 6 * kNumBlocksPerSecond;
+ transparency_activated_ = filter_should_have_converged;
+ }
+ }
+
+ private:
+ const bool linear_and_stable_echo_path_;
+ size_t capture_block_counter_ = 0;
+ bool transparency_activated_ = false;
+ size_t active_blocks_since_sane_filter_;
+ bool sane_filter_observed_ = false;
+ bool finite_erl_recently_detected_ = false;
+ size_t non_converged_sequence_size_;
+ size_t diverged_sequence_size_ = 0;
+ size_t active_non_converged_sequence_size_ = 0;
+ size_t num_converged_blocks_ = 0;
+ bool recent_convergence_during_activity_ = false;
+ size_t strong_not_saturated_render_blocks_ = 0;
+};
+
+std::unique_ptr<TransparentMode> TransparentMode::Create(
+ const EchoCanceller3Config& config) {
+ if (config.ep_strength.bounded_erl || DeactivateTransparentMode()) {
+ RTC_LOG(LS_INFO) << "AEC3 Transparent Mode: Disabled";
+ return nullptr;
+ }
+ if (ActivateTransparentModeHmm()) {
+ RTC_LOG(LS_INFO) << "AEC3 Transparent Mode: HMM";
+ return std::make_unique<TransparentModeImpl>();
+ }
+ RTC_LOG(LS_INFO) << "AEC3 Transparent Mode: Legacy";
+ return std::make_unique<LegacyTransparentModeImpl>(config);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/transparent_mode.h b/third_party/libwebrtc/modules/audio_processing/aec3/transparent_mode.h
new file mode 100644
index 0000000000..bc5dd0391b
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/transparent_mode.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_TRANSPARENT_MODE_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_TRANSPARENT_MODE_H_
+
+#include <memory>
+
+#include "api/audio/echo_canceller3_config.h"
+#include "modules/audio_processing/aec3/aec3_common.h"
+
+namespace webrtc {
+
+// Class for detecting and toggling the transparent mode which causes the
+// suppressor to apply less suppression.
+class TransparentMode {
+ public:
+ static std::unique_ptr<TransparentMode> Create(
+ const EchoCanceller3Config& config);
+
+ virtual ~TransparentMode() {}
+
+ // Returns whether the transparent mode should be active.
+ virtual bool Active() const = 0;
+
+ // Resets the state of the detector.
+ virtual void Reset() = 0;
+
+ // Updates the detection decision based on new data.
+ virtual void Update(int filter_delay_blocks,
+ bool any_filter_consistent,
+ bool any_filter_converged,
+ bool any_coarse_filter_converged,
+ bool all_filters_diverged,
+ bool active_render,
+ bool saturated_capture) = 0;
+};
+
+} // namespace webrtc
+#endif // MODULES_AUDIO_PROCESSING_AEC3_TRANSPARENT_MODE_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/vector_math.h b/third_party/libwebrtc/modules/audio_processing/aec3/vector_math.h
new file mode 100644
index 0000000000..e4d1381ae1
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/vector_math.h
@@ -0,0 +1,229 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_VECTOR_MATH_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_VECTOR_MATH_H_
+
+// Defines WEBRTC_ARCH_X86_FAMILY, used below.
+#include "rtc_base/system/arch.h"
+
+#if defined(WEBRTC_HAS_NEON)
+#include <arm_neon.h>
+#endif
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+#include <emmintrin.h>
+#endif
+#include <math.h>
+
+#include <algorithm>
+#include <array>
+#include <functional>
+
+#include "api/array_view.h"
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace aec3 {
+
+// Provides optimizations for mathematical operations based on vectors.
+class VectorMath {
+ public:
+ explicit VectorMath(Aec3Optimization optimization)
+ : optimization_(optimization) {}
+
+ // Elementwise square root.
+ void SqrtAVX2(rtc::ArrayView<float> x);
+ void Sqrt(rtc::ArrayView<float> x) {
+ switch (optimization_) {
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+ case Aec3Optimization::kSse2: {
+ const int x_size = static_cast<int>(x.size());
+ const int vector_limit = x_size >> 2;
+
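+ // Process four floats per SSE2 register; the scalar loop below handles any
+ // remaining elements.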
+ int j = 0;
+ for (; j < vector_limit * 4; j += 4) {
+ __m128 g = _mm_loadu_ps(&x[j]);
+ g = _mm_sqrt_ps(g);
+ _mm_storeu_ps(&x[j], g);
+ }
+
+ for (; j < x_size; ++j) {
+ x[j] = sqrtf(x[j]);
+ }
+ } break;
+ case Aec3Optimization::kAvx2:
+ SqrtAVX2(x);
+ break;
+#endif
+#if defined(WEBRTC_HAS_NEON)
+ case Aec3Optimization::kNeon: {
+ const int x_size = static_cast<int>(x.size());
+ const int vector_limit = x_size >> 2;
+
+ int j = 0;
+ for (; j < vector_limit * 4; j += 4) {
+ float32x4_t g = vld1q_f32(&x[j]);
+#if !defined(WEBRTC_ARCH_ARM64)
+ float32x4_t y = vrsqrteq_f32(g);
+
+ // Code to handle sqrt(0).
+ // If the input to sqrtf() is zero, a zero will be returned.
+ // If the input to vrsqrteq_f32() is zero, positive infinity is
+ // returned.
+ const uint32x4_t vec_p_inf = vdupq_n_u32(0x7F800000);
+ // Check for division by zero.
+ const uint32x4_t div_by_zero =
+ vceqq_u32(vec_p_inf, vreinterpretq_u32_f32(y));
+ // Zero out the positive-infinity results.
+ y = vreinterpretq_f32_u32(
+ vandq_u32(vmvnq_u32(div_by_zero), vreinterpretq_u32_f32(y)));
+ // From the Arm documentation:
+ // The Newton-Raphson iteration
+ //   y[n+1] = y[n] * (3 - d * (y[n] * y[n])) / 2
+ // converges to (1/√d) if y0 is the result of VRSQRTE applied to d.
+ //
+ // Note: The precision did not improve after 2 iterations.
+ for (int i = 0; i < 2; i++) {
+ y = vmulq_f32(vrsqrtsq_f32(vmulq_f32(y, y), g), y);
+ }
+ // sqrt(g) = g * 1/sqrt(g)
+ g = vmulq_f32(g, y);
+#else
+ g = vsqrtq_f32(g);
+#endif
+ vst1q_f32(&x[j], g);
+ }
+
+ for (; j < x_size; ++j) {
+ x[j] = sqrtf(x[j]);
+ }
+ }
+#endif
+ break;
+ default:
+ std::for_each(x.begin(), x.end(), [](float& a) { a = sqrtf(a); });
+ }
+ }
+
+ // Elementwise vector multiplication z = x * y.
+ void MultiplyAVX2(rtc::ArrayView<const float> x,
+ rtc::ArrayView<const float> y,
+ rtc::ArrayView<float> z);
+ void Multiply(rtc::ArrayView<const float> x,
+ rtc::ArrayView<const float> y,
+ rtc::ArrayView<float> z) {
+ RTC_DCHECK_EQ(z.size(), x.size());
+ RTC_DCHECK_EQ(z.size(), y.size());
+ switch (optimization_) {
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+ case Aec3Optimization::kSse2: {
+ const int x_size = static_cast<int>(x.size());
+ const int vector_limit = x_size >> 2;
+
+ int j = 0;
+ for (; j < vector_limit * 4; j += 4) {
+ const __m128 x_j = _mm_loadu_ps(&x[j]);
+ const __m128 y_j = _mm_loadu_ps(&y[j]);
+ const __m128 z_j = _mm_mul_ps(x_j, y_j);
+ _mm_storeu_ps(&z[j], z_j);
+ }
+
+ for (; j < x_size; ++j) {
+ z[j] = x[j] * y[j];
+ }
+ } break;
+ case Aec3Optimization::kAvx2:
+ MultiplyAVX2(x, y, z);
+ break;
+#endif
+#if defined(WEBRTC_HAS_NEON)
+ case Aec3Optimization::kNeon: {
+ const int x_size = static_cast<int>(x.size());
+ const int vector_limit = x_size >> 2;
+
+ int j = 0;
+ for (; j < vector_limit * 4; j += 4) {
+ const float32x4_t x_j = vld1q_f32(&x[j]);
+ const float32x4_t y_j = vld1q_f32(&y[j]);
+ const float32x4_t z_j = vmulq_f32(x_j, y_j);
+ vst1q_f32(&z[j], z_j);
+ }
+
+ for (; j < x_size; ++j) {
+ z[j] = x[j] * y[j];
+ }
+ } break;
+#endif
+ default:
+ std::transform(x.begin(), x.end(), y.begin(), z.begin(),
+ std::multiplies<float>());
+ }
+ }
+
+ // Elementwise vector accumulation z += x.
+ void AccumulateAVX2(rtc::ArrayView<const float> x, rtc::ArrayView<float> z);
+ void Accumulate(rtc::ArrayView<const float> x, rtc::ArrayView<float> z) {
+ RTC_DCHECK_EQ(z.size(), x.size());
+ switch (optimization_) {
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+ case Aec3Optimization::kSse2: {
+ const int x_size = static_cast<int>(x.size());
+ const int vector_limit = x_size >> 2;
+
+ int j = 0;
+ for (; j < vector_limit * 4; j += 4) {
+ const __m128 x_j = _mm_loadu_ps(&x[j]);
+ __m128 z_j = _mm_loadu_ps(&z[j]);
+ z_j = _mm_add_ps(x_j, z_j);
+ _mm_storeu_ps(&z[j], z_j);
+ }
+
+ for (; j < x_size; ++j) {
+ z[j] += x[j];
+ }
+ } break;
+ case Aec3Optimization::kAvx2:
+ AccumulateAVX2(x, z);
+ break;
+#endif
+#if defined(WEBRTC_HAS_NEON)
+ case Aec3Optimization::kNeon: {
+ const int x_size = static_cast<int>(x.size());
+ const int vector_limit = x_size >> 2;
+
+ int j = 0;
+ for (; j < vector_limit * 4; j += 4) {
+ const float32x4_t x_j = vld1q_f32(&x[j]);
+ float32x4_t z_j = vld1q_f32(&z[j]);
+ z_j = vaddq_f32(z_j, x_j);
+ vst1q_f32(&z[j], z_j);
+ }
+
+ for (; j < x_size; ++j) {
+ z[j] += x[j];
+ }
+ } break;
+#endif
+ default:
+ std::transform(x.begin(), x.end(), z.begin(), z.begin(),
+ std::plus<float>());
+ }
+ }
+
+ private:
+ Aec3Optimization optimization_;
+};
+
+} // namespace aec3
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AEC3_VECTOR_MATH_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/vector_math_avx2.cc b/third_party/libwebrtc/modules/audio_processing/aec3/vector_math_avx2.cc
new file mode 100644
index 0000000000..0b5f3c142e
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/vector_math_avx2.cc
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/vector_math.h"
+
+#include <immintrin.h>
+#include <math.h>
+
+#include "api/array_view.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace aec3 {
+
+// Elementwise square root.
+void VectorMath::SqrtAVX2(rtc::ArrayView<float> x) {
+ const int x_size = static_cast<int>(x.size());
+ const int vector_limit = x_size >> 3;
+
+ int j = 0;
+ for (; j < vector_limit * 8; j += 8) {
+ __m256 g = _mm256_loadu_ps(&x[j]);
+ g = _mm256_sqrt_ps(g);
+ _mm256_storeu_ps(&x[j], g);
+ }
+
+ for (; j < x_size; ++j) {
+ x[j] = sqrtf(x[j]);
+ }
+}
+
+// Elementwise vector multiplication z = x * y.
+void VectorMath::MultiplyAVX2(rtc::ArrayView<const float> x,
+ rtc::ArrayView<const float> y,
+ rtc::ArrayView<float> z) {
+ RTC_DCHECK_EQ(z.size(), x.size());
+ RTC_DCHECK_EQ(z.size(), y.size());
+ const int x_size = static_cast<int>(x.size());
+ const int vector_limit = x_size >> 3;
+
+ int j = 0;
+ for (; j < vector_limit * 8; j += 8) {
+ const __m256 x_j = _mm256_loadu_ps(&x[j]);
+ const __m256 y_j = _mm256_loadu_ps(&y[j]);
+ const __m256 z_j = _mm256_mul_ps(x_j, y_j);
+ _mm256_storeu_ps(&z[j], z_j);
+ }
+
+ for (; j < x_size; ++j) {
+ z[j] = x[j] * y[j];
+ }
+}
+
+// Elementwise vector accumulation z += x.
+void VectorMath::AccumulateAVX2(rtc::ArrayView<const float> x,
+ rtc::ArrayView<float> z) {
+ RTC_DCHECK_EQ(z.size(), x.size());
+ const int x_size = static_cast<int>(x.size());
+ const int vector_limit = x_size >> 3;
+
+ int j = 0;
+ for (; j < vector_limit * 8; j += 8) {
+ const __m256 x_j = _mm256_loadu_ps(&x[j]);
+ __m256 z_j = _mm256_loadu_ps(&z[j]);
+ z_j = _mm256_add_ps(x_j, z_j);
+ _mm256_storeu_ps(&z[j], z_j);
+ }
+
+ for (; j < x_size; ++j) {
+ z[j] += x[j];
+ }
+}
+
+} // namespace aec3
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/vector_math_gn/moz.build b/third_party/libwebrtc/modules/audio_processing/aec3/vector_math_gn/moz.build
new file mode 100644
index 0000000000..873539f6ec
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/vector_math_gn/moz.build
@@ -0,0 +1,193 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("vector_math_gn")
diff --git a/third_party/libwebrtc/modules/audio_processing/aec3/vector_math_unittest.cc b/third_party/libwebrtc/modules/audio_processing/aec3/vector_math_unittest.cc
new file mode 100644
index 0000000000..a9c37e33cf
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec3/vector_math_unittest.cc
@@ -0,0 +1,209 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/vector_math.h"
+
+#include <math.h>
+
+#include "rtc_base/system/arch.h"
+#include "system_wrappers/include/cpu_features_wrapper.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+#if defined(WEBRTC_HAS_NEON)
+
+TEST(VectorMath, Sqrt) {
+ std::array<float, kFftLengthBy2Plus1> x;
+ std::array<float, kFftLengthBy2Plus1> z;
+ std::array<float, kFftLengthBy2Plus1> z_neon;
+
+ for (size_t k = 0; k < x.size(); ++k) {
+ x[k] = (2.f / 3.f) * k;
+ }
+
+ std::copy(x.begin(), x.end(), z.begin());
+ aec3::VectorMath(Aec3Optimization::kNone).Sqrt(z);
+ std::copy(x.begin(), x.end(), z_neon.begin());
+ aec3::VectorMath(Aec3Optimization::kNeon).Sqrt(z_neon);
+ for (size_t k = 0; k < z.size(); ++k) {
+ EXPECT_NEAR(z[k], z_neon[k], 0.0001f);
+ EXPECT_NEAR(sqrtf(x[k]), z_neon[k], 0.0001f);
+ }
+}
+
+TEST(VectorMath, Multiply) {
+ std::array<float, kFftLengthBy2Plus1> x;
+ std::array<float, kFftLengthBy2Plus1> y;
+ std::array<float, kFftLengthBy2Plus1> z;
+ std::array<float, kFftLengthBy2Plus1> z_neon;
+
+ for (size_t k = 0; k < x.size(); ++k) {
+ x[k] = k;
+ y[k] = (2.f / 3.f) * k;
+ }
+
+ aec3::VectorMath(Aec3Optimization::kNone).Multiply(x, y, z);
+ aec3::VectorMath(Aec3Optimization::kNeon).Multiply(x, y, z_neon);
+ for (size_t k = 0; k < z.size(); ++k) {
+ EXPECT_FLOAT_EQ(z[k], z_neon[k]);
+ EXPECT_FLOAT_EQ(x[k] * y[k], z_neon[k]);
+ }
+}
+
+TEST(VectorMath, Accumulate) {
+ std::array<float, kFftLengthBy2Plus1> x;
+ std::array<float, kFftLengthBy2Plus1> z;
+ std::array<float, kFftLengthBy2Plus1> z_neon;
+
+ for (size_t k = 0; k < x.size(); ++k) {
+ x[k] = k;
+ z[k] = z_neon[k] = 2.f * k;
+ }
+
+ aec3::VectorMath(Aec3Optimization::kNone).Accumulate(x, z);
+ aec3::VectorMath(Aec3Optimization::kNeon).Accumulate(x, z_neon);
+ for (size_t k = 0; k < z.size(); ++k) {
+ EXPECT_FLOAT_EQ(z[k], z_neon[k]);
+ EXPECT_FLOAT_EQ(x[k] + 2.f * x[k], z_neon[k]);
+ }
+}
+#endif
+
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+
+TEST(VectorMath, Sse2Sqrt) {
+ if (GetCPUInfo(kSSE2) != 0) {
+ std::array<float, kFftLengthBy2Plus1> x;
+ std::array<float, kFftLengthBy2Plus1> z;
+ std::array<float, kFftLengthBy2Plus1> z_sse2;
+
+ for (size_t k = 0; k < x.size(); ++k) {
+ x[k] = (2.f / 3.f) * k;
+ }
+
+ std::copy(x.begin(), x.end(), z.begin());
+ aec3::VectorMath(Aec3Optimization::kNone).Sqrt(z);
+ std::copy(x.begin(), x.end(), z_sse2.begin());
+ aec3::VectorMath(Aec3Optimization::kSse2).Sqrt(z_sse2);
+ EXPECT_EQ(z, z_sse2);
+ for (size_t k = 0; k < z.size(); ++k) {
+ EXPECT_FLOAT_EQ(z[k], z_sse2[k]);
+ EXPECT_FLOAT_EQ(sqrtf(x[k]), z_sse2[k]);
+ }
+ }
+}
+
+TEST(VectorMath, Avx2Sqrt) {
+ if (GetCPUInfo(kAVX2) != 0) {
+ std::array<float, kFftLengthBy2Plus1> x;
+ std::array<float, kFftLengthBy2Plus1> z;
+ std::array<float, kFftLengthBy2Plus1> z_avx2;
+
+ for (size_t k = 0; k < x.size(); ++k) {
+ x[k] = (2.f / 3.f) * k;
+ }
+
+ std::copy(x.begin(), x.end(), z.begin());
+ aec3::VectorMath(Aec3Optimization::kNone).Sqrt(z);
+ std::copy(x.begin(), x.end(), z_avx2.begin());
+ aec3::VectorMath(Aec3Optimization::kAvx2).Sqrt(z_avx2);
+ EXPECT_EQ(z, z_avx2);
+ for (size_t k = 0; k < z.size(); ++k) {
+ EXPECT_FLOAT_EQ(z[k], z_avx2[k]);
+ EXPECT_FLOAT_EQ(sqrtf(x[k]), z_avx2[k]);
+ }
+ }
+}
+
+TEST(VectorMath, Sse2Multiply) {
+ if (GetCPUInfo(kSSE2) != 0) {
+ std::array<float, kFftLengthBy2Plus1> x;
+ std::array<float, kFftLengthBy2Plus1> y;
+ std::array<float, kFftLengthBy2Plus1> z;
+ std::array<float, kFftLengthBy2Plus1> z_sse2;
+
+ for (size_t k = 0; k < x.size(); ++k) {
+ x[k] = k;
+ y[k] = (2.f / 3.f) * k;
+ }
+
+ aec3::VectorMath(Aec3Optimization::kNone).Multiply(x, y, z);
+ aec3::VectorMath(Aec3Optimization::kSse2).Multiply(x, y, z_sse2);
+ for (size_t k = 0; k < z.size(); ++k) {
+ EXPECT_FLOAT_EQ(z[k], z_sse2[k]);
+ EXPECT_FLOAT_EQ(x[k] * y[k], z_sse2[k]);
+ }
+ }
+}
+
+TEST(VectorMath, Avx2Multiply) {
+ if (GetCPUInfo(kAVX2) != 0) {
+ std::array<float, kFftLengthBy2Plus1> x;
+ std::array<float, kFftLengthBy2Plus1> y;
+ std::array<float, kFftLengthBy2Plus1> z;
+ std::array<float, kFftLengthBy2Plus1> z_avx2;
+
+ for (size_t k = 0; k < x.size(); ++k) {
+ x[k] = k;
+ y[k] = (2.f / 3.f) * k;
+ }
+
+ aec3::VectorMath(Aec3Optimization::kNone).Multiply(x, y, z);
+ aec3::VectorMath(Aec3Optimization::kAvx2).Multiply(x, y, z_avx2);
+ for (size_t k = 0; k < z.size(); ++k) {
+ EXPECT_FLOAT_EQ(z[k], z_avx2[k]);
+ EXPECT_FLOAT_EQ(x[k] * y[k], z_avx2[k]);
+ }
+ }
+}
+
+TEST(VectorMath, Sse2Accumulate) {
+ if (GetCPUInfo(kSSE2) != 0) {
+ std::array<float, kFftLengthBy2Plus1> x;
+ std::array<float, kFftLengthBy2Plus1> z;
+ std::array<float, kFftLengthBy2Plus1> z_sse2;
+
+ for (size_t k = 0; k < x.size(); ++k) {
+ x[k] = k;
+ z[k] = z_sse2[k] = 2.f * k;
+ }
+
+ aec3::VectorMath(Aec3Optimization::kNone).Accumulate(x, z);
+ aec3::VectorMath(Aec3Optimization::kSse2).Accumulate(x, z_sse2);
+ for (size_t k = 0; k < z.size(); ++k) {
+ EXPECT_FLOAT_EQ(z[k], z_sse2[k]);
+ EXPECT_FLOAT_EQ(x[k] + 2.f * x[k], z_sse2[k]);
+ }
+ }
+}
+
+TEST(VectorMath, Avx2Accumulate) {
+ if (GetCPUInfo(kAVX2) != 0) {
+ std::array<float, kFftLengthBy2Plus1> x;
+ std::array<float, kFftLengthBy2Plus1> z;
+ std::array<float, kFftLengthBy2Plus1> z_avx2;
+
+ for (size_t k = 0; k < x.size(); ++k) {
+ x[k] = k;
+ z[k] = z_avx2[k] = 2.f * k;
+ }
+
+ aec3::VectorMath(Aec3Optimization::kNone).Accumulate(x, z);
+ aec3::VectorMath(Aec3Optimization::kAvx2).Accumulate(x, z_avx2);
+ for (size_t k = 0; k < z.size(); ++k) {
+ EXPECT_FLOAT_EQ(z[k], z_avx2[k]);
+ EXPECT_FLOAT_EQ(x[k] + 2.f * x[k], z_avx2[k]);
+ }
+ }
+}
+#endif
+
+} // namespace webrtc
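
Every test above follows the same shape: run the portable kNone path as a reference, then check an instruction-set-specific path against it, with the x86 variants additionally gated on a runtime GetCPUInfo() check. As a minimal sketch of how a caller could turn those same guards into a dispatch decision (the ChooseOptimization() helper is illustrative, not part of the module):

```cpp
#include "modules/audio_processing/aec3/vector_math.h"
#include "rtc_base/system/arch.h"
#include "system_wrappers/include/cpu_features_wrapper.h"

// Illustrative helper: pick the most capable optimization the host CPU
// supports, mirroring the guards used by the unit tests above.
webrtc::Aec3Optimization ChooseOptimization() {
#if defined(WEBRTC_ARCH_X86_FAMILY)
  if (GetCPUInfo(kAVX2) != 0) {
    return webrtc::Aec3Optimization::kAvx2;
  }
  if (GetCPUInfo(kSSE2) != 0) {
    return webrtc::Aec3Optimization::kSse2;
  }
#elif defined(WEBRTC_HAS_NEON)
  return webrtc::Aec3Optimization::kNeon;
#endif
  return webrtc::Aec3Optimization::kNone;  // Portable fallback.
}
```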
diff --git a/third_party/libwebrtc/modules/audio_processing/aec_dump/BUILD.gn b/third_party/libwebrtc/modules/audio_processing/aec_dump/BUILD.gn
new file mode 100644
index 0000000000..38d8776258
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec_dump/BUILD.gn
@@ -0,0 +1,112 @@
+# Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+import("../../../webrtc.gni")  # Defines `rtc_enable_protobuf`.
+
+rtc_source_set("aec_dump") {
+ visibility = [ "*" ]
+ sources = [ "aec_dump_factory.h" ]
+
+ deps = [
+ "..:aec_dump_interface",
+ "../../../rtc_base/system:file_wrapper",
+ "../../../rtc_base/system:rtc_export",
+ ]
+ absl_deps = [ "//third_party/abseil-cpp/absl/strings" ]
+}
+
+if (rtc_include_tests) {
+ rtc_library("mock_aec_dump") {
+ testonly = true
+ sources = [
+ "mock_aec_dump.cc",
+ "mock_aec_dump.h",
+ ]
+
+ deps = [
+ "..:aec_dump_interface",
+ "..:audioproc_test_utils",
+ "../",
+ "../../../test:test_support",
+ ]
+ }
+
+ rtc_library("mock_aec_dump_unittests") {
+ testonly = true
+ configs += [ "..:apm_debug_dump" ]
+ sources = [ "aec_dump_integration_test.cc" ]
+
+ deps = [
+ ":mock_aec_dump",
+ "..:api",
+ "..:audioproc_test_utils",
+ "../",
+ "//testing/gtest",
+ ]
+ }
+}
+
+if (rtc_enable_protobuf) {
+ rtc_library("aec_dump_impl") {
+ sources = [
+ "aec_dump_impl.cc",
+ "aec_dump_impl.h",
+ "capture_stream_info.cc",
+ "capture_stream_info.h",
+ ]
+
+ deps = [
+ ":aec_dump",
+ "..:aec_dump_interface",
+ "../../../api/audio:audio_frame_api",
+ "../../../api/task_queue",
+ "../../../rtc_base:checks",
+ "../../../rtc_base:ignore_wundef",
+ "../../../rtc_base:logging",
+ "../../../rtc_base:macromagic",
+ "../../../rtc_base:protobuf_utils",
+ "../../../rtc_base:race_checker",
+ "../../../rtc_base:rtc_event",
+ "../../../rtc_base:rtc_task_queue",
+ "../../../rtc_base/system:file_wrapper",
+ "../../../system_wrappers",
+ ]
+ absl_deps = [ "//third_party/abseil-cpp/absl/strings" ]
+
+ deps += [ "../:audioproc_debug_proto" ]
+ }
+
+ if (rtc_include_tests) {
+ rtc_library("aec_dump_unittests") {
+ testonly = true
+ defines = []
+ deps = [
+ ":aec_dump",
+ ":aec_dump_impl",
+ "..:audioproc_debug_proto",
+ "../",
+ "../../../rtc_base:task_queue_for_test",
+ "../../../test:fileutils",
+ "../../../test:test_support",
+ "//testing/gtest",
+ ]
+ sources = [ "aec_dump_unittest.cc" ]
+ }
+ }
+}
+
+rtc_library("null_aec_dump_factory") {
+ assert_no_deps = [ ":aec_dump_impl" ]
+ sources = [ "null_aec_dump_factory.cc" ]
+
+ deps = [
+ ":aec_dump",
+ "..:aec_dump_interface",
+ ]
+ absl_deps = [ "//third_party/abseil-cpp/absl/strings" ]
+}
diff --git a/third_party/libwebrtc/modules/audio_processing/aec_dump/aec_dump_factory.h b/third_party/libwebrtc/modules/audio_processing/aec_dump/aec_dump_factory.h
new file mode 100644
index 0000000000..20718c3d7f
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec_dump/aec_dump_factory.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC_DUMP_AEC_DUMP_FACTORY_H_
+#define MODULES_AUDIO_PROCESSING_AEC_DUMP_AEC_DUMP_FACTORY_H_
+
+#include <memory>
+
+#include "absl/strings/string_view.h"
+#include "modules/audio_processing/include/aec_dump.h"
+#include "rtc_base/system/file_wrapper.h"
+#include "rtc_base/system/rtc_export.h"
+
+namespace rtc {
+class TaskQueue;
+} // namespace rtc
+
+namespace webrtc {
+
+class RTC_EXPORT AecDumpFactory {
+ public:
+  // The `worker_queue` may not be null and must outlive the created
+  // AecDump instance. `max_log_size_bytes == -1` means the log size
+  // is unlimited. `handle` may not be null; the AecDump takes
+  // ownership of `handle` and closes it in the destructor. A
+  // non-null return value indicates that the file has been
+  // successfully opened.
+ static std::unique_ptr<AecDump> Create(webrtc::FileWrapper file,
+ int64_t max_log_size_bytes,
+ rtc::TaskQueue* worker_queue);
+ static std::unique_ptr<AecDump> Create(absl::string_view file_name,
+ int64_t max_log_size_bytes,
+ rtc::TaskQueue* worker_queue);
+ static std::unique_ptr<AecDump> Create(FILE* handle,
+ int64_t max_log_size_bytes,
+ rtc::TaskQueue* worker_queue);
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AEC_DUMP_AEC_DUMP_FACTORY_H_
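
A minimal usage sketch for the factory, using the same task-queue setup as the unit tests later in this patch (the file name, queue name, and Example() wrapper are illustrative):

```cpp
#include "modules/audio_processing/aec_dump/aec_dump_factory.h"
#include "rtc_base/task_queue_for_test.h"

void Example() {
  // The queue must outlive the AecDump it serves.
  webrtc::TaskQueueForTest worker_queue("aec_dump_worker");
  std::unique_ptr<webrtc::AecDump> dump = webrtc::AecDumpFactory::Create(
      "example.aecdump", /*max_log_size_bytes=*/-1, &worker_queue);
  if (dump) {
    // Hand the dump to AudioProcessing::AttachAecDump(), as the
    // integration test in this patch does.
  }
  // `dump` is destroyed here, before `worker_queue`, flushing any
  // pending writes.
}
```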
diff --git a/third_party/libwebrtc/modules/audio_processing/aec_dump/aec_dump_gn/moz.build b/third_party/libwebrtc/modules/audio_processing/aec_dump/aec_dump_gn/moz.build
new file mode 100644
index 0000000000..a16f58d5d0
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec_dump/aec_dump_gn/moz.build
@@ -0,0 +1,197 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("aec_dump_gn")
diff --git a/third_party/libwebrtc/modules/audio_processing/aec_dump/aec_dump_impl.cc b/third_party/libwebrtc/modules/audio_processing/aec_dump/aec_dump_impl.cc
new file mode 100644
index 0000000000..94c24048e0
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec_dump/aec_dump_impl.cc
@@ -0,0 +1,281 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec_dump/aec_dump_impl.h"
+
+#include <memory>
+#include <utility>
+
+#include "absl/strings/string_view.h"
+#include "modules/audio_processing/aec_dump/aec_dump_factory.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/event.h"
+#include "rtc_base/task_queue.h"
+
+namespace webrtc {
+
+namespace {
+void CopyFromConfigToEvent(const webrtc::InternalAPMConfig& config,
+ webrtc::audioproc::Config* pb_cfg) {
+ pb_cfg->set_aec_enabled(config.aec_enabled);
+ pb_cfg->set_aec_delay_agnostic_enabled(config.aec_delay_agnostic_enabled);
+ pb_cfg->set_aec_drift_compensation_enabled(
+ config.aec_drift_compensation_enabled);
+ pb_cfg->set_aec_extended_filter_enabled(config.aec_extended_filter_enabled);
+ pb_cfg->set_aec_suppression_level(config.aec_suppression_level);
+
+ pb_cfg->set_aecm_enabled(config.aecm_enabled);
+ pb_cfg->set_aecm_comfort_noise_enabled(config.aecm_comfort_noise_enabled);
+ pb_cfg->set_aecm_routing_mode(config.aecm_routing_mode);
+
+ pb_cfg->set_agc_enabled(config.agc_enabled);
+ pb_cfg->set_agc_mode(config.agc_mode);
+ pb_cfg->set_agc_limiter_enabled(config.agc_limiter_enabled);
+ pb_cfg->set_noise_robust_agc_enabled(config.noise_robust_agc_enabled);
+
+ pb_cfg->set_hpf_enabled(config.hpf_enabled);
+
+ pb_cfg->set_ns_enabled(config.ns_enabled);
+ pb_cfg->set_ns_level(config.ns_level);
+
+ pb_cfg->set_transient_suppression_enabled(
+ config.transient_suppression_enabled);
+
+ pb_cfg->set_pre_amplifier_enabled(config.pre_amplifier_enabled);
+ pb_cfg->set_pre_amplifier_fixed_gain_factor(
+ config.pre_amplifier_fixed_gain_factor);
+
+ pb_cfg->set_experiments_description(config.experiments_description);
+}
+
+} // namespace
+
+AecDumpImpl::AecDumpImpl(FileWrapper debug_file,
+ int64_t max_log_size_bytes,
+ rtc::TaskQueue* worker_queue)
+ : debug_file_(std::move(debug_file)),
+ num_bytes_left_for_log_(max_log_size_bytes),
+ worker_queue_(worker_queue) {}
+
+AecDumpImpl::~AecDumpImpl() {
+ // Block until all tasks have finished running.
+ rtc::Event thread_sync_event;
+ worker_queue_->PostTask([&thread_sync_event] { thread_sync_event.Set(); });
+ // Wait until the event has been signaled with .Set(). By then all
+ // pending tasks will have finished.
+ thread_sync_event.Wait(rtc::Event::kForever);
+}
+
+void AecDumpImpl::WriteInitMessage(const ProcessingConfig& api_format,
+ int64_t time_now_ms) {
+ auto event = std::make_unique<audioproc::Event>();
+ event->set_type(audioproc::Event::INIT);
+ audioproc::Init* msg = event->mutable_init();
+
+ msg->set_sample_rate(api_format.input_stream().sample_rate_hz());
+ msg->set_output_sample_rate(api_format.output_stream().sample_rate_hz());
+ msg->set_reverse_sample_rate(
+ api_format.reverse_input_stream().sample_rate_hz());
+ msg->set_reverse_output_sample_rate(
+ api_format.reverse_output_stream().sample_rate_hz());
+
+ msg->set_num_input_channels(
+ static_cast<int32_t>(api_format.input_stream().num_channels()));
+ msg->set_num_output_channels(
+ static_cast<int32_t>(api_format.output_stream().num_channels()));
+ msg->set_num_reverse_channels(
+ static_cast<int32_t>(api_format.reverse_input_stream().num_channels()));
+ msg->set_num_reverse_output_channels(
+ api_format.reverse_output_stream().num_channels());
+ msg->set_timestamp_ms(time_now_ms);
+
+ PostWriteToFileTask(std::move(event));
+}
+
+void AecDumpImpl::AddCaptureStreamInput(
+ const AudioFrameView<const float>& src) {
+ capture_stream_info_.AddInput(src);
+}
+
+void AecDumpImpl::AddCaptureStreamOutput(
+ const AudioFrameView<const float>& src) {
+ capture_stream_info_.AddOutput(src);
+}
+
+void AecDumpImpl::AddCaptureStreamInput(const int16_t* const data,
+ int num_channels,
+ int samples_per_channel) {
+ capture_stream_info_.AddInput(data, num_channels, samples_per_channel);
+}
+
+void AecDumpImpl::AddCaptureStreamOutput(const int16_t* const data,
+ int num_channels,
+ int samples_per_channel) {
+ capture_stream_info_.AddOutput(data, num_channels, samples_per_channel);
+}
+
+void AecDumpImpl::AddAudioProcessingState(const AudioProcessingState& state) {
+ capture_stream_info_.AddAudioProcessingState(state);
+}
+
+void AecDumpImpl::WriteCaptureStreamMessage() {
+ PostWriteToFileTask(capture_stream_info_.FetchEvent());
+}
+
+void AecDumpImpl::WriteRenderStreamMessage(const int16_t* const data,
+ int num_channels,
+ int samples_per_channel) {
+ auto event = std::make_unique<audioproc::Event>();
+ event->set_type(audioproc::Event::REVERSE_STREAM);
+ audioproc::ReverseStream* msg = event->mutable_reverse_stream();
+ const size_t data_size = sizeof(int16_t) * samples_per_channel * num_channels;
+ msg->set_data(data, data_size);
+
+ PostWriteToFileTask(std::move(event));
+}
+
+void AecDumpImpl::WriteRenderStreamMessage(
+ const AudioFrameView<const float>& src) {
+ auto event = std::make_unique<audioproc::Event>();
+ event->set_type(audioproc::Event::REVERSE_STREAM);
+
+ audioproc::ReverseStream* msg = event->mutable_reverse_stream();
+
+ for (int i = 0; i < src.num_channels(); ++i) {
+ const auto& channel_view = src.channel(i);
+ msg->add_channel(channel_view.begin(), sizeof(float) * channel_view.size());
+ }
+
+ PostWriteToFileTask(std::move(event));
+}
+
+void AecDumpImpl::WriteConfig(const InternalAPMConfig& config) {
+ RTC_DCHECK_RUNS_SERIALIZED(&race_checker_);
+ auto event = std::make_unique<audioproc::Event>();
+ event->set_type(audioproc::Event::CONFIG);
+ CopyFromConfigToEvent(config, event->mutable_config());
+ PostWriteToFileTask(std::move(event));
+}
+
+void AecDumpImpl::WriteRuntimeSetting(
+ const AudioProcessing::RuntimeSetting& runtime_setting) {
+ RTC_DCHECK_RUNS_SERIALIZED(&race_checker_);
+ auto event = std::make_unique<audioproc::Event>();
+ event->set_type(audioproc::Event::RUNTIME_SETTING);
+ audioproc::RuntimeSetting* setting = event->mutable_runtime_setting();
+ switch (runtime_setting.type()) {
+ case AudioProcessing::RuntimeSetting::Type::kCapturePreGain: {
+ float x;
+ runtime_setting.GetFloat(&x);
+ setting->set_capture_pre_gain(x);
+ break;
+ }
+ case AudioProcessing::RuntimeSetting::Type::kCapturePostGain: {
+ float x;
+ runtime_setting.GetFloat(&x);
+ setting->set_capture_post_gain(x);
+ break;
+ }
+ case AudioProcessing::RuntimeSetting::Type::
+ kCustomRenderProcessingRuntimeSetting: {
+ float x;
+ runtime_setting.GetFloat(&x);
+ setting->set_custom_render_processing_setting(x);
+ break;
+ }
+ case AudioProcessing::RuntimeSetting::Type::kCaptureCompressionGain:
+ // Runtime AGC1 compression gain is ignored.
+ // TODO(http://bugs.webrtc.org/10432): Store compression gain in aecdumps.
+ break;
+ case AudioProcessing::RuntimeSetting::Type::kCaptureFixedPostGain: {
+ float x;
+ runtime_setting.GetFloat(&x);
+ setting->set_capture_fixed_post_gain(x);
+ break;
+ }
+ case AudioProcessing::RuntimeSetting::Type::kCaptureOutputUsed: {
+ bool x;
+ runtime_setting.GetBool(&x);
+ setting->set_capture_output_used(x);
+ break;
+ }
+ case AudioProcessing::RuntimeSetting::Type::kPlayoutVolumeChange: {
+ int x;
+ runtime_setting.GetInt(&x);
+ setting->set_playout_volume_change(x);
+ break;
+ }
+ case AudioProcessing::RuntimeSetting::Type::kPlayoutAudioDeviceChange: {
+ AudioProcessing::RuntimeSetting::PlayoutAudioDeviceInfo src;
+ runtime_setting.GetPlayoutAudioDeviceInfo(&src);
+ auto* dst = setting->mutable_playout_audio_device_change();
+ dst->set_id(src.id);
+ dst->set_max_volume(src.max_volume);
+ break;
+ }
+ case AudioProcessing::RuntimeSetting::Type::kNotSpecified:
+ RTC_DCHECK_NOTREACHED();
+ break;
+ }
+ PostWriteToFileTask(std::move(event));
+}
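
For context, these settings originate from AudioProcessing::SetRuntimeSetting() on the capture side; a one-line sketch of the producing end (assuming the RuntimeSetting factory helpers of the public AudioProcessing API):

```cpp
// Lands in the kCapturePreGain case of WriteRuntimeSetting() above.
apm->SetRuntimeSetting(
    webrtc::AudioProcessing::RuntimeSetting::CreateCapturePreGain(1.5f));
```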
+
+void AecDumpImpl::PostWriteToFileTask(std::unique_ptr<audioproc::Event> event) {
+ RTC_DCHECK(event);
+ worker_queue_->PostTask([event = std::move(event), this] {
+ std::string event_string = event->SerializeAsString();
+ const size_t event_byte_size = event_string.size();
+
+ if (num_bytes_left_for_log_ >= 0) {
+ const int64_t next_message_size = sizeof(int32_t) + event_byte_size;
+ if (num_bytes_left_for_log_ < next_message_size) {
+ // Ensure that no further events are written, even if they're smaller
+ // than the current event.
+ num_bytes_left_for_log_ = 0;
+ return;
+ }
+ num_bytes_left_for_log_ -= next_message_size;
+ }
+
+    // Write the message preceded by its size. Note that only the low
+    // 32 bits of `event_byte_size` are written, which assumes a
+    // little-endian host.
+ if (!debug_file_.Write(&event_byte_size, sizeof(int32_t))) {
+ RTC_DCHECK_NOTREACHED();
+ }
+ if (!debug_file_.Write(event_string.data(), event_string.size())) {
+ RTC_DCHECK_NOTREACHED();
+ }
+ });
+}
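
Each record in the dump file is therefore a 32-bit message size followed by a serialized audioproc::Event. A hedged sketch of a matching reader (assuming a little-endian host, to mirror the raw write above; ReadAecDumpEvents() is illustrative and relies on the generated debug.pb.h being in scope, as it is in this file):

```cpp
#include <cstdint>
#include <cstdio>
#include <string>
#include <vector>

std::vector<webrtc::audioproc::Event> ReadAecDumpEvents(FILE* file) {
  std::vector<webrtc::audioproc::Event> events;
  int32_t size = 0;
  while (fread(&size, sizeof(size), 1, file) == 1 && size > 0) {
    std::string buffer(static_cast<size_t>(size), '\0');
    if (fread(&buffer[0], 1, buffer.size(), file) != buffer.size()) {
      break;  // Truncated record, e.g. a dump cut short by the size cap.
    }
    webrtc::audioproc::Event event;
    if (!event.ParseFromString(buffer)) {
      break;  // Malformed record; stop rather than misparse the rest.
    }
    events.push_back(std::move(event));
  }
  return events;
}
```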
+
+std::unique_ptr<AecDump> AecDumpFactory::Create(webrtc::FileWrapper file,
+ int64_t max_log_size_bytes,
+ rtc::TaskQueue* worker_queue) {
+ RTC_DCHECK(worker_queue);
+ if (!file.is_open())
+ return nullptr;
+
+ return std::make_unique<AecDumpImpl>(std::move(file), max_log_size_bytes,
+ worker_queue);
+}
+
+std::unique_ptr<AecDump> AecDumpFactory::Create(absl::string_view file_name,
+ int64_t max_log_size_bytes,
+ rtc::TaskQueue* worker_queue) {
+ return Create(FileWrapper::OpenWriteOnly(file_name), max_log_size_bytes,
+ worker_queue);
+}
+
+std::unique_ptr<AecDump> AecDumpFactory::Create(FILE* handle,
+ int64_t max_log_size_bytes,
+ rtc::TaskQueue* worker_queue) {
+ return Create(FileWrapper(handle), max_log_size_bytes, worker_queue);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/aec_dump/aec_dump_impl.h b/third_party/libwebrtc/modules/audio_processing/aec_dump/aec_dump_impl.h
new file mode 100644
index 0000000000..fac3712b7a
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec_dump/aec_dump_impl.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC_DUMP_AEC_DUMP_IMPL_H_
+#define MODULES_AUDIO_PROCESSING_AEC_DUMP_AEC_DUMP_IMPL_H_
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "modules/audio_processing/aec_dump/capture_stream_info.h"
+#include "modules/audio_processing/include/aec_dump.h"
+#include "rtc_base/ignore_wundef.h"
+#include "rtc_base/race_checker.h"
+#include "rtc_base/system/file_wrapper.h"
+#include "rtc_base/task_queue.h"
+#include "rtc_base/thread_annotations.h"
+
+// Files generated at build-time by the protobuf compiler.
+RTC_PUSH_IGNORING_WUNDEF()
+#ifdef WEBRTC_ANDROID_PLATFORM_BUILD
+#include "external/webrtc/webrtc/modules/audio_processing/debug.pb.h"
+#else
+#include "modules/audio_processing/debug.pb.h"
+#endif
+RTC_POP_IGNORING_WUNDEF()
+
+namespace webrtc {
+
+// Task-queue based implementation of AecDump. It is thread safe
+// because all file operations are serialized on the worker TaskQueue.
+class AecDumpImpl : public AecDump {
+ public:
+ // `max_log_size_bytes` - maximum number of bytes to write to the debug file,
+ // `max_log_size_bytes == -1` means the log size will be unlimited.
+ AecDumpImpl(FileWrapper debug_file,
+ int64_t max_log_size_bytes,
+ rtc::TaskQueue* worker_queue);
+ AecDumpImpl(const AecDumpImpl&) = delete;
+ AecDumpImpl& operator=(const AecDumpImpl&) = delete;
+ ~AecDumpImpl() override;
+
+ void WriteInitMessage(const ProcessingConfig& api_format,
+ int64_t time_now_ms) override;
+ void AddCaptureStreamInput(const AudioFrameView<const float>& src) override;
+ void AddCaptureStreamOutput(const AudioFrameView<const float>& src) override;
+ void AddCaptureStreamInput(const int16_t* const data,
+ int num_channels,
+ int samples_per_channel) override;
+ void AddCaptureStreamOutput(const int16_t* const data,
+ int num_channels,
+ int samples_per_channel) override;
+ void AddAudioProcessingState(const AudioProcessingState& state) override;
+ void WriteCaptureStreamMessage() override;
+
+ void WriteRenderStreamMessage(const int16_t* const data,
+ int num_channels,
+ int samples_per_channel) override;
+ void WriteRenderStreamMessage(
+ const AudioFrameView<const float>& src) override;
+
+ void WriteConfig(const InternalAPMConfig& config) override;
+
+ void WriteRuntimeSetting(
+ const AudioProcessing::RuntimeSetting& runtime_setting) override;
+
+ private:
+ void PostWriteToFileTask(std::unique_ptr<audioproc::Event> event);
+
+ FileWrapper debug_file_;
+ int64_t num_bytes_left_for_log_ = 0;
+ rtc::RaceChecker race_checker_;
+ rtc::TaskQueue* worker_queue_;
+ CaptureStreamInfo capture_stream_info_;
+};
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AEC_DUMP_AEC_DUMP_IMPL_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/aec_dump/aec_dump_integration_test.cc b/third_party/libwebrtc/modules/audio_processing/aec_dump/aec_dump_integration_test.cc
new file mode 100644
index 0000000000..503135d87f
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec_dump/aec_dump_integration_test.cc
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <array>
+#include <memory>
+#include <utility>
+
+#include "modules/audio_processing/aec_dump/mock_aec_dump.h"
+#include "modules/audio_processing/audio_processing_impl.h"
+#include "modules/audio_processing/include/audio_processing.h"
+#include "modules/audio_processing/test/audio_processing_builder_for_testing.h"
+
+using ::testing::_;
+using ::testing::AtLeast;
+using ::testing::Exactly;
+using ::testing::StrictMock;
+
+namespace {
+rtc::scoped_refptr<webrtc::AudioProcessing> CreateAudioProcessing() {
+ rtc::scoped_refptr<webrtc::AudioProcessing> apm(
+ webrtc::AudioProcessingBuilderForTesting().Create());
+ RTC_DCHECK(apm);
+ return apm;
+}
+
+std::unique_ptr<webrtc::test::MockAecDump> CreateMockAecDump() {
+ auto mock_aec_dump =
+ std::make_unique<testing::StrictMock<webrtc::test::MockAecDump>>();
+ EXPECT_CALL(*mock_aec_dump.get(), WriteConfig(_)).Times(AtLeast(1));
+ EXPECT_CALL(*mock_aec_dump.get(), WriteInitMessage(_, _)).Times(AtLeast(1));
+  return mock_aec_dump;
+}
+
+} // namespace
+
+TEST(AecDumpIntegration, ConfigurationAndInitShouldBeLogged) {
+ auto apm = CreateAudioProcessing();
+
+ apm->AttachAecDump(CreateMockAecDump());
+}
+
+TEST(AecDumpIntegration,
+ RenderStreamShouldBeLoggedOnceEveryProcessReverseStream) {
+ auto apm = CreateAudioProcessing();
+ auto mock_aec_dump = CreateMockAecDump();
+ constexpr int kNumChannels = 1;
+ constexpr int kNumSampleRateHz = 16000;
+ constexpr int kNumSamplesPerChannel = kNumSampleRateHz / 100;
+ std::array<int16_t, kNumSamplesPerChannel * kNumChannels> frame;
+  frame.fill(0);
+ webrtc::StreamConfig stream_config(kNumSampleRateHz, kNumChannels);
+
+ EXPECT_CALL(*mock_aec_dump.get(), WriteRenderStreamMessage(_, _, _))
+ .Times(Exactly(1));
+
+ apm->AttachAecDump(std::move(mock_aec_dump));
+ apm->ProcessReverseStream(frame.data(), stream_config, stream_config,
+ frame.data());
+}
+
+TEST(AecDumpIntegration, CaptureStreamShouldBeLoggedOnceEveryProcessStream) {
+ auto apm = CreateAudioProcessing();
+ auto mock_aec_dump = CreateMockAecDump();
+ constexpr int kNumChannels = 1;
+ constexpr int kNumSampleRateHz = 16000;
+ constexpr int kNumSamplesPerChannel = kNumSampleRateHz / 100;
+ std::array<int16_t, kNumSamplesPerChannel * kNumChannels> frame;
+  frame.fill(0);
+
+ webrtc::StreamConfig stream_config(kNumSampleRateHz, kNumChannels);
+
+ EXPECT_CALL(*mock_aec_dump.get(), AddCaptureStreamInput(_, _, _))
+ .Times(AtLeast(1));
+
+ EXPECT_CALL(*mock_aec_dump.get(), AddCaptureStreamOutput(_, _, _))
+ .Times(Exactly(1));
+
+ EXPECT_CALL(*mock_aec_dump.get(), AddAudioProcessingState(_))
+ .Times(Exactly(1));
+
+ EXPECT_CALL(*mock_aec_dump.get(), WriteCaptureStreamMessage())
+ .Times(Exactly(1));
+
+ apm->AttachAecDump(std::move(mock_aec_dump));
+ apm->ProcessStream(frame.data(), stream_config, stream_config, frame.data());
+}
diff --git a/third_party/libwebrtc/modules/audio_processing/aec_dump/aec_dump_unittest.cc b/third_party/libwebrtc/modules/audio_processing/aec_dump/aec_dump_unittest.cc
new file mode 100644
index 0000000000..62f896fe14
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec_dump/aec_dump_unittest.cc
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <array>
+#include <utility>
+
+#include "modules/audio_processing/aec_dump/aec_dump_factory.h"
+#include "rtc_base/task_queue_for_test.h"
+#include "test/gtest.h"
+#include "test/testsupport/file_utils.h"
+
+namespace webrtc {
+
+TEST(AecDumper, APICallsDoNotCrash) {
+  // Note the order of initialization: the task queue has to be
+  // initialized before the AecDump.
+ webrtc::TaskQueueForTest file_writer_queue("file_writer_queue");
+
+ const std::string filename =
+ webrtc::test::TempFilename(webrtc::test::OutputPath(), "aec_dump");
+
+ {
+ std::unique_ptr<webrtc::AecDump> aec_dump =
+ webrtc::AecDumpFactory::Create(filename, -1, &file_writer_queue);
+
+ constexpr int kNumChannels = 1;
+ constexpr int kNumSamplesPerChannel = 160;
+ std::array<int16_t, kNumSamplesPerChannel * kNumChannels> frame;
+    frame.fill(0);
+ aec_dump->WriteRenderStreamMessage(frame.data(), kNumChannels,
+ kNumSamplesPerChannel);
+
+ aec_dump->AddCaptureStreamInput(frame.data(), kNumChannels,
+ kNumSamplesPerChannel);
+ aec_dump->AddCaptureStreamOutput(frame.data(), kNumChannels,
+ kNumSamplesPerChannel);
+
+ aec_dump->WriteCaptureStreamMessage();
+
+ webrtc::InternalAPMConfig apm_config;
+ aec_dump->WriteConfig(apm_config);
+
+ webrtc::ProcessingConfig api_format;
+ constexpr int64_t kTimeNowMs = 123456789ll;
+ aec_dump->WriteInitMessage(api_format, kTimeNowMs);
+ }
+ // Remove file after the AecDump d-tor has finished.
+ ASSERT_EQ(0, remove(filename.c_str()));
+}
+
+TEST(AecDumper, WriteToFile) {
+ webrtc::TaskQueueForTest file_writer_queue("file_writer_queue");
+
+ const std::string filename =
+ webrtc::test::TempFilename(webrtc::test::OutputPath(), "aec_dump");
+
+ {
+ std::unique_ptr<webrtc::AecDump> aec_dump =
+ webrtc::AecDumpFactory::Create(filename, -1, &file_writer_queue);
+
+ constexpr int kNumChannels = 1;
+ constexpr int kNumSamplesPerChannel = 160;
+ std::array<int16_t, kNumSamplesPerChannel * kNumChannels> frame;
+    frame.fill(0);
+
+ aec_dump->WriteRenderStreamMessage(frame.data(), kNumChannels,
+ kNumSamplesPerChannel);
+ }
+
+ // Verify the file has been written after the AecDump d-tor has
+ // finished.
+ FILE* fid = fopen(filename.c_str(), "r");
+  ASSERT_TRUE(fid != nullptr);
+
+ // Clean it up.
+ ASSERT_EQ(0, fclose(fid));
+ ASSERT_EQ(0, remove(filename.c_str()));
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/aec_dump/capture_stream_info.cc b/third_party/libwebrtc/modules/audio_processing/aec_dump/capture_stream_info.cc
new file mode 100644
index 0000000000..7a1ee8bc4a
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec_dump/capture_stream_info.cc
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec_dump/capture_stream_info.h"
+
+namespace webrtc {
+
+void CaptureStreamInfo::AddInput(const AudioFrameView<const float>& src) {
+ auto* stream = event_->mutable_stream();
+
+ for (int i = 0; i < src.num_channels(); ++i) {
+ const auto& channel_view = src.channel(i);
+ stream->add_input_channel(channel_view.begin(),
+ sizeof(float) * channel_view.size());
+ }
+}
+
+void CaptureStreamInfo::AddOutput(const AudioFrameView<const float>& src) {
+ auto* stream = event_->mutable_stream();
+
+ for (int i = 0; i < src.num_channels(); ++i) {
+ const auto& channel_view = src.channel(i);
+ stream->add_output_channel(channel_view.begin(),
+ sizeof(float) * channel_view.size());
+ }
+}
+
+void CaptureStreamInfo::AddInput(const int16_t* const data,
+ int num_channels,
+ int samples_per_channel) {
+ auto* stream = event_->mutable_stream();
+ const size_t data_size = sizeof(int16_t) * samples_per_channel * num_channels;
+ stream->set_input_data(data, data_size);
+}
+
+void CaptureStreamInfo::AddOutput(const int16_t* const data,
+ int num_channels,
+ int samples_per_channel) {
+ auto* stream = event_->mutable_stream();
+ const size_t data_size = sizeof(int16_t) * samples_per_channel * num_channels;
+ stream->set_output_data(data, data_size);
+}
+
+void CaptureStreamInfo::AddAudioProcessingState(
+ const AecDump::AudioProcessingState& state) {
+ auto* stream = event_->mutable_stream();
+ stream->set_delay(state.delay);
+ stream->set_drift(state.drift);
+ stream->set_level(state.level);
+ stream->set_keypress(state.keypress);
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/aec_dump/capture_stream_info.h b/third_party/libwebrtc/modules/audio_processing/aec_dump/capture_stream_info.h
new file mode 100644
index 0000000000..0819bbcb23
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec_dump/capture_stream_info.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC_DUMP_CAPTURE_STREAM_INFO_H_
+#define MODULES_AUDIO_PROCESSING_AEC_DUMP_CAPTURE_STREAM_INFO_H_
+
+#include <memory>
+#include <utility>
+
+#include "modules/audio_processing/include/aec_dump.h"
+#include "rtc_base/ignore_wundef.h"
+
+// Files generated at build-time by the protobuf compiler.
+RTC_PUSH_IGNORING_WUNDEF()
+#ifdef WEBRTC_ANDROID_PLATFORM_BUILD
+#include "external/webrtc/webrtc/modules/audio_processing/debug.pb.h"
+#else
+#include "modules/audio_processing/debug.pb.h"
+#endif
+RTC_POP_IGNORING_WUNDEF()
+
+namespace webrtc {
+
+class CaptureStreamInfo {
+ public:
+ CaptureStreamInfo() { CreateNewEvent(); }
+ CaptureStreamInfo(const CaptureStreamInfo&) = delete;
+ CaptureStreamInfo& operator=(const CaptureStreamInfo&) = delete;
+ ~CaptureStreamInfo() = default;
+
+ void AddInput(const AudioFrameView<const float>& src);
+ void AddOutput(const AudioFrameView<const float>& src);
+
+ void AddInput(const int16_t* const data,
+ int num_channels,
+ int samples_per_channel);
+ void AddOutput(const int16_t* const data,
+ int num_channels,
+ int samples_per_channel);
+
+ void AddAudioProcessingState(const AecDump::AudioProcessingState& state);
+
+ std::unique_ptr<audioproc::Event> FetchEvent() {
+ std::unique_ptr<audioproc::Event> result = std::move(event_);
+ CreateNewEvent();
+ return result;
+ }
+
+ private:
+ void CreateNewEvent() {
+ event_ = std::make_unique<audioproc::Event>();
+ event_->set_type(audioproc::Event::STREAM);
+ }
+ std::unique_ptr<audioproc::Event> event_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AEC_DUMP_CAPTURE_STREAM_INFO_H_
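
The class accumulates exactly one STREAM event at a time: the AddInput()/AddOutput()/AddAudioProcessingState() calls fill the pending event, and FetchEvent() hands it off while immediately arming a fresh one. A small sketch of that cycle, as AecDumpImpl drives it (the frame contents are illustrative):

```cpp
webrtc::CaptureStreamInfo info;
int16_t frame[160] = {0};  // One 10 ms mono frame at 16 kHz.

info.AddInput(frame, /*num_channels=*/1, /*samples_per_channel=*/160);
info.AddOutput(frame, /*num_channels=*/1, /*samples_per_channel=*/160);

// Takes the populated STREAM event; `info` is already holding a new,
// empty event for the next frame.
std::unique_ptr<webrtc::audioproc::Event> event = info.FetchEvent();
```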
diff --git a/third_party/libwebrtc/modules/audio_processing/aec_dump/mock_aec_dump.cc b/third_party/libwebrtc/modules/audio_processing/aec_dump/mock_aec_dump.cc
new file mode 100644
index 0000000000..fe35d81db9
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec_dump/mock_aec_dump.cc
@@ -0,0 +1,19 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/audio_processing/aec_dump/mock_aec_dump.h"
+
+namespace webrtc {
+
+namespace test {
+
+MockAecDump::MockAecDump() = default;
+MockAecDump::~MockAecDump() = default;
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/aec_dump/mock_aec_dump.h b/third_party/libwebrtc/modules/audio_processing/aec_dump/mock_aec_dump.h
new file mode 100644
index 0000000000..b396739de4
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec_dump/mock_aec_dump.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC_DUMP_MOCK_AEC_DUMP_H_
+#define MODULES_AUDIO_PROCESSING_AEC_DUMP_MOCK_AEC_DUMP_H_
+
+#include <memory>
+
+#include "modules/audio_processing/include/aec_dump.h"
+#include "test/gmock.h"
+
+namespace webrtc {
+
+namespace test {
+
+class MockAecDump : public AecDump {
+ public:
+ MockAecDump();
+ virtual ~MockAecDump();
+
+ MOCK_METHOD(void,
+ WriteInitMessage,
+ (const ProcessingConfig& api_format, int64_t time_now_ms),
+ (override));
+
+ MOCK_METHOD(void,
+ AddCaptureStreamInput,
+ (const AudioFrameView<const float>& src),
+ (override));
+ MOCK_METHOD(void,
+ AddCaptureStreamOutput,
+ (const AudioFrameView<const float>& src),
+ (override));
+ MOCK_METHOD(void,
+ AddCaptureStreamInput,
+ (const int16_t* const data,
+ int num_channels,
+ int samples_per_channel),
+ (override));
+ MOCK_METHOD(void,
+ AddCaptureStreamOutput,
+ (const int16_t* const data,
+ int num_channels,
+ int samples_per_channel),
+ (override));
+ MOCK_METHOD(void,
+ AddAudioProcessingState,
+ (const AudioProcessingState& state),
+ (override));
+ MOCK_METHOD(void, WriteCaptureStreamMessage, (), (override));
+
+ MOCK_METHOD(void,
+ WriteRenderStreamMessage,
+ (const int16_t* const data,
+ int num_channels,
+ int samples_per_channel),
+ (override));
+ MOCK_METHOD(void,
+ WriteRenderStreamMessage,
+ (const AudioFrameView<const float>& src),
+ (override));
+
+ MOCK_METHOD(void, WriteConfig, (const InternalAPMConfig& config), (override));
+
+ MOCK_METHOD(void,
+ WriteRuntimeSetting,
+ (const AudioProcessing::RuntimeSetting& config),
+ (override));
+};
+
+} // namespace test
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AEC_DUMP_MOCK_AEC_DUMP_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/aec_dump/null_aec_dump_factory.cc b/third_party/libwebrtc/modules/audio_processing/aec_dump/null_aec_dump_factory.cc
new file mode 100644
index 0000000000..9bd9745069
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec_dump/null_aec_dump_factory.cc
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "absl/strings/string_view.h"
+#include "modules/audio_processing/aec_dump/aec_dump_factory.h"
+#include "modules/audio_processing/include/aec_dump.h"
+
+namespace webrtc {
+
+std::unique_ptr<AecDump> AecDumpFactory::Create(webrtc::FileWrapper file,
+ int64_t max_log_size_bytes,
+ rtc::TaskQueue* worker_queue) {
+ return nullptr;
+}
+
+std::unique_ptr<AecDump> AecDumpFactory::Create(absl::string_view file_name,
+ int64_t max_log_size_bytes,
+ rtc::TaskQueue* worker_queue) {
+ return nullptr;
+}
+
+std::unique_ptr<AecDump> AecDumpFactory::Create(FILE* handle,
+ int64_t max_log_size_bytes,
+ rtc::TaskQueue* worker_queue) {
+ return nullptr;
+}
+} // namespace webrtc
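
Because this translation unit is linked in when rtc_enable_protobuf is false, every overload returns nullptr, so callers must tolerate a null factory result. A hedged caller-side sketch (apm and worker_queue stand in for objects created elsewhere):

```cpp
auto dump = webrtc::AecDumpFactory::Create(
    "capture.aecdump", /*max_log_size_bytes=*/-1, worker_queue);
if (dump) {
  apm->AttachAecDump(std::move(dump));  // Only reached with aec_dump_impl.
} else {
  // Protobuf support is compiled out; dumping is silently unavailable.
}
```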
diff --git a/third_party/libwebrtc/modules/audio_processing/aec_dump/null_aec_dump_factory_gn/moz.build b/third_party/libwebrtc/modules/audio_processing/aec_dump/null_aec_dump_factory_gn/moz.build
new file mode 100644
index 0000000000..ff1c21af1f
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec_dump/null_aec_dump_factory_gn/moz.build
@@ -0,0 +1,205 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/audio_processing/aec_dump/null_aec_dump_factory.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("null_aec_dump_factory_gn")
diff --git a/third_party/libwebrtc/modules/audio_processing/aec_dump_interface_gn/moz.build b/third_party/libwebrtc/modules/audio_processing/aec_dump_interface_gn/moz.build
new file mode 100644
index 0000000000..96883bf5ab
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aec_dump_interface_gn/moz.build
@@ -0,0 +1,205 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/audio_processing/include/aec_dump.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("aec_dump_interface_gn")
diff --git a/third_party/libwebrtc/modules/audio_processing/aecm/BUILD.gn b/third_party/libwebrtc/modules/audio_processing/aecm/BUILD.gn
new file mode 100644
index 0000000000..a77f04aba5
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aecm/BUILD.gn
@@ -0,0 +1,44 @@
+# Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+import("../../../webrtc.gni")
+
+rtc_library("aecm_core") {
+ sources = [
+ "aecm_core.cc",
+ "aecm_core.h",
+ "aecm_defines.h",
+ "echo_control_mobile.cc",
+ "echo_control_mobile.h",
+ ]
+ deps = [
+ "../../../common_audio:common_audio_c",
+ "../../../rtc_base:checks",
+ "../../../rtc_base:safe_conversions",
+ "../../../rtc_base:sanitizer",
+ "../../../system_wrappers",
+ "../utility:legacy_delay_estimator",
+ ]
+ cflags = []
+
+ if (rtc_build_with_neon) {
+ sources += [ "aecm_core_neon.cc" ]
+
+ if (target_cpu != "arm64") {
+ # Enable compilation for the NEON instruction set.
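+      # (Not needed on arm64, where NEON support is mandatory.)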
+ suppressed_configs += [ "//build/config/compiler:compiler_arm_fpu" ]
+ cflags += [ "-mfpu=neon" ]
+ }
+ }
+
+ if (target_cpu == "mipsel") {
+ sources += [ "aecm_core_mips.cc" ]
+ } else {
+ sources += [ "aecm_core_c.cc" ]
+ }
+}
diff --git a/third_party/libwebrtc/modules/audio_processing/aecm/aecm_core.cc b/third_party/libwebrtc/modules/audio_processing/aecm/aecm_core.cc
new file mode 100644
index 0000000000..fbc3239732
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aecm/aecm_core.cc
@@ -0,0 +1,1125 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aecm/aecm_core.h"
+
+#include <stddef.h>
+#include <stdlib.h>
+#include <string.h>
+
+extern "C" {
+#include "common_audio/ring_buffer.h"
+#include "common_audio/signal_processing/include/real_fft.h"
+}
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+#include "modules/audio_processing/aecm/echo_control_mobile.h"
+#include "modules/audio_processing/utility/delay_estimator_wrapper.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/numerics/safe_conversions.h"
+
+namespace webrtc {
+
+namespace {
+
+#ifdef AEC_DEBUG
+FILE* dfile;
+FILE* testfile;
+#endif
+
+// Initialization table for echo channel in 8 kHz
+static const int16_t kChannelStored8kHz[PART_LEN1] = {
+ 2040, 1815, 1590, 1498, 1405, 1395, 1385, 1418, 1451, 1506, 1562,
+ 1644, 1726, 1804, 1882, 1918, 1953, 1982, 2010, 2025, 2040, 2034,
+ 2027, 2021, 2014, 1997, 1980, 1925, 1869, 1800, 1732, 1683, 1635,
+ 1604, 1572, 1545, 1517, 1481, 1444, 1405, 1367, 1331, 1294, 1270,
+ 1245, 1239, 1233, 1247, 1260, 1282, 1303, 1338, 1373, 1407, 1441,
+ 1470, 1499, 1524, 1549, 1565, 1582, 1601, 1621, 1649, 1676};
+
+// Initialization table for echo channel in 16 kHz
+static const int16_t kChannelStored16kHz[PART_LEN1] = {
+ 2040, 1590, 1405, 1385, 1451, 1562, 1726, 1882, 1953, 2010, 2040,
+ 2027, 2014, 1980, 1869, 1732, 1635, 1572, 1517, 1444, 1367, 1294,
+ 1245, 1233, 1260, 1303, 1373, 1441, 1499, 1549, 1582, 1621, 1676,
+ 1741, 1802, 1861, 1921, 1983, 2040, 2102, 2170, 2265, 2375, 2515,
+ 2651, 2781, 2922, 3075, 3253, 3471, 3738, 3976, 4151, 4258, 4308,
+ 4288, 4270, 4253, 4237, 4179, 4086, 3947, 3757, 3484, 3153};
+
+} // namespace
+
+const int16_t WebRtcAecm_kCosTable[] = {
+ 8192, 8190, 8187, 8180, 8172, 8160, 8147, 8130, 8112, 8091, 8067,
+ 8041, 8012, 7982, 7948, 7912, 7874, 7834, 7791, 7745, 7697, 7647,
+ 7595, 7540, 7483, 7424, 7362, 7299, 7233, 7164, 7094, 7021, 6947,
+ 6870, 6791, 6710, 6627, 6542, 6455, 6366, 6275, 6182, 6087, 5991,
+ 5892, 5792, 5690, 5586, 5481, 5374, 5265, 5155, 5043, 4930, 4815,
+ 4698, 4580, 4461, 4341, 4219, 4096, 3971, 3845, 3719, 3591, 3462,
+ 3331, 3200, 3068, 2935, 2801, 2667, 2531, 2395, 2258, 2120, 1981,
+ 1842, 1703, 1563, 1422, 1281, 1140, 998, 856, 713, 571, 428,
+ 285, 142, 0, -142, -285, -428, -571, -713, -856, -998, -1140,
+ -1281, -1422, -1563, -1703, -1842, -1981, -2120, -2258, -2395, -2531, -2667,
+ -2801, -2935, -3068, -3200, -3331, -3462, -3591, -3719, -3845, -3971, -4095,
+ -4219, -4341, -4461, -4580, -4698, -4815, -4930, -5043, -5155, -5265, -5374,
+ -5481, -5586, -5690, -5792, -5892, -5991, -6087, -6182, -6275, -6366, -6455,
+ -6542, -6627, -6710, -6791, -6870, -6947, -7021, -7094, -7164, -7233, -7299,
+ -7362, -7424, -7483, -7540, -7595, -7647, -7697, -7745, -7791, -7834, -7874,
+ -7912, -7948, -7982, -8012, -8041, -8067, -8091, -8112, -8130, -8147, -8160,
+ -8172, -8180, -8187, -8190, -8191, -8190, -8187, -8180, -8172, -8160, -8147,
+ -8130, -8112, -8091, -8067, -8041, -8012, -7982, -7948, -7912, -7874, -7834,
+ -7791, -7745, -7697, -7647, -7595, -7540, -7483, -7424, -7362, -7299, -7233,
+ -7164, -7094, -7021, -6947, -6870, -6791, -6710, -6627, -6542, -6455, -6366,
+ -6275, -6182, -6087, -5991, -5892, -5792, -5690, -5586, -5481, -5374, -5265,
+ -5155, -5043, -4930, -4815, -4698, -4580, -4461, -4341, -4219, -4096, -3971,
+ -3845, -3719, -3591, -3462, -3331, -3200, -3068, -2935, -2801, -2667, -2531,
+ -2395, -2258, -2120, -1981, -1842, -1703, -1563, -1422, -1281, -1140, -998,
+ -856, -713, -571, -428, -285, -142, 0, 142, 285, 428, 571,
+ 713, 856, 998, 1140, 1281, 1422, 1563, 1703, 1842, 1981, 2120,
+ 2258, 2395, 2531, 2667, 2801, 2935, 3068, 3200, 3331, 3462, 3591,
+ 3719, 3845, 3971, 4095, 4219, 4341, 4461, 4580, 4698, 4815, 4930,
+ 5043, 5155, 5265, 5374, 5481, 5586, 5690, 5792, 5892, 5991, 6087,
+ 6182, 6275, 6366, 6455, 6542, 6627, 6710, 6791, 6870, 6947, 7021,
+ 7094, 7164, 7233, 7299, 7362, 7424, 7483, 7540, 7595, 7647, 7697,
+ 7745, 7791, 7834, 7874, 7912, 7948, 7982, 8012, 8041, 8067, 8091,
+ 8112, 8130, 8147, 8160, 8172, 8180, 8187, 8190};
+
+const int16_t WebRtcAecm_kSinTable[] = {
+ 0, 142, 285, 428, 571, 713, 856, 998, 1140, 1281, 1422,
+ 1563, 1703, 1842, 1981, 2120, 2258, 2395, 2531, 2667, 2801, 2935,
+ 3068, 3200, 3331, 3462, 3591, 3719, 3845, 3971, 4095, 4219, 4341,
+ 4461, 4580, 4698, 4815, 4930, 5043, 5155, 5265, 5374, 5481, 5586,
+ 5690, 5792, 5892, 5991, 6087, 6182, 6275, 6366, 6455, 6542, 6627,
+ 6710, 6791, 6870, 6947, 7021, 7094, 7164, 7233, 7299, 7362, 7424,
+ 7483, 7540, 7595, 7647, 7697, 7745, 7791, 7834, 7874, 7912, 7948,
+ 7982, 8012, 8041, 8067, 8091, 8112, 8130, 8147, 8160, 8172, 8180,
+ 8187, 8190, 8191, 8190, 8187, 8180, 8172, 8160, 8147, 8130, 8112,
+ 8091, 8067, 8041, 8012, 7982, 7948, 7912, 7874, 7834, 7791, 7745,
+ 7697, 7647, 7595, 7540, 7483, 7424, 7362, 7299, 7233, 7164, 7094,
+ 7021, 6947, 6870, 6791, 6710, 6627, 6542, 6455, 6366, 6275, 6182,
+ 6087, 5991, 5892, 5792, 5690, 5586, 5481, 5374, 5265, 5155, 5043,
+ 4930, 4815, 4698, 4580, 4461, 4341, 4219, 4096, 3971, 3845, 3719,
+ 3591, 3462, 3331, 3200, 3068, 2935, 2801, 2667, 2531, 2395, 2258,
+ 2120, 1981, 1842, 1703, 1563, 1422, 1281, 1140, 998, 856, 713,
+ 571, 428, 285, 142, 0, -142, -285, -428, -571, -713, -856,
+ -998, -1140, -1281, -1422, -1563, -1703, -1842, -1981, -2120, -2258, -2395,
+ -2531, -2667, -2801, -2935, -3068, -3200, -3331, -3462, -3591, -3719, -3845,
+ -3971, -4095, -4219, -4341, -4461, -4580, -4698, -4815, -4930, -5043, -5155,
+ -5265, -5374, -5481, -5586, -5690, -5792, -5892, -5991, -6087, -6182, -6275,
+ -6366, -6455, -6542, -6627, -6710, -6791, -6870, -6947, -7021, -7094, -7164,
+ -7233, -7299, -7362, -7424, -7483, -7540, -7595, -7647, -7697, -7745, -7791,
+ -7834, -7874, -7912, -7948, -7982, -8012, -8041, -8067, -8091, -8112, -8130,
+ -8147, -8160, -8172, -8180, -8187, -8190, -8191, -8190, -8187, -8180, -8172,
+ -8160, -8147, -8130, -8112, -8091, -8067, -8041, -8012, -7982, -7948, -7912,
+ -7874, -7834, -7791, -7745, -7697, -7647, -7595, -7540, -7483, -7424, -7362,
+ -7299, -7233, -7164, -7094, -7021, -6947, -6870, -6791, -6710, -6627, -6542,
+ -6455, -6366, -6275, -6182, -6087, -5991, -5892, -5792, -5690, -5586, -5481,
+ -5374, -5265, -5155, -5043, -4930, -4815, -4698, -4580, -4461, -4341, -4219,
+ -4096, -3971, -3845, -3719, -3591, -3462, -3331, -3200, -3068, -2935, -2801,
+ -2667, -2531, -2395, -2258, -2120, -1981, -1842, -1703, -1563, -1422, -1281,
+ -1140, -998, -856, -713, -571, -428, -285, -142};
+
+
+// Moves the pointer to the next entry and inserts `far_spectrum` and
+// corresponding Q-domain in its buffer.
+//
+// Inputs:
+// - self : Pointer to the delay estimation instance
+// - far_spectrum : Pointer to the far end spectrum
+// - far_q : Q-domain of far end spectrum
+//
+void WebRtcAecm_UpdateFarHistory(AecmCore* self,
+ uint16_t* far_spectrum,
+ int far_q) {
+ // Get new buffer position
+ self->far_history_pos++;
+ if (self->far_history_pos >= MAX_DELAY) {
+ self->far_history_pos = 0;
+ }
+ // Update Q-domain buffer
+ self->far_q_domains[self->far_history_pos] = far_q;
+ // Update far end spectrum buffer
+ memcpy(&(self->far_history[self->far_history_pos * PART_LEN1]), far_spectrum,
+ sizeof(uint16_t) * PART_LEN1);
+}
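+
+// Illustrative example (assuming MAX_DELAY == 100, as in aecm_defines.h):
+// successive calls write to slots 1, 2, ..., 99, 0, 1, ..., so the buffer
+// always holds the spectra of the most recent MAX_DELAY blocks, and
+// WebRtcAecm_AlignedFarend() returns the spectrum `delay` slots behind the
+// latest write.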
+
+// Returns a pointer to the far end spectrum aligned to current near end
+// spectrum. The function WebRtc_DelayEstimatorProcessFix(...) should have been
+// called before AlignedFarend(...). Otherwise, you get the pointer to the
+// previous frame. The memory is only valid until the next call of
+// WebRtc_DelayEstimatorProcessFix(...).
+//
+// Inputs:
+// - self : Pointer to the AECM instance.
+// - delay : Current delay estimate.
+//
+// Output:
+// - far_q : The Q-domain of the aligned far end spectrum
+//
+// Return value:
+// - far_spectrum : Pointer to the aligned far end spectrum
+// NULL - Error
+//
+const uint16_t* WebRtcAecm_AlignedFarend(AecmCore* self,
+ int* far_q,
+ int delay) {
+ int buffer_position = 0;
+ RTC_DCHECK(self);
+ buffer_position = self->far_history_pos - delay;
+
+ // Check buffer position
+ if (buffer_position < 0) {
+ buffer_position += MAX_DELAY;
+ }
+ // Get Q-domain
+ *far_q = self->far_q_domains[buffer_position];
+ // Return far end spectrum
+ return &(self->far_history[buffer_position * PART_LEN1]);
+}
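+
+// Example (illustrative): if far_history_pos == 3 and delay == 5, the
+// buffer position wraps around to MAX_DELAY - 2, i.e. the spectrum stored
+// five blocks before the most recent one is returned.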
+
+// Declare function pointers.
+CalcLinearEnergies WebRtcAecm_CalcLinearEnergies;
+StoreAdaptiveChannel WebRtcAecm_StoreAdaptiveChannel;
+ResetAdaptiveChannel WebRtcAecm_ResetAdaptiveChannel;
+
+AecmCore* WebRtcAecm_CreateCore() {
+ // Allocate zero-filled memory.
+  AecmCore* aecm = static_cast<AecmCore*>(calloc(1, sizeof(AecmCore)));
+  if (!aecm) {
+    return NULL;
+  }
+
+ aecm->farFrameBuf =
+ WebRtc_CreateBuffer(FRAME_LEN + PART_LEN, sizeof(int16_t));
+ if (!aecm->farFrameBuf) {
+ WebRtcAecm_FreeCore(aecm);
+ return NULL;
+ }
+
+ aecm->nearNoisyFrameBuf =
+ WebRtc_CreateBuffer(FRAME_LEN + PART_LEN, sizeof(int16_t));
+ if (!aecm->nearNoisyFrameBuf) {
+ WebRtcAecm_FreeCore(aecm);
+ return NULL;
+ }
+
+ aecm->nearCleanFrameBuf =
+ WebRtc_CreateBuffer(FRAME_LEN + PART_LEN, sizeof(int16_t));
+ if (!aecm->nearCleanFrameBuf) {
+ WebRtcAecm_FreeCore(aecm);
+ return NULL;
+ }
+
+ aecm->outFrameBuf =
+ WebRtc_CreateBuffer(FRAME_LEN + PART_LEN, sizeof(int16_t));
+ if (!aecm->outFrameBuf) {
+ WebRtcAecm_FreeCore(aecm);
+ return NULL;
+ }
+
+ aecm->delay_estimator_farend =
+ WebRtc_CreateDelayEstimatorFarend(PART_LEN1, MAX_DELAY);
+ if (aecm->delay_estimator_farend == NULL) {
+ WebRtcAecm_FreeCore(aecm);
+ return NULL;
+ }
+ aecm->delay_estimator =
+ WebRtc_CreateDelayEstimator(aecm->delay_estimator_farend, 0);
+ if (aecm->delay_estimator == NULL) {
+ WebRtcAecm_FreeCore(aecm);
+ return NULL;
+ }
+  // TODO(bjornv): Robust delay validation is explicitly disabled until it
+  // has been established that it causes no performance regression. Remove
+  // the line below once that has been shown.
+ WebRtc_enable_robust_validation(aecm->delay_estimator, 0);
+
+ aecm->real_fft = WebRtcSpl_CreateRealFFT(PART_LEN_SHIFT);
+ if (aecm->real_fft == NULL) {
+ WebRtcAecm_FreeCore(aecm);
+ return NULL;
+ }
+
+  // Init some aecm pointers. 16- and 32-byte alignment is only necessary
+  // for NEON code currently.
+ aecm->xBuf = (int16_t*)(((uintptr_t)aecm->xBuf_buf + 31) & ~31);
+ aecm->dBufClean = (int16_t*)(((uintptr_t)aecm->dBufClean_buf + 31) & ~31);
+ aecm->dBufNoisy = (int16_t*)(((uintptr_t)aecm->dBufNoisy_buf + 31) & ~31);
+ aecm->outBuf = (int16_t*)(((uintptr_t)aecm->outBuf_buf + 15) & ~15);
+ aecm->channelStored =
+ (int16_t*)(((uintptr_t)aecm->channelStored_buf + 15) & ~15);
+ aecm->channelAdapt16 =
+ (int16_t*)(((uintptr_t)aecm->channelAdapt16_buf + 15) & ~15);
+ aecm->channelAdapt32 =
+ (int32_t*)(((uintptr_t)aecm->channelAdapt32_buf + 31) & ~31);
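+
+  // The expressions above round each raw buffer address up to the next
+  // 16- or 32-byte boundary. For example (addresses illustrative only): if
+  // xBuf_buf starts at 0x1004, then (0x1004 + 31) & ~31 == 0x1020, the
+  // next 32-byte-aligned address; the 8 or 16 spare elements in each *_buf
+  // array keep the aligned pointer within bounds.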
+
+ return aecm;
+}
+
+void WebRtcAecm_InitEchoPathCore(AecmCore* aecm, const int16_t* echo_path) {
+ int i = 0;
+
+ // Reset the stored channel
+ memcpy(aecm->channelStored, echo_path, sizeof(int16_t) * PART_LEN1);
+ // Reset the adapted channels
+ memcpy(aecm->channelAdapt16, echo_path, sizeof(int16_t) * PART_LEN1);
+ for (i = 0; i < PART_LEN1; i++) {
+ aecm->channelAdapt32[i] = (int32_t)aecm->channelAdapt16[i] << 16;
+ }
+
+ // Reset channel storing variables
+ aecm->mseAdaptOld = 1000;
+ aecm->mseStoredOld = 1000;
+ aecm->mseThreshold = WEBRTC_SPL_WORD32_MAX;
+ aecm->mseChannelCount = 0;
+}
+
+static void CalcLinearEnergiesC(AecmCore* aecm,
+ const uint16_t* far_spectrum,
+ int32_t* echo_est,
+ uint32_t* far_energy,
+ uint32_t* echo_energy_adapt,
+ uint32_t* echo_energy_stored) {
+ int i;
+
+ // Get energy for the delayed far end signal and estimated
+ // echo using both stored and adapted channels.
+ for (i = 0; i < PART_LEN1; i++) {
+ echo_est[i] =
+ WEBRTC_SPL_MUL_16_U16(aecm->channelStored[i], far_spectrum[i]);
+ (*far_energy) += (uint32_t)(far_spectrum[i]);
+ *echo_energy_adapt += aecm->channelAdapt16[i] * far_spectrum[i];
+ (*echo_energy_stored) += (uint32_t)echo_est[i];
+ }
+}
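+
+// Note: the accumulators above end up in different Q-domains: far_energy
+// is in Q(far_q), while both echo energies carry the additional
+// RESOLUTION_CHANNEL16 scaling of the channel. WebRtcAecm_CalcEnergies()
+// compensates for this when taking logarithms.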
+
+static void StoreAdaptiveChannelC(AecmCore* aecm,
+ const uint16_t* far_spectrum,
+ int32_t* echo_est) {
+ int i;
+
+ // During startup we store the channel every block.
+ memcpy(aecm->channelStored, aecm->channelAdapt16,
+ sizeof(int16_t) * PART_LEN1);
+ // Recalculate echo estimate
+ for (i = 0; i < PART_LEN; i += 4) {
+ echo_est[i] =
+ WEBRTC_SPL_MUL_16_U16(aecm->channelStored[i], far_spectrum[i]);
+ echo_est[i + 1] =
+ WEBRTC_SPL_MUL_16_U16(aecm->channelStored[i + 1], far_spectrum[i + 1]);
+ echo_est[i + 2] =
+ WEBRTC_SPL_MUL_16_U16(aecm->channelStored[i + 2], far_spectrum[i + 2]);
+ echo_est[i + 3] =
+ WEBRTC_SPL_MUL_16_U16(aecm->channelStored[i + 3], far_spectrum[i + 3]);
+ }
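+  // The unrolled loop leaves i == PART_LEN; the statement below covers the
+  // final bin, since the spectra hold PART_LEN1 == PART_LEN + 1 entries.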
+ echo_est[i] = WEBRTC_SPL_MUL_16_U16(aecm->channelStored[i], far_spectrum[i]);
+}
+
+static void ResetAdaptiveChannelC(AecmCore* aecm) {
+ int i;
+
+  // The stored channel has had a significantly lower MSE than the adaptive
+  // one for two consecutive calculations; reset the adaptive channel to the
+  // stored one.
+ memcpy(aecm->channelAdapt16, aecm->channelStored,
+ sizeof(int16_t) * PART_LEN1);
+ // Restore the W32 channel
+ for (i = 0; i < PART_LEN; i += 4) {
+ aecm->channelAdapt32[i] = (int32_t)aecm->channelStored[i] << 16;
+ aecm->channelAdapt32[i + 1] = (int32_t)aecm->channelStored[i + 1] << 16;
+ aecm->channelAdapt32[i + 2] = (int32_t)aecm->channelStored[i + 2] << 16;
+ aecm->channelAdapt32[i + 3] = (int32_t)aecm->channelStored[i + 3] << 16;
+ }
+ aecm->channelAdapt32[i] = (int32_t)aecm->channelStored[i] << 16;
+}
+
+// Initialize function pointers for ARM Neon platform.
+#if defined(WEBRTC_HAS_NEON)
+static void WebRtcAecm_InitNeon(void) {
+ WebRtcAecm_StoreAdaptiveChannel = WebRtcAecm_StoreAdaptiveChannelNeon;
+ WebRtcAecm_ResetAdaptiveChannel = WebRtcAecm_ResetAdaptiveChannelNeon;
+ WebRtcAecm_CalcLinearEnergies = WebRtcAecm_CalcLinearEnergiesNeon;
+}
+#endif
+
+// Initialize function pointers for MIPS platform.
+#if defined(MIPS32_LE)
+static void WebRtcAecm_InitMips(void) {
+#if defined(MIPS_DSP_R1_LE)
+ WebRtcAecm_StoreAdaptiveChannel = WebRtcAecm_StoreAdaptiveChannel_mips;
+ WebRtcAecm_ResetAdaptiveChannel = WebRtcAecm_ResetAdaptiveChannel_mips;
+#endif
+ WebRtcAecm_CalcLinearEnergies = WebRtcAecm_CalcLinearEnergies_mips;
+}
+#endif
+
+// WebRtcAecm_InitCore(...)
+//
+// This function initializes the AECM instance created with
+// WebRtcAecm_CreateCore(...)
+// Input:
+// - aecm : Pointer to the Echo Suppression instance
+// - samplingFreq : Sampling Frequency
+//
+// Output:
+// - aecm : Initialized instance
+//
+// Return value : 0 - Ok
+// -1 - Error
+//
+int WebRtcAecm_InitCore(AecmCore* const aecm, int samplingFreq) {
+ int i = 0;
+ int32_t tmp32 = PART_LEN1 * PART_LEN1;
+ int16_t tmp16 = PART_LEN1;
+
+  // Sanity check of the sampling frequency.
+  if (samplingFreq != 8000 && samplingFreq != 16000) {
+    samplingFreq = 8000;
+    return -1;
+  }
+  aecm->mult = (int16_t)samplingFreq / 8000;
+
+ aecm->farBufWritePos = 0;
+ aecm->farBufReadPos = 0;
+ aecm->knownDelay = 0;
+ aecm->lastKnownDelay = 0;
+
+ WebRtc_InitBuffer(aecm->farFrameBuf);
+ WebRtc_InitBuffer(aecm->nearNoisyFrameBuf);
+ WebRtc_InitBuffer(aecm->nearCleanFrameBuf);
+ WebRtc_InitBuffer(aecm->outFrameBuf);
+
+ memset(aecm->xBuf_buf, 0, sizeof(aecm->xBuf_buf));
+ memset(aecm->dBufClean_buf, 0, sizeof(aecm->dBufClean_buf));
+ memset(aecm->dBufNoisy_buf, 0, sizeof(aecm->dBufNoisy_buf));
+ memset(aecm->outBuf_buf, 0, sizeof(aecm->outBuf_buf));
+
+ aecm->seed = 666;
+ aecm->totCount = 0;
+
+ if (WebRtc_InitDelayEstimatorFarend(aecm->delay_estimator_farend) != 0) {
+ return -1;
+ }
+ if (WebRtc_InitDelayEstimator(aecm->delay_estimator) != 0) {
+ return -1;
+ }
+ // Set far end histories to zero
+ memset(aecm->far_history, 0, sizeof(uint16_t) * PART_LEN1 * MAX_DELAY);
+ memset(aecm->far_q_domains, 0, sizeof(int) * MAX_DELAY);
+ aecm->far_history_pos = MAX_DELAY;
+
+ aecm->nlpFlag = 1;
+ aecm->fixedDelay = -1;
+
+ aecm->dfaCleanQDomain = 0;
+ aecm->dfaCleanQDomainOld = 0;
+ aecm->dfaNoisyQDomain = 0;
+ aecm->dfaNoisyQDomainOld = 0;
+
+ memset(aecm->nearLogEnergy, 0, sizeof(aecm->nearLogEnergy));
+ aecm->farLogEnergy = 0;
+ memset(aecm->echoAdaptLogEnergy, 0, sizeof(aecm->echoAdaptLogEnergy));
+ memset(aecm->echoStoredLogEnergy, 0, sizeof(aecm->echoStoredLogEnergy));
+
+ // Initialize the echo channels with a stored shape.
+ if (samplingFreq == 8000) {
+ WebRtcAecm_InitEchoPathCore(aecm, kChannelStored8kHz);
+ } else {
+ WebRtcAecm_InitEchoPathCore(aecm, kChannelStored16kHz);
+ }
+
+ memset(aecm->echoFilt, 0, sizeof(aecm->echoFilt));
+ memset(aecm->nearFilt, 0, sizeof(aecm->nearFilt));
+ aecm->noiseEstCtr = 0;
+
+ aecm->cngMode = AecmTrue;
+
+ memset(aecm->noiseEstTooLowCtr, 0, sizeof(aecm->noiseEstTooLowCtr));
+ memset(aecm->noiseEstTooHighCtr, 0, sizeof(aecm->noiseEstTooHighCtr));
+  // Shape the initial noise level to approximate pink noise.
+ for (i = 0; i < (PART_LEN1 >> 1) - 1; i++) {
+ aecm->noiseEst[i] = (tmp32 << 8);
+ tmp16--;
+ tmp32 -= (int32_t)((tmp16 << 1) + 1);
+ }
+ for (; i < PART_LEN1; i++) {
+ aecm->noiseEst[i] = (tmp32 << 8);
+ }
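+
+  // In the loops above, tmp32 steps down through the perfect squares
+  // PART_LEN1^2, (PART_LEN1 - 1)^2, ... (note that n^2 - (2(n - 1) + 1) ==
+  // (n - 1)^2), so noiseEst[i] == (PART_LEN1 - i)^2 << 8 over the lower
+  // half of the spectrum and stays flat above it.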
+
+ aecm->farEnergyMin = WEBRTC_SPL_WORD16_MAX;
+ aecm->farEnergyMax = WEBRTC_SPL_WORD16_MIN;
+ aecm->farEnergyMaxMin = 0;
+ aecm->farEnergyVAD = FAR_ENERGY_MIN; // This prevents false speech detection
+ // at the beginning.
+ aecm->farEnergyMSE = 0;
+ aecm->currentVADValue = 0;
+ aecm->vadUpdateCount = 0;
+ aecm->firstVAD = 1;
+
+ aecm->startupState = 0;
+ aecm->supGain = SUPGAIN_DEFAULT;
+ aecm->supGainOld = SUPGAIN_DEFAULT;
+
+ aecm->supGainErrParamA = SUPGAIN_ERROR_PARAM_A;
+ aecm->supGainErrParamD = SUPGAIN_ERROR_PARAM_D;
+ aecm->supGainErrParamDiffAB = SUPGAIN_ERROR_PARAM_A - SUPGAIN_ERROR_PARAM_B;
+ aecm->supGainErrParamDiffBD = SUPGAIN_ERROR_PARAM_B - SUPGAIN_ERROR_PARAM_D;
+
+  // Assert at compile time that PART_LEN is a multiple of 16. This
+  // assumption is used in the assembly code, so check the assembly files
+  // before any change.
+ static_assert(PART_LEN % 16 == 0, "PART_LEN is not a multiple of 16");
+
+ // Initialize function pointers.
+ WebRtcAecm_CalcLinearEnergies = CalcLinearEnergiesC;
+ WebRtcAecm_StoreAdaptiveChannel = StoreAdaptiveChannelC;
+ WebRtcAecm_ResetAdaptiveChannel = ResetAdaptiveChannelC;
+
+#if defined(WEBRTC_HAS_NEON)
+ WebRtcAecm_InitNeon();
+#endif
+
+#if defined(MIPS32_LE)
+ WebRtcAecm_InitMips();
+#endif
+ return 0;
+}
+
+// TODO(bjornv): This function is currently not used. Add support for these
+// parameters from a higher level.
+int WebRtcAecm_Control(AecmCore* aecm, int delay, int nlpFlag) {
+ aecm->nlpFlag = nlpFlag;
+ aecm->fixedDelay = delay;
+
+ return 0;
+}
+
+void WebRtcAecm_FreeCore(AecmCore* aecm) {
+ if (aecm == NULL) {
+ return;
+ }
+
+ WebRtc_FreeBuffer(aecm->farFrameBuf);
+ WebRtc_FreeBuffer(aecm->nearNoisyFrameBuf);
+ WebRtc_FreeBuffer(aecm->nearCleanFrameBuf);
+ WebRtc_FreeBuffer(aecm->outFrameBuf);
+
+ WebRtc_FreeDelayEstimator(aecm->delay_estimator);
+ WebRtc_FreeDelayEstimatorFarend(aecm->delay_estimator_farend);
+ WebRtcSpl_FreeRealFFT(aecm->real_fft);
+
+ free(aecm);
+}
+
+int WebRtcAecm_ProcessFrame(AecmCore* aecm,
+ const int16_t* farend,
+ const int16_t* nearendNoisy,
+ const int16_t* nearendClean,
+ int16_t* out) {
+  // The mask below aligns outBlock to a 16-byte boundary; the 8 extra
+  // int16_t elements provide the slack needed for the round-up.
+  int16_t outBlock_buf[PART_LEN + 8];
+ int16_t* outBlock = (int16_t*)(((uintptr_t)outBlock_buf + 15) & ~15);
+
+ int16_t farFrame[FRAME_LEN];
+ const int16_t* out_ptr = NULL;
+ int size = 0;
+
+ // Buffer the current frame.
+ // Fetch an older one corresponding to the delay.
+ WebRtcAecm_BufferFarFrame(aecm, farend, FRAME_LEN);
+ WebRtcAecm_FetchFarFrame(aecm, farFrame, FRAME_LEN, aecm->knownDelay);
+
+ // Buffer the synchronized far and near frames,
+ // to pass the smaller blocks individually.
+ WebRtc_WriteBuffer(aecm->farFrameBuf, farFrame, FRAME_LEN);
+ WebRtc_WriteBuffer(aecm->nearNoisyFrameBuf, nearendNoisy, FRAME_LEN);
+ if (nearendClean != NULL) {
+ WebRtc_WriteBuffer(aecm->nearCleanFrameBuf, nearendClean, FRAME_LEN);
+ }
+
+ // Process as many blocks as possible.
+ while (WebRtc_available_read(aecm->farFrameBuf) >= PART_LEN) {
+ int16_t far_block[PART_LEN];
+ const int16_t* far_block_ptr = NULL;
+ int16_t near_noisy_block[PART_LEN];
+ const int16_t* near_noisy_block_ptr = NULL;
+
+ WebRtc_ReadBuffer(aecm->farFrameBuf, (void**)&far_block_ptr, far_block,
+ PART_LEN);
+ WebRtc_ReadBuffer(aecm->nearNoisyFrameBuf, (void**)&near_noisy_block_ptr,
+ near_noisy_block, PART_LEN);
+ if (nearendClean != NULL) {
+ int16_t near_clean_block[PART_LEN];
+ const int16_t* near_clean_block_ptr = NULL;
+
+ WebRtc_ReadBuffer(aecm->nearCleanFrameBuf, (void**)&near_clean_block_ptr,
+ near_clean_block, PART_LEN);
+ if (WebRtcAecm_ProcessBlock(aecm, far_block_ptr, near_noisy_block_ptr,
+ near_clean_block_ptr, outBlock) == -1) {
+ return -1;
+ }
+ } else {
+ if (WebRtcAecm_ProcessBlock(aecm, far_block_ptr, near_noisy_block_ptr,
+ NULL, outBlock) == -1) {
+ return -1;
+ }
+ }
+
+ WebRtc_WriteBuffer(aecm->outFrameBuf, outBlock, PART_LEN);
+ }
+
+ // Stuff the out buffer if we have less than a frame to output.
+ // This should only happen for the first frame.
+ size = (int)WebRtc_available_read(aecm->outFrameBuf);
+ if (size < FRAME_LEN) {
+ WebRtc_MoveReadPtr(aecm->outFrameBuf, size - FRAME_LEN);
+ }
+
+ // Obtain an output frame.
+ WebRtc_ReadBuffer(aecm->outFrameBuf, (void**)&out_ptr, out, FRAME_LEN);
+ if (out_ptr != out) {
+ // ReadBuffer() hasn't copied to `out` in this case.
+ memcpy(out, out_ptr, FRAME_LEN * sizeof(int16_t));
+ }
+
+ return 0;
+}
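+
+// Note: FRAME_LEN (80) is not a multiple of PART_LEN (64, both per
+// aecm_defines.h), so frames are buffered and processed in PART_LEN-sized
+// blocks; the read-pointer stuffing above covers the first frame, where
+// fewer than FRAME_LEN output samples are available.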
+
+// WebRtcAecm_AsymFilt(...)
+//
+// Performs asymmetric filtering.
+//
+// Inputs:
+// - filtOld : Previous filtered value.
+// - inVal : New input value.
+// - stepSizePos : Step size when we have a positive contribution.
+// - stepSizeNeg : Step size when we have a negative contribution.
+//
+// Return value:
+// - Filtered value.
+//
+int16_t WebRtcAecm_AsymFilt(const int16_t filtOld,
+ const int16_t inVal,
+ const int16_t stepSizePos,
+ const int16_t stepSizeNeg) {
+ int16_t retVal;
+
+ if ((filtOld == WEBRTC_SPL_WORD16_MAX) | (filtOld == WEBRTC_SPL_WORD16_MIN)) {
+ return inVal;
+ }
+ retVal = filtOld;
+ if (filtOld > inVal) {
+ retVal -= (filtOld - inVal) >> stepSizeNeg;
+ } else {
+ retVal += (inVal - filtOld) >> stepSizePos;
+ }
+
+ return retVal;
+}
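+
+// Worked example (values illustrative): WebRtcAecm_AsymFilt(1000, 1256, 3,
+// 1) returns 1000 + ((1256 - 1000) >> 3) == 1032, while the same deviation
+// in the negative direction would move the filter by (256 >> 1) == 128;
+// the two shift counts make the tracking deliberately asymmetric.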
+
+// ExtractFractionPart(a, zeros)
+//
+// Returns the fraction part of `a`, which has `zeros` leading zeros, as an
+// int16_t scaled to Q8. There is no sanity check that `a` actually has
+// `zeros` leading zeros.
+static int16_t ExtractFractionPart(uint32_t a, int zeros) {
+ return (int16_t)(((a << zeros) & 0x7FFFFFFF) >> 23);
+}
+
+// Calculates and returns the log of `energy` in Q8. The input `energy` is
+// supposed to be in Q(`q_domain`).
+static int16_t LogOfEnergyInQ8(uint32_t energy, int q_domain) {
+ static const int16_t kLogLowValue = PART_LEN_SHIFT << 7;
+ int16_t log_energy_q8 = kLogLowValue;
+ if (energy > 0) {
+ int zeros = WebRtcSpl_NormU32(energy);
+ int16_t frac = ExtractFractionPart(energy, zeros);
+ // log2 of `energy` in Q8.
+ log_energy_q8 += ((31 - zeros) << 8) + frac - (q_domain << 8);
+ }
+ return log_energy_q8;
+}
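+
+// Worked example (illustrative): for energy == 6144 and q_domain == 0,
+// WebRtcSpl_NormU32() returns 19, the integer part is (31 - 19) << 8 ==
+// 3072 and ExtractFractionPart(6144, 19) == 128 (0.5 in Q8), so the result
+// is kLogLowValue + 3200: a linear-mantissa approximation (12.5) of
+// log2(6144) ~= 12.58.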
+
+// WebRtcAecm_CalcEnergies(...)
+//
+// This function calculates the log of energies for nearend, farend and
+// estimated echoes. There is also an update of energy decision levels, i.e.
+// internal VAD.
+//
+//
+// @param aecm [i/o] Handle of the AECM instance.
+// @param far_spectrum [in] Pointer to farend spectrum.
+// @param far_q [in] Q-domain of farend spectrum.
+// @param nearEner [in] Near end energy for current block in
+// Q(aecm->dfaQDomain).
+// @param echoEst [out] Estimated echo in Q(xfa_q+RESOLUTION_CHANNEL16).
+//
+void WebRtcAecm_CalcEnergies(AecmCore* aecm,
+ const uint16_t* far_spectrum,
+ const int16_t far_q,
+ const uint32_t nearEner,
+ int32_t* echoEst) {
+ // Local variables
+ uint32_t tmpAdapt = 0;
+ uint32_t tmpStored = 0;
+ uint32_t tmpFar = 0;
+
+ int i;
+
+ int16_t tmp16;
+ int16_t increase_max_shifts = 4;
+ int16_t decrease_max_shifts = 11;
+ int16_t increase_min_shifts = 11;
+ int16_t decrease_min_shifts = 3;
+
+ // Get log of near end energy and store in buffer
+
+ // Shift buffer
+ memmove(aecm->nearLogEnergy + 1, aecm->nearLogEnergy,
+ sizeof(int16_t) * (MAX_BUF_LEN - 1));
+
+ // Logarithm of integrated magnitude spectrum (nearEner)
+ aecm->nearLogEnergy[0] = LogOfEnergyInQ8(nearEner, aecm->dfaNoisyQDomain);
+
+ WebRtcAecm_CalcLinearEnergies(aecm, far_spectrum, echoEst, &tmpFar, &tmpAdapt,
+ &tmpStored);
+
+ // Shift buffers
+ memmove(aecm->echoAdaptLogEnergy + 1, aecm->echoAdaptLogEnergy,
+ sizeof(int16_t) * (MAX_BUF_LEN - 1));
+ memmove(aecm->echoStoredLogEnergy + 1, aecm->echoStoredLogEnergy,
+ sizeof(int16_t) * (MAX_BUF_LEN - 1));
+
+ // Logarithm of delayed far end energy
+ aecm->farLogEnergy = LogOfEnergyInQ8(tmpFar, far_q);
+
+ // Logarithm of estimated echo energy through adapted channel
+ aecm->echoAdaptLogEnergy[0] =
+ LogOfEnergyInQ8(tmpAdapt, RESOLUTION_CHANNEL16 + far_q);
+
+ // Logarithm of estimated echo energy through stored channel
+ aecm->echoStoredLogEnergy[0] =
+ LogOfEnergyInQ8(tmpStored, RESOLUTION_CHANNEL16 + far_q);
+
+ // Update farend energy levels (min, max, vad, mse)
+ if (aecm->farLogEnergy > FAR_ENERGY_MIN) {
+ if (aecm->startupState == 0) {
+ increase_max_shifts = 2;
+ decrease_min_shifts = 2;
+ increase_min_shifts = 8;
+ }
+
+ aecm->farEnergyMin =
+ WebRtcAecm_AsymFilt(aecm->farEnergyMin, aecm->farLogEnergy,
+ increase_min_shifts, decrease_min_shifts);
+ aecm->farEnergyMax =
+ WebRtcAecm_AsymFilt(aecm->farEnergyMax, aecm->farLogEnergy,
+ increase_max_shifts, decrease_max_shifts);
+ aecm->farEnergyMaxMin = (aecm->farEnergyMax - aecm->farEnergyMin);
+
+ // Dynamic VAD region size
+ tmp16 = 2560 - aecm->farEnergyMin;
+ if (tmp16 > 0) {
+ tmp16 = (int16_t)((tmp16 * FAR_ENERGY_VAD_REGION) >> 9);
+ } else {
+ tmp16 = 0;
+ }
+ tmp16 += FAR_ENERGY_VAD_REGION;
+
+ if ((aecm->startupState == 0) | (aecm->vadUpdateCount > 1024)) {
+ // In startup phase or VAD update halted
+ aecm->farEnergyVAD = aecm->farEnergyMin + tmp16;
+ } else {
+ if (aecm->farEnergyVAD > aecm->farLogEnergy) {
+ aecm->farEnergyVAD +=
+ (aecm->farLogEnergy + tmp16 - aecm->farEnergyVAD) >> 6;
+ aecm->vadUpdateCount = 0;
+ } else {
+ aecm->vadUpdateCount++;
+ }
+ }
+ // Put MSE threshold higher than VAD
+ aecm->farEnergyMSE = aecm->farEnergyVAD + (1 << 8);
+ }
+
+ // Update VAD variables
+ if (aecm->farLogEnergy > aecm->farEnergyVAD) {
+ if ((aecm->startupState == 0) | (aecm->farEnergyMaxMin > FAR_ENERGY_DIFF)) {
+ // We are in startup or have significant dynamics in input speech level
+ aecm->currentVADValue = 1;
+ }
+ } else {
+ aecm->currentVADValue = 0;
+ }
+ if ((aecm->currentVADValue) && (aecm->firstVAD)) {
+ aecm->firstVAD = 0;
+ if (aecm->echoAdaptLogEnergy[0] > aecm->nearLogEnergy[0]) {
+ // The estimated echo has higher energy than the near end signal.
+ // This means that the initialization was too aggressive. Scale
+ // down by a factor 8
+ for (i = 0; i < PART_LEN1; i++) {
+ aecm->channelAdapt16[i] >>= 3;
+ }
+ // Compensate the adapted echo energy level accordingly.
+ aecm->echoAdaptLogEnergy[0] -= (3 << 8);
+ aecm->firstVAD = 1;
+ }
+ }
+}
+
+// WebRtcAecm_CalcStepSize(...)
+//
+// This function calculates the step size used in channel estimation
+//
+//
+// @param aecm [in] Handle of the AECM instance.
+// @param mu [out] (Return value) Stepsize in log2(), i.e. number of
+// shifts.
+//
+//
+int16_t WebRtcAecm_CalcStepSize(AecmCore* const aecm) {
+ int32_t tmp32;
+ int16_t tmp16;
+ int16_t mu = MU_MAX;
+
+ // Here we calculate the step size mu used in the
+ // following NLMS based Channel estimation algorithm
+ if (!aecm->currentVADValue) {
+ // Far end energy level too low, no channel update
+ mu = 0;
+ } else if (aecm->startupState > 0) {
+ if (aecm->farEnergyMin >= aecm->farEnergyMax) {
+ mu = MU_MIN;
+ } else {
+ tmp16 = (aecm->farLogEnergy - aecm->farEnergyMin);
+ tmp32 = tmp16 * MU_DIFF;
+ tmp32 = WebRtcSpl_DivW32W16(tmp32, aecm->farEnergyMaxMin);
+ mu = MU_MIN - 1 - (int16_t)(tmp32);
+ // The -1 is an alternative to rounding. This way we get a larger
+ // stepsize, so we in some sense compensate for truncation in NLMS
+ }
+ if (mu < MU_MAX) {
+ mu = MU_MAX; // Equivalent with maximum step size of 2^-MU_MAX
+ }
+ }
+
+ return mu;
+}
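+
+// Note: mu is used as a right-shift count, so a larger mu means a smaller
+// NLMS step (2^-mu). Assuming MU_DIFF == MU_MIN - MU_MAX (aecm_defines.h),
+// far-end levels near farEnergyMax map mu towards MU_MAX (the largest
+// step) and levels near farEnergyMin towards MU_MIN (the smallest).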
+
+// WebRtcAecm_UpdateChannel(...)
+//
+// This function performs channel estimation. NLMS and decision on channel
+// storage.
+//
+//
+// @param aecm [i/o] Handle of the AECM instance.
+// @param far_spectrum [in] Absolute value of the farend signal in Q(far_q)
+// @param far_q [in] Q-domain of the farend signal
+// @param dfa [in] Absolute value of the nearend signal
+// (Q[aecm->dfaQDomain])
+// @param mu [in] NLMS step size.
+// @param echoEst [i/o] Estimated echo in Q(far_q+RESOLUTION_CHANNEL16).
+//
+void WebRtcAecm_UpdateChannel(AecmCore* aecm,
+ const uint16_t* far_spectrum,
+ const int16_t far_q,
+ const uint16_t* const dfa,
+ const int16_t mu,
+ int32_t* echoEst) {
+ uint32_t tmpU32no1, tmpU32no2;
+ int32_t tmp32no1, tmp32no2;
+ int32_t mseStored;
+ int32_t mseAdapt;
+
+ int i;
+
+ int16_t zerosFar, zerosNum, zerosCh, zerosDfa;
+ int16_t shiftChFar, shiftNum, shift2ResChan;
+ int16_t tmp16no1;
+ int16_t xfaQ, dfaQ;
+
+  // This is the channel estimation algorithm. It is based on NLMS but has
+  // a variable step length (`mu`), calculated by WebRtcAecm_CalcStepSize().
+ if (mu) {
+ for (i = 0; i < PART_LEN1; i++) {
+ // Determine norm of channel and farend to make sure we don't get overflow
+ // in multiplication
+ zerosCh = WebRtcSpl_NormU32(aecm->channelAdapt32[i]);
+ zerosFar = WebRtcSpl_NormU32((uint32_t)far_spectrum[i]);
+ if (zerosCh + zerosFar > 31) {
+ // Multiplication is safe
+ tmpU32no1 =
+ WEBRTC_SPL_UMUL_32_16(aecm->channelAdapt32[i], far_spectrum[i]);
+ shiftChFar = 0;
+ } else {
+ // We need to shift down before multiplication
+ shiftChFar = 32 - zerosCh - zerosFar;
+ // If zerosCh == zerosFar == 0, shiftChFar is 32. A
+ // right shift of 32 is undefined. To avoid that, we
+ // do this check.
+ tmpU32no1 =
+ rtc::dchecked_cast<uint32_t>(
+ shiftChFar >= 32 ? 0 : aecm->channelAdapt32[i] >> shiftChFar) *
+ far_spectrum[i];
+ }
+ // Determine Q-domain of numerator
+ zerosNum = WebRtcSpl_NormU32(tmpU32no1);
+ if (dfa[i]) {
+ zerosDfa = WebRtcSpl_NormU32((uint32_t)dfa[i]);
+ } else {
+ zerosDfa = 32;
+ }
+ tmp16no1 = zerosDfa - 2 + aecm->dfaNoisyQDomain - RESOLUTION_CHANNEL32 -
+ far_q + shiftChFar;
+ if (zerosNum > tmp16no1 + 1) {
+ xfaQ = tmp16no1;
+ dfaQ = zerosDfa - 2;
+ } else {
+ xfaQ = zerosNum - 2;
+ dfaQ = RESOLUTION_CHANNEL32 + far_q - aecm->dfaNoisyQDomain -
+ shiftChFar + xfaQ;
+ }
+ // Add in the same Q-domain
+ tmpU32no1 = WEBRTC_SPL_SHIFT_W32(tmpU32no1, xfaQ);
+ tmpU32no2 = WEBRTC_SPL_SHIFT_W32((uint32_t)dfa[i], dfaQ);
+ tmp32no1 = (int32_t)tmpU32no2 - (int32_t)tmpU32no1;
+ zerosNum = WebRtcSpl_NormW32(tmp32no1);
+ if ((tmp32no1) && (far_spectrum[i] > (CHANNEL_VAD << far_q))) {
+ //
+ // Update is needed
+ //
+ // This is what we would like to compute
+ //
+ // tmp32no1 = dfa[i] - (aecm->channelAdapt[i] * far_spectrum[i])
+ // tmp32norm = (i + 1)
+ // aecm->channelAdapt[i] += (2^mu) * tmp32no1
+ // / (tmp32norm * far_spectrum[i])
+ //
+
+ // Make sure we don't get overflow in multiplication.
+ if (zerosNum + zerosFar > 31) {
+ if (tmp32no1 > 0) {
+ tmp32no2 =
+ (int32_t)WEBRTC_SPL_UMUL_32_16(tmp32no1, far_spectrum[i]);
+ } else {
+ tmp32no2 =
+ -(int32_t)WEBRTC_SPL_UMUL_32_16(-tmp32no1, far_spectrum[i]);
+ }
+ shiftNum = 0;
+ } else {
+ shiftNum = 32 - (zerosNum + zerosFar);
+ if (tmp32no1 > 0) {
+ tmp32no2 = (tmp32no1 >> shiftNum) * far_spectrum[i];
+ } else {
+ tmp32no2 = -((-tmp32no1 >> shiftNum) * far_spectrum[i]);
+ }
+ }
+ // Normalize with respect to frequency bin
+ tmp32no2 = WebRtcSpl_DivW32W16(tmp32no2, i + 1);
+ // Make sure we are in the right Q-domain
+ shift2ResChan =
+ shiftNum + shiftChFar - xfaQ - mu - ((30 - zerosFar) << 1);
+ if (WebRtcSpl_NormW32(tmp32no2) < shift2ResChan) {
+ tmp32no2 = WEBRTC_SPL_WORD32_MAX;
+ } else {
+ tmp32no2 = WEBRTC_SPL_SHIFT_W32(tmp32no2, shift2ResChan);
+ }
+ aecm->channelAdapt32[i] =
+ WebRtcSpl_AddSatW32(aecm->channelAdapt32[i], tmp32no2);
+ if (aecm->channelAdapt32[i] < 0) {
+ // We can never have negative channel gain
+ aecm->channelAdapt32[i] = 0;
+ }
+ aecm->channelAdapt16[i] = (int16_t)(aecm->channelAdapt32[i] >> 16);
+ }
+ }
+ }
+ // END: Adaptive channel update
+
+ // Determine if we should store or restore the channel
+ if ((aecm->startupState == 0) & (aecm->currentVADValue)) {
+ // During startup we store the channel every block,
+ // and we recalculate echo estimate
+ WebRtcAecm_StoreAdaptiveChannel(aecm, far_spectrum, echoEst);
+ } else {
+ if (aecm->farLogEnergy < aecm->farEnergyMSE) {
+ aecm->mseChannelCount = 0;
+ } else {
+ aecm->mseChannelCount++;
+ }
+ // Enough data for validation. Store channel if we can.
+ if (aecm->mseChannelCount >= (MIN_MSE_COUNT + 10)) {
+ // We have enough data.
+      // Calculate the MSE of the "Adapt" and "Stored" versions.
+      // It is actually not an MSE but a summed absolute error.
+ mseStored = 0;
+ mseAdapt = 0;
+ for (i = 0; i < MIN_MSE_COUNT; i++) {
+ tmp32no1 = ((int32_t)aecm->echoStoredLogEnergy[i] -
+ (int32_t)aecm->nearLogEnergy[i]);
+ tmp32no2 = WEBRTC_SPL_ABS_W32(tmp32no1);
+ mseStored += tmp32no2;
+
+ tmp32no1 = ((int32_t)aecm->echoAdaptLogEnergy[i] -
+ (int32_t)aecm->nearLogEnergy[i]);
+ tmp32no2 = WEBRTC_SPL_ABS_W32(tmp32no1);
+ mseAdapt += tmp32no2;
+ }
+ if (((mseStored << MSE_RESOLUTION) < (MIN_MSE_DIFF * mseAdapt)) &
+ ((aecm->mseStoredOld << MSE_RESOLUTION) <
+ (MIN_MSE_DIFF * aecm->mseAdaptOld))) {
+ // The stored channel has a significantly lower MSE than the adaptive
+ // one for two consecutive calculations. Reset the adaptive channel.
+ WebRtcAecm_ResetAdaptiveChannel(aecm);
+ } else if (((MIN_MSE_DIFF * mseStored) > (mseAdapt << MSE_RESOLUTION)) &
+ (mseAdapt < aecm->mseThreshold) &
+ (aecm->mseAdaptOld < aecm->mseThreshold)) {
+ // The adaptive channel has a significantly lower MSE than the stored
+ // one. The MSE for the adaptive channel has also been low for two
+ // consecutive calculations. Store the adaptive channel.
+ WebRtcAecm_StoreAdaptiveChannel(aecm, far_spectrum, echoEst);
+
+ // Update threshold
+ if (aecm->mseThreshold == WEBRTC_SPL_WORD32_MAX) {
+ aecm->mseThreshold = (mseAdapt + aecm->mseAdaptOld);
+ } else {
+ int scaled_threshold = aecm->mseThreshold * 5 / 8;
+ aecm->mseThreshold += ((mseAdapt - scaled_threshold) * 205) >> 8;
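+          // The update above amounts to
+          //   mseThreshold = 0.5 * mseThreshold + 0.8 * mseAdapt
+          // (205 / 256 ~= 0.8 and 0.8 * 5 / 8 ~= 0.5), i.e. a leaky
+          // average that tracks the adaptive-channel error level.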
+ }
+ }
+
+ // Reset counter
+ aecm->mseChannelCount = 0;
+
+ // Store the MSE values.
+ aecm->mseStoredOld = mseStored;
+ aecm->mseAdaptOld = mseAdapt;
+ }
+ }
+ // END: Determine if we should store or reset channel estimate.
+}
+
+// CalcSuppressionGain(...)
+//
+// This function calculates the suppression gain that is used in the Wiener
+// filter.
+//
+//
+// @param aecm [in] Handle of the AECM instance.
+// @param supGain [out] (Return value) Suppression gain with which to scale
+// the noise
+// level (Q14).
+//
+//
+int16_t WebRtcAecm_CalcSuppressionGain(AecmCore* const aecm) {
+ int32_t tmp32no1;
+
+ int16_t supGain = SUPGAIN_DEFAULT;
+ int16_t tmp16no1;
+ int16_t dE = 0;
+
+ // Determine suppression gain used in the Wiener filter. The gain is based on
+ // a mix of far end energy and echo estimation error. Adjust for the far end
+ // signal level. A low signal level indicates no far end signal, hence we set
+ // the suppression gain to 0
+ if (!aecm->currentVADValue) {
+ supGain = 0;
+ } else {
+ // Adjust for possible double talk. If we have large variations in
+ // estimation error we likely have double talk (or poor channel).
+ tmp16no1 = (aecm->nearLogEnergy[0] - aecm->echoStoredLogEnergy[0] -
+ ENERGY_DEV_OFFSET);
+ dE = WEBRTC_SPL_ABS_W16(tmp16no1);
+
+ if (dE < ENERGY_DEV_TOL) {
+ // Likely no double talk. The better estimation, the more we can suppress
+ // signal. Update counters
+ if (dE < SUPGAIN_EPC_DT) {
+ tmp32no1 = aecm->supGainErrParamDiffAB * dE;
+ tmp32no1 += (SUPGAIN_EPC_DT >> 1);
+ tmp16no1 = (int16_t)WebRtcSpl_DivW32W16(tmp32no1, SUPGAIN_EPC_DT);
+ supGain = aecm->supGainErrParamA - tmp16no1;
+ } else {
+ tmp32no1 = aecm->supGainErrParamDiffBD * (ENERGY_DEV_TOL - dE);
+ tmp32no1 += ((ENERGY_DEV_TOL - SUPGAIN_EPC_DT) >> 1);
+ tmp16no1 = (int16_t)WebRtcSpl_DivW32W16(
+ tmp32no1, (ENERGY_DEV_TOL - SUPGAIN_EPC_DT));
+ supGain = aecm->supGainErrParamD + tmp16no1;
+ }
+ } else {
+ // Likely in double talk. Use default value
+ supGain = aecm->supGainErrParamD;
+ }
+ }
+
+ if (supGain > aecm->supGainOld) {
+ tmp16no1 = supGain;
+ } else {
+ tmp16no1 = aecm->supGainOld;
+ }
+ aecm->supGainOld = supGain;
+  // Smooth the gain towards the larger of the current and previous values.
+  aecm->supGain += (int16_t)((tmp16no1 - aecm->supGain) >> 4);
+
+ // END: Update suppression gain
+
+ return aecm->supGain;
+}
+
+void WebRtcAecm_BufferFarFrame(AecmCore* const aecm,
+ const int16_t* const farend,
+ const int farLen) {
+ int writeLen = farLen, writePos = 0;
+
+ // Check if the write position must be wrapped
+ while (aecm->farBufWritePos + writeLen > FAR_BUF_LEN) {
+ // Write to remaining buffer space before wrapping
+ writeLen = FAR_BUF_LEN - aecm->farBufWritePos;
+ memcpy(aecm->farBuf + aecm->farBufWritePos, farend + writePos,
+ sizeof(int16_t) * writeLen);
+ aecm->farBufWritePos = 0;
+ writePos = writeLen;
+ writeLen = farLen - writeLen;
+ }
+
+ memcpy(aecm->farBuf + aecm->farBufWritePos, farend + writePos,
+ sizeof(int16_t) * writeLen);
+ aecm->farBufWritePos += writeLen;
+}
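+
+// Example (illustrative): if farBufWritePos is 8 samples from the end of
+// farBuf and farLen == FRAME_LEN, the first 8 samples are written at the
+// tail, the write position wraps to 0, and the remaining FRAME_LEN - 8
+// samples are written at the head of the buffer.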
+
+void WebRtcAecm_FetchFarFrame(AecmCore* const aecm,
+ int16_t* const farend,
+ const int farLen,
+ const int knownDelay) {
+ int readLen = farLen;
+ int readPos = 0;
+ int delayChange = knownDelay - aecm->lastKnownDelay;
+
+ aecm->farBufReadPos -= delayChange;
+
+ // Check if delay forces a read position wrap
+ while (aecm->farBufReadPos < 0) {
+ aecm->farBufReadPos += FAR_BUF_LEN;
+ }
+ while (aecm->farBufReadPos > FAR_BUF_LEN - 1) {
+ aecm->farBufReadPos -= FAR_BUF_LEN;
+ }
+
+ aecm->lastKnownDelay = knownDelay;
+
+ // Check if read position must be wrapped
+ while (aecm->farBufReadPos + readLen > FAR_BUF_LEN) {
+ // Read from remaining buffer space before wrapping
+ readLen = FAR_BUF_LEN - aecm->farBufReadPos;
+ memcpy(farend + readPos, aecm->farBuf + aecm->farBufReadPos,
+ sizeof(int16_t) * readLen);
+ aecm->farBufReadPos = 0;
+ readPos = readLen;
+ readLen = farLen - readLen;
+ }
+ memcpy(farend + readPos, aecm->farBuf + aecm->farBufReadPos,
+ sizeof(int16_t) * readLen);
+ aecm->farBufReadPos += readLen;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/aecm/aecm_core.h b/third_party/libwebrtc/modules/audio_processing/aecm/aecm_core.h
new file mode 100644
index 0000000000..3de49315c4
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aecm/aecm_core.h
@@ -0,0 +1,441 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+// Performs echo control (suppression) with FFT routines in fixed point.
+
+#ifndef MODULES_AUDIO_PROCESSING_AECM_AECM_CORE_H_
+#define MODULES_AUDIO_PROCESSING_AECM_AECM_CORE_H_
+
+extern "C" {
+#include "common_audio/ring_buffer.h"
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+}
+#include "modules/audio_processing/aecm/aecm_defines.h"
+
+struct RealFFT;
+
+namespace webrtc {
+
+#ifdef _MSC_VER // visual c++
+#define ALIGN8_BEG __declspec(align(8))
+#define ALIGN8_END
+#else // gcc or icc
+#define ALIGN8_BEG
+#define ALIGN8_END __attribute__((aligned(8)))
+#endif
+
+typedef struct {
+ int16_t real;
+ int16_t imag;
+} ComplexInt16;
+
+typedef struct {
+ int farBufWritePos;
+ int farBufReadPos;
+ int knownDelay;
+ int lastKnownDelay;
+ int firstVAD; // Parameter to control poorly initialized channels
+
+ RingBuffer* farFrameBuf;
+ RingBuffer* nearNoisyFrameBuf;
+ RingBuffer* nearCleanFrameBuf;
+ RingBuffer* outFrameBuf;
+
+ int16_t farBuf[FAR_BUF_LEN];
+
+ int16_t mult;
+ uint32_t seed;
+
+ // Delay estimation variables
+ void* delay_estimator_farend;
+ void* delay_estimator;
+ uint16_t currentDelay;
+ // Far end history variables
+ // TODO(bjornv): Replace `far_history` with ring_buffer.
+ uint16_t far_history[PART_LEN1 * MAX_DELAY];
+ int far_history_pos;
+ int far_q_domains[MAX_DELAY];
+
+ int16_t nlpFlag;
+ int16_t fixedDelay;
+
+ uint32_t totCount;
+
+ int16_t dfaCleanQDomain;
+ int16_t dfaCleanQDomainOld;
+ int16_t dfaNoisyQDomain;
+ int16_t dfaNoisyQDomainOld;
+
+ int16_t nearLogEnergy[MAX_BUF_LEN];
+ int16_t farLogEnergy;
+ int16_t echoAdaptLogEnergy[MAX_BUF_LEN];
+ int16_t echoStoredLogEnergy[MAX_BUF_LEN];
+
+  // The extra 16 or 32 bytes in the following buffers are for alignment-
+  // dependent NEON code. It's designed this way since the current GCC
+  // compiler can't properly align a buffer on a 16- or 32-byte boundary.
+ int16_t channelStored_buf[PART_LEN1 + 8];
+ int16_t channelAdapt16_buf[PART_LEN1 + 8];
+ int32_t channelAdapt32_buf[PART_LEN1 + 8];
+ int16_t xBuf_buf[PART_LEN2 + 16]; // farend
+ int16_t dBufClean_buf[PART_LEN2 + 16]; // nearend
+ int16_t dBufNoisy_buf[PART_LEN2 + 16]; // nearend
+ int16_t outBuf_buf[PART_LEN + 8];
+
+ // Pointers to the above buffers
+ int16_t* channelStored;
+ int16_t* channelAdapt16;
+ int32_t* channelAdapt32;
+ int16_t* xBuf;
+ int16_t* dBufClean;
+ int16_t* dBufNoisy;
+ int16_t* outBuf;
+
+ int32_t echoFilt[PART_LEN1];
+ int16_t nearFilt[PART_LEN1];
+ int32_t noiseEst[PART_LEN1];
+ int noiseEstTooLowCtr[PART_LEN1];
+ int noiseEstTooHighCtr[PART_LEN1];
+ int16_t noiseEstCtr;
+ int16_t cngMode;
+
+ int32_t mseAdaptOld;
+ int32_t mseStoredOld;
+ int32_t mseThreshold;
+
+ int16_t farEnergyMin;
+ int16_t farEnergyMax;
+ int16_t farEnergyMaxMin;
+ int16_t farEnergyVAD;
+ int16_t farEnergyMSE;
+ int currentVADValue;
+ int16_t vadUpdateCount;
+
+ int16_t startupState;
+ int16_t mseChannelCount;
+ int16_t supGain;
+ int16_t supGainOld;
+
+ int16_t supGainErrParamA;
+ int16_t supGainErrParamD;
+ int16_t supGainErrParamDiffAB;
+ int16_t supGainErrParamDiffBD;
+
+ struct RealFFT* real_fft;
+
+#ifdef AEC_DEBUG
+ FILE* farFile;
+ FILE* nearFile;
+ FILE* outFile;
+#endif
+} AecmCore;
+
+////////////////////////////////////////////////////////////////////////////////
+// WebRtcAecm_CreateCore()
+//
+// Allocates the memory needed by the AECM. The memory needs to be
+// initialized separately using the WebRtcAecm_InitCore() function.
+// Returns a pointer to the instance, or nullptr on failure.
+AecmCore* WebRtcAecm_CreateCore();
+
+////////////////////////////////////////////////////////////////////////////////
+// WebRtcAecm_InitCore(...)
+//
+// This function initializes the AECM instance created with
+// WebRtcAecm_CreateCore()
+// Input:
+// - aecm : Pointer to the AECM instance
+// - samplingFreq : Sampling Frequency
+//
+// Output:
+// - aecm : Initialized instance
+//
+// Return value : 0 - Ok
+// -1 - Error
+//
+int WebRtcAecm_InitCore(AecmCore* const aecm, int samplingFreq);
+
+////////////////////////////////////////////////////////////////////////////////
+// WebRtcAecm_FreeCore(...)
+//
+// This function releases the memory allocated by WebRtcAecm_CreateCore()
+// Input:
+// - aecm : Pointer to the AECM instance
+//
+void WebRtcAecm_FreeCore(AecmCore* aecm);
+
+int WebRtcAecm_Control(AecmCore* aecm, int delay, int nlpFlag);
+
+////////////////////////////////////////////////////////////////////////////////
+// WebRtcAecm_InitEchoPathCore(...)
+//
+// This function resets the echo channel adaptation with the specified channel.
+// Input:
+// - aecm : Pointer to the AECM instance
+// - echo_path : Pointer to the data that should initialize the echo
+// path
+//
+// Output:
+// - aecm : Initialized instance
+//
+void WebRtcAecm_InitEchoPathCore(AecmCore* aecm, const int16_t* echo_path);
+
+////////////////////////////////////////////////////////////////////////////////
+// WebRtcAecm_ProcessFrame(...)
+//
+// This function processes frames and sends blocks to
+// WebRtcAecm_ProcessBlock(...)
+//
+// Inputs:
+// - aecm : Pointer to the AECM instance
+// - farend : In buffer containing one frame of echo signal
+// - nearendNoisy : In buffer containing one frame of nearend+echo signal
+// without NS
+// - nearendClean : In buffer containing one frame of nearend+echo signal
+// with NS
+//
+// Output:
+// - out : Out buffer, one frame of nearend signal.
+//
+//
+int WebRtcAecm_ProcessFrame(AecmCore* aecm,
+ const int16_t* farend,
+ const int16_t* nearendNoisy,
+ const int16_t* nearendClean,
+ int16_t* out);
+
+////////////////////////////////////////////////////////////////////////////////
+// WebRtcAecm_ProcessBlock(...)
+//
+// This function is called for every block within one frame
+// This function is called by WebRtcAecm_ProcessFrame(...)
+//
+// Inputs:
+// - aecm : Pointer to the AECM instance
+// - farend : In buffer containing one block of echo signal
+// - nearendNoisy : In buffer containing one block of nearend+echo signal
+// without NS
+// - nearendClean : In buffer containing one block of nearend+echo signal
+// with NS
+//
+// Output:
+// - out : Out buffer, one block of nearend signal.
+//
+//
+int WebRtcAecm_ProcessBlock(AecmCore* aecm,
+ const int16_t* farend,
+ const int16_t* nearendNoisy,
+                            const int16_t* nearendClean,
+ int16_t* out);
+
+////////////////////////////////////////////////////////////////////////////////
+// WebRtcAecm_BufferFarFrame()
+//
+// Inserts a frame of data into farend buffer.
+//
+// Inputs:
+// - aecm : Pointer to the AECM instance
+// - farend : In buffer containing one frame of farend signal
+// - farLen : Length of frame
+//
+void WebRtcAecm_BufferFarFrame(AecmCore* const aecm,
+ const int16_t* const farend,
+ int farLen);
+
+////////////////////////////////////////////////////////////////////////////////
+// WebRtcAecm_FetchFarFrame()
+//
+// Read the farend buffer to account for known delay
+//
+// Inputs:
+// - aecm : Pointer to the AECM instance
+// - farend : In buffer containing one frame of farend signal
+// - farLen : Length of frame
+// - knownDelay : known delay
+//
+void WebRtcAecm_FetchFarFrame(AecmCore* const aecm,
+ int16_t* const farend,
+ int farLen,
+ int knownDelay);
+
+// All the functions below are intended to be private
+
+////////////////////////////////////////////////////////////////////////////////
+// WebRtcAecm_UpdateFarHistory()
+//
+// Moves the pointer to the next entry and inserts `far_spectrum` and
+// corresponding Q-domain in its buffer.
+//
+// Inputs:
+// - self : Pointer to the delay estimation instance
+// - far_spectrum : Pointer to the far end spectrum
+// - far_q : Q-domain of far end spectrum
+//
+void WebRtcAecm_UpdateFarHistory(AecmCore* self,
+ uint16_t* far_spectrum,
+ int far_q);
+
+////////////////////////////////////////////////////////////////////////////////
+// WebRtcAecm_AlignedFarend()
+//
+// Returns a pointer to the far end spectrum aligned to current near end
+// spectrum. The function WebRtc_DelayEstimatorProcessFix(...) should have been
+// called before AlignedFarend(...). Otherwise, you get the pointer to the
+// previous frame. The memory is only valid until the next call of
+// WebRtc_DelayEstimatorProcessFix(...).
+//
+// Inputs:
+// - self : Pointer to the AECM instance.
+// - delay : Current delay estimate.
+//
+// Output:
+// - far_q : The Q-domain of the aligned far end spectrum
+//
+// Return value:
+// - far_spectrum : Pointer to the aligned far end spectrum
+// NULL - Error
+//
+const uint16_t* WebRtcAecm_AlignedFarend(AecmCore* self, int* far_q, int delay);
+
+///////////////////////////////////////////////////////////////////////////////
+// WebRtcAecm_CalcSuppressionGain()
+//
+// This function calculates the suppression gain that is used in the
+// Wiener filter.
+//
+// Inputs:
+// - aecm : Pointer to the AECM instance.
+//
+// Return value:
+// - supGain : Suppression gain with which to scale the noise
+// level (Q14).
+//
+int16_t WebRtcAecm_CalcSuppressionGain(AecmCore* const aecm);
+
+///////////////////////////////////////////////////////////////////////////////
+// WebRtcAecm_CalcEnergies()
+//
+// This function calculates the log of energies for nearend, farend and
+// estimated echoes. There is also an update of energy decision levels,
+// i.e. internal VAD.
+//
+// Inputs:
+// - aecm : Pointer to the AECM instance.
+// - far_spectrum : Pointer to farend spectrum.
+// - far_q : Q-domain of farend spectrum.
+// - nearEner : Near end energy for current block in
+// Q(aecm->dfaQDomain).
+//
+// Output:
+// - echoEst : Estimated echo in Q(xfa_q+RESOLUTION_CHANNEL16).
+//
+void WebRtcAecm_CalcEnergies(AecmCore* aecm,
+ const uint16_t* far_spectrum,
+ int16_t far_q,
+ uint32_t nearEner,
+ int32_t* echoEst);
+
+///////////////////////////////////////////////////////////////////////////////
+// WebRtcAecm_CalcStepSize()
+//
+// This function calculates the step size used in channel estimation
+//
+// Inputs:
+// - aecm : Pointer to the AECM instance.
+//
+// Return value:
+// - mu : Stepsize in log2(), i.e. number of shifts.
+//
+int16_t WebRtcAecm_CalcStepSize(AecmCore* const aecm);
+
+///////////////////////////////////////////////////////////////////////////////
+// WebRtcAecm_UpdateChannel(...)
+//
+// This function performs channel estimation.
+// NLMS and decision on channel storage.
+//
+// Inputs:
+// - aecm : Pointer to the AECM instance.
+// - far_spectrum : Absolute value of the farend signal in Q(far_q)
+// - far_q : Q-domain of the farend signal
+// - dfa : Absolute value of the nearend signal
+// (Q[aecm->dfaQDomain])
+// - mu : NLMS step size.
+// Input/Output:
+// - echoEst : Estimated echo in Q(far_q+RESOLUTION_CHANNEL16).
+//
+void WebRtcAecm_UpdateChannel(AecmCore* aecm,
+ const uint16_t* far_spectrum,
+ int16_t far_q,
+ const uint16_t* const dfa,
+ int16_t mu,
+ int32_t* echoEst);
+
+extern const int16_t WebRtcAecm_kCosTable[];
+extern const int16_t WebRtcAecm_kSinTable[];
+
+///////////////////////////////////////////////////////////////////////////////
+// Some function pointers, for internal functions shared by ARM NEON and
+// generic C code.
+//
+typedef void (*CalcLinearEnergies)(AecmCore* aecm,
+ const uint16_t* far_spectrum,
+ int32_t* echoEst,
+ uint32_t* far_energy,
+ uint32_t* echo_energy_adapt,
+ uint32_t* echo_energy_stored);
+extern CalcLinearEnergies WebRtcAecm_CalcLinearEnergies;
+
+typedef void (*StoreAdaptiveChannel)(AecmCore* aecm,
+ const uint16_t* far_spectrum,
+ int32_t* echo_est);
+extern StoreAdaptiveChannel WebRtcAecm_StoreAdaptiveChannel;
+
+typedef void (*ResetAdaptiveChannel)(AecmCore* aecm);
+extern ResetAdaptiveChannel WebRtcAecm_ResetAdaptiveChannel;
+
+// For the above function pointers, functions for generic platforms are declared
+// and defined as static in file aecm_core.c, while those for ARM Neon platforms
+// are declared below and defined in file aecm_core_neon.c.
+#if defined(WEBRTC_HAS_NEON)
+void WebRtcAecm_CalcLinearEnergiesNeon(AecmCore* aecm,
+ const uint16_t* far_spectrum,
+ int32_t* echo_est,
+ uint32_t* far_energy,
+ uint32_t* echo_energy_adapt,
+ uint32_t* echo_energy_stored);
+
+void WebRtcAecm_StoreAdaptiveChannelNeon(AecmCore* aecm,
+ const uint16_t* far_spectrum,
+ int32_t* echo_est);
+
+void WebRtcAecm_ResetAdaptiveChannelNeon(AecmCore* aecm);
+#endif
+
+#if defined(MIPS32_LE)
+void WebRtcAecm_CalcLinearEnergies_mips(AecmCore* aecm,
+ const uint16_t* far_spectrum,
+ int32_t* echo_est,
+ uint32_t* far_energy,
+ uint32_t* echo_energy_adapt,
+ uint32_t* echo_energy_stored);
+#if defined(MIPS_DSP_R1_LE)
+void WebRtcAecm_StoreAdaptiveChannel_mips(AecmCore* aecm,
+ const uint16_t* far_spectrum,
+ int32_t* echo_est);
+
+void WebRtcAecm_ResetAdaptiveChannel_mips(AecmCore* aecm);
+#endif
+#endif
+
+} // namespace webrtc
+
+#endif
diff --git a/third_party/libwebrtc/modules/audio_processing/aecm/aecm_core_c.cc b/third_party/libwebrtc/modules/audio_processing/aecm/aecm_core_c.cc
new file mode 100644
index 0000000000..d363dd2cfd
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aecm/aecm_core_c.cc
@@ -0,0 +1,671 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stddef.h>
+#include <stdlib.h>
+
+#include "modules/audio_processing/aecm/aecm_core.h"
+
+extern "C" {
+#include "common_audio/ring_buffer.h"
+#include "common_audio/signal_processing/include/real_fft.h"
+}
+#include "modules/audio_processing/aecm/echo_control_mobile.h"
+#include "modules/audio_processing/utility/delay_estimator_wrapper.h"
+extern "C" {
+#include "system_wrappers/include/cpu_features_wrapper.h"
+}
+
+#include "rtc_base/checks.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "rtc_base/sanitizer.h"
+
+namespace webrtc {
+
+namespace {
+
+// Square root of Hanning window in Q14.
+static const ALIGN8_BEG int16_t WebRtcAecm_kSqrtHanning[] ALIGN8_END = {
+ 0, 399, 798, 1196, 1594, 1990, 2386, 2780, 3172, 3562, 3951,
+ 4337, 4720, 5101, 5478, 5853, 6224, 6591, 6954, 7313, 7668, 8019,
+ 8364, 8705, 9040, 9370, 9695, 10013, 10326, 10633, 10933, 11227, 11514,
+ 11795, 12068, 12335, 12594, 12845, 13089, 13325, 13553, 13773, 13985, 14189,
+ 14384, 14571, 14749, 14918, 15079, 15231, 15373, 15506, 15631, 15746, 15851,
+ 15947, 16034, 16111, 16179, 16237, 16286, 16325, 16354, 16373, 16384};
+
+#ifdef AECM_WITH_ABS_APPROX
+// Q15 alpha = 0.99439986968132. Factor for magnitude approximation.
+static const uint16_t kAlpha1 = 32584;
+// Q15 beta = 0.12967166976970. Factor for magnitude approximation.
+static const uint16_t kBeta1 = 4249;
+// Q15 alpha = 0.94234827210087. Factor for magnitude approximation.
+static const uint16_t kAlpha2 = 30879;
+// Q15 beta = 0.33787806009150. Factor for magnitude approximation.
+static const uint16_t kBeta2 = 11072;
+// Q15 alpha = 0.82247698684306. Factor for magnitude approximation.
+static const uint16_t kAlpha3 = 26951;
+// Q15 beta = 0.57762063060713. Factor for magnitude approximation.
+static const uint16_t kBeta3 = 18927;
+#endif
+
+static const int16_t kNoiseEstQDomain = 15;
+static const int16_t kNoiseEstIncCount = 5;
+
+static void ComfortNoise(AecmCore* aecm,
+ const uint16_t* dfa,
+ ComplexInt16* out,
+ const int16_t* lambda) {
+ int16_t i;
+ int16_t tmp16;
+ int32_t tmp32;
+
+ int16_t randW16[PART_LEN];
+ int16_t uReal[PART_LEN1];
+ int16_t uImag[PART_LEN1];
+ int32_t outLShift32;
+ int16_t noiseRShift16[PART_LEN1];
+
+ int16_t shiftFromNearToNoise = kNoiseEstQDomain - aecm->dfaCleanQDomain;
+ int16_t minTrackShift;
+
+ RTC_DCHECK_GE(shiftFromNearToNoise, 0);
+ RTC_DCHECK_LT(shiftFromNearToNoise, 16);
+
+ if (aecm->noiseEstCtr < 100) {
+ // Track the minimum more quickly initially.
+ aecm->noiseEstCtr++;
+ minTrackShift = 6;
+ } else {
+ minTrackShift = 9;
+ }
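+  // (Overview:) in the loop below the tracked minimum moves down by a
+  // 2^-minTrackShift fraction of the distance to a new, lower value, and
+  // otherwise ramps back up by a factor of 2049/2048 (~ +0.05%) per block.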
+
+ // Estimate noise power.
+ for (i = 0; i < PART_LEN1; i++) {
+ // Shift to the noise domain.
+ tmp32 = (int32_t)dfa[i];
+ outLShift32 = tmp32 << shiftFromNearToNoise;
+
+ if (outLShift32 < aecm->noiseEst[i]) {
+ // Reset "too low" counter
+ aecm->noiseEstTooLowCtr[i] = 0;
+ // Track the minimum.
+ if (aecm->noiseEst[i] < (1 << minTrackShift)) {
+        // For small values, decrease noiseEst[i] once every
+        // `kNoiseEstIncCount` blocks. The regular approach below cannot
+        // go further down due to truncation.
+ aecm->noiseEstTooHighCtr[i]++;
+ if (aecm->noiseEstTooHighCtr[i] >= kNoiseEstIncCount) {
+ aecm->noiseEst[i]--;
+ aecm->noiseEstTooHighCtr[i] = 0; // Reset the counter
+ }
+ } else {
+ aecm->noiseEst[i] -=
+ ((aecm->noiseEst[i] - outLShift32) >> minTrackShift);
+ }
+ } else {
+ // Reset "too high" counter
+ aecm->noiseEstTooHighCtr[i] = 0;
+ // Ramp slowly upwards until we hit the minimum again.
+ if ((aecm->noiseEst[i] >> 19) > 0) {
+ // Avoid overflow.
+ // Multiplication with 2049 will cause wrap around. Scale
+ // down first and then multiply
+ aecm->noiseEst[i] >>= 11;
+ aecm->noiseEst[i] *= 2049;
+ } else if ((aecm->noiseEst[i] >> 11) > 0) {
+ // Large enough for relative increase
+ aecm->noiseEst[i] *= 2049;
+ aecm->noiseEst[i] >>= 11;
+ } else {
+          // Make incremental increases based on size once every
+          // `kNoiseEstIncCount` blocks.
+ aecm->noiseEstTooLowCtr[i]++;
+ if (aecm->noiseEstTooLowCtr[i] >= kNoiseEstIncCount) {
+ aecm->noiseEst[i] += (aecm->noiseEst[i] >> 9) + 1;
+ aecm->noiseEstTooLowCtr[i] = 0; // Reset counter
+ }
+ }
+ }
+ }
+
+ for (i = 0; i < PART_LEN1; i++) {
+ tmp32 = aecm->noiseEst[i] >> shiftFromNearToNoise;
+ if (tmp32 > 32767) {
+ tmp32 = 32767;
+ aecm->noiseEst[i] = tmp32 << shiftFromNearToNoise;
+ }
+ noiseRShift16[i] = (int16_t)tmp32;
+
+ tmp16 = ONE_Q14 - lambda[i];
+ noiseRShift16[i] = (int16_t)((tmp16 * noiseRShift16[i]) >> 14);
+ }
+
+  // Generate a uniform random array on [0, 2^15-1].
+ WebRtcSpl_RandUArray(randW16, PART_LEN, &aecm->seed);
+
+ // Generate noise according to estimated energy.
+ uReal[0] = 0; // Reject LF noise.
+ uImag[0] = 0;
+ for (i = 1; i < PART_LEN1; i++) {
+    // Get a random index for the cos and sin tables over [0, 358].
+ tmp16 = (int16_t)((359 * randW16[i - 1]) >> 15);
+
+ // Tables are in Q13.
+ uReal[i] =
+ (int16_t)((noiseRShift16[i] * WebRtcAecm_kCosTable[tmp16]) >> 13);
+ uImag[i] =
+ (int16_t)((-noiseRShift16[i] * WebRtcAecm_kSinTable[tmp16]) >> 13);
+ }
+ uImag[PART_LEN] = 0;
+
+ for (i = 0; i < PART_LEN1; i++) {
+ out[i].real = WebRtcSpl_AddSatW16(out[i].real, uReal[i]);
+ out[i].imag = WebRtcSpl_AddSatW16(out[i].imag, uImag[i]);
+ }
+}
+
+static void WindowAndFFT(AecmCore* aecm,
+ int16_t* fft,
+ const int16_t* time_signal,
+ ComplexInt16* freq_signal,
+ int time_signal_scaling) {
+ int i = 0;
+
+ // FFT of signal
+ for (i = 0; i < PART_LEN; i++) {
+ // Window time domain signal and insert into real part of
+ // transformation array `fft`
+ int16_t scaled_time_signal = time_signal[i] * (1 << time_signal_scaling);
+ fft[i] = (int16_t)((scaled_time_signal * WebRtcAecm_kSqrtHanning[i]) >> 14);
+ scaled_time_signal = time_signal[i + PART_LEN] * (1 << time_signal_scaling);
+ fft[PART_LEN + i] = (int16_t)(
+ (scaled_time_signal * WebRtcAecm_kSqrtHanning[PART_LEN - i]) >> 14);
+ }
+
+ // Do forward FFT, then take only the first PART_LEN complex samples,
+ // and change signs of the imaginary parts.
+ WebRtcSpl_RealForwardFFT(aecm->real_fft, fft, (int16_t*)freq_signal);
+ for (i = 0; i < PART_LEN; i++) {
+ freq_signal[i].imag = -freq_signal[i].imag;
+ }
+}
+
+static void InverseFFTAndWindow(AecmCore* aecm,
+ int16_t* fft,
+ ComplexInt16* efw,
+ int16_t* output,
+ const int16_t* nearendClean) {
+ int i, j, outCFFT;
+ int32_t tmp32no1;
+ // Reuse `efw` for the inverse FFT output after transferring
+ // the contents to `fft`.
+ int16_t* ifft_out = (int16_t*)efw;
+
+ // Synthesis
+ for (i = 1, j = 2; i < PART_LEN; i += 1, j += 2) {
+ fft[j] = efw[i].real;
+ fft[j + 1] = -efw[i].imag;
+ }
+ fft[0] = efw[0].real;
+ fft[1] = -efw[0].imag;
+
+ fft[PART_LEN2] = efw[PART_LEN].real;
+ fft[PART_LEN2 + 1] = -efw[PART_LEN].imag;
+
+ // Inverse FFT. Keep outCFFT to scale the samples in the next block.
+ outCFFT = WebRtcSpl_RealInverseFFT(aecm->real_fft, fft, ifft_out);
+ for (i = 0; i < PART_LEN; i++) {
+ ifft_out[i] = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(
+ ifft_out[i], WebRtcAecm_kSqrtHanning[i], 14);
+ tmp32no1 = WEBRTC_SPL_SHIFT_W32((int32_t)ifft_out[i],
+ outCFFT - aecm->dfaCleanQDomain);
+ output[i] = (int16_t)WEBRTC_SPL_SAT(WEBRTC_SPL_WORD16_MAX,
+ tmp32no1 + aecm->outBuf[i],
+ WEBRTC_SPL_WORD16_MIN);
+
+ tmp32no1 =
+ (ifft_out[PART_LEN + i] * WebRtcAecm_kSqrtHanning[PART_LEN - i]) >> 14;
+ tmp32no1 = WEBRTC_SPL_SHIFT_W32(tmp32no1, outCFFT - aecm->dfaCleanQDomain);
+ aecm->outBuf[i] = (int16_t)WEBRTC_SPL_SAT(WEBRTC_SPL_WORD16_MAX, tmp32no1,
+ WEBRTC_SPL_WORD16_MIN);
+ }
+
+ // Copy the current block to the old position
+ // (aecm->outBuf is shifted elsewhere)
+ memcpy(aecm->xBuf, aecm->xBuf + PART_LEN, sizeof(int16_t) * PART_LEN);
+ memcpy(aecm->dBufNoisy, aecm->dBufNoisy + PART_LEN,
+ sizeof(int16_t) * PART_LEN);
+ if (nearendClean != NULL) {
+ memcpy(aecm->dBufClean, aecm->dBufClean + PART_LEN,
+ sizeof(int16_t) * PART_LEN);
+ }
+}
+
+// Transforms a time domain signal into the frequency domain, outputting the
+// complex valued signal, absolute value and sum of absolute values.
+//
+// time_signal [in] Pointer to the time domain signal
+// freq_signal [out] Pointer to the complex valued frequency domain array
+// freq_signal_abs [out] Pointer to the absolute values of the frequency
+// domain array
+// freq_signal_sum_abs [out] Pointer to the sum of all absolute values in
+// the frequency domain array
+// return value The Q-domain of the current frequency values
+//
+static int TimeToFrequencyDomain(AecmCore* aecm,
+ const int16_t* time_signal,
+ ComplexInt16* freq_signal,
+ uint16_t* freq_signal_abs,
+ uint32_t* freq_signal_sum_abs) {
+ int i = 0;
+ int time_signal_scaling = 0;
+
+ int32_t tmp32no1 = 0;
+ int32_t tmp32no2 = 0;
+
+ // In fft_buf, +16 for 32-byte alignment.
+ int16_t fft_buf[PART_LEN4 + 16];
+ int16_t* fft = (int16_t*)(((uintptr_t)fft_buf + 31) & ~31);
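+  // (Note:) rounding the address up to the next multiple of 32 skips at most
+  // 31 bytes, i.e. at most 16 of the extra int16_t elements reserved above.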
+
+ int16_t tmp16no1;
+#ifndef WEBRTC_ARCH_ARM_V7
+ int16_t tmp16no2;
+#endif
+#ifdef AECM_WITH_ABS_APPROX
+ int16_t max_value = 0;
+ int16_t min_value = 0;
+ uint16_t alpha = 0;
+ uint16_t beta = 0;
+#endif
+
+#ifdef AECM_DYNAMIC_Q
+ tmp16no1 = WebRtcSpl_MaxAbsValueW16(time_signal, PART_LEN2);
+ time_signal_scaling = WebRtcSpl_NormW16(tmp16no1);
+#endif
+
+ WindowAndFFT(aecm, fft, time_signal, freq_signal, time_signal_scaling);
+
+ // Extract imaginary and real part, calculate the magnitude for
+ // all frequency bins
+ freq_signal[0].imag = 0;
+ freq_signal[PART_LEN].imag = 0;
+ freq_signal_abs[0] = (uint16_t)WEBRTC_SPL_ABS_W16(freq_signal[0].real);
+ freq_signal_abs[PART_LEN] =
+ (uint16_t)WEBRTC_SPL_ABS_W16(freq_signal[PART_LEN].real);
+ (*freq_signal_sum_abs) =
+ (uint32_t)(freq_signal_abs[0]) + (uint32_t)(freq_signal_abs[PART_LEN]);
+
+ for (i = 1; i < PART_LEN; i++) {
+ if (freq_signal[i].real == 0) {
+ freq_signal_abs[i] = (uint16_t)WEBRTC_SPL_ABS_W16(freq_signal[i].imag);
+ } else if (freq_signal[i].imag == 0) {
+ freq_signal_abs[i] = (uint16_t)WEBRTC_SPL_ABS_W16(freq_signal[i].real);
+ } else {
+ // Approximation for magnitude of complex fft output
+ // magn = sqrt(real^2 + imag^2)
+ // magn ~= alpha * max(`imag`,`real`) + beta * min(`imag`,`real`)
+ //
+ // The parameters alpha and beta are stored in Q15
+
+#ifdef AECM_WITH_ABS_APPROX
+ tmp16no1 = WEBRTC_SPL_ABS_W16(freq_signal[i].real);
+ tmp16no2 = WEBRTC_SPL_ABS_W16(freq_signal[i].imag);
+
+ if (tmp16no1 > tmp16no2) {
+ max_value = tmp16no1;
+ min_value = tmp16no2;
+ } else {
+ max_value = tmp16no2;
+ min_value = tmp16no1;
+ }
+
+ // Magnitude in Q(-6)
+ if ((max_value >> 2) > min_value) {
+ alpha = kAlpha1;
+ beta = kBeta1;
+ } else if ((max_value >> 1) > min_value) {
+ alpha = kAlpha2;
+ beta = kBeta2;
+ } else {
+ alpha = kAlpha3;
+ beta = kBeta3;
+ }
+ tmp16no1 = (int16_t)((max_value * alpha) >> 15);
+ tmp16no2 = (int16_t)((min_value * beta) >> 15);
+ freq_signal_abs[i] = (uint16_t)tmp16no1 + (uint16_t)tmp16no2;
+#else
+#ifdef WEBRTC_ARCH_ARM_V7
+ __asm __volatile(
+ "smulbb %[tmp32no1], %[real], %[real]\n\t"
+ "smlabb %[tmp32no2], %[imag], %[imag], %[tmp32no1]\n\t"
+ : [tmp32no1] "+&r"(tmp32no1), [tmp32no2] "=r"(tmp32no2)
+ : [real] "r"(freq_signal[i].real), [imag] "r"(freq_signal[i].imag));
+#else
+ tmp16no1 = WEBRTC_SPL_ABS_W16(freq_signal[i].real);
+ tmp16no2 = WEBRTC_SPL_ABS_W16(freq_signal[i].imag);
+ tmp32no1 = tmp16no1 * tmp16no1;
+ tmp32no2 = tmp16no2 * tmp16no2;
+ tmp32no2 = WebRtcSpl_AddSatW32(tmp32no1, tmp32no2);
+#endif // WEBRTC_ARCH_ARM_V7
+ tmp32no1 = WebRtcSpl_SqrtFloor(tmp32no2);
+
+ freq_signal_abs[i] = (uint16_t)tmp32no1;
+#endif // AECM_WITH_ABS_APPROX
+ }
+ (*freq_signal_sum_abs) += (uint32_t)freq_signal_abs[i];
+ }
+
+ return time_signal_scaling;
+}
+
+} // namespace
+
+int RTC_NO_SANITIZE("signed-integer-overflow") // bugs.webrtc.org/8200
+ WebRtcAecm_ProcessBlock(AecmCore* aecm,
+ const int16_t* farend,
+ const int16_t* nearendNoisy,
+ const int16_t* nearendClean,
+ int16_t* output) {
+ int i;
+
+ uint32_t xfaSum;
+ uint32_t dfaNoisySum;
+ uint32_t dfaCleanSum;
+ uint32_t echoEst32Gained;
+ uint32_t tmpU32;
+
+ int32_t tmp32no1;
+
+ uint16_t xfa[PART_LEN1];
+ uint16_t dfaNoisy[PART_LEN1];
+ uint16_t dfaClean[PART_LEN1];
+ uint16_t* ptrDfaClean = dfaClean;
+ const uint16_t* far_spectrum_ptr = NULL;
+
+ // 32 byte aligned buffers (with +8 or +16).
+ // TODO(kma): define fft with ComplexInt16.
+ int16_t fft_buf[PART_LEN4 + 2 + 16]; // +2 to make a loop safe.
+ int32_t echoEst32_buf[PART_LEN1 + 8];
+ int32_t dfw_buf[PART_LEN2 + 8];
+ int32_t efw_buf[PART_LEN2 + 8];
+
+ int16_t* fft = (int16_t*)(((uintptr_t)fft_buf + 31) & ~31);
+ int32_t* echoEst32 = (int32_t*)(((uintptr_t)echoEst32_buf + 31) & ~31);
+ ComplexInt16* dfw = (ComplexInt16*)(((uintptr_t)dfw_buf + 31) & ~31);
+ ComplexInt16* efw = (ComplexInt16*)(((uintptr_t)efw_buf + 31) & ~31);
+
+ int16_t hnl[PART_LEN1];
+ int16_t numPosCoef = 0;
+ int16_t nlpGain = ONE_Q14;
+ int delay;
+ int16_t tmp16no1;
+ int16_t tmp16no2;
+ int16_t mu;
+ int16_t supGain;
+ int16_t zeros32, zeros16;
+ int16_t zerosDBufNoisy, zerosDBufClean, zerosXBuf;
+ int far_q;
+ int16_t resolutionDiff, qDomainDiff, dfa_clean_q_domain_diff;
+
+ const int kMinPrefBand = 4;
+ const int kMaxPrefBand = 24;
+ int32_t avgHnl32 = 0;
+
+ // Determine startup state. There are three states:
+ // (0) the first CONV_LEN blocks
+ // (1) another CONV_LEN blocks
+ // (2) the rest
+
+ if (aecm->startupState < 2) {
+ aecm->startupState =
+ (aecm->totCount >= CONV_LEN) + (aecm->totCount >= CONV_LEN2);
+ }
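+  // (Note:) the sum of the two comparisons maps onto the states above:
+  // 0 before CONV_LEN, 1 in [CONV_LEN, CONV_LEN2), 2 from CONV_LEN2 on.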
+ // END: Determine startup state
+
+ // Buffer near and far end signals
+ memcpy(aecm->xBuf + PART_LEN, farend, sizeof(int16_t) * PART_LEN);
+ memcpy(aecm->dBufNoisy + PART_LEN, nearendNoisy, sizeof(int16_t) * PART_LEN);
+ if (nearendClean != NULL) {
+ memcpy(aecm->dBufClean + PART_LEN, nearendClean,
+ sizeof(int16_t) * PART_LEN);
+ }
+
+ // Transform far end signal from time domain to frequency domain.
+ far_q = TimeToFrequencyDomain(aecm, aecm->xBuf, dfw, xfa, &xfaSum);
+
+ // Transform noisy near end signal from time domain to frequency domain.
+ zerosDBufNoisy =
+ TimeToFrequencyDomain(aecm, aecm->dBufNoisy, dfw, dfaNoisy, &dfaNoisySum);
+ aecm->dfaNoisyQDomainOld = aecm->dfaNoisyQDomain;
+ aecm->dfaNoisyQDomain = (int16_t)zerosDBufNoisy;
+
+ if (nearendClean == NULL) {
+ ptrDfaClean = dfaNoisy;
+ aecm->dfaCleanQDomainOld = aecm->dfaNoisyQDomainOld;
+ aecm->dfaCleanQDomain = aecm->dfaNoisyQDomain;
+ dfaCleanSum = dfaNoisySum;
+ } else {
+ // Transform clean near end signal from time domain to frequency domain.
+ zerosDBufClean = TimeToFrequencyDomain(aecm, aecm->dBufClean, dfw, dfaClean,
+ &dfaCleanSum);
+ aecm->dfaCleanQDomainOld = aecm->dfaCleanQDomain;
+ aecm->dfaCleanQDomain = (int16_t)zerosDBufClean;
+ }
+
+ // Get the delay
+ // Save far-end history and estimate delay
+ WebRtcAecm_UpdateFarHistory(aecm, xfa, far_q);
+ if (WebRtc_AddFarSpectrumFix(aecm->delay_estimator_farend, xfa, PART_LEN1,
+ far_q) == -1) {
+ return -1;
+ }
+ delay = WebRtc_DelayEstimatorProcessFix(aecm->delay_estimator, dfaNoisy,
+ PART_LEN1, zerosDBufNoisy);
+ if (delay == -1) {
+ return -1;
+ } else if (delay == -2) {
+ // If the delay is unknown, we assume zero.
+ // NOTE: this will have to be adjusted if we ever add lookahead.
+ delay = 0;
+ }
+
+ if (aecm->fixedDelay >= 0) {
+ // Use fixed delay
+ delay = aecm->fixedDelay;
+ }
+
+ // Get aligned far end spectrum
+ far_spectrum_ptr = WebRtcAecm_AlignedFarend(aecm, &far_q, delay);
+ zerosXBuf = (int16_t)far_q;
+ if (far_spectrum_ptr == NULL) {
+ return -1;
+ }
+
+ // Calculate log(energy) and update energy threshold levels
+ WebRtcAecm_CalcEnergies(aecm, far_spectrum_ptr, zerosXBuf, dfaNoisySum,
+ echoEst32);
+
+ // Calculate stepsize
+ mu = WebRtcAecm_CalcStepSize(aecm);
+
+ // Update counters
+ aecm->totCount++;
+
+ // This is the channel estimation algorithm.
+  // It is based on NLMS but has a variable step length,
+ // which was calculated above.
+ WebRtcAecm_UpdateChannel(aecm, far_spectrum_ptr, zerosXBuf, dfaNoisy, mu,
+ echoEst32);
+ supGain = WebRtcAecm_CalcSuppressionGain(aecm);
+
+ // Calculate Wiener filter hnl[]
+ for (i = 0; i < PART_LEN1; i++) {
+ // Far end signal through channel estimate in Q8
+ // How much can we shift right to preserve resolution
+ tmp32no1 = echoEst32[i] - aecm->echoFilt[i];
+ aecm->echoFilt[i] +=
+ rtc::dchecked_cast<int32_t>((int64_t{tmp32no1} * 50) >> 8);
+
+ zeros32 = WebRtcSpl_NormW32(aecm->echoFilt[i]) + 1;
+ zeros16 = WebRtcSpl_NormW16(supGain) + 1;
+ if (zeros32 + zeros16 > 16) {
+ // Multiplication is safe
+ // Result in
+ // Q(RESOLUTION_CHANNEL+RESOLUTION_SUPGAIN+
+ // aecm->xfaQDomainBuf[diff])
+ echoEst32Gained =
+ WEBRTC_SPL_UMUL_32_16((uint32_t)aecm->echoFilt[i], (uint16_t)supGain);
+ resolutionDiff = 14 - RESOLUTION_CHANNEL16 - RESOLUTION_SUPGAIN;
+ resolutionDiff += (aecm->dfaCleanQDomain - zerosXBuf);
+ } else {
+ tmp16no1 = 17 - zeros32 - zeros16;
+ resolutionDiff =
+ 14 + tmp16no1 - RESOLUTION_CHANNEL16 - RESOLUTION_SUPGAIN;
+ resolutionDiff += (aecm->dfaCleanQDomain - zerosXBuf);
+ if (zeros32 > tmp16no1) {
+ echoEst32Gained = WEBRTC_SPL_UMUL_32_16((uint32_t)aecm->echoFilt[i],
+ supGain >> tmp16no1);
+ } else {
+ // Result in Q-(RESOLUTION_CHANNEL+RESOLUTION_SUPGAIN-16)
+ echoEst32Gained = (aecm->echoFilt[i] >> tmp16no1) * supGain;
+ }
+ }
+
+ zeros16 = WebRtcSpl_NormW16(aecm->nearFilt[i]);
+ RTC_DCHECK_GE(zeros16, 0); // `zeros16` is a norm, hence non-negative.
+ dfa_clean_q_domain_diff = aecm->dfaCleanQDomain - aecm->dfaCleanQDomainOld;
+ if (zeros16 < dfa_clean_q_domain_diff && aecm->nearFilt[i]) {
+ tmp16no1 = aecm->nearFilt[i] * (1 << zeros16);
+ qDomainDiff = zeros16 - dfa_clean_q_domain_diff;
+ tmp16no2 = ptrDfaClean[i] >> -qDomainDiff;
+ } else {
+ tmp16no1 = dfa_clean_q_domain_diff < 0
+ ? aecm->nearFilt[i] >> -dfa_clean_q_domain_diff
+ : aecm->nearFilt[i] * (1 << dfa_clean_q_domain_diff);
+ qDomainDiff = 0;
+ tmp16no2 = ptrDfaClean[i];
+ }
+ tmp32no1 = (int32_t)(tmp16no2 - tmp16no1);
+ tmp16no2 = (int16_t)(tmp32no1 >> 4);
+ tmp16no2 += tmp16no1;
+ zeros16 = WebRtcSpl_NormW16(tmp16no2);
+    // Saturate nearFilt if the left shift below would overflow.
+    if (tmp16no2 && (-qDomainDiff > zeros16)) {
+ aecm->nearFilt[i] = WEBRTC_SPL_WORD16_MAX;
+ } else {
+ aecm->nearFilt[i] = qDomainDiff < 0 ? tmp16no2 * (1 << -qDomainDiff)
+ : tmp16no2 >> qDomainDiff;
+ }
+
+ // Wiener filter coefficients, resulting hnl in Q14
+ if (echoEst32Gained == 0) {
+ hnl[i] = ONE_Q14;
+ } else if (aecm->nearFilt[i] == 0) {
+ hnl[i] = 0;
+ } else {
+ // Multiply the suppression gain
+ // Rounding
+ echoEst32Gained += (uint32_t)(aecm->nearFilt[i] >> 1);
+ tmpU32 =
+ WebRtcSpl_DivU32U16(echoEst32Gained, (uint16_t)aecm->nearFilt[i]);
+
+ // Current resolution is
+ // Q-(RESOLUTION_CHANNEL+RESOLUTION_SUPGAIN- max(0,17-zeros16- zeros32))
+ // Make sure we are in Q14
+ tmp32no1 = (int32_t)WEBRTC_SPL_SHIFT_W32(tmpU32, resolutionDiff);
+ if (tmp32no1 > ONE_Q14) {
+ hnl[i] = 0;
+ } else if (tmp32no1 < 0) {
+ hnl[i] = ONE_Q14;
+ } else {
+ // 1-echoEst/dfa
+ hnl[i] = ONE_Q14 - (int16_t)tmp32no1;
+ if (hnl[i] < 0) {
+ hnl[i] = 0;
+ }
+ }
+ }
+ if (hnl[i]) {
+ numPosCoef++;
+ }
+ }
+ // Only in wideband. Prevent the gain in upper band from being larger than
+ // in lower band.
+ if (aecm->mult == 2) {
+ // TODO(bjornv): Investigate if the scaling of hnl[i] below can cause
+ // speech distortion in double-talk.
+ for (i = 0; i < PART_LEN1; i++) {
+ hnl[i] = (int16_t)((hnl[i] * hnl[i]) >> 14);
+ }
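+    // (Illustrative:) squaring a Q14 gain and shifting right by 14 stays in
+    // Q14: 8192 (0.5) becomes 8192 * 8192 >> 14 = 4096 (0.25).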
+
+ for (i = kMinPrefBand; i <= kMaxPrefBand; i++) {
+ avgHnl32 += (int32_t)hnl[i];
+ }
+ RTC_DCHECK_GT(kMaxPrefBand - kMinPrefBand + 1, 0);
+ avgHnl32 /= (kMaxPrefBand - kMinPrefBand + 1);
+
+ for (i = kMaxPrefBand; i < PART_LEN1; i++) {
+ if (hnl[i] > (int16_t)avgHnl32) {
+ hnl[i] = (int16_t)avgHnl32;
+ }
+ }
+ }
+
+ // Calculate NLP gain, result is in Q14
+ if (aecm->nlpFlag) {
+ for (i = 0; i < PART_LEN1; i++) {
+ // Truncate values close to zero and one.
+ if (hnl[i] > NLP_COMP_HIGH) {
+ hnl[i] = ONE_Q14;
+ } else if (hnl[i] < NLP_COMP_LOW) {
+ hnl[i] = 0;
+ }
+
+ // Remove outliers
+ if (numPosCoef < 3) {
+ nlpGain = 0;
+ } else {
+ nlpGain = ONE_Q14;
+ }
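+      // (Note:) numPosCoef does not change inside this loop, so nlpGain is
+      // effectively all-or-nothing for the entire block.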
+
+ // NLP
+ if ((hnl[i] == ONE_Q14) && (nlpGain == ONE_Q14)) {
+ hnl[i] = ONE_Q14;
+ } else {
+ hnl[i] = (int16_t)((hnl[i] * nlpGain) >> 14);
+ }
+
+ // multiply with Wiener coefficients
+ efw[i].real = (int16_t)(
+ WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(dfw[i].real, hnl[i], 14));
+ efw[i].imag = (int16_t)(
+ WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(dfw[i].imag, hnl[i], 14));
+ }
+ } else {
+ // multiply with Wiener coefficients
+ for (i = 0; i < PART_LEN1; i++) {
+ efw[i].real = (int16_t)(
+ WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(dfw[i].real, hnl[i], 14));
+ efw[i].imag = (int16_t)(
+ WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(dfw[i].imag, hnl[i], 14));
+ }
+ }
+
+ if (aecm->cngMode == AecmTrue) {
+ ComfortNoise(aecm, ptrDfaClean, efw, hnl);
+ }
+
+ InverseFFTAndWindow(aecm, fft, efw, output, nearendClean);
+
+ return 0;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/aecm/aecm_core_gn/moz.build b/third_party/libwebrtc/modules/audio_processing/aecm/aecm_core_gn/moz.build
new file mode 100644
index 0000000000..aec0342004
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aecm/aecm_core_gn/moz.build
@@ -0,0 +1,222 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+SOURCES += [
+ "/third_party/libwebrtc/modules/audio_processing/aecm/aecm_core.cc",
+ "/third_party/libwebrtc/modules/audio_processing/aecm/aecm_core_c.cc",
+ "/third_party/libwebrtc/modules/audio_processing/aecm/echo_control_mobile.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+ SOURCES += [
+ "/third_party/libwebrtc/modules/audio_processing/aecm/aecm_core_neon.cc"
+ ]
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+ SOURCES += [
+ "/third_party/libwebrtc/modules/audio_processing/aecm/aecm_core_neon.cc"
+ ]
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("aecm_core_gn")
diff --git a/third_party/libwebrtc/modules/audio_processing/aecm/aecm_core_mips.cc b/third_party/libwebrtc/modules/audio_processing/aecm/aecm_core_mips.cc
new file mode 100644
index 0000000000..828aa6d2fb
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aecm/aecm_core_mips.cc
@@ -0,0 +1,1656 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aecm/aecm_core.h"
+#include "modules/audio_processing/aecm/echo_control_mobile.h"
+#include "modules/audio_processing/utility/delay_estimator_wrapper.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/numerics/safe_conversions.h"
+
+namespace webrtc {
+
+namespace {
+
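+// Square root of Hanning window in Q14.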
+static const ALIGN8_BEG int16_t WebRtcAecm_kSqrtHanning[] ALIGN8_END = {
+ 0, 399, 798, 1196, 1594, 1990, 2386, 2780, 3172, 3562, 3951,
+ 4337, 4720, 5101, 5478, 5853, 6224, 6591, 6954, 7313, 7668, 8019,
+ 8364, 8705, 9040, 9370, 9695, 10013, 10326, 10633, 10933, 11227, 11514,
+ 11795, 12068, 12335, 12594, 12845, 13089, 13325, 13553, 13773, 13985, 14189,
+ 14384, 14571, 14749, 14918, 15079, 15231, 15373, 15506, 15631, 15746, 15851,
+ 15947, 16034, 16111, 16179, 16237, 16286, 16325, 16354, 16373, 16384};
+
+static const int16_t kNoiseEstQDomain = 15;
+static const int16_t kNoiseEstIncCount = 5;
+
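+// (Assumed from the values:) coefTable holds byte offsets in bit-reversed
+// order; each pair addresses the real and imaginary int16_t of one complex
+// bin when scattering the windowed input into the FFT buffer.
+// coefTable_ifft appears to serve the analogous role for the inverse FFT.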
+static int16_t coefTable[] = {
+ 0, 4, 256, 260, 128, 132, 384, 388, 64, 68, 320, 324, 192, 196, 448,
+ 452, 32, 36, 288, 292, 160, 164, 416, 420, 96, 100, 352, 356, 224, 228,
+ 480, 484, 16, 20, 272, 276, 144, 148, 400, 404, 80, 84, 336, 340, 208,
+ 212, 464, 468, 48, 52, 304, 308, 176, 180, 432, 436, 112, 116, 368, 372,
+ 240, 244, 496, 500, 8, 12, 264, 268, 136, 140, 392, 396, 72, 76, 328,
+ 332, 200, 204, 456, 460, 40, 44, 296, 300, 168, 172, 424, 428, 104, 108,
+ 360, 364, 232, 236, 488, 492, 24, 28, 280, 284, 152, 156, 408, 412, 88,
+ 92, 344, 348, 216, 220, 472, 476, 56, 60, 312, 316, 184, 188, 440, 444,
+ 120, 124, 376, 380, 248, 252, 504, 508};
+
+static int16_t coefTable_ifft[] = {
+ 0, 512, 256, 508, 128, 252, 384, 380, 64, 124, 320, 444, 192, 188, 448,
+ 316, 32, 60, 288, 476, 160, 220, 416, 348, 96, 92, 352, 412, 224, 156,
+ 480, 284, 16, 28, 272, 492, 144, 236, 400, 364, 80, 108, 336, 428, 208,
+ 172, 464, 300, 48, 44, 304, 460, 176, 204, 432, 332, 112, 76, 368, 396,
+ 240, 140, 496, 268, 8, 12, 264, 500, 136, 244, 392, 372, 72, 116, 328,
+ 436, 200, 180, 456, 308, 40, 52, 296, 468, 168, 212, 424, 340, 104, 84,
+ 360, 404, 232, 148, 488, 276, 24, 20, 280, 484, 152, 228, 408, 356, 88,
+ 100, 344, 420, 216, 164, 472, 292, 56, 36, 312, 452, 184, 196, 440, 324,
+ 120, 68, 376, 388, 248, 132, 504, 260};
+
+} // namespace
+
+static void ComfortNoise(AecmCore* aecm,
+ const uint16_t* dfa,
+ ComplexInt16* out,
+ const int16_t* lambda);
+
+static void WindowAndFFT(AecmCore* aecm,
+ int16_t* fft,
+ const int16_t* time_signal,
+ ComplexInt16* freq_signal,
+ int time_signal_scaling) {
+ int i, j;
+ int32_t tmp1, tmp2, tmp3, tmp4;
+ int16_t* pfrfi;
+ ComplexInt16* pfreq_signal;
+ int16_t f_coef, s_coef;
+ int32_t load_ptr, store_ptr1, store_ptr2, shift, shift1;
+ int32_t hann, hann1, coefs;
+
+ memset(fft, 0, sizeof(int16_t) * PART_LEN4);
+
+ // FFT of signal
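+  // (Note:) `shift` is computed as time_signal_scaling - 14; the first asm
+  // loop handles shift >= 0 with sllv, the second handles shift < 0 with
+  // srav by shift1 = -shift.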
+ __asm __volatile(
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "addiu %[shift], %[time_signal_scaling], -14 \n\t"
+ "addiu %[i], $zero, 64 \n\t"
+ "addiu %[load_ptr], %[time_signal], 0 \n\t"
+ "addiu %[hann], %[hanning], 0 \n\t"
+ "addiu %[hann1], %[hanning], 128 \n\t"
+ "addiu %[coefs], %[coefTable], 0 \n\t"
+ "bltz %[shift], 2f \n\t"
+ " negu %[shift1], %[shift] \n\t"
+ "1: "
+ "\n\t"
+ "lh %[tmp1], 0(%[load_ptr]) \n\t"
+ "lh %[tmp2], 0(%[hann]) \n\t"
+ "lh %[tmp3], 128(%[load_ptr]) \n\t"
+ "lh %[tmp4], 0(%[hann1]) \n\t"
+ "addiu %[i], %[i], -1 \n\t"
+ "mul %[tmp1], %[tmp1], %[tmp2] \n\t"
+ "mul %[tmp3], %[tmp3], %[tmp4] \n\t"
+ "lh %[f_coef], 0(%[coefs]) \n\t"
+ "lh %[s_coef], 2(%[coefs]) \n\t"
+ "addiu %[load_ptr], %[load_ptr], 2 \n\t"
+ "addiu %[hann], %[hann], 2 \n\t"
+ "addiu %[hann1], %[hann1], -2 \n\t"
+ "addu %[store_ptr1], %[fft], %[f_coef] \n\t"
+ "addu %[store_ptr2], %[fft], %[s_coef] \n\t"
+ "sllv %[tmp1], %[tmp1], %[shift] \n\t"
+ "sllv %[tmp3], %[tmp3], %[shift] \n\t"
+ "sh %[tmp1], 0(%[store_ptr1]) \n\t"
+ "sh %[tmp3], 0(%[store_ptr2]) \n\t"
+ "bgtz %[i], 1b \n\t"
+ " addiu %[coefs], %[coefs], 4 \n\t"
+ "b 3f \n\t"
+ " nop \n\t"
+ "2: "
+ "\n\t"
+ "lh %[tmp1], 0(%[load_ptr]) \n\t"
+ "lh %[tmp2], 0(%[hann]) \n\t"
+ "lh %[tmp3], 128(%[load_ptr]) \n\t"
+ "lh %[tmp4], 0(%[hann1]) \n\t"
+ "addiu %[i], %[i], -1 \n\t"
+ "mul %[tmp1], %[tmp1], %[tmp2] \n\t"
+ "mul %[tmp3], %[tmp3], %[tmp4] \n\t"
+ "lh %[f_coef], 0(%[coefs]) \n\t"
+ "lh %[s_coef], 2(%[coefs]) \n\t"
+ "addiu %[load_ptr], %[load_ptr], 2 \n\t"
+ "addiu %[hann], %[hann], 2 \n\t"
+ "addiu %[hann1], %[hann1], -2 \n\t"
+ "addu %[store_ptr1], %[fft], %[f_coef] \n\t"
+ "addu %[store_ptr2], %[fft], %[s_coef] \n\t"
+ "srav %[tmp1], %[tmp1], %[shift1] \n\t"
+ "srav %[tmp3], %[tmp3], %[shift1] \n\t"
+ "sh %[tmp1], 0(%[store_ptr1]) \n\t"
+ "sh %[tmp3], 0(%[store_ptr2]) \n\t"
+ "bgtz %[i], 2b \n\t"
+ " addiu %[coefs], %[coefs], 4 \n\t"
+ "3: "
+ "\n\t"
+ ".set pop \n\t"
+ : [load_ptr] "=&r"(load_ptr), [shift] "=&r"(shift), [hann] "=&r"(hann),
+ [hann1] "=&r"(hann1), [shift1] "=&r"(shift1), [coefs] "=&r"(coefs),
+ [tmp1] "=&r"(tmp1), [tmp2] "=&r"(tmp2), [tmp3] "=&r"(tmp3),
+ [tmp4] "=&r"(tmp4), [i] "=&r"(i), [f_coef] "=&r"(f_coef),
+ [s_coef] "=&r"(s_coef), [store_ptr1] "=&r"(store_ptr1),
+ [store_ptr2] "=&r"(store_ptr2)
+ : [time_signal] "r"(time_signal), [coefTable] "r"(coefTable),
+ [time_signal_scaling] "r"(time_signal_scaling),
+ [hanning] "r"(WebRtcAecm_kSqrtHanning), [fft] "r"(fft)
+ : "memory", "hi", "lo");
+
+ WebRtcSpl_ComplexFFT(fft, PART_LEN_SHIFT, 1);
+ pfrfi = fft;
+ pfreq_signal = freq_signal;
+
+ __asm __volatile(
+ ".set push "
+ "\n\t"
+ ".set noreorder "
+ "\n\t"
+ "addiu %[j], $zero, 128 "
+ "\n\t"
+ "1: "
+ "\n\t"
+ "lh %[tmp1], 0(%[pfrfi]) "
+ "\n\t"
+ "lh %[tmp2], 2(%[pfrfi]) "
+ "\n\t"
+ "lh %[tmp3], 4(%[pfrfi]) "
+ "\n\t"
+ "lh %[tmp4], 6(%[pfrfi]) "
+ "\n\t"
+ "subu %[tmp2], $zero, %[tmp2] "
+ "\n\t"
+ "sh %[tmp1], 0(%[pfreq_signal]) "
+ "\n\t"
+ "sh %[tmp2], 2(%[pfreq_signal]) "
+ "\n\t"
+ "subu %[tmp4], $zero, %[tmp4] "
+ "\n\t"
+ "sh %[tmp3], 4(%[pfreq_signal]) "
+ "\n\t"
+ "sh %[tmp4], 6(%[pfreq_signal]) "
+ "\n\t"
+ "lh %[tmp1], 8(%[pfrfi]) "
+ "\n\t"
+ "lh %[tmp2], 10(%[pfrfi]) "
+ "\n\t"
+ "lh %[tmp3], 12(%[pfrfi]) "
+ "\n\t"
+ "lh %[tmp4], 14(%[pfrfi]) "
+ "\n\t"
+ "addiu %[j], %[j], -8 "
+ "\n\t"
+ "subu %[tmp2], $zero, %[tmp2] "
+ "\n\t"
+ "sh %[tmp1], 8(%[pfreq_signal]) "
+ "\n\t"
+ "sh %[tmp2], 10(%[pfreq_signal]) "
+ "\n\t"
+ "subu %[tmp4], $zero, %[tmp4] "
+ "\n\t"
+ "sh %[tmp3], 12(%[pfreq_signal]) "
+ "\n\t"
+ "sh %[tmp4], 14(%[pfreq_signal]) "
+ "\n\t"
+ "addiu %[pfreq_signal], %[pfreq_signal], 16 "
+ "\n\t"
+ "bgtz %[j], 1b "
+ "\n\t"
+ " addiu %[pfrfi], %[pfrfi], 16 "
+ "\n\t"
+ ".set pop "
+ "\n\t"
+ : [tmp1] "=&r"(tmp1), [tmp2] "=&r"(tmp2), [tmp3] "=&r"(tmp3),
+ [j] "=&r"(j), [pfrfi] "+r"(pfrfi), [pfreq_signal] "+r"(pfreq_signal),
+ [tmp4] "=&r"(tmp4)
+ :
+ : "memory");
+}
+
+static void InverseFFTAndWindow(AecmCore* aecm,
+ int16_t* fft,
+ ComplexInt16* efw,
+ int16_t* output,
+ const int16_t* nearendClean) {
+ int i, outCFFT;
+ int32_t tmp1, tmp2, tmp3, tmp4, tmp_re, tmp_im;
+ int16_t* pcoefTable_ifft = coefTable_ifft;
+ int16_t* pfft = fft;
+ int16_t* ppfft = fft;
+ ComplexInt16* pefw = efw;
+ int32_t out_aecm;
+ int16_t* paecm_buf = aecm->outBuf;
+ const int16_t* p_kSqrtHanning = WebRtcAecm_kSqrtHanning;
+ const int16_t* pp_kSqrtHanning = &WebRtcAecm_kSqrtHanning[PART_LEN];
+ int16_t* output1 = output;
+
+ __asm __volatile(
+ ".set push "
+ "\n\t"
+ ".set noreorder "
+ "\n\t"
+ "addiu %[i], $zero, 64 "
+ "\n\t"
+ "1: "
+ "\n\t"
+ "lh %[tmp1], 0(%[pcoefTable_ifft]) "
+ "\n\t"
+ "lh %[tmp2], 2(%[pcoefTable_ifft]) "
+ "\n\t"
+ "lh %[tmp_re], 0(%[pefw]) "
+ "\n\t"
+ "lh %[tmp_im], 2(%[pefw]) "
+ "\n\t"
+ "addu %[pfft], %[fft], %[tmp2] "
+ "\n\t"
+ "sh %[tmp_re], 0(%[pfft]) "
+ "\n\t"
+ "sh %[tmp_im], 2(%[pfft]) "
+ "\n\t"
+ "addu %[pfft], %[fft], %[tmp1] "
+ "\n\t"
+ "sh %[tmp_re], 0(%[pfft]) "
+ "\n\t"
+ "subu %[tmp_im], $zero, %[tmp_im] "
+ "\n\t"
+ "sh %[tmp_im], 2(%[pfft]) "
+ "\n\t"
+ "lh %[tmp1], 4(%[pcoefTable_ifft]) "
+ "\n\t"
+ "lh %[tmp2], 6(%[pcoefTable_ifft]) "
+ "\n\t"
+ "lh %[tmp_re], 4(%[pefw]) "
+ "\n\t"
+ "lh %[tmp_im], 6(%[pefw]) "
+ "\n\t"
+ "addu %[pfft], %[fft], %[tmp2] "
+ "\n\t"
+ "sh %[tmp_re], 0(%[pfft]) "
+ "\n\t"
+ "sh %[tmp_im], 2(%[pfft]) "
+ "\n\t"
+ "addu %[pfft], %[fft], %[tmp1] "
+ "\n\t"
+ "sh %[tmp_re], 0(%[pfft]) "
+ "\n\t"
+ "subu %[tmp_im], $zero, %[tmp_im] "
+ "\n\t"
+ "sh %[tmp_im], 2(%[pfft]) "
+ "\n\t"
+ "lh %[tmp1], 8(%[pcoefTable_ifft]) "
+ "\n\t"
+ "lh %[tmp2], 10(%[pcoefTable_ifft]) "
+ "\n\t"
+ "lh %[tmp_re], 8(%[pefw]) "
+ "\n\t"
+ "lh %[tmp_im], 10(%[pefw]) "
+ "\n\t"
+ "addu %[pfft], %[fft], %[tmp2] "
+ "\n\t"
+ "sh %[tmp_re], 0(%[pfft]) "
+ "\n\t"
+ "sh %[tmp_im], 2(%[pfft]) "
+ "\n\t"
+ "addu %[pfft], %[fft], %[tmp1] "
+ "\n\t"
+ "sh %[tmp_re], 0(%[pfft]) "
+ "\n\t"
+ "subu %[tmp_im], $zero, %[tmp_im] "
+ "\n\t"
+ "sh %[tmp_im], 2(%[pfft]) "
+ "\n\t"
+ "lh %[tmp1], 12(%[pcoefTable_ifft]) "
+ "\n\t"
+ "lh %[tmp2], 14(%[pcoefTable_ifft]) "
+ "\n\t"
+ "lh %[tmp_re], 12(%[pefw]) "
+ "\n\t"
+ "lh %[tmp_im], 14(%[pefw]) "
+ "\n\t"
+ "addu %[pfft], %[fft], %[tmp2] "
+ "\n\t"
+ "sh %[tmp_re], 0(%[pfft]) "
+ "\n\t"
+ "sh %[tmp_im], 2(%[pfft]) "
+ "\n\t"
+ "addu %[pfft], %[fft], %[tmp1] "
+ "\n\t"
+ "sh %[tmp_re], 0(%[pfft]) "
+ "\n\t"
+ "subu %[tmp_im], $zero, %[tmp_im] "
+ "\n\t"
+ "sh %[tmp_im], 2(%[pfft]) "
+ "\n\t"
+ "addiu %[pcoefTable_ifft], %[pcoefTable_ifft], 16 "
+ "\n\t"
+ "addiu %[i], %[i], -4 "
+ "\n\t"
+ "bgtz %[i], 1b "
+ "\n\t"
+ " addiu %[pefw], %[pefw], 16 "
+ "\n\t"
+ ".set pop "
+ "\n\t"
+ : [tmp1] "=&r"(tmp1), [tmp2] "=&r"(tmp2), [pfft] "+r"(pfft), [i] "=&r"(i),
+ [tmp_re] "=&r"(tmp_re), [tmp_im] "=&r"(tmp_im), [pefw] "+r"(pefw),
+ [pcoefTable_ifft] "+r"(pcoefTable_ifft), [fft] "+r"(fft)
+ :
+ : "memory");
+
+ fft[2] = efw[PART_LEN].real;
+ fft[3] = -efw[PART_LEN].imag;
+
+ outCFFT = WebRtcSpl_ComplexIFFT(fft, PART_LEN_SHIFT, 1);
+ pfft = fft;
+
+ __asm __volatile(
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "addiu %[i], $zero, 128 \n\t"
+ "1: \n\t"
+ "lh %[tmp1], 0(%[ppfft]) \n\t"
+ "lh %[tmp2], 4(%[ppfft]) \n\t"
+ "lh %[tmp3], 8(%[ppfft]) \n\t"
+ "lh %[tmp4], 12(%[ppfft]) \n\t"
+ "addiu %[i], %[i], -4 \n\t"
+ "sh %[tmp1], 0(%[pfft]) \n\t"
+ "sh %[tmp2], 2(%[pfft]) \n\t"
+ "sh %[tmp3], 4(%[pfft]) \n\t"
+ "sh %[tmp4], 6(%[pfft]) \n\t"
+ "addiu %[ppfft], %[ppfft], 16 \n\t"
+ "bgtz %[i], 1b \n\t"
+ " addiu %[pfft], %[pfft], 8 \n\t"
+ ".set pop \n\t"
+ : [tmp1] "=&r"(tmp1), [tmp2] "=&r"(tmp2), [pfft] "+r"(pfft), [i] "=&r"(i),
+ [tmp3] "=&r"(tmp3), [tmp4] "=&r"(tmp4), [ppfft] "+r"(ppfft)
+ :
+ : "memory");
+
+ pfft = fft;
+ out_aecm = (int32_t)(outCFFT - aecm->dfaCleanQDomain);
+
+ __asm __volatile(
+ ".set push "
+ "\n\t"
+ ".set noreorder "
+ "\n\t"
+ "addiu %[i], $zero, 64 "
+ "\n\t"
+ "11: "
+ "\n\t"
+ "lh %[tmp1], 0(%[pfft]) "
+ "\n\t"
+ "lh %[tmp2], 0(%[p_kSqrtHanning]) "
+ "\n\t"
+ "addiu %[i], %[i], -2 "
+ "\n\t"
+ "mul %[tmp1], %[tmp1], %[tmp2] "
+ "\n\t"
+ "lh %[tmp3], 2(%[pfft]) "
+ "\n\t"
+ "lh %[tmp4], 2(%[p_kSqrtHanning]) "
+ "\n\t"
+ "mul %[tmp3], %[tmp3], %[tmp4] "
+ "\n\t"
+ "addiu %[tmp1], %[tmp1], 8192 "
+ "\n\t"
+ "sra %[tmp1], %[tmp1], 14 "
+ "\n\t"
+ "addiu %[tmp3], %[tmp3], 8192 "
+ "\n\t"
+ "sra %[tmp3], %[tmp3], 14 "
+ "\n\t"
+ "bgez %[out_aecm], 1f "
+ "\n\t"
+ " negu %[tmp2], %[out_aecm] "
+ "\n\t"
+ "srav %[tmp1], %[tmp1], %[tmp2] "
+ "\n\t"
+ "b 2f "
+ "\n\t"
+ " srav %[tmp3], %[tmp3], %[tmp2] "
+ "\n\t"
+ "1: "
+ "\n\t"
+ "sllv %[tmp1], %[tmp1], %[out_aecm] "
+ "\n\t"
+ "sllv %[tmp3], %[tmp3], %[out_aecm] "
+ "\n\t"
+ "2: "
+ "\n\t"
+ "lh %[tmp4], 0(%[paecm_buf]) "
+ "\n\t"
+ "lh %[tmp2], 2(%[paecm_buf]) "
+ "\n\t"
+ "addu %[tmp3], %[tmp3], %[tmp2] "
+ "\n\t"
+ "addu %[tmp1], %[tmp1], %[tmp4] "
+ "\n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "shll_s.w %[tmp1], %[tmp1], 16 "
+ "\n\t"
+ "sra %[tmp1], %[tmp1], 16 "
+ "\n\t"
+ "shll_s.w %[tmp3], %[tmp3], 16 "
+ "\n\t"
+ "sra %[tmp3], %[tmp3], 16 "
+ "\n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "sra %[tmp4], %[tmp1], 31 "
+ "\n\t"
+ "sra %[tmp2], %[tmp1], 15 "
+ "\n\t"
+ "beq %[tmp4], %[tmp2], 3f "
+ "\n\t"
+ " ori %[tmp2], $zero, 0x7fff "
+ "\n\t"
+ "xor %[tmp1], %[tmp2], %[tmp4] "
+ "\n\t"
+ "3: "
+ "\n\t"
+ "sra %[tmp2], %[tmp3], 31 "
+ "\n\t"
+ "sra %[tmp4], %[tmp3], 15 "
+ "\n\t"
+ "beq %[tmp2], %[tmp4], 4f "
+ "\n\t"
+ " ori %[tmp4], $zero, 0x7fff "
+ "\n\t"
+ "xor %[tmp3], %[tmp4], %[tmp2] "
+ "\n\t"
+ "4: "
+ "\n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
+ "sh %[tmp1], 0(%[pfft]) "
+ "\n\t"
+ "sh %[tmp1], 0(%[output1]) "
+ "\n\t"
+ "sh %[tmp3], 2(%[pfft]) "
+ "\n\t"
+ "sh %[tmp3], 2(%[output1]) "
+ "\n\t"
+ "lh %[tmp1], 128(%[pfft]) "
+ "\n\t"
+ "lh %[tmp2], 0(%[pp_kSqrtHanning]) "
+ "\n\t"
+ "mul %[tmp1], %[tmp1], %[tmp2] "
+ "\n\t"
+ "lh %[tmp3], 130(%[pfft]) "
+ "\n\t"
+ "lh %[tmp4], -2(%[pp_kSqrtHanning]) "
+ "\n\t"
+ "mul %[tmp3], %[tmp3], %[tmp4] "
+ "\n\t"
+ "sra %[tmp1], %[tmp1], 14 "
+ "\n\t"
+ "sra %[tmp3], %[tmp3], 14 "
+ "\n\t"
+ "bgez %[out_aecm], 5f "
+ "\n\t"
+ " negu %[tmp2], %[out_aecm] "
+ "\n\t"
+ "srav %[tmp3], %[tmp3], %[tmp2] "
+ "\n\t"
+ "b 6f "
+ "\n\t"
+ " srav %[tmp1], %[tmp1], %[tmp2] "
+ "\n\t"
+ "5: "
+ "\n\t"
+ "sllv %[tmp1], %[tmp1], %[out_aecm] "
+ "\n\t"
+ "sllv %[tmp3], %[tmp3], %[out_aecm] "
+ "\n\t"
+ "6: "
+ "\n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "shll_s.w %[tmp1], %[tmp1], 16 "
+ "\n\t"
+ "sra %[tmp1], %[tmp1], 16 "
+ "\n\t"
+ "shll_s.w %[tmp3], %[tmp3], 16 "
+ "\n\t"
+ "sra %[tmp3], %[tmp3], 16 "
+ "\n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "sra %[tmp4], %[tmp1], 31 "
+ "\n\t"
+ "sra %[tmp2], %[tmp1], 15 "
+ "\n\t"
+ "beq %[tmp4], %[tmp2], 7f "
+ "\n\t"
+ " ori %[tmp2], $zero, 0x7fff "
+ "\n\t"
+ "xor %[tmp1], %[tmp2], %[tmp4] "
+ "\n\t"
+ "7: "
+ "\n\t"
+ "sra %[tmp2], %[tmp3], 31 "
+ "\n\t"
+ "sra %[tmp4], %[tmp3], 15 "
+ "\n\t"
+ "beq %[tmp2], %[tmp4], 8f "
+ "\n\t"
+ " ori %[tmp4], $zero, 0x7fff "
+ "\n\t"
+ "xor %[tmp3], %[tmp4], %[tmp2] "
+ "\n\t"
+ "8: "
+ "\n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
+ "sh %[tmp1], 0(%[paecm_buf]) "
+ "\n\t"
+ "sh %[tmp3], 2(%[paecm_buf]) "
+ "\n\t"
+ "addiu %[output1], %[output1], 4 "
+ "\n\t"
+ "addiu %[paecm_buf], %[paecm_buf], 4 "
+ "\n\t"
+ "addiu %[pfft], %[pfft], 4 "
+ "\n\t"
+ "addiu %[p_kSqrtHanning], %[p_kSqrtHanning], 4 "
+ "\n\t"
+ "bgtz %[i], 11b "
+ "\n\t"
+ " addiu %[pp_kSqrtHanning], %[pp_kSqrtHanning], -4 "
+ "\n\t"
+ ".set pop "
+ "\n\t"
+ : [tmp1] "=&r"(tmp1), [tmp2] "=&r"(tmp2), [pfft] "+r"(pfft),
+ [output1] "+r"(output1), [tmp3] "=&r"(tmp3), [tmp4] "=&r"(tmp4),
+ [paecm_buf] "+r"(paecm_buf), [i] "=&r"(i),
+ [pp_kSqrtHanning] "+r"(pp_kSqrtHanning),
+ [p_kSqrtHanning] "+r"(p_kSqrtHanning)
+ : [out_aecm] "r"(out_aecm),
+ [WebRtcAecm_kSqrtHanning] "r"(WebRtcAecm_kSqrtHanning)
+ : "hi", "lo", "memory");
+
+ // Copy the current block to the old position
+ // (aecm->outBuf is shifted elsewhere)
+ memcpy(aecm->xBuf, aecm->xBuf + PART_LEN, sizeof(int16_t) * PART_LEN);
+ memcpy(aecm->dBufNoisy, aecm->dBufNoisy + PART_LEN,
+ sizeof(int16_t) * PART_LEN);
+ if (nearendClean != NULL) {
+ memcpy(aecm->dBufClean, aecm->dBufClean + PART_LEN,
+ sizeof(int16_t) * PART_LEN);
+ }
+}
+
+void WebRtcAecm_CalcLinearEnergies_mips(AecmCore* aecm,
+ const uint16_t* far_spectrum,
+ int32_t* echo_est,
+ uint32_t* far_energy,
+ uint32_t* echo_energy_adapt,
+ uint32_t* echo_energy_stored) {
+ int i;
+ uint32_t par1 = (*far_energy);
+ uint32_t par2 = (*echo_energy_adapt);
+ uint32_t par3 = (*echo_energy_stored);
+ int16_t* ch_stored_p = &(aecm->channelStored[0]);
+ int16_t* ch_adapt_p = &(aecm->channelAdapt16[0]);
+ uint16_t* spectrum_p = (uint16_t*)(&(far_spectrum[0]));
+ int32_t* echo_p = &(echo_est[0]);
+ int32_t temp0, stored0, echo0, adept0, spectrum0;
+ int32_t stored1, adept1, spectrum1, echo1, temp1;
+
+ // Get energy for the delayed far end signal and estimated
+ // echo using both stored and adapted channels.
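+  // (Equivalent scalar form, illustrative; cf. the PART_LEN tail below:)
+  //   echo_est[i]         = channelStored[i] * far_spectrum[i];
+  //   far_energy         += far_spectrum[i];
+  //   echo_energy_adapt  += channelAdapt16[i] * far_spectrum[i];
+  //   echo_energy_stored += echo_est[i];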
+ for (i = 0; i < PART_LEN; i += 4) {
+ __asm __volatile(
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "lh %[stored0], 0(%[ch_stored_p]) \n\t"
+ "lhu %[adept0], 0(%[ch_adapt_p]) \n\t"
+ "lhu %[spectrum0], 0(%[spectrum_p]) \n\t"
+ "lh %[stored1], 2(%[ch_stored_p]) \n\t"
+ "lhu %[adept1], 2(%[ch_adapt_p]) \n\t"
+ "lhu %[spectrum1], 2(%[spectrum_p]) \n\t"
+ "mul %[echo0], %[stored0], %[spectrum0] \n\t"
+ "mul %[temp0], %[adept0], %[spectrum0] \n\t"
+ "mul %[echo1], %[stored1], %[spectrum1] \n\t"
+ "mul %[temp1], %[adept1], %[spectrum1] \n\t"
+ "addu %[par1], %[par1], %[spectrum0] \n\t"
+ "addu %[par1], %[par1], %[spectrum1] \n\t"
+ "addiu %[echo_p], %[echo_p], 16 \n\t"
+ "addu %[par3], %[par3], %[echo0] \n\t"
+ "addu %[par2], %[par2], %[temp0] \n\t"
+ "addu %[par3], %[par3], %[echo1] \n\t"
+ "addu %[par2], %[par2], %[temp1] \n\t"
+ "usw %[echo0], -16(%[echo_p]) \n\t"
+ "usw %[echo1], -12(%[echo_p]) \n\t"
+ "lh %[stored0], 4(%[ch_stored_p]) \n\t"
+ "lhu %[adept0], 4(%[ch_adapt_p]) \n\t"
+ "lhu %[spectrum0], 4(%[spectrum_p]) \n\t"
+ "lh %[stored1], 6(%[ch_stored_p]) \n\t"
+ "lhu %[adept1], 6(%[ch_adapt_p]) \n\t"
+ "lhu %[spectrum1], 6(%[spectrum_p]) \n\t"
+ "mul %[echo0], %[stored0], %[spectrum0] \n\t"
+ "mul %[temp0], %[adept0], %[spectrum0] \n\t"
+ "mul %[echo1], %[stored1], %[spectrum1] \n\t"
+ "mul %[temp1], %[adept1], %[spectrum1] \n\t"
+ "addu %[par1], %[par1], %[spectrum0] \n\t"
+ "addu %[par1], %[par1], %[spectrum1] \n\t"
+ "addiu %[ch_stored_p], %[ch_stored_p], 8 \n\t"
+ "addiu %[ch_adapt_p], %[ch_adapt_p], 8 \n\t"
+ "addiu %[spectrum_p], %[spectrum_p], 8 \n\t"
+ "addu %[par3], %[par3], %[echo0] \n\t"
+ "addu %[par2], %[par2], %[temp0] \n\t"
+ "addu %[par3], %[par3], %[echo1] \n\t"
+ "addu %[par2], %[par2], %[temp1] \n\t"
+ "usw %[echo0], -8(%[echo_p]) \n\t"
+ "usw %[echo1], -4(%[echo_p]) \n\t"
+ ".set pop \n\t"
+ : [temp0] "=&r"(temp0), [stored0] "=&r"(stored0),
+ [adept0] "=&r"(adept0), [spectrum0] "=&r"(spectrum0),
+ [echo0] "=&r"(echo0), [echo_p] "+r"(echo_p), [par3] "+r"(par3),
+ [par1] "+r"(par1), [par2] "+r"(par2), [stored1] "=&r"(stored1),
+ [adept1] "=&r"(adept1), [echo1] "=&r"(echo1),
+ [spectrum1] "=&r"(spectrum1), [temp1] "=&r"(temp1),
+ [ch_stored_p] "+r"(ch_stored_p), [ch_adapt_p] "+r"(ch_adapt_p),
+ [spectrum_p] "+r"(spectrum_p)
+ :
+ : "hi", "lo", "memory");
+ }
+
+ echo_est[PART_LEN] = WEBRTC_SPL_MUL_16_U16(aecm->channelStored[PART_LEN],
+ far_spectrum[PART_LEN]);
+ par1 += (uint32_t)(far_spectrum[PART_LEN]);
+ par2 += aecm->channelAdapt16[PART_LEN] * far_spectrum[PART_LEN];
+ par3 += (uint32_t)echo_est[PART_LEN];
+
+ (*far_energy) = par1;
+ (*echo_energy_adapt) = par2;
+ (*echo_energy_stored) = par3;
+}
+
+#if defined(MIPS_DSP_R1_LE)
+void WebRtcAecm_StoreAdaptiveChannel_mips(AecmCore* aecm,
+ const uint16_t* far_spectrum,
+ int32_t* echo_est) {
+ int i;
+ int16_t* temp1;
+ uint16_t* temp8;
+ int32_t temp0, temp2, temp3, temp4, temp5, temp6;
+ int32_t* temp7 = &(echo_est[0]);
+ temp1 = &(aecm->channelStored[0]);
+ temp8 = (uint16_t*)(&far_spectrum[0]);
+
+ // During startup we store the channel every block.
+ memcpy(aecm->channelStored, aecm->channelAdapt16,
+ sizeof(int16_t) * PART_LEN1);
+ // Recalculate echo estimate
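+  // (Note:) muleq_s.w yields the saturated, doubled product of two Q15
+  // halfwords; the following `sra` by 1 restores the plain
+  // channelStored[i] * far_spectrum[i] product, as in the scalar tail below.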
+ for (i = 0; i < PART_LEN; i += 4) {
+ __asm __volatile(
+ "ulw %[temp0], 0(%[temp8]) \n\t"
+ "ulw %[temp2], 0(%[temp1]) \n\t"
+ "ulw %[temp4], 4(%[temp8]) \n\t"
+ "ulw %[temp5], 4(%[temp1]) \n\t"
+ "muleq_s.w.phl %[temp3], %[temp2], %[temp0] \n\t"
+ "muleq_s.w.phr %[temp0], %[temp2], %[temp0] \n\t"
+ "muleq_s.w.phl %[temp6], %[temp5], %[temp4] \n\t"
+ "muleq_s.w.phr %[temp4], %[temp5], %[temp4] \n\t"
+ "addiu %[temp7], %[temp7], 16 \n\t"
+ "addiu %[temp1], %[temp1], 8 \n\t"
+ "addiu %[temp8], %[temp8], 8 \n\t"
+ "sra %[temp3], %[temp3], 1 \n\t"
+ "sra %[temp0], %[temp0], 1 \n\t"
+ "sra %[temp6], %[temp6], 1 \n\t"
+ "sra %[temp4], %[temp4], 1 \n\t"
+ "usw %[temp3], -12(%[temp7]) \n\t"
+ "usw %[temp0], -16(%[temp7]) \n\t"
+ "usw %[temp6], -4(%[temp7]) \n\t"
+ "usw %[temp4], -8(%[temp7]) \n\t"
+ : [temp0] "=&r"(temp0), [temp2] "=&r"(temp2), [temp3] "=&r"(temp3),
+ [temp4] "=&r"(temp4), [temp5] "=&r"(temp5), [temp6] "=&r"(temp6),
+ [temp1] "+r"(temp1), [temp8] "+r"(temp8), [temp7] "+r"(temp7)
+ :
+ : "hi", "lo", "memory");
+ }
+ echo_est[i] = WEBRTC_SPL_MUL_16_U16(aecm->channelStored[i], far_spectrum[i]);
+}
+
+void WebRtcAecm_ResetAdaptiveChannel_mips(AecmCore* aecm) {
+ int i;
+ int32_t* temp3;
+ int16_t* temp0;
+ int32_t temp1, temp2, temp4, temp5;
+
+ temp0 = &(aecm->channelStored[0]);
+ temp3 = &(aecm->channelAdapt32[0]);
+
+ // The stored channel has a significantly lower MSE than the adaptive one for
+ // two consecutive calculations. Reset the adaptive channel.
+ memcpy(aecm->channelAdapt16, aecm->channelStored,
+ sizeof(int16_t) * PART_LEN1);
+
+ // Restore the W32 channel
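+  // (Note:) preceq.w expands each 16-bit value into the upper half of a
+  // 32-bit word, i.e. the same `<< 16` as in the scalar tail below.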
+ for (i = 0; i < PART_LEN; i += 4) {
+ __asm __volatile(
+ "ulw %[temp1], 0(%[temp0]) \n\t"
+ "ulw %[temp4], 4(%[temp0]) \n\t"
+ "preceq.w.phl %[temp2], %[temp1] \n\t"
+ "preceq.w.phr %[temp1], %[temp1] \n\t"
+ "preceq.w.phl %[temp5], %[temp4] \n\t"
+ "preceq.w.phr %[temp4], %[temp4] \n\t"
+ "addiu %[temp0], %[temp0], 8 \n\t"
+ "usw %[temp2], 4(%[temp3]) \n\t"
+ "usw %[temp1], 0(%[temp3]) \n\t"
+ "usw %[temp5], 12(%[temp3]) \n\t"
+ "usw %[temp4], 8(%[temp3]) \n\t"
+ "addiu %[temp3], %[temp3], 16 \n\t"
+ : [temp1] "=&r"(temp1), [temp2] "=&r"(temp2), [temp4] "=&r"(temp4),
+ [temp5] "=&r"(temp5), [temp3] "+r"(temp3), [temp0] "+r"(temp0)
+ :
+ : "memory");
+ }
+
+ aecm->channelAdapt32[i] = (int32_t)aecm->channelStored[i] << 16;
+}
+#endif // #if defined(MIPS_DSP_R1_LE)
+
+// Transforms a time domain signal into the frequency domain, outputting the
+// complex valued signal, absolute value and sum of absolute values.
+//
+// time_signal [in] Pointer to the time domain signal
+// freq_signal [out] Pointer to the complex valued frequency domain array
+// freq_signal_abs [out] Pointer to the absolute values of the frequency
+// domain array
+// freq_signal_sum_abs [out] Pointer to the sum of all absolute values in
+// the frequency domain array
+// return value The Q-domain of the current frequency values
+//
+static int TimeToFrequencyDomain(AecmCore* aecm,
+ const int16_t* time_signal,
+ ComplexInt16* freq_signal,
+ uint16_t* freq_signal_abs,
+ uint32_t* freq_signal_sum_abs) {
+ int i = 0;
+ int time_signal_scaling = 0;
+
+ // In fft_buf, +16 for 32-byte alignment.
+ int16_t fft_buf[PART_LEN4 + 16];
+ int16_t* fft = (int16_t*)(((uintptr_t)fft_buf + 31) & ~31);
+
+ int16_t tmp16no1;
+#if !defined(MIPS_DSP_R2_LE)
+ int32_t tmp32no1;
+ int32_t tmp32no2;
+ int16_t tmp16no2;
+#else
+ int32_t tmp32no10, tmp32no11, tmp32no12, tmp32no13;
+ int32_t tmp32no20, tmp32no21, tmp32no22, tmp32no23;
+ int16_t* freqp;
+ uint16_t* freqabsp;
+ uint32_t freqt0, freqt1, freqt2, freqt3;
+ uint32_t freqs;
+#endif
+
+#ifdef AECM_DYNAMIC_Q
+ tmp16no1 = WebRtcSpl_MaxAbsValueW16(time_signal, PART_LEN2);
+ time_signal_scaling = WebRtcSpl_NormW16(tmp16no1);
+#endif
+
+ WindowAndFFT(aecm, fft, time_signal, freq_signal, time_signal_scaling);
+
+ // Extract imaginary and real part,
+ // calculate the magnitude for all frequency bins
+ freq_signal[0].imag = 0;
+ freq_signal[PART_LEN].imag = 0;
+ freq_signal[PART_LEN].real = fft[PART_LEN2];
+ freq_signal_abs[0] = (uint16_t)WEBRTC_SPL_ABS_W16(freq_signal[0].real);
+ freq_signal_abs[PART_LEN] =
+ (uint16_t)WEBRTC_SPL_ABS_W16(freq_signal[PART_LEN].real);
+ (*freq_signal_sum_abs) =
+ (uint32_t)(freq_signal_abs[0]) + (uint32_t)(freq_signal_abs[PART_LEN]);
+
+#if !defined(MIPS_DSP_R2_LE)
+ for (i = 1; i < PART_LEN; i++) {
+ if (freq_signal[i].real == 0) {
+ freq_signal_abs[i] = (uint16_t)WEBRTC_SPL_ABS_W16(freq_signal[i].imag);
+ } else if (freq_signal[i].imag == 0) {
+ freq_signal_abs[i] = (uint16_t)WEBRTC_SPL_ABS_W16(freq_signal[i].real);
+ } else {
+ // Approximation for magnitude of complex fft output
+ // magn = sqrt(real^2 + imag^2)
+ // magn ~= alpha * max(`imag`,`real`) + beta * min(`imag`,`real`)
+ //
+ // The parameters alpha and beta are stored in Q15
+ tmp16no1 = WEBRTC_SPL_ABS_W16(freq_signal[i].real);
+ tmp16no2 = WEBRTC_SPL_ABS_W16(freq_signal[i].imag);
+ tmp32no1 = tmp16no1 * tmp16no1;
+ tmp32no2 = tmp16no2 * tmp16no2;
+ tmp32no2 = WebRtcSpl_AddSatW32(tmp32no1, tmp32no2);
+ tmp32no1 = WebRtcSpl_SqrtFloor(tmp32no2);
+
+ freq_signal_abs[i] = (uint16_t)tmp32no1;
+ }
+ (*freq_signal_sum_abs) += (uint32_t)freq_signal_abs[i];
+ }
+#else // #if !defined(MIPS_DSP_R2_LE)
+ freqs =
+ (uint32_t)(freq_signal_abs[0]) + (uint32_t)(freq_signal_abs[PART_LEN]);
+ freqp = &(freq_signal[1].real);
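+  // (Note:) dpaq_s.w.ph accumulates the doubled sum real^2 + imag^2 for one
+  // packed bin per accumulator; extr.w with shift 1 halves it back before
+  // WebRtcSpl_SqrtFloor, matching the scalar branch above.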
+
+ __asm __volatile(
+ "lw %[freqt0], 0(%[freqp]) \n\t"
+ "lw %[freqt1], 4(%[freqp]) \n\t"
+ "lw %[freqt2], 8(%[freqp]) \n\t"
+ "mult $ac0, $zero, $zero \n\t"
+ "mult $ac1, $zero, $zero \n\t"
+ "mult $ac2, $zero, $zero \n\t"
+ "dpaq_s.w.ph $ac0, %[freqt0], %[freqt0] \n\t"
+ "dpaq_s.w.ph $ac1, %[freqt1], %[freqt1] \n\t"
+ "dpaq_s.w.ph $ac2, %[freqt2], %[freqt2] \n\t"
+ "addiu %[freqp], %[freqp], 12 \n\t"
+ "extr.w %[tmp32no20], $ac0, 1 \n\t"
+ "extr.w %[tmp32no21], $ac1, 1 \n\t"
+ "extr.w %[tmp32no22], $ac2, 1 \n\t"
+ : [freqt0] "=&r"(freqt0), [freqt1] "=&r"(freqt1), [freqt2] "=&r"(freqt2),
+ [freqp] "+r"(freqp), [tmp32no20] "=r"(tmp32no20),
+ [tmp32no21] "=r"(tmp32no21), [tmp32no22] "=r"(tmp32no22)
+ :
+ : "memory", "hi", "lo", "$ac1hi", "$ac1lo", "$ac2hi", "$ac2lo");
+
+ tmp32no10 = WebRtcSpl_SqrtFloor(tmp32no20);
+ tmp32no11 = WebRtcSpl_SqrtFloor(tmp32no21);
+ tmp32no12 = WebRtcSpl_SqrtFloor(tmp32no22);
+ freq_signal_abs[1] = (uint16_t)tmp32no10;
+ freq_signal_abs[2] = (uint16_t)tmp32no11;
+ freq_signal_abs[3] = (uint16_t)tmp32no12;
+ freqs += (uint32_t)tmp32no10;
+ freqs += (uint32_t)tmp32no11;
+ freqs += (uint32_t)tmp32no12;
+ freqabsp = &(freq_signal_abs[4]);
+ for (i = 4; i < PART_LEN; i += 4) {
+ __asm __volatile(
+ "ulw %[freqt0], 0(%[freqp]) \n\t"
+ "ulw %[freqt1], 4(%[freqp]) \n\t"
+ "ulw %[freqt2], 8(%[freqp]) \n\t"
+ "ulw %[freqt3], 12(%[freqp]) \n\t"
+ "mult $ac0, $zero, $zero \n\t"
+ "mult $ac1, $zero, $zero \n\t"
+ "mult $ac2, $zero, $zero \n\t"
+ "mult $ac3, $zero, $zero \n\t"
+ "dpaq_s.w.ph $ac0, %[freqt0], %[freqt0] \n\t"
+ "dpaq_s.w.ph $ac1, %[freqt1], %[freqt1] \n\t"
+ "dpaq_s.w.ph $ac2, %[freqt2], %[freqt2] \n\t"
+ "dpaq_s.w.ph $ac3, %[freqt3], %[freqt3] \n\t"
+ "addiu %[freqp], %[freqp], 16 \n\t"
+ "addiu %[freqabsp], %[freqabsp], 8 \n\t"
+ "extr.w %[tmp32no20], $ac0, 1 \n\t"
+ "extr.w %[tmp32no21], $ac1, 1 \n\t"
+ "extr.w %[tmp32no22], $ac2, 1 \n\t"
+ "extr.w %[tmp32no23], $ac3, 1 \n\t"
+ : [freqt0] "=&r"(freqt0), [freqt1] "=&r"(freqt1),
+ [freqt2] "=&r"(freqt2), [freqt3] "=&r"(freqt3),
+ [tmp32no20] "=r"(tmp32no20), [tmp32no21] "=r"(tmp32no21),
+ [tmp32no22] "=r"(tmp32no22), [tmp32no23] "=r"(tmp32no23),
+ [freqabsp] "+r"(freqabsp), [freqp] "+r"(freqp)
+ :
+ : "memory", "hi", "lo", "$ac1hi", "$ac1lo", "$ac2hi", "$ac2lo",
+ "$ac3hi", "$ac3lo");
+
+ tmp32no10 = WebRtcSpl_SqrtFloor(tmp32no20);
+ tmp32no11 = WebRtcSpl_SqrtFloor(tmp32no21);
+ tmp32no12 = WebRtcSpl_SqrtFloor(tmp32no22);
+ tmp32no13 = WebRtcSpl_SqrtFloor(tmp32no23);
+
+ __asm __volatile(
+ "sh %[tmp32no10], -8(%[freqabsp]) \n\t"
+ "sh %[tmp32no11], -6(%[freqabsp]) \n\t"
+ "sh %[tmp32no12], -4(%[freqabsp]) \n\t"
+ "sh %[tmp32no13], -2(%[freqabsp]) \n\t"
+ "addu %[freqs], %[freqs], %[tmp32no10] \n\t"
+ "addu %[freqs], %[freqs], %[tmp32no11] \n\t"
+ "addu %[freqs], %[freqs], %[tmp32no12] \n\t"
+ "addu %[freqs], %[freqs], %[tmp32no13] \n\t"
+ : [freqs] "+r"(freqs)
+ : [tmp32no10] "r"(tmp32no10), [tmp32no11] "r"(tmp32no11),
+ [tmp32no12] "r"(tmp32no12), [tmp32no13] "r"(tmp32no13),
+ [freqabsp] "r"(freqabsp)
+ : "memory");
+ }
+
+ (*freq_signal_sum_abs) = freqs;
+#endif
+
+ return time_signal_scaling;
+}
+
+int WebRtcAecm_ProcessBlock(AecmCore* aecm,
+ const int16_t* farend,
+ const int16_t* nearendNoisy,
+ const int16_t* nearendClean,
+ int16_t* output) {
+ int i;
+ uint32_t xfaSum;
+ uint32_t dfaNoisySum;
+ uint32_t dfaCleanSum;
+ uint32_t echoEst32Gained;
+ uint32_t tmpU32;
+ int32_t tmp32no1;
+
+ uint16_t xfa[PART_LEN1];
+ uint16_t dfaNoisy[PART_LEN1];
+ uint16_t dfaClean[PART_LEN1];
+ uint16_t* ptrDfaClean = dfaClean;
+ const uint16_t* far_spectrum_ptr = NULL;
+
+ // 32 byte aligned buffers (with +8 or +16).
+ int16_t fft_buf[PART_LEN4 + 2 + 16]; // +2 to make a loop safe.
+ int32_t echoEst32_buf[PART_LEN1 + 8];
+ int32_t dfw_buf[PART_LEN2 + 8];
+ int32_t efw_buf[PART_LEN2 + 8];
+
+  int16_t* fft = (int16_t*)(((uintptr_t)fft_buf + 31) & ~31);
+  int32_t* echoEst32 = (int32_t*)(((uintptr_t)echoEst32_buf + 31) & ~31);
+  ComplexInt16* dfw = (ComplexInt16*)(((uintptr_t)dfw_buf + 31) & ~31);
+  ComplexInt16* efw = (ComplexInt16*)(((uintptr_t)efw_buf + 31) & ~31);
+
+ int16_t hnl[PART_LEN1];
+ int16_t numPosCoef = 0;
+ int delay;
+ int16_t tmp16no1;
+ int16_t tmp16no2;
+ int16_t mu;
+ int16_t supGain;
+ int16_t zeros32, zeros16;
+ int16_t zerosDBufNoisy, zerosDBufClean, zerosXBuf;
+ int far_q;
+ int16_t resolutionDiff, qDomainDiff, dfa_clean_q_domain_diff;
+
+ const int kMinPrefBand = 4;
+ const int kMaxPrefBand = 24;
+ int32_t avgHnl32 = 0;
+
+ int32_t temp1, temp2, temp3, temp4, temp5, temp6, temp7, temp8;
+ int16_t* ptr;
+ int16_t* ptr1;
+ int16_t* er_ptr;
+ int16_t* dr_ptr;
+
+ ptr = &hnl[0];
+ ptr1 = &hnl[0];
+ er_ptr = &efw[0].real;
+ dr_ptr = &dfw[0].real;
+
+ // Determine startup state. There are three states:
+ // (0) the first CONV_LEN blocks
+ // (1) another CONV_LEN blocks
+ // (2) the rest
+
+ if (aecm->startupState < 2) {
+ aecm->startupState =
+ (aecm->totCount >= CONV_LEN) + (aecm->totCount >= CONV_LEN2);
+ }
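+  // Illustrative example: with CONV_LEN = 512 (so CONV_LEN2 = 1024),
+  // totCount = 100 yields state 0, totCount = 700 yields state 1, and
+  // totCount >= 1024 yields state 2, since each comparison adds 1 when true.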
+ // END: Determine startup state
+
+ // Buffer near and far end signals
+ memcpy(aecm->xBuf + PART_LEN, farend, sizeof(int16_t) * PART_LEN);
+ memcpy(aecm->dBufNoisy + PART_LEN, nearendNoisy, sizeof(int16_t) * PART_LEN);
+ if (nearendClean != NULL) {
+ memcpy(aecm->dBufClean + PART_LEN, nearendClean,
+ sizeof(int16_t) * PART_LEN);
+ }
+
+ // Transform far end signal from time domain to frequency domain.
+ far_q = TimeToFrequencyDomain(aecm, aecm->xBuf, dfw, xfa, &xfaSum);
+
+ // Transform noisy near end signal from time domain to frequency domain.
+ zerosDBufNoisy =
+ TimeToFrequencyDomain(aecm, aecm->dBufNoisy, dfw, dfaNoisy, &dfaNoisySum);
+ aecm->dfaNoisyQDomainOld = aecm->dfaNoisyQDomain;
+ aecm->dfaNoisyQDomain = (int16_t)zerosDBufNoisy;
+
+ if (nearendClean == NULL) {
+ ptrDfaClean = dfaNoisy;
+ aecm->dfaCleanQDomainOld = aecm->dfaNoisyQDomainOld;
+ aecm->dfaCleanQDomain = aecm->dfaNoisyQDomain;
+ dfaCleanSum = dfaNoisySum;
+ } else {
+ // Transform clean near end signal from time domain to frequency domain.
+ zerosDBufClean = TimeToFrequencyDomain(aecm, aecm->dBufClean, dfw, dfaClean,
+ &dfaCleanSum);
+ aecm->dfaCleanQDomainOld = aecm->dfaCleanQDomain;
+ aecm->dfaCleanQDomain = (int16_t)zerosDBufClean;
+ }
+
+ // Get the delay
+ // Save far-end history and estimate delay
+ WebRtcAecm_UpdateFarHistory(aecm, xfa, far_q);
+
+ if (WebRtc_AddFarSpectrumFix(aecm->delay_estimator_farend, xfa, PART_LEN1,
+ far_q) == -1) {
+ return -1;
+ }
+ delay = WebRtc_DelayEstimatorProcessFix(aecm->delay_estimator, dfaNoisy,
+ PART_LEN1, zerosDBufNoisy);
+ if (delay == -1) {
+ return -1;
+ } else if (delay == -2) {
+ // If the delay is unknown, we assume zero.
+ // NOTE: this will have to be adjusted if we ever add lookahead.
+ delay = 0;
+ }
+
+ if (aecm->fixedDelay >= 0) {
+ // Use fixed delay
+ delay = aecm->fixedDelay;
+ }
+
+ // Get aligned far end spectrum
+ far_spectrum_ptr = WebRtcAecm_AlignedFarend(aecm, &far_q, delay);
+ zerosXBuf = (int16_t)far_q;
+
+ if (far_spectrum_ptr == NULL) {
+ return -1;
+ }
+
+ // Calculate log(energy) and update energy threshold levels
+ WebRtcAecm_CalcEnergies(aecm, far_spectrum_ptr, zerosXBuf, dfaNoisySum,
+ echoEst32);
+ // Calculate stepsize
+ mu = WebRtcAecm_CalcStepSize(aecm);
+
+ // Update counters
+ aecm->totCount++;
+
+ // This is the channel estimation algorithm.
+  // It is based on NLMS but has a variable step length,
+ // which was calculated above.
+ WebRtcAecm_UpdateChannel(aecm, far_spectrum_ptr, zerosXBuf, dfaNoisy, mu,
+ echoEst32);
+
+ supGain = WebRtcAecm_CalcSuppressionGain(aecm);
+
+ // Calculate Wiener filter hnl[]
+ for (i = 0; i < PART_LEN1; i++) {
+ // Far end signal through channel estimate in Q8
+ // How much can we shift right to preserve resolution
+ tmp32no1 = echoEst32[i] - aecm->echoFilt[i];
+ aecm->echoFilt[i] +=
+ rtc::dchecked_cast<int32_t>((int64_t{tmp32no1} * 50) >> 8);
+
+ zeros32 = WebRtcSpl_NormW32(aecm->echoFilt[i]) + 1;
+ zeros16 = WebRtcSpl_NormW16(supGain) + 1;
+ if (zeros32 + zeros16 > 16) {
+ // Multiplication is safe
+ // Result in
+ // Q(RESOLUTION_CHANNEL+RESOLUTION_SUPGAIN+aecm->xfaQDomainBuf[diff])
+ echoEst32Gained =
+ WEBRTC_SPL_UMUL_32_16((uint32_t)aecm->echoFilt[i], (uint16_t)supGain);
+ resolutionDiff = 14 - RESOLUTION_CHANNEL16 - RESOLUTION_SUPGAIN;
+ resolutionDiff += (aecm->dfaCleanQDomain - zerosXBuf);
+ } else {
+ tmp16no1 = 17 - zeros32 - zeros16;
+ resolutionDiff =
+ 14 + tmp16no1 - RESOLUTION_CHANNEL16 - RESOLUTION_SUPGAIN;
+ resolutionDiff += (aecm->dfaCleanQDomain - zerosXBuf);
+ if (zeros32 > tmp16no1) {
+ echoEst32Gained = WEBRTC_SPL_UMUL_32_16((uint32_t)aecm->echoFilt[i],
+ supGain >> tmp16no1);
+ } else {
+ // Result in Q-(RESOLUTION_CHANNEL+RESOLUTION_SUPGAIN-16)
+ echoEst32Gained = (aecm->echoFilt[i] >> tmp16no1) * supGain;
+ }
+ }
+
+ zeros16 = WebRtcSpl_NormW16(aecm->nearFilt[i]);
+ RTC_DCHECK_GE(zeros16, 0); // `zeros16` is a norm, hence non-negative.
+ dfa_clean_q_domain_diff = aecm->dfaCleanQDomain - aecm->dfaCleanQDomainOld;
+ if (zeros16 < dfa_clean_q_domain_diff && aecm->nearFilt[i]) {
+ tmp16no1 = aecm->nearFilt[i] << zeros16;
+ qDomainDiff = zeros16 - dfa_clean_q_domain_diff;
+ tmp16no2 = ptrDfaClean[i] >> -qDomainDiff;
+ } else {
+ tmp16no1 = dfa_clean_q_domain_diff < 0
+ ? aecm->nearFilt[i] >> -dfa_clean_q_domain_diff
+ : aecm->nearFilt[i] << dfa_clean_q_domain_diff;
+ qDomainDiff = 0;
+ tmp16no2 = ptrDfaClean[i];
+ }
+
+ tmp32no1 = (int32_t)(tmp16no2 - tmp16no1);
+ tmp16no2 = (int16_t)(tmp32no1 >> 4);
+ tmp16no2 += tmp16no1;
+ zeros16 = WebRtcSpl_NormW16(tmp16no2);
+ if ((tmp16no2) & (-qDomainDiff > zeros16)) {
+ aecm->nearFilt[i] = WEBRTC_SPL_WORD16_MAX;
+ } else {
+ aecm->nearFilt[i] =
+ qDomainDiff < 0 ? tmp16no2 << -qDomainDiff : tmp16no2 >> qDomainDiff;
+ }
+
+ // Wiener filter coefficients, resulting hnl in Q14
+ if (echoEst32Gained == 0) {
+ hnl[i] = ONE_Q14;
+ numPosCoef++;
+ } else if (aecm->nearFilt[i] == 0) {
+ hnl[i] = 0;
+ } else {
+ // Multiply the suppression gain
+ // Rounding
+ echoEst32Gained += (uint32_t)(aecm->nearFilt[i] >> 1);
+ tmpU32 =
+ WebRtcSpl_DivU32U16(echoEst32Gained, (uint16_t)aecm->nearFilt[i]);
+
+ // Current resolution is
+ // Q-(RESOLUTION_CHANNEL + RESOLUTION_SUPGAIN
+ // - max(0, 17 - zeros16 - zeros32))
+ // Make sure we are in Q14
+ tmp32no1 = (int32_t)WEBRTC_SPL_SHIFT_W32(tmpU32, resolutionDiff);
+ if (tmp32no1 > ONE_Q14) {
+ hnl[i] = 0;
+ } else if (tmp32no1 < 0) {
+ hnl[i] = ONE_Q14;
+ numPosCoef++;
+ } else {
+ // 1-echoEst/dfa
+ hnl[i] = ONE_Q14 - (int16_t)tmp32no1;
+ if (hnl[i] <= 0) {
+ hnl[i] = 0;
+ } else {
+ numPosCoef++;
+ }
+ }
+ }
+ }
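+  // In the loop above, as an illustration: if the echo-to-near ratio
+  // tmp32no1 is 8192 (0.5 in Q14), then hnl[i] = 16384 - 8192 = 8192,
+  // i.e. a suppression gain of one half.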
+
+ // Only in wideband. Prevent the gain in upper band from being larger than
+ // in lower band.
+ if (aecm->mult == 2) {
+ // TODO(bjornv): Investigate if the scaling of hnl[i] below can cause
+ // speech distortion in double-talk.
+ for (i = 0; i < (PART_LEN1 >> 3); i++) {
+ __asm __volatile(
+ "lh %[temp1], 0(%[ptr1]) \n\t"
+ "lh %[temp2], 2(%[ptr1]) \n\t"
+ "lh %[temp3], 4(%[ptr1]) \n\t"
+ "lh %[temp4], 6(%[ptr1]) \n\t"
+ "lh %[temp5], 8(%[ptr1]) \n\t"
+ "lh %[temp6], 10(%[ptr1]) \n\t"
+ "lh %[temp7], 12(%[ptr1]) \n\t"
+ "lh %[temp8], 14(%[ptr1]) \n\t"
+ "mul %[temp1], %[temp1], %[temp1] \n\t"
+ "mul %[temp2], %[temp2], %[temp2] \n\t"
+ "mul %[temp3], %[temp3], %[temp3] \n\t"
+ "mul %[temp4], %[temp4], %[temp4] \n\t"
+ "mul %[temp5], %[temp5], %[temp5] \n\t"
+ "mul %[temp6], %[temp6], %[temp6] \n\t"
+ "mul %[temp7], %[temp7], %[temp7] \n\t"
+ "mul %[temp8], %[temp8], %[temp8] \n\t"
+ "sra %[temp1], %[temp1], 14 \n\t"
+ "sra %[temp2], %[temp2], 14 \n\t"
+ "sra %[temp3], %[temp3], 14 \n\t"
+ "sra %[temp4], %[temp4], 14 \n\t"
+ "sra %[temp5], %[temp5], 14 \n\t"
+ "sra %[temp6], %[temp6], 14 \n\t"
+ "sra %[temp7], %[temp7], 14 \n\t"
+ "sra %[temp8], %[temp8], 14 \n\t"
+ "sh %[temp1], 0(%[ptr1]) \n\t"
+ "sh %[temp2], 2(%[ptr1]) \n\t"
+ "sh %[temp3], 4(%[ptr1]) \n\t"
+ "sh %[temp4], 6(%[ptr1]) \n\t"
+ "sh %[temp5], 8(%[ptr1]) \n\t"
+ "sh %[temp6], 10(%[ptr1]) \n\t"
+ "sh %[temp7], 12(%[ptr1]) \n\t"
+ "sh %[temp8], 14(%[ptr1]) \n\t"
+ "addiu %[ptr1], %[ptr1], 16 \n\t"
+ : [temp1] "=&r"(temp1), [temp2] "=&r"(temp2), [temp3] "=&r"(temp3),
+ [temp4] "=&r"(temp4), [temp5] "=&r"(temp5), [temp6] "=&r"(temp6),
+ [temp7] "=&r"(temp7), [temp8] "=&r"(temp8), [ptr1] "+r"(ptr1)
+ :
+ : "memory", "hi", "lo");
+ }
+ for (i = 0; i < (PART_LEN1 & 7); i++) {
+ __asm __volatile(
+ "lh %[temp1], 0(%[ptr1]) \n\t"
+ "mul %[temp1], %[temp1], %[temp1] \n\t"
+ "sra %[temp1], %[temp1], 14 \n\t"
+ "sh %[temp1], 0(%[ptr1]) \n\t"
+ "addiu %[ptr1], %[ptr1], 2 \n\t"
+ : [temp1] "=&r"(temp1), [ptr1] "+r"(ptr1)
+ :
+ : "memory", "hi", "lo");
+ }
+
+ for (i = kMinPrefBand; i <= kMaxPrefBand; i++) {
+ avgHnl32 += (int32_t)hnl[i];
+ }
+
+ RTC_DCHECK_GT(kMaxPrefBand - kMinPrefBand + 1, 0);
+ avgHnl32 /= (kMaxPrefBand - kMinPrefBand + 1);
+
+ for (i = kMaxPrefBand; i < PART_LEN1; i++) {
+ if (hnl[i] > (int16_t)avgHnl32) {
+ hnl[i] = (int16_t)avgHnl32;
+ }
+ }
+ }
+
+ // Calculate NLP gain, result is in Q14
+ if (aecm->nlpFlag) {
+ if (numPosCoef < 3) {
+ for (i = 0; i < PART_LEN1; i++) {
+ efw[i].real = 0;
+ efw[i].imag = 0;
+ hnl[i] = 0;
+ }
+ } else {
+ for (i = 0; i < PART_LEN1; i++) {
+#if defined(MIPS_DSP_R1_LE)
+ __asm __volatile(
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "lh %[temp1], 0(%[ptr]) \n\t"
+ "lh %[temp2], 0(%[dr_ptr]) \n\t"
+ "slti %[temp4], %[temp1], 0x4001 \n\t"
+ "beqz %[temp4], 3f \n\t"
+ " lh %[temp3], 2(%[dr_ptr]) \n\t"
+ "slti %[temp5], %[temp1], 3277 \n\t"
+ "bnez %[temp5], 2f \n\t"
+ " addiu %[dr_ptr], %[dr_ptr], 4 \n\t"
+ "mul %[temp2], %[temp2], %[temp1] \n\t"
+ "mul %[temp3], %[temp3], %[temp1] \n\t"
+ "shra_r.w %[temp2], %[temp2], 14 \n\t"
+ "shra_r.w %[temp3], %[temp3], 14 \n\t"
+ "b 4f \n\t"
+ " nop \n\t"
+ "2: \n\t"
+ "addu %[temp1], $zero, $zero \n\t"
+ "addu %[temp2], $zero, $zero \n\t"
+ "addu %[temp3], $zero, $zero \n\t"
+ "b 1f \n\t"
+ " nop \n\t"
+ "3: \n\t"
+ "addiu %[temp1], $0, 0x4000 \n\t"
+ "1: \n\t"
+ "sh %[temp1], 0(%[ptr]) \n\t"
+ "4: \n\t"
+ "sh %[temp2], 0(%[er_ptr]) \n\t"
+ "sh %[temp3], 2(%[er_ptr]) \n\t"
+ "addiu %[ptr], %[ptr], 2 \n\t"
+ "addiu %[er_ptr], %[er_ptr], 4 \n\t"
+ ".set pop \n\t"
+ : [temp1] "=&r"(temp1), [temp2] "=&r"(temp2), [temp3] "=&r"(temp3),
+ [temp4] "=&r"(temp4), [temp5] "=&r"(temp5), [ptr] "+r"(ptr),
+ [er_ptr] "+r"(er_ptr), [dr_ptr] "+r"(dr_ptr)
+ :
+ : "memory", "hi", "lo");
+#else
+ __asm __volatile(
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "lh %[temp1], 0(%[ptr]) \n\t"
+ "lh %[temp2], 0(%[dr_ptr]) \n\t"
+ "slti %[temp4], %[temp1], 0x4001 \n\t"
+ "beqz %[temp4], 3f \n\t"
+ " lh %[temp3], 2(%[dr_ptr]) \n\t"
+ "slti %[temp5], %[temp1], 3277 \n\t"
+ "bnez %[temp5], 2f \n\t"
+ " addiu %[dr_ptr], %[dr_ptr], 4 \n\t"
+ "mul %[temp2], %[temp2], %[temp1] \n\t"
+ "mul %[temp3], %[temp3], %[temp1] \n\t"
+ "addiu %[temp2], %[temp2], 0x2000 \n\t"
+ "addiu %[temp3], %[temp3], 0x2000 \n\t"
+ "sra %[temp2], %[temp2], 14 \n\t"
+ "sra %[temp3], %[temp3], 14 \n\t"
+ "b 4f \n\t"
+ " nop \n\t"
+ "2: \n\t"
+ "addu %[temp1], $zero, $zero \n\t"
+ "addu %[temp2], $zero, $zero \n\t"
+ "addu %[temp3], $zero, $zero \n\t"
+ "b 1f \n\t"
+ " nop \n\t"
+ "3: \n\t"
+ "addiu %[temp1], $0, 0x4000 \n\t"
+ "1: \n\t"
+ "sh %[temp1], 0(%[ptr]) \n\t"
+ "4: \n\t"
+ "sh %[temp2], 0(%[er_ptr]) \n\t"
+ "sh %[temp3], 2(%[er_ptr]) \n\t"
+ "addiu %[ptr], %[ptr], 2 \n\t"
+ "addiu %[er_ptr], %[er_ptr], 4 \n\t"
+ ".set pop \n\t"
+ : [temp1] "=&r"(temp1), [temp2] "=&r"(temp2), [temp3] "=&r"(temp3),
+ [temp4] "=&r"(temp4), [temp5] "=&r"(temp5), [ptr] "+r"(ptr),
+ [er_ptr] "+r"(er_ptr), [dr_ptr] "+r"(dr_ptr)
+ :
+ : "memory", "hi", "lo");
+#endif
+ }
+ }
+ } else {
+ // multiply with Wiener coefficients
+ for (i = 0; i < PART_LEN1; i++) {
+ efw[i].real = (int16_t)(
+ WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(dfw[i].real, hnl[i], 14));
+ efw[i].imag = (int16_t)(
+ WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(dfw[i].imag, hnl[i], 14));
+ }
+ }
+
+ if (aecm->cngMode == AecmTrue) {
+ ComfortNoise(aecm, ptrDfaClean, efw, hnl);
+ }
+
+ InverseFFTAndWindow(aecm, fft, efw, output, nearendClean);
+
+ return 0;
+}
+
+// Generate comfort noise and add to output signal.
+static void ComfortNoise(AecmCore* aecm,
+ const uint16_t* dfa,
+ ComplexInt16* out,
+ const int16_t* lambda) {
+ int16_t i;
+ int16_t tmp16, tmp161, tmp162, tmp163, nrsh1, nrsh2;
+ int32_t tmp32, tmp321, tnoise, tnoise1;
+ int32_t tmp322, tmp323, *tmp1;
+ int16_t* dfap;
+ int16_t* lambdap;
+ const int32_t c2049 = 2049;
+ const int32_t c359 = 359;
+ const int32_t c114 = ONE_Q14;
+
+ int16_t randW16[PART_LEN];
+ int16_t uReal[PART_LEN1];
+ int16_t uImag[PART_LEN1];
+ int32_t outLShift32;
+
+ int16_t shiftFromNearToNoise = kNoiseEstQDomain - aecm->dfaCleanQDomain;
+ int16_t minTrackShift = 9;
+
+ RTC_DCHECK_GE(shiftFromNearToNoise, 0);
+ RTC_DCHECK_LT(shiftFromNearToNoise, 16);
+
+ if (aecm->noiseEstCtr < 100) {
+ // Track the minimum more quickly initially.
+ aecm->noiseEstCtr++;
+ minTrackShift = 6;
+ }
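+  // The downward step below is (tnoise - outLShift32) >> minTrackShift, so
+  // a shift of 9 closes roughly 1/512 of the gap per block, while the
+  // initial shift of 6 tracks about eight times faster.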
+
+  // Generate a uniform random array on [0, 2^15-1].
+ WebRtcSpl_RandUArray(randW16, PART_LEN, &aecm->seed);
+ int16_t* randW16p = (int16_t*)randW16;
+#if defined(MIPS_DSP_R1_LE)
+ int16_t* kCosTablep = (int16_t*)WebRtcAecm_kCosTable;
+ int16_t* kSinTablep = (int16_t*)WebRtcAecm_kSinTable;
+#endif // #if defined(MIPS_DSP_R1_LE)
+ tmp1 = (int32_t*)aecm->noiseEst + 1;
+ dfap = (int16_t*)dfa + 1;
+ lambdap = (int16_t*)lambda + 1;
+ // Estimate noise power.
+ for (i = 1; i < PART_LEN1; i += 2) {
+ // Shift to the noise domain.
+ __asm __volatile(
+ "lh %[tmp32], 0(%[dfap]) \n\t"
+ "lw %[tnoise], 0(%[tmp1]) \n\t"
+ "sllv %[outLShift32], %[tmp32], %[shiftFromNearToNoise] \n\t"
+ : [tmp32] "=&r"(tmp32), [outLShift32] "=r"(outLShift32),
+ [tnoise] "=&r"(tnoise)
+ : [tmp1] "r"(tmp1), [dfap] "r"(dfap),
+ [shiftFromNearToNoise] "r"(shiftFromNearToNoise)
+ : "memory");
+
+ if (outLShift32 < tnoise) {
+ // Reset "too low" counter
+ aecm->noiseEstTooLowCtr[i] = 0;
+ // Track the minimum.
+ if (tnoise < (1 << minTrackShift)) {
+        // For small values, decrease noiseEst[i] once every
+        // `kNoiseEstIncCount` blocks. The regular approach below cannot
+        // go further down due to truncation.
+ aecm->noiseEstTooHighCtr[i]++;
+ if (aecm->noiseEstTooHighCtr[i] >= kNoiseEstIncCount) {
+ tnoise--;
+ aecm->noiseEstTooHighCtr[i] = 0; // Reset the counter
+ }
+ } else {
+ __asm __volatile(
+ "subu %[tmp32], %[tnoise], %[outLShift32] \n\t"
+ "srav %[tmp32], %[tmp32], %[minTrackShift] \n\t"
+ "subu %[tnoise], %[tnoise], %[tmp32] \n\t"
+ : [tmp32] "=&r"(tmp32), [tnoise] "+r"(tnoise)
+ :
+ [outLShift32] "r"(outLShift32), [minTrackShift] "r"(minTrackShift));
+ }
+ } else {
+ // Reset "too high" counter
+ aecm->noiseEstTooHighCtr[i] = 0;
+ // Ramp slowly upwards until we hit the minimum again.
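+      // The ramp scales the estimate by 2049/2048 per block, i.e. roughly
+      // +0.05%.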
+ if ((tnoise >> 19) <= 0) {
+ if ((tnoise >> 11) > 0) {
+ // Large enough for relative increase
+ __asm __volatile(
+ "mul %[tnoise], %[tnoise], %[c2049] \n\t"
+ "sra %[tnoise], %[tnoise], 11 \n\t"
+ : [tnoise] "+r"(tnoise)
+ : [c2049] "r"(c2049)
+ : "hi", "lo");
+ } else {
+          // Make incremental increases based on size once every
+          // `kNoiseEstIncCount` blocks.
+ aecm->noiseEstTooLowCtr[i]++;
+ if (aecm->noiseEstTooLowCtr[i] >= kNoiseEstIncCount) {
+ __asm __volatile(
+ "sra %[tmp32], %[tnoise], 9 \n\t"
+ "addi %[tnoise], %[tnoise], 1 \n\t"
+ "addu %[tnoise], %[tnoise], %[tmp32] \n\t"
+ : [tnoise] "+r"(tnoise), [tmp32] "=&r"(tmp32)
+ :);
+ aecm->noiseEstTooLowCtr[i] = 0; // Reset counter
+ }
+ }
+ } else {
+ // Avoid overflow.
+      // Multiplication with 2049 would cause wrap-around, so scale
+      // down first and then multiply.
+ __asm __volatile(
+ "sra %[tnoise], %[tnoise], 11 \n\t"
+ "mul %[tnoise], %[tnoise], %[c2049] \n\t"
+ : [tnoise] "+r"(tnoise)
+ : [c2049] "r"(c2049)
+ : "hi", "lo");
+ }
+ }
+
+ // Shift to the noise domain.
+ __asm __volatile(
+ "lh %[tmp32], 2(%[dfap]) \n\t"
+ "lw %[tnoise1], 4(%[tmp1]) \n\t"
+ "addiu %[dfap], %[dfap], 4 \n\t"
+ "sllv %[outLShift32], %[tmp32], %[shiftFromNearToNoise] \n\t"
+ : [tmp32] "=&r"(tmp32), [dfap] "+r"(dfap),
+ [outLShift32] "=r"(outLShift32), [tnoise1] "=&r"(tnoise1)
+ : [tmp1] "r"(tmp1), [shiftFromNearToNoise] "r"(shiftFromNearToNoise)
+ : "memory");
+
+ if (outLShift32 < tnoise1) {
+ // Reset "too low" counter
+ aecm->noiseEstTooLowCtr[i + 1] = 0;
+ // Track the minimum.
+ if (tnoise1 < (1 << minTrackShift)) {
+        // For small values, decrease noiseEst[i + 1] once every
+        // `kNoiseEstIncCount` blocks. The regular approach below cannot
+        // go further down due to truncation.
+ aecm->noiseEstTooHighCtr[i + 1]++;
+ if (aecm->noiseEstTooHighCtr[i + 1] >= kNoiseEstIncCount) {
+ tnoise1--;
+ aecm->noiseEstTooHighCtr[i + 1] = 0; // Reset the counter
+ }
+ } else {
+ __asm __volatile(
+ "subu %[tmp32], %[tnoise1], %[outLShift32] \n\t"
+ "srav %[tmp32], %[tmp32], %[minTrackShift] \n\t"
+ "subu %[tnoise1], %[tnoise1], %[tmp32] \n\t"
+ : [tmp32] "=&r"(tmp32), [tnoise1] "+r"(tnoise1)
+ :
+ [outLShift32] "r"(outLShift32), [minTrackShift] "r"(minTrackShift));
+ }
+ } else {
+ // Reset "too high" counter
+ aecm->noiseEstTooHighCtr[i + 1] = 0;
+ // Ramp slowly upwards until we hit the minimum again.
+ if ((tnoise1 >> 19) <= 0) {
+ if ((tnoise1 >> 11) > 0) {
+ // Large enough for relative increase
+ __asm __volatile(
+ "mul %[tnoise1], %[tnoise1], %[c2049] \n\t"
+ "sra %[tnoise1], %[tnoise1], 11 \n\t"
+ : [tnoise1] "+r"(tnoise1)
+ : [c2049] "r"(c2049)
+ : "hi", "lo");
+ } else {
+          // Make incremental increases based on size once every
+          // `kNoiseEstIncCount` blocks.
+ aecm->noiseEstTooLowCtr[i + 1]++;
+ if (aecm->noiseEstTooLowCtr[i + 1] >= kNoiseEstIncCount) {
+ __asm __volatile(
+ "sra %[tmp32], %[tnoise1], 9 \n\t"
+ "addi %[tnoise1], %[tnoise1], 1 \n\t"
+ "addu %[tnoise1], %[tnoise1], %[tmp32] \n\t"
+ : [tnoise1] "+r"(tnoise1), [tmp32] "=&r"(tmp32)
+ :);
+ aecm->noiseEstTooLowCtr[i + 1] = 0; // Reset counter
+ }
+ }
+ } else {
+ // Avoid overflow.
+ // Multiplication with 2049 will cause wrap around. Scale
+ // down first and then multiply
+ __asm __volatile(
+ "sra %[tnoise1], %[tnoise1], 11 \n\t"
+ "mul %[tnoise1], %[tnoise1], %[c2049] \n\t"
+ : [tnoise1] "+r"(tnoise1)
+ : [c2049] "r"(c2049)
+ : "hi", "lo");
+ }
+ }
+
+ __asm __volatile(
+ "lh %[tmp16], 0(%[lambdap]) \n\t"
+ "lh %[tmp161], 2(%[lambdap]) \n\t"
+ "sw %[tnoise], 0(%[tmp1]) \n\t"
+ "sw %[tnoise1], 4(%[tmp1]) \n\t"
+ "subu %[tmp16], %[c114], %[tmp16] \n\t"
+ "subu %[tmp161], %[c114], %[tmp161] \n\t"
+ "srav %[tmp32], %[tnoise], %[shiftFromNearToNoise] \n\t"
+ "srav %[tmp321], %[tnoise1], %[shiftFromNearToNoise] \n\t"
+ "addiu %[lambdap], %[lambdap], 4 \n\t"
+ "addiu %[tmp1], %[tmp1], 8 \n\t"
+ : [tmp16] "=&r"(tmp16), [tmp161] "=&r"(tmp161), [tmp1] "+r"(tmp1),
+ [tmp32] "=&r"(tmp32), [tmp321] "=&r"(tmp321), [lambdap] "+r"(lambdap)
+ : [tnoise] "r"(tnoise), [tnoise1] "r"(tnoise1), [c114] "r"(c114),
+ [shiftFromNearToNoise] "r"(shiftFromNearToNoise)
+ : "memory");
+
+ if (tmp32 > 32767) {
+ tmp32 = 32767;
+ aecm->noiseEst[i] = tmp32 << shiftFromNearToNoise;
+ }
+ if (tmp321 > 32767) {
+ tmp321 = 32767;
+ aecm->noiseEst[i + 1] = tmp321 << shiftFromNearToNoise;
+ }
+
+ __asm __volatile(
+ "mul %[tmp32], %[tmp32], %[tmp16] \n\t"
+ "mul %[tmp321], %[tmp321], %[tmp161] \n\t"
+ "sra %[nrsh1], %[tmp32], 14 \n\t"
+ "sra %[nrsh2], %[tmp321], 14 \n\t"
+ : [nrsh1] "=&r"(nrsh1), [nrsh2] "=r"(nrsh2)
+ : [tmp16] "r"(tmp16), [tmp161] "r"(tmp161), [tmp32] "r"(tmp32),
+ [tmp321] "r"(tmp321)
+ : "memory", "hi", "lo");
+
+ __asm __volatile(
+ "lh %[tmp32], 0(%[randW16p]) \n\t"
+ "lh %[tmp321], 2(%[randW16p]) \n\t"
+ "addiu %[randW16p], %[randW16p], 4 \n\t"
+ "mul %[tmp32], %[tmp32], %[c359] \n\t"
+ "mul %[tmp321], %[tmp321], %[c359] \n\t"
+ "sra %[tmp16], %[tmp32], 15 \n\t"
+ "sra %[tmp161], %[tmp321], 15 \n\t"
+ : [randW16p] "+r"(randW16p), [tmp32] "=&r"(tmp32), [tmp16] "=r"(tmp16),
+ [tmp161] "=r"(tmp161), [tmp321] "=&r"(tmp321)
+ : [c359] "r"(c359)
+ : "memory", "hi", "lo");
+
+#if !defined(MIPS_DSP_R1_LE)
+ tmp32 = WebRtcAecm_kCosTable[tmp16];
+ tmp321 = WebRtcAecm_kSinTable[tmp16];
+ tmp322 = WebRtcAecm_kCosTable[tmp161];
+ tmp323 = WebRtcAecm_kSinTable[tmp161];
+#else
+ __asm __volatile(
+ "sll %[tmp16], %[tmp16], 1 \n\t"
+ "sll %[tmp161], %[tmp161], 1 \n\t"
+ "lhx %[tmp32], %[tmp16](%[kCosTablep]) \n\t"
+ "lhx %[tmp321], %[tmp16](%[kSinTablep]) \n\t"
+ "lhx %[tmp322], %[tmp161](%[kCosTablep]) \n\t"
+ "lhx %[tmp323], %[tmp161](%[kSinTablep]) \n\t"
+ : [tmp32] "=&r"(tmp32), [tmp321] "=&r"(tmp321), [tmp322] "=&r"(tmp322),
+ [tmp323] "=&r"(tmp323)
+ : [kCosTablep] "r"(kCosTablep), [tmp16] "r"(tmp16),
+ [tmp161] "r"(tmp161), [kSinTablep] "r"(kSinTablep)
+ : "memory");
+#endif
+ __asm __volatile(
+ "mul %[tmp32], %[tmp32], %[nrsh1] \n\t"
+ "negu %[tmp162], %[nrsh1] \n\t"
+ "mul %[tmp322], %[tmp322], %[nrsh2] \n\t"
+ "negu %[tmp163], %[nrsh2] \n\t"
+ "sra %[tmp32], %[tmp32], 13 \n\t"
+ "mul %[tmp321], %[tmp321], %[tmp162] \n\t"
+ "sra %[tmp322], %[tmp322], 13 \n\t"
+ "mul %[tmp323], %[tmp323], %[tmp163] \n\t"
+ "sra %[tmp321], %[tmp321], 13 \n\t"
+ "sra %[tmp323], %[tmp323], 13 \n\t"
+ : [tmp32] "+r"(tmp32), [tmp321] "+r"(tmp321), [tmp162] "=&r"(tmp162),
+ [tmp322] "+r"(tmp322), [tmp323] "+r"(tmp323), [tmp163] "=&r"(tmp163)
+ : [nrsh1] "r"(nrsh1), [nrsh2] "r"(nrsh2)
+ : "hi", "lo");
+ // Tables are in Q13.
+ uReal[i] = (int16_t)tmp32;
+ uImag[i] = (int16_t)tmp321;
+ uReal[i + 1] = (int16_t)tmp322;
+ uImag[i + 1] = (int16_t)tmp323;
+ }
+
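+  // Add the comfort noise to the output with a saturating add: when the
+  // 32-bit sum `tt` fails the sign-extension test below, the result clamps
+  // to 16384 on positive overflow or -16385 on negative overflow, i.e.
+  // roughly +/- 1.0 in Q14.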
+ int32_t tt, sgn;
+ tt = out[0].real;
+ sgn = ((int)tt) >> 31;
+ out[0].real = sgn == (int16_t)(tt >> 15) ? (int16_t)tt : (16384 ^ sgn);
+ tt = out[0].imag;
+ sgn = ((int)tt) >> 31;
+ out[0].imag = sgn == (int16_t)(tt >> 15) ? (int16_t)tt : (16384 ^ sgn);
+ for (i = 1; i < PART_LEN; i++) {
+ tt = out[i].real + uReal[i];
+ sgn = ((int)tt) >> 31;
+ out[i].real = sgn == (int16_t)(tt >> 15) ? (int16_t)tt : (16384 ^ sgn);
+ tt = out[i].imag + uImag[i];
+ sgn = ((int)tt) >> 31;
+ out[i].imag = sgn == (int16_t)(tt >> 15) ? (int16_t)tt : (16384 ^ sgn);
+ }
+ tt = out[PART_LEN].real + uReal[PART_LEN];
+ sgn = ((int)tt) >> 31;
+ out[PART_LEN].real = sgn == (int16_t)(tt >> 15) ? (int16_t)tt : (16384 ^ sgn);
+ tt = out[PART_LEN].imag;
+ sgn = ((int)tt) >> 31;
+ out[PART_LEN].imag = sgn == (int16_t)(tt >> 15) ? (int16_t)tt : (16384 ^ sgn);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/aecm/aecm_core_neon.cc b/third_party/libwebrtc/modules/audio_processing/aecm/aecm_core_neon.cc
new file mode 100644
index 0000000000..584110d3af
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aecm/aecm_core_neon.cc
@@ -0,0 +1,206 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <arm_neon.h>
+
+#include "common_audio/signal_processing/include/real_fft.h"
+#include "modules/audio_processing/aecm/aecm_core.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+namespace {
+
+// TODO(kma): Re-write the corresponding assembly file, the offset
+// generating script and makefile, to replace these C functions.
+
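+// Horizontally adds the four 32-bit lanes of `v` and stores the sum at
+// `ptr`. On WEBRTC_ARCH_ARM64 this is a single vaddvq_u32; otherwise the
+// lanes are reduced with a pairwise add.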
+static inline void AddLanes(uint32_t* ptr, uint32x4_t v) {
+#if defined(WEBRTC_ARCH_ARM64)
+ *(ptr) = vaddvq_u32(v);
+#else
+ uint32x2_t tmp_v;
+ tmp_v = vadd_u32(vget_low_u32(v), vget_high_u32(v));
+ tmp_v = vpadd_u32(tmp_v, tmp_v);
+ *(ptr) = vget_lane_u32(tmp_v, 0);
+#endif
+}
+
+} // namespace
+
+void WebRtcAecm_CalcLinearEnergiesNeon(AecmCore* aecm,
+ const uint16_t* far_spectrum,
+ int32_t* echo_est,
+ uint32_t* far_energy,
+ uint32_t* echo_energy_adapt,
+ uint32_t* echo_energy_stored) {
+ int16_t* start_stored_p = aecm->channelStored;
+ int16_t* start_adapt_p = aecm->channelAdapt16;
+ int32_t* echo_est_p = echo_est;
+ const int16_t* end_stored_p = aecm->channelStored + PART_LEN;
+ const uint16_t* far_spectrum_p = far_spectrum;
+ int16x8_t store_v, adapt_v;
+ uint16x8_t spectrum_v;
+ uint32x4_t echo_est_v_low, echo_est_v_high;
+ uint32x4_t far_energy_v, echo_stored_v, echo_adapt_v;
+
+ far_energy_v = vdupq_n_u32(0);
+ echo_adapt_v = vdupq_n_u32(0);
+ echo_stored_v = vdupq_n_u32(0);
+
+ // Get energy for the delayed far end signal and estimated
+ // echo using both stored and adapted channels.
+ // The C code:
+ // for (i = 0; i < PART_LEN1; i++) {
+ // echo_est[i] = WEBRTC_SPL_MUL_16_U16(aecm->channelStored[i],
+ // far_spectrum[i]);
+ // (*far_energy) += (uint32_t)(far_spectrum[i]);
+ // *echo_energy_adapt += aecm->channelAdapt16[i] * far_spectrum[i];
+ // (*echo_energy_stored) += (uint32_t)echo_est[i];
+ // }
+ while (start_stored_p < end_stored_p) {
+ spectrum_v = vld1q_u16(far_spectrum_p);
+ adapt_v = vld1q_s16(start_adapt_p);
+ store_v = vld1q_s16(start_stored_p);
+
+ far_energy_v = vaddw_u16(far_energy_v, vget_low_u16(spectrum_v));
+ far_energy_v = vaddw_u16(far_energy_v, vget_high_u16(spectrum_v));
+
+ echo_est_v_low = vmull_u16(vreinterpret_u16_s16(vget_low_s16(store_v)),
+ vget_low_u16(spectrum_v));
+ echo_est_v_high = vmull_u16(vreinterpret_u16_s16(vget_high_s16(store_v)),
+ vget_high_u16(spectrum_v));
+ vst1q_s32(echo_est_p, vreinterpretq_s32_u32(echo_est_v_low));
+ vst1q_s32(echo_est_p + 4, vreinterpretq_s32_u32(echo_est_v_high));
+
+ echo_stored_v = vaddq_u32(echo_est_v_low, echo_stored_v);
+ echo_stored_v = vaddq_u32(echo_est_v_high, echo_stored_v);
+
+ echo_adapt_v =
+ vmlal_u16(echo_adapt_v, vreinterpret_u16_s16(vget_low_s16(adapt_v)),
+ vget_low_u16(spectrum_v));
+ echo_adapt_v =
+ vmlal_u16(echo_adapt_v, vreinterpret_u16_s16(vget_high_s16(adapt_v)),
+ vget_high_u16(spectrum_v));
+
+ start_stored_p += 8;
+ start_adapt_p += 8;
+ far_spectrum_p += 8;
+ echo_est_p += 8;
+ }
+
+ AddLanes(far_energy, far_energy_v);
+ AddLanes(echo_energy_stored, echo_stored_v);
+ AddLanes(echo_energy_adapt, echo_adapt_v);
+
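+  // The last coefficient (index PART_LEN) is handled in scalar code, since
+  // PART_LEN1 = 65 is not a multiple of the 8-lane vector width.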
+ echo_est[PART_LEN] = WEBRTC_SPL_MUL_16_U16(aecm->channelStored[PART_LEN],
+ far_spectrum[PART_LEN]);
+ *echo_energy_stored += (uint32_t)echo_est[PART_LEN];
+ *far_energy += (uint32_t)far_spectrum[PART_LEN];
+ *echo_energy_adapt += aecm->channelAdapt16[PART_LEN] * far_spectrum[PART_LEN];
+}
+
+void WebRtcAecm_StoreAdaptiveChannelNeon(AecmCore* aecm,
+ const uint16_t* far_spectrum,
+ int32_t* echo_est) {
+ RTC_DCHECK_EQ(0, (uintptr_t)echo_est % 32);
+ RTC_DCHECK_EQ(0, (uintptr_t)aecm->channelStored % 16);
+ RTC_DCHECK_EQ(0, (uintptr_t)aecm->channelAdapt16 % 16);
+
+  // Reference C code for the optimized loop below.
+ // During startup we store the channel every block.
+ // memcpy(aecm->channelStored,
+ // aecm->channelAdapt16,
+ // sizeof(int16_t) * PART_LEN1);
+ // Recalculate echo estimate
+ // for (i = 0; i < PART_LEN; i += 4) {
+ // echo_est[i] = WEBRTC_SPL_MUL_16_U16(aecm->channelStored[i],
+ // far_spectrum[i]);
+ // echo_est[i + 1] = WEBRTC_SPL_MUL_16_U16(aecm->channelStored[i + 1],
+ // far_spectrum[i + 1]);
+ // echo_est[i + 2] = WEBRTC_SPL_MUL_16_U16(aecm->channelStored[i + 2],
+ // far_spectrum[i + 2]);
+ // echo_est[i + 3] = WEBRTC_SPL_MUL_16_U16(aecm->channelStored[i + 3],
+ // far_spectrum[i + 3]);
+ // }
+ // echo_est[i] = WEBRTC_SPL_MUL_16_U16(aecm->channelStored[i],
+ // far_spectrum[i]);
+ const uint16_t* far_spectrum_p = far_spectrum;
+ int16_t* start_adapt_p = aecm->channelAdapt16;
+ int16_t* start_stored_p = aecm->channelStored;
+ const int16_t* end_stored_p = aecm->channelStored + PART_LEN;
+ int32_t* echo_est_p = echo_est;
+
+ uint16x8_t far_spectrum_v;
+ int16x8_t adapt_v;
+ uint32x4_t echo_est_v_low, echo_est_v_high;
+
+ while (start_stored_p < end_stored_p) {
+ far_spectrum_v = vld1q_u16(far_spectrum_p);
+ adapt_v = vld1q_s16(start_adapt_p);
+
+ vst1q_s16(start_stored_p, adapt_v);
+
+ echo_est_v_low = vmull_u16(vget_low_u16(far_spectrum_v),
+ vget_low_u16(vreinterpretq_u16_s16(adapt_v)));
+ echo_est_v_high = vmull_u16(vget_high_u16(far_spectrum_v),
+ vget_high_u16(vreinterpretq_u16_s16(adapt_v)));
+
+ vst1q_s32(echo_est_p, vreinterpretq_s32_u32(echo_est_v_low));
+ vst1q_s32(echo_est_p + 4, vreinterpretq_s32_u32(echo_est_v_high));
+
+ far_spectrum_p += 8;
+ start_adapt_p += 8;
+ start_stored_p += 8;
+ echo_est_p += 8;
+ }
+ aecm->channelStored[PART_LEN] = aecm->channelAdapt16[PART_LEN];
+ echo_est[PART_LEN] = WEBRTC_SPL_MUL_16_U16(aecm->channelStored[PART_LEN],
+ far_spectrum[PART_LEN]);
+}
+
+void WebRtcAecm_ResetAdaptiveChannelNeon(AecmCore* aecm) {
+ RTC_DCHECK_EQ(0, (uintptr_t)aecm->channelStored % 16);
+ RTC_DCHECK_EQ(0, (uintptr_t)aecm->channelAdapt16 % 16);
+ RTC_DCHECK_EQ(0, (uintptr_t)aecm->channelAdapt32 % 32);
+
+  // Reference C code for the optimized loop below.
+ // for (i = 0; i < PART_LEN1; i++) {
+ // aecm->channelAdapt16[i] = aecm->channelStored[i];
+ // aecm->channelAdapt32[i] = WEBRTC_SPL_LSHIFT_W32(
+ // (int32_t)aecm->channelStored[i], 16);
+ // }
+
+ int16_t* start_stored_p = aecm->channelStored;
+ int16_t* start_adapt16_p = aecm->channelAdapt16;
+ int32_t* start_adapt32_p = aecm->channelAdapt32;
+ const int16_t* end_stored_p = start_stored_p + PART_LEN;
+
+ int16x8_t stored_v;
+ int32x4_t adapt32_v_low, adapt32_v_high;
+
+ while (start_stored_p < end_stored_p) {
+ stored_v = vld1q_s16(start_stored_p);
+ vst1q_s16(start_adapt16_p, stored_v);
+
+ adapt32_v_low = vshll_n_s16(vget_low_s16(stored_v), 16);
+ adapt32_v_high = vshll_n_s16(vget_high_s16(stored_v), 16);
+
+ vst1q_s32(start_adapt32_p, adapt32_v_low);
+ vst1q_s32(start_adapt32_p + 4, adapt32_v_high);
+
+ start_stored_p += 8;
+ start_adapt16_p += 8;
+ start_adapt32_p += 8;
+ }
+ aecm->channelAdapt16[PART_LEN] = aecm->channelStored[PART_LEN];
+ aecm->channelAdapt32[PART_LEN] = (int32_t)aecm->channelStored[PART_LEN] << 16;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/aecm/aecm_defines.h b/third_party/libwebrtc/modules/audio_processing/aecm/aecm_defines.h
new file mode 100644
index 0000000000..5805549e2c
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aecm/aecm_defines.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AECM_AECM_DEFINES_H_
+#define MODULES_AUDIO_PROCESSING_AECM_AECM_DEFINES_H_
+
+#define AECM_DYNAMIC_Q /* Turn on/off dynamic Q-domain. */
+
+/* Algorithm parameters */
+#define FRAME_LEN 80 /* Total frame length, 10 ms. */
+
+#define PART_LEN 64 /* Length of partition. */
+#define PART_LEN_SHIFT 7 /* log2(PART_LEN * 2). */
+
+#define PART_LEN1 (PART_LEN + 1) /* Unique fft coefficients. */
+#define PART_LEN2 (PART_LEN << 1) /* Length of partition * 2. */
+#define PART_LEN4 (PART_LEN << 2) /* Length of partition * 4. */
+#define FAR_BUF_LEN PART_LEN4 /* Length of buffers. */
+#define MAX_DELAY 100
+
+/* Counter parameters */
+#define CONV_LEN 512 /* Convergence length used at startup. */
+#define CONV_LEN2 (CONV_LEN << 1) /* Used at startup. */
+
+/* Energy parameters */
+#define MAX_BUF_LEN 64 /* History length of energy signals. */
+#define FAR_ENERGY_MIN 1025 /* Lowest Far energy level: At least 2 */
+ /* in energy. */
+#define FAR_ENERGY_DIFF 929 /* Allowed difference between max */
+ /* and min. */
+#define ENERGY_DEV_OFFSET 0 /* The energy error offset in Q8. */
+#define ENERGY_DEV_TOL 400 /* The energy estimation tolerance (Q8). */
+#define FAR_ENERGY_VAD_REGION 230 /* Far VAD tolerance region. */
+
+/* Stepsize parameters */
+#define MU_MIN 10 /* Min stepsize 2^-MU_MIN (far end energy */
+ /* dependent). */
+#define MU_MAX 1 /* Max stepsize 2^-MU_MAX (far end energy */
+ /* dependent). */
+#define MU_DIFF 9 /* MU_MIN - MU_MAX */
+
+/* Channel parameters */
+#define MIN_MSE_COUNT 20 /* Min number of consecutive blocks with enough */
+ /* far end energy to compare channel estimates. */
+#define MIN_MSE_DIFF 29 /* The ratio between adapted and stored channel to */
+ /* accept a new storage (0.8 in Q-MSE_RESOLUTION). */
+#define MSE_RESOLUTION 5 /* MSE parameter resolution. */
+#define RESOLUTION_CHANNEL16 12 /* W16 Channel in Q-RESOLUTION_CHANNEL16. */
+#define RESOLUTION_CHANNEL32 28 /* W32 Channel in Q-RESOLUTION_CHANNEL32. */
+#define CHANNEL_VAD 16 /* Minimum energy in frequency band */
+ /* to update channel. */
+
+/* Suppression gain parameters: SUPGAIN parameters in Q-(RESOLUTION_SUPGAIN). */
+#define RESOLUTION_SUPGAIN 8 /* Channel in Q-(RESOLUTION_SUPGAIN). */
+#define SUPGAIN_DEFAULT (1 << RESOLUTION_SUPGAIN) /* Default. */
+#define SUPGAIN_ERROR_PARAM_A 3072 /* Estimation error parameter */
+ /* (Maximum gain) (8 in Q8). */
+#define SUPGAIN_ERROR_PARAM_B 1536 /* Estimation error parameter */
+ /* (Gain before going down). */
+#define SUPGAIN_ERROR_PARAM_D SUPGAIN_DEFAULT /* Estimation error parameter */
+/* (Should be the same as Default) (1 in Q8). */
+#define SUPGAIN_EPC_DT 200 /* SUPGAIN_ERROR_PARAM_C * ENERGY_DEV_TOL */
+
+/* Defines for "check delay estimation" */
+#define CORR_WIDTH 31 /* Number of samples to correlate over. */
+#define CORR_MAX 16 /* Maximum correlation offset. */
+#define CORR_MAX_BUF 63
+#define CORR_DEV 4
+#define CORR_MAX_LEVEL 20
+#define CORR_MAX_LOW 4
+#define CORR_BUF_LEN ((CORR_MAX << 1) + 1)
+/* Note that CORR_WIDTH + 2*CORR_MAX <= MAX_BUF_LEN. */
+
+#define ONE_Q14 (1 << 14)
+
+/* NLP defines */
+#define NLP_COMP_LOW 3277 /* 0.2 in Q14 */
+#define NLP_COMP_HIGH ONE_Q14 /* 1 in Q14 */
+
+#endif  // MODULES_AUDIO_PROCESSING_AECM_AECM_DEFINES_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/aecm/echo_control_mobile.cc b/third_party/libwebrtc/modules/audio_processing/aecm/echo_control_mobile.cc
new file mode 100644
index 0000000000..14522c0f1d
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aecm/echo_control_mobile.cc
@@ -0,0 +1,599 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aecm/echo_control_mobile.h"
+
+#ifdef AEC_DEBUG
+#include <stdio.h>
+#endif
+#include <stdlib.h>
+#include <string.h>
+
+extern "C" {
+#include "common_audio/ring_buffer.h"
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+#include "modules/audio_processing/aecm/aecm_defines.h"
+}
+#include "modules/audio_processing/aecm/aecm_core.h"
+
+namespace webrtc {
+
+namespace {
+
+#define BUF_SIZE_FRAMES 50  // Buffer size (frames).
+// Maximum length of the resampled signal. Must be an integer multiple of
+// FRAME_LEN: (ceil(1 / (1 + MIN_SKEW) * 2) + 1) * FRAME_LEN. The factor of 2
+// handles wideband, and the + 1 is a safety margin.
+#define MAX_RESAMP_LEN (5 * FRAME_LEN)
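+// For FRAME_LEN = 80 this evaluates to 400 samples, i.e. room for 5 frames.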
+
+static const size_t kBufSizeSamp =
+ BUF_SIZE_FRAMES * FRAME_LEN; // buffer size (samples)
+static const int kSampMsNb = 8; // samples per ms in nb
+static const int kInitCheck = 42;
+
+typedef struct {
+ int sampFreq;
+ int scSampFreq;
+ short bufSizeStart;
+ int knownDelay;
+
+ // Stores the last frame added to the farend buffer
+ short farendOld[2][FRAME_LEN];
+ short initFlag; // indicates if AEC has been initialized
+
+ // Variables used for averaging far end buffer size
+ short counter;
+ short sum;
+ short firstVal;
+ short checkBufSizeCtr;
+
+ // Variables used for delay shifts
+ short msInSndCardBuf;
+ short filtDelay;
+ int timeForDelayChange;
+ int ECstartup;
+ int checkBuffSize;
+ int delayChange;
+ short lastDelayDiff;
+
+ int16_t echoMode;
+
+#ifdef AEC_DEBUG
+ FILE* bufFile;
+ FILE* delayFile;
+ FILE* preCompFile;
+ FILE* postCompFile;
+#endif // AEC_DEBUG
+ // Structures
+ RingBuffer* farendBuf;
+
+ AecmCore* aecmCore;
+} AecMobile;
+
+} // namespace
+
+// Estimates delay to set the position of the farend buffer read pointer
+// (controlled by knownDelay)
+static int WebRtcAecm_EstBufDelay(AecMobile* aecm, short msInSndCardBuf);
+
+// Stuffs the farend buffer if the estimated delay is too large
+static int WebRtcAecm_DelayComp(AecMobile* aecm);
+
+void* WebRtcAecm_Create() {
+ // Allocate zero-filled memory.
+ AecMobile* aecm = static_cast<AecMobile*>(calloc(1, sizeof(AecMobile)));
+
+ aecm->aecmCore = WebRtcAecm_CreateCore();
+ if (!aecm->aecmCore) {
+ WebRtcAecm_Free(aecm);
+ return NULL;
+ }
+
+ aecm->farendBuf = WebRtc_CreateBuffer(kBufSizeSamp, sizeof(int16_t));
+ if (!aecm->farendBuf) {
+ WebRtcAecm_Free(aecm);
+ return NULL;
+ }
+
+#ifdef AEC_DEBUG
+ aecm->aecmCore->farFile = fopen("aecFar.pcm", "wb");
+ aecm->aecmCore->nearFile = fopen("aecNear.pcm", "wb");
+ aecm->aecmCore->outFile = fopen("aecOut.pcm", "wb");
+ // aecm->aecmCore->outLpFile = fopen("aecOutLp.pcm","wb");
+
+ aecm->bufFile = fopen("aecBuf.dat", "wb");
+ aecm->delayFile = fopen("aecDelay.dat", "wb");
+ aecm->preCompFile = fopen("preComp.pcm", "wb");
+ aecm->postCompFile = fopen("postComp.pcm", "wb");
+#endif // AEC_DEBUG
+ return aecm;
+}
+
+void WebRtcAecm_Free(void* aecmInst) {
+ AecMobile* aecm = static_cast<AecMobile*>(aecmInst);
+
+ if (aecm == NULL) {
+ return;
+ }
+
+#ifdef AEC_DEBUG
+ fclose(aecm->aecmCore->farFile);
+ fclose(aecm->aecmCore->nearFile);
+ fclose(aecm->aecmCore->outFile);
+ // fclose(aecm->aecmCore->outLpFile);
+
+ fclose(aecm->bufFile);
+ fclose(aecm->delayFile);
+ fclose(aecm->preCompFile);
+ fclose(aecm->postCompFile);
+#endif // AEC_DEBUG
+ WebRtcAecm_FreeCore(aecm->aecmCore);
+ WebRtc_FreeBuffer(aecm->farendBuf);
+ free(aecm);
+}
+
+int32_t WebRtcAecm_Init(void* aecmInst, int32_t sampFreq) {
+ AecMobile* aecm = static_cast<AecMobile*>(aecmInst);
+ AecmConfig aecConfig;
+
+ if (aecm == NULL) {
+ return -1;
+ }
+
+ if (sampFreq != 8000 && sampFreq != 16000) {
+ return AECM_BAD_PARAMETER_ERROR;
+ }
+ aecm->sampFreq = sampFreq;
+
+ // Initialize AECM core
+ if (WebRtcAecm_InitCore(aecm->aecmCore, aecm->sampFreq) == -1) {
+ return AECM_UNSPECIFIED_ERROR;
+ }
+
+ // Initialize farend buffer
+ WebRtc_InitBuffer(aecm->farendBuf);
+
+ aecm->initFlag = kInitCheck; // indicates that initialization has been done
+
+ aecm->delayChange = 1;
+
+ aecm->sum = 0;
+ aecm->counter = 0;
+ aecm->checkBuffSize = 1;
+ aecm->firstVal = 0;
+
+ aecm->ECstartup = 1;
+ aecm->bufSizeStart = 0;
+ aecm->checkBufSizeCtr = 0;
+ aecm->filtDelay = 0;
+ aecm->timeForDelayChange = 0;
+ aecm->knownDelay = 0;
+ aecm->lastDelayDiff = 0;
+
+ memset(&aecm->farendOld, 0, sizeof(aecm->farendOld));
+
+ // Default settings.
+ aecConfig.cngMode = AecmTrue;
+ aecConfig.echoMode = 3;
+
+ if (WebRtcAecm_set_config(aecm, aecConfig) == -1) {
+ return AECM_UNSPECIFIED_ERROR;
+ }
+
+ return 0;
+}
+
+// Returns any error that is caused when buffering the
+// farend signal.
+int32_t WebRtcAecm_GetBufferFarendError(void* aecmInst,
+ const int16_t* farend,
+ size_t nrOfSamples) {
+ AecMobile* aecm = static_cast<AecMobile*>(aecmInst);
+
+ if (aecm == NULL)
+ return -1;
+
+ if (farend == NULL)
+ return AECM_NULL_POINTER_ERROR;
+
+ if (aecm->initFlag != kInitCheck)
+ return AECM_UNINITIALIZED_ERROR;
+
+ if (nrOfSamples != 80 && nrOfSamples != 160)
+ return AECM_BAD_PARAMETER_ERROR;
+
+ return 0;
+}
+
+int32_t WebRtcAecm_BufferFarend(void* aecmInst,
+ const int16_t* farend,
+ size_t nrOfSamples) {
+ AecMobile* aecm = static_cast<AecMobile*>(aecmInst);
+
+ const int32_t err =
+ WebRtcAecm_GetBufferFarendError(aecmInst, farend, nrOfSamples);
+
+ if (err != 0)
+ return err;
+
+ // TODO(unknown): Is this really a good idea?
+ if (!aecm->ECstartup) {
+ WebRtcAecm_DelayComp(aecm);
+ }
+
+ WebRtc_WriteBuffer(aecm->farendBuf, farend, nrOfSamples);
+
+ return 0;
+}
+
+int32_t WebRtcAecm_Process(void* aecmInst,
+ const int16_t* nearendNoisy,
+ const int16_t* nearendClean,
+ int16_t* out,
+ size_t nrOfSamples,
+ int16_t msInSndCardBuf) {
+ AecMobile* aecm = static_cast<AecMobile*>(aecmInst);
+ int32_t retVal = 0;
+ size_t i;
+ short nmbrOfFilledBuffers;
+ size_t nBlocks10ms;
+ size_t nFrames;
+#ifdef AEC_DEBUG
+ short msInAECBuf;
+#endif
+
+ if (aecm == NULL) {
+ return -1;
+ }
+
+ if (nearendNoisy == NULL) {
+ return AECM_NULL_POINTER_ERROR;
+ }
+
+ if (out == NULL) {
+ return AECM_NULL_POINTER_ERROR;
+ }
+
+ if (aecm->initFlag != kInitCheck) {
+ return AECM_UNINITIALIZED_ERROR;
+ }
+
+ if (nrOfSamples != 80 && nrOfSamples != 160) {
+ return AECM_BAD_PARAMETER_ERROR;
+ }
+
+ if (msInSndCardBuf < 0) {
+ msInSndCardBuf = 0;
+ retVal = AECM_BAD_PARAMETER_WARNING;
+ } else if (msInSndCardBuf > 500) {
+ msInSndCardBuf = 500;
+ retVal = AECM_BAD_PARAMETER_WARNING;
+ }
+ msInSndCardBuf += 10;
+ aecm->msInSndCardBuf = msInSndCardBuf;
+
+ nFrames = nrOfSamples / FRAME_LEN;
+ nBlocks10ms = nFrames / aecm->aecmCore->mult;
+
+ if (aecm->ECstartup) {
+ if (nearendClean == NULL) {
+ if (out != nearendNoisy) {
+ memcpy(out, nearendNoisy, sizeof(short) * nrOfSamples);
+ }
+ } else if (out != nearendClean) {
+ memcpy(out, nearendClean, sizeof(short) * nrOfSamples);
+ }
+
+ nmbrOfFilledBuffers =
+ (short)WebRtc_available_read(aecm->farendBuf) / FRAME_LEN;
+    // The AECM is in the startup mode.
+    // AECM is disabled until the soundcard buffer and farend buffers are OK.
+
+ // Mechanism to ensure that the soundcard buffer is reasonably stable.
+ if (aecm->checkBuffSize) {
+ aecm->checkBufSizeCtr++;
+ // Before we fill up the far end buffer we require the amount of data on
+ // the sound card to be stable (+/-8 ms) compared to the first value. This
+ // comparison is made during the following 4 consecutive frames. If it
+ // seems to be stable then we start to fill up the far end buffer.
+
+ if (aecm->counter == 0) {
+ aecm->firstVal = aecm->msInSndCardBuf;
+ aecm->sum = 0;
+ }
+
+ if (abs(aecm->firstVal - aecm->msInSndCardBuf) <
+ WEBRTC_SPL_MAX(0.2 * aecm->msInSndCardBuf, kSampMsNb)) {
+ aecm->sum += aecm->msInSndCardBuf;
+ aecm->counter++;
+ } else {
+ aecm->counter = 0;
+ }
+
+ if (aecm->counter * nBlocks10ms >= 6) {
+ // The farend buffer size is determined in blocks of 80 samples
+ // Use 75% of the average value of the soundcard buffer
+ aecm->bufSizeStart = WEBRTC_SPL_MIN(
+ (3 * aecm->sum * aecm->aecmCore->mult) / (aecm->counter * 40),
+ BUF_SIZE_FRAMES);
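+        // Illustrative example: with an average of 40 ms reported for a
+        // narrowband card (mult == 1), sum / counter == 40, so
+        // bufSizeStart == 3 * 40 / 40 == 3 frames, i.e. 75% of the four
+        // 10 ms frames buffered on the card.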
+ // buffersize has now been determined
+ aecm->checkBuffSize = 0;
+ }
+
+ if (aecm->checkBufSizeCtr * nBlocks10ms > 50) {
+        // For really bad sound cards, don't disable the echo canceller for
+        // more than 0.5 sec.
+ aecm->bufSizeStart = WEBRTC_SPL_MIN(
+ (3 * aecm->msInSndCardBuf * aecm->aecmCore->mult) / 40,
+ BUF_SIZE_FRAMES);
+ aecm->checkBuffSize = 0;
+ }
+ }
+
+ // if checkBuffSize changed in the if-statement above
+ if (!aecm->checkBuffSize) {
+ // soundcard buffer is now reasonably stable
+ // When the far end buffer is filled with approximately the same amount of
+ // data as the amount on the sound card we end the start up phase and
+ // start to cancel echoes.
+
+ if (nmbrOfFilledBuffers == aecm->bufSizeStart) {
+ aecm->ECstartup = 0; // Enable the AECM
+ } else if (nmbrOfFilledBuffers > aecm->bufSizeStart) {
+ WebRtc_MoveReadPtr(aecm->farendBuf,
+ (int)WebRtc_available_read(aecm->farendBuf) -
+ (int)aecm->bufSizeStart * FRAME_LEN);
+ aecm->ECstartup = 0;
+ }
+ }
+
+ } else {
+ // AECM is enabled
+
+    // Note: only 1 block is supported for NB and 2 blocks for WB.
+ for (i = 0; i < nFrames; i++) {
+ int16_t farend[FRAME_LEN];
+ const int16_t* farend_ptr = NULL;
+
+ nmbrOfFilledBuffers =
+ (short)WebRtc_available_read(aecm->farendBuf) / FRAME_LEN;
+
+ // Check that there is data in the far end buffer
+ if (nmbrOfFilledBuffers > 0) {
+ // Get the next 80 samples from the farend buffer
+ WebRtc_ReadBuffer(aecm->farendBuf, (void**)&farend_ptr, farend,
+ FRAME_LEN);
+
+ // Always store the last frame for use when we run out of data
+ memcpy(&(aecm->farendOld[i][0]), farend_ptr, FRAME_LEN * sizeof(short));
+ } else {
+ // We have no data so we use the last played frame
+ memcpy(farend, &(aecm->farendOld[i][0]), FRAME_LEN * sizeof(short));
+ farend_ptr = farend;
+ }
+
+      // Call the buffer delay estimator when all data has been extracted,
+      // i.e. i = 0 for NB and i = 1 for WB.
+ if ((i == 0 && aecm->sampFreq == 8000) ||
+ (i == 1 && aecm->sampFreq == 16000)) {
+ WebRtcAecm_EstBufDelay(aecm, aecm->msInSndCardBuf);
+ }
+
+ // Call the AECM
+ if (WebRtcAecm_ProcessFrame(
+ aecm->aecmCore, farend_ptr, &nearendNoisy[FRAME_LEN * i],
+ (nearendClean ? &nearendClean[FRAME_LEN * i] : NULL),
+ &out[FRAME_LEN * i]) == -1)
+ return -1;
+ }
+ }
+
+#ifdef AEC_DEBUG
+ msInAECBuf = (short)WebRtc_available_read(aecm->farendBuf) /
+ (kSampMsNb * aecm->aecmCore->mult);
+ fwrite(&msInAECBuf, 2, 1, aecm->bufFile);
+ fwrite(&(aecm->knownDelay), sizeof(aecm->knownDelay), 1, aecm->delayFile);
+#endif
+
+ return retVal;
+}
+
+int32_t WebRtcAecm_set_config(void* aecmInst, AecmConfig config) {
+ AecMobile* aecm = static_cast<AecMobile*>(aecmInst);
+
+ if (aecm == NULL) {
+ return -1;
+ }
+
+ if (aecm->initFlag != kInitCheck) {
+ return AECM_UNINITIALIZED_ERROR;
+ }
+
+ if (config.cngMode != AecmFalse && config.cngMode != AecmTrue) {
+ return AECM_BAD_PARAMETER_ERROR;
+ }
+ aecm->aecmCore->cngMode = config.cngMode;
+
+ if (config.echoMode < 0 || config.echoMode > 4) {
+ return AECM_BAD_PARAMETER_ERROR;
+ }
+ aecm->echoMode = config.echoMode;
+
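+  // Each echo mode rescales the suppression gain parameters by a factor of
+  // 2^(echoMode - 3): mode 0 is the mildest (>> 3) and mode 4 the most
+  // aggressive (<< 1).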
+ if (aecm->echoMode == 0) {
+ aecm->aecmCore->supGain = SUPGAIN_DEFAULT >> 3;
+ aecm->aecmCore->supGainOld = SUPGAIN_DEFAULT >> 3;
+ aecm->aecmCore->supGainErrParamA = SUPGAIN_ERROR_PARAM_A >> 3;
+ aecm->aecmCore->supGainErrParamD = SUPGAIN_ERROR_PARAM_D >> 3;
+ aecm->aecmCore->supGainErrParamDiffAB =
+ (SUPGAIN_ERROR_PARAM_A >> 3) - (SUPGAIN_ERROR_PARAM_B >> 3);
+ aecm->aecmCore->supGainErrParamDiffBD =
+ (SUPGAIN_ERROR_PARAM_B >> 3) - (SUPGAIN_ERROR_PARAM_D >> 3);
+ } else if (aecm->echoMode == 1) {
+ aecm->aecmCore->supGain = SUPGAIN_DEFAULT >> 2;
+ aecm->aecmCore->supGainOld = SUPGAIN_DEFAULT >> 2;
+ aecm->aecmCore->supGainErrParamA = SUPGAIN_ERROR_PARAM_A >> 2;
+ aecm->aecmCore->supGainErrParamD = SUPGAIN_ERROR_PARAM_D >> 2;
+ aecm->aecmCore->supGainErrParamDiffAB =
+ (SUPGAIN_ERROR_PARAM_A >> 2) - (SUPGAIN_ERROR_PARAM_B >> 2);
+ aecm->aecmCore->supGainErrParamDiffBD =
+ (SUPGAIN_ERROR_PARAM_B >> 2) - (SUPGAIN_ERROR_PARAM_D >> 2);
+ } else if (aecm->echoMode == 2) {
+ aecm->aecmCore->supGain = SUPGAIN_DEFAULT >> 1;
+ aecm->aecmCore->supGainOld = SUPGAIN_DEFAULT >> 1;
+ aecm->aecmCore->supGainErrParamA = SUPGAIN_ERROR_PARAM_A >> 1;
+ aecm->aecmCore->supGainErrParamD = SUPGAIN_ERROR_PARAM_D >> 1;
+ aecm->aecmCore->supGainErrParamDiffAB =
+ (SUPGAIN_ERROR_PARAM_A >> 1) - (SUPGAIN_ERROR_PARAM_B >> 1);
+ aecm->aecmCore->supGainErrParamDiffBD =
+ (SUPGAIN_ERROR_PARAM_B >> 1) - (SUPGAIN_ERROR_PARAM_D >> 1);
+ } else if (aecm->echoMode == 3) {
+ aecm->aecmCore->supGain = SUPGAIN_DEFAULT;
+ aecm->aecmCore->supGainOld = SUPGAIN_DEFAULT;
+ aecm->aecmCore->supGainErrParamA = SUPGAIN_ERROR_PARAM_A;
+ aecm->aecmCore->supGainErrParamD = SUPGAIN_ERROR_PARAM_D;
+ aecm->aecmCore->supGainErrParamDiffAB =
+ SUPGAIN_ERROR_PARAM_A - SUPGAIN_ERROR_PARAM_B;
+ aecm->aecmCore->supGainErrParamDiffBD =
+ SUPGAIN_ERROR_PARAM_B - SUPGAIN_ERROR_PARAM_D;
+ } else if (aecm->echoMode == 4) {
+ aecm->aecmCore->supGain = SUPGAIN_DEFAULT << 1;
+ aecm->aecmCore->supGainOld = SUPGAIN_DEFAULT << 1;
+ aecm->aecmCore->supGainErrParamA = SUPGAIN_ERROR_PARAM_A << 1;
+ aecm->aecmCore->supGainErrParamD = SUPGAIN_ERROR_PARAM_D << 1;
+ aecm->aecmCore->supGainErrParamDiffAB =
+ (SUPGAIN_ERROR_PARAM_A << 1) - (SUPGAIN_ERROR_PARAM_B << 1);
+ aecm->aecmCore->supGainErrParamDiffBD =
+ (SUPGAIN_ERROR_PARAM_B << 1) - (SUPGAIN_ERROR_PARAM_D << 1);
+ }
+
+ return 0;
+}
+
+int32_t WebRtcAecm_InitEchoPath(void* aecmInst,
+ const void* echo_path,
+ size_t size_bytes) {
+ AecMobile* aecm = static_cast<AecMobile*>(aecmInst);
+ const int16_t* echo_path_ptr = static_cast<const int16_t*>(echo_path);
+
+ if (aecmInst == NULL) {
+ return -1;
+ }
+ if (echo_path == NULL) {
+ return AECM_NULL_POINTER_ERROR;
+ }
+ if (size_bytes != WebRtcAecm_echo_path_size_bytes()) {
+ // Input channel size does not match the size of AECM
+ return AECM_BAD_PARAMETER_ERROR;
+ }
+ if (aecm->initFlag != kInitCheck) {
+ return AECM_UNINITIALIZED_ERROR;
+ }
+
+ WebRtcAecm_InitEchoPathCore(aecm->aecmCore, echo_path_ptr);
+
+ return 0;
+}
+
+int32_t WebRtcAecm_GetEchoPath(void* aecmInst,
+ void* echo_path,
+ size_t size_bytes) {
+ AecMobile* aecm = static_cast<AecMobile*>(aecmInst);
+ int16_t* echo_path_ptr = static_cast<int16_t*>(echo_path);
+
+ if (aecmInst == NULL) {
+ return -1;
+ }
+ if (echo_path == NULL) {
+ return AECM_NULL_POINTER_ERROR;
+ }
+ if (size_bytes != WebRtcAecm_echo_path_size_bytes()) {
+ // Input channel size does not match the size of AECM
+ return AECM_BAD_PARAMETER_ERROR;
+ }
+ if (aecm->initFlag != kInitCheck) {
+ return AECM_UNINITIALIZED_ERROR;
+ }
+
+ memcpy(echo_path_ptr, aecm->aecmCore->channelStored, size_bytes);
+ return 0;
+}
+
+size_t WebRtcAecm_echo_path_size_bytes() {
+ return (PART_LEN1 * sizeof(int16_t));
+}
+
+static int WebRtcAecm_EstBufDelay(AecMobile* aecm, short msInSndCardBuf) {
+ short delayNew, nSampSndCard;
+ short nSampFar = (short)WebRtc_available_read(aecm->farendBuf);
+ short diff;
+
+ nSampSndCard = msInSndCardBuf * kSampMsNb * aecm->aecmCore->mult;
+
+ delayNew = nSampSndCard - nSampFar;
+
+ if (delayNew < FRAME_LEN) {
+ WebRtc_MoveReadPtr(aecm->farendBuf, FRAME_LEN);
+ delayNew += FRAME_LEN;
+ }
+
+ aecm->filtDelay =
+ WEBRTC_SPL_MAX(0, (8 * aecm->filtDelay + 2 * delayNew) / 10);
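+  // First-order IIR smoothing: filtDelay tracks roughly
+  // 0.8 * filtDelay + 0.2 * delayNew, floored at zero.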
+
+ diff = aecm->filtDelay - aecm->knownDelay;
+ if (diff > 224) {
+ if (aecm->lastDelayDiff < 96) {
+ aecm->timeForDelayChange = 0;
+ } else {
+ aecm->timeForDelayChange++;
+ }
+ } else if (diff < 96 && aecm->knownDelay > 0) {
+ if (aecm->lastDelayDiff > 224) {
+ aecm->timeForDelayChange = 0;
+ } else {
+ aecm->timeForDelayChange++;
+ }
+ } else {
+ aecm->timeForDelayChange = 0;
+ }
+ aecm->lastDelayDiff = diff;
+
+ if (aecm->timeForDelayChange > 25) {
+ aecm->knownDelay = WEBRTC_SPL_MAX((int)aecm->filtDelay - 160, 0);
+ }
+ return 0;
+}
+
+static int WebRtcAecm_DelayComp(AecMobile* aecm) {
+ int nSampFar = (int)WebRtc_available_read(aecm->farendBuf);
+ int nSampSndCard, delayNew, nSampAdd;
+ const int maxStuffSamp = 10 * FRAME_LEN;
+
+ nSampSndCard = aecm->msInSndCardBuf * kSampMsNb * aecm->aecmCore->mult;
+ delayNew = nSampSndCard - nSampFar;
+
+ if (delayNew > FAR_BUF_LEN - FRAME_LEN * aecm->aecmCore->mult) {
+ // The difference of the buffer sizes is larger than the maximum
+ // allowed known delay. Compensate by stuffing the buffer.
+ nSampAdd =
+ (int)(WEBRTC_SPL_MAX(((nSampSndCard >> 1) - nSampFar), FRAME_LEN));
+ nSampAdd = WEBRTC_SPL_MIN(nSampAdd, maxStuffSamp);
+
+ WebRtc_MoveReadPtr(aecm->farendBuf, -nSampAdd);
+ aecm->delayChange = 1; // the delay needs to be updated
+ }
+
+ return 0;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/aecm/echo_control_mobile.h b/third_party/libwebrtc/modules/audio_processing/aecm/echo_control_mobile.h
new file mode 100644
index 0000000000..ee780524de
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/aecm/echo_control_mobile.h
@@ -0,0 +1,209 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AECM_ECHO_CONTROL_MOBILE_H_
+#define MODULES_AUDIO_PROCESSING_AECM_ECHO_CONTROL_MOBILE_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+namespace webrtc {
+
+enum { AecmFalse = 0, AecmTrue };
+
+// Errors
+#define AECM_UNSPECIFIED_ERROR 12000
+#define AECM_UNSUPPORTED_FUNCTION_ERROR 12001
+#define AECM_UNINITIALIZED_ERROR 12002
+#define AECM_NULL_POINTER_ERROR 12003
+#define AECM_BAD_PARAMETER_ERROR 12004
+
+// Warnings
+#define AECM_BAD_PARAMETER_WARNING 12100
+
+typedef struct {
+  int16_t cngMode;   // AecmFalse, AecmTrue (default)
+ int16_t echoMode; // 0, 1, 2, 3 (default), 4
+} AecmConfig;
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * Allocates the memory needed by the AECM. The memory needs to be
+ * initialized separately using the WebRtcAecm_Init() function.
+ * Returns a pointer to the instance, or nullptr on failure.
+ */
+void* WebRtcAecm_Create();
+
+/*
+ * This function releases the memory allocated by WebRtcAecm_Create()
+ *
+ * Inputs Description
+ * -------------------------------------------------------------------
+ * void* aecmInst Pointer to the AECM instance
+ */
+void WebRtcAecm_Free(void* aecmInst);
+
+/*
+ * Initializes an AECM instance.
+ *
+ * Inputs Description
+ * -------------------------------------------------------------------
+ * void* aecmInst Pointer to the AECM instance
+ * int32_t sampFreq Sampling frequency of data
+ *
+ * Outputs Description
+ * -------------------------------------------------------------------
+ * int32_t return 0: OK
+ * 12000-12004, 12100: error/warning
+ */
+int32_t WebRtcAecm_Init(void* aecmInst, int32_t sampFreq);
+
+/*
+ * Inserts an 80 or 160 sample block of data into the farend buffer.
+ *
+ * Inputs Description
+ * -------------------------------------------------------------------
+ * void* aecmInst Pointer to the AECM instance
+ * int16_t* farend In buffer containing one frame of
+ * farend signal
+ * size_t nrOfSamples Number of samples in farend buffer
+ *
+ * Outputs Description
+ * -------------------------------------------------------------------
+ * int32_t return 0: OK
+ * 12000-12004, 12100: error/warning
+ */
+int32_t WebRtcAecm_BufferFarend(void* aecmInst,
+ const int16_t* farend,
+ size_t nrOfSamples);
+
+/*
+ * Reports any errors that would arise when buffering a farend buffer.
+ *
+ * Inputs Description
+ * -------------------------------------------------------------------
+ * void* aecmInst Pointer to the AECM instance
+ * int16_t* farend In buffer containing one frame of
+ * farend signal
+ * size_t nrOfSamples Number of samples in farend buffer
+ *
+ * Outputs Description
+ * -------------------------------------------------------------------
+ * int32_t return 0: OK
+ * 12000-12004, 12100: error/warning
+ */
+int32_t WebRtcAecm_GetBufferFarendError(void* aecmInst,
+ const int16_t* farend,
+ size_t nrOfSamples);
+
+/*
+ * Runs the AECM on an 80 or 160 sample block of data.
+ *
+ * Inputs Description
+ * -------------------------------------------------------------------
+ * void* aecmInst Pointer to the AECM instance
+ * int16_t* nearendNoisy In buffer containing one frame of
+ * reference nearend+echo signal. If
+ * noise reduction is active, provide
+ * the noisy signal here.
+ * int16_t* nearendClean In buffer containing one frame of
+ * nearend+echo signal. If noise
+ * reduction is active, provide the
+ * clean signal here. Otherwise pass a
+ * NULL pointer.
+ * size_t nrOfSamples Number of samples in nearend buffer
+ * int16_t msInSndCardBuf Delay estimate for sound card and
+ * system buffers
+ *
+ * Outputs Description
+ * -------------------------------------------------------------------
+ * int16_t* out Out buffer, one frame of processed nearend
+ * int32_t return 0: OK
+ * 12000-12004, 12100: error/warning
+ */
+int32_t WebRtcAecm_Process(void* aecmInst,
+ const int16_t* nearendNoisy,
+ const int16_t* nearendClean,
+ int16_t* out,
+ size_t nrOfSamples,
+ int16_t msInSndCardBuf);
+
+/*
+ * This function enables the user to set certain parameters on-the-fly
+ *
+ * Inputs Description
+ * -------------------------------------------------------------------
+ * void* aecmInst Pointer to the AECM instance
+ * AecmConfig config Config instance that contains all
+ * properties to be set
+ *
+ * Outputs Description
+ * -------------------------------------------------------------------
+ * int32_t return 0: OK
+ * 12000-12004, 12100: error/warning
+ */
+int32_t WebRtcAecm_set_config(void* aecmInst, AecmConfig config);
+
+/*
+ * This function enables the user to set the echo path on-the-fly.
+ *
+ * Inputs Description
+ * -------------------------------------------------------------------
+ * void* aecmInst Pointer to the AECM instance
+ * void* echo_path Pointer to the echo path to be set
+ * size_t size_bytes Size in bytes of the echo path
+ *
+ * Outputs Description
+ * -------------------------------------------------------------------
+ * int32_t return 0: OK
+ * 12000-12004, 12100: error/warning
+ */
+int32_t WebRtcAecm_InitEchoPath(void* aecmInst,
+ const void* echo_path,
+ size_t size_bytes);
+
+/*
+ * This function enables the user to get the currently used echo path
+ * on-the-fly.
+ *
+ * Inputs Description
+ * -------------------------------------------------------------------
+ * void* aecmInst Pointer to the AECM instance
+ * void* echo_path Pointer to echo path
+ * size_t size_bytes Size in bytes of the echo path
+ *
+ * Outputs Description
+ * -------------------------------------------------------------------
+ * int32_t return 0: OK
+ * 12000-12004,12100: error/warning
+ */
+int32_t WebRtcAecm_GetEchoPath(void* aecmInst,
+ void* echo_path,
+ size_t size_bytes);
+
+/*
+ * This function enables the user to get the echo path size in bytes.
+ *
+ * Outputs Description
+ * -------------------------------------------------------------------
+ * size_t return Size in bytes
+ */
+size_t WebRtcAecm_echo_path_size_bytes();
+
+#ifdef __cplusplus
+}
+#endif
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AECM_ECHO_CONTROL_MOBILE_H_
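
Taken together, the declarations above form a small C-style API. Below is a
minimal usage sketch for a single 10 ms iteration at 16 kHz (160-sample
blocks). WebRtcAecm_Create() and WebRtcAecm_Free() are assumed to be the
create/destroy functions declared earlier in this header; in real use the
instance would persist across blocks and is created here only to keep the
sketch self-contained.

    // #include "modules/audio_processing/aecm/echo_control_mobile.h"
    #include <stdint.h>

    int32_t CancelOneBlock(const int16_t farend_block[160],
                           const int16_t nearend_block[160],
                           int16_t out_block[160],
                           int16_t delay_ms) {
      void* aecm = WebRtcAecm_Create();  // assumed: declared earlier in header
      if (aecm == nullptr || WebRtcAecm_Init(aecm, 16000) != 0) {
        return -1;
      }
      // Farend must be buffered before the matching nearend is processed.
      int32_t err = WebRtcAecm_BufferFarend(aecm, farend_block, 160);
      if (err == 0) {
        // No noise suppression in this sketch, so pass the noisy nearend and
        // a null clean signal, as documented for WebRtcAecm_Process().
        err = WebRtcAecm_Process(aecm, nearend_block,
                                 /*nearendClean=*/nullptr, out_block, 160,
                                 delay_ms);
      }
      WebRtcAecm_Free(aecm);  // assumed: declared earlier in header
      return err;
    }
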
diff --git a/third_party/libwebrtc/modules/audio_processing/agc/BUILD.gn b/third_party/libwebrtc/modules/audio_processing/agc/BUILD.gn
new file mode 100644
index 0000000000..a3d430dd29
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc/BUILD.gn
@@ -0,0 +1,193 @@
+# Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+import("../../../webrtc.gni")
+
+rtc_source_set("gain_control_interface") {
+ sources = [ "gain_control.h" ]
+}
+
+rtc_library("agc") {
+ sources = [
+ "agc_manager_direct.cc",
+ "agc_manager_direct.h",
+ ]
+ configs += [ "..:apm_debug_dump" ]
+ deps = [
+ ":clipping_predictor",
+ ":clipping_predictor_evaluator",
+ ":gain_control_interface",
+ ":gain_map",
+ ":level_estimation",
+ "..:api",
+ "..:apm_logging",
+ "..:audio_buffer",
+ "..:audio_frame_view",
+ "../../../api:array_view",
+ "../../../common_audio",
+ "../../../common_audio:common_audio_c",
+ "../../../rtc_base:checks",
+ "../../../rtc_base:gtest_prod",
+ "../../../rtc_base:logging",
+ "../../../rtc_base:safe_minmax",
+ "../../../system_wrappers:field_trial",
+ "../../../system_wrappers:metrics",
+ "../vad",
+ ]
+ absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ]
+}
+
+rtc_library("analog_gain_stats_reporter") {
+ sources = [
+ "analog_gain_stats_reporter.cc",
+ "analog_gain_stats_reporter.h",
+ ]
+ deps = [
+ "../../../rtc_base:gtest_prod",
+ "../../../rtc_base:logging",
+ "../../../rtc_base:safe_minmax",
+ "../../../system_wrappers:metrics",
+ ]
+ absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ]
+}
+
+rtc_library("clipping_predictor") {
+ sources = [
+ "clipping_predictor.cc",
+ "clipping_predictor.h",
+ ]
+ deps = [
+ ":clipping_predictor_level_buffer",
+ ":gain_map",
+ "..:api",
+ "..:audio_frame_view",
+ "../../../common_audio",
+ "../../../rtc_base:checks",
+ "../../../rtc_base:logging",
+ "../../../rtc_base:safe_minmax",
+ ]
+ absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ]
+}
+
+rtc_library("clipping_predictor_evaluator") {
+ sources = [
+ "clipping_predictor_evaluator.cc",
+ "clipping_predictor_evaluator.h",
+ ]
+ deps = [
+ "../../../rtc_base:checks",
+ "../../../rtc_base:logging",
+ ]
+ absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ]
+}
+
+rtc_library("clipping_predictor_level_buffer") {
+ sources = [
+ "clipping_predictor_level_buffer.cc",
+ "clipping_predictor_level_buffer.h",
+ ]
+ deps = [
+ "../../../rtc_base:checks",
+ "../../../rtc_base:logging",
+ ]
+ absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ]
+}
+
+rtc_library("level_estimation") {
+ sources = [
+ "agc.cc",
+ "agc.h",
+ "loudness_histogram.cc",
+ "loudness_histogram.h",
+ "utility.cc",
+ "utility.h",
+ ]
+ deps = [
+ "../../../api:array_view",
+ "../../../rtc_base:checks",
+ "../vad",
+ ]
+}
+
+rtc_library("legacy_agc") {
+ visibility = [
+ ":*",
+ "..:*",
+ ] # Only targets in this file and in
+ # audio_processing can depend on
+ # this.
+
+ sources = [
+ "legacy/analog_agc.cc",
+ "legacy/analog_agc.h",
+ "legacy/digital_agc.cc",
+ "legacy/digital_agc.h",
+ "legacy/gain_control.h",
+ ]
+
+ deps = [
+ "../../../common_audio",
+ "../../../common_audio:common_audio_c",
+ "../../../common_audio/third_party/ooura:fft_size_256",
+ "../../../rtc_base:checks",
+ "../../../system_wrappers",
+ ]
+
+ if (rtc_build_with_neon) {
+ if (target_cpu != "arm64") {
+ # Enable compilation for the NEON instruction set.
+ suppressed_configs += [ "//build/config/compiler:compiler_arm_fpu" ]
+ cflags = [ "-mfpu=neon" ]
+ }
+ }
+}
+
+rtc_source_set("gain_map") {
+ sources = [ "gain_map_internal.h" ]
+}
+
+if (rtc_include_tests) {
+ rtc_library("agc_unittests") {
+ testonly = true
+ sources = [
+ "agc_manager_direct_unittest.cc",
+ "analog_gain_stats_reporter_unittest.cc",
+ "clipping_predictor_evaluator_unittest.cc",
+ "clipping_predictor_level_buffer_unittest.cc",
+ "clipping_predictor_unittest.cc",
+ "loudness_histogram_unittest.cc",
+ "mock_agc.h",
+ ]
+ configs += [ "..:apm_debug_dump" ]
+
+ deps = [
+ ":agc",
+ ":analog_gain_stats_reporter",
+ ":clipping_predictor",
+ ":clipping_predictor_evaluator",
+ ":clipping_predictor_level_buffer",
+ ":gain_control_interface",
+ ":level_estimation",
+ "..:mocks",
+ "../../../api:array_view",
+ "../../../rtc_base:checks",
+ "../../../rtc_base:random",
+ "../../../rtc_base:safe_conversions",
+ "../../../rtc_base:stringutils",
+ "../../../system_wrappers:metrics",
+ "../../../test:field_trial",
+ "../../../test:fileutils",
+ "../../../test:test_support",
+ "//testing/gtest",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/strings",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+ }
+}
diff --git a/third_party/libwebrtc/modules/audio_processing/agc/agc.cc b/third_party/libwebrtc/modules/audio_processing/agc/agc.cc
new file mode 100644
index 0000000000..a018ff9f93
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc/agc.cc
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/agc/agc.h"
+
+#include <cmath>
+#include <cstdlib>
+#include <vector>
+
+#include "modules/audio_processing/agc/loudness_histogram.h"
+#include "modules/audio_processing/agc/utility.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace {
+
+constexpr int kDefaultLevelDbfs = -18;
+constexpr int kNumAnalysisFrames = 100;
+constexpr double kActivityThreshold = 0.3;
+constexpr int kNum10msFramesInOneSecond = 100;
+constexpr int kMaxSampleRateHz = 384000;
+
+} // namespace
+
+Agc::Agc()
+ : target_level_loudness_(Dbfs2Loudness(kDefaultLevelDbfs)),
+ target_level_dbfs_(kDefaultLevelDbfs),
+ histogram_(LoudnessHistogram::Create(kNumAnalysisFrames)),
+ inactive_histogram_(LoudnessHistogram::Create()) {}
+
+Agc::~Agc() = default;
+
+void Agc::Process(rtc::ArrayView<const int16_t> audio) {
+ const int sample_rate_hz = audio.size() * kNum10msFramesInOneSecond;
+ RTC_DCHECK_LE(sample_rate_hz, kMaxSampleRateHz);
+ vad_.ProcessChunk(audio.data(), audio.size(), sample_rate_hz);
+ const std::vector<double>& rms = vad_.chunkwise_rms();
+ const std::vector<double>& probabilities =
+ vad_.chunkwise_voice_probabilities();
+ RTC_DCHECK_EQ(rms.size(), probabilities.size());
+ for (size_t i = 0; i < rms.size(); ++i) {
+ histogram_->Update(rms[i], probabilities[i]);
+ }
+}
+
+bool Agc::GetRmsErrorDb(int* error) {
+ if (!error) {
+ RTC_DCHECK_NOTREACHED();
+ return false;
+ }
+
+ if (histogram_->num_updates() < kNumAnalysisFrames) {
+ // We haven't yet received enough frames.
+ return false;
+ }
+
+ if (histogram_->AudioContent() < kNumAnalysisFrames * kActivityThreshold) {
+ // We are likely in an inactive segment.
+ return false;
+ }
+
+ double loudness = Linear2Loudness(histogram_->CurrentRms());
+ *error = std::floor(Loudness2Db(target_level_loudness_ - loudness) + 0.5);
+ histogram_->Reset();
+ return true;
+}
+
+void Agc::Reset() {
+ histogram_->Reset();
+}
+
+int Agc::set_target_level_dbfs(int level) {
+ // TODO(turajs): just some arbitrary sanity check. We can come up with better
+ // limits. The upper limit should be chosen such that the risk of clipping is
+ // low. The lower limit should not result in a too quiet signal.
+ if (level >= 0 || level <= -100)
+ return -1;
+ target_level_dbfs_ = level;
+ target_level_loudness_ = Dbfs2Loudness(level);
+ return 0;
+}
+
+int Agc::target_level_dbfs() const {
+ return target_level_dbfs_;
+}
+
+float Agc::voice_probability() const {
+ return vad_.last_voice_probability();
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/agc/agc.h b/third_party/libwebrtc/modules/audio_processing/agc/agc.h
new file mode 100644
index 0000000000..da42808225
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc/agc.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AGC_AGC_H_
+#define MODULES_AUDIO_PROCESSING_AGC_AGC_H_
+
+#include <memory>
+
+#include "api/array_view.h"
+#include "modules/audio_processing/vad/voice_activity_detector.h"
+
+namespace webrtc {
+
+class LoudnessHistogram;
+
+class Agc {
+ public:
+ Agc();
+ virtual ~Agc();
+
+ // `audio` must be mono; in a multi-channel stream, provide the first (usually
+ // left) channel.
+ virtual void Process(rtc::ArrayView<const int16_t> audio);
+
+ // Retrieves the difference between the target RMS level and the current
+ // signal RMS level in dB. Returns true if an update is available and false
+ // otherwise, in which case `error` should be ignored and no action taken.
+ virtual bool GetRmsErrorDb(int* error);
+ virtual void Reset();
+
+ virtual int set_target_level_dbfs(int level);
+ virtual int target_level_dbfs() const;
+ virtual float voice_probability() const;
+
+ private:
+ double target_level_loudness_;
+ int target_level_dbfs_;
+ std::unique_ptr<LoudnessHistogram> histogram_;
+ std::unique_ptr<LoudnessHistogram> inactive_histogram_;
+ VoiceActivityDetector vad_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AGC_AGC_H_
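
For orientation, here is a sketch of how a caller might drive the interface
above, assuming 16 kHz mono input; GetNext10MsFrame() is a hypothetical frame
source, not part of this module.

    #include <cstdint>

    #include "api/array_view.h"
    #include "modules/audio_processing/agc/agc.h"

    bool GetNext10MsFrame(int16_t* frame);  // hypothetical frame source

    void DriveAgc(webrtc::Agc& agc) {
      // 10 ms at 16 kHz; Process() infers the sample rate from the view size.
      int16_t frame[160];
      while (GetNext10MsFrame(frame)) {
        agc.Process(rtc::ArrayView<const int16_t>(frame, 160));
        int error_db = 0;
        if (agc.GetRmsErrorDb(&error_db)) {
          // An update is ready. A positive value means the measured loudness
          // is below the target, i.e. more gain is needed.
        }
      }
    }
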
diff --git a/third_party/libwebrtc/modules/audio_processing/agc/agc_gn/moz.build b/third_party/libwebrtc/modules/audio_processing/agc/agc_gn/moz.build
new file mode 100644
index 0000000000..f64fb947d3
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc/agc_gn/moz.build
@@ -0,0 +1,213 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_APM_DEBUG_DUMP"] = "0"
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/audio_processing/agc/agc_manager_direct.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("agc_gn")
diff --git a/third_party/libwebrtc/modules/audio_processing/agc/agc_manager_direct.cc b/third_party/libwebrtc/modules/audio_processing/agc/agc_manager_direct.cc
new file mode 100644
index 0000000000..8926682b9a
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc/agc_manager_direct.cc
@@ -0,0 +1,720 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/agc/agc_manager_direct.h"
+
+#include <algorithm>
+#include <cmath>
+
+#include "api/array_view.h"
+#include "common_audio/include/audio_util.h"
+#include "modules/audio_processing/agc/gain_control.h"
+#include "modules/audio_processing/agc/gain_map_internal.h"
+#include "modules/audio_processing/include/audio_frame_view.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/numerics/safe_minmax.h"
+#include "system_wrappers/include/field_trial.h"
+#include "system_wrappers/include/metrics.h"
+
+namespace webrtc {
+
+namespace {
+
+// Amount of error we tolerate in the microphone level (presumably due to OS
+// quantization) before we assume the user has manually adjusted the microphone.
+constexpr int kLevelQuantizationSlack = 25;
+
+constexpr int kDefaultCompressionGain = 7;
+constexpr int kMaxCompressionGain = 12;
+constexpr int kMinCompressionGain = 2;
+// Controls the rate of compression changes towards the target.
+constexpr float kCompressionGainStep = 0.05f;
+
+constexpr int kMaxMicLevel = 255;
+static_assert(kGainMapSize > kMaxMicLevel, "gain map too small");
+constexpr int kMinMicLevel = 12;
+
+// Prevent very large microphone level changes.
+constexpr int kMaxResidualGainChange = 15;
+
+// Maximum additional gain allowed to compensate for microphone level
+// restrictions from clipping events.
+constexpr int kSurplusCompressionGain = 6;
+
+// History size for the clipping predictor evaluator (unit: number of 10 ms
+// frames).
+constexpr int kClippingPredictorEvaluatorHistorySize = 500;
+
+using AnalogAgcConfig =
+ AudioProcessing::Config::GainController1::AnalogGainController;
+
+// Returns whether the fall-back of choosing the maximum analog channel level
+// should be used.
+bool UseMaxAnalogChannelLevel() {
+ return field_trial::IsEnabled("WebRTC-UseMaxAnalogAgcChannelLevel");
+}
+
+// If the "WebRTC-Audio-2ndAgcMinMicLevelExperiment" field trial is specified,
+// parses it and returns a value between 0 and 255 depending on the field-trial
+// string. Returns absl::nullopt if the field trial is not specified, disabled,
+// or cannot be parsed. Example:
+// 'WebRTC-Audio-2ndAgcMinMicLevelExperiment/Enabled-80' => returns 80.
+absl::optional<int> GetMinMicLevelOverride() {
+ constexpr char kMinMicLevelFieldTrial[] =
+ "WebRTC-Audio-2ndAgcMinMicLevelExperiment";
+ if (!webrtc::field_trial::IsEnabled(kMinMicLevelFieldTrial)) {
+ return absl::nullopt;
+ }
+ const auto field_trial_string =
+ webrtc::field_trial::FindFullName(kMinMicLevelFieldTrial);
+ int min_mic_level = -1;
+ sscanf(field_trial_string.c_str(), "Enabled-%d", &min_mic_level);
+ if (min_mic_level >= 0 && min_mic_level <= 255) {
+ return min_mic_level;
+ } else {
+ RTC_LOG(LS_WARNING) << "[agc] Invalid parameter for "
+ << kMinMicLevelFieldTrial << ", ignored.";
+ return absl::nullopt;
+ }
+}
+
+int ClampLevel(int mic_level, int min_mic_level) {
+ return rtc::SafeClamp(mic_level, min_mic_level, kMaxMicLevel);
+}
+
+int LevelFromGainError(int gain_error, int level, int min_mic_level) {
+ RTC_DCHECK_GE(level, 0);
+ RTC_DCHECK_LE(level, kMaxMicLevel);
+ if (gain_error == 0) {
+ return level;
+ }
+
+ int new_level = level;
+ if (gain_error > 0) {
+ while (kGainMap[new_level] - kGainMap[level] < gain_error &&
+ new_level < kMaxMicLevel) {
+ ++new_level;
+ }
+ } else {
+ while (kGainMap[new_level] - kGainMap[level] > gain_error &&
+ new_level > min_mic_level) {
+ --new_level;
+ }
+ }
+ return new_level;
+}
+
+// Returns the proportion of samples in the buffer which are at full-scale
+// (and presumably clipped).
+float ComputeClippedRatio(const float* const* audio,
+ size_t num_channels,
+ size_t samples_per_channel) {
+ RTC_DCHECK_GT(samples_per_channel, 0);
+ int num_clipped = 0;
+ for (size_t ch = 0; ch < num_channels; ++ch) {
+ int num_clipped_in_ch = 0;
+ for (size_t i = 0; i < samples_per_channel; ++i) {
+ RTC_DCHECK(audio[ch]);
+ if (audio[ch][i] >= 32767.0f || audio[ch][i] <= -32768.0f) {
+ ++num_clipped_in_ch;
+ }
+ }
+ num_clipped = std::max(num_clipped, num_clipped_in_ch);
+ }
+ return static_cast<float>(num_clipped) / (samples_per_channel);
+}
+
+void LogClippingPredictorMetrics(const ClippingPredictorEvaluator& evaluator) {
+ absl::optional<ClippingPredictionMetrics> metrics =
+ ComputeClippingPredictionMetrics(evaluator.counters());
+ if (metrics.has_value()) {
+ RTC_LOG(LS_INFO) << "Clipping predictor metrics: P " << metrics->precision
+ << " R " << metrics->recall << " F1 score "
+ << metrics->f1_score;
+ RTC_DCHECK_GE(metrics->f1_score, 0.0f);
+ RTC_DCHECK_LE(metrics->f1_score, 1.0f);
+ RTC_DCHECK_GE(metrics->precision, 0.0f);
+ RTC_DCHECK_LE(metrics->precision, 1.0f);
+ RTC_DCHECK_GE(metrics->recall, 0.0f);
+ RTC_DCHECK_LE(metrics->recall, 1.0f);
+ RTC_HISTOGRAM_COUNTS_LINEAR(
+ /*name=*/"WebRTC.Audio.Agc.ClippingPredictor.F1Score",
+ /*sample=*/std::round(metrics->f1_score * 100.0f),
+ /*min=*/0,
+ /*max=*/100,
+ /*bucket_count=*/50);
+ RTC_HISTOGRAM_COUNTS_LINEAR(
+ /*name=*/"WebRTC.Audio.Agc.ClippingPredictor.Precision",
+ /*sample=*/std::round(metrics->precision * 100.0f),
+ /*min=*/0,
+ /*max=*/100,
+ /*bucket_count=*/50);
+ RTC_HISTOGRAM_COUNTS_LINEAR(
+ /*name=*/"WebRTC.Audio.Agc.ClippingPredictor.Recall",
+ /*sample=*/std::round(metrics->recall * 100.0f),
+ /*min=*/0,
+ /*max=*/100,
+ /*bucket_count=*/50);
+ }
+}
+
+void LogClippingMetrics(int clipping_rate) {
+ RTC_LOG(LS_INFO) << "Input clipping rate: " << clipping_rate << "%";
+ RTC_HISTOGRAM_COUNTS_LINEAR(/*name=*/"WebRTC.Audio.Agc.InputClippingRate",
+ /*sample=*/clipping_rate, /*min=*/0, /*max=*/100,
+ /*bucket_count=*/50);
+}
+
+} // namespace
+
+MonoAgc::MonoAgc(ApmDataDumper* data_dumper,
+ int startup_min_level,
+ int clipped_level_min,
+ bool disable_digital_adaptive,
+ int min_mic_level)
+ : min_mic_level_(min_mic_level),
+ disable_digital_adaptive_(disable_digital_adaptive),
+ agc_(std::make_unique<Agc>()),
+ max_level_(kMaxMicLevel),
+ max_compression_gain_(kMaxCompressionGain),
+ target_compression_(kDefaultCompressionGain),
+ compression_(target_compression_),
+ compression_accumulator_(compression_),
+ startup_min_level_(ClampLevel(startup_min_level, min_mic_level_)),
+ clipped_level_min_(clipped_level_min) {}
+
+MonoAgc::~MonoAgc() = default;
+
+void MonoAgc::Initialize() {
+ max_level_ = kMaxMicLevel;
+ max_compression_gain_ = kMaxCompressionGain;
+ target_compression_ = disable_digital_adaptive_ ? 0 : kDefaultCompressionGain;
+ compression_ = disable_digital_adaptive_ ? 0 : target_compression_;
+ compression_accumulator_ = compression_;
+ capture_output_used_ = true;
+ check_volume_on_next_process_ = true;
+}
+
+void MonoAgc::Process(rtc::ArrayView<const int16_t> audio) {
+ new_compression_to_set_ = absl::nullopt;
+
+ if (check_volume_on_next_process_) {
+ check_volume_on_next_process_ = false;
+ // We have to wait until the first process call to check the volume,
+ // because Chromium doesn't guarantee it to be valid any earlier.
+ CheckVolumeAndReset();
+ }
+
+ agc_->Process(audio);
+
+ UpdateGain();
+ if (!disable_digital_adaptive_) {
+ UpdateCompressor();
+ }
+}
+
+void MonoAgc::HandleClipping(int clipped_level_step) {
+ // Always decrease the maximum level, even if the current level is below
+ // threshold.
+ SetMaxLevel(std::max(clipped_level_min_, max_level_ - clipped_level_step));
+ if (log_to_histograms_) {
+ RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.AgcClippingAdjustmentAllowed",
+ level_ - clipped_level_step >= clipped_level_min_);
+ }
+ if (level_ > clipped_level_min_) {
+ // Don't try to adjust the level if we're already below the limit. As
+ // a consequence, if the user has brought the level above the limit, we
+ // will still not react until the postproc updates the level.
+ SetLevel(std::max(clipped_level_min_, level_ - clipped_level_step));
+ // Reset the AGCs for all channels since the level has changed.
+ agc_->Reset();
+ }
+}
+
+void MonoAgc::SetLevel(int new_level) {
+ int voe_level = stream_analog_level_;
+ if (voe_level == 0) {
+ RTC_DLOG(LS_INFO)
+ << "[agc] VolumeCallbacks returned level=0, taking no action.";
+ return;
+ }
+ if (voe_level < 0 || voe_level > kMaxMicLevel) {
+ RTC_LOG(LS_ERROR) << "VolumeCallbacks returned an invalid level="
+ << voe_level;
+ return;
+ }
+
+ // Detect manual input volume adjustments by checking if the current level
+ // `voe_level` is outside of the `[level_ - kLevelQuantizationSlack, level_ +
+ // kLevelQuantizationSlack]` range where `level_` is the last input volume
+ // known by this gain controller.
+ if (voe_level > level_ + kLevelQuantizationSlack ||
+ voe_level < level_ - kLevelQuantizationSlack) {
+ RTC_DLOG(LS_INFO) << "[agc] Mic volume was manually adjusted. Updating "
+ "stored level from "
+ << level_ << " to " << voe_level;
+ level_ = voe_level;
+ // Always allow the user to increase the volume.
+ if (level_ > max_level_) {
+ SetMaxLevel(level_);
+ }
+ // Take no action in this case, since we can't be sure when the volume
+ // was manually adjusted. The compressor will still provide some of the
+ // desired gain change.
+ agc_->Reset();
+
+ return;
+ }
+
+ new_level = std::min(new_level, max_level_);
+ if (new_level == level_) {
+ return;
+ }
+
+ stream_analog_level_ = new_level;
+ RTC_DLOG(LS_INFO) << "[agc] voe_level=" << voe_level << ", level_=" << level_
+ << ", new_level=" << new_level;
+ level_ = new_level;
+}
+
+void MonoAgc::SetMaxLevel(int level) {
+ RTC_DCHECK_GE(level, clipped_level_min_);
+ max_level_ = level;
+ // Scale the `kSurplusCompressionGain` linearly across the restricted
+ // level range.
+ max_compression_gain_ =
+ kMaxCompressionGain + std::floor((1.f * kMaxMicLevel - max_level_) /
+ (kMaxMicLevel - clipped_level_min_) *
+ kSurplusCompressionGain +
+ 0.5f);
+ RTC_DLOG(LS_INFO) << "[agc] max_level_=" << max_level_
+ << ", max_compression_gain_=" << max_compression_gain_;
+}
+
+void MonoAgc::HandleCaptureOutputUsedChange(bool capture_output_used) {
+ if (capture_output_used_ == capture_output_used) {
+ return;
+ }
+ capture_output_used_ = capture_output_used;
+
+ if (capture_output_used) {
+ // When we start using the output, we should reset things to be safe.
+ check_volume_on_next_process_ = true;
+ }
+}
+
+int MonoAgc::CheckVolumeAndReset() {
+ int level = stream_analog_level_;
+ // Reasons for taking action at startup:
+ // 1) A person starting a call is expected to be heard.
+ // 2) Regardless of how `level` == 0 is interpreted, we should raise it so
+ // the AGC can do its job properly.
+ if (level == 0 && !startup_) {
+ RTC_DLOG(LS_INFO)
+ << "[agc] VolumeCallbacks returned level=0, taking no action.";
+ return 0;
+ }
+ if (level < 0 || level > kMaxMicLevel) {
+ RTC_LOG(LS_ERROR) << "[agc] VolumeCallbacks returned an invalid level="
+ << level;
+ return -1;
+ }
+ RTC_DLOG(LS_INFO) << "[agc] Initial GetMicVolume()=" << level;
+
+ int minLevel = startup_ ? startup_min_level_ : min_mic_level_;
+ if (level < minLevel) {
+ level = minLevel;
+ RTC_DLOG(LS_INFO) << "[agc] Initial volume too low, raising to " << level;
+ stream_analog_level_ = level;
+ }
+ agc_->Reset();
+ level_ = level;
+ startup_ = false;
+ return 0;
+}
+
+// Requests the RMS error from AGC and distributes the required gain change
+// between the digital compression stage and volume slider. We use the
+// compressor first, providing a slack region around the current slider
+// position to reduce movement.
+//
+// If the slider needs to be moved, we check first if the user has adjusted
+// it, in which case we take no action and cache the updated level.
+void MonoAgc::UpdateGain() {
+ int rms_error = 0;
+ if (!agc_->GetRmsErrorDb(&rms_error)) {
+ // No error update ready.
+ return;
+ }
+ // The compressor will always add at least kMinCompressionGain. In effect,
+ // this adjusts our target gain upward by the same amount and rms_error
+ // needs to reflect that.
+ rms_error += kMinCompressionGain;
+
+ // Handle as much error as possible with the compressor first.
+ int raw_compression =
+ rtc::SafeClamp(rms_error, kMinCompressionGain, max_compression_gain_);
+
+ // Deemphasize the compression gain error. Move halfway between the current
+ // target and the newly received target. This serves to soften perceptible
+ // intra-talkspurt adjustments, at the cost of some adaptation speed.
+ if ((raw_compression == max_compression_gain_ &&
+ target_compression_ == max_compression_gain_ - 1) ||
+ (raw_compression == kMinCompressionGain &&
+ target_compression_ == kMinCompressionGain + 1)) {
+ // Special case to allow the target to reach the endpoints of the
+ // compression range. The deemphasis would otherwise stop it 1 dB short.
+ target_compression_ = raw_compression;
+ } else {
+ target_compression_ =
+ (raw_compression - target_compression_) / 2 + target_compression_;
+ }
+
+ // Residual error will be handled by adjusting the volume slider. Use the
+ // raw rather than deemphasized compression here as we would otherwise
+ // shrink the amount of slack the compressor provides.
+ const int residual_gain =
+ rtc::SafeClamp(rms_error - raw_compression, -kMaxResidualGainChange,
+ kMaxResidualGainChange);
+ RTC_DLOG(LS_INFO) << "[agc] rms_error=" << rms_error
+ << ", target_compression=" << target_compression_
+ << ", residual_gain=" << residual_gain;
+ if (residual_gain == 0)
+ return;
+
+ int old_level = level_;
+ SetLevel(LevelFromGainError(residual_gain, level_, min_mic_level_));
+ if (old_level != level_) {
+ // level_ was updated by SetLevel; log the new value.
+ RTC_HISTOGRAM_COUNTS_LINEAR("WebRTC.Audio.AgcSetLevel", level_, 1,
+ kMaxMicLevel, 50);
+ // Reset the AGC since the level has changed.
+ agc_->Reset();
+ }
+}
+
+void MonoAgc::UpdateCompressor() {
+ calls_since_last_gain_log_++;
+ if (calls_since_last_gain_log_ == 100) {
+ calls_since_last_gain_log_ = 0;
+ RTC_HISTOGRAM_COUNTS_LINEAR("WebRTC.Audio.Agc.DigitalGainApplied",
+ compression_, 0, kMaxCompressionGain,
+ kMaxCompressionGain + 1);
+ }
+ if (compression_ == target_compression_) {
+ return;
+ }
+
+ // Adapt the compression gain slowly towards the target, in order to avoid
+ // highly perceptible changes.
+ if (target_compression_ > compression_) {
+ compression_accumulator_ += kCompressionGainStep;
+ } else {
+ compression_accumulator_ -= kCompressionGainStep;
+ }
+
+ // The compressor accepts integer gains in dB. Adjust the gain when
+ // we've come within half a stepsize of the nearest integer. (We don't
+ // check for equality due to potential floating point imprecision).
+ int new_compression = compression_;
+ int nearest_neighbor = std::floor(compression_accumulator_ + 0.5);
+ if (std::fabs(compression_accumulator_ - nearest_neighbor) <
+ kCompressionGainStep / 2) {
+ new_compression = nearest_neighbor;
+ }
+
+ // Set the new compression gain.
+ if (new_compression != compression_) {
+ RTC_HISTOGRAM_COUNTS_LINEAR("WebRTC.Audio.Agc.DigitalGainUpdated",
+ new_compression, 0, kMaxCompressionGain,
+ kMaxCompressionGain + 1);
+ compression_ = new_compression;
+ compression_accumulator_ = new_compression;
+ new_compression_to_set_ = compression_;
+ }
+}
+
+std::atomic<int> AgcManagerDirect::instance_counter_(0);
+
+AgcManagerDirect::AgcManagerDirect(
+ const AudioProcessing::Config::GainController1::AnalogGainController&
+ analog_config,
+ Agc* agc)
+ : AgcManagerDirect(/*num_capture_channels=*/1, analog_config) {
+ RTC_DCHECK(channel_agcs_[0]);
+ RTC_DCHECK(agc);
+ channel_agcs_[0]->set_agc(agc);
+}
+
+AgcManagerDirect::AgcManagerDirect(int num_capture_channels,
+ const AnalogAgcConfig& analog_config)
+ : min_mic_level_override_(GetMinMicLevelOverride()),
+ data_dumper_(new ApmDataDumper(instance_counter_.fetch_add(1) + 1)),
+ use_min_channel_level_(!UseMaxAnalogChannelLevel()),
+ num_capture_channels_(num_capture_channels),
+ disable_digital_adaptive_(!analog_config.enable_digital_adaptive),
+ frames_since_clipped_(analog_config.clipped_wait_frames),
+ capture_output_used_(true),
+ clipped_level_step_(analog_config.clipped_level_step),
+ clipped_ratio_threshold_(analog_config.clipped_ratio_threshold),
+ clipped_wait_frames_(analog_config.clipped_wait_frames),
+ channel_agcs_(num_capture_channels),
+ new_compressions_to_set_(num_capture_channels),
+ clipping_predictor_(
+ CreateClippingPredictor(num_capture_channels,
+ analog_config.clipping_predictor)),
+ use_clipping_predictor_step_(
+ !!clipping_predictor_ &&
+ analog_config.clipping_predictor.use_predicted_step),
+ clipping_predictor_evaluator_(kClippingPredictorEvaluatorHistorySize),
+ clipping_predictor_log_counter_(0),
+ clipping_rate_log_(0.0f),
+ clipping_rate_log_counter_(0) {
+ const int min_mic_level = min_mic_level_override_.value_or(kMinMicLevel);
+ RTC_LOG(LS_INFO) << "[agc] Min mic level: " << min_mic_level
+ << " (overridden: "
+ << (min_mic_level_override_.has_value() ? "yes" : "no")
+ << ")";
+ for (size_t ch = 0; ch < channel_agcs_.size(); ++ch) {
+ ApmDataDumper* data_dumper_ch = ch == 0 ? data_dumper_.get() : nullptr;
+
+ channel_agcs_[ch] = std::make_unique<MonoAgc>(
+ data_dumper_ch, analog_config.startup_min_volume,
+ analog_config.clipped_level_min, disable_digital_adaptive_,
+ min_mic_level);
+ }
+ RTC_DCHECK(!channel_agcs_.empty());
+ RTC_DCHECK_GT(clipped_level_step_, 0);
+ RTC_DCHECK_LE(clipped_level_step_, 255);
+ RTC_DCHECK_GT(clipped_ratio_threshold_, 0.0f);
+ RTC_DCHECK_LT(clipped_ratio_threshold_, 1.0f);
+ RTC_DCHECK_GT(clipped_wait_frames_, 0);
+ channel_agcs_[0]->ActivateLogging();
+}
+
+AgcManagerDirect::~AgcManagerDirect() {}
+
+void AgcManagerDirect::Initialize() {
+ RTC_DLOG(LS_INFO) << "AgcManagerDirect::Initialize";
+ data_dumper_->InitiateNewSetOfRecordings();
+ for (size_t ch = 0; ch < channel_agcs_.size(); ++ch) {
+ channel_agcs_[ch]->Initialize();
+ }
+ capture_output_used_ = true;
+
+ AggregateChannelLevels();
+ clipping_predictor_evaluator_.Reset();
+ clipping_predictor_log_counter_ = 0;
+ clipping_rate_log_ = 0.0f;
+ clipping_rate_log_counter_ = 0;
+}
+
+void AgcManagerDirect::SetupDigitalGainControl(
+ GainControl& gain_control) const {
+ if (gain_control.set_mode(GainControl::kFixedDigital) != 0) {
+ RTC_LOG(LS_ERROR) << "set_mode(GainControl::kFixedDigital) failed.";
+ }
+ const int target_level_dbfs = disable_digital_adaptive_ ? 0 : 2;
+ if (gain_control.set_target_level_dbfs(target_level_dbfs) != 0) {
+ RTC_LOG(LS_ERROR) << "set_target_level_dbfs() failed.";
+ }
+ const int compression_gain_db =
+ disable_digital_adaptive_ ? 0 : kDefaultCompressionGain;
+ if (gain_control.set_compression_gain_db(compression_gain_db) != 0) {
+ RTC_LOG(LS_ERROR) << "set_compression_gain_db() failed.";
+ }
+ const bool enable_limiter = !disable_digital_adaptive_;
+ if (gain_control.enable_limiter(enable_limiter) != 0) {
+ RTC_LOG(LS_ERROR) << "enable_limiter() failed.";
+ }
+}
+
+void AgcManagerDirect::AnalyzePreProcess(const AudioBuffer* audio) {
+ RTC_DCHECK(audio);
+ AnalyzePreProcess(audio->channels_const(), audio->num_frames());
+}
+
+void AgcManagerDirect::AnalyzePreProcess(const float* const* audio,
+ size_t samples_per_channel) {
+ RTC_DCHECK(audio);
+ AggregateChannelLevels();
+ if (!capture_output_used_) {
+ return;
+ }
+
+ if (!!clipping_predictor_) {
+ AudioFrameView<const float> frame = AudioFrameView<const float>(
+ audio, num_capture_channels_, static_cast<int>(samples_per_channel));
+ clipping_predictor_->Analyze(frame);
+ }
+
+ // Check for clipped samples, as the AGC has difficulty detecting pitch
+ // under clipping distortion. We do this in the preprocessing phase in order
+ // to catch clipped echo as well.
+ //
+ // If we find a sufficiently clipped frame, drop the current microphone level
+ // and enforce a new maximum level, lowered by the same amount from the current
+ // maximum. This harsh treatment is an effort to avoid repeated clipped echo
+ // events. As compensation for this restriction, the maximum compression
+ // gain is increased, through SetMaxLevel().
+ float clipped_ratio =
+ ComputeClippedRatio(audio, num_capture_channels_, samples_per_channel);
+ clipping_rate_log_ = std::max(clipped_ratio, clipping_rate_log_);
+ clipping_rate_log_counter_++;
+ constexpr int kNumFramesIn30Seconds = 3000;
+ if (clipping_rate_log_counter_ == kNumFramesIn30Seconds) {
+ LogClippingMetrics(std::round(100.0f * clipping_rate_log_));
+ clipping_rate_log_ = 0.0f;
+ clipping_rate_log_counter_ = 0;
+ }
+
+ if (frames_since_clipped_ < clipped_wait_frames_) {
+ ++frames_since_clipped_;
+ return;
+ }
+
+ const bool clipping_detected = clipped_ratio > clipped_ratio_threshold_;
+ bool clipping_predicted = false;
+ int predicted_step = 0;
+ if (!!clipping_predictor_) {
+ for (int channel = 0; channel < num_capture_channels_; ++channel) {
+ const auto step = clipping_predictor_->EstimateClippedLevelStep(
+ channel, stream_analog_level_, clipped_level_step_,
+ channel_agcs_[channel]->min_mic_level(), kMaxMicLevel);
+ if (step.has_value()) {
+ predicted_step = std::max(predicted_step, step.value());
+ clipping_predicted = true;
+ }
+ }
+ // Clipping prediction evaluation.
+ // `clipping_detected` is not used to evaluate the clipping predictor
+ // since for this purpose a single clipping sample counts as clipping.
+ const bool one_or_more_clipped_samples =
+ clipped_ratio >= (1.0f / samples_per_channel);
+ absl::optional<int> prediction_interval =
+ clipping_predictor_evaluator_.Observe(
+ /*clipping_detected=*/one_or_more_clipped_samples,
+ clipping_predicted);
+ if (prediction_interval.has_value()) {
+ RTC_HISTOGRAM_COUNTS_LINEAR(
+ "WebRTC.Audio.Agc.ClippingPredictor.PredictionInterval",
+ prediction_interval.value(), /*min=*/0,
+ /*max=*/49, /*bucket_count=*/50);
+ }
+ clipping_predictor_log_counter_++;
+ if (clipping_predictor_log_counter_ == kNumFramesIn30Seconds) {
+ LogClippingPredictorMetrics(clipping_predictor_evaluator_);
+ clipping_predictor_log_counter_ = 0;
+ }
+ }
+ if (clipping_detected) {
+ RTC_DLOG(LS_INFO) << "[agc] Clipping detected. clipped_ratio="
+ << clipped_ratio;
+ }
+ int step = clipped_level_step_;
+ if (clipping_predicted) {
+ predicted_step = std::max(predicted_step, clipped_level_step_);
+ RTC_DLOG(LS_INFO) << "[agc] Clipping predicted. step=" << predicted_step;
+ if (use_clipping_predictor_step_) {
+ step = predicted_step;
+ }
+ }
+ if (clipping_detected ||
+ (clipping_predicted && use_clipping_predictor_step_)) {
+ for (auto& state_ch : channel_agcs_) {
+ state_ch->HandleClipping(step);
+ }
+ frames_since_clipped_ = 0;
+ if (!!clipping_predictor_) {
+ clipping_predictor_->Reset();
+ clipping_predictor_evaluator_.RemoveExpectations();
+ }
+ }
+ AggregateChannelLevels();
+}
+
+void AgcManagerDirect::Process(const AudioBuffer* audio) {
+ RTC_DCHECK(audio);
+ AggregateChannelLevels();
+
+ if (!capture_output_used_) {
+ return;
+ }
+
+ const size_t num_frames_per_band = audio->num_frames_per_band();
+ for (size_t ch = 0; ch < channel_agcs_.size(); ++ch) {
+ std::array<int16_t, AudioBuffer::kMaxSampleRate / 100> audio_data;
+ int16_t* audio_use = audio_data.data();
+ FloatS16ToS16(audio->split_bands_const_f(ch)[0], num_frames_per_band,
+ audio_use);
+ channel_agcs_[ch]->Process({audio_use, num_frames_per_band});
+ new_compressions_to_set_[ch] = channel_agcs_[ch]->new_compression();
+ }
+
+ AggregateChannelLevels();
+}
+
+absl::optional<int> AgcManagerDirect::GetDigitalComressionGain() {
+ return new_compressions_to_set_[channel_controlling_gain_];
+}
+
+void AgcManagerDirect::HandleCaptureOutputUsedChange(bool capture_output_used) {
+ for (size_t ch = 0; ch < channel_agcs_.size(); ++ch) {
+ channel_agcs_[ch]->HandleCaptureOutputUsedChange(capture_output_used);
+ }
+ capture_output_used_ = capture_output_used;
+}
+
+float AgcManagerDirect::voice_probability() const {
+ float max_prob = 0.f;
+ for (const auto& state_ch : channel_agcs_) {
+ max_prob = std::max(max_prob, state_ch->voice_probability());
+ }
+
+ return max_prob;
+}
+
+void AgcManagerDirect::set_stream_analog_level(int level) {
+ for (size_t ch = 0; ch < channel_agcs_.size(); ++ch) {
+ channel_agcs_[ch]->set_stream_analog_level(level);
+ }
+
+ AggregateChannelLevels();
+}
+
+void AgcManagerDirect::AggregateChannelLevels() {
+ stream_analog_level_ = channel_agcs_[0]->stream_analog_level();
+ channel_controlling_gain_ = 0;
+ if (use_min_channel_level_) {
+ for (size_t ch = 1; ch < channel_agcs_.size(); ++ch) {
+ int level = channel_agcs_[ch]->stream_analog_level();
+ if (level < stream_analog_level_) {
+ stream_analog_level_ = level;
+ channel_controlling_gain_ = static_cast<int>(ch);
+ }
+ }
+ } else {
+ for (size_t ch = 1; ch < channel_agcs_.size(); ++ch) {
+ int level = channel_agcs_[ch]->stream_analog_level();
+ if (level > stream_analog_level_) {
+ stream_analog_level_ = level;
+ channel_controlling_gain_ = static_cast<int>(ch);
+ }
+ }
+ }
+
+ if (min_mic_level_override_.has_value() && stream_analog_level_ > 0) {
+ stream_analog_level_ =
+ std::max(stream_analog_level_, *min_mic_level_override_);
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/agc/agc_manager_direct.h b/third_party/libwebrtc/modules/audio_processing/agc/agc_manager_direct.h
new file mode 100644
index 0000000000..6a3fb2a5b4
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc/agc_manager_direct.h
@@ -0,0 +1,226 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AGC_AGC_MANAGER_DIRECT_H_
+#define MODULES_AUDIO_PROCESSING_AGC_AGC_MANAGER_DIRECT_H_
+
+#include <atomic>
+#include <memory>
+
+#include "absl/types/optional.h"
+#include "api/array_view.h"
+#include "modules/audio_processing/agc/agc.h"
+#include "modules/audio_processing/agc/clipping_predictor.h"
+#include "modules/audio_processing/agc/clipping_predictor_evaluator.h"
+#include "modules/audio_processing/audio_buffer.h"
+#include "modules/audio_processing/include/audio_processing.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+#include "rtc_base/gtest_prod_util.h"
+
+namespace webrtc {
+
+class MonoAgc;
+class GainControl;
+
+// Adaptive Gain Controller (AGC) that combines an analog and digital gain
+// controller. The digital controller determines and applies the digital
+// compression gain. The analog controller recommends what input volume (a.k.a.,
+// analog level) to use, handles input volume changes and input clipping. In
+// particular, it handles input volume changes triggered by the user (e.g.,
+// input volume set to zero by a HW mute button). This class is not thread-safe.
+class AgcManagerDirect final {
+ public:
+ // Ctor. `num_capture_channels` specifies the number of channels for the audio
+ // passed to `AnalyzePreProcess()` and `Process()`. Clamps
+ // `analog_config.startup_min_volume` to the [12, 255] range.
+ AgcManagerDirect(
+ int num_capture_channels,
+ const AudioProcessing::Config::GainController1::AnalogGainController&
+ analog_config);
+
+ ~AgcManagerDirect();
+ AgcManagerDirect(const AgcManagerDirect&) = delete;
+ AgcManagerDirect& operator=(const AgcManagerDirect&) = delete;
+
+ void Initialize();
+
+ // Configures `gain_control` to work as a fixed digital controller so that the
+ // adaptive part is only handled by this gain controller. Must be called if
+ // `gain_control` is also used to avoid the side-effects of running two AGCs.
+ void SetupDigitalGainControl(GainControl& gain_control) const;
+
+ // Analyzes `audio` before `Process()` is called so that the analysis can be
+ // performed before external digital processing operations take place (e.g.,
+ // echo cancellation). The analysis consists of input clipping detection and
+ // prediction (if enabled).
+ void AnalyzePreProcess(const AudioBuffer* audio);
+
+ // Processes `audio`. Chooses and applies a digital compression gain on each
+ // channel and chooses the new input volume to recommend. Undefined behavior
+ // if `AnalyzePreProcess()` is not called beforehand.
+ void Process(const AudioBuffer* audio);
+
+ // Call when the capture stream output has been flagged to be used/not-used.
+ // If unused, the manager disregards all incoming audio.
+ void HandleCaptureOutputUsedChange(bool capture_output_used);
+
+ float voice_probability() const;
+
+ // Returns the recommended input volume.
+ int stream_analog_level() const { return stream_analog_level_; }
+
+ // Sets the current input volume.
+ void set_stream_analog_level(int level);
+
+ int num_channels() const { return num_capture_channels_; }
+
+ // If available, returns the latest digital compression gain that has been
+ // applied.
+ absl::optional<int> GetDigitalComressionGain();
+
+ // Returns true if clipping prediction is enabled.
+ bool clipping_predictor_enabled() const { return !!clipping_predictor_; }
+
+ // Returns true if clipping prediction is used to adjust the analog gain.
+ bool use_clipping_predictor_step() const {
+ return use_clipping_predictor_step_;
+ }
+
+ private:
+ friend class AgcManagerDirectTestHelper;
+
+ FRIEND_TEST_ALL_PREFIXES(AgcManagerDirectTest, DisableDigitalDisablesDigital);
+ FRIEND_TEST_ALL_PREFIXES(AgcManagerDirectTest,
+ AgcMinMicLevelExperimentDefault);
+ FRIEND_TEST_ALL_PREFIXES(AgcManagerDirectTest,
+ AgcMinMicLevelExperimentDisabled);
+ FRIEND_TEST_ALL_PREFIXES(AgcManagerDirectTest,
+ AgcMinMicLevelExperimentOutOfRangeAbove);
+ FRIEND_TEST_ALL_PREFIXES(AgcManagerDirectTest,
+ AgcMinMicLevelExperimentOutOfRangeBelow);
+ FRIEND_TEST_ALL_PREFIXES(AgcManagerDirectTest,
+ AgcMinMicLevelExperimentEnabled50);
+ FRIEND_TEST_ALL_PREFIXES(AgcManagerDirectTest,
+ AgcMinMicLevelExperimentEnabledAboveStartupLevel);
+ FRIEND_TEST_ALL_PREFIXES(AgcManagerDirectParametrizedTest,
+ ClippingParametersVerified);
+ FRIEND_TEST_ALL_PREFIXES(AgcManagerDirectParametrizedTest,
+ DisableClippingPredictorDoesNotLowerVolume);
+ FRIEND_TEST_ALL_PREFIXES(AgcManagerDirectParametrizedTest,
+ UsedClippingPredictionsProduceLowerAnalogLevels);
+ FRIEND_TEST_ALL_PREFIXES(AgcManagerDirectParametrizedTest,
+ UnusedClippingPredictionsProduceEqualAnalogLevels);
+
+ // Ctor that creates a single-channel AGC by injecting `agc`.
+ // `agc` will be owned by this class; hence, do not delete it.
+ AgcManagerDirect(
+ const AudioProcessing::Config::GainController1::AnalogGainController&
+ analog_config,
+ Agc* agc);
+
+ void AnalyzePreProcess(const float* const* audio, size_t samples_per_channel);
+
+ void AggregateChannelLevels();
+
+ const absl::optional<int> min_mic_level_override_;
+ std::unique_ptr<ApmDataDumper> data_dumper_;
+ static std::atomic<int> instance_counter_;
+ const bool use_min_channel_level_;
+ const int num_capture_channels_;
+ const bool disable_digital_adaptive_;
+
+ int frames_since_clipped_;
+ int stream_analog_level_ = 0;
+ bool capture_output_used_;
+ int channel_controlling_gain_ = 0;
+
+ const int clipped_level_step_;
+ const float clipped_ratio_threshold_;
+ const int clipped_wait_frames_;
+
+ std::vector<std::unique_ptr<MonoAgc>> channel_agcs_;
+ std::vector<absl::optional<int>> new_compressions_to_set_;
+
+ const std::unique_ptr<ClippingPredictor> clipping_predictor_;
+ const bool use_clipping_predictor_step_;
+ ClippingPredictorEvaluator clipping_predictor_evaluator_;
+ int clipping_predictor_log_counter_;
+ float clipping_rate_log_;
+ int clipping_rate_log_counter_;
+};
+
+class MonoAgc {
+ public:
+ MonoAgc(ApmDataDumper* data_dumper,
+ int startup_min_level,
+ int clipped_level_min,
+ bool disable_digital_adaptive,
+ int min_mic_level);
+ ~MonoAgc();
+ MonoAgc(const MonoAgc&) = delete;
+ MonoAgc& operator=(const MonoAgc&) = delete;
+
+ void Initialize();
+ void HandleCaptureOutputUsedChange(bool capture_output_used);
+
+ void HandleClipping(int clipped_level_step);
+
+ void Process(rtc::ArrayView<const int16_t> audio);
+
+ void set_stream_analog_level(int level) { stream_analog_level_ = level; }
+ int stream_analog_level() const { return stream_analog_level_; }
+ float voice_probability() const { return agc_->voice_probability(); }
+ void ActivateLogging() { log_to_histograms_ = true; }
+ absl::optional<int> new_compression() const {
+ return new_compression_to_set_;
+ }
+
+ // Only used for testing.
+ void set_agc(Agc* agc) { agc_.reset(agc); }
+ int min_mic_level() const { return min_mic_level_; }
+ int startup_min_level() const { return startup_min_level_; }
+
+ private:
+ // Sets a new microphone level, after first checking that it hasn't been
+ // updated by the user, in which case no action is taken.
+ void SetLevel(int new_level);
+
+ // Sets the maximum level the AGC is allowed to apply. Also updates the
+ // maximum compression gain to compensate. The level must be at least
+ // `clipped_level_min_`.
+ void SetMaxLevel(int level);
+
+ int CheckVolumeAndReset();
+ void UpdateGain();
+ void UpdateCompressor();
+
+ const int min_mic_level_;
+ const bool disable_digital_adaptive_;
+ std::unique_ptr<Agc> agc_;
+ int level_ = 0;
+ int max_level_;
+ int max_compression_gain_;
+ int target_compression_;
+ int compression_;
+ float compression_accumulator_;
+ bool capture_output_used_ = true;
+ bool check_volume_on_next_process_ = true;
+ bool startup_ = true;
+ int startup_min_level_;
+ int calls_since_last_gain_log_ = 0;
+ int stream_analog_level_ = 0;
+ absl::optional<int> new_compression_to_set_;
+ bool log_to_histograms_ = false;
+ const int clipped_level_min_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AGC_AGC_MANAGER_DIRECT_H_
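
As the class comment above implies, the expected per-frame call order is:
push the current input volume, analyze, process, then read back the
recommendation. A sketch, assuming a mono capture stream; obtaining and
applying the OS microphone volume is platform-specific and left abstract here.

    #include "modules/audio_processing/agc/agc_manager_direct.h"
    #include "modules/audio_processing/audio_buffer.h"

    void ProcessCaptureFrame(webrtc::AgcManagerDirect& manager,
                             const webrtc::AudioBuffer& capture,
                             int& mic_volume) {
      manager.set_stream_analog_level(mic_volume);  // current input volume
      manager.AnalyzePreProcess(&capture);          // clipping detection first
      manager.Process(&capture);                    // then gain selection
      mic_volume = manager.stream_analog_level();   // recommended new volume
    }
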
diff --git a/third_party/libwebrtc/modules/audio_processing/agc/agc_manager_direct_unittest.cc b/third_party/libwebrtc/modules/audio_processing/agc/agc_manager_direct_unittest.cc
new file mode 100644
index 0000000000..ae813d0506
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc/agc_manager_direct_unittest.cc
@@ -0,0 +1,1452 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/agc/agc_manager_direct.h"
+
+#include <limits>
+
+#include "modules/audio_processing/agc/gain_control.h"
+#include "modules/audio_processing/agc/mock_agc.h"
+#include "modules/audio_processing/include/mock_audio_processing.h"
+#include "rtc_base/strings/string_builder.h"
+#include "test/field_trial.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+using ::testing::_;
+using ::testing::AtLeast;
+using ::testing::DoAll;
+using ::testing::Return;
+using ::testing::SetArgPointee;
+
+namespace webrtc {
+namespace {
+
+constexpr int kSampleRateHz = 32000;
+constexpr int kNumChannels = 1;
+constexpr int kSamplesPerChannel = kSampleRateHz / 100;
+constexpr int kInitialVolume = 128;
+constexpr int kClippedMin = 165; // Arbitrary, but different from the default.
+constexpr float kAboveClippedThreshold = 0.2f;
+constexpr int kMinMicLevel = 12;
+constexpr int kClippedLevelStep = 15;
+constexpr float kClippedRatioThreshold = 0.1f;
+constexpr int kClippedWaitFrames = 300;
+
+using AnalogAgcConfig =
+ AudioProcessing::Config::GainController1::AnalogGainController;
+using ClippingPredictorConfig = AudioProcessing::Config::GainController1::
+ AnalogGainController::ClippingPredictor;
+constexpr AnalogAgcConfig kDefaultAnalogConfig{};
+
+class MockGainControl : public GainControl {
+ public:
+ virtual ~MockGainControl() {}
+ MOCK_METHOD(int, set_stream_analog_level, (int level), (override));
+ MOCK_METHOD(int, stream_analog_level, (), (const, override));
+ MOCK_METHOD(int, set_mode, (Mode mode), (override));
+ MOCK_METHOD(Mode, mode, (), (const, override));
+ MOCK_METHOD(int, set_target_level_dbfs, (int level), (override));
+ MOCK_METHOD(int, target_level_dbfs, (), (const, override));
+ MOCK_METHOD(int, set_compression_gain_db, (int gain), (override));
+ MOCK_METHOD(int, compression_gain_db, (), (const, override));
+ MOCK_METHOD(int, enable_limiter, (bool enable), (override));
+ MOCK_METHOD(bool, is_limiter_enabled, (), (const, override));
+ MOCK_METHOD(int,
+ set_analog_level_limits,
+ (int minimum, int maximum),
+ (override));
+ MOCK_METHOD(int, analog_level_minimum, (), (const, override));
+ MOCK_METHOD(int, analog_level_maximum, (), (const, override));
+ MOCK_METHOD(bool, stream_is_saturated, (), (const, override));
+};
+
+// TODO(bugs.webrtc.org/12874): Remove and use designated initializers once
+// fixed.
+std::unique_ptr<AgcManagerDirect> CreateAgcManagerDirect(
+ int startup_min_volume,
+ int clipped_level_step,
+ float clipped_ratio_threshold,
+ int clipped_wait_frames,
+ const ClippingPredictorConfig& clipping_predictor_config =
+ kDefaultAnalogConfig.clipping_predictor) {
+ AnalogAgcConfig config;
+ config.startup_min_volume = startup_min_volume;
+ config.clipped_level_min = kClippedMin;
+ config.enable_digital_adaptive = false;
+ config.clipped_level_step = clipped_level_step;
+ config.clipped_ratio_threshold = clipped_ratio_threshold;
+ config.clipped_wait_frames = clipped_wait_frames;
+ config.clipping_predictor = clipping_predictor_config;
+ return std::make_unique<AgcManagerDirect>(/*num_capture_channels=*/1, config);
+}
+
+// Calls `AnalyzePreProcess()` on `manager` `num_calls` times. `peak_ratio` is a
+// value in [0, 1] which determines the amplitude of the samples (1 maps to full
+// scale). The first half of the calls is made on frames which are half filled
+// with zeros in order to simulate a signal with different crest factors.
+void CallPreProcessAudioBuffer(int num_calls,
+ float peak_ratio,
+ AgcManagerDirect& manager) {
+ RTC_DCHECK_LE(peak_ratio, 1.0f);
+ AudioBuffer audio_buffer(kSampleRateHz, 1, kSampleRateHz, 1, kSampleRateHz,
+ 1);
+ const int num_channels = audio_buffer.num_channels();
+ const int num_frames = audio_buffer.num_frames();
+
+ // Make half of the calls with half zeroed frames.
+ for (int ch = 0; ch < num_channels; ++ch) {
+ // 50% of the samples in one frame are zero.
+ for (int i = 0; i < num_frames; i += 2) {
+ audio_buffer.channels()[ch][i] = peak_ratio * 32767.0f;
+ audio_buffer.channels()[ch][i + 1] = 0.0f;
+ }
+ }
+ for (int n = 0; n < num_calls / 2; ++n) {
+ manager.AnalyzePreProcess(&audio_buffer);
+ }
+
+ // Make the remaining half of the calls with frames whose samples are all set.
+ for (int ch = 0; ch < num_channels; ++ch) {
+ for (int i = 0; i < num_frames; ++i) {
+ audio_buffer.channels()[ch][i] = peak_ratio * 32767.0f;
+ }
+ }
+ for (int n = 0; n < num_calls - num_calls / 2; ++n) {
+ manager.AnalyzePreProcess(&audio_buffer);
+ }
+}
+
+constexpr char kMinMicLevelFieldTrial[] =
+ "WebRTC-Audio-2ndAgcMinMicLevelExperiment";
+
+std::string GetAgcMinMicLevelExperimentFieldTrial(const std::string& value) {
+ char field_trial_buffer[64];
+ rtc::SimpleStringBuilder builder(field_trial_buffer);
+ builder << kMinMicLevelFieldTrial << "/" << value << "/";
+ return builder.str();
+}
+
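+// Returns the field-trial string that enables the experiment with minimum mic
+// level `enabled_value` (in [0, 255]) and an optional `suffix`, e.g. 50 maps
+// to "WebRTC-Audio-2ndAgcMinMicLevelExperiment/Enabled-50/".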
+std::string GetAgcMinMicLevelExperimentFieldTrialEnabled(
+ int enabled_value,
+ const std::string& suffix = "") {
+ RTC_DCHECK_GE(enabled_value, 0);
+ RTC_DCHECK_LE(enabled_value, 255);
+ char field_trial_buffer[64];
+ rtc::SimpleStringBuilder builder(field_trial_buffer);
+ builder << kMinMicLevelFieldTrial << "/Enabled-" << enabled_value << suffix
+ << "/";
+ return builder.str();
+}
+
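+// Returns the `Enabled-<level>` field-trial string when `min_mic_level` is
+// specified and the `Disabled` string otherwise.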
+std::string GetAgcMinMicLevelExperimentFieldTrial(
+ absl::optional<int> min_mic_level) {
+ if (min_mic_level.has_value()) {
+ return GetAgcMinMicLevelExperimentFieldTrialEnabled(*min_mic_level);
+ }
+ return GetAgcMinMicLevelExperimentFieldTrial("Disabled");
+}
+
+// Overwrites all the samples in `audio_buffer` with `samples_value`. When
+// `clipped_ratio`, a value in [0, 1], is greater than 0, the corresponding
+// fraction at the beginning of each frame is set to full scale to simulate
+// clipping.
+void WriteAudioBufferSamples(float samples_value,
+ float clipped_ratio,
+ AudioBuffer& audio_buffer) {
+ RTC_DCHECK_GE(samples_value, std::numeric_limits<int16_t>::min());
+ RTC_DCHECK_LE(samples_value, std::numeric_limits<int16_t>::max());
+ RTC_DCHECK_GE(clipped_ratio, 0.0f);
+ RTC_DCHECK_LE(clipped_ratio, 1.0f);
+ int num_channels = audio_buffer.num_channels();
+ int num_samples = audio_buffer.num_frames();
+ int num_clipping_samples = clipped_ratio * num_samples;
+ for (int ch = 0; ch < num_channels; ++ch) {
+ int i = 0;
+ for (; i < num_clipping_samples; ++i) {
+ audio_buffer.channels()[ch][i] = 32767.0f;
+ }
+ for (; i < num_samples; ++i) {
+ audio_buffer.channels()[ch][i] = samples_value;
+ }
+ }
+}
+
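+// Calls `AnalyzePreProcess()` and `Process()` on `manager` `num_calls` times.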
+void CallPreProcessAndProcess(int num_calls,
+ const AudioBuffer& audio_buffer,
+ AgcManagerDirect& manager) {
+ for (int n = 0; n < num_calls; ++n) {
+ manager.AnalyzePreProcess(&audio_buffer);
+ manager.Process(&audio_buffer);
+ }
+}
+
+} // namespace
+
+// TODO(bugs.webrtc.org/12874): Use constexpr struct with designated
+// initializers once fixed.
+constexpr AnalogAgcConfig GetAnalogAgcTestConfig() {
+ AnalogAgcConfig config;
+ config.startup_min_volume = kInitialVolume;
+ config.clipped_level_min = kClippedMin;
+ config.enable_digital_adaptive = true;
+ config.clipped_level_step = kClippedLevelStep;
+ config.clipped_ratio_threshold = kClippedRatioThreshold;
+ config.clipped_wait_frames = kClippedWaitFrames;
+ config.clipping_predictor = kDefaultAnalogConfig.clipping_predictor;
+ return config;
+}
+
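+// Wires an `AgcManagerDirect` to a `MockAgc` and a `MockGainControl` and
+// pre-fills a silent audio buffer. A minimal usage sketch, mirroring the
+// tests below:
+//   AgcManagerDirectTestHelper helper;
+//   helper.SetVolumeAndProcess(/*volume=*/255);
+//   helper.CallPreProc(/*num_calls=*/1, /*clipped_ratio=*/0.2f);
+//   EXPECT_EQ(240, helper.manager.stream_analog_level());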
+class AgcManagerDirectTestHelper {
+ public:
+ AgcManagerDirectTestHelper()
+ : audio_buffer(kSampleRateHz,
+ kNumChannels,
+ kSampleRateHz,
+ kNumChannels,
+ kSampleRateHz,
+ kNumChannels),
+ audio(kNumChannels),
+ audio_data(kNumChannels * kSamplesPerChannel, 0.0f),
+ mock_agc(new MockAgc()),
+ manager(GetAnalogAgcTestConfig(), mock_agc) {
+ ExpectInitialize();
+ manager.Initialize();
+ manager.SetupDigitalGainControl(mock_gain_control);
+ for (size_t ch = 0; ch < kNumChannels; ++ch) {
+ audio[ch] = &audio_data[ch * kSamplesPerChannel];
+ }
+ WriteAudioBufferSamples(/*samples_value=*/0.0f, /*clipped_ratio=*/0.0f,
+ audio_buffer);
+ }
+
+ void FirstProcess() {
+ EXPECT_CALL(*mock_agc, Reset()).Times(AtLeast(1));
+ EXPECT_CALL(*mock_agc, GetRmsErrorDb(_)).WillOnce(Return(false));
+ CallProcess(/*num_calls=*/1);
+ }
+
+ void SetVolumeAndProcess(int volume) {
+ manager.set_stream_analog_level(volume);
+ FirstProcess();
+ }
+
+ void ExpectCheckVolumeAndReset(int volume) {
+ manager.set_stream_analog_level(volume);
+ EXPECT_CALL(*mock_agc, Reset());
+ }
+
+ void ExpectInitialize() {
+ EXPECT_CALL(mock_gain_control, set_mode(GainControl::kFixedDigital));
+ EXPECT_CALL(mock_gain_control, set_target_level_dbfs(2));
+ EXPECT_CALL(mock_gain_control, set_compression_gain_db(7));
+ EXPECT_CALL(mock_gain_control, enable_limiter(true));
+ }
+
+ void CallProcess(int num_calls) {
+ for (int i = 0; i < num_calls; ++i) {
+ EXPECT_CALL(*mock_agc, Process(_)).WillOnce(Return());
+ manager.Process(&audio_buffer);
+ absl::optional<int> new_digital_gain = manager.GetDigitalComressionGain();
+ if (new_digital_gain) {
+ mock_gain_control.set_compression_gain_db(*new_digital_gain);
+ }
+ }
+ }
+
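+  // Calls `AnalyzePreProcess()` `num_calls` times on frames whose first
+  // `clipped_ratio` fraction of samples is set to full scale.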
+ void CallPreProc(int num_calls, float clipped_ratio) {
+ RTC_DCHECK_GE(clipped_ratio, 0.0f);
+ RTC_DCHECK_LE(clipped_ratio, 1.0f);
+ const int num_clipped = kSamplesPerChannel * clipped_ratio;
+ std::fill(audio_data.begin(), audio_data.end(), 0.0f);
+ for (size_t ch = 0; ch < kNumChannels; ++ch) {
+ for (int k = 0; k < num_clipped; ++k) {
+ audio[ch][k] = 32767.0f;
+ }
+ }
+ for (int i = 0; i < num_calls; ++i) {
+ manager.AnalyzePreProcess(audio.data(), kSamplesPerChannel);
+ }
+ }
+
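+  // Like `CallPreProcessAudioBuffer()`: makes the first half of the calls on
+  // half-zeroed frames and the second half on fully filled frames.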
+ void CallPreProcForChangingAudio(int num_calls, float peak_ratio) {
+ RTC_DCHECK_GE(1.0f, peak_ratio);
+ std::fill(audio_data.begin(), audio_data.end(), 0.0f);
+ for (size_t ch = 0; ch < kNumChannels; ++ch) {
+ for (size_t k = 0; k < kSamplesPerChannel; k += 2) {
+ audio[ch][k] = peak_ratio * 32767.0f;
+ }
+ }
+ for (int i = 0; i < num_calls / 2; ++i) {
+ manager.AnalyzePreProcess(audio.data(), kSamplesPerChannel);
+ }
+ for (size_t ch = 0; ch < kNumChannels; ++ch) {
+ for (size_t k = 0; k < kSamplesPerChannel; ++k) {
+ audio[ch][k] = peak_ratio * 32767.0f;
+ }
+ }
+ for (int i = 0; i < num_calls - num_calls / 2; ++i) {
+ manager.AnalyzePreProcess(audio.data(), kSamplesPerChannel);
+ }
+ }
+
+ AudioBuffer audio_buffer;
+ std::vector<float*> audio;
+ std::vector<float> audio_data;
+
+ MockAgc* mock_agc;
+ AgcManagerDirect manager;
+ MockGainControl mock_gain_control;
+};
+
+class AgcManagerDirectParametrizedTest
+ : public ::testing::TestWithParam<absl::optional<int>> {
+ protected:
+ AgcManagerDirectParametrizedTest()
+ : field_trials_(GetAgcMinMicLevelExperimentFieldTrial(GetParam())) {}
+
+ bool IsMinMicLevelOverridden() const { return GetParam().has_value(); }
+ int GetMinMicLevel() const { return GetParam().value_or(kMinMicLevel); }
+
+ private:
+ test::ScopedFieldTrials field_trials_;
+};
+
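+// Runs each test with the min mic level override unspecified and overridden
+// to 12 and 20.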
+INSTANTIATE_TEST_SUITE_P(,
+ AgcManagerDirectParametrizedTest,
+ testing::Values(absl::nullopt, 12, 20));
+
+TEST_P(AgcManagerDirectParametrizedTest,
+ StartupMinVolumeConfigurationIsRespected) {
+ AgcManagerDirectTestHelper helper;
+ helper.FirstProcess();
+ EXPECT_EQ(kInitialVolume, helper.manager.stream_analog_level());
+}
+
+TEST_P(AgcManagerDirectParametrizedTest, MicVolumeResponseToRmsError) {
+ AgcManagerDirectTestHelper helper;
+ helper.FirstProcess();
+
+ // Compressor default; no residual error.
+ EXPECT_CALL(*helper.mock_agc, GetRmsErrorDb(_))
+ .WillOnce(DoAll(SetArgPointee<0>(5), Return(true)));
+ helper.CallProcess(/*num_calls=*/1);
+
+ // Inside the compressor's window; no change of volume.
+ EXPECT_CALL(*helper.mock_agc, GetRmsErrorDb(_))
+ .WillOnce(DoAll(SetArgPointee<0>(10), Return(true)));
+ helper.CallProcess(/*num_calls=*/1);
+
+ // Above the compressor's window; volume should be increased.
+ EXPECT_CALL(*helper.mock_agc, GetRmsErrorDb(_))
+ .WillOnce(DoAll(SetArgPointee<0>(11), Return(true)));
+ helper.CallProcess(/*num_calls=*/1);
+ EXPECT_EQ(130, helper.manager.stream_analog_level());
+
+ EXPECT_CALL(*helper.mock_agc, GetRmsErrorDb(_))
+ .WillOnce(DoAll(SetArgPointee<0>(20), Return(true)));
+ helper.CallProcess(/*num_calls=*/1);
+ EXPECT_EQ(168, helper.manager.stream_analog_level());
+
+ // Inside the compressor's window; no change of volume.
+ EXPECT_CALL(*helper.mock_agc, GetRmsErrorDb(_))
+ .WillOnce(DoAll(SetArgPointee<0>(5), Return(true)));
+ helper.CallProcess(/*num_calls=*/1);
+ EXPECT_CALL(*helper.mock_agc, GetRmsErrorDb(_))
+ .WillOnce(DoAll(SetArgPointee<0>(0), Return(true)));
+ helper.CallProcess(/*num_calls=*/1);
+
+ // Below the compressor's window; volume should be decreased.
+ EXPECT_CALL(*helper.mock_agc, GetRmsErrorDb(_))
+ .WillOnce(DoAll(SetArgPointee<0>(-1), Return(true)));
+ helper.CallProcess(/*num_calls=*/1);
+ EXPECT_EQ(167, helper.manager.stream_analog_level());
+
+ EXPECT_CALL(*helper.mock_agc, GetRmsErrorDb(_))
+ .WillOnce(DoAll(SetArgPointee<0>(-1), Return(true)));
+ helper.CallProcess(/*num_calls=*/1);
+ EXPECT_EQ(163, helper.manager.stream_analog_level());
+
+ EXPECT_CALL(*helper.mock_agc, GetRmsErrorDb(_))
+ .WillOnce(DoAll(SetArgPointee<0>(-9), Return(true)));
+ helper.CallProcess(/*num_calls=*/1);
+ EXPECT_EQ(129, helper.manager.stream_analog_level());
+}
+
+TEST_P(AgcManagerDirectParametrizedTest, MicVolumeIsLimited) {
+ AgcManagerDirectTestHelper helper;
+ helper.FirstProcess();
+
+ // Maximum upwards change is limited.
+ EXPECT_CALL(*helper.mock_agc, GetRmsErrorDb(_))
+ .WillOnce(DoAll(SetArgPointee<0>(30), Return(true)));
+ helper.CallProcess(/*num_calls=*/1);
+ EXPECT_EQ(183, helper.manager.stream_analog_level());
+
+ EXPECT_CALL(*helper.mock_agc, GetRmsErrorDb(_))
+ .WillOnce(DoAll(SetArgPointee<0>(30), Return(true)));
+ helper.CallProcess(/*num_calls=*/1);
+ EXPECT_EQ(243, helper.manager.stream_analog_level());
+
+ // Won't go higher than the maximum.
+ EXPECT_CALL(*helper.mock_agc, GetRmsErrorDb(_))
+ .WillOnce(DoAll(SetArgPointee<0>(30), Return(true)));
+ helper.CallProcess(/*num_calls=*/1);
+ EXPECT_EQ(255, helper.manager.stream_analog_level());
+
+ EXPECT_CALL(*helper.mock_agc, GetRmsErrorDb(_))
+ .WillOnce(DoAll(SetArgPointee<0>(-1), Return(true)));
+ helper.CallProcess(/*num_calls=*/1);
+ EXPECT_EQ(254, helper.manager.stream_analog_level());
+
+ // Maximum downwards change is limited.
+ EXPECT_CALL(*helper.mock_agc, GetRmsErrorDb(_))
+ .WillOnce(DoAll(SetArgPointee<0>(-40), Return(true)));
+ helper.CallProcess(/*num_calls=*/1);
+ EXPECT_EQ(194, helper.manager.stream_analog_level());
+
+ EXPECT_CALL(*helper.mock_agc, GetRmsErrorDb(_))
+ .WillOnce(DoAll(SetArgPointee<0>(-40), Return(true)));
+ helper.CallProcess(/*num_calls=*/1);
+ EXPECT_EQ(137, helper.manager.stream_analog_level());
+
+ EXPECT_CALL(*helper.mock_agc, GetRmsErrorDb(_))
+ .WillOnce(DoAll(SetArgPointee<0>(-40), Return(true)));
+ helper.CallProcess(/*num_calls=*/1);
+ EXPECT_EQ(88, helper.manager.stream_analog_level());
+
+ EXPECT_CALL(*helper.mock_agc, GetRmsErrorDb(_))
+ .WillOnce(DoAll(SetArgPointee<0>(-40), Return(true)));
+ helper.CallProcess(/*num_calls=*/1);
+ EXPECT_EQ(54, helper.manager.stream_analog_level());
+
+ EXPECT_CALL(*helper.mock_agc, GetRmsErrorDb(_))
+ .WillOnce(DoAll(SetArgPointee<0>(-40), Return(true)));
+ helper.CallProcess(/*num_calls=*/1);
+ EXPECT_EQ(33, helper.manager.stream_analog_level());
+
+ // Won't go lower than the minimum.
+ EXPECT_CALL(*helper.mock_agc, GetRmsErrorDb(_))
+ .WillOnce(DoAll(SetArgPointee<0>(-40), Return(true)));
+ helper.CallProcess(/*num_calls=*/1);
+ EXPECT_EQ(std::max(18, GetMinMicLevel()),
+ helper.manager.stream_analog_level());
+
+ EXPECT_CALL(*helper.mock_agc, GetRmsErrorDb(_))
+ .WillOnce(DoAll(SetArgPointee<0>(-40), Return(true)));
+ helper.CallProcess(/*num_calls=*/1);
+ EXPECT_EQ(std::max(12, GetMinMicLevel()),
+ helper.manager.stream_analog_level());
+}
+
+TEST_P(AgcManagerDirectParametrizedTest, CompressorStepsTowardsTarget) {
+ AgcManagerDirectTestHelper helper;
+ helper.FirstProcess();
+
+ // Compressor default; no call to set_compression_gain_db.
+ EXPECT_CALL(*helper.mock_agc, GetRmsErrorDb(_))
+ .WillOnce(DoAll(SetArgPointee<0>(5), Return(true)))
+ .WillRepeatedly(Return(false));
+ EXPECT_CALL(helper.mock_gain_control, set_compression_gain_db(_)).Times(0);
+ helper.CallProcess(/*num_calls=*/20);
+
+ // Moves slowly upwards.
+ EXPECT_CALL(*helper.mock_agc, GetRmsErrorDb(_))
+ .WillOnce(DoAll(SetArgPointee<0>(9), Return(true)))
+ .WillRepeatedly(Return(false));
+ EXPECT_CALL(helper.mock_gain_control, set_compression_gain_db(_)).Times(0);
+ helper.CallProcess(/*num_calls=*/19);
+ EXPECT_CALL(helper.mock_gain_control, set_compression_gain_db(8))
+ .WillOnce(Return(0));
+ helper.CallProcess(/*num_calls=*/1);
+
+ EXPECT_CALL(helper.mock_gain_control, set_compression_gain_db(_)).Times(0);
+ helper.CallProcess(/*num_calls=*/19);
+ EXPECT_CALL(helper.mock_gain_control, set_compression_gain_db(9))
+ .WillOnce(Return(0));
+ helper.CallProcess(/*num_calls=*/1);
+
+ EXPECT_CALL(helper.mock_gain_control, set_compression_gain_db(_)).Times(0);
+ helper.CallProcess(/*num_calls=*/20);
+
+ // Moves slowly downward, then reverses before reaching the original target.
+ EXPECT_CALL(*helper.mock_agc, GetRmsErrorDb(_))
+ .WillOnce(DoAll(SetArgPointee<0>(5), Return(true)))
+ .WillRepeatedly(Return(false));
+ EXPECT_CALL(helper.mock_gain_control, set_compression_gain_db(_)).Times(0);
+ helper.CallProcess(/*num_calls=*/19);
+ EXPECT_CALL(helper.mock_gain_control, set_compression_gain_db(8))
+ .WillOnce(Return(0));
+ helper.CallProcess(/*num_calls=*/1);
+
+ EXPECT_CALL(*helper.mock_agc, GetRmsErrorDb(_))
+ .WillOnce(DoAll(SetArgPointee<0>(9), Return(true)))
+ .WillRepeatedly(Return(false));
+ EXPECT_CALL(helper.mock_gain_control, set_compression_gain_db(_)).Times(0);
+ helper.CallProcess(/*num_calls=*/19);
+ EXPECT_CALL(helper.mock_gain_control, set_compression_gain_db(9))
+ .WillOnce(Return(0));
+ helper.CallProcess(/*num_calls=*/1);
+
+ EXPECT_CALL(helper.mock_gain_control, set_compression_gain_db(_)).Times(0);
+ helper.CallProcess(/*num_calls=*/20);
+}
+
+TEST_P(AgcManagerDirectParametrizedTest, CompressorErrorIsDeemphasized) {
+ AgcManagerDirectTestHelper helper;
+ helper.FirstProcess();
+
+ EXPECT_CALL(*helper.mock_agc, GetRmsErrorDb(_))
+ .WillOnce(DoAll(SetArgPointee<0>(10), Return(true)))
+ .WillRepeatedly(Return(false));
+ helper.CallProcess(/*num_calls=*/19);
+ EXPECT_CALL(helper.mock_gain_control, set_compression_gain_db(8))
+ .WillOnce(Return(0));
+ helper.CallProcess(/*num_calls=*/20);
+ EXPECT_CALL(helper.mock_gain_control, set_compression_gain_db(9))
+ .WillOnce(Return(0));
+ helper.CallProcess(/*num_calls=*/1);
+ EXPECT_CALL(helper.mock_gain_control, set_compression_gain_db(_)).Times(0);
+ helper.CallProcess(/*num_calls=*/20);
+
+ EXPECT_CALL(*helper.mock_agc, GetRmsErrorDb(_))
+ .WillOnce(DoAll(SetArgPointee<0>(0), Return(true)))
+ .WillRepeatedly(Return(false));
+ helper.CallProcess(/*num_calls=*/19);
+ EXPECT_CALL(helper.mock_gain_control, set_compression_gain_db(8))
+ .WillOnce(Return(0));
+ helper.CallProcess(/*num_calls=*/20);
+ EXPECT_CALL(helper.mock_gain_control, set_compression_gain_db(7))
+ .WillOnce(Return(0));
+ helper.CallProcess(/*num_calls=*/20);
+ EXPECT_CALL(helper.mock_gain_control, set_compression_gain_db(6))
+ .WillOnce(Return(0));
+ helper.CallProcess(/*num_calls=*/1);
+ EXPECT_CALL(helper.mock_gain_control, set_compression_gain_db(_)).Times(0);
+ helper.CallProcess(/*num_calls=*/20);
+}
+
+TEST_P(AgcManagerDirectParametrizedTest, CompressorReachesMaximum) {
+ AgcManagerDirectTestHelper helper;
+ helper.FirstProcess();
+
+ EXPECT_CALL(*helper.mock_agc, GetRmsErrorDb(_))
+ .WillOnce(DoAll(SetArgPointee<0>(10), Return(true)))
+ .WillOnce(DoAll(SetArgPointee<0>(10), Return(true)))
+ .WillOnce(DoAll(SetArgPointee<0>(10), Return(true)))
+ .WillOnce(DoAll(SetArgPointee<0>(10), Return(true)))
+ .WillRepeatedly(Return(false));
+ helper.CallProcess(/*num_calls=*/19);
+ EXPECT_CALL(helper.mock_gain_control, set_compression_gain_db(8))
+ .WillOnce(Return(0));
+ helper.CallProcess(/*num_calls=*/20);
+ EXPECT_CALL(helper.mock_gain_control, set_compression_gain_db(9))
+ .WillOnce(Return(0));
+ helper.CallProcess(/*num_calls=*/20);
+ EXPECT_CALL(helper.mock_gain_control, set_compression_gain_db(10))
+ .WillOnce(Return(0));
+ helper.CallProcess(/*num_calls=*/20);
+ EXPECT_CALL(helper.mock_gain_control, set_compression_gain_db(11))
+ .WillOnce(Return(0));
+ helper.CallProcess(/*num_calls=*/20);
+ EXPECT_CALL(helper.mock_gain_control, set_compression_gain_db(12))
+ .WillOnce(Return(0));
+ helper.CallProcess(/*num_calls=*/1);
+}
+
+TEST_P(AgcManagerDirectParametrizedTest, CompressorReachesMinimum) {
+ AgcManagerDirectTestHelper helper;
+ helper.FirstProcess();
+
+ EXPECT_CALL(*helper.mock_agc, GetRmsErrorDb(_))
+ .WillOnce(DoAll(SetArgPointee<0>(0), Return(true)))
+ .WillOnce(DoAll(SetArgPointee<0>(0), Return(true)))
+ .WillOnce(DoAll(SetArgPointee<0>(0), Return(true)))
+ .WillOnce(DoAll(SetArgPointee<0>(0), Return(true)))
+ .WillRepeatedly(Return(false));
+ helper.CallProcess(/*num_calls=*/19);
+ EXPECT_CALL(helper.mock_gain_control, set_compression_gain_db(6))
+ .WillOnce(Return(0));
+ helper.CallProcess(/*num_calls=*/20);
+ EXPECT_CALL(helper.mock_gain_control, set_compression_gain_db(5))
+ .WillOnce(Return(0));
+ helper.CallProcess(/*num_calls=*/20);
+ EXPECT_CALL(helper.mock_gain_control, set_compression_gain_db(4))
+ .WillOnce(Return(0));
+ helper.CallProcess(/*num_calls=*/20);
+ EXPECT_CALL(helper.mock_gain_control, set_compression_gain_db(3))
+ .WillOnce(Return(0));
+ helper.CallProcess(/*num_calls=*/20);
+ EXPECT_CALL(helper.mock_gain_control, set_compression_gain_db(2))
+ .WillOnce(Return(0));
+ helper.CallProcess(/*num_calls=*/1);
+}
+
+TEST_P(AgcManagerDirectParametrizedTest, NoActionWhileMuted) {
+ AgcManagerDirectTestHelper helper;
+ helper.manager.HandleCaptureOutputUsedChange(false);
+ helper.manager.Process(&helper.audio_buffer);
+ absl::optional<int> new_digital_gain =
+ helper.manager.GetDigitalComressionGain();
+ if (new_digital_gain) {
+ helper.mock_gain_control.set_compression_gain_db(*new_digital_gain);
+ }
+}
+
+TEST_P(AgcManagerDirectParametrizedTest, UnmutingChecksVolumeWithoutRaising) {
+ AgcManagerDirectTestHelper helper;
+ helper.FirstProcess();
+
+ helper.manager.HandleCaptureOutputUsedChange(false);
+ helper.manager.HandleCaptureOutputUsedChange(true);
+ helper.ExpectCheckVolumeAndReset(/*volume=*/127);
+ // SetMicVolume should not be called.
+ EXPECT_CALL(*helper.mock_agc, GetRmsErrorDb(_)).WillOnce(Return(false));
+ helper.CallProcess(/*num_calls=*/1);
+ EXPECT_EQ(127, helper.manager.stream_analog_level());
+}
+
+TEST_P(AgcManagerDirectParametrizedTest, UnmutingRaisesTooLowVolume) {
+ AgcManagerDirectTestHelper helper;
+ helper.FirstProcess();
+
+ helper.manager.HandleCaptureOutputUsedChange(false);
+ helper.manager.HandleCaptureOutputUsedChange(true);
+ helper.ExpectCheckVolumeAndReset(/*volume=*/11);
+ EXPECT_CALL(*helper.mock_agc, GetRmsErrorDb(_)).WillOnce(Return(false));
+ helper.CallProcess(/*num_calls=*/1);
+ EXPECT_EQ(GetMinMicLevel(), helper.manager.stream_analog_level());
+}
+
+TEST_P(AgcManagerDirectParametrizedTest,
+ ManualLevelChangeResultsInNoSetMicCall) {
+ AgcManagerDirectTestHelper helper;
+ helper.FirstProcess();
+
+ // Change outside of compressor's range, which would normally trigger a call
+ // to `SetMicVolume()`.
+ EXPECT_CALL(*helper.mock_agc, GetRmsErrorDb(_))
+ .WillOnce(DoAll(SetArgPointee<0>(11), Return(true)));
+
+ // When the analog volume changes, the gain controller is reset.
+ EXPECT_CALL(*helper.mock_agc, Reset()).Times(AtLeast(1));
+
+ // GetMicVolume returns a value outside of the quantization slack, indicating
+ // a manual volume change.
+ ASSERT_NE(helper.manager.stream_analog_level(), 154);
+ helper.manager.set_stream_analog_level(154);
+ helper.CallProcess(/*num_calls=*/1);
+ EXPECT_EQ(154, helper.manager.stream_analog_level());
+
+ // Do the same thing, except downwards now.
+ EXPECT_CALL(*helper.mock_agc, GetRmsErrorDb(_))
+ .WillOnce(DoAll(SetArgPointee<0>(-1), Return(true)));
+ helper.manager.set_stream_analog_level(100);
+ EXPECT_CALL(*helper.mock_agc, Reset()).Times(AtLeast(1));
+ helper.CallProcess(/*num_calls=*/1);
+ EXPECT_EQ(100, helper.manager.stream_analog_level());
+
+ // And finally verify the AGC continues working without a manual change.
+ EXPECT_CALL(*helper.mock_agc, GetRmsErrorDb(_))
+ .WillOnce(DoAll(SetArgPointee<0>(-1), Return(true)));
+ helper.CallProcess(/*num_calls=*/1);
+ EXPECT_EQ(99, helper.manager.stream_analog_level());
+}
+
+TEST_P(AgcManagerDirectParametrizedTest,
+ RecoveryAfterManualLevelChangeFromMax) {
+ AgcManagerDirectTestHelper helper;
+ helper.FirstProcess();
+
+ // Force the mic up to max volume. Takes a few steps due to the residual
+ // gain limitation.
+ EXPECT_CALL(*helper.mock_agc, GetRmsErrorDb(_))
+ .WillRepeatedly(DoAll(SetArgPointee<0>(30), Return(true)));
+ helper.CallProcess(/*num_calls=*/1);
+ EXPECT_EQ(183, helper.manager.stream_analog_level());
+ helper.CallProcess(/*num_calls=*/1);
+ EXPECT_EQ(243, helper.manager.stream_analog_level());
+ helper.CallProcess(/*num_calls=*/1);
+ EXPECT_EQ(255, helper.manager.stream_analog_level());
+
+ // Manual change does not result in SetMicVolume call.
+ EXPECT_CALL(*helper.mock_agc, GetRmsErrorDb(_))
+ .WillOnce(DoAll(SetArgPointee<0>(-1), Return(true)));
+ helper.manager.set_stream_analog_level(50);
+ EXPECT_CALL(*helper.mock_agc, Reset()).Times(AtLeast(1));
+ helper.CallProcess(/*num_calls=*/1);
+ EXPECT_EQ(50, helper.manager.stream_analog_level());
+
+ // Continues working as usual afterwards.
+ EXPECT_CALL(*helper.mock_agc, GetRmsErrorDb(_))
+ .WillOnce(DoAll(SetArgPointee<0>(20), Return(true)));
+ helper.CallProcess(/*num_calls=*/1);
+ EXPECT_EQ(69, helper.manager.stream_analog_level());
+}
+
+// Checks that, when the min mic level override is not specified, AGC ramps up
+// towards the minimum mic level after the mic level is manually set below the
+// minimum gain to enforce.
+TEST(AgcManagerDirectTest, RecoveryAfterManualLevelChangeBelowMin) {
+ AgcManagerDirectTestHelper helper;
+ helper.FirstProcess();
+
+ // Manual change below min, but strictly positive, otherwise
+ // AGC won't take any action.
+ EXPECT_CALL(*helper.mock_agc, GetRmsErrorDb(_))
+ .WillOnce(DoAll(SetArgPointee<0>(-1), Return(true)));
+ helper.manager.set_stream_analog_level(1);
+ EXPECT_CALL(*helper.mock_agc, Reset()).Times(AtLeast(1));
+ helper.CallProcess(/*num_calls=*/1);
+ EXPECT_EQ(1, helper.manager.stream_analog_level());
+
+ // Continues working as usual afterwards.
+ EXPECT_CALL(*helper.mock_agc, GetRmsErrorDb(_))
+ .WillOnce(DoAll(SetArgPointee<0>(11), Return(true)));
+ helper.CallProcess(/*num_calls=*/1);
+ EXPECT_EQ(2, helper.manager.stream_analog_level());
+
+ EXPECT_CALL(*helper.mock_agc, GetRmsErrorDb(_))
+ .WillOnce(DoAll(SetArgPointee<0>(30), Return(true)));
+ helper.CallProcess(/*num_calls=*/1);
+ EXPECT_EQ(11, helper.manager.stream_analog_level());
+
+ EXPECT_CALL(*helper.mock_agc, GetRmsErrorDb(_))
+ .WillOnce(DoAll(SetArgPointee<0>(20), Return(true)));
+ helper.CallProcess(/*num_calls=*/1);
+ EXPECT_EQ(18, helper.manager.stream_analog_level());
+}
+
+// Checks that, when the min mic level override is specified, AGC immediately
+// applies the minimum mic level after the mic level is manually set below the
+// minimum gain to enforce.
+TEST_P(AgcManagerDirectParametrizedTest,
+ RecoveryAfterManualLevelChangeBelowMin) {
+ if (!IsMinMicLevelOverridden()) {
+ GTEST_SKIP() << "Skipped. Min mic level not overridden.";
+ }
+
+ AgcManagerDirectTestHelper helper;
+ helper.FirstProcess();
+
+ // Manual change below min, but strictly positive, otherwise
+ // AGC won't take any action.
+ EXPECT_CALL(*helper.mock_agc, GetRmsErrorDb(_))
+ .WillOnce(DoAll(SetArgPointee<0>(-1), Return(true)));
+ helper.manager.set_stream_analog_level(1);
+ EXPECT_CALL(*helper.mock_agc, Reset()).Times(AtLeast(1));
+ helper.CallProcess(/*num_calls=*/1);
+ EXPECT_EQ(GetMinMicLevel(), helper.manager.stream_analog_level());
+}
+
+TEST_P(AgcManagerDirectParametrizedTest, NoClippingHasNoImpact) {
+ AgcManagerDirectTestHelper helper;
+ helper.FirstProcess();
+
+ helper.CallPreProc(/*num_calls=*/100, /*clipped_ratio=*/0);
+ EXPECT_EQ(128, helper.manager.stream_analog_level());
+}
+
+TEST_P(AgcManagerDirectParametrizedTest, ClippingUnderThresholdHasNoImpact) {
+ AgcManagerDirectTestHelper helper;
+ helper.FirstProcess();
+
+ helper.CallPreProc(/*num_calls=*/1, /*clipped_ratio=*/0.099);
+ EXPECT_EQ(128, helper.manager.stream_analog_level());
+}
+
+TEST_P(AgcManagerDirectParametrizedTest, ClippingLowersVolume) {
+ AgcManagerDirectTestHelper helper;
+ helper.SetVolumeAndProcess(/*volume=*/255);
+
+ EXPECT_CALL(*helper.mock_agc, Reset()).Times(AtLeast(1));
+ helper.CallPreProc(/*num_calls=*/1, /*clipped_ratio=*/0.2);
+ EXPECT_EQ(240, helper.manager.stream_analog_level());
+}
+
+TEST_P(AgcManagerDirectParametrizedTest, WaitingPeriodBetweenClippingChecks) {
+ AgcManagerDirectTestHelper helper;
+ helper.SetVolumeAndProcess(255);
+
+ EXPECT_CALL(*helper.mock_agc, Reset()).Times(AtLeast(1));
+ helper.CallPreProc(/*num_calls=*/1, /*clipped_ratio=*/kAboveClippedThreshold);
+ EXPECT_EQ(240, helper.manager.stream_analog_level());
+
+ EXPECT_CALL(*helper.mock_agc, Reset()).Times(0);
+ helper.CallPreProc(/*num_calls=*/300,
+ /*clipped_ratio=*/kAboveClippedThreshold);
+ EXPECT_EQ(240, helper.manager.stream_analog_level());
+
+ EXPECT_CALL(*helper.mock_agc, Reset()).Times(AtLeast(1));
+ helper.CallPreProc(/*num_calls=*/1, /*clipped_ratio=*/kAboveClippedThreshold);
+ EXPECT_EQ(225, helper.manager.stream_analog_level());
+}
+
+TEST_P(AgcManagerDirectParametrizedTest, ClippingLoweringIsLimited) {
+ AgcManagerDirectTestHelper helper;
+ helper.SetVolumeAndProcess(/*volume=*/180);
+
+ EXPECT_CALL(*helper.mock_agc, Reset()).Times(AtLeast(1));
+ helper.CallPreProc(/*num_calls=*/1, /*clipped_ratio=*/kAboveClippedThreshold);
+ EXPECT_EQ(kClippedMin, helper.manager.stream_analog_level());
+
+ EXPECT_CALL(*helper.mock_agc, Reset()).Times(0);
+ helper.CallPreProc(/*num_calls=*/1000,
+ /*clipped_ratio=*/kAboveClippedThreshold);
+ EXPECT_EQ(kClippedMin, helper.manager.stream_analog_level());
+}
+
+TEST_P(AgcManagerDirectParametrizedTest,
+ ClippingMaxIsRespectedWhenEqualToLevel) {
+ AgcManagerDirectTestHelper helper;
+ helper.SetVolumeAndProcess(/*volume=*/255);
+
+ EXPECT_CALL(*helper.mock_agc, Reset()).Times(AtLeast(1));
+ helper.CallPreProc(/*num_calls=*/1, /*clipped_ratio=*/kAboveClippedThreshold);
+ EXPECT_EQ(240, helper.manager.stream_analog_level());
+
+ EXPECT_CALL(*helper.mock_agc, GetRmsErrorDb(_))
+ .WillRepeatedly(DoAll(SetArgPointee<0>(30), Return(true)));
+ helper.CallProcess(/*num_calls=*/10);
+ EXPECT_EQ(240, helper.manager.stream_analog_level());
+}
+
+TEST_P(AgcManagerDirectParametrizedTest,
+ ClippingMaxIsRespectedWhenHigherThanLevel) {
+ AgcManagerDirectTestHelper helper;
+ helper.SetVolumeAndProcess(/*volume=*/200);
+
+ EXPECT_CALL(*helper.mock_agc, Reset()).Times(AtLeast(1));
+ helper.CallPreProc(/*num_calls=*/1, /*clipped_ratio=*/kAboveClippedThreshold);
+ EXPECT_EQ(185, helper.manager.stream_analog_level());
+
+ EXPECT_CALL(*helper.mock_agc, GetRmsErrorDb(_))
+ .WillRepeatedly(DoAll(SetArgPointee<0>(40), Return(true)));
+ helper.CallProcess(/*num_calls=*/1);
+ EXPECT_EQ(240, helper.manager.stream_analog_level());
+ helper.CallProcess(/*num_calls=*/10);
+ EXPECT_EQ(240, helper.manager.stream_analog_level());
+}
+
+TEST_P(AgcManagerDirectParametrizedTest,
+ MaxCompressionIsIncreasedAfterClipping) {
+ AgcManagerDirectTestHelper helper;
+ helper.SetVolumeAndProcess(/*volume=*/210);
+
+ EXPECT_CALL(*helper.mock_agc, Reset()).Times(AtLeast(1));
+ helper.CallPreProc(/*num_calls=*/1, kAboveClippedThreshold);
+ EXPECT_EQ(195, helper.manager.stream_analog_level());
+
+ EXPECT_CALL(*helper.mock_agc, GetRmsErrorDb(_))
+ .WillOnce(DoAll(SetArgPointee<0>(11), Return(true)))
+ .WillOnce(DoAll(SetArgPointee<0>(11), Return(true)))
+ .WillOnce(DoAll(SetArgPointee<0>(11), Return(true)))
+ .WillOnce(DoAll(SetArgPointee<0>(11), Return(true)))
+ .WillOnce(DoAll(SetArgPointee<0>(11), Return(true)))
+ .WillRepeatedly(Return(false));
+ helper.CallProcess(/*num_calls=*/19);
+ EXPECT_CALL(helper.mock_gain_control, set_compression_gain_db(8))
+ .WillOnce(Return(0));
+ helper.CallProcess(/*num_calls=*/20);
+ EXPECT_CALL(helper.mock_gain_control, set_compression_gain_db(9))
+ .WillOnce(Return(0));
+ helper.CallProcess(/*num_calls=*/20);
+ EXPECT_CALL(helper.mock_gain_control, set_compression_gain_db(10))
+ .WillOnce(Return(0));
+ helper.CallProcess(/*num_calls=*/20);
+ EXPECT_CALL(helper.mock_gain_control, set_compression_gain_db(11))
+ .WillOnce(Return(0));
+ helper.CallProcess(/*num_calls=*/20);
+ EXPECT_CALL(helper.mock_gain_control, set_compression_gain_db(12))
+ .WillOnce(Return(0));
+ helper.CallProcess(/*num_calls=*/20);
+ EXPECT_CALL(helper.mock_gain_control, set_compression_gain_db(13))
+ .WillOnce(Return(0));
+ helper.CallProcess(/*num_calls=*/1);
+
+ // Continue clipping until we hit the maximum surplus compression.
+ helper.CallPreProc(/*num_calls=*/300,
+ /*clipped_ratio=*/kAboveClippedThreshold);
+ EXPECT_CALL(*helper.mock_agc, Reset()).Times(AtLeast(1));
+ helper.CallPreProc(/*num_calls=*/1, /*clipped_ratio=*/kAboveClippedThreshold);
+ EXPECT_EQ(180, helper.manager.stream_analog_level());
+
+ helper.CallPreProc(/*num_calls=*/300,
+ /*clipped_ratio=*/kAboveClippedThreshold);
+ EXPECT_CALL(*helper.mock_agc, Reset()).Times(AtLeast(1));
+ helper.CallPreProc(1, kAboveClippedThreshold);
+ EXPECT_EQ(kClippedMin, helper.manager.stream_analog_level());
+
+  // The current level is now at the minimum, but the maximum allowed level
+  // still has room to decrease.
+ helper.CallPreProc(/*num_calls=*/300,
+ /*clipped_ratio=*/kAboveClippedThreshold);
+ helper.CallPreProc(/*num_calls=*/1, /*clipped_ratio=*/kAboveClippedThreshold);
+
+ helper.CallPreProc(/*num_calls=*/300,
+ /*clipped_ratio=*/kAboveClippedThreshold);
+ helper.CallPreProc(/*num_calls=*/1, /*clipped_ratio=*/kAboveClippedThreshold);
+
+ helper.CallPreProc(/*num_calls=*/300,
+ /*clipped_ratio=*/kAboveClippedThreshold);
+ helper.CallPreProc(/*num_calls=*/1, /*clipped_ratio=*/kAboveClippedThreshold);
+
+ EXPECT_CALL(*helper.mock_agc, GetRmsErrorDb(_))
+ .WillOnce(DoAll(SetArgPointee<0>(16), Return(true)))
+ .WillOnce(DoAll(SetArgPointee<0>(16), Return(true)))
+ .WillOnce(DoAll(SetArgPointee<0>(16), Return(true)))
+ .WillOnce(DoAll(SetArgPointee<0>(16), Return(true)))
+ .WillRepeatedly(Return(false));
+ helper.CallProcess(/*num_calls=*/19);
+ EXPECT_CALL(helper.mock_gain_control, set_compression_gain_db(14))
+ .WillOnce(Return(0));
+ helper.CallProcess(/*num_calls=*/20);
+ EXPECT_CALL(helper.mock_gain_control, set_compression_gain_db(15))
+ .WillOnce(Return(0));
+ helper.CallProcess(/*num_calls=*/20);
+ EXPECT_CALL(helper.mock_gain_control, set_compression_gain_db(16))
+ .WillOnce(Return(0));
+ helper.CallProcess(/*num_calls=*/20);
+ EXPECT_CALL(helper.mock_gain_control, set_compression_gain_db(17))
+ .WillOnce(Return(0));
+ helper.CallProcess(/*num_calls=*/20);
+ EXPECT_CALL(helper.mock_gain_control, set_compression_gain_db(18))
+ .WillOnce(Return(0));
+ helper.CallProcess(/*num_calls=*/1);
+}
+
+TEST_P(AgcManagerDirectParametrizedTest, UserCanRaiseVolumeAfterClipping) {
+ AgcManagerDirectTestHelper helper;
+ helper.SetVolumeAndProcess(/*volume=*/225);
+
+ EXPECT_CALL(*helper.mock_agc, Reset()).Times(AtLeast(1));
+ helper.CallPreProc(/*num_calls=*/1, /*clipped_ratio=*/kAboveClippedThreshold);
+ EXPECT_EQ(210, helper.manager.stream_analog_level());
+
+ // High enough error to trigger a volume check.
+ EXPECT_CALL(*helper.mock_agc, GetRmsErrorDb(_))
+ .WillOnce(DoAll(SetArgPointee<0>(14), Return(true)));
+ // User changed the volume.
+ helper.manager.set_stream_analog_level(250);
+ EXPECT_CALL(*helper.mock_agc, Reset()).Times(AtLeast(1));
+ helper.CallProcess(/*num_calls=*/1);
+ EXPECT_EQ(250, helper.manager.stream_analog_level());
+
+ // Move down...
+ EXPECT_CALL(*helper.mock_agc, GetRmsErrorDb(_))
+ .WillOnce(DoAll(SetArgPointee<0>(-10), Return(true)));
+ helper.CallProcess(/*num_calls=*/1);
+ EXPECT_EQ(210, helper.manager.stream_analog_level());
+ // And back up to the new max established by the user.
+ EXPECT_CALL(*helper.mock_agc, GetRmsErrorDb(_))
+ .WillOnce(DoAll(SetArgPointee<0>(40), Return(true)));
+ helper.CallProcess(/*num_calls=*/1);
+ EXPECT_EQ(250, helper.manager.stream_analog_level());
+ // Will not move above new maximum.
+ EXPECT_CALL(*helper.mock_agc, GetRmsErrorDb(_))
+ .WillOnce(DoAll(SetArgPointee<0>(30), Return(true)));
+ helper.CallProcess(/*num_calls=*/1);
+ EXPECT_EQ(250, helper.manager.stream_analog_level());
+}
+
+TEST_P(AgcManagerDirectParametrizedTest, ClippingDoesNotPullLowVolumeBackUp) {
+ AgcManagerDirectTestHelper helper;
+ helper.SetVolumeAndProcess(/*volume=*/80);
+
+ EXPECT_CALL(*helper.mock_agc, Reset()).Times(0);
+ int initial_volume = helper.manager.stream_analog_level();
+ helper.CallPreProc(/*num_calls=*/1, /*clipped_ratio=*/kAboveClippedThreshold);
+ EXPECT_EQ(initial_volume, helper.manager.stream_analog_level());
+}
+
+TEST_P(AgcManagerDirectParametrizedTest, TakesNoActionOnZeroMicVolume) {
+ AgcManagerDirectTestHelper helper;
+ helper.FirstProcess();
+
+ EXPECT_CALL(*helper.mock_agc, GetRmsErrorDb(_))
+ .WillRepeatedly(DoAll(SetArgPointee<0>(30), Return(true)));
+ helper.manager.set_stream_analog_level(0);
+ helper.CallProcess(/*num_calls=*/10);
+ EXPECT_EQ(0, helper.manager.stream_analog_level());
+}
+
+TEST_P(AgcManagerDirectParametrizedTest, ClippingDetectionLowersVolume) {
+ AgcManagerDirectTestHelper helper;
+ helper.SetVolumeAndProcess(/*volume=*/255);
+ EXPECT_EQ(255, helper.manager.stream_analog_level());
+ helper.CallPreProcForChangingAudio(/*num_calls=*/100, /*peak_ratio=*/0.99f);
+ EXPECT_EQ(255, helper.manager.stream_analog_level());
+ helper.CallPreProcForChangingAudio(/*num_calls=*/100, /*peak_ratio=*/1.0f);
+ EXPECT_EQ(240, helper.manager.stream_analog_level());
+}
+
+TEST_P(AgcManagerDirectParametrizedTest,
+ DisabledClippingPredictorDoesNotLowerVolume) {
+ AgcManagerDirectTestHelper helper;
+ helper.SetVolumeAndProcess(/*volume=*/255);
+ EXPECT_FALSE(helper.manager.clipping_predictor_enabled());
+ EXPECT_EQ(255, helper.manager.stream_analog_level());
+ helper.CallPreProcForChangingAudio(/*num_calls=*/100, /*peak_ratio=*/0.99f);
+ EXPECT_EQ(255, helper.manager.stream_analog_level());
+ helper.CallPreProcForChangingAudio(/*num_calls=*/100, /*peak_ratio=*/0.99f);
+ EXPECT_EQ(255, helper.manager.stream_analog_level());
+}
+
+TEST_P(AgcManagerDirectParametrizedTest, DisableDigitalDisablesDigital) {
+ auto agc = std::unique_ptr<Agc>(new ::testing::NiceMock<MockAgc>());
+ MockGainControl mock_gain_control;
+ EXPECT_CALL(mock_gain_control, set_mode(GainControl::kFixedDigital));
+ EXPECT_CALL(mock_gain_control, set_target_level_dbfs(0));
+ EXPECT_CALL(mock_gain_control, set_compression_gain_db(0));
+ EXPECT_CALL(mock_gain_control, enable_limiter(false));
+
+ AnalogAgcConfig config;
+ config.enable_digital_adaptive = false;
+ auto manager = std::make_unique<AgcManagerDirect>(kNumChannels, config);
+ manager->Initialize();
+ manager->SetupDigitalGainControl(mock_gain_control);
+}
+
+TEST(AgcManagerDirectTest, AgcMinMicLevelExperimentDefault) {
+ std::unique_ptr<AgcManagerDirect> manager =
+ CreateAgcManagerDirect(kInitialVolume, kClippedLevelStep,
+ kClippedRatioThreshold, kClippedWaitFrames);
+ EXPECT_EQ(manager->channel_agcs_[0]->min_mic_level(), kMinMicLevel);
+ EXPECT_EQ(manager->channel_agcs_[0]->startup_min_level(), kInitialVolume);
+}
+
+TEST(AgcManagerDirectTest, AgcMinMicLevelExperimentDisabled) {
+ for (const std::string& field_trial_suffix : {"", "_20220210"}) {
+ test::ScopedFieldTrials field_trial(
+ GetAgcMinMicLevelExperimentFieldTrial("Disabled" + field_trial_suffix));
+ std::unique_ptr<AgcManagerDirect> manager =
+ CreateAgcManagerDirect(kInitialVolume, kClippedLevelStep,
+ kClippedRatioThreshold, kClippedWaitFrames);
+ EXPECT_EQ(manager->channel_agcs_[0]->min_mic_level(), kMinMicLevel);
+ EXPECT_EQ(manager->channel_agcs_[0]->startup_min_level(), kInitialVolume);
+ }
+}
+
+// Checks that a field-trial parameter outside of the valid range [0,255] is
+// ignored.
+TEST(AgcManagerDirectTest, AgcMinMicLevelExperimentOutOfRangeAbove) {
+ test::ScopedFieldTrials field_trial(
+ GetAgcMinMicLevelExperimentFieldTrial("Enabled-256"));
+ std::unique_ptr<AgcManagerDirect> manager =
+ CreateAgcManagerDirect(kInitialVolume, kClippedLevelStep,
+ kClippedRatioThreshold, kClippedWaitFrames);
+ EXPECT_EQ(manager->channel_agcs_[0]->min_mic_level(), kMinMicLevel);
+ EXPECT_EQ(manager->channel_agcs_[0]->startup_min_level(), kInitialVolume);
+}
+
+// Checks that a field-trial parameter outside of the valid range [0,255] is
+// ignored.
+TEST(AgcManagerDirectTest, AgcMinMicLevelExperimentOutOfRangeBelow) {
+ test::ScopedFieldTrials field_trial(
+ GetAgcMinMicLevelExperimentFieldTrial("Enabled--1"));
+ std::unique_ptr<AgcManagerDirect> manager =
+ CreateAgcManagerDirect(kInitialVolume, kClippedLevelStep,
+ kClippedRatioThreshold, kClippedWaitFrames);
+ EXPECT_EQ(manager->channel_agcs_[0]->min_mic_level(), kMinMicLevel);
+ EXPECT_EQ(manager->channel_agcs_[0]->startup_min_level(), kInitialVolume);
+}
+
+// Verifies that a valid experiment changes the minimum microphone level. The
+// start volume is larger than the min level and should therefore not be
+// changed.
+TEST(AgcManagerDirectTest, AgcMinMicLevelExperimentEnabled50) {
+ constexpr int kMinMicLevelOverride = 50;
+ for (const std::string& field_trial_suffix : {"", "_20220210"}) {
+ SCOPED_TRACE(field_trial_suffix);
+ test::ScopedFieldTrials field_trial(
+ GetAgcMinMicLevelExperimentFieldTrialEnabled(kMinMicLevelOverride,
+ field_trial_suffix));
+ std::unique_ptr<AgcManagerDirect> manager =
+ CreateAgcManagerDirect(kInitialVolume, kClippedLevelStep,
+ kClippedRatioThreshold, kClippedWaitFrames);
+ EXPECT_EQ(manager->channel_agcs_[0]->min_mic_level(), kMinMicLevelOverride);
+ EXPECT_EQ(manager->channel_agcs_[0]->startup_min_level(), kInitialVolume);
+ }
+}
+
+// Checks that, when the "WebRTC-Audio-2ndAgcMinMicLevelExperiment" field trial
+// is specified with a valid value, the mic level never gets lowered beyond the
+// override value in the presence of clipping.
+TEST(AgcManagerDirectTest, AgcMinMicLevelExperimentCheckMinLevelWithClipping) {
+ constexpr int kMinMicLevelOverride = 250;
+
+  // Create and initialize two AGCs, one with the relevant field trial
+  // specified and one without.
+ const auto factory = []() {
+ std::unique_ptr<AgcManagerDirect> manager =
+ CreateAgcManagerDirect(kInitialVolume, kClippedLevelStep,
+ kClippedRatioThreshold, kClippedWaitFrames);
+ manager->Initialize();
+ manager->set_stream_analog_level(kInitialVolume);
+ return manager;
+ };
+ std::unique_ptr<AgcManagerDirect> manager = factory();
+ std::unique_ptr<AgcManagerDirect> manager_with_override;
+ {
+ test::ScopedFieldTrials field_trial(
+ GetAgcMinMicLevelExperimentFieldTrialEnabled(kMinMicLevelOverride));
+ manager_with_override = factory();
+ }
+
+  // Create a test input signal which contains 80% clipped samples.
+ AudioBuffer audio_buffer(kSampleRateHz, 1, kSampleRateHz, 1, kSampleRateHz,
+ 1);
+ WriteAudioBufferSamples(/*samples_value=*/4000.0f, /*clipped_ratio=*/0.8f,
+ audio_buffer);
+
+ // Simulate 4 seconds of clipping; it is expected to trigger a downward
+ // adjustment of the analog gain.
+ CallPreProcessAndProcess(/*num_calls=*/400, audio_buffer, *manager);
+ CallPreProcessAndProcess(/*num_calls=*/400, audio_buffer,
+ *manager_with_override);
+
+ // Make sure that an adaptation occurred.
+ ASSERT_GT(manager->stream_analog_level(), 0);
+
+ // Check that the test signal triggers a larger downward adaptation for
+ // `manager`, which is allowed to reach a lower gain.
+ EXPECT_GT(manager_with_override->stream_analog_level(),
+ manager->stream_analog_level());
+ // Check that the gain selected by `manager_with_override` equals the minimum
+ // value overridden via field trial.
+ EXPECT_EQ(manager_with_override->stream_analog_level(), kMinMicLevelOverride);
+}
+
+// Checks that, when the "WebRTC-Audio-2ndAgcMinMicLevelExperiment" field trial
+// is specified with a value lower than `clipped_level_min`, the behavior of
+// the analog gain controller is the same as when the field trial is not
+// specified.
+TEST(AgcManagerDirectTest,
+ AgcMinMicLevelExperimentCompareMicLevelWithClipping) {
+  // Create and initialize two AGCs, one with the relevant field trial
+  // specified and one without.
+ const auto factory = []() {
+ // Use a large clipped level step to more quickly decrease the analog gain
+ // with clipping.
+ AnalogAgcConfig config = kDefaultAnalogConfig;
+ config.startup_min_volume = kInitialVolume;
+ config.enable_digital_adaptive = false;
+ config.clipped_level_step = 64;
+ config.clipped_ratio_threshold = kClippedRatioThreshold;
+ config.clipped_wait_frames = kClippedWaitFrames;
+ auto controller =
+ std::make_unique<AgcManagerDirect>(/*num_capture_channels=*/1, config);
+ controller->Initialize();
+ controller->set_stream_analog_level(kInitialVolume);
+ return controller;
+ };
+ std::unique_ptr<AgcManagerDirect> manager = factory();
+ std::unique_ptr<AgcManagerDirect> manager_with_override;
+ {
+ constexpr int kMinMicLevelOverride = 20;
+ static_assert(
+ kDefaultAnalogConfig.clipped_level_min >= kMinMicLevelOverride,
+ "Use a lower override value.");
+ test::ScopedFieldTrials field_trial(
+ GetAgcMinMicLevelExperimentFieldTrialEnabled(kMinMicLevelOverride));
+ manager_with_override = factory();
+ }
+
+  // Create a test input signal which contains 80% clipped samples.
+ AudioBuffer audio_buffer(kSampleRateHz, 1, kSampleRateHz, 1, kSampleRateHz,
+ 1);
+ WriteAudioBufferSamples(/*samples_value=*/4000.0f, /*clipped_ratio=*/0.8f,
+ audio_buffer);
+
+ // Simulate 4 seconds of clipping; it is expected to trigger a downward
+ // adjustment of the analog gain.
+ CallPreProcessAndProcess(/*num_calls=*/400, audio_buffer, *manager);
+ CallPreProcessAndProcess(/*num_calls=*/400, audio_buffer,
+ *manager_with_override);
+
+ // Make sure that an adaptation occurred.
+ ASSERT_GT(manager->stream_analog_level(), 0);
+
+ // Check that the selected analog gain is the same for both controllers and
+ // that it equals the minimum level reached when clipping is handled. That is
+ // expected because the minimum microphone level override is less than the
+ // minimum level used when clipping is detected.
+ EXPECT_EQ(manager->stream_analog_level(),
+ manager_with_override->stream_analog_level());
+ EXPECT_EQ(manager_with_override->stream_analog_level(),
+ kDefaultAnalogConfig.clipped_level_min);
+}
+
+// TODO(bugs.webrtc.org/12774): Test the behavior of `clipped_level_step`.
+// TODO(bugs.webrtc.org/12774): Test the behavior of `clipped_ratio_threshold`.
+// TODO(bugs.webrtc.org/12774): Test the behavior of `clipped_wait_frames`.
+// Verifies that configurable clipping parameters are initialized as intended.
+TEST_P(AgcManagerDirectParametrizedTest, ClippingParametersVerified) {
+ std::unique_ptr<AgcManagerDirect> manager =
+ CreateAgcManagerDirect(kInitialVolume, kClippedLevelStep,
+ kClippedRatioThreshold, kClippedWaitFrames);
+ manager->Initialize();
+ EXPECT_EQ(manager->clipped_level_step_, kClippedLevelStep);
+ EXPECT_EQ(manager->clipped_ratio_threshold_, kClippedRatioThreshold);
+ EXPECT_EQ(manager->clipped_wait_frames_, kClippedWaitFrames);
+ std::unique_ptr<AgcManagerDirect> manager_custom =
+ CreateAgcManagerDirect(kInitialVolume,
+ /*clipped_level_step=*/10,
+ /*clipped_ratio_threshold=*/0.2f,
+ /*clipped_wait_frames=*/50);
+ manager_custom->Initialize();
+ EXPECT_EQ(manager_custom->clipped_level_step_, 10);
+ EXPECT_EQ(manager_custom->clipped_ratio_threshold_, 0.2f);
+ EXPECT_EQ(manager_custom->clipped_wait_frames_, 50);
+}
+
+TEST_P(AgcManagerDirectParametrizedTest,
+ DisableClippingPredictorDisablesClippingPredictor) {
+ // TODO(bugs.webrtc.org/12874): Use designated initializers once fixed.
+ ClippingPredictorConfig config;
+ config.enabled = false;
+
+ std::unique_ptr<AgcManagerDirect> manager = CreateAgcManagerDirect(
+ kInitialVolume, kClippedLevelStep, kClippedRatioThreshold,
+ kClippedWaitFrames, config);
+ manager->Initialize();
+ EXPECT_FALSE(manager->clipping_predictor_enabled());
+ EXPECT_FALSE(manager->use_clipping_predictor_step());
+}
+
+TEST_P(AgcManagerDirectParametrizedTest, ClippingPredictorDisabledByDefault) {
+ constexpr ClippingPredictorConfig kDefaultConfig;
+ EXPECT_FALSE(kDefaultConfig.enabled);
+}
+
+TEST_P(AgcManagerDirectParametrizedTest,
+ EnableClippingPredictorEnablesClippingPredictor) {
+ // TODO(bugs.webrtc.org/12874): Use designated initializers once fixed.
+ ClippingPredictorConfig config;
+ config.enabled = true;
+ config.use_predicted_step = true;
+
+ std::unique_ptr<AgcManagerDirect> manager = CreateAgcManagerDirect(
+ kInitialVolume, kClippedLevelStep, kClippedRatioThreshold,
+ kClippedWaitFrames, config);
+ manager->Initialize();
+ EXPECT_TRUE(manager->clipping_predictor_enabled());
+ EXPECT_TRUE(manager->use_clipping_predictor_step());
+}
+
+TEST_P(AgcManagerDirectParametrizedTest,
+ DisableClippingPredictorDoesNotLowerVolume) {
+ AudioBuffer audio_buffer(kSampleRateHz, kNumChannels, kSampleRateHz,
+ kNumChannels, kSampleRateHz, kNumChannels);
+
+ AnalogAgcConfig config = GetAnalogAgcTestConfig();
+ config.clipping_predictor.enabled = false;
+ AgcManagerDirect manager(config, new ::testing::NiceMock<MockAgc>());
+ manager.Initialize();
+ manager.set_stream_analog_level(/*level=*/255);
+ EXPECT_FALSE(manager.clipping_predictor_enabled());
+ EXPECT_FALSE(manager.use_clipping_predictor_step());
+ EXPECT_EQ(manager.stream_analog_level(), 255);
+ manager.Process(&audio_buffer);
+ CallPreProcessAudioBuffer(/*num_calls=*/10, /*peak_ratio=*/0.99f, manager);
+ EXPECT_EQ(manager.stream_analog_level(), 255);
+ CallPreProcessAudioBuffer(/*num_calls=*/300, /*peak_ratio=*/0.99f, manager);
+ EXPECT_EQ(manager.stream_analog_level(), 255);
+ CallPreProcessAudioBuffer(/*num_calls=*/10, /*peak_ratio=*/0.99f, manager);
+ EXPECT_EQ(manager.stream_analog_level(), 255);
+}
+
+TEST_P(AgcManagerDirectParametrizedTest,
+ UsedClippingPredictionsProduceLowerAnalogLevels) {
+ AudioBuffer audio_buffer(kSampleRateHz, kNumChannels, kSampleRateHz,
+ kNumChannels, kSampleRateHz, kNumChannels);
+
+ AnalogAgcConfig config_with_prediction = GetAnalogAgcTestConfig();
+ config_with_prediction.clipping_predictor.enabled = true;
+ config_with_prediction.clipping_predictor.use_predicted_step = true;
+ AnalogAgcConfig config_without_prediction = GetAnalogAgcTestConfig();
+ config_without_prediction.clipping_predictor.enabled = false;
+ AgcManagerDirect manager_with_prediction(config_with_prediction,
+ new ::testing::NiceMock<MockAgc>());
+ AgcManagerDirect manager_without_prediction(
+ config_without_prediction, new ::testing::NiceMock<MockAgc>());
+
+ manager_with_prediction.Initialize();
+ manager_without_prediction.Initialize();
+
+ constexpr int kInitialLevel = 255;
+ constexpr float kClippingPeakRatio = 1.0f;
+ constexpr float kCloseToClippingPeakRatio = 0.99f;
+ constexpr float kZeroPeakRatio = 0.0f;
+ manager_with_prediction.set_stream_analog_level(kInitialLevel);
+ manager_without_prediction.set_stream_analog_level(kInitialLevel);
+ manager_with_prediction.Process(&audio_buffer);
+ manager_without_prediction.Process(&audio_buffer);
+ EXPECT_TRUE(manager_with_prediction.clipping_predictor_enabled());
+ EXPECT_FALSE(manager_without_prediction.clipping_predictor_enabled());
+ EXPECT_TRUE(manager_with_prediction.use_clipping_predictor_step());
+ EXPECT_EQ(manager_with_prediction.stream_analog_level(), kInitialLevel);
+ EXPECT_EQ(manager_without_prediction.stream_analog_level(), kInitialLevel);
+
+ // Expect a change in the analog level when the prediction step is used.
+ CallPreProcessAudioBuffer(/*num_calls=*/10, kCloseToClippingPeakRatio,
+ manager_with_prediction);
+ CallPreProcessAudioBuffer(/*num_calls=*/10, kCloseToClippingPeakRatio,
+ manager_without_prediction);
+ EXPECT_EQ(manager_with_prediction.stream_analog_level(),
+ kInitialLevel - kClippedLevelStep);
+ EXPECT_EQ(manager_without_prediction.stream_analog_level(), kInitialLevel);
+
+ // Expect no change during waiting.
+ CallPreProcessAudioBuffer(kClippedWaitFrames, kCloseToClippingPeakRatio,
+ manager_with_prediction);
+ CallPreProcessAudioBuffer(kClippedWaitFrames, kCloseToClippingPeakRatio,
+ manager_without_prediction);
+ EXPECT_EQ(manager_with_prediction.stream_analog_level(),
+ kInitialLevel - kClippedLevelStep);
+ EXPECT_EQ(manager_without_prediction.stream_analog_level(), kInitialLevel);
+
+ // Expect a change when the prediction step is used.
+ CallPreProcessAudioBuffer(/*num_calls=*/10, kCloseToClippingPeakRatio,
+ manager_with_prediction);
+ CallPreProcessAudioBuffer(/*num_calls=*/10, kCloseToClippingPeakRatio,
+ manager_without_prediction);
+ EXPECT_EQ(manager_with_prediction.stream_analog_level(),
+ kInitialLevel - 2 * kClippedLevelStep);
+ EXPECT_EQ(manager_without_prediction.stream_analog_level(), kInitialLevel);
+
+ // Expect no change when clipping is not detected or predicted.
+ CallPreProcessAudioBuffer(2 * kClippedWaitFrames, kZeroPeakRatio,
+ manager_with_prediction);
+ CallPreProcessAudioBuffer(2 * kClippedWaitFrames, kZeroPeakRatio,
+ manager_without_prediction);
+ EXPECT_EQ(manager_with_prediction.stream_analog_level(),
+ kInitialLevel - 2 * kClippedLevelStep);
+ EXPECT_EQ(manager_without_prediction.stream_analog_level(), kInitialLevel);
+
+ // Expect a change for clipping frames.
+ CallPreProcessAudioBuffer(/*num_calls=*/1, kClippingPeakRatio,
+ manager_with_prediction);
+ CallPreProcessAudioBuffer(/*num_calls=*/1, kClippingPeakRatio,
+ manager_without_prediction);
+ EXPECT_EQ(manager_with_prediction.stream_analog_level(),
+ kInitialLevel - 3 * kClippedLevelStep);
+ EXPECT_EQ(manager_without_prediction.stream_analog_level(),
+ kInitialLevel - kClippedLevelStep);
+
+ // Expect no change during waiting.
+ CallPreProcessAudioBuffer(kClippedWaitFrames, kClippingPeakRatio,
+ manager_with_prediction);
+ CallPreProcessAudioBuffer(kClippedWaitFrames, kClippingPeakRatio,
+ manager_without_prediction);
+ EXPECT_EQ(manager_with_prediction.stream_analog_level(),
+ kInitialLevel - 3 * kClippedLevelStep);
+ EXPECT_EQ(manager_without_prediction.stream_analog_level(),
+ kInitialLevel - kClippedLevelStep);
+
+ // Expect a change for clipping frames.
+ CallPreProcessAudioBuffer(/*num_calls=*/1, kClippingPeakRatio,
+ manager_with_prediction);
+ CallPreProcessAudioBuffer(/*num_calls=*/1, kClippingPeakRatio,
+ manager_without_prediction);
+ EXPECT_EQ(manager_with_prediction.stream_analog_level(),
+ kInitialLevel - 4 * kClippedLevelStep);
+ EXPECT_EQ(manager_without_prediction.stream_analog_level(),
+ kInitialLevel - 2 * kClippedLevelStep);
+}
+
+TEST_P(AgcManagerDirectParametrizedTest,
+ UnusedClippingPredictionsProduceEqualAnalogLevels) {
+ AudioBuffer audio_buffer(kSampleRateHz, kNumChannels, kSampleRateHz,
+ kNumChannels, kSampleRateHz, kNumChannels);
+
+ AnalogAgcConfig config_with_prediction = GetAnalogAgcTestConfig();
+ config_with_prediction.clipping_predictor.enabled = true;
+ config_with_prediction.clipping_predictor.use_predicted_step = false;
+ AnalogAgcConfig config_without_prediction = GetAnalogAgcTestConfig();
+ config_without_prediction.clipping_predictor.enabled = false;
+ AgcManagerDirect manager_with_prediction(config_with_prediction,
+ new ::testing::NiceMock<MockAgc>());
+ AgcManagerDirect manager_without_prediction(
+ config_without_prediction, new ::testing::NiceMock<MockAgc>());
+
+ constexpr int kInitialLevel = 255;
+ constexpr float kClippingPeakRatio = 1.0f;
+ constexpr float kCloseToClippingPeakRatio = 0.99f;
+ constexpr float kZeroPeakRatio = 0.0f;
+ manager_with_prediction.Initialize();
+ manager_without_prediction.Initialize();
+ manager_with_prediction.set_stream_analog_level(kInitialLevel);
+ manager_without_prediction.set_stream_analog_level(kInitialLevel);
+ manager_with_prediction.Process(&audio_buffer);
+ manager_without_prediction.Process(&audio_buffer);
+ EXPECT_TRUE(manager_with_prediction.clipping_predictor_enabled());
+ EXPECT_FALSE(manager_without_prediction.clipping_predictor_enabled());
+ EXPECT_FALSE(manager_with_prediction.use_clipping_predictor_step());
+ EXPECT_EQ(manager_with_prediction.stream_analog_level(), kInitialLevel);
+ EXPECT_EQ(manager_without_prediction.stream_analog_level(), kInitialLevel);
+
+ // Expect no change in the analog level for non-clipping frames.
+ CallPreProcessAudioBuffer(/*num_calls=*/10, kCloseToClippingPeakRatio,
+ manager_with_prediction);
+ CallPreProcessAudioBuffer(/*num_calls=*/10, kCloseToClippingPeakRatio,
+ manager_without_prediction);
+ EXPECT_EQ(manager_with_prediction.stream_analog_level(),
+ manager_without_prediction.stream_analog_level());
+
+ // Expect no change for non-clipping frames.
+ CallPreProcessAudioBuffer(kClippedWaitFrames, kCloseToClippingPeakRatio,
+ manager_with_prediction);
+ CallPreProcessAudioBuffer(kClippedWaitFrames, kCloseToClippingPeakRatio,
+ manager_without_prediction);
+ EXPECT_EQ(manager_with_prediction.stream_analog_level(),
+ manager_without_prediction.stream_analog_level());
+
+ // Expect no change for non-clipping frames.
+ CallPreProcessAudioBuffer(/*num_calls=*/10, kCloseToClippingPeakRatio,
+ manager_with_prediction);
+ CallPreProcessAudioBuffer(/*num_calls=*/10, kCloseToClippingPeakRatio,
+ manager_without_prediction);
+ EXPECT_EQ(manager_with_prediction.stream_analog_level(),
+ manager_without_prediction.stream_analog_level());
+
+ // Expect no change when clipping is not detected or predicted.
+ CallPreProcessAudioBuffer(2 * kClippedWaitFrames, kZeroPeakRatio,
+ manager_with_prediction);
+ CallPreProcessAudioBuffer(2 * kClippedWaitFrames, kZeroPeakRatio,
+ manager_without_prediction);
+ EXPECT_EQ(manager_with_prediction.stream_analog_level(),
+ manager_without_prediction.stream_analog_level());
+
+ // Expect a change for clipping frames.
+ CallPreProcessAudioBuffer(/*num_calls=*/1, kClippingPeakRatio,
+ manager_with_prediction);
+ CallPreProcessAudioBuffer(/*num_calls=*/1, kClippingPeakRatio,
+ manager_without_prediction);
+ EXPECT_EQ(manager_with_prediction.stream_analog_level(),
+ manager_without_prediction.stream_analog_level());
+
+ // Expect no change during waiting.
+ CallPreProcessAudioBuffer(kClippedWaitFrames, kClippingPeakRatio,
+ manager_with_prediction);
+ CallPreProcessAudioBuffer(kClippedWaitFrames, kClippingPeakRatio,
+ manager_without_prediction);
+ EXPECT_EQ(manager_with_prediction.stream_analog_level(),
+ manager_without_prediction.stream_analog_level());
+
+ // Expect a change for clipping frames.
+ CallPreProcessAudioBuffer(/*num_calls=*/1, kClippingPeakRatio,
+ manager_with_prediction);
+ CallPreProcessAudioBuffer(/*num_calls=*/1, kClippingPeakRatio,
+ manager_without_prediction);
+ EXPECT_EQ(manager_with_prediction.stream_analog_level(),
+ manager_without_prediction.stream_analog_level());
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/agc/analog_gain_stats_reporter.cc b/third_party/libwebrtc/modules/audio_processing/agc/analog_gain_stats_reporter.cc
new file mode 100644
index 0000000000..0d8753a7c8
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc/analog_gain_stats_reporter.cc
@@ -0,0 +1,132 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/agc/analog_gain_stats_reporter.h"
+
+#include <cmath>
+
+#include "rtc_base/logging.h"
+#include "rtc_base/numerics/safe_minmax.h"
+#include "system_wrappers/include/metrics.h"
+
+namespace webrtc {
+namespace {
+
+constexpr int kFramesIn60Seconds = 6000;
+constexpr int kMinGain = 0;
+constexpr int kMaxGain = 255;
+constexpr int kMaxUpdate = kMaxGain - kMinGain;
+
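+// Returns the rounded average update size, or 0 when `num_updates` is zero.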
+float ComputeAverageUpdate(int sum_updates, int num_updates) {
+ RTC_DCHECK_GE(sum_updates, 0);
+ RTC_DCHECK_LE(sum_updates, kMaxUpdate * kFramesIn60Seconds);
+ RTC_DCHECK_GE(num_updates, 0);
+ RTC_DCHECK_LE(num_updates, kFramesIn60Seconds);
+ if (num_updates == 0) {
+ return 0.0f;
+ }
+ return std::round(static_cast<float>(sum_updates) /
+ static_cast<float>(num_updates));
+}
+} // namespace
+
+AnalogGainStatsReporter::AnalogGainStatsReporter() = default;
+
+AnalogGainStatsReporter::~AnalogGainStatsReporter() = default;
+
+void AnalogGainStatsReporter::UpdateStatistics(int analog_mic_level) {
+ RTC_DCHECK_GE(analog_mic_level, kMinGain);
+ RTC_DCHECK_LE(analog_mic_level, kMaxGain);
+ if (previous_analog_mic_level_.has_value() &&
+ analog_mic_level != previous_analog_mic_level_.value()) {
+ const int level_change =
+ analog_mic_level - previous_analog_mic_level_.value();
+ if (level_change < 0) {
+ ++level_update_stats_.num_decreases;
+ level_update_stats_.sum_decreases -= level_change;
+ } else {
+ ++level_update_stats_.num_increases;
+ level_update_stats_.sum_increases += level_change;
+ }
+ }
+ // Periodically log analog gain change metrics.
+ if (++log_level_update_stats_counter_ >= kFramesIn60Seconds) {
+ LogLevelUpdateStats();
+ level_update_stats_ = {};
+ log_level_update_stats_counter_ = 0;
+ }
+ previous_analog_mic_level_ = analog_mic_level;
+}
+
+void AnalogGainStatsReporter::LogLevelUpdateStats() const {
+ const float average_decrease = ComputeAverageUpdate(
+ level_update_stats_.sum_decreases, level_update_stats_.num_decreases);
+ const float average_increase = ComputeAverageUpdate(
+ level_update_stats_.sum_increases, level_update_stats_.num_increases);
+ const int num_updates =
+ level_update_stats_.num_decreases + level_update_stats_.num_increases;
+ const float average_update = ComputeAverageUpdate(
+ level_update_stats_.sum_decreases + level_update_stats_.sum_increases,
+ num_updates);
+ RTC_DLOG(LS_INFO) << "Analog gain update rate: "
+ << "num_updates=" << num_updates
+ << ", num_decreases=" << level_update_stats_.num_decreases
+ << ", num_increases=" << level_update_stats_.num_increases;
+ RTC_DLOG(LS_INFO) << "Analog gain update average: "
+ << "average_update=" << average_update
+ << ", average_decrease=" << average_decrease
+ << ", average_increase=" << average_increase;
+ RTC_HISTOGRAM_COUNTS_LINEAR(
+ /*name=*/"WebRTC.Audio.ApmAnalogGainDecreaseRate",
+ /*sample=*/level_update_stats_.num_decreases,
+ /*min=*/1,
+ /*max=*/kFramesIn60Seconds,
+ /*bucket_count=*/50);
+ if (level_update_stats_.num_decreases > 0) {
+ RTC_HISTOGRAM_COUNTS_LINEAR(
+ /*name=*/"WebRTC.Audio.ApmAnalogGainDecreaseAverage",
+ /*sample=*/average_decrease,
+ /*min=*/1,
+ /*max=*/kMaxUpdate,
+ /*bucket_count=*/50);
+ }
+ RTC_HISTOGRAM_COUNTS_LINEAR(
+ /*name=*/"WebRTC.Audio.ApmAnalogGainIncreaseRate",
+ /*sample=*/level_update_stats_.num_increases,
+ /*min=*/1,
+ /*max=*/kFramesIn60Seconds,
+ /*bucket_count=*/50);
+ if (level_update_stats_.num_increases > 0) {
+ RTC_HISTOGRAM_COUNTS_LINEAR(
+ /*name=*/"WebRTC.Audio.ApmAnalogGainIncreaseAverage",
+ /*sample=*/average_increase,
+ /*min=*/1,
+ /*max=*/kMaxUpdate,
+ /*bucket_count=*/50);
+ }
+ RTC_HISTOGRAM_COUNTS_LINEAR(
+ /*name=*/"WebRTC.Audio.ApmAnalogGainUpdateRate",
+ /*sample=*/num_updates,
+ /*min=*/1,
+ /*max=*/kFramesIn60Seconds,
+ /*bucket_count=*/50);
+ if (num_updates > 0) {
+ RTC_HISTOGRAM_COUNTS_LINEAR(
+ /*name=*/"WebRTC.Audio.ApmAnalogGainUpdateAverage",
+ /*sample=*/average_update,
+ /*min=*/1,
+ /*max=*/kMaxUpdate,
+ /*bucket_count=*/50);
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/agc/analog_gain_stats_reporter.h b/third_party/libwebrtc/modules/audio_processing/agc/analog_gain_stats_reporter.h
new file mode 100644
index 0000000000..c9442e8a43
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc/analog_gain_stats_reporter.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AGC_ANALOG_GAIN_STATS_REPORTER_H_
+#define MODULES_AUDIO_PROCESSING_AGC_ANALOG_GAIN_STATS_REPORTER_H_
+
+#include "absl/types/optional.h"
+#include "rtc_base/gtest_prod_util.h"
+
+namespace webrtc {
+
+// Analog gain statistics calculator. Computes aggregate stats based on the
+// framewise mic levels processed in `UpdateStatistics()`. Periodically logs the
+// statistics into histograms.
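+//
+// A minimal usage sketch (assuming one call per 10 ms capture frame and an
+// analog mic level in [0, 255]):
+//   AnalogGainStatsReporter reporter;
+//   reporter.UpdateStatistics(analog_mic_level);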
+class AnalogGainStatsReporter {
+ public:
+ AnalogGainStatsReporter();
+ AnalogGainStatsReporter(const AnalogGainStatsReporter&) = delete;
+  AnalogGainStatsReporter& operator=(const AnalogGainStatsReporter&) = delete;
+ ~AnalogGainStatsReporter();
+
+  // Updates the stats based on `analog_mic_level`. Periodically logs the
+  // stats into histograms.
+ void UpdateStatistics(int analog_mic_level);
+
+ private:
+ FRIEND_TEST_ALL_PREFIXES(AnalogGainStatsReporterTest,
+ CheckLevelUpdateStatsForEmptyStats);
+ FRIEND_TEST_ALL_PREFIXES(AnalogGainStatsReporterTest,
+ CheckLevelUpdateStatsAfterNoGainChange);
+ FRIEND_TEST_ALL_PREFIXES(AnalogGainStatsReporterTest,
+ CheckLevelUpdateStatsAfterGainIncrease);
+ FRIEND_TEST_ALL_PREFIXES(AnalogGainStatsReporterTest,
+ CheckLevelUpdateStatsAfterGainDecrease);
+ FRIEND_TEST_ALL_PREFIXES(AnalogGainStatsReporterTest,
+ CheckLevelUpdateStatsAfterReset);
+
+ // Stores analog gain update stats to enable calculation of update rate and
+ // average update separately for gain increases and decreases.
+ struct LevelUpdateStats {
+ int num_decreases = 0;
+ int num_increases = 0;
+ int sum_decreases = 0;
+ int sum_increases = 0;
+ } level_update_stats_;
+
+ // Returns a copy of the stored statistics. Use only for testing.
+ const LevelUpdateStats level_update_stats() const {
+ return level_update_stats_;
+ }
+
+  // Computes aggregate stats and logs them into histograms.
+ void LogLevelUpdateStats() const;
+
+ int log_level_update_stats_counter_ = 0;
+ absl::optional<int> previous_analog_mic_level_ = absl::nullopt;
+};
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AGC_ANALOG_GAIN_STATS_REPORTER_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/agc/analog_gain_stats_reporter_gn/moz.build b/third_party/libwebrtc/modules/audio_processing/agc/analog_gain_stats_reporter_gn/moz.build
new file mode 100644
index 0000000000..63f69dc9db
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc/analog_gain_stats_reporter_gn/moz.build
@@ -0,0 +1,205 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/audio_processing/agc/analog_gain_stats_reporter.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("analog_gain_stats_reporter_gn")
diff --git a/third_party/libwebrtc/modules/audio_processing/agc/analog_gain_stats_reporter_unittest.cc b/third_party/libwebrtc/modules/audio_processing/agc/analog_gain_stats_reporter_unittest.cc
new file mode 100644
index 0000000000..bc9559094b
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc/analog_gain_stats_reporter_unittest.cc
@@ -0,0 +1,169 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/agc/analog_gain_stats_reporter.h"
+
+#include "system_wrappers/include/metrics.h"
+#include "test/gmock.h"
+
+namespace webrtc {
+namespace {
+
+constexpr int kFramesIn60Seconds = 6000;
+
+class AnalogGainStatsReporterTest : public ::testing::Test {
+ public:
+ AnalogGainStatsReporterTest() {}
+
+ protected:
+ void SetUp() override { metrics::Reset(); }
+};
+
+TEST_F(AnalogGainStatsReporterTest, CheckLogLevelUpdateStatsEmpty) {
+ AnalogGainStatsReporter stats_reporter;
+ constexpr int kMicLevel = 10;
+ stats_reporter.UpdateStatistics(kMicLevel);
+ // Update almost until the periodic logging and reset.
+ for (int i = 0; i < kFramesIn60Seconds - 2; i += 2) {
+ stats_reporter.UpdateStatistics(kMicLevel + 2);
+ stats_reporter.UpdateStatistics(kMicLevel);
+ }
+ EXPECT_METRIC_THAT(metrics::Samples("WebRTC.Audio.ApmAnalogGainUpdateRate"),
+ ::testing::ElementsAre());
+ EXPECT_METRIC_THAT(metrics::Samples("WebRTC.Audio.ApmAnalogGainDecreaseRate"),
+ ::testing::ElementsAre());
+ EXPECT_METRIC_THAT(metrics::Samples("WebRTC.Audio.ApmAnalogGainIncreaseRate"),
+ ::testing::ElementsAre());
+ EXPECT_METRIC_THAT(
+ metrics::Samples("WebRTC.Audio.ApmAnalogGainUpdateAverage"),
+ ::testing::ElementsAre());
+ EXPECT_METRIC_THAT(
+ metrics::Samples("WebRTC.Audio.ApmAnalogGainDecreaseAverage"),
+ ::testing::ElementsAre());
+ EXPECT_METRIC_THAT(
+ metrics::Samples("WebRTC.Audio.ApmAnalogGainIncreaseAverage"),
+ ::testing::ElementsAre());
+}
+
+TEST_F(AnalogGainStatsReporterTest, CheckLogLevelUpdateStatsNotEmpty) {
+ AnalogGainStatsReporter stats_reporter;
+ constexpr int kMicLevel = 10;
+ stats_reporter.UpdateStatistics(kMicLevel);
+ // Update until periodic logging.
+ for (int i = 0; i < kFramesIn60Seconds; i += 2) {
+ stats_reporter.UpdateStatistics(kMicLevel + 2);
+ stats_reporter.UpdateStatistics(kMicLevel);
+ }
+ // Update until periodic logging.
+ for (int i = 0; i < kFramesIn60Seconds; i += 2) {
+ stats_reporter.UpdateStatistics(kMicLevel + 3);
+ stats_reporter.UpdateStatistics(kMicLevel);
+ }
+ EXPECT_METRIC_THAT(
+ metrics::Samples("WebRTC.Audio.ApmAnalogGainUpdateRate"),
+ ::testing::ElementsAre(::testing::Pair(kFramesIn60Seconds - 1, 1),
+ ::testing::Pair(kFramesIn60Seconds, 1)));
+ EXPECT_METRIC_THAT(
+ metrics::Samples("WebRTC.Audio.ApmAnalogGainDecreaseRate"),
+ ::testing::ElementsAre(::testing::Pair(kFramesIn60Seconds / 2 - 1, 1),
+ ::testing::Pair(kFramesIn60Seconds / 2, 1)));
+ EXPECT_METRIC_THAT(
+ metrics::Samples("WebRTC.Audio.ApmAnalogGainIncreaseRate"),
+ ::testing::ElementsAre(::testing::Pair(kFramesIn60Seconds / 2, 2)));
+ EXPECT_METRIC_THAT(
+ metrics::Samples("WebRTC.Audio.ApmAnalogGainUpdateAverage"),
+ ::testing::ElementsAre(::testing::Pair(2, 1), ::testing::Pair(3, 1)));
+ EXPECT_METRIC_THAT(
+ metrics::Samples("WebRTC.Audio.ApmAnalogGainDecreaseAverage"),
+ ::testing::ElementsAre(::testing::Pair(2, 1), ::testing::Pair(3, 1)));
+ EXPECT_METRIC_THAT(
+ metrics::Samples("WebRTC.Audio.ApmAnalogGainIncreaseAverage"),
+ ::testing::ElementsAre(::testing::Pair(2, 1), ::testing::Pair(3, 1)));
+}
+} // namespace
+
+TEST_F(AnalogGainStatsReporterTest, CheckLevelUpdateStatsForEmptyStats) {
+ AnalogGainStatsReporter stats_reporter;
+ const auto& update_stats = stats_reporter.level_update_stats();
+ EXPECT_EQ(update_stats.num_decreases, 0);
+ EXPECT_EQ(update_stats.sum_decreases, 0);
+ EXPECT_EQ(update_stats.num_increases, 0);
+ EXPECT_EQ(update_stats.sum_increases, 0);
+}
+
+TEST_F(AnalogGainStatsReporterTest, CheckLevelUpdateStatsAfterNoGainChange) {
+ constexpr int kMicLevel = 10;
+ AnalogGainStatsReporter stats_reporter;
+ stats_reporter.UpdateStatistics(kMicLevel);
+ stats_reporter.UpdateStatistics(kMicLevel);
+ stats_reporter.UpdateStatistics(kMicLevel);
+ const auto& update_stats = stats_reporter.level_update_stats();
+ EXPECT_EQ(update_stats.num_decreases, 0);
+ EXPECT_EQ(update_stats.sum_decreases, 0);
+ EXPECT_EQ(update_stats.num_increases, 0);
+ EXPECT_EQ(update_stats.sum_increases, 0);
+}
+
+TEST_F(AnalogGainStatsReporterTest, CheckLevelUpdateStatsAfterGainIncrease) {
+ constexpr int kMicLevel = 10;
+ AnalogGainStatsReporter stats_reporter;
+ stats_reporter.UpdateStatistics(kMicLevel);
+ stats_reporter.UpdateStatistics(kMicLevel + 4);
+ stats_reporter.UpdateStatistics(kMicLevel + 5);
+ const auto& update_stats = stats_reporter.level_update_stats();
+ EXPECT_EQ(update_stats.num_decreases, 0);
+ EXPECT_EQ(update_stats.sum_decreases, 0);
+ EXPECT_EQ(update_stats.num_increases, 2);
+ EXPECT_EQ(update_stats.sum_increases, 5);
+}
+
+TEST_F(AnalogGainStatsReporterTest, CheckLevelUpdateStatsAfterGainDecrease) {
+ constexpr int kMicLevel = 10;
+ AnalogGainStatsReporter stats_reporter;
+ stats_reporter.UpdateStatistics(kMicLevel);
+ stats_reporter.UpdateStatistics(kMicLevel - 4);
+ stats_reporter.UpdateStatistics(kMicLevel - 5);
+ const auto& stats_update = stats_reporter.level_update_stats();
+ EXPECT_EQ(stats_update.num_decreases, 2);
+ EXPECT_EQ(stats_update.sum_decreases, 5);
+ EXPECT_EQ(stats_update.num_increases, 0);
+ EXPECT_EQ(stats_update.sum_increases, 0);
+}
+
+TEST_F(AnalogGainStatsReporterTest, CheckLevelUpdateStatsAfterReset) {
+ AnalogGainStatsReporter stats_reporter;
+ constexpr int kMicLevel = 10;
+ stats_reporter.UpdateStatistics(kMicLevel);
+ // Update until the periodic reset.
+ for (int i = 0; i < kFramesIn60Seconds - 2; i += 2) {
+ stats_reporter.UpdateStatistics(kMicLevel + 2);
+ stats_reporter.UpdateStatistics(kMicLevel);
+ }
+ const auto& stats_before_reset = stats_reporter.level_update_stats();
+ EXPECT_EQ(stats_before_reset.num_decreases, kFramesIn60Seconds / 2 - 1);
+ EXPECT_EQ(stats_before_reset.sum_decreases, kFramesIn60Seconds - 2);
+ EXPECT_EQ(stats_before_reset.num_increases, kFramesIn60Seconds / 2 - 1);
+ EXPECT_EQ(stats_before_reset.sum_increases, kFramesIn60Seconds - 2);
+ stats_reporter.UpdateStatistics(kMicLevel + 2);
+ const auto& stats_during_reset = stats_reporter.level_update_stats();
+ EXPECT_EQ(stats_during_reset.num_decreases, 0);
+ EXPECT_EQ(stats_during_reset.sum_decreases, 0);
+ EXPECT_EQ(stats_during_reset.num_increases, 0);
+ EXPECT_EQ(stats_during_reset.sum_increases, 0);
+ stats_reporter.UpdateStatistics(kMicLevel);
+ stats_reporter.UpdateStatistics(kMicLevel + 3);
+ const auto& stats_after_reset = stats_reporter.level_update_stats();
+ EXPECT_EQ(stats_after_reset.num_decreases, 1);
+ EXPECT_EQ(stats_after_reset.sum_decreases, 2);
+ EXPECT_EQ(stats_after_reset.num_increases, 1);
+ EXPECT_EQ(stats_after_reset.sum_increases, 3);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/agc/clipping_predictor.cc b/third_party/libwebrtc/modules/audio_processing/agc/clipping_predictor.cc
new file mode 100644
index 0000000000..58b3a2769c
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc/clipping_predictor.cc
@@ -0,0 +1,383 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/agc/clipping_predictor.h"
+
+#include <algorithm>
+#include <memory>
+
+#include "common_audio/include/audio_util.h"
+#include "modules/audio_processing/agc/clipping_predictor_level_buffer.h"
+#include "modules/audio_processing/agc/gain_map_internal.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/numerics/safe_minmax.h"
+
+namespace webrtc {
+namespace {
+
+constexpr int kClippingPredictorMaxGainChange = 15;
+
+// Estimates the new level from the gain error; a copy of the function
+// `LevelFromGainError` in agc_manager_direct.cc.
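+// `kGainMap` maps each mic level in [0, 255] to a gain (in dB), so the loops
+// below walk the map until the accumulated gain change covers `gain_error`.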
+int LevelFromGainError(int gain_error,
+ int level,
+ int min_mic_level,
+ int max_mic_level) {
+ RTC_DCHECK_GE(level, 0);
+ RTC_DCHECK_LE(level, max_mic_level);
+ if (gain_error == 0) {
+ return level;
+ }
+ int new_level = level;
+ if (gain_error > 0) {
+ while (kGainMap[new_level] - kGainMap[level] < gain_error &&
+ new_level < max_mic_level) {
+ ++new_level;
+ }
+ } else {
+ while (kGainMap[new_level] - kGainMap[level] > gain_error &&
+ new_level > min_mic_level) {
+ --new_level;
+ }
+ }
+ return new_level;
+}
+
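+// Returns the crest factor of `level` in dB, i.e., the difference between the
+// peak level and the RMS level (`level.average` holds the mean of the squared
+// samples, hence the square root).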
+float ComputeCrestFactor(const ClippingPredictorLevelBuffer::Level& level) {
+ const float crest_factor =
+ FloatS16ToDbfs(level.max) - FloatS16ToDbfs(std::sqrt(level.average));
+ return crest_factor;
+}
+
+// Crest factor-based clipping prediction and clipped level step estimation.
+class ClippingEventPredictor : public ClippingPredictor {
+ public:
+ // ClippingEventPredictor with `num_channels` channels (limited to values
+ // higher than zero); window size `window_length` and reference window size
+ // `reference_window_length` (both referring to the number of frames in the
+ // respective sliding windows and limited to values higher than zero);
+ // reference window delay `reference_window_delay` (delay in frames, limited
+ // to values zero and higher with an additional requirement of
+  // `window_length` < `reference_window_length` + `reference_window_delay`);
+ // and an estimation peak threshold `clipping_threshold` and a crest factor
+ // drop threshold `crest_factor_margin` (both in dB).
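+  // E.g., `window_length` = 5, `reference_window_length` = 5 and
+  // `reference_window_delay` = 5 compare the most recent 5 frames against the
+  // 5 frames that preceded them.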
+ ClippingEventPredictor(int num_channels,
+ int window_length,
+ int reference_window_length,
+ int reference_window_delay,
+ float clipping_threshold,
+ float crest_factor_margin)
+ : window_length_(window_length),
+ reference_window_length_(reference_window_length),
+ reference_window_delay_(reference_window_delay),
+ clipping_threshold_(clipping_threshold),
+ crest_factor_margin_(crest_factor_margin) {
+ RTC_DCHECK_GT(num_channels, 0);
+ RTC_DCHECK_GT(window_length, 0);
+ RTC_DCHECK_GT(reference_window_length, 0);
+ RTC_DCHECK_GE(reference_window_delay, 0);
+ RTC_DCHECK_GT(reference_window_length + reference_window_delay,
+ window_length);
+ const int buffer_length = GetMinFramesProcessed();
+ RTC_DCHECK_GT(buffer_length, 0);
+ for (int i = 0; i < num_channels; ++i) {
+ ch_buffers_.push_back(
+ std::make_unique<ClippingPredictorLevelBuffer>(buffer_length));
+ }
+ }
+
+ ClippingEventPredictor(const ClippingEventPredictor&) = delete;
+ ClippingEventPredictor& operator=(const ClippingEventPredictor&) = delete;
+ ~ClippingEventPredictor() {}
+
+ void Reset() {
+ const int num_channels = ch_buffers_.size();
+ for (int i = 0; i < num_channels; ++i) {
+ ch_buffers_[i]->Reset();
+ }
+ }
+
+  // Analyzes a frame of audio and stores the framewise metrics (mean squared
+  // sample and absolute peak per channel) in `ch_buffers_`.
+ void Analyze(const AudioFrameView<const float>& frame) {
+ const int num_channels = frame.num_channels();
+ RTC_DCHECK_EQ(num_channels, ch_buffers_.size());
+ const int samples_per_channel = frame.samples_per_channel();
+ RTC_DCHECK_GT(samples_per_channel, 0);
+ for (int channel = 0; channel < num_channels; ++channel) {
+ float sum_squares = 0.0f;
+ float peak = 0.0f;
+ for (const auto& sample : frame.channel(channel)) {
+ sum_squares += sample * sample;
+ peak = std::max(std::fabs(sample), peak);
+ }
+ ch_buffers_[channel]->Push(
+ {sum_squares / static_cast<float>(samples_per_channel), peak});
+ }
+ }
+
+  // Estimates the analog gain adjustment for channel `channel` using a
+  // sliding window over the frame-wise metrics in `ch_buffers_`. Returns a
+  // clipped level step estimate, derived from `default_step`, if at least
+  // `GetMinFramesProcessed()` frames have been processed since the last reset
+  // and a clipping event is predicted. `level`, `min_mic_level`, and
+  // `max_mic_level` are limited to [0, 255] and `default_step` to [1, 255].
+ absl::optional<int> EstimateClippedLevelStep(int channel,
+ int level,
+ int default_step,
+ int min_mic_level,
+ int max_mic_level) const {
+ RTC_CHECK_GE(channel, 0);
+ RTC_CHECK_LT(channel, ch_buffers_.size());
+ RTC_DCHECK_GE(level, 0);
+ RTC_DCHECK_LE(level, 255);
+ RTC_DCHECK_GT(default_step, 0);
+ RTC_DCHECK_LE(default_step, 255);
+ RTC_DCHECK_GE(min_mic_level, 0);
+ RTC_DCHECK_LE(min_mic_level, 255);
+ RTC_DCHECK_GE(max_mic_level, 0);
+ RTC_DCHECK_LE(max_mic_level, 255);
+ if (level <= min_mic_level) {
+ return absl::nullopt;
+ }
+ if (PredictClippingEvent(channel)) {
+ const int new_level =
+ rtc::SafeClamp(level - default_step, min_mic_level, max_mic_level);
+ const int step = level - new_level;
+ if (step > 0) {
+ return step;
+ }
+ }
+ return absl::nullopt;
+ }
+
+ private:
+ int GetMinFramesProcessed() const {
+ return reference_window_delay_ + reference_window_length_;
+ }
+
+  // Predicts clipping events based on the processed audio frames. Returns
+  // true if a clipping event is likely, namely when the peak level of the
+  // most recent window exceeds `clipping_threshold_` and the crest factor has
+  // dropped by more than `crest_factor_margin_` compared to the reference
+  // window.
+ bool PredictClippingEvent(int channel) const {
+ const auto metrics =
+ ch_buffers_[channel]->ComputePartialMetrics(0, window_length_);
+ if (!metrics.has_value() ||
+ !(FloatS16ToDbfs(metrics.value().max) > clipping_threshold_)) {
+ return false;
+ }
+ const auto reference_metrics = ch_buffers_[channel]->ComputePartialMetrics(
+ reference_window_delay_, reference_window_length_);
+ if (!reference_metrics.has_value()) {
+ return false;
+ }
+ const float crest_factor = ComputeCrestFactor(metrics.value());
+ const float reference_crest_factor =
+ ComputeCrestFactor(reference_metrics.value());
+ if (crest_factor < reference_crest_factor - crest_factor_margin_) {
+ return true;
+ }
+ return false;
+ }
+
+ std::vector<std::unique_ptr<ClippingPredictorLevelBuffer>> ch_buffers_;
+ const int window_length_;
+ const int reference_window_length_;
+ const int reference_window_delay_;
+ const float clipping_threshold_;
+ const float crest_factor_margin_;
+};
+
+// Performs crest factor-based clipping peak prediction.
+class ClippingPeakPredictor : public ClippingPredictor {
+ public:
+ // Ctor. ClippingPeakPredictor with `num_channels` channels (limited to values
+ // higher than zero); window size `window_length` and reference window size
+ // `reference_window_length` (both referring to the number of frames in the
+ // respective sliding windows and limited to values higher than zero);
+ // reference window delay `reference_window_delay` (delay in frames, limited
+ // to values zero and higher with an additional requirement of
+  // `window_length` < `reference_window_length` + `reference_window_delay`);
+ // and a clipping prediction threshold `clipping_threshold` (in dB). Adaptive
+ // clipped level step estimation is used if `adaptive_step_estimation` is
+ // true.
+ explicit ClippingPeakPredictor(int num_channels,
+ int window_length,
+ int reference_window_length,
+ int reference_window_delay,
+ int clipping_threshold,
+ bool adaptive_step_estimation)
+ : window_length_(window_length),
+ reference_window_length_(reference_window_length),
+ reference_window_delay_(reference_window_delay),
+ clipping_threshold_(clipping_threshold),
+ adaptive_step_estimation_(adaptive_step_estimation) {
+ RTC_DCHECK_GT(num_channels, 0);
+ RTC_DCHECK_GT(window_length, 0);
+ RTC_DCHECK_GT(reference_window_length, 0);
+ RTC_DCHECK_GE(reference_window_delay, 0);
+ RTC_DCHECK_GT(reference_window_length + reference_window_delay,
+ window_length);
+ const int buffer_length = GetMinFramesProcessed();
+ RTC_DCHECK_GT(buffer_length, 0);
+ for (int i = 0; i < num_channels; ++i) {
+ ch_buffers_.push_back(
+ std::make_unique<ClippingPredictorLevelBuffer>(buffer_length));
+ }
+ }
+
+ ClippingPeakPredictor(const ClippingPeakPredictor&) = delete;
+ ClippingPeakPredictor& operator=(const ClippingPeakPredictor&) = delete;
+ ~ClippingPeakPredictor() {}
+
+ void Reset() {
+ const int num_channels = ch_buffers_.size();
+ for (int i = 0; i < num_channels; ++i) {
+ ch_buffers_[i]->Reset();
+ }
+ }
+
+  // Analyzes a frame of audio and stores the framewise metrics (mean squared
+  // sample and absolute peak per channel) in `ch_buffers_`.
+ void Analyze(const AudioFrameView<const float>& frame) {
+ const int num_channels = frame.num_channels();
+ RTC_DCHECK_EQ(num_channels, ch_buffers_.size());
+ const int samples_per_channel = frame.samples_per_channel();
+ RTC_DCHECK_GT(samples_per_channel, 0);
+ for (int channel = 0; channel < num_channels; ++channel) {
+ float sum_squares = 0.0f;
+ float peak = 0.0f;
+ for (const auto& sample : frame.channel(channel)) {
+ sum_squares += sample * sample;
+ peak = std::max(std::fabs(sample), peak);
+ }
+ ch_buffers_[channel]->Push(
+ {sum_squares / static_cast<float>(samples_per_channel), peak});
+ }
+ }
+
+ // Estimates the analog gain adjustment for channel `channel` using a
+ // sliding window over the frame-wise metrics in `ch_buffers_`. Returns an
+  // estimate for the clipped level step (equal to `default_step` if
+  // `adaptive_step_estimation_` is false) if at
+ // least `GetMinFramesProcessed()` frames have been processed since the last
+ // reset and a clipping event is predicted. `level`, `min_mic_level`, and
+ // `max_mic_level` are limited to [0, 255] and `default_step` to [1, 255].
+ absl::optional<int> EstimateClippedLevelStep(int channel,
+ int level,
+ int default_step,
+ int min_mic_level,
+ int max_mic_level) const {
+ RTC_DCHECK_GE(channel, 0);
+ RTC_DCHECK_LT(channel, ch_buffers_.size());
+ RTC_DCHECK_GE(level, 0);
+ RTC_DCHECK_LE(level, 255);
+ RTC_DCHECK_GT(default_step, 0);
+ RTC_DCHECK_LE(default_step, 255);
+ RTC_DCHECK_GE(min_mic_level, 0);
+ RTC_DCHECK_LE(min_mic_level, 255);
+ RTC_DCHECK_GE(max_mic_level, 0);
+ RTC_DCHECK_LE(max_mic_level, 255);
+ if (level <= min_mic_level) {
+ return absl::nullopt;
+ }
+ absl::optional<float> estimate_db = EstimatePeakValue(channel);
+ if (estimate_db.has_value() && estimate_db.value() > clipping_threshold_) {
+ int step = 0;
+ if (!adaptive_step_estimation_) {
+ step = default_step;
+ } else {
+ const int estimated_gain_change =
+ rtc::SafeClamp(-static_cast<int>(std::ceil(estimate_db.value())),
+ -kClippingPredictorMaxGainChange, 0);
+ step =
+ std::max(level - LevelFromGainError(estimated_gain_change, level,
+ min_mic_level, max_mic_level),
+ default_step);
+ }
+ const int new_level =
+ rtc::SafeClamp(level - step, min_mic_level, max_mic_level);
+ if (level > new_level) {
+ return level - new_level;
+ }
+ }
+ return absl::nullopt;
+ }
+
+ private:
+  int GetMinFramesProcessed() const {
+ return reference_window_delay_ + reference_window_length_;
+ }
+
+  // Predicts clipping sample peaks based on the processed audio frames. The
+  // projected peak assumes that the crest factor of the reference window
+  // still holds and adds it to the RMS level of the most recent window.
+  // Returns the estimated peak value (in dBFS) if clipping is predicted;
+  // otherwise returns absl::nullopt.
+ absl::optional<float> EstimatePeakValue(int channel) const {
+ const auto reference_metrics = ch_buffers_[channel]->ComputePartialMetrics(
+ reference_window_delay_, reference_window_length_);
+ if (!reference_metrics.has_value()) {
+ return absl::nullopt;
+ }
+ const auto metrics =
+ ch_buffers_[channel]->ComputePartialMetrics(0, window_length_);
+ if (!metrics.has_value() ||
+ !(FloatS16ToDbfs(metrics.value().max) > clipping_threshold_)) {
+ return absl::nullopt;
+ }
+ const float reference_crest_factor =
+ ComputeCrestFactor(reference_metrics.value());
+ const float& mean_squares = metrics.value().average;
+ const float projected_peak =
+ reference_crest_factor + FloatS16ToDbfs(std::sqrt(mean_squares));
+ return projected_peak;
+ }
+
+ std::vector<std::unique_ptr<ClippingPredictorLevelBuffer>> ch_buffers_;
+ const int window_length_;
+ const int reference_window_length_;
+ const int reference_window_delay_;
+ const int clipping_threshold_;
+ const bool adaptive_step_estimation_;
+};
+
+} // namespace
+
+std::unique_ptr<ClippingPredictor> CreateClippingPredictor(
+ int num_channels,
+ const AudioProcessing::Config::GainController1::AnalogGainController::
+ ClippingPredictor& config) {
+ if (!config.enabled) {
+ RTC_LOG(LS_INFO) << "[agc] Clipping prediction disabled.";
+ return nullptr;
+ }
+ RTC_LOG(LS_INFO) << "[agc] Clipping prediction enabled.";
+ using ClippingPredictorMode = AudioProcessing::Config::GainController1::
+ AnalogGainController::ClippingPredictor::Mode;
+ switch (config.mode) {
+ case ClippingPredictorMode::kClippingEventPrediction:
+ return std::make_unique<ClippingEventPredictor>(
+ num_channels, config.window_length, config.reference_window_length,
+ config.reference_window_delay, config.clipping_threshold,
+ config.crest_factor_margin);
+ case ClippingPredictorMode::kAdaptiveStepClippingPeakPrediction:
+ return std::make_unique<ClippingPeakPredictor>(
+ num_channels, config.window_length, config.reference_window_length,
+ config.reference_window_delay, config.clipping_threshold,
+ /*adaptive_step_estimation=*/true);
+ case ClippingPredictorMode::kFixedStepClippingPeakPrediction:
+ return std::make_unique<ClippingPeakPredictor>(
+ num_channels, config.window_length, config.reference_window_length,
+ config.reference_window_delay, config.clipping_threshold,
+ /*adaptive_step_estimation=*/false);
+ }
+ RTC_DCHECK_NOTREACHED();
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/agc/clipping_predictor.h b/third_party/libwebrtc/modules/audio_processing/agc/clipping_predictor.h
new file mode 100644
index 0000000000..ee2b6ef1e7
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc/clipping_predictor.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AGC_CLIPPING_PREDICTOR_H_
+#define MODULES_AUDIO_PROCESSING_AGC_CLIPPING_PREDICTOR_H_
+
+#include <memory>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "modules/audio_processing/include/audio_frame_view.h"
+#include "modules/audio_processing/include/audio_processing.h"
+
+namespace webrtc {
+
+// Frame-wise clipping prediction and clipped level step estimation. Analyzes
+// 10 ms multi-channel frames and estimates an analog mic level decrease step
+// to possibly avoid clipping when predicted. `Analyze()` and
+// `EstimateClippedLevelStep()` can be called in any order.
+class ClippingPredictor {
+ public:
+ virtual ~ClippingPredictor() = default;
+
+ virtual void Reset() = 0;
+
+ // Analyzes a 10 ms multi-channel audio frame.
+ virtual void Analyze(const AudioFrameView<const float>& frame) = 0;
+
+ // Predicts if clipping is going to occur for the specified `channel` in the
+ // near-future and, if so, it returns a recommended analog mic level decrease
+ // step. Returns absl::nullopt if clipping is not predicted.
+ // `level` is the current analog mic level, `default_step` is the amount the
+  // mic level is lowered by the analog controller with every clipping event,
+  // and `min_mic_level` and `max_mic_level` define the range of allowed
+  // analog mic levels.
+ virtual absl::optional<int> EstimateClippedLevelStep(
+ int channel,
+ int level,
+ int default_step,
+ int min_mic_level,
+ int max_mic_level) const = 0;
+
+
+// Creates a ClippingPredictor based on the provided `config`. When enabled,
+// the following must hold for `config`:
+// `window_length < reference_window_length + reference_window_delay`.
+// Returns `nullptr` if `config.enabled` is false.
+std::unique_ptr<ClippingPredictor> CreateClippingPredictor(
+ int num_channels,
+ const AudioProcessing::Config::GainController1::AnalogGainController::
+ ClippingPredictor& config);
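+
+// A minimal usage sketch (the argument values are illustrative):
+//   auto predictor = CreateClippingPredictor(/*num_channels=*/1, config);
+//   predictor->Analyze(frame);  // Once per 10 ms frame.
+//   absl::optional<int> step = predictor->EstimateClippedLevelStep(
+//       /*channel=*/0, /*level=*/200, /*default_step=*/15,
+//       /*min_mic_level=*/0, /*max_mic_level=*/255);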
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AGC_CLIPPING_PREDICTOR_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/agc/clipping_predictor_evaluator.cc b/third_party/libwebrtc/modules/audio_processing/agc/clipping_predictor_evaluator.cc
new file mode 100644
index 0000000000..ed7198d119
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc/clipping_predictor_evaluator.cc
@@ -0,0 +1,214 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/agc/clipping_predictor_evaluator.h"
+
+#include <algorithm>
+
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+namespace {
+
+// Returns the index of the oldest item in the ring buffer for a non-empty
+// ring buffer with given `size`, `tail` index and `capacity`.
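+// E.g., size=3, tail=1, capacity=4 -> oldest index = 1 - 3 + 4 = 2.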
+int OldestExpectedDetectionIndex(int size, int tail, int capacity) {
+ RTC_DCHECK_GT(size, 0);
+ return tail - size + (tail < size ? capacity : 0);
+}
+
+} // namespace
+
+ClippingPredictorEvaluator::ClippingPredictorEvaluator(int history_size)
+ : history_size_(history_size),
+ ring_buffer_capacity_(history_size + 1),
+ ring_buffer_(ring_buffer_capacity_) {
+ RTC_DCHECK_GT(history_size_, 0);
+ Reset();
+ counters_.true_positives = 0;
+ counters_.true_negatives = 0;
+ counters_.false_positives = 0;
+ counters_.false_negatives = 0;
+}
+
+ClippingPredictorEvaluator::~ClippingPredictorEvaluator() = default;
+
+absl::optional<int> ClippingPredictorEvaluator::Observe(
+ bool clipping_detected,
+ bool clipping_predicted) {
+ RTC_DCHECK_GE(ring_buffer_size_, 0);
+ RTC_DCHECK_LE(ring_buffer_size_, ring_buffer_capacity_);
+ RTC_DCHECK_GE(ring_buffer_tail_, 0);
+ RTC_DCHECK_LT(ring_buffer_tail_, ring_buffer_capacity_);
+
+ DecreaseTimesToLive();
+  // Clipping is expected if the ring buffer holds any expected detection,
+  // regardless of whether it has already been matched (i.e.,
+  // `ExpectedDetection::detected` is true).
+ const bool clipping_expected = ring_buffer_size_ > 0;
+
+ absl::optional<int> prediction_interval;
+ if (clipping_expected && clipping_detected) {
+ prediction_interval = FindEarliestPredictionInterval();
+ // Add a true positive for each unexpired expected detection.
+ const int num_modified_items = MarkExpectedDetectionAsDetected();
+ counters_.true_positives += num_modified_items;
+ RTC_DCHECK(prediction_interval.has_value() || num_modified_items == 0);
+ RTC_DCHECK(!prediction_interval.has_value() || num_modified_items > 0);
+ } else if (clipping_expected && !clipping_detected) {
+ // Add a false positive if there is one expected detection that has expired
+ // and that has never been matched before. Note that there is at most one
+ // unmatched expired detection.
+ if (HasExpiredUnmatchedExpectedDetection()) {
+ counters_.false_positives++;
+ }
+ } else if (!clipping_expected && clipping_detected) {
+ counters_.false_negatives++;
+ } else {
+ RTC_DCHECK(!clipping_expected && !clipping_detected);
+ counters_.true_negatives++;
+ }
+
+ if (clipping_predicted) {
+    // TODO(bugs.webrtc.org/12874): Use designated initializers once fixed.
+ Push(/*expected_detection=*/{/*ttl=*/history_size_, /*detected=*/false});
+ }
+
+ return prediction_interval;
+}
+
+void ClippingPredictorEvaluator::RemoveExpectations() {
+ // Empty the ring buffer of expected detections.
+ ring_buffer_tail_ = 0;
+ ring_buffer_size_ = 0;
+}
+
+void ClippingPredictorEvaluator::Reset() {
+ counters_.true_positives = 0;
+ counters_.true_negatives = 0;
+ counters_.false_positives = 0;
+ counters_.false_negatives = 0;
+ RemoveExpectations();
+}
+
+// Cost: O(1).
+void ClippingPredictorEvaluator::Push(ExpectedDetection value) {
+ ring_buffer_[ring_buffer_tail_] = value;
+ ring_buffer_tail_++;
+ if (ring_buffer_tail_ == ring_buffer_capacity_) {
+ ring_buffer_tail_ = 0;
+ }
+ ring_buffer_size_ = std::min(ring_buffer_capacity_, ring_buffer_size_ + 1);
+}
+
+// Cost: O(N).
+void ClippingPredictorEvaluator::DecreaseTimesToLive() {
+ bool expired_found = false;
+ for (int i = ring_buffer_tail_ - ring_buffer_size_; i < ring_buffer_tail_;
+ ++i) {
+ int index = i >= 0 ? i : ring_buffer_capacity_ + i;
+ RTC_DCHECK_GE(index, 0);
+ RTC_DCHECK_LT(index, ring_buffer_.size());
+ RTC_DCHECK_GE(ring_buffer_[index].ttl, 0);
+ if (ring_buffer_[index].ttl == 0) {
+ RTC_DCHECK(!expired_found)
+ << "There must be at most one expired item in the ring buffer.";
+ expired_found = true;
+ RTC_DCHECK_EQ(index, OldestExpectedDetectionIndex(ring_buffer_size_,
+ ring_buffer_tail_,
+ ring_buffer_capacity_))
+ << "The expired item must be the oldest in the ring buffer.";
+ }
+ ring_buffer_[index].ttl--;
+ }
+ if (expired_found) {
+ ring_buffer_size_--;
+ }
+}
+
+// Cost: O(N).
+absl::optional<int> ClippingPredictorEvaluator::FindEarliestPredictionInterval()
+ const {
+ absl::optional<int> prediction_interval;
+ for (int i = ring_buffer_tail_ - ring_buffer_size_; i < ring_buffer_tail_;
+ ++i) {
+ int index = i >= 0 ? i : ring_buffer_capacity_ + i;
+ RTC_DCHECK_GE(index, 0);
+ RTC_DCHECK_LT(index, ring_buffer_.size());
+ if (!ring_buffer_[index].detected) {
+ prediction_interval = std::max(prediction_interval.value_or(0),
+ history_size_ - ring_buffer_[index].ttl);
+ }
+ }
+ return prediction_interval;
+}
+
+// Cost: O(N).
+int ClippingPredictorEvaluator::MarkExpectedDetectionAsDetected() {
+ int num_modified_items = 0;
+ for (int i = ring_buffer_tail_ - ring_buffer_size_; i < ring_buffer_tail_;
+ ++i) {
+ int index = i >= 0 ? i : ring_buffer_capacity_ + i;
+ RTC_DCHECK_GE(index, 0);
+ RTC_DCHECK_LT(index, ring_buffer_.size());
+ if (!ring_buffer_[index].detected) {
+ num_modified_items++;
+ }
+ ring_buffer_[index].detected = true;
+ }
+ return num_modified_items;
+}
+
+// Cost: O(1).
+bool ClippingPredictorEvaluator::HasExpiredUnmatchedExpectedDetection() const {
+ if (ring_buffer_size_ == 0) {
+ return false;
+ }
+ // If an expired item, that is `ttl` equal to 0, exists, it must be the
+ // oldest.
+ const int oldest_index = OldestExpectedDetectionIndex(
+ ring_buffer_size_, ring_buffer_tail_, ring_buffer_capacity_);
+ RTC_DCHECK_GE(oldest_index, 0);
+ RTC_DCHECK_LT(oldest_index, ring_buffer_.size());
+ return ring_buffer_[oldest_index].ttl == 0 &&
+ !ring_buffer_[oldest_index].detected;
+}
+
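+// E.g., counters with TP=6, FP=2 and FN=2 yield precision = 6 / 8 = 0.75,
+// recall = 6 / 8 = 0.75 and hence an F1 score of 0.75.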
+absl::optional<ClippingPredictionMetrics> ComputeClippingPredictionMetrics(
+ const ClippingPredictionCounters& counters) {
+ RTC_DCHECK_GE(counters.true_positives, 0);
+ RTC_DCHECK_GE(counters.true_negatives, 0);
+ RTC_DCHECK_GE(counters.false_positives, 0);
+ RTC_DCHECK_GE(counters.false_negatives, 0);
+ if (counters.true_positives == 0) {
+ // Both precision and recall are zero in this case and hence the F1 score
+ // is undefined.
+ return absl::nullopt;
+ }
+ int precision_denominator =
+ counters.true_positives + counters.false_positives;
+ int recall_denominator = counters.true_positives + counters.false_negatives;
+ if (precision_denominator == 0 || recall_denominator == 0) {
+ // Both precision and recall must be defined.
+ return absl::nullopt;
+ }
+ ClippingPredictionMetrics metrics;
+ float true_positives = counters.true_positives;
+ metrics.precision = true_positives / precision_denominator;
+ metrics.recall = true_positives / recall_denominator;
+ float f1_score_denominator = metrics.precision + metrics.recall;
+ RTC_DCHECK_GT(f1_score_denominator, 0.0f);
+ metrics.f1_score =
+ 2 * metrics.precision * metrics.recall / f1_score_denominator;
+ return metrics;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/agc/clipping_predictor_evaluator.h b/third_party/libwebrtc/modules/audio_processing/agc/clipping_predictor_evaluator.h
new file mode 100644
index 0000000000..348f753493
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc/clipping_predictor_evaluator.h
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AGC_CLIPPING_PREDICTOR_EVALUATOR_H_
+#define MODULES_AUDIO_PROCESSING_AGC_CLIPPING_PREDICTOR_EVALUATOR_H_
+
+#include <vector>
+
+#include "absl/types/optional.h"
+
+namespace webrtc {
+
+// Clipping prediction counters.
+struct ClippingPredictionCounters {
+ int true_positives; // TP.
+ int true_negatives; // TN.
+ int false_positives; // FP.
+ int false_negatives; // FN.
+};
+
+// Counts true/false positives/negatives while observing sequences of flag pairs
+// that indicate whether clipping has been detected and/or if clipping is
+// predicted. When a true positive is found, the time interval between the
+// prediction and the detection events is measured.
+// After a prediction is observed and for a period equal to
+// `history_size` calls to `Observe()`, one or more detections are expected. If
+// the expectation is met, a true positive is added and the time interval
+// between the earliest prediction and the detection is recorded; otherwise,
+// when the deadline is reached, a false positive is added. Note that one
+// detection matches all the expected detections that have not expired - i.e.,
+// one detection counts as multiple true positives.
+// If a detection is observed, but no prediction has been observed over the past
+// `history_size` calls to `Observe()`, then a false negative is added;
+// otherwise, a true negative is added.
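+// E.g., with `history_size` equal to 3, the (detected, predicted) sequence
+// (false, true), (false, false), (true, false) counts one true negative and
+// one true positive, and `Observe()` returns a prediction interval of 2 on
+// the third call.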
+class ClippingPredictorEvaluator {
+ public:
+  // Ctor. `history_size` is the number of calls to `Observe()` to wait, from
+  // the time clipping is predicted, for a call having `clipping_detected` set
+  // to true.
+ explicit ClippingPredictorEvaluator(int history_size);
+ ClippingPredictorEvaluator(const ClippingPredictorEvaluator&) = delete;
+ ClippingPredictorEvaluator& operator=(const ClippingPredictorEvaluator&) =
+ delete;
+ ~ClippingPredictorEvaluator();
+
+ // Observes whether clipping has been detected and/or if clipping is
+  // predicted. When predicted, one or more detections are expected in the
+  // next `history_size_` calls to `Observe()`. When true positives are found,
+  // returns the prediction interval between the earliest prediction and the
+  // detection.
+ absl::optional<int> Observe(bool clipping_detected, bool clipping_predicted);
+
+ // Removes any expectation recently set after a call to `Observe()` having
+ // `clipping_predicted` set to true. Counters won't be reset.
+ void RemoveExpectations();
+
+ // Resets counters and removes any expectation (see `RemoveExpectations()`).
+ void Reset();
+
+ ClippingPredictionCounters counters() const { return counters_; }
+
+ private:
+ const int history_size_;
+
+ // State of a detection expected to be observed after a prediction.
+ struct ExpectedDetection {
+ // Time to live (TTL); remaining number of `Observe()` calls to match a call
+ // having `clipping_detected` set to true.
+ int ttl;
+ // True if an `Observe()` call having `clipping_detected` set to true has
+ // been observed.
+ bool detected;
+ };
+ // Ring buffer of expected detections.
+ const int ring_buffer_capacity_;
+ std::vector<ExpectedDetection> ring_buffer_;
+ int ring_buffer_tail_;
+ int ring_buffer_size_;
+
+  // Pushes `expected_detection` into `ring_buffer_`.
+ void Push(ExpectedDetection expected_detection);
+  // Decreases the TTLs in `ring_buffer_` and removes expired items.
+ void DecreaseTimesToLive();
+ // Returns the prediction interval for the earliest unexpired expected
+ // detection if any.
+ absl::optional<int> FindEarliestPredictionInterval() const;
+  // Marks all the items in `ring_buffer_` as `detected` and
+ // returns the number of updated items.
+ int MarkExpectedDetectionAsDetected();
+  // Returns true if `ring_buffer_` has an item having `ttl`
+ // equal to 0 (expired) and `detected` equal to false (unmatched).
+ bool HasExpiredUnmatchedExpectedDetection() const;
+
+ // Counters.
+ ClippingPredictionCounters counters_;
+};
+
+// Clipping prediction metrics derived from the clipping prediction counters.
+struct ClippingPredictionMetrics {
+ // Precision (P) is defined as TP / (TP + FP).
+ float precision;
+ // Recall (R) is defined as TP / (TP + FN).
+ float recall;
+ // The F1 score is defined as 2 * P * R / (P + R).
+ float f1_score;
+};
+
+// Derives clipping prediction metrics from the true/false positives/negatives
+// `counters`. Returns absl::nullopt if one or more of the metrics are
+// undefined (e.g., when there are no true positives).
+absl::optional<ClippingPredictionMetrics> ComputeClippingPredictionMetrics(
+ const ClippingPredictionCounters& counters);
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AGC_CLIPPING_PREDICTOR_EVALUATOR_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/agc/clipping_predictor_evaluator_gn/moz.build b/third_party/libwebrtc/modules/audio_processing/agc/clipping_predictor_evaluator_gn/moz.build
new file mode 100644
index 0000000000..f5acc34bc3
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc/clipping_predictor_evaluator_gn/moz.build
@@ -0,0 +1,205 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/audio_processing/agc/clipping_predictor_evaluator.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("clipping_predictor_evaluator_gn")
diff --git a/third_party/libwebrtc/modules/audio_processing/agc/clipping_predictor_evaluator_unittest.cc b/third_party/libwebrtc/modules/audio_processing/agc/clipping_predictor_evaluator_unittest.cc
new file mode 100644
index 0000000000..b2d2797ca5
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc/clipping_predictor_evaluator_unittest.cc
@@ -0,0 +1,763 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/agc/clipping_predictor_evaluator.h"
+
+#include <cstdint>
+#include <memory>
+#include <tuple>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "rtc_base/random.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+using testing::Eq;
+using testing::Field;
+using testing::Optional;
+
+constexpr bool kDetected = true;
+constexpr bool kNotDetected = false;
+
+constexpr bool kPredicted = true;
+constexpr bool kNotPredicted = false;
+
+ClippingPredictionCounters operator-(const ClippingPredictionCounters& lhs,
+ const ClippingPredictionCounters& rhs) {
+ return {
+ lhs.true_positives - rhs.true_positives,
+ lhs.true_negatives - rhs.true_negatives,
+ lhs.false_positives - rhs.false_positives,
+ lhs.false_negatives - rhs.false_negatives,
+ };
+}
+
+// Checks the metrics after init - i.e., no call to `Observe()`.
+TEST(ClippingPredictionEvalTest, Init) {
+ ClippingPredictorEvaluator evaluator(/*history_size=*/3);
+ EXPECT_EQ(evaluator.counters().true_positives, 0);
+ EXPECT_EQ(evaluator.counters().true_negatives, 0);
+ EXPECT_EQ(evaluator.counters().false_positives, 0);
+ EXPECT_EQ(evaluator.counters().false_negatives, 0);
+}
+
+class ClippingPredictorEvaluatorParameterization
+ : public ::testing::TestWithParam<std::tuple<int, int>> {
+ protected:
+ uint64_t seed() const {
+ return rtc::checked_cast<uint64_t>(std::get<0>(GetParam()));
+ }
+ int history_size() const { return std::get<1>(GetParam()); }
+};
+
+// Checks that after each call to `Observe()` at most one metric changes.
+TEST_P(ClippingPredictorEvaluatorParameterization, AtMostOneMetricChanges) {
+ constexpr int kNumCalls = 123;
+ Random random_generator(seed());
+ ClippingPredictorEvaluator evaluator(history_size());
+
+ for (int i = 0; i < kNumCalls; ++i) {
+ SCOPED_TRACE(i);
+ // Read metrics before `Observe()` is called.
+ const auto pre = evaluator.counters();
+ // `Observe()` a random observation.
+ bool clipping_detected = random_generator.Rand<bool>();
+ bool clipping_predicted = random_generator.Rand<bool>();
+ evaluator.Observe(clipping_detected, clipping_predicted);
+
+ // Check that at most one metric has changed.
+ const auto post = evaluator.counters();
+ int num_changes = 0;
+ num_changes += pre.true_positives == post.true_positives ? 0 : 1;
+ num_changes += pre.true_negatives == post.true_negatives ? 0 : 1;
+ num_changes += pre.false_positives == post.false_positives ? 0 : 1;
+ num_changes += pre.false_negatives == post.false_negatives ? 0 : 1;
+ EXPECT_GE(num_changes, 0);
+ EXPECT_LE(num_changes, 1);
+ }
+}
+
+// Checks that after each call to `Observe()` each metric either remains
+// unchanged or grows.
+TEST_P(ClippingPredictorEvaluatorParameterization, MetricsAreWeaklyMonotonic) {
+ constexpr int kNumCalls = 123;
+ Random random_generator(seed());
+ ClippingPredictorEvaluator evaluator(history_size());
+
+ for (int i = 0; i < kNumCalls; ++i) {
+ SCOPED_TRACE(i);
+ // Read metrics before `Observe()` is called.
+ const auto pre = evaluator.counters();
+ // `Observe()` a random observation.
+ bool clipping_detected = random_generator.Rand<bool>();
+ bool clipping_predicted = random_generator.Rand<bool>();
+ evaluator.Observe(clipping_detected, clipping_predicted);
+
+ // Check that metrics are weakly monotonic.
+ const auto post = evaluator.counters();
+ EXPECT_GE(post.true_positives, pre.true_positives);
+ EXPECT_GE(post.true_negatives, pre.true_negatives);
+ EXPECT_GE(post.false_positives, pre.false_positives);
+ EXPECT_GE(post.false_negatives, pre.false_negatives);
+ }
+}
+
+// Checks that after each call to `Observe()` the growth of each metric is
+// bounded.
+TEST_P(ClippingPredictorEvaluatorParameterization, BoundedMetricsGrowth) {
+ constexpr int kNumCalls = 123;
+ Random random_generator(seed());
+ ClippingPredictorEvaluator evaluator(history_size());
+
+ for (int i = 0; i < kNumCalls; ++i) {
+ SCOPED_TRACE(i);
+ // Read metrics before `Observe()` is called.
+ const auto pre = evaluator.counters();
+ // `Observe()` a random observation.
+ bool clipping_detected = random_generator.Rand<bool>();
+ bool clipping_predicted = random_generator.Rand<bool>();
+ evaluator.Observe(clipping_detected, clipping_predicted);
+
+ const auto diff = evaluator.counters() - pre;
+ // Check that TPs grow by at most `history_size() + 1`. Such an upper bound
+ // is reached when multiple predictions are matched by a single detection.
+ EXPECT_LE(diff.true_positives, history_size() + 1);
+ // Check that TNs, FPs and FNs grow by at most one.
+ EXPECT_LE(diff.true_negatives, 1);
+ EXPECT_LE(diff.false_positives, 1);
+ EXPECT_LE(diff.false_negatives, 1);
+ }
+}
+
+// Checks that `Observe()` returns a prediction interval if and only if one or
+// more true positives are found.
+TEST_P(ClippingPredictorEvaluatorParameterization,
+ PredictionIntervalIfAndOnlyIfTruePositives) {
+ constexpr int kNumCalls = 123;
+ Random random_generator(seed());
+ ClippingPredictorEvaluator evaluator(history_size());
+
+ for (int i = 0; i < kNumCalls; ++i) {
+ SCOPED_TRACE(i);
+ // Read true positives before `Observe()` is called.
+ const int last_tp = evaluator.counters().true_positives;
+ // `Observe()` a random observation.
+ bool clipping_detected = random_generator.Rand<bool>();
+ bool clipping_predicted = random_generator.Rand<bool>();
+ absl::optional<int> prediction_interval =
+ evaluator.Observe(clipping_detected, clipping_predicted);
+
+ // Check that the prediction interval is returned when a true positive is
+ // found.
+ if (evaluator.counters().true_positives == last_tp) {
+ EXPECT_FALSE(prediction_interval.has_value());
+ } else {
+ EXPECT_TRUE(prediction_interval.has_value());
+ }
+ }
+}
+
+INSTANTIATE_TEST_SUITE_P(
+ ClippingPredictionEvalTest,
+ ClippingPredictorEvaluatorParameterization,
+ ::testing::Combine(::testing::Values(4, 8, 15, 16, 23, 42),
+ ::testing::Values(1, 10, 21)));
+
+// Checks that after initialization, when no detection is expected,
+// observing no detection and no prediction produces a true negative.
+TEST(ClippingPredictionEvalTest, TrueNegativeWithNoDetectNoPredictAfterInit) {
+ ClippingPredictorEvaluator evaluator(/*history_size=*/3);
+
+ evaluator.Observe(kNotDetected, kNotPredicted);
+ EXPECT_EQ(evaluator.counters().true_positives, 0);
+ EXPECT_EQ(evaluator.counters().true_negatives, 1);
+ EXPECT_EQ(evaluator.counters().false_positives, 0);
+ EXPECT_EQ(evaluator.counters().false_negatives, 0);
+}
+
+// Checks that after initialization, when no detection is expected,
+// observing no detection and a prediction produces a true negative.
+TEST(ClippingPredictionEvalTest, TrueNegativeWithNoDetectPredictAfterInit) {
+ ClippingPredictorEvaluator evaluator(/*history_size=*/3);
+
+ evaluator.Observe(kNotDetected, kPredicted);
+ EXPECT_EQ(evaluator.counters().true_positives, 0);
+ EXPECT_EQ(evaluator.counters().true_negatives, 1);
+ EXPECT_EQ(evaluator.counters().false_positives, 0);
+ EXPECT_EQ(evaluator.counters().false_negatives, 0);
+}
+
+// Checks that after initialization, when no detection is expected,
+// observing a detection and no prediction produces a false negative.
+TEST(ClippingPredictionEvalTest, FalseNegativeWithDetectNoPredictAfterInit) {
+ ClippingPredictorEvaluator evaluator(/*history_size=*/3);
+
+ evaluator.Observe(kDetected, kNotPredicted);
+ EXPECT_EQ(evaluator.counters().true_positives, 0);
+ EXPECT_EQ(evaluator.counters().true_negatives, 0);
+ EXPECT_EQ(evaluator.counters().false_positives, 0);
+ EXPECT_EQ(evaluator.counters().false_negatives, 1);
+}
+
+// Checks that after initialization, when no detection is expected,
+// simultaneously observing a detection and a prediction produces a false
+// negative.
+TEST(ClippingPredictionEvalTest, FalseNegativeWithDetectPredictAfterInit) {
+ ClippingPredictorEvaluator evaluator(/*history_size=*/3);
+
+ evaluator.Observe(kDetected, kPredicted);
+ EXPECT_EQ(evaluator.counters().true_positives, 0);
+ EXPECT_EQ(evaluator.counters().true_negatives, 0);
+ EXPECT_EQ(evaluator.counters().false_positives, 0);
+ EXPECT_EQ(evaluator.counters().false_negatives, 1);
+}
+
+// Checks that, after removing existing expectations, observing no detection and
+// no prediction produces a true negative.
+TEST(ClippingPredictionEvalTest,
+ TrueNegativeWithNoDetectNoPredictAfterRemoveExpectations) {
+ ClippingPredictorEvaluator evaluator(/*history_size=*/3);
+
+ // Set an expectation, then remove it.
+ evaluator.Observe(kNotDetected, kPredicted);
+ evaluator.RemoveExpectations();
+ const auto pre = evaluator.counters();
+
+ evaluator.Observe(kNotDetected, kNotPredicted);
+ const auto diff = evaluator.counters() - pre;
+ EXPECT_EQ(diff.true_positives, 0);
+ EXPECT_EQ(diff.true_negatives, 1);
+ EXPECT_EQ(diff.false_positives, 0);
+ EXPECT_EQ(diff.false_negatives, 0);
+}
+
+// Checks that, after removing existing expectations, observing no detection and
+// a prediction produces a true negative.
+TEST(ClippingPredictionEvalTest,
+ TrueNegativeWithNoDetectPredictAfterRemoveExpectations) {
+ ClippingPredictorEvaluator evaluator(/*history_size=*/3);
+
+ // Set an expectation, then remove it.
+ evaluator.Observe(kNotDetected, kPredicted);
+ evaluator.RemoveExpectations();
+ const auto pre = evaluator.counters();
+
+ evaluator.Observe(kNotDetected, kPredicted);
+ const auto diff = evaluator.counters() - pre;
+ EXPECT_EQ(diff.true_positives, 0);
+ EXPECT_EQ(diff.true_negatives, 1);
+ EXPECT_EQ(diff.false_positives, 0);
+ EXPECT_EQ(diff.false_negatives, 0);
+}
+
+// Checks that, after removing existing expectations, observing a detection and
+// no prediction produces a false negative.
+TEST(ClippingPredictionEvalTest,
+ FalseNegativeWithDetectNoPredictAfterRemoveExpectations) {
+ ClippingPredictorEvaluator evaluator(/*history_size=*/3);
+
+ // Set an expectation, then remove it.
+ evaluator.Observe(kNotDetected, kPredicted);
+ evaluator.RemoveExpectations();
+ const auto pre = evaluator.counters();
+
+ evaluator.Observe(kDetected, kNotPredicted);
+ const auto diff = evaluator.counters() - pre;
+ EXPECT_EQ(diff.true_positives, 0);
+ EXPECT_EQ(diff.true_negatives, 0);
+ EXPECT_EQ(diff.false_positives, 0);
+ EXPECT_EQ(diff.false_negatives, 1);
+}
+
+// Checks that, after removing existing expectations, simultaneously observing a
+// detection and a prediction produces a false negative.
+TEST(ClippingPredictionEvalTest,
+ FalseNegativeWithDetectPredictAfterRemoveExpectations) {
+ ClippingPredictorEvaluator evaluator(/*history_size=*/3);
+
+ // Set an expectation, then remove it.
+ evaluator.Observe(kNotDetected, kPredicted);
+ evaluator.RemoveExpectations();
+ const auto pre = evaluator.counters();
+
+ evaluator.Observe(kDetected, kPredicted);
+ const auto diff = evaluator.counters() - pre;
+ EXPECT_EQ(diff.false_negatives, 1);
+ EXPECT_EQ(diff.true_positives, 0);
+ EXPECT_EQ(diff.true_negatives, 0);
+ EXPECT_EQ(diff.false_positives, 0);
+}
+
+// Checks that the evaluator detects true negatives when clipping is neither
+// predicted nor detected.
+TEST(ClippingPredictionEvalTest, TrueNegativesWhenNeverDetectedOrPredicted) {
+ ClippingPredictorEvaluator evaluator(/*history_size=*/3);
+ evaluator.Observe(kNotDetected, kNotPredicted);
+ evaluator.Observe(kNotDetected, kNotPredicted);
+ evaluator.Observe(kNotDetected, kNotPredicted);
+ evaluator.Observe(kNotDetected, kNotPredicted);
+ EXPECT_EQ(evaluator.counters().true_negatives, 4);
+}
+
+// Checks that, until the observation period expires, the evaluator does not
+// count a false positive when clipping is predicted and not detected.
+TEST(ClippingPredictionEvalTest, PredictedOnceAndNeverDetectedBeforeDeadline) {
+ ClippingPredictorEvaluator evaluator(/*history_size=*/3);
+ evaluator.Observe(kNotDetected, kPredicted);
+ evaluator.Observe(kNotDetected, kNotPredicted);
+ EXPECT_EQ(evaluator.counters().false_positives, 0);
+ evaluator.Observe(kNotDetected, kNotPredicted);
+ EXPECT_EQ(evaluator.counters().false_positives, 0);
+ evaluator.Observe(kNotDetected, kNotPredicted);
+ EXPECT_EQ(evaluator.counters().true_positives, 0);
+ EXPECT_EQ(evaluator.counters().false_positives, 1);
+}
+
+// Checks that the evaluator counts a false positive when clipping is
+// predicted but only detected after the observation period expires.
+TEST(ClippingPredictionEvalTest, PredictedOnceButDetectedAfterDeadline) {
+ ClippingPredictorEvaluator evaluator(/*history_size=*/3);
+ evaluator.Observe(kNotDetected, kPredicted);
+ evaluator.Observe(kNotDetected, kNotPredicted);
+ evaluator.Observe(kNotDetected, kNotPredicted);
+ evaluator.Observe(kNotDetected, kNotPredicted);
+ evaluator.Observe(kDetected, kNotPredicted);
+ EXPECT_EQ(evaluator.counters().true_positives, 0);
+ EXPECT_EQ(evaluator.counters().false_positives, 1);
+}
+
+// Checks that a prediction followed by a detection counts as true positive.
+TEST(ClippingPredictionEvalTest, PredictedOnceAndThenImmediatelyDetected) {
+ ClippingPredictorEvaluator evaluator(/*history_size=*/3);
+ evaluator.Observe(kNotDetected, kPredicted);
+ evaluator.Observe(kDetected, kNotPredicted);
+ EXPECT_EQ(evaluator.counters().true_positives, 1);
+ EXPECT_EQ(evaluator.counters().false_positives, 0);
+}
+
+// Checks that a prediction followed by a delayed detection counts as true
+// positive if the delay is within the observation period.
+TEST(ClippingPredictionEvalTest, PredictedOnceAndDetectedBeforeDeadline) {
+ ClippingPredictorEvaluator evaluator(/*history_size=*/3);
+ evaluator.Observe(kNotDetected, kPredicted);
+ evaluator.Observe(kNotDetected, kNotPredicted);
+ evaluator.Observe(kDetected, kNotPredicted);
+ EXPECT_EQ(evaluator.counters().true_positives, 1);
+ EXPECT_EQ(evaluator.counters().false_positives, 0);
+}
+
+// Checks that a prediction followed by a delayed detection counts as true
+// positive if the delay equals the observation period.
+TEST(ClippingPredictionEvalTest, PredictedOnceAndDetectedAtDeadline) {
+ ClippingPredictorEvaluator evaluator(/*history_size=*/3);
+ evaluator.Observe(kNotDetected, kPredicted);
+ evaluator.Observe(kNotDetected, kNotPredicted);
+ evaluator.Observe(kNotDetected, kNotPredicted);
+ evaluator.Observe(kDetected, kNotPredicted);
+ EXPECT_EQ(evaluator.counters().true_positives, 1);
+ EXPECT_EQ(evaluator.counters().false_positives, 0);
+}
+
+// Checks that a prediction followed by multiple adjacent detections within
+// the deadline counts as a single true positive and that, after the deadline,
+// a detection counts as a false negative.
+TEST(ClippingPredictionEvalTest, PredictedOnceAndDetectedMultipleTimes) {
+ ClippingPredictorEvaluator evaluator(/*history_size=*/3);
+ evaluator.Observe(kNotDetected, kPredicted);
+ evaluator.Observe(kNotDetected, kNotPredicted);
+ // Multiple detections.
+ evaluator.Observe(kDetected, kNotPredicted);
+ EXPECT_EQ(evaluator.counters().true_positives, 1);
+ EXPECT_EQ(evaluator.counters().false_negatives, 0);
+ EXPECT_EQ(evaluator.counters().false_positives, 0);
+ evaluator.Observe(kDetected, kNotPredicted);
+ EXPECT_EQ(evaluator.counters().true_positives, 1);
+ EXPECT_EQ(evaluator.counters().false_negatives, 0);
+ EXPECT_EQ(evaluator.counters().false_positives, 0);
+ // A detection outside of the observation period counts as false negative.
+ evaluator.Observe(kDetected, kNotPredicted);
+ EXPECT_EQ(evaluator.counters().true_positives, 1);
+ EXPECT_EQ(evaluator.counters().false_negatives, 1);
+ EXPECT_EQ(evaluator.counters().false_positives, 0);
+}
+
+// Checks that when clipping is predicted multiple times, a prediction that is
+// observed too early counts as a false positive, whereas the other predictions
+// that are matched to a detection count as true positives.
+TEST(ClippingPredictionEvalTest,
+ PredictedMultipleTimesAndDetectedOnceAfterDeadline) {
+ ClippingPredictorEvaluator evaluator(/*history_size=*/3);
+ evaluator.Observe(kNotDetected, kPredicted); // ---+
+ evaluator.Observe(kNotDetected, kPredicted); // |
+ evaluator.Observe(kNotDetected, kPredicted); // |
+ evaluator.Observe(kNotDetected, kPredicted); // <--+ Not matched.
+ // The time to match a detection after the first prediction expired.
+ EXPECT_EQ(evaluator.counters().false_positives, 1);
+ evaluator.Observe(kDetected, kNotPredicted);
+  // The detection above does not match the first prediction because it
+  // happened after that prediction's deadline.
+ EXPECT_EQ(evaluator.counters().false_positives, 1);
+ // However, the detection matches all the other predictions.
+ EXPECT_EQ(evaluator.counters().true_positives, 3);
+ EXPECT_EQ(evaluator.counters().false_negatives, 0);
+}
+
+// Checks that multiple consecutive predictions match the first detection
+// observed before the expected detection deadline expires.
+TEST(ClippingPredictionEvalTest, PredictedMultipleTimesAndDetectedOnce) {
+ ClippingPredictorEvaluator evaluator(/*history_size=*/3);
+ evaluator.Observe(kNotDetected, kPredicted); // --+
+ evaluator.Observe(kNotDetected, kPredicted); // | --+
+ evaluator.Observe(kNotDetected, kPredicted); // | | --+
+ evaluator.Observe(kDetected, kNotPredicted); // <-+ <-+ <-+
+ EXPECT_EQ(evaluator.counters().true_positives, 3);
+ // The following observations do not generate any true negatives as they
+ // belong to the observation period of the last prediction - for which a
+ // detection has already been matched.
+ const int true_negatives = evaluator.counters().true_negatives;
+ evaluator.Observe(kNotDetected, kNotPredicted);
+ evaluator.Observe(kNotDetected, kNotPredicted);
+ EXPECT_EQ(evaluator.counters().true_negatives, true_negatives);
+
+ EXPECT_EQ(evaluator.counters().false_positives, 0);
+ EXPECT_EQ(evaluator.counters().false_negatives, 0);
+}
+
+// Checks that multiple consecutive predictions match multiple detections
+// observed before the expected detection deadline expires.
+TEST(ClippingPredictionEvalTest,
+ PredictedMultipleTimesAndDetectedMultipleTimes) {
+ ClippingPredictorEvaluator evaluator(/*history_size=*/3);
+ evaluator.Observe(kNotDetected, kPredicted); // --+
+ evaluator.Observe(kNotDetected, kPredicted); // | --+
+ evaluator.Observe(kNotDetected, kPredicted); // | | --+
+ evaluator.Observe(kDetected, kNotPredicted); // <-+ <-+ <-+
+ evaluator.Observe(kDetected, kNotPredicted); // <-+ <-+
+ EXPECT_EQ(evaluator.counters().true_positives, 3);
+ // The following observation does not generate a true negative as it belongs
+ // to the observation period of the last prediction - for which two detections
+ // have already been matched.
+ const int true_negatives = evaluator.counters().true_negatives;
+ evaluator.Observe(kNotDetected, kNotPredicted);
+ EXPECT_EQ(evaluator.counters().true_negatives, true_negatives);
+
+ EXPECT_EQ(evaluator.counters().false_positives, 0);
+ EXPECT_EQ(evaluator.counters().false_negatives, 0);
+}
+
+// Checks that multiple consecutive predictions match all the detections
+// observed before the expected detection deadline expires.
+TEST(ClippingPredictionEvalTest, PredictedMultipleTimesAndAllDetected) {
+ ClippingPredictorEvaluator evaluator(/*history_size=*/3);
+ evaluator.Observe(kNotDetected, kPredicted); // --+
+ evaluator.Observe(kNotDetected, kPredicted); // | --+
+ evaluator.Observe(kNotDetected, kPredicted); // | | --+
+ evaluator.Observe(kDetected, kNotPredicted); // <-+ <-+ <-+
+ evaluator.Observe(kDetected, kNotPredicted); // <-+ <-+
+ evaluator.Observe(kDetected, kNotPredicted); // <-+
+ EXPECT_EQ(evaluator.counters().true_positives, 3);
+
+ EXPECT_EQ(evaluator.counters().false_positives, 0);
+ EXPECT_EQ(evaluator.counters().false_negatives, 0);
+}
+
+// Checks that multiple non-consecutive predictions match all the detections
+// observed before the expected detection deadline expires.
+TEST(ClippingPredictionEvalTest, PredictedMultipleTimesWithGapAndAllDetected) {
+ ClippingPredictorEvaluator evaluator(/*history_size=*/3);
+ evaluator.Observe(kNotDetected, kPredicted); // --+
+ evaluator.Observe(kNotDetected, kNotPredicted); // |
+ evaluator.Observe(kNotDetected, kPredicted); // | --+
+ evaluator.Observe(kDetected, kNotPredicted); // <-+ <-+
+ evaluator.Observe(kDetected, kNotPredicted); // <-+
+ evaluator.Observe(kDetected, kNotPredicted); // <-+
+ EXPECT_EQ(evaluator.counters().true_positives, 2);
+
+ EXPECT_EQ(evaluator.counters().false_positives, 0);
+ EXPECT_EQ(evaluator.counters().false_negatives, 0);
+}
+
+class ClippingPredictorEvaluatorPredictionIntervalParameterization
+ : public ::testing::TestWithParam<std::tuple<int, int>> {
+ protected:
+ int num_extra_observe_calls() const { return std::get<0>(GetParam()); }
+ int history_size() const { return std::get<1>(GetParam()); }
+};
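+
+// In the tests below, the prediction interval returned by `Observe()` equals
+// the number of calls by which the earliest matching prediction preceded the
+// detection.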
+
+// Checks that the minimum prediction interval is returned if clipping is
+// correctly predicted just before clipping is detected - i.e., with the
+// smallest possible anticipation.
+TEST_P(ClippingPredictorEvaluatorPredictionIntervalParameterization,
+ MinimumPredictionInterval) {
+ ClippingPredictorEvaluator evaluator(history_size());
+ for (int i = 0; i < num_extra_observe_calls(); ++i) {
+ EXPECT_EQ(evaluator.Observe(kNotDetected, kNotPredicted), absl::nullopt);
+ }
+ EXPECT_EQ(evaluator.Observe(kNotDetected, kPredicted), absl::nullopt);
+ EXPECT_THAT(evaluator.Observe(kDetected, kNotPredicted), Optional(Eq(1)));
+}
+
+// Checks that a prediction interval between the minimum and the maximum is
+// returned if clipping is correctly predicted before it is detected but not as
+// early as possible.
+TEST_P(ClippingPredictorEvaluatorPredictionIntervalParameterization,
+ IntermediatePredictionInterval) {
+ ClippingPredictorEvaluator evaluator(history_size());
+ for (int i = 0; i < num_extra_observe_calls(); ++i) {
+ EXPECT_EQ(evaluator.Observe(kNotDetected, kNotPredicted), absl::nullopt);
+ }
+ EXPECT_EQ(evaluator.Observe(kNotDetected, kPredicted), absl::nullopt);
+ EXPECT_EQ(evaluator.Observe(kNotDetected, kPredicted), absl::nullopt);
+ EXPECT_EQ(evaluator.Observe(kNotDetected, kPredicted), absl::nullopt);
+ EXPECT_THAT(evaluator.Observe(kDetected, kNotPredicted), Optional(Eq(3)));
+}
+
+// Checks that the maximum prediction interval is returned if clipping is
+// correctly predicted as early as possible.
+TEST_P(ClippingPredictorEvaluatorPredictionIntervalParameterization,
+ MaximumPredictionInterval) {
+ ClippingPredictorEvaluator evaluator(history_size());
+ for (int i = 0; i < num_extra_observe_calls(); ++i) {
+ EXPECT_EQ(evaluator.Observe(kNotDetected, kNotPredicted), absl::nullopt);
+ }
+ for (int i = 0; i < history_size(); ++i) {
+ EXPECT_EQ(evaluator.Observe(kNotDetected, kPredicted), absl::nullopt);
+ }
+ EXPECT_THAT(evaluator.Observe(kDetected, kNotPredicted),
+ Optional(Eq(history_size())));
+}
+
+// Checks that `Observe()` returns the prediction interval as soon as a true
+// positive is found and never again while ongoing detections are matched to a
+// previously observed prediction.
+TEST_P(ClippingPredictorEvaluatorPredictionIntervalParameterization,
+ PredictionIntervalReturnedOnce) {
+ ASSERT_LT(num_extra_observe_calls(), history_size());
+ ClippingPredictorEvaluator evaluator(history_size());
+ // Observe predictions before detection.
+ for (int i = 0; i < num_extra_observe_calls(); ++i) {
+ EXPECT_EQ(evaluator.Observe(kNotDetected, kPredicted), absl::nullopt);
+ }
+ // Observe a detection.
+ absl::optional<int> prediction_interval =
+ evaluator.Observe(kDetected, kNotPredicted);
+ EXPECT_TRUE(prediction_interval.has_value());
+  // `Observe()` does not return a prediction interval again for the ongoing
+  // detections that are matched while a detection is still expected.
+ for (int i = 0; i < history_size(); ++i) {
+ EXPECT_EQ(evaluator.Observe(kDetected, kNotPredicted), absl::nullopt);
+ }
+}
+
+INSTANTIATE_TEST_SUITE_P(
+ ClippingPredictionEvalTest,
+ ClippingPredictorEvaluatorPredictionIntervalParameterization,
+ ::testing::Combine(::testing::Values(1, 3, 5), ::testing::Values(7, 11)));
+
+// Checks that, when a detection is expected, the expectation is not removed
+// before the detection deadline expires unless `RemoveExpectations()` is
+// called.
+TEST(ClippingPredictionEvalTest, NoFalsePositivesAfterRemoveExpectations) {
+ constexpr int kHistorySize = 2;
+
+ // Case 1: `RemoveExpectations()` is NOT called.
+ ClippingPredictorEvaluator e1(kHistorySize);
+ e1.Observe(kNotDetected, kPredicted);
+ ASSERT_EQ(e1.counters().true_negatives, 1);
+ e1.Observe(kNotDetected, kNotPredicted);
+ e1.Observe(kNotDetected, kNotPredicted);
+ EXPECT_EQ(e1.counters().true_positives, 0);
+ EXPECT_EQ(e1.counters().true_negatives, 1);
+ EXPECT_EQ(e1.counters().false_positives, 1);
+ EXPECT_EQ(e1.counters().false_negatives, 0);
+
+ // Case 2: `RemoveExpectations()` is called.
+ ClippingPredictorEvaluator e2(kHistorySize);
+ e2.Observe(kNotDetected, kPredicted);
+ ASSERT_EQ(e2.counters().true_negatives, 1);
+ e2.RemoveExpectations();
+ e2.Observe(kNotDetected, kNotPredicted);
+ e2.Observe(kNotDetected, kNotPredicted);
+ EXPECT_EQ(e2.counters().true_positives, 0);
+ EXPECT_EQ(e2.counters().true_negatives, 3);
+ EXPECT_EQ(e2.counters().false_positives, 0);
+ EXPECT_EQ(e2.counters().false_negatives, 0);
+}
+
+class ComputeClippingPredictionMetricsParameterization
+ : public ::testing::TestWithParam<int> {
+ protected:
+ int true_negatives() const { return GetParam(); }
+};
+
+// Checks that `ComputeClippingPredictionMetrics()` does not return metrics if
+// precision cannot be defined - i.e., TP + FP is zero.
+TEST_P(ComputeClippingPredictionMetricsParameterization,
+ NoMetricsWithUndefinedPrecision) {
+ EXPECT_EQ(ComputeClippingPredictionMetrics(
+ /*counters=*/{/*true_positives=*/0,
+ /*true_negatives=*/true_negatives(),
+ /*false_positives=*/0,
+ /*false_negatives=*/0}),
+ absl::nullopt);
+ EXPECT_EQ(ComputeClippingPredictionMetrics(
+ /*counters=*/{/*true_positives=*/0,
+ /*true_negatives=*/true_negatives(),
+ /*false_positives=*/0,
+ /*false_negatives=*/1}),
+ absl::nullopt);
+}
+
+// Checks that `ComputeClippingPredictionMetrics()` does not return metrics if
+// recall cannot be defined - i.e., TP + FN is zero.
+TEST_P(ComputeClippingPredictionMetricsParameterization,
+ NoMetricsWithUndefinedRecall) {
+ EXPECT_EQ(ComputeClippingPredictionMetrics(
+ /*counters=*/{/*true_positives=*/0,
+ /*true_negatives=*/true_negatives(),
+ /*false_positives=*/0,
+ /*false_negatives=*/0}),
+ absl::nullopt);
+ EXPECT_EQ(ComputeClippingPredictionMetrics(
+ /*counters=*/{/*true_positives=*/0,
+ /*true_negatives=*/true_negatives(),
+ /*false_positives=*/1,
+ /*false_negatives=*/0}),
+ absl::nullopt);
+}
+
+// Checks that `ComputeClippingPredictionMetrics()` does not return metrics if
+// the F1 score cannot be defined - i.e., P + R is zero.
+TEST_P(ComputeClippingPredictionMetricsParameterization,
+ NoMetricsWithUndefinedF1Score) {
+ EXPECT_EQ(ComputeClippingPredictionMetrics(
+ /*counters=*/{/*true_positives=*/0,
+ /*true_negatives=*/true_negatives(),
+ /*false_positives=*/1,
+ /*false_negatives=*/1}),
+ absl::nullopt);
+}
+
+// Checks that the highest precision is reached when there are no false
+// positives.
+TEST_P(ComputeClippingPredictionMetricsParameterization, HighestPrecision) {
+ EXPECT_THAT(ComputeClippingPredictionMetrics(
+ /*counters=*/{/*true_positives=*/1,
+ /*true_negatives=*/true_negatives(),
+ /*false_positives=*/0,
+ /*false_negatives=*/1}),
+ Optional(Field(&ClippingPredictionMetrics::precision, Eq(1.0f))));
+}
+
+// Checks that the highest recall is reached when there are no false
+// negatives.
+TEST_P(ComputeClippingPredictionMetricsParameterization, HighestRecall) {
+ EXPECT_THAT(ComputeClippingPredictionMetrics(
+ /*counters=*/{/*true_positives=*/1,
+ /*true_negatives=*/true_negatives(),
+ /*false_positives=*/1,
+ /*false_negatives=*/0}),
+ Optional(Field(&ClippingPredictionMetrics::recall, Eq(1.0f))));
+}
+
+// Checks that 50% precision and 50% recall are reached when the numbers of
+// true positives, false positives and false negatives are equal.
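+// With TP = FP = FN, precision = TP / (TP + FP) and recall = TP / (TP + FN)
+// are both 0.5, and the F1 score 2 * P * R / (P + R) is also 0.5.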
+TEST_P(ComputeClippingPredictionMetricsParameterization,
+ PrecisionAndRecall50Percent) {
+ absl::optional<ClippingPredictionMetrics> metrics =
+ ComputeClippingPredictionMetrics(
+ /*counters=*/{/*true_positives=*/42,
+ /*true_negatives=*/true_negatives(),
+ /*false_positives=*/42,
+ /*false_negatives=*/42});
+ ASSERT_TRUE(metrics.has_value());
+ EXPECT_EQ(metrics->precision, 0.5f);
+ EXPECT_EQ(metrics->recall, 0.5f);
+ EXPECT_EQ(metrics->f1_score, 0.5f);
+}
+
+// Checks that the highest precision, recall and F1 score are jointly reached
+// when there are no false positives and no false negatives.
+TEST_P(ComputeClippingPredictionMetricsParameterization,
+ HighestPrecisionRecallF1Score) {
+ absl::optional<ClippingPredictionMetrics> metrics =
+ ComputeClippingPredictionMetrics(
+ /*counters=*/{/*true_positives=*/123,
+ /*true_negatives=*/true_negatives(),
+ /*false_positives=*/0,
+ /*false_negatives=*/0});
+ ASSERT_TRUE(metrics.has_value());
+ EXPECT_EQ(metrics->precision, 1.0f);
+ EXPECT_EQ(metrics->recall, 1.0f);
+ EXPECT_EQ(metrics->f1_score, 1.0f);
+}
+
+// Checks that precision is lower than recall when there are more false
+// positives than false negatives.
+TEST_P(ComputeClippingPredictionMetricsParameterization,
+ PrecisionLowerThanRecall) {
+ absl::optional<ClippingPredictionMetrics> metrics =
+ ComputeClippingPredictionMetrics(
+ /*counters=*/{/*true_positives=*/1,
+ /*true_negatives=*/true_negatives(),
+ /*false_positives=*/8,
+ /*false_negatives=*/1});
+ ASSERT_TRUE(metrics.has_value());
+ EXPECT_LT(metrics->precision, metrics->recall);
+}
+
+// Checks that precision is greater than recall when there are fewer false
+// positives than false negatives.
+TEST_P(ComputeClippingPredictionMetricsParameterization,
+ PrecisionGreaterThanRecall) {
+ absl::optional<ClippingPredictionMetrics> metrics =
+ ComputeClippingPredictionMetrics(
+ /*counters=*/{/*true_positives=*/1,
+ /*true_negatives=*/true_negatives(),
+ /*false_positives=*/1,
+ /*false_negatives=*/8});
+ ASSERT_TRUE(metrics.has_value());
+ EXPECT_GT(metrics->precision, metrics->recall);
+}
+
+// Checks that swapping precision and recall does not change the F1 score.
+TEST_P(ComputeClippingPredictionMetricsParameterization, SameF1Score) {
+ absl::optional<ClippingPredictionMetrics> m1 =
+ ComputeClippingPredictionMetrics(
+ /*counters=*/{/*true_positives=*/1,
+ /*true_negatives=*/true_negatives(),
+ /*false_positives=*/8,
+ /*false_negatives=*/1});
+ absl::optional<ClippingPredictionMetrics> m2 =
+ ComputeClippingPredictionMetrics(
+ /*counters=*/{/*true_positives=*/1,
+ /*true_negatives=*/true_negatives(),
+ /*false_positives=*/1,
+ /*false_negatives=*/8});
+ // Preconditions.
+ ASSERT_TRUE(m1.has_value());
+ ASSERT_TRUE(m2.has_value());
+ ASSERT_EQ(m1->precision, m2->recall);
+ ASSERT_EQ(m1->recall, m2->precision);
+ // Same F1 score.
+ EXPECT_EQ(m1->f1_score, m2->f1_score);
+}
+
+INSTANTIATE_TEST_SUITE_P(ClippingPredictionEvalTest,
+ ComputeClippingPredictionMetricsParameterization,
+ ::testing::Values(0, 1, 11));
+
+} // namespace
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/agc/clipping_predictor_gn/moz.build b/third_party/libwebrtc/modules/audio_processing/agc/clipping_predictor_gn/moz.build
new file mode 100644
index 0000000000..fbbf08b2b4
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc/clipping_predictor_gn/moz.build
@@ -0,0 +1,212 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/audio_processing/agc/clipping_predictor.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("clipping_predictor_gn")
diff --git a/third_party/libwebrtc/modules/audio_processing/agc/clipping_predictor_level_buffer.cc b/third_party/libwebrtc/modules/audio_processing/agc/clipping_predictor_level_buffer.cc
new file mode 100644
index 0000000000..bc33cda040
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc/clipping_predictor_level_buffer.cc
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/agc/clipping_predictor_level_buffer.h"
+
+#include <algorithm>
+#include <cmath>
+
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+bool ClippingPredictorLevelBuffer::Level::operator==(const Level& level) const {
+ constexpr float kEpsilon = 1e-6f;
+ return std::fabs(average - level.average) < kEpsilon &&
+ std::fabs(max - level.max) < kEpsilon;
+}
+
+ClippingPredictorLevelBuffer::ClippingPredictorLevelBuffer(int capacity)
+ : tail_(-1), size_(0), data_(std::max(1, capacity)) {
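+  // `tail_` starts at -1 so that the first `Push()` writes to index 0; the
+  // capacity is clamped to at least one item.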
+ if (capacity > kMaxCapacity) {
+ RTC_LOG(LS_WARNING) << "[agc]: ClippingPredictorLevelBuffer exceeds the "
+ << "maximum allowed capacity. Capacity: " << capacity;
+ }
+ RTC_DCHECK(!data_.empty());
+}
+
+void ClippingPredictorLevelBuffer::Reset() {
+ tail_ = -1;
+ size_ = 0;
+}
+
+void ClippingPredictorLevelBuffer::Push(Level level) {
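+  // Advance `tail_` to the next slot, wrapping around at the capacity so that,
+  // once the buffer is full, the least recently pushed item is overwritten.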
+ ++tail_;
+ if (tail_ == Capacity()) {
+ tail_ = 0;
+ }
+ if (size_ < Capacity()) {
+ size_++;
+ }
+ data_[tail_] = level;
+}
+
+// TODO(bugs.webrtc.org/12774): Optimize partial computation for long buffers.
+absl::optional<ClippingPredictorLevelBuffer::Level>
+ClippingPredictorLevelBuffer::ComputePartialMetrics(int delay,
+ int num_items) const {
+ RTC_DCHECK_GE(delay, 0);
+ RTC_DCHECK_LT(delay, Capacity());
+ RTC_DCHECK_GT(num_items, 0);
+ RTC_DCHECK_LE(num_items, Capacity());
+ RTC_DCHECK_LE(delay + num_items, Capacity());
+ if (delay + num_items > Size()) {
+ return absl::nullopt;
+ }
+ float sum = 0.0f;
+ float max = 0.0f;
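+  // Walk backwards from the item `delay` steps behind the most recently pushed
+  // one, wrapping around the circular buffer when the index goes negative.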
+ for (int i = 0; i < num_items && i < Size(); ++i) {
+ int idx = tail_ - delay - i;
+ if (idx < 0) {
+ idx += Capacity();
+ }
+ sum += data_[idx].average;
+ max = std::fmax(data_[idx].max, max);
+ }
+ return absl::optional<Level>({sum / static_cast<float>(num_items), max});
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/agc/clipping_predictor_level_buffer.h b/third_party/libwebrtc/modules/audio_processing/agc/clipping_predictor_level_buffer.h
new file mode 100644
index 0000000000..f3e8368194
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc/clipping_predictor_level_buffer.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AGC_CLIPPING_PREDICTOR_LEVEL_BUFFER_H_
+#define MODULES_AUDIO_PROCESSING_AGC_CLIPPING_PREDICTOR_LEVEL_BUFFER_H_
+
+#include <memory>
+#include <vector>
+
+#include "absl/types/optional.h"
+
+namespace webrtc {
+
+// A circular buffer to store frame-wise `Level` items for clipping prediction.
+// The current implementation is not optimized for large buffer lengths.
+class ClippingPredictorLevelBuffer {
+ public:
+ struct Level {
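+    // Average and maximum level observed over one analysis frame.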
+ float average;
+ float max;
+ bool operator==(const Level& level) const;
+ };
+
+ // Recommended maximum capacity. It is possible to create a buffer with a
+ // larger capacity, but the implementation is not optimized for large values.
+ static constexpr int kMaxCapacity = 100;
+
+ // Ctor. Sets the buffer capacity to max(1, `capacity`) and logs a warning
+ // message if the capacity is greater than `kMaxCapacity`.
+ explicit ClippingPredictorLevelBuffer(int capacity);
+ ~ClippingPredictorLevelBuffer() {}
+ ClippingPredictorLevelBuffer(const ClippingPredictorLevelBuffer&) = delete;
+ ClippingPredictorLevelBuffer& operator=(const ClippingPredictorLevelBuffer&) =
+ delete;
+
+ void Reset();
+
+ // Returns the current number of items stored in the buffer.
+ int Size() const { return size_; }
+
+ // Returns the capacity of the buffer.
+ int Capacity() const { return data_.size(); }
+
+ // Adds a `level` item into the circular buffer `data_`. Stores at most
+ // `Capacity()` items. If more items are pushed, the new item replaces the
+ // least recently pushed item.
+ void Push(Level level);
+
+  // If at least `num_items` + `delay` items have been pushed, returns the
+  // average and maximum value over the `num_items` most recently pushed items
+  // at delays `delay` to `delay + num_items - 1` (a delay equal to zero
+  // corresponds to the most recently pushed item). `delay` and `num_items`
+  // must be chosen so that `delay + num_items` does not exceed the capacity
+  // of the buffer.
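+  // For example, after pushing levels A, B, C and D (with D the most recent),
+  // `ComputePartialMetrics(/*delay=*/1, /*num_items=*/2)` aggregates C and B.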
+ absl::optional<Level> ComputePartialMetrics(int delay, int num_items) const;
+
+ private:
+ int tail_;
+ int size_;
+ std::vector<Level> data_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AGC_CLIPPING_PREDICTOR_LEVEL_BUFFER_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/agc/clipping_predictor_level_buffer_gn/moz.build b/third_party/libwebrtc/modules/audio_processing/agc/clipping_predictor_level_buffer_gn/moz.build
new file mode 100644
index 0000000000..e99a2d6c21
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc/clipping_predictor_level_buffer_gn/moz.build
@@ -0,0 +1,205 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/audio_processing/agc/clipping_predictor_level_buffer.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("clipping_predictor_level_buffer_gn")
diff --git a/third_party/libwebrtc/modules/audio_processing/agc/clipping_predictor_level_buffer_unittest.cc b/third_party/libwebrtc/modules/audio_processing/agc/clipping_predictor_level_buffer_unittest.cc
new file mode 100644
index 0000000000..7e594a1eca
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc/clipping_predictor_level_buffer_unittest.cc
@@ -0,0 +1,131 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/agc/clipping_predictor_level_buffer.h"
+
+#include <algorithm>
+
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+using ::testing::Eq;
+using ::testing::Optional;
+
+class ClippingPredictorLevelBufferParametrization
+ : public ::testing::TestWithParam<int> {
+ protected:
+ int capacity() const { return GetParam(); }
+};
+
+TEST_P(ClippingPredictorLevelBufferParametrization, CheckEmptyBufferSize) {
+ ClippingPredictorLevelBuffer buffer(capacity());
+ EXPECT_EQ(buffer.Capacity(), std::max(capacity(), 1));
+ EXPECT_EQ(buffer.Size(), 0);
+}
+
+TEST_P(ClippingPredictorLevelBufferParametrization, CheckHalfEmptyBufferSize) {
+ ClippingPredictorLevelBuffer buffer(capacity());
+ for (int i = 0; i < buffer.Capacity() / 2; ++i) {
+ buffer.Push({2, 4});
+ }
+ EXPECT_EQ(buffer.Capacity(), std::max(capacity(), 1));
+ EXPECT_EQ(buffer.Size(), std::max(capacity(), 1) / 2);
+}
+
+TEST_P(ClippingPredictorLevelBufferParametrization, CheckFullBufferSize) {
+ ClippingPredictorLevelBuffer buffer(capacity());
+ for (int i = 0; i < buffer.Capacity(); ++i) {
+ buffer.Push({2, 4});
+ }
+ EXPECT_EQ(buffer.Capacity(), std::max(capacity(), 1));
+ EXPECT_EQ(buffer.Size(), std::max(capacity(), 1));
+}
+
+TEST_P(ClippingPredictorLevelBufferParametrization, CheckLargeBufferSize) {
+ ClippingPredictorLevelBuffer buffer(capacity());
+ for (int i = 0; i < 2 * buffer.Capacity(); ++i) {
+ buffer.Push({2, 4});
+ }
+ EXPECT_EQ(buffer.Capacity(), std::max(capacity(), 1));
+ EXPECT_EQ(buffer.Size(), std::max(capacity(), 1));
+}
+
+TEST_P(ClippingPredictorLevelBufferParametrization, CheckSizeAfterReset) {
+ ClippingPredictorLevelBuffer buffer(capacity());
+ buffer.Push({1, 1});
+ buffer.Push({1, 1});
+ buffer.Reset();
+ EXPECT_EQ(buffer.Capacity(), std::max(capacity(), 1));
+ EXPECT_EQ(buffer.Size(), 0);
+ buffer.Push({1, 1});
+ EXPECT_EQ(buffer.Capacity(), std::max(capacity(), 1));
+ EXPECT_EQ(buffer.Size(), 1);
+}
+
+INSTANTIATE_TEST_SUITE_P(ClippingPredictorLevelBufferTest,
+ ClippingPredictorLevelBufferParametrization,
+ ::testing::Values(-1, 0, 1, 123));
+
+TEST(ClippingPredictorLevelBufferTest, CheckMetricsAfterFullBuffer) {
+ ClippingPredictorLevelBuffer buffer(/*capacity=*/2);
+ buffer.Push({1, 2});
+ buffer.Push({3, 6});
+ EXPECT_THAT(buffer.ComputePartialMetrics(/*delay=*/0, /*num_items=*/1),
+ Optional(Eq(ClippingPredictorLevelBuffer::Level{3, 6})));
+ EXPECT_THAT(buffer.ComputePartialMetrics(/*delay=*/1, /*num_items=*/1),
+ Optional(Eq(ClippingPredictorLevelBuffer::Level{1, 2})));
+ EXPECT_THAT(buffer.ComputePartialMetrics(/*delay=*/0, /*num_items=*/2),
+ Optional(Eq(ClippingPredictorLevelBuffer::Level{2, 6})));
+}
+
+TEST(ClippingPredictorLevelBufferTest, CheckMetricsAfterPushBeyondCapacity) {
+ ClippingPredictorLevelBuffer buffer(/*capacity=*/2);
+ buffer.Push({1, 1});
+ buffer.Push({3, 6});
+ buffer.Push({5, 10});
+ buffer.Push({7, 14});
+ buffer.Push({6, 12});
+ EXPECT_THAT(buffer.ComputePartialMetrics(/*delay=*/0, /*num_items=*/1),
+ Optional(Eq(ClippingPredictorLevelBuffer::Level{6, 12})));
+ EXPECT_THAT(buffer.ComputePartialMetrics(/*delay=*/1, /*num_items=*/1),
+ Optional(Eq(ClippingPredictorLevelBuffer::Level{7, 14})));
+ EXPECT_THAT(buffer.ComputePartialMetrics(/*delay=*/0, /*num_items=*/2),
+ Optional(Eq(ClippingPredictorLevelBuffer::Level{6.5f, 14})));
+}
+
+TEST(ClippingPredictorLevelBufferTest, CheckMetricsAfterTooFewItems) {
+ ClippingPredictorLevelBuffer buffer(/*capacity=*/4);
+ buffer.Push({1, 2});
+ buffer.Push({3, 6});
+ EXPECT_EQ(buffer.ComputePartialMetrics(/*delay=*/0, /*num_items=*/3),
+ absl::nullopt);
+ EXPECT_EQ(buffer.ComputePartialMetrics(/*delay=*/2, /*num_items=*/1),
+ absl::nullopt);
+}
+
+TEST(ClippingPredictorLevelBufferTest, CheckMetricsAfterReset) {
+ ClippingPredictorLevelBuffer buffer(/*capacity=*/2);
+ buffer.Push({1, 2});
+ buffer.Reset();
+ buffer.Push({5, 10});
+ buffer.Push({7, 14});
+ EXPECT_THAT(buffer.ComputePartialMetrics(/*delay=*/0, /*num_items=*/1),
+ Optional(Eq(ClippingPredictorLevelBuffer::Level{7, 14})));
+ EXPECT_THAT(buffer.ComputePartialMetrics(/*delay=*/0, /*num_items=*/2),
+ Optional(Eq(ClippingPredictorLevelBuffer::Level{6, 14})));
+ EXPECT_THAT(buffer.ComputePartialMetrics(/*delay=*/1, /*num_items=*/1),
+ Optional(Eq(ClippingPredictorLevelBuffer::Level{5, 10})));
+}
+
+} // namespace
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/agc/clipping_predictor_unittest.cc b/third_party/libwebrtc/modules/audio_processing/agc/clipping_predictor_unittest.cc
new file mode 100644
index 0000000000..e848e1a724
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc/clipping_predictor_unittest.cc
@@ -0,0 +1,491 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/agc/clipping_predictor.h"
+
+#include <cstdint>
+#include <limits>
+#include <tuple>
+
+#include "rtc_base/checks.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+using ::testing::Eq;
+using ::testing::Optional;
+using ClippingPredictorConfig = AudioProcessing::Config::GainController1::
+ AnalogGainController::ClippingPredictor;
+using ClippingPredictorMode = AudioProcessing::Config::GainController1::
+ AnalogGainController::ClippingPredictor::Mode;
+
+constexpr int kSampleRateHz = 32000;
+constexpr int kNumChannels = 1;
+constexpr int kSamplesPerChannel = kSampleRateHz / 100;
+constexpr int kMaxMicLevel = 255;
+constexpr int kMinMicLevel = 12;
+constexpr int kDefaultClippedLevelStep = 15;
+constexpr float kMaxSampleS16 =
+ static_cast<float>(std::numeric_limits<int16_t>::max());
+
+// Threshold in dB corresponding to a signal with an amplitude equal to 99% of
+// the dynamic range - i.e., computed as `20*log10(0.99)`.
+constexpr float kClippingThresholdDb = -0.08729610804900176f;
+
+void CallAnalyze(int num_calls,
+ const AudioFrameView<const float>& frame,
+ ClippingPredictor& predictor) {
+ for (int i = 0; i < num_calls; ++i) {
+ predictor.Analyze(frame);
+ }
+}
+
+// Creates and analyzes an audio frame with a non-zero (approx. 4.15dB) crest
+// factor.
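+// The repeating ramp 0.1, 0.2, ..., 1.0 (scaled by `peak_ratio`) has an RMS
+// equal to sqrt(0.385) ~= 0.62 times its peak, hence a crest factor of
+// 20*log10(1/0.62) ~= 4.15 dB.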
+void AnalyzeNonZeroCrestFactorAudio(int num_calls,
+ int num_channels,
+ float peak_ratio,
+ ClippingPredictor& predictor) {
+ RTC_DCHECK_GT(num_calls, 0);
+ RTC_DCHECK_GT(num_channels, 0);
+ RTC_DCHECK_LE(peak_ratio, 1.0f);
+ std::vector<float*> audio(num_channels);
+ std::vector<float> audio_data(num_channels * kSamplesPerChannel, 0.0f);
+ for (int channel = 0; channel < num_channels; ++channel) {
+ audio[channel] = &audio_data[channel * kSamplesPerChannel];
+ for (int sample = 0; sample < kSamplesPerChannel; sample += 10) {
+ audio[channel][sample] = 0.1f * peak_ratio * kMaxSampleS16;
+ audio[channel][sample + 1] = 0.2f * peak_ratio * kMaxSampleS16;
+ audio[channel][sample + 2] = 0.3f * peak_ratio * kMaxSampleS16;
+ audio[channel][sample + 3] = 0.4f * peak_ratio * kMaxSampleS16;
+ audio[channel][sample + 4] = 0.5f * peak_ratio * kMaxSampleS16;
+ audio[channel][sample + 5] = 0.6f * peak_ratio * kMaxSampleS16;
+ audio[channel][sample + 6] = 0.7f * peak_ratio * kMaxSampleS16;
+ audio[channel][sample + 7] = 0.8f * peak_ratio * kMaxSampleS16;
+ audio[channel][sample + 8] = 0.9f * peak_ratio * kMaxSampleS16;
+ audio[channel][sample + 9] = 1.0f * peak_ratio * kMaxSampleS16;
+ }
+ }
+ AudioFrameView<const float> frame(audio.data(), num_channels,
+ kSamplesPerChannel);
+ CallAnalyze(num_calls, frame, predictor);
+}
+
+void CheckChannelEstimatesWithValue(int num_channels,
+ int level,
+ int default_step,
+ int min_mic_level,
+ int max_mic_level,
+ const ClippingPredictor& predictor,
+ int expected) {
+ for (int i = 0; i < num_channels; ++i) {
+ SCOPED_TRACE(i);
+ EXPECT_THAT(predictor.EstimateClippedLevelStep(
+ i, level, default_step, min_mic_level, max_mic_level),
+ Optional(Eq(expected)));
+ }
+}
+
+void CheckChannelEstimatesWithoutValue(int num_channels,
+ int level,
+ int default_step,
+ int min_mic_level,
+ int max_mic_level,
+ const ClippingPredictor& predictor) {
+ for (int i = 0; i < num_channels; ++i) {
+ SCOPED_TRACE(i);
+ EXPECT_EQ(predictor.EstimateClippedLevelStep(i, level, default_step,
+ min_mic_level, max_mic_level),
+ absl::nullopt);
+ }
+}
+
+// Creates and analyzes an audio frame with a zero crest factor.
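+// A constant frame has equal peak and RMS levels, i.e., a crest factor of
+// 20*log10(1) = 0 dB.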
+void AnalyzeZeroCrestFactorAudio(int num_calls,
+ int num_channels,
+ float peak_ratio,
+ ClippingPredictor& predictor) {
+ RTC_DCHECK_GT(num_calls, 0);
+ RTC_DCHECK_GT(num_channels, 0);
+ RTC_DCHECK_LE(peak_ratio, 1.f);
+ std::vector<float*> audio(num_channels);
+ std::vector<float> audio_data(num_channels * kSamplesPerChannel, 0.f);
+ for (int channel = 0; channel < num_channels; ++channel) {
+ audio[channel] = &audio_data[channel * kSamplesPerChannel];
+ for (int sample = 0; sample < kSamplesPerChannel; ++sample) {
+ audio[channel][sample] = peak_ratio * kMaxSampleS16;
+ }
+ }
+ auto frame = AudioFrameView<const float>(audio.data(), num_channels,
+ kSamplesPerChannel);
+ CallAnalyze(num_calls, frame, predictor);
+}
+
+TEST(ClippingPeakPredictorTest, NoPredictorCreated) {
+ auto predictor =
+ CreateClippingPredictor(kNumChannels, /*config=*/{/*enabled=*/false});
+ EXPECT_FALSE(predictor);
+}
+
+TEST(ClippingPeakPredictorTest, ClippingEventPredictionCreated) {
+  // TODO(bugs.webrtc.org/12874): Use designated initializers once fixed.
+ auto predictor = CreateClippingPredictor(
+ kNumChannels,
+ /*config=*/{/*enabled=*/true,
+ /*mode=*/ClippingPredictorMode::kClippingEventPrediction});
+ EXPECT_TRUE(predictor);
+}
+
+TEST(ClippingPeakPredictorTest, AdaptiveStepClippingPeakPredictionCreated) {
+  // TODO(bugs.webrtc.org/12874): Use designated initializers once fixed.
+ auto predictor = CreateClippingPredictor(
+ kNumChannels, /*config=*/{
+ /*enabled=*/true,
+ /*mode=*/ClippingPredictorMode::kAdaptiveStepClippingPeakPrediction});
+ EXPECT_TRUE(predictor);
+}
+
+TEST(ClippingPeakPredictorTest, FixedStepClippingPeakPredictionCreated) {
+  // TODO(bugs.webrtc.org/12874): Use designated initializers once fixed.
+ auto predictor = CreateClippingPredictor(
+ kNumChannels, /*config=*/{
+ /*enabled=*/true,
+ /*mode=*/ClippingPredictorMode::kFixedStepClippingPeakPrediction});
+ EXPECT_TRUE(predictor);
+}
+
+class ClippingPredictorParameterization
+ : public ::testing::TestWithParam<std::tuple<int, int, int, int>> {
+ protected:
+ int num_channels() const { return std::get<0>(GetParam()); }
+ ClippingPredictorConfig GetConfig(ClippingPredictorMode mode) const {
+    // TODO(bugs.webrtc.org/12874): Use designated initializers once fixed.
+ return {/*enabled=*/true,
+ /*mode=*/mode,
+ /*window_length=*/std::get<1>(GetParam()),
+ /*reference_window_length=*/std::get<2>(GetParam()),
+ /*reference_window_delay=*/std::get<3>(GetParam()),
+ /*clipping_threshold=*/-1.0f,
+ /*crest_factor_margin=*/0.5f};
+ }
+};
+
+TEST_P(ClippingPredictorParameterization,
+ CheckClippingEventPredictorEstimateAfterCrestFactorDrop) {
+ const ClippingPredictorConfig config =
+ GetConfig(ClippingPredictorMode::kClippingEventPrediction);
+ if (config.reference_window_length + config.reference_window_delay <=
+ config.window_length) {
+ return;
+ }
+ auto predictor = CreateClippingPredictor(num_channels(), config);
+ AnalyzeNonZeroCrestFactorAudio(
+ /*num_calls=*/config.reference_window_length +
+ config.reference_window_delay - config.window_length,
+ num_channels(), /*peak_ratio=*/0.99f, *predictor);
+ CheckChannelEstimatesWithoutValue(num_channels(), /*level=*/255,
+ kDefaultClippedLevelStep, kMinMicLevel,
+ kMaxMicLevel, *predictor);
+ AnalyzeZeroCrestFactorAudio(config.window_length, num_channels(),
+ /*peak_ratio=*/0.99f, *predictor);
+ CheckChannelEstimatesWithValue(
+ num_channels(), /*level=*/255, kDefaultClippedLevelStep, kMinMicLevel,
+ kMaxMicLevel, *predictor, kDefaultClippedLevelStep);
+}
+
+TEST_P(ClippingPredictorParameterization,
+ CheckClippingEventPredictorNoEstimateAfterConstantCrestFactor) {
+ const ClippingPredictorConfig config =
+ GetConfig(ClippingPredictorMode::kClippingEventPrediction);
+ if (config.reference_window_length + config.reference_window_delay <=
+ config.window_length) {
+ return;
+ }
+ auto predictor = CreateClippingPredictor(num_channels(), config);
+ AnalyzeNonZeroCrestFactorAudio(
+ /*num_calls=*/config.reference_window_length +
+ config.reference_window_delay - config.window_length,
+ num_channels(), /*peak_ratio=*/0.99f, *predictor);
+ CheckChannelEstimatesWithoutValue(num_channels(), /*level=*/255,
+ kDefaultClippedLevelStep, kMinMicLevel,
+ kMaxMicLevel, *predictor);
+ AnalyzeNonZeroCrestFactorAudio(/*num_calls=*/config.window_length,
+ num_channels(),
+ /*peak_ratio=*/0.99f, *predictor);
+ CheckChannelEstimatesWithoutValue(num_channels(), /*level=*/255,
+ kDefaultClippedLevelStep, kMinMicLevel,
+ kMaxMicLevel, *predictor);
+}
+
+TEST_P(ClippingPredictorParameterization,
+ CheckClippingPeakPredictorEstimateAfterHighCrestFactor) {
+ const ClippingPredictorConfig config =
+ GetConfig(ClippingPredictorMode::kAdaptiveStepClippingPeakPrediction);
+ if (config.reference_window_length + config.reference_window_delay <=
+ config.window_length) {
+ return;
+ }
+ auto predictor = CreateClippingPredictor(num_channels(), config);
+ AnalyzeNonZeroCrestFactorAudio(
+ /*num_calls=*/config.reference_window_length +
+ config.reference_window_delay - config.window_length,
+ num_channels(), /*peak_ratio=*/0.99f, *predictor);
+ CheckChannelEstimatesWithoutValue(num_channels(), /*level=*/255,
+ kDefaultClippedLevelStep, kMinMicLevel,
+ kMaxMicLevel, *predictor);
+ AnalyzeNonZeroCrestFactorAudio(/*num_calls=*/config.window_length,
+ num_channels(),
+ /*peak_ratio=*/0.99f, *predictor);
+ CheckChannelEstimatesWithValue(
+ num_channels(), /*level=*/255, kDefaultClippedLevelStep, kMinMicLevel,
+ kMaxMicLevel, *predictor, kDefaultClippedLevelStep);
+}
+
+TEST_P(ClippingPredictorParameterization,
+ CheckClippingPeakPredictorNoEstimateAfterLowCrestFactor) {
+ const ClippingPredictorConfig config =
+ GetConfig(ClippingPredictorMode::kAdaptiveStepClippingPeakPrediction);
+ if (config.reference_window_length + config.reference_window_delay <=
+ config.window_length) {
+ return;
+ }
+ auto predictor = CreateClippingPredictor(num_channels(), config);
+ AnalyzeZeroCrestFactorAudio(
+ /*num_calls=*/config.reference_window_length +
+ config.reference_window_delay - config.window_length,
+ num_channels(), /*peak_ratio=*/0.99f, *predictor);
+ CheckChannelEstimatesWithoutValue(num_channels(), /*level=*/255,
+ kDefaultClippedLevelStep, kMinMicLevel,
+ kMaxMicLevel, *predictor);
+ AnalyzeNonZeroCrestFactorAudio(/*num_calls=*/config.window_length,
+ num_channels(),
+ /*peak_ratio=*/0.99f, *predictor);
+ CheckChannelEstimatesWithoutValue(num_channels(), /*level=*/255,
+ kDefaultClippedLevelStep, kMinMicLevel,
+ kMaxMicLevel, *predictor);
+}
+
+INSTANTIATE_TEST_SUITE_P(GainController1ClippingPredictor,
+ ClippingPredictorParameterization,
+ ::testing::Combine(::testing::Values(1, 5),
+ ::testing::Values(1, 5, 10),
+ ::testing::Values(1, 5),
+ ::testing::Values(0, 1, 5)));
+
+class ClippingEventPredictorParameterization
+ : public ::testing::TestWithParam<std::tuple<float, float>> {
+ protected:
+ ClippingPredictorConfig GetConfig() const {
+    // TODO(bugs.webrtc.org/12874): Use designated initializers once fixed.
+ return {/*enabled=*/true,
+ /*mode=*/ClippingPredictorMode::kClippingEventPrediction,
+ /*window_length=*/5,
+ /*reference_window_length=*/5,
+ /*reference_window_delay=*/5,
+ /*clipping_threshold=*/std::get<0>(GetParam()),
+ /*crest_factor_margin=*/std::get<1>(GetParam())};
+ }
+};
+
+TEST_P(ClippingEventPredictorParameterization,
+ CheckEstimateAfterCrestFactorDrop) {
+ const ClippingPredictorConfig config = GetConfig();
+ auto predictor = CreateClippingPredictor(kNumChannels, config);
+ AnalyzeNonZeroCrestFactorAudio(/*num_calls=*/config.reference_window_length,
+ kNumChannels, /*peak_ratio=*/0.99f,
+ *predictor);
+ CheckChannelEstimatesWithoutValue(kNumChannels, /*level=*/255,
+ kDefaultClippedLevelStep, kMinMicLevel,
+ kMaxMicLevel, *predictor);
+ AnalyzeZeroCrestFactorAudio(config.window_length, kNumChannels,
+ /*peak_ratio=*/0.99f, *predictor);
+ // TODO(bugs.webrtc.org/12774): Add clarifying comment.
+ // TODO(bugs.webrtc.org/12774): Remove 4.15f threshold and split tests.
+ if (config.clipping_threshold < kClippingThresholdDb &&
+ config.crest_factor_margin < 4.15f) {
+ CheckChannelEstimatesWithValue(
+ kNumChannels, /*level=*/255, kDefaultClippedLevelStep, kMinMicLevel,
+ kMaxMicLevel, *predictor, kDefaultClippedLevelStep);
+ } else {
+ CheckChannelEstimatesWithoutValue(kNumChannels, /*level=*/255,
+ kDefaultClippedLevelStep, kMinMicLevel,
+ kMaxMicLevel, *predictor);
+ }
+}
+
+INSTANTIATE_TEST_SUITE_P(GainController1ClippingPredictor,
+ ClippingEventPredictorParameterization,
+ ::testing::Combine(::testing::Values(-1.0f, 0.0f),
+ ::testing::Values(3.0f, 4.16f)));
+
+class ClippingPredictorModeParameterization
+ : public ::testing::TestWithParam<ClippingPredictorMode> {
+ protected:
+ ClippingPredictorConfig GetConfig(float clipping_threshold_dbfs) const {
+    // TODO(bugs.webrtc.org/12874): Use designated initializers once fixed.
+ return {/*enabled=*/true,
+ /*mode=*/GetParam(),
+ /*window_length=*/5,
+ /*reference_window_length=*/5,
+ /*reference_window_delay=*/5,
+ /*clipping_threshold=*/clipping_threshold_dbfs,
+ /*crest_factor_margin=*/3.0f};
+ }
+};
+
+TEST_P(ClippingPredictorModeParameterization,
+ CheckEstimateAfterHighCrestFactorWithNoClippingMargin) {
+ const ClippingPredictorConfig config = GetConfig(
+ /*clipping_threshold_dbfs=*/0.0f);
+ auto predictor = CreateClippingPredictor(kNumChannels, config);
+ AnalyzeNonZeroCrestFactorAudio(/*num_calls=*/config.reference_window_length,
+ kNumChannels, /*peak_ratio=*/0.99f,
+ *predictor);
+ CheckChannelEstimatesWithoutValue(kNumChannels, /*level=*/255,
+ kDefaultClippedLevelStep, kMinMicLevel,
+ kMaxMicLevel, *predictor);
+ AnalyzeZeroCrestFactorAudio(config.window_length, kNumChannels,
+ /*peak_ratio=*/0.99f, *predictor);
+ // Since the clipping threshold is set to 0 dBFS, `EstimateClippedLevelStep()`
+ // is expected to return an unavailable value.
+ CheckChannelEstimatesWithoutValue(kNumChannels, /*level=*/255,
+ kDefaultClippedLevelStep, kMinMicLevel,
+ kMaxMicLevel, *predictor);
+}
+
+TEST_P(ClippingPredictorModeParameterization,
+ CheckEstimateAfterHighCrestFactorWithClippingMargin) {
+ const ClippingPredictorConfig config =
+ GetConfig(/*clipping_threshold_dbfs=*/-1.0f);
+ auto predictor = CreateClippingPredictor(kNumChannels, config);
+ AnalyzeNonZeroCrestFactorAudio(/*num_calls=*/config.reference_window_length,
+ kNumChannels,
+ /*peak_ratio=*/0.99f, *predictor);
+ CheckChannelEstimatesWithoutValue(kNumChannels, /*level=*/255,
+ kDefaultClippedLevelStep, kMinMicLevel,
+ kMaxMicLevel, *predictor);
+ AnalyzeZeroCrestFactorAudio(config.window_length, kNumChannels,
+ /*peak_ratio=*/0.99f, *predictor);
+ // TODO(bugs.webrtc.org/12774): Add clarifying comment.
+ const float expected_step =
+ config.mode == ClippingPredictorMode::kAdaptiveStepClippingPeakPrediction
+ ? 17
+ : kDefaultClippedLevelStep;
+ CheckChannelEstimatesWithValue(kNumChannels, /*level=*/255,
+ kDefaultClippedLevelStep, kMinMicLevel,
+ kMaxMicLevel, *predictor, expected_step);
+}
+
+INSTANTIATE_TEST_SUITE_P(
+ GainController1ClippingPredictor,
+ ClippingPredictorModeParameterization,
+ ::testing::Values(
+ ClippingPredictorMode::kAdaptiveStepClippingPeakPrediction,
+ ClippingPredictorMode::kFixedStepClippingPeakPrediction));
+
+TEST(ClippingEventPredictorTest, CheckEstimateAfterReset) {
+  // TODO(bugs.webrtc.org/12874): Use designated initializers once fixed.
+ constexpr ClippingPredictorConfig kConfig{
+ /*enabled=*/true,
+ /*mode=*/ClippingPredictorMode::kClippingEventPrediction,
+ /*window_length=*/5,
+ /*reference_window_length=*/5,
+ /*reference_window_delay=*/5,
+ /*clipping_threshold=*/-1.0f,
+ /*crest_factor_margin=*/3.0f};
+ auto predictor = CreateClippingPredictor(kNumChannels, kConfig);
+ AnalyzeNonZeroCrestFactorAudio(/*num_calls=*/kConfig.reference_window_length,
+ kNumChannels,
+ /*peak_ratio=*/0.99f, *predictor);
+ CheckChannelEstimatesWithoutValue(kNumChannels, /*level=*/255,
+ kDefaultClippedLevelStep, kMinMicLevel,
+ kMaxMicLevel, *predictor);
+ predictor->Reset();
+ AnalyzeZeroCrestFactorAudio(kConfig.window_length, kNumChannels,
+ /*peak_ratio=*/0.99f, *predictor);
+ CheckChannelEstimatesWithoutValue(kNumChannels, /*level=*/255,
+ kDefaultClippedLevelStep, kMinMicLevel,
+ kMaxMicLevel, *predictor);
+}
+
+TEST(ClippingPeakPredictorTest, CheckNoEstimateAfterReset) {
+  // TODO(bugs.webrtc.org/12874): Use designated initializers once fixed.
+ constexpr ClippingPredictorConfig kConfig{
+ /*enabled=*/true,
+ /*mode=*/ClippingPredictorMode::kAdaptiveStepClippingPeakPrediction,
+ /*window_length=*/5,
+ /*reference_window_length=*/5,
+ /*reference_window_delay=*/5,
+ /*clipping_threshold=*/-1.0f};
+ auto predictor = CreateClippingPredictor(kNumChannels, kConfig);
+ AnalyzeNonZeroCrestFactorAudio(/*num_calls=*/kConfig.reference_window_length,
+ kNumChannels,
+ /*peak_ratio=*/0.99f, *predictor);
+ CheckChannelEstimatesWithoutValue(kNumChannels, /*level=*/255,
+ kDefaultClippedLevelStep, kMinMicLevel,
+ kMaxMicLevel, *predictor);
+ predictor->Reset();
+ AnalyzeZeroCrestFactorAudio(kConfig.window_length, kNumChannels,
+ /*peak_ratio=*/0.99f, *predictor);
+ CheckChannelEstimatesWithoutValue(kNumChannels, /*level=*/255,
+ kDefaultClippedLevelStep, kMinMicLevel,
+ kMaxMicLevel, *predictor);
+}
+
+TEST(ClippingPeakPredictorTest, CheckAdaptiveStepEstimate) {
+  // TODO(bugs.webrtc.org/12874): Use designated initializers once fixed.
+ constexpr ClippingPredictorConfig kConfig{
+ /*enabled=*/true,
+ /*mode=*/ClippingPredictorMode::kAdaptiveStepClippingPeakPrediction,
+ /*window_length=*/5,
+ /*reference_window_length=*/5,
+ /*reference_window_delay=*/5,
+ /*clipping_threshold=*/-1.0f};
+ auto predictor = CreateClippingPredictor(kNumChannels, kConfig);
+ AnalyzeNonZeroCrestFactorAudio(/*num_calls=*/kConfig.reference_window_length,
+ kNumChannels, /*peak_ratio=*/0.99f,
+ *predictor);
+ CheckChannelEstimatesWithoutValue(kNumChannels, /*level=*/255,
+ kDefaultClippedLevelStep, kMinMicLevel,
+ kMaxMicLevel, *predictor);
+ AnalyzeZeroCrestFactorAudio(kConfig.window_length, kNumChannels,
+ /*peak_ratio=*/0.99f, *predictor);
+ CheckChannelEstimatesWithValue(kNumChannels, /*level=*/255,
+ kDefaultClippedLevelStep, kMinMicLevel,
+ kMaxMicLevel, *predictor, /*expected=*/17);
+}
+
+TEST(ClippingPeakPredictorTest, CheckFixedStepEstimate) {
+  // TODO(bugs.webrtc.org/12874): Use designated initializers once fixed.
+ constexpr ClippingPredictorConfig kConfig{
+ /*enabled=*/true,
+ /*mode=*/ClippingPredictorMode::kFixedStepClippingPeakPrediction,
+ /*window_length=*/5,
+ /*reference_window_length=*/5,
+ /*reference_window_delay=*/5,
+ /*clipping_threshold=*/-1.0f};
+ auto predictor = CreateClippingPredictor(kNumChannels, kConfig);
+ AnalyzeNonZeroCrestFactorAudio(/*num_calls=*/kConfig.reference_window_length,
+ kNumChannels, /*peak_ratio=*/0.99f,
+ *predictor);
+ CheckChannelEstimatesWithoutValue(kNumChannels, /*level=*/255,
+ kDefaultClippedLevelStep, kMinMicLevel,
+ kMaxMicLevel, *predictor);
+ AnalyzeZeroCrestFactorAudio(kConfig.window_length, kNumChannels,
+ /*peak_ratio=*/0.99f, *predictor);
+ CheckChannelEstimatesWithValue(
+ kNumChannels, /*level=*/255, kDefaultClippedLevelStep, kMinMicLevel,
+ kMaxMicLevel, *predictor, kDefaultClippedLevelStep);
+}
+
+} // namespace
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/agc/gain_control.h b/third_party/libwebrtc/modules/audio_processing/agc/gain_control.h
new file mode 100644
index 0000000000..389b2114af
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc/gain_control.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AGC_GAIN_CONTROL_H_
+#define MODULES_AUDIO_PROCESSING_AGC_GAIN_CONTROL_H_
+
+namespace webrtc {
+
+// The automatic gain control (AGC) component brings the signal to an
+// appropriate range. This is done by applying a digital gain directly and, in
+// the analog mode, prescribing an analog gain to be applied at the audio HAL.
+//
+// Recommended to be enabled on the client side.
+class GainControl {
+ public:
+ // When an analog mode is set, this must be called prior to `ProcessStream()`
+ // to pass the current analog level from the audio HAL. Must be within the
+ // range provided to `set_analog_level_limits()`.
+ virtual int set_stream_analog_level(int level) = 0;
+
+ // When an analog mode is set, this should be called after `ProcessStream()`
+ // to obtain the recommended new analog level for the audio HAL. It is the
+  // user's responsibility to apply this level.
+ virtual int stream_analog_level() const = 0;
+
+ enum Mode {
+ // Adaptive mode intended for use if an analog volume control is available
+ // on the capture device. It will require the user to provide coupling
+ // between the OS mixer controls and AGC through the `stream_analog_level()`
+ // functions.
+ //
+ // It consists of an analog gain prescription for the audio device and a
+ // digital compression stage.
+ kAdaptiveAnalog,
+
+ // Adaptive mode intended for situations in which an analog volume control
+ // is unavailable. It operates in a similar fashion to the adaptive analog
+ // mode, but with scaling instead applied in the digital domain. As with
+ // the analog mode, it additionally uses a digital compression stage.
+ kAdaptiveDigital,
+
+ // Fixed mode which enables only the digital compression stage also used by
+ // the two adaptive modes.
+ //
+ // It is distinguished from the adaptive modes by considering only a
+ // short time-window of the input signal. It applies a fixed gain through
+ // most of the input level range, and compresses (gradually reduces gain
+ // with increasing level) the input signal at higher levels. This mode is
+ // preferred on embedded devices where the capture signal level is
+ // predictable, so that a known gain can be applied.
+ kFixedDigital
+ };
+
+ virtual int set_mode(Mode mode) = 0;
+ virtual Mode mode() const = 0;
+
+  // Sets the target peak `level` (or envelope) of the AGC in dBFS (decibels
+  // from digital full-scale). The convention is to use positive values. For
+  // instance, passing in a value of 3 corresponds to -3 dBFS, or a target
+ // level 3 dB below full-scale. Limited to [0, 31].
+ //
+ // TODO(ajm): use a negative value here instead, if/when VoE will similarly
+ // update its interface.
+ virtual int set_target_level_dbfs(int level) = 0;
+ virtual int target_level_dbfs() const = 0;
+
+ // Sets the maximum `gain` the digital compression stage may apply, in dB. A
+ // higher number corresponds to greater compression, while a value of 0 will
+ // leave the signal uncompressed. Limited to [0, 90].
+ virtual int set_compression_gain_db(int gain) = 0;
+ virtual int compression_gain_db() const = 0;
+
+ // When enabled, the compression stage will hard limit the signal to the
+ // target level. Otherwise, the signal will be compressed but not limited
+ // above the target level.
+ virtual int enable_limiter(bool enable) = 0;
+ virtual bool is_limiter_enabled() const = 0;
+
+ // Sets the `minimum` and `maximum` analog levels of the audio capture device.
+ // Must be set if and only if an analog mode is used. Limited to [0, 65535].
+ virtual int set_analog_level_limits(int minimum, int maximum) = 0;
+ virtual int analog_level_minimum() const = 0;
+ virtual int analog_level_maximum() const = 0;
+
+ // Returns true if the AGC has detected a saturation event (period where the
+ // signal reaches digital full-scale) in the current frame and the analog
+ // level cannot be reduced.
+ //
+ // This could be used as an indicator to reduce or disable analog mic gain at
+ // the audio HAL.
+ virtual bool stream_is_saturated() const = 0;
+
+ protected:
+ virtual ~GainControl() {}
+};
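+
+// A minimal usage sketch for the adaptive analog mode. The names `agc`,
+// `ReadMicVolume()` and `SetMicVolume()` are hypothetical placeholders for the
+// embedding code and the audio HAL:
+//
+//   agc->set_mode(GainControl::kAdaptiveAnalog);
+//   agc->set_analog_level_limits(0, 255);
+//   agc->set_stream_analog_level(ReadMicVolume());
+//   // ... process the capture frame ...
+//   SetMicVolume(agc->stream_analog_level());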
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AGC_GAIN_CONTROL_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/agc/gain_control_interface_gn/moz.build b/third_party/libwebrtc/modules/audio_processing/agc/gain_control_interface_gn/moz.build
new file mode 100644
index 0000000000..95ab1e2252
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc/gain_control_interface_gn/moz.build
@@ -0,0 +1,189 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("gain_control_interface_gn")
diff --git a/third_party/libwebrtc/modules/audio_processing/agc/gain_map_gn/moz.build b/third_party/libwebrtc/modules/audio_processing/agc/gain_map_gn/moz.build
new file mode 100644
index 0000000000..1cbd4cc29b
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc/gain_map_gn/moz.build
@@ -0,0 +1,189 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("gain_map_gn")
diff --git a/third_party/libwebrtc/modules/audio_processing/agc/gain_map_internal.h b/third_party/libwebrtc/modules/audio_processing/agc/gain_map_internal.h
new file mode 100644
index 0000000000..547f0f312e
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc/gain_map_internal.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AGC_GAIN_MAP_INTERNAL_H_
+#define MODULES_AUDIO_PROCESSING_AGC_GAIN_MAP_INTERNAL_H_
+
+namespace webrtc {
+
+static const int kGainMapSize = 256;
+// Uses parameters: si = 2, sf = 0.25, D = 8/256
+static const int kGainMap[kGainMapSize] = {
+ -56, -54, -52, -50, -48, -47, -45, -43, -42, -40, -38, -37, -35, -34, -33,
+ -31, -30, -29, -27, -26, -25, -24, -23, -22, -20, -19, -18, -17, -16, -15,
+ -14, -14, -13, -12, -11, -10, -9, -8, -8, -7, -6, -5, -5, -4, -3,
+ -2, -2, -1, 0, 0, 1, 1, 2, 3, 3, 4, 4, 5, 5, 6,
+ 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13,
+ 13, 14, 14, 15, 15, 15, 16, 16, 17, 17, 17, 18, 18, 18, 19,
+ 19, 19, 20, 20, 21, 21, 21, 22, 22, 22, 23, 23, 23, 24, 24,
+ 24, 24, 25, 25, 25, 26, 26, 26, 27, 27, 27, 28, 28, 28, 28,
+ 29, 29, 29, 30, 30, 30, 30, 31, 31, 31, 32, 32, 32, 32, 33,
+ 33, 33, 33, 34, 34, 34, 35, 35, 35, 35, 36, 36, 36, 36, 37,
+ 37, 37, 38, 38, 38, 38, 39, 39, 39, 39, 40, 40, 40, 40, 41,
+ 41, 41, 41, 42, 42, 42, 42, 43, 43, 43, 44, 44, 44, 44, 45,
+ 45, 45, 45, 46, 46, 46, 46, 47, 47, 47, 47, 48, 48, 48, 48,
+ 49, 49, 49, 49, 50, 50, 50, 50, 51, 51, 51, 51, 52, 52, 52,
+ 52, 53, 53, 53, 53, 54, 54, 54, 54, 55, 55, 55, 55, 56, 56,
+ 56, 56, 57, 57, 57, 57, 58, 58, 58, 58, 59, 59, 59, 59, 60,
+ 60, 60, 60, 61, 61, 61, 61, 62, 62, 62, 62, 63, 63, 63, 63,
+ 64};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AGC_GAIN_MAP_INTERNAL_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/agc/legacy/analog_agc.cc b/third_party/libwebrtc/modules/audio_processing/agc/legacy/analog_agc.cc
new file mode 100644
index 0000000000..e40a3f1629
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc/legacy/analog_agc.cc
@@ -0,0 +1,1238 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ *
+ * Using a feedback system, determines an appropriate analog volume level
+ * given an input signal and current volume level. Targets a conservative
+ * signal level and is intended for use with a digital AGC to apply
+ * additional gain.
+ *
+ */
+
+#include "modules/audio_processing/agc/legacy/analog_agc.h"
+
+#include <stdlib.h>
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+namespace {
+
+// Errors
+#define AGC_UNSPECIFIED_ERROR 18000
+#define AGC_UNINITIALIZED_ERROR 18002
+#define AGC_NULL_POINTER_ERROR 18003
+#define AGC_BAD_PARAMETER_ERROR 18004
+
+/* The slope in Q13 */
+static const int16_t kSlope1[8] = {21793, 12517, 7189, 4129,
+ 2372, 1362, 472, 78};
+
+/* The offset in Q14 */
+static const int16_t kOffset1[8] = {25395, 23911, 22206, 20737,
+ 19612, 18805, 17951, 17367};
+
+/* The slope in Q13 */
+static const int16_t kSlope2[8] = {2063, 1731, 1452, 1218, 1021, 857, 597, 337};
+
+/* The offset in Q14 */
+static const int16_t kOffset2[8] = {18432, 18379, 18290, 18177,
+ 18052, 17920, 17670, 17286};
+
+static const int16_t kMuteGuardTimeMs = 8000;
+static const int16_t kInitCheck = 42;
+static const size_t kNumSubframes = 10;
+
+/* Default settings if config is not used */
+#define AGC_DEFAULT_TARGET_LEVEL 3
+#define AGC_DEFAULT_COMP_GAIN 9
+/* This is the target level for the analog part in ENV scale. To convert to
+ * RMS scale, add OFFSET_ENV_TO_RMS. */
+#define ANALOG_TARGET_LEVEL 11
+#define ANALOG_TARGET_LEVEL_2 5 // ANALOG_TARGET_LEVEL / 2
+/* Offset between the RMS scale (analog part) and the ENV scale (digital
+ * part). This value actually varies with the FIXED_ANALOG_TARGET_LEVEL, hence
+ * we should in the future replace it with a table. */
+#define OFFSET_ENV_TO_RMS 9
+/* The reference input level at which the digital part gives an output of
+ * targetLevelDbfs (desired level) if we have no compression gain. This level
+ * should be set high enough not to compress the peaks due to the dynamics. */
+#define DIGITAL_REF_AT_0_COMP_GAIN 4
+/* Speed of reference level decrease. */
+#define DIFF_REF_TO_ANALOG 5
+
+/* Size of analog gain table */
+#define GAIN_TBL_LEN 32
+/* Matlab code:
+ * fprintf(1, '\t%i, %i, %i, %i,\n', round(10.^(linspace(0,10,32)/20) * 2^12));
+ */
+/* Q12 */
+static const uint16_t kGainTableAnalog[GAIN_TBL_LEN] = {
+ 4096, 4251, 4412, 4579, 4752, 4932, 5118, 5312, 5513, 5722, 5938,
+ 6163, 6396, 6638, 6889, 7150, 7420, 7701, 7992, 8295, 8609, 8934,
+ 9273, 9623, 9987, 10365, 10758, 11165, 11587, 12025, 12480, 12953};
+
+/* Gain/Suppression tables for virtual Mic (in Q10) */
+static const uint16_t kGainTableVirtualMic[128] = {
+ 1052, 1081, 1110, 1141, 1172, 1204, 1237, 1271, 1305, 1341, 1378,
+ 1416, 1454, 1494, 1535, 1577, 1620, 1664, 1710, 1757, 1805, 1854,
+ 1905, 1957, 2010, 2065, 2122, 2180, 2239, 2301, 2364, 2428, 2495,
+ 2563, 2633, 2705, 2779, 2855, 2933, 3013, 3096, 3180, 3267, 3357,
+ 3449, 3543, 3640, 3739, 3842, 3947, 4055, 4166, 4280, 4397, 4517,
+ 4640, 4767, 4898, 5032, 5169, 5311, 5456, 5605, 5758, 5916, 6078,
+ 6244, 6415, 6590, 6770, 6956, 7146, 7341, 7542, 7748, 7960, 8178,
+ 8402, 8631, 8867, 9110, 9359, 9615, 9878, 10148, 10426, 10711, 11004,
+ 11305, 11614, 11932, 12258, 12593, 12938, 13292, 13655, 14029, 14412, 14807,
+ 15212, 15628, 16055, 16494, 16945, 17409, 17885, 18374, 18877, 19393, 19923,
+ 20468, 21028, 21603, 22194, 22801, 23425, 24065, 24724, 25400, 26095, 26808,
+ 27541, 28295, 29069, 29864, 30681, 31520, 32382};
+static const uint16_t kSuppressionTableVirtualMic[128] = {
+ 1024, 1006, 988, 970, 952, 935, 918, 902, 886, 870, 854, 839, 824, 809, 794,
+ 780, 766, 752, 739, 726, 713, 700, 687, 675, 663, 651, 639, 628, 616, 605,
+ 594, 584, 573, 563, 553, 543, 533, 524, 514, 505, 496, 487, 478, 470, 461,
+ 453, 445, 437, 429, 421, 414, 406, 399, 392, 385, 378, 371, 364, 358, 351,
+ 345, 339, 333, 327, 321, 315, 309, 304, 298, 293, 288, 283, 278, 273, 268,
+ 263, 258, 254, 249, 244, 240, 236, 232, 227, 223, 219, 215, 211, 208, 204,
+ 200, 197, 193, 190, 186, 183, 180, 176, 173, 170, 167, 164, 161, 158, 155,
+ 153, 150, 147, 145, 142, 139, 137, 134, 132, 130, 127, 125, 123, 121, 118,
+ 116, 114, 112, 110, 108, 106, 104, 102};
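+/* The gain table spans roughly +0.2 dB to +30 dB and the suppression table
+ * 0 dB down to -20 dB, 128 Q10 entries each (1024 == 1.0). */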
+
+/* Table for target energy levels. Values in Q(-7)
+ * Matlab code
+ * targetLevelTable = fprintf('%d,\t%d,\t%d,\t%d,\n',
+ * round((32767*10.^(-(0:63)'/20)).^2*16/2^7) */
+
+static const int32_t kTargetLevelTable[64] = {
+ 134209536, 106606424, 84680493, 67264106, 53429779, 42440782, 33711911,
+ 26778323, 21270778, 16895980, 13420954, 10660642, 8468049, 6726411,
+ 5342978, 4244078, 3371191, 2677832, 2127078, 1689598, 1342095,
+ 1066064, 846805, 672641, 534298, 424408, 337119, 267783,
+ 212708, 168960, 134210, 106606, 84680, 67264, 53430,
+ 42441, 33712, 26778, 21271, 16896, 13421, 10661,
+ 8468, 6726, 5343, 4244, 3371, 2678, 2127,
+ 1690, 1342, 1066, 847, 673, 534, 424,
+ 337, 268, 213, 169, 134, 107, 85,
+ 67};
+
+} // namespace
+
+int WebRtcAgc_AddMic(void* state,
+ int16_t* const* in_mic,
+ size_t num_bands,
+ size_t samples) {
+ int32_t nrg, max_nrg, sample, tmp32;
+ int32_t* ptr;
+ uint16_t targetGainIdx, gain;
+ size_t i;
+ int16_t n, L, tmp16, tmp_speech[16];
+ LegacyAgc* stt;
+ stt = reinterpret_cast<LegacyAgc*>(state);
+
+ if (stt->fs == 8000) {
+ L = 8;
+ if (samples != 80) {
+ return -1;
+ }
+ } else {
+ L = 16;
+ if (samples != 160) {
+ return -1;
+ }
+ }
+
+ /* apply slowly varying digital gain */
+ if (stt->micVol > stt->maxAnalog) {
+ /* `maxLevel` is strictly >= `micVol`, so this condition should be
+ * satisfied here, ensuring there is no divide-by-zero. */
+ RTC_DCHECK_GT(stt->maxLevel, stt->maxAnalog);
+
+ /* Q1 */
+ tmp16 = (int16_t)(stt->micVol - stt->maxAnalog);
+ tmp32 = (GAIN_TBL_LEN - 1) * tmp16;
+ tmp16 = (int16_t)(stt->maxLevel - stt->maxAnalog);
+ targetGainIdx = tmp32 / tmp16;
+ RTC_DCHECK_LT(targetGainIdx, GAIN_TBL_LEN);
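+    /* E.g., a micVol halfway between maxAnalog and maxLevel maps to index
+     * ~15, i.e., roughly +5 dB on the 0..10 dB kGainTableAnalog span. */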
+
+ /* Increment through the table towards the target gain.
+ * If micVol drops below maxAnalog, we allow the gain
+ * to be dropped immediately. */
+ if (stt->gainTableIdx < targetGainIdx) {
+ stt->gainTableIdx++;
+ } else if (stt->gainTableIdx > targetGainIdx) {
+ stt->gainTableIdx--;
+ }
+
+ /* Q12 */
+ gain = kGainTableAnalog[stt->gainTableIdx];
+
+ for (i = 0; i < samples; i++) {
+ size_t j;
+ for (j = 0; j < num_bands; ++j) {
+ sample = (in_mic[j][i] * gain) >> 12;
+ if (sample > 32767) {
+ in_mic[j][i] = 32767;
+ } else if (sample < -32768) {
+ in_mic[j][i] = -32768;
+ } else {
+ in_mic[j][i] = (int16_t)sample;
+ }
+ }
+ }
+ } else {
+ stt->gainTableIdx = 0;
+ }
+
+ /* compute envelope */
+ if (stt->inQueue > 0) {
+ ptr = stt->env[1];
+ } else {
+ ptr = stt->env[0];
+ }
+
+ for (i = 0; i < kNumSubframes; i++) {
+ /* iterate over samples */
+ max_nrg = 0;
+ for (n = 0; n < L; n++) {
+ nrg = in_mic[0][i * L + n] * in_mic[0][i * L + n];
+ if (nrg > max_nrg) {
+ max_nrg = nrg;
+ }
+ }
+ ptr[i] = max_nrg;
+ }
+
+ /* compute energy */
+ if (stt->inQueue > 0) {
+ ptr = stt->Rxx16w32_array[1];
+ } else {
+ ptr = stt->Rxx16w32_array[0];
+ }
+
+ for (i = 0; i < kNumSubframes / 2; i++) {
+ if (stt->fs == 16000) {
+ WebRtcSpl_DownsampleBy2(&in_mic[0][i * 32], 32, tmp_speech,
+ stt->filterState);
+ } else {
+ memcpy(tmp_speech, &in_mic[0][i * 16], 16 * sizeof(int16_t));
+ }
+ /* Compute energy in blocks of 16 samples */
+ ptr[i] = WebRtcSpl_DotProductWithScale(tmp_speech, tmp_speech, 16, 4);
+ }
+
+ /* update queue information */
+ if (stt->inQueue == 0) {
+ stt->inQueue = 1;
+ } else {
+ stt->inQueue = 2;
+ }
+
+ /* call VAD (use low band only) */
+ WebRtcAgc_ProcessVad(&stt->vadMic, in_mic[0], samples);
+
+ return 0;
+}
+
+int WebRtcAgc_AddFarend(void* state, const int16_t* in_far, size_t samples) {
+ LegacyAgc* stt = reinterpret_cast<LegacyAgc*>(state);
+
+ int err = WebRtcAgc_GetAddFarendError(state, samples);
+
+ if (err != 0)
+ return err;
+
+ return WebRtcAgc_AddFarendToDigital(&stt->digitalAgc, in_far, samples);
+}
+
+int WebRtcAgc_GetAddFarendError(void* state, size_t samples) {
+ LegacyAgc* stt;
+ stt = reinterpret_cast<LegacyAgc*>(state);
+
+ if (stt == NULL)
+ return -1;
+
+ if (stt->fs == 8000) {
+ if (samples != 80)
+ return -1;
+ } else if (stt->fs == 16000 || stt->fs == 32000 || stt->fs == 48000) {
+ if (samples != 160)
+ return -1;
+ } else {
+ return -1;
+ }
+
+ return 0;
+}
+
+int WebRtcAgc_VirtualMic(void* agcInst,
+ int16_t* const* in_near,
+ size_t num_bands,
+ size_t samples,
+ int32_t micLevelIn,
+ int32_t* micLevelOut) {
+ int32_t tmpFlt, micLevelTmp, gainIdx;
+ uint16_t gain;
+ size_t ii, j;
+ LegacyAgc* stt;
+
+ uint32_t nrg;
+ size_t sampleCntr;
+ uint32_t frameNrg = 0;
+ uint32_t frameNrgLimit = 5500;
+ int16_t numZeroCrossing = 0;
+ const int16_t kZeroCrossingLowLim = 15;
+ const int16_t kZeroCrossingHighLim = 20;
+
+ stt = reinterpret_cast<LegacyAgc*>(agcInst);
+
+  /*
+   * Before applying gain, decide if this is a low-level signal.
+   * The idea is that the digital AGC will not adapt to low-level
+   * signals.
+   */
+ if (stt->fs != 8000) {
+ frameNrgLimit = frameNrgLimit << 1;
+ }
+
+ frameNrg = (uint32_t)(in_near[0][0] * in_near[0][0]);
+ for (sampleCntr = 1; sampleCntr < samples; sampleCntr++) {
+ // increment frame energy if it is less than the limit
+ // the correct value of the energy is not important
+ if (frameNrg < frameNrgLimit) {
+ nrg = (uint32_t)(in_near[0][sampleCntr] * in_near[0][sampleCntr]);
+ frameNrg += nrg;
+ }
+
+ // Count the zero crossings
+ numZeroCrossing +=
+ ((in_near[0][sampleCntr] ^ in_near[0][sampleCntr - 1]) < 0);
+ }
+
+ if ((frameNrg < 500) || (numZeroCrossing <= 5)) {
+ stt->lowLevelSignal = 1;
+ } else if (numZeroCrossing <= kZeroCrossingLowLim) {
+ stt->lowLevelSignal = 0;
+ } else if (frameNrg <= frameNrgLimit) {
+ stt->lowLevelSignal = 1;
+ } else if (numZeroCrossing >= kZeroCrossingHighLim) {
+ stt->lowLevelSignal = 1;
+ } else {
+ stt->lowLevelSignal = 0;
+ }
+
+ micLevelTmp = micLevelIn << stt->scale;
+ /* Set desired level */
+ gainIdx = stt->micVol;
+ if (stt->micVol > stt->maxAnalog) {
+ gainIdx = stt->maxAnalog;
+ }
+ if (micLevelTmp != stt->micRef) {
+ /* Something has happened with the physical level, restart. */
+ stt->micRef = micLevelTmp;
+ stt->micVol = 127;
+ *micLevelOut = 127;
+ stt->micGainIdx = 127;
+ gainIdx = 127;
+ }
+ /* Pre-process the signal to emulate the microphone level. */
+ /* Take one step at a time in the gain table. */
+ if (gainIdx > 127) {
+ gain = kGainTableVirtualMic[gainIdx - 128];
+ } else {
+ gain = kSuppressionTableVirtualMic[127 - gainIdx];
+ }
+ for (ii = 0; ii < samples; ii++) {
+ tmpFlt = (in_near[0][ii] * gain) >> 10;
+ if (tmpFlt > 32767) {
+ tmpFlt = 32767;
+ gainIdx--;
+ if (gainIdx >= 127) {
+ gain = kGainTableVirtualMic[gainIdx - 127];
+ } else {
+ gain = kSuppressionTableVirtualMic[127 - gainIdx];
+ }
+ }
+ if (tmpFlt < -32768) {
+ tmpFlt = -32768;
+ gainIdx--;
+ if (gainIdx >= 127) {
+ gain = kGainTableVirtualMic[gainIdx - 127];
+ } else {
+ gain = kSuppressionTableVirtualMic[127 - gainIdx];
+ }
+ }
+ in_near[0][ii] = (int16_t)tmpFlt;
+ for (j = 1; j < num_bands; ++j) {
+ tmpFlt = (in_near[j][ii] * gain) >> 10;
+ if (tmpFlt > 32767) {
+ tmpFlt = 32767;
+ }
+ if (tmpFlt < -32768) {
+ tmpFlt = -32768;
+ }
+ in_near[j][ii] = (int16_t)tmpFlt;
+ }
+ }
+ /* Set the level we (finally) used */
+ stt->micGainIdx = gainIdx;
+ // *micLevelOut = stt->micGainIdx;
+ *micLevelOut = stt->micGainIdx >> stt->scale;
+ /* Add to Mic as if it was the output from a true microphone */
+ if (WebRtcAgc_AddMic(agcInst, in_near, num_bands, samples) != 0) {
+ return -1;
+ }
+ return 0;
+}
+
+void WebRtcAgc_UpdateAgcThresholds(LegacyAgc* stt) {
+ int16_t tmp16;
+
+ /* Set analog target level in envelope dBOv scale */
+ tmp16 = (DIFF_REF_TO_ANALOG * stt->compressionGaindB) + ANALOG_TARGET_LEVEL_2;
+ tmp16 = WebRtcSpl_DivW32W16ResW16((int32_t)tmp16, ANALOG_TARGET_LEVEL);
+ stt->analogTarget = DIGITAL_REF_AT_0_COMP_GAIN + tmp16;
+ if (stt->analogTarget < DIGITAL_REF_AT_0_COMP_GAIN) {
+ stt->analogTarget = DIGITAL_REF_AT_0_COMP_GAIN;
+ }
+ if (stt->agcMode == kAgcModeFixedDigital) {
+ /* Adjust for different parameter interpretation in FixedDigital mode */
+ stt->analogTarget = stt->compressionGaindB;
+ }
+  /* Since the offset between RMS and ENV is not constant, we should make this
+   * into a table, but for now, we'll stick with a constant, tuned for the
+   * chosen analog target level. */
+ stt->targetIdx = ANALOG_TARGET_LEVEL + OFFSET_ENV_TO_RMS;
+ /* Analog adaptation limits */
+ /* analogTargetLevel = round((32767*10^(-targetIdx/20))^2*16/2^7) */
+ stt->analogTargetLevel =
+ kRxxBufferLen * kTargetLevelTable[stt->targetIdx]; /* ex. -20 dBov */
+ stt->startUpperLimit =
+ kRxxBufferLen * kTargetLevelTable[stt->targetIdx - 1]; /* -19 dBov */
+ stt->startLowerLimit =
+ kRxxBufferLen * kTargetLevelTable[stt->targetIdx + 1]; /* -21 dBov */
+ stt->upperPrimaryLimit =
+ kRxxBufferLen * kTargetLevelTable[stt->targetIdx - 2]; /* -18 dBov */
+ stt->lowerPrimaryLimit =
+ kRxxBufferLen * kTargetLevelTable[stt->targetIdx + 2]; /* -22 dBov */
+ stt->upperSecondaryLimit =
+ kRxxBufferLen * kTargetLevelTable[stt->targetIdx - 5]; /* -15 dBov */
+ stt->lowerSecondaryLimit =
+ kRxxBufferLen * kTargetLevelTable[stt->targetIdx + 5]; /* -25 dBov */
+ stt->upperLimit = stt->startUpperLimit;
+ stt->lowerLimit = stt->startLowerLimit;
+}
+
+void WebRtcAgc_SaturationCtrl(LegacyAgc* stt,
+ uint8_t* saturated,
+ int32_t* env) {
+ int16_t i, tmpW16;
+
+ /* Check if the signal is saturated */
+ for (i = 0; i < 10; i++) {
+ tmpW16 = (int16_t)(env[i] >> 20);
+ if (tmpW16 > 875) {
+ stt->envSum += tmpW16;
+ }
+ }
+
+ if (stt->envSum > 25000) {
+ *saturated = 1;
+ stt->envSum = 0;
+ }
+
+ /* stt->envSum *= 0.99; */
+ stt->envSum = (int16_t)((stt->envSum * 32440) >> 15);
+}
+
+void WebRtcAgc_ZeroCtrl(LegacyAgc* stt, int32_t* inMicLevel, int32_t* env) {
+ int16_t i;
+ int64_t tmp = 0;
+ int32_t midVal;
+
+ /* Is the input signal zero? */
+ for (i = 0; i < 10; i++) {
+ tmp += env[i];
+ }
+
+ /* Each block is allowed to have a few non-zero
+ * samples.
+ */
+ if (tmp < 500) {
+ stt->msZero += 10;
+ } else {
+ stt->msZero = 0;
+ }
+
+ if (stt->muteGuardMs > 0) {
+ stt->muteGuardMs -= 10;
+ }
+
+ if (stt->msZero > 500) {
+ stt->msZero = 0;
+
+ /* Increase microphone level only if it's less than 50% */
+ midVal = (stt->maxAnalog + stt->minLevel + 1) / 2;
+ if (*inMicLevel < midVal) {
+ /* *inMicLevel *= 1.1; */
+ *inMicLevel = (1126 * *inMicLevel) >> 10;
+ /* Reduces risk of a muted mic repeatedly triggering excessive levels due
+ * to zero signal detection. */
+ *inMicLevel = WEBRTC_SPL_MIN(*inMicLevel, stt->zeroCtrlMax);
+ stt->micVol = *inMicLevel;
+ }
+
+ stt->activeSpeech = 0;
+ stt->Rxx16_LPw32Max = 0;
+
+    /* The AGC has a tendency (due to problems with the VAD parameters) to
+ * vastly increase the volume after a muting event. This timer prevents
+ * upwards adaptation for a short period. */
+ stt->muteGuardMs = kMuteGuardTimeMs;
+ }
+}
+
+void WebRtcAgc_SpeakerInactiveCtrl(LegacyAgc* stt) {
+ /* Check if the near end speaker is inactive.
+ * If that is the case the VAD threshold is
+ * increased since the VAD speech model gets
+ * more sensitive to any sound after a long
+ * silence.
+ */
+
+ int32_t tmp32;
+ int16_t vadThresh;
+
+ if (stt->vadMic.stdLongTerm < 2500) {
+ stt->vadThreshold = 1500;
+ } else {
+ vadThresh = kNormalVadThreshold;
+ if (stt->vadMic.stdLongTerm < 4500) {
+ /* Scale between min and max threshold */
+ vadThresh += (4500 - stt->vadMic.stdLongTerm) / 2;
+ }
+
+ /* stt->vadThreshold = (31 * stt->vadThreshold + vadThresh) / 32; */
+ tmp32 = vadThresh + 31 * stt->vadThreshold;
+ stt->vadThreshold = (int16_t)(tmp32 >> 5);
+ }
+}
+
+void WebRtcAgc_ExpCurve(int16_t volume, int16_t* index) {
+ // volume in Q14
+ // index in [0-7]
+ /* 8 different curves */
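+  /* E.g., a Q14 volume of 8192 (0.5) satisfies 7864 < volume <= 12124 and
+   * selects index 6. */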
+ if (volume > 5243) {
+ if (volume > 7864) {
+ if (volume > 12124) {
+ *index = 7;
+ } else {
+ *index = 6;
+ }
+ } else {
+ if (volume > 6554) {
+ *index = 5;
+ } else {
+ *index = 4;
+ }
+ }
+ } else {
+ if (volume > 2621) {
+ if (volume > 3932) {
+ *index = 3;
+ } else {
+ *index = 2;
+ }
+ } else {
+ if (volume > 1311) {
+ *index = 1;
+ } else {
+ *index = 0;
+ }
+ }
+ }
+}
+
+int32_t WebRtcAgc_ProcessAnalog(void* state,
+ int32_t inMicLevel,
+ int32_t* outMicLevel,
+ int16_t vadLogRatio,
+ int16_t echo,
+ uint8_t* saturationWarning) {
+ uint32_t tmpU32;
+ int32_t Rxx16w32, tmp32;
+ int32_t inMicLevelTmp, lastMicVol;
+ int16_t i;
+ uint8_t saturated = 0;
+ LegacyAgc* stt;
+
+ stt = reinterpret_cast<LegacyAgc*>(state);
+ inMicLevelTmp = inMicLevel << stt->scale;
+
+ if (inMicLevelTmp > stt->maxAnalog) {
+ return -1;
+ } else if (inMicLevelTmp < stt->minLevel) {
+ return -1;
+ }
+
+ if (stt->firstCall == 0) {
+ int32_t tmpVol;
+ stt->firstCall = 1;
+ tmp32 = ((stt->maxLevel - stt->minLevel) * 51) >> 9;
+ tmpVol = (stt->minLevel + tmp32);
+
+ /* If the mic level is very low at start, increase it! */
+ if ((inMicLevelTmp < tmpVol) && (stt->agcMode == kAgcModeAdaptiveAnalog)) {
+ inMicLevelTmp = tmpVol;
+ }
+ stt->micVol = inMicLevelTmp;
+ }
+
+ /* Set the mic level to the previous output value if there is digital input
+ * gain */
+ if ((inMicLevelTmp == stt->maxAnalog) && (stt->micVol > stt->maxAnalog)) {
+ inMicLevelTmp = stt->micVol;
+ }
+
+ /* If the mic level was manually changed to a very low value raise it! */
+ if ((inMicLevelTmp != stt->micVol) && (inMicLevelTmp < stt->minOutput)) {
+ tmp32 = ((stt->maxLevel - stt->minLevel) * 51) >> 9;
+ inMicLevelTmp = (stt->minLevel + tmp32);
+ stt->micVol = inMicLevelTmp;
+ }
+
+ if (inMicLevelTmp != stt->micVol) {
+ if (inMicLevel == stt->lastInMicLevel) {
+ // We requested a volume adjustment, but it didn't occur. This is
+ // probably due to a coarse quantization of the volume slider.
+ // Restore the requested value to prevent getting stuck.
+ inMicLevelTmp = stt->micVol;
+ } else {
+ // As long as the value changed, update to match.
+ stt->micVol = inMicLevelTmp;
+ }
+ }
+
+ if (inMicLevelTmp > stt->maxLevel) {
+ // Always allow the user to raise the volume above the maxLevel.
+ stt->maxLevel = inMicLevelTmp;
+ }
+
+ // Store last value here, after we've taken care of manual updates etc.
+ stt->lastInMicLevel = inMicLevel;
+ lastMicVol = stt->micVol;
+
+  /* Check if the signal is saturated. Individual samples are also checked
+   * against a 12000 threshold; if exceeded, the counter for increasing the
+   * volume level is set to -100 ms. */
+ WebRtcAgc_SaturationCtrl(stt, &saturated, stt->env[0]);
+
+ /* The AGC is always allowed to lower the level if the signal is saturated */
+ if (saturated == 1) {
+ /* Lower the recording level
+ * Rxx160_LP is adjusted down because it is so slow it could
+ * cause the AGC to make wrong decisions. */
+ /* stt->Rxx160_LPw32 *= 0.875; */
+ stt->Rxx160_LPw32 = (stt->Rxx160_LPw32 / 8) * 7;
+
+ stt->zeroCtrlMax = stt->micVol;
+
+ /* stt->micVol *= 0.903; */
+ tmp32 = inMicLevelTmp - stt->minLevel;
+ tmpU32 = WEBRTC_SPL_UMUL(29591, (uint32_t)(tmp32));
+ stt->micVol = (tmpU32 >> 15) + stt->minLevel;
+ if (stt->micVol > lastMicVol - 2) {
+ stt->micVol = lastMicVol - 2;
+ }
+ inMicLevelTmp = stt->micVol;
+
+ if (stt->micVol < stt->minOutput) {
+ *saturationWarning = 1;
+ }
+
+ /* Reset counter for decrease of volume level to avoid
+ * decreasing too much. The saturation control can still
+ * lower the level if needed. */
+ stt->msTooHigh = -100;
+
+ /* Enable the control mechanism to ensure that our measure,
+ * Rxx160_LP, is in the correct range. This must be done since
+ * the measure is very slow. */
+ stt->activeSpeech = 0;
+ stt->Rxx16_LPw32Max = 0;
+
+ /* Reset to initial values */
+ stt->msecSpeechInnerChange = kMsecSpeechInner;
+ stt->msecSpeechOuterChange = kMsecSpeechOuter;
+ stt->changeToSlowMode = 0;
+
+ stt->muteGuardMs = 0;
+
+ stt->upperLimit = stt->startUpperLimit;
+ stt->lowerLimit = stt->startLowerLimit;
+ }
+
+  /* Check if the input speech is zero. If so, the mic volume is increased.
+   * On some computers the input stays zero at levels as high as 17%. */
+ WebRtcAgc_ZeroCtrl(stt, &inMicLevelTmp, stt->env[0]);
+
+ /* Check if the near end speaker is inactive.
+ * If that is the case the VAD threshold is
+ * increased since the VAD speech model gets
+ * more sensitive to any sound after a long
+ * silence.
+ */
+ WebRtcAgc_SpeakerInactiveCtrl(stt);
+
+ for (i = 0; i < 5; i++) {
+ /* Computed on blocks of 16 samples */
+
+ Rxx16w32 = stt->Rxx16w32_array[0][i];
+
+ /* Rxx160w32 in Q(-7) */
+ tmp32 = (Rxx16w32 - stt->Rxx16_vectorw32[stt->Rxx16pos]) >> 3;
+ stt->Rxx160w32 = stt->Rxx160w32 + tmp32;
+ stt->Rxx16_vectorw32[stt->Rxx16pos] = Rxx16w32;
+
+ /* Circular buffer */
+ stt->Rxx16pos++;
+ if (stt->Rxx16pos == kRxxBufferLen) {
+ stt->Rxx16pos = 0;
+ }
+
+ /* Rxx16_LPw32 in Q(-4) */
+ tmp32 = (Rxx16w32 - stt->Rxx16_LPw32) >> kAlphaShortTerm;
+ stt->Rxx16_LPw32 = (stt->Rxx16_LPw32) + tmp32;
+
+ if (vadLogRatio > stt->vadThreshold) {
+ /* Speech detected! */
+
+ /* Check if Rxx160_LP is in the correct range. If
+ * it is too high/low then we set it to the maximum of
+ * Rxx16_LPw32 during the first 200ms of speech.
+ */
+ if (stt->activeSpeech < 250) {
+ stt->activeSpeech += 2;
+
+ if (stt->Rxx16_LPw32 > stt->Rxx16_LPw32Max) {
+ stt->Rxx16_LPw32Max = stt->Rxx16_LPw32;
+ }
+ } else if (stt->activeSpeech == 250) {
+ stt->activeSpeech += 2;
+ tmp32 = stt->Rxx16_LPw32Max >> 3;
+ stt->Rxx160_LPw32 = tmp32 * kRxxBufferLen;
+ }
+
+ tmp32 = (stt->Rxx160w32 - stt->Rxx160_LPw32) >> kAlphaLongTerm;
+ stt->Rxx160_LPw32 = stt->Rxx160_LPw32 + tmp32;
+
+ if (stt->Rxx160_LPw32 > stt->upperSecondaryLimit) {
+ stt->msTooHigh += 2;
+ stt->msTooLow = 0;
+ stt->changeToSlowMode = 0;
+
+ if (stt->msTooHigh > stt->msecSpeechOuterChange) {
+ stt->msTooHigh = 0;
+
+ /* Lower the recording level */
+ /* Multiply by 0.828125 which corresponds to decreasing ~0.8dB */
+ tmp32 = stt->Rxx160_LPw32 >> 6;
+ stt->Rxx160_LPw32 = tmp32 * 53;
+
+ /* Reduce the max gain to avoid excessive oscillation
+ * (but never drop below the maximum analog level).
+ */
+ stt->maxLevel = (15 * stt->maxLevel + stt->micVol) / 16;
+ stt->maxLevel = WEBRTC_SPL_MAX(stt->maxLevel, stt->maxAnalog);
+
+ stt->zeroCtrlMax = stt->micVol;
+
+ /* 0.95 in Q15 */
+ tmp32 = inMicLevelTmp - stt->minLevel;
+ tmpU32 = WEBRTC_SPL_UMUL(31130, (uint32_t)(tmp32));
+ stt->micVol = (tmpU32 >> 15) + stt->minLevel;
+ if (stt->micVol > lastMicVol - 1) {
+ stt->micVol = lastMicVol - 1;
+ }
+ inMicLevelTmp = stt->micVol;
+
+ /* Enable the control mechanism to ensure that our measure,
+ * Rxx160_LP, is in the correct range.
+ */
+ stt->activeSpeech = 0;
+ stt->Rxx16_LPw32Max = 0;
+ }
+ } else if (stt->Rxx160_LPw32 > stt->upperLimit) {
+ stt->msTooHigh += 2;
+ stt->msTooLow = 0;
+ stt->changeToSlowMode = 0;
+
+ if (stt->msTooHigh > stt->msecSpeechInnerChange) {
+ /* Lower the recording level */
+ stt->msTooHigh = 0;
+ /* Multiply by 0.828125 which corresponds to decreasing ~0.8dB */
+ stt->Rxx160_LPw32 = (stt->Rxx160_LPw32 / 64) * 53;
+
+ /* Reduce the max gain to avoid excessive oscillation
+ * (but never drop below the maximum analog level).
+ */
+ stt->maxLevel = (15 * stt->maxLevel + stt->micVol) / 16;
+ stt->maxLevel = WEBRTC_SPL_MAX(stt->maxLevel, stt->maxAnalog);
+
+ stt->zeroCtrlMax = stt->micVol;
+
+ /* 0.965 in Q15 */
+          tmp32 = inMicLevelTmp - stt->minLevel;
+          tmpU32 = WEBRTC_SPL_UMUL(31621, (uint32_t)(tmp32));
+ stt->micVol = (tmpU32 >> 15) + stt->minLevel;
+ if (stt->micVol > lastMicVol - 1) {
+ stt->micVol = lastMicVol - 1;
+ }
+ inMicLevelTmp = stt->micVol;
+ }
+ } else if (stt->Rxx160_LPw32 < stt->lowerSecondaryLimit) {
+ stt->msTooHigh = 0;
+ stt->changeToSlowMode = 0;
+ stt->msTooLow += 2;
+
+ if (stt->msTooLow > stt->msecSpeechOuterChange) {
+ /* Raise the recording level */
+ int16_t index, weightFIX;
+ int16_t volNormFIX = 16384; // =1 in Q14.
+
+ stt->msTooLow = 0;
+
+ /* Normalize the volume level */
+ tmp32 = (inMicLevelTmp - stt->minLevel) << 14;
+ if (stt->maxInit != stt->minLevel) {
+ volNormFIX = tmp32 / (stt->maxInit - stt->minLevel);
+ }
+
+ /* Find correct curve */
+ WebRtcAgc_ExpCurve(volNormFIX, &index);
+
+ /* Compute weighting factor for the volume increase, 32^(-2*X)/2+1.05
+ */
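+          /* Evaluated as a linear segment selected by WebRtcAgc_ExpCurve:
+           * weight = offset - slope * normalized volume. */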
+ weightFIX =
+ kOffset1[index] - (int16_t)((kSlope1[index] * volNormFIX) >> 13);
+
+ /* stt->Rxx160_LPw32 *= 1.047 [~0.2 dB]; */
+ stt->Rxx160_LPw32 = (stt->Rxx160_LPw32 / 64) * 67;
+
+          tmp32 = inMicLevelTmp - stt->minLevel;
+          tmpU32 = ((uint32_t)weightFIX * (uint32_t)(tmp32));
+ stt->micVol = (tmpU32 >> 14) + stt->minLevel;
+ if (stt->micVol < lastMicVol + 2) {
+ stt->micVol = lastMicVol + 2;
+ }
+
+ inMicLevelTmp = stt->micVol;
+ }
+ } else if (stt->Rxx160_LPw32 < stt->lowerLimit) {
+ stt->msTooHigh = 0;
+ stt->changeToSlowMode = 0;
+ stt->msTooLow += 2;
+
+ if (stt->msTooLow > stt->msecSpeechInnerChange) {
+ /* Raise the recording level */
+ int16_t index, weightFIX;
+ int16_t volNormFIX = 16384; // =1 in Q14.
+
+ stt->msTooLow = 0;
+
+ /* Normalize the volume level */
+ tmp32 = (inMicLevelTmp - stt->minLevel) << 14;
+ if (stt->maxInit != stt->minLevel) {
+ volNormFIX = tmp32 / (stt->maxInit - stt->minLevel);
+ }
+
+ /* Find correct curve */
+ WebRtcAgc_ExpCurve(volNormFIX, &index);
+
+ /* Compute weighting factor for the volume increase, (3.^(-2.*X))/8+1
+ */
+ weightFIX =
+ kOffset2[index] - (int16_t)((kSlope2[index] * volNormFIX) >> 13);
+
+ /* stt->Rxx160_LPw32 *= 1.047 [~0.2 dB]; */
+ stt->Rxx160_LPw32 = (stt->Rxx160_LPw32 / 64) * 67;
+
+          tmp32 = inMicLevelTmp - stt->minLevel;
+          tmpU32 = ((uint32_t)weightFIX * (uint32_t)(tmp32));
+ stt->micVol = (tmpU32 >> 14) + stt->minLevel;
+ if (stt->micVol < lastMicVol + 1) {
+ stt->micVol = lastMicVol + 1;
+ }
+
+ inMicLevelTmp = stt->micVol;
+ }
+ } else {
+ /* The signal is inside the desired range which is:
+ * lowerLimit < Rxx160_LP/640 < upperLimit
+ */
+ if (stt->changeToSlowMode > 4000) {
+ stt->msecSpeechInnerChange = 1000;
+ stt->msecSpeechOuterChange = 500;
+ stt->upperLimit = stt->upperPrimaryLimit;
+ stt->lowerLimit = stt->lowerPrimaryLimit;
+ } else {
+ stt->changeToSlowMode += 2; // in milliseconds
+ }
+ stt->msTooLow = 0;
+ stt->msTooHigh = 0;
+
+ stt->micVol = inMicLevelTmp;
+ }
+ }
+ }
+
+ /* Ensure gain is not increased in presence of echo or after a mute event
+ * (but allow the zeroCtrl() increase on the frame of a mute detection).
+ */
+ if (echo == 1 ||
+ (stt->muteGuardMs > 0 && stt->muteGuardMs < kMuteGuardTimeMs)) {
+ if (stt->micVol > lastMicVol) {
+ stt->micVol = lastMicVol;
+ }
+ }
+
+ /* limit the gain */
+ if (stt->micVol > stt->maxLevel) {
+ stt->micVol = stt->maxLevel;
+ } else if (stt->micVol < stt->minOutput) {
+ stt->micVol = stt->minOutput;
+ }
+
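+  /* Report the level in the caller's range; >> stt->scale undoes the
+   * internal Q8 up-scaling (scale is currently always 0, see
+   * WebRtcAgc_Init). */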
+ *outMicLevel = WEBRTC_SPL_MIN(stt->micVol, stt->maxAnalog) >> stt->scale;
+
+ return 0;
+}
+
+int WebRtcAgc_Analyze(void* agcInst,
+ const int16_t* const* in_near,
+ size_t num_bands,
+ size_t samples,
+ int32_t inMicLevel,
+ int32_t* outMicLevel,
+ int16_t echo,
+ uint8_t* saturationWarning,
+ int32_t gains[11]) {
+ LegacyAgc* stt = reinterpret_cast<LegacyAgc*>(agcInst);
+
+ if (stt == NULL) {
+ return -1;
+ }
+
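+  /* Expect one 10 ms frame in the lowest band: 80 samples at 8 kHz, or 160
+   * samples at 16, 32, or 48 kHz. */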
+ if (stt->fs == 8000) {
+ if (samples != 80) {
+ return -1;
+ }
+ } else if (stt->fs == 16000 || stt->fs == 32000 || stt->fs == 48000) {
+ if (samples != 160) {
+ return -1;
+ }
+ } else {
+ return -1;
+ }
+
+ *saturationWarning = 0;
+ // TODO(minyue): PUT IN RANGE CHECKING FOR INPUT LEVELS
+ *outMicLevel = inMicLevel;
+
+ int32_t error =
+ WebRtcAgc_ComputeDigitalGains(&stt->digitalAgc, in_near, num_bands,
+ stt->fs, stt->lowLevelSignal, gains);
+ if (error == -1) {
+ return -1;
+ }
+
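+  /* Run the analog-level adaptation in all modes except fixed digital; in
+   * adaptive-digital mode it is skipped while the signal level is low. */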
+ if (stt->agcMode < kAgcModeFixedDigital &&
+ (stt->lowLevelSignal == 0 || stt->agcMode != kAgcModeAdaptiveDigital)) {
+ if (WebRtcAgc_ProcessAnalog(agcInst, inMicLevel, outMicLevel,
+ stt->vadMic.logRatio, echo,
+ saturationWarning) == -1) {
+ return -1;
+ }
+ }
+
+ /* update queue */
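+  /* env[] and Rxx16w32_array[] are double buffered (one 10 ms batch each);
+   * when two batches are queued, move the newer one to the front before
+   * dropping the consumed entry. */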
+ if (stt->inQueue > 1) {
+ memcpy(stt->env[0], stt->env[1], 10 * sizeof(int32_t));
+ memcpy(stt->Rxx16w32_array[0], stt->Rxx16w32_array[1], 5 * sizeof(int32_t));
+ }
+
+ if (stt->inQueue > 0) {
+ stt->inQueue--;
+ }
+
+ return 0;
+}
+
+int WebRtcAgc_Process(const void* agcInst,
+ const int32_t gains[11],
+ const int16_t* const* in_near,
+ size_t num_bands,
+ int16_t* const* out) {
+ const LegacyAgc* stt = (const LegacyAgc*)agcInst;
+ return WebRtcAgc_ApplyDigitalGains(gains, num_bands, stt->fs, in_near, out);
+}
+
+int WebRtcAgc_set_config(void* agcInst, WebRtcAgcConfig agcConfig) {
+ LegacyAgc* stt;
+ stt = reinterpret_cast<LegacyAgc*>(agcInst);
+
+ if (stt == NULL) {
+ return -1;
+ }
+
+ if (stt->initFlag != kInitCheck) {
+ stt->lastError = AGC_UNINITIALIZED_ERROR;
+ return -1;
+ }
+
+ if (agcConfig.limiterEnable != kAgcFalse &&
+ agcConfig.limiterEnable != kAgcTrue) {
+ stt->lastError = AGC_BAD_PARAMETER_ERROR;
+ return -1;
+ }
+ stt->limiterEnable = agcConfig.limiterEnable;
+ stt->compressionGaindB = agcConfig.compressionGaindB;
+ if ((agcConfig.targetLevelDbfs < 0) || (agcConfig.targetLevelDbfs > 31)) {
+ stt->lastError = AGC_BAD_PARAMETER_ERROR;
+ return -1;
+ }
+ stt->targetLevelDbfs = agcConfig.targetLevelDbfs;
+
+ if (stt->agcMode == kAgcModeFixedDigital) {
+ /* Adjust for different parameter interpretation in FixedDigital mode */
+ stt->compressionGaindB += agcConfig.targetLevelDbfs;
+ }
+
+ /* Update threshold levels for analog adaptation */
+ WebRtcAgc_UpdateAgcThresholds(stt);
+
+ /* Recalculate gain table */
+ if (WebRtcAgc_CalculateGainTable(
+ &(stt->digitalAgc.gainTable[0]), stt->compressionGaindB,
+ stt->targetLevelDbfs, stt->limiterEnable, stt->analogTarget) == -1) {
+ return -1;
+ }
+ /* Store the config in a WebRtcAgcConfig */
+ stt->usedConfig.compressionGaindB = agcConfig.compressionGaindB;
+ stt->usedConfig.limiterEnable = agcConfig.limiterEnable;
+ stt->usedConfig.targetLevelDbfs = agcConfig.targetLevelDbfs;
+
+ return 0;
+}
+
+int WebRtcAgc_get_config(void* agcInst, WebRtcAgcConfig* config) {
+ LegacyAgc* stt;
+ stt = reinterpret_cast<LegacyAgc*>(agcInst);
+
+ if (stt == NULL) {
+ return -1;
+ }
+
+ if (config == NULL) {
+ stt->lastError = AGC_NULL_POINTER_ERROR;
+ return -1;
+ }
+
+ if (stt->initFlag != kInitCheck) {
+ stt->lastError = AGC_UNINITIALIZED_ERROR;
+ return -1;
+ }
+
+ config->limiterEnable = stt->usedConfig.limiterEnable;
+ config->targetLevelDbfs = stt->usedConfig.targetLevelDbfs;
+ config->compressionGaindB = stt->usedConfig.compressionGaindB;
+
+ return 0;
+}
+
+void* WebRtcAgc_Create() {
+  LegacyAgc* stt = static_cast<LegacyAgc*>(malloc(sizeof(LegacyAgc)));
+  if (stt == NULL) {
+    return NULL;
+  }
+
+  stt->initFlag = 0;
+ stt->lastError = 0;
+
+ return stt;
+}
+
+void WebRtcAgc_Free(void* state) {
+ LegacyAgc* stt;
+
+ stt = reinterpret_cast<LegacyAgc*>(state);
+ free(stt);
+}
+
+/* minLevel - Minimum volume level
+ * maxLevel - Maximum volume level
+ */
+int WebRtcAgc_Init(void* agcInst,
+ int32_t minLevel,
+ int32_t maxLevel,
+ int16_t agcMode,
+ uint32_t fs) {
+ int32_t max_add, tmp32;
+ int16_t i;
+ int tmpNorm;
+ LegacyAgc* stt;
+
+ /* typecast state pointer */
+ stt = reinterpret_cast<LegacyAgc*>(agcInst);
+
+ if (WebRtcAgc_InitDigital(&stt->digitalAgc, agcMode) != 0) {
+ stt->lastError = AGC_UNINITIALIZED_ERROR;
+ return -1;
+ }
+
+ /* Analog AGC variables */
+ stt->envSum = 0;
+
+ /* mode = 0 - Only saturation protection
+ * 1 - Analog Automatic Gain Control [-targetLevelDbfs (default -3
+ * dBOv)]
+ * 2 - Digital Automatic Gain Control [-targetLevelDbfs (default -3
+ * dBOv)]
+ * 3 - Fixed Digital Gain [compressionGaindB (default 8 dB)]
+ */
+ if (agcMode < kAgcModeUnchanged || agcMode > kAgcModeFixedDigital) {
+ return -1;
+ }
+ stt->agcMode = agcMode;
+ stt->fs = fs;
+
+ /* initialize input VAD */
+ WebRtcAgc_InitVad(&stt->vadMic);
+
+ /* If the volume range is smaller than 0-256 then
+ * the levels are shifted up to Q8-domain */
+ tmpNorm = WebRtcSpl_NormU32((uint32_t)maxLevel);
+ stt->scale = tmpNorm - 23;
+ if (stt->scale < 0) {
+ stt->scale = 0;
+ }
+  // TODO(bjornv): Investigate if we really need to scale up a small range now
+  // when we have a guard against zero-increments. For now, we do not support
+  // scale up (scale = 0).
+ stt->scale = 0;
+ maxLevel <<= stt->scale;
+ minLevel <<= stt->scale;
+
+ /* Make minLevel and maxLevel static in AdaptiveDigital */
+ if (stt->agcMode == kAgcModeAdaptiveDigital) {
+ minLevel = 0;
+ maxLevel = 255;
+ stt->scale = 0;
+ }
+  /* The maximum supplemental volume range is based on a rough estimate
+   * of how much lower the gain will be than the real analog gain. */
+ max_add = (maxLevel - minLevel) / 4;
+
+ /* Minimum/maximum volume level that can be set */
+ stt->minLevel = minLevel;
+ stt->maxAnalog = maxLevel;
+ stt->maxLevel = maxLevel + max_add;
+ stt->maxInit = stt->maxLevel;
+
+ stt->zeroCtrlMax = stt->maxAnalog;
+ stt->lastInMicLevel = 0;
+
+ /* Initialize micVol parameter */
+ stt->micVol = stt->maxAnalog;
+ if (stt->agcMode == kAgcModeAdaptiveDigital) {
+ stt->micVol = 127; /* Mid-point of mic level */
+ }
+ stt->micRef = stt->micVol;
+ stt->micGainIdx = 127;
+
+ /* Minimum output volume is 4% higher than the available lowest volume level
+ */
+ tmp32 = ((stt->maxLevel - stt->minLevel) * 10) >> 8;
+ stt->minOutput = (stt->minLevel + tmp32);
+
+ stt->msTooLow = 0;
+ stt->msTooHigh = 0;
+ stt->changeToSlowMode = 0;
+ stt->firstCall = 0;
+ stt->msZero = 0;
+ stt->muteGuardMs = 0;
+ stt->gainTableIdx = 0;
+
+ stt->msecSpeechInnerChange = kMsecSpeechInner;
+ stt->msecSpeechOuterChange = kMsecSpeechOuter;
+
+ stt->activeSpeech = 0;
+ stt->Rxx16_LPw32Max = 0;
+
+ stt->vadThreshold = kNormalVadThreshold;
+ stt->inActive = 0;
+
+ for (i = 0; i < kRxxBufferLen; i++) {
+ stt->Rxx16_vectorw32[i] = (int32_t)1000; /* -54dBm0 */
+ }
+ stt->Rxx160w32 = 125 * kRxxBufferLen; /* (stt->Rxx16_vectorw32[0]>>3) = 125 */
+
+ stt->Rxx16pos = 0;
+ stt->Rxx16_LPw32 = (int32_t)16284; /* Q(-4) */
+
+ for (i = 0; i < 5; i++) {
+ stt->Rxx16w32_array[0][i] = 0;
+ }
+ for (i = 0; i < 10; i++) {
+ stt->env[0][i] = 0;
+ stt->env[1][i] = 0;
+ }
+ stt->inQueue = 0;
+
+ WebRtcSpl_MemSetW32(stt->filterState, 0, 8);
+
+ stt->initFlag = kInitCheck;
+ // Default config settings.
+ stt->defaultConfig.limiterEnable = kAgcTrue;
+ stt->defaultConfig.targetLevelDbfs = AGC_DEFAULT_TARGET_LEVEL;
+ stt->defaultConfig.compressionGaindB = AGC_DEFAULT_COMP_GAIN;
+
+ if (WebRtcAgc_set_config(stt, stt->defaultConfig) == -1) {
+ stt->lastError = AGC_UNSPECIFIED_ERROR;
+ return -1;
+ }
+ stt->Rxx160_LPw32 = stt->analogTargetLevel; // Initialize rms value
+
+ stt->lowLevelSignal = 0;
+
+  /* Only positive values that are not too large are allowed */
+ if ((minLevel >= maxLevel) || (maxLevel & 0xFC000000)) {
+ return -1;
+ } else {
+ return 0;
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/agc/legacy/analog_agc.h b/third_party/libwebrtc/modules/audio_processing/agc/legacy/analog_agc.h
new file mode 100644
index 0000000000..22cd924a93
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc/legacy/analog_agc.h
@@ -0,0 +1,118 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AGC_LEGACY_ANALOG_AGC_H_
+#define MODULES_AUDIO_PROCESSING_AGC_LEGACY_ANALOG_AGC_H_
+
+
+#include "modules/audio_processing/agc/legacy/digital_agc.h"
+#include "modules/audio_processing/agc/legacy/gain_control.h"
+
+namespace webrtc {
+
+/* Analog Automatic Gain Control variables:
+ * Constant declarations (inner limits inside which no changes are done)
+ * In the beginning the range is narrower to widen as soon as the measure
+ * 'Rxx160_LP' is inside it. Currently the starting limits are -22.2+/-1dBm0
+ * and the final limits -22.2+/-2.5dBm0. These levels makes the speech signal
+ * go towards -25.4dBm0 (-31.4dBov). Tuned with wbfile-31.4dBov.pcm
+ * The limits are created by running the AGC with a file having the desired
+ * signal level and thereafter plotting Rxx160_LP in the dBm0-domain defined
+ * by out=10*log10(in/260537279.7); Set the target level to the average level
+ * of our measure Rxx160_LP. Remember that the levels are in blocks of 16 in
+ * Q(-7). (Example matlab code: round(db2pow(-21.2)*16/2^7) )
+ */
+constexpr int16_t kRxxBufferLen = 10;
+
+static const int16_t kMsecSpeechInner = 520;
+static const int16_t kMsecSpeechOuter = 340;
+
+static const int16_t kNormalVadThreshold = 400;
+
+static const int16_t kAlphaShortTerm = 6;   // 2^(-6) = 0.0156
+static const int16_t kAlphaLongTerm = 10;   // 2^(-10) = 0.000977
+
+typedef struct {
+ // Configurable parameters/variables
+ uint32_t fs; // Sampling frequency
+ int16_t compressionGaindB; // Fixed gain level in dB
+ int16_t targetLevelDbfs; // Target level in -dBfs of envelope (default -3)
+ int16_t agcMode; // Hard coded mode (adaptAna/adaptDig/fixedDig)
+ uint8_t limiterEnable; // Enabling limiter (on/off (default off))
+ WebRtcAgcConfig defaultConfig;
+ WebRtcAgcConfig usedConfig;
+
+ // General variables
+ int16_t initFlag;
+ int16_t lastError;
+
+ // Target level parameters
+ // Based on the above: analogTargetLevel = round((32767*10^(-22/20))^2*16/2^7)
+ int32_t analogTargetLevel; // = kRxxBufferLen * 846805; -22 dBfs
+ int32_t startUpperLimit; // = kRxxBufferLen * 1066064; -21 dBfs
+ int32_t startLowerLimit; // = kRxxBufferLen * 672641; -23 dBfs
+ int32_t upperPrimaryLimit; // = kRxxBufferLen * 1342095; -20 dBfs
+ int32_t lowerPrimaryLimit; // = kRxxBufferLen * 534298; -24 dBfs
+ int32_t upperSecondaryLimit; // = kRxxBufferLen * 2677832; -17 dBfs
+ int32_t lowerSecondaryLimit; // = kRxxBufferLen * 267783; -27 dBfs
+ uint16_t targetIdx; // Table index for corresponding target level
+ int16_t analogTarget; // Digital reference level in ENV scale
+
+ // Analog AGC specific variables
+ int32_t filterState[8]; // For downsampling wb to nb
+ int32_t upperLimit; // Upper limit for mic energy
+ int32_t lowerLimit; // Lower limit for mic energy
+ int32_t Rxx160w32; // Average energy for one frame
+ int32_t Rxx16_LPw32; // Low pass filtered subframe energies
+ int32_t Rxx160_LPw32; // Low pass filtered frame energies
+ int32_t Rxx16_LPw32Max; // Keeps track of largest energy subframe
+ int32_t Rxx16_vectorw32[kRxxBufferLen]; // Array with subframe energies
+ int32_t Rxx16w32_array[2][5]; // Energy values of microphone signal
+ int32_t env[2][10]; // Envelope values of subframes
+
+ int16_t Rxx16pos; // Current position in the Rxx16_vectorw32
+ int16_t envSum; // Filtered scaled envelope in subframes
+ int16_t vadThreshold; // Threshold for VAD decision
+ int16_t inActive; // Inactive time in milliseconds
+ int16_t msTooLow; // Milliseconds of speech at a too low level
+ int16_t msTooHigh; // Milliseconds of speech at a too high level
+ int16_t changeToSlowMode; // Change to slow mode after some time at target
+ int16_t firstCall; // First call to the process-function
+ int16_t msZero; // Milliseconds of zero input
+ int16_t msecSpeechOuterChange; // Min ms of speech between volume changes
+ int16_t msecSpeechInnerChange; // Min ms of speech between volume changes
+ int16_t activeSpeech; // Milliseconds of active speech
+ int16_t muteGuardMs; // Counter to prevent mute action
+ int16_t inQueue; // 10 ms batch indicator
+
+ // Microphone level variables
+ int32_t micRef; // Remember ref. mic level for virtual mic
+ uint16_t gainTableIdx; // Current position in virtual gain table
+ int32_t micGainIdx; // Gain index of mic level to increase slowly
+ int32_t micVol; // Remember volume between frames
+ int32_t maxLevel; // Max possible vol level, incl dig gain
+ int32_t maxAnalog; // Maximum possible analog volume level
+ int32_t maxInit; // Initial value of "max"
+ int32_t minLevel; // Minimum possible volume level
+ int32_t minOutput; // Minimum output volume level
+ int32_t zeroCtrlMax; // Remember max gain => don't amp low input
+ int32_t lastInMicLevel;
+
+ int16_t scale; // Scale factor for internal volume levels
+ // Structs for VAD and digital_agc
+ AgcVad vadMic;
+ DigitalAgc digitalAgc;
+
+ int16_t lowLevelSignal;
+} LegacyAgc;
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AGC_LEGACY_ANALOG_AGC_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/agc/legacy/digital_agc.cc b/third_party/libwebrtc/modules/audio_processing/agc/legacy/digital_agc.cc
new file mode 100644
index 0000000000..4cd86acba8
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc/legacy/digital_agc.cc
@@ -0,0 +1,704 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/agc/legacy/digital_agc.h"
+
+#include <string.h>
+
+#include "modules/audio_processing/agc/legacy/gain_control.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+namespace {
+
+// To generate the gaintable, copy&paste the following lines to a Matlab window:
+// MaxGain = 6; MinGain = 0; CompRatio = 3; Knee = 1;
+// zeros = 0:31; lvl = 2.^(1-zeros);
+// A = -10*log10(lvl) * (CompRatio - 1) / CompRatio;
+// B = MaxGain - MinGain;
+// gains = round(2^16*10.^(0.05 * (MinGain + B * (
+// log(exp(-Knee*A)+exp(-Knee*B)) - log(1+exp(-Knee*B)) ) /
+// log(1/(1+exp(Knee*B))))));
+// fprintf(1, '\t%i, %i, %i, %i,\n', gains);
+// % Matlab code for plotting the gain and input/output level characteristic
+// (copy/paste the following 3 lines):
+// in = 10*log10(lvl); out = 20*log10(gains/65536);
+// subplot(121); plot(in, out); axis([-30, 0, -5, 20]); grid on; xlabel('Input
+// (dB)'); ylabel('Gain (dB)');
+// subplot(122); plot(in, in+out); axis([-30, 0, -30, 5]); grid on;
+// xlabel('Input (dB)'); ylabel('Output (dB)');
+// zoom on;
+
+// Generator table for y=log2(1+e^x) in Q8.
+enum { kGenFuncTableSize = 128 };
+static const uint16_t kGenFuncTable[kGenFuncTableSize] = {
+ 256, 485, 786, 1126, 1484, 1849, 2217, 2586, 2955, 3324, 3693,
+ 4063, 4432, 4801, 5171, 5540, 5909, 6279, 6648, 7017, 7387, 7756,
+ 8125, 8495, 8864, 9233, 9603, 9972, 10341, 10711, 11080, 11449, 11819,
+ 12188, 12557, 12927, 13296, 13665, 14035, 14404, 14773, 15143, 15512, 15881,
+ 16251, 16620, 16989, 17359, 17728, 18097, 18466, 18836, 19205, 19574, 19944,
+ 20313, 20682, 21052, 21421, 21790, 22160, 22529, 22898, 23268, 23637, 24006,
+ 24376, 24745, 25114, 25484, 25853, 26222, 26592, 26961, 27330, 27700, 28069,
+ 28438, 28808, 29177, 29546, 29916, 30285, 30654, 31024, 31393, 31762, 32132,
+ 32501, 32870, 33240, 33609, 33978, 34348, 34717, 35086, 35456, 35825, 36194,
+ 36564, 36933, 37302, 37672, 38041, 38410, 38780, 39149, 39518, 39888, 40257,
+ 40626, 40996, 41365, 41734, 42104, 42473, 42842, 43212, 43581, 43950, 44320,
+ 44689, 45058, 45428, 45797, 46166, 46536, 46905};
+
+static const int16_t kAvgDecayTime = 250; // frames; < 3000
+
+// the 32 most significant bits of A(19) * B(26) >> 13
+#define AGC_MUL32(A, B) (((B) >> 13) * (A) + (((0x00001FFF & (B)) * (A)) >> 13))
+// C + the 32 most significant bits of A * B
+#define AGC_SCALEDIFF32(A, B, C) \
+ ((C) + ((B) >> 16) * (A) + (((0x0000FFFF & (B)) * (A)) >> 16))
+
+} // namespace
+
+int32_t WebRtcAgc_CalculateGainTable(int32_t* gainTable, // Q16
+ int16_t digCompGaindB, // Q0
+ int16_t targetLevelDbfs, // Q0
+ uint8_t limiterEnable,
+ int16_t analogTarget) { // Q0
+ // This function generates the compressor gain table used in the fixed digital
+ // part.
+ uint32_t tmpU32no1, tmpU32no2, absInLevel, logApprox;
+ int32_t inLevel, limiterLvl;
+ int32_t tmp32, tmp32no1, tmp32no2, numFIX, den, y32;
+ const uint16_t kLog10 = 54426; // log2(10) in Q14
+ const uint16_t kLog10_2 = 49321; // 10*log10(2) in Q14
+ const uint16_t kLogE_1 = 23637; // log2(e) in Q14
+ uint16_t constMaxGain;
+ uint16_t tmpU16, intPart, fracPart;
+ const int16_t kCompRatio = 3;
+ int16_t limiterOffset = 0; // Limiter offset
+ int16_t limiterIdx, limiterLvlX;
+ int16_t constLinApprox, maxGain, diffGain;
+ int16_t i, tmp16, tmp16no1;
+ int zeros, zerosScale;
+
+ // Constants
+ // kLogE_1 = 23637; // log2(e) in Q14
+ // kLog10 = 54426; // log2(10) in Q14
+ // kLog10_2 = 49321; // 10*log10(2) in Q14
+
+ // Calculate maximum digital gain and zero gain level
+ tmp32no1 = (digCompGaindB - analogTarget) * (kCompRatio - 1);
+ tmp16no1 = analogTarget - targetLevelDbfs;
+ tmp16no1 +=
+ WebRtcSpl_DivW32W16ResW16(tmp32no1 + (kCompRatio >> 1), kCompRatio);
+ maxGain = WEBRTC_SPL_MAX(tmp16no1, (analogTarget - targetLevelDbfs));
+ tmp32no1 = maxGain * kCompRatio;
+ if ((digCompGaindB <= analogTarget) && (limiterEnable)) {
+ limiterOffset = 0;
+ }
+
+ // Calculate the difference between maximum gain and gain at 0dB0v
+ tmp32no1 = digCompGaindB * (kCompRatio - 1);
+ diffGain =
+ WebRtcSpl_DivW32W16ResW16(tmp32no1 + (kCompRatio >> 1), kCompRatio);
+ if (diffGain < 0 || diffGain >= kGenFuncTableSize) {
+ RTC_DCHECK(0);
+ return -1;
+ }
+
+ // Calculate the limiter level and index:
+ // limiterLvlX = analogTarget - limiterOffset
+ // limiterLvl = targetLevelDbfs + limiterOffset/compRatio
+ limiterLvlX = analogTarget - limiterOffset;
+ limiterIdx = 2 + WebRtcSpl_DivW32W16ResW16((int32_t)limiterLvlX * (1 << 13),
+ kLog10_2 / 2);
+ tmp16no1 =
+ WebRtcSpl_DivW32W16ResW16(limiterOffset + (kCompRatio >> 1), kCompRatio);
+ limiterLvl = targetLevelDbfs + tmp16no1;
+
+ // Calculate (through table lookup):
+ // constMaxGain = log2(1+2^(log2(e)*diffGain)); (in Q8)
+ constMaxGain = kGenFuncTable[diffGain]; // in Q8
+
+ // Calculate a parameter used to approximate the fractional part of 2^x with a
+ // piecewise linear function in Q14:
+ // constLinApprox = round(3/2*(4*(3-2*sqrt(2))/(log(2)^2)-0.5)*2^14);
+ constLinApprox = 22817; // in Q14
+
+ // Calculate a denominator used in the exponential part to convert from dB to
+ // linear scale:
+ // den = 20*constMaxGain (in Q8)
+ den = WEBRTC_SPL_MUL_16_U16(20, constMaxGain); // in Q8
+
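+  // Build the 32-entry gain table. At runtime the table is indexed by the
+  // number of leading zeros of the measured level, i.e. one entry per
+  // halving (~3 dB) of the input energy.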
+ for (i = 0; i < 32; i++) {
+ // Calculate scaled input level (compressor):
+ // inLevel =
+ // fix((-constLog10_2*(compRatio-1)*(1-i)+fix(compRatio/2))/compRatio)
+ tmp16 = (int16_t)((kCompRatio - 1) * (i - 1)); // Q0
+ tmp32 = WEBRTC_SPL_MUL_16_U16(tmp16, kLog10_2) + 1; // Q14
+ inLevel = WebRtcSpl_DivW32W16(tmp32, kCompRatio); // Q14
+
+ // Calculate diffGain-inLevel, to map using the genFuncTable
+ inLevel = (int32_t)diffGain * (1 << 14) - inLevel; // Q14
+
+ // Make calculations on abs(inLevel) and compensate for the sign afterwards.
+ absInLevel = (uint32_t)WEBRTC_SPL_ABS_W32(inLevel); // Q14
+
+ // LUT with interpolation
+ intPart = (uint16_t)(absInLevel >> 14);
+ fracPart =
+ (uint16_t)(absInLevel & 0x00003FFF); // extract the fractional part
+ tmpU16 = kGenFuncTable[intPart + 1] - kGenFuncTable[intPart]; // Q8
+ tmpU32no1 = tmpU16 * fracPart; // Q22
+ tmpU32no1 += (uint32_t)kGenFuncTable[intPart] << 14; // Q22
+ logApprox = tmpU32no1 >> 8; // Q14
+ // Compensate for negative exponent using the relation:
+ // log2(1 + 2^-x) = log2(1 + 2^x) - x
+ if (inLevel < 0) {
+ zeros = WebRtcSpl_NormU32(absInLevel);
+ zerosScale = 0;
+ if (zeros < 15) {
+ // Not enough space for multiplication
+ tmpU32no2 = absInLevel >> (15 - zeros); // Q(zeros-1)
+ tmpU32no2 = WEBRTC_SPL_UMUL_32_16(tmpU32no2, kLogE_1); // Q(zeros+13)
+ if (zeros < 9) {
+ zerosScale = 9 - zeros;
+ tmpU32no1 >>= zerosScale; // Q(zeros+13)
+ } else {
+ tmpU32no2 >>= zeros - 9; // Q22
+ }
+ } else {
+ tmpU32no2 = WEBRTC_SPL_UMUL_32_16(absInLevel, kLogE_1); // Q28
+ tmpU32no2 >>= 6; // Q22
+ }
+ logApprox = 0;
+ if (tmpU32no2 < tmpU32no1) {
+ logApprox = (tmpU32no1 - tmpU32no2) >> (8 - zerosScale); // Q14
+ }
+ }
+ numFIX = (maxGain * constMaxGain) * (1 << 6); // Q14
+ numFIX -= (int32_t)logApprox * diffGain; // Q14
+
+ // Calculate ratio
+ // Shift `numFIX` as much as possible.
+ // Ensure we avoid wrap-around in `den` as well.
+ if (numFIX > (den >> 8) || -numFIX > (den >> 8)) { // `den` is Q8.
+ zeros = WebRtcSpl_NormW32(numFIX);
+ } else {
+ zeros = WebRtcSpl_NormW32(den) + 8;
+ }
+ numFIX *= 1 << zeros; // Q(14+zeros)
+
+ // Shift den so we end up in Qy1
+ tmp32no1 = WEBRTC_SPL_SHIFT_W32(den, zeros - 9); // Q(zeros - 1)
+ y32 = numFIX / tmp32no1; // in Q15
+ // This is to do rounding in Q14.
+ y32 = y32 >= 0 ? (y32 + 1) >> 1 : -((-y32 + 1) >> 1);
+
+ if (limiterEnable && (i < limiterIdx)) {
+ tmp32 = WEBRTC_SPL_MUL_16_U16(i - 1, kLog10_2); // Q14
+ tmp32 -= limiterLvl * (1 << 14); // Q14
+ y32 = WebRtcSpl_DivW32W16(tmp32 + 10, 20);
+ }
+ if (y32 > 39000) {
+ tmp32 = (y32 >> 1) * kLog10 + 4096; // in Q27
+ tmp32 >>= 13; // In Q14.
+ } else {
+ tmp32 = y32 * kLog10 + 8192; // in Q28
+ tmp32 >>= 14; // In Q14.
+ }
+ tmp32 += 16 << 14; // in Q14 (Make sure final output is in Q16)
+
+ // Calculate power
+ if (tmp32 > 0) {
+ intPart = (int16_t)(tmp32 >> 14);
+ fracPart = (uint16_t)(tmp32 & 0x00003FFF); // in Q14
+ if ((fracPart >> 13) != 0) {
+ tmp16 = (2 << 14) - constLinApprox;
+ tmp32no2 = (1 << 14) - fracPart;
+ tmp32no2 *= tmp16;
+ tmp32no2 >>= 13;
+ tmp32no2 = (1 << 14) - tmp32no2;
+ } else {
+ tmp16 = constLinApprox - (1 << 14);
+ tmp32no2 = (fracPart * tmp16) >> 13;
+ }
+ fracPart = (uint16_t)tmp32no2;
+ gainTable[i] =
+ (1 << intPart) + WEBRTC_SPL_SHIFT_W32(fracPart, intPart - 14);
+ } else {
+ gainTable[i] = 0;
+ }
+ }
+
+ return 0;
+}
+
+int32_t WebRtcAgc_InitDigital(DigitalAgc* stt, int16_t agcMode) {
+ if (agcMode == kAgcModeFixedDigital) {
+ // start at minimum to find correct gain faster
+ stt->capacitorSlow = 0;
+ } else {
+ // start out with 0 dB gain
+ stt->capacitorSlow = 134217728; // (int32_t)(0.125f * 32768.0f * 32768.0f);
+ }
+ stt->capacitorFast = 0;
+ stt->gain = 65536;
+ stt->gatePrevious = 0;
+ stt->agcMode = agcMode;
+
+ // initialize VADs
+ WebRtcAgc_InitVad(&stt->vadNearend);
+ WebRtcAgc_InitVad(&stt->vadFarend);
+
+ return 0;
+}
+
+int32_t WebRtcAgc_AddFarendToDigital(DigitalAgc* stt,
+ const int16_t* in_far,
+ size_t nrSamples) {
+ RTC_DCHECK(stt);
+ // VAD for far end
+ WebRtcAgc_ProcessVad(&stt->vadFarend, in_far, nrSamples);
+
+ return 0;
+}
+
+// `gains` is an 11-element array (one value per ms, including start and end).
+int32_t WebRtcAgc_ComputeDigitalGains(DigitalAgc* stt,
+ const int16_t* const* in_near,
+ size_t num_bands,
+ uint32_t FS,
+ int16_t lowlevelSignal,
+ int32_t gains[11]) {
+ int32_t tmp32;
+ int32_t env[10];
+ int32_t max_nrg;
+ int32_t cur_level;
+ int32_t gain32;
+ int16_t logratio;
+ int16_t lower_thr, upper_thr;
+ int16_t zeros = 0, zeros_fast, frac = 0;
+ int16_t decay;
+ int16_t gate, gain_adj;
+ int16_t k;
+ size_t n, L;
+
+ // determine number of samples per ms
+ if (FS == 8000) {
+ L = 8;
+ } else if (FS == 16000 || FS == 32000 || FS == 48000) {
+ L = 16;
+ } else {
+ return -1;
+ }
+
+ // VAD for near end
+ logratio = WebRtcAgc_ProcessVad(&stt->vadNearend, in_near[0], L * 10);
+
+ // Account for far end VAD
+ if (stt->vadFarend.counter > 10) {
+ tmp32 = 3 * logratio;
+ logratio = (int16_t)((tmp32 - stt->vadFarend.logRatio) >> 2);
+ }
+
+ // Determine decay factor depending on VAD
+ // upper_thr = 1.0f;
+ // lower_thr = 0.25f;
+ upper_thr = 1024; // Q10
+ lower_thr = 0; // Q10
+ if (logratio > upper_thr) {
+ // decay = -2^17 / DecayTime; -> -65
+ decay = -65;
+ } else if (logratio < lower_thr) {
+ decay = 0;
+ } else {
+ // decay = (int16_t)(((lower_thr - logratio)
+ // * (2^27/(DecayTime*(upper_thr-lower_thr)))) >> 10);
+ // SUBSTITUTED: 2^27/(DecayTime*(upper_thr-lower_thr)) -> 65
+ tmp32 = (lower_thr - logratio) * 65;
+ decay = (int16_t)(tmp32 >> 10);
+ }
+
+ // adjust decay factor for long silence (detected as low standard deviation)
+ // This is only done in the adaptive modes
+ if (stt->agcMode != kAgcModeFixedDigital) {
+ if (stt->vadNearend.stdLongTerm < 4000) {
+ decay = 0;
+ } else if (stt->vadNearend.stdLongTerm < 8096) {
+ // decay = (int16_t)(((stt->vadNearend.stdLongTerm - 4000) * decay) >>
+ // 12);
+ tmp32 = (stt->vadNearend.stdLongTerm - 4000) * decay;
+ decay = (int16_t)(tmp32 >> 12);
+ }
+
+ if (lowlevelSignal != 0) {
+ decay = 0;
+ }
+ }
+ // Find max amplitude per sub frame
+ // iterate over sub frames
+ for (k = 0; k < 10; k++) {
+ // iterate over samples
+ max_nrg = 0;
+ for (n = 0; n < L; n++) {
+ int32_t nrg = in_near[0][k * L + n] * in_near[0][k * L + n];
+ if (nrg > max_nrg) {
+ max_nrg = nrg;
+ }
+ }
+ env[k] = max_nrg;
+ }
+
+ // Calculate gain per sub frame
+ gains[0] = stt->gain;
+ for (k = 0; k < 10; k++) {
+ // Fast envelope follower
+ // decay time = -131000 / -1000 = 131 (ms)
+ stt->capacitorFast =
+ AGC_SCALEDIFF32(-1000, stt->capacitorFast, stt->capacitorFast);
+ if (env[k] > stt->capacitorFast) {
+ stt->capacitorFast = env[k];
+ }
+ // Slow envelope follower
+ if (env[k] > stt->capacitorSlow) {
+ // increase capacitorSlow
+ stt->capacitorSlow = AGC_SCALEDIFF32(500, (env[k] - stt->capacitorSlow),
+ stt->capacitorSlow);
+ } else {
+ // decrease capacitorSlow
+ stt->capacitorSlow =
+ AGC_SCALEDIFF32(decay, stt->capacitorSlow, stt->capacitorSlow);
+ }
+
+ // use maximum of both capacitors as current level
+ if (stt->capacitorFast > stt->capacitorSlow) {
+ cur_level = stt->capacitorFast;
+ } else {
+ cur_level = stt->capacitorSlow;
+ }
+ // Translate signal level into gain, using a piecewise linear approximation
+ // find number of leading zeros
+ zeros = WebRtcSpl_NormU32((uint32_t)cur_level);
+ if (cur_level == 0) {
+ zeros = 31;
+ }
+ tmp32 = ((uint32_t)cur_level << zeros) & 0x7FFFFFFF;
+ frac = (int16_t)(tmp32 >> 19); // Q12.
+ // Interpolate between gainTable[zeros] and gainTable[zeros-1].
+ tmp32 =
+ ((stt->gainTable[zeros - 1] - stt->gainTable[zeros]) * (int64_t)frac) >>
+ 12;
+ gains[k + 1] = stt->gainTable[zeros] + tmp32;
+ }
+
+ // Gate processing (lower gain during absence of speech)
+ zeros = (zeros << 9) - (frac >> 3);
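+  // `zeros` now holds the last subframe's level in a negated-log2 domain
+  // with 9 fractional bits (`frac` is Q12, so >> 3 yields Q9).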
+ // find number of leading zeros
+ zeros_fast = WebRtcSpl_NormU32((uint32_t)stt->capacitorFast);
+ if (stt->capacitorFast == 0) {
+ zeros_fast = 31;
+ }
+ tmp32 = ((uint32_t)stt->capacitorFast << zeros_fast) & 0x7FFFFFFF;
+ zeros_fast <<= 9;
+ zeros_fast -= (int16_t)(tmp32 >> 22);
+
+ gate = 1000 + zeros_fast - zeros - stt->vadNearend.stdShortTerm;
+
+ if (gate < 0) {
+ stt->gatePrevious = 0;
+ } else {
+ tmp32 = stt->gatePrevious * 7;
+ gate = (int16_t)((gate + tmp32) >> 3);
+ stt->gatePrevious = gate;
+ }
+ // gate < 0 -> no gate
+ // gate > 2500 -> max gate
+ if (gate > 0) {
+ if (gate < 2500) {
+ gain_adj = (2500 - gate) >> 5;
+ } else {
+ gain_adj = 0;
+ }
+ for (k = 0; k < 10; k++) {
+ if ((gains[k + 1] - stt->gainTable[0]) > 8388608) {
+ // To prevent wraparound
+ tmp32 = (gains[k + 1] - stt->gainTable[0]) >> 8;
+ tmp32 *= 178 + gain_adj;
+ } else {
+ tmp32 = (gains[k + 1] - stt->gainTable[0]) * (178 + gain_adj);
+ tmp32 >>= 8;
+ }
+ gains[k + 1] = stt->gainTable[0] + tmp32;
+ }
+ }
+
+ // Limit gain to avoid overload distortion
+ for (k = 0; k < 10; k++) {
+ // Find a shift of gains[k + 1] such that it can be squared without
+ // overflow, but at least by 10 bits.
+ zeros = 10;
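+    // 47452159 = (46340 << 10) - 1, where 46340 ~ sqrt(2^31) is the largest
+    // value whose square still fits in an int32.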
+ if (gains[k + 1] > 47452159) {
+ zeros = 16 - WebRtcSpl_NormW32(gains[k + 1]);
+ }
+ gain32 = (gains[k + 1] >> zeros) + 1;
+ gain32 *= gain32;
+ // check for overflow
+ while (AGC_MUL32((env[k] >> 12) + 1, gain32) >
+ WEBRTC_SPL_SHIFT_W32((int32_t)32767, 2 * (1 - zeros + 10))) {
+ // multiply by 253/256 ==> -0.1 dB
+ if (gains[k + 1] > 8388607) {
+ // Prevent wrap around
+ gains[k + 1] = (gains[k + 1] / 256) * 253;
+ } else {
+ gains[k + 1] = (gains[k + 1] * 253) / 256;
+ }
+ gain32 = (gains[k + 1] >> zeros) + 1;
+ gain32 *= gain32;
+ }
+ }
+ // gain reductions should be done 1 ms earlier than gain increases
+ for (k = 1; k < 10; k++) {
+ if (gains[k] > gains[k + 1]) {
+ gains[k] = gains[k + 1];
+ }
+ }
+ // save start gain for next frame
+ stt->gain = gains[10];
+
+ return 0;
+}
+
+int32_t WebRtcAgc_ApplyDigitalGains(const int32_t gains[11],
+ size_t num_bands,
+ uint32_t FS,
+ const int16_t* const* in_near,
+ int16_t* const* out) {
+ // Apply gain
+ // handle first sub frame separately
+ size_t L;
+  int16_t L2;  // log2(L), where L = samples per 1 ms subframe
+
+ // determine number of samples per ms
+ if (FS == 8000) {
+ L = 8;
+ L2 = 3;
+ } else if (FS == 16000 || FS == 32000 || FS == 48000) {
+ L = 16;
+ L2 = 4;
+ } else {
+ return -1;
+ }
+
+ for (size_t i = 0; i < num_bands; ++i) {
+ if (in_near[i] != out[i]) {
+ // Only needed if they don't already point to the same place.
+ memcpy(out[i], in_near[i], 10 * L * sizeof(in_near[i][0]));
+ }
+ }
+
+ // iterate over samples
+ int32_t delta = (gains[1] - gains[0]) * (1 << (4 - L2));
+ int32_t gain32 = gains[0] * (1 << 4);
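+  // gains[] are in Q16, so gain32 is in Q20. With L = 2^L2 samples per
+  // subframe, delta = (gains[1] - gains[0]) << 4 >> L2, i.e. the Q20 gain
+  // step applied per sample.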
+ for (size_t n = 0; n < L; n++) {
+ for (size_t i = 0; i < num_bands; ++i) {
+ int32_t out_tmp = (int64_t)out[i][n] * ((gain32 + 127) >> 7) >> 16;
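+      // Rough saturation pre-check: with the gain pre-rounded and shifted by
+      // 7 instead of 4, out_tmp approximates the final sample >> 3, so
+      // out_tmp > 4095 corresponds to clipping past the 16-bit range.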
+ if (out_tmp > 4095) {
+ out[i][n] = (int16_t)32767;
+ } else if (out_tmp < -4096) {
+ out[i][n] = (int16_t)-32768;
+ } else {
+ int32_t tmp32 = ((int64_t)out[i][n] * (gain32 >> 4)) >> 16;
+ out[i][n] = (int16_t)tmp32;
+ }
+ }
+
+ gain32 += delta;
+ }
+ // iterate over subframes
+ for (int k = 1; k < 10; k++) {
+ delta = (gains[k + 1] - gains[k]) * (1 << (4 - L2));
+ gain32 = gains[k] * (1 << 4);
+ // iterate over samples
+ for (size_t n = 0; n < L; n++) {
+ for (size_t i = 0; i < num_bands; ++i) {
+ int64_t tmp64 = ((int64_t)(out[i][k * L + n])) * (gain32 >> 4);
+ tmp64 = tmp64 >> 16;
+ if (tmp64 > 32767) {
+ out[i][k * L + n] = 32767;
+ } else if (tmp64 < -32768) {
+ out[i][k * L + n] = -32768;
+ } else {
+ out[i][k * L + n] = (int16_t)(tmp64);
+ }
+ }
+ gain32 += delta;
+ }
+ }
+ return 0;
+}
+
+void WebRtcAgc_InitVad(AgcVad* state) {
+ int16_t k;
+
+ state->HPstate = 0; // state of high pass filter
+ state->logRatio = 0; // log( P(active) / P(inactive) )
+ // average input level (Q10)
+ state->meanLongTerm = 15 << 10;
+
+ // variance of input level (Q8)
+ state->varianceLongTerm = 500 << 8;
+
+ state->stdLongTerm = 0; // standard deviation of input level in dB
+ // short-term average input level (Q10)
+ state->meanShortTerm = 15 << 10;
+
+ // short-term variance of input level (Q8)
+ state->varianceShortTerm = 500 << 8;
+
+ state->stdShortTerm =
+ 0; // short-term standard deviation of input level in dB
+ state->counter = 3; // counts updates
+ for (k = 0; k < 8; k++) {
+ // downsampling filter
+ state->downState[k] = 0;
+ }
+}
+
+int16_t WebRtcAgc_ProcessVad(AgcVad* state, // (i) VAD state
+ const int16_t* in, // (i) Speech signal
+ size_t nrSamples) { // (i) number of samples
+ uint32_t nrg;
+ int32_t out, tmp32, tmp32b;
+ uint16_t tmpU16;
+ int16_t k, subfr, tmp16;
+ int16_t buf1[8];
+ int16_t buf2[4];
+ int16_t HPstate;
+ int16_t zeros, dB;
+ int64_t tmp64;
+
+ // process in 10 sub frames of 1 ms (to save on memory)
+ nrg = 0;
+ HPstate = state->HPstate;
+ for (subfr = 0; subfr < 10; subfr++) {
+ // downsample to 4 kHz
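+    // nrSamples == 160 means 16 kHz input: average sample pairs down to
+    // 8 kHz first, then WebRtcSpl_DownsampleBy2 takes the result to 4 kHz.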
+ if (nrSamples == 160) {
+ for (k = 0; k < 8; k++) {
+ tmp32 = (int32_t)in[2 * k] + (int32_t)in[2 * k + 1];
+ tmp32 >>= 1;
+ buf1[k] = (int16_t)tmp32;
+ }
+ in += 16;
+
+ WebRtcSpl_DownsampleBy2(buf1, 8, buf2, state->downState);
+ } else {
+ WebRtcSpl_DownsampleBy2(in, 8, buf2, state->downState);
+ in += 8;
+ }
+
+ // high pass filter and compute energy
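+    // (The recursion below implements y[n] = x[n] - x[n-1] + (600/1024) *
+    //  y[n-1], a first-order high-pass with its pole at ~0.59.)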
+ for (k = 0; k < 4; k++) {
+ out = buf2[k] + HPstate;
+ tmp32 = 600 * out;
+ HPstate = (int16_t)((tmp32 >> 10) - buf2[k]);
+
+ // Add 'out * out / 2**6' to 'nrg' in a non-overflowing
+ // way. Guaranteed to work as long as 'out * out / 2**6' fits in
+ // an int32_t.
+ nrg += out * (out / (1 << 6));
+ nrg += out * (out % (1 << 6)) / (1 << 6);
+ }
+ }
+ state->HPstate = HPstate;
+
+ // find number of leading zeros
+ if (!(0xFFFF0000 & nrg)) {
+ zeros = 16;
+ } else {
+ zeros = 0;
+ }
+ if (!(0xFF000000 & (nrg << zeros))) {
+ zeros += 8;
+ }
+ if (!(0xF0000000 & (nrg << zeros))) {
+ zeros += 4;
+ }
+ if (!(0xC0000000 & (nrg << zeros))) {
+ zeros += 2;
+ }
+ if (!(0x80000000 & (nrg << zeros))) {
+ zeros += 1;
+ }
+
+ // energy level (range {-32..30}) (Q10)
+ dB = (15 - zeros) * (1 << 11);
+
+ // Update statistics
+
+ if (state->counter < kAvgDecayTime) {
+ // decay time = AvgDecTime * 10 ms
+ state->counter++;
+ }
+
+ // update short-term estimate of mean energy level (Q10)
+ tmp32 = state->meanShortTerm * 15 + dB;
+ state->meanShortTerm = (int16_t)(tmp32 >> 4);
+
+ // update short-term estimate of variance in energy level (Q8)
+ tmp32 = (dB * dB) >> 12;
+ tmp32 += state->varianceShortTerm * 15;
+ state->varianceShortTerm = tmp32 / 16;
+
+ // update short-term estimate of standard deviation in energy level (Q10)
+ tmp32 = state->meanShortTerm * state->meanShortTerm;
+ tmp32 = (state->varianceShortTerm << 12) - tmp32;
+ state->stdShortTerm = (int16_t)WebRtcSpl_Sqrt(tmp32);
+
+ // update long-term estimate of mean energy level (Q10)
+ tmp32 = state->meanLongTerm * state->counter + dB;
+ state->meanLongTerm =
+ WebRtcSpl_DivW32W16ResW16(tmp32, WebRtcSpl_AddSatW16(state->counter, 1));
+
+ // update long-term estimate of variance in energy level (Q8)
+ tmp32 = (dB * dB) >> 12;
+ tmp32 += state->varianceLongTerm * state->counter;
+ state->varianceLongTerm =
+ WebRtcSpl_DivW32W16(tmp32, WebRtcSpl_AddSatW16(state->counter, 1));
+
+ // update long-term estimate of standard deviation in energy level (Q10)
+ tmp32 = state->meanLongTerm * state->meanLongTerm;
+ tmp32 = (state->varianceLongTerm << 12) - tmp32;
+ state->stdLongTerm = (int16_t)WebRtcSpl_Sqrt(tmp32);
+
+ // update voice activity measure (Q10)
+ tmp16 = 3 << 12;
+ // TODO(bjornv): (dB - state->meanLongTerm) can overflow, e.g., in
+ // ApmTest.Process unit test. Previously the macro WEBRTC_SPL_MUL_16_16()
+ // was used, which did an intermediate cast to (int16_t), hence losing
+  // significant bits. This causes logRatio to max out positive, rather than
+ // negative. This is a bug, but has very little significance.
+ tmp32 = tmp16 * (int16_t)(dB - state->meanLongTerm);
+ tmp32 = WebRtcSpl_DivW32W16(tmp32, state->stdLongTerm);
+ tmpU16 = (13 << 12);
+ tmp32b = WEBRTC_SPL_MUL_16_U16(state->logRatio, tmpU16);
+ tmp64 = tmp32;
+ tmp64 += tmp32b >> 10;
+ tmp64 >>= 6;
+
+ // limit
+ if (tmp64 > 2048) {
+ tmp64 = 2048;
+ } else if (tmp64 < -2048) {
+ tmp64 = -2048;
+ }
+ state->logRatio = (int16_t)tmp64;
+
+ return state->logRatio; // Q10
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/agc/legacy/digital_agc.h b/third_party/libwebrtc/modules/audio_processing/agc/legacy/digital_agc.h
new file mode 100644
index 0000000000..223c74b9bd
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc/legacy/digital_agc.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AGC_LEGACY_DIGITAL_AGC_H_
+#define MODULES_AUDIO_PROCESSING_AGC_LEGACY_DIGITAL_AGC_H_
+
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+
+namespace webrtc {
+
+typedef struct {
+ int32_t downState[8];
+ int16_t HPstate;
+ int16_t counter;
+ int16_t logRatio; // log( P(active) / P(inactive) ) (Q10)
+ int16_t meanLongTerm; // Q10
+ int32_t varianceLongTerm; // Q8
+ int16_t stdLongTerm; // Q10
+ int16_t meanShortTerm; // Q10
+ int32_t varianceShortTerm; // Q8
+ int16_t stdShortTerm; // Q10
+} AgcVad; // total = 54 bytes
+
+typedef struct {
+ int32_t capacitorSlow;
+ int32_t capacitorFast;
+ int32_t gain;
+ int32_t gainTable[32];
+ int16_t gatePrevious;
+ int16_t agcMode;
+ AgcVad vadNearend;
+ AgcVad vadFarend;
+} DigitalAgc;
+
+int32_t WebRtcAgc_InitDigital(DigitalAgc* digitalAgcInst, int16_t agcMode);
+
+int32_t WebRtcAgc_ComputeDigitalGains(DigitalAgc* digitalAgcInst,
+ const int16_t* const* inNear,
+ size_t num_bands,
+ uint32_t FS,
+ int16_t lowLevelSignal,
+ int32_t gains[11]);
+
+int32_t WebRtcAgc_ApplyDigitalGains(const int32_t gains[11],
+ size_t num_bands,
+ uint32_t FS,
+ const int16_t* const* in_near,
+ int16_t* const* out);
+
+int32_t WebRtcAgc_AddFarendToDigital(DigitalAgc* digitalAgcInst,
+ const int16_t* inFar,
+ size_t nrSamples);
+
+void WebRtcAgc_InitVad(AgcVad* vadInst);
+
+int16_t WebRtcAgc_ProcessVad(AgcVad* vadInst, // (i) VAD state
+ const int16_t* in, // (i) Speech signal
+ size_t nrSamples); // (i) number of samples
+
+int32_t WebRtcAgc_CalculateGainTable(int32_t* gainTable, // Q16
+ int16_t compressionGaindB, // Q0 (in dB)
+ int16_t targetLevelDbfs, // Q0 (in dB)
+ uint8_t limiterEnable,
+ int16_t analogTarget);
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AGC_LEGACY_DIGITAL_AGC_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/agc/legacy/gain_control.h b/third_party/libwebrtc/modules/audio_processing/agc/legacy/gain_control.h
new file mode 100644
index 0000000000..abb8e63228
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc/legacy/gain_control.h
@@ -0,0 +1,253 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AGC_LEGACY_GAIN_CONTROL_H_
+#define MODULES_AUDIO_PROCESSING_AGC_LEGACY_GAIN_CONTROL_H_
+
+namespace webrtc {
+
+enum {
+ kAgcModeUnchanged,
+ kAgcModeAdaptiveAnalog,
+ kAgcModeAdaptiveDigital,
+ kAgcModeFixedDigital
+};
+
+enum { kAgcFalse = 0, kAgcTrue };
+
+typedef struct {
+ int16_t targetLevelDbfs; // default 3 (-3 dBOv)
+ int16_t compressionGaindB; // default 9 dB
+ uint8_t limiterEnable; // default kAgcTrue (on)
+} WebRtcAgcConfig;
+
+/*
+ * This function checks the number of samples to be passed to the far-end
+ * processing and returns the error code that such a call would produce.
+ *
+ * Input:
+ * - agcInst : AGC instance.
+ * - samples : Number of samples in input vector.
+ *
+ * Return value:
+ * : 0 - Normal operation.
+ * : -1 - Error.
+ */
+int WebRtcAgc_GetAddFarendError(void* state, size_t samples);
+
+/*
+ * This function processes a 10 ms frame of far-end speech to determine
+ * if there is active speech. The length of the input speech vector must be
+ * given in samples (80 when FS=8000, and 160 when FS=16000, FS=32000 or
+ * FS=48000).
+ *
+ * Input:
+ * - agcInst : AGC instance.
+ * - inFar : Far-end input speech vector
+ * - samples : Number of samples in input vector
+ *
+ * Return value:
+ * : 0 - Normal operation.
+ * : -1 - Error
+ */
+int WebRtcAgc_AddFarend(void* agcInst, const int16_t* inFar, size_t samples);
+
+/*
+ * This function processes a 10 ms frame of microphone speech to determine
+ * if there is active speech. The length of the input speech vector must be
+ * given in samples (80 when FS=8000, and 160 when FS=16000, FS=32000 or
+ * FS=48000). For very low input levels, the input signal is increased in level
+ * by multiplying and overwriting the samples in inMic[].
+ *
+ * This function should be called before any further processing of the
+ * near-end microphone signal.
+ *
+ * Input:
+ * - agcInst : AGC instance.
+ * - inMic : Microphone input speech vector for each band
+ * - num_bands : Number of bands in input vector
+ * - samples : Number of samples in input vector
+ *
+ * Return value:
+ * : 0 - Normal operation.
+ * : -1 - Error
+ */
+int WebRtcAgc_AddMic(void* agcInst,
+ int16_t* const* inMic,
+ size_t num_bands,
+ size_t samples);
+
+/*
+ * This function replaces the analog microphone with a virtual one.
+ * It is a digital gain applied to the input signal and is used in the
+ * agcAdaptiveDigital mode where no microphone level is adjustable. The length
+ * of the input speech vector must be given in samples (80 when FS=8000, and 160
+ * when FS=16000, FS=32000 or FS=48000).
+ *
+ * Input:
+ * - agcInst : AGC instance.
+ * - inMic : Microphone input speech vector for each band
+ * - num_bands : Number of bands in input vector
+ * - samples : Number of samples in input vector
+ * - micLevelIn : Input level of microphone (static)
+ *
+ * Output:
+ *      - inMic             : Microphone output after processing (all bands)
+ * - micLevelOut : Adjusted microphone level after processing
+ *
+ * Return value:
+ * : 0 - Normal operation.
+ * : -1 - Error
+ */
+int WebRtcAgc_VirtualMic(void* agcInst,
+ int16_t* const* inMic,
+ size_t num_bands,
+ size_t samples,
+ int32_t micLevelIn,
+ int32_t* micLevelOut);
+
+/*
+ * This function analyses a 10 ms frame and produces the analog and digital
+ * gains required to normalize the signal. The gain adjustments are done only
+ * during active periods of speech. The length of the speech vectors must be
+ * given in samples (80 when FS=8000, and 160 when FS=16000, FS=32000 or
+ * FS=48000). The echo parameter can be used to ensure the AGC will not adjust
+ * upward in the presence of echo.
+ *
+ * This function should be called after processing the near-end microphone
+ * signal, in any case after any echo cancellation.
+ *
+ * Input:
+ * - agcInst : AGC instance
+ * - inNear : Near-end input speech vector for each band
+ * - num_bands : Number of bands in input/output vector
+ * - samples : Number of samples in input/output vector
+ * - inMicLevel : Current microphone volume level
+ *      - echo              : Set to 1 if the signal passed to add_mic is
+ *                            likely to contain echo; otherwise, or if you
+ *                            have no information regarding echo, set to 0.
+ *
+ * Output:
+ * - outMicLevel : Adjusted microphone volume level
+ * - saturationWarning : A returned value of 1 indicates a saturation event
+ * has occurred and the volume cannot be further
+ * reduced. Otherwise will be set to 0.
+ * - gains : Vector of gains to apply for digital normalization
+ *
+ * Return value:
+ * : 0 - Normal operation.
+ * : -1 - Error
+ */
+int WebRtcAgc_Analyze(void* agcInst,
+ const int16_t* const* inNear,
+ size_t num_bands,
+ size_t samples,
+ int32_t inMicLevel,
+ int32_t* outMicLevel,
+ int16_t echo,
+ uint8_t* saturationWarning,
+ int32_t gains[11]);
+
+/*
+ * This function processes a 10 ms frame by applying precomputed digital gains.
+ *
+ * Input:
+ * - agcInst : AGC instance
+ * - gains : Vector of gains to apply for digital normalization
+ * - in_near : Near-end input speech vector for each band
+ * - num_bands : Number of bands in input/output vector
+ *
+ * Output:
+ * - out : Gain-adjusted near-end speech vector
+ * : May be the same vector as the input.
+ *
+ * Return value:
+ * : 0 - Normal operation.
+ * : -1 - Error
+ */
+int WebRtcAgc_Process(const void* agcInst,
+ const int32_t gains[11],
+ const int16_t* const* in_near,
+ size_t num_bands,
+ int16_t* const* out);
+
+/*
+ * This function sets the config parameters (targetLevelDbfs,
+ * compressionGaindB and limiterEnable).
+ *
+ * Input:
+ * - agcInst : AGC instance
+ * - config : config struct
+ *
+ * Output:
+ *
+ * Return value:
+ * : 0 - Normal operation.
+ * : -1 - Error
+ */
+int WebRtcAgc_set_config(void* agcInst, WebRtcAgcConfig config);
+
+/*
+ * This function returns the config parameters (targetLevelDbfs,
+ * compressionGaindB and limiterEnable).
+ *
+ * Input:
+ * - agcInst : AGC instance
+ *
+ * Output:
+ * - config : config struct
+ *
+ * Return value:
+ * : 0 - Normal operation.
+ * : -1 - Error
+ */
+int WebRtcAgc_get_config(void* agcInst, WebRtcAgcConfig* config);
+
+/*
+ * This function creates and returns an AGC instance, which will contain the
+ * state information for one (duplex) channel.
+ */
+void* WebRtcAgc_Create(void);
+
+/*
+ * This function frees an AGC instance created with WebRtcAgc_Create.
+ *
+ * Input:
+ * - agcInst : AGC instance.
+ */
+void WebRtcAgc_Free(void* agcInst);
+
+/*
+ * This function initializes an AGC instance.
+ *
+ * Input:
+ * - agcInst : AGC instance.
+ * - minLevel : Minimum possible mic level
+ * - maxLevel : Maximum possible mic level
+ * - agcMode : 0 - Unchanged
+ * : 1 - Adaptive Analog Automatic Gain Control -3dBOv
+ * : 2 - Adaptive Digital Automatic Gain Control -3dBOv
+ * : 3 - Fixed Digital Gain 0dB
+ * - fs : Sampling frequency
+ *
+ * Return value : 0 - Ok
+ * -1 - Error
+ */
+int WebRtcAgc_Init(void* agcInst,
+ int32_t minLevel,
+ int32_t maxLevel,
+ int16_t agcMode,
+ uint32_t fs);
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AGC_LEGACY_GAIN_CONTROL_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/agc/legacy_agc_gn/moz.build b/third_party/libwebrtc/modules/audio_processing/agc/legacy_agc_gn/moz.build
new file mode 100644
index 0000000000..a9b1ddd0ce
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc/legacy_agc_gn/moz.build
@@ -0,0 +1,213 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/audio_processing/agc/legacy/analog_agc.cc",
+ "/third_party/libwebrtc/modules/audio_processing/agc/legacy/digital_agc.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("legacy_agc_gn")
diff --git a/third_party/libwebrtc/modules/audio_processing/agc/level_estimation_gn/moz.build b/third_party/libwebrtc/modules/audio_processing/agc/level_estimation_gn/moz.build
new file mode 100644
index 0000000000..c4bdd7c93d
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc/level_estimation_gn/moz.build
@@ -0,0 +1,214 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/audio_processing/agc/agc.cc",
+ "/third_party/libwebrtc/modules/audio_processing/agc/loudness_histogram.cc",
+ "/third_party/libwebrtc/modules/audio_processing/agc/utility.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("level_estimation_gn")
diff --git a/third_party/libwebrtc/modules/audio_processing/agc/loudness_histogram.cc b/third_party/libwebrtc/modules/audio_processing/agc/loudness_histogram.cc
new file mode 100644
index 0000000000..b0a1f53b97
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc/loudness_histogram.cc
@@ -0,0 +1,229 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/agc/loudness_histogram.h"
+
+#include <string.h>
+
+#include <cmath>
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+static const double kHistBinCenters[] = {
+ 7.59621091765857e-02, 9.02036021061016e-02, 1.07115112009343e-01,
+ 1.27197217770508e-01, 1.51044347572047e-01, 1.79362373905283e-01,
+ 2.12989507320644e-01, 2.52921107370304e-01, 3.00339145144454e-01,
+ 3.56647189489147e-01, 4.23511952494003e-01, 5.02912623991786e-01,
+ 5.97199455365749e-01, 7.09163326739184e-01, 8.42118356728544e-01,
+ 1.00000000000000e+00, 1.18748153630660e+00, 1.41011239906908e+00,
+ 1.67448243801153e+00, 1.98841697800836e+00, 2.36120844786349e+00,
+ 2.80389143520905e+00, 3.32956930911896e+00, 3.95380207843188e+00,
+ 4.69506696634852e+00, 5.57530533426190e+00, 6.62057214370769e+00,
+ 7.86180718043869e+00, 9.33575086877358e+00, 1.10860317842269e+01,
+ 1.31644580546776e+01, 1.56325508754123e+01, 1.85633655299256e+01,
+ 2.20436538184971e+01, 2.61764319021997e+01, 3.10840295702492e+01,
+ 3.69117111886792e+01, 4.38319755100383e+01, 5.20496616180135e+01,
+ 6.18080121423973e+01, 7.33958732149108e+01, 8.71562442838066e+01,
+ 1.03496430860848e+02, 1.22900100720889e+02, 1.45941600416277e+02,
+ 1.73302955873365e+02, 2.05794060286978e+02, 2.44376646872353e+02,
+ 2.90192756065437e+02, 3.44598539797631e+02, 4.09204403447902e+02,
+ 4.85922673669740e+02, 5.77024203055553e+02, 6.85205587130498e+02,
+ 8.13668983291589e+02, 9.66216894324125e+02, 1.14736472207740e+03,
+ 1.36247442287647e+03, 1.61791322085579e+03, 1.92124207711260e+03,
+ 2.28143949334655e+03, 2.70916727454970e+03, 3.21708611729384e+03,
+ 3.82023036499473e+03, 4.53645302286906e+03, 5.38695420497926e+03,
+ 6.39690865534207e+03, 7.59621091765857e+03, 9.02036021061016e+03,
+ 1.07115112009343e+04, 1.27197217770508e+04, 1.51044347572047e+04,
+ 1.79362373905283e+04, 2.12989507320644e+04, 2.52921107370304e+04,
+ 3.00339145144454e+04, 3.56647189489147e+04};
+
+static const double kProbQDomain = 1024.0;
+// Loudness of -15 dB (smallest expected loudness) in log domain:
+// loudness_db = 13.4 * log10(rms), so ln(rms) = -15 / 13.4 * ln(10).
+static const double kLogDomainMinBinCenter = -2.57752062648587;
+// Inverse of the log-domain step matching 1 dB of loudness: 13.4 / ln(10).
+static const double kLogDomainStepSizeInverse = 5.81954605750359;
+
+static const int kTransientWidthThreshold = 7;
+static const double kLowProbabilityThreshold = 0.2;
+
+static const int kLowProbThresholdQ10 =
+ static_cast<int>(kLowProbabilityThreshold * kProbQDomain);
+
+LoudnessHistogram::LoudnessHistogram()
+ : num_updates_(0),
+ audio_content_q10_(0),
+ bin_count_q10_(),
+ activity_probability_(),
+ hist_bin_index_(),
+ buffer_index_(0),
+ buffer_is_full_(false),
+ len_circular_buffer_(0),
+ len_high_activity_(0) {
+ static_assert(
+ kHistSize == sizeof(kHistBinCenters) / sizeof(kHistBinCenters[0]),
+ "histogram bin centers incorrect size");
+}
+
+LoudnessHistogram::LoudnessHistogram(int window_size)
+ : num_updates_(0),
+ audio_content_q10_(0),
+ bin_count_q10_(),
+ activity_probability_(new int[window_size]),
+ hist_bin_index_(new int[window_size]),
+ buffer_index_(0),
+ buffer_is_full_(false),
+ len_circular_buffer_(window_size),
+ len_high_activity_(0) {}
+
+LoudnessHistogram::~LoudnessHistogram() {}
+
+void LoudnessHistogram::Update(double rms, double activity_probability) {
+ // If circular histogram is activated then remove the oldest entry.
+ if (len_circular_buffer_ > 0)
+ RemoveOldestEntryAndUpdate();
+
+ // Find the corresponding bin.
+ int hist_index = GetBinIndex(rms);
+ // To Q10 domain.
+  int prob_q10 =
+      static_cast<int16_t>(floor(activity_probability * kProbQDomain));
+ InsertNewestEntryAndUpdate(prob_q10, hist_index);
+}
+
+// Removes the oldest entry once the circular buffer is full; no-op until then.
+void LoudnessHistogram::RemoveOldestEntryAndUpdate() {
+ RTC_DCHECK_GT(len_circular_buffer_, 0);
+ // Do nothing if circular buffer is not full.
+ if (!buffer_is_full_)
+ return;
+
+ int oldest_prob = activity_probability_[buffer_index_];
+ int oldest_hist_index = hist_bin_index_[buffer_index_];
+ UpdateHist(-oldest_prob, oldest_hist_index);
+}
+
+void LoudnessHistogram::RemoveTransient() {
+ // Don't expect to be here if high-activity region is longer than
+ // `kTransientWidthThreshold` or there has not been any transient.
+ RTC_DCHECK_LE(len_high_activity_, kTransientWidthThreshold);
+ int index =
+ (buffer_index_ > 0) ? (buffer_index_ - 1) : len_circular_buffer_ - 1;
+ while (len_high_activity_ > 0) {
+ UpdateHist(-activity_probability_[index], hist_bin_index_[index]);
+ activity_probability_[index] = 0;
+ index = (index > 0) ? (index - 1) : (len_circular_buffer_ - 1);
+ len_high_activity_--;
+ }
+}
+
+void LoudnessHistogram::InsertNewestEntryAndUpdate(int activity_prob_q10,
+ int hist_index) {
+ // Update the circular buffer if it is enabled.
+ if (len_circular_buffer_ > 0) {
+    // Transient removal.
+    if (activity_prob_q10 <= kLowProbThresholdQ10) {
+      // The probability is below the threshold; set it to zero.
+ activity_prob_q10 = 0;
+ // Check if this has been a transient.
+ if (len_high_activity_ <= kTransientWidthThreshold)
+ RemoveTransient(); // Remove this transient.
+ len_high_activity_ = 0;
+ } else if (len_high_activity_ <= kTransientWidthThreshold) {
+ len_high_activity_++;
+ }
+ // Updating the circular buffer.
+ activity_probability_[buffer_index_] = activity_prob_q10;
+ hist_bin_index_[buffer_index_] = hist_index;
+ // Increment the buffer index and check for wrap-around.
+ buffer_index_++;
+ if (buffer_index_ >= len_circular_buffer_) {
+ buffer_index_ = 0;
+ buffer_is_full_ = true;
+ }
+ }
+
+  num_updates_++;
+  if (num_updates_ < 0)  // Saturate the counter on overflow instead of wrapping.
+    num_updates_--;
+
+ UpdateHist(activity_prob_q10, hist_index);
+}
+
+void LoudnessHistogram::UpdateHist(int activity_prob_q10, int hist_index) {
+ bin_count_q10_[hist_index] += activity_prob_q10;
+ audio_content_q10_ += activity_prob_q10;
+}
+
+double LoudnessHistogram::AudioContent() const {
+ return audio_content_q10_ / kProbQDomain;
+}
+
+LoudnessHistogram* LoudnessHistogram::Create() {
+ return new LoudnessHistogram;
+}
+
+LoudnessHistogram* LoudnessHistogram::Create(int window_size) {
+ if (window_size < 0)
+ return NULL;
+ return new LoudnessHistogram(window_size);
+}
+
+void LoudnessHistogram::Reset() {
+ // Reset the histogram, audio-content and number of updates.
+ memset(bin_count_q10_, 0, sizeof(bin_count_q10_));
+ audio_content_q10_ = 0;
+ num_updates_ = 0;
+ // Empty the circular buffer.
+ buffer_index_ = 0;
+ buffer_is_full_ = false;
+ len_high_activity_ = 0;
+}
+
+int LoudnessHistogram::GetBinIndex(double rms) {
+ // First exclude overload cases.
+ if (rms <= kHistBinCenters[0]) {
+ return 0;
+ } else if (rms >= kHistBinCenters[kHistSize - 1]) {
+ return kHistSize - 1;
+ } else {
+    // The quantizer is uniform in the log domain. Alternatively, we could do a
+    // binary search in the linear domain.
+ double rms_log = log(rms);
+
+ int index = static_cast<int>(
+ floor((rms_log - kLogDomainMinBinCenter) * kLogDomainStepSizeInverse));
+ // The final decision is in linear domain.
+ double b = 0.5 * (kHistBinCenters[index] + kHistBinCenters[index + 1]);
+ if (rms > b) {
+ return index + 1;
+ }
+ return index;
+ }
+}
+
+double LoudnessHistogram::CurrentRms() const {
+ double p;
+ double mean_val = 0;
+ if (audio_content_q10_ > 0) {
+ double p_total_inverse = 1. / static_cast<double>(audio_content_q10_);
+ for (int n = 0; n < kHistSize; n++) {
+ p = static_cast<double>(bin_count_q10_[n]) * p_total_inverse;
+ mean_val += p * kHistBinCenters[n];
+ }
+ } else {
+ mean_val = kHistBinCenters[0];
+ }
+ return mean_val;
+}
+
+} // namespace webrtc
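The bin lookup above quantizes uniformly in the log domain: with bin centers spaced 1 dB of loudness apart, the index is floor((ln(rms) - ln(center_0)) * 13.4 / ln(10)), then refined against the midpoint of neighboring centers in the linear domain. A minimal standalone sketch of that arithmetic, with constants mirroring the ones defined above; the main() scaffolding and the example RMS value are illustrative only, not part of the patch:

#include <cmath>
#include <cstdio>

int main() {
  const double kMinBinCenter = 7.59621091765857e-02;  // kHistBinCenters[0].
  const double kLogMin = std::log(kMinBinCenter);     // -2.57752062648587.
  const double kStepInv = 13.4 / std::log(10.0);      // 5.81954605750359.
  const double rms = 2.0;  // Example level; any in-range value works.
  // Uniform quantization in the log domain; each bin spans 1 dB of loudness.
  const int index = static_cast<int>(
      std::floor((std::log(rms) - kLogMin) * kStepInv));
  // GetBinIndex() additionally resolves boundary cases against the midpoint
  // of the two neighboring bin centers in the linear domain.
  std::printf("rms=%.1f -> bin %d\n", rms, index);  // rms=2.0 -> bin 19 (~1.99).
  return 0;
}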
diff --git a/third_party/libwebrtc/modules/audio_processing/agc/loudness_histogram.h b/third_party/libwebrtc/modules/audio_processing/agc/loudness_histogram.h
new file mode 100644
index 0000000000..51b38714c2
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc/loudness_histogram.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AGC_LOUDNESS_HISTOGRAM_H_
+#define MODULES_AUDIO_PROCESSING_AGC_LOUDNESS_HISTOGRAM_H_
+
+#include <stdint.h>
+
+#include <memory>
+
+namespace webrtc {
+
+// This class implements a loudness histogram backed by circular buffers, so
+// that the histogram tracks the loudness over the last T seconds.
+class LoudnessHistogram {
+ public:
+ // Create a non-sliding LoudnessHistogram.
+ static LoudnessHistogram* Create();
+
+ // Create a sliding LoudnessHistogram, i.e. the histogram represents the last
+ // `window_size` samples.
+ static LoudnessHistogram* Create(int window_size);
+ ~LoudnessHistogram();
+
+ // Insert RMS and the corresponding activity probability.
+ void Update(double rms, double activity_probability);
+
+ // Reset the histogram, forget the past.
+ void Reset();
+
+ // Current loudness, which is actually the mean of histogram in loudness
+ // domain.
+ double CurrentRms() const;
+
+ // Sum of the histogram content.
+ double AudioContent() const;
+
+ // Number of times the histogram has been updated.
+ int num_updates() const { return num_updates_; }
+
+ private:
+ LoudnessHistogram();
+ explicit LoudnessHistogram(int window);
+
+ // Find the histogram bin associated with the given `rms`.
+ int GetBinIndex(double rms);
+
+ void RemoveOldestEntryAndUpdate();
+ void InsertNewestEntryAndUpdate(int activity_prob_q10, int hist_index);
+ void UpdateHist(int activity_prob_q10, int hist_index);
+ void RemoveTransient();
+
+ // Number of histogram bins.
+ static const int kHistSize = 77;
+
+  // Number of times the histogram has been updated.
+  int num_updates_;
+  // Audio content; this should equal the sum of the components of
+  // `bin_count_q10_`.
+  int64_t audio_content_q10_;
+
+  // Histogram of input RMS in Q10 with `kHistSize` bins. In each call to
+  // `Update()` we increment the associated histogram bin by the given
+  // probability. The increment is implemented in Q10 to avoid rounding errors.
+ int64_t bin_count_q10_[kHistSize];
+
+  // Circular buffer of activity probabilities.
+  std::unique_ptr<int[]> activity_probability_;
+  // Circular buffer of histogram bin indices matching the probabilities.
+  std::unique_ptr<int[]> hist_bin_index_;
+  // Current index of the circular buffer, where the newest data is written;
+  // once the buffer is full, it therefore points at the oldest entry.
+  int buffer_index_;
+  // Whether the buffer has been filled at least once (the index wrapped).
+  int buffer_is_full_;
+  // Size of the circular buffer.
+  int len_circular_buffer_;
+  int len_high_activity_;  // Length of the latest run of high-activity frames.
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AGC_LOUDNESS_HISTOGRAM_H_
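A minimal usage sketch of the interface above, assuming only this header; the 200-frame window and the input values are chosen for illustration:

#include <memory>

#include "modules/audio_processing/agc/loudness_histogram.h"

void ExampleUsage() {
  // Sliding histogram covering the last 200 Update() calls.
  std::unique_ptr<webrtc::LoudnessHistogram> hist(
      webrtc::LoudnessHistogram::Create(200));
  // Per frame: the frame RMS plus a speech probability in [0, 1] from a VAD.
  hist->Update(/*rms=*/0.5, /*activity_probability=*/0.9);
  const double rms = hist->CurrentRms();        // Activity-weighted mean RMS.
  const double content = hist->AudioContent();  // Sum of inserted probabilities.
  hist->Reset();                                // Forget the past entirely.
  (void)rms;
  (void)content;
}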
diff --git a/third_party/libwebrtc/modules/audio_processing/agc/loudness_histogram_unittest.cc b/third_party/libwebrtc/modules/audio_processing/agc/loudness_histogram_unittest.cc
new file mode 100644
index 0000000000..bbc0a7ee92
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc/loudness_histogram_unittest.cc
@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+// Use CreateHistUnittestFile.m to generate the input file.
+
+#include "modules/audio_processing/agc/loudness_histogram.h"
+
+#include <stdio.h>
+
+#include <algorithm>
+#include <cmath>
+#include <memory>
+#include <string>
+
+#include "absl/strings/string_view.h"
+#include "modules/audio_processing/agc/utility.h"
+#include "test/gtest.h"
+#include "test/testsupport/file_utils.h"
+
+namespace webrtc {
+
+struct InputOutput {
+ double rms;
+ double activity_probability;
+ double audio_content;
+ double loudness;
+};
+
+const double kRelativeErrTol = 1e-10;
+
+class LoudnessHistogramTest : public ::testing::Test {
+ protected:
+ void RunTest(bool enable_circular_buff, absl::string_view filename);
+
+ private:
+ void TestClean();
+ std::unique_ptr<LoudnessHistogram> hist_;
+};
+
+void LoudnessHistogramTest::TestClean() {
+ EXPECT_EQ(hist_->CurrentRms(), 7.59621091765857e-02);
+ EXPECT_EQ(hist_->AudioContent(), 0);
+ EXPECT_EQ(hist_->num_updates(), 0);
+}
+
+void LoudnessHistogramTest::RunTest(bool enable_circular_buff,
+ absl::string_view filename) {
+ FILE* in_file = fopen(std::string(filename).c_str(), "rb");
+ ASSERT_TRUE(in_file != NULL);
+ if (enable_circular_buff) {
+ int buffer_size;
+ EXPECT_EQ(fread(&buffer_size, sizeof(buffer_size), 1, in_file), 1u);
+ hist_.reset(LoudnessHistogram::Create(buffer_size));
+ } else {
+ hist_.reset(LoudnessHistogram::Create());
+ }
+ TestClean();
+
+ InputOutput io;
+ int num_updates = 0;
+ while (fread(&io, sizeof(InputOutput), 1, in_file) == 1) {
+ if (io.rms < 0) {
+ // We have to reset.
+ hist_->Reset();
+ TestClean();
+ num_updates = 0;
+ // Read the next chunk of input.
+ if (fread(&io, sizeof(InputOutput), 1, in_file) != 1)
+ break;
+ }
+ hist_->Update(io.rms, io.activity_probability);
+ num_updates++;
+ EXPECT_EQ(hist_->num_updates(), num_updates);
+ double audio_content = hist_->AudioContent();
+
+ double abs_err =
+ std::min(audio_content, io.audio_content) * kRelativeErrTol;
+
+ ASSERT_NEAR(audio_content, io.audio_content, abs_err);
+ double current_loudness = Linear2Loudness(hist_->CurrentRms());
+ abs_err =
+ std::min(fabs(current_loudness), fabs(io.loudness)) * kRelativeErrTol;
+ ASSERT_NEAR(current_loudness, io.loudness, abs_err);
+ }
+ fclose(in_file);
+}
+
+TEST_F(LoudnessHistogramTest, ActiveCircularBuffer) {
+ RunTest(true, test::ResourcePath(
+ "audio_processing/agc/agc_with_circular_buffer", "dat")
+ .c_str());
+}
+
+TEST_F(LoudnessHistogramTest, InactiveCircularBuffer) {
+ RunTest(false, test::ResourcePath(
+ "audio_processing/agc/agc_no_circular_buffer", "dat")
+ .c_str());
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/agc/mock_agc.h b/third_party/libwebrtc/modules/audio_processing/agc/mock_agc.h
new file mode 100644
index 0000000000..3080e1563c
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc/mock_agc.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AGC_MOCK_AGC_H_
+#define MODULES_AUDIO_PROCESSING_AGC_MOCK_AGC_H_
+
+#include "api/array_view.h"
+#include "modules/audio_processing/agc/agc.h"
+#include "test/gmock.h"
+
+namespace webrtc {
+
+class MockAgc : public Agc {
+ public:
+ virtual ~MockAgc() {}
+ MOCK_METHOD(void, Process, (rtc::ArrayView<const int16_t> audio), (override));
+ MOCK_METHOD(bool, GetRmsErrorDb, (int* error), (override));
+ MOCK_METHOD(void, Reset, (), (override));
+ MOCK_METHOD(int, set_target_level_dbfs, (int level), (override));
+ MOCK_METHOD(int, target_level_dbfs, (), (const, override));
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AGC_MOCK_AGC_H_
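MockAgc follows the standard gmock pattern, so a test can stub out the AGC and script its replies. A hedged sketch of such a test; the test name and the expectation values are invented for illustration and do not appear in this patch:

#include "modules/audio_processing/agc/mock_agc.h"
#include "test/gmock.h"
#include "test/gtest.h"

using ::testing::_;
using ::testing::DoAll;
using ::testing::Return;
using ::testing::SetArgPointee;

TEST(MockAgcSketch, ReportsRmsError) {
  webrtc::MockAgc agc;
  // Pretend the AGC wants a +3 dB correction on the next query.
  EXPECT_CALL(agc, GetRmsErrorDb(_))
      .WillOnce(DoAll(SetArgPointee<0>(3), Return(true)));
  int error = 0;
  EXPECT_TRUE(agc.GetRmsErrorDb(&error));
  EXPECT_EQ(error, 3);
}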
diff --git a/third_party/libwebrtc/modules/audio_processing/agc/utility.cc b/third_party/libwebrtc/modules/audio_processing/agc/utility.cc
new file mode 100644
index 0000000000..2a87e5ce74
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc/utility.cc
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/agc/utility.h"
+
+#include <math.h>
+
+namespace webrtc {
+
+static const double kLog10 = 2.30258509299;
+static const double kLinear2DbScale = 20.0 / kLog10;
+static const double kLinear2LoudnessScale = 13.4 / kLog10;
+
+double Loudness2Db(double loudness) {
+ return loudness * kLinear2DbScale / kLinear2LoudnessScale;
+}
+
+double Linear2Loudness(double rms) {
+ if (rms == 0)
+ return -15;
+ return kLinear2LoudnessScale * log(rms);
+}
+
+double Db2Loudness(double db) {
+ return db * kLinear2LoudnessScale / kLinear2DbScale;
+}
+
+double Dbfs2Loudness(double dbfs) {
+ return Db2Loudness(90 + dbfs);
+}
+
+} // namespace webrtc
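Since kLinear2LoudnessScale is 13.4 / ln(10), the helpers above reduce to loudness = 13.4 * log10(rms) and dB = 20 * log10(rms), with Loudness2Db()/Db2Loudness() converting between the two scales and Dbfs2Loudness() assuming a 90 dB full-scale range. A quick numeric check of those identities (a sketch, not part of the patch; the test values are worked examples):

#include <cassert>
#include <cmath>

#include "modules/audio_processing/agc/utility.h"

int main() {
  const double loudness = webrtc::Linear2Loudness(0.1);  // 13.4*log10(0.1).
  assert(std::fabs(loudness - (-13.4)) < 1e-9);
  assert(std::fabs(webrtc::Loudness2Db(loudness) - (-20.0)) < 1e-9);  // 20*log10.
  assert(std::fabs(webrtc::Db2Loudness(-20.0) - loudness) < 1e-9);
  assert(std::fabs(webrtc::Dbfs2Loudness(-90.0)) < 1e-9);  // Bottom of range.
  return 0;
}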
diff --git a/third_party/libwebrtc/modules/audio_processing/agc/utility.h b/third_party/libwebrtc/modules/audio_processing/agc/utility.h
new file mode 100644
index 0000000000..56eec244a7
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc/utility.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AGC_UTILITY_H_
+#define MODULES_AUDIO_PROCESSING_AGC_UTILITY_H_
+
+namespace webrtc {
+
+// TODO(turajs): Add description of function.
+double Loudness2Db(double loudness);
+
+double Linear2Loudness(double rms);
+
+double Db2Loudness(double db);
+
+double Dbfs2Loudness(double dbfs);
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AGC_UTILITY_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/BUILD.gn b/third_party/libwebrtc/modules/audio_processing/agc2/BUILD.gn
new file mode 100644
index 0000000000..ed992488ad
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/BUILD.gn
@@ -0,0 +1,309 @@
+# Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+import("../../../webrtc.gni")
+
+group("agc2") {
+ deps = [
+ ":adaptive_digital",
+ ":fixed_digital",
+ ]
+}
+
+rtc_library("adaptive_digital") {
+ sources = [
+ "adaptive_digital_gain_applier.cc",
+ "adaptive_digital_gain_applier.h",
+ "adaptive_digital_gain_controller.cc",
+ "adaptive_digital_gain_controller.h",
+ "adaptive_mode_level_estimator.cc",
+ "adaptive_mode_level_estimator.h",
+ "saturation_protector.cc",
+ "saturation_protector.h",
+ "saturation_protector_buffer.cc",
+ "saturation_protector_buffer.h",
+ ]
+
+ visibility = [
+ "..:gain_controller2",
+ "./*",
+ ]
+
+ configs += [ "..:apm_debug_dump" ]
+
+ deps = [
+ ":common",
+ ":cpu_features",
+ ":gain_applier",
+ ":noise_level_estimator",
+ ":vad_wrapper",
+ "..:api",
+ "..:apm_logging",
+ "..:audio_frame_view",
+ "../../../api:array_view",
+ "../../../common_audio",
+ "../../../rtc_base:checks",
+ "../../../rtc_base:logging",
+ "../../../rtc_base:safe_compare",
+ "../../../rtc_base:safe_minmax",
+ "../../../system_wrappers:metrics",
+ ]
+
+ absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ]
+}
+
+rtc_library("biquad_filter") {
+ visibility = [ "./*" ]
+ sources = [
+ "biquad_filter.cc",
+ "biquad_filter.h",
+ ]
+ deps = [
+ "../../../api:array_view",
+ "../../../rtc_base:macromagic",
+ ]
+}
+
+rtc_source_set("common") {
+ sources = [ "agc2_common.h" ]
+}
+
+rtc_library("fixed_digital") {
+ sources = [
+ "fixed_digital_level_estimator.cc",
+ "fixed_digital_level_estimator.h",
+ "interpolated_gain_curve.cc",
+ "interpolated_gain_curve.h",
+ "limiter.cc",
+ "limiter.h",
+ ]
+
+ visibility = [
+ "..:gain_controller2",
+ "../../audio_mixer:audio_mixer_impl",
+ "./*",
+ ]
+
+ configs += [ "..:apm_debug_dump" ]
+
+ deps = [
+ ":common",
+ "..:apm_logging",
+ "..:audio_frame_view",
+ "../../../api:array_view",
+ "../../../common_audio",
+ "../../../rtc_base:checks",
+ "../../../rtc_base:gtest_prod",
+ "../../../rtc_base:safe_conversions",
+ "../../../rtc_base:safe_minmax",
+ "../../../rtc_base:stringutils",
+ "../../../system_wrappers:metrics",
+ ]
+ absl_deps = [ "//third_party/abseil-cpp/absl/strings" ]
+}
+
+rtc_library("gain_applier") {
+ sources = [
+ "gain_applier.cc",
+ "gain_applier.h",
+ ]
+
+ visibility = [
+ "..:gain_controller2",
+ "./*",
+ ]
+
+ deps = [
+ ":common",
+ "..:audio_frame_view",
+ "../../../api:array_view",
+ "../../../rtc_base:safe_minmax",
+ ]
+}
+
+rtc_library("noise_level_estimator") {
+ sources = [
+ "noise_level_estimator.cc",
+ "noise_level_estimator.h",
+ ]
+ visibility = [ "./*" ]
+ deps = [
+ ":biquad_filter",
+ "..:apm_logging",
+ "..:audio_frame_view",
+ "../../../api:array_view",
+ "../../../rtc_base:checks",
+ "../../../system_wrappers",
+ ]
+
+ configs += [ "..:apm_debug_dump" ]
+}
+
+rtc_library("vad_wrapper") {
+ sources = [
+ "vad_wrapper.cc",
+ "vad_wrapper.h",
+ ]
+
+ visibility = [
+ "..:gain_controller2",
+ "./*",
+ ]
+
+ defines = []
+ if (rtc_build_with_neon && current_cpu != "arm64") {
+ suppressed_configs += [ "//build/config/compiler:compiler_arm_fpu" ]
+ cflags = [ "-mfpu=neon" ]
+ }
+
+ deps = [
+ ":common",
+ ":cpu_features",
+ "..:audio_frame_view",
+ "../../../api:array_view",
+ "../../../common_audio",
+ "../../../rtc_base:checks",
+ "rnn_vad",
+ "rnn_vad:rnn_vad_common",
+ ]
+}
+
+rtc_library("cpu_features") {
+ sources = [
+ "cpu_features.cc",
+ "cpu_features.h",
+ ]
+
+ visibility = [
+ "..:gain_controller2",
+ "./*",
+ ]
+
+ deps = [
+ "../../../rtc_base:stringutils",
+ "../../../rtc_base/system:arch",
+ "../../../system_wrappers",
+ ]
+}
+
+if (rtc_include_tests) {
+rtc_library("adaptive_digital_unittests") {
+ testonly = true
+ configs += [ "..:apm_debug_dump" ]
+
+ sources = [
+ "adaptive_digital_gain_applier_unittest.cc",
+ "adaptive_mode_level_estimator_unittest.cc",
+ "gain_applier_unittest.cc",
+ "saturation_protector_buffer_unittest.cc",
+ "saturation_protector_unittest.cc",
+ ]
+ deps = [
+ ":adaptive_digital",
+ ":common",
+ ":gain_applier",
+ ":test_utils",
+ "..:api",
+ "..:apm_logging",
+ "..:audio_frame_view",
+ "../../../api:array_view",
+ "../../../common_audio",
+ "../../../rtc_base:checks",
+ "../../../rtc_base:gunit_helpers",
+ "../../../test:test_support",
+ ]
+}
+
+rtc_library("biquad_filter_unittests") {
+ testonly = true
+ sources = [ "biquad_filter_unittest.cc" ]
+ deps = [
+ ":biquad_filter",
+ "../../../rtc_base:gunit_helpers",
+ ]
+}
+
+rtc_library("fixed_digital_unittests") {
+ testonly = true
+ configs += [ "..:apm_debug_dump" ]
+
+ sources = [
+ "agc2_testing_common_unittest.cc",
+ "compute_interpolated_gain_curve.cc",
+ "compute_interpolated_gain_curve.h",
+ "fixed_digital_level_estimator_unittest.cc",
+ "interpolated_gain_curve_unittest.cc",
+ "limiter_db_gain_curve.cc",
+ "limiter_db_gain_curve.h",
+ "limiter_db_gain_curve_unittest.cc",
+ "limiter_unittest.cc",
+ ]
+ deps = [
+ ":common",
+ ":fixed_digital",
+ ":test_utils",
+ "..:apm_logging",
+ "..:audio_frame_view",
+ "../../../api:array_view",
+ "../../../common_audio",
+ "../../../rtc_base:checks",
+ "../../../rtc_base:gunit_helpers",
+ "../../../system_wrappers:metrics",
+ ]
+}
+
+rtc_library("noise_estimator_unittests") {
+ testonly = true
+ configs += [ "..:apm_debug_dump" ]
+
+ sources = [ "noise_level_estimator_unittest.cc" ]
+ deps = [
+ ":noise_level_estimator",
+ ":test_utils",
+ "..:apm_logging",
+ "..:audio_frame_view",
+ "../../../api:array_view",
+ "../../../api:function_view",
+ "../../../rtc_base:checks",
+ "../../../rtc_base:gunit_helpers",
+ ]
+}
+
+rtc_library("vad_wrapper_unittests") {
+ testonly = true
+ sources = [ "vad_wrapper_unittest.cc" ]
+ deps = [
+ ":common",
+ ":vad_wrapper",
+ "..:audio_frame_view",
+ "../../../rtc_base:checks",
+ "../../../rtc_base:gunit_helpers",
+ "../../../rtc_base:safe_compare",
+ "../../../test:test_support",
+ ]
+}
+
+rtc_library("test_utils") {
+ testonly = true
+ visibility = [
+ ":*",
+ "..:audio_processing_unittests",
+ ]
+ sources = [
+ "agc2_testing_common.cc",
+ "agc2_testing_common.h",
+ "vector_float_frame.cc",
+ "vector_float_frame.h",
+ ]
+ deps = [
+ "..:audio_frame_view",
+ "../../../rtc_base:checks",
+ "../../../rtc_base:random",
+ ]
+}
+}
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/adaptive_digital_gain_applier.cc b/third_party/libwebrtc/modules/audio_processing/agc2/adaptive_digital_gain_applier.cc
new file mode 100644
index 0000000000..a34f598874
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/adaptive_digital_gain_applier.cc
@@ -0,0 +1,267 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/agc2/adaptive_digital_gain_applier.h"
+
+#include <algorithm>
+
+#include "common_audio/include/audio_util.h"
+#include "modules/audio_processing/agc2/agc2_common.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/numerics/safe_minmax.h"
+#include "system_wrappers/include/metrics.h"
+
+namespace webrtc {
+namespace {
+
+using AdaptiveDigitalConfig =
+ AudioProcessing::Config::GainController2::AdaptiveDigital;
+
+constexpr int kHeadroomHistogramMin = 0;
+constexpr int kHeadroomHistogramMax = 50;
+constexpr int kGainDbHistogramMax = 30;
+
+// Computes the gain for `input_level_dbfs` to reach `-config.headroom_db`.
+// Clamps the gain in [0, `config.max_gain_db`]. `config.headroom_db` is a
+// safety margin to allow transient peaks to exceed the target peak level
+// without clipping.
+float ComputeGainDb(float input_level_dbfs,
+ const AdaptiveDigitalConfig& config) {
+ // If the level is very low, apply the maximum gain.
+ if (input_level_dbfs < -(config.headroom_db + config.max_gain_db)) {
+ return config.max_gain_db;
+ }
+ // We expect to end up here most of the time: the level is below
+ // -headroom, but we can boost it to -headroom.
+ if (input_level_dbfs < -config.headroom_db) {
+ return -config.headroom_db - input_level_dbfs;
+ }
+ // The level is too high and we can't boost.
+ RTC_DCHECK_GE(input_level_dbfs, -config.headroom_db);
+ return 0.0f;
+}
+
+// Returns `target_gain_db` if applying such a gain to `input_noise_level_dbfs`
+// does not exceed `max_output_noise_level_dbfs`. Otherwise lowers and returns
+// `target_gain_db` so that the output noise level equals
+// `max_output_noise_level_dbfs`.
+float LimitGainByNoise(float target_gain_db,
+ float input_noise_level_dbfs,
+ float max_output_noise_level_dbfs,
+ ApmDataDumper& apm_data_dumper) {
+ const float max_allowed_gain_db =
+ max_output_noise_level_dbfs - input_noise_level_dbfs;
+ apm_data_dumper.DumpRaw("agc2_adaptive_gain_applier_max_allowed_gain_db",
+ max_allowed_gain_db);
+ return std::min(target_gain_db, std::max(max_allowed_gain_db, 0.0f));
+}
+
+float LimitGainByLowConfidence(float target_gain_db,
+ float last_gain_db,
+ float limiter_audio_level_dbfs,
+ bool estimate_is_confident) {
+ if (estimate_is_confident ||
+ limiter_audio_level_dbfs <= kLimiterThresholdForAgcGainDbfs) {
+ return target_gain_db;
+ }
+ const float limiter_level_dbfs_before_gain =
+ limiter_audio_level_dbfs - last_gain_db;
+
+  // Compute a new gain so that `limiter_level_dbfs_before_gain` +
+  // `new_target_gain_db` is not greater than `kLimiterThresholdForAgcGainDbfs`.
+ const float new_target_gain_db = std::max(
+ kLimiterThresholdForAgcGainDbfs - limiter_level_dbfs_before_gain, 0.0f);
+ return std::min(new_target_gain_db, target_gain_db);
+}
+
+// Computes how the gain should change during this frame.
+// Returns the gain difference in dB relative to `last_gain_db`.
+float ComputeGainChangeThisFrameDb(float target_gain_db,
+ float last_gain_db,
+ bool gain_increase_allowed,
+ float max_gain_decrease_db,
+ float max_gain_increase_db) {
+ RTC_DCHECK_GT(max_gain_decrease_db, 0);
+ RTC_DCHECK_GT(max_gain_increase_db, 0);
+ float target_gain_difference_db = target_gain_db - last_gain_db;
+ if (!gain_increase_allowed) {
+ target_gain_difference_db = std::min(target_gain_difference_db, 0.0f);
+ }
+ return rtc::SafeClamp(target_gain_difference_db, -max_gain_decrease_db,
+ max_gain_increase_db);
+}
+
+// Copies the (multichannel) audio samples from `src` into `dst`.
+void CopyAudio(AudioFrameView<const float> src,
+ std::vector<std::vector<float>>& dst) {
+ RTC_DCHECK_GT(src.num_channels(), 0);
+ RTC_DCHECK_GT(src.samples_per_channel(), 0);
+ RTC_DCHECK_EQ(dst.size(), src.num_channels());
+ for (int c = 0; c < src.num_channels(); ++c) {
+ rtc::ArrayView<const float> channel_view = src.channel(c);
+ RTC_DCHECK_EQ(channel_view.size(), src.samples_per_channel());
+ RTC_DCHECK_EQ(dst[c].size(), src.samples_per_channel());
+ std::copy(channel_view.begin(), channel_view.end(), dst[c].begin());
+ }
+}
+
+} // namespace
+
+AdaptiveDigitalGainApplier::AdaptiveDigitalGainApplier(
+ ApmDataDumper* apm_data_dumper,
+ const AudioProcessing::Config::GainController2::AdaptiveDigital& config,
+ int sample_rate_hz,
+ int num_channels)
+ : apm_data_dumper_(apm_data_dumper),
+ gain_applier_(
+ /*hard_clip_samples=*/false,
+ /*initial_gain_factor=*/DbToRatio(config.initial_gain_db)),
+ config_(config),
+ max_gain_change_db_per_10ms_(config_.max_gain_change_db_per_second *
+ kFrameDurationMs / 1000.0f),
+ calls_since_last_gain_log_(0),
+ frames_to_gain_increase_allowed_(
+ config_.adjacent_speech_frames_threshold),
+ last_gain_db_(config_.initial_gain_db) {
+ RTC_DCHECK_GT(max_gain_change_db_per_10ms_, 0.0f);
+ RTC_DCHECK_GE(frames_to_gain_increase_allowed_, 1);
+ RTC_DCHECK_GE(config_.max_output_noise_level_dbfs, -90.0f);
+ RTC_DCHECK_LE(config_.max_output_noise_level_dbfs, 0.0f);
+ Initialize(sample_rate_hz, num_channels);
+}
+
+void AdaptiveDigitalGainApplier::Initialize(int sample_rate_hz,
+ int num_channels) {
+ if (!config_.dry_run) {
+ return;
+ }
+ RTC_DCHECK_GT(sample_rate_hz, 0);
+ RTC_DCHECK_GT(num_channels, 0);
+ int frame_size = rtc::CheckedDivExact(sample_rate_hz, 100);
+ bool sample_rate_changed =
+ dry_run_frame_.empty() || // Handle initialization.
+ dry_run_frame_[0].size() != static_cast<size_t>(frame_size);
+ bool num_channels_changed =
+ dry_run_channels_.size() != static_cast<size_t>(num_channels);
+ if (sample_rate_changed || num_channels_changed) {
+ // Resize the multichannel audio vector and update the channel pointers.
+ dry_run_frame_.resize(num_channels);
+ dry_run_channels_.resize(num_channels);
+ for (int c = 0; c < num_channels; ++c) {
+ dry_run_frame_[c].resize(frame_size);
+ dry_run_channels_[c] = dry_run_frame_[c].data();
+ }
+ }
+}
+
+void AdaptiveDigitalGainApplier::Process(const FrameInfo& info,
+ AudioFrameView<float> frame) {
+ RTC_DCHECK_GE(info.speech_level_dbfs, -150.0f);
+ RTC_DCHECK_GE(frame.num_channels(), 1);
+ RTC_DCHECK(
+ frame.samples_per_channel() == 80 || frame.samples_per_channel() == 160 ||
+ frame.samples_per_channel() == 320 || frame.samples_per_channel() == 480)
+ << "`frame` does not look like a 10 ms frame for an APM supported sample "
+ "rate";
+
+ // Compute the input level used to select the desired gain.
+ RTC_DCHECK_GT(info.headroom_db, 0.0f);
+ const float input_level_dbfs = info.speech_level_dbfs + info.headroom_db;
+
+ const float target_gain_db = LimitGainByLowConfidence(
+ LimitGainByNoise(ComputeGainDb(input_level_dbfs, config_),
+ info.noise_rms_dbfs, config_.max_output_noise_level_dbfs,
+ *apm_data_dumper_),
+ last_gain_db_, info.limiter_envelope_dbfs, info.speech_level_reliable);
+
+ // Forbid increasing the gain until enough adjacent speech frames are
+ // observed.
+ bool first_confident_speech_frame = false;
+ if (info.speech_probability < kVadConfidenceThreshold) {
+ frames_to_gain_increase_allowed_ = config_.adjacent_speech_frames_threshold;
+ } else if (frames_to_gain_increase_allowed_ > 0) {
+ frames_to_gain_increase_allowed_--;
+ first_confident_speech_frame = frames_to_gain_increase_allowed_ == 0;
+ }
+ apm_data_dumper_->DumpRaw(
+ "agc2_adaptive_gain_applier_frames_to_gain_increase_allowed",
+ frames_to_gain_increase_allowed_);
+
+ const bool gain_increase_allowed = frames_to_gain_increase_allowed_ == 0;
+
+ float max_gain_increase_db = max_gain_change_db_per_10ms_;
+ if (first_confident_speech_frame) {
+ // No gain increase happened while waiting for a long enough speech
+ // sequence. Therefore, temporarily allow a faster gain increase.
+ RTC_DCHECK(gain_increase_allowed);
+ max_gain_increase_db *= config_.adjacent_speech_frames_threshold;
+ }
+
+ const float gain_change_this_frame_db = ComputeGainChangeThisFrameDb(
+ target_gain_db, last_gain_db_, gain_increase_allowed,
+ /*max_gain_decrease_db=*/max_gain_change_db_per_10ms_,
+ max_gain_increase_db);
+
+ apm_data_dumper_->DumpRaw("agc2_adaptive_gain_applier_want_to_change_by_db",
+ target_gain_db - last_gain_db_);
+ apm_data_dumper_->DumpRaw("agc2_adaptive_gain_applier_will_change_by_db",
+ gain_change_this_frame_db);
+
+ // Optimization: avoid calling math functions if gain does not
+ // change.
+ if (gain_change_this_frame_db != 0.f) {
+ gain_applier_.SetGainFactor(
+ DbToRatio(last_gain_db_ + gain_change_this_frame_db));
+ }
+
+ // Modify `frame` only if not running in "dry run" mode.
+ if (!config_.dry_run) {
+ gain_applier_.ApplyGain(frame);
+ } else {
+ // Copy `frame` so that `ApplyGain()` is called (on a copy).
+ CopyAudio(frame, dry_run_frame_);
+ RTC_DCHECK(!dry_run_channels_.empty());
+ AudioFrameView<float> frame_copy(&dry_run_channels_[0],
+ frame.num_channels(),
+ frame.samples_per_channel());
+ gain_applier_.ApplyGain(frame_copy);
+ }
+
+ // Remember that the gain has changed for the next iteration.
+ last_gain_db_ = last_gain_db_ + gain_change_this_frame_db;
+ apm_data_dumper_->DumpRaw("agc2_adaptive_gain_applier_applied_gain_db",
+ last_gain_db_);
+
+ // Log every 10 seconds.
+ calls_since_last_gain_log_++;
+ if (calls_since_last_gain_log_ == 1000) {
+ calls_since_last_gain_log_ = 0;
+ RTC_HISTOGRAM_COUNTS_LINEAR("WebRTC.Audio.Agc2.EstimatedSpeechLevel",
+ -info.speech_level_dbfs, 0, 100, 101);
+ RTC_HISTOGRAM_COUNTS_LINEAR("WebRTC.Audio.Agc2.EstimatedNoiseLevel",
+ -info.noise_rms_dbfs, 0, 100, 101);
+ RTC_HISTOGRAM_COUNTS_LINEAR(
+ "WebRTC.Audio.Agc2.Headroom", info.headroom_db, kHeadroomHistogramMin,
+ kHeadroomHistogramMax,
+ kHeadroomHistogramMax - kHeadroomHistogramMin + 1);
+ RTC_HISTOGRAM_COUNTS_LINEAR("WebRTC.Audio.Agc2.DigitalGainApplied",
+ last_gain_db_, 0, kGainDbHistogramMax,
+ kGainDbHistogramMax + 1);
+ RTC_LOG(LS_INFO) << "AGC2 adaptive digital"
+ << " | speech_dbfs: " << info.speech_level_dbfs
+ << " | noise_dbfs: " << info.noise_rms_dbfs
+ << " | headroom_db: " << info.headroom_db
+ << " | gain_db: " << last_gain_db_;
+ }
+}
+
+} // namespace webrtc
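Putting the pieces above together: ComputeGainDb() boosts the estimated input level up to -headroom_db (capped at max_gain_db), and LimitGainByNoise() then caps the gain so the amplified noise floor stays at or below max_output_noise_level_dbfs. A worked numeric sketch with that decision logic restated locally; the config values (headroom 6 dB, max gain 30 dB, max output noise -50 dBFS) and signal levels are assumed for illustration:

#include <algorithm>
#include <cstdio>

int main() {
  const float kHeadroomDb = 6.0f;         // Assumed config.headroom_db.
  const float kMaxGainDb = 30.0f;         // Assumed config.max_gain_db.
  const float kMaxOutNoiseDbfs = -50.0f;  // Assumed max_output_noise_level_dbfs.

  // Same three-way decision as ComputeGainDb(): boost the input level up to
  // -headroom, but never by more than the maximum gain.
  const float input_level_dbfs = -26.0f;  // speech_level_dbfs + headroom_db.
  float gain_db;
  if (input_level_dbfs < -(kHeadroomDb + kMaxGainDb)) {
    gain_db = kMaxGainDb;
  } else if (input_level_dbfs < -kHeadroomDb) {
    gain_db = -kHeadroomDb - input_level_dbfs;  // -6 - (-26) = 20 dB.
  } else {
    gain_db = 0.0f;
  }

  // Same cap as LimitGainByNoise(): the amplified noise floor must not exceed
  // the configured maximum output noise level.
  const float noise_rms_dbfs = -65.0f;
  const float max_allowed_gain_db =
      std::max(kMaxOutNoiseDbfs - noise_rms_dbfs, 0.0f);  // 15 dB.
  gain_db = std::min(gain_db, max_allowed_gain_db);       // 20 dB -> 15 dB.
  std::printf("target gain: %.1f dB\n", gain_db);
  return 0;
}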
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/adaptive_digital_gain_applier.h b/third_party/libwebrtc/modules/audio_processing/agc2/adaptive_digital_gain_applier.h
new file mode 100644
index 0000000000..dc84c1e238
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/adaptive_digital_gain_applier.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AGC2_ADAPTIVE_DIGITAL_GAIN_APPLIER_H_
+#define MODULES_AUDIO_PROCESSING_AGC2_ADAPTIVE_DIGITAL_GAIN_APPLIER_H_
+
+#include <vector>
+
+#include "modules/audio_processing/agc2/gain_applier.h"
+#include "modules/audio_processing/include/audio_frame_view.h"
+#include "modules/audio_processing/include/audio_processing.h"
+
+namespace webrtc {
+
+class ApmDataDumper;
+
+// TODO(bugs.webrtc.org/7494): Split into `GainAdaptor` and `GainApplier`.
+// Selects the target digital gain, decides when and how quickly to adapt to the
+// target and applies the current gain to 10 ms frames.
+class AdaptiveDigitalGainApplier {
+ public:
+ // Information about a frame to process.
+ struct FrameInfo {
+ float speech_probability; // Probability of speech in the [0, 1] range.
+ float speech_level_dbfs; // Estimated speech level (dBFS).
+ bool speech_level_reliable; // True with reliable speech level estimation.
+ float noise_rms_dbfs; // Estimated noise RMS level (dBFS).
+ float headroom_db; // Headroom (dB).
+ float limiter_envelope_dbfs; // Envelope level from the limiter (dBFS).
+ };
+
+ AdaptiveDigitalGainApplier(
+ ApmDataDumper* apm_data_dumper,
+ const AudioProcessing::Config::GainController2::AdaptiveDigital& config,
+ int sample_rate_hz,
+ int num_channels);
+ AdaptiveDigitalGainApplier(const AdaptiveDigitalGainApplier&) = delete;
+ AdaptiveDigitalGainApplier& operator=(const AdaptiveDigitalGainApplier&) =
+ delete;
+
+ void Initialize(int sample_rate_hz, int num_channels);
+
+ // Analyzes `info`, updates the digital gain and applies it to a 10 ms
+ // `frame`. Supports any sample rate supported by APM.
+ void Process(const FrameInfo& info, AudioFrameView<float> frame);
+
+ private:
+ ApmDataDumper* const apm_data_dumper_;
+ GainApplier gain_applier_;
+
+ const AudioProcessing::Config::GainController2::AdaptiveDigital config_;
+ const float max_gain_change_db_per_10ms_;
+
+ int calls_since_last_gain_log_;
+ int frames_to_gain_increase_allowed_;
+ float last_gain_db_;
+
+ std::vector<std::vector<float>> dry_run_frame_;
+ std::vector<float*> dry_run_channels_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AGC2_ADAPTIVE_DIGITAL_GAIN_APPLIER_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/adaptive_digital_gain_applier_unittest.cc b/third_party/libwebrtc/modules/audio_processing/agc2/adaptive_digital_gain_applier_unittest.cc
new file mode 100644
index 0000000000..ea7485f512
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/adaptive_digital_gain_applier_unittest.cc
@@ -0,0 +1,373 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/agc2/adaptive_digital_gain_applier.h"
+
+#include <algorithm>
+#include <memory>
+
+#include "common_audio/include/audio_util.h"
+#include "modules/audio_processing/agc2/agc2_common.h"
+#include "modules/audio_processing/agc2/vector_float_frame.h"
+#include "modules/audio_processing/include/audio_processing.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+#include "rtc_base/gunit.h"
+
+namespace webrtc {
+namespace {
+
+constexpr int kMono = 1;
+constexpr int kStereo = 2;
+constexpr int kFrameLen10ms8kHz = 80;
+constexpr int kFrameLen10ms48kHz = 480;
+
+constexpr float kMaxSpeechProbability = 1.0f;
+
+// Constants used in place of estimated noise levels.
+constexpr float kNoNoiseDbfs = kMinLevelDbfs;
+constexpr float kWithNoiseDbfs = -20.0f;
+
+// Number of additional frames to process in the tests to ensure that the tested
+// adaptation processes have converged.
+constexpr int kNumExtraFrames = 10;
+
+constexpr float GetMaxGainChangePerFrameDb(
+ float max_gain_change_db_per_second) {
+ return max_gain_change_db_per_second * kFrameDurationMs / 1000.0f;
+}
+
+using AdaptiveDigitalConfig =
+ AudioProcessing::Config::GainController2::AdaptiveDigital;
+
+constexpr AdaptiveDigitalConfig kDefaultConfig{};
+
+// Helper to create initialized `AdaptiveDigitalGainApplier` objects.
+struct GainApplierHelper {
+ GainApplierHelper(const AdaptiveDigitalConfig& config,
+ int sample_rate_hz,
+ int num_channels)
+ : apm_data_dumper(0),
+ gain_applier(
+ std::make_unique<AdaptiveDigitalGainApplier>(&apm_data_dumper,
+ config,
+ sample_rate_hz,
+ num_channels)) {}
+ ApmDataDumper apm_data_dumper;
+ std::unique_ptr<AdaptiveDigitalGainApplier> gain_applier;
+};
+
+// Returns a `FrameInfo` sample to simulate noiseless speech detected with
+// maximum probability and with level, headroom and limiter envelope chosen
+// so that the resulting gain equals the default initial adaptive digital gain,
+// i.e., no gain adaptation is expected.
+AdaptiveDigitalGainApplier::FrameInfo GetFrameInfoToNotAdapt(
+ const AdaptiveDigitalConfig& config) {
+ AdaptiveDigitalGainApplier::FrameInfo info;
+ info.speech_probability = kMaxSpeechProbability;
+ info.speech_level_dbfs = -config.initial_gain_db - config.headroom_db;
+ info.speech_level_reliable = true;
+ info.noise_rms_dbfs = kNoNoiseDbfs;
+ info.headroom_db = config.headroom_db;
+ info.limiter_envelope_dbfs = -2.0f;
+ return info;
+}
+
+TEST(GainController2AdaptiveGainApplier, GainApplierShouldNotCrash) {
+ GainApplierHelper helper(kDefaultConfig, /*sample_rate_hz=*/48000, kStereo);
+ // Make one call with reasonable audio level values and settings.
+ VectorFloatFrame fake_audio(kStereo, kFrameLen10ms48kHz, 10000.0f);
+ helper.gain_applier->Process(GetFrameInfoToNotAdapt(kDefaultConfig),
+ fake_audio.float_frame_view());
+}
+
+// Checks that the maximum allowed gain is applied.
+TEST(GainController2AdaptiveGainApplier, MaxGainApplied) {
+ constexpr int kNumFramesToAdapt =
+ static_cast<int>(kDefaultConfig.max_gain_db /
+ GetMaxGainChangePerFrameDb(
+ kDefaultConfig.max_gain_change_db_per_second)) +
+ kNumExtraFrames;
+
+ GainApplierHelper helper(kDefaultConfig, /*sample_rate_hz=*/8000, kMono);
+ AdaptiveDigitalGainApplier::FrameInfo info =
+ GetFrameInfoToNotAdapt(kDefaultConfig);
+ info.speech_level_dbfs = -60.0f;
+ float applied_gain;
+ for (int i = 0; i < kNumFramesToAdapt; ++i) {
+ VectorFloatFrame fake_audio(kMono, kFrameLen10ms8kHz, 1.0f);
+ helper.gain_applier->Process(info, fake_audio.float_frame_view());
+ applied_gain = fake_audio.float_frame_view().channel(0)[0];
+ }
+ const float applied_gain_db = 20.0f * std::log10f(applied_gain);
+ EXPECT_NEAR(applied_gain_db, kDefaultConfig.max_gain_db, 0.1f);
+}
+
+TEST(GainController2AdaptiveGainApplier, GainDoesNotChangeFast) {
+ GainApplierHelper helper(kDefaultConfig, /*sample_rate_hz=*/8000, kMono);
+
+ constexpr float initial_level_dbfs = -25.0f;
+ constexpr float kMaxGainChangeDbPerFrame =
+ GetMaxGainChangePerFrameDb(kDefaultConfig.max_gain_change_db_per_second);
+ constexpr int kNumFramesToAdapt =
+ static_cast<int>(initial_level_dbfs / kMaxGainChangeDbPerFrame) +
+ kNumExtraFrames;
+
+ const float max_change_per_frame_linear = DbToRatio(kMaxGainChangeDbPerFrame);
+
+ float last_gain_linear = 1.f;
+ for (int i = 0; i < kNumFramesToAdapt; ++i) {
+ SCOPED_TRACE(i);
+ VectorFloatFrame fake_audio(kMono, kFrameLen10ms8kHz, 1.0f);
+ AdaptiveDigitalGainApplier::FrameInfo info =
+ GetFrameInfoToNotAdapt(kDefaultConfig);
+ info.speech_level_dbfs = initial_level_dbfs;
+ helper.gain_applier->Process(info, fake_audio.float_frame_view());
+ float current_gain_linear = fake_audio.float_frame_view().channel(0)[0];
+ EXPECT_LE(std::abs(current_gain_linear - last_gain_linear),
+ max_change_per_frame_linear);
+ last_gain_linear = current_gain_linear;
+ }
+
+ // Check that the same is true when gain decreases as well.
+ for (int i = 0; i < kNumFramesToAdapt; ++i) {
+ SCOPED_TRACE(i);
+ VectorFloatFrame fake_audio(kMono, kFrameLen10ms8kHz, 1.0f);
+ AdaptiveDigitalGainApplier::FrameInfo info =
+ GetFrameInfoToNotAdapt(kDefaultConfig);
+ info.speech_level_dbfs = 0.f;
+ helper.gain_applier->Process(info, fake_audio.float_frame_view());
+ float current_gain_linear = fake_audio.float_frame_view().channel(0)[0];
+ EXPECT_LE(std::abs(current_gain_linear - last_gain_linear),
+ max_change_per_frame_linear);
+ last_gain_linear = current_gain_linear;
+ }
+}
+
+TEST(GainController2AdaptiveGainApplier, GainIsRampedInAFrame) {
+ GainApplierHelper helper(kDefaultConfig, /*sample_rate_hz=*/48000, kMono);
+
+ constexpr float initial_level_dbfs = -25.0f;
+
+ VectorFloatFrame fake_audio(kMono, kFrameLen10ms48kHz, 1.0f);
+ AdaptiveDigitalGainApplier::FrameInfo info =
+ GetFrameInfoToNotAdapt(kDefaultConfig);
+ info.speech_level_dbfs = initial_level_dbfs;
+ helper.gain_applier->Process(info, fake_audio.float_frame_view());
+ float maximal_difference = 0.0f;
+ float current_value = 1.0f * DbToRatio(kDefaultConfig.initial_gain_db);
+ for (const auto& x : fake_audio.float_frame_view().channel(0)) {
+ const float difference = std::abs(x - current_value);
+ maximal_difference = std::max(maximal_difference, difference);
+ current_value = x;
+ }
+
+ const float max_change_per_frame_linear = DbToRatio(
+ GetMaxGainChangePerFrameDb(kDefaultConfig.max_gain_change_db_per_second));
+ const float max_change_per_sample =
+ max_change_per_frame_linear / kFrameLen10ms48kHz;
+
+ EXPECT_LE(maximal_difference, max_change_per_sample);
+}
+
+TEST(GainController2AdaptiveGainApplier, NoiseLimitsGain) {
+ GainApplierHelper helper(kDefaultConfig, /*sample_rate_hz=*/48000, kMono);
+
+ constexpr float initial_level_dbfs = -25.0f;
+ constexpr int num_initial_frames =
+ kDefaultConfig.initial_gain_db /
+ GetMaxGainChangePerFrameDb(kDefaultConfig.max_gain_change_db_per_second);
+ constexpr int num_frames = 50;
+
+ ASSERT_GT(kWithNoiseDbfs, kDefaultConfig.max_output_noise_level_dbfs)
+ << "kWithNoiseDbfs is too low";
+
+ for (int i = 0; i < num_initial_frames + num_frames; ++i) {
+ VectorFloatFrame fake_audio(kMono, kFrameLen10ms48kHz, 1.0f);
+ AdaptiveDigitalGainApplier::FrameInfo info =
+ GetFrameInfoToNotAdapt(kDefaultConfig);
+ info.speech_level_dbfs = initial_level_dbfs;
+ info.noise_rms_dbfs = kWithNoiseDbfs;
+ helper.gain_applier->Process(info, fake_audio.float_frame_view());
+
+ // Wait so that the adaptive gain applier has time to lower the gain.
+ if (i > num_initial_frames) {
+ const float maximal_ratio =
+ *std::max_element(fake_audio.float_frame_view().channel(0).begin(),
+ fake_audio.float_frame_view().channel(0).end());
+
+ EXPECT_NEAR(maximal_ratio, 1.0f, 0.001f);
+ }
+ }
+}
+
+TEST(GainController2GainApplier, CanHandlePositiveSpeechLevels) {
+ GainApplierHelper helper(kDefaultConfig, /*sample_rate_hz=*/48000, kStereo);
+
+ // Make one call with positive audio level values and settings.
+ VectorFloatFrame fake_audio(kStereo, kFrameLen10ms48kHz, 10000.0f);
+ AdaptiveDigitalGainApplier::FrameInfo info =
+ GetFrameInfoToNotAdapt(kDefaultConfig);
+ info.speech_level_dbfs = 5.0f;
+ helper.gain_applier->Process(info, fake_audio.float_frame_view());
+}
+
+TEST(GainController2GainApplier, AudioLevelLimitsGain) {
+ GainApplierHelper helper(kDefaultConfig, /*sample_rate_hz=*/48000, kMono);
+
+ constexpr float initial_level_dbfs = -25.0f;
+ constexpr int num_initial_frames =
+ kDefaultConfig.initial_gain_db /
+ GetMaxGainChangePerFrameDb(kDefaultConfig.max_gain_change_db_per_second);
+ constexpr int num_frames = 50;
+
+ ASSERT_GT(kWithNoiseDbfs, kDefaultConfig.max_output_noise_level_dbfs)
+ << "kWithNoiseDbfs is too low";
+
+ for (int i = 0; i < num_initial_frames + num_frames; ++i) {
+ VectorFloatFrame fake_audio(kMono, kFrameLen10ms48kHz, 1.0f);
+ AdaptiveDigitalGainApplier::FrameInfo info =
+ GetFrameInfoToNotAdapt(kDefaultConfig);
+ info.speech_level_dbfs = initial_level_dbfs;
+ info.limiter_envelope_dbfs = 1.0f;
+ info.speech_level_reliable = false;
+ helper.gain_applier->Process(info, fake_audio.float_frame_view());
+
+ // Wait so that the adaptive gain applier has time to lower the gain.
+ if (i > num_initial_frames) {
+ const float maximal_ratio =
+ *std::max_element(fake_audio.float_frame_view().channel(0).begin(),
+ fake_audio.float_frame_view().channel(0).end());
+
+ EXPECT_NEAR(maximal_ratio, 1.0f, 0.001f);
+ }
+ }
+}
+
+class AdaptiveDigitalGainApplierTest : public ::testing::TestWithParam<int> {
+ protected:
+ int adjacent_speech_frames_threshold() const { return GetParam(); }
+};
+
+TEST_P(AdaptiveDigitalGainApplierTest,
+ DoNotIncreaseGainWithTooFewSpeechFrames) {
+ AdaptiveDigitalConfig config;
+ config.adjacent_speech_frames_threshold = adjacent_speech_frames_threshold();
+ GainApplierHelper helper(config, /*sample_rate_hz=*/48000, kMono);
+
+ // Lower the speech level so that the target gain will be increased.
+ AdaptiveDigitalGainApplier::FrameInfo info = GetFrameInfoToNotAdapt(config);
+ info.speech_level_dbfs -= 12.0f;
+
+ float prev_gain = 0.0f;
+ for (int i = 0; i < config.adjacent_speech_frames_threshold; ++i) {
+ SCOPED_TRACE(i);
+ VectorFloatFrame audio(kMono, kFrameLen10ms48kHz, 1.0f);
+ helper.gain_applier->Process(info, audio.float_frame_view());
+ const float gain = audio.float_frame_view().channel(0)[0];
+ if (i > 0) {
+ EXPECT_EQ(prev_gain, gain); // No gain increase applied.
+ }
+ prev_gain = gain;
+ }
+}
+
+TEST_P(AdaptiveDigitalGainApplierTest, IncreaseGainWithEnoughSpeechFrames) {
+ AdaptiveDigitalConfig config;
+ config.adjacent_speech_frames_threshold = adjacent_speech_frames_threshold();
+ GainApplierHelper helper(config, /*sample_rate_hz=*/48000, kMono);
+
+ // Lower the speech level so that the target gain will be increased.
+ AdaptiveDigitalGainApplier::FrameInfo info = GetFrameInfoToNotAdapt(config);
+ info.speech_level_dbfs -= 12.0f;
+
+ float prev_gain = 0.0f;
+ for (int i = 0; i < config.adjacent_speech_frames_threshold; ++i) {
+ SCOPED_TRACE(i);
+ VectorFloatFrame audio(kMono, kFrameLen10ms48kHz, 1.0f);
+ helper.gain_applier->Process(info, audio.float_frame_view());
+ prev_gain = audio.float_frame_view().channel(0)[0];
+ }
+
+ // Process one more speech frame.
+ VectorFloatFrame audio(kMono, kFrameLen10ms48kHz, 1.0f);
+ helper.gain_applier->Process(info, audio.float_frame_view());
+
+ // An increased gain has been applied.
+ EXPECT_GT(audio.float_frame_view().channel(0)[0], prev_gain);
+}
+
+INSTANTIATE_TEST_SUITE_P(GainController2,
+ AdaptiveDigitalGainApplierTest,
+ ::testing::Values(1, 7, 31));
+
+// Checks that the input is never modified when running in dry run mode.
+TEST(GainController2GainApplier, DryRunDoesNotChangeInput) {
+ AdaptiveDigitalConfig config;
+ config.dry_run = true;
+ GainApplierHelper helper(config, /*sample_rate_hz=*/8000, kMono);
+
+  // Simulate an input signal with a low speech level.
+ AdaptiveDigitalGainApplier::FrameInfo info = GetFrameInfoToNotAdapt(config);
+ info.speech_level_dbfs = -60.0f;
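+  // Number of frames needed to reach the maximum gain, plus a safety margin.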
+ const int num_frames_to_adapt =
+ static_cast<int>(
+ config.max_gain_db /
+ GetMaxGainChangePerFrameDb(config.max_gain_change_db_per_second)) +
+ kNumExtraFrames;
+ constexpr float kPcmSamples = 123.456f;
+ // Run the gain applier and check that the PCM samples are not modified.
+ for (int i = 0; i < num_frames_to_adapt; ++i) {
+ SCOPED_TRACE(i);
+ VectorFloatFrame fake_audio(kMono, kFrameLen10ms8kHz, kPcmSamples);
+ helper.gain_applier->Process(info, fake_audio.float_frame_view());
+ EXPECT_FLOAT_EQ(fake_audio.float_frame_view().channel(0)[0], kPcmSamples);
+ }
+}
+
+// Checks that no sample is modified before and after the sample rate changes.
+TEST(GainController2GainApplier, DryRunHandlesSampleRateChange) {
+ AdaptiveDigitalConfig config;
+ config.dry_run = true;
+ GainApplierHelper helper(config, /*sample_rate_hz=*/8000, kMono);
+
+ AdaptiveDigitalGainApplier::FrameInfo info = GetFrameInfoToNotAdapt(config);
+ info.speech_level_dbfs = -60.0f;
+ constexpr float kPcmSamples = 123.456f;
+ VectorFloatFrame fake_audio_8k(kMono, kFrameLen10ms8kHz, kPcmSamples);
+ helper.gain_applier->Process(info, fake_audio_8k.float_frame_view());
+ EXPECT_FLOAT_EQ(fake_audio_8k.float_frame_view().channel(0)[0], kPcmSamples);
+ helper.gain_applier->Initialize(/*sample_rate_hz=*/48000, kMono);
+ VectorFloatFrame fake_audio_48k(kMono, kFrameLen10ms48kHz, kPcmSamples);
+ helper.gain_applier->Process(info, fake_audio_48k.float_frame_view());
+ EXPECT_FLOAT_EQ(fake_audio_48k.float_frame_view().channel(0)[0], kPcmSamples);
+}
+
+// Checks that no sample is modified before and after the number of channels
+// changes.
+TEST(GainController2GainApplier, DryRunHandlesNumChannelsChange) {
+ AdaptiveDigitalConfig config;
+ config.dry_run = true;
+ GainApplierHelper helper(config, /*sample_rate_hz=*/8000, kMono);
+
+ AdaptiveDigitalGainApplier::FrameInfo info = GetFrameInfoToNotAdapt(config);
+ info.speech_level_dbfs = -60.0f;
+ constexpr float kPcmSamples = 123.456f;
+ VectorFloatFrame fake_audio_8k(kMono, kFrameLen10ms8kHz, kPcmSamples);
+ helper.gain_applier->Process(info, fake_audio_8k.float_frame_view());
+ EXPECT_FLOAT_EQ(fake_audio_8k.float_frame_view().channel(0)[0], kPcmSamples);
+  VectorFloatFrame fake_audio_stereo(kStereo, kFrameLen10ms8kHz, kPcmSamples);
+  helper.gain_applier->Initialize(/*sample_rate_hz=*/8000, kStereo);
+  helper.gain_applier->Process(info, fake_audio_stereo.float_frame_view());
+  EXPECT_FLOAT_EQ(fake_audio_stereo.float_frame_view().channel(0)[0],
+                  kPcmSamples);
+  EXPECT_FLOAT_EQ(fake_audio_stereo.float_frame_view().channel(1)[0],
+                  kPcmSamples);
+}
+
+} // namespace
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/adaptive_digital_gain_controller.cc b/third_party/libwebrtc/modules/audio_processing/agc2/adaptive_digital_gain_controller.cc
new file mode 100644
index 0000000000..381e454868
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/adaptive_digital_gain_controller.cc
@@ -0,0 +1,108 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/agc2/adaptive_digital_gain_controller.h"
+
+#include <algorithm>
+
+#include "common_audio/include/audio_util.h"
+#include "modules/audio_processing/agc2/vad_wrapper.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+namespace {
+
+// Peak and RMS audio levels in dBFS.
+struct AudioLevels {
+ float peak_dbfs;
+ float rms_dbfs;
+};
+
+// Computes the audio levels for the first channel in `frame`.
+AudioLevels ComputeAudioLevels(AudioFrameView<float> frame) {
+ float peak = 0.0f;
+ float rms = 0.0f;
+ for (const auto& x : frame.channel(0)) {
+ peak = std::max(std::fabs(x), peak);
+ rms += x * x;
+ }
+ return {FloatS16ToDbfs(peak),
+ FloatS16ToDbfs(std::sqrt(rms / frame.samples_per_channel()))};
+}
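+
+// Note: `FloatS16ToDbfs()` maps the float S16 scale to dBFS so that full
+// scale (32768) corresponds to 0 dBFS; e.g., an RMS of 3276.8 maps to
+// -20 dBFS.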
+
+} // namespace
+
+AdaptiveDigitalGainController::AdaptiveDigitalGainController(
+ ApmDataDumper* apm_data_dumper,
+ const AudioProcessing::Config::GainController2::AdaptiveDigital& config,
+ int sample_rate_hz,
+ int num_channels)
+ : speech_level_estimator_(apm_data_dumper, config),
+ gain_controller_(apm_data_dumper, config, sample_rate_hz, num_channels),
+ apm_data_dumper_(apm_data_dumper),
+ noise_level_estimator_(CreateNoiseFloorEstimator(apm_data_dumper)),
+ saturation_protector_(
+ CreateSaturationProtector(kSaturationProtectorInitialHeadroomDb,
+ config.adjacent_speech_frames_threshold,
+ apm_data_dumper)) {
+ RTC_DCHECK(apm_data_dumper);
+ RTC_DCHECK(noise_level_estimator_);
+ RTC_DCHECK(saturation_protector_);
+}
+
+AdaptiveDigitalGainController::~AdaptiveDigitalGainController() = default;
+
+void AdaptiveDigitalGainController::Initialize(int sample_rate_hz,
+ int num_channels) {
+ gain_controller_.Initialize(sample_rate_hz, num_channels);
+}
+
+void AdaptiveDigitalGainController::Process(AudioFrameView<float> frame,
+ float speech_probability,
+ float limiter_envelope) {
+ AudioLevels levels = ComputeAudioLevels(frame);
+ apm_data_dumper_->DumpRaw("agc2_input_rms_dbfs", levels.rms_dbfs);
+ apm_data_dumper_->DumpRaw("agc2_input_peak_dbfs", levels.peak_dbfs);
+
+ AdaptiveDigitalGainApplier::FrameInfo info;
+
+ info.speech_probability = speech_probability;
+
+ speech_level_estimator_.Update(levels.rms_dbfs, levels.peak_dbfs,
+ info.speech_probability);
+ info.speech_level_dbfs = speech_level_estimator_.level_dbfs();
+ info.speech_level_reliable = speech_level_estimator_.IsConfident();
+ apm_data_dumper_->DumpRaw("agc2_speech_level_dbfs", info.speech_level_dbfs);
+ apm_data_dumper_->DumpRaw("agc2_speech_level_reliable",
+ info.speech_level_reliable);
+
+ info.noise_rms_dbfs = noise_level_estimator_->Analyze(frame);
+ apm_data_dumper_->DumpRaw("agc2_noise_rms_dbfs", info.noise_rms_dbfs);
+
+ saturation_protector_->Analyze(info.speech_probability, levels.peak_dbfs,
+ info.speech_level_dbfs);
+ info.headroom_db = saturation_protector_->HeadroomDb();
+ apm_data_dumper_->DumpRaw("agc2_headroom_db", info.headroom_db);
+
+ info.limiter_envelope_dbfs = FloatS16ToDbfs(limiter_envelope);
+ apm_data_dumper_->DumpRaw("agc2_limiter_envelope_dbfs",
+ info.limiter_envelope_dbfs);
+
+ gain_controller_.Process(info, frame);
+}
+
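+// When the input gain changes (e.g., the analog level is adjusted), past
+// speech level and headroom estimates no longer describe the incoming signal,
+// so the corresponding estimators are reset.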
+void AdaptiveDigitalGainController::HandleInputGainChange() {
+ speech_level_estimator_.Reset();
+ saturation_protector_->Reset();
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/adaptive_digital_gain_controller.h b/third_party/libwebrtc/modules/audio_processing/agc2/adaptive_digital_gain_controller.h
new file mode 100644
index 0000000000..75ea44591e
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/adaptive_digital_gain_controller.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AGC2_ADAPTIVE_DIGITAL_GAIN_CONTROLLER_H_
+#define MODULES_AUDIO_PROCESSING_AGC2_ADAPTIVE_DIGITAL_GAIN_CONTROLLER_H_
+
+#include <memory>
+
+#include "modules/audio_processing/agc2/adaptive_digital_gain_applier.h"
+#include "modules/audio_processing/agc2/adaptive_mode_level_estimator.h"
+#include "modules/audio_processing/agc2/noise_level_estimator.h"
+#include "modules/audio_processing/agc2/saturation_protector.h"
+#include "modules/audio_processing/include/audio_frame_view.h"
+#include "modules/audio_processing/include/audio_processing.h"
+
+namespace webrtc {
+class ApmDataDumper;
+
+// Gain controller that adapts and applies a variable digital gain to meet the
+// target level, which is determined by the given configuration.
+class AdaptiveDigitalGainController {
+ public:
+ AdaptiveDigitalGainController(
+ ApmDataDumper* apm_data_dumper,
+ const AudioProcessing::Config::GainController2::AdaptiveDigital& config,
+ int sample_rate_hz,
+ int num_channels);
+ AdaptiveDigitalGainController(const AdaptiveDigitalGainController&) = delete;
+ AdaptiveDigitalGainController& operator=(
+ const AdaptiveDigitalGainController&) = delete;
+ ~AdaptiveDigitalGainController();
+
+  // Detects and handles changes of sample rate and/or number of channels.
+ void Initialize(int sample_rate_hz, int num_channels);
+
+ // Analyzes `frame`, adapts the current digital gain and applies it to
+ // `frame`.
+ // TODO(bugs.webrtc.org/7494): Remove `limiter_envelope`.
+ void Process(AudioFrameView<float> frame,
+ float speech_probability,
+ float limiter_envelope);
+
+ // Handles a gain change applied to the input signal (e.g., analog gain).
+ void HandleInputGainChange();
+
+ private:
+ AdaptiveModeLevelEstimator speech_level_estimator_;
+ AdaptiveDigitalGainApplier gain_controller_;
+ ApmDataDumper* const apm_data_dumper_;
+ std::unique_ptr<NoiseLevelEstimator> noise_level_estimator_;
+ std::unique_ptr<SaturationProtector> saturation_protector_;
+};
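+
+// Example usage (an illustrative sketch; the configuration and values below
+// are placeholders, not prescribed settings):
+//   ApmDataDumper data_dumper(0);
+//   AudioProcessing::Config::GainController2::AdaptiveDigital config;
+//   AdaptiveDigitalGainController controller(&data_dumper, config,
+//                                            /*sample_rate_hz=*/48000,
+//                                            /*num_channels=*/1);
+//   // For each 10 ms frame (an `AudioFrameView<float>`):
+//   controller.Process(frame, /*speech_probability=*/0.9f,
+//                      /*limiter_envelope=*/0.0f);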
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AGC2_ADAPTIVE_DIGITAL_GAIN_CONTROLLER_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/adaptive_digital_gn/moz.build b/third_party/libwebrtc/modules/audio_processing/agc2/adaptive_digital_gn/moz.build
new file mode 100644
index 0000000000..26182baa24
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/adaptive_digital_gn/moz.build
@@ -0,0 +1,217 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_APM_DEBUG_DUMP"] = "0"
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/audio_processing/agc2/adaptive_digital_gain_applier.cc",
+ "/third_party/libwebrtc/modules/audio_processing/agc2/adaptive_digital_gain_controller.cc",
+ "/third_party/libwebrtc/modules/audio_processing/agc2/adaptive_mode_level_estimator.cc",
+ "/third_party/libwebrtc/modules/audio_processing/agc2/saturation_protector.cc",
+ "/third_party/libwebrtc/modules/audio_processing/agc2/saturation_protector_buffer.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("adaptive_digital_gn")
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/adaptive_mode_level_estimator.cc b/third_party/libwebrtc/modules/audio_processing/agc2/adaptive_mode_level_estimator.cc
new file mode 100644
index 0000000000..fe021fec05
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/adaptive_mode_level_estimator.cc
@@ -0,0 +1,164 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/agc2/adaptive_mode_level_estimator.h"
+
+#include "modules/audio_processing/agc2/agc2_common.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/numerics/safe_minmax.h"
+
+namespace webrtc {
+namespace {
+
+float ClampLevelEstimateDbfs(float level_estimate_dbfs) {
+ return rtc::SafeClamp<float>(level_estimate_dbfs, -90.0f, 30.0f);
+}
+
+// Returns the initial speech level estimate needed to apply the initial gain.
+float GetInitialSpeechLevelEstimateDbfs(
+ const AudioProcessing::Config::GainController2::AdaptiveDigital& config) {
+ return ClampLevelEstimateDbfs(-kSaturationProtectorInitialHeadroomDb -
+ config.initial_gain_db - config.headroom_db);
+}
+
+} // namespace
+
+bool AdaptiveModeLevelEstimator::LevelEstimatorState::operator==(
+ const AdaptiveModeLevelEstimator::LevelEstimatorState& b) const {
+ return time_to_confidence_ms == b.time_to_confidence_ms &&
+ level_dbfs.numerator == b.level_dbfs.numerator &&
+ level_dbfs.denominator == b.level_dbfs.denominator;
+}
+
+float AdaptiveModeLevelEstimator::LevelEstimatorState::Ratio::GetRatio() const {
+ RTC_DCHECK_NE(denominator, 0.f);
+ return numerator / denominator;
+}
+
+AdaptiveModeLevelEstimator::AdaptiveModeLevelEstimator(
+ ApmDataDumper* apm_data_dumper,
+ const AudioProcessing::Config::GainController2::AdaptiveDigital& config)
+ : apm_data_dumper_(apm_data_dumper),
+ initial_speech_level_dbfs_(GetInitialSpeechLevelEstimateDbfs(config)),
+ adjacent_speech_frames_threshold_(
+ config.adjacent_speech_frames_threshold),
+ level_dbfs_(initial_speech_level_dbfs_) {
+ RTC_DCHECK(apm_data_dumper_);
+ RTC_DCHECK_GE(adjacent_speech_frames_threshold_, 1);
+ Reset();
+}
+
+void AdaptiveModeLevelEstimator::Update(float rms_dbfs,
+ float peak_dbfs,
+ float speech_probability) {
+ RTC_DCHECK_GT(rms_dbfs, -150.0f);
+ RTC_DCHECK_LT(rms_dbfs, 50.0f);
+ RTC_DCHECK_GT(peak_dbfs, -150.0f);
+ RTC_DCHECK_LT(peak_dbfs, 50.0f);
+ RTC_DCHECK_GE(speech_probability, 0.0f);
+ RTC_DCHECK_LE(speech_probability, 1.0f);
+ if (speech_probability < kVadConfidenceThreshold) {
+ // Not a speech frame.
+ if (adjacent_speech_frames_threshold_ > 1) {
+ // When two or more adjacent speech frames are required in order to update
+ // the state, we need to decide whether to discard or confirm the updates
+ // based on the speech sequence length.
+ if (num_adjacent_speech_frames_ >= adjacent_speech_frames_threshold_) {
+ // First non-speech frame after a long enough sequence of speech frames.
+ // Update the reliable state.
+ reliable_state_ = preliminary_state_;
+ } else if (num_adjacent_speech_frames_ > 0) {
+ // First non-speech frame after a too short sequence of speech frames.
+ // Reset to the last reliable state.
+ preliminary_state_ = reliable_state_;
+ }
+ }
+ num_adjacent_speech_frames_ = 0;
+ } else {
+ // Speech frame observed.
+ num_adjacent_speech_frames_++;
+
+ // Update preliminary level estimate.
+ RTC_DCHECK_GE(preliminary_state_.time_to_confidence_ms, 0);
+ const bool buffer_is_full = preliminary_state_.time_to_confidence_ms == 0;
+ if (!buffer_is_full) {
+ preliminary_state_.time_to_confidence_ms -= kFrameDurationMs;
+ }
+ // Weighted average of levels with speech probability as weight.
+ RTC_DCHECK_GT(speech_probability, 0.0f);
+ const float leak_factor = buffer_is_full ? kLevelEstimatorLeakFactor : 1.0f;
+ preliminary_state_.level_dbfs.numerator =
+ preliminary_state_.level_dbfs.numerator * leak_factor +
+ rms_dbfs * speech_probability;
+ preliminary_state_.level_dbfs.denominator =
+ preliminary_state_.level_dbfs.denominator * leak_factor +
+ speech_probability;
+
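+    // The estimate is the ratio of the two accumulators, i.e. a
+    // speech-probability-weighted average of the observed RMS levels; once
+    // the confidence buffer is full, the leak factor makes it an
+    // exponentially decaying average.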
+ const float level_dbfs = preliminary_state_.level_dbfs.GetRatio();
+
+ if (num_adjacent_speech_frames_ >= adjacent_speech_frames_threshold_) {
+ // `preliminary_state_` is now reliable. Update the last level estimation.
+ level_dbfs_ = ClampLevelEstimateDbfs(level_dbfs);
+ }
+ }
+ DumpDebugData();
+}
+
+bool AdaptiveModeLevelEstimator::IsConfident() const {
+ if (adjacent_speech_frames_threshold_ == 1) {
+ // Ignore `reliable_state_` when a single frame is enough to update the
+ // level estimate (because it is not used).
+ return preliminary_state_.time_to_confidence_ms == 0;
+ }
+ // Once confident, it remains confident.
+ RTC_DCHECK(reliable_state_.time_to_confidence_ms != 0 ||
+ preliminary_state_.time_to_confidence_ms == 0);
+ // During the first long enough speech sequence, `reliable_state_` must be
+ // ignored since `preliminary_state_` is used.
+ return reliable_state_.time_to_confidence_ms == 0 ||
+ (num_adjacent_speech_frames_ >= adjacent_speech_frames_threshold_ &&
+ preliminary_state_.time_to_confidence_ms == 0);
+}
+
+void AdaptiveModeLevelEstimator::Reset() {
+ ResetLevelEstimatorState(preliminary_state_);
+ ResetLevelEstimatorState(reliable_state_);
+ level_dbfs_ = initial_speech_level_dbfs_;
+ num_adjacent_speech_frames_ = 0;
+}
+
+void AdaptiveModeLevelEstimator::ResetLevelEstimatorState(
+ LevelEstimatorState& state) const {
+ state.time_to_confidence_ms = kLevelEstimatorTimeToConfidenceMs;
+ state.level_dbfs.numerator = initial_speech_level_dbfs_;
+ state.level_dbfs.denominator = 1.0f;
+}
+
+void AdaptiveModeLevelEstimator::DumpDebugData() const {
+ apm_data_dumper_->DumpRaw(
+ "agc2_adaptive_level_estimator_num_adjacent_speech_frames",
+ num_adjacent_speech_frames_);
+ apm_data_dumper_->DumpRaw(
+ "agc2_adaptive_level_estimator_preliminary_level_estimate_num",
+ preliminary_state_.level_dbfs.numerator);
+ apm_data_dumper_->DumpRaw(
+ "agc2_adaptive_level_estimator_preliminary_level_estimate_den",
+ preliminary_state_.level_dbfs.denominator);
+ apm_data_dumper_->DumpRaw(
+ "agc2_adaptive_level_estimator_preliminary_time_to_confidence_ms",
+ preliminary_state_.time_to_confidence_ms);
+ apm_data_dumper_->DumpRaw(
+ "agc2_adaptive_level_estimator_reliable_time_to_confidence_ms",
+ reliable_state_.time_to_confidence_ms);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/adaptive_mode_level_estimator.h b/third_party/libwebrtc/modules/audio_processing/agc2/adaptive_mode_level_estimator.h
new file mode 100644
index 0000000000..989c8c3572
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/adaptive_mode_level_estimator.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AGC2_ADAPTIVE_MODE_LEVEL_ESTIMATOR_H_
+#define MODULES_AUDIO_PROCESSING_AGC2_ADAPTIVE_MODE_LEVEL_ESTIMATOR_H_
+
+#include <stddef.h>
+
+#include <type_traits>
+
+#include "modules/audio_processing/agc2/agc2_common.h"
+#include "modules/audio_processing/agc2/vad_wrapper.h"
+#include "modules/audio_processing/include/audio_processing.h"
+
+namespace webrtc {
+class ApmDataDumper;
+
+// Level estimator for the digital adaptive gain controller.
+class AdaptiveModeLevelEstimator {
+ public:
+ AdaptiveModeLevelEstimator(
+ ApmDataDumper* apm_data_dumper,
+ const AudioProcessing::Config::GainController2::AdaptiveDigital& config);
+ AdaptiveModeLevelEstimator(const AdaptiveModeLevelEstimator&) = delete;
+ AdaptiveModeLevelEstimator& operator=(const AdaptiveModeLevelEstimator&) =
+ delete;
+
+ // Updates the level estimation.
+ void Update(float rms_dbfs, float peak_dbfs, float speech_probability);
+ // Returns the estimated speech plus noise level.
+ float level_dbfs() const { return level_dbfs_; }
+ // Returns true if the estimator is confident on its current estimate.
+ bool IsConfident() const;
+
+ void Reset();
+
+ private:
+ // Part of the level estimator state used for check-pointing and restore ops.
+ struct LevelEstimatorState {
+ bool operator==(const LevelEstimatorState& s) const;
+ inline bool operator!=(const LevelEstimatorState& s) const {
+ return !(*this == s);
+ }
+ // TODO(bugs.webrtc.org/7494): Remove `time_to_confidence_ms` if redundant.
+ int time_to_confidence_ms;
+ struct Ratio {
+ float numerator;
+ float denominator;
+ float GetRatio() const;
+ } level_dbfs;
+ };
+ static_assert(std::is_trivially_copyable<LevelEstimatorState>::value, "");
+
+ void ResetLevelEstimatorState(LevelEstimatorState& state) const;
+
+ void DumpDebugData() const;
+
+ ApmDataDumper* const apm_data_dumper_;
+
+ const float initial_speech_level_dbfs_;
+ const int adjacent_speech_frames_threshold_;
+ LevelEstimatorState preliminary_state_;
+ LevelEstimatorState reliable_state_;
+ float level_dbfs_;
+ int num_adjacent_speech_frames_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AGC2_ADAPTIVE_MODE_LEVEL_ESTIMATOR_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/adaptive_mode_level_estimator_unittest.cc b/third_party/libwebrtc/modules/audio_processing/agc2/adaptive_mode_level_estimator_unittest.cc
new file mode 100644
index 0000000000..684fca188a
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/adaptive_mode_level_estimator_unittest.cc
@@ -0,0 +1,217 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/agc2/adaptive_mode_level_estimator.h"
+
+#include <memory>
+
+#include "modules/audio_processing/agc2/agc2_common.h"
+#include "modules/audio_processing/include/audio_processing.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+#include "rtc_base/gunit.h"
+
+namespace webrtc {
+namespace {
+
+using AdaptiveDigitalConfig =
+ AudioProcessing::Config::GainController2::AdaptiveDigital;
+
+// Number of speech frames that the level estimator must observe in order to
+// become confident about the estimated level.
+constexpr int kNumFramesToConfidence =
+ kLevelEstimatorTimeToConfidenceMs / kFrameDurationMs;
+static_assert(kNumFramesToConfidence > 0, "");
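+// With the constants in agc2_common.h, this equals 400 ms / 10 ms = 40
+// frames.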
+
+constexpr float kConvergenceSpeedTestsLevelTolerance = 0.5f;
+
+// Calls `level_estimator.Update()` `num_iterations` times with the given RMS
+// and peak levels and speech probability.
+void RunOnConstantLevel(int num_iterations,
+ float rms_dbfs,
+ float peak_dbfs,
+ float speech_probability,
+ AdaptiveModeLevelEstimator& level_estimator) {
+ for (int i = 0; i < num_iterations; ++i) {
+ level_estimator.Update(rms_dbfs, peak_dbfs, speech_probability);
+ }
+}
+
+constexpr AdaptiveDigitalConfig GetAdaptiveDigitalConfig(
+ int adjacent_speech_frames_threshold) {
+ AdaptiveDigitalConfig config;
+ config.adjacent_speech_frames_threshold = adjacent_speech_frames_threshold;
+ return config;
+}
+
+constexpr float kNoSpeechProbability = 0.0f;
+constexpr float kLowSpeechProbability = kVadConfidenceThreshold / 2.0f;
+constexpr float kMaxSpeechProbability = 1.0f;
+
+// Level estimator with data dumper.
+struct TestLevelEstimator {
+ explicit TestLevelEstimator(int adjacent_speech_frames_threshold)
+ : data_dumper(0),
+ estimator(std::make_unique<AdaptiveModeLevelEstimator>(
+ &data_dumper,
+ GetAdaptiveDigitalConfig(adjacent_speech_frames_threshold))),
+ initial_speech_level_dbfs(estimator->level_dbfs()),
+ level_rms_dbfs(initial_speech_level_dbfs / 2.0f),
+ level_peak_dbfs(initial_speech_level_dbfs / 3.0f) {
+ RTC_DCHECK_LT(level_rms_dbfs, level_peak_dbfs);
+ RTC_DCHECK_LT(initial_speech_level_dbfs, level_rms_dbfs);
+ RTC_DCHECK_GT(level_rms_dbfs - initial_speech_level_dbfs, 5.0f)
+ << "Adjust `level_rms_dbfs` so that the difference from the initial "
+ "level is wide enough for the tests";
+ }
+ ApmDataDumper data_dumper;
+ std::unique_ptr<AdaptiveModeLevelEstimator> estimator;
+ const float initial_speech_level_dbfs;
+ const float level_rms_dbfs;
+ const float level_peak_dbfs;
+};
+
+// Checks that the level estimator converges to a constant input speech level.
+TEST(GainController2AdaptiveModeLevelEstimator, LevelStabilizes) {
+ TestLevelEstimator level_estimator(/*adjacent_speech_frames_threshold=*/1);
+ RunOnConstantLevel(/*num_iterations=*/kNumFramesToConfidence,
+ level_estimator.level_rms_dbfs,
+ level_estimator.level_peak_dbfs, kMaxSpeechProbability,
+ *level_estimator.estimator);
+ const float estimated_level_dbfs = level_estimator.estimator->level_dbfs();
+ RunOnConstantLevel(/*num_iterations=*/1, level_estimator.level_rms_dbfs,
+ level_estimator.level_peak_dbfs, kMaxSpeechProbability,
+ *level_estimator.estimator);
+ EXPECT_NEAR(level_estimator.estimator->level_dbfs(), estimated_level_dbfs,
+ 0.1f);
+}
+
+// Checks that the level estimator does not become confident when too few
+// speech frames are observed.
+TEST(GainController2AdaptiveModeLevelEstimator, IsNotConfident) {
+ TestLevelEstimator level_estimator(/*adjacent_speech_frames_threshold=*/1);
+ RunOnConstantLevel(/*num_iterations=*/kNumFramesToConfidence / 2,
+ level_estimator.level_rms_dbfs,
+ level_estimator.level_peak_dbfs, kMaxSpeechProbability,
+ *level_estimator.estimator);
+ EXPECT_FALSE(level_estimator.estimator->IsConfident());
+}
+
+// Checks that the level estimator becomes confident when enough speech frames
+// are observed.
+TEST(GainController2AdaptiveModeLevelEstimator, IsConfident) {
+ TestLevelEstimator level_estimator(/*adjacent_speech_frames_threshold=*/1);
+ RunOnConstantLevel(/*num_iterations=*/kNumFramesToConfidence,
+ level_estimator.level_rms_dbfs,
+ level_estimator.level_peak_dbfs, kMaxSpeechProbability,
+ *level_estimator.estimator);
+ EXPECT_TRUE(level_estimator.estimator->IsConfident());
+}
+
+// Checks that the estimated level is not affected by the level of non-speech
+// frames.
+TEST(GainController2AdaptiveModeLevelEstimator,
+ EstimatorIgnoresNonSpeechFrames) {
+ TestLevelEstimator level_estimator(/*adjacent_speech_frames_threshold=*/1);
+ // Simulate speech.
+ RunOnConstantLevel(/*num_iterations=*/kNumFramesToConfidence,
+ level_estimator.level_rms_dbfs,
+ level_estimator.level_peak_dbfs, kMaxSpeechProbability,
+ *level_estimator.estimator);
+ const float estimated_level_dbfs = level_estimator.estimator->level_dbfs();
+ // Simulate full-scale non-speech.
+ RunOnConstantLevel(/*num_iterations=*/kNumFramesToConfidence,
+ /*rms_dbfs=*/0.0f, /*peak_dbfs=*/0.0f,
+ kNoSpeechProbability, *level_estimator.estimator);
+ // No estimated level change is expected.
+ EXPECT_FLOAT_EQ(level_estimator.estimator->level_dbfs(),
+ estimated_level_dbfs);
+}
+
+// Checks the convergence speed of the estimator before it becomes confident.
+TEST(GainController2AdaptiveModeLevelEstimator,
+ ConvergenceSpeedBeforeConfidence) {
+ TestLevelEstimator level_estimator(/*adjacent_speech_frames_threshold=*/1);
+ RunOnConstantLevel(/*num_iterations=*/kNumFramesToConfidence,
+ level_estimator.level_rms_dbfs,
+ level_estimator.level_peak_dbfs, kMaxSpeechProbability,
+ *level_estimator.estimator);
+ EXPECT_NEAR(level_estimator.estimator->level_dbfs(),
+ level_estimator.level_rms_dbfs,
+ kConvergenceSpeedTestsLevelTolerance);
+}
+
+// Checks the convergence speed of the estimator after it becomes confident.
+TEST(GainController2AdaptiveModeLevelEstimator,
+ ConvergenceSpeedAfterConfidence) {
+ TestLevelEstimator level_estimator(/*adjacent_speech_frames_threshold=*/1);
+ // Reach confidence using the initial level estimate.
+ RunOnConstantLevel(
+ /*num_iterations=*/kNumFramesToConfidence,
+ /*rms_dbfs=*/level_estimator.initial_speech_level_dbfs,
+ /*peak_dbfs=*/level_estimator.initial_speech_level_dbfs + 6.0f,
+ kMaxSpeechProbability, *level_estimator.estimator);
+ // No estimate change should occur, but confidence is achieved.
+ ASSERT_FLOAT_EQ(level_estimator.estimator->level_dbfs(),
+ level_estimator.initial_speech_level_dbfs);
+ ASSERT_TRUE(level_estimator.estimator->IsConfident());
+ // After confidence.
+  constexpr int kConvergenceTimeAfterConfidenceNumFrames = 600; // 6 seconds.
+ static_assert(
+ kConvergenceTimeAfterConfidenceNumFrames > kNumFramesToConfidence, "");
+ RunOnConstantLevel(
+ /*num_iterations=*/kConvergenceTimeAfterConfidenceNumFrames,
+ level_estimator.level_rms_dbfs, level_estimator.level_peak_dbfs,
+ kMaxSpeechProbability, *level_estimator.estimator);
+ EXPECT_NEAR(level_estimator.estimator->level_dbfs(),
+ level_estimator.level_rms_dbfs,
+ kConvergenceSpeedTestsLevelTolerance);
+}
+
+class AdaptiveModeLevelEstimatorParametrization
+ : public ::testing::TestWithParam<int> {
+ protected:
+ int adjacent_speech_frames_threshold() const { return GetParam(); }
+};
+
+TEST_P(AdaptiveModeLevelEstimatorParametrization,
+ DoNotAdaptToShortSpeechSegments) {
+ TestLevelEstimator level_estimator(adjacent_speech_frames_threshold());
+ const float initial_level = level_estimator.estimator->level_dbfs();
+ ASSERT_LT(initial_level, level_estimator.level_peak_dbfs);
+ for (int i = 0; i < adjacent_speech_frames_threshold() - 1; ++i) {
+ SCOPED_TRACE(i);
+ level_estimator.estimator->Update(level_estimator.level_rms_dbfs,
+ level_estimator.level_peak_dbfs,
+ kMaxSpeechProbability);
+ EXPECT_EQ(initial_level, level_estimator.estimator->level_dbfs());
+ }
+ level_estimator.estimator->Update(level_estimator.level_rms_dbfs,
+ level_estimator.level_peak_dbfs,
+ kLowSpeechProbability);
+ EXPECT_EQ(initial_level, level_estimator.estimator->level_dbfs());
+}
+
+TEST_P(AdaptiveModeLevelEstimatorParametrization, AdaptToEnoughSpeechSegments) {
+ TestLevelEstimator level_estimator(adjacent_speech_frames_threshold());
+ const float initial_level = level_estimator.estimator->level_dbfs();
+ ASSERT_LT(initial_level, level_estimator.level_peak_dbfs);
+ for (int i = 0; i < adjacent_speech_frames_threshold(); ++i) {
+ level_estimator.estimator->Update(level_estimator.level_rms_dbfs,
+ level_estimator.level_peak_dbfs,
+ kMaxSpeechProbability);
+ }
+ EXPECT_LT(initial_level, level_estimator.estimator->level_dbfs());
+}
+
+INSTANTIATE_TEST_SUITE_P(GainController2,
+ AdaptiveModeLevelEstimatorParametrization,
+ ::testing::Values(1, 9, 17));
+
+} // namespace
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/agc2_common.h b/third_party/libwebrtc/modules/audio_processing/agc2/agc2_common.h
new file mode 100644
index 0000000000..4af85527b8
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/agc2_common.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AGC2_AGC2_COMMON_H_
+#define MODULES_AUDIO_PROCESSING_AGC2_AGC2_COMMON_H_
+
+namespace webrtc {
+
+constexpr float kMinFloatS16Value = -32768.0f;
+constexpr float kMaxFloatS16Value = 32767.0f;
+constexpr float kMaxAbsFloatS16Value = 32768.0f;
+
+// Minimum audio level in dBFS scale for S16 samples.
+constexpr float kMinLevelDbfs = -90.31f;
+
+constexpr int kFrameDurationMs = 10;
+constexpr int kSubFramesInFrame = 20;
+constexpr int kMaximalNumberOfSamplesPerChannel = 480;
+
+// Adaptive digital gain applier settings.
+
+// Limiter level (in dBFS) above which the adaptive digital gain starts to be
+// decreased.
+constexpr float kLimiterThresholdForAgcGainDbfs = -1.0f;
+
+// This is the threshold for speech. Speech frames are used for updating the
+// speech level, measuring the amount of speech, and deciding when to allow
+// target gain changes.
+constexpr float kVadConfidenceThreshold = 0.95f;
+
+// Number of milliseconds of speech frames that must be observed before the
+// estimator becomes confident.
+constexpr float kLevelEstimatorTimeToConfidenceMs = 400;
+constexpr float kLevelEstimatorLeakFactor =
+ 1.0f - 1.0f / kLevelEstimatorTimeToConfidenceMs;
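+// For example, with `kLevelEstimatorTimeToConfidenceMs` equal to 400, the
+// leak factor is 1 - 1/400 = 0.9975; once the confidence period has elapsed,
+// the accumulated estimate decays by 0.25% at each update.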
+
+// Saturation Protector settings.
+constexpr float kSaturationProtectorInitialHeadroomDb = 20.0f;
+constexpr int kSaturationProtectorBufferSize = 4;
+
+// Number of interpolation points for each region of the limiter.
+// These values have been tuned to limit the interpolated gain curve error
+// given the limiter parameters, while allowing a maximum error of
+// +/- 32768^-1.
+constexpr int kInterpolatedGainCurveKneePoints = 22;
+constexpr int kInterpolatedGainCurveBeyondKneePoints = 10;
+constexpr int kInterpolatedGainCurveTotalPoints =
+ kInterpolatedGainCurveKneePoints + kInterpolatedGainCurveBeyondKneePoints;
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AGC2_AGC2_COMMON_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/agc2_testing_common.cc b/third_party/libwebrtc/modules/audio_processing/agc2/agc2_testing_common.cc
new file mode 100644
index 0000000000..125e551b72
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/agc2_testing_common.cc
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/agc2/agc2_testing_common.h"
+
+#include <cmath>
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace test {
+
+std::vector<double> LinSpace(double l, double r, int num_points) {
+ RTC_CHECK_GE(num_points, 2);
+ std::vector<double> points(num_points);
+ const double step = (r - l) / (num_points - 1.0);
+ points[0] = l;
+ for (int i = 1; i < num_points - 1; i++) {
+ points[i] = static_cast<double>(l) + i * step;
+ }
+ points[num_points - 1] = r;
+ return points;
+}
+
+WhiteNoiseGenerator::WhiteNoiseGenerator(int min_amplitude, int max_amplitude)
+ : rand_gen_(42),
+ min_amplitude_(min_amplitude),
+ max_amplitude_(max_amplitude) {
+ RTC_DCHECK_LT(min_amplitude_, max_amplitude_);
+ RTC_DCHECK_LE(kMinS16, min_amplitude_);
+ RTC_DCHECK_LE(min_amplitude_, kMaxS16);
+ RTC_DCHECK_LE(kMinS16, max_amplitude_);
+ RTC_DCHECK_LE(max_amplitude_, kMaxS16);
+}
+
+float WhiteNoiseGenerator::operator()() {
+ return static_cast<float>(rand_gen_.Rand(min_amplitude_, max_amplitude_));
+}
+
+SineGenerator::SineGenerator(float amplitude,
+ float frequency_hz,
+ int sample_rate_hz)
+ : amplitude_(amplitude),
+ frequency_hz_(frequency_hz),
+ sample_rate_hz_(sample_rate_hz),
+ x_radians_(0.0f) {
+ RTC_DCHECK_GT(amplitude_, 0);
+ RTC_DCHECK_LE(amplitude_, kMaxS16);
+}
+
+float SineGenerator::operator()() {
+ constexpr float kPi = 3.1415926536f;
+ x_radians_ += frequency_hz_ / sample_rate_hz_ * 2 * kPi;
+ if (x_radians_ >= 2 * kPi) {
+ x_radians_ -= 2 * kPi;
+ }
+  return amplitude_ * std::sin(x_radians_);
+}
+
+PulseGenerator::PulseGenerator(float pulse_amplitude,
+ float no_pulse_amplitude,
+ float frequency_hz,
+ int sample_rate_hz)
+ : pulse_amplitude_(pulse_amplitude),
+ no_pulse_amplitude_(no_pulse_amplitude),
+ samples_period_(
+ static_cast<int>(static_cast<float>(sample_rate_hz) / frequency_hz)),
+ sample_counter_(0) {
+ RTC_DCHECK_GE(pulse_amplitude_, kMinS16);
+ RTC_DCHECK_LE(pulse_amplitude_, kMaxS16);
+ RTC_DCHECK_GT(no_pulse_amplitude_, kMinS16);
+ RTC_DCHECK_LE(no_pulse_amplitude_, kMaxS16);
+ RTC_DCHECK_GT(sample_rate_hz, frequency_hz);
+}
+
+float PulseGenerator::operator()() {
+ sample_counter_++;
+ if (sample_counter_ >= samples_period_) {
+ sample_counter_ -= samples_period_;
+ }
+ return static_cast<float>(sample_counter_ == 0 ? pulse_amplitude_
+ : no_pulse_amplitude_);
+}
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/agc2_testing_common.h b/third_party/libwebrtc/modules/audio_processing/agc2/agc2_testing_common.h
new file mode 100644
index 0000000000..afed97e83b
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/agc2_testing_common.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AGC2_AGC2_TESTING_COMMON_H_
+#define MODULES_AUDIO_PROCESSING_AGC2_AGC2_TESTING_COMMON_H_
+
+#include <cstdint>
+#include <limits>
+#include <vector>
+
+#include "rtc_base/random.h"
+
+namespace webrtc {
+namespace test {
+
+constexpr float kMinS16 =
+ static_cast<float>(std::numeric_limits<int16_t>::min());
+constexpr float kMaxS16 =
+ static_cast<float>(std::numeric_limits<int16_t>::max());
+
+// Level Estimator test parameters.
+constexpr float kDecayMs = 20.0f;
+
+// Limiter parameters.
+constexpr float kLimiterMaxInputLevelDbFs = 1.f;
+constexpr float kLimiterKneeSmoothnessDb = 1.f;
+constexpr float kLimiterCompressionRatio = 5.f;
+
+// Returns `num_points` evenly spaced numbers over the interval [l, r].
+std::vector<double> LinSpace(double l, double r, int num_points);
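+// For instance, LinSpace(0.0, 1.0, 5) returns {0.0, 0.25, 0.5, 0.75, 1.0}.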
+
+// Generates white noise samples uniformly distributed in
+// [min_amplitude, max_amplitude].
+class WhiteNoiseGenerator {
+ public:
+ WhiteNoiseGenerator(int min_amplitude, int max_amplitude);
+ float operator()();
+
+ private:
+ Random rand_gen_;
+ const int min_amplitude_;
+ const int max_amplitude_;
+};
+
+// Generates samples of a sine wave with the given amplitude and frequency.
+class SineGenerator {
+ public:
+ SineGenerator(float amplitude, float frequency_hz, int sample_rate_hz);
+ float operator()();
+
+ private:
+ const float amplitude_;
+ const float frequency_hz_;
+ const int sample_rate_hz_;
+ float x_radians_;
+};
+
+// Generates periodic pulses.
+class PulseGenerator {
+ public:
+ PulseGenerator(float pulse_amplitude,
+ float no_pulse_amplitude,
+ float frequency_hz,
+ int sample_rate_hz);
+ float operator()();
+
+ private:
+ const float pulse_amplitude_;
+ const float no_pulse_amplitude_;
+ const int samples_period_;
+ int sample_counter_;
+};
+
+} // namespace test
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AGC2_AGC2_TESTING_COMMON_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/agc2_testing_common_unittest.cc b/third_party/libwebrtc/modules/audio_processing/agc2/agc2_testing_common_unittest.cc
new file mode 100644
index 0000000000..79c3cc95d9
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/agc2_testing_common_unittest.cc
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/agc2/agc2_testing_common.h"
+
+#include "rtc_base/gunit.h"
+
+namespace webrtc {
+
+TEST(GainController2TestingCommon, LinSpace) {
+ std::vector<double> points1 = test::LinSpace(-1.0, 2.0, 4);
+ const std::vector<double> expected_points1{{-1.0, 0.0, 1.0, 2.0}};
+ EXPECT_EQ(expected_points1, points1);
+
+ std::vector<double> points2 = test::LinSpace(0.0, 1.0, 4);
+ const std::vector<double> expected_points2{{0.0, 1.0 / 3.0, 2.0 / 3.0, 1.0}};
+  EXPECT_EQ(expected_points2, points2);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/biquad_filter.cc b/third_party/libwebrtc/modules/audio_processing/agc2/biquad_filter.cc
new file mode 100644
index 0000000000..c1b80d7320
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/biquad_filter.cc
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/agc2/biquad_filter.h"
+
+#include "rtc_base/arraysize.h"
+
+namespace webrtc {
+
+BiQuadFilter::BiQuadFilter(const Config& config)
+ : config_(config), state_({}) {}
+
+BiQuadFilter::~BiQuadFilter() = default;
+
+void BiQuadFilter::SetConfig(const Config& config) {
+ config_ = config;
+ state_ = {};
+}
+
+void BiQuadFilter::Reset() {
+ state_ = {};
+}
+
+void BiQuadFilter::Process(rtc::ArrayView<const float> x,
+ rtc::ArrayView<float> y) {
+ RTC_DCHECK_EQ(x.size(), y.size());
+ const float config_a0 = config_.a[0];
+ const float config_a1 = config_.a[1];
+ const float config_b0 = config_.b[0];
+ const float config_b1 = config_.b[1];
+ const float config_b2 = config_.b[2];
+ float state_a0 = state_.a[0];
+ float state_a1 = state_.a[1];
+ float state_b0 = state_.b[0];
+ float state_b1 = state_.b[1];
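+  // Bi-quad recurrence, where `config_.a[0]` and `config_.a[1]` hold the
+  // normalized a[1] and a[2] transfer function coefficients:
+  //   y[k] = b[0]*x[k] + b[1]*x[k-1] + b[2]*x[k-2]
+  //          - a[1]*y[k-1] - a[2]*y[k-2]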
+ for (size_t k = 0, x_size = x.size(); k < x_size; ++k) {
+ // Use a temporary variable for `x[k]` to allow in-place processing.
+ const float tmp = x[k];
+ float y_k = config_b0 * tmp + config_b1 * state_b0 + config_b2 * state_b1 -
+ config_a0 * state_a0 - config_a1 * state_a1;
+ state_b1 = state_b0;
+ state_b0 = tmp;
+ state_a1 = state_a0;
+ state_a0 = y_k;
+ y[k] = y_k;
+ }
+ state_.a[0] = state_a0;
+ state_.a[1] = state_a1;
+ state_.b[0] = state_b0;
+ state_.b[1] = state_b1;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/biquad_filter.h b/third_party/libwebrtc/modules/audio_processing/agc2/biquad_filter.h
new file mode 100644
index 0000000000..5273ff9386
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/biquad_filter.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AGC2_BIQUAD_FILTER_H_
+#define MODULES_AUDIO_PROCESSING_AGC2_BIQUAD_FILTER_H_
+
+#include "api/array_view.h"
+
+namespace webrtc {
+
+// Transposed direct form I implementation of a bi-quad filter.
+// b[0] + b[1] • z^(-1) + b[2] • z^(-2)
+// H(z) = ------------------------------------
+// 1 + a[1] • z^(-1) + a[2] • z^(-2)
+class BiQuadFilter {
+ public:
+ // Normalized filter coefficients.
+ // Computed as `[b, a] = scipy.signal.butter(N=2, Wn, btype)`.
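+  // For example, `scipy.signal.butter(N=2, Wn=60/24000, btype='highpass')`
+  // gives b = {0.99446179f, -1.98892358f, 0.99446179f} and
+  // a = {-1.98889291f, 0.98895425f} (see the unit test).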
+ struct Config {
+ float b[3]; // b[0], b[1], b[2].
+ float a[2]; // a[1], a[2].
+ };
+
+ explicit BiQuadFilter(const Config& config);
+ BiQuadFilter(const BiQuadFilter&) = delete;
+ BiQuadFilter& operator=(const BiQuadFilter&) = delete;
+ ~BiQuadFilter();
+
+ // Sets the filter configuration and resets the internal state.
+ void SetConfig(const Config& config);
+
+ // Zeroes the filter state.
+ void Reset();
+
+  // Filters `x` and writes the output in `y`, which must have the same length
+  // as `x`. In-place processing is supported.
+ void Process(rtc::ArrayView<const float> x, rtc::ArrayView<float> y);
+
+ private:
+ Config config_;
+ struct State {
+    float b[2];  // Past inputs: x[k-1], x[k-2].
+    float a[2];  // Past outputs: y[k-1], y[k-2].
+ } state_;
+};
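+
+// Example usage (an illustrative sketch; `config` stands for any valid
+// `BiQuadFilter::Config`):
+//   BiQuadFilter filter(config);
+//   filter.Process(samples, samples);  // In-place filtering.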
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AGC2_BIQUAD_FILTER_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/biquad_filter_gn/moz.build b/third_party/libwebrtc/modules/audio_processing/agc2/biquad_filter_gn/moz.build
new file mode 100644
index 0000000000..ec66966c7e
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/biquad_filter_gn/moz.build
@@ -0,0 +1,201 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/audio_processing/agc2/biquad_filter.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("biquad_filter_gn")
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/biquad_filter_unittest.cc b/third_party/libwebrtc/modules/audio_processing/agc2/biquad_filter_unittest.cc
new file mode 100644
index 0000000000..a53036b08e
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/biquad_filter_unittest.cc
@@ -0,0 +1,175 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/agc2/biquad_filter.h"
+
+#include <algorithm>
+#include <array>
+#include <cmath>
+
+// TODO(bugs.webrtc.org/8948): Add when the issue is fixed.
+// #include "test/fpe_observer.h"
+#include "rtc_base/gunit.h"
+
+namespace webrtc {
+namespace {
+
+constexpr int kFrameSize = 8;
+constexpr int kNumFrames = 4;
+using FloatArraySequence =
+ std::array<std::array<float, kFrameSize>, kNumFrames>;
+
+constexpr FloatArraySequence kBiQuadInputSeq = {
+ {{{-87.166290f, -8.029022f, 101.619583f, -0.294296f, -5.825764f, -8.890625f,
+ 10.310432f, 54.845333f}},
+ {{-64.647644f, -6.883945f, 11.059189f, -95.242538f, -108.870834f,
+ 11.024944f, 63.044102f, -52.709583f}},
+ {{-32.350529f, -18.108028f, -74.022339f, -8.986874f, -1.525581f,
+ 103.705513f, 6.346226f, -14.319557f}},
+ {{22.645832f, -64.597153f, 55.462521f, -109.393188f, 10.117825f,
+ -40.019642f, -98.612228f, -8.330326f}}}};
+
+// Computed as `scipy.signal.butter(N=2, Wn=60/24000, btype='highpass')`.
+constexpr BiQuadFilter::Config kBiQuadConfig{
+ {0.99446179f, -1.98892358f, 0.99446179f},
+ {-1.98889291f, 0.98895425f}};
+
+// Comparison against scipy. The expected output is generated as follows:
+// zi = np.float32([0, 0])
+// for i in range(4):
+// yn, zi = scipy.signal.lfilter(B, A, x[i], zi=zi)
+// print(yn)
+constexpr FloatArraySequence kBiQuadOutputSeq = {
+ {{{-86.68354497f, -7.02175351f, 102.10290352f, -0.37487333f, -5.87205847f,
+ -8.85521608f, 10.33772563f, 54.51157181f}},
+ {{-64.92531604f, -6.76395978f, 11.15534507f, -94.68073341f, -107.18177856f,
+ 13.24642474f, 64.84288941f, -50.97822629f}},
+ {{-30.1579652f, -15.64850899f, -71.06662821f, -5.5883229f, 1.91175353f,
+ 106.5572003f, 8.57183046f, -12.06298473f}},
+ {{24.84286614f, -62.18094158f, 57.91488056f, -106.65685933f, 13.38760103f,
+ -36.60367134f, -94.44880104f, -3.59920354f}}}};
+
+// Fails for every pair of values from two equally sized rtc::ArrayView<float>
+// views whose relative error is above a given threshold. If the expected value
+// of a pair is 0, `tolerance` bounds the absolute error instead.
+void ExpectNearRelative(rtc::ArrayView<const float> expected,
+ rtc::ArrayView<const float> computed,
+ const float tolerance) {
+ // The relative error is undefined when the expected value is 0.
+ // When that happens, check the absolute error instead. `safe_den` is used
+ // below to implement such logic.
+ auto safe_den = [](float x) { return (x == 0.0f) ? 1.0f : std::fabs(x); };
+ ASSERT_EQ(expected.size(), computed.size());
+ for (size_t i = 0; i < expected.size(); ++i) {
+ const float abs_diff = std::fabs(expected[i] - computed[i]);
+ // No failure when the values are equal.
+ if (abs_diff == 0.0f) {
+ continue;
+ }
+ SCOPED_TRACE(i);
+ SCOPED_TRACE(expected[i]);
+ SCOPED_TRACE(computed[i]);
+ EXPECT_LE(abs_diff / safe_den(expected[i]), tolerance);
+ }
+}
+
+// Checks that filtering works when different containers are used both as input
+// and as output.
+TEST(BiQuadFilterTest, FilterNotInPlace) {
+ BiQuadFilter filter(kBiQuadConfig);
+ std::array<float, kFrameSize> samples;
+
+ // TODO(https://bugs.webrtc.org/8948): Add when the issue is fixed.
+ // FloatingPointExceptionObserver fpe_observer;
+
+ for (int i = 0; i < kNumFrames; ++i) {
+ SCOPED_TRACE(i);
+ filter.Process(kBiQuadInputSeq[i], samples);
+ ExpectNearRelative(kBiQuadOutputSeq[i], samples, 2e-4f);
+ }
+}
+
+// Checks that filtering works when the same container is used both as input and
+// as output.
+TEST(BiQuadFilterTest, FilterInPlace) {
+ BiQuadFilter filter(kBiQuadConfig);
+ std::array<float, kFrameSize> samples;
+
+ // TODO(https://bugs.webrtc.org/8948): Add when the issue is fixed.
+ // FloatingPointExceptionObserver fpe_observer;
+
+ for (int i = 0; i < kNumFrames; ++i) {
+ SCOPED_TRACE(i);
+ std::copy(kBiQuadInputSeq[i].begin(), kBiQuadInputSeq[i].end(),
+ samples.begin());
+ filter.Process({samples}, {samples});
+ ExpectNearRelative(kBiQuadOutputSeq[i], samples, 2e-4f);
+ }
+}
+
+// Checks that different configurations produce different outputs.
+TEST(BiQuadFilterTest, SetConfigDifferentOutput) {
+ BiQuadFilter filter(/*config=*/{{0.97803048f, -1.95606096f, 0.97803048f},
+ {-1.95557824f, 0.95654368f}});
+
+ std::array<float, kFrameSize> samples1;
+ for (int i = 0; i < kNumFrames; ++i) {
+ filter.Process(kBiQuadInputSeq[i], samples1);
+ }
+
+ filter.SetConfig(
+ {{0.09763107f, 0.19526215f, 0.09763107f}, {-0.94280904f, 0.33333333f}});
+ std::array<float, kFrameSize> samples2;
+ for (int i = 0; i < kNumFrames; ++i) {
+ filter.Process(kBiQuadInputSeq[i], samples2);
+ }
+
+ EXPECT_NE(samples1, samples2);
+}
+
+// Checks that the filter state is reset when `SetConfig()` is called, even if
+// the filter coefficients stay the same.
+TEST(BiQuadFilterTest, SetConfigResetsState) {
+ BiQuadFilter filter(kBiQuadConfig);
+
+ std::array<float, kFrameSize> samples1;
+ for (int i = 0; i < kNumFrames; ++i) {
+ filter.Process(kBiQuadInputSeq[i], samples1);
+ }
+
+ filter.SetConfig(kBiQuadConfig);
+ std::array<float, kFrameSize> samples2;
+ for (int i = 0; i < kNumFrames; ++i) {
+ filter.Process(kBiQuadInputSeq[i], samples2);
+ }
+
+ EXPECT_EQ(samples1, samples2);
+}
+
+// Checks that when `Reset()` is called the filter state is reset.
+TEST(BiQuadFilterTest, Reset) {
+ BiQuadFilter filter(kBiQuadConfig);
+
+ std::array<float, kFrameSize> samples1;
+ for (int i = 0; i < kNumFrames; ++i) {
+ filter.Process(kBiQuadInputSeq[i], samples1);
+ }
+
+ filter.Reset();
+ std::array<float, kFrameSize> samples2;
+ for (int i = 0; i < kNumFrames; ++i) {
+ filter.Process(kBiQuadInputSeq[i], samples2);
+ }
+
+ EXPECT_EQ(samples1, samples2);
+}
+
+} // namespace
+} // namespace webrtc
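
For reference, the recurrence exercised by these tests can be sketched as a transposed direct-form II biquad. The snippet below is a minimal illustration, not the class under test; `BiQuadSketch` and `ProcessSample` are made-up names, and the only assumption taken from the code above is the coefficient layout of `kBiQuadConfig` ({b0, b1, b2} feed-forward, {a1, a2} feedback with a0 normalized to 1), matching scipy.signal.lfilter semantics:

    #include <array>

    struct BiQuadSketch {
      std::array<float, 3> b;  // Feed-forward coefficients b0, b1, b2.
      std::array<float, 2> a;  // Feedback coefficients a1, a2 (a0 == 1).
      float z1 = 0.0f;         // Filter state.
      float z2 = 0.0f;

      float ProcessSample(float x) {
        // y[n] = b0*x[n] + z1, then update the two state variables.
        const float y = b[0] * x + z1;
        z1 = b[1] * x - a[0] * y + z2;
        z2 = b[2] * x - a[1] * y;
        return y;
      }
    };

Because each output sample depends only on the current input and the two state variables, processing a buffer in place (as the FilterInPlace test does) is safe.
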
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/common_gn/moz.build b/third_party/libwebrtc/modules/audio_processing/agc2/common_gn/moz.build
new file mode 100644
index 0000000000..9d239a6c3d
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/common_gn/moz.build
@@ -0,0 +1,189 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("common_gn")
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/compute_interpolated_gain_curve.cc b/third_party/libwebrtc/modules/audio_processing/agc2/compute_interpolated_gain_curve.cc
new file mode 100644
index 0000000000..221b499e32
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/compute_interpolated_gain_curve.cc
@@ -0,0 +1,229 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/agc2/compute_interpolated_gain_curve.h"
+
+#include <algorithm>
+#include <cmath>
+#include <queue>
+#include <tuple>
+#include <utility>
+#include <vector>
+
+#include "modules/audio_processing/agc2/agc2_common.h"
+#include "modules/audio_processing/agc2/agc2_testing_common.h"
+#include "modules/audio_processing/agc2/limiter_db_gain_curve.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace {
+
+std::pair<double, double> ComputeLinearApproximationParams(
+ const LimiterDbGainCurve* limiter,
+ const double x) {
+ const double m = limiter->GetGainFirstDerivativeLinear(x);
+ const double q = limiter->GetGainLinear(x) - m * x;
+ return {m, q};
+}
+
+double ComputeAreaUnderPiecewiseLinearApproximation(
+ const LimiterDbGainCurve* limiter,
+ const double x0,
+ const double x1) {
+ RTC_CHECK_LT(x0, x1);
+
+ // Linear approximation in x0 and x1.
+ double m0, q0, m1, q1;
+ std::tie(m0, q0) = ComputeLinearApproximationParams(limiter, x0);
+ std::tie(m1, q1) = ComputeLinearApproximationParams(limiter, x1);
+
+ // Intersection point between two adjacent linear pieces.
+ RTC_CHECK_NE(m1, m0);
+ const double x_split = (q0 - q1) / (m1 - m0);
+ RTC_CHECK_LT(x0, x_split);
+ RTC_CHECK_LT(x_split, x1);
+
+ auto area_under_linear_piece = [](double x_l, double x_r, double m,
+ double q) {
+ return x_r * (m * x_r / 2.0 + q) - x_l * (m * x_l / 2.0 + q);
+ };
+ return area_under_linear_piece(x0, x_split, m0, q0) +
+ area_under_linear_piece(x_split, x1, m1, q1);
+}
+
+// Computes the approximation error in the limiter region for a given interval.
+// The error is the difference between the area beneath the limiter curve being
+// approximated and the area beneath its piecewise linear under-approximation.
+double LimiterUnderApproximationNegativeError(const LimiterDbGainCurve* limiter,
+ const double x0,
+ const double x1) {
+ const double area_limiter = limiter->GetGainIntegralLinear(x0, x1);
+ const double area_interpolated_curve =
+ ComputeAreaUnderPiecewiseLinearApproximation(limiter, x0, x1);
+ RTC_CHECK_GE(area_limiter, area_interpolated_curve);
+ return area_limiter - area_interpolated_curve;
+}
+
+// Automatically finds where to sample the beyond-knee region of a limiter using
+// a greedy optimization algorithm that iteratively decreases the approximation
+// error.
+// The solution is sub-optimal because the algorithm is greedy and the points
+// are assigned by halving intervals (starting with the whole beyond-knee region
+// as a single interval). However, even though it is sub-optimal, this algorithm
+// works well in practice and is implemented efficiently using a priority queue.
+std::vector<double> SampleLimiterRegion(const LimiterDbGainCurve* limiter) {
+ static_assert(kInterpolatedGainCurveBeyondKneePoints > 2, "");
+
+ struct Interval {
+ Interval() = default; // Ctor required by std::priority_queue.
+ Interval(double l, double r, double e) : x0(l), x1(r), error(e) {
+ RTC_CHECK(x0 < x1);
+ }
+ bool operator<(const Interval& other) const { return error < other.error; }
+
+ double x0;
+ double x1;
+ double error;
+ };
+
+ std::priority_queue<Interval, std::vector<Interval>> q;
+ q.emplace(limiter->limiter_start_linear(), limiter->max_input_level_linear(),
+ LimiterUnderApproximationNegativeError(
+ limiter, limiter->limiter_start_linear(),
+ limiter->max_input_level_linear()));
+
+ // Iteratively find points by halving the interval with greatest error.
+ while (q.size() < kInterpolatedGainCurveBeyondKneePoints) {
+ // Get the interval with highest error.
+ const auto interval = q.top();
+ q.pop();
+
+ // Split `interval` and enqueue.
+ double x_split = (interval.x0 + interval.x1) / 2.0;
+ q.emplace(interval.x0, x_split,
+ LimiterUnderApproximationNegativeError(limiter, interval.x0,
+ x_split)); // Left.
+ q.emplace(x_split, interval.x1,
+ LimiterUnderApproximationNegativeError(limiter, x_split,
+ interval.x1)); // Right.
+ }
+
+ // Copy x1 values and sort them.
+ RTC_CHECK_EQ(q.size(), kInterpolatedGainCurveBeyondKneePoints);
+ std::vector<double> samples(kInterpolatedGainCurveBeyondKneePoints);
+ for (size_t i = 0; i < kInterpolatedGainCurveBeyondKneePoints; ++i) {
+ const auto interval = q.top();
+ q.pop();
+ samples[i] = interval.x1;
+ }
+ RTC_CHECK(q.empty());
+ std::sort(samples.begin(), samples.end());
+
+ return samples;
+}
+
+// Compute the parameters to over-approximate the knee region via linear
+// interpolation. Over-approximating is saturation-safe since the knee region is
+// convex.
+void PrecomputeKneeApproxParams(const LimiterDbGainCurve* limiter,
+ test::InterpolatedParameters* parameters) {
+ static_assert(kInterpolatedGainCurveKneePoints > 2, "");
+ // Get `kInterpolatedGainCurveKneePoints` - 1 equally spaced points.
+ const std::vector<double> points = test::LinSpace(
+ limiter->knee_start_linear(), limiter->limiter_start_linear(),
+ kInterpolatedGainCurveKneePoints - 1);
+
+ // Set the first two points. The second is computed to help with the beginning
+ // of the knee region, which has high curvature.
+ parameters->computed_approximation_params_x[0] = points[0];
+ parameters->computed_approximation_params_x[1] =
+ (points[0] + points[1]) / 2.0;
+ // Copy the remaining points.
+ std::copy(std::begin(points) + 1, std::end(points),
+ std::begin(parameters->computed_approximation_params_x) + 2);
+
+ // Compute (m, q) pairs for each linear piece y = mx + q.
+ for (size_t i = 0; i < kInterpolatedGainCurveKneePoints - 1; ++i) {
+ const double x0 = parameters->computed_approximation_params_x[i];
+ const double x1 = parameters->computed_approximation_params_x[i + 1];
+ const double y0 = limiter->GetGainLinear(x0);
+ const double y1 = limiter->GetGainLinear(x1);
+ RTC_CHECK_NE(x1, x0);
+ parameters->computed_approximation_params_m[i] = (y1 - y0) / (x1 - x0);
+ parameters->computed_approximation_params_q[i] =
+ y0 - parameters->computed_approximation_params_m[i] * x0;
+ }
+}
+
+// Compute the parameters to under-approximate the beyond-knee region via linear
+// interpolation and greedy sampling. Under-approximating is saturation-safe
+// since the beyond-knee region is concave.
+void PrecomputeBeyondKneeApproxParams(
+ const LimiterDbGainCurve* limiter,
+ test::InterpolatedParameters* parameters) {
+ // Find points on which the linear pieces are tangent to the gain curve.
+ const auto samples = SampleLimiterRegion(limiter);
+
+ // Parametrize each linear piece.
+ double m, q;
+ std::tie(m, q) = ComputeLinearApproximationParams(
+ limiter,
+ parameters
+ ->computed_approximation_params_x[kInterpolatedGainCurveKneePoints -
+ 1]);
+ parameters
+ ->computed_approximation_params_m[kInterpolatedGainCurveKneePoints - 1] =
+ m;
+ parameters
+ ->computed_approximation_params_q[kInterpolatedGainCurveKneePoints - 1] =
+ q;
+ for (size_t i = 0; i < samples.size(); ++i) {
+ std::tie(m, q) = ComputeLinearApproximationParams(limiter, samples[i]);
+ parameters
+ ->computed_approximation_params_m[i +
+ kInterpolatedGainCurveKneePoints] = m;
+ parameters
+ ->computed_approximation_params_q[i +
+ kInterpolatedGainCurveKneePoints] = q;
+ }
+
+  // Find the points of intersection between adjacent linear pieces; they are
+  // used as the boundaries between those pieces.
+ for (size_t i = kInterpolatedGainCurveKneePoints;
+ i < kInterpolatedGainCurveKneePoints +
+ kInterpolatedGainCurveBeyondKneePoints;
+ ++i) {
+ RTC_CHECK_NE(parameters->computed_approximation_params_m[i],
+ parameters->computed_approximation_params_m[i - 1]);
+ parameters->computed_approximation_params_x[i] =
+ ( // Formula: (q0 - q1) / (m1 - m0).
+ parameters->computed_approximation_params_q[i - 1] -
+ parameters->computed_approximation_params_q[i]) /
+ (parameters->computed_approximation_params_m[i] -
+ parameters->computed_approximation_params_m[i - 1]);
+ }
+}
+
+} // namespace
+
+namespace test {
+
+InterpolatedParameters ComputeInterpolatedGainCurveApproximationParams() {
+ InterpolatedParameters parameters;
+ LimiterDbGainCurve limiter;
+ parameters.computed_approximation_params_x.fill(0.0f);
+ parameters.computed_approximation_params_m.fill(0.0f);
+ parameters.computed_approximation_params_q.fill(0.0f);
+ PrecomputeKneeApproxParams(&limiter, &parameters);
+ PrecomputeBeyondKneeApproxParams(&limiter, &parameters);
+ return parameters;
+}
+} // namespace test
+} // namespace webrtc
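
The geometry behind `SampleLimiterRegion()` is easier to see on a toy curve. The sketch below assumes nothing from the file except the tangent-line parametrization and the intersection formula used above; sqrt(x) merely stands in for the gain curve, and `TangentParams` is an illustrative name:

    #include <cmath>
    #include <cstdio>
    #include <utility>

    // Tangent line y = m*x + q to f at x, for f(x) = sqrt(x), a stand-in for
    // the beyond-knee gain curve.
    std::pair<double, double> TangentParams(double x) {
      const double m = 0.5 / std::sqrt(x);    // f'(x).
      const double q = std::sqrt(x) - m * x;  // f(x) - m*x.
      return {m, q};
    }

    int main() {
      const auto [m0, q0] = TangentParams(1.0);
      const auto [m1, q1] = TangentParams(4.0);
      // Intersection of two adjacent pieces: (q0 - q1) / (m1 - m0),
      // exactly the x_split formula in the code above.
      const double x_split = (q0 - q1) / (m1 - m0);
      std::printf("x_split = %f\n", x_split);  // 2.0, strictly inside (1, 4).
      return 0;
    }
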
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/compute_interpolated_gain_curve.h b/third_party/libwebrtc/modules/audio_processing/agc2/compute_interpolated_gain_curve.h
new file mode 100644
index 0000000000..08b676f5fd
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/compute_interpolated_gain_curve.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AGC2_COMPUTE_INTERPOLATED_GAIN_CURVE_H_
+#define MODULES_AUDIO_PROCESSING_AGC2_COMPUTE_INTERPOLATED_GAIN_CURVE_H_
+
+#include <array>
+
+#include "modules/audio_processing/agc2/agc2_common.h"
+
+namespace webrtc {
+
+namespace test {
+
+// Parameters for the interpolated gain curve, which uses under-approximation
+// to avoid saturation.
+//
+// The saturation gain is defined so that hard-clipping occurs for those
+// samples whose level falls in the saturation region. It is an upper bound
+// of the actual gain to apply, i.e., of the gain returned by the limiter.
+
+// Knee and beyond-knee regions approximation parameters.
+// The gain curve is approximated as a piece-wise linear function.
+// `computed_approximation_params_x` holds the boundaries between adjacent
+// linear pieces; `computed_approximation_params_m` and
+// `computed_approximation_params_q` hold the slope and the y-intercept
+// values of each piece.
+struct InterpolatedParameters {
+ std::array<float, kInterpolatedGainCurveTotalPoints>
+ computed_approximation_params_x;
+ std::array<float, kInterpolatedGainCurveTotalPoints>
+ computed_approximation_params_m;
+ std::array<float, kInterpolatedGainCurveTotalPoints>
+ computed_approximation_params_q;
+};
+
+InterpolatedParameters ComputeInterpolatedGainCurveApproximationParams();
+} // namespace test
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AGC2_COMPUTE_INTERPOLATED_GAIN_CURVE_H_
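
A sketch of how a table like `InterpolatedParameters` is typically consumed: locate the piece whose left boundary precedes the input, then evaluate that piece's line. The actual lookup lives in interpolated_gain_curve.cc; `EvaluatePiecewise` below is a hypothetical helper, not part of the API:

    #include <algorithm>
    #include <array>
    #include <cstddef>

    // x[i] are the left boundaries of the pieces, m[i]/q[i] the slope and
    // y-intercept of each piece. Inputs below x[0] fall back to piece 0.
    template <std::size_t N>
    float EvaluatePiecewise(const std::array<float, N>& x,
                            const std::array<float, N>& m,
                            const std::array<float, N>& q,
                            float input) {
      const auto it = std::upper_bound(x.begin(), x.end(), input);
      const std::size_t i =
          (it == x.begin()) ? 0 : static_cast<std::size_t>(it - x.begin()) - 1;
      return m[i] * input + q[i];
    }
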
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/cpu_features.cc b/third_party/libwebrtc/modules/audio_processing/agc2/cpu_features.cc
new file mode 100644
index 0000000000..cced7614bc
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/cpu_features.cc
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/agc2/cpu_features.h"
+
+#include "rtc_base/strings/string_builder.h"
+#include "rtc_base/system/arch.h"
+#include "system_wrappers/include/cpu_features_wrapper.h"
+
+namespace webrtc {
+
+std::string AvailableCpuFeatures::ToString() const {
+ char buf[64];
+ rtc::SimpleStringBuilder builder(buf);
+ bool first = true;
+ if (sse2) {
+ builder << (first ? "SSE2" : "_SSE2");
+ first = false;
+ }
+ if (avx2) {
+ builder << (first ? "AVX2" : "_AVX2");
+ first = false;
+ }
+ if (neon) {
+ builder << (first ? "NEON" : "_NEON");
+ first = false;
+ }
+ if (first) {
+ return "none";
+ }
+ return builder.str();
+}
+
+// Detects available CPU features.
+AvailableCpuFeatures GetAvailableCpuFeatures() {
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+ return {/*sse2=*/GetCPUInfo(kSSE2) != 0,
+ /*avx2=*/GetCPUInfo(kAVX2) != 0,
+ /*neon=*/false};
+#elif defined(WEBRTC_HAS_NEON)
+ return {/*sse2=*/false,
+ /*avx2=*/false,
+ /*neon=*/true};
+#else
+ return {/*sse2=*/false,
+ /*avx2=*/false,
+ /*neon=*/false};
+#endif
+}
+
+AvailableCpuFeatures NoAvailableCpuFeatures() {
+ return {/*sse2=*/false, /*avx2=*/false, /*neon=*/false};
+}
+
+} // namespace webrtc
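
A minimal usage sketch of this API; `PrintFeatures` is a hypothetical caller, and the string format follows `ToString()` above (detected features joined with underscores, or "none"):

    #include <cstdio>

    #include "modules/audio_processing/agc2/cpu_features.h"

    // Typical use: query once, then pick an optimized code path.
    void PrintFeatures() {
      const webrtc::AvailableCpuFeatures features =
          webrtc::GetAvailableCpuFeatures();
      // E.g. "SSE2_AVX2" on modern x86, "NEON" on ARM, or "none".
      std::printf("CPU features: %s\n", features.ToString().c_str());
      if (features.avx2) {
        // Dispatch to an AVX2 kernel here.
      }
    }
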
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/cpu_features.h b/third_party/libwebrtc/modules/audio_processing/agc2/cpu_features.h
new file mode 100644
index 0000000000..54ddfb3055
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/cpu_features.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AGC2_CPU_FEATURES_H_
+#define MODULES_AUDIO_PROCESSING_AGC2_CPU_FEATURES_H_
+
+#include <string>
+
+namespace webrtc {
+
+// Collection of flags indicating which CPU features are available on the
+// current platform. True means available.
+struct AvailableCpuFeatures {
+ AvailableCpuFeatures(bool sse2, bool avx2, bool neon)
+ : sse2(sse2), avx2(avx2), neon(neon) {}
+ // Intel.
+ bool sse2;
+ bool avx2;
+ // ARM.
+ bool neon;
+ std::string ToString() const;
+};
+
+// Detects what CPU features are available.
+AvailableCpuFeatures GetAvailableCpuFeatures();
+
+// Returns the CPU feature flags all set to false.
+AvailableCpuFeatures NoAvailableCpuFeatures();
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AGC2_CPU_FEATURES_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/cpu_features_gn/moz.build b/third_party/libwebrtc/modules/audio_processing/agc2/cpu_features_gn/moz.build
new file mode 100644
index 0000000000..53d9444734
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/cpu_features_gn/moz.build
@@ -0,0 +1,212 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/audio_processing/agc2/cpu_features.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("cpu_features_gn")
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/fixed_digital_gn/moz.build b/third_party/libwebrtc/modules/audio_processing/agc2/fixed_digital_gn/moz.build
new file mode 100644
index 0000000000..3e7668851c
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/fixed_digital_gn/moz.build
@@ -0,0 +1,215 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_APM_DEBUG_DUMP"] = "0"
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/audio_processing/agc2/fixed_digital_level_estimator.cc",
+ "/third_party/libwebrtc/modules/audio_processing/agc2/interpolated_gain_curve.cc",
+ "/third_party/libwebrtc/modules/audio_processing/agc2/limiter.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("fixed_digital_gn")
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/fixed_digital_level_estimator.cc b/third_party/libwebrtc/modules/audio_processing/agc2/fixed_digital_level_estimator.cc
new file mode 100644
index 0000000000..1995b24913
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/fixed_digital_level_estimator.cc
@@ -0,0 +1,121 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/agc2/fixed_digital_level_estimator.h"
+
+#include <algorithm>
+#include <cmath>
+
+#include "api/array_view.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace {
+
+constexpr float kInitialFilterStateLevel = 0.0f;
+
+// Instant attack.
+constexpr float kAttackFilterConstant = 0.0f;
+
+// Limiter decay constant.
+// Computed as `10 ** (-1/20 * subframe_duration / kDecayMs)` where:
+// - `subframe_duration` is `kFrameDurationMs / kSubFramesInFrame`;
+// - `kDecayMs` is defined in agc2_testing_common.h.
+constexpr float kDecayFilterConstant = 0.9971259f;
+
+} // namespace
+
+FixedDigitalLevelEstimator::FixedDigitalLevelEstimator(
+ int sample_rate_hz,
+ ApmDataDumper* apm_data_dumper)
+ : apm_data_dumper_(apm_data_dumper),
+ filter_state_level_(kInitialFilterStateLevel) {
+ SetSampleRate(sample_rate_hz);
+ CheckParameterCombination();
+ RTC_DCHECK(apm_data_dumper_);
+ apm_data_dumper_->DumpRaw("agc2_level_estimator_samplerate", sample_rate_hz);
+}
+
+void FixedDigitalLevelEstimator::CheckParameterCombination() {
+ RTC_DCHECK_GT(samples_in_frame_, 0);
+ RTC_DCHECK_LE(kSubFramesInFrame, samples_in_frame_);
+ RTC_DCHECK_EQ(samples_in_frame_ % kSubFramesInFrame, 0);
+ RTC_DCHECK_GT(samples_in_sub_frame_, 1);
+}
+
+std::array<float, kSubFramesInFrame> FixedDigitalLevelEstimator::ComputeLevel(
+ const AudioFrameView<const float>& float_frame) {
+ RTC_DCHECK_GT(float_frame.num_channels(), 0);
+ RTC_DCHECK_EQ(float_frame.samples_per_channel(), samples_in_frame_);
+
+ // Compute max envelope without smoothing.
+ std::array<float, kSubFramesInFrame> envelope{};
+ for (int channel_idx = 0; channel_idx < float_frame.num_channels();
+ ++channel_idx) {
+ const auto channel = float_frame.channel(channel_idx);
+ for (int sub_frame = 0; sub_frame < kSubFramesInFrame; ++sub_frame) {
+ for (int sample_in_sub_frame = 0;
+ sample_in_sub_frame < samples_in_sub_frame_; ++sample_in_sub_frame) {
+ envelope[sub_frame] =
+ std::max(envelope[sub_frame],
+ std::abs(channel[sub_frame * samples_in_sub_frame_ +
+ sample_in_sub_frame]));
+ }
+ }
+ }
+
+ // Make sure envelope increases happen one step earlier so that the
+ // corresponding *gain decrease* doesn't miss a sudden signal
+ // increase due to interpolation.
+ for (int sub_frame = 0; sub_frame < kSubFramesInFrame - 1; ++sub_frame) {
+ if (envelope[sub_frame] < envelope[sub_frame + 1]) {
+ envelope[sub_frame] = envelope[sub_frame + 1];
+ }
+ }
+
+ // Add attack / decay smoothing.
+ for (int sub_frame = 0; sub_frame < kSubFramesInFrame; ++sub_frame) {
+ const float envelope_value = envelope[sub_frame];
+ if (envelope_value > filter_state_level_) {
+ envelope[sub_frame] = envelope_value * (1 - kAttackFilterConstant) +
+ filter_state_level_ * kAttackFilterConstant;
+ } else {
+ envelope[sub_frame] = envelope_value * (1 - kDecayFilterConstant) +
+ filter_state_level_ * kDecayFilterConstant;
+ }
+ filter_state_level_ = envelope[sub_frame];
+
+ // Dump data for debug.
+ RTC_DCHECK(apm_data_dumper_);
+ const auto channel = float_frame.channel(0);
+ apm_data_dumper_->DumpRaw("agc2_level_estimator_samples",
+ samples_in_sub_frame_,
+ &channel[sub_frame * samples_in_sub_frame_]);
+ apm_data_dumper_->DumpRaw("agc2_level_estimator_level",
+ envelope[sub_frame]);
+ }
+
+ return envelope;
+}
+
+void FixedDigitalLevelEstimator::SetSampleRate(int sample_rate_hz) {
+ samples_in_frame_ =
+ rtc::CheckedDivExact(sample_rate_hz * kFrameDurationMs, 1000);
+ samples_in_sub_frame_ =
+ rtc::CheckedDivExact(samples_in_frame_, kSubFramesInFrame);
+ CheckParameterCombination();
+}
+
+void FixedDigitalLevelEstimator::Reset() {
+ filter_state_level_ = kInitialFilterStateLevel;
+}
+
+} // namespace webrtc
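
The decay constant can be reproduced from the formula in the comment. The check below assumes kFrameDurationMs = 10 and kSubFramesInFrame = 20 (as documented in the class header) and kDecayMs = 20, a value consistent with the constant; kDecayMs itself is defined in agc2_testing_common.h:

    #include <cmath>
    #include <cstdio>

    int main() {
      const double subframe_duration_ms = 10.0 / 20.0;  // 0.5 ms per sub-frame.
      const double decay_ms = 20.0;                     // 1 dB of decay per 20 ms.
      // 10 ** (-1/20 * subframe_duration / kDecayMs).
      const double c =
          std::pow(10.0, -1.0 / 20.0 * subframe_duration_ms / decay_ms);
      std::printf("%.7f\n", c);  // Prints 0.9971259.
      return 0;
    }
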
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/fixed_digital_level_estimator.h b/third_party/libwebrtc/modules/audio_processing/agc2/fixed_digital_level_estimator.h
new file mode 100644
index 0000000000..d26b55950c
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/fixed_digital_level_estimator.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AGC2_FIXED_DIGITAL_LEVEL_ESTIMATOR_H_
+#define MODULES_AUDIO_PROCESSING_AGC2_FIXED_DIGITAL_LEVEL_ESTIMATOR_H_
+
+#include <array>
+#include <vector>
+
+#include "modules/audio_processing/agc2/agc2_common.h"
+#include "modules/audio_processing/include/audio_frame_view.h"
+
+namespace webrtc {
+
+class ApmDataDumper;
+// Produces a smooth signal level estimate from an input audio
+// stream. The estimate smoothing is done through exponential
+// filtering.
+class FixedDigitalLevelEstimator {
+ public:
+  // Sample rates are allowed if the number of samples in a frame
+  // (sample_rate_hz * kFrameDurationMs / 1000) is divisible by
+  // kSubFramesInFrame. For kFrameDurationMs=10 and
+  // kSubFramesInFrame=20, this means that sample_rate_hz has to be
+  // divisible by 2000.
+ FixedDigitalLevelEstimator(int sample_rate_hz,
+ ApmDataDumper* apm_data_dumper);
+
+ FixedDigitalLevelEstimator(const FixedDigitalLevelEstimator&) = delete;
+ FixedDigitalLevelEstimator& operator=(const FixedDigitalLevelEstimator&) =
+ delete;
+
+  // The input is assumed to be in FloatS16 format. Scaled input will
+  // produce similarly scaled output. A frame with kFrameDurationMs ms
+  // of audio produces a level estimate in the same scale. The level
+  // estimate contains kSubFramesInFrame values.
+ std::array<float, kSubFramesInFrame> ComputeLevel(
+ const AudioFrameView<const float>& float_frame);
+
+  // The sample rate may be changed at any time (but not concurrently
+  // with other calls) from the value passed to the constructor. The
+  // class is not thread-safe.
+ void SetSampleRate(int sample_rate_hz);
+
+ // Resets the level estimator internal state.
+ void Reset();
+
+ float LastAudioLevel() const { return filter_state_level_; }
+
+ private:
+ void CheckParameterCombination();
+
+ ApmDataDumper* const apm_data_dumper_ = nullptr;
+ float filter_state_level_;
+ int samples_in_frame_;
+ int samples_in_sub_frame_;
+};
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AGC2_FIXED_DIGITAL_LEVEL_ESTIMATOR_H_
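
A minimal usage sketch for the class; `Sketch` is a hypothetical caller, and the construction of an AudioFrameView from raw buffers is elided (the unit tests use a VectorFloatFrame helper for that):

    #include "modules/audio_processing/agc2/fixed_digital_level_estimator.h"
    #include "modules/audio_processing/logging/apm_data_dumper.h"

    void Sketch(const webrtc::AudioFrameView<const float>& frame) {
      webrtc::ApmDataDumper data_dumper(0);
      // 48000 is divisible by 2000, so it is an allowed sample rate.
      webrtc::FixedDigitalLevelEstimator estimator(48000, &data_dumper);
      // One level value per sub-frame, in FloatS16 scale.
      const auto level = estimator.ComputeLevel(frame);
      (void)level;
    }
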
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/fixed_digital_level_estimator_unittest.cc b/third_party/libwebrtc/modules/audio_processing/agc2/fixed_digital_level_estimator_unittest.cc
new file mode 100644
index 0000000000..97b421d04c
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/fixed_digital_level_estimator_unittest.cc
@@ -0,0 +1,159 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/agc2/fixed_digital_level_estimator.h"
+
+#include <limits>
+
+#include "common_audio/include/audio_util.h"
+#include "modules/audio_processing/agc2/agc2_common.h"
+#include "modules/audio_processing/agc2/agc2_testing_common.h"
+#include "modules/audio_processing/agc2/vector_float_frame.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+#include "rtc_base/gunit.h"
+
+namespace webrtc {
+namespace {
+
+constexpr float kInputLevel = 10000.f;
+
+// Run audio at specified settings through the level estimator, and
+// verify that the output level falls within the bounds.
+void TestLevelEstimator(int sample_rate_hz,
+ int num_channels,
+ float input_level_linear_scale,
+ float expected_min,
+ float expected_max) {
+ ApmDataDumper apm_data_dumper(0);
+ FixedDigitalLevelEstimator level_estimator(sample_rate_hz, &apm_data_dumper);
+
+ const VectorFloatFrame vectors_with_float_frame(
+ num_channels, rtc::CheckedDivExact(sample_rate_hz, 100),
+ input_level_linear_scale);
+
+ for (int i = 0; i < 500; ++i) {
+ const auto level = level_estimator.ComputeLevel(
+ vectors_with_float_frame.float_frame_view());
+
+ // Give the estimator some time to ramp up.
+ if (i < 50) {
+ continue;
+ }
+
+ for (const auto& x : level) {
+ EXPECT_LE(expected_min, x);
+ EXPECT_LE(x, expected_max);
+ }
+ }
+}
+
+// Returns the time, in ms, that it takes for the level estimator to decrease
+// its level estimate by 'level_reduction_db'.
+float TimeMsToDecreaseLevel(int sample_rate_hz,
+ int num_channels,
+ float input_level_db,
+ float level_reduction_db) {
+ const float input_level = DbfsToFloatS16(input_level_db);
+ RTC_DCHECK_GT(level_reduction_db, 0);
+
+ const VectorFloatFrame vectors_with_float_frame(
+ num_channels, rtc::CheckedDivExact(sample_rate_hz, 100), input_level);
+
+ ApmDataDumper apm_data_dumper(0);
+ FixedDigitalLevelEstimator level_estimator(sample_rate_hz, &apm_data_dumper);
+
+  // Give the level estimator plenty of time to ramp up and stabilize.
+ float last_level = 0.f;
+ for (int i = 0; i < 500; ++i) {
+ const auto level_envelope = level_estimator.ComputeLevel(
+ vectors_with_float_frame.float_frame_view());
+ last_level = *level_envelope.rbegin();
+ }
+
+ // Set input to 0.
+ VectorFloatFrame vectors_with_zero_float_frame(
+ num_channels, rtc::CheckedDivExact(sample_rate_hz, 100), 0);
+
+ const float reduced_level_linear =
+ DbfsToFloatS16(input_level_db - level_reduction_db);
+ int sub_frames_until_level_reduction = 0;
+ while (last_level > reduced_level_linear) {
+ const auto level_envelope = level_estimator.ComputeLevel(
+ vectors_with_zero_float_frame.float_frame_view());
+ for (const auto& v : level_envelope) {
+ EXPECT_LT(v, last_level);
+ sub_frames_until_level_reduction++;
+ last_level = v;
+ if (last_level <= reduced_level_linear) {
+ break;
+ }
+ }
+ }
+ return static_cast<float>(sub_frames_until_level_reduction) *
+ kFrameDurationMs / kSubFramesInFrame;
+}
+} // namespace
+
+TEST(GainController2FixedDigitalLevelEstimator, EstimatorShouldNotCrash) {
+ TestLevelEstimator(8000, 1, 0, std::numeric_limits<float>::lowest(),
+ std::numeric_limits<float>::max());
+}
+
+TEST(GainController2FixedDigitalLevelEstimator,
+ EstimatorShouldEstimateConstantLevel) {
+ TestLevelEstimator(10000, 1, kInputLevel, kInputLevel * 0.99,
+ kInputLevel * 1.01);
+}
+
+TEST(GainController2FixedDigitalLevelEstimator,
+ EstimatorShouldEstimateConstantLevelForManyChannels) {
+ constexpr size_t num_channels = 10;
+ TestLevelEstimator(20000, num_channels, kInputLevel, kInputLevel * 0.99,
+ kInputLevel * 1.01);
+}
+
+TEST(GainController2FixedDigitalLevelEstimator, TimeToDecreaseForLowLevel) {
+ constexpr float kLevelReductionDb = 25;
+ constexpr float kInitialLowLevel = -40;
+ constexpr float kExpectedTime = kLevelReductionDb * test::kDecayMs;
+
+ const float time_to_decrease =
+ TimeMsToDecreaseLevel(22000, 1, kInitialLowLevel, kLevelReductionDb);
+
+ EXPECT_LE(kExpectedTime * 0.9, time_to_decrease);
+ EXPECT_LE(time_to_decrease, kExpectedTime * 1.1);
+}
+
+TEST(GainController2FixedDigitalLevelEstimator,
+ TimeToDecreaseForFullScaleLevel) {
+ constexpr float kLevelReductionDb = 25;
+ constexpr float kExpectedTime = kLevelReductionDb * test::kDecayMs;
+
+ const float time_to_decrease =
+ TimeMsToDecreaseLevel(26000, 1, 0, kLevelReductionDb);
+
+ EXPECT_LE(kExpectedTime * 0.9, time_to_decrease);
+ EXPECT_LE(time_to_decrease, kExpectedTime * 1.1);
+}
+
+TEST(GainController2FixedDigitalLevelEstimator,
+ TimeToDecreaseForMultipleChannels) {
+ constexpr float kLevelReductionDb = 25;
+ constexpr float kExpectedTime = kLevelReductionDb * test::kDecayMs;
+ constexpr size_t kNumChannels = 10;
+
+ const float time_to_decrease =
+ TimeMsToDecreaseLevel(28000, kNumChannels, 0, kLevelReductionDb);
+
+ EXPECT_LE(kExpectedTime * 0.9, time_to_decrease);
+ EXPECT_LE(time_to_decrease, kExpectedTime * 1.1);
+}
+
+} // namespace webrtc
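
As a sanity check on the expected times above, assuming test::kDecayMs is 20 ms per dB (the value consistent with kDecayFilterConstant in the estimator): a kLevelReductionDb = 25 dB reduction should take roughly 25 * 20 = 500 ms, and the EXPECT_LE pair accepts any measurement within the 10 percent band [450 ms, 550 ms].
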
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/gain_applier.cc b/third_party/libwebrtc/modules/audio_processing/agc2/gain_applier.cc
new file mode 100644
index 0000000000..f9e276d3a8
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/gain_applier.cc
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/agc2/gain_applier.h"
+
+#include "api/array_view.h"
+#include "modules/audio_processing/agc2/agc2_common.h"
+#include "rtc_base/numerics/safe_minmax.h"
+
+namespace webrtc {
+namespace {
+
+// Returns true when the gain factor is so close to 1 that it would
+// not affect int16 samples.
+bool GainCloseToOne(float gain_factor) {
+ return 1.f - 1.f / kMaxFloatS16Value <= gain_factor &&
+ gain_factor <= 1.f + 1.f / kMaxFloatS16Value;
+}
+
+void ClipSignal(AudioFrameView<float> signal) {
+ for (int k = 0; k < signal.num_channels(); ++k) {
+ rtc::ArrayView<float> channel_view = signal.channel(k);
+ for (auto& sample : channel_view) {
+ sample = rtc::SafeClamp(sample, kMinFloatS16Value, kMaxFloatS16Value);
+ }
+ }
+}
+
+void ApplyGainWithRamping(float last_gain_linear,
+ float gain_at_end_of_frame_linear,
+ float inverse_samples_per_channel,
+ AudioFrameView<float> float_frame) {
+ // Do not modify the signal.
+ if (last_gain_linear == gain_at_end_of_frame_linear &&
+ GainCloseToOne(gain_at_end_of_frame_linear)) {
+ return;
+ }
+
+ // Gain is constant and different from 1.
+ if (last_gain_linear == gain_at_end_of_frame_linear) {
+ for (int k = 0; k < float_frame.num_channels(); ++k) {
+ rtc::ArrayView<float> channel_view = float_frame.channel(k);
+ for (auto& sample : channel_view) {
+ sample *= gain_at_end_of_frame_linear;
+ }
+ }
+ return;
+ }
+
+  // The gain changes within the frame. Ramp it gradually to avoid
+  // discontinuities.
+ const float increment = (gain_at_end_of_frame_linear - last_gain_linear) *
+ inverse_samples_per_channel;
+ float gain = last_gain_linear;
+ for (int i = 0; i < float_frame.samples_per_channel(); ++i) {
+ for (int ch = 0; ch < float_frame.num_channels(); ++ch) {
+ float_frame.channel(ch)[i] *= gain;
+ }
+ gain += increment;
+ }
+}
+
+} // namespace
+
+GainApplier::GainApplier(bool hard_clip_samples, float initial_gain_factor)
+ : hard_clip_samples_(hard_clip_samples),
+ last_gain_factor_(initial_gain_factor),
+ current_gain_factor_(initial_gain_factor) {}
+
+void GainApplier::ApplyGain(AudioFrameView<float> signal) {
+ if (static_cast<int>(signal.samples_per_channel()) != samples_per_channel_) {
+ Initialize(signal.samples_per_channel());
+ }
+
+ ApplyGainWithRamping(last_gain_factor_, current_gain_factor_,
+ inverse_samples_per_channel_, signal);
+
+ last_gain_factor_ = current_gain_factor_;
+
+ if (hard_clip_samples_) {
+ ClipSignal(signal);
+ }
+}
+
+// TODO(bugs.webrtc.org/7494): Remove once switched to gains in dB.
+void GainApplier::SetGainFactor(float gain_factor) {
+ RTC_DCHECK_GT(gain_factor, 0.f);
+ current_gain_factor_ = gain_factor;
+}
+
+void GainApplier::Initialize(int samples_per_channel) {
+ RTC_DCHECK_GT(samples_per_channel, 0);
+ samples_per_channel_ = static_cast<int>(samples_per_channel);
+ inverse_samples_per_channel_ = 1.f / samples_per_channel_;
+}
+
+} // namespace webrtc
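
The ramp in `ApplyGainWithRamping()` is plain linear interpolation of the gain across the frame. The sketch below spells it out for a single channel; `RampGain` is illustrative, not the WebRTC class:

    #include <vector>

    // Linearly ramp the applied gain from g0 to g1 across one frame:
    // increment = (g1 - g0) / N, added after each sample.
    void RampGain(std::vector<float>& samples, float g0, float g1) {
      const float increment = (g1 - g0) / samples.size();
      float gain = g0;
      for (float& s : samples) {
        s *= gain;
        gain += increment;
      }
    }

After the ramp, `ApplyGain()` stores the end-of-frame gain in `last_gain_factor_`, so subsequent frames apply a constant gain until `SetGainFactor()` changes it again.
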
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/gain_applier.h b/third_party/libwebrtc/modules/audio_processing/agc2/gain_applier.h
new file mode 100644
index 0000000000..ba8a4a4cd2
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/gain_applier.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AGC2_GAIN_APPLIER_H_
+#define MODULES_AUDIO_PROCESSING_AGC2_GAIN_APPLIER_H_
+
+#include <stddef.h>
+
+#include "modules/audio_processing/include/audio_frame_view.h"
+
+namespace webrtc {
+class GainApplier {
+ public:
+ GainApplier(bool hard_clip_samples, float initial_gain_factor);
+
+ void ApplyGain(AudioFrameView<float> signal);
+ void SetGainFactor(float gain_factor);
+ float GetGainFactor() const { return current_gain_factor_; }
+
+ private:
+ void Initialize(int samples_per_channel);
+
+ // Whether to clip samples after the gain is applied. If `true`, the
+ // result fits in the FloatS16 range.
+ const bool hard_clip_samples_;
+ float last_gain_factor_;
+
+ // If this value is not equal to `last_gain_factor_`, the gain is
+ // ramped from `last_gain_factor_` to this value during the next
+ // `ApplyGain()` call.
+ float current_gain_factor_;
+ int samples_per_channel_ = -1;
+ float inverse_samples_per_channel_ = -1.f;
+};
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AGC2_GAIN_APPLIER_H_
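A usage sketch for the class declared above (hypothetical caller code; constructing the `AudioFrameView` depends on the caller's buffer layout):

    // Apply a fixed ~+6 dB gain with hard clipping to the FloatS16 range.
    webrtc::GainApplier gain_applier(/*hard_clip_samples=*/true,
                                     /*initial_gain_factor=*/1.0f);
    gain_applier.SetGainFactor(2.0f);  // Ramped in during the next ApplyGain().
    // For each 10 ms frame:
    //   gain_applier.ApplyGain(frame_view);  // frame_view: AudioFrameView<float>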
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/gain_applier_gn/moz.build b/third_party/libwebrtc/modules/audio_processing/agc2/gain_applier_gn/moz.build
new file mode 100644
index 0000000000..1e7fc30e59
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/gain_applier_gn/moz.build
@@ -0,0 +1,201 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/audio_processing/agc2/gain_applier.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("gain_applier_gn")
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/gain_applier_unittest.cc b/third_party/libwebrtc/modules/audio_processing/agc2/gain_applier_unittest.cc
new file mode 100644
index 0000000000..3296345e62
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/gain_applier_unittest.cc
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/agc2/gain_applier.h"
+
+#include <math.h>
+
+#include <algorithm>
+#include <limits>
+
+#include "modules/audio_processing/agc2/vector_float_frame.h"
+#include "rtc_base/gunit.h"
+
+namespace webrtc {
+TEST(AutomaticGainController2GainApplier, InitialGainIsRespected) {
+ constexpr float initial_signal_level = 123.f;
+ constexpr float gain_factor = 10.f;
+ VectorFloatFrame fake_audio(1, 1, initial_signal_level);
+ GainApplier gain_applier(true, gain_factor);
+
+ gain_applier.ApplyGain(fake_audio.float_frame_view());
+ EXPECT_NEAR(fake_audio.float_frame_view().channel(0)[0],
+ initial_signal_level * gain_factor, 0.1f);
+}
+
+TEST(AutomaticGainController2GainApplier, ClippingIsDone) {
+ constexpr float initial_signal_level = 30000.f;
+ constexpr float gain_factor = 10.f;
+ VectorFloatFrame fake_audio(1, 1, initial_signal_level);
+ GainApplier gain_applier(true, gain_factor);
+
+ gain_applier.ApplyGain(fake_audio.float_frame_view());
+ EXPECT_NEAR(fake_audio.float_frame_view().channel(0)[0],
+ std::numeric_limits<int16_t>::max(), 0.1f);
+}
+
+TEST(AutomaticGainController2GainApplier, ClippingIsNotDone) {
+ constexpr float initial_signal_level = 30000.f;
+ constexpr float gain_factor = 10.f;
+ VectorFloatFrame fake_audio(1, 1, initial_signal_level);
+ GainApplier gain_applier(false, gain_factor);
+
+ gain_applier.ApplyGain(fake_audio.float_frame_view());
+
+ EXPECT_NEAR(fake_audio.float_frame_view().channel(0)[0],
+ initial_signal_level * gain_factor, 0.1f);
+}
+
+TEST(AutomaticGainController2GainApplier, RampingIsDone) {
+ constexpr float initial_signal_level = 30000.f;
+ constexpr float initial_gain_factor = 1.f;
+ constexpr float target_gain_factor = 0.5f;
+ constexpr int num_channels = 3;
+ constexpr int samples_per_channel = 4;
+ VectorFloatFrame fake_audio(num_channels, samples_per_channel,
+ initial_signal_level);
+ GainApplier gain_applier(false, initial_gain_factor);
+
+ gain_applier.SetGainFactor(target_gain_factor);
+ gain_applier.ApplyGain(fake_audio.float_frame_view());
+
+ // The maximal gain change should be close to that in linear interpolation.
+ for (int channel = 0; channel < num_channels; ++channel) {
+ float max_signal_change = 0.f;
+ float last_signal_level = initial_signal_level;
+ for (const auto sample : fake_audio.float_frame_view().channel(channel)) {
+ const float current_change = fabs(last_signal_level - sample);
+ max_signal_change = std::max(max_signal_change, current_change);
+ last_signal_level = sample;
+ }
+ const float total_gain_change =
+ fabs((initial_gain_factor - target_gain_factor) * initial_signal_level);
+ EXPECT_NEAR(max_signal_change, total_gain_change / samples_per_channel,
+ 0.1f);
+ }
+
+ // Next frame should have the desired level.
+ VectorFloatFrame next_fake_audio_frame(num_channels, samples_per_channel,
+ initial_signal_level);
+ gain_applier.ApplyGain(next_fake_audio_frame.float_frame_view());
+
+ // The last sample should have the new gain.
+ EXPECT_NEAR(next_fake_audio_frame.float_frame_view().channel(0)[0],
+ initial_signal_level * target_gain_factor, 0.1f);
+}
+} // namespace webrtc
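For RampingIsDone above, the expected numbers follow directly from the test constants: the per-sample increment is (0.5 - 1.0) / 4 = -0.125, so the applied gains are 1.0, 0.875, 0.75 and 0.625, the largest per-sample signal change is 0.125 * 30000 = 3750, and total_gain_change / samples_per_channel = (0.5 * 30000) / 4 = 3750, which is what the EXPECT_NEAR checks.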
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/interpolated_gain_curve.cc b/third_party/libwebrtc/modules/audio_processing/agc2/interpolated_gain_curve.cc
new file mode 100644
index 0000000000..bb6e038514
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/interpolated_gain_curve.cc
@@ -0,0 +1,204 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/agc2/interpolated_gain_curve.h"
+
+#include <algorithm>
+#include <iterator>
+
+#include "absl/strings/string_view.h"
+#include "modules/audio_processing/agc2/agc2_common.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/strings/string_builder.h"
+
+namespace webrtc {
+
+constexpr std::array<float, kInterpolatedGainCurveTotalPoints>
+ InterpolatedGainCurve::approximation_params_x_;
+
+constexpr std::array<float, kInterpolatedGainCurveTotalPoints>
+ InterpolatedGainCurve::approximation_params_m_;
+
+constexpr std::array<float, kInterpolatedGainCurveTotalPoints>
+ InterpolatedGainCurve::approximation_params_q_;
+
+InterpolatedGainCurve::InterpolatedGainCurve(
+ ApmDataDumper* apm_data_dumper,
+ absl::string_view histogram_name_prefix)
+ : region_logger_(
+ (rtc::StringBuilder("WebRTC.Audio.")
+ << histogram_name_prefix << ".FixedDigitalGainCurveRegion.Identity")
+ .str(),
+ (rtc::StringBuilder("WebRTC.Audio.")
+ << histogram_name_prefix << ".FixedDigitalGainCurveRegion.Knee")
+ .str(),
+ (rtc::StringBuilder("WebRTC.Audio.")
+ << histogram_name_prefix << ".FixedDigitalGainCurveRegion.Limiter")
+ .str(),
+ (rtc::StringBuilder("WebRTC.Audio.")
+ << histogram_name_prefix
+ << ".FixedDigitalGainCurveRegion.Saturation")
+ .str()),
+ apm_data_dumper_(apm_data_dumper) {}
+
+InterpolatedGainCurve::~InterpolatedGainCurve() {
+ if (stats_.available) {
+ RTC_DCHECK(apm_data_dumper_);
+ apm_data_dumper_->DumpRaw("agc2_interp_gain_curve_lookups_identity",
+ stats_.look_ups_identity_region);
+ apm_data_dumper_->DumpRaw("agc2_interp_gain_curve_lookups_knee",
+ stats_.look_ups_knee_region);
+ apm_data_dumper_->DumpRaw("agc2_interp_gain_curve_lookups_limiter",
+ stats_.look_ups_limiter_region);
+ apm_data_dumper_->DumpRaw("agc2_interp_gain_curve_lookups_saturation",
+ stats_.look_ups_saturation_region);
+ region_logger_.LogRegionStats(stats_);
+ }
+}
+
+InterpolatedGainCurve::RegionLogger::RegionLogger(
+ absl::string_view identity_histogram_name,
+ absl::string_view knee_histogram_name,
+ absl::string_view limiter_histogram_name,
+ absl::string_view saturation_histogram_name)
+ : identity_histogram(
+ metrics::HistogramFactoryGetCounts(identity_histogram_name,
+ 1,
+ 10000,
+ 50)),
+ knee_histogram(metrics::HistogramFactoryGetCounts(knee_histogram_name,
+ 1,
+ 10000,
+ 50)),
+ limiter_histogram(
+ metrics::HistogramFactoryGetCounts(limiter_histogram_name,
+ 1,
+ 10000,
+ 50)),
+ saturation_histogram(
+ metrics::HistogramFactoryGetCounts(saturation_histogram_name,
+ 1,
+ 10000,
+ 50)) {}
+
+InterpolatedGainCurve::RegionLogger::~RegionLogger() = default;
+
+void InterpolatedGainCurve::RegionLogger::LogRegionStats(
+ const InterpolatedGainCurve::Stats& stats) const {
+ using Region = InterpolatedGainCurve::GainCurveRegion;
+ const int duration_s =
+ stats.region_duration_frames / (1000 / kFrameDurationMs);
+
+ switch (stats.region) {
+ case Region::kIdentity: {
+ if (identity_histogram) {
+ metrics::HistogramAdd(identity_histogram, duration_s);
+ }
+ break;
+ }
+ case Region::kKnee: {
+ if (knee_histogram) {
+ metrics::HistogramAdd(knee_histogram, duration_s);
+ }
+ break;
+ }
+ case Region::kLimiter: {
+ if (limiter_histogram) {
+ metrics::HistogramAdd(limiter_histogram, duration_s);
+ }
+ break;
+ }
+ case Region::kSaturation: {
+ if (saturation_histogram) {
+ metrics::HistogramAdd(saturation_histogram, duration_s);
+ }
+ break;
+ }
+ default: {
+ RTC_DCHECK_NOTREACHED();
+ }
+ }
+}
+
+void InterpolatedGainCurve::UpdateStats(float input_level) const {
+ stats_.available = true;
+
+ GainCurveRegion region;
+
+ if (input_level < approximation_params_x_[0]) {
+ stats_.look_ups_identity_region++;
+ region = GainCurveRegion::kIdentity;
+ } else if (input_level <
+ approximation_params_x_[kInterpolatedGainCurveKneePoints - 1]) {
+ stats_.look_ups_knee_region++;
+ region = GainCurveRegion::kKnee;
+ } else if (input_level < kMaxInputLevelLinear) {
+ stats_.look_ups_limiter_region++;
+ region = GainCurveRegion::kLimiter;
+ } else {
+ stats_.look_ups_saturation_region++;
+ region = GainCurveRegion::kSaturation;
+ }
+
+ if (region == stats_.region) {
+ ++stats_.region_duration_frames;
+ } else {
+ region_logger_.LogRegionStats(stats_);
+
+ stats_.region_duration_frames = 0;
+ stats_.region = region;
+ }
+}
+
+// Looks up a gain to apply given a non-negative input level.
+// The cost of this operation depends on the region in which `input_level`
+// falls.
+// For the identity and the saturation regions the cost is O(1).
+// For the other regions, namely knee and limiter, the cost is
+ // O(2 + log2(`kInterpolatedGainCurveTotalPoints`)), plus O(1) for the
+// linear interpolation (one product and one sum).
+float InterpolatedGainCurve::LookUpGainToApply(float input_level) const {
+ UpdateStats(input_level);
+
+ if (input_level <= approximation_params_x_[0]) {
+ // Identity region.
+ return 1.0f;
+ }
+
+ if (input_level >= kMaxInputLevelLinear) {
+ // Saturating lower bound. The saturating samples exactly hit the clipping
+ // level. This method has the lowest harmonic distortion, but it may
+ // reduce the amplitude of the non-saturating samples too much.
+ return 32768.f / input_level;
+ }
+
+ // Knee and limiter regions; find the linear piece index. Spelling
+ // out the complete type was the only way to silence both the clang
+ // plugin and the windows compilers.
+ std::array<float, kInterpolatedGainCurveTotalPoints>::const_iterator it =
+ std::lower_bound(approximation_params_x_.begin(),
+ approximation_params_x_.end(), input_level);
+ const size_t index = std::distance(approximation_params_x_.begin(), it) - 1;
+ RTC_DCHECK_LE(0, index);
+ RTC_DCHECK_LT(index, approximation_params_m_.size());
+ RTC_DCHECK_LE(approximation_params_x_[index], input_level);
+ if (index < approximation_params_m_.size() - 1) {
+ RTC_DCHECK_LE(input_level, approximation_params_x_[index + 1]);
+ }
+
+ // Piece-wise linear interpolation.
+ const float gain = approximation_params_m_[index] * input_level +
+ approximation_params_q_[index];
+ RTC_DCHECK_LE(0.f, gain);
+ return gain;
+}
+
+} // namespace webrtc
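The knee/limiter branch of LookUpGainToApply() is a standard piecewise-linear table evaluation: find the piece with std::lower_bound, then evaluate y = m[i] * x + q[i]. A self-contained sketch of the pattern with a made-up 4-point table (the real class uses 32 precomputed points):

    #include <algorithm>
    #include <array>
    #include <cstddef>

    float PiecewiseLinear(float x) {
      static constexpr std::array<float, 4> xs = {0.f, 1.f, 2.f, 3.f};
      static constexpr std::array<float, 4> ms = {1.f, 0.5f, 0.25f, 0.125f};
      static constexpr std::array<float, 4> qs = {0.f, 0.5f, 1.f, 1.375f};
      auto it = std::lower_bound(xs.begin(), xs.end(), x);
      const size_t i =
          (it == xs.begin()) ? 0 : std::distance(xs.begin(), it) - 1;
      return ms[i] * x + qs[i];  // Piece whose left endpoint is at or below x.
    }

std::lower_bound returns the first table point not less than x, so stepping back one index selects the segment containing x, exactly as in the code above.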
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/interpolated_gain_curve.h b/third_party/libwebrtc/modules/audio_processing/agc2/interpolated_gain_curve.h
new file mode 100644
index 0000000000..8dd3e48f21
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/interpolated_gain_curve.h
@@ -0,0 +1,152 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AGC2_INTERPOLATED_GAIN_CURVE_H_
+#define MODULES_AUDIO_PROCESSING_AGC2_INTERPOLATED_GAIN_CURVE_H_
+
+#include <array>
+
+#include "absl/strings/string_view.h"
+#include "modules/audio_processing/agc2/agc2_common.h"
+#include "rtc_base/gtest_prod_util.h"
+#include "system_wrappers/include/metrics.h"
+
+namespace webrtc {
+
+class ApmDataDumper;
+
+constexpr float kInputLevelScalingFactor = 32768.0f;
+
+// Defined as DbfsToLinear(kLimiterMaxInputLevelDbFs)
+constexpr float kMaxInputLevelLinear = static_cast<float>(36766.300710566735);
+
+// Interpolated gain curve using under-approximation to avoid saturation.
+//
+ // The goal of this class is to allow fast look-ups that return an
+ // accurate estimate of the gain to apply given an estimated input level.
+class InterpolatedGainCurve {
+ public:
+ enum class GainCurveRegion {
+ kIdentity = 0,
+ kKnee = 1,
+ kLimiter = 2,
+ kSaturation = 3
+ };
+
+ struct Stats {
+ // Region in which the output level equals the input one.
+ size_t look_ups_identity_region = 0;
+ // Smoothing between the identity and the limiter regions.
+ size_t look_ups_knee_region = 0;
+ // Limiter region in which the output and input levels are linearly related.
+ size_t look_ups_limiter_region = 0;
+ // Region in which saturation may occur since the input level is beyond the
+ // maximum expected by the limiter.
+ size_t look_ups_saturation_region = 0;
+ // True if stats have been populated.
+ bool available = false;
+
+ // The current region, and for how many frames the level has been
+ // in that region.
+ GainCurveRegion region = GainCurveRegion::kIdentity;
+ int64_t region_duration_frames = 0;
+ };
+
+ InterpolatedGainCurve(ApmDataDumper* apm_data_dumper,
+ absl::string_view histogram_name_prefix);
+ ~InterpolatedGainCurve();
+
+ InterpolatedGainCurve(const InterpolatedGainCurve&) = delete;
+ InterpolatedGainCurve& operator=(const InterpolatedGainCurve&) = delete;
+
+ Stats get_stats() const { return stats_; }
+
+ // Given a non-negative input level (linear scale), a scalar factor to apply
+ // to a sub-frame is returned.
+ // Levels above kLimiterMaxInputLevelDbFs will be reduced to 0 dBFS
+ // after applying this gain.
+ float LookUpGainToApply(float input_level) const;
+
+ private:
+ // For comparing 'approximation_params_*_' with ones computed by
+ // ComputeInterpolatedGainCurve.
+ FRIEND_TEST_ALL_PREFIXES(GainController2InterpolatedGainCurve,
+ CheckApproximationParams);
+
+ struct RegionLogger {
+ metrics::Histogram* identity_histogram;
+ metrics::Histogram* knee_histogram;
+ metrics::Histogram* limiter_histogram;
+ metrics::Histogram* saturation_histogram;
+
+ RegionLogger(absl::string_view identity_histogram_name,
+ absl::string_view knee_histogram_name,
+ absl::string_view limiter_histogram_name,
+ absl::string_view saturation_histogram_name);
+
+ ~RegionLogger();
+
+ void LogRegionStats(const InterpolatedGainCurve::Stats& stats) const;
+ } region_logger_;
+
+ void UpdateStats(float input_level) const;
+
+ ApmDataDumper* const apm_data_dumper_;
+
+ static constexpr std::array<float, kInterpolatedGainCurveTotalPoints>
+ approximation_params_x_ = {
+ {30057.296875, 30148.986328125, 30240.67578125, 30424.052734375,
+ 30607.4296875, 30790.806640625, 30974.18359375, 31157.560546875,
+ 31340.939453125, 31524.31640625, 31707.693359375, 31891.0703125,
+ 32074.447265625, 32257.82421875, 32441.201171875, 32624.580078125,
+ 32807.95703125, 32991.33203125, 33174.7109375, 33358.08984375,
+ 33541.46484375, 33724.84375, 33819.53515625, 34009.5390625,
+ 34200.05859375, 34389.81640625, 34674.48828125, 35054.375,
+ 35434.86328125, 35814.81640625, 36195.16796875, 36575.03125}};
+ static constexpr std::array<float, kInterpolatedGainCurveTotalPoints>
+ approximation_params_m_ = {
+ {-3.515235675877192989e-07, -1.050251626111275982e-06,
+ -2.085213736791047268e-06, -3.443004743530764244e-06,
+ -4.773849468620028347e-06, -6.077375928725814447e-06,
+ -7.353257842623861507e-06, -8.601219633419532329e-06,
+ -9.821013009059242904e-06, -1.101243378798244521e-05,
+ -1.217532644659513608e-05, -1.330956911260727793e-05,
+ -1.441507538402220234e-05, -1.549179251014720649e-05,
+ -1.653970684856176376e-05, -1.755882840370759368e-05,
+ -1.854918446042574942e-05, -1.951086778717581183e-05,
+ -2.044398024736437947e-05, -2.1348627342376858e-05,
+ -2.222496914328075945e-05, -2.265374678245279938e-05,
+ -2.242570917587727308e-05, -2.220122041762806475e-05,
+ -2.19802095671184361e-05, -2.176260204578284174e-05,
+ -2.133731686626560986e-05, -2.092481918225530535e-05,
+ -2.052459603874012828e-05, -2.013615448959171772e-05,
+ -1.975903069251216948e-05, -1.939277899509761482e-05}};
+
+ static constexpr std::array<float, kInterpolatedGainCurveTotalPoints>
+ approximation_params_q_ = {
+ {1.010565876960754395, 1.031631827354431152, 1.062929749488830566,
+ 1.104239225387573242, 1.144973039627075195, 1.185109615325927734,
+ 1.224629044532775879, 1.263512492179870605, 1.301741957664489746,
+ 1.339300632476806641, 1.376173257827758789, 1.412345528602600098,
+ 1.447803974151611328, 1.482536554336547852, 1.516532182693481445,
+ 1.549780607223510742, 1.582272171974182129, 1.613999366760253906,
+ 1.644955039024353027, 1.675132393836975098, 1.704526185989379883,
+ 1.718986630439758301, 1.711274504661560059, 1.703639745712280273,
+ 1.696081161499023438, 1.688597679138183594, 1.673851132392883301,
+ 1.659391283988952637, 1.645209431648254395, 1.631297469139099121,
+ 1.617647409439086914, 1.604251742362976074}};
+
+ // Stats.
+ mutable Stats stats_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AGC2_INTERPOLATED_GAIN_CURVE_H_
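A minimal usage sketch (an instance id of 0 disables data dumping, mirroring the unit tests below; the histogram prefix is arbitrary):

    webrtc::ApmDataDumper apm_data_dumper(0);
    webrtc::InterpolatedGainCurve igc(&apm_data_dumper, "Test");
    // Linear FloatS16 input level in, multiplicative gain (<= 1.0) out.
    const float gain = igc.LookUpGainToApply(/*input_level=*/33000.f);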
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/interpolated_gain_curve_unittest.cc b/third_party/libwebrtc/modules/audio_processing/agc2/interpolated_gain_curve_unittest.cc
new file mode 100644
index 0000000000..7861ae997d
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/interpolated_gain_curve_unittest.cc
@@ -0,0 +1,203 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/agc2/interpolated_gain_curve.h"
+
+#include <array>
+#include <type_traits>
+#include <vector>
+
+#include "api/array_view.h"
+#include "common_audio/include/audio_util.h"
+#include "modules/audio_processing/agc2/agc2_common.h"
+#include "modules/audio_processing/agc2/compute_interpolated_gain_curve.h"
+#include "modules/audio_processing/agc2/limiter_db_gain_curve.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/gunit.h"
+
+namespace webrtc {
+namespace {
+
+constexpr double kLevelEpsilon = 1e-2 * kMaxAbsFloatS16Value;
+constexpr float kInterpolatedGainCurveTolerance = 1.f / 32768.f;
+ApmDataDumper apm_data_dumper(0);
+static_assert(std::is_trivially_destructible<LimiterDbGainCurve>::value, "");
+const LimiterDbGainCurve limiter;
+
+} // namespace
+
+TEST(GainController2InterpolatedGainCurve, CreateUse) {
+ InterpolatedGainCurve igc(&apm_data_dumper, "");
+
+ const auto levels = test::LinSpace(
+ kLevelEpsilon, DbfsToFloatS16(limiter.max_input_level_db() + 1), 500);
+ for (const auto level : levels) {
+ EXPECT_GE(igc.LookUpGainToApply(level), 0.0f);
+ }
+}
+
+TEST(GainController2InterpolatedGainCurve, CheckValidOutput) {
+ InterpolatedGainCurve igc(&apm_data_dumper, "");
+
+ const auto levels = test::LinSpace(
+ kLevelEpsilon, limiter.max_input_level_linear() * 2.0, 500);
+ for (const auto level : levels) {
+ SCOPED_TRACE(std::to_string(level));
+ const float gain = igc.LookUpGainToApply(level);
+ EXPECT_LE(0.0f, gain);
+ EXPECT_LE(gain, 1.0f);
+ }
+}
+
+TEST(GainController2InterpolatedGainCurve, CheckMonotonicity) {
+ InterpolatedGainCurve igc(&apm_data_dumper, "");
+
+ const auto levels = test::LinSpace(
+ kLevelEpsilon, limiter.max_input_level_linear() + kLevelEpsilon + 0.5,
+ 500);
+ float prev_gain = igc.LookUpGainToApply(0.0f);
+ for (const auto level : levels) {
+ const float gain = igc.LookUpGainToApply(level);
+ EXPECT_GE(prev_gain, gain);
+ prev_gain = gain;
+ }
+}
+
+TEST(GainController2InterpolatedGainCurve, CheckApproximation) {
+ InterpolatedGainCurve igc(&apm_data_dumper, "");
+
+ const auto levels = test::LinSpace(
+ kLevelEpsilon, limiter.max_input_level_linear() - kLevelEpsilon, 500);
+ for (const auto level : levels) {
+ SCOPED_TRACE(std::to_string(level));
+ EXPECT_LT(
+ std::fabs(limiter.GetGainLinear(level) - igc.LookUpGainToApply(level)),
+ kInterpolatedGainCurveTolerance);
+ }
+}
+
+TEST(GainController2InterpolatedGainCurve, CheckRegionBoundaries) {
+ InterpolatedGainCurve igc(&apm_data_dumper, "");
+
+ const std::vector<double> levels{
+ {kLevelEpsilon, limiter.knee_start_linear() + kLevelEpsilon,
+ limiter.limiter_start_linear() + kLevelEpsilon,
+ limiter.max_input_level_linear() + kLevelEpsilon}};
+ for (const auto level : levels) {
+ igc.LookUpGainToApply(level);
+ }
+
+ const auto stats = igc.get_stats();
+ EXPECT_EQ(1ul, stats.look_ups_identity_region);
+ EXPECT_EQ(1ul, stats.look_ups_knee_region);
+ EXPECT_EQ(1ul, stats.look_ups_limiter_region);
+ EXPECT_EQ(1ul, stats.look_ups_saturation_region);
+}
+
+TEST(GainController2InterpolatedGainCurve, CheckIdentityRegion) {
+ constexpr size_t kNumSteps = 10;
+ InterpolatedGainCurve igc(&apm_data_dumper, "");
+
+ const auto levels =
+ test::LinSpace(kLevelEpsilon, limiter.knee_start_linear(), kNumSteps);
+ for (const auto level : levels) {
+ SCOPED_TRACE(std::to_string(level));
+ EXPECT_EQ(1.0f, igc.LookUpGainToApply(level));
+ }
+
+ const auto stats = igc.get_stats();
+ EXPECT_EQ(kNumSteps - 1, stats.look_ups_identity_region);
+ EXPECT_EQ(1ul, stats.look_ups_knee_region);
+ EXPECT_EQ(0ul, stats.look_ups_limiter_region);
+ EXPECT_EQ(0ul, stats.look_ups_saturation_region);
+}
+
+TEST(GainController2InterpolatedGainCurve, CheckNoOverApproximationKnee) {
+ constexpr size_t kNumSteps = 10;
+ InterpolatedGainCurve igc(&apm_data_dumper, "");
+
+ const auto levels =
+ test::LinSpace(limiter.knee_start_linear() + kLevelEpsilon,
+ limiter.limiter_start_linear(), kNumSteps);
+ for (const auto level : levels) {
+ SCOPED_TRACE(std::to_string(level));
+ // Small tolerance added (needed because comparing a float with a double).
+ EXPECT_LE(igc.LookUpGainToApply(level),
+ limiter.GetGainLinear(level) + 1e-7);
+ }
+
+ const auto stats = igc.get_stats();
+ EXPECT_EQ(0ul, stats.look_ups_identity_region);
+ EXPECT_EQ(kNumSteps - 1, stats.look_ups_knee_region);
+ EXPECT_EQ(1ul, stats.look_ups_limiter_region);
+ EXPECT_EQ(0ul, stats.look_ups_saturation_region);
+}
+
+TEST(GainController2InterpolatedGainCurve, CheckNoOverApproximationBeyondKnee) {
+ constexpr size_t kNumSteps = 10;
+ InterpolatedGainCurve igc(&apm_data_dumper, "");
+
+ const auto levels = test::LinSpace(
+ limiter.limiter_start_linear() + kLevelEpsilon,
+ limiter.max_input_level_linear() - kLevelEpsilon, kNumSteps);
+ for (const auto level : levels) {
+ SCOPED_TRACE(std::to_string(level));
+ // Small tolerance added (needed because comparing a float with a double).
+ EXPECT_LE(igc.LookUpGainToApply(level),
+ limiter.GetGainLinear(level) + 1e-7);
+ }
+
+ const auto stats = igc.get_stats();
+ EXPECT_EQ(0ul, stats.look_ups_identity_region);
+ EXPECT_EQ(0ul, stats.look_ups_knee_region);
+ EXPECT_EQ(kNumSteps, stats.look_ups_limiter_region);
+ EXPECT_EQ(0ul, stats.look_ups_saturation_region);
+}
+
+TEST(GainController2InterpolatedGainCurve,
+ CheckNoOverApproximationWithSaturation) {
+ constexpr size_t kNumSteps = 3;
+ InterpolatedGainCurve igc(&apm_data_dumper, "");
+
+ const auto levels = test::LinSpace(
+ limiter.max_input_level_linear() + kLevelEpsilon,
+ limiter.max_input_level_linear() + kLevelEpsilon + 0.5, kNumSteps);
+ for (const auto level : levels) {
+ SCOPED_TRACE(std::to_string(level));
+ EXPECT_LE(igc.LookUpGainToApply(level), limiter.GetGainLinear(level));
+ }
+
+ const auto stats = igc.get_stats();
+ EXPECT_EQ(0ul, stats.look_ups_identity_region);
+ EXPECT_EQ(0ul, stats.look_ups_knee_region);
+ EXPECT_EQ(0ul, stats.look_ups_limiter_region);
+ EXPECT_EQ(kNumSteps, stats.look_ups_saturation_region);
+}
+
+TEST(GainController2InterpolatedGainCurve, CheckApproximationParams) {
+ test::InterpolatedParameters parameters =
+ test::ComputeInterpolatedGainCurveApproximationParams();
+
+ InterpolatedGainCurve igc(&apm_data_dumper, "");
+
+ for (size_t i = 0; i < kInterpolatedGainCurveTotalPoints; ++i) {
+ // The tolerance levels are chosen to account for deviations due
+ // to computing with single precision floating point numbers.
+ EXPECT_NEAR(igc.approximation_params_x_[i],
+ parameters.computed_approximation_params_x[i], 0.9f);
+ EXPECT_NEAR(igc.approximation_params_m_[i],
+ parameters.computed_approximation_params_m[i], 0.00001f);
+ EXPECT_NEAR(igc.approximation_params_q_[i],
+ parameters.computed_approximation_params_q[i], 0.001f);
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/limiter.cc b/third_party/libwebrtc/modules/audio_processing/agc2/limiter.cc
new file mode 100644
index 0000000000..7a1e2202be
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/limiter.cc
@@ -0,0 +1,155 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/agc2/limiter.h"
+
+#include <algorithm>
+#include <array>
+#include <cmath>
+
+#include "absl/strings/string_view.h"
+#include "api/array_view.h"
+#include "modules/audio_processing/agc2/agc2_common.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "rtc_base/numerics/safe_minmax.h"
+
+namespace webrtc {
+namespace {
+
+ // This constant affects how scaling factors are interpolated for the first
+ // sub-frame of a frame. Only when the first sub-frame has an estimated level
+ // greater than that of the previously analyzed sub-frame is linear
+ // interpolation replaced with a power function, which reduces the chance of
+ // over-shooting (and hence saturation) at the cost of some fixed-gain
+ // effectiveness.
+constexpr float kAttackFirstSubframeInterpolationPower = 8.0f;
+
+void InterpolateFirstSubframe(float last_factor,
+ float current_factor,
+ rtc::ArrayView<float> subframe) {
+ const int n = rtc::dchecked_cast<int>(subframe.size());
+ constexpr float p = kAttackFirstSubframeInterpolationPower;
+ for (int i = 0; i < n; ++i) {
+ subframe[i] = std::pow(1.f - static_cast<float>(i) / n, p) *
+ (last_factor - current_factor) +
+ current_factor;
+ }
+}
+
+void ComputePerSampleSubframeFactors(
+ const std::array<float, kSubFramesInFrame + 1>& scaling_factors,
+ int samples_per_channel,
+ rtc::ArrayView<float> per_sample_scaling_factors) {
+ const int num_subframes = scaling_factors.size() - 1;
+ const int subframe_size =
+ rtc::CheckedDivExact(samples_per_channel, num_subframes);
+
+ // Handle first sub-frame differently in case of attack.
+ const bool is_attack = scaling_factors[0] > scaling_factors[1];
+ if (is_attack) {
+ InterpolateFirstSubframe(
+ scaling_factors[0], scaling_factors[1],
+ rtc::ArrayView<float>(
+ per_sample_scaling_factors.subview(0, subframe_size)));
+ }
+
+ for (int i = is_attack ? 1 : 0; i < num_subframes; ++i) {
+ const int subframe_start = i * subframe_size;
+ const float scaling_start = scaling_factors[i];
+ const float scaling_end = scaling_factors[i + 1];
+ const float scaling_diff = (scaling_end - scaling_start) / subframe_size;
+ for (int j = 0; j < subframe_size; ++j) {
+ per_sample_scaling_factors[subframe_start + j] =
+ scaling_start + scaling_diff * j;
+ }
+ }
+}
+
+void ScaleSamples(rtc::ArrayView<const float> per_sample_scaling_factors,
+ AudioFrameView<float> signal) {
+ const int samples_per_channel = signal.samples_per_channel();
+ RTC_DCHECK_EQ(samples_per_channel, per_sample_scaling_factors.size());
+ for (int i = 0; i < signal.num_channels(); ++i) {
+ rtc::ArrayView<float> channel = signal.channel(i);
+ for (int j = 0; j < samples_per_channel; ++j) {
+ channel[j] = rtc::SafeClamp(channel[j] * per_sample_scaling_factors[j],
+ kMinFloatS16Value, kMaxFloatS16Value);
+ }
+ }
+}
+
+void CheckLimiterSampleRate(int sample_rate_hz) {
+ // Check that per_sample_scaling_factors_ is large enough.
+ RTC_DCHECK_LE(sample_rate_hz,
+ kMaximalNumberOfSamplesPerChannel * 1000 / kFrameDurationMs);
+}
+
+} // namespace
+
+Limiter::Limiter(int sample_rate_hz,
+ ApmDataDumper* apm_data_dumper,
+ absl::string_view histogram_name)
+ : interp_gain_curve_(apm_data_dumper, histogram_name),
+ level_estimator_(sample_rate_hz, apm_data_dumper),
+ apm_data_dumper_(apm_data_dumper) {
+ CheckLimiterSampleRate(sample_rate_hz);
+}
+
+Limiter::~Limiter() = default;
+
+void Limiter::Process(AudioFrameView<float> signal) {
+ const std::array<float, kSubFramesInFrame> level_estimate =
+ level_estimator_.ComputeLevel(signal);
+
+ RTC_DCHECK_EQ(level_estimate.size() + 1, scaling_factors_.size());
+ scaling_factors_[0] = last_scaling_factor_;
+ std::transform(level_estimate.begin(), level_estimate.end(),
+ scaling_factors_.begin() + 1, [this](float x) {
+ return interp_gain_curve_.LookUpGainToApply(x);
+ });
+
+ const int samples_per_channel = signal.samples_per_channel();
+ RTC_DCHECK_LE(samples_per_channel, kMaximalNumberOfSamplesPerChannel);
+
+ auto per_sample_scaling_factors = rtc::ArrayView<float>(
+ &per_sample_scaling_factors_[0], samples_per_channel);
+ ComputePerSampleSubframeFactors(scaling_factors_, samples_per_channel,
+ per_sample_scaling_factors);
+ ScaleSamples(per_sample_scaling_factors, signal);
+
+ last_scaling_factor_ = scaling_factors_.back();
+
+ // Dump data for debug.
+ apm_data_dumper_->DumpRaw("agc2_limiter_last_scaling_factor",
+ last_scaling_factor_);
+ apm_data_dumper_->DumpRaw(
+ "agc2_limiter_region",
+ static_cast<int>(interp_gain_curve_.get_stats().region));
+}
+
+InterpolatedGainCurve::Stats Limiter::GetGainCurveStats() const {
+ return interp_gain_curve_.get_stats();
+}
+
+void Limiter::SetSampleRate(int sample_rate_hz) {
+ CheckLimiterSampleRate(sample_rate_hz);
+ level_estimator_.SetSampleRate(sample_rate_hz);
+}
+
+void Limiter::Reset() {
+ level_estimator_.Reset();
+}
+
+float Limiter::LastAudioLevel() const {
+ return level_estimator_.LastAudioLevel();
+}
+
+} // namespace webrtc
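ComputePerSampleSubframeFactors() expands the kSubFramesInFrame + 1 scaling anchors into one factor per sample. A reduced sketch of its non-attack branch with generic sizes (not the AGC2 constants):

    #include <vector>

    // Expand anchors s[0..n] into per-sample factors by linear interpolation:
    // factor(i * len + j) = s[i] + (s[i + 1] - s[i]) * j / len.
    std::vector<float> ExpandAnchors(const std::vector<float>& s, int len) {
      const int num_subframes = static_cast<int>(s.size()) - 1;
      std::vector<float> out(num_subframes * len);
      for (int i = 0; i < num_subframes; ++i) {
        const float diff = (s[i + 1] - s[i]) / len;
        for (int j = 0; j < len; ++j) {
          out[i * len + j] = s[i] + diff * j;
        }
      }
      return out;
    }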
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/limiter.h b/third_party/libwebrtc/modules/audio_processing/agc2/limiter.h
new file mode 100644
index 0000000000..d4d556349c
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/limiter.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AGC2_LIMITER_H_
+#define MODULES_AUDIO_PROCESSING_AGC2_LIMITER_H_
+
+#include <vector>
+
+#include "absl/strings/string_view.h"
+#include "modules/audio_processing/agc2/fixed_digital_level_estimator.h"
+#include "modules/audio_processing/agc2/interpolated_gain_curve.h"
+#include "modules/audio_processing/include/audio_frame_view.h"
+
+namespace webrtc {
+class ApmDataDumper;
+
+class Limiter {
+ public:
+ Limiter(int sample_rate_hz,
+ ApmDataDumper* apm_data_dumper,
+ absl::string_view histogram_name_prefix);
+ Limiter(const Limiter& limiter) = delete;
+ Limiter& operator=(const Limiter& limiter) = delete;
+ ~Limiter();
+
+ // Applies limiter and hard-clipping to `signal`.
+ void Process(AudioFrameView<float> signal);
+ InterpolatedGainCurve::Stats GetGainCurveStats() const;
+
+ // Supported rates must be
+ // * supported by FixedDigitalLevelEstimator
+ // * below kMaximalNumberOfSamplesPerChannel*1000/kFrameDurationMs
+ // so that samples_per_channel fit in the
+ // per_sample_scaling_factors_ array.
+ void SetSampleRate(int sample_rate_hz);
+
+ // Resets the internal state.
+ void Reset();
+
+ float LastAudioLevel() const;
+
+ private:
+ const InterpolatedGainCurve interp_gain_curve_;
+ FixedDigitalLevelEstimator level_estimator_;
+ ApmDataDumper* const apm_data_dumper_ = nullptr;
+
+ // Work array containing the sub-frame scaling factors to be interpolated.
+ std::array<float, kSubFramesInFrame + 1> scaling_factors_ = {};
+ std::array<float, kMaximalNumberOfSamplesPerChannel>
+ per_sample_scaling_factors_ = {};
+ float last_scaling_factor_ = 1.f;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AGC2_LIMITER_H_
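A usage sketch mirroring the unit tests further below (10 ms frames, i.e. 480 samples per channel at 48 kHz):

    webrtc::ApmDataDumper apm_data_dumper(0);
    webrtc::Limiter limiter(/*sample_rate_hz=*/48000, &apm_data_dumper, "");
    // For each 10 ms frame:
    //   limiter.Process(frame_view);  // Limits and hard-clips in place.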
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/limiter_db_gain_curve.cc b/third_party/libwebrtc/modules/audio_processing/agc2/limiter_db_gain_curve.cc
new file mode 100644
index 0000000000..d47c0b2e17
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/limiter_db_gain_curve.cc
@@ -0,0 +1,138 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/agc2/limiter_db_gain_curve.h"
+
+#include <cmath>
+
+#include "common_audio/include/audio_util.h"
+#include "modules/audio_processing/agc2/agc2_common.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace {
+
+double ComputeKneeStart(double max_input_level_db,
+ double knee_smoothness_db,
+ double compression_ratio) {
+ RTC_CHECK_LT((compression_ratio - 1.0) * knee_smoothness_db /
+ (2.0 * compression_ratio),
+ max_input_level_db);
+ return -knee_smoothness_db / 2.0 -
+ max_input_level_db / (compression_ratio - 1.0);
+}
+
+std::array<double, 3> ComputeKneeRegionPolynomial(double knee_start_dbfs,
+ double knee_smoothness_db,
+ double compression_ratio) {
+ const double a = (1.0 - compression_ratio) /
+ (2.0 * knee_smoothness_db * compression_ratio);
+ const double b = 1.0 - 2.0 * a * knee_start_dbfs;
+ const double c = a * knee_start_dbfs * knee_start_dbfs;
+ return {{a, b, c}};
+}
+
+double ComputeLimiterD1(double max_input_level_db, double compression_ratio) {
+ return (std::pow(10.0, -max_input_level_db / (20.0 * compression_ratio)) *
+ (1.0 - compression_ratio) / compression_ratio) /
+ kMaxAbsFloatS16Value;
+}
+
+constexpr double ComputeLimiterD2(double compression_ratio) {
+ return (1.0 - 2.0 * compression_ratio) / compression_ratio;
+}
+
+double ComputeLimiterI2(double max_input_level_db,
+ double compression_ratio,
+ double gain_curve_limiter_i1) {
+ RTC_CHECK_NE(gain_curve_limiter_i1, 0.f);
+ return std::pow(10.0, -max_input_level_db / (20.0 * compression_ratio)) /
+ gain_curve_limiter_i1 /
+ std::pow(kMaxAbsFloatS16Value, gain_curve_limiter_i1 - 1);
+}
+
+} // namespace
+
+LimiterDbGainCurve::LimiterDbGainCurve()
+ : max_input_level_linear_(DbfsToFloatS16(max_input_level_db_)),
+ knee_start_dbfs_(ComputeKneeStart(max_input_level_db_,
+ knee_smoothness_db_,
+ compression_ratio_)),
+ knee_start_linear_(DbfsToFloatS16(knee_start_dbfs_)),
+ limiter_start_dbfs_(knee_start_dbfs_ + knee_smoothness_db_),
+ limiter_start_linear_(DbfsToFloatS16(limiter_start_dbfs_)),
+ knee_region_polynomial_(ComputeKneeRegionPolynomial(knee_start_dbfs_,
+ knee_smoothness_db_,
+ compression_ratio_)),
+ gain_curve_limiter_d1_(
+ ComputeLimiterD1(max_input_level_db_, compression_ratio_)),
+ gain_curve_limiter_d2_(ComputeLimiterD2(compression_ratio_)),
+ gain_curve_limiter_i1_(1.0 / compression_ratio_),
+ gain_curve_limiter_i2_(ComputeLimiterI2(max_input_level_db_,
+ compression_ratio_,
+ gain_curve_limiter_i1_)) {
+ static_assert(knee_smoothness_db_ > 0.0f, "");
+ static_assert(compression_ratio_ > 1.0f, "");
+ RTC_CHECK_GE(max_input_level_db_, knee_start_dbfs_ + knee_smoothness_db_);
+}
+
+constexpr double LimiterDbGainCurve::max_input_level_db_;
+constexpr double LimiterDbGainCurve::knee_smoothness_db_;
+constexpr double LimiterDbGainCurve::compression_ratio_;
+
+double LimiterDbGainCurve::GetOutputLevelDbfs(double input_level_dbfs) const {
+ if (input_level_dbfs < knee_start_dbfs_) {
+ return input_level_dbfs;
+ } else if (input_level_dbfs < limiter_start_dbfs_) {
+ return GetKneeRegionOutputLevelDbfs(input_level_dbfs);
+ }
+ return GetCompressorRegionOutputLevelDbfs(input_level_dbfs);
+}
+
+double LimiterDbGainCurve::GetGainLinear(double input_level_linear) const {
+ if (input_level_linear < knee_start_linear_) {
+ return 1.0;
+ }
+ return DbfsToFloatS16(
+ GetOutputLevelDbfs(FloatS16ToDbfs(input_level_linear))) /
+ input_level_linear;
+}
+
+// Computes the first derivative of GetGainLinear() in `x`.
+double LimiterDbGainCurve::GetGainFirstDerivativeLinear(double x) const {
+ // Beyond-knee region only.
+ RTC_CHECK_GE(x, limiter_start_linear_ - 1e-7 * kMaxAbsFloatS16Value);
+ return gain_curve_limiter_d1_ *
+ std::pow(x / kMaxAbsFloatS16Value, gain_curve_limiter_d2_);
+}
+
+// Computes the integral of GetGainLinear() in the range [x0, x1].
+double LimiterDbGainCurve::GetGainIntegralLinear(double x0, double x1) const {
+ RTC_CHECK_LE(x0, x1); // Valid interval.
+ RTC_CHECK_GE(x0, limiter_start_linear_); // Beyond-knee region only.
+ auto limiter_integral = [this](const double& x) {
+ return gain_curve_limiter_i2_ * std::pow(x, gain_curve_limiter_i1_);
+ };
+ return limiter_integral(x1) - limiter_integral(x0);
+}
+
+double LimiterDbGainCurve::GetKneeRegionOutputLevelDbfs(
+ double input_level_dbfs) const {
+ return knee_region_polynomial_[0] * input_level_dbfs * input_level_dbfs +
+ knee_region_polynomial_[1] * input_level_dbfs +
+ knee_region_polynomial_[2];
+}
+
+double LimiterDbGainCurve::GetCompressorRegionOutputLevelDbfs(
+ double input_level_dbfs) const {
+ return (input_level_dbfs - max_input_level_db_) / compression_ratio_;
+}
+
+} // namespace webrtc
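The knee coefficients above follow from smoothness constraints. Writing k for knee_start_dbfs_, s for the knee smoothness, r for the compression ratio and M for the max input level (all in dB), the quadratic y = a*x^2 + b*x + c must match the identity region in value and slope at x = k, and the compressor line y = (x - M)/r in value and slope at x = k + s. Checking the formulas in ComputeKneeStart() and ComputeKneeRegionPolynomial():

    y'(k)     = 2*a*k + b = 1          =>  b = 1 - 2*a*k
    y(k)      = a*k^2 + b*k + c = k    =>  c = a*k^2
    y'(k + s) = 1 + 2*a*s = 1/r        =>  a = (1 - r) / (2*s*r)
    y(k + s)  = (k + s - M)/r          =>  k = -s/2 - M/(r - 1)

so the gain curve is C^1-continuous at both knee boundaries.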
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/limiter_db_gain_curve.h b/third_party/libwebrtc/modules/audio_processing/agc2/limiter_db_gain_curve.h
new file mode 100644
index 0000000000..9086e26739
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/limiter_db_gain_curve.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AGC2_LIMITER_DB_GAIN_CURVE_H_
+#define MODULES_AUDIO_PROCESSING_AGC2_LIMITER_DB_GAIN_CURVE_H_
+
+#include <array>
+
+#include "modules/audio_processing/agc2/agc2_testing_common.h"
+
+namespace webrtc {
+
+// A class for computing a limiter gain curve (in dB scale) given a set of
+// hard-coded parameters (namely, kLimiterDbGainCurveMaxInputLevelDbFs,
+// kLimiterDbGainCurveKneeSmoothnessDb, and
+// kLimiterDbGainCurveCompressionRatio). The generated curve consists of four
+// regions: identity (linear), knee (quadratic polynomial), compression
+// (linear), saturation (linear). The aforementioned constants are used to shape
+// the different regions.
+class LimiterDbGainCurve {
+ public:
+ LimiterDbGainCurve();
+
+ double max_input_level_db() const { return max_input_level_db_; }
+ double max_input_level_linear() const { return max_input_level_linear_; }
+ double knee_start_linear() const { return knee_start_linear_; }
+ double limiter_start_linear() const { return limiter_start_linear_; }
+
+ // These methods can be marked 'constexpr' in C++ 14.
+ double GetOutputLevelDbfs(double input_level_dbfs) const;
+ double GetGainLinear(double input_level_linear) const;
+ double GetGainFirstDerivativeLinear(double x) const;
+ double GetGainIntegralLinear(double x0, double x1) const;
+
+ private:
+ double GetKneeRegionOutputLevelDbfs(double input_level_dbfs) const;
+ double GetCompressorRegionOutputLevelDbfs(double input_level_dbfs) const;
+
+ static constexpr double max_input_level_db_ = test::kLimiterMaxInputLevelDbFs;
+ static constexpr double knee_smoothness_db_ = test::kLimiterKneeSmoothnessDb;
+ static constexpr double compression_ratio_ = test::kLimiterCompressionRatio;
+
+ const double max_input_level_linear_;
+
+ // Do not modify signal with level <= knee_start_dbfs_.
+ const double knee_start_dbfs_;
+ const double knee_start_linear_;
+
+ // The upper end of the knee region, which is between knee_start_dbfs_ and
+ // limiter_start_dbfs_.
+ const double limiter_start_dbfs_;
+ const double limiter_start_linear_;
+
+ // Coefficients {a, b, c} of the knee region polynomial
+ // ax^2 + bx + c in the DB scale.
+ const std::array<double, 3> knee_region_polynomial_;
+
+ // Parameters for the computation of the first derivative of GetGainLinear().
+ const double gain_curve_limiter_d1_;
+ const double gain_curve_limiter_d2_;
+
+ // Parameters for the computation of the integral of GetGainLinear().
+ const double gain_curve_limiter_i1_;
+ const double gain_curve_limiter_i2_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AGC2_LIMITER_DB_GAIN_CURVE_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/limiter_db_gain_curve_unittest.cc b/third_party/libwebrtc/modules/audio_processing/agc2/limiter_db_gain_curve_unittest.cc
new file mode 100644
index 0000000000..049c8d568e
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/limiter_db_gain_curve_unittest.cc
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/agc2/limiter_db_gain_curve.h"
+
+#include "rtc_base/gunit.h"
+
+namespace webrtc {
+
+TEST(FixedDigitalGainController2Limiter, ConstructDestruct) {
+ LimiterDbGainCurve l;
+}
+
+TEST(FixedDigitalGainController2Limiter, GainCurveShouldBeMonotone) {
+ LimiterDbGainCurve l;
+ float last_output_level = 0.f;
+ bool has_last_output_level = false;
+ for (float level = -90.f; level <= l.max_input_level_db(); level += 0.5f) {
+ const float current_output_level = l.GetOutputLevelDbfs(level);
+ if (!has_last_output_level) {
+ last_output_level = current_output_level;
+ has_last_output_level = true;
+ }
+ EXPECT_LE(last_output_level, current_output_level);
+ last_output_level = current_output_level;
+ }
+}
+
+TEST(FixedDigitalGainController2Limiter, GainCurveShouldBeContinuous) {
+ LimiterDbGainCurve l;
+ float last_output_level = 0.f;
+ bool has_last_output_level = false;
+ constexpr float kMaxDelta = 0.5f;
+ for (float level = -90.f; level <= l.max_input_level_db(); level += 0.5f) {
+ const float current_output_level = l.GetOutputLevelDbfs(level);
+ if (!has_last_output_level) {
+ last_output_level = current_output_level;
+ has_last_output_level = true;
+ }
+ EXPECT_LE(current_output_level, last_output_level + kMaxDelta);
+ last_output_level = current_output_level;
+ }
+}
+
+TEST(FixedDigitalGainController2Limiter, OutputGainShouldBeLessThanFullScale) {
+ LimiterDbGainCurve l;
+ for (float level = -90.f; level <= l.max_input_level_db(); level += 0.5f) {
+ const float current_output_level = l.GetOutputLevelDbfs(level);
+ EXPECT_LE(current_output_level, 0.f);
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/limiter_unittest.cc b/third_party/libwebrtc/modules/audio_processing/agc2/limiter_unittest.cc
new file mode 100644
index 0000000000..e662a7fc89
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/limiter_unittest.cc
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/agc2/limiter.h"
+
+#include "common_audio/include/audio_util.h"
+#include "modules/audio_processing/agc2/agc2_common.h"
+#include "modules/audio_processing/agc2/agc2_testing_common.h"
+#include "modules/audio_processing/agc2/vector_float_frame.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+#include "rtc_base/gunit.h"
+
+namespace webrtc {
+
+TEST(Limiter, LimiterShouldConstructAndRun) {
+ const int sample_rate_hz = 48000;
+ ApmDataDumper apm_data_dumper(0);
+
+ Limiter limiter(sample_rate_hz, &apm_data_dumper, "");
+
+ VectorFloatFrame vectors_with_float_frame(1, sample_rate_hz / 100,
+ kMaxAbsFloatS16Value);
+ limiter.Process(vectors_with_float_frame.float_frame_view());
+}
+
+TEST(Limiter, OutputVolumeAboveThreshold) {
+ const int sample_rate_hz = 48000;
+ const float input_level =
+ (kMaxAbsFloatS16Value + DbfsToFloatS16(test::kLimiterMaxInputLevelDbFs)) /
+ 2.f;
+ ApmDataDumper apm_data_dumper(0);
+
+ Limiter limiter(sample_rate_hz, &apm_data_dumper, "");
+
+ // Give the level estimator time to adapt.
+ for (int i = 0; i < 5; ++i) {
+ VectorFloatFrame vectors_with_float_frame(1, sample_rate_hz / 100,
+ input_level);
+ limiter.Process(vectors_with_float_frame.float_frame_view());
+ }
+
+ VectorFloatFrame vectors_with_float_frame(1, sample_rate_hz / 100,
+ input_level);
+ limiter.Process(vectors_with_float_frame.float_frame_view());
+ rtc::ArrayView<const float> channel =
+ vectors_with_float_frame.float_frame_view().channel(0);
+
+ for (const auto& sample : channel) {
+ EXPECT_LT(0.9f * kMaxAbsFloatS16Value, sample);
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/noise_level_estimator.cc b/third_party/libwebrtc/modules/audio_processing/agc2/noise_level_estimator.cc
new file mode 100644
index 0000000000..9fb1c24b65
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/noise_level_estimator.cc
@@ -0,0 +1,164 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/agc2/noise_level_estimator.h"
+
+#include <stddef.h>
+
+#include <algorithm>
+#include <cmath>
+#include <numeric>
+
+#include "api/array_view.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace {
+
+constexpr int kFramesPerSecond = 100;
+
+float FrameEnergy(const AudioFrameView<const float>& audio) {
+ float energy = 0.0f;
+ for (int k = 0; k < audio.num_channels(); ++k) {
+ float channel_energy =
+ std::accumulate(audio.channel(k).begin(), audio.channel(k).end(), 0.0f,
+ [](float a, float b) -> float { return a + b * b; });
+ energy = std::max(channel_energy, energy);
+ }
+ return energy;
+}
+
+float EnergyToDbfs(float signal_energy, int num_samples) {
+ RTC_DCHECK_GE(signal_energy, 0.0f);
+ const float rms_square = signal_energy / num_samples;
+ constexpr float kMinDbfs = -90.30899869919436f;
+ if (rms_square <= 1.0f) {
+ return kMinDbfs;
+ }
+ return 10.0f * std::log10(rms_square) + kMinDbfs;
+}
+
+ // Updates the noise floor with instant decay and slow attack. This tuning is
+ // specific to AGC2, so that (i) the gain can be promptly increased if the
+ // noise floor drops (instant decay) and (ii) with music or fast speech, where
+ // the noise floor tends to be overestimated, the gain reduction is slowed
+ // down (slow attack).
+float SmoothNoiseFloorEstimate(float current_estimate, float new_estimate) {
+ constexpr float kAttack = 0.5f;
+ if (current_estimate < new_estimate) {
+ // Attack phase.
+ return kAttack * new_estimate + (1.0f - kAttack) * current_estimate;
+ }
+ // Instant decay.
+ return new_estimate;
+}
+
+class NoiseFloorEstimator : public NoiseLevelEstimator {
+ public:
+ // Update the noise floor every 5 seconds.
+ static constexpr int kUpdatePeriodNumFrames = 500;
+ static_assert(kUpdatePeriodNumFrames >= 200,
+ "A too small value may cause noise level overestimation.");
+ static_assert(kUpdatePeriodNumFrames <= 1500,
+ "A too large value may make AGC2 slow at reacting to increased "
+ "noise levels.");
+
+ NoiseFloorEstimator(ApmDataDumper* data_dumper) : data_dumper_(data_dumper) {
+ // Initially assume that 48 kHz will be used. `Analyze()` will detect the
+ // used sample rate and call `Initialize()` again if needed.
+ Initialize(/*sample_rate_hz=*/48000);
+ }
+ NoiseFloorEstimator(const NoiseFloorEstimator&) = delete;
+ NoiseFloorEstimator& operator=(const NoiseFloorEstimator&) = delete;
+ ~NoiseFloorEstimator() = default;
+
+ float Analyze(const AudioFrameView<const float>& frame) override {
+ // Detect sample rate changes.
+ const int sample_rate_hz =
+ static_cast<int>(frame.samples_per_channel() * kFramesPerSecond);
+ if (sample_rate_hz != sample_rate_hz_) {
+ Initialize(sample_rate_hz);
+ }
+
+ const float frame_energy = FrameEnergy(frame);
+ if (frame_energy <= min_noise_energy_) {
+ // Ignore frames when muted or below the minimum measurable energy.
+ data_dumper_->DumpRaw("agc2_noise_floor_estimator_preliminary_level",
+ noise_energy_);
+ return EnergyToDbfs(noise_energy_,
+ static_cast<int>(frame.samples_per_channel()));
+ }
+
+ if (preliminary_noise_energy_set_) {
+ preliminary_noise_energy_ =
+ std::min(preliminary_noise_energy_, frame_energy);
+ } else {
+ preliminary_noise_energy_ = frame_energy;
+ preliminary_noise_energy_set_ = true;
+ }
+ data_dumper_->DumpRaw("agc2_noise_floor_estimator_preliminary_level",
+ preliminary_noise_energy_);
+
+ if (counter_ == 0) {
+ // Full period observed.
+ first_period_ = false;
+ // Update the estimated noise floor energy with the preliminary
+ // estimation.
+ noise_energy_ = SmoothNoiseFloorEstimate(
+ /*current_estimate=*/noise_energy_,
+ /*new_estimate=*/preliminary_noise_energy_);
+ // Reset for a new observation period.
+ counter_ = kUpdatePeriodNumFrames;
+ preliminary_noise_energy_set_ = false;
+ } else if (first_period_) {
+      // While analyzing the signal during the initial period, continuously
+      // update the estimated noise energy, which decreases monotonically
+      // because the preliminary estimate is a running minimum.
+ noise_energy_ = preliminary_noise_energy_;
+ counter_--;
+ } else {
+      // During the observation period the energy may only be lowered.
+ noise_energy_ = std::min(noise_energy_, preliminary_noise_energy_);
+ counter_--;
+ }
+ return EnergyToDbfs(noise_energy_,
+ static_cast<int>(frame.samples_per_channel()));
+ }
+
+ private:
+ void Initialize(int sample_rate_hz) {
+ sample_rate_hz_ = sample_rate_hz;
+ first_period_ = true;
+ preliminary_noise_energy_set_ = false;
+ // Initialize the minimum noise energy to -84 dBFS.
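+    // This equals the energy of a frame whose samples are all equal to 2:
+    // rms_square == 4, so EnergyToDbfs() yields 10*log10(4) + kMinDbfs, i.e.
+    // about 6.02 - 90.31 == -84.29 dBFS.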
+ min_noise_energy_ = sample_rate_hz * 2.0f * 2.0f / kFramesPerSecond;
+ preliminary_noise_energy_ = min_noise_energy_;
+ noise_energy_ = min_noise_energy_;
+ counter_ = kUpdatePeriodNumFrames;
+ }
+
+ ApmDataDumper* const data_dumper_;
+ int sample_rate_hz_;
+ float min_noise_energy_;
+ bool first_period_;
+ bool preliminary_noise_energy_set_;
+ float preliminary_noise_energy_;
+ float noise_energy_;
+ int counter_;
+};
+
+} // namespace
+
+std::unique_ptr<NoiseLevelEstimator> CreateNoiseFloorEstimator(
+ ApmDataDumper* data_dumper) {
+ return std::make_unique<NoiseFloorEstimator>(data_dumper);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/noise_level_estimator.h b/third_party/libwebrtc/modules/audio_processing/agc2/noise_level_estimator.h
new file mode 100644
index 0000000000..9f3b957486
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/noise_level_estimator.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AGC2_NOISE_LEVEL_ESTIMATOR_H_
+#define MODULES_AUDIO_PROCESSING_AGC2_NOISE_LEVEL_ESTIMATOR_H_
+
+#include <memory>
+
+#include "modules/audio_processing/include/audio_frame_view.h"
+
+namespace webrtc {
+class ApmDataDumper;
+
+// Noise level estimator interface.
+class NoiseLevelEstimator {
+ public:
+ virtual ~NoiseLevelEstimator() = default;
+  // Analyzes a 10 ms `frame`, updates the noise level estimate and returns
+  // its value in dBFS.
+ virtual float Analyze(const AudioFrameView<const float>& frame) = 0;
+};
+
+// Creates a noise level estimator based on noise floor detection.
+std::unique_ptr<NoiseLevelEstimator> CreateNoiseFloorEstimator(
+ ApmDataDumper* data_dumper);
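+//
+// Minimal usage sketch (illustrative; `frame` is assumed to be an
+// AudioFrameView<const float> holding 10 ms of audio):
+//   ApmDataDumper data_dumper(0);
+//   std::unique_ptr<NoiseLevelEstimator> estimator =
+//       CreateNoiseFloorEstimator(&data_dumper);
+//   float noise_dbfs = estimator->Analyze(frame);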
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AGC2_NOISE_LEVEL_ESTIMATOR_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/noise_level_estimator_gn/moz.build b/third_party/libwebrtc/modules/audio_processing/agc2/noise_level_estimator_gn/moz.build
new file mode 100644
index 0000000000..6b53dda825
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/noise_level_estimator_gn/moz.build
@@ -0,0 +1,213 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_APM_DEBUG_DUMP"] = "0"
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/audio_processing/agc2/noise_level_estimator.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("noise_level_estimator_gn")
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/noise_level_estimator_unittest.cc b/third_party/libwebrtc/modules/audio_processing/agc2/noise_level_estimator_unittest.cc
new file mode 100644
index 0000000000..8168c5a229
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/noise_level_estimator_unittest.cc
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/agc2/noise_level_estimator.h"
+
+#include <array>
+#include <cmath>
+#include <functional>
+#include <limits>
+
+#include "api/function_view.h"
+#include "modules/audio_processing/agc2/agc2_testing_common.h"
+#include "modules/audio_processing/agc2/vector_float_frame.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+#include "rtc_base/gunit.h"
+
+namespace webrtc {
+namespace {
+
+constexpr int kNumIterations = 200;
+constexpr int kFramesPerSecond = 100;
+
+// Runs the noise estimator on audio generated by `sample_generator` for
+// `kNumIterations` frames and returns the last noise level estimate.
+float RunEstimator(rtc::FunctionView<float()> sample_generator,
+ NoiseLevelEstimator& estimator,
+ int sample_rate_hz) {
+ const int samples_per_channel =
+ rtc::CheckedDivExact(sample_rate_hz, kFramesPerSecond);
+ VectorFloatFrame signal(1, samples_per_channel, 0.0f);
+ for (int i = 0; i < kNumIterations; ++i) {
+ AudioFrameView<float> frame_view = signal.float_frame_view();
+ for (int j = 0; j < samples_per_channel; ++j) {
+ frame_view.channel(0)[j] = sample_generator();
+ }
+ estimator.Analyze(frame_view);
+ }
+ return estimator.Analyze(signal.float_frame_view());
+}
+
+class NoiseEstimatorParametrization : public ::testing::TestWithParam<int> {
+ protected:
+ int sample_rate_hz() const { return GetParam(); }
+};
+
+// Checks that full scale white noise maps to about -5.5 dBFS.
+TEST_P(NoiseEstimatorParametrization, NoiseFloorEstimatorWithRandomNoise) {
+ ApmDataDumper data_dumper(0);
+ auto estimator = CreateNoiseFloorEstimator(&data_dumper);
+
+ test::WhiteNoiseGenerator gen(/*min_amplitude=*/test::kMinS16,
+ /*max_amplitude=*/test::kMaxS16);
+ const float noise_level_dbfs =
+ RunEstimator(gen, *estimator, sample_rate_hz());
+ EXPECT_NEAR(noise_level_dbfs, -5.5f, 0.5f);
+}
+
+// Checks that a full scale sine wave maps to about -3 dBFS.
+TEST_P(NoiseEstimatorParametrization, NoiseFloorEstimatorWithSineTone) {
+ ApmDataDumper data_dumper(0);
+ auto estimator = CreateNoiseFloorEstimator(&data_dumper);
+
+ test::SineGenerator gen(/*amplitude=*/test::kMaxS16, /*frequency_hz=*/600.0f,
+ sample_rate_hz());
+ const float noise_level_dbfs =
+ RunEstimator(gen, *estimator, sample_rate_hz());
+ EXPECT_NEAR(noise_level_dbfs, -3.0f, 0.1f);
+}
+
+// Checks that sufficiently spaced periodic pulses do not raise the estimated
+// noise floor, which is determined by the amplitude of the non-pulse samples.
+TEST_P(NoiseEstimatorParametrization, NoiseFloorEstimatorWithPulseTone) {
+ ApmDataDumper data_dumper(0);
+ auto estimator = CreateNoiseFloorEstimator(&data_dumper);
+
+ constexpr float kNoPulseAmplitude = 10.0f;
+ test::PulseGenerator gen(/*pulse_amplitude=*/test::kMaxS16, kNoPulseAmplitude,
+ /*frequency_hz=*/20.0f, sample_rate_hz());
+ const float noise_level_dbfs =
+ RunEstimator(gen, *estimator, sample_rate_hz());
+ const float expected_noise_floor_dbfs =
+ 20.0f * std::log10f(kNoPulseAmplitude / test::kMaxS16);
+ EXPECT_NEAR(noise_level_dbfs, expected_noise_floor_dbfs, 0.5f);
+}
+
+INSTANTIATE_TEST_SUITE_P(GainController2NoiseEstimator,
+ NoiseEstimatorParametrization,
+ ::testing::Values(8000, 16000, 32000, 48000));
+
+} // namespace
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/BUILD.gn b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/BUILD.gn
new file mode 100644
index 0000000000..d709eb3699
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/BUILD.gn
@@ -0,0 +1,334 @@
+# Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+import("../../../../webrtc.gni")
+
+rtc_library("rnn_vad") {
+ visibility = [ "../*" ]
+ sources = [
+ "features_extraction.cc",
+ "features_extraction.h",
+ "rnn.cc",
+ "rnn.h",
+ ]
+
+ defines = []
+  if (rtc_build_with_neon && current_cpu != "arm64") {
+ suppressed_configs += [ "//build/config/compiler:compiler_arm_fpu" ]
+ cflags = [ "-mfpu=neon" ]
+ }
+
+ deps = [
+ ":rnn_vad_common",
+ ":rnn_vad_layers",
+ ":rnn_vad_lp_residual",
+ ":rnn_vad_pitch",
+ ":rnn_vad_sequence_buffer",
+ ":rnn_vad_spectral_features",
+ "..:biquad_filter",
+ "..:cpu_features",
+ "../../../../api:array_view",
+ "../../../../rtc_base:checks",
+ "../../../../rtc_base:safe_compare",
+ "../../../../rtc_base:safe_conversions",
+ "//third_party/rnnoise:rnn_vad",
+ ]
+}
+
+rtc_library("rnn_vad_auto_correlation") {
+ sources = [
+ "auto_correlation.cc",
+ "auto_correlation.h",
+ ]
+ deps = [
+ ":rnn_vad_common",
+ "../../../../api:array_view",
+ "../../../../rtc_base:checks",
+ "../../utility:pffft_wrapper",
+ ]
+}
+
+rtc_source_set("rnn_vad_common") {
+ # TODO(alessiob): Make this target visibility private.
+ visibility = [
+ ":*",
+ "..:vad_wrapper",
+ ]
+ sources = [ "common.h" ]
+ deps = [
+ "../../../../rtc_base/system:arch",
+ "../../../../system_wrappers",
+ ]
+}
+
+rtc_library("rnn_vad_lp_residual") {
+ sources = [
+ "lp_residual.cc",
+ "lp_residual.h",
+ ]
+ deps = [
+ "../../../../api:array_view",
+ "../../../../rtc_base:checks",
+ "../../../../rtc_base:safe_compare",
+ ]
+}
+
+rtc_source_set("rnn_vad_layers") {
+ sources = [
+ "rnn_fc.cc",
+ "rnn_fc.h",
+ "rnn_gru.cc",
+ "rnn_gru.h",
+ ]
+
+ defines = []
+ if (rtc_build_with_neon && current_cpu != "arm64") {
+ suppressed_configs += [ "//build/config/compiler:compiler_arm_fpu" ]
+ cflags = [ "-mfpu=neon" ]
+ }
+
+ deps = [
+ ":rnn_vad_common",
+ ":vector_math",
+ "..:cpu_features",
+ "../../../../api:array_view",
+ "../../../../api:function_view",
+ "../../../../rtc_base:checks",
+ "../../../../rtc_base:safe_conversions",
+ "//third_party/rnnoise:rnn_vad",
+ ]
+ if (current_cpu == "x86" || current_cpu == "x64") {
+ deps += [ ":vector_math_avx2" ]
+ }
+ absl_deps = [ "//third_party/abseil-cpp/absl/strings" ]
+}
+
+rtc_source_set("vector_math") {
+ sources = [ "vector_math.h" ]
+ deps = [
+ "..:cpu_features",
+ "../../../../api:array_view",
+ "../../../../rtc_base:checks",
+ "../../../../rtc_base:safe_conversions",
+ "../../../../rtc_base/system:arch",
+ ]
+}
+
+if (current_cpu == "x86" || current_cpu == "x64") {
+ rtc_library("vector_math_avx2") {
+ sources = [ "vector_math_avx2.cc" ]
+ if (is_win && !build_with_mozilla) {
+ cflags = [ "/arch:AVX2" ]
+ } else {
+ cflags = [
+ "-mavx2",
+ "-mfma",
+ ]
+ }
+ deps = [
+ ":vector_math",
+ "../../../../api:array_view",
+ "../../../../rtc_base:checks",
+ "../../../../rtc_base:safe_conversions",
+ ]
+ }
+}
+
+rtc_library("rnn_vad_pitch") {
+ sources = [
+ "pitch_search.cc",
+ "pitch_search.h",
+ "pitch_search_internal.cc",
+ "pitch_search_internal.h",
+ ]
+
+ defines = []
+ if (rtc_build_with_neon && current_cpu != "arm64") {
+ suppressed_configs += [ "//build/config/compiler:compiler_arm_fpu" ]
+ cflags = [ "-mfpu=neon" ]
+ }
+
+ deps = [
+ ":rnn_vad_auto_correlation",
+ ":rnn_vad_common",
+ ":vector_math",
+ "..:cpu_features",
+ "../../../../api:array_view",
+ "../../../../rtc_base:checks",
+ "../../../../rtc_base:gtest_prod",
+ "../../../../rtc_base:safe_compare",
+ "../../../../rtc_base:safe_conversions",
+ "../../../../rtc_base/system:arch",
+ ]
+ if (current_cpu == "x86" || current_cpu == "x64") {
+ deps += [ ":vector_math_avx2" ]
+ }
+}
+
+rtc_source_set("rnn_vad_ring_buffer") {
+ sources = [ "ring_buffer.h" ]
+ deps = [
+ "../../../../api:array_view",
+ "../../../../rtc_base:checks",
+ ]
+}
+
+rtc_source_set("rnn_vad_sequence_buffer") {
+ sources = [ "sequence_buffer.h" ]
+ deps = [
+ "../../../../api:array_view",
+ "../../../../rtc_base:checks",
+ ]
+}
+
+rtc_library("rnn_vad_spectral_features") {
+ sources = [
+ "spectral_features.cc",
+ "spectral_features.h",
+ "spectral_features_internal.cc",
+ "spectral_features_internal.h",
+ ]
+ deps = [
+ ":rnn_vad_common",
+ ":rnn_vad_ring_buffer",
+ ":rnn_vad_symmetric_matrix_buffer",
+ "../../../../api:array_view",
+ "../../../../rtc_base:checks",
+ "../../../../rtc_base:safe_compare",
+ "../../utility:pffft_wrapper",
+ ]
+}
+
+rtc_source_set("rnn_vad_symmetric_matrix_buffer") {
+ sources = [ "symmetric_matrix_buffer.h" ]
+ deps = [
+ "../../../../api:array_view",
+ "../../../../rtc_base:checks",
+ "../../../../rtc_base:safe_compare",
+ ]
+}
+
+if (rtc_include_tests) {
+ rtc_library("test_utils") {
+ testonly = true
+ sources = [
+ "test_utils.cc",
+ "test_utils.h",
+ ]
+ deps = [
+ ":rnn_vad",
+ ":rnn_vad_common",
+ "../../../../api:array_view",
+ "../../../../api:scoped_refptr",
+ "../../../../rtc_base:checks",
+ "../../../../rtc_base:safe_compare",
+ "../../../../test:fileutils",
+ "../../../../test:test_support",
+ ]
+ absl_deps = [ "//third_party/abseil-cpp/absl/strings" ]
+ }
+
+ unittest_resources = [
+ "../../../../resources/audio_processing/agc2/rnn_vad/band_energies.dat",
+ "../../../../resources/audio_processing/agc2/rnn_vad/pitch_buf_24k.dat",
+ "../../../../resources/audio_processing/agc2/rnn_vad/pitch_lp_res.dat",
+ "../../../../resources/audio_processing/agc2/rnn_vad/pitch_search_int.dat",
+ "../../../../resources/audio_processing/agc2/rnn_vad/samples.pcm",
+ "../../../../resources/audio_processing/agc2/rnn_vad/vad_prob.dat",
+ ]
+
+ if (is_ios) {
+ bundle_data("unittests_bundle_data") {
+ testonly = true
+ sources = unittest_resources
+ outputs = [ "{{bundle_resources_dir}}/{{source_file_part}}" ]
+ }
+ }
+
+ rtc_library("unittests") {
+ testonly = true
+ sources = [
+ "auto_correlation_unittest.cc",
+ "features_extraction_unittest.cc",
+ "lp_residual_unittest.cc",
+ "pitch_search_internal_unittest.cc",
+ "pitch_search_unittest.cc",
+ "ring_buffer_unittest.cc",
+ "rnn_fc_unittest.cc",
+ "rnn_gru_unittest.cc",
+ "rnn_unittest.cc",
+ "rnn_vad_unittest.cc",
+ "sequence_buffer_unittest.cc",
+ "spectral_features_internal_unittest.cc",
+ "spectral_features_unittest.cc",
+ "symmetric_matrix_buffer_unittest.cc",
+ "vector_math_unittest.cc",
+ ]
+
+ defines = []
+ if (rtc_build_with_neon && current_cpu != "arm64") {
+ suppressed_configs += [ "//build/config/compiler:compiler_arm_fpu" ]
+ cflags = [ "-mfpu=neon" ]
+ }
+
+ deps = [
+ ":rnn_vad",
+ ":rnn_vad_auto_correlation",
+ ":rnn_vad_common",
+ ":rnn_vad_layers",
+ ":rnn_vad_lp_residual",
+ ":rnn_vad_pitch",
+ ":rnn_vad_ring_buffer",
+ ":rnn_vad_sequence_buffer",
+ ":rnn_vad_spectral_features",
+ ":rnn_vad_symmetric_matrix_buffer",
+ ":test_utils",
+ ":vector_math",
+ "..:cpu_features",
+ "../..:audioproc_test_utils",
+ "../../../../api:array_view",
+ "../../../../common_audio/",
+ "../../../../rtc_base:checks",
+ "../../../../rtc_base:logging",
+ "../../../../rtc_base:safe_compare",
+ "../../../../rtc_base:safe_conversions",
+ "../../../../rtc_base:stringutils",
+ "../../../../rtc_base/system:arch",
+ "../../../../test:test_support",
+ "../../utility:pffft_wrapper",
+ "//third_party/rnnoise:rnn_vad",
+ ]
+ if (current_cpu == "x86" || current_cpu == "x64") {
+ deps += [ ":vector_math_avx2" ]
+ }
+ absl_deps = [ "//third_party/abseil-cpp/absl/memory" ]
+ data = unittest_resources
+ if (is_ios) {
+ deps += [ ":unittests_bundle_data" ]
+ }
+ }
+
+ if (!build_with_chromium) {
+ rtc_executable("rnn_vad_tool") {
+ testonly = true
+ sources = [ "rnn_vad_tool.cc" ]
+ deps = [
+ ":rnn_vad",
+ ":rnn_vad_common",
+ "..:cpu_features",
+ "../../../../api:array_view",
+ "../../../../common_audio",
+ "../../../../rtc_base:logging",
+ "../../../../rtc_base:safe_compare",
+ "../../../../test:test_support",
+ "//third_party/abseil-cpp/absl/flags:flag",
+ "//third_party/abseil-cpp/absl/flags:parse",
+ ]
+ }
+ }
+}
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/DEPS b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/DEPS
new file mode 100644
index 0000000000..773c2d7edd
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/DEPS
@@ -0,0 +1,3 @@
+include_rules = [
+ "+third_party/rnnoise",
+]
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/auto_correlation.cc b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/auto_correlation.cc
new file mode 100644
index 0000000000..3ddeec8dba
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/auto_correlation.cc
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/agc2/rnn_vad/auto_correlation.h"
+
+#include <algorithm>
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace rnn_vad {
+namespace {
+
+constexpr int kAutoCorrelationFftOrder = 9; // Length-512 FFT.
+static_assert(1 << kAutoCorrelationFftOrder >
+ kNumLags12kHz + kBufSize12kHz - kMaxPitch12kHz,
+ "");
+
+} // namespace
+
+AutoCorrelationCalculator::AutoCorrelationCalculator()
+ : fft_(1 << kAutoCorrelationFftOrder, Pffft::FftType::kReal),
+ tmp_(fft_.CreateBuffer()),
+ X_(fft_.CreateBuffer()),
+ H_(fft_.CreateBuffer()) {}
+
+AutoCorrelationCalculator::~AutoCorrelationCalculator() = default;
+
+// The auto-correlation coefficients are computed as follows:
+// |.........|...........| <- pitch buffer
+// [ x (fixed) ]
+// [ y_0 ]
+// [ y_{m-1} ]
+// x and y are sub-arrays of equal length; x is never moved, whereas y slides.
+// The cross-correlation between y_0 and x corresponds to the auto-correlation
+// for the maximum pitch period. Hence, the first value in `auto_corr` has an
+// inverted lag equal to 0 that corresponds to a lag equal to the maximum
+// pitch period.
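+//
+// With the constants defined in common.h, the sizes involved here evaluate to
+// kBufSize12kHz == 432, kMaxPitch12kHz == 192, kNumLags12kHz == 147 and
+// kConvolutionLength == 240, so the length-512 FFT comfortably holds the
+// kNumLags12kHz + kConvolutionLength == 387 samples needed to avoid cyclic
+// convolution errors.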
+void AutoCorrelationCalculator::ComputeOnPitchBuffer(
+ rtc::ArrayView<const float, kBufSize12kHz> pitch_buf,
+ rtc::ArrayView<float, kNumLags12kHz> auto_corr) {
+ RTC_DCHECK_LT(auto_corr.size(), kMaxPitch12kHz);
+ RTC_DCHECK_GT(pitch_buf.size(), kMaxPitch12kHz);
+ constexpr int kFftFrameSize = 1 << kAutoCorrelationFftOrder;
+ constexpr int kConvolutionLength = kBufSize12kHz - kMaxPitch12kHz;
+ static_assert(kConvolutionLength == kFrameSize20ms12kHz,
+ "Mismatch between pitch buffer size, frame size and maximum "
+ "pitch period.");
+ static_assert(kFftFrameSize > kNumLags12kHz + kConvolutionLength,
+ "The FFT length is not sufficiently big to avoid cyclic "
+ "convolution errors.");
+ auto tmp = tmp_->GetView();
+
+ // Compute the FFT for the reversed reference frame - i.e.,
+ // pitch_buf[-kConvolutionLength:].
+ std::reverse_copy(pitch_buf.end() - kConvolutionLength, pitch_buf.end(),
+ tmp.begin());
+ std::fill(tmp.begin() + kConvolutionLength, tmp.end(), 0.f);
+ fft_.ForwardTransform(*tmp_, H_.get(), /*ordered=*/false);
+
+ // Compute the FFT for the sliding frames chunk. The sliding frames are
+ // defined as pitch_buf[i:i+kConvolutionLength] where i in
+ // [0, kNumLags12kHz). The chunk includes all of them, hence it is
+ // defined as pitch_buf[:kNumLags12kHz+kConvolutionLength].
+ std::copy(pitch_buf.begin(),
+ pitch_buf.begin() + kConvolutionLength + kNumLags12kHz,
+ tmp.begin());
+ std::fill(tmp.begin() + kNumLags12kHz + kConvolutionLength, tmp.end(), 0.f);
+ fft_.ForwardTransform(*tmp_, X_.get(), /*ordered=*/false);
+
+ // Convolve in the frequency domain.
+ constexpr float kScalingFactor = 1.f / static_cast<float>(kFftFrameSize);
+ std::fill(tmp.begin(), tmp.end(), 0.f);
+ fft_.FrequencyDomainConvolve(*X_, *H_, tmp_.get(), kScalingFactor);
+ fft_.BackwardTransform(*tmp_, tmp_.get(), /*ordered=*/false);
+
+ // Extract the auto-correlation coefficients.
+ std::copy(tmp.begin() + kConvolutionLength - 1,
+ tmp.begin() + kConvolutionLength + kNumLags12kHz - 1,
+ auto_corr.begin());
+}
+
+} // namespace rnn_vad
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/auto_correlation.h b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/auto_correlation.h
new file mode 100644
index 0000000000..1ae5054567
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/auto_correlation.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AGC2_RNN_VAD_AUTO_CORRELATION_H_
+#define MODULES_AUDIO_PROCESSING_AGC2_RNN_VAD_AUTO_CORRELATION_H_
+
+#include <memory>
+
+#include "api/array_view.h"
+#include "modules/audio_processing/agc2/rnn_vad/common.h"
+#include "modules/audio_processing/utility/pffft_wrapper.h"
+
+namespace webrtc {
+namespace rnn_vad {
+
+// Class to compute the auto correlation on the pitch buffer for a target pitch
+// interval.
+class AutoCorrelationCalculator {
+ public:
+ AutoCorrelationCalculator();
+ AutoCorrelationCalculator(const AutoCorrelationCalculator&) = delete;
+ AutoCorrelationCalculator& operator=(const AutoCorrelationCalculator&) =
+ delete;
+ ~AutoCorrelationCalculator();
+
+ // Computes the auto-correlation coefficients for a target pitch interval.
+ // `auto_corr` indexes are inverted lags.
+ void ComputeOnPitchBuffer(
+ rtc::ArrayView<const float, kBufSize12kHz> pitch_buf,
+ rtc::ArrayView<float, kNumLags12kHz> auto_corr);
+
+ private:
+ Pffft fft_;
+ std::unique_ptr<Pffft::FloatBuffer> tmp_;
+ std::unique_ptr<Pffft::FloatBuffer> X_;
+ std::unique_ptr<Pffft::FloatBuffer> H_;
+};
+
+} // namespace rnn_vad
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AGC2_RNN_VAD_AUTO_CORRELATION_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/auto_correlation_unittest.cc b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/auto_correlation_unittest.cc
new file mode 100644
index 0000000000..76001ed7b7
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/auto_correlation_unittest.cc
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/agc2/rnn_vad/auto_correlation.h"
+
+#include "modules/audio_processing/agc2/rnn_vad/common.h"
+#include "modules/audio_processing/agc2/rnn_vad/pitch_search_internal.h"
+#include "modules/audio_processing/agc2/rnn_vad/test_utils.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace rnn_vad {
+namespace {
+
+// Checks that the auto correlation function produces output within tolerance
+// given test input data.
+TEST(RnnVadTest, PitchBufferAutoCorrelationWithinTolerance) {
+ PitchTestData test_data;
+ std::array<float, kBufSize12kHz> pitch_buf_decimated;
+ Decimate2x(test_data.PitchBuffer24kHzView(), pitch_buf_decimated);
+ std::array<float, kNumLags12kHz> computed_output;
+ {
+ // TODO(bugs.webrtc.org/8948): Add when the issue is fixed.
+ // FloatingPointExceptionObserver fpe_observer;
+ AutoCorrelationCalculator auto_corr_calculator;
+ auto_corr_calculator.ComputeOnPitchBuffer(pitch_buf_decimated,
+ computed_output);
+ }
+ auto auto_corr_view = test_data.AutoCorrelation12kHzView();
+ ExpectNearAbsolute({auto_corr_view.data(), auto_corr_view.size()},
+ computed_output, 3e-3f);
+}
+
+// Checks that the auto correlation function computes the expected output for
+// a constant pitch buffer.
+TEST(RnnVadTest, CheckAutoCorrelationOnConstantPitchBuffer) {
+ // Create constant signal with no pitch.
+ std::array<float, kBufSize12kHz> pitch_buf_decimated;
+ std::fill(pitch_buf_decimated.begin(), pitch_buf_decimated.end(), 1.f);
+ std::array<float, kNumLags12kHz> computed_output;
+ {
+ // TODO(bugs.webrtc.org/8948): Add when the issue is fixed.
+ // FloatingPointExceptionObserver fpe_observer;
+ AutoCorrelationCalculator auto_corr_calculator;
+ auto_corr_calculator.ComputeOnPitchBuffer(pitch_buf_decimated,
+ computed_output);
+ }
+  // The expected output is a vector filled with the same expected
+  // auto-correlation value, which equals the length of a 20 ms frame at
+  // 12 kHz (i.e., 240 samples).
+ constexpr int kFrameSize20ms12kHz = kFrameSize20ms24kHz / 2;
+ std::array<float, kNumLags12kHz> expected_output;
+ std::fill(expected_output.begin(), expected_output.end(),
+ static_cast<float>(kFrameSize20ms12kHz));
+ ExpectNearAbsolute(expected_output, computed_output, 4e-5f);
+}
+
+} // namespace
+} // namespace rnn_vad
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/common.h b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/common.h
new file mode 100644
index 0000000000..c099373200
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/common.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AGC2_RNN_VAD_COMMON_H_
+#define MODULES_AUDIO_PROCESSING_AGC2_RNN_VAD_COMMON_H_
+
+#include <stddef.h>
+
+namespace webrtc {
+namespace rnn_vad {
+
+constexpr double kPi = 3.14159265358979323846;
+
+constexpr int kSampleRate24kHz = 24000;
+constexpr int kFrameSize10ms24kHz = kSampleRate24kHz / 100;
+constexpr int kFrameSize20ms24kHz = kFrameSize10ms24kHz * 2;
+
+// Pitch buffer.
+constexpr int kMinPitch24kHz = kSampleRate24kHz / 800; // 0.00125 s.
+constexpr int kMaxPitch24kHz = kSampleRate24kHz / 62.5; // 0.016 s.
+constexpr int kBufSize24kHz = kMaxPitch24kHz + kFrameSize20ms24kHz;
+static_assert((kBufSize24kHz & 1) == 0, "The buffer size must be even.");
+
+// 24 kHz analysis.
+// Define a higher minimum pitch period for the initial search in order to
+// avoid searching for very short periods, which are instead handled by the
+// refinement step.
+constexpr int kInitialMinPitch24kHz = 3 * kMinPitch24kHz;
+static_assert(kMinPitch24kHz < kInitialMinPitch24kHz, "");
+static_assert(kInitialMinPitch24kHz < kMaxPitch24kHz, "");
+// Number of (inverted) lags during the initial pitch search phase at 24 kHz.
+constexpr int kInitialNumLags24kHz = kMaxPitch24kHz - kInitialMinPitch24kHz;
+// Number of (inverted) lags during the pitch search refinement phase at 24 kHz.
+constexpr int kRefineNumLags24kHz = kMaxPitch24kHz + 1;
+static_assert(
+ kRefineNumLags24kHz > kInitialNumLags24kHz,
+ "The refinement step must search the pitch in an extended pitch range.");
+
+// 12 kHz analysis.
+constexpr int kSampleRate12kHz = 12000;
+constexpr int kFrameSize10ms12kHz = kSampleRate12kHz / 100;
+constexpr int kFrameSize20ms12kHz = kFrameSize10ms12kHz * 2;
+constexpr int kBufSize12kHz = kBufSize24kHz / 2;
+constexpr int kInitialMinPitch12kHz = kInitialMinPitch24kHz / 2;
+constexpr int kMaxPitch12kHz = kMaxPitch24kHz / 2;
+static_assert(kMaxPitch12kHz > kInitialMinPitch12kHz, "");
+// The inverted lags for the pitch interval [`kInitialMinPitch12kHz`,
+// `kMaxPitch12kHz`] are in the range [0, `kNumLags12kHz`].
+constexpr int kNumLags12kHz = kMaxPitch12kHz - kInitialMinPitch12kHz;
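+// With the definitions above, these constants evaluate to kMinPitch24kHz ==
+// 30, kInitialMinPitch24kHz == 90, kMaxPitch24kHz == 384, kBufSize24kHz ==
+// 864, kInitialNumLags24kHz == 294, kRefineNumLags24kHz == 385, kBufSize12kHz
+// == 432, kInitialMinPitch12kHz == 45, kMaxPitch12kHz == 192 and
+// kNumLags12kHz == 147.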
+
+// 48 kHz constants.
+constexpr int kMinPitch48kHz = kMinPitch24kHz * 2;
+constexpr int kMaxPitch48kHz = kMaxPitch24kHz * 2;
+
+// Spectral features.
+constexpr int kNumBands = 22;
+constexpr int kNumLowerBands = 6;
+static_assert((0 < kNumLowerBands) && (kNumLowerBands < kNumBands), "");
+constexpr int kCepstralCoeffsHistorySize = 8;
+static_assert(kCepstralCoeffsHistorySize > 2,
+ "The history size must at least be 3 to compute first and second "
+ "derivatives.");
+
+constexpr int kFeatureVectorSize = 42;
+
+} // namespace rnn_vad
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AGC2_RNN_VAD_COMMON_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/features_extraction.cc b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/features_extraction.cc
new file mode 100644
index 0000000000..502023428d
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/features_extraction.cc
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/agc2/rnn_vad/features_extraction.h"
+
+#include <array>
+
+#include "modules/audio_processing/agc2/rnn_vad/lp_residual.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace rnn_vad {
+namespace {
+
+// Computed as `scipy.signal.butter(N=2, Wn=60/24000, btype='highpass')`.
+constexpr BiQuadFilter::Config kHpfConfig24k{
+ {0.99446179f, -1.98892358f, 0.99446179f},
+ {-1.98889291f, 0.98895425f}};
+
+} // namespace
+
+FeaturesExtractor::FeaturesExtractor(const AvailableCpuFeatures& cpu_features)
+ : use_high_pass_filter_(false),
+ hpf_(kHpfConfig24k),
+ pitch_buf_24kHz_(),
+ pitch_buf_24kHz_view_(pitch_buf_24kHz_.GetBufferView()),
+ lp_residual_(kBufSize24kHz),
+ lp_residual_view_(lp_residual_.data(), kBufSize24kHz),
+ pitch_estimator_(cpu_features),
+ reference_frame_view_(pitch_buf_24kHz_.GetMostRecentValuesView()) {
+ RTC_DCHECK_EQ(kBufSize24kHz, lp_residual_.size());
+ Reset();
+}
+
+FeaturesExtractor::~FeaturesExtractor() = default;
+
+void FeaturesExtractor::Reset() {
+ pitch_buf_24kHz_.Reset();
+ spectral_features_extractor_.Reset();
+ if (use_high_pass_filter_) {
+ hpf_.Reset();
+ }
+}
+
+bool FeaturesExtractor::CheckSilenceComputeFeatures(
+ rtc::ArrayView<const float, kFrameSize10ms24kHz> samples,
+ rtc::ArrayView<float, kFeatureVectorSize> feature_vector) {
+ // Pre-processing.
+ if (use_high_pass_filter_) {
+ std::array<float, kFrameSize10ms24kHz> samples_filtered;
+ hpf_.Process(samples, samples_filtered);
+ // Feed buffer with the pre-processed version of `samples`.
+ pitch_buf_24kHz_.Push(samples_filtered);
+ } else {
+ // Feed buffer with `samples`.
+ pitch_buf_24kHz_.Push(samples);
+ }
+ // Extract the LP residual.
+ float lpc_coeffs[kNumLpcCoefficients];
+ ComputeAndPostProcessLpcCoefficients(pitch_buf_24kHz_view_, lpc_coeffs);
+ ComputeLpResidual(lpc_coeffs, pitch_buf_24kHz_view_, lp_residual_view_);
+ // Estimate pitch on the LP-residual and write the normalized pitch period
+ // into the output vector (normalization based on training data stats).
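+  // For instance, a 300-sample period at 48 kHz (160 Hz) maps to 0 and a
+  // 400-sample period (120 Hz) maps to 1.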
+ pitch_period_48kHz_ = pitch_estimator_.Estimate(lp_residual_view_);
+ feature_vector[kFeatureVectorSize - 2] = 0.01f * (pitch_period_48kHz_ - 300);
+ // Extract lagged frames (according to the estimated pitch period).
+ RTC_DCHECK_LE(pitch_period_48kHz_ / 2, kMaxPitch24kHz);
+ auto lagged_frame = pitch_buf_24kHz_view_.subview(
+ kMaxPitch24kHz - pitch_period_48kHz_ / 2, kFrameSize20ms24kHz);
+ // Analyze reference and lagged frames checking if silence has been detected
+ // and write the feature vector.
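+  // Judging by the offsets below, the 42-element vector is laid out as 6
+  // lower-band and 16 higher-band spectral coefficients (indices 0-21), three
+  // blocks of 6 lower-band values (indices 22-39), the normalized pitch
+  // period (index 40) and one last scalar written via the final pointer
+  // (index 41).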
+ return spectral_features_extractor_.CheckSilenceComputeFeatures(
+ reference_frame_view_, {lagged_frame.data(), kFrameSize20ms24kHz},
+ {feature_vector.data() + kNumLowerBands, kNumBands - kNumLowerBands},
+ {feature_vector.data(), kNumLowerBands},
+ {feature_vector.data() + kNumBands, kNumLowerBands},
+ {feature_vector.data() + kNumBands + kNumLowerBands, kNumLowerBands},
+ {feature_vector.data() + kNumBands + 2 * kNumLowerBands, kNumLowerBands},
+ &feature_vector[kFeatureVectorSize - 1]);
+}
+
+} // namespace rnn_vad
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/features_extraction.h b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/features_extraction.h
new file mode 100644
index 0000000000..d47a85bfb0
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/features_extraction.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AGC2_RNN_VAD_FEATURES_EXTRACTION_H_
+#define MODULES_AUDIO_PROCESSING_AGC2_RNN_VAD_FEATURES_EXTRACTION_H_
+
+#include <vector>
+
+#include "api/array_view.h"
+#include "modules/audio_processing/agc2/biquad_filter.h"
+#include "modules/audio_processing/agc2/rnn_vad/common.h"
+#include "modules/audio_processing/agc2/rnn_vad/pitch_search.h"
+#include "modules/audio_processing/agc2/rnn_vad/sequence_buffer.h"
+#include "modules/audio_processing/agc2/rnn_vad/spectral_features.h"
+
+namespace webrtc {
+namespace rnn_vad {
+
+// Feature extractor to feed the VAD RNN.
+class FeaturesExtractor {
+ public:
+ explicit FeaturesExtractor(const AvailableCpuFeatures& cpu_features);
+ FeaturesExtractor(const FeaturesExtractor&) = delete;
+ FeaturesExtractor& operator=(const FeaturesExtractor&) = delete;
+ ~FeaturesExtractor();
+ void Reset();
+ // Analyzes the samples, computes the feature vector and returns true if
+ // silence is detected (false if not). When silence is detected,
+ // `feature_vector` is partially written and therefore must not be used to
+ // feed the VAD RNN.
+ bool CheckSilenceComputeFeatures(
+ rtc::ArrayView<const float, kFrameSize10ms24kHz> samples,
+ rtc::ArrayView<float, kFeatureVectorSize> feature_vector);
+
+ private:
+ const bool use_high_pass_filter_;
+ // TODO(bugs.webrtc.org/7494): Remove HPF depending on how AGC2 is used in APM
+ // and on whether an HPF is already used as pre-processing step in APM.
+ BiQuadFilter hpf_;
+ SequenceBuffer<float, kBufSize24kHz, kFrameSize10ms24kHz, kFrameSize20ms24kHz>
+ pitch_buf_24kHz_;
+ rtc::ArrayView<const float, kBufSize24kHz> pitch_buf_24kHz_view_;
+ std::vector<float> lp_residual_;
+ rtc::ArrayView<float, kBufSize24kHz> lp_residual_view_;
+ PitchEstimator pitch_estimator_;
+ rtc::ArrayView<const float, kFrameSize20ms24kHz> reference_frame_view_;
+ SpectralFeaturesExtractor spectral_features_extractor_;
+ int pitch_period_48kHz_;
+};
+
+} // namespace rnn_vad
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AGC2_RNN_VAD_FEATURES_EXTRACTION_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/features_extraction_unittest.cc b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/features_extraction_unittest.cc
new file mode 100644
index 0000000000..96f956adfe
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/features_extraction_unittest.cc
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/agc2/rnn_vad/features_extraction.h"
+
+#include <cmath>
+#include <vector>
+
+#include "modules/audio_processing/agc2/cpu_features.h"
+#include "rtc_base/numerics/safe_compare.h"
+#include "rtc_base/numerics/safe_conversions.h"
+// TODO(bugs.webrtc.org/8948): Add when the issue is fixed.
+// #include "test/fpe_observer.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace rnn_vad {
+namespace {
+
+constexpr int ceil(int n, int m) {
+ return (n + m - 1) / m;
+}
+
+// Number of 10 ms frames required to fill a pitch buffer having size
+// `kBufSize24kHz`.
+constexpr int kNumTestDataFrames = ceil(kBufSize24kHz, kFrameSize10ms24kHz);
+// Number of samples for the test data.
+constexpr int kNumTestDataSize = kNumTestDataFrames * kFrameSize10ms24kHz;
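+// E.g., kBufSize24kHz == 864 and kFrameSize10ms24kHz == 240 give
+// kNumTestDataFrames == ceil(864, 240) == 4 and kNumTestDataSize == 960.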
+
+// Verifies that the pitch in Hz is in the detectable range.
+bool PitchIsValid(float pitch_hz) {
+ const int pitch_period = static_cast<float>(kSampleRate24kHz) / pitch_hz;
+ return kInitialMinPitch24kHz <= pitch_period &&
+ pitch_period <= kMaxPitch24kHz;
+}
+
+void CreatePureTone(float amplitude, float freq_hz, rtc::ArrayView<float> dst) {
+ for (int i = 0; rtc::SafeLt(i, dst.size()); ++i) {
+ dst[i] = amplitude * std::sin(2.f * kPi * freq_hz * i / kSampleRate24kHz);
+ }
+}
+
+// Feeds `features_extractor` with `samples`, splitting it into 10 ms frames.
+// For every frame, the output is written into `feature_vector`. Returns true
+// if silence is detected in the last frame.
+bool FeedTestData(FeaturesExtractor& features_extractor,
+ rtc::ArrayView<const float> samples,
+ rtc::ArrayView<float, kFeatureVectorSize> feature_vector) {
+ // TODO(bugs.webrtc.org/8948): Add when the issue is fixed.
+ // FloatingPointExceptionObserver fpe_observer;
+ bool is_silence = true;
+ const int num_frames = samples.size() / kFrameSize10ms24kHz;
+ for (int i = 0; i < num_frames; ++i) {
+ is_silence = features_extractor.CheckSilenceComputeFeatures(
+ {samples.data() + i * kFrameSize10ms24kHz, kFrameSize10ms24kHz},
+ feature_vector);
+ }
+ return is_silence;
+}
+
+// Extracts the features for two pure tones and verifies that the pitch field
+// values reflect the known tone frequencies.
+TEST(RnnVadTest, FeatureExtractionLowHighPitch) {
+ constexpr float amplitude = 1000.f;
+ constexpr float low_pitch_hz = 150.f;
+ constexpr float high_pitch_hz = 250.f;
+ ASSERT_TRUE(PitchIsValid(low_pitch_hz));
+ ASSERT_TRUE(PitchIsValid(high_pitch_hz));
+
+ const AvailableCpuFeatures cpu_features = GetAvailableCpuFeatures();
+ FeaturesExtractor features_extractor(cpu_features);
+ std::vector<float> samples(kNumTestDataSize);
+ std::vector<float> feature_vector(kFeatureVectorSize);
+ ASSERT_EQ(kFeatureVectorSize, rtc::dchecked_cast<int>(feature_vector.size()));
+ rtc::ArrayView<float, kFeatureVectorSize> feature_vector_view(
+ feature_vector.data(), kFeatureVectorSize);
+
+  // Index of the normalized scalar feature that grows with the estimated
+  // pitch period.
+ constexpr int pitch_feature_index = kFeatureVectorSize - 2;
+ // Low frequency tone - i.e., high period.
+ CreatePureTone(amplitude, low_pitch_hz, samples);
+ ASSERT_FALSE(FeedTestData(features_extractor, samples, feature_vector_view));
+ float high_pitch_period = feature_vector_view[pitch_feature_index];
+ // High frequency tone - i.e., low period.
+ features_extractor.Reset();
+ CreatePureTone(amplitude, high_pitch_hz, samples);
+ ASSERT_FALSE(FeedTestData(features_extractor, samples, feature_vector_view));
+ float low_pitch_period = feature_vector_view[pitch_feature_index];
+ // Check.
+ EXPECT_LT(low_pitch_period, high_pitch_period);
+}
+
+} // namespace
+} // namespace rnn_vad
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/lp_residual.cc b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/lp_residual.cc
new file mode 100644
index 0000000000..484bfba459
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/lp_residual.cc
@@ -0,0 +1,141 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/agc2/rnn_vad/lp_residual.h"
+
+#include <algorithm>
+#include <array>
+#include <cmath>
+#include <numeric>
+
+#include "rtc_base/checks.h"
+#include "rtc_base/numerics/safe_compare.h"
+
+namespace webrtc {
+namespace rnn_vad {
+namespace {
+
+// Computes auto-correlation coefficients for `x` and writes them in
+// `auto_corr`. The lag values are in {0, ..., max_lag - 1}, where max_lag
+// equals the size of `auto_corr`.
+void ComputeAutoCorrelation(
+ rtc::ArrayView<const float> x,
+ rtc::ArrayView<float, kNumLpcCoefficients> auto_corr) {
+ constexpr int max_lag = auto_corr.size();
+ RTC_DCHECK_LT(max_lag, x.size());
+ for (int lag = 0; lag < max_lag; ++lag) {
+ auto_corr[lag] =
+ std::inner_product(x.begin(), x.end() - lag, x.begin() + lag, 0.f);
+ }
+}
+
+// Applies denoising to the auto-correlation coefficients.
+void DenoiseAutoCorrelation(
+ rtc::ArrayView<float, kNumLpcCoefficients> auto_corr) {
+ // Assume -40 dB white noise floor.
+ auto_corr[0] *= 1.0001f;
+ // Hard-coded values obtained as
+ // [np.float32((0.008*0.008*i*i)) for i in range(1,5)].
+ auto_corr[1] -= auto_corr[1] * 0.000064f;
+ auto_corr[2] -= auto_corr[2] * 0.000256f;
+ auto_corr[3] -= auto_corr[3] * 0.000576f;
+ auto_corr[4] -= auto_corr[4] * 0.001024f;
+ static_assert(kNumLpcCoefficients == 5, "Update `auto_corr`.");
+}
+
+// Computes the initial inverse filter coefficients given the auto-correlation
+// coefficients of an input frame.
+void ComputeInitialInverseFilterCoefficients(
+ rtc::ArrayView<const float, kNumLpcCoefficients> auto_corr,
+ rtc::ArrayView<float, kNumLpcCoefficients - 1> lpc_coeffs) {
+ float error = auto_corr[0];
+ for (int i = 0; i < kNumLpcCoefficients - 1; ++i) {
+ float reflection_coeff = 0.f;
+ for (int j = 0; j < i; ++j) {
+ reflection_coeff += lpc_coeffs[j] * auto_corr[i - j];
+ }
+ reflection_coeff += auto_corr[i + 1];
+
+ // Avoid division by numbers close to zero.
+ constexpr float kMinErrorMagnitude = 1e-6f;
+ if (std::fabs(error) < kMinErrorMagnitude) {
+ error = std::copysign(kMinErrorMagnitude, error);
+ }
+
+ reflection_coeff /= -error;
+ // Update LPC coefficients and total error.
+ lpc_coeffs[i] = reflection_coeff;
+ for (int j = 0; j < ((i + 1) >> 1); ++j) {
+ const float tmp1 = lpc_coeffs[j];
+ const float tmp2 = lpc_coeffs[i - 1 - j];
+ lpc_coeffs[j] = tmp1 + reflection_coeff * tmp2;
+ lpc_coeffs[i - 1 - j] = tmp2 + reflection_coeff * tmp1;
+ }
+ error -= reflection_coeff * reflection_coeff * error;
+ if (error < 0.001f * auto_corr[0]) {
+ break;
+ }
+ }
+}
+
+} // namespace
+
+void ComputeAndPostProcessLpcCoefficients(
+ rtc::ArrayView<const float> x,
+ rtc::ArrayView<float, kNumLpcCoefficients> lpc_coeffs) {
+ std::array<float, kNumLpcCoefficients> auto_corr;
+ ComputeAutoCorrelation(x, auto_corr);
+ if (auto_corr[0] == 0.f) { // Empty frame.
+ std::fill(lpc_coeffs.begin(), lpc_coeffs.end(), 0);
+ return;
+ }
+ DenoiseAutoCorrelation(auto_corr);
+ std::array<float, kNumLpcCoefficients - 1> lpc_coeffs_pre{};
+ ComputeInitialInverseFilterCoefficients(auto_corr, lpc_coeffs_pre);
+ // LPC coefficients post-processing.
+ // TODO(bugs.webrtc.org/9076): Consider removing these steps.
+ lpc_coeffs_pre[0] *= 0.9f;
+ lpc_coeffs_pre[1] *= 0.9f * 0.9f;
+ lpc_coeffs_pre[2] *= 0.9f * 0.9f * 0.9f;
+ lpc_coeffs_pre[3] *= 0.9f * 0.9f * 0.9f * 0.9f;
+ constexpr float kC = 0.8f;
+ lpc_coeffs[0] = lpc_coeffs_pre[0] + kC;
+ lpc_coeffs[1] = lpc_coeffs_pre[1] + kC * lpc_coeffs_pre[0];
+ lpc_coeffs[2] = lpc_coeffs_pre[2] + kC * lpc_coeffs_pre[1];
+ lpc_coeffs[3] = lpc_coeffs_pre[3] + kC * lpc_coeffs_pre[2];
+ lpc_coeffs[4] = kC * lpc_coeffs_pre[3];
+ static_assert(kNumLpcCoefficients == 5, "Update `lpc_coeffs(_pre)`.");
+}
+
+void ComputeLpResidual(
+ rtc::ArrayView<const float, kNumLpcCoefficients> lpc_coeffs,
+ rtc::ArrayView<const float> x,
+ rtc::ArrayView<float> y) {
+ RTC_DCHECK_GT(x.size(), kNumLpcCoefficients);
+ RTC_DCHECK_EQ(x.size(), y.size());
+ // The code below implements the following operation:
+  // y[i] = x[i] + dot_product({x[i - 1], ..., x[i - kNumLpcCoefficients]},
+  //                           lpc_coeffs)
+ // Edge case: i < kNumLpcCoefficients.
+ y[0] = x[0];
+ for (int i = 1; i < kNumLpcCoefficients; ++i) {
+ y[i] =
+ std::inner_product(x.crend() - i, x.crend(), lpc_coeffs.cbegin(), x[i]);
+ }
+ // Regular case.
+ auto last = x.crend();
+ for (int i = kNumLpcCoefficients; rtc::SafeLt(i, y.size()); ++i, --last) {
+ y[i] = std::inner_product(last - kNumLpcCoefficients, last,
+ lpc_coeffs.cbegin(), x[i]);
+ }
+}
+
+} // namespace rnn_vad
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/lp_residual.h b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/lp_residual.h
new file mode 100644
index 0000000000..d04c536ec1
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/lp_residual.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AGC2_RNN_VAD_LP_RESIDUAL_H_
+#define MODULES_AUDIO_PROCESSING_AGC2_RNN_VAD_LP_RESIDUAL_H_
+
+#include <stddef.h>
+
+#include "api/array_view.h"
+
+namespace webrtc {
+namespace rnn_vad {
+
+// Linear predictive coding (LPC) inverse filter length.
+constexpr int kNumLpcCoefficients = 5;
+
+// Given a frame `x`, computes a post-processed version of LPC coefficients
+// tailored for pitch estimation.
+void ComputeAndPostProcessLpcCoefficients(
+ rtc::ArrayView<const float> x,
+ rtc::ArrayView<float, kNumLpcCoefficients> lpc_coeffs);
+
+// Computes the LP residual for the input frame `x` and the LPC coefficients
+// `lpc_coeffs`. `y` and `x` can point to the same array for in-place
+// computation.
+void ComputeLpResidual(
+ rtc::ArrayView<const float, kNumLpcCoefficients> lpc_coeffs,
+ rtc::ArrayView<const float> x,
+ rtc::ArrayView<float> y);
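+//
+// Usage sketch (illustrative; `frame` is any float span with more than
+// `kNumLpcCoefficients` samples):
+//   std::array<float, kNumLpcCoefficients> lpc_coeffs;
+//   ComputeAndPostProcessLpcCoefficients(frame, lpc_coeffs);
+//   ComputeLpResidual(lpc_coeffs, frame, frame);  // In-place computation.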
+
+} // namespace rnn_vad
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AGC2_RNN_VAD_LP_RESIDUAL_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/lp_residual_unittest.cc b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/lp_residual_unittest.cc
new file mode 100644
index 0000000000..7b3a4a3f65
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/lp_residual_unittest.cc
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/agc2/rnn_vad/lp_residual.h"
+
+#include <algorithm>
+#include <array>
+#include <vector>
+
+#include "modules/audio_processing/agc2/rnn_vad/common.h"
+#include "modules/audio_processing/agc2/rnn_vad/test_utils.h"
+// TODO(bugs.webrtc.org/8948): Add when the issue is fixed.
+// #include "test/fpe_observer.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace rnn_vad {
+namespace {
+
+// Checks that the LP residual can be computed on an empty frame.
+TEST(RnnVadTest, LpResidualOfEmptyFrame) {
+ // TODO(bugs.webrtc.org/8948): Add when the issue is fixed.
+ // FloatingPointExceptionObserver fpe_observer;
+
+ // Input frame (empty, i.e., all samples set to 0).
+ std::array<float, kFrameSize10ms24kHz> empty_frame;
+ empty_frame.fill(0.f);
+ // Compute inverse filter coefficients.
+ std::array<float, kNumLpcCoefficients> lpc;
+ ComputeAndPostProcessLpcCoefficients(empty_frame, lpc);
+ // Compute LP residual.
+ std::array<float, kFrameSize10ms24kHz> lp_residual;
+ ComputeLpResidual(lpc, empty_frame, lp_residual);
+}
+
+// Checks that the computed LP residual is bit-exact given test input data.
+TEST(RnnVadTest, LpResidualPipelineBitExactness) {
+ // Input and expected output readers.
+ ChunksFileReader pitch_buffer_reader = CreatePitchBuffer24kHzReader();
+ ChunksFileReader lp_pitch_reader = CreateLpResidualAndPitchInfoReader();
+
+ // Buffers.
+ std::vector<float> pitch_buffer_24kHz(kBufSize24kHz);
+ std::array<float, kNumLpcCoefficients> lpc;
+ std::vector<float> computed_lp_residual(kBufSize24kHz);
+ std::vector<float> expected_lp_residual(kBufSize24kHz);
+
+ // Test length.
+ const int num_frames =
+ std::min(pitch_buffer_reader.num_chunks, 300); // Max 3 s.
+ ASSERT_GE(lp_pitch_reader.num_chunks, num_frames);
+
+ // TODO(bugs.webrtc.org/8948): Add when the issue is fixed.
+ // FloatingPointExceptionObserver fpe_observer;
+ for (int i = 0; i < num_frames; ++i) {
+ SCOPED_TRACE(i);
+ // Read input.
+ ASSERT_TRUE(pitch_buffer_reader.reader->ReadChunk(pitch_buffer_24kHz));
+ // Read expected output (ignore pitch gain and period).
+ ASSERT_TRUE(lp_pitch_reader.reader->ReadChunk(expected_lp_residual));
+ lp_pitch_reader.reader->SeekForward(2); // Pitch period and strength.
+ // Check every 200 ms.
+ if (i % 20 == 0) {
+ ComputeAndPostProcessLpcCoefficients(pitch_buffer_24kHz, lpc);
+ ComputeLpResidual(lpc, pitch_buffer_24kHz, computed_lp_residual);
+ ExpectNearAbsolute(expected_lp_residual, computed_lp_residual, kFloatMin);
+ }
+ }
+}
+
+} // namespace
+} // namespace rnn_vad
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/pitch_search.cc b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/pitch_search.cc
new file mode 100644
index 0000000000..419620fc0c
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/pitch_search.cc
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/agc2/rnn_vad/pitch_search.h"
+
+#include <array>
+#include <cstddef>
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace rnn_vad {
+
+PitchEstimator::PitchEstimator(const AvailableCpuFeatures& cpu_features)
+ : cpu_features_(cpu_features),
+ y_energy_24kHz_(kRefineNumLags24kHz, 0.f),
+ pitch_buffer_12kHz_(kBufSize12kHz),
+ auto_correlation_12kHz_(kNumLags12kHz) {}
+
+PitchEstimator::~PitchEstimator() = default;
+
+int PitchEstimator::Estimate(
+ rtc::ArrayView<const float, kBufSize24kHz> pitch_buffer) {
+ rtc::ArrayView<float, kBufSize12kHz> pitch_buffer_12kHz_view(
+ pitch_buffer_12kHz_.data(), kBufSize12kHz);
+ RTC_DCHECK_EQ(pitch_buffer_12kHz_.size(), pitch_buffer_12kHz_view.size());
+ rtc::ArrayView<float, kNumLags12kHz> auto_correlation_12kHz_view(
+ auto_correlation_12kHz_.data(), kNumLags12kHz);
+ RTC_DCHECK_EQ(auto_correlation_12kHz_.size(),
+ auto_correlation_12kHz_view.size());
+
+ // TODO(bugs.chromium.org/10480): Use `cpu_features_` to estimate pitch.
+ // Perform the initial pitch search at 12 kHz.
+ Decimate2x(pitch_buffer, pitch_buffer_12kHz_view);
+ auto_corr_calculator_.ComputeOnPitchBuffer(pitch_buffer_12kHz_view,
+ auto_correlation_12kHz_view);
+ CandidatePitchPeriods pitch_periods = ComputePitchPeriod12kHz(
+ pitch_buffer_12kHz_view, auto_correlation_12kHz_view, cpu_features_);
+  // The refinement is done using the pitch buffer that contains 24 kHz
+  // samples. Therefore, adapt the inverted lags in `pitch_periods` from 12 to
+  // 24 kHz.
+ pitch_periods.best *= 2;
+ pitch_periods.second_best *= 2;
+
+ // Refine the initial pitch period estimation from 12 kHz to 48 kHz.
+ // Pre-compute frame energies at 24 kHz.
+ rtc::ArrayView<float, kRefineNumLags24kHz> y_energy_24kHz_view(
+ y_energy_24kHz_.data(), kRefineNumLags24kHz);
+ RTC_DCHECK_EQ(y_energy_24kHz_.size(), y_energy_24kHz_view.size());
+ ComputeSlidingFrameSquareEnergies24kHz(pitch_buffer, y_energy_24kHz_view,
+ cpu_features_);
+ // Estimation at 48 kHz.
+ const int pitch_lag_48kHz = ComputePitchPeriod48kHz(
+ pitch_buffer, y_energy_24kHz_view, pitch_periods, cpu_features_);
+ last_pitch_48kHz_ = ComputeExtendedPitchPeriod48kHz(
+ pitch_buffer, y_energy_24kHz_view,
+ /*initial_pitch_period_48kHz=*/kMaxPitch48kHz - pitch_lag_48kHz,
+ last_pitch_48kHz_, cpu_features_);
+ return last_pitch_48kHz_.period;
+}
+
+} // namespace rnn_vad
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/pitch_search.h b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/pitch_search.h
new file mode 100644
index 0000000000..42c448eb56
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/pitch_search.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AGC2_RNN_VAD_PITCH_SEARCH_H_
+#define MODULES_AUDIO_PROCESSING_AGC2_RNN_VAD_PITCH_SEARCH_H_
+
+#include <memory>
+#include <vector>
+
+#include "api/array_view.h"
+#include "modules/audio_processing/agc2/cpu_features.h"
+#include "modules/audio_processing/agc2/rnn_vad/auto_correlation.h"
+#include "modules/audio_processing/agc2/rnn_vad/common.h"
+#include "modules/audio_processing/agc2/rnn_vad/pitch_search_internal.h"
+#include "rtc_base/gtest_prod_util.h"
+
+namespace webrtc {
+namespace rnn_vad {
+
+// Pitch estimator.
+class PitchEstimator {
+ public:
+ explicit PitchEstimator(const AvailableCpuFeatures& cpu_features);
+ PitchEstimator(const PitchEstimator&) = delete;
+ PitchEstimator& operator=(const PitchEstimator&) = delete;
+ ~PitchEstimator();
+ // Returns the estimated pitch period at 48 kHz.
+ int Estimate(rtc::ArrayView<const float, kBufSize24kHz> pitch_buffer);
+
+ private:
+ FRIEND_TEST_ALL_PREFIXES(RnnVadTest, PitchSearchWithinTolerance);
+ float GetLastPitchStrengthForTesting() const {
+ return last_pitch_48kHz_.strength;
+ }
+
+ const AvailableCpuFeatures cpu_features_;
+ PitchInfo last_pitch_48kHz_{};
+ AutoCorrelationCalculator auto_corr_calculator_;
+ std::vector<float> y_energy_24kHz_;
+ std::vector<float> pitch_buffer_12kHz_;
+ std::vector<float> auto_correlation_12kHz_;
+};
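+
+// Usage sketch (illustrative):
+//
+//   PitchEstimator estimator(GetAvailableCpuFeatures());
+//   // `pitch_buffer_24kHz` holds kBufSize24kHz samples at 24 kHz.
+//   int pitch_period_48kHz = estimator.Estimate(pitch_buffer_24kHz);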
+
+} // namespace rnn_vad
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AGC2_RNN_VAD_PITCH_SEARCH_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/pitch_search_internal.cc b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/pitch_search_internal.cc
new file mode 100644
index 0000000000..e8c912518d
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/pitch_search_internal.cc
@@ -0,0 +1,513 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/agc2/rnn_vad/pitch_search_internal.h"
+
+#include <stdlib.h>
+
+#include <algorithm>
+#include <cmath>
+#include <cstddef>
+#include <numeric>
+
+#include "modules/audio_processing/agc2/rnn_vad/common.h"
+#include "modules/audio_processing/agc2/rnn_vad/vector_math.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/numerics/safe_compare.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "rtc_base/system/arch.h"
+
+namespace webrtc {
+namespace rnn_vad {
+namespace {
+
+float ComputeAutoCorrelation(
+ int inverted_lag,
+ rtc::ArrayView<const float, kBufSize24kHz> pitch_buffer,
+ const VectorMath& vector_math) {
+ RTC_DCHECK_LT(inverted_lag, kBufSize24kHz);
+ RTC_DCHECK_LT(inverted_lag, kRefineNumLags24kHz);
+ static_assert(kMaxPitch24kHz < kBufSize24kHz, "");
+ return vector_math.DotProduct(
+ pitch_buffer.subview(/*offset=*/kMaxPitch24kHz),
+ pitch_buffer.subview(inverted_lag, kFrameSize20ms24kHz));
+}
+
+// Given an auto-correlation coefficient `curr_auto_correlation` and its
+// neighboring values `prev_auto_correlation` and `next_auto_correlation`,
+// computes a pseudo-interpolation offset to be applied to the pitch period
+// associated with `curr_auto_correlation`. The output is an offset in
+// {-1, 0, +1}.
+// TODO(bugs.webrtc.org/9076): Consider removing this method.
+// `GetPitchPseudoInterpolationOffset()` is relevant only if the spectral
+// analysis works at a sample rate that is twice that of the pitch buffer; in
+// particular, it is not relevant for the estimated pitch period feature fed
+// into the RNN.
+int GetPitchPseudoInterpolationOffset(float prev_auto_correlation,
+ float curr_auto_correlation,
+ float next_auto_correlation) {
+ if ((next_auto_correlation - prev_auto_correlation) >
+ 0.7f * (curr_auto_correlation - prev_auto_correlation)) {
+ return 1; // `next_auto_correlation` is the largest auto-correlation
+ // coefficient.
+ } else if ((prev_auto_correlation - next_auto_correlation) >
+ 0.7f * (curr_auto_correlation - next_auto_correlation)) {
+ return -1; // `prev_auto_correlation` is the largest auto-correlation
+ // coefficient.
+ }
+ return 0;
+}
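+
+// Worked example (illustrative values): with `prev_auto_correlation` = 0.2,
+// `curr_auto_correlation` = 0.5 and `next_auto_correlation` = 0.45, one gets
+// 0.45 - 0.2 = 0.25 > 0.7 * (0.5 - 0.2) = 0.21, hence the returned offset is
+// +1 and the interpolated peak leans towards `next_auto_correlation`.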
+
+// Refines a pitch period `lag` (encoded as a lag) with pseudo-interpolation.
+// The output sample rate is twice that of `lag`.
+int PitchPseudoInterpolationLagPitchBuf(
+ int lag,
+ rtc::ArrayView<const float, kBufSize24kHz> pitch_buffer,
+ const VectorMath& vector_math) {
+ int offset = 0;
+ // Cannot apply pseudo-interpolation at the boundaries.
+ if (lag > 0 && lag < kMaxPitch24kHz) {
+ const int inverted_lag = kMaxPitch24kHz - lag;
+ offset = GetPitchPseudoInterpolationOffset(
+ ComputeAutoCorrelation(inverted_lag + 1, pitch_buffer, vector_math),
+ ComputeAutoCorrelation(inverted_lag, pitch_buffer, vector_math),
+ ComputeAutoCorrelation(inverted_lag - 1, pitch_buffer, vector_math));
+ }
+ return 2 * lag + offset;
+}
+
+// Integer multipliers used in ComputeExtendedPitchPeriod48kHz() when
+// looking for sub-harmonics.
+// The values have been chosen to serve the following algorithm. Given the
+// initial pitch period T, we examine whether one of its harmonics is the true
+// fundamental frequency. We consider T/k with k in {2, ..., 15}. For each of
+// these harmonics, in addition to the pitch strength of itself, we choose one
+// multiple of its pitch period, n*T/k, to validate it (by averaging their pitch
+// strengths). The multiplier n is chosen so that n*T/k is used only one time
+// over all k. When for example k = 4, we should also expect a peak at 3*T/4.
+// When k = 8 instead we don't want to look at 2*T/8, since we have already
+// checked T/4 before. Instead, we look at T*3/8.
+// The array can be generated in Python as follows:
+// from fractions import Fraction
+// # Smallest positive integer not in X.
+// def mex(X):
+// for i in range(1, int(max(X)+2)):
+// if i not in X:
+// return i
+// # Visited multiples of the period.
+// S = {1}
+// for n in range(2, 16):
+// sn = mex({n * i for i in S} | {1})
+// S = S | {Fraction(1, n), Fraction(sn, n)}
+// print(sn, end=', ')
+constexpr std::array<int, 14> kSubHarmonicMultipliers = {
+ {3, 2, 3, 2, 5, 2, 3, 2, 3, 2, 5, 2, 3, 2}};
+
+struct Range {
+ int min;
+ int max;
+};
+
+// Number of analyzed pitches to the left (right) of a pitch candidate.
+constexpr int kPitchNeighborhoodRadius = 2;
+
+// Creates a pitch period interval centered in `inverted_lag` with hard-coded
+// radius. Clipping is applied so that the interval is always valid for a 24 kHz
+// pitch buffer.
+Range CreateInvertedLagRange(int inverted_lag) {
+ return {std::max(inverted_lag - kPitchNeighborhoodRadius, 0),
+ std::min(inverted_lag + kPitchNeighborhoodRadius,
+ kInitialNumLags24kHz - 1)};
+}
+
+constexpr int kNumPitchCandidates = 2; // Best and second best.
+// Maximum number of analyzed pitch periods.
+constexpr int kMaxPitchPeriods24kHz =
+ kNumPitchCandidates * (2 * kPitchNeighborhoodRadius + 1);
+
+// Collection of inverted lags.
+class InvertedLagsIndex {
+ public:
+ InvertedLagsIndex() : num_entries_(0) {}
+ // Adds an inverted lag to the index. Cannot add more than
+ // `kMaxPitchPeriods24kHz` values.
+ void Append(int inverted_lag) {
+ RTC_DCHECK_LT(num_entries_, kMaxPitchPeriods24kHz);
+ inverted_lags_[num_entries_++] = inverted_lag;
+ }
+ const int* data() const { return inverted_lags_.data(); }
+ int size() const { return num_entries_; }
+
+ private:
+ std::array<int, kMaxPitchPeriods24kHz> inverted_lags_;
+ int num_entries_;
+};
+
+// Computes the auto-correlation coefficients for the inverted lags in the
+// closed interval `inverted_lags`. Updates `inverted_lags_index` by appending
+// the inverted lags for the computed auto-correlation values.
+void ComputeAutoCorrelation(
+ Range inverted_lags,
+ rtc::ArrayView<const float, kBufSize24kHz> pitch_buffer,
+ rtc::ArrayView<float, kInitialNumLags24kHz> auto_correlation,
+ InvertedLagsIndex& inverted_lags_index,
+ const VectorMath& vector_math) {
+ // Check valid range.
+ RTC_DCHECK_LE(inverted_lags.min, inverted_lags.max);
+  // Zero the neighbors of the interval boundaries, since the
+  // pseudo-interpolation reads them; this avoids a full zero initialization
+  // of `auto_correlation`.
+ if (inverted_lags.min > 0) {
+ auto_correlation[inverted_lags.min - 1] = 0.f;
+ }
+ if (inverted_lags.max < kInitialNumLags24kHz - 1) {
+ auto_correlation[inverted_lags.max + 1] = 0.f;
+ }
+ // Check valid `inverted_lag` indexes.
+ RTC_DCHECK_GE(inverted_lags.min, 0);
+ RTC_DCHECK_LT(inverted_lags.max, kInitialNumLags24kHz);
+ for (int inverted_lag = inverted_lags.min; inverted_lag <= inverted_lags.max;
+ ++inverted_lag) {
+ auto_correlation[inverted_lag] =
+ ComputeAutoCorrelation(inverted_lag, pitch_buffer, vector_math);
+ inverted_lags_index.Append(inverted_lag);
+ }
+}
+
+// Searches the strongest pitch period at 24 kHz and returns its inverted lag at
+// 48 kHz.
+int ComputePitchPeriod48kHz(
+ rtc::ArrayView<const float, kBufSize24kHz> pitch_buffer,
+ rtc::ArrayView<const int> inverted_lags,
+ rtc::ArrayView<const float, kInitialNumLags24kHz> auto_correlation,
+ rtc::ArrayView<const float, kRefineNumLags24kHz> y_energy,
+ const VectorMath& vector_math) {
+ static_assert(kMaxPitch24kHz > kInitialNumLags24kHz, "");
+ static_assert(kMaxPitch24kHz < kBufSize24kHz, "");
+ int best_inverted_lag = 0; // Pitch period.
+ float best_numerator = -1.f; // Pitch strength numerator.
+ float best_denominator = 0.f; // Pitch strength denominator.
+ for (int inverted_lag : inverted_lags) {
+ // A pitch candidate must have positive correlation.
+ if (auto_correlation[inverted_lag] > 0.f) {
+ // Auto-correlation energy normalized by frame energy.
+ const float numerator =
+ auto_correlation[inverted_lag] * auto_correlation[inverted_lag];
+ const float denominator = y_energy[inverted_lag];
+ // Compare numerator/denominator ratios without using divisions.
+ if (numerator * best_denominator > best_numerator * denominator) {
+ best_inverted_lag = inverted_lag;
+ best_numerator = numerator;
+ best_denominator = denominator;
+ }
+ }
+ }
+ // Pseudo-interpolation to transform `best_inverted_lag` (24 kHz pitch) to a
+ // 48 kHz pitch period.
+ if (best_inverted_lag == 0 || best_inverted_lag >= kInitialNumLags24kHz - 1) {
+ // Cannot apply pseudo-interpolation at the boundaries.
+ return best_inverted_lag * 2;
+ }
+ int offset = GetPitchPseudoInterpolationOffset(
+ auto_correlation[best_inverted_lag + 1],
+ auto_correlation[best_inverted_lag],
+ auto_correlation[best_inverted_lag - 1]);
+ // TODO(bugs.webrtc.org/9076): When retraining, check if `offset` below should
+ // be subtracted since `inverted_lag` is an inverted lag but offset is a lag.
+ return 2 * best_inverted_lag + offset;
+}
+
+// Returns an alternative pitch period for `pitch_period` given a `multiplier`
+// and a `divisor` of the period.
+constexpr int GetAlternativePitchPeriod(int pitch_period,
+ int multiplier,
+ int divisor) {
+ RTC_DCHECK_GT(divisor, 0);
+ // Same as `round(multiplier * pitch_period / divisor)`.
+ return (2 * multiplier * pitch_period + divisor) / (2 * divisor);
+}
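+
+// Worked example (illustrative values): GetAlternativePitchPeriod(7, 1, 2)
+// returns (2 * 1 * 7 + 2) / (2 * 2) = 16 / 4 = 4, i.e., 3.5 rounded up.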
+
+// Returns true if the alternative pitch period is stronger than the initial one
+// given the last estimated pitch and the value of `period_divisor` used to
+// compute the alternative pitch period via `GetAlternativePitchPeriod()`.
+bool IsAlternativePitchStrongerThanInitial(PitchInfo last,
+ PitchInfo initial,
+ PitchInfo alternative,
+ int period_divisor) {
+  // Initial pitch period candidate thresholds for a sample rate of 24 kHz.
+  // Computed as [5*k*k for k in range(2, 16)].
+ constexpr std::array<int, 14> kInitialPitchPeriodThresholds = {
+ {20, 45, 80, 125, 180, 245, 320, 405, 500, 605, 720, 845, 980, 1125}};
+ static_assert(
+ kInitialPitchPeriodThresholds.size() == kSubHarmonicMultipliers.size(),
+ "");
+ RTC_DCHECK_GE(last.period, 0);
+ RTC_DCHECK_GE(initial.period, 0);
+ RTC_DCHECK_GE(alternative.period, 0);
+ RTC_DCHECK_GE(period_divisor, 2);
+ // Compute a term that lowers the threshold when `alternative.period` is close
+ // to the last estimated period `last.period` - i.e., pitch tracking.
+ float lower_threshold_term = 0.f;
+ if (std::abs(alternative.period - last.period) <= 1) {
+ // The candidate pitch period is within 1 sample from the last one.
+ // Make the candidate at `alternative.period` very easy to be accepted.
+ lower_threshold_term = last.strength;
+ } else if (std::abs(alternative.period - last.period) == 2 &&
+ initial.period >
+ kInitialPitchPeriodThresholds[period_divisor - 2]) {
+ // The candidate pitch period is 2 samples far from the last one and the
+ // period `initial.period` (from which `alternative.period` has been
+ // derived) is greater than a threshold. Make `alternative.period` easy to
+ // be accepted.
+ lower_threshold_term = 0.5f * last.strength;
+ }
+ // Set the threshold based on the strength of the initial estimate
+ // `initial.period`. Also reduce the chance of false positives caused by a
+ // bias towards high frequencies (originating from short-term correlations).
+ float threshold =
+ std::max(0.3f, 0.7f * initial.strength - lower_threshold_term);
+  // Check the stricter condition first, otherwise the branch below would be
+  // unreachable: any period below 2 * kMinPitch24kHz is also below
+  // 3 * kMinPitch24kHz.
+  if (alternative.period < 2 * kMinPitch24kHz) {
+    // Even higher frequency.
+    threshold = std::max(0.5f, 0.9f * initial.strength - lower_threshold_term);
+  } else if (alternative.period < 3 * kMinPitch24kHz) {
+    // High frequency.
+    threshold = std::max(0.4f, 0.85f * initial.strength - lower_threshold_term);
+  }
+ return alternative.strength > threshold;
+}
+
+} // namespace
+
+void Decimate2x(rtc::ArrayView<const float, kBufSize24kHz> src,
+ rtc::ArrayView<float, kBufSize12kHz> dst) {
+ // TODO(bugs.webrtc.org/9076): Consider adding anti-aliasing filter.
+ static_assert(2 * kBufSize12kHz == kBufSize24kHz, "");
+ for (int i = 0; i < kBufSize12kHz; ++i) {
+ dst[i] = src[2 * i];
+ }
+}
+
+void ComputeSlidingFrameSquareEnergies24kHz(
+ rtc::ArrayView<const float, kBufSize24kHz> pitch_buffer,
+ rtc::ArrayView<float, kRefineNumLags24kHz> y_energy,
+ AvailableCpuFeatures cpu_features) {
+ VectorMath vector_math(cpu_features);
+ static_assert(kFrameSize20ms24kHz < kBufSize24kHz, "");
+ const auto frame_20ms_view = pitch_buffer.subview(0, kFrameSize20ms24kHz);
+ float yy = vector_math.DotProduct(frame_20ms_view, frame_20ms_view);
+ y_energy[0] = yy;
+ static_assert(kMaxPitch24kHz - 1 + kFrameSize20ms24kHz < kBufSize24kHz, "");
+ static_assert(kMaxPitch24kHz < kRefineNumLags24kHz, "");
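+  // Slide the 20 ms frame one sample at a time: subtract the contribution of
+  // the sample that leaves the frame and add that of the sample that enters
+  // it, so each energy costs O(1) instead of a full dot product.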
+ for (int inverted_lag = 0; inverted_lag < kMaxPitch24kHz; ++inverted_lag) {
+ yy -= pitch_buffer[inverted_lag] * pitch_buffer[inverted_lag];
+ yy += pitch_buffer[inverted_lag + kFrameSize20ms24kHz] *
+ pitch_buffer[inverted_lag + kFrameSize20ms24kHz];
+ yy = std::max(1.f, yy);
+ y_energy[inverted_lag + 1] = yy;
+ }
+}
+
+CandidatePitchPeriods ComputePitchPeriod12kHz(
+ rtc::ArrayView<const float, kBufSize12kHz> pitch_buffer,
+ rtc::ArrayView<const float, kNumLags12kHz> auto_correlation,
+ AvailableCpuFeatures cpu_features) {
+ static_assert(kMaxPitch12kHz > kNumLags12kHz, "");
+ static_assert(kMaxPitch12kHz < kBufSize12kHz, "");
+
+ // Stores a pitch candidate period and strength information.
+ struct PitchCandidate {
+ // Pitch period encoded as inverted lag.
+ int period_inverted_lag = 0;
+ // Pitch strength encoded as a ratio.
+ float strength_numerator = -1.f;
+ float strength_denominator = 0.f;
+ // Compare the strength of two pitch candidates.
+ bool HasStrongerPitchThan(const PitchCandidate& b) const {
+ // Comparing the numerator/denominator ratios without using divisions.
+ return strength_numerator * b.strength_denominator >
+ b.strength_numerator * strength_denominator;
+ }
+ };
+
+ VectorMath vector_math(cpu_features);
+ static_assert(kFrameSize20ms12kHz + 1 < kBufSize12kHz, "");
+ const auto frame_view = pitch_buffer.subview(0, kFrameSize20ms12kHz + 1);
+ float denominator = 1.f + vector_math.DotProduct(frame_view, frame_view);
+ // Search best and second best pitches by looking at the scaled
+ // auto-correlation.
+ PitchCandidate best;
+ PitchCandidate second_best;
+ second_best.period_inverted_lag = 1;
+ for (int inverted_lag = 0; inverted_lag < kNumLags12kHz; ++inverted_lag) {
+ // A pitch candidate must have positive correlation.
+ if (auto_correlation[inverted_lag] > 0.f) {
+ PitchCandidate candidate{
+ inverted_lag,
+ auto_correlation[inverted_lag] * auto_correlation[inverted_lag],
+ denominator};
+ if (candidate.HasStrongerPitchThan(second_best)) {
+ if (candidate.HasStrongerPitchThan(best)) {
+ second_best = best;
+ best = candidate;
+ } else {
+ second_best = candidate;
+ }
+ }
+ }
+    // Update `denominator` (the sliding frame energy) for the next inverted
+    // lag.
+ const float y_old = pitch_buffer[inverted_lag];
+ const float y_new = pitch_buffer[inverted_lag + kFrameSize20ms12kHz];
+ denominator -= y_old * y_old;
+ denominator += y_new * y_new;
+ denominator = std::max(0.f, denominator);
+ }
+ return {best.period_inverted_lag, second_best.period_inverted_lag};
+}
+
+int ComputePitchPeriod48kHz(
+ rtc::ArrayView<const float, kBufSize24kHz> pitch_buffer,
+ rtc::ArrayView<const float, kRefineNumLags24kHz> y_energy,
+ CandidatePitchPeriods pitch_candidates,
+ AvailableCpuFeatures cpu_features) {
+ // Compute the auto-correlation terms only for neighbors of the two pitch
+ // candidates (best and second best).
+ std::array<float, kInitialNumLags24kHz> auto_correlation;
+ InvertedLagsIndex inverted_lags_index;
+ // Create two inverted lag ranges so that `r1` precedes `r2`.
+ const bool swap_candidates =
+ pitch_candidates.best > pitch_candidates.second_best;
+ const Range r1 = CreateInvertedLagRange(
+ swap_candidates ? pitch_candidates.second_best : pitch_candidates.best);
+ const Range r2 = CreateInvertedLagRange(
+ swap_candidates ? pitch_candidates.best : pitch_candidates.second_best);
+ // Check valid ranges.
+ RTC_DCHECK_LE(r1.min, r1.max);
+ RTC_DCHECK_LE(r2.min, r2.max);
+ // Check `r1` precedes `r2`.
+ RTC_DCHECK_LE(r1.min, r2.min);
+ RTC_DCHECK_LE(r1.max, r2.max);
+ VectorMath vector_math(cpu_features);
+ if (r1.max + 1 >= r2.min) {
+ // Overlapping or adjacent ranges.
+ ComputeAutoCorrelation({r1.min, r2.max}, pitch_buffer, auto_correlation,
+ inverted_lags_index, vector_math);
+ } else {
+ // Disjoint ranges.
+ ComputeAutoCorrelation(r1, pitch_buffer, auto_correlation,
+ inverted_lags_index, vector_math);
+ ComputeAutoCorrelation(r2, pitch_buffer, auto_correlation,
+ inverted_lags_index, vector_math);
+ }
+ return ComputePitchPeriod48kHz(pitch_buffer, inverted_lags_index,
+ auto_correlation, y_energy, vector_math);
+}
+
+PitchInfo ComputeExtendedPitchPeriod48kHz(
+ rtc::ArrayView<const float, kBufSize24kHz> pitch_buffer,
+ rtc::ArrayView<const float, kRefineNumLags24kHz> y_energy,
+ int initial_pitch_period_48kHz,
+ PitchInfo last_pitch_48kHz,
+ AvailableCpuFeatures cpu_features) {
+ RTC_DCHECK_LE(kMinPitch48kHz, initial_pitch_period_48kHz);
+ RTC_DCHECK_LE(initial_pitch_period_48kHz, kMaxPitch48kHz);
+
+ // Stores information for a refined pitch candidate.
+ struct RefinedPitchCandidate {
+ int period;
+ float strength;
+ // Additional strength data used for the final pitch estimation.
+ float xy; // Auto-correlation.
+ float y_energy; // Energy of the sliding frame `y`.
+ };
+
+ const float x_energy = y_energy[kMaxPitch24kHz];
+ const auto pitch_strength = [x_energy](float xy, float y_energy) {
+ RTC_DCHECK_GE(x_energy * y_energy, 0.f);
+ return xy / std::sqrt(1.f + x_energy * y_energy);
+ };
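+  // `pitch_strength` computes a normalized auto-correlation; the `1.f` term
+  // keeps the denominator strictly positive for all-zero frames.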
+ VectorMath vector_math(cpu_features);
+
+ // Initialize the best pitch candidate with `initial_pitch_period_48kHz`.
+ RefinedPitchCandidate best_pitch;
+ best_pitch.period =
+ std::min(initial_pitch_period_48kHz / 2, kMaxPitch24kHz - 1);
+ best_pitch.xy = ComputeAutoCorrelation(kMaxPitch24kHz - best_pitch.period,
+ pitch_buffer, vector_math);
+ best_pitch.y_energy = y_energy[kMaxPitch24kHz - best_pitch.period];
+ best_pitch.strength = pitch_strength(best_pitch.xy, best_pitch.y_energy);
+ // Keep a copy of the initial pitch candidate.
+ const PitchInfo initial_pitch{best_pitch.period, best_pitch.strength};
+ // 24 kHz version of the last estimated pitch.
+ const PitchInfo last_pitch{last_pitch_48kHz.period / 2,
+ last_pitch_48kHz.strength};
+
+ // Find `max_period_divisor` such that the result of
+ // `GetAlternativePitchPeriod(initial_pitch_period, 1, max_period_divisor)`
+ // equals `kMinPitch24kHz`.
+ const int max_period_divisor =
+ (2 * initial_pitch.period) / (2 * kMinPitch24kHz - 1);
+ for (int period_divisor = 2; period_divisor <= max_period_divisor;
+ ++period_divisor) {
+ PitchInfo alternative_pitch;
+ alternative_pitch.period = GetAlternativePitchPeriod(
+ initial_pitch.period, /*multiplier=*/1, period_divisor);
+ RTC_DCHECK_GE(alternative_pitch.period, kMinPitch24kHz);
+ // When looking at `alternative_pitch.period`, we also look at one of its
+ // sub-harmonics. `kSubHarmonicMultipliers` is used to know where to look.
+ // `period_divisor` == 2 is a special case since `dual_alternative_period`
+ // might be greater than the maximum pitch period.
+ int dual_alternative_period = GetAlternativePitchPeriod(
+ initial_pitch.period, kSubHarmonicMultipliers[period_divisor - 2],
+ period_divisor);
+ RTC_DCHECK_GT(dual_alternative_period, 0);
+ if (period_divisor == 2 && dual_alternative_period > kMaxPitch24kHz) {
+ dual_alternative_period = initial_pitch.period;
+ }
+ RTC_DCHECK_NE(alternative_pitch.period, dual_alternative_period)
+ << "The lower pitch period and the additional sub-harmonic must not "
+ "coincide.";
+ // Compute an auto-correlation score for the primary pitch candidate
+ // `alternative_pitch.period` by also looking at its possible sub-harmonic
+ // `dual_alternative_period`.
+ const float xy_primary_period = ComputeAutoCorrelation(
+ kMaxPitch24kHz - alternative_pitch.period, pitch_buffer, vector_math);
+ // TODO(webrtc:10480): Copy `xy_primary_period` if the secondary period is
+ // equal to the primary one.
+ const float xy_secondary_period = ComputeAutoCorrelation(
+ kMaxPitch24kHz - dual_alternative_period, pitch_buffer, vector_math);
+ const float xy = 0.5f * (xy_primary_period + xy_secondary_period);
+ const float yy =
+ 0.5f * (y_energy[kMaxPitch24kHz - alternative_pitch.period] +
+ y_energy[kMaxPitch24kHz - dual_alternative_period]);
+ alternative_pitch.strength = pitch_strength(xy, yy);
+
+ // Maybe update best period.
+ if (IsAlternativePitchStrongerThanInitial(
+ last_pitch, initial_pitch, alternative_pitch, period_divisor)) {
+ best_pitch = {alternative_pitch.period, alternative_pitch.strength, xy,
+ yy};
+ }
+ }
+
+ // Final pitch strength and period.
+ best_pitch.xy = std::max(0.f, best_pitch.xy);
+ RTC_DCHECK_LE(0.f, best_pitch.y_energy);
+ float final_pitch_strength =
+ (best_pitch.y_energy <= best_pitch.xy)
+ ? 1.f
+ : best_pitch.xy / (best_pitch.y_energy + 1.f);
+ final_pitch_strength = std::min(best_pitch.strength, final_pitch_strength);
+ int final_pitch_period_48kHz = std::max(
+ kMinPitch48kHz, PitchPseudoInterpolationLagPitchBuf(
+ best_pitch.period, pitch_buffer, vector_math));
+
+ return {final_pitch_period_48kHz, final_pitch_strength};
+}
+
+} // namespace rnn_vad
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/pitch_search_internal.h b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/pitch_search_internal.h
new file mode 100644
index 0000000000..aa2dd13745
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/pitch_search_internal.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AGC2_RNN_VAD_PITCH_SEARCH_INTERNAL_H_
+#define MODULES_AUDIO_PROCESSING_AGC2_RNN_VAD_PITCH_SEARCH_INTERNAL_H_
+
+#include <stddef.h>
+
+#include <array>
+#include <utility>
+
+#include "api/array_view.h"
+#include "modules/audio_processing/agc2/cpu_features.h"
+#include "modules/audio_processing/agc2/rnn_vad/common.h"
+
+namespace webrtc {
+namespace rnn_vad {
+
+// Performs 2x decimation without any anti-aliasing filter.
+void Decimate2x(rtc::ArrayView<const float, kBufSize24kHz> src,
+ rtc::ArrayView<float, kBufSize12kHz> dst);
+
+// Key concepts and keywords used below in this file.
+//
+// The pitch estimation relies on a pitch buffer, which is an array-like data
+// structure designed as follows:
+//
+// |....A....|.....B.....|
+//
+// The part on the left, named `A` contains the oldest samples, whereas `B`
+// contains the most recent ones. The size of `A` corresponds to the maximum
+// pitch period, that of `B` to the analysis frame size (e.g., 16 ms and 20 ms
+// respectively).
+//
+// Pitch estimation is essentially based on the analysis of two 20 ms frames
+// extracted from the pitch buffer. One frame, called `x`, is kept fixed and
+// corresponds to `B` - i.e., the most recent 20 ms. The other frame, called
+// `y`, is extracted from different parts of the buffer instead.
+//
+// The offset between `x` and `y` corresponds to a specific pitch period.
+// For instance, if `y` is positioned at the beginning of the pitch buffer, then
+// the cross-correlation between `x` and `y` can be used as an indication of
+// the strength of the maximum pitch period.
+//
+// Such an offset can be encoded in two ways:
+// - As a lag, which is the index in the pitch buffer of the first item in `y`
+// - As an inverted lag, which is the number of samples between the beginning
+//   of `x` and the end of `y`
+//
+// |---->| lag
+// |....A....|.....B.....|
+// |<--| inverted lag
+// |.....y.....| `y` 20 ms frame
+//
+// The inverted lag has the advantage of being directly proportional to the
+// corresponding pitch period.
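+//
+// Concretely (shown here for illustration only), the two encodings are
+// related by a single subtraction; for the 24 kHz pitch buffer:
+//
+//   inverted_lag = kMaxPitch24kHz - lag
+//
+// i.e., the inverted lag measures the pitch period in samples.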
+
+// Computes the sum of squared samples for every sliding frame `y` in the pitch
+// buffer. The indexes of `y_energy` are inverted lags.
+void ComputeSlidingFrameSquareEnergies24kHz(
+ rtc::ArrayView<const float, kBufSize24kHz> pitch_buffer,
+ rtc::ArrayView<float, kRefineNumLags24kHz> y_energy,
+ AvailableCpuFeatures cpu_features);
+
+// Top-2 pitch period candidates. Unit: number of samples - i.e., inverted lags.
+struct CandidatePitchPeriods {
+ int best;
+ int second_best;
+};
+
+// Computes the candidate pitch periods at 12 kHz given a view on the 12 kHz
+// pitch buffer and the auto-correlation values (having inverted lags as
+// indexes).
+CandidatePitchPeriods ComputePitchPeriod12kHz(
+ rtc::ArrayView<const float, kBufSize12kHz> pitch_buffer,
+ rtc::ArrayView<const float, kNumLags12kHz> auto_correlation,
+ AvailableCpuFeatures cpu_features);
+
+// Computes the pitch period at 48 kHz given a view on the 24 kHz pitch buffer,
+// the energies for the sliding frames `y` at 24 kHz and the pitch period
+// candidates at 24 kHz (encoded as inverted lag).
+int ComputePitchPeriod48kHz(
+ rtc::ArrayView<const float, kBufSize24kHz> pitch_buffer,
+ rtc::ArrayView<const float, kRefineNumLags24kHz> y_energy,
+ CandidatePitchPeriods pitch_candidates_24kHz,
+ AvailableCpuFeatures cpu_features);
+
+struct PitchInfo {
+ int period;
+ float strength;
+};
+
+// Computes the pitch period at 48 kHz searching in an extended pitch range
+// given a view on the 24 kHz pitch buffer, the energies for the sliding frames
+// `y` at 24 kHz, the initial 48 kHz estimation (computed by
+// `ComputePitchPeriod48kHz()`) and the last estimated pitch.
+PitchInfo ComputeExtendedPitchPeriod48kHz(
+ rtc::ArrayView<const float, kBufSize24kHz> pitch_buffer,
+ rtc::ArrayView<const float, kRefineNumLags24kHz> y_energy,
+ int initial_pitch_period_48kHz,
+ PitchInfo last_pitch_48kHz,
+ AvailableCpuFeatures cpu_features);
+
+} // namespace rnn_vad
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AGC2_RNN_VAD_PITCH_SEARCH_INTERNAL_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/pitch_search_internal_unittest.cc b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/pitch_search_internal_unittest.cc
new file mode 100644
index 0000000000..2a6e68f157
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/pitch_search_internal_unittest.cc
@@ -0,0 +1,217 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/agc2/rnn_vad/pitch_search_internal.h"
+
+#include <array>
+#include <string>
+#include <tuple>
+
+#include "modules/audio_processing/agc2/rnn_vad/test_utils.h"
+#include "rtc_base/strings/string_builder.h"
+// TODO(bugs.webrtc.org/8948): Add when the issue is fixed.
+// #include "test/fpe_observer.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace rnn_vad {
+namespace {
+
+constexpr int kTestPitchPeriodsLow = 3 * kMinPitch48kHz / 2;
+constexpr int kTestPitchPeriodsHigh = (3 * kMinPitch48kHz + kMaxPitch48kHz) / 2;
+
+constexpr float kTestPitchStrengthLow = 0.35f;
+constexpr float kTestPitchStrengthHigh = 0.75f;
+
+template <class T>
+std::string PrintTestIndexAndCpuFeatures(
+ const ::testing::TestParamInfo<T>& info) {
+ rtc::StringBuilder builder;
+ builder << info.index << "_" << info.param.cpu_features.ToString();
+ return builder.str();
+}
+
+// Finds the relevant CPU feature combinations to test.
+std::vector<AvailableCpuFeatures> GetCpuFeaturesToTest() {
+ std::vector<AvailableCpuFeatures> v;
+ v.push_back(NoAvailableCpuFeatures());
+ AvailableCpuFeatures available = GetAvailableCpuFeatures();
+ if (available.avx2) {
+ v.push_back({/*sse2=*/false, /*avx2=*/true, /*neon=*/false});
+ }
+ if (available.sse2) {
+ v.push_back({/*sse2=*/true, /*avx2=*/false, /*neon=*/false});
+ }
+ return v;
+}
+
+// Checks that the frame-wise sliding square energy function produces output
+// within tolerance given test input data.
+TEST(RnnVadTest, ComputeSlidingFrameSquareEnergies24kHzWithinTolerance) {
+ const AvailableCpuFeatures cpu_features = GetAvailableCpuFeatures();
+
+ PitchTestData test_data;
+ std::array<float, kRefineNumLags24kHz> computed_output;
+ // TODO(bugs.webrtc.org/8948): Add when the issue is fixed.
+ // FloatingPointExceptionObserver fpe_observer;
+ ComputeSlidingFrameSquareEnergies24kHz(test_data.PitchBuffer24kHzView(),
+ computed_output, cpu_features);
+ auto square_energies_view = test_data.SquareEnergies24kHzView();
+ ExpectNearAbsolute({square_energies_view.data(), square_energies_view.size()},
+ computed_output, 1e-3f);
+}
+
+// Checks that the estimated pitch period is bit-exact given test input data.
+TEST(RnnVadTest, ComputePitchPeriod12kHzBitExactness) {
+ const AvailableCpuFeatures cpu_features = GetAvailableCpuFeatures();
+
+ PitchTestData test_data;
+ std::array<float, kBufSize12kHz> pitch_buf_decimated;
+ Decimate2x(test_data.PitchBuffer24kHzView(), pitch_buf_decimated);
+ CandidatePitchPeriods pitch_candidates;
+ // TODO(bugs.webrtc.org/8948): Add when the issue is fixed.
+ // FloatingPointExceptionObserver fpe_observer;
+ pitch_candidates = ComputePitchPeriod12kHz(
+ pitch_buf_decimated, test_data.AutoCorrelation12kHzView(), cpu_features);
+ EXPECT_EQ(pitch_candidates.best, 140);
+ EXPECT_EQ(pitch_candidates.second_best, 142);
+}
+
+// Checks that the refined pitch period is bit-exact given test input data.
+TEST(RnnVadTest, ComputePitchPeriod48kHzBitExactness) {
+ const AvailableCpuFeatures cpu_features = GetAvailableCpuFeatures();
+
+ PitchTestData test_data;
+ std::vector<float> y_energy(kRefineNumLags24kHz);
+ rtc::ArrayView<float, kRefineNumLags24kHz> y_energy_view(y_energy.data(),
+ kRefineNumLags24kHz);
+ ComputeSlidingFrameSquareEnergies24kHz(test_data.PitchBuffer24kHzView(),
+ y_energy_view, cpu_features);
+ // TODO(bugs.webrtc.org/8948): Add when the issue is fixed.
+ // FloatingPointExceptionObserver fpe_observer;
+ EXPECT_EQ(
+ ComputePitchPeriod48kHz(test_data.PitchBuffer24kHzView(), y_energy_view,
+ /*pitch_candidates=*/{280, 284}, cpu_features),
+ 560);
+ EXPECT_EQ(
+ ComputePitchPeriod48kHz(test_data.PitchBuffer24kHzView(), y_energy_view,
+ /*pitch_candidates=*/{260, 284}, cpu_features),
+ 568);
+}
+
+struct PitchCandidatesParameters {
+ CandidatePitchPeriods pitch_candidates;
+ AvailableCpuFeatures cpu_features;
+};
+
+class PitchCandidatesParametrization
+ : public ::testing::TestWithParam<PitchCandidatesParameters> {};
+
+// Checks that the result of `ComputePitchPeriod48kHz()` does not depend on the
+// order of the input pitch candidates.
+TEST_P(PitchCandidatesParametrization,
+ ComputePitchPeriod48kHzOrderDoesNotMatter) {
+ const PitchCandidatesParameters params = GetParam();
+ const CandidatePitchPeriods swapped_pitch_candidates{
+ params.pitch_candidates.second_best, params.pitch_candidates.best};
+
+ PitchTestData test_data;
+ std::vector<float> y_energy(kRefineNumLags24kHz);
+ rtc::ArrayView<float, kRefineNumLags24kHz> y_energy_view(y_energy.data(),
+ kRefineNumLags24kHz);
+ ComputeSlidingFrameSquareEnergies24kHz(test_data.PitchBuffer24kHzView(),
+ y_energy_view, params.cpu_features);
+ EXPECT_EQ(
+ ComputePitchPeriod48kHz(test_data.PitchBuffer24kHzView(), y_energy_view,
+ params.pitch_candidates, params.cpu_features),
+ ComputePitchPeriod48kHz(test_data.PitchBuffer24kHzView(), y_energy_view,
+ swapped_pitch_candidates, params.cpu_features));
+}
+
+std::vector<PitchCandidatesParameters> CreatePitchCandidatesParameters() {
+ std::vector<PitchCandidatesParameters> v;
+ for (AvailableCpuFeatures cpu_features : GetCpuFeaturesToTest()) {
+ v.push_back({{0, 2}, cpu_features});
+ v.push_back({{260, 284}, cpu_features});
+ v.push_back({{280, 284}, cpu_features});
+ v.push_back(
+ {{kInitialNumLags24kHz - 2, kInitialNumLags24kHz - 1}, cpu_features});
+ }
+ return v;
+}
+
+INSTANTIATE_TEST_SUITE_P(
+ RnnVadTest,
+ PitchCandidatesParametrization,
+ ::testing::ValuesIn(CreatePitchCandidatesParameters()),
+ PrintTestIndexAndCpuFeatures<PitchCandidatesParameters>);
+
+struct ExtendedPitchPeriodSearchParameters {
+ int initial_pitch_period;
+ PitchInfo last_pitch;
+ PitchInfo expected_pitch;
+ AvailableCpuFeatures cpu_features;
+};
+
+class ExtendedPitchPeriodSearchParametrization
+ : public ::testing::TestWithParam<ExtendedPitchPeriodSearchParameters> {};
+
+// Checks that the computed pitch period is bit-exact and that the computed
+// pitch strength is within tolerance given test input data.
+TEST_P(ExtendedPitchPeriodSearchParametrization,
+ PeriodBitExactnessGainWithinTolerance) {
+ const ExtendedPitchPeriodSearchParameters params = GetParam();
+
+ PitchTestData test_data;
+ std::vector<float> y_energy(kRefineNumLags24kHz);
+ rtc::ArrayView<float, kRefineNumLags24kHz> y_energy_view(y_energy.data(),
+ kRefineNumLags24kHz);
+ ComputeSlidingFrameSquareEnergies24kHz(test_data.PitchBuffer24kHzView(),
+ y_energy_view, params.cpu_features);
+ // TODO(bugs.webrtc.org/8948): Add when the issue is fixed.
+ // FloatingPointExceptionObserver fpe_observer;
+ const auto computed_output = ComputeExtendedPitchPeriod48kHz(
+ test_data.PitchBuffer24kHzView(), y_energy_view,
+ params.initial_pitch_period, params.last_pitch, params.cpu_features);
+ EXPECT_EQ(params.expected_pitch.period, computed_output.period);
+ EXPECT_NEAR(params.expected_pitch.strength, computed_output.strength, 1e-6f);
+}
+
+std::vector<ExtendedPitchPeriodSearchParameters>
+CreateExtendedPitchPeriodSearchParameters() {
+ std::vector<ExtendedPitchPeriodSearchParameters> v;
+ for (AvailableCpuFeatures cpu_features : GetCpuFeaturesToTest()) {
+ for (int last_pitch_period :
+ {kTestPitchPeriodsLow, kTestPitchPeriodsHigh}) {
+ for (float last_pitch_strength :
+ {kTestPitchStrengthLow, kTestPitchStrengthHigh}) {
+ v.push_back({kTestPitchPeriodsLow,
+ {last_pitch_period, last_pitch_strength},
+ {91, -0.0188608f},
+ cpu_features});
+ v.push_back({kTestPitchPeriodsHigh,
+ {last_pitch_period, last_pitch_strength},
+ {475, -0.0904344f},
+ cpu_features});
+ }
+ }
+ }
+ return v;
+}
+
+INSTANTIATE_TEST_SUITE_P(
+ RnnVadTest,
+    ExtendedPitchPeriodSearchParametrization,
+ ::testing::ValuesIn(CreateExtendedPitchPeriodSearchParameters()),
+ PrintTestIndexAndCpuFeatures<ExtendedPitchPeriodSearchParameters>);
+
+} // namespace
+} // namespace rnn_vad
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/pitch_search_unittest.cc b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/pitch_search_unittest.cc
new file mode 100644
index 0000000000..79b44b995c
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/pitch_search_unittest.cc
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/agc2/rnn_vad/pitch_search.h"
+
+#include <algorithm>
+#include <vector>
+
+#include "modules/audio_processing/agc2/cpu_features.h"
+#include "modules/audio_processing/agc2/rnn_vad/pitch_search_internal.h"
+#include "modules/audio_processing/agc2/rnn_vad/test_utils.h"
+// TODO(bugs.webrtc.org/8948): Add when the issue is fixed.
+// #include "test/fpe_observer.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace rnn_vad {
+
+// Checks that the computed pitch period is bit-exact and that the computed
+// pitch strength is within tolerance given test input data.
+TEST(RnnVadTest, PitchSearchWithinTolerance) {
+ ChunksFileReader reader = CreateLpResidualAndPitchInfoReader();
+ const int num_frames = std::min(reader.num_chunks, 300); // Max 3 s.
+ std::vector<float> lp_residual(kBufSize24kHz);
+ float expected_pitch_period, expected_pitch_strength;
+ const AvailableCpuFeatures cpu_features = GetAvailableCpuFeatures();
+ PitchEstimator pitch_estimator(cpu_features);
+ {
+ // TODO(bugs.webrtc.org/8948): Add when the issue is fixed.
+ // FloatingPointExceptionObserver fpe_observer;
+ for (int i = 0; i < num_frames; ++i) {
+ SCOPED_TRACE(i);
+ ASSERT_TRUE(reader.reader->ReadChunk(lp_residual));
+ ASSERT_TRUE(reader.reader->ReadValue(expected_pitch_period));
+ ASSERT_TRUE(reader.reader->ReadValue(expected_pitch_strength));
+ int pitch_period =
+ pitch_estimator.Estimate({lp_residual.data(), kBufSize24kHz});
+ EXPECT_EQ(expected_pitch_period, pitch_period);
+ EXPECT_NEAR(expected_pitch_strength,
+ pitch_estimator.GetLastPitchStrengthForTesting(), 15e-6f);
+ }
+ }
+}
+
+} // namespace rnn_vad
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/ring_buffer.h b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/ring_buffer.h
new file mode 100644
index 0000000000..a6f7fdd1a6
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/ring_buffer.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AGC2_RNN_VAD_RING_BUFFER_H_
+#define MODULES_AUDIO_PROCESSING_AGC2_RNN_VAD_RING_BUFFER_H_
+
+#include <array>
+#include <cstring>
+#include <type_traits>
+
+#include "api/array_view.h"
+
+namespace webrtc {
+namespace rnn_vad {
+
+// Ring buffer for N arrays of type T each one with size S.
+template <typename T, int S, int N>
+class RingBuffer {
+ static_assert(S > 0, "");
+ static_assert(N > 0, "");
+ static_assert(std::is_arithmetic<T>::value,
+ "Integral or floating point required.");
+
+ public:
+ RingBuffer() : tail_(0) {}
+ RingBuffer(const RingBuffer&) = delete;
+ RingBuffer& operator=(const RingBuffer&) = delete;
+ ~RingBuffer() = default;
+ // Set the ring buffer values to zero.
+ void Reset() { buffer_.fill(0); }
+ // Replace the least recently pushed array in the buffer with `new_values`.
+ void Push(rtc::ArrayView<const T, S> new_values) {
+ std::memcpy(buffer_.data() + S * tail_, new_values.data(), S * sizeof(T));
+ tail_ += 1;
+ if (tail_ == N)
+ tail_ = 0;
+ }
+  // Return an array view onto the array with the given delay: `delay` equal
+  // to 0 returns a view onto the most recently pushed array, while `delay`
+  // equal to N - 1 returns a view onto the least recently pushed one.
+ rtc::ArrayView<const T, S> GetArrayView(int delay) const {
+ RTC_DCHECK_LE(0, delay);
+ RTC_DCHECK_LT(delay, N);
+ int offset = tail_ - 1 - delay;
+ if (offset < 0)
+ offset += N;
+ return {buffer_.data() + S * offset, S};
+ }
+
+ private:
+ int tail_; // Index of the least recently pushed sub-array.
+ std::array<T, S * N> buffer_{};
+};
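+
+// Usage sketch (illustrative):
+//
+//   RingBuffer<float, /*S=*/3, /*N=*/4> ring_buffer;
+//   std::array<float, 3> frame{};
+//   ring_buffer.Push(frame);                    // `frame` is now the newest.
+//   auto newest = ring_buffer.GetArrayView(0);  // View onto `frame`.
+//   auto oldest = ring_buffer.GetArrayView(3);  // Least recently pushed.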
+
+} // namespace rnn_vad
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AGC2_RNN_VAD_RING_BUFFER_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/ring_buffer_unittest.cc b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/ring_buffer_unittest.cc
new file mode 100644
index 0000000000..d11d4eac3e
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/ring_buffer_unittest.cc
@@ -0,0 +1,112 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/agc2/rnn_vad/ring_buffer.h"
+
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace rnn_vad {
+namespace {
+
+// Compare the elements of two given array views.
+template <typename T, std::ptrdiff_t S>
+void ExpectEq(rtc::ArrayView<const T, S> a, rtc::ArrayView<const T, S> b) {
+ for (int i = 0; i < S; ++i) {
+ SCOPED_TRACE(i);
+ EXPECT_EQ(a[i], b[i]);
+ }
+}
+
+// Test push/read sequences.
+template <typename T, int S, int N>
+void TestRingBuffer() {
+ SCOPED_TRACE(N);
+ SCOPED_TRACE(S);
+ std::array<T, S> prev_pushed_array;
+ std::array<T, S> pushed_array;
+ rtc::ArrayView<const T, S> pushed_array_view(pushed_array.data(), S);
+
+ // Init.
+ RingBuffer<T, S, N> ring_buf;
+ ring_buf.GetArrayView(0);
+ pushed_array.fill(0);
+ ring_buf.Push(pushed_array_view);
+ ExpectEq(pushed_array_view, ring_buf.GetArrayView(0));
+
+ // Push N times and check most recent and second most recent.
+ for (T v = 1; v <= static_cast<T>(N); ++v) {
+ SCOPED_TRACE(v);
+ prev_pushed_array = pushed_array;
+ pushed_array.fill(v);
+ ring_buf.Push(pushed_array_view);
+ ExpectEq(pushed_array_view, ring_buf.GetArrayView(0));
+ if (N > 1) {
+ pushed_array.fill(v - 1);
+ ExpectEq(pushed_array_view, ring_buf.GetArrayView(1));
+ }
+ }
+
+ // Check buffer.
+ for (int delay = 2; delay < N; ++delay) {
+ SCOPED_TRACE(delay);
+ T expected_value = N - static_cast<T>(delay);
+ pushed_array.fill(expected_value);
+ ExpectEq(pushed_array_view, ring_buf.GetArrayView(delay));
+ }
+}
+
+// Check that for different delays, different views are returned.
+TEST(RnnVadTest, RingBufferArrayViews) {
+ constexpr int s = 3;
+ constexpr int n = 4;
+ RingBuffer<int, s, n> ring_buf;
+ std::array<int, s> pushed_array;
+ pushed_array.fill(1);
+ for (int k = 0; k <= n; ++k) { // Push data n + 1 times.
+ SCOPED_TRACE(k);
+ // Check array views.
+ for (int i = 0; i < n; ++i) {
+ SCOPED_TRACE(i);
+ auto view_i = ring_buf.GetArrayView(i);
+ for (int j = i + 1; j < n; ++j) {
+ SCOPED_TRACE(j);
+ auto view_j = ring_buf.GetArrayView(j);
+ EXPECT_NE(view_i, view_j);
+ }
+ }
+ ring_buf.Push(pushed_array);
+ }
+}
+
+TEST(RnnVadTest, RingBufferUnsigned) {
+ TestRingBuffer<uint8_t, 1, 1>();
+ TestRingBuffer<uint8_t, 2, 5>();
+ TestRingBuffer<uint8_t, 5, 2>();
+ TestRingBuffer<uint8_t, 5, 5>();
+}
+
+TEST(RnnVadTest, RingBufferSigned) {
+ TestRingBuffer<int, 1, 1>();
+ TestRingBuffer<int, 2, 5>();
+ TestRingBuffer<int, 5, 2>();
+ TestRingBuffer<int, 5, 5>();
+}
+
+TEST(RnnVadTest, RingBufferFloating) {
+ TestRingBuffer<float, 1, 1>();
+ TestRingBuffer<float, 2, 5>();
+ TestRingBuffer<float, 5, 2>();
+ TestRingBuffer<float, 5, 5>();
+}
+
+} // namespace
+} // namespace rnn_vad
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/rnn.cc b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/rnn.cc
new file mode 100644
index 0000000000..475bef9775
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/rnn.cc
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/agc2/rnn_vad/rnn.h"
+
+#include "rtc_base/checks.h"
+#include "third_party/rnnoise/src/rnn_vad_weights.h"
+
+namespace webrtc {
+namespace rnn_vad {
+namespace {
+
+using ::rnnoise::kInputLayerInputSize;
+static_assert(kFeatureVectorSize == kInputLayerInputSize, "");
+using ::rnnoise::kInputDenseBias;
+using ::rnnoise::kInputDenseWeights;
+using ::rnnoise::kInputLayerOutputSize;
+static_assert(kInputLayerOutputSize <= kFullyConnectedLayerMaxUnits, "");
+
+using ::rnnoise::kHiddenGruBias;
+using ::rnnoise::kHiddenGruRecurrentWeights;
+using ::rnnoise::kHiddenGruWeights;
+using ::rnnoise::kHiddenLayerOutputSize;
+static_assert(kHiddenLayerOutputSize <= kGruLayerMaxUnits, "");
+
+using ::rnnoise::kOutputDenseBias;
+using ::rnnoise::kOutputDenseWeights;
+using ::rnnoise::kOutputLayerOutputSize;
+static_assert(kOutputLayerOutputSize <= kFullyConnectedLayerMaxUnits, "");
+
+} // namespace
+
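+// The three layers below form a fixed pipeline: a fully-connected input layer
+// with tansig activation, a GRU hidden layer and a fully-connected output
+// layer with sigmoid activation, whose single unit is the voice probability.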
+RnnVad::RnnVad(const AvailableCpuFeatures& cpu_features)
+ : input_(kInputLayerInputSize,
+ kInputLayerOutputSize,
+ kInputDenseBias,
+ kInputDenseWeights,
+ ActivationFunction::kTansigApproximated,
+ cpu_features,
+ /*layer_name=*/"FC1"),
+ hidden_(kInputLayerOutputSize,
+ kHiddenLayerOutputSize,
+ kHiddenGruBias,
+ kHiddenGruWeights,
+ kHiddenGruRecurrentWeights,
+ cpu_features,
+ /*layer_name=*/"GRU1"),
+ output_(kHiddenLayerOutputSize,
+ kOutputLayerOutputSize,
+ kOutputDenseBias,
+ kOutputDenseWeights,
+ ActivationFunction::kSigmoidApproximated,
+ // The output layer is just 24x1. The unoptimized code is faster.
+ NoAvailableCpuFeatures(),
+ /*layer_name=*/"FC2") {
+ // Input-output chaining size checks.
+ RTC_DCHECK_EQ(input_.size(), hidden_.input_size())
+ << "The input and the hidden layers sizes do not match.";
+ RTC_DCHECK_EQ(hidden_.size(), output_.input_size())
+ << "The hidden and the output layers sizes do not match.";
+}
+
+RnnVad::~RnnVad() = default;
+
+void RnnVad::Reset() {
+ hidden_.Reset();
+}
+
+float RnnVad::ComputeVadProbability(
+ rtc::ArrayView<const float, kFeatureVectorSize> feature_vector,
+ bool is_silence) {
+ if (is_silence) {
+ Reset();
+ return 0.f;
+ }
+ input_.ComputeOutput(feature_vector);
+ hidden_.ComputeOutput(input_);
+ output_.ComputeOutput(hidden_);
+ RTC_DCHECK_EQ(output_.size(), 1);
+ return output_.data()[0];
+}
+
+} // namespace rnn_vad
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/rnn.h b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/rnn.h
new file mode 100644
index 0000000000..3148f1b3ff
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/rnn.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AGC2_RNN_VAD_RNN_H_
+#define MODULES_AUDIO_PROCESSING_AGC2_RNN_VAD_RNN_H_
+
+#include <stddef.h>
+#include <sys/types.h>
+
+#include <array>
+#include <vector>
+
+#include "api/array_view.h"
+#include "modules/audio_processing/agc2/cpu_features.h"
+#include "modules/audio_processing/agc2/rnn_vad/common.h"
+#include "modules/audio_processing/agc2/rnn_vad/rnn_fc.h"
+#include "modules/audio_processing/agc2/rnn_vad/rnn_gru.h"
+
+namespace webrtc {
+namespace rnn_vad {
+
+// Recurrent network with hard-coded architecture and weights for voice activity
+// detection.
+class RnnVad {
+ public:
+ explicit RnnVad(const AvailableCpuFeatures& cpu_features);
+ RnnVad(const RnnVad&) = delete;
+ RnnVad& operator=(const RnnVad&) = delete;
+ ~RnnVad();
+ void Reset();
+ // Observes `feature_vector` and `is_silence`, updates the RNN and returns the
+ // current voice probability.
+ float ComputeVadProbability(
+ rtc::ArrayView<const float, kFeatureVectorSize> feature_vector,
+ bool is_silence);
+
+ private:
+ FullyConnectedLayer input_;
+ GatedRecurrentLayer hidden_;
+ FullyConnectedLayer output_;
+};
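+
+// Usage sketch (illustrative; feature extraction is not shown):
+//
+//   RnnVad vad(GetAvailableCpuFeatures());
+//   float voice_probability =
+//       vad.ComputeVadProbability(feature_vector, /*is_silence=*/false);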
+
+} // namespace rnn_vad
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AGC2_RNN_VAD_RNN_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/rnn_fc.cc b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/rnn_fc.cc
new file mode 100644
index 0000000000..91501fb6e3
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/rnn_fc.cc
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <algorithm>
+#include <numeric>
+
+#include "modules/audio_processing/agc2/rnn_vad/rnn_fc.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "third_party/rnnoise/src/rnn_activations.h"
+#include "third_party/rnnoise/src/rnn_vad_weights.h"
+
+namespace webrtc {
+namespace rnn_vad {
+namespace {
+
+std::vector<float> GetScaledParams(rtc::ArrayView<const int8_t> params) {
+ std::vector<float> scaled_params(params.size());
+ std::transform(params.begin(), params.end(), scaled_params.begin(),
+ [](int8_t x) -> float {
+ return ::rnnoise::kWeightsScale * static_cast<float>(x);
+ });
+ return scaled_params;
+}
+
+// TODO(bugs.chromium.org/10480): Hard-code optimized layout and remove this
+// function to improve setup time.
+// Casts and scales `weights` and re-arranges the layout.
+std::vector<float> PreprocessWeights(rtc::ArrayView<const int8_t> weights,
+ int output_size) {
+ if (output_size == 1) {
+ return GetScaledParams(weights);
+ }
+ // Transpose, scale and cast.
+ const int input_size = rtc::CheckedDivExact(
+ rtc::dchecked_cast<int>(weights.size()), output_size);
+ std::vector<float> w(weights.size());
+ for (int o = 0; o < output_size; ++o) {
+ for (int i = 0; i < input_size; ++i) {
+ w[o * input_size + i] = rnnoise::kWeightsScale *
+ static_cast<float>(weights[i * output_size + o]);
+ }
+ }
+ return w;
+}
+
+rtc::FunctionView<float(float)> GetActivationFunction(
+ ActivationFunction activation_function) {
+ switch (activation_function) {
+ case ActivationFunction::kTansigApproximated:
+ return ::rnnoise::TansigApproximated;
+ case ActivationFunction::kSigmoidApproximated:
+ return ::rnnoise::SigmoidApproximated;
+ }
+}
+
+} // namespace
+
+FullyConnectedLayer::FullyConnectedLayer(
+ const int input_size,
+ const int output_size,
+ const rtc::ArrayView<const int8_t> bias,
+ const rtc::ArrayView<const int8_t> weights,
+ ActivationFunction activation_function,
+ const AvailableCpuFeatures& cpu_features,
+ absl::string_view layer_name)
+ : input_size_(input_size),
+ output_size_(output_size),
+ bias_(GetScaledParams(bias)),
+ weights_(PreprocessWeights(weights, output_size)),
+ vector_math_(cpu_features),
+ activation_function_(GetActivationFunction(activation_function)) {
+ RTC_DCHECK_LE(output_size_, kFullyConnectedLayerMaxUnits)
+ << "Insufficient FC layer over-allocation (" << layer_name << ").";
+ RTC_DCHECK_EQ(output_size_, bias_.size())
+ << "Mismatching output size and bias terms array size (" << layer_name
+ << ").";
+ RTC_DCHECK_EQ(input_size_ * output_size_, weights_.size())
+ << "Mismatching input-output size and weight coefficients array size ("
+ << layer_name << ").";
+}
+
+FullyConnectedLayer::~FullyConnectedLayer() = default;
+
+void FullyConnectedLayer::ComputeOutput(rtc::ArrayView<const float> input) {
+ RTC_DCHECK_EQ(input.size(), input_size_);
+ rtc::ArrayView<const float> weights(weights_);
+ for (int o = 0; o < output_size_; ++o) {
+ output_[o] = activation_function_(
+ bias_[o] + vector_math_.DotProduct(
+ input, weights.subview(o * input_size_, input_size_)));
+ }
+}
+
+} // namespace rnn_vad
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/rnn_fc.h b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/rnn_fc.h
new file mode 100644
index 0000000000..d23957a6f2
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/rnn_fc.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AGC2_RNN_VAD_RNN_FC_H_
+#define MODULES_AUDIO_PROCESSING_AGC2_RNN_VAD_RNN_FC_H_
+
+#include <array>
+#include <vector>
+
+#include "absl/strings/string_view.h"
+#include "api/array_view.h"
+#include "api/function_view.h"
+#include "modules/audio_processing/agc2/cpu_features.h"
+#include "modules/audio_processing/agc2/rnn_vad/vector_math.h"
+
+namespace webrtc {
+namespace rnn_vad {
+
+// Activation function for a neural network cell.
+enum class ActivationFunction { kTansigApproximated, kSigmoidApproximated };
+
+// Maximum number of units for an FC layer.
+constexpr int kFullyConnectedLayerMaxUnits = 24;
+
+// Fully-connected layer with a custom activation function. The layer owns its
+// output buffer.
+class FullyConnectedLayer {
+ public:
+ // Ctor. `output_size` cannot be greater than `kFullyConnectedLayerMaxUnits`.
+ FullyConnectedLayer(int input_size,
+ int output_size,
+ rtc::ArrayView<const int8_t> bias,
+ rtc::ArrayView<const int8_t> weights,
+ ActivationFunction activation_function,
+ const AvailableCpuFeatures& cpu_features,
+ absl::string_view layer_name);
+ FullyConnectedLayer(const FullyConnectedLayer&) = delete;
+ FullyConnectedLayer& operator=(const FullyConnectedLayer&) = delete;
+ ~FullyConnectedLayer();
+
+ // Returns the size of the input vector.
+ int input_size() const { return input_size_; }
+ // Returns the pointer to the first element of the output buffer.
+ const float* data() const { return output_.data(); }
+ // Returns the size of the output buffer.
+ int size() const { return output_size_; }
+
+ // Computes the fully-connected layer output.
+ void ComputeOutput(rtc::ArrayView<const float> input);
+
+ private:
+ const int input_size_;
+ const int output_size_;
+ const std::vector<float> bias_;
+ const std::vector<float> weights_;
+ const VectorMath vector_math_;
+ rtc::FunctionView<float(float)> activation_function_;
+  // Over-allocated array; only the first `output_size_` elements are used.
+ std::array<float, kFullyConnectedLayerMaxUnits> output_;
+};
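+
+// Usage sketch (illustrative; the constants are the rnnoise input-layer
+// tables used by the unit tests):
+//   FullyConnectedLayer fc(kInputLayerInputSize, kInputLayerOutputSize,
+//                          kInputDenseBias, kInputDenseWeights,
+//                          ActivationFunction::kTansigApproximated,
+//                          GetAvailableCpuFeatures(), /*layer_name=*/"FC");
+//   fc.ComputeOutput(input);  // `input` must have fc.input_size() elements.
+//   rtc::ArrayView<const float> output(fc.data(), fc.size());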
+
+} // namespace rnn_vad
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AGC2_RNN_VAD_RNN_FC_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/rnn_fc_unittest.cc b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/rnn_fc_unittest.cc
new file mode 100644
index 0000000000..ff9bb18bc2
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/rnn_fc_unittest.cc
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/agc2/rnn_vad/rnn_fc.h"
+
+#include <array>
+#include <vector>
+
+#include "api/array_view.h"
+#include "modules/audio_processing/agc2/cpu_features.h"
+#include "modules/audio_processing/agc2/rnn_vad/test_utils.h"
+#include "modules/audio_processing/test/performance_timer.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/system/arch.h"
+#include "test/gtest.h"
+#include "third_party/rnnoise/src/rnn_vad_weights.h"
+
+namespace webrtc {
+namespace rnn_vad {
+namespace {
+
+using ::rnnoise::kInputDenseBias;
+using ::rnnoise::kInputDenseWeights;
+using ::rnnoise::kInputLayerInputSize;
+using ::rnnoise::kInputLayerOutputSize;
+
+// Fully connected layer test data.
+constexpr std::array<float, 42> kFullyConnectedInputVector = {
+ -1.00131f, -0.627069f, -7.81097f, 7.86285f, -2.87145f, 3.32365f,
+ -0.653161f, 0.529839f, -0.425307f, 0.25583f, 0.235094f, 0.230527f,
+ -0.144687f, 0.182785f, 0.57102f, 0.125039f, 0.479482f, -0.0255439f,
+ -0.0073141f, -0.147346f, -0.217106f, -0.0846906f, -8.34943f, 3.09065f,
+ 1.42628f, -0.85235f, -0.220207f, -0.811163f, 2.09032f, -2.01425f,
+ -0.690268f, -0.925327f, -0.541354f, 0.58455f, -0.606726f, -0.0372358f,
+ 0.565991f, 0.435854f, 0.420812f, 0.162198f, -2.13f, 10.0089f};
+constexpr std::array<float, 24> kFullyConnectedExpectedOutput = {
+ -0.623293f, -0.988299f, 0.999378f, 0.967168f, 0.103087f, -0.978545f,
+ -0.856347f, 0.346675f, 1.f, -0.717442f, -0.544176f, 0.960363f,
+ 0.983443f, 0.999991f, -0.824335f, 0.984742f, 0.990208f, 0.938179f,
+ 0.875092f, 0.999846f, 0.997707f, -0.999382f, 0.973153f, -0.966605f};
+
+class RnnFcParametrization
+ : public ::testing::TestWithParam<AvailableCpuFeatures> {};
+
+// Checks that the output of a fully connected layer is within tolerance given
+// test input data.
+TEST_P(RnnFcParametrization, CheckFullyConnectedLayerOutput) {
+ FullyConnectedLayer fc(kInputLayerInputSize, kInputLayerOutputSize,
+ kInputDenseBias, kInputDenseWeights,
+ ActivationFunction::kTansigApproximated,
+ /*cpu_features=*/GetParam(),
+ /*layer_name=*/"FC");
+ fc.ComputeOutput(kFullyConnectedInputVector);
+ ExpectNearAbsolute(kFullyConnectedExpectedOutput, fc, 1e-5f);
+}
+
+TEST_P(RnnFcParametrization, DISABLED_BenchmarkFullyConnectedLayer) {
+ const AvailableCpuFeatures cpu_features = GetParam();
+ FullyConnectedLayer fc(kInputLayerInputSize, kInputLayerOutputSize,
+ kInputDenseBias, kInputDenseWeights,
+ ActivationFunction::kTansigApproximated, cpu_features,
+ /*layer_name=*/"FC");
+
+ constexpr int kNumTests = 10000;
+ ::webrtc::test::PerformanceTimer perf_timer(kNumTests);
+ for (int k = 0; k < kNumTests; ++k) {
+ perf_timer.StartTimer();
+ fc.ComputeOutput(kFullyConnectedInputVector);
+ perf_timer.StopTimer();
+ }
+ RTC_LOG(LS_INFO) << "CPU features: " << cpu_features.ToString() << " | "
+ << (perf_timer.GetDurationAverage() / 1000) << " +/- "
+ << (perf_timer.GetDurationStandardDeviation() / 1000)
+ << " ms";
+}
+
+// Finds the relevant combinations of CPU features to test.
+std::vector<AvailableCpuFeatures> GetCpuFeaturesToTest() {
+ std::vector<AvailableCpuFeatures> v;
+ v.push_back(NoAvailableCpuFeatures());
+ AvailableCpuFeatures available = GetAvailableCpuFeatures();
+ if (available.sse2) {
+ v.push_back({/*sse2=*/true, /*avx2=*/false, /*neon=*/false});
+ }
+ if (available.avx2) {
+ v.push_back({/*sse2=*/false, /*avx2=*/true, /*neon=*/false});
+ }
+ if (available.neon) {
+ v.push_back({/*sse2=*/false, /*avx2=*/false, /*neon=*/true});
+ }
+ return v;
+}
+
+INSTANTIATE_TEST_SUITE_P(
+ RnnVadTest,
+ RnnFcParametrization,
+ ::testing::ValuesIn(GetCpuFeaturesToTest()),
+ [](const ::testing::TestParamInfo<AvailableCpuFeatures>& info) {
+ return info.param.ToString();
+ });
+
+} // namespace
+} // namespace rnn_vad
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/rnn_gru.cc b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/rnn_gru.cc
new file mode 100644
index 0000000000..ef37410caa
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/rnn_gru.cc
@@ -0,0 +1,198 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/agc2/rnn_vad/rnn_gru.h"
+
+#include <algorithm>
+
+#include "rtc_base/checks.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "third_party/rnnoise/src/rnn_activations.h"
+#include "third_party/rnnoise/src/rnn_vad_weights.h"
+
+namespace webrtc {
+namespace rnn_vad {
+namespace {
+
+constexpr int kNumGruGates = 3; // Update, reset, output.
+
+std::vector<float> PreprocessGruTensor(rtc::ArrayView<const int8_t> tensor_src,
+ int output_size) {
+  // Transpose, cast and scale.
+  // `n` is the size of the first dimension of the 3-dim tensor `tensor_src`.
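+  // Source layout (row-major): [n][kNumGruGates][output_size], i.e. the gate
+  // weights are interleaved per input coefficient. Destination layout:
+  // [kNumGruGates][output_size][n], so that each gate's weights for a given
+  // output unit are contiguous.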
+ const int n = rtc::CheckedDivExact(rtc::dchecked_cast<int>(tensor_src.size()),
+ output_size * kNumGruGates);
+ const int stride_src = kNumGruGates * output_size;
+ const int stride_dst = n * output_size;
+ std::vector<float> tensor_dst(tensor_src.size());
+ for (int g = 0; g < kNumGruGates; ++g) {
+ for (int o = 0; o < output_size; ++o) {
+ for (int i = 0; i < n; ++i) {
+ tensor_dst[g * stride_dst + o * n + i] =
+ ::rnnoise::kWeightsScale *
+ static_cast<float>(
+ tensor_src[i * stride_src + g * output_size + o]);
+ }
+ }
+ }
+ return tensor_dst;
+}
+
+// Computes the output for the update or the reset gate.
+// Operation: `g = sigmoid(W^T∙i + R^T∙s + b)` where
+// - `g`: output gate vector
+// - `W`: weights matrix
+// - `i`: input vector
+// - `R`: recurrent weights matrix
+// - `s`: state gate vector
+// - `b`: bias vector
+void ComputeUpdateResetGate(int input_size,
+ int output_size,
+ const VectorMath& vector_math,
+ rtc::ArrayView<const float> input,
+ rtc::ArrayView<const float> state,
+ rtc::ArrayView<const float> bias,
+ rtc::ArrayView<const float> weights,
+ rtc::ArrayView<const float> recurrent_weights,
+ rtc::ArrayView<float> gate) {
+ RTC_DCHECK_EQ(input.size(), input_size);
+ RTC_DCHECK_EQ(state.size(), output_size);
+ RTC_DCHECK_EQ(bias.size(), output_size);
+ RTC_DCHECK_EQ(weights.size(), input_size * output_size);
+ RTC_DCHECK_EQ(recurrent_weights.size(), output_size * output_size);
+ RTC_DCHECK_GE(gate.size(), output_size); // `gate` is over-allocated.
+ for (int o = 0; o < output_size; ++o) {
+ float x = bias[o];
+ x += vector_math.DotProduct(input,
+ weights.subview(o * input_size, input_size));
+ x += vector_math.DotProduct(
+ state, recurrent_weights.subview(o * output_size, output_size));
+ gate[o] = ::rnnoise::SigmoidApproximated(x);
+ }
+}
+
+// Computes the output for the state gate.
+// Operation: `s' = u .* s + (1 - u) .* ReLU(W^T∙i + R^T∙(s .* r) + b)` where
+// - `s'`: output state gate vector
+// - `s`: previous state gate vector
+// - `u`: update gate vector
+// - `W`: weights matrix
+// - `i`: input vector
+// - `R`: recurrent weights matrix
+// - `r`: reset gate vector
+// - `b`: bias vector
+// - `.*` element-wise product
+void ComputeStateGate(int input_size,
+ int output_size,
+ const VectorMath& vector_math,
+ rtc::ArrayView<const float> input,
+ rtc::ArrayView<const float> update,
+ rtc::ArrayView<const float> reset,
+ rtc::ArrayView<const float> bias,
+ rtc::ArrayView<const float> weights,
+ rtc::ArrayView<const float> recurrent_weights,
+ rtc::ArrayView<float> state) {
+ RTC_DCHECK_EQ(input.size(), input_size);
+ RTC_DCHECK_GE(update.size(), output_size); // `update` is over-allocated.
+ RTC_DCHECK_GE(reset.size(), output_size); // `reset` is over-allocated.
+ RTC_DCHECK_EQ(bias.size(), output_size);
+ RTC_DCHECK_EQ(weights.size(), input_size * output_size);
+ RTC_DCHECK_EQ(recurrent_weights.size(), output_size * output_size);
+ RTC_DCHECK_EQ(state.size(), output_size);
+ std::array<float, kGruLayerMaxUnits> reset_x_state;
+ for (int o = 0; o < output_size; ++o) {
+ reset_x_state[o] = state[o] * reset[o];
+ }
+ for (int o = 0; o < output_size; ++o) {
+ float x = bias[o];
+ x += vector_math.DotProduct(input,
+ weights.subview(o * input_size, input_size));
+ x += vector_math.DotProduct(
+ {reset_x_state.data(), static_cast<size_t>(output_size)},
+ recurrent_weights.subview(o * output_size, output_size));
+ state[o] = update[o] * state[o] + (1.f - update[o]) * std::max(0.f, x);
+ }
+}
+
+} // namespace
+
+GatedRecurrentLayer::GatedRecurrentLayer(
+ const int input_size,
+ const int output_size,
+ const rtc::ArrayView<const int8_t> bias,
+ const rtc::ArrayView<const int8_t> weights,
+ const rtc::ArrayView<const int8_t> recurrent_weights,
+ const AvailableCpuFeatures& cpu_features,
+ absl::string_view layer_name)
+ : input_size_(input_size),
+ output_size_(output_size),
+ bias_(PreprocessGruTensor(bias, output_size)),
+ weights_(PreprocessGruTensor(weights, output_size)),
+ recurrent_weights_(PreprocessGruTensor(recurrent_weights, output_size)),
+ vector_math_(cpu_features) {
+ RTC_DCHECK_LE(output_size_, kGruLayerMaxUnits)
+ << "Insufficient GRU layer over-allocation (" << layer_name << ").";
+ RTC_DCHECK_EQ(kNumGruGates * output_size_, bias_.size())
+ << "Mismatching output size and bias terms array size (" << layer_name
+ << ").";
+ RTC_DCHECK_EQ(kNumGruGates * input_size_ * output_size_, weights_.size())
+ << "Mismatching input-output size and weight coefficients array size ("
+ << layer_name << ").";
+ RTC_DCHECK_EQ(kNumGruGates * output_size_ * output_size_,
+ recurrent_weights_.size())
+ << "Mismatching input-output size and recurrent weight coefficients array"
+ " size ("
+ << layer_name << ").";
+ Reset();
+}
+
+GatedRecurrentLayer::~GatedRecurrentLayer() = default;
+
+void GatedRecurrentLayer::Reset() {
+ state_.fill(0.f);
+}
+
+void GatedRecurrentLayer::ComputeOutput(rtc::ArrayView<const float> input) {
+ RTC_DCHECK_EQ(input.size(), input_size_);
+
+ // The tensors below are organized as a sequence of flattened tensors for the
+ // `update`, `reset` and `state` gates.
+ rtc::ArrayView<const float> bias(bias_);
+ rtc::ArrayView<const float> weights(weights_);
+ rtc::ArrayView<const float> recurrent_weights(recurrent_weights_);
+  // Strides used to access the flattened tensors for a specific gate.
+ const int stride_weights = input_size_ * output_size_;
+ const int stride_recurrent_weights = output_size_ * output_size_;
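+  // Gate `g` (0 = update, 1 = reset, 2 = state) therefore starts at offset
+  // `g * output_size_` in `bias`, `g * stride_weights` in `weights` and
+  // `g * stride_recurrent_weights` in `recurrent_weights`.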
+
+ rtc::ArrayView<float> state(state_.data(), output_size_);
+
+ // Update gate.
+ std::array<float, kGruLayerMaxUnits> update;
+ ComputeUpdateResetGate(
+ input_size_, output_size_, vector_math_, input, state,
+ bias.subview(0, output_size_), weights.subview(0, stride_weights),
+ recurrent_weights.subview(0, stride_recurrent_weights), update);
+ // Reset gate.
+ std::array<float, kGruLayerMaxUnits> reset;
+ ComputeUpdateResetGate(input_size_, output_size_, vector_math_, input, state,
+ bias.subview(output_size_, output_size_),
+ weights.subview(stride_weights, stride_weights),
+ recurrent_weights.subview(stride_recurrent_weights,
+ stride_recurrent_weights),
+ reset);
+ // State gate.
+ ComputeStateGate(input_size_, output_size_, vector_math_, input, update,
+ reset, bias.subview(2 * output_size_, output_size_),
+ weights.subview(2 * stride_weights, stride_weights),
+ recurrent_weights.subview(2 * stride_recurrent_weights,
+ stride_recurrent_weights),
+ state);
+}
+
+} // namespace rnn_vad
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/rnn_gru.h b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/rnn_gru.h
new file mode 100644
index 0000000000..3407dfcdf1
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/rnn_gru.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AGC2_RNN_VAD_RNN_GRU_H_
+#define MODULES_AUDIO_PROCESSING_AGC2_RNN_VAD_RNN_GRU_H_
+
+#include <array>
+#include <vector>
+
+#include "absl/strings/string_view.h"
+#include "api/array_view.h"
+#include "modules/audio_processing/agc2/cpu_features.h"
+#include "modules/audio_processing/agc2/rnn_vad/vector_math.h"
+
+namespace webrtc {
+namespace rnn_vad {
+
+// Maximum number of units for a GRU layer.
+constexpr int kGruLayerMaxUnits = 24;
+
+// Recurrent layer with gated recurrent units (GRUs), using sigmoid as the
+// activation function for the update/reset gates and ReLU for the output
+// gate.
+class GatedRecurrentLayer {
+ public:
+ // Ctor. `output_size` cannot be greater than `kGruLayerMaxUnits`.
+ GatedRecurrentLayer(int input_size,
+ int output_size,
+ rtc::ArrayView<const int8_t> bias,
+ rtc::ArrayView<const int8_t> weights,
+ rtc::ArrayView<const int8_t> recurrent_weights,
+ const AvailableCpuFeatures& cpu_features,
+ absl::string_view layer_name);
+ GatedRecurrentLayer(const GatedRecurrentLayer&) = delete;
+ GatedRecurrentLayer& operator=(const GatedRecurrentLayer&) = delete;
+ ~GatedRecurrentLayer();
+
+ // Returns the size of the input vector.
+ int input_size() const { return input_size_; }
+ // Returns the pointer to the first element of the output buffer.
+ const float* data() const { return state_.data(); }
+ // Returns the size of the output buffer.
+ int size() const { return output_size_; }
+
+ // Resets the GRU state.
+ void Reset();
+  // Computes the recurrent layer output and updates the state.
+ void ComputeOutput(rtc::ArrayView<const float> input);
+
+ private:
+ const int input_size_;
+ const int output_size_;
+ const std::vector<float> bias_;
+ const std::vector<float> weights_;
+ const std::vector<float> recurrent_weights_;
+ const VectorMath vector_math_;
+  // Over-allocated array; only the first `output_size_` elements are used.
+ std::array<float, kGruLayerMaxUnits> state_;
+};
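+
+// Usage sketch (illustrative; the constants are the rnnoise hidden-layer
+// tables used by the unit tests):
+//   GatedRecurrentLayer gru(kInputLayerOutputSize, kHiddenLayerOutputSize,
+//                           kHiddenGruBias, kHiddenGruWeights,
+//                           kHiddenGruRecurrentWeights,
+//                           GetAvailableCpuFeatures(), /*layer_name=*/"GRU");
+//   gru.Reset();
+//   gru.ComputeOutput(input);  // `input` must have gru.input_size() elements.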
+
+} // namespace rnn_vad
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AGC2_RNN_VAD_RNN_GRU_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/rnn_gru_unittest.cc b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/rnn_gru_unittest.cc
new file mode 100644
index 0000000000..88ae72803a
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/rnn_gru_unittest.cc
@@ -0,0 +1,186 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/agc2/rnn_vad/rnn_gru.h"
+
+#include <array>
+#include <memory>
+#include <vector>
+
+#include "api/array_view.h"
+#include "modules/audio_processing/agc2/rnn_vad/test_utils.h"
+#include "modules/audio_processing/test/performance_timer.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "test/gtest.h"
+#include "third_party/rnnoise/src/rnn_vad_weights.h"
+
+namespace webrtc {
+namespace rnn_vad {
+namespace {
+
+void TestGatedRecurrentLayer(
+ GatedRecurrentLayer& gru,
+ rtc::ArrayView<const float> input_sequence,
+ rtc::ArrayView<const float> expected_output_sequence) {
+ const int input_sequence_length = rtc::CheckedDivExact(
+ rtc::dchecked_cast<int>(input_sequence.size()), gru.input_size());
+ const int output_sequence_length = rtc::CheckedDivExact(
+ rtc::dchecked_cast<int>(expected_output_sequence.size()), gru.size());
+ ASSERT_EQ(input_sequence_length, output_sequence_length)
+ << "The test data length is invalid.";
+ // Feed the GRU layer and check the output at every step.
+ gru.Reset();
+ for (int i = 0; i < input_sequence_length; ++i) {
+ SCOPED_TRACE(i);
+ gru.ComputeOutput(
+ input_sequence.subview(i * gru.input_size(), gru.input_size()));
+ const auto expected_output =
+ expected_output_sequence.subview(i * gru.size(), gru.size());
+ ExpectNearAbsolute(expected_output, gru, 3e-6f);
+ }
+}
+
+// Gated recurrent units layer test data.
+constexpr int kGruInputSize = 5;
+constexpr int kGruOutputSize = 4;
+constexpr std::array<int8_t, 12> kGruBias = {96, -99, -81, -114, 49, 119,
+ -118, 68, -76, 91, 121, 125};
+constexpr std::array<int8_t, 60> kGruWeights = {
+ // Input 0.
+ 124, 9, 1, 116, // Update.
+ -66, -21, -118, -110, // Reset.
+ 104, 75, -23, -51, // Output.
+ // Input 1.
+ -72, -111, 47, 93, // Update.
+ 77, -98, 41, -8, // Reset.
+ 40, -23, -43, -107, // Output.
+ // Input 2.
+ 9, -73, 30, -32, // Update.
+ -2, 64, -26, 91, // Reset.
+ -48, -24, -28, -104, // Output.
+ // Input 3.
+ 74, -46, 116, 15, // Update.
+ 32, 52, -126, -38, // Reset.
+ -121, 12, -16, 110, // Output.
+ // Input 4.
+ -95, 66, -103, -35, // Update.
+ -38, 3, -126, -61, // Reset.
+ 28, 98, -117, -43 // Output.
+};
+constexpr std::array<int8_t, 48> kGruRecurrentWeights = {
+ // Output 0.
+ -3, 87, 50, 51, // Update.
+ -22, 27, -39, 62, // Reset.
+ 31, -83, -52, -48, // Output.
+ // Output 1.
+ -6, 83, -19, 104, // Update.
+ 105, 48, 23, 68, // Reset.
+ 23, 40, 7, -120, // Output.
+ // Output 2.
+ 64, -62, 117, 85, // Update.
+ 51, -43, 54, -105, // Reset.
+ 120, 56, -128, -107, // Output.
+ // Output 3.
+ 39, 50, -17, -47, // Update.
+ -117, 14, 108, 12, // Reset.
+ -7, -72, 103, -87, // Output.
+};
+constexpr std::array<float, 20> kGruInputSequence = {
+ 0.89395463f, 0.93224651f, 0.55788344f, 0.32341808f, 0.93355054f,
+ 0.13475326f, 0.97370994f, 0.14253306f, 0.93710381f, 0.76093364f,
+ 0.65780413f, 0.41657975f, 0.49403164f, 0.46843281f, 0.75138855f,
+ 0.24517593f, 0.47657707f, 0.57064998f, 0.435184f, 0.19319285f};
+constexpr std::array<float, 16> kGruExpectedOutputSequence = {
+ 0.0239123f, 0.5773077f, 0.f, 0.f,
+ 0.01282811f, 0.64330572f, 0.f, 0.04863098f,
+ 0.00781069f, 0.75267816f, 0.f, 0.02579715f,
+ 0.00471378f, 0.59162533f, 0.11087593f, 0.01334511f};
+
+class RnnGruParametrization
+ : public ::testing::TestWithParam<AvailableCpuFeatures> {};
+
+// Checks that the output of a GRU layer is within tolerance given test input
+// data.
+TEST_P(RnnGruParametrization, CheckGatedRecurrentLayer) {
+ GatedRecurrentLayer gru(kGruInputSize, kGruOutputSize, kGruBias, kGruWeights,
+ kGruRecurrentWeights,
+ /*cpu_features=*/GetParam(),
+ /*layer_name=*/"GRU");
+ TestGatedRecurrentLayer(gru, kGruInputSequence, kGruExpectedOutputSequence);
+}
+
+TEST_P(RnnGruParametrization, DISABLED_BenchmarkGatedRecurrentLayer) {
+ // Prefetch test data.
+ std::unique_ptr<FileReader> reader = CreateGruInputReader();
+ std::vector<float> gru_input_sequence(reader->size());
+ reader->ReadChunk(gru_input_sequence);
+
+ using ::rnnoise::kHiddenGruBias;
+ using ::rnnoise::kHiddenGruRecurrentWeights;
+ using ::rnnoise::kHiddenGruWeights;
+ using ::rnnoise::kHiddenLayerOutputSize;
+ using ::rnnoise::kInputLayerOutputSize;
+
+ GatedRecurrentLayer gru(kInputLayerOutputSize, kHiddenLayerOutputSize,
+ kHiddenGruBias, kHiddenGruWeights,
+ kHiddenGruRecurrentWeights,
+ /*cpu_features=*/GetParam(),
+ /*layer_name=*/"GRU");
+
+ rtc::ArrayView<const float> input_sequence(gru_input_sequence);
+ ASSERT_EQ(input_sequence.size() % kInputLayerOutputSize,
+ static_cast<size_t>(0));
+ const int input_sequence_length =
+ input_sequence.size() / kInputLayerOutputSize;
+
+ constexpr int kNumTests = 100;
+ ::webrtc::test::PerformanceTimer perf_timer(kNumTests);
+ for (int k = 0; k < kNumTests; ++k) {
+ perf_timer.StartTimer();
+ for (int i = 0; i < input_sequence_length; ++i) {
+ gru.ComputeOutput(
+ input_sequence.subview(i * gru.input_size(), gru.input_size()));
+ }
+ perf_timer.StopTimer();
+ }
+ RTC_LOG(LS_INFO) << (perf_timer.GetDurationAverage() / 1000) << " +/- "
+ << (perf_timer.GetDurationStandardDeviation() / 1000)
+ << " ms";
+}
+
+// Finds the relevant combinations of CPU features to test.
+std::vector<AvailableCpuFeatures> GetCpuFeaturesToTest() {
+ std::vector<AvailableCpuFeatures> v;
+ v.push_back(NoAvailableCpuFeatures());
+ AvailableCpuFeatures available = GetAvailableCpuFeatures();
+ if (available.sse2) {
+ v.push_back({/*sse2=*/true, /*avx2=*/false, /*neon=*/false});
+ }
+ if (available.avx2) {
+ v.push_back({/*sse2=*/false, /*avx2=*/true, /*neon=*/false});
+ }
+ if (available.neon) {
+ v.push_back({/*sse2=*/false, /*avx2=*/false, /*neon=*/true});
+ }
+ return v;
+}
+
+INSTANTIATE_TEST_SUITE_P(
+ RnnVadTest,
+ RnnGruParametrization,
+ ::testing::ValuesIn(GetCpuFeaturesToTest()),
+ [](const ::testing::TestParamInfo<AvailableCpuFeatures>& info) {
+ return info.param.ToString();
+ });
+
+} // namespace
+} // namespace rnn_vad
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/rnn_unittest.cc b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/rnn_unittest.cc
new file mode 100644
index 0000000000..4c5409a14e
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/rnn_unittest.cc
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/agc2/rnn_vad/rnn.h"
+
+#include "api/array_view.h"
+#include "modules/audio_processing/agc2/cpu_features.h"
+#include "modules/audio_processing/agc2/rnn_vad/common.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace rnn_vad {
+namespace {
+
+constexpr std::array<float, kFeatureVectorSize> kFeatures = {
+ -1.00131f, -0.627069f, -7.81097f, 7.86285f, -2.87145f, 3.32365f,
+ -0.653161f, 0.529839f, -0.425307f, 0.25583f, 0.235094f, 0.230527f,
+ -0.144687f, 0.182785f, 0.57102f, 0.125039f, 0.479482f, -0.0255439f,
+ -0.0073141f, -0.147346f, -0.217106f, -0.0846906f, -8.34943f, 3.09065f,
+ 1.42628f, -0.85235f, -0.220207f, -0.811163f, 2.09032f, -2.01425f,
+ -0.690268f, -0.925327f, -0.541354f, 0.58455f, -0.606726f, -0.0372358f,
+ 0.565991f, 0.435854f, 0.420812f, 0.162198f, -2.13f, 10.0089f};
+
+void WarmUpRnnVad(RnnVad& rnn_vad) {
+ for (int i = 0; i < 10; ++i) {
+ rnn_vad.ComputeVadProbability(kFeatures, /*is_silence=*/false);
+ }
+}
+
+// Checks that the speech probability is zero with silence.
+TEST(RnnVadTest, CheckZeroProbabilityWithSilence) {
+ RnnVad rnn_vad(GetAvailableCpuFeatures());
+ WarmUpRnnVad(rnn_vad);
+ EXPECT_EQ(rnn_vad.ComputeVadProbability(kFeatures, /*is_silence=*/true), 0.f);
+}
+
+// Checks that the same output is produced after reset given the same input
+// sequence.
+TEST(RnnVadTest, CheckRnnVadReset) {
+ RnnVad rnn_vad(GetAvailableCpuFeatures());
+ WarmUpRnnVad(rnn_vad);
+ float pre = rnn_vad.ComputeVadProbability(kFeatures, /*is_silence=*/false);
+ rnn_vad.Reset();
+ WarmUpRnnVad(rnn_vad);
+ float post = rnn_vad.ComputeVadProbability(kFeatures, /*is_silence=*/false);
+ EXPECT_EQ(pre, post);
+}
+
+// Checks that the same output is produced after silence is observed given the
+// same input sequence.
+TEST(RnnVadTest, CheckRnnVadSilence) {
+ RnnVad rnn_vad(GetAvailableCpuFeatures());
+ WarmUpRnnVad(rnn_vad);
+ float pre = rnn_vad.ComputeVadProbability(kFeatures, /*is_silence=*/false);
+ rnn_vad.ComputeVadProbability(kFeatures, /*is_silence=*/true);
+ WarmUpRnnVad(rnn_vad);
+ float post = rnn_vad.ComputeVadProbability(kFeatures, /*is_silence=*/false);
+ EXPECT_EQ(pre, post);
+}
+
+} // namespace
+} // namespace rnn_vad
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/rnn_vad_auto_correlation_gn/moz.build b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/rnn_vad_auto_correlation_gn/moz.build
new file mode 100644
index 0000000000..1e156ad957
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/rnn_vad_auto_correlation_gn/moz.build
@@ -0,0 +1,212 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/auto_correlation.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("rnn_vad_auto_correlation_gn")
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/rnn_vad_common_gn/moz.build b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/rnn_vad_common_gn/moz.build
new file mode 100644
index 0000000000..3bb95f7979
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/rnn_vad_common_gn/moz.build
@@ -0,0 +1,204 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("rnn_vad_common_gn")
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/rnn_vad_gn/moz.build b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/rnn_vad_gn/moz.build
new file mode 100644
index 0000000000..26fa033b16
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/rnn_vad_gn/moz.build
@@ -0,0 +1,213 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/features_extraction.cc",
+ "/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/rnn.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("rnn_vad_gn")
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/rnn_vad_layers_gn/moz.build b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/rnn_vad_layers_gn/moz.build
new file mode 100644
index 0000000000..9bac4ab5e9
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/rnn_vad_layers_gn/moz.build
@@ -0,0 +1,213 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/rnn_fc.cc",
+ "/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/rnn_gru.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("rnn_vad_layers_gn")
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/rnn_vad_lp_residual_gn/moz.build b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/rnn_vad_lp_residual_gn/moz.build
new file mode 100644
index 0000000000..19e87379fa
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/rnn_vad_lp_residual_gn/moz.build
@@ -0,0 +1,201 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/lp_residual.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("rnn_vad_lp_residual_gn")
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/rnn_vad_pitch_gn/moz.build b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/rnn_vad_pitch_gn/moz.build
new file mode 100644
index 0000000000..4d64842117
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/rnn_vad_pitch_gn/moz.build
@@ -0,0 +1,213 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/pitch_search.cc",
+ "/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/pitch_search_internal.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("rnn_vad_pitch_gn")
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/rnn_vad_ring_buffer_gn/moz.build b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/rnn_vad_ring_buffer_gn/moz.build
new file mode 100644
index 0000000000..cc26a37594
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/rnn_vad_ring_buffer_gn/moz.build
@@ -0,0 +1,193 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("rnn_vad_ring_buffer_gn")
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/rnn_vad_sequence_buffer_gn/moz.build b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/rnn_vad_sequence_buffer_gn/moz.build
new file mode 100644
index 0000000000..874bdcaab7
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/rnn_vad_sequence_buffer_gn/moz.build
@@ -0,0 +1,193 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("rnn_vad_sequence_buffer_gn")
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/rnn_vad_spectral_features_gn/moz.build b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/rnn_vad_spectral_features_gn/moz.build
new file mode 100644
index 0000000000..1cdba1b497
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/rnn_vad_spectral_features_gn/moz.build
@@ -0,0 +1,213 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/spectral_features.cc",
+ "/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/spectral_features_internal.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("rnn_vad_spectral_features_gn")
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/rnn_vad_symmetric_matrix_buffer_gn/moz.build b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/rnn_vad_symmetric_matrix_buffer_gn/moz.build
new file mode 100644
index 0000000000..e92fb28f27
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/rnn_vad_symmetric_matrix_buffer_gn/moz.build
@@ -0,0 +1,193 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("rnn_vad_symmetric_matrix_buffer_gn")
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/rnn_vad_tool.cc b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/rnn_vad_tool.cc
new file mode 100644
index 0000000000..a0e1242eb4
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/rnn_vad_tool.cc
@@ -0,0 +1,123 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <array>
+#include <string>
+#include <vector>
+
+#include "absl/flags/flag.h"
+#include "absl/flags/parse.h"
+#include "common_audio/resampler/push_sinc_resampler.h"
+#include "common_audio/wav_file.h"
+#include "modules/audio_processing/agc2/cpu_features.h"
+#include "modules/audio_processing/agc2/rnn_vad/common.h"
+#include "modules/audio_processing/agc2/rnn_vad/features_extraction.h"
+#include "modules/audio_processing/agc2/rnn_vad/rnn.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/numerics/safe_compare.h"
+
+ABSL_FLAG(std::string, i, "", "Path to the input wav file");
+ABSL_FLAG(std::string, f, "", "Path to the output features file");
+ABSL_FLAG(std::string, o, "", "Path to the output VAD probabilities file");
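+// Example invocation (illustrative; the binary name depends on the build
+// target): rnn_vad_tool --i=input.wav --o=vad_probs.dat --f=features.dat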
+
+namespace webrtc {
+namespace rnn_vad {
+namespace test {
+
+int main(int argc, char* argv[]) {
+ absl::ParseCommandLine(argc, argv);
+ rtc::LogMessage::LogToDebug(rtc::LS_INFO);
+
+ // Open wav input file and check properties.
+ const std::string input_wav_file = absl::GetFlag(FLAGS_i);
+ WavReader wav_reader(input_wav_file);
+ if (wav_reader.num_channels() != 1) {
+ RTC_LOG(LS_ERROR) << "Only mono wav files are supported";
+ return 1;
+ }
+ if (wav_reader.sample_rate() % 100 != 0) {
+ RTC_LOG(LS_ERROR) << "The sample rate rate must allow 10 ms frames.";
+ return 1;
+ }
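+  // For instance, 48000 Hz gives 480 samples per 10 ms frame, while a rate
+  // such as 22050 Hz (220.5 samples) would be rejected.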
+ RTC_LOG(LS_INFO) << "Input sample rate: " << wav_reader.sample_rate();
+
+ // Init output files.
+ const std::string output_vad_probs_file = absl::GetFlag(FLAGS_o);
+ FILE* vad_probs_file = fopen(output_vad_probs_file.c_str(), "wb");
+ FILE* features_file = nullptr;
+ const std::string output_feature_file = absl::GetFlag(FLAGS_f);
+ if (!output_feature_file.empty()) {
+ features_file = fopen(output_feature_file.c_str(), "wb");
+ }
+
+ // Initialize.
+ const int frame_size_10ms =
+ rtc::CheckedDivExact(wav_reader.sample_rate(), 100);
+ std::vector<float> samples_10ms;
+ samples_10ms.resize(frame_size_10ms);
+ std::array<float, kFrameSize10ms24kHz> samples_10ms_24kHz;
+ PushSincResampler resampler(frame_size_10ms, kFrameSize10ms24kHz);
+ const AvailableCpuFeatures cpu_features = GetAvailableCpuFeatures();
+ FeaturesExtractor features_extractor(cpu_features);
+ std::array<float, kFeatureVectorSize> feature_vector;
+ RnnVad rnn_vad(cpu_features);
+
+ // Compute VAD probabilities.
+ while (true) {
+ // Read frame at the input sample rate.
+ const size_t read_samples =
+ wav_reader.ReadSamples(frame_size_10ms, samples_10ms.data());
+ if (rtc::SafeLt(read_samples, frame_size_10ms)) {
+ break; // EOF.
+ }
+ // Resample input.
+ resampler.Resample(samples_10ms.data(), samples_10ms.size(),
+ samples_10ms_24kHz.data(), samples_10ms_24kHz.size());
+
+ // Extract features and feed the RNN.
+ bool is_silence = features_extractor.CheckSilenceComputeFeatures(
+ samples_10ms_24kHz, feature_vector);
+ float vad_probability =
+ rnn_vad.ComputeVadProbability(feature_vector, is_silence);
+ // Write voice probability.
+ RTC_DCHECK_GE(vad_probability, 0.f);
+  RTC_DCHECK_LE(vad_probability, 1.f);
+ fwrite(&vad_probability, sizeof(float), 1, vad_probs_file);
+ // Write features.
+ if (features_file) {
+ const float float_is_silence = is_silence ? 1.f : 0.f;
+ fwrite(&float_is_silence, sizeof(float), 1, features_file);
+ if (is_silence) {
+ // Do not write uninitialized values.
+ feature_vector.fill(0.f);
+ }
+ fwrite(feature_vector.data(), sizeof(float), kFeatureVectorSize,
+ features_file);
+ }
+ }
+
+ // Close output file(s).
+ fclose(vad_probs_file);
+ RTC_LOG(LS_INFO) << "VAD probabilities written to " << output_vad_probs_file;
+ if (features_file) {
+ fclose(features_file);
+ RTC_LOG(LS_INFO) << "features written to " << output_feature_file;
+ }
+
+ return 0;
+}
+
+} // namespace test
+} // namespace rnn_vad
+} // namespace webrtc
+
+int main(int argc, char* argv[]) {
+ return webrtc::rnn_vad::test::main(argc, argv);
+}
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/rnn_vad_unittest.cc b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/rnn_vad_unittest.cc
new file mode 100644
index 0000000000..f33cd14a8a
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/rnn_vad_unittest.cc
@@ -0,0 +1,185 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <array>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "common_audio/resampler/push_sinc_resampler.h"
+#include "modules/audio_processing/agc2/cpu_features.h"
+#include "modules/audio_processing/agc2/rnn_vad/features_extraction.h"
+#include "modules/audio_processing/agc2/rnn_vad/rnn.h"
+#include "modules/audio_processing/agc2/rnn_vad/test_utils.h"
+#include "modules/audio_processing/test/performance_timer.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "test/gtest.h"
+#include "third_party/rnnoise/src/rnn_activations.h"
+#include "third_party/rnnoise/src/rnn_vad_weights.h"
+
+namespace webrtc {
+namespace rnn_vad {
+namespace {
+
+constexpr int kFrameSize10ms48kHz = 480;
+
+void DumpPerfStats(int num_samples,
+ int sample_rate,
+ double average_us,
+ double standard_deviation) {
+ float audio_track_length_ms =
+ 1e3f * static_cast<float>(num_samples) / static_cast<float>(sample_rate);
+ float average_ms = static_cast<float>(average_us) / 1e3f;
+ float speed = audio_track_length_ms / average_ms;
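+  // E.g., a 10000 ms track with an average processing time of 100 ms per run
+  // yields speed = 100x (illustrative figures).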
+ RTC_LOG(LS_INFO) << "track duration (ms): " << audio_track_length_ms;
+ RTC_LOG(LS_INFO) << "average processing time (ms): " << average_ms << " +/- "
+ << (standard_deviation / 1e3);
+ RTC_LOG(LS_INFO) << "speed: " << speed << "x";
+}
+
+// When the RNN VAD model is updated and the expected output changes, set the
+// constant below to true in order to write new expected output binary files.
+constexpr bool kWriteComputedOutputToFile = false;
+
+// Guards against forgetting to set `kWriteComputedOutputToFile` back to false
+// when the expected output files are re-exported.
+TEST(RnnVadTest, CheckWriteComputedOutputIsFalse) {
+ ASSERT_FALSE(kWriteComputedOutputToFile)
+ << "Cannot land if kWriteComputedOutput is true.";
+}
+
+class RnnVadProbabilityParametrization
+ : public ::testing::TestWithParam<AvailableCpuFeatures> {};
+
+// Checks that the computed VAD probability for a test input sequence sampled at
+// 48 kHz is within tolerance.
+TEST_P(RnnVadProbabilityParametrization, RnnVadProbabilityWithinTolerance) {
+ // Init resampler, feature extractor and RNN.
+ PushSincResampler decimator(kFrameSize10ms48kHz, kFrameSize10ms24kHz);
+ const AvailableCpuFeatures cpu_features = GetParam();
+ FeaturesExtractor features_extractor(cpu_features);
+ RnnVad rnn_vad(cpu_features);
+
+ // Init input samples and expected output readers.
+ std::unique_ptr<FileReader> samples_reader = CreatePcmSamplesReader();
+ std::unique_ptr<FileReader> expected_vad_prob_reader = CreateVadProbsReader();
+
+ // Input length. The last incomplete frame is ignored.
+ const int num_frames = samples_reader->size() / kFrameSize10ms48kHz;
+
+ // Init buffers.
+ std::vector<float> samples_48k(kFrameSize10ms48kHz);
+ std::vector<float> samples_24k(kFrameSize10ms24kHz);
+ std::vector<float> feature_vector(kFeatureVectorSize);
+ std::vector<float> computed_vad_prob(num_frames);
+ std::vector<float> expected_vad_prob(num_frames);
+
+ // Read expected output.
+ ASSERT_TRUE(expected_vad_prob_reader->ReadChunk(expected_vad_prob));
+
+ // Compute VAD probabilities on the downsampled input.
+ float cumulative_error = 0.f;
+ for (int i = 0; i < num_frames; ++i) {
+ ASSERT_TRUE(samples_reader->ReadChunk(samples_48k));
+ decimator.Resample(samples_48k.data(), samples_48k.size(),
+ samples_24k.data(), samples_24k.size());
+ bool is_silence = features_extractor.CheckSilenceComputeFeatures(
+ {samples_24k.data(), kFrameSize10ms24kHz},
+ {feature_vector.data(), kFeatureVectorSize});
+ computed_vad_prob[i] = rnn_vad.ComputeVadProbability(
+ {feature_vector.data(), kFeatureVectorSize}, is_silence);
+ EXPECT_NEAR(computed_vad_prob[i], expected_vad_prob[i], 1e-3f);
+ cumulative_error += std::abs(computed_vad_prob[i] - expected_vad_prob[i]);
+ }
+ // Check average error.
+ EXPECT_LT(cumulative_error / num_frames, 1e-4f);
+
+ if (kWriteComputedOutputToFile) {
+ FileWriter vad_prob_writer("new_vad_prob.dat");
+ vad_prob_writer.WriteChunk(computed_vad_prob);
+ }
+}
+
+// Performance test for the RNN VAD (pre-fetching and downsampling are
+// excluded). Keep disabled and only enable locally to measure performance as
+// follows:
+// - on desktop: run this unit test adding "--logs";
+// - on Android: run this unit test adding "--logcat-output-file".
+TEST_P(RnnVadProbabilityParametrization, DISABLED_RnnVadPerformance) {
+ // PCM samples reader and buffers.
+ std::unique_ptr<FileReader> samples_reader = CreatePcmSamplesReader();
+ // The last incomplete frame is ignored.
+ const int num_frames = samples_reader->size() / kFrameSize10ms48kHz;
+ std::array<float, kFrameSize10ms48kHz> samples;
+ // Pre-fetch and decimate samples.
+ PushSincResampler decimator(kFrameSize10ms48kHz, kFrameSize10ms24kHz);
+ std::vector<float> prefetched_decimated_samples;
+ prefetched_decimated_samples.resize(num_frames * kFrameSize10ms24kHz);
+ for (int i = 0; i < num_frames; ++i) {
+ ASSERT_TRUE(samples_reader->ReadChunk(samples));
+ decimator.Resample(samples.data(), samples.size(),
+ &prefetched_decimated_samples[i * kFrameSize10ms24kHz],
+ kFrameSize10ms24kHz);
+ }
+ // Initialize.
+ const AvailableCpuFeatures cpu_features = GetParam();
+ FeaturesExtractor features_extractor(cpu_features);
+ std::array<float, kFeatureVectorSize> feature_vector;
+ RnnVad rnn_vad(cpu_features);
+ constexpr int number_of_tests = 100;
+ ::webrtc::test::PerformanceTimer perf_timer(number_of_tests);
+ for (int k = 0; k < number_of_tests; ++k) {
+ features_extractor.Reset();
+ rnn_vad.Reset();
+ // Process frames.
+ perf_timer.StartTimer();
+ for (int i = 0; i < num_frames; ++i) {
+ bool is_silence = features_extractor.CheckSilenceComputeFeatures(
+ {&prefetched_decimated_samples[i * kFrameSize10ms24kHz],
+ kFrameSize10ms24kHz},
+ feature_vector);
+ rnn_vad.ComputeVadProbability(feature_vector, is_silence);
+ }
+ perf_timer.StopTimer();
+ }
+ DumpPerfStats(num_frames * kFrameSize10ms24kHz, kSampleRate24kHz,
+ perf_timer.GetDurationAverage(),
+ perf_timer.GetDurationStandardDeviation());
+}
+
+// Finds the relevant CPU features combinations to test.
+std::vector<AvailableCpuFeatures> GetCpuFeaturesToTest() {
+ std::vector<AvailableCpuFeatures> v;
+ v.push_back(NoAvailableCpuFeatures());
+ AvailableCpuFeatures available = GetAvailableCpuFeatures();
+ if (available.avx2 && available.sse2) {
+ v.push_back({/*sse2=*/true, /*avx2=*/true, /*neon=*/false});
+ }
+ if (available.sse2) {
+ v.push_back({/*sse2=*/true, /*avx2=*/false, /*neon=*/false});
+ }
+ if (available.neon) {
+ v.push_back({/*sse2=*/false, /*avx2=*/false, /*neon=*/true});
+ }
+ return v;
+}
+
+INSTANTIATE_TEST_SUITE_P(
+ RnnVadTest,
+ RnnVadProbabilityParametrization,
+ ::testing::ValuesIn(GetCpuFeaturesToTest()),
+ [](const ::testing::TestParamInfo<AvailableCpuFeatures>& info) {
+ return info.param.ToString();
+ });
+
+} // namespace
+} // namespace rnn_vad
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/sequence_buffer.h b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/sequence_buffer.h
new file mode 100644
index 0000000000..a7402788c8
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/sequence_buffer.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AGC2_RNN_VAD_SEQUENCE_BUFFER_H_
+#define MODULES_AUDIO_PROCESSING_AGC2_RNN_VAD_SEQUENCE_BUFFER_H_
+
+#include <algorithm>
+#include <cstring>
+#include <type_traits>
+#include <vector>
+
+#include "api/array_view.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace rnn_vad {
+
+// Linear buffer implementation to (i) push fixed-size chunks of sequential
+// data and (ii) view contiguous parts of the buffer. The buffer and the pushed
+// chunks have sizes S and N respectively. For instance, when S = 2N, the first
+// half of the sequence buffer is replaced with its second half, and the N new
+// values are written at the end of the buffer.
+// The class also provides a view on the most recent M values, where 0 < M <= S
+// and, by default, M = N.
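+// Usage sketch (illustrative values):
+//   SequenceBuffer<float, /*S=*/8, /*N=*/4> buf;
+//   std::array<float, 4> chunk{1.f, 2.f, 3.f, 4.f};
+//   buf.Push(chunk);  // Buffer content: 0 0 0 0 1 2 3 4.
+//   buf.Push(chunk);  // Buffer content: 1 2 3 4 1 2 3 4.
+//   auto view = buf.GetMostRecentValuesView();  // Last N = 4 values.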
+template <typename T, int S, int N, int M = N>
+class SequenceBuffer {
+ static_assert(N <= S,
+ "The new chunk size cannot be larger than the sequence buffer "
+ "size.");
+ static_assert(std::is_arithmetic<T>::value,
+ "Integral or floating point required.");
+
+ public:
+ SequenceBuffer() : buffer_(S) {
+ RTC_DCHECK_EQ(S, buffer_.size());
+ Reset();
+ }
+ SequenceBuffer(const SequenceBuffer&) = delete;
+ SequenceBuffer& operator=(const SequenceBuffer&) = delete;
+ ~SequenceBuffer() = default;
+ int size() const { return S; }
+ int chunks_size() const { return N; }
+ // Sets the sequence buffer values to zero.
+ void Reset() { std::fill(buffer_.begin(), buffer_.end(), 0); }
+ // Returns a view on the whole buffer.
+ rtc::ArrayView<const T, S> GetBufferView() const {
+ return {buffer_.data(), S};
+ }
+ // Returns a view on the M most recent values of the buffer.
+ rtc::ArrayView<const T, M> GetMostRecentValuesView() const {
+ static_assert(M <= S,
+ "The number of most recent values cannot be larger than the "
+ "sequence buffer size.");
+ return {buffer_.data() + S - M, M};
+ }
+  // Shifts the buffer left by N items and adds the N new items at the end.
+ void Push(rtc::ArrayView<const T, N> new_values) {
+ // Make space for the new values.
+ if (S > N)
+ std::memmove(buffer_.data(), buffer_.data() + N, (S - N) * sizeof(T));
+ // Copy the new values at the end of the buffer.
+ std::memcpy(buffer_.data() + S - N, new_values.data(), N * sizeof(T));
+ }
+
+ private:
+ std::vector<T> buffer_;
+};
+
+} // namespace rnn_vad
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AGC2_RNN_VAD_SEQUENCE_BUFFER_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/sequence_buffer_unittest.cc b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/sequence_buffer_unittest.cc
new file mode 100644
index 0000000000..af005833c1
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/sequence_buffer_unittest.cc
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/agc2/rnn_vad/sequence_buffer.h"
+
+#include <algorithm>
+#include <array>
+
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace rnn_vad {
+namespace {
+
+template <typename T, int S, int N>
+void TestSequenceBufferPushOp() {
+ SCOPED_TRACE(S);
+ SCOPED_TRACE(N);
+ SequenceBuffer<T, S, N> seq_buf;
+ auto seq_buf_view = seq_buf.GetBufferView();
+ std::array<T, N> chunk;
+
+ // Check that a chunk is fully gone after ceil(S / N) push ops.
+ chunk.fill(1);
+ seq_buf.Push(chunk);
+ chunk.fill(0);
+ constexpr int required_push_ops = (S % N) ? S / N + 1 : S / N;
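+  // E.g., with S = 23 and N = 7, required_push_ops is 4 since ceil(23 / 7) = 4.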
+ for (int i = 0; i < required_push_ops - 1; ++i) {
+ SCOPED_TRACE(i);
+ seq_buf.Push(chunk);
+ // Still in the buffer.
+ const auto* m = std::max_element(seq_buf_view.begin(), seq_buf_view.end());
+ EXPECT_EQ(1, *m);
+ }
+ // Gone after another push.
+ seq_buf.Push(chunk);
+ const auto* m = std::max_element(seq_buf_view.begin(), seq_buf_view.end());
+ EXPECT_EQ(0, *m);
+
+ // Check that the last item moves left by N positions after a push op.
+ if (S > N) {
+ // Fill in with non-zero values.
+ for (int i = 0; i < N; ++i)
+ chunk[i] = static_cast<T>(i + 1);
+ seq_buf.Push(chunk);
+ // With the next Push(), `last` will be moved left by N positions.
+ const T last = chunk[N - 1];
+ for (int i = 0; i < N; ++i)
+ chunk[i] = static_cast<T>(last + i + 1);
+ seq_buf.Push(chunk);
+ EXPECT_EQ(last, seq_buf_view[S - N - 1]);
+ }
+}
+
+TEST(RnnVadTest, SequenceBufferGetters) {
+ constexpr int buffer_size = 8;
+ constexpr int chunk_size = 8;
+ SequenceBuffer<int, buffer_size, chunk_size> seq_buf;
+ EXPECT_EQ(buffer_size, seq_buf.size());
+ EXPECT_EQ(chunk_size, seq_buf.chunks_size());
+ // Test view.
+ auto seq_buf_view = seq_buf.GetBufferView();
+ EXPECT_EQ(0, seq_buf_view[0]);
+ EXPECT_EQ(0, seq_buf_view[seq_buf_view.size() - 1]);
+ constexpr std::array<int, chunk_size> chunk = {10, 20, 30, 40,
+ 50, 60, 70, 80};
+ seq_buf.Push(chunk);
+ EXPECT_EQ(10, *seq_buf_view.begin());
+ EXPECT_EQ(80, *(seq_buf_view.end() - 1));
+}
+
+TEST(RnnVadTest, SequenceBufferPushOpsUnsigned) {
+ TestSequenceBufferPushOp<uint8_t, 32, 8>(); // Chunk size: 25%.
+ TestSequenceBufferPushOp<uint8_t, 32, 16>(); // Chunk size: 50%.
+ TestSequenceBufferPushOp<uint8_t, 32, 32>(); // Chunk size: 100%.
+ TestSequenceBufferPushOp<uint8_t, 23, 7>(); // Non-integer ratio.
+}
+
+TEST(RnnVadTest, SequenceBufferPushOpsSigned) {
+ TestSequenceBufferPushOp<int, 32, 8>(); // Chunk size: 25%.
+ TestSequenceBufferPushOp<int, 32, 16>(); // Chunk size: 50%.
+ TestSequenceBufferPushOp<int, 32, 32>(); // Chunk size: 100%.
+ TestSequenceBufferPushOp<int, 23, 7>(); // Non-integer ratio.
+}
+
+TEST(RnnVadTest, SequenceBufferPushOpsFloating) {
+ TestSequenceBufferPushOp<float, 32, 8>(); // Chunk size: 25%.
+ TestSequenceBufferPushOp<float, 32, 16>(); // Chunk size: 50%.
+ TestSequenceBufferPushOp<float, 32, 32>(); // Chunk size: 100%.
+ TestSequenceBufferPushOp<float, 23, 7>(); // Non-integer ratio.
+}
+
+} // namespace
+} // namespace rnn_vad
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/spectral_features.cc b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/spectral_features.cc
new file mode 100644
index 0000000000..96086babb6
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/spectral_features.cc
@@ -0,0 +1,214 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/agc2/rnn_vad/spectral_features.h"
+
+#include <algorithm>
+#include <cmath>
+#include <limits>
+#include <numeric>
+
+#include "rtc_base/checks.h"
+#include "rtc_base/numerics/safe_compare.h"
+
+namespace webrtc {
+namespace rnn_vad {
+namespace {
+
+constexpr float kSilenceThreshold = 0.04f;
+
+// Computes the new cepstral difference stats and pushes them into the passed
+// symmetric matrix buffer.
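+// `distances[i]` below is the squared L2 distance between the new cepstral
+// coefficients and those computed i + 1 frames earlier.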
+void UpdateCepstralDifferenceStats(
+ rtc::ArrayView<const float, kNumBands> new_cepstral_coeffs,
+ const RingBuffer<float, kNumBands, kCepstralCoeffsHistorySize>& ring_buf,
+ SymmetricMatrixBuffer<float, kCepstralCoeffsHistorySize>* sym_matrix_buf) {
+ RTC_DCHECK(sym_matrix_buf);
+ // Compute the new cepstral distance stats.
+ std::array<float, kCepstralCoeffsHistorySize - 1> distances;
+ for (int i = 0; i < kCepstralCoeffsHistorySize - 1; ++i) {
+ const int delay = i + 1;
+ auto old_cepstral_coeffs = ring_buf.GetArrayView(delay);
+ distances[i] = 0.f;
+ for (int k = 0; k < kNumBands; ++k) {
+ const float c = new_cepstral_coeffs[k] - old_cepstral_coeffs[k];
+ distances[i] += c * c;
+ }
+ }
+ // Push the new spectral distance stats into the symmetric matrix buffer.
+ sym_matrix_buf->Push(distances);
+}
+
+// Computes the first half of the Vorbis window.
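+// The full window of length N is w[n] = sin(pi/2 * sin^2(pi * (n + 0.5) / N));
+// only the first N/2 coefficients are stored since the window is symmetric.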
+std::array<float, kFrameSize20ms24kHz / 2> ComputeScaledHalfVorbisWindow(
+ float scaling = 1.f) {
+ constexpr int kHalfSize = kFrameSize20ms24kHz / 2;
+ std::array<float, kHalfSize> half_window{};
+ for (int i = 0; i < kHalfSize; ++i) {
+ half_window[i] =
+ scaling *
+ std::sin(0.5 * kPi * std::sin(0.5 * kPi * (i + 0.5) / kHalfSize) *
+ std::sin(0.5 * kPi * (i + 0.5) / kHalfSize));
+ }
+ return half_window;
+}
+
+// Computes the forward FFT on a 20 ms frame to which a given window function is
+// applied. The Fourier coefficient corresponding to the Nyquist frequency is
+// set to zero (it is never used, and this simplifies the code).
+void ComputeWindowedForwardFft(
+ rtc::ArrayView<const float, kFrameSize20ms24kHz> frame,
+ const std::array<float, kFrameSize20ms24kHz / 2>& half_window,
+ Pffft::FloatBuffer* fft_input_buffer,
+ Pffft::FloatBuffer* fft_output_buffer,
+ Pffft* fft) {
+ RTC_DCHECK_EQ(frame.size(), 2 * half_window.size());
+ // Apply windowing.
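+  // The window is symmetric, so the stored first half is applied forward from
+  // the start of the frame and mirrored from its end.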
+ auto in = fft_input_buffer->GetView();
+ for (int i = 0, j = kFrameSize20ms24kHz - 1;
+ rtc::SafeLt(i, half_window.size()); ++i, --j) {
+ in[i] = frame[i] * half_window[i];
+ in[j] = frame[j] * half_window[i];
+ }
+ fft->ForwardTransform(*fft_input_buffer, fft_output_buffer, /*ordered=*/true);
+ // Set the Nyquist frequency coefficient to zero.
+ auto out = fft_output_buffer->GetView();
+ out[1] = 0.f;
+}
+
+} // namespace
+
+SpectralFeaturesExtractor::SpectralFeaturesExtractor()
+ : half_window_(ComputeScaledHalfVorbisWindow(
+ 1.f / static_cast<float>(kFrameSize20ms24kHz))),
+ fft_(kFrameSize20ms24kHz, Pffft::FftType::kReal),
+ fft_buffer_(fft_.CreateBuffer()),
+ reference_frame_fft_(fft_.CreateBuffer()),
+ lagged_frame_fft_(fft_.CreateBuffer()),
+ dct_table_(ComputeDctTable()) {}
+
+SpectralFeaturesExtractor::~SpectralFeaturesExtractor() = default;
+
+void SpectralFeaturesExtractor::Reset() {
+ cepstral_coeffs_ring_buf_.Reset();
+ cepstral_diffs_buf_.Reset();
+}
+
+bool SpectralFeaturesExtractor::CheckSilenceComputeFeatures(
+ rtc::ArrayView<const float, kFrameSize20ms24kHz> reference_frame,
+ rtc::ArrayView<const float, kFrameSize20ms24kHz> lagged_frame,
+ rtc::ArrayView<float, kNumBands - kNumLowerBands> higher_bands_cepstrum,
+ rtc::ArrayView<float, kNumLowerBands> average,
+ rtc::ArrayView<float, kNumLowerBands> first_derivative,
+ rtc::ArrayView<float, kNumLowerBands> second_derivative,
+ rtc::ArrayView<float, kNumLowerBands> bands_cross_corr,
+ float* variability) {
+ // Compute the Opus band energies for the reference frame.
+ ComputeWindowedForwardFft(reference_frame, half_window_, fft_buffer_.get(),
+ reference_frame_fft_.get(), &fft_);
+ spectral_correlator_.ComputeAutoCorrelation(
+ reference_frame_fft_->GetConstView(), reference_frame_bands_energy_);
+ // Check if the reference frame has silence.
+ const float tot_energy =
+ std::accumulate(reference_frame_bands_energy_.begin(),
+ reference_frame_bands_energy_.end(), 0.f);
+ if (tot_energy < kSilenceThreshold) {
+ return true;
+ }
+ // Compute the Opus band energies for the lagged frame.
+ ComputeWindowedForwardFft(lagged_frame, half_window_, fft_buffer_.get(),
+ lagged_frame_fft_.get(), &fft_);
+ spectral_correlator_.ComputeAutoCorrelation(lagged_frame_fft_->GetConstView(),
+ lagged_frame_bands_energy_);
+ // Log of the band energies for the reference frame.
+ std::array<float, kNumBands> log_bands_energy;
+ ComputeSmoothedLogMagnitudeSpectrum(reference_frame_bands_energy_,
+ log_bands_energy);
+ // Reference frame cepstrum.
+ std::array<float, kNumBands> cepstrum;
+ ComputeDct(log_bands_energy, dct_table_, cepstrum);
+ // Ad-hoc correction terms for the first two cepstral coefficients.
+ cepstrum[0] -= 12.f;
+ cepstrum[1] -= 4.f;
+ // Update the ring buffer and the cepstral difference stats.
+ cepstral_coeffs_ring_buf_.Push(cepstrum);
+ UpdateCepstralDifferenceStats(cepstrum, cepstral_coeffs_ring_buf_,
+ &cepstral_diffs_buf_);
+ // Write the higher bands cepstral coefficients.
+ RTC_DCHECK_EQ(cepstrum.size() - kNumLowerBands, higher_bands_cepstrum.size());
+ std::copy(cepstrum.begin() + kNumLowerBands, cepstrum.end(),
+ higher_bands_cepstrum.begin());
+ // Compute and write remaining features.
+ ComputeAvgAndDerivatives(average, first_derivative, second_derivative);
+ ComputeNormalizedCepstralCorrelation(bands_cross_corr);
+ RTC_DCHECK(variability);
+ *variability = ComputeVariability();
+ return false;
+}
+
+void SpectralFeaturesExtractor::ComputeAvgAndDerivatives(
+ rtc::ArrayView<float, kNumLowerBands> average,
+ rtc::ArrayView<float, kNumLowerBands> first_derivative,
+ rtc::ArrayView<float, kNumLowerBands> second_derivative) const {
+ auto curr = cepstral_coeffs_ring_buf_.GetArrayView(0);
+ auto prev1 = cepstral_coeffs_ring_buf_.GetArrayView(1);
+ auto prev2 = cepstral_coeffs_ring_buf_.GetArrayView(2);
+ RTC_DCHECK_EQ(average.size(), first_derivative.size());
+ RTC_DCHECK_EQ(first_derivative.size(), second_derivative.size());
+ RTC_DCHECK_LE(average.size(), curr.size());
+ for (int i = 0; rtc::SafeLt(i, average.size()); ++i) {
+ // Average, kernel: [1, 1, 1].
+ average[i] = curr[i] + prev1[i] + prev2[i];
+    // First derivative, kernel: [1, 0, -1].
+ first_derivative[i] = curr[i] - prev2[i];
+ // Second derivative, Laplacian kernel: [1, -2, 1].
+ second_derivative[i] = curr[i] - 2 * prev1[i] + prev2[i];
+ }
+}
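+
+// Worked example (illustrative, not part of the original WebRTC code): if the
+// three most recent cepstral values of a band are curr = 5, prev1 = 3 and
+// prev2 = 1, then average = 5 + 3 + 1 = 9, first derivative = 5 - 1 = 4 and
+// second derivative = 5 - 2 * 3 + 1 = 0.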
+
+void SpectralFeaturesExtractor::ComputeNormalizedCepstralCorrelation(
+ rtc::ArrayView<float, kNumLowerBands> bands_cross_corr) {
+ spectral_correlator_.ComputeCrossCorrelation(
+ reference_frame_fft_->GetConstView(), lagged_frame_fft_->GetConstView(),
+ bands_cross_corr_);
+ // Normalize.
+ for (int i = 0; rtc::SafeLt(i, bands_cross_corr_.size()); ++i) {
+ bands_cross_corr_[i] =
+ bands_cross_corr_[i] /
+ std::sqrt(0.001f + reference_frame_bands_energy_[i] *
+ lagged_frame_bands_energy_[i]);
+ }
+ // Cepstrum.
+ ComputeDct(bands_cross_corr_, dct_table_, bands_cross_corr);
+ // Ad-hoc correction terms for the first two cepstral coefficients.
+ bands_cross_corr[0] -= 1.3f;
+ bands_cross_corr[1] -= 0.9f;
+}
+
+float SpectralFeaturesExtractor::ComputeVariability() const {
+ // Compute cepstral variability score.
+ float variability = 0.f;
+ for (int delay1 = 0; delay1 < kCepstralCoeffsHistorySize; ++delay1) {
+ float min_dist = std::numeric_limits<float>::max();
+ for (int delay2 = 0; delay2 < kCepstralCoeffsHistorySize; ++delay2) {
+ if (delay1 == delay2) // The distance would be 0.
+ continue;
+ min_dist =
+ std::min(min_dist, cepstral_diffs_buf_.GetValue(delay1, delay2));
+ }
+ variability += min_dist;
+ }
+ // Normalize (based on training set stats).
+ // TODO(bugs.webrtc.org/10480): Isolate normalization from feature extraction.
+ return variability / kCepstralCoeffsHistorySize - 2.1f;
+}
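+
+// Note (illustrative, not part of the original WebRTC code): for each frame
+// in the cepstral history, ComputeVariability() keeps the distance to its
+// most similar other frame and sums these minima, so a spectrum that keeps
+// changing tends to score higher than a stationary one.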
+
+} // namespace rnn_vad
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/spectral_features.h b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/spectral_features.h
new file mode 100644
index 0000000000..d327ef8e01
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/spectral_features.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AGC2_RNN_VAD_SPECTRAL_FEATURES_H_
+#define MODULES_AUDIO_PROCESSING_AGC2_RNN_VAD_SPECTRAL_FEATURES_H_
+
+#include <array>
+#include <cstddef>
+#include <memory>
+#include <vector>
+
+#include "api/array_view.h"
+#include "modules/audio_processing/agc2/rnn_vad/common.h"
+#include "modules/audio_processing/agc2/rnn_vad/ring_buffer.h"
+#include "modules/audio_processing/agc2/rnn_vad/spectral_features_internal.h"
+#include "modules/audio_processing/agc2/rnn_vad/symmetric_matrix_buffer.h"
+#include "modules/audio_processing/utility/pffft_wrapper.h"
+
+namespace webrtc {
+namespace rnn_vad {
+
+// Class to compute spectral features.
+class SpectralFeaturesExtractor {
+ public:
+ SpectralFeaturesExtractor();
+ SpectralFeaturesExtractor(const SpectralFeaturesExtractor&) = delete;
+ SpectralFeaturesExtractor& operator=(const SpectralFeaturesExtractor&) =
+ delete;
+ ~SpectralFeaturesExtractor();
+ // Resets the internal state of the feature extractor.
+ void Reset();
+  // Analyzes a pair of reference and lagged frames from the pitch buffer,
+  // detects silence and computes features. Returns true if silence is
+  // detected; in that case the output arguments are neither computed nor
+  // written.
+ bool CheckSilenceComputeFeatures(
+ rtc::ArrayView<const float, kFrameSize20ms24kHz> reference_frame,
+ rtc::ArrayView<const float, kFrameSize20ms24kHz> lagged_frame,
+ rtc::ArrayView<float, kNumBands - kNumLowerBands> higher_bands_cepstrum,
+ rtc::ArrayView<float, kNumLowerBands> average,
+ rtc::ArrayView<float, kNumLowerBands> first_derivative,
+ rtc::ArrayView<float, kNumLowerBands> second_derivative,
+ rtc::ArrayView<float, kNumLowerBands> bands_cross_corr,
+ float* variability);
+
+ private:
+ void ComputeAvgAndDerivatives(
+ rtc::ArrayView<float, kNumLowerBands> average,
+ rtc::ArrayView<float, kNumLowerBands> first_derivative,
+ rtc::ArrayView<float, kNumLowerBands> second_derivative) const;
+ void ComputeNormalizedCepstralCorrelation(
+ rtc::ArrayView<float, kNumLowerBands> bands_cross_corr);
+ float ComputeVariability() const;
+
+ const std::array<float, kFrameSize20ms24kHz / 2> half_window_;
+ Pffft fft_;
+ std::unique_ptr<Pffft::FloatBuffer> fft_buffer_;
+ std::unique_ptr<Pffft::FloatBuffer> reference_frame_fft_;
+ std::unique_ptr<Pffft::FloatBuffer> lagged_frame_fft_;
+ SpectralCorrelator spectral_correlator_;
+ std::array<float, kOpusBands24kHz> reference_frame_bands_energy_;
+ std::array<float, kOpusBands24kHz> lagged_frame_bands_energy_;
+ std::array<float, kOpusBands24kHz> bands_cross_corr_;
+ const std::array<float, kNumBands * kNumBands> dct_table_;
+ RingBuffer<float, kNumBands, kCepstralCoeffsHistorySize>
+ cepstral_coeffs_ring_buf_;
+ SymmetricMatrixBuffer<float, kCepstralCoeffsHistorySize> cepstral_diffs_buf_;
+};
+
+} // namespace rnn_vad
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AGC2_RNN_VAD_SPECTRAL_FEATURES_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/spectral_features_internal.cc b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/spectral_features_internal.cc
new file mode 100644
index 0000000000..a10b0f7ec9
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/spectral_features_internal.cc
@@ -0,0 +1,188 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/agc2/rnn_vad/spectral_features_internal.h"
+
+#include <algorithm>
+#include <cmath>
+#include <cstddef>
+
+#include "rtc_base/checks.h"
+#include "rtc_base/numerics/safe_compare.h"
+
+namespace webrtc {
+namespace rnn_vad {
+namespace {
+
+// Weights for each FFT coefficient for each Opus band (Nyquist frequency
+// excluded). The size of each band is specified in
+// `kOpusScaleNumBins24kHz20ms`.
+constexpr std::array<float, kFrameSize20ms24kHz / 2> kOpusBandWeights24kHz20ms =
+ {{
+ 0.f, 0.25f, 0.5f, 0.75f, // Band 0
+ 0.f, 0.25f, 0.5f, 0.75f, // Band 1
+ 0.f, 0.25f, 0.5f, 0.75f, // Band 2
+ 0.f, 0.25f, 0.5f, 0.75f, // Band 3
+ 0.f, 0.25f, 0.5f, 0.75f, // Band 4
+ 0.f, 0.25f, 0.5f, 0.75f, // Band 5
+ 0.f, 0.25f, 0.5f, 0.75f, // Band 6
+ 0.f, 0.25f, 0.5f, 0.75f, // Band 7
+ 0.f, 0.125f, 0.25f, 0.375f, 0.5f,
+ 0.625f, 0.75f, 0.875f, // Band 8
+ 0.f, 0.125f, 0.25f, 0.375f, 0.5f,
+ 0.625f, 0.75f, 0.875f, // Band 9
+ 0.f, 0.125f, 0.25f, 0.375f, 0.5f,
+ 0.625f, 0.75f, 0.875f, // Band 10
+ 0.f, 0.125f, 0.25f, 0.375f, 0.5f,
+ 0.625f, 0.75f, 0.875f, // Band 11
+ 0.f, 0.0625f, 0.125f, 0.1875f, 0.25f,
+ 0.3125f, 0.375f, 0.4375f, 0.5f, 0.5625f,
+ 0.625f, 0.6875f, 0.75f, 0.8125f, 0.875f,
+ 0.9375f, // Band 12
+ 0.f, 0.0625f, 0.125f, 0.1875f, 0.25f,
+ 0.3125f, 0.375f, 0.4375f, 0.5f, 0.5625f,
+ 0.625f, 0.6875f, 0.75f, 0.8125f, 0.875f,
+ 0.9375f, // Band 13
+ 0.f, 0.0625f, 0.125f, 0.1875f, 0.25f,
+ 0.3125f, 0.375f, 0.4375f, 0.5f, 0.5625f,
+ 0.625f, 0.6875f, 0.75f, 0.8125f, 0.875f,
+ 0.9375f, // Band 14
+ 0.f, 0.0416667f, 0.0833333f, 0.125f, 0.166667f,
+ 0.208333f, 0.25f, 0.291667f, 0.333333f, 0.375f,
+ 0.416667f, 0.458333f, 0.5f, 0.541667f, 0.583333f,
+ 0.625f, 0.666667f, 0.708333f, 0.75f, 0.791667f,
+ 0.833333f, 0.875f, 0.916667f, 0.958333f, // Band 15
+ 0.f, 0.0416667f, 0.0833333f, 0.125f, 0.166667f,
+ 0.208333f, 0.25f, 0.291667f, 0.333333f, 0.375f,
+ 0.416667f, 0.458333f, 0.5f, 0.541667f, 0.583333f,
+ 0.625f, 0.666667f, 0.708333f, 0.75f, 0.791667f,
+ 0.833333f, 0.875f, 0.916667f, 0.958333f, // Band 16
+ 0.f, 0.03125f, 0.0625f, 0.09375f, 0.125f,
+ 0.15625f, 0.1875f, 0.21875f, 0.25f, 0.28125f,
+ 0.3125f, 0.34375f, 0.375f, 0.40625f, 0.4375f,
+ 0.46875f, 0.5f, 0.53125f, 0.5625f, 0.59375f,
+ 0.625f, 0.65625f, 0.6875f, 0.71875f, 0.75f,
+ 0.78125f, 0.8125f, 0.84375f, 0.875f, 0.90625f,
+ 0.9375f, 0.96875f, // Band 17
+ 0.f, 0.0208333f, 0.0416667f, 0.0625f, 0.0833333f,
+ 0.104167f, 0.125f, 0.145833f, 0.166667f, 0.1875f,
+ 0.208333f, 0.229167f, 0.25f, 0.270833f, 0.291667f,
+ 0.3125f, 0.333333f, 0.354167f, 0.375f, 0.395833f,
+ 0.416667f, 0.4375f, 0.458333f, 0.479167f, 0.5f,
+ 0.520833f, 0.541667f, 0.5625f, 0.583333f, 0.604167f,
+ 0.625f, 0.645833f, 0.666667f, 0.6875f, 0.708333f,
+ 0.729167f, 0.75f, 0.770833f, 0.791667f, 0.8125f,
+ 0.833333f, 0.854167f, 0.875f, 0.895833f, 0.916667f,
+ 0.9375f, 0.958333f, 0.979167f // Band 18
+ }};
+
+} // namespace
+
+SpectralCorrelator::SpectralCorrelator()
+ : weights_(kOpusBandWeights24kHz20ms.begin(),
+ kOpusBandWeights24kHz20ms.end()) {}
+
+SpectralCorrelator::~SpectralCorrelator() = default;
+
+void SpectralCorrelator::ComputeAutoCorrelation(
+ rtc::ArrayView<const float> x,
+ rtc::ArrayView<float, kOpusBands24kHz> auto_corr) const {
+ ComputeCrossCorrelation(x, x, auto_corr);
+}
+
+void SpectralCorrelator::ComputeCrossCorrelation(
+ rtc::ArrayView<const float> x,
+ rtc::ArrayView<const float> y,
+ rtc::ArrayView<float, kOpusBands24kHz> cross_corr) const {
+ RTC_DCHECK_EQ(x.size(), kFrameSize20ms24kHz);
+ RTC_DCHECK_EQ(x.size(), y.size());
+ RTC_DCHECK_EQ(x[1], 0.f) << "The Nyquist coefficient must be zeroed.";
+ RTC_DCHECK_EQ(y[1], 0.f) << "The Nyquist coefficient must be zeroed.";
+ constexpr auto kOpusScaleNumBins24kHz20ms = GetOpusScaleNumBins24kHz20ms();
+ int k = 0; // Next Fourier coefficient index.
+ cross_corr[0] = 0.f;
+ for (int i = 0; i < kOpusBands24kHz - 1; ++i) {
+ cross_corr[i + 1] = 0.f;
+ for (int j = 0; j < kOpusScaleNumBins24kHz20ms[i]; ++j) { // Band size.
+ const float v = x[2 * k] * y[2 * k] + x[2 * k + 1] * y[2 * k + 1];
+ const float tmp = weights_[k] * v;
+ cross_corr[i] += v - tmp;
+ cross_corr[i + 1] += tmp;
+ k++;
+ }
+ }
+  cross_corr[0] *= 2.f; // The first band only gets half of the contributions.
+ RTC_DCHECK_EQ(k, kFrameSize20ms24kHz / 2); // Nyquist coefficient never used.
+}
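+
+// Note (illustrative, not part of the original WebRTC code): in the loop
+// above each Fourier bin contributes its per-bin correlation `v` to two
+// adjacent bands, (1 - weights_[k]) * v to band `i` and weights_[k] * v to
+// band `i + 1`, implementing triangular band filters with linear ramps. For
+// instance, the third bin of a 4-bin band (weight 0.5) contributes equally
+// to both bands.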
+
+void ComputeSmoothedLogMagnitudeSpectrum(
+ rtc::ArrayView<const float> bands_energy,
+ rtc::ArrayView<float, kNumBands> log_bands_energy) {
+ RTC_DCHECK_LE(bands_energy.size(), kNumBands);
+ constexpr float kOneByHundred = 1e-2f;
+ constexpr float kLogOneByHundred = -2.f;
+ // Init.
+ float log_max = kLogOneByHundred;
+ float follow = kLogOneByHundred;
+ const auto smooth = [&log_max, &follow](float x) {
+ x = std::max(log_max - 7.f, std::max(follow - 1.5f, x));
+ log_max = std::max(log_max, x);
+ follow = std::max(follow - 1.5f, x);
+ return x;
+ };
+ // Smoothing over the bands for which the band energy is defined.
+ for (int i = 0; rtc::SafeLt(i, bands_energy.size()); ++i) {
+ log_bands_energy[i] = smooth(std::log10(kOneByHundred + bands_energy[i]));
+ }
+ // Smoothing over the remaining bands (zero energy).
+ for (int i = bands_energy.size(); i < kNumBands; ++i) {
+ log_bands_energy[i] = smooth(kLogOneByHundred);
+ }
+}
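+
+// Note (illustrative, not part of the original WebRTC code): the `smooth`
+// lambda above clamps each log-energy from below with two floors, one
+// relative to the loudest band seen so far (`log_max - 7`) and one decaying
+// by 1.5 per band (`follow - 1.5`), so that a nearly empty band next to a
+// loud one cannot produce an arbitrarily small log-magnitude.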
+
+std::array<float, kNumBands * kNumBands> ComputeDctTable() {
+ std::array<float, kNumBands * kNumBands> dct_table;
+ const double k = std::sqrt(0.5);
+ for (int i = 0; i < kNumBands; ++i) {
+ for (int j = 0; j < kNumBands; ++j)
+ dct_table[i * kNumBands + j] = std::cos((i + 0.5) * j * kPi / kNumBands);
+ dct_table[i * kNumBands] *= k;
+ }
+ return dct_table;
+}
+
+void ComputeDct(rtc::ArrayView<const float> in,
+ rtc::ArrayView<const float, kNumBands * kNumBands> dct_table,
+ rtc::ArrayView<float> out) {
+ // DCT scaling factor - i.e., sqrt(2 / kNumBands).
+ constexpr float kDctScalingFactor = 0.301511345f;
+ constexpr float kDctScalingFactorError =
+ kDctScalingFactor * kDctScalingFactor -
+ 2.f / static_cast<float>(kNumBands);
+ static_assert(
+ (kDctScalingFactorError >= 0.f && kDctScalingFactorError < 1e-1f) ||
+ (kDctScalingFactorError < 0.f && kDctScalingFactorError > -1e-1f),
+ "kNumBands changed and kDctScalingFactor has not been updated.");
+ RTC_DCHECK_NE(in.data(), out.data()) << "In-place DCT is not supported.";
+ RTC_DCHECK_LE(in.size(), kNumBands);
+ RTC_DCHECK_LE(1, out.size());
+ RTC_DCHECK_LE(out.size(), in.size());
+ for (int i = 0; rtc::SafeLt(i, out.size()); ++i) {
+ out[i] = 0.f;
+ for (int j = 0; rtc::SafeLt(j, in.size()); ++j) {
+ out[i] += in[j] * dct_table[j * kNumBands + i];
+ }
+ // TODO(bugs.webrtc.org/10480): Scaling factor in the DCT table.
+ out[i] *= kDctScalingFactor;
+ }
+}
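+
+// Note (illustrative, not part of the original WebRTC code): up to the
+// sqrt(0.5) scaling of the first basis vector applied in ComputeDctTable(),
+// ComputeDct() evaluates a type-II DCT:
+//   out[i] = sqrt(2 / kNumBands) *
+//            sum_j in[j] * cos(pi * (j + 0.5) * i / kNumBands).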
+
+} // namespace rnn_vad
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/spectral_features_internal.h b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/spectral_features_internal.h
new file mode 100644
index 0000000000..f4b293a567
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/spectral_features_internal.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AGC2_RNN_VAD_SPECTRAL_FEATURES_INTERNAL_H_
+#define MODULES_AUDIO_PROCESSING_AGC2_RNN_VAD_SPECTRAL_FEATURES_INTERNAL_H_
+
+#include <stddef.h>
+
+#include <array>
+#include <vector>
+
+#include "api/array_view.h"
+#include "modules/audio_processing/agc2/rnn_vad/common.h"
+
+namespace webrtc {
+namespace rnn_vad {
+
+// At a sample rate of 24 kHz, the last 3 Opus bands are beyond the Nyquist
+// frequency. However, band #19 gets the contributions from band #18 because
+// of the symmetric triangular filter with peak response at 12 kHz.
+constexpr int kOpusBands24kHz = 20;
+static_assert(kOpusBands24kHz < kNumBands,
+ "The number of bands at 24 kHz must be less than those defined "
+ "in the Opus scale at 48 kHz.");
+
+// Number of FFT frequency bins covered by each band in the Opus scale at a
+// sample rate of 24 kHz for 20 ms frames.
+// Declared here for unit testing.
+constexpr std::array<int, kOpusBands24kHz - 1> GetOpusScaleNumBins24kHz20ms() {
+ return {4, 4, 4, 4, 4, 4, 4, 4, 8, 8, 8, 8, 16, 16, 16, 24, 24, 32, 48};
+}
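+
+// Note (illustrative, not part of the original WebRTC code): each entry above
+// is the number of FFT bins between consecutive Opus band boundaries at
+// 24 kHz; e.g., the first boundary at 200 Hz maps to
+// 200 * kFrameSize20ms24kHz / kSampleRate24kHz = 200 * 480 / 24000 = 4 bins.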
+
+// TODO(bugs.webrtc.org/10480): Move to a separate file.
+// Class to compute band-wise spectral features in the Opus perceptual scale
+// for 20 ms frames sampled at 24 kHz. The analysis methods apply triangular
+// filters with peak response at each band boundary.
+class SpectralCorrelator {
+ public:
+ // Ctor.
+ SpectralCorrelator();
+ SpectralCorrelator(const SpectralCorrelator&) = delete;
+ SpectralCorrelator& operator=(const SpectralCorrelator&) = delete;
+ ~SpectralCorrelator();
+
+ // Computes the band-wise spectral auto-correlations.
+ // `x` must:
+ // - have size equal to `kFrameSize20ms24kHz`;
+  // - be encoded as a vector of interleaved real-complex FFT coefficients
+  //   where x[1] = 0 (the Nyquist frequency coefficient is omitted).
+ void ComputeAutoCorrelation(
+ rtc::ArrayView<const float> x,
+ rtc::ArrayView<float, kOpusBands24kHz> auto_corr) const;
+
+ // Computes the band-wise spectral cross-correlations.
+ // `x` and `y` must:
+ // - have size equal to `kFrameSize20ms24kHz`;
+ // - be encoded as vectors of interleaved real-complex FFT coefficients where
+ // x[1] = y[1] = 0 (the Nyquist frequency coefficient is omitted).
+ void ComputeCrossCorrelation(
+ rtc::ArrayView<const float> x,
+ rtc::ArrayView<const float> y,
+ rtc::ArrayView<float, kOpusBands24kHz> cross_corr) const;
+
+ private:
+ const std::vector<float> weights_; // Weights for each Fourier coefficient.
+};
+
+// TODO(bugs.webrtc.org/10480): Move to anonymous namespace in
+// spectral_features.cc. Given a vector of Opus-bands energy coefficients,
+// computes the log magnitude spectrum applying smoothing both over time and
+// over frequency. Declared here for unit testing.
+void ComputeSmoothedLogMagnitudeSpectrum(
+ rtc::ArrayView<const float> bands_energy,
+ rtc::ArrayView<float, kNumBands> log_bands_energy);
+
+// TODO(bugs.webrtc.org/10480): Move to anonymous namespace in
+// spectral_features.cc. Creates a DCT table for arrays having size equal to
+// `kNumBands`. Declared here for unit testing.
+std::array<float, kNumBands * kNumBands> ComputeDctTable();
+
+// TODO(bugs.webrtc.org/10480): Move to anonymous namespace in
+// spectral_features.cc. Computes DCT for `in` given a pre-computed DCT table.
+// In-place computation is not allowed and `out` can be smaller than `in` in
+// order to only compute the first DCT coefficients. Declared here for unit
+// testing.
+void ComputeDct(rtc::ArrayView<const float> in,
+ rtc::ArrayView<const float, kNumBands * kNumBands> dct_table,
+ rtc::ArrayView<float> out);
+
+} // namespace rnn_vad
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AGC2_RNN_VAD_SPECTRAL_FEATURES_INTERNAL_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/spectral_features_internal_unittest.cc b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/spectral_features_internal_unittest.cc
new file mode 100644
index 0000000000..ece4eb5024
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/spectral_features_internal_unittest.cc
@@ -0,0 +1,160 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/agc2/rnn_vad/spectral_features_internal.h"
+
+#include <algorithm>
+#include <array>
+#include <complex>
+#include <numeric>
+#include <vector>
+
+#include "api/array_view.h"
+#include "modules/audio_processing/agc2/rnn_vad/test_utils.h"
+#include "modules/audio_processing/utility/pffft_wrapper.h"
+#include "rtc_base/numerics/safe_compare.h"
+// TODO(bugs.webrtc.org/8948): Add when the issue is fixed.
+// #include "test/fpe_observer.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace rnn_vad {
+namespace {
+
+// Generates the values for the array named `kOpusBandWeights24kHz20ms` in the
+// anonymous namespace of the .cc file, which is the array of FFT coefficient
+// weights for the Opus scale triangular filters.
+std::vector<float> ComputeTriangularFiltersWeights() {
+ constexpr auto kOpusScaleNumBins24kHz20ms = GetOpusScaleNumBins24kHz20ms();
+ const auto& v = kOpusScaleNumBins24kHz20ms; // Alias.
+ const int num_weights = std::accumulate(kOpusScaleNumBins24kHz20ms.begin(),
+ kOpusScaleNumBins24kHz20ms.end(), 0);
+ std::vector<float> weights(num_weights);
+ int next_fft_coeff_index = 0;
+ for (int band = 0; rtc::SafeLt(band, v.size()); ++band) {
+ const int band_size = v[band];
+ for (int j = 0; rtc::SafeLt(j, band_size); ++j) {
+ weights[next_fft_coeff_index + j] = static_cast<float>(j) / band_size;
+ }
+ next_fft_coeff_index += band_size;
+ }
+ return weights;
+}
+
+// Checks that the values returned by GetOpusScaleNumBins24kHz20ms() match the
+// Opus scale frequency boundaries.
+TEST(RnnVadTest, TestOpusScaleBoundaries) {
+ constexpr int kBandFrequencyBoundariesHz[kNumBands - 1] = {
+ 200, 400, 600, 800, 1000, 1200, 1400, 1600, 2000, 2400, 2800,
+ 3200, 4000, 4800, 5600, 6800, 8000, 9600, 12000, 15600, 20000};
+ constexpr auto kOpusScaleNumBins24kHz20ms = GetOpusScaleNumBins24kHz20ms();
+ int prev = 0;
+ for (int i = 0; rtc::SafeLt(i, kOpusScaleNumBins24kHz20ms.size()); ++i) {
+ int boundary =
+ kBandFrequencyBoundariesHz[i] * kFrameSize20ms24kHz / kSampleRate24kHz;
+ EXPECT_EQ(kOpusScaleNumBins24kHz20ms[i], boundary - prev);
+ prev = boundary;
+ }
+}
+
+// Checks that the computed triangular filter weights for the Opus scale are
+// monotonic within each Opus band. This test should only be enabled when
+// ComputeTriangularFiltersWeights() is changed and `kOpusBandWeights24kHz20ms`
+// is updated accordingly.
+TEST(RnnVadTest, DISABLED_TestOpusScaleWeights) {
+ auto weights = ComputeTriangularFiltersWeights();
+ int i = 0;
+ for (int band_size : GetOpusScaleNumBins24kHz20ms()) {
+ SCOPED_TRACE(band_size);
+ rtc::ArrayView<float> band_weights(weights.data() + i, band_size);
+ float prev = -1.f;
+ for (float weight : band_weights) {
+ EXPECT_LT(prev, weight);
+ prev = weight;
+ }
+ i += band_size;
+ }
+}
+
+// Checks that the computed band-wise auto-correlation is non-negative for a
+// simple input vector of FFT coefficients.
+TEST(RnnVadTest, SpectralCorrelatorValidOutput) {
+ // Input: vector of (1, 1j) values.
+ Pffft fft(kFrameSize20ms24kHz, Pffft::FftType::kReal);
+ auto in = fft.CreateBuffer();
+ std::array<float, kOpusBands24kHz> out;
+ auto in_view = in->GetView();
+ std::fill(in_view.begin(), in_view.end(), 1.f);
+ in_view[1] = 0.f; // Nyquist frequency.
+ // Compute and check output.
+ SpectralCorrelator e;
+ e.ComputeAutoCorrelation(in_view, out);
+ for (int i = 0; i < kOpusBands24kHz; ++i) {
+ SCOPED_TRACE(i);
+ EXPECT_GT(out[i], 0.f);
+ }
+}
+
+// Checks that the computed smoothed log magnitude spectrum is within tolerance
+// given hard-coded test input data.
+TEST(RnnVadTest, ComputeSmoothedLogMagnitudeSpectrumWithinTolerance) {
+ constexpr std::array<float, kNumBands> input = {
+ {86.060539245605f, 275.668334960938f, 43.406528472900f, 6.541896820068f,
+ 17.964015960693f, 8.090919494629f, 1.261920094490f, 1.212702631950f,
+ 1.619154453278f, 0.508935272694f, 0.346316039562f, 0.237035423517f,
+ 0.172424271703f, 0.271657168865f, 0.126088857651f, 0.139967113733f,
+ 0.207200810313f, 0.155893072486f, 0.091090843081f, 0.033391401172f,
+ 0.013879744336f, 0.011973354965f}};
+ constexpr std::array<float, kNumBands> expected_output = {
+ {1.934854507446f, 2.440402746201f, 1.637655138969f, 0.816367030144f,
+ 1.254645109177f, 0.908534288406f, 0.104459829628f, 0.087320849299f,
+ 0.211962252855f, -0.284886807203f, -0.448164641857f, -0.607240796089f,
+ -0.738917350769f, -0.550279200077f, -0.866177439690f, -0.824003994465f,
+ -0.663138568401f, -0.780171751976f, -0.995288193226f, -1.362596273422f,
+ -1.621970295906f, -1.658103585243f}};
+ std::array<float, kNumBands> computed_output;
+ {
+ // TODO(bugs.webrtc.org/8948): Add when the issue is fixed.
+ // FloatingPointExceptionObserver fpe_observer;
+ ComputeSmoothedLogMagnitudeSpectrum(input, computed_output);
+ ExpectNearAbsolute(expected_output, computed_output, 1e-5f);
+ }
+}
+
+// Checks that the computed DCT is within tolerance given hard-coded test input
+// data.
+TEST(RnnVadTest, ComputeDctWithinTolerance) {
+ constexpr std::array<float, kNumBands> input = {
+ {0.232155621052f, 0.678957760334f, 0.220818966627f, -0.077363930643f,
+ -0.559227049351f, 0.432545185089f, 0.353900641203f, 0.398993015289f,
+ 0.409774333239f, 0.454977899790f, 0.300520688295f, -0.010286616161f,
+ 0.272525429726f, 0.098067551851f, 0.083649002016f, 0.046226885170f,
+ -0.033228103071f, 0.144773483276f, -0.117661058903f, -0.005628800020f,
+ -0.009547689930f, -0.045382082462f}};
+ constexpr std::array<float, kNumBands> expected_output = {
+ {0.697072803974f, 0.442710995674f, -0.293156713247f, -0.060711503029f,
+ 0.292050391436f, 0.489301353693f, 0.402255415916f, 0.134404733777f,
+ -0.086305990815f, -0.199605688453f, -0.234511867166f, -0.413774639368f,
+ -0.388507157564f, -0.032798115164f, 0.044605545700f, 0.112466648221f,
+ -0.050096966326f, 0.045971218497f, -0.029815061018f, -0.410366982222f,
+ -0.209233760834f, -0.128037497401f}};
+ auto dct_table = ComputeDctTable();
+ std::array<float, kNumBands> computed_output;
+ {
+ // TODO(bugs.webrtc.org/8948): Add when the issue is fixed.
+ // FloatingPointExceptionObserver fpe_observer;
+ ComputeDct(input, dct_table, computed_output);
+ ExpectNearAbsolute(expected_output, computed_output, 1e-5f);
+ }
+}
+
+} // namespace
+} // namespace rnn_vad
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/spectral_features_unittest.cc b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/spectral_features_unittest.cc
new file mode 100644
index 0000000000..324d694957
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/spectral_features_unittest.cc
@@ -0,0 +1,160 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/agc2/rnn_vad/spectral_features.h"
+
+#include <algorithm>
+
+#include "modules/audio_processing/agc2/rnn_vad/test_utils.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/numerics/safe_compare.h"
+// TODO(bugs.webrtc.org/8948): Add when the issue is fixed.
+// #include "test/fpe_observer.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace rnn_vad {
+namespace {
+
+constexpr int kTestFeatureVectorSize = kNumBands + 3 * kNumLowerBands + 1;
+
+// Writes a periodic, non-silent test signal.
+void WriteTestData(rtc::ArrayView<float> samples) {
+ for (int i = 0; rtc::SafeLt(i, samples.size()); ++i) {
+ samples[i] = i % 100;
+ }
+}
+
+rtc::ArrayView<float, kNumBands - kNumLowerBands> GetHigherBandsSpectrum(
+ std::array<float, kTestFeatureVectorSize>* feature_vector) {
+ return {feature_vector->data() + kNumLowerBands, kNumBands - kNumLowerBands};
+}
+
+rtc::ArrayView<float, kNumLowerBands> GetAverage(
+ std::array<float, kTestFeatureVectorSize>* feature_vector) {
+ return {feature_vector->data(), kNumLowerBands};
+}
+
+rtc::ArrayView<float, kNumLowerBands> GetFirstDerivative(
+ std::array<float, kTestFeatureVectorSize>* feature_vector) {
+ return {feature_vector->data() + kNumBands, kNumLowerBands};
+}
+
+rtc::ArrayView<float, kNumLowerBands> GetSecondDerivative(
+ std::array<float, kTestFeatureVectorSize>* feature_vector) {
+ return {feature_vector->data() + kNumBands + kNumLowerBands, kNumLowerBands};
+}
+
+rtc::ArrayView<float, kNumLowerBands> GetCepstralCrossCorrelation(
+ std::array<float, kTestFeatureVectorSize>* feature_vector) {
+ return {feature_vector->data() + kNumBands + 2 * kNumLowerBands,
+ kNumLowerBands};
+}
+
+float* GetCepstralVariability(
+ std::array<float, kTestFeatureVectorSize>* feature_vector) {
+ return feature_vector->data() + kNumBands + 3 * kNumLowerBands;
+}
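+
+// Feature vector layout implied by the helpers above:
+// - [0, kNumLowerBands): cepstral average;
+// - [kNumLowerBands, kNumBands): higher bands cepstrum;
+// - [kNumBands, kNumBands + kNumLowerBands): first derivative;
+// - [kNumBands + kNumLowerBands, kNumBands + 2 * kNumLowerBands): second
+//   derivative;
+// - [kNumBands + 2 * kNumLowerBands, kNumBands + 3 * kNumLowerBands):
+//   cepstral cross-correlation;
+// - last element: cepstral variability.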
+
+constexpr float kInitialFeatureVal = -9999.f;
+
+// Checks that silence is detected when the input signal is 0 and that the
+// feature vector is written only if the input signal is not tagged as silence.
+TEST(RnnVadTest, SpectralFeaturesWithAndWithoutSilence) {
+ // Initialize.
+ SpectralFeaturesExtractor sfe;
+ std::array<float, kFrameSize20ms24kHz> samples;
+ rtc::ArrayView<float, kFrameSize20ms24kHz> samples_view(samples);
+ bool is_silence;
+ std::array<float, kTestFeatureVectorSize> feature_vector;
+
+ // Write an initial value in the feature vector to detect changes.
+ std::fill(feature_vector.begin(), feature_vector.end(), kInitialFeatureVal);
+
+ // TODO(bugs.webrtc.org/8948): Add when the issue is fixed.
+ // FloatingPointExceptionObserver fpe_observer;
+
+ // With silence.
+ std::fill(samples.begin(), samples.end(), 0.f);
+ is_silence = sfe.CheckSilenceComputeFeatures(
+ samples_view, samples_view, GetHigherBandsSpectrum(&feature_vector),
+ GetAverage(&feature_vector), GetFirstDerivative(&feature_vector),
+ GetSecondDerivative(&feature_vector),
+ GetCepstralCrossCorrelation(&feature_vector),
+ GetCepstralVariability(&feature_vector));
+ // Silence is expected, the output won't be overwritten.
+ EXPECT_TRUE(is_silence);
+ EXPECT_TRUE(std::all_of(feature_vector.begin(), feature_vector.end(),
+ [](float x) { return x == kInitialFeatureVal; }));
+
+ // With no silence.
+ WriteTestData(samples);
+ is_silence = sfe.CheckSilenceComputeFeatures(
+ samples_view, samples_view, GetHigherBandsSpectrum(&feature_vector),
+ GetAverage(&feature_vector), GetFirstDerivative(&feature_vector),
+ GetSecondDerivative(&feature_vector),
+ GetCepstralCrossCorrelation(&feature_vector),
+ GetCepstralVariability(&feature_vector));
+ // Silence is not expected, the output will be overwritten.
+ EXPECT_FALSE(is_silence);
+ EXPECT_FALSE(std::all_of(feature_vector.begin(), feature_vector.end(),
+ [](float x) { return x == kInitialFeatureVal; }));
+}
+
+// Feeds a constant input signal and checks that:
+// - the cepstral coefficients average does not change;
+// - the derivatives are zero;
+// - the cepstral variability score does not change.
+TEST(RnnVadTest, CepstralFeaturesConstantAverageZeroDerivative) {
+ // Initialize.
+ SpectralFeaturesExtractor sfe;
+ std::array<float, kFrameSize20ms24kHz> samples;
+ rtc::ArrayView<float, kFrameSize20ms24kHz> samples_view(samples);
+ WriteTestData(samples);
+
+ // Fill the spectral features with test data.
+ std::array<float, kTestFeatureVectorSize> feature_vector;
+ for (int i = 0; i < kCepstralCoeffsHistorySize; ++i) {
+ sfe.CheckSilenceComputeFeatures(
+ samples_view, samples_view, GetHigherBandsSpectrum(&feature_vector),
+ GetAverage(&feature_vector), GetFirstDerivative(&feature_vector),
+ GetSecondDerivative(&feature_vector),
+ GetCepstralCrossCorrelation(&feature_vector),
+ GetCepstralVariability(&feature_vector));
+ }
+
+ // Feed the test data one last time but using a different output vector.
+ std::array<float, kTestFeatureVectorSize> feature_vector_last;
+ sfe.CheckSilenceComputeFeatures(
+ samples_view, samples_view, GetHigherBandsSpectrum(&feature_vector_last),
+ GetAverage(&feature_vector_last),
+ GetFirstDerivative(&feature_vector_last),
+ GetSecondDerivative(&feature_vector_last),
+ GetCepstralCrossCorrelation(&feature_vector_last),
+ GetCepstralVariability(&feature_vector_last));
+
+ // Average is unchanged.
+ ExpectEqualFloatArray({feature_vector.data(), kNumLowerBands},
+ {feature_vector_last.data(), kNumLowerBands});
+ // First and second derivatives are zero.
+ constexpr std::array<float, kNumLowerBands> zeros{};
+ ExpectEqualFloatArray(
+ {feature_vector_last.data() + kNumBands, kNumLowerBands}, zeros);
+ ExpectEqualFloatArray(
+ {feature_vector_last.data() + kNumBands + kNumLowerBands, kNumLowerBands},
+ zeros);
+ // Variability is unchanged.
+ EXPECT_FLOAT_EQ(feature_vector[kNumBands + 3 * kNumLowerBands],
+ feature_vector_last[kNumBands + 3 * kNumLowerBands]);
+}
+
+} // namespace
+} // namespace rnn_vad
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/symmetric_matrix_buffer.h b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/symmetric_matrix_buffer.h
new file mode 100644
index 0000000000..d186479551
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/symmetric_matrix_buffer.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AGC2_RNN_VAD_SYMMETRIC_MATRIX_BUFFER_H_
+#define MODULES_AUDIO_PROCESSING_AGC2_RNN_VAD_SYMMETRIC_MATRIX_BUFFER_H_
+
+#include <algorithm>
+#include <array>
+#include <cstring>
+#include <type_traits>
+#include <utility>
+
+#include "api/array_view.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/numerics/safe_compare.h"
+
+namespace webrtc {
+namespace rnn_vad {
+
+// Data structure to buffer the results of pair-wise comparisons between items
+// stored in a ring buffer. Every time that the oldest item is replaced in the
+// ring buffer, the new one is compared to the remaining items in the ring
+// buffer. The results of such comparisons need to be buffered and automatically
+// removed when one of the two corresponding items that have been compared is
+// removed from the ring buffer. It is assumed that the comparison is symmetric
+// and that comparing an item with itself is not needed.
+template <typename T, int S>
+class SymmetricMatrixBuffer {
+ static_assert(S > 2, "");
+
+ public:
+ SymmetricMatrixBuffer() = default;
+ SymmetricMatrixBuffer(const SymmetricMatrixBuffer&) = delete;
+ SymmetricMatrixBuffer& operator=(const SymmetricMatrixBuffer&) = delete;
+ ~SymmetricMatrixBuffer() = default;
+ // Sets the buffer values to zero.
+ void Reset() {
+ static_assert(std::is_arithmetic<T>::value,
+ "Integral or floating point required.");
+ buf_.fill(0);
+ }
+ // Pushes the results from the comparison between the most recent item and
+ // those that are still in the ring buffer. The first element in `values` must
+ // correspond to the comparison between the most recent item and the second
+ // most recent one in the ring buffer, whereas the last element in `values`
+ // must correspond to the comparison between the most recent item and the
+ // oldest one in the ring buffer.
+ void Push(rtc::ArrayView<T, S - 1> values) {
+ // Move the lower-right sub-matrix of size (S-2) x (S-2) one row up and one
+ // column left.
+ std::memmove(buf_.data(), buf_.data() + S, (buf_.size() - S) * sizeof(T));
+ // Copy new values in the last column in the right order.
+ for (int i = 0; rtc::SafeLt(i, values.size()); ++i) {
+ const int index = (S - 1 - i) * (S - 1) - 1;
+ RTC_DCHECK_GE(index, 0);
+ RTC_DCHECK_LT(index, buf_.size());
+ buf_[index] = values[i];
+ }
+ }
+ // Reads the value that corresponds to comparison of two items in the ring
+ // buffer having delay `delay1` and `delay2`. The two arguments must not be
+ // equal and both must be in {0, ..., S - 1}.
+ T GetValue(int delay1, int delay2) const {
+ int row = S - 1 - delay1;
+ int col = S - 1 - delay2;
+ RTC_DCHECK_NE(row, col) << "The diagonal cannot be accessed.";
+ if (row > col)
+ std::swap(row, col); // Swap to access the upper-right triangular part.
+ RTC_DCHECK_LE(0, row);
+ RTC_DCHECK_LT(row, S - 1) << "Not enforcing row < col and row != col.";
+ RTC_DCHECK_LE(1, col) << "Not enforcing row < col and row != col.";
+ RTC_DCHECK_LT(col, S);
+ const int index = row * (S - 1) + (col - 1);
+ RTC_DCHECK_LE(0, index);
+ RTC_DCHECK_LT(index, buf_.size());
+ return buf_[index];
+ }
+
+ private:
+  // Encodes an upper-right triangular matrix (excluding its diagonal) using a
+  // square matrix. This allows moving the data in Push() with a single
+  // memmove() operation.
+ std::array<T, (S - 1) * (S - 1)> buf_{};
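+  // Worked example (illustrative, not part of the original WebRTC code): for
+  // S = 4, `buf_` has (S - 1)^2 = 9 slots encoding the 6 pairs (row, col)
+  // with row < col of a 4x4 comparison matrix (diagonal excluded). The
+  // mapping index = row * (S - 1) + (col - 1) yields (0,1)->0, (0,2)->1,
+  // (0,3)->2, (1,2)->4, (1,3)->5, (2,3)->8; slots 3, 6 and 7 only carry
+  // stale values through the memmove() in Push().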
+};
+
+} // namespace rnn_vad
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AGC2_RNN_VAD_SYMMETRIC_MATRIX_BUFFER_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/symmetric_matrix_buffer_unittest.cc b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/symmetric_matrix_buffer_unittest.cc
new file mode 100644
index 0000000000..1509ca5ac1
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/symmetric_matrix_buffer_unittest.cc
@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/agc2/rnn_vad/symmetric_matrix_buffer.h"
+
+#include <algorithm>
+#include <array>
+#include <utility>
+
+#include "modules/audio_processing/agc2/rnn_vad/ring_buffer.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace rnn_vad {
+namespace {
+
+template <typename T, int S>
+void CheckSymmetry(const SymmetricMatrixBuffer<T, S>* sym_matrix_buf) {
+ for (int row = 0; row < S - 1; ++row)
+ for (int col = row + 1; col < S; ++col)
+ EXPECT_EQ(sym_matrix_buf->GetValue(row, col),
+ sym_matrix_buf->GetValue(col, row));
+}
+
+using PairType = std::pair<int, int>;
+
+// Returns true if the symmetric matrix buffer contains at least one pair in
+// which either element equals the given value.
+template <int S>
+bool CheckPairsWithValueExist(
+ const SymmetricMatrixBuffer<PairType, S>* sym_matrix_buf,
+ const int value) {
+ for (int row = 0; row < S - 1; ++row) {
+ for (int col = row + 1; col < S; ++col) {
+ auto p = sym_matrix_buf->GetValue(row, col);
+ if (p.first == value || p.second == value)
+ return true;
+ }
+ }
+ return false;
+}
+
+// Test that shows how to combine RingBuffer and SymmetricMatrixBuffer to
+// efficiently compute pair-wise scores. This test verifies that the evolution
+// of a SymmetricMatrixBuffer instance follows that of RingBuffer.
+TEST(RnnVadTest, SymmetricMatrixBufferUseCase) {
+  // Instantiate a ring buffer which will be fed with a series of integer
+  // values.
+ constexpr int kRingBufSize = 10;
+ RingBuffer<int, 1, kRingBufSize> ring_buf;
+  // Instantiate a symmetric matrix buffer for the ring buffer above. It stores
+ // pairs of integers with which this test can easily check that the evolution
+ // of RingBuffer and SymmetricMatrixBuffer match.
+ SymmetricMatrixBuffer<PairType, kRingBufSize> sym_matrix_buf;
+ for (int t = 1; t <= 100; ++t) { // Evolution steps.
+ SCOPED_TRACE(t);
+ const int t_removed = ring_buf.GetArrayView(kRingBufSize - 1)[0];
+ ring_buf.Push({&t, 1});
+ // The head of the ring buffer is `t`.
+ ASSERT_EQ(t, ring_buf.GetArrayView(0)[0]);
+ // Create the comparisons between `t` and the older elements in the ring
+ // buffer.
+    std::array<PairType, kRingBufSize - 1> new_comparisons;
+    for (int i = 0; i < kRingBufSize - 1; ++i) {
+      // Start comparing `t` to the second newest element in the ring buffer.
+      const int delay = i + 1;
+      const auto t_prev = ring_buf.GetArrayView(delay)[0];
+      ASSERT_EQ(std::max(0, t - delay), t_prev);
+      // Compare the last element `t` with `t_prev`.
+      new_comparisons[i].first = t_prev;
+      new_comparisons[i].second = t;
+    }
+    // Push the new comparisons into the symmetric matrix buffer.
+    sym_matrix_buf.Push({new_comparisons.data(), new_comparisons.size()});
+ // Tests.
+ CheckSymmetry(&sym_matrix_buf);
+ // Check that the pairs resulting from the content in the ring buffer are
+ // in the right position.
+ for (int delay1 = 0; delay1 < kRingBufSize - 1; ++delay1) {
+ for (int delay2 = delay1 + 1; delay2 < kRingBufSize; ++delay2) {
+ const auto t1 = ring_buf.GetArrayView(delay1)[0];
+ const auto t2 = ring_buf.GetArrayView(delay2)[0];
+ ASSERT_LE(t2, t1);
+ const auto p = sym_matrix_buf.GetValue(delay1, delay2);
+ EXPECT_EQ(p.first, t2);
+ EXPECT_EQ(p.second, t1);
+ }
+ }
+ // Check that every older element in the ring buffer still has a
+ // corresponding pair in the symmetric matrix buffer.
+ for (int delay = 1; delay < kRingBufSize; ++delay) {
+ const auto t_prev = ring_buf.GetArrayView(delay)[0];
+ EXPECT_TRUE(CheckPairsWithValueExist(&sym_matrix_buf, t_prev));
+ }
+ // Check that the element removed from the ring buffer has no corresponding
+ // pairs in the symmetric matrix buffer.
+ if (t > kRingBufSize - 1) {
+ EXPECT_FALSE(CheckPairsWithValueExist(&sym_matrix_buf, t_removed));
+ }
+ }
+}
+
+} // namespace
+} // namespace rnn_vad
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/test_utils.cc b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/test_utils.cc
new file mode 100644
index 0000000000..857a9f2706
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/test_utils.cc
@@ -0,0 +1,143 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/agc2/rnn_vad/test_utils.h"
+
+#include <algorithm>
+#include <fstream>
+#include <memory>
+#include <string>
+#include <type_traits>
+#include <vector>
+
+#include "absl/strings/string_view.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/numerics/safe_compare.h"
+#include "test/gtest.h"
+#include "test/testsupport/file_utils.h"
+
+namespace webrtc {
+namespace rnn_vad {
+namespace {
+
+// File reader for binary files that contain a sequence of values with
+// arithmetic type `T`. The values of type `T` that are read are cast to float.
+template <typename T>
+class FloatFileReader : public FileReader {
+ public:
+ static_assert(std::is_arithmetic<T>::value, "");
+ explicit FloatFileReader(absl::string_view filename)
+ : is_(std::string(filename), std::ios::binary | std::ios::ate),
+ size_(is_.tellg() / sizeof(T)) {
+ RTC_CHECK(is_);
+ SeekBeginning();
+ }
+ FloatFileReader(const FloatFileReader&) = delete;
+ FloatFileReader& operator=(const FloatFileReader&) = delete;
+ ~FloatFileReader() = default;
+
+ int size() const override { return size_; }
+ bool ReadChunk(rtc::ArrayView<float> dst) override {
+ const std::streamsize bytes_to_read = dst.size() * sizeof(T);
+ if (std::is_same<T, float>::value) {
+ is_.read(reinterpret_cast<char*>(dst.data()), bytes_to_read);
+ } else {
+ buffer_.resize(dst.size());
+ is_.read(reinterpret_cast<char*>(buffer_.data()), bytes_to_read);
+ std::transform(buffer_.begin(), buffer_.end(), dst.begin(),
+ [](const T& v) -> float { return static_cast<float>(v); });
+ }
+ return is_.gcount() == bytes_to_read;
+ }
+ bool ReadValue(float& dst) override { return ReadChunk({&dst, 1}); }
+ void SeekForward(int hop) override { is_.seekg(hop * sizeof(T), is_.cur); }
+ void SeekBeginning() override { is_.seekg(0, is_.beg); }
+
+ private:
+ std::ifstream is_;
+ const int size_;
+ std::vector<T> buffer_;
+};
+
+} // namespace
+
+using webrtc::test::ResourcePath;
+
+void ExpectEqualFloatArray(rtc::ArrayView<const float> expected,
+ rtc::ArrayView<const float> computed) {
+ ASSERT_EQ(expected.size(), computed.size());
+ for (int i = 0; rtc::SafeLt(i, expected.size()); ++i) {
+ SCOPED_TRACE(i);
+ EXPECT_FLOAT_EQ(expected[i], computed[i]);
+ }
+}
+
+void ExpectNearAbsolute(rtc::ArrayView<const float> expected,
+ rtc::ArrayView<const float> computed,
+ float tolerance) {
+ ASSERT_EQ(expected.size(), computed.size());
+ for (int i = 0; rtc::SafeLt(i, expected.size()); ++i) {
+ SCOPED_TRACE(i);
+ EXPECT_NEAR(expected[i], computed[i], tolerance);
+ }
+}
+
+std::unique_ptr<FileReader> CreatePcmSamplesReader() {
+ return std::make_unique<FloatFileReader<int16_t>>(
+ /*filename=*/test::ResourcePath("audio_processing/agc2/rnn_vad/samples",
+ "pcm"));
+}
+
+ChunksFileReader CreatePitchBuffer24kHzReader() {
+ auto reader = std::make_unique<FloatFileReader<float>>(
+ /*filename=*/test::ResourcePath(
+ "audio_processing/agc2/rnn_vad/pitch_buf_24k", "dat"));
+ const int num_chunks = rtc::CheckedDivExact(reader->size(), kBufSize24kHz);
+ return {/*chunk_size=*/kBufSize24kHz, num_chunks, std::move(reader)};
+}
+
+ChunksFileReader CreateLpResidualAndPitchInfoReader() {
+ constexpr int kPitchInfoSize = 2; // Pitch period and strength.
+ constexpr int kChunkSize = kBufSize24kHz + kPitchInfoSize;
+ auto reader = std::make_unique<FloatFileReader<float>>(
+ /*filename=*/test::ResourcePath(
+ "audio_processing/agc2/rnn_vad/pitch_lp_res", "dat"));
+ const int num_chunks = rtc::CheckedDivExact(reader->size(), kChunkSize);
+ return {kChunkSize, num_chunks, std::move(reader)};
+}
+
+std::unique_ptr<FileReader> CreateGruInputReader() {
+ return std::make_unique<FloatFileReader<float>>(
+ /*filename=*/test::ResourcePath("audio_processing/agc2/rnn_vad/gru_in",
+ "dat"));
+}
+
+std::unique_ptr<FileReader> CreateVadProbsReader() {
+ return std::make_unique<FloatFileReader<float>>(
+ /*filename=*/test::ResourcePath("audio_processing/agc2/rnn_vad/vad_prob",
+ "dat"));
+}
+
+PitchTestData::PitchTestData() {
+ FloatFileReader<float> reader(
+ /*filename=*/ResourcePath(
+ "audio_processing/agc2/rnn_vad/pitch_search_int", "dat"));
+ reader.ReadChunk(pitch_buffer_24k_);
+ reader.ReadChunk(square_energies_24k_);
+ reader.ReadChunk(auto_correlation_12k_);
+ // Reverse the order of the squared energy values.
+ // Required after the WebRTC CL 191703 which switched to forward computation.
+ std::reverse(square_energies_24k_.begin(), square_energies_24k_.end());
+}
+
+PitchTestData::~PitchTestData() = default;
+
+} // namespace rnn_vad
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/test_utils.h b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/test_utils.h
new file mode 100644
index 0000000000..e64b7b7ecd
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/test_utils.h
@@ -0,0 +1,130 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AGC2_RNN_VAD_TEST_UTILS_H_
+#define MODULES_AUDIO_PROCESSING_AGC2_RNN_VAD_TEST_UTILS_H_
+
+#include <array>
+#include <fstream>
+#include <limits>
+#include <memory>
+#include <string>
+
+#include "absl/strings/string_view.h"
+#include "api/array_view.h"
+#include "modules/audio_processing/agc2/rnn_vad/common.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/numerics/safe_compare.h"
+
+namespace webrtc {
+namespace rnn_vad {
+
+constexpr float kFloatMin = std::numeric_limits<float>::min();
+
+// Fails for every pair from two equally sized rtc::ArrayView<float> views such
+// that the values in the pair do not match.
+void ExpectEqualFloatArray(rtc::ArrayView<const float> expected,
+ rtc::ArrayView<const float> computed);
+
+// Fails for every pair from two equally sized rtc::ArrayView<float> views such
+// that their absolute error is above a given threshold.
+void ExpectNearAbsolute(rtc::ArrayView<const float> expected,
+ rtc::ArrayView<const float> computed,
+ float tolerance);
+
+// File reader interface.
+class FileReader {
+ public:
+ virtual ~FileReader() = default;
+ // Number of values in the file.
+ virtual int size() const = 0;
+ // Reads `dst.size()` float values into `dst`, advances the internal file
+ // position according to the number of read bytes and returns true if the
+ // values are correctly read. If the number of remaining bytes in the file is
+ // not sufficient to read `dst.size()` float values, `dst` is partially
+ // modified and false is returned.
+ virtual bool ReadChunk(rtc::ArrayView<float> dst) = 0;
+ // Reads a single float value, advances the internal file position according
+ // to the number of read bytes and returns true if the value is correctly
+ // read. If the number of remaining bytes in the file is not sufficient to
+ // read one float, `dst` is not modified and false is returned.
+ virtual bool ReadValue(float& dst) = 0;
+ // Advances the internal file position by `hop` float values.
+ virtual void SeekForward(int hop) = 0;
+ // Resets the internal file position to BOF.
+ virtual void SeekBeginning() = 0;
+};
+
+// File reader for files that contain `num_chunks` chunks with size equal to
+// `chunk_size`.
+struct ChunksFileReader {
+ const int chunk_size;
+ const int num_chunks;
+ std::unique_ptr<FileReader> reader;
+};
+
+// Creates a reader for the PCM S16 samples file.
+std::unique_ptr<FileReader> CreatePcmSamplesReader();
+
+// Creates a reader for the 24 kHz pitch buffer test data.
+ChunksFileReader CreatePitchBuffer24kHzReader();
+
+// Creates a reader for the LP residual and pitch information test data.
+ChunksFileReader CreateLpResidualAndPitchInfoReader();
+
+// Creates a reader for the sequence of GRU input vectors.
+std::unique_ptr<FileReader> CreateGruInputReader();
+
+// Creates a reader for the VAD probabilities test data.
+std::unique_ptr<FileReader> CreateVadProbsReader();
+
+// Class to retrieve a test pitch buffer content and the expected output for the
+// analysis steps.
+class PitchTestData {
+ public:
+ PitchTestData();
+ ~PitchTestData();
+ rtc::ArrayView<const float, kBufSize24kHz> PitchBuffer24kHzView() const {
+ return pitch_buffer_24k_;
+ }
+ rtc::ArrayView<const float, kRefineNumLags24kHz> SquareEnergies24kHzView()
+ const {
+ return square_energies_24k_;
+ }
+ rtc::ArrayView<const float, kNumLags12kHz> AutoCorrelation12kHzView() const {
+ return auto_correlation_12k_;
+ }
+
+ private:
+ std::array<float, kBufSize24kHz> pitch_buffer_24k_;
+ std::array<float, kRefineNumLags24kHz> square_energies_24k_;
+ std::array<float, kNumLags12kHz> auto_correlation_12k_;
+};
+
+// Writer for binary files.
+class FileWriter {
+ public:
+ explicit FileWriter(absl::string_view file_path)
+ : os_(std::string(file_path), std::ios::binary) {}
+ FileWriter(const FileWriter&) = delete;
+ FileWriter& operator=(const FileWriter&) = delete;
+ ~FileWriter() = default;
+ void WriteChunk(rtc::ArrayView<const float> value) {
+ const std::streamsize bytes_to_write = value.size() * sizeof(float);
+ os_.write(reinterpret_cast<const char*>(value.data()), bytes_to_write);
+ }
+
+ private:
+ std::ofstream os_;
+};
+
+} // namespace rnn_vad
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AGC2_RNN_VAD_TEST_UTILS_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/vector_math.h b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/vector_math.h
new file mode 100644
index 0000000000..47f681196a
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/vector_math.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AGC2_RNN_VAD_VECTOR_MATH_H_
+#define MODULES_AUDIO_PROCESSING_AGC2_RNN_VAD_VECTOR_MATH_H_
+
+// Defines WEBRTC_ARCH_X86_FAMILY, used below.
+#include "rtc_base/system/arch.h"
+
+#if defined(WEBRTC_HAS_NEON)
+#include <arm_neon.h>
+#endif
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+#include <emmintrin.h>
+#endif
+
+#include <numeric>
+
+#include "api/array_view.h"
+#include "modules/audio_processing/agc2/cpu_features.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "rtc_base/system/arch.h"
+
+namespace webrtc {
+namespace rnn_vad {
+
+// Provides optimized mathematical operations on vectors.
+class VectorMath {
+ public:
+ explicit VectorMath(AvailableCpuFeatures cpu_features)
+ : cpu_features_(cpu_features) {}
+
+ // Computes the dot product between two equally sized vectors.
+ float DotProduct(rtc::ArrayView<const float> x,
+ rtc::ArrayView<const float> y) const {
+ RTC_DCHECK_EQ(x.size(), y.size());
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+ if (cpu_features_.avx2) {
+ return DotProductAvx2(x, y);
+ } else if (cpu_features_.sse2) {
+ __m128 accumulator = _mm_setzero_ps();
+ constexpr int kBlockSizeLog2 = 2;
+ constexpr int kBlockSize = 1 << kBlockSizeLog2;
+ const int incomplete_block_index = (x.size() >> kBlockSizeLog2)
+ << kBlockSizeLog2;
+ for (int i = 0; i < incomplete_block_index; i += kBlockSize) {
+ RTC_DCHECK_LE(i + kBlockSize, x.size());
+ const __m128 x_i = _mm_loadu_ps(&x[i]);
+ const __m128 y_i = _mm_loadu_ps(&y[i]);
+ // Multiply-add.
+ const __m128 z_j = _mm_mul_ps(x_i, y_i);
+ accumulator = _mm_add_ps(accumulator, z_j);
+ }
+ // Reduce `accumulator` by addition.
+ __m128 high = _mm_movehl_ps(accumulator, accumulator);
+ accumulator = _mm_add_ps(accumulator, high);
+ high = _mm_shuffle_ps(accumulator, accumulator, 1);
+ accumulator = _mm_add_ps(accumulator, high);
+ float dot_product = _mm_cvtss_f32(accumulator);
+ // Add the result for the last block if incomplete.
+ for (int i = incomplete_block_index;
+ i < rtc::dchecked_cast<int>(x.size()); ++i) {
+ dot_product += x[i] * y[i];
+ }
+ return dot_product;
+ }
+#elif defined(WEBRTC_HAS_NEON) && defined(WEBRTC_ARCH_ARM64)
+ if (cpu_features_.neon) {
+ float32x4_t accumulator = vdupq_n_f32(0.f);
+ constexpr int kBlockSizeLog2 = 2;
+ constexpr int kBlockSize = 1 << kBlockSizeLog2;
+ const int incomplete_block_index = (x.size() >> kBlockSizeLog2)
+ << kBlockSizeLog2;
+ for (int i = 0; i < incomplete_block_index; i += kBlockSize) {
+ RTC_DCHECK_LE(i + kBlockSize, x.size());
+ const float32x4_t x_i = vld1q_f32(&x[i]);
+ const float32x4_t y_i = vld1q_f32(&y[i]);
+ accumulator = vfmaq_f32(accumulator, x_i, y_i);
+ }
+ // Reduce `accumulator` by addition.
+ const float32x2_t tmp =
+ vpadd_f32(vget_low_f32(accumulator), vget_high_f32(accumulator));
+ float dot_product = vget_lane_f32(vpadd_f32(tmp, vrev64_f32(tmp)), 0);
+ // Add the result for the last block if incomplete.
+ for (int i = incomplete_block_index;
+ i < rtc::dchecked_cast<int>(x.size()); ++i) {
+ dot_product += x[i] * y[i];
+ }
+ return dot_product;
+ }
+#endif
+ return std::inner_product(x.begin(), x.end(), y.begin(), 0.f);
+ }
+
+ private:
+ float DotProductAvx2(rtc::ArrayView<const float> x,
+ rtc::ArrayView<const float> y) const;
+
+ const AvailableCpuFeatures cpu_features_;
+};
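+
+// Usage sketch (illustrative, not part of the original WebRTC code):
+//   VectorMath math(GetAvailableCpuFeatures());
+//   const float dot = math.DotProduct(x, y);  // `x`, `y`: equal-size views.
+// `GetAvailableCpuFeatures()` comes from
+// modules/audio_processing/agc2/cpu_features.h, included above.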
+
+} // namespace rnn_vad
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AGC2_RNN_VAD_VECTOR_MATH_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/vector_math_avx2.cc b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/vector_math_avx2.cc
new file mode 100644
index 0000000000..e4d246d9ab
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/vector_math_avx2.cc
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/agc2/rnn_vad/vector_math.h"
+
+#include <immintrin.h>
+
+#include "api/array_view.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/numerics/safe_conversions.h"
+
+namespace webrtc {
+namespace rnn_vad {
+
+float VectorMath::DotProductAvx2(rtc::ArrayView<const float> x,
+ rtc::ArrayView<const float> y) const {
+ RTC_DCHECK(cpu_features_.avx2);
+ RTC_DCHECK_EQ(x.size(), y.size());
+ __m256 accumulator = _mm256_setzero_ps();
+ constexpr int kBlockSizeLog2 = 3;
+ constexpr int kBlockSize = 1 << kBlockSizeLog2;
+ const int incomplete_block_index = (x.size() >> kBlockSizeLog2)
+ << kBlockSizeLog2;
+ for (int i = 0; i < incomplete_block_index; i += kBlockSize) {
+ RTC_DCHECK_LE(i + kBlockSize, x.size());
+ const __m256 x_i = _mm256_loadu_ps(&x[i]);
+ const __m256 y_i = _mm256_loadu_ps(&y[i]);
+ accumulator = _mm256_fmadd_ps(x_i, y_i, accumulator);
+ }
+ // Reduce `accumulator` by addition.
+ __m128 high = _mm256_extractf128_ps(accumulator, 1);
+ __m128 low = _mm256_extractf128_ps(accumulator, 0);
+ low = _mm_add_ps(high, low);
+ high = _mm_movehl_ps(high, low);
+ low = _mm_add_ps(high, low);
+ high = _mm_shuffle_ps(low, low, 1);
+ low = _mm_add_ss(high, low);
+ float dot_product = _mm_cvtss_f32(low);
+ // Add the result for the last block if incomplete.
+ for (int i = incomplete_block_index; i < rtc::dchecked_cast<int>(x.size());
+ ++i) {
+ dot_product += x[i] * y[i];
+ }
+ return dot_product;
+}
+
+} // namespace rnn_vad
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/vector_math_avx2_gn/moz.build b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/vector_math_avx2_gn/moz.build
new file mode 100644
index 0000000000..a640c1993a
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/vector_math_avx2_gn/moz.build
@@ -0,0 +1,173 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+CXXFLAGS += [
+ "-mavx2",
+ "-mfma"
+]
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_AVX2"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/vector_math_avx2.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+Library("vector_math_avx2_gn")
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/vector_math_gn/moz.build b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/vector_math_gn/moz.build
new file mode 100644
index 0000000000..f3e853fd55
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/vector_math_gn/moz.build
@@ -0,0 +1,204 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("vector_math_gn")
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/vector_math_unittest.cc b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/vector_math_unittest.cc
new file mode 100644
index 0000000000..45fd65d61e
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/vector_math_unittest.cc
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/agc2/rnn_vad/vector_math.h"
+
+#include <vector>
+
+#include "modules/audio_processing/agc2/cpu_features.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace rnn_vad {
+namespace {
+
+constexpr int kSizeOfX = 19;
+constexpr float kX[kSizeOfX] = {
+ 0.31593041f, 0.9350786f, -0.25252445f, -0.86956251f, -0.9673632f,
+ 0.54571901f, -0.72504495f, -0.79509912f, -0.25525012f, -0.73340473f,
+ 0.15747377f, -0.04370565f, 0.76135145f, -0.57239645f, 0.68616848f,
+ 0.3740298f, 0.34710799f, -0.92207423f, 0.10738454f};
+constexpr int kSizeOfXSubSpan = 16;
+static_assert(kSizeOfXSubSpan < kSizeOfX, "");
+constexpr float kEnergyOfX = 7.315563958160327f;
+constexpr float kEnergyOfXSubspan = 6.333327669592963f;
+
+class VectorMathParametrization
+ : public ::testing::TestWithParam<AvailableCpuFeatures> {};
+
+TEST_P(VectorMathParametrization, TestDotProduct) {
+ VectorMath vector_math(/*cpu_features=*/GetParam());
+ EXPECT_FLOAT_EQ(vector_math.DotProduct(kX, kX), kEnergyOfX);
+ EXPECT_FLOAT_EQ(
+ vector_math.DotProduct({kX, kSizeOfXSubSpan}, {kX, kSizeOfXSubSpan}),
+ kEnergyOfXSubspan);
+}
+
+// Finds the relevant CPU feature combinations to test.
+std::vector<AvailableCpuFeatures> GetCpuFeaturesToTest() {
+ std::vector<AvailableCpuFeatures> v;
+ v.push_back({/*sse2=*/false, /*avx2=*/false, /*neon=*/false});
+ AvailableCpuFeatures available = GetAvailableCpuFeatures();
+ if (available.avx2) {
+ v.push_back({/*sse2=*/false, /*avx2=*/true, /*neon=*/false});
+ }
+ if (available.sse2) {
+ v.push_back({/*sse2=*/true, /*avx2=*/false, /*neon=*/false});
+ }
+ if (available.neon) {
+ v.push_back({/*sse2=*/false, /*avx2=*/false, /*neon=*/true});
+ }
+ return v;
+}
+
+INSTANTIATE_TEST_SUITE_P(
+ RnnVadTest,
+ VectorMathParametrization,
+ ::testing::ValuesIn(GetCpuFeaturesToTest()),
+ [](const ::testing::TestParamInfo<AvailableCpuFeatures>& info) {
+ return info.param.ToString();
+ });
+
+} // namespace
+} // namespace rnn_vad
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/saturation_protector.cc b/third_party/libwebrtc/modules/audio_processing/agc2/saturation_protector.cc
new file mode 100644
index 0000000000..961baf4cd3
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/saturation_protector.cc
@@ -0,0 +1,183 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/agc2/saturation_protector.h"
+
+#include <algorithm>
+#include <memory>
+
+#include "modules/audio_processing/agc2/agc2_common.h"
+#include "modules/audio_processing/agc2/saturation_protector_buffer.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/numerics/safe_minmax.h"
+
+namespace webrtc {
+namespace {
+
+constexpr int kPeakEnveloperSuperFrameLengthMs = 400;
+constexpr float kMinMarginDb = 12.0f;
+constexpr float kMaxMarginDb = 25.0f;
+constexpr float kAttack = 0.9988493699365052f;
+constexpr float kDecay = 0.9997697679981565f;
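+// `kAttack` and `kDecay` are one-pole smoothing coefficients for the headroom
+// update below (`h = k * h + (1 - k) * x`); `kAttack` < `kDecay`, so the
+// headroom grows faster than it decays.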
+
+// Saturation protector state. Defined outside of `SaturationProtectorImpl` to
+// implement checkpoint and restore ops.
+struct SaturationProtectorState {
+ bool operator==(const SaturationProtectorState& s) const {
+ return headroom_db == s.headroom_db &&
+ peak_delay_buffer == s.peak_delay_buffer &&
+ max_peaks_dbfs == s.max_peaks_dbfs &&
+ time_since_push_ms == s.time_since_push_ms;
+ }
+ inline bool operator!=(const SaturationProtectorState& s) const {
+ return !(*this == s);
+ }
+
+ float headroom_db;
+ SaturationProtectorBuffer peak_delay_buffer;
+ float max_peaks_dbfs;
+ int time_since_push_ms; // Time since the last ring buffer push operation.
+};
+
+// Resets the saturation protector state.
+void ResetSaturationProtectorState(float initial_headroom_db,
+ SaturationProtectorState& state) {
+ state.headroom_db = initial_headroom_db;
+ state.peak_delay_buffer.Reset();
+ state.max_peaks_dbfs = kMinLevelDbfs;
+ state.time_since_push_ms = 0;
+}
+
+// Updates `state` by analyzing the estimated speech level `speech_level_dbfs`
+// and the peak level `peak_dbfs` for an observed frame. `state` must only be
+// modified by calling this function.
+void UpdateSaturationProtectorState(float peak_dbfs,
+ float speech_level_dbfs,
+ SaturationProtectorState& state) {
+ // Get the max peak over `kPeakEnveloperSuperFrameLengthMs` ms.
+ state.max_peaks_dbfs = std::max(state.max_peaks_dbfs, peak_dbfs);
+ state.time_since_push_ms += kFrameDurationMs;
+ if (rtc::SafeGt(state.time_since_push_ms, kPeakEnveloperSuperFrameLengthMs)) {
+ // Push `max_peaks_dbfs` back into the ring buffer.
+ state.peak_delay_buffer.PushBack(state.max_peaks_dbfs);
+ // Reset.
+ state.max_peaks_dbfs = kMinLevelDbfs;
+ state.time_since_push_ms = 0;
+ }
+
+ // Update the headroom by comparing the estimated speech level and the delayed
+ // max speech peak.
+ const float delayed_peak_dbfs =
+ state.peak_delay_buffer.Front().value_or(state.max_peaks_dbfs);
+ const float difference_db = delayed_peak_dbfs - speech_level_dbfs;
+ if (difference_db > state.headroom_db) {
+ // Attack.
+ state.headroom_db =
+ state.headroom_db * kAttack + difference_db * (1.0f - kAttack);
+ } else {
+ // Decay.
+ state.headroom_db =
+ state.headroom_db * kDecay + difference_db * (1.0f - kDecay);
+ }
+
+ state.headroom_db =
+ rtc::SafeClamp<float>(state.headroom_db, kMinMarginDb, kMaxMarginDb);
+}
+
+// Saturation protector which recommends a headroom based on the recent peaks.
+class SaturationProtectorImpl : public SaturationProtector {
+ public:
+ explicit SaturationProtectorImpl(float initial_headroom_db,
+ int adjacent_speech_frames_threshold,
+ ApmDataDumper* apm_data_dumper)
+ : apm_data_dumper_(apm_data_dumper),
+ initial_headroom_db_(initial_headroom_db),
+ adjacent_speech_frames_threshold_(adjacent_speech_frames_threshold) {
+ Reset();
+ }
+ SaturationProtectorImpl(const SaturationProtectorImpl&) = delete;
+ SaturationProtectorImpl& operator=(const SaturationProtectorImpl&) = delete;
+ ~SaturationProtectorImpl() = default;
+
+ float HeadroomDb() override { return headroom_db_; }
+
+ void Analyze(float speech_probability,
+ float peak_dbfs,
+ float speech_level_dbfs) override {
+ if (speech_probability < kVadConfidenceThreshold) {
+ // Not a speech frame.
+ if (adjacent_speech_frames_threshold_ > 1) {
+ // When two or more adjacent speech frames are required in order to
+ // update the state, we need to decide whether to discard or confirm the
+ // updates based on the speech sequence length.
+ if (num_adjacent_speech_frames_ >= adjacent_speech_frames_threshold_) {
+ // First non-speech frame after a long enough sequence of speech
+ // frames. Update the reliable state.
+ reliable_state_ = preliminary_state_;
+ } else if (num_adjacent_speech_frames_ > 0) {
+ // First non-speech frame after a too short sequence of speech frames.
+ // Reset to the last reliable state.
+ preliminary_state_ = reliable_state_;
+ }
+ }
+ num_adjacent_speech_frames_ = 0;
+ } else {
+ // Speech frame observed.
+ num_adjacent_speech_frames_++;
+
+ // Update preliminary level estimate.
+ UpdateSaturationProtectorState(peak_dbfs, speech_level_dbfs,
+ preliminary_state_);
+
+ if (num_adjacent_speech_frames_ >= adjacent_speech_frames_threshold_) {
+ // `preliminary_state_` is now reliable. Update the headroom.
+ headroom_db_ = preliminary_state_.headroom_db;
+ }
+ }
+ DumpDebugData();
+ }
+
+ void Reset() override {
+ num_adjacent_speech_frames_ = 0;
+ headroom_db_ = initial_headroom_db_;
+ ResetSaturationProtectorState(initial_headroom_db_, preliminary_state_);
+ ResetSaturationProtectorState(initial_headroom_db_, reliable_state_);
+ }
+
+ private:
+ void DumpDebugData() {
+ apm_data_dumper_->DumpRaw(
+ "agc2_saturation_protector_preliminary_max_peak_dbfs",
+ preliminary_state_.max_peaks_dbfs);
+ apm_data_dumper_->DumpRaw(
+ "agc2_saturation_protector_reliable_max_peak_dbfs",
+ reliable_state_.max_peaks_dbfs);
+ }
+
+ ApmDataDumper* const apm_data_dumper_;
+ const float initial_headroom_db_;
+ const int adjacent_speech_frames_threshold_;
+ int num_adjacent_speech_frames_;
+ float headroom_db_;
+ SaturationProtectorState preliminary_state_;
+ SaturationProtectorState reliable_state_;
+};
+
+} // namespace
+
+std::unique_ptr<SaturationProtector> CreateSaturationProtector(
+ float initial_headroom_db,
+ int adjacent_speech_frames_threshold,
+ ApmDataDumper* apm_data_dumper) {
+ return std::make_unique<SaturationProtectorImpl>(
+ initial_headroom_db, adjacent_speech_frames_threshold, apm_data_dumper);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/saturation_protector.h b/third_party/libwebrtc/modules/audio_processing/agc2/saturation_protector.h
new file mode 100644
index 0000000000..ef22145d5f
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/saturation_protector.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AGC2_SATURATION_PROTECTOR_H_
+#define MODULES_AUDIO_PROCESSING_AGC2_SATURATION_PROTECTOR_H_
+
+#include <memory>
+
+namespace webrtc {
+class ApmDataDumper;
+
+// Saturation protector. Analyzes peak levels and recommends a headroom to
+// reduce the chances of clipping.
+class SaturationProtector {
+ public:
+ virtual ~SaturationProtector() = default;
+
+ // Returns the recommended headroom in dB.
+ virtual float HeadroomDb() = 0;
+
+ // Analyzes the peak level of a 10 ms frame along with its speech probability
+ // and the current speech level estimate to update the recommended headroom.
+ virtual void Analyze(float speech_probability,
+ float peak_dbfs,
+ float speech_level_dbfs) = 0;
+
+ // Resets the internal state.
+ virtual void Reset() = 0;
+};
+
+// Creates a saturation protector that starts at `initial_headroom_db`.
+std::unique_ptr<SaturationProtector> CreateSaturationProtector(
+ float initial_headroom_db,
+ int adjacent_speech_frames_threshold,
+ ApmDataDumper* apm_data_dumper);
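+
+// Example usage (a minimal sketch with illustrative values; see the unit
+// tests for realistic configurations):
+//
+//   ApmDataDumper apm_data_dumper(0);
+//   auto saturation_protector = CreateSaturationProtector(
+//       /*initial_headroom_db=*/20.0f,
+//       /*adjacent_speech_frames_threshold=*/1, &apm_data_dumper);
+//   saturation_protector->Analyze(/*speech_probability=*/1.0f,
+//                                 /*peak_dbfs=*/-5.0f,
+//                                 /*speech_level_dbfs=*/-25.0f);
+//   const float headroom_db = saturation_protector->HeadroomDb();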
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AGC2_SATURATION_PROTECTOR_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/saturation_protector_buffer.cc b/third_party/libwebrtc/modules/audio_processing/agc2/saturation_protector_buffer.cc
new file mode 100644
index 0000000000..41efdad2c8
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/saturation_protector_buffer.cc
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/agc2/saturation_protector_buffer.h"
+
+#include "rtc_base/checks.h"
+#include "rtc_base/numerics/safe_compare.h"
+
+namespace webrtc {
+
+SaturationProtectorBuffer::SaturationProtectorBuffer() = default;
+
+SaturationProtectorBuffer::~SaturationProtectorBuffer() = default;
+
+bool SaturationProtectorBuffer::operator==(
+ const SaturationProtectorBuffer& b) const {
+ RTC_DCHECK_LE(size_, buffer_.size());
+ RTC_DCHECK_LE(b.size_, b.buffer_.size());
+ if (size_ != b.size_) {
+ return false;
+ }
+ for (int i = 0, i0 = FrontIndex(), i1 = b.FrontIndex(); i < size_;
+ ++i, ++i0, ++i1) {
+ if (buffer_[i0 % buffer_.size()] != b.buffer_[i1 % b.buffer_.size()]) {
+ return false;
+ }
+ }
+ return true;
+}
+
+int SaturationProtectorBuffer::Capacity() const {
+ return buffer_.size();
+}
+
+int SaturationProtectorBuffer::Size() const {
+ return size_;
+}
+
+void SaturationProtectorBuffer::Reset() {
+ next_ = 0;
+ size_ = 0;
+}
+
+void SaturationProtectorBuffer::PushBack(float v) {
+ RTC_DCHECK_GE(next_, 0);
+ RTC_DCHECK_GE(size_, 0);
+ RTC_DCHECK_LT(next_, buffer_.size());
+ RTC_DCHECK_LE(size_, buffer_.size());
+ buffer_[next_++] = v;
+ if (rtc::SafeEq(next_, buffer_.size())) {
+ next_ = 0;
+ }
+ if (rtc::SafeLt(size_, buffer_.size())) {
+ size_++;
+ }
+}
+
+absl::optional<float> SaturationProtectorBuffer::Front() const {
+ if (size_ == 0) {
+ return absl::nullopt;
+ }
+ RTC_DCHECK_LT(FrontIndex(), buffer_.size());
+ return buffer_[FrontIndex()];
+}
+
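+// Returns the index of the oldest value: once the buffer is full, that is the
+// slot about to be overwritten at `next_`; until then it is position 0.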
+int SaturationProtectorBuffer::FrontIndex() const {
+ return rtc::SafeEq(size_, buffer_.size()) ? next_ : 0;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/saturation_protector_buffer.h b/third_party/libwebrtc/modules/audio_processing/agc2/saturation_protector_buffer.h
new file mode 100644
index 0000000000..e17d0998c4
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/saturation_protector_buffer.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AGC2_SATURATION_PROTECTOR_BUFFER_H_
+#define MODULES_AUDIO_PROCESSING_AGC2_SATURATION_PROTECTOR_BUFFER_H_
+
+#include <array>
+
+#include "absl/types/optional.h"
+#include "modules/audio_processing/agc2/agc2_common.h"
+
+namespace webrtc {
+
+// Ring buffer for the saturation protector which only supports (i) push back
+// and (ii) read oldest item.
+class SaturationProtectorBuffer {
+ public:
+ SaturationProtectorBuffer();
+ ~SaturationProtectorBuffer();
+
+ bool operator==(const SaturationProtectorBuffer& b) const;
+ inline bool operator!=(const SaturationProtectorBuffer& b) const {
+ return !(*this == b);
+ }
+
+ // Maximum number of values that the buffer can contain.
+ int Capacity() const;
+
+ // Number of values in the buffer.
+ int Size() const;
+
+ void Reset();
+
+ // Pushes back `v`. If the buffer is full, the oldest value is replaced.
+ void PushBack(float v);
+
+ // Returns the oldest item in the buffer. Returns an empty value if the
+ // buffer is empty.
+ absl::optional<float> Front() const;
+
+ private:
+ int FrontIndex() const;
+ // `buffer_` has `size_` elements (up to the size of `buffer_`) and `next_` is
+ // the position where the next new value is written in `buffer_`.
+ std::array<float, kSaturationProtectorBufferSize> buffer_;
+ int next_ = 0;
+ int size_ = 0;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AGC2_SATURATION_PROTECTOR_BUFFER_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/saturation_protector_buffer_unittest.cc b/third_party/libwebrtc/modules/audio_processing/agc2/saturation_protector_buffer_unittest.cc
new file mode 100644
index 0000000000..22187bf027
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/saturation_protector_buffer_unittest.cc
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/agc2/saturation_protector_buffer.h"
+
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+using ::testing::Eq;
+using ::testing::Optional;
+
+TEST(GainController2SaturationProtectorBuffer, Init) {
+ SaturationProtectorBuffer b;
+ EXPECT_EQ(b.Size(), 0);
+ EXPECT_FALSE(b.Front().has_value());
+}
+
+TEST(GainController2SaturationProtectorBuffer, PushBack) {
+ SaturationProtectorBuffer b;
+ constexpr float kValue = 123.0f;
+ b.PushBack(kValue);
+ EXPECT_EQ(b.Size(), 1);
+ EXPECT_THAT(b.Front(), Optional(Eq(kValue)));
+}
+
+TEST(GainController2SaturationProtectorBuffer, Reset) {
+ SaturationProtectorBuffer b;
+ b.PushBack(123.0f);
+ b.Reset();
+ EXPECT_EQ(b.Size(), 0);
+ EXPECT_FALSE(b.Front().has_value());
+}
+
+// Checks that the front value does not change until the ring buffer gets full.
+TEST(GainController2SaturationProtectorBuffer, FrontUntilBufferIsFull) {
+ SaturationProtectorBuffer b;
+ constexpr float kValue = 123.0f;
+ b.PushBack(kValue);
+ for (int i = 1; i < b.Capacity(); ++i) {
+ SCOPED_TRACE(i);
+ EXPECT_THAT(b.Front(), Optional(Eq(kValue)));
+ b.PushBack(kValue + i);
+ }
+}
+
+// Checks that when the buffer is full it behaves as a shift register.
+TEST(GainController2SaturationProtectorBuffer, FrontIsDelayed) {
+ SaturationProtectorBuffer b;
+ // Fill the buffer.
+ for (int i = 0; i < b.Capacity(); ++i) {
+ b.PushBack(i);
+ }
+ // The ring buffer should now behave as a shift register with a delay equal to
+ // its capacity.
+ for (int i = b.Capacity(); i < 2 * b.Capacity() + 1; ++i) {
+ SCOPED_TRACE(i);
+ EXPECT_THAT(b.Front(), Optional(Eq(i - b.Capacity())));
+ b.PushBack(i);
+ }
+}
+
+} // namespace
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/saturation_protector_unittest.cc b/third_party/libwebrtc/modules/audio_processing/agc2/saturation_protector_unittest.cc
new file mode 100644
index 0000000000..3b104be8cd
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/saturation_protector_unittest.cc
@@ -0,0 +1,140 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/agc2/saturation_protector.h"
+
+#include <algorithm>
+#include <cmath>
+
+#include "modules/audio_processing/agc2/agc2_common.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+#include "rtc_base/gunit.h"
+
+namespace webrtc {
+namespace {
+
+constexpr float kInitialHeadroomDb = 20.0f;
+constexpr int kNoAdjacentSpeechFramesRequired = 1;
+constexpr float kMaxSpeechProbability = 1.0f;
+
+// Calls `Analyze(speech_probability, peak_dbfs, speech_level_dbfs)`
+// `num_iterations` times on `saturation_protector` and returns the largest
+// headroom difference between two consecutive calls.
+float RunOnConstantLevel(int num_iterations,
+ float speech_probability,
+ float peak_dbfs,
+ float speech_level_dbfs,
+ SaturationProtector& saturation_protector) {
+ float last_headroom = saturation_protector.HeadroomDb();
+ float max_difference = 0.0f;
+ for (int i = 0; i < num_iterations; ++i) {
+ saturation_protector.Analyze(speech_probability, peak_dbfs,
+ speech_level_dbfs);
+ const float new_headroom = saturation_protector.HeadroomDb();
+ max_difference =
+ std::max(max_difference, std::fabs(new_headroom - last_headroom));
+ last_headroom = new_headroom;
+ }
+ return max_difference;
+}
+
+// Checks that the returned headroom value is correctly reset.
+TEST(GainController2SaturationProtector, Reset) {
+ ApmDataDumper apm_data_dumper(0);
+ auto saturation_protector = CreateSaturationProtector(
+ kInitialHeadroomDb, kNoAdjacentSpeechFramesRequired, &apm_data_dumper);
+ const float initial_headroom_db = saturation_protector->HeadroomDb();
+ RunOnConstantLevel(/*num_iterations=*/10, kMaxSpeechProbability,
+ /*peak_dbfs=*/0.0f,
+ /*speech_level_dbfs=*/-10.0f, *saturation_protector);
+ // Make sure that there are side-effects.
+ ASSERT_NE(initial_headroom_db, saturation_protector->HeadroomDb());
+ saturation_protector->Reset();
+ EXPECT_EQ(initial_headroom_db, saturation_protector->HeadroomDb());
+}
+
+// Checks that the estimate converges to the ratio between peaks and level
+// estimator values after a while.
+TEST(GainController2SaturationProtector, EstimatesCrestRatio) {
+ constexpr int kNumIterations = 2000;
+ constexpr float kPeakLevelDbfs = -20.0f;
+ constexpr float kCrestFactorDb = kInitialHeadroomDb + 1.0f;
+ constexpr float kSpeechLevelDbfs = kPeakLevelDbfs - kCrestFactorDb;
+ const float kMaxDifferenceDb =
+ 0.5f * std::fabs(kInitialHeadroomDb - kCrestFactorDb);
+
+ ApmDataDumper apm_data_dumper(0);
+ auto saturation_protector = CreateSaturationProtector(
+ kInitialHeadroomDb, kNoAdjacentSpeechFramesRequired, &apm_data_dumper);
+ RunOnConstantLevel(kNumIterations, kMaxSpeechProbability, kPeakLevelDbfs,
+ kSpeechLevelDbfs, *saturation_protector);
+ EXPECT_NEAR(saturation_protector->HeadroomDb(), kCrestFactorDb,
+ kMaxDifferenceDb);
+}
+
+// Checks that the headroom does not change too quickly.
+TEST(GainController2SaturationProtector, ChangeSlowly) {
+ constexpr int kNumIterations = 1000;
+ constexpr float kPeakLevelDbfs = -20.f;
+ constexpr float kCrestFactorDb = kInitialHeadroomDb - 5.f;
+ constexpr float kOtherCrestFactorDb = kInitialHeadroomDb;
+ constexpr float kSpeechLevelDbfs = kPeakLevelDbfs - kCrestFactorDb;
+ constexpr float kOtherSpeechLevelDbfs = kPeakLevelDbfs - kOtherCrestFactorDb;
+
+ ApmDataDumper apm_data_dumper(0);
+ auto saturation_protector = CreateSaturationProtector(
+ kInitialHeadroomDb, kNoAdjacentSpeechFramesRequired, &apm_data_dumper);
+ float max_difference_db =
+ RunOnConstantLevel(kNumIterations, kMaxSpeechProbability, kPeakLevelDbfs,
+ kSpeechLevelDbfs, *saturation_protector);
+ max_difference_db = std::max(
+ RunOnConstantLevel(kNumIterations, kMaxSpeechProbability, kPeakLevelDbfs,
+ kOtherSpeechLevelDbfs, *saturation_protector),
+ max_difference_db);
+ constexpr float kMaxChangeSpeedDbPerSecond = 0.5f; // 1 db / 2 seconds.
+ EXPECT_LE(max_difference_db,
+ kMaxChangeSpeedDbPerSecond / 1000 * kFrameDurationMs);
+}
+
+class SaturationProtectorParametrization
+ : public ::testing::TestWithParam<int> {
+ protected:
+ int adjacent_speech_frames_threshold() const { return GetParam(); }
+};
+
+TEST_P(SaturationProtectorParametrization, DoNotAdaptToShortSpeechSegments) {
+ ApmDataDumper apm_data_dumper(0);
+ auto saturation_protector = CreateSaturationProtector(
+ kInitialHeadroomDb, adjacent_speech_frames_threshold(), &apm_data_dumper);
+ const float initial_headroom_db = saturation_protector->HeadroomDb();
+ RunOnConstantLevel(/*num_iterations=*/adjacent_speech_frames_threshold() - 1,
+ kMaxSpeechProbability,
+ /*peak_dbfs=*/0.0f,
+ /*speech_level_dbfs=*/-10.0f, *saturation_protector);
+ // No adaptation expected.
+ EXPECT_EQ(initial_headroom_db, saturation_protector->HeadroomDb());
+}
+
+TEST_P(SaturationProtectorParametrization, AdaptToEnoughSpeechSegments) {
+ ApmDataDumper apm_data_dumper(0);
+ auto saturation_protector = CreateSaturationProtector(
+ kInitialHeadroomDb, adjacent_speech_frames_threshold(), &apm_data_dumper);
+ const float initial_headroom_db = saturation_protector->HeadroomDb();
+ RunOnConstantLevel(/*num_iterations=*/adjacent_speech_frames_threshold() + 1,
+ kMaxSpeechProbability,
+ /*peak_dbfs=*/0.0f,
+ /*speech_level_dbfs=*/-10.0f, *saturation_protector);
+ // Adaptation expected.
+ EXPECT_NE(initial_headroom_db, saturation_protector->HeadroomDb());
+}
+
+INSTANTIATE_TEST_SUITE_P(GainController2,
+ SaturationProtectorParametrization,
+ ::testing::Values(2, 9, 17));
+
+} // namespace
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/vad_wrapper.cc b/third_party/libwebrtc/modules/audio_processing/agc2/vad_wrapper.cc
new file mode 100644
index 0000000000..91448f8d86
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/vad_wrapper.cc
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/agc2/vad_wrapper.h"
+
+#include <array>
+#include <utility>
+
+#include "api/array_view.h"
+#include "common_audio/resampler/include/push_resampler.h"
+#include "modules/audio_processing/agc2/agc2_common.h"
+#include "modules/audio_processing/agc2/rnn_vad/common.h"
+#include "modules/audio_processing/agc2/rnn_vad/features_extraction.h"
+#include "modules/audio_processing/agc2/rnn_vad/rnn.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace {
+
+constexpr int kNumFramesPerSecond = 100;
+
+class MonoVadImpl : public VoiceActivityDetectorWrapper::MonoVad {
+ public:
+ explicit MonoVadImpl(const AvailableCpuFeatures& cpu_features)
+ : features_extractor_(cpu_features), rnn_vad_(cpu_features) {}
+ MonoVadImpl(const MonoVadImpl&) = delete;
+ MonoVadImpl& operator=(const MonoVadImpl&) = delete;
+ ~MonoVadImpl() = default;
+
+ int SampleRateHz() const override { return rnn_vad::kSampleRate24kHz; }
+ void Reset() override { rnn_vad_.Reset(); }
+ float Analyze(rtc::ArrayView<const float> frame) override {
+ RTC_DCHECK_EQ(frame.size(), rnn_vad::kFrameSize10ms24kHz);
+ std::array<float, rnn_vad::kFeatureVectorSize> feature_vector;
+ const bool is_silence = features_extractor_.CheckSilenceComputeFeatures(
+ /*samples=*/{frame.data(), rnn_vad::kFrameSize10ms24kHz},
+ feature_vector);
+ return rnn_vad_.ComputeVadProbability(feature_vector, is_silence);
+ }
+
+ private:
+ rnn_vad::FeaturesExtractor features_extractor_;
+ rnn_vad::RnnVad rnn_vad_;
+};
+
+} // namespace
+
+VoiceActivityDetectorWrapper::VoiceActivityDetectorWrapper(
+ int vad_reset_period_ms,
+ const AvailableCpuFeatures& cpu_features,
+ int sample_rate_hz)
+ : VoiceActivityDetectorWrapper(vad_reset_period_ms,
+ std::make_unique<MonoVadImpl>(cpu_features),
+ sample_rate_hz) {}
+
+VoiceActivityDetectorWrapper::VoiceActivityDetectorWrapper(
+ int vad_reset_period_ms,
+ std::unique_ptr<MonoVad> vad,
+ int sample_rate_hz)
+ : vad_reset_period_frames_(
+ rtc::CheckedDivExact(vad_reset_period_ms, kFrameDurationMs)),
+ time_to_vad_reset_(vad_reset_period_frames_),
+ vad_(std::move(vad)) {
+ RTC_DCHECK(vad_);
+ RTC_DCHECK_GT(vad_reset_period_frames_, 1);
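+ // Size `resampled_buffer_` to hold one 10 ms frame at the VAD sample rate.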
+ resampled_buffer_.resize(
+ rtc::CheckedDivExact(vad_->SampleRateHz(), kNumFramesPerSecond));
+ Initialize(sample_rate_hz);
+}
+
+VoiceActivityDetectorWrapper::~VoiceActivityDetectorWrapper() = default;
+
+void VoiceActivityDetectorWrapper::Initialize(int sample_rate_hz) {
+ RTC_DCHECK_GT(sample_rate_hz, 0);
+ frame_size_ = rtc::CheckedDivExact(sample_rate_hz, kNumFramesPerSecond);
+ int status =
+ resampler_.InitializeIfNeeded(sample_rate_hz, vad_->SampleRateHz(),
+ /*num_channels=*/1);
+ constexpr int kStatusOk = 0;
+ RTC_DCHECK_EQ(status, kStatusOk);
+ vad_->Reset();
+}
+
+float VoiceActivityDetectorWrapper::Analyze(AudioFrameView<const float> frame) {
+ // Periodically reset the VAD.
+ time_to_vad_reset_--;
+ if (time_to_vad_reset_ <= 0) {
+ vad_->Reset();
+ time_to_vad_reset_ = vad_reset_period_frames_;
+ }
+ // Resample the first channel of `frame`.
+ RTC_DCHECK_EQ(frame.samples_per_channel(), frame_size_);
+ resampler_.Resample(frame.channel(0).data(), frame_size_,
+ resampled_buffer_.data(), resampled_buffer_.size());
+
+ return vad_->Analyze(resampled_buffer_);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/vad_wrapper.h b/third_party/libwebrtc/modules/audio_processing/agc2/vad_wrapper.h
new file mode 100644
index 0000000000..6df0ead271
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/vad_wrapper.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AGC2_VAD_WRAPPER_H_
+#define MODULES_AUDIO_PROCESSING_AGC2_VAD_WRAPPER_H_
+
+#include <memory>
+#include <vector>
+
+#include "api/array_view.h"
+#include "common_audio/resampler/include/push_resampler.h"
+#include "modules/audio_processing/agc2/cpu_features.h"
+#include "modules/audio_processing/include/audio_frame_view.h"
+
+namespace webrtc {
+
+// Wraps a single-channel Voice Activity Detector (VAD) which is used to analyze
+// the first channel of the input audio frames. Takes care of resampling the
+// input frames to match the sample rate of the wrapped VAD and periodically
+// resets the VAD.
+class VoiceActivityDetectorWrapper {
+ public:
+ // Single channel VAD interface.
+ class MonoVad {
+ public:
+ virtual ~MonoVad() = default;
+ // Returns the sample rate (Hz) required for the input frames analyzed by
+ // `Analyze()`.
+ virtual int SampleRateHz() const = 0;
+ // Resets the internal state.
+ virtual void Reset() = 0;
+ // Analyzes an audio frame and returns the speech probability.
+ virtual float Analyze(rtc::ArrayView<const float> frame) = 0;
+ };
+
+ // Ctor. `vad_reset_period_ms` indicates the period in milliseconds to call
+ // `MonoVad::Reset()`; it must be equal to or greater than the duration of two
+ // frames. Uses `cpu_features` to instantiate the default VAD.
+ VoiceActivityDetectorWrapper(int vad_reset_period_ms,
+ const AvailableCpuFeatures& cpu_features,
+ int sample_rate_hz);
+ // Ctor. Uses a custom `vad`.
+ VoiceActivityDetectorWrapper(int vad_reset_period_ms,
+ std::unique_ptr<MonoVad> vad,
+ int sample_rate_hz);
+
+ VoiceActivityDetectorWrapper(const VoiceActivityDetectorWrapper&) = delete;
+ VoiceActivityDetectorWrapper& operator=(const VoiceActivityDetectorWrapper&) =
+ delete;
+ ~VoiceActivityDetectorWrapper();
+
+ // Initializes the VAD wrapper.
+ void Initialize(int sample_rate_hz);
+
+ // Analyzes the first channel of `frame` and returns the speech probability.
+ // `frame` must be a 10 ms frame with the sample rate specified in the last
+ // `Initialize()` call.
+ float Analyze(AudioFrameView<const float> frame);
+
+ private:
+ const int vad_reset_period_frames_;
+ int frame_size_;
+ int time_to_vad_reset_;
+ PushResampler<float> resampler_;
+ std::unique_ptr<MonoVad> vad_;
+ std::vector<float> resampled_buffer_;
+};
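+
+// Example usage (a minimal sketch; the reset period and rates are
+// illustrative and `GetAvailableCpuFeatures()` comes from cpu_features.h):
+//
+//   VoiceActivityDetectorWrapper vad(/*vad_reset_period_ms=*/1000,
+//                                    GetAvailableCpuFeatures(),
+//                                    /*sample_rate_hz=*/48000);
+//   // `frame` is a 10 ms AudioFrameView<const float> at 48 kHz.
+//   const float speech_probability = vad.Analyze(frame);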
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AGC2_VAD_WRAPPER_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/vad_wrapper_gn/moz.build b/third_party/libwebrtc/modules/audio_processing/agc2/vad_wrapper_gn/moz.build
new file mode 100644
index 0000000000..30e1f28164
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/vad_wrapper_gn/moz.build
@@ -0,0 +1,212 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/audio_processing/agc2/vad_wrapper.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("vad_wrapper_gn")
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/vad_wrapper_unittest.cc b/third_party/libwebrtc/modules/audio_processing/agc2/vad_wrapper_unittest.cc
new file mode 100644
index 0000000000..91efdb566e
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/vad_wrapper_unittest.cc
@@ -0,0 +1,181 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/agc2/vad_wrapper.h"
+
+#include <limits>
+#include <memory>
+#include <tuple>
+#include <utility>
+#include <vector>
+
+#include "modules/audio_processing/agc2/agc2_common.h"
+#include "modules/audio_processing/include/audio_frame_view.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/gunit.h"
+#include "rtc_base/numerics/safe_compare.h"
+#include "test/gmock.h"
+
+namespace webrtc {
+namespace {
+
+using ::testing::AnyNumber;
+using ::testing::Return;
+using ::testing::ReturnRoundRobin;
+using ::testing::Truly;
+
+constexpr int kNumFramesPerSecond = 100;
+
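+// A reset period so long that, in practice, the VAD is never periodically
+// reset: the largest multiple of `kFrameDurationMs` that fits in an int.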
+constexpr int kNoVadPeriodicReset =
+ kFrameDurationMs * (std::numeric_limits<int>::max() / kFrameDurationMs);
+
+constexpr int kSampleRate8kHz = 8000;
+
+class MockVad : public VoiceActivityDetectorWrapper::MonoVad {
+ public:
+ MOCK_METHOD(int, SampleRateHz, (), (const, override));
+ MOCK_METHOD(void, Reset, (), (override));
+ MOCK_METHOD(float, Analyze, (rtc::ArrayView<const float> frame), (override));
+};
+
+// Checks that the ctor and `Initialize()` read the sample rate of the wrapped
+// VAD.
+TEST(GainController2VoiceActivityDetectorWrapper, CtorAndInitReadSampleRate) {
+ auto vad = std::make_unique<MockVad>();
+ EXPECT_CALL(*vad, SampleRateHz)
+ .Times(2)
+ .WillRepeatedly(Return(kSampleRate8kHz));
+ EXPECT_CALL(*vad, Reset).Times(AnyNumber());
+ auto vad_wrapper = std::make_unique<VoiceActivityDetectorWrapper>(
+ kNoVadPeriodicReset, std::move(vad), kSampleRate8kHz);
+}
+
+// Creates a `VoiceActivityDetectorWrapper` injecting a mock VAD that
+// repeatedly returns the next value from `speech_probabilities`, restarting
+// from the beginning after the last element has been returned.
+std::unique_ptr<VoiceActivityDetectorWrapper> CreateMockVadWrapper(
+ int vad_reset_period_ms,
+ int sample_rate_hz,
+ const std::vector<float>& speech_probabilities,
+ int expected_vad_reset_calls) {
+ auto vad = std::make_unique<MockVad>();
+ EXPECT_CALL(*vad, SampleRateHz)
+ .Times(AnyNumber())
+ .WillRepeatedly(Return(sample_rate_hz));
+ if (expected_vad_reset_calls >= 0) {
+ EXPECT_CALL(*vad, Reset).Times(expected_vad_reset_calls);
+ }
+ EXPECT_CALL(*vad, Analyze)
+ .Times(AnyNumber())
+ .WillRepeatedly(ReturnRoundRobin(speech_probabilities));
+ return std::make_unique<VoiceActivityDetectorWrapper>(
+ vad_reset_period_ms, std::move(vad), sample_rate_hz);
+}
+
+// 10 ms mono frame.
+struct FrameWithView {
+ // Ctor. Initializes the frame samples to zero.
+ explicit FrameWithView(int sample_rate_hz)
+ : samples(rtc::CheckedDivExact(sample_rate_hz, kNumFramesPerSecond),
+ 0.0f),
+ channel0(samples.data()),
+ view(&channel0, /*num_channels=*/1, samples.size()) {}
+ std::vector<float> samples;
+ const float* const channel0;
+ const AudioFrameView<const float> view;
+};
+
+// Checks that the expected speech probabilities are returned.
+TEST(GainController2VoiceActivityDetectorWrapper, CheckSpeechProbabilities) {
+ const std::vector<float> speech_probabilities{0.709f, 0.484f, 0.882f, 0.167f,
+ 0.44f, 0.525f, 0.858f, 0.314f,
+ 0.653f, 0.965f, 0.413f, 0.0f};
+ auto vad_wrapper = CreateMockVadWrapper(kNoVadPeriodicReset, kSampleRate8kHz,
+ speech_probabilities,
+ /*expected_vad_reset_calls=*/1);
+ FrameWithView frame(kSampleRate8kHz);
+ for (int i = 0; rtc::SafeLt(i, speech_probabilities.size()); ++i) {
+ SCOPED_TRACE(i);
+ EXPECT_EQ(speech_probabilities[i], vad_wrapper->Analyze(frame.view));
+ }
+}
+
+// Checks that the VAD is not periodically reset.
+TEST(GainController2VoiceActivityDetectorWrapper, VadNoPeriodicReset) {
+ constexpr int kNumFrames = 19;
+ auto vad_wrapper = CreateMockVadWrapper(kNoVadPeriodicReset, kSampleRate8kHz,
+ /*speech_probabilities=*/{1.0f},
+ /*expected_vad_reset_calls=*/1);
+ FrameWithView frame(kSampleRate8kHz);
+ for (int i = 0; i < kNumFrames; ++i) {
+ vad_wrapper->Analyze(frame.view);
+ }
+}
+
+class VadPeriodResetParametrization
+ : public ::testing::TestWithParam<std::tuple<int, int>> {
+ protected:
+ int num_frames() const { return std::get<0>(GetParam()); }
+ int vad_reset_period_frames() const { return std::get<1>(GetParam()); }
+};
+
+// Checks that the VAD is periodically reset with the expected period.
+TEST_P(VadPeriodResetParametrization, VadPeriodicReset) {
+ auto vad_wrapper = CreateMockVadWrapper(
+ /*vad_reset_period_ms=*/vad_reset_period_frames() * kFrameDurationMs,
+ kSampleRate8kHz,
+ /*speech_probabilities=*/{1.0f},
+ /*expected_vad_reset_calls=*/1 +
+ num_frames() / vad_reset_period_frames());
+ FrameWithView frame(kSampleRate8kHz);
+ for (int i = 0; i < num_frames(); ++i) {
+ vad_wrapper->Analyze(frame.view);
+ }
+}
+
+INSTANTIATE_TEST_SUITE_P(GainController2VoiceActivityDetectorWrapper,
+ VadPeriodResetParametrization,
+ ::testing::Combine(::testing::Values(1, 19, 123),
+ ::testing::Values(2, 5, 20, 53)));
+
+class VadResamplingParametrization
+ : public ::testing::TestWithParam<std::tuple<int, int>> {
+ protected:
+ int input_sample_rate_hz() const { return std::get<0>(GetParam()); }
+ int vad_sample_rate_hz() const { return std::get<1>(GetParam()); }
+};
+
+// Checks that, regardless of the input audio sample rate, the wrapped VAD
+// analyzes frames of the expected size, i.e., the size determined by its
+// internal sample rate.
+TEST_P(VadResamplingParametrization, CheckResampledFrameSize) {
+ auto vad = std::make_unique<MockVad>();
+ EXPECT_CALL(*vad, SampleRateHz)
+ .Times(AnyNumber())
+ .WillRepeatedly(Return(vad_sample_rate_hz()));
+ EXPECT_CALL(*vad, Reset).Times(1);
+ EXPECT_CALL(*vad, Analyze(Truly([this](rtc::ArrayView<const float> frame) {
+ return rtc::SafeEq(frame.size(), rtc::CheckedDivExact(vad_sample_rate_hz(),
+ kNumFramesPerSecond));
+ }))).Times(1);
+ auto vad_wrapper = std::make_unique<VoiceActivityDetectorWrapper>(
+ kNoVadPeriodicReset, std::move(vad), input_sample_rate_hz());
+ FrameWithView frame(input_sample_rate_hz());
+ vad_wrapper->Analyze(frame.view);
+}
+
+INSTANTIATE_TEST_SUITE_P(
+ GainController2VoiceActivityDetectorWrapper,
+ VadResamplingParametrization,
+ ::testing::Combine(::testing::Values(8000, 16000, 44100, 48000),
+ ::testing::Values(6000, 8000, 12000, 16000, 24000)));
+
+} // namespace
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/vector_float_frame.cc b/third_party/libwebrtc/modules/audio_processing/agc2/vector_float_frame.cc
new file mode 100644
index 0000000000..a70d815196
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/vector_float_frame.cc
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/agc2/vector_float_frame.h"
+
+namespace webrtc {
+
+namespace {
+
+std::vector<float*> ConstructChannelPointers(
+ std::vector<std::vector<float>>* x) {
+ std::vector<float*> channel_ptrs;
+ for (auto& v : *x) {
+ channel_ptrs.push_back(v.data());
+ }
+ return channel_ptrs;
+}
+} // namespace
+
+VectorFloatFrame::VectorFloatFrame(int num_channels,
+ int samples_per_channel,
+ float start_value)
+ : channels_(num_channels,
+ std::vector<float>(samples_per_channel, start_value)),
+ channel_ptrs_(ConstructChannelPointers(&channels_)),
+ float_frame_view_(channel_ptrs_.data(),
+ channels_.size(),
+ samples_per_channel) {}
+
+VectorFloatFrame::~VectorFloatFrame() = default;
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/vector_float_frame.h b/third_party/libwebrtc/modules/audio_processing/agc2/vector_float_frame.h
new file mode 100644
index 0000000000..b521f346f9
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/vector_float_frame.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AGC2_VECTOR_FLOAT_FRAME_H_
+#define MODULES_AUDIO_PROCESSING_AGC2_VECTOR_FLOAT_FRAME_H_
+
+#include <vector>
+
+#include "modules/audio_processing/include/audio_frame_view.h"
+
+namespace webrtc {
+
+// A construct consisting of a multi-channel audio frame and an AudioFrameView
+// of it.
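+// Example usage (a sketch, not exercised in this header):
+//   VectorFloatFrame frame(/*num_channels=*/2, /*samples_per_channel=*/480,
+//                          /*start_value=*/0.0f);
+//   AudioFrameView<float> view = frame.float_frame_view();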
+class VectorFloatFrame {
+ public:
+ VectorFloatFrame(int num_channels,
+ int samples_per_channel,
+ float start_value);
+ const AudioFrameView<float>& float_frame_view() { return float_frame_view_; }
+ AudioFrameView<const float> float_frame_view() const {
+ return float_frame_view_;
+ }
+
+ ~VectorFloatFrame();
+
+ private:
+ std::vector<std::vector<float>> channels_;
+ std::vector<float*> channel_ptrs_;
+ AudioFrameView<float> float_frame_view_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AGC2_VECTOR_FLOAT_FRAME_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/api_gn/moz.build b/third_party/libwebrtc/modules/audio_processing/api_gn/moz.build
new file mode 100644
index 0000000000..eb8d4aaa2c
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/api_gn/moz.build
@@ -0,0 +1,205 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/audio_processing/include/audio_processing.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("api_gn")
diff --git a/third_party/libwebrtc/modules/audio_processing/apm_logging_gn/moz.build b/third_party/libwebrtc/modules/audio_processing/apm_logging_gn/moz.build
new file mode 100644
index 0000000000..555f8dcf74
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/apm_logging_gn/moz.build
@@ -0,0 +1,213 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_APM_DEBUG_DUMP"] = "0"
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/audio_processing/logging/apm_data_dumper.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("apm_logging_gn")
diff --git a/third_party/libwebrtc/modules/audio_processing/audio_buffer.cc b/third_party/libwebrtc/modules/audio_processing/audio_buffer.cc
new file mode 100644
index 0000000000..3dbe1fe072
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/audio_buffer.cc
@@ -0,0 +1,399 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/audio_buffer.h"
+
+#include <string.h>
+
+#include <cstdint>
+
+#include "common_audio/channel_buffer.h"
+#include "common_audio/include/audio_util.h"
+#include "common_audio/resampler/push_sinc_resampler.h"
+#include "modules/audio_processing/splitting_filter.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace {
+
+constexpr size_t kSamplesPer32kHzChannel = 320;
+constexpr size_t kSamplesPer48kHzChannel = 480;
+constexpr size_t kMaxSamplesPerChannel = AudioBuffer::kMaxSampleRate / 100;
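+// Equals 3840 samples per 10 ms channel at the maximum rate of 384 kHz.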
+
+size_t NumBandsFromFramesPerChannel(size_t num_frames) {
+ if (num_frames == kSamplesPer32kHzChannel) {
+ return 2;
+ }
+ if (num_frames == kSamplesPer48kHzChannel) {
+ return 3;
+ }
+ return 1;
+}
+
+} // namespace
+
+AudioBuffer::AudioBuffer(size_t input_rate,
+ size_t input_num_channels,
+ size_t buffer_rate,
+ size_t buffer_num_channels,
+ size_t output_rate,
+ size_t output_num_channels)
+ : input_num_frames_(static_cast<int>(input_rate) / 100),
+ input_num_channels_(input_num_channels),
+ buffer_num_frames_(static_cast<int>(buffer_rate) / 100),
+ buffer_num_channels_(buffer_num_channels),
+ output_num_frames_(static_cast<int>(output_rate) / 100),
+ output_num_channels_(0),
+ num_channels_(buffer_num_channels),
+ num_bands_(NumBandsFromFramesPerChannel(buffer_num_frames_)),
+ num_split_frames_(rtc::CheckedDivExact(buffer_num_frames_, num_bands_)),
+ data_(
+ new ChannelBuffer<float>(buffer_num_frames_, buffer_num_channels_)) {
+ RTC_DCHECK_GT(input_num_frames_, 0);
+ RTC_DCHECK_GT(buffer_num_frames_, 0);
+ RTC_DCHECK_GT(output_num_frames_, 0);
+ RTC_DCHECK_GT(input_num_channels_, 0);
+ RTC_DCHECK_GT(buffer_num_channels_, 0);
+ RTC_DCHECK_LE(buffer_num_channels_, input_num_channels_);
+
+ const bool input_resampling_needed = input_num_frames_ != buffer_num_frames_;
+ const bool output_resampling_needed =
+ output_num_frames_ != buffer_num_frames_;
+ if (input_resampling_needed) {
+ for (size_t i = 0; i < buffer_num_channels_; ++i) {
+ input_resamplers_.push_back(std::unique_ptr<PushSincResampler>(
+ new PushSincResampler(input_num_frames_, buffer_num_frames_)));
+ }
+ }
+
+ if (output_resampling_needed) {
+ for (size_t i = 0; i < buffer_num_channels_; ++i) {
+ output_resamplers_.push_back(std::unique_ptr<PushSincResampler>(
+ new PushSincResampler(buffer_num_frames_, output_num_frames_)));
+ }
+ }
+
+ if (num_bands_ > 1) {
+ split_data_.reset(new ChannelBuffer<float>(
+ buffer_num_frames_, buffer_num_channels_, num_bands_));
+ splitting_filter_.reset(new SplittingFilter(
+ buffer_num_channels_, num_bands_, buffer_num_frames_));
+ }
+}
+
+AudioBuffer::~AudioBuffer() {}
+
+void AudioBuffer::set_downmixing_to_specific_channel(size_t channel) {
+ downmix_by_averaging_ = false;
+ RTC_DCHECK_GT(input_num_channels_, channel);
+ channel_for_downmixing_ = std::min(channel, input_num_channels_ - 1);
+}
+
+void AudioBuffer::set_downmixing_by_averaging() {
+ downmix_by_averaging_ = true;
+}
+
+void AudioBuffer::CopyFrom(const float* const* stacked_data,
+ const StreamConfig& stream_config) {
+ RTC_DCHECK_EQ(stream_config.num_frames(), input_num_frames_);
+ RTC_DCHECK_EQ(stream_config.num_channels(), input_num_channels_);
+ RestoreNumChannels();
+ const bool downmix_needed = input_num_channels_ > 1 && num_channels_ == 1;
+
+ const bool resampling_needed = input_num_frames_ != buffer_num_frames_;
+
+ if (downmix_needed) {
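+    // Downmix to a single channel, either by averaging all input channels or
+    // by selecting one of them, before any resampling and sample conversion.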
+ RTC_DCHECK_GE(kMaxSamplesPerChannel, input_num_frames_);
+
+ std::array<float, kMaxSamplesPerChannel> downmix;
+ if (downmix_by_averaging_) {
+ const float kOneByNumChannels = 1.f / input_num_channels_;
+ for (size_t i = 0; i < input_num_frames_; ++i) {
+ float value = stacked_data[0][i];
+ for (size_t j = 1; j < input_num_channels_; ++j) {
+ value += stacked_data[j][i];
+ }
+ downmix[i] = value * kOneByNumChannels;
+ }
+ }
+ const float* downmixed_data = downmix_by_averaging_
+ ? downmix.data()
+ : stacked_data[channel_for_downmixing_];
+
+ if (resampling_needed) {
+ input_resamplers_[0]->Resample(downmixed_data, input_num_frames_,
+ data_->channels()[0], buffer_num_frames_);
+ }
+ const float* data_to_convert =
+ resampling_needed ? data_->channels()[0] : downmixed_data;
+ FloatToFloatS16(data_to_convert, buffer_num_frames_, data_->channels()[0]);
+ } else {
+ if (resampling_needed) {
+ for (size_t i = 0; i < num_channels_; ++i) {
+ input_resamplers_[i]->Resample(stacked_data[i], input_num_frames_,
+ data_->channels()[i],
+ buffer_num_frames_);
+ FloatToFloatS16(data_->channels()[i], buffer_num_frames_,
+ data_->channels()[i]);
+ }
+ } else {
+ for (size_t i = 0; i < num_channels_; ++i) {
+ FloatToFloatS16(stacked_data[i], buffer_num_frames_,
+ data_->channels()[i]);
+ }
+ }
+ }
+}
+
+void AudioBuffer::CopyTo(const StreamConfig& stream_config,
+ float* const* stacked_data) {
+ RTC_DCHECK_EQ(stream_config.num_frames(), output_num_frames_);
+
+ const bool resampling_needed = output_num_frames_ != buffer_num_frames_;
+ if (resampling_needed) {
+ for (size_t i = 0; i < num_channels_; ++i) {
+ FloatS16ToFloat(data_->channels()[i], buffer_num_frames_,
+ data_->channels()[i]);
+ output_resamplers_[i]->Resample(data_->channels()[i], buffer_num_frames_,
+ stacked_data[i], output_num_frames_);
+ }
+ } else {
+ for (size_t i = 0; i < num_channels_; ++i) {
+ FloatS16ToFloat(data_->channels()[i], buffer_num_frames_,
+ stacked_data[i]);
+ }
+ }
+
+ for (size_t i = num_channels_; i < stream_config.num_channels(); ++i) {
+ memcpy(stacked_data[i], stacked_data[0],
+ output_num_frames_ * sizeof(**stacked_data));
+ }
+}
+
+void AudioBuffer::CopyTo(AudioBuffer* buffer) const {
+ RTC_DCHECK_EQ(buffer->num_frames(), output_num_frames_);
+
+ const bool resampling_needed = output_num_frames_ != buffer_num_frames_;
+ if (resampling_needed) {
+ for (size_t i = 0; i < num_channels_; ++i) {
+ output_resamplers_[i]->Resample(data_->channels()[i], buffer_num_frames_,
+ buffer->channels()[i],
+ buffer->num_frames());
+ }
+ } else {
+ for (size_t i = 0; i < num_channels_; ++i) {
+ memcpy(buffer->channels()[i], data_->channels()[i],
+ buffer_num_frames_ * sizeof(**buffer->channels()));
+ }
+ }
+
+ for (size_t i = num_channels_; i < buffer->num_channels(); ++i) {
+ memcpy(buffer->channels()[i], buffer->channels()[0],
+ output_num_frames_ * sizeof(**buffer->channels()));
+ }
+}
+
+void AudioBuffer::RestoreNumChannels() {
+ num_channels_ = buffer_num_channels_;
+ data_->set_num_channels(buffer_num_channels_);
+ if (split_data_.get()) {
+ split_data_->set_num_channels(buffer_num_channels_);
+ }
+}
+
+void AudioBuffer::set_num_channels(size_t num_channels) {
+ RTC_DCHECK_GE(buffer_num_channels_, num_channels);
+ num_channels_ = num_channels;
+ data_->set_num_channels(num_channels);
+ if (split_data_.get()) {
+ split_data_->set_num_channels(num_channels);
+ }
+}
+
+// The resampler is only for supporting 48kHz to 16kHz in the reverse stream.
+void AudioBuffer::CopyFrom(const int16_t* const interleaved_data,
+ const StreamConfig& stream_config) {
+ RTC_DCHECK_EQ(stream_config.num_channels(), input_num_channels_);
+ RTC_DCHECK_EQ(stream_config.num_frames(), input_num_frames_);
+ RestoreNumChannels();
+
+ const bool resampling_required = input_num_frames_ != buffer_num_frames_;
+
+ const int16_t* interleaved = interleaved_data;
+ if (num_channels_ == 1) {
+ if (input_num_channels_ == 1) {
+ if (resampling_required) {
+ std::array<float, kMaxSamplesPerChannel> float_buffer;
+ S16ToFloatS16(interleaved, input_num_frames_, float_buffer.data());
+ input_resamplers_[0]->Resample(float_buffer.data(), input_num_frames_,
+ data_->channels()[0],
+ buffer_num_frames_);
+ } else {
+ S16ToFloatS16(interleaved, input_num_frames_, data_->channels()[0]);
+ }
+ } else {
+ std::array<float, kMaxSamplesPerChannel> float_buffer;
+ float* downmixed_data =
+ resampling_required ? float_buffer.data() : data_->channels()[0];
+ if (downmix_by_averaging_) {
+ for (size_t j = 0, k = 0; j < input_num_frames_; ++j) {
+ int32_t sum = 0;
+ for (size_t i = 0; i < input_num_channels_; ++i, ++k) {
+ sum += interleaved[k];
+ }
+ downmixed_data[j] = sum / static_cast<int16_t>(input_num_channels_);
+ }
+ } else {
+ for (size_t j = 0, k = channel_for_downmixing_; j < input_num_frames_;
+ ++j, k += input_num_channels_) {
+ downmixed_data[j] = interleaved[k];
+ }
+ }
+
+ if (resampling_required) {
+ input_resamplers_[0]->Resample(downmixed_data, input_num_frames_,
+ data_->channels()[0],
+ buffer_num_frames_);
+ }
+ }
+ } else {
+ auto deinterleave_channel = [](size_t channel, size_t num_channels,
+ size_t samples_per_channel, const int16_t* x,
+ float* y) {
+ for (size_t j = 0, k = channel; j < samples_per_channel;
+ ++j, k += num_channels) {
+ y[j] = x[k];
+ }
+ };
+
+ if (resampling_required) {
+ std::array<float, kMaxSamplesPerChannel> float_buffer;
+ for (size_t i = 0; i < num_channels_; ++i) {
+ deinterleave_channel(i, num_channels_, input_num_frames_, interleaved,
+ float_buffer.data());
+ input_resamplers_[i]->Resample(float_buffer.data(), input_num_frames_,
+ data_->channels()[i],
+ buffer_num_frames_);
+ }
+ } else {
+ for (size_t i = 0; i < num_channels_; ++i) {
+ deinterleave_channel(i, num_channels_, input_num_frames_, interleaved,
+ data_->channels()[i]);
+ }
+ }
+ }
+}
+
+void AudioBuffer::CopyTo(const StreamConfig& stream_config,
+ int16_t* const interleaved_data) {
+ const size_t config_num_channels = stream_config.num_channels();
+
+ RTC_DCHECK(config_num_channels == num_channels_ || num_channels_ == 1);
+ RTC_DCHECK_EQ(stream_config.num_frames(), output_num_frames_);
+
+ const bool resampling_required = buffer_num_frames_ != output_num_frames_;
+
+ int16_t* interleaved = interleaved_data;
+ if (num_channels_ == 1) {
+ std::array<float, kMaxSamplesPerChannel> float_buffer;
+
+ if (resampling_required) {
+ output_resamplers_[0]->Resample(data_->channels()[0], buffer_num_frames_,
+ float_buffer.data(), output_num_frames_);
+ }
+ const float* deinterleaved =
+ resampling_required ? float_buffer.data() : data_->channels()[0];
+
+ if (config_num_channels == 1) {
+ for (size_t j = 0; j < output_num_frames_; ++j) {
+ interleaved[j] = FloatS16ToS16(deinterleaved[j]);
+ }
+ } else {
+ for (size_t i = 0, k = 0; i < output_num_frames_; ++i) {
+ float tmp = FloatS16ToS16(deinterleaved[i]);
+ for (size_t j = 0; j < config_num_channels; ++j, ++k) {
+ interleaved[k] = tmp;
+ }
+ }
+ }
+ } else {
+ auto interleave_channel = [](size_t channel, size_t num_channels,
+ size_t samples_per_channel, const float* x,
+ int16_t* y) {
+ for (size_t k = 0, j = channel; k < samples_per_channel;
+ ++k, j += num_channels) {
+ y[j] = FloatS16ToS16(x[k]);
+ }
+ };
+
+ if (resampling_required) {
+ for (size_t i = 0; i < num_channels_; ++i) {
+ std::array<float, kMaxSamplesPerChannel> float_buffer;
+ output_resamplers_[i]->Resample(data_->channels()[i],
+ buffer_num_frames_, float_buffer.data(),
+ output_num_frames_);
+ interleave_channel(i, config_num_channels, output_num_frames_,
+ float_buffer.data(), interleaved);
+ }
+ } else {
+ for (size_t i = 0; i < num_channels_; ++i) {
+ interleave_channel(i, config_num_channels, output_num_frames_,
+ data_->channels()[i], interleaved);
+ }
+ }
+
+ for (size_t i = num_channels_; i < config_num_channels; ++i) {
+ for (size_t j = 0, k = i, n = num_channels_; j < output_num_frames_;
+ ++j, k += config_num_channels, n += config_num_channels) {
+ interleaved[k] = interleaved[n];
+ }
+ }
+ }
+}
+
+void AudioBuffer::SplitIntoFrequencyBands() {
+ splitting_filter_->Analysis(data_.get(), split_data_.get());
+}
+
+void AudioBuffer::MergeFrequencyBands() {
+ splitting_filter_->Synthesis(split_data_.get(), data_.get());
+}
+
+void AudioBuffer::ExportSplitChannelData(
+ size_t channel,
+ int16_t* const* split_band_data) const {
+ for (size_t k = 0; k < num_bands(); ++k) {
+ const float* band_data = split_bands_const(channel)[k];
+
+ RTC_DCHECK(split_band_data[k]);
+ RTC_DCHECK(band_data);
+ for (size_t i = 0; i < num_frames_per_band(); ++i) {
+ split_band_data[k][i] = FloatS16ToS16(band_data[i]);
+ }
+ }
+}
+
+void AudioBuffer::ImportSplitChannelData(
+ size_t channel,
+ const int16_t* const* split_band_data) {
+ for (size_t k = 0; k < num_bands(); ++k) {
+ float* band_data = split_bands(channel)[k];
+ RTC_DCHECK(split_band_data[k]);
+ RTC_DCHECK(band_data);
+ for (size_t i = 0; i < num_frames_per_band(); ++i) {
+ band_data[i] = split_band_data[k][i];
+ }
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/audio_buffer.h b/third_party/libwebrtc/modules/audio_processing/audio_buffer.h
new file mode 100644
index 0000000000..d866b8bce5
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/audio_buffer.h
@@ -0,0 +1,184 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AUDIO_BUFFER_H_
+#define MODULES_AUDIO_PROCESSING_AUDIO_BUFFER_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <memory>
+#include <vector>
+
+#include "common_audio/channel_buffer.h"
+#include "modules/audio_processing/include/audio_processing.h"
+
+namespace webrtc {
+
+class PushSincResampler;
+class SplittingFilter;
+
+enum Band { kBand0To8kHz = 0, kBand8To16kHz = 1, kBand16To24kHz = 2 };
+
+// Stores any audio data in a way that allows the audio processing module to
+// operate on it in a controlled manner.
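+//
+// Samples are stored internally as floats at int16 scale: CopyFrom() converts
+// via FloatToFloatS16() and CopyTo() converts back. Sketch of a typical round
+// trip, assuming a matching StreamConfig `config`:
+//   AudioBuffer ab(48000, 2, 48000, 2, 48000, 2);
+//   ab.CopyFrom(stacked_input, config);  // stacked_input: const float* const*
+//   ab.CopyTo(config, stacked_output);   // stacked_output: float* const*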
+class AudioBuffer {
+ public:
+ static const int kSplitBandSize = 160;
+ static const size_t kMaxSampleRate = 384000;
+ AudioBuffer(size_t input_rate,
+ size_t input_num_channels,
+ size_t buffer_rate,
+ size_t buffer_num_channels,
+ size_t output_rate,
+ size_t output_num_channels);
+
+ virtual ~AudioBuffer();
+
+ AudioBuffer(const AudioBuffer&) = delete;
+ AudioBuffer& operator=(const AudioBuffer&) = delete;
+
+ // Specify that downmixing should be done by selecting a single channel.
+ void set_downmixing_to_specific_channel(size_t channel);
+
+  // Specify that downmixing should be done by averaging all channels.
+ void set_downmixing_by_averaging();
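+  // Note: downmixing takes effect in CopyFrom() when the buffer is mono
+  // (buffer_num_channels == 1) and the input has more than one channel.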
+
+  // Set the number of channels in the buffer. The specified number of channels
+  // cannot be larger than the buffer_num_channels specified at construction.
+  // The number of channels is reset at each call to CopyFrom.
+ void set_num_channels(size_t num_channels);
+
+ size_t num_channels() const { return num_channels_; }
+ size_t num_frames() const { return buffer_num_frames_; }
+ size_t num_frames_per_band() const { return num_split_frames_; }
+ size_t num_bands() const { return num_bands_; }
+
+ // Returns pointer arrays to the full-band channels.
+ // Usage:
+ // channels()[channel][sample].
+ // Where:
+ // 0 <= channel < `buffer_num_channels_`
+ // 0 <= sample < `buffer_num_frames_`
+ float* const* channels() { return data_->channels(); }
+ const float* const* channels_const() const { return data_->channels(); }
+
+ // Returns pointer arrays to the bands for a specific channel.
+ // Usage:
+ // split_bands(channel)[band][sample].
+ // Where:
+ // 0 <= channel < `buffer_num_channels_`
+ // 0 <= band < `num_bands_`
+ // 0 <= sample < `num_split_frames_`
+ const float* const* split_bands_const(size_t channel) const {
+ return split_data_.get() ? split_data_->bands(channel)
+ : data_->bands(channel);
+ }
+ float* const* split_bands(size_t channel) {
+ return split_data_.get() ? split_data_->bands(channel)
+ : data_->bands(channel);
+ }
+
+ // Returns a pointer array to the channels for a specific band.
+ // Usage:
+ // split_channels(band)[channel][sample].
+ // Where:
+ // 0 <= band < `num_bands_`
+ // 0 <= channel < `buffer_num_channels_`
+ // 0 <= sample < `num_split_frames_`
+ const float* const* split_channels_const(Band band) const {
+ if (split_data_.get()) {
+ return split_data_->channels(band);
+ } else {
+ return band == kBand0To8kHz ? data_->channels() : nullptr;
+ }
+ }
+
+ // Copies data into the buffer.
+ void CopyFrom(const int16_t* const interleaved_data,
+ const StreamConfig& stream_config);
+ void CopyFrom(const float* const* stacked_data,
+ const StreamConfig& stream_config);
+
+ // Copies data from the buffer.
+ void CopyTo(const StreamConfig& stream_config,
+ int16_t* const interleaved_data);
+ void CopyTo(const StreamConfig& stream_config, float* const* stacked_data);
+ void CopyTo(AudioBuffer* buffer) const;
+
+ // Splits the buffer data into frequency bands.
+ void SplitIntoFrequencyBands();
+
+ // Recombines the frequency bands into a full-band signal.
+ void MergeFrequencyBands();
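+  // E.g., a 48 kHz buffer (480 frames per channel) splits into num_bands() ==
+  // 3 bands with num_frames_per_band() == 160 samples each;
+  // MergeFrequencyBands() reverses the split.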
+
+ // Copies the split bands data into the integer two-dimensional array.
+ void ExportSplitChannelData(size_t channel,
+ int16_t* const* split_band_data) const;
+
+ // Copies the data in the integer two-dimensional array into the split_bands
+ // data.
+ void ImportSplitChannelData(size_t channel,
+ const int16_t* const* split_band_data);
+
+ static const size_t kMaxSplitFrameLength = 160;
+ static const size_t kMaxNumBands = 3;
+
+ // Deprecated methods, will be removed soon.
+ float* const* channels_f() { return channels(); }
+ const float* const* channels_const_f() const { return channels_const(); }
+ const float* const* split_bands_const_f(size_t channel) const {
+ return split_bands_const(channel);
+ }
+ float* const* split_bands_f(size_t channel) { return split_bands(channel); }
+ const float* const* split_channels_const_f(Band band) const {
+ return split_channels_const(band);
+ }
+
+ private:
+ FRIEND_TEST_ALL_PREFIXES(AudioBufferTest,
+ SetNumChannelsSetsChannelBuffersNumChannels);
+ void RestoreNumChannels();
+
+ const size_t input_num_frames_;
+ const size_t input_num_channels_;
+ const size_t buffer_num_frames_;
+ const size_t buffer_num_channels_;
+ const size_t output_num_frames_;
+ const size_t output_num_channels_;
+
+ size_t num_channels_;
+ size_t num_bands_;
+ size_t num_split_frames_;
+
+ std::unique_ptr<ChannelBuffer<float>> data_;
+ std::unique_ptr<ChannelBuffer<float>> split_data_;
+ std::unique_ptr<SplittingFilter> splitting_filter_;
+ std::vector<std::unique_ptr<PushSincResampler>> input_resamplers_;
+ std::vector<std::unique_ptr<PushSincResampler>> output_resamplers_;
+ bool downmix_by_averaging_ = true;
+ size_t channel_for_downmixing_ = 0;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AUDIO_BUFFER_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/audio_buffer_gn/moz.build b/third_party/libwebrtc/modules/audio_processing/audio_buffer_gn/moz.build
new file mode 100644
index 0000000000..e642368635
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/audio_buffer_gn/moz.build
@@ -0,0 +1,215 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_APM_DEBUG_DUMP"] = "0"
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/audio_processing/audio_buffer.cc",
+ "/third_party/libwebrtc/modules/audio_processing/splitting_filter.cc",
+ "/third_party/libwebrtc/modules/audio_processing/three_band_filter_bank.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("audio_buffer_gn")
diff --git a/third_party/libwebrtc/modules/audio_processing/audio_buffer_unittest.cc b/third_party/libwebrtc/modules/audio_processing/audio_buffer_unittest.cc
new file mode 100644
index 0000000000..f3b2ddc689
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/audio_buffer_unittest.cc
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/audio_buffer.h"
+
+#include <cmath>
+
+#include "test/gtest.h"
+#include "test/testsupport/rtc_expect_death.h"
+
+namespace webrtc {
+
+namespace {
+
+const size_t kSampleRateHz = 48000u;
+const size_t kStereo = 2u;
+const size_t kMono = 1u;
+
+void ExpectNumChannels(const AudioBuffer& ab, size_t num_channels) {
+ EXPECT_EQ(ab.num_channels(), num_channels);
+}
+
+} // namespace
+
+TEST(AudioBufferTest, SetNumChannelsSetsChannelBuffersNumChannels) {
+ AudioBuffer ab(kSampleRateHz, kStereo, kSampleRateHz, kStereo, kSampleRateHz,
+ kStereo);
+ ExpectNumChannels(ab, kStereo);
+ ab.set_num_channels(1);
+ ExpectNumChannels(ab, kMono);
+ ab.RestoreNumChannels();
+ ExpectNumChannels(ab, kStereo);
+}
+
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+TEST(AudioBufferDeathTest, SetNumChannelsDeathTest) {
+ AudioBuffer ab(kSampleRateHz, kMono, kSampleRateHz, kMono, kSampleRateHz,
+ kMono);
+ RTC_EXPECT_DEATH(ab.set_num_channels(kStereo), "num_channels");
+}
+#endif
+
+TEST(AudioBufferTest, CopyWithoutResampling) {
+ AudioBuffer ab1(32000, 2, 32000, 2, 32000, 2);
+ AudioBuffer ab2(32000, 2, 32000, 2, 32000, 2);
+ // Fill first buffer.
+ for (size_t ch = 0; ch < ab1.num_channels(); ++ch) {
+ for (size_t i = 0; i < ab1.num_frames(); ++i) {
+ ab1.channels()[ch][i] = i + ch;
+ }
+ }
+ // Copy to second buffer.
+ ab1.CopyTo(&ab2);
+ // Verify content of second buffer.
+ for (size_t ch = 0; ch < ab2.num_channels(); ++ch) {
+ for (size_t i = 0; i < ab2.num_frames(); ++i) {
+ EXPECT_EQ(ab2.channels()[ch][i], i + ch);
+ }
+ }
+}
+
+TEST(AudioBufferTest, CopyWithResampling) {
+ AudioBuffer ab1(32000, 2, 32000, 2, 48000, 2);
+ AudioBuffer ab2(48000, 2, 48000, 2, 48000, 2);
+ float energy_ab1 = 0.f;
+ float energy_ab2 = 0.f;
+ const float pi = std::acos(-1.f);
+ // Put a sine and compute energy of first buffer.
+ for (size_t ch = 0; ch < ab1.num_channels(); ++ch) {
+ for (size_t i = 0; i < ab1.num_frames(); ++i) {
+ ab1.channels()[ch][i] = std::sin(2 * pi * 100.f / 32000.f * i);
+ energy_ab1 += ab1.channels()[ch][i] * ab1.channels()[ch][i];
+ }
+ }
+ // Copy to second buffer.
+ ab1.CopyTo(&ab2);
+ // Compute energy of second buffer.
+ for (size_t ch = 0; ch < ab2.num_channels(); ++ch) {
+ for (size_t i = 0; i < ab2.num_frames(); ++i) {
+ energy_ab2 += ab2.channels()[ch][i] * ab2.channels()[ch][i];
+ }
+ }
+ // Verify that energies match.
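+  // ab2 holds 48000/32000 = 1.5x as many samples per channel, so its energy
+  // is scaled by 32000/48000 before the comparison.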
+ EXPECT_NEAR(energy_ab1, energy_ab2 * 32000.f / 48000.f, .01f * energy_ab1);
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/audio_frame_proxies_gn/moz.build b/third_party/libwebrtc/modules/audio_processing/audio_frame_proxies_gn/moz.build
new file mode 100644
index 0000000000..5485891626
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/audio_frame_proxies_gn/moz.build
@@ -0,0 +1,205 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/audio_processing/include/audio_frame_proxies.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("audio_frame_proxies_gn")
diff --git a/third_party/libwebrtc/modules/audio_processing/audio_frame_view_gn/moz.build b/third_party/libwebrtc/modules/audio_processing/audio_frame_view_gn/moz.build
new file mode 100644
index 0000000000..970deab9df
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/audio_frame_view_gn/moz.build
@@ -0,0 +1,193 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("audio_frame_view_gn")
diff --git a/third_party/libwebrtc/modules/audio_processing/audio_frame_view_unittest.cc b/third_party/libwebrtc/modules/audio_processing/audio_frame_view_unittest.cc
new file mode 100644
index 0000000000..fd25bc3b0b
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/audio_frame_view_unittest.cc
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2018 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/include/audio_frame_view.h"
+
+#include "modules/audio_processing/audio_buffer.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+TEST(AudioFrameTest, ConstructFromAudioBuffer) {
+ constexpr int kSampleRateHz = 48000;
+ constexpr int kNumChannels = 2;
+ constexpr float kFloatConstant = 1272.f;
+ constexpr float kIntConstant = 17252;
+ const webrtc::StreamConfig stream_config(kSampleRateHz, kNumChannels);
+ webrtc::AudioBuffer buffer(
+ stream_config.sample_rate_hz(), stream_config.num_channels(),
+ stream_config.sample_rate_hz(), stream_config.num_channels(),
+ stream_config.sample_rate_hz(), stream_config.num_channels());
+
+ AudioFrameView<float> non_const_view(buffer.channels(), buffer.num_channels(),
+ buffer.num_frames());
+ // Modification is allowed.
+ non_const_view.channel(0)[0] = kFloatConstant;
+ EXPECT_EQ(buffer.channels()[0][0], kFloatConstant);
+
+ AudioFrameView<const float> const_view(
+ buffer.channels(), buffer.num_channels(), buffer.num_frames());
+ // Modification is not allowed.
+ // const_view.channel(0)[0] = kFloatConstant;
+
+ // Assignment is allowed.
+ AudioFrameView<const float> other_const_view = non_const_view;
+ static_cast<void>(other_const_view);
+
+ // But not the other way. The following will fail:
+ // non_const_view = other_const_view;
+
+ AudioFrameView<float> non_const_float_view(
+ buffer.channels(), buffer.num_channels(), buffer.num_frames());
+ non_const_float_view.channel(0)[0] = kIntConstant;
+ EXPECT_EQ(buffer.channels()[0][0], kIntConstant);
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/audio_processing_builder_impl.cc b/third_party/libwebrtc/modules/audio_processing/audio_processing_builder_impl.cc
new file mode 100644
index 0000000000..a246448c26
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/audio_processing_builder_impl.cc
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <memory>
+
+#include "api/make_ref_counted.h"
+#include "modules/audio_processing/audio_processing_impl.h"
+#include "modules/audio_processing/include/audio_processing.h"
+
+namespace webrtc {
+
+AudioProcessingBuilder::AudioProcessingBuilder() = default;
+AudioProcessingBuilder::~AudioProcessingBuilder() = default;
+
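+// A minimal usage sketch (assuming a default-constructed builder; the
+// builder's optional setters populate the members that Create() moves below):
+//
+//   rtc::scoped_refptr<AudioProcessing> apm =
+//       AudioProcessingBuilder().Create();
+//   AudioProcessing::Config config;
+//   config.echo_canceller.enabled = true;
+//   apm->ApplyConfig(config);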
+rtc::scoped_refptr<AudioProcessing> AudioProcessingBuilder::Create() {
+#ifdef WEBRTC_EXCLUDE_AUDIO_PROCESSING_MODULE
+ // Return a null pointer when the APM is excluded from the build.
+ return nullptr;
+#else // WEBRTC_EXCLUDE_AUDIO_PROCESSING_MODULE
+ return rtc::make_ref_counted<AudioProcessingImpl>(
+ config_, std::move(capture_post_processing_),
+ std::move(render_pre_processing_), std::move(echo_control_factory_),
+ std::move(echo_detector_), std::move(capture_analyzer_));
+#endif
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/audio_processing_gn/moz.build b/third_party/libwebrtc/modules/audio_processing/audio_processing_gn/moz.build
new file mode 100644
index 0000000000..0e55763c21
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/audio_processing_gn/moz.build
@@ -0,0 +1,219 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_APM_DEBUG_DUMP"] = "0"
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+SOURCES += [
+ "/third_party/libwebrtc/modules/audio_processing/echo_control_mobile_impl.cc",
+ "/third_party/libwebrtc/modules/audio_processing/gain_control_impl.cc"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/audio_processing/audio_processing_builder_impl.cc",
+ "/third_party/libwebrtc/modules/audio_processing/audio_processing_impl.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("audio_processing_gn")
diff --git a/third_party/libwebrtc/modules/audio_processing/audio_processing_impl.cc b/third_party/libwebrtc/modules/audio_processing/audio_processing_impl.cc
new file mode 100644
index 0000000000..57d51a2a65
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/audio_processing_impl.cc
@@ -0,0 +1,2193 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/audio_processing_impl.h"
+
+#include <algorithm>
+#include <cstdint>
+#include <memory>
+#include <string>
+#include <type_traits>
+#include <utility>
+
+#include "absl/strings/match.h"
+#include "absl/strings/string_view.h"
+#include "absl/types/optional.h"
+#include "api/array_view.h"
+#include "api/audio/audio_frame.h"
+#include "common_audio/audio_converter.h"
+#include "common_audio/include/audio_util.h"
+#include "modules/audio_processing/aec_dump/aec_dump_factory.h"
+#include "modules/audio_processing/audio_buffer.h"
+#include "modules/audio_processing/include/audio_frame_view.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+#include "modules/audio_processing/optionally_built_submodule_creators.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/time_utils.h"
+#include "rtc_base/trace_event.h"
+#include "system_wrappers/include/denormal_disabler.h"
+#include "system_wrappers/include/field_trial.h"
+#include "system_wrappers/include/metrics.h"
+
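+// Propagates a non-zero error code from `expr` to the caller; success is
+// represented by kNoError (zero) throughout the APM.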
+#define RETURN_ON_ERR(expr) \
+ do { \
+ int err = (expr); \
+ if (err != kNoError) { \
+ return err; \
+ } \
+ } while (0)
+
+namespace webrtc {
+
+namespace {
+
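+// Returns true for the sample rates at which the capture pipeline can split
+// the signal into frequency bands.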
+bool SampleRateSupportsMultiBand(int sample_rate_hz) {
+ return sample_rate_hz == AudioProcessing::kSampleRate32kHz ||
+ sample_rate_hz == AudioProcessing::kSampleRate48kHz;
+}
+
+// Checks whether the high-pass filter must run in split-band mode, i.e.,
+// whether the full-band high-pass filter kill switch is enabled.
+bool EnforceSplitBandHpf() {
+ return field_trial::IsEnabled("WebRTC-FullBandHpfKillSwitch");
+}
+
+// Checks whether AEC3 should be allowed to decide what the default
+// configuration should be based on the render and capture channel configuration
+// at hand.
+bool UseSetupSpecificDefaultAec3Config() {
+ return !field_trial::IsEnabled(
+ "WebRTC-Aec3SetupSpecificDefaultConfigDefaultsKillSwitch");
+}
+
+// If the "WebRTC-Audio-TransientSuppressorVadMode" field trial is unspecified,
+// returns `TransientSuppressor::VadMode::kDefault`, otherwise parses the field
+// trial and returns the specified mode:
+// - WebRTC-Audio-TransientSuppressorVadMode/Enabled-Default returns `kDefault`;
+// - WebRTC-Audio-TransientSuppressorVadMode/Enabled-RnnVad returns `kRnnVad`;
+// - WebRTC-Audio-TransientSuppressorVadMode/Enabled-NoVad returns `kNoVad`.
+TransientSuppressor::VadMode GetTransientSuppressorVadMode() {
+ constexpr char kFieldTrial[] = "WebRTC-Audio-TransientSuppressorVadMode";
+ std::string full_name = webrtc::field_trial::FindFullName(kFieldTrial);
+ if (full_name.empty() || absl::EndsWith(full_name, "-Default")) {
+ return TransientSuppressor::VadMode::kDefault;
+ }
+ if (absl::EndsWith(full_name, "-RnnVad")) {
+ return TransientSuppressor::VadMode::kRnnVad;
+ }
+ if (absl::EndsWith(full_name, "-NoVad")) {
+ return TransientSuppressor::VadMode::kNoVad;
+ }
+  // Fall back to the default.
+ RTC_LOG(LS_WARNING) << "Invalid parameter for " << kFieldTrial;
+ return TransientSuppressor::VadMode::kDefault;
+}
+
+// Identify the native processing rate that best handles a sample rate.
+int SuitableProcessRate(int minimum_rate,
+ int max_splitting_rate,
+ bool band_splitting_required) {
+ const int uppermost_native_rate =
+ band_splitting_required ? max_splitting_rate : 48000;
+ for (auto rate : {16000, 32000, 48000}) {
+ if (rate >= uppermost_native_rate) {
+ return uppermost_native_rate;
+ }
+ if (rate >= minimum_rate) {
+ return rate;
+ }
+ }
+ RTC_DCHECK_NOTREACHED();
+ return uppermost_native_rate;
+}
+
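+// Maps the AGC1 configuration mode onto the corresponding GainControl
+// interface mode.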
+GainControl::Mode Agc1ConfigModeToInterfaceMode(
+ AudioProcessing::Config::GainController1::Mode mode) {
+ using Agc1Config = AudioProcessing::Config::GainController1;
+ switch (mode) {
+ case Agc1Config::kAdaptiveAnalog:
+ return GainControl::kAdaptiveAnalog;
+ case Agc1Config::kAdaptiveDigital:
+ return GainControl::kAdaptiveDigital;
+ case Agc1Config::kFixedDigital:
+ return GainControl::kFixedDigital;
+ }
+ RTC_CHECK_NOTREACHED();
+}
+
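+// Returns true unless the muted-state kill switch is enabled; when true,
+// parts of the processing may be skipped while the capture output is unused.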
+bool MinimizeProcessingForUnusedOutput() {
+ return !field_trial::IsEnabled("WebRTC-MutedStateKillSwitch");
+}
+
+// Maximum lengths that frames of samples passed from the render side to the
+// capture side can have (does not apply to AEC3).
+static const size_t kMaxAllowedValuesOfSamplesPerBand = 160;
+static const size_t kMaxAllowedValuesOfSamplesPerFrame = 480;
+
+// Maximum number of frames to buffer in the render queue.
+// TODO(peah): Decrease this once we properly handle hugely unbalanced
+// reverse and forward call numbers.
+static const size_t kMaxNumFramesToBuffer = 100;
+
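+// Packs the first render channel into `packed_buffer` for consumption by the
+// echo detector.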
+void PackRenderAudioBufferForEchoDetector(const AudioBuffer& audio,
+ std::vector<float>& packed_buffer) {
+ packed_buffer.clear();
+ packed_buffer.insert(packed_buffer.end(), audio.channels_const()[0],
+ audio.channels_const()[0] + audio.num_frames());
+}
+
+} // namespace
+
+// Throughout webrtc, it's assumed that success is represented by zero.
+static_assert(AudioProcessing::kNoError == 0, "kNoError must be zero");
+
+AudioProcessingImpl::SubmoduleStates::SubmoduleStates(
+ bool capture_post_processor_enabled,
+ bool render_pre_processor_enabled,
+ bool capture_analyzer_enabled)
+ : capture_post_processor_enabled_(capture_post_processor_enabled),
+ render_pre_processor_enabled_(render_pre_processor_enabled),
+ capture_analyzer_enabled_(capture_analyzer_enabled) {}
+
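+// Caches the enabled state of every submodule and reports whether any state
+// changed since the previous call (always true on the first update).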
+bool AudioProcessingImpl::SubmoduleStates::Update(
+ bool high_pass_filter_enabled,
+ bool mobile_echo_controller_enabled,
+ bool noise_suppressor_enabled,
+ bool adaptive_gain_controller_enabled,
+ bool gain_controller2_enabled,
+ bool voice_activity_detector_enabled,
+ bool gain_adjustment_enabled,
+ bool echo_controller_enabled,
+ bool transient_suppressor_enabled) {
+ bool changed = false;
+ changed |= (high_pass_filter_enabled != high_pass_filter_enabled_);
+ changed |=
+ (mobile_echo_controller_enabled != mobile_echo_controller_enabled_);
+ changed |= (noise_suppressor_enabled != noise_suppressor_enabled_);
+ changed |=
+ (adaptive_gain_controller_enabled != adaptive_gain_controller_enabled_);
+ changed |= (gain_controller2_enabled != gain_controller2_enabled_);
+ changed |=
+ (voice_activity_detector_enabled != voice_activity_detector_enabled_);
+ changed |= (gain_adjustment_enabled != gain_adjustment_enabled_);
+ changed |= (echo_controller_enabled != echo_controller_enabled_);
+ changed |= (transient_suppressor_enabled != transient_suppressor_enabled_);
+ if (changed) {
+ high_pass_filter_enabled_ = high_pass_filter_enabled;
+ mobile_echo_controller_enabled_ = mobile_echo_controller_enabled;
+ noise_suppressor_enabled_ = noise_suppressor_enabled;
+ adaptive_gain_controller_enabled_ = adaptive_gain_controller_enabled;
+ gain_controller2_enabled_ = gain_controller2_enabled;
+ voice_activity_detector_enabled_ = voice_activity_detector_enabled;
+ gain_adjustment_enabled_ = gain_adjustment_enabled;
+ echo_controller_enabled_ = echo_controller_enabled;
+ transient_suppressor_enabled_ = transient_suppressor_enabled;
+ }
+
+ changed |= first_update_;
+ first_update_ = false;
+ return changed;
+}
+
+bool AudioProcessingImpl::SubmoduleStates::CaptureMultiBandSubModulesActive()
+ const {
+ return CaptureMultiBandProcessingPresent();
+}
+
+bool AudioProcessingImpl::SubmoduleStates::CaptureMultiBandProcessingPresent()
+ const {
+ // If echo controller is present, assume it performs active processing.
+ return CaptureMultiBandProcessingActive(/*ec_processing_active=*/true);
+}
+
+bool AudioProcessingImpl::SubmoduleStates::CaptureMultiBandProcessingActive(
+ bool ec_processing_active) const {
+ return high_pass_filter_enabled_ || mobile_echo_controller_enabled_ ||
+ noise_suppressor_enabled_ || adaptive_gain_controller_enabled_ ||
+ (echo_controller_enabled_ && ec_processing_active);
+}
+
+bool AudioProcessingImpl::SubmoduleStates::CaptureFullBandProcessingActive()
+ const {
+ return gain_controller2_enabled_ || capture_post_processor_enabled_ ||
+ gain_adjustment_enabled_;
+}
+
+bool AudioProcessingImpl::SubmoduleStates::CaptureAnalyzerActive() const {
+ return capture_analyzer_enabled_;
+}
+
+bool AudioProcessingImpl::SubmoduleStates::RenderMultiBandSubModulesActive()
+ const {
+ return RenderMultiBandProcessingActive() || mobile_echo_controller_enabled_ ||
+ adaptive_gain_controller_enabled_ || echo_controller_enabled_;
+}
+
+bool AudioProcessingImpl::SubmoduleStates::RenderFullBandProcessingActive()
+ const {
+ return render_pre_processor_enabled_;
+}
+
+bool AudioProcessingImpl::SubmoduleStates::RenderMultiBandProcessingActive()
+ const {
+ return false;
+}
+
+bool AudioProcessingImpl::SubmoduleStates::HighPassFilteringRequired() const {
+ return high_pass_filter_enabled_ || mobile_echo_controller_enabled_ ||
+ noise_suppressor_enabled_;
+}
+
+AudioProcessingImpl::AudioProcessingImpl()
+ : AudioProcessingImpl(/*config=*/{},
+ /*capture_post_processor=*/nullptr,
+ /*render_pre_processor=*/nullptr,
+ /*echo_control_factory=*/nullptr,
+ /*echo_detector=*/nullptr,
+ /*capture_analyzer=*/nullptr) {}
+
+std::atomic<int> AudioProcessingImpl::instance_count_(0);
+
+AudioProcessingImpl::AudioProcessingImpl(
+ const AudioProcessing::Config& config,
+ std::unique_ptr<CustomProcessing> capture_post_processor,
+ std::unique_ptr<CustomProcessing> render_pre_processor,
+ std::unique_ptr<EchoControlFactory> echo_control_factory,
+ rtc::scoped_refptr<EchoDetector> echo_detector,
+ std::unique_ptr<CustomAudioAnalyzer> capture_analyzer)
+ : data_dumper_(new ApmDataDumper(instance_count_.fetch_add(1) + 1)),
+ use_setup_specific_default_aec3_config_(
+          UseSetupSpecificDefaultAec3Config()),
+ use_denormal_disabler_(
+ !field_trial::IsEnabled("WebRTC-ApmDenormalDisablerKillSwitch")),
+ transient_suppressor_vad_mode_(GetTransientSuppressorVadMode()),
+ capture_runtime_settings_(RuntimeSettingQueueSize()),
+ render_runtime_settings_(RuntimeSettingQueueSize()),
+ capture_runtime_settings_enqueuer_(&capture_runtime_settings_),
+ render_runtime_settings_enqueuer_(&render_runtime_settings_),
+ echo_control_factory_(std::move(echo_control_factory)),
+ config_(config),
+ submodule_states_(!!capture_post_processor,
+ !!render_pre_processor,
+ !!capture_analyzer),
+ submodules_(std::move(capture_post_processor),
+ std::move(render_pre_processor),
+ std::move(echo_detector),
+ std::move(capture_analyzer)),
+ constants_(!field_trial::IsEnabled(
+ "WebRTC-ApmExperimentalMultiChannelRenderKillSwitch"),
+ !field_trial::IsEnabled(
+ "WebRTC-ApmExperimentalMultiChannelCaptureKillSwitch"),
+ EnforceSplitBandHpf(),
+ MinimizeProcessingForUnusedOutput(),
+ field_trial::IsEnabled("WebRTC-TransientSuppressorForcedOff")),
+ capture_(),
+ capture_nonlocked_() {
+ RTC_LOG(LS_INFO) << "Injected APM submodules:"
+ "\nEcho control factory: "
+ << !!echo_control_factory_
+ << "\nEcho detector: " << !!submodules_.echo_detector
+ << "\nCapture analyzer: " << !!submodules_.capture_analyzer
+ << "\nCapture post processor: "
+ << !!submodules_.capture_post_processor
+ << "\nRender pre processor: "
+ << !!submodules_.render_pre_processor;
+ RTC_LOG(LS_INFO) << "Denormal disabler: "
+ << (DenormalDisabler::IsSupported() ? "supported"
+ : "unsupported");
+
+ // Mark Echo Controller enabled if a factory is injected.
+ capture_nonlocked_.echo_controller_enabled =
+ static_cast<bool>(echo_control_factory_);
+
+ Initialize();
+}
+
+AudioProcessingImpl::~AudioProcessingImpl() = default;
+
+int AudioProcessingImpl::Initialize() {
+ // Run in a single-threaded manner during initialization.
+ MutexLock lock_render(&mutex_render_);
+ MutexLock lock_capture(&mutex_capture_);
+ InitializeLocked();
+ return kNoError;
+}
+
+int AudioProcessingImpl::Initialize(const ProcessingConfig& processing_config) {
+ // Run in a single-threaded manner during initialization.
+ MutexLock lock_render(&mutex_render_);
+ MutexLock lock_capture(&mutex_capture_);
+ return InitializeLocked(processing_config);
+}
+
+int AudioProcessingImpl::MaybeInitializeRender(
+ const ProcessingConfig& processing_config) {
+ // Called from both threads. Thread check is therefore not possible.
+ if (processing_config == formats_.api_format) {
+ return kNoError;
+ }
+
+ MutexLock lock_capture(&mutex_capture_);
+ return InitializeLocked(processing_config);
+}
+
+void AudioProcessingImpl::InitializeLocked() {
+ UpdateActiveSubmoduleStates();
+
+ const int render_audiobuffer_sample_rate_hz =
+ formats_.api_format.reverse_output_stream().num_frames() == 0
+ ? formats_.render_processing_format.sample_rate_hz()
+ : formats_.api_format.reverse_output_stream().sample_rate_hz();
+ if (formats_.api_format.reverse_input_stream().num_channels() > 0) {
+ render_.render_audio.reset(new AudioBuffer(
+ formats_.api_format.reverse_input_stream().sample_rate_hz(),
+ formats_.api_format.reverse_input_stream().num_channels(),
+ formats_.render_processing_format.sample_rate_hz(),
+ formats_.render_processing_format.num_channels(),
+ render_audiobuffer_sample_rate_hz,
+ formats_.render_processing_format.num_channels()));
+ if (formats_.api_format.reverse_input_stream() !=
+ formats_.api_format.reverse_output_stream()) {
+ render_.render_converter = AudioConverter::Create(
+ formats_.api_format.reverse_input_stream().num_channels(),
+ formats_.api_format.reverse_input_stream().num_frames(),
+ formats_.api_format.reverse_output_stream().num_channels(),
+ formats_.api_format.reverse_output_stream().num_frames());
+ } else {
+ render_.render_converter.reset(nullptr);
+ }
+ } else {
+ render_.render_audio.reset(nullptr);
+ render_.render_converter.reset(nullptr);
+ }
+
+ capture_.capture_audio.reset(new AudioBuffer(
+ formats_.api_format.input_stream().sample_rate_hz(),
+ formats_.api_format.input_stream().num_channels(),
+ capture_nonlocked_.capture_processing_format.sample_rate_hz(),
+ formats_.api_format.output_stream().num_channels(),
+ formats_.api_format.output_stream().sample_rate_hz(),
+ formats_.api_format.output_stream().num_channels()));
+
+ if (capture_nonlocked_.capture_processing_format.sample_rate_hz() <
+ formats_.api_format.output_stream().sample_rate_hz() &&
+ formats_.api_format.output_stream().sample_rate_hz() == 48000) {
+ capture_.capture_fullband_audio.reset(
+ new AudioBuffer(formats_.api_format.input_stream().sample_rate_hz(),
+ formats_.api_format.input_stream().num_channels(),
+ formats_.api_format.output_stream().sample_rate_hz(),
+ formats_.api_format.output_stream().num_channels(),
+ formats_.api_format.output_stream().sample_rate_hz(),
+ formats_.api_format.output_stream().num_channels()));
+ } else {
+ capture_.capture_fullband_audio.reset();
+ }
+
+ AllocateRenderQueue();
+
+ InitializeGainController1();
+ InitializeTransientSuppressor();
+ InitializeHighPassFilter(true);
+ InitializeResidualEchoDetector();
+ InitializeEchoController();
+ InitializeGainController2(/*config_has_changed=*/true);
+ InitializeVoiceActivityDetector(/*config_has_changed=*/true);
+ InitializeNoiseSuppressor();
+ InitializeAnalyzer();
+ InitializePostProcessor();
+ InitializePreProcessor();
+ InitializeCaptureLevelsAdjuster();
+
+ if (aec_dump_) {
+ aec_dump_->WriteInitMessage(formats_.api_format, rtc::TimeUTCMillis());
+ }
+}
+
+int AudioProcessingImpl::InitializeLocked(const ProcessingConfig& config) {
+ UpdateActiveSubmoduleStates();
+
+ for (const auto& stream : config.streams) {
+ if (stream.num_channels() > 0 && stream.sample_rate_hz() <= 0) {
+ return kBadSampleRateError;
+ }
+ }
+
+ const size_t num_in_channels = config.input_stream().num_channels();
+ const size_t num_out_channels = config.output_stream().num_channels();
+
+ // Need at least one input channel.
+ // Need either one output channel or as many outputs as there are inputs.
+ if (num_in_channels == 0 ||
+ !(num_out_channels == 1 || num_out_channels == num_in_channels)) {
+ return kBadNumberChannelsError;
+ }
+
+ formats_.api_format = config;
+
+ // Choose maximum rate to use for the split filtering.
+ RTC_DCHECK(config_.pipeline.maximum_internal_processing_rate == 48000 ||
+ config_.pipeline.maximum_internal_processing_rate == 32000);
+ int max_splitting_rate = 48000;
+ if (config_.pipeline.maximum_internal_processing_rate == 32000) {
+ max_splitting_rate = config_.pipeline.maximum_internal_processing_rate;
+ }
+
+ int capture_processing_rate = SuitableProcessRate(
+ std::min(formats_.api_format.input_stream().sample_rate_hz(),
+ formats_.api_format.output_stream().sample_rate_hz()),
+ max_splitting_rate,
+ submodule_states_.CaptureMultiBandSubModulesActive() ||
+ submodule_states_.RenderMultiBandSubModulesActive());
+ RTC_DCHECK_NE(8000, capture_processing_rate);
+
+ capture_nonlocked_.capture_processing_format =
+ StreamConfig(capture_processing_rate);
+
+ int render_processing_rate;
+ if (!capture_nonlocked_.echo_controller_enabled) {
+ render_processing_rate = SuitableProcessRate(
+ std::min(formats_.api_format.reverse_input_stream().sample_rate_hz(),
+ formats_.api_format.reverse_output_stream().sample_rate_hz()),
+ max_splitting_rate,
+ submodule_states_.CaptureMultiBandSubModulesActive() ||
+ submodule_states_.RenderMultiBandSubModulesActive());
+ } else {
+ render_processing_rate = capture_processing_rate;
+ }
+
+ // If the forward sample rate is 8 kHz, the render stream is also processed
+ // at this rate.
+ if (capture_nonlocked_.capture_processing_format.sample_rate_hz() ==
+ kSampleRate8kHz) {
+ render_processing_rate = kSampleRate8kHz;
+ } else {
+ render_processing_rate =
+ std::max(render_processing_rate, static_cast<int>(kSampleRate16kHz));
+ }
+
+ RTC_DCHECK_NE(8000, render_processing_rate);
+
+ if (submodule_states_.RenderMultiBandSubModulesActive()) {
+ // By default, downmix the render stream to mono for analysis. This has been
+ // demonstrated to work well for AEC in most practical scenarios.
+ const bool multi_channel_render = config_.pipeline.multi_channel_render &&
+ constants_.multi_channel_render_support;
+ int render_processing_num_channels =
+ multi_channel_render
+ ? formats_.api_format.reverse_input_stream().num_channels()
+ : 1;
+ formats_.render_processing_format =
+ StreamConfig(render_processing_rate, render_processing_num_channels);
+ } else {
+ formats_.render_processing_format = StreamConfig(
+ formats_.api_format.reverse_input_stream().sample_rate_hz(),
+ formats_.api_format.reverse_input_stream().num_channels());
+ }
+
+ if (capture_nonlocked_.capture_processing_format.sample_rate_hz() ==
+ kSampleRate32kHz ||
+ capture_nonlocked_.capture_processing_format.sample_rate_hz() ==
+ kSampleRate48kHz) {
+ capture_nonlocked_.split_rate = kSampleRate16kHz;
+ } else {
+ capture_nonlocked_.split_rate =
+ capture_nonlocked_.capture_processing_format.sample_rate_hz();
+ }
+
+ InitializeLocked();
+ return kNoError;
+}
+
+void AudioProcessingImpl::ApplyConfig(const AudioProcessing::Config& config) {
+ RTC_LOG(LS_INFO) << "AudioProcessing::ApplyConfig: " << config.ToString();
+
+ // Run in a single-threaded manner when applying the settings.
+ MutexLock lock_render(&mutex_render_);
+ MutexLock lock_capture(&mutex_capture_);
+
+ const bool pipeline_config_changed =
+ config_.pipeline.multi_channel_render !=
+ config.pipeline.multi_channel_render ||
+ config_.pipeline.multi_channel_capture !=
+ config.pipeline.multi_channel_capture ||
+ config_.pipeline.maximum_internal_processing_rate !=
+ config.pipeline.maximum_internal_processing_rate;
+
+ const bool aec_config_changed =
+ config_.echo_canceller.enabled != config.echo_canceller.enabled ||
+ config_.echo_canceller.mobile_mode != config.echo_canceller.mobile_mode;
+
+ const bool agc1_config_changed =
+ config_.gain_controller1 != config.gain_controller1;
+
+ const bool agc2_config_changed =
+ config_.gain_controller2 != config.gain_controller2;
+
+ const bool ns_config_changed =
+ config_.noise_suppression.enabled != config.noise_suppression.enabled ||
+ config_.noise_suppression.level != config.noise_suppression.level;
+
+ const bool ts_config_changed = config_.transient_suppression.enabled !=
+ config.transient_suppression.enabled;
+
+ const bool pre_amplifier_config_changed =
+ config_.pre_amplifier.enabled != config.pre_amplifier.enabled ||
+ config_.pre_amplifier.fixed_gain_factor !=
+ config.pre_amplifier.fixed_gain_factor;
+
+ const bool gain_adjustment_config_changed =
+ config_.capture_level_adjustment != config.capture_level_adjustment;
+
+ config_ = config;
+
+ if (aec_config_changed) {
+ InitializeEchoController();
+ }
+
+ if (ns_config_changed) {
+ InitializeNoiseSuppressor();
+ }
+
+ if (ts_config_changed) {
+ InitializeTransientSuppressor();
+ }
+
+ InitializeHighPassFilter(false);
+
+ if (agc1_config_changed) {
+ InitializeGainController1();
+ }
+
+ const bool config_ok = GainController2::Validate(config_.gain_controller2);
+ if (!config_ok) {
+ RTC_LOG(LS_ERROR)
+ << "Invalid Gain Controller 2 config; using the default config.";
+ config_.gain_controller2 = AudioProcessing::Config::GainController2();
+ }
+
+ InitializeGainController2(agc2_config_changed);
+ InitializeVoiceActivityDetector(agc2_config_changed);
+
+ if (pre_amplifier_config_changed || gain_adjustment_config_changed) {
+ InitializeCaptureLevelsAdjuster();
+ }
+
+ // Reinitialization must happen after all submodule configuration to avoid
+ // additional reinitializations on the next capture / render processing call.
+ if (pipeline_config_changed) {
+ InitializeLocked(formats_.api_format);
+ }
+}
+
+void AudioProcessingImpl::OverrideSubmoduleCreationForTesting(
+ const ApmSubmoduleCreationOverrides& overrides) {
+ MutexLock lock(&mutex_capture_);
+ submodule_creation_overrides_ = overrides;
+}
+
+int AudioProcessingImpl::proc_sample_rate_hz() const {
+ // Used as callback from submodules, hence locking is not allowed.
+ return capture_nonlocked_.capture_processing_format.sample_rate_hz();
+}
+
+int AudioProcessingImpl::proc_fullband_sample_rate_hz() const {
+ return capture_.capture_fullband_audio
+ ? capture_.capture_fullband_audio->num_frames() * 100
+ : capture_nonlocked_.capture_processing_format.sample_rate_hz();
+}
+
+int AudioProcessingImpl::proc_split_sample_rate_hz() const {
+ // Used as callback from submodules, hence locking is not allowed.
+ return capture_nonlocked_.split_rate;
+}
+
+size_t AudioProcessingImpl::num_reverse_channels() const {
+ // Used as callback from submodules, hence locking is not allowed.
+ return formats_.render_processing_format.num_channels();
+}
+
+size_t AudioProcessingImpl::num_input_channels() const {
+ // Used as callback from submodules, hence locking is not allowed.
+ return formats_.api_format.input_stream().num_channels();
+}
+
+size_t AudioProcessingImpl::num_proc_channels() const {
+ // Used as callback from submodules, hence locking is not allowed.
+ const bool multi_channel_capture = config_.pipeline.multi_channel_capture &&
+ constants_.multi_channel_capture_support;
+ if (capture_nonlocked_.echo_controller_enabled && !multi_channel_capture) {
+ return 1;
+ }
+ return num_output_channels();
+}
+
+size_t AudioProcessingImpl::num_output_channels() const {
+ // Used as callback from submodules, hence locking is not allowed.
+ return formats_.api_format.output_stream().num_channels();
+}
+
+void AudioProcessingImpl::set_output_will_be_muted(bool muted) {
+ MutexLock lock(&mutex_capture_);
+ HandleCaptureOutputUsedSetting(!muted);
+}
+
+void AudioProcessingImpl::HandleCaptureOutputUsedSetting(
+ bool capture_output_used) {
+ capture_.capture_output_used =
+ capture_output_used || !constants_.minimize_processing_for_unused_output;
+
+ if (submodules_.agc_manager.get()) {
+ submodules_.agc_manager->HandleCaptureOutputUsedChange(
+ capture_.capture_output_used);
+ }
+ if (submodules_.echo_controller) {
+ submodules_.echo_controller->SetCaptureOutputUsage(
+ capture_.capture_output_used);
+ }
+ if (submodules_.noise_suppressor) {
+ submodules_.noise_suppressor->SetCaptureOutputUsage(
+ capture_.capture_output_used);
+ }
+}
+
+void AudioProcessingImpl::SetRuntimeSetting(RuntimeSetting setting) {
+ PostRuntimeSetting(setting);
+}
+
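+// Routes the setting to the capture queue, the render queue, or both,
+// depending on its type; returns false if any enqueue fails.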
+bool AudioProcessingImpl::PostRuntimeSetting(RuntimeSetting setting) {
+ switch (setting.type()) {
+ case RuntimeSetting::Type::kCustomRenderProcessingRuntimeSetting:
+ case RuntimeSetting::Type::kPlayoutAudioDeviceChange:
+ return render_runtime_settings_enqueuer_.Enqueue(setting);
+ case RuntimeSetting::Type::kCapturePreGain:
+ case RuntimeSetting::Type::kCapturePostGain:
+ case RuntimeSetting::Type::kCaptureCompressionGain:
+ case RuntimeSetting::Type::kCaptureFixedPostGain:
+ case RuntimeSetting::Type::kCaptureOutputUsed:
+ return capture_runtime_settings_enqueuer_.Enqueue(setting);
+ case RuntimeSetting::Type::kPlayoutVolumeChange: {
+ bool enqueueing_successful;
+ enqueueing_successful =
+ capture_runtime_settings_enqueuer_.Enqueue(setting);
+ enqueueing_successful =
+ render_runtime_settings_enqueuer_.Enqueue(setting) &&
+ enqueueing_successful;
+ return enqueueing_successful;
+ }
+ case RuntimeSetting::Type::kNotSpecified:
+ RTC_DCHECK_NOTREACHED();
+ return true;
+ }
+ // The language allows the enum to have a non-enumerator
+ // value. Check that this doesn't happen.
+ RTC_DCHECK_NOTREACHED();
+ return true;
+}
+
+AudioProcessingImpl::RuntimeSettingEnqueuer::RuntimeSettingEnqueuer(
+ SwapQueue<RuntimeSetting>* runtime_settings)
+ : runtime_settings_(*runtime_settings) {
+ RTC_DCHECK(runtime_settings);
+}
+
+AudioProcessingImpl::RuntimeSettingEnqueuer::~RuntimeSettingEnqueuer() =
+ default;
+
+bool AudioProcessingImpl::RuntimeSettingEnqueuer::Enqueue(
+ RuntimeSetting setting) {
+ const bool successful_insert = runtime_settings_.Insert(&setting);
+
+ if (!successful_insert) {
+ RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.ApmRuntimeSettingCannotEnqueue", 1);
+ RTC_LOG(LS_ERROR) << "Cannot enqueue a new runtime setting.";
+ }
+ return successful_insert;
+}
+
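+// Reinitializes the capture pipeline when the stream configurations or the
+// active submodule states have changed since the last call.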
+int AudioProcessingImpl::MaybeInitializeCapture(
+ const StreamConfig& input_config,
+ const StreamConfig& output_config) {
+ ProcessingConfig processing_config;
+ bool reinitialization_required = false;
+ {
+ // Acquire the capture lock in order to access api_format. The lock is
+ // released immediately, as we may need to acquire the render lock as part
+ // of the conditional reinitialization.
+ MutexLock lock_capture(&mutex_capture_);
+ processing_config = formats_.api_format;
+ reinitialization_required = UpdateActiveSubmoduleStates();
+ }
+
+ if (processing_config.input_stream() != input_config) {
+ processing_config.input_stream() = input_config;
+ reinitialization_required = true;
+ }
+
+ if (processing_config.output_stream() != output_config) {
+ processing_config.output_stream() = output_config;
+ reinitialization_required = true;
+ }
+
+ if (reinitialization_required) {
+ MutexLock lock_render(&mutex_render_);
+ MutexLock lock_capture(&mutex_capture_);
+ RETURN_ON_ERR(InitializeLocked(processing_config));
+ }
+ return kNoError;
+}
+
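+// A minimal capture-side usage sketch (assuming a 48 kHz stereo float stream;
+// `src` and `dest` are arrays of per-channel sample pointers):
+//
+//   StreamConfig stream_config(48000, 2);
+//   int err = apm->ProcessStream(src, stream_config, stream_config, dest);
+//   RTC_DCHECK_EQ(AudioProcessing::kNoError, err);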
+int AudioProcessingImpl::ProcessStream(const float* const* src,
+ const StreamConfig& input_config,
+ const StreamConfig& output_config,
+ float* const* dest) {
+ TRACE_EVENT0("webrtc", "AudioProcessing::ProcessStream_StreamConfig");
+ if (!src || !dest) {
+ return kNullPointerError;
+ }
+
+ RETURN_ON_ERR(MaybeInitializeCapture(input_config, output_config));
+
+ MutexLock lock_capture(&mutex_capture_);
+ DenormalDisabler denormal_disabler(use_denormal_disabler_);
+
+ if (aec_dump_) {
+ RecordUnprocessedCaptureStream(src);
+ }
+
+ capture_.capture_audio->CopyFrom(src, formats_.api_format.input_stream());
+ if (capture_.capture_fullband_audio) {
+ capture_.capture_fullband_audio->CopyFrom(
+ src, formats_.api_format.input_stream());
+ }
+ RETURN_ON_ERR(ProcessCaptureStreamLocked());
+ if (capture_.capture_fullband_audio) {
+ capture_.capture_fullband_audio->CopyTo(formats_.api_format.output_stream(),
+ dest);
+ } else {
+ capture_.capture_audio->CopyTo(formats_.api_format.output_stream(), dest);
+ }
+
+ if (aec_dump_) {
+ RecordProcessedCaptureStream(dest);
+ }
+ return kNoError;
+}
+
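+// Dequeues all pending capture-side runtime settings and forwards each one to
+// the submodule it targets.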
+void AudioProcessingImpl::HandleCaptureRuntimeSettings() {
+ RuntimeSetting setting;
+ int num_settings_processed = 0;
+ while (capture_runtime_settings_.Remove(&setting)) {
+ if (aec_dump_) {
+ aec_dump_->WriteRuntimeSetting(setting);
+ }
+ switch (setting.type()) {
+ case RuntimeSetting::Type::kCapturePreGain:
+ if (config_.pre_amplifier.enabled ||
+ config_.capture_level_adjustment.enabled) {
+ float value;
+ setting.GetFloat(&value);
+ // If the pre-amplifier is used, apply the new gain to the
+          // pre-amplifier regardless of whether the capture level adjustment
+          // is activated. This approach allows both functionalities to
+          // coexist until they have been properly merged.
+ if (config_.pre_amplifier.enabled) {
+ config_.pre_amplifier.fixed_gain_factor = value;
+ } else {
+ config_.capture_level_adjustment.pre_gain_factor = value;
+ }
+
+ // Use both the pre-amplifier and the capture level adjustment gains
+ // as pre-gains.
+ float gain = 1.f;
+ if (config_.pre_amplifier.enabled) {
+ gain *= config_.pre_amplifier.fixed_gain_factor;
+ }
+ if (config_.capture_level_adjustment.enabled) {
+ gain *= config_.capture_level_adjustment.pre_gain_factor;
+ }
+
+ submodules_.capture_levels_adjuster->SetPreGain(gain);
+ }
+ // TODO(bugs.chromium.org/9138): Log setting handling by Aec Dump.
+ break;
+ case RuntimeSetting::Type::kCapturePostGain:
+ if (config_.capture_level_adjustment.enabled) {
+ float value;
+ setting.GetFloat(&value);
+ config_.capture_level_adjustment.post_gain_factor = value;
+ submodules_.capture_levels_adjuster->SetPostGain(
+ config_.capture_level_adjustment.post_gain_factor);
+ }
+ // TODO(bugs.chromium.org/9138): Log setting handling by Aec Dump.
+ break;
+ case RuntimeSetting::Type::kCaptureCompressionGain: {
+ if (!submodules_.agc_manager) {
+ float value;
+ setting.GetFloat(&value);
+ int int_value = static_cast<int>(value + .5f);
+ config_.gain_controller1.compression_gain_db = int_value;
+ if (submodules_.gain_control) {
+ int error =
+ submodules_.gain_control->set_compression_gain_db(int_value);
+ RTC_DCHECK_EQ(kNoError, error);
+ }
+ }
+ break;
+ }
+ case RuntimeSetting::Type::kCaptureFixedPostGain: {
+ if (submodules_.gain_controller2) {
+ float value;
+ setting.GetFloat(&value);
+ config_.gain_controller2.fixed_digital.gain_db = value;
+ submodules_.gain_controller2->SetFixedGainDb(value);
+ }
+ break;
+ }
+ case RuntimeSetting::Type::kPlayoutVolumeChange: {
+ int value;
+ setting.GetInt(&value);
+ capture_.playout_volume = value;
+ break;
+ }
+ case RuntimeSetting::Type::kPlayoutAudioDeviceChange:
+ RTC_DCHECK_NOTREACHED();
+ break;
+ case RuntimeSetting::Type::kCustomRenderProcessingRuntimeSetting:
+ RTC_DCHECK_NOTREACHED();
+ break;
+ case RuntimeSetting::Type::kNotSpecified:
+ RTC_DCHECK_NOTREACHED();
+ break;
+ case RuntimeSetting::Type::kCaptureOutputUsed:
+ bool value;
+ setting.GetBool(&value);
+ HandleCaptureOutputUsedSetting(value);
+ break;
+ }
+ ++num_settings_processed;
+ }
+
+ if (num_settings_processed >= RuntimeSettingQueueSize()) {
+    // Handle overrun of the runtime settings queue, which has likely caused
+    // settings to be discarded.
+ HandleOverrunInCaptureRuntimeSettingsQueue();
+ }
+}
+
+void AudioProcessingImpl::HandleOverrunInCaptureRuntimeSettingsQueue() {
+  // Fall back to a safe state for the case when a capture output usage
+  // setting has been missed.
+ HandleCaptureOutputUsedSetting(/*capture_output_used=*/true);
+}
+
+void AudioProcessingImpl::HandleRenderRuntimeSettings() {
+ RuntimeSetting setting;
+ while (render_runtime_settings_.Remove(&setting)) {
+ if (aec_dump_) {
+ aec_dump_->WriteRuntimeSetting(setting);
+ }
+ switch (setting.type()) {
+ case RuntimeSetting::Type::kPlayoutAudioDeviceChange: // fall-through
+ case RuntimeSetting::Type::kPlayoutVolumeChange: // fall-through
+ case RuntimeSetting::Type::kCustomRenderProcessingRuntimeSetting:
+ if (submodules_.render_pre_processor) {
+ submodules_.render_pre_processor->SetRuntimeSetting(setting);
+ }
+ break;
+ case RuntimeSetting::Type::kCapturePreGain: // fall-through
+ case RuntimeSetting::Type::kCapturePostGain: // fall-through
+ case RuntimeSetting::Type::kCaptureCompressionGain: // fall-through
+ case RuntimeSetting::Type::kCaptureFixedPostGain: // fall-through
+ case RuntimeSetting::Type::kCaptureOutputUsed: // fall-through
+ case RuntimeSetting::Type::kNotSpecified:
+ RTC_DCHECK_NOTREACHED();
+ break;
+ }
+ }
+}
+
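+// Packs band-split render audio for the AECM and AGC1 queues; when a queue is
+// full it is drained into the capture side and the insert is retried.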
+void AudioProcessingImpl::QueueBandedRenderAudio(AudioBuffer* audio) {
+ RTC_DCHECK_GE(160, audio->num_frames_per_band());
+
+ if (submodules_.echo_control_mobile) {
+ EchoControlMobileImpl::PackRenderAudioBuffer(audio, num_output_channels(),
+ num_reverse_channels(),
+ &aecm_render_queue_buffer_);
+ RTC_DCHECK(aecm_render_signal_queue_);
+ // Insert the samples into the queue.
+ if (!aecm_render_signal_queue_->Insert(&aecm_render_queue_buffer_)) {
+ // The data queue is full and needs to be emptied.
+ EmptyQueuedRenderAudio();
+
+ // Retry the insert (should always work).
+ bool result =
+ aecm_render_signal_queue_->Insert(&aecm_render_queue_buffer_);
+ RTC_DCHECK(result);
+ }
+ }
+
+ if (!submodules_.agc_manager && submodules_.gain_control) {
+ GainControlImpl::PackRenderAudioBuffer(*audio, &agc_render_queue_buffer_);
+ // Insert the samples into the queue.
+ if (!agc_render_signal_queue_->Insert(&agc_render_queue_buffer_)) {
+ // The data queue is full and needs to be emptied.
+ EmptyQueuedRenderAudio();
+
+ // Retry the insert (should always work).
+ bool result = agc_render_signal_queue_->Insert(&agc_render_queue_buffer_);
+ RTC_DCHECK(result);
+ }
+ }
+}
+
+void AudioProcessingImpl::QueueNonbandedRenderAudio(AudioBuffer* audio) {
+ if (submodules_.echo_detector) {
+ PackRenderAudioBufferForEchoDetector(*audio, red_render_queue_buffer_);
+ RTC_DCHECK(red_render_signal_queue_);
+ // Insert the samples into the queue.
+ if (!red_render_signal_queue_->Insert(&red_render_queue_buffer_)) {
+ // The data queue is full and needs to be emptied.
+ EmptyQueuedRenderAudio();
+
+ // Retry the insert (should always work).
+ bool result = red_render_signal_queue_->Insert(&red_render_queue_buffer_);
+ RTC_DCHECK(result);
+ }
+ }
+}
+
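+// Grows the render-side swap queues when the required element size has
+// increased; otherwise the existing queues are simply cleared.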
+void AudioProcessingImpl::AllocateRenderQueue() {
+ const size_t new_agc_render_queue_element_max_size =
+ std::max(static_cast<size_t>(1), kMaxAllowedValuesOfSamplesPerBand);
+
+ const size_t new_red_render_queue_element_max_size =
+ std::max(static_cast<size_t>(1), kMaxAllowedValuesOfSamplesPerFrame);
+
+ // Reallocate the queues if the queue item sizes are too small to fit the
+  // data that will be put in the queues.
+
+ if (agc_render_queue_element_max_size_ <
+ new_agc_render_queue_element_max_size) {
+ agc_render_queue_element_max_size_ = new_agc_render_queue_element_max_size;
+
+ std::vector<int16_t> template_queue_element(
+ agc_render_queue_element_max_size_);
+
+ agc_render_signal_queue_.reset(
+ new SwapQueue<std::vector<int16_t>, RenderQueueItemVerifier<int16_t>>(
+ kMaxNumFramesToBuffer, template_queue_element,
+ RenderQueueItemVerifier<int16_t>(
+ agc_render_queue_element_max_size_)));
+
+ agc_render_queue_buffer_.resize(agc_render_queue_element_max_size_);
+ agc_capture_queue_buffer_.resize(agc_render_queue_element_max_size_);
+ } else {
+ agc_render_signal_queue_->Clear();
+ }
+
+ if (submodules_.echo_detector) {
+ if (red_render_queue_element_max_size_ <
+ new_red_render_queue_element_max_size) {
+ red_render_queue_element_max_size_ =
+ new_red_render_queue_element_max_size;
+
+ std::vector<float> template_queue_element(
+ red_render_queue_element_max_size_);
+
+ red_render_signal_queue_.reset(
+ new SwapQueue<std::vector<float>, RenderQueueItemVerifier<float>>(
+ kMaxNumFramesToBuffer, template_queue_element,
+ RenderQueueItemVerifier<float>(
+ red_render_queue_element_max_size_)));
+
+ red_render_queue_buffer_.resize(red_render_queue_element_max_size_);
+ red_capture_queue_buffer_.resize(red_render_queue_element_max_size_);
+ } else {
+ red_render_signal_queue_->Clear();
+ }
+ }
+}
+
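+// Drains all render-side queues into the corresponding capture-side
+// submodules under the capture lock.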
+void AudioProcessingImpl::EmptyQueuedRenderAudio() {
+ MutexLock lock_capture(&mutex_capture_);
+ EmptyQueuedRenderAudioLocked();
+}
+
+void AudioProcessingImpl::EmptyQueuedRenderAudioLocked() {
+ if (submodules_.echo_control_mobile) {
+ RTC_DCHECK(aecm_render_signal_queue_);
+ while (aecm_render_signal_queue_->Remove(&aecm_capture_queue_buffer_)) {
+ submodules_.echo_control_mobile->ProcessRenderAudio(
+ aecm_capture_queue_buffer_);
+ }
+ }
+
+ if (submodules_.gain_control) {
+ while (agc_render_signal_queue_->Remove(&agc_capture_queue_buffer_)) {
+ submodules_.gain_control->ProcessRenderAudio(agc_capture_queue_buffer_);
+ }
+ }
+
+ if (submodules_.echo_detector) {
+ while (red_render_signal_queue_->Remove(&red_capture_queue_buffer_)) {
+ submodules_.echo_detector->AnalyzeRenderAudio(red_capture_queue_buffer_);
+ }
+ }
+}
+
+int AudioProcessingImpl::ProcessStream(const int16_t* const src,
+ const StreamConfig& input_config,
+ const StreamConfig& output_config,
+ int16_t* const dest) {
+ TRACE_EVENT0("webrtc", "AudioProcessing::ProcessStream_AudioFrame");
+ RETURN_ON_ERR(MaybeInitializeCapture(input_config, output_config));
+
+ MutexLock lock_capture(&mutex_capture_);
+ DenormalDisabler denormal_disabler(use_denormal_disabler_);
+
+ if (aec_dump_) {
+ RecordUnprocessedCaptureStream(src, input_config);
+ }
+
+ capture_.capture_audio->CopyFrom(src, input_config);
+ if (capture_.capture_fullband_audio) {
+ capture_.capture_fullband_audio->CopyFrom(src, input_config);
+ }
+ RETURN_ON_ERR(ProcessCaptureStreamLocked());
+ if (submodule_states_.CaptureMultiBandProcessingPresent() ||
+ submodule_states_.CaptureFullBandProcessingActive()) {
+ if (capture_.capture_fullband_audio) {
+ capture_.capture_fullband_audio->CopyTo(output_config, dest);
+ } else {
+ capture_.capture_audio->CopyTo(output_config, dest);
+ }
+ }
+
+ if (aec_dump_) {
+ RecordProcessedCaptureStream(dest, output_config);
+ }
+
+ return kNoError;
+}
+
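+// Runs the capture-side processing chain on the internal audio buffers; the
+// capture lock must be held by the caller.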
+int AudioProcessingImpl::ProcessCaptureStreamLocked() {
+ EmptyQueuedRenderAudioLocked();
+ HandleCaptureRuntimeSettings();
+ DenormalDisabler denormal_disabler(use_denormal_disabler_);
+
+  // Ensure that the AEC and AECM are not both active at the same time.
+ // TODO(peah): Simplify once the public API Enable functions for these
+ // are moved to APM.
+ RTC_DCHECK_LE(
+ !!submodules_.echo_controller + !!submodules_.echo_control_mobile, 1);
+
+ AudioBuffer* capture_buffer = capture_.capture_audio.get(); // For brevity.
+ AudioBuffer* linear_aec_buffer = capture_.linear_aec_output.get();
+
+ if (submodules_.high_pass_filter &&
+ config_.high_pass_filter.apply_in_full_band &&
+ !constants_.enforce_split_band_hpf) {
+ submodules_.high_pass_filter->Process(capture_buffer,
+ /*use_split_band_data=*/false);
+ }
+
+ if (submodules_.capture_levels_adjuster) {
+ // If the analog mic gain emulation is active, get the emulated analog mic
+ // gain and pass it to the analog gain control functionality.
+ if (config_.capture_level_adjustment.analog_mic_gain_emulation.enabled) {
+ int level = submodules_.capture_levels_adjuster->GetAnalogMicGainLevel();
+ if (submodules_.agc_manager) {
+ submodules_.agc_manager->set_stream_analog_level(level);
+ } else if (submodules_.gain_control) {
+ int error = submodules_.gain_control->set_stream_analog_level(level);
+ RTC_DCHECK_EQ(kNoError, error);
+ }
+ }
+
+ submodules_.capture_levels_adjuster->ApplyPreLevelAdjustment(
+ *capture_buffer);
+ }
+
+ capture_input_rms_.Analyze(rtc::ArrayView<const float>(
+ capture_buffer->channels_const()[0],
+ capture_nonlocked_.capture_processing_format.num_frames()));
+ const bool log_rms = ++capture_rms_interval_counter_ >= 1000;
+ if (log_rms) {
+ capture_rms_interval_counter_ = 0;
+ RmsLevel::Levels levels = capture_input_rms_.AverageAndPeak();
+ RTC_HISTOGRAM_COUNTS_LINEAR("WebRTC.Audio.ApmCaptureInputLevelAverageRms",
+ levels.average, 1, RmsLevel::kMinLevelDb, 64);
+ RTC_HISTOGRAM_COUNTS_LINEAR("WebRTC.Audio.ApmCaptureInputLevelPeakRms",
+ levels.peak, 1, RmsLevel::kMinLevelDb, 64);
+ }
+
+ // Detect an analog gain change.
+ int analog_mic_level = recommended_stream_analog_level_locked();
+ const bool analog_mic_level_changed =
+ capture_.prev_analog_mic_level != analog_mic_level &&
+ capture_.prev_analog_mic_level != -1;
+ capture_.prev_analog_mic_level = analog_mic_level;
+ analog_gain_stats_reporter_.UpdateStatistics(analog_mic_level);
+
+ if (submodules_.echo_controller) {
+ capture_.echo_path_gain_change = analog_mic_level_changed;
+
+ // Detect and flag any change in the capture level adjustment pre-gain.
+ if (submodules_.capture_levels_adjuster) {
+ float pre_adjustment_gain =
+ submodules_.capture_levels_adjuster->GetPreAdjustmentGain();
+ capture_.echo_path_gain_change =
+ capture_.echo_path_gain_change ||
+ (capture_.prev_pre_adjustment_gain != pre_adjustment_gain &&
+ capture_.prev_pre_adjustment_gain >= 0.f);
+ capture_.prev_pre_adjustment_gain = pre_adjustment_gain;
+ }
+
+ // Detect volume change.
+ capture_.echo_path_gain_change =
+ capture_.echo_path_gain_change ||
+ (capture_.prev_playout_volume != capture_.playout_volume &&
+ capture_.prev_playout_volume >= 0);
+ capture_.prev_playout_volume = capture_.playout_volume;
+
+ submodules_.echo_controller->AnalyzeCapture(capture_buffer);
+ }
+
+ if (submodules_.agc_manager) {
+ submodules_.agc_manager->AnalyzePreProcess(capture_buffer);
+ }
+
+ if (submodule_states_.CaptureMultiBandSubModulesActive() &&
+ SampleRateSupportsMultiBand(
+ capture_nonlocked_.capture_processing_format.sample_rate_hz())) {
+ capture_buffer->SplitIntoFrequencyBands();
+ }
+
+ const bool multi_channel_capture = config_.pipeline.multi_channel_capture &&
+ constants_.multi_channel_capture_support;
+ if (submodules_.echo_controller && !multi_channel_capture) {
+ // Force down-mixing of the number of channels after the detection of
+ // capture signal saturation.
+ // TODO(peah): Look into ensuring that this kind of tampering with the
+ // AudioBuffer functionality should not be needed.
+ capture_buffer->set_num_channels(1);
+ }
+
+ if (submodules_.high_pass_filter &&
+ (!config_.high_pass_filter.apply_in_full_band ||
+ constants_.enforce_split_band_hpf)) {
+ submodules_.high_pass_filter->Process(capture_buffer,
+ /*use_split_band_data=*/true);
+ }
+
+ if (submodules_.gain_control) {
+ RETURN_ON_ERR(
+ submodules_.gain_control->AnalyzeCaptureAudio(*capture_buffer));
+ }
+
+ if ((!config_.noise_suppression.analyze_linear_aec_output_when_available ||
+ !linear_aec_buffer || submodules_.echo_control_mobile) &&
+ submodules_.noise_suppressor) {
+ submodules_.noise_suppressor->Analyze(*capture_buffer);
+ }
+
+ if (submodules_.echo_control_mobile) {
+ // Ensure that the stream delay was set before the call to the
+ // AECM ProcessCaptureAudio function.
+ if (!capture_.was_stream_delay_set) {
+ return AudioProcessing::kStreamParameterNotSetError;
+ }
+
+ if (submodules_.noise_suppressor) {
+ submodules_.noise_suppressor->Process(capture_buffer);
+ }
+
+ RETURN_ON_ERR(submodules_.echo_control_mobile->ProcessCaptureAudio(
+ capture_buffer, stream_delay_ms()));
+ } else {
+ if (submodules_.echo_controller) {
+ data_dumper_->DumpRaw("stream_delay", stream_delay_ms());
+
+ if (capture_.was_stream_delay_set) {
+ submodules_.echo_controller->SetAudioBufferDelay(stream_delay_ms());
+ }
+
+ submodules_.echo_controller->ProcessCapture(
+ capture_buffer, linear_aec_buffer, capture_.echo_path_gain_change);
+ }
+
+ if (config_.noise_suppression.analyze_linear_aec_output_when_available &&
+ linear_aec_buffer && submodules_.noise_suppressor) {
+ submodules_.noise_suppressor->Analyze(*linear_aec_buffer);
+ }
+
+ if (submodules_.noise_suppressor) {
+ submodules_.noise_suppressor->Process(capture_buffer);
+ }
+ }
+
+ if (submodules_.agc_manager) {
+ submodules_.agc_manager->Process(capture_buffer);
+
+ absl::optional<int> new_digital_gain =
+ submodules_.agc_manager->GetDigitalComressionGain();
+ if (new_digital_gain && submodules_.gain_control) {
+ submodules_.gain_control->set_compression_gain_db(*new_digital_gain);
+ }
+ }
+
+ if (submodules_.gain_control) {
+ // TODO(peah): Add reporting from AEC3 whether there is echo.
+ RETURN_ON_ERR(submodules_.gain_control->ProcessCaptureAudio(
+ capture_buffer, /*stream_has_echo*/ false));
+ }
+
+ if (submodule_states_.CaptureMultiBandProcessingPresent() &&
+ SampleRateSupportsMultiBand(
+ capture_nonlocked_.capture_processing_format.sample_rate_hz())) {
+ capture_buffer->MergeFrequencyBands();
+ }
+
+ if (capture_.capture_output_used) {
+ if (capture_.capture_fullband_audio) {
+ const auto& ec = submodules_.echo_controller;
+ bool ec_active = ec ? ec->ActiveProcessing() : false;
+ // Only update the fullband buffer if the multiband processing has changed
+ // the signal. Keep the original signal otherwise.
+ if (submodule_states_.CaptureMultiBandProcessingActive(ec_active)) {
+ capture_buffer->CopyTo(capture_.capture_fullband_audio.get());
+ }
+ capture_buffer = capture_.capture_fullband_audio.get();
+ }
+
+ if (submodules_.echo_detector) {
+ submodules_.echo_detector->AnalyzeCaptureAudio(
+ rtc::ArrayView<const float>(capture_buffer->channels()[0],
+ capture_buffer->num_frames()));
+ }
+
+ absl::optional<float> voice_probability;
+ if (!!submodules_.voice_activity_detector) {
+ voice_probability = submodules_.voice_activity_detector->Analyze(
+ AudioFrameView<const float>(capture_buffer->channels(),
+ capture_buffer->num_channels(),
+ capture_buffer->num_frames()));
+ }
+
+ if (submodules_.transient_suppressor) {
+ float transient_suppressor_voice_probability = 1.0f;
+ switch (transient_suppressor_vad_mode_) {
+ case TransientSuppressor::VadMode::kDefault:
+ if (submodules_.agc_manager) {
+ transient_suppressor_voice_probability =
+ submodules_.agc_manager->voice_probability();
+ }
+ break;
+ case TransientSuppressor::VadMode::kRnnVad:
+ RTC_DCHECK(voice_probability.has_value());
+ transient_suppressor_voice_probability = *voice_probability;
+ break;
+ case TransientSuppressor::VadMode::kNoVad:
+ // The transient suppressor will ignore `voice_probability`.
+ break;
+ }
+ float delayed_voice_probability =
+ submodules_.transient_suppressor->Suppress(
+ capture_buffer->channels()[0], capture_buffer->num_frames(),
+ capture_buffer->num_channels(),
+ capture_buffer->split_bands_const(0)[kBand0To8kHz],
+ capture_buffer->num_frames_per_band(),
+ /*reference_data=*/nullptr, /*reference_length=*/0,
+ transient_suppressor_voice_probability, capture_.key_pressed);
+ if (voice_probability.has_value()) {
+ *voice_probability = delayed_voice_probability;
+ }
+ }
+
+ // Experimental APM sub-module that analyzes `capture_buffer`.
+ if (submodules_.capture_analyzer) {
+ submodules_.capture_analyzer->Analyze(capture_buffer);
+ }
+
+ if (submodules_.gain_controller2) {
+ submodules_.gain_controller2->NotifyAnalogLevel(
+ recommended_stream_analog_level_locked());
+ submodules_.gain_controller2->Process(voice_probability, capture_buffer);
+ }
+
+ if (submodules_.capture_post_processor) {
+ submodules_.capture_post_processor->Process(capture_buffer);
+ }
+
+ capture_output_rms_.Analyze(rtc::ArrayView<const float>(
+ capture_buffer->channels_const()[0],
+ capture_nonlocked_.capture_processing_format.num_frames()));
+ if (log_rms) {
+ RmsLevel::Levels levels = capture_output_rms_.AverageAndPeak();
+ RTC_HISTOGRAM_COUNTS_LINEAR(
+ "WebRTC.Audio.ApmCaptureOutputLevelAverageRms", levels.average, 1,
+ RmsLevel::kMinLevelDb, 64);
+ RTC_HISTOGRAM_COUNTS_LINEAR("WebRTC.Audio.ApmCaptureOutputLevelPeakRms",
+ levels.peak, 1, RmsLevel::kMinLevelDb, 64);
+ }
+
+ if (submodules_.agc_manager) {
+ int level = recommended_stream_analog_level_locked();
+ data_dumper_->DumpRaw("experimental_gain_control_stream_analog_level", 1,
+ &level);
+ }
+
+ // Compute echo-detector stats.
+ if (submodules_.echo_detector) {
+ auto ed_metrics = submodules_.echo_detector->GetMetrics();
+ capture_.stats.residual_echo_likelihood = ed_metrics.echo_likelihood;
+ capture_.stats.residual_echo_likelihood_recent_max =
+ ed_metrics.echo_likelihood_recent_max;
+ }
+ }
+
+ // Compute echo-controller stats.
+ if (submodules_.echo_controller) {
+ auto ec_metrics = submodules_.echo_controller->GetMetrics();
+ capture_.stats.echo_return_loss = ec_metrics.echo_return_loss;
+ capture_.stats.echo_return_loss_enhancement =
+ ec_metrics.echo_return_loss_enhancement;
+ capture_.stats.delay_ms = ec_metrics.delay_ms;
+ }
+
+ // Pass stats for reporting.
+ stats_reporter_.UpdateStatistics(capture_.stats);
+
+ if (submodules_.capture_levels_adjuster) {
+ submodules_.capture_levels_adjuster->ApplyPostLevelAdjustment(
+ *capture_buffer);
+
+    // If the analog mic gain emulation is active, retrieve the level from the
+    // analog gain control and pass it on to the mic gain emulator.
+ if (config_.capture_level_adjustment.analog_mic_gain_emulation.enabled) {
+ if (submodules_.agc_manager) {
+ submodules_.capture_levels_adjuster->SetAnalogMicGainLevel(
+ submodules_.agc_manager->stream_analog_level());
+ } else if (submodules_.gain_control) {
+ submodules_.capture_levels_adjuster->SetAnalogMicGainLevel(
+ submodules_.gain_control->stream_analog_level());
+ }
+ }
+ }
+
+  // Temporarily set the output to zero after the stream has been unmuted
+  // (capture output is again used). The purpose of this is to avoid the
+  // clicks and artefacts that result when processing is reactivated after
+  // unmuting.
+ if (!capture_.capture_output_used_last_frame &&
+ capture_.capture_output_used) {
+ for (size_t ch = 0; ch < capture_buffer->num_channels(); ++ch) {
+ rtc::ArrayView<float> channel_view(capture_buffer->channels()[ch],
+ capture_buffer->num_frames());
+ std::fill(channel_view.begin(), channel_view.end(), 0.f);
+ }
+ }
+ capture_.capture_output_used_last_frame = capture_.capture_output_used;
+
+ capture_.was_stream_delay_set = false;
+ return kNoError;
+}
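+// Recap of the capture path above: echo control (AECM or the echo
+// controller) and noise suppression run on the band-split signal, followed
+// by analog/digital AGC and band merging; the full-band sub-modules (echo
+// detector, voice activity detector, transient suppressor, capture analyzer,
+// AGC2 and the capture post processor) then run when the capture output is
+// used, followed by RMS logging, stats reporting, post level adjustment and
+// zeroing of the output around unmute transitions.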
+
+int AudioProcessingImpl::AnalyzeReverseStream(
+ const float* const* data,
+ const StreamConfig& reverse_config) {
+ TRACE_EVENT0("webrtc", "AudioProcessing::AnalyzeReverseStream_StreamConfig");
+ MutexLock lock(&mutex_render_);
+ return AnalyzeReverseStreamLocked(data, reverse_config, reverse_config);
+}
+
+int AudioProcessingImpl::ProcessReverseStream(const float* const* src,
+ const StreamConfig& input_config,
+ const StreamConfig& output_config,
+ float* const* dest) {
+ TRACE_EVENT0("webrtc", "AudioProcessing::ProcessReverseStream_StreamConfig");
+ MutexLock lock(&mutex_render_);
+ DenormalDisabler denormal_disabler(use_denormal_disabler_);
+
+ RETURN_ON_ERR(AnalyzeReverseStreamLocked(src, input_config, output_config));
+ if (submodule_states_.RenderMultiBandProcessingActive() ||
+ submodule_states_.RenderFullBandProcessingActive()) {
+ render_.render_audio->CopyTo(formats_.api_format.reverse_output_stream(),
+ dest);
+ } else if (formats_.api_format.reverse_input_stream() !=
+ formats_.api_format.reverse_output_stream()) {
+ render_.render_converter->Convert(src, input_config.num_samples(), dest,
+ output_config.num_samples());
+ } else {
+ CopyAudioIfNeeded(src, input_config.num_frames(),
+ input_config.num_channels(), dest);
+ }
+
+ return kNoError;
+}
+
+int AudioProcessingImpl::AnalyzeReverseStreamLocked(
+ const float* const* src,
+ const StreamConfig& input_config,
+ const StreamConfig& output_config) {
+ if (src == nullptr) {
+ return kNullPointerError;
+ }
+
+ if (input_config.num_channels() == 0) {
+ return kBadNumberChannelsError;
+ }
+
+ ProcessingConfig processing_config = formats_.api_format;
+ processing_config.reverse_input_stream() = input_config;
+ processing_config.reverse_output_stream() = output_config;
+
+ RETURN_ON_ERR(MaybeInitializeRender(processing_config));
+ RTC_DCHECK_EQ(input_config.num_frames(),
+ formats_.api_format.reverse_input_stream().num_frames());
+
+ DenormalDisabler denormal_disabler(use_denormal_disabler_);
+
+ if (aec_dump_) {
+ const size_t channel_size =
+ formats_.api_format.reverse_input_stream().num_frames();
+ const size_t num_channels =
+ formats_.api_format.reverse_input_stream().num_channels();
+ aec_dump_->WriteRenderStreamMessage(
+ AudioFrameView<const float>(src, num_channels, channel_size));
+ }
+ render_.render_audio->CopyFrom(src,
+ formats_.api_format.reverse_input_stream());
+ return ProcessRenderStreamLocked();
+}
+
+int AudioProcessingImpl::ProcessReverseStream(const int16_t* const src,
+ const StreamConfig& input_config,
+ const StreamConfig& output_config,
+ int16_t* const dest) {
+ TRACE_EVENT0("webrtc", "AudioProcessing::ProcessReverseStream_AudioFrame");
+
+ if (input_config.num_channels() <= 0) {
+ return AudioProcessing::Error::kBadNumberChannelsError;
+ }
+
+ MutexLock lock(&mutex_render_);
+ DenormalDisabler denormal_disabler(use_denormal_disabler_);
+
+ ProcessingConfig processing_config = formats_.api_format;
+ processing_config.reverse_input_stream().set_sample_rate_hz(
+ input_config.sample_rate_hz());
+ processing_config.reverse_input_stream().set_num_channels(
+ input_config.num_channels());
+ processing_config.reverse_output_stream().set_sample_rate_hz(
+ output_config.sample_rate_hz());
+ processing_config.reverse_output_stream().set_num_channels(
+ output_config.num_channels());
+
+ RETURN_ON_ERR(MaybeInitializeRender(processing_config));
+ if (input_config.num_frames() !=
+ formats_.api_format.reverse_input_stream().num_frames()) {
+ return kBadDataLengthError;
+ }
+
+ if (aec_dump_) {
+ aec_dump_->WriteRenderStreamMessage(src, input_config.num_frames(),
+ input_config.num_channels());
+ }
+
+ render_.render_audio->CopyFrom(src, input_config);
+ RETURN_ON_ERR(ProcessRenderStreamLocked());
+ if (submodule_states_.RenderMultiBandProcessingActive() ||
+ submodule_states_.RenderFullBandProcessingActive()) {
+ render_.render_audio->CopyTo(output_config, dest);
+ }
+ return kNoError;
+}
+
+int AudioProcessingImpl::ProcessRenderStreamLocked() {
+ AudioBuffer* render_buffer = render_.render_audio.get(); // For brevity.
+
+ HandleRenderRuntimeSettings();
+ DenormalDisabler denormal_disabler(use_denormal_disabler_);
+
+ if (submodules_.render_pre_processor) {
+ submodules_.render_pre_processor->Process(render_buffer);
+ }
+
+ QueueNonbandedRenderAudio(render_buffer);
+
+ if (submodule_states_.RenderMultiBandSubModulesActive() &&
+ SampleRateSupportsMultiBand(
+ formats_.render_processing_format.sample_rate_hz())) {
+ render_buffer->SplitIntoFrequencyBands();
+ }
+
+ if (submodule_states_.RenderMultiBandSubModulesActive()) {
+ QueueBandedRenderAudio(render_buffer);
+ }
+
+  // TODO(peah): Perform the queuing inside QueueRenderAudio().
+ if (submodules_.echo_controller) {
+ submodules_.echo_controller->AnalyzeRender(render_buffer);
+ }
+
+ if (submodule_states_.RenderMultiBandProcessingActive() &&
+ SampleRateSupportsMultiBand(
+ formats_.render_processing_format.sample_rate_hz())) {
+ render_buffer->MergeFrequencyBands();
+ }
+
+ return kNoError;
+}
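+// Recap of the render path above: optional render pre-processing, queuing of
+// full-band and band-split render audio for the capture-side sub-modules,
+// render analysis by the echo controller, and band merging when multi-band
+// processing has modified the signal.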
+
+int AudioProcessingImpl::set_stream_delay_ms(int delay) {
+ MutexLock lock(&mutex_capture_);
+ Error retval = kNoError;
+ capture_.was_stream_delay_set = true;
+
+ if (delay < 0) {
+ delay = 0;
+ retval = kBadStreamParameterWarning;
+ }
+
+ // TODO(ajm): the max is rather arbitrarily chosen; investigate.
+ if (delay > 500) {
+ delay = 500;
+ retval = kBadStreamParameterWarning;
+ }
+
+ capture_nonlocked_.stream_delay_ms = delay;
+ return retval;
+}
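+// Illustrative usage sketch (not part of the upstream sources): a caller
+// reports its measured render-to-capture delay before each ProcessStream()
+// call and tolerates the clamping warning. `apm` is an assumed
+// AudioProcessing pointer owned by the caller.
+//
+//   int err = apm->set_stream_delay_ms(measured_delay_ms);
+//   if (err == AudioProcessing::kBadStreamParameterWarning) {
+//     // The delay was clamped into [0, 500] ms; processing continues.
+//   }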
+
+bool AudioProcessingImpl::GetLinearAecOutput(
+ rtc::ArrayView<std::array<float, 160>> linear_output) const {
+ MutexLock lock(&mutex_capture_);
+ AudioBuffer* linear_aec_buffer = capture_.linear_aec_output.get();
+
+ RTC_DCHECK(linear_aec_buffer);
+ if (linear_aec_buffer) {
+ RTC_DCHECK_EQ(1, linear_aec_buffer->num_bands());
+ RTC_DCHECK_EQ(linear_output.size(), linear_aec_buffer->num_channels());
+
+ for (size_t ch = 0; ch < linear_aec_buffer->num_channels(); ++ch) {
+ RTC_DCHECK_EQ(linear_output[ch].size(), linear_aec_buffer->num_frames());
+ rtc::ArrayView<const float> channel_view =
+ rtc::ArrayView<const float>(linear_aec_buffer->channels_const()[ch],
+ linear_aec_buffer->num_frames());
+ FloatS16ToFloat(channel_view.data(), channel_view.size(),
+ linear_output[ch].data());
+ }
+ return true;
+ }
+ RTC_LOG(LS_ERROR) << "No linear AEC output available";
+ RTC_DCHECK_NOTREACHED();
+ return false;
+}
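+// Illustrative usage sketch (an assumption, not upstream code): with
+// `echo_canceller.export_linear_aec_output` enabled in the config, a mono
+// caller can fetch the 16 kHz linear AEC output, delivered as one
+// std::array<float, 160> (10 ms) per channel:
+//
+//   std::array<float, 160> linear[1];
+//   if (apm->GetLinearAecOutput(linear)) {
+//     // linear[0] now holds 10 ms of linear AEC output for channel 0.
+//   }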
+
+int AudioProcessingImpl::stream_delay_ms() const {
+  // Used as a callback from submodules; hence locking is not allowed.
+ return capture_nonlocked_.stream_delay_ms;
+}
+
+void AudioProcessingImpl::set_stream_key_pressed(bool key_pressed) {
+ MutexLock lock(&mutex_capture_);
+ capture_.key_pressed = key_pressed;
+}
+
+void AudioProcessingImpl::set_stream_analog_level(int level) {
+ MutexLock lock_capture(&mutex_capture_);
+
+ if (config_.capture_level_adjustment.analog_mic_gain_emulation.enabled) {
+ // If the analog mic gain is emulated internally, simply cache the level for
+ // later reporting back as the recommended stream analog level to use.
+ capture_.cached_stream_analog_level_ = level;
+ return;
+ }
+
+ if (submodules_.agc_manager) {
+ submodules_.agc_manager->set_stream_analog_level(level);
+ data_dumper_->DumpRaw("experimental_gain_control_set_stream_analog_level",
+ 1, &level);
+ return;
+ }
+
+ if (submodules_.gain_control) {
+ int error = submodules_.gain_control->set_stream_analog_level(level);
+ RTC_DCHECK_EQ(kNoError, error);
+ return;
+ }
+
+ // If no analog mic gain control functionality is in place, cache the level
+ // for later reporting back as the recommended stream analog level to use.
+ capture_.cached_stream_analog_level_ = level;
+}
+
+int AudioProcessingImpl::recommended_stream_analog_level() const {
+ MutexLock lock_capture(&mutex_capture_);
+ return recommended_stream_analog_level_locked();
+}
+
+int AudioProcessingImpl::recommended_stream_analog_level_locked() const {
+ if (config_.capture_level_adjustment.analog_mic_gain_emulation.enabled) {
+ return capture_.cached_stream_analog_level_;
+ }
+
+ if (submodules_.agc_manager) {
+ return submodules_.agc_manager->stream_analog_level();
+ }
+
+ if (submodules_.gain_control) {
+ return submodules_.gain_control->stream_analog_level();
+ }
+
+ return capture_.cached_stream_analog_level_;
+}
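+// Note the precedence above, mirroring set_stream_analog_level(): the level
+// cached for the emulated analog mic gain wins, then the adaptive analog
+// AGC, then the legacy gain control, and finally the level cached when no
+// analog gain control functionality is in place.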
+
+bool AudioProcessingImpl::CreateAndAttachAecDump(absl::string_view file_name,
+ int64_t max_log_size_bytes,
+ rtc::TaskQueue* worker_queue) {
+ std::unique_ptr<AecDump> aec_dump =
+ AecDumpFactory::Create(file_name, max_log_size_bytes, worker_queue);
+ if (!aec_dump) {
+ return false;
+ }
+
+ AttachAecDump(std::move(aec_dump));
+ return true;
+}
+
+bool AudioProcessingImpl::CreateAndAttachAecDump(FILE* handle,
+ int64_t max_log_size_bytes,
+ rtc::TaskQueue* worker_queue) {
+ std::unique_ptr<AecDump> aec_dump =
+ AecDumpFactory::Create(handle, max_log_size_bytes, worker_queue);
+ if (!aec_dump) {
+ return false;
+ }
+
+ AttachAecDump(std::move(aec_dump));
+ return true;
+}
+
+void AudioProcessingImpl::AttachAecDump(std::unique_ptr<AecDump> aec_dump) {
+ RTC_DCHECK(aec_dump);
+ MutexLock lock_render(&mutex_render_);
+ MutexLock lock_capture(&mutex_capture_);
+
+  // The previously attached AecDump is destroyed together with the
+  // 'aec_dump' parameter, which happens after the locks have been released.
+ aec_dump_.swap(aec_dump);
+ WriteAecDumpConfigMessage(true);
+ aec_dump_->WriteInitMessage(formats_.api_format, rtc::TimeUTCMillis());
+}
+
+void AudioProcessingImpl::DetachAecDump() {
+ // The d-tor of a task-queue based AecDump blocks until all pending
+ // tasks are done. This construction avoids blocking while holding
+ // the render and capture locks.
+ std::unique_ptr<AecDump> aec_dump = nullptr;
+ {
+ MutexLock lock_render(&mutex_render_);
+ MutexLock lock_capture(&mutex_capture_);
+ aec_dump = std::move(aec_dump_);
+ }
+}
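+// DetachAecDump() relies on a common idiom: to destroy an object whose
+// destructor may block, move it into a local variable while holding the
+// locks and let it be destroyed after the scope has released them. A minimal
+// sketch of the pattern, with assumed names:
+//
+//   std::unique_ptr<Blocking> doomed;
+//   {
+//     MutexLock lock(&mutex_);
+//     doomed = std::move(member_);
+//   }  // Locks released here.
+//   // `doomed` is destroyed on function exit, without any lock held.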
+
+AudioProcessing::Config AudioProcessingImpl::GetConfig() const {
+ MutexLock lock_render(&mutex_render_);
+ MutexLock lock_capture(&mutex_capture_);
+ return config_;
+}
+
+bool AudioProcessingImpl::UpdateActiveSubmoduleStates() {
+ return submodule_states_.Update(
+ config_.high_pass_filter.enabled, !!submodules_.echo_control_mobile,
+ !!submodules_.noise_suppressor, !!submodules_.gain_control,
+ !!submodules_.gain_controller2, !!submodules_.voice_activity_detector,
+ config_.pre_amplifier.enabled || config_.capture_level_adjustment.enabled,
+ capture_nonlocked_.echo_controller_enabled,
+ !!submodules_.transient_suppressor);
+}
+
+void AudioProcessingImpl::InitializeTransientSuppressor() {
+ if (config_.transient_suppression.enabled &&
+ !constants_.transient_suppressor_forced_off) {
+    // Attempt to create a transient suppressor if one does not already exist.
+ if (!submodules_.transient_suppressor) {
+ submodules_.transient_suppressor = CreateTransientSuppressor(
+ submodule_creation_overrides_, transient_suppressor_vad_mode_,
+ proc_fullband_sample_rate_hz(), capture_nonlocked_.split_rate,
+ num_proc_channels());
+ if (!submodules_.transient_suppressor) {
+ RTC_LOG(LS_WARNING)
+ << "No transient suppressor created (probably disabled)";
+ }
+ } else {
+ submodules_.transient_suppressor->Initialize(
+ proc_fullband_sample_rate_hz(), capture_nonlocked_.split_rate,
+ num_proc_channels());
+ }
+ } else {
+ submodules_.transient_suppressor.reset();
+ }
+}
+
+void AudioProcessingImpl::InitializeHighPassFilter(bool forced_reset) {
+ bool high_pass_filter_needed_by_aec =
+ config_.echo_canceller.enabled &&
+ config_.echo_canceller.enforce_high_pass_filtering &&
+ !config_.echo_canceller.mobile_mode;
+ if (submodule_states_.HighPassFilteringRequired() ||
+ high_pass_filter_needed_by_aec) {
+ bool use_full_band = config_.high_pass_filter.apply_in_full_band &&
+ !constants_.enforce_split_band_hpf;
+ int rate = use_full_band ? proc_fullband_sample_rate_hz()
+ : proc_split_sample_rate_hz();
+ size_t num_channels =
+ use_full_band ? num_output_channels() : num_proc_channels();
+
+ if (!submodules_.high_pass_filter ||
+ rate != submodules_.high_pass_filter->sample_rate_hz() ||
+ forced_reset ||
+ num_channels != submodules_.high_pass_filter->num_channels()) {
+ submodules_.high_pass_filter.reset(
+ new HighPassFilter(rate, num_channels));
+ }
+ } else {
+ submodules_.high_pass_filter.reset();
+ }
+}
+
+void AudioProcessingImpl::InitializeEchoController() {
+ bool use_echo_controller =
+ echo_control_factory_ ||
+ (config_.echo_canceller.enabled && !config_.echo_canceller.mobile_mode);
+
+ if (use_echo_controller) {
+ // Create and activate the echo controller.
+ if (echo_control_factory_) {
+ submodules_.echo_controller = echo_control_factory_->Create(
+ proc_sample_rate_hz(), num_reverse_channels(), num_proc_channels());
+ RTC_DCHECK(submodules_.echo_controller);
+ } else {
+ EchoCanceller3Config config;
+ absl::optional<EchoCanceller3Config> multichannel_config;
+ if (use_setup_specific_default_aec3_config_) {
+ multichannel_config = EchoCanceller3::CreateDefaultMultichannelConfig();
+ }
+ submodules_.echo_controller = std::make_unique<EchoCanceller3>(
+ config, multichannel_config, proc_sample_rate_hz(),
+ num_reverse_channels(), num_proc_channels());
+ }
+
+    // Set up the storage for returning the linear AEC output.
+ if (config_.echo_canceller.export_linear_aec_output) {
+ constexpr int kLinearOutputRateHz = 16000;
+ capture_.linear_aec_output = std::make_unique<AudioBuffer>(
+ kLinearOutputRateHz, num_proc_channels(), kLinearOutputRateHz,
+ num_proc_channels(), kLinearOutputRateHz, num_proc_channels());
+ } else {
+ capture_.linear_aec_output.reset();
+ }
+
+ capture_nonlocked_.echo_controller_enabled = true;
+
+ submodules_.echo_control_mobile.reset();
+ aecm_render_signal_queue_.reset();
+ return;
+ }
+
+ submodules_.echo_controller.reset();
+ capture_nonlocked_.echo_controller_enabled = false;
+ capture_.linear_aec_output.reset();
+
+ if (!config_.echo_canceller.enabled) {
+ submodules_.echo_control_mobile.reset();
+ aecm_render_signal_queue_.reset();
+ return;
+ }
+
+ if (config_.echo_canceller.mobile_mode) {
+ // Create and activate AECM.
+ size_t max_element_size =
+ std::max(static_cast<size_t>(1),
+ kMaxAllowedValuesOfSamplesPerBand *
+ EchoControlMobileImpl::NumCancellersRequired(
+ num_output_channels(), num_reverse_channels()));
+
+ std::vector<int16_t> template_queue_element(max_element_size);
+
+ aecm_render_signal_queue_.reset(
+ new SwapQueue<std::vector<int16_t>, RenderQueueItemVerifier<int16_t>>(
+ kMaxNumFramesToBuffer, template_queue_element,
+ RenderQueueItemVerifier<int16_t>(max_element_size)));
+
+ aecm_render_queue_buffer_.resize(max_element_size);
+ aecm_capture_queue_buffer_.resize(max_element_size);
+
+ submodules_.echo_control_mobile.reset(new EchoControlMobileImpl());
+
+ submodules_.echo_control_mobile->Initialize(proc_split_sample_rate_hz(),
+ num_reverse_channels(),
+ num_output_channels());
+ return;
+ }
+
+ submodules_.echo_control_mobile.reset();
+ aecm_render_signal_queue_.reset();
+}
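+// Decision summary for InitializeEchoController(): an injected
+// EchoControlFactory always takes precedence; otherwise a full-band
+// EchoCanceller3 is created when the echo canceller is enabled in non-mobile
+// mode, AECM is created when mobile_mode is set, and all echo control state
+// is torn down when the echo canceller is disabled.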
+
+void AudioProcessingImpl::InitializeGainController1() {
+ if (!config_.gain_controller1.enabled) {
+ submodules_.agc_manager.reset();
+ submodules_.gain_control.reset();
+ return;
+ }
+
+ if (!submodules_.gain_control) {
+ submodules_.gain_control.reset(new GainControlImpl());
+ }
+
+ submodules_.gain_control->Initialize(num_proc_channels(),
+ proc_sample_rate_hz());
+ if (!config_.gain_controller1.analog_gain_controller.enabled) {
+ int error = submodules_.gain_control->set_mode(
+ Agc1ConfigModeToInterfaceMode(config_.gain_controller1.mode));
+ RTC_DCHECK_EQ(kNoError, error);
+ error = submodules_.gain_control->set_target_level_dbfs(
+ config_.gain_controller1.target_level_dbfs);
+ RTC_DCHECK_EQ(kNoError, error);
+ error = submodules_.gain_control->set_compression_gain_db(
+ config_.gain_controller1.compression_gain_db);
+ RTC_DCHECK_EQ(kNoError, error);
+ error = submodules_.gain_control->enable_limiter(
+ config_.gain_controller1.enable_limiter);
+ RTC_DCHECK_EQ(kNoError, error);
+ constexpr int kAnalogLevelMinimum = 0;
+ constexpr int kAnalogLevelMaximum = 255;
+ error = submodules_.gain_control->set_analog_level_limits(
+ kAnalogLevelMinimum, kAnalogLevelMaximum);
+ RTC_DCHECK_EQ(kNoError, error);
+
+ submodules_.agc_manager.reset();
+ return;
+ }
+
+ if (!submodules_.agc_manager.get() ||
+ submodules_.agc_manager->num_channels() !=
+ static_cast<int>(num_proc_channels())) {
+ int stream_analog_level = -1;
+ const bool re_creation = !!submodules_.agc_manager;
+ if (re_creation) {
+ stream_analog_level = submodules_.agc_manager->stream_analog_level();
+ }
+ submodules_.agc_manager.reset(new AgcManagerDirect(
+ num_proc_channels(), config_.gain_controller1.analog_gain_controller));
+ if (re_creation) {
+ submodules_.agc_manager->set_stream_analog_level(stream_analog_level);
+ }
+ }
+ submodules_.agc_manager->Initialize();
+ submodules_.agc_manager->SetupDigitalGainControl(*submodules_.gain_control);
+ submodules_.agc_manager->HandleCaptureOutputUsedChange(
+ capture_.capture_output_used);
+}
+
+void AudioProcessingImpl::InitializeGainController2(bool config_has_changed) {
+ if (!config_has_changed) {
+ return;
+ }
+ if (!config_.gain_controller2.enabled) {
+ submodules_.gain_controller2.reset();
+ return;
+ }
+ if (!submodules_.gain_controller2 || config_has_changed) {
+ const bool use_internal_vad =
+ transient_suppressor_vad_mode_ != TransientSuppressor::VadMode::kRnnVad;
+ submodules_.gain_controller2 = std::make_unique<GainController2>(
+ config_.gain_controller2, proc_fullband_sample_rate_hz(),
+ num_input_channels(), use_internal_vad);
+ }
+}
+
+void AudioProcessingImpl::InitializeVoiceActivityDetector(
+ bool config_has_changed) {
+ if (!config_has_changed) {
+ return;
+ }
+ const bool use_vad =
+ transient_suppressor_vad_mode_ == TransientSuppressor::VadMode::kRnnVad &&
+ config_.gain_controller2.enabled &&
+ config_.gain_controller2.adaptive_digital.enabled;
+ if (!use_vad) {
+ submodules_.voice_activity_detector.reset();
+ return;
+ }
+ if (!submodules_.voice_activity_detector || config_has_changed) {
+ RTC_DCHECK(!!submodules_.gain_controller2);
+ // TODO(bugs.webrtc.org/13663): Cache CPU features in APM and use here.
+ submodules_.voice_activity_detector =
+ std::make_unique<VoiceActivityDetectorWrapper>(
+ config_.gain_controller2.adaptive_digital.vad_reset_period_ms,
+ submodules_.gain_controller2->GetCpuFeatures(),
+ proc_fullband_sample_rate_hz());
+ }
+}
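+// The shared VAD wrapper above is only created when the transient suppressor
+// runs in kRnnVad mode and AGC2 adaptive digital is enabled; its output then
+// feeds both sub-modules, while in other configurations AGC2 relies on its
+// internal VAD (see InitializeGainController2()).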
+
+void AudioProcessingImpl::InitializeNoiseSuppressor() {
+ submodules_.noise_suppressor.reset();
+
+ if (config_.noise_suppression.enabled) {
+ auto map_level =
+ [](AudioProcessing::Config::NoiseSuppression::Level level) {
+          using NoiseSuppressionConfig =
+              AudioProcessing::Config::NoiseSuppression;
+          switch (level) {
+            case NoiseSuppressionConfig::kLow:
+              return NsConfig::SuppressionLevel::k6dB;
+            case NoiseSuppressionConfig::kModerate:
+              return NsConfig::SuppressionLevel::k12dB;
+            case NoiseSuppressionConfig::kHigh:
+              return NsConfig::SuppressionLevel::k18dB;
+            case NoiseSuppressionConfig::kVeryHigh:
+              return NsConfig::SuppressionLevel::k21dB;
+          }
+ RTC_CHECK_NOTREACHED();
+ };
+
+ NsConfig cfg;
+ cfg.target_level = map_level(config_.noise_suppression.level);
+ submodules_.noise_suppressor = std::make_unique<NoiseSuppressor>(
+ cfg, proc_sample_rate_hz(), num_proc_channels());
+ }
+}
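+// For reference, the level mapping above corresponds to: kLow -> 6 dB,
+// kModerate -> 12 dB, kHigh -> 18 dB and kVeryHigh -> 21 dB of suppression.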
+
+void AudioProcessingImpl::InitializeCaptureLevelsAdjuster() {
+ if (config_.pre_amplifier.enabled ||
+ config_.capture_level_adjustment.enabled) {
+ // Use both the pre-amplifier and the capture level adjustment gains as
+ // pre-gains.
+ float pre_gain = 1.f;
+ if (config_.pre_amplifier.enabled) {
+ pre_gain *= config_.pre_amplifier.fixed_gain_factor;
+ }
+ if (config_.capture_level_adjustment.enabled) {
+ pre_gain *= config_.capture_level_adjustment.pre_gain_factor;
+ }
+
+ submodules_.capture_levels_adjuster =
+ std::make_unique<CaptureLevelsAdjuster>(
+ config_.capture_level_adjustment.analog_mic_gain_emulation.enabled,
+ config_.capture_level_adjustment.analog_mic_gain_emulation
+ .initial_level,
+ pre_gain, config_.capture_level_adjustment.post_gain_factor);
+ } else {
+ submodules_.capture_levels_adjuster.reset();
+ }
+}
+
+void AudioProcessingImpl::InitializeResidualEchoDetector() {
+ if (submodules_.echo_detector) {
+ submodules_.echo_detector->Initialize(
+ proc_fullband_sample_rate_hz(), 1,
+ formats_.render_processing_format.sample_rate_hz(), 1);
+ }
+}
+
+void AudioProcessingImpl::InitializeAnalyzer() {
+ if (submodules_.capture_analyzer) {
+ submodules_.capture_analyzer->Initialize(proc_fullband_sample_rate_hz(),
+ num_proc_channels());
+ }
+}
+
+void AudioProcessingImpl::InitializePostProcessor() {
+ if (submodules_.capture_post_processor) {
+ submodules_.capture_post_processor->Initialize(
+ proc_fullband_sample_rate_hz(), num_proc_channels());
+ }
+}
+
+void AudioProcessingImpl::InitializePreProcessor() {
+ if (submodules_.render_pre_processor) {
+ submodules_.render_pre_processor->Initialize(
+ formats_.render_processing_format.sample_rate_hz(),
+ formats_.render_processing_format.num_channels());
+ }
+}
+
+void AudioProcessingImpl::WriteAecDumpConfigMessage(bool forced) {
+ if (!aec_dump_) {
+ return;
+ }
+
+  std::string experiments_description;
+ // TODO(peah): Add semicolon-separated concatenations of experiment
+ // descriptions for other submodules.
+ if (config_.gain_controller1.analog_gain_controller.clipped_level_min !=
+ kClippedLevelMin) {
+ experiments_description += "AgcClippingLevelExperiment;";
+ }
+ if (!!submodules_.capture_post_processor) {
+ experiments_description += "CapturePostProcessor;";
+ }
+ if (!!submodules_.render_pre_processor) {
+ experiments_description += "RenderPreProcessor;";
+ }
+ if (capture_nonlocked_.echo_controller_enabled) {
+ experiments_description += "EchoController;";
+ }
+ if (config_.gain_controller2.enabled) {
+ experiments_description += "GainController2;";
+ }
+
+ InternalAPMConfig apm_config;
+
+ apm_config.aec_enabled = config_.echo_canceller.enabled;
+ apm_config.aec_delay_agnostic_enabled = false;
+ apm_config.aec_extended_filter_enabled = false;
+ apm_config.aec_suppression_level = 0;
+
+ apm_config.aecm_enabled = !!submodules_.echo_control_mobile;
+ apm_config.aecm_comfort_noise_enabled =
+ submodules_.echo_control_mobile &&
+ submodules_.echo_control_mobile->is_comfort_noise_enabled();
+ apm_config.aecm_routing_mode =
+ submodules_.echo_control_mobile
+ ? static_cast<int>(submodules_.echo_control_mobile->routing_mode())
+ : 0;
+
+ apm_config.agc_enabled = !!submodules_.gain_control;
+
+ apm_config.agc_mode = submodules_.gain_control
+ ? static_cast<int>(submodules_.gain_control->mode())
+ : GainControl::kAdaptiveAnalog;
+ apm_config.agc_limiter_enabled =
+ submodules_.gain_control ? submodules_.gain_control->is_limiter_enabled()
+ : false;
+ apm_config.noise_robust_agc_enabled = !!submodules_.agc_manager;
+
+ apm_config.hpf_enabled = config_.high_pass_filter.enabled;
+
+ apm_config.ns_enabled = config_.noise_suppression.enabled;
+ apm_config.ns_level = static_cast<int>(config_.noise_suppression.level);
+
+ apm_config.transient_suppression_enabled =
+ config_.transient_suppression.enabled;
+ apm_config.experiments_description = experiments_description;
+ apm_config.pre_amplifier_enabled = config_.pre_amplifier.enabled;
+ apm_config.pre_amplifier_fixed_gain_factor =
+ config_.pre_amplifier.fixed_gain_factor;
+
+ if (!forced && apm_config == apm_config_for_aec_dump_) {
+ return;
+ }
+ aec_dump_->WriteConfig(apm_config);
+ apm_config_for_aec_dump_ = apm_config;
+}
+
+void AudioProcessingImpl::RecordUnprocessedCaptureStream(
+ const float* const* src) {
+ RTC_DCHECK(aec_dump_);
+ WriteAecDumpConfigMessage(false);
+
+ const size_t channel_size = formats_.api_format.input_stream().num_frames();
+ const size_t num_channels = formats_.api_format.input_stream().num_channels();
+ aec_dump_->AddCaptureStreamInput(
+ AudioFrameView<const float>(src, num_channels, channel_size));
+ RecordAudioProcessingState();
+}
+
+void AudioProcessingImpl::RecordUnprocessedCaptureStream(
+ const int16_t* const data,
+ const StreamConfig& config) {
+ RTC_DCHECK(aec_dump_);
+ WriteAecDumpConfigMessage(false);
+
+ aec_dump_->AddCaptureStreamInput(data, config.num_channels(),
+ config.num_frames());
+ RecordAudioProcessingState();
+}
+
+void AudioProcessingImpl::RecordProcessedCaptureStream(
+ const float* const* processed_capture_stream) {
+ RTC_DCHECK(aec_dump_);
+
+ const size_t channel_size = formats_.api_format.output_stream().num_frames();
+ const size_t num_channels =
+ formats_.api_format.output_stream().num_channels();
+ aec_dump_->AddCaptureStreamOutput(AudioFrameView<const float>(
+ processed_capture_stream, num_channels, channel_size));
+ aec_dump_->WriteCaptureStreamMessage();
+}
+
+void AudioProcessingImpl::RecordProcessedCaptureStream(
+ const int16_t* const data,
+ const StreamConfig& config) {
+ RTC_DCHECK(aec_dump_);
+
+ aec_dump_->AddCaptureStreamOutput(data, config.num_channels(),
+ config.num_frames());
+ aec_dump_->WriteCaptureStreamMessage();
+}
+
+void AudioProcessingImpl::RecordAudioProcessingState() {
+ RTC_DCHECK(aec_dump_);
+ AecDump::AudioProcessingState audio_proc_state;
+ audio_proc_state.delay = capture_nonlocked_.stream_delay_ms;
+ audio_proc_state.drift = 0;
+ audio_proc_state.level = recommended_stream_analog_level_locked();
+ audio_proc_state.keypress = capture_.key_pressed;
+ aec_dump_->AddAudioProcessingState(audio_proc_state);
+}
+
+AudioProcessingImpl::ApmCaptureState::ApmCaptureState()
+ : was_stream_delay_set(false),
+ capture_output_used(true),
+ capture_output_used_last_frame(true),
+ key_pressed(false),
+ capture_processing_format(kSampleRate16kHz),
+ split_rate(kSampleRate16kHz),
+ echo_path_gain_change(false),
+ prev_analog_mic_level(-1),
+ prev_pre_adjustment_gain(-1.f),
+ playout_volume(-1),
+ prev_playout_volume(-1) {}
+
+AudioProcessingImpl::ApmCaptureState::~ApmCaptureState() = default;
+
+AudioProcessingImpl::ApmRenderState::ApmRenderState() = default;
+
+AudioProcessingImpl::ApmRenderState::~ApmRenderState() = default;
+
+AudioProcessingImpl::ApmStatsReporter::ApmStatsReporter()
+ : stats_message_queue_(1) {}
+
+AudioProcessingImpl::ApmStatsReporter::~ApmStatsReporter() = default;
+
+AudioProcessingStats AudioProcessingImpl::ApmStatsReporter::GetStatistics() {
+ MutexLock lock_stats(&mutex_stats_);
+ bool new_stats_available = stats_message_queue_.Remove(&cached_stats_);
+  // If no new stats are available, the cached stats from the last successful
+  // update are returned.
+ static_cast<void>(new_stats_available);
+
+ return cached_stats_;
+}
+
+void AudioProcessingImpl::ApmStatsReporter::UpdateStatistics(
+ const AudioProcessingStats& new_stats) {
+ AudioProcessingStats stats_to_queue = new_stats;
+ bool stats_message_passed = stats_message_queue_.Insert(&stats_to_queue);
+ // If the message queue is full, discard the new stats.
+ static_cast<void>(stats_message_passed);
+}
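+// The reporter above uses a length-1 SwapQueue as a single-slot mailbox: the
+// capture thread posts the latest stats with Insert(), dropping them if the
+// previous post has not yet been consumed, and readers drain the slot with
+// Remove() under `mutex_stats_`. This keeps the capture path from ever
+// blocking on statistics readers.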
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/audio_processing_impl.h b/third_party/libwebrtc/modules/audio_processing/audio_processing_impl.h
new file mode 100644
index 0000000000..20135def1e
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/audio_processing_impl.h
@@ -0,0 +1,549 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AUDIO_PROCESSING_IMPL_H_
+#define MODULES_AUDIO_PROCESSING_AUDIO_PROCESSING_IMPL_H_
+
+#include <stdio.h>
+
+#include <atomic>
+#include <list>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "absl/strings/string_view.h"
+#include "api/array_view.h"
+#include "api/function_view.h"
+#include "modules/audio_processing/aec3/echo_canceller3.h"
+#include "modules/audio_processing/agc/agc_manager_direct.h"
+#include "modules/audio_processing/agc/analog_gain_stats_reporter.h"
+#include "modules/audio_processing/agc/gain_control.h"
+#include "modules/audio_processing/audio_buffer.h"
+#include "modules/audio_processing/capture_levels_adjuster/capture_levels_adjuster.h"
+#include "modules/audio_processing/echo_control_mobile_impl.h"
+#include "modules/audio_processing/gain_control_impl.h"
+#include "modules/audio_processing/gain_controller2.h"
+#include "modules/audio_processing/high_pass_filter.h"
+#include "modules/audio_processing/include/aec_dump.h"
+#include "modules/audio_processing/include/audio_frame_proxies.h"
+#include "modules/audio_processing/include/audio_processing.h"
+#include "modules/audio_processing/include/audio_processing_statistics.h"
+#include "modules/audio_processing/ns/noise_suppressor.h"
+#include "modules/audio_processing/optionally_built_submodule_creators.h"
+#include "modules/audio_processing/render_queue_item_verifier.h"
+#include "modules/audio_processing/rms_level.h"
+#include "modules/audio_processing/transient/transient_suppressor.h"
+#include "rtc_base/gtest_prod_util.h"
+#include "rtc_base/ignore_wundef.h"
+#include "rtc_base/swap_queue.h"
+#include "rtc_base/synchronization/mutex.h"
+#include "rtc_base/thread_annotations.h"
+
+namespace webrtc {
+
+class ApmDataDumper;
+class AudioConverter;
+
+constexpr int RuntimeSettingQueueSize() {
+ return 100;
+}
+
+class AudioProcessingImpl : public AudioProcessing {
+ public:
+ // Methods forcing APM to run in a single-threaded manner.
+ // Acquires both the render and capture locks.
+ AudioProcessingImpl();
+ AudioProcessingImpl(const AudioProcessing::Config& config,
+ std::unique_ptr<CustomProcessing> capture_post_processor,
+ std::unique_ptr<CustomProcessing> render_pre_processor,
+ std::unique_ptr<EchoControlFactory> echo_control_factory,
+ rtc::scoped_refptr<EchoDetector> echo_detector,
+ std::unique_ptr<CustomAudioAnalyzer> capture_analyzer);
+ ~AudioProcessingImpl() override;
+ int Initialize() override;
+ int Initialize(const ProcessingConfig& processing_config) override;
+ void ApplyConfig(const AudioProcessing::Config& config) override;
+ bool CreateAndAttachAecDump(absl::string_view file_name,
+ int64_t max_log_size_bytes,
+ rtc::TaskQueue* worker_queue) override;
+ bool CreateAndAttachAecDump(FILE* handle,
+ int64_t max_log_size_bytes,
+ rtc::TaskQueue* worker_queue) override;
+ // TODO(webrtc:5298) Deprecated variant.
+ void AttachAecDump(std::unique_ptr<AecDump> aec_dump) override;
+ void DetachAecDump() override;
+ void SetRuntimeSetting(RuntimeSetting setting) override;
+ bool PostRuntimeSetting(RuntimeSetting setting) override;
+
+ // Capture-side exclusive methods possibly running APM in a
+ // multi-threaded manner. Acquire the capture lock.
+ int ProcessStream(const int16_t* const src,
+ const StreamConfig& input_config,
+ const StreamConfig& output_config,
+ int16_t* const dest) override;
+ int ProcessStream(const float* const* src,
+ const StreamConfig& input_config,
+ const StreamConfig& output_config,
+ float* const* dest) override;
+ bool GetLinearAecOutput(
+ rtc::ArrayView<std::array<float, 160>> linear_output) const override;
+ void set_output_will_be_muted(bool muted) override;
+ void HandleCaptureOutputUsedSetting(bool capture_output_used)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_capture_);
+ int set_stream_delay_ms(int delay) override;
+ void set_stream_key_pressed(bool key_pressed) override;
+ void set_stream_analog_level(int level) override;
+ int recommended_stream_analog_level() const
+ RTC_LOCKS_EXCLUDED(mutex_capture_) override;
+
+ // Render-side exclusive methods possibly running APM in a
+ // multi-threaded manner. Acquire the render lock.
+ int ProcessReverseStream(const int16_t* const src,
+ const StreamConfig& input_config,
+ const StreamConfig& output_config,
+ int16_t* const dest) override;
+ int AnalyzeReverseStream(const float* const* data,
+ const StreamConfig& reverse_config) override;
+ int ProcessReverseStream(const float* const* src,
+ const StreamConfig& input_config,
+ const StreamConfig& output_config,
+ float* const* dest) override;
+
+ // Methods only accessed from APM submodules or
+ // from AudioProcessing tests in a single-threaded manner.
+ // Hence there is no need for locks in these.
+ int proc_sample_rate_hz() const override;
+ int proc_split_sample_rate_hz() const override;
+ size_t num_input_channels() const override;
+ size_t num_proc_channels() const override;
+ size_t num_output_channels() const override;
+ size_t num_reverse_channels() const override;
+ int stream_delay_ms() const override;
+
+ AudioProcessingStats GetStatistics(bool has_remote_tracks) override {
+ return GetStatistics();
+ }
+ AudioProcessingStats GetStatistics() override {
+ return stats_reporter_.GetStatistics();
+ }
+
+ AudioProcessing::Config GetConfig() const override;
+
+ protected:
+ // Overridden in a mock.
+ virtual void InitializeLocked()
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_render_, mutex_capture_);
+ void AssertLockedForTest()
+ RTC_ASSERT_EXCLUSIVE_LOCK(mutex_render_, mutex_capture_) {
+ mutex_render_.AssertHeld();
+ mutex_capture_.AssertHeld();
+ }
+
+ private:
+ // TODO(peah): These friend classes should be removed as soon as the new
+ // parameter setting scheme allows.
+ FRIEND_TEST_ALL_PREFIXES(ApmConfiguration, DefaultBehavior);
+ FRIEND_TEST_ALL_PREFIXES(ApmConfiguration, ValidConfigBehavior);
+ FRIEND_TEST_ALL_PREFIXES(ApmConfiguration, InValidConfigBehavior);
+ FRIEND_TEST_ALL_PREFIXES(ApmWithSubmodulesExcludedTest,
+ ToggleTransientSuppressor);
+ FRIEND_TEST_ALL_PREFIXES(ApmWithSubmodulesExcludedTest,
+ ReinitializeTransientSuppressor);
+ FRIEND_TEST_ALL_PREFIXES(ApmWithSubmodulesExcludedTest,
+ BitexactWithDisabledModules);
+
+ int recommended_stream_analog_level_locked() const
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_capture_);
+
+ void OverrideSubmoduleCreationForTesting(
+ const ApmSubmoduleCreationOverrides& overrides);
+
+ // Class providing thread-safe message pipe functionality for
+ // `runtime_settings_`.
+ class RuntimeSettingEnqueuer {
+ public:
+ explicit RuntimeSettingEnqueuer(
+ SwapQueue<RuntimeSetting>* runtime_settings);
+ ~RuntimeSettingEnqueuer();
+
+ // Enqueue setting and return whether the setting was successfully enqueued.
+ bool Enqueue(RuntimeSetting setting);
+
+ private:
+ SwapQueue<RuntimeSetting>& runtime_settings_;
+ };
+
+ const std::unique_ptr<ApmDataDumper> data_dumper_;
+ static std::atomic<int> instance_count_;
+ const bool use_setup_specific_default_aec3_config_;
+
+ const bool use_denormal_disabler_;
+
+ const TransientSuppressor::VadMode transient_suppressor_vad_mode_;
+
+ SwapQueue<RuntimeSetting> capture_runtime_settings_;
+ SwapQueue<RuntimeSetting> render_runtime_settings_;
+
+ RuntimeSettingEnqueuer capture_runtime_settings_enqueuer_;
+ RuntimeSettingEnqueuer render_runtime_settings_enqueuer_;
+
+ // EchoControl factory.
+ const std::unique_ptr<EchoControlFactory> echo_control_factory_;
+
+ class SubmoduleStates {
+ public:
+ SubmoduleStates(bool capture_post_processor_enabled,
+ bool render_pre_processor_enabled,
+ bool capture_analyzer_enabled);
+ // Updates the submodule state and returns true if it has changed.
+ bool Update(bool high_pass_filter_enabled,
+ bool mobile_echo_controller_enabled,
+ bool noise_suppressor_enabled,
+ bool adaptive_gain_controller_enabled,
+ bool gain_controller2_enabled,
+ bool voice_activity_detector_enabled,
+ bool gain_adjustment_enabled,
+ bool echo_controller_enabled,
+ bool transient_suppressor_enabled);
+ bool CaptureMultiBandSubModulesActive() const;
+ bool CaptureMultiBandProcessingPresent() const;
+ bool CaptureMultiBandProcessingActive(bool ec_processing_active) const;
+ bool CaptureFullBandProcessingActive() const;
+ bool CaptureAnalyzerActive() const;
+ bool RenderMultiBandSubModulesActive() const;
+ bool RenderFullBandProcessingActive() const;
+ bool RenderMultiBandProcessingActive() const;
+ bool HighPassFilteringRequired() const;
+
+ private:
+ const bool capture_post_processor_enabled_ = false;
+ const bool render_pre_processor_enabled_ = false;
+ const bool capture_analyzer_enabled_ = false;
+ bool high_pass_filter_enabled_ = false;
+ bool mobile_echo_controller_enabled_ = false;
+ bool noise_suppressor_enabled_ = false;
+ bool adaptive_gain_controller_enabled_ = false;
+ bool voice_activity_detector_enabled_ = false;
+ bool gain_controller2_enabled_ = false;
+ bool gain_adjustment_enabled_ = false;
+ bool echo_controller_enabled_ = false;
+ bool transient_suppressor_enabled_ = false;
+ bool first_update_ = true;
+ };
+
+  // Methods for modifying the formats struct that is used by both
+  // the render and capture threads. The check for whether modifications are
+  // needed is done while holding a single lock only, so that the capture
+  // thread does not block the render thread.
+  // Called by render: Holds the render lock when reading the format struct
+  // and acquires both locks if reinitialization is required.
+ int MaybeInitializeRender(const ProcessingConfig& processing_config)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_render_);
+ // Called by capture: Holds the capture lock when reading the format struct
+ // and acquires both locks if reinitialization is needed.
+ int MaybeInitializeCapture(const StreamConfig& input_config,
+ const StreamConfig& output_config);
+
+ // Method for updating the state keeping track of the active submodules.
+ // Returns a bool indicating whether the state has changed.
+ bool UpdateActiveSubmoduleStates()
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_capture_);
+
+ // Methods requiring APM running in a single-threaded manner, requiring both
+ // the render and capture lock to be acquired.
+ int InitializeLocked(const ProcessingConfig& config)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_render_, mutex_capture_);
+ void InitializeResidualEchoDetector()
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_render_, mutex_capture_);
+ void InitializeEchoController()
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_render_, mutex_capture_);
+
+ // Initializations of capture-only sub-modules, requiring the capture lock
+ // already acquired.
+ void InitializeHighPassFilter(bool forced_reset)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_capture_);
+ void InitializeGainController1() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_capture_);
+ void InitializeTransientSuppressor()
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_capture_);
+ // Initializes the `GainController2` sub-module. If the sub-module is enabled
+ // and `config_has_changed` is true, recreates the sub-module.
+ void InitializeGainController2(bool config_has_changed)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_capture_);
+ // Initializes the `VoiceActivityDetectorWrapper` sub-module. If the
+ // sub-module is enabled and `config_has_changed` is true, recreates the
+ // sub-module.
+ void InitializeVoiceActivityDetector(bool config_has_changed)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_capture_);
+ void InitializeNoiseSuppressor() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_capture_);
+ void InitializeCaptureLevelsAdjuster()
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_capture_);
+ void InitializePostProcessor() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_capture_);
+ void InitializeAnalyzer() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_capture_);
+
+ // Initializations of render-only submodules, requiring the render lock
+ // already acquired.
+ void InitializePreProcessor() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_render_);
+
+ // Sample rate used for the fullband processing.
+ int proc_fullband_sample_rate_hz() const
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_capture_);
+
+ // Empties and handles the respective RuntimeSetting queues.
+ void HandleCaptureRuntimeSettings()
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_capture_);
+ void HandleRenderRuntimeSettings()
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_render_);
+
+ void EmptyQueuedRenderAudio() RTC_LOCKS_EXCLUDED(mutex_capture_);
+ void EmptyQueuedRenderAudioLocked()
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_capture_);
+ void AllocateRenderQueue()
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_render_, mutex_capture_);
+ void QueueBandedRenderAudio(AudioBuffer* audio)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_render_);
+ void QueueNonbandedRenderAudio(AudioBuffer* audio)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_render_);
+
+  // Capture-side exclusive methods possibly running APM in a multi-threaded
+  // manner that are called with the capture lock already acquired.
+ int ProcessCaptureStreamLocked() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_capture_);
+
+ // Render-side exclusive methods possibly running APM in a multi-threaded
+ // manner that are called with the render lock already acquired.
+ // TODO(ekm): Remove once all clients updated to new interface.
+ int AnalyzeReverseStreamLocked(const float* const* src,
+ const StreamConfig& input_config,
+ const StreamConfig& output_config)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_render_);
+ int ProcessRenderStreamLocked() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_render_);
+
+  // Collects configuration settings from public and private
+  // submodules to be saved as an audioproc::Config message on the
+  // AecDump if it is attached. If not `forced`, the current config is
+  // written only if it differs from the last saved one; if `forced`, the
+  // config is written regardless of the last saved one.
+ void WriteAecDumpConfigMessage(bool forced)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_capture_);
+
+ // Notifies attached AecDump of current configuration and capture data.
+ void RecordUnprocessedCaptureStream(const float* const* capture_stream)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_capture_);
+
+ void RecordUnprocessedCaptureStream(const int16_t* const data,
+ const StreamConfig& config)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_capture_);
+
+ // Notifies attached AecDump of current configuration and
+ // processed capture data and issues a capture stream recording
+ // request.
+ void RecordProcessedCaptureStream(
+ const float* const* processed_capture_stream)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_capture_);
+
+ void RecordProcessedCaptureStream(const int16_t* const data,
+ const StreamConfig& config)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_capture_);
+
+ // Notifies attached AecDump about current state (delay, drift, etc).
+ void RecordAudioProcessingState()
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_capture_);
+
+  // Ensures that overruns in the capture runtime settings queue are properly
+  // handled by the code, providing safe fallbacks to mitigate the
+  // implications of any settings being missed.
+ void HandleOverrunInCaptureRuntimeSettingsQueue()
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_capture_);
+
+ // AecDump instance used for optionally logging APM config, input
+ // and output to file in the AEC-dump format defined in debug.proto.
+ std::unique_ptr<AecDump> aec_dump_;
+
+  // Holds the last config written with AecDump, to avoid writing
+  // the same config twice.
+ InternalAPMConfig apm_config_for_aec_dump_ RTC_GUARDED_BY(mutex_capture_);
+
+ // Critical sections.
+ mutable Mutex mutex_render_ RTC_ACQUIRED_BEFORE(mutex_capture_);
+ mutable Mutex mutex_capture_;
+
+ // Struct containing the Config specifying the behavior of APM.
+ AudioProcessing::Config config_;
+
+ // Overrides for testing the exclusion of some submodules from the build.
+ ApmSubmoduleCreationOverrides submodule_creation_overrides_
+ RTC_GUARDED_BY(mutex_capture_);
+
+ // Class containing information about what submodules are active.
+ SubmoduleStates submodule_states_;
+
+ // Struct containing the pointers to the submodules.
+ struct Submodules {
+ Submodules(std::unique_ptr<CustomProcessing> capture_post_processor,
+ std::unique_ptr<CustomProcessing> render_pre_processor,
+ rtc::scoped_refptr<EchoDetector> echo_detector,
+ std::unique_ptr<CustomAudioAnalyzer> capture_analyzer)
+ : echo_detector(std::move(echo_detector)),
+ capture_post_processor(std::move(capture_post_processor)),
+ render_pre_processor(std::move(render_pre_processor)),
+ capture_analyzer(std::move(capture_analyzer)) {}
+ // Accessed internally from capture or during initialization.
+ const rtc::scoped_refptr<EchoDetector> echo_detector;
+ const std::unique_ptr<CustomProcessing> capture_post_processor;
+ const std::unique_ptr<CustomProcessing> render_pre_processor;
+ const std::unique_ptr<CustomAudioAnalyzer> capture_analyzer;
+ std::unique_ptr<AgcManagerDirect> agc_manager;
+ std::unique_ptr<GainControlImpl> gain_control;
+ std::unique_ptr<GainController2> gain_controller2;
+ std::unique_ptr<VoiceActivityDetectorWrapper> voice_activity_detector;
+ std::unique_ptr<HighPassFilter> high_pass_filter;
+ std::unique_ptr<EchoControl> echo_controller;
+ std::unique_ptr<EchoControlMobileImpl> echo_control_mobile;
+ std::unique_ptr<NoiseSuppressor> noise_suppressor;
+ std::unique_ptr<TransientSuppressor> transient_suppressor;
+ std::unique_ptr<CaptureLevelsAdjuster> capture_levels_adjuster;
+ } submodules_;
+
+  // State that is written to while holding both the render and capture locks
+  // but can be read without any lock being held.
+  // As this state is only accessed internally within APM, and every internal
+  // APM method holds either the render or the capture lock, this construct
+  // is safe: the variables cannot be read while they are being written.
+ struct ApmFormatState {
+ ApmFormatState()
+ : // Format of processing streams at input/output call sites.
+ api_format({{{kSampleRate16kHz, 1},
+ {kSampleRate16kHz, 1},
+ {kSampleRate16kHz, 1},
+ {kSampleRate16kHz, 1}}}),
+ render_processing_format(kSampleRate16kHz, 1) {}
+ ProcessingConfig api_format;
+ StreamConfig render_processing_format;
+ } formats_;
+
+ // APM constants.
+ const struct ApmConstants {
+ ApmConstants(bool multi_channel_render_support,
+ bool multi_channel_capture_support,
+ bool enforce_split_band_hpf,
+ bool minimize_processing_for_unused_output,
+ bool transient_suppressor_forced_off)
+ : multi_channel_render_support(multi_channel_render_support),
+ multi_channel_capture_support(multi_channel_capture_support),
+ enforce_split_band_hpf(enforce_split_band_hpf),
+ minimize_processing_for_unused_output(
+ minimize_processing_for_unused_output),
+ transient_suppressor_forced_off(transient_suppressor_forced_off) {}
+ bool multi_channel_render_support;
+ bool multi_channel_capture_support;
+ bool enforce_split_band_hpf;
+ bool minimize_processing_for_unused_output;
+ bool transient_suppressor_forced_off;
+ } constants_;
+
+ struct ApmCaptureState {
+ ApmCaptureState();
+ ~ApmCaptureState();
+ bool was_stream_delay_set;
+ bool capture_output_used;
+ bool capture_output_used_last_frame;
+ bool key_pressed;
+ std::unique_ptr<AudioBuffer> capture_audio;
+ std::unique_ptr<AudioBuffer> capture_fullband_audio;
+ std::unique_ptr<AudioBuffer> linear_aec_output;
+    // Only the rate and samples fields of capture_processing_format are
+    // used, because the number of capture processing channels is mutable and
+    // is tracked by capture_audio.
+ StreamConfig capture_processing_format;
+ int split_rate;
+ bool echo_path_gain_change;
+ int prev_analog_mic_level;
+ float prev_pre_adjustment_gain;
+ int playout_volume;
+ int prev_playout_volume;
+ AudioProcessingStats stats;
+ int cached_stream_analog_level_ = 0;
+ } capture_ RTC_GUARDED_BY(mutex_capture_);
+
+ struct ApmCaptureNonLockedState {
+ ApmCaptureNonLockedState()
+ : capture_processing_format(kSampleRate16kHz),
+ split_rate(kSampleRate16kHz),
+ stream_delay_ms(0) {}
+    // Only the rate and samples fields of capture_processing_format are
+    // used, because the number of capture processing channels is mutable and
+    // is tracked by capture_audio.
+ StreamConfig capture_processing_format;
+ int split_rate;
+ int stream_delay_ms;
+ bool echo_controller_enabled = false;
+ } capture_nonlocked_;
+
+ struct ApmRenderState {
+ ApmRenderState();
+ ~ApmRenderState();
+ std::unique_ptr<AudioConverter> render_converter;
+ std::unique_ptr<AudioBuffer> render_audio;
+ } render_ RTC_GUARDED_BY(mutex_render_);
+
+ // Class for statistics reporting. The class is thread-safe and no lock is
+ // needed when accessing it.
+ class ApmStatsReporter {
+ public:
+ ApmStatsReporter();
+ ~ApmStatsReporter();
+
+ // Returns the most recently reported statistics.
+ AudioProcessingStats GetStatistics();
+
+ // Update the cached statistics.
+ void UpdateStatistics(const AudioProcessingStats& new_stats);
+
+ private:
+ Mutex mutex_stats_;
+ AudioProcessingStats cached_stats_ RTC_GUARDED_BY(mutex_stats_);
+ SwapQueue<AudioProcessingStats> stats_message_queue_;
+ } stats_reporter_;
+
+ std::vector<int16_t> aecm_render_queue_buffer_ RTC_GUARDED_BY(mutex_render_);
+ std::vector<int16_t> aecm_capture_queue_buffer_
+ RTC_GUARDED_BY(mutex_capture_);
+
+ size_t agc_render_queue_element_max_size_ RTC_GUARDED_BY(mutex_render_)
+ RTC_GUARDED_BY(mutex_capture_) = 0;
+ std::vector<int16_t> agc_render_queue_buffer_ RTC_GUARDED_BY(mutex_render_);
+ std::vector<int16_t> agc_capture_queue_buffer_ RTC_GUARDED_BY(mutex_capture_);
+
+ size_t red_render_queue_element_max_size_ RTC_GUARDED_BY(mutex_render_)
+ RTC_GUARDED_BY(mutex_capture_) = 0;
+ std::vector<float> red_render_queue_buffer_ RTC_GUARDED_BY(mutex_render_);
+ std::vector<float> red_capture_queue_buffer_ RTC_GUARDED_BY(mutex_capture_);
+
+ RmsLevel capture_input_rms_ RTC_GUARDED_BY(mutex_capture_);
+ RmsLevel capture_output_rms_ RTC_GUARDED_BY(mutex_capture_);
+ int capture_rms_interval_counter_ RTC_GUARDED_BY(mutex_capture_) = 0;
+
+ AnalogGainStatsReporter analog_gain_stats_reporter_
+ RTC_GUARDED_BY(mutex_capture_);
+
+ // Lock protection not needed.
+ std::unique_ptr<
+ SwapQueue<std::vector<int16_t>, RenderQueueItemVerifier<int16_t>>>
+ aecm_render_signal_queue_;
+ std::unique_ptr<
+ SwapQueue<std::vector<int16_t>, RenderQueueItemVerifier<int16_t>>>
+ agc_render_signal_queue_;
+ std::unique_ptr<SwapQueue<std::vector<float>, RenderQueueItemVerifier<float>>>
+ red_render_signal_queue_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AUDIO_PROCESSING_IMPL_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/audio_processing_impl_locking_unittest.cc b/third_party/libwebrtc/modules/audio_processing/audio_processing_impl_locking_unittest.cc
new file mode 100644
index 0000000000..7557e919d6
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/audio_processing_impl_locking_unittest.cc
@@ -0,0 +1,1012 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <algorithm>
+#include <memory>
+#include <vector>
+
+#include "api/array_view.h"
+#include "modules/audio_processing/audio_processing_impl.h"
+#include "modules/audio_processing/test/audio_processing_builder_for_testing.h"
+#include "modules/audio_processing/test/test_utils.h"
+#include "rtc_base/event.h"
+#include "rtc_base/platform_thread.h"
+#include "rtc_base/random.h"
+#include "rtc_base/synchronization/mutex.h"
+#include "system_wrappers/include/sleep.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+constexpr int kMaxFrameSize = 480;
+constexpr int kTestTimeOutLimit = 10 * 60 * 1000;  // 10 minutes, in ms.
+
+class AudioProcessingImplLockTest;
+
+// Type of the render thread APM API call to use in the test.
+enum class RenderApiImpl {
+ ProcessReverseStreamImplInteger,
+ ProcessReverseStreamImplFloat,
+ AnalyzeReverseStreamImplFloat,
+};
+
+// Type of the capture thread APM API call to use in the test.
+enum class CaptureApiImpl { ProcessStreamImplInteger, ProcessStreamImplFloat };
+
+// The runtime parameter setting scheme to use in the test.
+enum class RuntimeParameterSettingScheme {
+ SparseStreamMetadataChangeScheme,
+ ExtremeStreamMetadataChangeScheme,
+ FixedMonoStreamMetadataScheme,
+ FixedStereoStreamMetadataScheme
+};
+
+// Variant of echo canceller settings to use in the test.
+enum class AecType {
+ BasicWebRtcAecSettings,
+ AecTurnedOff,
+ BasicWebRtcAecSettingsWithExtentedFilter,
+ BasicWebRtcAecSettingsWithDelayAgnosticAec,
+ BasicWebRtcAecSettingsWithAecMobile
+};
+
+// Thread-safe random number generator wrapper.
+class RandomGenerator {
+ public:
+ RandomGenerator() : rand_gen_(42U) {}
+
+ int RandInt(int min, int max) {
+ MutexLock lock(&mutex_);
+ return rand_gen_.Rand(min, max);
+ }
+
+ int RandInt(int max) {
+ MutexLock lock(&mutex_);
+ return rand_gen_.Rand(max);
+ }
+
+ float RandFloat() {
+ MutexLock lock(&mutex_);
+ return rand_gen_.Rand<float>();
+ }
+
+ private:
+ Mutex mutex_;
+ Random rand_gen_ RTC_GUARDED_BY(mutex_);
+};
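+// Minimal usage sketch (illustrative): the generator can be shared across the
+// render and capture threads without external synchronization, e.g.
+//
+//   RandomGenerator rand_gen;
+//   int sample_rate_choice = rand_gen.RandInt(0, 2);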
+
+// Variables related to the audio data and formats.
+struct AudioFrameData {
+ explicit AudioFrameData(int max_frame_size) {
+ // Set up the two-dimensional arrays needed for the APM API calls.
+ input_framechannels.resize(2 * max_frame_size);
+ input_frame.resize(2);
+ input_frame[0] = &input_framechannels[0];
+ input_frame[1] = &input_framechannels[max_frame_size];
+
+ output_frame_channels.resize(2 * max_frame_size);
+ output_frame.resize(2);
+ output_frame[0] = &output_frame_channels[0];
+ output_frame[1] = &output_frame_channels[max_frame_size];
+
+ frame.resize(2 * max_frame_size);
+ }
+
+ std::vector<int16_t> frame;
+
+ std::vector<float*> output_frame;
+ std::vector<float> output_frame_channels;
+ std::vector<float*> input_frame;
+  std::vector<float> input_frame_channels;
+
+ int input_sample_rate_hz = 16000;
+ int input_number_of_channels = 1;
+ int output_sample_rate_hz = 16000;
+ int output_number_of_channels = 1;
+};
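+
+// A minimal sketch of how these buffers feed the two APM interfaces (for
+// illustration only; `apm`, the 480-sample frame size and the 48 kHz stereo
+// config below are assumptions, not part of the test):
+//
+//   AudioFrameData data(480);
+//   StreamConfig cfg(48000, 2);
+//   // Float (planar, one pointer per channel) interface:
+//   apm->ProcessStream(data.input_frame.data(), cfg, cfg,
+//                      data.output_frame.data());
+//   // Integer (interleaved int16_t) interface, processing in place:
+//   apm->ProcessStream(data.frame.data(), cfg, cfg, data.frame.data());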
+
+// The configuration for the test.
+struct TestConfig {
+ // Test case generator for the test configurations to use in the brief tests.
+ static std::vector<TestConfig> GenerateBriefTestConfigs() {
+ std::vector<TestConfig> test_configs;
+ AecType aec_types[] = {AecType::BasicWebRtcAecSettingsWithDelayAgnosticAec,
+ AecType::BasicWebRtcAecSettingsWithAecMobile};
+ for (auto aec_type : aec_types) {
+ TestConfig test_config;
+ test_config.aec_type = aec_type;
+
+ test_config.min_number_of_calls = 300;
+
+ // Perform tests only with the extreme runtime parameter setting scheme.
+ test_config.runtime_parameter_setting_scheme =
+ RuntimeParameterSettingScheme::ExtremeStreamMetadataChangeScheme;
+
+ // Only test 16 kHz for this test suite.
+ test_config.initial_sample_rate_hz = 16000;
+
+ // Create test config for the Int16 processing API function set.
+ test_config.render_api_function =
+ RenderApiImpl::ProcessReverseStreamImplInteger;
+ test_config.capture_api_function =
+ CaptureApiImpl::ProcessStreamImplInteger;
+ test_configs.push_back(test_config);
+
+ // Create test config for the StreamConfig processing API function set.
+ test_config.render_api_function =
+ RenderApiImpl::ProcessReverseStreamImplFloat;
+ test_config.capture_api_function = CaptureApiImpl::ProcessStreamImplFloat;
+ test_configs.push_back(test_config);
+ }
+
+ // Return the created test configurations.
+ return test_configs;
+ }
+
+ // Test case generator for the test configurations to use in the extensive
+ // tests.
+ static std::vector<TestConfig> GenerateExtensiveTestConfigs() {
+ // Lambda functions for the test config generation.
+ auto add_processing_apis = [](TestConfig test_config) {
+ struct AllowedApiCallCombinations {
+ RenderApiImpl render_api;
+ CaptureApiImpl capture_api;
+ };
+
+ const AllowedApiCallCombinations api_calls[] = {
+ {RenderApiImpl::ProcessReverseStreamImplInteger,
+ CaptureApiImpl::ProcessStreamImplInteger},
+ {RenderApiImpl::ProcessReverseStreamImplFloat,
+ CaptureApiImpl::ProcessStreamImplFloat},
+ {RenderApiImpl::AnalyzeReverseStreamImplFloat,
+ CaptureApiImpl::ProcessStreamImplFloat},
+ {RenderApiImpl::ProcessReverseStreamImplInteger,
+ CaptureApiImpl::ProcessStreamImplFloat},
+ {RenderApiImpl::ProcessReverseStreamImplFloat,
+ CaptureApiImpl::ProcessStreamImplInteger}};
+ std::vector<TestConfig> out;
+ for (auto api_call : api_calls) {
+ test_config.render_api_function = api_call.render_api;
+ test_config.capture_api_function = api_call.capture_api;
+ out.push_back(test_config);
+ }
+ return out;
+ };
+
+ auto add_aec_settings = [](const std::vector<TestConfig>& in) {
+ std::vector<TestConfig> out;
+ AecType aec_types[] = {
+ AecType::BasicWebRtcAecSettings, AecType::AecTurnedOff,
+          AecType::BasicWebRtcAecSettingsWithExtendedFilter,
+ AecType::BasicWebRtcAecSettingsWithDelayAgnosticAec,
+ AecType::BasicWebRtcAecSettingsWithAecMobile};
+ for (auto test_config : in) {
+      // Due to a VisualStudio 2015 compiler issue, the inner loop variable
+      // here cannot shadow a previously defined name. In other words, "type"
+      // cannot be named "aec_type" here.
+ // https://connect.microsoft.com/VisualStudio/feedback/details/2291755
+ for (auto type : aec_types) {
+ test_config.aec_type = type;
+ out.push_back(test_config);
+ }
+ }
+ return out;
+ };
+
+ auto add_settings_scheme = [](const std::vector<TestConfig>& in) {
+ std::vector<TestConfig> out;
+ RuntimeParameterSettingScheme schemes[] = {
+ RuntimeParameterSettingScheme::SparseStreamMetadataChangeScheme,
+ RuntimeParameterSettingScheme::ExtremeStreamMetadataChangeScheme,
+ RuntimeParameterSettingScheme::FixedMonoStreamMetadataScheme,
+ RuntimeParameterSettingScheme::FixedStereoStreamMetadataScheme};
+
+ for (auto test_config : in) {
+ for (auto scheme : schemes) {
+ test_config.runtime_parameter_setting_scheme = scheme;
+ out.push_back(test_config);
+ }
+ }
+ return out;
+ };
+
+ auto add_sample_rates = [](const std::vector<TestConfig>& in) {
+ const int sample_rates[] = {8000, 16000, 32000, 48000};
+
+ std::vector<TestConfig> out;
+ for (auto test_config : in) {
+ auto available_rates =
+ (test_config.aec_type ==
+ AecType::BasicWebRtcAecSettingsWithAecMobile
+ ? rtc::ArrayView<const int>(sample_rates, 2)
+ : rtc::ArrayView<const int>(sample_rates));
+
+ for (auto rate : available_rates) {
+ test_config.initial_sample_rate_hz = rate;
+ out.push_back(test_config);
+ }
+ }
+ return out;
+ };
+
+    // Generate test configurations for the relevant combinations of the
+    // parameters to test.
+ TestConfig test_config;
+ test_config.min_number_of_calls = 10000;
+ return add_sample_rates(add_settings_scheme(
+ add_aec_settings(add_processing_apis(test_config))));
+ }
+
+ RenderApiImpl render_api_function =
+ RenderApiImpl::ProcessReverseStreamImplFloat;
+ CaptureApiImpl capture_api_function = CaptureApiImpl::ProcessStreamImplFloat;
+ RuntimeParameterSettingScheme runtime_parameter_setting_scheme =
+ RuntimeParameterSettingScheme::ExtremeStreamMetadataChangeScheme;
+ int initial_sample_rate_hz = 16000;
+ AecType aec_type = AecType::BasicWebRtcAecSettingsWithDelayAgnosticAec;
+ int min_number_of_calls = 300;
+};
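+
+// A quick tally of the configuration space generated above: the extensive set
+// is 5 (API combinations) x 5 (AEC types) x 4 (setting schemes) x 4 sample
+// rates (2 for AEC mobile), each run for at least 10000 calls, while the
+// brief set covers 2 AEC types x 2 API sets at 16 kHz for at least 300 calls.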
+
+// Handler for the frame counters.
+class FrameCounters {
+ public:
+ void IncreaseRenderCounter() {
+ MutexLock lock(&mutex_);
+ render_count++;
+ }
+
+ void IncreaseCaptureCounter() {
+ MutexLock lock(&mutex_);
+ capture_count++;
+ }
+
+ int GetCaptureCounter() const {
+ MutexLock lock(&mutex_);
+ return capture_count;
+ }
+
+ int GetRenderCounter() const {
+ MutexLock lock(&mutex_);
+ return render_count;
+ }
+
+ int CaptureMinusRenderCounters() const {
+ MutexLock lock(&mutex_);
+ return capture_count - render_count;
+ }
+
+ int RenderMinusCaptureCounters() const {
+ return -CaptureMinusRenderCounters();
+ }
+
+  bool BothCountersExceedThreshold(int threshold) {
+ MutexLock lock(&mutex_);
+ return (render_count > threshold && capture_count > threshold);
+ }
+
+ private:
+ mutable Mutex mutex_;
+ int render_count RTC_GUARDED_BY(mutex_) = 0;
+ int capture_count RTC_GUARDED_BY(mutex_) = 0;
+};
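+
+// These counters implement the pacing handshake between the processing
+// threads: a thread blocks on the other thread's call event whenever it gets
+// more than kMaxCallDifference calls ahead. A sketch of the capture side
+// (mirrored in CaptureProcessor::Process() below):
+//
+//   if (frame_counters->CaptureMinusRenderCounters() > kMaxCallDifference)
+//     render_call_event->Wait(rtc::Event::kForever);  // Let render catch up.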
+
+// Class for handling the capture side processing.
+class CaptureProcessor {
+ public:
+ CaptureProcessor(int max_frame_size,
+ RandomGenerator* rand_gen,
+ rtc::Event* render_call_event,
+ rtc::Event* capture_call_event,
+ FrameCounters* shared_counters_state,
+ const TestConfig* test_config,
+ AudioProcessing* apm);
+ void Process();
+
+ private:
+ static constexpr int kMaxCallDifference = 10;
+ static constexpr float kCaptureInputFloatLevel = 0.03125f;
+ static constexpr int kCaptureInputFixLevel = 1024;
+
+ void PrepareFrame();
+ void CallApmCaptureSide();
+ void ApplyRuntimeSettingScheme();
+
+ RandomGenerator* const rand_gen_ = nullptr;
+ rtc::Event* const render_call_event_ = nullptr;
+ rtc::Event* const capture_call_event_ = nullptr;
+ FrameCounters* const frame_counters_ = nullptr;
+ const TestConfig* const test_config_ = nullptr;
+ AudioProcessing* const apm_ = nullptr;
+ AudioFrameData frame_data_;
+};
+
+// Class for handling the stats processing.
+class StatsProcessor {
+ public:
+ StatsProcessor(RandomGenerator* rand_gen,
+ const TestConfig* test_config,
+ AudioProcessing* apm);
+ void Process();
+
+ private:
+ RandomGenerator* rand_gen_ = nullptr;
+ const TestConfig* const test_config_ = nullptr;
+ AudioProcessing* apm_ = nullptr;
+};
+
+// Class for handling the render side processing.
+class RenderProcessor {
+ public:
+ RenderProcessor(int max_frame_size,
+ RandomGenerator* rand_gen,
+ rtc::Event* render_call_event,
+ rtc::Event* capture_call_event,
+ FrameCounters* shared_counters_state,
+ const TestConfig* test_config,
+ AudioProcessing* apm);
+ void Process();
+
+ private:
+ static constexpr int kMaxCallDifference = 10;
+ static constexpr int kRenderInputFixLevel = 16384;
+ static constexpr float kRenderInputFloatLevel = 0.5f;
+
+ void PrepareFrame();
+ void CallApmRenderSide();
+ void ApplyRuntimeSettingScheme();
+
+ RandomGenerator* const rand_gen_ = nullptr;
+ rtc::Event* const render_call_event_ = nullptr;
+ rtc::Event* const capture_call_event_ = nullptr;
+ FrameCounters* const frame_counters_ = nullptr;
+ const TestConfig* const test_config_ = nullptr;
+ AudioProcessing* const apm_ = nullptr;
+ AudioFrameData frame_data_;
+ bool first_render_call_ = true;
+};
+
+class AudioProcessingImplLockTest
+ : public ::testing::TestWithParam<TestConfig> {
+ public:
+ AudioProcessingImplLockTest();
+ bool RunTest();
+ bool MaybeEndTest();
+
+ private:
+ void SetUp() override;
+ void TearDown() override;
+
+ // Tests whether all the required render and capture side calls have been
+ // done.
+ bool TestDone() {
+    return frame_counters_.BothCountersExceedThreshold(
+ test_config_.min_number_of_calls);
+ }
+
+ // Start the threads used in the test.
+ void StartThreads() {
+ const auto attributes =
+ rtc::ThreadAttributes().SetPriority(rtc::ThreadPriority::kRealtime);
+ render_thread_ = rtc::PlatformThread::SpawnJoinable(
+ [this] {
+ while (!MaybeEndTest())
+ render_thread_state_.Process();
+ },
+ "render", attributes);
+ capture_thread_ = rtc::PlatformThread::SpawnJoinable(
+ [this] {
+ while (!MaybeEndTest()) {
+ capture_thread_state_.Process();
+ }
+ },
+ "capture", attributes);
+
+ stats_thread_ = rtc::PlatformThread::SpawnJoinable(
+ [this] {
+ while (!MaybeEndTest())
+ stats_thread_state_.Process();
+ },
+ "stats", attributes);
+ }
+
+ // Event handlers for the test.
+ rtc::Event test_complete_;
+ rtc::Event render_call_event_;
+ rtc::Event capture_call_event_;
+
+ // Thread related variables.
+ mutable RandomGenerator rand_gen_;
+
+ const TestConfig test_config_;
+ rtc::scoped_refptr<AudioProcessing> apm_;
+ FrameCounters frame_counters_;
+ RenderProcessor render_thread_state_;
+ CaptureProcessor capture_thread_state_;
+ StatsProcessor stats_thread_state_;
+ rtc::PlatformThread render_thread_;
+ rtc::PlatformThread capture_thread_;
+ rtc::PlatformThread stats_thread_;
+};
+
+// Sleeps a random time between 0 and max_sleep milliseconds.
+void SleepRandomMs(int max_sleep, RandomGenerator* rand_gen) {
+ int sleeptime = rand_gen->RandInt(0, max_sleep);
+ SleepMs(sleeptime);
+}
+
+// Populates a float audio frame with random data.
+void PopulateAudioFrame(float** frame,
+ float amplitude,
+ size_t num_channels,
+ size_t samples_per_channel,
+ RandomGenerator* rand_gen) {
+ for (size_t ch = 0; ch < num_channels; ch++) {
+ for (size_t k = 0; k < samples_per_channel; k++) {
+      // Store a random 16-bit quantized float number between -amplitude
+      // and +amplitude.
+ frame[ch][k] = amplitude * (2 * rand_gen->RandFloat() - 1);
+ }
+ }
+}
+
+// Populates an integer audio frame with random data.
+void PopulateAudioFrame(float amplitude,
+ size_t num_channels,
+ size_t samples_per_channel,
+ rtc::ArrayView<int16_t> frame,
+ RandomGenerator* rand_gen) {
+ ASSERT_GT(amplitude, 0);
+ ASSERT_LE(amplitude, 32767);
+ for (size_t ch = 0; ch < num_channels; ch++) {
+ for (size_t k = 0; k < samples_per_channel; k++) {
+      // Store a random 16-bit number between -(amplitude + 1) and amplitude,
+      // interleaving the channels as [ch0, ch1, ch0, ch1, ...].
+      frame[k * num_channels + ch] =
+          rand_gen->RandInt(2 * amplitude + 1) - amplitude - 1;
+ }
+ }
+}
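+
+// Note on the two overloads above: the float variant fills planar buffers
+// (one pointer per channel), whereas the integer variant fills a single
+// interleaved int16_t buffer, matching the float- and integer-based
+// ProcessStream()/ProcessReverseStream() signatures respectively.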
+
+AudioProcessing::Config GetApmTestConfig(AecType aec_type) {
+ AudioProcessing::Config apm_config;
+ apm_config.echo_canceller.enabled = aec_type != AecType::AecTurnedOff;
+ apm_config.echo_canceller.mobile_mode =
+ aec_type == AecType::BasicWebRtcAecSettingsWithAecMobile;
+ apm_config.gain_controller1.enabled = true;
+ apm_config.gain_controller1.mode =
+ AudioProcessing::Config::GainController1::kAdaptiveDigital;
+ apm_config.noise_suppression.enabled = true;
+ return apm_config;
+}
+
+AudioProcessingImplLockTest::AudioProcessingImplLockTest()
+ : test_config_(GetParam()),
+ apm_(AudioProcessingBuilderForTesting()
+ .SetConfig(GetApmTestConfig(test_config_.aec_type))
+ .Create()),
+ render_thread_state_(kMaxFrameSize,
+ &rand_gen_,
+ &render_call_event_,
+ &capture_call_event_,
+ &frame_counters_,
+ &test_config_,
+ apm_.get()),
+ capture_thread_state_(kMaxFrameSize,
+ &rand_gen_,
+ &render_call_event_,
+ &capture_call_event_,
+ &frame_counters_,
+ &test_config_,
+ apm_.get()),
+ stats_thread_state_(&rand_gen_, &test_config_, apm_.get()) {}
+
+// Run the test with a timeout.
+bool AudioProcessingImplLockTest::RunTest() {
+ StartThreads();
+ return test_complete_.Wait(kTestTimeOutLimit);
+}
+
+bool AudioProcessingImplLockTest::MaybeEndTest() {
+ if (HasFatalFailure() || TestDone()) {
+ test_complete_.Set();
+ return true;
+ }
+ return false;
+}
+
+void AudioProcessingImplLockTest::SetUp() {}
+
+void AudioProcessingImplLockTest::TearDown() {
+ render_call_event_.Set();
+ capture_call_event_.Set();
+}
+
+StatsProcessor::StatsProcessor(RandomGenerator* rand_gen,
+ const TestConfig* test_config,
+ AudioProcessing* apm)
+ : rand_gen_(rand_gen), test_config_(test_config), apm_(apm) {}
+
+// Implements the callback functionality for the statistics
+// collection thread.
+void StatsProcessor::Process() {
+ SleepRandomMs(100, rand_gen_);
+
+ AudioProcessing::Config apm_config = apm_->GetConfig();
+ if (test_config_->aec_type != AecType::AecTurnedOff) {
+ EXPECT_TRUE(apm_config.echo_canceller.enabled);
+ EXPECT_EQ(apm_config.echo_canceller.mobile_mode,
+ (test_config_->aec_type ==
+ AecType::BasicWebRtcAecSettingsWithAecMobile));
+ } else {
+ EXPECT_FALSE(apm_config.echo_canceller.enabled);
+ }
+ EXPECT_TRUE(apm_config.gain_controller1.enabled);
+ EXPECT_TRUE(apm_config.noise_suppression.enabled);
+
+  // The return value below is not testable.
+ apm_->GetStatistics();
+}
+
+CaptureProcessor::CaptureProcessor(int max_frame_size,
+ RandomGenerator* rand_gen,
+ rtc::Event* render_call_event,
+ rtc::Event* capture_call_event,
+ FrameCounters* shared_counters_state,
+ const TestConfig* test_config,
+ AudioProcessing* apm)
+ : rand_gen_(rand_gen),
+ render_call_event_(render_call_event),
+ capture_call_event_(capture_call_event),
+ frame_counters_(shared_counters_state),
+ test_config_(test_config),
+ apm_(apm),
+ frame_data_(max_frame_size) {}
+
+// Implements the callback functionality for the capture thread.
+void CaptureProcessor::Process() {
+ // Sleep a random time to simulate thread jitter.
+ SleepRandomMs(3, rand_gen_);
+
+  // Ensure that the numbers of render and capture calls do not differ
+  // too much.
+ if (frame_counters_->CaptureMinusRenderCounters() > kMaxCallDifference) {
+ render_call_event_->Wait(rtc::Event::kForever);
+ }
+
+ // Apply any specified capture side APM non-processing runtime calls.
+ ApplyRuntimeSettingScheme();
+
+ // Apply the capture side processing call.
+ CallApmCaptureSide();
+
+ // Increase the number of capture-side calls.
+ frame_counters_->IncreaseCaptureCounter();
+
+  // Flag to the render thread that another capture API call has occurred
+  // by triggering this thread's call event.
+ capture_call_event_->Set();
+}
+
+// Prepares a frame with relevant audio data and metadata.
+void CaptureProcessor::PrepareFrame() {
+ // Restrict to a common fixed sample rate if the integer
+ // interface is used.
+ if (test_config_->capture_api_function ==
+ CaptureApiImpl::ProcessStreamImplInteger) {
+ frame_data_.input_sample_rate_hz = test_config_->initial_sample_rate_hz;
+ frame_data_.output_sample_rate_hz = test_config_->initial_sample_rate_hz;
+ }
+
+ // Prepare the audio data.
+ StreamConfig input_stream_config(frame_data_.input_sample_rate_hz,
+ frame_data_.input_number_of_channels);
+
+ PopulateAudioFrame(kCaptureInputFixLevel, input_stream_config.num_channels(),
+ input_stream_config.num_frames(), frame_data_.frame,
+ rand_gen_);
+
+ PopulateAudioFrame(&frame_data_.input_frame[0], kCaptureInputFloatLevel,
+ input_stream_config.num_channels(),
+ input_stream_config.num_frames(), rand_gen_);
+}
+
+// Applies the capture side processing API call.
+void CaptureProcessor::CallApmCaptureSide() {
+ // Prepare a proper capture side processing API call input.
+ PrepareFrame();
+
+ // Set the stream delay.
+ apm_->set_stream_delay_ms(30);
+
+ // Set the analog level.
+ apm_->set_stream_analog_level(80);
+
+ // Call the specified capture side API processing method.
+ StreamConfig input_stream_config(frame_data_.input_sample_rate_hz,
+ frame_data_.input_number_of_channels);
+ StreamConfig output_stream_config(frame_data_.output_sample_rate_hz,
+ frame_data_.output_number_of_channels);
+ int result = AudioProcessing::kNoError;
+ switch (test_config_->capture_api_function) {
+ case CaptureApiImpl::ProcessStreamImplInteger:
+ result =
+ apm_->ProcessStream(frame_data_.frame.data(), input_stream_config,
+ output_stream_config, frame_data_.frame.data());
+ break;
+ case CaptureApiImpl::ProcessStreamImplFloat:
+ result = apm_->ProcessStream(&frame_data_.input_frame[0],
+ input_stream_config, output_stream_config,
+ &frame_data_.output_frame[0]);
+ break;
+ default:
+ FAIL();
+ }
+
+ // Retrieve the new analog level.
+ apm_->recommended_stream_analog_level();
+
+ // Check the return code for error.
+ ASSERT_EQ(AudioProcessing::kNoError, result);
+}
+
+// Applies any capture side APM API runtime calls and audio stream
+// characteristics specified by the scheme for the test.
+void CaptureProcessor::ApplyRuntimeSettingScheme() {
+ const int capture_count_local = frame_counters_->GetCaptureCounter();
+
+ // Update the number of channels and sample rates for the input and output.
+  // Note that the frequencies at which the parameters are changed are chosen
+  // to be prime numbers, to ensure that the permutation scheme in the
+  // parameter setting keeps changing.
+ switch (test_config_->runtime_parameter_setting_scheme) {
+ case RuntimeParameterSettingScheme::SparseStreamMetadataChangeScheme:
+ if (capture_count_local == 0)
+ frame_data_.input_sample_rate_hz = 16000;
+ else if (capture_count_local % 11 == 0)
+ frame_data_.input_sample_rate_hz = 32000;
+ else if (capture_count_local % 73 == 0)
+ frame_data_.input_sample_rate_hz = 48000;
+ else if (capture_count_local % 89 == 0)
+ frame_data_.input_sample_rate_hz = 16000;
+ else if (capture_count_local % 97 == 0)
+ frame_data_.input_sample_rate_hz = 8000;
+
+ if (capture_count_local == 0)
+ frame_data_.input_number_of_channels = 1;
+ else if (capture_count_local % 4 == 0)
+ frame_data_.input_number_of_channels =
+ (frame_data_.input_number_of_channels == 1 ? 2 : 1);
+
+ if (capture_count_local == 0)
+ frame_data_.output_sample_rate_hz = 16000;
+ else if (capture_count_local % 5 == 0)
+ frame_data_.output_sample_rate_hz = 32000;
+ else if (capture_count_local % 47 == 0)
+ frame_data_.output_sample_rate_hz = 48000;
+ else if (capture_count_local % 53 == 0)
+ frame_data_.output_sample_rate_hz = 16000;
+ else if (capture_count_local % 71 == 0)
+ frame_data_.output_sample_rate_hz = 8000;
+
+ if (capture_count_local == 0)
+ frame_data_.output_number_of_channels = 1;
+ else if (capture_count_local % 8 == 0)
+ frame_data_.output_number_of_channels =
+ (frame_data_.output_number_of_channels == 1 ? 2 : 1);
+ break;
+ case RuntimeParameterSettingScheme::ExtremeStreamMetadataChangeScheme:
+ if (capture_count_local % 2 == 0) {
+ frame_data_.input_number_of_channels = 1;
+ frame_data_.input_sample_rate_hz = 16000;
+ frame_data_.output_number_of_channels = 1;
+ frame_data_.output_sample_rate_hz = 16000;
+ } else {
+ frame_data_.input_number_of_channels =
+ (frame_data_.input_number_of_channels == 1 ? 2 : 1);
+ if (frame_data_.input_sample_rate_hz == 8000)
+ frame_data_.input_sample_rate_hz = 16000;
+ else if (frame_data_.input_sample_rate_hz == 16000)
+ frame_data_.input_sample_rate_hz = 32000;
+ else if (frame_data_.input_sample_rate_hz == 32000)
+ frame_data_.input_sample_rate_hz = 48000;
+ else if (frame_data_.input_sample_rate_hz == 48000)
+ frame_data_.input_sample_rate_hz = 8000;
+
+ frame_data_.output_number_of_channels =
+ (frame_data_.output_number_of_channels == 1 ? 2 : 1);
+ if (frame_data_.output_sample_rate_hz == 8000)
+ frame_data_.output_sample_rate_hz = 16000;
+ else if (frame_data_.output_sample_rate_hz == 16000)
+ frame_data_.output_sample_rate_hz = 32000;
+ else if (frame_data_.output_sample_rate_hz == 32000)
+ frame_data_.output_sample_rate_hz = 48000;
+ else if (frame_data_.output_sample_rate_hz == 48000)
+ frame_data_.output_sample_rate_hz = 8000;
+ }
+ break;
+ case RuntimeParameterSettingScheme::FixedMonoStreamMetadataScheme:
+ if (capture_count_local == 0) {
+ frame_data_.input_sample_rate_hz = 16000;
+ frame_data_.input_number_of_channels = 1;
+ frame_data_.output_sample_rate_hz = 16000;
+ frame_data_.output_number_of_channels = 1;
+ }
+ break;
+ case RuntimeParameterSettingScheme::FixedStereoStreamMetadataScheme:
+ if (capture_count_local == 0) {
+ frame_data_.input_sample_rate_hz = 16000;
+ frame_data_.input_number_of_channels = 2;
+ frame_data_.output_sample_rate_hz = 16000;
+ frame_data_.output_number_of_channels = 2;
+ }
+ break;
+ default:
+ FAIL();
+ }
+
+  // Make any specified runtime APM setter and getter calls.
+ switch (test_config_->runtime_parameter_setting_scheme) {
+ case RuntimeParameterSettingScheme::SparseStreamMetadataChangeScheme:
+ case RuntimeParameterSettingScheme::FixedMonoStreamMetadataScheme:
+ break;
+ case RuntimeParameterSettingScheme::ExtremeStreamMetadataChangeScheme:
+ case RuntimeParameterSettingScheme::FixedStereoStreamMetadataScheme:
+ if (capture_count_local % 2 == 0) {
+ ASSERT_EQ(AudioProcessing::Error::kNoError,
+ apm_->set_stream_delay_ms(30));
+ apm_->set_stream_key_pressed(true);
+ } else {
+ ASSERT_EQ(AudioProcessing::Error::kNoError,
+ apm_->set_stream_delay_ms(50));
+ apm_->set_stream_key_pressed(false);
+ }
+ break;
+ default:
+ FAIL();
+ }
+
+  // Restrict the number of output channels so that it does not exceed the
+  // number of input channels.
+ frame_data_.output_number_of_channels =
+ std::min(frame_data_.output_number_of_channels,
+ frame_data_.input_number_of_channels);
+}
+
+RenderProcessor::RenderProcessor(int max_frame_size,
+ RandomGenerator* rand_gen,
+ rtc::Event* render_call_event,
+ rtc::Event* capture_call_event,
+ FrameCounters* shared_counters_state,
+ const TestConfig* test_config,
+ AudioProcessing* apm)
+ : rand_gen_(rand_gen),
+ render_call_event_(render_call_event),
+ capture_call_event_(capture_call_event),
+ frame_counters_(shared_counters_state),
+ test_config_(test_config),
+ apm_(apm),
+ frame_data_(max_frame_size) {}
+
+// Implements the callback functionality for the render thread.
+void RenderProcessor::Process() {
+ // Conditional wait to ensure that a capture call has been done
+ // before the first render call is performed (implicitly
+ // required by the APM API).
+ if (first_render_call_) {
+ capture_call_event_->Wait(rtc::Event::kForever);
+ first_render_call_ = false;
+ }
+
+ // Sleep a random time to simulate thread jitter.
+ SleepRandomMs(3, rand_gen_);
+
+  // Ensure that the numbers of render and capture calls do not differ
+  // too much.
+ if (frame_counters_->RenderMinusCaptureCounters() > kMaxCallDifference) {
+ capture_call_event_->Wait(rtc::Event::kForever);
+ }
+
+ // Apply any specified render side APM non-processing runtime calls.
+ ApplyRuntimeSettingScheme();
+
+ // Apply the render side processing call.
+ CallApmRenderSide();
+
+ // Increase the number of render-side calls.
+ frame_counters_->IncreaseRenderCounter();
+
+  // Flag to the capture thread that another render API call has occurred
+  // by triggering this thread's call event.
+ render_call_event_->Set();
+}
+
+// Prepares the render side frame and the accompanying metadata
+// with the appropriate information.
+void RenderProcessor::PrepareFrame() {
+ // Restrict to a common fixed sample rate if the integer interface is
+ // used.
+ if ((test_config_->render_api_function ==
+ RenderApiImpl::ProcessReverseStreamImplInteger) ||
+ (test_config_->aec_type !=
+ AecType::BasicWebRtcAecSettingsWithAecMobile)) {
+ frame_data_.input_sample_rate_hz = test_config_->initial_sample_rate_hz;
+ frame_data_.output_sample_rate_hz = test_config_->initial_sample_rate_hz;
+ }
+
+ // Prepare the audio data.
+ StreamConfig input_stream_config(frame_data_.input_sample_rate_hz,
+ frame_data_.input_number_of_channels);
+
+ PopulateAudioFrame(kRenderInputFixLevel, input_stream_config.num_channels(),
+ input_stream_config.num_frames(), frame_data_.frame,
+ rand_gen_);
+
+ PopulateAudioFrame(&frame_data_.input_frame[0], kRenderInputFloatLevel,
+ input_stream_config.num_channels(),
+ input_stream_config.num_frames(), rand_gen_);
+}
+
+// Makes the render side processing API call.
+void RenderProcessor::CallApmRenderSide() {
+ // Prepare a proper render side processing API call input.
+ PrepareFrame();
+
+ // Call the specified render side API processing method.
+ StreamConfig input_stream_config(frame_data_.input_sample_rate_hz,
+ frame_data_.input_number_of_channels);
+ StreamConfig output_stream_config(frame_data_.output_sample_rate_hz,
+ frame_data_.output_number_of_channels);
+ int result = AudioProcessing::kNoError;
+ switch (test_config_->render_api_function) {
+ case RenderApiImpl::ProcessReverseStreamImplInteger:
+ result = apm_->ProcessReverseStream(
+ frame_data_.frame.data(), input_stream_config, output_stream_config,
+ frame_data_.frame.data());
+ break;
+ case RenderApiImpl::ProcessReverseStreamImplFloat:
+ result = apm_->ProcessReverseStream(
+ &frame_data_.input_frame[0], input_stream_config,
+ output_stream_config, &frame_data_.output_frame[0]);
+ break;
+ case RenderApiImpl::AnalyzeReverseStreamImplFloat:
+ result = apm_->AnalyzeReverseStream(&frame_data_.input_frame[0],
+ input_stream_config);
+ break;
+ default:
+ FAIL();
+ }
+
+ // Check the return code for error.
+ ASSERT_EQ(AudioProcessing::kNoError, result);
+}
+
+// Applies any render side APM API runtime calls and audio stream
+// characteristics specified by the scheme for the test.
+void RenderProcessor::ApplyRuntimeSettingScheme() {
+ const int render_count_local = frame_counters_->GetRenderCounter();
+
+ // Update the number of channels and sample rates for the input and output.
+  // Note that the frequencies at which the parameters are changed are chosen
+  // to be prime numbers, to ensure that the permutation scheme in the
+  // parameter setting keeps changing.
+ switch (test_config_->runtime_parameter_setting_scheme) {
+ case RuntimeParameterSettingScheme::SparseStreamMetadataChangeScheme:
+ if (render_count_local == 0)
+ frame_data_.input_sample_rate_hz = 16000;
+ else if (render_count_local % 47 == 0)
+ frame_data_.input_sample_rate_hz = 32000;
+ else if (render_count_local % 71 == 0)
+ frame_data_.input_sample_rate_hz = 48000;
+ else if (render_count_local % 79 == 0)
+ frame_data_.input_sample_rate_hz = 16000;
+ else if (render_count_local % 83 == 0)
+ frame_data_.input_sample_rate_hz = 8000;
+
+ if (render_count_local == 0)
+ frame_data_.input_number_of_channels = 1;
+ else if (render_count_local % 4 == 0)
+ frame_data_.input_number_of_channels =
+ (frame_data_.input_number_of_channels == 1 ? 2 : 1);
+
+ if (render_count_local == 0)
+ frame_data_.output_sample_rate_hz = 16000;
+ else if (render_count_local % 17 == 0)
+ frame_data_.output_sample_rate_hz = 32000;
+ else if (render_count_local % 19 == 0)
+ frame_data_.output_sample_rate_hz = 48000;
+ else if (render_count_local % 29 == 0)
+ frame_data_.output_sample_rate_hz = 16000;
+ else if (render_count_local % 61 == 0)
+ frame_data_.output_sample_rate_hz = 8000;
+
+ if (render_count_local == 0)
+ frame_data_.output_number_of_channels = 1;
+ else if (render_count_local % 8 == 0)
+ frame_data_.output_number_of_channels =
+ (frame_data_.output_number_of_channels == 1 ? 2 : 1);
+ break;
+ case RuntimeParameterSettingScheme::ExtremeStreamMetadataChangeScheme:
+ if (render_count_local == 0) {
+ frame_data_.input_number_of_channels = 1;
+ frame_data_.input_sample_rate_hz = 16000;
+ frame_data_.output_number_of_channels = 1;
+ frame_data_.output_sample_rate_hz = 16000;
+ } else {
+ frame_data_.input_number_of_channels =
+ (frame_data_.input_number_of_channels == 1 ? 2 : 1);
+ if (frame_data_.input_sample_rate_hz == 8000)
+ frame_data_.input_sample_rate_hz = 16000;
+ else if (frame_data_.input_sample_rate_hz == 16000)
+ frame_data_.input_sample_rate_hz = 32000;
+ else if (frame_data_.input_sample_rate_hz == 32000)
+ frame_data_.input_sample_rate_hz = 48000;
+ else if (frame_data_.input_sample_rate_hz == 48000)
+ frame_data_.input_sample_rate_hz = 8000;
+
+ frame_data_.output_number_of_channels =
+ (frame_data_.output_number_of_channels == 1 ? 2 : 1);
+ if (frame_data_.output_sample_rate_hz == 8000)
+ frame_data_.output_sample_rate_hz = 16000;
+ else if (frame_data_.output_sample_rate_hz == 16000)
+ frame_data_.output_sample_rate_hz = 32000;
+ else if (frame_data_.output_sample_rate_hz == 32000)
+ frame_data_.output_sample_rate_hz = 48000;
+ else if (frame_data_.output_sample_rate_hz == 48000)
+ frame_data_.output_sample_rate_hz = 8000;
+ }
+ break;
+ case RuntimeParameterSettingScheme::FixedMonoStreamMetadataScheme:
+ if (render_count_local == 0) {
+ frame_data_.input_sample_rate_hz = 16000;
+ frame_data_.input_number_of_channels = 1;
+ frame_data_.output_sample_rate_hz = 16000;
+ frame_data_.output_number_of_channels = 1;
+ }
+ break;
+ case RuntimeParameterSettingScheme::FixedStereoStreamMetadataScheme:
+ if (render_count_local == 0) {
+ frame_data_.input_sample_rate_hz = 16000;
+ frame_data_.input_number_of_channels = 2;
+ frame_data_.output_sample_rate_hz = 16000;
+ frame_data_.output_number_of_channels = 2;
+ }
+ break;
+ default:
+ FAIL();
+ }
+
+  // Restrict the number of output channels so that it does not exceed the
+  // number of input channels.
+ frame_data_.output_number_of_channels =
+ std::min(frame_data_.output_number_of_channels,
+ frame_data_.input_number_of_channels);
+}
+
+} // namespace
+
+TEST_P(AudioProcessingImplLockTest, LockTest) {
+ // Run test and verify that it did not time out.
+ ASSERT_TRUE(RunTest());
+}
+
+// Instantiate tests from the extreme test configuration set.
+INSTANTIATE_TEST_SUITE_P(
+ DISABLED_AudioProcessingImplLockExtensive,
+ AudioProcessingImplLockTest,
+ ::testing::ValuesIn(TestConfig::GenerateExtensiveTestConfigs()));
+
+INSTANTIATE_TEST_SUITE_P(
+ AudioProcessingImplLockBrief,
+ AudioProcessingImplLockTest,
+ ::testing::ValuesIn(TestConfig::GenerateBriefTestConfigs()));
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/audio_processing_impl_unittest.cc b/third_party/libwebrtc/modules/audio_processing/audio_processing_impl_unittest.cc
new file mode 100644
index 0000000000..5e4e3557b7
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/audio_processing_impl_unittest.cc
@@ -0,0 +1,814 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/audio_processing_impl.h"
+
+#include <array>
+#include <memory>
+
+#include "api/make_ref_counted.h"
+#include "api/scoped_refptr.h"
+#include "modules/audio_processing/include/audio_processing.h"
+#include "modules/audio_processing/optionally_built_submodule_creators.h"
+#include "modules/audio_processing/test/audio_processing_builder_for_testing.h"
+#include "modules/audio_processing/test/echo_canceller_test_tools.h"
+#include "modules/audio_processing/test/echo_control_mock.h"
+#include "modules/audio_processing/test/test_utils.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/random.h"
+#include "test/field_trial.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+using ::testing::Invoke;
+using ::testing::NotNull;
+
+class MockInitialize : public AudioProcessingImpl {
+ public:
+ MockInitialize() : AudioProcessingImpl() {}
+
+ MOCK_METHOD(void, InitializeLocked, (), (override));
+ void RealInitializeLocked() {
+ AssertLockedForTest();
+ AudioProcessingImpl::InitializeLocked();
+ }
+
+ MOCK_METHOD(void, AddRef, (), (const, override));
+ MOCK_METHOD(rtc::RefCountReleaseStatus, Release, (), (const, override));
+};
+
+// Creates MockEchoControl instances and provides raw pointer access to the
+// next created one. The raw pointer is meant to be used with gmock.
+// Returning a pointer to the next created MockEchoControl instance is
+// necessary for the following reasons: (i) gmock expectations must be set
+// before any call occurs, and (ii) APM is initialized the first time that
+// AudioProcessingImpl::ProcessStream() is called, and the initialization
+// leads to the creation of a new EchoControl object.
+class MockEchoControlFactory : public EchoControlFactory {
+ public:
+ MockEchoControlFactory() : next_mock_(std::make_unique<MockEchoControl>()) {}
+ // Returns a pointer to the next MockEchoControl that this factory creates.
+ MockEchoControl* GetNext() const { return next_mock_.get(); }
+ std::unique_ptr<EchoControl> Create(int sample_rate_hz,
+ int num_render_channels,
+ int num_capture_channels) override {
+ std::unique_ptr<EchoControl> mock = std::move(next_mock_);
+ next_mock_ = std::make_unique<MockEchoControl>();
+ return mock;
+ }
+
+ private:
+ std::unique_ptr<MockEchoControl> next_mock_;
+};
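+
+// Typical usage of the factory in the tests below (a sketch; expectations
+// must be set on the mock before the first ProcessStream() call, which
+// initializes APM and thereby creates the EchoControl instance):
+//
+//   auto factory = std::make_unique<MockEchoControlFactory>();
+//   MockEchoControl* mock = factory->GetNext();
+//   auto apm = AudioProcessingBuilderForTesting()
+//                  .SetEchoControlFactory(std::move(factory))
+//                  .Create();
+//   EXPECT_CALL(*mock, AnalyzeCapture(testing::_)).Times(1);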
+
+// Mocks EchoDetector and records the first samples of the last analyzed render
+// stream frame. Used to check what data is read by an EchoDetector
+// implementation injected into an APM.
+class TestEchoDetector : public EchoDetector {
+ public:
+ TestEchoDetector()
+ : analyze_render_audio_called_(false),
+ last_render_audio_first_sample_(0.f) {}
+ ~TestEchoDetector() override = default;
+ void AnalyzeRenderAudio(rtc::ArrayView<const float> render_audio) override {
+ last_render_audio_first_sample_ = render_audio[0];
+ analyze_render_audio_called_ = true;
+ }
+ void AnalyzeCaptureAudio(rtc::ArrayView<const float> capture_audio) override {
+ }
+ void Initialize(int capture_sample_rate_hz,
+ int num_capture_channels,
+ int render_sample_rate_hz,
+ int num_render_channels) override {}
+ EchoDetector::Metrics GetMetrics() const override { return {}; }
+ // Returns true if AnalyzeRenderAudio() has been called at least once.
+ bool analyze_render_audio_called() const {
+ return analyze_render_audio_called_;
+ }
+ // Returns the first sample of the last analyzed render frame.
+ float last_render_audio_first_sample() const {
+ return last_render_audio_first_sample_;
+ }
+
+ private:
+ bool analyze_render_audio_called_;
+ float last_render_audio_first_sample_;
+};
+
+// Mocks CustomProcessing and applies ProcessSample() to all the samples.
+// Meant to be injected into an APM to modify samples in a known and detectable
+// way.
+class TestRenderPreProcessor : public CustomProcessing {
+ public:
+ TestRenderPreProcessor() = default;
+ ~TestRenderPreProcessor() = default;
+ void Initialize(int sample_rate_hz, int num_channels) override {}
+ void Process(AudioBuffer* audio) override {
+ for (size_t k = 0; k < audio->num_channels(); ++k) {
+ rtc::ArrayView<float> channel_view(audio->channels()[k],
+ audio->num_frames());
+ std::transform(channel_view.begin(), channel_view.end(),
+ channel_view.begin(), ProcessSample);
+ }
+ }
+ std::string ToString() const override { return "TestRenderPreProcessor"; }
+ void SetRuntimeSetting(AudioProcessing::RuntimeSetting setting) override {}
+ // Modifies a sample. This member is used in Process() to modify a frame and
+ // it is publicly visible to enable tests.
+ static constexpr float ProcessSample(float x) { return 2.f * x; }
+};
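+
+// Since ProcessSample() doubles each sample, a render frame filled with value
+// v should read as 2 * v downstream of this pre-processor; the
+// RenderPreProcessorBeforeEchoDetector test below relies on that property.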
+
+} // namespace
+
+TEST(AudioProcessingImplTest, AudioParameterChangeTriggersInit) {
+ MockInitialize mock;
+ ON_CALL(mock, InitializeLocked)
+ .WillByDefault(Invoke(&mock, &MockInitialize::RealInitializeLocked));
+
+ EXPECT_CALL(mock, InitializeLocked).Times(1);
+ mock.Initialize();
+
+ constexpr size_t kMaxSampleRateHz = 32000;
+ constexpr size_t kMaxNumChannels = 2;
+ std::array<int16_t, kMaxNumChannels * kMaxSampleRateHz / 100> frame;
+ frame.fill(0);
+ StreamConfig config(16000, 1);
+  // Call with the default parameters; there should be no init.
+ EXPECT_CALL(mock, InitializeLocked).Times(0);
+ EXPECT_NOERR(mock.ProcessStream(frame.data(), config, config, frame.data()));
+ EXPECT_NOERR(
+ mock.ProcessReverseStream(frame.data(), config, config, frame.data()));
+
+ // New sample rate. (Only impacts ProcessStream).
+ config = StreamConfig(32000, 1);
+ EXPECT_CALL(mock, InitializeLocked).Times(1);
+ EXPECT_NOERR(mock.ProcessStream(frame.data(), config, config, frame.data()));
+
+ // New number of channels.
+ // TODO(peah): Investigate why this causes 2 inits.
+ config = StreamConfig(32000, 2);
+ EXPECT_CALL(mock, InitializeLocked).Times(2);
+ EXPECT_NOERR(mock.ProcessStream(frame.data(), config, config, frame.data()));
+ // ProcessStream sets num_channels_ == num_output_channels.
+ EXPECT_NOERR(
+ mock.ProcessReverseStream(frame.data(), config, config, frame.data()));
+
+ // A new sample rate passed to ProcessReverseStream should cause an init.
+ config = StreamConfig(16000, 2);
+ EXPECT_CALL(mock, InitializeLocked).Times(1);
+ EXPECT_NOERR(
+ mock.ProcessReverseStream(frame.data(), config, config, frame.data()));
+}
+
+TEST(AudioProcessingImplTest, UpdateCapturePreGainRuntimeSetting) {
+ rtc::scoped_refptr<AudioProcessing> apm =
+ AudioProcessingBuilderForTesting().Create();
+ webrtc::AudioProcessing::Config apm_config;
+ apm_config.pre_amplifier.enabled = true;
+ apm_config.pre_amplifier.fixed_gain_factor = 1.f;
+ apm->ApplyConfig(apm_config);
+
+ constexpr int kSampleRateHz = 48000;
+ constexpr int16_t kAudioLevel = 10000;
+ constexpr size_t kNumChannels = 2;
+
+ std::array<int16_t, kNumChannels * kSampleRateHz / 100> frame;
+ StreamConfig config(kSampleRateHz, kNumChannels);
+ frame.fill(kAudioLevel);
+ apm->ProcessStream(frame.data(), config, config, frame.data());
+ EXPECT_EQ(frame[100], kAudioLevel)
+ << "With factor 1, frame shouldn't be modified.";
+
+ constexpr float kGainFactor = 2.f;
+ apm->SetRuntimeSetting(
+ AudioProcessing::RuntimeSetting::CreateCapturePreGain(kGainFactor));
+
+ // Process for two frames to have time to ramp up gain.
+ for (int i = 0; i < 2; ++i) {
+ frame.fill(kAudioLevel);
+ apm->ProcessStream(frame.data(), config, config, frame.data());
+ }
+ EXPECT_EQ(frame[100], kGainFactor * kAudioLevel)
+ << "Frame should be amplified.";
+}
+
+TEST(AudioProcessingImplTest,
+ LevelAdjustmentUpdateCapturePreGainRuntimeSetting) {
+ rtc::scoped_refptr<AudioProcessing> apm =
+ AudioProcessingBuilderForTesting().Create();
+ webrtc::AudioProcessing::Config apm_config;
+ apm_config.capture_level_adjustment.enabled = true;
+ apm_config.capture_level_adjustment.pre_gain_factor = 1.f;
+ apm->ApplyConfig(apm_config);
+
+ constexpr int kSampleRateHz = 48000;
+ constexpr int16_t kAudioLevel = 10000;
+ constexpr size_t kNumChannels = 2;
+
+ std::array<int16_t, kNumChannels * kSampleRateHz / 100> frame;
+ StreamConfig config(kSampleRateHz, kNumChannels);
+ frame.fill(kAudioLevel);
+ apm->ProcessStream(frame.data(), config, config, frame.data());
+ EXPECT_EQ(frame[100], kAudioLevel)
+ << "With factor 1, frame shouldn't be modified.";
+
+ constexpr float kGainFactor = 2.f;
+ apm->SetRuntimeSetting(
+ AudioProcessing::RuntimeSetting::CreateCapturePreGain(kGainFactor));
+
+ // Process for two frames to have time to ramp up gain.
+ for (int i = 0; i < 2; ++i) {
+ frame.fill(kAudioLevel);
+ apm->ProcessStream(frame.data(), config, config, frame.data());
+ }
+ EXPECT_EQ(frame[100], kGainFactor * kAudioLevel)
+ << "Frame should be amplified.";
+}
+
+TEST(AudioProcessingImplTest,
+ LevelAdjustmentUpdateCapturePostGainRuntimeSetting) {
+ rtc::scoped_refptr<AudioProcessing> apm =
+ AudioProcessingBuilderForTesting().Create();
+ webrtc::AudioProcessing::Config apm_config;
+ apm_config.capture_level_adjustment.enabled = true;
+ apm_config.capture_level_adjustment.post_gain_factor = 1.f;
+ apm->ApplyConfig(apm_config);
+
+ constexpr int kSampleRateHz = 48000;
+ constexpr int16_t kAudioLevel = 10000;
+ constexpr size_t kNumChannels = 2;
+
+ std::array<int16_t, kNumChannels * kSampleRateHz / 100> frame;
+ StreamConfig config(kSampleRateHz, kNumChannels);
+ frame.fill(kAudioLevel);
+ apm->ProcessStream(frame.data(), config, config, frame.data());
+ EXPECT_EQ(frame[100], kAudioLevel)
+ << "With factor 1, frame shouldn't be modified.";
+
+ constexpr float kGainFactor = 2.f;
+ apm->SetRuntimeSetting(
+ AudioProcessing::RuntimeSetting::CreateCapturePostGain(kGainFactor));
+
+ // Process for two frames to have time to ramp up gain.
+ for (int i = 0; i < 2; ++i) {
+ frame.fill(kAudioLevel);
+ apm->ProcessStream(frame.data(), config, config, frame.data());
+ }
+ EXPECT_EQ(frame[100], kGainFactor * kAudioLevel)
+ << "Frame should be amplified.";
+}
+
+TEST(AudioProcessingImplTest, EchoControllerObservesSetCaptureUsageChange) {
+ // Tests that the echo controller observes that the capture usage has been
+ // updated.
+ auto echo_control_factory = std::make_unique<MockEchoControlFactory>();
+ const MockEchoControlFactory* echo_control_factory_ptr =
+ echo_control_factory.get();
+
+ rtc::scoped_refptr<AudioProcessing> apm =
+ AudioProcessingBuilderForTesting()
+ .SetEchoControlFactory(std::move(echo_control_factory))
+ .Create();
+
+ constexpr int16_t kAudioLevel = 10000;
+ constexpr int kSampleRateHz = 48000;
+ constexpr int kNumChannels = 2;
+ std::array<int16_t, kNumChannels * kSampleRateHz / 100> frame;
+ StreamConfig config(kSampleRateHz, kNumChannels);
+ frame.fill(kAudioLevel);
+
+ MockEchoControl* echo_control_mock = echo_control_factory_ptr->GetNext();
+
+ // Ensure that SetCaptureOutputUsage is not called when no runtime settings
+ // are passed.
+ EXPECT_CALL(*echo_control_mock, SetCaptureOutputUsage(testing::_)).Times(0);
+ apm->ProcessStream(frame.data(), config, config, frame.data());
+
+ // Ensure that SetCaptureOutputUsage is called with the right information when
+ // a runtime setting is passed.
+ EXPECT_CALL(*echo_control_mock,
+ SetCaptureOutputUsage(/*capture_output_used=*/false))
+ .Times(1);
+ EXPECT_TRUE(apm->PostRuntimeSetting(
+ AudioProcessing::RuntimeSetting::CreateCaptureOutputUsedSetting(
+ /*capture_output_used=*/false)));
+ apm->ProcessStream(frame.data(), config, config, frame.data());
+
+ EXPECT_CALL(*echo_control_mock,
+ SetCaptureOutputUsage(/*capture_output_used=*/true))
+ .Times(1);
+ EXPECT_TRUE(apm->PostRuntimeSetting(
+ AudioProcessing::RuntimeSetting::CreateCaptureOutputUsedSetting(
+ /*capture_output_used=*/true)));
+ apm->ProcessStream(frame.data(), config, config, frame.data());
+
+ // The number of positions to place items in the queue is equal to the queue
+ // size minus 1.
+ constexpr int kNumSlotsInQueue = RuntimeSettingQueueSize();
+
+ // Ensure that SetCaptureOutputUsage is called with the right information when
+ // many runtime settings are passed.
+ for (int k = 0; k < kNumSlotsInQueue - 1; ++k) {
+ EXPECT_TRUE(apm->PostRuntimeSetting(
+ AudioProcessing::RuntimeSetting::CreateCaptureOutputUsedSetting(
+ /*capture_output_used=*/false)));
+ }
+ EXPECT_CALL(*echo_control_mock,
+ SetCaptureOutputUsage(/*capture_output_used=*/false))
+ .Times(kNumSlotsInQueue - 1);
+ apm->ProcessStream(frame.data(), config, config, frame.data());
+
+ // Ensure that SetCaptureOutputUsage is properly called with the fallback
+ // value when the runtime settings queue becomes full.
+ for (int k = 0; k < kNumSlotsInQueue; ++k) {
+ EXPECT_TRUE(apm->PostRuntimeSetting(
+ AudioProcessing::RuntimeSetting::CreateCaptureOutputUsedSetting(
+ /*capture_output_used=*/false)));
+ }
+ EXPECT_FALSE(apm->PostRuntimeSetting(
+ AudioProcessing::RuntimeSetting::CreateCaptureOutputUsedSetting(
+ /*capture_output_used=*/false)));
+ EXPECT_FALSE(apm->PostRuntimeSetting(
+ AudioProcessing::RuntimeSetting::CreateCaptureOutputUsedSetting(
+ /*capture_output_used=*/false)));
+ EXPECT_CALL(*echo_control_mock,
+ SetCaptureOutputUsage(/*capture_output_used=*/false))
+ .Times(kNumSlotsInQueue);
+ EXPECT_CALL(*echo_control_mock,
+ SetCaptureOutputUsage(/*capture_output_used=*/true))
+ .Times(1);
+ apm->ProcessStream(frame.data(), config, config, frame.data());
+}
+
+TEST(AudioProcessingImplTest,
+ EchoControllerObservesPreAmplifierEchoPathGainChange) {
+ // Tests that the echo controller observes an echo path gain change when the
+ // pre-amplifier submodule changes the gain.
+ auto echo_control_factory = std::make_unique<MockEchoControlFactory>();
+ const auto* echo_control_factory_ptr = echo_control_factory.get();
+
+ rtc::scoped_refptr<AudioProcessing> apm =
+ AudioProcessingBuilderForTesting()
+ .SetEchoControlFactory(std::move(echo_control_factory))
+ .Create();
+ // Disable AGC.
+ webrtc::AudioProcessing::Config apm_config;
+ apm_config.gain_controller1.enabled = false;
+ apm_config.gain_controller2.enabled = false;
+ apm_config.pre_amplifier.enabled = true;
+ apm_config.pre_amplifier.fixed_gain_factor = 1.f;
+ apm->ApplyConfig(apm_config);
+
+ constexpr int16_t kAudioLevel = 10000;
+ constexpr size_t kSampleRateHz = 48000;
+ constexpr size_t kNumChannels = 2;
+ std::array<int16_t, kNumChannels * kSampleRateHz / 100> frame;
+ StreamConfig config(kSampleRateHz, kNumChannels);
+ frame.fill(kAudioLevel);
+
+ MockEchoControl* echo_control_mock = echo_control_factory_ptr->GetNext();
+
+ EXPECT_CALL(*echo_control_mock, AnalyzeCapture(testing::_)).Times(1);
+ EXPECT_CALL(*echo_control_mock,
+ ProcessCapture(NotNull(), testing::_, /*echo_path_change=*/false))
+ .Times(1);
+ apm->ProcessStream(frame.data(), config, config, frame.data());
+
+ EXPECT_CALL(*echo_control_mock, AnalyzeCapture(testing::_)).Times(1);
+ EXPECT_CALL(*echo_control_mock,
+ ProcessCapture(NotNull(), testing::_, /*echo_path_change=*/true))
+ .Times(1);
+ apm->SetRuntimeSetting(
+ AudioProcessing::RuntimeSetting::CreateCapturePreGain(2.f));
+ apm->ProcessStream(frame.data(), config, config, frame.data());
+}
+
+TEST(AudioProcessingImplTest,
+ EchoControllerObservesLevelAdjustmentPreGainEchoPathGainChange) {
+ // Tests that the echo controller observes an echo path gain change when the
+ // pre-amplifier submodule changes the gain.
+ auto echo_control_factory = std::make_unique<MockEchoControlFactory>();
+ const auto* echo_control_factory_ptr = echo_control_factory.get();
+
+ rtc::scoped_refptr<AudioProcessing> apm =
+ AudioProcessingBuilderForTesting()
+ .SetEchoControlFactory(std::move(echo_control_factory))
+ .Create();
+ // Disable AGC.
+ webrtc::AudioProcessing::Config apm_config;
+ apm_config.gain_controller1.enabled = false;
+ apm_config.gain_controller2.enabled = false;
+ apm_config.capture_level_adjustment.enabled = true;
+ apm_config.capture_level_adjustment.pre_gain_factor = 1.f;
+ apm->ApplyConfig(apm_config);
+
+ constexpr int16_t kAudioLevel = 10000;
+ constexpr size_t kSampleRateHz = 48000;
+ constexpr size_t kNumChannels = 2;
+ std::array<int16_t, kNumChannels * kSampleRateHz / 100> frame;
+ StreamConfig config(kSampleRateHz, kNumChannels);
+ frame.fill(kAudioLevel);
+
+ MockEchoControl* echo_control_mock = echo_control_factory_ptr->GetNext();
+
+ EXPECT_CALL(*echo_control_mock, AnalyzeCapture(testing::_)).Times(1);
+ EXPECT_CALL(*echo_control_mock,
+ ProcessCapture(NotNull(), testing::_, /*echo_path_change=*/false))
+ .Times(1);
+ apm->ProcessStream(frame.data(), config, config, frame.data());
+
+ EXPECT_CALL(*echo_control_mock, AnalyzeCapture(testing::_)).Times(1);
+ EXPECT_CALL(*echo_control_mock,
+ ProcessCapture(NotNull(), testing::_, /*echo_path_change=*/true))
+ .Times(1);
+ apm->SetRuntimeSetting(
+ AudioProcessing::RuntimeSetting::CreateCapturePreGain(2.f));
+ apm->ProcessStream(frame.data(), config, config, frame.data());
+}
+
+TEST(AudioProcessingImplTest,
+ EchoControllerObservesAnalogAgc1EchoPathGainChange) {
+ // Tests that the echo controller observes an echo path gain change when the
+ // AGC1 analog adaptive submodule changes the analog gain.
+ auto echo_control_factory = std::make_unique<MockEchoControlFactory>();
+ const auto* echo_control_factory_ptr = echo_control_factory.get();
+
+ rtc::scoped_refptr<AudioProcessing> apm =
+ AudioProcessingBuilderForTesting()
+ .SetEchoControlFactory(std::move(echo_control_factory))
+ .Create();
+ webrtc::AudioProcessing::Config apm_config;
+ // Enable AGC1.
+ apm_config.gain_controller1.enabled = true;
+ apm_config.gain_controller1.mode =
+ AudioProcessing::Config::GainController1::kAdaptiveAnalog;
+ apm_config.gain_controller2.enabled = false;
+ apm_config.pre_amplifier.enabled = false;
+ apm->ApplyConfig(apm_config);
+
+ constexpr int16_t kAudioLevel = 1000;
+ constexpr size_t kSampleRateHz = 48000;
+ constexpr size_t kNumChannels = 2;
+ std::array<int16_t, kNumChannels * kSampleRateHz / 100> frame;
+ StreamConfig stream_config(kSampleRateHz, kNumChannels);
+ frame.fill(kAudioLevel);
+
+ MockEchoControl* echo_control_mock = echo_control_factory_ptr->GetNext();
+
+ const int initial_analog_gain = apm->recommended_stream_analog_level();
+ EXPECT_CALL(*echo_control_mock, AnalyzeCapture(testing::_)).Times(1);
+ EXPECT_CALL(*echo_control_mock, ProcessCapture(NotNull(), testing::_, false))
+ .Times(1);
+ apm->ProcessStream(frame.data(), stream_config, stream_config, frame.data());
+
+ // Force an analog gain change if it did not happen.
+ if (initial_analog_gain == apm->recommended_stream_analog_level()) {
+ apm->set_stream_analog_level(initial_analog_gain + 1);
+ }
+
+ EXPECT_CALL(*echo_control_mock, AnalyzeCapture(testing::_)).Times(1);
+ EXPECT_CALL(*echo_control_mock, ProcessCapture(NotNull(), testing::_, true))
+ .Times(1);
+ apm->ProcessStream(frame.data(), stream_config, stream_config, frame.data());
+}
+
+TEST(AudioProcessingImplTest,
+ EchoControllerObservesNoDigitalAgc2EchoPathGainChange) {
+ // Tests that the echo controller doesn't observe an echo path gain change
+ // when the AGC2 digital submodule changes the digital gain without analog
+ // gain changes.
+ auto echo_control_factory = std::make_unique<MockEchoControlFactory>();
+ const auto* echo_control_factory_ptr = echo_control_factory.get();
+ rtc::scoped_refptr<AudioProcessing> apm =
+ AudioProcessingBuilderForTesting()
+ .SetEchoControlFactory(std::move(echo_control_factory))
+ .Create();
+ webrtc::AudioProcessing::Config apm_config;
+ // Disable AGC1 analog.
+ apm_config.gain_controller1.enabled = false;
+ // Enable AGC2 digital.
+ apm_config.gain_controller2.enabled = true;
+ apm_config.gain_controller2.adaptive_digital.enabled = true;
+ apm->ApplyConfig(apm_config);
+
+ constexpr int16_t kAudioLevel = 1000;
+ constexpr size_t kSampleRateHz = 48000;
+ constexpr size_t kNumChannels = 2;
+ std::array<int16_t, kNumChannels * kSampleRateHz / 100> frame;
+ StreamConfig stream_config(kSampleRateHz, kNumChannels);
+ frame.fill(kAudioLevel);
+
+ MockEchoControl* echo_control_mock = echo_control_factory_ptr->GetNext();
+
+ EXPECT_CALL(*echo_control_mock, AnalyzeCapture(testing::_)).Times(1);
+ EXPECT_CALL(*echo_control_mock, ProcessCapture(NotNull(), testing::_,
+ /*echo_path_change=*/false))
+ .Times(1);
+ apm->ProcessStream(frame.data(), stream_config, stream_config, frame.data());
+
+ EXPECT_CALL(*echo_control_mock, AnalyzeCapture(testing::_)).Times(1);
+ EXPECT_CALL(*echo_control_mock, ProcessCapture(NotNull(), testing::_,
+ /*echo_path_change=*/false))
+ .Times(1);
+ apm->ProcessStream(frame.data(), stream_config, stream_config, frame.data());
+}
+
+TEST(AudioProcessingImplTest, ProcessWithAgc2InjectedSpeechProbability) {
+  // Tests that a stream is successfully processed when the field trial
+  // `WebRTC-Audio-TransientSuppressorVadMode/Enabled-RnnVad/` is enabled and
+  // the injected speech probability is used in AGC2 digital.
+ webrtc::test::ScopedFieldTrials field_trials(
+ "WebRTC-Audio-TransientSuppressorVadMode/Enabled-RnnVad/");
+ rtc::scoped_refptr<AudioProcessing> apm = AudioProcessingBuilder().Create();
+ ASSERT_EQ(apm->Initialize(), AudioProcessing::kNoError);
+ webrtc::AudioProcessing::Config apm_config;
+ // Disable AGC1 analog.
+ apm_config.gain_controller1.enabled = false;
+ // Enable AGC2 digital.
+ apm_config.gain_controller2.enabled = true;
+ apm_config.gain_controller2.adaptive_digital.enabled = true;
+ apm->ApplyConfig(apm_config);
+ constexpr int kSampleRateHz = 48000;
+ constexpr int kNumChannels = 1;
+ std::array<float, kSampleRateHz / 100> buffer;
+ float* channel_pointers[] = {buffer.data()};
+ StreamConfig stream_config(/*sample_rate_hz=*/kSampleRateHz,
+ /*num_channels=*/kNumChannels);
+ Random random_generator(2341U);
+ constexpr int kFramesToProcess = 10;
+ for (int i = 0; i < kFramesToProcess; ++i) {
+ RandomizeSampleVector(&random_generator, buffer);
+ ASSERT_EQ(apm->ProcessStream(channel_pointers, stream_config, stream_config,
+ channel_pointers),
+ kNoErr);
+ }
+}
+
+TEST(AudioProcessingImplTest, EchoControllerObservesPlayoutVolumeChange) {
+ // Tests that the echo controller observes an echo path gain change when a
+ // playout volume change is reported.
+ auto echo_control_factory = std::make_unique<MockEchoControlFactory>();
+ const auto* echo_control_factory_ptr = echo_control_factory.get();
+
+ rtc::scoped_refptr<AudioProcessing> apm =
+ AudioProcessingBuilderForTesting()
+ .SetEchoControlFactory(std::move(echo_control_factory))
+ .Create();
+ // Disable AGC.
+ webrtc::AudioProcessing::Config apm_config;
+ apm_config.gain_controller1.enabled = false;
+ apm_config.gain_controller2.enabled = false;
+ apm->ApplyConfig(apm_config);
+
+ constexpr int16_t kAudioLevel = 10000;
+ constexpr size_t kSampleRateHz = 48000;
+ constexpr size_t kNumChannels = 2;
+ std::array<int16_t, kNumChannels * kSampleRateHz / 100> frame;
+ StreamConfig stream_config(kSampleRateHz, kNumChannels);
+ frame.fill(kAudioLevel);
+
+ MockEchoControl* echo_control_mock = echo_control_factory_ptr->GetNext();
+
+ EXPECT_CALL(*echo_control_mock, AnalyzeCapture(testing::_)).Times(1);
+ EXPECT_CALL(*echo_control_mock,
+ ProcessCapture(NotNull(), testing::_, /*echo_path_change=*/false))
+ .Times(1);
+ apm->ProcessStream(frame.data(), stream_config, stream_config, frame.data());
+
+ EXPECT_CALL(*echo_control_mock, AnalyzeCapture(testing::_)).Times(1);
+ EXPECT_CALL(*echo_control_mock,
+ ProcessCapture(NotNull(), testing::_, /*echo_path_change=*/false))
+ .Times(1);
+ apm->SetRuntimeSetting(
+ AudioProcessing::RuntimeSetting::CreatePlayoutVolumeChange(50));
+ apm->ProcessStream(frame.data(), stream_config, stream_config, frame.data());
+
+ EXPECT_CALL(*echo_control_mock, AnalyzeCapture(testing::_)).Times(1);
+ EXPECT_CALL(*echo_control_mock,
+ ProcessCapture(NotNull(), testing::_, /*echo_path_change=*/false))
+ .Times(1);
+ apm->SetRuntimeSetting(
+ AudioProcessing::RuntimeSetting::CreatePlayoutVolumeChange(50));
+ apm->ProcessStream(frame.data(), stream_config, stream_config, frame.data());
+
+ EXPECT_CALL(*echo_control_mock, AnalyzeCapture(testing::_)).Times(1);
+ EXPECT_CALL(*echo_control_mock,
+ ProcessCapture(NotNull(), testing::_, /*echo_path_change=*/true))
+ .Times(1);
+ apm->SetRuntimeSetting(
+ AudioProcessing::RuntimeSetting::CreatePlayoutVolumeChange(100));
+ apm->ProcessStream(frame.data(), stream_config, stream_config, frame.data());
+}
+
+TEST(AudioProcessingImplTest, RenderPreProcessorBeforeEchoDetector) {
+ // Make sure that signal changes caused by a render pre-processing sub-module
+ // take place before any echo detector analysis.
+ auto test_echo_detector = rtc::make_ref_counted<TestEchoDetector>();
+ std::unique_ptr<CustomProcessing> test_render_pre_processor(
+ new TestRenderPreProcessor());
+ // Create APM injecting the test echo detector and render pre-processor.
+ rtc::scoped_refptr<AudioProcessing> apm =
+ AudioProcessingBuilderForTesting()
+ .SetEchoDetector(test_echo_detector)
+ .SetRenderPreProcessing(std::move(test_render_pre_processor))
+ .Create();
+ webrtc::AudioProcessing::Config apm_config;
+ apm_config.pre_amplifier.enabled = true;
+ apm->ApplyConfig(apm_config);
+
+ constexpr int16_t kAudioLevel = 1000;
+ constexpr int kSampleRateHz = 16000;
+ constexpr size_t kNumChannels = 1;
+ // Explicitly initialize APM to ensure no render frames are discarded.
+ const ProcessingConfig processing_config = {{
+ {kSampleRateHz, kNumChannels},
+ {kSampleRateHz, kNumChannels},
+ {kSampleRateHz, kNumChannels},
+ {kSampleRateHz, kNumChannels},
+ }};
+ apm->Initialize(processing_config);
+
+ std::array<int16_t, kNumChannels * kSampleRateHz / 100> frame;
+ StreamConfig stream_config(kSampleRateHz, kNumChannels);
+
+ constexpr float kAudioLevelFloat = static_cast<float>(kAudioLevel);
+ constexpr float kExpectedPreprocessedAudioLevel =
+ TestRenderPreProcessor::ProcessSample(kAudioLevelFloat);
+ ASSERT_NE(kAudioLevelFloat, kExpectedPreprocessedAudioLevel);
+
+ // Analyze a render stream frame.
+ frame.fill(kAudioLevel);
+ ASSERT_EQ(AudioProcessing::Error::kNoError,
+ apm->ProcessReverseStream(frame.data(), stream_config,
+ stream_config, frame.data()));
+  // Trigger a call to EchoDetector::AnalyzeRenderAudio() via
+  // ProcessStream().
+ frame.fill(kAudioLevel);
+ ASSERT_EQ(AudioProcessing::Error::kNoError,
+ apm->ProcessStream(frame.data(), stream_config, stream_config,
+ frame.data()));
+  // Regardless of how the call to EchoDetector::AnalyzeRenderAudio() is
+  // triggered, the line below checks that the call has occurred. If not, the
+  // APM implementation may have changed and this test might need to be adapted.
+ ASSERT_TRUE(test_echo_detector->analyze_render_audio_called());
+ // Check that the data read in EchoDetector::AnalyzeRenderAudio() is that
+ // produced by the render pre-processor.
+ EXPECT_EQ(kExpectedPreprocessedAudioLevel,
+ test_echo_detector->last_render_audio_first_sample());
+}
+
+// Disabling build-optional submodules and then trying to enable them via the
+// APM config should be bit-exact with running APM with said submodules
+// disabled. This mainly tests that OverrideSubmoduleCreationForTesting has an
+// effect.
+TEST(ApmWithSubmodulesExcludedTest, BitexactWithDisabledModules) {
+ auto apm = rtc::make_ref_counted<AudioProcessingImpl>();
+ ASSERT_EQ(apm->Initialize(), AudioProcessing::kNoError);
+
+ ApmSubmoduleCreationOverrides overrides;
+ overrides.transient_suppression = true;
+ apm->OverrideSubmoduleCreationForTesting(overrides);
+
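+  // With the override in place, enabling transient suppression in the config
+  // below should leave the processed audio bit-exact with the reference APM
+  // that runs with transient suppression disabled.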
+ AudioProcessing::Config apm_config = apm->GetConfig();
+ apm_config.transient_suppression.enabled = true;
+ apm->ApplyConfig(apm_config);
+
+ rtc::scoped_refptr<AudioProcessing> apm_reference =
+ AudioProcessingBuilder().Create();
+ apm_config = apm_reference->GetConfig();
+ apm_config.transient_suppression.enabled = false;
+ apm_reference->ApplyConfig(apm_config);
+
+ constexpr int kSampleRateHz = 16000;
+ constexpr int kNumChannels = 1;
+ std::array<float, kSampleRateHz / 100> buffer;
+ std::array<float, kSampleRateHz / 100> buffer_reference;
+ float* channel_pointers[] = {buffer.data()};
+ float* channel_pointers_reference[] = {buffer_reference.data()};
+ StreamConfig stream_config(/*sample_rate_hz=*/kSampleRateHz,
+ /*num_channels=*/kNumChannels);
+ Random random_generator(2341U);
+ constexpr int kFramesToProcessPerConfiguration = 10;
+
+ for (int i = 0; i < kFramesToProcessPerConfiguration; ++i) {
+ RandomizeSampleVector(&random_generator, buffer);
+ std::copy(buffer.begin(), buffer.end(), buffer_reference.begin());
+ ASSERT_EQ(apm->ProcessStream(channel_pointers, stream_config, stream_config,
+ channel_pointers),
+ kNoErr);
+ ASSERT_EQ(
+ apm_reference->ProcessStream(channel_pointers_reference, stream_config,
+ stream_config, channel_pointers_reference),
+ kNoErr);
+ for (int j = 0; j < kSampleRateHz / 100; ++j) {
+ EXPECT_EQ(buffer[j], buffer_reference[j]);
+ }
+ }
+}
+
+// Disable transient suppressor creation and run APM in ways that should trigger
+// calls to the transient suppressor API.
+TEST(ApmWithSubmodulesExcludedTest, ReinitializeTransientSuppressor) {
+ auto apm = rtc::make_ref_counted<AudioProcessingImpl>();
+ ASSERT_EQ(apm->Initialize(), kNoErr);
+
+ ApmSubmoduleCreationOverrides overrides;
+ overrides.transient_suppression = true;
+ apm->OverrideSubmoduleCreationForTesting(overrides);
+
+ AudioProcessing::Config config = apm->GetConfig();
+ config.transient_suppression.enabled = true;
+ apm->ApplyConfig(config);
+ // 960 samples per frame: 10 ms of <= 48 kHz audio with <= 2 channels.
+ float buffer[960];
+ float* channel_pointers[] = {&buffer[0], &buffer[480]};
+ Random random_generator(2341U);
+ constexpr int kFramesToProcessPerConfiguration = 3;
+
+ StreamConfig initial_stream_config(/*sample_rate_hz=*/16000,
+ /*num_channels=*/1);
+ for (int i = 0; i < kFramesToProcessPerConfiguration; ++i) {
+ RandomizeSampleVector(&random_generator, buffer);
+ EXPECT_EQ(apm->ProcessStream(channel_pointers, initial_stream_config,
+ initial_stream_config, channel_pointers),
+ kNoErr);
+ }
+
+ StreamConfig stereo_stream_config(/*sample_rate_hz=*/16000,
+ /*num_channels=*/2);
+ for (int i = 0; i < kFramesToProcessPerConfiguration; ++i) {
+ RandomizeSampleVector(&random_generator, buffer);
+ EXPECT_EQ(apm->ProcessStream(channel_pointers, stereo_stream_config,
+ stereo_stream_config, channel_pointers),
+ kNoErr);
+ }
+
+ StreamConfig high_sample_rate_stream_config(/*sample_rate_hz=*/48000,
+ /*num_channels=*/2);
+ for (int i = 0; i < kFramesToProcessPerConfiguration; ++i) {
+ RandomizeSampleVector(&random_generator, buffer);
+ EXPECT_EQ(
+ apm->ProcessStream(channel_pointers, high_sample_rate_stream_config,
+ high_sample_rate_stream_config, channel_pointers),
+ kNoErr);
+ }
+}
+
+// Disable transient suppressor creation and run APM in ways that should trigger
+// calls to the transient suppressor API.
+TEST(ApmWithSubmodulesExcludedTest, ToggleTransientSuppressor) {
+ auto apm = rtc::make_ref_counted<AudioProcessingImpl>();
+ ASSERT_EQ(apm->Initialize(), AudioProcessing::kNoError);
+
+ ApmSubmoduleCreationOverrides overrides;
+ overrides.transient_suppression = true;
+ apm->OverrideSubmoduleCreationForTesting(overrides);
+
+ // 960 samples per frame: 10 ms of <= 48 kHz audio with <= 2 channels.
+ float buffer[960];
+ float* channel_pointers[] = {&buffer[0], &buffer[480]};
+ Random random_generator(2341U);
+ constexpr int kFramesToProcessPerConfiguration = 3;
+ StreamConfig stream_config(/*sample_rate_hz=*/16000,
+ /*num_channels=*/1);
+
+ AudioProcessing::Config config = apm->GetConfig();
+ config.transient_suppression.enabled = true;
+ apm->ApplyConfig(config);
+ for (int i = 0; i < kFramesToProcessPerConfiguration; ++i) {
+ RandomizeSampleVector(&random_generator, buffer);
+ EXPECT_EQ(apm->ProcessStream(channel_pointers, stream_config, stream_config,
+ channel_pointers),
+ kNoErr);
+ }
+
+ config = apm->GetConfig();
+ config.transient_suppression.enabled = false;
+ apm->ApplyConfig(config);
+ for (int i = 0; i < kFramesToProcessPerConfiguration; ++i) {
+ RandomizeSampleVector(&random_generator, buffer);
+ EXPECT_EQ(apm->ProcessStream(channel_pointers, stream_config, stream_config,
+ channel_pointers),
+ kNoErr);
+ }
+
+ config = apm->GetConfig();
+ config.transient_suppression.enabled = true;
+ apm->ApplyConfig(config);
+ for (int i = 0; i < kFramesToProcessPerConfiguration; ++i) {
+ RandomizeSampleVector(&random_generator, buffer);
+ EXPECT_EQ(apm->ProcessStream(channel_pointers, stream_config, stream_config,
+ channel_pointers),
+ kNoErr);
+ }
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/audio_processing_performance_unittest.cc b/third_party/libwebrtc/modules/audio_processing/audio_processing_performance_unittest.cc
new file mode 100644
index 0000000000..51d1962875
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/audio_processing_performance_unittest.cc
@@ -0,0 +1,592 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include <math.h>
+
+#include <algorithm>
+#include <atomic>
+#include <memory>
+#include <vector>
+
+#include "absl/strings/string_view.h"
+#include "api/array_view.h"
+#include "modules/audio_processing/audio_processing_impl.h"
+#include "modules/audio_processing/test/audio_processing_builder_for_testing.h"
+#include "modules/audio_processing/test/test_utils.h"
+#include "rtc_base/event.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "rtc_base/platform_thread.h"
+#include "rtc_base/random.h"
+#include "system_wrappers/include/clock.h"
+#include "test/gtest.h"
+#include "test/testsupport/perf_test.h"
+
+namespace webrtc {
+
+namespace {
+
+static const bool kPrintAllDurations = false;
+
+class CallSimulator;
+
+// Type of the render thread APM API call to use in the test.
+enum class ProcessorType { kRender, kCapture };
+
+// Variant of APM processing settings to use in the test.
+enum class SettingsType {
+ kDefaultApmDesktop,
+ kDefaultApmMobile,
+ kAllSubmodulesTurnedOff,
+ kDefaultApmDesktopWithoutDelayAgnostic,
+ kDefaultApmDesktopWithoutExtendedFilter
+};
+
+// Variables related to the audio data and formats.
+struct AudioFrameData {
+ explicit AudioFrameData(size_t max_frame_size) {
+ // Set up the two-dimensional arrays needed for the APM API calls.
+    input_frame_channels.resize(2 * max_frame_size);
+    input_frame.resize(2);
+    input_frame[0] = &input_frame_channels[0];
+    input_frame[1] = &input_frame_channels[max_frame_size];
+
+ output_frame_channels.resize(2 * max_frame_size);
+ output_frame.resize(2);
+ output_frame[0] = &output_frame_channels[0];
+ output_frame[1] = &output_frame_channels[max_frame_size];
+ }
+
+ std::vector<float> output_frame_channels;
+ std::vector<float*> output_frame;
+  std::vector<float> input_frame_channels;
+ std::vector<float*> input_frame;
+ StreamConfig input_stream_config;
+ StreamConfig output_stream_config;
+};
+
+// The configuration for the test.
+struct SimulationConfig {
+ SimulationConfig(int sample_rate_hz, SettingsType simulation_settings)
+ : sample_rate_hz(sample_rate_hz),
+ simulation_settings(simulation_settings) {}
+
+ static std::vector<SimulationConfig> GenerateSimulationConfigs() {
+ std::vector<SimulationConfig> simulation_configs;
+#ifndef WEBRTC_ANDROID
+ const SettingsType desktop_settings[] = {
+ SettingsType::kDefaultApmDesktop, SettingsType::kAllSubmodulesTurnedOff,
+ SettingsType::kDefaultApmDesktopWithoutDelayAgnostic,
+ SettingsType::kDefaultApmDesktopWithoutExtendedFilter};
+
+ const int desktop_sample_rates[] = {8000, 16000, 32000, 48000};
+
+ for (auto sample_rate : desktop_sample_rates) {
+ for (auto settings : desktop_settings) {
+ simulation_configs.push_back(SimulationConfig(sample_rate, settings));
+ }
+ }
+#endif
+
+ const SettingsType mobile_settings[] = {SettingsType::kDefaultApmMobile};
+
+ const int mobile_sample_rates[] = {8000, 16000};
+
+ for (auto sample_rate : mobile_sample_rates) {
+ for (auto settings : mobile_settings) {
+ simulation_configs.push_back(SimulationConfig(sample_rate, settings));
+ }
+ }
+
+ return simulation_configs;
+ }
+
+ std::string SettingsDescription() const {
+ std::string description;
+ switch (simulation_settings) {
+ case SettingsType::kDefaultApmMobile:
+ description = "DefaultApmMobile";
+ break;
+ case SettingsType::kDefaultApmDesktop:
+ description = "DefaultApmDesktop";
+ break;
+ case SettingsType::kAllSubmodulesTurnedOff:
+ description = "AllSubmodulesOff";
+ break;
+ case SettingsType::kDefaultApmDesktopWithoutDelayAgnostic:
+ description = "DefaultApmDesktopWithoutDelayAgnostic";
+ break;
+ case SettingsType::kDefaultApmDesktopWithoutExtendedFilter:
+ description = "DefaultApmDesktopWithoutExtendedFilter";
+ break;
+ }
+ return description;
+ }
+
+ int sample_rate_hz = 16000;
+ SettingsType simulation_settings = SettingsType::kDefaultApmDesktop;
+};
+
+// Handler for the frame counters.
+class FrameCounters {
+ public:
+ void IncreaseRenderCounter() { render_count_.fetch_add(1); }
+
+ void IncreaseCaptureCounter() { capture_count_.fetch_add(1); }
+
+ int CaptureMinusRenderCounters() const {
+ // The return value will be approximate, but that's good enough since
+ // by the time we return the value, it's not guaranteed to be correct
+ // anyway.
+ return capture_count_.load(std::memory_order_acquire) -
+ render_count_.load(std::memory_order_acquire);
+ }
+
+ int RenderMinusCaptureCounters() const {
+ return -CaptureMinusRenderCounters();
+ }
+
+  bool BothCountersExceedThreshold(int threshold) const {
+ // TODO(tommi): We could use an event to signal this so that we don't need
+ // to be polling from the main thread and possibly steal cycles.
+ const int capture_count = capture_count_.load(std::memory_order_acquire);
+ const int render_count = render_count_.load(std::memory_order_acquire);
+ return (render_count > threshold && capture_count > threshold);
+ }
+
+ private:
+ std::atomic<int> render_count_{0};
+ std::atomic<int> capture_count_{0};
+};
+
+// Class that represents a flag that can only be raised.
+class LockedFlag {
+ public:
+ bool get_flag() const { return flag_.load(std::memory_order_acquire); }
+
+ void set_flag() {
+ if (!get_flag()) {
+      // The get_flag() check above is a read-only operation that avoids
+      // needlessly dirtying the cache line when the flag is already set.
+ int zero = 0;
+ flag_.compare_exchange_strong(zero, 1);
+ }
+ }
+
+ private:
+ std::atomic<int> flag_{0};
+};
+
+// Parent class for the thread processors.
+class TimedThreadApiProcessor {
+ public:
+ TimedThreadApiProcessor(ProcessorType processor_type,
+ Random* rand_gen,
+ FrameCounters* shared_counters_state,
+ LockedFlag* capture_call_checker,
+ CallSimulator* test_framework,
+ const SimulationConfig* simulation_config,
+ AudioProcessing* apm,
+ int num_durations_to_store,
+ float input_level,
+ int num_channels)
+ : rand_gen_(rand_gen),
+ frame_counters_(shared_counters_state),
+ capture_call_checker_(capture_call_checker),
+ test_(test_framework),
+ simulation_config_(simulation_config),
+ apm_(apm),
+ frame_data_(kMaxFrameSize),
+ clock_(webrtc::Clock::GetRealTimeClock()),
+ num_durations_to_store_(num_durations_to_store),
+ input_level_(input_level),
+ processor_type_(processor_type),
+ num_channels_(num_channels) {
+ api_call_durations_.reserve(num_durations_to_store_);
+ }
+
+ // Implements the callback functionality for the threads.
+ bool Process();
+
+ // Method for printing out the simulation statistics.
+ void print_processor_statistics(absl::string_view processor_name) const {
+ const std::string modifier = "_api_call_duration";
+
+ const std::string sample_rate_name =
+ "_" + std::to_string(simulation_config_->sample_rate_hz) + "Hz";
+
+ webrtc::test::PrintResultMeanAndError(
+ "apm_timing", sample_rate_name, processor_name, GetDurationAverage(),
+ GetDurationStandardDeviation(), "us", false);
+
+ if (kPrintAllDurations) {
+ webrtc::test::PrintResultList("apm_call_durations", sample_rate_name,
+ processor_name, api_call_durations_, "us",
+ false);
+ }
+ }
+
+ void AddDuration(int64_t duration) {
+ if (api_call_durations_.size() < num_durations_to_store_) {
+ api_call_durations_.push_back(duration);
+ }
+ }
+
+ private:
+ static const int kMaxCallDifference = 10;
+ static const int kMaxFrameSize = 480;
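+  // The first API call durations are dominated by initialization work, so
+  // they are excluded from the averages computed below.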
+ static const int kNumInitializationFrames = 5;
+
+ int64_t GetDurationStandardDeviation() const {
+ double variance = 0;
+ const int64_t average_duration = GetDurationAverage();
+ for (size_t k = kNumInitializationFrames; k < api_call_durations_.size();
+ k++) {
+ int64_t tmp = api_call_durations_[k] - average_duration;
+ variance += static_cast<double>(tmp * tmp);
+ }
+ const int denominator = rtc::checked_cast<int>(api_call_durations_.size()) -
+ kNumInitializationFrames;
+ return (denominator > 0
+ ? rtc::checked_cast<int64_t>(sqrt(variance / denominator))
+ : -1);
+ }
+
+ int64_t GetDurationAverage() const {
+ int64_t average_duration = 0;
+ for (size_t k = kNumInitializationFrames; k < api_call_durations_.size();
+ k++) {
+ average_duration += api_call_durations_[k];
+ }
+ const int denominator = rtc::checked_cast<int>(api_call_durations_.size()) -
+ kNumInitializationFrames;
+ return (denominator > 0 ? average_duration / denominator : -1);
+ }
+
+ int ProcessCapture() {
+ // Set the stream delay.
+ apm_->set_stream_delay_ms(30);
+
+ // Call and time the specified capture side API processing method.
+ const int64_t start_time = clock_->TimeInMicroseconds();
+ const int result = apm_->ProcessStream(
+ &frame_data_.input_frame[0], frame_data_.input_stream_config,
+ frame_data_.output_stream_config, &frame_data_.output_frame[0]);
+ const int64_t end_time = clock_->TimeInMicroseconds();
+
+ frame_counters_->IncreaseCaptureCounter();
+
+ AddDuration(end_time - start_time);
+
+ if (first_process_call_) {
+      // Flag that the capture side has been called at least once. This is
+      // needed to ensure that a capture call has been done before the first
+      // render call is performed, as implicitly required by the APM API.
+ capture_call_checker_->set_flag();
+ first_process_call_ = false;
+ }
+ return result;
+ }
+
+ bool ReadyToProcessCapture() {
+ return (frame_counters_->CaptureMinusRenderCounters() <=
+ kMaxCallDifference);
+ }
+
+ int ProcessRender() {
+ // Call and time the specified render side API processing method.
+ const int64_t start_time = clock_->TimeInMicroseconds();
+ const int result = apm_->ProcessReverseStream(
+ &frame_data_.input_frame[0], frame_data_.input_stream_config,
+ frame_data_.output_stream_config, &frame_data_.output_frame[0]);
+ const int64_t end_time = clock_->TimeInMicroseconds();
+ frame_counters_->IncreaseRenderCounter();
+
+ AddDuration(end_time - start_time);
+
+ return result;
+ }
+
+ bool ReadyToProcessRender() {
+    // Do not process until at least one capture call has been done, as
+    // implicitly required by the APM API.
+ if (first_process_call_ && !capture_call_checker_->get_flag()) {
+ return false;
+ }
+
+    // Ensure that the render and capture call counts do not differ too much.
+ if (frame_counters_->RenderMinusCaptureCounters() > kMaxCallDifference) {
+ return false;
+ }
+
+ first_process_call_ = false;
+ return true;
+ }
+
+ void PrepareFrame() {
+ // Lambda function for populating a float multichannel audio frame
+ // with random data.
+ auto populate_audio_frame = [](float amplitude, size_t num_channels,
+ size_t samples_per_channel, Random* rand_gen,
+ float** frame) {
+ for (size_t ch = 0; ch < num_channels; ch++) {
+ for (size_t k = 0; k < samples_per_channel; k++) {
+          // Store a random float with a value between -amplitude and
+          // +amplitude.
+ frame[ch][k] = amplitude * (2 * rand_gen->Rand<float>() - 1);
+ }
+ }
+ };
+
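+    // AudioProcessing::kChunkSizeMs is 10 ms, so e.g. a 48 kHz sample rate
+    // yields 480 samples per channel per call.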
+ // Prepare the audio input data and metadata.
+ frame_data_.input_stream_config.set_sample_rate_hz(
+ simulation_config_->sample_rate_hz);
+ frame_data_.input_stream_config.set_num_channels(num_channels_);
+ populate_audio_frame(input_level_, num_channels_,
+ (simulation_config_->sample_rate_hz *
+ AudioProcessing::kChunkSizeMs / 1000),
+ rand_gen_, &frame_data_.input_frame[0]);
+
+ // Prepare the float audio output data and metadata.
+ frame_data_.output_stream_config.set_sample_rate_hz(
+ simulation_config_->sample_rate_hz);
+ frame_data_.output_stream_config.set_num_channels(1);
+ }
+
+ bool ReadyToProcess() {
+ switch (processor_type_) {
+ case ProcessorType::kRender:
+ return ReadyToProcessRender();
+
+ case ProcessorType::kCapture:
+ return ReadyToProcessCapture();
+ }
+
+ // Should not be reached, but the return statement is needed for the code to
+ // build successfully on Android.
+ RTC_DCHECK_NOTREACHED();
+ return false;
+ }
+
+ Random* rand_gen_ = nullptr;
+ FrameCounters* frame_counters_ = nullptr;
+ LockedFlag* capture_call_checker_ = nullptr;
+ CallSimulator* test_ = nullptr;
+ const SimulationConfig* const simulation_config_ = nullptr;
+ AudioProcessing* apm_ = nullptr;
+ AudioFrameData frame_data_;
+ webrtc::Clock* clock_;
+ const size_t num_durations_to_store_;
+ std::vector<double> api_call_durations_;
+ const float input_level_;
+ bool first_process_call_ = true;
+ const ProcessorType processor_type_;
+ const int num_channels_ = 1;
+};
+
+// Class for managing the test simulation.
+class CallSimulator : public ::testing::TestWithParam<SimulationConfig> {
+ public:
+ CallSimulator()
+ : rand_gen_(42U),
+ simulation_config_(static_cast<SimulationConfig>(GetParam())) {}
+
+ // Run the call simulation with a timeout.
+ bool Run() {
+ StartThreads();
+
+ bool result = test_complete_.Wait(kTestTimeout);
+
+ StopThreads();
+
+ render_thread_state_->print_processor_statistics(
+ simulation_config_.SettingsDescription() + "_render");
+ capture_thread_state_->print_processor_statistics(
+ simulation_config_.SettingsDescription() + "_capture");
+
+ return result;
+ }
+
+ // Tests whether all the required render and capture side calls have been
+ // done.
+ bool MaybeEndTest() {
+    if (frame_counters_.BothCountersExceedThreshold(kMinNumFramesToProcess)) {
+ test_complete_.Set();
+ return true;
+ }
+ return false;
+ }
+
+ private:
+ static const float kCaptureInputFloatLevel;
+ static const float kRenderInputFloatLevel;
+ static const int kMinNumFramesToProcess = 150;
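+  // Timeout with 3x headroom over the nominal 10 ms per frame to process;
+  // with 150 frames this evaluates to 4500.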
+ static const int32_t kTestTimeout = 3 * 10 * kMinNumFramesToProcess;
+
+ // Stop all running threads.
+ void StopThreads() {
+ render_thread_.Finalize();
+ capture_thread_.Finalize();
+ }
+
+ // Simulator and APM setup.
+ void SetUp() override {
+ // Lambda function for setting the default APM runtime settings for desktop.
+ auto set_default_desktop_apm_runtime_settings = [](AudioProcessing* apm) {
+ AudioProcessing::Config apm_config = apm->GetConfig();
+ apm_config.echo_canceller.enabled = true;
+ apm_config.echo_canceller.mobile_mode = false;
+ apm_config.noise_suppression.enabled = true;
+ apm_config.gain_controller1.enabled = true;
+ apm_config.gain_controller1.mode =
+ AudioProcessing::Config::GainController1::kAdaptiveDigital;
+ apm->ApplyConfig(apm_config);
+ };
+
+ // Lambda function for setting the default APM runtime settings for mobile.
+ auto set_default_mobile_apm_runtime_settings = [](AudioProcessing* apm) {
+ AudioProcessing::Config apm_config = apm->GetConfig();
+ apm_config.echo_canceller.enabled = true;
+ apm_config.echo_canceller.mobile_mode = true;
+ apm_config.noise_suppression.enabled = true;
+ apm_config.gain_controller1.mode =
+ AudioProcessing::Config::GainController1::kAdaptiveDigital;
+ apm->ApplyConfig(apm_config);
+ };
+
+    // Lambda function for turning off all of the APM submodules via the
+    // runtime settings.
+ auto turn_off_default_apm_runtime_settings = [](AudioProcessing* apm) {
+ AudioProcessing::Config apm_config = apm->GetConfig();
+ apm_config.echo_canceller.enabled = false;
+ apm_config.gain_controller1.enabled = false;
+ apm_config.noise_suppression.enabled = false;
+ apm->ApplyConfig(apm_config);
+ };
+
+ int num_capture_channels = 1;
+ switch (simulation_config_.simulation_settings) {
+ case SettingsType::kDefaultApmMobile: {
+ apm_ = AudioProcessingBuilderForTesting().Create();
+ ASSERT_TRUE(!!apm_);
+ set_default_mobile_apm_runtime_settings(apm_.get());
+ break;
+ }
+ case SettingsType::kDefaultApmDesktop: {
+ apm_ = AudioProcessingBuilderForTesting().Create();
+ ASSERT_TRUE(!!apm_);
+ set_default_desktop_apm_runtime_settings(apm_.get());
+ break;
+ }
+ case SettingsType::kAllSubmodulesTurnedOff: {
+ apm_ = AudioProcessingBuilderForTesting().Create();
+ ASSERT_TRUE(!!apm_);
+ turn_off_default_apm_runtime_settings(apm_.get());
+ break;
+ }
+ case SettingsType::kDefaultApmDesktopWithoutDelayAgnostic: {
+ apm_ = AudioProcessingBuilderForTesting().Create();
+ ASSERT_TRUE(!!apm_);
+ set_default_desktop_apm_runtime_settings(apm_.get());
+ break;
+ }
+ case SettingsType::kDefaultApmDesktopWithoutExtendedFilter: {
+ apm_ = AudioProcessingBuilderForTesting().Create();
+ ASSERT_TRUE(!!apm_);
+ set_default_desktop_apm_runtime_settings(apm_.get());
+ break;
+ }
+ }
+
+ render_thread_state_.reset(new TimedThreadApiProcessor(
+ ProcessorType::kRender, &rand_gen_, &frame_counters_,
+ &capture_call_checker_, this, &simulation_config_, apm_.get(),
+ kMinNumFramesToProcess, kRenderInputFloatLevel, 1));
+ capture_thread_state_.reset(new TimedThreadApiProcessor(
+ ProcessorType::kCapture, &rand_gen_, &frame_counters_,
+ &capture_call_checker_, this, &simulation_config_, apm_.get(),
+ kMinNumFramesToProcess, kCaptureInputFloatLevel, num_capture_channels));
+ }
+
+ // Start the threads used in the test.
+ void StartThreads() {
+ const auto attributes =
+ rtc::ThreadAttributes().SetPriority(rtc::ThreadPriority::kRealtime);
+ render_thread_ = rtc::PlatformThread::SpawnJoinable(
+ [this] {
+ while (render_thread_state_->Process()) {
+ }
+ },
+ "render", attributes);
+ capture_thread_ = rtc::PlatformThread::SpawnJoinable(
+ [this] {
+ while (capture_thread_state_->Process()) {
+ }
+ },
+ "capture", attributes);
+ }
+
+ // Event handler for the test.
+ rtc::Event test_complete_;
+
+ // Thread related variables.
+ Random rand_gen_;
+
+ rtc::scoped_refptr<AudioProcessing> apm_;
+ const SimulationConfig simulation_config_;
+ FrameCounters frame_counters_;
+ LockedFlag capture_call_checker_;
+ std::unique_ptr<TimedThreadApiProcessor> render_thread_state_;
+ std::unique_ptr<TimedThreadApiProcessor> capture_thread_state_;
+ rtc::PlatformThread render_thread_;
+ rtc::PlatformThread capture_thread_;
+};
+
+// Implements the callback functionality for the threads.
+bool TimedThreadApiProcessor::Process() {
+ PrepareFrame();
+
+  // Spin-wait until it is OK to start processing. Note that SleepMs is not
+  // applicable since it only allows sleeping at millisecond granularity, which
+  // is too long here.
+ // TODO(tommi): This loop may affect the performance of the test that it's
+ // meant to measure. See if we could use events instead to signal readiness.
+ while (!ReadyToProcess()) {
+ }
+
+ int result = AudioProcessing::kNoError;
+ switch (processor_type_) {
+ case ProcessorType::kRender:
+ result = ProcessRender();
+ break;
+ case ProcessorType::kCapture:
+ result = ProcessCapture();
+ break;
+ }
+
+ EXPECT_EQ(result, AudioProcessing::kNoError);
+
+ return !test_->MaybeEndTest();
+}
+
+const float CallSimulator::kRenderInputFloatLevel = 0.5f;
+const float CallSimulator::kCaptureInputFloatLevel = 0.03125f;
+} // anonymous namespace
+
+// TODO(peah): Reactivate once issue 7712 has been resolved.
+TEST_P(CallSimulator, DISABLED_ApiCallDurationTest) {
+ // Run test and verify that it did not time out.
+ EXPECT_TRUE(Run());
+}
+
+INSTANTIATE_TEST_SUITE_P(
+ AudioProcessingPerformanceTest,
+ CallSimulator,
+ ::testing::ValuesIn(SimulationConfig::GenerateSimulationConfigs()));
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/audio_processing_statistics_gn/moz.build b/third_party/libwebrtc/modules/audio_processing/audio_processing_statistics_gn/moz.build
new file mode 100644
index 0000000000..9446476264
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/audio_processing_statistics_gn/moz.build
@@ -0,0 +1,197 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/audio_processing/include/audio_processing_statistics.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("audio_processing_statistics_gn")
diff --git a/third_party/libwebrtc/modules/audio_processing/audio_processing_unittest.cc b/third_party/libwebrtc/modules/audio_processing/audio_processing_unittest.cc
new file mode 100644
index 0000000000..a0514744c4
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/audio_processing_unittest.cc
@@ -0,0 +1,3155 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/audio_processing/include/audio_processing.h"
+
+#include <math.h>
+#include <stdio.h>
+
+#include <algorithm>
+#include <cmath>
+#include <limits>
+#include <memory>
+#include <numeric>
+#include <queue>
+#include <string>
+
+#include "absl/flags/flag.h"
+#include "absl/strings/string_view.h"
+#include "api/audio/echo_detector_creator.h"
+#include "api/make_ref_counted.h"
+#include "common_audio/include/audio_util.h"
+#include "common_audio/resampler/include/push_resampler.h"
+#include "common_audio/resampler/push_sinc_resampler.h"
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+#include "modules/audio_processing/aec_dump/aec_dump_factory.h"
+#include "modules/audio_processing/audio_processing_impl.h"
+#include "modules/audio_processing/include/mock_audio_processing.h"
+#include "modules/audio_processing/test/audio_processing_builder_for_testing.h"
+#include "modules/audio_processing/test/protobuf_utils.h"
+#include "modules/audio_processing/test/test_utils.h"
+#include "rtc_base/arraysize.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/fake_clock.h"
+#include "rtc_base/gtest_prod_util.h"
+#include "rtc_base/ignore_wundef.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "rtc_base/numerics/safe_minmax.h"
+#include "rtc_base/protobuf_utils.h"
+#include "rtc_base/strings/string_builder.h"
+#include "rtc_base/swap_queue.h"
+#include "rtc_base/system/arch.h"
+#include "rtc_base/task_queue_for_test.h"
+#include "rtc_base/thread.h"
+#include "system_wrappers/include/cpu_features_wrapper.h"
+#include "test/gtest.h"
+#include "test/testsupport/file_utils.h"
+
+RTC_PUSH_IGNORING_WUNDEF()
+#include "modules/audio_processing/debug.pb.h"
+#ifdef WEBRTC_ANDROID_PLATFORM_BUILD
+#include "external/webrtc/webrtc/modules/audio_processing/test/unittest.pb.h"
+#else
+#include "modules/audio_processing/test/unittest.pb.h"
+#endif
+RTC_POP_IGNORING_WUNDEF()
+
+ABSL_FLAG(bool,
+ write_apm_ref_data,
+ false,
+ "Write ApmTest.Process results to file, instead of comparing results "
+ "to the existing reference data file.");
+
+namespace webrtc {
+namespace {
+
+// All sample rates used by APM internally during processing. Other input /
+// output rates are resampled to / from one of these.
+const int kProcessSampleRates[] = {16000, 32000, 48000};
+
+enum StreamDirection { kForward = 0, kReverse };
+
+void ConvertToFloat(const int16_t* int_data, ChannelBuffer<float>* cb) {
+ ChannelBuffer<int16_t> cb_int(cb->num_frames(), cb->num_channels());
+ Deinterleave(int_data, cb->num_frames(), cb->num_channels(),
+ cb_int.channels());
+ for (size_t i = 0; i < cb->num_channels(); ++i) {
+ S16ToFloat(cb_int.channels()[i], cb->num_frames(), cb->channels()[i]);
+ }
+}
+
+void ConvertToFloat(const Int16FrameData& frame, ChannelBuffer<float>* cb) {
+ ConvertToFloat(frame.data.data(), cb);
+}
+
+void MixStereoToMono(const float* stereo,
+ float* mono,
+ size_t samples_per_channel) {
+ for (size_t i = 0; i < samples_per_channel; ++i)
+ mono[i] = (stereo[i * 2] + stereo[i * 2 + 1]) / 2;
+}
+
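+// The int16_t variant sums the pair as int (which cannot overflow for two
+// int16_t values) and halves the sum with an arithmetic shift.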
+void MixStereoToMono(const int16_t* stereo,
+ int16_t* mono,
+ size_t samples_per_channel) {
+ for (size_t i = 0; i < samples_per_channel; ++i)
+ mono[i] = (stereo[i * 2] + stereo[i * 2 + 1]) >> 1;
+}
+
+void CopyLeftToRightChannel(int16_t* stereo, size_t samples_per_channel) {
+ for (size_t i = 0; i < samples_per_channel; i++) {
+ stereo[i * 2 + 1] = stereo[i * 2];
+ }
+}
+
+void VerifyChannelsAreEqual(const int16_t* stereo, size_t samples_per_channel) {
+ for (size_t i = 0; i < samples_per_channel; i++) {
+ EXPECT_EQ(stereo[i * 2 + 1], stereo[i * 2]);
+ }
+}
+
+void SetFrameTo(Int16FrameData* frame, int16_t value) {
+ for (size_t i = 0; i < frame->samples_per_channel * frame->num_channels;
+ ++i) {
+ frame->data[i] = value;
+ }
+}
+
+void SetFrameTo(Int16FrameData* frame, int16_t left, int16_t right) {
+ ASSERT_EQ(2u, frame->num_channels);
+ for (size_t i = 0; i < frame->samples_per_channel * 2; i += 2) {
+ frame->data[i] = left;
+ frame->data[i + 1] = right;
+ }
+}
+
+void ScaleFrame(Int16FrameData* frame, float scale) {
+ for (size_t i = 0; i < frame->samples_per_channel * frame->num_channels;
+ ++i) {
+ frame->data[i] = FloatS16ToS16(frame->data[i] * scale);
+ }
+}
+
+bool FrameDataAreEqual(const Int16FrameData& frame1,
+ const Int16FrameData& frame2) {
+ if (frame1.samples_per_channel != frame2.samples_per_channel) {
+ return false;
+ }
+ if (frame1.num_channels != frame2.num_channels) {
+ return false;
+ }
+ if (memcmp(
+ frame1.data.data(), frame2.data.data(),
+ frame1.samples_per_channel * frame1.num_channels * sizeof(int16_t))) {
+ return false;
+ }
+ return true;
+}
+
+rtc::ArrayView<int16_t> GetMutableFrameData(Int16FrameData* frame) {
+ int16_t* ptr = frame->data.data();
+ const size_t len = frame->samples_per_channel * frame->num_channels;
+ return rtc::ArrayView<int16_t>(ptr, len);
+}
+
+rtc::ArrayView<const int16_t> GetFrameData(const Int16FrameData& frame) {
+ const int16_t* ptr = frame.data.data();
+ const size_t len = frame.samples_per_channel * frame.num_channels;
+ return rtc::ArrayView<const int16_t>(ptr, len);
+}
+
+void EnableAllAPComponents(AudioProcessing* ap) {
+ AudioProcessing::Config apm_config = ap->GetConfig();
+ apm_config.echo_canceller.enabled = true;
+#if defined(WEBRTC_AUDIOPROC_FIXED_PROFILE)
+ apm_config.echo_canceller.mobile_mode = true;
+
+ apm_config.gain_controller1.enabled = true;
+ apm_config.gain_controller1.mode =
+ AudioProcessing::Config::GainController1::kAdaptiveDigital;
+#elif defined(WEBRTC_AUDIOPROC_FLOAT_PROFILE)
+ apm_config.echo_canceller.mobile_mode = false;
+
+ apm_config.gain_controller1.enabled = true;
+ apm_config.gain_controller1.mode =
+ AudioProcessing::Config::GainController1::kAdaptiveAnalog;
+#endif
+
+ apm_config.noise_suppression.enabled = true;
+
+ apm_config.high_pass_filter.enabled = true;
+ apm_config.pipeline.maximum_internal_processing_rate = 48000;
+ ap->ApplyConfig(apm_config);
+}
+
+// These functions are only used by ApmTest.Process.
+template <class T>
+T AbsValue(T a) {
+ return a > 0 ? a : -a;
+}
+
+int16_t MaxAudioFrame(const Int16FrameData& frame) {
+ const size_t length = frame.samples_per_channel * frame.num_channels;
+ int16_t max_data = AbsValue(frame.data[0]);
+ for (size_t i = 1; i < length; i++) {
+ max_data = std::max(max_data, AbsValue(frame.data[i]));
+ }
+
+ return max_data;
+}
+
+void OpenFileAndWriteMessage(absl::string_view filename,
+ const MessageLite& msg) {
+ FILE* file = fopen(std::string(filename).c_str(), "wb");
+ ASSERT_TRUE(file != NULL);
+
+ int32_t size = rtc::checked_cast<int32_t>(msg.ByteSizeLong());
+ ASSERT_GT(size, 0);
+ std::unique_ptr<uint8_t[]> array(new uint8_t[size]);
+ ASSERT_TRUE(msg.SerializeToArray(array.get(), size));
+
+ ASSERT_EQ(1u, fwrite(&size, sizeof(size), 1, file));
+ ASSERT_EQ(static_cast<size_t>(size),
+ fwrite(array.get(), sizeof(array[0]), size, file));
+ fclose(file);
+}
+
+std::string ResourceFilePath(absl::string_view name, int sample_rate_hz) {
+ rtc::StringBuilder ss;
+ // Resource files are all stereo.
+ ss << name << sample_rate_hz / 1000 << "_stereo";
+ return test::ResourcePath(ss.str(), "pcm");
+}
+
+// Temporary filenames unique to this process. Used to be able to run these
+// tests in parallel: each process needs to run in isolation, so they can't
+// have competing filenames.
+std::map<std::string, std::string> temp_filenames;
+
+std::string OutputFilePath(absl::string_view name,
+ int input_rate,
+ int output_rate,
+ int reverse_input_rate,
+ int reverse_output_rate,
+ size_t num_input_channels,
+ size_t num_output_channels,
+ size_t num_reverse_input_channels,
+ size_t num_reverse_output_channels,
+ StreamDirection file_direction) {
+ rtc::StringBuilder ss;
+ ss << name << "_i" << num_input_channels << "_" << input_rate / 1000 << "_ir"
+ << num_reverse_input_channels << "_" << reverse_input_rate / 1000 << "_";
+ if (num_output_channels == 1) {
+ ss << "mono";
+ } else if (num_output_channels == 2) {
+ ss << "stereo";
+ } else {
+ RTC_DCHECK_NOTREACHED();
+ }
+ ss << output_rate / 1000;
+ if (num_reverse_output_channels == 1) {
+ ss << "_rmono";
+ } else if (num_reverse_output_channels == 2) {
+ ss << "_rstereo";
+ } else {
+ RTC_DCHECK_NOTREACHED();
+ }
+ ss << reverse_output_rate / 1000;
+ ss << "_d" << file_direction << "_pcm";
+
+ std::string filename = ss.str();
+ if (temp_filenames[filename].empty())
+ temp_filenames[filename] = test::TempFilename(test::OutputPath(), filename);
+ return temp_filenames[filename];
+}
+
+void ClearTempFiles() {
+ for (auto& kv : temp_filenames)
+ remove(kv.second.c_str());
+}
+
+// Only remove "out" files. Keep "ref" files.
+void ClearTempOutFiles() {
+ for (auto it = temp_filenames.begin(); it != temp_filenames.end();) {
+ const std::string& filename = it->first;
+ if (filename.substr(0, 3).compare("out") == 0) {
+ remove(it->second.c_str());
+ temp_filenames.erase(it++);
+ } else {
+ it++;
+ }
+ }
+}
+
+void OpenFileAndReadMessage(absl::string_view filename, MessageLite* msg) {
+ FILE* file = fopen(std::string(filename).c_str(), "rb");
+ ASSERT_TRUE(file != NULL);
+ ReadMessageFromFile(file, msg);
+ fclose(file);
+}
+
+// Reads a 10 ms chunk (actually AudioProcessing::GetFrameSize() samples per
+// channel) of int16 interleaved audio from the given (assumed stereo) file,
+// converts to deinterleaved float (optionally downmixing) and returns the
+// result in `cb`. Returns false if the file ended (or on error) and true
+// otherwise.
+//
+// `int_data` and `float_data` are just temporary space that must be
+// sufficiently large to hold the 10 ms chunk.
+bool ReadChunk(FILE* file,
+ int16_t* int_data,
+ float* float_data,
+ ChannelBuffer<float>* cb) {
+ // The files always contain stereo audio.
+ size_t frame_size = cb->num_frames() * 2;
+ size_t read_count = fread(int_data, sizeof(int16_t), frame_size, file);
+ if (read_count != frame_size) {
+ // Check that the file really ended.
+ RTC_DCHECK(feof(file));
+ return false; // This is expected.
+ }
+
+ S16ToFloat(int_data, frame_size, float_data);
+ if (cb->num_channels() == 1) {
+ MixStereoToMono(float_data, cb->channels()[0], cb->num_frames());
+ } else {
+ Deinterleave(float_data, cb->num_frames(), 2, cb->channels());
+ }
+
+ return true;
+}
+
+// Returns the reference file name that matches the current CPU
+// architecture/optimizations.
+std::string GetReferenceFilename() {
+#if defined(WEBRTC_AUDIOPROC_FIXED_PROFILE)
+ return test::ResourcePath("audio_processing/output_data_fixed", "pb");
+#elif defined(WEBRTC_AUDIOPROC_FLOAT_PROFILE)
+ if (GetCPUInfo(kAVX2) != 0) {
+ return test::ResourcePath("audio_processing/output_data_float_avx2", "pb");
+ }
+ return test::ResourcePath("audio_processing/output_data_float", "pb");
+#endif
+}
+
+// Flag that can temporarily be enabled for local debugging to inspect
+// `ApmTest.VerifyDebugDump(Int|Float)` failures. Do not upload code changes
+// with this flag set to true.
+constexpr bool kDumpWhenExpectMessageEqFails = false;
+
+// Checks the debug constants values used in this file so that no code change is
+// submitted with values temporarily used for local debugging.
+TEST(ApmUnitTests, CheckDebugConstants) {
+ ASSERT_FALSE(kDumpWhenExpectMessageEqFails);
+}
+
+// Expects the equality of `actual` and `expected` by inspecting a hard-coded
+// subset of `audioproc::Stream` fields.
+void ExpectStreamFieldsEq(const audioproc::Stream& actual,
+ const audioproc::Stream& expected) {
+ EXPECT_EQ(actual.input_data(), expected.input_data());
+ EXPECT_EQ(actual.output_data(), expected.output_data());
+ EXPECT_EQ(actual.delay(), expected.delay());
+ EXPECT_EQ(actual.drift(), expected.drift());
+ EXPECT_EQ(actual.level(), expected.level());
+ EXPECT_EQ(actual.keypress(), expected.keypress());
+}
+
+// Expects the equality of `actual` and `expected` by inspecting a hard-coded
+// subset of `audioproc::Event` fields.
+void ExpectEventFieldsEq(const audioproc::Event& actual,
+ const audioproc::Event& expected) {
+ EXPECT_EQ(actual.type(), expected.type());
+ if (actual.type() != expected.type()) {
+ return;
+ }
+ switch (actual.type()) {
+ case audioproc::Event::STREAM:
+ ExpectStreamFieldsEq(actual.stream(), expected.stream());
+ break;
+ default:
+ // Not implemented.
+ break;
+ }
+}
+
+// Returns true if the `actual` and `expected` byte streams share the same size
+// and contain the same data. If they differ and `kDumpWhenExpectMessageEqFails`
+// is true, checks the equality of a subset of `audioproc::Event` (nested)
+// fields.
+bool ExpectMessageEq(rtc::ArrayView<const uint8_t> actual,
+ rtc::ArrayView<const uint8_t> expected) {
+ EXPECT_EQ(actual.size(), expected.size());
+ if (actual.size() != expected.size()) {
+ return false;
+ }
+ if (memcmp(actual.data(), expected.data(), actual.size()) == 0) {
+ // Same message. No need to parse.
+ return true;
+ }
+ if (kDumpWhenExpectMessageEqFails) {
+ // Parse differing messages and expect equality to produce detailed error
+ // messages.
+ audioproc::Event event_actual, event_expected;
+ RTC_DCHECK(event_actual.ParseFromArray(actual.data(), actual.size()));
+ RTC_DCHECK(event_expected.ParseFromArray(expected.data(), expected.size()));
+ ExpectEventFieldsEq(event_actual, event_expected);
+ }
+ return false;
+}
+
+class ApmTest : public ::testing::Test {
+ protected:
+ ApmTest();
+ virtual void SetUp();
+ virtual void TearDown();
+
+ static void SetUpTestSuite() {}
+
+ static void TearDownTestSuite() { ClearTempFiles(); }
+
+ // Used to select between int and float interface tests.
+ enum Format { kIntFormat, kFloatFormat };
+
+ void Init(int sample_rate_hz,
+ int output_sample_rate_hz,
+ int reverse_sample_rate_hz,
+ size_t num_input_channels,
+ size_t num_output_channels,
+ size_t num_reverse_channels,
+ bool open_output_file);
+ void Init(AudioProcessing* ap);
+ void EnableAllComponents();
+ bool ReadFrame(FILE* file, Int16FrameData* frame);
+ bool ReadFrame(FILE* file, Int16FrameData* frame, ChannelBuffer<float>* cb);
+ void ReadFrameWithRewind(FILE* file, Int16FrameData* frame);
+ void ReadFrameWithRewind(FILE* file,
+ Int16FrameData* frame,
+ ChannelBuffer<float>* cb);
+ void ProcessDelayVerificationTest(int delay_ms,
+ int system_delay_ms,
+ int delay_min,
+ int delay_max);
+ void TestChangingChannelsInt16Interface(
+ size_t num_channels,
+ AudioProcessing::Error expected_return);
+ void TestChangingForwardChannels(size_t num_in_channels,
+ size_t num_out_channels,
+ AudioProcessing::Error expected_return);
+ void TestChangingReverseChannels(size_t num_rev_channels,
+ AudioProcessing::Error expected_return);
+ void RunQuantizedVolumeDoesNotGetStuckTest(int sample_rate);
+ void RunManualVolumeChangeIsPossibleTest(int sample_rate);
+ void StreamParametersTest(Format format);
+ int ProcessStreamChooser(Format format);
+ int AnalyzeReverseStreamChooser(Format format);
+ void ProcessDebugDump(absl::string_view in_filename,
+ absl::string_view out_filename,
+ Format format,
+ int max_size_bytes);
+ void VerifyDebugDumpTest(Format format);
+
+ const std::string output_path_;
+ const std::string ref_filename_;
+ rtc::scoped_refptr<AudioProcessing> apm_;
+ Int16FrameData frame_;
+ Int16FrameData revframe_;
+ std::unique_ptr<ChannelBuffer<float>> float_cb_;
+ std::unique_ptr<ChannelBuffer<float>> revfloat_cb_;
+ int output_sample_rate_hz_;
+ size_t num_output_channels_;
+ FILE* far_file_;
+ FILE* near_file_;
+ FILE* out_file_;
+};
+
+ApmTest::ApmTest()
+ : output_path_(test::OutputPath()),
+ ref_filename_(GetReferenceFilename()),
+ output_sample_rate_hz_(0),
+ num_output_channels_(0),
+ far_file_(NULL),
+ near_file_(NULL),
+ out_file_(NULL) {
+ apm_ = AudioProcessingBuilderForTesting().Create();
+ AudioProcessing::Config apm_config = apm_->GetConfig();
+ apm_config.gain_controller1.analog_gain_controller.enabled = false;
+ apm_config.pipeline.maximum_internal_processing_rate = 48000;
+ apm_->ApplyConfig(apm_config);
+}
+
+void ApmTest::SetUp() {
+ ASSERT_TRUE(apm_.get() != NULL);
+
+ Init(32000, 32000, 32000, 2, 2, 2, false);
+}
+
+void ApmTest::TearDown() {
+ if (far_file_) {
+ ASSERT_EQ(0, fclose(far_file_));
+ }
+ far_file_ = NULL;
+
+ if (near_file_) {
+ ASSERT_EQ(0, fclose(near_file_));
+ }
+ near_file_ = NULL;
+
+ if (out_file_) {
+ ASSERT_EQ(0, fclose(out_file_));
+ }
+ out_file_ = NULL;
+}
+
+void ApmTest::Init(AudioProcessing* ap) {
+ ASSERT_EQ(
+ kNoErr,
+ ap->Initialize({{{frame_.sample_rate_hz, frame_.num_channels},
+ {output_sample_rate_hz_, num_output_channels_},
+ {revframe_.sample_rate_hz, revframe_.num_channels},
+ {revframe_.sample_rate_hz, revframe_.num_channels}}}));
+}
+
+void ApmTest::Init(int sample_rate_hz,
+ int output_sample_rate_hz,
+ int reverse_sample_rate_hz,
+ size_t num_input_channels,
+ size_t num_output_channels,
+ size_t num_reverse_channels,
+ bool open_output_file) {
+ SetContainerFormat(sample_rate_hz, num_input_channels, &frame_, &float_cb_);
+ output_sample_rate_hz_ = output_sample_rate_hz;
+ num_output_channels_ = num_output_channels;
+
+ SetContainerFormat(reverse_sample_rate_hz, num_reverse_channels, &revframe_,
+ &revfloat_cb_);
+ Init(apm_.get());
+
+ if (far_file_) {
+ ASSERT_EQ(0, fclose(far_file_));
+ }
+ std::string filename = ResourceFilePath("far", sample_rate_hz);
+ far_file_ = fopen(filename.c_str(), "rb");
+ ASSERT_TRUE(far_file_ != NULL) << "Could not open file " << filename << "\n";
+
+ if (near_file_) {
+ ASSERT_EQ(0, fclose(near_file_));
+ }
+ filename = ResourceFilePath("near", sample_rate_hz);
+ near_file_ = fopen(filename.c_str(), "rb");
+ ASSERT_TRUE(near_file_ != NULL) << "Could not open file " << filename << "\n";
+
+ if (open_output_file) {
+ if (out_file_) {
+ ASSERT_EQ(0, fclose(out_file_));
+ }
+ filename = OutputFilePath(
+ "out", sample_rate_hz, output_sample_rate_hz, reverse_sample_rate_hz,
+ reverse_sample_rate_hz, num_input_channels, num_output_channels,
+ num_reverse_channels, num_reverse_channels, kForward);
+ out_file_ = fopen(filename.c_str(), "wb");
+ ASSERT_TRUE(out_file_ != NULL)
+ << "Could not open file " << filename << "\n";
+ }
+}
+
+void ApmTest::EnableAllComponents() {
+ EnableAllAPComponents(apm_.get());
+}
+
+bool ApmTest::ReadFrame(FILE* file,
+ Int16FrameData* frame,
+ ChannelBuffer<float>* cb) {
+ // The files always contain stereo audio.
+ size_t frame_size = frame->samples_per_channel * 2;
+ size_t read_count =
+ fread(frame->data.data(), sizeof(int16_t), frame_size, file);
+ if (read_count != frame_size) {
+ // Check that the file really ended.
+ EXPECT_NE(0, feof(file));
+ return false; // This is expected.
+ }
+
+ if (frame->num_channels == 1) {
+ MixStereoToMono(frame->data.data(), frame->data.data(),
+ frame->samples_per_channel);
+ }
+
+ if (cb) {
+ ConvertToFloat(*frame, cb);
+ }
+ return true;
+}
+
+bool ApmTest::ReadFrame(FILE* file, Int16FrameData* frame) {
+ return ReadFrame(file, frame, NULL);
+}
+
+// If the end of the file has been reached, rewind it and attempt to read the
+// frame again.
+void ApmTest::ReadFrameWithRewind(FILE* file,
+ Int16FrameData* frame,
+ ChannelBuffer<float>* cb) {
+  if (!ReadFrame(file, frame, cb)) {
+    rewind(file);
+    ASSERT_TRUE(ReadFrame(file, frame, cb));
+ }
+}
+
+void ApmTest::ReadFrameWithRewind(FILE* file, Int16FrameData* frame) {
+ ReadFrameWithRewind(file, frame, NULL);
+}
+
+int ApmTest::ProcessStreamChooser(Format format) {
+ if (format == kIntFormat) {
+ return apm_->ProcessStream(
+ frame_.data.data(),
+ StreamConfig(frame_.sample_rate_hz, frame_.num_channels),
+ StreamConfig(frame_.sample_rate_hz, frame_.num_channels),
+ frame_.data.data());
+ }
+ return apm_->ProcessStream(
+ float_cb_->channels(),
+ StreamConfig(frame_.sample_rate_hz, frame_.num_channels),
+ StreamConfig(output_sample_rate_hz_, num_output_channels_),
+ float_cb_->channels());
+}
+
+int ApmTest::AnalyzeReverseStreamChooser(Format format) {
+ if (format == kIntFormat) {
+ return apm_->ProcessReverseStream(
+ revframe_.data.data(),
+ StreamConfig(revframe_.sample_rate_hz, revframe_.num_channels),
+ StreamConfig(revframe_.sample_rate_hz, revframe_.num_channels),
+ revframe_.data.data());
+ }
+ return apm_->AnalyzeReverseStream(
+ revfloat_cb_->channels(),
+ StreamConfig(revframe_.sample_rate_hz, revframe_.num_channels));
+}
+
+void ApmTest::ProcessDelayVerificationTest(int delay_ms,
+ int system_delay_ms,
+ int delay_min,
+ int delay_max) {
+ // The `revframe_` and `frame_` should include the proper frame information,
+ // hence can be used for extracting information.
+ Int16FrameData tmp_frame;
+ std::queue<Int16FrameData*> frame_queue;
+ bool causal = true;
+
+ tmp_frame.CopyFrom(revframe_);
+ SetFrameTo(&tmp_frame, 0);
+
+ EXPECT_EQ(apm_->kNoError, apm_->Initialize());
+ // Initialize the `frame_queue` with empty frames.
+ int frame_delay = delay_ms / 10;
+ while (frame_delay < 0) {
+ Int16FrameData* frame = new Int16FrameData();
+ frame->CopyFrom(tmp_frame);
+ frame_queue.push(frame);
+ frame_delay++;
+ causal = false;
+ }
+ while (frame_delay > 0) {
+ Int16FrameData* frame = new Int16FrameData();
+ frame->CopyFrom(tmp_frame);
+ frame_queue.push(frame);
+ frame_delay--;
+ }
+ // Run for 4.5 seconds, skipping statistics from the first 2.5 seconds. We
+ // need enough frames with audio to have reliable estimates, but as few as
+ // possible to keep processing time down. 4.5 seconds seemed to be a good
+ // compromise for this recording.
+ for (int frame_count = 0; frame_count < 450; ++frame_count) {
+ Int16FrameData* frame = new Int16FrameData();
+ frame->CopyFrom(tmp_frame);
+ // Use the near end recording, since that has more speech in it.
+ ASSERT_TRUE(ReadFrame(near_file_, frame));
+ frame_queue.push(frame);
+ Int16FrameData* reverse_frame = frame;
+ Int16FrameData* process_frame = frame_queue.front();
+ if (!causal) {
+ reverse_frame = frame_queue.front();
+ // When we call ProcessStream() the frame is modified, so we can't use the
+ // pointer directly when things are non-causal. Use an intermediate frame
+ // and copy the data.
+ process_frame = &tmp_frame;
+ process_frame->CopyFrom(*frame);
+ }
+ EXPECT_EQ(apm_->kNoError, apm_->ProcessReverseStream(
+ reverse_frame->data.data(),
+ StreamConfig(reverse_frame->sample_rate_hz,
+ reverse_frame->num_channels),
+ StreamConfig(reverse_frame->sample_rate_hz,
+ reverse_frame->num_channels),
+ reverse_frame->data.data()));
+ EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(system_delay_ms));
+ EXPECT_EQ(apm_->kNoError,
+ apm_->ProcessStream(process_frame->data.data(),
+ StreamConfig(process_frame->sample_rate_hz,
+ process_frame->num_channels),
+ StreamConfig(process_frame->sample_rate_hz,
+ process_frame->num_channels),
+ process_frame->data.data()));
+ frame = frame_queue.front();
+ frame_queue.pop();
+ delete frame;
+
+ if (frame_count == 250) {
+ // Discard the first delay metrics to avoid convergence effects.
+ static_cast<void>(apm_->GetStatistics());
+ }
+ }
+
+ rewind(near_file_);
+ while (!frame_queue.empty()) {
+ Int16FrameData* frame = frame_queue.front();
+ frame_queue.pop();
+ delete frame;
+ }
+ // Calculate expected delay estimate and acceptable regions. Further,
+ // limit them w.r.t. AEC delay estimation support.
+ const size_t samples_per_ms =
+ rtc::SafeMin<size_t>(16u, frame_.samples_per_channel / 10);
+ const int expected_median =
+ rtc::SafeClamp<int>(delay_ms - system_delay_ms, delay_min, delay_max);
+ const int expected_median_high = rtc::SafeClamp<int>(
+ expected_median + rtc::dchecked_cast<int>(96 / samples_per_ms), delay_min,
+ delay_max);
+ const int expected_median_low = rtc::SafeClamp<int>(
+ expected_median - rtc::dchecked_cast<int>(96 / samples_per_ms), delay_min,
+ delay_max);
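+  // E.g. at 16 kHz, samples_per_channel is 160, so samples_per_ms is 16 and
+  // the acceptable region spans the expected median +/- 96 / 16 = 6 ms.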
+ // Verify delay metrics.
+ AudioProcessingStats stats = apm_->GetStatistics();
+ ASSERT_TRUE(stats.delay_median_ms.has_value());
+ int32_t median = *stats.delay_median_ms;
+ EXPECT_GE(expected_median_high, median);
+ EXPECT_LE(expected_median_low, median);
+}
+
+void ApmTest::StreamParametersTest(Format format) {
+ // No errors when the components are disabled.
+ EXPECT_EQ(apm_->kNoError, ProcessStreamChooser(format));
+
+ // -- Missing AGC level --
+ AudioProcessing::Config apm_config = apm_->GetConfig();
+ apm_config.gain_controller1.enabled = true;
+ apm_->ApplyConfig(apm_config);
+ EXPECT_EQ(apm_->kStreamParameterNotSetError, ProcessStreamChooser(format));
+
+ // Resets after successful ProcessStream().
+ apm_->set_stream_analog_level(127);
+ EXPECT_EQ(apm_->kNoError, ProcessStreamChooser(format));
+ EXPECT_EQ(apm_->kStreamParameterNotSetError, ProcessStreamChooser(format));
+
+ // Other stream parameters set correctly.
+ apm_config.echo_canceller.enabled = true;
+ apm_config.echo_canceller.mobile_mode = false;
+ apm_->ApplyConfig(apm_config);
+ EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(100));
+ EXPECT_EQ(apm_->kStreamParameterNotSetError, ProcessStreamChooser(format));
+ apm_config.gain_controller1.enabled = false;
+ apm_->ApplyConfig(apm_config);
+
+ // -- Missing delay --
+ EXPECT_EQ(apm_->kNoError, ProcessStreamChooser(format));
+ EXPECT_EQ(apm_->kNoError, ProcessStreamChooser(format));
+
+ // Resets after successful ProcessStream().
+ EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(100));
+ EXPECT_EQ(apm_->kNoError, ProcessStreamChooser(format));
+ EXPECT_EQ(apm_->kNoError, ProcessStreamChooser(format));
+
+ // Other stream parameters set correctly.
+ apm_config.gain_controller1.enabled = true;
+ apm_->ApplyConfig(apm_config);
+ apm_->set_stream_analog_level(127);
+ EXPECT_EQ(apm_->kNoError, ProcessStreamChooser(format));
+ apm_config.gain_controller1.enabled = false;
+ apm_->ApplyConfig(apm_config);
+
+ // -- No stream parameters --
+ EXPECT_EQ(apm_->kNoError, AnalyzeReverseStreamChooser(format));
+ EXPECT_EQ(apm_->kNoError, ProcessStreamChooser(format));
+
+ // -- All there --
+ EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(100));
+ apm_->set_stream_analog_level(127);
+ EXPECT_EQ(apm_->kNoError, ProcessStreamChooser(format));
+}
+
+TEST_F(ApmTest, StreamParametersInt) {
+ StreamParametersTest(kIntFormat);
+}
+
+TEST_F(ApmTest, StreamParametersFloat) {
+ StreamParametersTest(kFloatFormat);
+}
+
+void ApmTest::TestChangingChannelsInt16Interface(
+ size_t num_channels,
+ AudioProcessing::Error expected_return) {
+ frame_.num_channels = num_channels;
+
+ EXPECT_EQ(expected_return,
+ apm_->ProcessStream(
+ frame_.data.data(),
+ StreamConfig(frame_.sample_rate_hz, frame_.num_channels),
+ StreamConfig(frame_.sample_rate_hz, frame_.num_channels),
+ frame_.data.data()));
+ EXPECT_EQ(expected_return,
+ apm_->ProcessReverseStream(
+ frame_.data.data(),
+ StreamConfig(frame_.sample_rate_hz, frame_.num_channels),
+ StreamConfig(frame_.sample_rate_hz, frame_.num_channels),
+ frame_.data.data()));
+}
+
+void ApmTest::TestChangingForwardChannels(
+ size_t num_in_channels,
+ size_t num_out_channels,
+ AudioProcessing::Error expected_return) {
+ const StreamConfig input_stream = {frame_.sample_rate_hz, num_in_channels};
+ const StreamConfig output_stream = {output_sample_rate_hz_, num_out_channels};
+
+ EXPECT_EQ(expected_return,
+ apm_->ProcessStream(float_cb_->channels(), input_stream,
+ output_stream, float_cb_->channels()));
+}
+
+void ApmTest::TestChangingReverseChannels(
+ size_t num_rev_channels,
+ AudioProcessing::Error expected_return) {
+ const ProcessingConfig processing_config = {
+ {{frame_.sample_rate_hz, apm_->num_input_channels()},
+ {output_sample_rate_hz_, apm_->num_output_channels()},
+ {frame_.sample_rate_hz, num_rev_channels},
+ {frame_.sample_rate_hz, num_rev_channels}}};
+
+ EXPECT_EQ(
+ expected_return,
+ apm_->ProcessReverseStream(
+ float_cb_->channels(), processing_config.reverse_input_stream(),
+ processing_config.reverse_output_stream(), float_cb_->channels()));
+}
+
+TEST_F(ApmTest, ChannelsInt16Interface) {
+  // Testing invalid and valid numbers of channels.
+ Init(16000, 16000, 16000, 4, 4, 4, false);
+
+ TestChangingChannelsInt16Interface(0, apm_->kBadNumberChannelsError);
+
+ for (size_t i = 1; i < 4; i++) {
+ TestChangingChannelsInt16Interface(i, kNoErr);
+ EXPECT_EQ(i, apm_->num_input_channels());
+ }
+}
+
+TEST_F(ApmTest, Channels) {
+  // Testing invalid and valid numbers of channels.
+ Init(16000, 16000, 16000, 4, 4, 4, false);
+
+ TestChangingForwardChannels(0, 1, apm_->kBadNumberChannelsError);
+ TestChangingReverseChannels(0, apm_->kBadNumberChannelsError);
+
+ for (size_t i = 1; i < 4; ++i) {
+    for (size_t j = 0; j < 4; ++j) {
+      // Output channels must be one or match input channels.
+ if (j == 1 || i == j) {
+ TestChangingForwardChannels(i, j, kNoErr);
+ TestChangingReverseChannels(i, kNoErr);
+
+ EXPECT_EQ(i, apm_->num_input_channels());
+ EXPECT_EQ(j, apm_->num_output_channels());
+        // The number of reverse channels used for processing is always 1.
+ EXPECT_EQ(1u, apm_->num_reverse_channels());
+ } else {
+ TestChangingForwardChannels(i, j,
+ AudioProcessing::kBadNumberChannelsError);
+ }
+ }
+ }
+}
+
+TEST_F(ApmTest, SampleRatesInt) {
+ // Testing some valid sample rates.
+ for (int sample_rate : {8000, 12000, 16000, 32000, 44100, 48000, 96000}) {
+ SetContainerFormat(sample_rate, 2, &frame_, &float_cb_);
+ EXPECT_NOERR(ProcessStreamChooser(kIntFormat));
+ }
+}
+
+// This test repeatedly reconfigures the pre-amplifier in APM, processes a
+// number of frames, and checks that output signal has the right level.
+TEST_F(ApmTest, PreAmplifier) {
+ // Fill the audio frame with a sawtooth pattern.
+ rtc::ArrayView<int16_t> frame_data = GetMutableFrameData(&frame_);
+ const size_t samples_per_channel = frame_.samples_per_channel;
+ for (size_t i = 0; i < samples_per_channel; i++) {
+ for (size_t ch = 0; ch < frame_.num_channels; ++ch) {
+ frame_data[i + ch * samples_per_channel] = 10000 * ((i % 3) - 1);
+ }
+ }
+ // Cache the frame in tmp_frame.
+ Int16FrameData tmp_frame;
+ tmp_frame.CopyFrom(frame_);
+
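+  // Power is computed as the mean of the squared samples, normalized by
+  // 32768^2 so that a full-scale int16 signal has power 1.0.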
+ auto compute_power = [](const Int16FrameData& frame) {
+ rtc::ArrayView<const int16_t> data = GetFrameData(frame);
+ return std::accumulate(data.begin(), data.end(), 0.0f,
+ [](float a, float b) { return a + b * b; }) /
+ data.size() / 32768 / 32768;
+ };
+
+ const float input_power = compute_power(tmp_frame);
+ // Double-check that the input data is large compared to the error kEpsilon.
+ constexpr float kEpsilon = 1e-4f;
+ RTC_DCHECK_GE(input_power, 10 * kEpsilon);
+
+ // 1. Enable pre-amp with 0 dB gain.
+ AudioProcessing::Config config = apm_->GetConfig();
+ config.pre_amplifier.enabled = true;
+ config.pre_amplifier.fixed_gain_factor = 1.0f;
+ apm_->ApplyConfig(config);
+
+ for (int i = 0; i < 20; ++i) {
+ frame_.CopyFrom(tmp_frame);
+ EXPECT_EQ(apm_->kNoError, ProcessStreamChooser(kIntFormat));
+ }
+ float output_power = compute_power(frame_);
+ EXPECT_NEAR(output_power, input_power, kEpsilon);
+ config = apm_->GetConfig();
+ EXPECT_EQ(config.pre_amplifier.fixed_gain_factor, 1.0f);
+
+ // 2. Change pre-amp gain via ApplyConfig.
+ config.pre_amplifier.fixed_gain_factor = 2.0f;
+ apm_->ApplyConfig(config);
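+  // Doubling the amplitude quadruples the power, hence the expected factor
+  // of 4 below.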
+
+ for (int i = 0; i < 20; ++i) {
+ frame_.CopyFrom(tmp_frame);
+ EXPECT_EQ(apm_->kNoError, ProcessStreamChooser(kIntFormat));
+ }
+ output_power = compute_power(frame_);
+ EXPECT_NEAR(output_power, 4 * input_power, kEpsilon);
+ config = apm_->GetConfig();
+ EXPECT_EQ(config.pre_amplifier.fixed_gain_factor, 2.0f);
+
+ // 3. Change pre-amp gain via a RuntimeSetting.
+ apm_->SetRuntimeSetting(
+ AudioProcessing::RuntimeSetting::CreateCapturePreGain(1.5f));
+
+ for (int i = 0; i < 20; ++i) {
+ frame_.CopyFrom(tmp_frame);
+ EXPECT_EQ(apm_->kNoError, ProcessStreamChooser(kIntFormat));
+ }
+ output_power = compute_power(frame_);
+ EXPECT_NEAR(output_power, 2.25 * input_power, kEpsilon);
+ config = apm_->GetConfig();
+ EXPECT_EQ(config.pre_amplifier.fixed_gain_factor, 1.5f);
+}
+
+// A simple test that ensures that the emulated analog mic gain functionality
+// runs without crashing.
+TEST_F(ApmTest, AnalogMicGainEmulation) {
+ // Fill the audio frame with a sawtooth pattern.
+ rtc::ArrayView<int16_t> frame_data = GetMutableFrameData(&frame_);
+ const size_t samples_per_channel = frame_.samples_per_channel;
+ for (size_t i = 0; i < samples_per_channel; i++) {
+ for (size_t ch = 0; ch < frame_.num_channels; ++ch) {
+ frame_data[i + ch * samples_per_channel] = 100 * ((i % 3) - 1);
+ }
+ }
+ // Cache the frame in tmp_frame.
+ Int16FrameData tmp_frame;
+ tmp_frame.CopyFrom(frame_);
+
+ // Enable the analog gain emulation.
+ AudioProcessing::Config config = apm_->GetConfig();
+ config.capture_level_adjustment.enabled = true;
+ config.capture_level_adjustment.analog_mic_gain_emulation.enabled = true;
+ config.capture_level_adjustment.analog_mic_gain_emulation.initial_level = 21;
+ config.gain_controller1.enabled = true;
+ config.gain_controller1.mode =
+ AudioProcessing::Config::GainController1::Mode::kAdaptiveAnalog;
+ config.gain_controller1.analog_gain_controller.enabled = true;
+ apm_->ApplyConfig(config);
+
+ // Process a number of frames to ensure that the code runs without crashes.
+ for (int i = 0; i < 20; ++i) {
+ frame_.CopyFrom(tmp_frame);
+ EXPECT_EQ(apm_->kNoError, ProcessStreamChooser(kIntFormat));
+ }
+}
+
+// This test repeatedly reconfigures the capture level adjustment functionality
+// in APM, processes a number of frames, and checks that output signal has the
+// right level.
+TEST_F(ApmTest, CaptureLevelAdjustment) {
+ // Fill the audio frame with a sawtooth pattern.
+ rtc::ArrayView<int16_t> frame_data = GetMutableFrameData(&frame_);
+ const size_t samples_per_channel = frame_.samples_per_channel;
+ for (size_t i = 0; i < samples_per_channel; i++) {
+ for (size_t ch = 0; ch < frame_.num_channels; ++ch) {
+ frame_data[i + ch * samples_per_channel] = 100 * ((i % 3) - 1);
+ }
+ }
+ // Cache the frame in tmp_frame.
+ Int16FrameData tmp_frame;
+ tmp_frame.CopyFrom(frame_);
+
+ auto compute_power = [](const Int16FrameData& frame) {
+ rtc::ArrayView<const int16_t> data = GetFrameData(frame);
+ return std::accumulate(data.begin(), data.end(), 0.0f,
+ [](float a, float b) { return a + b * b; }) /
+ data.size() / 32768 / 32768;
+ };
+
+ const float input_power = compute_power(tmp_frame);
+ // Double-check that the input data is large compared to the error kEpsilon.
+ constexpr float kEpsilon = 1e-20f;
+ RTC_DCHECK_GE(input_power, 10 * kEpsilon);
+
+  // 1. Enable capture level adjustment with explicit pre and post gains.
+ AudioProcessing::Config config = apm_->GetConfig();
+ config.capture_level_adjustment.enabled = true;
+ config.capture_level_adjustment.pre_gain_factor = 0.5f;
+ config.capture_level_adjustment.post_gain_factor = 4.f;
+ const float expected_output_power1 =
+ config.capture_level_adjustment.pre_gain_factor *
+ config.capture_level_adjustment.pre_gain_factor *
+ config.capture_level_adjustment.post_gain_factor *
+ config.capture_level_adjustment.post_gain_factor * input_power;
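+  // The amplitude scales by pre_gain * post_gain = 0.5 * 4 = 2, so the power
+  // scales by 2^2 = 4.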
+ apm_->ApplyConfig(config);
+
+ for (int i = 0; i < 20; ++i) {
+ frame_.CopyFrom(tmp_frame);
+ EXPECT_EQ(apm_->kNoError, ProcessStreamChooser(kIntFormat));
+ }
+ float output_power = compute_power(frame_);
+ EXPECT_NEAR(output_power, expected_output_power1, kEpsilon);
+ config = apm_->GetConfig();
+ EXPECT_EQ(config.capture_level_adjustment.pre_gain_factor, 0.5f);
+ EXPECT_EQ(config.capture_level_adjustment.post_gain_factor, 4.f);
+
+  // 2. Change the pre and post gains via ApplyConfig.
+ config.capture_level_adjustment.pre_gain_factor = 1.0f;
+ config.capture_level_adjustment.post_gain_factor = 2.f;
+ const float expected_output_power2 =
+ config.capture_level_adjustment.pre_gain_factor *
+ config.capture_level_adjustment.pre_gain_factor *
+ config.capture_level_adjustment.post_gain_factor *
+ config.capture_level_adjustment.post_gain_factor * input_power;
+ apm_->ApplyConfig(config);
+
+ for (int i = 0; i < 20; ++i) {
+ frame_.CopyFrom(tmp_frame);
+ EXPECT_EQ(apm_->kNoError, ProcessStreamChooser(kIntFormat));
+ }
+ output_power = compute_power(frame_);
+ EXPECT_NEAR(output_power, expected_output_power2, kEpsilon);
+ config = apm_->GetConfig();
+ EXPECT_EQ(config.capture_level_adjustment.pre_gain_factor, 1.0f);
+ EXPECT_EQ(config.capture_level_adjustment.post_gain_factor, 2.f);
+
+  // 3. Change the pre and post gains via RuntimeSettings.
+ constexpr float kPreGain3 = 0.5f;
+ constexpr float kPostGain3 = 3.f;
+ const float expected_output_power3 =
+ kPreGain3 * kPreGain3 * kPostGain3 * kPostGain3 * input_power;
+
+ apm_->SetRuntimeSetting(
+ AudioProcessing::RuntimeSetting::CreateCapturePreGain(kPreGain3));
+ apm_->SetRuntimeSetting(
+ AudioProcessing::RuntimeSetting::CreateCapturePostGain(kPostGain3));
+
+ for (int i = 0; i < 20; ++i) {
+ frame_.CopyFrom(tmp_frame);
+ EXPECT_EQ(apm_->kNoError, ProcessStreamChooser(kIntFormat));
+ }
+ output_power = compute_power(frame_);
+ EXPECT_NEAR(output_power, expected_output_power3, kEpsilon);
+ config = apm_->GetConfig();
+ EXPECT_EQ(config.capture_level_adjustment.pre_gain_factor, 0.5f);
+ EXPECT_EQ(config.capture_level_adjustment.post_gain_factor, 3.f);
+}
+
+TEST_F(ApmTest, GainControl) {
+ AudioProcessing::Config config = apm_->GetConfig();
+ config.gain_controller1.enabled = false;
+ apm_->ApplyConfig(config);
+ config.gain_controller1.enabled = true;
+ apm_->ApplyConfig(config);
+
+ // Testing gain modes
+ for (auto mode :
+ {AudioProcessing::Config::GainController1::kAdaptiveDigital,
+ AudioProcessing::Config::GainController1::kFixedDigital,
+ AudioProcessing::Config::GainController1::kAdaptiveAnalog}) {
+ config.gain_controller1.mode = mode;
+ apm_->ApplyConfig(config);
+ apm_->set_stream_analog_level(100);
+ EXPECT_EQ(apm_->kNoError, ProcessStreamChooser(kFloatFormat));
+ }
+
+ // Testing target levels
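+  // 0 and 31 are the boundaries of the allowed range (see the death tests
+  // below); 15 is a mid-range value.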
+ for (int target_level_dbfs : {0, 15, 31}) {
+ config.gain_controller1.target_level_dbfs = target_level_dbfs;
+ apm_->ApplyConfig(config);
+ apm_->set_stream_analog_level(100);
+ EXPECT_EQ(apm_->kNoError, ProcessStreamChooser(kFloatFormat));
+ }
+
+ // Testing compression gains
+ for (int compression_gain_db : {0, 10, 90}) {
+ config.gain_controller1.compression_gain_db = compression_gain_db;
+ apm_->ApplyConfig(config);
+ apm_->set_stream_analog_level(100);
+ EXPECT_EQ(apm_->kNoError, ProcessStreamChooser(kFloatFormat));
+ }
+
+ // Testing limiter off/on
+ for (bool enable : {false, true}) {
+ config.gain_controller1.enable_limiter = enable;
+ apm_->ApplyConfig(config);
+ apm_->set_stream_analog_level(100);
+ EXPECT_EQ(apm_->kNoError, ProcessStreamChooser(kFloatFormat));
+ }
+
+ // Testing level limits.
+ constexpr int kMinLevel = 0;
+ constexpr int kMaxLevel = 255;
+ apm_->set_stream_analog_level(kMinLevel);
+ EXPECT_EQ(apm_->kNoError, ProcessStreamChooser(kFloatFormat));
+ apm_->set_stream_analog_level((kMinLevel + kMaxLevel) / 2);
+ EXPECT_EQ(apm_->kNoError, ProcessStreamChooser(kFloatFormat));
+ apm_->set_stream_analog_level(kMaxLevel);
+ EXPECT_EQ(apm_->kNoError, ProcessStreamChooser(kFloatFormat));
+}
+
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+using ApmDeathTest = ApmTest;
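+
+// The tests below pin down the RTC_DCHECKed configuration ranges:
+// target_level_dbfs must lie in [0, 31], compression_gain_db in [0, 90] and
+// the analog level in [0, 255].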
+
+TEST_F(ApmDeathTest, GainControlDiesOnTooLowTargetLevelDbfs) {
+ auto config = apm_->GetConfig();
+ config.gain_controller1.enabled = true;
+ config.gain_controller1.target_level_dbfs = -1;
+ EXPECT_DEATH(apm_->ApplyConfig(config), "");
+}
+
+TEST_F(ApmDeathTest, GainControlDiesOnTooHighTargetLevelDbfs) {
+ auto config = apm_->GetConfig();
+ config.gain_controller1.enabled = true;
+ config.gain_controller1.target_level_dbfs = 32;
+ EXPECT_DEATH(apm_->ApplyConfig(config), "");
+}
+
+TEST_F(ApmDeathTest, GainControlDiesOnTooLowCompressionGainDb) {
+ auto config = apm_->GetConfig();
+ config.gain_controller1.enabled = true;
+ config.gain_controller1.compression_gain_db = -1;
+ EXPECT_DEATH(apm_->ApplyConfig(config), "");
+}
+
+TEST_F(ApmDeathTest, GainControlDiesOnTooHighCompressionGainDb) {
+ auto config = apm_->GetConfig();
+ config.gain_controller1.enabled = true;
+ config.gain_controller1.compression_gain_db = 91;
+ EXPECT_DEATH(apm_->ApplyConfig(config), "");
+}
+
+TEST_F(ApmDeathTest, ApmDiesOnTooLowAnalogLevel) {
+ auto config = apm_->GetConfig();
+ config.gain_controller1.enabled = true;
+ apm_->ApplyConfig(config);
+ EXPECT_DEATH(apm_->set_stream_analog_level(-1), "");
+}
+
+TEST_F(ApmDeathTest, ApmDiesOnTooHighAnalogLevel) {
+ auto config = apm_->GetConfig();
+ config.gain_controller1.enabled = true;
+ apm_->ApplyConfig(config);
+ EXPECT_DEATH(apm_->set_stream_analog_level(256), "");
+}
+#endif
+
+void ApmTest::RunQuantizedVolumeDoesNotGetStuckTest(int sample_rate) {
+ Init(sample_rate, sample_rate, sample_rate, 2, 2, 2, false);
+ auto config = apm_->GetConfig();
+ config.gain_controller1.enabled = true;
+ config.gain_controller1.mode =
+ AudioProcessing::Config::GainController1::kAdaptiveAnalog;
+ apm_->ApplyConfig(config);
+
+ int out_analog_level = 0;
+ for (int i = 0; i < 2000; ++i) {
+ ReadFrameWithRewind(near_file_, &frame_);
+ // Ensure the audio is at a low level, so the AGC will try to increase it.
+ ScaleFrame(&frame_, 0.25);
+
+ // Always pass in the same volume.
+ apm_->set_stream_analog_level(100);
+ EXPECT_EQ(apm_->kNoError,
+ apm_->ProcessStream(
+ frame_.data.data(),
+ StreamConfig(frame_.sample_rate_hz, frame_.num_channels),
+ StreamConfig(frame_.sample_rate_hz, frame_.num_channels),
+ frame_.data.data()));
+ out_analog_level = apm_->recommended_stream_analog_level();
+ }
+
+ // Ensure the AGC is still able to reach the maximum.
+ EXPECT_EQ(255, out_analog_level);
+}
+
+// Verifies that despite volume slider quantization, the AGC can continue to
+// increase its volume.
+TEST_F(ApmTest, QuantizedVolumeDoesNotGetStuck) {
+ for (size_t sample_rate_hz : kProcessSampleRates) {
+ SCOPED_TRACE(::testing::Message() << "sample_rate_hz=" << sample_rate_hz);
+ RunQuantizedVolumeDoesNotGetStuckTest(sample_rate_hz);
+ }
+}
+
+void ApmTest::RunManualVolumeChangeIsPossibleTest(int sample_rate) {
+ Init(sample_rate, sample_rate, sample_rate, 2, 2, 2, false);
+ auto config = apm_->GetConfig();
+ config.gain_controller1.enabled = true;
+ config.gain_controller1.mode =
+ AudioProcessing::Config::GainController1::kAdaptiveAnalog;
+ apm_->ApplyConfig(config);
+
+ int out_analog_level = 100;
+ for (int i = 0; i < 1000; ++i) {
+ ReadFrameWithRewind(near_file_, &frame_);
+ // Ensure the audio is at a low level, so the AGC will try to increase it.
+ ScaleFrame(&frame_, 0.25);
+
+ apm_->set_stream_analog_level(out_analog_level);
+ EXPECT_EQ(apm_->kNoError,
+ apm_->ProcessStream(
+ frame_.data.data(),
+ StreamConfig(frame_.sample_rate_hz, frame_.num_channels),
+ StreamConfig(frame_.sample_rate_hz, frame_.num_channels),
+ frame_.data.data()));
+ out_analog_level = apm_->recommended_stream_analog_level();
+ }
+
+ // Ensure the volume was raised.
+ EXPECT_GT(out_analog_level, 100);
+ int highest_level_reached = out_analog_level;
+ // Simulate a user manual volume change.
+ out_analog_level = 100;
+
+ for (int i = 0; i < 300; ++i) {
+ ReadFrameWithRewind(near_file_, &frame_);
+ ScaleFrame(&frame_, 0.25);
+
+ apm_->set_stream_analog_level(out_analog_level);
+ EXPECT_EQ(apm_->kNoError,
+ apm_->ProcessStream(
+ frame_.data.data(),
+ StreamConfig(frame_.sample_rate_hz, frame_.num_channels),
+ StreamConfig(frame_.sample_rate_hz, frame_.num_channels),
+ frame_.data.data()));
+ out_analog_level = apm_->recommended_stream_analog_level();
+ // Check that AGC respected the manually adjusted volume.
+ EXPECT_LT(out_analog_level, highest_level_reached);
+ }
+ // Check that the volume was still raised.
+ EXPECT_GT(out_analog_level, 100);
+}
+
+TEST_F(ApmTest, ManualVolumeChangeIsPossible) {
+ for (size_t sample_rate_hz : kProcessSampleRates) {
+ SCOPED_TRACE(::testing::Message() << "sample_rate_hz=" << sample_rate_hz);
+ RunManualVolumeChangeIsPossibleTest(sample_rate_hz);
+ }
+}
+
+TEST_F(ApmTest, HighPassFilter) {
+ // Turn HP filter on/off
+ AudioProcessing::Config apm_config;
+ apm_config.high_pass_filter.enabled = true;
+ apm_->ApplyConfig(apm_config);
+ apm_config.high_pass_filter.enabled = false;
+ apm_->ApplyConfig(apm_config);
+}
+
+TEST_F(ApmTest, AllProcessingDisabledByDefault) {
+ AudioProcessing::Config config = apm_->GetConfig();
+ EXPECT_FALSE(config.echo_canceller.enabled);
+ EXPECT_FALSE(config.high_pass_filter.enabled);
+ EXPECT_FALSE(config.gain_controller1.enabled);
+ EXPECT_FALSE(config.noise_suppression.enabled);
+}
+
+TEST_F(ApmTest, NoProcessingWhenAllComponentsDisabledInt) {
+ // Test that ProcessStream simply copies input to output when all components
+ // are disabled.
+ // Runs over all processing rates, and some particularly common or special
+ // rates.
+ // - 8000 Hz: lowest sample rate seen in Chrome metrics,
+ // - 22050 Hz: APM input/output frames are not exactly 10 ms,
+ // - 44100 Hz: very common desktop sample rate.
+ constexpr int kSampleRatesHz[] = {8000, 16000, 22050, 32000, 44100, 48000};
+ for (size_t sample_rate_hz : kSampleRatesHz) {
+ SCOPED_TRACE(::testing::Message() << "sample_rate_hz=" << sample_rate_hz);
+ Init(sample_rate_hz, sample_rate_hz, sample_rate_hz, 2, 2, 2, false);
+ SetFrameTo(&frame_, 1000, 2000);
+ Int16FrameData frame_copy;
+ frame_copy.CopyFrom(frame_);
+ for (int j = 0; j < 1000; j++) {
+ EXPECT_EQ(apm_->kNoError,
+ apm_->ProcessStream(
+ frame_.data.data(),
+ StreamConfig(frame_.sample_rate_hz, frame_.num_channels),
+ StreamConfig(frame_.sample_rate_hz, frame_.num_channels),
+ frame_.data.data()));
+ EXPECT_TRUE(FrameDataAreEqual(frame_, frame_copy));
+ EXPECT_EQ(apm_->kNoError,
+ apm_->ProcessReverseStream(
+ frame_.data.data(),
+ StreamConfig(frame_.sample_rate_hz, frame_.num_channels),
+ StreamConfig(frame_.sample_rate_hz, frame_.num_channels),
+ frame_.data.data()));
+ EXPECT_TRUE(FrameDataAreEqual(frame_, frame_copy));
+ }
+ }
+}
+
+TEST_F(ApmTest, NoProcessingWhenAllComponentsDisabledFloat) {
+ // Test that ProcessStream simply copies input to output when all components
+ // are disabled.
+ const size_t kSamples = 160;
+ const int sample_rate = 16000;
+ const float src[kSamples] = {-1.0f, 0.0f, 1.0f};
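+  // Only the first three samples are set explicitly; aggregate initialization
+  // zero-fills the rest.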
+ float dest[kSamples] = {};
+
+ auto src_channels = &src[0];
+ auto dest_channels = &dest[0];
+
+ apm_ = AudioProcessingBuilderForTesting().Create();
+ EXPECT_NOERR(apm_->ProcessStream(&src_channels, StreamConfig(sample_rate, 1),
+ StreamConfig(sample_rate, 1),
+ &dest_channels));
+
+ for (size_t i = 0; i < kSamples; ++i) {
+ EXPECT_EQ(src[i], dest[i]);
+ }
+
+ // Same for ProcessReverseStream.
+ float rev_dest[kSamples] = {};
+ auto rev_dest_channels = &rev_dest[0];
+
+ StreamConfig input_stream = {sample_rate, 1};
+ StreamConfig output_stream = {sample_rate, 1};
+ EXPECT_NOERR(apm_->ProcessReverseStream(&src_channels, input_stream,
+ output_stream, &rev_dest_channels));
+
+ for (size_t i = 0; i < kSamples; ++i) {
+ EXPECT_EQ(src[i], rev_dest[i]);
+ }
+}
+
+TEST_F(ApmTest, IdenticalInputChannelsResultInIdenticalOutputChannels) {
+ EnableAllComponents();
+
+ for (size_t i = 0; i < arraysize(kProcessSampleRates); i++) {
+ Init(kProcessSampleRates[i], kProcessSampleRates[i], kProcessSampleRates[i],
+ 2, 2, 2, false);
+ int analog_level = 127;
+ ASSERT_EQ(0, feof(far_file_));
+ ASSERT_EQ(0, feof(near_file_));
+ while (ReadFrame(far_file_, &revframe_) && ReadFrame(near_file_, &frame_)) {
+ CopyLeftToRightChannel(revframe_.data.data(),
+ revframe_.samples_per_channel);
+
+ ASSERT_EQ(
+ kNoErr,
+ apm_->ProcessReverseStream(
+ revframe_.data.data(),
+ StreamConfig(revframe_.sample_rate_hz, revframe_.num_channels),
+ StreamConfig(revframe_.sample_rate_hz, revframe_.num_channels),
+ revframe_.data.data()));
+
+ CopyLeftToRightChannel(frame_.data.data(), frame_.samples_per_channel);
+
+ ASSERT_EQ(kNoErr, apm_->set_stream_delay_ms(0));
+ apm_->set_stream_analog_level(analog_level);
+ ASSERT_EQ(kNoErr,
+ apm_->ProcessStream(
+ frame_.data.data(),
+ StreamConfig(frame_.sample_rate_hz, frame_.num_channels),
+ StreamConfig(frame_.sample_rate_hz, frame_.num_channels),
+ frame_.data.data()));
+ analog_level = apm_->recommended_stream_analog_level();
+
+ VerifyChannelsAreEqual(frame_.data.data(), frame_.samples_per_channel);
+ }
+ rewind(far_file_);
+ rewind(near_file_);
+ }
+}
+
+TEST_F(ApmTest, SplittingFilter) {
+ // Verify the filter is not active through undistorted audio when:
+ // 1. No components are enabled...
+ SetFrameTo(&frame_, 1000);
+ Int16FrameData frame_copy;
+ frame_copy.CopyFrom(frame_);
+ EXPECT_EQ(apm_->kNoError,
+ apm_->ProcessStream(
+ frame_.data.data(),
+ StreamConfig(frame_.sample_rate_hz, frame_.num_channels),
+ StreamConfig(frame_.sample_rate_hz, frame_.num_channels),
+ frame_.data.data()));
+ EXPECT_EQ(apm_->kNoError,
+ apm_->ProcessStream(
+ frame_.data.data(),
+ StreamConfig(frame_.sample_rate_hz, frame_.num_channels),
+ StreamConfig(frame_.sample_rate_hz, frame_.num_channels),
+ frame_.data.data()));
+ EXPECT_TRUE(FrameDataAreEqual(frame_, frame_copy));
+
+  // 2. The config is reapplied with all components still disabled...
+ auto apm_config = apm_->GetConfig();
+ SetFrameTo(&frame_, 1000);
+ frame_copy.CopyFrom(frame_);
+ apm_->ApplyConfig(apm_config);
+ EXPECT_EQ(apm_->kNoError,
+ apm_->ProcessStream(
+ frame_.data.data(),
+ StreamConfig(frame_.sample_rate_hz, frame_.num_channels),
+ StreamConfig(frame_.sample_rate_hz, frame_.num_channels),
+ frame_.data.data()));
+ EXPECT_EQ(apm_->kNoError,
+ apm_->ProcessStream(
+ frame_.data.data(),
+ StreamConfig(frame_.sample_rate_hz, frame_.num_channels),
+ StreamConfig(frame_.sample_rate_hz, frame_.num_channels),
+ frame_.data.data()));
+ EXPECT_TRUE(FrameDataAreEqual(frame_, frame_copy));
+ apm_->ApplyConfig(apm_config);
+
+  // Check that the test is valid: with the AEC enabled we should get
+  // distortion from the splitting filter, even though the AEC itself does
+  // not modify the audio here.
+ apm_config.echo_canceller.enabled = true;
+ apm_config.echo_canceller.mobile_mode = false;
+ apm_->ApplyConfig(apm_config);
+ frame_.samples_per_channel = 320;
+ frame_.num_channels = 2;
+ frame_.sample_rate_hz = 32000;
+ SetFrameTo(&frame_, 1000);
+ frame_copy.CopyFrom(frame_);
+ EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(0));
+ EXPECT_EQ(apm_->kNoError,
+ apm_->ProcessStream(
+ frame_.data.data(),
+ StreamConfig(frame_.sample_rate_hz, frame_.num_channels),
+ StreamConfig(frame_.sample_rate_hz, frame_.num_channels),
+ frame_.data.data()));
+ EXPECT_FALSE(FrameDataAreEqual(frame_, frame_copy));
+}
+
+#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
+void ApmTest::ProcessDebugDump(absl::string_view in_filename,
+ absl::string_view out_filename,
+ Format format,
+ int max_size_bytes) {
+ TaskQueueForTest worker_queue("ApmTest_worker_queue");
+ FILE* in_file = fopen(std::string(in_filename).c_str(), "rb");
+ ASSERT_TRUE(in_file != NULL);
+ audioproc::Event event_msg;
+ bool first_init = true;
+
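+  // Replay the recorded aecdump: INIT events re-initialize APM,
+  // REVERSE_STREAM events carry render audio, and STREAM events carry
+  // capture audio together with its stream parameters.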
+ while (ReadMessageFromFile(in_file, &event_msg)) {
+ if (event_msg.type() == audioproc::Event::INIT) {
+ const audioproc::Init msg = event_msg.init();
+ int reverse_sample_rate = msg.sample_rate();
+ if (msg.has_reverse_sample_rate()) {
+ reverse_sample_rate = msg.reverse_sample_rate();
+ }
+ int output_sample_rate = msg.sample_rate();
+ if (msg.has_output_sample_rate()) {
+ output_sample_rate = msg.output_sample_rate();
+ }
+
+ Init(msg.sample_rate(), output_sample_rate, reverse_sample_rate,
+ msg.num_input_channels(), msg.num_output_channels(),
+ msg.num_reverse_channels(), false);
+ if (first_init) {
+ // AttachAecDump() writes an additional init message. Don't start
+ // recording until after the first init to avoid the extra message.
+ auto aec_dump =
+ AecDumpFactory::Create(out_filename, max_size_bytes, &worker_queue);
+ EXPECT_TRUE(aec_dump);
+ apm_->AttachAecDump(std::move(aec_dump));
+ first_init = false;
+ }
+
+ } else if (event_msg.type() == audioproc::Event::REVERSE_STREAM) {
+ const audioproc::ReverseStream msg = event_msg.reverse_stream();
+
+ if (msg.channel_size() > 0) {
+ ASSERT_EQ(revframe_.num_channels,
+ static_cast<size_t>(msg.channel_size()));
+ for (int i = 0; i < msg.channel_size(); ++i) {
+ memcpy(revfloat_cb_->channels()[i], msg.channel(i).data(),
+ msg.channel(i).size());
+ }
+ } else {
+ memcpy(revframe_.data.data(), msg.data().data(), msg.data().size());
+ if (format == kFloatFormat) {
+ // We're using an int16 input file; convert to float.
+ ConvertToFloat(revframe_, revfloat_cb_.get());
+ }
+ }
+ AnalyzeReverseStreamChooser(format);
+
+ } else if (event_msg.type() == audioproc::Event::STREAM) {
+ const audioproc::Stream msg = event_msg.stream();
+ // ProcessStream could have changed this for the output frame.
+ frame_.num_channels = apm_->num_input_channels();
+
+ apm_->set_stream_analog_level(msg.level());
+ EXPECT_NOERR(apm_->set_stream_delay_ms(msg.delay()));
+ if (msg.has_keypress()) {
+ apm_->set_stream_key_pressed(msg.keypress());
+ } else {
+ apm_->set_stream_key_pressed(true);
+ }
+
+ if (msg.input_channel_size() > 0) {
+ ASSERT_EQ(frame_.num_channels,
+ static_cast<size_t>(msg.input_channel_size()));
+ for (int i = 0; i < msg.input_channel_size(); ++i) {
+ memcpy(float_cb_->channels()[i], msg.input_channel(i).data(),
+ msg.input_channel(i).size());
+ }
+ } else {
+ memcpy(frame_.data.data(), msg.input_data().data(),
+ msg.input_data().size());
+ if (format == kFloatFormat) {
+ // We're using an int16 input file; convert to float.
+ ConvertToFloat(frame_, float_cb_.get());
+ }
+ }
+ ProcessStreamChooser(format);
+ }
+ }
+ apm_->DetachAecDump();
+ fclose(in_file);
+}
+
+void ApmTest::VerifyDebugDumpTest(Format format) {
+ rtc::ScopedFakeClock fake_clock;
+ const std::string in_filename = test::ResourcePath("ref03", "aecdump");
+ std::string format_string;
+ switch (format) {
+ case kIntFormat:
+ format_string = "_int";
+ break;
+ case kFloatFormat:
+ format_string = "_float";
+ break;
+ }
+ const std::string ref_filename = test::TempFilename(
+ test::OutputPath(), std::string("ref") + format_string + "_aecdump");
+ const std::string out_filename = test::TempFilename(
+ test::OutputPath(), std::string("out") + format_string + "_aecdump");
+ const std::string limited_filename = test::TempFilename(
+ test::OutputPath(), std::string("limited") + format_string + "_aecdump");
+ const size_t logging_limit_bytes = 100000;
+ // We expect at least this many bytes in the created logfile.
+ const size_t logging_expected_bytes = 95000;
+ EnableAllComponents();
+ ProcessDebugDump(in_filename, ref_filename, format, -1);
+ ProcessDebugDump(ref_filename, out_filename, format, -1);
+ ProcessDebugDump(ref_filename, limited_filename, format, logging_limit_bytes);
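+  // `out` re-processes the `ref` dump and is expected to be
+  // message-for-message identical to it; `limited` should match as well
+  // until the size cap kicks in.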
+
+ FILE* ref_file = fopen(ref_filename.c_str(), "rb");
+ FILE* out_file = fopen(out_filename.c_str(), "rb");
+ FILE* limited_file = fopen(limited_filename.c_str(), "rb");
+ ASSERT_TRUE(ref_file != NULL);
+ ASSERT_TRUE(out_file != NULL);
+ ASSERT_TRUE(limited_file != NULL);
+ std::unique_ptr<uint8_t[]> ref_bytes;
+ std::unique_ptr<uint8_t[]> out_bytes;
+ std::unique_ptr<uint8_t[]> limited_bytes;
+
+ size_t ref_size = ReadMessageBytesFromFile(ref_file, &ref_bytes);
+ size_t out_size = ReadMessageBytesFromFile(out_file, &out_bytes);
+ size_t limited_size = ReadMessageBytesFromFile(limited_file, &limited_bytes);
+ size_t bytes_read = 0;
+ size_t bytes_read_limited = 0;
+ while (ref_size > 0 && out_size > 0) {
+ bytes_read += ref_size;
+ bytes_read_limited += limited_size;
+ EXPECT_EQ(ref_size, out_size);
+ EXPECT_GE(ref_size, limited_size);
+ EXPECT_TRUE(ExpectMessageEq(/*actual=*/{out_bytes.get(), out_size},
+ /*expected=*/{ref_bytes.get(), ref_size}));
+ if (limited_size > 0) {
+ EXPECT_TRUE(
+ ExpectMessageEq(/*actual=*/{limited_bytes.get(), limited_size},
+ /*expected=*/{ref_bytes.get(), ref_size}));
+ }
+ ref_size = ReadMessageBytesFromFile(ref_file, &ref_bytes);
+ out_size = ReadMessageBytesFromFile(out_file, &out_bytes);
+ limited_size = ReadMessageBytesFromFile(limited_file, &limited_bytes);
+ }
+ EXPECT_GT(bytes_read, 0u);
+ EXPECT_GT(bytes_read_limited, logging_expected_bytes);
+ EXPECT_LE(bytes_read_limited, logging_limit_bytes);
+ EXPECT_NE(0, feof(ref_file));
+ EXPECT_NE(0, feof(out_file));
+ EXPECT_NE(0, feof(limited_file));
+ ASSERT_EQ(0, fclose(ref_file));
+ ASSERT_EQ(0, fclose(out_file));
+ ASSERT_EQ(0, fclose(limited_file));
+ remove(ref_filename.c_str());
+ remove(out_filename.c_str());
+ remove(limited_filename.c_str());
+}
+
+TEST_F(ApmTest, VerifyDebugDumpInt) {
+ VerifyDebugDumpTest(kIntFormat);
+}
+
+TEST_F(ApmTest, VerifyDebugDumpFloat) {
+ VerifyDebugDumpTest(kFloatFormat);
+}
+#endif
+
+// TODO(andrew): expand test to verify output.
+TEST_F(ApmTest, DebugDump) {
+ TaskQueueForTest worker_queue("ApmTest_worker_queue");
+ const std::string filename =
+ test::TempFilename(test::OutputPath(), "debug_aec");
+ {
+ auto aec_dump = AecDumpFactory::Create("", -1, &worker_queue);
+ EXPECT_FALSE(aec_dump);
+ }
+
+#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
+ // Stopping without having started should be OK.
+ apm_->DetachAecDump();
+
+ auto aec_dump = AecDumpFactory::Create(filename, -1, &worker_queue);
+ EXPECT_TRUE(aec_dump);
+ apm_->AttachAecDump(std::move(aec_dump));
+ EXPECT_EQ(apm_->kNoError,
+ apm_->ProcessStream(
+ frame_.data.data(),
+ StreamConfig(frame_.sample_rate_hz, frame_.num_channels),
+ StreamConfig(frame_.sample_rate_hz, frame_.num_channels),
+ frame_.data.data()));
+ EXPECT_EQ(apm_->kNoError,
+ apm_->ProcessReverseStream(
+ revframe_.data.data(),
+ StreamConfig(revframe_.sample_rate_hz, revframe_.num_channels),
+ StreamConfig(revframe_.sample_rate_hz, revframe_.num_channels),
+ revframe_.data.data()));
+ apm_->DetachAecDump();
+
+ // Verify the file has been written.
+ FILE* fid = fopen(filename.c_str(), "r");
+ ASSERT_TRUE(fid != NULL);
+
+ // Clean it up.
+ ASSERT_EQ(0, fclose(fid));
+ ASSERT_EQ(0, remove(filename.c_str()));
+#else
+ // Verify the file has NOT been written.
+ ASSERT_TRUE(fopen(filename.c_str(), "r") == NULL);
+#endif // WEBRTC_AUDIOPROC_DEBUG_DUMP
+}
+
+// TODO(andrew): expand test to verify output.
+TEST_F(ApmTest, DebugDumpFromFileHandle) {
+ TaskQueueForTest worker_queue("ApmTest_worker_queue");
+
+ const std::string filename =
+ test::TempFilename(test::OutputPath(), "debug_aec");
+ FileWrapper f = FileWrapper::OpenWriteOnly(filename);
+ ASSERT_TRUE(f.is_open());
+
+#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
+ // Stopping without having started should be OK.
+ apm_->DetachAecDump();
+
+ auto aec_dump = AecDumpFactory::Create(std::move(f), -1, &worker_queue);
+ EXPECT_TRUE(aec_dump);
+ apm_->AttachAecDump(std::move(aec_dump));
+ EXPECT_EQ(apm_->kNoError,
+ apm_->ProcessReverseStream(
+ revframe_.data.data(),
+ StreamConfig(revframe_.sample_rate_hz, revframe_.num_channels),
+ StreamConfig(revframe_.sample_rate_hz, revframe_.num_channels),
+ revframe_.data.data()));
+ EXPECT_EQ(apm_->kNoError,
+ apm_->ProcessStream(
+ frame_.data.data(),
+ StreamConfig(frame_.sample_rate_hz, frame_.num_channels),
+ StreamConfig(frame_.sample_rate_hz, frame_.num_channels),
+ frame_.data.data()));
+ apm_->DetachAecDump();
+
+ // Verify the file has been written.
+ FILE* fid = fopen(filename.c_str(), "r");
+ ASSERT_TRUE(fid != NULL);
+
+ // Clean it up.
+ ASSERT_EQ(0, fclose(fid));
+ ASSERT_EQ(0, remove(filename.c_str()));
+#endif // WEBRTC_AUDIOPROC_DEBUG_DUMP
+}
+
+// TODO(andrew): Add a test to process a few frames with different combinations
+// of enabled components.
+
+TEST_F(ApmTest, Process) {
+ GOOGLE_PROTOBUF_VERIFY_VERSION;
+ audioproc::OutputData ref_data;
+
+ if (!absl::GetFlag(FLAGS_write_apm_ref_data)) {
+ OpenFileAndReadMessage(ref_filename_, &ref_data);
+ } else {
+ const int kChannels[] = {1, 2};
+ // Write the desired tests to the protobuf reference file.
+ for (size_t i = 0; i < arraysize(kChannels); i++) {
+ for (size_t j = 0; j < arraysize(kChannels); j++) {
+ for (int sample_rate_hz : AudioProcessing::kNativeSampleRatesHz) {
+ audioproc::Test* test = ref_data.add_test();
+ test->set_num_reverse_channels(kChannels[i]);
+ test->set_num_input_channels(kChannels[j]);
+ test->set_num_output_channels(kChannels[j]);
+ test->set_sample_rate(sample_rate_hz);
+ test->set_use_aec_extended_filter(false);
+ }
+ }
+ }
+#if defined(WEBRTC_AUDIOPROC_FLOAT_PROFILE)
+ // To test the extended filter mode.
+ audioproc::Test* test = ref_data.add_test();
+ test->set_num_reverse_channels(2);
+ test->set_num_input_channels(2);
+ test->set_num_output_channels(2);
+ test->set_sample_rate(AudioProcessing::kSampleRate32kHz);
+ test->set_use_aec_extended_filter(true);
+#endif
+ }
+
+ for (int i = 0; i < ref_data.test_size(); i++) {
+ printf("Running test %d of %d...\n", i + 1, ref_data.test_size());
+
+ audioproc::Test* test = ref_data.mutable_test(i);
+ // TODO(ajm): We no longer allow different input and output channels. Skip
+ // these tests for now, but they should be removed from the set.
+ if (test->num_input_channels() != test->num_output_channels())
+ continue;
+
+ apm_ = AudioProcessingBuilderForTesting()
+ .SetEchoDetector(CreateEchoDetector())
+ .Create();
+ AudioProcessing::Config apm_config = apm_->GetConfig();
+ apm_config.gain_controller1.analog_gain_controller.enabled = false;
+ apm_->ApplyConfig(apm_config);
+
+ EnableAllComponents();
+
+ Init(test->sample_rate(), test->sample_rate(), test->sample_rate(),
+ static_cast<size_t>(test->num_input_channels()),
+ static_cast<size_t>(test->num_output_channels()),
+ static_cast<size_t>(test->num_reverse_channels()), true);
+
+ int frame_count = 0;
+ int analog_level = 127;
+ int analog_level_average = 0;
+ int max_output_average = 0;
+#if defined(WEBRTC_AUDIOPROC_FLOAT_PROFILE)
+ int stats_index = 0;
+#endif
+
+ while (ReadFrame(far_file_, &revframe_) && ReadFrame(near_file_, &frame_)) {
+ EXPECT_EQ(
+ apm_->kNoError,
+ apm_->ProcessReverseStream(
+ revframe_.data.data(),
+ StreamConfig(revframe_.sample_rate_hz, revframe_.num_channels),
+ StreamConfig(revframe_.sample_rate_hz, revframe_.num_channels),
+ revframe_.data.data()));
+
+ EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(0));
+ apm_->set_stream_analog_level(analog_level);
+
+ EXPECT_EQ(apm_->kNoError,
+ apm_->ProcessStream(
+ frame_.data.data(),
+ StreamConfig(frame_.sample_rate_hz, frame_.num_channels),
+ StreamConfig(frame_.sample_rate_hz, frame_.num_channels),
+ frame_.data.data()));
+
+ // Ensure the frame was downmixed properly.
+ EXPECT_EQ(static_cast<size_t>(test->num_output_channels()),
+ frame_.num_channels);
+
+ max_output_average += MaxAudioFrame(frame_);
+
+ analog_level = apm_->recommended_stream_analog_level();
+ analog_level_average += analog_level;
+ AudioProcessingStats stats = apm_->GetStatistics();
+
+ size_t frame_size = frame_.samples_per_channel * frame_.num_channels;
+ size_t write_count =
+ fwrite(frame_.data.data(), sizeof(int16_t), frame_size, out_file_);
+ ASSERT_EQ(frame_size, write_count);
+
+ // Reset in case of downmixing.
+ frame_.num_channels = static_cast<size_t>(test->num_input_channels());
+ frame_count++;
+
+#if defined(WEBRTC_AUDIOPROC_FLOAT_PROFILE)
+ const int kStatsAggregationFrameNum = 100; // 1 second.
+ if (frame_count % kStatsAggregationFrameNum == 0) {
+ // Get echo and delay metrics.
+ AudioProcessingStats stats2 = apm_->GetStatistics();
+
+ // Echo metrics.
+ const float echo_return_loss = stats2.echo_return_loss.value_or(-1.0f);
+ const float echo_return_loss_enhancement =
+ stats2.echo_return_loss_enhancement.value_or(-1.0f);
+ const float residual_echo_likelihood =
+ stats2.residual_echo_likelihood.value_or(-1.0f);
+ const float residual_echo_likelihood_recent_max =
+ stats2.residual_echo_likelihood_recent_max.value_or(-1.0f);
+
+ if (!absl::GetFlag(FLAGS_write_apm_ref_data)) {
+ const audioproc::Test::EchoMetrics& reference =
+ test->echo_metrics(stats_index);
+ constexpr float kEpsilon = 0.01;
+ EXPECT_NEAR(echo_return_loss, reference.echo_return_loss(), kEpsilon);
+ EXPECT_NEAR(echo_return_loss_enhancement,
+ reference.echo_return_loss_enhancement(), kEpsilon);
+ EXPECT_NEAR(residual_echo_likelihood,
+ reference.residual_echo_likelihood(), kEpsilon);
+ EXPECT_NEAR(residual_echo_likelihood_recent_max,
+ reference.residual_echo_likelihood_recent_max(),
+ kEpsilon);
+ ++stats_index;
+ } else {
+ audioproc::Test::EchoMetrics* message_echo = test->add_echo_metrics();
+ message_echo->set_echo_return_loss(echo_return_loss);
+ message_echo->set_echo_return_loss_enhancement(
+ echo_return_loss_enhancement);
+ message_echo->set_residual_echo_likelihood(residual_echo_likelihood);
+ message_echo->set_residual_echo_likelihood_recent_max(
+ residual_echo_likelihood_recent_max);
+ }
+ }
+#endif // defined(WEBRTC_AUDIOPROC_FLOAT_PROFILE).
+ }
+ max_output_average /= frame_count;
+ analog_level_average /= frame_count;
+
+ if (!absl::GetFlag(FLAGS_write_apm_ref_data)) {
+ const int kIntNear = 1;
+    // All numbers are consistently higher on N7 compared to the reference
+    // data.
+ // TODO(bjornv): If we start getting more of these offsets on Android we
+ // should consider a different approach. Either using one slack for all,
+ // or generate a separate android reference.
+#if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS)
+ const int kMaxOutputAverageOffset = 9;
+ const int kMaxOutputAverageNear = 26;
+#else
+ const int kMaxOutputAverageOffset = 0;
+ const int kMaxOutputAverageNear = kIntNear;
+#endif
+ EXPECT_NEAR(test->analog_level_average(), analog_level_average, kIntNear);
+ EXPECT_NEAR(test->max_output_average(),
+ max_output_average - kMaxOutputAverageOffset,
+ kMaxOutputAverageNear);
+ } else {
+ test->set_analog_level_average(analog_level_average);
+ test->set_max_output_average(max_output_average);
+ }
+
+ rewind(far_file_);
+ rewind(near_file_);
+ }
+
+ if (absl::GetFlag(FLAGS_write_apm_ref_data)) {
+ OpenFileAndWriteMessage(ref_filename_, ref_data);
+ }
+}
+
+// Compares the reference and test arrays over a region around the expected
+// delay. Finds the highest SNR in that region and adds the variance and squared
+// error results to the supplied accumulators.
+void UpdateBestSNR(const float* ref,
+ const float* test,
+ size_t length,
+ int expected_delay,
+ double* variance_acc,
+ double* sq_error_acc) {
+ RTC_CHECK_LT(expected_delay, length)
+ << "delay greater than signal length, cannot compute SNR";
+ double best_snr = std::numeric_limits<double>::min();
+ double best_variance = 0;
+ double best_sq_error = 0;
+ // Search over a region of nine samples around the expected delay.
+ for (int delay = std::max(expected_delay - 4, 0); delay <= expected_delay + 4;
+ ++delay) {
+ double sq_error = 0;
+ double variance = 0;
+ for (size_t i = 0; i < length - delay; ++i) {
+ double error = test[i + delay] - ref[i];
+ sq_error += error * error;
+ variance += ref[i] * ref[i];
+ }
+
+ if (sq_error == 0) {
+ *variance_acc += variance;
+ return;
+ }
+ double snr = variance / sq_error;
+ if (snr > best_snr) {
+ best_snr = snr;
+ best_variance = variance;
+ best_sq_error = sq_error;
+ }
+ }
+
+ *variance_acc += best_variance;
+ *sq_error_acc += best_sq_error;
+}
+
+// Used to test a multitude of sample rate and channel combinations. It works
+// by first producing a set of reference files (in SetUpTestSuite) that are
+// assumed to be correct, as the parameters used are verified by other tests
+// in this collection. Primarily, the reference files are all produced at
+// "native" rates which do not involve any resampling.
+
+// Each test pass produces an output file with a particular format. The output
+// is matched against the reference file closest to its internal processing
+// format. If necessary, the output is resampled back to that rate before
+// comparison. Due to the resampling distortion, we don't expect identical
+// results, but enforce SNR thresholds which vary depending on the format. An
+// expected SNR of 0 is a special case corresponding to infinite SNR, i.e.
+// zero error.
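+//
+// For example, std::make_tuple(48000, 48000, 32000, 48000, 40, 30) runs
+// capture at 48 kHz in and out, render at 32 kHz in and 48 kHz out, and
+// requires at least 40 dB forward and 30 dB reverse SNR.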
+typedef std::tuple<int, int, int, int, double, double> AudioProcessingTestData;
+class AudioProcessingTest
+ : public ::testing::TestWithParam<AudioProcessingTestData> {
+ public:
+ AudioProcessingTest()
+ : input_rate_(std::get<0>(GetParam())),
+ output_rate_(std::get<1>(GetParam())),
+ reverse_input_rate_(std::get<2>(GetParam())),
+ reverse_output_rate_(std::get<3>(GetParam())),
+ expected_snr_(std::get<4>(GetParam())),
+ expected_reverse_snr_(std::get<5>(GetParam())) {}
+
+ virtual ~AudioProcessingTest() {}
+
+ static void SetUpTestSuite() {
+ // Create all needed output reference files.
+ const size_t kNumChannels[] = {1, 2};
+ for (size_t i = 0; i < arraysize(kProcessSampleRates); ++i) {
+ for (size_t j = 0; j < arraysize(kNumChannels); ++j) {
+ for (size_t k = 0; k < arraysize(kNumChannels); ++k) {
+ // The reference files always have matching input and output channels.
+ ProcessFormat(kProcessSampleRates[i], kProcessSampleRates[i],
+ kProcessSampleRates[i], kProcessSampleRates[i],
+ kNumChannels[j], kNumChannels[j], kNumChannels[k],
+ kNumChannels[k], "ref");
+ }
+ }
+ }
+ }
+
+ void TearDown() {
+ // Remove "out" files after each test.
+ ClearTempOutFiles();
+ }
+
+ static void TearDownTestSuite() { ClearTempFiles(); }
+
+ // Runs a process pass on files with the given parameters and dumps the output
+ // to a file specified with `output_file_prefix`. Both forward and reverse
+ // output streams are dumped.
+ static void ProcessFormat(int input_rate,
+ int output_rate,
+ int reverse_input_rate,
+ int reverse_output_rate,
+ size_t num_input_channels,
+ size_t num_output_channels,
+ size_t num_reverse_input_channels,
+ size_t num_reverse_output_channels,
+ absl::string_view output_file_prefix) {
+ AudioProcessing::Config apm_config;
+ apm_config.gain_controller1.analog_gain_controller.enabled = false;
+ rtc::scoped_refptr<AudioProcessing> ap =
+ AudioProcessingBuilderForTesting().SetConfig(apm_config).Create();
+
+ EnableAllAPComponents(ap.get());
+
+ ProcessingConfig processing_config = {
+ {{input_rate, num_input_channels},
+ {output_rate, num_output_channels},
+ {reverse_input_rate, num_reverse_input_channels},
+ {reverse_output_rate, num_reverse_output_channels}}};
+ ap->Initialize(processing_config);
+
+ FILE* far_file =
+ fopen(ResourceFilePath("far", reverse_input_rate).c_str(), "rb");
+ FILE* near_file = fopen(ResourceFilePath("near", input_rate).c_str(), "rb");
+ FILE* out_file = fopen(
+ OutputFilePath(
+ output_file_prefix, input_rate, output_rate, reverse_input_rate,
+ reverse_output_rate, num_input_channels, num_output_channels,
+ num_reverse_input_channels, num_reverse_output_channels, kForward)
+ .c_str(),
+ "wb");
+ FILE* rev_out_file = fopen(
+ OutputFilePath(
+ output_file_prefix, input_rate, output_rate, reverse_input_rate,
+ reverse_output_rate, num_input_channels, num_output_channels,
+ num_reverse_input_channels, num_reverse_output_channels, kReverse)
+ .c_str(),
+ "wb");
+ ASSERT_TRUE(far_file != NULL);
+ ASSERT_TRUE(near_file != NULL);
+ ASSERT_TRUE(out_file != NULL);
+ ASSERT_TRUE(rev_out_file != NULL);
+
+ ChannelBuffer<float> fwd_cb(AudioProcessing::GetFrameSize(input_rate),
+ num_input_channels);
+ ChannelBuffer<float> rev_cb(
+ AudioProcessing::GetFrameSize(reverse_input_rate),
+ num_reverse_input_channels);
+ ChannelBuffer<float> out_cb(AudioProcessing::GetFrameSize(output_rate),
+ num_output_channels);
+ ChannelBuffer<float> rev_out_cb(
+ AudioProcessing::GetFrameSize(reverse_output_rate),
+ num_reverse_output_channels);
+
+ // Temporary buffers.
+ const int max_length =
+ 2 * std::max(std::max(out_cb.num_frames(), rev_out_cb.num_frames()),
+ std::max(fwd_cb.num_frames(), rev_cb.num_frames()));
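+    // The factor of 2 leaves room for interleaving up to two channels into a
+    // single buffer.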
+ std::unique_ptr<float[]> float_data(new float[max_length]);
+ std::unique_ptr<int16_t[]> int_data(new int16_t[max_length]);
+
+ int analog_level = 127;
+ while (ReadChunk(far_file, int_data.get(), float_data.get(), &rev_cb) &&
+ ReadChunk(near_file, int_data.get(), float_data.get(), &fwd_cb)) {
+ EXPECT_NOERR(ap->ProcessReverseStream(
+ rev_cb.channels(), processing_config.reverse_input_stream(),
+ processing_config.reverse_output_stream(), rev_out_cb.channels()));
+
+ EXPECT_NOERR(ap->set_stream_delay_ms(0));
+ ap->set_stream_analog_level(analog_level);
+
+ EXPECT_NOERR(ap->ProcessStream(
+ fwd_cb.channels(), StreamConfig(input_rate, num_input_channels),
+ StreamConfig(output_rate, num_output_channels), out_cb.channels()));
+
+ // Dump forward output to file.
+ Interleave(out_cb.channels(), out_cb.num_frames(), out_cb.num_channels(),
+ float_data.get());
+ size_t out_length = out_cb.num_channels() * out_cb.num_frames();
+
+ ASSERT_EQ(out_length, fwrite(float_data.get(), sizeof(float_data[0]),
+ out_length, out_file));
+
+ // Dump reverse output to file.
+ Interleave(rev_out_cb.channels(), rev_out_cb.num_frames(),
+ rev_out_cb.num_channels(), float_data.get());
+ size_t rev_out_length =
+ rev_out_cb.num_channels() * rev_out_cb.num_frames();
+
+ ASSERT_EQ(rev_out_length, fwrite(float_data.get(), sizeof(float_data[0]),
+ rev_out_length, rev_out_file));
+
+ analog_level = ap->recommended_stream_analog_level();
+ }
+ fclose(far_file);
+ fclose(near_file);
+ fclose(out_file);
+ fclose(rev_out_file);
+ }
+
+ protected:
+ int input_rate_;
+ int output_rate_;
+ int reverse_input_rate_;
+ int reverse_output_rate_;
+ double expected_snr_;
+ double expected_reverse_snr_;
+};
+
+TEST_P(AudioProcessingTest, Formats) {
+ struct ChannelFormat {
+ int num_input;
+ int num_output;
+ int num_reverse_input;
+ int num_reverse_output;
+ };
+ ChannelFormat cf[] = {
+ {1, 1, 1, 1}, {1, 1, 2, 1}, {2, 1, 1, 1},
+ {2, 1, 2, 1}, {2, 2, 1, 1}, {2, 2, 2, 2},
+ };
+
+ for (size_t i = 0; i < arraysize(cf); ++i) {
+ ProcessFormat(input_rate_, output_rate_, reverse_input_rate_,
+ reverse_output_rate_, cf[i].num_input, cf[i].num_output,
+ cf[i].num_reverse_input, cf[i].num_reverse_output, "out");
+
+ // Verify output for both directions.
+ std::vector<StreamDirection> stream_directions;
+ stream_directions.push_back(kForward);
+ stream_directions.push_back(kReverse);
+ for (StreamDirection file_direction : stream_directions) {
+ const int in_rate = file_direction ? reverse_input_rate_ : input_rate_;
+ const int out_rate = file_direction ? reverse_output_rate_ : output_rate_;
+ const int out_num =
+ file_direction ? cf[i].num_reverse_output : cf[i].num_output;
+ const double expected_snr =
+ file_direction ? expected_reverse_snr_ : expected_snr_;
+
+ const int min_ref_rate = std::min(in_rate, out_rate);
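+      // Pick the lowest native processing rate that is not below
+      // `min_ref_rate`, capped at 48 kHz.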
+ int ref_rate;
+ if (min_ref_rate > 32000) {
+ ref_rate = 48000;
+ } else if (min_ref_rate > 16000) {
+ ref_rate = 32000;
+ } else {
+ ref_rate = 16000;
+ }
+
+ FILE* out_file = fopen(
+ OutputFilePath("out", input_rate_, output_rate_, reverse_input_rate_,
+ reverse_output_rate_, cf[i].num_input,
+ cf[i].num_output, cf[i].num_reverse_input,
+ cf[i].num_reverse_output, file_direction)
+ .c_str(),
+ "rb");
+ // The reference files always have matching input and output channels.
+ FILE* ref_file =
+ fopen(OutputFilePath("ref", ref_rate, ref_rate, ref_rate, ref_rate,
+ cf[i].num_output, cf[i].num_output,
+ cf[i].num_reverse_output,
+ cf[i].num_reverse_output, file_direction)
+ .c_str(),
+ "rb");
+ ASSERT_TRUE(out_file != NULL);
+ ASSERT_TRUE(ref_file != NULL);
+
+ const size_t ref_length =
+ AudioProcessing::GetFrameSize(ref_rate) * out_num;
+ const size_t out_length =
+ AudioProcessing::GetFrameSize(out_rate) * out_num;
+ // Data from the reference file.
+ std::unique_ptr<float[]> ref_data(new float[ref_length]);
+ // Data from the output file.
+ std::unique_ptr<float[]> out_data(new float[out_length]);
+ // Data from the resampled output, in case the reference and output rates
+ // don't match.
+ std::unique_ptr<float[]> cmp_data(new float[ref_length]);
+
+ PushResampler<float> resampler;
+ resampler.InitializeIfNeeded(out_rate, ref_rate, out_num);
+
+ // Compute the resampling delay of the output relative to the reference,
+ // to find the region over which we should search for the best SNR.
+ float expected_delay_sec = 0;
+ if (in_rate != ref_rate) {
+ // Input resampling delay.
+ expected_delay_sec +=
+ PushSincResampler::AlgorithmicDelaySeconds(in_rate);
+ }
+ if (out_rate != ref_rate) {
+ // Output resampling delay.
+ expected_delay_sec +=
+ PushSincResampler::AlgorithmicDelaySeconds(ref_rate);
+ // Delay of converting the output back to its processing rate for
+ // testing.
+ expected_delay_sec +=
+ PushSincResampler::AlgorithmicDelaySeconds(out_rate);
+ }
+ // The delay is multiplied by the number of channels because
+ // UpdateBestSNR() computes the SNR over interleaved data without taking
+ // channels into account.
+ int expected_delay =
+ std::floor(expected_delay_sec * ref_rate + 0.5f) * out_num;
+
+ double variance = 0;
+ double sq_error = 0;
+ while (fread(out_data.get(), sizeof(out_data[0]), out_length, out_file) &&
+ fread(ref_data.get(), sizeof(ref_data[0]), ref_length, ref_file)) {
+ float* out_ptr = out_data.get();
+ if (out_rate != ref_rate) {
+ // Resample the output back to its internal processing rate if
+ // necessary.
+ ASSERT_EQ(ref_length,
+ static_cast<size_t>(resampler.Resample(
+ out_ptr, out_length, cmp_data.get(), ref_length)));
+ out_ptr = cmp_data.get();
+ }
+
+ // Update the `sq_error` and `variance` accumulators with the highest
+ // SNR of reference vs output.
+ UpdateBestSNR(ref_data.get(), out_ptr, ref_length, expected_delay,
+ &variance, &sq_error);
+ }
+
+ std::cout << "(" << input_rate_ << ", " << output_rate_ << ", "
+ << reverse_input_rate_ << ", " << reverse_output_rate_ << ", "
+ << cf[i].num_input << ", " << cf[i].num_output << ", "
+ << cf[i].num_reverse_input << ", " << cf[i].num_reverse_output
+ << ", " << file_direction << "): ";
+ if (sq_error > 0) {
+ double snr = 10 * log10(variance / sq_error);
+ EXPECT_GE(snr, expected_snr);
+ EXPECT_NE(0, expected_snr);
+ std::cout << "SNR=" << snr << " dB" << std::endl;
+ } else {
+ std::cout << "SNR=inf dB" << std::endl;
+ }
+
+ fclose(out_file);
+ fclose(ref_file);
+ }
+ }
+}
+
+#if defined(WEBRTC_AUDIOPROC_FLOAT_PROFILE)
+INSTANTIATE_TEST_SUITE_P(
+ CommonFormats,
+ AudioProcessingTest,
+ // Internal processing rates and the particularly common sample rate 44100
+ // Hz are tested in a grid of combinations (capture in, render in, out).
+ ::testing::Values(std::make_tuple(48000, 48000, 48000, 48000, 0, 0),
+ std::make_tuple(48000, 48000, 32000, 48000, 40, 30),
+ std::make_tuple(48000, 48000, 16000, 48000, 40, 20),
+ std::make_tuple(48000, 44100, 48000, 44100, 20, 20),
+ std::make_tuple(48000, 44100, 32000, 44100, 20, 15),
+ std::make_tuple(48000, 44100, 16000, 44100, 20, 15),
+ std::make_tuple(48000, 32000, 48000, 32000, 30, 35),
+ std::make_tuple(48000, 32000, 32000, 32000, 30, 0),
+ std::make_tuple(48000, 32000, 16000, 32000, 30, 20),
+ std::make_tuple(48000, 16000, 48000, 16000, 25, 20),
+ std::make_tuple(48000, 16000, 32000, 16000, 25, 20),
+ std::make_tuple(48000, 16000, 16000, 16000, 25, 0),
+
+ std::make_tuple(44100, 48000, 48000, 48000, 30, 0),
+ std::make_tuple(44100, 48000, 32000, 48000, 30, 30),
+ std::make_tuple(44100, 48000, 16000, 48000, 30, 20),
+ std::make_tuple(44100, 44100, 48000, 44100, 20, 20),
+ std::make_tuple(44100, 44100, 32000, 44100, 20, 15),
+ std::make_tuple(44100, 44100, 16000, 44100, 20, 15),
+ std::make_tuple(44100, 32000, 48000, 32000, 30, 35),
+ std::make_tuple(44100, 32000, 32000, 32000, 30, 0),
+ std::make_tuple(44100, 32000, 16000, 32000, 30, 20),
+ std::make_tuple(44100, 16000, 48000, 16000, 25, 20),
+ std::make_tuple(44100, 16000, 32000, 16000, 25, 20),
+ std::make_tuple(44100, 16000, 16000, 16000, 25, 0),
+
+ std::make_tuple(32000, 48000, 48000, 48000, 15, 0),
+ std::make_tuple(32000, 48000, 32000, 48000, 15, 30),
+ std::make_tuple(32000, 48000, 16000, 48000, 15, 20),
+ std::make_tuple(32000, 44100, 48000, 44100, 19, 20),
+ std::make_tuple(32000, 44100, 32000, 44100, 19, 15),
+ std::make_tuple(32000, 44100, 16000, 44100, 19, 15),
+ std::make_tuple(32000, 32000, 48000, 32000, 40, 35),
+ std::make_tuple(32000, 32000, 32000, 32000, 0, 0),
+ std::make_tuple(32000, 32000, 16000, 32000, 39, 20),
+ std::make_tuple(32000, 16000, 48000, 16000, 25, 20),
+ std::make_tuple(32000, 16000, 32000, 16000, 25, 20),
+ std::make_tuple(32000, 16000, 16000, 16000, 25, 0),
+
+ std::make_tuple(16000, 48000, 48000, 48000, 9, 0),
+ std::make_tuple(16000, 48000, 32000, 48000, 9, 30),
+ std::make_tuple(16000, 48000, 16000, 48000, 9, 20),
+ std::make_tuple(16000, 44100, 48000, 44100, 15, 20),
+ std::make_tuple(16000, 44100, 32000, 44100, 15, 15),
+ std::make_tuple(16000, 44100, 16000, 44100, 15, 15),
+ std::make_tuple(16000, 32000, 48000, 32000, 25, 35),
+ std::make_tuple(16000, 32000, 32000, 32000, 25, 0),
+ std::make_tuple(16000, 32000, 16000, 32000, 25, 20),
+ std::make_tuple(16000, 16000, 48000, 16000, 39, 20),
+ std::make_tuple(16000, 16000, 32000, 16000, 39, 20),
+ std::make_tuple(16000, 16000, 16000, 16000, 0, 0),
+
+ // Other sample rates are not tested exhaustively, to keep
+ // the test runtime manageable.
+ //
+ // Testing most other sample rates logged by Chrome UMA:
+ // - WebRTC.AudioInputSampleRate
+ // - WebRTC.AudioOutputSampleRate
+ // ApmConfiguration.HandlingOfRateCombinations covers
+ // remaining sample rates.
+ std::make_tuple(192000, 192000, 48000, 192000, 20, 40),
+ std::make_tuple(176400, 176400, 48000, 176400, 20, 35),
+ std::make_tuple(96000, 96000, 48000, 96000, 20, 40),
+ std::make_tuple(88200, 88200, 48000, 88200, 20, 20),
+ std::make_tuple(44100, 44100, 48000, 44100, 20, 20)));
+
+#elif defined(WEBRTC_AUDIOPROC_FIXED_PROFILE)
+INSTANTIATE_TEST_SUITE_P(
+ CommonFormats,
+ AudioProcessingTest,
+ ::testing::Values(std::make_tuple(48000, 48000, 48000, 48000, 19, 0),
+ std::make_tuple(48000, 48000, 32000, 48000, 19, 30),
+ std::make_tuple(48000, 48000, 16000, 48000, 19, 20),
+ std::make_tuple(48000, 44100, 48000, 44100, 15, 20),
+ std::make_tuple(48000, 44100, 32000, 44100, 15, 15),
+ std::make_tuple(48000, 44100, 16000, 44100, 15, 15),
+ std::make_tuple(48000, 32000, 48000, 32000, 19, 35),
+ std::make_tuple(48000, 32000, 32000, 32000, 19, 0),
+ std::make_tuple(48000, 32000, 16000, 32000, 19, 20),
+ std::make_tuple(48000, 16000, 48000, 16000, 20, 20),
+ std::make_tuple(48000, 16000, 32000, 16000, 20, 20),
+ std::make_tuple(48000, 16000, 16000, 16000, 20, 0),
+
+ std::make_tuple(44100, 48000, 48000, 48000, 15, 0),
+ std::make_tuple(44100, 48000, 32000, 48000, 15, 30),
+ std::make_tuple(44100, 48000, 16000, 48000, 15, 20),
+ std::make_tuple(44100, 44100, 48000, 44100, 15, 20),
+ std::make_tuple(44100, 44100, 32000, 44100, 15, 15),
+ std::make_tuple(44100, 44100, 16000, 44100, 15, 15),
+ std::make_tuple(44100, 32000, 48000, 32000, 18, 35),
+ std::make_tuple(44100, 32000, 32000, 32000, 18, 0),
+ std::make_tuple(44100, 32000, 16000, 32000, 18, 20),
+ std::make_tuple(44100, 16000, 48000, 16000, 19, 20),
+ std::make_tuple(44100, 16000, 32000, 16000, 19, 20),
+ std::make_tuple(44100, 16000, 16000, 16000, 19, 0),
+
+ std::make_tuple(32000, 48000, 48000, 48000, 17, 0),
+ std::make_tuple(32000, 48000, 32000, 48000, 17, 30),
+ std::make_tuple(32000, 48000, 16000, 48000, 17, 20),
+ std::make_tuple(32000, 44100, 48000, 44100, 20, 20),
+ std::make_tuple(32000, 44100, 32000, 44100, 20, 15),
+ std::make_tuple(32000, 44100, 16000, 44100, 20, 15),
+ std::make_tuple(32000, 32000, 48000, 32000, 27, 35),
+ std::make_tuple(32000, 32000, 32000, 32000, 0, 0),
+ std::make_tuple(32000, 32000, 16000, 32000, 30, 20),
+ std::make_tuple(32000, 16000, 48000, 16000, 20, 20),
+ std::make_tuple(32000, 16000, 32000, 16000, 20, 20),
+ std::make_tuple(32000, 16000, 16000, 16000, 20, 0),
+
+ std::make_tuple(16000, 48000, 48000, 48000, 11, 0),
+ std::make_tuple(16000, 48000, 32000, 48000, 11, 30),
+ std::make_tuple(16000, 48000, 16000, 48000, 11, 20),
+ std::make_tuple(16000, 44100, 48000, 44100, 15, 20),
+ std::make_tuple(16000, 44100, 32000, 44100, 15, 15),
+ std::make_tuple(16000, 44100, 16000, 44100, 15, 15),
+ std::make_tuple(16000, 32000, 48000, 32000, 24, 35),
+ std::make_tuple(16000, 32000, 32000, 32000, 24, 0),
+ std::make_tuple(16000, 32000, 16000, 32000, 25, 20),
+ std::make_tuple(16000, 16000, 48000, 16000, 28, 20),
+ std::make_tuple(16000, 16000, 32000, 16000, 28, 20),
+ std::make_tuple(16000, 16000, 16000, 16000, 0, 0),
+
+ std::make_tuple(192000, 192000, 48000, 192000, 20, 40),
+ std::make_tuple(176400, 176400, 48000, 176400, 20, 35),
+ std::make_tuple(96000, 96000, 48000, 96000, 20, 40),
+ std::make_tuple(88200, 88200, 48000, 88200, 20, 20),
+ std::make_tuple(44100, 44100, 48000, 44100, 20, 20)));
+#endif
+
+// Produces debug output for use in a scoped trace.
+std::string ProduceDebugText(int render_input_sample_rate_hz,
+ int render_output_sample_rate_hz,
+ int capture_input_sample_rate_hz,
+ int capture_output_sample_rate_hz,
+ size_t render_input_num_channels,
+ size_t render_output_num_channels,
+ size_t capture_input_num_channels,
+ size_t capture_output_num_channels) {
+ rtc::StringBuilder ss;
+ ss << "Sample rates:"
+ "\n Render input: "
+ << render_input_sample_rate_hz
+ << " Hz"
+ "\n Render output: "
+ << render_output_sample_rate_hz
+ << " Hz"
+ "\n Capture input: "
+ << capture_input_sample_rate_hz
+ << " Hz"
+ "\n Capture output: "
+ << capture_output_sample_rate_hz
+ << " Hz"
+ "\nNumber of channels:"
+ "\n Render input: "
+ << render_input_num_channels
+ << "\n Render output: " << render_output_num_channels
+ << "\n Capture input: " << capture_input_num_channels
+ << "\n Capture output: " << capture_output_num_channels;
+ return ss.Release();
+}
+
+// Validates that running the audio processing module using various combinations
+// of sample rates and number of channels works as intended.
+void RunApmRateAndChannelTest(
+ rtc::ArrayView<const int> sample_rates_hz,
+ rtc::ArrayView<const int> render_channel_counts,
+ rtc::ArrayView<const int> capture_channel_counts) {
+ webrtc::AudioProcessing::Config apm_config;
+ apm_config.pipeline.multi_channel_render = true;
+ apm_config.pipeline.multi_channel_capture = true;
+ apm_config.echo_canceller.enabled = true;
+ rtc::scoped_refptr<AudioProcessing> apm =
+ AudioProcessingBuilderForTesting().SetConfig(apm_config).Create();
+
+ StreamConfig render_input_stream_config;
+ StreamConfig render_output_stream_config;
+ StreamConfig capture_input_stream_config;
+ StreamConfig capture_output_stream_config;
+
+ std::vector<float> render_input_frame_channels;
+ std::vector<float*> render_input_frame;
+ std::vector<float> render_output_frame_channels;
+ std::vector<float*> render_output_frame;
+ std::vector<float> capture_input_frame_channels;
+ std::vector<float*> capture_input_frame;
+ std::vector<float> capture_output_frame_channels;
+ std::vector<float*> capture_output_frame;
+
+ for (auto render_input_sample_rate_hz : sample_rates_hz) {
+ for (auto render_output_sample_rate_hz : sample_rates_hz) {
+ for (auto capture_input_sample_rate_hz : sample_rates_hz) {
+ for (auto capture_output_sample_rate_hz : sample_rates_hz) {
+ for (size_t render_input_num_channels : render_channel_counts) {
+ for (size_t capture_input_num_channels : capture_channel_counts) {
+ size_t render_output_num_channels = render_input_num_channels;
+ size_t capture_output_num_channels = capture_input_num_channels;
+ auto populate_audio_frame = [](int sample_rate_hz,
+ size_t num_channels,
+ StreamConfig* cfg,
+ std::vector<float>* channels_data,
+ std::vector<float*>* frame_data) {
+ cfg->set_sample_rate_hz(sample_rate_hz);
+ cfg->set_num_channels(num_channels);
+
+ size_t max_frame_size =
+ AudioProcessing::GetFrameSize(sample_rate_hz);
+ channels_data->resize(num_channels * max_frame_size);
+ std::fill(channels_data->begin(), channels_data->end(), 0.5f);
+ frame_data->resize(num_channels);
+ for (size_t channel = 0; channel < num_channels; ++channel) {
+ (*frame_data)[channel] =
+ &(*channels_data)[channel * max_frame_size];
+ }
+ };
+
+ populate_audio_frame(
+ render_input_sample_rate_hz, render_input_num_channels,
+ &render_input_stream_config, &render_input_frame_channels,
+ &render_input_frame);
+ populate_audio_frame(
+ render_output_sample_rate_hz, render_output_num_channels,
+ &render_output_stream_config, &render_output_frame_channels,
+ &render_output_frame);
+ populate_audio_frame(
+ capture_input_sample_rate_hz, capture_input_num_channels,
+ &capture_input_stream_config, &capture_input_frame_channels,
+ &capture_input_frame);
+ populate_audio_frame(
+ capture_output_sample_rate_hz, capture_output_num_channels,
+ &capture_output_stream_config, &capture_output_frame_channels,
+ &capture_output_frame);
+
+ for (size_t frame = 0; frame < 2; ++frame) {
+ SCOPED_TRACE(ProduceDebugText(
+ render_input_sample_rate_hz, render_output_sample_rate_hz,
+ capture_input_sample_rate_hz, capture_output_sample_rate_hz,
+ render_input_num_channels, render_output_num_channels,
+              capture_input_num_channels, capture_output_num_channels));
+
+ int result = apm->ProcessReverseStream(
+ &render_input_frame[0], render_input_stream_config,
+ render_output_stream_config, &render_output_frame[0]);
+ EXPECT_EQ(result, AudioProcessing::kNoError);
+ result = apm->ProcessStream(
+ &capture_input_frame[0], capture_input_stream_config,
+ capture_output_stream_config, &capture_output_frame[0]);
+ EXPECT_EQ(result, AudioProcessing::kNoError);
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
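For reference, a minimal sketch of the planar (deinterleaved) float layout that `populate_audio_frame` builds above; it assumes `AudioProcessing::GetFrameSize()` returns the 10 ms frame size, i.e. sample_rate_hz / 100 (480 samples at 48 kHz):

#include <vector>

// Hypothetical stereo 48 kHz frame: one contiguous buffer, one pointer per
// channel, exactly as the lambda above lays it out.
std::vector<float> channel_data(2 * 480, 0.5f);
std::vector<float*> frame = {
    &channel_data[0],    // channel 0: samples [0, 480)
    &channel_data[480],  // channel 1: samples [480, 960)
};
// ProcessStream()/ProcessReverseStream() then receive the frame's data
// pointer (a float* const*) together with the matching StreamConfigs.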
+constexpr void Toggle(bool& b) {
+ b ^= true;
+}
+
+} // namespace
+
+TEST(RuntimeSettingTest, TestDefaultCtor) {
+ auto s = AudioProcessing::RuntimeSetting();
+ EXPECT_EQ(AudioProcessing::RuntimeSetting::Type::kNotSpecified, s.type());
+}
+
+TEST(RuntimeSettingTest, TestUsageWithSwapQueue) {
+ SwapQueue<AudioProcessing::RuntimeSetting> q(1);
+ auto s = AudioProcessing::RuntimeSetting();
+ ASSERT_TRUE(q.Insert(&s));
+ ASSERT_TRUE(q.Remove(&s));
+ EXPECT_EQ(AudioProcessing::RuntimeSetting::Type::kNotSpecified, s.type());
+}
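SwapQueue hands elements across by swapping with the caller's object rather than allocating, which is why the same local `s` can be reused for both Insert() and Remove() above. An illustrative sketch with hypothetical values:

SwapQueue<int> queue(/*size=*/1);
int value = 42;
ASSERT_TRUE(queue.Insert(&value));  // the queue slot and `value` are swapped
int out = 0;
ASSERT_TRUE(queue.Remove(&out));    // `out` becomes 42; the slot takes old `out`
EXPECT_EQ(out, 42);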
+
+TEST(ApmConfiguration, EnablePostProcessing) {
+ // Verify that apm uses a capture post processing module if one is provided.
+ auto mock_post_processor_ptr =
+ new ::testing::NiceMock<test::MockCustomProcessing>();
+ auto mock_post_processor =
+ std::unique_ptr<CustomProcessing>(mock_post_processor_ptr);
+ rtc::scoped_refptr<AudioProcessing> apm =
+ AudioProcessingBuilderForTesting()
+ .SetCapturePostProcessing(std::move(mock_post_processor))
+ .Create();
+
+ Int16FrameData audio;
+ audio.num_channels = 1;
+ SetFrameSampleRate(&audio, AudioProcessing::NativeRate::kSampleRate16kHz);
+
+ EXPECT_CALL(*mock_post_processor_ptr, Process(::testing::_)).Times(1);
+ apm->ProcessStream(audio.data.data(),
+ StreamConfig(audio.sample_rate_hz, audio.num_channels),
+ StreamConfig(audio.sample_rate_hz, audio.num_channels),
+ audio.data.data());
+}
+
+TEST(ApmConfiguration, EnablePreProcessing) {
+  // Verify that apm uses a render pre processing module if one is provided.
+ auto mock_pre_processor_ptr =
+ new ::testing::NiceMock<test::MockCustomProcessing>();
+ auto mock_pre_processor =
+ std::unique_ptr<CustomProcessing>(mock_pre_processor_ptr);
+ rtc::scoped_refptr<AudioProcessing> apm =
+ AudioProcessingBuilderForTesting()
+ .SetRenderPreProcessing(std::move(mock_pre_processor))
+ .Create();
+
+ Int16FrameData audio;
+ audio.num_channels = 1;
+ SetFrameSampleRate(&audio, AudioProcessing::NativeRate::kSampleRate16kHz);
+
+ EXPECT_CALL(*mock_pre_processor_ptr, Process(::testing::_)).Times(1);
+ apm->ProcessReverseStream(
+ audio.data.data(), StreamConfig(audio.sample_rate_hz, audio.num_channels),
+ StreamConfig(audio.sample_rate_hz, audio.num_channels),
+ audio.data.data());
+}
+
+TEST(ApmConfiguration, EnableCaptureAnalyzer) {
+ // Verify that apm uses a capture analyzer if one is provided.
+ auto mock_capture_analyzer_ptr =
+ new ::testing::NiceMock<test::MockCustomAudioAnalyzer>();
+ auto mock_capture_analyzer =
+ std::unique_ptr<CustomAudioAnalyzer>(mock_capture_analyzer_ptr);
+ rtc::scoped_refptr<AudioProcessing> apm =
+ AudioProcessingBuilderForTesting()
+ .SetCaptureAnalyzer(std::move(mock_capture_analyzer))
+ .Create();
+
+ Int16FrameData audio;
+ audio.num_channels = 1;
+ SetFrameSampleRate(&audio, AudioProcessing::NativeRate::kSampleRate16kHz);
+
+ EXPECT_CALL(*mock_capture_analyzer_ptr, Analyze(::testing::_)).Times(1);
+ apm->ProcessStream(audio.data.data(),
+ StreamConfig(audio.sample_rate_hz, audio.num_channels),
+ StreamConfig(audio.sample_rate_hz, audio.num_channels),
+ audio.data.data());
+}
+
+TEST(ApmConfiguration, PreProcessingReceivesRuntimeSettings) {
+ auto mock_pre_processor_ptr =
+ new ::testing::NiceMock<test::MockCustomProcessing>();
+ auto mock_pre_processor =
+ std::unique_ptr<CustomProcessing>(mock_pre_processor_ptr);
+ rtc::scoped_refptr<AudioProcessing> apm =
+ AudioProcessingBuilderForTesting()
+ .SetRenderPreProcessing(std::move(mock_pre_processor))
+ .Create();
+ apm->SetRuntimeSetting(
+ AudioProcessing::RuntimeSetting::CreateCustomRenderSetting(0));
+
+  // RuntimeSettings are forwarded during `Process*Stream()` calls, so we have
+  // to make one such call here.
+ Int16FrameData audio;
+ audio.num_channels = 1;
+ SetFrameSampleRate(&audio, AudioProcessing::NativeRate::kSampleRate16kHz);
+
+ EXPECT_CALL(*mock_pre_processor_ptr, SetRuntimeSetting(::testing::_))
+ .Times(1);
+ apm->ProcessReverseStream(
+ audio.data.data(), StreamConfig(audio.sample_rate_hz, audio.num_channels),
+ StreamConfig(audio.sample_rate_hz, audio.num_channels),
+ audio.data.data());
+}
+
+class MyEchoControlFactory : public EchoControlFactory {
+ public:
+ std::unique_ptr<EchoControl> Create(int sample_rate_hz) {
+ auto ec = new test::MockEchoControl();
+ EXPECT_CALL(*ec, AnalyzeRender(::testing::_)).Times(1);
+ EXPECT_CALL(*ec, AnalyzeCapture(::testing::_)).Times(2);
+ EXPECT_CALL(*ec, ProcessCapture(::testing::_, ::testing::_, ::testing::_))
+ .Times(2);
+ return std::unique_ptr<EchoControl>(ec);
+ }
+
+  std::unique_ptr<EchoControl> Create(int sample_rate_hz,
+                                      int num_render_channels,
+                                      int num_capture_channels) override {
+    return Create(sample_rate_hz);
+  }
+};
+
+TEST(ApmConfiguration, EchoControlInjection) {
+ // Verify that apm uses an injected echo controller if one is provided.
+ std::unique_ptr<EchoControlFactory> echo_control_factory(
+ new MyEchoControlFactory());
+
+ rtc::scoped_refptr<AudioProcessing> apm =
+ AudioProcessingBuilderForTesting()
+ .SetEchoControlFactory(std::move(echo_control_factory))
+ .Create();
+
+ Int16FrameData audio;
+ audio.num_channels = 1;
+ SetFrameSampleRate(&audio, AudioProcessing::NativeRate::kSampleRate16kHz);
+ apm->ProcessStream(audio.data.data(),
+ StreamConfig(audio.sample_rate_hz, audio.num_channels),
+ StreamConfig(audio.sample_rate_hz, audio.num_channels),
+ audio.data.data());
+ apm->ProcessReverseStream(
+ audio.data.data(), StreamConfig(audio.sample_rate_hz, audio.num_channels),
+ StreamConfig(audio.sample_rate_hz, audio.num_channels),
+ audio.data.data());
+ apm->ProcessStream(audio.data.data(),
+ StreamConfig(audio.sample_rate_hz, audio.num_channels),
+ StreamConfig(audio.sample_rate_hz, audio.num_channels),
+ audio.data.data());
+}
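The expectation counts set up in MyEchoControlFactory::Create() line up with the three processing calls in this test; spelled out (mapping assumed from the APM call flow):

// ProcessReverseStream x1 -> EchoControl::AnalyzeRender   x1
// ProcessStream        x2 -> EchoControl::AnalyzeCapture  x2
//                         -> EchoControl::ProcessCapture  x2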
+
+TEST(ApmConfiguration, EchoDetectorInjection) {
+ using ::testing::_;
+ rtc::scoped_refptr<test::MockEchoDetector> mock_echo_detector =
+ rtc::make_ref_counted<::testing::StrictMock<test::MockEchoDetector>>();
+ EXPECT_CALL(*mock_echo_detector,
+ Initialize(/*capture_sample_rate_hz=*/16000, _,
+ /*render_sample_rate_hz=*/16000, _))
+ .Times(1);
+ rtc::scoped_refptr<AudioProcessing> apm =
+ AudioProcessingBuilderForTesting()
+ .SetEchoDetector(mock_echo_detector)
+ .Create();
+
+ // The echo detector is included in processing when enabled.
+ EXPECT_CALL(*mock_echo_detector, AnalyzeRenderAudio(_))
+ .WillOnce([](rtc::ArrayView<const float> render_audio) {
+ EXPECT_EQ(render_audio.size(), 160u);
+ });
+ EXPECT_CALL(*mock_echo_detector, AnalyzeCaptureAudio(_))
+ .WillOnce([](rtc::ArrayView<const float> capture_audio) {
+ EXPECT_EQ(capture_audio.size(), 160u);
+ });
+ EXPECT_CALL(*mock_echo_detector, GetMetrics()).Times(1);
+
+ Int16FrameData frame;
+ frame.num_channels = 1;
+ SetFrameSampleRate(&frame, 16000);
+
+ apm->ProcessReverseStream(frame.data.data(), StreamConfig(16000, 1),
+ StreamConfig(16000, 1), frame.data.data());
+ apm->ProcessStream(frame.data.data(), StreamConfig(16000, 1),
+ StreamConfig(16000, 1), frame.data.data());
+
+  // When the processing rates change, the echo detector is reinitialized to
+  // match the new rates.
+ EXPECT_CALL(*mock_echo_detector,
+ Initialize(/*capture_sample_rate_hz=*/48000, _,
+ /*render_sample_rate_hz=*/16000, _))
+ .Times(1);
+ EXPECT_CALL(*mock_echo_detector,
+ Initialize(/*capture_sample_rate_hz=*/48000, _,
+ /*render_sample_rate_hz=*/48000, _))
+ .Times(1);
+ EXPECT_CALL(*mock_echo_detector, AnalyzeRenderAudio(_))
+ .WillOnce([](rtc::ArrayView<const float> render_audio) {
+ EXPECT_EQ(render_audio.size(), 480u);
+ });
+ EXPECT_CALL(*mock_echo_detector, AnalyzeCaptureAudio(_))
+ .Times(2)
+ .WillRepeatedly([](rtc::ArrayView<const float> capture_audio) {
+ EXPECT_EQ(capture_audio.size(), 480u);
+ });
+ EXPECT_CALL(*mock_echo_detector, GetMetrics()).Times(2);
+
+ SetFrameSampleRate(&frame, 48000);
+ apm->ProcessStream(frame.data.data(), StreamConfig(48000, 1),
+ StreamConfig(48000, 1), frame.data.data());
+ apm->ProcessReverseStream(frame.data.data(), StreamConfig(48000, 1),
+ StreamConfig(48000, 1), frame.data.data());
+ apm->ProcessStream(frame.data.data(), StreamConfig(48000, 1),
+ StreamConfig(48000, 1), frame.data.data());
+}
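The expected ArrayView sizes follow from APM's 10 ms framing, assuming one frame holds sample_rate_hz / 100 samples:

static_assert(16000 / 100 == 160, "10 ms frame at 16 kHz");
static_assert(48000 / 100 == 480, "10 ms frame at 48 kHz");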
+
+rtc::scoped_refptr<AudioProcessing> CreateApm(bool mobile_aec) {
+ // Enable residual echo detection, for stats.
+ rtc::scoped_refptr<AudioProcessing> apm =
+ AudioProcessingBuilderForTesting()
+ .SetEchoDetector(CreateEchoDetector())
+ .Create();
+ if (!apm) {
+ return apm;
+ }
+
+ ProcessingConfig processing_config = {
+ {{32000, 1}, {32000, 1}, {32000, 1}, {32000, 1}}};
+
+ if (apm->Initialize(processing_config) != 0) {
+ return nullptr;
+ }
+
+ // Disable all components except for an AEC.
+ AudioProcessing::Config apm_config;
+ apm_config.high_pass_filter.enabled = false;
+ apm_config.gain_controller1.enabled = false;
+ apm_config.gain_controller2.enabled = false;
+ apm_config.echo_canceller.enabled = true;
+ apm_config.echo_canceller.mobile_mode = mobile_aec;
+ apm_config.noise_suppression.enabled = false;
+ apm->ApplyConfig(apm_config);
+ return apm;
+}
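The brace-initialized ProcessingConfig above sets all four stream configs to 32 kHz mono; the stream order assumed here (not stated in this diff) is capture input, capture output, reverse (render) input, reverse output:

ProcessingConfig processing_config = {
    {{32000, 1},    // capture input
     {32000, 1},    // capture output
     {32000, 1},    // reverse (render) input
     {32000, 1}}};  // reverse (render) output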
+
+#if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS) || defined(WEBRTC_MAC)
+#define MAYBE_ApmStatistics DISABLED_ApmStatistics
+#else
+#define MAYBE_ApmStatistics ApmStatistics
+#endif
+
+TEST(MAYBE_ApmStatistics, AECEnabledTest) {
+ // Set up APM with AEC3 and process some audio.
+ rtc::scoped_refptr<AudioProcessing> apm = CreateApm(false);
+ ASSERT_TRUE(apm);
+ AudioProcessing::Config apm_config;
+ apm_config.echo_canceller.enabled = true;
+ apm->ApplyConfig(apm_config);
+
+ // Set up an audioframe.
+ Int16FrameData frame;
+ frame.num_channels = 1;
+ SetFrameSampleRate(&frame, AudioProcessing::NativeRate::kSampleRate32kHz);
+
+ // Fill the audio frame with a sawtooth pattern.
+ int16_t* ptr = frame.data.data();
+ for (size_t i = 0; i < frame.kMaxDataSizeSamples; i++) {
+ ptr[i] = 10000 * ((i % 3) - 1);
+ }
+
+ // Do some processing.
+ for (int i = 0; i < 200; i++) {
+ EXPECT_EQ(apm->ProcessReverseStream(
+ frame.data.data(),
+ StreamConfig(frame.sample_rate_hz, frame.num_channels),
+ StreamConfig(frame.sample_rate_hz, frame.num_channels),
+ frame.data.data()),
+ 0);
+ EXPECT_EQ(apm->set_stream_delay_ms(0), 0);
+ EXPECT_EQ(apm->ProcessStream(
+ frame.data.data(),
+ StreamConfig(frame.sample_rate_hz, frame.num_channels),
+ StreamConfig(frame.sample_rate_hz, frame.num_channels),
+ frame.data.data()),
+ 0);
+ }
+
+ // Test statistics interface.
+ AudioProcessingStats stats = apm->GetStatistics();
+ // We expect all statistics to be set and have a sensible value.
+ ASSERT_TRUE(stats.residual_echo_likelihood.has_value());
+ EXPECT_GE(*stats.residual_echo_likelihood, 0.0);
+ EXPECT_LE(*stats.residual_echo_likelihood, 1.0);
+ ASSERT_TRUE(stats.residual_echo_likelihood_recent_max.has_value());
+ EXPECT_GE(*stats.residual_echo_likelihood_recent_max, 0.0);
+ EXPECT_LE(*stats.residual_echo_likelihood_recent_max, 1.0);
+ ASSERT_TRUE(stats.echo_return_loss.has_value());
+ EXPECT_NE(*stats.echo_return_loss, -100.0);
+ ASSERT_TRUE(stats.echo_return_loss_enhancement.has_value());
+ EXPECT_NE(*stats.echo_return_loss_enhancement, -100.0);
+}
+
+TEST(MAYBE_ApmStatistics, AECMEnabledTest) {
+ // Set up APM with AECM and process some audio.
+ rtc::scoped_refptr<AudioProcessing> apm = CreateApm(true);
+ ASSERT_TRUE(apm);
+
+ // Set up an audioframe.
+ Int16FrameData frame;
+ frame.num_channels = 1;
+ SetFrameSampleRate(&frame, AudioProcessing::NativeRate::kSampleRate32kHz);
+
+ // Fill the audio frame with a sawtooth pattern.
+ int16_t* ptr = frame.data.data();
+ for (size_t i = 0; i < frame.kMaxDataSizeSamples; i++) {
+ ptr[i] = 10000 * ((i % 3) - 1);
+ }
+
+ // Do some processing.
+ for (int i = 0; i < 200; i++) {
+ EXPECT_EQ(apm->ProcessReverseStream(
+ frame.data.data(),
+ StreamConfig(frame.sample_rate_hz, frame.num_channels),
+ StreamConfig(frame.sample_rate_hz, frame.num_channels),
+ frame.data.data()),
+ 0);
+ EXPECT_EQ(apm->set_stream_delay_ms(0), 0);
+ EXPECT_EQ(apm->ProcessStream(
+ frame.data.data(),
+ StreamConfig(frame.sample_rate_hz, frame.num_channels),
+ StreamConfig(frame.sample_rate_hz, frame.num_channels),
+ frame.data.data()),
+ 0);
+ }
+
+ // Test statistics interface.
+ AudioProcessingStats stats = apm->GetStatistics();
+ // We expect only the residual echo detector statistics to be set and have a
+ // sensible value.
+ ASSERT_TRUE(stats.residual_echo_likelihood.has_value());
+ EXPECT_GE(*stats.residual_echo_likelihood, 0.0);
+ EXPECT_LE(*stats.residual_echo_likelihood, 1.0);
+ ASSERT_TRUE(stats.residual_echo_likelihood_recent_max.has_value());
+ EXPECT_GE(*stats.residual_echo_likelihood_recent_max, 0.0);
+ EXPECT_LE(*stats.residual_echo_likelihood_recent_max, 1.0);
+ EXPECT_FALSE(stats.echo_return_loss.has_value());
+ EXPECT_FALSE(stats.echo_return_loss_enhancement.has_value());
+}
+
+TEST(ApmStatistics, DoNotReportVoiceDetectedStat) {
+ ProcessingConfig processing_config = {
+ {{32000, 1}, {32000, 1}, {32000, 1}, {32000, 1}}};
+
+ // Set up an audioframe.
+ Int16FrameData frame;
+ frame.num_channels = 1;
+ SetFrameSampleRate(&frame, AudioProcessing::NativeRate::kSampleRate32kHz);
+
+ // Fill the audio frame with a sawtooth pattern.
+ int16_t* ptr = frame.data.data();
+ for (size_t i = 0; i < frame.kMaxDataSizeSamples; i++) {
+ ptr[i] = 10000 * ((i % 3) - 1);
+ }
+
+ rtc::scoped_refptr<AudioProcessing> apm =
+ AudioProcessingBuilderForTesting().Create();
+ apm->Initialize(processing_config);
+
+ // No metric should be reported.
+ EXPECT_EQ(
+ apm->ProcessStream(frame.data.data(),
+ StreamConfig(frame.sample_rate_hz, frame.num_channels),
+ StreamConfig(frame.sample_rate_hz, frame.num_channels),
+ frame.data.data()),
+ 0);
+ EXPECT_FALSE(apm->GetStatistics().voice_detected.has_value());
+}
+
+TEST(ApmStatistics, GetStatisticsReportsNoEchoDetectorStatsWhenDisabled) {
+ rtc::scoped_refptr<AudioProcessing> apm =
+ AudioProcessingBuilderForTesting().Create();
+ Int16FrameData frame;
+ frame.num_channels = 1;
+ SetFrameSampleRate(&frame, AudioProcessing::NativeRate::kSampleRate32kHz);
+ ASSERT_EQ(
+ apm->ProcessStream(frame.data.data(),
+ StreamConfig(frame.sample_rate_hz, frame.num_channels),
+ StreamConfig(frame.sample_rate_hz, frame.num_channels),
+ frame.data.data()),
+ 0);
+ // Echo detector is disabled by default, no stats reported.
+ AudioProcessingStats stats = apm->GetStatistics();
+ EXPECT_FALSE(stats.residual_echo_likelihood.has_value());
+ EXPECT_FALSE(stats.residual_echo_likelihood_recent_max.has_value());
+}
+
+TEST(ApmStatistics, GetStatisticsReportsEchoDetectorStatsWhenEnabled) {
+ // Create APM with an echo detector injected.
+ rtc::scoped_refptr<AudioProcessing> apm =
+ AudioProcessingBuilderForTesting()
+ .SetEchoDetector(CreateEchoDetector())
+ .Create();
+ Int16FrameData frame;
+ frame.num_channels = 1;
+ SetFrameSampleRate(&frame, AudioProcessing::NativeRate::kSampleRate32kHz);
+ // Echo detector enabled: Report stats.
+ ASSERT_EQ(
+ apm->ProcessStream(frame.data.data(),
+ StreamConfig(frame.sample_rate_hz, frame.num_channels),
+ StreamConfig(frame.sample_rate_hz, frame.num_channels),
+ frame.data.data()),
+ 0);
+ AudioProcessingStats stats = apm->GetStatistics();
+ EXPECT_TRUE(stats.residual_echo_likelihood.has_value());
+ EXPECT_TRUE(stats.residual_echo_likelihood_recent_max.has_value());
+}
+
+TEST(ApmConfiguration, HandlingOfRateAndChannelCombinations) {
+ std::array<int, 3> sample_rates_hz = {16000, 32000, 48000};
+ std::array<int, 2> render_channel_counts = {1, 7};
+ std::array<int, 2> capture_channel_counts = {1, 7};
+ RunApmRateAndChannelTest(sample_rates_hz, render_channel_counts,
+ capture_channel_counts);
+}
+
+TEST(ApmConfiguration, HandlingOfChannelCombinations) {
+ std::array<int, 1> sample_rates_hz = {48000};
+ std::array<int, 8> render_channel_counts = {1, 2, 3, 4, 5, 6, 7, 8};
+ std::array<int, 8> capture_channel_counts = {1, 2, 3, 4, 5, 6, 7, 8};
+ RunApmRateAndChannelTest(sample_rates_hz, render_channel_counts,
+ capture_channel_counts);
+}
+
+TEST(ApmConfiguration, HandlingOfRateCombinations) {
+ // Test rates <= 96000 logged by Chrome UMA:
+ // - WebRTC.AudioInputSampleRate
+ // - WebRTC.AudioOutputSampleRate
+ // Higher rates are tested in AudioProcessingTest.Format, to keep the number
+ // of combinations in this test manageable.
+ std::array<int, 9> sample_rates_hz = {8000, 11025, 16000, 22050, 32000,
+ 44100, 48000, 88200, 96000};
+ std::array<int, 1> render_channel_counts = {2};
+ std::array<int, 1> capture_channel_counts = {2};
+ RunApmRateAndChannelTest(sample_rates_hz, render_channel_counts,
+ capture_channel_counts);
+}
+
+TEST(ApmConfiguration, SelfAssignment) {
+  // At some point the memory sanitizer complained about self-assignment.
+ // Make sure we don't regress.
+ AudioProcessing::Config config;
+ AudioProcessing::Config* config2 = &config;
+ *config2 = *config2; // Workaround -Wself-assign-overloaded
+ SUCCEED(); // Real success is absence of defects from asan/msan/ubsan.
+}
+
+TEST(AudioProcessing, GainController1ConfigEqual) {
+ AudioProcessing::Config::GainController1 a;
+ AudioProcessing::Config::GainController1 b;
+ EXPECT_EQ(a, b);
+
+ Toggle(a.enabled);
+ b.enabled = a.enabled;
+ EXPECT_EQ(a, b);
+
+ a.mode = AudioProcessing::Config::GainController1::Mode::kAdaptiveDigital;
+ b.mode = a.mode;
+ EXPECT_EQ(a, b);
+
+ a.target_level_dbfs++;
+ b.target_level_dbfs = a.target_level_dbfs;
+ EXPECT_EQ(a, b);
+
+ a.compression_gain_db++;
+ b.compression_gain_db = a.compression_gain_db;
+ EXPECT_EQ(a, b);
+
+ Toggle(a.enable_limiter);
+ b.enable_limiter = a.enable_limiter;
+ EXPECT_EQ(a, b);
+
+ auto& a_analog = a.analog_gain_controller;
+ auto& b_analog = b.analog_gain_controller;
+
+ Toggle(a_analog.enabled);
+ b_analog.enabled = a_analog.enabled;
+ EXPECT_EQ(a, b);
+
+ a_analog.startup_min_volume++;
+ b_analog.startup_min_volume = a_analog.startup_min_volume;
+ EXPECT_EQ(a, b);
+
+ a_analog.clipped_level_min++;
+ b_analog.clipped_level_min = a_analog.clipped_level_min;
+ EXPECT_EQ(a, b);
+
+ Toggle(a_analog.enable_digital_adaptive);
+ b_analog.enable_digital_adaptive = a_analog.enable_digital_adaptive;
+ EXPECT_EQ(a, b);
+}
+
+// Checks that one differing parameter is sufficient to make two configs
+// different.
+TEST(AudioProcessing, GainController1ConfigNotEqual) {
+ AudioProcessing::Config::GainController1 a;
+ const AudioProcessing::Config::GainController1 b;
+
+ Toggle(a.enabled);
+ EXPECT_NE(a, b);
+ a = b;
+
+ a.mode = AudioProcessing::Config::GainController1::Mode::kAdaptiveDigital;
+ EXPECT_NE(a, b);
+ a = b;
+
+ a.target_level_dbfs++;
+ EXPECT_NE(a, b);
+ a = b;
+
+ a.compression_gain_db++;
+ EXPECT_NE(a, b);
+ a = b;
+
+ Toggle(a.enable_limiter);
+ EXPECT_NE(a, b);
+ a = b;
+
+ auto& a_analog = a.analog_gain_controller;
+ const auto& b_analog = b.analog_gain_controller;
+
+ Toggle(a_analog.enabled);
+ EXPECT_NE(a, b);
+ a_analog = b_analog;
+
+ a_analog.startup_min_volume++;
+ EXPECT_NE(a, b);
+ a_analog = b_analog;
+
+ a_analog.clipped_level_min++;
+ EXPECT_NE(a, b);
+ a_analog = b_analog;
+
+ Toggle(a_analog.enable_digital_adaptive);
+ EXPECT_NE(a, b);
+ a_analog = b_analog;
+}
+
+TEST(AudioProcessing, GainController2ConfigEqual) {
+ AudioProcessing::Config::GainController2 a;
+ AudioProcessing::Config::GainController2 b;
+ EXPECT_EQ(a, b);
+
+ Toggle(a.enabled);
+ b.enabled = a.enabled;
+ EXPECT_EQ(a, b);
+
+ a.fixed_digital.gain_db += 1.0f;
+ b.fixed_digital.gain_db = a.fixed_digital.gain_db;
+ EXPECT_EQ(a, b);
+
+ auto& a_adaptive = a.adaptive_digital;
+ auto& b_adaptive = b.adaptive_digital;
+
+ Toggle(a_adaptive.enabled);
+ b_adaptive.enabled = a_adaptive.enabled;
+ EXPECT_EQ(a, b);
+
+ Toggle(a_adaptive.dry_run);
+ b_adaptive.dry_run = a_adaptive.dry_run;
+ EXPECT_EQ(a, b);
+
+ a_adaptive.headroom_db += 1.0f;
+ b_adaptive.headroom_db = a_adaptive.headroom_db;
+ EXPECT_EQ(a, b);
+
+ a_adaptive.max_gain_db += 1.0f;
+ b_adaptive.max_gain_db = a_adaptive.max_gain_db;
+ EXPECT_EQ(a, b);
+
+ a_adaptive.initial_gain_db += 1.0f;
+ b_adaptive.initial_gain_db = a_adaptive.initial_gain_db;
+ EXPECT_EQ(a, b);
+
+ a_adaptive.vad_reset_period_ms++;
+ b_adaptive.vad_reset_period_ms = a_adaptive.vad_reset_period_ms;
+ EXPECT_EQ(a, b);
+
+ a_adaptive.adjacent_speech_frames_threshold++;
+ b_adaptive.adjacent_speech_frames_threshold =
+ a_adaptive.adjacent_speech_frames_threshold;
+ EXPECT_EQ(a, b);
+
+ a_adaptive.max_gain_change_db_per_second += 1.0f;
+ b_adaptive.max_gain_change_db_per_second =
+ a_adaptive.max_gain_change_db_per_second;
+ EXPECT_EQ(a, b);
+
+ a_adaptive.max_output_noise_level_dbfs += 1.0f;
+ b_adaptive.max_output_noise_level_dbfs =
+ a_adaptive.max_output_noise_level_dbfs;
+ EXPECT_EQ(a, b);
+}
+
+// Checks that one differing parameter is sufficient to make two configs
+// different.
+TEST(AudioProcessing, GainController2ConfigNotEqual) {
+ AudioProcessing::Config::GainController2 a;
+ const AudioProcessing::Config::GainController2 b;
+
+ Toggle(a.enabled);
+ EXPECT_NE(a, b);
+ a = b;
+
+ a.fixed_digital.gain_db += 1.0f;
+ EXPECT_NE(a, b);
+ a.fixed_digital = b.fixed_digital;
+
+ auto& a_adaptive = a.adaptive_digital;
+ const auto& b_adaptive = b.adaptive_digital;
+
+ Toggle(a_adaptive.enabled);
+ EXPECT_NE(a, b);
+ a_adaptive = b_adaptive;
+
+ Toggle(a_adaptive.dry_run);
+ EXPECT_NE(a, b);
+ a_adaptive = b_adaptive;
+
+ a_adaptive.headroom_db += 1.0f;
+ EXPECT_NE(a, b);
+ a_adaptive = b_adaptive;
+
+ a_adaptive.max_gain_db += 1.0f;
+ EXPECT_NE(a, b);
+ a_adaptive = b_adaptive;
+
+ a_adaptive.initial_gain_db += 1.0f;
+ EXPECT_NE(a, b);
+ a_adaptive = b_adaptive;
+
+ a_adaptive.vad_reset_period_ms++;
+ EXPECT_NE(a, b);
+ a_adaptive = b_adaptive;
+
+ a_adaptive.adjacent_speech_frames_threshold++;
+ EXPECT_NE(a, b);
+ a_adaptive = b_adaptive;
+
+ a_adaptive.max_gain_change_db_per_second += 1.0f;
+ EXPECT_NE(a, b);
+ a_adaptive = b_adaptive;
+
+ a_adaptive.max_output_noise_level_dbfs += 1.0f;
+ EXPECT_NE(a, b);
+ a_adaptive = b_adaptive;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/capture_levels_adjuster/BUILD.gn b/third_party/libwebrtc/modules/audio_processing/capture_levels_adjuster/BUILD.gn
new file mode 100644
index 0000000000..cf976ba995
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/capture_levels_adjuster/BUILD.gn
@@ -0,0 +1,47 @@
+# Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+import("../../../webrtc.gni")
+
+rtc_library("capture_levels_adjuster") {
+ visibility = [ "*" ]
+
+ sources = [
+ "audio_samples_scaler.cc",
+ "audio_samples_scaler.h",
+ "capture_levels_adjuster.cc",
+ "capture_levels_adjuster.h",
+ ]
+
+ defines = []
+
+ deps = [
+ "..:audio_buffer",
+ "../../../api:array_view",
+ "../../../rtc_base:checks",
+ "../../../rtc_base:safe_minmax",
+ ]
+}
+
+if (!build_with_mozilla) {
+rtc_library("capture_levels_adjuster_unittests") {
+ testonly = true
+
+ sources = [
+ "audio_samples_scaler_unittest.cc",
+ "capture_levels_adjuster_unittest.cc",
+ ]
+ deps = [
+ ":capture_levels_adjuster",
+ "..:audioproc_test_utils",
+ "../../../rtc_base:gunit_helpers",
+ "../../../rtc_base:stringutils",
+ "../../../test:test_support",
+ ]
+}
+}
diff --git a/third_party/libwebrtc/modules/audio_processing/capture_levels_adjuster/audio_samples_scaler.cc b/third_party/libwebrtc/modules/audio_processing/capture_levels_adjuster/audio_samples_scaler.cc
new file mode 100644
index 0000000000..cb2336b87d
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/capture_levels_adjuster/audio_samples_scaler.cc
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/audio_processing/capture_levels_adjuster/audio_samples_scaler.h"
+
+#include <algorithm>
+
+#include "api/array_view.h"
+#include "modules/audio_processing/audio_buffer.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/numerics/safe_minmax.h"
+
+namespace webrtc {
+
+AudioSamplesScaler::AudioSamplesScaler(float initial_gain)
+ : previous_gain_(initial_gain), target_gain_(initial_gain) {}
+
+void AudioSamplesScaler::Process(AudioBuffer& audio_buffer) {
+ if (static_cast<int>(audio_buffer.num_frames()) != samples_per_channel_) {
+ // Update the members depending on audio-buffer length if needed.
+ RTC_DCHECK_GT(audio_buffer.num_frames(), 0);
+ samples_per_channel_ = static_cast<int>(audio_buffer.num_frames());
+ one_by_samples_per_channel_ = 1.f / samples_per_channel_;
+ }
+
+ if (target_gain_ == 1.f && previous_gain_ == target_gain_) {
+ // If only a gain of 1 is to be applied, do an early return without applying
+ // any gain.
+ return;
+ }
+
+ float gain = previous_gain_;
+ if (previous_gain_ == target_gain_) {
+ // Apply a non-changing gain.
+ for (size_t channel = 0; channel < audio_buffer.num_channels(); ++channel) {
+ rtc::ArrayView<float> channel_view(audio_buffer.channels()[channel],
+ samples_per_channel_);
+ for (float& sample : channel_view) {
+ sample *= gain;
+ }
+ }
+ } else {
+ const float increment =
+ (target_gain_ - previous_gain_) * one_by_samples_per_channel_;
+
+ if (increment > 0.f) {
+ // Apply an increasing gain.
+ for (size_t channel = 0; channel < audio_buffer.num_channels();
+ ++channel) {
+ gain = previous_gain_;
+ rtc::ArrayView<float> channel_view(audio_buffer.channels()[channel],
+ samples_per_channel_);
+ for (float& sample : channel_view) {
+ gain = std::min(gain + increment, target_gain_);
+ sample *= gain;
+ }
+ }
+ } else {
+ // Apply a decreasing gain.
+ for (size_t channel = 0; channel < audio_buffer.num_channels();
+ ++channel) {
+ gain = previous_gain_;
+ rtc::ArrayView<float> channel_view(audio_buffer.channels()[channel],
+ samples_per_channel_);
+ for (float& sample : channel_view) {
+ gain = std::max(gain + increment, target_gain_);
+ sample *= gain;
+ }
+ }
+ }
+ }
+ previous_gain_ = target_gain_;
+
+ // Saturate the samples to be in the S16 range.
+ for (size_t channel = 0; channel < audio_buffer.num_channels(); ++channel) {
+ rtc::ArrayView<float> channel_view(audio_buffer.channels()[channel],
+ samples_per_channel_);
+ for (float& sample : channel_view) {
+ constexpr float kMinFloatS16Value = -32768.f;
+ constexpr float kMaxFloatS16Value = 32767.f;
+ sample = rtc::SafeClamp(sample, kMinFloatS16Value, kMaxFloatS16Value);
+ }
+ }
+}
+
+} // namespace webrtc
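A worked sketch of the ramping branch above, with hypothetical values previous_gain_ = 1, target_gain_ = 2, and 480 samples per channel:

#include <algorithm>  // std::min

float gain = 1.f;                             // previous_gain_
const float increment = (2.f - 1.f) / 480.f;  // (target - previous) / N
for (int k = 0; k < 480; ++k) {
  gain = std::min(gain + increment, 2.f);     // clamps at the target
  // sample *= gain;  // the last sample is scaled by (approximately) 2.f
}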
diff --git a/third_party/libwebrtc/modules/audio_processing/capture_levels_adjuster/audio_samples_scaler.h b/third_party/libwebrtc/modules/audio_processing/capture_levels_adjuster/audio_samples_scaler.h
new file mode 100644
index 0000000000..2ae8533940
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/capture_levels_adjuster/audio_samples_scaler.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_CAPTURE_LEVELS_ADJUSTER_AUDIO_SAMPLES_SCALER_H_
+#define MODULES_AUDIO_PROCESSING_CAPTURE_LEVELS_ADJUSTER_AUDIO_SAMPLES_SCALER_H_
+
+#include <stddef.h>
+
+#include "modules/audio_processing/audio_buffer.h"
+
+namespace webrtc {
+
+// Handles and applies a gain to the samples in an audio buffer.
+// The gain is applied to each sample, and any changes in the gain take effect
+// gradually (in a linear manner) over one frame.
+class AudioSamplesScaler {
+ public:
+  // C-tor. The supplied `initial_gain` is applied immediately at the first
+  // call to Process(); in contrast to a gain supplied via SetGain(), there is
+  // no gradual ramp to the `initial_gain`.
+ explicit AudioSamplesScaler(float initial_gain);
+ AudioSamplesScaler(const AudioSamplesScaler&) = delete;
+ AudioSamplesScaler& operator=(const AudioSamplesScaler&) = delete;
+
+ // Applies the specified gain to the audio in `audio_buffer`.
+ void Process(AudioBuffer& audio_buffer);
+
+ // Sets the gain to apply to each sample.
+ void SetGain(float gain) { target_gain_ = gain; }
+
+ private:
+ float previous_gain_ = 1.f;
+ float target_gain_ = 1.f;
+ int samples_per_channel_ = -1;
+ float one_by_samples_per_channel_ = -1.f;
+};
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_CAPTURE_LEVELS_ADJUSTER_AUDIO_SAMPLES_SCALER_H_
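A minimal usage sketch, assuming `buffer` is a valid AudioBuffer that is refilled between calls:

AudioSamplesScaler scaler(/*initial_gain=*/1.f);
scaler.Process(buffer);  // applies 1.0 immediately (no ramp for initial_gain)
scaler.SetGain(0.5f);
scaler.Process(buffer);  // ramps linearly from 1.0 to 0.5 within this frame
scaler.Process(buffer);  // applies a constant 0.5 from here on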
diff --git a/third_party/libwebrtc/modules/audio_processing/capture_levels_adjuster/audio_samples_scaler_unittest.cc b/third_party/libwebrtc/modules/audio_processing/capture_levels_adjuster/audio_samples_scaler_unittest.cc
new file mode 100644
index 0000000000..6e5fc2cbe3
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/capture_levels_adjuster/audio_samples_scaler_unittest.cc
@@ -0,0 +1,204 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/audio_processing/capture_levels_adjuster/audio_samples_scaler.h"
+
+#include <tuple>
+
+#include "modules/audio_processing/test/audio_buffer_tools.h"
+#include "rtc_base/strings/string_builder.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+float SampleValueForChannel(int channel) {
+ constexpr float kSampleBaseValue = 100.f;
+ constexpr float kSampleChannelOffset = 1.f;
+ return kSampleBaseValue + channel * kSampleChannelOffset;
+}
+
+void PopulateBuffer(AudioBuffer& audio_buffer) {
+ for (size_t ch = 0; ch < audio_buffer.num_channels(); ++ch) {
+ test::FillBufferChannel(SampleValueForChannel(ch), ch, audio_buffer);
+ }
+}
+
+constexpr int kNumFramesToProcess = 10;
+
+class AudioSamplesScalerTest
+ : public ::testing::Test,
+ public ::testing::WithParamInterface<std::tuple<int, int, float>> {
+ protected:
+ int sample_rate_hz() const { return std::get<0>(GetParam()); }
+ int num_channels() const { return std::get<1>(GetParam()); }
+ float initial_gain() const { return std::get<2>(GetParam()); }
+};
+
+INSTANTIATE_TEST_SUITE_P(
+ AudioSamplesScalerTestSuite,
+ AudioSamplesScalerTest,
+ ::testing::Combine(::testing::Values(16000, 32000, 48000),
+ ::testing::Values(1, 2, 4),
+ ::testing::Values(0.1f, 1.f, 2.f, 4.f)));
+
+TEST_P(AudioSamplesScalerTest, InitialGainIsRespected) {
+ AudioSamplesScaler scaler(initial_gain());
+
+ AudioBuffer audio_buffer(sample_rate_hz(), num_channels(), sample_rate_hz(),
+ num_channels(), sample_rate_hz(), num_channels());
+
+ for (int frame = 0; frame < kNumFramesToProcess; ++frame) {
+ PopulateBuffer(audio_buffer);
+ scaler.Process(audio_buffer);
+ for (int ch = 0; ch < num_channels(); ++ch) {
+ for (size_t i = 0; i < audio_buffer.num_frames(); ++i) {
+ EXPECT_FLOAT_EQ(audio_buffer.channels_const()[ch][i],
+ initial_gain() * SampleValueForChannel(ch));
+ }
+ }
+ }
+}
+
+TEST_P(AudioSamplesScalerTest, VerifyGainAdjustment) {
+ const float higher_gain = initial_gain();
+ const float lower_gain = higher_gain / 2.f;
+
+ AudioSamplesScaler scaler(lower_gain);
+
+ AudioBuffer audio_buffer(sample_rate_hz(), num_channels(), sample_rate_hz(),
+ num_channels(), sample_rate_hz(), num_channels());
+
+  // Allow the initial, lower, gain to take effect.
+ PopulateBuffer(audio_buffer);
+
+ scaler.Process(audio_buffer);
+
+ // Set the new, higher, gain.
+ scaler.SetGain(higher_gain);
+
+ // Ensure that the new, higher, gain is achieved gradually over one frame.
+ PopulateBuffer(audio_buffer);
+
+ scaler.Process(audio_buffer);
+ for (int ch = 0; ch < num_channels(); ++ch) {
+ for (size_t i = 0; i < audio_buffer.num_frames() - 1; ++i) {
+ EXPECT_LT(audio_buffer.channels_const()[ch][i],
+ higher_gain * SampleValueForChannel(ch));
+ EXPECT_LE(audio_buffer.channels_const()[ch][i],
+ audio_buffer.channels_const()[ch][i + 1]);
+ }
+ EXPECT_LE(audio_buffer.channels_const()[ch][audio_buffer.num_frames() - 1],
+ higher_gain * SampleValueForChannel(ch));
+ }
+
+  // Ensure that the new, higher, gain is achieved and stays unchanged.
+ for (int frame = 0; frame < kNumFramesToProcess; ++frame) {
+ PopulateBuffer(audio_buffer);
+ scaler.Process(audio_buffer);
+
+ for (int ch = 0; ch < num_channels(); ++ch) {
+ for (size_t i = 0; i < audio_buffer.num_frames(); ++i) {
+ EXPECT_FLOAT_EQ(audio_buffer.channels_const()[ch][i],
+ higher_gain * SampleValueForChannel(ch));
+ }
+ }
+ }
+
+ // Set the new, lower, gain.
+ scaler.SetGain(lower_gain);
+
+ // Ensure that the new, lower, gain is achieved gradually over one frame.
+ PopulateBuffer(audio_buffer);
+ scaler.Process(audio_buffer);
+ for (int ch = 0; ch < num_channels(); ++ch) {
+ for (size_t i = 0; i < audio_buffer.num_frames() - 1; ++i) {
+ EXPECT_GT(audio_buffer.channels_const()[ch][i],
+ lower_gain * SampleValueForChannel(ch));
+ EXPECT_GE(audio_buffer.channels_const()[ch][i],
+ audio_buffer.channels_const()[ch][i + 1]);
+ }
+ EXPECT_GE(audio_buffer.channels_const()[ch][audio_buffer.num_frames() - 1],
+ lower_gain * SampleValueForChannel(ch));
+ }
+
+  // Ensure that the new, lower, gain is achieved and stays unchanged.
+ for (int frame = 0; frame < kNumFramesToProcess; ++frame) {
+ PopulateBuffer(audio_buffer);
+ scaler.Process(audio_buffer);
+
+ for (int ch = 0; ch < num_channels(); ++ch) {
+ for (size_t i = 0; i < audio_buffer.num_frames(); ++i) {
+ EXPECT_FLOAT_EQ(audio_buffer.channels_const()[ch][i],
+ lower_gain * SampleValueForChannel(ch));
+ }
+ }
+ }
+}
+
+TEST(AudioSamplesScaler, UpwardsClamping) {
+ constexpr int kSampleRateHz = 48000;
+ constexpr int kNumChannels = 1;
+ constexpr float kGain = 10.f;
+ constexpr float kMaxClampedSampleValue = 32767.f;
+ static_assert(kGain > 1.f, "");
+
+ AudioSamplesScaler scaler(kGain);
+
+ AudioBuffer audio_buffer(kSampleRateHz, kNumChannels, kSampleRateHz,
+ kNumChannels, kSampleRateHz, kNumChannels);
+
+ for (int frame = 0; frame < kNumFramesToProcess; ++frame) {
+ for (size_t ch = 0; ch < audio_buffer.num_channels(); ++ch) {
+ test::FillBufferChannel(
+ kMaxClampedSampleValue - audio_buffer.num_channels() + 1.f + ch, ch,
+ audio_buffer);
+ }
+
+ scaler.Process(audio_buffer);
+ for (int ch = 0; ch < kNumChannels; ++ch) {
+ for (size_t i = 0; i < audio_buffer.num_frames(); ++i) {
+ EXPECT_FLOAT_EQ(audio_buffer.channels_const()[ch][i],
+ kMaxClampedSampleValue);
+ }
+ }
+ }
+}
+
+TEST(AudioSamplesScaler, DownwardsClamping) {
+ constexpr int kSampleRateHz = 48000;
+ constexpr int kNumChannels = 1;
+ constexpr float kGain = 10.f;
+ constexpr float kMinClampedSampleValue = -32768.f;
+ static_assert(kGain > 1.f, "");
+
+ AudioSamplesScaler scaler(kGain);
+
+ AudioBuffer audio_buffer(kSampleRateHz, kNumChannels, kSampleRateHz,
+ kNumChannels, kSampleRateHz, kNumChannels);
+
+ for (int frame = 0; frame < kNumFramesToProcess; ++frame) {
+ for (size_t ch = 0; ch < audio_buffer.num_channels(); ++ch) {
+ test::FillBufferChannel(
+ kMinClampedSampleValue + audio_buffer.num_channels() - 1.f + ch, ch,
+ audio_buffer);
+ }
+
+ scaler.Process(audio_buffer);
+ for (int ch = 0; ch < kNumChannels; ++ch) {
+ for (size_t i = 0; i < audio_buffer.num_frames(); ++i) {
+ EXPECT_FLOAT_EQ(audio_buffer.channels_const()[ch][i],
+ kMinClampedSampleValue);
+ }
+ }
+ }
+}
+
+} // namespace
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/capture_levels_adjuster/capture_levels_adjuster.cc b/third_party/libwebrtc/modules/audio_processing/capture_levels_adjuster/capture_levels_adjuster.cc
new file mode 100644
index 0000000000..dfda582915
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/capture_levels_adjuster/capture_levels_adjuster.cc
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/audio_processing/capture_levels_adjuster/capture_levels_adjuster.h"
+
+#include "modules/audio_processing/audio_buffer.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/numerics/safe_minmax.h"
+
+namespace webrtc {
+
+namespace {
+
+constexpr int kMinAnalogMicGainLevel = 0;
+constexpr int kMaxAnalogMicGainLevel = 255;
+
+float ComputeLevelBasedGain(int emulated_analog_mic_gain_level) {
+ static_assert(
+ kMinAnalogMicGainLevel == 0,
+ "The minimum gain level must be 0 for the maths below to work.");
+  static_assert(kMaxAnalogMicGainLevel > 0,
+                "The maximum gain level must be larger than 0 for the maths "
+                "below to work.");
+ constexpr float kGainToLevelMultiplier = 1.f / kMaxAnalogMicGainLevel;
+
+ RTC_DCHECK_GE(emulated_analog_mic_gain_level, kMinAnalogMicGainLevel);
+ RTC_DCHECK_LE(emulated_analog_mic_gain_level, kMaxAnalogMicGainLevel);
+ return kGainToLevelMultiplier * emulated_analog_mic_gain_level;
+}
+
+float ComputePreGain(float pre_gain,
+ int emulated_analog_mic_gain_level,
+ bool emulated_analog_mic_gain_enabled) {
+ return emulated_analog_mic_gain_enabled
+ ? pre_gain * ComputeLevelBasedGain(emulated_analog_mic_gain_level)
+ : pre_gain;
+}
+
+} // namespace
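Worked numbers for the helpers above (values chosen for illustration): level 255 maps to a factor of 1.0, level 128 to 128/255 ≈ 0.502, and level 0 to 0.0, so with emulation enabled:

constexpr float kCombined = 2.f * (128.f / 255.f);  // pre_gain=2, level=128 -> ~1.004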
+
+CaptureLevelsAdjuster::CaptureLevelsAdjuster(
+ bool emulated_analog_mic_gain_enabled,
+ int emulated_analog_mic_gain_level,
+ float pre_gain,
+ float post_gain)
+ : emulated_analog_mic_gain_enabled_(emulated_analog_mic_gain_enabled),
+ emulated_analog_mic_gain_level_(emulated_analog_mic_gain_level),
+ pre_gain_(pre_gain),
+ pre_adjustment_gain_(ComputePreGain(pre_gain_,
+ emulated_analog_mic_gain_level_,
+ emulated_analog_mic_gain_enabled_)),
+ pre_scaler_(pre_adjustment_gain_),
+ post_scaler_(post_gain) {}
+
+void CaptureLevelsAdjuster::ApplyPreLevelAdjustment(AudioBuffer& audio_buffer) {
+ pre_scaler_.Process(audio_buffer);
+}
+
+void CaptureLevelsAdjuster::ApplyPostLevelAdjustment(
+ AudioBuffer& audio_buffer) {
+ post_scaler_.Process(audio_buffer);
+}
+
+void CaptureLevelsAdjuster::SetPreGain(float pre_gain) {
+ pre_gain_ = pre_gain;
+ UpdatePreAdjustmentGain();
+}
+
+void CaptureLevelsAdjuster::SetPostGain(float post_gain) {
+ post_scaler_.SetGain(post_gain);
+}
+
+void CaptureLevelsAdjuster::SetAnalogMicGainLevel(int level) {
+ RTC_DCHECK_GE(level, kMinAnalogMicGainLevel);
+ RTC_DCHECK_LE(level, kMaxAnalogMicGainLevel);
+ int clamped_level =
+ rtc::SafeClamp(level, kMinAnalogMicGainLevel, kMaxAnalogMicGainLevel);
+
+ emulated_analog_mic_gain_level_ = clamped_level;
+ UpdatePreAdjustmentGain();
+}
+
+void CaptureLevelsAdjuster::UpdatePreAdjustmentGain() {
+ pre_adjustment_gain_ =
+ ComputePreGain(pre_gain_, emulated_analog_mic_gain_level_,
+ emulated_analog_mic_gain_enabled_);
+ pre_scaler_.SetGain(pre_adjustment_gain_);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/capture_levels_adjuster/capture_levels_adjuster.h b/third_party/libwebrtc/modules/audio_processing/capture_levels_adjuster/capture_levels_adjuster.h
new file mode 100644
index 0000000000..38b68ad06c
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/capture_levels_adjuster/capture_levels_adjuster.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef MODULES_AUDIO_PROCESSING_CAPTURE_LEVELS_ADJUSTER_CAPTURE_LEVELS_ADJUSTER_H_
+#define MODULES_AUDIO_PROCESSING_CAPTURE_LEVELS_ADJUSTER_CAPTURE_LEVELS_ADJUSTER_H_
+
+#include <stddef.h>
+
+#include "modules/audio_processing/audio_buffer.h"
+#include "modules/audio_processing/capture_levels_adjuster/audio_samples_scaler.h"
+
+namespace webrtc {
+
+// Adjusts the level of the capture signal before and after all capture-side
+// processing is done using a combination of explicitly specified gains
+// and an emulated analog gain functionality where a specified analog level
+// results in an additional gain. The pre-adjustment is achieved by combining
+// the gain value `pre_gain` and the level `emulated_analog_mic_gain_level` to
+// form a combined gain of `pre_gain`*`emulated_analog_mic_gain_level`/255 which
+// is applied to each sample. The `emulated_analog_mic_gain_level` is intended
+// to be controlled by the analog AGC functionality and to produce an emulated
+// analog mic gain equal to `emulated_analog_mic_gain_level`/255. The post
+// level adjustment is achieved by multiplying each sample with the value of
+// `post_gain`. Any changes in the gains are applied smoothly over one frame,
+// and the scaled samples are clamped to fit into the allowed S16 sample range.
+class CaptureLevelsAdjuster {
+ public:
+ // C-tor. The values for the level and the gains must fulfill
+ // 0 <= emulated_analog_mic_gain_level <= 255.
+ // 0.f <= pre_gain.
+ // 0.f <= post_gain.
+ CaptureLevelsAdjuster(bool emulated_analog_mic_gain_enabled,
+ int emulated_analog_mic_gain_level,
+ float pre_gain,
+ float post_gain);
+ CaptureLevelsAdjuster(const CaptureLevelsAdjuster&) = delete;
+ CaptureLevelsAdjuster& operator=(const CaptureLevelsAdjuster&) = delete;
+
+ // Adjusts the level of the signal. This should be called before any of the
+ // other processing is performed.
+ void ApplyPreLevelAdjustment(AudioBuffer& audio_buffer);
+
+  // Adjusts the level of the signal. This should be called after all of the
+  // other processing has been performed.
+ void ApplyPostLevelAdjustment(AudioBuffer& audio_buffer);
+
+ // Sets the gain to apply to each sample before any of the other processing is
+ // performed.
+ void SetPreGain(float pre_gain);
+
+  // Returns the total pre-adjustment gain applied to each sample before any of
+  // the other processing is performed, comprising both the pre_gain and the
+  // gain from the emulated analog mic.
+ float GetPreAdjustmentGain() const { return pre_adjustment_gain_; }
+
+  // Sets the gain to apply to each sample after all of the other processing
+  // has been performed.
+ void SetPostGain(float post_gain);
+
+ // Sets the analog gain level to use for the emulated analog gain.
+ // `level` must be in the range [0...255].
+ void SetAnalogMicGainLevel(int level);
+
+ // Returns the current analog gain level used for the emulated analog gain.
+ int GetAnalogMicGainLevel() const { return emulated_analog_mic_gain_level_; }
+
+ private:
+ // Updates the value of `pre_adjustment_gain_` based on the supplied values
+ // for `pre_gain` and `emulated_analog_mic_gain_level_`.
+ void UpdatePreAdjustmentGain();
+
+ const bool emulated_analog_mic_gain_enabled_;
+ int emulated_analog_mic_gain_level_;
+ float pre_gain_;
+ float pre_adjustment_gain_;
+ AudioSamplesScaler pre_scaler_;
+ AudioSamplesScaler post_scaler_;
+};
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_CAPTURE_LEVELS_ADJUSTER_CAPTURE_LEVELS_ADJUSTER_H_
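A hedged usage sketch of the class in a capture path (the placement relative to the other processing is assumed, and `buffer` is a hypothetical AudioBuffer):

CaptureLevelsAdjuster adjuster(/*emulated_analog_mic_gain_enabled=*/true,
                               /*emulated_analog_mic_gain_level=*/255,
                               /*pre_gain=*/1.f,
                               /*post_gain=*/1.f);
adjuster.ApplyPreLevelAdjustment(buffer);   // before AEC/NS/AGC
// ... the rest of the capture-side processing runs on `buffer` ...
adjuster.SetAnalogMicGainLevel(128);        // e.g. driven by the analog AGC
adjuster.ApplyPostLevelAdjustment(buffer);  // after all other processing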
diff --git a/third_party/libwebrtc/modules/audio_processing/capture_levels_adjuster/capture_levels_adjuster_gn/moz.build b/third_party/libwebrtc/modules/audio_processing/capture_levels_adjuster/capture_levels_adjuster_gn/moz.build
new file mode 100644
index 0000000000..ded0d721d5
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/capture_levels_adjuster/capture_levels_adjuster_gn/moz.build
@@ -0,0 +1,213 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/audio_processing/capture_levels_adjuster/audio_samples_scaler.cc",
+ "/third_party/libwebrtc/modules/audio_processing/capture_levels_adjuster/capture_levels_adjuster.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("capture_levels_adjuster_gn")
diff --git a/third_party/libwebrtc/modules/audio_processing/capture_levels_adjuster/capture_levels_adjuster_unittest.cc b/third_party/libwebrtc/modules/audio_processing/capture_levels_adjuster/capture_levels_adjuster_unittest.cc
new file mode 100644
index 0000000000..1183441a14
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/capture_levels_adjuster/capture_levels_adjuster_unittest.cc
@@ -0,0 +1,187 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/audio_processing/capture_levels_adjuster/capture_levels_adjuster.h"
+
+#include <algorithm>
+#include <tuple>
+
+#include "modules/audio_processing/test/audio_buffer_tools.h"
+#include "rtc_base/strings/string_builder.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+float SampleValueForChannel(int channel) {
+ constexpr float kSampleBaseValue = 100.f;
+ constexpr float kSampleChannelOffset = 1.f;
+ return kSampleBaseValue + channel * kSampleChannelOffset;
+}
+
+void PopulateBuffer(AudioBuffer& audio_buffer) {
+ for (size_t ch = 0; ch < audio_buffer.num_channels(); ++ch) {
+ test::FillBufferChannel(SampleValueForChannel(ch), ch, audio_buffer);
+ }
+}
+
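+// Computes the gain expected after the pre-level adjustment. With the
+// emulated analog mic gain enabled, the gain level is mapped linearly onto
+// [0, 1]: e.g. level 21 with a pre_gain of 1.0 yields 21 / 255, roughly
+// 0.082 (level 21 is one of the parameterized test values below).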
+float ComputeExpectedSignalGainAfterApplyPreLevelAdjustment(
+ bool emulated_analog_mic_gain_enabled,
+ int emulated_analog_mic_gain_level,
+ float pre_gain) {
+ if (!emulated_analog_mic_gain_enabled) {
+ return pre_gain;
+ }
+ return pre_gain * std::min(emulated_analog_mic_gain_level, 255) / 255.f;
+}
+
+float ComputeExpectedSignalGainAfterApplyPostLevelAdjustment(
+ bool emulated_analog_mic_gain_enabled,
+ int emulated_analog_mic_gain_level,
+ float pre_gain,
+ float post_gain) {
+ return post_gain * ComputeExpectedSignalGainAfterApplyPreLevelAdjustment(
+ emulated_analog_mic_gain_enabled,
+ emulated_analog_mic_gain_level, pre_gain);
+}
+
+constexpr int kNumFramesToProcess = 10;
+
+class CaptureLevelsAdjusterTest
+ : public ::testing::Test,
+ public ::testing::WithParamInterface<
+ std::tuple<int, int, bool, int, float, float>> {
+ protected:
+ int sample_rate_hz() const { return std::get<0>(GetParam()); }
+ int num_channels() const { return std::get<1>(GetParam()); }
+ bool emulated_analog_mic_gain_enabled() const {
+ return std::get<2>(GetParam());
+ }
+ int emulated_analog_mic_gain_level() const { return std::get<3>(GetParam()); }
+ float pre_gain() const { return std::get<4>(GetParam()); }
+ float post_gain() const { return std::get<5>(GetParam()); }
+};
+
+INSTANTIATE_TEST_SUITE_P(
+ CaptureLevelsAdjusterTestSuite,
+ CaptureLevelsAdjusterTest,
+ ::testing::Combine(::testing::Values(16000, 32000, 48000),
+ ::testing::Values(1, 2, 4),
+ ::testing::Values(false, true),
+ ::testing::Values(21, 255),
+ ::testing::Values(0.1f, 1.f, 4.f),
+ ::testing::Values(0.1f, 1.f, 4.f)));
+
+TEST_P(CaptureLevelsAdjusterTest, InitialGainIsInstantlyAchieved) {
+ CaptureLevelsAdjuster adjuster(emulated_analog_mic_gain_enabled(),
+ emulated_analog_mic_gain_level(), pre_gain(),
+ post_gain());
+
+ AudioBuffer audio_buffer(sample_rate_hz(), num_channels(), sample_rate_hz(),
+ num_channels(), sample_rate_hz(), num_channels());
+
+ const float expected_signal_gain_after_pre_gain =
+ ComputeExpectedSignalGainAfterApplyPreLevelAdjustment(
+ emulated_analog_mic_gain_enabled(), emulated_analog_mic_gain_level(),
+ pre_gain());
+ const float expected_signal_gain_after_post_level_adjustment =
+ ComputeExpectedSignalGainAfterApplyPostLevelAdjustment(
+ emulated_analog_mic_gain_enabled(), emulated_analog_mic_gain_level(),
+ pre_gain(), post_gain());
+
+ for (int frame = 0; frame < kNumFramesToProcess; ++frame) {
+ PopulateBuffer(audio_buffer);
+ adjuster.ApplyPreLevelAdjustment(audio_buffer);
+ EXPECT_FLOAT_EQ(adjuster.GetPreAdjustmentGain(),
+ expected_signal_gain_after_pre_gain);
+
+ for (int ch = 0; ch < num_channels(); ++ch) {
+ for (size_t i = 0; i < audio_buffer.num_frames(); ++i) {
+ EXPECT_FLOAT_EQ(
+ audio_buffer.channels_const()[ch][i],
+ expected_signal_gain_after_pre_gain * SampleValueForChannel(ch));
+ }
+ }
+ adjuster.ApplyPostLevelAdjustment(audio_buffer);
+ for (int ch = 0; ch < num_channels(); ++ch) {
+ for (size_t i = 0; i < audio_buffer.num_frames(); ++i) {
+ EXPECT_FLOAT_EQ(audio_buffer.channels_const()[ch][i],
+ expected_signal_gain_after_post_level_adjustment *
+ SampleValueForChannel(ch));
+ }
+ }
+ }
+}
+
+TEST_P(CaptureLevelsAdjusterTest, NewGainsAreAchieved) {
+ const int lower_emulated_analog_mic_gain_level =
+ emulated_analog_mic_gain_level();
+ const float lower_pre_gain = pre_gain();
+ const float lower_post_gain = post_gain();
+ const int higher_emulated_analog_mic_gain_level =
+ std::min(lower_emulated_analog_mic_gain_level * 2, 255);
+ const float higher_pre_gain = lower_pre_gain * 2.f;
+ const float higher_post_gain = lower_post_gain * 2.f;
+
+ CaptureLevelsAdjuster adjuster(emulated_analog_mic_gain_enabled(),
+ lower_emulated_analog_mic_gain_level,
+ lower_pre_gain, lower_post_gain);
+
+ AudioBuffer audio_buffer(sample_rate_hz(), num_channels(), sample_rate_hz(),
+ num_channels(), sample_rate_hz(), num_channels());
+
+ const float expected_signal_gain_after_pre_gain =
+ ComputeExpectedSignalGainAfterApplyPreLevelAdjustment(
+ emulated_analog_mic_gain_enabled(),
+ higher_emulated_analog_mic_gain_level, higher_pre_gain);
+ const float expected_signal_gain_after_post_level_adjustment =
+ ComputeExpectedSignalGainAfterApplyPostLevelAdjustment(
+ emulated_analog_mic_gain_enabled(),
+ higher_emulated_analog_mic_gain_level, higher_pre_gain,
+ higher_post_gain);
+
+ adjuster.SetPreGain(higher_pre_gain);
+ adjuster.SetPostGain(higher_post_gain);
+ adjuster.SetAnalogMicGainLevel(higher_emulated_analog_mic_gain_level);
+
+ PopulateBuffer(audio_buffer);
+ adjuster.ApplyPreLevelAdjustment(audio_buffer);
+ adjuster.ApplyPostLevelAdjustment(audio_buffer);
+ EXPECT_EQ(adjuster.GetAnalogMicGainLevel(),
+ higher_emulated_analog_mic_gain_level);
+
+ for (int frame = 1; frame < kNumFramesToProcess; ++frame) {
+ PopulateBuffer(audio_buffer);
+ adjuster.ApplyPreLevelAdjustment(audio_buffer);
+ EXPECT_FLOAT_EQ(adjuster.GetPreAdjustmentGain(),
+ expected_signal_gain_after_pre_gain);
+ for (int ch = 0; ch < num_channels(); ++ch) {
+ for (size_t i = 0; i < audio_buffer.num_frames(); ++i) {
+ EXPECT_FLOAT_EQ(
+ audio_buffer.channels_const()[ch][i],
+ expected_signal_gain_after_pre_gain * SampleValueForChannel(ch));
+ }
+ }
+
+ adjuster.ApplyPostLevelAdjustment(audio_buffer);
+ for (int ch = 0; ch < num_channels(); ++ch) {
+ for (size_t i = 0; i < audio_buffer.num_frames(); ++i) {
+ EXPECT_FLOAT_EQ(audio_buffer.channels_const()[ch][i],
+ expected_signal_gain_after_post_level_adjustment *
+ SampleValueForChannel(ch));
+ }
+ }
+
+ EXPECT_EQ(adjuster.GetAnalogMicGainLevel(),
+ higher_emulated_analog_mic_gain_level);
+ }
+}
+
+} // namespace
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/debug.proto b/third_party/libwebrtc/modules/audio_processing/debug.proto
new file mode 100644
index 0000000000..4bc1a52160
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/debug.proto
@@ -0,0 +1,115 @@
+syntax = "proto2";
+option optimize_for = LITE_RUNTIME;
+package webrtc.audioproc;
+
+// Contains the format of input/output/reverse audio. An Init message is added
+// when any of the fields are changed.
+message Init {
+ optional int32 sample_rate = 1;
+ optional int32 device_sample_rate = 2 [deprecated=true];
+ optional int32 num_input_channels = 3;
+ optional int32 num_output_channels = 4;
+ optional int32 num_reverse_channels = 5;
+ optional int32 reverse_sample_rate = 6;
+ optional int32 output_sample_rate = 7;
+ optional int32 reverse_output_sample_rate = 8;
+ optional int32 num_reverse_output_channels = 9;
+ optional int64 timestamp_ms = 10;
+}
+
+// May contain interleaved or deinterleaved data, but don't store both formats.
+message ReverseStream {
+ // int16 interleaved data.
+ optional bytes data = 1;
+
+ // float deinterleaved data, where each repeated element points to a single
+ // channel buffer of data.
+ repeated bytes channel = 2;
+}
+
+// May contain interleaved or deinterleaved data, but don't store both formats.
+message Stream {
+ // int16 interleaved data.
+ optional bytes input_data = 1;
+ optional bytes output_data = 2;
+
+ optional int32 delay = 3;
+ optional sint32 drift = 4;
+ optional int32 level = 5;
+ optional bool keypress = 6;
+
+ // float deinterleaved data, where each repeated element points to a single
+ // channel buffer of data.
+ repeated bytes input_channel = 7;
+ repeated bytes output_channel = 8;
+}
+
+// Contains the configurations of various APM components. A Config message is
+// added when any of the fields are changed.
+message Config {
+ // Acoustic echo canceler.
+ optional bool aec_enabled = 1;
+ optional bool aec_delay_agnostic_enabled = 2;
+ optional bool aec_drift_compensation_enabled = 3;
+ optional bool aec_extended_filter_enabled = 4;
+ optional int32 aec_suppression_level = 5;
+ // Mobile AEC.
+ optional bool aecm_enabled = 6;
+ optional bool aecm_comfort_noise_enabled = 7 [deprecated = true];
+ optional int32 aecm_routing_mode = 8 [deprecated = true];
+ // Automatic gain controller.
+ optional bool agc_enabled = 9;
+ optional int32 agc_mode = 10;
+ optional bool agc_limiter_enabled = 11;
+ optional bool noise_robust_agc_enabled = 12;
+ // High pass filter.
+ optional bool hpf_enabled = 13;
+ // Noise suppression.
+ optional bool ns_enabled = 14;
+ optional int32 ns_level = 15;
+ // Transient suppression.
+ optional bool transient_suppression_enabled = 16;
+ // Semicolon-separated string containing experimental feature
+ // descriptions.
+ optional string experiments_description = 17;
+ reserved 18; // Intelligibility enhancer enabled (deprecated).
+ // Pre amplifier.
+ optional bool pre_amplifier_enabled = 19;
+ optional float pre_amplifier_fixed_gain_factor = 20;
+
+ // Next field number 21.
+}
+
+message PlayoutAudioDeviceInfo {
+ optional int32 id = 1;
+ optional int32 max_volume = 2;
+}
+
+message RuntimeSetting {
+ optional float capture_pre_gain = 1;
+ optional float custom_render_processing_setting = 2;
+ optional float capture_fixed_post_gain = 3;
+ optional int32 playout_volume_change = 4;
+ optional PlayoutAudioDeviceInfo playout_audio_device_change = 5;
+ optional bool capture_output_used = 6;
+ optional float capture_post_gain = 7;
+}
+
+message Event {
+ enum Type {
+ INIT = 0;
+ REVERSE_STREAM = 1;
+ STREAM = 2;
+ CONFIG = 3;
+ UNKNOWN_EVENT = 4;
+ RUNTIME_SETTING = 5;
+ }
+
+ required Type type = 1;
+
+ optional Init init = 2;
+ optional ReverseStream reverse_stream = 3;
+ optional Stream stream = 4;
+ optional Config config = 5;
+ optional RuntimeSetting runtime_setting = 6;
+}
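+
+// Note: APM debug dumps are conventionally stored as a sequence of Event
+// messages, each prefixed by its serialized size. That framing is an
+// assumption about the surrounding tooling, not something this schema
+// enforces.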
diff --git a/third_party/libwebrtc/modules/audio_processing/echo_control_mobile_bit_exact_unittest.cc b/third_party/libwebrtc/modules/audio_processing/echo_control_mobile_bit_exact_unittest.cc
new file mode 100644
index 0000000000..f351811e08
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/echo_control_mobile_bit_exact_unittest.cc
@@ -0,0 +1,221 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include <vector>
+
+#include "api/array_view.h"
+#include "modules/audio_processing/audio_buffer.h"
+#include "modules/audio_processing/echo_control_mobile_impl.h"
+#include "modules/audio_processing/test/audio_buffer_tools.h"
+#include "modules/audio_processing/test/bitexactness_tools.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+// TODO(peah): Increase the number of frames to process when the issue of
+// non-repeatable test results has been resolved.
+const int kNumFramesToProcess = 200;
+
+void SetupComponent(int sample_rate_hz,
+ EchoControlMobileImpl::RoutingMode routing_mode,
+ bool comfort_noise_enabled,
+ EchoControlMobileImpl* echo_control_mobile) {
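+  // AECM only supports sample rates up to 16 kHz (see
+  // EchoControlMobileImpl::Initialize), so higher rates are clamped here and
+  // only the lower band is processed.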
+ echo_control_mobile->Initialize(
+ sample_rate_hz > 16000 ? 16000 : sample_rate_hz, 1, 1);
+ echo_control_mobile->set_routing_mode(routing_mode);
+ echo_control_mobile->enable_comfort_noise(comfort_noise_enabled);
+}
+
+void ProcessOneFrame(int sample_rate_hz,
+ int stream_delay_ms,
+ AudioBuffer* render_audio_buffer,
+ AudioBuffer* capture_audio_buffer,
+ EchoControlMobileImpl* echo_control_mobile) {
+ if (sample_rate_hz > AudioProcessing::kSampleRate16kHz) {
+ render_audio_buffer->SplitIntoFrequencyBands();
+ capture_audio_buffer->SplitIntoFrequencyBands();
+ }
+
+ std::vector<int16_t> render_audio;
+ EchoControlMobileImpl::PackRenderAudioBuffer(
+ render_audio_buffer, 1, render_audio_buffer->num_channels(),
+ &render_audio);
+ echo_control_mobile->ProcessRenderAudio(render_audio);
+
+ echo_control_mobile->ProcessCaptureAudio(capture_audio_buffer,
+ stream_delay_ms);
+
+ if (sample_rate_hz > AudioProcessing::kSampleRate16kHz) {
+ capture_audio_buffer->MergeFrequencyBands();
+ }
+}
+
+void RunBitexactnessTest(int sample_rate_hz,
+ size_t num_channels,
+ int stream_delay_ms,
+ EchoControlMobileImpl::RoutingMode routing_mode,
+ bool comfort_noise_enabled,
+ const rtc::ArrayView<const float>& output_reference) {
+ EchoControlMobileImpl echo_control_mobile;
+ SetupComponent(sample_rate_hz, routing_mode, comfort_noise_enabled,
+ &echo_control_mobile);
+
+ const int samples_per_channel = rtc::CheckedDivExact(sample_rate_hz, 100);
+ const StreamConfig render_config(sample_rate_hz, num_channels);
+ AudioBuffer render_buffer(
+ render_config.sample_rate_hz(), render_config.num_channels(),
+ render_config.sample_rate_hz(), 1, render_config.sample_rate_hz(), 1);
+ test::InputAudioFile render_file(
+ test::GetApmRenderTestVectorFileName(sample_rate_hz));
+ std::vector<float> render_input(samples_per_channel * num_channels);
+
+ const StreamConfig capture_config(sample_rate_hz, num_channels);
+ AudioBuffer capture_buffer(
+ capture_config.sample_rate_hz(), capture_config.num_channels(),
+ capture_config.sample_rate_hz(), 1, capture_config.sample_rate_hz(), 1);
+ test::InputAudioFile capture_file(
+ test::GetApmCaptureTestVectorFileName(sample_rate_hz));
+ std::vector<float> capture_input(samples_per_channel * num_channels);
+
+ for (int frame_no = 0; frame_no < kNumFramesToProcess; ++frame_no) {
+ ReadFloatSamplesFromStereoFile(samples_per_channel, num_channels,
+ &render_file, render_input);
+ ReadFloatSamplesFromStereoFile(samples_per_channel, num_channels,
+ &capture_file, capture_input);
+
+ test::CopyVectorToAudioBuffer(render_config, render_input, &render_buffer);
+ test::CopyVectorToAudioBuffer(capture_config, capture_input,
+ &capture_buffer);
+
+ ProcessOneFrame(sample_rate_hz, stream_delay_ms, &render_buffer,
+ &capture_buffer, &echo_control_mobile);
+ }
+
+ // Extract and verify the test results.
+ std::vector<float> capture_output;
+ test::ExtractVectorFromAudioBuffer(capture_config, &capture_buffer,
+ &capture_output);
+
+  // Compare the output with the reference. Only the first values of the
+  // output from the last processed frame are compared, to avoid having to
+  // specify all preceding frames as test vectors. As the algorithm being
+  // tested has a memory, testing only the last frame implicitly also tests
+  // the preceding frames.
+ const float kElementErrorBound = 1.0f / 32768.0f;
+ EXPECT_TRUE(test::VerifyDeinterleavedArray(
+ capture_config.num_frames(), capture_config.num_channels(),
+ output_reference, capture_output, kElementErrorBound));
+}
+
+} // namespace
+
+// TODO(peah): Re-enable once the integer overflow issue in aecm_core.c:932:69
+// has been solved.
+TEST(EchoControlMobileBitExactnessTest,
+ DISABLED_Mono8kHz_LoudSpeakerPhone_CngOn_StreamDelay0) {
+ const float kOutputReference[] = {0.005280f, 0.002380f, -0.000427f};
+
+ RunBitexactnessTest(8000, 1, 0,
+ EchoControlMobileImpl::RoutingMode::kLoudSpeakerphone,
+ true, kOutputReference);
+}
+
+TEST(EchoControlMobileBitExactnessTest,
+ DISABLED_Mono16kHz_LoudSpeakerPhone_CngOn_StreamDelay0) {
+ const float kOutputReference[] = {0.003601f, 0.002991f, 0.001923f};
+ RunBitexactnessTest(16000, 1, 0,
+ EchoControlMobileImpl::RoutingMode::kLoudSpeakerphone,
+ true, kOutputReference);
+}
+
+TEST(EchoControlMobileBitExactnessTest,
+ DISABLED_Mono32kHz_LoudSpeakerPhone_CngOn_StreamDelay0) {
+ const float kOutputReference[] = {0.002258f, 0.002899f, 0.003906f};
+
+ RunBitexactnessTest(32000, 1, 0,
+ EchoControlMobileImpl::RoutingMode::kLoudSpeakerphone,
+ true, kOutputReference);
+}
+
+TEST(EchoControlMobileBitExactnessTest,
+ DISABLED_Mono48kHz_LoudSpeakerPhone_CngOn_StreamDelay0) {
+ const float kOutputReference[] = {-0.000046f, 0.000041f, 0.000249f};
+
+ RunBitexactnessTest(48000, 1, 0,
+ EchoControlMobileImpl::RoutingMode::kLoudSpeakerphone,
+ true, kOutputReference);
+}
+
+TEST(EchoControlMobileBitExactnessTest,
+ DISABLED_Mono16kHz_LoudSpeakerPhone_CngOff_StreamDelay0) {
+ const float kOutputReference[] = {0.000000f, 0.000000f, 0.000000f};
+
+ RunBitexactnessTest(16000, 1, 0,
+ EchoControlMobileImpl::RoutingMode::kLoudSpeakerphone,
+ false, kOutputReference);
+}
+
+// TODO(peah): Re-enable once the integer overflow issue in aecm_core.c:932:69
+// has been solved.
+TEST(EchoControlMobileBitExactnessTest,
+ DISABLED_Mono16kHz_LoudSpeakerPhone_CngOn_StreamDelay5) {
+ const float kOutputReference[] = {0.003693f, 0.002930f, 0.001801f};
+
+ RunBitexactnessTest(16000, 1, 5,
+ EchoControlMobileImpl::RoutingMode::kLoudSpeakerphone,
+ true, kOutputReference);
+}
+
+TEST(EchoControlMobileBitExactnessTest,
+ Mono16kHz_LoudSpeakerPhone_CngOn_StreamDelay10) {
+ const float kOutputReference[] = {-0.002380f, -0.002533f, -0.002563f};
+
+ RunBitexactnessTest(16000, 1, 10,
+ EchoControlMobileImpl::RoutingMode::kLoudSpeakerphone,
+ true, kOutputReference);
+}
+
+TEST(EchoControlMobileBitExactnessTest,
+ DISABLED_Mono16kHz_QuietEarpieceOrHeadset_CngOn_StreamDelay0) {
+ const float kOutputReference[] = {0.000397f, 0.000000f, -0.000305f};
+
+ RunBitexactnessTest(
+ 16000, 1, 0, EchoControlMobileImpl::RoutingMode::kQuietEarpieceOrHeadset,
+ true, kOutputReference);
+}
+
+TEST(EchoControlMobileBitExactnessTest,
+ DISABLED_Mono16kHz_Earpiece_CngOn_StreamDelay0) {
+ const float kOutputReference[] = {0.002167f, 0.001617f, 0.001038f};
+
+ RunBitexactnessTest(16000, 1, 0,
+ EchoControlMobileImpl::RoutingMode::kEarpiece, true,
+ kOutputReference);
+}
+
+TEST(EchoControlMobileBitExactnessTest,
+ DISABLED_Mono16kHz_LoudEarpiece_CngOn_StreamDelay0) {
+ const float kOutputReference[] = {0.003540f, 0.002899f, 0.001862f};
+
+ RunBitexactnessTest(16000, 1, 0,
+ EchoControlMobileImpl::RoutingMode::kLoudEarpiece, true,
+ kOutputReference);
+}
+
+TEST(EchoControlMobileBitExactnessTest,
+ DISABLED_Mono16kHz_SpeakerPhone_CngOn_StreamDelay0) {
+ const float kOutputReference[] = {0.003632f, 0.003052f, 0.001984f};
+
+ RunBitexactnessTest(16000, 1, 0,
+ EchoControlMobileImpl::RoutingMode::kSpeakerphone, true,
+ kOutputReference);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/echo_control_mobile_impl.cc b/third_party/libwebrtc/modules/audio_processing/echo_control_mobile_impl.cc
new file mode 100644
index 0000000000..fa5cb8ffec
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/echo_control_mobile_impl.cc
@@ -0,0 +1,287 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/echo_control_mobile_impl.h"
+
+#include <string.h>
+
+#include <cstdint>
+
+#include "modules/audio_processing/aecm/echo_control_mobile.h"
+#include "modules/audio_processing/audio_buffer.h"
+#include "modules/audio_processing/include/audio_processing.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+namespace {
+int16_t MapSetting(EchoControlMobileImpl::RoutingMode mode) {
+ switch (mode) {
+ case EchoControlMobileImpl::kQuietEarpieceOrHeadset:
+ return 0;
+ case EchoControlMobileImpl::kEarpiece:
+ return 1;
+ case EchoControlMobileImpl::kLoudEarpiece:
+ return 2;
+ case EchoControlMobileImpl::kSpeakerphone:
+ return 3;
+ case EchoControlMobileImpl::kLoudSpeakerphone:
+ return 4;
+ }
+ RTC_DCHECK_NOTREACHED();
+ return -1;
+}
+
+AudioProcessing::Error MapError(int err) {
+ switch (err) {
+ case AECM_UNSUPPORTED_FUNCTION_ERROR:
+ return AudioProcessing::kUnsupportedFunctionError;
+ case AECM_NULL_POINTER_ERROR:
+ return AudioProcessing::kNullPointerError;
+ case AECM_BAD_PARAMETER_ERROR:
+ return AudioProcessing::kBadParameterError;
+ case AECM_BAD_PARAMETER_WARNING:
+ return AudioProcessing::kBadStreamParameterWarning;
+ default:
+ // AECM_UNSPECIFIED_ERROR
+ // AECM_UNINITIALIZED_ERROR
+ return AudioProcessing::kUnspecifiedError;
+ }
+}
+
+} // namespace
+
+struct EchoControlMobileImpl::StreamProperties {
+ StreamProperties() = delete;
+ StreamProperties(int sample_rate_hz,
+ size_t num_reverse_channels,
+ size_t num_output_channels)
+ : sample_rate_hz(sample_rate_hz),
+ num_reverse_channels(num_reverse_channels),
+ num_output_channels(num_output_channels) {}
+
+ int sample_rate_hz;
+ size_t num_reverse_channels;
+ size_t num_output_channels;
+};
+
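+// RAII wrapper owning one C-style AECM state: created with WebRtcAecm_Create
+// in the constructor and released with WebRtcAecm_Free in the destructor.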
+class EchoControlMobileImpl::Canceller {
+ public:
+ Canceller() {
+ state_ = WebRtcAecm_Create();
+ RTC_CHECK(state_);
+ }
+
+ ~Canceller() {
+ RTC_DCHECK(state_);
+ WebRtcAecm_Free(state_);
+ }
+
+ Canceller(const Canceller&) = delete;
+ Canceller& operator=(const Canceller&) = delete;
+
+ void* state() {
+ RTC_DCHECK(state_);
+ return state_;
+ }
+
+ void Initialize(int sample_rate_hz) {
+ RTC_DCHECK(state_);
+ int error = WebRtcAecm_Init(state_, sample_rate_hz);
+ RTC_DCHECK_EQ(AudioProcessing::kNoError, error);
+ }
+
+ private:
+ void* state_;
+};
+
+EchoControlMobileImpl::EchoControlMobileImpl()
+ : routing_mode_(kSpeakerphone), comfort_noise_enabled_(false) {}
+
+EchoControlMobileImpl::~EchoControlMobileImpl() {}
+
+void EchoControlMobileImpl::ProcessRenderAudio(
+ rtc::ArrayView<const int16_t> packed_render_audio) {
+ RTC_DCHECK(stream_properties_);
+
+ size_t buffer_index = 0;
+ size_t num_frames_per_band =
+ packed_render_audio.size() / (stream_properties_->num_output_channels *
+ stream_properties_->num_reverse_channels);
+
+ for (auto& canceller : cancellers_) {
+ WebRtcAecm_BufferFarend(canceller->state(),
+ &packed_render_audio[buffer_index],
+ num_frames_per_band);
+
+ buffer_index += num_frames_per_band;
+ }
+}
+
+void EchoControlMobileImpl::PackRenderAudioBuffer(
+ const AudioBuffer* audio,
+ size_t num_output_channels,
+ size_t num_channels,
+ std::vector<int16_t>* packed_buffer) {
+ RTC_DCHECK_GE(AudioBuffer::kMaxSplitFrameLength,
+ audio->num_frames_per_band());
+ RTC_DCHECK_EQ(num_channels, audio->num_channels());
+
+ // The ordering convention must be followed to pass to the correct AECM.
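+  // The packed buffer holds num_output_channels * num_channels chunks of
+  // num_frames_per_band samples each, in the same capture-major order in
+  // which ProcessCaptureAudio walks its cancellers. E.g. with 2 output and
+  // 2 render channels the chunk order is r0, r1, r0, r1.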
+ packed_buffer->clear();
+ int render_channel = 0;
+ for (size_t i = 0; i < num_output_channels; i++) {
+ for (size_t j = 0; j < audio->num_channels(); j++) {
+ std::array<int16_t, AudioBuffer::kMaxSplitFrameLength> data_to_buffer;
+ FloatS16ToS16(audio->split_bands_const(render_channel)[kBand0To8kHz],
+ audio->num_frames_per_band(), data_to_buffer.data());
+
+ // Buffer the samples in the render queue.
+ packed_buffer->insert(
+ packed_buffer->end(), data_to_buffer.data(),
+ data_to_buffer.data() + audio->num_frames_per_band());
+ render_channel = (render_channel + 1) % audio->num_channels();
+ }
+ }
+}
+
+size_t EchoControlMobileImpl::NumCancellersRequired(
+ size_t num_output_channels,
+ size_t num_reverse_channels) {
+ return num_output_channels * num_reverse_channels;
+}
+
+int EchoControlMobileImpl::ProcessCaptureAudio(AudioBuffer* audio,
+ int stream_delay_ms) {
+ RTC_DCHECK(stream_properties_);
+ RTC_DCHECK_GE(160, audio->num_frames_per_band());
+ RTC_DCHECK_EQ(audio->num_channels(), stream_properties_->num_output_channels);
+ RTC_DCHECK_GE(cancellers_.size(), stream_properties_->num_reverse_channels *
+ audio->num_channels());
+
+ int err = AudioProcessing::kNoError;
+
+ // The ordering convention must be followed to pass to the correct AECM.
+ size_t handle_index = 0;
+ for (size_t capture = 0; capture < audio->num_channels(); ++capture) {
+ // TODO(ajm): improve how this works, possibly inside AECM.
+ // This is kind of hacked up.
+ RTC_DCHECK_LT(capture, low_pass_reference_.size());
+ const int16_t* noisy =
+ reference_copied_ ? low_pass_reference_[capture].data() : nullptr;
+
+ RTC_DCHECK_GE(AudioBuffer::kMaxSplitFrameLength,
+ audio->num_frames_per_band());
+
+ std::array<int16_t, AudioBuffer::kMaxSplitFrameLength> split_bands_data;
+ int16_t* split_bands = split_bands_data.data();
+ const int16_t* clean = split_bands_data.data();
+ if (audio->split_bands(capture)[kBand0To8kHz]) {
+ FloatS16ToS16(audio->split_bands(capture)[kBand0To8kHz],
+ audio->num_frames_per_band(), split_bands_data.data());
+ } else {
+ clean = nullptr;
+ split_bands = nullptr;
+ }
+
+    if (noisy == nullptr) {
+      noisy = clean;
+      clean = nullptr;
+ }
+ for (size_t render = 0; render < stream_properties_->num_reverse_channels;
+ ++render) {
+ err = WebRtcAecm_Process(cancellers_[handle_index]->state(), noisy, clean,
+ split_bands, audio->num_frames_per_band(),
+ stream_delay_ms);
+
+ if (split_bands) {
+ S16ToFloatS16(split_bands, audio->num_frames_per_band(),
+ audio->split_bands(capture)[kBand0To8kHz]);
+ }
+
+ if (err != AudioProcessing::kNoError) {
+ return MapError(err);
+ }
+
+ ++handle_index;
+ }
+ for (size_t band = 1u; band < audio->num_bands(); ++band) {
+ memset(audio->split_bands_f(capture)[band], 0,
+ audio->num_frames_per_band() *
+ sizeof(audio->split_bands_f(capture)[band][0]));
+ }
+ }
+ return AudioProcessing::kNoError;
+}
+
+int EchoControlMobileImpl::set_routing_mode(RoutingMode mode) {
+ if (MapSetting(mode) == -1) {
+ return AudioProcessing::kBadParameterError;
+ }
+ routing_mode_ = mode;
+ return Configure();
+}
+
+EchoControlMobileImpl::RoutingMode EchoControlMobileImpl::routing_mode() const {
+ return routing_mode_;
+}
+
+int EchoControlMobileImpl::enable_comfort_noise(bool enable) {
+ comfort_noise_enabled_ = enable;
+ return Configure();
+}
+
+bool EchoControlMobileImpl::is_comfort_noise_enabled() const {
+ return comfort_noise_enabled_;
+}
+
+void EchoControlMobileImpl::Initialize(int sample_rate_hz,
+ size_t num_reverse_channels,
+ size_t num_output_channels) {
+ low_pass_reference_.resize(num_output_channels);
+ for (auto& reference : low_pass_reference_) {
+ reference.fill(0);
+ }
+
+ stream_properties_.reset(new StreamProperties(
+ sample_rate_hz, num_reverse_channels, num_output_channels));
+
+ // AECM only supports 16 kHz or lower sample rates.
+ RTC_DCHECK_LE(stream_properties_->sample_rate_hz,
+ AudioProcessing::kSampleRate16kHz);
+
+ cancellers_.resize(
+ NumCancellersRequired(stream_properties_->num_output_channels,
+ stream_properties_->num_reverse_channels));
+
+ for (auto& canceller : cancellers_) {
+ if (!canceller) {
+ canceller.reset(new Canceller());
+ }
+ canceller->Initialize(sample_rate_hz);
+ }
+ Configure();
+}
+
+int EchoControlMobileImpl::Configure() {
+ AecmConfig config;
+ config.cngMode = comfort_noise_enabled_;
+ config.echoMode = MapSetting(routing_mode_);
+ int error = AudioProcessing::kNoError;
+ for (auto& canceller : cancellers_) {
+ int handle_error = WebRtcAecm_set_config(canceller->state(), config);
+ if (handle_error != AudioProcessing::kNoError) {
+ error = handle_error;
+ }
+ }
+ return error;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/echo_control_mobile_impl.h b/third_party/libwebrtc/modules/audio_processing/echo_control_mobile_impl.h
new file mode 100644
index 0000000000..f7f2626a0e
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/echo_control_mobile_impl.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_ECHO_CONTROL_MOBILE_IMPL_H_
+#define MODULES_AUDIO_PROCESSING_ECHO_CONTROL_MOBILE_IMPL_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <memory>
+#include <vector>
+
+#include "api/array_view.h"
+
+namespace webrtc {
+
+class AudioBuffer;
+
+// The acoustic echo control for mobile (AECM) component is a low complexity
+// robust option intended for use on mobile devices.
+class EchoControlMobileImpl {
+ public:
+ EchoControlMobileImpl();
+
+ ~EchoControlMobileImpl();
+
+ // Recommended settings for particular audio routes. In general, the louder
+ // the echo is expected to be, the higher this value should be set. The
+ // preferred setting may vary from device to device.
+ enum RoutingMode {
+ kQuietEarpieceOrHeadset,
+ kEarpiece,
+ kLoudEarpiece,
+ kSpeakerphone,
+ kLoudSpeakerphone
+ };
+
+ // Sets echo control appropriate for the audio routing `mode` on the device.
+ // It can and should be updated during a call if the audio routing changes.
+ int set_routing_mode(RoutingMode mode);
+ RoutingMode routing_mode() const;
+
+ // Comfort noise replaces suppressed background noise to maintain a
+ // consistent signal level.
+ int enable_comfort_noise(bool enable);
+ bool is_comfort_noise_enabled() const;
+
+ void ProcessRenderAudio(rtc::ArrayView<const int16_t> packed_render_audio);
+ int ProcessCaptureAudio(AudioBuffer* audio, int stream_delay_ms);
+
+ void Initialize(int sample_rate_hz,
+ size_t num_reverse_channels,
+ size_t num_output_channels);
+
+ static void PackRenderAudioBuffer(const AudioBuffer* audio,
+ size_t num_output_channels,
+ size_t num_channels,
+ std::vector<int16_t>* packed_buffer);
+
+ static size_t NumCancellersRequired(size_t num_output_channels,
+ size_t num_reverse_channels);
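+
+  // A minimal usage sketch, mirroring the bit-exactness tests for this
+  // component (the buffer names are illustrative only):
+  //
+  //   EchoControlMobileImpl aecm;
+  //   aecm.Initialize(/*sample_rate_hz=*/16000, /*num_reverse_channels=*/1,
+  //                   /*num_output_channels=*/1);
+  //   std::vector<int16_t> packed_render;
+  //   EchoControlMobileImpl::PackRenderAudioBuffer(render_buffer, 1, 1,
+  //                                                &packed_render);
+  //   aecm.ProcessRenderAudio(packed_render);
+  //   aecm.ProcessCaptureAudio(capture_buffer, /*stream_delay_ms=*/0);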
+
+ private:
+ class Canceller;
+ struct StreamProperties;
+
+ int Configure();
+
+ RoutingMode routing_mode_;
+ bool comfort_noise_enabled_;
+
+ std::vector<std::unique_ptr<Canceller>> cancellers_;
+ std::unique_ptr<StreamProperties> stream_properties_;
+ std::vector<std::array<int16_t, 160>> low_pass_reference_;
+ bool reference_copied_ = false;
+};
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_ECHO_CONTROL_MOBILE_IMPL_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/echo_control_mobile_unittest.cc b/third_party/libwebrtc/modules/audio_processing/echo_control_mobile_unittest.cc
new file mode 100644
index 0000000000..ed0393043c
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/echo_control_mobile_unittest.cc
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <array>
+#include <vector>
+
+#include "modules/audio_processing/echo_control_mobile_impl.h"
+#include "modules/audio_processing/include/audio_processing.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+TEST(EchoControlMobileTest, InterfaceConfiguration) {
+ EchoControlMobileImpl aecm;
+ aecm.Initialize(AudioProcessing::kSampleRate16kHz, 2, 2);
+
+ // Toggle routing modes
+ std::array<EchoControlMobileImpl::RoutingMode, 5> routing_modes = {
+ EchoControlMobileImpl::kQuietEarpieceOrHeadset,
+ EchoControlMobileImpl::kEarpiece,
+ EchoControlMobileImpl::kLoudEarpiece,
+ EchoControlMobileImpl::kSpeakerphone,
+ EchoControlMobileImpl::kLoudSpeakerphone,
+ };
+ for (auto mode : routing_modes) {
+ EXPECT_EQ(0, aecm.set_routing_mode(mode));
+ EXPECT_EQ(mode, aecm.routing_mode());
+ }
+
+ // Turn comfort noise off/on
+ EXPECT_EQ(0, aecm.enable_comfort_noise(false));
+ EXPECT_FALSE(aecm.is_comfort_noise_enabled());
+ EXPECT_EQ(0, aecm.enable_comfort_noise(true));
+ EXPECT_TRUE(aecm.is_comfort_noise_enabled());
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/echo_detector/circular_buffer.cc b/third_party/libwebrtc/modules/audio_processing/echo_detector/circular_buffer.cc
new file mode 100644
index 0000000000..a6d10edfe2
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/echo_detector/circular_buffer.cc
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/echo_detector/circular_buffer.h"
+
+#include <algorithm>
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+CircularBuffer::CircularBuffer(size_t size) : buffer_(size) {}
+CircularBuffer::~CircularBuffer() = default;
+
+void CircularBuffer::Push(float value) {
+ buffer_[next_insertion_index_] = value;
+ ++next_insertion_index_;
+ next_insertion_index_ %= buffer_.size();
+ RTC_DCHECK_LT(next_insertion_index_, buffer_.size());
+ nr_elements_in_buffer_ = std::min(nr_elements_in_buffer_ + 1, buffer_.size());
+ RTC_DCHECK_LE(nr_elements_in_buffer_, buffer_.size());
+}
+
+absl::optional<float> CircularBuffer::Pop() {
+ if (nr_elements_in_buffer_ == 0) {
+ return absl::nullopt;
+ }
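+  // The oldest element sits nr_elements_in_buffer_ positions behind the next
+  // insertion index; buffer_.size() is added before taking the modulus so the
+  // unsigned arithmetic cannot underflow.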
+ const size_t index =
+ (buffer_.size() + next_insertion_index_ - nr_elements_in_buffer_) %
+ buffer_.size();
+ RTC_DCHECK_LT(index, buffer_.size());
+ --nr_elements_in_buffer_;
+ return buffer_[index];
+}
+
+void CircularBuffer::Clear() {
+ std::fill(buffer_.begin(), buffer_.end(), 0.f);
+ next_insertion_index_ = 0;
+ nr_elements_in_buffer_ = 0;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/echo_detector/circular_buffer.h b/third_party/libwebrtc/modules/audio_processing/echo_detector/circular_buffer.h
new file mode 100644
index 0000000000..db1aeaebf6
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/echo_detector/circular_buffer.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_ECHO_DETECTOR_CIRCULAR_BUFFER_H_
+#define MODULES_AUDIO_PROCESSING_ECHO_DETECTOR_CIRCULAR_BUFFER_H_
+
+#include <stddef.h>
+
+#include <vector>
+
+#include "absl/types/optional.h"
+
+namespace webrtc {
+
+// Ring buffer containing floating point values.
+struct CircularBuffer {
+ public:
+ explicit CircularBuffer(size_t size);
+ ~CircularBuffer();
+
+ void Push(float value);
+ absl::optional<float> Pop();
+ size_t Size() const { return nr_elements_in_buffer_; }
+ // This function fills the buffer with zeros, but does not change its size.
+ void Clear();
+
+ private:
+ std::vector<float> buffer_;
+ size_t next_insertion_index_ = 0;
+ // This is the number of elements that have been pushed into the circular
+ // buffer, not the allocated buffer size.
+ size_t nr_elements_in_buffer_ = 0;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_ECHO_DETECTOR_CIRCULAR_BUFFER_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/echo_detector/circular_buffer_unittest.cc b/third_party/libwebrtc/modules/audio_processing/echo_detector/circular_buffer_unittest.cc
new file mode 100644
index 0000000000..7a234d4a55
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/echo_detector/circular_buffer_unittest.cc
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/echo_detector/circular_buffer.h"
+
+#include "test/gtest.h"
+
+namespace webrtc {
+
+TEST(CircularBufferTests, LessThanMaxTest) {
+ CircularBuffer test_buffer(3);
+ test_buffer.Push(1.f);
+ test_buffer.Push(2.f);
+ EXPECT_EQ(1.f, test_buffer.Pop());
+ EXPECT_EQ(2.f, test_buffer.Pop());
+}
+
+TEST(CircularBufferTests, FillTest) {
+ CircularBuffer test_buffer(3);
+ test_buffer.Push(1.f);
+ test_buffer.Push(2.f);
+ test_buffer.Push(3.f);
+ EXPECT_EQ(1.f, test_buffer.Pop());
+ EXPECT_EQ(2.f, test_buffer.Pop());
+ EXPECT_EQ(3.f, test_buffer.Pop());
+}
+
+TEST(CircularBufferTests, OverflowTest) {
+ CircularBuffer test_buffer(3);
+ test_buffer.Push(1.f);
+ test_buffer.Push(2.f);
+ test_buffer.Push(3.f);
+ test_buffer.Push(4.f);
+ // Because the circular buffer has a size of 3, the first insert should have
+ // been forgotten.
+ EXPECT_EQ(2.f, test_buffer.Pop());
+ EXPECT_EQ(3.f, test_buffer.Pop());
+ EXPECT_EQ(4.f, test_buffer.Pop());
+}
+
+TEST(CircularBufferTests, ReadFromEmpty) {
+ CircularBuffer test_buffer(3);
+ EXPECT_EQ(absl::nullopt, test_buffer.Pop());
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/echo_detector/mean_variance_estimator.cc b/third_party/libwebrtc/modules/audio_processing/echo_detector/mean_variance_estimator.cc
new file mode 100644
index 0000000000..a9ebb8cd92
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/echo_detector/mean_variance_estimator.cc
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/echo_detector/mean_variance_estimator.h"
+
+#include <cmath>
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace {
+
+// Parameter controlling the adaptation speed.
+constexpr float kAlpha = 0.001f;
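+// With this value the estimates behave like exponentially weighted moving
+// averages with an effective window of roughly 1 / kAlpha = 1000 updates;
+// the unit tests therefore feed on the order of 20000 values to converge.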
+
+} // namespace
+
+void MeanVarianceEstimator::Update(float value) {
+ mean_ = (1.f - kAlpha) * mean_ + kAlpha * value;
+ variance_ =
+ (1.f - kAlpha) * variance_ + kAlpha * (value - mean_) * (value - mean_);
+ RTC_DCHECK(std::isfinite(mean_));
+ RTC_DCHECK(std::isfinite(variance_));
+}
+
+float MeanVarianceEstimator::std_deviation() const {
+ RTC_DCHECK_GE(variance_, 0.f);
+ return sqrtf(variance_);
+}
+
+float MeanVarianceEstimator::mean() const {
+ return mean_;
+}
+
+void MeanVarianceEstimator::Clear() {
+ mean_ = 0.f;
+ variance_ = 0.f;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/echo_detector/mean_variance_estimator.h b/third_party/libwebrtc/modules/audio_processing/echo_detector/mean_variance_estimator.h
new file mode 100644
index 0000000000..7f793df1e8
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/echo_detector/mean_variance_estimator.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_ECHO_DETECTOR_MEAN_VARIANCE_ESTIMATOR_H_
+#define MODULES_AUDIO_PROCESSING_ECHO_DETECTOR_MEAN_VARIANCE_ESTIMATOR_H_
+
+namespace webrtc {
+
+// This class iteratively estimates the mean and variance of a signal.
+class MeanVarianceEstimator {
+ public:
+ void Update(float value);
+ float std_deviation() const;
+ float mean() const;
+ void Clear();
+
+ private:
+ // Estimate of the expected value of the input values.
+ float mean_ = 0.f;
+ // Estimate of the variance of the input values.
+ float variance_ = 0.f;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_ECHO_DETECTOR_MEAN_VARIANCE_ESTIMATOR_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/echo_detector/mean_variance_estimator_unittest.cc b/third_party/libwebrtc/modules/audio_processing/echo_detector/mean_variance_estimator_unittest.cc
new file mode 100644
index 0000000000..8327d23e8a
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/echo_detector/mean_variance_estimator_unittest.cc
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/echo_detector/mean_variance_estimator.h"
+
+#include "test/gtest.h"
+
+namespace webrtc {
+
+TEST(MeanVarianceEstimatorTests, InsertTwoValues) {
+ MeanVarianceEstimator test_estimator;
+ // Insert two values.
+ test_estimator.Update(3.f);
+ test_estimator.Update(5.f);
+
+ EXPECT_GT(test_estimator.mean(), 0.f);
+ EXPECT_GT(test_estimator.std_deviation(), 0.f);
+ // Test Clear method
+ test_estimator.Clear();
+ EXPECT_EQ(test_estimator.mean(), 0.f);
+ EXPECT_EQ(test_estimator.std_deviation(), 0.f);
+}
+
+TEST(MeanVarianceEstimatorTests, InsertZeroes) {
+ MeanVarianceEstimator test_estimator;
+ // Insert the same value many times.
+ for (size_t i = 0; i < 20000; i++) {
+ test_estimator.Update(0.f);
+ }
+ EXPECT_EQ(test_estimator.mean(), 0.f);
+ EXPECT_EQ(test_estimator.std_deviation(), 0.f);
+}
+
+TEST(MeanVarianceEstimatorTests, ConstantValueTest) {
+ MeanVarianceEstimator test_estimator;
+ for (size_t i = 0; i < 20000; i++) {
+ test_estimator.Update(3.f);
+ }
+ // The mean should be close to three, and the standard deviation should be
+ // close to zero.
+ EXPECT_NEAR(3.0f, test_estimator.mean(), 0.01f);
+ EXPECT_NEAR(0.0f, test_estimator.std_deviation(), 0.01f);
+}
+
+TEST(MeanVarianceEstimatorTests, AlternatingValueTest) {
+ MeanVarianceEstimator test_estimator;
+ for (size_t i = 0; i < 20000; i++) {
+ test_estimator.Update(1.f);
+ test_estimator.Update(-1.f);
+ }
+ // The mean should be close to zero, and the standard deviation should be
+ // close to one.
+ EXPECT_NEAR(0.0f, test_estimator.mean(), 0.01f);
+ EXPECT_NEAR(1.0f, test_estimator.std_deviation(), 0.01f);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/echo_detector/moving_max.cc b/third_party/libwebrtc/modules/audio_processing/echo_detector/moving_max.cc
new file mode 100644
index 0000000000..3054e98bd3
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/echo_detector/moving_max.cc
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/echo_detector/moving_max.h"
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace {
+
+// Parameter for controlling how fast the estimated maximum decays after the
+// previous maximum is no longer valid. With a value of 0.99, the maximum will
+// decay to 1% of its former value after 460 updates.
+constexpr float kDecayFactor = 0.99f;
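+// (0.99^460 = exp(460 * ln(0.99)), which is approximately exp(-4.62), i.e.
+// roughly 1% of the former value.)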
+
+} // namespace
+
+MovingMax::MovingMax(size_t window_size) : window_size_(window_size) {
+ RTC_DCHECK_GT(window_size, 0);
+}
+
+MovingMax::~MovingMax() {}
+
+void MovingMax::Update(float value) {
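+  // counter_ is the number of updates since the current maximum was observed;
+  // once the maximum has been in the window for window_size_ - 1 updates it
+  // is decayed rather than dropped outright.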
+ if (counter_ >= window_size_ - 1) {
+ max_value_ *= kDecayFactor;
+ } else {
+ ++counter_;
+ }
+ if (value > max_value_) {
+ max_value_ = value;
+ counter_ = 0;
+ }
+}
+
+float MovingMax::max() const {
+ return max_value_;
+}
+
+void MovingMax::Clear() {
+ max_value_ = 0.f;
+ counter_ = 0;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/echo_detector/moving_max.h b/third_party/libwebrtc/modules/audio_processing/echo_detector/moving_max.h
new file mode 100644
index 0000000000..f7d8ee8137
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/echo_detector/moving_max.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_ECHO_DETECTOR_MOVING_MAX_H_
+#define MODULES_AUDIO_PROCESSING_ECHO_DETECTOR_MOVING_MAX_H_
+
+#include <stddef.h>
+
+namespace webrtc {
+
+class MovingMax {
+ public:
+ explicit MovingMax(size_t window_size);
+ ~MovingMax();
+
+ void Update(float value);
+ float max() const;
+ // Reset all of the state in this class.
+ void Clear();
+
+ private:
+ float max_value_ = 0.f;
+ size_t counter_ = 0;
+ size_t window_size_ = 1;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_ECHO_DETECTOR_MOVING_MAX_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/echo_detector/moving_max_unittest.cc b/third_party/libwebrtc/modules/audio_processing/echo_detector/moving_max_unittest.cc
new file mode 100644
index 0000000000..9429127a2b
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/echo_detector/moving_max_unittest.cc
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/echo_detector/moving_max.h"
+
+#include "test/gtest.h"
+
+namespace webrtc {
+
+// Test if the maximum is correctly found.
+TEST(MovingMaxTests, SimpleTest) {
+ MovingMax test_moving_max(5);
+ test_moving_max.Update(1.0f);
+ test_moving_max.Update(1.1f);
+ test_moving_max.Update(1.9f);
+ test_moving_max.Update(1.87f);
+ test_moving_max.Update(1.89f);
+ EXPECT_EQ(1.9f, test_moving_max.max());
+}
+
+// Test if values fall out of the window when expected.
+TEST(MovingMaxTests, SlidingWindowTest) {
+ MovingMax test_moving_max(5);
+ test_moving_max.Update(1.0f);
+ test_moving_max.Update(1.9f);
+ test_moving_max.Update(1.7f);
+ test_moving_max.Update(1.87f);
+ test_moving_max.Update(1.89f);
+ test_moving_max.Update(1.3f);
+ test_moving_max.Update(1.2f);
+ EXPECT_LT(test_moving_max.max(), 1.9f);
+}
+
+// Test if Clear() works as expected.
+TEST(MovingMaxTests, ClearTest) {
+ MovingMax test_moving_max(5);
+ test_moving_max.Update(1.0f);
+ test_moving_max.Update(1.1f);
+ test_moving_max.Update(1.9f);
+ test_moving_max.Update(1.87f);
+ test_moving_max.Update(1.89f);
+ EXPECT_EQ(1.9f, test_moving_max.max());
+ test_moving_max.Clear();
+ EXPECT_EQ(0.f, test_moving_max.max());
+}
+
+// Test the decay of the estimated maximum.
+TEST(MovingMaxTests, DecayTest) {
+ MovingMax test_moving_max(1);
+ test_moving_max.Update(1.0f);
+ float previous_value = 1.0f;
+ for (int i = 0; i < 500; i++) {
+ test_moving_max.Update(0.0f);
+ EXPECT_LT(test_moving_max.max(), previous_value);
+ EXPECT_GT(test_moving_max.max(), 0.0f);
+ previous_value = test_moving_max.max();
+ }
+ EXPECT_LT(test_moving_max.max(), 0.01f);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/echo_detector/normalized_covariance_estimator.cc b/third_party/libwebrtc/modules/audio_processing/echo_detector/normalized_covariance_estimator.cc
new file mode 100644
index 0000000000..8ec9fe9f0b
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/echo_detector/normalized_covariance_estimator.cc
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/echo_detector/normalized_covariance_estimator.h"
+
+#include <cmath>
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace {
+
+// Parameter controlling the adaptation speed.
+constexpr float kAlpha = 0.001f;
+
+} // namespace
+
+void NormalizedCovarianceEstimator::Update(float x,
+ float x_mean,
+ float x_sigma,
+ float y,
+ float y_mean,
+ float y_sigma) {
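+  // The small constant added to the denominator below keeps the division
+  // well-defined when either standard deviation is (close to) zero.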
+ covariance_ =
+ (1.f - kAlpha) * covariance_ + kAlpha * (x - x_mean) * (y - y_mean);
+ normalized_cross_correlation_ = covariance_ / (x_sigma * y_sigma + .0001f);
+  RTC_DCHECK(std::isfinite(covariance_));
+  RTC_DCHECK(std::isfinite(normalized_cross_correlation_));
+}
+
+void NormalizedCovarianceEstimator::Clear() {
+ covariance_ = 0.f;
+ normalized_cross_correlation_ = 0.f;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/echo_detector/normalized_covariance_estimator.h b/third_party/libwebrtc/modules/audio_processing/echo_detector/normalized_covariance_estimator.h
new file mode 100644
index 0000000000..e3c36d88ba
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/echo_detector/normalized_covariance_estimator.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_ECHO_DETECTOR_NORMALIZED_COVARIANCE_ESTIMATOR_H_
+#define MODULES_AUDIO_PROCESSING_ECHO_DETECTOR_NORMALIZED_COVARIANCE_ESTIMATOR_H_
+
+namespace webrtc {
+
+// This class iteratively estimates the normalized covariance between two
+// signals.
+class NormalizedCovarianceEstimator {
+ public:
+  void Update(float x,
+              float x_mean,
+              float x_sigma,
+              float y,
+              float y_mean,
+              float y_sigma);
+ // This function returns an estimate of the Pearson product-moment correlation
+ // coefficient of the two signals.
+ float normalized_cross_correlation() const {
+ return normalized_cross_correlation_;
+ }
+ float covariance() const { return covariance_; }
+ // This function resets the estimated values to zero.
+ void Clear();
+
+ private:
+ float normalized_cross_correlation_ = 0.f;
+ // Estimate of the covariance value.
+ float covariance_ = 0.f;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_ECHO_DETECTOR_NORMALIZED_COVARIANCE_ESTIMATOR_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/echo_detector/normalized_covariance_estimator_unittest.cc b/third_party/libwebrtc/modules/audio_processing/echo_detector/normalized_covariance_estimator_unittest.cc
new file mode 100644
index 0000000000..89fb9383f6
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/echo_detector/normalized_covariance_estimator_unittest.cc
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/echo_detector/normalized_covariance_estimator.h"
+
+#include "test/gtest.h"
+
+namespace webrtc {
+
+TEST(NormalizedCovarianceEstimatorTests, IdenticalSignalTest) {
+ NormalizedCovarianceEstimator test_estimator;
+ for (size_t i = 0; i < 10000; i++) {
+ test_estimator.Update(1.f, 0.f, 1.f, 1.f, 0.f, 1.f);
+ test_estimator.Update(-1.f, 0.f, 1.f, -1.f, 0.f, 1.f);
+ }
+ // A normalized covariance value close to 1 is expected.
+ EXPECT_NEAR(1.f, test_estimator.normalized_cross_correlation(), 0.01f);
+ test_estimator.Clear();
+ EXPECT_EQ(0.f, test_estimator.normalized_cross_correlation());
+}
+
+TEST(NormalizedCovarianceEstimatorTests, OppositeSignalTest) {
+ NormalizedCovarianceEstimator test_estimator;
+ // Insert the same value many times.
+ for (size_t i = 0; i < 10000; i++) {
+ test_estimator.Update(1.f, 0.f, 1.f, -1.f, 0.f, 1.f);
+ test_estimator.Update(-1.f, 0.f, 1.f, 1.f, 0.f, 1.f);
+ }
+ // A normalized covariance value close to -1 is expected.
+ EXPECT_NEAR(-1.f, test_estimator.normalized_cross_correlation(), 0.01f);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/g3doc/audio_processing_module.md b/third_party/libwebrtc/modules/audio_processing/g3doc/audio_processing_module.md
new file mode 100644
index 0000000000..fc63b34dfe
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/g3doc/audio_processing_module.md
@@ -0,0 +1,26 @@
+# Audio Processing Module (APM)
+
+<?% config.freshness.owner = 'peah' %?>
+<?% config.freshness.reviewed = '2021-04-13' %?>
+
+## Overview
+
+The APM is responsible for applying speech enhancement effects to the
+microphone signal. These effects are required for VoIP calling; examples
+include echo cancellation (AEC), noise suppression (NS) and automatic gain
+control (AGC).
+
+The API for APM resides in [`/modules/audio_processing/include`](https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_processing/include).
+APM is created using the [`AudioProcessingBuilder`](https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_processing/include/audio_processing.h)
+builder that allows it to be customized and configured.
+
+Some notable properties of APM:
+* APM is fully thread-safe: it can be accessed concurrently from different
+ threads.
+* APM handles any input sample rate below 384 kHz, automatically
+ reconfiguring whenever a new sample format is observed.
+* APM handles any number of microphone and loudspeaker channels, with the
+ same automatic reconfiguration as for sample rates.
+
+APM can be used either as part of the WebRTC native pipeline or standalone.
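+
+As a minimal standalone sketch (the config fields shown are assumptions that
+may vary across WebRTC versions):
+
+```c++
+rtc::scoped_refptr<webrtc::AudioProcessing> apm =
+    webrtc::AudioProcessingBuilder().Create();
+webrtc::AudioProcessing::Config config;
+config.echo_canceller.enabled = true;    // AEC
+config.noise_suppression.enabled = true; // NS
+config.gain_controller2.enabled = true;  // AGC
+apm->ApplyConfig(config);
+```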
diff --git a/third_party/libwebrtc/modules/audio_processing/gain_control_impl.cc b/third_party/libwebrtc/modules/audio_processing/gain_control_impl.cc
new file mode 100644
index 0000000000..3fac1f7f56
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/gain_control_impl.cc
@@ -0,0 +1,399 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/gain_control_impl.h"
+
+#include <cstdint>
+
+#include "absl/types/optional.h"
+#include "modules/audio_processing/agc/legacy/gain_control.h"
+#include "modules/audio_processing/audio_buffer.h"
+#include "modules/audio_processing/include/audio_processing.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "system_wrappers/include/field_trial.h"
+
+namespace webrtc {
+
+typedef void Handle;
+
+namespace {
+int16_t MapSetting(GainControl::Mode mode) {
+ switch (mode) {
+ case GainControl::kAdaptiveAnalog:
+ return kAgcModeAdaptiveAnalog;
+ case GainControl::kAdaptiveDigital:
+ return kAgcModeAdaptiveDigital;
+ case GainControl::kFixedDigital:
+ return kAgcModeFixedDigital;
+ }
+ RTC_DCHECK_NOTREACHED();
+ return -1;
+}
+
+// Checks whether the legacy digital gain application should be used.
+bool UseLegacyDigitalGainApplier() {
+ return field_trial::IsEnabled("WebRTC-UseLegacyDigitalGainApplier");
+}
+
+// Floating point variant of WebRtcAgc_Process.
+void ApplyDigitalGain(const int32_t gains[11],
+ size_t num_bands,
+ float* const* out) {
+ constexpr float kScaling = 1.f / 65536.f;
+ constexpr int kNumSubSections = 16;
+ constexpr float kOneByNumSubSections = 1.f / kNumSubSections;
+
+ float gains_scaled[11];
+ for (int k = 0; k < 11; ++k) {
+ gains_scaled[k] = gains[k] * kScaling;
+ }
+
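+ // Interpolate between the 11 gain points: each of the 10 segments ramps
+ // linearly over kNumSubSections samples, covering 10 * 16 = 160 samples per
+ // band (10 ms at 16 kHz).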
+ for (size_t b = 0; b < num_bands; ++b) {
+ float* out_band = out[b];
+ for (int k = 0, sample = 0; k < 10; ++k) {
+ const float delta =
+ (gains_scaled[k + 1] - gains_scaled[k]) * kOneByNumSubSections;
+ float gain = gains_scaled[k];
+ for (int n = 0; n < kNumSubSections; ++n, ++sample) {
+ RTC_DCHECK_EQ(k * kNumSubSections + n, sample);
+ out_band[sample] *= gain;
+ out_band[sample] =
+ std::min(32767.f, std::max(-32768.f, out_band[sample]));
+ gain += delta;
+ }
+ }
+ }
+}
+
+} // namespace
+
+struct GainControlImpl::MonoAgcState {
+ MonoAgcState() {
+ state = WebRtcAgc_Create();
+ RTC_CHECK(state);
+ }
+
+ ~MonoAgcState() {
+ RTC_DCHECK(state);
+ WebRtcAgc_Free(state);
+ }
+
+ MonoAgcState(const MonoAgcState&) = delete;
+ MonoAgcState& operator=(const MonoAgcState&) = delete;
+ int32_t gains[11];
+ Handle* state;
+};
+
+int GainControlImpl::instance_counter_ = 0;
+
+GainControlImpl::GainControlImpl()
+ : data_dumper_(new ApmDataDumper(instance_counter_)),
+ use_legacy_gain_applier_(UseLegacyDigitalGainApplier()),
+ mode_(kAdaptiveAnalog),
+ minimum_capture_level_(0),
+ maximum_capture_level_(255),
+ limiter_enabled_(true),
+ target_level_dbfs_(3),
+ compression_gain_db_(9),
+ analog_capture_level_(0),
+ was_analog_level_set_(false),
+ stream_is_saturated_(false) {}
+
+GainControlImpl::~GainControlImpl() = default;
+
+void GainControlImpl::ProcessRenderAudio(
+ rtc::ArrayView<const int16_t> packed_render_audio) {
+ for (size_t ch = 0; ch < mono_agcs_.size(); ++ch) {
+ WebRtcAgc_AddFarend(mono_agcs_[ch]->state, packed_render_audio.data(),
+ packed_render_audio.size());
+ }
+}
+
+void GainControlImpl::PackRenderAudioBuffer(
+ const AudioBuffer& audio,
+ std::vector<int16_t>* packed_buffer) {
+ RTC_DCHECK_GE(AudioBuffer::kMaxSplitFrameLength, audio.num_frames_per_band());
+ std::array<int16_t, AudioBuffer::kMaxSplitFrameLength>
+ mixed_16_kHz_render_data;
+ rtc::ArrayView<const int16_t> mixed_16_kHz_render(
+ mixed_16_kHz_render_data.data(), audio.num_frames_per_band());
+ if (audio.num_channels() == 1) {
+ FloatS16ToS16(audio.split_bands_const(0)[kBand0To8kHz],
+ audio.num_frames_per_band(), mixed_16_kHz_render_data.data());
+ } else {
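+ // Downmix to mono by averaging the lower-band samples across channels.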
+ const int num_channels = static_cast<int>(audio.num_channels());
+ for (size_t i = 0; i < audio.num_frames_per_band(); ++i) {
+ int32_t sum = 0;
+ for (int ch = 0; ch < num_channels; ++ch) {
+ sum += FloatS16ToS16(audio.split_channels_const(kBand0To8kHz)[ch][i]);
+ }
+ mixed_16_kHz_render_data[i] = sum / num_channels;
+ }
+ }
+
+ packed_buffer->clear();
+ packed_buffer->insert(
+ packed_buffer->end(), mixed_16_kHz_render.data(),
+ (mixed_16_kHz_render.data() + audio.num_frames_per_band()));
+}
+
+int GainControlImpl::AnalyzeCaptureAudio(const AudioBuffer& audio) {
+ RTC_DCHECK(num_proc_channels_);
+ RTC_DCHECK_GE(AudioBuffer::kMaxSplitFrameLength, audio.num_frames_per_band());
+ RTC_DCHECK_EQ(audio.num_channels(), *num_proc_channels_);
+ RTC_DCHECK_LE(*num_proc_channels_, mono_agcs_.size());
+
+ int16_t split_band_data[AudioBuffer::kMaxNumBands]
+ [AudioBuffer::kMaxSplitFrameLength];
+ int16_t* split_bands[AudioBuffer::kMaxNumBands] = {
+ split_band_data[0], split_band_data[1], split_band_data[2]};
+
+ if (mode_ == kAdaptiveAnalog) {
+ for (size_t ch = 0; ch < mono_agcs_.size(); ++ch) {
+ capture_levels_[ch] = analog_capture_level_;
+
+ audio.ExportSplitChannelData(ch, split_bands);
+
+ int err =
+ WebRtcAgc_AddMic(mono_agcs_[ch]->state, split_bands,
+ audio.num_bands(), audio.num_frames_per_band());
+
+ if (err != AudioProcessing::kNoError) {
+ return AudioProcessing::kUnspecifiedError;
+ }
+ }
+ } else if (mode_ == kAdaptiveDigital) {
+ for (size_t ch = 0; ch < mono_agcs_.size(); ++ch) {
+ int32_t capture_level_out = 0;
+
+ audio.ExportSplitChannelData(ch, split_bands);
+
+ int err =
+ WebRtcAgc_VirtualMic(mono_agcs_[ch]->state, split_bands,
+ audio.num_bands(), audio.num_frames_per_band(),
+ analog_capture_level_, &capture_level_out);
+
+ capture_levels_[ch] = capture_level_out;
+
+ if (err != AudioProcessing::kNoError) {
+ return AudioProcessing::kUnspecifiedError;
+ }
+ }
+ }
+
+ return AudioProcessing::kNoError;
+}
+
+int GainControlImpl::ProcessCaptureAudio(AudioBuffer* audio,
+ bool stream_has_echo) {
+ if (mode_ == kAdaptiveAnalog && !was_analog_level_set_) {
+ return AudioProcessing::kStreamParameterNotSetError;
+ }
+
+ RTC_DCHECK(num_proc_channels_);
+ RTC_DCHECK_GE(AudioBuffer::kMaxSplitFrameLength,
+ audio->num_frames_per_band());
+ RTC_DCHECK_EQ(audio->num_channels(), *num_proc_channels_);
+
+ stream_is_saturated_ = false;
+ bool error_reported = false;
+ for (size_t ch = 0; ch < mono_agcs_.size(); ++ch) {
+ int16_t split_band_data[AudioBuffer::kMaxNumBands]
+ [AudioBuffer::kMaxSplitFrameLength];
+ int16_t* split_bands[AudioBuffer::kMaxNumBands] = {
+ split_band_data[0], split_band_data[1], split_band_data[2]};
+ audio->ExportSplitChannelData(ch, split_bands);
+
+ // The call to stream_has_echo() is OK from a deadlock perspective
+ // as the capture lock is already held.
+ int32_t new_capture_level = 0;
+ uint8_t saturation_warning = 0;
+ int err_analyze = WebRtcAgc_Analyze(
+ mono_agcs_[ch]->state, split_bands, audio->num_bands(),
+ audio->num_frames_per_band(), capture_levels_[ch], &new_capture_level,
+ stream_has_echo, &saturation_warning, mono_agcs_[ch]->gains);
+ capture_levels_[ch] = new_capture_level;
+
+ error_reported = error_reported || err_analyze != AudioProcessing::kNoError;
+
+ stream_is_saturated_ = stream_is_saturated_ || saturation_warning == 1;
+ }
+
+ // Choose the gains to apply: pick the channel with the largest final gain.
+ size_t index_to_apply = 0;
+ for (size_t ch = 1; ch < mono_agcs_.size(); ++ch) {
+ if (mono_agcs_[index_to_apply]->gains[10] < mono_agcs_[ch]->gains[10]) {
+ index_to_apply = ch;
+ }
+ }
+
+ if (use_legacy_gain_applier_) {
+ for (size_t ch = 0; ch < mono_agcs_.size(); ++ch) {
+ int16_t split_band_data[AudioBuffer::kMaxNumBands]
+ [AudioBuffer::kMaxSplitFrameLength];
+ int16_t* split_bands[AudioBuffer::kMaxNumBands] = {
+ split_band_data[0], split_band_data[1], split_band_data[2]};
+ audio->ExportSplitChannelData(ch, split_bands);
+
+ int err_process = WebRtcAgc_Process(
+ mono_agcs_[ch]->state, mono_agcs_[index_to_apply]->gains, split_bands,
+ audio->num_bands(), split_bands);
+ RTC_DCHECK_EQ(err_process, 0);
+
+ audio->ImportSplitChannelData(ch, split_bands);
+ }
+ } else {
+ for (size_t ch = 0; ch < mono_agcs_.size(); ++ch) {
+ ApplyDigitalGain(mono_agcs_[index_to_apply]->gains, audio->num_bands(),
+ audio->split_bands(ch));
+ }
+ }
+
+ RTC_DCHECK_LT(0ul, *num_proc_channels_);
+ if (mode_ == kAdaptiveAnalog) {
+ // Take the analog level to be the minimum across all channels.
+ analog_capture_level_ = capture_levels_[0];
+ for (size_t ch = 1; ch < mono_agcs_.size(); ++ch) {
+ analog_capture_level_ =
+ std::min(analog_capture_level_, capture_levels_[ch]);
+ }
+ }
+
+ if (error_reported) {
+ return AudioProcessing::kUnspecifiedError;
+ }
+
+ was_analog_level_set_ = false;
+
+ return AudioProcessing::kNoError;
+}
+
+
+// TODO(ajm): ensure this is called under kAdaptiveAnalog.
+int GainControlImpl::set_stream_analog_level(int level) {
+ data_dumper_->DumpRaw("gain_control_set_stream_analog_level", 1, &level);
+
+ was_analog_level_set_ = true;
+ if (level < minimum_capture_level_ || level > maximum_capture_level_) {
+ return AudioProcessing::kBadParameterError;
+ }
+ analog_capture_level_ = level;
+
+ return AudioProcessing::kNoError;
+}
+
+int GainControlImpl::stream_analog_level() const {
+ data_dumper_->DumpRaw("gain_control_stream_analog_level", 1,
+ &analog_capture_level_);
+ return analog_capture_level_;
+}
+
+int GainControlImpl::set_mode(Mode mode) {
+ if (MapSetting(mode) == -1) {
+ return AudioProcessing::kBadParameterError;
+ }
+
+ mode_ = mode;
+ RTC_DCHECK(num_proc_channels_);
+ RTC_DCHECK(sample_rate_hz_);
+ Initialize(*num_proc_channels_, *sample_rate_hz_);
+ return AudioProcessing::kNoError;
+}
+
+
+int GainControlImpl::set_analog_level_limits(int minimum, int maximum) {
+ if (minimum < 0 || maximum > 65535 || maximum < minimum) {
+ return AudioProcessing::kBadParameterError;
+ }
+
+ minimum_capture_level_ = minimum;
+ maximum_capture_level_ = maximum;
+
+ RTC_DCHECK(num_proc_channels_);
+ RTC_DCHECK(sample_rate_hz_);
+ Initialize(*num_proc_channels_, *sample_rate_hz_);
+ return AudioProcessing::kNoError;
+}
+
+
+int GainControlImpl::set_target_level_dbfs(int level) {
+ if (level > 31 || level < 0) {
+ return AudioProcessing::kBadParameterError;
+ }
+ target_level_dbfs_ = level;
+ return Configure();
+}
+
+int GainControlImpl::set_compression_gain_db(int gain) {
+ if (gain < 0 || gain > 90) {
+ RTC_LOG(LS_ERROR) << "set_compression_gain_db(" << gain << ") failed.";
+ return AudioProcessing::kBadParameterError;
+ }
+ compression_gain_db_ = gain;
+ return Configure();
+}
+
+int GainControlImpl::enable_limiter(bool enable) {
+ limiter_enabled_ = enable;
+ return Configure();
+}
+
+void GainControlImpl::Initialize(size_t num_proc_channels, int sample_rate_hz) {
+ data_dumper_->InitiateNewSetOfRecordings();
+
+ RTC_DCHECK(sample_rate_hz == 16000 || sample_rate_hz == 32000 ||
+ sample_rate_hz == 48000);
+
+ num_proc_channels_ = num_proc_channels;
+ sample_rate_hz_ = sample_rate_hz;
+
+ mono_agcs_.resize(*num_proc_channels_);
+ capture_levels_.resize(*num_proc_channels_);
+ for (size_t ch = 0; ch < mono_agcs_.size(); ++ch) {
+ if (!mono_agcs_[ch]) {
+ mono_agcs_[ch].reset(new MonoAgcState());
+ }
+
+ int error = WebRtcAgc_Init(mono_agcs_[ch]->state, minimum_capture_level_,
+ maximum_capture_level_, MapSetting(mode_),
+ *sample_rate_hz_);
+ RTC_DCHECK_EQ(error, 0);
+ capture_levels_[ch] = analog_capture_level_;
+ }
+
+ Configure();
+}
+
+int GainControlImpl::Configure() {
+ WebRtcAgcConfig config;
+ // TODO(ajm): Flip the sign here (since AGC expects a positive value) if we
+ // change the interface.
+ // RTC_DCHECK_LE(target_level_dbfs_, 0);
+ // config.targetLevelDbfs = static_cast<int16_t>(-target_level_dbfs_);
+ config.targetLevelDbfs = static_cast<int16_t>(target_level_dbfs_);
+ config.compressionGaindB = static_cast<int16_t>(compression_gain_db_);
+ config.limiterEnable = limiter_enabled_;
+
+ int error = AudioProcessing::kNoError;
+ for (size_t ch = 0; ch < mono_agcs_.size(); ++ch) {
+ int error_ch = WebRtcAgc_set_config(mono_agcs_[ch]->state, config);
+ if (error_ch != AudioProcessing::kNoError) {
+ error = error_ch;
+ }
+ }
+ return error;
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/gain_control_impl.h b/third_party/libwebrtc/modules/audio_processing/gain_control_impl.h
new file mode 100644
index 0000000000..b65d697945
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/gain_control_impl.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_GAIN_CONTROL_IMPL_H_
+#define MODULES_AUDIO_PROCESSING_GAIN_CONTROL_IMPL_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <memory>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/array_view.h"
+#include "modules/audio_processing/agc/gain_control.h"
+
+namespace webrtc {
+
+class ApmDataDumper;
+class AudioBuffer;
+
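+// Legacy analog/digital gain controller (AGC1). A per-frame usage sketch
+// (assumed from the APM integration; not a documented contract):
+//   std::vector<int16_t> packed;
+//   GainControlImpl::PackRenderAudioBuffer(render_audio, &packed);
+//   agc.ProcessRenderAudio(packed);
+//   agc.set_stream_analog_level(mic_level);
+//   agc.AnalyzeCaptureAudio(capture_audio);
+//   agc.ProcessCaptureAudio(&capture_audio, stream_has_echo);
+//   mic_level = agc.stream_analog_level();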
+class GainControlImpl : public GainControl {
+ public:
+ GainControlImpl();
+ GainControlImpl(const GainControlImpl&) = delete;
+ GainControlImpl& operator=(const GainControlImpl&) = delete;
+
+ ~GainControlImpl() override;
+
+ void ProcessRenderAudio(rtc::ArrayView<const int16_t> packed_render_audio);
+ int AnalyzeCaptureAudio(const AudioBuffer& audio);
+ int ProcessCaptureAudio(AudioBuffer* audio, bool stream_has_echo);
+
+ void Initialize(size_t num_proc_channels, int sample_rate_hz);
+
+ static void PackRenderAudioBuffer(const AudioBuffer& audio,
+ std::vector<int16_t>* packed_buffer);
+
+ // GainControl implementation.
+ int stream_analog_level() const override;
+ bool is_limiter_enabled() const override { return limiter_enabled_; }
+ Mode mode() const override { return mode_; }
+ int set_mode(Mode mode) override;
+ int compression_gain_db() const override { return compression_gain_db_; }
+ int set_analog_level_limits(int minimum, int maximum) override;
+ int set_compression_gain_db(int gain) override;
+ int set_target_level_dbfs(int level) override;
+ int enable_limiter(bool enable) override;
+ int set_stream_analog_level(int level) override;
+
+ private:
+ struct MonoAgcState;
+
+ // GainControl implementation.
+ int target_level_dbfs() const override { return target_level_dbfs_; }
+ int analog_level_minimum() const override { return minimum_capture_level_; }
+ int analog_level_maximum() const override { return maximum_capture_level_; }
+ bool stream_is_saturated() const override { return stream_is_saturated_; }
+
+ int Configure();
+
+ std::unique_ptr<ApmDataDumper> data_dumper_;
+
+ const bool use_legacy_gain_applier_;
+ Mode mode_;
+ int minimum_capture_level_;
+ int maximum_capture_level_;
+ bool limiter_enabled_;
+ int target_level_dbfs_;
+ int compression_gain_db_;
+ int analog_capture_level_ = 0;
+ bool was_analog_level_set_;
+ bool stream_is_saturated_;
+
+ std::vector<std::unique_ptr<MonoAgcState>> mono_agcs_;
+ std::vector<int> capture_levels_;
+
+ absl::optional<size_t> num_proc_channels_;
+ absl::optional<int> sample_rate_hz_;
+
+ static int instance_counter_;
+};
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_GAIN_CONTROL_IMPL_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/gain_control_unittest.cc b/third_party/libwebrtc/modules/audio_processing/gain_control_unittest.cc
new file mode 100644
index 0000000000..1662dc506f
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/gain_control_unittest.cc
@@ -0,0 +1,394 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include <vector>
+
+#include "api/array_view.h"
+#include "modules/audio_processing/audio_buffer.h"
+#include "modules/audio_processing/gain_control_impl.h"
+#include "modules/audio_processing/test/audio_buffer_tools.h"
+#include "modules/audio_processing/test/bitexactness_tools.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
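+// Each processed frame is 10 ms long, so 100 frames cover 1 second of audio.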
+const int kNumFramesToProcess = 100;
+
+void ProcessOneFrame(int sample_rate_hz,
+ AudioBuffer* render_audio_buffer,
+ AudioBuffer* capture_audio_buffer,
+ GainControlImpl* gain_controller) {
+ if (sample_rate_hz > AudioProcessing::kSampleRate16kHz) {
+ render_audio_buffer->SplitIntoFrequencyBands();
+ capture_audio_buffer->SplitIntoFrequencyBands();
+ }
+
+ std::vector<int16_t> render_audio;
+ GainControlImpl::PackRenderAudioBuffer(*render_audio_buffer, &render_audio);
+ gain_controller->ProcessRenderAudio(render_audio);
+ gain_controller->AnalyzeCaptureAudio(*capture_audio_buffer);
+ gain_controller->ProcessCaptureAudio(capture_audio_buffer, false);
+
+ if (sample_rate_hz > AudioProcessing::kSampleRate16kHz) {
+ capture_audio_buffer->MergeFrequencyBands();
+ }
+}
+
+void SetupComponent(int sample_rate_hz,
+ GainControl::Mode mode,
+ int target_level_dbfs,
+ int stream_analog_level,
+ int compression_gain_db,
+ bool enable_limiter,
+ int analog_level_min,
+ int analog_level_max,
+ GainControlImpl* gain_controller) {
+ gain_controller->Initialize(1, sample_rate_hz);
+ GainControl* gc = static_cast<GainControl*>(gain_controller);
+ gc->set_mode(mode);
+ gc->set_stream_analog_level(stream_analog_level);
+ gc->set_target_level_dbfs(target_level_dbfs);
+ gc->set_compression_gain_db(compression_gain_db);
+ gc->enable_limiter(enable_limiter);
+ gc->set_analog_level_limits(analog_level_min, analog_level_max);
+}
+
+void RunBitExactnessTest(int sample_rate_hz,
+ size_t num_channels,
+ GainControl::Mode mode,
+ int target_level_dbfs,
+ int stream_analog_level,
+ int compression_gain_db,
+ bool enable_limiter,
+ int analog_level_min,
+ int analog_level_max,
+ int achieved_stream_analog_level_reference,
+ rtc::ArrayView<const float> output_reference) {
+ GainControlImpl gain_controller;
+ SetupComponent(sample_rate_hz, mode, target_level_dbfs, stream_analog_level,
+ compression_gain_db, enable_limiter, analog_level_min,
+ analog_level_max, &gain_controller);
+
+ const int samples_per_channel = rtc::CheckedDivExact(sample_rate_hz, 100);
+ const StreamConfig render_config(sample_rate_hz, num_channels);
+ AudioBuffer render_buffer(
+ render_config.sample_rate_hz(), render_config.num_channels(),
+ render_config.sample_rate_hz(), 1, render_config.sample_rate_hz(), 1);
+ test::InputAudioFile render_file(
+ test::GetApmRenderTestVectorFileName(sample_rate_hz));
+ std::vector<float> render_input(samples_per_channel * num_channels);
+
+ const StreamConfig capture_config(sample_rate_hz, num_channels);
+ AudioBuffer capture_buffer(
+ capture_config.sample_rate_hz(), capture_config.num_channels(),
+ capture_config.sample_rate_hz(), 1, capture_config.sample_rate_hz(), 1);
+ test::InputAudioFile capture_file(
+ test::GetApmCaptureTestVectorFileName(sample_rate_hz));
+ std::vector<float> capture_input(samples_per_channel * num_channels);
+
+ for (int frame_no = 0; frame_no < kNumFramesToProcess; ++frame_no) {
+ ReadFloatSamplesFromStereoFile(samples_per_channel, num_channels,
+ &render_file, render_input);
+ ReadFloatSamplesFromStereoFile(samples_per_channel, num_channels,
+ &capture_file, capture_input);
+
+ test::CopyVectorToAudioBuffer(render_config, render_input, &render_buffer);
+ test::CopyVectorToAudioBuffer(capture_config, capture_input,
+ &capture_buffer);
+
+ ProcessOneFrame(sample_rate_hz, &render_buffer, &capture_buffer,
+ &gain_controller);
+ }
+
+ // Extract and verify the test results.
+ std::vector<float> capture_output;
+ test::ExtractVectorFromAudioBuffer(capture_config, &capture_buffer,
+ &capture_output);
+
+ EXPECT_EQ(achieved_stream_analog_level_reference,
+ gain_controller.stream_analog_level());
+
+ // Compare the output with the reference. Only the first values of the output
+ // from the last processed frame are compared, to avoid having to specify all
+ // preceding frames as test vectors. As the algorithm being tested has
+ // memory, testing only the last frame implicitly also covers the preceding
+ // frames.
+ const float kElementErrorBound = 1.0f / 32768.0f;
+ EXPECT_TRUE(test::VerifyDeinterleavedArray(
+ capture_config.num_frames(), capture_config.num_channels(),
+ output_reference, capture_output, kElementErrorBound));
+}
+
+} // namespace
+
+// TODO(peah): Activate all these tests for ARM and ARM64 once the issue on the
+// Chromium ARM and ARM64 bots has been identified. This is tracked in
+// https://bugs.chromium.org/p/webrtc/issues/detail?id=5711.
+
+#if !(defined(WEBRTC_ARCH_ARM64) || defined(WEBRTC_ARCH_ARM) || \
+ defined(WEBRTC_ANDROID))
+TEST(GainControlBitExactnessTest,
+ Mono16kHz_AdaptiveAnalog_Tl10_SL50_CG5_Lim_AL0_100) {
+#else
+TEST(GainControlBitExactnessTest,
+ DISABLED_Mono16kHz_AdaptiveAnalog_Tl10_SL50_CG5_Lim_AL0_100) {
+#endif
+ const int kStreamAnalogLevelReference = 50;
+ const float kOutputReference[] = {-0.006561f, -0.004608f, -0.002899f};
+ RunBitExactnessTest(16000, 1, GainControl::Mode::kAdaptiveAnalog, 10, 50, 5,
+ true, 0, 100, kStreamAnalogLevelReference,
+ kOutputReference);
+}
+
+#if !(defined(WEBRTC_ARCH_ARM64) || defined(WEBRTC_ARCH_ARM) || \
+ defined(WEBRTC_ANDROID))
+TEST(GainControlBitExactnessTest,
+ Stereo16kHz_AdaptiveAnalog_Tl10_SL50_CG5_Lim_AL0_100) {
+#else
+TEST(GainControlBitExactnessTest,
+ DISABLED_Stereo16kHz_AdaptiveAnalog_Tl10_SL50_CG5_Lim_AL0_100) {
+#endif
+ const int kStreamAnalogLevelReference = 50;
+ const float kOutputReference[] = {-0.027313f, -0.015900f, -0.028107f,
+ -0.027313f, -0.015900f, -0.028107f};
+ RunBitExactnessTest(16000, 2, GainControl::Mode::kAdaptiveAnalog, 10, 50, 5,
+ true, 0, 100, kStreamAnalogLevelReference,
+ kOutputReference);
+}
+
+#if !(defined(WEBRTC_ARCH_ARM64) || defined(WEBRTC_ARCH_ARM) || \
+ defined(WEBRTC_ANDROID))
+TEST(GainControlBitExactnessTest,
+ Mono32kHz_AdaptiveAnalog_Tl10_SL50_CG5_Lim_AL0_100) {
+#else
+TEST(GainControlBitExactnessTest,
+ DISABLED_Mono32kHz_AdaptiveAnalog_Tl10_SL50_CG5_Lim_AL0_100) {
+#endif
+ const int kStreamAnalogLevelReference = 50;
+ const float kOutputReference[] = {-0.010162f, -0.009155f, -0.008301f};
+ RunBitExactnessTest(32000, 1, GainControl::Mode::kAdaptiveAnalog, 10, 50, 5,
+ true, 0, 100, kStreamAnalogLevelReference,
+ kOutputReference);
+}
+
+#if !(defined(WEBRTC_ARCH_ARM64) || defined(WEBRTC_ARCH_ARM) || \
+ defined(WEBRTC_ANDROID))
+TEST(GainControlBitExactnessTest,
+ Mono48kHz_AdaptiveAnalog_Tl10_SL50_CG5_Lim_AL0_100) {
+#else
+TEST(GainControlBitExactnessTest,
+ DISABLED_Mono48kHz_AdaptiveAnalog_Tl10_SL50_CG5_Lim_AL0_100) {
+#endif
+ const int kStreamAnalogLevelReference = 50;
+ const float kOutputReference[] = {-0.010162f, -0.009155f, -0.008301f};
+ RunBitExactnessTest(32000, 1, GainControl::Mode::kAdaptiveAnalog, 10, 50, 5,
+ true, 0, 100, kStreamAnalogLevelReference,
+ kOutputReference);
+}
+
+#if !(defined(WEBRTC_ARCH_ARM64) || defined(WEBRTC_ARCH_ARM) || \
+ defined(WEBRTC_ANDROID))
+TEST(GainControlBitExactnessTest,
+ Mono16kHz_AdaptiveDigital_Tl10_SL50_CG5_Lim_AL0_100) {
+#else
+TEST(GainControlBitExactnessTest,
+ DISABLED_Mono16kHz_AdaptiveDigital_Tl10_SL50_CG5_Lim_AL0_100) {
+#endif
+ const int kStreamAnalogLevelReference = 50;
+ const float kOutputReference[] = {-0.003967f, -0.002777f, -0.001770f};
+ RunBitExactnessTest(16000, 1, GainControl::Mode::kAdaptiveDigital, 10, 50, 5,
+ true, 0, 100, kStreamAnalogLevelReference,
+ kOutputReference);
+}
+
+#if !(defined(WEBRTC_ARCH_ARM64) || defined(WEBRTC_ARCH_ARM) || \
+ defined(WEBRTC_ANDROID))
+TEST(GainControlBitExactnessTest,
+ Stereo16kHz_AdaptiveDigital_Tl10_SL50_CG5_Lim_AL0_100) {
+#else
+TEST(GainControlBitExactnessTest,
+ DISABLED_Stereo16kHz_AdaptiveDigital_Tl10_SL50_CG5_Lim_AL0_100) {
+#endif
+ const int kStreamAnalogLevelReference = 50;
+ const float kOutputReference[] = {-0.015411f, -0.008972f, -0.015839f,
+ -0.015411f, -0.008972f, -0.015839f};
+ RunBitExactnessTest(16000, 2, GainControl::Mode::kAdaptiveDigital, 10, 50, 5,
+ true, 0, 100, kStreamAnalogLevelReference,
+ kOutputReference);
+}
+
+#if !(defined(WEBRTC_ARCH_ARM64) || defined(WEBRTC_ARCH_ARM) || \
+ defined(WEBRTC_ANDROID))
+TEST(GainControlBitExactnessTest,
+ Mono32kHz_AdaptiveDigital_Tl10_SL50_CG5_Lim_AL0_100) {
+#else
+TEST(GainControlBitExactnessTest,
+ DISABLED_Mono32kHz_AdaptiveDigital_Tl10_SL50_CG5_Lim_AL0_100) {
+#endif
+ const int kStreamAnalogLevelReference = 50;
+ const float kOutputReference[] = {-0.006134f, -0.005524f, -0.005005f};
+ RunBitExactnessTest(32000, 1, GainControl::Mode::kAdaptiveDigital, 10, 50, 5,
+ true, 0, 100, kStreamAnalogLevelReference,
+ kOutputReference);
+}
+
+#if !(defined(WEBRTC_ARCH_ARM64) || defined(WEBRTC_ARCH_ARM) || \
+ defined(WEBRTC_ANDROID))
+TEST(GainControlBitExactnessTest,
+ Mono48kHz_AdaptiveDigital_Tl10_SL50_CG5_Lim_AL0_100) {
+#else
+TEST(GainControlBitExactnessTest,
+ DISABLED_Mono48kHz_AdaptiveDigital_Tl10_SL50_CG5_Lim_AL0_100) {
+#endif
+ const int kStreamAnalogLevelReference = 50;
+ const float kOutputReference[] = {-0.006134f, -0.005524f, -0.005005f};
+ RunBitExactnessTest(32000, 1, GainControl::Mode::kAdaptiveDigital, 10, 50, 5,
+ true, 0, 100, kStreamAnalogLevelReference,
+ kOutputReference);
+}
+
+#if !(defined(WEBRTC_ARCH_ARM64) || defined(WEBRTC_ARCH_ARM) || \
+ defined(WEBRTC_ANDROID))
+TEST(GainControlBitExactnessTest,
+ Mono16kHz_FixedDigital_Tl10_SL50_CG5_Lim_AL0_100) {
+#else
+TEST(GainControlBitExactnessTest,
+ DISABLED_Mono16kHz_FixedDigital_Tl10_SL50_CG5_Lim_AL0_100) {
+#endif
+ const int kStreamAnalogLevelReference = 50;
+ const float kOutputReference[] = {-0.011749f, -0.008270f, -0.005219f};
+ RunBitExactnessTest(16000, 1, GainControl::Mode::kFixedDigital, 10, 50, 5,
+ true, 0, 100, kStreamAnalogLevelReference,
+ kOutputReference);
+}
+
+#if !(defined(WEBRTC_ARCH_ARM64) || defined(WEBRTC_ARCH_ARM) || \
+ defined(WEBRTC_ANDROID))
+TEST(GainControlBitExactnessTest,
+ Stereo16kHz_FixedDigital_Tl10_SL50_CG5_Lim_AL0_100) {
+#else
+TEST(GainControlBitExactnessTest,
+ DISABLED_Stereo16kHz_FixedDigital_Tl10_SL50_CG5_Lim_AL0_100) {
+#endif
+ const int kStreamAnalogLevelReference = 50;
+ const float kOutputReference[] = {-0.048896f, -0.028479f, -0.050345f,
+ -0.048896f, -0.028479f, -0.050345f};
+ RunBitExactnessTest(16000, 2, GainControl::Mode::kFixedDigital, 10, 50, 5,
+ true, 0, 100, kStreamAnalogLevelReference,
+ kOutputReference);
+}
+
+#if !(defined(WEBRTC_ARCH_ARM64) || defined(WEBRTC_ARCH_ARM) || \
+ defined(WEBRTC_ANDROID))
+TEST(GainControlBitExactnessTest,
+ Mono32kHz_FixedDigital_Tl10_SL50_CG5_Lim_AL0_100) {
+#else
+TEST(GainControlBitExactnessTest,
+ DISABLED_Mono32kHz_FixedDigital_Tl10_SL50_CG5_Lim_AL0_100) {
+#endif
+ const int kStreamAnalogLevelReference = 50;
+ const float kOutputReference[] = {-0.018158f, -0.016357f, -0.014832f};
+ RunBitExactnessTest(32000, 1, GainControl::Mode::kFixedDigital, 10, 50, 5,
+ true, 0, 100, kStreamAnalogLevelReference,
+ kOutputReference);
+}
+
+#if !(defined(WEBRTC_ARCH_ARM64) || defined(WEBRTC_ARCH_ARM) || \
+ defined(WEBRTC_ANDROID))
+TEST(GainControlBitExactnessTest,
+ Mono48kHz_FixedDigital_Tl10_SL50_CG5_Lim_AL0_100) {
+#else
+TEST(GainControlBitExactnessTest,
+ DISABLED_Mono48kHz_FixedDigital_Tl10_SL50_CG5_Lim_AL0_100) {
+#endif
+ const int kStreamAnalogLevelReference = 50;
+ const float kOutputReference[] = {-0.018158f, -0.016357f, -0.014832f};
+ RunBitExactnessTest(32000, 1, GainControl::Mode::kFixedDigital, 10, 50, 5,
+ true, 0, 100, kStreamAnalogLevelReference,
+ kOutputReference);
+}
+
+#if !(defined(WEBRTC_ARCH_ARM64) || defined(WEBRTC_ARCH_ARM) || \
+ defined(WEBRTC_ANDROID))
+TEST(GainControlBitExactnessTest,
+ Mono16kHz_AdaptiveAnalog_Tl10_SL10_CG5_Lim_AL0_100) {
+#else
+TEST(GainControlBitExactnessTest,
+ DISABLED_Mono16kHz_AdaptiveAnalog_Tl10_SL10_CG5_Lim_AL0_100) {
+#endif
+ const int kStreamAnalogLevelReference = 12;
+ const float kOutputReference[] = {-0.006561f, -0.004608f, -0.002899f};
+ RunBitExactnessTest(16000, 1, GainControl::Mode::kAdaptiveAnalog, 10, 10, 5,
+ true, 0, 100, kStreamAnalogLevelReference,
+ kOutputReference);
+}
+
+#if !(defined(WEBRTC_ARCH_ARM64) || defined(WEBRTC_ARCH_ARM) || \
+ defined(WEBRTC_ANDROID))
+TEST(GainControlBitExactnessTest,
+ Mono16kHz_AdaptiveAnalog_Tl10_SL100_CG5_Lim_AL70_80) {
+#else
+TEST(GainControlBitExactnessTest,
+ DISABLED_Mono16kHz_AdaptiveAnalog_Tl10_SL100_CG5_Lim_AL70_80) {
+#endif
+ const int kStreamAnalogLevelReference = 100;
+ const float kOutputReference[] = {-0.003998f, -0.002808f, -0.001770f};
+ RunBitExactnessTest(16000, 1, GainControl::Mode::kAdaptiveAnalog, 10, 100, 5,
+ true, 70, 80, kStreamAnalogLevelReference,
+ kOutputReference);
+}
+
+#if !(defined(WEBRTC_ARCH_ARM64) || defined(WEBRTC_ARCH_ARM) || \
+ defined(WEBRTC_ANDROID))
+TEST(GainControlBitExactnessTest,
+ Mono16kHz_AdaptiveDigital_Tl10_SL100_CG5_NoLim_AL0_100) {
+#else
+TEST(GainControlBitExactnessTest,
+ DISABLED_Mono16kHz_AdaptiveDigital_Tl10_SL100_CG5_NoLim_AL0_100) {
+#endif
+ const int kStreamAnalogLevelReference = 100;
+ const float kOutputReference[] = {-0.004028f, -0.002838f, -0.001770f};
+ RunBitExactnessTest(16000, 1, GainControl::Mode::kAdaptiveDigital, 10, 100, 5,
+ false, 0, 100, kStreamAnalogLevelReference,
+ kOutputReference);
+}
+
+#if !(defined(WEBRTC_ARCH_ARM64) || defined(WEBRTC_ARCH_ARM) || \
+ defined(WEBRTC_ANDROID))
+TEST(GainControlBitExactnessTest,
+ Mono16kHz_AdaptiveDigital_Tl40_SL100_CG5_Lim_AL0_100) {
+#else
+TEST(GainControlBitExactnessTest,
+ DISABLED_Mono16kHz_AdaptiveDigital_Tl40_SL100_CG5_Lim_AL0_100) {
+#endif
+ const int kStreamAnalogLevelReference = 100;
+ const float kOutputReference[] = {-0.008728f, -0.006134f, -0.003845f};
+ RunBitExactnessTest(16000, 1, GainControl::Mode::kAdaptiveDigital, 40, 100, 5,
+ true, 0, 100, kStreamAnalogLevelReference,
+ kOutputReference);
+}
+
+#if !(defined(WEBRTC_ARCH_ARM64) || defined(WEBRTC_ARCH_ARM) || \
+ defined(WEBRTC_ANDROID))
+TEST(GainControlBitExactnessTest,
+ Mono16kHz_AdaptiveDigital_Tl10_SL100_CG30_Lim_AL0_100) {
+#else
+TEST(GainControlBitExactnessTest,
+ DISABLED_Mono16kHz_AdaptiveDigital_Tl10_SL100_CG30_Lim_AL0_100) {
+#endif
+ const int kStreamAnalogLevelReference = 100;
+ const float kOutputReference[] = {-0.005859f, -0.004120f, -0.002594f};
+ RunBitExactnessTest(16000, 1, GainControl::Mode::kAdaptiveDigital, 10, 100,
+ 30, true, 0, 100, kStreamAnalogLevelReference,
+ kOutputReference);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/gain_controller2.cc b/third_party/libwebrtc/modules/audio_processing/gain_controller2.cc
new file mode 100644
index 0000000000..f1907bbd92
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/gain_controller2.cc
@@ -0,0 +1,182 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/gain_controller2.h"
+
+#include <memory>
+#include <utility>
+
+#include "common_audio/include/audio_util.h"
+#include "modules/audio_processing/agc2/cpu_features.h"
+#include "modules/audio_processing/audio_buffer.h"
+#include "modules/audio_processing/include/audio_frame_view.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/strings/string_builder.h"
+#include "system_wrappers/include/field_trial.h"
+
+namespace webrtc {
+namespace {
+
+using Agc2Config = AudioProcessing::Config::GainController2;
+
+constexpr int kUnspecifiedAnalogLevel = -1;
+constexpr int kLogLimiterStatsPeriodMs = 30'000;
+constexpr int kFrameLengthMs = 10;
+constexpr int kLogLimiterStatsPeriodNumFrames =
+ kLogLimiterStatsPeriodMs / kFrameLengthMs;
+
+// Detects the available CPU features and applies any kill-switches.
+AvailableCpuFeatures GetAllowedCpuFeatures() {
+ AvailableCpuFeatures features = GetAvailableCpuFeatures();
+ if (field_trial::IsEnabled("WebRTC-Agc2SimdSse2KillSwitch")) {
+ features.sse2 = false;
+ }
+ if (field_trial::IsEnabled("WebRTC-Agc2SimdAvx2KillSwitch")) {
+ features.avx2 = false;
+ }
+ if (field_trial::IsEnabled("WebRTC-Agc2SimdNeonKillSwitch")) {
+ features.neon = false;
+ }
+ return features;
+}
+
+// Creates an adaptive digital gain controller if enabled.
+std::unique_ptr<AdaptiveDigitalGainController> CreateAdaptiveDigitalController(
+ const Agc2Config::AdaptiveDigital& config,
+ int sample_rate_hz,
+ int num_channels,
+ ApmDataDumper* data_dumper) {
+ if (config.enabled) {
+ return std::make_unique<AdaptiveDigitalGainController>(
+ data_dumper, config, sample_rate_hz, num_channels);
+ }
+ return nullptr;
+}
+
+} // namespace
+
+std::atomic<int> GainController2::instance_count_(0);
+
+GainController2::GainController2(const Agc2Config& config,
+ int sample_rate_hz,
+ int num_channels,
+ bool use_internal_vad)
+ : cpu_features_(GetAllowedCpuFeatures()),
+ data_dumper_(instance_count_.fetch_add(1) + 1),
+ fixed_gain_applier_(
+ /*hard_clip_samples=*/false,
+ /*initial_gain_factor=*/DbToRatio(config.fixed_digital.gain_db)),
+ adaptive_digital_controller_(
+ CreateAdaptiveDigitalController(config.adaptive_digital,
+ sample_rate_hz,
+ num_channels,
+ &data_dumper_)),
+ limiter_(sample_rate_hz, &data_dumper_, /*histogram_name_prefix=*/"Agc2"),
+ calls_since_last_limiter_log_(0),
+ analog_level_(kUnspecifiedAnalogLevel) {
+ RTC_DCHECK(Validate(config));
+ data_dumper_.InitiateNewSetOfRecordings();
+ const bool use_vad = config.adaptive_digital.enabled;
+ if (use_vad && use_internal_vad) {
+ // TODO(bugs.webrtc.org/7494): Move `vad_reset_period_ms` from adaptive
+ // digital to gain controller 2 config.
+ vad_ = std::make_unique<VoiceActivityDetectorWrapper>(
+ config.adaptive_digital.vad_reset_period_ms, cpu_features_,
+ sample_rate_hz);
+ }
+}
+
+GainController2::~GainController2() = default;
+
+void GainController2::Initialize(int sample_rate_hz, int num_channels) {
+ RTC_DCHECK(sample_rate_hz == AudioProcessing::kSampleRate8kHz ||
+ sample_rate_hz == AudioProcessing::kSampleRate16kHz ||
+ sample_rate_hz == AudioProcessing::kSampleRate32kHz ||
+ sample_rate_hz == AudioProcessing::kSampleRate48kHz);
+ // TODO(bugs.webrtc.org/7494): Initialize `fixed_gain_applier_`.
+ limiter_.SetSampleRate(sample_rate_hz);
+ if (vad_) {
+ vad_->Initialize(sample_rate_hz);
+ }
+ if (adaptive_digital_controller_) {
+ adaptive_digital_controller_->Initialize(sample_rate_hz, num_channels);
+ }
+ data_dumper_.InitiateNewSetOfRecordings();
+ calls_since_last_limiter_log_ = 0;
+ analog_level_ = kUnspecifiedAnalogLevel;
+}
+
+void GainController2::SetFixedGainDb(float gain_db) {
+ const float gain_factor = DbToRatio(gain_db);
+ if (fixed_gain_applier_.GetGainFactor() != gain_factor) {
+ // Reset the limiter so that it quickly reacts to abrupt level changes
+ // caused by large changes of the fixed gain.
+ limiter_.Reset();
+ }
+ fixed_gain_applier_.SetGainFactor(gain_factor);
+}
+
+void GainController2::Process(absl::optional<float> speech_probability,
+ AudioBuffer* audio) {
+ data_dumper_.DumpRaw("agc2_notified_analog_level", analog_level_);
+ AudioFrameView<float> float_frame(audio->channels(), audio->num_channels(),
+ audio->num_frames());
+ if (vad_) {
+ speech_probability = vad_->Analyze(float_frame);
+ } else if (speech_probability.has_value()) {
+ RTC_DCHECK_GE(speech_probability.value(), 0.0f);
+ RTC_DCHECK_LE(speech_probability.value(), 1.0f);
+ }
+ if (speech_probability.has_value()) {
+ data_dumper_.DumpRaw("agc2_speech_probability", speech_probability.value());
+ }
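+ // Apply the fixed digital gain first, then the adaptive digital gain, and
+ // run the limiter last.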
+ fixed_gain_applier_.ApplyGain(float_frame);
+ if (adaptive_digital_controller_) {
+ RTC_DCHECK(speech_probability.has_value());
+ adaptive_digital_controller_->Process(
+ float_frame, speech_probability.value(), limiter_.LastAudioLevel());
+ }
+ limiter_.Process(float_frame);
+
+ // Periodically log limiter stats.
+ if (++calls_since_last_limiter_log_ == kLogLimiterStatsPeriodNumFrames) {
+ calls_since_last_limiter_log_ = 0;
+ InterpolatedGainCurve::Stats stats = limiter_.GetGainCurveStats();
+ RTC_LOG(LS_INFO) << "AGC2 limiter stats"
+ << " | identity: " << stats.look_ups_identity_region
+ << " | knee: " << stats.look_ups_knee_region
+ << " | limiter: " << stats.look_ups_limiter_region
+ << " | saturation: " << stats.look_ups_saturation_region;
+ }
+}
+
+void GainController2::NotifyAnalogLevel(int level) {
+ if (analog_level_ != level && adaptive_digital_controller_) {
+ adaptive_digital_controller_->HandleInputGainChange();
+ }
+ analog_level_ = level;
+}
+
+bool GainController2::Validate(
+ const AudioProcessing::Config::GainController2& config) {
+ const auto& fixed = config.fixed_digital;
+ const auto& adaptive = config.adaptive_digital;
+ return fixed.gain_db >= 0.0f && fixed.gain_db < 50.f &&
+ adaptive.headroom_db >= 0.0f && adaptive.max_gain_db > 0.0f &&
+ adaptive.initial_gain_db >= 0.0f &&
+ adaptive.max_gain_change_db_per_second > 0.0f &&
+ adaptive.max_output_noise_level_dbfs <= 0.0f;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/gain_controller2.h b/third_party/libwebrtc/modules/audio_processing/gain_controller2.h
new file mode 100644
index 0000000000..304fa40489
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/gain_controller2.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_GAIN_CONTROLLER2_H_
+#define MODULES_AUDIO_PROCESSING_GAIN_CONTROLLER2_H_
+
+#include <atomic>
+#include <memory>
+#include <string>
+
+#include "modules/audio_processing/agc2/adaptive_digital_gain_controller.h"
+#include "modules/audio_processing/agc2/cpu_features.h"
+#include "modules/audio_processing/agc2/gain_applier.h"
+#include "modules/audio_processing/agc2/limiter.h"
+#include "modules/audio_processing/agc2/vad_wrapper.h"
+#include "modules/audio_processing/include/audio_processing.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+
+namespace webrtc {
+
+class AudioBuffer;
+
+// Gain Controller 2 aims to automatically adjust levels by acting on the
+// microphone gain and/or applying digital gain.
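+//
+// Usage sketch (assumed wiring; 10 ms frames in an AudioBuffer):
+//   AudioProcessing::Config::GainController2 config;
+//   config.adaptive_digital.enabled = true;
+//   GainController2 agc2(config, /*sample_rate_hz=*/48000,
+//                        /*num_channels=*/2, /*use_internal_vad=*/true);
+//   agc2.Process(/*speech_probability=*/absl::nullopt, &audio_buffer);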
+class GainController2 {
+ public:
+ // Ctor. If `use_internal_vad` is true, an internal voice activity
+ // detector is used for adaptive digital gain control.
+ GainController2(const AudioProcessing::Config::GainController2& config,
+ int sample_rate_hz,
+ int num_channels,
+ bool use_internal_vad);
+ GainController2(const GainController2&) = delete;
+ GainController2& operator=(const GainController2&) = delete;
+ ~GainController2();
+
+ // Detects and handles changes of sample rate and/or number of channels.
+ void Initialize(int sample_rate_hz, int num_channels);
+
+ // Sets the fixed digital gain.
+ void SetFixedGainDb(float gain_db);
+
+ // Applies fixed and adaptive digital gains to `audio` and runs a limiter.
+ // If the internal VAD is used, `speech_probability` is ignored. Otherwise
+ // `speech_probability` is used for adaptive digital gain if available; its
+ // values must lie in [0.0, 1.0].
+ void Process(absl::optional<float> speech_probability, AudioBuffer* audio);
+
+ // Handles analog level changes.
+ void NotifyAnalogLevel(int level);
+
+ static bool Validate(const AudioProcessing::Config::GainController2& config);
+
+ AvailableCpuFeatures GetCpuFeatures() const { return cpu_features_; }
+
+ private:
+ static std::atomic<int> instance_count_;
+ const AvailableCpuFeatures cpu_features_;
+ ApmDataDumper data_dumper_;
+ GainApplier fixed_gain_applier_;
+ std::unique_ptr<VoiceActivityDetectorWrapper> vad_;
+ std::unique_ptr<AdaptiveDigitalGainController> adaptive_digital_controller_;
+ Limiter limiter_;
+ int calls_since_last_limiter_log_;
+ int analog_level_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_GAIN_CONTROLLER2_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/gain_controller2_gn/moz.build b/third_party/libwebrtc/modules/audio_processing/gain_controller2_gn/moz.build
new file mode 100644
index 0000000000..b63751c501
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/gain_controller2_gn/moz.build
@@ -0,0 +1,213 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_APM_DEBUG_DUMP"] = "0"
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/audio_processing/gain_controller2.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("gain_controller2_gn")
diff --git a/third_party/libwebrtc/modules/audio_processing/gain_controller2_unittest.cc b/third_party/libwebrtc/modules/audio_processing/gain_controller2_unittest.cc
new file mode 100644
index 0000000000..88a93b0cdb
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/gain_controller2_unittest.cc
@@ -0,0 +1,492 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/gain_controller2.h"
+
+#include <algorithm>
+#include <cmath>
+#include <memory>
+#include <numeric>
+#include <tuple>
+
+#include "api/array_view.h"
+#include "modules/audio_processing/agc2/agc2_testing_common.h"
+#include "modules/audio_processing/audio_buffer.h"
+#include "modules/audio_processing/test/audio_buffer_tools.h"
+#include "modules/audio_processing/test/bitexactness_tools.h"
+#include "rtc_base/checks.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace test {
+namespace {
+
+using Agc2Config = AudioProcessing::Config::GainController2;
+
+// Sets all the samples in `ab` to `value`.
+void SetAudioBufferSamples(float value, AudioBuffer& ab) {
+ for (size_t k = 0; k < ab.num_channels(); ++k) {
+ std::fill(ab.channels()[k], ab.channels()[k] + ab.num_frames(), value);
+ }
+}
+
+float RunAgc2WithConstantInput(GainController2& agc2,
+ float input_level,
+ int num_frames,
+ int sample_rate_hz) {
+ const int num_samples = rtc::CheckedDivExact(sample_rate_hz, 100);
+ AudioBuffer ab(sample_rate_hz, 1, sample_rate_hz, 1, sample_rate_hz, 1);
+
+ // Give the level estimator time to converge.
+ for (int i = 0; i < num_frames + 1; ++i) {
+ SetAudioBufferSamples(input_level, ab);
+ agc2.Process(/*speech_probability=*/absl::nullopt, &ab);
+ }
+
+ // Return the last sample from the last processed frame.
+ return ab.channels()[0][num_samples - 1];
+}
+
+std::unique_ptr<GainController2> CreateAgc2FixedDigitalMode(
+ float fixed_gain_db,
+ int sample_rate_hz) {
+ Agc2Config config;
+ config.adaptive_digital.enabled = false;
+ config.fixed_digital.gain_db = fixed_gain_db;
+ EXPECT_TRUE(GainController2::Validate(config));
+ return std::make_unique<GainController2>(config, sample_rate_hz,
+ /*num_channels=*/1,
+ /*use_internal_vad=*/true);
+}
+
+} // namespace
+
+TEST(GainController2, CheckDefaultConfig) {
+ Agc2Config config;
+ EXPECT_TRUE(GainController2::Validate(config));
+}
+
+TEST(GainController2, CheckFixedDigitalConfig) {
+ Agc2Config config;
+ // Attenuation is not allowed.
+ config.fixed_digital.gain_db = -5.0f;
+ EXPECT_FALSE(GainController2::Validate(config));
+ // Unity gain (0 dB) is allowed.
+ config.fixed_digital.gain_db = 0.0f;
+ EXPECT_TRUE(GainController2::Validate(config));
+ // Positive gain is allowed.
+ config.fixed_digital.gain_db = 15.0f;
+ EXPECT_TRUE(GainController2::Validate(config));
+}
+
+TEST(GainController2, CheckHeadroomDb) {
+ Agc2Config config;
+ config.adaptive_digital.headroom_db = -1.0f;
+ EXPECT_FALSE(GainController2::Validate(config));
+ config.adaptive_digital.headroom_db = 0.0f;
+ EXPECT_TRUE(GainController2::Validate(config));
+ config.adaptive_digital.headroom_db = 5.0f;
+ EXPECT_TRUE(GainController2::Validate(config));
+}
+
+TEST(GainController2, CheckMaxGainDb) {
+ Agc2Config config;
+ config.adaptive_digital.max_gain_db = -1.0f;
+ EXPECT_FALSE(GainController2::Validate(config));
+ config.adaptive_digital.max_gain_db = 0.0f;
+ EXPECT_FALSE(GainController2::Validate(config));
+ config.adaptive_digital.max_gain_db = 5.0f;
+ EXPECT_TRUE(GainController2::Validate(config));
+}
+
+TEST(GainController2, CheckInitialGainDb) {
+ Agc2Config config;
+ config.adaptive_digital.initial_gain_db = -1.0f;
+ EXPECT_FALSE(GainController2::Validate(config));
+ config.adaptive_digital.initial_gain_db = 0.0f;
+ EXPECT_TRUE(GainController2::Validate(config));
+ config.adaptive_digital.initial_gain_db = 5.0f;
+ EXPECT_TRUE(GainController2::Validate(config));
+}
+
+TEST(GainController2, CheckAdaptiveDigitalMaxGainChangeSpeedConfig) {
+ Agc2Config config;
+ config.adaptive_digital.max_gain_change_db_per_second = -1.0f;
+ EXPECT_FALSE(GainController2::Validate(config));
+ config.adaptive_digital.max_gain_change_db_per_second = 0.0f;
+ EXPECT_FALSE(GainController2::Validate(config));
+ config.adaptive_digital.max_gain_change_db_per_second = 5.0f;
+ EXPECT_TRUE(GainController2::Validate(config));
+}
+
+TEST(GainController2, CheckAdaptiveDigitalMaxOutputNoiseLevelConfig) {
+ Agc2Config config;
+ config.adaptive_digital.max_output_noise_level_dbfs = 5.0f;
+ EXPECT_FALSE(GainController2::Validate(config));
+ config.adaptive_digital.max_output_noise_level_dbfs = 0.0f;
+ EXPECT_TRUE(GainController2::Validate(config));
+ config.adaptive_digital.max_output_noise_level_dbfs = -5.0f;
+ EXPECT_TRUE(GainController2::Validate(config));
+}
+
+// Checks that the default config is applied.
+TEST(GainController2, ApplyDefaultConfig) {
+ auto gain_controller2 = std::make_unique<GainController2>(
+ Agc2Config{}, /*sample_rate_hz=*/16000, /*num_channels=*/2,
+ /*use_internal_vad=*/true);
+ EXPECT_TRUE(gain_controller2.get());
+}
+
+TEST(GainController2FixedDigital, GainShouldChangeOnSetGain) {
+ constexpr float kInputLevel = 1000.0f;
+ constexpr size_t kNumFrames = 5;
+ constexpr size_t kSampleRateHz = 8000;
+ constexpr float kGain0Db = 0.0f;
+ constexpr float kGain20Db = 20.0f;
+
+ auto agc2_fixed = CreateAgc2FixedDigitalMode(kGain0Db, kSampleRateHz);
+
+ // Signal level is unchanged with 0 dB gain.
+ EXPECT_FLOAT_EQ(RunAgc2WithConstantInput(*agc2_fixed, kInputLevel, kNumFrames,
+ kSampleRateHz),
+ kInputLevel);
+
+ // +20 dB should increase the signal level by a factor of 10.
+ agc2_fixed->SetFixedGainDb(kGain20Db);
+ EXPECT_FLOAT_EQ(RunAgc2WithConstantInput(*agc2_fixed, kInputLevel, kNumFrames,
+ kSampleRateHz),
+ kInputLevel * 10);
+}
+
+TEST(GainController2FixedDigital, ChangeFixedGainShouldBeFastAndTimeInvariant) {
+ // Number of frames required for the fixed gain controller to adapt on the
+ // input signal when the gain changes.
+ constexpr size_t kNumFrames = 5;
+
+ constexpr float kInputLevel = 1000.0f;
+ constexpr size_t kSampleRateHz = 8000;
+ constexpr float kGainDbLow = 0.0f;
+ constexpr float kGainDbHigh = 25.0f;
+ static_assert(kGainDbLow < kGainDbHigh, "");
+
+ auto agc2_fixed = CreateAgc2FixedDigitalMode(kGainDbLow, kSampleRateHz);
+
+ // Start with a lower gain.
+ const float output_level_pre = RunAgc2WithConstantInput(
+ *agc2_fixed, kInputLevel, kNumFrames, kSampleRateHz);
+
+ // Increase gain.
+ agc2_fixed->SetFixedGainDb(kGainDbHigh);
+ static_cast<void>(RunAgc2WithConstantInput(*agc2_fixed, kInputLevel,
+ kNumFrames, kSampleRateHz));
+
+ // Back to the lower gain.
+ agc2_fixed->SetFixedGainDb(kGainDbLow);
+ const float output_level_post = RunAgc2WithConstantInput(
+ *agc2_fixed, kInputLevel, kNumFrames, kSampleRateHz);
+
+ EXPECT_EQ(output_level_pre, output_level_post);
+}
+
+class FixedDigitalTest
+ : public ::testing::TestWithParam<std::tuple<float, float, int, bool>> {
+ protected:
+ float gain_db_min() const { return std::get<0>(GetParam()); }
+ float gain_db_max() const { return std::get<1>(GetParam()); }
+ int sample_rate_hz() const { return std::get<2>(GetParam()); }
+ bool saturation_expected() const { return std::get<3>(GetParam()); }
+};
+
+TEST_P(FixedDigitalTest, CheckSaturationBehaviorWithLimiter) {
+ for (const float gain_db : test::LinSpace(gain_db_min(), gain_db_max(), 10)) {
+ SCOPED_TRACE(gain_db);
+ auto agc2_fixed = CreateAgc2FixedDigitalMode(gain_db, sample_rate_hz());
+ const float processed_sample =
+ RunAgc2WithConstantInput(*agc2_fixed, /*input_level=*/32767.0f,
+ /*num_frames=*/5, sample_rate_hz());
+ if (saturation_expected()) {
+ EXPECT_FLOAT_EQ(processed_sample, 32767.0f);
+ } else {
+ EXPECT_LT(processed_sample, 32767.0f);
+ }
+ }
+}
+
+static_assert(test::kLimiterMaxInputLevelDbFs < 10, "");
+INSTANTIATE_TEST_SUITE_P(
+ GainController2,
+ FixedDigitalTest,
+ ::testing::Values(
+ // When gain < `test::kLimiterMaxInputLevelDbFs`, the limiter will not
+ // saturate the signal (at any sample rate).
+ std::make_tuple(0.1f,
+ test::kLimiterMaxInputLevelDbFs - 0.01f,
+ 8000,
+ false),
+ std::make_tuple(0.1f,
+ test::kLimiterMaxInputLevelDbFs - 0.01f,
+ 48000,
+ false),
+ // When gain > `test::kLimiterMaxInputLevelDbFs`, the limiter will
+ // saturate the signal (at any sample rate).
+ std::make_tuple(test::kLimiterMaxInputLevelDbFs + 0.01f,
+ 10.0f,
+ 8000,
+ true),
+ std::make_tuple(test::kLimiterMaxInputLevelDbFs + 0.01f,
+ 10.0f,
+ 48000,
+ true)));
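+
+// Note: RunAgc2WithConstantInput above feeds samples at 32767 (int16 full
+// scale, i.e. 0 dBFS), so after applying `gain_db` the limiter sees a level
+// of about `gain_db` dBFS over full scale; saturation is therefore expected
+// exactly when `gain_db` exceeds `test::kLimiterMaxInputLevelDbFs`.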
+
+// Processes a test audio file and checks that the gain applied at the end of
+// the recording is close to the expected value.
+TEST(GainController2, CheckFinalGainWithAdaptiveDigitalController) {
+ constexpr int kSampleRateHz = AudioProcessing::kSampleRate48kHz;
+ constexpr int kStereo = 2;
+
+ // Create AGC2 enabling only the adaptive digital controller.
+ Agc2Config config;
+ config.fixed_digital.gain_db = 0.0f;
+ config.adaptive_digital.enabled = true;
+ GainController2 agc2(config, kSampleRateHz, kStereo,
+ /*use_internal_vad=*/true);
+
+ test::InputAudioFile input_file(
+ test::GetApmCaptureTestVectorFileName(kSampleRateHz),
+ /*loop_at_end=*/true);
+ const StreamConfig stream_config(kSampleRateHz, kStereo);
+
+ // Init buffers.
+ constexpr int kFrameDurationMs = 10;
+ std::vector<float> frame(kStereo * stream_config.num_frames());
+ AudioBuffer audio_buffer(kSampleRateHz, kStereo, kSampleRateHz, kStereo,
+ kSampleRateHz, kStereo);
+
+ // Simulate.
+ constexpr float kGainDb = -6.0f;
+ const float gain = std::pow(10.0f, kGainDb / 20.0f);
+ constexpr int kDurationMs = 10000;
+ constexpr int kNumFramesToProcess = kDurationMs / kFrameDurationMs;
+ for (int i = 0; i < kNumFramesToProcess; ++i) {
+ ReadFloatSamplesFromStereoFile(stream_config.num_frames(),
+ stream_config.num_channels(), &input_file,
+ frame);
+ // Apply a fixed gain to the input audio.
+ for (float& x : frame) {
+ x *= gain;
+ }
+ test::CopyVectorToAudioBuffer(stream_config, frame, &audio_buffer);
+ agc2.Process(/*speech_probability=*/absl::nullopt, &audio_buffer);
+ }
+
+ // Estimate the applied gain by processing a probing frame.
+ SetAudioBufferSamples(/*value=*/1.0f, audio_buffer);
+ agc2.Process(/*speech_probability=*/absl::nullopt, &audio_buffer);
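+  // Because the probing frame is all ones, the first output sample equals
+  // the linear gain currently applied by AGC2; 20*log10() below converts it
+  // back to dB.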
+ const float applied_gain_db =
+ 20.0f * std::log10(audio_buffer.channels_const()[0][0]);
+
+ constexpr float kExpectedGainDb = 5.6f;
+ constexpr float kToleranceDb = 0.3f;
+ EXPECT_NEAR(applied_gain_db, kExpectedGainDb, kToleranceDb);
+}
+
+// Processes a test audio file and checks that the injected speech probability
+// is ignored when the internal VAD is used.
+TEST(GainController2,
+ CheckInjectedVadProbabilityNotUsedWithAdaptiveDigitalController) {
+ constexpr int kSampleRateHz = AudioProcessing::kSampleRate48kHz;
+ constexpr int kStereo = 2;
+
+ // Create AGC2 enabling only the adaptive digital controller.
+ Agc2Config config;
+ config.fixed_digital.gain_db = 0.0f;
+ config.adaptive_digital.enabled = true;
+ GainController2 agc2(config, kSampleRateHz, kStereo,
+ /*use_internal_vad=*/true);
+ GainController2 agc2_reference(config, kSampleRateHz, kStereo,
+ /*use_internal_vad=*/true);
+
+ test::InputAudioFile input_file(
+ test::GetApmCaptureTestVectorFileName(kSampleRateHz),
+ /*loop_at_end=*/true);
+ const StreamConfig stream_config(kSampleRateHz, kStereo);
+
+ // Init buffers.
+ constexpr int kFrameDurationMs = 10;
+ std::vector<float> frame(kStereo * stream_config.num_frames());
+ AudioBuffer audio_buffer(kSampleRateHz, kStereo, kSampleRateHz, kStereo,
+ kSampleRateHz, kStereo);
+ AudioBuffer audio_buffer_reference(kSampleRateHz, kStereo, kSampleRateHz,
+ kStereo, kSampleRateHz, kStereo);
+
+ // Simulate.
+ constexpr float kGainDb = -6.0f;
+ const float gain = std::pow(10.0f, kGainDb / 20.0f);
+ constexpr int kDurationMs = 10000;
+ constexpr int kNumFramesToProcess = kDurationMs / kFrameDurationMs;
+ constexpr float kSpeechProbabilities[] = {1.0f, 0.3f};
+ constexpr float kEpsilon = 0.0001f;
+ bool all_samples_zero = true;
+ for (int i = 0, j = 0; i < kNumFramesToProcess; ++i, j = 1 - j) {
+ ReadFloatSamplesFromStereoFile(stream_config.num_frames(),
+ stream_config.num_channels(), &input_file,
+ frame);
+ // Apply a fixed gain to the input audio.
+ for (float& x : frame) {
+ x *= gain;
+ }
+ test::CopyVectorToAudioBuffer(stream_config, frame, &audio_buffer);
+ agc2.Process(kSpeechProbabilities[j], &audio_buffer);
+ test::CopyVectorToAudioBuffer(stream_config, frame,
+ &audio_buffer_reference);
+ agc2_reference.Process(absl::nullopt, &audio_buffer_reference);
+
+ // Check the output buffers.
+    for (int ch = 0; ch < kStereo; ++ch) {
+      for (int k = 0; k < static_cast<int>(audio_buffer.num_frames()); ++k) {
+        all_samples_zero &=
+            fabs(audio_buffer.channels_const()[ch][k]) < kEpsilon;
+        EXPECT_FLOAT_EQ(audio_buffer.channels_const()[ch][k],
+                        audio_buffer_reference.channels_const()[ch][k]);
+      }
+    }
+ }
+ EXPECT_FALSE(all_samples_zero);
+}
+
+// Processes a test audio file and checks that the injected speech probability
+// is used when the internal VAD is disabled.
+TEST(GainController2,
+ CheckInjectedVadProbabilityUsedWithAdaptiveDigitalController) {
+ constexpr int kSampleRateHz = AudioProcessing::kSampleRate48kHz;
+ constexpr int kStereo = 2;
+
+ // Create AGC2 enabling only the adaptive digital controller.
+ Agc2Config config;
+ config.fixed_digital.gain_db = 0.0f;
+ config.adaptive_digital.enabled = true;
+ GainController2 agc2(config, kSampleRateHz, kStereo,
+ /*use_internal_vad=*/false);
+ GainController2 agc2_reference(config, kSampleRateHz, kStereo,
+ /*use_internal_vad=*/true);
+
+ test::InputAudioFile input_file(
+ test::GetApmCaptureTestVectorFileName(kSampleRateHz),
+ /*loop_at_end=*/true);
+ const StreamConfig stream_config(kSampleRateHz, kStereo);
+
+ // Init buffers.
+ constexpr int kFrameDurationMs = 10;
+ std::vector<float> frame(kStereo * stream_config.num_frames());
+ AudioBuffer audio_buffer(kSampleRateHz, kStereo, kSampleRateHz, kStereo,
+ kSampleRateHz, kStereo);
+ AudioBuffer audio_buffer_reference(kSampleRateHz, kStereo, kSampleRateHz,
+ kStereo, kSampleRateHz, kStereo);
+ // Simulate.
+ constexpr float kGainDb = -6.0f;
+ const float gain = std::pow(10.0f, kGainDb / 20.0f);
+ constexpr int kDurationMs = 10000;
+ constexpr int kNumFramesToProcess = kDurationMs / kFrameDurationMs;
+ constexpr float kSpeechProbabilities[] = {1.0f, 0.3f};
+ constexpr float kEpsilon = 0.0001f;
+ bool all_samples_zero = true;
+ bool all_samples_equal = true;
+ for (int i = 0, j = 0; i < kNumFramesToProcess; ++i, j = 1 - j) {
+ ReadFloatSamplesFromStereoFile(stream_config.num_frames(),
+ stream_config.num_channels(), &input_file,
+ frame);
+ // Apply a fixed gain to the input audio.
+ for (float& x : frame) {
+ x *= gain;
+ }
+ test::CopyVectorToAudioBuffer(stream_config, frame, &audio_buffer);
+ agc2.Process(kSpeechProbabilities[j], &audio_buffer);
+ test::CopyVectorToAudioBuffer(stream_config, frame,
+ &audio_buffer_reference);
+ agc2_reference.Process(absl::nullopt, &audio_buffer_reference);
+ // Check the output buffers.
+    for (int ch = 0; ch < kStereo; ++ch) {
+      for (int k = 0; k < static_cast<int>(audio_buffer.num_frames()); ++k) {
+        all_samples_zero &=
+            fabs(audio_buffer.channels_const()[ch][k]) < kEpsilon;
+        all_samples_equal &=
+            fabs(audio_buffer.channels_const()[ch][k] -
+                 audio_buffer_reference.channels_const()[ch][k]) < kEpsilon;
+      }
+    }
+ }
+ EXPECT_FALSE(all_samples_zero);
+ EXPECT_FALSE(all_samples_equal);
+}
+
+// Processes a test audio file and checks that the output is identical when
+// the speech probability injected from `VoiceActivityDetectorWrapper` and
+// the speech probability computed by the internal VAD are the same.
+TEST(GainController2,
+ CheckEqualResultFromInjectedVadProbabilityWithAdaptiveDigitalController) {
+ constexpr int kSampleRateHz = AudioProcessing::kSampleRate48kHz;
+ constexpr int kStereo = 2;
+
+ // Create AGC2 enabling only the adaptive digital controller.
+ Agc2Config config;
+ config.fixed_digital.gain_db = 0.0f;
+ config.adaptive_digital.enabled = true;
+ GainController2 agc2(config, kSampleRateHz, kStereo,
+ /*use_internal_vad=*/false);
+ GainController2 agc2_reference(config, kSampleRateHz, kStereo,
+ /*use_internal_vad=*/true);
+ VoiceActivityDetectorWrapper vad(config.adaptive_digital.vad_reset_period_ms,
+ GetAvailableCpuFeatures(), kSampleRateHz);
+ test::InputAudioFile input_file(
+ test::GetApmCaptureTestVectorFileName(kSampleRateHz),
+ /*loop_at_end=*/true);
+ const StreamConfig stream_config(kSampleRateHz, kStereo);
+
+ // Init buffers.
+ constexpr int kFrameDurationMs = 10;
+ std::vector<float> frame(kStereo * stream_config.num_frames());
+ AudioBuffer audio_buffer(kSampleRateHz, kStereo, kSampleRateHz, kStereo,
+ kSampleRateHz, kStereo);
+ AudioBuffer audio_buffer_reference(kSampleRateHz, kStereo, kSampleRateHz,
+ kStereo, kSampleRateHz, kStereo);
+
+ // Simulate.
+ constexpr float kGainDb = -6.0f;
+ const float gain = std::pow(10.0f, kGainDb / 20.0f);
+ constexpr int kDurationMs = 10000;
+ constexpr int kNumFramesToProcess = kDurationMs / kFrameDurationMs;
+ for (int i = 0; i < kNumFramesToProcess; ++i) {
+ ReadFloatSamplesFromStereoFile(stream_config.num_frames(),
+ stream_config.num_channels(), &input_file,
+ frame);
+ // Apply a fixed gain to the input audio.
+ for (float& x : frame) {
+ x *= gain;
+ }
+ test::CopyVectorToAudioBuffer(stream_config, frame,
+ &audio_buffer_reference);
+ agc2_reference.Process(absl::nullopt, &audio_buffer_reference);
+ test::CopyVectorToAudioBuffer(stream_config, frame, &audio_buffer);
+ agc2.Process(vad.Analyze(AudioFrameView<const float>(
+ audio_buffer.channels(), audio_buffer.num_channels(),
+ audio_buffer.num_frames())),
+ &audio_buffer);
+ // Check the output buffer.
+    for (int ch = 0; ch < kStereo; ++ch) {
+      for (int k = 0; k < static_cast<int>(audio_buffer.num_frames()); ++k) {
+        EXPECT_FLOAT_EQ(audio_buffer.channels_const()[ch][k],
+                        audio_buffer_reference.channels_const()[ch][k]);
+      }
+    }
+ }
+}
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/high_pass_filter.cc b/third_party/libwebrtc/modules/audio_processing/high_pass_filter.cc
new file mode 100644
index 0000000000..3b4740f6a5
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/high_pass_filter.cc
@@ -0,0 +1,115 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/high_pass_filter.h"
+
+#include "api/array_view.h"
+#include "modules/audio_processing/audio_buffer.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+namespace {
+// [B,A] = butter(2,100/8000,'high')
+constexpr CascadedBiQuadFilter::BiQuadCoefficients
+ kHighPassFilterCoefficients16kHz = {{0.97261f, -1.94523f, 0.97261f},
+ {-1.94448f, 0.94598f}};
+
+// [B,A] = butter(2,100/16000,'high')
+constexpr CascadedBiQuadFilter::BiQuadCoefficients
+ kHighPassFilterCoefficients32kHz = {{0.98621f, -1.97242f, 0.98621f},
+ {-1.97223f, 0.97261f}};
+
+// [B,A] = butter(2,100/24000,'high')
+constexpr CascadedBiQuadFilter::BiQuadCoefficients
+ kHighPassFilterCoefficients48kHz = {{0.99079f, -1.98157f, 0.99079f},
+ {-1.98149f, 0.98166f}};
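+
+// Each constant above stores {b0, b1, b2} and {a1, a2} for one biquad stage,
+// i.e. (assuming the usual normalization A(z) = 1 + a1*z^-1 + a2*z^-2) the
+// per-sample difference equation applied by CascadedBiQuadFilter:
+//   y[n] = b0*x[n] + b1*x[n-1] + b2*x[n-2] - a1*y[n-1] - a2*y[n-2]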
+
+constexpr size_t kNumberOfHighPassBiQuads = 1;
+
+const CascadedBiQuadFilter::BiQuadCoefficients& ChooseCoefficients(
+ int sample_rate_hz) {
+ switch (sample_rate_hz) {
+ case 16000:
+ return kHighPassFilterCoefficients16kHz;
+ case 32000:
+ return kHighPassFilterCoefficients32kHz;
+ case 48000:
+ return kHighPassFilterCoefficients48kHz;
+ default:
+ RTC_DCHECK_NOTREACHED();
+ }
+ RTC_DCHECK_NOTREACHED();
+ return kHighPassFilterCoefficients16kHz;
+}
+
+} // namespace
+
+HighPassFilter::HighPassFilter(int sample_rate_hz, size_t num_channels)
+ : sample_rate_hz_(sample_rate_hz) {
+ filters_.resize(num_channels);
+ const auto& coefficients = ChooseCoefficients(sample_rate_hz_);
+ for (size_t k = 0; k < filters_.size(); ++k) {
+ filters_[k].reset(
+ new CascadedBiQuadFilter(coefficients, kNumberOfHighPassBiQuads));
+ }
+}
+
+HighPassFilter::~HighPassFilter() = default;
+
+void HighPassFilter::Process(AudioBuffer* audio, bool use_split_band_data) {
+ RTC_DCHECK(audio);
+ RTC_DCHECK_EQ(filters_.size(), audio->num_channels());
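+  // When operating on split-band data, only the lowest band is filtered; a
+  // high-pass with a cutoff this low (100 Hz) has no effect on the higher
+  // bands.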
+ if (use_split_band_data) {
+ for (size_t k = 0; k < audio->num_channels(); ++k) {
+ rtc::ArrayView<float> channel_data = rtc::ArrayView<float>(
+ audio->split_bands(k)[0], audio->num_frames_per_band());
+ filters_[k]->Process(channel_data);
+ }
+ } else {
+ for (size_t k = 0; k < audio->num_channels(); ++k) {
+ rtc::ArrayView<float> channel_data =
+ rtc::ArrayView<float>(&audio->channels()[k][0], audio->num_frames());
+ filters_[k]->Process(channel_data);
+ }
+ }
+}
+
+void HighPassFilter::Process(std::vector<std::vector<float>>* audio) {
+ RTC_DCHECK_EQ(filters_.size(), audio->size());
+ for (size_t k = 0; k < audio->size(); ++k) {
+ filters_[k]->Process((*audio)[k]);
+ }
+}
+
+void HighPassFilter::Reset() {
+ for (size_t k = 0; k < filters_.size(); ++k) {
+ filters_[k]->Reset();
+ }
+}
+
+void HighPassFilter::Reset(size_t num_channels) {
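+  // Both branches below leave every remaining filter with cleared state;
+  // they differ only in whether filter instances are destroyed (shrinking)
+  // or newly allocated (growing).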
+ const size_t old_num_channels = filters_.size();
+ filters_.resize(num_channels);
+ if (filters_.size() < old_num_channels) {
+ Reset();
+ } else {
+ for (size_t k = 0; k < old_num_channels; ++k) {
+ filters_[k]->Reset();
+ }
+ const auto& coefficients = ChooseCoefficients(sample_rate_hz_);
+ for (size_t k = old_num_channels; k < filters_.size(); ++k) {
+ filters_[k].reset(
+ new CascadedBiQuadFilter(coefficients, kNumberOfHighPassBiQuads));
+ }
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/high_pass_filter.h b/third_party/libwebrtc/modules/audio_processing/high_pass_filter.h
new file mode 100644
index 0000000000..7e7c370cd1
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/high_pass_filter.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_HIGH_PASS_FILTER_H_
+#define MODULES_AUDIO_PROCESSING_HIGH_PASS_FILTER_H_
+
+#include <memory>
+#include <vector>
+
+#include "api/array_view.h"
+#include "modules/audio_processing/utility/cascaded_biquad_filter.h"
+
+namespace webrtc {
+
+class AudioBuffer;
+
+class HighPassFilter {
+ public:
+ HighPassFilter(int sample_rate_hz, size_t num_channels);
+ ~HighPassFilter();
+ HighPassFilter(const HighPassFilter&) = delete;
+ HighPassFilter& operator=(const HighPassFilter&) = delete;
+
+ void Process(AudioBuffer* audio, bool use_split_band_data);
+ void Process(std::vector<std::vector<float>>* audio);
+ void Reset();
+ void Reset(size_t num_channels);
+
+ int sample_rate_hz() const { return sample_rate_hz_; }
+ size_t num_channels() const { return filters_.size(); }
+
+ private:
+ const int sample_rate_hz_;
+ std::vector<std::unique_ptr<CascadedBiQuadFilter>> filters_;
+};
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_HIGH_PASS_FILTER_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/high_pass_filter_gn/moz.build b/third_party/libwebrtc/modules/audio_processing/high_pass_filter_gn/moz.build
new file mode 100644
index 0000000000..ca00a03c1d
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/high_pass_filter_gn/moz.build
@@ -0,0 +1,212 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/audio_processing/high_pass_filter.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("high_pass_filter_gn")
diff --git a/third_party/libwebrtc/modules/audio_processing/high_pass_filter_unittest.cc b/third_party/libwebrtc/modules/audio_processing/high_pass_filter_unittest.cc
new file mode 100644
index 0000000000..9f3c8fe595
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/high_pass_filter_unittest.cc
@@ -0,0 +1,301 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/audio_processing/high_pass_filter.h"
+
+#include <vector>
+
+#include "api/array_view.h"
+#include "modules/audio_processing/audio_buffer.h"
+#include "modules/audio_processing/test/audio_buffer_tools.h"
+#include "modules/audio_processing/test/bitexactness_tools.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+// Process one frame of data via the AudioBuffer interface and produce the
+// output.
+std::vector<float> ProcessOneFrameAsAudioBuffer(
+ const std::vector<float>& frame_input,
+ const StreamConfig& stream_config,
+ HighPassFilter* high_pass_filter) {
+ AudioBuffer audio_buffer(
+ stream_config.sample_rate_hz(), stream_config.num_channels(),
+ stream_config.sample_rate_hz(), stream_config.num_channels(),
+ stream_config.sample_rate_hz(), stream_config.num_channels());
+
+ test::CopyVectorToAudioBuffer(stream_config, frame_input, &audio_buffer);
+ high_pass_filter->Process(&audio_buffer, /*use_split_band_data=*/false);
+ std::vector<float> frame_output;
+ test::ExtractVectorFromAudioBuffer(stream_config, &audio_buffer,
+ &frame_output);
+ return frame_output;
+}
+
+// Process one frame of data via the vector interface and produce the output.
+std::vector<float> ProcessOneFrameAsVector(
+ const std::vector<float>& frame_input,
+ const StreamConfig& stream_config,
+ HighPassFilter* high_pass_filter) {
+ std::vector<std::vector<float>> process_vector(
+ stream_config.num_channels(),
+ std::vector<float>(stream_config.num_frames()));
+
+ for (size_t k = 0; k < stream_config.num_frames(); ++k) {
+ for (size_t channel = 0; channel < stream_config.num_channels();
+ ++channel) {
+ process_vector[channel][k] =
+ frame_input[k * stream_config.num_channels() + channel];
+ }
+ }
+
+ high_pass_filter->Process(&process_vector);
+
+  // Return the output deinterleaved (channel after channel), matching the
+  // channel-blocked layout expected by RunBitexactnessTest below.
+  std::vector<float> output;
+  for (size_t channel = 0; channel < stream_config.num_channels();
+       ++channel) {
+    output.insert(output.end(), process_vector[channel].begin(),
+                  process_vector[channel].end());
+  }
+
+  return output;
+}
+
+// Processes a specified number of frames, verifies the results, and reports
+// any errors.
+void RunBitexactnessTest(int num_channels,
+ bool use_audio_buffer_interface,
+ const std::vector<float>& input,
+ const std::vector<float>& reference) {
+ const StreamConfig stream_config(16000, num_channels);
+ HighPassFilter high_pass_filter(16000, num_channels);
+
+ std::vector<float> output;
+ const size_t num_frames_to_process =
+ input.size() /
+ (stream_config.num_frames() * stream_config.num_channels());
+ for (size_t frame_no = 0; frame_no < num_frames_to_process; ++frame_no) {
+ std::vector<float> frame_input(
+ input.begin() + stream_config.num_frames() *
+ stream_config.num_channels() * frame_no,
+ input.begin() + stream_config.num_frames() *
+ stream_config.num_channels() * (frame_no + 1));
+ if (use_audio_buffer_interface) {
+ output = ProcessOneFrameAsAudioBuffer(frame_input, stream_config,
+ &high_pass_filter);
+ } else {
+ output = ProcessOneFrameAsVector(frame_input, stream_config,
+ &high_pass_filter);
+ }
+ }
+
+  // Form the vector to compare against the reference. Only the last processed
+  // frame is compared, to avoid having to specify all preceding frames as
+  // inputs. As the algorithm being tested has memory, testing only the last
+  // frame implicitly also tests the preceding frames.
+ const size_t reference_frame_length =
+ reference.size() / stream_config.num_channels();
+ std::vector<float> output_to_verify;
+ for (size_t channel_no = 0; channel_no < stream_config.num_channels();
+ ++channel_no) {
+ output_to_verify.insert(
+ output_to_verify.end(),
+ output.begin() + channel_no * stream_config.num_frames(),
+ output.begin() + channel_no * stream_config.num_frames() +
+ reference_frame_length);
+ }
+
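+  // One quantization step of 16-bit PCM; outputs that match the reference to
+  // within this bound are treated as bitexact after float/int16 conversion.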
+ const float kElementErrorBound = 1.0f / 32768.0f;
+ EXPECT_TRUE(test::VerifyDeinterleavedArray(
+ reference_frame_length, num_channels, reference, output_to_verify,
+ kElementErrorBound));
+}
+
+// Helper for forming a vector from an array view.
+// TODO(peah): Remove once braced initialization is allowed.
+std::vector<float> CreateVector(const rtc::ArrayView<const float>& array_view) {
+ std::vector<float> v;
+ for (auto value : array_view) {
+ v.push_back(value);
+ }
+ return v;
+}
+} // namespace
+
+TEST(HighPassFilterAccuracyTest, ResetWithAudioBufferInterface) {
+ const StreamConfig stream_config_stereo(16000, 2);
+ const StreamConfig stream_config_mono(16000, 1);
+ std::vector<float> x_mono(160, 1.f);
+ std::vector<float> x_stereo(320, 1.f);
+ HighPassFilter hpf(16000, 1);
+ std::vector<float> y =
+ ProcessOneFrameAsAudioBuffer(x_mono, stream_config_mono, &hpf);
+ hpf.Reset(2);
+ y = ProcessOneFrameAsAudioBuffer(x_stereo, stream_config_stereo, &hpf);
+ hpf.Reset(1);
+ y = ProcessOneFrameAsAudioBuffer(x_mono, stream_config_mono, &hpf);
+ hpf.Reset();
+ y = ProcessOneFrameAsAudioBuffer(x_mono, stream_config_mono, &hpf);
+}
+
+TEST(HighPassFilterAccuracyTest, ResetWithVectorInterface) {
+ const StreamConfig stream_config_stereo(16000, 2);
+ const StreamConfig stream_config_mono(16000, 1);
+ std::vector<float> x_mono(160, 1.f);
+ std::vector<float> x_stereo(320, 1.f);
+ HighPassFilter hpf(16000, 1);
+ std::vector<float> y =
+ ProcessOneFrameAsVector(x_mono, stream_config_mono, &hpf);
+ hpf.Reset(2);
+ y = ProcessOneFrameAsVector(x_stereo, stream_config_stereo, &hpf);
+ hpf.Reset(1);
+ y = ProcessOneFrameAsVector(x_mono, stream_config_mono, &hpf);
+ hpf.Reset();
+ y = ProcessOneFrameAsVector(x_mono, stream_config_mono, &hpf);
+}
+
+TEST(HighPassFilterAccuracyTest, MonoInitial) {
+ const float kReferenceInput[] = {
+ 0.150254f, 0.512488f, -0.631245f, 0.240938f, 0.089080f, -0.365440f,
+ -0.121169f, 0.095748f, 1.000000f, 0.773932f, -0.377232f, 0.848124f,
+ 0.202718f, -0.017621f, 0.199738f, -0.057279f, -0.034693f, 0.416303f,
+ 0.393761f, 0.396041f, 0.187653f, -0.337438f, 0.200436f, 0.455577f,
+ 0.136624f, 0.289150f, 0.203131f, -0.084798f, 0.082124f, -0.220010f,
+ 0.248266f, -0.320554f, -0.298701f, -0.226218f, -0.822794f, 0.401962f,
+ 0.090876f, -0.210968f, 0.382936f, -0.478291f, -0.028572f, -0.067474f,
+ 0.089204f, 0.087430f, -0.241695f, -0.008398f, -0.046076f, 0.175416f,
+ 0.305518f, 0.309992f, -0.241352f, 0.021618f, -0.339291f, -0.311173f,
+ -0.001914f, 0.428301f, -0.215087f, 0.103784f, -0.063041f, 0.312250f,
+ -0.304344f, 0.009098f, 0.154406f, 0.307571f, 0.431537f, 0.024014f,
+ -0.416832f, -0.207440f, -0.296664f, 0.656846f, -0.172033f, 0.209054f,
+ -0.053772f, 0.248326f, -0.213741f, -0.391871f, -0.397490f, 0.136428f,
+ -0.049568f, -0.054788f, 0.396633f, 0.081485f, 0.055279f, 0.443690f,
+ -0.224812f, 0.194675f, 0.233369f, -0.068107f, 0.060270f, -0.325801f,
+ -0.320801f, 0.029308f, 0.201837f, 0.722528f, -0.186366f, 0.052351f,
+ -0.023053f, -0.540192f, -0.122671f, -0.501532f, 0.234847f, -0.248165f,
+ 0.027971f, -0.152171f, 0.084820f, -0.167764f, 0.136923f, 0.206619f,
+ 0.478395f, -0.054249f, -0.597574f, -0.234627f, 0.378548f, -0.299619f,
+ 0.268543f, 0.034666f, 0.401492f, -0.547983f, -0.055248f, -0.337538f,
+ 0.812657f, 0.230611f, 0.385360f, -0.295713f, -0.130957f, -0.076143f,
+ 0.306960f, -0.077653f, 0.196049f, -0.573390f, -0.098885f, -0.230155f,
+ -0.440716f, 0.141956f, 0.078802f, 0.009356f, -0.372703f, 0.315083f,
+ 0.097859f, -0.083575f, 0.006397f, -0.073216f, -0.489105f, -0.079827f,
+ -0.232329f, -0.273644f, -0.323162f, -0.149105f, -0.559646f, 0.269458f,
+ 0.145333f, -0.005597f, -0.009717f, -0.223051f, 0.284676f, -0.037228f,
+ -0.199679f, 0.377651f, -0.062813f, -0.164607f};
+ const float kReference[] = {0.146139f, 0.490336f, -0.649520f, 0.233881f,
+ 0.073214f, -0.373256f, -0.115394f, 0.102109f,
+ 0.976217f, 0.702270f, -0.457697f, 0.757116f};
+
+ for (bool use_audio_buffer_interface : {true, false}) {
+ RunBitexactnessTest(
+ 1, use_audio_buffer_interface,
+ CreateVector(rtc::ArrayView<const float>(kReferenceInput)),
+ CreateVector(rtc::ArrayView<const float>(kReference)));
+ }
+}
+
+TEST(HighPassFilterAccuracyTest, MonoConverged) {
+ const float kReferenceInput[] = {
+ 0.150254f, 0.512488f, -0.631245f, 0.240938f, 0.089080f, -0.365440f,
+ -0.121169f, 0.095748f, 1.000000f, 0.773932f, -0.377232f, 0.848124f,
+ 0.202718f, -0.017621f, 0.199738f, -0.057279f, -0.034693f, 0.416303f,
+ 0.393761f, 0.396041f, 0.187653f, -0.337438f, 0.200436f, 0.455577f,
+ 0.136624f, 0.289150f, 0.203131f, -0.084798f, 0.082124f, -0.220010f,
+ 0.248266f, -0.320554f, -0.298701f, -0.226218f, -0.822794f, 0.401962f,
+ 0.090876f, -0.210968f, 0.382936f, -0.478291f, -0.028572f, -0.067474f,
+ 0.089204f, 0.087430f, -0.241695f, -0.008398f, -0.046076f, 0.175416f,
+ 0.305518f, 0.309992f, -0.241352f, 0.021618f, -0.339291f, -0.311173f,
+ -0.001914f, 0.428301f, -0.215087f, 0.103784f, -0.063041f, 0.312250f,
+ -0.304344f, 0.009098f, 0.154406f, 0.307571f, 0.431537f, 0.024014f,
+ -0.416832f, -0.207440f, -0.296664f, 0.656846f, -0.172033f, 0.209054f,
+ -0.053772f, 0.248326f, -0.213741f, -0.391871f, -0.397490f, 0.136428f,
+ -0.049568f, -0.054788f, 0.396633f, 0.081485f, 0.055279f, 0.443690f,
+ -0.224812f, 0.194675f, 0.233369f, -0.068107f, 0.060270f, -0.325801f,
+ -0.320801f, 0.029308f, 0.201837f, 0.722528f, -0.186366f, 0.052351f,
+ -0.023053f, -0.540192f, -0.122671f, -0.501532f, 0.234847f, -0.248165f,
+ 0.027971f, -0.152171f, 0.084820f, -0.167764f, 0.136923f, 0.206619f,
+ 0.478395f, -0.054249f, -0.597574f, -0.234627f, 0.378548f, -0.299619f,
+ 0.268543f, 0.034666f, 0.401492f, -0.547983f, -0.055248f, -0.337538f,
+ 0.812657f, 0.230611f, 0.385360f, -0.295713f, -0.130957f, -0.076143f,
+ 0.306960f, -0.077653f, 0.196049f, -0.573390f, -0.098885f, -0.230155f,
+ -0.440716f, 0.141956f, 0.078802f, 0.009356f, -0.372703f, 0.315083f,
+ 0.097859f, -0.083575f, 0.006397f, -0.073216f, -0.489105f, -0.079827f,
+ -0.232329f, -0.273644f, -0.323162f, -0.149105f, -0.559646f, 0.269458f,
+ 0.145333f, -0.005597f, -0.009717f, -0.223051f, 0.284676f, -0.037228f,
+ -0.199679f, 0.377651f, -0.062813f, -0.164607f, -0.082091f, -0.236957f,
+ -0.313025f, 0.705903f, 0.462637f, 0.085942f, -0.351308f, -0.241859f,
+ -0.049333f, 0.221165f, -0.372235f, -0.651092f, -0.404957f, 0.093201f,
+ 0.109366f, 0.126224f, -0.036409f, 0.051333f, -0.133063f, 0.240896f,
+ -0.380532f, 0.127160f, -0.237176f, -0.093586f, 0.154478f, 0.290379f,
+ -0.312329f, 0.352297f, 0.184480f, -0.018965f, -0.054555f, -0.060811f,
+ -0.084705f, 0.006440f, 0.014333f, 0.230847f, 0.426721f, 0.130481f,
+ -0.058605f, 0.174712f, 0.051204f, -0.287773f, 0.265265f, 0.085810f,
+ 0.037775f, 0.143988f, 0.073051f, -0.263103f, -0.045366f, -0.040816f,
+ -0.148673f, 0.470072f, -0.244727f, -0.135204f, -0.198973f, -0.328139f,
+ -0.053722f, -0.076590f, 0.427586f, -0.069591f, -0.297399f, 0.448094f,
+ 0.345037f, -0.064170f, -0.420903f, -0.124253f, -0.043578f, 0.077149f,
+ -0.072983f, 0.123916f, 0.109517f, -0.349508f, -0.264912f, -0.207106f,
+ -0.141912f, -0.089586f, 0.003485f, -0.846518f, -0.127715f, 0.347208f,
+ -0.298095f, 0.260935f, 0.097899f, -0.008106f, 0.050987f, -0.437362f,
+ -0.023625f, 0.448230f, 0.027484f, 0.011562f, -0.205167f, -0.008611f,
+ 0.064930f, 0.119156f, -0.104183f, -0.066078f, 0.565530f, -0.631108f,
+ 0.623029f, 0.094334f, 0.279472f, -0.465059f, -0.164888f, -0.077706f,
+ 0.118130f, -0.466746f, 0.131800f, -0.338936f, 0.018497f, 0.182304f,
+ 0.091398f, 0.302547f, 0.281153f, -0.181899f, 0.071836f, -0.263911f,
+ -0.369380f, 0.258447f, 0.000014f, -0.015347f, 0.254619f, 0.166159f,
+ 0.097865f, 0.349389f, 0.259834f, 0.067003f, -0.192925f, -0.182080f,
+ 0.333139f, -0.450434f, -0.006836f, -0.544615f, 0.285183f, 0.240811f,
+ 0.000325f, -0.019796f, -0.694804f, 0.162411f, -0.612686f, -0.648134f,
+ 0.022338f, -0.265058f, 0.114993f, 0.189185f, 0.239697f, -0.193148f,
+ 0.125581f, 0.028122f, 0.230849f, 0.149832f, 0.250919f, -0.036871f,
+ -0.041136f, 0.281627f, -0.593466f, -0.141009f, -0.355074f, -0.106915f,
+ 0.181276f, 0.230753f, -0.283631f, -0.131643f, 0.038292f, -0.081563f,
+ 0.084345f, 0.111763f, -0.259882f, -0.049416f, -0.595824f, 0.320077f,
+ -0.175802f, -0.336422f, -0.070966f, -0.399242f, -0.005829f, -0.156680f,
+ 0.608591f, 0.318150f, -0.697767f, 0.123331f, -0.390716f, -0.071276f,
+ 0.045943f, 0.208958f, -0.076304f, 0.440505f, -0.134400f, 0.091525f,
+ 0.185763f, 0.023806f, 0.246186f, 0.090323f, -0.219133f, -0.504520f,
+ 0.519393f, -0.168939f, 0.028884f, 0.157380f, 0.031745f, -0.252830f,
+ -0.130705f, -0.034901f, 0.413302f, -0.240559f, 0.219279f, 0.086246f,
+ -0.065353f, -0.295376f, -0.079405f, -0.024226f, -0.410629f, 0.053706f,
+ -0.229794f, -0.026336f, 0.093956f, -0.252810f, -0.080555f, 0.097827f,
+ -0.513040f, 0.289508f, 0.677527f, 0.268109f, -0.088244f, 0.119781f,
+ -0.289511f, 0.524778f, 0.262884f, 0.220028f, -0.244767f, 0.089411f,
+ -0.156018f, -0.087030f, -0.159292f, -0.286646f, -0.253953f, -0.058657f,
+ -0.474756f, 0.169797f, -0.032919f, 0.195384f, 0.075355f, 0.138131f,
+ -0.414465f, -0.285118f, -0.124915f, 0.030645f, 0.315431f, -0.081032f,
+ 0.352546f, 0.132860f, 0.328112f, 0.035476f, -0.183550f, -0.413984f,
+ 0.043452f, 0.228748f, -0.081765f, -0.151125f, -0.086251f, -0.306448f,
+ -0.137774f, -0.050508f, 0.012811f, -0.017824f, 0.170841f, 0.030549f,
+ 0.506935f, 0.087197f, 0.504274f, -0.202080f, 0.147146f, -0.072728f,
+ 0.167713f, 0.165977f, -0.610894f, -0.370849f, -0.402698f, 0.112297f,
+ 0.410855f, -0.091330f, 0.227008f, 0.152454f, -0.293884f, 0.111074f,
+ -0.210121f, 0.423728f, -0.009101f, 0.457188f, -0.118785f, 0.164720f,
+ -0.017547f, -0.565046f, -0.274461f, 0.171169f, -0.015338f, -0.312635f,
+ -0.175044f, 0.069729f, -0.277504f, 0.272454f, -0.179049f, 0.505495f,
+ -0.301774f, 0.055664f, -0.425058f, -0.202222f, -0.165787f, 0.112155f,
+ 0.263284f, 0.083972f, -0.104256f, 0.227892f, 0.223253f, 0.033592f,
+ 0.159638f, 0.115358f, -0.275811f, 0.212265f, -0.183658f, -0.168768f};
+
+ const float kReference[] = {-0.248836f, -0.086982f, 0.083715f, -0.036787f,
+ 0.127212f, 0.147464f, -0.221733f, -0.004484f,
+ -0.535107f, 0.385999f, -0.116346f, -0.265302f};
+
+ for (bool use_audio_buffer_interface : {true, false}) {
+ RunBitexactnessTest(
+ 1, use_audio_buffer_interface,
+ CreateVector(rtc::ArrayView<const float>(kReferenceInput)),
+ CreateVector(rtc::ArrayView<const float>(kReference)));
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/include/aec_dump.cc b/third_party/libwebrtc/modules/audio_processing/include/aec_dump.cc
new file mode 100644
index 0000000000..8f788cb802
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/include/aec_dump.cc
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/include/aec_dump.h"
+
+namespace webrtc {
+InternalAPMConfig::InternalAPMConfig() = default;
+InternalAPMConfig::InternalAPMConfig(const InternalAPMConfig&) = default;
+InternalAPMConfig::InternalAPMConfig(InternalAPMConfig&&) = default;
+InternalAPMConfig& InternalAPMConfig::operator=(const InternalAPMConfig&) =
+ default;
+
+bool InternalAPMConfig::operator==(const InternalAPMConfig& other) const {
+ return aec_enabled == other.aec_enabled &&
+ aec_delay_agnostic_enabled == other.aec_delay_agnostic_enabled &&
+ aec_drift_compensation_enabled ==
+ other.aec_drift_compensation_enabled &&
+ aec_extended_filter_enabled == other.aec_extended_filter_enabled &&
+ aec_suppression_level == other.aec_suppression_level &&
+ aecm_enabled == other.aecm_enabled &&
+ aecm_comfort_noise_enabled == other.aecm_comfort_noise_enabled &&
+ aecm_routing_mode == other.aecm_routing_mode &&
+ agc_enabled == other.agc_enabled && agc_mode == other.agc_mode &&
+ agc_limiter_enabled == other.agc_limiter_enabled &&
+ hpf_enabled == other.hpf_enabled && ns_enabled == other.ns_enabled &&
+ ns_level == other.ns_level &&
+ transient_suppression_enabled == other.transient_suppression_enabled &&
+ noise_robust_agc_enabled == other.noise_robust_agc_enabled &&
+ pre_amplifier_enabled == other.pre_amplifier_enabled &&
+ pre_amplifier_fixed_gain_factor ==
+ other.pre_amplifier_fixed_gain_factor &&
+ experiments_description == other.experiments_description;
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/include/aec_dump.h b/third_party/libwebrtc/modules/audio_processing/include/aec_dump.h
new file mode 100644
index 0000000000..07477d2f82
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/include/aec_dump.h
@@ -0,0 +1,115 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_INCLUDE_AEC_DUMP_H_
+#define MODULES_AUDIO_PROCESSING_INCLUDE_AEC_DUMP_H_
+
+#include <stdint.h>
+
+#include <string>
+
+#include "absl/base/attributes.h"
+#include "modules/audio_processing/include/audio_frame_view.h"
+#include "modules/audio_processing/include/audio_processing.h"
+
+namespace webrtc {
+
+// Struct for passing current config from APM without having to
+// include protobuf headers.
+struct InternalAPMConfig {
+ InternalAPMConfig();
+ InternalAPMConfig(const InternalAPMConfig&);
+ InternalAPMConfig(InternalAPMConfig&&);
+
+ InternalAPMConfig& operator=(const InternalAPMConfig&);
+ InternalAPMConfig& operator=(InternalAPMConfig&&) = delete;
+
+ bool operator==(const InternalAPMConfig& other) const;
+
+ bool aec_enabled = false;
+ bool aec_delay_agnostic_enabled = false;
+ bool aec_drift_compensation_enabled = false;
+ bool aec_extended_filter_enabled = false;
+ int aec_suppression_level = 0;
+ bool aecm_enabled = false;
+ bool aecm_comfort_noise_enabled = false;
+ int aecm_routing_mode = 0;
+ bool agc_enabled = false;
+ int agc_mode = 0;
+ bool agc_limiter_enabled = false;
+ bool hpf_enabled = false;
+ bool ns_enabled = false;
+ int ns_level = 0;
+ bool transient_suppression_enabled = false;
+ bool noise_robust_agc_enabled = false;
+ bool pre_amplifier_enabled = false;
+ float pre_amplifier_fixed_gain_factor = 1.f;
+ std::string experiments_description = "";
+};
+
+// An interface for recording configuration and input/output streams
+// of the Audio Processing Module. The recordings are called
+// 'aec-dumps' and are stored in a protobuf format defined in
+// debug.proto.
+// The Write* methods are safe to call from any thread, concurrently or
+// not, for all implementing subclasses. The intended mode of operation is
+// to create a protobuf object from the input and hand it off to be
+// written to file asynchronously.
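+//
+// A minimal usage sketch (`aec_dump` is assumed to be obtained from a
+// factory, e.g. AecDumpFactory, which is not declared in this header):
+//   aec_dump->WriteInitMessage(processing_config, rtc::TimeMillis());
+//   aec_dump->AddCaptureStreamInput(input_view);
+//   aec_dump->AddCaptureStreamOutput(output_view);
+//   aec_dump->AddAudioProcessingState(state);
+//   aec_dump->WriteCaptureStreamMessage();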
+class AecDump {
+ public:
+ struct AudioProcessingState {
+ int delay;
+ int drift;
+ int level;
+ bool keypress;
+ };
+
+ virtual ~AecDump() = default;
+
+ // Logs Event::Type INIT message.
+ virtual void WriteInitMessage(const ProcessingConfig& api_format,
+ int64_t time_now_ms) = 0;
+ ABSL_DEPRECATED("")
+ void WriteInitMessage(const ProcessingConfig& api_format) {
+ WriteInitMessage(api_format, 0);
+ }
+
+ // Logs Event::Type STREAM message. To log an input/output pair,
+ // call the AddCapture* and AddAudioProcessingState methods followed
+ // by a WriteCaptureStreamMessage call.
+ virtual void AddCaptureStreamInput(
+ const AudioFrameView<const float>& src) = 0;
+ virtual void AddCaptureStreamOutput(
+ const AudioFrameView<const float>& src) = 0;
+ virtual void AddCaptureStreamInput(const int16_t* const data,
+ int num_channels,
+ int samples_per_channel) = 0;
+ virtual void AddCaptureStreamOutput(const int16_t* const data,
+ int num_channels,
+ int samples_per_channel) = 0;
+ virtual void AddAudioProcessingState(const AudioProcessingState& state) = 0;
+ virtual void WriteCaptureStreamMessage() = 0;
+
+ // Logs Event::Type REVERSE_STREAM message.
+ virtual void WriteRenderStreamMessage(const int16_t* const data,
+ int num_channels,
+ int samples_per_channel) = 0;
+ virtual void WriteRenderStreamMessage(
+ const AudioFrameView<const float>& src) = 0;
+
+ virtual void WriteRuntimeSetting(
+ const AudioProcessing::RuntimeSetting& runtime_setting) = 0;
+
+ // Logs Event::Type CONFIG message.
+ virtual void WriteConfig(const InternalAPMConfig& config) = 0;
+};
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_INCLUDE_AEC_DUMP_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/include/audio_frame_proxies.cc b/third_party/libwebrtc/modules/audio_processing/include/audio_frame_proxies.cc
new file mode 100644
index 0000000000..7cc4fb75e4
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/include/audio_frame_proxies.cc
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/include/audio_frame_proxies.h"
+
+#include "api/audio/audio_frame.h"
+#include "modules/audio_processing/include/audio_processing.h"
+
+namespace webrtc {
+
+int ProcessAudioFrame(AudioProcessing* ap, AudioFrame* frame) {
+ if (!frame || !ap) {
+ return AudioProcessing::Error::kNullPointerError;
+ }
+
+ StreamConfig input_config(frame->sample_rate_hz_, frame->num_channels_);
+ StreamConfig output_config(frame->sample_rate_hz_, frame->num_channels_);
+ RTC_DCHECK_EQ(frame->samples_per_channel(), input_config.num_frames());
+
+ int result = ap->ProcessStream(frame->data(), input_config, output_config,
+ frame->mutable_data());
+
+ AudioProcessingStats stats = ap->GetStatistics();
+
+ if (stats.voice_detected) {
+ frame->vad_activity_ = *stats.voice_detected
+ ? AudioFrame::VADActivity::kVadActive
+ : AudioFrame::VADActivity::kVadPassive;
+ }
+
+ return result;
+}
+
+int ProcessReverseAudioFrame(AudioProcessing* ap, AudioFrame* frame) {
+ if (!frame || !ap) {
+ return AudioProcessing::Error::kNullPointerError;
+ }
+
+ // Must be a native rate.
+ if (frame->sample_rate_hz_ != AudioProcessing::NativeRate::kSampleRate8kHz &&
+ frame->sample_rate_hz_ != AudioProcessing::NativeRate::kSampleRate16kHz &&
+ frame->sample_rate_hz_ != AudioProcessing::NativeRate::kSampleRate32kHz &&
+ frame->sample_rate_hz_ != AudioProcessing::NativeRate::kSampleRate48kHz) {
+ return AudioProcessing::Error::kBadSampleRateError;
+ }
+
+ if (frame->num_channels_ <= 0) {
+ return AudioProcessing::Error::kBadNumberChannelsError;
+ }
+
+ StreamConfig input_config(frame->sample_rate_hz_, frame->num_channels_);
+ StreamConfig output_config(frame->sample_rate_hz_, frame->num_channels_);
+
+ int result = ap->ProcessReverseStream(frame->data(), input_config,
+ output_config, frame->mutable_data());
+ return result;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/include/audio_frame_proxies.h b/third_party/libwebrtc/modules/audio_processing/include/audio_frame_proxies.h
new file mode 100644
index 0000000000..5dd111ca2b
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/include/audio_frame_proxies.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_INCLUDE_AUDIO_FRAME_PROXIES_H_
+#define MODULES_AUDIO_PROCESSING_INCLUDE_AUDIO_FRAME_PROXIES_H_
+
+namespace webrtc {
+
+class AudioFrame;
+class AudioProcessing;
+
+// Processes a 10 ms `frame` of the primary audio stream using the provided
+// AudioProcessing object. On the client-side, this is the near-end (or
+// captured) audio. The `sample_rate_hz_`, `num_channels_`, and
+// `samples_per_channel_` members of `frame` must be valid. If they have
+// changed since the previous call to this function, the provided
+// AudioProcessing object is reinitialized.
+// The function returns any error codes passed from the AudioProcessing
+// ProcessStream method.
+int ProcessAudioFrame(AudioProcessing* ap, AudioFrame* frame);
+
+// Processes a 10 ms `frame` of the reverse direction audio stream using the
+// provided AudioProcessing object. The frame may be modified. On the
+// client-side, this is the far-end (or to be rendered) audio. The
+// `sample_rate_hz_`, `num_channels_`, and `samples_per_channel_` members of
+// `frame` must be valid. If they have changed since the previous call to this
+// function, the provided AudioProcessing object is reinitialized.
+// The function returns any error codes passed from the AudioProcessing
+// ProcessReverseStream method.
+int ProcessReverseAudioFrame(AudioProcessing* ap, AudioFrame* frame);
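+
+// Usage sketch (`apm` is assumed to be an AudioProcessing instance created
+// elsewhere, e.g. via AudioProcessingBuilder, and `frame` to hold 10 ms of
+// audio with valid rate/channel members):
+//   int error = ProcessAudioFrame(apm.get(), &frame);
+//   RTC_DCHECK_EQ(error, AudioProcessing::kNoError);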
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_INCLUDE_AUDIO_FRAME_PROXIES_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/include/audio_frame_view.h b/third_party/libwebrtc/modules/audio_processing/include/audio_frame_view.h
new file mode 100644
index 0000000000..164784a7cc
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/include/audio_frame_view.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_INCLUDE_AUDIO_FRAME_VIEW_H_
+#define MODULES_AUDIO_PROCESSING_INCLUDE_AUDIO_FRAME_VIEW_H_
+
+#include "api/array_view.h"
+
+namespace webrtc {
+
+// Class to pass audio data in T** format, where T is a numeric type.
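+// Usage sketch (buffer names hypothetical): given `float* chans[2]`, each
+// pointing to 480 samples, `AudioFrameView<float> view(chans, 2, 480)`
+// exposes `view.channel(0)` as an rtc::ArrayView<float> without copying.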
+template <class T>
+class AudioFrameView {
+ public:
+  // `num_channels` and `channel_size` describe the T**
+  // `audio_samples`. `audio_samples` is assumed to point to a
+  // two-dimensional `num_channels x channel_size` array of elements of
+  // type T.
+ AudioFrameView(T* const* audio_samples, int num_channels, int channel_size)
+ : audio_samples_(audio_samples),
+ num_channels_(num_channels),
+ channel_size_(channel_size) {
+ RTC_DCHECK_GE(num_channels_, 0);
+ RTC_DCHECK_GE(channel_size_, 0);
+ }
+
+  // Implicit conversion to allow converting AudioFrameView<float> to
+  // AudioFrameView<const float>.
+ template <class U>
+ AudioFrameView(AudioFrameView<U> other)
+ : audio_samples_(other.data()),
+ num_channels_(other.num_channels()),
+ channel_size_(other.samples_per_channel()) {}
+
+ AudioFrameView() = delete;
+
+ int num_channels() const { return num_channels_; }
+
+ int samples_per_channel() const { return channel_size_; }
+
+ rtc::ArrayView<T> channel(int idx) {
+ RTC_DCHECK_LE(0, idx);
+    RTC_DCHECK_LT(idx, num_channels_);
+ return rtc::ArrayView<T>(audio_samples_[idx], channel_size_);
+ }
+
+ rtc::ArrayView<const T> channel(int idx) const {
+ RTC_DCHECK_LE(0, idx);
+    RTC_DCHECK_LT(idx, num_channels_);
+ return rtc::ArrayView<const T>(audio_samples_[idx], channel_size_);
+ }
+
+ T* const* data() { return audio_samples_; }
+
+ private:
+ T* const* audio_samples_;
+ int num_channels_;
+ int channel_size_;
+};
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_INCLUDE_AUDIO_FRAME_VIEW_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/include/audio_processing.cc b/third_party/libwebrtc/modules/audio_processing/include/audio_processing.cc
new file mode 100644
index 0000000000..86edaee087
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/include/audio_processing.cc
@@ -0,0 +1,211 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/include/audio_processing.h"
+
+#include "rtc_base/strings/string_builder.h"
+#include "rtc_base/system/arch.h"
+
+namespace webrtc {
+namespace {
+
+using Agc1Config = AudioProcessing::Config::GainController1;
+using Agc2Config = AudioProcessing::Config::GainController2;
+
+std::string NoiseSuppressionLevelToString(
+ const AudioProcessing::Config::NoiseSuppression::Level& level) {
+ switch (level) {
+ case AudioProcessing::Config::NoiseSuppression::Level::kLow:
+ return "Low";
+ case AudioProcessing::Config::NoiseSuppression::Level::kModerate:
+ return "Moderate";
+ case AudioProcessing::Config::NoiseSuppression::Level::kHigh:
+ return "High";
+ case AudioProcessing::Config::NoiseSuppression::Level::kVeryHigh:
+ return "VeryHigh";
+ }
+ RTC_CHECK_NOTREACHED();
+}
+
+std::string GainController1ModeToString(const Agc1Config::Mode& mode) {
+ switch (mode) {
+ case Agc1Config::Mode::kAdaptiveAnalog:
+ return "AdaptiveAnalog";
+ case Agc1Config::Mode::kAdaptiveDigital:
+ return "AdaptiveDigital";
+ case Agc1Config::Mode::kFixedDigital:
+ return "FixedDigital";
+ }
+ RTC_CHECK_NOTREACHED();
+}
+
+} // namespace
+
+constexpr int AudioProcessing::kNativeSampleRatesHz[];
+
+void CustomProcessing::SetRuntimeSetting(
+ AudioProcessing::RuntimeSetting setting) {}
+
+bool Agc1Config::operator==(const Agc1Config& rhs) const {
+ const auto& analog_lhs = analog_gain_controller;
+ const auto& analog_rhs = rhs.analog_gain_controller;
+ return enabled == rhs.enabled && mode == rhs.mode &&
+ target_level_dbfs == rhs.target_level_dbfs &&
+ compression_gain_db == rhs.compression_gain_db &&
+ enable_limiter == rhs.enable_limiter &&
+ analog_lhs.enabled == analog_rhs.enabled &&
+ analog_lhs.startup_min_volume == analog_rhs.startup_min_volume &&
+ analog_lhs.clipped_level_min == analog_rhs.clipped_level_min &&
+ analog_lhs.enable_digital_adaptive ==
+ analog_rhs.enable_digital_adaptive &&
+ analog_lhs.clipped_level_step == analog_rhs.clipped_level_step &&
+ analog_lhs.clipped_ratio_threshold ==
+ analog_rhs.clipped_ratio_threshold &&
+ analog_lhs.clipped_wait_frames == analog_rhs.clipped_wait_frames &&
+ analog_lhs.clipping_predictor.mode ==
+ analog_rhs.clipping_predictor.mode &&
+ analog_lhs.clipping_predictor.window_length ==
+ analog_rhs.clipping_predictor.window_length &&
+ analog_lhs.clipping_predictor.reference_window_length ==
+ analog_rhs.clipping_predictor.reference_window_length &&
+ analog_lhs.clipping_predictor.reference_window_delay ==
+ analog_rhs.clipping_predictor.reference_window_delay &&
+ analog_lhs.clipping_predictor.clipping_threshold ==
+ analog_rhs.clipping_predictor.clipping_threshold &&
+ analog_lhs.clipping_predictor.crest_factor_margin ==
+ analog_rhs.clipping_predictor.crest_factor_margin &&
+ analog_lhs.clipping_predictor.use_predicted_step ==
+ analog_rhs.clipping_predictor.use_predicted_step;
+}
+
+bool Agc2Config::AdaptiveDigital::operator==(
+ const Agc2Config::AdaptiveDigital& rhs) const {
+ return enabled == rhs.enabled && dry_run == rhs.dry_run &&
+ headroom_db == rhs.headroom_db && max_gain_db == rhs.max_gain_db &&
+ initial_gain_db == rhs.initial_gain_db &&
+ vad_reset_period_ms == rhs.vad_reset_period_ms &&
+ adjacent_speech_frames_threshold ==
+ rhs.adjacent_speech_frames_threshold &&
+ max_gain_change_db_per_second == rhs.max_gain_change_db_per_second &&
+ max_output_noise_level_dbfs == rhs.max_output_noise_level_dbfs;
+}
+
+bool Agc2Config::operator==(const Agc2Config& rhs) const {
+ return enabled == rhs.enabled &&
+ fixed_digital.gain_db == rhs.fixed_digital.gain_db &&
+ adaptive_digital == rhs.adaptive_digital;
+}
+
+bool AudioProcessing::Config::CaptureLevelAdjustment::operator==(
+ const AudioProcessing::Config::CaptureLevelAdjustment& rhs) const {
+ return enabled == rhs.enabled && pre_gain_factor == rhs.pre_gain_factor &&
+ post_gain_factor == rhs.post_gain_factor &&
+ analog_mic_gain_emulation == rhs.analog_mic_gain_emulation;
+}
+
+bool AudioProcessing::Config::CaptureLevelAdjustment::AnalogMicGainEmulation::
+operator==(const AudioProcessing::Config::CaptureLevelAdjustment::
+ AnalogMicGainEmulation& rhs) const {
+ return enabled == rhs.enabled && initial_level == rhs.initial_level;
+}
+
+std::string AudioProcessing::Config::ToString() const {
+ char buf[2048];
+ rtc::SimpleStringBuilder builder(buf);
+ builder << "AudioProcessing::Config{ "
+ "pipeline: { "
+ "maximum_internal_processing_rate: "
+ << pipeline.maximum_internal_processing_rate
+ << ", multi_channel_render: " << pipeline.multi_channel_render
+ << ", multi_channel_capture: " << pipeline.multi_channel_capture
+ << " }, pre_amplifier: { enabled: " << pre_amplifier.enabled
+ << ", fixed_gain_factor: " << pre_amplifier.fixed_gain_factor
+ << " },capture_level_adjustment: { enabled: "
+ << capture_level_adjustment.enabled
+ << ", pre_gain_factor: " << capture_level_adjustment.pre_gain_factor
+ << ", post_gain_factor: " << capture_level_adjustment.post_gain_factor
+ << ", analog_mic_gain_emulation: { enabled: "
+ << capture_level_adjustment.analog_mic_gain_emulation.enabled
+ << ", initial_level: "
+ << capture_level_adjustment.analog_mic_gain_emulation.initial_level
+ << " }}, high_pass_filter: { enabled: " << high_pass_filter.enabled
+ << " }, echo_canceller: { enabled: " << echo_canceller.enabled
+ << ", mobile_mode: " << echo_canceller.mobile_mode
+ << ", enforce_high_pass_filtering: "
+ << echo_canceller.enforce_high_pass_filtering
+ << " }, noise_suppression: { enabled: " << noise_suppression.enabled
+ << ", level: "
+ << NoiseSuppressionLevelToString(noise_suppression.level)
+ << " }, transient_suppression: { enabled: "
+ << transient_suppression.enabled
+ << " }, gain_controller1: { enabled: " << gain_controller1.enabled
+ << ", mode: " << GainController1ModeToString(gain_controller1.mode)
+ << ", target_level_dbfs: " << gain_controller1.target_level_dbfs
+ << ", compression_gain_db: " << gain_controller1.compression_gain_db
+ << ", enable_limiter: " << gain_controller1.enable_limiter
+ << ", analog_gain_controller { enabled: "
+ << gain_controller1.analog_gain_controller.enabled
+ << ", startup_min_volume: "
+ << gain_controller1.analog_gain_controller.startup_min_volume
+ << ", clipped_level_min: "
+ << gain_controller1.analog_gain_controller.clipped_level_min
+ << ", enable_digital_adaptive: "
+ << gain_controller1.analog_gain_controller.enable_digital_adaptive
+ << ", clipped_level_step: "
+ << gain_controller1.analog_gain_controller.clipped_level_step
+ << ", clipped_ratio_threshold: "
+ << gain_controller1.analog_gain_controller.clipped_ratio_threshold
+ << ", clipped_wait_frames: "
+ << gain_controller1.analog_gain_controller.clipped_wait_frames
+ << ", clipping_predictor: { enabled: "
+ << gain_controller1.analog_gain_controller.clipping_predictor.enabled
+ << ", mode: "
+ << gain_controller1.analog_gain_controller.clipping_predictor.mode
+ << ", window_length: "
+ << gain_controller1.analog_gain_controller.clipping_predictor
+ .window_length
+ << ", reference_window_length: "
+ << gain_controller1.analog_gain_controller.clipping_predictor
+ .reference_window_length
+ << ", reference_window_delay: "
+ << gain_controller1.analog_gain_controller.clipping_predictor
+ .reference_window_delay
+ << ", clipping_threshold: "
+ << gain_controller1.analog_gain_controller.clipping_predictor
+ .clipping_threshold
+ << ", crest_factor_margin: "
+ << gain_controller1.analog_gain_controller.clipping_predictor
+ .crest_factor_margin
+ << ", use_predicted_step: "
+ << gain_controller1.analog_gain_controller.clipping_predictor
+ .use_predicted_step
+ << " }}}, gain_controller2: { enabled: " << gain_controller2.enabled
+ << ", fixed_digital: { gain_db: "
+ << gain_controller2.fixed_digital.gain_db
+ << " }, adaptive_digital: { enabled: "
+ << gain_controller2.adaptive_digital.enabled
+ << ", dry_run: " << gain_controller2.adaptive_digital.dry_run
+ << ", headroom_db: " << gain_controller2.adaptive_digital.headroom_db
+ << ", max_gain_db: " << gain_controller2.adaptive_digital.max_gain_db
+ << ", initial_gain_db: "
+ << gain_controller2.adaptive_digital.initial_gain_db
+ << ", vad_reset_period_ms: "
+ << gain_controller2.adaptive_digital.vad_reset_period_ms
+ << ", adjacent_speech_frames_threshold: "
+ << gain_controller2.adaptive_digital.adjacent_speech_frames_threshold
+ << ", max_gain_change_db_per_second: "
+ << gain_controller2.adaptive_digital.max_gain_change_db_per_second
+ << ", max_output_noise_level_dbfs: "
+ << gain_controller2.adaptive_digital.max_output_noise_level_dbfs
+         << " }}";
+ return builder.str();
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/include/audio_processing.h b/third_party/libwebrtc/modules/audio_processing/include/audio_processing.h
new file mode 100644
index 0000000000..cce35aec17
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/include/audio_processing.h
@@ -0,0 +1,932 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_INCLUDE_AUDIO_PROCESSING_H_
+#define MODULES_AUDIO_PROCESSING_INCLUDE_AUDIO_PROCESSING_H_
+
+// MSVC++ requires this to be set before any other includes to get M_PI.
+// MOZILLA: this is already defined in mozilla-config.h
+// #define _USE_MATH_DEFINES
+
+#include <math.h>
+#include <stddef.h> // size_t
+#include <stdio.h> // FILE
+#include <string.h>
+
+#include <vector>
+
+#include "absl/strings/string_view.h"
+#include "absl/types/optional.h"
+#include "api/array_view.h"
+#include "api/audio/echo_canceller3_config.h"
+#include "api/audio/echo_control.h"
+#include "api/scoped_refptr.h"
+#include "modules/audio_processing/include/audio_processing_statistics.h"
+#include "rtc_base/arraysize.h"
+#include "rtc_base/ref_count.h"
+#include "rtc_base/system/file_wrapper.h"
+#include "rtc_base/system/rtc_export.h"
+
+namespace rtc {
+class TaskQueue;
+} // namespace rtc
+
+namespace webrtc {
+
+class AecDump;
+class AudioBuffer;
+
+class StreamConfig;
+class ProcessingConfig;
+
+class EchoDetector;
+class CustomAudioAnalyzer;
+class CustomProcessing;
+
+// Used to enable the experimental gain control (AGC). At startup the experimental
+// AGC moves the microphone volume up to `startup_min_volume` if the current
+// microphone volume is set too low. The value is clamped to its operating range
+// [12, 255]. Here, 255 maps to 100%.
+//
+// Must be provided through AudioProcessingBuilder().Create(config).
+#if defined(WEBRTC_CHROMIUM_BUILD)
+static constexpr int kAgcStartupMinVolume = 85;
+#else
+static constexpr int kAgcStartupMinVolume = 0;
+#endif // defined(WEBRTC_CHROMIUM_BUILD)
+static constexpr int kClippedLevelMin = 70;
+
+// The Audio Processing Module (APM) provides a collection of voice processing
+// components designed for real-time communications software.
+//
+// APM operates on two audio streams on a frame-by-frame basis. Frames of the
+// primary stream, on which all processing is applied, are passed to
+// `ProcessStream()`. Frames of the reverse direction stream are passed to
+// `ProcessReverseStream()`. On the client-side, this will typically be the
+// near-end (capture) and far-end (render) streams, respectively. APM should be
+// placed in the signal chain as close to the audio hardware abstraction layer
+// (HAL) as possible.
+//
+// On the server-side, the reverse stream will normally not be used, with
+// processing occurring on each incoming stream.
+//
+// Component interfaces follow a similar pattern and are accessed through
+// corresponding getters in APM. All components are disabled at create-time,
+// with default settings that are recommended for most situations. New settings
+// can be applied without enabling a component. Enabling a component triggers
+// memory allocation and initialization to allow it to start processing the
+// streams.
+//
+// Thread safety is provided with the following assumptions to reduce locking
+// overhead:
+// 1. The stream getters and setters are called from the same thread as
+// ProcessStream(). More precisely, stream functions are never called
+// concurrently with ProcessStream().
+// 2. Parameter getters are never called concurrently with the corresponding
+// setter.
+//
+// APM accepts only linear PCM audio data in chunks of ~10 ms (see
+// AudioProcessing::GetFrameSize() for details). The int16 interfaces use
+// interleaved data, while the float interfaces use deinterleaved data.
+//
+// Usage example, omitting error checking:
+// AudioProcessing* apm = AudioProcessingBuilder().Create();
+//
+// AudioProcessing::Config config;
+// config.echo_canceller.enabled = true;
+// config.echo_canceller.mobile_mode = false;
+//
+// config.gain_controller1.enabled = true;
+// config.gain_controller1.mode =
+//     AudioProcessing::Config::GainController1::kAdaptiveAnalog;
+//
+// config.gain_controller2.enabled = true;
+//
+// config.high_pass_filter.enabled = true;
+//
+// config.noise_suppression.enabled = true;
+// config.noise_suppression.level =
+//     AudioProcessing::Config::NoiseSuppression::kHigh;
+//
+// apm->ApplyConfig(config);
+//
+// // Start a voice call...
+//
+// // ... Render frame arrives bound for the audio HAL ...
+// apm->ProcessReverseStream(render_frame);
+//
+// // ... Capture frame arrives from the audio HAL ...
+// // Call required set_stream_ functions.
+// apm->set_stream_delay_ms(delay_ms);
+// apm->set_stream_analog_level(analog_level);
+//
+// apm->ProcessStream(capture_frame);
+//
+// // Call required stream_ functions.
+// analog_level = apm->recommended_stream_analog_level();
+// has_voice = apm->stream_has_voice();
+//
+// // Repeat render and capture processing for the duration of the call...
+// // Start a new call...
+// apm->Initialize();
+//
+// // Close the application...
+// delete apm;
+//
+class RTC_EXPORT AudioProcessing : public rtc::RefCountInterface {
+ public:
+ // The struct below constitutes the new parameter scheme for the audio
+  // processing. It is being introduced gradually; until it is fully rolled
+  // out, it is prone to change.
+ // TODO(peah): Remove this comment once the new config scheme is fully rolled
+ // out.
+ //
+ // The parameters and behavior of the audio processing module are controlled
+ // by changing the default values in the AudioProcessing::Config struct.
+ // The config is applied by passing the struct to the ApplyConfig method.
+ //
+ // This config is intended to be used during setup, and to enable/disable
+ // top-level processing effects. Use during processing may cause undesired
+ // submodule resets, affecting the audio quality. Use the RuntimeSetting
+ // construct for runtime configuration.
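+  //
+  // For example (a minimal sketch): apply structural changes via ApplyConfig
+  // at setup time,
+  //   AudioProcessing::Config config = apm->GetConfig();
+  //   config.echo_canceller.enabled = true;
+  //   apm->ApplyConfig(config);
+  // but prefer a RuntimeSetting for adjustments while processing:
+  //   apm->SetRuntimeSetting(
+  //       AudioProcessing::RuntimeSetting::CreateCapturePreGain(2.0f));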
+ struct RTC_EXPORT Config {
+ // Sets the properties of the audio processing pipeline.
+ struct RTC_EXPORT Pipeline {
+      // Maximum allowed processing rate used internally. May only be set to
+      // 32000 or 48000; any other value is treated as 48000.
+ int maximum_internal_processing_rate = 48000;
+ // Allow multi-channel processing of render audio.
+ bool multi_channel_render = false;
+      // Allow multi-channel processing of capture audio when AEC3 is active
+      // or a custom AEC is injected.
+ bool multi_channel_capture = false;
+ } pipeline;
+
+    // Enables the pre-amplifier, which amplifies the capture signal
+    // before any other processing is done.
+ // TODO(webrtc:5298): Deprecate and use the pre-gain functionality in
+ // capture_level_adjustment instead.
+ struct PreAmplifier {
+ bool enabled = false;
+ float fixed_gain_factor = 1.0f;
+ } pre_amplifier;
+
+ // Functionality for general level adjustment in the capture pipeline. This
+ // should not be used together with the legacy PreAmplifier functionality.
+ struct CaptureLevelAdjustment {
+ bool operator==(const CaptureLevelAdjustment& rhs) const;
+ bool operator!=(const CaptureLevelAdjustment& rhs) const {
+ return !(*this == rhs);
+ }
+ bool enabled = false;
+ // The `pre_gain_factor` scales the signal before any processing is done.
+ float pre_gain_factor = 1.0f;
+ // The `post_gain_factor` scales the signal after all processing is done.
+ float post_gain_factor = 1.0f;
+ struct AnalogMicGainEmulation {
+ bool operator==(const AnalogMicGainEmulation& rhs) const;
+ bool operator!=(const AnalogMicGainEmulation& rhs) const {
+ return !(*this == rhs);
+ }
+ bool enabled = false;
+ // Initial analog gain level to use for the emulated analog gain. Must
+ // be in the range [0...255].
+ int initial_level = 255;
+ } analog_mic_gain_emulation;
+ } capture_level_adjustment;
+
+ struct HighPassFilter {
+ bool enabled = false;
+ bool apply_in_full_band = true;
+ } high_pass_filter;
+
+ struct EchoCanceller {
+ bool enabled = false;
+ bool mobile_mode = false;
+ bool export_linear_aec_output = false;
+      // Enforces the high-pass filter to be on (has no effect in mobile
+      // mode).
+ bool enforce_high_pass_filtering = true;
+ } echo_canceller;
+
+ // Enables background noise suppression.
+ struct NoiseSuppression {
+ bool enabled = false;
+ enum Level { kLow, kModerate, kHigh, kVeryHigh };
+ Level level = kModerate;
+ bool analyze_linear_aec_output_when_available = false;
+ } noise_suppression;
+
+ // Enables transient suppression.
+ struct TransientSuppression {
+ bool enabled = false;
+ } transient_suppression;
+
+ // Enables automatic gain control (AGC) functionality.
+ // The automatic gain control (AGC) component brings the signal to an
+ // appropriate range. This is done by applying a digital gain directly and,
+ // in the analog mode, prescribing an analog gain to be applied at the audio
+ // HAL.
+ // Recommended to be enabled on the client-side.
+ struct RTC_EXPORT GainController1 {
+ bool operator==(const GainController1& rhs) const;
+ bool operator!=(const GainController1& rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool enabled = false;
+ enum Mode {
+ // Adaptive mode intended for use if an analog volume control is
+ // available on the capture device. It will require the user to provide
+ // coupling between the OS mixer controls and AGC through the
+ // stream_analog_level() functions.
+ // It consists of an analog gain prescription for the audio device and a
+ // digital compression stage.
+ kAdaptiveAnalog,
+ // Adaptive mode intended for situations in which an analog volume
+ // control is unavailable. It operates in a similar fashion to the
+ // adaptive analog mode, but with scaling instead applied in the digital
+ // domain. As with the analog mode, it additionally uses a digital
+ // compression stage.
+ kAdaptiveDigital,
+ // Fixed mode which enables only the digital compression stage also used
+ // by the two adaptive modes.
+ // It is distinguished from the adaptive modes by considering only a
+ // short time-window of the input signal. It applies a fixed gain
+ // through most of the input level range, and compresses (gradually
+ // reduces gain with increasing level) the input signal at higher
+ // levels. This mode is preferred on embedded devices where the capture
+ // signal level is predictable, so that a known gain can be applied.
+ kFixedDigital
+ };
+ Mode mode = kAdaptiveAnalog;
+      // Sets the target peak level (or envelope) of the AGC in dBFS
+      // (decibels from digital full-scale). The convention is to use
+      // positive values. For instance, passing in a value of 3 corresponds
+      // to -3 dBFS, or a target level 3 dB below full-scale. Limited to
+      // [0, 31].
+ int target_level_dbfs = 3;
+ // Sets the maximum gain the digital compression stage may apply, in dB. A
+ // higher number corresponds to greater compression, while a value of 0
+ // will leave the signal uncompressed. Limited to [0, 90].
+ // For updates after APM setup, use a RuntimeSetting instead.
+ int compression_gain_db = 9;
+ // When enabled, the compression stage will hard limit the signal to the
+ // target level. Otherwise, the signal will be compressed but not limited
+ // above the target level.
+ bool enable_limiter = true;
+
+ // Enables the analog gain controller functionality.
+ struct AnalogGainController {
+ bool enabled = true;
+ // TODO(bugs.webrtc.org/1275566): Describe `startup_min_volume`.
+ int startup_min_volume = kAgcStartupMinVolume;
+ // Lowest analog microphone level that will be applied in response to
+ // clipping.
+ int clipped_level_min = kClippedLevelMin;
+ // If true, an adaptive digital gain is applied.
+ bool enable_digital_adaptive = true;
+ // Amount the microphone level is lowered with every clipping event.
+ // Limited to (0, 255].
+ int clipped_level_step = 15;
+ // Proportion of clipped samples required to declare a clipping event.
+ // Limited to (0.f, 1.f).
+ float clipped_ratio_threshold = 0.1f;
+ // Time in frames to wait after a clipping event before checking again.
+ // Limited to values higher than 0.
+ int clipped_wait_frames = 300;
+
+ // Enables clipping prediction functionality.
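+      // For example (a minimal sketch): set `enabled = true` together with
+      // `use_predicted_step = false` to run the predictor in analysis-only
+      // mode, without modifying the analog gain.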
+ struct ClippingPredictor {
+ bool enabled = false;
+ enum Mode {
+ // Clipping event prediction mode with fixed step estimation.
+ kClippingEventPrediction,
+ // Clipped peak estimation mode with adaptive step estimation.
+ kAdaptiveStepClippingPeakPrediction,
+ // Clipped peak estimation mode with fixed step estimation.
+ kFixedStepClippingPeakPrediction,
+ };
+ Mode mode = kClippingEventPrediction;
+ // Number of frames in the sliding analysis window.
+ int window_length = 5;
+ // Number of frames in the sliding reference window.
+ int reference_window_length = 5;
+ // Reference window delay (unit: number of frames).
+ int reference_window_delay = 5;
+ // Clipping prediction threshold (dBFS).
+ float clipping_threshold = -1.0f;
+ // Crest factor drop threshold (dB).
+ float crest_factor_margin = 3.0f;
+ // If true, the recommended clipped level step is used to modify the
+ // analog gain. Otherwise, the predictor runs without affecting the
+ // analog gain.
+ bool use_predicted_step = true;
+ } clipping_predictor;
+ } analog_gain_controller;
+ } gain_controller1;
+
+    // Enables the next generation AGC functionality. This feature replaces
+    // the standard methods of gain control in the previous AGC. Enabling
+    // this submodule enables an adaptive digital AGC followed by a limiter.
+    // By setting `fixed_digital.gain_db`, the limiter can be turned into a
+    // compressor that first applies a fixed gain. The adaptive digital AGC
+    // can be turned off by setting `adaptive_digital.enabled` to false.
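+    // For example (a minimal sketch), the limiter becomes a compressor that
+    // first applies 6 dB of fixed gain with:
+    //   config.gain_controller2.enabled = true;
+    //   config.gain_controller2.fixed_digital.gain_db = 6.0f;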
+ struct RTC_EXPORT GainController2 {
+ bool operator==(const GainController2& rhs) const;
+ bool operator!=(const GainController2& rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool enabled = false;
+ struct FixedDigital {
+ float gain_db = 0.0f;
+ } fixed_digital;
+ struct RTC_EXPORT AdaptiveDigital {
+ bool operator==(const AdaptiveDigital& rhs) const;
+ bool operator!=(const AdaptiveDigital& rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool enabled = false;
+ // When true, the adaptive digital controller runs but the signal is not
+ // modified.
+ bool dry_run = false;
+ float headroom_db = 6.0f;
+ // TODO(bugs.webrtc.org/7494): Consider removing and inferring from
+ // `max_output_noise_level_dbfs`.
+ float max_gain_db = 30.0f;
+ float initial_gain_db = 8.0f;
+ int vad_reset_period_ms = 1500;
+ int adjacent_speech_frames_threshold = 12;
+ float max_gain_change_db_per_second = 3.0f;
+ float max_output_noise_level_dbfs = -50.0f;
+ } adaptive_digital;
+ } gain_controller2;
+
+ std::string ToString() const;
+ };
+
+ // Specifies the properties of a setting to be passed to AudioProcessing at
+ // runtime.
+ class RuntimeSetting {
+ public:
+ enum class Type {
+ kNotSpecified,
+ kCapturePreGain,
+ kCaptureCompressionGain,
+ kCaptureFixedPostGain,
+ kPlayoutVolumeChange,
+ kCustomRenderProcessingRuntimeSetting,
+ kPlayoutAudioDeviceChange,
+ kCapturePostGain,
+ kCaptureOutputUsed
+ };
+
+ // Play-out audio device properties.
+ struct PlayoutAudioDeviceInfo {
+ int id; // Identifies the audio device.
+ int max_volume; // Maximum play-out volume.
+ };
+
+ RuntimeSetting() : type_(Type::kNotSpecified), value_(0.0f) {}
+ ~RuntimeSetting() = default;
+
+ static RuntimeSetting CreateCapturePreGain(float gain) {
+ return {Type::kCapturePreGain, gain};
+ }
+
+ static RuntimeSetting CreateCapturePostGain(float gain) {
+ return {Type::kCapturePostGain, gain};
+ }
+
+ // Corresponds to Config::GainController1::compression_gain_db, but for
+ // runtime configuration.
+ static RuntimeSetting CreateCompressionGainDb(int gain_db) {
+ RTC_DCHECK_GE(gain_db, 0);
+ RTC_DCHECK_LE(gain_db, 90);
+ return {Type::kCaptureCompressionGain, static_cast<float>(gain_db)};
+ }
+
+ // Corresponds to Config::GainController2::fixed_digital::gain_db, but for
+ // runtime configuration.
+ static RuntimeSetting CreateCaptureFixedPostGain(float gain_db) {
+ RTC_DCHECK_GE(gain_db, 0.0f);
+ RTC_DCHECK_LE(gain_db, 90.0f);
+ return {Type::kCaptureFixedPostGain, gain_db};
+ }
+
+ // Creates a runtime setting to notify play-out (aka render) audio device
+ // changes.
+ static RuntimeSetting CreatePlayoutAudioDeviceChange(
+ PlayoutAudioDeviceInfo audio_device) {
+ return {Type::kPlayoutAudioDeviceChange, audio_device};
+ }
+
+    // Creates a runtime setting to notify play-out (aka render) volume
+    // changes. `volume` is the unnormalized volume; its maximum is given by
+    // the audio device (see PlayoutAudioDeviceInfo::max_volume).
+ static RuntimeSetting CreatePlayoutVolumeChange(int volume) {
+ return {Type::kPlayoutVolumeChange, volume};
+ }
+
+ static RuntimeSetting CreateCustomRenderSetting(float payload) {
+ return {Type::kCustomRenderProcessingRuntimeSetting, payload};
+ }
+
+ static RuntimeSetting CreateCaptureOutputUsedSetting(
+ bool capture_output_used) {
+ return {Type::kCaptureOutputUsed, capture_output_used};
+ }
+
+ Type type() const { return type_; }
+ // Getters do not return a value but instead modify the argument to protect
+ // from implicit casting.
+ void GetFloat(float* value) const {
+ RTC_DCHECK(value);
+ *value = value_.float_value;
+ }
+ void GetInt(int* value) const {
+ RTC_DCHECK(value);
+ *value = value_.int_value;
+ }
+ void GetBool(bool* value) const {
+ RTC_DCHECK(value);
+ *value = value_.bool_value;
+ }
+ void GetPlayoutAudioDeviceInfo(PlayoutAudioDeviceInfo* value) const {
+ RTC_DCHECK(value);
+ *value = value_.playout_audio_device_info;
+ }
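+
+    // For example (a minimal sketch, given a RuntimeSetting `setting`):
+    //   float gain = 1.0f;
+    //   if (setting.type() == Type::kCapturePreGain) {
+    //     setting.GetFloat(&gain);
+    //   }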
+
+ private:
+ RuntimeSetting(Type id, float value) : type_(id), value_(value) {}
+ RuntimeSetting(Type id, int value) : type_(id), value_(value) {}
+ RuntimeSetting(Type id, PlayoutAudioDeviceInfo value)
+ : type_(id), value_(value) {}
+ Type type_;
+ union U {
+ U() {}
+ U(int value) : int_value(value) {}
+ U(float value) : float_value(value) {}
+ U(PlayoutAudioDeviceInfo value) : playout_audio_device_info(value) {}
+ float float_value;
+ int int_value;
+ bool bool_value;
+ PlayoutAudioDeviceInfo playout_audio_device_info;
+ } value_;
+ };
+
+ ~AudioProcessing() override {}
+
+ // Initializes internal states, while retaining all user settings. This
+ // should be called before beginning to process a new audio stream. However,
+ // it is not necessary to call before processing the first stream after
+ // creation.
+ //
+ // It is also not necessary to call if the audio parameters (sample
+ // rate and number of channels) have changed. Passing updated parameters
+ // directly to `ProcessStream()` and `ProcessReverseStream()` is permissible.
+ // If the parameters are known at init-time though, they may be provided.
+ // TODO(webrtc:5298): Change to return void.
+ virtual int Initialize() = 0;
+
+ // The int16 interfaces require:
+  // - that only `NativeRate`s are used,
+  // - that the input, output, and reverse rates match, and
+  // - that `processing_config.output_stream()` matches
+  //   `processing_config.input_stream()`.
+ //
+ // The float interfaces accept arbitrary rates and support differing input and
+ // output layouts, but the output must have either one channel or the same
+ // number of channels as the input.
+ virtual int Initialize(const ProcessingConfig& processing_config) = 0;
+
+ // TODO(peah): This method is a temporary solution used to take control
+ // over the parameters in the audio processing module and is likely to change.
+ virtual void ApplyConfig(const Config& config) = 0;
+
+ // TODO(ajm): Only intended for internal use. Make private and friend the
+ // necessary classes?
+ virtual int proc_sample_rate_hz() const = 0;
+ virtual int proc_split_sample_rate_hz() const = 0;
+ virtual size_t num_input_channels() const = 0;
+ virtual size_t num_proc_channels() const = 0;
+ virtual size_t num_output_channels() const = 0;
+ virtual size_t num_reverse_channels() const = 0;
+
+ // Set to true when the output of AudioProcessing will be muted or in some
+ // other way not used. Ideally, the captured audio would still be processed,
+ // but some components may change behavior based on this information.
+  // Default false. This method takes a lock; to achieve the same in a
+  // lock-free manner, use PostRuntimeSetting() instead.
+ virtual void set_output_will_be_muted(bool muted) = 0;
+
+ // Enqueues a runtime setting.
+ virtual void SetRuntimeSetting(RuntimeSetting setting) = 0;
+
+  // Enqueues a runtime setting. Returns a bool indicating whether the
+  // enqueueing was successful.
+ virtual bool PostRuntimeSetting(RuntimeSetting setting) = 0;
+
+ // Accepts and produces a ~10 ms frame of interleaved 16 bit integer audio as
+ // specified in `input_config` and `output_config`. `src` and `dest` may use
+ // the same memory, if desired.
+ virtual int ProcessStream(const int16_t* const src,
+ const StreamConfig& input_config,
+ const StreamConfig& output_config,
+ int16_t* const dest) = 0;
+
+ // Accepts deinterleaved float audio with the range [-1, 1]. Each element of
+ // `src` points to a channel buffer, arranged according to `input_stream`. At
+ // output, the channels will be arranged according to `output_stream` in
+ // `dest`.
+ //
+ // The output must have one channel or as many channels as the input. `src`
+ // and `dest` may use the same memory, if desired.
+ virtual int ProcessStream(const float* const* src,
+ const StreamConfig& input_config,
+ const StreamConfig& output_config,
+ float* const* dest) = 0;
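+
+  // For example (a minimal sketch, mono 48 kHz, processing in place):
+  //   std::vector<float> channel(AudioProcessing::GetFrameSize(48000));
+  //   float* channels[] = {channel.data()};
+  //   StreamConfig config(48000, 1);
+  //   apm->ProcessStream(channels, config, config, channels);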
+
+ // Accepts and produces a ~10 ms frame of interleaved 16 bit integer audio for
+ // the reverse direction audio stream as specified in `input_config` and
+ // `output_config`. `src` and `dest` may use the same memory, if desired.
+ virtual int ProcessReverseStream(const int16_t* const src,
+ const StreamConfig& input_config,
+ const StreamConfig& output_config,
+ int16_t* const dest) = 0;
+
+ // Accepts deinterleaved float audio with the range [-1, 1]. Each element of
+ // `data` points to a channel buffer, arranged according to `reverse_config`.
+ virtual int ProcessReverseStream(const float* const* src,
+ const StreamConfig& input_config,
+ const StreamConfig& output_config,
+ float* const* dest) = 0;
+
+ // Accepts deinterleaved float audio with the range [-1, 1]. Each element
+ // of `data` points to a channel buffer, arranged according to
+ // `reverse_config`.
+ virtual int AnalyzeReverseStream(const float* const* data,
+ const StreamConfig& reverse_config) = 0;
+
+ // Returns the most recently produced ~10 ms of the linear AEC output at a
+ // rate of 16 kHz. If there is more than one capture channel, a mono
+  // representation of the input is returned. Returns true if an output was
+  // produced, false otherwise.
+ virtual bool GetLinearAecOutput(
+ rtc::ArrayView<std::array<float, 160>> linear_output) const = 0;
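+
+  // For example (a minimal sketch, assuming mono capture):
+  //   std::array<float, 160> linear;
+  //   bool produced = apm->GetLinearAecOutput(
+  //       rtc::ArrayView<std::array<float, 160>>(&linear, 1));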
+
+ // This must be called prior to ProcessStream() if and only if adaptive analog
+ // gain control is enabled, to pass the current analog level from the audio
+ // HAL. Must be within the range [0, 255].
+ virtual void set_stream_analog_level(int level) = 0;
+
+ // When an analog mode is set, this should be called after ProcessStream()
+ // to obtain the recommended new analog level for the audio HAL. It is the
+ // user's responsibility to apply this level.
+ virtual int recommended_stream_analog_level() const = 0;
+
+ // This must be called if and only if echo processing is enabled.
+ //
+ // Sets the `delay` in ms between ProcessReverseStream() receiving a far-end
+ // frame and ProcessStream() receiving a near-end frame containing the
+ // corresponding echo. On the client-side this can be expressed as
+ // delay = (t_render - t_analyze) + (t_process - t_capture)
+ // where,
+ // - t_analyze is the time a frame is passed to ProcessReverseStream() and
+ // t_render is the time the first sample of the same frame is rendered by
+ // the audio hardware.
+ // - t_capture is the time the first sample of a frame is captured by the
+ // audio hardware and t_process is the time the same frame is passed to
+ // ProcessStream().
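+  //
+  // For example (hypothetical timestamps, in ms): with t_analyze = 100,
+  // t_render = 130, t_capture = 110 and t_process = 120, the delay is
+  // (130 - 100) + (120 - 110) = 40 ms.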
+ virtual int set_stream_delay_ms(int delay) = 0;
+ virtual int stream_delay_ms() const = 0;
+
+ // Call to signal that a key press occurred (true) or did not occur (false)
+ // with this chunk of audio.
+ virtual void set_stream_key_pressed(bool key_pressed) = 0;
+
+  // Creates and attaches a webrtc::AecDump for recording debugging
+  // information.
+  // The `worker_queue` may not be null and must outlive the created
+  // AecDump instance. `max_log_size_bytes == -1` means the log size
+  // will be unlimited. `handle` may not be null. The AecDump takes
+  // responsibility for `handle` and closes it in the destructor. A
+  // return value of true indicates that the file has been
+  // successfully opened, while a value of false indicates that
+  // opening the file failed.
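+  //
+  // For example (a minimal sketch; the file name is hypothetical):
+  //   apm->CreateAndAttachAecDump("aec.dump", /*max_log_size_bytes=*/-1,
+  //                               worker_queue);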
+ virtual bool CreateAndAttachAecDump(absl::string_view file_name,
+ int64_t max_log_size_bytes,
+ rtc::TaskQueue* worker_queue) = 0;
+ virtual bool CreateAndAttachAecDump(FILE* handle,
+ int64_t max_log_size_bytes,
+ rtc::TaskQueue* worker_queue) = 0;
+
+ // TODO(webrtc:5298) Deprecated variant.
+  // Attaches the provided webrtc::AecDump for recording debugging
+  // information. Log file and maximum file size logic are supposed to
+  // be handled by the implementing instance of AecDump. Calling this
+  // method when another AecDump is attached replaces the active AecDump
+  // with the new one. This causes the destructor of the earlier AecDump
+  // to be called; the destructor call may block until all pending logging
+  // tasks are completed.
+ virtual void AttachAecDump(std::unique_ptr<AecDump> aec_dump) = 0;
+
+  // If no AecDump is attached, this has no effect. If an AecDump is
+  // attached, its destructor is called. The destructor may block until
+  // all pending logging tasks are completed.
+ virtual void DetachAecDump() = 0;
+
+ // Get audio processing statistics.
+ virtual AudioProcessingStats GetStatistics() = 0;
+ // TODO(webrtc:5298) Deprecated variant. The `has_remote_tracks` argument
+ // should be set if there are active remote tracks (this would usually be true
+ // during a call). If there are no remote tracks some of the stats will not be
+ // set by AudioProcessing, because they only make sense if there is at least
+ // one remote track.
+ virtual AudioProcessingStats GetStatistics(bool has_remote_tracks) = 0;
+
+ // Returns the last applied configuration.
+ virtual AudioProcessing::Config GetConfig() const = 0;
+
+ enum Error {
+ // Fatal errors.
+ kNoError = 0,
+ kUnspecifiedError = -1,
+ kCreationFailedError = -2,
+ kUnsupportedComponentError = -3,
+ kUnsupportedFunctionError = -4,
+ kNullPointerError = -5,
+ kBadParameterError = -6,
+ kBadSampleRateError = -7,
+ kBadDataLengthError = -8,
+ kBadNumberChannelsError = -9,
+ kFileError = -10,
+ kStreamParameterNotSetError = -11,
+ kNotEnabledError = -12,
+
+ // Warnings are non-fatal.
+ // This results when a set_stream_ parameter is out of range. Processing
+ // will continue, but the parameter may have been truncated.
+ kBadStreamParameterWarning = -13
+ };
+
+ // Native rates supported by the integer interfaces.
+ enum NativeRate {
+ kSampleRate8kHz = 8000,
+ kSampleRate16kHz = 16000,
+ kSampleRate32kHz = 32000,
+ kSampleRate44_1kHz = 44100,
+ kSampleRate48kHz = 48000
+ };
+
+ // TODO(kwiberg): We currently need to support a compiler (Visual C++) that
+ // complains if we don't explicitly state the size of the array here. Remove
+ // the size when that's no longer the case.
+ static constexpr int kNativeSampleRatesHz[4] = {
+ kSampleRate8kHz, kSampleRate16kHz, kSampleRate32kHz, kSampleRate48kHz};
+ static constexpr size_t kNumNativeSampleRates =
+ arraysize(kNativeSampleRatesHz);
+ static constexpr int kMaxNativeSampleRateHz =
+ kNativeSampleRatesHz[kNumNativeSampleRates - 1];
+
+ // APM processes audio in chunks of about 10 ms. See GetFrameSize() for
+ // details.
+ static constexpr int kChunkSizeMs = 10;
+
+ // Returns floor(sample_rate_hz/100): the number of samples per channel used
+ // as input and output to the audio processing module in calls to
+ // ProcessStream, ProcessReverseStream, AnalyzeReverseStream, and
+ // GetLinearAecOutput.
+ //
+ // This is exactly 10 ms for sample rates divisible by 100. For example:
+ // - 48000 Hz (480 samples per channel),
+ // - 44100 Hz (441 samples per channel),
+ // - 16000 Hz (160 samples per channel).
+ //
+ // Sample rates not divisible by 100 are received/produced in frames of
+ // approximately 10 ms. For example:
+ // - 22050 Hz (220 samples per channel, or ~9.98 ms per frame),
+ // - 11025 Hz (110 samples per channel, or ~9.98 ms per frame).
+ // These nondivisible sample rates yield lower audio quality compared to
+ // multiples of 100. Internal resampling to 10 ms frames causes a simulated
+ // clock drift effect which impacts the performance of (for example) echo
+ // cancellation.
+ static int GetFrameSize(int sample_rate_hz) { return sample_rate_hz / 100; }
+};
+
+class RTC_EXPORT AudioProcessingBuilder {
+ public:
+ AudioProcessingBuilder();
+ AudioProcessingBuilder(const AudioProcessingBuilder&) = delete;
+ AudioProcessingBuilder& operator=(const AudioProcessingBuilder&) = delete;
+ ~AudioProcessingBuilder();
+
+ // Sets the APM configuration.
+ AudioProcessingBuilder& SetConfig(const AudioProcessing::Config& config) {
+ config_ = config;
+ return *this;
+ }
+
+ // Sets the echo controller factory to inject when APM is created.
+ AudioProcessingBuilder& SetEchoControlFactory(
+ std::unique_ptr<EchoControlFactory> echo_control_factory) {
+ echo_control_factory_ = std::move(echo_control_factory);
+ return *this;
+ }
+
+ // Sets the capture post-processing sub-module to inject when APM is created.
+ AudioProcessingBuilder& SetCapturePostProcessing(
+ std::unique_ptr<CustomProcessing> capture_post_processing) {
+ capture_post_processing_ = std::move(capture_post_processing);
+ return *this;
+ }
+
+ // Sets the render pre-processing sub-module to inject when APM is created.
+ AudioProcessingBuilder& SetRenderPreProcessing(
+ std::unique_ptr<CustomProcessing> render_pre_processing) {
+ render_pre_processing_ = std::move(render_pre_processing);
+ return *this;
+ }
+
+ // Sets the echo detector to inject when APM is created.
+ AudioProcessingBuilder& SetEchoDetector(
+ rtc::scoped_refptr<EchoDetector> echo_detector) {
+ echo_detector_ = std::move(echo_detector);
+ return *this;
+ }
+
+ // Sets the capture analyzer sub-module to inject when APM is created.
+ AudioProcessingBuilder& SetCaptureAnalyzer(
+ std::unique_ptr<CustomAudioAnalyzer> capture_analyzer) {
+ capture_analyzer_ = std::move(capture_analyzer);
+ return *this;
+ }
+
+ // Creates an APM instance with the specified config or the default one if
+  // unspecified. Injects the specified components, transferring their
+  // ownership to the newly created APM instance; i.e., except for the
+  // config, the builder is reset to its initial state.
+ rtc::scoped_refptr<AudioProcessing> Create();
+
+ private:
+ AudioProcessing::Config config_;
+ std::unique_ptr<EchoControlFactory> echo_control_factory_;
+ std::unique_ptr<CustomProcessing> capture_post_processing_;
+ std::unique_ptr<CustomProcessing> render_pre_processing_;
+ rtc::scoped_refptr<EchoDetector> echo_detector_;
+ std::unique_ptr<CustomAudioAnalyzer> capture_analyzer_;
+};
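+
+// Usage sketch for the builder (MyEchoControlFactory stands in for any
+// EchoControlFactory implementation; it is not part of this header):
+//   rtc::scoped_refptr<AudioProcessing> apm =
+//       AudioProcessingBuilder()
+//           .SetConfig(config)
+//           .SetEchoControlFactory(std::make_unique<MyEchoControlFactory>())
+//           .Create();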
+
+class StreamConfig {
+ public:
+ // sample_rate_hz: The sampling rate of the stream.
+ // num_channels: The number of audio channels in the stream.
+ StreamConfig(int sample_rate_hz = 0, size_t num_channels = 0)
+ : sample_rate_hz_(sample_rate_hz),
+ num_channels_(num_channels),
+ num_frames_(calculate_frames(sample_rate_hz)) {}
+
+ void set_sample_rate_hz(int value) {
+ sample_rate_hz_ = value;
+ num_frames_ = calculate_frames(value);
+ }
+ void set_num_channels(size_t value) { num_channels_ = value; }
+
+ int sample_rate_hz() const { return sample_rate_hz_; }
+
+ // The number of channels in the stream.
+ size_t num_channels() const { return num_channels_; }
+
+ size_t num_frames() const { return num_frames_; }
+ size_t num_samples() const { return num_channels_ * num_frames_; }
+
+ bool operator==(const StreamConfig& other) const {
+ return sample_rate_hz_ == other.sample_rate_hz_ &&
+ num_channels_ == other.num_channels_;
+ }
+
+ bool operator!=(const StreamConfig& other) const { return !(*this == other); }
+
+ private:
+ static size_t calculate_frames(int sample_rate_hz) {
+ return static_cast<size_t>(AudioProcessing::GetFrameSize(sample_rate_hz));
+ }
+
+ int sample_rate_hz_;
+ size_t num_channels_;
+ size_t num_frames_;
+};
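+
+// For example, StreamConfig(48000, 2) describes a 48 kHz stereo stream:
+// num_frames() == 480 (one 10 ms chunk per channel) and
+// num_samples() == 960 (across both channels).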
+
+class ProcessingConfig {
+ public:
+ enum StreamName {
+ kInputStream,
+ kOutputStream,
+ kReverseInputStream,
+ kReverseOutputStream,
+ kNumStreamNames,
+ };
+
+ const StreamConfig& input_stream() const {
+ return streams[StreamName::kInputStream];
+ }
+ const StreamConfig& output_stream() const {
+ return streams[StreamName::kOutputStream];
+ }
+ const StreamConfig& reverse_input_stream() const {
+ return streams[StreamName::kReverseInputStream];
+ }
+ const StreamConfig& reverse_output_stream() const {
+ return streams[StreamName::kReverseOutputStream];
+ }
+
+ StreamConfig& input_stream() { return streams[StreamName::kInputStream]; }
+ StreamConfig& output_stream() { return streams[StreamName::kOutputStream]; }
+ StreamConfig& reverse_input_stream() {
+ return streams[StreamName::kReverseInputStream];
+ }
+ StreamConfig& reverse_output_stream() {
+ return streams[StreamName::kReverseOutputStream];
+ }
+
+ bool operator==(const ProcessingConfig& other) const {
+ for (int i = 0; i < StreamName::kNumStreamNames; ++i) {
+ if (this->streams[i] != other.streams[i]) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ bool operator!=(const ProcessingConfig& other) const {
+ return !(*this == other);
+ }
+
+ StreamConfig streams[StreamName::kNumStreamNames];
+};
+
+// Experimental interface for a custom analysis submodule.
+class CustomAudioAnalyzer {
+ public:
+  // (Re-)Initializes the submodule.
+ virtual void Initialize(int sample_rate_hz, int num_channels) = 0;
+ // Analyzes the given capture or render signal.
+ virtual void Analyze(const AudioBuffer* audio) = 0;
+ // Returns a string representation of the module state.
+ virtual std::string ToString() const = 0;
+
+ virtual ~CustomAudioAnalyzer() {}
+};
+
+// Interface for a custom processing submodule.
+class CustomProcessing {
+ public:
+ // (Re-)Initializes the submodule.
+ virtual void Initialize(int sample_rate_hz, int num_channels) = 0;
+ // Processes the given capture or render signal.
+ virtual void Process(AudioBuffer* audio) = 0;
+ // Returns a string representation of the module state.
+ virtual std::string ToString() const = 0;
+ // Handles RuntimeSettings. TODO(webrtc:9262): make pure virtual
+ // after updating dependencies.
+ virtual void SetRuntimeSetting(AudioProcessing::RuntimeSetting setting);
+
+ virtual ~CustomProcessing() {}
+};
+
+// Interface for an echo detector submodule.
+class EchoDetector : public rtc::RefCountInterface {
+ public:
+ // (Re-)Initializes the submodule.
+ virtual void Initialize(int capture_sample_rate_hz,
+ int num_capture_channels,
+ int render_sample_rate_hz,
+ int num_render_channels) = 0;
+
+ // Analysis (not changing) of the first channel of the render signal.
+ virtual void AnalyzeRenderAudio(rtc::ArrayView<const float> render_audio) = 0;
+
+ // Analysis (not changing) of the capture signal.
+ virtual void AnalyzeCaptureAudio(
+ rtc::ArrayView<const float> capture_audio) = 0;
+
+ struct Metrics {
+ absl::optional<double> echo_likelihood;
+ absl::optional<double> echo_likelihood_recent_max;
+ };
+
+ // Collect current metrics from the echo detector.
+ virtual Metrics GetMetrics() const = 0;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_INCLUDE_AUDIO_PROCESSING_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/include/audio_processing_statistics.cc b/third_party/libwebrtc/modules/audio_processing/include/audio_processing_statistics.cc
new file mode 100644
index 0000000000..7139ee502e
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/include/audio_processing_statistics.cc
@@ -0,0 +1,22 @@
+/*
+ * Copyright 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/include/audio_processing_statistics.h"
+
+namespace webrtc {
+
+AudioProcessingStats::AudioProcessingStats() = default;
+
+AudioProcessingStats::AudioProcessingStats(const AudioProcessingStats& other) =
+ default;
+
+AudioProcessingStats::~AudioProcessingStats() = default;
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/include/audio_processing_statistics.h b/third_party/libwebrtc/modules/audio_processing/include/audio_processing_statistics.h
new file mode 100644
index 0000000000..3b43319951
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/include/audio_processing_statistics.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_INCLUDE_AUDIO_PROCESSING_STATISTICS_H_
+#define MODULES_AUDIO_PROCESSING_INCLUDE_AUDIO_PROCESSING_STATISTICS_H_
+
+#include <stdint.h>
+
+#include "absl/types/optional.h"
+#include "rtc_base/system/rtc_export.h"
+
+namespace webrtc {
+// This version of the stats uses Optionals; it will replace the regular
+// AudioProcessingStatistics struct.
+struct RTC_EXPORT AudioProcessingStats {
+ AudioProcessingStats();
+ AudioProcessingStats(const AudioProcessingStats& other);
+ ~AudioProcessingStats();
+
+ // Deprecated.
+ // TODO(bugs.webrtc.org/11226): Remove.
+ // True if voice is detected in the last capture frame, after processing.
+ // It is conservative in flagging audio as speech, with low likelihood of
+ // incorrectly flagging a frame as voice.
+ // Only reported if voice detection is enabled in AudioProcessing::Config.
+ absl::optional<bool> voice_detected;
+
+ // AEC Statistics.
+ // ERL = 10log_10(P_far / P_echo)
+ absl::optional<double> echo_return_loss;
+ // ERLE = 10log_10(P_echo / P_out)
+ absl::optional<double> echo_return_loss_enhancement;
+ // Fraction of time that the AEC linear filter is divergent, in a 1-second
+ // non-overlapped aggregation window.
+ absl::optional<double> divergent_filter_fraction;
+
+  // The delay metrics consist of the delay median and standard deviation, as
+  // well as the fraction of delay estimates that can make the echo
+  // cancellation perform poorly. The values are aggregated until the first
+  // call to `GetStatistics()` and afterwards aggregated and updated every
+  // second. Note that if there are several clients pulling metrics from
+  // `GetStatistics()` during a session, the first call from any of them
+  // switches to a one-second aggregation window for all.
+ absl::optional<int32_t> delay_median_ms;
+ absl::optional<int32_t> delay_standard_deviation_ms;
+
+ // Residual echo detector likelihood.
+ absl::optional<double> residual_echo_likelihood;
+ // Maximum residual echo likelihood from the last time period.
+ absl::optional<double> residual_echo_likelihood_recent_max;
+
+ // The instantaneous delay estimate produced in the AEC. The unit is in
+ // milliseconds and the value is the instantaneous value at the time of the
+ // call to `GetStatistics()`.
+ absl::optional<int32_t> delay_ms;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_INCLUDE_AUDIO_PROCESSING_STATISTICS_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/include/mock_audio_processing.h b/third_party/libwebrtc/modules/audio_processing/include/mock_audio_processing.h
new file mode 100644
index 0000000000..2ea1a865c3
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/include/mock_audio_processing.h
@@ -0,0 +1,178 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_INCLUDE_MOCK_AUDIO_PROCESSING_H_
+#define MODULES_AUDIO_PROCESSING_INCLUDE_MOCK_AUDIO_PROCESSING_H_
+
+#include <memory>
+
+#include "absl/strings/string_view.h"
+#include "modules/audio_processing/include/aec_dump.h"
+#include "modules/audio_processing/include/audio_processing.h"
+#include "modules/audio_processing/include/audio_processing_statistics.h"
+#include "test/gmock.h"
+
+namespace webrtc {
+
+namespace test {
+class MockCustomProcessing : public CustomProcessing {
+ public:
+ virtual ~MockCustomProcessing() {}
+ MOCK_METHOD(void,
+ Initialize,
+ (int sample_rate_hz, int num_channels),
+ (override));
+ MOCK_METHOD(void, Process, (AudioBuffer * audio), (override));
+ MOCK_METHOD(void,
+ SetRuntimeSetting,
+ (AudioProcessing::RuntimeSetting setting),
+ (override));
+ MOCK_METHOD(std::string, ToString, (), (const, override));
+};
+
+class MockCustomAudioAnalyzer : public CustomAudioAnalyzer {
+ public:
+ virtual ~MockCustomAudioAnalyzer() {}
+ MOCK_METHOD(void,
+ Initialize,
+ (int sample_rate_hz, int num_channels),
+ (override));
+ MOCK_METHOD(void, Analyze, (const AudioBuffer* audio), (override));
+ MOCK_METHOD(std::string, ToString, (), (const, override));
+};
+
+class MockEchoControl : public EchoControl {
+ public:
+ virtual ~MockEchoControl() {}
+ MOCK_METHOD(void, AnalyzeRender, (AudioBuffer * render), (override));
+ MOCK_METHOD(void, AnalyzeCapture, (AudioBuffer * capture), (override));
+ MOCK_METHOD(void,
+ ProcessCapture,
+ (AudioBuffer * capture, bool echo_path_change),
+ (override));
+ MOCK_METHOD(void,
+ ProcessCapture,
+ (AudioBuffer * capture,
+ AudioBuffer* linear_output,
+ bool echo_path_change),
+ (override));
+ MOCK_METHOD(Metrics, GetMetrics, (), (const, override));
+ MOCK_METHOD(void, SetAudioBufferDelay, (int delay_ms), (override));
+ MOCK_METHOD(bool, ActiveProcessing, (), (const, override));
+};
+
+class MockEchoDetector : public EchoDetector {
+ public:
+ virtual ~MockEchoDetector() {}
+ MOCK_METHOD(void,
+ Initialize,
+ (int capture_sample_rate_hz,
+ int num_capture_channels,
+ int render_sample_rate_hz,
+ int num_render_channels),
+ (override));
+ MOCK_METHOD(void,
+ AnalyzeRenderAudio,
+ (rtc::ArrayView<const float> render_audio),
+ (override));
+ MOCK_METHOD(void,
+ AnalyzeCaptureAudio,
+ (rtc::ArrayView<const float> capture_audio),
+ (override));
+ MOCK_METHOD(Metrics, GetMetrics, (), (const, override));
+};
+
+class MockAudioProcessing : public AudioProcessing {
+ public:
+ MockAudioProcessing() {}
+
+ virtual ~MockAudioProcessing() {}
+
+ MOCK_METHOD(int, Initialize, (), (override));
+ MOCK_METHOD(int,
+ Initialize,
+ (const ProcessingConfig& processing_config),
+ (override));
+ MOCK_METHOD(void, ApplyConfig, (const Config& config), (override));
+ MOCK_METHOD(int, proc_sample_rate_hz, (), (const, override));
+ MOCK_METHOD(int, proc_split_sample_rate_hz, (), (const, override));
+ MOCK_METHOD(size_t, num_input_channels, (), (const, override));
+ MOCK_METHOD(size_t, num_proc_channels, (), (const, override));
+ MOCK_METHOD(size_t, num_output_channels, (), (const, override));
+ MOCK_METHOD(size_t, num_reverse_channels, (), (const, override));
+ MOCK_METHOD(void, set_output_will_be_muted, (bool muted), (override));
+ MOCK_METHOD(void, SetRuntimeSetting, (RuntimeSetting setting), (override));
+ MOCK_METHOD(bool, PostRuntimeSetting, (RuntimeSetting setting), (override));
+ MOCK_METHOD(int,
+ ProcessStream,
+ (const int16_t* const src,
+ const StreamConfig& input_config,
+ const StreamConfig& output_config,
+ int16_t* const dest),
+ (override));
+ MOCK_METHOD(int,
+ ProcessStream,
+ (const float* const* src,
+ const StreamConfig& input_config,
+ const StreamConfig& output_config,
+ float* const* dest),
+ (override));
+ MOCK_METHOD(int,
+ ProcessReverseStream,
+ (const int16_t* const src,
+ const StreamConfig& input_config,
+ const StreamConfig& output_config,
+ int16_t* const dest),
+ (override));
+ MOCK_METHOD(int,
+ AnalyzeReverseStream,
+ (const float* const* data, const StreamConfig& reverse_config),
+ (override));
+ MOCK_METHOD(int,
+ ProcessReverseStream,
+ (const float* const* src,
+ const StreamConfig& input_config,
+ const StreamConfig& output_config,
+ float* const* dest),
+ (override));
+ MOCK_METHOD(bool,
+ GetLinearAecOutput,
+ ((rtc::ArrayView<std::array<float, 160>> linear_output)),
+ (const, override));
+ MOCK_METHOD(int, set_stream_delay_ms, (int delay), (override));
+ MOCK_METHOD(int, stream_delay_ms, (), (const, override));
+ MOCK_METHOD(void, set_stream_key_pressed, (bool key_pressed), (override));
+ MOCK_METHOD(void, set_stream_analog_level, (int), (override));
+ MOCK_METHOD(int, recommended_stream_analog_level, (), (const, override));
+ MOCK_METHOD(bool,
+ CreateAndAttachAecDump,
+ (absl::string_view file_name,
+ int64_t max_log_size_bytes,
+ rtc::TaskQueue* worker_queue),
+ (override));
+ MOCK_METHOD(bool,
+ CreateAndAttachAecDump,
+ (FILE * handle,
+ int64_t max_log_size_bytes,
+ rtc::TaskQueue* worker_queue),
+ (override));
+ MOCK_METHOD(void, AttachAecDump, (std::unique_ptr<AecDump>), (override));
+ MOCK_METHOD(void, DetachAecDump, (), (override));
+
+ MOCK_METHOD(AudioProcessingStats, GetStatistics, (), (override));
+ MOCK_METHOD(AudioProcessingStats, GetStatistics, (bool), (override));
+
+ MOCK_METHOD(AudioProcessing::Config, GetConfig, (), (const, override));
+};
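+
+// Usage sketch in a test body (a minimal, hypothetical expectation):
+//   MockAudioProcessing apm;
+//   EXPECT_CALL(apm, GetConfig())
+//       .WillOnce(::testing::Return(AudioProcessing::Config()));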
+
+} // namespace test
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_INCLUDE_MOCK_AUDIO_PROCESSING_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/logging/apm_data_dumper.cc b/third_party/libwebrtc/modules/audio_processing/logging/apm_data_dumper.cc
new file mode 100644
index 0000000000..a15321ad48
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/logging/apm_data_dumper.cc
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+
+#include "absl/strings/string_view.h"
+#include "rtc_base/strings/string_builder.h"
+
+// Check to verify that the define is properly set.
+#if !defined(WEBRTC_APM_DEBUG_DUMP) || \
+ (WEBRTC_APM_DEBUG_DUMP != 0 && WEBRTC_APM_DEBUG_DUMP != 1)
+#error "Set WEBRTC_APM_DEBUG_DUMP to either 0 or 1"
+#endif
+
+namespace webrtc {
+namespace {
+
+#if WEBRTC_APM_DEBUG_DUMP == 1
+
+#if defined(WEBRTC_WIN)
+constexpr char kPathDelimiter = '\\';
+#else
+constexpr char kPathDelimiter = '/';
+#endif
+
+std::string FormFileName(absl::string_view output_dir,
+                         absl::string_view name,
+                         int instance_index,
+                         int reinit_index,
+                         absl::string_view suffix) {
+  // MOZILLA: `output_dir` is ignored here; the base directory comes from the
+  // AEC debug filename registered with rtc::LogMessage.
+  std::stringstream ss;
+  std::string base = rtc::LogMessage::aec_debug_filename();
+  ss << base;
+
+  if (base.length() && base.back() != kPathDelimiter) {
+    ss << kPathDelimiter;
+  }
+
+  ss << name << "_" << instance_index << "-" << reinit_index << suffix;
+  return ss.str();
+}
+#endif
+
+} // namespace
+
+#if WEBRTC_APM_DEBUG_DUMP == 1
+ApmDataDumper::ApmDataDumper(int instance_index)
+    : instance_index_(instance_index), debug_written_(0) {}
+#else
+ApmDataDumper::ApmDataDumper(int instance_index) {}
+#endif
+
+ApmDataDumper::~ApmDataDumper() = default;
+
+#if WEBRTC_APM_DEBUG_DUMP == 1
+bool ApmDataDumper::recording_activated_ = false;
+absl::optional<int> ApmDataDumper::dump_set_to_use_;
+char ApmDataDumper::output_dir_[] = "";
+
+FILE* ApmDataDumper::GetRawFile(absl::string_view name) {
+ std::string filename = FormFileName(output_dir_, name, instance_index_,
+ recording_set_index_, ".dat");
+ auto& f = raw_files_[filename];
+ if (!f) {
+ f.reset(fopen(filename.c_str(), "wb"));
+ RTC_CHECK(f.get()) << "Cannot write to " << filename << ".";
+ }
+ return f.get();
+}
+
+WavWriter* ApmDataDumper::GetWavFile(absl::string_view name,
+ int sample_rate_hz,
+ int num_channels,
+ WavFile::SampleFormat format) {
+ std::string filename = FormFileName(output_dir_, name, instance_index_,
+ recording_set_index_, ".wav");
+ auto& f = wav_files_[filename];
+ if (!f) {
+ f.reset(
+ new WavWriter(filename.c_str(), sample_rate_hz, num_channels, format));
+ }
+ return f.get();
+}
+#endif
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/logging/apm_data_dumper.h b/third_party/libwebrtc/modules/audio_processing/logging/apm_data_dumper.h
new file mode 100644
index 0000000000..aa8496819b
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/logging/apm_data_dumper.h
@@ -0,0 +1,452 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_LOGGING_APM_DATA_DUMPER_H_
+#define MODULES_AUDIO_PROCESSING_LOGGING_APM_DATA_DUMPER_H_
+
+#include <stdint.h>
+#include <stdio.h>
+
+#if WEBRTC_APM_DEBUG_DUMP == 1
+#include <memory>
+#include <string>
+#include <unordered_map>
+#endif
+
+#include "absl/strings/string_view.h"
+#include "absl/types/optional.h"
+#include "api/array_view.h"
+#if WEBRTC_APM_DEBUG_DUMP == 1
+#include "common_audio/wav_file.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/string_utils.h"
+#endif
+
+// Check to verify that the define is properly set.
+#if !defined(WEBRTC_APM_DEBUG_DUMP) || \
+ (WEBRTC_APM_DEBUG_DUMP != 0 && WEBRTC_APM_DEBUG_DUMP != 1)
+#error "Set WEBRTC_APM_DEBUG_DUMP to either 0 or 1"
+#endif
+
+namespace webrtc {
+
+#if WEBRTC_APM_DEBUG_DUMP == 1
+// Functor used as a custom deleter in the map of file pointers to raw
+// files.
+struct RawFileCloseFunctor {
+ void operator()(FILE* f) const { if (f) fclose(f); }
+};
+#endif
+
+// Class that handles dumping of variables into files.
+class ApmDataDumper {
+ public:
+ // Constructor that takes an instance index that may
+ // be used to distinguish data dumped from different
+ // instances of the code.
+ explicit ApmDataDumper(int instance_index);
+
+ ApmDataDumper() = delete;
+ ApmDataDumper(const ApmDataDumper&) = delete;
+ ApmDataDumper& operator=(const ApmDataDumper&) = delete;
+
+ ~ApmDataDumper();
+
+  // Activates or deactivates the dumping functionality.
+ static void SetActivated(bool activated) {
+#if WEBRTC_APM_DEBUG_DUMP == 1
+ recording_activated_ = activated;
+#endif
+ }
+
+ // Returns whether dumping functionality is enabled/available.
+ static bool IsAvailable() {
+#if WEBRTC_APM_DEBUG_DUMP == 1
+ return true;
+#else
+ return false;
+#endif
+ }
+
+ // Default dump set.
+ static constexpr size_t kDefaultDumpSet = 0;
+
+  // Specifies what dump set to use. All dump commands with a different dump
+  // set than the one specified will be discarded. If not specified, all dump
+  // sets will be used.
+ static void SetDumpSetToUse(int dump_set_to_use) {
+#if WEBRTC_APM_DEBUG_DUMP == 1
+ dump_set_to_use_ = dump_set_to_use;
+#endif
+ }
+
+ // Set an optional output directory.
+ static void SetOutputDirectory(absl::string_view output_dir) {
+#if WEBRTC_APM_DEBUG_DUMP == 1
+ RTC_CHECK_LT(output_dir.size(), kOutputDirMaxLength);
+    rtc::strcpyn(output_dir_, output_dir.size() + 1, output_dir);
+#endif
+ }
+
+ // Reinitializes the data dumping such that new versions
+ // of all files being dumped to are created.
+ void InitiateNewSetOfRecordings() {
+#if WEBRTC_APM_DEBUG_DUMP == 1
+ ++recording_set_index_;
+ debug_written_ = 0;
+#endif
+ }
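+
+  // Usage sketch (a minimal example; dumps are no-ops unless
+  // WEBRTC_APM_DEBUG_DUMP == 1 and recording has been activated):
+  //   ApmDataDumper dumper(/*instance_index=*/0);
+  //   ApmDataDumper::SetActivated(true);
+  //   dumper.DumpRaw("gain_db", 3.0f);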
+
+ // Methods for performing dumping of data of various types into
+ // various formats.
+ void DumpRaw(absl::string_view name,
+ double v,
+ int dump_set = kDefaultDumpSet) {
+#if WEBRTC_APM_DEBUG_DUMP == 1
+ if (dump_set_to_use_ && *dump_set_to_use_ != dump_set)
+ return;
+
+ if (recording_activated_) {
+ FILE* file = GetRawFile(name);
+ if (file) {
+ fwrite(&v, sizeof(v), 1, file);
+ }
+ }
+#endif
+ }
+
+ void DumpRaw(absl::string_view name,
+ size_t v_length,
+ const double* v,
+ int dump_set = kDefaultDumpSet) {
+#if WEBRTC_APM_DEBUG_DUMP == 1
+ if (dump_set_to_use_ && *dump_set_to_use_ != dump_set)
+ return;
+
+ if (recording_activated_) {
+ FILE* file = GetRawFile(name);
+ if (file) {
+ fwrite(v, sizeof(v[0]), v_length, file);
+ }
+ }
+#endif
+ }
+
+ void DumpRaw(absl::string_view name,
+ rtc::ArrayView<const double> v,
+ int dump_set = kDefaultDumpSet) {
+#if WEBRTC_APM_DEBUG_DUMP == 1
+ if (dump_set_to_use_ && *dump_set_to_use_ != dump_set)
+ return;
+
+ if (recording_activated_) {
+ DumpRaw(name, v.size(), v.data());
+ }
+#endif
+ }
+
+ void DumpRaw(absl::string_view name,
+ float v,
+ int dump_set = kDefaultDumpSet) {
+#if WEBRTC_APM_DEBUG_DUMP == 1
+ if (dump_set_to_use_ && *dump_set_to_use_ != dump_set)
+ return;
+
+ if (recording_activated_) {
+ FILE* file = GetRawFile(name);
+ if (file) {
+ fwrite(&v, sizeof(v), 1, file);
+ }
+ }
+#endif
+ }
+
+ void DumpRaw(absl::string_view name,
+ size_t v_length,
+ const float* v,
+ int dump_set = kDefaultDumpSet) {
+#if WEBRTC_APM_DEBUG_DUMP == 1
+ if (dump_set_to_use_ && *dump_set_to_use_ != dump_set)
+ return;
+
+ if (recording_activated_) {
+ FILE* file = GetRawFile(name);
+ if (file) {
+ fwrite(v, sizeof(v[0]), v_length, file);
+ }
+ }
+#endif
+ }
+
+ void DumpRaw(absl::string_view name,
+ rtc::ArrayView<const float> v,
+ int dump_set = kDefaultDumpSet) {
+#if WEBRTC_APM_DEBUG_DUMP == 1
+ if (dump_set_to_use_ && *dump_set_to_use_ != dump_set)
+ return;
+
+ if (recording_activated_) {
+ DumpRaw(name, v.size(), v.data());
+ }
+#endif
+ }
+
+ void DumpRaw(absl::string_view name, bool v, int dump_set = kDefaultDumpSet) {
+#if WEBRTC_APM_DEBUG_DUMP == 1
+ if (dump_set_to_use_ && *dump_set_to_use_ != dump_set)
+ return;
+
+ if (recording_activated_) {
+ DumpRaw(name, static_cast<int16_t>(v));
+ }
+#endif
+ }
+
+ void DumpRaw(absl::string_view name,
+ size_t v_length,
+ const bool* v,
+ int dump_set = kDefaultDumpSet) {
+#if WEBRTC_APM_DEBUG_DUMP == 1
+ if (dump_set_to_use_ && *dump_set_to_use_ != dump_set)
+ return;
+
+ if (recording_activated_) {
+ FILE* file = GetRawFile(name);
+ if (file) {
+ for (size_t k = 0; k < v_length; ++k) {
+ int16_t value = static_cast<int16_t>(v[k]);
+ fwrite(&value, sizeof(value), 1, file);
+ }
+ }
+ }
+#endif
+ }
+
+ void DumpRaw(absl::string_view name,
+ rtc::ArrayView<const bool> v,
+ int dump_set = kDefaultDumpSet) {
+#if WEBRTC_APM_DEBUG_DUMP == 1
+ if (dump_set_to_use_ && *dump_set_to_use_ != dump_set)
+ return;
+
+ if (recording_activated_) {
+ DumpRaw(name, v.size(), v.data());
+ }
+#endif
+ }
+
+ void DumpRaw(absl::string_view name,
+ int16_t v,
+ int dump_set = kDefaultDumpSet) {
+#if WEBRTC_APM_DEBUG_DUMP == 1
+ if (dump_set_to_use_ && *dump_set_to_use_ != dump_set)
+ return;
+
+ if (recording_activated_) {
+ FILE* file = GetRawFile(name);
+ if (file) {
+ fwrite(&v, sizeof(v), 1, file);
+ }
+ }
+#endif
+ }
+
+ void DumpRaw(absl::string_view name,
+ size_t v_length,
+ const int16_t* v,
+ int dump_set = kDefaultDumpSet) {
+#if WEBRTC_APM_DEBUG_DUMP == 1
+ if (dump_set_to_use_ && *dump_set_to_use_ != dump_set)
+ return;
+
+ if (recording_activated_) {
+ FILE* file = GetRawFile(name);
+ if (file) {
+ fwrite(v, sizeof(v[0]), v_length, file);
+ }
+ }
+#endif
+ }
+
+ void DumpRaw(absl::string_view name,
+ rtc::ArrayView<const int16_t> v,
+ int dump_set = kDefaultDumpSet) {
+#if WEBRTC_APM_DEBUG_DUMP == 1
+ if (dump_set_to_use_ && *dump_set_to_use_ != dump_set)
+ return;
+
+ if (recording_activated_) {
+ DumpRaw(name, v.size(), v.data());
+ }
+#endif
+ }
+
+ void DumpRaw(absl::string_view name,
+ int32_t v,
+ int dump_set = kDefaultDumpSet) {
+#if WEBRTC_APM_DEBUG_DUMP == 1
+ if (dump_set_to_use_ && *dump_set_to_use_ != dump_set)
+ return;
+
+ if (recording_activated_) {
+ FILE* file = GetRawFile(name);
+ if (file) {
+ fwrite(&v, sizeof(v), 1, file);
+ }
+ }
+#endif
+ }
+
+ void DumpRaw(absl::string_view name,
+ size_t v_length,
+ const int32_t* v,
+ int dump_set = kDefaultDumpSet) {
+#if WEBRTC_APM_DEBUG_DUMP == 1
+ if (dump_set_to_use_ && *dump_set_to_use_ != dump_set)
+ return;
+
+ if (recording_activated_) {
+ FILE* file = GetRawFile(name);
+ if (file) {
+ fwrite(v, sizeof(v[0]), v_length, file);
+ }
+ }
+#endif
+ }
+
+ void DumpRaw(absl::string_view name,
+ size_t v,
+ int dump_set = kDefaultDumpSet) {
+#if WEBRTC_APM_DEBUG_DUMP == 1
+ if (dump_set_to_use_ && *dump_set_to_use_ != dump_set)
+ return;
+
+ if (recording_activated_) {
+ FILE* file = GetRawFile(name);
+ if (file) {
+ fwrite(&v, sizeof(v), 1, file);
+ }
+ }
+#endif
+ }
+
+ void DumpRaw(absl::string_view name,
+ size_t v_length,
+ const size_t* v,
+ int dump_set = kDefaultDumpSet) {
+#if WEBRTC_APM_DEBUG_DUMP == 1
+ if (dump_set_to_use_ && *dump_set_to_use_ != dump_set)
+ return;
+
+ if (recording_activated_) {
+ FILE* file = GetRawFile(name);
+ if (file) {
+ fwrite(v, sizeof(v[0]), v_length, file);
+ }
+ }
+#endif
+ }
+
+ void DumpRaw(absl::string_view name,
+ rtc::ArrayView<const int32_t> v,
+ int dump_set = kDefaultDumpSet) {
+#if WEBRTC_APM_DEBUG_DUMP == 1
+ if (dump_set_to_use_ && *dump_set_to_use_ != dump_set)
+ return;
+
+ if (recording_activated_) {
+ DumpRaw(name, v.size(), v.data());
+ }
+#endif
+ }
+
+ void DumpRaw(absl::string_view name,
+ rtc::ArrayView<const size_t> v,
+ int dump_set = kDefaultDumpSet) {
+#if WEBRTC_APM_DEBUG_DUMP == 1
+ if (dump_set_to_use_ && *dump_set_to_use_ != dump_set)
+ return;
+
+ DumpRaw(name, v.size(), v.data());
+#endif
+ }
+
+ void DumpWav(absl::string_view name,
+ size_t v_length,
+ const float* v,
+ int sample_rate_hz,
+ int num_channels,
+ int dump_set = kDefaultDumpSet) {
+#if WEBRTC_APM_DEBUG_DUMP == 1
+ if (dump_set_to_use_ && *dump_set_to_use_ != dump_set)
+ return;
+
+ if (recording_activated_) {
+ WavWriter* file = GetWavFile(name, sample_rate_hz, num_channels,
+ WavFile::SampleFormat::kFloat);
+ file->WriteSamples(v, v_length);
+ // Cheat and use aec_near as a stand-in for "size of the largest file"
+ // in the dump. We're looking to limit the total time, and that's a
+ // reasonable stand-in.
+ if (name == "aec_near") {
+ updateDebugWritten(v_length * sizeof(float));
+ }
+ }
+#endif
+ }
+
+ void DumpWav(absl::string_view name,
+ rtc::ArrayView<const float> v,
+ int sample_rate_hz,
+ int num_channels,
+ int dump_set = kDefaultDumpSet) {
+#if WEBRTC_APM_DEBUG_DUMP == 1
+ if (dump_set_to_use_ && *dump_set_to_use_ != dump_set)
+ return;
+
+ if (recording_activated_) {
+ DumpWav(name, v.size(), v.data(), sample_rate_hz, num_channels);
+ }
+#endif
+ }
+
+ private:
+#if WEBRTC_APM_DEBUG_DUMP == 1
+ static bool recording_activated_;
+ static absl::optional<int> dump_set_to_use_;
+ static constexpr size_t kOutputDirMaxLength = 1024;
+ static char output_dir_[kOutputDirMaxLength];
+ const int instance_index_;
+ int recording_set_index_ = 0;
+ std::unordered_map<std::string, std::unique_ptr<FILE, RawFileCloseFunctor>>
+ raw_files_;
+ std::unordered_map<std::string, std::unique_ptr<WavWriter>> wav_files_;
+
+ FILE* GetRawFile(absl::string_view name);
+ WavWriter* GetWavFile(absl::string_view name,
+ int sample_rate_hz,
+ int num_channels,
+ WavFile::SampleFormat format);
+
+ uint32_t debug_written_ = 0;
+
+ void updateDebugWritten(uint32_t amount) {
+ debug_written_ += amount;
+ if (debug_written_ >= webrtc::Trace::aec_debug_size()) {
+ SetActivated(false);
+ }
+ }
+
+#endif
+};
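+
+// A minimal usage sketch (illustrative only; the calls compile to no-ops
+// unless WEBRTC_APM_DEBUG_DUMP is set to 1 at build time):
+//
+//   ApmDataDumper dumper(/*instance_index=*/0);
+//   ApmDataDumper::SetActivated(true);
+//   float frame[160] = {};
+//   dumper.DumpWav("capture", 160, frame, /*sample_rate_hz=*/16000,
+//                  /*num_channels=*/1);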
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_LOGGING_APM_DATA_DUMPER_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/ns/BUILD.gn b/third_party/libwebrtc/modules/audio_processing/ns/BUILD.gn
new file mode 100644
index 0000000000..8c2e9dba84
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/ns/BUILD.gn
@@ -0,0 +1,104 @@
+# Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+import("../../../webrtc.gni")
+
+rtc_static_library("ns") {
+ visibility = [ "*" ]
+ configs += [ "..:apm_debug_dump" ]
+ sources = [
+ "fast_math.cc",
+ "fast_math.h",
+ "histograms.cc",
+ "histograms.h",
+ "noise_estimator.cc",
+ "noise_estimator.h",
+ "noise_suppressor.cc",
+ "noise_suppressor.h",
+ "ns_common.h",
+ "ns_config.h",
+ "ns_fft.cc",
+ "ns_fft.h",
+ "prior_signal_model.cc",
+ "prior_signal_model.h",
+ "prior_signal_model_estimator.cc",
+ "prior_signal_model_estimator.h",
+ "quantile_noise_estimator.cc",
+ "quantile_noise_estimator.h",
+ "signal_model.cc",
+ "signal_model.h",
+ "signal_model_estimator.cc",
+ "signal_model_estimator.h",
+ "speech_probability_estimator.cc",
+ "speech_probability_estimator.h",
+ "suppression_params.cc",
+ "suppression_params.h",
+ "wiener_filter.cc",
+ "wiener_filter.h",
+ ]
+
+ defines = []
+ if (rtc_build_with_neon && target_cpu != "arm64") {
+ suppressed_configs += [ "//build/config/compiler:compiler_arm_fpu" ]
+ cflags = [ "-mfpu=neon" ]
+ }
+
+ deps = [
+ "..:apm_logging",
+ "..:audio_buffer",
+ "..:high_pass_filter",
+ "../../../api:array_view",
+ "../../../common_audio:common_audio_c",
+ "../../../common_audio/third_party/ooura:fft_size_128",
+ "../../../common_audio/third_party/ooura:fft_size_256",
+ "../../../rtc_base:checks",
+ "../../../rtc_base:safe_minmax",
+ "../../../rtc_base/system:arch",
+ "../../../system_wrappers",
+ "../../../system_wrappers:field_trial",
+ "../../../system_wrappers:metrics",
+ "../utility:cascaded_biquad_filter",
+ ]
+ absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ]
+}
+
+if (rtc_include_tests) {
+ rtc_source_set("ns_unittests") {
+ testonly = true
+
+ configs += [ "..:apm_debug_dump" ]
+ sources = [ "noise_suppressor_unittest.cc" ]
+
+ deps = [
+ ":ns",
+ "..:apm_logging",
+ "..:audio_buffer",
+ "..:audio_processing",
+ "..:high_pass_filter",
+ "../../../api:array_view",
+ "../../../rtc_base:checks",
+ "../../../rtc_base:safe_minmax",
+ "../../../rtc_base:stringutils",
+ "../../../rtc_base/system:arch",
+ "../../../system_wrappers",
+ "../../../test:test_support",
+ "../utility:cascaded_biquad_filter",
+ ]
+ absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ]
+
+ defines = []
+
+ if (rtc_enable_protobuf) {
+ sources += []
+ }
+
+ if (!build_with_chromium) {
+ deps += [ "..:audio_processing_unittests" ]
+ }
+ }
+}
diff --git a/third_party/libwebrtc/modules/audio_processing/ns/fast_math.cc b/third_party/libwebrtc/modules/audio_processing/ns/fast_math.cc
new file mode 100644
index 0000000000..d13110c43f
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/ns/fast_math.cc
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/ns/fast_math.h"
+
+#include <math.h>
+#include <stdint.h>
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+namespace {
+
+float FastLog2f(float in) {
+ RTC_DCHECK_GT(in, .0f);
+ // Read and interpret float as uint32_t and then cast to float.
+ // This is done to extract the exponent (bits 30 - 23).
+ // "Right shift" of the exponent is then performed by multiplying
+ // with the constant (1/2^23). Finally, we subtract a constant to
+ // remove the bias (https://en.wikipedia.org/wiki/Exponent_bias).
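+ // For example, in = 8.f has the bit pattern 0x41000000 = 1090519040;
+ // 1090519040 / 2^23 = 130.0, and 130.0 - 126.942695 = 3.057305, which is
+ // close to the exact log2(8) = 3. The residual error comes from treating
+ // the mantissa bits linearly.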
+ union {
+ float dummy;
+ uint32_t a;
+ } x = {in};
+ float out = x.a;
+ out *= 1.1920929e-7f; // 1/2^23
+ out -= 126.942695f; // Remove bias.
+ return out;
+}
+
+} // namespace
+
+float SqrtFastApproximation(float f) {
+ // TODO(peah): Add fast approximate implementation.
+ return sqrtf(f);
+}
+
+float Pow2Approximation(float p) {
+ // TODO(peah): Add fast approximate implementation.
+ return powf(2.f, p);
+}
+
+float PowApproximation(float x, float p) {
+ return Pow2Approximation(p * FastLog2f(x));
+}
+
+float LogApproximation(float x) {
+ constexpr float kLogOf2 = 0.69314718056f;
+ return FastLog2f(x) * kLogOf2;
+}
+
+void LogApproximation(rtc::ArrayView<const float> x, rtc::ArrayView<float> y) {
+ for (size_t k = 0; k < x.size(); ++k) {
+ y[k] = LogApproximation(x[k]);
+ }
+}
+
+float ExpApproximation(float x) {
+ constexpr float kLog10Ofe = 0.4342944819f;
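+ // e^x == 10^(x * log10(e)), so the base-10 power approximation is reused.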
+ return PowApproximation(10.f, x * kLog10Ofe);
+}
+
+void ExpApproximation(rtc::ArrayView<const float> x, rtc::ArrayView<float> y) {
+ for (size_t k = 0; k < x.size(); ++k) {
+ y[k] = ExpApproximation(x[k]);
+ }
+}
+
+void ExpApproximationSignFlip(rtc::ArrayView<const float> x,
+ rtc::ArrayView<float> y) {
+ for (size_t k = 0; k < x.size(); ++k) {
+ y[k] = ExpApproximation(-x[k]);
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/ns/fast_math.h b/third_party/libwebrtc/modules/audio_processing/ns/fast_math.h
new file mode 100644
index 0000000000..0aefee940b
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/ns/fast_math.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_NS_FAST_MATH_H_
+#define MODULES_AUDIO_PROCESSING_NS_FAST_MATH_H_
+
+#include "api/array_view.h"
+
+namespace webrtc {
+
+// Sqrt approximation.
+float SqrtFastApproximation(float f);
+
+// Log base conversion log(x) = log2(x)/log2(e).
+float LogApproximation(float x);
+void LogApproximation(rtc::ArrayView<const float> x, rtc::ArrayView<float> y);
+
+// 2^x approximation.
+float Pow2Approximation(float p);
+
+// x^p approximation.
+float PowApproximation(float x, float p);
+
+// e^x approximation.
+float ExpApproximation(float x);
+void ExpApproximation(rtc::ArrayView<const float> x, rtc::ArrayView<float> y);
+void ExpApproximationSignFlip(rtc::ArrayView<const float> x,
+ rtc::ArrayView<float> y);
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_NS_FAST_MATH_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/ns/histograms.cc b/third_party/libwebrtc/modules/audio_processing/ns/histograms.cc
new file mode 100644
index 0000000000..1d4f4590d2
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/ns/histograms.cc
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/ns/histograms.h"
+
+namespace webrtc {
+
+Histograms::Histograms() {
+ Clear();
+}
+
+void Histograms::Clear() {
+ lrt_.fill(0);
+ spectral_flatness_.fill(0);
+ spectral_diff_.fill(0);
+}
+
+void Histograms::Update(const SignalModel& features_) {
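+ // Each feature value is mapped to bin floor(value / bin_size); values
+ // outside [0, kHistogramSize * bin_size) are ignored.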
+ // Update the histogram for the LRT.
+ constexpr float kOneByBinSizeLrt = 1.f / kBinSizeLrt;
+ if (features_.lrt < kHistogramSize * kBinSizeLrt && features_.lrt >= 0.f) {
+ ++lrt_[kOneByBinSizeLrt * features_.lrt];
+ }
+
+ // Update histogram for the spectral flatness.
+ constexpr float kOneByBinSizeSpecFlat = 1.f / kBinSizeSpecFlat;
+ if (features_.spectral_flatness < kHistogramSize * kBinSizeSpecFlat &&
+ features_.spectral_flatness >= 0.f) {
+ ++spectral_flatness_[features_.spectral_flatness * kOneByBinSizeSpecFlat];
+ }
+
+ // Update histogram for the spectral difference.
+ constexpr float kOneByBinSizeSpecDiff = 1.f / kBinSizeSpecDiff;
+ if (features_.spectral_diff < kHistogramSize * kBinSizeSpecDiff &&
+ features_.spectral_diff >= 0.f) {
+ ++spectral_diff_[features_.spectral_diff * kOneByBinSizeSpecDiff];
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/ns/histograms.h b/third_party/libwebrtc/modules/audio_processing/ns/histograms.h
new file mode 100644
index 0000000000..9640e743cf
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/ns/histograms.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_NS_HISTOGRAMS_H_
+#define MODULES_AUDIO_PROCESSING_NS_HISTOGRAMS_H_
+
+#include <array>
+
+#include "api/array_view.h"
+#include "modules/audio_processing/ns/ns_common.h"
+#include "modules/audio_processing/ns/signal_model.h"
+
+namespace webrtc {
+
+constexpr int kHistogramSize = 1000;
+
+// Class for handling the updating of histograms.
+class Histograms {
+ public:
+ Histograms();
+ Histograms(const Histograms&) = delete;
+ Histograms& operator=(const Histograms&) = delete;
+
+ // Clears the histograms.
+ void Clear();
+
+ // Extracts thresholds for feature parameters and updates the corresponding
+ // histogram.
+ void Update(const SignalModel& features_);
+
+ // Methods for accessing the histograms.
+ rtc::ArrayView<const int, kHistogramSize> get_lrt() const { return lrt_; }
+ rtc::ArrayView<const int, kHistogramSize> get_spectral_flatness() const {
+ return spectral_flatness_;
+ }
+ rtc::ArrayView<const int, kHistogramSize> get_spectral_diff() const {
+ return spectral_diff_;
+ }
+
+ private:
+ std::array<int, kHistogramSize> lrt_;
+ std::array<int, kHistogramSize> spectral_flatness_;
+ std::array<int, kHistogramSize> spectral_diff_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_NS_HISTOGRAMS_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/ns/noise_estimator.cc b/third_party/libwebrtc/modules/audio_processing/ns/noise_estimator.cc
new file mode 100644
index 0000000000..5367545f25
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/ns/noise_estimator.cc
@@ -0,0 +1,195 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/ns/noise_estimator.h"
+
+#include <algorithm>
+
+#include "modules/audio_processing/ns/fast_math.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+namespace {
+
+// Table of log(i) for i = 0, ..., 128. The entries below i = 5 are unused
+// (the fit below starts at kStartBand = 5) and are set to 0.
+constexpr std::array<float, 129> log_table = {
+ 0.f, 0.f, 0.f, 0.f, 0.f, 1.609438f, 1.791759f,
+ 1.945910f, 2.079442f, 2.197225f, 2.302585f, 2.397895f, 2.484907f, 2.564949f,
+ 2.639057f, 2.708050f, 2.772589f, 2.833213f, 2.890372f, 2.944439f, 2.995732f,
+ 3.044522f, 3.091043f, 3.135494f, 3.178054f, 3.218876f, 3.258097f, 3.295837f,
+ 3.332205f, 3.367296f, 3.401197f, 3.433987f, 3.465736f, 3.496507f, 3.526361f,
+ 3.555348f, 3.583519f, 3.610918f, 3.637586f, 3.663562f, 3.688879f, 3.713572f,
+ 3.737669f, 3.761200f, 3.784190f, 3.806663f, 3.828641f, 3.850147f, 3.871201f,
+ 3.891820f, 3.912023f, 3.931826f, 3.951244f, 3.970292f, 3.988984f, 4.007333f,
+ 4.025352f, 4.043051f, 4.060443f, 4.077538f, 4.094345f, 4.110874f, 4.127134f,
+ 4.143135f, 4.158883f, 4.174387f, 4.189655f, 4.204693f, 4.219508f, 4.234107f,
+ 4.248495f, 4.262680f, 4.276666f, 4.290460f, 4.304065f, 4.317488f, 4.330733f,
+ 4.343805f, 4.356709f, 4.369448f, 4.382027f, 4.394449f, 4.406719f, 4.418841f,
+ 4.430817f, 4.442651f, 4.454347f, 4.465908f, 4.477337f, 4.488636f, 4.499810f,
+ 4.510859f, 4.521789f, 4.532599f, 4.543295f, 4.553877f, 4.564348f, 4.574711f,
+ 4.584968f, 4.595119f, 4.605170f, 4.615121f, 4.624973f, 4.634729f, 4.644391f,
+ 4.653960f, 4.663439f, 4.672829f, 4.682131f, 4.691348f, 4.700480f, 4.709530f,
+ 4.718499f, 4.727388f, 4.736198f, 4.744932f, 4.753591f, 4.762174f, 4.770685f,
+ 4.779124f, 4.787492f, 4.795791f, 4.804021f, 4.812184f, 4.820282f, 4.828314f,
+ 4.836282f, 4.844187f, 4.852030f};
+
+} // namespace
+
+NoiseEstimator::NoiseEstimator(const SuppressionParams& suppression_params)
+ : suppression_params_(suppression_params) {
+ noise_spectrum_.fill(0.f);
+ prev_noise_spectrum_.fill(0.f);
+ conservative_noise_spectrum_.fill(0.f);
+ parametric_noise_spectrum_.fill(0.f);
+}
+
+void NoiseEstimator::PrepareAnalysis() {
+ std::copy(noise_spectrum_.begin(), noise_spectrum_.end(),
+ prev_noise_spectrum_.begin());
+}
+
+void NoiseEstimator::PreUpdate(
+ int32_t num_analyzed_frames,
+ rtc::ArrayView<const float, kFftSizeBy2Plus1> signal_spectrum,
+ float signal_spectral_sum) {
+ quantile_noise_estimator_.Estimate(signal_spectrum, noise_spectrum_);
+
+ if (num_analyzed_frames < kShortStartupPhaseBlocks) {
+ // Compute simplified noise model during startup.
+ const size_t kStartBand = 5;
+ float sum_log_i_log_magn = 0.f;
+ float sum_log_i = 0.f;
+ float sum_log_i_square = 0.f;
+ float sum_log_magn = 0.f;
+ for (size_t i = kStartBand; i < kFftSizeBy2Plus1; ++i) {
+ float log_i = log_table[i];
+ sum_log_i += log_i;
+ sum_log_i_square += log_i * log_i;
+ float log_signal = LogApproximation(signal_spectrum[i]);
+ sum_log_magn += log_signal;
+ sum_log_i_log_magn += log_i * log_signal;
+ }
+
+ // Estimate the parameter for the level of the white noise.
+ constexpr float kOneByFftSizeBy2Plus1 = 1.f / kFftSizeBy2Plus1;
+ white_noise_level_ += signal_spectral_sum * kOneByFftSizeBy2Plus1 *
+ suppression_params_.over_subtraction_factor;
+
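+ // The modeled noise is assumed to follow log|S(i)| ~= a - b * log(i), i.e.
+ // a pink noise spectrum A / i^b with A = exp(a). The numerator (a) and
+ // exponent (b) below are obtained as a least-squares fit of the log
+ // magnitudes against log_table over the bins [kStartBand, kFftSizeBy2Plus1).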
+ // Estimate pink noise parameters.
+ float denom = sum_log_i_square * (kFftSizeBy2Plus1 - kStartBand) -
+ sum_log_i * sum_log_i;
+ float num =
+ sum_log_i_square * sum_log_magn - sum_log_i * sum_log_i_log_magn;
+ RTC_DCHECK_NE(denom, 0.f);
+ float pink_noise_adjustment = num / denom;
+
+ // Constrain the estimated spectrum to be positive.
+ pink_noise_adjustment = std::max(pink_noise_adjustment, 0.f);
+ pink_noise_numerator_ += pink_noise_adjustment;
+ num = sum_log_i * sum_log_magn -
+ (kFftSizeBy2Plus1 - kStartBand) * sum_log_i_log_magn;
+ RTC_DCHECK_NE(denom, 0.f);
+ pink_noise_adjustment = num / denom;
+
+ // Constrain the pink noise power to be in the interval [0, 1].
+ pink_noise_adjustment = std::max(std::min(pink_noise_adjustment, 1.f), 0.f);
+
+ pink_noise_exp_ += pink_noise_adjustment;
+
+ const float one_by_num_analyzed_frames_plus_1 =
+ 1.f / (num_analyzed_frames + 1.f);
+
+ // Calculate the frequency-independent parts of parametric noise estimate.
+ float parametric_exp = 0.f;
+ float parametric_num = 0.f;
+ if (pink_noise_exp_ > 0.f) {
+ // Use pink noise estimate.
+ parametric_num = ExpApproximation(pink_noise_numerator_ *
+ one_by_num_analyzed_frames_plus_1);
+ parametric_num *= num_analyzed_frames + 1.f;
+ parametric_exp = pink_noise_exp_ * one_by_num_analyzed_frames_plus_1;
+ }
+
+ constexpr float kOneByShortStartupPhaseBlocks =
+ 1.f / kShortStartupPhaseBlocks;
+ for (size_t i = 0; i < kFftSizeBy2Plus1; ++i) {
+ // Estimate the background noise using the white and pink noise
+ // parameters.
+ if (pink_noise_exp_ == 0.f) {
+ // Use white noise estimate.
+ parametric_noise_spectrum_[i] = white_noise_level_;
+ } else {
+ // Use pink noise estimate.
+ float use_band = i < kStartBand ? kStartBand : i;
+ float denom = PowApproximation(use_band, parametric_exp);
+ RTC_DCHECK_NE(denom, 0.f);
+ parametric_noise_spectrum_[i] = parametric_num / denom;
+ }
+ }
+
+ // Weight quantile noise with modeled noise.
+ for (size_t i = 0; i < kFftSizeBy2Plus1; ++i) {
+ noise_spectrum_[i] *= num_analyzed_frames;
+ float tmp = parametric_noise_spectrum_[i] *
+ (kShortStartupPhaseBlocks - num_analyzed_frames);
+ noise_spectrum_[i] += tmp * one_by_num_analyzed_frames_plus_1;
+ noise_spectrum_[i] *= kOneByShortStartupPhaseBlocks;
+ }
+ }
+}
+
+void NoiseEstimator::PostUpdate(
+ rtc::ArrayView<const float> speech_probability,
+ rtc::ArrayView<const float, kFftSizeBy2Plus1> signal_spectrum) {
+ // Time-avg parameter for noise_spectrum update.
+ constexpr float kNoiseUpdate = 0.9f;
+
+ float gamma = kNoiseUpdate;
+ for (size_t i = 0; i < kFftSizeBy2Plus1; ++i) {
+ const float prob_speech = speech_probability[i];
+ const float prob_non_speech = 1.f - prob_speech;
+
+ // Temporary noise update, used for speech frames if the update value is
+ // lower than the previous estimate.
+ float noise_update_tmp =
+ gamma * prev_noise_spectrum_[i] +
+ (1.f - gamma) * (prob_non_speech * signal_spectrum[i] +
+ prob_speech * prev_noise_spectrum_[i]);
+
+ // Time-constant based on speech/noise_spectrum state.
+ float gamma_old = gamma;
+
+ // Increase gamma for frames likely to be speech.
+ constexpr float kProbRange = .2f;
+ gamma = prob_speech > kProbRange ? .99f : kNoiseUpdate;
+
+ // Conservative noise_spectrum update.
+ if (prob_speech < kProbRange) {
+ conservative_noise_spectrum_[i] +=
+ 0.05f * (signal_spectrum[i] - conservative_noise_spectrum_[i]);
+ }
+
+ // Noise_spectrum update.
+ if (gamma == gamma_old) {
+ noise_spectrum_[i] = noise_update_tmp;
+ } else {
+ noise_spectrum_[i] =
+ gamma * prev_noise_spectrum_[i] +
+ (1.f - gamma) * (prob_non_speech * signal_spectrum[i] +
+ prob_speech * prev_noise_spectrum_[i]);
+ // Allow for noise_spectrum update downwards: If noise_spectrum update
+ // decreases the noise_spectrum, it is safe, so allow it to happen.
+ noise_spectrum_[i] = std::min(noise_spectrum_[i], noise_update_tmp);
+ }
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/ns/noise_estimator.h b/third_party/libwebrtc/modules/audio_processing/ns/noise_estimator.h
new file mode 100644
index 0000000000..0c0466a679
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/ns/noise_estimator.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_NS_NOISE_ESTIMATOR_H_
+#define MODULES_AUDIO_PROCESSING_NS_NOISE_ESTIMATOR_H_
+
+#include <array>
+
+#include "api/array_view.h"
+#include "modules/audio_processing/ns/ns_common.h"
+#include "modules/audio_processing/ns/quantile_noise_estimator.h"
+#include "modules/audio_processing/ns/suppression_params.h"
+
+namespace webrtc {
+
+// Class for estimating the spectral characteristics of the noise in an incoming
+// signal.
+class NoiseEstimator {
+ public:
+ explicit NoiseEstimator(const SuppressionParams& suppression_params);
+
+ // Prepare the estimator for analysis of a new frame.
+ void PrepareAnalysis();
+
+ // Performs the first step of the estimator update.
+ void PreUpdate(int32_t num_analyzed_frames,
+ rtc::ArrayView<const float, kFftSizeBy2Plus1> signal_spectrum,
+ float signal_spectral_sum);
+
+ // Performs the second step of the estimator update.
+ void PostUpdate(
+ rtc::ArrayView<const float> speech_probability,
+ rtc::ArrayView<const float, kFftSizeBy2Plus1> signal_spectrum);
+
+ // Returns the noise spectral estimate.
+ rtc::ArrayView<const float, kFftSizeBy2Plus1> get_noise_spectrum() const {
+ return noise_spectrum_;
+ }
+
+ // Returns the noise from the previous frame.
+ rtc::ArrayView<const float, kFftSizeBy2Plus1> get_prev_noise_spectrum()
+ const {
+ return prev_noise_spectrum_;
+ }
+
+ // Returns a noise spectral estimate based on white and pink noise parameters.
+ rtc::ArrayView<const float, kFftSizeBy2Plus1> get_parametric_noise_spectrum()
+ const {
+ return parametric_noise_spectrum_;
+ }
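+
+ // Returns the conservatively updated noise spectral estimate.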
+ rtc::ArrayView<const float, kFftSizeBy2Plus1>
+ get_conservative_noise_spectrum() const {
+ return conservative_noise_spectrum_;
+ }
+
+ private:
+ const SuppressionParams& suppression_params_;
+ float white_noise_level_ = 0.f;
+ float pink_noise_numerator_ = 0.f;
+ float pink_noise_exp_ = 0.f;
+ std::array<float, kFftSizeBy2Plus1> prev_noise_spectrum_;
+ std::array<float, kFftSizeBy2Plus1> conservative_noise_spectrum_;
+ std::array<float, kFftSizeBy2Plus1> parametric_noise_spectrum_;
+ std::array<float, kFftSizeBy2Plus1> noise_spectrum_;
+ QuantileNoiseEstimator quantile_noise_estimator_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_NS_NOISE_ESTIMATOR_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/ns/noise_suppressor.cc b/third_party/libwebrtc/modules/audio_processing/ns/noise_suppressor.cc
new file mode 100644
index 0000000000..d66faa6ed4
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/ns/noise_suppressor.cc
@@ -0,0 +1,555 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/ns/noise_suppressor.h"
+
+#include <math.h>
+#include <stdlib.h>
+#include <string.h>
+#include <algorithm>
+
+#include "modules/audio_processing/ns/fast_math.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+namespace {
+
+// Maps the sample rate to the number of frequency bands (16000 -> 1,
+// 32000 -> 2, 48000 -> 3).
+size_t NumBandsForRate(size_t sample_rate_hz) {
+ RTC_DCHECK(sample_rate_hz == 16000 || sample_rate_hz == 32000 ||
+ sample_rate_hz == 48000);
+ return sample_rate_hz / 16000;
+}
+
+// Maximum number of channels for which the channel data is stored on
+// the stack. If the number of channels is larger than this, the data is
+// stored in scratch memory that is pre-allocated on the heap. This
+// partitioning avoids wasting heap space for the more common channel counts
+// while not capping the number of supported channels at a fixed stack-based
+// maximum.
+constexpr size_t kMaxNumChannelsOnStack = 2;
+
+// Returns the number of channels for which heap storage is required: the
+// full channel count when it exceeds the stack limit, and zero otherwise.
+size_t NumChannelsOnHeap(size_t num_channels) {
+ return num_channels > kMaxNumChannelsOnStack ? num_channels : 0;
+}
+
+// Hybrid Hanning and flat window for the filterbank.
+constexpr std::array<float, 96> kBlocks160w256FirstHalf = {
+ 0.00000000f, 0.01636173f, 0.03271908f, 0.04906767f, 0.06540313f,
+ 0.08172107f, 0.09801714f, 0.11428696f, 0.13052619f, 0.14673047f,
+ 0.16289547f, 0.17901686f, 0.19509032f, 0.21111155f, 0.22707626f,
+ 0.24298018f, 0.25881905f, 0.27458862f, 0.29028468f, 0.30590302f,
+ 0.32143947f, 0.33688985f, 0.35225005f, 0.36751594f, 0.38268343f,
+ 0.39774847f, 0.41270703f, 0.42755509f, 0.44228869f, 0.45690388f,
+ 0.47139674f, 0.48576339f, 0.50000000f, 0.51410274f, 0.52806785f,
+ 0.54189158f, 0.55557023f, 0.56910015f, 0.58247770f, 0.59569930f,
+ 0.60876143f, 0.62166057f, 0.63439328f, 0.64695615f, 0.65934582f,
+ 0.67155895f, 0.68359230f, 0.69544264f, 0.70710678f, 0.71858162f,
+ 0.72986407f, 0.74095113f, 0.75183981f, 0.76252720f, 0.77301045f,
+ 0.78328675f, 0.79335334f, 0.80320753f, 0.81284668f, 0.82226822f,
+ 0.83146961f, 0.84044840f, 0.84920218f, 0.85772861f, 0.86602540f,
+ 0.87409034f, 0.88192126f, 0.88951608f, 0.89687274f, 0.90398929f,
+ 0.91086382f, 0.91749450f, 0.92387953f, 0.93001722f, 0.93590593f,
+ 0.94154407f, 0.94693013f, 0.95206268f, 0.95694034f, 0.96156180f,
+ 0.96592583f, 0.97003125f, 0.97387698f, 0.97746197f, 0.98078528f,
+ 0.98384601f, 0.98664333f, 0.98917651f, 0.99144486f, 0.99344778f,
+ 0.99518473f, 0.99665524f, 0.99785892f, 0.99879546f, 0.99946459f,
+ 0.99986614f};
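+
+// The table holds the rising half of the window; sample i equals
+// sin(i * pi / 192). The window is flat (1.0) over the middle of the
+// 256-sample extended frame and mirrored for the falling edge in
+// ApplyFilterBankWindow() below.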
+
+// Applies the filterbank window to a buffer.
+void ApplyFilterBankWindow(rtc::ArrayView<float, kFftSize> x) {
+ for (size_t i = 0; i < 96; ++i) {
+ x[i] = kBlocks160w256FirstHalf[i] * x[i];
+ }
+
+ for (size_t i = 161, k = 95; i < kFftSize; ++i, --k) {
+ RTC_DCHECK_NE(0, k);
+ x[i] = kBlocks160w256FirstHalf[k] * x[i];
+ }
+}
+
+// Extends a frame with previous data.
+void FormExtendedFrame(rtc::ArrayView<const float, kNsFrameSize> frame,
+ rtc::ArrayView<float, kFftSize - kNsFrameSize> old_data,
+ rtc::ArrayView<float, kFftSize> extended_frame) {
+ std::copy(old_data.begin(), old_data.end(), extended_frame.begin());
+ std::copy(frame.begin(), frame.end(),
+ extended_frame.begin() + old_data.size());
+ std::copy(extended_frame.end() - old_data.size(), extended_frame.end(),
+ old_data.begin());
+}
+
+// Uses overlap-and-add to produce an output frame.
+void OverlapAndAdd(rtc::ArrayView<const float, kFftSize> extended_frame,
+ rtc::ArrayView<float, kOverlapSize> overlap_memory,
+ rtc::ArrayView<float, kNsFrameSize> output_frame) {
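+ // With kNsFrameSize = 160 and kOverlapSize = 96, the first 96 output
+ // samples are the sum of the stored tail of the previous extended frame and
+ // the head of the current one; samples 96..159 are copied directly, and the
+ // final 96 samples of the extended frame are saved for the next call.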
+ for (size_t i = 0; i < kOverlapSize; ++i) {
+ output_frame[i] = overlap_memory[i] + extended_frame[i];
+ }
+ std::copy(extended_frame.begin() + kOverlapSize,
+ extended_frame.begin() + kNsFrameSize,
+ output_frame.begin() + kOverlapSize);
+ std::copy(extended_frame.begin() + kNsFrameSize, extended_frame.end(),
+ overlap_memory.begin());
+}
+
+// Produces a delayed frame.
+void DelaySignal(rtc::ArrayView<const float, kNsFrameSize> frame,
+ rtc::ArrayView<float, kFftSize - kNsFrameSize> delay_buffer,
+ rtc::ArrayView<float, kNsFrameSize> delayed_frame) {
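+ // Buffers kOverlapSize (= 96) samples so that the band is delayed by the
+ // same amount as the filterbank analysis/synthesis delays the lowest band.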
+ constexpr size_t kSamplesFromFrame = kNsFrameSize - (kFftSize - kNsFrameSize);
+ std::copy(delay_buffer.begin(), delay_buffer.end(), delayed_frame.begin());
+ std::copy(frame.begin(), frame.begin() + kSamplesFromFrame,
+ delayed_frame.begin() + delay_buffer.size());
+
+ std::copy(frame.begin() + kSamplesFromFrame, frame.end(),
+ delay_buffer.begin());
+}
+
+// Computes the energy of an extended frame.
+float ComputeEnergyOfExtendedFrame(rtc::ArrayView<const float, kFftSize> x) {
+ float energy = 0.f;
+ for (float x_k : x) {
+ energy += x_k * x_k;
+ }
+
+ return energy;
+}
+
+// Computes the energy of an extended frame based on its subcomponents.
+float ComputeEnergyOfExtendedFrame(
+ rtc::ArrayView<const float, kNsFrameSize> frame,
+ rtc::ArrayView<float, kFftSize - kNsFrameSize> old_data) {
+ float energy = 0.f;
+ for (float v : old_data) {
+ energy += v * v;
+ }
+ for (float v : frame) {
+ energy += v * v;
+ }
+
+ return energy;
+}
+
+// Computes the magnitude spectrum based on an FFT output.
+void ComputeMagnitudeSpectrum(
+ rtc::ArrayView<const float, kFftSize> real,
+ rtc::ArrayView<const float, kFftSize> imag,
+ rtc::ArrayView<float, kFftSizeBy2Plus1> signal_spectrum) {
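+ // A +1.f offset is added to every bin so that the resulting spectrum is
+ // strictly positive; later stages rely on this when forming ratios and
+ // spectral sums.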
+ signal_spectrum[0] = fabsf(real[0]) + 1.f;
+ signal_spectrum[kFftSizeBy2Plus1 - 1] =
+ fabsf(real[kFftSizeBy2Plus1 - 1]) + 1.f;
+
+ for (size_t i = 1; i < kFftSizeBy2Plus1 - 1; ++i) {
+ signal_spectrum[i] =
+ SqrtFastApproximation(real[i] * real[i] + imag[i] * imag[i]) + 1.f;
+ }
+}
+
+// Compute prior and post SNR.
+void ComputeSnr(rtc::ArrayView<const float, kFftSizeBy2Plus1> filter,
+ rtc::ArrayView<const float> prev_signal_spectrum,
+ rtc::ArrayView<const float> signal_spectrum,
+ rtc::ArrayView<const float> prev_noise_spectrum,
+ rtc::ArrayView<const float> noise_spectrum,
+ rtc::ArrayView<float> prior_snr,
+ rtc::ArrayView<float> post_snr) {
+ for (size_t i = 0; i < kFftSizeBy2Plus1; ++i) {
+ // Previous post SNR.
+ // Previous estimate: based on previous frame with gain filter.
+ float prev_estimate = prev_signal_spectrum[i] /
+ (prev_noise_spectrum[i] + 0.0001f) * filter[i];
+ // Post SNR.
+ if (signal_spectrum[i] > noise_spectrum[i]) {
+ post_snr[i] = signal_spectrum[i] / (noise_spectrum[i] + 0.0001f) - 1.f;
+ } else {
+ post_snr[i] = 0.f;
+ }
+ // The directed-decision estimate of the prior SNR is a weighted sum of the
+ // previous estimate and the current post SNR (with weight 0.98 on the
+ // previous estimate).
+ prior_snr[i] = 0.98f * prev_estimate + (1.f - 0.98f) * post_snr[i];
+ }
+}
+
+// Computes the attenuating gain for the noise suppression of the upper bands.
+float ComputeUpperBandsGain(
+ float minimum_attenuating_gain,
+ rtc::ArrayView<const float, kFftSizeBy2Plus1> filter,
+ rtc::ArrayView<const float> speech_probability,
+ rtc::ArrayView<const float, kFftSizeBy2Plus1> prev_analysis_signal_spectrum,
+ rtc::ArrayView<const float, kFftSizeBy2Plus1> signal_spectrum) {
+ // Average speech prob and filter gain for the end of the lowest band.
+ constexpr int kNumAvgBins = 32;
+ constexpr float kOneByNumAvgBins = 1.f / kNumAvgBins;
+
+ float avg_prob_speech = 0.f;
+ float avg_filter_gain = 0.f;
+ for (size_t i = kFftSizeBy2Plus1 - kNumAvgBins - 1; i < kFftSizeBy2Plus1 - 1;
+ i++) {
+ avg_prob_speech += speech_probability[i];
+ avg_filter_gain += filter[i];
+ }
+ avg_prob_speech = avg_prob_speech * kOneByNumAvgBins;
+ avg_filter_gain = avg_filter_gain * kOneByNumAvgBins;
+
+ // If the speech was suppressed by a component between Analyze and Process,
+ // for example by an AEC, it should not be considered speech for the purpose
+ // of high band suppression. To that end, the speech probability is scaled
+ // accordingly.
+ float sum_analysis_spectrum = 0.f;
+ float sum_processing_spectrum = 0.f;
+ for (size_t i = 0; i < kFftSizeBy2Plus1; ++i) {
+ sum_analysis_spectrum += prev_analysis_signal_spectrum[i];
+ sum_processing_spectrum += signal_spectrum[i];
+ }
+
+ // The magnitude spectrum computation enforces the spectrum to be strictly
+ // positive.
+ RTC_DCHECK_GT(sum_analysis_spectrum, 0.f);
+ avg_prob_speech *= sum_processing_spectrum / sum_analysis_spectrum;
+
+ // Compute gain based on speech probability.
+ float gain =
+ 0.5f * (1.f + static_cast<float>(tanh(2.f * avg_prob_speech - 1.f)));
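+ // The soft mapping 0.5 * (1 + tanh(2p - 1)) takes an average speech
+ // probability p in [0, 1] to a gain of roughly 0.12 at p = 0 and 0.88 at
+ // p = 1.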
+
+ // Combine gain with low band gain.
+ if (avg_prob_speech >= 0.5f) {
+ gain = 0.25f * gain + 0.75f * avg_filter_gain;
+ } else {
+ gain = 0.5f * gain + 0.5f * avg_filter_gain;
+ }
+
+ // Make sure gain is within flooring range.
+ return std::min(std::max(gain, minimum_attenuating_gain), 1.f);
+}
+
+} // namespace
+
+NoiseSuppressor::ChannelState::ChannelState(
+ const SuppressionParams& suppression_params,
+ size_t num_bands)
+ : wiener_filter(suppression_params),
+ noise_estimator(suppression_params),
+ process_delay_memory(num_bands > 1 ? num_bands - 1 : 0) {
+ analyze_analysis_memory.fill(0.f);
+ prev_analysis_signal_spectrum.fill(1.f);
+ process_analysis_memory.fill(0.f);
+ process_synthesis_memory.fill(0.f);
+ for (auto& d : process_delay_memory) {
+ d.fill(0.f);
+ }
+}
+
+NoiseSuppressor::NoiseSuppressor(const NsConfig& config,
+ size_t sample_rate_hz,
+ size_t num_channels)
+ : num_bands_(NumBandsForRate(sample_rate_hz)),
+ num_channels_(num_channels),
+ suppression_params_(config.target_level),
+ filter_bank_states_heap_(NumChannelsOnHeap(num_channels_)),
+ upper_band_gains_heap_(NumChannelsOnHeap(num_channels_)),
+ energies_before_filtering_heap_(NumChannelsOnHeap(num_channels_)),
+ gain_adjustments_heap_(NumChannelsOnHeap(num_channels_)),
+ channels_(num_channels_) {
+ for (size_t ch = 0; ch < num_channels_; ++ch) {
+ channels_[ch] =
+ std::make_unique<ChannelState>(suppression_params_, num_bands_);
+ }
+}
+
+void NoiseSuppressor::AggregateWienerFilters(
+ rtc::ArrayView<float, kFftSizeBy2Plus1> filter) const {
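+ // Uses the smallest (most suppressive) per-bin gain across the channels so
+ // that all channels receive identical attenuation.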
+ rtc::ArrayView<const float, kFftSizeBy2Plus1> filter0 =
+ channels_[0]->wiener_filter.get_filter();
+ std::copy(filter0.begin(), filter0.end(), filter.begin());
+
+ for (size_t ch = 1; ch < num_channels_; ++ch) {
+ rtc::ArrayView<const float, kFftSizeBy2Plus1> filter_ch =
+ channels_[ch]->wiener_filter.get_filter();
+
+ for (size_t k = 0; k < kFftSizeBy2Plus1; ++k) {
+ filter[k] = std::min(filter[k], filter_ch[k]);
+ }
+ }
+}
+
+void NoiseSuppressor::Analyze(const AudioBuffer& audio) {
+ // Prepare the noise estimator for the analysis stage.
+ for (size_t ch = 0; ch < num_channels_; ++ch) {
+ channels_[ch]->noise_estimator.PrepareAnalysis();
+ }
+
+ // Check for zero frames.
+ bool zero_frame = true;
+ for (size_t ch = 0; ch < num_channels_; ++ch) {
+ rtc::ArrayView<const float, kNsFrameSize> y_band0(
+ &audio.split_bands_const(ch)[0][0], kNsFrameSize);
+ float energy = ComputeEnergyOfExtendedFrame(
+ y_band0, channels_[ch]->analyze_analysis_memory);
+ if (energy > 0.f) {
+ zero_frame = false;
+ break;
+ }
+ }
+
+ if (zero_frame) {
+ // We want to avoid updating statistics in this case:
+ // Updating feature statistics when we have zeros only will cause
+ // thresholds to move towards zero signal situations. This in turn has the
+ // effect that once the signal is "turned on" (non-zero values) everything
+ // will be treated as speech and there is no noise suppression effect.
+ // Depending on the duration of the inactive signal it takes a
+ // considerable amount of time for the system to learn what is noise and
+ // what is speech.
+ return;
+ }
+
+ // Only update analysis counter for frames that are properly analyzed.
+ if (++num_analyzed_frames_ < 0) {
+ num_analyzed_frames_ = 0;
+ }
+
+ // Analyze all channels.
+ for (size_t ch = 0; ch < num_channels_; ++ch) {
+ std::unique_ptr<ChannelState>& ch_p = channels_[ch];
+ rtc::ArrayView<const float, kNsFrameSize> y_band0(
+ &audio.split_bands_const(ch)[0][0], kNsFrameSize);
+
+ // Form an extended frame and apply analysis filter bank windowing.
+ std::array<float, kFftSize> extended_frame;
+ FormExtendedFrame(y_band0, ch_p->analyze_analysis_memory, extended_frame);
+ ApplyFilterBankWindow(extended_frame);
+
+ // Compute the magnitude spectrum.
+ std::array<float, kFftSize> real;
+ std::array<float, kFftSize> imag;
+ fft_.Fft(extended_frame, real, imag);
+
+ std::array<float, kFftSizeBy2Plus1> signal_spectrum;
+ ComputeMagnitudeSpectrum(real, imag, signal_spectrum);
+
+ // Compute energies.
+ float signal_energy = 0.f;
+ for (size_t i = 0; i < kFftSizeBy2Plus1; ++i) {
+ signal_energy += real[i] * real[i] + imag[i] * imag[i];
+ }
+ signal_energy /= kFftSizeBy2Plus1;
+
+ float signal_spectral_sum = 0.f;
+ for (size_t i = 0; i < kFftSizeBy2Plus1; ++i) {
+ signal_spectral_sum += signal_spectrum[i];
+ }
+
+ // Estimate the noise spectra and the probability estimates of speech
+ // presence.
+ ch_p->noise_estimator.PreUpdate(num_analyzed_frames_, signal_spectrum,
+ signal_spectral_sum);
+
+ std::array<float, kFftSizeBy2Plus1> post_snr;
+ std::array<float, kFftSizeBy2Plus1> prior_snr;
+ ComputeSnr(ch_p->wiener_filter.get_filter(),
+ ch_p->prev_analysis_signal_spectrum, signal_spectrum,
+ ch_p->noise_estimator.get_prev_noise_spectrum(),
+ ch_p->noise_estimator.get_noise_spectrum(), prior_snr, post_snr);
+
+ ch_p->speech_probability_estimator.Update(
+ num_analyzed_frames_, prior_snr, post_snr,
+ ch_p->noise_estimator.get_conservative_noise_spectrum(),
+ signal_spectrum, signal_spectral_sum, signal_energy);
+
+ ch_p->noise_estimator.PostUpdate(
+ ch_p->speech_probability_estimator.get_probability(), signal_spectrum);
+
+ // Store the magnitude spectrum to make it available for the Process()
+ // method.
+ std::copy(signal_spectrum.begin(), signal_spectrum.end(),
+ ch_p->prev_analysis_signal_spectrum.begin());
+ }
+}
+
+void NoiseSuppressor::Process(AudioBuffer* audio) {
+ // Select the space for storing data during the processing.
+ std::array<FilterBankState, kMaxNumChannelsOnStack> filter_bank_states_stack;
+ rtc::ArrayView<FilterBankState> filter_bank_states(
+ filter_bank_states_stack.data(), num_channels_);
+ std::array<float, kMaxNumChannelsOnStack> upper_band_gains_stack;
+ rtc::ArrayView<float> upper_band_gains(upper_band_gains_stack.data(),
+ num_channels_);
+ std::array<float, kMaxNumChannelsOnStack> energies_before_filtering_stack;
+ rtc::ArrayView<float> energies_before_filtering(
+ energies_before_filtering_stack.data(), num_channels_);
+ std::array<float, kMaxNumChannelsOnStack> gain_adjustments_stack;
+ rtc::ArrayView<float> gain_adjustments(gain_adjustments_stack.data(),
+ num_channels_);
+ if (NumChannelsOnHeap(num_channels_) > 0) {
+ // If the stack-allocated space is too small, use the heap for storing the
+ // data.
+ filter_bank_states = rtc::ArrayView<FilterBankState>(
+ filter_bank_states_heap_.data(), num_channels_);
+ upper_band_gains =
+ rtc::ArrayView<float>(upper_band_gains_heap_.data(), num_channels_);
+ energies_before_filtering = rtc::ArrayView<float>(
+ energies_before_filtering_heap_.data(), num_channels_);
+ gain_adjustments =
+ rtc::ArrayView<float>(gain_adjustments_heap_.data(), num_channels_);
+ }
+
+ // Compute the suppression filters for all channels.
+ for (size_t ch = 0; ch < num_channels_; ++ch) {
+ // Form an extended frame and apply analysis filter bank windowing.
+ rtc::ArrayView<float, kNsFrameSize> y_band0(&audio->split_bands(ch)[0][0],
+ kNsFrameSize);
+
+ FormExtendedFrame(y_band0, channels_[ch]->process_analysis_memory,
+ filter_bank_states[ch].extended_frame);
+
+ ApplyFilterBankWindow(filter_bank_states[ch].extended_frame);
+
+ energies_before_filtering[ch] =
+ ComputeEnergyOfExtendedFrame(filter_bank_states[ch].extended_frame);
+
+ // Perform filter bank analysis and compute the magnitude spectrum.
+ fft_.Fft(filter_bank_states[ch].extended_frame, filter_bank_states[ch].real,
+ filter_bank_states[ch].imag);
+
+ std::array<float, kFftSizeBy2Plus1> signal_spectrum;
+ ComputeMagnitudeSpectrum(filter_bank_states[ch].real,
+ filter_bank_states[ch].imag, signal_spectrum);
+
+ // Compute the frequency domain gain filter for noise attenuation.
+ channels_[ch]->wiener_filter.Update(
+ num_analyzed_frames_,
+ channels_[ch]->noise_estimator.get_noise_spectrum(),
+ channels_[ch]->noise_estimator.get_prev_noise_spectrum(),
+ channels_[ch]->noise_estimator.get_parametric_noise_spectrum(),
+ signal_spectrum);
+
+ if (num_bands_ > 1) {
+ // Compute the time-domain gain for attenuating the noise in the upper
+ // bands.
+
+ upper_band_gains[ch] = ComputeUpperBandsGain(
+ suppression_params_.minimum_attenuating_gain,
+ channels_[ch]->wiener_filter.get_filter(),
+ channels_[ch]->speech_probability_estimator.get_probability(),
+ channels_[ch]->prev_analysis_signal_spectrum, signal_spectrum);
+ }
+ }
+
+ // Only do the below processing if the output of the audio processing module
+ // is used.
+ if (!capture_output_used_) {
+ return;
+ }
+
+ // Aggregate the Wiener filters for all channels.
+ std::array<float, kFftSizeBy2Plus1> filter_data;
+ rtc::ArrayView<const float, kFftSizeBy2Plus1> filter = filter_data;
+ if (num_channels_ == 1) {
+ filter = channels_[0]->wiener_filter.get_filter();
+ } else {
+ AggregateWienerFilters(filter_data);
+ }
+
+ for (size_t ch = 0; ch < num_channels_; ++ch) {
+ // Apply the filter to the lower band.
+ for (size_t i = 0; i < kFftSizeBy2Plus1; ++i) {
+ filter_bank_states[ch].real[i] *= filter[i];
+ filter_bank_states[ch].imag[i] *= filter[i];
+ }
+ }
+
+ // Perform filter bank synthesis.
+ for (size_t ch = 0; ch < num_channels_; ++ch) {
+ fft_.Ifft(filter_bank_states[ch].real, filter_bank_states[ch].imag,
+ filter_bank_states[ch].extended_frame);
+ }
+
+ for (size_t ch = 0; ch < num_channels_; ++ch) {
+ const float energy_after_filtering =
+ ComputeEnergyOfExtendedFrame(filter_bank_states[ch].extended_frame);
+
+ // Apply synthesis window.
+ ApplyFilterBankWindow(filter_bank_states[ch].extended_frame);
+
+ // Compute the adjustment of the noise attenuation filter based on the
+ // effect of the attenuation.
+ gain_adjustments[ch] =
+ channels_[ch]->wiener_filter.ComputeOverallScalingFactor(
+ num_analyzed_frames_,
+ channels_[ch]->speech_probability_estimator.get_prior_probability(),
+ energies_before_filtering[ch], energy_after_filtering);
+ }
+
+ // Select and apply adjustment of the noise attenuation filter based on the
+ // effect of the attenuation.
+ float gain_adjustment = gain_adjustments[0];
+ for (size_t ch = 1; ch < num_channels_; ++ch) {
+ gain_adjustment = std::min(gain_adjustment, gain_adjustments[ch]);
+ }
+ for (size_t ch = 0; ch < num_channels_; ++ch) {
+ for (size_t i = 0; i < kFftSize; ++i) {
+ filter_bank_states[ch].extended_frame[i] =
+ gain_adjustment * filter_bank_states[ch].extended_frame[i];
+ }
+ }
+
+ // Use overlap-and-add to form the output frame of the lowest band.
+ for (size_t ch = 0; ch < num_channels_; ++ch) {
+ rtc::ArrayView<float, kNsFrameSize> y_band0(&audio->split_bands(ch)[0][0],
+ kNsFrameSize);
+ OverlapAndAdd(filter_bank_states[ch].extended_frame,
+ channels_[ch]->process_synthesis_memory, y_band0);
+ }
+
+ if (num_bands_ > 1) {
+ // Select the noise attenuating gain to apply to the upper band.
+ float upper_band_gain = upper_band_gains[0];
+ for (size_t ch = 1; ch < num_channels_; ++ch) {
+ upper_band_gain = std::min(upper_band_gain, upper_band_gains[ch]);
+ }
+
+ // Process the upper bands.
+ for (size_t ch = 0; ch < num_channels_; ++ch) {
+ for (size_t b = 1; b < num_bands_; ++b) {
+ // Delay the upper bands to match the delay of the filterbank applied to
+ // the lowest band.
+ rtc::ArrayView<float, kNsFrameSize> y_band(
+ &audio->split_bands(ch)[b][0], kNsFrameSize);
+ std::array<float, kNsFrameSize> delayed_frame;
+ DelaySignal(y_band, channels_[ch]->process_delay_memory[b - 1],
+ delayed_frame);
+
+ // Apply the time-domain noise-attenuating gain.
+ for (size_t j = 0; j < kNsFrameSize; j++) {
+ y_band[j] = upper_band_gain * delayed_frame[j];
+ }
+ }
+ }
+ }
+
+ // Limit the output to the allowed range.
+ for (size_t ch = 0; ch < num_channels_; ++ch) {
+ for (size_t b = 0; b < num_bands_; ++b) {
+ rtc::ArrayView<float, kNsFrameSize> y_band(&audio->split_bands(ch)[b][0],
+ kNsFrameSize);
+ for (size_t j = 0; j < kNsFrameSize; j++) {
+ y_band[j] = std::min(std::max(y_band[j], -32768.f), 32767.f);
+ }
+ }
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/ns/noise_suppressor.h b/third_party/libwebrtc/modules/audio_processing/ns/noise_suppressor.h
new file mode 100644
index 0000000000..1e321cf4a2
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/ns/noise_suppressor.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_NS_NOISE_SUPPRESSOR_H_
+#define MODULES_AUDIO_PROCESSING_NS_NOISE_SUPPRESSOR_H_
+
+#include <memory>
+#include <vector>
+
+#include "api/array_view.h"
+#include "modules/audio_processing/audio_buffer.h"
+#include "modules/audio_processing/ns/noise_estimator.h"
+#include "modules/audio_processing/ns/ns_common.h"
+#include "modules/audio_processing/ns/ns_config.h"
+#include "modules/audio_processing/ns/ns_fft.h"
+#include "modules/audio_processing/ns/speech_probability_estimator.h"
+#include "modules/audio_processing/ns/wiener_filter.h"
+
+namespace webrtc {
+
+// Class for suppressing noise in a signal.
+class NoiseSuppressor {
+ public:
+ NoiseSuppressor(const NsConfig& config,
+ size_t sample_rate_hz,
+ size_t num_channels);
+ NoiseSuppressor(const NoiseSuppressor&) = delete;
+ NoiseSuppressor& operator=(const NoiseSuppressor&) = delete;
+
+ // Analyses the signal (typically applied before the AEC to avoid analyzing
+ // any comfort noise signal).
+ void Analyze(const AudioBuffer& audio);
+
+ // Applies noise suppression.
+ void Process(AudioBuffer* audio);
+
+ // Specifies whether the capture output will be used. The purpose of this is
+ // to allow the noise suppressor to deactivate some of the processing when the
+ // resulting output will not be used anyway, for instance when the endpoint is
+ // muted.
+ void SetCaptureOutputUsage(bool capture_output_used) {
+ capture_output_used_ = capture_output_used;
+ }
+
+ private:
+ const size_t num_bands_;
+ const size_t num_channels_;
+ const SuppressionParams suppression_params_;
+ int32_t num_analyzed_frames_ = -1;
+ NrFft fft_;
+ bool capture_output_used_ = true;
+
+ struct ChannelState {
+ ChannelState(const SuppressionParams& suppression_params, size_t num_bands);
+
+ SpeechProbabilityEstimator speech_probability_estimator;
+ WienerFilter wiener_filter;
+ NoiseEstimator noise_estimator;
+ std::array<float, kFftSizeBy2Plus1> prev_analysis_signal_spectrum;
+ std::array<float, kFftSize - kNsFrameSize> analyze_analysis_memory;
+ std::array<float, kOverlapSize> process_analysis_memory;
+ std::array<float, kOverlapSize> process_synthesis_memory;
+ std::vector<std::array<float, kOverlapSize>> process_delay_memory;
+ };
+
+ struct FilterBankState {
+ std::array<float, kFftSize> real;
+ std::array<float, kFftSize> imag;
+ std::array<float, kFftSize> extended_frame;
+ };
+
+ std::vector<FilterBankState> filter_bank_states_heap_;
+ std::vector<float> upper_band_gains_heap_;
+ std::vector<float> energies_before_filtering_heap_;
+ std::vector<float> gain_adjustments_heap_;
+ std::vector<std::unique_ptr<ChannelState>> channels_;
+
+ // Aggregates the Wiener filters into a single filter to use.
+ void AggregateWienerFilters(
+ rtc::ArrayView<float, kFftSizeBy2Plus1> filter) const;
+};
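+
+// A minimal usage sketch (illustrative only; assumes a 48 kHz stereo
+// AudioBuffer named `audio`):
+//
+//   NsConfig cfg;
+//   cfg.target_level = NsConfig::SuppressionLevel::k12dB;
+//   NoiseSuppressor ns(cfg, /*sample_rate_hz=*/48000, /*num_channels=*/2);
+//   audio.SplitIntoFrequencyBands();
+//   ns.Analyze(audio);   // Typically before the AEC on the capture path.
+//   ns.Process(&audio);  // On the same 10 ms frame, after other processing.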
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_NS_NOISE_SUPPRESSOR_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/ns/noise_suppressor_unittest.cc b/third_party/libwebrtc/modules/audio_processing/ns/noise_suppressor_unittest.cc
new file mode 100644
index 0000000000..28ea63ae40
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/ns/noise_suppressor_unittest.cc
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/ns/noise_suppressor.h"
+
+#include <deque>
+#include <memory>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "rtc_base/strings/string_builder.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+std::string ProduceDebugText(int sample_rate_hz,
+ size_t num_channels,
+ NsConfig::SuppressionLevel level) {
+ rtc::StringBuilder ss;
+ ss << "Sample rate: " << sample_rate_hz << ", num_channels: " << num_channels
+ << ", level: " << static_cast<int>(level);
+ return ss.Release();
+}
+
+void PopulateInputFrameWithIdenticalChannels(size_t num_channels,
+ size_t num_bands,
+ size_t frame_index,
+ AudioBuffer* audio) {
+ for (size_t ch = 0; ch < num_channels; ++ch) {
+ for (size_t b = 0; b < num_bands; ++b) {
+ for (size_t i = 0; i < 160; ++i) {
+ float value = static_cast<int>(frame_index * 160 + i);
+ audio->split_bands(ch)[b][i] = (value > 0 ? 5000 * b + value : 0);
+ }
+ }
+ }
+}
+
+void VerifyIdenticalChannels(size_t num_channels,
+ size_t num_bands,
+ size_t frame_index,
+ const AudioBuffer& audio) {
+ EXPECT_GT(num_channels, 1u);
+ for (size_t ch = 1; ch < num_channels; ++ch) {
+ for (size_t b = 0; b < num_bands; ++b) {
+ for (size_t i = 0; i < 160; ++i) {
+ EXPECT_EQ(audio.split_bands_const(ch)[b][i],
+ audio.split_bands_const(0)[b][i]);
+ }
+ }
+ }
+}
+
+} // namespace
+
+// Verifies that the same noise reduction effect is applied to all channels.
+TEST(NoiseSuppressor, IdenticalChannelEffects) {
+ for (auto rate : {16000, 32000, 48000}) {
+ for (auto num_channels : {1, 4, 8}) {
+ for (auto level :
+ {NsConfig::SuppressionLevel::k6dB, NsConfig::SuppressionLevel::k12dB,
+ NsConfig::SuppressionLevel::k18dB,
+ NsConfig::SuppressionLevel::k21dB}) {
+ SCOPED_TRACE(ProduceDebugText(rate, num_channels, level));
+
+ const size_t num_bands = rate / 16000;
+ AudioBuffer audio(rate, num_channels, rate, num_channels, rate,
+ num_channels);
+ NsConfig cfg;
+ NoiseSuppressor ns(cfg, rate, num_channels);
+ for (size_t frame_index = 0; frame_index < 1000; ++frame_index) {
+ if (rate > 16000) {
+ audio.SplitIntoFrequencyBands();
+ }
+
+ PopulateInputFrameWithIdenticalChannels(num_channels, num_bands,
+ frame_index, &audio);
+
+ ns.Analyze(audio);
+ ns.Process(&audio);
+ if (num_channels > 1) {
+ VerifyIdenticalChannels(num_channels, num_bands, frame_index,
+ audio);
+ }
+ }
+ }
+ }
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/ns/ns_common.h b/third_party/libwebrtc/modules/audio_processing/ns/ns_common.h
new file mode 100644
index 0000000000..d6149f72a7
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/ns/ns_common.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_NS_NS_COMMON_H_
+#define MODULES_AUDIO_PROCESSING_NS_NS_COMMON_H_
+
+#include <cstddef>
+
+namespace webrtc {
+
+constexpr size_t kFftSize = 256;
+constexpr size_t kFftSizeBy2Plus1 = kFftSize / 2 + 1;
+constexpr size_t kNsFrameSize = 160;
+constexpr size_t kOverlapSize = kFftSize - kNsFrameSize;
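+// With a 256-point FFT and 160-sample frames, the overlap is 96 samples and
+// the one-sided spectrum has kFftSizeBy2Plus1 = 129 bins.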
+
+constexpr int kShortStartupPhaseBlocks = 50;
+constexpr int kLongStartupPhaseBlocks = 200;
+constexpr int kFeatureUpdateWindowSize = 500;
+
+constexpr float kLtrFeatureThr = 0.5f;
+constexpr float kBinSizeLrt = 0.1f;
+constexpr float kBinSizeSpecFlat = 0.05f;
+constexpr float kBinSizeSpecDiff = 0.1f;
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_NS_NS_COMMON_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/ns/ns_config.h b/third_party/libwebrtc/modules/audio_processing/ns/ns_config.h
new file mode 100644
index 0000000000..0a285e9cea
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/ns/ns_config.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_NS_NS_CONFIG_H_
+#define MODULES_AUDIO_PROCESSING_NS_NS_CONFIG_H_
+
+namespace webrtc {
+
+// Config struct for the noise suppressor
+struct NsConfig {
+ enum class SuppressionLevel { k6dB, k12dB, k18dB, k21dB };
+ SuppressionLevel target_level = SuppressionLevel::k12dB;
+};
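+
+// Illustrative usage sketch (the NoiseSuppressor constructor call mirrors the
+// unit test in this directory; sample rate and channel count are
+// caller-provided):
+//   NsConfig cfg;
+//   cfg.target_level = NsConfig::SuppressionLevel::k18dB;
+//   NoiseSuppressor ns(cfg, /*sample_rate_hz=*/48000, /*num_channels=*/1);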
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_NS_NS_CONFIG_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/ns/ns_fft.cc b/third_party/libwebrtc/modules/audio_processing/ns/ns_fft.cc
new file mode 100644
index 0000000000..264c46972c
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/ns/ns_fft.cc
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/ns/ns_fft.h"
+
+#include "common_audio/third_party/ooura/fft_size_256/fft4g.h"
+
+namespace webrtc {
+
+NrFft::NrFft() : bit_reversal_state_(kFftSize / 2), tables_(kFftSize / 2) {
+  // Initialize WebRtc_rdft (setting bit_reversal_state_[0] to 0 triggers
+  // initialization).
+ bit_reversal_state_[0] = 0.f;
+ std::array<float, kFftSize> tmp_buffer;
+ tmp_buffer.fill(0.f);
+ WebRtc_rdft(kFftSize, 1, tmp_buffer.data(), bit_reversal_state_.data(),
+ tables_.data());
+}
+
+void NrFft::Fft(rtc::ArrayView<float, kFftSize> time_data,
+ rtc::ArrayView<float, kFftSize> real,
+ rtc::ArrayView<float, kFftSize> imag) {
+ WebRtc_rdft(kFftSize, 1, time_data.data(), bit_reversal_state_.data(),
+ tables_.data());
+
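+  // WebRtc_rdft packs the spectrum in-place: the DC term in element 0, the
+  // Nyquist term in element 1, and interleaved real/imaginary pairs after
+  // that; the code below unpacks this layout.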
+ imag[0] = 0;
+ real[0] = time_data[0];
+
+ imag[kFftSizeBy2Plus1 - 1] = 0;
+ real[kFftSizeBy2Plus1 - 1] = time_data[1];
+
+ for (size_t i = 1; i < kFftSizeBy2Plus1 - 1; ++i) {
+ real[i] = time_data[2 * i];
+ imag[i] = time_data[2 * i + 1];
+ }
+}
+
+void NrFft::Ifft(rtc::ArrayView<const float> real,
+ rtc::ArrayView<const float> imag,
+ rtc::ArrayView<float> time_data) {
+ time_data[0] = real[0];
+ time_data[1] = real[kFftSizeBy2Plus1 - 1];
+ for (size_t i = 1; i < kFftSizeBy2Plus1 - 1; ++i) {
+ time_data[2 * i] = real[i];
+ time_data[2 * i + 1] = imag[i];
+ }
+ WebRtc_rdft(kFftSize, -1, time_data.data(), bit_reversal_state_.data(),
+ tables_.data());
+
+  // Scale the output as required by the inverse Ooura rdft (2 / N).
+ constexpr float kScaling = 2.f / kFftSize;
+ for (float& d : time_data) {
+ d *= kScaling;
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/ns/ns_fft.h b/third_party/libwebrtc/modules/audio_processing/ns/ns_fft.h
new file mode 100644
index 0000000000..539251eef2
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/ns/ns_fft.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_NS_NS_FFT_H_
+#define MODULES_AUDIO_PROCESSING_NS_NS_FFT_H_
+
+#include <vector>
+
+#include "api/array_view.h"
+#include "modules/audio_processing/ns/ns_common.h"
+
+namespace webrtc {
+
+// Wrapper class providing 256-point FFT functionality.
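+//
+// Illustrative usage sketch (caller-owned buffers assumed):
+//   NrFft fft;
+//   std::array<float, kFftSize> time_data{};
+//   std::array<float, kFftSize> re{}, im{};
+//   fft.Fft(time_data, re, im);   // Bins [0, kFftSizeBy2Plus1) are valid.
+//   fft.Ifft(re, im, time_data);  // Inverse, scaled by 2 / kFftSize.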
+class NrFft {
+ public:
+ NrFft();
+ NrFft(const NrFft&) = delete;
+ NrFft& operator=(const NrFft&) = delete;
+
+ // Transforms the signal from time to frequency domain.
+ void Fft(rtc::ArrayView<float, kFftSize> time_data,
+ rtc::ArrayView<float, kFftSize> real,
+ rtc::ArrayView<float, kFftSize> imag);
+
+ // Transforms the signal from frequency to time domain.
+ void Ifft(rtc::ArrayView<const float> real,
+ rtc::ArrayView<const float> imag,
+ rtc::ArrayView<float> time_data);
+
+ private:
+ std::vector<size_t> bit_reversal_state_;
+ std::vector<float> tables_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_NS_NS_FFT_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/ns/ns_gn/moz.build b/third_party/libwebrtc/modules/audio_processing/ns/ns_gn/moz.build
new file mode 100644
index 0000000000..d83a789b5c
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/ns/ns_gn/moz.build
@@ -0,0 +1,225 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_APM_DEBUG_DUMP"] = "0"
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/audio_processing/ns/fast_math.cc",
+ "/third_party/libwebrtc/modules/audio_processing/ns/histograms.cc",
+ "/third_party/libwebrtc/modules/audio_processing/ns/noise_estimator.cc",
+ "/third_party/libwebrtc/modules/audio_processing/ns/noise_suppressor.cc",
+ "/third_party/libwebrtc/modules/audio_processing/ns/ns_fft.cc",
+ "/third_party/libwebrtc/modules/audio_processing/ns/prior_signal_model.cc",
+ "/third_party/libwebrtc/modules/audio_processing/ns/prior_signal_model_estimator.cc",
+ "/third_party/libwebrtc/modules/audio_processing/ns/quantile_noise_estimator.cc",
+ "/third_party/libwebrtc/modules/audio_processing/ns/signal_model.cc",
+ "/third_party/libwebrtc/modules/audio_processing/ns/signal_model_estimator.cc",
+ "/third_party/libwebrtc/modules/audio_processing/ns/speech_probability_estimator.cc",
+ "/third_party/libwebrtc/modules/audio_processing/ns/suppression_params.cc",
+ "/third_party/libwebrtc/modules/audio_processing/ns/wiener_filter.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("ns_gn")
diff --git a/third_party/libwebrtc/modules/audio_processing/ns/prior_signal_model.cc b/third_party/libwebrtc/modules/audio_processing/ns/prior_signal_model.cc
new file mode 100644
index 0000000000..f25a1e2060
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/ns/prior_signal_model.cc
@@ -0,0 +1,18 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/ns/prior_signal_model.h"
+
+namespace webrtc {
+
+PriorSignalModel::PriorSignalModel(float lrt_initial_value)
+ : lrt(lrt_initial_value) {}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/ns/prior_signal_model.h b/third_party/libwebrtc/modules/audio_processing/ns/prior_signal_model.h
new file mode 100644
index 0000000000..dcfa7ea709
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/ns/prior_signal_model.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_NS_PRIOR_SIGNAL_MODEL_H_
+#define MODULES_AUDIO_PROCESSING_NS_PRIOR_SIGNAL_MODEL_H_
+
+namespace webrtc {
+
+// Struct for storing the prior signal model parameters.
+struct PriorSignalModel {
+ explicit PriorSignalModel(float lrt_initial_value);
+ PriorSignalModel(const PriorSignalModel&) = delete;
+ PriorSignalModel& operator=(const PriorSignalModel&) = delete;
+
+ float lrt;
+ float flatness_threshold = .5f;
+ float template_diff_threshold = .5f;
+ float lrt_weighting = 1.f;
+ float flatness_weighting = 0.f;
+ float difference_weighting = 0.f;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_NS_PRIOR_SIGNAL_MODEL_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/ns/prior_signal_model_estimator.cc b/third_party/libwebrtc/modules/audio_processing/ns/prior_signal_model_estimator.cc
new file mode 100644
index 0000000000..c814658e57
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/ns/prior_signal_model_estimator.cc
@@ -0,0 +1,170 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/ns/prior_signal_model_estimator.h"
+
+#include <math.h>
+#include <algorithm>
+
+#include "modules/audio_processing/ns/fast_math.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+namespace {
+
+// Identifies the first of the two largest peaks in the histogram.
+void FindFirstOfTwoLargestPeaks(
+ float bin_size,
+ rtc::ArrayView<const int, kHistogramSize> spectral_flatness,
+ float* peak_position,
+ int* peak_weight) {
+ RTC_DCHECK(peak_position);
+ RTC_DCHECK(peak_weight);
+
+ int peak_value = 0;
+ int secondary_peak_value = 0;
+ *peak_position = 0.f;
+ float secondary_peak_position = 0.f;
+ *peak_weight = 0;
+ int secondary_peak_weight = 0;
+
+ // Identify the two largest peaks.
+ for (int i = 0; i < kHistogramSize; ++i) {
+ const float bin_mid = (i + 0.5f) * bin_size;
+ if (spectral_flatness[i] > peak_value) {
+ // Found new "first" peak candidate.
+ secondary_peak_value = peak_value;
+ secondary_peak_weight = *peak_weight;
+ secondary_peak_position = *peak_position;
+
+ peak_value = spectral_flatness[i];
+ *peak_weight = spectral_flatness[i];
+ *peak_position = bin_mid;
+ } else if (spectral_flatness[i] > secondary_peak_value) {
+ // Found new "second" peak candidate.
+ secondary_peak_value = spectral_flatness[i];
+ secondary_peak_weight = spectral_flatness[i];
+ secondary_peak_position = bin_mid;
+ }
+ }
+
+ // Merge the peaks if they are close.
+ if ((fabs(secondary_peak_position - *peak_position) < 2 * bin_size) &&
+ (secondary_peak_weight > 0.5f * (*peak_weight))) {
+ *peak_weight += secondary_peak_weight;
+ *peak_position = 0.5f * (*peak_position + secondary_peak_position);
+ }
+}
+
+void UpdateLrt(rtc::ArrayView<const int, kHistogramSize> lrt_histogram,
+ float* prior_model_lrt,
+ bool* low_lrt_fluctuations) {
+ RTC_DCHECK(prior_model_lrt);
+ RTC_DCHECK(low_lrt_fluctuations);
+
+ float average = 0.f;
+ float average_compl = 0.f;
+ float average_squared = 0.f;
+ int count = 0;
+
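+  // Average the LRT feature over the lowest 10 histogram bins, i.e. over the
+  // range [0, 1) in LRT units given the 0.1 bin size.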
+ for (int i = 0; i < 10; ++i) {
+ float bin_mid = (i + 0.5f) * kBinSizeLrt;
+ average += lrt_histogram[i] * bin_mid;
+ count += lrt_histogram[i];
+ }
+ if (count > 0) {
+ average = average / count;
+ }
+
+ for (int i = 0; i < kHistogramSize; ++i) {
+ float bin_mid = (i + 0.5f) * kBinSizeLrt;
+ average_squared += lrt_histogram[i] * bin_mid * bin_mid;
+ average_compl += lrt_histogram[i] * bin_mid;
+ }
+  constexpr float kOneByFeatureUpdateWindowSize =
+      1.f / kFeatureUpdateWindowSize;
+  average_squared = average_squared * kOneByFeatureUpdateWindowSize;
+  average_compl = average_compl * kOneByFeatureUpdateWindowSize;
+
+ // Fluctuation limit of LRT feature.
+ *low_lrt_fluctuations = average_squared - average * average_compl < 0.05f;
+
+ // Get threshold for LRT feature.
+ constexpr float kMaxLrt = 1.f;
+ constexpr float kMinLrt = .2f;
+ if (*low_lrt_fluctuations) {
+ // Very low fluctuation, so likely noise.
+ *prior_model_lrt = kMaxLrt;
+ } else {
+ *prior_model_lrt = std::min(kMaxLrt, std::max(kMinLrt, 1.2f * average));
+ }
+}
+
+} // namespace
+
+PriorSignalModelEstimator::PriorSignalModelEstimator(float lrt_initial_value)
+ : prior_model_(lrt_initial_value) {}
+
+// Extracts thresholds for the feature parameters and computes the
+// thresholds and weights.
+void PriorSignalModelEstimator::Update(const Histograms& histograms) {
+ bool low_lrt_fluctuations;
+ UpdateLrt(histograms.get_lrt(), &prior_model_.lrt, &low_lrt_fluctuations);
+
+ // For spectral flatness and spectral difference: compute the main peaks of
+ // the histograms.
+ float spectral_flatness_peak_position;
+ int spectral_flatness_peak_weight;
+ FindFirstOfTwoLargestPeaks(
+ kBinSizeSpecFlat, histograms.get_spectral_flatness(),
+ &spectral_flatness_peak_position, &spectral_flatness_peak_weight);
+
+ float spectral_diff_peak_position = 0.f;
+ int spectral_diff_peak_weight = 0;
+ FindFirstOfTwoLargestPeaks(kBinSizeSpecDiff, histograms.get_spectral_diff(),
+ &spectral_diff_peak_position,
+ &spectral_diff_peak_weight);
+
+  // Reject if the weight of the peaks is not large enough or the peak value
+  // is too small. Peak limit for spectral flatness (varies between 0 and 1).
+ const int use_spec_flat = spectral_flatness_peak_weight < 0.3f * 500 ||
+ spectral_flatness_peak_position < 0.6f
+ ? 0
+ : 1;
+
+  // Reject if the weight of the peaks is not large enough or if the
+  // fluctuations of the LRT feature are very low, indicating a noise state.
+ const int use_spec_diff =
+ spectral_diff_peak_weight < 0.3f * 500 || low_lrt_fluctuations ? 0 : 1;
+
+ // Update the model.
+ prior_model_.template_diff_threshold = 1.2f * spectral_diff_peak_position;
+ prior_model_.template_diff_threshold =
+ std::min(1.f, std::max(0.16f, prior_model_.template_diff_threshold));
+
+ float one_by_feature_sum = 1.f / (1.f + use_spec_flat + use_spec_diff);
+ prior_model_.lrt_weighting = one_by_feature_sum;
+
+ if (use_spec_flat == 1) {
+ prior_model_.flatness_threshold = 0.9f * spectral_flatness_peak_position;
+ prior_model_.flatness_threshold =
+ std::min(.95f, std::max(0.1f, prior_model_.flatness_threshold));
+ prior_model_.flatness_weighting = one_by_feature_sum;
+ } else {
+ prior_model_.flatness_weighting = 0.f;
+ }
+
+ if (use_spec_diff == 1) {
+ prior_model_.difference_weighting = one_by_feature_sum;
+ } else {
+ prior_model_.difference_weighting = 0.f;
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/ns/prior_signal_model_estimator.h b/third_party/libwebrtc/modules/audio_processing/ns/prior_signal_model_estimator.h
new file mode 100644
index 0000000000..d178323dba
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/ns/prior_signal_model_estimator.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_NS_PRIOR_SIGNAL_MODEL_ESTIMATOR_H_
+#define MODULES_AUDIO_PROCESSING_NS_PRIOR_SIGNAL_MODEL_ESTIMATOR_H_
+
+#include "modules/audio_processing/ns/histograms.h"
+#include "modules/audio_processing/ns/prior_signal_model.h"
+
+namespace webrtc {
+
+// Estimator of the prior signal model parameters.
+class PriorSignalModelEstimator {
+ public:
+ explicit PriorSignalModelEstimator(float lrt_initial_value);
+ PriorSignalModelEstimator(const PriorSignalModelEstimator&) = delete;
+ PriorSignalModelEstimator& operator=(const PriorSignalModelEstimator&) =
+ delete;
+
+ // Updates the model estimate.
+ void Update(const Histograms& h);
+
+ // Returns the estimated model.
+ const PriorSignalModel& get_prior_model() const { return prior_model_; }
+
+ private:
+ PriorSignalModel prior_model_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_NS_PRIOR_SIGNAL_MODEL_ESTIMATOR_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/ns/quantile_noise_estimator.cc b/third_party/libwebrtc/modules/audio_processing/ns/quantile_noise_estimator.cc
new file mode 100644
index 0000000000..bab494ff21
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/ns/quantile_noise_estimator.cc
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/ns/quantile_noise_estimator.h"
+
+#include <algorithm>
+
+#include "modules/audio_processing/ns/fast_math.h"
+
+namespace webrtc {
+
+QuantileNoiseEstimator::QuantileNoiseEstimator() {
+ quantile_.fill(0.f);
+ density_.fill(0.3f);
+ log_quantile_.fill(8.f);
+
+ constexpr float kOneBySimult = 1.f / kSimult;
+ for (size_t i = 0; i < kSimult; ++i) {
+ counter_[i] = floor(kLongStartupPhaseBlocks * (i + 1.f) * kOneBySimult);
+ }
+}
+
+void QuantileNoiseEstimator::Estimate(
+ rtc::ArrayView<const float, kFftSizeBy2Plus1> signal_spectrum,
+ rtc::ArrayView<float, kFftSizeBy2Plus1> noise_spectrum) {
+ std::array<float, kFftSizeBy2Plus1> log_spectrum;
+ LogApproximation(signal_spectrum, log_spectrum);
+
+ int quantile_index_to_return = -1;
+ // Loop over simultaneous estimates.
+ for (int s = 0, k = 0; s < kSimult;
+ ++s, k += static_cast<int>(kFftSizeBy2Plus1)) {
+ const float one_by_counter_plus_1 = 1.f / (counter_[s] + 1.f);
+ for (int i = 0, j = k; i < static_cast<int>(kFftSizeBy2Plus1); ++i, ++j) {
+ // Update log quantile estimate.
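+      // The asymmetric steps (0.25 up, 0.75 down) drive the estimate toward
+      // the 25% quantile of the log spectrum.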
+ const float delta = density_[j] > 1.f ? 40.f / density_[j] : 40.f;
+
+ const float multiplier = delta * one_by_counter_plus_1;
+ if (log_spectrum[i] > log_quantile_[j]) {
+ log_quantile_[j] += 0.25f * multiplier;
+ } else {
+ log_quantile_[j] -= 0.75f * multiplier;
+ }
+
+ // Update density estimate.
+ constexpr float kWidth = 0.01f;
+ constexpr float kOneByWidthPlus2 = 1.f / (2.f * kWidth);
+ if (fabs(log_spectrum[i] - log_quantile_[j]) < kWidth) {
+ density_[j] = (counter_[s] * density_[j] + kOneByWidthPlus2) *
+ one_by_counter_plus_1;
+ }
+ }
+
+ if (counter_[s] >= kLongStartupPhaseBlocks) {
+ counter_[s] = 0;
+ if (num_updates_ >= kLongStartupPhaseBlocks) {
+ quantile_index_to_return = k;
+ }
+ }
+
+ ++counter_[s];
+ }
+
+ // Sequentially update the noise during startup.
+ if (num_updates_ < kLongStartupPhaseBlocks) {
+    // Use the last "s" to get a noise estimate during startup that differs
+    // from zero.
+ quantile_index_to_return = kFftSizeBy2Plus1 * (kSimult - 1);
+ ++num_updates_;
+ }
+
+ if (quantile_index_to_return >= 0) {
+ ExpApproximation(
+ rtc::ArrayView<const float>(&log_quantile_[quantile_index_to_return],
+ kFftSizeBy2Plus1),
+ quantile_);
+ }
+
+ std::copy(quantile_.begin(), quantile_.end(), noise_spectrum.begin());
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/ns/quantile_noise_estimator.h b/third_party/libwebrtc/modules/audio_processing/ns/quantile_noise_estimator.h
new file mode 100644
index 0000000000..67d1512209
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/ns/quantile_noise_estimator.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_NS_QUANTILE_NOISE_ESTIMATOR_H_
+#define MODULES_AUDIO_PROCESSING_NS_QUANTILE_NOISE_ESTIMATOR_H_
+
+#include <math.h>
+#include <array>
+
+#include "api/array_view.h"
+#include "modules/audio_processing/ns/ns_common.h"
+
+namespace webrtc {
+
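+// Number of simultaneous quantile estimates running with staggered update
+// windows.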
+constexpr int kSimult = 3;
+
+// For quantile noise estimation.
+class QuantileNoiseEstimator {
+ public:
+ QuantileNoiseEstimator();
+ QuantileNoiseEstimator(const QuantileNoiseEstimator&) = delete;
+ QuantileNoiseEstimator& operator=(const QuantileNoiseEstimator&) = delete;
+
+ // Estimate noise.
+ void Estimate(rtc::ArrayView<const float, kFftSizeBy2Plus1> signal_spectrum,
+ rtc::ArrayView<float, kFftSizeBy2Plus1> noise_spectrum);
+
+ private:
+ std::array<float, kSimult * kFftSizeBy2Plus1> density_;
+ std::array<float, kSimult * kFftSizeBy2Plus1> log_quantile_;
+ std::array<float, kFftSizeBy2Plus1> quantile_;
+ std::array<int, kSimult> counter_;
+ int num_updates_ = 1;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_NS_QUANTILE_NOISE_ESTIMATOR_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/ns/signal_model.cc b/third_party/libwebrtc/modules/audio_processing/ns/signal_model.cc
new file mode 100644
index 0000000000..364bfd00d8
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/ns/signal_model.cc
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/ns/signal_model.h"
+
+namespace webrtc {
+
+SignalModel::SignalModel() {
+ constexpr float kSfFeatureThr = 0.5f;
+
+ lrt = kLtrFeatureThr;
+ spectral_flatness = kSfFeatureThr;
+ spectral_diff = kSfFeatureThr;
+ avg_log_lrt.fill(kLtrFeatureThr);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/ns/signal_model.h b/third_party/libwebrtc/modules/audio_processing/ns/signal_model.h
new file mode 100644
index 0000000000..6614d38a38
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/ns/signal_model.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_NS_SIGNAL_MODEL_H_
+#define MODULES_AUDIO_PROCESSING_NS_SIGNAL_MODEL_H_
+
+#include <array>
+
+#include "modules/audio_processing/ns/ns_common.h"
+
+namespace webrtc {
+
+struct SignalModel {
+ SignalModel();
+ SignalModel(const SignalModel&) = delete;
+ SignalModel& operator=(const SignalModel&) = delete;
+
+ float lrt;
+ float spectral_diff;
+ float spectral_flatness;
+ // Log LRT factor with time-smoothing.
+ std::array<float, kFftSizeBy2Plus1> avg_log_lrt;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_NS_SIGNAL_MODEL_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/ns/signal_model_estimator.cc b/third_party/libwebrtc/modules/audio_processing/ns/signal_model_estimator.cc
new file mode 100644
index 0000000000..67dd3bb687
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/ns/signal_model_estimator.cc
@@ -0,0 +1,175 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/ns/signal_model_estimator.h"
+
+#include "modules/audio_processing/ns/fast_math.h"
+
+namespace webrtc {
+
+namespace {
+
+constexpr float kOneByFftSizeBy2Plus1 = 1.f / kFftSizeBy2Plus1;
+
+// Computes the difference measure between input spectrum and a template/learned
+// noise spectrum.
+float ComputeSpectralDiff(
+ rtc::ArrayView<const float, kFftSizeBy2Plus1> conservative_noise_spectrum,
+ rtc::ArrayView<const float, kFftSizeBy2Plus1> signal_spectrum,
+ float signal_spectral_sum,
+ float diff_normalization) {
+ // spectral_diff = var(signal_spectrum) - cov(signal_spectrum, magnAvgPause)^2
+ // / var(magnAvgPause)
+
+ // Compute average quantities.
+ float noise_average = 0.f;
+ for (size_t i = 0; i < kFftSizeBy2Plus1; ++i) {
+ // Conservative smooth noise spectrum from pause frames.
+ noise_average += conservative_noise_spectrum[i];
+ }
+ noise_average = noise_average * kOneByFftSizeBy2Plus1;
+ float signal_average = signal_spectral_sum * kOneByFftSizeBy2Plus1;
+
+ // Compute variance and covariance quantities.
+ float covariance = 0.f;
+ float noise_variance = 0.f;
+ float signal_variance = 0.f;
+ for (size_t i = 0; i < kFftSizeBy2Plus1; ++i) {
+ float signal_diff = signal_spectrum[i] - signal_average;
+ float noise_diff = conservative_noise_spectrum[i] - noise_average;
+ covariance += signal_diff * noise_diff;
+ noise_variance += noise_diff * noise_diff;
+ signal_variance += signal_diff * signal_diff;
+ }
+ covariance *= kOneByFftSizeBy2Plus1;
+ noise_variance *= kOneByFftSizeBy2Plus1;
+ signal_variance *= kOneByFftSizeBy2Plus1;
+
+  // Compute the spectral difference measure.
+ float spectral_diff =
+ signal_variance - (covariance * covariance) / (noise_variance + 0.0001f);
+ // Normalize.
+ return spectral_diff / (diff_normalization + 0.0001f);
+}
+
+// Updates the spectral flatness based on the input spectrum.
+void UpdateSpectralFlatness(
+ rtc::ArrayView<const float, kFftSizeBy2Plus1> signal_spectrum,
+ float signal_spectral_sum,
+ float* spectral_flatness) {
+ RTC_DCHECK(spectral_flatness);
+
+  // Compute the log of the ratio of the geometric to the arithmetic mean
+  // (handle log(0) separately).
+ constexpr float kAveraging = 0.3f;
+ float avg_spect_flatness_num = 0.f;
+ for (size_t i = 1; i < kFftSizeBy2Plus1; ++i) {
+ if (signal_spectrum[i] == 0.f) {
+ *spectral_flatness -= kAveraging * (*spectral_flatness);
+ return;
+ }
+ }
+
+ for (size_t i = 1; i < kFftSizeBy2Plus1; ++i) {
+ avg_spect_flatness_num += LogApproximation(signal_spectrum[i]);
+ }
+
+ float avg_spect_flatness_denom = signal_spectral_sum - signal_spectrum[0];
+
+ avg_spect_flatness_denom = avg_spect_flatness_denom * kOneByFftSizeBy2Plus1;
+ avg_spect_flatness_num = avg_spect_flatness_num * kOneByFftSizeBy2Plus1;
+
+ float spectral_tmp =
+ ExpApproximation(avg_spect_flatness_num) / avg_spect_flatness_denom;
+
+ // Time-avg update of spectral flatness feature.
+ *spectral_flatness += kAveraging * (spectral_tmp - *spectral_flatness);
+}
+
+// Updates the log LRT measures.
+void UpdateSpectralLrt(rtc::ArrayView<const float, kFftSizeBy2Plus1> prior_snr,
+ rtc::ArrayView<const float, kFftSizeBy2Plus1> post_snr,
+ rtc::ArrayView<float, kFftSizeBy2Plus1> avg_log_lrt,
+ float* lrt) {
+ RTC_DCHECK(lrt);
+
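+  // Time-smoothed per-bin log likelihood ratio of speech versus noise,
+  // computed from the prior and posterior SNR estimates.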
+ for (size_t i = 0; i < kFftSizeBy2Plus1; ++i) {
+ float tmp1 = 1.f + 2.f * prior_snr[i];
+ float tmp2 = 2.f * prior_snr[i] / (tmp1 + 0.0001f);
+ float bessel_tmp = (post_snr[i] + 1.f) * tmp2;
+ avg_log_lrt[i] +=
+ .5f * (bessel_tmp - LogApproximation(tmp1) - avg_log_lrt[i]);
+ }
+
+ float log_lrt_time_avg_k_sum = 0.f;
+ for (size_t i = 0; i < kFftSizeBy2Plus1; ++i) {
+ log_lrt_time_avg_k_sum += avg_log_lrt[i];
+ }
+ *lrt = log_lrt_time_avg_k_sum * kOneByFftSizeBy2Plus1;
+}
+
+} // namespace
+
+SignalModelEstimator::SignalModelEstimator()
+ : prior_model_estimator_(kLtrFeatureThr) {}
+
+void SignalModelEstimator::AdjustNormalization(int32_t num_analyzed_frames,
+ float signal_energy) {
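+  // Running average of the signal energy over the frames analyzed so far:
+  // normalization = (normalization * n + energy) / (n + 1).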
+ diff_normalization_ *= num_analyzed_frames;
+ diff_normalization_ += signal_energy;
+ diff_normalization_ /= (num_analyzed_frames + 1);
+}
+
+// Update the noise features.
+void SignalModelEstimator::Update(
+ rtc::ArrayView<const float, kFftSizeBy2Plus1> prior_snr,
+ rtc::ArrayView<const float, kFftSizeBy2Plus1> post_snr,
+ rtc::ArrayView<const float, kFftSizeBy2Plus1> conservative_noise_spectrum,
+ rtc::ArrayView<const float, kFftSizeBy2Plus1> signal_spectrum,
+ float signal_spectral_sum,
+ float signal_energy) {
+ // Compute spectral flatness on input spectrum.
+ UpdateSpectralFlatness(signal_spectrum, signal_spectral_sum,
+ &features_.spectral_flatness);
+
+ // Compute difference of input spectrum with learned/estimated noise spectrum.
+ float spectral_diff =
+ ComputeSpectralDiff(conservative_noise_spectrum, signal_spectrum,
+ signal_spectral_sum, diff_normalization_);
+ // Compute time-avg update of difference feature.
+ features_.spectral_diff += 0.3f * (spectral_diff - features_.spectral_diff);
+
+ signal_energy_sum_ += signal_energy;
+
+ // Compute histograms for parameter decisions (thresholds and weights for
+ // features). Parameters are extracted periodically.
+ if (--histogram_analysis_counter_ > 0) {
+ histograms_.Update(features_);
+ } else {
+ // Compute model parameters.
+ prior_model_estimator_.Update(histograms_);
+
+ // Clear histograms for next update.
+ histograms_.Clear();
+
+ histogram_analysis_counter_ = kFeatureUpdateWindowSize;
+
+ // Update every window:
+ // Compute normalization for the spectral difference for next estimation.
+ signal_energy_sum_ = signal_energy_sum_ / kFeatureUpdateWindowSize;
+ diff_normalization_ = 0.5f * (signal_energy_sum_ + diff_normalization_);
+ signal_energy_sum_ = 0.f;
+ }
+
+ // Compute the LRT.
+ UpdateSpectralLrt(prior_snr, post_snr, features_.avg_log_lrt, &features_.lrt);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/ns/signal_model_estimator.h b/third_party/libwebrtc/modules/audio_processing/ns/signal_model_estimator.h
new file mode 100644
index 0000000000..58ce00acbf
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/ns/signal_model_estimator.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_NS_SIGNAL_MODEL_ESTIMATOR_H_
+#define MODULES_AUDIO_PROCESSING_NS_SIGNAL_MODEL_ESTIMATOR_H_
+
+#include <array>
+
+#include "api/array_view.h"
+#include "modules/audio_processing/ns/histograms.h"
+#include "modules/audio_processing/ns/ns_common.h"
+#include "modules/audio_processing/ns/prior_signal_model.h"
+#include "modules/audio_processing/ns/prior_signal_model_estimator.h"
+#include "modules/audio_processing/ns/signal_model.h"
+
+namespace webrtc {
+
+class SignalModelEstimator {
+ public:
+ SignalModelEstimator();
+ SignalModelEstimator(const SignalModelEstimator&) = delete;
+ SignalModelEstimator& operator=(const SignalModelEstimator&) = delete;
+
+ // Compute signal normalization during the initial startup phase.
+ void AdjustNormalization(int32_t num_analyzed_frames, float signal_energy);
+
+ void Update(
+ rtc::ArrayView<const float, kFftSizeBy2Plus1> prior_snr,
+ rtc::ArrayView<const float, kFftSizeBy2Plus1> post_snr,
+ rtc::ArrayView<const float, kFftSizeBy2Plus1> conservative_noise_spectrum,
+ rtc::ArrayView<const float, kFftSizeBy2Plus1> signal_spectrum,
+ float signal_spectral_sum,
+ float signal_energy);
+
+ const PriorSignalModel& get_prior_model() const {
+ return prior_model_estimator_.get_prior_model();
+ }
+ const SignalModel& get_model() { return features_; }
+
+ private:
+ float diff_normalization_ = 0.f;
+ float signal_energy_sum_ = 0.f;
+ Histograms histograms_;
+ int histogram_analysis_counter_ = 500;
+ PriorSignalModelEstimator prior_model_estimator_;
+ SignalModel features_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_NS_SIGNAL_MODEL_ESTIMATOR_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/ns/speech_probability_estimator.cc b/third_party/libwebrtc/modules/audio_processing/ns/speech_probability_estimator.cc
new file mode 100644
index 0000000000..fce9bc8e07
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/ns/speech_probability_estimator.cc
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/ns/speech_probability_estimator.h"
+
+#include <math.h>
+#include <algorithm>
+
+#include "modules/audio_processing/ns/fast_math.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+SpeechProbabilityEstimator::SpeechProbabilityEstimator() {
+ speech_probability_.fill(0.f);
+}
+
+void SpeechProbabilityEstimator::Update(
+ int32_t num_analyzed_frames,
+ rtc::ArrayView<const float, kFftSizeBy2Plus1> prior_snr,
+ rtc::ArrayView<const float, kFftSizeBy2Plus1> post_snr,
+ rtc::ArrayView<const float, kFftSizeBy2Plus1> conservative_noise_spectrum,
+ rtc::ArrayView<const float, kFftSizeBy2Plus1> signal_spectrum,
+ float signal_spectral_sum,
+ float signal_energy) {
+ // Update models.
+ if (num_analyzed_frames < kLongStartupPhaseBlocks) {
+ signal_model_estimator_.AdjustNormalization(num_analyzed_frames,
+ signal_energy);
+ }
+ signal_model_estimator_.Update(prior_snr, post_snr,
+ conservative_noise_spectrum, signal_spectrum,
+ signal_spectral_sum, signal_energy);
+
+ const SignalModel& model = signal_model_estimator_.get_model();
+ const PriorSignalModel& prior_model =
+ signal_model_estimator_.get_prior_model();
+
+ // Width parameter in sigmoid map for prior model.
+ constexpr float kWidthPrior0 = 4.f;
+ // Width for pause region: lower range, so increase width in tanh map.
+ constexpr float kWidthPrior1 = 2.f * kWidthPrior0;
+
+ // Average LRT feature: use larger width in tanh map for pause regions.
+ float width_prior = model.lrt < prior_model.lrt ? kWidthPrior1 : kWidthPrior0;
+
+ // Compute indicator function: sigmoid map.
+ float indicator0 =
+ 0.5f * (tanh(width_prior * (model.lrt - prior_model.lrt)) + 1.f);
+
+ // Spectral flatness feature: use larger width in tanh map for pause regions.
+ width_prior = model.spectral_flatness > prior_model.flatness_threshold
+ ? kWidthPrior1
+ : kWidthPrior0;
+
+ // Compute indicator function: sigmoid map.
+ float indicator1 =
+ 0.5f * (tanh(1.f * width_prior *
+ (prior_model.flatness_threshold - model.spectral_flatness)) +
+ 1.f);
+
+  // For the template spectrum difference: use larger width in tanh map for
+  // pause regions.
+ width_prior = model.spectral_diff < prior_model.template_diff_threshold
+ ? kWidthPrior1
+ : kWidthPrior0;
+
+ // Compute indicator function: sigmoid map.
+ float indicator2 =
+ 0.5f * (tanh(width_prior * (model.spectral_diff -
+ prior_model.template_diff_threshold)) +
+ 1.f);
+
+ // Combine the indicator function with the feature weights.
+ float ind_prior = prior_model.lrt_weighting * indicator0 +
+ prior_model.flatness_weighting * indicator1 +
+ prior_model.difference_weighting * indicator2;
+
+ // Compute the prior probability.
+ prior_speech_prob_ += 0.1f * (ind_prior - prior_speech_prob_);
+
+  // Make sure probabilities are within range: keep a floor of 0.01.
+ prior_speech_prob_ = std::max(std::min(prior_speech_prob_, 1.f), 0.01f);
+
+  // Final speech probability: combine the prior model with the LR factor.
+ float gain_prior =
+ (1.f - prior_speech_prob_) / (prior_speech_prob_ + 0.0001f);
+
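+  // Per-bin posterior: p(speech) = 1 / (1 + ((1 - p) / p) * exp(-avg_log_lrt))
+  // with p the prior speech probability, i.e. the prior odds combined with
+  // the per-bin likelihood ratio.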
+ std::array<float, kFftSizeBy2Plus1> inv_lrt;
+ ExpApproximationSignFlip(model.avg_log_lrt, inv_lrt);
+ for (size_t i = 0; i < kFftSizeBy2Plus1; ++i) {
+ speech_probability_[i] = 1.f / (1.f + gain_prior * inv_lrt[i]);
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/ns/speech_probability_estimator.h b/third_party/libwebrtc/modules/audio_processing/ns/speech_probability_estimator.h
new file mode 100644
index 0000000000..259c3b6776
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/ns/speech_probability_estimator.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_NS_SPEECH_PROBABILITY_ESTIMATOR_H_
+#define MODULES_AUDIO_PROCESSING_NS_SPEECH_PROBABILITY_ESTIMATOR_H_
+
+#include <array>
+
+#include "api/array_view.h"
+#include "modules/audio_processing/ns/ns_common.h"
+#include "modules/audio_processing/ns/signal_model_estimator.h"
+
+namespace webrtc {
+
+// Class for estimating the probability of speech.
+class SpeechProbabilityEstimator {
+ public:
+ SpeechProbabilityEstimator();
+ SpeechProbabilityEstimator(const SpeechProbabilityEstimator&) = delete;
+ SpeechProbabilityEstimator& operator=(const SpeechProbabilityEstimator&) =
+ delete;
+
+ // Compute speech probability.
+ void Update(
+ int32_t num_analyzed_frames,
+ rtc::ArrayView<const float, kFftSizeBy2Plus1> prior_snr,
+ rtc::ArrayView<const float, kFftSizeBy2Plus1> post_snr,
+ rtc::ArrayView<const float, kFftSizeBy2Plus1> conservative_noise_spectrum,
+ rtc::ArrayView<const float, kFftSizeBy2Plus1> signal_spectrum,
+ float signal_spectral_sum,
+ float signal_energy);
+
+ float get_prior_probability() const { return prior_speech_prob_; }
+ rtc::ArrayView<const float> get_probability() { return speech_probability_; }
+
+ private:
+ SignalModelEstimator signal_model_estimator_;
+ float prior_speech_prob_ = .5f;
+ std::array<float, kFftSizeBy2Plus1> speech_probability_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_NS_SPEECH_PROBABILITY_ESTIMATOR_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/ns/suppression_params.cc b/third_party/libwebrtc/modules/audio_processing/ns/suppression_params.cc
new file mode 100644
index 0000000000..7bf18346f9
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/ns/suppression_params.cc
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/ns/suppression_params.h"
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+SuppressionParams::SuppressionParams(
+ NsConfig::SuppressionLevel suppression_level) {
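+  // The attenuation figures below follow from
+  // 20 * log10(minimum_attenuating_gain), e.g. 20 * log10(0.5f) = -6.02 dB.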
+ switch (suppression_level) {
+ case NsConfig::SuppressionLevel::k6dB:
+ over_subtraction_factor = 1.f;
+ // 6 dB attenuation.
+ minimum_attenuating_gain = 0.5f;
+ use_attenuation_adjustment = false;
+ break;
+ case NsConfig::SuppressionLevel::k12dB:
+ over_subtraction_factor = 1.f;
+ // 12 dB attenuation.
+ minimum_attenuating_gain = 0.25f;
+ use_attenuation_adjustment = true;
+ break;
+ case NsConfig::SuppressionLevel::k18dB:
+ over_subtraction_factor = 1.1f;
+ // 18 dB attenuation.
+ minimum_attenuating_gain = 0.125f;
+ use_attenuation_adjustment = true;
+ break;
+ case NsConfig::SuppressionLevel::k21dB:
+ over_subtraction_factor = 1.25f;
+ // 20.9 dB attenuation.
+ minimum_attenuating_gain = 0.09f;
+ use_attenuation_adjustment = true;
+ break;
+ default:
+ RTC_DCHECK_NOTREACHED();
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/ns/suppression_params.h b/third_party/libwebrtc/modules/audio_processing/ns/suppression_params.h
new file mode 100644
index 0000000000..ad11977d81
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/ns/suppression_params.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_NS_SUPPRESSION_PARAMS_H_
+#define MODULES_AUDIO_PROCESSING_NS_SUPPRESSION_PARAMS_H_
+
+#include "modules/audio_processing/ns/ns_config.h"
+
+namespace webrtc {
+
+struct SuppressionParams {
+ explicit SuppressionParams(NsConfig::SuppressionLevel suppression_level);
+ SuppressionParams(const SuppressionParams&) = delete;
+ SuppressionParams& operator=(const SuppressionParams&) = delete;
+
+ float over_subtraction_factor;
+ float minimum_attenuating_gain;
+ bool use_attenuation_adjustment;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_NS_SUPPRESSION_PARAMS_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/ns/wiener_filter.cc b/third_party/libwebrtc/modules/audio_processing/ns/wiener_filter.cc
new file mode 100644
index 0000000000..e14b7970d9
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/ns/wiener_filter.cc
@@ -0,0 +1,120 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/ns/wiener_filter.h"
+
+#include <math.h>
+#include <stdlib.h>
+#include <string.h>
+#include <algorithm>
+
+#include "modules/audio_processing/ns/fast_math.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+WienerFilter::WienerFilter(const SuppressionParams& suppression_params)
+ : suppression_params_(suppression_params) {
+ filter_.fill(1.f);
+ initial_spectral_estimate_.fill(0.f);
+ spectrum_prev_process_.fill(0.f);
+}
+
+void WienerFilter::Update(
+ int32_t num_analyzed_frames,
+ rtc::ArrayView<const float, kFftSizeBy2Plus1> noise_spectrum,
+ rtc::ArrayView<const float, kFftSizeBy2Plus1> prev_noise_spectrum,
+ rtc::ArrayView<const float, kFftSizeBy2Plus1> parametric_noise_spectrum,
+ rtc::ArrayView<const float, kFftSizeBy2Plus1> signal_spectrum) {
+ for (size_t i = 0; i < kFftSizeBy2Plus1; ++i) {
+ // Previous estimate based on previous frame with gain filter.
+ float prev_tsa = spectrum_prev_process_[i] /
+ (prev_noise_spectrum[i] + 0.0001f) * filter_[i];
+
+ // Current estimate.
+ float current_tsa;
+ if (signal_spectrum[i] > noise_spectrum[i]) {
+ current_tsa = signal_spectrum[i] / (noise_spectrum[i] + 0.0001f) - 1.f;
+ } else {
+ current_tsa = 0.f;
+ }
+
+    // Decision-directed estimate: a weighted sum of the previous-frame
+    // estimate and the current instantaneous estimate.
+ float snr_prior = 0.98f * prev_tsa + (1.f - 0.98f) * current_tsa;
+ filter_[i] =
+ snr_prior / (suppression_params_.over_subtraction_factor + snr_prior);
+ filter_[i] = std::max(std::min(filter_[i], 1.f),
+ suppression_params_.minimum_attenuating_gain);
+ }
+
+ if (num_analyzed_frames < kShortStartupPhaseBlocks) {
+ for (size_t i = 0; i < kFftSizeBy2Plus1; ++i) {
+ initial_spectral_estimate_[i] += signal_spectrum[i];
+ float filter_initial = initial_spectral_estimate_[i] -
+ suppression_params_.over_subtraction_factor *
+ parametric_noise_spectrum[i];
+ filter_initial /= initial_spectral_estimate_[i] + 0.0001f;
+
+ filter_initial = std::max(std::min(filter_initial, 1.f),
+ suppression_params_.minimum_attenuating_gain);
+
+ // Weight the two suppression filters.
+      constexpr float kOneByShortStartupPhaseBlocks =
+          1.f / kShortStartupPhaseBlocks;
+ filter_initial *= kShortStartupPhaseBlocks - num_analyzed_frames;
+ filter_[i] *= num_analyzed_frames;
+ filter_[i] += filter_initial;
+      filter_[i] *= kOneByShortStartupPhaseBlocks;
+ }
+ }
+
+ std::copy(signal_spectrum.begin(), signal_spectrum.end(),
+ spectrum_prev_process_.begin());
+}
+
+float WienerFilter::ComputeOverallScalingFactor(
+ int32_t num_analyzed_frames,
+ float prior_speech_probability,
+ float energy_before_filtering,
+ float energy_after_filtering) const {
+ if (!suppression_params_.use_attenuation_adjustment ||
+ num_analyzed_frames <= kLongStartupPhaseBlocks) {
+ return 1.f;
+ }
+
+ float gain = SqrtFastApproximation(energy_after_filtering /
+ (energy_before_filtering + 1.f));
+
+  // Threshold used in the final energy gain factor calculation.
+ constexpr float kBLim = 0.5f;
+ float scale_factor1 = 1.f;
+ if (gain > kBLim) {
+ scale_factor1 = 1.f + 1.3f * (gain - kBLim);
+ if (gain * scale_factor1 > 1.f) {
+ scale_factor1 = 1.f / gain;
+ }
+ }
+
+ float scale_factor2 = 1.f;
+ if (gain < kBLim) {
+ // Do not reduce scale too much for pause regions: attenuation here should
+ // be controlled by flooring.
+ gain = std::max(gain, suppression_params_.minimum_attenuating_gain);
+ scale_factor2 = 1.f - 0.3f * (kBLim - gain);
+ }
+
+  // Combine both scales with the speech/noise probability: note that the
+  // prior (prior_speech_probability) is not frequency dependent.
+ return prior_speech_probability * scale_factor1 +
+ (1.f - prior_speech_probability) * scale_factor2;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/ns/wiener_filter.h b/third_party/libwebrtc/modules/audio_processing/ns/wiener_filter.h
new file mode 100644
index 0000000000..b55c5dc59d
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/ns/wiener_filter.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_NS_WIENER_FILTER_H_
+#define MODULES_AUDIO_PROCESSING_NS_WIENER_FILTER_H_
+
+#include <array>
+
+#include "api/array_view.h"
+#include "modules/audio_processing/ns/ns_common.h"
+#include "modules/audio_processing/ns/suppression_params.h"
+
+namespace webrtc {
+
+// Estimates a Wiener-filter based frequency domain noise reduction filter.
+class WienerFilter {
+ public:
+ explicit WienerFilter(const SuppressionParams& suppression_params);
+ WienerFilter(const WienerFilter&) = delete;
+ WienerFilter& operator=(const WienerFilter&) = delete;
+
+ // Updates the filter estimate.
+ void Update(
+ int32_t num_analyzed_frames,
+ rtc::ArrayView<const float, kFftSizeBy2Plus1> noise_spectrum,
+ rtc::ArrayView<const float, kFftSizeBy2Plus1> prev_noise_spectrum,
+ rtc::ArrayView<const float, kFftSizeBy2Plus1> parametric_noise_spectrum,
+ rtc::ArrayView<const float, kFftSizeBy2Plus1> signal_spectrum);
+
+ // Compute an overall gain scaling factor.
+ float ComputeOverallScalingFactor(int32_t num_analyzed_frames,
+ float prior_speech_probability,
+ float energy_before_filtering,
+ float energy_after_filtering) const;
+
+ // Returns the filter.
+ rtc::ArrayView<const float, kFftSizeBy2Plus1> get_filter() const {
+ return filter_;
+ }
+
+ private:
+ const SuppressionParams& suppression_params_;
+ std::array<float, kFftSizeBy2Plus1> spectrum_prev_process_;
+ std::array<float, kFftSizeBy2Plus1> initial_spectral_estimate_;
+ std::array<float, kFftSizeBy2Plus1> filter_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_NS_WIENER_FILTER_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/optionally_built_submodule_creators.cc b/third_party/libwebrtc/modules/audio_processing/optionally_built_submodule_creators.cc
new file mode 100644
index 0000000000..cea5c837dc
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/optionally_built_submodule_creators.cc
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/optionally_built_submodule_creators.h"
+
+#include <memory>
+
+#include "modules/audio_processing/transient/transient_suppressor_impl.h"
+
+namespace webrtc {
+
+std::unique_ptr<TransientSuppressor> CreateTransientSuppressor(
+ const ApmSubmoduleCreationOverrides& overrides,
+ TransientSuppressor::VadMode vad_mode,
+ int sample_rate_hz,
+ int detection_rate_hz,
+ int num_channels) {
+#ifdef WEBRTC_EXCLUDE_TRANSIENT_SUPPRESSOR
+ return nullptr;
+#else
+ if (overrides.transient_suppression) {
+ return nullptr;
+ }
+ return std::make_unique<TransientSuppressorImpl>(
+ vad_mode, sample_rate_hz, detection_rate_hz, num_channels);
+#endif
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/optionally_built_submodule_creators.h b/third_party/libwebrtc/modules/audio_processing/optionally_built_submodule_creators.h
new file mode 100644
index 0000000000..1be2743986
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/optionally_built_submodule_creators.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_OPTIONALLY_BUILT_SUBMODULE_CREATORS_H_
+#define MODULES_AUDIO_PROCESSING_OPTIONALLY_BUILT_SUBMODULE_CREATORS_H_
+
+#include <memory>
+
+#include "modules/audio_processing/transient/transient_suppressor.h"
+
+namespace webrtc {
+
+// These overrides are only to be used for testing purposes.
+// Each flag emulates a preprocessor macro to exclude a submodule of APM from
+// the build, e.g. WEBRTC_EXCLUDE_TRANSIENT_SUPPRESSOR. If the corresponding
+// flag `transient_suppression` is enabled, then the creators will return
+// nullptr instead of a submodule instance, as if the macro had been defined.
+struct ApmSubmoduleCreationOverrides {
+ bool transient_suppression = false;
+};
+
+// Creates a transient suppressor.
+// Returns nullptr instead if one of the following is true:
+// * WEBRTC_EXCLUDE_TRANSIENT_SUPPRESSOR is defined.
+// * The corresponding override in `overrides` is enabled.
+std::unique_ptr<TransientSuppressor> CreateTransientSuppressor(
+ const ApmSubmoduleCreationOverrides& overrides,
+ TransientSuppressor::VadMode vad_mode,
+ int sample_rate_hz,
+ int detection_rate_hz,
+ int num_channels);
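+
+// Editorial example, not in the original header: a test can force the
+// excluded-submodule path without defining the macro. The rates, channel
+// count and the VadMode enumerator below are illustrative assumptions.
+//
+//   ApmSubmoduleCreationOverrides overrides;
+//   overrides.transient_suppression = true;
+//   std::unique_ptr<TransientSuppressor> ts = CreateTransientSuppressor(
+//       overrides, TransientSuppressor::VadMode::kDefault,
+//       /*sample_rate_hz=*/48000, /*detection_rate_hz=*/48000,
+//       /*num_channels=*/1);
+//   // ts == nullptr, as if WEBRTC_EXCLUDE_TRANSIENT_SUPPRESSOR were defined.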
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_OPTIONALLY_BUILT_SUBMODULE_CREATORS_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/optionally_built_submodule_creators_gn/moz.build b/third_party/libwebrtc/modules/audio_processing/optionally_built_submodule_creators_gn/moz.build
new file mode 100644
index 0000000000..b32a08b8bc
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/optionally_built_submodule_creators_gn/moz.build
@@ -0,0 +1,212 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/audio_processing/optionally_built_submodule_creators.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("optionally_built_submodule_creators_gn")
diff --git a/third_party/libwebrtc/modules/audio_processing/render_queue_item_verifier.h b/third_party/libwebrtc/modules/audio_processing/render_queue_item_verifier.h
new file mode 100644
index 0000000000..b8aff4a107
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/render_queue_item_verifier.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_RENDER_QUEUE_ITEM_VERIFIER_H_
+#define MODULES_AUDIO_PROCESSING_RENDER_QUEUE_ITEM_VERIFIER_H_
+
+#include <vector>
+
+namespace webrtc {
+
+// Functor to use when supplying a verifier function for queue item
+// verification.
+template <typename T>
+class RenderQueueItemVerifier {
+ public:
+ explicit RenderQueueItemVerifier(size_t minimum_capacity)
+ : minimum_capacity_(minimum_capacity) {}
+
+ bool operator()(const std::vector<T>& v) const {
+ return v.capacity() >= minimum_capacity_;
+ }
+
+ private:
+ size_t minimum_capacity_;
+};
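+
+// Editorial usage sketch, not part of the original header: the functor is
+// meant to be supplied to a swap-queue style container so that popped vectors
+// are verified to have kept their preallocated capacity. The capacity value
+// below is an illustrative assumption.
+//
+//   RenderQueueItemVerifier<float> verifier(/*minimum_capacity=*/160);
+//   std::vector<float> item(160);
+//   bool ok = verifier(item);  // true, since item.capacity() >= 160.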
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_RENDER_QUEUE_ITEM_VERIFIER_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/residual_echo_detector.cc b/third_party/libwebrtc/modules/audio_processing/residual_echo_detector.cc
new file mode 100644
index 0000000000..2a564fc233
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/residual_echo_detector.cc
@@ -0,0 +1,205 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/residual_echo_detector.h"
+
+#include <algorithm>
+#include <numeric>
+
+#include "absl/types/optional.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "system_wrappers/include/metrics.h"
+
+namespace {
+
+float Power(rtc::ArrayView<const float> input) {
+ if (input.empty()) {
+ return 0.f;
+ }
+ return std::inner_product(input.begin(), input.end(), input.begin(), 0.f) /
+ input.size();
+}
+
+constexpr size_t kLookbackFrames = 650;
+// TODO(ivoc): Verify the size of this buffer.
+constexpr size_t kRenderBufferSize = 30;
+constexpr float kAlpha = 0.001f;
+// 10 seconds of data, updated every 10 ms.
+constexpr size_t kAggregationBufferSize = 10 * 100;
+
+} // namespace
+
+namespace webrtc {
+
+std::atomic<int> ResidualEchoDetector::instance_count_(0);
+
+ResidualEchoDetector::ResidualEchoDetector()
+ : data_dumper_(new ApmDataDumper(instance_count_.fetch_add(1) + 1)),
+ render_buffer_(kRenderBufferSize),
+ render_power_(kLookbackFrames),
+ render_power_mean_(kLookbackFrames),
+ render_power_std_dev_(kLookbackFrames),
+ covariances_(kLookbackFrames),
+ recent_likelihood_max_(kAggregationBufferSize) {}
+
+ResidualEchoDetector::~ResidualEchoDetector() = default;
+
+void ResidualEchoDetector::AnalyzeRenderAudio(
+ rtc::ArrayView<const float> render_audio) {
+ // Dump debug data assuming 48 kHz sample rate (if this assumption is not
+ // valid the dumped audio will need to be converted offline accordingly).
+ data_dumper_->DumpWav("ed_render", render_audio.size(), render_audio.data(),
+ 48000, 1);
+
+ if (render_buffer_.Size() == 0) {
+ frames_since_zero_buffer_size_ = 0;
+ } else if (frames_since_zero_buffer_size_ >= kRenderBufferSize) {
+ // This can happen in a few cases: at the start of a call, due to a glitch
+ // or due to clock drift. The excess capture value will be ignored.
+ // TODO(ivoc): Include how often this happens in APM stats.
+ render_buffer_.Pop();
+ frames_since_zero_buffer_size_ = 0;
+ }
+ ++frames_since_zero_buffer_size_;
+ float power = Power(render_audio);
+ render_buffer_.Push(power);
+}
+
+void ResidualEchoDetector::AnalyzeCaptureAudio(
+ rtc::ArrayView<const float> capture_audio) {
+ // Dump debug data assuming 48 kHz sample rate (if this assumption is not
+ // valid the dumped audio will need to be converted offline accordingly).
+ data_dumper_->DumpWav("ed_capture", capture_audio.size(),
+ capture_audio.data(), 48000, 1);
+
+ if (first_process_call_) {
+ // On the first process call (so the start of a call), we must flush the
+ // render buffer; otherwise the render data will be delayed.
+ render_buffer_.Clear();
+ first_process_call_ = false;
+ }
+
+ // Get the next render value.
+ const absl::optional<float> buffered_render_power = render_buffer_.Pop();
+ if (!buffered_render_power) {
+ // This can happen in a few cases: at the start of a call, due to a glitch
+ // or due to clock drift. The excess capture value will be ignored.
+ // TODO(ivoc): Include how often this happens in APM stats.
+ return;
+ }
+ // Update the render statistics, and store the statistics in circular
+ // buffers.
+ render_statistics_.Update(*buffered_render_power);
+ RTC_DCHECK_LT(next_insertion_index_, kLookbackFrames);
+ render_power_[next_insertion_index_] = *buffered_render_power;
+ render_power_mean_[next_insertion_index_] = render_statistics_.mean();
+ render_power_std_dev_[next_insertion_index_] =
+ render_statistics_.std_deviation();
+
+ // Get the next capture value, update capture statistics and add the relevant
+ // values to the buffers.
+ const float capture_power = Power(capture_audio);
+ capture_statistics_.Update(capture_power);
+ const float capture_mean = capture_statistics_.mean();
+ const float capture_std_deviation = capture_statistics_.std_deviation();
+
+ // Update the covariance values and determine the new echo likelihood.
+ echo_likelihood_ = 0.f;
+ size_t read_index = next_insertion_index_;
+
+ int best_delay = -1;
+ for (size_t delay = 0; delay < covariances_.size(); ++delay) {
+ RTC_DCHECK_LT(read_index, render_power_.size());
+ covariances_[delay].Update(capture_power, capture_mean,
+ capture_std_deviation, render_power_[read_index],
+ render_power_mean_[read_index],
+ render_power_std_dev_[read_index]);
+ read_index = read_index > 0 ? read_index - 1 : kLookbackFrames - 1;
+
+ if (covariances_[delay].normalized_cross_correlation() > echo_likelihood_) {
+ echo_likelihood_ = covariances_[delay].normalized_cross_correlation();
+ best_delay = static_cast<int>(delay);
+ }
+ }
+ // This is a temporary log message to help find the underlying cause for echo
+ // likelihoods > 1.0.
+ // TODO(ivoc): Remove once the issue is resolved.
+ if (echo_likelihood_ > 1.1f) {
+ // Make sure we don't spam the log.
+ if (log_counter_ < 5 && best_delay != -1) {
+ size_t read_index = kLookbackFrames + next_insertion_index_ - best_delay;
+ if (read_index >= kLookbackFrames) {
+ read_index -= kLookbackFrames;
+ }
+ RTC_DCHECK_LT(read_index, render_power_.size());
+ RTC_LOG_F(LS_ERROR) << "Echo detector internal state: {"
+ "Echo likelihood: "
+ << echo_likelihood_ << ", Best Delay: " << best_delay
+ << ", Covariance: "
+ << covariances_[best_delay].covariance()
+ << ", Last capture power: " << capture_power
+ << ", Capture mean: " << capture_mean
+ << ", Capture_standard deviation: "
+ << capture_std_deviation << ", Last render power: "
+ << render_power_[read_index]
+ << ", Render mean: " << render_power_mean_[read_index]
+ << ", Render standard deviation: "
+ << render_power_std_dev_[read_index]
+ << ", Reliability: " << reliability_ << "}";
+ log_counter_++;
+ }
+ }
+ RTC_DCHECK_LT(echo_likelihood_, 1.1f);
+
+ reliability_ = (1.0f - kAlpha) * reliability_ + kAlpha * 1.0f;
+ echo_likelihood_ *= reliability_;
+ // This is a temporary fix to prevent echo likelihood values > 1.0.
+ // TODO(ivoc): Find the root cause of this issue and fix it.
+ echo_likelihood_ = std::min(echo_likelihood_, 1.0f);
+ int echo_percentage = static_cast<int>(echo_likelihood_ * 100);
+ RTC_HISTOGRAM_COUNTS("WebRTC.Audio.ResidualEchoDetector.EchoLikelihood",
+ echo_percentage, 0, 100, 100 /* number of bins */);
+
+ // Update the buffer of recent likelihood values.
+ recent_likelihood_max_.Update(echo_likelihood_);
+
+ // Update the next insertion index.
+ next_insertion_index_ = next_insertion_index_ < (kLookbackFrames - 1)
+ ? next_insertion_index_ + 1
+ : 0;
+}
+
+void ResidualEchoDetector::Initialize(int /*capture_sample_rate_hz*/,
+ int /*num_capture_channels*/,
+ int /*render_sample_rate_hz*/,
+ int /*num_render_channels*/) {
+ render_buffer_.Clear();
+ std::fill(render_power_.begin(), render_power_.end(), 0.f);
+ std::fill(render_power_mean_.begin(), render_power_mean_.end(), 0.f);
+ std::fill(render_power_std_dev_.begin(), render_power_std_dev_.end(), 0.f);
+ render_statistics_.Clear();
+ capture_statistics_.Clear();
+ recent_likelihood_max_.Clear();
+ for (auto& cov : covariances_) {
+ cov.Clear();
+ }
+ echo_likelihood_ = 0.f;
+ next_insertion_index_ = 0;
+ reliability_ = 0.f;
+}
+
+EchoDetector::Metrics ResidualEchoDetector::GetMetrics() const {
+ EchoDetector::Metrics metrics;
+ metrics.echo_likelihood = echo_likelihood_;
+ metrics.echo_likelihood_recent_max = recent_likelihood_max_.max();
+ return metrics;
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/residual_echo_detector.h b/third_party/libwebrtc/modules/audio_processing/residual_echo_detector.h
new file mode 100644
index 0000000000..ac554b17c4
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/residual_echo_detector.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_RESIDUAL_ECHO_DETECTOR_H_
+#define MODULES_AUDIO_PROCESSING_RESIDUAL_ECHO_DETECTOR_H_
+
+#include <atomic>
+#include <vector>
+
+#include "api/array_view.h"
+#include "modules/audio_processing/echo_detector/circular_buffer.h"
+#include "modules/audio_processing/echo_detector/mean_variance_estimator.h"
+#include "modules/audio_processing/echo_detector/moving_max.h"
+#include "modules/audio_processing/echo_detector/normalized_covariance_estimator.h"
+#include "modules/audio_processing/include/audio_processing.h"
+
+namespace webrtc {
+
+class ApmDataDumper;
+class AudioBuffer;
+
+class ResidualEchoDetector : public EchoDetector {
+ public:
+ ResidualEchoDetector();
+ ~ResidualEchoDetector() override;
+
+ // This function should be called while holding the render lock.
+ void AnalyzeRenderAudio(rtc::ArrayView<const float> render_audio) override;
+
+ // This function should be called while holding the capture lock.
+ void AnalyzeCaptureAudio(rtc::ArrayView<const float> capture_audio) override;
+
+ // This function should be called while holding the capture lock.
+ void Initialize(int capture_sample_rate_hz,
+ int num_capture_channels,
+ int render_sample_rate_hz,
+ int num_render_channels) override;
+
+ // This function is for testing purposes only.
+ void SetReliabilityForTest(float value) { reliability_ = value; }
+
+ // This function should be called while holding the capture lock.
+ EchoDetector::Metrics GetMetrics() const override;
+
+ private:
+ static std::atomic<int> instance_count_;
+ std::unique_ptr<ApmDataDumper> data_dumper_;
+ // Tracks whether the `Process` function has been called before.
+ bool first_process_call_ = true;
+ // Buffer for storing the power of incoming farend buffers. This is needed
+ // for cases where calls to BufferFarend and Process are jittery.
+ CircularBuffer render_buffer_;
+ // Counts the number of frames since the size of `render_buffer_` was last
+ // zero. This value is also reset to zero when clock drift is detected and a
+ // value from the render buffer is discarded, even though the buffer is not
+ // actually zero at that point. This is done to avoid repeatedly removing
+ // elements in this situation.
+ size_t frames_since_zero_buffer_size_ = 0;
+
+ // Circular buffers containing delayed versions of the power, mean and
+ // standard deviation, for calculating the delayed covariance values.
+ std::vector<float> render_power_;
+ std::vector<float> render_power_mean_;
+ std::vector<float> render_power_std_dev_;
+ // Covariance estimates for different delay values.
+ std::vector<NormalizedCovarianceEstimator> covariances_;
+ // Index where next element should be inserted in all of the above circular
+ // buffers.
+ size_t next_insertion_index_ = 0;
+
+ MeanVarianceEstimator render_statistics_;
+ MeanVarianceEstimator capture_statistics_;
+ // Current echo likelihood.
+ float echo_likelihood_ = 0.f;
+ // Reliability of the current likelihood.
+ float reliability_ = 0.f;
+ MovingMax recent_likelihood_max_;
+
+ int log_counter_ = 0;
+};
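+
+// Editorial sketch of the expected call pattern, not part of the original
+// header; the 10 ms, 16 kHz frame size is an illustrative assumption:
+//
+//   auto detector = rtc::make_ref_counted<ResidualEchoDetector>();
+//   std::vector<float> render_frame(160);
+//   std::vector<float> capture_frame(160);
+//   detector->AnalyzeRenderAudio(render_frame);    // With the render lock.
+//   detector->AnalyzeCaptureAudio(capture_frame);  // With the capture lock.
+//   auto likelihood = detector->GetMetrics().echo_likelihood;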
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_RESIDUAL_ECHO_DETECTOR_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/residual_echo_detector_unittest.cc b/third_party/libwebrtc/modules/audio_processing/residual_echo_detector_unittest.cc
new file mode 100644
index 0000000000..d8c227a443
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/residual_echo_detector_unittest.cc
@@ -0,0 +1,138 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/residual_echo_detector.h"
+
+#include <vector>
+
+#include "api/make_ref_counted.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+TEST(ResidualEchoDetectorTests, Echo) {
+ auto echo_detector = rtc::make_ref_counted<ResidualEchoDetector>();
+ echo_detector->SetReliabilityForTest(1.0f);
+ std::vector<float> ones(160, 1.f);
+ std::vector<float> zeros(160, 0.f);
+
+ // In this test the capture signal has a delay of 10 frames w.r.t. the render
+ // signal, but is otherwise identical. Both signals are periodic with a 20
+ // frame interval.
+ for (int i = 0; i < 1000; i++) {
+ if (i % 20 == 0) {
+ echo_detector->AnalyzeRenderAudio(ones);
+ echo_detector->AnalyzeCaptureAudio(zeros);
+ } else if (i % 20 == 10) {
+ echo_detector->AnalyzeRenderAudio(zeros);
+ echo_detector->AnalyzeCaptureAudio(ones);
+ } else {
+ echo_detector->AnalyzeRenderAudio(zeros);
+ echo_detector->AnalyzeCaptureAudio(zeros);
+ }
+ }
+ // We expect to detect echo with near certain likelihood.
+ auto ed_metrics = echo_detector->GetMetrics();
+ ASSERT_TRUE(ed_metrics.echo_likelihood);
+ EXPECT_NEAR(1.f, ed_metrics.echo_likelihood.value(), 0.01f);
+}
+
+TEST(ResidualEchoDetectorTests, NoEcho) {
+ auto echo_detector = rtc::make_ref_counted<ResidualEchoDetector>();
+ echo_detector->SetReliabilityForTest(1.0f);
+ std::vector<float> ones(160, 1.f);
+ std::vector<float> zeros(160, 0.f);
+
+ // In this test the capture signal is always zero, so no echo should be
+ // detected.
+ for (int i = 0; i < 1000; i++) {
+ if (i % 20 == 0) {
+ echo_detector->AnalyzeRenderAudio(ones);
+ } else {
+ echo_detector->AnalyzeRenderAudio(zeros);
+ }
+ echo_detector->AnalyzeCaptureAudio(zeros);
+ }
+ // We expect to not detect any echo.
+ auto ed_metrics = echo_detector->GetMetrics();
+ ASSERT_TRUE(ed_metrics.echo_likelihood);
+ EXPECT_NEAR(0.f, ed_metrics.echo_likelihood.value(), 0.01f);
+}
+
+TEST(ResidualEchoDetectorTests, EchoWithRenderClockDrift) {
+ auto echo_detector = rtc::make_ref_counted<ResidualEchoDetector>();
+ echo_detector->SetReliabilityForTest(1.0f);
+ std::vector<float> ones(160, 1.f);
+ std::vector<float> zeros(160, 0.f);
+
+ // In this test the capture signal has a delay of 10 frames w.r.t. the render
+ // signal, but is otherwise identical. Both signals are periodic with a 20
+ // frame interval. There is a simulated clock drift of 1% in this test, with
+ // the render side producing data slightly faster.
+ for (int i = 0; i < 1000; i++) {
+ if (i % 20 == 0) {
+ echo_detector->AnalyzeRenderAudio(ones);
+ echo_detector->AnalyzeCaptureAudio(zeros);
+ } else if (i % 20 == 10) {
+ echo_detector->AnalyzeRenderAudio(zeros);
+ echo_detector->AnalyzeCaptureAudio(ones);
+ } else {
+ echo_detector->AnalyzeRenderAudio(zeros);
+ echo_detector->AnalyzeCaptureAudio(zeros);
+ }
+ if (i % 100 == 0) {
+ // This is causing the simulated clock drift.
+ echo_detector->AnalyzeRenderAudio(zeros);
+ }
+ }
+ // We expect to detect echo with high likelihood. Clock drift is harder to
+ // correct on the render side than on the capture side because of the render
+ // buffer: clock drift can only be discovered after a certain delay. A
+ // growing buffer can be caused by jitter or by clock drift, and it is not
+ // possible to tell the two apart right away. For this reason we only expect
+ // an echo likelihood above 75% in this test.
+ auto ed_metrics = echo_detector->GetMetrics();
+ ASSERT_TRUE(ed_metrics.echo_likelihood);
+ EXPECT_GT(ed_metrics.echo_likelihood.value(), 0.75f);
+}
+
+TEST(ResidualEchoDetectorTests, EchoWithCaptureClockDrift) {
+ auto echo_detector = rtc::make_ref_counted<ResidualEchoDetector>();
+ echo_detector->SetReliabilityForTest(1.0f);
+ std::vector<float> ones(160, 1.f);
+ std::vector<float> zeros(160, 0.f);
+
+ // In this test the capture signal has a delay of 10 frames w.r.t. the render
+ // signal, but is otherwise identical. Both signals are periodic with a 20
+ // frame interval. There is a simulated clock drift of 1% in this test, with
+ // the capture side producing data slightly faster.
+ for (int i = 0; i < 1000; i++) {
+ if (i % 20 == 0) {
+ echo_detector->AnalyzeRenderAudio(ones);
+ echo_detector->AnalyzeCaptureAudio(zeros);
+ } else if (i % 20 == 10) {
+ echo_detector->AnalyzeRenderAudio(zeros);
+ echo_detector->AnalyzeCaptureAudio(ones);
+ } else {
+ echo_detector->AnalyzeRenderAudio(zeros);
+ echo_detector->AnalyzeCaptureAudio(zeros);
+ }
+ if (i % 100 == 0) {
+ // This is causing the simulated clock drift.
+ echo_detector->AnalyzeCaptureAudio(zeros);
+ }
+ }
+ // We expect to detect echo with near certain likelihood.
+ auto ed_metrics = echo_detector->GetMetrics();
+ ASSERT_TRUE(ed_metrics.echo_likelihood);
+ EXPECT_NEAR(1.f, ed_metrics.echo_likelihood.value(), 0.01f);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/rms_level.cc b/third_party/libwebrtc/modules/audio_processing/rms_level.cc
new file mode 100644
index 0000000000..b0a45cb403
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/rms_level.cc
@@ -0,0 +1,138 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/rms_level.h"
+
+#include <algorithm>
+#include <cmath>
+#include <numeric>
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace {
+static constexpr float kMaxSquaredLevel = 32768 * 32768;
+// kMinLevel is the level corresponding to kMinLevelDb, that is 10^(-127/10).
+static constexpr float kMinLevel = 1.995262314968883e-13f;
+
+// Calculates the normalized RMS value from a mean square value. The input
+// should be the sum of squared samples divided by the number of samples. The
+// value will be normalized to full range before computing the RMS, which is
+// returned as a negated dBFS value. That is, 0 is full amplitude while 127 is
+// very faint.
+int ComputeRms(float mean_square) {
+ if (mean_square <= kMinLevel * kMaxSquaredLevel) {
+ // Very faint; simply return the minimum value.
+ return RmsLevel::kMinLevelDb;
+ }
+ // Normalize by the max level.
+ const float mean_square_norm = mean_square / kMaxSquaredLevel;
+ RTC_DCHECK_GT(mean_square_norm, kMinLevel);
+ // 20log_10(x^0.5) = 10log_10(x)
+ const float rms = 10.f * std::log10(mean_square_norm);
+ RTC_DCHECK_LE(rms, 0.f);
+ RTC_DCHECK_GT(rms, -RmsLevel::kMinLevelDb);
+ // Return the negated value.
+ return static_cast<int>(-rms + 0.5f);
+}
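+
+// Worked example (editorial note, not in the original source): a full-scale
+// sine has mean_square = 32768^2 / 2, so mean_square_norm = 0.5 and
+// rms = 10 * log10(0.5) = -3.01, returned as 3. A half-scale sine gives
+// mean_square_norm = 0.125 and rms = -9.03, returned as 9. Both match the
+// expectations in rms_level_unittest.cc.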
+} // namespace
+
+RmsLevel::RmsLevel() {
+ Reset();
+}
+
+RmsLevel::~RmsLevel() = default;
+
+void RmsLevel::Reset() {
+ sum_square_ = 0.f;
+ sample_count_ = 0;
+ max_sum_square_ = 0.f;
+ block_size_ = absl::nullopt;
+}
+
+void RmsLevel::Analyze(rtc::ArrayView<const int16_t> data) {
+ if (data.empty()) {
+ return;
+ }
+
+ CheckBlockSize(data.size());
+
+ const float sum_square =
+ std::accumulate(data.begin(), data.end(), 0.f,
+ [](float a, int16_t b) { return a + b * b; });
+ RTC_DCHECK_GE(sum_square, 0.f);
+ sum_square_ += sum_square;
+ sample_count_ += data.size();
+
+ max_sum_square_ = std::max(max_sum_square_, sum_square);
+}
+
+void RmsLevel::Analyze(rtc::ArrayView<const float> data) {
+ if (data.empty()) {
+ return;
+ }
+
+ CheckBlockSize(data.size());
+
+ float sum_square = 0.f;
+
+ for (float data_k : data) {
+ int16_t tmp =
+ static_cast<int16_t>(std::min(std::max(data_k, -32768.f), 32767.f));
+ sum_square += tmp * tmp;
+ }
+ RTC_DCHECK_GE(sum_square, 0.f);
+ sum_square_ += sum_square;
+ sample_count_ += data.size();
+
+ max_sum_square_ = std::max(max_sum_square_, sum_square);
+}
+
+void RmsLevel::AnalyzeMuted(size_t length) {
+ CheckBlockSize(length);
+ sample_count_ += length;
+}
+
+int RmsLevel::Average() {
+ const bool have_samples = (sample_count_ != 0);
+ int rms = have_samples ? ComputeRms(sum_square_ / sample_count_)
+ : RmsLevel::kMinLevelDb;
+
+ // To ensure that kMinLevelDb represents digital silence (muted audio
+ // sources) we'll check here if the sum_square is actually 0. If it's not
+ // we'll bump up the return value to `kInaudibleButNotMuted`.
+ // https://datatracker.ietf.org/doc/html/rfc6464
+ if (have_samples && rms == RmsLevel::kMinLevelDb && sum_square_ != 0.0f) {
+ rms = kInaudibleButNotMuted;
+ }
+
+ Reset();
+ return rms;
+}
+
+RmsLevel::Levels RmsLevel::AverageAndPeak() {
+ // Note that block_size_ should by design always be non-empty when
+ // sample_count_ != 0. Also, the * operator of absl::optional enforces this
+ // with a DCHECK.
+ Levels levels = (sample_count_ == 0)
+ ? Levels{RmsLevel::kMinLevelDb, RmsLevel::kMinLevelDb}
+ : Levels{ComputeRms(sum_square_ / sample_count_),
+ ComputeRms(max_sum_square_ / *block_size_)};
+ Reset();
+ return levels;
+}
+
+void RmsLevel::CheckBlockSize(size_t block_size) {
+ if (block_size_ != block_size) {
+ Reset();
+ block_size_ = block_size;
+ }
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/rms_level.h b/third_party/libwebrtc/modules/audio_processing/rms_level.h
new file mode 100644
index 0000000000..fbece19ecd
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/rms_level.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_RMS_LEVEL_H_
+#define MODULES_AUDIO_PROCESSING_RMS_LEVEL_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "absl/types/optional.h"
+#include "api/array_view.h"
+
+namespace webrtc {
+
+// Computes the root mean square (RMS) level in dBFS (decibels from digital
+// full-scale) of audio data. The computation follows RFC 6465:
+// https://tools.ietf.org/html/rfc6465
+// with the intent that it can provide the RTP audio level indication.
+//
+// The expected approach is to provide constant-sized chunks of audio to
+// Analyze(). When enough chunks have been accumulated to form a packet, call
+// Average() to get the audio level indicator for the RTP header.
+class RmsLevel {
+ public:
+ struct Levels {
+ int average;
+ int peak;
+ };
+
+ enum : int { kMinLevelDb = 127, kInaudibleButNotMuted = 126 };
+
+ RmsLevel();
+ ~RmsLevel();
+
+ // Can be called to reset internal states, but is not required during normal
+ // operation.
+ void Reset();
+
+ // Pass each chunk of audio to Analyze() to accumulate the level.
+ void Analyze(rtc::ArrayView<const int16_t> data);
+ void Analyze(rtc::ArrayView<const float> data);
+
+ // If all samples with the given `length` have a magnitude of zero, this is
+ // a shortcut to avoid some computation.
+ void AnalyzeMuted(size_t length);
+
+ // Computes the RMS level over all data passed to Analyze() since the last
+ // call to Average(). The returned value is positive but should be interpreted
+ // as negative as per the RFC. It is constrained to [0, 127]. Resets the
+ // internal state to start a new measurement period.
+ int Average();
+
+ // Like Average() above, but also returns the RMS peak value. Resets the
+ // internal state to start a new measurement period.
+ Levels AverageAndPeak();
+
+ private:
+ // Compares `block_size` with `block_size_`. If they are different, calls
+ // Reset() and stores the new size.
+ void CheckBlockSize(size_t block_size);
+
+ float sum_square_;
+ size_t sample_count_;
+ float max_sum_square_;
+ absl::optional<size_t> block_size_;
+};
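+
+// Editorial usage sketch, not from the original header, following the
+// chunk-then-average approach described above; `packet_chunks` and the
+// 10 ms at 48 kHz sizing are illustrative assumptions:
+//
+//   RmsLevel level;
+//   for (const std::vector<int16_t>& chunk : packet_chunks)
+//     level.Analyze(chunk);  // Constant-sized 480-sample chunks.
+//   int audio_level = level.Average();  // In [0, 127]; negated dBFS for RTP.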
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_RMS_LEVEL_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/rms_level_gn/moz.build b/third_party/libwebrtc/modules/audio_processing/rms_level_gn/moz.build
new file mode 100644
index 0000000000..cb54ed67d5
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/rms_level_gn/moz.build
@@ -0,0 +1,201 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+SOURCES += [
+ "/third_party/libwebrtc/modules/audio_processing/rms_level.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("rms_level_gn")
diff --git a/third_party/libwebrtc/modules/audio_processing/rms_level_unittest.cc b/third_party/libwebrtc/modules/audio_processing/rms_level_unittest.cc
new file mode 100644
index 0000000000..4cbad461e7
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/rms_level_unittest.cc
@@ -0,0 +1,197 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+// MSVC++ requires this to be set before any other includes to get M_PI.
+#define _USE_MATH_DEFINES
+#include "modules/audio_processing/rms_level.h"
+
+#include <cmath>
+#include <memory>
+#include <vector>
+
+#include "api/array_view.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+constexpr int kSampleRateHz = 48000;
+constexpr size_t kBlockSizeSamples = kSampleRateHz / 100;
+
+std::unique_ptr<RmsLevel> RunTest(rtc::ArrayView<const int16_t> input) {
+ std::unique_ptr<RmsLevel> level(new RmsLevel);
+ for (size_t n = 0; n + kBlockSizeSamples <= input.size();
+ n += kBlockSizeSamples) {
+ level->Analyze(input.subview(n, kBlockSizeSamples));
+ }
+ return level;
+}
+
+std::unique_ptr<RmsLevel> RunTest(rtc::ArrayView<const float> input) {
+ std::unique_ptr<RmsLevel> level(new RmsLevel);
+ for (size_t n = 0; n + kBlockSizeSamples <= input.size();
+ n += kBlockSizeSamples) {
+ level->Analyze(input.subview(n, kBlockSizeSamples));
+ }
+ return level;
+}
+
+std::vector<int16_t> CreateInt16Sinusoid(int frequency_hz,
+ int amplitude,
+ size_t num_samples) {
+ std::vector<int16_t> x(num_samples);
+ for (size_t n = 0; n < num_samples; ++n) {
+ x[n] = rtc::saturated_cast<int16_t>(
+ amplitude * std::sin(2 * M_PI * n * frequency_hz / kSampleRateHz));
+ }
+ return x;
+}
+
+std::vector<float> CreateFloatSinusoid(int frequency_hz,
+ int amplitude,
+ size_t num_samples) {
+ std::vector<int16_t> x16 =
+ CreateInt16Sinusoid(frequency_hz, amplitude, num_samples);
+ std::vector<float> x(x16.size());
+ for (size_t n = 0; n < x.size(); ++n) {
+ x[n] = x16[n];
+ }
+ return x;
+}
+
+} // namespace
+
+TEST(RmsLevelTest, VerifyIdentityBetweenFloatAndFix) {
+ auto x_f = CreateFloatSinusoid(1000, INT16_MAX, kSampleRateHz);
+ auto x_i = CreateInt16Sinusoid(1000, INT16_MAX, kSampleRateHz);
+ auto level_f = RunTest(x_f);
+ auto level_i = RunTest(x_i);
+ int avg_i = level_i->Average();
+ int avg_f = level_f->Average();
+ EXPECT_EQ(3, avg_i); // -3 dBFS
+ EXPECT_EQ(avg_f, avg_i);
+}
+
+TEST(RmsLevelTest, Run1000HzFullScale) {
+ auto x = CreateInt16Sinusoid(1000, INT16_MAX, kSampleRateHz);
+ auto level = RunTest(x);
+ EXPECT_EQ(3, level->Average()); // -3 dBFS
+}
+
+TEST(RmsLevelTest, Run1000HzFullScaleAverageAndPeak) {
+ auto x = CreateInt16Sinusoid(1000, INT16_MAX, kSampleRateHz);
+ auto level = RunTest(x);
+ auto stats = level->AverageAndPeak();
+ EXPECT_EQ(3, stats.average); // -3 dBFS
+ EXPECT_EQ(3, stats.peak);
+}
+
+TEST(RmsLevelTest, Run1000HzHalfScale) {
+ auto x = CreateInt16Sinusoid(1000, INT16_MAX / 2, kSampleRateHz);
+ auto level = RunTest(x);
+ EXPECT_EQ(9, level->Average()); // -9 dBFS
+}
+
+TEST(RmsLevelTest, RunZeros) {
+ std::vector<int16_t> x(kSampleRateHz, 0); // 1 second of pure silence.
+ auto level = RunTest(x);
+ EXPECT_EQ(127, level->Average());
+}
+
+TEST(RmsLevelTest, RunZerosAverageAndPeak) {
+ std::vector<int16_t> x(kSampleRateHz, 0); // 1 second of pure silence.
+ auto level = RunTest(x);
+ auto stats = level->AverageAndPeak();
+ EXPECT_EQ(127, stats.average);
+ EXPECT_EQ(127, stats.peak);
+}
+
+TEST(RmsLevelTest, NoSamples) {
+ RmsLevel level;
+ EXPECT_EQ(127, level.Average()); // Return minimum if no samples are given.
+}
+
+TEST(RmsLevelTest, NoSamplesAverageAndPeak) {
+ RmsLevel level;
+ auto stats = level.AverageAndPeak();
+ EXPECT_EQ(127, stats.average);
+ EXPECT_EQ(127, stats.peak);
+}
+
+TEST(RmsLevelTest, PollTwice) {
+ auto x = CreateInt16Sinusoid(1000, INT16_MAX, kSampleRateHz);
+ auto level = RunTest(x);
+ level->Average();
+ EXPECT_EQ(127, level->Average()); // Stats should be reset at this point.
+}
+
+TEST(RmsLevelTest, Reset) {
+ auto x = CreateInt16Sinusoid(1000, INT16_MAX, kSampleRateHz);
+ auto level = RunTest(x);
+ level->Reset();
+ EXPECT_EQ(127, level->Average()); // Stats should be reset at this point.
+}
+
+// Inserts 1 second of full-scale sinusoid, followed by 1 second of muted.
+TEST(RmsLevelTest, ProcessMuted) {
+ auto x = CreateInt16Sinusoid(1000, INT16_MAX, kSampleRateHz);
+ auto level = RunTest(x);
+ const size_t kBlocksPerSecond = rtc::CheckedDivExact(
+ static_cast<size_t>(kSampleRateHz), kBlockSizeSamples);
+ for (size_t i = 0; i < kBlocksPerSecond; ++i) {
+ level->AnalyzeMuted(kBlockSizeSamples);
+ }
+ EXPECT_EQ(6, level->Average()); // -6 dBFS; the silence halves the mean square.
+}
+
+// Digital silence must yield 127 and anything else should yield 126 or lower.
+TEST(RmsLevelTest, OnlyDigitalSilenceIs127) {
+ std::vector<int16_t> test_buffer(kSampleRateHz, 0);
+ auto level = RunTest(test_buffer);
+ EXPECT_EQ(127, level->Average());
+ // Change one sample to something other than 0 to make the buffer not
+ // strictly represent digital silence.
+ test_buffer[0] = 1;
+ level = RunTest(test_buffer);
+ EXPECT_LT(level->Average(), 127);
+}
+
+// Inserts 1 second of half-scale sinusoid, followed by 10 ms of full-scale,
+// and finally 1 second of half-scale again. Expect the average to be -9 dBFS
+// due to the vast majority of the signal being half-scale, and the peak to be
+// -3 dBFS.
+TEST(RmsLevelTest, RunHalfScaleAndInsertFullScale) {
+ auto half_scale = CreateInt16Sinusoid(1000, INT16_MAX / 2, kSampleRateHz);
+ auto full_scale = CreateInt16Sinusoid(1000, INT16_MAX, kSampleRateHz / 100);
+ auto x = half_scale;
+ x.insert(x.end(), full_scale.begin(), full_scale.end());
+ x.insert(x.end(), half_scale.begin(), half_scale.end());
+ ASSERT_EQ(static_cast<size_t>(2 * kSampleRateHz + kSampleRateHz / 100),
+ x.size());
+ auto level = RunTest(x);
+ auto stats = level->AverageAndPeak();
+ EXPECT_EQ(9, stats.average);
+ EXPECT_EQ(3, stats.peak);
+}
+
+TEST(RmsLevelTest, ResetOnBlockSizeChange) {
+ auto x = CreateInt16Sinusoid(1000, INT16_MAX, kSampleRateHz);
+ auto level = RunTest(x);
+ // Create a new signal with half amplitude, but double block length.
+ auto y = CreateInt16Sinusoid(1000, INT16_MAX / 2, kBlockSizeSamples * 2);
+ level->Analyze(y);
+ auto stats = level->AverageAndPeak();
+ // Expect all stats to only be influenced by the last signal (y), since the
+ // changed block size should reset the stats.
+ EXPECT_EQ(9, stats.average);
+ EXPECT_EQ(9, stats.peak);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/splitting_filter.cc b/third_party/libwebrtc/modules/audio_processing/splitting_filter.cc
new file mode 100644
index 0000000000..d47090bc03
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/splitting_filter.cc
@@ -0,0 +1,144 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/splitting_filter.h"
+
+#include <array>
+
+#include "api/array_view.h"
+#include "common_audio/channel_buffer.h"
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace {
+
+constexpr size_t kSamplesPerBand = 160;
+constexpr size_t kTwoBandFilterSamplesPerFrame = 320;
+
+} // namespace
+
+SplittingFilter::SplittingFilter(size_t num_channels,
+ size_t num_bands,
+ size_t num_frames)
+ : num_bands_(num_bands),
+ two_bands_states_(num_bands_ == 2 ? num_channels : 0),
+ three_band_filter_banks_(num_bands_ == 3 ? num_channels : 0) {
+ RTC_CHECK(num_bands_ == 2 || num_bands_ == 3);
+}
+
+SplittingFilter::~SplittingFilter() = default;
+
+void SplittingFilter::Analysis(const ChannelBuffer<float>* data,
+ ChannelBuffer<float>* bands) {
+ RTC_DCHECK_EQ(num_bands_, bands->num_bands());
+ RTC_DCHECK_EQ(data->num_channels(), bands->num_channels());
+ RTC_DCHECK_EQ(data->num_frames(),
+ bands->num_frames_per_band() * bands->num_bands());
+ if (bands->num_bands() == 2) {
+ TwoBandsAnalysis(data, bands);
+ } else if (bands->num_bands() == 3) {
+ ThreeBandsAnalysis(data, bands);
+ }
+}
+
+void SplittingFilter::Synthesis(const ChannelBuffer<float>* bands,
+ ChannelBuffer<float>* data) {
+ RTC_DCHECK_EQ(num_bands_, bands->num_bands());
+ RTC_DCHECK_EQ(data->num_channels(), bands->num_channels());
+ RTC_DCHECK_EQ(data->num_frames(),
+ bands->num_frames_per_band() * bands->num_bands());
+ if (bands->num_bands() == 2) {
+ TwoBandsSynthesis(bands, data);
+ } else if (bands->num_bands() == 3) {
+ ThreeBandsSynthesis(bands, data);
+ }
+}
+
+void SplittingFilter::TwoBandsAnalysis(const ChannelBuffer<float>* data,
+ ChannelBuffer<float>* bands) {
+ RTC_DCHECK_EQ(two_bands_states_.size(), data->num_channels());
+ RTC_DCHECK_EQ(data->num_frames(), kTwoBandFilterSamplesPerFrame);
+
+ for (size_t i = 0; i < two_bands_states_.size(); ++i) {
+ std::array<std::array<int16_t, kSamplesPerBand>, 2> bands16;
+ std::array<int16_t, kTwoBandFilterSamplesPerFrame> full_band16;
+ FloatS16ToS16(data->channels(0)[i], full_band16.size(), full_band16.data());
+ WebRtcSpl_AnalysisQMF(full_band16.data(), data->num_frames(),
+ bands16[0].data(), bands16[1].data(),
+ two_bands_states_[i].analysis_state1,
+ two_bands_states_[i].analysis_state2);
+ S16ToFloatS16(bands16[0].data(), bands16[0].size(), bands->channels(0)[i]);
+ S16ToFloatS16(bands16[1].data(), bands16[1].size(), bands->channels(1)[i]);
+ }
+}
+
+void SplittingFilter::TwoBandsSynthesis(const ChannelBuffer<float>* bands,
+ ChannelBuffer<float>* data) {
+ RTC_DCHECK_LE(data->num_channels(), two_bands_states_.size());
+ RTC_DCHECK_EQ(data->num_frames(), kTwoBandFilterSamplesPerFrame);
+ for (size_t i = 0; i < data->num_channels(); ++i) {
+ std::array<std::array<int16_t, kSamplesPerBand>, 2> bands16;
+ std::array<int16_t, kTwoBandFilterSamplesPerFrame> full_band16;
+ FloatS16ToS16(bands->channels(0)[i], bands16[0].size(), bands16[0].data());
+ FloatS16ToS16(bands->channels(1)[i], bands16[1].size(), bands16[1].data());
+ WebRtcSpl_SynthesisQMF(bands16[0].data(), bands16[1].data(),
+ bands->num_frames_per_band(), full_band16.data(),
+ two_bands_states_[i].synthesis_state1,
+ two_bands_states_[i].synthesis_state2);
+ S16ToFloatS16(full_band16.data(), full_band16.size(), data->channels(0)[i]);
+ }
+}
+
+void SplittingFilter::ThreeBandsAnalysis(const ChannelBuffer<float>* data,
+ ChannelBuffer<float>* bands) {
+ RTC_DCHECK_EQ(three_band_filter_banks_.size(), data->num_channels());
+ RTC_DCHECK_LE(data->num_channels(), three_band_filter_banks_.size());
+ RTC_DCHECK_LE(data->num_channels(), bands->num_channels());
+ RTC_DCHECK_EQ(data->num_frames(), ThreeBandFilterBank::kFullBandSize);
+ RTC_DCHECK_EQ(bands->num_frames(), ThreeBandFilterBank::kFullBandSize);
+ RTC_DCHECK_EQ(bands->num_bands(), ThreeBandFilterBank::kNumBands);
+ RTC_DCHECK_EQ(bands->num_frames_per_band(),
+ ThreeBandFilterBank::kSplitBandSize);
+
+ for (size_t i = 0; i < three_band_filter_banks_.size(); ++i) {
+ three_band_filter_banks_[i].Analysis(
+ rtc::ArrayView<const float, ThreeBandFilterBank::kFullBandSize>(
+ data->channels_view()[i].data(),
+ ThreeBandFilterBank::kFullBandSize),
+ rtc::ArrayView<const rtc::ArrayView<float>,
+ ThreeBandFilterBank::kNumBands>(
+ bands->bands_view(i).data(), ThreeBandFilterBank::kNumBands));
+ }
+}
+
+void SplittingFilter::ThreeBandsSynthesis(const ChannelBuffer<float>* bands,
+ ChannelBuffer<float>* data) {
+ RTC_DCHECK_LE(data->num_channels(), three_band_filter_banks_.size());
+ RTC_DCHECK_LE(data->num_channels(), bands->num_channels());
+ RTC_DCHECK_EQ(data->num_frames(), ThreeBandFilterBank::kFullBandSize);
+ RTC_DCHECK_EQ(bands->num_frames(), ThreeBandFilterBank::kFullBandSize);
+ RTC_DCHECK_EQ(bands->num_bands(), ThreeBandFilterBank::kNumBands);
+ RTC_DCHECK_EQ(bands->num_frames_per_band(),
+ ThreeBandFilterBank::kSplitBandSize);
+
+ for (size_t i = 0; i < data->num_channels(); ++i) {
+ three_band_filter_banks_[i].Synthesis(
+ rtc::ArrayView<const rtc::ArrayView<float>,
+ ThreeBandFilterBank::kNumBands>(
+ bands->bands_view(i).data(), ThreeBandFilterBank::kNumBands),
+ rtc::ArrayView<float, ThreeBandFilterBank::kFullBandSize>(
+ data->channels_view()[i].data(),
+ ThreeBandFilterBank::kFullBandSize));
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/splitting_filter.h b/third_party/libwebrtc/modules/audio_processing/splitting_filter.h
new file mode 100644
index 0000000000..e578dd07c1
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/splitting_filter.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_SPLITTING_FILTER_H_
+#define MODULES_AUDIO_PROCESSING_SPLITTING_FILTER_H_
+
+#include <cstring>
+#include <memory>
+#include <vector>
+
+#include "common_audio/channel_buffer.h"
+#include "modules/audio_processing/three_band_filter_bank.h"
+
+namespace webrtc {
+
+struct TwoBandsStates {
+ TwoBandsStates() {
+ memset(analysis_state1, 0, sizeof(analysis_state1));
+ memset(analysis_state2, 0, sizeof(analysis_state2));
+ memset(synthesis_state1, 0, sizeof(synthesis_state1));
+ memset(synthesis_state2, 0, sizeof(synthesis_state2));
+ }
+
+ static const int kStateSize = 6;
+ int analysis_state1[kStateSize];
+ int analysis_state2[kStateSize];
+ int synthesis_state1[kStateSize];
+ int synthesis_state2[kStateSize];
+};
+
+// Splitting filter which is able to split into and merge from 2 or 3 frequency
+// bands. The number of channels needs to be provided at construction time.
+//
+// For each block, Analysis() is called to split into bands and then Synthesis()
+// to merge these bands again. The input and output signals are contained in
+// ChannelBuffers and for the different bands an array of ChannelBuffers is
+// used.
+class SplittingFilter {
+ public:
+ SplittingFilter(size_t num_channels, size_t num_bands, size_t num_frames);
+ ~SplittingFilter();
+
+ void Analysis(const ChannelBuffer<float>* data, ChannelBuffer<float>* bands);
+ void Synthesis(const ChannelBuffer<float>* bands, ChannelBuffer<float>* data);
+
+ private:
+ // Two-band analysis and synthesis work for 640 samples or less.
+ void TwoBandsAnalysis(const ChannelBuffer<float>* data,
+ ChannelBuffer<float>* bands);
+ void TwoBandsSynthesis(const ChannelBuffer<float>* bands,
+ ChannelBuffer<float>* data);
+ void ThreeBandsAnalysis(const ChannelBuffer<float>* data,
+ ChannelBuffer<float>* bands);
+ void ThreeBandsSynthesis(const ChannelBuffer<float>* bands,
+ ChannelBuffer<float>* data);
+ void InitBuffers();
+
+ const size_t num_bands_;
+ std::vector<TwoBandsStates> two_bands_states_;
+ std::vector<ThreeBandFilterBank> three_band_filter_banks_;
+};
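+
+// Editorial sketch of the expected call sequence, not part of the original
+// header; the 48 kHz mono, three-band values mirror the unit test and are
+// illustrative:
+//
+//   SplittingFilter filter(/*num_channels=*/1, /*num_bands=*/3,
+//                          /*num_frames=*/480);
+//   ChannelBuffer<float> data(/*num_frames=*/480, /*num_channels=*/1,
+//                             /*num_bands=*/3);
+//   ChannelBuffer<float> bands(480, 1, 3);
+//   filter.Analysis(&data, &bands);   // Split into 3 bands of 160 samples.
+//   filter.Synthesis(&bands, &data);  // Merge the bands back into `data`.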
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_SPLITTING_FILTER_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/splitting_filter_unittest.cc b/third_party/libwebrtc/modules/audio_processing/splitting_filter_unittest.cc
new file mode 100644
index 0000000000..30fe4caf9c
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/splitting_filter_unittest.cc
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+// MSVC++ requires this to be set before any other includes to get M_PI.
+#define _USE_MATH_DEFINES
+
+#include "modules/audio_processing/splitting_filter.h"
+
+#include <cmath>
+
+#include "common_audio/channel_buffer.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+const size_t kSamplesPer16kHzChannel = 160;
+const size_t kSamplesPer48kHzChannel = 480;
+
+} // namespace
+
+// Generates a signal from presence or absence of sine waves of different
+// frequencies.
+// Splits into 3 bands and checks their presence or absence.
+// Recombines the bands.
+// Calculates the delay.
+// Checks that the cross correlation of input and output is high enough at the
+// calculated delay.
+TEST(SplittingFilterTest, SplitsIntoThreeBandsAndReconstructs) {
+ static const int kChannels = 1;
+ static const int kSampleRateHz = 48000;
+ static const size_t kNumBands = 3;
+ static const int kFrequenciesHz[kNumBands] = {1000, 12000, 18000};
+ static const float kAmplitude = 8192.f;
+ static const size_t kChunks = 8;
+ SplittingFilter splitting_filter(kChannels, kNumBands,
+ kSamplesPer48kHzChannel);
+ ChannelBuffer<float> in_data(kSamplesPer48kHzChannel, kChannels, kNumBands);
+ ChannelBuffer<float> bands(kSamplesPer48kHzChannel, kChannels, kNumBands);
+ ChannelBuffer<float> out_data(kSamplesPer48kHzChannel, kChannels, kNumBands);
+ for (size_t i = 0; i < kChunks; ++i) {
+ // Input signal generation.
+ bool is_present[kNumBands];
+ memset(in_data.channels()[0], 0,
+ kSamplesPer48kHzChannel * sizeof(in_data.channels()[0][0]));
+ for (size_t j = 0; j < kNumBands; ++j) {
+ is_present[j] = i & (static_cast<size_t>(1) << j);
+ float amplitude = is_present[j] ? kAmplitude : 0.f;
+ for (size_t k = 0; k < kSamplesPer48kHzChannel; ++k) {
+ in_data.channels()[0][k] +=
+ amplitude * sin(2.f * M_PI * kFrequenciesHz[j] *
+ (i * kSamplesPer48kHzChannel + k) / kSampleRateHz);
+ }
+ }
+ // Three band splitting filter.
+ splitting_filter.Analysis(&in_data, &bands);
+ // Energy calculation.
+ float energy[kNumBands];
+ for (size_t j = 0; j < kNumBands; ++j) {
+ energy[j] = 0.f;
+ for (size_t k = 0; k < kSamplesPer16kHzChannel; ++k) {
+ energy[j] += bands.channels(j)[0][k] * bands.channels(j)[0][k];
+ }
+ energy[j] /= kSamplesPer16kHzChannel;
+ if (is_present[j]) {
+ EXPECT_GT(energy[j], kAmplitude * kAmplitude / 4);
+ } else {
+ EXPECT_LT(energy[j], kAmplitude * kAmplitude / 4);
+ }
+ }
+ // Three band merge.
+ splitting_filter.Synthesis(&bands, &out_data);
+ // Delay and cross correlation estimation.
+ float xcorr = 0.f;
+ for (size_t delay = 0; delay < kSamplesPer48kHzChannel; ++delay) {
+ float tmpcorr = 0.f;
+ for (size_t j = delay; j < kSamplesPer48kHzChannel; ++j) {
+ tmpcorr += in_data.channels()[0][j - delay] * out_data.channels()[0][j];
+ }
+ tmpcorr /= kSamplesPer48kHzChannel;
+ if (tmpcorr > xcorr) {
+ xcorr = tmpcorr;
+ }
+ }
+ // High cross correlation check.
+ bool any_present = false;
+ for (size_t j = 0; j < kNumBands; ++j) {
+ any_present |= is_present[j];
+ }
+ if (any_present) {
+ EXPECT_GT(xcorr, kAmplitude * kAmplitude / 4);
+ }
+ }
+}
+
+} // namespace webrtc
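A note on the thresholds above: a sine of amplitude A has a mean-square value
of A^2 / 2, so comparing band energy against kAmplitude * kAmplitude / 4 places
the bound at half the expected energy of a fully present tone. A standalone
sketch of that expectation (illustration only, not part of the test):

#include <cmath>

// Mean-square value of a sampled sine; approaches amplitude^2 / 2 as the
// number of samples grows.
float SineMeanSquare(float amplitude, int num_samples, float frequency_hz,
                     float sample_rate_hz) {
  const float kPi = 3.14159265f;
  float sum = 0.f;
  for (int k = 0; k < num_samples; ++k) {
    const float s =
        amplitude * std::sin(2.f * kPi * frequency_hz * k / sample_rate_hz);
    sum += s * s;
  }
  return sum / num_samples;
}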
diff --git a/third_party/libwebrtc/modules/audio_processing/test/aec_dump_based_simulator.cc b/third_party/libwebrtc/modules/audio_processing/test/aec_dump_based_simulator.cc
new file mode 100644
index 0000000000..ec35dd345c
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/aec_dump_based_simulator.cc
@@ -0,0 +1,654 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/test/aec_dump_based_simulator.h"
+
+#include <iostream>
+#include <memory>
+
+#include "modules/audio_processing/echo_control_mobile_impl.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+#include "modules/audio_processing/test/aec_dump_based_simulator.h"
+#include "modules/audio_processing/test/protobuf_utils.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/numerics/safe_conversions.h"
+
+namespace webrtc {
+namespace test {
+namespace {
+
+// Verify output bitexactness for the fixed interface.
+// TODO(peah): Check whether it would make sense to add a threshold
+// to use for checking the bitexactness in a soft manner.
+bool VerifyFixedBitExactness(const webrtc::audioproc::Stream& msg,
+ const Int16Frame& frame) {
+ if (sizeof(frame.data[0]) * frame.data.size() != msg.output_data().size()) {
+ return false;
+ } else {
+ const int16_t* frame_data = frame.data.data();
+ for (int k = 0; k < frame.num_channels * frame.samples_per_channel; ++k) {
+ if (msg.output_data().data()[k] != frame_data[k]) {
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
+// Verify output bitexactness for the float interface.
+bool VerifyFloatBitExactness(const webrtc::audioproc::Stream& msg,
+ const StreamConfig& out_config,
+ const ChannelBuffer<float>& out_buf) {
+ if (static_cast<size_t>(msg.output_channel_size()) !=
+ out_config.num_channels() ||
+ msg.output_channel(0).size() != out_config.num_frames()) {
+ return false;
+ } else {
+ for (int ch = 0; ch < msg.output_channel_size(); ++ch) {
+ for (size_t sample = 0; sample < out_config.num_frames(); ++sample) {
+ if (msg.output_channel(ch).data()[sample] !=
+ out_buf.channels()[ch][sample]) {
+ return false;
+ }
+ }
+ }
+ }
+ return true;
+}
+
+// Selectively reads the next proto-buf message from dump-file or string input.
+// Returns a bool indicating whether a new message was available.
+bool ReadNextMessage(bool use_dump_file,
+ FILE* dump_input_file,
+ std::stringstream& input,
+ webrtc::audioproc::Event& event_msg) {
+ if (use_dump_file) {
+ return ReadMessageFromFile(dump_input_file, &event_msg);
+ }
+ return ReadMessageFromString(&input, &event_msg);
+}
+
+} // namespace
+
+AecDumpBasedSimulator::AecDumpBasedSimulator(
+ const SimulationSettings& settings,
+ rtc::scoped_refptr<AudioProcessing> audio_processing,
+ std::unique_ptr<AudioProcessingBuilder> ap_builder)
+ : AudioProcessingSimulator(settings,
+ std::move(audio_processing),
+ std::move(ap_builder)) {
+ MaybeOpenCallOrderFile();
+}
+
+AecDumpBasedSimulator::~AecDumpBasedSimulator() = default;
+
+void AecDumpBasedSimulator::PrepareProcessStreamCall(
+ const webrtc::audioproc::Stream& msg) {
+ if (msg.has_input_data()) {
+ // Fixed interface processing.
+ // Verify interface invariance.
+ RTC_CHECK(interface_used_ == InterfaceType::kFixedInterface ||
+ interface_used_ == InterfaceType::kNotSpecified);
+ interface_used_ = InterfaceType::kFixedInterface;
+
+ // Populate input buffer.
+ RTC_CHECK_EQ(sizeof(fwd_frame_.data[0]) * fwd_frame_.data.size(),
+ msg.input_data().size());
+ memcpy(fwd_frame_.data.data(), msg.input_data().data(),
+ msg.input_data().size());
+ } else {
+ // Float interface processing.
+ // Verify interface invariance.
+ RTC_CHECK(interface_used_ == InterfaceType::kFloatInterface ||
+ interface_used_ == InterfaceType::kNotSpecified);
+ interface_used_ = InterfaceType::kFloatInterface;
+
+ RTC_CHECK_EQ(in_buf_->num_channels(),
+ static_cast<size_t>(msg.input_channel_size()));
+
+ // Populate input buffer.
+ for (size_t i = 0; i < in_buf_->num_channels(); ++i) {
+ RTC_CHECK_EQ(in_buf_->num_frames() * sizeof(*in_buf_->channels()[i]),
+ msg.input_channel(i).size());
+ std::memcpy(in_buf_->channels()[i], msg.input_channel(i).data(),
+ msg.input_channel(i).size());
+ }
+ }
+
+ if (artificial_nearend_buffer_reader_) {
+ if (artificial_nearend_buffer_reader_->Read(
+ artificial_nearend_buf_.get())) {
+ if (msg.has_input_data()) {
+ int16_t* fwd_frame_data = fwd_frame_.data.data();
+ for (size_t k = 0; k < in_buf_->num_frames(); ++k) {
+ fwd_frame_data[k] = rtc::saturated_cast<int16_t>(
+ fwd_frame_data[k] +
+ static_cast<int16_t>(32767 *
+ artificial_nearend_buf_->channels()[0][k]));
+ }
+ } else {
+ for (int i = 0; i < msg.input_channel_size(); ++i) {
+ for (size_t k = 0; k < in_buf_->num_frames(); ++k) {
+ in_buf_->channels()[i][k] +=
+ artificial_nearend_buf_->channels()[0][k];
+ in_buf_->channels()[i][k] = std::min(
+ 32767.f, std::max(-32768.f, in_buf_->channels()[i][k]));
+ }
+ }
+ }
+ } else {
+ if (!artificial_nearend_eof_reported_) {
+        std::cout << "The artificial nearend file ended before the recording.\n";
+ artificial_nearend_eof_reported_ = true;
+ }
+ }
+ }
+
+ if (!settings_.use_stream_delay || *settings_.use_stream_delay) {
+ if (!settings_.stream_delay) {
+ if (msg.has_delay()) {
+ RTC_CHECK_EQ(AudioProcessing::kNoError,
+ ap_->set_stream_delay_ms(msg.delay()));
+ }
+ } else {
+ RTC_CHECK_EQ(AudioProcessing::kNoError,
+ ap_->set_stream_delay_ms(*settings_.stream_delay));
+ }
+ }
+
+ if (settings_.override_key_pressed.has_value()) {
+ // Key pressed state overridden.
+ ap_->set_stream_key_pressed(*settings_.override_key_pressed);
+ } else {
+ // Set the recorded key pressed state.
+ if (msg.has_keypress()) {
+ ap_->set_stream_key_pressed(msg.keypress());
+ }
+ }
+
+ // Level is always logged in AEC dumps.
+ RTC_CHECK(msg.has_level());
+ aec_dump_mic_level_ = msg.level();
+}
+
+void AecDumpBasedSimulator::VerifyProcessStreamBitExactness(
+ const webrtc::audioproc::Stream& msg) {
+ if (bitexact_output_) {
+ if (interface_used_ == InterfaceType::kFixedInterface) {
+ bitexact_output_ = VerifyFixedBitExactness(msg, fwd_frame_);
+ } else {
+ bitexact_output_ = VerifyFloatBitExactness(msg, out_config_, *out_buf_);
+ }
+ }
+}
+
+void AecDumpBasedSimulator::PrepareReverseProcessStreamCall(
+ const webrtc::audioproc::ReverseStream& msg) {
+ if (msg.has_data()) {
+ // Fixed interface processing.
+ // Verify interface invariance.
+ RTC_CHECK(interface_used_ == InterfaceType::kFixedInterface ||
+ interface_used_ == InterfaceType::kNotSpecified);
+ interface_used_ = InterfaceType::kFixedInterface;
+
+ // Populate input buffer.
+ RTC_CHECK_EQ(sizeof(rev_frame_.data[0]) * rev_frame_.data.size(),
+ msg.data().size());
+ memcpy(rev_frame_.data.data(), msg.data().data(), msg.data().size());
+ } else {
+ // Float interface processing.
+ // Verify interface invariance.
+ RTC_CHECK(interface_used_ == InterfaceType::kFloatInterface ||
+ interface_used_ == InterfaceType::kNotSpecified);
+ interface_used_ = InterfaceType::kFloatInterface;
+
+ RTC_CHECK_EQ(reverse_in_buf_->num_channels(),
+ static_cast<size_t>(msg.channel_size()));
+
+ // Populate input buffer.
+ for (int i = 0; i < msg.channel_size(); ++i) {
+ RTC_CHECK_EQ(reverse_in_buf_->num_frames() *
+ sizeof(*reverse_in_buf_->channels()[i]),
+ msg.channel(i).size());
+ std::memcpy(reverse_in_buf_->channels()[i], msg.channel(i).data(),
+ msg.channel(i).size());
+ }
+ }
+}
+
+void AecDumpBasedSimulator::Process() {
+ ConfigureAudioProcessor();
+
+ if (settings_.artificial_nearend_filename) {
+ std::unique_ptr<WavReader> artificial_nearend_file(
+ new WavReader(settings_.artificial_nearend_filename->c_str()));
+
+ RTC_CHECK_EQ(1, artificial_nearend_file->num_channels())
+ << "Only mono files for the artificial nearend are supported, "
+ "reverted to not using the artificial nearend file";
+
+ const int sample_rate_hz = artificial_nearend_file->sample_rate();
+ artificial_nearend_buffer_reader_.reset(
+ new ChannelBufferWavReader(std::move(artificial_nearend_file)));
+ artificial_nearend_buf_.reset(new ChannelBuffer<float>(
+ rtc::CheckedDivExact(sample_rate_hz, kChunksPerSecond), 1));
+ }
+
+ const bool use_dump_file = !settings_.aec_dump_input_string.has_value();
+ std::stringstream input;
+ if (use_dump_file) {
+ dump_input_file_ =
+ OpenFile(settings_.aec_dump_input_filename->c_str(), "rb");
+ } else {
+ input << settings_.aec_dump_input_string.value();
+ }
+
+ webrtc::audioproc::Event event_msg;
+ int capture_frames_since_init = 0;
+ int init_index = 0;
+ while (ReadNextMessage(use_dump_file, dump_input_file_, input, event_msg)) {
+ SelectivelyToggleDataDumping(init_index, capture_frames_since_init);
+ HandleEvent(event_msg, capture_frames_since_init, init_index);
+
+    // Perform an early exit if the specified init block has been fully
+    // processed.
+ if (finished_processing_specified_init_block_) {
+ break;
+ }
+ RTC_CHECK(!settings_.init_to_process ||
+ *settings_.init_to_process >= init_index);
+ }
+
+ if (use_dump_file) {
+ fclose(dump_input_file_);
+ }
+
+ DetachAecDump();
+}
+
+void AecDumpBasedSimulator::Analyze() {
+ const bool use_dump_file = !settings_.aec_dump_input_string.has_value();
+ std::stringstream input;
+ if (use_dump_file) {
+ dump_input_file_ =
+ OpenFile(settings_.aec_dump_input_filename->c_str(), "rb");
+ } else {
+ input << settings_.aec_dump_input_string.value();
+ }
+
+ webrtc::audioproc::Event event_msg;
+ int num_capture_frames = 0;
+ int num_render_frames = 0;
+ int init_index = 0;
+ while (ReadNextMessage(use_dump_file, dump_input_file_, input, event_msg)) {
+ if (event_msg.type() == webrtc::audioproc::Event::INIT) {
+ ++init_index;
+ constexpr float kNumFramesPerSecond = 100.f;
+ float capture_time_seconds = num_capture_frames / kNumFramesPerSecond;
+ float render_time_seconds = num_render_frames / kNumFramesPerSecond;
+
+ std::cout << "Inits:" << std::endl;
+ std::cout << init_index << ": -->" << std::endl;
+ std::cout << " Time:" << std::endl;
+ std::cout << " Capture: " << capture_time_seconds << " s ("
+ << num_capture_frames << " frames) " << std::endl;
+ std::cout << " Render: " << render_time_seconds << " s ("
+ << num_render_frames << " frames) " << std::endl;
+ } else if (event_msg.type() == webrtc::audioproc::Event::STREAM) {
+ ++num_capture_frames;
+ } else if (event_msg.type() == webrtc::audioproc::Event::REVERSE_STREAM) {
+ ++num_render_frames;
+ }
+ }
+
+ if (use_dump_file) {
+ fclose(dump_input_file_);
+ }
+}
+
+void AecDumpBasedSimulator::HandleEvent(
+ const webrtc::audioproc::Event& event_msg,
+ int& capture_frames_since_init,
+ int& init_index) {
+ switch (event_msg.type()) {
+ case webrtc::audioproc::Event::INIT:
+ RTC_CHECK(event_msg.has_init());
+ ++init_index;
+ capture_frames_since_init = 0;
+ HandleMessage(event_msg.init(), init_index);
+ break;
+ case webrtc::audioproc::Event::STREAM:
+ RTC_CHECK(event_msg.has_stream());
+ ++capture_frames_since_init;
+ HandleMessage(event_msg.stream());
+ break;
+ case webrtc::audioproc::Event::REVERSE_STREAM:
+ RTC_CHECK(event_msg.has_reverse_stream());
+ HandleMessage(event_msg.reverse_stream());
+ break;
+ case webrtc::audioproc::Event::CONFIG:
+ RTC_CHECK(event_msg.has_config());
+ HandleMessage(event_msg.config());
+ break;
+ case webrtc::audioproc::Event::RUNTIME_SETTING:
+ HandleMessage(event_msg.runtime_setting());
+ break;
+ case webrtc::audioproc::Event::UNKNOWN_EVENT:
+ RTC_CHECK_NOTREACHED();
+ }
+}
+
+void AecDumpBasedSimulator::HandleMessage(
+ const webrtc::audioproc::Config& msg) {
+ if (settings_.use_verbose_logging) {
+ std::cout << "Config at frame:" << std::endl;
+ std::cout << " Forward: " << get_num_process_stream_calls() << std::endl;
+ std::cout << " Reverse: " << get_num_reverse_process_stream_calls()
+ << std::endl;
+ }
+
+ if (!settings_.discard_all_settings_in_aecdump) {
+ if (settings_.use_verbose_logging) {
+ std::cout << "Setting used in config:" << std::endl;
+ }
+ AudioProcessing::Config apm_config = ap_->GetConfig();
+
+ if (msg.has_aec_enabled() || settings_.use_aec) {
+ bool enable = settings_.use_aec ? *settings_.use_aec : msg.aec_enabled();
+ apm_config.echo_canceller.enabled = enable;
+ if (settings_.use_verbose_logging) {
+ std::cout << " aec_enabled: " << (enable ? "true" : "false")
+ << std::endl;
+ }
+ }
+
+ if (msg.has_aecm_enabled() || settings_.use_aecm) {
+ bool enable =
+ settings_.use_aecm ? *settings_.use_aecm : msg.aecm_enabled();
+ apm_config.echo_canceller.enabled |= enable;
+ apm_config.echo_canceller.mobile_mode = enable;
+ if (settings_.use_verbose_logging) {
+ std::cout << " aecm_enabled: " << (enable ? "true" : "false")
+ << std::endl;
+ }
+ }
+
+ if (msg.has_aecm_comfort_noise_enabled() &&
+ msg.aecm_comfort_noise_enabled()) {
+ RTC_LOG(LS_ERROR) << "Ignoring deprecated setting: AECM comfort noise";
+ }
+
+ if (msg.has_aecm_routing_mode() &&
+ static_cast<webrtc::EchoControlMobileImpl::RoutingMode>(
+ msg.aecm_routing_mode()) != EchoControlMobileImpl::kSpeakerphone) {
+ RTC_LOG(LS_ERROR) << "Ignoring deprecated setting: AECM routing mode: "
+ << msg.aecm_routing_mode();
+ }
+
+ if (msg.has_agc_enabled() || settings_.use_agc) {
+ bool enable = settings_.use_agc ? *settings_.use_agc : msg.agc_enabled();
+ apm_config.gain_controller1.enabled = enable;
+ if (settings_.use_verbose_logging) {
+ std::cout << " agc_enabled: " << (enable ? "true" : "false")
+ << std::endl;
+ }
+ }
+
+ if (msg.has_agc_mode() || settings_.agc_mode) {
+ int mode = settings_.agc_mode ? *settings_.agc_mode : msg.agc_mode();
+ apm_config.gain_controller1.mode =
+ static_cast<webrtc::AudioProcessing::Config::GainController1::Mode>(
+ mode);
+ if (settings_.use_verbose_logging) {
+ std::cout << " agc_mode: " << mode << std::endl;
+ }
+ }
+
+ if (msg.has_agc_limiter_enabled() || settings_.use_agc_limiter) {
+ bool enable = settings_.use_agc_limiter ? *settings_.use_agc_limiter
+ : msg.agc_limiter_enabled();
+ apm_config.gain_controller1.enable_limiter = enable;
+ if (settings_.use_verbose_logging) {
+ std::cout << " agc_limiter_enabled: " << (enable ? "true" : "false")
+ << std::endl;
+ }
+ }
+
+ if (settings_.use_agc2) {
+ bool enable = *settings_.use_agc2;
+ apm_config.gain_controller2.enabled = enable;
+ if (settings_.agc2_fixed_gain_db) {
+ apm_config.gain_controller2.fixed_digital.gain_db =
+ *settings_.agc2_fixed_gain_db;
+ }
+ if (settings_.use_verbose_logging) {
+ std::cout << " agc2_enabled: " << (enable ? "true" : "false")
+ << std::endl;
+ }
+ }
+
+ if (msg.has_noise_robust_agc_enabled()) {
+ apm_config.gain_controller1.analog_gain_controller.enabled =
+ settings_.use_analog_agc ? *settings_.use_analog_agc
+ : msg.noise_robust_agc_enabled();
+ if (settings_.use_verbose_logging) {
+ std::cout << " noise_robust_agc_enabled: "
+ << (msg.noise_robust_agc_enabled() ? "true" : "false")
+ << std::endl;
+ }
+ }
+
+ if (msg.has_transient_suppression_enabled() || settings_.use_ts) {
+ bool enable = settings_.use_ts ? *settings_.use_ts
+ : msg.transient_suppression_enabled();
+ apm_config.transient_suppression.enabled = enable;
+ if (settings_.use_verbose_logging) {
+ std::cout << " transient_suppression_enabled: "
+ << (enable ? "true" : "false") << std::endl;
+ }
+ }
+
+ if (msg.has_hpf_enabled() || settings_.use_hpf) {
+ bool enable = settings_.use_hpf ? *settings_.use_hpf : msg.hpf_enabled();
+ apm_config.high_pass_filter.enabled = enable;
+ if (settings_.use_verbose_logging) {
+ std::cout << " hpf_enabled: " << (enable ? "true" : "false")
+ << std::endl;
+ }
+ }
+
+ if (msg.has_ns_enabled() || settings_.use_ns) {
+ bool enable = settings_.use_ns ? *settings_.use_ns : msg.ns_enabled();
+ apm_config.noise_suppression.enabled = enable;
+ if (settings_.use_verbose_logging) {
+ std::cout << " ns_enabled: " << (enable ? "true" : "false")
+ << std::endl;
+ }
+ }
+
+ if (msg.has_ns_level() || settings_.ns_level) {
+ int level = settings_.ns_level ? *settings_.ns_level : msg.ns_level();
+ apm_config.noise_suppression.level =
+ static_cast<AudioProcessing::Config::NoiseSuppression::Level>(level);
+ if (settings_.use_verbose_logging) {
+ std::cout << " ns_level: " << level << std::endl;
+ }
+ }
+
+ if (msg.has_pre_amplifier_enabled() || settings_.use_pre_amplifier) {
+ const bool enable = settings_.use_pre_amplifier
+ ? *settings_.use_pre_amplifier
+ : msg.pre_amplifier_enabled();
+ apm_config.pre_amplifier.enabled = enable;
+ }
+
+ if (msg.has_pre_amplifier_fixed_gain_factor() ||
+ settings_.pre_amplifier_gain_factor) {
+ const float gain = settings_.pre_amplifier_gain_factor
+ ? *settings_.pre_amplifier_gain_factor
+ : msg.pre_amplifier_fixed_gain_factor();
+ apm_config.pre_amplifier.fixed_gain_factor = gain;
+ }
+
+ if (settings_.use_verbose_logging && msg.has_experiments_description() &&
+ !msg.experiments_description().empty()) {
+ std::cout << " experiments not included by default in the simulation: "
+ << msg.experiments_description() << std::endl;
+ }
+
+ ap_->ApplyConfig(apm_config);
+ }
+}
+
+void AecDumpBasedSimulator::HandleMessage(const webrtc::audioproc::Init& msg,
+ int init_index) {
+ RTC_CHECK(msg.has_sample_rate());
+ RTC_CHECK(msg.has_num_input_channels());
+ RTC_CHECK(msg.has_num_reverse_channels());
+ RTC_CHECK(msg.has_reverse_sample_rate());
+
+ // Do not perform the init if the init block to process is fully processed
+ if (settings_.init_to_process && *settings_.init_to_process < init_index) {
+ finished_processing_specified_init_block_ = true;
+ }
+
+ MaybeOpenCallOrderFile();
+
+ if (settings_.use_verbose_logging) {
+ std::cout << "Init at frame:" << std::endl;
+ std::cout << " Forward: " << get_num_process_stream_calls() << std::endl;
+ std::cout << " Reverse: " << get_num_reverse_process_stream_calls()
+ << std::endl;
+ }
+
+ int num_output_channels;
+ if (settings_.output_num_channels) {
+ num_output_channels = *settings_.output_num_channels;
+ } else {
+ num_output_channels = msg.has_num_output_channels()
+ ? msg.num_output_channels()
+ : msg.num_input_channels();
+ }
+
+ int output_sample_rate;
+ if (settings_.output_sample_rate_hz) {
+ output_sample_rate = *settings_.output_sample_rate_hz;
+ } else {
+ output_sample_rate = msg.has_output_sample_rate() ? msg.output_sample_rate()
+ : msg.sample_rate();
+ }
+
+ int num_reverse_output_channels;
+ if (settings_.reverse_output_num_channels) {
+ num_reverse_output_channels = *settings_.reverse_output_num_channels;
+ } else {
+ num_reverse_output_channels = msg.has_num_reverse_output_channels()
+ ? msg.num_reverse_output_channels()
+ : msg.num_reverse_channels();
+ }
+
+ int reverse_output_sample_rate;
+ if (settings_.reverse_output_sample_rate_hz) {
+ reverse_output_sample_rate = *settings_.reverse_output_sample_rate_hz;
+ } else {
+ reverse_output_sample_rate = msg.has_reverse_output_sample_rate()
+ ? msg.reverse_output_sample_rate()
+ : msg.reverse_sample_rate();
+ }
+
+ SetupBuffersConfigsOutputs(
+ msg.sample_rate(), output_sample_rate, msg.reverse_sample_rate(),
+ reverse_output_sample_rate, msg.num_input_channels(), num_output_channels,
+ msg.num_reverse_channels(), num_reverse_output_channels);
+}
+
+void AecDumpBasedSimulator::HandleMessage(
+ const webrtc::audioproc::Stream& msg) {
+ if (call_order_output_file_) {
+ *call_order_output_file_ << "c";
+ }
+ PrepareProcessStreamCall(msg);
+ ProcessStream(interface_used_ == InterfaceType::kFixedInterface);
+ VerifyProcessStreamBitExactness(msg);
+}
+
+void AecDumpBasedSimulator::HandleMessage(
+ const webrtc::audioproc::ReverseStream& msg) {
+ if (call_order_output_file_) {
+ *call_order_output_file_ << "r";
+ }
+ PrepareReverseProcessStreamCall(msg);
+ ProcessReverseStream(interface_used_ == InterfaceType::kFixedInterface);
+}
+
+void AecDumpBasedSimulator::HandleMessage(
+ const webrtc::audioproc::RuntimeSetting& msg) {
+ RTC_CHECK(ap_.get());
+ if (msg.has_capture_pre_gain()) {
+ // Handle capture pre-gain runtime setting only if not overridden.
+    const bool pre_amplifier_not_overridden =
+        (!settings_.use_pre_amplifier || *settings_.use_pre_amplifier) &&
+        !settings_.pre_amplifier_gain_factor;
+    const bool capture_level_adjustment_not_overridden =
+        (!settings_.use_capture_level_adjustment ||
+         *settings_.use_capture_level_adjustment) &&
+        !settings_.pre_gain_factor;
+    if (pre_amplifier_not_overridden ||
+        capture_level_adjustment_not_overridden) {
+ ap_->SetRuntimeSetting(
+ AudioProcessing::RuntimeSetting::CreateCapturePreGain(
+ msg.capture_pre_gain()));
+ }
+ } else if (msg.has_capture_post_gain()) {
+ // Handle capture post-gain runtime setting only if not overridden.
+ if ((!settings_.use_capture_level_adjustment ||
+ *settings_.use_capture_level_adjustment) &&
+ !settings_.post_gain_factor) {
+ ap_->SetRuntimeSetting(
+          AudioProcessing::RuntimeSetting::CreateCapturePostGain(
+              msg.capture_post_gain()));
+ }
+ } else if (msg.has_capture_fixed_post_gain()) {
+ // Handle capture fixed-post-gain runtime setting only if not overridden.
+ if ((!settings_.use_agc2 || *settings_.use_agc2) &&
+ !settings_.agc2_fixed_gain_db) {
+ ap_->SetRuntimeSetting(
+ AudioProcessing::RuntimeSetting::CreateCaptureFixedPostGain(
+ msg.capture_fixed_post_gain()));
+ }
+ } else if (msg.has_playout_volume_change()) {
+ ap_->SetRuntimeSetting(
+ AudioProcessing::RuntimeSetting::CreatePlayoutVolumeChange(
+ msg.playout_volume_change()));
+ } else if (msg.has_playout_audio_device_change()) {
+ ap_->SetRuntimeSetting(
+ AudioProcessing::RuntimeSetting::CreatePlayoutAudioDeviceChange(
+ {msg.playout_audio_device_change().id(),
+ msg.playout_audio_device_change().max_volume()}));
+ } else if (msg.has_capture_output_used()) {
+ ap_->SetRuntimeSetting(
+ AudioProcessing::RuntimeSetting::CreateCaptureOutputUsedSetting(
+ msg.capture_output_used()));
+ }
+}
+
+void AecDumpBasedSimulator::MaybeOpenCallOrderFile() {
+ if (settings_.call_order_output_filename.has_value()) {
+ const std::string filename = settings_.store_intermediate_output
+ ? *settings_.call_order_output_filename +
+ "_" +
+ std::to_string(output_reset_counter_)
+ : *settings_.call_order_output_filename;
+ call_order_output_file_ = std::make_unique<std::ofstream>(filename);
+ }
+}
+
+} // namespace test
+} // namespace webrtc
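HandleMessage(Config) above applies the same decision rule to every setting: a
command-line override wins when present, otherwise the value recorded in the
dump is used. A compact sketch of that rule, assuming absl::optional-valued
settings as in SimulationSettings:

#include "absl/types/optional.h"

// Prefer the override when set; otherwise fall back to the recorded value.
template <typename T>
T OverrideOr(const absl::optional<T>& override_value, T recorded_value) {
  return override_value.has_value() ? *override_value : recorded_value;
}

// Example: bool enable = OverrideOr(settings_.use_aec, msg.aec_enabled());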
diff --git a/third_party/libwebrtc/modules/audio_processing/test/aec_dump_based_simulator.h b/third_party/libwebrtc/modules/audio_processing/test/aec_dump_based_simulator.h
new file mode 100644
index 0000000000..e2c1f3e4ba
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/aec_dump_based_simulator.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_TEST_AEC_DUMP_BASED_SIMULATOR_H_
+#define MODULES_AUDIO_PROCESSING_TEST_AEC_DUMP_BASED_SIMULATOR_H_
+
+#include <fstream>
+#include <string>
+
+#include "modules/audio_processing/test/audio_processing_simulator.h"
+#include "rtc_base/ignore_wundef.h"
+
+RTC_PUSH_IGNORING_WUNDEF()
+#ifdef WEBRTC_ANDROID_PLATFORM_BUILD
+#include "external/webrtc/webrtc/modules/audio_processing/debug.pb.h"
+#else
+#include "modules/audio_processing/debug.pb.h"
+#endif
+RTC_POP_IGNORING_WUNDEF()
+
+namespace webrtc {
+namespace test {
+
+// Used to perform an audio processing simulation from an aec dump.
+class AecDumpBasedSimulator final : public AudioProcessingSimulator {
+ public:
+ AecDumpBasedSimulator(const SimulationSettings& settings,
+ rtc::scoped_refptr<AudioProcessing> audio_processing,
+ std::unique_ptr<AudioProcessingBuilder> ap_builder);
+
+ AecDumpBasedSimulator() = delete;
+ AecDumpBasedSimulator(const AecDumpBasedSimulator&) = delete;
+ AecDumpBasedSimulator& operator=(const AecDumpBasedSimulator&) = delete;
+
+ ~AecDumpBasedSimulator() override;
+
+ // Processes the messages in the aecdump file.
+ void Process() override;
+
+ // Analyzes the data in the aecdump file and reports the resulting statistics.
+ void Analyze() override;
+
+ private:
+ void HandleEvent(const webrtc::audioproc::Event& event_msg,
+ int& num_forward_chunks_processed,
+ int& init_index);
+ void HandleMessage(const webrtc::audioproc::Init& msg, int init_index);
+ void HandleMessage(const webrtc::audioproc::Stream& msg);
+ void HandleMessage(const webrtc::audioproc::ReverseStream& msg);
+ void HandleMessage(const webrtc::audioproc::Config& msg);
+ void HandleMessage(const webrtc::audioproc::RuntimeSetting& msg);
+ void PrepareProcessStreamCall(const webrtc::audioproc::Stream& msg);
+ void PrepareReverseProcessStreamCall(
+ const webrtc::audioproc::ReverseStream& msg);
+ void VerifyProcessStreamBitExactness(const webrtc::audioproc::Stream& msg);
+ void MaybeOpenCallOrderFile();
+ enum InterfaceType {
+ kFixedInterface,
+ kFloatInterface,
+ kNotSpecified,
+ };
+
+ FILE* dump_input_file_;
+ std::unique_ptr<ChannelBuffer<float>> artificial_nearend_buf_;
+ std::unique_ptr<ChannelBufferWavReader> artificial_nearend_buffer_reader_;
+ bool artificial_nearend_eof_reported_ = false;
+ InterfaceType interface_used_ = InterfaceType::kNotSpecified;
+ std::unique_ptr<std::ofstream> call_order_output_file_;
+ bool finished_processing_specified_init_block_ = false;
+};
+
+} // namespace test
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_TEST_AEC_DUMP_BASED_SIMULATOR_H_
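A hypothetical driver sketch for the class above. The settings field name is
taken from its use in aec_dump_based_simulator.cc, and passing null processing
objects is an assumption for illustration only (Analyze() reads the dump
without invoking an APM instance):

#include "modules/audio_processing/test/aec_dump_based_simulator.h"

void AnalyzeDump() {
  webrtc::test::SimulationSettings settings;
  settings.aec_dump_input_filename = std::string("capture.aecdump");
  webrtc::test::AecDumpBasedSimulator simulator(
      settings, /*audio_processing=*/nullptr, /*ap_builder=*/nullptr);
  simulator.Analyze();  // Prints per-init capture/render times and counts.
}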
diff --git a/third_party/libwebrtc/modules/audio_processing/test/android/apmtest/AndroidManifest.xml b/third_party/libwebrtc/modules/audio_processing/test/android/apmtest/AndroidManifest.xml
new file mode 100644
index 0000000000..c6063b3d76
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/android/apmtest/AndroidManifest.xml
@@ -0,0 +1,30 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- BEGIN_INCLUDE(manifest) -->
+<manifest xmlns:android="http://schemas.android.com/apk/res/android"
+ package="com.example.native_activity"
+ android:versionCode="1"
+ android:versionName="1.0">
+
+ <!-- This is the platform API where NativeActivity was introduced. -->
+ <uses-sdk android:minSdkVersion="8" />
+
+ <!-- This .apk has no Java code itself, so set hasCode to false. -->
+ <application android:label="@string/app_name" android:hasCode="false" android:debuggable="true">
+
+ <!-- Our activity is the built-in NativeActivity framework class.
+ This will take care of integrating with our NDK code. -->
+ <activity android:name="android.app.NativeActivity"
+ android:label="@string/app_name"
+ android:configChanges="orientation|keyboardHidden">
+ <!-- Tell NativeActivity the name of or .so -->
+ <meta-data android:name="android.app.lib_name"
+ android:value="apmtest-activity" />
+ <intent-filter>
+ <action android:name="android.intent.action.MAIN" />
+ <category android:name="android.intent.category.LAUNCHER" />
+ </intent-filter>
+ </activity>
+ </application>
+
+</manifest>
+<!-- END_INCLUDE(manifest) -->
diff --git a/third_party/libwebrtc/modules/audio_processing/test/android/apmtest/default.properties b/third_party/libwebrtc/modules/audio_processing/test/android/apmtest/default.properties
new file mode 100644
index 0000000000..9a2c9f6c88
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/android/apmtest/default.properties
@@ -0,0 +1,11 @@
+# This file is automatically generated by Android Tools.
+# Do not modify this file -- YOUR CHANGES WILL BE ERASED!
+#
+# This file must be checked in Version Control Systems.
+#
+# To customize properties used by the Ant build system use,
+# "build.properties", and override values to adapt the script to your
+# project structure.
+
+# Project target.
+target=android-9
diff --git a/third_party/libwebrtc/modules/audio_processing/test/android/apmtest/jni/main.c b/third_party/libwebrtc/modules/audio_processing/test/android/apmtest/jni/main.c
new file mode 100644
index 0000000000..2e19635683
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/android/apmtest/jni/main.c
@@ -0,0 +1,307 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+//BEGIN_INCLUDE(all)
+#include <jni.h>
+#include <errno.h>
+
+#include <EGL/egl.h>
+#include <GLES/gl.h>
+
+#include <android/sensor.h>
+#include <android/log.h>
+#include <android_native_app_glue.h>
+
+#define LOGI(...) ((void)__android_log_print(ANDROID_LOG_INFO, "native-activity", __VA_ARGS__))
+#define LOGW(...) ((void)__android_log_print(ANDROID_LOG_WARN, "native-activity", __VA_ARGS__))
+
+/**
+ * Our saved state data.
+ */
+struct saved_state {
+ float angle;
+ int32_t x;
+ int32_t y;
+};
+
+/**
+ * Shared state for our app.
+ */
+struct engine {
+ struct android_app* app;
+
+ ASensorManager* sensorManager;
+ const ASensor* accelerometerSensor;
+ ASensorEventQueue* sensorEventQueue;
+
+ int animating;
+ EGLDisplay display;
+ EGLSurface surface;
+ EGLContext context;
+ int32_t width;
+ int32_t height;
+ struct saved_state state;
+};
+
+/**
+ * Initialize an EGL context for the current display.
+ */
+static int engine_init_display(struct engine* engine) {
+ // initialize OpenGL ES and EGL
+
+ /*
+ * Here specify the attributes of the desired configuration.
+ * Below, we select an EGLConfig with at least 8 bits per color
+ * component compatible with on-screen windows
+ */
+ const EGLint attribs[] = {
+ EGL_SURFACE_TYPE, EGL_WINDOW_BIT,
+ EGL_BLUE_SIZE, 8,
+ EGL_GREEN_SIZE, 8,
+ EGL_RED_SIZE, 8,
+ EGL_NONE
+ };
+ EGLint w, h, dummy, format;
+ EGLint numConfigs;
+ EGLConfig config;
+ EGLSurface surface;
+ EGLContext context;
+
+ EGLDisplay display = eglGetDisplay(EGL_DEFAULT_DISPLAY);
+
+ eglInitialize(display, 0, 0);
+
+ /* Here, the application chooses the configuration it desires. In this
+ * sample, we have a very simplified selection process, where we pick
+ * the first EGLConfig that matches our criteria */
+ eglChooseConfig(display, attribs, &config, 1, &numConfigs);
+
+ /* EGL_NATIVE_VISUAL_ID is an attribute of the EGLConfig that is
+ * guaranteed to be accepted by ANativeWindow_setBuffersGeometry().
+ * As soon as we picked a EGLConfig, we can safely reconfigure the
+ * ANativeWindow buffers to match, using EGL_NATIVE_VISUAL_ID. */
+ eglGetConfigAttrib(display, config, EGL_NATIVE_VISUAL_ID, &format);
+
+ ANativeWindow_setBuffersGeometry(engine->app->window, 0, 0, format);
+
+ surface = eglCreateWindowSurface(display, config, engine->app->window, NULL);
+ context = eglCreateContext(display, config, NULL, NULL);
+
+ if (eglMakeCurrent(display, surface, surface, context) == EGL_FALSE) {
+ LOGW("Unable to eglMakeCurrent");
+ return -1;
+ }
+
+ eglQuerySurface(display, surface, EGL_WIDTH, &w);
+ eglQuerySurface(display, surface, EGL_HEIGHT, &h);
+
+ engine->display = display;
+ engine->context = context;
+ engine->surface = surface;
+ engine->width = w;
+ engine->height = h;
+ engine->state.angle = 0;
+
+ // Initialize GL state.
+ glHint(GL_PERSPECTIVE_CORRECTION_HINT, GL_FASTEST);
+ glEnable(GL_CULL_FACE);
+ glShadeModel(GL_SMOOTH);
+ glDisable(GL_DEPTH_TEST);
+
+ return 0;
+}
+
+/**
+ * Draw the current frame in the display.
+ */
+static void engine_draw_frame(struct engine* engine) {
+ if (engine->display == NULL) {
+ // No display.
+ return;
+ }
+
+ // Just fill the screen with a color.
+ glClearColor(((float)engine->state.x)/engine->width, engine->state.angle,
+ ((float)engine->state.y)/engine->height, 1);
+ glClear(GL_COLOR_BUFFER_BIT);
+
+ eglSwapBuffers(engine->display, engine->surface);
+}
+
+/**
+ * Tear down the EGL context currently associated with the display.
+ */
+static void engine_term_display(struct engine* engine) {
+ if (engine->display != EGL_NO_DISPLAY) {
+ eglMakeCurrent(engine->display, EGL_NO_SURFACE, EGL_NO_SURFACE, EGL_NO_CONTEXT);
+ if (engine->context != EGL_NO_CONTEXT) {
+ eglDestroyContext(engine->display, engine->context);
+ }
+ if (engine->surface != EGL_NO_SURFACE) {
+ eglDestroySurface(engine->display, engine->surface);
+ }
+ eglTerminate(engine->display);
+ }
+ engine->animating = 0;
+ engine->display = EGL_NO_DISPLAY;
+ engine->context = EGL_NO_CONTEXT;
+ engine->surface = EGL_NO_SURFACE;
+}
+
+/**
+ * Process the next input event.
+ */
+static int32_t engine_handle_input(struct android_app* app, AInputEvent* event) {
+ struct engine* engine = (struct engine*)app->userData;
+ if (AInputEvent_getType(event) == AINPUT_EVENT_TYPE_MOTION) {
+ engine->animating = 1;
+ engine->state.x = AMotionEvent_getX(event, 0);
+ engine->state.y = AMotionEvent_getY(event, 0);
+ return 1;
+ }
+ return 0;
+}
+
+/**
+ * Process the next main command.
+ */
+static void engine_handle_cmd(struct android_app* app, int32_t cmd) {
+ struct engine* engine = (struct engine*)app->userData;
+ switch (cmd) {
+ case APP_CMD_SAVE_STATE:
+ // The system has asked us to save our current state. Do so.
+ engine->app->savedState = malloc(sizeof(struct saved_state));
+ *((struct saved_state*)engine->app->savedState) = engine->state;
+ engine->app->savedStateSize = sizeof(struct saved_state);
+ break;
+ case APP_CMD_INIT_WINDOW:
+ // The window is being shown, get it ready.
+ if (engine->app->window != NULL) {
+ engine_init_display(engine);
+ engine_draw_frame(engine);
+ }
+ break;
+ case APP_CMD_TERM_WINDOW:
+ // The window is being hidden or closed, clean it up.
+ engine_term_display(engine);
+ break;
+ case APP_CMD_GAINED_FOCUS:
+ // When our app gains focus, we start monitoring the accelerometer.
+ if (engine->accelerometerSensor != NULL) {
+ ASensorEventQueue_enableSensor(engine->sensorEventQueue,
+ engine->accelerometerSensor);
+ // We'd like to get 60 events per second (in us).
+ ASensorEventQueue_setEventRate(engine->sensorEventQueue,
+ engine->accelerometerSensor, (1000L/60)*1000);
+ }
+ break;
+ case APP_CMD_LOST_FOCUS:
+ // When our app loses focus, we stop monitoring the accelerometer.
+ // This is to avoid consuming battery while not being used.
+ if (engine->accelerometerSensor != NULL) {
+ ASensorEventQueue_disableSensor(engine->sensorEventQueue,
+ engine->accelerometerSensor);
+ }
+ // Also stop animating.
+ engine->animating = 0;
+ engine_draw_frame(engine);
+ break;
+ }
+}
+
+/**
+ * This is the main entry point of a native application that is using
+ * android_native_app_glue. It runs in its own thread, with its own
+ * event loop for receiving input events and doing other things.
+ */
+void android_main(struct android_app* state) {
+ struct engine engine;
+
+ // Make sure glue isn't stripped.
+ app_dummy();
+
+ memset(&engine, 0, sizeof(engine));
+ state->userData = &engine;
+ state->onAppCmd = engine_handle_cmd;
+ state->onInputEvent = engine_handle_input;
+ engine.app = state;
+
+ // Prepare to monitor accelerometer
+ engine.sensorManager = ASensorManager_getInstance();
+ engine.accelerometerSensor = ASensorManager_getDefaultSensor(engine.sensorManager,
+ ASENSOR_TYPE_ACCELEROMETER);
+ engine.sensorEventQueue = ASensorManager_createEventQueue(engine.sensorManager,
+ state->looper, LOOPER_ID_USER, NULL, NULL);
+
+ if (state->savedState != NULL) {
+ // We are starting with a previous saved state; restore from it.
+ engine.state = *(struct saved_state*)state->savedState;
+ }
+
+ // loop waiting for stuff to do.
+
+ while (1) {
+ // Read all pending events.
+ int ident;
+ int events;
+ struct android_poll_source* source;
+
+ // If not animating, we will block forever waiting for events.
+ // If animating, we loop until all events are read, then continue
+ // to draw the next frame of animation.
+ while ((ident=ALooper_pollAll(engine.animating ? 0 : -1, NULL, &events,
+ (void**)&source)) >= 0) {
+
+ // Process this event.
+ if (source != NULL) {
+ source->process(state, source);
+ }
+
+ // If a sensor has data, process it now.
+ if (ident == LOOPER_ID_USER) {
+ if (engine.accelerometerSensor != NULL) {
+ ASensorEvent event;
+ while (ASensorEventQueue_getEvents(engine.sensorEventQueue,
+ &event, 1) > 0) {
+ LOGI("accelerometer: x=%f y=%f z=%f",
+ event.acceleration.x, event.acceleration.y,
+ event.acceleration.z);
+ }
+ }
+ }
+
+ // Check if we are exiting.
+ if (state->destroyRequested != 0) {
+ engine_term_display(&engine);
+ return;
+ }
+ }
+
+ if (engine.animating) {
+ // Done with events; draw next animation frame.
+ engine.state.angle += .01f;
+ if (engine.state.angle > 1) {
+ engine.state.angle = 0;
+ }
+
+ // Drawing is throttled to the screen update rate, so there
+ // is no need to do timing here.
+ engine_draw_frame(&engine);
+ }
+ }
+}
+//END_INCLUDE(all)
diff --git a/third_party/libwebrtc/modules/audio_processing/test/android/apmtest/res/values/strings.xml b/third_party/libwebrtc/modules/audio_processing/test/android/apmtest/res/values/strings.xml
new file mode 100644
index 0000000000..d0bd0f3051
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/android/apmtest/res/values/strings.xml
@@ -0,0 +1,4 @@
+<?xml version="1.0" encoding="utf-8"?>
+<resources>
+ <string name="app_name">apmtest</string>
+</resources>
diff --git a/third_party/libwebrtc/modules/audio_processing/test/api_call_statistics.cc b/third_party/libwebrtc/modules/audio_processing/test/api_call_statistics.cc
new file mode 100644
index 0000000000..ee8a308596
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/api_call_statistics.cc
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/test/api_call_statistics.h"
+
+#include <algorithm>
+#include <fstream>
+#include <iostream>
+#include <limits>
+#include <memory>
+#include <string>
+
+#include "absl/strings/string_view.h"
+#include "rtc_base/time_utils.h"
+
+namespace webrtc {
+namespace test {
+
+void ApiCallStatistics::Add(int64_t duration_nanos, CallType call_type) {
+ calls_.push_back(CallData(duration_nanos, call_type));
+}
+
+void ApiCallStatistics::PrintReport() const {
+ int64_t min_render = std::numeric_limits<int64_t>::max();
+ int64_t min_capture = std::numeric_limits<int64_t>::max();
+ int64_t max_render = 0;
+ int64_t max_capture = 0;
+ int64_t sum_render = 0;
+ int64_t sum_capture = 0;
+ int64_t num_render = 0;
+ int64_t num_capture = 0;
+ int64_t avg_render = 0;
+ int64_t avg_capture = 0;
+
+ for (auto v : calls_) {
+ if (v.call_type == CallType::kRender) {
+ ++num_render;
+ min_render = std::min(min_render, v.duration_nanos);
+ max_render = std::max(max_render, v.duration_nanos);
+ sum_render += v.duration_nanos;
+ } else {
+ ++num_capture;
+ min_capture = std::min(min_capture, v.duration_nanos);
+ max_capture = std::max(max_capture, v.duration_nanos);
+ sum_capture += v.duration_nanos;
+ }
+ }
+ min_render /= rtc::kNumNanosecsPerMicrosec;
+ max_render /= rtc::kNumNanosecsPerMicrosec;
+ sum_render /= rtc::kNumNanosecsPerMicrosec;
+ min_capture /= rtc::kNumNanosecsPerMicrosec;
+ max_capture /= rtc::kNumNanosecsPerMicrosec;
+ sum_capture /= rtc::kNumNanosecsPerMicrosec;
+ avg_render = num_render > 0 ? sum_render / num_render : 0;
+ avg_capture = num_capture > 0 ? sum_capture / num_capture : 0;
+
+ std::cout << std::endl
+ << "Total time: " << (sum_capture + sum_render) * 1e-6 << " s"
+ << std::endl
+ << " Render API calls:" << std::endl
+ << " min: " << min_render << " us" << std::endl
+ << " max: " << max_render << " us" << std::endl
+ << " avg: " << avg_render << " us" << std::endl
+ << " Capture API calls:" << std::endl
+ << " min: " << min_capture << " us" << std::endl
+ << " max: " << max_capture << " us" << std::endl
+ << " avg: " << avg_capture << " us" << std::endl;
+}
+
+void ApiCallStatistics::WriteReportToFile(absl::string_view filename) const {
+ std::unique_ptr<std::ofstream> out =
+ std::make_unique<std::ofstream>(std::string(filename));
+ for (auto v : calls_) {
+ if (v.call_type == CallType::kRender) {
+ *out << "render, ";
+ } else {
+ *out << "capture, ";
+ }
+ *out << (v.duration_nanos / rtc::kNumNanosecsPerMicrosec) << std::endl;
+ }
+}
+
+ApiCallStatistics::CallData::CallData(int64_t duration_nanos,
+ CallType call_type)
+ : duration_nanos(duration_nanos), call_type(call_type) {}
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/test/api_call_statistics.h b/third_party/libwebrtc/modules/audio_processing/test/api_call_statistics.h
new file mode 100644
index 0000000000..8fced104f9
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/api_call_statistics.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_TEST_API_CALL_STATISTICS_H_
+#define MODULES_AUDIO_PROCESSING_TEST_API_CALL_STATISTICS_H_
+
+#include <vector>
+
+#include "absl/strings/string_view.h"
+
+namespace webrtc {
+namespace test {
+
+// Collects statistics about the API call durations.
+class ApiCallStatistics {
+ public:
+ enum class CallType { kRender, kCapture };
+
+ // Adds a new datapoint.
+ void Add(int64_t duration_nanos, CallType call_type);
+
+ // Prints out a report of the statistics.
+ void PrintReport() const;
+
+ // Writes the call information to a file.
+ void WriteReportToFile(absl::string_view filename) const;
+
+ private:
+ struct CallData {
+ CallData(int64_t duration_nanos, CallType call_type);
+ int64_t duration_nanos;
+ CallType call_type;
+ };
+ std::vector<CallData> calls_;
+};
+
+} // namespace test
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_TEST_API_CALL_STATISTICS_H_
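A usage sketch for the interface above; the clock is an assumption (any
monotonic nanosecond source works, since the class only consumes durations):

#include <chrono>
#include <cstdint>

#include "modules/audio_processing/test/api_call_statistics.h"

void TimeOneCaptureCall(webrtc::test::ApiCallStatistics& stats) {
  const auto start = std::chrono::steady_clock::now();
  // ... the ProcessStream() call being measured goes here ...
  const auto stop = std::chrono::steady_clock::now();
  const int64_t duration_nanos =
      std::chrono::duration_cast<std::chrono::nanoseconds>(stop - start)
          .count();
  stats.Add(duration_nanos,
            webrtc::test::ApiCallStatistics::CallType::kCapture);
}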
diff --git a/third_party/libwebrtc/modules/audio_processing/test/apmtest.m b/third_party/libwebrtc/modules/audio_processing/test/apmtest.m
new file mode 100644
index 0000000000..1c8183c3ec
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/apmtest.m
@@ -0,0 +1,365 @@
+%
+% Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+%
+% Use of this source code is governed by a BSD-style license
+% that can be found in the LICENSE file in the root of the source
+% tree. An additional intellectual property rights grant can be found
+% in the file PATENTS. All contributing project authors may
+% be found in the AUTHORS file in the root of the source tree.
+%
+
+function apmtest(task, testname, filepath, casenumber, legacy)
+%APMTEST is a tool to process APM file sets and easily display the output.
+% APMTEST(TASK, TESTNAME, CASENUMBER) performs one of several TASKs:
+% 'test' Processes the files to produce test output.
+% 'list' Prints a list of cases in the test set, preceded by their
+% CASENUMBERs.
+% 'show' Uses spclab to show the test case specified by the
+% CASENUMBER parameter.
+%
+% using a set of test files determined by TESTNAME:
+% 'all' All tests.
+% 'apm' The standard APM test set (default).
+% 'apmm' The mobile APM test set.
+% 'aec' The AEC test set.
+% 'aecm' The AECM test set.
+% 'agc' The AGC test set.
+% 'ns' The NS test set.
+% 'vad' The VAD test set.
+%
+% FILEPATH specifies the path to the test data files.
+%
+% CASENUMBER can be used to select a single test case. Omit CASENUMBER,
+% or set to zero, to use all test cases.
+%
+
+if nargin < 5 || isempty(legacy)
+ % Set to true to run old VQE recordings.
+ legacy = false;
+end
+
+if nargin < 4 || isempty(casenumber)
+ casenumber = 0;
+end
+
+if nargin < 3 || isempty(filepath)
+ filepath = 'data/';
+end
+
+if nargin < 2 || isempty(testname)
+ testname = 'all';
+end
+
+if nargin < 1 || isempty(task)
+ task = 'test';
+end
+
+if ~strcmp(task, 'test') && ~strcmp(task, 'list') && ~strcmp(task, 'show')
+ error(['TASK ' task ' is not recognized']);
+end
+
+if casenumber == 0 && strcmp(task, 'show')
+ error(['CASENUMBER must be specified for TASK ' task]);
+end
+
+inpath = [filepath 'input/'];
+outpath = [filepath 'output/'];
+refpath = [filepath 'reference/'];
+
+if strcmp(testname, 'all')
+ tests = {'apm','apmm','aec','aecm','agc','ns','vad'};
+else
+ tests = {testname};
+end
+
+if legacy
+ progname = './test';
+else
+ progname = './process_test';
+end
+
+global farFile;
+global nearFile;
+global eventFile;
+global delayFile;
+global driftFile;
+
+if legacy
+ farFile = 'vqeFar.pcm';
+ nearFile = 'vqeNear.pcm';
+ eventFile = 'vqeEvent.dat';
+ delayFile = 'vqeBuf.dat';
+ driftFile = 'vqeDrift.dat';
+else
+ farFile = 'apm_far.pcm';
+ nearFile = 'apm_near.pcm';
+ eventFile = 'apm_event.dat';
+ delayFile = 'apm_delay.dat';
+ driftFile = 'apm_drift.dat';
+end
+
+simulateMode = false;
+nErr = 0;
+nCases = 0;
+for i=1:length(tests)
+ simulateMode = false;
+
+ if strcmp(tests{i}, 'apm')
+ testdir = ['apm/'];
+ outfile = ['out'];
+ if legacy
+ opt = ['-ec 1 -agc 2 -nc 2 -vad 3'];
+ else
+ opt = ['--no_progress -hpf' ...
+ ' -aec --drift_compensation -agc --fixed_digital' ...
+ ' -ns --ns_moderate -vad'];
+ end
+
+ elseif strcmp(tests{i}, 'apm-swb')
+ simulateMode = true;
+ testdir = ['apm-swb/'];
+ outfile = ['out'];
+ if legacy
+ opt = ['-fs 32000 -ec 1 -agc 2 -nc 2'];
+ else
+ opt = ['--no_progress -fs 32000 -hpf' ...
+ ' -aec --drift_compensation -agc --adaptive_digital' ...
+ ' -ns --ns_moderate -vad'];
+ end
+ elseif strcmp(tests{i}, 'apmm')
+ testdir = ['apmm/'];
+ outfile = ['out'];
+ opt = ['-aec --drift_compensation -agc --fixed_digital -hpf -ns ' ...
+ '--ns_moderate'];
+
+ else
+ error(['TESTNAME ' tests{i} ' is not recognized']);
+ end
+
+ inpathtest = [inpath testdir];
+ outpathtest = [outpath testdir];
+ refpathtest = [refpath testdir];
+
+ if ~exist(inpathtest,'dir')
+ error(['Input directory ' inpathtest ' does not exist']);
+ end
+
+ if ~exist(refpathtest,'dir')
+ warning(['Reference directory ' refpathtest ' does not exist']);
+ end
+
+ [status, errMsg] = mkdir(outpathtest);
+ if (status == 0)
+ error(errMsg);
+ end
+
+ [nErr, nCases] = recurseDir(inpathtest, outpathtest, refpathtest, outfile, ...
+ progname, opt, simulateMode, nErr, nCases, task, casenumber, legacy);
+
+ if strcmp(task, 'test') || strcmp(task, 'show')
+ system(['rm ' farFile]);
+ system(['rm ' nearFile]);
+ if simulateMode == false
+ system(['rm ' eventFile]);
+ system(['rm ' delayFile]);
+ system(['rm ' driftFile]);
+ end
+ end
+end
+
+if ~strcmp(task, 'list')
+ if nErr == 0
+    fprintf(1, '\nAll files are bit-exact to reference\n');
+ else
+ fprintf(1, '\n%d files are NOT bit-exact to reference\n', nErr);
+ end
+end
+
+
+function [nErrOut, nCases] = recurseDir(inpath, outpath, refpath, ...
+ outfile, progname, opt, simulateMode, nErr, nCases, task, casenumber, ...
+ legacy)
+
+global farFile;
+global nearFile;
+global eventFile;
+global delayFile;
+global driftFile;
+
+dirs = dir(inpath);
+nDirs = 0;
+nErrOut = nErr;
+for i=3:length(dirs) % skip . and ..
+ nDirs = nDirs + dirs(i).isdir;
+end
+
+
+if nDirs == 0
+ nCases = nCases + 1;
+
+ if casenumber == nCases || casenumber == 0
+
+ if strcmp(task, 'list')
+ fprintf([num2str(nCases) '. ' outfile '\n'])
+ else
+ vadoutfile = ['vad_' outfile '.dat'];
+ outfile = [outfile '.pcm'];
+
+ % Check for VAD test
+ vadTest = 0;
+ if ~isempty(findstr(opt, '-vad'))
+ vadTest = 1;
+ if legacy
+ opt = [opt ' ' outpath vadoutfile];
+ else
+ opt = [opt ' --vad_out_file ' outpath vadoutfile];
+ end
+ end
+
+ if exist([inpath 'vqeFar.pcm'])
+ system(['ln -s -f ' inpath 'vqeFar.pcm ' farFile]);
+ elseif exist([inpath 'apm_far.pcm'])
+ system(['ln -s -f ' inpath 'apm_far.pcm ' farFile]);
+ end
+
+ if exist([inpath 'vqeNear.pcm'])
+ system(['ln -s -f ' inpath 'vqeNear.pcm ' nearFile]);
+ elseif exist([inpath 'apm_near.pcm'])
+ system(['ln -s -f ' inpath 'apm_near.pcm ' nearFile]);
+ end
+
+ if exist([inpath 'vqeEvent.dat'])
+ system(['ln -s -f ' inpath 'vqeEvent.dat ' eventFile]);
+ elseif exist([inpath 'apm_event.dat'])
+ system(['ln -s -f ' inpath 'apm_event.dat ' eventFile]);
+ end
+
+ if exist([inpath 'vqeBuf.dat'])
+ system(['ln -s -f ' inpath 'vqeBuf.dat ' delayFile]);
+ elseif exist([inpath 'apm_delay.dat'])
+ system(['ln -s -f ' inpath 'apm_delay.dat ' delayFile]);
+ end
+
+ if exist([inpath 'vqeSkew.dat'])
+ system(['ln -s -f ' inpath 'vqeSkew.dat ' driftFile]);
+ elseif exist([inpath 'vqeDrift.dat'])
+ system(['ln -s -f ' inpath 'vqeDrift.dat ' driftFile]);
+ elseif exist([inpath 'apm_drift.dat'])
+ system(['ln -s -f ' inpath 'apm_drift.dat ' driftFile]);
+ end
+
+ if simulateMode == false
+ command = [progname ' -o ' outpath outfile ' ' opt];
+ else
+ if legacy
+ inputCmd = [' -in ' nearFile];
+ else
+ inputCmd = [' -i ' nearFile];
+ end
+
+ if exist([farFile])
+ if legacy
+ inputCmd = [' -if ' farFile inputCmd];
+ else
+ inputCmd = [' -ir ' farFile inputCmd];
+ end
+ end
+ command = [progname inputCmd ' -o ' outpath outfile ' ' opt];
+ end
+ % This prevents MATLAB from using its own C libraries.
+ shellcmd = ['bash -c "unset LD_LIBRARY_PATH;'];
+ fprintf([command '\n']);
+ [status, result] = system([shellcmd command '"']);
+ fprintf(result);
+
+ fprintf(['Reference file: ' refpath outfile '\n']);
+
+ if vadTest == 1
+ equal_to_ref = are_files_equal([outpath vadoutfile], ...
+ [refpath vadoutfile], ...
+ 'int8');
+ if ~equal_to_ref
+ nErr = nErr + 1;
+ end
+ end
+
+ [equal_to_ref, diffvector] = are_files_equal([outpath outfile], ...
+ [refpath outfile], ...
+ 'int16');
+ if ~equal_to_ref
+ nErr = nErr + 1;
+ end
+
+ if strcmp(task, 'show')
+ % Assume the last init gives the sample rate of interest.
+ str_idx = strfind(result, 'Sample rate:');
+ fs = str2num(result(str_idx(end) + 13:str_idx(end) + 17));
+ fprintf('Using %d Hz\n', fs);
+
+ if exist([farFile])
+ spclab(fs, farFile, nearFile, [refpath outfile], ...
+ [outpath outfile], diffvector);
+ %spclab(fs, diffvector);
+ else
+ spclab(fs, nearFile, [refpath outfile], [outpath outfile], ...
+ diffvector);
+ %spclab(fs, diffvector);
+ end
+ end
+ end
+ end
+else
+
+ for i=3:length(dirs)
+ if dirs(i).isdir
+ [nErr, nCases] = recurseDir([inpath dirs(i).name '/'], outpath, ...
+ refpath,[outfile '_' dirs(i).name], progname, opt, ...
+ simulateMode, nErr, nCases, task, casenumber, legacy);
+ end
+ end
+end
+nErrOut = nErr;
+
+function [are_equal, diffvector] = ...
+ are_files_equal(newfile, reffile, precision, diffvector)
+
+are_equal = false;
+diffvector = 0;
+if ~exist(newfile,'file')
+ warning(['Output file ' newfile ' does not exist']);
+ return
+end
+
+if ~exist(reffile,'file')
+ warning(['Reference file ' reffile ' does not exist']);
+ return
+end
+
+fid = fopen(newfile,'rb');
+new = fread(fid,inf,precision);
+fclose(fid);
+
+fid = fopen(reffile,'rb');
+ref = fread(fid,inf,precision);
+fclose(fid);
+
+if length(new) ~= length(ref)
+ warning('Reference is not the same length as output');
+ minlength = min(length(new), length(ref));
+ new = new(1:minlength);
+ ref = ref(1:minlength);
+end
+diffvector = new - ref;
+
+if isequal(new, ref)
+ fprintf([newfile ' is bit-exact to reference\n']);
+ are_equal = true;
+else
+ if isempty(new)
+ warning([newfile ' is empty']);
+ return
+ end
+ snr = snrseg(new,ref,80);
+ fprintf('\n');
+ are_equal = false;
+end
diff --git a/third_party/libwebrtc/modules/audio_processing/test/audio_buffer_tools.cc b/third_party/libwebrtc/modules/audio_processing/test/audio_buffer_tools.cc
new file mode 100644
index 0000000000..64fb9c7ab1
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/audio_buffer_tools.cc
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/test/audio_buffer_tools.h"
+
+#include <string.h>
+
+namespace webrtc {
+namespace test {
+
+void SetupFrame(const StreamConfig& stream_config,
+ std::vector<float*>* frame,
+ std::vector<float>* frame_samples) {
+ frame_samples->resize(stream_config.num_channels() *
+ stream_config.num_frames());
+ frame->resize(stream_config.num_channels());
+ for (size_t ch = 0; ch < stream_config.num_channels(); ++ch) {
+ (*frame)[ch] = &(*frame_samples)[ch * stream_config.num_frames()];
+ }
+}
+
+void CopyVectorToAudioBuffer(const StreamConfig& stream_config,
+ rtc::ArrayView<const float> source,
+ AudioBuffer* destination) {
+ std::vector<float*> input;
+ std::vector<float> input_samples;
+
+ SetupFrame(stream_config, &input, &input_samples);
+
+ RTC_CHECK_EQ(input_samples.size(), source.size());
+ memcpy(input_samples.data(), source.data(),
+ source.size() * sizeof(source[0]));
+
+ destination->CopyFrom(&input[0], stream_config);
+}
+
+void ExtractVectorFromAudioBuffer(const StreamConfig& stream_config,
+ AudioBuffer* source,
+ std::vector<float>* destination) {
+ std::vector<float*> output;
+
+ SetupFrame(stream_config, &output, destination);
+
+ source->CopyTo(stream_config, &output[0]);
+}
+
+void FillBuffer(float value, AudioBuffer& audio_buffer) {
+ for (size_t ch = 0; ch < audio_buffer.num_channels(); ++ch) {
+ FillBufferChannel(value, ch, audio_buffer);
+ }
+}
+
+void FillBufferChannel(float value, int channel, AudioBuffer& audio_buffer) {
+ RTC_CHECK_LT(channel, audio_buffer.num_channels());
+ for (size_t i = 0; i < audio_buffer.num_frames(); ++i) {
+ audio_buffer.channels()[channel][i] = value;
+ }
+}
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/test/audio_buffer_tools.h b/third_party/libwebrtc/modules/audio_processing/test/audio_buffer_tools.h
new file mode 100644
index 0000000000..faac4bf9ff
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/audio_buffer_tools.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_TEST_AUDIO_BUFFER_TOOLS_H_
+#define MODULES_AUDIO_PROCESSING_TEST_AUDIO_BUFFER_TOOLS_H_
+
+#include <vector>
+
+#include "api/array_view.h"
+#include "modules/audio_processing/audio_buffer.h"
+#include "modules/audio_processing/include/audio_processing.h"
+
+namespace webrtc {
+namespace test {
+
+// Copies a vector into an AudioBuffer.
+void CopyVectorToAudioBuffer(const StreamConfig& stream_config,
+ rtc::ArrayView<const float> source,
+ AudioBuffer* destination);
+
+// Extracts a vector from an AudioBuffer.
+void ExtractVectorFromAudioBuffer(const StreamConfig& stream_config,
+ AudioBuffer* source,
+ std::vector<float>* destination);
+
+// Sets all values in `audio_buffer` to `value`.
+void FillBuffer(float value, AudioBuffer& audio_buffer);
+
+// Sets all values in channel `channel` of `audio_buffer` to `value`.
+void FillBufferChannel(float value, int channel, AudioBuffer& audio_buffer);
+
+} // namespace test
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_TEST_AUDIO_BUFFER_TOOLS_H_
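A minimal round-trip sketch of the helpers above (illustrative only, not part
of the patch; the 16 kHz mono format, the fill value, and the function name
are assumptions):

    #include <vector>
    #include "modules/audio_processing/audio_buffer.h"
    #include "modules/audio_processing/test/audio_buffer_tools.h"

    void RoundTripSketch() {
      webrtc::StreamConfig config(/*sample_rate_hz=*/16000, /*num_channels=*/1);
      // AudioBuffer with identical input, processing, and output formats.
      webrtc::AudioBuffer buffer(16000, 1, 16000, 1, 16000, 1);
      // One 10 ms frame: config.num_frames() is 160 at 16 kHz.
      std::vector<float> input(config.num_channels() * config.num_frames(),
                               0.25f);
      webrtc::test::CopyVectorToAudioBuffer(config, input, &buffer);
      // ... run processing on `buffer` here ...
      std::vector<float> output;
      webrtc::test::ExtractVectorFromAudioBuffer(config, &buffer, &output);
      // `output` now holds the buffer contents, one channel at a time.
    }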
diff --git a/third_party/libwebrtc/modules/audio_processing/test/audio_processing_builder_for_testing.cc b/third_party/libwebrtc/modules/audio_processing/test/audio_processing_builder_for_testing.cc
new file mode 100644
index 0000000000..6bd266dc58
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/audio_processing_builder_for_testing.cc
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/test/audio_processing_builder_for_testing.h"
+
+#include <memory>
+#include <utility>
+
+#include "modules/audio_processing/audio_processing_impl.h"
+
+namespace webrtc {
+
+AudioProcessingBuilderForTesting::AudioProcessingBuilderForTesting() = default;
+AudioProcessingBuilderForTesting::~AudioProcessingBuilderForTesting() = default;
+
+#ifdef WEBRTC_EXCLUDE_AUDIO_PROCESSING_MODULE
+
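+// With the production APM excluded from the build, construct
+// AudioProcessingImpl directly rather than going through
+// AudioProcessingBuilder.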
+rtc::scoped_refptr<AudioProcessing> AudioProcessingBuilderForTesting::Create() {
+ return rtc::make_ref_counted<AudioProcessingImpl>(
+ config_, std::move(capture_post_processing_),
+ std::move(render_pre_processing_), std::move(echo_control_factory_),
+ std::move(echo_detector_), std::move(capture_analyzer_));
+}
+
+#else
+
+rtc::scoped_refptr<AudioProcessing> AudioProcessingBuilderForTesting::Create() {
+ AudioProcessingBuilder builder;
+ TransferOwnershipsToBuilder(&builder);
+ return builder.SetConfig(config_).Create();
+}
+
+#endif
+
+void AudioProcessingBuilderForTesting::TransferOwnershipsToBuilder(
+ AudioProcessingBuilder* builder) {
+ builder->SetCapturePostProcessing(std::move(capture_post_processing_));
+ builder->SetRenderPreProcessing(std::move(render_pre_processing_));
+ builder->SetEchoControlFactory(std::move(echo_control_factory_));
+ builder->SetEchoDetector(std::move(echo_detector_));
+ builder->SetCaptureAnalyzer(std::move(capture_analyzer_));
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/test/audio_processing_builder_for_testing.h b/third_party/libwebrtc/modules/audio_processing/test/audio_processing_builder_for_testing.h
new file mode 100644
index 0000000000..e73706c1b6
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/audio_processing_builder_for_testing.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_TEST_AUDIO_PROCESSING_BUILDER_FOR_TESTING_H_
+#define MODULES_AUDIO_PROCESSING_TEST_AUDIO_PROCESSING_BUILDER_FOR_TESTING_H_
+
+#include <list>
+#include <memory>
+#include <utility>
+#include <vector>
+
+#include "modules/audio_processing/include/audio_processing.h"
+
+namespace webrtc {
+
+// Facilitates building of AudioProcessingImpl for the tests.
+class AudioProcessingBuilderForTesting {
+ public:
+ AudioProcessingBuilderForTesting();
+ AudioProcessingBuilderForTesting(const AudioProcessingBuilderForTesting&) =
+ delete;
+ AudioProcessingBuilderForTesting& operator=(
+ const AudioProcessingBuilderForTesting&) = delete;
+ ~AudioProcessingBuilderForTesting();
+
+ // Sets the APM configuration.
+ AudioProcessingBuilderForTesting& SetConfig(
+ const AudioProcessing::Config& config) {
+ config_ = config;
+ return *this;
+ }
+
+ // Sets the echo controller factory to inject when APM is created.
+ AudioProcessingBuilderForTesting& SetEchoControlFactory(
+ std::unique_ptr<EchoControlFactory> echo_control_factory) {
+ echo_control_factory_ = std::move(echo_control_factory);
+ return *this;
+ }
+
+ // Sets the capture post-processing sub-module to inject when APM is created.
+ AudioProcessingBuilderForTesting& SetCapturePostProcessing(
+ std::unique_ptr<CustomProcessing> capture_post_processing) {
+ capture_post_processing_ = std::move(capture_post_processing);
+ return *this;
+ }
+
+ // Sets the render pre-processing sub-module to inject when APM is created.
+ AudioProcessingBuilderForTesting& SetRenderPreProcessing(
+ std::unique_ptr<CustomProcessing> render_pre_processing) {
+ render_pre_processing_ = std::move(render_pre_processing);
+ return *this;
+ }
+
+ // Sets the echo detector to inject when APM is created.
+ AudioProcessingBuilderForTesting& SetEchoDetector(
+ rtc::scoped_refptr<EchoDetector> echo_detector) {
+ echo_detector_ = std::move(echo_detector);
+ return *this;
+ }
+
+ // Sets the capture analyzer sub-module to inject when APM is created.
+ AudioProcessingBuilderForTesting& SetCaptureAnalyzer(
+ std::unique_ptr<CustomAudioAnalyzer> capture_analyzer) {
+ capture_analyzer_ = std::move(capture_analyzer);
+ return *this;
+ }
+
+  // Creates an APM instance with the specified config, or the default one if
+  // unspecified. Injects the specified components, transferring their
+  // ownership to the newly created APM instance; i.e., except for the config,
+  // the builder is reset to its initial state.
+ rtc::scoped_refptr<AudioProcessing> Create();
+
+ private:
+ // Transfers the ownership to a non-testing builder.
+ void TransferOwnershipsToBuilder(AudioProcessingBuilder* builder);
+
+ AudioProcessing::Config config_;
+ std::unique_ptr<EchoControlFactory> echo_control_factory_;
+ std::unique_ptr<CustomProcessing> capture_post_processing_;
+ std::unique_ptr<CustomProcessing> render_pre_processing_;
+ rtc::scoped_refptr<EchoDetector> echo_detector_;
+ std::unique_ptr<CustomAudioAnalyzer> capture_analyzer_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_TEST_AUDIO_PROCESSING_BUILDER_FOR_TESTING_H_
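A usage sketch for the builder (illustrative, not part of the patch; the
enabled components are arbitrary choices):

    webrtc::AudioProcessing::Config config;
    config.echo_canceller.enabled = true;
    config.noise_suppression.enabled = true;
    rtc::scoped_refptr<webrtc::AudioProcessing> apm =
        webrtc::AudioProcessingBuilderForTesting().SetConfig(config).Create();
    RTC_CHECK(apm);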
diff --git a/third_party/libwebrtc/modules/audio_processing/test/audio_processing_simulator.cc b/third_party/libwebrtc/modules/audio_processing/test/audio_processing_simulator.cc
new file mode 100644
index 0000000000..b29027c35e
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/audio_processing_simulator.cc
@@ -0,0 +1,609 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/test/audio_processing_simulator.h"
+
+#include <algorithm>
+#include <fstream>
+#include <iostream>
+#include <memory>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "absl/strings/string_view.h"
+#include "api/audio/echo_canceller3_config_json.h"
+#include "api/audio/echo_canceller3_factory.h"
+#include "api/audio/echo_detector_creator.h"
+#include "modules/audio_processing/aec_dump/aec_dump_factory.h"
+#include "modules/audio_processing/echo_control_mobile_impl.h"
+#include "modules/audio_processing/include/audio_processing.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+#include "modules/audio_processing/test/fake_recording_device.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/strings/json.h"
+#include "rtc_base/strings/string_builder.h"
+
+namespace webrtc {
+namespace test {
+namespace {
+// Helper for reading JSON from a file and parsing it to an AEC3 configuration.
+EchoCanceller3Config ReadAec3ConfigFromJsonFile(absl::string_view filename) {
+ std::string json_string;
+ std::string s;
+ std::ifstream f(std::string(filename).c_str());
+ if (f.fail()) {
+ std::cout << "Failed to open the file " << filename << std::endl;
+ RTC_CHECK_NOTREACHED();
+ }
+ while (std::getline(f, s)) {
+ json_string += s;
+ }
+
+ bool parsing_successful;
+ EchoCanceller3Config cfg;
+ Aec3ConfigFromJsonString(json_string, &cfg, &parsing_successful);
+ if (!parsing_successful) {
+ std::cout << "Parsing of json string failed: " << std::endl
+ << json_string << std::endl;
+ RTC_CHECK_NOTREACHED();
+ }
+ RTC_CHECK(EchoCanceller3Config::Validate(&cfg));
+
+ return cfg;
+}
+
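+// Inserts the counter before the file extension, e.g.
+// ("out.wav", 3) -> "out_3.wav".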
+std::string GetIndexedOutputWavFilename(absl::string_view wav_name,
+ int counter) {
+ rtc::StringBuilder ss;
+ ss << wav_name.substr(0, wav_name.size() - 4) << "_" << counter
+ << wav_name.substr(wav_name.size() - 4);
+ return ss.Release();
+}
+
+void WriteEchoLikelihoodGraphFileHeader(std::ofstream* output_file) {
+ (*output_file) << "import numpy as np" << std::endl
+ << "import matplotlib.pyplot as plt" << std::endl
+ << "y = np.array([";
+}
+
+void WriteEchoLikelihoodGraphFileFooter(std::ofstream* output_file) {
+ (*output_file) << "])" << std::endl
+ << "if __name__ == '__main__':" << std::endl
+ << " x = np.arange(len(y))*.01" << std::endl
+ << " plt.plot(x, y)" << std::endl
+ << " plt.ylabel('Echo likelihood')" << std::endl
+ << " plt.xlabel('Time (s)')" << std::endl
+ << " plt.show()" << std::endl;
+}
+
+// RAII class for execution time measurement. Updates the provided
+// ApiCallStatistics based on the time between ScopedTimer creation and
+// leaving the enclosing scope.
+class ScopedTimer {
+ public:
+ ScopedTimer(ApiCallStatistics* api_call_statistics,
+ ApiCallStatistics::CallType call_type)
+ : start_time_(rtc::TimeNanos()),
+ call_type_(call_type),
+ api_call_statistics_(api_call_statistics) {}
+
+ ~ScopedTimer() {
+ api_call_statistics_->Add(rtc::TimeNanos() - start_time_, call_type_);
+ }
+
+ private:
+ const int64_t start_time_;
+ const ApiCallStatistics::CallType call_type_;
+ ApiCallStatistics* const api_call_statistics_;
+};
+
+} // namespace
+
+SimulationSettings::SimulationSettings() = default;
+SimulationSettings::SimulationSettings(const SimulationSettings&) = default;
+SimulationSettings::~SimulationSettings() = default;
+
+AudioProcessingSimulator::AudioProcessingSimulator(
+ const SimulationSettings& settings,
+ rtc::scoped_refptr<AudioProcessing> audio_processing,
+ std::unique_ptr<AudioProcessingBuilder> ap_builder)
+ : settings_(settings),
+ ap_(std::move(audio_processing)),
+ analog_mic_level_(settings.initial_mic_level),
+ fake_recording_device_(
+ settings.initial_mic_level,
+ settings_.simulate_mic_gain ? *settings.simulated_mic_kind : 0),
+ worker_queue_("file_writer_task_queue") {
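+  // Dumping internal data requires building with WEBRTC_APM_DEBUG_DUMP=1.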
+ RTC_CHECK(!settings_.dump_internal_data || WEBRTC_APM_DEBUG_DUMP == 1);
+ if (settings_.dump_start_frame || settings_.dump_end_frame) {
+ ApmDataDumper::SetActivated(!settings_.dump_start_frame);
+ } else {
+ ApmDataDumper::SetActivated(settings_.dump_internal_data);
+ }
+
+ if (settings_.dump_set_to_use) {
+ ApmDataDumper::SetDumpSetToUse(*settings_.dump_set_to_use);
+ }
+
+ if (settings_.dump_internal_data_output_dir.has_value()) {
+ ApmDataDumper::SetOutputDirectory(
+ settings_.dump_internal_data_output_dir.value());
+ }
+
+ if (settings_.ed_graph_output_filename &&
+ !settings_.ed_graph_output_filename->empty()) {
+ residual_echo_likelihood_graph_writer_.open(
+ *settings_.ed_graph_output_filename);
+ RTC_CHECK(residual_echo_likelihood_graph_writer_.is_open());
+ WriteEchoLikelihoodGraphFileHeader(&residual_echo_likelihood_graph_writer_);
+ }
+
+ if (settings_.simulate_mic_gain)
+ RTC_LOG(LS_VERBOSE) << "Simulating analog mic gain";
+
+ // Create the audio processing object.
+ RTC_CHECK(!(ap_ && ap_builder))
+ << "The AudioProcessing and the AudioProcessingBuilder cannot both be "
+ "specified at the same time.";
+
+ if (ap_) {
+ RTC_CHECK(!settings_.aec_settings_filename);
+ RTC_CHECK(!settings_.print_aec_parameter_values);
+ } else {
+    // Use the specified builder if one is provided; otherwise create a new one.
+ std::unique_ptr<AudioProcessingBuilder> builder =
+ !!ap_builder ? std::move(ap_builder)
+ : std::make_unique<AudioProcessingBuilder>();
+
+ // Create and set an EchoCanceller3Factory if needed.
+ const bool use_aec = settings_.use_aec && *settings_.use_aec;
+ if (use_aec) {
+ EchoCanceller3Config cfg;
+ if (settings_.aec_settings_filename) {
+ if (settings_.use_verbose_logging) {
+ std::cout << "Reading AEC Parameters from JSON input." << std::endl;
+ }
+ cfg = ReadAec3ConfigFromJsonFile(*settings_.aec_settings_filename);
+ }
+
+ if (settings_.linear_aec_output_filename) {
+ cfg.filter.export_linear_aec_output = true;
+ }
+
+ if (settings_.print_aec_parameter_values) {
+ if (!settings_.use_quiet_output) {
+ std::cout << "AEC settings:" << std::endl;
+ }
+ std::cout << Aec3ConfigToJsonString(cfg) << std::endl;
+ }
+
+ auto echo_control_factory = std::make_unique<EchoCanceller3Factory>(cfg);
+ builder->SetEchoControlFactory(std::move(echo_control_factory));
+ }
+
+ if (settings_.use_ed && *settings.use_ed) {
+ builder->SetEchoDetector(CreateEchoDetector());
+ }
+
+ // Create an audio processing object.
+ ap_ = builder->Create();
+ RTC_CHECK(ap_);
+ }
+}
+
+AudioProcessingSimulator::~AudioProcessingSimulator() {
+ if (residual_echo_likelihood_graph_writer_.is_open()) {
+ WriteEchoLikelihoodGraphFileFooter(&residual_echo_likelihood_graph_writer_);
+ residual_echo_likelihood_graph_writer_.close();
+ }
+}
+
+void AudioProcessingSimulator::ProcessStream(bool fixed_interface) {
+ // Optionally use the fake recording device to simulate analog gain.
+ if (settings_.simulate_mic_gain) {
+ if (settings_.aec_dump_input_filename) {
+ // When the analog gain is simulated and an AEC dump is used as input, set
+ // the undo level to `aec_dump_mic_level_` to virtually restore the
+ // unmodified microphone signal level.
+ fake_recording_device_.SetUndoMicLevel(aec_dump_mic_level_);
+ }
+
+ if (fixed_interface) {
+ fake_recording_device_.SimulateAnalogGain(fwd_frame_.data);
+ } else {
+ fake_recording_device_.SimulateAnalogGain(in_buf_.get());
+ }
+
+ // Notify the current mic level to AGC.
+ ap_->set_stream_analog_level(fake_recording_device_.MicLevel());
+ } else {
+ // Notify the current mic level to AGC.
+ ap_->set_stream_analog_level(settings_.aec_dump_input_filename
+ ? aec_dump_mic_level_
+ : analog_mic_level_);
+ }
+
+ // Post any scheduled runtime settings.
+ if (settings_.frame_for_sending_capture_output_used_false &&
+ *settings_.frame_for_sending_capture_output_used_false ==
+ static_cast<int>(num_process_stream_calls_)) {
+ ap_->PostRuntimeSetting(
+ AudioProcessing::RuntimeSetting::CreateCaptureOutputUsedSetting(false));
+ }
+ if (settings_.frame_for_sending_capture_output_used_true &&
+ *settings_.frame_for_sending_capture_output_used_true ==
+ static_cast<int>(num_process_stream_calls_)) {
+ ap_->PostRuntimeSetting(
+ AudioProcessing::RuntimeSetting::CreateCaptureOutputUsedSetting(true));
+ }
+
+ // Process the current audio frame.
+ if (fixed_interface) {
+ {
+ const auto st = ScopedTimer(&api_call_statistics_,
+ ApiCallStatistics::CallType::kCapture);
+ RTC_CHECK_EQ(
+ AudioProcessing::kNoError,
+ ap_->ProcessStream(fwd_frame_.data.data(), fwd_frame_.config,
+ fwd_frame_.config, fwd_frame_.data.data()));
+ }
+ fwd_frame_.CopyTo(out_buf_.get());
+ } else {
+ const auto st = ScopedTimer(&api_call_statistics_,
+ ApiCallStatistics::CallType::kCapture);
+ RTC_CHECK_EQ(AudioProcessing::kNoError,
+ ap_->ProcessStream(in_buf_->channels(), in_config_,
+ out_config_, out_buf_->channels()));
+ }
+
+ // Store the mic level suggested by AGC.
+ // Note that when the analog gain is simulated and an AEC dump is used as
+ // input, `analog_mic_level_` will not be used with set_stream_analog_level().
+ analog_mic_level_ = ap_->recommended_stream_analog_level();
+ if (settings_.simulate_mic_gain) {
+ fake_recording_device_.SetMicLevel(analog_mic_level_);
+ }
+ if (buffer_memory_writer_) {
+ RTC_CHECK(!buffer_file_writer_);
+ buffer_memory_writer_->Write(*out_buf_);
+ } else if (buffer_file_writer_) {
+ RTC_CHECK(!buffer_memory_writer_);
+ buffer_file_writer_->Write(*out_buf_);
+ }
+
+ if (linear_aec_output_file_writer_) {
+ bool output_available = ap_->GetLinearAecOutput(linear_aec_output_buf_);
+ RTC_CHECK(output_available);
+ RTC_CHECK_GT(linear_aec_output_buf_.size(), 0);
+ RTC_CHECK_EQ(linear_aec_output_buf_[0].size(), 160);
+ for (size_t k = 0; k < linear_aec_output_buf_[0].size(); ++k) {
+ for (size_t ch = 0; ch < linear_aec_output_buf_.size(); ++ch) {
+ RTC_CHECK_EQ(linear_aec_output_buf_[ch].size(), 160);
+ float sample = FloatToFloatS16(linear_aec_output_buf_[ch][k]);
+ linear_aec_output_file_writer_->WriteSamples(&sample, 1);
+ }
+ }
+ }
+
+ if (residual_echo_likelihood_graph_writer_.is_open()) {
+ auto stats = ap_->GetStatistics();
+ residual_echo_likelihood_graph_writer_
+ << stats.residual_echo_likelihood.value_or(-1.f) << ", ";
+ }
+
+ ++num_process_stream_calls_;
+}
+
+void AudioProcessingSimulator::ProcessReverseStream(bool fixed_interface) {
+ if (fixed_interface) {
+ {
+ const auto st = ScopedTimer(&api_call_statistics_,
+ ApiCallStatistics::CallType::kRender);
+ RTC_CHECK_EQ(
+ AudioProcessing::kNoError,
+ ap_->ProcessReverseStream(rev_frame_.data.data(), rev_frame_.config,
+ rev_frame_.config, rev_frame_.data.data()));
+ }
+ rev_frame_.CopyTo(reverse_out_buf_.get());
+ } else {
+ const auto st = ScopedTimer(&api_call_statistics_,
+ ApiCallStatistics::CallType::kRender);
+ RTC_CHECK_EQ(AudioProcessing::kNoError,
+ ap_->ProcessReverseStream(
+ reverse_in_buf_->channels(), reverse_in_config_,
+ reverse_out_config_, reverse_out_buf_->channels()));
+ }
+
+ if (reverse_buffer_file_writer_) {
+ reverse_buffer_file_writer_->Write(*reverse_out_buf_);
+ }
+
+ ++num_reverse_process_stream_calls_;
+}
+
+void AudioProcessingSimulator::SetupBuffersConfigsOutputs(
+ int input_sample_rate_hz,
+ int output_sample_rate_hz,
+ int reverse_input_sample_rate_hz,
+ int reverse_output_sample_rate_hz,
+ int input_num_channels,
+ int output_num_channels,
+ int reverse_input_num_channels,
+ int reverse_output_num_channels) {
+ in_config_ = StreamConfig(input_sample_rate_hz, input_num_channels);
+ in_buf_.reset(new ChannelBuffer<float>(
+ rtc::CheckedDivExact(input_sample_rate_hz, kChunksPerSecond),
+ input_num_channels));
+
+ reverse_in_config_ =
+ StreamConfig(reverse_input_sample_rate_hz, reverse_input_num_channels);
+ reverse_in_buf_.reset(new ChannelBuffer<float>(
+ rtc::CheckedDivExact(reverse_input_sample_rate_hz, kChunksPerSecond),
+ reverse_input_num_channels));
+
+ out_config_ = StreamConfig(output_sample_rate_hz, output_num_channels);
+ out_buf_.reset(new ChannelBuffer<float>(
+ rtc::CheckedDivExact(output_sample_rate_hz, kChunksPerSecond),
+ output_num_channels));
+
+ reverse_out_config_ =
+ StreamConfig(reverse_output_sample_rate_hz, reverse_output_num_channels);
+ reverse_out_buf_.reset(new ChannelBuffer<float>(
+ rtc::CheckedDivExact(reverse_output_sample_rate_hz, kChunksPerSecond),
+ reverse_output_num_channels));
+
+ fwd_frame_.SetFormat(input_sample_rate_hz, input_num_channels);
+ rev_frame_.SetFormat(reverse_input_sample_rate_hz,
+ reverse_input_num_channels);
+
+ if (settings_.use_verbose_logging) {
+ rtc::LogMessage::LogToDebug(rtc::LS_VERBOSE);
+
+ std::cout << "Sample rates:" << std::endl;
+ std::cout << " Forward input: " << input_sample_rate_hz << std::endl;
+ std::cout << " Forward output: " << output_sample_rate_hz << std::endl;
+ std::cout << " Reverse input: " << reverse_input_sample_rate_hz
+ << std::endl;
+ std::cout << " Reverse output: " << reverse_output_sample_rate_hz
+ << std::endl;
+ std::cout << "Number of channels: " << std::endl;
+ std::cout << " Forward input: " << input_num_channels << std::endl;
+ std::cout << " Forward output: " << output_num_channels << std::endl;
+ std::cout << " Reverse input: " << reverse_input_num_channels << std::endl;
+ std::cout << " Reverse output: " << reverse_output_num_channels
+ << std::endl;
+ }
+
+ SetupOutput();
+}
+
+void AudioProcessingSimulator::SelectivelyToggleDataDumping(
+ int init_index,
+ int capture_frames_since_init) const {
+ if (!(settings_.dump_start_frame || settings_.dump_end_frame)) {
+ return;
+ }
+
+ if (settings_.init_to_process && *settings_.init_to_process != init_index) {
+ return;
+ }
+
+ if (settings_.dump_start_frame &&
+ *settings_.dump_start_frame == capture_frames_since_init) {
+ ApmDataDumper::SetActivated(true);
+ }
+
+ if (settings_.dump_end_frame &&
+ *settings_.dump_end_frame == capture_frames_since_init) {
+ ApmDataDumper::SetActivated(false);
+ }
+}
+
+void AudioProcessingSimulator::SetupOutput() {
+ if (settings_.output_filename) {
+ std::string filename;
+ if (settings_.store_intermediate_output) {
+ filename = GetIndexedOutputWavFilename(*settings_.output_filename,
+ output_reset_counter_);
+ } else {
+ filename = *settings_.output_filename;
+ }
+
+ std::unique_ptr<WavWriter> out_file(
+ new WavWriter(filename, out_config_.sample_rate_hz(),
+ static_cast<size_t>(out_config_.num_channels()),
+ settings_.wav_output_format));
+ buffer_file_writer_.reset(new ChannelBufferWavWriter(std::move(out_file)));
+ } else if (settings_.aec_dump_input_string.has_value()) {
+ buffer_memory_writer_ = std::make_unique<ChannelBufferVectorWriter>(
+ settings_.processed_capture_samples);
+ }
+
+ if (settings_.linear_aec_output_filename) {
+ std::string filename;
+ if (settings_.store_intermediate_output) {
+ filename = GetIndexedOutputWavFilename(
+ *settings_.linear_aec_output_filename, output_reset_counter_);
+ } else {
+ filename = *settings_.linear_aec_output_filename;
+ }
+
+ linear_aec_output_file_writer_.reset(
+ new WavWriter(filename, 16000, out_config_.num_channels(),
+ settings_.wav_output_format));
+
+ linear_aec_output_buf_.resize(out_config_.num_channels());
+ }
+
+ if (settings_.reverse_output_filename) {
+ std::string filename;
+ if (settings_.store_intermediate_output) {
+ filename = GetIndexedOutputWavFilename(*settings_.reverse_output_filename,
+ output_reset_counter_);
+ } else {
+ filename = *settings_.reverse_output_filename;
+ }
+
+ std::unique_ptr<WavWriter> reverse_out_file(
+ new WavWriter(filename, reverse_out_config_.sample_rate_hz(),
+ static_cast<size_t>(reverse_out_config_.num_channels()),
+ settings_.wav_output_format));
+ reverse_buffer_file_writer_.reset(
+ new ChannelBufferWavWriter(std::move(reverse_out_file)));
+ }
+
+ ++output_reset_counter_;
+}
+
+void AudioProcessingSimulator::DetachAecDump() {
+ if (settings_.aec_dump_output_filename) {
+ ap_->DetachAecDump();
+ }
+}
+
+void AudioProcessingSimulator::ConfigureAudioProcessor() {
+ AudioProcessing::Config apm_config;
+ if (settings_.use_ts) {
+ apm_config.transient_suppression.enabled = *settings_.use_ts != 0;
+ }
+ if (settings_.multi_channel_render) {
+ apm_config.pipeline.multi_channel_render = *settings_.multi_channel_render;
+ }
+
+ if (settings_.multi_channel_capture) {
+ apm_config.pipeline.multi_channel_capture =
+ *settings_.multi_channel_capture;
+ }
+
+ if (settings_.use_agc2) {
+ apm_config.gain_controller2.enabled = *settings_.use_agc2;
+ if (settings_.agc2_fixed_gain_db) {
+ apm_config.gain_controller2.fixed_digital.gain_db =
+ *settings_.agc2_fixed_gain_db;
+ }
+ if (settings_.agc2_use_adaptive_gain) {
+ apm_config.gain_controller2.adaptive_digital.enabled =
+ *settings_.agc2_use_adaptive_gain;
+ }
+ }
+ if (settings_.use_pre_amplifier) {
+ apm_config.pre_amplifier.enabled = *settings_.use_pre_amplifier;
+ if (settings_.pre_amplifier_gain_factor) {
+ apm_config.pre_amplifier.fixed_gain_factor =
+ *settings_.pre_amplifier_gain_factor;
+ }
+ }
+
+ if (settings_.use_analog_mic_gain_emulation) {
+ if (*settings_.use_analog_mic_gain_emulation) {
+ apm_config.capture_level_adjustment.enabled = true;
+ apm_config.capture_level_adjustment.analog_mic_gain_emulation.enabled =
+ true;
+ } else {
+ apm_config.capture_level_adjustment.analog_mic_gain_emulation.enabled =
+ false;
+ }
+ }
+ if (settings_.analog_mic_gain_emulation_initial_level) {
+ apm_config.capture_level_adjustment.analog_mic_gain_emulation
+ .initial_level = *settings_.analog_mic_gain_emulation_initial_level;
+ }
+
+ if (settings_.use_capture_level_adjustment) {
+ apm_config.capture_level_adjustment.enabled =
+ *settings_.use_capture_level_adjustment;
+ }
+ if (settings_.pre_gain_factor) {
+ apm_config.capture_level_adjustment.pre_gain_factor =
+ *settings_.pre_gain_factor;
+ }
+ if (settings_.post_gain_factor) {
+ apm_config.capture_level_adjustment.post_gain_factor =
+ *settings_.post_gain_factor;
+ }
+
+ const bool use_aec = settings_.use_aec && *settings_.use_aec;
+ const bool use_aecm = settings_.use_aecm && *settings_.use_aecm;
+ if (use_aec || use_aecm) {
+ apm_config.echo_canceller.enabled = true;
+ apm_config.echo_canceller.mobile_mode = use_aecm;
+ }
+ apm_config.echo_canceller.export_linear_aec_output =
+ !!settings_.linear_aec_output_filename;
+
+ if (settings_.use_hpf) {
+ apm_config.high_pass_filter.enabled = *settings_.use_hpf;
+ }
+
+ if (settings_.use_agc) {
+ apm_config.gain_controller1.enabled = *settings_.use_agc;
+ }
+ if (settings_.agc_mode) {
+ apm_config.gain_controller1.mode =
+ static_cast<webrtc::AudioProcessing::Config::GainController1::Mode>(
+ *settings_.agc_mode);
+ }
+ if (settings_.use_agc_limiter) {
+ apm_config.gain_controller1.enable_limiter = *settings_.use_agc_limiter;
+ }
+ if (settings_.agc_target_level) {
+ apm_config.gain_controller1.target_level_dbfs = *settings_.agc_target_level;
+ }
+ if (settings_.agc_compression_gain) {
+ apm_config.gain_controller1.compression_gain_db =
+ *settings_.agc_compression_gain;
+ }
+ if (settings_.use_analog_agc) {
+ apm_config.gain_controller1.analog_gain_controller.enabled =
+ *settings_.use_analog_agc;
+ }
+ if (settings_.analog_agc_use_digital_adaptive_controller) {
+ apm_config.gain_controller1.analog_gain_controller.enable_digital_adaptive =
+ *settings_.analog_agc_use_digital_adaptive_controller;
+ }
+
+ if (settings_.maximum_internal_processing_rate) {
+ apm_config.pipeline.maximum_internal_processing_rate =
+ *settings_.maximum_internal_processing_rate;
+ }
+
+ if (settings_.use_ns) {
+ apm_config.noise_suppression.enabled = *settings_.use_ns;
+ }
+ if (settings_.ns_level) {
+ const int level = *settings_.ns_level;
+ RTC_CHECK_GE(level, 0);
+ RTC_CHECK_LE(level, 3);
+ apm_config.noise_suppression.level =
+ static_cast<AudioProcessing::Config::NoiseSuppression::Level>(level);
+ }
+ if (settings_.ns_analysis_on_linear_aec_output) {
+ apm_config.noise_suppression.analyze_linear_aec_output_when_available =
+ *settings_.ns_analysis_on_linear_aec_output;
+ }
+
+ ap_->ApplyConfig(apm_config);
+
+ if (settings_.use_ts) {
+ // Default to key pressed if activating the transient suppressor with
+ // continuous key events.
+ ap_->set_stream_key_pressed(*settings_.use_ts == 2);
+ }
+
+ if (settings_.aec_dump_output_filename) {
+ ap_->AttachAecDump(AecDumpFactory::Create(
+ *settings_.aec_dump_output_filename, -1, &worker_queue_));
+ }
+}
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/test/audio_processing_simulator.h b/third_party/libwebrtc/modules/audio_processing/test/audio_processing_simulator.h
new file mode 100644
index 0000000000..b63bc12d6f
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/audio_processing_simulator.h
@@ -0,0 +1,247 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_TEST_AUDIO_PROCESSING_SIMULATOR_H_
+#define MODULES_AUDIO_PROCESSING_TEST_AUDIO_PROCESSING_SIMULATOR_H_
+
+#include <algorithm>
+#include <fstream>
+#include <limits>
+#include <memory>
+#include <string>
+
+#include "absl/types/optional.h"
+#include "common_audio/channel_buffer.h"
+#include "common_audio/include/audio_util.h"
+#include "modules/audio_processing/include/audio_processing.h"
+#include "modules/audio_processing/test/api_call_statistics.h"
+#include "modules/audio_processing/test/fake_recording_device.h"
+#include "modules/audio_processing/test/test_utils.h"
+#include "rtc_base/task_queue_for_test.h"
+#include "rtc_base/time_utils.h"
+
+namespace webrtc {
+namespace test {
+
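+// AudioProcessing operates on 10 ms chunks (kChunkSizeMs = 10), so there are
+// 100 chunks per second.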
+static const int kChunksPerSecond = 1000 / AudioProcessing::kChunkSizeMs;
+
+struct Int16Frame {
+ void SetFormat(int sample_rate_hz, int num_channels) {
+ this->sample_rate_hz = sample_rate_hz;
+ samples_per_channel =
+ rtc::CheckedDivExact(sample_rate_hz, kChunksPerSecond);
+ this->num_channels = num_channels;
+ config = StreamConfig(sample_rate_hz, num_channels);
+ data.resize(num_channels * samples_per_channel);
+ }
+
+ void CopyTo(ChannelBuffer<float>* dest) {
+ RTC_DCHECK(dest);
+ RTC_CHECK_EQ(num_channels, dest->num_channels());
+ RTC_CHECK_EQ(samples_per_channel, dest->num_frames());
+ // Copy the data from the input buffer.
+ std::vector<float> tmp(samples_per_channel * num_channels);
+ S16ToFloat(data.data(), tmp.size(), tmp.data());
+ Deinterleave(tmp.data(), samples_per_channel, num_channels,
+ dest->channels());
+ }
+
+ void CopyFrom(const ChannelBuffer<float>& src) {
+ RTC_CHECK_EQ(src.num_channels(), num_channels);
+ RTC_CHECK_EQ(src.num_frames(), samples_per_channel);
+ data.resize(num_channels * samples_per_channel);
+ int16_t* dest_data = data.data();
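+    // Interleave and scale the [-1, 1] float samples to int16.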
+ for (int ch = 0; ch < num_channels; ++ch) {
+ for (int sample = 0; sample < samples_per_channel; ++sample) {
+ dest_data[sample * num_channels + ch] =
+ src.channels()[ch][sample] * 32767;
+ }
+ }
+ }
+
+ int sample_rate_hz;
+ int samples_per_channel;
+ int num_channels;
+
+ StreamConfig config;
+
+ std::vector<int16_t> data;
+};
+
+// Holds all the parameters available for controlling the simulation.
+struct SimulationSettings {
+ SimulationSettings();
+ SimulationSettings(const SimulationSettings&);
+ ~SimulationSettings();
+ absl::optional<int> stream_delay;
+ absl::optional<bool> use_stream_delay;
+ absl::optional<int> output_sample_rate_hz;
+ absl::optional<int> output_num_channels;
+ absl::optional<int> reverse_output_sample_rate_hz;
+ absl::optional<int> reverse_output_num_channels;
+ absl::optional<std::string> output_filename;
+ absl::optional<std::string> reverse_output_filename;
+ absl::optional<std::string> input_filename;
+ absl::optional<std::string> reverse_input_filename;
+ absl::optional<std::string> artificial_nearend_filename;
+ absl::optional<std::string> linear_aec_output_filename;
+ absl::optional<bool> use_aec;
+ absl::optional<bool> use_aecm;
+ absl::optional<bool> use_ed; // Residual Echo Detector.
+ absl::optional<std::string> ed_graph_output_filename;
+ absl::optional<bool> use_agc;
+ absl::optional<bool> use_agc2;
+ absl::optional<bool> use_pre_amplifier;
+ absl::optional<bool> use_capture_level_adjustment;
+ absl::optional<bool> use_analog_mic_gain_emulation;
+ absl::optional<bool> use_hpf;
+ absl::optional<bool> use_ns;
+ absl::optional<int> use_ts;
+ absl::optional<bool> use_analog_agc;
+ absl::optional<bool> use_all;
+ absl::optional<bool> analog_agc_use_digital_adaptive_controller;
+ absl::optional<int> agc_mode;
+ absl::optional<int> agc_target_level;
+ absl::optional<bool> use_agc_limiter;
+ absl::optional<int> agc_compression_gain;
+ absl::optional<bool> agc2_use_adaptive_gain;
+ absl::optional<float> agc2_fixed_gain_db;
+ absl::optional<float> pre_amplifier_gain_factor;
+ absl::optional<float> pre_gain_factor;
+ absl::optional<float> post_gain_factor;
+ absl::optional<float> analog_mic_gain_emulation_initial_level;
+ absl::optional<int> ns_level;
+ absl::optional<bool> ns_analysis_on_linear_aec_output;
+ absl::optional<bool> override_key_pressed;
+ absl::optional<int> maximum_internal_processing_rate;
+ int initial_mic_level;
+ bool simulate_mic_gain = false;
+ absl::optional<bool> multi_channel_render;
+ absl::optional<bool> multi_channel_capture;
+ absl::optional<int> simulated_mic_kind;
+ absl::optional<int> frame_for_sending_capture_output_used_false;
+ absl::optional<int> frame_for_sending_capture_output_used_true;
+ bool report_performance = false;
+ absl::optional<std::string> performance_report_output_filename;
+ bool report_bitexactness = false;
+ bool use_verbose_logging = false;
+ bool use_quiet_output = false;
+ bool discard_all_settings_in_aecdump = true;
+ absl::optional<std::string> aec_dump_input_filename;
+ absl::optional<std::string> aec_dump_output_filename;
+ bool fixed_interface = false;
+ bool store_intermediate_output = false;
+ bool print_aec_parameter_values = false;
+ bool dump_internal_data = false;
+ WavFile::SampleFormat wav_output_format = WavFile::SampleFormat::kInt16;
+ absl::optional<std::string> dump_internal_data_output_dir;
+ absl::optional<int> dump_set_to_use;
+ absl::optional<std::string> call_order_input_filename;
+ absl::optional<std::string> call_order_output_filename;
+ absl::optional<std::string> aec_settings_filename;
+ absl::optional<absl::string_view> aec_dump_input_string;
+ std::vector<float>* processed_capture_samples = nullptr;
+ bool analysis_only = false;
+ absl::optional<int> dump_start_frame;
+ absl::optional<int> dump_end_frame;
+ absl::optional<int> init_to_process;
+};
+
+// Provides common functionality for performing audio processing simulations.
+class AudioProcessingSimulator {
+ public:
+ AudioProcessingSimulator(const SimulationSettings& settings,
+ rtc::scoped_refptr<AudioProcessing> audio_processing,
+ std::unique_ptr<AudioProcessingBuilder> ap_builder);
+
+ AudioProcessingSimulator() = delete;
+ AudioProcessingSimulator(const AudioProcessingSimulator&) = delete;
+ AudioProcessingSimulator& operator=(const AudioProcessingSimulator&) = delete;
+
+ virtual ~AudioProcessingSimulator();
+
+ // Processes the data in the input.
+ virtual void Process() = 0;
+
+ // Returns the execution times of all AudioProcessing calls.
+ const ApiCallStatistics& GetApiCallStatistics() const {
+ return api_call_statistics_;
+ }
+
+ // Analyzes the data in the input and reports the resulting statistics.
+ virtual void Analyze() = 0;
+
+  // Reports whether the processed recording was bit-exact.
+ bool OutputWasBitexact() { return bitexact_output_; }
+
+ size_t get_num_process_stream_calls() { return num_process_stream_calls_; }
+ size_t get_num_reverse_process_stream_calls() {
+ return num_reverse_process_stream_calls_;
+ }
+
+ protected:
+ void ProcessStream(bool fixed_interface);
+ void ProcessReverseStream(bool fixed_interface);
+ void ConfigureAudioProcessor();
+ void DetachAecDump();
+ void SetupBuffersConfigsOutputs(int input_sample_rate_hz,
+ int output_sample_rate_hz,
+ int reverse_input_sample_rate_hz,
+ int reverse_output_sample_rate_hz,
+ int input_num_channels,
+ int output_num_channels,
+ int reverse_input_num_channels,
+ int reverse_output_num_channels);
+ void SelectivelyToggleDataDumping(int init_index,
+ int capture_frames_since_init) const;
+
+ const SimulationSettings settings_;
+ rtc::scoped_refptr<AudioProcessing> ap_;
+
+ std::unique_ptr<ChannelBuffer<float>> in_buf_;
+ std::unique_ptr<ChannelBuffer<float>> out_buf_;
+ std::unique_ptr<ChannelBuffer<float>> reverse_in_buf_;
+ std::unique_ptr<ChannelBuffer<float>> reverse_out_buf_;
+ std::vector<std::array<float, 160>> linear_aec_output_buf_;
+ StreamConfig in_config_;
+ StreamConfig out_config_;
+ StreamConfig reverse_in_config_;
+ StreamConfig reverse_out_config_;
+ std::unique_ptr<ChannelBufferWavReader> buffer_reader_;
+ std::unique_ptr<ChannelBufferWavReader> reverse_buffer_reader_;
+ Int16Frame rev_frame_;
+ Int16Frame fwd_frame_;
+ bool bitexact_output_ = true;
+ int aec_dump_mic_level_ = 0;
+
+ protected:
+ size_t output_reset_counter_ = 0;
+
+ private:
+ void SetupOutput();
+
+ size_t num_process_stream_calls_ = 0;
+ size_t num_reverse_process_stream_calls_ = 0;
+ std::unique_ptr<ChannelBufferWavWriter> buffer_file_writer_;
+ std::unique_ptr<ChannelBufferWavWriter> reverse_buffer_file_writer_;
+ std::unique_ptr<ChannelBufferVectorWriter> buffer_memory_writer_;
+ std::unique_ptr<WavWriter> linear_aec_output_file_writer_;
+ ApiCallStatistics api_call_statistics_;
+ std::ofstream residual_echo_likelihood_graph_writer_;
+ int analog_mic_level_;
+ FakeRecordingDevice fake_recording_device_;
+
+ TaskQueueForTest worker_queue_;
+};
+
+} // namespace test
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_TEST_AUDIO_PROCESSING_SIMULATOR_H_
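A sketch of driving a simulation from code rather than command-line flags
(illustrative, not part of the patch; WavBasedSimulator is a concrete subclass
declared in wav_based_simulator.h, included by audioproc_float_impl.cc below,
and its constructor is assumed to mirror AudioProcessingSimulator's; the file
names are placeholders):

    webrtc::test::SimulationSettings settings;
    settings.input_filename = "near.wav";         // placeholder
    settings.reverse_input_filename = "far.wav";  // placeholder
    settings.output_filename = "out.wav";         // placeholder
    settings.use_aec = true;
    settings.use_ns = true;
    webrtc::test::WavBasedSimulator simulator(settings,
                                              /*audio_processing=*/nullptr,
                                              /*ap_builder=*/nullptr);
    simulator.Process();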
diff --git a/third_party/libwebrtc/modules/audio_processing/test/audioproc_float_impl.cc b/third_party/libwebrtc/modules/audio_processing/test/audioproc_float_impl.cc
new file mode 100644
index 0000000000..dd9fc70734
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/audioproc_float_impl.cc
@@ -0,0 +1,815 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/test/audioproc_float_impl.h"
+
+#include <string.h>
+
+#include <iostream>
+#include <memory>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "absl/flags/flag.h"
+#include "absl/flags/parse.h"
+#include "absl/strings/string_view.h"
+#include "modules/audio_processing/include/audio_processing.h"
+#include "modules/audio_processing/test/aec_dump_based_simulator.h"
+#include "modules/audio_processing/test/audio_processing_simulator.h"
+#include "modules/audio_processing/test/wav_based_simulator.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/strings/string_builder.h"
+#include "system_wrappers/include/field_trial.h"
+
+constexpr int kParameterNotSpecifiedValue = -10000;
+
+ABSL_FLAG(std::string, dump_input, "", "Aec dump input filename");
+ABSL_FLAG(std::string, dump_output, "", "Aec dump output filename");
+ABSL_FLAG(std::string, i, "", "Forward stream input wav filename");
+ABSL_FLAG(std::string, o, "", "Forward stream output wav filename");
+ABSL_FLAG(std::string, ri, "", "Reverse stream input wav filename");
+ABSL_FLAG(std::string, ro, "", "Reverse stream output wav filename");
+ABSL_FLAG(std::string,
+ artificial_nearend,
+ "",
+ "Artificial nearend wav filename");
+ABSL_FLAG(std::string, linear_aec_output, "", "Linear AEC output wav filename");
+ABSL_FLAG(int,
+ output_num_channels,
+ kParameterNotSpecifiedValue,
+ "Number of forward stream output channels");
+ABSL_FLAG(int,
+ reverse_output_num_channels,
+ kParameterNotSpecifiedValue,
+ "Number of Reverse stream output channels");
+ABSL_FLAG(int,
+ output_sample_rate_hz,
+ kParameterNotSpecifiedValue,
+ "Forward stream output sample rate in Hz");
+ABSL_FLAG(int,
+ reverse_output_sample_rate_hz,
+ kParameterNotSpecifiedValue,
+ "Reverse stream output sample rate in Hz");
+ABSL_FLAG(bool,
+ fixed_interface,
+ false,
+ "Use the fixed interface when operating on wav files");
+ABSL_FLAG(int,
+ aec,
+ kParameterNotSpecifiedValue,
+ "Activate (1) or deactivate (0) the echo canceller");
+ABSL_FLAG(int,
+ aecm,
+ kParameterNotSpecifiedValue,
+ "Activate (1) or deactivate (0) the mobile echo controller");
+ABSL_FLAG(int,
+ ed,
+ kParameterNotSpecifiedValue,
+ "Activate (1) or deactivate (0) the residual echo detector");
+ABSL_FLAG(std::string,
+ ed_graph,
+ "",
+ "Output filename for graph of echo likelihood");
+ABSL_FLAG(int,
+ agc,
+ kParameterNotSpecifiedValue,
+ "Activate (1) or deactivate (0) the AGC");
+ABSL_FLAG(int,
+ agc2,
+ kParameterNotSpecifiedValue,
+ "Activate (1) or deactivate (0) the AGC2");
+ABSL_FLAG(int,
+ pre_amplifier,
+ kParameterNotSpecifiedValue,
+ "Activate (1) or deactivate(0) the pre amplifier");
+ABSL_FLAG(
+ int,
+ capture_level_adjustment,
+ kParameterNotSpecifiedValue,
+ "Activate (1) or deactivate(0) the capture level adjustment functionality");
+ABSL_FLAG(int,
+ analog_mic_gain_emulation,
+ kParameterNotSpecifiedValue,
+ "Activate (1) or deactivate(0) the analog mic gain emulation in the "
+ "production (non-test) code.");
+ABSL_FLAG(int,
+ hpf,
+ kParameterNotSpecifiedValue,
+ "Activate (1) or deactivate (0) the high-pass filter");
+ABSL_FLAG(int,
+ ns,
+ kParameterNotSpecifiedValue,
+ "Activate (1) or deactivate (0) the noise suppressor");
+ABSL_FLAG(int,
+ ts,
+ kParameterNotSpecifiedValue,
+ "Activate (1) or deactivate (0) the transient suppressor");
+ABSL_FLAG(int,
+ analog_agc,
+ kParameterNotSpecifiedValue,
+ "Activate (1) or deactivate (0) the analog AGC");
+ABSL_FLAG(bool,
+ all_default,
+ false,
+ "Activate all of the default components (will be overridden by any "
+ "other settings)");
+ABSL_FLAG(int,
+ analog_agc_use_digital_adaptive_controller,
+ kParameterNotSpecifiedValue,
+ "Activate (1) or deactivate (0) digital adaptation in AGC1. "
+ "Digital adaptation is active by default.");
+ABSL_FLAG(int,
+ agc_mode,
+ kParameterNotSpecifiedValue,
+ "Specify the AGC mode (0-2)");
+ABSL_FLAG(int,
+ agc_target_level,
+ kParameterNotSpecifiedValue,
+ "Specify the AGC target level (0-31)");
+ABSL_FLAG(int,
+ agc_limiter,
+ kParameterNotSpecifiedValue,
+ "Activate (1) or deactivate (0) the level estimator");
+ABSL_FLAG(int,
+ agc_compression_gain,
+ kParameterNotSpecifiedValue,
+ "Specify the AGC compression gain (0-90)");
+ABSL_FLAG(int,
+ agc2_enable_adaptive_gain,
+ kParameterNotSpecifiedValue,
+ "Activate (1) or deactivate (0) the AGC2 adaptive gain");
+ABSL_FLAG(float,
+ agc2_fixed_gain_db,
+ kParameterNotSpecifiedValue,
+ "AGC2 fixed gain (dB) to apply");
+ABSL_FLAG(float,
+ pre_amplifier_gain_factor,
+ kParameterNotSpecifiedValue,
+ "Pre-amplifier gain factor (linear) to apply");
+ABSL_FLAG(float,
+ pre_gain_factor,
+ kParameterNotSpecifiedValue,
+ "Pre-gain factor (linear) to apply in the capture level adjustment");
+ABSL_FLAG(float,
+ post_gain_factor,
+ kParameterNotSpecifiedValue,
+ "Post-gain factor (linear) to apply in the capture level adjustment");
+ABSL_FLAG(float,
+ analog_mic_gain_emulation_initial_level,
+ kParameterNotSpecifiedValue,
+ "Emulated analog mic level to apply initially in the production "
+ "(non-test) code.");
+ABSL_FLAG(int,
+ ns_level,
+ kParameterNotSpecifiedValue,
+ "Specify the NS level (0-3)");
+ABSL_FLAG(int,
+ ns_analysis_on_linear_aec_output,
+ kParameterNotSpecifiedValue,
+ "Specifies whether the noise suppression analysis is done on the "
+ "linear AEC output");
+ABSL_FLAG(int,
+ maximum_internal_processing_rate,
+ kParameterNotSpecifiedValue,
+ "Set a maximum internal processing rate (32000 or 48000) to override "
+ "the default rate");
+ABSL_FLAG(int,
+ stream_delay,
+ kParameterNotSpecifiedValue,
+ "Specify the stream delay in ms to use");
+ABSL_FLAG(int,
+ use_stream_delay,
+ kParameterNotSpecifiedValue,
+ "Activate (1) or deactivate (0) reporting the stream delay");
+ABSL_FLAG(int,
+ stream_drift_samples,
+ kParameterNotSpecifiedValue,
+ "Specify the number of stream drift samples to use");
+ABSL_FLAG(int,
+ initial_mic_level,
+ 100,
+ "Initial mic level (0-255) for the analog mic gain simulation in the "
+ "test code");
+ABSL_FLAG(int,
+ simulate_mic_gain,
+ 0,
+ "Activate (1) or deactivate(0) the analog mic gain simulation in the "
+ "test code");
+ABSL_FLAG(int,
+ multi_channel_render,
+ kParameterNotSpecifiedValue,
+ "Activate (1) or deactivate (0) multi-channel render processing in "
+ "APM pipeline");
+ABSL_FLAG(int,
+ multi_channel_capture,
+ kParameterNotSpecifiedValue,
+ "Activate (1) or deactivate (0) multi-channel capture processing in "
+ "APM pipeline");
+ABSL_FLAG(int,
+ simulated_mic_kind,
+ kParameterNotSpecifiedValue,
+ "Specify which microphone kind to use for microphone simulation");
+ABSL_FLAG(int,
+ override_key_pressed,
+ kParameterNotSpecifiedValue,
+ "Always set to true (1) or to false (0) the key press state. If "
+ "unspecified, false is set with Wav files or, with AEC dumps, the "
+ "recorded event is used.");
+ABSL_FLAG(int,
+ frame_for_sending_capture_output_used_false,
+ kParameterNotSpecifiedValue,
+ "Capture frame index for sending a runtime setting for that the "
+ "capture output is not used.");
+ABSL_FLAG(int,
+ frame_for_sending_capture_output_used_true,
+ kParameterNotSpecifiedValue,
+ "Capture frame index for sending a runtime setting for that the "
+ "capture output is used.");
+ABSL_FLAG(bool, performance_report, false, "Report the APM performance");
+ABSL_FLAG(std::string,
+ performance_report_output_file,
+ "",
+ "Generate a CSV file with the API call durations");
+ABSL_FLAG(bool, verbose, false, "Produce verbose output");
+ABSL_FLAG(bool,
+ quiet,
+ false,
+ "Avoid producing information about the progress.");
+ABSL_FLAG(bool,
+ bitexactness_report,
+ false,
+ "Report bitexactness for aec dump result reproduction");
+ABSL_FLAG(bool,
+ discard_settings_in_aecdump,
+ false,
+ "Discard any config settings specified in the aec dump");
+ABSL_FLAG(bool,
+ store_intermediate_output,
+ false,
+ "Creates new output files after each init");
+ABSL_FLAG(std::string,
+ custom_call_order_file,
+ "",
+ "Custom process API call order file");
+ABSL_FLAG(std::string,
+ output_custom_call_order_file,
+ "",
+ "Generate custom process API call order file from AEC dump");
+ABSL_FLAG(bool,
+ print_aec_parameter_values,
+ false,
+ "Print parameter values used in AEC in JSON-format");
+ABSL_FLAG(std::string,
+ aec_settings,
+ "",
+ "File in JSON-format with custom AEC settings");
+ABSL_FLAG(bool,
+ dump_data,
+ false,
+ "Dump internal data during the call (requires build flag)");
+ABSL_FLAG(std::string,
+ dump_data_output_dir,
+ "",
+ "Internal data dump output directory");
+ABSL_FLAG(int,
+ dump_set_to_use,
+ kParameterNotSpecifiedValue,
+ "Specifies the dump set to use (if not all the dump sets will "
+ "be used");
+ABSL_FLAG(bool,
+ analyze,
+ false,
+ "Only analyze the call setup behavior (no processing)");
+ABSL_FLAG(float,
+ dump_start_seconds,
+ kParameterNotSpecifiedValue,
+ "Start of when to dump data (seconds).");
+ABSL_FLAG(float,
+ dump_end_seconds,
+ kParameterNotSpecifiedValue,
+ "End of when to dump data (seconds).");
+ABSL_FLAG(int,
+ dump_start_frame,
+ kParameterNotSpecifiedValue,
+ "Start of when to dump data (frames).");
+ABSL_FLAG(int,
+ dump_end_frame,
+ kParameterNotSpecifiedValue,
+ "End of when to dump data (frames).");
+ABSL_FLAG(int,
+ init_to_process,
+ kParameterNotSpecifiedValue,
+ "Init index to process.");
+
+ABSL_FLAG(bool,
+ float_wav_output,
+ false,
+ "Produce floating point wav output files.");
+
+ABSL_FLAG(std::string,
+ force_fieldtrials,
+ "",
+ "Field trials control experimental feature code which can be forced. "
+ "E.g. running with --force_fieldtrials=WebRTC-FooFeature/Enable/"
+ " will assign the group Enable to field trial WebRTC-FooFeature.");
+
+namespace webrtc {
+namespace test {
+namespace {
+
+const char kUsageDescription[] =
+ "Usage: audioproc_f [options] -i <input.wav>\n"
+ " or\n"
+ " audioproc_f [options] -dump_input <aec_dump>\n"
+ "\n\n"
+ "Command-line tool to simulate a call using the audio "
+ "processing module, either based on wav files or "
+ "protobuf debug dump recordings.\n";
+
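+// Illustrative invocations (file names are placeholders):
+//   audioproc_f -i near.wav -ri far.wav -o out.wav -aec 1 -ns 1
+//   audioproc_f -dump_input call.aecdump -dump_output replay.aecdump
+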
+void SetSettingIfSpecified(absl::string_view value,
+ absl::optional<std::string>* parameter) {
+ if (value.compare("") != 0) {
+ *parameter = std::string(value);
+ }
+}
+
+void SetSettingIfSpecified(int value, absl::optional<int>* parameter) {
+ if (value != kParameterNotSpecifiedValue) {
+ *parameter = value;
+ }
+}
+
+void SetSettingIfSpecified(float value, absl::optional<float>* parameter) {
+ constexpr float kFloatParameterNotSpecifiedValue =
+ kParameterNotSpecifiedValue;
+ if (value != kFloatParameterNotSpecifiedValue) {
+ *parameter = value;
+ }
+}
+
+void SetSettingIfFlagSet(int32_t flag, absl::optional<bool>* parameter) {
+ if (flag == 0) {
+ *parameter = false;
+ } else if (flag == 1) {
+ *parameter = true;
+ }
+}
+
+SimulationSettings CreateSettings() {
+ SimulationSettings settings;
+ if (absl::GetFlag(FLAGS_all_default)) {
+ settings.use_ts = true;
+ settings.use_analog_agc = true;
+ settings.use_ns = true;
+ settings.use_hpf = true;
+ settings.use_agc = true;
+ settings.use_agc2 = false;
+ settings.use_pre_amplifier = false;
+ settings.use_aec = true;
+ settings.use_aecm = false;
+ settings.use_ed = false;
+ }
+ SetSettingIfSpecified(absl::GetFlag(FLAGS_dump_input),
+ &settings.aec_dump_input_filename);
+ SetSettingIfSpecified(absl::GetFlag(FLAGS_dump_output),
+ &settings.aec_dump_output_filename);
+ SetSettingIfSpecified(absl::GetFlag(FLAGS_i), &settings.input_filename);
+ SetSettingIfSpecified(absl::GetFlag(FLAGS_o), &settings.output_filename);
+ SetSettingIfSpecified(absl::GetFlag(FLAGS_ri),
+ &settings.reverse_input_filename);
+ SetSettingIfSpecified(absl::GetFlag(FLAGS_ro),
+ &settings.reverse_output_filename);
+ SetSettingIfSpecified(absl::GetFlag(FLAGS_artificial_nearend),
+ &settings.artificial_nearend_filename);
+ SetSettingIfSpecified(absl::GetFlag(FLAGS_linear_aec_output),
+ &settings.linear_aec_output_filename);
+ SetSettingIfSpecified(absl::GetFlag(FLAGS_output_num_channels),
+ &settings.output_num_channels);
+ SetSettingIfSpecified(absl::GetFlag(FLAGS_reverse_output_num_channels),
+ &settings.reverse_output_num_channels);
+ SetSettingIfSpecified(absl::GetFlag(FLAGS_output_sample_rate_hz),
+ &settings.output_sample_rate_hz);
+ SetSettingIfSpecified(absl::GetFlag(FLAGS_reverse_output_sample_rate_hz),
+ &settings.reverse_output_sample_rate_hz);
+ SetSettingIfFlagSet(absl::GetFlag(FLAGS_aec), &settings.use_aec);
+ SetSettingIfFlagSet(absl::GetFlag(FLAGS_aecm), &settings.use_aecm);
+ SetSettingIfFlagSet(absl::GetFlag(FLAGS_ed), &settings.use_ed);
+ SetSettingIfSpecified(absl::GetFlag(FLAGS_ed_graph),
+ &settings.ed_graph_output_filename);
+ SetSettingIfFlagSet(absl::GetFlag(FLAGS_agc), &settings.use_agc);
+ SetSettingIfFlagSet(absl::GetFlag(FLAGS_agc2), &settings.use_agc2);
+ SetSettingIfFlagSet(absl::GetFlag(FLAGS_pre_amplifier),
+ &settings.use_pre_amplifier);
+ SetSettingIfFlagSet(absl::GetFlag(FLAGS_capture_level_adjustment),
+ &settings.use_capture_level_adjustment);
+ SetSettingIfFlagSet(absl::GetFlag(FLAGS_analog_mic_gain_emulation),
+ &settings.use_analog_mic_gain_emulation);
+ SetSettingIfFlagSet(absl::GetFlag(FLAGS_hpf), &settings.use_hpf);
+ SetSettingIfFlagSet(absl::GetFlag(FLAGS_ns), &settings.use_ns);
+ SetSettingIfSpecified(absl::GetFlag(FLAGS_ts), &settings.use_ts);
+ SetSettingIfFlagSet(absl::GetFlag(FLAGS_analog_agc),
+ &settings.use_analog_agc);
+ SetSettingIfFlagSet(
+ absl::GetFlag(FLAGS_analog_agc_use_digital_adaptive_controller),
+ &settings.analog_agc_use_digital_adaptive_controller);
+ SetSettingIfSpecified(absl::GetFlag(FLAGS_agc_mode), &settings.agc_mode);
+ SetSettingIfSpecified(absl::GetFlag(FLAGS_agc_target_level),
+ &settings.agc_target_level);
+ SetSettingIfFlagSet(absl::GetFlag(FLAGS_agc_limiter),
+ &settings.use_agc_limiter);
+ SetSettingIfSpecified(absl::GetFlag(FLAGS_agc_compression_gain),
+ &settings.agc_compression_gain);
+ SetSettingIfFlagSet(absl::GetFlag(FLAGS_agc2_enable_adaptive_gain),
+ &settings.agc2_use_adaptive_gain);
+
+ SetSettingIfSpecified(absl::GetFlag(FLAGS_agc2_fixed_gain_db),
+ &settings.agc2_fixed_gain_db);
+ SetSettingIfSpecified(absl::GetFlag(FLAGS_pre_amplifier_gain_factor),
+ &settings.pre_amplifier_gain_factor);
+ SetSettingIfSpecified(absl::GetFlag(FLAGS_pre_gain_factor),
+ &settings.pre_gain_factor);
+ SetSettingIfSpecified(absl::GetFlag(FLAGS_post_gain_factor),
+ &settings.post_gain_factor);
+ SetSettingIfSpecified(
+ absl::GetFlag(FLAGS_analog_mic_gain_emulation_initial_level),
+ &settings.analog_mic_gain_emulation_initial_level);
+ SetSettingIfSpecified(absl::GetFlag(FLAGS_ns_level), &settings.ns_level);
+ SetSettingIfFlagSet(absl::GetFlag(FLAGS_ns_analysis_on_linear_aec_output),
+ &settings.ns_analysis_on_linear_aec_output);
+ SetSettingIfSpecified(absl::GetFlag(FLAGS_maximum_internal_processing_rate),
+ &settings.maximum_internal_processing_rate);
+ SetSettingIfSpecified(absl::GetFlag(FLAGS_stream_delay),
+ &settings.stream_delay);
+ SetSettingIfFlagSet(absl::GetFlag(FLAGS_use_stream_delay),
+ &settings.use_stream_delay);
+ SetSettingIfSpecified(absl::GetFlag(FLAGS_custom_call_order_file),
+ &settings.call_order_input_filename);
+ SetSettingIfSpecified(absl::GetFlag(FLAGS_output_custom_call_order_file),
+ &settings.call_order_output_filename);
+ SetSettingIfSpecified(absl::GetFlag(FLAGS_aec_settings),
+ &settings.aec_settings_filename);
+ settings.initial_mic_level = absl::GetFlag(FLAGS_initial_mic_level);
+ SetSettingIfFlagSet(absl::GetFlag(FLAGS_multi_channel_render),
+ &settings.multi_channel_render);
+ SetSettingIfFlagSet(absl::GetFlag(FLAGS_multi_channel_capture),
+ &settings.multi_channel_capture);
+ settings.simulate_mic_gain = absl::GetFlag(FLAGS_simulate_mic_gain);
+ SetSettingIfSpecified(absl::GetFlag(FLAGS_simulated_mic_kind),
+ &settings.simulated_mic_kind);
+ SetSettingIfFlagSet(absl::GetFlag(FLAGS_override_key_pressed),
+ &settings.override_key_pressed);
+ SetSettingIfSpecified(
+ absl::GetFlag(FLAGS_frame_for_sending_capture_output_used_false),
+ &settings.frame_for_sending_capture_output_used_false);
+ SetSettingIfSpecified(
+ absl::GetFlag(FLAGS_frame_for_sending_capture_output_used_true),
+ &settings.frame_for_sending_capture_output_used_true);
+ settings.report_performance = absl::GetFlag(FLAGS_performance_report);
+ SetSettingIfSpecified(absl::GetFlag(FLAGS_performance_report_output_file),
+ &settings.performance_report_output_filename);
+ settings.use_verbose_logging = absl::GetFlag(FLAGS_verbose);
+ settings.use_quiet_output = absl::GetFlag(FLAGS_quiet);
+ settings.report_bitexactness = absl::GetFlag(FLAGS_bitexactness_report);
+ settings.discard_all_settings_in_aecdump =
+ absl::GetFlag(FLAGS_discard_settings_in_aecdump);
+ settings.fixed_interface = absl::GetFlag(FLAGS_fixed_interface);
+ settings.store_intermediate_output =
+ absl::GetFlag(FLAGS_store_intermediate_output);
+ settings.print_aec_parameter_values =
+ absl::GetFlag(FLAGS_print_aec_parameter_values);
+ settings.dump_internal_data = absl::GetFlag(FLAGS_dump_data);
+ SetSettingIfSpecified(absl::GetFlag(FLAGS_dump_data_output_dir),
+ &settings.dump_internal_data_output_dir);
+ SetSettingIfSpecified(absl::GetFlag(FLAGS_dump_set_to_use),
+ &settings.dump_set_to_use);
+ settings.wav_output_format = absl::GetFlag(FLAGS_float_wav_output)
+ ? WavFile::SampleFormat::kFloat
+ : WavFile::SampleFormat::kInt16;
+
+ settings.analysis_only = absl::GetFlag(FLAGS_analyze);
+
+ SetSettingIfSpecified(absl::GetFlag(FLAGS_dump_start_frame),
+ &settings.dump_start_frame);
+ SetSettingIfSpecified(absl::GetFlag(FLAGS_dump_end_frame),
+ &settings.dump_end_frame);
+
+ constexpr int kFramesPerSecond = 100;
+ absl::optional<float> start_seconds;
+ SetSettingIfSpecified(absl::GetFlag(FLAGS_dump_start_seconds),
+ &start_seconds);
+ if (start_seconds) {
+ settings.dump_start_frame = *start_seconds * kFramesPerSecond;
+ }
+
+ absl::optional<float> end_seconds;
+ SetSettingIfSpecified(absl::GetFlag(FLAGS_dump_end_seconds), &end_seconds);
+ if (end_seconds) {
+ settings.dump_end_frame = *end_seconds * kFramesPerSecond;
+ }
+
+ SetSettingIfSpecified(absl::GetFlag(FLAGS_init_to_process),
+ &settings.init_to_process);
+
+ return settings;
+}
+
+void ReportConditionalErrorAndExit(bool condition, absl::string_view message) {
+ if (condition) {
+ std::cerr << message << std::endl;
+ exit(1);
+ }
+}
+
+void PerformBasicParameterSanityChecks(
+ const SimulationSettings& settings,
+ bool pre_constructed_ap_provided,
+ bool pre_constructed_ap_builder_provided) {
+ if (settings.input_filename || settings.reverse_input_filename) {
+ ReportConditionalErrorAndExit(
+ !!settings.aec_dump_input_filename,
+ "Error: The aec dump file cannot be specified "
+ "together with input wav files!\n");
+
+ ReportConditionalErrorAndExit(
+ !!settings.aec_dump_input_string,
+ "Error: The aec dump input string cannot be specified "
+ "together with input wav files!\n");
+
+ ReportConditionalErrorAndExit(!!settings.artificial_nearend_filename,
+ "Error: The artificial nearend cannot be "
+ "specified together with input wav files!\n");
+
+    ReportConditionalErrorAndExit(!settings.input_filename,
+                                  "Error: When operating on wav files, the "
+                                  "input wav filename must be "
+                                  "specified!\n");
+
+    ReportConditionalErrorAndExit(
+        settings.reverse_output_filename && !settings.reverse_input_filename,
+        "Error: When operating on wav files, the reverse input wav filename "
+        "must be specified if the reverse output wav filename is specified!\n");
+ } else {
+ ReportConditionalErrorAndExit(
+ !settings.aec_dump_input_filename && !settings.aec_dump_input_string,
+ "Error: Either the aec dump input file, the wav "
+ "input file or the aec dump input string must be specified!\n");
+ ReportConditionalErrorAndExit(
+ settings.aec_dump_input_filename && settings.aec_dump_input_string,
+ "Error: The aec dump input file cannot be specified together with the "
+ "aec dump input string!\n");
+ }
+
+  ReportConditionalErrorAndExit(settings.use_aec && !(*settings.use_aec) &&
+                                    settings.linear_aec_output_filename,
+                                "Error: The linear AEC output filename cannot "
+                                "be specified without the AEC being active!\n");
+
+ ReportConditionalErrorAndExit(
+ settings.use_aec && *settings.use_aec && settings.use_aecm &&
+ *settings.use_aecm,
+ "Error: The AEC and the AECM cannot be activated at the same time!\n");
+
+ ReportConditionalErrorAndExit(
+ settings.output_sample_rate_hz && *settings.output_sample_rate_hz <= 0,
+ "Error: --output_sample_rate_hz must be positive!\n");
+
+  ReportConditionalErrorAndExit(
+      settings.reverse_output_sample_rate_hz &&
+          *settings.reverse_output_sample_rate_hz <= 0,
+      "Error: --reverse_output_sample_rate_hz must be positive!\n");
+
+ ReportConditionalErrorAndExit(
+ settings.output_num_channels && *settings.output_num_channels <= 0,
+ "Error: --output_num_channels must be positive!\n");
+
+ ReportConditionalErrorAndExit(
+ settings.reverse_output_num_channels &&
+ *settings.reverse_output_num_channels <= 0,
+ "Error: --reverse_output_num_channels must be positive!\n");
+
+ ReportConditionalErrorAndExit(
+ settings.agc_target_level && ((*settings.agc_target_level) < 0 ||
+ (*settings.agc_target_level) > 31),
+ "Error: --agc_target_level must be specified between 0 and 31.\n");
+
+ ReportConditionalErrorAndExit(
+ settings.agc_compression_gain && ((*settings.agc_compression_gain) < 0 ||
+ (*settings.agc_compression_gain) > 90),
+ "Error: --agc_compression_gain must be specified between 0 and 90.\n");
+
+ ReportConditionalErrorAndExit(
+ settings.agc2_fixed_gain_db && ((*settings.agc2_fixed_gain_db) < 0 ||
+ (*settings.agc2_fixed_gain_db) > 90),
+ "Error: --agc2_fixed_gain_db must be specified between 0 and 90.\n");
+
+ ReportConditionalErrorAndExit(
+ settings.ns_level &&
+ ((*settings.ns_level) < 0 || (*settings.ns_level) > 3),
+ "Error: --ns_level must be specified between 0 and 3.\n");
+
+ ReportConditionalErrorAndExit(
+ settings.report_bitexactness && !settings.aec_dump_input_filename,
+ "Error: --bitexactness_report can only be used when operating on an "
+ "aecdump\n");
+
+ ReportConditionalErrorAndExit(
+ settings.call_order_input_filename && settings.aec_dump_input_filename,
+ "Error: --custom_call_order_file cannot be used when operating on an "
+ "aecdump\n");
+
+ ReportConditionalErrorAndExit(
+ (settings.initial_mic_level < 0 || settings.initial_mic_level > 255),
+ "Error: --initial_mic_level must be specified between 0 and 255.\n");
+
+ ReportConditionalErrorAndExit(
+ settings.simulated_mic_kind && !settings.simulate_mic_gain,
+ "Error: --simulated_mic_kind cannot be specified when mic simulation is "
+ "disabled\n");
+
+ ReportConditionalErrorAndExit(
+ !settings.simulated_mic_kind && settings.simulate_mic_gain,
+ "Error: --simulated_mic_kind must be specified when mic simulation is "
+ "enabled\n");
+
+ auto valid_wav_name = [](absl::string_view wav_file_name) {
+ if (wav_file_name.size() < 5) {
+ return false;
+ }
+ if ((wav_file_name.compare(wav_file_name.size() - 4, 4, ".wav") == 0) ||
+ (wav_file_name.compare(wav_file_name.size() - 4, 4, ".WAV") == 0)) {
+ return true;
+ }
+ return false;
+ };
+
+ ReportConditionalErrorAndExit(
+ settings.input_filename && (!valid_wav_name(*settings.input_filename)),
+ "Error: --i must be a valid .wav file name.\n");
+
+ ReportConditionalErrorAndExit(
+ settings.output_filename && (!valid_wav_name(*settings.output_filename)),
+ "Error: --o must be a valid .wav file name.\n");
+
+ ReportConditionalErrorAndExit(
+ settings.reverse_input_filename &&
+ (!valid_wav_name(*settings.reverse_input_filename)),
+ "Error: --ri must be a valid .wav file name.\n");
+
+ ReportConditionalErrorAndExit(
+ settings.reverse_output_filename &&
+ (!valid_wav_name(*settings.reverse_output_filename)),
+ "Error: --ro must be a valid .wav file name.\n");
+
+ ReportConditionalErrorAndExit(
+ settings.artificial_nearend_filename &&
+ !valid_wav_name(*settings.artificial_nearend_filename),
+ "Error: --artifical_nearend must be a valid .wav file name.\n");
+
+ ReportConditionalErrorAndExit(
+ settings.linear_aec_output_filename &&
+ (!valid_wav_name(*settings.linear_aec_output_filename)),
+ "Error: --linear_aec_output must be a valid .wav file name.\n");
+
+ ReportConditionalErrorAndExit(
+ WEBRTC_APM_DEBUG_DUMP == 0 && settings.dump_internal_data,
+ "Error: --dump_data cannot be set without proper build support.\n");
+
+ ReportConditionalErrorAndExit(settings.init_to_process &&
+ *settings.init_to_process != 1 &&
+ !settings.aec_dump_input_filename,
+ "Error: --init_to_process must be set to 1 for "
+ "wav-file based simulations.\n");
+
+ ReportConditionalErrorAndExit(
+ !settings.init_to_process &&
+ (settings.dump_start_frame || settings.dump_end_frame),
+ "Error: --init_to_process must be set when specifying a start and/or end "
+ "frame for when to dump internal data.\n");
+
+ ReportConditionalErrorAndExit(
+ !settings.dump_internal_data &&
+ settings.dump_internal_data_output_dir.has_value(),
+ "Error: --dump_data_output_dir cannot be set without --dump_data.\n");
+
+ ReportConditionalErrorAndExit(
+ !settings.aec_dump_input_filename &&
+ settings.call_order_output_filename.has_value(),
+ "Error: --output_custom_call_order_file needs an AEC dump input file.\n");
+
+ ReportConditionalErrorAndExit(
+ (!settings.use_pre_amplifier || !(*settings.use_pre_amplifier)) &&
+ settings.pre_amplifier_gain_factor.has_value(),
+ "Error: --pre_amplifier_gain_factor needs --pre_amplifier to be "
+ "specified and set.\n");
+
+ ReportConditionalErrorAndExit(
+ pre_constructed_ap_provided && pre_constructed_ap_builder_provided,
+ "Error: The AudioProcessing and the AudioProcessingBuilder cannot both "
+ "be specified at the same time.\n");
+
+ ReportConditionalErrorAndExit(
+ settings.aec_settings_filename && pre_constructed_ap_provided,
+ "Error: The aec_settings_filename cannot be specified when a "
+ "pre-constructed audio processing object is provided.\n");
+
+  ReportConditionalErrorAndExit(
+      settings.print_aec_parameter_values && pre_constructed_ap_provided,
+      "Error: The print_aec_parameter_values cannot be set when a "
+      "pre-constructed audio processing object is provided.\n");
+
+ if (settings.linear_aec_output_filename && pre_constructed_ap_provided) {
+ std::cout << "Warning: For the linear AEC output to be stored, this must "
+ "be configured in the AEC that is part of the provided "
+ "AudioProcessing object."
+ << std::endl;
+ }
+}
+
+int RunSimulation(rtc::scoped_refptr<AudioProcessing> audio_processing,
+ std::unique_ptr<AudioProcessingBuilder> ap_builder,
+ int argc,
+ char* argv[],
+ absl::string_view input_aecdump,
+ std::vector<float>* processed_capture_samples) {
+ std::vector<char*> args = absl::ParseCommandLine(argc, argv);
+ if (args.size() != 1) {
+ printf("%s", kUsageDescription);
+ return 1;
+ }
+ // InitFieldTrialsFromString stores the char*, so the char array must
+ // outlive the application.
+ const std::string field_trials = absl::GetFlag(FLAGS_force_fieldtrials);
+ webrtc::field_trial::InitFieldTrialsFromString(field_trials.c_str());
+
+ SimulationSettings settings = CreateSettings();
+ if (!input_aecdump.empty()) {
+ settings.aec_dump_input_string = input_aecdump;
+ settings.processed_capture_samples = processed_capture_samples;
+ RTC_CHECK(settings.processed_capture_samples);
+ }
+ PerformBasicParameterSanityChecks(settings, !!audio_processing, !!ap_builder);
+ std::unique_ptr<AudioProcessingSimulator> processor;
+
+ if (settings.aec_dump_input_filename || settings.aec_dump_input_string) {
+ processor.reset(new AecDumpBasedSimulator(
+ settings, std::move(audio_processing), std::move(ap_builder)));
+ } else {
+ processor.reset(new WavBasedSimulator(settings, std::move(audio_processing),
+ std::move(ap_builder)));
+ }
+
+ if (settings.analysis_only) {
+ processor->Analyze();
+ } else {
+ processor->Process();
+ }
+
+ if (settings.report_performance) {
+ processor->GetApiCallStatistics().PrintReport();
+ }
+ if (settings.performance_report_output_filename) {
+ processor->GetApiCallStatistics().WriteReportToFile(
+ *settings.performance_report_output_filename);
+ }
+
+ if (settings.report_bitexactness && settings.aec_dump_input_filename) {
+ if (processor->OutputWasBitexact()) {
+ std::cout << "The processing was bitexact.";
+ } else {
+ std::cout << "The processing was not bitexact.";
+ }
+ }
+
+ return 0;
+}
+
+} // namespace
+
+int AudioprocFloatImpl(rtc::scoped_refptr<AudioProcessing> audio_processing,
+ int argc,
+ char* argv[]) {
+ return RunSimulation(
+ std::move(audio_processing), /*ap_builder=*/nullptr, argc, argv,
+ /*input_aecdump=*/"", /*processed_capture_samples=*/nullptr);
+}
+
+int AudioprocFloatImpl(std::unique_ptr<AudioProcessingBuilder> ap_builder,
+ int argc,
+ char* argv[],
+ absl::string_view input_aecdump,
+ std::vector<float>* processed_capture_samples) {
+ return RunSimulation(/*audio_processing=*/nullptr, std::move(ap_builder),
+ argc, argv, input_aecdump, processed_capture_samples);
+}
+
+} // namespace test
+} // namespace webrtc
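The flag-mapping block in CreateSettings() above relies on two small helpers, `SetSettingIfSpecified` and `SetSettingIfFlagSet`, defined earlier in audioproc_float_impl.cc (outside this hunk). Their job is to leave an `absl::optional` setting untouched when the corresponding flag keeps its "unset" default, so that `PerformBasicParameterSanityChecks()` can tell "not specified" apart from "explicitly disabled". A minimal sketch of the pattern; `kNotSpecified` is a placeholder name for whatever sentinel default the real flags use:

```
#include <cstdint>
#include <string>

#include "absl/strings/string_view.h"
#include "absl/types/optional.h"

// Hypothetical sentinel marking an integer flag the user did not set.
constexpr int kNotSpecified = -10000;

// Copies an int flag into an optional setting only if the user set the flag.
void SetSettingIfSpecified(int value, absl::optional<int>* parameter) {
  if (value != kNotSpecified) {
    *parameter = value;
  }
}

// Copies a string flag into an optional setting only if it is non-empty.
void SetSettingIfSpecified(absl::string_view value,
                           absl::optional<std::string>* parameter) {
  if (!value.empty()) {
    *parameter = std::string(value);
  }
}

// Maps a tri-state int flag (unset / 0 / 1) onto an optional bool.
void SetSettingIfFlagSet(int32_t flag, absl::optional<bool>* parameter) {
  if (flag == 0) {
    *parameter = false;
  } else if (flag == 1) {
    *parameter = true;
  }
}
```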
diff --git a/third_party/libwebrtc/modules/audio_processing/test/audioproc_float_impl.h b/third_party/libwebrtc/modules/audio_processing/test/audioproc_float_impl.h
new file mode 100644
index 0000000000..5ed3aefab7
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/audioproc_float_impl.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_TEST_AUDIOPROC_FLOAT_IMPL_H_
+#define MODULES_AUDIO_PROCESSING_TEST_AUDIOPROC_FLOAT_IMPL_H_
+
+#include <memory>
+#include <vector>
+
+#include "absl/strings/string_view.h"
+#include "modules/audio_processing/include/audio_processing.h"
+
+namespace webrtc {
+namespace test {
+
+// This function implements the audio processing simulation utility. A WAV or
+// AEC dump input file must be specified via the `argv` argument. Any
+// `audio_processing` object passed in is used for the simulation. Note that
+// when an `audio_processing` object is specified, all functionality that
+// relies on using the internal builder is deactivated, since the
+// AudioProcessing object is already created and the builder is not used in
+// the simulation.
+int AudioprocFloatImpl(rtc::scoped_refptr<AudioProcessing> audio_processing,
+ int argc,
+ char* argv[]);
+
+// This function implements the audio processing simulation utility. Pass
+// `input_aecdump` to provide the content of an AEC dump file as a string; if
+// `input_aecdump` is not passed, a WAV or AEC input dump file must be specified
+// via the `argv` argument. Pass `processed_capture_samples` to write in it the
+// samples processed on the capture side; if `processed_capture_samples` is not
+// passed, the output file can optionally be specified via the `argv` argument.
+int AudioprocFloatImpl(std::unique_ptr<AudioProcessingBuilder> ap_builder,
+ int argc,
+ char* argv[],
+ absl::string_view input_aecdump,
+ std::vector<float>* processed_capture_samples);
+
+} // namespace test
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_TEST_AUDIOPROC_FLOAT_IMPL_H_
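The header above exposes two entry points. A minimal sketch of a tool wiring the builder-based overload into its entry point (a hypothetical main, not the actual audioproc_f wrapper):

```
#include <memory>
#include <utility>

#include "modules/audio_processing/include/audio_processing.h"
#include "modules/audio_processing/test/audioproc_float_impl.h"

int main(int argc, char* argv[]) {
  // Let the simulator construct the APM itself via the builder; input and
  // output files are then taken from argv as described in the header comment.
  auto builder = std::make_unique<webrtc::AudioProcessingBuilder>();
  return webrtc::test::AudioprocFloatImpl(
      std::move(builder), argc, argv,
      /*input_aecdump=*/"", /*processed_capture_samples=*/nullptr);
}
```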
diff --git a/third_party/libwebrtc/modules/audio_processing/test/bitexactness_tools.cc b/third_party/libwebrtc/modules/audio_processing/test/bitexactness_tools.cc
new file mode 100644
index 0000000000..0464345364
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/bitexactness_tools.cc
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/test/bitexactness_tools.h"
+
+#include <math.h>
+
+#include <algorithm>
+#include <string>
+#include <vector>
+
+#include "api/array_view.h"
+#include "test/testsupport/file_utils.h"
+
+namespace webrtc {
+namespace test {
+
+std::string GetApmRenderTestVectorFileName(int sample_rate_hz) {
+ switch (sample_rate_hz) {
+ case 8000:
+ return ResourcePath("far8_stereo", "pcm");
+ case 16000:
+ return ResourcePath("far16_stereo", "pcm");
+ case 32000:
+ return ResourcePath("far32_stereo", "pcm");
+ case 48000:
+ return ResourcePath("far48_stereo", "pcm");
+ default:
+ RTC_DCHECK_NOTREACHED();
+ }
+ return "";
+}
+
+std::string GetApmCaptureTestVectorFileName(int sample_rate_hz) {
+ switch (sample_rate_hz) {
+ case 8000:
+ return ResourcePath("near8_stereo", "pcm");
+ case 16000:
+ return ResourcePath("near16_stereo", "pcm");
+ case 32000:
+ return ResourcePath("near32_stereo", "pcm");
+ case 48000:
+ return ResourcePath("near48_stereo", "pcm");
+ default:
+ RTC_DCHECK_NOTREACHED();
+ }
+ return "";
+}
+
+void ReadFloatSamplesFromStereoFile(size_t samples_per_channel,
+ size_t num_channels,
+ InputAudioFile* stereo_pcm_file,
+ rtc::ArrayView<float> data) {
+ RTC_DCHECK_LE(num_channels, 2);
+ RTC_DCHECK_EQ(data.size(), samples_per_channel * num_channels);
+ std::vector<int16_t> read_samples(samples_per_channel * 2);
+ stereo_pcm_file->Read(samples_per_channel * 2, read_samples.data());
+
+ // Convert samples to float and discard any channels not needed.
+ for (size_t sample = 0; sample < samples_per_channel; ++sample) {
+ for (size_t channel = 0; channel < num_channels; ++channel) {
+ data[sample * num_channels + channel] =
+ read_samples[sample * 2 + channel] / 32768.0f;
+ }
+ }
+}
+
+::testing::AssertionResult VerifyDeinterleavedArray(
+ size_t samples_per_channel,
+ size_t num_channels,
+ rtc::ArrayView<const float> reference,
+ rtc::ArrayView<const float> output,
+ float element_error_bound) {
+  // Form vectors to compare against the reference. Only the first values of
+  // each output channel are compared, to avoid having to specify all
+  // preceding frames as test vectors.
+ const size_t reference_frame_length =
+ rtc::CheckedDivExact(reference.size(), num_channels);
+
+ std::vector<float> output_to_verify;
+ for (size_t channel_no = 0; channel_no < num_channels; ++channel_no) {
+ output_to_verify.insert(output_to_verify.end(),
+ output.begin() + channel_no * samples_per_channel,
+ output.begin() + channel_no * samples_per_channel +
+ reference_frame_length);
+ }
+
+ return VerifyArray(reference, output_to_verify, element_error_bound);
+}
+
+::testing::AssertionResult VerifyArray(rtc::ArrayView<const float> reference,
+ rtc::ArrayView<const float> output,
+ float element_error_bound) {
+  // The vectors are deemed to be bitexact only if
+  // a) the output is at least as long as the reference, and
+  // b) the samples in the reference match the corresponding samples in the
+  //    output to within `element_error_bound`.
+
+ bool equal = true;
+ if (output.size() < reference.size()) {
+ equal = false;
+ } else {
+ // Compare the first samples in the vectors.
+ for (size_t k = 0; k < reference.size(); ++k) {
+ if (fabs(output[k] - reference[k]) > element_error_bound) {
+ equal = false;
+ break;
+ }
+ }
+ }
+
+ if (equal) {
+ return ::testing::AssertionSuccess();
+ }
+
+ // Lambda function that produces a formatted string with the data in the
+ // vector.
+ auto print_vector_in_c_format = [](rtc::ArrayView<const float> v,
+ size_t num_values_to_print) {
+ std::string s = "{ ";
+ for (size_t k = 0; k < std::min(num_values_to_print, v.size()); ++k) {
+ s += std::to_string(v[k]) + "f";
+      s += (k + 1 < std::min(num_values_to_print, v.size())) ? ", " : "";
+ }
+ return s + " }";
+ };
+
+ // If the vectors are deemed not to be similar, return a report of the
+ // difference.
+ return ::testing::AssertionFailure()
+ << std::endl
+ << " Actual values : "
+ << print_vector_in_c_format(output,
+ std::min(output.size(), reference.size()))
+ << std::endl
+ << " Expected values: "
+ << print_vector_in_c_format(reference, reference.size()) << std::endl;
+}
+
+} // namespace test
+} // namespace webrtc
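A sketch of how these tools combine in an APM bitexactness test. The reference values below are placeholders that a real test would fill with known-good output samples, and the sketch assumes the standard APM test resources are present:

```
#include <vector>

#include "modules/audio_coding/neteq/tools/input_audio_file.h"
#include "modules/audio_processing/test/bitexactness_tools.h"
#include "test/gtest.h"

namespace webrtc {
namespace test {

TEST(BitexactnessToolsExample, CompareFrameAgainstReference) {
  InputAudioFile capture_file(GetApmCaptureTestVectorFileName(16000));
  constexpr size_t kSamplesPerChannel = 160;  // One 10 ms frame at 16 kHz.
  constexpr size_t kNumChannels = 1;
  std::vector<float> frame(kSamplesPerChannel * kNumChannels);
  ReadFloatSamplesFromStereoFile(kSamplesPerChannel, kNumChannels,
                                 &capture_file, frame);
  // Placeholder reference; only the first reference.size() samples per
  // channel are compared against the frame.
  const float kReference[] = {0.f, 0.f, 0.f};
  EXPECT_TRUE(VerifyDeinterleavedArray(kSamplesPerChannel, kNumChannels,
                                       kReference, frame,
                                       /*element_error_bound=*/1.f / 32768.f));
}

}  // namespace test
}  // namespace webrtc
```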
diff --git a/third_party/libwebrtc/modules/audio_processing/test/bitexactness_tools.h b/third_party/libwebrtc/modules/audio_processing/test/bitexactness_tools.h
new file mode 100644
index 0000000000..2d3113276d
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/bitexactness_tools.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_TEST_BITEXACTNESS_TOOLS_H_
+#define MODULES_AUDIO_PROCESSING_TEST_BITEXACTNESS_TOOLS_H_
+
+#include <string>
+
+#include "api/array_view.h"
+#include "modules/audio_coding/neteq/tools/input_audio_file.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace test {
+
+// Returns test vector to use for the render signal in an
+// APM bitexactness test.
+std::string GetApmRenderTestVectorFileName(int sample_rate_hz);
+
+// Returns test vector to use for the capture signal in an
+// APM bitexactness test.
+std::string GetApmCaptureTestVectorFileName(int sample_rate_hz);
+
+// Extracts float samples from up to two channels of a PCM file.
+void ReadFloatSamplesFromStereoFile(size_t samples_per_channel,
+ size_t num_channels,
+ InputAudioFile* stereo_pcm_file,
+ rtc::ArrayView<float> data);
+
+// Verifies a frame against a reference and returns the results as an
+// AssertionResult.
+::testing::AssertionResult VerifyDeinterleavedArray(
+ size_t samples_per_channel,
+ size_t num_channels,
+ rtc::ArrayView<const float> reference,
+ rtc::ArrayView<const float> output,
+ float element_error_bound);
+
+// Verifies a vector against a reference and returns the results as an
+// AssertionResult.
+::testing::AssertionResult VerifyArray(rtc::ArrayView<const float> reference,
+ rtc::ArrayView<const float> output,
+ float element_error_bound);
+
+} // namespace test
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_TEST_BITEXACTNESS_TOOLS_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/test/conversational_speech/BUILD.gn b/third_party/libwebrtc/modules/audio_processing/test/conversational_speech/BUILD.gn
new file mode 100644
index 0000000000..2c3678092e
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/conversational_speech/BUILD.gn
@@ -0,0 +1,81 @@
+# Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+import("../../../../webrtc.gni")
+
+if (!build_with_chromium) {
+ group("conversational_speech") {
+ testonly = true
+ deps = [ ":conversational_speech_generator" ]
+ }
+
+ rtc_executable("conversational_speech_generator") {
+ testonly = true
+ sources = [ "generator.cc" ]
+ deps = [
+ ":lib",
+ "../../../../test:fileutils",
+ "../../../../test:test_support",
+ "//third_party/abseil-cpp/absl/flags:flag",
+ "//third_party/abseil-cpp/absl/flags:parse",
+ ]
+ }
+}
+
+rtc_library("lib") {
+ testonly = true
+ sources = [
+ "config.cc",
+ "config.h",
+ "multiend_call.cc",
+ "multiend_call.h",
+ "simulator.cc",
+ "simulator.h",
+ "timing.cc",
+ "timing.h",
+ "wavreader_abstract_factory.h",
+ "wavreader_factory.cc",
+ "wavreader_factory.h",
+ "wavreader_interface.h",
+ ]
+ deps = [
+ "../../../../api:array_view",
+ "../../../../common_audio",
+ "../../../../rtc_base:checks",
+ "../../../../rtc_base:logging",
+ "../../../../rtc_base:safe_conversions",
+ "../../../../rtc_base:stringutils",
+ "../../../../test:fileutils",
+ ]
+ absl_deps = [ "//third_party/abseil-cpp/absl/strings" ]
+ visibility = [ ":*" ] # Only targets in this file can depend on this.
+}
+
+rtc_library("unittest") {
+ testonly = true
+ sources = [
+ "generator_unittest.cc",
+ "mock_wavreader.cc",
+ "mock_wavreader.h",
+ "mock_wavreader_factory.cc",
+ "mock_wavreader_factory.h",
+ ]
+ deps = [
+ ":lib",
+ "../../../../api:array_view",
+ "../../../../common_audio",
+ "../../../../rtc_base:logging",
+ "../../../../test:fileutils",
+ "../../../../test:test_support",
+ "//testing/gtest",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/strings",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+}
diff --git a/third_party/libwebrtc/modules/audio_processing/test/conversational_speech/OWNERS b/third_party/libwebrtc/modules/audio_processing/test/conversational_speech/OWNERS
new file mode 100644
index 0000000000..07cff405e6
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/conversational_speech/OWNERS
@@ -0,0 +1,3 @@
+alessiob@webrtc.org
+henrik.lundin@webrtc.org
+peah@webrtc.org
diff --git a/third_party/libwebrtc/modules/audio_processing/test/conversational_speech/README.md b/third_party/libwebrtc/modules/audio_processing/test/conversational_speech/README.md
new file mode 100644
index 0000000000..0fa66669e6
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/conversational_speech/README.md
@@ -0,0 +1,74 @@
+# Conversational Speech generator tool
+
+Tool to generate multiple-end audio tracks to simulate conversational speech
+with two or more participants.
+
+The input to the tool is a directory containing a number of audio tracks and
+a text file indicating how to time the sequence of speech turns (see the Example
+section).
+
+Since the timing of the speaking turns is specified by the user, the generated
+tracks may not be suitable for testing scenarios in which there is unpredictable
+network delay (e.g., end-to-end RTC assessment).
+
+Instead, the generated pairs can be used when the delay is constant (including
+the case in which there is no delay).
+For instance, echo cancellation in the APM module can be evaluated using the
+two-end audio tracks as input and reverse input.
+
+By indicating negative and positive time offsets, one can reproduce cross-talk
+(aka double-talk) and silence in the conversation.
+
+### Example
+
+For each end, there is a set of audio tracks, e.g., a1, a2 and a3 (speaker A)
+and b1, b2 (speaker B).
+The text file with the timing information may look like this:
+
+```
+A a1 0
+B b1 0
+A a2 100
+B b2 -200
+A a3 0
+A a4 0
+```
+
+The first column indicates the speaker name, the second contains the audio track
+file names, and the third the offsets (in milliseconds) used to concatenate the
+chunks. An optional fourth column contains positive or negative integral gains
+in dB that will be applied to the tracks. It's possible to specify the gain for
+some turns but not for others. If the gain is left out, no gain is applied.
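+
+For example, a gain column can be appended to the timing lines as follows
+(here b2 is attenuated by 5 dB while a2 keeps the default gain):
+
+```
+A a2 100
+B b2 -200 -5
+```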
+
+Assume that all the audio tracks in the example above are 1000 ms long.
+The tool will then generate two tracks (A and B) that look like this:
+
+**Track A**
+```
+ a1 (1000 ms)
+ silence (1100 ms)
+ a2 (1000 ms)
+ silence (800 ms)
+ a3 (1000 ms)
+ a4 (1000 ms)
+```
+
+**Track B**
+```
+ silence (1000 ms)
+ b1 (1000 ms)
+ silence (900 ms)
+ b2 (1000 ms)
+ silence (2000 ms)
+```
+
+The two tracks can also be visualized as follows (one character represents
+100 ms, "." is silence and "*" is speech).
+
+```
+t: 0 1 2 3 4 5 6 (s)
+A: **********...........**********........********************
+B: ..........**********.........**********....................
+ ^ 200 ms cross-talk
+ 100 ms silence ^
+```
diff --git a/third_party/libwebrtc/modules/audio_processing/test/conversational_speech/config.cc b/third_party/libwebrtc/modules/audio_processing/test/conversational_speech/config.cc
new file mode 100644
index 0000000000..76d3de8108
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/conversational_speech/config.cc
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/test/conversational_speech/config.h"
+
+namespace webrtc {
+namespace test {
+namespace conversational_speech {
+
+const std::string& Config::audiotracks_path() const {
+ return audiotracks_path_;
+}
+
+const std::string& Config::timing_filepath() const {
+ return timing_filepath_;
+}
+
+const std::string& Config::output_path() const {
+ return output_path_;
+}
+
+} // namespace conversational_speech
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/test/conversational_speech/config.h b/third_party/libwebrtc/modules/audio_processing/test/conversational_speech/config.h
new file mode 100644
index 0000000000..5a847e06a2
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/conversational_speech/config.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_TEST_CONVERSATIONAL_SPEECH_CONFIG_H_
+#define MODULES_AUDIO_PROCESSING_TEST_CONVERSATIONAL_SPEECH_CONFIG_H_
+
+#include <string>
+
+#include "absl/strings/string_view.h"
+
+namespace webrtc {
+namespace test {
+namespace conversational_speech {
+
+struct Config {
+ Config(absl::string_view audiotracks_path,
+ absl::string_view timing_filepath,
+ absl::string_view output_path)
+ : audiotracks_path_(audiotracks_path),
+ timing_filepath_(timing_filepath),
+ output_path_(output_path) {}
+
+ const std::string& audiotracks_path() const;
+ const std::string& timing_filepath() const;
+ const std::string& output_path() const;
+
+ const std::string audiotracks_path_;
+ const std::string timing_filepath_;
+ const std::string output_path_;
+};
+
+} // namespace conversational_speech
+} // namespace test
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_TEST_CONVERSATIONAL_SPEECH_CONFIG_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/test/conversational_speech/generator.cc b/third_party/libwebrtc/modules/audio_processing/test/conversational_speech/generator.cc
new file mode 100644
index 0000000000..d0bc2f2319
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/conversational_speech/generator.cc
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <iostream>
+#include <memory>
+#include <vector>
+
+#include "absl/flags/flag.h"
+#include "absl/flags/parse.h"
+#include "modules/audio_processing/test/conversational_speech/config.h"
+#include "modules/audio_processing/test/conversational_speech/multiend_call.h"
+#include "modules/audio_processing/test/conversational_speech/simulator.h"
+#include "modules/audio_processing/test/conversational_speech/timing.h"
+#include "modules/audio_processing/test/conversational_speech/wavreader_factory.h"
+#include "test/testsupport/file_utils.h"
+
+ABSL_FLAG(std::string, i, "", "Directory containing the speech turn wav files");
+ABSL_FLAG(std::string, t, "", "Path to the timing text file");
+ABSL_FLAG(std::string, o, "", "Output wav files destination path");
+
+namespace webrtc {
+namespace test {
+namespace {
+
+const char kUsageDescription[] =
+ "Usage: conversational_speech_generator\n"
+ " -i <path/to/source/audiotracks>\n"
+ " -t <path/to/timing_file.txt>\n"
+ " -o <output/path>\n"
+ "\n\n"
+ "Command-line tool to generate multiple-end audio tracks to simulate "
+ "conversational speech with two or more participants.\n";
+
+} // namespace
+
+int main(int argc, char* argv[]) {
+ std::vector<char*> args = absl::ParseCommandLine(argc, argv);
+ if (args.size() != 1) {
+ printf("%s", kUsageDescription);
+ return 1;
+ }
+ RTC_CHECK(DirExists(absl::GetFlag(FLAGS_i)));
+ RTC_CHECK(FileExists(absl::GetFlag(FLAGS_t)));
+ RTC_CHECK(DirExists(absl::GetFlag(FLAGS_o)));
+
+ conversational_speech::Config config(
+ absl::GetFlag(FLAGS_i), absl::GetFlag(FLAGS_t), absl::GetFlag(FLAGS_o));
+
+ // Load timing.
+ std::vector<conversational_speech::Turn> timing =
+ conversational_speech::LoadTiming(config.timing_filepath());
+
+ // Parse timing and audio tracks.
+ auto wavreader_factory =
+ std::make_unique<conversational_speech::WavReaderFactory>();
+ conversational_speech::MultiEndCall multiend_call(
+ timing, config.audiotracks_path(), std::move(wavreader_factory));
+
+ // Generate output audio tracks.
+ auto generated_audiotrack_pairs =
+ conversational_speech::Simulate(multiend_call, config.output_path());
+
+ // Show paths to created audio tracks.
+ std::cout << "Output files:" << std::endl;
+ for (const auto& output_paths_entry : *generated_audiotrack_pairs) {
+ std::cout << " speaker: " << output_paths_entry.first << std::endl;
+ std::cout << " near end: " << output_paths_entry.second.near_end
+ << std::endl;
+ std::cout << " far end: " << output_paths_entry.second.far_end
+ << std::endl;
+ }
+
+ return 0;
+}
+
+} // namespace test
+} // namespace webrtc
+
+int main(int argc, char* argv[]) {
+ return webrtc::test::main(argc, argv);
+}
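For reference, a typical invocation of the generator built from this file (all paths are placeholders) looks like:

```
conversational_speech_generator -i /path/to/source/audiotracks \
                                -t /path/to/timing_file.txt \
                                -o /output/path
```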
diff --git a/third_party/libwebrtc/modules/audio_processing/test/conversational_speech/generator_unittest.cc b/third_party/libwebrtc/modules/audio_processing/test/conversational_speech/generator_unittest.cc
new file mode 100644
index 0000000000..17714440d4
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/conversational_speech/generator_unittest.cc
@@ -0,0 +1,675 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+// This file consists of unit tests for webrtc::test::conversational_speech
+// members. Some of them focus on accepting or rejecting different
+// conversational speech setups. A setup is defined by a set of audio tracks
+// and timing information.
+// The docstring at the beginning of each TEST(ConversationalSpeechTest,
+// MultiEndCallSetup*) function looks like the drawing below and indicates which
+// setup is tested.
+//
+// Accept:
+// A 0****.....
+// B .....1****
+//
+// The drawing indicates the following:
+// - the illustrated setup should be accepted,
+// - there are two speakers (namely, A and B),
+// - A speaks first and B speaks second,
+// - each character after the speaker's letter indicates a time unit (e.g., 100
+// ms),
+// - "*" indicates speaking, "." listening,
+// - numbers indicate the turn index in std::vector<Turn>.
+//
+// Note that the same speaker can appear in multiple lines in order to depict
+// cases in which there are wrong offsets leading to self cross-talk (which is
+// rejected).
+
+// MSVC++ requires this to be set before any other includes to get M_PI.
+#define _USE_MATH_DEFINES
+
+#include <stdio.h>
+
+#include <cmath>
+#include <map>
+#include <memory>
+#include <vector>
+
+#include "absl/strings/string_view.h"
+#include "absl/types/optional.h"
+#include "common_audio/wav_file.h"
+#include "modules/audio_processing/test/conversational_speech/config.h"
+#include "modules/audio_processing/test/conversational_speech/mock_wavreader_factory.h"
+#include "modules/audio_processing/test/conversational_speech/multiend_call.h"
+#include "modules/audio_processing/test/conversational_speech/simulator.h"
+#include "modules/audio_processing/test/conversational_speech/timing.h"
+#include "modules/audio_processing/test/conversational_speech/wavreader_factory.h"
+#include "rtc_base/logging.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "test/testsupport/file_utils.h"
+
+namespace webrtc {
+namespace test {
+namespace {
+
+using conversational_speech::LoadTiming;
+using conversational_speech::MockWavReaderFactory;
+using conversational_speech::MultiEndCall;
+using conversational_speech::SaveTiming;
+using conversational_speech::Turn;
+using conversational_speech::WavReaderFactory;
+
+const char* const audiotracks_path = "/path/to/audiotracks";
+const char* const timing_filepath = "/path/to/timing_file.txt";
+const char* const output_path = "/path/to/output_dir";
+
+const std::vector<Turn> expected_timing = {
+ {"A", "a1", 0, 0}, {"B", "b1", 0, 0}, {"A", "a2", 100, 0},
+ {"B", "b2", -200, 0}, {"A", "a3", 0, 0}, {"A", "a3", 0, 0},
+};
+const std::size_t kNumberOfTurns = expected_timing.size();
+
+// Default arguments for MockWavReaderFactory ctor.
+// Fake audio track parameters.
+constexpr int kDefaultSampleRate = 48000;
+const std::map<std::string, const MockWavReaderFactory::Params>
+ kDefaultMockWavReaderFactoryParamsMap = {
+ {"t300", {kDefaultSampleRate, 1u, 14400u}}, // Mono, 0.3 seconds.
+ {"t500", {kDefaultSampleRate, 1u, 24000u}}, // Mono, 0.5 seconds.
+ {"t1000", {kDefaultSampleRate, 1u, 48000u}}, // Mono, 1.0 seconds.
+ {"sr8000", {8000, 1u, 8000u}}, // 8kHz sample rate, mono, 1 second.
+ {"sr16000", {16000, 1u, 16000u}}, // 16kHz sample rate, mono, 1 second.
+ {"sr16000_stereo", {16000, 2u, 16000u}}, // Like sr16000, but stereo.
+};
+const MockWavReaderFactory::Params& kDefaultMockWavReaderFactoryParams =
+ kDefaultMockWavReaderFactoryParamsMap.at("t500");
+
+std::unique_ptr<MockWavReaderFactory> CreateMockWavReaderFactory() {
+  return std::make_unique<MockWavReaderFactory>(
+      kDefaultMockWavReaderFactoryParams,
+      kDefaultMockWavReaderFactoryParamsMap);
+}
+
+void CreateSineWavFile(absl::string_view filepath,
+ const MockWavReaderFactory::Params& params,
+ float frequency = 440.0f) {
+ // Create samples.
+ constexpr double two_pi = 2.0 * M_PI;
+ std::vector<int16_t> samples(params.num_samples);
+ for (std::size_t i = 0; i < params.num_samples; ++i) {
+ // TODO(alessiob): the produced tone is not pure, improve.
+ samples[i] = std::lround(
+ 32767.0f * std::sin(two_pi * i * frequency / params.sample_rate));
+ }
+
+ // Write samples.
+ WavWriter wav_writer(filepath, params.sample_rate, params.num_channels);
+ wav_writer.WriteSamples(samples.data(), params.num_samples);
+}
+
+// Parameters to generate audio tracks with CreateSineWavFile.
+struct SineAudioTrackParams {
+ MockWavReaderFactory::Params params;
+ float frequency;
+};
+
+// Creates a temporary directory in which sine audio tracks are written.
+std::string CreateTemporarySineAudioTracks(
+ const std::map<std::string, SineAudioTrackParams>& sine_tracks_params) {
+ // Create temporary directory.
+ std::string temp_directory =
+ OutputPath() + "TempConversationalSpeechAudioTracks";
+ CreateDir(temp_directory);
+
+ // Create sine tracks.
+ for (const auto& it : sine_tracks_params) {
+ const std::string temp_filepath = JoinFilename(temp_directory, it.first);
+ CreateSineWavFile(temp_filepath, it.second.params, it.second.frequency);
+ }
+
+ return temp_directory;
+}
+
+void CheckAudioTrackParams(
+    const WavReaderFactory& wav_reader_factory,
+    absl::string_view filepath,
+    const MockWavReaderFactory::Params& expected_params) {
+  auto wav_reader = wav_reader_factory.Create(filepath);
+  EXPECT_EQ(expected_params.sample_rate, wav_reader->SampleRate());
+  EXPECT_EQ(expected_params.num_channels, wav_reader->NumChannels());
+  EXPECT_EQ(expected_params.num_samples, wav_reader->NumSamples());
+}
+
+void DeleteFolderAndContents(absl::string_view dir) {
+ if (!DirExists(dir)) {
+ return;
+ }
+ absl::optional<std::vector<std::string>> dir_content = ReadDirectory(dir);
+ EXPECT_TRUE(dir_content);
+ for (const auto& path : *dir_content) {
+ if (DirExists(path)) {
+ DeleteFolderAndContents(path);
+ } else if (FileExists(path)) {
+ // TODO(alessiob): Wrap with EXPECT_TRUE() once webrtc:7769 bug fixed.
+ RemoveFile(path);
+ } else {
+ FAIL();
+ }
+ }
+ // TODO(alessiob): Wrap with EXPECT_TRUE() once webrtc:7769 bug fixed.
+ RemoveDir(dir);
+}
+
+} // namespace
+
+using ::testing::_;
+
+TEST(ConversationalSpeechTest, Settings) {
+ const conversational_speech::Config config(audiotracks_path, timing_filepath,
+ output_path);
+
+ // Test getters.
+ EXPECT_EQ(audiotracks_path, config.audiotracks_path());
+ EXPECT_EQ(timing_filepath, config.timing_filepath());
+ EXPECT_EQ(output_path, config.output_path());
+}
+
+TEST(ConversationalSpeechTest, TimingSaveLoad) {
+ // Save test timing.
+ const std::string temporary_filepath =
+ TempFilename(OutputPath(), "TempTimingTestFile");
+ SaveTiming(temporary_filepath, expected_timing);
+
+ // Create a std::vector<Turn> instance by loading from file.
+ std::vector<Turn> actual_timing = LoadTiming(temporary_filepath);
+ RemoveFile(temporary_filepath);
+
+ // Check size.
+ EXPECT_EQ(expected_timing.size(), actual_timing.size());
+
+ // Check Turn instances.
+ for (size_t index = 0; index < expected_timing.size(); ++index) {
+ EXPECT_EQ(expected_timing[index], actual_timing[index])
+ << "turn #" << index << " not matching";
+ }
+}
+
+TEST(ConversationalSpeechTest, MultiEndCallCreate) {
+ auto mock_wavreader_factory = CreateMockWavReaderFactory();
+
+ // There are 5 unique audio tracks to read.
+ EXPECT_CALL(*mock_wavreader_factory, Create(_)).Times(5);
+
+ // Inject the mock wav reader factory.
+ conversational_speech::MultiEndCall multiend_call(
+ expected_timing, audiotracks_path, std::move(mock_wavreader_factory));
+ EXPECT_TRUE(multiend_call.valid());
+
+ // Test.
+ EXPECT_EQ(2u, multiend_call.speaker_names().size());
+ EXPECT_EQ(5u, multiend_call.audiotrack_readers().size());
+ EXPECT_EQ(6u, multiend_call.speaking_turns().size());
+}
+
+TEST(ConversationalSpeechTest, MultiEndCallSetupDifferentSampleRates) {
+ const std::vector<Turn> timing = {
+ {"A", "sr8000", 0, 0},
+ {"B", "sr16000", 0, 0},
+ };
+ auto mock_wavreader_factory = CreateMockWavReaderFactory();
+
+ // There are two unique audio tracks to read.
+ EXPECT_CALL(*mock_wavreader_factory, Create(::testing::_)).Times(2);
+
+ MultiEndCall multiend_call(timing, audiotracks_path,
+ std::move(mock_wavreader_factory));
+ EXPECT_FALSE(multiend_call.valid());
+}
+
+TEST(ConversationalSpeechTest, MultiEndCallSetupMultipleChannels) {
+ const std::vector<Turn> timing = {
+ {"A", "sr16000_stereo", 0, 0},
+ {"B", "sr16000_stereo", 0, 0},
+ };
+ auto mock_wavreader_factory = CreateMockWavReaderFactory();
+
+ // There is one unique audio track to read.
+ EXPECT_CALL(*mock_wavreader_factory, Create(::testing::_)).Times(1);
+
+ MultiEndCall multiend_call(timing, audiotracks_path,
+ std::move(mock_wavreader_factory));
+ EXPECT_FALSE(multiend_call.valid());
+}
+
+TEST(ConversationalSpeechTest,
+ MultiEndCallSetupDifferentSampleRatesAndMultipleNumChannels) {
+ const std::vector<Turn> timing = {
+ {"A", "sr8000", 0, 0},
+ {"B", "sr16000_stereo", 0, 0},
+ };
+ auto mock_wavreader_factory = CreateMockWavReaderFactory();
+
+ // There are two unique audio tracks to read.
+ EXPECT_CALL(*mock_wavreader_factory, Create(::testing::_)).Times(2);
+
+ MultiEndCall multiend_call(timing, audiotracks_path,
+ std::move(mock_wavreader_factory));
+ EXPECT_FALSE(multiend_call.valid());
+}
+
+TEST(ConversationalSpeechTest, MultiEndCallSetupFirstOffsetNegative) {
+ const std::vector<Turn> timing = {
+ {"A", "t500", -100, 0},
+ {"B", "t500", 0, 0},
+ };
+ auto mock_wavreader_factory = CreateMockWavReaderFactory();
+
+ // There is one unique audio track to read.
+ EXPECT_CALL(*mock_wavreader_factory, Create(_)).Times(1);
+
+ conversational_speech::MultiEndCall multiend_call(
+ timing, audiotracks_path, std::move(mock_wavreader_factory));
+ EXPECT_FALSE(multiend_call.valid());
+}
+
+TEST(ConversationalSpeechTest, MultiEndCallSetupSimple) {
+ // Accept:
+ // A 0****.....
+ // B .....1****
+ constexpr std::size_t expected_duration = kDefaultSampleRate;
+ const std::vector<Turn> timing = {
+ {"A", "t500", 0, 0},
+ {"B", "t500", 0, 0},
+ };
+ auto mock_wavreader_factory = CreateMockWavReaderFactory();
+
+ // There is one unique audio track to read.
+ EXPECT_CALL(*mock_wavreader_factory, Create(_)).Times(1);
+
+ conversational_speech::MultiEndCall multiend_call(
+ timing, audiotracks_path, std::move(mock_wavreader_factory));
+ EXPECT_TRUE(multiend_call.valid());
+
+ // Test.
+ EXPECT_EQ(2u, multiend_call.speaker_names().size());
+ EXPECT_EQ(1u, multiend_call.audiotrack_readers().size());
+ EXPECT_EQ(2u, multiend_call.speaking_turns().size());
+ EXPECT_EQ(expected_duration, multiend_call.total_duration_samples());
+}
+
+TEST(ConversationalSpeechTest, MultiEndCallSetupPause) {
+ // Accept:
+ // A 0****.......
+ // B .......1****
+ constexpr std::size_t expected_duration = kDefaultSampleRate * 1.2;
+ const std::vector<Turn> timing = {
+ {"A", "t500", 0, 0},
+ {"B", "t500", 200, 0},
+ };
+ auto mock_wavreader_factory = CreateMockWavReaderFactory();
+
+ // There is one unique audio track to read.
+ EXPECT_CALL(*mock_wavreader_factory, Create(_)).Times(1);
+
+ conversational_speech::MultiEndCall multiend_call(
+ timing, audiotracks_path, std::move(mock_wavreader_factory));
+ EXPECT_TRUE(multiend_call.valid());
+
+ // Test.
+ EXPECT_EQ(2u, multiend_call.speaker_names().size());
+ EXPECT_EQ(1u, multiend_call.audiotrack_readers().size());
+ EXPECT_EQ(2u, multiend_call.speaking_turns().size());
+ EXPECT_EQ(expected_duration, multiend_call.total_duration_samples());
+}
+
+TEST(ConversationalSpeechTest, MultiEndCallSetupCrossTalk) {
+ // Accept:
+ // A 0****....
+ // B ....1****
+ constexpr std::size_t expected_duration = kDefaultSampleRate * 0.9;
+ const std::vector<Turn> timing = {
+ {"A", "t500", 0, 0},
+ {"B", "t500", -100, 0},
+ };
+ auto mock_wavreader_factory = CreateMockWavReaderFactory();
+
+ // There is one unique audio track to read.
+ EXPECT_CALL(*mock_wavreader_factory, Create(_)).Times(1);
+
+ conversational_speech::MultiEndCall multiend_call(
+ timing, audiotracks_path, std::move(mock_wavreader_factory));
+ EXPECT_TRUE(multiend_call.valid());
+
+ // Test.
+ EXPECT_EQ(2u, multiend_call.speaker_names().size());
+ EXPECT_EQ(1u, multiend_call.audiotrack_readers().size());
+ EXPECT_EQ(2u, multiend_call.speaking_turns().size());
+ EXPECT_EQ(expected_duration, multiend_call.total_duration_samples());
+}
+
+TEST(ConversationalSpeechTest, MultiEndCallSetupInvalidOrder) {
+ // Reject:
+ // A ..0****
+ // B .1****. The n-th turn cannot start before the (n-1)-th one.
+ const std::vector<Turn> timing = {
+ {"A", "t500", 200, 0},
+ {"B", "t500", -600, 0},
+ };
+ auto mock_wavreader_factory = CreateMockWavReaderFactory();
+
+ // There is one unique audio track to read.
+ EXPECT_CALL(*mock_wavreader_factory, Create(_)).Times(1);
+
+ conversational_speech::MultiEndCall multiend_call(
+ timing, audiotracks_path, std::move(mock_wavreader_factory));
+ EXPECT_FALSE(multiend_call.valid());
+}
+
+TEST(ConversationalSpeechTest, MultiEndCallSetupCrossTalkThree) {
+ // Accept:
+ // A 0****2****...
+ // B ...1*********
+ constexpr std::size_t expected_duration = kDefaultSampleRate * 1.3;
+ const std::vector<Turn> timing = {
+ {"A", "t500", 0, 0},
+ {"B", "t1000", -200, 0},
+ {"A", "t500", -800, 0},
+ };
+ auto mock_wavreader_factory = CreateMockWavReaderFactory();
+
+ // There are two unique audio tracks to read.
+ EXPECT_CALL(*mock_wavreader_factory, Create(_)).Times(2);
+
+ conversational_speech::MultiEndCall multiend_call(
+ timing, audiotracks_path, std::move(mock_wavreader_factory));
+ EXPECT_TRUE(multiend_call.valid());
+
+ // Test.
+ EXPECT_EQ(2u, multiend_call.speaker_names().size());
+ EXPECT_EQ(2u, multiend_call.audiotrack_readers().size());
+ EXPECT_EQ(3u, multiend_call.speaking_turns().size());
+ EXPECT_EQ(expected_duration, multiend_call.total_duration_samples());
+}
+
+TEST(ConversationalSpeechTest, MultiEndCallSetupSelfCrossTalkNearInvalid) {
+ // Reject:
+ // A 0****......
+ // A ...1****...
+ // B ......2****
+ // ^ Turn #1 overlaps with #0 which is from the same speaker.
+ const std::vector<Turn> timing = {
+ {"A", "t500", 0, 0},
+ {"A", "t500", -200, 0},
+ {"B", "t500", -200, 0},
+ };
+ auto mock_wavreader_factory = CreateMockWavReaderFactory();
+
+ // There is one unique audio track to read.
+ EXPECT_CALL(*mock_wavreader_factory, Create(_)).Times(1);
+
+ conversational_speech::MultiEndCall multiend_call(
+ timing, audiotracks_path, std::move(mock_wavreader_factory));
+ EXPECT_FALSE(multiend_call.valid());
+}
+
+TEST(ConversationalSpeechTest, MultiEndCallSetupSelfCrossTalkFarInvalid) {
+ // Reject:
+ // A 0*********
+ // B 1**.......
+ // C ...2**....
+ // A ......3**.
+ // ^ Turn #3 overlaps with #0 which is from the same speaker.
+ const std::vector<Turn> timing = {
+ {"A", "t1000", 0, 0},
+ {"B", "t300", -1000, 0},
+ {"C", "t300", 0, 0},
+ {"A", "t300", 0, 0},
+ };
+ auto mock_wavreader_factory = CreateMockWavReaderFactory();
+
+ // There are two unique audio tracks to read.
+ EXPECT_CALL(*mock_wavreader_factory, Create(_)).Times(2);
+
+ conversational_speech::MultiEndCall multiend_call(
+ timing, audiotracks_path, std::move(mock_wavreader_factory));
+ EXPECT_FALSE(multiend_call.valid());
+}
+
+TEST(ConversationalSpeechTest, MultiEndCallSetupCrossTalkMiddleValid) {
+ // Accept:
+ // A 0*********..
+ // B ..1****.....
+ // C .......2****
+ constexpr std::size_t expected_duration = kDefaultSampleRate * 1.2;
+ const std::vector<Turn> timing = {
+ {"A", "t1000", 0, 0},
+ {"B", "t500", -800, 0},
+ {"C", "t500", 0, 0},
+ };
+ auto mock_wavreader_factory = CreateMockWavReaderFactory();
+
+ // There are two unique audio tracks to read.
+ EXPECT_CALL(*mock_wavreader_factory, Create(_)).Times(2);
+
+ conversational_speech::MultiEndCall multiend_call(
+ timing, audiotracks_path, std::move(mock_wavreader_factory));
+ EXPECT_TRUE(multiend_call.valid());
+
+ // Test.
+ EXPECT_EQ(3u, multiend_call.speaker_names().size());
+ EXPECT_EQ(2u, multiend_call.audiotrack_readers().size());
+ EXPECT_EQ(3u, multiend_call.speaking_turns().size());
+ EXPECT_EQ(expected_duration, multiend_call.total_duration_samples());
+}
+
+TEST(ConversationalSpeechTest, MultiEndCallSetupCrossTalkMiddleInvalid) {
+ // Reject:
+ // A 0*********
+ // B ..1****...
+ // C ....2****.
+ // ^ Turn #2 overlaps both with #0 and #1 (cross-talk with 3+ speakers
+ // not permitted).
+ const std::vector<Turn> timing = {
+ {"A", "t1000", 0, 0},
+ {"B", "t500", -800, 0},
+ {"C", "t500", -300, 0},
+ };
+ auto mock_wavreader_factory = CreateMockWavReaderFactory();
+
+ // There are two unique audio tracks to read.
+ EXPECT_CALL(*mock_wavreader_factory, Create(_)).Times(2);
+
+ conversational_speech::MultiEndCall multiend_call(
+ timing, audiotracks_path, std::move(mock_wavreader_factory));
+ EXPECT_FALSE(multiend_call.valid());
+}
+
+TEST(ConversationalSpeechTest, MultiEndCallSetupCrossTalkMiddleAndPause) {
+ // Accept:
+ // A 0*********..
+ // B .2****......
+ // C .......3****
+ constexpr std::size_t expected_duration = kDefaultSampleRate * 1.2;
+ const std::vector<Turn> timing = {
+ {"A", "t1000", 0, 0},
+ {"B", "t500", -900, 0},
+ {"C", "t500", 100, 0},
+ };
+ auto mock_wavreader_factory = CreateMockWavReaderFactory();
+
+ // There are two unique audio tracks to read.
+ EXPECT_CALL(*mock_wavreader_factory, Create(_)).Times(2);
+
+ conversational_speech::MultiEndCall multiend_call(
+ timing, audiotracks_path, std::move(mock_wavreader_factory));
+ EXPECT_TRUE(multiend_call.valid());
+
+ // Test.
+ EXPECT_EQ(3u, multiend_call.speaker_names().size());
+ EXPECT_EQ(2u, multiend_call.audiotrack_readers().size());
+ EXPECT_EQ(3u, multiend_call.speaking_turns().size());
+ EXPECT_EQ(expected_duration, multiend_call.total_duration_samples());
+}
+
+TEST(ConversationalSpeechTest, MultiEndCallSetupCrossTalkFullOverlapValid) {
+ // Accept:
+ // A 0****
+ // B 1****
+ const std::vector<Turn> timing = {
+ {"A", "t500", 0, 0},
+ {"B", "t500", -500, 0},
+ };
+ auto mock_wavreader_factory = CreateMockWavReaderFactory();
+
+ // There is one unique audio track to read.
+ EXPECT_CALL(*mock_wavreader_factory, Create(_)).Times(1);
+
+ conversational_speech::MultiEndCall multiend_call(
+ timing, audiotracks_path, std::move(mock_wavreader_factory));
+ EXPECT_TRUE(multiend_call.valid());
+
+ // Test.
+ EXPECT_EQ(2u, multiend_call.speaker_names().size());
+ EXPECT_EQ(1u, multiend_call.audiotrack_readers().size());
+ EXPECT_EQ(2u, multiend_call.speaking_turns().size());
+}
+
+TEST(ConversationalSpeechTest, MultiEndCallSetupLongSequence) {
+ // Accept:
+ // A 0****....3****.5**.
+ // B .....1****...4**...
+ // C ......2**.......6**..
+ constexpr std::size_t expected_duration = kDefaultSampleRate * 1.9;
+ const std::vector<Turn> timing = {
+ {"A", "t500", 0, 0}, {"B", "t500", 0, 0}, {"C", "t300", -400, 0},
+ {"A", "t500", 0, 0}, {"B", "t300", -100, 0}, {"A", "t300", -100, 0},
+ {"C", "t300", -200, 0},
+ };
+  auto mock_wavreader_factory = CreateMockWavReaderFactory();
+
+ // There are two unique audio tracks to read.
+ EXPECT_CALL(*mock_wavreader_factory, Create(_)).Times(2);
+
+ conversational_speech::MultiEndCall multiend_call(
+ timing, audiotracks_path, std::move(mock_wavreader_factory));
+ EXPECT_TRUE(multiend_call.valid());
+
+ // Test.
+ EXPECT_EQ(3u, multiend_call.speaker_names().size());
+ EXPECT_EQ(2u, multiend_call.audiotrack_readers().size());
+ EXPECT_EQ(7u, multiend_call.speaking_turns().size());
+ EXPECT_EQ(expected_duration, multiend_call.total_duration_samples());
+}
+
+TEST(ConversationalSpeechTest, MultiEndCallSetupLongSequenceInvalid) {
+ // Reject:
+ // A 0****....3****.6**
+ // B .....1****...4**..
+ // C ......2**.....5**..
+ // ^ Turns #4, #5 and #6 overlapping (cross-talk with 3+
+ // speakers not permitted).
+ const std::vector<Turn> timing = {
+ {"A", "t500", 0, 0}, {"B", "t500", 0, 0}, {"C", "t300", -400, 0},
+ {"A", "t500", 0, 0}, {"B", "t300", -100, 0}, {"A", "t300", -200, 0},
+ {"C", "t300", -200, 0},
+ };
+  auto mock_wavreader_factory = CreateMockWavReaderFactory();
+
+ // There are two unique audio tracks to read.
+ EXPECT_CALL(*mock_wavreader_factory, Create(_)).Times(2);
+
+ conversational_speech::MultiEndCall multiend_call(
+ timing, audiotracks_path, std::move(mock_wavreader_factory));
+ EXPECT_FALSE(multiend_call.valid());
+}
+
+TEST(ConversationalSpeechTest, MultiEndCallWavReaderAdaptorSine) {
+ // Parameters with which wav files are created.
+ constexpr int duration_seconds = 5;
+ const int sample_rates[] = {8000, 11025, 16000, 22050, 32000, 44100, 48000};
+
+ for (int sample_rate : sample_rates) {
+ const std::string temp_filename = OutputPath() + "TempSineWavFile_" +
+ std::to_string(sample_rate) + ".wav";
+
+ // Write wav file.
+ const std::size_t num_samples = duration_seconds * sample_rate;
+ MockWavReaderFactory::Params params = {sample_rate, 1u, num_samples};
+ CreateSineWavFile(temp_filename, params);
+
+ // Load wav file and check if params match.
+ WavReaderFactory wav_reader_factory;
+    MockWavReaderFactory::Params expected_params = {sample_rate, 1u,
+                                                    num_samples};
+    CheckAudioTrackParams(wav_reader_factory, temp_filename, expected_params);
+
+ // Clean up.
+ RemoveFile(temp_filename);
+ }
+}
+
+TEST(ConversationalSpeechTest, DISABLED_MultiEndCallSimulator) {
+ // Simulated call (one character corresponding to 500 ms):
+ // A 0*********...........2*********.....
+ // B ...........1*********.....3*********
+ const std::vector<Turn> expected_timing = {
+ {"A", "t5000_440.wav", 0, 0},
+ {"B", "t5000_880.wav", 500, 0},
+ {"A", "t5000_440.wav", 0, 0},
+ {"B", "t5000_880.wav", -2500, 0},
+ };
+ const std::size_t expected_duration_seconds = 18;
+
+ // Create temporary audio track files.
+ const int sample_rate = 16000;
+ const std::map<std::string, SineAudioTrackParams> sine_tracks_params = {
+ {"t5000_440.wav", {{sample_rate, 1u, sample_rate * 5}, 440.0}},
+ {"t5000_880.wav", {{sample_rate, 1u, sample_rate * 5}, 880.0}},
+ };
+ const std::string audiotracks_path =
+ CreateTemporarySineAudioTracks(sine_tracks_params);
+
+ // Set up the multi-end call.
+ auto wavreader_factory =
+ std::unique_ptr<WavReaderFactory>(new WavReaderFactory());
+ MultiEndCall multiend_call(expected_timing, audiotracks_path,
+ std::move(wavreader_factory));
+
+ // Simulate the call.
+ std::string output_path = JoinFilename(audiotracks_path, "output");
+ CreateDir(output_path);
+ RTC_LOG(LS_VERBOSE) << "simulator output path: " << output_path;
+  auto generated_audiotrack_pairs =
+      conversational_speech::Simulate(multiend_call, output_path);
+  EXPECT_EQ(2u, generated_audiotrack_pairs->size());
+
+ // Check the output.
+ WavReaderFactory wav_reader_factory;
+  const MockWavReaderFactory::Params expected_params = {
+      sample_rate, 1u, sample_rate * expected_duration_seconds};
+  for (const auto& it : *generated_audiotrack_pairs) {
+    RTC_LOG(LS_VERBOSE) << "checking far/near-end for <" << it.first << ">";
+    CheckAudioTrackParams(wav_reader_factory, it.second.near_end,
+                          expected_params);
+    CheckAudioTrackParams(wav_reader_factory, it.second.far_end,
+                          expected_params);
+ }
+
+ // Clean.
+ EXPECT_NO_FATAL_FAILURE(DeleteFolderAndContents(audiotracks_path));
+}
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/test/conversational_speech/mock_wavreader.cc b/third_party/libwebrtc/modules/audio_processing/test/conversational_speech/mock_wavreader.cc
new file mode 100644
index 0000000000..1263e938c4
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/conversational_speech/mock_wavreader.cc
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/test/conversational_speech/mock_wavreader.h"
+
+namespace webrtc {
+namespace test {
+namespace conversational_speech {
+
+using ::testing::Return;
+
+MockWavReader::MockWavReader(int sample_rate,
+ size_t num_channels,
+ size_t num_samples)
+ : sample_rate_(sample_rate),
+ num_channels_(num_channels),
+ num_samples_(num_samples) {
+ ON_CALL(*this, SampleRate()).WillByDefault(Return(sample_rate_));
+ ON_CALL(*this, NumChannels()).WillByDefault(Return(num_channels_));
+ ON_CALL(*this, NumSamples()).WillByDefault(Return(num_samples_));
+}
+
+MockWavReader::~MockWavReader() = default;
+
+} // namespace conversational_speech
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/test/conversational_speech/mock_wavreader.h b/third_party/libwebrtc/modules/audio_processing/test/conversational_speech/mock_wavreader.h
new file mode 100644
index 0000000000..94e20b9ec6
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/conversational_speech/mock_wavreader.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_TEST_CONVERSATIONAL_SPEECH_MOCK_WAVREADER_H_
+#define MODULES_AUDIO_PROCESSING_TEST_CONVERSATIONAL_SPEECH_MOCK_WAVREADER_H_
+
+#include <cstddef>
+#include <string>
+
+#include "api/array_view.h"
+#include "modules/audio_processing/test/conversational_speech/wavreader_interface.h"
+#include "test/gmock.h"
+
+namespace webrtc {
+namespace test {
+namespace conversational_speech {
+
+class MockWavReader : public WavReaderInterface {
+ public:
+ MockWavReader(int sample_rate, size_t num_channels, size_t num_samples);
+ ~MockWavReader();
+
+ // TODO(alessiob): use ON_CALL to return random samples if needed.
+ MOCK_METHOD(size_t, ReadFloatSamples, (rtc::ArrayView<float>), (override));
+ MOCK_METHOD(size_t, ReadInt16Samples, (rtc::ArrayView<int16_t>), (override));
+
+ MOCK_METHOD(int, SampleRate, (), (const, override));
+ MOCK_METHOD(size_t, NumChannels, (), (const, override));
+ MOCK_METHOD(size_t, NumSamples, (), (const, override));
+
+ private:
+ const int sample_rate_;
+ const size_t num_channels_;
+ const size_t num_samples_;
+};
+
+} // namespace conversational_speech
+} // namespace test
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_TEST_CONVERSATIONAL_SPEECH_MOCK_WAVREADER_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/test/conversational_speech/mock_wavreader_factory.cc b/third_party/libwebrtc/modules/audio_processing/test/conversational_speech/mock_wavreader_factory.cc
new file mode 100644
index 0000000000..a377cce7e3
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/conversational_speech/mock_wavreader_factory.cc
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/test/conversational_speech/mock_wavreader_factory.h"
+
+#include "absl/strings/string_view.h"
+#include "modules/audio_processing/test/conversational_speech/mock_wavreader.h"
+#include "rtc_base/logging.h"
+#include "test/gmock.h"
+
+namespace webrtc {
+namespace test {
+namespace conversational_speech {
+
+using ::testing::_;
+using ::testing::Invoke;
+
+MockWavReaderFactory::MockWavReaderFactory(
+ const Params& default_params,
+ const std::map<std::string, const Params>& params)
+ : default_params_(default_params), audiotrack_names_params_(params) {
+ ON_CALL(*this, Create(_))
+ .WillByDefault(Invoke(this, &MockWavReaderFactory::CreateMock));
+}
+
+MockWavReaderFactory::MockWavReaderFactory(const Params& default_params)
+ : MockWavReaderFactory(default_params,
+ std::map<std::string, const Params>{}) {}
+
+MockWavReaderFactory::~MockWavReaderFactory() = default;
+
+std::unique_ptr<WavReaderInterface> MockWavReaderFactory::CreateMock(
+ absl::string_view filepath) {
+  // Search for the parameters corresponding to filepath.
+  size_t delimiter = filepath.find_last_of("/\\");  // Windows or POSIX.
+ std::string filename(filepath.substr(
+ delimiter == absl::string_view::npos ? 0 : delimiter + 1));
+ const auto it = audiotrack_names_params_.find(filename);
+
+ // If not found, use default parameters.
+ if (it == audiotrack_names_params_.end()) {
+ RTC_LOG(LS_VERBOSE) << "using default parameters for " << filepath;
+ return std::unique_ptr<WavReaderInterface>(new MockWavReader(
+ default_params_.sample_rate, default_params_.num_channels,
+ default_params_.num_samples));
+ }
+
+ // Found, use the audiotrack-specific parameters.
+ RTC_LOG(LS_VERBOSE) << "using ad-hoc parameters for " << filepath;
+ RTC_LOG(LS_VERBOSE) << "sample_rate " << it->second.sample_rate;
+ RTC_LOG(LS_VERBOSE) << "num_channels " << it->second.num_channels;
+ RTC_LOG(LS_VERBOSE) << "num_samples " << it->second.num_samples;
+ return std::unique_ptr<WavReaderInterface>(new MockWavReader(
+ it->second.sample_rate, it->second.num_channels, it->second.num_samples));
+}
+
+} // namespace conversational_speech
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/test/conversational_speech/mock_wavreader_factory.h b/third_party/libwebrtc/modules/audio_processing/test/conversational_speech/mock_wavreader_factory.h
new file mode 100644
index 0000000000..bcc7f3069b
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/conversational_speech/mock_wavreader_factory.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_TEST_CONVERSATIONAL_SPEECH_MOCK_WAVREADER_FACTORY_H_
+#define MODULES_AUDIO_PROCESSING_TEST_CONVERSATIONAL_SPEECH_MOCK_WAVREADER_FACTORY_H_
+
+#include <map>
+#include <memory>
+#include <string>
+
+#include "absl/strings/string_view.h"
+#include "modules/audio_processing/test/conversational_speech/wavreader_abstract_factory.h"
+#include "modules/audio_processing/test/conversational_speech/wavreader_interface.h"
+#include "test/gmock.h"
+
+namespace webrtc {
+namespace test {
+namespace conversational_speech {
+
+class MockWavReaderFactory : public WavReaderAbstractFactory {
+ public:
+ struct Params {
+ int sample_rate;
+ size_t num_channels;
+ size_t num_samples;
+ };
+
+ MockWavReaderFactory(const Params& default_params,
+ const std::map<std::string, const Params>& params);
+ explicit MockWavReaderFactory(const Params& default_params);
+ ~MockWavReaderFactory();
+
+ MOCK_METHOD(std::unique_ptr<WavReaderInterface>,
+ Create,
+ (absl::string_view),
+ (const, override));
+
+ private:
+ // Creates a MockWavReader instance using the parameters in
+ // audiotrack_names_params_ if the entry corresponding to filepath exists,
+ // otherwise creates a MockWavReader instance using the default parameters.
+ std::unique_ptr<WavReaderInterface> CreateMock(absl::string_view filepath);
+
+ const Params& default_params_;
+ std::map<std::string, const Params> audiotrack_names_params_;
+};
+
+} // namespace conversational_speech
+} // namespace test
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_TEST_CONVERSATIONAL_SPEECH_MOCK_WAVREADER_FACTORY_H_
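
Note: the unit tests above reference kDefaultMockWavReaderFactoryParams and
kDefaultMockWavReaderFactoryParamsMap, which are defined in the test file
rather than here. A minimal sketch of what such constants could look like
(track names, rates and durations are assumptions matching the "t300"/"t500"
turns used above):

    // Hypothetical sketch only; the real constants live in the unit tests.
    const MockWavReaderFactory::Params kDefaultMockWavReaderFactoryParams = {
        48000,    // Sample rate (Hz).
        1u,       // Mono.
        24000u};  // 500 ms at 48 kHz.
    const std::map<std::string, const MockWavReaderFactory::Params>
        kDefaultMockWavReaderFactoryParamsMap = {
            {"t300", {48000, 1u, 14400u}},  // 300 ms.
            {"t500", {48000, 1u, 24000u}},  // 500 ms.
    };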
diff --git a/third_party/libwebrtc/modules/audio_processing/test/conversational_speech/multiend_call.cc b/third_party/libwebrtc/modules/audio_processing/test/conversational_speech/multiend_call.cc
new file mode 100644
index 0000000000..952114a78b
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/conversational_speech/multiend_call.cc
@@ -0,0 +1,193 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/test/conversational_speech/multiend_call.h"
+
+#include <algorithm>
+#include <iterator>
+
+#include "absl/strings/string_view.h"
+#include "rtc_base/logging.h"
+#include "test/testsupport/file_utils.h"
+
+namespace webrtc {
+namespace test {
+namespace conversational_speech {
+
+MultiEndCall::MultiEndCall(
+ rtc::ArrayView<const Turn> timing,
+ absl::string_view audiotracks_path,
+ std::unique_ptr<WavReaderAbstractFactory> wavreader_abstract_factory)
+ : timing_(timing),
+ audiotracks_path_(audiotracks_path),
+ wavreader_abstract_factory_(std::move(wavreader_abstract_factory)),
+ valid_(false) {
+ FindSpeakerNames();
+ if (CreateAudioTrackReaders())
+ valid_ = CheckTiming();
+}
+
+MultiEndCall::~MultiEndCall() = default;
+
+void MultiEndCall::FindSpeakerNames() {
+ RTC_DCHECK(speaker_names_.empty());
+ for (const Turn& turn : timing_) {
+ speaker_names_.emplace(turn.speaker_name);
+ }
+}
+
+bool MultiEndCall::CreateAudioTrackReaders() {
+ RTC_DCHECK(audiotrack_readers_.empty());
+ sample_rate_hz_ = 0; // Sample rate will be set when reading the first track.
+ for (const Turn& turn : timing_) {
+ auto it = audiotrack_readers_.find(turn.audiotrack_file_name);
+ if (it != audiotrack_readers_.end())
+ continue;
+
+ const std::string audiotrack_file_path =
+ test::JoinFilename(audiotracks_path_, turn.audiotrack_file_name);
+
+ // Map the audiotrack file name to a new instance of WavReaderInterface.
+ std::unique_ptr<WavReaderInterface> wavreader =
+ wavreader_abstract_factory_->Create(
+ test::JoinFilename(audiotracks_path_, turn.audiotrack_file_name));
+
+ if (sample_rate_hz_ == 0) {
+ sample_rate_hz_ = wavreader->SampleRate();
+ } else if (sample_rate_hz_ != wavreader->SampleRate()) {
+ RTC_LOG(LS_ERROR)
+ << "All the audio tracks should have the same sample rate.";
+ return false;
+ }
+
+ if (wavreader->NumChannels() != 1) {
+ RTC_LOG(LS_ERROR) << "Only mono audio tracks supported.";
+ return false;
+ }
+
+ audiotrack_readers_.emplace(turn.audiotrack_file_name,
+ std::move(wavreader));
+ }
+
+ return true;
+}
+
+bool MultiEndCall::CheckTiming() {
+ struct Interval {
+ size_t begin;
+ size_t end;
+ };
+ size_t number_of_turns = timing_.size();
+ auto millisecond_to_samples = [](int ms, int sr) -> int {
+ // Truncation may happen if the sampling rate is not an integer multiple
+ // of 1000 (e.g., 44100).
+ return ms * sr / 1000;
+ };
+ auto in_interval = [](size_t value, const Interval& interval) {
+ return interval.begin <= value && value < interval.end;
+ };
+ total_duration_samples_ = 0;
+ speaking_turns_.clear();
+
+ // Begin and end timestamps for the last two turns (unit: number of samples).
+ Interval second_last_turn = {0, 0};
+ Interval last_turn = {0, 0};
+
+ // Initialize map to store speaking turn indices of each speaker (used to
+ // detect self cross-talk).
+ std::map<std::string, std::vector<size_t>> speaking_turn_indices;
+ for (const std::string& speaker_name : speaker_names_) {
+ speaking_turn_indices.emplace(std::piecewise_construct,
+ std::forward_as_tuple(speaker_name),
+ std::forward_as_tuple());
+ }
+
+ // Parse turns.
+ for (size_t turn_index = 0; turn_index < number_of_turns; ++turn_index) {
+ const Turn& turn = timing_[turn_index];
+ auto it = audiotrack_readers_.find(turn.audiotrack_file_name);
+ RTC_CHECK(it != audiotrack_readers_.end())
+ << "Audio track reader not created";
+
+ // Begin and end timestamps for the current turn.
+ int offset_samples =
+ millisecond_to_samples(turn.offset, it->second->SampleRate());
+ std::size_t begin_timestamp = last_turn.end + offset_samples;
+ std::size_t end_timestamp = begin_timestamp + it->second->NumSamples();
+ RTC_LOG(LS_INFO) << "turn #" << turn_index << " " << begin_timestamp << "-"
+ << end_timestamp << " ms";
+
+    // The order is invalid if the offset is negative and its absolute value
+    // is larger than the duration of the previous turn.
+ if (offset_samples < 0 &&
+ -offset_samples > static_cast<int>(last_turn.end - last_turn.begin)) {
+ RTC_LOG(LS_ERROR) << "invalid order";
+ return false;
+ }
+
+    // Cross-talk with 3 or more speakers occurs when the beginning of the
+    // current interval falls within both of the last two turns.
+ if (turn_index > 1 && in_interval(begin_timestamp, last_turn) &&
+ in_interval(begin_timestamp, second_last_turn)) {
+ RTC_LOG(LS_ERROR) << "cross-talk with 3+ speakers";
+ return false;
+ }
+
+ // Append turn.
+ speaking_turns_.emplace_back(turn.speaker_name, turn.audiotrack_file_name,
+ begin_timestamp, end_timestamp, turn.gain);
+
+ // Save speaking turn index for self cross-talk detection.
+ RTC_DCHECK_EQ(speaking_turns_.size(), turn_index + 1);
+ speaking_turn_indices[turn.speaker_name].push_back(turn_index);
+
+    // Update the total duration of the conversational speech.
+ if (total_duration_samples_ < end_timestamp)
+ total_duration_samples_ = end_timestamp;
+
+ // Update and continue with next turn.
+ second_last_turn = last_turn;
+ last_turn.begin = begin_timestamp;
+ last_turn.end = end_timestamp;
+ }
+
+ // Detect self cross-talk.
+ for (const std::string& speaker_name : speaker_names_) {
+ RTC_LOG(LS_INFO) << "checking self cross-talk for <" << speaker_name << ">";
+
+ // Copy all turns for this speaker to new vector.
+ std::vector<SpeakingTurn> speaking_turns_for_name;
+ std::copy_if(speaking_turns_.begin(), speaking_turns_.end(),
+ std::back_inserter(speaking_turns_for_name),
+ [&speaker_name](const SpeakingTurn& st) {
+ return st.speaker_name == speaker_name;
+ });
+
+ // Check for overlap between adjacent elements.
+ // This is a sufficient condition for self cross-talk since the intervals
+ // are sorted by begin timestamp.
+ auto overlap = std::adjacent_find(
+ speaking_turns_for_name.begin(), speaking_turns_for_name.end(),
+ [](const SpeakingTurn& a, const SpeakingTurn& b) {
+ return a.end > b.begin;
+ });
+
+ if (overlap != speaking_turns_for_name.end()) {
+ RTC_LOG(LS_ERROR) << "Self cross-talk detected";
+ return false;
+ }
+ }
+
+ return true;
+}
+
+} // namespace conversational_speech
+} // namespace test
+} // namespace webrtc
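
To make the offset semantics concrete: a negative offset starts a turn before
the previous one ends, which CheckTiming() accepts as long as no more than two
speakers overlap and a speaker never overlaps with itself. A minimal sketch,
assuming 48 kHz mono tracks named as in the unit tests:

    // "B" starts 200 ms (9600 samples) before "A" ends: two-speaker
    // cross-talk, accepted.
    const std::vector<conversational_speech::Turn> timing = {
        {"A", "t500", 0, 0},     // Samples [0, 24000).
        {"B", "t500", -200, 0},  // Samples [14400, 38400).
    };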
diff --git a/third_party/libwebrtc/modules/audio_processing/test/conversational_speech/multiend_call.h b/third_party/libwebrtc/modules/audio_processing/test/conversational_speech/multiend_call.h
new file mode 100644
index 0000000000..63283465fa
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/conversational_speech/multiend_call.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_TEST_CONVERSATIONAL_SPEECH_MULTIEND_CALL_H_
+#define MODULES_AUDIO_PROCESSING_TEST_CONVERSATIONAL_SPEECH_MULTIEND_CALL_H_
+
+#include <stddef.h>
+
+#include <map>
+#include <memory>
+#include <set>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "absl/strings/string_view.h"
+#include "api/array_view.h"
+#include "modules/audio_processing/test/conversational_speech/timing.h"
+#include "modules/audio_processing/test/conversational_speech/wavreader_abstract_factory.h"
+#include "modules/audio_processing/test/conversational_speech/wavreader_interface.h"
+
+namespace webrtc {
+namespace test {
+namespace conversational_speech {
+
+class MultiEndCall {
+ public:
+ struct SpeakingTurn {
+ // Constructor required in order to use std::vector::emplace_back().
+ SpeakingTurn(absl::string_view new_speaker_name,
+ absl::string_view new_audiotrack_file_name,
+ size_t new_begin,
+ size_t new_end,
+ int gain)
+ : speaker_name(new_speaker_name),
+ audiotrack_file_name(new_audiotrack_file_name),
+ begin(new_begin),
+ end(new_end),
+ gain(gain) {}
+ std::string speaker_name;
+ std::string audiotrack_file_name;
+ size_t begin;
+ size_t end;
+ int gain;
+ };
+
+ MultiEndCall(
+ rtc::ArrayView<const Turn> timing,
+ absl::string_view audiotracks_path,
+ std::unique_ptr<WavReaderAbstractFactory> wavreader_abstract_factory);
+ ~MultiEndCall();
+
+ MultiEndCall(const MultiEndCall&) = delete;
+ MultiEndCall& operator=(const MultiEndCall&) = delete;
+
+ const std::set<std::string>& speaker_names() const { return speaker_names_; }
+ const std::map<std::string, std::unique_ptr<WavReaderInterface>>&
+ audiotrack_readers() const {
+ return audiotrack_readers_;
+ }
+ bool valid() const { return valid_; }
+ int sample_rate() const { return sample_rate_hz_; }
+ size_t total_duration_samples() const { return total_duration_samples_; }
+ const std::vector<SpeakingTurn>& speaking_turns() const {
+ return speaking_turns_;
+ }
+
+ private:
+ // Finds unique speaker names.
+ void FindSpeakerNames();
+
+  // Creates one WavReader instance for each unique audiotrack. Returns false
+  // if the audio tracks do not all have the same sample rate or if any of
+  // them is not mono.
+ bool CreateAudioTrackReaders();
+
+ // Validates the speaking turns timing information. Accepts cross-talk, but
+ // only up to 2 speakers. Rejects unordered turns and self cross-talk.
+ bool CheckTiming();
+
+ rtc::ArrayView<const Turn> timing_;
+ std::string audiotracks_path_;
+ std::unique_ptr<WavReaderAbstractFactory> wavreader_abstract_factory_;
+ std::set<std::string> speaker_names_;
+ std::map<std::string, std::unique_ptr<WavReaderInterface>>
+ audiotrack_readers_;
+ bool valid_;
+ int sample_rate_hz_;
+ size_t total_duration_samples_;
+ std::vector<SpeakingTurn> speaking_turns_;
+};
+
+} // namespace conversational_speech
+} // namespace test
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_TEST_CONVERSATIONAL_SPEECH_MULTIEND_CALL_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/test/conversational_speech/simulator.cc b/third_party/libwebrtc/modules/audio_processing/test/conversational_speech/simulator.cc
new file mode 100644
index 0000000000..89bcd48d84
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/conversational_speech/simulator.cc
@@ -0,0 +1,235 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/test/conversational_speech/simulator.h"
+
+#include <math.h>
+
+#include <algorithm>
+#include <memory>
+#include <set>
+#include <utility>
+#include <vector>
+
+#include "absl/strings/string_view.h"
+#include "api/array_view.h"
+#include "common_audio/include/audio_util.h"
+#include "common_audio/wav_file.h"
+#include "modules/audio_processing/test/conversational_speech/wavreader_interface.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "test/testsupport/file_utils.h"
+
+namespace webrtc {
+namespace test {
+namespace {
+
+using conversational_speech::MultiEndCall;
+using conversational_speech::SpeakerOutputFilePaths;
+using conversational_speech::WavReaderInterface;
+
+// Combines the output path and the speaker names to define the output file
+// paths for the near-end and far-end audio tracks.
+std::unique_ptr<std::map<std::string, SpeakerOutputFilePaths>>
+InitSpeakerOutputFilePaths(const std::set<std::string>& speaker_names,
+ absl::string_view output_path) {
+ // Create map.
+ auto speaker_output_file_paths_map =
+ std::make_unique<std::map<std::string, SpeakerOutputFilePaths>>();
+
+ // Add near-end and far-end output paths into the map.
+ for (const auto& speaker_name : speaker_names) {
+ const std::string near_end_path =
+ test::JoinFilename(output_path, "s_" + speaker_name + "-near_end.wav");
+ RTC_LOG(LS_VERBOSE) << "The near-end audio track will be created in "
+ << near_end_path << ".";
+
+ const std::string far_end_path =
+ test::JoinFilename(output_path, "s_" + speaker_name + "-far_end.wav");
+ RTC_LOG(LS_VERBOSE) << "The far-end audio track will be created in "
+ << far_end_path << ".";
+
+ // Add to map.
+ speaker_output_file_paths_map->emplace(
+ std::piecewise_construct, std::forward_as_tuple(speaker_name),
+ std::forward_as_tuple(near_end_path, far_end_path));
+ }
+
+ return speaker_output_file_paths_map;
+}
+
+// Class that provides one WavWriter for the near-end and one for the far-end
+// output track of a speaker.
+class SpeakerWavWriters {
+ public:
+ SpeakerWavWriters(const SpeakerOutputFilePaths& output_file_paths,
+ int sample_rate)
+ : near_end_wav_writer_(output_file_paths.near_end, sample_rate, 1u),
+ far_end_wav_writer_(output_file_paths.far_end, sample_rate, 1u) {}
+ WavWriter* near_end_wav_writer() { return &near_end_wav_writer_; }
+ WavWriter* far_end_wav_writer() { return &far_end_wav_writer_; }
+
+ private:
+ WavWriter near_end_wav_writer_;
+ WavWriter far_end_wav_writer_;
+};
+
+// Initializes a pair of WavWriter instances for each speaker: one for the
+// near-end and one for the far-end output track.
+std::unique_ptr<std::map<std::string, SpeakerWavWriters>>
+InitSpeakersWavWriters(const std::map<std::string, SpeakerOutputFilePaths>&
+ speaker_output_file_paths,
+ int sample_rate) {
+ // Create map.
+ auto speaker_wav_writers_map =
+ std::make_unique<std::map<std::string, SpeakerWavWriters>>();
+
+  // Add a SpeakerWavWriters instance into the map for each speaker.
+ for (auto it = speaker_output_file_paths.begin();
+ it != speaker_output_file_paths.end(); ++it) {
+ speaker_wav_writers_map->emplace(
+ std::piecewise_construct, std::forward_as_tuple(it->first),
+ std::forward_as_tuple(it->second, sample_rate));
+ }
+
+ return speaker_wav_writers_map;
+}
+
+// Reads all the samples for each audio track.
+std::unique_ptr<std::map<std::string, std::vector<int16_t>>> PreloadAudioTracks(
+ const std::map<std::string, std::unique_ptr<WavReaderInterface>>&
+ audiotrack_readers) {
+ // Create map.
+ auto audiotracks_map =
+ std::make_unique<std::map<std::string, std::vector<int16_t>>>();
+
+ // Add audio track vectors.
+ for (auto it = audiotrack_readers.begin(); it != audiotrack_readers.end();
+ ++it) {
+ // Add map entry.
+ audiotracks_map->emplace(std::piecewise_construct,
+ std::forward_as_tuple(it->first),
+ std::forward_as_tuple(it->second->NumSamples()));
+
+ // Read samples.
+ it->second->ReadInt16Samples(audiotracks_map->at(it->first));
+ }
+
+ return audiotracks_map;
+}
+
+// Writes all the values in `source_samples` via `wav_writer`. If the number of
+// previously written samples in `wav_writer` is less than `interval_begin`, it
+// adds zeros as left padding. The padding corresponds to intervals during which
+// a speaker is not active.
+void PadLeftWriteChunk(rtc::ArrayView<const int16_t> source_samples,
+ size_t interval_begin,
+ WavWriter* wav_writer) {
+ // Add left padding.
+ RTC_CHECK(wav_writer);
+ RTC_CHECK_GE(interval_begin, wav_writer->num_samples());
+ size_t padding_size = interval_begin - wav_writer->num_samples();
+ if (padding_size != 0) {
+ const std::vector<int16_t> padding(padding_size, 0);
+ wav_writer->WriteSamples(padding.data(), padding_size);
+ }
+
+ // Write source samples.
+ wav_writer->WriteSamples(source_samples.data(), source_samples.size());
+}
+
+// Appends zeros via `wav_writer`. The number of zeros is always non-negative
+// and equal to the difference between `pad_samples` and the number of
+// previously written samples.
+void PadRightWrite(WavWriter* wav_writer, size_t pad_samples) {
+ RTC_CHECK(wav_writer);
+ RTC_CHECK_GE(pad_samples, wav_writer->num_samples());
+ size_t padding_size = pad_samples - wav_writer->num_samples();
+ if (padding_size != 0) {
+ const std::vector<int16_t> padding(padding_size, 0);
+ wav_writer->WriteSamples(padding.data(), padding_size);
+ }
+}
+
+void ScaleSignal(rtc::ArrayView<const int16_t> source_samples,
+ int gain,
+ rtc::ArrayView<int16_t> output_samples) {
+ const float gain_linear = DbToRatio(gain);
+ RTC_DCHECK_EQ(source_samples.size(), output_samples.size());
+ std::transform(source_samples.begin(), source_samples.end(),
+ output_samples.begin(), [gain_linear](int16_t x) -> int16_t {
+ return rtc::saturated_cast<int16_t>(x * gain_linear);
+ });
+}
+
+} // namespace
+
+namespace conversational_speech {
+
+std::unique_ptr<std::map<std::string, SpeakerOutputFilePaths>> Simulate(
+ const MultiEndCall& multiend_call,
+ absl::string_view output_path) {
+ // Set output file paths and initialize wav writers.
+ const auto& speaker_names = multiend_call.speaker_names();
+ auto speaker_output_file_paths =
+ InitSpeakerOutputFilePaths(speaker_names, output_path);
+ auto speakers_wav_writers = InitSpeakersWavWriters(
+ *speaker_output_file_paths, multiend_call.sample_rate());
+
+ // Preload all the input audio tracks.
+ const auto& audiotrack_readers = multiend_call.audiotrack_readers();
+ auto audiotracks = PreloadAudioTracks(audiotrack_readers);
+
+  // TODO(alessiob): When speaker_names.size() == 2, near-end and far-end
+  // across the 2 speakers are symmetric; hence, the code below could be
+  // replaced by only creating the near-end or the far-end. However, this
+  // would require splitting the unit tests and documenting the behavior in
+  // README.md. In practice, it should not be an issue since the files are
+  // not expected to be significantly large.
+
+ // Write near-end and far-end output tracks.
+ for (const auto& speaking_turn : multiend_call.speaking_turns()) {
+ const std::string& active_speaker_name = speaking_turn.speaker_name;
+ const auto source_audiotrack =
+ audiotracks->at(speaking_turn.audiotrack_file_name);
+ std::vector<int16_t> scaled_audiotrack(source_audiotrack.size());
+ ScaleSignal(source_audiotrack, speaking_turn.gain, scaled_audiotrack);
+
+ // Write active speaker's chunk to active speaker's near-end.
+ PadLeftWriteChunk(
+ scaled_audiotrack, speaking_turn.begin,
+ speakers_wav_writers->at(active_speaker_name).near_end_wav_writer());
+
+ // Write active speaker's chunk to other participants' far-ends.
+ for (const std::string& speaker_name : speaker_names) {
+ if (speaker_name == active_speaker_name)
+ continue;
+ PadLeftWriteChunk(
+ scaled_audiotrack, speaking_turn.begin,
+ speakers_wav_writers->at(speaker_name).far_end_wav_writer());
+ }
+ }
+
+ // Finalize all the output tracks with right padding.
+ // This is required to make all the output tracks duration equal.
+ size_t duration_samples = multiend_call.total_duration_samples();
+ for (const std::string& speaker_name : speaker_names) {
+ PadRightWrite(speakers_wav_writers->at(speaker_name).near_end_wav_writer(),
+ duration_samples);
+ PadRightWrite(speakers_wav_writers->at(speaker_name).far_end_wav_writer(),
+ duration_samples);
+ }
+
+ return speaker_output_file_paths;
+}
+
+} // namespace conversational_speech
+} // namespace test
+} // namespace webrtc
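
A quick illustration of the gain handling in ScaleSignal(): the per-turn gain
is expressed in dB and DbToRatio() (from common_audio/include/audio_util.h)
maps it to a linear factor 10^(gain / 20), after which rtc::saturated_cast<>
clamps each scaled sample to the int16_t range:

    const float linear = DbToRatio(6.f);  // +6 dB -> ~1.995.
    const int16_t scaled = rtc::saturated_cast<int16_t>(30000 * linear);
    // scaled == 32767: 30000 * ~1.995 exceeds INT16_MAX and is clamped.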
diff --git a/third_party/libwebrtc/modules/audio_processing/test/conversational_speech/simulator.h b/third_party/libwebrtc/modules/audio_processing/test/conversational_speech/simulator.h
new file mode 100644
index 0000000000..2f311e16b3
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/conversational_speech/simulator.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_TEST_CONVERSATIONAL_SPEECH_SIMULATOR_H_
+#define MODULES_AUDIO_PROCESSING_TEST_CONVERSATIONAL_SPEECH_SIMULATOR_H_
+
+#include <map>
+#include <memory>
+#include <string>
+#include <utility>
+
+#include "absl/strings/string_view.h"
+#include "modules/audio_processing/test/conversational_speech/multiend_call.h"
+
+namespace webrtc {
+namespace test {
+namespace conversational_speech {
+
+struct SpeakerOutputFilePaths {
+ SpeakerOutputFilePaths(absl::string_view new_near_end,
+ absl::string_view new_far_end)
+ : near_end(new_near_end), far_end(new_far_end) {}
+ // Paths to the near-end and far-end audio track files.
+ const std::string near_end;
+ const std::string far_end;
+};
+
+// Generates the near-end and far-end audio track pairs for each speaker.
+std::unique_ptr<std::map<std::string, SpeakerOutputFilePaths>> Simulate(
+ const MultiEndCall& multiend_call,
+ absl::string_view output_path);
+
+} // namespace conversational_speech
+} // namespace test
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_TEST_CONVERSATIONAL_SPEECH_SIMULATOR_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/test/conversational_speech/timing.cc b/third_party/libwebrtc/modules/audio_processing/test/conversational_speech/timing.cc
new file mode 100644
index 0000000000..95ec9f542e
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/conversational_speech/timing.cc
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/test/conversational_speech/timing.h"
+
+#include <fstream>
+#include <iostream>
+#include <string>
+
+#include "absl/strings/string_view.h"
+#include "rtc_base/string_encode.h"
+
+namespace webrtc {
+namespace test {
+namespace conversational_speech {
+
+bool Turn::operator==(const Turn& b) const {
+ return b.speaker_name == speaker_name &&
+ b.audiotrack_file_name == audiotrack_file_name && b.offset == offset &&
+ b.gain == gain;
+}
+
+std::vector<Turn> LoadTiming(absl::string_view timing_filepath) {
+ // Line parser.
+ auto parse_line = [](absl::string_view line) {
+ std::vector<absl::string_view> fields = rtc::split(line, ' ');
+ RTC_CHECK_GE(fields.size(), 3);
+ RTC_CHECK_LE(fields.size(), 4);
+ int gain = 0;
+ if (fields.size() == 4) {
+ gain = rtc::StringToNumber<int>(fields[3]).value_or(0);
+ }
+ return Turn(fields[0], fields[1],
+ rtc::StringToNumber<int>(fields[2]).value_or(0), gain);
+ };
+
+ // Init.
+ std::vector<Turn> timing;
+
+ // Parse lines.
+ std::string line;
+ std::ifstream infile(std::string{timing_filepath});
+ while (std::getline(infile, line)) {
+ if (line.empty())
+ continue;
+ timing.push_back(parse_line(line));
+ }
+ infile.close();
+
+ return timing;
+}
+
+void SaveTiming(absl::string_view timing_filepath,
+ rtc::ArrayView<const Turn> timing) {
+ std::ofstream outfile(std::string{timing_filepath});
+ RTC_CHECK(outfile.is_open());
+ for (const Turn& turn : timing) {
+ outfile << turn.speaker_name << " " << turn.audiotrack_file_name << " "
+ << turn.offset << " " << turn.gain << std::endl;
+ }
+ outfile.close();
+}
+
+} // namespace conversational_speech
+} // namespace test
+} // namespace webrtc
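
As implied by parse_line() above, a timing file contains one turn per line
with three mandatory space-separated fields (speaker name, audio track file
name, offset in ms) plus an optional gain field. A hypothetical example (file
names are placeholders):

    A a1.wav 0
    B b1.wav 200
    A a2.wav -100
    B b2.wav 0 -3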
diff --git a/third_party/libwebrtc/modules/audio_processing/test/conversational_speech/timing.h b/third_party/libwebrtc/modules/audio_processing/test/conversational_speech/timing.h
new file mode 100644
index 0000000000..9314f6fc43
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/conversational_speech/timing.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_TEST_CONVERSATIONAL_SPEECH_TIMING_H_
+#define MODULES_AUDIO_PROCESSING_TEST_CONVERSATIONAL_SPEECH_TIMING_H_
+
+#include <string>
+#include <vector>
+
+#include "absl/strings/string_view.h"
+#include "api/array_view.h"
+
+namespace webrtc {
+namespace test {
+namespace conversational_speech {
+
+struct Turn {
+ Turn(absl::string_view new_speaker_name,
+ absl::string_view new_audiotrack_file_name,
+ int new_offset,
+ int gain)
+ : speaker_name(new_speaker_name),
+ audiotrack_file_name(new_audiotrack_file_name),
+ offset(new_offset),
+ gain(gain) {}
+ bool operator==(const Turn& b) const;
+ std::string speaker_name;
+ std::string audiotrack_file_name;
+ int offset;
+ int gain;
+};
+
+// Loads a list of turns from a file.
+std::vector<Turn> LoadTiming(absl::string_view timing_filepath);
+
+// Writes a list of turns into a file.
+void SaveTiming(absl::string_view timing_filepath,
+ rtc::ArrayView<const Turn> timing);
+
+} // namespace conversational_speech
+} // namespace test
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_TEST_CONVERSATIONAL_SPEECH_TIMING_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/test/conversational_speech/wavreader_abstract_factory.h b/third_party/libwebrtc/modules/audio_processing/test/conversational_speech/wavreader_abstract_factory.h
new file mode 100644
index 0000000000..14ddfc7539
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/conversational_speech/wavreader_abstract_factory.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_TEST_CONVERSATIONAL_SPEECH_WAVREADER_ABSTRACT_FACTORY_H_
+#define MODULES_AUDIO_PROCESSING_TEST_CONVERSATIONAL_SPEECH_WAVREADER_ABSTRACT_FACTORY_H_
+
+#include <memory>
+
+#include "absl/strings/string_view.h"
+#include "modules/audio_processing/test/conversational_speech/wavreader_interface.h"
+
+namespace webrtc {
+namespace test {
+namespace conversational_speech {
+
+class WavReaderAbstractFactory {
+ public:
+ virtual ~WavReaderAbstractFactory() = default;
+ virtual std::unique_ptr<WavReaderInterface> Create(
+ absl::string_view filepath) const = 0;
+};
+
+} // namespace conversational_speech
+} // namespace test
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_TEST_CONVERSATIONAL_SPEECH_WAVREADER_ABSTRACT_FACTORY_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/test/conversational_speech/wavreader_factory.cc b/third_party/libwebrtc/modules/audio_processing/test/conversational_speech/wavreader_factory.cc
new file mode 100644
index 0000000000..99b1686484
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/conversational_speech/wavreader_factory.cc
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/test/conversational_speech/wavreader_factory.h"
+
+#include <cstddef>
+
+#include "absl/strings/string_view.h"
+#include "api/array_view.h"
+#include "common_audio/wav_file.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace test {
+namespace {
+
+using conversational_speech::WavReaderInterface;
+
+class WavReaderAdaptor final : public WavReaderInterface {
+ public:
+ explicit WavReaderAdaptor(absl::string_view filepath)
+ : wav_reader_(filepath) {}
+ ~WavReaderAdaptor() override = default;
+
+ size_t ReadFloatSamples(rtc::ArrayView<float> samples) override {
+ return wav_reader_.ReadSamples(samples.size(), samples.begin());
+ }
+
+ size_t ReadInt16Samples(rtc::ArrayView<int16_t> samples) override {
+ return wav_reader_.ReadSamples(samples.size(), samples.begin());
+ }
+
+ int SampleRate() const override { return wav_reader_.sample_rate(); }
+
+ size_t NumChannels() const override { return wav_reader_.num_channels(); }
+
+ size_t NumSamples() const override { return wav_reader_.num_samples(); }
+
+ private:
+ WavReader wav_reader_;
+};
+
+} // namespace
+
+namespace conversational_speech {
+
+WavReaderFactory::WavReaderFactory() = default;
+
+WavReaderFactory::~WavReaderFactory() = default;
+
+std::unique_ptr<WavReaderInterface> WavReaderFactory::Create(
+ absl::string_view filepath) const {
+ return std::unique_ptr<WavReaderAdaptor>(new WavReaderAdaptor(filepath));
+}
+
+} // namespace conversational_speech
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/test/conversational_speech/wavreader_factory.h b/third_party/libwebrtc/modules/audio_processing/test/conversational_speech/wavreader_factory.h
new file mode 100644
index 0000000000..f2e5b61055
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/conversational_speech/wavreader_factory.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_TEST_CONVERSATIONAL_SPEECH_WAVREADER_FACTORY_H_
+#define MODULES_AUDIO_PROCESSING_TEST_CONVERSATIONAL_SPEECH_WAVREADER_FACTORY_H_
+
+#include <memory>
+
+#include "absl/strings/string_view.h"
+#include "modules/audio_processing/test/conversational_speech/wavreader_abstract_factory.h"
+#include "modules/audio_processing/test/conversational_speech/wavreader_interface.h"
+
+namespace webrtc {
+namespace test {
+namespace conversational_speech {
+
+class WavReaderFactory : public WavReaderAbstractFactory {
+ public:
+ WavReaderFactory();
+ ~WavReaderFactory() override;
+ std::unique_ptr<WavReaderInterface> Create(
+ absl::string_view filepath) const override;
+};
+
+} // namespace conversational_speech
+} // namespace test
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_TEST_CONVERSATIONAL_SPEECH_WAVREADER_FACTORY_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/test/conversational_speech/wavreader_interface.h b/third_party/libwebrtc/modules/audio_processing/test/conversational_speech/wavreader_interface.h
new file mode 100644
index 0000000000..c74f639461
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/conversational_speech/wavreader_interface.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_TEST_CONVERSATIONAL_SPEECH_WAVREADER_INTERFACE_H_
+#define MODULES_AUDIO_PROCESSING_TEST_CONVERSATIONAL_SPEECH_WAVREADER_INTERFACE_H_
+
+#include <stddef.h>
+
+#include "api/array_view.h"
+
+namespace webrtc {
+namespace test {
+namespace conversational_speech {
+
+class WavReaderInterface {
+ public:
+ virtual ~WavReaderInterface() = default;
+
+ // Returns the number of samples read.
+ virtual size_t ReadFloatSamples(rtc::ArrayView<float> samples) = 0;
+ virtual size_t ReadInt16Samples(rtc::ArrayView<int16_t> samples) = 0;
+
+ // Getters.
+ virtual int SampleRate() const = 0;
+ virtual size_t NumChannels() const = 0;
+ virtual size_t NumSamples() const = 0;
+};
+
+} // namespace conversational_speech
+} // namespace test
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_TEST_CONVERSATIONAL_SPEECH_WAVREADER_INTERFACE_H_
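
A minimal usage sketch of this interface, mirroring how PreloadAudioTracks()
in simulator.cc consumes it (`reader` is assumed to point to a valid
implementation; a std::vector binds implicitly to rtc::ArrayView):

    // Preallocate a buffer of NumSamples() and read the whole track at once.
    std::vector<int16_t> samples(reader->NumSamples());
    const size_t num_read = reader->ReadInt16Samples(samples);
    RTC_CHECK_EQ(num_read, samples.size());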
diff --git a/third_party/libwebrtc/modules/audio_processing/test/debug_dump_replayer.cc b/third_party/libwebrtc/modules/audio_processing/test/debug_dump_replayer.cc
new file mode 100644
index 0000000000..2419313e9d
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/debug_dump_replayer.cc
@@ -0,0 +1,248 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/test/debug_dump_replayer.h"
+
+#include <string>
+
+#include "absl/strings/string_view.h"
+#include "modules/audio_processing/test/audio_processing_builder_for_testing.h"
+#include "modules/audio_processing/test/protobuf_utils.h"
+#include "modules/audio_processing/test/runtime_setting_util.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace test {
+
+namespace {
+
+void MaybeResetBuffer(std::unique_ptr<ChannelBuffer<float>>* buffer,
+ const StreamConfig& config) {
+ auto& buffer_ref = *buffer;
+ if (!buffer_ref.get() || buffer_ref->num_frames() != config.num_frames() ||
+ buffer_ref->num_channels() != config.num_channels()) {
+ buffer_ref.reset(
+ new ChannelBuffer<float>(config.num_frames(), config.num_channels()));
+ }
+}
+
+} // namespace
+
+DebugDumpReplayer::DebugDumpReplayer()
+ : input_(nullptr), // will be created upon usage.
+ reverse_(nullptr),
+ output_(nullptr),
+ apm_(nullptr),
+ debug_file_(nullptr) {}
+
+DebugDumpReplayer::~DebugDumpReplayer() {
+ if (debug_file_)
+ fclose(debug_file_);
+}
+
+bool DebugDumpReplayer::SetDumpFile(absl::string_view filename) {
+ debug_file_ = fopen(std::string(filename).c_str(), "rb");
+ LoadNextMessage();
+ return debug_file_;
+}
+
+// Returns the next event that has not yet run.
+absl::optional<audioproc::Event> DebugDumpReplayer::GetNextEvent() const {
+ if (!has_next_event_)
+ return absl::nullopt;
+ else
+ return next_event_;
+}
+
+// Runs the next event. Returns true if it succeeded.
+bool DebugDumpReplayer::RunNextEvent() {
+ if (!has_next_event_)
+ return false;
+ switch (next_event_.type()) {
+ case audioproc::Event::INIT:
+ OnInitEvent(next_event_.init());
+ break;
+ case audioproc::Event::STREAM:
+ OnStreamEvent(next_event_.stream());
+ break;
+ case audioproc::Event::REVERSE_STREAM:
+ OnReverseStreamEvent(next_event_.reverse_stream());
+ break;
+ case audioproc::Event::CONFIG:
+ OnConfigEvent(next_event_.config());
+ break;
+ case audioproc::Event::RUNTIME_SETTING:
+ OnRuntimeSettingEvent(next_event_.runtime_setting());
+ break;
+ case audioproc::Event::UNKNOWN_EVENT:
+      // We do not expect to receive an UNKNOWN event.
+ RTC_CHECK_NOTREACHED();
+ }
+ LoadNextMessage();
+ return true;
+}
+
+const ChannelBuffer<float>* DebugDumpReplayer::GetOutput() const {
+ return output_.get();
+}
+
+StreamConfig DebugDumpReplayer::GetOutputConfig() const {
+ return output_config_;
+}
+
+// OnInitEvent resets the input/output/reverse channel formats.
+void DebugDumpReplayer::OnInitEvent(const audioproc::Init& msg) {
+ RTC_CHECK(msg.has_num_input_channels());
+ RTC_CHECK(msg.has_output_sample_rate());
+ RTC_CHECK(msg.has_num_output_channels());
+ RTC_CHECK(msg.has_reverse_sample_rate());
+ RTC_CHECK(msg.has_num_reverse_channels());
+
+ input_config_ = StreamConfig(msg.sample_rate(), msg.num_input_channels());
+ output_config_ =
+ StreamConfig(msg.output_sample_rate(), msg.num_output_channels());
+ reverse_config_ =
+ StreamConfig(msg.reverse_sample_rate(), msg.num_reverse_channels());
+
+ MaybeResetBuffer(&input_, input_config_);
+ MaybeResetBuffer(&output_, output_config_);
+ MaybeResetBuffer(&reverse_, reverse_config_);
+}
+
+// OnStreamEvent replays an input signal and verifies the output.
+void DebugDumpReplayer::OnStreamEvent(const audioproc::Stream& msg) {
+ // APM should have been created.
+ RTC_CHECK(apm_.get());
+
+ apm_->set_stream_analog_level(msg.level());
+ RTC_CHECK_EQ(AudioProcessing::kNoError,
+ apm_->set_stream_delay_ms(msg.delay()));
+
+ if (msg.has_keypress()) {
+ apm_->set_stream_key_pressed(msg.keypress());
+ } else {
+ apm_->set_stream_key_pressed(true);
+ }
+
+ RTC_CHECK_EQ(input_config_.num_channels(),
+ static_cast<size_t>(msg.input_channel_size()));
+ RTC_CHECK_EQ(input_config_.num_frames() * sizeof(float),
+ msg.input_channel(0).size());
+
+ for (int i = 0; i < msg.input_channel_size(); ++i) {
+ memcpy(input_->channels()[i], msg.input_channel(i).data(),
+ msg.input_channel(i).size());
+ }
+
+ RTC_CHECK_EQ(AudioProcessing::kNoError,
+ apm_->ProcessStream(input_->channels(), input_config_,
+ output_config_, output_->channels()));
+}
+
+void DebugDumpReplayer::OnReverseStreamEvent(
+ const audioproc::ReverseStream& msg) {
+ // APM should have been created.
+ RTC_CHECK(apm_.get());
+
+ RTC_CHECK_GT(msg.channel_size(), 0);
+ RTC_CHECK_EQ(reverse_config_.num_channels(),
+ static_cast<size_t>(msg.channel_size()));
+ RTC_CHECK_EQ(reverse_config_.num_frames() * sizeof(float),
+ msg.channel(0).size());
+
+ for (int i = 0; i < msg.channel_size(); ++i) {
+ memcpy(reverse_->channels()[i], msg.channel(i).data(),
+ msg.channel(i).size());
+ }
+
+ RTC_CHECK_EQ(
+ AudioProcessing::kNoError,
+ apm_->ProcessReverseStream(reverse_->channels(), reverse_config_,
+ reverse_config_, reverse_->channels()));
+}
+
+void DebugDumpReplayer::OnConfigEvent(const audioproc::Config& msg) {
+ MaybeRecreateApm(msg);
+ ConfigureApm(msg);
+}
+
+void DebugDumpReplayer::OnRuntimeSettingEvent(
+ const audioproc::RuntimeSetting& msg) {
+ RTC_CHECK(apm_.get());
+ ReplayRuntimeSetting(apm_.get(), msg);
+}
+
+void DebugDumpReplayer::MaybeRecreateApm(const audioproc::Config& msg) {
+ // These configurations cannot be changed on the fly.
+ RTC_CHECK(msg.has_aec_delay_agnostic_enabled());
+ RTC_CHECK(msg.has_aec_extended_filter_enabled());
+
+  // We only create the APM once, since these fields are not expected to
+  // change in the current implementation.
+ if (!apm_.get()) {
+ apm_ = AudioProcessingBuilderForTesting().Create();
+ }
+}
+
+void DebugDumpReplayer::ConfigureApm(const audioproc::Config& msg) {
+ AudioProcessing::Config apm_config;
+
+ // AEC2/AECM configs.
+ RTC_CHECK(msg.has_aec_enabled());
+ RTC_CHECK(msg.has_aecm_enabled());
+ apm_config.echo_canceller.enabled = msg.aec_enabled() || msg.aecm_enabled();
+ apm_config.echo_canceller.mobile_mode = msg.aecm_enabled();
+
+ // HPF configs.
+ RTC_CHECK(msg.has_hpf_enabled());
+ apm_config.high_pass_filter.enabled = msg.hpf_enabled();
+
+ // Preamp configs.
+ RTC_CHECK(msg.has_pre_amplifier_enabled());
+ apm_config.pre_amplifier.enabled = msg.pre_amplifier_enabled();
+ apm_config.pre_amplifier.fixed_gain_factor =
+ msg.pre_amplifier_fixed_gain_factor();
+
+ // NS configs.
+ RTC_CHECK(msg.has_ns_enabled());
+ RTC_CHECK(msg.has_ns_level());
+ apm_config.noise_suppression.enabled = msg.ns_enabled();
+ apm_config.noise_suppression.level =
+ static_cast<AudioProcessing::Config::NoiseSuppression::Level>(
+ msg.ns_level());
+
+ // TS configs.
+ RTC_CHECK(msg.has_transient_suppression_enabled());
+ apm_config.transient_suppression.enabled =
+ msg.transient_suppression_enabled();
+
+ // AGC configs.
+ RTC_CHECK(msg.has_agc_enabled());
+ RTC_CHECK(msg.has_agc_mode());
+ RTC_CHECK(msg.has_agc_limiter_enabled());
+ apm_config.gain_controller1.enabled = msg.agc_enabled();
+ apm_config.gain_controller1.mode =
+ static_cast<AudioProcessing::Config::GainController1::Mode>(
+ msg.agc_mode());
+ apm_config.gain_controller1.enable_limiter = msg.agc_limiter_enabled();
+ RTC_CHECK(msg.has_noise_robust_agc_enabled());
+ apm_config.gain_controller1.analog_gain_controller.enabled =
+ msg.noise_robust_agc_enabled();
+
+ apm_->ApplyConfig(apm_config);
+}
+
+void DebugDumpReplayer::LoadNextMessage() {
+ has_next_event_ =
+ debug_file_ && ReadMessageFromFile(debug_file_, &next_event_);
+}
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/test/debug_dump_replayer.h b/third_party/libwebrtc/modules/audio_processing/test/debug_dump_replayer.h
new file mode 100644
index 0000000000..be21c68663
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/debug_dump_replayer.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_TEST_DEBUG_DUMP_REPLAYER_H_
+#define MODULES_AUDIO_PROCESSING_TEST_DEBUG_DUMP_REPLAYER_H_
+
+#include <memory>
+
+#include "absl/strings/string_view.h"
+#include "common_audio/channel_buffer.h"
+#include "modules/audio_processing/include/audio_processing.h"
+#include "rtc_base/ignore_wundef.h"
+
+RTC_PUSH_IGNORING_WUNDEF()
+#include "modules/audio_processing/debug.pb.h"
+RTC_POP_IGNORING_WUNDEF()
+
+namespace webrtc {
+namespace test {
+
+class DebugDumpReplayer {
+ public:
+ DebugDumpReplayer();
+ ~DebugDumpReplayer();
+
+  // Sets the dump file. Returns true if the file was opened successfully.
+ bool SetDumpFile(absl::string_view filename);
+
+  // Returns the next event that has not yet run.
+ absl::optional<audioproc::Event> GetNextEvent() const;
+
+ // Run the next event. Returns true if succeeded.
+ bool RunNextEvent();
+
+ const ChannelBuffer<float>* GetOutput() const;
+ StreamConfig GetOutputConfig() const;
+
+ private:
+ // Following functions are facilities for replaying debug dumps.
+ void OnInitEvent(const audioproc::Init& msg);
+ void OnStreamEvent(const audioproc::Stream& msg);
+ void OnReverseStreamEvent(const audioproc::ReverseStream& msg);
+ void OnConfigEvent(const audioproc::Config& msg);
+ void OnRuntimeSettingEvent(const audioproc::RuntimeSetting& msg);
+
+ void MaybeRecreateApm(const audioproc::Config& msg);
+ void ConfigureApm(const audioproc::Config& msg);
+
+ void LoadNextMessage();
+
+ // Buffer for APM input/output.
+ std::unique_ptr<ChannelBuffer<float>> input_;
+ std::unique_ptr<ChannelBuffer<float>> reverse_;
+ std::unique_ptr<ChannelBuffer<float>> output_;
+
+ rtc::scoped_refptr<AudioProcessing> apm_;
+
+ FILE* debug_file_;
+
+ StreamConfig input_config_;
+ StreamConfig reverse_config_;
+ StreamConfig output_config_;
+
+ bool has_next_event_;
+ audioproc::Event next_event_;
+};
+
+} // namespace test
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_TEST_DEBUG_DUMP_REPLAYER_H_
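
A minimal replay loop built on this interface (the dump file path is a
placeholder):

    DebugDumpReplayer replayer;
    RTC_CHECK(replayer.SetDumpFile("apm_debug.aecdump"));
    while (replayer.GetNextEvent()) {
      RTC_CHECK(replayer.RunNextEvent());
    }
    const ChannelBuffer<float>* last_output = replayer.GetOutput();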
diff --git a/third_party/libwebrtc/modules/audio_processing/test/debug_dump_test.cc b/third_party/libwebrtc/modules/audio_processing/test/debug_dump_test.cc
new file mode 100644
index 0000000000..d69d3a4eea
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/debug_dump_test.cc
@@ -0,0 +1,535 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stddef.h> // size_t
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "absl/strings/string_view.h"
+#include "api/audio/echo_canceller3_factory.h"
+#include "modules/audio_coding/neteq/tools/resample_input_audio_file.h"
+#include "modules/audio_processing/aec_dump/aec_dump_factory.h"
+#include "modules/audio_processing/test/audio_processing_builder_for_testing.h"
+#include "modules/audio_processing/test/debug_dump_replayer.h"
+#include "modules/audio_processing/test/test_utils.h"
+#include "rtc_base/task_queue_for_test.h"
+#include "test/gtest.h"
+#include "test/testsupport/file_utils.h"
+
+namespace webrtc {
+namespace test {
+
+namespace {
+
+void MaybeResetBuffer(std::unique_ptr<ChannelBuffer<float>>* buffer,
+ const StreamConfig& config) {
+ auto& buffer_ref = *buffer;
+ if (!buffer_ref.get() || buffer_ref->num_frames() != config.num_frames() ||
+ buffer_ref->num_channels() != config.num_channels()) {
+ buffer_ref.reset(
+ new ChannelBuffer<float>(config.num_frames(), config.num_channels()));
+ }
+}
+
+class DebugDumpGenerator {
+ public:
+ DebugDumpGenerator(absl::string_view input_file_name,
+ int input_rate_hz,
+ int input_channels,
+ absl::string_view reverse_file_name,
+ int reverse_rate_hz,
+ int reverse_channels,
+ absl::string_view dump_file_name,
+ bool enable_pre_amplifier);
+
+ // Constructor that uses default input files.
+ explicit DebugDumpGenerator(const AudioProcessing::Config& apm_config);
+
+ ~DebugDumpGenerator();
+
+ // Changes the sample rate of the input audio to the APM.
+ void SetInputRate(int rate_hz);
+
+  // Sets whether the stereo input signal is converted to mono by discarding
+  // the other channels.
+ void ForceInputMono(bool mono);
+
+ // Changes the sample rate of the reverse audio to the APM.
+ void SetReverseRate(int rate_hz);
+
+  // Sets whether the reverse signal is forced to mono by discarding the other
+  // channels.
+ void ForceReverseMono(bool mono);
+
+ // Sets the required sample rate of the APM output.
+ void SetOutputRate(int rate_hz);
+
+ // Sets the required channels of the APM output.
+ void SetOutputChannels(int channels);
+
+ std::string dump_file_name() const { return dump_file_name_; }
+
+ void StartRecording();
+ void Process(size_t num_blocks);
+ void StopRecording();
+ AudioProcessing* apm() const { return apm_.get(); }
+
+ private:
+ static void ReadAndDeinterleave(ResampleInputAudioFile* audio,
+ int channels,
+ const StreamConfig& config,
+ float* const* buffer);
+
+ // APM input/output settings.
+ StreamConfig input_config_;
+ StreamConfig reverse_config_;
+ StreamConfig output_config_;
+
+ // Input file format.
+ const std::string input_file_name_;
+ ResampleInputAudioFile input_audio_;
+ const int input_file_channels_;
+
+ // Reverse file format.
+ const std::string reverse_file_name_;
+ ResampleInputAudioFile reverse_audio_;
+ const int reverse_file_channels_;
+
+ // Buffer for APM input/output.
+ std::unique_ptr<ChannelBuffer<float>> input_;
+ std::unique_ptr<ChannelBuffer<float>> reverse_;
+ std::unique_ptr<ChannelBuffer<float>> output_;
+
+ bool enable_pre_amplifier_;
+
+ TaskQueueForTest worker_queue_;
+ rtc::scoped_refptr<AudioProcessing> apm_;
+
+ const std::string dump_file_name_;
+};
+
+DebugDumpGenerator::DebugDumpGenerator(absl::string_view input_file_name,
+ int input_rate_hz,
+ int input_channels,
+ absl::string_view reverse_file_name,
+ int reverse_rate_hz,
+ int reverse_channels,
+ absl::string_view dump_file_name,
+ bool enable_pre_amplifier)
+ : input_config_(input_rate_hz, input_channels),
+ reverse_config_(reverse_rate_hz, reverse_channels),
+ output_config_(input_rate_hz, input_channels),
+ input_audio_(input_file_name, input_rate_hz, input_rate_hz),
+ input_file_channels_(input_channels),
+ reverse_audio_(reverse_file_name, reverse_rate_hz, reverse_rate_hz),
+ reverse_file_channels_(reverse_channels),
+ input_(new ChannelBuffer<float>(input_config_.num_frames(),
+ input_config_.num_channels())),
+ reverse_(new ChannelBuffer<float>(reverse_config_.num_frames(),
+ reverse_config_.num_channels())),
+ output_(new ChannelBuffer<float>(output_config_.num_frames(),
+ output_config_.num_channels())),
+ enable_pre_amplifier_(enable_pre_amplifier),
+ worker_queue_("debug_dump_generator_worker_queue"),
+ dump_file_name_(dump_file_name) {
+ AudioProcessingBuilderForTesting apm_builder;
+ apm_ = apm_builder.Create();
+}
+
+DebugDumpGenerator::DebugDumpGenerator(
+ const AudioProcessing::Config& apm_config)
+ : DebugDumpGenerator(ResourcePath("near32_stereo", "pcm"),
+ 32000,
+ 2,
+ ResourcePath("far32_stereo", "pcm"),
+ 32000,
+ 2,
+ TempFilename(OutputPath(), "debug_aec"),
+ apm_config.pre_amplifier.enabled) {
+ apm_->ApplyConfig(apm_config);
+}
+
+DebugDumpGenerator::~DebugDumpGenerator() {
+ remove(dump_file_name_.c_str());
+}
+
+void DebugDumpGenerator::SetInputRate(int rate_hz) {
+ input_audio_.set_output_rate_hz(rate_hz);
+ input_config_.set_sample_rate_hz(rate_hz);
+ MaybeResetBuffer(&input_, input_config_);
+}
+
+void DebugDumpGenerator::ForceInputMono(bool mono) {
+ const int channels = mono ? 1 : input_file_channels_;
+ input_config_.set_num_channels(channels);
+ MaybeResetBuffer(&input_, input_config_);
+}
+
+void DebugDumpGenerator::SetReverseRate(int rate_hz) {
+ reverse_audio_.set_output_rate_hz(rate_hz);
+ reverse_config_.set_sample_rate_hz(rate_hz);
+ MaybeResetBuffer(&reverse_, reverse_config_);
+}
+
+void DebugDumpGenerator::ForceReverseMono(bool mono) {
+ const int channels = mono ? 1 : reverse_file_channels_;
+ reverse_config_.set_num_channels(channels);
+ MaybeResetBuffer(&reverse_, reverse_config_);
+}
+
+void DebugDumpGenerator::SetOutputRate(int rate_hz) {
+ output_config_.set_sample_rate_hz(rate_hz);
+ MaybeResetBuffer(&output_, output_config_);
+}
+
+void DebugDumpGenerator::SetOutputChannels(int channels) {
+ output_config_.set_num_channels(channels);
+ MaybeResetBuffer(&output_, output_config_);
+}
+
+void DebugDumpGenerator::StartRecording() {
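+  // A maximum log size of -1 means that the size of the dump file is
+  // unlimited.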
+ apm_->AttachAecDump(
+ AecDumpFactory::Create(dump_file_name_.c_str(), -1, &worker_queue_));
+}
+
+void DebugDumpGenerator::Process(size_t num_blocks) {
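+  // For each block: read and deinterleave both streams, exercise the runtime
+  // APIs (stream delay, analog level, optional pre-gain, key press) and run
+  // the capture and render paths through APM.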
+ for (size_t i = 0; i < num_blocks; ++i) {
+ ReadAndDeinterleave(&reverse_audio_, reverse_file_channels_,
+ reverse_config_, reverse_->channels());
+ ReadAndDeinterleave(&input_audio_, input_file_channels_, input_config_,
+ input_->channels());
+ RTC_CHECK_EQ(AudioProcessing::kNoError, apm_->set_stream_delay_ms(100));
+ apm_->set_stream_analog_level(100);
+ if (enable_pre_amplifier_) {
+ apm_->SetRuntimeSetting(
+ AudioProcessing::RuntimeSetting::CreateCapturePreGain(1 + i % 10));
+ }
+ apm_->set_stream_key_pressed(i % 10 == 9);
+ RTC_CHECK_EQ(AudioProcessing::kNoError,
+ apm_->ProcessStream(input_->channels(), input_config_,
+ output_config_, output_->channels()));
+
+ RTC_CHECK_EQ(
+ AudioProcessing::kNoError,
+ apm_->ProcessReverseStream(reverse_->channels(), reverse_config_,
+ reverse_config_, reverse_->channels()));
+ }
+}
+
+void DebugDumpGenerator::StopRecording() {
+ apm_->DetachAecDump();
+}
+
+void DebugDumpGenerator::ReadAndDeinterleave(ResampleInputAudioFile* audio,
+ int channels,
+ const StreamConfig& config,
+ float* const* buffer) {
+ const size_t num_frames = config.num_frames();
+ const int out_channels = config.num_channels();
+
+ std::vector<int16_t> signal(channels * num_frames);
+
+ audio->Read(num_frames * channels, &signal[0]);
+
+  // We only allow reducing the number of channels by discarding some of them.
+ RTC_CHECK_LE(out_channels, channels);
+ for (int channel = 0; channel < out_channels; ++channel) {
+ for (size_t i = 0; i < num_frames; ++i) {
+ buffer[channel][i] = S16ToFloat(signal[i * channels + channel]);
+ }
+ }
+}
+
+} // namespace
+
+class DebugDumpTest : public ::testing::Test {
+ public:
+  // VerifyDebugDump replays a debug dump through APM and verifies that the
+  // result is bit-exact with the output channels stored in the dump. This is
+  // only guaranteed if the debug dump is started on the first frame.
+ void VerifyDebugDump(absl::string_view in_filename);
+
+ private:
+ DebugDumpReplayer debug_dump_replayer_;
+};
+
+void DebugDumpTest::VerifyDebugDump(absl::string_view in_filename) {
+ ASSERT_TRUE(debug_dump_replayer_.SetDumpFile(in_filename));
+
+ while (const absl::optional<audioproc::Event> event =
+ debug_dump_replayer_.GetNextEvent()) {
+ debug_dump_replayer_.RunNextEvent();
+ if (event->type() == audioproc::Event::STREAM) {
+ const audioproc::Stream* msg = &event->stream();
+ const StreamConfig output_config = debug_dump_replayer_.GetOutputConfig();
+ const ChannelBuffer<float>* output = debug_dump_replayer_.GetOutput();
+ // Check that output of APM is bit-exact to the output in the dump.
+ ASSERT_EQ(output_config.num_channels(),
+ static_cast<size_t>(msg->output_channel_size()));
+ ASSERT_EQ(output_config.num_frames() * sizeof(float),
+ msg->output_channel(0).size());
+ for (int i = 0; i < msg->output_channel_size(); ++i) {
+ ASSERT_EQ(0,
+ memcmp(output->channels()[i], msg->output_channel(i).data(),
+ msg->output_channel(i).size()));
+ }
+ }
+ }
+}
+
+TEST_F(DebugDumpTest, SimpleCase) {
+ DebugDumpGenerator generator(/*apm_config=*/{});
+ generator.StartRecording();
+ generator.Process(100);
+ generator.StopRecording();
+ VerifyDebugDump(generator.dump_file_name());
+}
+
+TEST_F(DebugDumpTest, ChangeInputFormat) {
+ DebugDumpGenerator generator(/*apm_config=*/{});
+
+ generator.StartRecording();
+ generator.Process(100);
+ generator.SetInputRate(48000);
+
+ generator.ForceInputMono(true);
+  // The number of output channels must not be larger than the number of input
+  // channels; APM fails otherwise.
+ generator.SetOutputChannels(1);
+
+ generator.Process(100);
+ generator.StopRecording();
+ VerifyDebugDump(generator.dump_file_name());
+}
+
+TEST_F(DebugDumpTest, ChangeReverseFormat) {
+ DebugDumpGenerator generator(/*apm_config=*/{});
+ generator.StartRecording();
+ generator.Process(100);
+ generator.SetReverseRate(48000);
+ generator.ForceReverseMono(true);
+ generator.Process(100);
+ generator.StopRecording();
+ VerifyDebugDump(generator.dump_file_name());
+}
+
+TEST_F(DebugDumpTest, ChangeOutputFormat) {
+ DebugDumpGenerator generator(/*apm_config=*/{});
+ generator.StartRecording();
+ generator.Process(100);
+ generator.SetOutputRate(48000);
+ generator.SetOutputChannels(1);
+ generator.Process(100);
+ generator.StopRecording();
+ VerifyDebugDump(generator.dump_file_name());
+}
+
+TEST_F(DebugDumpTest, ToggleAec) {
+ AudioProcessing::Config apm_config;
+ apm_config.echo_canceller.enabled = true;
+ DebugDumpGenerator generator(apm_config);
+ generator.StartRecording();
+ generator.Process(100);
+
+ apm_config.echo_canceller.enabled = false;
+ generator.apm()->ApplyConfig(apm_config);
+
+ generator.Process(100);
+ generator.StopRecording();
+ VerifyDebugDump(generator.dump_file_name());
+}
+
+TEST_F(DebugDumpTest, VerifyCombinedExperimentalStringInclusive) {
+ AudioProcessing::Config apm_config;
+ apm_config.echo_canceller.enabled = true;
+ apm_config.gain_controller1.analog_gain_controller.enabled = true;
+ apm_config.gain_controller1.analog_gain_controller.startup_min_volume = 0;
+  // Arbitrarily set the minimum clipped level to 17, which will never be the
+  // default.
+ apm_config.gain_controller1.analog_gain_controller.clipped_level_min = 17;
+ DebugDumpGenerator generator(apm_config);
+ generator.StartRecording();
+ generator.Process(100);
+ generator.StopRecording();
+
+ DebugDumpReplayer debug_dump_replayer_;
+
+ ASSERT_TRUE(debug_dump_replayer_.SetDumpFile(generator.dump_file_name()));
+
+ while (const absl::optional<audioproc::Event> event =
+ debug_dump_replayer_.GetNextEvent()) {
+ debug_dump_replayer_.RunNextEvent();
+ if (event->type() == audioproc::Event::CONFIG) {
+ const audioproc::Config* msg = &event->config();
+ ASSERT_TRUE(msg->has_experiments_description());
+ EXPECT_PRED_FORMAT2(::testing::IsSubstring, "EchoController",
+ msg->experiments_description().c_str());
+ EXPECT_PRED_FORMAT2(::testing::IsSubstring, "AgcClippingLevelExperiment",
+ msg->experiments_description().c_str());
+ }
+ }
+}
+
+TEST_F(DebugDumpTest, VerifyCombinedExperimentalStringExclusive) {
+ AudioProcessing::Config apm_config;
+ apm_config.echo_canceller.enabled = true;
+ DebugDumpGenerator generator(apm_config);
+ generator.StartRecording();
+ generator.Process(100);
+ generator.StopRecording();
+
+ DebugDumpReplayer debug_dump_replayer_;
+
+ ASSERT_TRUE(debug_dump_replayer_.SetDumpFile(generator.dump_file_name()));
+
+ while (const absl::optional<audioproc::Event> event =
+ debug_dump_replayer_.GetNextEvent()) {
+ debug_dump_replayer_.RunNextEvent();
+ if (event->type() == audioproc::Event::CONFIG) {
+ const audioproc::Config* msg = &event->config();
+ ASSERT_TRUE(msg->has_experiments_description());
+ EXPECT_PRED_FORMAT2(::testing::IsNotSubstring,
+ "AgcClippingLevelExperiment",
+ msg->experiments_description().c_str());
+ }
+ }
+}
+
+TEST_F(DebugDumpTest, VerifyAec3ExperimentalString) {
+ AudioProcessing::Config apm_config;
+ apm_config.echo_canceller.enabled = true;
+ DebugDumpGenerator generator(apm_config);
+ generator.StartRecording();
+ generator.Process(100);
+ generator.StopRecording();
+
+ DebugDumpReplayer debug_dump_replayer_;
+
+ ASSERT_TRUE(debug_dump_replayer_.SetDumpFile(generator.dump_file_name()));
+
+ while (const absl::optional<audioproc::Event> event =
+ debug_dump_replayer_.GetNextEvent()) {
+ debug_dump_replayer_.RunNextEvent();
+ if (event->type() == audioproc::Event::CONFIG) {
+ const audioproc::Config* msg = &event->config();
+ ASSERT_TRUE(msg->has_experiments_description());
+ EXPECT_PRED_FORMAT2(::testing::IsSubstring, "EchoController",
+ msg->experiments_description().c_str());
+ }
+ }
+}
+
+TEST_F(DebugDumpTest, VerifyAgcClippingLevelExperimentalString) {
+ AudioProcessing::Config apm_config;
+ apm_config.gain_controller1.analog_gain_controller.enabled = true;
+ apm_config.gain_controller1.analog_gain_controller.startup_min_volume = 0;
+  // Arbitrarily set the minimum clipped level to 17, which will never be the
+  // default.
+ apm_config.gain_controller1.analog_gain_controller.clipped_level_min = 17;
+ DebugDumpGenerator generator(apm_config);
+ generator.StartRecording();
+ generator.Process(100);
+ generator.StopRecording();
+
+ DebugDumpReplayer debug_dump_replayer_;
+
+ ASSERT_TRUE(debug_dump_replayer_.SetDumpFile(generator.dump_file_name()));
+
+ while (const absl::optional<audioproc::Event> event =
+ debug_dump_replayer_.GetNextEvent()) {
+ debug_dump_replayer_.RunNextEvent();
+ if (event->type() == audioproc::Event::CONFIG) {
+ const audioproc::Config* msg = &event->config();
+ ASSERT_TRUE(msg->has_experiments_description());
+ EXPECT_PRED_FORMAT2(::testing::IsSubstring, "AgcClippingLevelExperiment",
+ msg->experiments_description().c_str());
+ }
+ }
+}
+
+TEST_F(DebugDumpTest, VerifyEmptyExperimentalString) {
+ DebugDumpGenerator generator(/*apm_config=*/{});
+ generator.StartRecording();
+ generator.Process(100);
+ generator.StopRecording();
+
+ DebugDumpReplayer debug_dump_replayer_;
+
+ ASSERT_TRUE(debug_dump_replayer_.SetDumpFile(generator.dump_file_name()));
+
+ while (const absl::optional<audioproc::Event> event =
+ debug_dump_replayer_.GetNextEvent()) {
+ debug_dump_replayer_.RunNextEvent();
+ if (event->type() == audioproc::Event::CONFIG) {
+ const audioproc::Config* msg = &event->config();
+ ASSERT_TRUE(msg->has_experiments_description());
+ EXPECT_EQ(0u, msg->experiments_description().size());
+ }
+ }
+}
+
+// AGC is not supported on Android or iOS.
+#if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS)
+#define MAYBE_ToggleAgc DISABLED_ToggleAgc
+#else
+#define MAYBE_ToggleAgc ToggleAgc
+#endif
+TEST_F(DebugDumpTest, MAYBE_ToggleAgc) {
+ DebugDumpGenerator generator(/*apm_config=*/{});
+ generator.StartRecording();
+ generator.Process(100);
+
+ AudioProcessing::Config apm_config = generator.apm()->GetConfig();
+ apm_config.gain_controller1.enabled = !apm_config.gain_controller1.enabled;
+ generator.apm()->ApplyConfig(apm_config);
+
+ generator.Process(100);
+ generator.StopRecording();
+ VerifyDebugDump(generator.dump_file_name());
+}
+
+TEST_F(DebugDumpTest, ToggleNs) {
+ DebugDumpGenerator generator(/*apm_config=*/{});
+ generator.StartRecording();
+ generator.Process(100);
+
+ AudioProcessing::Config apm_config = generator.apm()->GetConfig();
+ apm_config.noise_suppression.enabled = !apm_config.noise_suppression.enabled;
+ generator.apm()->ApplyConfig(apm_config);
+
+ generator.Process(100);
+ generator.StopRecording();
+ VerifyDebugDump(generator.dump_file_name());
+}
+
+TEST_F(DebugDumpTest, TransientSuppressionOn) {
+ DebugDumpGenerator generator(/*apm_config=*/{});
+
+ AudioProcessing::Config apm_config = generator.apm()->GetConfig();
+ apm_config.transient_suppression.enabled = true;
+ generator.apm()->ApplyConfig(apm_config);
+
+ generator.StartRecording();
+ generator.Process(100);
+ generator.StopRecording();
+ VerifyDebugDump(generator.dump_file_name());
+}
+
+TEST_F(DebugDumpTest, PreAmplifierIsOn) {
+ AudioProcessing::Config apm_config;
+ apm_config.pre_amplifier.enabled = true;
+ DebugDumpGenerator generator(apm_config);
+ generator.StartRecording();
+ generator.Process(100);
+ generator.StopRecording();
+ VerifyDebugDump(generator.dump_file_name());
+}
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/test/echo_canceller_test_tools.cc b/third_party/libwebrtc/modules/audio_processing/test/echo_canceller_test_tools.cc
new file mode 100644
index 0000000000..1d36b954f9
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/echo_canceller_test_tools.cc
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/test/echo_canceller_test_tools.h"
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+void RandomizeSampleVector(Random* random_generator, rtc::ArrayView<float> v) {
+ RandomizeSampleVector(random_generator, v,
+ /*amplitude=*/32767.f);
+}
+
+void RandomizeSampleVector(Random* random_generator,
+ rtc::ArrayView<float> v,
+ float amplitude) {
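+  // Map Rand<float>() from the unit interval to [-amplitude, amplitude].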
+ for (auto& v_k : v) {
+ v_k = 2 * amplitude * random_generator->Rand<float>() - amplitude;
+ }
+}
+
+template <typename T>
+void DelayBuffer<T>::Delay(rtc::ArrayView<const T> x,
+ rtc::ArrayView<T> x_delayed) {
+ RTC_DCHECK_EQ(x.size(), x_delayed.size());
+ if (buffer_.empty()) {
+ std::copy(x.begin(), x.end(), x_delayed.begin());
+ } else {
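+    // `buffer_` acts as a ring buffer holding the most recent samples: output
+    // the oldest sample, overwrite it with the incoming one and advance the
+    // insertion index.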
+ for (size_t k = 0; k < x.size(); ++k) {
+ x_delayed[k] = buffer_[next_insert_index_];
+ buffer_[next_insert_index_] = x[k];
+ next_insert_index_ = (next_insert_index_ + 1) % buffer_.size();
+ }
+ }
+}
+
+template class DelayBuffer<float>;
+template class DelayBuffer<int>;
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/test/echo_canceller_test_tools.h b/third_party/libwebrtc/modules/audio_processing/test/echo_canceller_test_tools.h
new file mode 100644
index 0000000000..0d70cd39c6
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/echo_canceller_test_tools.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_TEST_ECHO_CANCELLER_TEST_TOOLS_H_
+#define MODULES_AUDIO_PROCESSING_TEST_ECHO_CANCELLER_TEST_TOOLS_H_
+
+#include <algorithm>
+#include <vector>
+
+#include "api/array_view.h"
+#include "rtc_base/random.h"
+
+namespace webrtc {
+
+// Randomizes the elements in a vector with values in [-32767.f, 32767.f].
+void RandomizeSampleVector(Random* random_generator, rtc::ArrayView<float> v);
+
+// Randomizes the elements in a vector with values in [-amplitude, amplitude].
+void RandomizeSampleVector(Random* random_generator,
+ rtc::ArrayView<float> v,
+ float amplitude);
+
+// Class for delaying a signal a fixed number of samples.
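+//
+// Usage sketch (illustrative):
+//   DelayBuffer<float> delay_buffer(/*delay=*/3);
+//   delay_buffer.Delay(x, x_delayed);  // First 3 output samples are zeros.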
+template <typename T>
+class DelayBuffer {
+ public:
+ explicit DelayBuffer(size_t delay) : buffer_(delay) {}
+ ~DelayBuffer() = default;
+
+ // Produces a delayed signal copy of x.
+ void Delay(rtc::ArrayView<const T> x, rtc::ArrayView<T> x_delayed);
+
+ private:
+ std::vector<T> buffer_;
+ size_t next_insert_index_ = 0;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_TEST_ECHO_CANCELLER_TEST_TOOLS_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/test/echo_canceller_test_tools_unittest.cc b/third_party/libwebrtc/modules/audio_processing/test/echo_canceller_test_tools_unittest.cc
new file mode 100644
index 0000000000..164d28fa16
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/echo_canceller_test_tools_unittest.cc
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/test/echo_canceller_test_tools.h"
+
+#include <vector>
+
+#include "api/array_view.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/random.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+TEST(EchoCancellerTestTools, FloatDelayBuffer) {
+ constexpr size_t kDelay = 10;
+ DelayBuffer<float> delay_buffer(kDelay);
+ std::vector<float> v(1000, 0.f);
+ for (size_t k = 0; k < v.size(); ++k) {
+ v[k] = k;
+ }
+ std::vector<float> v_delayed = v;
+ constexpr size_t kBlockSize = 50;
+ for (size_t k = 0; k < rtc::CheckedDivExact(v.size(), kBlockSize); ++k) {
+ delay_buffer.Delay(
+ rtc::ArrayView<const float>(&v[k * kBlockSize], kBlockSize),
+ rtc::ArrayView<float>(&v_delayed[k * kBlockSize], kBlockSize));
+ }
+ for (size_t k = kDelay; k < v.size(); ++k) {
+ EXPECT_EQ(v[k - kDelay], v_delayed[k]);
+ }
+}
+
+TEST(EchoCancellerTestTools, IntDelayBuffer) {
+ constexpr size_t kDelay = 10;
+ DelayBuffer<int> delay_buffer(kDelay);
+ std::vector<int> v(1000, 0);
+ for (size_t k = 0; k < v.size(); ++k) {
+ v[k] = k;
+ }
+ std::vector<int> v_delayed = v;
+ const size_t kBlockSize = 50;
+ for (size_t k = 0; k < rtc::CheckedDivExact(v.size(), kBlockSize); ++k) {
+ delay_buffer.Delay(
+ rtc::ArrayView<const int>(&v[k * kBlockSize], kBlockSize),
+ rtc::ArrayView<int>(&v_delayed[k * kBlockSize], kBlockSize));
+ }
+ for (size_t k = kDelay; k < v.size(); ++k) {
+ EXPECT_EQ(v[k - kDelay], v_delayed[k]);
+ }
+}
+
+TEST(EchoCancellerTestTools, RandomizeSampleVector) {
+ Random random_generator(42U);
+ std::vector<float> v(50, 0.f);
+ std::vector<float> v_ref = v;
+ RandomizeSampleVector(&random_generator, v);
+ EXPECT_NE(v, v_ref);
+ v_ref = v;
+ RandomizeSampleVector(&random_generator, v);
+ EXPECT_NE(v, v_ref);
+}
+
+TEST(EchoCancellerTestTools, RandomizeSampleVectorWithAmplitude) {
+ Random random_generator(42U);
+ std::vector<float> v(50, 0.f);
+ RandomizeSampleVector(&random_generator, v, 1000.f);
+ EXPECT_GE(1000.f, *std::max_element(v.begin(), v.end()));
+ EXPECT_LE(-1000.f, *std::min_element(v.begin(), v.end()));
+ RandomizeSampleVector(&random_generator, v, 100.f);
+ EXPECT_GE(100.f, *std::max_element(v.begin(), v.end()));
+ EXPECT_LE(-100.f, *std::min_element(v.begin(), v.end()));
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/test/echo_control_mock.h b/third_party/libwebrtc/modules/audio_processing/test/echo_control_mock.h
new file mode 100644
index 0000000000..763d6e4f0b
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/echo_control_mock.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_TEST_ECHO_CONTROL_MOCK_H_
+#define MODULES_AUDIO_PROCESSING_TEST_ECHO_CONTROL_MOCK_H_
+
+#include "api/audio/echo_control.h"
+#include "test/gmock.h"
+
+namespace webrtc {
+
+class AudioBuffer;
+
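+// gmock-based test double for EchoControl. Usage sketch (illustrative):
+//   MockEchoControl mock;
+//   EXPECT_CALL(mock, GetMetrics()).Times(::testing::AtLeast(1));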
+class MockEchoControl : public EchoControl {
+ public:
+ MOCK_METHOD(void, AnalyzeRender, (AudioBuffer * render), (override));
+ MOCK_METHOD(void, AnalyzeCapture, (AudioBuffer * capture), (override));
+ MOCK_METHOD(void,
+ ProcessCapture,
+ (AudioBuffer * capture, bool echo_path_change),
+ (override));
+ MOCK_METHOD(void,
+ ProcessCapture,
+ (AudioBuffer * capture,
+ AudioBuffer* linear_output,
+ bool echo_path_change),
+ (override));
+ MOCK_METHOD(EchoControl::Metrics, GetMetrics, (), (const, override));
+ MOCK_METHOD(void, SetAudioBufferDelay, (int delay_ms), (override));
+ MOCK_METHOD(void,
+ SetCaptureOutputUsage,
+ (bool capture_output_used),
+ (override));
+ MOCK_METHOD(bool, ActiveProcessing, (), (const, override));
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_TEST_ECHO_CONTROL_MOCK_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/test/fake_recording_device.cc b/third_party/libwebrtc/modules/audio_processing/test/fake_recording_device.cc
new file mode 100644
index 0000000000..3a35ee9d74
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/fake_recording_device.cc
@@ -0,0 +1,190 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/test/fake_recording_device.h"
+
+#include <algorithm>
+#include <memory>
+
+#include "absl/types/optional.h"
+#include "modules/audio_processing/agc/gain_map_internal.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "rtc_base/numerics/safe_minmax.h"
+
+namespace webrtc {
+namespace test {
+
+namespace {
+
+constexpr float kFloatSampleMin = -32768.f;
+constexpr float kFloatSampleMax = 32767.f;
+
+} // namespace
+
+// Abstract class for the different fake recording devices.
+class FakeRecordingDeviceWorker {
+ public:
+ explicit FakeRecordingDeviceWorker(const int initial_mic_level)
+ : mic_level_(initial_mic_level) {}
+ int mic_level() const { return mic_level_; }
+ void set_mic_level(const int level) { mic_level_ = level; }
+ void set_undo_mic_level(const int level) { undo_mic_level_ = level; }
+ virtual ~FakeRecordingDeviceWorker() = default;
+ virtual void ModifyBufferInt16(rtc::ArrayView<int16_t> buffer) = 0;
+ virtual void ModifyBufferFloat(ChannelBuffer<float>* buffer) = 0;
+
+ protected:
+ // Mic level to simulate.
+ int mic_level_;
+ // Optional mic level to undo.
+ absl::optional<int> undo_mic_level_;
+};
+
+namespace {
+
+// Identity fake recording device. The samples are not modified, which is
+// equivalent to a constant gain curve at 1.0 - only used for testing.
+class FakeRecordingDeviceIdentity final : public FakeRecordingDeviceWorker {
+ public:
+ explicit FakeRecordingDeviceIdentity(const int initial_mic_level)
+ : FakeRecordingDeviceWorker(initial_mic_level) {}
+ ~FakeRecordingDeviceIdentity() override = default;
+ void ModifyBufferInt16(rtc::ArrayView<int16_t> buffer) override {}
+ void ModifyBufferFloat(ChannelBuffer<float>* buffer) override {}
+};
+
+// Linear fake recording device. The gain curve is a linear function mapping the
+// mic level range [0, 255] to [0.0, 1.0].
+class FakeRecordingDeviceLinear final : public FakeRecordingDeviceWorker {
+ public:
+ explicit FakeRecordingDeviceLinear(const int initial_mic_level)
+ : FakeRecordingDeviceWorker(initial_mic_level) {}
+ ~FakeRecordingDeviceLinear() override = default;
+ void ModifyBufferInt16(rtc::ArrayView<int16_t> buffer) override {
+ const size_t number_of_samples = buffer.size();
+ int16_t* data = buffer.data();
+ // If an undo level is specified, virtually restore the unmodified
+ // microphone level; otherwise simulate the mic gain only.
+ const float divisor =
+ (undo_mic_level_ && *undo_mic_level_ > 0) ? *undo_mic_level_ : 255.f;
+ for (size_t i = 0; i < number_of_samples; ++i) {
+ data[i] = rtc::saturated_cast<int16_t>(data[i] * mic_level_ / divisor);
+ }
+ }
+ void ModifyBufferFloat(ChannelBuffer<float>* buffer) override {
+ // If an undo level is specified, virtually restore the unmodified
+ // microphone level; otherwise simulate the mic gain only.
+ const float divisor =
+ (undo_mic_level_ && *undo_mic_level_ > 0) ? *undo_mic_level_ : 255.f;
+ for (size_t c = 0; c < buffer->num_channels(); ++c) {
+ for (size_t i = 0; i < buffer->num_frames(); ++i) {
+ buffer->channels()[c][i] =
+ rtc::SafeClamp(buffer->channels()[c][i] * mic_level_ / divisor,
+ kFloatSampleMin, kFloatSampleMax);
+ }
+ }
+ }
+};
+
+float ComputeAgc1LinearFactor(const absl::optional<int>& undo_mic_level,
+ int mic_level) {
+ // If an undo level is specified, virtually restore the unmodified
+  // microphone level; otherwise compute the gain relative to level 100.
+ const int undo_level =
+ (undo_mic_level && *undo_mic_level > 0) ? *undo_mic_level : 100;
+ return DbToRatio(kGainMap[mic_level] - kGainMap[undo_level]);
+}
+
+// Roughly dB-scale fake recording device. Valid levels are [0, 255]. The mic
+// applies a gain from kGainMap in agc/gain_map_internal.h.
+class FakeRecordingDeviceAgc1 final : public FakeRecordingDeviceWorker {
+ public:
+ explicit FakeRecordingDeviceAgc1(const int initial_mic_level)
+ : FakeRecordingDeviceWorker(initial_mic_level) {}
+ ~FakeRecordingDeviceAgc1() override = default;
+ void ModifyBufferInt16(rtc::ArrayView<int16_t> buffer) override {
+ const float scaling_factor =
+ ComputeAgc1LinearFactor(undo_mic_level_, mic_level_);
+ const size_t number_of_samples = buffer.size();
+ int16_t* data = buffer.data();
+ for (size_t i = 0; i < number_of_samples; ++i) {
+ data[i] = rtc::saturated_cast<int16_t>(data[i] * scaling_factor);
+ }
+ }
+ void ModifyBufferFloat(ChannelBuffer<float>* buffer) override {
+ const float scaling_factor =
+ ComputeAgc1LinearFactor(undo_mic_level_, mic_level_);
+ for (size_t c = 0; c < buffer->num_channels(); ++c) {
+ for (size_t i = 0; i < buffer->num_frames(); ++i) {
+ buffer->channels()[c][i] =
+ rtc::SafeClamp(buffer->channels()[c][i] * scaling_factor,
+ kFloatSampleMin, kFloatSampleMax);
+ }
+ }
+ }
+};
+
+} // namespace
+
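+// `device_kind` selects the gain curve: 0 = identity, 1 = linear, 2 = AGC1
+// gain map.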
+FakeRecordingDevice::FakeRecordingDevice(int initial_mic_level,
+ int device_kind) {
+ switch (device_kind) {
+ case 0:
+ worker_ =
+ std::make_unique<FakeRecordingDeviceIdentity>(initial_mic_level);
+ break;
+ case 1:
+ worker_ = std::make_unique<FakeRecordingDeviceLinear>(initial_mic_level);
+ break;
+ case 2:
+ worker_ = std::make_unique<FakeRecordingDeviceAgc1>(initial_mic_level);
+ break;
+ default:
+ RTC_DCHECK_NOTREACHED();
+ break;
+ }
+}
+
+FakeRecordingDevice::~FakeRecordingDevice() = default;
+
+int FakeRecordingDevice::MicLevel() const {
+ RTC_CHECK(worker_);
+ return worker_->mic_level();
+}
+
+void FakeRecordingDevice::SetMicLevel(const int level) {
+ RTC_CHECK(worker_);
+ if (level != worker_->mic_level())
+ RTC_LOG(LS_INFO) << "Simulate mic level update: " << level;
+ worker_->set_mic_level(level);
+}
+
+void FakeRecordingDevice::SetUndoMicLevel(const int level) {
+ RTC_DCHECK(worker_);
+  // TODO(alessiob): The behavior with an undo level equal to zero is not
+  // defined yet; it will be specified in future CLs as more
+  // FakeRecordingDeviceWorker implementations are added.
+ RTC_CHECK(level > 0) << "Zero undo mic level is unsupported";
+ worker_->set_undo_mic_level(level);
+}
+
+void FakeRecordingDevice::SimulateAnalogGain(rtc::ArrayView<int16_t> buffer) {
+ RTC_DCHECK(worker_);
+ worker_->ModifyBufferInt16(buffer);
+}
+
+void FakeRecordingDevice::SimulateAnalogGain(ChannelBuffer<float>* buffer) {
+ RTC_DCHECK(worker_);
+ worker_->ModifyBufferFloat(buffer);
+}
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/test/fake_recording_device.h b/third_party/libwebrtc/modules/audio_processing/test/fake_recording_device.h
new file mode 100644
index 0000000000..da3c0cf794
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/fake_recording_device.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_TEST_FAKE_RECORDING_DEVICE_H_
+#define MODULES_AUDIO_PROCESSING_TEST_FAKE_RECORDING_DEVICE_H_
+
+#include <algorithm>
+#include <memory>
+#include <vector>
+
+#include "api/array_view.h"
+#include "common_audio/channel_buffer.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace test {
+
+class FakeRecordingDeviceWorker;
+
+// Class for simulating a microphone with analog gain.
+//
+// The intended modes of operation are the following:
+//
+// FakeRecordingDevice fake_mic(255, 1);
+//
+// fake_mic.SetMicLevel(170);
+// fake_mic.SimulateAnalogGain(buffer);
+//
+// When the mic level to undo is known:
+//
+// fake_mic.SetMicLevel(170);
+// fake_mic.SetUndoMicLevel(30);
+// fake_mic.SimulateAnalogGain(buffer);
+//
+// The second option virtually restores the unmodified microphone level. Calling
+// SimulateAnalogGain() will first "undo" the gain applied by the real
+// microphone (e.g., 30).
+class FakeRecordingDevice final {
+ public:
+ FakeRecordingDevice(int initial_mic_level, int device_kind);
+ ~FakeRecordingDevice();
+
+ int MicLevel() const;
+ void SetMicLevel(int level);
+ void SetUndoMicLevel(int level);
+
+  // Simulates the analog gain applied to `buffer`. If an undo mic level has
+  // been set via SetUndoMicLevel(), the unmodified mic signal is virtually
+  // restored before the simulated gain is applied.
+ void SimulateAnalogGain(rtc::ArrayView<int16_t> buffer);
+
+  // Simulates the analog gain applied to `buffer`. If an undo mic level has
+  // been set via SetUndoMicLevel(), the unmodified mic signal is virtually
+  // restored before the simulated gain is applied.
+ void SimulateAnalogGain(ChannelBuffer<float>* buffer);
+
+ private:
+ // Fake recording device worker.
+ std::unique_ptr<FakeRecordingDeviceWorker> worker_;
+};
+
+} // namespace test
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_TEST_FAKE_RECORDING_DEVICE_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/test/fake_recording_device_unittest.cc b/third_party/libwebrtc/modules/audio_processing/test/fake_recording_device_unittest.cc
new file mode 100644
index 0000000000..2ac8b1dc48
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/fake_recording_device_unittest.cc
@@ -0,0 +1,231 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/test/fake_recording_device.h"
+
+#include <cmath>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "api/array_view.h"
+#include "rtc_base/strings/string_builder.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace test {
+namespace {
+
+constexpr int kInitialMicLevel = 100;
+
+// TODO(alessiob): Add new fake recording device kind values here as they are
+// added in FakeRecordingDevice::FakeRecordingDevice.
+const std::vector<int> kFakeRecDeviceKinds = {0, 1, 2};
+
+const std::vector<std::vector<float>> kTestMultiChannelSamples{
+ std::vector<float>{-10.f, -1.f, -0.1f, 0.f, 0.1f, 1.f, 10.f}};
+
+// Writes samples into ChannelBuffer<float>.
+void WritesDataIntoChannelBuffer(const std::vector<std::vector<float>>& data,
+ ChannelBuffer<float>* buff) {
+ EXPECT_EQ(data.size(), buff->num_channels());
+ EXPECT_EQ(data[0].size(), buff->num_frames());
+ for (size_t c = 0; c < buff->num_channels(); ++c) {
+ for (size_t f = 0; f < buff->num_frames(); ++f) {
+ buff->channels()[c][f] = data[c][f];
+ }
+ }
+}
+
+std::unique_ptr<ChannelBuffer<float>> CreateChannelBufferWithData(
+ const std::vector<std::vector<float>>& data) {
+ auto buff =
+ std::make_unique<ChannelBuffer<float>>(data[0].size(), data.size());
+ WritesDataIntoChannelBuffer(data, buff.get());
+ return buff;
+}
+
+// Checks that the absolute values of the samples in `curr`, produced with a
+// higher level, are not smaller than those in `prev`.
+void CheckIfMonotoneSamplesModules(const ChannelBuffer<float>* prev,
+ const ChannelBuffer<float>* curr) {
+ RTC_DCHECK_EQ(prev->num_channels(), curr->num_channels());
+ RTC_DCHECK_EQ(prev->num_frames(), curr->num_frames());
+ bool valid = true;
+ for (size_t i = 0; i < prev->num_channels(); ++i) {
+ for (size_t j = 0; j < prev->num_frames(); ++j) {
+ valid = std::fabs(prev->channels()[i][j]) <=
+ std::fabs(curr->channels()[i][j]);
+ if (!valid) {
+ break;
+ }
+ }
+ if (!valid) {
+ break;
+ }
+ }
+ EXPECT_TRUE(valid);
+}
+
+// Checks that the samples in each pair have the same sign unless the sample in
+// `dst` is zero (because of zero gain).
+void CheckSameSign(const ChannelBuffer<float>* src,
+ const ChannelBuffer<float>* dst) {
+ RTC_DCHECK_EQ(src->num_channels(), dst->num_channels());
+ RTC_DCHECK_EQ(src->num_frames(), dst->num_frames());
+ const auto fsgn = [](float x) { return ((x < 0) ? -1 : (x > 0) ? 1 : 0); };
+ bool valid = true;
+ for (size_t i = 0; i < src->num_channels(); ++i) {
+ for (size_t j = 0; j < src->num_frames(); ++j) {
+ valid = dst->channels()[i][j] == 0.0f ||
+ fsgn(src->channels()[i][j]) == fsgn(dst->channels()[i][j]);
+ if (!valid) {
+ break;
+ }
+ }
+ if (!valid) {
+ break;
+ }
+ }
+ EXPECT_TRUE(valid);
+}
+
+std::string FakeRecordingDeviceKindToString(int fake_rec_device_kind) {
+ rtc::StringBuilder ss;
+ ss << "fake recording device: " << fake_rec_device_kind;
+ return ss.Release();
+}
+
+std::string AnalogLevelToString(int level) {
+ rtc::StringBuilder ss;
+ ss << "analog level: " << level;
+ return ss.Release();
+}
+
+} // namespace
+
+TEST(FakeRecordingDevice, CheckHelperFunctions) {
+ constexpr size_t kC = 0; // Channel index.
+ constexpr size_t kS = 1; // Sample index.
+
+ // Check read.
+ auto buff = CreateChannelBufferWithData(kTestMultiChannelSamples);
+ for (size_t c = 0; c < kTestMultiChannelSamples.size(); ++c) {
+ for (size_t s = 0; s < kTestMultiChannelSamples[0].size(); ++s) {
+ EXPECT_EQ(kTestMultiChannelSamples[c][s], buff->channels()[c][s]);
+ }
+ }
+
+ // Check write.
+ buff->channels()[kC][kS] = -5.0f;
+ RTC_DCHECK_NE(buff->channels()[kC][kS], kTestMultiChannelSamples[kC][kS]);
+
+ // Check reset.
+ WritesDataIntoChannelBuffer(kTestMultiChannelSamples, buff.get());
+ EXPECT_EQ(buff->channels()[kC][kS], kTestMultiChannelSamples[kC][kS]);
+}
+
+// Implicitly checks that changes to the mic and undo levels are visible to the
+// FakeRecordingDeviceWorker implementation injected into FakeRecordingDevice.
+TEST(FakeRecordingDevice, TestWorkerAbstractClass) {
+ FakeRecordingDevice fake_recording_device(kInitialMicLevel, 1);
+
+ auto buff1 = CreateChannelBufferWithData(kTestMultiChannelSamples);
+ fake_recording_device.SetMicLevel(100);
+ fake_recording_device.SimulateAnalogGain(buff1.get());
+
+ auto buff2 = CreateChannelBufferWithData(kTestMultiChannelSamples);
+ fake_recording_device.SetMicLevel(200);
+ fake_recording_device.SimulateAnalogGain(buff2.get());
+
+ for (size_t c = 0; c < kTestMultiChannelSamples.size(); ++c) {
+ for (size_t s = 0; s < kTestMultiChannelSamples[0].size(); ++s) {
+ EXPECT_LE(std::abs(buff1->channels()[c][s]),
+ std::abs(buff2->channels()[c][s]));
+ }
+ }
+
+ auto buff3 = CreateChannelBufferWithData(kTestMultiChannelSamples);
+ fake_recording_device.SetMicLevel(200);
+ fake_recording_device.SetUndoMicLevel(100);
+ fake_recording_device.SimulateAnalogGain(buff3.get());
+
+ for (size_t c = 0; c < kTestMultiChannelSamples.size(); ++c) {
+ for (size_t s = 0; s < kTestMultiChannelSamples[0].size(); ++s) {
+ EXPECT_LE(std::abs(buff1->channels()[c][s]),
+ std::abs(buff3->channels()[c][s]));
+ EXPECT_LE(std::abs(buff2->channels()[c][s]),
+ std::abs(buff3->channels()[c][s]));
+ }
+ }
+}
+
+TEST(FakeRecordingDevice, GainCurveShouldBeMonotone) {
+ // Create input-output buffers.
+ auto buff_prev = CreateChannelBufferWithData(kTestMultiChannelSamples);
+ auto buff_curr = CreateChannelBufferWithData(kTestMultiChannelSamples);
+
+ // Test different mappings.
+ for (auto fake_rec_device_kind : kFakeRecDeviceKinds) {
+ SCOPED_TRACE(FakeRecordingDeviceKindToString(fake_rec_device_kind));
+ FakeRecordingDevice fake_recording_device(kInitialMicLevel,
+ fake_rec_device_kind);
+ // TODO(alessiob): The test below is designed for state-less recording
+ // devices. If, for instance, a device has memory, the test might need
+ // to be redesigned (e.g., re-initialize fake recording device).
+
+ // Apply lowest analog level.
+ WritesDataIntoChannelBuffer(kTestMultiChannelSamples, buff_prev.get());
+ fake_recording_device.SetMicLevel(0);
+ fake_recording_device.SimulateAnalogGain(buff_prev.get());
+
+ // Increment analog level to check monotonicity.
+ for (int i = 1; i <= 255; ++i) {
+ SCOPED_TRACE(AnalogLevelToString(i));
+ WritesDataIntoChannelBuffer(kTestMultiChannelSamples, buff_curr.get());
+ fake_recording_device.SetMicLevel(i);
+ fake_recording_device.SimulateAnalogGain(buff_curr.get());
+ CheckIfMonotoneSamplesModules(buff_prev.get(), buff_curr.get());
+
+ // Update prev.
+ buff_prev.swap(buff_curr);
+ }
+ }
+}
+
+TEST(FakeRecordingDevice, GainCurveShouldNotChangeSign) {
+ // Create view on original samples.
+ std::unique_ptr<const ChannelBuffer<float>> buff_orig =
+ CreateChannelBufferWithData(kTestMultiChannelSamples);
+
+ // Create output buffer.
+ auto buff = CreateChannelBufferWithData(kTestMultiChannelSamples);
+
+ // Test different mappings.
+ for (auto fake_rec_device_kind : kFakeRecDeviceKinds) {
+ SCOPED_TRACE(FakeRecordingDeviceKindToString(fake_rec_device_kind));
+ FakeRecordingDevice fake_recording_device(kInitialMicLevel,
+ fake_rec_device_kind);
+
+ // TODO(alessiob): The test below is designed for state-less recording
+ // devices. If, for instance, a device has memory, the test might need
+ // to be redesigned (e.g., re-initialize fake recording device).
+ for (int i = 0; i <= 255; ++i) {
+ SCOPED_TRACE(AnalogLevelToString(i));
+ WritesDataIntoChannelBuffer(kTestMultiChannelSamples, buff.get());
+ fake_recording_device.SetMicLevel(i);
+ fake_recording_device.SimulateAnalogGain(buff.get());
+ CheckSameSign(buff_orig.get(), buff.get());
+ }
+ }
+}
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/test/performance_timer.cc b/third_party/libwebrtc/modules/audio_processing/test/performance_timer.cc
new file mode 100644
index 0000000000..1a82258903
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/performance_timer.cc
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/test/performance_timer.h"
+
+#include <math.h>
+
+#include <numeric>
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace test {
+
+PerformanceTimer::PerformanceTimer(int num_frames_to_process)
+ : clock_(webrtc::Clock::GetRealTimeClock()) {
+ timestamps_us_.reserve(num_frames_to_process);
+}
+
+PerformanceTimer::~PerformanceTimer() = default;
+
+void PerformanceTimer::StartTimer() {
+ start_timestamp_us_ = clock_->TimeInMicroseconds();
+}
+
+void PerformanceTimer::StopTimer() {
+ RTC_DCHECK(start_timestamp_us_);
+ timestamps_us_.push_back(clock_->TimeInMicroseconds() - *start_timestamp_us_);
+}
+
+double PerformanceTimer::GetDurationAverage() const {
+ return GetDurationAverage(0);
+}
+
+double PerformanceTimer::GetDurationStandardDeviation() const {
+ return GetDurationStandardDeviation(0);
+}
+
+double PerformanceTimer::GetDurationAverage(
+ size_t number_of_warmup_samples) const {
+ RTC_DCHECK_GT(timestamps_us_.size(), number_of_warmup_samples);
+ const size_t number_of_samples =
+ timestamps_us_.size() - number_of_warmup_samples;
+ return static_cast<double>(
+ std::accumulate(timestamps_us_.begin() + number_of_warmup_samples,
+ timestamps_us_.end(), static_cast<int64_t>(0))) /
+ number_of_samples;
+}
+
+double PerformanceTimer::GetDurationStandardDeviation(
+ size_t number_of_warmup_samples) const {
+ RTC_DCHECK_GT(timestamps_us_.size(), number_of_warmup_samples);
+ const size_t number_of_samples =
+ timestamps_us_.size() - number_of_warmup_samples;
+ RTC_DCHECK_GT(number_of_samples, 0);
+ double average_duration = GetDurationAverage(number_of_warmup_samples);
+
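+  // Sum the squared deviations from the mean; dividing by the number of
+  // samples below yields the population variance.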
+ double variance = std::accumulate(
+ timestamps_us_.begin() + number_of_warmup_samples, timestamps_us_.end(),
+ 0.0, [average_duration](const double& a, const int64_t& b) {
+ return a + (b - average_duration) * (b - average_duration);
+ });
+
+ return sqrt(variance / number_of_samples);
+}
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/test/performance_timer.h b/third_party/libwebrtc/modules/audio_processing/test/performance_timer.h
new file mode 100644
index 0000000000..5375ba74e8
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/performance_timer.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_TEST_PERFORMANCE_TIMER_H_
+#define MODULES_AUDIO_PROCESSING_TEST_PERFORMANCE_TIMER_H_
+
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "system_wrappers/include/clock.h"
+
+namespace webrtc {
+namespace test {
+
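+// Measures the duration of repeated operations: wrap each operation in
+// StartTimer()/StopTimer() calls and query the average duration and its
+// standard deviation afterwards.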
+class PerformanceTimer {
+ public:
+ explicit PerformanceTimer(int num_frames_to_process);
+ ~PerformanceTimer();
+
+ void StartTimer();
+ void StopTimer();
+
+ double GetDurationAverage() const;
+ double GetDurationStandardDeviation() const;
+
+ // These methods are the same as those above, but they ignore the first
+ // `number_of_warmup_samples` measurements.
+ double GetDurationAverage(size_t number_of_warmup_samples) const;
+ double GetDurationStandardDeviation(size_t number_of_warmup_samples) const;
+
+ private:
+ webrtc::Clock* clock_;
+ absl::optional<int64_t> start_timestamp_us_;
+ std::vector<int64_t> timestamps_us_;
+};
+
+} // namespace test
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_TEST_PERFORMANCE_TIMER_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/test/protobuf_utils.cc b/third_party/libwebrtc/modules/audio_processing/test/protobuf_utils.cc
new file mode 100644
index 0000000000..75574961b0
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/protobuf_utils.cc
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/test/protobuf_utils.h"
+
+#include <memory>
+
+#include "rtc_base/system/arch.h"
+
+namespace {
+// Allocates new memory owned by the unique_ptr to fit the raw message and
+// returns the number of bytes read from the string stream input.
+size_t ReadMessageBytesFromString(std::stringstream* input,
+ std::unique_ptr<uint8_t[]>* bytes) {
+ int32_t size = 0;
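+  // The message is preceded by its size stored as an int32.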
+ input->read(reinterpret_cast<char*>(&size), sizeof(int32_t));
+ int32_t size_read = input->gcount();
+ if (size_read != sizeof(int32_t))
+ return 0;
+ if (size <= 0)
+ return 0;
+
+ *bytes = std::make_unique<uint8_t[]>(size);
+ input->read(reinterpret_cast<char*>(bytes->get()),
+ size * sizeof((*bytes)[0]));
+ size_read = input->gcount();
+ return size_read == size ? size : 0;
+}
+} // namespace
+
+namespace webrtc {
+
+size_t ReadMessageBytesFromFile(FILE* file, std::unique_ptr<uint8_t[]>* bytes) {
+// The "wire format" for the size is little-endian. Assume we're running on
+// a little-endian machine.
+#ifndef WEBRTC_ARCH_LITTLE_ENDIAN
+#error "Need to convert messsage from little-endian."
+#endif
+ int32_t size = 0;
+ if (fread(&size, sizeof(size), 1, file) != 1)
+ return 0;
+ if (size <= 0)
+ return 0;
+
+ *bytes = std::make_unique<uint8_t[]>(size);
+ return fread(bytes->get(), sizeof((*bytes)[0]), size, file);
+}
+
+// Returns true on success, false on error or end-of-file.
+bool ReadMessageFromFile(FILE* file, MessageLite* msg) {
+ std::unique_ptr<uint8_t[]> bytes;
+ size_t size = ReadMessageBytesFromFile(file, &bytes);
+ if (!size)
+ return false;
+
+ msg->Clear();
+ return msg->ParseFromArray(bytes.get(), size);
+}
+
+// Returns true on success, false on error or end of string stream.
+bool ReadMessageFromString(std::stringstream* input, MessageLite* msg) {
+ std::unique_ptr<uint8_t[]> bytes;
+ size_t size = ReadMessageBytesFromString(input, &bytes);
+ if (!size)
+ return false;
+
+ msg->Clear();
+ return msg->ParseFromArray(bytes.get(), size);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/test/protobuf_utils.h b/third_party/libwebrtc/modules/audio_processing/test/protobuf_utils.h
new file mode 100644
index 0000000000..b9c2e819f9
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/protobuf_utils.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_TEST_PROTOBUF_UTILS_H_
+#define MODULES_AUDIO_PROCESSING_TEST_PROTOBUF_UTILS_H_
+
+#include <memory>
+#include <sstream> // no-presubmit-check TODO(webrtc:8982)
+
+#include "rtc_base/ignore_wundef.h"
+#include "rtc_base/protobuf_utils.h"
+
+RTC_PUSH_IGNORING_WUNDEF()
+#include "modules/audio_processing/debug.pb.h"
+RTC_POP_IGNORING_WUNDEF()
+
+namespace webrtc {
+
+// Allocates new memory owned by the unique_ptr to fit the raw message and
+// returns the number of bytes read.
+size_t ReadMessageBytesFromFile(FILE* file, std::unique_ptr<uint8_t[]>* bytes);
+
+// Returns true on success, false on error or end-of-file.
+bool ReadMessageFromFile(FILE* file, MessageLite* msg);
+
+// Returns true on success, false on error or end of string stream.
+bool ReadMessageFromString(
+ std::stringstream* input, // no-presubmit-check TODO(webrtc:8982)
+ MessageLite* msg);
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_TEST_PROTOBUF_UTILS_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/BUILD.gn b/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/BUILD.gn
new file mode 100644
index 0000000000..e53a829623
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/BUILD.gn
@@ -0,0 +1,170 @@
+# Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+import("../../../../webrtc.gni")
+
+if (!build_with_chromium) {
+ group("py_quality_assessment") {
+ testonly = true
+ deps = [
+ ":scripts",
+ ":unit_tests",
+ ]
+ }
+
+ copy("scripts") {
+ testonly = true
+ sources = [
+ "README.md",
+ "apm_quality_assessment.py",
+ "apm_quality_assessment.sh",
+ "apm_quality_assessment_boxplot.py",
+ "apm_quality_assessment_export.py",
+ "apm_quality_assessment_gencfgs.py",
+ "apm_quality_assessment_optimize.py",
+ ]
+ outputs = [ "$root_build_dir/py_quality_assessment/{{source_file_part}}" ]
+ deps = [
+ ":apm_configs",
+ ":lib",
+ ":output",
+ "../../../../resources/audio_processing/test/py_quality_assessment:probing_signals",
+ "../../../../rtc_tools:audioproc_f",
+ ]
+ }
+
+ copy("apm_configs") {
+ testonly = true
+ sources = [ "apm_configs/default.json" ]
+ visibility = [ ":*" ] # Only targets in this file can depend on this.
+ outputs = [
+ "$root_build_dir/py_quality_assessment/apm_configs/{{source_file_part}}",
+ ]
+ } # apm_configs
+
+ copy("lib") {
+ testonly = true
+ sources = [
+ "quality_assessment/__init__.py",
+ "quality_assessment/annotations.py",
+ "quality_assessment/audioproc_wrapper.py",
+ "quality_assessment/collect_data.py",
+ "quality_assessment/data_access.py",
+ "quality_assessment/echo_path_simulation.py",
+ "quality_assessment/echo_path_simulation_factory.py",
+ "quality_assessment/eval_scores.py",
+ "quality_assessment/eval_scores_factory.py",
+ "quality_assessment/evaluation.py",
+ "quality_assessment/exceptions.py",
+ "quality_assessment/export.py",
+ "quality_assessment/export_unittest.py",
+ "quality_assessment/external_vad.py",
+ "quality_assessment/input_mixer.py",
+ "quality_assessment/input_signal_creator.py",
+ "quality_assessment/results.css",
+ "quality_assessment/results.js",
+ "quality_assessment/signal_processing.py",
+ "quality_assessment/simulation.py",
+ "quality_assessment/test_data_generation.py",
+ "quality_assessment/test_data_generation_factory.py",
+ ]
+ visibility = [ ":*" ] # Only targets in this file can depend on this.
+ outputs = [ "$root_build_dir/py_quality_assessment/quality_assessment/{{source_file_part}}" ]
+ deps = [ "../../../../resources/audio_processing/test/py_quality_assessment:noise_tracks" ]
+ }
+
+ copy("output") {
+ testonly = true
+ sources = [ "output/README.md" ]
+ visibility = [ ":*" ] # Only targets in this file can depend on this.
+ outputs =
+ [ "$root_build_dir/py_quality_assessment/output/{{source_file_part}}" ]
+ }
+
+ group("unit_tests") {
+ testonly = true
+ visibility = [ ":*" ] # Only targets in this file can depend on this.
+ deps = [
+ ":apm_vad",
+ ":fake_polqa",
+ ":lib_unit_tests",
+ ":scripts_unit_tests",
+ ":vad",
+ ]
+ }
+
+ rtc_executable("fake_polqa") {
+ testonly = true
+ sources = [ "quality_assessment/fake_polqa.cc" ]
+ visibility = [ ":*" ] # Only targets in this file can depend on this.
+ output_dir = "${root_out_dir}/py_quality_assessment/quality_assessment"
+ deps = [
+ "../../../../rtc_base:checks",
+ "//third_party/abseil-cpp/absl/strings",
+ ]
+ }
+
+ rtc_executable("vad") {
+ testonly = true
+ sources = [ "quality_assessment/vad.cc" ]
+ deps = [
+ "../../../../common_audio",
+ "../../../../rtc_base:logging",
+ "//third_party/abseil-cpp/absl/flags:flag",
+ "//third_party/abseil-cpp/absl/flags:parse",
+ ]
+ }
+
+ rtc_executable("apm_vad") {
+ testonly = true
+ sources = [ "quality_assessment/apm_vad.cc" ]
+ deps = [
+ "../..",
+ "../../../../common_audio",
+ "../../../../rtc_base:logging",
+ "../../vad",
+ "//third_party/abseil-cpp/absl/flags:flag",
+ "//third_party/abseil-cpp/absl/flags:parse",
+ ]
+ }
+
+ rtc_executable("sound_level") {
+ testonly = true
+ sources = [ "quality_assessment/sound_level.cc" ]
+ deps = [
+ "../..",
+ "../../../../common_audio",
+ "../../../../rtc_base:logging",
+ "//third_party/abseil-cpp/absl/flags:flag",
+ "//third_party/abseil-cpp/absl/flags:parse",
+ ]
+ }
+
+ copy("lib_unit_tests") {
+ testonly = true
+ sources = [
+ "quality_assessment/annotations_unittest.py",
+ "quality_assessment/echo_path_simulation_unittest.py",
+ "quality_assessment/eval_scores_unittest.py",
+ "quality_assessment/fake_external_vad.py",
+ "quality_assessment/input_mixer_unittest.py",
+ "quality_assessment/signal_processing_unittest.py",
+ "quality_assessment/simulation_unittest.py",
+ "quality_assessment/test_data_generation_unittest.py",
+ ]
+ visibility = [ ":*" ] # Only targets in this file can depend on this.
+ outputs = [ "$root_build_dir/py_quality_assessment/quality_assessment/{{source_file_part}}" ]
+ }
+
+ copy("scripts_unit_tests") {
+ testonly = true
+ sources = [ "apm_quality_assessment_unittest.py" ]
+ visibility = [ ":*" ] # Only targets in this file can depend on this.
+ outputs = [ "$root_build_dir/py_quality_assessment/{{source_file_part}}" ]
+ }
+}
diff --git a/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/OWNERS b/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/OWNERS
new file mode 100644
index 0000000000..9f56bb830d
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/OWNERS
@@ -0,0 +1,5 @@
+aleloi@webrtc.org
+alessiob@webrtc.org
+henrik.lundin@webrtc.org
+ivoc@webrtc.org
+peah@webrtc.org
diff --git a/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/README.md b/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/README.md
new file mode 100644
index 0000000000..4156112df2
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/README.md
@@ -0,0 +1,125 @@
+# APM Quality Assessment tool
+
+Python wrapper of APM simulators (e.g., `audioproc_f`) with which quality
+assessment can be automated. The tool simulates different noise conditions,
+input signals, and APM configurations, and computes different scores. Once
+the scores are computed, the results can be exported to an HTML page that
+allows listening to the APM input and output signals as well as the
+reference signal used for evaluation.
+
+## Dependencies
+ - OS: Linux
+ - Python 2.7
+ - Python libraries: enum34, numpy, scipy, pydub (0.17.0+), pandas (0.20.1+),
+ pyquery (1.2+), jsmin (2.2+), csscompressor (0.9.4)
+ - It is recommended that a dedicated Python environment is used
+ - install `virtualenv`
+ - `$ sudo apt-get install python-virtualenv`
+ - setup a new Python environment (e.g., `my_env`)
+ - `$ cd ~ && virtualenv my_env`
+ - activate the new Python environment
+ - `$ source ~/my_env/bin/activate`
+ - add dependencies via `pip`
+ - `(my_env)$ pip install enum34 numpy pydub scipy pandas pyquery jsmin \`
+ `csscompressor`
+ - PolqaOem64 (see http://www.polqa.info/)
+ - Tested with POLQA Library v1.180 / P863 v2.400
+ - Aachen Impulse Response (AIR) Database
+ - Download https://www2.iks.rwth-aachen.de/air/air_database_release_1_4.zip
+ - Input probing signals and noise tracks (you can make your own dataset - *1)
+
+## Build
+ - Compile WebRTC
+ - Go to `out/Default/py_quality_assessment` and check that
+ `apm_quality_assessment.py` exists
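+ - For reference, a typical GN + Ninja flow (a sketch; adjust the output
+ directory to your setup): `$ gn gen out/Default` followed by
+ `$ ninja -C out/Default`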
+
+## Unit tests
+ - Compile WebRTC
+ - Go to `out/Default/py_quality_assessment`
+ - Run `python -m unittest discover -p "*_unittest.py"`
+
+## First time setup
+ - Deploy PolqaOem64 and set the `POLQA_PATH` environment variable
+ - e.g., `$ export POLQA_PATH=/var/opt/PolqaOem64`
+ - Deploy the AIR Database and set the `AECHEN_IR_DATABASE_PATH` environment
+ variable
+ - e.g., `$ export AECHEN_IR_DATABASE_PATH=/var/opt/AIR_1_4`
+ - Deploy probing signal tracks into
+ - `out/Default/py_quality_assessment/probing_signals` (*1)
+ - Deploy noise tracks into
+ - `out/Default/py_quality_assessment/noise_tracks` (*1, *2)
+
+(*1) You can use custom files as long as they are mono tracks sampled at
+48 kHz and encoded in the 16-bit signed PCM format (it is recommended that
+the tracks are converted and exported with Audacity).
+
+## Usage (scores computation)
+ - Go to `out/Default/py_quality_assessment`
+ - Check `apm_quality_assessment.sh`, an example script that parallelizes
+ the experiments (a direct single-run invocation is sketched below)
+ - Adjust the script according to your preferences (e.g., output path)
+ - Run `apm_quality_assessment.sh`
+ - The script will end by opening the browser and showing ALL the computed
+ scores
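+
+A minimal direct invocation looks as follows (a sketch: the probing signal
+file name is a placeholder; the generator and score names are those used in
+`apm_quality_assessment.sh`):
+
+```
+$ ./apm_quality_assessment.py \
+ --polqa_path ${POLQA_PATH} \
+ --air_db_path ${AECHEN_IR_DATABASE_PATH} \
+ -i probing_signals/speech.wav \
+ -o output/ \
+ -t white_noise \
+ -c apm_configs/default.json \
+ -e audio_level_mean
+```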
+
+## Usage (export reports)
+Showing all the results at once can be confusing. You may therefore want to
+export separate reports. In this case, you can use the
+`apm_quality_assessment_export.py` script as follows:
+
+ - Set `--output_dir, -o` to the same value used in `apm_quality_assessment.sh`
+ - Use regular expressions to select/filter out scores by
+ - APM configurations: `--config_names, -c`
+ - capture signals: `--capture_names, -i`
+ - render signals: `--render_names, -r`
+ - echo simulator: `--echo_simulator_names, -e`
+ - test data generators: `--test_data_generators, -t`
+ - scores: `--eval_scores, -s`
+ - Assign a suffix to the report name using `-f <suffix>`
+
+For instance:
+
+```
+$ ./apm_quality_assessment_export.py \
+ -o output/ \
+ -c "(^default$)|(.*AE.*)" \
+ -t \(white_noise\) \
+ -s \(polqa\) \
+ -f echo
+```
+
+## Usage (boxplot)
+After generating stats, it can help to visualize how a score depends on a
+certain APM simulator parameter. The `apm_quality_assessment_boxplot.py` script
+helps with that, producing plots similar to [this
+one](https://matplotlib.org/mpl_examples/pylab_examples/boxplot_demo_06.png).
+
+Suppose some scores come from running the APM simulator `audioproc_f` with
+or without the level controller: `--lc=1` or `--lc=0`. Then two boxplots
+side by side can be generated with
+
+```
+$ ./apm_quality_assessment_boxplot.py \
+ -o /path/to/output \
+ -v <score_name> \
+ -n /path/to/dir/with/apm_configs \
+ -z lc
+```
+
+## Troubleshooting
+The input wav file must be:
+ - sampled at a sample rate that is a multiple of 100 (required by POLQA)
+ - in the 16 bit format (required by `audioproc_f`)
+ - encoded in the Microsoft WAV signed 16 bit PCM format (Audacity default
+ when exporting)
+
+Depending on the license, the POLQA tool may take “breaks” as a way to limit the
+throughput. When this happens, the APM Quality Assessment tool is slowed down.
+For more details about this limitation, check Section 10.9.1 in the POLQA manual
+v.1.18.
+
+In case of issues with the POLQA score computation, check
+`py_quality_assessment/eval_scores.py` and adapt
+`PolqaScore._parse_output_file()`.
+The code can also be fixed directly in the build directory (namely,
+`out/Default/py_quality_assessment/eval_scores.py`).
diff --git a/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/apm_configs/default.json b/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/apm_configs/default.json
new file mode 100644
index 0000000000..5c3277bac0
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/apm_configs/default.json
@@ -0,0 +1 @@
+{"-all_default": null}
diff --git a/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/apm_quality_assessment.py b/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/apm_quality_assessment.py
new file mode 100755
index 0000000000..e067ecb692
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/apm_quality_assessment.py
@@ -0,0 +1,217 @@
+#!/usr/bin/env python
+# Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+"""Perform APM module quality assessment on one or more input files using one or
+ more APM simulator configuration files and one or more test data generators.
+
+Usage: apm_quality_assessment.py -i audio1.wav [audio2.wav ...]
+ -c cfg1.json [cfg2.json ...]
+ -t white_noise [identity ...]
+ -e audio_level_mean [polqa ...]
+ -o /path/to/output
+"""
+
+import argparse
+import logging
+import os
+import sys
+
+import quality_assessment.audioproc_wrapper as audioproc_wrapper
+import quality_assessment.echo_path_simulation as echo_path_simulation
+import quality_assessment.eval_scores as eval_scores
+import quality_assessment.evaluation as evaluation
+import quality_assessment.eval_scores_factory as eval_scores_factory
+import quality_assessment.external_vad as external_vad
+import quality_assessment.test_data_generation as test_data_generation
+import quality_assessment.test_data_generation_factory as \
+ test_data_generation_factory
+import quality_assessment.simulation as simulation
+
+_ECHO_PATH_SIMULATOR_NAMES = (
+ echo_path_simulation.EchoPathSimulator.REGISTERED_CLASSES)
+_TEST_DATA_GENERATOR_CLASSES = (
+ test_data_generation.TestDataGenerator.REGISTERED_CLASSES)
+_TEST_DATA_GENERATORS_NAMES = _TEST_DATA_GENERATOR_CLASSES.keys()
+_EVAL_SCORE_WORKER_CLASSES = eval_scores.EvaluationScore.REGISTERED_CLASSES
+_EVAL_SCORE_WORKER_NAMES = _EVAL_SCORE_WORKER_CLASSES.keys()
+
+_DEFAULT_CONFIG_FILE = 'apm_configs/default.json'
+
+_POLQA_BIN_NAME = 'PolqaOem64'
+
+
+def _InstanceArgumentsParser():
+ """Arguments parser factory.
+ """
+ parser = argparse.ArgumentParser(description=(
+ 'Perform APM module quality assessment on one or more input files using '
+ 'one or more APM simulator configuration files and one or more '
+ 'test data generators.'))
+
+ parser.add_argument('-c',
+ '--config_files',
+ nargs='+',
+ required=False,
+ help=('path to the configuration files defining the '
+ 'arguments with which the APM simulator tool is '
+ 'called'),
+ default=[_DEFAULT_CONFIG_FILE])
+
+ parser.add_argument(
+ '-i',
+ '--capture_input_files',
+ nargs='+',
+ required=True,
+ help='path to the capture input wav files (one or more)')
+
+ parser.add_argument('-r',
+ '--render_input_files',
+ nargs='+',
+ required=False,
+ help=('path to the render input wav files; either '
+ 'omitted or one file for each file in '
+ '--capture_input_files (files will be paired by '
+ 'index)'),
+ default=None)
+
+ parser.add_argument('-p',
+ '--echo_path_simulator',
+ required=False,
+ help=('custom echo path simulator name; required if '
+ '--render_input_files is specified'),
+ choices=_ECHO_PATH_SIMULATOR_NAMES,
+ default=echo_path_simulation.NoEchoPathSimulator.NAME)
+
+ parser.add_argument('-t',
+ '--test_data_generators',
+ nargs='+',
+ required=False,
+ help='custom list of test data generators to use',
+ choices=_TEST_DATA_GENERATORS_NAMES,
+ default=_TEST_DATA_GENERATORS_NAMES)
+
+ parser.add_argument('--additive_noise_tracks_path', required=False,
+ help='path to the wav files for the additive noise generator',
+ default=test_data_generation. \
+ AdditiveNoiseTestDataGenerator. \
+ DEFAULT_NOISE_TRACKS_PATH)
+
+ parser.add_argument('-e',
+ '--eval_scores',
+ nargs='+',
+ required=False,
+ help='custom list of evaluation scores to use',
+ choices=_EVAL_SCORE_WORKER_NAMES,
+ default=_EVAL_SCORE_WORKER_NAMES)
+
+ parser.add_argument('-o',
+ '--output_dir',
+ required=False,
+ help=('base path to the output directory in which the '
+ 'output wav files and the evaluation outcomes '
+ 'are saved'),
+ default='output')
+
+ parser.add_argument('--polqa_path',
+ required=True,
+ help='path to the POLQA tool')
+
+ parser.add_argument('--air_db_path',
+ required=True,
+ help='path to the Aachen IR database')
+
+ parser.add_argument('--apm_sim_path', required=False,
+ help='path to the APM simulator tool',
+ default=audioproc_wrapper. \
+ AudioProcWrapper. \
+ DEFAULT_APM_SIMULATOR_BIN_PATH)
+
+ parser.add_argument('--echo_metric_tool_bin_path',
+ required=False,
+ help=('path to the echo metric binary '
+ '(required for the echo eval score)'),
+ default=None)
+
+ parser.add_argument(
+ '--copy_with_identity_generator',
+ required=False,
+ help=('If true, the identity test data generator makes a '
+ 'copy of the clean speech input file.'),
+ default=False)
+
+ parser.add_argument('--external_vad_paths',
+ nargs='+',
+ required=False,
+ help=('Paths to external VAD programs. Each must take '
+ '\'-i <wav file> -o <output>\' inputs'),
+ default=[])
+
+ parser.add_argument('--external_vad_names',
+ nargs='+',
+ required=False,
+ help=('Keys for the VAD paths. The keys must be '
+ 'unique and as many as the paths.'),
+ default=[])
+
+ return parser
+
+
+def _ValidateArguments(args, parser):
+ if args.capture_input_files and args.render_input_files and (len(
+ args.capture_input_files) != len(args.render_input_files)):
+ parser.error(
+ '--render_input_files and --capture_input_files must be lists '
+ 'having the same length')
+ sys.exit(1)
+
+ if args.render_input_files and not args.echo_path_simulator:
+ parser.error(
+ 'when --render_input_files is set, --echo_path_simulator is '
+ 'also required')
+ sys.exit(1)
+
+ if len(args.external_vad_names) != len(args.external_vad_paths):
+ parser.error('If provided, --external_vad_paths and '
+ '--external_vad_names must '
+ 'have the same number of arguments.')
+ sys.exit(1)
+
+
+def main():
+ # TODO(alessiob): level = logging.INFO once debugged.
+ logging.basicConfig(level=logging.DEBUG)
+ parser = _InstanceArgumentsParser()
+ args = parser.parse_args()
+ _ValidateArguments(args, parser)
+
+ simulator = simulation.ApmModuleSimulator(
+ test_data_generator_factory=(
+ test_data_generation_factory.TestDataGeneratorFactory(
+ aechen_ir_database_path=args.air_db_path,
+ noise_tracks_path=args.additive_noise_tracks_path,
+ copy_with_identity=args.copy_with_identity_generator)),
+ evaluation_score_factory=eval_scores_factory.
+ EvaluationScoreWorkerFactory(
+ polqa_tool_bin_path=os.path.join(args.polqa_path, _POLQA_BIN_NAME),
+ echo_metric_tool_bin_path=args.echo_metric_tool_bin_path),
+ ap_wrapper=audioproc_wrapper.AudioProcWrapper(args.apm_sim_path),
+ evaluator=evaluation.ApmModuleEvaluator(),
+ external_vads=external_vad.ExternalVad.ConstructVadDict(
+ args.external_vad_paths, args.external_vad_names))
+ simulator.Run(config_filepaths=args.config_files,
+ capture_input_filepaths=args.capture_input_files,
+ render_input_filepaths=args.render_input_files,
+ echo_path_simulator_name=args.echo_path_simulator,
+ test_data_generator_names=args.test_data_generators,
+ eval_score_names=args.eval_scores,
+ output_dir=args.output_dir)
+ sys.exit(0)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/apm_quality_assessment.sh b/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/apm_quality_assessment.sh
new file mode 100755
index 0000000000..aa563ee26b
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/apm_quality_assessment.sh
@@ -0,0 +1,91 @@
+#!/bin/bash
+# Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+# Path to the POLQA tool.
+if [ -z "${POLQA_PATH}" ]; then # Check if defined.
+ # Default location.
+ export POLQA_PATH='/var/opt/PolqaOem64'
+fi
+if [ -d "${POLQA_PATH}" ]; then
+ echo "POLQA found in ${POLQA_PATH}"
+else
+ echo "POLQA not found in ${POLQA_PATH}"
+ exit 1
+fi
+
+# Path to the Aachen IR database.
+if [ -z "${AECHEN_IR_DATABASE_PATH}" ]; then # Check if defined.
+ # Default location.
+ export AECHEN_IR_DATABASE_PATH='/var/opt/AIR_1_4'
+fi
+if [ -d "${AECHEN_IR_DATABASE_PATH}" ]; then
+ echo "AIR database found in ${AECHEN_IR_DATABASE_PATH}"
+else
+ echo "AIR database not found in ${AECHEN_IR_DATABASE_PATH}"
+ exit 1
+fi
+
+# Customize probing signals, test data generators and scores if needed.
+CAPTURE_SIGNALS=(probing_signals/*.wav)
+TEST_DATA_GENERATORS=( \
+ "identity" \
+ "white_noise" \
+ # "environmental_noise" \
+ # "reverberation" \
+)
+SCORES=( \
+ # "polqa" \
+ "audio_level_peak" \
+ "audio_level_mean" \
+)
+OUTPUT_PATH=output
+
+# Generate standard APM config files.
+chmod +x apm_quality_assessment_gencfgs.py
+./apm_quality_assessment_gencfgs.py
+
+# Customize APM configurations if needed.
+APM_CONFIGS=(apm_configs/*.json)
+
+# Add output path if missing.
+if [ ! -d "${OUTPUT_PATH}" ]; then
+ mkdir "${OUTPUT_PATH}"
+fi
+
+# Start one process for each "probing signal"-"test data source" pair.
+chmod +x apm_quality_assessment.py
+for capture_signal_filepath in "${CAPTURE_SIGNALS[@]}" ; do
+ probing_signal_name="$(basename $capture_signal_filepath)"
+ probing_signal_name="${probing_signal_name%.*}"
+ for test_data_gen_name in "${TEST_DATA_GENERATORS[@]}" ; do
+ LOG_FILE="${OUTPUT_PATH}/apm_qa-${probing_signal_name}-"`
+ `"${test_data_gen_name}.log"
+ echo "Starting ${probing_signal_name} ${test_data_gen_name} "`
+ `"(see ${LOG_FILE})"
+ ./apm_quality_assessment.py \
+ --polqa_path ${POLQA_PATH}\
+ --air_db_path ${AECHEN_IR_DATABASE_PATH}\
+ -i ${capture_signal_filepath} \
+ -o ${OUTPUT_PATH} \
+ -t ${test_data_gen_name} \
+ -c "${APM_CONFIGS[@]}" \
+ -e "${SCORES[@]}" > $LOG_FILE 2>&1 &
+ done
+done
+
+# Join Python processes running apm_quality_assessment.py.
+wait
+
+# Export results.
+chmod +x ./apm_quality_assessment_export.py
+./apm_quality_assessment_export.py -o ${OUTPUT_PATH}
+
+# Show results in the browser.
+RESULTS_FILE="$(realpath ${OUTPUT_PATH}/results.html)"
+sensible-browser "file://${RESULTS_FILE}" > /dev/null 2>&1 &
diff --git a/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/apm_quality_assessment_boxplot.py b/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/apm_quality_assessment_boxplot.py
new file mode 100644
index 0000000000..c425885b95
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/apm_quality_assessment_boxplot.py
@@ -0,0 +1,154 @@
+#!/usr/bin/env python
+# Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+"""Shows boxplots of given score for different values of selected
+parameters. Can be used to compare scores by audioproc_f flag.
+
+Usage: apm_quality_assessment_boxplot.py -o /path/to/output
+ -v polqa
+ -n /path/to/dir/with/apm_configs
+ -z audioproc_f_arg1 [arg2 ...]
+
+Arguments --config_names, --render_names, --echo_simulator_names,
+--test_data_generators, --eval_scores can be used to filter the data
+used for plotting.
+"""
+
+import collections
+import logging
+import matplotlib.pyplot as plt
+import os
+
+import quality_assessment.data_access as data_access
+import quality_assessment.collect_data as collect_data
+
+
+def InstanceArgumentsParser():
+ """Arguments parser factory.
+ """
+ parser = collect_data.InstanceArgumentsParser()
+ parser.description = (
+ 'Shows boxplots of a given score for different values of selected '
+ 'parameters. Can be used to compare scores by audioproc_f flag.')
+
+ parser.add_argument('-v',
+ '--eval_score',
+ required=True,
+ help=('Score name for constructing boxplots'))
+
+ parser.add_argument(
+ '-n',
+ '--config_dir',
+ required=False,
+ help=('path to the folder with the configuration files'),
+ default='apm_configs')
+
+ parser.add_argument('-z',
+ '--params_to_plot',
+ required=True,
+ nargs='+',
+ help=('audioproc_f parameter values '
+ 'by which to group scores (no leading dash)'))
+
+ return parser
+
+
+def FilterScoresByParams(data_frame, filter_params, score_name, config_dir):
+ """Filters data on the values of one or more parameters.
+
+ Args:
+ data_frame: pandas.DataFrame of all used input data.
+
+ filter_params: each config of the input data is assumed to have
+ exactly one parameter from `filter_params` defined. Every value
+ of the parameters in `filter_params` is a key in the returned
+ dict; the associated value is all cells of the data with that
+ value of the parameter.
+
+ score_name: Name of the score whose values are boxplotted. Currently
+ only a single score is supported.
+
+ config_dir: path to dir with APM configs.
+
+ Returns: a dictionary mapping each parameter value to all the scores
+ for that parameter value (see `filter_params` for an explanation).
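+
+ For instance (a sketch), with filter_params=['lc'] the result could be
+ {0: {'speech': {'lc': 3.1}}, 1: {'speech': {'lc': 3.4}}}, where 0 and 1
+ are values of the '-lc' flag found in the APM configs and 'speech' is a
+ capture file name.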
+ """
+ results = collections.defaultdict(dict)
+ config_names = data_frame['apm_config'].drop_duplicates().values.tolist()
+
+ for config_name in config_names:
+ config_json = data_access.AudioProcConfigFile.Load(
+ os.path.join(config_dir, config_name + '.json'))
+ data_with_config = data_frame[data_frame.apm_config == config_name]
+ data_cell_scores = data_with_config[data_with_config.eval_score_name ==
+ score_name]
+
+ # Exactly one of `params_to_plot` must match:
+ (matching_param, ) = [
+ x for x in filter_params if '-' + x in config_json
+ ]
+
+ # Add scores for every track to the result.
+ for capture_name in data_cell_scores.capture:
+ result_score = float(data_cell_scores[data_cell_scores.capture ==
+ capture_name].score)
+ config_dict = results[config_json['-' + matching_param]]
+ if capture_name not in config_dict:
+ config_dict[capture_name] = {}
+
+ config_dict[capture_name][matching_param] = result_score
+
+ return results
+
+
+def _FlattenToScoresList(config_param_score_dict):
+ """Extracts a list of scores from input data structure.
+
+ Args:
+ config_param_score_dict: of the form {'capture_name':
+ {'param_name' : score_value,.. } ..}
+
+ Returns: Plain list of all score values present in the input data
+ structure.
+ """
+ result = []
+ for capture_name in config_param_score_dict:
+ result += list(config_param_score_dict[capture_name].values())
+ return result
+
+
+def main():
+ # Init.
+ # TODO(alessiob): INFO once debugged.
+ logging.basicConfig(level=logging.DEBUG)
+ parser = InstanceArgumentsParser()
+ args = parser.parse_args()
+
+ # Get the scores.
+ src_path = collect_data.ConstructSrcPath(args)
+ logging.debug(src_path)
+ scores_data_frame = collect_data.FindScores(src_path, args)
+
+ # Filter the data by `args.params_to_plot`
+ scores_filtered = FilterScoresByParams(scores_data_frame,
+ args.params_to_plot,
+ args.eval_score, args.config_dir)
+
+ data_list = sorted(scores_filtered.items())
+ data_values = [_FlattenToScoresList(x) for (_, x) in data_list]
+ data_labels = [x for (x, _) in data_list]
+
+ _, axes = plt.subplots(nrows=1, ncols=1, figsize=(6, 6))
+ axes.boxplot(data_values, labels=data_labels)
+ axes.set_ylabel(args.eval_score)
+ axes.set_xlabel('/'.join(args.params_to_plot))
+ plt.show()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/apm_quality_assessment_export.py b/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/apm_quality_assessment_export.py
new file mode 100755
index 0000000000..c20accb9dc
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/apm_quality_assessment_export.py
@@ -0,0 +1,63 @@
+#!/usr/bin/env python
+# Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+"""Export the scores computed by the apm_quality_assessment.py script into an
+ HTML file.
+"""
+
+import logging
+import os
+import sys
+
+import quality_assessment.collect_data as collect_data
+import quality_assessment.export as export
+
+
+def _BuildOutputFilename(filename_suffix):
+ """Builds the filename for the exported file.
+
+ Args:
+ filename_suffix: suffix for the output file name.
+
+ Returns:
+ A string.
+ """
+ if filename_suffix is None:
+ return 'results.html'
+ return 'results-{}.html'.format(filename_suffix)
+
+
+def main():
+ # Init.
+ logging.basicConfig(
+ level=logging.DEBUG) # TODO(alessio): INFO once debugged.
+ parser = collect_data.InstanceArgumentsParser()
+ parser.add_argument('-f',
+ '--filename_suffix',
+ help=('suffix of the exported file'))
+ parser.description = ('Exports pre-computed APM module quality assessment '
+ 'results into HTML tables')
+ args = parser.parse_args()
+
+ # Get the scores.
+ src_path = collect_data.ConstructSrcPath(args)
+ logging.debug(src_path)
+ scores_data_frame = collect_data.FindScores(src_path, args)
+
+ # Export.
+ output_filepath = os.path.join(args.output_dir,
+ _BuildOutputFilename(args.filename_suffix))
+ exporter = export.HtmlExport(output_filepath)
+ exporter.Export(scores_data_frame)
+
+ logging.info('output file successfully written in %s', output_filepath)
+ sys.exit(0)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/apm_quality_assessment_gencfgs.py b/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/apm_quality_assessment_gencfgs.py
new file mode 100755
index 0000000000..ca80f85bd1
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/apm_quality_assessment_gencfgs.py
@@ -0,0 +1,128 @@
+#!/usr/bin/env python
+# Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+"""Generate .json files with which the APM module can be tested using the
+ apm_quality_assessment.py script and audioproc_f as APM simulator.
+"""
+
+import logging
+import os
+
+import quality_assessment.data_access as data_access
+
+OUTPUT_PATH = os.path.abspath('apm_configs')
+
+
+def _GenerateDefaultOverridden(config_override):
+ """Generates one or more APM overriden configurations.
+
+ For each item in config_override, it overrides the default configuration and
+ writes a new APM configuration file.
+
+ The default settings are loaded via "-all_default".
+ Check "src/modules/audio_processing/test/audioproc_float.cc" and search
+ for "if (FLAG_all_default) {".
+
+ For instance, in 55eb6d621489730084927868fed195d3645a9ec9 the default is this:
+ settings.use_aec = rtc::Optional<bool>(true);
+ settings.use_aecm = rtc::Optional<bool>(false);
+ settings.use_agc = rtc::Optional<bool>(true);
+ settings.use_bf = rtc::Optional<bool>(false);
+ settings.use_ed = rtc::Optional<bool>(false);
+ settings.use_hpf = rtc::Optional<bool>(true);
+ settings.use_le = rtc::Optional<bool>(true);
+ settings.use_ns = rtc::Optional<bool>(true);
+ settings.use_ts = rtc::Optional<bool>(true);
+ settings.use_vad = rtc::Optional<bool>(true);
+
+ Args:
+ config_override: dict of APM configuration file names as keys; the values
+ are dict instances encoding the audioproc_f flags.
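+
+ For example (a sketch, assuming AudioProcConfigFile writes plain JSON),
+ the 'no_AGC' set below yields apm_configs/default-no_AGC.json with
+ content {"-agc": 0, "-all_default": null}.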
+ """
+ for config_filename in config_override:
+ config = config_override[config_filename]
+ config['-all_default'] = None
+
+ config_filepath = os.path.join(
+ OUTPUT_PATH, 'default-{}.json'.format(config_filename))
+ logging.debug('config file <%s> | %s', config_filepath, config)
+
+ data_access.AudioProcConfigFile.Save(config_filepath, config)
+ logging.info('config file created: <%s>', config_filepath)
+
+
+def _GenerateAllDefaultButOne():
+ """Disables the flags enabled by default one-by-one.
+ """
+ config_sets = {
+ 'no_AEC': {
+ '-aec': 0,
+ },
+ 'no_AGC': {
+ '-agc': 0,
+ },
+ 'no_HP_filter': {
+ '-hpf': 0,
+ },
+ 'no_level_estimator': {
+ '-le': 0,
+ },
+ 'no_noise_suppressor': {
+ '-ns': 0,
+ },
+ 'no_transient_suppressor': {
+ '-ts': 0,
+ },
+ 'no_vad': {
+ '-vad': 0,
+ },
+ }
+ _GenerateDefaultOverridden(config_sets)
+
+
+def _GenerateAllDefaultPlusOne():
+ """Enables the flags disabled by default one-by-one.
+ """
+ config_sets = {
+ 'with_AECM': {
+ '-aec': 0,
+ '-aecm': 1,
+ }, # AEC and AECM are exclusive.
+ 'with_AGC_limiter': {
+ '-agc_limiter': 1,
+ },
+ 'with_AEC_delay_agnostic': {
+ '-delay_agnostic': 1,
+ },
+ 'with_drift_compensation': {
+ '-drift_compensation': 1,
+ },
+ 'with_residual_echo_detector': {
+ '-ed': 1,
+ },
+ 'with_AEC_extended_filter': {
+ '-extended_filter': 1,
+ },
+ 'with_LC': {
+ '-lc': 1,
+ },
+ 'with_refined_adaptive_filter': {
+ '-refined_adaptive_filter': 1,
+ },
+ }
+ _GenerateDefaultOverridden(config_sets)
+
+
+def main():
+ logging.basicConfig(level=logging.INFO)
+ _GenerateAllDefaultPlusOne()
+ _GenerateAllDefaultButOne()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/apm_quality_assessment_optimize.py b/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/apm_quality_assessment_optimize.py
new file mode 100644
index 0000000000..ecae2ed995
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/apm_quality_assessment_optimize.py
@@ -0,0 +1,189 @@
+#!/usr/bin/env python
+# Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+"""Finds the APM configuration that maximizes a provided metric by
+parsing the output generated by apm_quality_assessment.py.
+"""
+
+from __future__ import division
+
+import collections
+import logging
+import os
+
+import quality_assessment.data_access as data_access
+import quality_assessment.collect_data as collect_data
+
+
+def _InstanceArgumentsParser():
+ """Arguments parser factory. Extends the arguments from 'collect_data'
+ with a few extra for selecting what parameters to optimize for.
+ """
+ parser = collect_data.InstanceArgumentsParser()
+ parser.description = (
+ 'Rudimentary optimization of a function over different parameter '
+ 'combinations.')
+
+ parser.add_argument(
+ '-n',
+ '--config_dir',
+ required=False,
+ help=('path to the folder with the configuration files'),
+ default='apm_configs')
+
+ parser.add_argument('-p',
+ '--params',
+ required=True,
+ nargs='+',
+ help=('parameters to parse from the config files in '
+ 'config_dir'))
+
+ parser.add_argument(
+ '-z',
+ '--params_not_to_optimize',
+ required=False,
+ nargs='+',
+ default=[],
+ help=('parameters from `params` not to be optimized for'))
+
+ return parser
+
+
+def _ConfigurationAndScores(data_frame, params, params_not_to_optimize,
+ config_dir):
+ """Returns a list of all configurations and scores.
+
+ Args:
+ data_frame: A pandas data frame with the scores and config name
+ returned by _FindScores.
+ params: The parameter names to parse from the configs in the config
+ directory.
+
+ params_not_to_optimize: The parameter names which shouldn't affect
+ the optimal parameter
+ selection. E.g., fixed settings and not
+ tunable parameters.
+
+ config_dir: Path to folder with config files.
+
+ Returns:
+ Dictionary of the form
+ {param_combination: [{params: {param1: value1, ...},
+ scores: {score1: value1, ...}}]}.
+
+ The key `param_combination` runs over all parameter combinations
+ of the parameters in `params` and not in
+ `params_not_to_optimize`. A corresponding value is a list of all
+ param combinations for params in `params_not_to_optimize` and
+ their scores.
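+
+ For instance (a sketch), with params=['lc', 'ns'] and
+ params_not_to_optimize=['ns'], one key could be ParamCombination(lc=1)
+ and its value a list of entries such as
+ {'params': {'ns': 1}, 'scores': {'polqa': 0.83}}.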
+ """
+ results = collections.defaultdict(list)
+ config_names = data_frame['apm_config'].drop_duplicates().values.tolist()
+ score_names = data_frame['eval_score_name'].drop_duplicates(
+ ).values.tolist()
+
+ # Normalize the scores
+ normalization_constants = {}
+ for score_name in score_names:
+ scores = data_frame[data_frame.eval_score_name == score_name].score
+ normalization_constants[score_name] = max(scores)
+
+ params_to_optimize = [p for p in params if p not in params_not_to_optimize]
+ param_combination = collections.namedtuple("ParamCombination",
+ params_to_optimize)
+
+ for config_name in config_names:
+ config_json = data_access.AudioProcConfigFile.Load(
+ os.path.join(config_dir, config_name + ".json"))
+ scores = {}
+ data_cell = data_frame[data_frame.apm_config == config_name]
+ for score_name in score_names:
+ data_cell_scores = data_cell[data_cell.eval_score_name ==
+ score_name].score
+ scores[score_name] = sum(data_cell_scores) / len(data_cell_scores)
+ scores[score_name] /= normalization_constants[score_name]
+
+ result = {'scores': scores, 'params': {}}
+ config_optimize_params = {}
+ for param in params:
+ if param in params_to_optimize:
+ config_optimize_params[param] = config_json['-' + param]
+ else:
+ result['params'][param] = config_json['-' + param]
+
+ current_param_combination = param_combination(**config_optimize_params)
+ results[current_param_combination].append(result)
+ return results
+
+
+def _FindOptimalParameter(configs_and_scores, score_weighting):
+ """Finds the config producing the maximal score.
+
+ Args:
+ configs_and_scores: structure of the form returned by
+ _ConfigurationAndScores
+
+ score_weighting: a function to weight together all score values of
+ the form [{params: {param1: value1, ...}, scores:
+ {score1: value1, ...}}] into a numeric
+ value
+ Returns:
+ the config that has the largest value of `score_weighting` applied
+ to its scores.
+ """
+
+ max_score = float('-inf')
+ best_params = None
+ for config in configs_and_scores:
+ scores_and_params = configs_and_scores[config]
+ current_score = score_weighting(scores_and_params)
+ if current_score > max_score:
+ max_score = current_score
+ best_params = config
+ logging.debug("Score: %f", current_score)
+ logging.debug("Config: %s", str(config))
+ return best_params
+
+
+def _ExampleWeighting(scores_and_configs):
+ """Example argument to `_FindOptimalParameter`
+ Args:
+ scores_and_configs: a list of configs and scores, in the form
+ described in _FindOptimalParameter
+ Returns:
+ numeric value, the sum of all scores
+ """
+ res = 0
+ for score_config in scores_and_configs:
+ res += sum(score_config['scores'].values())
+ return res
+
+
+def main():
+ # Init.
+ # TODO(alessiob): INFO once debugged.
+ logging.basicConfig(level=logging.DEBUG)
+ parser = _InstanceArgumentsParser()
+ args = parser.parse_args()
+
+ # Get the scores.
+ src_path = collect_data.ConstructSrcPath(args)
+ logging.debug('Src path <%s>', src_path)
+ scores_data_frame = collect_data.FindScores(src_path, args)
+ all_scores = _ConfigurationAndScores(scores_data_frame, args.params,
+ args.params_not_to_optimize,
+ args.config_dir)
+
+ opt_param = _FindOptimalParameter(all_scores, _ExampleWeighting)
+
+ logging.info('Optimal parameter combination: <%s>', opt_param)
+ logging.info('Its score values: <%s>', all_scores[opt_param])
+
+
+if __name__ == "__main__":
+ main()
diff --git a/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/apm_quality_assessment_unittest.py b/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/apm_quality_assessment_unittest.py
new file mode 100644
index 0000000000..80338c1373
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/apm_quality_assessment_unittest.py
@@ -0,0 +1,28 @@
+# Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+"""Unit tests for the apm_quality_assessment module.
+"""
+
+import sys
+import unittest
+
+import mock
+
+import apm_quality_assessment
+
+
+class TestSimulationScript(unittest.TestCase):
+ """Unit tests for the apm_quality_assessment module.
+ """
+
+ def testMain(self):
+ # Exit with error code if no arguments are passed.
+ with self.assertRaises(SystemExit) as cm, mock.patch.object(
+ sys, 'argv', ['apm_quality_assessment.py']):
+ apm_quality_assessment.main()
+ self.assertGreater(cm.exception.code, 0)
diff --git a/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/output/README.md b/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/output/README.md
new file mode 100644
index 0000000000..66e2a1c848
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/output/README.md
@@ -0,0 +1 @@
+You can use this folder for the output generated by the apm_quality_assessment scripts.
diff --git a/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/__init__.py b/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/__init__.py
new file mode 100644
index 0000000000..b870dfaef3
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/__init__.py
@@ -0,0 +1,7 @@
+# Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
diff --git a/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/annotations.py b/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/annotations.py
new file mode 100644
index 0000000000..93a8248397
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/annotations.py
@@ -0,0 +1,296 @@
+# Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+"""Extraction of annotations from audio files.
+"""
+
+from __future__ import division
+import logging
+import os
+import shutil
+import struct
+import subprocess
+import sys
+import tempfile
+
+try:
+ import numpy as np
+except ImportError:
+ logging.critical('Cannot import the third-party Python package numpy')
+ sys.exit(1)
+
+from . import external_vad
+from . import exceptions
+from . import signal_processing
+
+
+class AudioAnnotationsExtractor(object):
+ """Extracts annotations from audio files.
+ """
+
+ class VadType(object):
+ ENERGY_THRESHOLD = 1 # TODO(alessiob): Consider switching to P56 standard.
+ WEBRTC_COMMON_AUDIO = 2 # common_audio/vad/include/vad.h
+ WEBRTC_APM = 4 # modules/audio_processing/vad/vad.h
+
+ def __init__(self, value):
+ if (not isinstance(value, int)) or not 0 <= value <= 7:
+ raise exceptions.InitializationException('Invalid vad type: ' +
+ str(value))
+ self._value = value
+
+ def Contains(self, vad_type):
+ return (self._value | vad_type) == self._value
+
+ def __str__(self):
+ vads = []
+ if self.Contains(self.ENERGY_THRESHOLD):
+ vads.append("energy")
+ if self.Contains(self.WEBRTC_COMMON_AUDIO):
+ vads.append("common_audio")
+ if self.Contains(self.WEBRTC_APM):
+ vads.append("apm")
+ return "VadType({})".format(", ".join(vads))
+
+ _OUTPUT_FILENAME_TEMPLATE = '{}annotations.npz'
+
+ # Level estimation params.
+ _ONE_DB_REDUCTION = np.power(10.0, -1.0 / 20.0)
+ _LEVEL_FRAME_SIZE_MS = 1.0
+ # The time constants in ms indicate the time it takes for the level estimate
+ # to go down/up by 1 db if the signal is zero.
+ _LEVEL_ATTACK_MS = 5.0
+ _LEVEL_DECAY_MS = 20.0
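+ # E.g., with 1 ms frames and a 5 ms attack time, the attack coefficient
+ # computed in Extract() is (10^(-1/20))^(1/5) ~= 0.977.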
+
+ # VAD params.
+ _VAD_THRESHOLD = 1
+ _VAD_WEBRTC_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)),
+ os.pardir, os.pardir)
+ _VAD_WEBRTC_COMMON_AUDIO_PATH = os.path.join(_VAD_WEBRTC_PATH, 'vad')
+
+ _VAD_WEBRTC_APM_PATH = os.path.join(_VAD_WEBRTC_PATH, 'apm_vad')
+
+ def __init__(self, vad_type, external_vads=None):
+ self._signal = None
+ self._level = None
+ self._level_frame_size = None
+ self._common_audio_vad = None
+ self._energy_vad = None
+ self._apm_vad_probs = None
+ self._apm_vad_rms = None
+ self._vad_frame_size = None
+ self._vad_frame_size_ms = None
+ self._c_attack = None
+ self._c_decay = None
+
+ self._vad_type = self.VadType(vad_type)
+ logging.info('VADs used for annotations: ' + str(self._vad_type))
+
+ if external_vads is None:
+ external_vads = {}
+ self._external_vads = external_vads
+
+ assert len(self._external_vads) == len(external_vads), (
+ 'The external VAD names must be unique.')
+ for vad in external_vads.values():
+ if not isinstance(vad, external_vad.ExternalVad):
+ raise exceptions.InitializationException('Invalid vad type: ' +
+ str(type(vad)))
+ logging.info('External VAD used for annotation: ' + str(vad.name))
+
+ assert os.path.exists(self._VAD_WEBRTC_COMMON_AUDIO_PATH), \
+ self._VAD_WEBRTC_COMMON_AUDIO_PATH
+ assert os.path.exists(self._VAD_WEBRTC_APM_PATH), \
+ self._VAD_WEBRTC_APM_PATH
+
+ @classmethod
+ def GetOutputFileNameTemplate(cls):
+ return cls._OUTPUT_FILENAME_TEMPLATE
+
+ def GetLevel(self):
+ return self._level
+
+ def GetLevelFrameSize(self):
+ return self._level_frame_size
+
+ @classmethod
+ def GetLevelFrameSizeMs(cls):
+ return cls._LEVEL_FRAME_SIZE_MS
+
+ def GetVadOutput(self, vad_type):
+ if vad_type == self.VadType.ENERGY_THRESHOLD:
+ return self._energy_vad
+ elif vad_type == self.VadType.WEBRTC_COMMON_AUDIO:
+ return self._common_audio_vad
+ elif vad_type == self.VadType.WEBRTC_APM:
+ return (self._apm_vad_probs, self._apm_vad_rms)
+ else:
+ raise exceptions.InitializationException('Invalid vad type: ' +
+ str(vad_type))
+
+ def GetVadFrameSize(self):
+ return self._vad_frame_size
+
+ def GetVadFrameSizeMs(self):
+ return self._vad_frame_size_ms
+
+ def Extract(self, filepath):
+ # Load signal.
+ self._signal = signal_processing.SignalProcessingUtils.LoadWav(
+ filepath)
+ if self._signal.channels != 1:
+ raise NotImplementedError(
+ 'Multiple-channel annotations not implemented')
+
+ # Level estimation params.
+ self._level_frame_size = int(self._signal.frame_rate / 1000 *
+ (self._LEVEL_FRAME_SIZE_MS))
+ self._c_attack = 0.0 if self._LEVEL_ATTACK_MS == 0 else (
+ self._ONE_DB_REDUCTION**(self._LEVEL_FRAME_SIZE_MS /
+ self._LEVEL_ATTACK_MS))
+ self._c_decay = 0.0 if self._LEVEL_DECAY_MS == 0 else (
+ self._ONE_DB_REDUCTION**(self._LEVEL_FRAME_SIZE_MS /
+ self._LEVEL_DECAY_MS))
+
+ # Compute level.
+ self._LevelEstimation()
+
+ # Ideal VAD output, it requires clean speech with high SNR as input.
+ if self._vad_type.Contains(self.VadType.ENERGY_THRESHOLD):
+ # Naive VAD based on level thresholding.
+ vad_threshold = np.percentile(self._level, self._VAD_THRESHOLD)
+ self._energy_vad = np.uint8(self._level > vad_threshold)
+ self._vad_frame_size = self._level_frame_size
+ self._vad_frame_size_ms = self._LEVEL_FRAME_SIZE_MS
+ if self._vad_type.Contains(self.VadType.WEBRTC_COMMON_AUDIO):
+ # WebRTC common_audio/ VAD.
+ self._RunWebRtcCommonAudioVad(filepath, self._signal.frame_rate)
+ if self._vad_type.Contains(self.VadType.WEBRTC_APM):
+ # WebRTC modules/audio_processing/ VAD.
+ self._RunWebRtcApmVad(filepath)
+ for extvad_name in self._external_vads:
+ self._external_vads[extvad_name].Run(filepath)
+
+ def Save(self, output_path, annotation_name=""):
+ ext_kwargs = {
+ 'extvad_conf-' + ext_vad:
+ self._external_vads[ext_vad].GetVadOutput()
+ for ext_vad in self._external_vads
+ }
+ np.savez_compressed(file=os.path.join(
+ output_path,
+ self.GetOutputFileNameTemplate().format(annotation_name)),
+ level=self._level,
+ level_frame_size=self._level_frame_size,
+ level_frame_size_ms=self._LEVEL_FRAME_SIZE_MS,
+ vad_output=self._common_audio_vad,
+ vad_energy_output=self._energy_vad,
+ vad_frame_size=self._vad_frame_size,
+ vad_frame_size_ms=self._vad_frame_size_ms,
+ vad_probs=self._apm_vad_probs,
+ vad_rms=self._apm_vad_rms,
+ **ext_kwargs)
+
+ def _LevelEstimation(self):
+ # Read samples.
+ samples = signal_processing.SignalProcessingUtils.AudioSegmentToRawData(
+ self._signal).astype(np.float32) / 32768.0
+ num_frames = len(samples) // self._level_frame_size
+ num_samples = num_frames * self._level_frame_size
+
+ # Envelope.
+ self._level = np.max(np.reshape(np.abs(samples[:num_samples]),
+ (num_frames, self._level_frame_size)),
+ axis=1)
+ assert len(self._level) == num_frames
+
+ # Envelope smoothing.
+ smooth = lambda curr, prev, k: (1 - k) * curr + k * prev
+ self._level[0] = smooth(self._level[0], 0.0, self._c_attack)
+ for i in range(1, num_frames):
+ self._level[i] = smooth(
+ self._level[i], self._level[i - 1], self._c_attack if
+ (self._level[i] > self._level[i - 1]) else self._c_decay)
+
+ def _RunWebRtcCommonAudioVad(self, wav_file_path, sample_rate):
+ self._common_audio_vad = None
+ self._vad_frame_size = None
+
+ # Create temporary output path.
+ tmp_path = tempfile.mkdtemp()
+ output_file_path = os.path.join(
+ tmp_path,
+ os.path.split(wav_file_path)[1] + '_vad.tmp')
+
+ # Call WebRTC VAD.
+ try:
+ subprocess.call([
+ self._VAD_WEBRTC_COMMON_AUDIO_PATH, '-i', wav_file_path, '-o',
+ output_file_path
+ ],
+ cwd=self._VAD_WEBRTC_PATH)
+
+ # Read bytes.
+ with open(output_file_path, 'rb') as f:
+ raw_data = f.read()
+
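+ # Output file layout, as inferred from the parsing below: byte 0 is
+ # the frame size in ms, bytes 1..N-2 hold the VAD decisions packed
+ # LSB-first (8 per byte), and byte N-1 is the number of unused bits
+ # in the last decision byte.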
+ # Parse side information.
+ self._vad_frame_size_ms = struct.unpack('B', raw_data[0])[0]
+ self._vad_frame_size = self._vad_frame_size_ms * sample_rate / 1000
+ assert self._vad_frame_size_ms in [10, 20, 30]
+ extra_bits = struct.unpack('B', raw_data[-1])[0]
+ assert 0 <= extra_bits <= 8
+
+ # Init VAD vector.
+ num_bytes = len(raw_data)
+ num_frames = 8 * (num_bytes -
+ 2) - extra_bits # 8 frames for each byte.
+ self._common_audio_vad = np.zeros(num_frames, np.uint8)
+
+ # Read VAD decisions.
+ for i, byte in enumerate(raw_data[1:-1]):
+ byte = struct.unpack('B', byte)[0]
+ for j in range(8 if i < num_bytes - 3 else (8 - extra_bits)):
+ self._common_audio_vad[i * 8 + j] = int(byte & 1)
+ byte = byte >> 1
+ except Exception as e:
+ logging.error('Error while running the WebRTC VAD (' + e.message +
+ ')')
+ finally:
+ if os.path.exists(tmp_path):
+ shutil.rmtree(tmp_path)
+
+ def _RunWebRtcApmVad(self, wav_file_path):
+ # Create temporary output path.
+ tmp_path = tempfile.mkdtemp()
+ output_file_path_probs = os.path.join(
+ tmp_path,
+ os.path.split(wav_file_path)[1] + '_vad_probs.tmp')
+ output_file_path_rms = os.path.join(
+ tmp_path,
+ os.path.split(wav_file_path)[1] + '_vad_rms.tmp')
+
+ # Call WebRTC VAD.
+ try:
+ subprocess.call([
+ self._VAD_WEBRTC_APM_PATH, '-i', wav_file_path, '-o_probs',
+ output_file_path_probs, '-o_rms', output_file_path_rms
+ ],
+ cwd=self._VAD_WEBRTC_PATH)
+
+ # Parse annotations.
+ self._apm_vad_probs = np.fromfile(output_file_path_probs,
+ np.double)
+ self._apm_vad_rms = np.fromfile(output_file_path_rms, np.double)
+ assert len(self._apm_vad_rms) == len(self._apm_vad_probs)
+
+ except Exception as e:
+ logging.error('Error while running the WebRTC APM VAD (' +
+ e.message + ')')
+ finally:
+ if os.path.exists(tmp_path):
+ shutil.rmtree(tmp_path)
diff --git a/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/annotations_unittest.py b/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/annotations_unittest.py
new file mode 100644
index 0000000000..8230208808
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/annotations_unittest.py
@@ -0,0 +1,160 @@
+# Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+"""Unit tests for the annotations module.
+"""
+
+from __future__ import division
+import logging
+import os
+import shutil
+import tempfile
+import unittest
+
+import numpy as np
+
+from . import annotations
+from . import external_vad
+from . import input_signal_creator
+from . import signal_processing
+
+
+class TestAnnotationsExtraction(unittest.TestCase):
+ """Unit tests for the annotations module.
+ """
+
+ _CLEAN_TMP_OUTPUT = True
+ _DEBUG_PLOT_VAD = False
+ _VAD_TYPE_CLASS = annotations.AudioAnnotationsExtractor.VadType
+ _ALL_VAD_TYPES = (_VAD_TYPE_CLASS.ENERGY_THRESHOLD
+ | _VAD_TYPE_CLASS.WEBRTC_COMMON_AUDIO
+ | _VAD_TYPE_CLASS.WEBRTC_APM)
+
+ def setUp(self):
+ """Create temporary folder."""
+ self._tmp_path = tempfile.mkdtemp()
+ self._wav_file_path = os.path.join(self._tmp_path, 'tone.wav')
+ pure_tone, _ = input_signal_creator.InputSignalCreator.Create(
+ 'pure_tone', [440, 1000])
+ signal_processing.SignalProcessingUtils.SaveWav(
+ self._wav_file_path, pure_tone)
+ self._sample_rate = pure_tone.frame_rate
+
+ def tearDown(self):
+ """Recursively delete temporary folder."""
+ if self._CLEAN_TMP_OUTPUT:
+ shutil.rmtree(self._tmp_path)
+ else:
+ logging.warning(self.id() + ' did not clean the temporary path ' +
+ (self._tmp_path))
+
+ def testFrameSizes(self):
+ e = annotations.AudioAnnotationsExtractor(self._ALL_VAD_TYPES)
+ e.Extract(self._wav_file_path)
+ samples_to_ms = lambda n, sr: 1000 * n // sr
+ self.assertEqual(
+ samples_to_ms(e.GetLevelFrameSize(), self._sample_rate),
+ e.GetLevelFrameSizeMs())
+ self.assertEqual(samples_to_ms(e.GetVadFrameSize(), self._sample_rate),
+ e.GetVadFrameSizeMs())
+
+ def testVoiceActivityDetectors(self):
+ for vad_type_value in range(0, self._ALL_VAD_TYPES + 1):
+ vad_type = self._VAD_TYPE_CLASS(vad_type_value)
+ e = annotations.AudioAnnotationsExtractor(vad_type=vad_type_value)
+ e.Extract(self._wav_file_path)
+ if vad_type.Contains(self._VAD_TYPE_CLASS.ENERGY_THRESHOLD):
+ # pylint: disable=unpacking-non-sequence
+ vad_output = e.GetVadOutput(
+ self._VAD_TYPE_CLASS.ENERGY_THRESHOLD)
+ self.assertGreater(len(vad_output), 0)
+ self.assertGreaterEqual(
+ float(np.sum(vad_output)) / len(vad_output), 0.95)
+
+ if vad_type.Contains(self._VAD_TYPE_CLASS.WEBRTC_COMMON_AUDIO):
+ # pylint: disable=unpacking-non-sequence
+ vad_output = e.GetVadOutput(
+ self._VAD_TYPE_CLASS.WEBRTC_COMMON_AUDIO)
+ self.assertGreater(len(vad_output), 0)
+ self.assertGreaterEqual(
+ float(np.sum(vad_output)) / len(vad_output), 0.95)
+
+ if vad_type.Contains(self._VAD_TYPE_CLASS.WEBRTC_APM):
+ # pylint: disable=unpacking-non-sequence
+ (vad_probs,
+ vad_rms) = e.GetVadOutput(self._VAD_TYPE_CLASS.WEBRTC_APM)
+ self.assertGreater(len(vad_probs), 0)
+ self.assertGreater(len(vad_rms), 0)
+ self.assertGreaterEqual(
+ float(np.sum(vad_probs)) / len(vad_probs), 0.5)
+ self.assertGreaterEqual(
+ float(np.sum(vad_rms)) / len(vad_rms), 20000)
+
+ if self._DEBUG_PLOT_VAD:
+ frame_times_s = lambda num_frames, frame_size_ms: np.arange(
+ num_frames).astype(np.float32) * frame_size_ms / 1000.0
+ level = e.GetLevel()
+ t_level = frame_times_s(num_frames=len(level),
+ frame_size_ms=e.GetLevelFrameSizeMs())
+ t_vad = frame_times_s(num_frames=len(vad_output),
+ frame_size_ms=e.GetVadFrameSizeMs())
+ import matplotlib.pyplot as plt
+ plt.figure()
+ plt.hold(True)
+ plt.plot(t_level, level)
+ plt.plot(t_vad, vad_output * np.max(level), '.')
+ plt.show()
+
+ def testSaveLoad(self):
+ e = annotations.AudioAnnotationsExtractor(self._ALL_VAD_TYPES)
+ e.Extract(self._wav_file_path)
+ e.Save(self._tmp_path, "fake-annotation")
+
+ data = np.load(
+ os.path.join(
+ self._tmp_path,
+ e.GetOutputFileNameTemplate().format("fake-annotation")))
+ np.testing.assert_array_equal(e.GetLevel(), data['level'])
+ self.assertEqual(np.float32, data['level'].dtype)
+ np.testing.assert_array_equal(
+ e.GetVadOutput(self._VAD_TYPE_CLASS.ENERGY_THRESHOLD),
+ data['vad_energy_output'])
+ np.testing.assert_array_equal(
+ e.GetVadOutput(self._VAD_TYPE_CLASS.WEBRTC_COMMON_AUDIO),
+ data['vad_output'])
+ np.testing.assert_array_equal(
+ e.GetVadOutput(self._VAD_TYPE_CLASS.WEBRTC_APM)[0],
+ data['vad_probs'])
+ np.testing.assert_array_equal(
+ e.GetVadOutput(self._VAD_TYPE_CLASS.WEBRTC_APM)[1],
+ data['vad_rms'])
+ self.assertEqual(np.uint8, data['vad_energy_output'].dtype)
+ self.assertEqual(np.float64, data['vad_probs'].dtype)
+ self.assertEqual(np.float64, data['vad_rms'].dtype)
+
+ def testEmptyExternalShouldNotCrash(self):
+ for vad_type_value in range(0, self._ALL_VAD_TYPES + 1):
+ annotations.AudioAnnotationsExtractor(vad_type_value, {})
+
+ def testFakeExternalSaveLoad(self):
+ def FakeExternalFactory():
+ return external_vad.ExternalVad(
+ os.path.join(os.path.dirname(os.path.abspath(__file__)),
+ 'fake_external_vad.py'), 'fake')
+
+ for vad_type_value in range(0, self._ALL_VAD_TYPES + 1):
+ e = annotations.AudioAnnotationsExtractor(
+ vad_type_value, {'fake': FakeExternalFactory()})
+ e.Extract(self._wav_file_path)
+ e.Save(self._tmp_path, annotation_name="fake-annotation")
+ data = np.load(
+ os.path.join(
+ self._tmp_path,
+ e.GetOutputFileNameTemplate().format("fake-annotation")))
+ self.assertEqual(np.float32, data['extvad_conf-fake'].dtype)
+ np.testing.assert_almost_equal(np.arange(100, dtype=np.float32),
+ data['extvad_conf-fake'])
diff --git a/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/apm_configs/default.json b/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/apm_configs/default.json
new file mode 100644
index 0000000000..5c3277bac0
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/apm_configs/default.json
@@ -0,0 +1 @@
+{"-all_default": null}
diff --git a/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/apm_vad.cc b/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/apm_vad.cc
new file mode 100644
index 0000000000..73ce4ed3f7
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/apm_vad.cc
@@ -0,0 +1,96 @@
+// Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+
+#include <array>
+#include <fstream>
+#include <memory>
+
+#include "absl/flags/flag.h"
+#include "absl/flags/parse.h"
+#include "common_audio/wav_file.h"
+#include "modules/audio_processing/vad/voice_activity_detector.h"
+#include "rtc_base/logging.h"
+
+ABSL_FLAG(std::string, i, "", "Input wav file");
+ABSL_FLAG(std::string, o_probs, "", "VAD probabilities output file");
+ABSL_FLAG(std::string, o_rms, "", "VAD output file");
+
+namespace webrtc {
+namespace test {
+namespace {
+
+constexpr uint8_t kAudioFrameLengthMilliseconds = 10;
+constexpr int kMaxSampleRate = 48000;
+constexpr size_t kMaxFrameLen =
+ kAudioFrameLengthMilliseconds * kMaxSampleRate / 1000;
+
+int main(int argc, char* argv[]) {
+ absl::ParseCommandLine(argc, argv);
+ const std::string input_file = absl::GetFlag(FLAGS_i);
+ const std::string output_probs_file = absl::GetFlag(FLAGS_o_probs);
+ const std::string output_file = absl::GetFlag(FLAGS_o_rms);
+ // Open wav input file and check properties.
+ WavReader wav_reader(input_file);
+ if (wav_reader.num_channels() != 1) {
+ RTC_LOG(LS_ERROR) << "Only mono wav files supported";
+ return 1;
+ }
+ if (wav_reader.sample_rate() > kMaxSampleRate) {
+ RTC_LOG(LS_ERROR) << "Beyond maximum sample rate (" << kMaxSampleRate
+ << ")";
+ return 1;
+ }
+ const size_t audio_frame_len = rtc::CheckedDivExact(
+ kAudioFrameLengthMilliseconds * wav_reader.sample_rate(), 1000);
+ if (audio_frame_len > kMaxFrameLen) {
+ RTC_LOG(LS_ERROR) << "The frame size and/or the sample rate are too large.";
+ return 1;
+ }
+
+ // Create output file and write header.
+ std::ofstream out_probs_file(output_probs_file, std::ofstream::binary);
+ std::ofstream out_rms_file(output_file, std::ofstream::binary);
+
+ // Run VAD and write decisions.
+ VoiceActivityDetector vad;
+ std::array<int16_t, kMaxFrameLen> samples;
+
+ while (true) {
+ // Process frame.
+ const auto read_samples =
+ wav_reader.ReadSamples(audio_frame_len, samples.data());
+ if (read_samples < audio_frame_len) {
+ break;
+ }
+ vad.ProcessChunk(samples.data(), audio_frame_len, wav_reader.sample_rate());
+ // Write output.
+ auto probs = vad.chunkwise_voice_probabilities();
+ auto rms = vad.chunkwise_rms();
+ RTC_CHECK_EQ(probs.size(), rms.size());
+ RTC_CHECK_EQ(sizeof(double), 8);
+
+ for (const auto& p : probs) {
+ out_probs_file.write(reinterpret_cast<const char*>(&p), 8);
+ }
+ for (const auto& r : rms) {
+ out_rms_file.write(reinterpret_cast<const char*>(&r), 8);
+ }
+ }
+
+ out_probs_file.close();
+ out_rms_file.close();
+ return 0;
+}
+
+} // namespace
+} // namespace test
+} // namespace webrtc
+
+int main(int argc, char* argv[]) {
+ return webrtc::test::main(argc, argv);
+}
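+
+// Example invocation (hypothetical file names; a minimal usage sketch of the
+// flags defined above):
+//   ./apm_vad -i capture.wav -o_probs probs.dat -o_rms rms.dat
+// Each output file stores one raw 8-byte double per VAD chunk, written in
+// host byte order.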
diff --git a/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/audioproc_wrapper.py b/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/audioproc_wrapper.py
new file mode 100644
index 0000000000..04aeaa95b9
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/audioproc_wrapper.py
@@ -0,0 +1,100 @@
+# Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+"""Class implementing a wrapper for APM simulators.
+"""
+
+import cProfile
+import logging
+import os
+import subprocess
+
+from . import data_access
+from . import exceptions
+
+
+class AudioProcWrapper(object):
+ """Wrapper for APM simulators.
+ """
+
+ DEFAULT_APM_SIMULATOR_BIN_PATH = os.path.abspath(
+ os.path.join(os.pardir, 'audioproc_f'))
+ OUTPUT_FILENAME = 'output.wav'
+
+ def __init__(self, simulator_bin_path):
+ """Ctor.
+
+ Args:
+ simulator_bin_path: path to the APM simulator binary.
+ """
+ self._simulator_bin_path = simulator_bin_path
+ self._config = None
+ self._output_signal_filepath = None
+
+ # Profiler instance to measure running time.
+ self._profiler = cProfile.Profile()
+
+ @property
+ def output_filepath(self):
+ return self._output_signal_filepath
+
+ def Run(self,
+ config_filepath,
+ capture_input_filepath,
+ output_path,
+ render_input_filepath=None):
+ """Runs APM simulator.
+
+ Args:
+ config_filepath: path to the configuration file specifying the arguments
+ for the APM simulator.
+ capture_input_filepath: path to the capture audio track input file (aka
+ forward or near-end).
+ output_path: path of the directory where the output audio track file is
+ written.
+ render_input_filepath: path to the render audio track input file (aka
+ reverse or far-end).
+ """
+ # Init.
+ self._output_signal_filepath = os.path.join(output_path,
+ self.OUTPUT_FILENAME)
+ profiling_stats_filepath = os.path.join(output_path, 'profiling.stats')
+
+ # Skip if the output has already been generated.
+ if os.path.exists(self._output_signal_filepath) and os.path.exists(
+ profiling_stats_filepath):
+ return
+
+ # Load configuration.
+ self._config = data_access.AudioProcConfigFile.Load(config_filepath)
+
+ # Set remaining parameters.
+ if not os.path.exists(capture_input_filepath):
+ raise exceptions.FileNotFoundError(
+ 'cannot find capture input file')
+ self._config['-i'] = capture_input_filepath
+ self._config['-o'] = self._output_signal_filepath
+ if render_input_filepath is not None:
+ if not os.path.exists(render_input_filepath):
+ raise exceptions.FileNotFoundError(
+ 'cannot find render input file')
+ self._config['-ri'] = render_input_filepath
+
+ # Build arguments list.
+ args = [self._simulator_bin_path]
+ for param_name in self._config:
+ args.append(param_name)
+ if self._config[param_name] is not None:
+ args.append(str(self._config[param_name]))
+ logging.debug(' '.join(args))
+
+ # Run.
+ self._profiler.enable()
+ subprocess.call(args)
+ self._profiler.disable()
+
+ # Save profiling stats.
+ self._profiler.dump_stats(profiling_stats_filepath)
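+
+
+# Example usage (a minimal sketch; the file paths are hypothetical):
+#   wrapper = AudioProcWrapper(AudioProcWrapper.DEFAULT_APM_SIMULATOR_BIN_PATH)
+#   wrapper.Run(config_filepath='apm_configs/default.json',
+#               capture_input_filepath='capture.wav',
+#               output_path='/tmp/apm_out')
+#   print(wrapper.output_filepath)  # -> /tmp/apm_out/output.wav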
diff --git a/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/collect_data.py b/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/collect_data.py
new file mode 100644
index 0000000000..38aac0cbe2
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/collect_data.py
@@ -0,0 +1,243 @@
+# Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+"""Imports a filtered subset of the scores and configurations computed
+by apm_quality_assessment.py into a pandas data frame.
+"""
+
+import argparse
+import glob
+import logging
+import os
+import re
+import sys
+
+try:
+ import pandas as pd
+except ImportError:
+ logging.critical('Cannot import the third-party Python package pandas')
+ sys.exit(1)
+
+from . import data_access
+from . import simulation as sim
+
+# Compiled regular expressions used to extract score descriptors.
+RE_CONFIG_NAME = re.compile(sim.ApmModuleSimulator.GetPrefixApmConfig() +
+ r'(.+)')
+RE_CAPTURE_NAME = re.compile(sim.ApmModuleSimulator.GetPrefixCapture() +
+ r'(.+)')
+RE_RENDER_NAME = re.compile(sim.ApmModuleSimulator.GetPrefixRender() + r'(.+)')
+RE_ECHO_SIM_NAME = re.compile(sim.ApmModuleSimulator.GetPrefixEchoSimulator() +
+ r'(.+)')
+RE_TEST_DATA_GEN_NAME = re.compile(
+ sim.ApmModuleSimulator.GetPrefixTestDataGenerator() + r'(.+)')
+RE_TEST_DATA_GEN_PARAMS = re.compile(
+ sim.ApmModuleSimulator.GetPrefixTestDataGeneratorParameters() + r'(.+)')
+RE_SCORE_NAME = re.compile(sim.ApmModuleSimulator.GetPrefixScore() +
+ r'(.+)(\..+)')
+
+
+def InstanceArgumentsParser():
+ """Arguments parser factory.
+ """
+ parser = argparse.ArgumentParser(
+ description=('Override this description in a user script by changing'
+ ' `parser.description` of the returned parser.'))
+
+ parser.add_argument('-o',
+ '--output_dir',
+ required=True,
+ help=('the same base path used with the '
+ 'apm_quality_assessment tool'))
+
+ parser.add_argument(
+ '-c',
+ '--config_names',
+ type=re.compile,
+ help=('regular expression to filter the APM configuration'
+ ' names'))
+
+ parser.add_argument(
+ '-i',
+ '--capture_names',
+ type=re.compile,
+ help=('regular expression to filter the capture signal '
+ 'names'))
+
+ parser.add_argument('-r',
+ '--render_names',
+ type=re.compile,
+ help=('regular expression to filter the render signal '
+ 'names'))
+
+ parser.add_argument(
+ '-e',
+ '--echo_simulator_names',
+ type=re.compile,
+ help=('regular expression to filter the echo simulator '
+ 'names'))
+
+ parser.add_argument('-t',
+ '--test_data_generators',
+ type=re.compile,
+ help=('regular expression to filter the test data '
+ 'generator names'))
+
+ parser.add_argument(
+ '-s',
+ '--eval_scores',
+ type=re.compile,
+ help=('regular expression to filter the evaluation score '
+ 'names'))
+
+ return parser
+
+
+def _GetScoreDescriptors(score_filepath):
+ """Extracts a score descriptor from the given score file path.
+
+ Args:
+ score_filepath: path to the score file.
+
+ Returns:
+ A tuple of strings (APM configuration name, capture audio track name,
+ render audio track name, echo simulator name, test data generator name,
+ test data generator parameters as string, evaluation score name).
+ """
+ fields = score_filepath.split(os.sep)[-7:]
+ extract_name = lambda index, reg_expr: (reg_expr.match(fields[index]).
+ groups(0)[0])
+ return (
+ extract_name(0, RE_CONFIG_NAME),
+ extract_name(1, RE_CAPTURE_NAME),
+ extract_name(2, RE_RENDER_NAME),
+ extract_name(3, RE_ECHO_SIM_NAME),
+ extract_name(4, RE_TEST_DATA_GEN_NAME),
+ extract_name(5, RE_TEST_DATA_GEN_PARAMS),
+ extract_name(6, RE_SCORE_NAME),
+ )
+
+
+def _ExcludeScore(config_name, capture_name, render_name, echo_simulator_name,
+ test_data_gen_name, score_name, args):
+ """Decides whether excluding a score.
+
+ A set of optional regular expressions in args is used to determine if the
+ score should be excluded (depending on its |*_name| descriptors).
+
+ Args:
+ config_name: APM configuration name.
+ capture_name: capture audio track name.
+ render_name: render audio track name.
+ echo_simulator_name: echo simulator name.
+ test_data_gen_name: test data generator name.
+ score_name: evaluation score name.
+ args: parsed arguments.
+
+ Returns:
+ True if the score must be excluded, False otherwise.
+ """
+ value_regexpr_pairs = [
+ (config_name, args.config_names),
+ (capture_name, args.capture_names),
+ (render_name, args.render_names),
+ (echo_simulator_name, args.echo_simulator_names),
+ (test_data_gen_name, args.test_data_generators),
+ (score_name, args.eval_scores),
+ ]
+
+ # Score accepted if each value matches the corresponding regular expression.
+ for value, regexpr in value_regexpr_pairs:
+ if regexpr is None:
+ continue
+ if not regexpr.match(value):
+ return True
+
+ return False
+
+
+def FindScores(src_path, args):
+ """Given a search path, find scores and return a DataFrame object.
+
+ Args:
+ src_path: Search path pattern.
+ args: parsed arguments.
+
+ Returns:
+ A DataFrame object.
+ """
+ # Get scores.
+ scores = []
+ for score_filepath in glob.iglob(src_path):
+ # Extract score descriptor fields from the path.
+ (config_name, capture_name, render_name, echo_simulator_name,
+ test_data_gen_name, test_data_gen_params,
+ score_name) = _GetScoreDescriptors(score_filepath)
+
+ # Ignore the score if required.
+ if _ExcludeScore(config_name, capture_name, render_name,
+ echo_simulator_name, test_data_gen_name, score_name,
+ args):
+ logging.info('ignored score: %s %s %s %s %s %s', config_name,
+ capture_name, render_name, echo_simulator_name,
+ test_data_gen_name, score_name)
+ continue
+
+ # Read metadata and score.
+ metadata = data_access.Metadata.LoadAudioTestDataPaths(
+ os.path.split(score_filepath)[0])
+ score = data_access.ScoreFile.Load(score_filepath)
+
+ # Add a score with its descriptor fields.
+ scores.append((
+ metadata['clean_capture_input_filepath'],
+ metadata['echo_free_capture_filepath'],
+ metadata['echo_filepath'],
+ metadata['render_filepath'],
+ metadata['capture_filepath'],
+ metadata['apm_output_filepath'],
+ metadata['apm_reference_filepath'],
+ config_name,
+ capture_name,
+ render_name,
+ echo_simulator_name,
+ test_data_gen_name,
+ test_data_gen_params,
+ score_name,
+ score,
+ ))
+
+ return pd.DataFrame(data=scores,
+ columns=(
+ 'clean_capture_input_filepath',
+ 'echo_free_capture_filepath',
+ 'echo_filepath',
+ 'render_filepath',
+ 'capture_filepath',
+ 'apm_output_filepath',
+ 'apm_reference_filepath',
+ 'apm_config',
+ 'capture',
+ 'render',
+ 'echo_simulator',
+ 'test_data_gen',
+ 'test_data_gen_params',
+ 'eval_score_name',
+ 'score',
+ ))
+
+
+def ConstructSrcPath(args):
+ return os.path.join(
+ args.output_dir,
+ sim.ApmModuleSimulator.GetPrefixApmConfig() + '*',
+ sim.ApmModuleSimulator.GetPrefixCapture() + '*',
+ sim.ApmModuleSimulator.GetPrefixRender() + '*',
+ sim.ApmModuleSimulator.GetPrefixEchoSimulator() + '*',
+ sim.ApmModuleSimulator.GetPrefixTestDataGenerator() + '*',
+ sim.ApmModuleSimulator.GetPrefixTestDataGeneratorParameters() + '*',
+ sim.ApmModuleSimulator.GetPrefixScore() + '*')
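+
+
+# Example usage (a minimal sketch; the output directory is hypothetical):
+#   parser = InstanceArgumentsParser()
+#   args = parser.parse_args(['-o', '/tmp/apm_qa_out'])
+#   scores_data_frame = FindScores(ConstructSrcPath(args), args)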
diff --git a/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/data_access.py b/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/data_access.py
new file mode 100644
index 0000000000..c1aebb67f1
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/data_access.py
@@ -0,0 +1,154 @@
+# Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+"""Data access utility functions and classes.
+"""
+
+import json
+import os
+
+
+def MakeDirectory(path):
+ """Makes a directory recursively without rising exceptions if existing.
+
+ Args:
+ path: path to the directory to be created.
+ """
+ if os.path.exists(path):
+ return
+ os.makedirs(path)
+
+
+class Metadata(object):
+ """Data access class to save and load metadata.
+ """
+
+ def __init__(self):
+ pass
+
+ _GENERIC_METADATA_SUFFIX = '.mdata'
+ _AUDIO_TEST_DATA_FILENAME = 'audio_test_data.json'
+
+ @classmethod
+ def LoadFileMetadata(cls, filepath):
+ """Loads generic metadata linked to a file.
+
+ Args:
+ filepath: path to the metadata file to read.
+
+ Returns:
+ A dict.
+ """
+ with open(filepath + cls._GENERIC_METADATA_SUFFIX) as f:
+ return json.load(f)
+
+ @classmethod
+ def SaveFileMetadata(cls, filepath, metadata):
+ """Saves generic metadata linked to a file.
+
+ Args:
+ filepath: path to the metadata file to write.
+ metadata: a dict.
+ """
+ with open(filepath + cls._GENERIC_METADATA_SUFFIX, 'w') as f:
+ json.dump(metadata, f)
+
+ @classmethod
+ def LoadAudioTestDataPaths(cls, metadata_path):
+ """Loads the input and the reference audio track paths.
+
+ Args:
+ metadata_path: path to the directory containing the metadata file.
+
+ Returns:
+ A dict with the audio track file paths.
+ """
+ metadata_filepath = os.path.join(metadata_path,
+ cls._AUDIO_TEST_DATA_FILENAME)
+ with open(metadata_filepath) as f:
+ return json.load(f)
+
+ @classmethod
+ def SaveAudioTestDataPaths(cls, output_path, **filepaths):
+ """Saves the input and the reference audio track paths.
+
+ Args:
+ output_path: path to the directory containing the metadata file.
+
+ Keyword Args:
+ filepaths: collection of audio track file paths to save.
+ """
+ output_filepath = os.path.join(output_path,
+ cls._AUDIO_TEST_DATA_FILENAME)
+ with open(output_filepath, 'w') as f:
+ json.dump(filepaths, f)
+
+
+class AudioProcConfigFile(object):
+ """Data access to load/save APM simulator argument lists.
+
+ The arguments stored in the config files are used to control the APM flags.
+ """
+
+ def __init__(self):
+ pass
+
+ @classmethod
+ def Load(cls, filepath):
+ """Loads a configuration file for an APM simulator.
+
+ Args:
+ filepath: path to the configuration file.
+
+ Returns:
+ A dict containing the configuration.
+ """
+ with open(filepath) as f:
+ return json.load(f)
+
+ @classmethod
+ def Save(cls, filepath, config):
+ """Saves a configuration file for an APM simulator.
+
+ Args:
+ filepath: path to the configuration file.
+ config: a dict containing the configuration.
+ """
+ with open(filepath, 'w') as f:
+ json.dump(config, f)
+
+
+class ScoreFile(object):
+ """Data access class to save and load float scalar scores.
+ """
+
+ def __init__(self):
+ pass
+
+ @classmethod
+ def Load(cls, filepath):
+ """Loads a score from file.
+
+ Args:
+ filepath: path to the score file.
+
+ Returns:
+ A float encoding the score.
+ """
+ with open(filepath) as f:
+ return float(f.readline().strip())
+
+ @classmethod
+ def Save(cls, filepath, score):
+ """Saves a score into a file.
+
+ Args:
+ filepath: path to the score file.
+ score: float encoding the score.
+ """
+ with open(filepath, 'w') as f:
+ f.write('{0:f}\n'.format(score))
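+
+
+# Example round trip (a minimal sketch; the file path is hypothetical):
+#   ScoreFile.Save('/tmp/score.txt', 3.5)
+#   assert ScoreFile.Load('/tmp/score.txt') == 3.5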
diff --git a/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/echo_path_simulation.py b/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/echo_path_simulation.py
new file mode 100644
index 0000000000..65903ea32d
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/echo_path_simulation.py
@@ -0,0 +1,136 @@
+# Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+"""Echo path simulation module.
+"""
+
+import hashlib
+import os
+
+from . import signal_processing
+
+
+class EchoPathSimulator(object):
+ """Abstract class for the echo path simulators.
+
+ In general, an echo path simulator is a function of the render signal and
+ simulates the propagation of the latter into the microphone (e.g., due to
+ mechanical or electrical paths).
+ """
+
+ NAME = None
+ REGISTERED_CLASSES = {}
+
+ def __init__(self):
+ pass
+
+ def Simulate(self, output_path):
+ """Creates the echo signal and stores it in an audio file (abstract method).
+
+ Args:
+ output_path: Path in which any output can be saved.
+
+ Returns:
+ Path to the generated audio track file or None if no echo is present.
+ """
+ raise NotImplementedError()
+
+ @classmethod
+ def RegisterClass(cls, class_to_register):
+ """Registers an EchoPathSimulator implementation.
+
+ Decorator to automatically register the classes that extend
+ EchoPathSimulator.
+ Example usage:
+
+ @EchoPathSimulator.RegisterClass
+ class NoEchoPathSimulator(EchoPathSimulator):
+ pass
+ """
+ cls.REGISTERED_CLASSES[class_to_register.NAME] = class_to_register
+ return class_to_register
+
+
+@EchoPathSimulator.RegisterClass
+class NoEchoPathSimulator(EchoPathSimulator):
+ """Simulates absence of echo."""
+
+ NAME = 'noecho'
+
+ def __init__(self):
+ EchoPathSimulator.__init__(self)
+
+ def Simulate(self, output_path):
+ return None
+
+
+@EchoPathSimulator.RegisterClass
+class LinearEchoPathSimulator(EchoPathSimulator):
+ """Simulates linear echo path.
+
+ This class applies a given impulse response to the render input; the
+ resulting echo signal is then summed with the capture input signal.
+ """
+
+ NAME = 'linear'
+
+ def __init__(self, render_input_filepath, impulse_response):
+ """
+ Args:
+ render_input_filepath: Render audio track file.
+ impulse_response: list or numpy vector of float values.
+ """
+ EchoPathSimulator.__init__(self)
+ self._render_input_filepath = render_input_filepath
+ self._impulse_response = impulse_response
+
+ def Simulate(self, output_path):
+ """Simulates linear echo path."""
+ # Form the file name with a hash of the impulse response.
+ impulse_response_hash = hashlib.sha256(
+ str(self._impulse_response).encode('utf-8', 'ignore')).hexdigest()
+ echo_filepath = os.path.join(
+ output_path, 'linear_echo_{}.wav'.format(impulse_response_hash))
+
+ # If the simulated echo audio track file does not exist, create it.
+ if not os.path.exists(echo_filepath):
+ render = signal_processing.SignalProcessingUtils.LoadWav(
+ self._render_input_filepath)
+ echo = signal_processing.SignalProcessingUtils.ApplyImpulseResponse(
+ render, self._impulse_response)
+ signal_processing.SignalProcessingUtils.SaveWav(
+ echo_filepath, echo)
+
+ return echo_filepath
+
+
+@EchoPathSimulator.RegisterClass
+class RecordedEchoPathSimulator(EchoPathSimulator):
+ """Uses recorded echo.
+
+ This class uses the render input file name to build the file name of the
+ corresponding recording containing echo (a predefined suffix is appended).
+ Such a file is expected to already exist.
+ """
+
+ NAME = 'recorded'
+
+ _FILE_NAME_SUFFIX = '_echo'
+
+ def __init__(self, render_input_filepath):
+ EchoPathSimulator.__init__(self)
+ self._render_input_filepath = render_input_filepath
+
+ def Simulate(self, output_path):
+ """Uses recorded echo path."""
+ path, file_name_ext = os.path.split(self._render_input_filepath)
+ file_name, file_ext = os.path.splitext(file_name_ext)
+ echo_filepath = os.path.join(
+ path, '{}{}{}'.format(file_name, self._FILE_NAME_SUFFIX, file_ext))
+ assert os.path.exists(echo_filepath), (
+ 'cannot find the echo audio track file {}'.format(echo_filepath))
+ return echo_filepath
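+
+
+# Example (a minimal sketch): simulators can be looked up by NAME through the
+# registry populated by the RegisterClass decorator.
+#   simulator_class = EchoPathSimulator.REGISTERED_CLASSES['noecho']
+#   assert simulator_class().Simulate('/tmp') is None  # No echo generated.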
diff --git a/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/echo_path_simulation_factory.py b/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/echo_path_simulation_factory.py
new file mode 100644
index 0000000000..4b46b36b47
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/echo_path_simulation_factory.py
@@ -0,0 +1,48 @@
+# Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+"""Echo path simulation factory module.
+"""
+
+import numpy as np
+
+from . import echo_path_simulation
+
+
+class EchoPathSimulatorFactory(object):
+
+ # TODO(alessiob): Replace 20 ms delay (at 48 kHz sample rate) with a more
+ # realistic impulse response.
+ _LINEAR_ECHO_IMPULSE_RESPONSE = np.array([0.0] * (20 * 48) + [0.15])
+
+ def __init__(self):
+ pass
+
+ @classmethod
+ def GetInstance(cls, echo_path_simulator_class, render_input_filepath):
+ """Creates an EchoPathSimulator instance given a class object.
+
+ Args:
+ echo_path_simulator_class: EchoPathSimulator class object (not an
+ instance).
+ render_input_filepath: Path to the render audio track file.
+
+ Returns:
+ An EchoPathSimulator instance.
+ """
+ assert render_input_filepath is not None or (
+ echo_path_simulator_class ==
+ echo_path_simulation.NoEchoPathSimulator)
+
+ if echo_path_simulator_class == echo_path_simulation.NoEchoPathSimulator:
+ return echo_path_simulation.NoEchoPathSimulator()
+ elif echo_path_simulator_class == (
+ echo_path_simulation.LinearEchoPathSimulator):
+ return echo_path_simulation.LinearEchoPathSimulator(
+ render_input_filepath, cls._LINEAR_ECHO_IMPULSE_RESPONSE)
+ else:
+ return echo_path_simulator_class(render_input_filepath)
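+
+
+# Example usage (a minimal sketch; the render file path is hypothetical):
+#   factory = EchoPathSimulatorFactory()
+#   simulator = factory.GetInstance(
+#       echo_path_simulation.LinearEchoPathSimulator, 'render.wav')
+#   echo_filepath = simulator.Simulate('/tmp')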
diff --git a/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/echo_path_simulation_unittest.py b/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/echo_path_simulation_unittest.py
new file mode 100644
index 0000000000..b6cc8abdde
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/echo_path_simulation_unittest.py
@@ -0,0 +1,82 @@
+# Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+"""Unit tests for the echo path simulation module.
+"""
+
+import shutil
+import os
+import tempfile
+import unittest
+
+import pydub
+
+from . import echo_path_simulation
+from . import echo_path_simulation_factory
+from . import signal_processing
+
+
+class TestEchoPathSimulators(unittest.TestCase):
+ """Unit tests for the eval_scores module.
+ """
+
+ def setUp(self):
+ """Creates temporary data."""
+ self._tmp_path = tempfile.mkdtemp()
+
+ # Create and save white noise.
+ silence = pydub.AudioSegment.silent(duration=1000, frame_rate=48000)
+ white_noise = signal_processing.SignalProcessingUtils.GenerateWhiteNoise(
+ silence)
+ self._audio_track_num_samples = (
+ signal_processing.SignalProcessingUtils.CountSamples(white_noise))
+ self._audio_track_filepath = os.path.join(self._tmp_path,
+ 'white_noise.wav')
+ signal_processing.SignalProcessingUtils.SaveWav(
+ self._audio_track_filepath, white_noise)
+
+ # Make a copy of the white noise audio track file; it will be used by
+ # echo_path_simulation.RecordedEchoPathSimulator.
+ shutil.copy(self._audio_track_filepath,
+ os.path.join(self._tmp_path, 'white_noise_echo.wav'))
+
+ def tearDown(self):
+ """Recursively deletes temporary folders."""
+ shutil.rmtree(self._tmp_path)
+
+ def testRegisteredClasses(self):
+ # Check that there is at least one registered echo path simulator.
+ registered_classes = (
+ echo_path_simulation.EchoPathSimulator.REGISTERED_CLASSES)
+ self.assertIsInstance(registered_classes, dict)
+ self.assertGreater(len(registered_classes), 0)
+
+ # Instantiate the factory.
+ factory = echo_path_simulation_factory.EchoPathSimulatorFactory()
+
+ # Try each registered echo path simulator.
+ for echo_path_simulator_name in registered_classes:
+ simulator = factory.GetInstance(
+ echo_path_simulator_class=registered_classes[
+ echo_path_simulator_name],
+ render_input_filepath=self._audio_track_filepath)
+
+ echo_filepath = simulator.Simulate(self._tmp_path)
+ if echo_filepath is None:
+ self.assertEqual(echo_path_simulation.NoEchoPathSimulator.NAME,
+ echo_path_simulator_name)
+ # No other tests in this case.
+ continue
+
+ # Check that the echo audio track file exists and that its length is
+ # greater than or equal to that of the render audio track.
+ self.assertTrue(os.path.exists(echo_filepath))
+ echo = signal_processing.SignalProcessingUtils.LoadWav(
+ echo_filepath)
+ self.assertGreaterEqual(
+ signal_processing.SignalProcessingUtils.CountSamples(echo),
+ self._audio_track_num_samples)
diff --git a/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/eval_scores.py b/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/eval_scores.py
new file mode 100644
index 0000000000..59c5f74be4
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/eval_scores.py
@@ -0,0 +1,427 @@
+# Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+"""Evaluation score abstract class and implementations.
+"""
+
+from __future__ import division
+import logging
+import os
+import re
+import subprocess
+import sys
+
+try:
+ import numpy as np
+except ImportError:
+ logging.critical('Cannot import the third-party Python package numpy')
+ sys.exit(1)
+
+from . import data_access
+from . import exceptions
+from . import signal_processing
+
+
+class EvaluationScore(object):
+ """Abstract class for the evaluation scores."""
+
+ NAME = None
+ REGISTERED_CLASSES = {}
+
+ def __init__(self, score_filename_prefix):
+ self._score_filename_prefix = score_filename_prefix
+ self._input_signal_metadata = None
+ self._reference_signal = None
+ self._reference_signal_filepath = None
+ self._tested_signal = None
+ self._tested_signal_filepath = None
+ self._output_filepath = None
+ self._score = None
+ self._render_signal_filepath = None
+
+ @classmethod
+ def RegisterClass(cls, class_to_register):
+ """Registers an EvaluationScore implementation.
+
+ Decorator to automatically register the classes that extend EvaluationScore.
+ Example usage:
+
+ @EvaluationScore.RegisterClass
+ class AudioLevelScore(EvaluationScore):
+ pass
+ """
+ cls.REGISTERED_CLASSES[class_to_register.NAME] = class_to_register
+ return class_to_register
+
+ @property
+ def output_filepath(self):
+ return self._output_filepath
+
+ @property
+ def score(self):
+ return self._score
+
+ def SetInputSignalMetadata(self, metadata):
+ """Sets input signal metadata.
+
+ Args:
+ metadata: dict instance.
+ """
+ self._input_signal_metadata = metadata
+
+ def SetReferenceSignalFilepath(self, filepath):
+ """Sets the path to the audio track used as reference signal.
+
+ Args:
+ filepath: path to the reference audio track.
+ """
+ self._reference_signal_filepath = filepath
+
+ def SetTestedSignalFilepath(self, filepath):
+ """Sets the path to the audio track used as test signal.
+
+ Args:
+ filepath: path to the test audio track.
+ """
+ self._tested_signal_filepath = filepath
+
+ def SetRenderSignalFilepath(self, filepath):
+ """Sets the path to the audio track used as render signal.
+
+ Args:
+ filepath: path to the render audio track.
+ """
+ self._render_signal_filepath = filepath
+
+ def Run(self, output_path):
+ """Extracts the score for the set test data pair.
+
+ Args:
+ output_path: path to the directory where the output is written.
+ """
+ self._output_filepath = os.path.join(
+ output_path, self._score_filename_prefix + self.NAME + '.txt')
+ try:
+ # If the score has already been computed, load.
+ self._score = self._LoadScore()
+ logging.debug('score found and loaded')
+ except IOError:
+ # Compute the score.
+ logging.debug('score not found, compute')
+ self._Run(output_path)
+
+ def _Run(self, output_path):
+ # Abstract method.
+ raise NotImplementedError()
+
+ def _LoadReferenceSignal(self):
+ assert self._reference_signal_filepath is not None
+ self._reference_signal = signal_processing.SignalProcessingUtils.LoadWav(
+ self._reference_signal_filepath)
+
+ def _LoadTestedSignal(self):
+ assert self._tested_signal_filepath is not None
+ self._tested_signal = signal_processing.SignalProcessingUtils.LoadWav(
+ self._tested_signal_filepath)
+
+ def _LoadScore(self):
+ return data_access.ScoreFile.Load(self._output_filepath)
+
+ def _SaveScore(self):
+ return data_access.ScoreFile.Save(self._output_filepath, self._score)
+
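+# Example (a minimal sketch): a custom evaluation score only needs a NAME and
+# a _Run() implementation that sets `self._score` and saves it. `DummyScore`
+# is hypothetical and not part of this module.
+#
+#   @EvaluationScore.RegisterClass
+#   class DummyScore(EvaluationScore):
+#     NAME = 'dummy'
+#
+#     def _Run(self, output_path):
+#       self._score = 0.0
+#       self._SaveScore()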
+
+@EvaluationScore.RegisterClass
+class AudioLevelPeakScore(EvaluationScore):
+ """Peak audio level score.
+
+ Defined as the difference between the peak audio level of the tested and
+ the reference signals.
+
+ Unit: dB
+ Ideal: 0 dB
+ Worst case: +/-inf dB
+ """
+
+ NAME = 'audio_level_peak'
+
+ def __init__(self, score_filename_prefix):
+ EvaluationScore.__init__(self, score_filename_prefix)
+
+ def _Run(self, output_path):
+ self._LoadReferenceSignal()
+ self._LoadTestedSignal()
+ self._score = self._tested_signal.dBFS - self._reference_signal.dBFS
+ self._SaveScore()
+
+
+@EvaluationScore.RegisterClass
+class MeanAudioLevelScore(EvaluationScore):
+ """Mean audio level score.
+
+ Defined as the difference between the mean audio level of the tested and
+ the reference signals.
+
+ Unit: dB
+ Ideal: 0 dB
+ Worst case: +/-inf dB
+ """
+
+ NAME = 'audio_level_mean'
+
+ def __init__(self, score_filename_prefix):
+ EvaluationScore.__init__(self, score_filename_prefix)
+
+ def _Run(self, output_path):
+ self._LoadReferenceSignal()
+ self._LoadTestedSignal()
+
+ dbfs_diffs_sum = 0.0
+ seconds = min(len(self._tested_signal),
+ len(self._reference_signal)) // 1000
+ # Average the level difference over 1 second long chunks (pydub
+ # AudioSegment lengths are in milliseconds).
+ for t in range(seconds):
+ t0 = t * 1000
+ t1 = t0 + 1000
+ dbfs_diffs_sum += (self._tested_signal[t0:t1].dBFS -
+ self._reference_signal[t0:t1].dBFS)
+ self._score = dbfs_diffs_sum / float(seconds)
+ self._SaveScore()
+
+
+@EvaluationScore.RegisterClass
+class EchoMetric(EvaluationScore):
+ """Echo score.
+
+ Proportion of detected echo.
+
+ Unit: ratio
+ Ideal: 0
+ Worst case: 1
+ """
+
+ NAME = 'echo_metric'
+
+ def __init__(self, score_filename_prefix, echo_detector_bin_filepath):
+ EvaluationScore.__init__(self, score_filename_prefix)
+
+ # Echo detector binary file path.
+ self._echo_detector_bin_filepath = echo_detector_bin_filepath
+ if not os.path.exists(self._echo_detector_bin_filepath):
+ logging.error('cannot find EchoMetric tool binary file')
+ raise exceptions.FileNotFoundError()
+
+ self._echo_detector_bin_path, _ = os.path.split(
+ self._echo_detector_bin_filepath)
+
+ def _Run(self, output_path):
+ echo_detector_out_filepath = os.path.join(output_path,
+ 'echo_detector.out')
+ if os.path.exists(echo_detector_out_filepath):
+ os.unlink(echo_detector_out_filepath)
+
+ logging.debug("Render signal filepath: %s",
+ self._render_signal_filepath)
+ if not os.path.exists(self._render_signal_filepath):
+ logging.error(
+ "Render input required for evaluating the echo metric.")
+
+ args = [
+ self._echo_detector_bin_filepath, '--output_file',
+ echo_detector_out_filepath, '--', '-i',
+ self._tested_signal_filepath, '-ri', self._render_signal_filepath
+ ]
+ logging.debug(' '.join(args))
+ subprocess.call(args, cwd=self._echo_detector_bin_path)
+
+ # Parse Echo detector tool output and extract the score.
+ self._score = self._ParseOutputFile(echo_detector_out_filepath)
+ self._SaveScore()
+
+ @classmethod
+ def _ParseOutputFile(cls, echo_metric_file_path):
+ """
+ Parses the POLQA tool output formatted as a table ('-t' option).
+
+ Args:
+ polqa_out_filepath: path to the POLQA tool output file.
+
+ Returns:
+ The score as a number in [0, 1].
+ """
+ with open(echo_metric_file_path) as f:
+ return float(f.read())
+
+
+@EvaluationScore.RegisterClass
+class PolqaScore(EvaluationScore):
+ """POLQA score.
+
+ See http://www.polqa.info/.
+
+ Unit: MOS
+ Ideal: 4.5
+ Worst case: 1.0
+ """
+
+ NAME = 'polqa'
+
+ def __init__(self, score_filename_prefix, polqa_bin_filepath):
+ EvaluationScore.__init__(self, score_filename_prefix)
+
+ # POLQA binary file path.
+ self._polqa_bin_filepath = polqa_bin_filepath
+ if not os.path.exists(self._polqa_bin_filepath):
+ logging.error('cannot find POLQA tool binary file')
+ raise exceptions.FileNotFoundError()
+
+ # Path to the POLQA directory with binary and license files.
+ self._polqa_tool_path, _ = os.path.split(self._polqa_bin_filepath)
+
+ def _Run(self, output_path):
+ polqa_out_filepath = os.path.join(output_path, 'polqa.out')
+ if os.path.exists(polqa_out_filepath):
+ os.unlink(polqa_out_filepath)
+
+ args = [
+ self._polqa_bin_filepath,
+ '-t',
+ '-q',
+ '-Overwrite',
+ '-Ref',
+ self._reference_signal_filepath,
+ '-Test',
+ self._tested_signal_filepath,
+ '-LC',
+ 'NB',
+ '-Out',
+ polqa_out_filepath,
+ ]
+ logging.debug(' '.join(args))
+ subprocess.call(args, cwd=self._polqa_tool_path)
+
+ # Parse POLQA tool output and extract the score.
+ polqa_output = self._ParseOutputFile(polqa_out_filepath)
+ self._score = float(polqa_output['PolqaScore'])
+
+ self._SaveScore()
+
+ @classmethod
+ def _ParseOutputFile(cls, polqa_out_filepath):
+ """
+ Parses the POLQA tool output formatted as a table ('-t' option).
+
+ Args:
+ polqa_out_filepath: path to the POLQA tool output file.
+
+ Returns:
+ A dict.
+ """
+ data = []
+ with open(polqa_out_filepath) as f:
+ for line in f:
+ line = line.strip()
+ if len(line) == 0 or line.startswith('*'):
+ # Ignore comments.
+ continue
+ # Read fields.
+ data.append(re.split(r'\t+', line))
+
+ # Two rows expected (header and values).
+ assert len(data) == 2, 'Cannot parse POLQA output'
+ number_of_fields = len(data[0])
+ assert number_of_fields == len(data[1])
+
+ # Build and return a dictionary with field names (header) as keys and the
+ # corresponding field values as values.
+ return {
+ data[0][index]: data[1][index]
+ for index in range(number_of_fields)
+ }
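+
+ # Note: the '-t' output parsed above consists of two tab-separated rows
+ # (header and values) and must include a 'PolqaScore' column. Illustrative
+ # example (the other column name is hypothetical):
+ #   MOS\tPolqaScore\n
+ #   4.10\t3.50\n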
+
+
+@EvaluationScore.RegisterClass
+class TotalHarmonicDistorsionScore(EvaluationScore):
+ """Total harmonic distorsion plus noise score.
+
+ Total harmonic distorsion plus noise score.
+ See "https://en.wikipedia.org/wiki/Total_harmonic_distortion#THD.2BN".
+
+ Unit: -.
+ Ideal: 0.
+ Worst case: +inf
+ """
+
+ NAME = 'thd'
+
+ def __init__(self, score_filename_prefix):
+ EvaluationScore.__init__(self, score_filename_prefix)
+ self._input_frequency = None
+
+ def _Run(self, output_path):
+ self._CheckInputSignal()
+
+ self._LoadTestedSignal()
+ if self._tested_signal.channels != 1:
+ raise exceptions.EvaluationScoreException(
+ 'unsupported number of channels')
+ samples = signal_processing.SignalProcessingUtils.AudioSegmentToRawData(
+ self._tested_signal)
+
+ # Init.
+ num_samples = len(samples)
+ duration = len(self._tested_signal) / 1000.0
+ scaling = 2.0 / num_samples
+ max_freq = self._tested_signal.frame_rate / 2
+ f0_freq = float(self._input_frequency)
+ t = np.linspace(0, duration, num_samples)
+
+ # Analyze harmonics.
+ b_terms = []
+ n = 1
+ while f0_freq * n < max_freq:
+ x_n = np.sum(
+ samples * np.sin(2.0 * np.pi * n * f0_freq * t)) * scaling
+ y_n = np.sum(
+ samples * np.cos(2.0 * np.pi * n * f0_freq * t)) * scaling
+ b_terms.append(np.sqrt(x_n**2 + y_n**2))
+ n += 1
+
+ output_without_fundamental = samples - b_terms[0] * np.sin(
+ 2.0 * np.pi * f0_freq * t)
+ distortion_and_noise = np.sqrt(
+ np.sum(output_without_fundamental**2) * np.pi * scaling)
+
+ # TODO(alessiob): Fix or remove if not needed.
+ # thd = np.sqrt(np.sum(b_terms[1:]**2)) / b_terms[0]
+
+ # TODO(alessiob): Check the range of `thd_plus_noise` and update the
+ # class docstring above accordingly.
+ thd_plus_noise = distortion_and_noise / b_terms[0]
+
+ self._score = thd_plus_noise
+ self._SaveScore()
+
+ def _CheckInputSignal(self):
+ # Check input signal and get properties.
+ try:
+ if self._input_signal_metadata['signal'] != 'pure_tone':
+ raise exceptions.EvaluationScoreException(
+ 'The THD score requires a pure tone as input signal')
+ self._input_frequency = self._input_signal_metadata['frequency']
+ if self._input_signal_metadata[
+ 'test_data_gen_name'] != 'identity' or (
+ self._input_signal_metadata['test_data_gen_config'] !=
+ 'default'):
+ raise exceptions.EvaluationScoreException(
+ 'The THD score cannot be used with any test data generator other '
+ 'than "identity"')
+ except TypeError:
+ raise exceptions.EvaluationScoreException(
+ 'The THD score requires an input signal with associated metadata'
+ )
+ except KeyError:
+ raise exceptions.EvaluationScoreException(
+ 'Invalid input signal metadata to compute the THD score')
diff --git a/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/eval_scores_factory.py b/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/eval_scores_factory.py
new file mode 100644
index 0000000000..5749a8924b
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/eval_scores_factory.py
@@ -0,0 +1,55 @@
+# Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+"""EvaluationScore factory class.
+"""
+
+import logging
+
+from . import exceptions
+from . import eval_scores
+
+
+class EvaluationScoreWorkerFactory(object):
+ """Factory class used to instantiate evaluation score workers.
+
+ The constructor receives the parameters used to instantiate the
+ evaluation score workers.
+ """
+
+ def __init__(self, polqa_tool_bin_path, echo_metric_tool_bin_path):
+ self._score_filename_prefix = None
+ self._polqa_tool_bin_path = polqa_tool_bin_path
+ self._echo_metric_tool_bin_path = echo_metric_tool_bin_path
+
+ def SetScoreFilenamePrefix(self, prefix):
+ self._score_filename_prefix = prefix
+
+ def GetInstance(self, evaluation_score_class):
+ """Creates an EvaluationScore instance given a class object.
+
+ Args:
+ evaluation_score_class: EvaluationScore class object (not an instance).
+
+ Returns:
+ An EvaluationScore instance.
+ """
+ if self._score_filename_prefix is None:
+ raise exceptions.InitializationException(
+ 'The score file name prefix for evaluation score workers is not set'
+ )
+ logging.debug('factory producing a %s evaluation score',
+ evaluation_score_class)
+
+ if evaluation_score_class == eval_scores.PolqaScore:
+ return eval_scores.PolqaScore(self._score_filename_prefix,
+ self._polqa_tool_bin_path)
+ elif evaluation_score_class == eval_scores.EchoMetric:
+ return eval_scores.EchoMetric(self._score_filename_prefix,
+ self._echo_metric_tool_bin_path)
+ else:
+ return evaluation_score_class(self._score_filename_prefix)
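+
+
+# Example usage (a minimal sketch; the POLQA tool path is hypothetical):
+#   factory = EvaluationScoreWorkerFactory(
+#       polqa_tool_bin_path='/path/to/PolqaOem64',
+#       echo_metric_tool_bin_path=None)
+#   factory.SetScoreFilenamePrefix('scores-')
+#   worker = factory.GetInstance(eval_scores.AudioLevelPeakScore)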
diff --git a/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/eval_scores_unittest.py b/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/eval_scores_unittest.py
new file mode 100644
index 0000000000..12e043320e
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/eval_scores_unittest.py
@@ -0,0 +1,137 @@
+# Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+"""Unit tests for the eval_scores module.
+"""
+
+import os
+import shutil
+import tempfile
+import unittest
+
+import pydub
+
+from . import data_access
+from . import eval_scores
+from . import eval_scores_factory
+from . import signal_processing
+
+
+class TestEvalScores(unittest.TestCase):
+ """Unit tests for the eval_scores module.
+ """
+
+ def setUp(self):
+ """Create temporary output folder and two audio track files."""
+ self._output_path = tempfile.mkdtemp()
+
+ # Create fake reference and tested (i.e., APM output) audio track files.
+ silence = pydub.AudioSegment.silent(duration=1000, frame_rate=48000)
+ fake_reference_signal = (signal_processing.SignalProcessingUtils.
+ GenerateWhiteNoise(silence))
+ fake_tested_signal = (signal_processing.SignalProcessingUtils.
+ GenerateWhiteNoise(silence))
+
+ # Save fake audio tracks.
+ self._fake_reference_signal_filepath = os.path.join(
+ self._output_path, 'fake_ref.wav')
+ signal_processing.SignalProcessingUtils.SaveWav(
+ self._fake_reference_signal_filepath, fake_reference_signal)
+ self._fake_tested_signal_filepath = os.path.join(
+ self._output_path, 'fake_test.wav')
+ signal_processing.SignalProcessingUtils.SaveWav(
+ self._fake_tested_signal_filepath, fake_tested_signal)
+
+ def tearDown(self):
+ """Recursively delete temporary folder."""
+ shutil.rmtree(self._output_path)
+
+ def testRegisteredClasses(self):
+ # Evaluation score names to exclude (tested separately).
+ exceptions = ['thd', 'echo_metric']
+
+ # Preliminary check.
+ self.assertTrue(os.path.exists(self._output_path))
+
+ # Check that there is at least one registered evaluation score worker.
+ registered_classes = eval_scores.EvaluationScore.REGISTERED_CLASSES
+ self.assertIsInstance(registered_classes, dict)
+ self.assertGreater(len(registered_classes), 0)
+
+ # Instantiate the evaluation score workers factory with fake
+ # dependencies.
+ eval_score_workers_factory = (
+ eval_scores_factory.EvaluationScoreWorkerFactory(
+ polqa_tool_bin_path=os.path.join(
+ os.path.dirname(os.path.abspath(__file__)), 'fake_polqa'),
+ echo_metric_tool_bin_path=None))
+ eval_score_workers_factory.SetScoreFilenamePrefix('scores-')
+
+ # Try each registered evaluation score worker.
+ for eval_score_name in registered_classes:
+ if eval_score_name in exceptions:
+ continue
+
+ # Instantiate the evaluation score worker.
+ eval_score_worker = eval_score_workers_factory.GetInstance(
+ registered_classes[eval_score_name])
+
+ # Set fake input metadata and reference and test file paths, then run.
+ eval_score_worker.SetReferenceSignalFilepath(
+ self._fake_reference_signal_filepath)
+ eval_score_worker.SetTestedSignalFilepath(
+ self._fake_tested_signal_filepath)
+ eval_score_worker.Run(self._output_path)
+
+ # Check output.
+ score = data_access.ScoreFile.Load(
+ eval_score_worker.output_filepath)
+ self.assertIsInstance(score, float)
+
+ def testTotalHarmonicDistorsionScore(self):
+ # Init.
+ pure_tone_freq = 5000.0
+ eval_score_worker = eval_scores.TotalHarmonicDistorsionScore('scores-')
+ eval_score_worker.SetInputSignalMetadata({
+ 'signal':
+ 'pure_tone',
+ 'frequency':
+ pure_tone_freq,
+ 'test_data_gen_name':
+ 'identity',
+ 'test_data_gen_config':
+ 'default',
+ })
+ template = pydub.AudioSegment.silent(duration=1000, frame_rate=48000)
+
+ # Create 3 test signals: pure tone, pure tone + white noise, white noise
+ # only.
+ pure_tone = signal_processing.SignalProcessingUtils.GeneratePureTone(
+ template, pure_tone_freq)
+ white_noise = signal_processing.SignalProcessingUtils.GenerateWhiteNoise(
+ template)
+ noisy_tone = signal_processing.SignalProcessingUtils.MixSignals(
+ pure_tone, white_noise)
+
+ # Compute scores for increasingly distorted pure tone signals.
+ scores = [None, None, None]
+ for index, tested_signal in enumerate(
+ [pure_tone, noisy_tone, white_noise]):
+ # Save signal.
+ tmp_filepath = os.path.join(self._output_path, 'tmp_thd.wav')
+ signal_processing.SignalProcessingUtils.SaveWav(
+ tmp_filepath, tested_signal)
+
+ # Compute score.
+ eval_score_worker.SetTestedSignalFilepath(tmp_filepath)
+ eval_score_worker.Run(self._output_path)
+ scores[index] = eval_score_worker.score
+
+ # Remove output file to avoid caching.
+ os.remove(eval_score_worker.output_filepath)
+
+ # Validate scores (lowest score with a pure tone).
+ self.assertTrue(all([scores[i + 1] > scores[i] for i in range(2)]))
diff --git a/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/evaluation.py b/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/evaluation.py
new file mode 100644
index 0000000000..2599085329
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/evaluation.py
@@ -0,0 +1,57 @@
+# Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+"""Evaluator of the APM module.
+"""
+
+import logging
+
+
+class ApmModuleEvaluator(object):
+ """APM evaluator class.
+ """
+
+ def __init__(self):
+ pass
+
+ @classmethod
+ def Run(cls, evaluation_score_workers, apm_input_metadata,
+ apm_output_filepath, reference_input_filepath,
+ render_input_filepath, output_path):
+ """Runs the evaluation.
+
+ Iterates over the given evaluation score workers.
+
+ Args:
+ evaluation_score_workers: list of EvaluationScore instances.
+ apm_input_metadata: dictionary with metadata of the APM input.
+ apm_output_filepath: path to the audio track file with the APM output.
+ reference_input_filepath: path to the reference audio track file.
+ render_input_filepath: path to the render audio track file.
+ output_path: output path.
+
+ Returns:
+ A dict of evaluation score name and score pairs.
+ """
+ # Init.
+ scores = {}
+
+ for evaluation_score_worker in evaluation_score_workers:
+ logging.info(' computing <%s> score',
+ evaluation_score_worker.NAME)
+ evaluation_score_worker.SetInputSignalMetadata(apm_input_metadata)
+ evaluation_score_worker.SetReferenceSignalFilepath(
+ reference_input_filepath)
+ evaluation_score_worker.SetTestedSignalFilepath(
+ apm_output_filepath)
+ evaluation_score_worker.SetRenderSignalFilepath(
+ render_input_filepath)
+
+ evaluation_score_worker.Run(output_path)
+ scores[
+ evaluation_score_worker.NAME] = evaluation_score_worker.score
+
+ return scores
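+
+
+# Example usage (a minimal sketch; workers and file paths are hypothetical):
+#   scores = ApmModuleEvaluator.Run(
+#       evaluation_score_workers=[worker],
+#       apm_input_metadata={},
+#       apm_output_filepath='output.wav',
+#       reference_input_filepath='reference.wav',
+#       render_input_filepath=None,
+#       output_path='/tmp/eval_out')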
diff --git a/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/exceptions.py b/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/exceptions.py
new file mode 100644
index 0000000000..893901d359
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/exceptions.py
@@ -0,0 +1,45 @@
+# Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+"""Exception classes.
+"""
+
+
+class FileNotFoundError(Exception):
+ """File not found exception.
+
+ Note: shadows the Python 3 built-in FileNotFoundError in this package.
+ """
+ pass
+
+
+class SignalProcessingException(Exception):
+ """Signal processing exception.
+ """
+ pass
+
+
+class InputMixerException(Exception):
+ """Input mixer exception.
+ """
+ pass
+
+
+class InputSignalCreatorException(Exception):
+ """Input signal creator exception.
+ """
+ pass
+
+
+class EvaluationScoreException(Exception):
+ """Evaluation score exception.
+ """
+ pass
+
+
+class InitializationException(Exception):
+ """Initialization exception.
+ """
+ pass
diff --git a/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/export.py b/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/export.py
new file mode 100644
index 0000000000..fe3a6c7cb9
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/export.py
@@ -0,0 +1,426 @@
+# Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+import functools
+import hashlib
+import logging
+import os
+import re
+import sys
+
+try:
+ import csscompressor
+except ImportError:
+ logging.critical(
+ 'Cannot import the third-party Python package csscompressor')
+ sys.exit(1)
+
+try:
+ import jsmin
+except ImportError:
+ logging.critical('Cannot import the third-party Python package jsmin')
+ sys.exit(1)
+
+
+class HtmlExport(object):
+ """HTML exporter class for APM quality scores."""
+
+ _NEW_LINE = '\n'
+
+ # CSS and JS file paths.
+ _PATH = os.path.dirname(os.path.realpath(__file__))
+ _CSS_FILEPATH = os.path.join(_PATH, 'results.css')
+ _CSS_MINIFIED = True
+ _JS_FILEPATH = os.path.join(_PATH, 'results.js')
+ _JS_MINIFIED = True
+
+ def __init__(self, output_filepath):
+ self._scores_data_frame = None
+ self._output_filepath = output_filepath
+
+ def Export(self, scores_data_frame):
+ """Exports scores into an HTML file.
+
+ Args:
+ scores_data_frame: DataFrame instance.
+ """
+ self._scores_data_frame = scores_data_frame
+ html = [
+ '<html>',
+ self._BuildHeader(),
+ ('<script type="text/javascript">'
+ '(function () {'
+ 'window.addEventListener(\'load\', function () {'
+ 'var inspector = new AudioInspector();'
+ '});'
+ '})();'
+ '</script>'), '<body>',
+ self._BuildBody(), '</body>', '</html>'
+ ]
+ self._Save(self._output_filepath, self._NEW_LINE.join(html))
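+
+ # Example usage (a minimal sketch; `scores_data_frame` is typically the
+ # DataFrame built by collect_data.FindScores):
+ #   exporter = HtmlExport('/tmp/results.html')
+ #   exporter.Export(scores_data_frame)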
+
+ def _BuildHeader(self):
+ """Builds the <head> section of the HTML file.
+
+ The header contains the page title, the links to the hosted Material
+ Design assets, and the embedded custom CSS and JS.
+
+ Returns:
+ A string with <head>...</head> HTML.
+ """
+ html = ['<head>', '<title>Results</title>']
+
+ # Add Material Design hosted libs.
+ html.append('<link rel="stylesheet" href="http://fonts.googleapis.com/'
+ 'css?family=Roboto:300,400,500,700" type="text/css">')
+ html.append(
+ '<link rel="stylesheet" href="https://fonts.googleapis.com/'
+ 'icon?family=Material+Icons">')
+ html.append(
+ '<link rel="stylesheet" href="https://code.getmdl.io/1.3.0/'
+ 'material.indigo-pink.min.css">')
+ html.append('<script defer src="https://code.getmdl.io/1.3.0/'
+ 'material.min.js"></script>')
+
+ # Embed custom JavaScript and CSS files.
+ html.append('<script>')
+ with open(self._JS_FILEPATH) as f:
+ html.append(
+ jsmin.jsmin(f.read()) if self._JS_MINIFIED else (
+ f.read().rstrip()))
+ html.append('</script>')
+ html.append('<style>')
+ with open(self._CSS_FILEPATH) as f:
+ html.append(
+ csscompressor.compress(f.read()) if self._CSS_MINIFIED else (
+ f.read().rstrip()))
+ html.append('</style>')
+
+ html.append('</head>')
+
+ return self._NEW_LINE.join(html)
+
+ def _BuildBody(self):
+ """Builds the content of the <body> section."""
+ score_names = self._scores_data_frame[
+ 'eval_score_name'].drop_duplicates().values.tolist()
+
+ html = [
+ ('<div class="mdl-layout mdl-js-layout mdl-layout--fixed-header '
+ 'mdl-layout--fixed-tabs">'),
+ '<header class="mdl-layout__header">',
+ '<div class="mdl-layout__header-row">',
+ '<span class="mdl-layout-title">APM QA results ({})</span>'.format(
+ self._output_filepath),
+ '</div>',
+ ]
+
+ # Tab selectors.
+ html.append('<div class="mdl-layout__tab-bar mdl-js-ripple-effect">')
+ for tab_index, score_name in enumerate(score_names):
+ is_active = tab_index == 0
+ html.append('<a href="#score-tab-{}" class="mdl-layout__tab{}">'
+ '{}</a>'.format(tab_index,
+ ' is-active' if is_active else '',
+ self._FormatName(score_name)))
+ html.append('</div>')
+
+ html.append('</header>')
+ html.append(
+ '<main class="mdl-layout__content" style="overflow-x: auto;">')
+
+ # Tabs content.
+ for tab_index, score_name in enumerate(score_names):
+ is_active = tab_index == 0
+ html.append('<section class="mdl-layout__tab-panel{}" '
+ 'id="score-tab-{}">'.format(
+ ' is-active' if is_active else '', tab_index))
+ html.append('<div class="page-content">')
+ html.append(
+ self._BuildScoreTab(score_name, ('s{}'.format(tab_index), )))
+ html.append('</div>')
+ html.append('</section>')
+
+ html.append('</main>')
+ html.append('</div>')
+
+ # Add snackbar for notifications.
+ html.append(
+ '<div id="snackbar" aria-live="assertive" aria-atomic="true"'
+ ' aria-relevant="text" class="mdl-snackbar mdl-js-snackbar">'
+ '<div class="mdl-snackbar__text"></div>'
+ '<button type="button" class="mdl-snackbar__action"></button>'
+ '</div>')
+
+ return self._NEW_LINE.join(html)
+
+ def _BuildScoreTab(self, score_name, anchor_data):
+ """Builds the content of a tab."""
+ # Find unique values.
+ scores = self._scores_data_frame[
+ self._scores_data_frame.eval_score_name == score_name]
+ apm_configs = sorted(self._FindUniqueTuples(scores, ['apm_config']))
+ test_data_gen_configs = sorted(
+ self._FindUniqueTuples(scores,
+ ['test_data_gen', 'test_data_gen_params']))
+
+ html = [
+ '<div class="mdl-grid">',
+ '<div class="mdl-layout-spacer"></div>',
+ '<div class="mdl-cell mdl-cell--10-col">',
+ ('<table class="mdl-data-table mdl-js-data-table mdl-shadow--2dp" '
+ 'style="width: 100%;">'),
+ ]
+
+ # Header.
+ html.append('<thead><tr><th>APM config / Test data generator</th>')
+ for test_data_gen_info in test_data_gen_configs:
+ html.append('<th>{} {}</th>'.format(
+ self._FormatName(test_data_gen_info[0]),
+ test_data_gen_info[1]))
+ html.append('</tr></thead>')
+
+ # Body.
+ html.append('<tbody>')
+ for apm_config in apm_configs:
+ html.append('<tr><td>' + self._FormatName(apm_config[0]) + '</td>')
+ for test_data_gen_info in test_data_gen_configs:
+ dialog_id = self._ScoreStatsInspectorDialogId(
+ score_name, apm_config[0], test_data_gen_info[0],
+ test_data_gen_info[1])
+ html.append(
+ '<td onclick="openScoreStatsInspector(\'{}\')">{}</td>'.
+ format(
+ dialog_id,
+ self._BuildScoreTableCell(score_name,
+ test_data_gen_info[0],
+ test_data_gen_info[1],
+ apm_config[0])))
+ html.append('</tr>')
+ html.append('</tbody>')
+
+ html.append(
+ '</table></div><div class="mdl-layout-spacer"></div></div>')
+
+ html.append(
+ self._BuildScoreStatsInspectorDialogs(score_name, apm_configs,
+ test_data_gen_configs,
+ anchor_data))
+
+ return self._NEW_LINE.join(html)
+
+ def _BuildScoreTableCell(self, score_name, test_data_gen,
+ test_data_gen_params, apm_config):
+ """Builds the content of a table cell for a score table."""
+ scores = self._SliceDataForScoreTableCell(score_name, apm_config,
+ test_data_gen,
+ test_data_gen_params)
+ stats = self._ComputeScoreStats(scores)
+
+ html = []
+ items_id_prefix = (score_name + test_data_gen + test_data_gen_params +
+ apm_config)
+ if stats['count'] == 1:
+ # Show the only available score.
+ item_id = hashlib.md5(items_id_prefix.encode('utf-8')).hexdigest()
+ html.append('<div id="single-value-{0}">{1:f}</div>'.format(
+ item_id, scores['score'].mean()))
+ html.append(
+ '<div class="mdl-tooltip" data-mdl-for="single-value-{}">{}'
+ '</div>'.format(item_id, 'single value'))
+ else:
+ # Show stats.
+ for stat_name in ['min', 'max', 'mean', 'std dev']:
+ item_id = hashlib.md5(
+ (items_id_prefix + stat_name).encode('utf-8')).hexdigest()
+ html.append('<div id="stats-{0}">{1:f}</div>'.format(
+ item_id, stats[stat_name]))
+ html.append(
+ '<div class="mdl-tooltip" data-mdl-for="stats-{}">{}'
+ '</div>'.format(item_id, stat_name))
+
+ return self._NEW_LINE.join(html)
+
+ def _BuildScoreStatsInspectorDialogs(self, score_name, apm_configs,
+ test_data_gen_configs, anchor_data):
+ """Builds a set of score stats inspector dialogs."""
+ html = []
+ for apm_config in apm_configs:
+ for test_data_gen_info in test_data_gen_configs:
+ dialog_id = self._ScoreStatsInspectorDialogId(
+ score_name, apm_config[0], test_data_gen_info[0],
+ test_data_gen_info[1])
+
+ html.append('<dialog class="mdl-dialog" id="{}" '
+ 'style="width: 40%;">'.format(dialog_id))
+
+ # Content.
+ html.append('<div class="mdl-dialog__content">')
+ html.append(
+ '<h6><strong>APM config preset</strong>: {}<br/>'
+ '<strong>Test data generator</strong>: {} ({})</h6>'.
+ format(self._FormatName(apm_config[0]),
+ self._FormatName(test_data_gen_info[0]),
+ test_data_gen_info[1]))
+ html.append(
+ self._BuildScoreStatsInspectorDialog(
+ score_name, apm_config[0], test_data_gen_info[0],
+ test_data_gen_info[1], anchor_data + (dialog_id, )))
+ html.append('</div>')
+
+ # Actions.
+ html.append('<div class="mdl-dialog__actions">')
+ html.append('<button type="button" class="mdl-button" '
+ 'onclick="closeScoreStatsInspector()">'
+ 'Close</button>')
+ html.append('</div>')
+
+ html.append('</dialog>')
+
+ return self._NEW_LINE.join(html)
+
+ def _BuildScoreStatsInspectorDialog(self, score_name, apm_config,
+ test_data_gen, test_data_gen_params,
+ anchor_data):
+ """Builds one score stats inspector dialog."""
+ scores = self._SliceDataForScoreTableCell(score_name, apm_config,
+ test_data_gen,
+ test_data_gen_params)
+
+ capture_render_pairs = sorted(
+ self._FindUniqueTuples(scores, ['capture', 'render']))
+ echo_simulators = sorted(
+ self._FindUniqueTuples(scores, ['echo_simulator']))
+
+ html = [
+ '<table class="mdl-data-table mdl-js-data-table mdl-shadow--2dp">'
+ ]
+
+ # Header.
+ html.append('<thead><tr><th>Capture-Render / Echo simulator</th>')
+ for echo_simulator in echo_simulators:
+ html.append('<th>' + self._FormatName(echo_simulator[0]) + '</th>')
+ html.append('</tr></thead>')
+
+ # Body.
+ html.append('<tbody>')
+ for row, (capture, render) in enumerate(capture_render_pairs):
+ html.append('<tr><td><div>{}</div><div>{}</div></td>'.format(
+ capture, render))
+ for col, echo_simulator in enumerate(echo_simulators):
+ score_tuple = self._SliceDataForScoreStatsTableCell(
+ scores, capture, render, echo_simulator[0])
+ cell_class = 'r{}c{}'.format(row, col)
+ html.append('<td class="single-score-cell {}">{}</td>'.format(
+ cell_class,
+ self._BuildScoreStatsInspectorTableCell(
+ score_tuple, anchor_data + (cell_class, ))))
+ html.append('</tr>')
+ html.append('</tbody>')
+
+ html.append('</table>')
+
+ # Placeholder for the audio inspector.
+ html.append('<div class="audio-inspector-placeholder"></div>')
+
+ return self._NEW_LINE.join(html)
+
+ def _BuildScoreStatsInspectorTableCell(self, score_tuple, anchor_data):
+ """Builds the content of a cell of a score stats inspector."""
+ anchor = '&'.join(anchor_data)
+ html = [('<div class="v">{}</div>'
+ '<button class="mdl-button mdl-js-button mdl-button--icon"'
+ ' data-anchor="{}">'
+ '<i class="material-icons mdl-color-text--blue-grey">link</i>'
+ '</button>').format(score_tuple.score, anchor)]
+
+ # Add all the available file paths as hidden data.
+ for field_name in score_tuple.keys():
+ if field_name.endswith('_filepath'):
+ html.append(
+ '<input type="hidden" name="{}" value="{}">'.format(
+ field_name, score_tuple[field_name]))
+
+ return self._NEW_LINE.join(html)
+
+ def _SliceDataForScoreTableCell(self, score_name, apm_config,
+ test_data_gen, test_data_gen_params):
+ """Slices `self._scores_data_frame` to extract the data for a tab."""
+ masks = []
+ masks.append(self._scores_data_frame.eval_score_name == score_name)
+ masks.append(self._scores_data_frame.apm_config == apm_config)
+ masks.append(self._scores_data_frame.test_data_gen == test_data_gen)
+ masks.append(self._scores_data_frame.test_data_gen_params ==
+ test_data_gen_params)
+ mask = functools.reduce((lambda i1, i2: i1 & i2), masks)
+ del masks
+ return self._scores_data_frame[mask]
+
+ @classmethod
+ def _SliceDataForScoreStatsTableCell(cls, scores, capture, render,
+ echo_simulator):
+ """Slices `scores` to extract the data for a tab."""
+ masks = []
+
+ masks.append(scores.capture == capture)
+ masks.append(scores.render == render)
+ masks.append(scores.echo_simulator == echo_simulator)
+ mask = functools.reduce((lambda i1, i2: i1 & i2), masks)
+ del masks
+
+ sliced_data = scores[mask]
+ assert len(sliced_data) == 1, 'single score is expected'
+ return sliced_data.iloc[0]
+
+ @classmethod
+ def _FindUniqueTuples(cls, data_frame, fields):
+ """Slices `data_frame` to a list of fields and finds unique tuples."""
+ return data_frame[fields].drop_duplicates().values.tolist()
+
+ @classmethod
+ def _ComputeScoreStats(cls, data_frame):
+ """Computes score stats."""
+ scores = data_frame['score']
+ return {
+ 'count': scores.count(),
+ 'min': scores.min(),
+ 'max': scores.max(),
+ 'mean': scores.mean(),
+ 'std dev': scores.std(),
+ }
+
+ @classmethod
+ def _ScoreStatsInspectorDialogId(cls, score_name, apm_config,
+ test_data_gen, test_data_gen_params):
+ """Assigns a unique name to a dialog."""
+ return 'score-stats-dialog-' + hashlib.md5(
+ 'score-stats-inspector-{}-{}-{}-{}'.format(
+ score_name, apm_config, test_data_gen,
+ test_data_gen_params).encode('utf-8')).hexdigest()
+
+ @classmethod
+ def _Save(cls, output_filepath, html):
+ """Writes the HTML file.
+
+ Args:
+ output_filepath: output file path.
+ html: string with the HTML content.
+ """
+ with open(output_filepath, 'w') as f:
+ f.write(html)
+
+ @classmethod
+ def _FormatName(cls, name):
+ """Formats a name.
+
+ Args:
+ name: a string.
+
+ Returns:
+ A copy of name in which underscores and dashes are replaced with a space.
+ """
+ return re.sub(r'[_\-]', ' ', name)
diff --git a/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/export_unittest.py b/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/export_unittest.py
new file mode 100644
index 0000000000..412aa7c4e7
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/export_unittest.py
@@ -0,0 +1,86 @@
+# Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+"""Unit tests for the export module.
+"""
+
+import logging
+import os
+import shutil
+import tempfile
+import unittest
+
+import pyquery as pq
+
+from . import audioproc_wrapper
+from . import collect_data
+from . import eval_scores_factory
+from . import evaluation
+from . import export
+from . import simulation
+from . import test_data_generation_factory
+
+
+class TestExport(unittest.TestCase):
+ """Unit tests for the export module.
+ """
+
+ _CLEAN_TMP_OUTPUT = True
+
+ def setUp(self):
+ """Creates temporary data to export."""
+ self._tmp_path = tempfile.mkdtemp()
+
+ # Run a fake experiment to produce data to export.
+ simulator = simulation.ApmModuleSimulator(
+ test_data_generator_factory=(
+ test_data_generation_factory.TestDataGeneratorFactory(
+ aechen_ir_database_path='',
+ noise_tracks_path='',
+ copy_with_identity=False)),
+ evaluation_score_factory=(
+ eval_scores_factory.EvaluationScoreWorkerFactory(
+ polqa_tool_bin_path=os.path.join(
+ os.path.dirname(os.path.abspath(__file__)),
+ 'fake_polqa'),
+ echo_metric_tool_bin_path=None)),
+ ap_wrapper=audioproc_wrapper.AudioProcWrapper(
+ audioproc_wrapper.AudioProcWrapper.
+ DEFAULT_APM_SIMULATOR_BIN_PATH),
+ evaluator=evaluation.ApmModuleEvaluator())
+ simulator.Run(
+ config_filepaths=['apm_configs/default.json'],
+ capture_input_filepaths=[
+ os.path.join(self._tmp_path, 'pure_tone-440_1000.wav'),
+ os.path.join(self._tmp_path, 'pure_tone-880_1000.wav'),
+ ],
+ test_data_generator_names=['identity', 'white_noise'],
+ eval_score_names=['audio_level_peak', 'audio_level_mean'],
+ output_dir=self._tmp_path)
+
+ # Export results.
+ p = collect_data.InstanceArgumentsParser()
+ args = p.parse_args(['--output_dir', self._tmp_path])
+ src_path = collect_data.ConstructSrcPath(args)
+ self._data_to_export = collect_data.FindScores(src_path, args)
+
+ def tearDown(self):
+ """Recursively deletes temporary folders."""
+ if self._CLEAN_TMP_OUTPUT:
+ shutil.rmtree(self._tmp_path)
+ else:
+ logging.warning('%s did not clean the temporary path %s',
+ self.id(), self._tmp_path)
+
+ def testCreateHtmlReport(self):
+ fn_out = os.path.join(self._tmp_path, 'results.html')
+ exporter = export.HtmlExport(fn_out)
+ exporter.Export(self._data_to_export)
+
+ document = pq.PyQuery(filename=fn_out)
+ self.assertIsInstance(document, pq.PyQuery)
+ # TODO(alessiob): Use PyQuery API to check the HTML file.
diff --git a/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/external_vad.py b/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/external_vad.py
new file mode 100644
index 0000000000..a7db7b4840
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/external_vad.py
@@ -0,0 +1,75 @@
+# Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+from __future__ import division
+
+import logging
+import os
+import subprocess
+import shutil
+import sys
+import tempfile
+
+try:
+ import numpy as np
+except ImportError:
+ logging.critical('Cannot import the third-party Python package numpy')
+ sys.exit(1)
+
+from . import signal_processing
+
+
+class ExternalVad(object):
+ def __init__(self, path_to_binary, name):
+ """Args:
+ path_to_binary: path to a binary that accepts '-i <wav>' and '-o
+ <float probabilities>'. There must be one float value per
+ 10 ms of audio.
+ name: a name to identify the external VAD. Used for saving
+ the output as extvad_output-<name>.
+ """
+ self._path_to_binary = path_to_binary
+ self.name = name
+ assert os.path.exists(self._path_to_binary), (self._path_to_binary)
+ self._vad_output = None
+
+ def Run(self, wav_file_path):
+ _signal = signal_processing.SignalProcessingUtils.LoadWav(
+ wav_file_path)
+ if _signal.channels != 1:
+ raise NotImplementedError('Multiple-channel'
+ ' annotations not implemented')
+ if _signal.frame_rate != 48000:
+ raise NotImplementedError('Frame rates '
+ 'other than 48000 not implemented')
+
+ tmp_path = tempfile.mkdtemp()
+ try:
+ output_file_path = os.path.join(tmp_path, self.name + '_vad.tmp')
+ subprocess.call([
+ self._path_to_binary, '-i', wav_file_path, '-o',
+ output_file_path
+ ])
+ self._vad_output = np.fromfile(output_file_path, np.float32)
+ except Exception as e:
+ logging.error('Error while running the %s VAD (%s)', self.name,
+ str(e))
+ finally:
+ if os.path.exists(tmp_path):
+ shutil.rmtree(tmp_path)
+
+ def GetVadOutput(self):
+ assert self._vad_output is not None
+ return self._vad_output
+
+ @classmethod
+ def ConstructVadDict(cls, vad_paths, vad_names):
+ external_vads = {}
+ for path, name in zip(vad_paths, vad_names):
+ external_vads[name] = ExternalVad(path, name)
+ return external_vads
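+
+
+# Hedged usage sketch (paths are placeholders): fake_external_vad.py in this
+# directory follows the binary contract described above and can stand in for
+# a real VAD:
+#
+#   vads = ExternalVad.ConstructVadDict(
+#       vad_paths=['quality_assessment/fake_external_vad.py'],
+#       vad_names=['fake'])
+#   vads['fake'].Run('capture.wav')  # Mono 48 kHz wav assumed to exist.
+#   probabilities = vads['fake'].GetVadOutput()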
diff --git a/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/fake_external_vad.py b/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/fake_external_vad.py
new file mode 100755
index 0000000000..f679f8c94a
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/fake_external_vad.py
@@ -0,0 +1,25 @@
+#!/usr/bin/python
+# Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+import argparse
+import numpy as np
+
+
+def main():
+ parser = argparse.ArgumentParser()
+ parser.add_argument('-i', required=True)
+ parser.add_argument('-o', required=True)
+
+ args = parser.parse_args()
+
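+    # Write a fixed ramp of 100 float32 values: the '-i' input is parsed but
+    # ignored, since any deterministic output suffices for the unit tests
+    # that exercise the ExternalVad wrapper.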
+ array = np.arange(100, dtype=np.float32)
+ with open(args.o, 'wb') as output_file:
+ array.tofile(output_file)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/fake_polqa.cc b/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/fake_polqa.cc
new file mode 100644
index 0000000000..6f3b2d1dd7
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/fake_polqa.cc
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <fstream>
+#include <iostream>
+#include <string>
+
+#include "absl/strings/string_view.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace test {
+namespace {
+
+const char* const kErrorMessage = "-Out /path/to/output/file is mandatory";
+
+// Writes fake output intended to be parsed by
+// quality_assessment.eval_scores.PolqaScore.
+void WriteOutputFile(absl::string_view output_file_path) {
+ RTC_CHECK_NE(output_file_path, "");
+ std::ofstream out(std::string{output_file_path});
+ RTC_CHECK(out.is_open());
+ out << "* Fake Polqa output" << std::endl;
+ out << "FakeField1\tPolqaScore\tFakeField2" << std::endl;
+ out << "FakeValue1\t3.25\tFakeValue2" << std::endl;
+ out.close();
+}
+
+} // namespace
+
+int main(int argc, char* argv[]) {
+ // Find "-Out" and use its next argument as output file path.
+ RTC_CHECK_GE(argc, 3) << kErrorMessage;
+ const std::string kSoughtFlagName = "-Out";
+ for (int i = 1; i < argc - 1; ++i) {
+ if (kSoughtFlagName.compare(argv[i]) == 0) {
+ WriteOutputFile(argv[i + 1]);
+ return 0;
+ }
+ }
+ RTC_FATAL() << kErrorMessage;
+}
+
+} // namespace test
+} // namespace webrtc
+
+int main(int argc, char* argv[]) {
+ return webrtc::test::main(argc, argv);
+}
diff --git a/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/input_mixer.py b/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/input_mixer.py
new file mode 100644
index 0000000000..af022bd461
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/input_mixer.py
@@ -0,0 +1,97 @@
+# Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+"""Input mixer module.
+"""
+
+import logging
+import os
+
+from . import exceptions
+from . import signal_processing
+
+
+class ApmInputMixer(object):
+ """Class to mix a set of audio segments down to the APM input."""
+
+ _HARD_CLIPPING_LOG_MSG = 'hard clipping detected in the mixed signal'
+
+ def __init__(self):
+ pass
+
+ @classmethod
+ def HardClippingLogMessage(cls):
+ """Returns the log message used when hard clipping is detected in the mix.
+
+ This method is mainly intended to be used by the unit tests.
+ """
+ return cls._HARD_CLIPPING_LOG_MSG
+
+ @classmethod
+ def Mix(cls, output_path, capture_input_filepath, echo_filepath):
+ """Mixes capture and echo.
+
+ Creates the overall capture input for APM by mixing the "echo-free" capture
+ signal with the echo signal (e.g., echo simulated via the
+ echo_path_simulation module).
+
+ The echo signal cannot be shorter than the capture signal and the generated
+ mix has the same duration as the capture signal. The latter property
+ is enforced in order to let the input of APM and the reference signal
+ created by TestDataGenerator have the same length (required for the
+ evaluation step).
+
+ Hard-clipping may occur in the mix; a warning is raised when this happens.
+
+ If `echo_filepath` is None, nothing is done and `capture_input_filepath` is
+ returned.
+
+ Args:
+ output_path: path where the mix audio track file is saved.
+ capture_input_filepath: path to the echo-free capture audio track file.
+ echo_filepath: path to the echo audio track file, or None.
+
+ Returns:
+ Path to the mix audio track file.
+ """
+ if echo_filepath is None:
+ return capture_input_filepath
+
+ # Build the mix output file name as a function of the echo file name.
+ # This ensures that if the internal parameters of the echo path simulator
+ # change, no erroneous cache hit occurs.
+ echo_file_name, _ = os.path.splitext(os.path.split(echo_filepath)[1])
+ capture_input_file_name, _ = os.path.splitext(
+ os.path.split(capture_input_filepath)[1])
+ mix_filepath = os.path.join(
+ output_path,
+ 'mix_capture_{}_{}.wav'.format(capture_input_file_name,
+ echo_file_name))
+
+ # Create the mix if not done yet.
+ mix = None
+ if not os.path.exists(mix_filepath):
+ echo_free_capture = signal_processing.SignalProcessingUtils.LoadWav(
+ capture_input_filepath)
+ echo = signal_processing.SignalProcessingUtils.LoadWav(
+ echo_filepath)
+
+ if signal_processing.SignalProcessingUtils.CountSamples(echo) < (
+ signal_processing.SignalProcessingUtils.CountSamples(
+ echo_free_capture)):
+ raise exceptions.InputMixerException(
+ 'echo cannot be shorter than capture')
+
+ mix = echo_free_capture.overlay(echo)
+ signal_processing.SignalProcessingUtils.SaveWav(mix_filepath, mix)
+
+ # Check if hard clipping occurs.
+ if mix is None:
+ mix = signal_processing.SignalProcessingUtils.LoadWav(mix_filepath)
+ if signal_processing.SignalProcessingUtils.DetectHardClipping(mix):
+ logging.warning(cls._HARD_CLIPPING_LOG_MSG)
+
+ return mix_filepath
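+
+ # Minimal usage sketch (paths are placeholders). The mix file name encodes
+ # both input file names, so a different echo file never hits a stale cache:
+ #
+ #   mix_path = ApmInputMixer.Mix('/tmp/out', '/tmp/capture.wav',
+ #                                '/tmp/echo.wav')
+ #   # -> '/tmp/out/mix_capture_capture_echo.wav', created once then reused.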
diff --git a/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/input_mixer_unittest.py b/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/input_mixer_unittest.py
new file mode 100644
index 0000000000..4fd5e4f1ee
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/input_mixer_unittest.py
@@ -0,0 +1,140 @@
+# Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+"""Unit tests for the input mixer module.
+"""
+
+import logging
+import os
+import shutil
+import tempfile
+import unittest
+
+import mock
+
+from . import exceptions
+from . import input_mixer
+from . import signal_processing
+
+
+class TestApmInputMixer(unittest.TestCase):
+ """Unit tests for the ApmInputMixer class.
+ """
+
+ # Audio track file names created in setUp().
+ _FILENAMES = ['capture', 'echo_1', 'echo_2', 'shorter', 'longer']
+
+ # Target peak power level (dBFS) of each audio track file created in setUp().
+ # These values are hand-crafted in order to make saturation happen when
+ # capture and echo_2 are mixed, and to avoid it when capture and echo_1
+ # are mixed. None means that the power is not changed.
+ _MAX_PEAK_POWER_LEVELS = [-10.0, -5.0, 0.0, None, None]
+
+ # Audio track file durations in milliseconds.
+ _DURATIONS = [1000, 1000, 1000, 800, 1200]
+
+ _SAMPLE_RATE = 48000
+
+ def setUp(self):
+ """Creates temporary data."""
+ self._tmp_path = tempfile.mkdtemp()
+
+ # Create audio track files.
+ self._audio_tracks = {}
+ for filename, peak_power, duration in zip(self._FILENAMES,
+ self._MAX_PEAK_POWER_LEVELS,
+ self._DURATIONS):
+ audio_track_filepath = os.path.join(self._tmp_path,
+ '{}.wav'.format(filename))
+
+ # Create a pure tone with the target peak power level.
+ template = signal_processing.SignalProcessingUtils.GenerateSilence(
+ duration=duration, sample_rate=self._SAMPLE_RATE)
+ signal = signal_processing.SignalProcessingUtils.GeneratePureTone(
+ template)
+ if peak_power is not None:
+ signal = signal.apply_gain(-signal.max_dBFS + peak_power)
+
+ signal_processing.SignalProcessingUtils.SaveWav(
+ audio_track_filepath, signal)
+ self._audio_tracks[filename] = {
+ 'filepath':
+ audio_track_filepath,
+ 'num_samples':
+ signal_processing.SignalProcessingUtils.CountSamples(signal)
+ }
+
+ def tearDown(self):
+ """Recursively deletes temporary folders."""
+ shutil.rmtree(self._tmp_path)
+
+ def testCheckMixSameDuration(self):
+ """Checks the duration when mixing capture and echo with same duration."""
+ mix_filepath = input_mixer.ApmInputMixer.Mix(
+ self._tmp_path, self._audio_tracks['capture']['filepath'],
+ self._audio_tracks['echo_1']['filepath'])
+ self.assertTrue(os.path.exists(mix_filepath))
+
+ mix = signal_processing.SignalProcessingUtils.LoadWav(mix_filepath)
+ self.assertEqual(
+ self._audio_tracks['capture']['num_samples'],
+ signal_processing.SignalProcessingUtils.CountSamples(mix))
+
+ def testRejectShorterEcho(self):
+ """Rejects echo signals that are shorter than the capture signal."""
+ with self.assertRaises(exceptions.InputMixerException):
+ _ = input_mixer.ApmInputMixer.Mix(
+ self._tmp_path, self._audio_tracks['capture']['filepath'],
+ self._audio_tracks['shorter']['filepath'])
+
+ def testCheckMixDurationWithLongerEcho(self):
+ """Checks the duration when mixing an echo longer than the capture."""
+ mix_filepath = input_mixer.ApmInputMixer.Mix(
+ self._tmp_path, self._audio_tracks['capture']['filepath'],
+ self._audio_tracks['longer']['filepath'])
+ self.assertTrue(os.path.exists(mix_filepath))
+
+ mix = signal_processing.SignalProcessingUtils.LoadWav(mix_filepath)
+ self.assertEqual(
+ self._audio_tracks['capture']['num_samples'],
+ signal_processing.SignalProcessingUtils.CountSamples(mix))
+
+ def testCheckOutputFileNamesConflict(self):
+ """Checks that different echo files lead to different output file names."""
+ mix1_filepath = input_mixer.ApmInputMixer.Mix(
+ self._tmp_path, self._audio_tracks['capture']['filepath'],
+ self._audio_tracks['echo_1']['filepath'])
+ self.assertTrue(os.path.exists(mix1_filepath))
+
+ mix2_filepath = input_mixer.ApmInputMixer.Mix(
+ self._tmp_path, self._audio_tracks['capture']['filepath'],
+ self._audio_tracks['echo_2']['filepath'])
+ self.assertTrue(os.path.exists(mix2_filepath))
+
+ self.assertNotEqual(mix1_filepath, mix2_filepath)
+
+ def testHardClippingLogExpected(self):
+ """Checks that hard clipping warning is raised when occurring."""
+ logging.warning = mock.MagicMock(name='warning')
+ _ = input_mixer.ApmInputMixer.Mix(
+ self._tmp_path, self._audio_tracks['capture']['filepath'],
+ self._audio_tracks['echo_2']['filepath'])
+ logging.warning.assert_called_once_with(
+ input_mixer.ApmInputMixer.HardClippingLogMessage())
+
+ def testHardClippingLogNotExpected(self):
+ """Checks that hard clipping warning is not raised when not occurring."""
+ logging.warning = mock.MagicMock(name='warning')
+ _ = input_mixer.ApmInputMixer.Mix(
+ self._tmp_path, self._audio_tracks['capture']['filepath'],
+ self._audio_tracks['echo_1']['filepath'])
+ self.assertNotIn(
+ mock.call(input_mixer.ApmInputMixer.HardClippingLogMessage()),
+ logging.warning.call_args_list)
diff --git a/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/input_signal_creator.py b/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/input_signal_creator.py
new file mode 100644
index 0000000000..b64fdcca89
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/input_signal_creator.py
@@ -0,0 +1,68 @@
+# Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+"""Input signal creator module.
+"""
+
+from . import exceptions
+from . import signal_processing
+
+
+class InputSignalCreator(object):
+ """Input signal creator class.
+ """
+
+ @classmethod
+ def Create(cls, name, raw_params):
+ """Creates a input signal and its metadata.
+
+ Args:
+ name: Input signal creator name.
+ raw_params: Tuple of parameters to pass to the specific signal creator.
+
+ Returns:
+ (AudioSegment, dict) tuple.
+ """
+ try:
+ signal = {}
+ params = {}
+
+ if name == 'pure_tone':
+ params['frequency'] = float(raw_params[0])
+ params['duration'] = int(raw_params[1])
+ signal = cls._CreatePureTone(params['frequency'],
+ params['duration'])
+ else:
+ raise exceptions.InputSignalCreatorException(
+ 'Invalid input signal creator name')
+
+ # Complete metadata.
+ params['signal'] = name
+
+ return signal, params
+ except (TypeError, AssertionError) as e:
+ raise exceptions.InputSignalCreatorException(
+ 'Invalid signal creator parameters: {}'.format(e))
+
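+ # Example (hedged): Create('pure_tone', ('440', '1000')) returns a 1 s,
+ # 440 Hz tone plus the metadata
+ # {'frequency': 440.0, 'duration': 1000, 'signal': 'pure_tone'}.
+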
+ @classmethod
+ def _CreatePureTone(cls, frequency, duration):
+ """
+ Generates a pure tone at 48000 Hz.
+
+ Args:
+ frequency: Float in (0-24000] (Hz).
+ duration: Integer (milliseconds).
+
+ Returns:
+ AudioSegment instance.
+ """
+ assert 0 < frequency <= 24000
+ assert duration > 0
+ template = signal_processing.SignalProcessingUtils.GenerateSilence(
+ duration)
+ return signal_processing.SignalProcessingUtils.GeneratePureTone(
+ template, frequency)
diff --git a/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/results.css b/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/results.css
new file mode 100644
index 0000000000..2f406bb002
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/results.css
@@ -0,0 +1,32 @@
+/* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+td.selected-score {
+ background-color: #DDD;
+}
+
+td.single-score-cell {
+ text-align: center;
+}
+
+.audio-inspector {
+ text-align: center;
+}
+
+.audio-inspector div,
+.audio-inspector div div {
+ margin-bottom: 0;
+ padding-bottom: 0;
+ padding-top: 0;
+}
diff --git a/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/results.js b/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/results.js
new file mode 100644
index 0000000000..8e47411058
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/results.js
@@ -0,0 +1,376 @@
+// Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+
+/**
+ * Opens the score stats inspector dialog.
+ * @param {String} dialogId: identifier of the dialog to show.
+ * @return {DOMElement} The dialog element that has been opened.
+ */
+function openScoreStatsInspector(dialogId) {
+ var dialog = document.getElementById(dialogId);
+ dialog.showModal();
+ return dialog;
+}
+
+/**
+ * Closes the score stats inspector dialog.
+ */
+function closeScoreStatsInspector() {
+ var dialog = document.querySelector('dialog[open]');
+ if (dialog == null)
+ return;
+ dialog.close();
+}
+
+/**
+ * Audio inspector class.
+ * @constructor
+ */
+function AudioInspector() {
+ console.debug('Creating an AudioInspector instance.');
+ this.audioPlayer_ = new Audio();
+ this.metadata_ = {};
+ this.currentScore_ = null;
+ this.audioInspector_ = null;
+ this.snackbarContainer_ = document.querySelector('#snackbar');
+
+ // Get base URL without anchors.
+ this.baseUrl_ = window.location.href;
+ var index = this.baseUrl_.indexOf('#');
+ if (index > 0)
+ this.baseUrl_ = this.baseUrl_.substr(0, index);
+ console.info('Base URL set to "' + this.baseUrl_ + '".');
+
+ window.event.stopPropagation();
+ this.createTextAreasForCopy_();
+ this.createAudioInspector_();
+ this.initializeEventHandlers_();
+
+ // When MDL is ready, parse the anchor (if any) to show the requested
+ // experiment.
+ var self = this;
+ document.querySelectorAll('header a')[0].addEventListener(
+ 'mdl-componentupgraded', function() {
+ if (!self.parseWindowAnchor()) {
+ // If no experiment is requested, open the first section.
+ console.info('No anchor parsing, opening the first section.');
+ document.querySelectorAll('header a > span')[0].click();
+ }
+ });
+}
+
+/**
+ * Parse the anchor in the window URL.
+ * @return {bool} True if the parsing succeeded.
+ */
+AudioInspector.prototype.parseWindowAnchor = function() {
+ var index = location.href.indexOf('#');
+ if (index == -1) {
+ console.debug('No # found in the URL.');
+ return false;
+ }
+
+ var anchor = location.href.substr(index + 1);
+ console.info('Anchor changed: "' + anchor + '".');
+
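+  // Assumed anchor layout (mirroring the Python exporter): three fields
+  // separated by '&' - a tab selector whose leading character is skipped and
+  // whose remainder is the tab index (e.g. 't0'), the dialog element id, and
+  // the score cell class (e.g. 'r2c1').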
+ var parts = anchor.split('&');
+ if (parts.length != 3) {
+ console.info('Ignoring anchor with invalid number of fields.');
+ return false;
+ }
+
+ var openDialog = document.querySelector('dialog[open]');
+ try {
+ // Open the requested dialog if not already open.
+ if (!openDialog || openDialog.id != parts[1]) {
+ if (openDialog) { openDialog.close(); }
+ document.querySelectorAll('header a > span')[
+ parseInt(parts[0].substr(1))].click();
+ openDialog = openScoreStatsInspector(parts[1]);
+ }
+
+ // Trigger click on cell.
+ var cell = openDialog.querySelector('td.' + parts[2]);
+ cell.focus();
+ cell.click();
+
+ this.showNotification_('Experiment selected.');
+ return true;
+ } catch (e) {
+ this.showNotification_('Cannot select experiment :(');
+ console.error('Exception caught while selecting experiment: "' + e + '".');
+ }
+
+ return false;
+}
+
+/**
+ * Set up the inspector for a new score.
+ * @param {DOMElement} element: Element linked to the selected score.
+ */
+AudioInspector.prototype.selectedScoreChange = function(element) {
+ if (this.currentScore_ == element) { return; }
+ if (this.currentScore_ != null) {
+ this.currentScore_.classList.remove('selected-score');
+ }
+ this.currentScore_ = element;
+ this.currentScore_.classList.add('selected-score');
+ this.stopAudio();
+
+ // Read metadata.
+ var matches = element.querySelectorAll('input[type=hidden]');
+ this.metadata_ = {};
+ for (var index = 0; index < matches.length; ++index) {
+ this.metadata_[matches[index].name] = matches[index].value;
+ }
+
+ // Show the audio inspector interface.
+ var container = element.parentNode.parentNode.parentNode.parentNode;
+ var audioInspectorPlaceholder = container.querySelector(
+ '.audio-inspector-placeholder');
+ this.moveInspector_(audioInspectorPlaceholder);
+};
+
+/**
+ * Stop playing audio.
+ */
+AudioInspector.prototype.stopAudio = function() {
+ console.info('Pausing audio play out.');
+ this.audioPlayer_.pause();
+};
+
+/**
+ * Show a text message using the snackbar.
+ */
+AudioInspector.prototype.showNotification_ = function(text) {
+ try {
+ this.snackbarContainer_.MaterialSnackbar.showSnackbar({
+ message: text, timeout: 2000});
+ } catch (e) {
+ // Fallback to an alert.
+ alert(text);
+ console.warn('Cannot use snackbar: "' + e + '"');
+ }
+}
+
+/**
+ * Move the audio inspector DOM node into the given parent.
+ * @param {DOMElement} newParentNode: New parent for the inspector.
+ */
+AudioInspector.prototype.moveInspector_ = function(newParentNode) {
+ newParentNode.appendChild(this.audioInspector_);
+};
+
+/**
+ * Play audio file from url.
+ * @param {string} metadataFieldName: Metadata field name.
+ */
+AudioInspector.prototype.playAudio = function(metadataFieldName) {
+ if (this.metadata_[metadataFieldName] == undefined) { return; }
+ if (this.metadata_[metadataFieldName] == 'None') {
+ alert('The selected stream was not used during the experiment.');
+ return;
+ }
+ this.stopAudio();
+ this.audioPlayer_.src = this.metadata_[metadataFieldName];
+ console.debug('Audio source URL: "' + this.audioPlayer_.src + '"');
+ this.audioPlayer_.play();
+ console.info('Playing out audio.');
+};
+
+/**
+ * Create hidden text areas to copy URLs.
+ *
+ * For each dialog, one text area is created since it is not possible to select
+ * text on a text area outside of the active dialog.
+ */
+AudioInspector.prototype.createTextAreasForCopy_ = function() {
+ var self = this;
+ document.querySelectorAll('dialog.mdl-dialog').forEach(function(element) {
+ var textArea = document.createElement("textarea");
+ textArea.classList.add('url-copy');
+ textArea.style.position = 'fixed';
+ textArea.style.bottom = 0;
+ textArea.style.left = 0;
+ textArea.style.width = '2em';
+ textArea.style.height = '2em';
+ textArea.style.border = 'none';
+ textArea.style.outline = 'none';
+ textArea.style.boxShadow = 'none';
+ textArea.style.background = 'transparent';
+ textArea.style.fontSize = '6px';
+ element.appendChild(textArea);
+ });
+}
+
+/**
+ * Create audio inspector.
+ */
+AudioInspector.prototype.createAudioInspector_ = function() {
+ var buttonIndex = 0;
+ function getButtonHtml(icon, toolTipText, caption, metadataFieldName) {
+ var buttonId = 'audioInspectorButton' + buttonIndex++;
+ var html = caption == null ? '' : caption;
+ html += '<button class="mdl-button mdl-js-button mdl-button--icon ' +
+ 'mdl-js-ripple-effect" id="' + buttonId + '">' +
+ '<i class="material-icons">' + icon + '</i>' +
+ '<div class="mdl-tooltip" data-mdl-for="' + buttonId + '">' +
+ toolTipText +
+ '</div>';
+ if (metadataFieldName != null) {
+ html += '<input type="hidden" value="' + metadataFieldName + '">';
+ }
+ html += '</button>';
+
+ return html;
+ }
+
+ // TODO(alessiob): Add timeline and highlight current track by changing icon
+ // color.
+
+ this.audioInspector_ = document.createElement('div');
+ this.audioInspector_.classList.add('audio-inspector');
+ this.audioInspector_.innerHTML =
+ '<div class="mdl-grid">' +
+ '<div class="mdl-layout-spacer"></div>' +
+ '<div class="mdl-cell mdl-cell--2-col">' +
+ getButtonHtml('play_arrow', 'Simulated echo', 'E<sub>in</sub>',
+ 'echo_filepath') +
+ '</div>' +
+ '<div class="mdl-cell mdl-cell--2-col">' +
+ getButtonHtml('stop', 'Stop playing [S]', null, '__stop__') +
+ '</div>' +
+ '<div class="mdl-cell mdl-cell--2-col">' +
+ getButtonHtml('play_arrow', 'Render stream', 'R<sub>in</sub>',
+ 'render_filepath') +
+ '</div>' +
+ '<div class="mdl-layout-spacer"></div>' +
+ '</div>' +
+ '<div class="mdl-grid">' +
+ '<div class="mdl-layout-spacer"></div>' +
+ '<div class="mdl-cell mdl-cell--2-col">' +
+ getButtonHtml('play_arrow', 'Capture stream (APM input) [1]',
+ 'Y\'<sub>in</sub>', 'capture_filepath') +
+ '</div>' +
+ '<div class="mdl-cell mdl-cell--2-col"><strong>APM</strong></div>' +
+ '<div class="mdl-cell mdl-cell--2-col">' +
+ getButtonHtml('play_arrow', 'APM output [2]', 'Y<sub>out</sub>',
+ 'apm_output_filepath') +
+ '</div>' +
+ '<div class="mdl-layout-spacer"></div>' +
+ '</div>' +
+ '<div class="mdl-grid">' +
+ '<div class="mdl-layout-spacer"></div>' +
+ '<div class="mdl-cell mdl-cell--2-col">' +
+ getButtonHtml('play_arrow', 'Echo-free capture stream',
+ 'Y<sub>in</sub>', 'echo_free_capture_filepath') +
+ '</div>' +
+ '<div class="mdl-cell mdl-cell--2-col">' +
+ getButtonHtml('play_arrow', 'Clean capture stream',
+ 'Y<sub>clean</sub>', 'clean_capture_input_filepath') +
+ '</div>' +
+ '<div class="mdl-cell mdl-cell--2-col">' +
+ getButtonHtml('play_arrow', 'APM reference [3]', 'Y<sub>ref</sub>',
+ 'apm_reference_filepath') +
+ '</div>' +
+ '<div class="mdl-layout-spacer"></div>' +
+ '</div>';
+
+ // Add an invisible node as initial container for the audio inspector.
+ var parent = document.createElement('div');
+ parent.style.display = 'none';
+ this.moveInspector_(parent);
+ document.body.appendChild(parent);
+};
+
+/**
+ * Initialize event handlers.
+ */
+AudioInspector.prototype.initializeEventHandlers_ = function() {
+ var self = this;
+
+ // Score cells.
+ document.querySelectorAll('td.single-score-cell').forEach(function(element) {
+ element.onclick = function() {
+ self.selectedScoreChange(this);
+ }
+ });
+
+ // Copy anchor URLs icons.
+ if (document.queryCommandSupported('copy')) {
+ document.querySelectorAll('td.single-score-cell button').forEach(
+ function(element) {
+ element.onclick = function() {
+ // Find the text area in the dialog.
+ var textArea = element.closest('dialog').querySelector(
+ 'textarea.url-copy');
+
+ // Copy.
+ textArea.value = self.baseUrl_ + '#' + element.getAttribute(
+ 'data-anchor');
+ textArea.select();
+ try {
+ if (!document.execCommand('copy'))
+ throw 'Copy returned false';
+ self.showNotification_('Experiment URL copied.');
+ } catch (e) {
+ self.showNotification_('Cannot copy experiment URL :(');
+ console.error(e);
+ }
+ }
+ });
+ } else {
+ self.showNotification_(
+ 'The copy command is disabled. URL copy is not enabled.');
+ }
+
+ // Audio inspector buttons.
+ this.audioInspector_.querySelectorAll('button').forEach(function(element) {
+ var target = element.querySelector('input[type=hidden]');
+ if (target == null) { return; }
+ element.onclick = function() {
+ if (target.value == '__stop__') {
+ self.stopAudio();
+ } else {
+ self.playAudio(target.value);
+ }
+ };
+ });
+
+ // Dialog close handlers.
+ document.querySelectorAll('dialog').forEach(function(element) {
+ element.onclose = function() {
+ self.stopAudio();
+ }
+ });
+
+ // Keyboard shortcuts.
+ window.onkeyup = function(e) {
+ var key = e.keyCode ? e.keyCode : e.which;
+ switch (key) {
+ case 49: // 1.
+ self.playAudio('capture_filepath');
+ break;
+ case 50: // 2.
+ self.playAudio('apm_output_filepath');
+ break;
+ case 51: // 3.
+ self.playAudio('apm_reference_filepath');
+ break;
+ case 83: // S.
+ case 115: // s.
+ self.stopAudio();
+ break;
+ }
+ };
+
+ // Hash change.
+ window.onhashchange = function(e) {
+ self.parseWindowAnchor();
+ }
+};
diff --git a/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/signal_processing.py b/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/signal_processing.py
new file mode 100644
index 0000000000..95e801903d
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/signal_processing.py
@@ -0,0 +1,359 @@
+# Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+"""Signal processing utility module.
+"""
+
+import array
+import logging
+import os
+import sys
+import enum
+
+try:
+ import numpy as np
+except ImportError:
+ logging.critical('Cannot import the third-party Python package numpy')
+ sys.exit(1)
+
+try:
+ import pydub
+ import pydub.generators
+except ImportError:
+ logging.critical('Cannot import the third-party Python package pydub')
+ sys.exit(1)
+
+try:
+ import scipy.signal
+ import scipy.fftpack
+except ImportError:
+ logging.critical('Cannot import the third-party Python package scipy')
+ sys.exit(1)
+
+from . import exceptions
+
+
+class SignalProcessingUtils(object):
+ """Collection of signal processing utilities.
+ """
+
+ @enum.unique
+ class MixPadding(enum.Enum):
+ NO_PADDING = 0
+ ZERO_PADDING = 1
+ LOOP = 2
+
+ def __init__(self):
+ pass
+
+ @classmethod
+ def LoadWav(cls, filepath, channels=1):
+ """Loads wav file.
+
+ Args:
+ filepath: path to the wav audio track file to load.
+ channels: number of channels (downmixing to mono by default).
+
+ Returns:
+ AudioSegment instance.
+ """
+ if not os.path.exists(filepath):
+ logging.error('cannot find the <%s> audio track file', filepath)
+ raise exceptions.FileNotFoundError()
+ return pydub.AudioSegment.from_file(filepath,
+ format='wav',
+ channels=channels)
+
+ @classmethod
+ def SaveWav(cls, output_filepath, signal):
+ """Saves wav file.
+
+ Args:
+ output_filepath: path to the wav audio track file to save.
+ signal: AudioSegment instance.
+ """
+ return signal.export(output_filepath, format='wav')
+
+ @classmethod
+ def CountSamples(cls, signal):
+ """Number of samples per channel.
+
+ Args:
+ signal: AudioSegment instance.
+
+ Returns:
+ An integer.
+ """
+ number_of_samples = len(signal.get_array_of_samples())
+ assert signal.channels > 0
+ assert number_of_samples % signal.channels == 0
+ return number_of_samples // signal.channels
+
+ @classmethod
+ def GenerateSilence(cls, duration=1000, sample_rate=48000):
+ """Generates silence.
+
+ This method can also be used to create a template AudioSegment instance.
+ A template can then be used with other Generate*() methods accepting an
+ AudioSegment instance as argument.
+
+ Args:
+ duration: duration in ms.
+ sample_rate: sample rate.
+
+ Returns:
+ AudioSegment instance.
+ """
+ return pydub.AudioSegment.silent(duration, sample_rate)
+
+ @classmethod
+ def GeneratePureTone(cls, template, frequency=440.0):
+ """Generates a pure tone.
+
+ The pure tone is generated with the same duration and format as the given
+ template signal.
+
+ Args:
+ template: AudioSegment instance.
+ frequency: Frequency of the pure tone in Hz.
+
+ Returns:
+ AudioSegment instance.
+ """
+ if frequency > template.frame_rate >> 1:
+ raise exceptions.SignalProcessingException('Invalid frequency')
+
+ generator = pydub.generators.Sine(sample_rate=template.frame_rate,
+ bit_depth=template.sample_width * 8,
+ freq=frequency)
+
+ return generator.to_audio_segment(duration=len(template), volume=0.0)
+
+ @classmethod
+ def GenerateWhiteNoise(cls, template):
+ """Generates white noise.
+
+ The white noise is generated with the same duration and format as the
+ given template signal.
+
+ Args:
+ template: AudioSegment instance.
+
+ Returns:
+ AudioSegment instance.
+ """
+ generator = pydub.generators.WhiteNoise(
+ sample_rate=template.frame_rate,
+ bit_depth=template.sample_width * 8)
+ return generator.to_audio_segment(duration=len(template), volume=0.0)
+
+ @classmethod
+ def AudioSegmentToRawData(cls, signal):
+ """Returns the samples of a 16-bit AudioSegment as an np.int16 array."""
+ samples = signal.get_array_of_samples()
+ if samples.typecode != 'h':
+ raise exceptions.SignalProcessingException(
+ 'Unsupported samples type')
+ return np.array(samples, np.int16)
+
+ @classmethod
+ def Fft(cls, signal, normalize=True):
+ """Returns the first half of the FFT of a mono signal as a numpy array."""
+ if signal.channels != 1:
+ raise NotImplementedError('multiple-channel FFT not implemented')
+ x = cls.AudioSegmentToRawData(signal).astype(np.float32)
+ if normalize:
+ x /= max(abs(np.max(x)), 1.0)
+ y = scipy.fftpack.fft(x)
+ return y[:len(y) // 2]
+
+ @classmethod
+ def DetectHardClipping(cls, signal, threshold=2):
+ """Detects hard clipping.
+
+ Hard clipping is detected by counting samples that touch either the
+ lower or upper bound too many times in a row (according to `threshold`).
+ A single sequence of samples with this property is enough to label the
+ signal as hard clipped.
+
+ Args:
+ signal: AudioSegment instance.
+ threshold: minimum number of samples at full-scale in a row.
+
+ Returns:
+ True if hard clipping is detected, False otherwise.
+ """
+ if signal.channels != 1:
+ raise NotImplementedError(
+ 'multiple-channel clipping not implemented')
+ if signal.sample_width != 2: # Note that signal.sample_width is in bytes.
+ raise exceptions.SignalProcessingException(
+ 'hard-clipping detection only supported for 16 bit samples')
+ samples = cls.AudioSegmentToRawData(signal)
+
+ # Detect adjacent clipped samples.
+ samples_type_info = np.iinfo(samples.dtype)
+ mask_min = samples == samples_type_info.min
+ mask_max = samples == samples_type_info.max
+
+ def HasLongSequence(vector, min_length=threshold):
+ """Returns True if there are one or more long sequences of True flags."""
+ seq_length = 0
+ for b in vector:
+ seq_length = seq_length + 1 if b else 0
+ if seq_length >= min_length:
+ return True
+ return False
+
+ return HasLongSequence(mask_min) or HasLongSequence(mask_max)
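+
+ # Illustrative check (hedged sketch, not used by the module): two
+ # consecutive full-scale 16-bit samples trigger detection with the default
+ # threshold:
+ #
+ #   seg = pydub.AudioSegment(
+ #       data=np.array([0, 32767, 32767, 0], np.int16).tobytes(),
+ #       sample_width=2, frame_rate=48000, channels=1)
+ #   SignalProcessingUtils.DetectHardClipping(seg)  # True.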
+
+ @classmethod
+ def ApplyImpulseResponse(cls, signal, impulse_response):
+ """Applies an impulse response to a signal.
+
+ Args:
+ signal: AudioSegment instance.
+ impulse_response: list or numpy vector of float values.
+
+ Returns:
+ AudioSegment instance.
+ """
+ # Get samples.
+ assert signal.channels == 1, (
+ 'multiple-channel recordings not supported')
+ samples = signal.get_array_of_samples()
+
+ # Convolve.
+ logging.info(
+ 'applying %d order impulse response to a signal lasting %d ms',
+ len(impulse_response), len(signal))
+ convolved_samples = scipy.signal.fftconvolve(in1=samples,
+ in2=impulse_response,
+ mode='full').astype(
+ np.int16)
+ logging.info('convolution computed')
+
+ # Cast.
+ convolved_samples = array.array(signal.array_type, convolved_samples)
+
+ # Verify.
+ logging.debug('signal length: %d samples', len(samples))
+ logging.debug('convolved signal length: %d samples',
+ len(convolved_samples))
+ assert len(convolved_samples) > len(samples)
+
+ # Generate convolved signal AudioSegment instance.
+ convolved_signal = pydub.AudioSegment(data=convolved_samples,
+ metadata={
+ 'sample_width':
+ signal.sample_width,
+ 'frame_rate':
+ signal.frame_rate,
+ 'frame_width':
+ signal.frame_width,
+ 'channels': signal.channels,
+ })
+ assert len(convolved_signal) > len(signal)
+
+ return convolved_signal
+
+ @classmethod
+ def Normalize(cls, signal):
+ """Normalizes a signal.
+
+ Args:
+ signal: AudioSegment instance.
+
+ Returns:
+ An AudioSegment instance.
+ """
+ return signal.apply_gain(-signal.max_dBFS)
+
+ @classmethod
+ def Copy(cls, signal):
+ """Makes a copy os a signal.
+
+ Args:
+ signal: AudioSegment instance.
+
+ Returns:
+ An AudioSegment instance.
+ """
+ return pydub.AudioSegment(data=signal.get_array_of_samples(),
+ metadata={
+ 'sample_width': signal.sample_width,
+ 'frame_rate': signal.frame_rate,
+ 'frame_width': signal.frame_width,
+ 'channels': signal.channels,
+ })
+
+ @classmethod
+ def MixSignals(cls,
+ signal,
+ noise,
+ target_snr=0.0,
+ pad_noise=MixPadding.NO_PADDING):
+ """Mixes `signal` and `noise` with a target SNR.
+
+ Mix `signal` and `noise` with a desired SNR by scaling `noise`.
+ If the target SNR is +/- infinite, a copy of signal/noise is returned.
+ If `signal` is shorter than `noise`, the length of the mix equals that of
+ `signal`. Otherwise, the mix length depends on whether padding is applied.
+ When padding is not applied, that is, when `pad_noise` is set to NO_PADDING
+ (default), the mix length equals that of `noise` - i.e., `signal` is
+ truncated. Otherwise, `noise` is extended and the resulting mix has the
+ same length as `signal`.
+
+ Args:
+ signal: AudioSegment instance (signal).
+ noise: AudioSegment instance (noise).
+ target_snr: float, numpy.Inf or -numpy.Inf (dB).
+ pad_noise: SignalProcessingUtils.MixPadding, default: NO_PADDING.
+
+ Returns:
+ An AudioSegment instance.
+ """
+ # Handle infinite target SNR.
+ if target_snr == -np.Inf:
+ # Return a copy of noise.
+ logging.warning('SNR = -Inf, returning noise')
+ return cls.Copy(noise)
+ elif target_snr == np.Inf:
+ # Return a copy of signal.
+ logging.warning('SNR = +Inf, returning signal')
+ return cls.Copy(signal)
+
+ # Check signal and noise power.
+ signal_power = float(signal.dBFS)
+ noise_power = float(noise.dBFS)
+ if signal_power == -np.Inf:
+ logging.error('signal has -Inf power, cannot mix')
+ raise exceptions.SignalProcessingException(
+ 'cannot mix a signal with -Inf power')
+ if noise_power == -np.Inf:
+ logging.error('noise has -Inf power, cannot mix')
+ raise exceptions.SignalProcessingException(
+ 'cannot mix a signal with -Inf power')
+
+ # Mix.
+ gain_db = signal_power - noise_power - target_snr
+ signal_duration = len(signal)
+ noise_duration = len(noise)
+ if signal_duration <= noise_duration:
+ # Ignore `pad_noise`: `noise` is truncated if longer than `signal` and
+ # the mix has the same length as `signal`.
+ return signal.overlay(noise.apply_gain(gain_db))
+ elif pad_noise == cls.MixPadding.NO_PADDING:
+ # `signal` is longer than `noise`, but no padding is applied to `noise`.
+ # Truncate `signal`.
+ return noise.overlay(signal, gain_during_overlay=gain_db)
+ elif pad_noise == cls.MixPadding.ZERO_PADDING:
+ # TODO(alessiob): Check that this works as expected.
+ return signal.overlay(noise.apply_gain(gain_db))
+ elif pad_noise == cls.MixPadding.LOOP:
+ # `signal` is longer than `noise`, extend `noise` by looping.
+ return signal.overlay(noise.apply_gain(gain_db), loop=True)
+ else:
+ raise exceptions.SignalProcessingException('invalid padding type')
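+
+
+# Hedged usage sketch (file names are placeholders): mix white noise into a
+# speech track at 10 dB SNR, looping the noise if the speech is longer:
+#
+#   speech = SignalProcessingUtils.LoadWav('speech.wav')
+#   noise = SignalProcessingUtils.LoadWav('noise.wav')
+#   mix = SignalProcessingUtils.MixSignals(
+#       speech, noise, target_snr=10.0,
+#       pad_noise=SignalProcessingUtils.MixPadding.LOOP)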
diff --git a/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/signal_processing_unittest.py b/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/signal_processing_unittest.py
new file mode 100644
index 0000000000..881fb66800
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/signal_processing_unittest.py
@@ -0,0 +1,183 @@
+# Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+"""Unit tests for the signal_processing module.
+"""
+
+import unittest
+
+import numpy as np
+import pydub
+
+from . import exceptions
+from . import signal_processing
+
+
+class TestSignalProcessing(unittest.TestCase):
+ """Unit tests for the signal_processing module.
+ """
+
+ def testMixSignals(self):
+ # Generate a template signal with which white noise can be generated.
+ silence = pydub.AudioSegment.silent(duration=1000, frame_rate=48000)
+
+ # Generate two distinct AudioSegment instances with 1 second of white noise.
+ signal = signal_processing.SignalProcessingUtils.GenerateWhiteNoise(
+ silence)
+ noise = signal_processing.SignalProcessingUtils.GenerateWhiteNoise(
+ silence)
+
+ # Extract samples.
+ signal_samples = signal.get_array_of_samples()
+ noise_samples = noise.get_array_of_samples()
+
+ # Test target SNR -Inf (noise expected).
+ mix_neg_inf = signal_processing.SignalProcessingUtils.MixSignals(
+ signal, noise, -np.Inf)
+ self.assertEqual(len(noise), len(mix_neg_inf)) # Check duration.
+ mix_neg_inf_samples = mix_neg_inf.get_array_of_samples()
+ self.assertTrue( # Check samples.
+ all([x == y for x, y in zip(noise_samples, mix_neg_inf_samples)]))
+
+ # Test target SNR 0.0 (different data expected).
+ mix_0 = signal_processing.SignalProcessingUtils.MixSignals(
+ signal, noise, 0.0)
+ self.assertEqual(len(signal), len(mix_0)) # Check duration.
+ self.assertEqual(len(noise), len(mix_0))
+ mix_0_samples = mix_0.get_array_of_samples()
+ self.assertTrue(
+ any([x != y for x, y in zip(signal_samples, mix_0_samples)]))
+ self.assertTrue(
+ any([x != y for x, y in zip(noise_samples, mix_0_samples)]))
+
+ # Test target SNR +Inf (signal expected).
+ mix_pos_inf = signal_processing.SignalProcessingUtils.MixSignals(
+ signal, noise, np.Inf)
+ self.assertEqual(len(signal), len(mix_pos_inf)) # Check duration.
+ mix_pos_inf_samples = mix_pos_inf.get_array_of_samples()
+ self.assertTrue( # Check samples.
+ all([x == y for x, y in zip(signal_samples, mix_pos_inf_samples)]))
+
+ def testMixSignalsMinInfPower(self):
+ silence = pydub.AudioSegment.silent(duration=1000, frame_rate=48000)
+ signal = signal_processing.SignalProcessingUtils.GenerateWhiteNoise(
+ silence)
+
+ with self.assertRaises(exceptions.SignalProcessingException):
+ _ = signal_processing.SignalProcessingUtils.MixSignals(
+ signal, silence, 0.0)
+
+ with self.assertRaises(exceptions.SignalProcessingException):
+ _ = signal_processing.SignalProcessingUtils.MixSignals(
+ silence, signal, 0.0)
+
+ def testMixSignalNoiseDifferentLengths(self):
+ # Test signals.
+ shorter = signal_processing.SignalProcessingUtils.GenerateWhiteNoise(
+ pydub.AudioSegment.silent(duration=1000, frame_rate=8000))
+ longer = signal_processing.SignalProcessingUtils.GenerateWhiteNoise(
+ pydub.AudioSegment.silent(duration=2000, frame_rate=8000))
+
+ # When the signal is shorter than the noise, the mix length always equals
+ # that of the signal regardless of whether padding is applied.
+ # No noise padding, length of signal less than that of noise.
+ mix = signal_processing.SignalProcessingUtils.MixSignals(
+ signal=shorter,
+ noise=longer,
+ pad_noise=signal_processing.SignalProcessingUtils.MixPadding.
+ NO_PADDING)
+ self.assertEqual(len(shorter), len(mix))
+ # With noise padding, length of signal less than that of noise.
+ mix = signal_processing.SignalProcessingUtils.MixSignals(
+ signal=shorter,
+ noise=longer,
+ pad_noise=signal_processing.SignalProcessingUtils.MixPadding.
+ ZERO_PADDING)
+ self.assertEqual(len(shorter), len(mix))
+
+ # When the signal is longer than the noise, the mix length depends on
+ # whether padding is applied.
+ # No noise padding, length of signal greater than that of noise.
+ mix = signal_processing.SignalProcessingUtils.MixSignals(
+ signal=longer,
+ noise=shorter,
+ pad_noise=signal_processing.SignalProcessingUtils.MixPadding.
+ NO_PADDING)
+ self.assertEqual(len(shorter), len(mix))
+ # With noise padding, length of signal greater than that of noise.
+ mix = signal_processing.SignalProcessingUtils.MixSignals(
+ signal=longer,
+ noise=shorter,
+ pad_noise=signal_processing.SignalProcessingUtils.MixPadding.
+ ZERO_PADDING)
+ self.assertEqual(len(longer), len(mix))
+
+ def testMixSignalNoisePaddingTypes(self):
+ # Test signals.
+ shorter = signal_processing.SignalProcessingUtils.GenerateWhiteNoise(
+ pydub.AudioSegment.silent(duration=1000, frame_rate=8000))
+ longer = signal_processing.SignalProcessingUtils.GeneratePureTone(
+ pydub.AudioSegment.silent(duration=2000, frame_rate=8000), 440.0)
+
+ # Zero padding: expect pure tone only in 1-2s.
+ mix_zero_pad = signal_processing.SignalProcessingUtils.MixSignals(
+ signal=longer,
+ noise=shorter,
+ target_snr=-6,
+ pad_noise=signal_processing.SignalProcessingUtils.MixPadding.
+ ZERO_PADDING)
+
+ # Loop: expect pure tone plus noise in 1-2s.
+ mix_loop = signal_processing.SignalProcessingUtils.MixSignals(
+ signal=longer,
+ noise=shorter,
+ target_snr=-6,
+ pad_noise=signal_processing.SignalProcessingUtils.MixPadding.LOOP)
+
+ def Energy(signal):
+ samples = signal_processing.SignalProcessingUtils.AudioSegmentToRawData(
+ signal).astype(np.float32)
+ return np.sum(samples * samples)
+
+ e_mix_zero_pad = Energy(mix_zero_pad[-1000:])
+ e_mix_loop = Energy(mix_loop[-1000:])
+ self.assertLess(0, e_mix_zero_pad)
+ self.assertLess(e_mix_zero_pad, e_mix_loop)
+
+ def testMixSignalSnr(self):
+ # Test signals.
+ tone_low = signal_processing.SignalProcessingUtils.GeneratePureTone(
+ pydub.AudioSegment.silent(duration=64, frame_rate=8000), 250.0)
+ tone_high = signal_processing.SignalProcessingUtils.GeneratePureTone(
+ pydub.AudioSegment.silent(duration=64, frame_rate=8000), 3000.0)
+
+ def ToneAmplitudes(mix):
+ """Returns the amplitude of the coefficients #16 and #192, which
+ correspond to the tones at 250 and 3k Hz respectively."""
+ mix_fft = np.absolute(
+ signal_processing.SignalProcessingUtils.Fft(mix))
+ return mix_fft[16], mix_fft[192]
+
+ mix = signal_processing.SignalProcessingUtils.MixSignals(
+ signal=tone_low, noise=tone_high, target_snr=-6)
+ ampl_low, ampl_high = ToneAmplitudes(mix)
+ self.assertLess(ampl_low, ampl_high)
+
+ mix = signal_processing.SignalProcessingUtils.MixSignals(
+ signal=tone_high, noise=tone_low, target_snr=-6)
+ ampl_low, ampl_high = ToneAmplitudes(mix)
+ self.assertLess(ampl_high, ampl_low)
+
+ mix = signal_processing.SignalProcessingUtils.MixSignals(
+ signal=tone_low, noise=tone_high, target_snr=6)
+ ampl_low, ampl_high = ToneAmplitudes(mix)
+ self.assertLess(ampl_high, ampl_low)
+
+ mix = signal_processing.SignalProcessingUtils.MixSignals(
+ signal=tone_high, noise=tone_low, target_snr=6)
+ ampl_low, ampl_high = ToneAmplitudes(mix)
+ self.assertLess(ampl_low, ampl_high)
diff --git a/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/simulation.py b/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/simulation.py
new file mode 100644
index 0000000000..69b3a1624e
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/simulation.py
@@ -0,0 +1,446 @@
+# Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+"""APM module simulator.
+"""
+
+import logging
+import os
+
+from . import annotations
+from . import data_access
+from . import echo_path_simulation
+from . import echo_path_simulation_factory
+from . import eval_scores
+from . import exceptions
+from . import input_mixer
+from . import input_signal_creator
+from . import signal_processing
+from . import test_data_generation
+
+
+class ApmModuleSimulator(object):
+ """Audio processing module (APM) simulator class.
+ """
+
+ _TEST_DATA_GENERATOR_CLASSES = (
+ test_data_generation.TestDataGenerator.REGISTERED_CLASSES)
+ _EVAL_SCORE_WORKER_CLASSES = eval_scores.EvaluationScore.REGISTERED_CLASSES
+
+ _PREFIX_APM_CONFIG = 'apmcfg-'
+ _PREFIX_CAPTURE = 'capture-'
+ _PREFIX_RENDER = 'render-'
+ _PREFIX_ECHO_SIMULATOR = 'echosim-'
+ _PREFIX_TEST_DATA_GEN = 'datagen-'
+ _PREFIX_TEST_DATA_GEN_PARAMS = 'datagen_params-'
+ _PREFIX_SCORE = 'score-'
+
+ def __init__(self,
+ test_data_generator_factory,
+ evaluation_score_factory,
+ ap_wrapper,
+ evaluator,
+ external_vads=None):
+ if external_vads is None:
+ external_vads = {}
+ self._test_data_generator_factory = test_data_generator_factory
+ self._evaluation_score_factory = evaluation_score_factory
+ self._audioproc_wrapper = ap_wrapper
+ self._evaluator = evaluator
+ self._annotator = annotations.AudioAnnotationsExtractor(
+ annotations.AudioAnnotationsExtractor.VadType.ENERGY_THRESHOLD
+ | annotations.AudioAnnotationsExtractor.VadType.WEBRTC_COMMON_AUDIO
+ | annotations.AudioAnnotationsExtractor.VadType.WEBRTC_APM,
+ external_vads)
+
+ # Init.
+ self._test_data_generator_factory.SetOutputDirectoryPrefix(
+ self._PREFIX_TEST_DATA_GEN_PARAMS)
+ self._evaluation_score_factory.SetScoreFilenamePrefix(
+ self._PREFIX_SCORE)
+
+ # Properties for each run.
+ self._base_output_path = None
+ self._output_cache_path = None
+ self._test_data_generators = None
+ self._evaluation_score_workers = None
+ self._config_filepaths = None
+ self._capture_input_filepaths = None
+ self._render_input_filepaths = None
+ self._echo_path_simulator_class = None
+
+ @classmethod
+ def GetPrefixApmConfig(cls):
+ return cls._PREFIX_APM_CONFIG
+
+ @classmethod
+ def GetPrefixCapture(cls):
+ return cls._PREFIX_CAPTURE
+
+ @classmethod
+ def GetPrefixRender(cls):
+ return cls._PREFIX_RENDER
+
+ @classmethod
+ def GetPrefixEchoSimulator(cls):
+ return cls._PREFIX_ECHO_SIMULATOR
+
+ @classmethod
+ def GetPrefixTestDataGenerator(cls):
+ return cls._PREFIX_TEST_DATA_GEN
+
+ @classmethod
+ def GetPrefixTestDataGeneratorParameters(cls):
+ return cls._PREFIX_TEST_DATA_GEN_PARAMS
+
+ @classmethod
+ def GetPrefixScore(cls):
+ return cls._PREFIX_SCORE
+
+ def Run(self,
+ config_filepaths,
+ capture_input_filepaths,
+ test_data_generator_names,
+ eval_score_names,
+ output_dir,
+ render_input_filepaths=None,
+ echo_path_simulator_name=(
+ echo_path_simulation.NoEchoPathSimulator.NAME)):
+ """Runs the APM simulation.
+
+ Initializes paths and required instances, then runs all the simulations.
+        The render input can optionally be added. If added, the number of capture
+        input audio tracks must equal the number of render input audio tracks;
+        the two lists are used to form capture-render input pairs.
+
+ Args:
+ config_filepaths: set of APM configuration files to test.
+ capture_input_filepaths: set of capture input audio track files to test.
+ test_data_generator_names: set of test data generator names to test.
+ eval_score_names: set of evaluation score names to test.
+ output_dir: base path to the output directory for wav files and outcomes.
+ render_input_filepaths: set of render input audio track files to test.
+ echo_path_simulator_name: name of the echo path simulator to use when
+ render input is provided.
+ """
+ assert render_input_filepaths is None or (
+ len(capture_input_filepaths) == len(render_input_filepaths)), (
+ 'render input set size not matching input set size')
+ assert render_input_filepaths is None or echo_path_simulator_name in (
+ echo_path_simulation.EchoPathSimulator.REGISTERED_CLASSES), (
+ 'invalid echo path simulator')
+ self._base_output_path = os.path.abspath(output_dir)
+
+ # Output path used to cache the data shared across simulations.
+ self._output_cache_path = os.path.join(self._base_output_path,
+ '_cache')
+
+        # Instantiate the test data generators.
+ self._test_data_generators = [
+ self._test_data_generator_factory.GetInstance(
+ test_data_generators_class=(
+ self._TEST_DATA_GENERATOR_CLASSES[name]))
+            for name in test_data_generator_names
+ ]
+
+        # Instantiate the evaluation score workers.
+ self._evaluation_score_workers = [
+ self._evaluation_score_factory.GetInstance(
+ evaluation_score_class=self._EVAL_SCORE_WORKER_CLASSES[name])
+            for name in eval_score_names
+ ]
+
+ # Set APM configuration file paths.
+ self._config_filepaths = self._CreatePathsCollection(config_filepaths)
+
+ # Set probing signal file paths.
+ if render_input_filepaths is None:
+ # Capture input only.
+ self._capture_input_filepaths = self._CreatePathsCollection(
+ capture_input_filepaths)
+ self._render_input_filepaths = None
+ else:
+ # Set both capture and render input signals.
+ self._SetTestInputSignalFilePaths(capture_input_filepaths,
+ render_input_filepaths)
+
+ # Set the echo path simulator class.
+ self._echo_path_simulator_class = (
+ echo_path_simulation.EchoPathSimulator.
+ REGISTERED_CLASSES[echo_path_simulator_name])
+
+ self._SimulateAll()
+
+ def _SimulateAll(self):
+ """Runs all the simulations.
+
+ Iterates over the combinations of APM configurations, probing signals, and
+ test data generators. This method is mainly responsible for the creation of
+ the cache and output directories required in order to call _Simulate().
+ """
+ without_render_input = self._render_input_filepaths is None
+
+ # Try different APM config files.
+ for config_name in self._config_filepaths:
+ config_filepath = self._config_filepaths[config_name]
+
+ # Try different capture-render pairs.
+ for capture_input_name in self._capture_input_filepaths:
+ # Output path for the capture signal annotations.
+ capture_annotations_cache_path = os.path.join(
+ self._output_cache_path,
+ self._PREFIX_CAPTURE + capture_input_name)
+ data_access.MakeDirectory(capture_annotations_cache_path)
+
+ # Capture.
+ capture_input_filepath = self._capture_input_filepaths[
+ capture_input_name]
+ if not os.path.exists(capture_input_filepath):
+                    # If the input signal file does not exist, try to create it
+                    # using the available input signal creators.
+ self._CreateInputSignal(capture_input_filepath)
+ assert os.path.exists(capture_input_filepath)
+ self._ExtractCaptureAnnotations(
+ capture_input_filepath, capture_annotations_cache_path)
+
+ # Render and simulated echo path (optional).
+ render_input_filepath = None if without_render_input else (
+ self._render_input_filepaths[capture_input_name])
+ render_input_name = '(none)' if without_render_input else (
+ self._ExtractFileName(render_input_filepath))
+ echo_path_simulator = (echo_path_simulation_factory.
+ EchoPathSimulatorFactory.GetInstance(
+ self._echo_path_simulator_class,
+ render_input_filepath))
+
+ # Try different test data generators.
+ for test_data_generators in self._test_data_generators:
+ logging.info(
+                        'APM config preset: <%s>, capture: <%s>, render: <%s>, '
+                        'test data generator: <%s>, echo simulator: <%s>',
+ config_name, capture_input_name, render_input_name,
+ test_data_generators.NAME, echo_path_simulator.NAME)
+
+ # Output path for the generated test data.
+ test_data_cache_path = os.path.join(
+ capture_annotations_cache_path,
+ self._PREFIX_TEST_DATA_GEN + test_data_generators.NAME)
+ data_access.MakeDirectory(test_data_cache_path)
+ logging.debug('test data cache path: <%s>',
+ test_data_cache_path)
+
+ # Output path for the echo simulator and APM input mixer output.
+ echo_test_data_cache_path = os.path.join(
+ test_data_cache_path,
+ 'echosim-{}'.format(echo_path_simulator.NAME))
+ data_access.MakeDirectory(echo_test_data_cache_path)
+ logging.debug('echo test data cache path: <%s>',
+ echo_test_data_cache_path)
+
+ # Full output path.
+ output_path = os.path.join(
+ self._base_output_path,
+ self._PREFIX_APM_CONFIG + config_name,
+ self._PREFIX_CAPTURE + capture_input_name,
+ self._PREFIX_RENDER + render_input_name,
+ self._PREFIX_ECHO_SIMULATOR + echo_path_simulator.NAME,
+ self._PREFIX_TEST_DATA_GEN + test_data_generators.NAME)
+ data_access.MakeDirectory(output_path)
+ logging.debug('output path: <%s>', output_path)
+
+ self._Simulate(test_data_generators,
+ capture_input_filepath,
+ render_input_filepath, test_data_cache_path,
+ echo_test_data_cache_path, output_path,
+ config_filepath, echo_path_simulator)
+
+ @staticmethod
+ def _CreateInputSignal(input_signal_filepath):
+ """Creates a missing input signal file.
+
+ The file name is parsed to extract input signal creator and params. If a
+ creator is matched and the parameters are valid, a new signal is generated
+ and written in `input_signal_filepath`.
+
+ Args:
+ input_signal_filepath: Path to the input signal audio file to write.
+
+ Raises:
+ InputSignalCreatorException
+ """
+ filename = os.path.splitext(
+ os.path.split(input_signal_filepath)[-1])[0]
+ filename_parts = filename.split('-')
+
+ if len(filename_parts) < 2:
+ raise exceptions.InputSignalCreatorException(
+ 'Cannot parse input signal file name')
+
+ signal, metadata = input_signal_creator.InputSignalCreator.Create(
+ filename_parts[0], filename_parts[1].split('_'))
+
+ signal_processing.SignalProcessingUtils.SaveWav(
+ input_signal_filepath, signal)
+ data_access.Metadata.SaveFileMetadata(input_signal_filepath, metadata)
+
+ def _ExtractCaptureAnnotations(self,
+ input_filepath,
+ output_path,
+ annotation_name=""):
+ self._annotator.Extract(input_filepath)
+ self._annotator.Save(output_path, annotation_name)
+
+ def _Simulate(self, test_data_generators, clean_capture_input_filepath,
+ render_input_filepath, test_data_cache_path,
+ echo_test_data_cache_path, output_path, config_filepath,
+ echo_path_simulator):
+ """Runs a single set of simulation.
+
+ Simulates a given combination of APM configuration, probing signal, and
+ test data generator. It iterates over the test data generator
+ internal configurations.
+
+ Args:
+ test_data_generators: TestDataGenerator instance.
+ clean_capture_input_filepath: capture input audio track file to be
+ processed by a test data generator and
+ not affected by echo.
+ render_input_filepath: render input audio track file to test.
+ test_data_cache_path: path for the generated test audio track files.
+ echo_test_data_cache_path: path for the echo simulator.
+ output_path: base output path for the test data generator.
+ config_filepath: APM configuration file to test.
+ echo_path_simulator: EchoPathSimulator instance.
+ """
+ # Generate pairs of noisy input and reference signal files.
+ test_data_generators.Generate(
+ input_signal_filepath=clean_capture_input_filepath,
+ test_data_cache_path=test_data_cache_path,
+ base_output_path=output_path)
+
+ # Extract metadata linked to the clean input file (if any).
+ apm_input_metadata = None
+ try:
+ apm_input_metadata = data_access.Metadata.LoadFileMetadata(
+ clean_capture_input_filepath)
+        except IOError:
+            apm_input_metadata = {}
+ apm_input_metadata['test_data_gen_name'] = test_data_generators.NAME
+ apm_input_metadata['test_data_gen_config'] = None
+
+ # For each test data pair, simulate a call and evaluate.
+ for config_name in test_data_generators.config_names:
+ logging.info(' - test data generator config: <%s>', config_name)
+ apm_input_metadata['test_data_gen_config'] = config_name
+
+ # Paths to the test data generator output.
+            # Note that the reference signal does not depend on the render
+            # input, which is optional.
+ noisy_capture_input_filepath = (
+ test_data_generators.noisy_signal_filepaths[config_name])
+ reference_signal_filepath = (
+ test_data_generators.reference_signal_filepaths[config_name])
+
+ # Output path for the evaluation (e.g., APM output file).
+ evaluation_output_path = test_data_generators.apm_output_paths[
+ config_name]
+
+ # Paths to the APM input signals.
+ echo_path_filepath = echo_path_simulator.Simulate(
+ echo_test_data_cache_path)
+ apm_input_filepath = input_mixer.ApmInputMixer.Mix(
+ echo_test_data_cache_path, noisy_capture_input_filepath,
+ echo_path_filepath)
+
+ # Extract annotations for the APM input mix.
+ apm_input_basepath, apm_input_filename = os.path.split(
+ apm_input_filepath)
+ self._ExtractCaptureAnnotations(
+ apm_input_filepath, apm_input_basepath,
+ os.path.splitext(apm_input_filename)[0] + '-')
+
+ # Simulate a call using APM.
+ self._audioproc_wrapper.Run(
+ config_filepath=config_filepath,
+ capture_input_filepath=apm_input_filepath,
+ render_input_filepath=render_input_filepath,
+ output_path=evaluation_output_path)
+
+ try:
+ # Evaluate.
+ self._evaluator.Run(
+ evaluation_score_workers=self._evaluation_score_workers,
+ apm_input_metadata=apm_input_metadata,
+ apm_output_filepath=self._audioproc_wrapper.
+ output_filepath,
+ reference_input_filepath=reference_signal_filepath,
+ render_input_filepath=render_input_filepath,
+ output_path=evaluation_output_path,
+ )
+
+ # Save simulation metadata.
+ data_access.Metadata.SaveAudioTestDataPaths(
+ output_path=evaluation_output_path,
+ clean_capture_input_filepath=clean_capture_input_filepath,
+ echo_free_capture_filepath=noisy_capture_input_filepath,
+ echo_filepath=echo_path_filepath,
+ render_filepath=render_input_filepath,
+ capture_filepath=apm_input_filepath,
+ apm_output_filepath=self._audioproc_wrapper.
+ output_filepath,
+ apm_reference_filepath=reference_signal_filepath,
+ apm_config_filepath=config_filepath,
+ )
+ except exceptions.EvaluationScoreException as e:
+ logging.warning('the evaluation failed: %s', e.message)
+ continue
+
+ def _SetTestInputSignalFilePaths(self, capture_input_filepaths,
+ render_input_filepaths):
+ """Sets input and render input file paths collections.
+
+        Pairs the capture and render input files by storing the file paths in two
+        collections. The key is the capture file name without extension.
+
+ Args:
+ capture_input_filepaths: list of file paths.
+ render_input_filepaths: list of file paths.
+ """
+ self._capture_input_filepaths = {}
+ self._render_input_filepaths = {}
+ assert len(capture_input_filepaths) == len(render_input_filepaths)
+ for capture_input_filepath, render_input_filepath in zip(
+ capture_input_filepaths, render_input_filepaths):
+ name = self._ExtractFileName(capture_input_filepath)
+ self._capture_input_filepaths[name] = os.path.abspath(
+ capture_input_filepath)
+ self._render_input_filepaths[name] = os.path.abspath(
+ render_input_filepath)
+
+ @classmethod
+ def _CreatePathsCollection(cls, filepaths):
+ """Creates a collection of file paths.
+
+        Given a list of file paths, makes a collection with one item for each file
+        path. The key is the file name without extension; the value is the
+        absolute path.
+
+ Args:
+ filepaths: list of file paths.
+
+ Returns:
+ A dict.
+ """
+ filepaths_collection = {}
+ for filepath in filepaths:
+ name = cls._ExtractFileName(filepath)
+ filepaths_collection[name] = os.path.abspath(filepath)
+ return filepaths_collection
+
+ @classmethod
+ def _ExtractFileName(cls, filepath):
+ return os.path.splitext(os.path.split(filepath)[-1])[0]
diff --git a/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/simulation_unittest.py b/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/simulation_unittest.py
new file mode 100644
index 0000000000..78ca17f589
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/simulation_unittest.py
@@ -0,0 +1,203 @@
+# Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+"""Unit tests for the simulation module.
+"""
+
+import logging
+import os
+import shutil
+import tempfile
+import unittest
+
+import mock
+import pydub
+
+from . import audioproc_wrapper
+from . import eval_scores_factory
+from . import evaluation
+from . import external_vad
+from . import signal_processing
+from . import simulation
+from . import test_data_generation_factory
+
+
+class TestApmModuleSimulator(unittest.TestCase):
+ """Unit tests for the ApmModuleSimulator class.
+ """
+
+ def setUp(self):
+ """Create temporary folders and fake audio track."""
+ self._output_path = tempfile.mkdtemp()
+ self._tmp_path = tempfile.mkdtemp()
+
+ silence = pydub.AudioSegment.silent(duration=1000, frame_rate=48000)
+ fake_signal = signal_processing.SignalProcessingUtils.GenerateWhiteNoise(
+ silence)
+ self._fake_audio_track_path = os.path.join(self._output_path,
+ 'fake.wav')
+ signal_processing.SignalProcessingUtils.SaveWav(
+ self._fake_audio_track_path, fake_signal)
+
+ def tearDown(self):
+ """Recursively delete temporary folders."""
+ shutil.rmtree(self._output_path)
+ shutil.rmtree(self._tmp_path)
+
+ def testSimulation(self):
+        # Instantiate dependencies to mock and inject.
+ ap_wrapper = audioproc_wrapper.AudioProcWrapper(
+ audioproc_wrapper.AudioProcWrapper.DEFAULT_APM_SIMULATOR_BIN_PATH)
+ evaluator = evaluation.ApmModuleEvaluator()
+ ap_wrapper.Run = mock.MagicMock(name='Run')
+ evaluator.Run = mock.MagicMock(name='Run')
+
+        # Instantiate non-mocked dependencies.
+ test_data_generator_factory = (
+ test_data_generation_factory.TestDataGeneratorFactory(
+ aechen_ir_database_path='',
+ noise_tracks_path='',
+ copy_with_identity=False))
+ evaluation_score_factory = eval_scores_factory.EvaluationScoreWorkerFactory(
+ polqa_tool_bin_path=os.path.join(os.path.dirname(__file__),
+ 'fake_polqa'),
+ echo_metric_tool_bin_path=None)
+
+        # Instantiate the simulator.
+ simulator = simulation.ApmModuleSimulator(
+ test_data_generator_factory=test_data_generator_factory,
+ evaluation_score_factory=evaluation_score_factory,
+ ap_wrapper=ap_wrapper,
+ evaluator=evaluator,
+ external_vads={
+ 'fake':
+ external_vad.ExternalVad(
+ os.path.join(os.path.dirname(__file__),
+ 'fake_external_vad.py'), 'fake')
+ })
+
+ # What to simulate.
+ config_files = ['apm_configs/default.json']
+ input_files = [self._fake_audio_track_path]
+ test_data_generators = ['identity', 'white_noise']
+ eval_scores = ['audio_level_mean', 'polqa']
+
+ # Run all simulations.
+ simulator.Run(config_filepaths=config_files,
+ capture_input_filepaths=input_files,
+ test_data_generator_names=test_data_generators,
+ eval_score_names=eval_scores,
+ output_dir=self._output_path)
+
+ # Check.
+ # TODO(alessiob): Once the TestDataGenerator classes can be configured by
+ # the client code (e.g., number of SNR pairs for the white noise test data
+ # generator), the exact number of calls to ap_wrapper.Run and evaluator.Run
+ # is known; use that with assertEqual.
+ min_number_of_simulations = len(config_files) * len(input_files) * len(
+ test_data_generators)
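+        # In this test: 1 config * 1 input file * 2 generators = at least 2
+        # simulations.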
+ self.assertGreaterEqual(len(ap_wrapper.Run.call_args_list),
+ min_number_of_simulations)
+ self.assertGreaterEqual(len(evaluator.Run.call_args_list),
+ min_number_of_simulations)
+
+ def testInputSignalCreation(self):
+        # Instantiate the simulator.
+ simulator = simulation.ApmModuleSimulator(
+ test_data_generator_factory=(
+ test_data_generation_factory.TestDataGeneratorFactory(
+ aechen_ir_database_path='',
+ noise_tracks_path='',
+ copy_with_identity=False)),
+ evaluation_score_factory=(
+ eval_scores_factory.EvaluationScoreWorkerFactory(
+ polqa_tool_bin_path=os.path.join(os.path.dirname(__file__),
+ 'fake_polqa'),
+ echo_metric_tool_bin_path=None)),
+ ap_wrapper=audioproc_wrapper.AudioProcWrapper(
+ audioproc_wrapper.AudioProcWrapper.
+ DEFAULT_APM_SIMULATOR_BIN_PATH),
+ evaluator=evaluation.ApmModuleEvaluator())
+
+        # Nonexistent input files to be silently created.
+ input_files = [
+ os.path.join(self._tmp_path, 'pure_tone-440_1000.wav'),
+ os.path.join(self._tmp_path, 'pure_tone-1000_500.wav'),
+ ]
+        self.assertFalse(
+            any(os.path.exists(input_file) for input_file in input_files))
+
+ # The input files are created during the simulation.
+ simulator.Run(config_filepaths=['apm_configs/default.json'],
+ capture_input_filepaths=input_files,
+ test_data_generator_names=['identity'],
+ eval_score_names=['audio_level_peak'],
+ output_dir=self._output_path)
+        self.assertTrue(
+            all(os.path.exists(input_file) for input_file in input_files))
+
+    def testPureToneGenerationWithTotalHarmonicDistortion(self):
+ logging.warning = mock.MagicMock(name='warning')
+
+        # Instantiate the simulator.
+ simulator = simulation.ApmModuleSimulator(
+ test_data_generator_factory=(
+ test_data_generation_factory.TestDataGeneratorFactory(
+ aechen_ir_database_path='',
+ noise_tracks_path='',
+ copy_with_identity=False)),
+ evaluation_score_factory=(
+ eval_scores_factory.EvaluationScoreWorkerFactory(
+ polqa_tool_bin_path=os.path.join(os.path.dirname(__file__),
+ 'fake_polqa'),
+ echo_metric_tool_bin_path=None)),
+ ap_wrapper=audioproc_wrapper.AudioProcWrapper(
+ audioproc_wrapper.AudioProcWrapper.
+ DEFAULT_APM_SIMULATOR_BIN_PATH),
+ evaluator=evaluation.ApmModuleEvaluator())
+
+ # What to simulate.
+ config_files = ['apm_configs/default.json']
+ input_files = [os.path.join(self._tmp_path, 'pure_tone-440_1000.wav')]
+ eval_scores = ['thd']
+
+ # Should work.
+ simulator.Run(config_filepaths=config_files,
+ capture_input_filepaths=input_files,
+ test_data_generator_names=['identity'],
+ eval_score_names=eval_scores,
+ output_dir=self._output_path)
+ self.assertFalse(logging.warning.called)
+
+ # Warning expected.
+ simulator.Run(
+ config_filepaths=config_files,
+ capture_input_filepaths=input_files,
+ test_data_generator_names=['white_noise'], # Not allowed with THD.
+ eval_score_names=eval_scores,
+ output_dir=self._output_path)
+ logging.warning.assert_called_with('the evaluation failed: %s', (
+ 'The THD score cannot be used with any test data generator other than '
+ '"identity"'))
diff --git a/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/sound_level.cc b/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/sound_level.cc
new file mode 100644
index 0000000000..1f24d9d370
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/sound_level.cc
@@ -0,0 +1,127 @@
+// Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+
+#include <algorithm>
+#include <array>
+#include <cmath>
+#include <fstream>
+
+#include "absl/flags/flag.h"
+#include "absl/flags/parse.h"
+#include "common_audio/include/audio_util.h"
+#include "common_audio/wav_file.h"
+#include "rtc_base/logging.h"
+
+ABSL_FLAG(std::string, i, "", "Input wav file");
+ABSL_FLAG(std::string, oc, "", "Config output file");
+ABSL_FLAG(std::string, ol, "", "Levels output file");
+ABSL_FLAG(float, a, 5.f, "Attack (ms)");
+ABSL_FLAG(float, d, 20.f, "Decay (ms)");
+ABSL_FLAG(int, f, 10, "Frame length (ms)");
+
+namespace webrtc {
+namespace test {
+namespace {
+
+constexpr int kMaxSampleRate = 48000;
+constexpr uint8_t kMaxFrameLenMs = 30;
+constexpr size_t kMaxFrameLen = kMaxFrameLenMs * kMaxSampleRate / 1000;
+
+const double kOneDbReduction = DbToRatio(-1.0);
+
+int main(int argc, char* argv[]) {
+ absl::ParseCommandLine(argc, argv);
+ // Check parameters.
+ if (absl::GetFlag(FLAGS_f) < 1 || absl::GetFlag(FLAGS_f) > kMaxFrameLenMs) {
+ RTC_LOG(LS_ERROR) << "Invalid frame length (min: 1, max: " << kMaxFrameLenMs
+ << ")";
+ return 1;
+ }
+ if (absl::GetFlag(FLAGS_a) < 0 || absl::GetFlag(FLAGS_d) < 0) {
+ RTC_LOG(LS_ERROR) << "Attack and decay must be non-negative";
+ return 1;
+ }
+
+ // Open wav input file and check properties.
+ const std::string input_file = absl::GetFlag(FLAGS_i);
+ const std::string config_output_file = absl::GetFlag(FLAGS_oc);
+ const std::string levels_output_file = absl::GetFlag(FLAGS_ol);
+ WavReader wav_reader(input_file);
+ if (wav_reader.num_channels() != 1) {
+ RTC_LOG(LS_ERROR) << "Only mono wav files supported";
+ return 1;
+ }
+ if (wav_reader.sample_rate() > kMaxSampleRate) {
+ RTC_LOG(LS_ERROR) << "Beyond maximum sample rate (" << kMaxSampleRate
+ << ")";
+ return 1;
+ }
+
+ // Map from milliseconds to samples.
+ const size_t audio_frame_length = rtc::CheckedDivExact(
+ absl::GetFlag(FLAGS_f) * wav_reader.sample_rate(), 1000);
+ auto time_const = [](double c) {
+ return std::pow(kOneDbReduction, absl::GetFlag(FLAGS_f) / c);
+ };
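+  // With zero input, each frame multiplies the level by the returned
+  // coefficient, so after c milliseconds (c / f frames) the level has dropped
+  // by exactly 1 dB.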
+ const float attack =
+ absl::GetFlag(FLAGS_a) == 0.0 ? 0.0 : time_const(absl::GetFlag(FLAGS_a));
+ const float decay =
+ absl::GetFlag(FLAGS_d) == 0.0 ? 0.0 : time_const(absl::GetFlag(FLAGS_d));
+
+ // Write config to file.
+ std::ofstream out_config(config_output_file);
+ out_config << "{"
+ "'frame_len_ms': "
+ << absl::GetFlag(FLAGS_f)
+ << ", "
+ "'attack_ms': "
+ << absl::GetFlag(FLAGS_a)
+ << ", "
+ "'decay_ms': "
+ << absl::GetFlag(FLAGS_d) << "}\n";
+ out_config.close();
+
+ // Measure level frame-by-frame.
+ std::ofstream out_levels(levels_output_file, std::ofstream::binary);
+ std::array<int16_t, kMaxFrameLen> samples;
+ float level_prev = 0.f;
+ while (true) {
+ // Process frame.
+ const auto read_samples =
+ wav_reader.ReadSamples(audio_frame_length, samples.data());
+ if (read_samples < audio_frame_length)
+ break; // EOF.
+
+ // Frame peak level.
+ std::transform(samples.begin(), samples.begin() + audio_frame_length,
+ samples.begin(), [](int16_t s) { return std::abs(s); });
+ const int16_t peak_level = *std::max_element(
+ samples.cbegin(), samples.cbegin() + audio_frame_length);
+ const float level_curr = static_cast<float>(peak_level) / 32768.f;
+
+ // Temporal smoothing.
+ auto smooth = [&level_prev, &level_curr](float c) {
+ return (1.0 - c) * level_curr + c * level_prev;
+ };
+ level_prev = smooth(level_curr > level_prev ? attack : decay);
+
+ // Write output.
+ out_levels.write(reinterpret_cast<const char*>(&level_prev), sizeof(float));
+ }
+ out_levels.close();
+
+ return 0;
+}
+
+} // namespace
+} // namespace test
+} // namespace webrtc
+
+int main(int argc, char* argv[]) {
+ return webrtc::test::main(argc, argv);
+}
diff --git a/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/test_data_generation.py b/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/test_data_generation.py
new file mode 100644
index 0000000000..7e86faccec
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/test_data_generation.py
@@ -0,0 +1,526 @@
+# Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+"""Test data generators producing signals pairs intended to be used to
+test the APM module. Each pair consists of a noisy input and a reference signal.
+The former is used as APM input and it is generated by adding noise to a
+clean audio track. The reference is the expected APM output.
+
+Throughout this file, the following naming convention is used:
+ - input signal: the clean signal (e.g., speech),
+  - noise signal: the noise to be added to the input signal (e.g., white
+ noise, Gaussian noise),
+ - noisy signal: input + noise.
+The noise signal may or may not be a function of the clean signal. For
+instance, white noise is independently generated, whereas reverberation is
+obtained by convolving the input signal with an impulse response.
+"""
+
+import logging
+import os
+import shutil
+import sys
+
+try:
+ import scipy.io
+except ImportError:
+ logging.critical('Cannot import the third-party Python package scipy')
+ sys.exit(1)
+
+from . import data_access
+from . import exceptions
+from . import signal_processing
+
+
+class TestDataGenerator(object):
+ """Abstract class responsible for the generation of noisy signals.
+
+ Given a clean signal, it generates two streams named noisy signal and
+ reference. The former is the clean signal deteriorated by the noise source,
+ the latter goes through the same deterioration process, but more "gently".
+ Noisy signal and reference are produced so that the reference is the signal
+ expected at the output of the APM module when the latter is fed with the noisy
+ signal.
+
+    A test data generator generates one or more pairs.
+ """
+
+ NAME = None
+ REGISTERED_CLASSES = {}
+
+ def __init__(self, output_directory_prefix):
+ self._output_directory_prefix = output_directory_prefix
+ # Init dictionaries with one entry for each test data generator
+ # configuration (e.g., different SNRs).
+ # Noisy audio track files (stored separately in a cache folder).
+ self._noisy_signal_filepaths = None
+ # Path to be used for the APM simulation output files.
+ self._apm_output_paths = None
+ # Reference audio track files (stored separately in a cache folder).
+ self._reference_signal_filepaths = None
+ self.Clear()
+
+ @classmethod
+ def RegisterClass(cls, class_to_register):
+ """Registers a TestDataGenerator implementation.
+
+ Decorator to automatically register the classes that extend
+ TestDataGenerator.
+ Example usage:
+
+ @TestDataGenerator.RegisterClass
+ class IdentityGenerator(TestDataGenerator):
+ pass
+ """
+ cls.REGISTERED_CLASSES[class_to_register.NAME] = class_to_register
+ return class_to_register
+
+ @property
+ def config_names(self):
+ return self._noisy_signal_filepaths.keys()
+
+ @property
+ def noisy_signal_filepaths(self):
+ return self._noisy_signal_filepaths
+
+ @property
+ def apm_output_paths(self):
+ return self._apm_output_paths
+
+ @property
+ def reference_signal_filepaths(self):
+ return self._reference_signal_filepaths
+
+ def Generate(self, input_signal_filepath, test_data_cache_path,
+ base_output_path):
+ """Generates a set of noisy input and reference audiotrack file pairs.
+
+ This method initializes an empty set of pairs and calls the _Generate()
+ method implemented in a concrete class.
+
+ Args:
+ input_signal_filepath: path to the clean input audio track file.
+ test_data_cache_path: path to the cache of the generated audio track
+ files.
+ base_output_path: base path where output is written.
+ """
+ self.Clear()
+ self._Generate(input_signal_filepath, test_data_cache_path,
+ base_output_path)
+
+ def Clear(self):
+ """Clears the generated output path dictionaries.
+ """
+ self._noisy_signal_filepaths = {}
+ self._apm_output_paths = {}
+ self._reference_signal_filepaths = {}
+
+ def _Generate(self, input_signal_filepath, test_data_cache_path,
+ base_output_path):
+ """Abstract method to be implemented in each concrete class.
+ """
+ raise NotImplementedError()
+
+ def _AddNoiseSnrPairs(self, base_output_path, noisy_mix_filepaths,
+ snr_value_pairs):
+ """Adds noisy-reference signal pairs.
+
+ Args:
+ base_output_path: noisy tracks base output path.
+ noisy_mix_filepaths: nested dictionary of noisy signal paths organized
+ by noisy track name and SNR level.
+ snr_value_pairs: list of SNR pairs.
+ """
+ for noise_track_name in noisy_mix_filepaths:
+            for snr_noisy, snr_reference in snr_value_pairs:
+                config_name = '{0}_{1:d}_{2:d}_SNR'.format(
+                    noise_track_name, snr_noisy, snr_reference)
+                output_path = self._MakeDir(base_output_path, config_name)
+                self._AddNoiseReferenceFilesPair(
+                    config_name=config_name,
+                    noisy_signal_filepath=noisy_mix_filepaths[noise_track_name]
+                    [snr_noisy],
+                    reference_signal_filepath=noisy_mix_filepaths[
+                        noise_track_name][snr_reference],
+ output_path=output_path)
+
+ def _AddNoiseReferenceFilesPair(self, config_name, noisy_signal_filepath,
+ reference_signal_filepath, output_path):
+ """Adds one noisy-reference signal pair.
+
+ Args:
+ config_name: name of the APM configuration.
+ noisy_signal_filepath: path to noisy audio track file.
+ reference_signal_filepath: path to reference audio track file.
+ output_path: APM output path.
+ """
+ assert config_name not in self._noisy_signal_filepaths
+ self._noisy_signal_filepaths[config_name] = os.path.abspath(
+ noisy_signal_filepath)
+ self._apm_output_paths[config_name] = os.path.abspath(output_path)
+ self._reference_signal_filepaths[config_name] = os.path.abspath(
+ reference_signal_filepath)
+
+ def _MakeDir(self, base_output_path, test_data_generator_config_name):
+ output_path = os.path.join(
+ base_output_path,
+ self._output_directory_prefix + test_data_generator_config_name)
+ data_access.MakeDirectory(output_path)
+ return output_path
+
+
+@TestDataGenerator.RegisterClass
+class IdentityTestDataGenerator(TestDataGenerator):
+ """Generator that adds no noise.
+
+ Both the noisy and the reference signals are the input signal.
+ """
+
+ NAME = 'identity'
+
+ def __init__(self, output_directory_prefix, copy_with_identity):
+ TestDataGenerator.__init__(self, output_directory_prefix)
+ self._copy_with_identity = copy_with_identity
+
+ @property
+ def copy_with_identity(self):
+ return self._copy_with_identity
+
+ def _Generate(self, input_signal_filepath, test_data_cache_path,
+ base_output_path):
+ config_name = 'default'
+ output_path = self._MakeDir(base_output_path, config_name)
+
+ if self._copy_with_identity:
+ input_signal_filepath_new = os.path.join(
+ test_data_cache_path,
+ os.path.split(input_signal_filepath)[1])
+            logging.info('copying %s to %s', input_signal_filepath,
+                         input_signal_filepath_new)
+ shutil.copy(input_signal_filepath, input_signal_filepath_new)
+ input_signal_filepath = input_signal_filepath_new
+
+ self._AddNoiseReferenceFilesPair(
+ config_name=config_name,
+ noisy_signal_filepath=input_signal_filepath,
+ reference_signal_filepath=input_signal_filepath,
+ output_path=output_path)
+
+
+@TestDataGenerator.RegisterClass
+class WhiteNoiseTestDataGenerator(TestDataGenerator):
+ """Generator that adds white noise.
+ """
+
+ NAME = 'white_noise'
+
+    # Each pair holds the SNR of the noisy signal and the SNR of the reference
+    # signal. The reference (second value of each pair) always has less noise,
+    # i.e., its SNR is 10 dB higher.
+ _SNR_VALUE_PAIRS = [
+ [20, 30], # Smallest noise.
+ [10, 20],
+ [5, 15],
+ [0, 10], # Largest noise.
+ ]
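+    # E.g., the pair [0, 10] yields the config name '0_10_SNR': the noisy
+    # signal is mixed at 0 dB SNR and the reference at 10 dB SNR.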
+
+ _NOISY_SIGNAL_FILENAME_TEMPLATE = 'noise_{0:d}_SNR.wav'
+
+ def __init__(self, output_directory_prefix):
+ TestDataGenerator.__init__(self, output_directory_prefix)
+
+ def _Generate(self, input_signal_filepath, test_data_cache_path,
+ base_output_path):
+ # Load the input signal.
+ input_signal = signal_processing.SignalProcessingUtils.LoadWav(
+ input_signal_filepath)
+
+ # Create the noise track.
+ noise_signal = signal_processing.SignalProcessingUtils.GenerateWhiteNoise(
+ input_signal)
+
+ # Create the noisy mixes (once for each unique SNR value).
+ noisy_mix_filepaths = {}
+ snr_values = set(
+ [snr for pair in self._SNR_VALUE_PAIRS for snr in pair])
+ for snr in snr_values:
+ noisy_signal_filepath = os.path.join(
+ test_data_cache_path,
+ self._NOISY_SIGNAL_FILENAME_TEMPLATE.format(snr))
+
+ # Create and save if not done.
+ if not os.path.exists(noisy_signal_filepath):
+ # Create noisy signal.
+ noisy_signal = signal_processing.SignalProcessingUtils.MixSignals(
+ input_signal, noise_signal, snr)
+
+ # Save.
+ signal_processing.SignalProcessingUtils.SaveWav(
+ noisy_signal_filepath, noisy_signal)
+
+ # Add file to the collection of mixes.
+ noisy_mix_filepaths[snr] = noisy_signal_filepath
+
+ # Add all the noisy-reference signal pairs.
+        for snr_noisy, snr_reference in self._SNR_VALUE_PAIRS:
+            config_name = '{0:d}_{1:d}_SNR'.format(snr_noisy, snr_reference)
+ output_path = self._MakeDir(base_output_path, config_name)
+ self._AddNoiseReferenceFilesPair(
+ config_name=config_name,
+ noisy_signal_filepath=noisy_mix_filepaths[snr_noisy],
+                reference_signal_filepath=noisy_mix_filepaths[snr_reference],
+ output_path=output_path)
+
+
+# TODO(alessiob): remove comment when class implemented.
+# @TestDataGenerator.RegisterClass
+class NarrowBandNoiseTestDataGenerator(TestDataGenerator):
+ """Generator that adds narrow-band noise.
+ """
+
+ NAME = 'narrow_band_noise'
+
+ def __init__(self, output_directory_prefix):
+ TestDataGenerator.__init__(self, output_directory_prefix)
+
+ def _Generate(self, input_signal_filepath, test_data_cache_path,
+ base_output_path):
+ # TODO(alessiob): implement.
+ pass
+
+
+@TestDataGenerator.RegisterClass
+class AdditiveNoiseTestDataGenerator(TestDataGenerator):
+ """Generator that adds noise loops.
+
+    This generator uses all the wav files in a given path (default: noise_tracks/)
+    and mixes them with the clean speech at different (hard-coded) target SNRs.
+ """
+
+ NAME = 'additive_noise'
+ _NOISY_SIGNAL_FILENAME_TEMPLATE = '{0}_{1:d}_SNR.wav'
+
+ DEFAULT_NOISE_TRACKS_PATH = os.path.join(os.path.dirname(__file__),
+ os.pardir, 'noise_tracks')
+
+ # TODO(alessiob): Make the list of SNR pairs customizable.
+    # Each pair holds the SNR of the noisy signal and the SNR of the reference
+    # signal. The reference (second value of each pair) always has less noise,
+    # i.e., its SNR is 10 dB higher.
+ _SNR_VALUE_PAIRS = [
+ [20, 30], # Smallest noise.
+ [10, 20],
+ [5, 15],
+ [0, 10], # Largest noise.
+ ]
+
+ def __init__(self, output_directory_prefix, noise_tracks_path):
+ TestDataGenerator.__init__(self, output_directory_prefix)
+ self._noise_tracks_path = noise_tracks_path
+ self._noise_tracks_file_names = [
+ n for n in os.listdir(self._noise_tracks_path)
+ if n.lower().endswith('.wav')
+ ]
+ if len(self._noise_tracks_file_names) == 0:
+ raise exceptions.InitializationException(
+ 'No wav files found in the noise tracks path %s' %
+ (self._noise_tracks_path))
+
+ def _Generate(self, input_signal_filepath, test_data_cache_path,
+ base_output_path):
+ """Generates test data pairs using environmental noise.
+
+ For each noise track and pair of SNR values, the following two audio tracks
+ are created: the noisy signal and the reference signal. The former is
+        obtained by mixing the (clean) input signal with the corresponding noise
+        track while enforcing the target SNR.
+ """
+ # Init.
+ snr_values = set(
+ [snr for pair in self._SNR_VALUE_PAIRS for snr in pair])
+
+ # Load the input signal.
+ input_signal = signal_processing.SignalProcessingUtils.LoadWav(
+ input_signal_filepath)
+
+ noisy_mix_filepaths = {}
+ for noise_track_filename in self._noise_tracks_file_names:
+ # Load the noise track.
+ noise_track_name, _ = os.path.splitext(noise_track_filename)
+ noise_track_filepath = os.path.join(self._noise_tracks_path,
+ noise_track_filename)
+ if not os.path.exists(noise_track_filepath):
+ logging.error('cannot find the <%s> noise track',
+ noise_track_filename)
+ raise exceptions.FileNotFoundError()
+
+ noise_signal = signal_processing.SignalProcessingUtils.LoadWav(
+ noise_track_filepath)
+
+ # Create the noisy mixes (once for each unique SNR value).
+ noisy_mix_filepaths[noise_track_name] = {}
+ for snr in snr_values:
+ noisy_signal_filepath = os.path.join(
+ test_data_cache_path,
+ self._NOISY_SIGNAL_FILENAME_TEMPLATE.format(
+ noise_track_name, snr))
+
+ # Create and save if not done.
+ if not os.path.exists(noisy_signal_filepath):
+ # Create noisy signal.
+ noisy_signal = signal_processing.SignalProcessingUtils.MixSignals(
+ input_signal,
+ noise_signal,
+ snr,
+ pad_noise=signal_processing.SignalProcessingUtils.
+ MixPadding.LOOP)
+
+ # Save.
+ signal_processing.SignalProcessingUtils.SaveWav(
+ noisy_signal_filepath, noisy_signal)
+
+ # Add file to the collection of mixes.
+ noisy_mix_filepaths[noise_track_name][
+ snr] = noisy_signal_filepath
+
+ # Add all the noise-SNR pairs.
+ self._AddNoiseSnrPairs(base_output_path, noisy_mix_filepaths,
+ self._SNR_VALUE_PAIRS)
+
+
+@TestDataGenerator.RegisterClass
+class ReverberationTestDataGenerator(TestDataGenerator):
+ """Generator that adds reverberation noise.
+
+ TODO(alessiob): Make this class more generic since the impulse response can be
+ anything (not just reverberation); call it e.g.,
+ ConvolutionalNoiseTestDataGenerator.
+ """
+
+ NAME = 'reverberation'
+
+ _IMPULSE_RESPONSES = {
+ 'lecture': 'air_binaural_lecture_0_0_1.mat', # Long echo.
+ 'booth': 'air_binaural_booth_0_0_1.mat', # Short echo.
+ }
+ _MAX_IMPULSE_RESPONSE_LENGTH = None
+
+    # Each pair holds the SNR of the noisy signal and the SNR of the reference
+    # signal. The reference (second value of each pair) always has less noise,
+    # i.e., its SNR is 5 dB higher.
+ _SNR_VALUE_PAIRS = [
+ [3, 8], # Smallest noise.
+ [-3, 2], # Largest noise.
+ ]
+
+ _NOISE_TRACK_FILENAME_TEMPLATE = '{0}.wav'
+ _NOISY_SIGNAL_FILENAME_TEMPLATE = '{0}_{1:d}_SNR.wav'
+
+ def __init__(self, output_directory_prefix, aechen_ir_database_path):
+ TestDataGenerator.__init__(self, output_directory_prefix)
+ self._aechen_ir_database_path = aechen_ir_database_path
+
+ def _Generate(self, input_signal_filepath, test_data_cache_path,
+ base_output_path):
+ """Generates test data pairs using reverberation noise.
+
+ For each impulse response, one noise track is created. For each impulse
+        response and pair of SNR values, the following two audio tracks are
+        created: the noisy signal and the reference signal. The former is
+        obtained by mixing the (clean) input signal with the corresponding noise
+        track while enforcing the target SNR.
+ """
+ # Init.
+ snr_values = set(
+ [snr for pair in self._SNR_VALUE_PAIRS for snr in pair])
+
+ # Load the input signal.
+ input_signal = signal_processing.SignalProcessingUtils.LoadWav(
+ input_signal_filepath)
+
+ noisy_mix_filepaths = {}
+ for impulse_response_name in self._IMPULSE_RESPONSES:
+ noise_track_filename = self._NOISE_TRACK_FILENAME_TEMPLATE.format(
+ impulse_response_name)
+ noise_track_filepath = os.path.join(test_data_cache_path,
+ noise_track_filename)
+ noise_signal = None
+ try:
+ # Load noise track.
+ noise_signal = signal_processing.SignalProcessingUtils.LoadWav(
+ noise_track_filepath)
+ except exceptions.FileNotFoundError:
+ # Generate noise track by applying the impulse response.
+ impulse_response_filepath = os.path.join(
+ self._aechen_ir_database_path,
+ self._IMPULSE_RESPONSES[impulse_response_name])
+ noise_signal = self._GenerateNoiseTrack(
+ noise_track_filepath, input_signal,
+ impulse_response_filepath)
+ assert noise_signal is not None
+
+ # Create the noisy mixes (once for each unique SNR value).
+ noisy_mix_filepaths[impulse_response_name] = {}
+ for snr in snr_values:
+ noisy_signal_filepath = os.path.join(
+ test_data_cache_path,
+ self._NOISY_SIGNAL_FILENAME_TEMPLATE.format(
+ impulse_response_name, snr))
+
+ # Create and save if not done.
+ if not os.path.exists(noisy_signal_filepath):
+ # Create noisy signal.
+ noisy_signal = signal_processing.SignalProcessingUtils.MixSignals(
+ input_signal, noise_signal, snr)
+
+ # Save.
+ signal_processing.SignalProcessingUtils.SaveWav(
+ noisy_signal_filepath, noisy_signal)
+
+ # Add file to the collection of mixes.
+ noisy_mix_filepaths[impulse_response_name][
+ snr] = noisy_signal_filepath
+
+ # Add all the noise-SNR pairs.
+ self._AddNoiseSnrPairs(base_output_path, noisy_mix_filepaths,
+ self._SNR_VALUE_PAIRS)
+
+ def _GenerateNoiseTrack(self, noise_track_filepath, input_signal,
+ impulse_response_filepath):
+ """Generates noise track.
+
+ Generate a signal by convolving input_signal with the impulse response in
+ impulse_response_filepath; then save to noise_track_filepath.
+
+ Args:
+ noise_track_filepath: output file path for the noise track.
+ input_signal: (clean) input signal samples.
+ impulse_response_filepath: impulse response file path.
+
+ Returns:
+ AudioSegment instance.
+ """
+ # Load impulse response.
+ data = scipy.io.loadmat(impulse_response_filepath)
+ impulse_response = data['h_air'].flatten()
+ if self._MAX_IMPULSE_RESPONSE_LENGTH is not None:
+ logging.info('truncating impulse response from %d to %d samples',
+ len(impulse_response),
+ self._MAX_IMPULSE_RESPONSE_LENGTH)
+            impulse_response = (
+                impulse_response[:self._MAX_IMPULSE_RESPONSE_LENGTH])
+
+ # Apply impulse response.
+ processed_signal = (
+ signal_processing.SignalProcessingUtils.ApplyImpulseResponse(
+ input_signal, impulse_response))
+
+ # Save.
+ signal_processing.SignalProcessingUtils.SaveWav(
+ noise_track_filepath, processed_signal)
+
+ return processed_signal
diff --git a/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/test_data_generation_factory.py b/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/test_data_generation_factory.py
new file mode 100644
index 0000000000..948888e775
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/test_data_generation_factory.py
@@ -0,0 +1,71 @@
+# Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+"""TestDataGenerator factory class.
+"""
+
+import logging
+
+from . import exceptions
+from . import test_data_generation
+
+
+class TestDataGeneratorFactory(object):
+ """Factory class used to create test data generators.
+
+    Usage: create a factory by passing to the constructor the parameters with
+    which the generators will be produced.
+ """
+
+ def __init__(self, aechen_ir_database_path, noise_tracks_path,
+ copy_with_identity):
+ """Ctor.
+
+ Args:
+          aechen_ir_database_path: Path to the Aachen Impulse Response (AIR)
+            database.
+ noise_tracks_path: Path to the noise tracks to add.
+ copy_with_identity: Flag indicating whether the identity generator has to
+ make copies of the clean speech input files.
+ """
+ self._output_directory_prefix = None
+ self._aechen_ir_database_path = aechen_ir_database_path
+ self._noise_tracks_path = noise_tracks_path
+ self._copy_with_identity = copy_with_identity
+
+ def SetOutputDirectoryPrefix(self, prefix):
+ self._output_directory_prefix = prefix
+
+ def GetInstance(self, test_data_generators_class):
+ """Creates an TestDataGenerator instance given a class object.
+
+ Args:
+ test_data_generators_class: TestDataGenerator class object (not an
+ instance).
+
+ Returns:
+ TestDataGenerator instance.
+ """
+ if self._output_directory_prefix is None:
+ raise exceptions.InitializationException(
+ 'The output directory prefix for test data generators is not set'
+ )
+ logging.debug('factory producing %s', test_data_generators_class)
+
+ if test_data_generators_class == (
+ test_data_generation.IdentityTestDataGenerator):
+ return test_data_generation.IdentityTestDataGenerator(
+ self._output_directory_prefix, self._copy_with_identity)
+ elif test_data_generators_class == (
+ test_data_generation.ReverberationTestDataGenerator):
+ return test_data_generation.ReverberationTestDataGenerator(
+ self._output_directory_prefix, self._aechen_ir_database_path)
+ elif test_data_generators_class == (
+ test_data_generation.AdditiveNoiseTestDataGenerator):
+ return test_data_generation.AdditiveNoiseTestDataGenerator(
+ self._output_directory_prefix, self._noise_tracks_path)
+ else:
+ return test_data_generators_class(self._output_directory_prefix)
diff --git a/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/test_data_generation_unittest.py b/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/test_data_generation_unittest.py
new file mode 100644
index 0000000000..f75098ae2c
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/test_data_generation_unittest.py
@@ -0,0 +1,207 @@
+# Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+"""Unit tests for the test_data_generation module.
+"""
+
+import os
+import shutil
+import tempfile
+import unittest
+
+import numpy as np
+import scipy.io
+
+from . import test_data_generation
+from . import test_data_generation_factory
+from . import signal_processing
+
+
+class TestTestDataGenerators(unittest.TestCase):
+ """Unit tests for the test_data_generation module.
+ """
+
+ def setUp(self):
+ """Create temporary folders."""
+ self._base_output_path = tempfile.mkdtemp()
+ self._test_data_cache_path = tempfile.mkdtemp()
+ self._fake_air_db_path = tempfile.mkdtemp()
+
+ # Fake AIR DB impulse responses.
+ # TODO(alessiob): ReverberationTestDataGenerator will change to allow custom
+ # impulse responses. When changed, the coupling below between
+ # impulse_response_mat_file_names and
+ # ReverberationTestDataGenerator._IMPULSE_RESPONSES can be removed.
+ impulse_response_mat_file_names = [
+ 'air_binaural_lecture_0_0_1.mat',
+ 'air_binaural_booth_0_0_1.mat',
+ ]
+ for impulse_response_mat_file_name in impulse_response_mat_file_names:
+ data = {'h_air': np.random.rand(1, 1000).astype('<f8')}
+ scipy.io.savemat(
+ os.path.join(self._fake_air_db_path,
+ impulse_response_mat_file_name), data)
+
+ def tearDown(self):
+ """Recursively delete temporary folders."""
+ shutil.rmtree(self._base_output_path)
+ shutil.rmtree(self._test_data_cache_path)
+ shutil.rmtree(self._fake_air_db_path)
+
+ def testTestDataGenerators(self):
+ # Preliminary check.
+ self.assertTrue(os.path.exists(self._base_output_path))
+ self.assertTrue(os.path.exists(self._test_data_cache_path))
+
+ # Check that there is at least one registered test data generator.
+ registered_classes = (
+ test_data_generation.TestDataGenerator.REGISTERED_CLASSES)
+ self.assertIsInstance(registered_classes, dict)
+ self.assertGreater(len(registered_classes), 0)
+
+        # Instantiate the test data generator factory.
+        generators_factory = test_data_generation_factory.TestDataGeneratorFactory(
+            aechen_ir_database_path=self._fake_air_db_path,
+            noise_tracks_path=(test_data_generation.AdditiveNoiseTestDataGenerator
+                               .DEFAULT_NOISE_TRACKS_PATH),
+            copy_with_identity=False)
+ generators_factory.SetOutputDirectoryPrefix('datagen-')
+
+ # Use a simple input file as clean input signal.
+ input_signal_filepath = os.path.join(os.getcwd(), 'probing_signals',
+ 'tone-880.wav')
+ self.assertTrue(os.path.exists(input_signal_filepath))
+
+ # Load input signal.
+ input_signal = signal_processing.SignalProcessingUtils.LoadWav(
+ input_signal_filepath)
+
+ # Try each registered test data generator.
+ for generator_name in registered_classes:
+            # Instantiate the test data generator.
+ generator = generators_factory.GetInstance(
+ registered_classes[generator_name])
+
+ # Generate the noisy input - reference pairs.
+ generator.Generate(input_signal_filepath=input_signal_filepath,
+ test_data_cache_path=self._test_data_cache_path,
+ base_output_path=self._base_output_path)
+
+ # Perform checks.
+ self._CheckGeneratedPairsListSizes(generator)
+ self._CheckGeneratedPairsSignalDurations(generator, input_signal)
+ self._CheckGeneratedPairsOutputPaths(generator)
+
+    def testIdentityTestDataGenerator(self):
+ # Preliminary check.
+ self.assertTrue(os.path.exists(self._base_output_path))
+ self.assertTrue(os.path.exists(self._test_data_cache_path))
+
+ # Use a simple input file as clean input signal.
+ input_signal_filepath = os.path.join(os.getcwd(), 'probing_signals',
+ 'tone-880.wav')
+ self.assertTrue(os.path.exists(input_signal_filepath))
+
+ def GetNoiseReferenceFilePaths(identity_generator):
+ noisy_signal_filepaths = identity_generator.noisy_signal_filepaths
+ reference_signal_filepaths = identity_generator.reference_signal_filepaths
+            assert (noisy_signal_filepaths.keys() ==
+                    reference_signal_filepaths.keys())
+            assert len(noisy_signal_filepaths) == 1
+            # dict.keys() is not subscriptable in Python 3; take the only key.
+            key = next(iter(noisy_signal_filepaths))
+ return noisy_signal_filepaths[key], reference_signal_filepaths[key]
+
+ # Test the `copy_with_identity` flag.
+ for copy_with_identity in [False, True]:
+            # Instantiate the generator through the factory.
+ factory = test_data_generation_factory.TestDataGeneratorFactory(
+ aechen_ir_database_path='',
+ noise_tracks_path='',
+ copy_with_identity=copy_with_identity)
+ factory.SetOutputDirectoryPrefix('datagen-')
+ generator = factory.GetInstance(
+ test_data_generation.IdentityTestDataGenerator)
+ # Check `copy_with_identity` is set correctly.
+ self.assertEqual(copy_with_identity, generator.copy_with_identity)
+
+ # Generate test data and extract the paths to the noise and the reference
+ # files.
+ generator.Generate(input_signal_filepath=input_signal_filepath,
+ test_data_cache_path=self._test_data_cache_path,
+ base_output_path=self._base_output_path)
+ noisy_signal_filepath, reference_signal_filepath = (
+ GetNoiseReferenceFilePaths(generator))
+
+ # Check that a copy is made if and only if `copy_with_identity` is True.
+ if copy_with_identity:
+ self.assertNotEqual(noisy_signal_filepath,
+ input_signal_filepath)
+ self.assertNotEqual(reference_signal_filepath,
+ input_signal_filepath)
+ else:
+ self.assertEqual(noisy_signal_filepath, input_signal_filepath)
+ self.assertEqual(reference_signal_filepath,
+ input_signal_filepath)
+
+ def _CheckGeneratedPairsListSizes(self, generator):
+ config_names = generator.config_names
+ number_of_pairs = len(config_names)
+ self.assertEqual(number_of_pairs,
+ len(generator.noisy_signal_filepaths))
+ self.assertEqual(number_of_pairs, len(generator.apm_output_paths))
+ self.assertEqual(number_of_pairs,
+ len(generator.reference_signal_filepaths))
+
+ def _CheckGeneratedPairsSignalDurations(self, generator, input_signal):
+ """Checks duration of the generated signals.
+
+ Checks that the noisy input and the reference tracks are audio files
+ with duration equal to or greater than that of the input signal.
+
+ Args:
+ generator: TestDataGenerator instance.
+ input_signal: AudioSegment instance.
+ """
+ input_signal_length = (
+ signal_processing.SignalProcessingUtils.CountSamples(input_signal))
+
+ # Iterate over the noisy signal - reference pairs.
+ for config_name in generator.config_names:
+ # Load the noisy input file.
+ noisy_signal_filepath = generator.noisy_signal_filepaths[
+ config_name]
+ noisy_signal = signal_processing.SignalProcessingUtils.LoadWav(
+ noisy_signal_filepath)
+
+ # Check noisy input signal length.
+ noisy_signal_length = (signal_processing.SignalProcessingUtils.
+ CountSamples(noisy_signal))
+ self.assertGreaterEqual(noisy_signal_length, input_signal_length)
+
+ # Load the reference file.
+ reference_signal_filepath = generator.reference_signal_filepaths[
+ config_name]
+ reference_signal = signal_processing.SignalProcessingUtils.LoadWav(
+ reference_signal_filepath)
+
+            # Check reference signal length.
+ reference_signal_length = (signal_processing.SignalProcessingUtils.
+ CountSamples(reference_signal))
+ self.assertGreaterEqual(reference_signal_length,
+ input_signal_length)
+
+ def _CheckGeneratedPairsOutputPaths(self, generator):
+ """Checks that the output path created by the generator exists.
+
+ Args:
+ generator: TestDataGenerator instance.
+ """
+ # Iterate over the noisy signal - reference pairs.
+ for config_name in generator.config_names:
+ output_path = generator.apm_output_paths[config_name]
+ self.assertTrue(os.path.exists(output_path))
diff --git a/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/vad.cc b/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/vad.cc
new file mode 100644
index 0000000000..b47f6221cb
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/vad.cc
@@ -0,0 +1,103 @@
+// Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+
+#include "common_audio/vad/include/vad.h"
+
+#include <array>
+#include <fstream>
+#include <memory>
+
+#include "absl/flags/flag.h"
+#include "absl/flags/parse.h"
+#include "common_audio/wav_file.h"
+#include "rtc_base/logging.h"
+
+ABSL_FLAG(std::string, i, "", "Input wav file");
+ABSL_FLAG(std::string, o, "", "VAD output file");
+
+namespace webrtc {
+namespace test {
+namespace {
+
+// The allowed values are 10, 20 or 30 ms.
+constexpr uint8_t kAudioFrameLengthMilliseconds = 30;
+constexpr int kMaxSampleRate = 48000;
+constexpr size_t kMaxFrameLen =
+ kAudioFrameLengthMilliseconds * kMaxSampleRate / 1000;
+
+constexpr uint8_t kBitmaskBuffSize = 8;
+
+int main(int argc, char* argv[]) {
+ absl::ParseCommandLine(argc, argv);
+ const std::string input_file = absl::GetFlag(FLAGS_i);
+ const std::string output_file = absl::GetFlag(FLAGS_o);
+ // Open wav input file and check properties.
+ WavReader wav_reader(input_file);
+ if (wav_reader.num_channels() != 1) {
+ RTC_LOG(LS_ERROR) << "Only mono wav files supported";
+ return 1;
+ }
+ if (wav_reader.sample_rate() > kMaxSampleRate) {
+ RTC_LOG(LS_ERROR) << "Beyond maximum sample rate (" << kMaxSampleRate
+ << ")";
+ return 1;
+ }
+ const size_t audio_frame_length = rtc::CheckedDivExact(
+ kAudioFrameLengthMilliseconds * wav_reader.sample_rate(), 1000);
+ if (audio_frame_length > kMaxFrameLen) {
+ RTC_LOG(LS_ERROR) << "The frame size and/or the sample rate are too large.";
+ return 1;
+ }
+
+ // Create output file and write header.
+ std::ofstream out_file(output_file, std::ofstream::binary);
+ const char audio_frame_length_ms = kAudioFrameLengthMilliseconds;
+ out_file.write(&audio_frame_length_ms, 1); // Header.
+
+ // Run VAD and write decisions.
+ std::unique_ptr<Vad> vad = CreateVad(Vad::Aggressiveness::kVadNormal);
+ std::array<int16_t, kMaxFrameLen> samples;
+ char buff = 0; // Buffer to write one bit per frame.
+ uint8_t next = 0; // Points to the next bit to write in `buff`.
+ while (true) {
+ // Process frame.
+ const auto read_samples =
+ wav_reader.ReadSamples(audio_frame_length, samples.data());
+ if (read_samples < audio_frame_length)
+ break;
+ const auto is_speech = vad->VoiceActivity(
+ samples.data(), audio_frame_length, wav_reader.sample_rate());
+
+ // Write output.
+ buff = is_speech ? buff | (1 << next) : buff & ~(1 << next);
+ if (++next == kBitmaskBuffSize) {
+ out_file.write(&buff, 1); // Flush.
+ buff = 0; // Reset.
+ next = 0;
+ }
+ }
+
+ // Finalize.
+ char extra_bits = 0;
+ if (next > 0) {
+ extra_bits = kBitmaskBuffSize - next;
+ out_file.write(&buff, 1); // Flush.
+ }
+ out_file.write(&extra_bits, 1);
+ out_file.close();
+
+ return 0;
+}
+
+} // namespace
+} // namespace test
+} // namespace webrtc
+
+int main(int argc, char* argv[]) {
+ return webrtc::test::main(argc, argv);
+}
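
The bitmask layout written above is compact enough to deserve a decoding sketch. Inferred from the writer code: one header byte with the frame length in ms, then the VAD decisions packed one bit per frame (LSB first within each byte), then a trailing byte holding the number of padding bits in the last bitmask byte. A minimal reader under those assumptions (the function name is illustrative):

    #include <cstdint>
    #include <fstream>
    #include <iterator>
    #include <string>
    #include <vector>

    std::vector<bool> ReadVadDecisions(const std::string& path) {
      std::ifstream in(path, std::ifstream::binary);
      std::vector<char> raw((std::istreambuf_iterator<char>(in)),
                            std::istreambuf_iterator<char>());
      std::vector<bool> decisions;
      if (raw.size() < 2)
        return decisions;  // Header and trailer bytes are always written.
      const int padding_bits = raw.back();  // Unused bits in the last bitmask byte.
      // raw[0] holds the frame length in ms; bytes 1..size-2 hold the bitmask.
      for (size_t i = 1; i + 1 < raw.size(); ++i) {
        for (int bit = 0; bit < 8; ++bit) {
          decisions.push_back((static_cast<uint8_t>(raw[i]) >> bit) & 1);
        }
      }
      decisions.resize(decisions.size() - padding_bits);  // Drop the padding.
      return decisions;
    }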
diff --git a/third_party/libwebrtc/modules/audio_processing/test/runtime_setting_util.cc b/third_party/libwebrtc/modules/audio_processing/test/runtime_setting_util.cc
new file mode 100644
index 0000000000..4899d2d459
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/runtime_setting_util.cc
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/test/runtime_setting_util.h"
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+void ReplayRuntimeSetting(AudioProcessing* apm,
+ const webrtc::audioproc::RuntimeSetting& setting) {
+ RTC_CHECK(apm);
+ // TODO(bugs.webrtc.org/9138): Add ability to handle different types
+ // of settings. Currently CapturePreGain, CaptureFixedPostGain and
+ // PlayoutVolumeChange are supported.
+ RTC_CHECK(setting.has_capture_pre_gain() ||
+ setting.has_capture_fixed_post_gain() ||
+ setting.has_playout_volume_change());
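+  // Note: given the RTC_CHECK above, the two trailing branches below
+  // (playout_audio_device_change and capture_output_used) are unreachable
+  // until the check is extended to cover those setting types.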
+
+ if (setting.has_capture_pre_gain()) {
+ apm->SetRuntimeSetting(
+ AudioProcessing::RuntimeSetting::CreateCapturePreGain(
+ setting.capture_pre_gain()));
+ } else if (setting.has_capture_fixed_post_gain()) {
+ apm->SetRuntimeSetting(
+ AudioProcessing::RuntimeSetting::CreateCaptureFixedPostGain(
+ setting.capture_fixed_post_gain()));
+ } else if (setting.has_playout_volume_change()) {
+ apm->SetRuntimeSetting(
+ AudioProcessing::RuntimeSetting::CreatePlayoutVolumeChange(
+ setting.playout_volume_change()));
+ } else if (setting.has_playout_audio_device_change()) {
+ apm->SetRuntimeSetting(
+ AudioProcessing::RuntimeSetting::CreatePlayoutAudioDeviceChange(
+ {setting.playout_audio_device_change().id(),
+ setting.playout_audio_device_change().max_volume()}));
+ } else if (setting.has_capture_output_used()) {
+ apm->SetRuntimeSetting(
+ AudioProcessing::RuntimeSetting::CreateCaptureOutputUsedSetting(
+ setting.capture_output_used()));
+ }
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/test/runtime_setting_util.h b/third_party/libwebrtc/modules/audio_processing/test/runtime_setting_util.h
new file mode 100644
index 0000000000..d8cbe82076
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/runtime_setting_util.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_TEST_RUNTIME_SETTING_UTIL_H_
+#define MODULES_AUDIO_PROCESSING_TEST_RUNTIME_SETTING_UTIL_H_
+
+#include "modules/audio_processing/include/audio_processing.h"
+#include "modules/audio_processing/test/protobuf_utils.h"
+
+namespace webrtc {
+
+void ReplayRuntimeSetting(AudioProcessing* apm,
+ const webrtc::audioproc::RuntimeSetting& setting);
+}  // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_TEST_RUNTIME_SETTING_UTIL_H_
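
A minimal usage sketch (the hard-coded gain is illustrative; in the simulator the settings come from parsed aecdump events):

    // Hypothetical event: apply a 2x capture pre-gain to an existing
    // webrtc::AudioProcessing* named apm.
    webrtc::audioproc::RuntimeSetting setting;
    setting.set_capture_pre_gain(2.f);
    webrtc::ReplayRuntimeSetting(apm, setting);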
diff --git a/third_party/libwebrtc/modules/audio_processing/test/simulator_buffers.cc b/third_party/libwebrtc/modules/audio_processing/test/simulator_buffers.cc
new file mode 100644
index 0000000000..458f6ced76
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/simulator_buffers.cc
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/test/simulator_buffers.h"
+
+#include "modules/audio_processing/test/audio_buffer_tools.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace test {
+
+SimulatorBuffers::SimulatorBuffers(int render_input_sample_rate_hz,
+ int capture_input_sample_rate_hz,
+ int render_output_sample_rate_hz,
+ int capture_output_sample_rate_hz,
+ size_t num_render_input_channels,
+ size_t num_capture_input_channels,
+ size_t num_render_output_channels,
+ size_t num_capture_output_channels) {
+ Random rand_gen(42);
+ CreateConfigAndBuffer(render_input_sample_rate_hz, num_render_input_channels,
+ &rand_gen, &render_input_buffer, &render_input_config,
+ &render_input, &render_input_samples);
+
+ CreateConfigAndBuffer(render_output_sample_rate_hz,
+ num_render_output_channels, &rand_gen,
+ &render_output_buffer, &render_output_config,
+ &render_output, &render_output_samples);
+
+ CreateConfigAndBuffer(capture_input_sample_rate_hz,
+ num_capture_input_channels, &rand_gen,
+ &capture_input_buffer, &capture_input_config,
+ &capture_input, &capture_input_samples);
+
+ CreateConfigAndBuffer(capture_output_sample_rate_hz,
+ num_capture_output_channels, &rand_gen,
+ &capture_output_buffer, &capture_output_config,
+ &capture_output, &capture_output_samples);
+
+ UpdateInputBuffers();
+}
+
+SimulatorBuffers::~SimulatorBuffers() = default;
+
+void SimulatorBuffers::CreateConfigAndBuffer(
+ int sample_rate_hz,
+ size_t num_channels,
+ Random* rand_gen,
+ std::unique_ptr<AudioBuffer>* buffer,
+ StreamConfig* config,
+ std::vector<float*>* buffer_data,
+ std::vector<float>* buffer_data_samples) {
+ int samples_per_channel = rtc::CheckedDivExact(sample_rate_hz, 100);
+ *config = StreamConfig(sample_rate_hz, num_channels);
+ buffer->reset(
+ new AudioBuffer(config->sample_rate_hz(), config->num_channels(),
+ config->sample_rate_hz(), config->num_channels(),
+ config->sample_rate_hz(), config->num_channels()));
+
+ buffer_data_samples->resize(samples_per_channel * num_channels);
+ for (auto& v : *buffer_data_samples) {
+ v = rand_gen->Rand<float>();
+ }
+
+ buffer_data->resize(num_channels);
+ for (size_t ch = 0; ch < num_channels; ++ch) {
+ (*buffer_data)[ch] = &(*buffer_data_samples)[ch * samples_per_channel];
+ }
+}
+
+void SimulatorBuffers::UpdateInputBuffers() {
+ test::CopyVectorToAudioBuffer(capture_input_config, capture_input_samples,
+ capture_input_buffer.get());
+ test::CopyVectorToAudioBuffer(render_input_config, render_input_samples,
+ render_input_buffer.get());
+}
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/test/simulator_buffers.h b/third_party/libwebrtc/modules/audio_processing/test/simulator_buffers.h
new file mode 100644
index 0000000000..36dcf301a2
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/simulator_buffers.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_TEST_SIMULATOR_BUFFERS_H_
+#define MODULES_AUDIO_PROCESSING_TEST_SIMULATOR_BUFFERS_H_
+
+#include <memory>
+#include <vector>
+
+#include "modules/audio_processing/audio_buffer.h"
+#include "modules/audio_processing/include/audio_processing.h"
+#include "rtc_base/random.h"
+
+namespace webrtc {
+namespace test {
+
+struct SimulatorBuffers {
+ SimulatorBuffers(int render_input_sample_rate_hz,
+ int capture_input_sample_rate_hz,
+ int render_output_sample_rate_hz,
+ int capture_output_sample_rate_hz,
+ size_t num_render_input_channels,
+ size_t num_capture_input_channels,
+ size_t num_render_output_channels,
+ size_t num_capture_output_channels);
+ ~SimulatorBuffers();
+
+ void CreateConfigAndBuffer(int sample_rate_hz,
+ size_t num_channels,
+ Random* rand_gen,
+ std::unique_ptr<AudioBuffer>* buffer,
+ StreamConfig* config,
+ std::vector<float*>* buffer_data,
+ std::vector<float>* buffer_data_samples);
+
+ void UpdateInputBuffers();
+
+ std::unique_ptr<AudioBuffer> render_input_buffer;
+ std::unique_ptr<AudioBuffer> capture_input_buffer;
+ std::unique_ptr<AudioBuffer> render_output_buffer;
+ std::unique_ptr<AudioBuffer> capture_output_buffer;
+ StreamConfig render_input_config;
+ StreamConfig capture_input_config;
+ StreamConfig render_output_config;
+ StreamConfig capture_output_config;
+ std::vector<float*> render_input;
+ std::vector<float> render_input_samples;
+ std::vector<float*> capture_input;
+ std::vector<float> capture_input_samples;
+ std::vector<float*> render_output;
+ std::vector<float> render_output_samples;
+ std::vector<float*> capture_output;
+ std::vector<float> capture_output_samples;
+};
+
+} // namespace test
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_TEST_SIMULATOR_BUFFERS_H_
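
A usage sketch with arbitrary (illustrative) rates and channel counts; the constructor fills the sample vectors with random data and calls UpdateInputBuffers() itself, which copies those vectors into the input AudioBuffers:

    // 48 kHz mono on all four streams.
    webrtc::test::SimulatorBuffers buffers(48000, 48000, 48000, 48000,
                                           /*num_render_input_channels=*/1,
                                           /*num_capture_input_channels=*/1,
                                           /*num_render_output_channels=*/1,
                                           /*num_capture_output_channels=*/1);
    // buffers.capture_input_buffer and buffers.render_input_buffer now hold
    // data suitable for feeding an AudioProcessing instance under test.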
diff --git a/third_party/libwebrtc/modules/audio_processing/test/test_utils.cc b/third_party/libwebrtc/modules/audio_processing/test/test_utils.cc
new file mode 100644
index 0000000000..9aeebe5155
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/test_utils.cc
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/test/test_utils.h"
+
+#include <string>
+#include <utility>
+
+#include "absl/strings/string_view.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/system/arch.h"
+
+namespace webrtc {
+
+ChannelBufferWavReader::ChannelBufferWavReader(std::unique_ptr<WavReader> file)
+ : file_(std::move(file)) {}
+
+ChannelBufferWavReader::~ChannelBufferWavReader() = default;
+
+bool ChannelBufferWavReader::Read(ChannelBuffer<float>* buffer) {
+ RTC_CHECK_EQ(file_->num_channels(), buffer->num_channels());
+ interleaved_.resize(buffer->size());
+ if (file_->ReadSamples(interleaved_.size(), &interleaved_[0]) !=
+ interleaved_.size()) {
+ return false;
+ }
+
+ FloatS16ToFloat(&interleaved_[0], interleaved_.size(), &interleaved_[0]);
+ Deinterleave(&interleaved_[0], buffer->num_frames(), buffer->num_channels(),
+ buffer->channels());
+ return true;
+}
+
+ChannelBufferWavWriter::ChannelBufferWavWriter(std::unique_ptr<WavWriter> file)
+ : file_(std::move(file)) {}
+
+ChannelBufferWavWriter::~ChannelBufferWavWriter() = default;
+
+void ChannelBufferWavWriter::Write(const ChannelBuffer<float>& buffer) {
+ RTC_CHECK_EQ(file_->num_channels(), buffer.num_channels());
+ interleaved_.resize(buffer.size());
+ Interleave(buffer.channels(), buffer.num_frames(), buffer.num_channels(),
+ &interleaved_[0]);
+ FloatToFloatS16(&interleaved_[0], interleaved_.size(), &interleaved_[0]);
+ file_->WriteSamples(&interleaved_[0], interleaved_.size());
+}
+
+ChannelBufferVectorWriter::ChannelBufferVectorWriter(std::vector<float>* output)
+ : output_(output) {
+ RTC_DCHECK(output_);
+}
+
+ChannelBufferVectorWriter::~ChannelBufferVectorWriter() = default;
+
+void ChannelBufferVectorWriter::Write(const ChannelBuffer<float>& buffer) {
+ // Account for sample rate changes throughout a simulation.
+ interleaved_buffer_.resize(buffer.size());
+ Interleave(buffer.channels(), buffer.num_frames(), buffer.num_channels(),
+ interleaved_buffer_.data());
+ size_t old_size = output_->size();
+ output_->resize(old_size + interleaved_buffer_.size());
+ FloatToFloatS16(interleaved_buffer_.data(), interleaved_buffer_.size(),
+ output_->data() + old_size);
+}
+
+FILE* OpenFile(absl::string_view filename, absl::string_view mode) {
+ std::string filename_str(filename);
+ FILE* file = fopen(filename_str.c_str(), std::string(mode).c_str());
+ if (!file) {
+ printf("Unable to open file %s\n", filename_str.c_str());
+ exit(1);
+ }
+ return file;
+}
+
+void SetFrameSampleRate(Int16FrameData* frame, int sample_rate_hz) {
+ frame->sample_rate_hz = sample_rate_hz;
+ frame->samples_per_channel =
+ AudioProcessing::kChunkSizeMs * sample_rate_hz / 1000;
+}
+
+} // namespace webrtc
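
As a usage sketch of the two wav adapters defined above (the file names and the 10 ms chunk size are illustrative):

    #include <memory>
    #include <string>

    #include "common_audio/channel_buffer.h"
    #include "common_audio/wav_file.h"
    #include "modules/audio_processing/test/test_utils.h"

    void CopyWav(const std::string& in_path, const std::string& out_path) {
      auto wav_in = std::make_unique<webrtc::WavReader>(in_path);
      const int rate = wav_in->sample_rate();
      const size_t channels = wav_in->num_channels();
      webrtc::ChannelBufferWavReader reader(std::move(wav_in));
      webrtc::ChannelBufferWavWriter writer(
          std::make_unique<webrtc::WavWriter>(out_path, rate, channels));
      // Process in 10 ms chunks; Read() returns false on a short final chunk.
      webrtc::ChannelBuffer<float> buffer(rate / 100, channels);
      while (reader.Read(&buffer)) {
        writer.Write(buffer);
      }
    }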
diff --git a/third_party/libwebrtc/modules/audio_processing/test/test_utils.h b/third_party/libwebrtc/modules/audio_processing/test/test_utils.h
new file mode 100644
index 0000000000..bf82f9d66d
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/test_utils.h
@@ -0,0 +1,170 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_TEST_TEST_UTILS_H_
+#define MODULES_AUDIO_PROCESSING_TEST_TEST_UTILS_H_
+
+#include <math.h>
+
+#include <iterator>
+#include <limits>
+#include <memory>
+#include <sstream> // no-presubmit-check TODO(webrtc:8982)
+#include <string>
+#include <vector>
+
+#include "absl/strings/string_view.h"
+#include "common_audio/channel_buffer.h"
+#include "common_audio/wav_file.h"
+#include "modules/audio_processing/include/audio_processing.h"
+
+namespace webrtc {
+
+static const AudioProcessing::Error kNoErr = AudioProcessing::kNoError;
+#define EXPECT_NOERR(expr) EXPECT_EQ(kNoErr, (expr))
+
+// Encapsulates samples and metadata for an integer frame.
+struct Int16FrameData {
+ // Max data size that matches the data size of the AudioFrame class, providing
+ // storage for 8 channels of 96 kHz data.
+ static const int kMaxDataSizeSamples = 7680;
+
+ Int16FrameData() {
+ sample_rate_hz = 0;
+ num_channels = 0;
+ samples_per_channel = 0;
+ data.fill(0);
+ }
+
+ void CopyFrom(const Int16FrameData& src) {
+ samples_per_channel = src.samples_per_channel;
+ sample_rate_hz = src.sample_rate_hz;
+ num_channels = src.num_channels;
+
+ const size_t length = samples_per_channel * num_channels;
+ RTC_CHECK_LE(length, kMaxDataSizeSamples);
+ memcpy(data.data(), src.data.data(), sizeof(int16_t) * length);
+ }
+ std::array<int16_t, kMaxDataSizeSamples> data;
+ int32_t sample_rate_hz;
+ size_t num_channels;
+ size_t samples_per_channel;
+};
+
+// Reads ChannelBuffers from a provided WavReader.
+class ChannelBufferWavReader final {
+ public:
+ explicit ChannelBufferWavReader(std::unique_ptr<WavReader> file);
+ ~ChannelBufferWavReader();
+
+ ChannelBufferWavReader(const ChannelBufferWavReader&) = delete;
+ ChannelBufferWavReader& operator=(const ChannelBufferWavReader&) = delete;
+
+ // Reads data from the file according to the `buffer` format. Returns false if
+ // a full buffer can't be read from the file.
+ bool Read(ChannelBuffer<float>* buffer);
+
+ private:
+ std::unique_ptr<WavReader> file_;
+ std::vector<float> interleaved_;
+};
+
+// Writes ChannelBuffers to a provided WavWriter.
+class ChannelBufferWavWriter final {
+ public:
+ explicit ChannelBufferWavWriter(std::unique_ptr<WavWriter> file);
+ ~ChannelBufferWavWriter();
+
+ ChannelBufferWavWriter(const ChannelBufferWavWriter&) = delete;
+ ChannelBufferWavWriter& operator=(const ChannelBufferWavWriter&) = delete;
+
+ void Write(const ChannelBuffer<float>& buffer);
+
+ private:
+ std::unique_ptr<WavWriter> file_;
+ std::vector<float> interleaved_;
+};
+
+// Takes a pointer to a vector. Allows appending the samples of channel buffers
+// to the given vector, by interleaving the samples and converting them to float
+// S16.
+class ChannelBufferVectorWriter final {
+ public:
+ explicit ChannelBufferVectorWriter(std::vector<float>* output);
+ ChannelBufferVectorWriter(const ChannelBufferVectorWriter&) = delete;
+ ChannelBufferVectorWriter& operator=(const ChannelBufferVectorWriter&) =
+ delete;
+ ~ChannelBufferVectorWriter();
+
+ // Creates an interleaved copy of `buffer`, converts the samples to float S16
+ // and appends the result to output_.
+ void Write(const ChannelBuffer<float>& buffer);
+
+ private:
+ std::vector<float> interleaved_buffer_;
+ std::vector<float>* output_;
+};
+
+// Exits on failure; do not use in unit tests.
+FILE* OpenFile(absl::string_view filename, absl::string_view mode);
+
+void SetFrameSampleRate(Int16FrameData* frame, int sample_rate_hz);
+
+template <typename T>
+void SetContainerFormat(int sample_rate_hz,
+ size_t num_channels,
+ Int16FrameData* frame,
+ std::unique_ptr<ChannelBuffer<T> >* cb) {
+ SetFrameSampleRate(frame, sample_rate_hz);
+ frame->num_channels = num_channels;
+ cb->reset(new ChannelBuffer<T>(frame->samples_per_channel, num_channels));
+}
+
+template <typename T>
+float ComputeSNR(const T* ref, const T* test, size_t length, float* variance) {
+ float mse = 0;
+ float mean = 0;
+ *variance = 0;
+ for (size_t i = 0; i < length; ++i) {
+ T error = ref[i] - test[i];
+ mse += error * error;
+ *variance += ref[i] * ref[i];
+ mean += ref[i];
+ }
+ mse /= length;
+ *variance /= length;
+ mean /= length;
+ *variance -= mean * mean;
+
+ float snr = 100; // We assign 100 dB to the zero-error case.
+ if (mse > 0)
+ snr = 10 * log10(*variance / mse);
+ return snr;
+}
+
+// Returns a vector<T> parsed from whitespace delimited values in to_parse,
+// or an empty vector if the string could not be parsed.
+template <typename T>
+std::vector<T> ParseList(absl::string_view to_parse) {
+ std::vector<T> values;
+
+ std::istringstream str( // no-presubmit-check TODO(webrtc:8982)
+ std::string{to_parse});
+ std::copy(
+ std::istream_iterator<T>(str), // no-presubmit-check TODO(webrtc:8982)
+ std::istream_iterator<T>(), // no-presubmit-check TODO(webrtc:8982)
+ std::back_inserter(values));
+
+ return values;
+}
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_TEST_TEST_UTILS_H_
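
For reference, a small (illustrative) use of ComputeSNR(): with identical signals the MSE is zero, so the 100 dB cap is returned and `variance` receives the variance of the reference signal:

    #include <array>
    #include <cstdio>

    #include "modules/audio_processing/test/test_utils.h"

    void SnrExample() {
      std::array<float, 4> ref = {1.f, -1.f, 1.f, -1.f};
      std::array<float, 4> test = ref;  // Identical copy: zero error.
      float variance = 0.f;
      const float snr =
          webrtc::ComputeSNR(ref.data(), test.data(), ref.size(), &variance);
      std::printf("SNR: %.1f dB, variance: %.2f\n", snr, variance);  // 100.0, 1.00
    }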
diff --git a/third_party/libwebrtc/modules/audio_processing/test/unittest.proto b/third_party/libwebrtc/modules/audio_processing/test/unittest.proto
new file mode 100644
index 0000000000..07d1cda6c8
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/unittest.proto
@@ -0,0 +1,48 @@
+syntax = "proto2";
+option optimize_for = LITE_RUNTIME;
+package webrtc.audioproc;
+
+message Test {
+ optional int32 num_reverse_channels = 1;
+ optional int32 num_input_channels = 2;
+ optional int32 num_output_channels = 3;
+ optional int32 sample_rate = 4;
+
+ message Frame {
+ }
+
+ repeated Frame frame = 5;
+
+ optional int32 analog_level_average = 6;
+ optional int32 max_output_average = 7;
+ optional int32 has_voice_count = 9;
+ optional int32 is_saturated_count = 10;
+
+ message EchoMetrics {
+ optional float echo_return_loss = 1;
+ optional float echo_return_loss_enhancement = 2;
+ optional float divergent_filter_fraction = 3;
+ optional float residual_echo_likelihood = 4;
+ optional float residual_echo_likelihood_recent_max = 5;
+ }
+
+ repeated EchoMetrics echo_metrics = 11;
+
+ message DelayMetrics {
+ optional int32 median = 1;
+ optional int32 std = 2;
+ }
+
+ repeated DelayMetrics delay_metrics = 12;
+
+ optional float rms_dbfs_average = 13;
+
+ optional float ns_speech_probability_average = 14;
+
+ optional bool use_aec_extended_filter = 15;
+}
+
+message OutputData {
+ repeated Test test = 1;
+}
+
diff --git a/third_party/libwebrtc/modules/audio_processing/test/wav_based_simulator.cc b/third_party/libwebrtc/modules/audio_processing/test/wav_based_simulator.cc
new file mode 100644
index 0000000000..ee87f9e1a8
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/wav_based_simulator.cc
@@ -0,0 +1,202 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/test/wav_based_simulator.h"
+
+#include <stdio.h>
+
+#include <iostream>
+
+#include "absl/strings/string_view.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+#include "modules/audio_processing/test/test_utils.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/system/file_wrapper.h"
+
+namespace webrtc {
+namespace test {
+
+std::vector<WavBasedSimulator::SimulationEventType>
+WavBasedSimulator::GetCustomEventChain(absl::string_view filename) {
+ std::vector<WavBasedSimulator::SimulationEventType> call_chain;
+ FileWrapper file_wrapper = FileWrapper::OpenReadOnly(filename);
+
+ RTC_CHECK(file_wrapper.is_open())
+      << "Could not open the custom call order file";
+
+ char c;
+ size_t num_read = file_wrapper.Read(&c, sizeof(char));
+ while (num_read > 0) {
+ switch (c) {
+ case 'r':
+ call_chain.push_back(SimulationEventType::kProcessReverseStream);
+ break;
+ case 'c':
+ call_chain.push_back(SimulationEventType::kProcessStream);
+ break;
+ case '\n':
+ break;
+ default:
+ RTC_FATAL() << "Incorrect custom call order file";
+ }
+
+ num_read = file_wrapper.Read(&c, sizeof(char));
+ }
+
+ return call_chain;
+}
+
+WavBasedSimulator::WavBasedSimulator(
+ const SimulationSettings& settings,
+ rtc::scoped_refptr<AudioProcessing> audio_processing,
+ std::unique_ptr<AudioProcessingBuilder> ap_builder)
+ : AudioProcessingSimulator(settings,
+ std::move(audio_processing),
+ std::move(ap_builder)) {
+ if (settings_.call_order_input_filename) {
+ call_chain_ = WavBasedSimulator::GetCustomEventChain(
+ *settings_.call_order_input_filename);
+ } else {
+ call_chain_ = WavBasedSimulator::GetDefaultEventChain();
+ }
+}
+
+WavBasedSimulator::~WavBasedSimulator() = default;
+
+std::vector<WavBasedSimulator::SimulationEventType>
+WavBasedSimulator::GetDefaultEventChain() {
+ std::vector<WavBasedSimulator::SimulationEventType> call_chain(2);
+ call_chain[0] = SimulationEventType::kProcessStream;
+ call_chain[1] = SimulationEventType::kProcessReverseStream;
+ return call_chain;
+}
+
+void WavBasedSimulator::PrepareProcessStreamCall() {
+ if (settings_.fixed_interface) {
+ fwd_frame_.CopyFrom(*in_buf_);
+ }
+ ap_->set_stream_key_pressed(settings_.override_key_pressed.value_or(false));
+
+ if (!settings_.use_stream_delay || *settings_.use_stream_delay) {
+ RTC_CHECK_EQ(AudioProcessing::kNoError,
+ ap_->set_stream_delay_ms(
+ settings_.stream_delay ? *settings_.stream_delay : 0));
+ }
+}
+
+void WavBasedSimulator::PrepareReverseProcessStreamCall() {
+ if (settings_.fixed_interface) {
+ rev_frame_.CopyFrom(*reverse_in_buf_);
+ }
+}
+
+void WavBasedSimulator::Process() {
+ ConfigureAudioProcessor();
+
+ Initialize();
+
+ bool samples_left_to_process = true;
+ int call_chain_index = 0;
+ int capture_frames_since_init = 0;
+ constexpr int kInitIndex = 1;
+ while (samples_left_to_process) {
+ switch (call_chain_[call_chain_index]) {
+ case SimulationEventType::kProcessStream:
+ SelectivelyToggleDataDumping(kInitIndex, capture_frames_since_init);
+
+ samples_left_to_process = HandleProcessStreamCall();
+ ++capture_frames_since_init;
+ break;
+ case SimulationEventType::kProcessReverseStream:
+ if (settings_.reverse_input_filename) {
+ samples_left_to_process = HandleProcessReverseStreamCall();
+ }
+ break;
+ default:
+ RTC_CHECK_NOTREACHED();
+ }
+
+ call_chain_index = (call_chain_index + 1) % call_chain_.size();
+ }
+
+ DetachAecDump();
+}
+
+void WavBasedSimulator::Analyze() {
+ std::cout << "Inits:" << std::endl;
+ std::cout << "1: -->" << std::endl;
+ std::cout << " Time:" << std::endl;
+ std::cout << " Capture: 0 s (0 frames) " << std::endl;
+ std::cout << " Render: 0 s (0 frames)" << std::endl;
+}
+
+bool WavBasedSimulator::HandleProcessStreamCall() {
+ bool samples_left_to_process = buffer_reader_->Read(in_buf_.get());
+ if (samples_left_to_process) {
+ PrepareProcessStreamCall();
+ ProcessStream(settings_.fixed_interface);
+ }
+ return samples_left_to_process;
+}
+
+bool WavBasedSimulator::HandleProcessReverseStreamCall() {
+ bool samples_left_to_process =
+ reverse_buffer_reader_->Read(reverse_in_buf_.get());
+ if (samples_left_to_process) {
+ PrepareReverseProcessStreamCall();
+ ProcessReverseStream(settings_.fixed_interface);
+ }
+ return samples_left_to_process;
+}
+
+void WavBasedSimulator::Initialize() {
+ std::unique_ptr<WavReader> in_file(
+ new WavReader(settings_.input_filename->c_str()));
+ int input_sample_rate_hz = in_file->sample_rate();
+ int input_num_channels = in_file->num_channels();
+ buffer_reader_.reset(new ChannelBufferWavReader(std::move(in_file)));
+
+ int output_sample_rate_hz = settings_.output_sample_rate_hz
+ ? *settings_.output_sample_rate_hz
+ : input_sample_rate_hz;
+ int output_num_channels = settings_.output_num_channels
+ ? *settings_.output_num_channels
+ : input_num_channels;
+
+ int reverse_sample_rate_hz = 48000;
+ int reverse_num_channels = 1;
+ int reverse_output_sample_rate_hz = 48000;
+ int reverse_output_num_channels = 1;
+ if (settings_.reverse_input_filename) {
+ std::unique_ptr<WavReader> reverse_in_file(
+ new WavReader(settings_.reverse_input_filename->c_str()));
+ reverse_sample_rate_hz = reverse_in_file->sample_rate();
+ reverse_num_channels = reverse_in_file->num_channels();
+ reverse_buffer_reader_.reset(
+ new ChannelBufferWavReader(std::move(reverse_in_file)));
+
+ reverse_output_sample_rate_hz =
+ settings_.reverse_output_sample_rate_hz
+ ? *settings_.reverse_output_sample_rate_hz
+ : reverse_sample_rate_hz;
+ reverse_output_num_channels = settings_.reverse_output_num_channels
+ ? *settings_.reverse_output_num_channels
+ : reverse_num_channels;
+ }
+
+ SetupBuffersConfigsOutputs(
+ input_sample_rate_hz, output_sample_rate_hz, reverse_sample_rate_hz,
+ reverse_output_sample_rate_hz, input_num_channels, output_num_channels,
+ reverse_num_channels, reverse_output_num_channels);
+}
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/test/wav_based_simulator.h b/third_party/libwebrtc/modules/audio_processing/test/wav_based_simulator.h
new file mode 100644
index 0000000000..44e9ee2b7f
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/test/wav_based_simulator.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_TEST_WAV_BASED_SIMULATOR_H_
+#define MODULES_AUDIO_PROCESSING_TEST_WAV_BASED_SIMULATOR_H_
+
+#include <vector>
+
+#include "absl/strings/string_view.h"
+#include "modules/audio_processing/test/audio_processing_simulator.h"
+
+namespace webrtc {
+namespace test {
+
+// Used to perform an audio processing simulation from wav files.
+class WavBasedSimulator final : public AudioProcessingSimulator {
+ public:
+ WavBasedSimulator(const SimulationSettings& settings,
+ rtc::scoped_refptr<AudioProcessing> audio_processing,
+ std::unique_ptr<AudioProcessingBuilder> ap_builder);
+
+ WavBasedSimulator() = delete;
+ WavBasedSimulator(const WavBasedSimulator&) = delete;
+ WavBasedSimulator& operator=(const WavBasedSimulator&) = delete;
+
+ ~WavBasedSimulator() override;
+
+ // Processes the WAV input.
+ void Process() override;
+
+  // Only analyzes the data for the simulation, instead of performing any
+  // processing.
+ void Analyze() override;
+
+ private:
+ enum SimulationEventType {
+ kProcessStream,
+ kProcessReverseStream,
+ };
+
+ void Initialize();
+ bool HandleProcessStreamCall();
+ bool HandleProcessReverseStreamCall();
+ void PrepareProcessStreamCall();
+ void PrepareReverseProcessStreamCall();
+ static std::vector<SimulationEventType> GetDefaultEventChain();
+ static std::vector<SimulationEventType> GetCustomEventChain(
+ absl::string_view filename);
+
+ std::vector<SimulationEventType> call_chain_;
+};
+
+} // namespace test
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_TEST_WAV_BASED_SIMULATOR_H_
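
For reference, the custom call order file parsed by GetCustomEventChain() is plain text consisting of 'c' (process the capture stream) and 'r' (process the reverse stream) characters; newlines are ignored and any other character is fatal. The default event chain is equivalent to a file repeating

    cr

while, say, a (hypothetical) file containing `ccr` would issue two capture calls per render call.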
diff --git a/third_party/libwebrtc/modules/audio_processing/three_band_filter_bank.cc b/third_party/libwebrtc/modules/audio_processing/three_band_filter_bank.cc
new file mode 100644
index 0000000000..bd1c50477a
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/three_band_filter_bank.cc
@@ -0,0 +1,278 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+// An implementation of a 3-band FIR filter-bank with DCT modulation, similar
+// to the one proposed in "Multirate Signal Processing for Communication
+// Systems" by Fredric J Harris.
+//
+// The idea is to take a heterodyne system and change the order of the
+// components to get something which is efficient to implement digitally.
+//
+// It is possible to separate the filter using the noble identity as follows:
+//
+// H(z) = H0(z^3) + z^-1 * H1(z^3) + z^-2 * H2(z^3)
+//
+// This is used in the analysis stage to first downsample serial to parallel
+// and then filter each branch with one of these polyphase decompositions of the
+// lowpass prototype. Because each filter is only a modulation of the prototype,
+// it is enough to multiply each coefficient by the respective cosine value to
+// shift it to the desired band. But because the cosine period is 12 samples,
+// it requires separating the prototype even further using the noble identity.
+// After filtering and modulating for each band, the output of all filters is
+// accumulated to get the downsampled bands.
+//
+// A similar logic can be applied to the synthesis stage.
+
+#include "modules/audio_processing/three_band_filter_bank.h"
+
+#include <array>
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace {
+
+// Factors to take into account when choosing `kFilterSize`:
+// 1. A higher `kFilterSize` means a faster transition, which ensures less
+//    aliasing. This is especially important when there is non-linear
+//    processing between the splitting and merging.
+// 2. The delay that this filter bank introduces is
+//    `kNumBands` * `kSparsity` * `kFilterSize` / 2, so it increases linearly
+//    with `kFilterSize`; with the constants used here that is
+//    3 * 4 * 4 / 2 = 24 samples (matching the delay stated in the header).
+// 3. The computational complexity also increases linearly with `kFilterSize`.
+
+// The Matlab code to generate these `kFilterCoeffs` is:
+//
+// N = kNumBands * kSparsity * kFilterSize - 1;
+// h = fir1(N, 1 / (2 * kNumBands), kaiser(N + 1, 3.5));
+// reshape(h, kNumBands * kSparsity, kFilterSize);
+//
+// The code below uses the values of kFilterSize, kNumBands and kSparsity
+// specified in the header.
+
+// Because the total bandwidth of the lower and higher bands is double that of
+// the middle one (because of the spectrum parity), the low-pass prototype has
+// half the bandwidth of 1 / (2 * `kNumBands`) and is then shifted with cosine
+// modulation to the right places.
+// A Kaiser window is used because of its flexibility, and the alpha is set to
+// 3.5, since that gives a stop-band attenuation of 40 dB, ensuring a fast
+// transition.
+
+constexpr int kSubSampling = ThreeBandFilterBank::kNumBands;
+constexpr int kDctSize = ThreeBandFilterBank::kNumBands;
+static_assert(ThreeBandFilterBank::kNumBands *
+ ThreeBandFilterBank::kSplitBandSize ==
+ ThreeBandFilterBank::kFullBandSize,
+ "The full band must be split in equally sized subbands");
+
+const float
+ kFilterCoeffs[ThreeBandFilterBank::kNumNonZeroFilters][kFilterSize] = {
+ {-0.00047749f, -0.00496888f, +0.16547118f, +0.00425496f},
+ {-0.00173287f, -0.01585778f, +0.14989004f, +0.00994113f},
+ {-0.00304815f, -0.02536082f, +0.12154542f, +0.01157993f},
+ {-0.00346946f, -0.02587886f, +0.04760441f, +0.00607594f},
+ {-0.00154717f, -0.01136076f, +0.01387458f, +0.00186353f},
+ {+0.00186353f, +0.01387458f, -0.01136076f, -0.00154717f},
+ {+0.00607594f, +0.04760441f, -0.02587886f, -0.00346946f},
+ {+0.00983212f, +0.08543175f, -0.02982767f, -0.00383509f},
+ {+0.00994113f, +0.14989004f, -0.01585778f, -0.00173287f},
+ {+0.00425496f, +0.16547118f, -0.00496888f, -0.00047749f}};
+
+constexpr int kZeroFilterIndex1 = 3;
+constexpr int kZeroFilterIndex2 = 9;
+
+const float kDctModulation[ThreeBandFilterBank::kNumNonZeroFilters][kDctSize] =
+ {{2.f, 2.f, 2.f},
+ {1.73205077f, 0.f, -1.73205077f},
+ {1.f, -2.f, 1.f},
+ {-1.f, 2.f, -1.f},
+ {-1.73205077f, 0.f, 1.73205077f},
+ {-2.f, -2.f, -2.f},
+ {-1.73205077f, 0.f, 1.73205077f},
+ {-1.f, 2.f, -1.f},
+ {1.f, -2.f, 1.f},
+ {1.73205077f, 0.f, -1.73205077f}};
+
+// Filters the input signal `in` with the filter `filter` using a shift by
+// `in_shift`, taking into account the previous state.
+void FilterCore(
+ rtc::ArrayView<const float, kFilterSize> filter,
+ rtc::ArrayView<const float, ThreeBandFilterBank::kSplitBandSize> in,
+ const int in_shift,
+ rtc::ArrayView<float, ThreeBandFilterBank::kSplitBandSize> out,
+ rtc::ArrayView<float, kMemorySize> state) {
+ constexpr int kMaxInShift = (kStride - 1);
+ RTC_DCHECK_GE(in_shift, 0);
+ RTC_DCHECK_LE(in_shift, kMaxInShift);
+ std::fill(out.begin(), out.end(), 0.f);
+
+ for (int k = 0; k < in_shift; ++k) {
+ for (int i = 0, j = kMemorySize + k - in_shift; i < kFilterSize;
+ ++i, j -= kStride) {
+ out[k] += state[j] * filter[i];
+ }
+ }
+
+ for (int k = in_shift, shift = 0; k < kFilterSize * kStride; ++k, ++shift) {
+ RTC_DCHECK_GE(shift, 0);
+ const int loop_limit = std::min(kFilterSize, 1 + (shift >> kStrideLog2));
+ for (int i = 0, j = shift; i < loop_limit; ++i, j -= kStride) {
+ out[k] += in[j] * filter[i];
+ }
+ for (int i = loop_limit, j = kMemorySize + shift - loop_limit * kStride;
+ i < kFilterSize; ++i, j -= kStride) {
+ out[k] += state[j] * filter[i];
+ }
+ }
+
+ for (int k = kFilterSize * kStride, shift = kFilterSize * kStride - in_shift;
+ k < ThreeBandFilterBank::kSplitBandSize; ++k, ++shift) {
+ for (int i = 0, j = shift; i < kFilterSize; ++i, j -= kStride) {
+ out[k] += in[j] * filter[i];
+ }
+ }
+
+ // Update current state.
+ std::copy(in.begin() + ThreeBandFilterBank::kSplitBandSize - kMemorySize,
+ in.end(), state.begin());
+}
+
+} // namespace
+
+// Because the low-pass filter prototype has half the bandwidth, it is possible
+// to use a DCT to shift it in both directions at the same time, to the center
+// frequencies [1 / 12, 3 / 12, 5 / 12].
+ThreeBandFilterBank::ThreeBandFilterBank() {
+ RTC_DCHECK_EQ(state_analysis_.size(), kNumNonZeroFilters);
+ RTC_DCHECK_EQ(state_synthesis_.size(), kNumNonZeroFilters);
+ for (int k = 0; k < kNumNonZeroFilters; ++k) {
+ RTC_DCHECK_EQ(state_analysis_[k].size(), kMemorySize);
+ RTC_DCHECK_EQ(state_synthesis_[k].size(), kMemorySize);
+
+ state_analysis_[k].fill(0.f);
+ state_synthesis_[k].fill(0.f);
+ }
+}
+
+ThreeBandFilterBank::~ThreeBandFilterBank() = default;
+
+// The analysis can be separated in these steps:
+// 1. Serial to parallel downsampling by a factor of `kNumBands`.
+// 2. Filtering of `kSparsity` different delayed signals with polyphase
+// decomposition of the low-pass prototype filter and upsampled by a factor
+// of `kSparsity`.
+// 3. Modulating with cosines and accumulating to get the desired band.
+void ThreeBandFilterBank::Analysis(
+ rtc::ArrayView<const float, kFullBandSize> in,
+ rtc::ArrayView<const rtc::ArrayView<float>, ThreeBandFilterBank::kNumBands>
+ out) {
+ // Initialize the output to zero.
+ for (int band = 0; band < ThreeBandFilterBank::kNumBands; ++band) {
+ RTC_DCHECK_EQ(out[band].size(), kSplitBandSize);
+ std::fill(out[band].begin(), out[band].end(), 0);
+ }
+
+ for (int downsampling_index = 0; downsampling_index < kSubSampling;
+ ++downsampling_index) {
+ // Downsample to form the filter input.
+ std::array<float, kSplitBandSize> in_subsampled;
+ for (int k = 0; k < kSplitBandSize; ++k) {
+ in_subsampled[k] =
+ in[(kSubSampling - 1) - downsampling_index + kSubSampling * k];
+ }
+
+ for (int in_shift = 0; in_shift < kStride; ++in_shift) {
+ // Choose filter, skip zero filters.
+ const int index = downsampling_index + in_shift * kSubSampling;
+ if (index == kZeroFilterIndex1 || index == kZeroFilterIndex2) {
+ continue;
+ }
+ const int filter_index =
+ index < kZeroFilterIndex1
+ ? index
+ : (index < kZeroFilterIndex2 ? index - 1 : index - 2);
+
+ rtc::ArrayView<const float, kFilterSize> filter(
+ kFilterCoeffs[filter_index]);
+ rtc::ArrayView<const float, kDctSize> dct_modulation(
+ kDctModulation[filter_index]);
+ rtc::ArrayView<float, kMemorySize> state(state_analysis_[filter_index]);
+
+ // Filter.
+ std::array<float, kSplitBandSize> out_subsampled;
+ FilterCore(filter, in_subsampled, in_shift, out_subsampled, state);
+
+      // Modulate and accumulate the output in each band.
+ for (int band = 0; band < ThreeBandFilterBank::kNumBands; ++band) {
+ float* out_band = out[band].data();
+ for (int n = 0; n < kSplitBandSize; ++n) {
+ out_band[n] += dct_modulation[band] * out_subsampled[n];
+ }
+ }
+ }
+ }
+}
+
+// The synthesis can be separated in these steps:
+// 1. Modulating with cosines.
+// 2. Filtering each one with a polyphase decomposition of the low-pass
+// prototype filter upsampled by a factor of `kSparsity` and accumulating
+// `kSparsity` signals with different delays.
+// 3. Parallel to serial upsampling by a factor of `kNumBands`.
+void ThreeBandFilterBank::Synthesis(
+ rtc::ArrayView<const rtc::ArrayView<float>, ThreeBandFilterBank::kNumBands>
+ in,
+ rtc::ArrayView<float, kFullBandSize> out) {
+ std::fill(out.begin(), out.end(), 0);
+ for (int upsampling_index = 0; upsampling_index < kSubSampling;
+ ++upsampling_index) {
+ for (int in_shift = 0; in_shift < kStride; ++in_shift) {
+ // Choose filter, skip zero filters.
+ const int index = upsampling_index + in_shift * kSubSampling;
+ if (index == kZeroFilterIndex1 || index == kZeroFilterIndex2) {
+ continue;
+ }
+ const int filter_index =
+ index < kZeroFilterIndex1
+ ? index
+ : (index < kZeroFilterIndex2 ? index - 1 : index - 2);
+
+ rtc::ArrayView<const float, kFilterSize> filter(
+ kFilterCoeffs[filter_index]);
+ rtc::ArrayView<const float, kDctSize> dct_modulation(
+ kDctModulation[filter_index]);
+ rtc::ArrayView<float, kMemorySize> state(state_synthesis_[filter_index]);
+
+ // Prepare filter input by modulating the banded input.
+ std::array<float, kSplitBandSize> in_subsampled;
+ std::fill(in_subsampled.begin(), in_subsampled.end(), 0.f);
+ for (int band = 0; band < ThreeBandFilterBank::kNumBands; ++band) {
+ RTC_DCHECK_EQ(in[band].size(), kSplitBandSize);
+ const float* in_band = in[band].data();
+ for (int n = 0; n < kSplitBandSize; ++n) {
+ in_subsampled[n] += dct_modulation[band] * in_band[n];
+ }
+ }
+
+ // Filter.
+ std::array<float, kSplitBandSize> out_subsampled;
+ FilterCore(filter, in_subsampled, in_shift, out_subsampled, state);
+
+ // Upsample.
+ constexpr float kUpsamplingScaling = kSubSampling;
+ for (int k = 0; k < kSplitBandSize; ++k) {
+ out[upsampling_index + kSubSampling * k] +=
+ kUpsamplingScaling * out_subsampled[k];
+ }
+ }
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/three_band_filter_bank.h b/third_party/libwebrtc/modules/audio_processing/three_band_filter_bank.h
new file mode 100644
index 0000000000..db66caba4a
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/three_band_filter_bank.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_THREE_BAND_FILTER_BANK_H_
+#define MODULES_AUDIO_PROCESSING_THREE_BAND_FILTER_BANK_H_
+
+#include <array>
+#include <cstring>
+#include <memory>
+#include <vector>
+
+#include "api/array_view.h"
+
+namespace webrtc {
+
+constexpr int kSparsity = 4;
+constexpr int kStrideLog2 = 2;
+constexpr int kStride = 1 << kStrideLog2;
+constexpr int kNumZeroFilters = 2;
+constexpr int kFilterSize = 4;
+constexpr int kMemorySize = kFilterSize * kStride - 1;
+static_assert(kMemorySize == 15,
+ "The memory size must be sufficient to provide memory for the "
+ "shifted filters");
+
+// An implementation of a 3-band FIR filter-bank with DCT modulation, similar
+// to the one proposed in "Multirate Signal Processing for Communication
+// Systems" by Fredric J Harris.
+// The low-pass filter prototype has these characteristics:
+// * Pass-band ripple = 0.3dB
+// * Pass-band frequency = 0.147 (7kHz at 48kHz)
+// * Stop-band attenuation = 40dB
+// * Stop-band frequency = 0.192 (9.2kHz at 48kHz)
+// * Delay = 24 samples (500us at 48kHz)
+// * Linear phase
+// This filter bank does not satisfy perfect reconstruction. The SNR after
+// analysis and synthesis (with no processing in between) is approximately
+// 9.5 dB, depending on the input signal, after compensating for the delay.
+class ThreeBandFilterBank final {
+ public:
+ static const int kNumBands = 3;
+ static const int kFullBandSize = 480;
+ static const int kSplitBandSize =
+ ThreeBandFilterBank::kFullBandSize / ThreeBandFilterBank::kNumBands;
+ static const int kNumNonZeroFilters =
+ kSparsity * ThreeBandFilterBank::kNumBands - kNumZeroFilters;
+
+ ThreeBandFilterBank();
+ ~ThreeBandFilterBank();
+
+ // Splits `in` of size kFullBandSize into 3 downsampled frequency bands in
+ // `out`, each of size 160.
+ void Analysis(rtc::ArrayView<const float, kFullBandSize> in,
+ rtc::ArrayView<const rtc::ArrayView<float>, kNumBands> out);
+
+ // Merges the 3 downsampled frequency bands in `in`, each of size 160, into
+ // `out`, which is of size kFullBandSize.
+ void Synthesis(rtc::ArrayView<const rtc::ArrayView<float>, kNumBands> in,
+ rtc::ArrayView<float, kFullBandSize> out);
+
+ private:
+ std::array<std::array<float, kMemorySize>, kNumNonZeroFilters>
+ state_analysis_;
+ std::array<std::array<float, kMemorySize>, kNumNonZeroFilters>
+ state_synthesis_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_THREE_BAND_FILTER_BANK_H_
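
A minimal usage sketch (the pass-through arrangement is illustrative): one 480-sample full-band frame, i.e. 10 ms at 48 kHz, is split into three 160-sample bands and merged back:

    #include <array>

    #include "api/array_view.h"
    #include "modules/audio_processing/three_band_filter_bank.h"

    void SplitAndMerge(rtc::ArrayView<const float, 480> frame,
                       rtc::ArrayView<float, 480> out) {
      webrtc::ThreeBandFilterBank bank;
      std::array<std::array<float, 160>, 3> bands;
      std::array<rtc::ArrayView<float>, 3> band_views = {
          rtc::ArrayView<float>(bands[0]), rtc::ArrayView<float>(bands[1]),
          rtc::ArrayView<float>(bands[2])};
      bank.Analysis(frame, band_views);  // 480 samples -> 3 x 160.
      // Per-band processing would go here.
      bank.Synthesis(band_views, out);   // 3 x 160 -> 480 samples.
    }

As noted above, the round trip is not a perfect reconstruction and introduces a 24-sample delay.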
diff --git a/third_party/libwebrtc/modules/audio_processing/transient/BUILD.gn b/third_party/libwebrtc/modules/audio_processing/transient/BUILD.gn
new file mode 100644
index 0000000000..41aeab0abe
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/transient/BUILD.gn
@@ -0,0 +1,133 @@
+# Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+import("../../../webrtc.gni")
+
+rtc_source_set("transient_suppressor_api") {
+ sources = [ "transient_suppressor.h" ]
+}
+
+rtc_library("transient_suppressor_impl") {
+ visibility = [
+ ":click_annotate",
+ ":transient_suppression_test",
+ ":transient_suppression_unittests",
+ "..:optionally_built_submodule_creators",
+ ]
+ sources = [
+ "common.h",
+ "daubechies_8_wavelet_coeffs.h",
+ "dyadic_decimator.h",
+ "moving_moments.cc",
+ "moving_moments.h",
+ "transient_detector.cc",
+ "transient_detector.h",
+ "transient_suppressor_impl.cc",
+ "transient_suppressor_impl.h",
+ "windows_private.h",
+ "wpd_node.cc",
+ "wpd_node.h",
+ "wpd_tree.cc",
+ "wpd_tree.h",
+ ]
+ deps = [
+ ":transient_suppressor_api",
+ ":voice_probability_delay_unit",
+ "../../../common_audio:common_audio",
+ "../../../common_audio:common_audio_c",
+ "../../../common_audio:fir_filter",
+ "../../../common_audio:fir_filter_factory",
+ "../../../common_audio/third_party/ooura:fft_size_256",
+ "../../../rtc_base:checks",
+ "../../../rtc_base:gtest_prod",
+ "../../../rtc_base:logging",
+ ]
+}
+
+rtc_library("voice_probability_delay_unit") {
+ sources = [
+ "voice_probability_delay_unit.cc",
+ "voice_probability_delay_unit.h",
+ ]
+ deps = [ "../../../rtc_base:checks" ]
+}
+
+if (rtc_include_tests) {
+ if (!build_with_chromium) {
+ rtc_executable("click_annotate") {
+ testonly = true
+ sources = [
+ "click_annotate.cc",
+ "file_utils.cc",
+ "file_utils.h",
+ ]
+ deps = [
+ ":transient_suppressor_impl",
+ "..:audio_processing",
+ "../../../rtc_base/system:file_wrapper",
+ "../../../system_wrappers",
+ ]
+ }
+
+ rtc_executable("transient_suppression_test") {
+ testonly = true
+ sources = [
+ "file_utils.cc",
+ "file_utils.h",
+ "transient_suppression_test.cc",
+ "voice_probability_delay_unit_unittest.cc",
+ ]
+ deps = [
+ ":transient_suppressor_api",
+ ":transient_suppressor_impl",
+ ":voice_probability_delay_unit",
+ "..:audio_processing",
+ "../../../common_audio",
+ "../../../rtc_base/system:file_wrapper",
+ "../../../system_wrappers",
+ "../../../test:fileutils",
+ "../../../test:test_support",
+ "../agc:level_estimation",
+ "//testing/gtest",
+ "//third_party/abseil-cpp/absl/flags:flag",
+ "//third_party/abseil-cpp/absl/flags:parse",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+ }
+ }
+
+ rtc_library("transient_suppression_unittests") {
+ testonly = true
+ sources = [
+ "dyadic_decimator_unittest.cc",
+ "file_utils.cc",
+ "file_utils.h",
+ "file_utils_unittest.cc",
+ "moving_moments_unittest.cc",
+ "transient_detector_unittest.cc",
+ "transient_suppressor_unittest.cc",
+ "voice_probability_delay_unit_unittest.cc",
+ "wpd_node_unittest.cc",
+ "wpd_tree_unittest.cc",
+ ]
+ deps = [
+ ":transient_suppressor_api",
+ ":transient_suppressor_impl",
+ ":voice_probability_delay_unit",
+ "../../../rtc_base:stringutils",
+ "../../../rtc_base/system:file_wrapper",
+ "../../../test:fileutils",
+ "../../../test:test_support",
+ "//testing/gtest",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/strings",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+ }
+}
diff --git a/third_party/libwebrtc/modules/audio_processing/transient/click_annotate.cc b/third_party/libwebrtc/modules/audio_processing/transient/click_annotate.cc
new file mode 100644
index 0000000000..f3f040f9aa
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/transient/click_annotate.cc
@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <cfloat>
+#include <cstdio>
+#include <cstdlib>
+#include <memory>
+#include <vector>
+
+#include "modules/audio_processing/transient/file_utils.h"
+#include "modules/audio_processing/transient/transient_detector.h"
+#include "rtc_base/system/file_wrapper.h"
+
+using webrtc::FileWrapper;
+using webrtc::TransientDetector;
+
+// Application to generate an RTP timing file.
+// Opens the PCM file and divides the signal into frames.
+// Creates a send-times array, with one entry per step.
+// Each block that contains a transient has an infinite send time.
+// The resultant array is written to a DAT file.
+// Returns -1 on error or `lost_packets` otherwise.
+int main(int argc, char* argv[]) {
+ if (argc != 5) {
+ printf("\n%s - Application to generate a RTP timing file.\n\n", argv[0]);
+ printf("%s PCMfile DATfile chunkSize sampleRate\n\n", argv[0]);
+ printf("Opens the PCMfile with sampleRate in Hertz.\n");
+ printf("Creates a send times array, one for each chunkSize ");
+ printf("milliseconds step.\n");
+ printf("Each block that contains a transient, has an infinite send time. ");
+ printf("The resultant array is written to a DATfile.\n\n");
+ return 0;
+ }
+
+ FileWrapper pcm_file = FileWrapper::OpenReadOnly(argv[1]);
+ if (!pcm_file.is_open()) {
+ printf("\nThe %s could not be opened.\n\n", argv[1]);
+ return -1;
+ }
+
+ FileWrapper dat_file = FileWrapper::OpenWriteOnly(argv[2]);
+ if (!dat_file.is_open()) {
+ printf("\nThe %s could not be opened.\n\n", argv[2]);
+ return -1;
+ }
+
+ int chunk_size_ms = atoi(argv[3]);
+ if (chunk_size_ms <= 0) {
+ printf("\nThe chunkSize must be a positive integer\n\n");
+ return -1;
+ }
+
+ int sample_rate_hz = atoi(argv[4]);
+ if (sample_rate_hz <= 0) {
+ printf("\nThe sampleRate must be a positive integer\n\n");
+ return -1;
+ }
+
+ TransientDetector detector(sample_rate_hz);
+ int lost_packets = 0;
+ size_t audio_buffer_length = chunk_size_ms * sample_rate_hz / 1000;
+ std::unique_ptr<float[]> audio_buffer(new float[audio_buffer_length]);
+ std::vector<float> send_times;
+
+ // Read first buffer from the PCM test file.
+ size_t file_samples_read = ReadInt16FromFileToFloatBuffer(
+ &pcm_file, audio_buffer_length, audio_buffer.get());
+ for (int time = 0; file_samples_read > 0; time += chunk_size_ms) {
+ // Pad the rest of the buffer with zeros.
+ for (size_t i = file_samples_read; i < audio_buffer_length; ++i) {
+ audio_buffer[i] = 0.0;
+ }
+ float value =
+ detector.Detect(audio_buffer.get(), audio_buffer_length, NULL, 0);
+ if (value < 0.5f) {
+ value = time;
+ } else {
+ value = FLT_MAX;
+ ++lost_packets;
+ }
+ send_times.push_back(value);
+
+ // Read next buffer from the PCM test file.
+ file_samples_read = ReadInt16FromFileToFloatBuffer(
+ &pcm_file, audio_buffer_length, audio_buffer.get());
+ }
+
+ size_t floats_written =
+ WriteFloatBufferToFile(&dat_file, send_times.size(), &send_times[0]);
+
+ if (floats_written == 0) {
+ printf("\nThe send times could not be written to DAT file\n\n");
+ return -1;
+ }
+
+ pcm_file.Close();
+ dat_file.Close();
+
+ return lost_packets;
+}
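
An example invocation (with hypothetical file names), annotating a 16 kHz PCM capture in 10 ms chunks:

    click_annotate speech.pcm send_times.dat 10 16000

The exit code is then the number of chunks flagged as transients (or -1 on error), and the DAT file holds one float per chunk: the chunk start time in ms, or FLT_MAX for flagged chunks.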
diff --git a/third_party/libwebrtc/modules/audio_processing/transient/common.h b/third_party/libwebrtc/modules/audio_processing/transient/common.h
new file mode 100644
index 0000000000..63c9a7b315
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/transient/common.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_TRANSIENT_COMMON_H_
+#define MODULES_AUDIO_PROCESSING_TRANSIENT_COMMON_H_
+namespace webrtc {
+namespace ts {
+
+static const float kPi = 3.14159265358979323846f;
+static const int kChunkSizeMs = 10;
+enum {
+ kSampleRate8kHz = 8000,
+ kSampleRate16kHz = 16000,
+ kSampleRate32kHz = 32000,
+ kSampleRate48kHz = 48000
+};
+
+} // namespace ts
+} // namespace webrtc
+#endif // MODULES_AUDIO_PROCESSING_TRANSIENT_COMMON_H_
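With kChunkSizeMs fixed at 10 ms, a chunk holds sample_rate_hz * kChunkSizeMs / 1000 samples. A quick sketch of that arithmetic for the supported rates (illustrative only, not part of the patch):

```cpp
// Sketch: samples per 10 ms chunk at each supported sample rate.
#include <cstdio>

int main() {
  const int kChunkSizeMs = 10;  // Mirrors webrtc::ts::kChunkSizeMs.
  const int kRates[] = {8000, 16000, 32000, 48000};
  for (int rate : kRates) {
    // 8 kHz -> 80, 16 kHz -> 160, 32 kHz -> 320, 48 kHz -> 480 samples.
    printf("%d Hz: %d samples per chunk\n", rate, rate * kChunkSizeMs / 1000);
  }
  return 0;
}
```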
diff --git a/third_party/libwebrtc/modules/audio_processing/transient/daubechies_8_wavelet_coeffs.h b/third_party/libwebrtc/modules/audio_processing/transient/daubechies_8_wavelet_coeffs.h
new file mode 100644
index 0000000000..92233bfd74
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/transient/daubechies_8_wavelet_coeffs.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+// This header file defines the FIR filter coefficients of the Daubechies 8
+// wavelet decomposition.
+#ifndef MODULES_AUDIO_PROCESSING_TRANSIENT_DAUBECHIES_8_WAVELET_COEFFS_H_
+#define MODULES_AUDIO_PROCESSING_TRANSIENT_DAUBECHIES_8_WAVELET_COEFFS_H_
+
+// Decomposition coefficients Daubechies 8.
+
+namespace webrtc {
+
+const int kDaubechies8CoefficientsLength = 16;
+
+const float kDaubechies8HighPassCoefficients[kDaubechies8CoefficientsLength] = {
+ -5.44158422430816093862e-02f, 3.12871590914465924627e-01f,
+ -6.75630736298012846142e-01f, 5.85354683654869090148e-01f,
+ 1.58291052560238926228e-02f, -2.84015542962428091389e-01f,
+ -4.72484573997972536787e-04f, 1.28747426620186011803e-01f,
+ 1.73693010020221083600e-02f, -4.40882539310647192377e-02f,
+ -1.39810279170155156436e-02f, 8.74609404701565465445e-03f,
+ 4.87035299301066034600e-03f, -3.91740372995977108837e-04f,
+ -6.75449405998556772109e-04f, -1.17476784002281916305e-04f};
+
+const float kDaubechies8LowPassCoefficients[kDaubechies8CoefficientsLength] = {
+ -1.17476784002281916305e-04f, 6.75449405998556772109e-04f,
+ -3.91740372995977108837e-04f, -4.87035299301066034600e-03f,
+ 8.74609404701565465445e-03f, 1.39810279170155156436e-02f,
+ -4.40882539310647192377e-02f, -1.73693010020221083600e-02f,
+ 1.28747426620186011803e-01f, 4.72484573997972536787e-04f,
+ -2.84015542962428091389e-01f, -1.58291052560238926228e-02f,
+ 5.85354683654869090148e-01f, 6.75630736298012846142e-01f,
+ 3.12871590914465924627e-01f, 5.44158422430816093862e-02f};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_TRANSIENT_DAUBECHIES_8_WAVELET_COEFFS_H_
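The two filter banks form a quadrature mirror pair: each high-pass tap is the time-reversed low-pass tap with alternating sign, which can be checked directly against the tables above. A small self-check sketch (illustrative only, not part of the patch):

```cpp
// Sketch: verify the quadrature-mirror relation between the two filters.
#include <cassert>

#include "modules/audio_processing/transient/daubechies_8_wavelet_coeffs.h"

void CheckQmfRelation() {
  const int n = webrtc::kDaubechies8CoefficientsLength;
  for (int k = 0; k < n; ++k) {
    // high[k] == -low[n - 1 - k] for even k, +low[n - 1 - k] for odd k.
    const float sign = (k % 2 == 0) ? -1.f : 1.f;
    assert(webrtc::kDaubechies8HighPassCoefficients[k] ==
           sign * webrtc::kDaubechies8LowPassCoefficients[n - 1 - k]);
  }
}
```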
diff --git a/third_party/libwebrtc/modules/audio_processing/transient/dyadic_decimator.h b/third_party/libwebrtc/modules/audio_processing/transient/dyadic_decimator.h
new file mode 100644
index 0000000000..52467e8c25
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/transient/dyadic_decimator.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_TRANSIENT_DYADIC_DECIMATOR_H_
+#define MODULES_AUDIO_PROCESSING_TRANSIENT_DYADIC_DECIMATOR_H_
+
+#include <cstdlib>
+
+// Provides a set of static methods to perform dyadic decimations.
+
+namespace webrtc {
+
+// Returns the output buffer length required to dyadically decimate a sequence
+// of `in_length` samples with the given decimation type `odd_sequence`.
+inline size_t GetOutLengthToDyadicDecimate(size_t in_length,
+ bool odd_sequence) {
+ size_t out_length = in_length / 2;
+
+ if (in_length % 2 == 1 && !odd_sequence) {
+ ++out_length;
+ }
+
+ return out_length;
+}
+
+// Performs a dyadic decimation: removes every odd/even member of a sequence,
+// halving its overall length.
+// Arguments:
+// in: array of `in_length`.
+// odd_sequence: If false, the odd members will be removed (1, 3, 5, ...);
+// if true, the even members will be removed (0, 2, 4, ...).
+// out: array of `out_length`. `out_length` must be large enough to
+// hold the decimated output. The necessary length can be provided by
+// GetOutLengthToDyadicDecimate().
+// Must be previously allocated.
+// Returns the number of output samples, or 0 on error.
+template <typename T>
+static size_t DyadicDecimate(const T* in,
+ size_t in_length,
+ bool odd_sequence,
+ T* out,
+ size_t out_length) {
+ size_t half_length = GetOutLengthToDyadicDecimate(in_length, odd_sequence);
+
+ if (!in || !out || in_length <= 0 || out_length < half_length) {
+ return 0;
+ }
+
+ size_t output_samples = 0;
+ size_t index_adjustment = odd_sequence ? 1 : 0;
+ for (output_samples = 0; output_samples < half_length; ++output_samples) {
+ out[output_samples] = in[output_samples * 2 + index_adjustment];
+ }
+
+ return output_samples;
+}
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_TRANSIENT_DYADIC_DECIMATOR_H_
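Basic usage, mirroring what the unit tests below exercise (illustrative only, not part of the patch):

```cpp
// Sketch: decimate a 5-sample sequence, keeping the even-indexed members.
#include <cstdint>
#include <cstdio>

#include "modules/audio_processing/transient/dyadic_decimator.h"

int main() {
  const int16_t in[] = {10, 11, 12, 13, 14};
  const size_t out_length = webrtc::GetOutLengthToDyadicDecimate(5, false);
  int16_t out[3];  // out_length == 3 for this input.
  size_t written =
      webrtc::DyadicDecimate(in, 5, /*odd_sequence=*/false, out, out_length);
  // Keeps in[0], in[2], in[4]: prints "3 samples: 10 12 14".
  printf("%zu samples: %d %d %d\n", written, out[0], out[1], out[2]);
  return 0;
}
```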
diff --git a/third_party/libwebrtc/modules/audio_processing/transient/dyadic_decimator_unittest.cc b/third_party/libwebrtc/modules/audio_processing/transient/dyadic_decimator_unittest.cc
new file mode 100644
index 0000000000..e4776d694f
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/transient/dyadic_decimator_unittest.cc
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/transient/dyadic_decimator.h"
+
+#include "test/gtest.h"
+
+namespace webrtc {
+
+static const size_t kEvenBufferLength = 6;
+static const size_t kOddBufferLength = 5;
+static const size_t kOutBufferLength = 3;
+
+int16_t const test_buffer_even_len[] = {0, 1, 2, 3, 4, 5};
+int16_t const test_buffer_odd_len[] = {0, 1, 2, 3, 4};
+int16_t test_buffer_out[kOutBufferLength];
+
+TEST(DyadicDecimatorTest, GetOutLengthToDyadicDecimate) {
+ EXPECT_EQ(3u, GetOutLengthToDyadicDecimate(6, false));
+ EXPECT_EQ(3u, GetOutLengthToDyadicDecimate(6, true));
+ EXPECT_EQ(3u, GetOutLengthToDyadicDecimate(5, false));
+ EXPECT_EQ(2u, GetOutLengthToDyadicDecimate(5, true));
+}
+
+TEST(DyadicDecimatorTest, DyadicDecimateErrorValues) {
+ size_t out_samples = 0;
+
+ out_samples = DyadicDecimate(static_cast<int16_t*>(NULL), kEvenBufferLength,
+ false, // Even sequence.
+ test_buffer_out, kOutBufferLength);
+ EXPECT_EQ(0u, out_samples);
+
+ out_samples = DyadicDecimate(test_buffer_even_len, kEvenBufferLength,
+ false, // Even sequence.
+ static_cast<int16_t*>(NULL), kOutBufferLength);
+ EXPECT_EQ(0u, out_samples);
+
+ // Less than required `out_length`.
+ out_samples = DyadicDecimate(test_buffer_even_len, kEvenBufferLength,
+ false, // Even sequence.
+ test_buffer_out, 2);
+ EXPECT_EQ(0u, out_samples);
+}
+
+TEST(DyadicDecimatorTest, DyadicDecimateEvenLengthEvenSequence) {
+ size_t expected_out_samples =
+ GetOutLengthToDyadicDecimate(kEvenBufferLength, false);
+
+ size_t out_samples = DyadicDecimate(test_buffer_even_len, kEvenBufferLength,
+ false, // Even sequence.
+ test_buffer_out, kOutBufferLength);
+
+ EXPECT_EQ(expected_out_samples, out_samples);
+
+ EXPECT_EQ(0, test_buffer_out[0]);
+ EXPECT_EQ(2, test_buffer_out[1]);
+ EXPECT_EQ(4, test_buffer_out[2]);
+}
+
+TEST(DyadicDecimatorTest, DyadicDecimateEvenLengthOddSequence) {
+ size_t expected_out_samples =
+ GetOutLengthToDyadicDecimate(kEvenBufferLength, true);
+
+ size_t out_samples = DyadicDecimate(test_buffer_even_len, kEvenBufferLength,
+ true, // Odd sequence.
+ test_buffer_out, kOutBufferLength);
+
+ EXPECT_EQ(expected_out_samples, out_samples);
+
+ EXPECT_EQ(1, test_buffer_out[0]);
+ EXPECT_EQ(3, test_buffer_out[1]);
+ EXPECT_EQ(5, test_buffer_out[2]);
+}
+
+TEST(DyadicDecimatorTest, DyadicDecimateOddLengthEvenSequence) {
+ size_t expected_out_samples =
+ GetOutLengthToDyadicDecimate(kOddBufferLength, false);
+
+ size_t out_samples = DyadicDecimate(test_buffer_odd_len, kOddBufferLength,
+ false, // Even sequence.
+ test_buffer_out, kOutBufferLength);
+
+ EXPECT_EQ(expected_out_samples, out_samples);
+
+ EXPECT_EQ(0, test_buffer_out[0]);
+ EXPECT_EQ(2, test_buffer_out[1]);
+ EXPECT_EQ(4, test_buffer_out[2]);
+}
+
+TEST(DyadicDecimatorTest, DyadicDecimateOddLengthOddSequence) {
+ size_t expected_out_samples =
+ GetOutLengthToDyadicDecimate(kOddBufferLength, true);
+
+ size_t out_samples = DyadicDecimate(test_buffer_odd_len, kOddBufferLength,
+ true, // Odd sequence.
+ test_buffer_out, kOutBufferLength);
+
+ EXPECT_EQ(expected_out_samples, out_samples);
+
+ EXPECT_EQ(1, test_buffer_out[0]);
+ EXPECT_EQ(3, test_buffer_out[1]);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/transient/file_utils.cc b/third_party/libwebrtc/modules/audio_processing/transient/file_utils.cc
new file mode 100644
index 0000000000..58f99325d1
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/transient/file_utils.cc
@@ -0,0 +1,257 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/transient/file_utils.h"
+
+#include <memory>
+
+#include "rtc_base/system/file_wrapper.h"
+
+namespace webrtc {
+
+int ConvertByteArrayToFloat(const uint8_t bytes[4], float* out) {
+ if (!bytes || !out) {
+ return -1;
+ }
+
+ uint32_t binary_value = 0;
+ for (int i = 3; i >= 0; --i) {
+ binary_value <<= 8;
+ binary_value += bytes[i];
+ }
+
+ *out = bit_cast<float>(binary_value);
+
+ return 0;
+}
+
+int ConvertByteArrayToDouble(const uint8_t bytes[8], double* out) {
+ if (!bytes || !out) {
+ return -1;
+ }
+
+ uint64_t binary_value = 0;
+ for (int i = 7; i >= 0; --i) {
+ binary_value <<= 8;
+ binary_value += bytes[i];
+ }
+
+ *out = bit_cast<double>(binary_value);
+
+ return 0;
+}
+
+int ConvertFloatToByteArray(float value, uint8_t out_bytes[4]) {
+ if (!out_bytes) {
+ return -1;
+ }
+
+ uint32_t binary_value = bit_cast<uint32_t>(value);
+ for (size_t i = 0; i < 4; ++i) {
+ out_bytes[i] = binary_value;
+ binary_value >>= 8;
+ }
+
+ return 0;
+}
+
+int ConvertDoubleToByteArray(double value, uint8_t out_bytes[8]) {
+ if (!out_bytes) {
+ return -1;
+ }
+
+ uint64_t binary_value = bit_cast<uint64_t>(value);
+ for (size_t i = 0; i < 8; ++i) {
+ out_bytes[i] = binary_value;
+ binary_value >>= 8;
+ }
+
+ return 0;
+}
+
+size_t ReadInt16BufferFromFile(FileWrapper* file,
+ size_t length,
+ int16_t* buffer) {
+ if (!file || !file->is_open() || !buffer || length <= 0) {
+ return 0;
+ }
+
+ std::unique_ptr<uint8_t[]> byte_array(new uint8_t[2]);
+
+ size_t int16s_read = 0;
+
+ while (int16s_read < length) {
+ size_t bytes_read = file->Read(byte_array.get(), 2);
+ if (bytes_read < 2) {
+ break;
+ }
+ int16_t value = byte_array[1];
+ value <<= 8;
+ value += byte_array[0];
+ buffer[int16s_read] = value;
+ ++int16s_read;
+ }
+
+ return int16s_read;
+}
+
+size_t ReadInt16FromFileToFloatBuffer(FileWrapper* file,
+ size_t length,
+ float* buffer) {
+ if (!file || !file->is_open() || !buffer || length <= 0) {
+ return 0;
+ }
+
+ std::unique_ptr<int16_t[]> buffer16(new int16_t[length]);
+
+ size_t int16s_read = ReadInt16BufferFromFile(file, length, buffer16.get());
+
+ for (size_t i = 0; i < int16s_read; ++i) {
+ buffer[i] = buffer16[i];
+ }
+
+ return int16s_read;
+}
+
+size_t ReadInt16FromFileToDoubleBuffer(FileWrapper* file,
+ size_t length,
+ double* buffer) {
+ if (!file || !file->is_open() || !buffer || length <= 0) {
+ return 0;
+ }
+
+ std::unique_ptr<int16_t[]> buffer16(new int16_t[length]);
+
+ size_t int16s_read = ReadInt16BufferFromFile(file, length, buffer16.get());
+
+ for (size_t i = 0; i < int16s_read; ++i) {
+ buffer[i] = buffer16[i];
+ }
+
+ return int16s_read;
+}
+
+size_t ReadFloatBufferFromFile(FileWrapper* file,
+ size_t length,
+ float* buffer) {
+ if (!file || !file->is_open() || !buffer || length <= 0) {
+ return 0;
+ }
+
+ std::unique_ptr<uint8_t[]> byte_array(new uint8_t[4]);
+
+ size_t floats_read = 0;
+
+ while (floats_read < length) {
+ size_t bytes_read = file->Read(byte_array.get(), 4);
+ if (bytes_read < 4) {
+ break;
+ }
+ ConvertByteArrayToFloat(byte_array.get(), &buffer[floats_read]);
+ ++floats_read;
+ }
+
+ return floats_read;
+}
+
+size_t ReadDoubleBufferFromFile(FileWrapper* file,
+ size_t length,
+ double* buffer) {
+ if (!file || !file->is_open() || !buffer || length <= 0) {
+ return 0;
+ }
+
+ std::unique_ptr<uint8_t[]> byte_array(new uint8_t[8]);
+
+ size_t doubles_read = 0;
+
+ while (doubles_read < length) {
+ size_t bytes_read = file->Read(byte_array.get(), 8);
+ if (bytes_read < 8) {
+ break;
+ }
+ ConvertByteArrayToDouble(byte_array.get(), &buffer[doubles_read]);
+ ++doubles_read;
+ }
+
+ return doubles_read;
+}
+
+size_t WriteInt16BufferToFile(FileWrapper* file,
+ size_t length,
+ const int16_t* buffer) {
+ if (!file || !file->is_open() || !buffer || length <= 0) {
+ return 0;
+ }
+
+ std::unique_ptr<uint8_t[]> byte_array(new uint8_t[2]);
+
+ size_t int16s_written = 0;
+
+ for (int16s_written = 0; int16s_written < length; ++int16s_written) {
+ // Get byte representation.
+ byte_array[0] = buffer[int16s_written] & 0xFF;
+ byte_array[1] = (buffer[int16s_written] >> 8) & 0xFF;
+
+ file->Write(byte_array.get(), 2);
+ }
+
+ file->Flush();
+
+ return int16s_written;
+}
+
+size_t WriteFloatBufferToFile(FileWrapper* file,
+ size_t length,
+ const float* buffer) {
+ if (!file || !file->is_open() || !buffer || length <= 0) {
+ return 0;
+ }
+
+ std::unique_ptr<uint8_t[]> byte_array(new uint8_t[4]);
+
+ size_t floats_written = 0;
+
+ for (floats_written = 0; floats_written < length; ++floats_written) {
+ // Get byte representation.
+ ConvertFloatToByteArray(buffer[floats_written], byte_array.get());
+
+ file->Write(byte_array.get(), 4);
+ }
+
+ file->Flush();
+
+ return floats_written;
+}
+
+size_t WriteDoubleBufferToFile(FileWrapper* file,
+ size_t length,
+ const double* buffer) {
+ if (!file || !file->is_open() || !buffer || length <= 0) {
+ return 0;
+ }
+
+ std::unique_ptr<uint8_t[]> byte_array(new uint8_t[8]);
+
+ size_t doubles_written = 0;
+
+ for (doubles_written = 0; doubles_written < length; ++doubles_written) {
+ // Get byte representation.
+ ConvertDoubleToByteArray(buffer[doubles_written], byte_array.get());
+
+ file->Write(byte_array.get(), 8);
+ }
+
+ file->Flush();
+
+ return doubles_written;
+}
+
+} // namespace webrtc
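All of these helpers serialize values byte by byte in little-endian order, so the produced files do not depend on the host's native endianness. A round-trip sketch (illustrative only, not part of the patch):

```cpp
// Sketch: float <-> little-endian byte-array round trip.
#include <cassert>
#include <cstdint>

#include "modules/audio_processing/transient/file_utils.h"

void RoundTrip() {
  const float kValue = 3.14159265f;
  uint8_t bytes[4];
  assert(webrtc::ConvertFloatToByteArray(kValue, bytes) == 0);
  float restored = 0.f;
  assert(webrtc::ConvertByteArrayToFloat(bytes, &restored) == 0);
  assert(restored == kValue);  // Bit-exact: both directions use bit_cast.
}
```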
diff --git a/third_party/libwebrtc/modules/audio_processing/transient/file_utils.h b/third_party/libwebrtc/modules/audio_processing/transient/file_utils.h
new file mode 100644
index 0000000000..b748337773
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/transient/file_utils.h
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_TRANSIENT_FILE_UTILS_H_
+#define MODULES_AUDIO_PROCESSING_TRANSIENT_FILE_UTILS_H_
+
+#include <string.h>
+
+#include "rtc_base/system/file_wrapper.h"
+
+namespace webrtc {
+
+// This is a copy of the cast included in the Chromium codebase here:
+// http://cs.chromium.org/src/third_party/cld/base/casts.h
+template <class Dest, class Source>
+inline Dest bit_cast(const Source& source) {
+ // A compile error here means your Dest and Source have different sizes.
+ static_assert(sizeof(Dest) == sizeof(Source),
+ "Dest and Source have different sizes");
+
+ Dest dest;
+ memcpy(&dest, &source, sizeof(dest));
+ return dest;
+}
+
+// Converts the byte array with binary float representation to float.
+// Bytes must be in little-endian order.
+// Returns 0 if correct, -1 on error.
+int ConvertByteArrayToFloat(const uint8_t bytes[4], float* out);
+
+// Converts the byte array with binary double representation to double.
+// Bytes must be in little-endian order.
+// Returns 0 if correct, -1 on error.
+int ConvertByteArrayToDouble(const uint8_t bytes[8], double* out);
+
+// Converts a float to a byte array with binary float representation.
+// Bytes will be in little-endian order.
+// Returns 0 if correct, -1 on error.
+int ConvertFloatToByteArray(float value, uint8_t out_bytes[4]);
+
+// Converts a double to a byte array with binary double representation.
+// Bytes will be in little-endian order.
+// Returns 0 if correct, -1 on error.
+int ConvertDoubleToByteArray(double value, uint8_t out_bytes[8]);
+
+// Reads `length` 16-bit integers from `file` to `buffer`.
+// `file` must be previously opened.
+// Returns the number of 16-bit integers read, or 0 on error.
+size_t ReadInt16BufferFromFile(FileWrapper* file,
+ size_t length,
+ int16_t* buffer);
+
+// Reads `length` 16-bit integers from `file` and stores those values
+// (converting them) in `buffer`.
+// `file` must be previously opened.
+// Returns the number of 16-bit integers read, or 0 on error.
+size_t ReadInt16FromFileToFloatBuffer(FileWrapper* file,
+ size_t length,
+ float* buffer);
+
+// Reads `length` 16-bit integers from `file` and stores those values
+// (converting them) in `buffer`.
+// `file` must be previously opened.
+// Returns the number of 16-bit integers read, or 0 on error.
+size_t ReadInt16FromFileToDoubleBuffer(FileWrapper* file,
+ size_t length,
+ double* buffer);
+
+// Reads `length` floats in binary representation (4 bytes) from `file` to
+// `buffer`.
+// `file` must be previously opened.
+// Returns the number of floats read, or 0 on error.
+size_t ReadFloatBufferFromFile(FileWrapper* file, size_t length, float* buffer);
+
+// Reads `length` doubles in binary representation (8 bytes) from `file` to
+// `buffer`.
+// `file` must be previously opened.
+// Returns the number of doubles read, or 0 on error.
+size_t ReadDoubleBufferFromFile(FileWrapper* file,
+ size_t length,
+ double* buffer);
+
+// Writes `length` 16-bit integers from `buffer` in binary representation (2
+// bytes) to `file`. It flushes `file`, so after this call there are no
+// pending writes.
+// `file` must be previously opened.
+// Returns the number of 16-bit integers written, or 0 on error.
+size_t WriteInt16BufferToFile(FileWrapper* file,
+ size_t length,
+ const int16_t* buffer);
+
+// Writes `length` floats from `buffer` in binary representation (4 bytes) to
+// `file`. It flushes `file`, so after this call there are no pending writes.
+// `file` must be previously opened.
+// Returns the number of floats written, or 0 on error.
+size_t WriteFloatBufferToFile(FileWrapper* file,
+ size_t length,
+ const float* buffer);
+
+// Writes `length` doubles from `buffer` in binary representation (8 bytes) to
+// `file`. It flushes `file`, so after this call there are no pending writes.
+// `file` must be previously opened.
+// Returns the number of doubles written, or 0 on error.
+size_t WriteDoubleBufferToFile(FileWrapper* file,
+ size_t length,
+ const double* buffer);
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_TRANSIENT_FILE_UTILS_H_
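A typical caller pairs these helpers with FileWrapper, as the timing tool earlier in this patch does. A minimal sketch loading 16-bit PCM into a float analysis buffer (illustrative only; the function name LoadPcmChunk and the path argument are hypothetical, not part of the patch):

```cpp
// Sketch: load a chunk of 16-bit PCM into a float buffer.
#include "modules/audio_processing/transient/file_utils.h"
#include "rtc_base/system/file_wrapper.h"

size_t LoadPcmChunk(const char* pcm_path, size_t chunk_samples, float* out) {
  webrtc::FileWrapper pcm = webrtc::FileWrapper::OpenReadOnly(pcm_path);
  if (!pcm.is_open()) return 0;
  // Each int16 sample is widened to float; returns samples actually read.
  return webrtc::ReadInt16FromFileToFloatBuffer(&pcm, chunk_samples, out);
}
```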
diff --git a/third_party/libwebrtc/modules/audio_processing/transient/file_utils_unittest.cc b/third_party/libwebrtc/modules/audio_processing/transient/file_utils_unittest.cc
new file mode 100644
index 0000000000..a9dddb1eda
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/transient/file_utils_unittest.cc
@@ -0,0 +1,501 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/transient/file_utils.h"
+
+#include <string.h>
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "absl/strings/string_view.h"
+#include "rtc_base/system/file_wrapper.h"
+#include "test/gtest.h"
+#include "test/testsupport/file_utils.h"
+
+namespace webrtc {
+
+static const uint8_t kPiBytesf[4] = {0xDB, 0x0F, 0x49, 0x40};
+static const uint8_t kEBytesf[4] = {0x54, 0xF8, 0x2D, 0x40};
+static const uint8_t kAvogadroBytesf[4] = {0x2F, 0x0C, 0xFF, 0x66};
+
+static const uint8_t kPiBytes[8] = {0x18, 0x2D, 0x44, 0x54,
+ 0xFB, 0x21, 0x09, 0x40};
+static const uint8_t kEBytes[8] = {0x69, 0x57, 0x14, 0x8B,
+ 0x0A, 0xBF, 0x05, 0x40};
+static const uint8_t kAvogadroBytes[8] = {0xF4, 0xBC, 0xA8, 0xDF,
+ 0x85, 0xE1, 0xDF, 0x44};
+
+static const double kPi = 3.14159265358979323846;
+static const double kE = 2.71828182845904523536;
+static const double kAvogadro = 602214100000000000000000.0;
+
+class TransientFileUtilsTest : public ::testing::Test {
+ protected:
+ TransientFileUtilsTest()
+ : kTestFileName(
+ test::ResourcePath("audio_processing/transient/double-utils",
+ "dat")),
+ kTestFileNamef(
+ test::ResourcePath("audio_processing/transient/float-utils",
+ "dat")) {}
+
+ ~TransientFileUtilsTest() override { CleanupTempFiles(); }
+
+ std::string CreateTempFilename(absl::string_view dir,
+ absl::string_view prefix) {
+ std::string filename = test::TempFilename(dir, prefix);
+ temp_filenames_.push_back(filename);
+ return filename;
+ }
+
+ void CleanupTempFiles() {
+ for (const std::string& filename : temp_filenames_) {
+ remove(filename.c_str());
+ }
+ temp_filenames_.clear();
+ }
+
+  // This file (used in some tests) contains binary data. The data are the
+  // double representations of the constants Pi, E, and Avogadro's number,
+  // appended in that order.
+  const std::string kTestFileName;
+
+  // This file (used in some tests) contains binary data. The data are the
+  // float representations of the constants Pi, E, and Avogadro's number,
+  // appended in that order.
+  const std::string kTestFileNamef;
+
+ // List of temporary filenames created by CreateTempFilename.
+ std::vector<std::string> temp_filenames_;
+};
+
+#if defined(WEBRTC_IOS)
+#define MAYBE_ConvertByteArrayToFloat DISABLED_ConvertByteArrayToFloat
+#else
+#define MAYBE_ConvertByteArrayToFloat ConvertByteArrayToFloat
+#endif
+TEST_F(TransientFileUtilsTest, MAYBE_ConvertByteArrayToFloat) {
+ float value = 0.0;
+
+ EXPECT_EQ(0, ConvertByteArrayToFloat(kPiBytesf, &value));
+ EXPECT_FLOAT_EQ(kPi, value);
+
+ EXPECT_EQ(0, ConvertByteArrayToFloat(kEBytesf, &value));
+ EXPECT_FLOAT_EQ(kE, value);
+
+ EXPECT_EQ(0, ConvertByteArrayToFloat(kAvogadroBytesf, &value));
+ EXPECT_FLOAT_EQ(kAvogadro, value);
+}
+
+#if defined(WEBRTC_IOS)
+#define MAYBE_ConvertByteArrayToDouble DISABLED_ConvertByteArrayToDouble
+#else
+#define MAYBE_ConvertByteArrayToDouble ConvertByteArrayToDouble
+#endif
+TEST_F(TransientFileUtilsTest, MAYBE_ConvertByteArrayToDouble) {
+ double value = 0.0;
+
+ EXPECT_EQ(0, ConvertByteArrayToDouble(kPiBytes, &value));
+ EXPECT_DOUBLE_EQ(kPi, value);
+
+ EXPECT_EQ(0, ConvertByteArrayToDouble(kEBytes, &value));
+ EXPECT_DOUBLE_EQ(kE, value);
+
+ EXPECT_EQ(0, ConvertByteArrayToDouble(kAvogadroBytes, &value));
+ EXPECT_DOUBLE_EQ(kAvogadro, value);
+}
+
+#if defined(WEBRTC_IOS)
+#define MAYBE_ConvertFloatToByteArray DISABLED_ConvertFloatToByteArray
+#else
+#define MAYBE_ConvertFloatToByteArray ConvertFloatToByteArray
+#endif
+TEST_F(TransientFileUtilsTest, MAYBE_ConvertFloatToByteArray) {
+ std::unique_ptr<uint8_t[]> bytes(new uint8_t[4]);
+
+ EXPECT_EQ(0, ConvertFloatToByteArray(kPi, bytes.get()));
+ EXPECT_EQ(0, memcmp(bytes.get(), kPiBytesf, 4));
+
+ EXPECT_EQ(0, ConvertFloatToByteArray(kE, bytes.get()));
+ EXPECT_EQ(0, memcmp(bytes.get(), kEBytesf, 4));
+
+ EXPECT_EQ(0, ConvertFloatToByteArray(kAvogadro, bytes.get()));
+ EXPECT_EQ(0, memcmp(bytes.get(), kAvogadroBytesf, 4));
+}
+
+#if defined(WEBRTC_IOS)
+#define MAYBE_ConvertDoubleToByteArray DISABLED_ConvertDoubleToByteArray
+#else
+#define MAYBE_ConvertDoubleToByteArray ConvertDoubleToByteArray
+#endif
+TEST_F(TransientFileUtilsTest, MAYBE_ConvertDoubleToByteArray) {
+ std::unique_ptr<uint8_t[]> bytes(new uint8_t[8]);
+
+ EXPECT_EQ(0, ConvertDoubleToByteArray(kPi, bytes.get()));
+ EXPECT_EQ(0, memcmp(bytes.get(), kPiBytes, 8));
+
+ EXPECT_EQ(0, ConvertDoubleToByteArray(kE, bytes.get()));
+ EXPECT_EQ(0, memcmp(bytes.get(), kEBytes, 8));
+
+ EXPECT_EQ(0, ConvertDoubleToByteArray(kAvogadro, bytes.get()));
+ EXPECT_EQ(0, memcmp(bytes.get(), kAvogadroBytes, 8));
+}
+
+#if defined(WEBRTC_IOS)
+#define MAYBE_ReadInt16BufferFromFile DISABLED_ReadInt16BufferFromFile
+#else
+#define MAYBE_ReadInt16BufferFromFile ReadInt16BufferFromFile
+#endif
+TEST_F(TransientFileUtilsTest, MAYBE_ReadInt16BufferFromFile) {
+ std::string test_filename = kTestFileName;
+
+ FileWrapper file = FileWrapper::OpenReadOnly(test_filename);
+ ASSERT_TRUE(file.is_open()) << "File could not be opened:\n"
+ << kTestFileName.c_str();
+
+ const size_t kBufferLength = 12;
+ std::unique_ptr<int16_t[]> buffer(new int16_t[kBufferLength]);
+
+ EXPECT_EQ(kBufferLength,
+ ReadInt16BufferFromFile(&file, kBufferLength, buffer.get()));
+ EXPECT_EQ(22377, buffer[4]);
+ EXPECT_EQ(16389, buffer[7]);
+ EXPECT_EQ(17631, buffer[kBufferLength - 1]);
+
+ file.Rewind();
+
+  // The next test checks the case where the file contains less data than
+  // requested: the read continues to the end of the file and returns the
+  // number of int16s actually read.
+  const size_t kBufferLengthLargerThanFile = kBufferLength * 2;
+  buffer.reset(new int16_t[kBufferLengthLargerThanFile]);
+  EXPECT_EQ(kBufferLength,
+            ReadInt16BufferFromFile(&file, kBufferLengthLargerThanFile,
+                                    buffer.get()));
+ EXPECT_EQ(11544, buffer[0]);
+ EXPECT_EQ(22377, buffer[4]);
+ EXPECT_EQ(16389, buffer[7]);
+ EXPECT_EQ(17631, buffer[kBufferLength - 1]);
+}
+
+#if defined(WEBRTC_IOS)
+#define MAYBE_ReadInt16FromFileToFloatBuffer \
+ DISABLED_ReadInt16FromFileToFloatBuffer
+#else
+#define MAYBE_ReadInt16FromFileToFloatBuffer ReadInt16FromFileToFloatBuffer
+#endif
+TEST_F(TransientFileUtilsTest, MAYBE_ReadInt16FromFileToFloatBuffer) {
+ std::string test_filename = kTestFileName;
+
+ FileWrapper file = FileWrapper::OpenReadOnly(test_filename);
+ ASSERT_TRUE(file.is_open()) << "File could not be opened:\n"
+ << kTestFileName.c_str();
+
+ const size_t kBufferLength = 12;
+ std::unique_ptr<float[]> buffer(new float[kBufferLength]);
+
+ EXPECT_EQ(kBufferLength,
+ ReadInt16FromFileToFloatBuffer(&file, kBufferLength, buffer.get()));
+
+ EXPECT_DOUBLE_EQ(11544, buffer[0]);
+ EXPECT_DOUBLE_EQ(22377, buffer[4]);
+ EXPECT_DOUBLE_EQ(16389, buffer[7]);
+ EXPECT_DOUBLE_EQ(17631, buffer[kBufferLength - 1]);
+
+ file.Rewind();
+
+  // The next test checks the case where the file contains less data than
+  // requested: the read continues to the end of the file and returns the
+  // number of int16s actually read.
+  const size_t kBufferLengthLargerThanFile = kBufferLength * 2;
+  buffer.reset(new float[kBufferLengthLargerThanFile]);
+  EXPECT_EQ(kBufferLength,
+            ReadInt16FromFileToFloatBuffer(&file, kBufferLengthLargerThanFile,
+                                           buffer.get()));
+ EXPECT_DOUBLE_EQ(11544, buffer[0]);
+ EXPECT_DOUBLE_EQ(22377, buffer[4]);
+ EXPECT_DOUBLE_EQ(16389, buffer[7]);
+ EXPECT_DOUBLE_EQ(17631, buffer[kBufferLength - 1]);
+}
+
+#if defined(WEBRTC_IOS)
+#define MAYBE_ReadInt16FromFileToDoubleBuffer \
+ DISABLED_ReadInt16FromFileToDoubleBuffer
+#else
+#define MAYBE_ReadInt16FromFileToDoubleBuffer ReadInt16FromFileToDoubleBuffer
+#endif
+TEST_F(TransientFileUtilsTest, MAYBE_ReadInt16FromFileToDoubleBuffer) {
+ std::string test_filename = kTestFileName;
+
+ FileWrapper file = FileWrapper::OpenReadOnly(test_filename);
+ ASSERT_TRUE(file.is_open()) << "File could not be opened:\n"
+ << kTestFileName.c_str();
+
+ const size_t kBufferLength = 12;
+ std::unique_ptr<double[]> buffer(new double[kBufferLength]);
+
+ EXPECT_EQ(kBufferLength, ReadInt16FromFileToDoubleBuffer(&file, kBufferLength,
+ buffer.get()));
+ EXPECT_DOUBLE_EQ(11544, buffer[0]);
+ EXPECT_DOUBLE_EQ(22377, buffer[4]);
+ EXPECT_DOUBLE_EQ(16389, buffer[7]);
+ EXPECT_DOUBLE_EQ(17631, buffer[kBufferLength - 1]);
+
+ file.Rewind();
+
+  // The next test checks the case where the file contains less data than
+  // requested: the read continues to the end of the file and returns the
+  // number of int16s actually read.
+  const size_t kBufferLengthLargerThanFile = kBufferLength * 2;
+  buffer.reset(new double[kBufferLengthLargerThanFile]);
+  EXPECT_EQ(kBufferLength,
+            ReadInt16FromFileToDoubleBuffer(&file, kBufferLengthLargerThanFile,
+                                            buffer.get()));
+ EXPECT_DOUBLE_EQ(11544, buffer[0]);
+ EXPECT_DOUBLE_EQ(22377, buffer[4]);
+ EXPECT_DOUBLE_EQ(16389, buffer[7]);
+ EXPECT_DOUBLE_EQ(17631, buffer[kBufferLength - 1]);
+}
+
+#if defined(WEBRTC_IOS)
+#define MAYBE_ReadFloatBufferFromFile DISABLED_ReadFloatBufferFromFile
+#else
+#define MAYBE_ReadFloatBufferFromFile ReadFloatBufferFromFile
+#endif
+TEST_F(TransientFileUtilsTest, MAYBE_ReadFloatBufferFromFile) {
+ std::string test_filename = kTestFileNamef;
+
+ FileWrapper file = FileWrapper::OpenReadOnly(test_filename);
+ ASSERT_TRUE(file.is_open()) << "File could not be opened:\n"
+ << kTestFileNamef.c_str();
+
+ const size_t kBufferLength = 3;
+ std::unique_ptr<float[]> buffer(new float[kBufferLength]);
+
+ EXPECT_EQ(kBufferLength,
+ ReadFloatBufferFromFile(&file, kBufferLength, buffer.get()));
+ EXPECT_FLOAT_EQ(kPi, buffer[0]);
+ EXPECT_FLOAT_EQ(kE, buffer[1]);
+ EXPECT_FLOAT_EQ(kAvogadro, buffer[2]);
+
+ file.Rewind();
+
+  // The next test checks the case where the file contains less data than
+  // requested: the read continues to the end of the file and returns the
+  // number of floats actually read.
+  const size_t kBufferLengthLargerThanFile = kBufferLength * 2;
+  buffer.reset(new float[kBufferLengthLargerThanFile]);
+  EXPECT_EQ(kBufferLength,
+            ReadFloatBufferFromFile(&file, kBufferLengthLargerThanFile,
+                                    buffer.get()));
+ EXPECT_FLOAT_EQ(kPi, buffer[0]);
+ EXPECT_FLOAT_EQ(kE, buffer[1]);
+ EXPECT_FLOAT_EQ(kAvogadro, buffer[2]);
+}
+
+#if defined(WEBRTC_IOS)
+#define MAYBE_ReadDoubleBufferFromFile DISABLED_ReadDoubleBufferFromFile
+#else
+#define MAYBE_ReadDoubleBufferFromFile ReadDoubleBufferFromFile
+#endif
+TEST_F(TransientFileUtilsTest, MAYBE_ReadDoubleBufferFromFile) {
+ std::string test_filename = kTestFileName;
+
+ FileWrapper file = FileWrapper::OpenReadOnly(test_filename);
+ ASSERT_TRUE(file.is_open()) << "File could not be opened:\n"
+ << kTestFileName.c_str();
+
+ const size_t kBufferLength = 3;
+ std::unique_ptr<double[]> buffer(new double[kBufferLength]);
+
+ EXPECT_EQ(kBufferLength,
+ ReadDoubleBufferFromFile(&file, kBufferLength, buffer.get()));
+ EXPECT_DOUBLE_EQ(kPi, buffer[0]);
+ EXPECT_DOUBLE_EQ(kE, buffer[1]);
+ EXPECT_DOUBLE_EQ(kAvogadro, buffer[2]);
+
+ file.Rewind();
+
+  // The next test checks the case where the file contains less data than
+  // requested: the read continues to the end of the file and returns the
+  // number of doubles actually read.
+  const size_t kBufferLengthLargerThanFile = kBufferLength * 2;
+  buffer.reset(new double[kBufferLengthLargerThanFile]);
+  EXPECT_EQ(kBufferLength,
+            ReadDoubleBufferFromFile(&file, kBufferLengthLargerThanFile,
+                                     buffer.get()));
+ EXPECT_DOUBLE_EQ(kPi, buffer[0]);
+ EXPECT_DOUBLE_EQ(kE, buffer[1]);
+ EXPECT_DOUBLE_EQ(kAvogadro, buffer[2]);
+}
+
+#if defined(WEBRTC_IOS)
+#define MAYBE_WriteInt16BufferToFile DISABLED_WriteInt16BufferToFile
+#else
+#define MAYBE_WriteInt16BufferToFile WriteInt16BufferToFile
+#endif
+TEST_F(TransientFileUtilsTest, MAYBE_WriteInt16BufferToFile) {
+ std::string kOutFileName =
+ CreateTempFilename(test::OutputPath(), "utils_test");
+
+ FileWrapper file = FileWrapper::OpenWriteOnly(kOutFileName);
+ ASSERT_TRUE(file.is_open()) << "File could not be opened:\n"
+ << kOutFileName.c_str();
+
+ const size_t kBufferLength = 3;
+ std::unique_ptr<int16_t[]> written_buffer(new int16_t[kBufferLength]);
+ std::unique_ptr<int16_t[]> read_buffer(new int16_t[kBufferLength]);
+
+ written_buffer[0] = 1;
+ written_buffer[1] = 2;
+ written_buffer[2] = 3;
+
+ EXPECT_EQ(kBufferLength,
+ WriteInt16BufferToFile(&file, kBufferLength, written_buffer.get()));
+
+ file.Close();
+
+ file = FileWrapper::OpenReadOnly(kOutFileName);
+ ASSERT_TRUE(file.is_open()) << "File could not be opened:\n"
+ << kOutFileName.c_str();
+
+ EXPECT_EQ(kBufferLength,
+ ReadInt16BufferFromFile(&file, kBufferLength, read_buffer.get()));
+ EXPECT_EQ(0, memcmp(written_buffer.get(), read_buffer.get(),
+ kBufferLength * sizeof(written_buffer[0])));
+}
+
+#if defined(WEBRTC_IOS)
+#define MAYBE_WriteFloatBufferToFile DISABLED_WriteFloatBufferToFile
+#else
+#define MAYBE_WriteFloatBufferToFile WriteFloatBufferToFile
+#endif
+TEST_F(TransientFileUtilsTest, MAYBE_WriteFloatBufferToFile) {
+ std::string kOutFileName =
+ CreateTempFilename(test::OutputPath(), "utils_test");
+
+ FileWrapper file = FileWrapper::OpenWriteOnly(kOutFileName);
+ ASSERT_TRUE(file.is_open()) << "File could not be opened:\n"
+ << kOutFileName.c_str();
+
+ const size_t kBufferLength = 3;
+ std::unique_ptr<float[]> written_buffer(new float[kBufferLength]);
+ std::unique_ptr<float[]> read_buffer(new float[kBufferLength]);
+
+ written_buffer[0] = static_cast<float>(kPi);
+ written_buffer[1] = static_cast<float>(kE);
+ written_buffer[2] = static_cast<float>(kAvogadro);
+
+ EXPECT_EQ(kBufferLength,
+ WriteFloatBufferToFile(&file, kBufferLength, written_buffer.get()));
+
+ file.Close();
+
+ file = FileWrapper::OpenReadOnly(kOutFileName);
+ ASSERT_TRUE(file.is_open()) << "File could not be opened:\n"
+ << kOutFileName.c_str();
+
+ EXPECT_EQ(kBufferLength,
+ ReadFloatBufferFromFile(&file, kBufferLength, read_buffer.get()));
+ EXPECT_EQ(0, memcmp(written_buffer.get(), read_buffer.get(),
+ kBufferLength * sizeof(written_buffer[0])));
+}
+
+#if defined(WEBRTC_IOS)
+#define MAYBE_WriteDoubleBufferToFile DISABLED_WriteDoubleBufferToFile
+#else
+#define MAYBE_WriteDoubleBufferToFile WriteDoubleBufferToFile
+#endif
+TEST_F(TransientFileUtilsTest, MAYBE_WriteDoubleBufferToFile) {
+ std::string kOutFileName =
+ CreateTempFilename(test::OutputPath(), "utils_test");
+
+ FileWrapper file = FileWrapper::OpenWriteOnly(kOutFileName);
+ ASSERT_TRUE(file.is_open()) << "File could not be opened:\n"
+ << kOutFileName.c_str();
+
+ const size_t kBufferLength = 3;
+ std::unique_ptr<double[]> written_buffer(new double[kBufferLength]);
+ std::unique_ptr<double[]> read_buffer(new double[kBufferLength]);
+
+ written_buffer[0] = kPi;
+ written_buffer[1] = kE;
+ written_buffer[2] = kAvogadro;
+
+ EXPECT_EQ(kBufferLength, WriteDoubleBufferToFile(&file, kBufferLength,
+ written_buffer.get()));
+
+ file.Close();
+
+ file = FileWrapper::OpenReadOnly(kOutFileName);
+ ASSERT_TRUE(file.is_open()) << "File could not be opened:\n"
+ << kOutFileName.c_str();
+
+ EXPECT_EQ(kBufferLength,
+ ReadDoubleBufferFromFile(&file, kBufferLength, read_buffer.get()));
+ EXPECT_EQ(0, memcmp(written_buffer.get(), read_buffer.get(),
+ kBufferLength * sizeof(written_buffer[0])));
+}
+
+#if defined(WEBRTC_IOS)
+#define MAYBE_ExpectedErrorReturnValues DISABLED_ExpectedErrorReturnValues
+#else
+#define MAYBE_ExpectedErrorReturnValues ExpectedErrorReturnValues
+#endif
+TEST_F(TransientFileUtilsTest, MAYBE_ExpectedErrorReturnValues) {
+ std::string test_filename = kTestFileName;
+
+ double value;
+ std::unique_ptr<int16_t[]> int16_buffer(new int16_t[1]);
+ std::unique_ptr<double[]> double_buffer(new double[1]);
+ FileWrapper file;
+
+ EXPECT_EQ(-1, ConvertByteArrayToDouble(NULL, &value));
+ EXPECT_EQ(-1, ConvertByteArrayToDouble(kPiBytes, NULL));
+
+ EXPECT_EQ(-1, ConvertDoubleToByteArray(kPi, NULL));
+
+ // Tests with file not opened.
+ EXPECT_EQ(0u, ReadInt16BufferFromFile(&file, 1, int16_buffer.get()));
+ EXPECT_EQ(0u, ReadInt16FromFileToDoubleBuffer(&file, 1, double_buffer.get()));
+ EXPECT_EQ(0u, ReadDoubleBufferFromFile(&file, 1, double_buffer.get()));
+ EXPECT_EQ(0u, WriteInt16BufferToFile(&file, 1, int16_buffer.get()));
+ EXPECT_EQ(0u, WriteDoubleBufferToFile(&file, 1, double_buffer.get()));
+
+ file = FileWrapper::OpenReadOnly(test_filename);
+ ASSERT_TRUE(file.is_open()) << "File could not be opened:\n"
+ << kTestFileName.c_str();
+
+ EXPECT_EQ(0u, ReadInt16BufferFromFile(NULL, 1, int16_buffer.get()));
+ EXPECT_EQ(0u, ReadInt16BufferFromFile(&file, 1, NULL));
+ EXPECT_EQ(0u, ReadInt16BufferFromFile(&file, 0, int16_buffer.get()));
+
+ EXPECT_EQ(0u, ReadInt16FromFileToDoubleBuffer(NULL, 1, double_buffer.get()));
+ EXPECT_EQ(0u, ReadInt16FromFileToDoubleBuffer(&file, 1, NULL));
+ EXPECT_EQ(0u, ReadInt16FromFileToDoubleBuffer(&file, 0, double_buffer.get()));
+
+ EXPECT_EQ(0u, ReadDoubleBufferFromFile(NULL, 1, double_buffer.get()));
+ EXPECT_EQ(0u, ReadDoubleBufferFromFile(&file, 1, NULL));
+ EXPECT_EQ(0u, ReadDoubleBufferFromFile(&file, 0, double_buffer.get()));
+
+ EXPECT_EQ(0u, WriteInt16BufferToFile(NULL, 1, int16_buffer.get()));
+ EXPECT_EQ(0u, WriteInt16BufferToFile(&file, 1, NULL));
+ EXPECT_EQ(0u, WriteInt16BufferToFile(&file, 0, int16_buffer.get()));
+
+ EXPECT_EQ(0u, WriteDoubleBufferToFile(NULL, 1, double_buffer.get()));
+ EXPECT_EQ(0u, WriteDoubleBufferToFile(&file, 1, NULL));
+ EXPECT_EQ(0u, WriteDoubleBufferToFile(&file, 0, double_buffer.get()));
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/transient/moving_moments.cc b/third_party/libwebrtc/modules/audio_processing/transient/moving_moments.cc
new file mode 100644
index 0000000000..83810bfe3c
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/transient/moving_moments.cc
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/transient/moving_moments.h"
+
+#include <algorithm>
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+MovingMoments::MovingMoments(size_t length)
+ : length_(length), queue_(), sum_(0.0), sum_of_squares_(0.0) {
+ RTC_DCHECK_GT(length, 0);
+ for (size_t i = 0; i < length; ++i) {
+ queue_.push(0.0);
+ }
+}
+
+MovingMoments::~MovingMoments() {}
+
+void MovingMoments::CalculateMoments(const float* in,
+ size_t in_length,
+ float* first,
+ float* second) {
+ RTC_DCHECK(in);
+ RTC_DCHECK_GT(in_length, 0);
+ RTC_DCHECK(first);
+ RTC_DCHECK(second);
+
+ for (size_t i = 0; i < in_length; ++i) {
+ const float old_value = queue_.front();
+ queue_.pop();
+ queue_.push(in[i]);
+
+ sum_ += in[i] - old_value;
+ sum_of_squares_ += in[i] * in[i] - old_value * old_value;
+ first[i] = sum_ / length_;
+ second[i] = std::max(0.f, sum_of_squares_ / length_);
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/transient/moving_moments.h b/third_party/libwebrtc/modules/audio_processing/transient/moving_moments.h
new file mode 100644
index 0000000000..70451dcb71
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/transient/moving_moments.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_TRANSIENT_MOVING_MOMENTS_H_
+#define MODULES_AUDIO_PROCESSING_TRANSIENT_MOVING_MOMENTS_H_
+
+#include <stddef.h>
+
+#include <queue>
+
+namespace webrtc {
+
+// Calculates the first and second moments for each value of a buffer, taking
+// into account a given number of previous values.
+// It preserves its state, so it can be called multiple times.
+// TODO(chadan): When needed, implement a function that takes a buffer of
+// first moments and a buffer of second moments and calculates the variances.
+// TODO(chadan): When needed, add functionality to update with a buffer but
+// output only the last values of the moments.
+class MovingMoments {
+ public:
+ // Creates a Moving Moments object, that uses the last `length` values
+ // (including the new value introduced in every new calculation).
+ explicit MovingMoments(size_t length);
+ ~MovingMoments();
+
+ // Calculates the new values using `in`. Results will be in the out buffers.
+ // `first` and `second` must be allocated with at least `in_length`.
+ void CalculateMoments(const float* in,
+ size_t in_length,
+ float* first,
+ float* second);
+
+ private:
+ size_t length_;
+ // A queue holding the `length_` latest input values.
+ std::queue<float> queue_;
+ // Sum of the values of the queue.
+ float sum_;
+ // Sum of the squares of the values of the queue.
+ float sum_of_squares_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_TRANSIENT_MOVING_MOMENTS_H_
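Because the window state is kept between calls, per-sample streaming and block processing give identical results, which the unit tests below verify. A usage sketch (illustrative only, not part of the patch):

```cpp
// Sketch: streaming use of MovingMoments, one sample per call.
#include <cstdio>

#include "modules/audio_processing/transient/moving_moments.h"

int main() {
  webrtc::MovingMoments moments(/*length=*/5);
  const float samples[] = {1.f, 2.f, 3.f, 4.f, 5.f, 6.f};
  float mean = 0.f;
  float mean_square = 0.f;
  for (float x : samples) {
    // The 5-sample window carries over between calls.
    moments.CalculateMoments(&x, 1, &mean, &mean_square);
    printf("x=%.0f mean=%.2f mean_square=%.2f\n", x, mean, mean_square);
  }
  return 0;
}
```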
diff --git a/third_party/libwebrtc/modules/audio_processing/transient/moving_moments_unittest.cc b/third_party/libwebrtc/modules/audio_processing/transient/moving_moments_unittest.cc
new file mode 100644
index 0000000000..b0e613e7ab
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/transient/moving_moments_unittest.cc
@@ -0,0 +1,207 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/transient/moving_moments.h"
+
+#include <memory>
+
+#include "test/gtest.h"
+
+namespace webrtc {
+
+static const float kTolerance = 0.0001f;
+
+class MovingMomentsTest : public ::testing::Test {
+ protected:
+ static const size_t kMovingMomentsBufferLength = 5;
+  static const size_t kMaxOutputLength = 20;  // Valid for these tests only.
+
+ virtual void SetUp();
+ // Calls CalculateMoments and verifies that it produces the expected
+ // outputs.
+ void CalculateMomentsAndVerify(const float* input,
+ size_t input_length,
+ const float* expected_mean,
+ const float* expected_mean_squares);
+
+ std::unique_ptr<MovingMoments> moving_moments_;
+ float output_mean_[kMaxOutputLength];
+ float output_mean_squares_[kMaxOutputLength];
+};
+
+const size_t MovingMomentsTest::kMaxOutputLength;
+
+void MovingMomentsTest::SetUp() {
+ moving_moments_.reset(new MovingMoments(kMovingMomentsBufferLength));
+}
+
+void MovingMomentsTest::CalculateMomentsAndVerify(
+ const float* input,
+ size_t input_length,
+ const float* expected_mean,
+ const float* expected_mean_squares) {
+ ASSERT_LE(input_length, kMaxOutputLength);
+
+ moving_moments_->CalculateMoments(input, input_length, output_mean_,
+ output_mean_squares_);
+
+  for (size_t i = 0; i < input_length; ++i) {
+ EXPECT_NEAR(expected_mean[i], output_mean_[i], kTolerance);
+ EXPECT_NEAR(expected_mean_squares[i], output_mean_squares_[i], kTolerance);
+ }
+}
+
+TEST_F(MovingMomentsTest, CorrectMomentsOfAnAllZerosBuffer) {
+ const float kInput[] = {0.f, 0.f, 0.f, 0.f, 0.f};
+ const size_t kInputLength = sizeof(kInput) / sizeof(kInput[0]);
+
+ const float expected_mean[kInputLength] = {0.f, 0.f, 0.f, 0.f, 0.f};
+ const float expected_mean_squares[kInputLength] = {0.f, 0.f, 0.f, 0.f, 0.f};
+
+ CalculateMomentsAndVerify(kInput, kInputLength, expected_mean,
+ expected_mean_squares);
+}
+
+TEST_F(MovingMomentsTest, CorrectMomentsOfAConstantBuffer) {
+ const float kInput[] = {5.f, 5.f, 5.f, 5.f, 5.f, 5.f, 5.f, 5.f, 5.f, 5.f};
+ const size_t kInputLength = sizeof(kInput) / sizeof(kInput[0]);
+
+ const float expected_mean[kInputLength] = {1.f, 2.f, 3.f, 4.f, 5.f,
+ 5.f, 5.f, 5.f, 5.f, 5.f};
+ const float expected_mean_squares[kInputLength] = {
+ 5.f, 10.f, 15.f, 20.f, 25.f, 25.f, 25.f, 25.f, 25.f, 25.f};
+
+ CalculateMomentsAndVerify(kInput, kInputLength, expected_mean,
+ expected_mean_squares);
+}
+
+TEST_F(MovingMomentsTest, CorrectMomentsOfAnIncreasingBuffer) {
+ const float kInput[] = {1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f, 9.f};
+ const size_t kInputLength = sizeof(kInput) / sizeof(kInput[0]);
+
+ const float expected_mean[kInputLength] = {0.2f, 0.6f, 1.2f, 2.f, 3.f,
+ 4.f, 5.f, 6.f, 7.f};
+ const float expected_mean_squares[kInputLength] = {
+ 0.2f, 1.f, 2.8f, 6.f, 11.f, 18.f, 27.f, 38.f, 51.f};
+
+ CalculateMomentsAndVerify(kInput, kInputLength, expected_mean,
+ expected_mean_squares);
+}
+
+TEST_F(MovingMomentsTest, CorrectMomentsOfADecreasingBuffer) {
+ const float kInput[] = {-1.f, -2.f, -3.f, -4.f, -5.f, -6.f, -7.f, -8.f, -9.f};
+ const size_t kInputLength = sizeof(kInput) / sizeof(kInput[0]);
+
+ const float expected_mean[kInputLength] = {-0.2f, -0.6f, -1.2f, -2.f, -3.f,
+ -4.f, -5.f, -6.f, -7.f};
+ const float expected_mean_squares[kInputLength] = {
+ 0.2f, 1.f, 2.8f, 6.f, 11.f, 18.f, 27.f, 38.f, 51.f};
+
+ CalculateMomentsAndVerify(kInput, kInputLength, expected_mean,
+ expected_mean_squares);
+}
+
+TEST_F(MovingMomentsTest, CorrectMomentsOfAZeroMeanSequence) {
+ const size_t kMovingMomentsBufferLength = 4;
+ moving_moments_.reset(new MovingMoments(kMovingMomentsBufferLength));
+ const float kInput[] = {1.f, -1.f, 1.f, -1.f, 1.f,
+ -1.f, 1.f, -1.f, 1.f, -1.f};
+ const size_t kInputLength = sizeof(kInput) / sizeof(kInput[0]);
+
+ const float expected_mean[kInputLength] = {0.25f, 0.f, 0.25f, 0.f, 0.f,
+ 0.f, 0.f, 0.f, 0.f, 0.f};
+ const float expected_mean_squares[kInputLength] = {
+ 0.25f, 0.5f, 0.75f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f};
+
+ CalculateMomentsAndVerify(kInput, kInputLength, expected_mean,
+ expected_mean_squares);
+}
+
+TEST_F(MovingMomentsTest, CorrectMomentsOfAnArbitraryBuffer) {
+ const float kInput[] = {0.2f, 0.3f, 0.5f, 0.7f, 0.11f,
+ 0.13f, 0.17f, 0.19f, 0.23f};
+ const size_t kInputLength = sizeof(kInput) / sizeof(kInput[0]);
+
+ const float expected_mean[kInputLength] = {
+ 0.04f, 0.1f, 0.2f, 0.34f, 0.362f, 0.348f, 0.322f, 0.26f, 0.166f};
+ const float expected_mean_squares[kInputLength] = {0.008f, 0.026f, 0.076f,
+ 0.174f, 0.1764f, 0.1718f,
+ 0.1596f, 0.1168f, 0.0294f};
+
+ CalculateMomentsAndVerify(kInput, kInputLength, expected_mean,
+ expected_mean_squares);
+}
+
+TEST_F(MovingMomentsTest, MultipleCalculateMomentsCalls) {
+ const float kInputFirstCall[] = {0.2f, 0.3f, 0.5f, 0.7f, 0.11f,
+ 0.13f, 0.17f, 0.19f, 0.23f};
+ const size_t kInputFirstCallLength =
+ sizeof(kInputFirstCall) / sizeof(kInputFirstCall[0]);
+ const float kInputSecondCall[] = {0.29f, 0.31f};
+ const size_t kInputSecondCallLength =
+ sizeof(kInputSecondCall) / sizeof(kInputSecondCall[0]);
+ const float kInputThirdCall[] = {0.37f, 0.41f, 0.43f, 0.47f};
+ const size_t kInputThirdCallLength =
+ sizeof(kInputThirdCall) / sizeof(kInputThirdCall[0]);
+
+ const float expected_mean_first_call[kInputFirstCallLength] = {
+ 0.04f, 0.1f, 0.2f, 0.34f, 0.362f, 0.348f, 0.322f, 0.26f, 0.166f};
+ const float expected_mean_squares_first_call[kInputFirstCallLength] = {
+ 0.008f, 0.026f, 0.076f, 0.174f, 0.1764f,
+ 0.1718f, 0.1596f, 0.1168f, 0.0294f};
+
+ const float expected_mean_second_call[kInputSecondCallLength] = {0.202f,
+ 0.238f};
+ const float expected_mean_squares_second_call[kInputSecondCallLength] = {
+ 0.0438f, 0.0596f};
+
+ const float expected_mean_third_call[kInputThirdCallLength] = {
+ 0.278f, 0.322f, 0.362f, 0.398f};
+ const float expected_mean_squares_third_call[kInputThirdCallLength] = {
+ 0.0812f, 0.1076f, 0.134f, 0.1614f};
+
+ CalculateMomentsAndVerify(kInputFirstCall, kInputFirstCallLength,
+ expected_mean_first_call,
+ expected_mean_squares_first_call);
+
+ CalculateMomentsAndVerify(kInputSecondCall, kInputSecondCallLength,
+ expected_mean_second_call,
+ expected_mean_squares_second_call);
+
+ CalculateMomentsAndVerify(kInputThirdCall, kInputThirdCallLength,
+ expected_mean_third_call,
+ expected_mean_squares_third_call);
+}
+
+TEST_F(MovingMomentsTest, VerifySampleBasedVsBlockBasedCalculation) {
+ const float kInput[] = {0.2f, 0.3f, 0.5f, 0.7f, 0.11f,
+ 0.13f, 0.17f, 0.19f, 0.23f};
+ const size_t kInputLength = sizeof(kInput) / sizeof(kInput[0]);
+
+ float output_mean_block_based[kInputLength];
+ float output_mean_squares_block_based[kInputLength];
+
+ float output_mean_sample_based;
+ float output_mean_squares_sample_based;
+
+ moving_moments_->CalculateMoments(kInput, kInputLength,
+ output_mean_block_based,
+ output_mean_squares_block_based);
+ moving_moments_.reset(new MovingMoments(kMovingMomentsBufferLength));
+ for (size_t i = 0; i < kInputLength; ++i) {
+ moving_moments_->CalculateMoments(&kInput[i], 1, &output_mean_sample_based,
+ &output_mean_squares_sample_based);
+ EXPECT_FLOAT_EQ(output_mean_block_based[i], output_mean_sample_based);
+ EXPECT_FLOAT_EQ(output_mean_squares_block_based[i],
+ output_mean_squares_sample_based);
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/transient/test/plotDetection.m b/third_party/libwebrtc/modules/audio_processing/transient/test/plotDetection.m
new file mode 100644
index 0000000000..8e12ab920b
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/transient/test/plotDetection.m
@@ -0,0 +1,22 @@
+%
+% Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+%
+% Use of this source code is governed by a BSD-style license
+% that can be found in the LICENSE file in the root of the source
+% tree. An additional intellectual property rights grant can be found
+% in the file PATENTS. All contributing project authors may
+% be found in the AUTHORS file in the root of the source tree.
+%
+
+function [] = plotDetection(PCMfile, DATfile, fs, chunkSize)
+%[] = plotDetection(PCMfile, DATfile, fs, chunkSize)
+%
+%Plots the signal alongside the detection values.
+%
+%PCMfile: The file of the input signal in PCM format.
+%DATfile: The file containing the detection values in binary float format.
+%fs: The sample rate of the signal in Hertz.
+%chunkSize: The chunk size, in seconds, used to compute the detection values.
+[x, tx] = readPCM(PCMfile, fs);
+[d, td] = readDetection(DATfile, fs, chunkSize);
+plot(tx, x, td, d);
diff --git a/third_party/libwebrtc/modules/audio_processing/transient/test/readDetection.m b/third_party/libwebrtc/modules/audio_processing/transient/test/readDetection.m
new file mode 100644
index 0000000000..832bf31ec8
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/transient/test/readDetection.m
@@ -0,0 +1,26 @@
+%
+% Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+%
+% Use of this source code is governed by a BSD-style license
+% that can be found in the LICENSE file in the root of the source
+% tree. An additional intellectual property rights grant can be found
+% in the file PATENTS. All contributing project authors may
+% be found in the AUTHORS file in the root of the source tree.
+%
+
+function [d, t] = readDetection(file, fs, chunkSize)
+%[d, t] = readDetection(file, fs, chunkSize)
+%
+%Reads a detection signal from a DAT file.
+%
+%d: The detection signal.
+%t: The respective time vector.
+%
+%file: The DAT file where the detection signal is stored in float format.
+%fs: The signal sample rate in Hertz.
+%chunkSize: The chunk size, in seconds, used for the detection.
+fid = fopen(file);
+d = fread(fid, inf, 'float');
+fclose(fid);
+t = 0:(1 / fs):(length(d) * chunkSize - 1 / fs);
+d = d(floor(t / chunkSize) + 1);
diff --git a/third_party/libwebrtc/modules/audio_processing/transient/test/readPCM.m b/third_party/libwebrtc/modules/audio_processing/transient/test/readPCM.m
new file mode 100644
index 0000000000..cd3cef8a3c
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/transient/test/readPCM.m
@@ -0,0 +1,26 @@
+%
+% Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+%
+% Use of this source code is governed by a BSD-style license
+% that can be found in the LICENSE file in the root of the source
+% tree. An additional intellectual property rights grant can be found
+% in the file PATENTS. All contributing project authors may
+% be found in the AUTHORS file in the root of the source tree.
+%
+
+function [x, t] = readPCM(file, fs)
+%[x, t] = readPCM(file, fs)
+%
+%Reads a signal from a PCM file.
+%
+%x: The read signal after normalization.
+%t: The respective time vector.
+%
+%file: The PCM file where the signal is stored in int16 format.
+%fs: The signal sample rate in Hertz.
+fid = fopen(file);
+x = fread(fid, inf, 'int16');
+fclose(fid);
+x = x - mean(x);
+x = x / max(abs(x));
+t = 0:(1 / fs):((length(x) - 1) / fs);
diff --git a/third_party/libwebrtc/modules/audio_processing/transient/transient_detector.cc b/third_party/libwebrtc/modules/audio_processing/transient/transient_detector.cc
new file mode 100644
index 0000000000..5c35505368
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/transient/transient_detector.cc
@@ -0,0 +1,176 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/transient/transient_detector.h"
+
+#include <float.h>
+#include <string.h>
+
+#include <algorithm>
+#include <cmath>
+
+#include "modules/audio_processing/transient/common.h"
+#include "modules/audio_processing/transient/daubechies_8_wavelet_coeffs.h"
+#include "modules/audio_processing/transient/moving_moments.h"
+#include "modules/audio_processing/transient/wpd_node.h"
+#include "modules/audio_processing/transient/wpd_tree.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+static const int kTransientLengthMs = 30;
+static const int kChunksAtStartupLeftToDelete =
+ kTransientLengthMs / ts::kChunkSizeMs;
+static const float kDetectThreshold = 16.f;
+
+TransientDetector::TransientDetector(int sample_rate_hz)
+ : samples_per_chunk_(sample_rate_hz * ts::kChunkSizeMs / 1000),
+ last_first_moment_(),
+ last_second_moment_(),
+ chunks_at_startup_left_to_delete_(kChunksAtStartupLeftToDelete),
+ reference_energy_(1.f),
+ using_reference_(false) {
+ RTC_DCHECK(sample_rate_hz == ts::kSampleRate8kHz ||
+ sample_rate_hz == ts::kSampleRate16kHz ||
+ sample_rate_hz == ts::kSampleRate32kHz ||
+ sample_rate_hz == ts::kSampleRate48kHz);
+ int samples_per_transient = sample_rate_hz * kTransientLengthMs / 1000;
+ // Adjustment to avoid data loss while downsampling, making
+ // `samples_per_chunk_` and `samples_per_transient` always divisible by
+ // `kLeaves`.
+ samples_per_chunk_ -= samples_per_chunk_ % kLeaves;
+ samples_per_transient -= samples_per_transient % kLeaves;
+
+ tree_leaves_data_length_ = samples_per_chunk_ / kLeaves;
+ wpd_tree_.reset(new WPDTree(samples_per_chunk_,
+ kDaubechies8HighPassCoefficients,
+ kDaubechies8LowPassCoefficients,
+ kDaubechies8CoefficientsLength, kLevels));
+ for (size_t i = 0; i < kLeaves; ++i) {
+ moving_moments_[i].reset(
+ new MovingMoments(samples_per_transient / kLeaves));
+ }
+
+ first_moments_.reset(new float[tree_leaves_data_length_]);
+ second_moments_.reset(new float[tree_leaves_data_length_]);
+
+ for (int i = 0; i < kChunksAtStartupLeftToDelete; ++i) {
+ previous_results_.push_back(0.f);
+ }
+}
+
+TransientDetector::~TransientDetector() {}
+
+float TransientDetector::Detect(const float* data,
+ size_t data_length,
+ const float* reference_data,
+ size_t reference_length) {
+ RTC_DCHECK(data);
+ RTC_DCHECK_EQ(samples_per_chunk_, data_length);
+
+ // TODO(aluebs): Check if these errors can logically happen and if not assert
+ // on them.
+ if (wpd_tree_->Update(data, samples_per_chunk_) != 0) {
+ return -1.f;
+ }
+
+ float result = 0.f;
+
+ for (size_t i = 0; i < kLeaves; ++i) {
+ WPDNode* leaf = wpd_tree_->NodeAt(kLevels, i);
+
+ moving_moments_[i]->CalculateMoments(leaf->data(), tree_leaves_data_length_,
+ first_moments_.get(),
+ second_moments_.get());
+
+    // Add the delayed value (use the last moments from the previous call to
+    // Detect()).
+ float unbiased_data = leaf->data()[0] - last_first_moment_[i];
+ result +=
+ unbiased_data * unbiased_data / (last_second_moment_[i] + FLT_MIN);
+
+ // Add new values.
+ for (size_t j = 1; j < tree_leaves_data_length_; ++j) {
+ unbiased_data = leaf->data()[j] - first_moments_[j - 1];
+ result +=
+ unbiased_data * unbiased_data / (second_moments_[j - 1] + FLT_MIN);
+ }
+
+ last_first_moment_[i] = first_moments_[tree_leaves_data_length_ - 1];
+ last_second_moment_[i] = second_moments_[tree_leaves_data_length_ - 1];
+ }
+
+ result /= tree_leaves_data_length_;
+
+ result *= ReferenceDetectionValue(reference_data, reference_length);
+
+ if (chunks_at_startup_left_to_delete_ > 0) {
+ chunks_at_startup_left_to_delete_--;
+ result = 0.f;
+ }
+
+ if (result >= kDetectThreshold) {
+ result = 1.f;
+ } else {
+    // Otherwise, map the result proportionally.
+    // The mapping is a squared raised cosine with domain [0, kDetectThreshold)
+    // and range [0, 1); it is strictly increasing.
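+    // Explicitly:
+    //   result = ((cos(pi * result / kDetectThreshold + pi) + 1) / 2)^2,
+    // which maps 0 to 0 and approaches 1 as result approaches kDetectThreshold.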
+ const float horizontal_scaling = ts::kPi / kDetectThreshold;
+ const float kHorizontalShift = ts::kPi;
+ const float kVerticalScaling = 0.5f;
+ const float kVerticalShift = 1.f;
+
+ result = (std::cos(result * horizontal_scaling + kHorizontalShift) +
+ kVerticalShift) *
+ kVerticalScaling;
+ result *= result;
+ }
+
+ previous_results_.pop_front();
+ previous_results_.push_back(result);
+
+  // In the current implementation we return the max of the current result and
+  // the previous results, so high results have a width equal to
+  // `transient_length`.
+ return *std::max_element(previous_results_.begin(), previous_results_.end());
+}
+
+// Looks for the highest slope and compares it with the previous ones.
+// An exponential transformation takes this to the [0, 1] range. This value is
+// multiplied by the detection result to avoid false positives.
+float TransientDetector::ReferenceDetectionValue(const float* data,
+ size_t length) {
+ if (data == NULL) {
+ using_reference_ = false;
+ return 1.f;
+ }
+ static const float kEnergyRatioThreshold = 0.2f;
+ static const float kReferenceNonLinearity = 20.f;
+ static const float kMemory = 0.99f;
+ float reference_energy = 0.f;
+ for (size_t i = 1; i < length; ++i) {
+ reference_energy += data[i] * data[i];
+ }
+ if (reference_energy == 0.f) {
+ using_reference_ = false;
+ return 1.f;
+ }
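+  // Logistic mapping: the result approaches 1 when the chunk energy is well
+  // above kEnergyRatioThreshold times the running average `reference_energy_`,
+  // and approaches 0 when it is well below it.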
+ RTC_DCHECK_NE(0, reference_energy_);
+ float result = 1.f / (1.f + std::exp(kReferenceNonLinearity *
+ (kEnergyRatioThreshold -
+ reference_energy / reference_energy_)));
+ reference_energy_ =
+ kMemory * reference_energy_ + (1.f - kMemory) * reference_energy;
+
+ using_reference_ = true;
+
+ return result;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/transient/transient_detector.h b/third_party/libwebrtc/modules/audio_processing/transient/transient_detector.h
new file mode 100644
index 0000000000..a3dbb7ffde
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/transient/transient_detector.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_TRANSIENT_TRANSIENT_DETECTOR_H_
+#define MODULES_AUDIO_PROCESSING_TRANSIENT_TRANSIENT_DETECTOR_H_
+
+#include <stddef.h>
+
+#include <deque>
+#include <memory>
+
+#include "modules/audio_processing/transient/moving_moments.h"
+#include "modules/audio_processing/transient/wpd_tree.h"
+
+namespace webrtc {
+
+// This is an implementation of the transient detector described in "Causal
+// Wavelet based transient detector".
+// Calculates the log-likelihood of a transient occurring in a signal at any
+// given time based on the previous samples; it uses a WPD tree to analyze the
+// signal. It preserves its state between calls, so it can be called
+// repeatedly.
+class TransientDetector {
+ public:
+  // TODO(chadan): The only supported wavelet is Daubechies 8 using a WPD tree
+  // of 3 levels. Add an overloaded constructor to allow different wavelets and
+  // tree depths, when needed.
+
+ // Creates a wavelet based transient detector.
+ TransientDetector(int sample_rate_hz);
+
+ ~TransientDetector();
+
+ // Calculates the log-likelihood of the existence of a transient in `data`.
+ // `data_length` has to be equal to `samples_per_chunk_`.
+  // Returns a value between 0 and 1, as a non-linear representation of this
+  // likelihood.
+ // Returns a negative value on error.
+ float Detect(const float* data,
+ size_t data_length,
+ const float* reference_data,
+ size_t reference_length);
+
+ bool using_reference() { return using_reference_; }
+
+ private:
+ float ReferenceDetectionValue(const float* data, size_t length);
+
+ static const size_t kLevels = 3;
+ static const size_t kLeaves = 1 << kLevels;
+
+ size_t samples_per_chunk_;
+
+ std::unique_ptr<WPDTree> wpd_tree_;
+ size_t tree_leaves_data_length_;
+
+ // A MovingMoments object is needed for each leaf in the WPD tree.
+ std::unique_ptr<MovingMoments> moving_moments_[kLeaves];
+
+ std::unique_ptr<float[]> first_moments_;
+ std::unique_ptr<float[]> second_moments_;
+
+ // Stores the last calculated moments from the previous detection.
+ float last_first_moment_[kLeaves];
+ float last_second_moment_[kLeaves];
+
+  // We keep track of the results from the previous chunks, so they can be
+  // used to spread detection results over the `transient_length`.
+ std::deque<float> previous_results_;
+
+  // Number of chunks at the beginning of the detection that return only
+  // zeros. This avoids infs and NaNs caused by the initial lack of
+  // information.
+ int chunks_at_startup_left_to_delete_;
+
+ float reference_energy_;
+
+ bool using_reference_;
+};
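+
+// Minimal usage sketch (hypothetical `chunk` buffer; its length must match
+// the detector's chunk size, e.g. 160 samples for one 10 ms chunk at 16 kHz):
+//   TransientDetector detector(ts::kSampleRate16kHz);
+//   float likelihood = detector.Detect(chunk, /*data_length=*/160,
+//                                      /*reference_data=*/nullptr,
+//                                      /*reference_length=*/0);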
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_TRANSIENT_TRANSIENT_DETECTOR_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/transient/transient_detector_unittest.cc b/third_party/libwebrtc/modules/audio_processing/transient/transient_detector_unittest.cc
new file mode 100644
index 0000000000..a7364626fd
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/transient/transient_detector_unittest.cc
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/transient/transient_detector.h"
+
+#include <memory>
+#include <string>
+
+#include "modules/audio_processing/transient/common.h"
+#include "modules/audio_processing/transient/file_utils.h"
+#include "rtc_base/strings/string_builder.h"
+#include "rtc_base/system/file_wrapper.h"
+#include "test/gtest.h"
+#include "test/testsupport/file_utils.h"
+
+namespace webrtc {
+
+static const int kSampleRatesHz[] = {ts::kSampleRate8kHz, ts::kSampleRate16kHz,
+ ts::kSampleRate32kHz,
+ ts::kSampleRate48kHz};
+static const size_t kNumberOfSampleRates =
+ sizeof(kSampleRatesHz) / sizeof(*kSampleRatesHz);
+
+// This test checks the correctness of the transient detector by comparing its
+// results with the ones stored in the detect files in the directory:
+// resources/audio_processing/transient/
+// The files contain the expected results in double precision (little endian).
+// The audio files used for the different sample rates are stored in the same
+// directory.
+#if defined(WEBRTC_IOS)
+TEST(TransientDetectorTest, DISABLED_CorrectnessBasedOnFiles) {
+#else
+TEST(TransientDetectorTest, CorrectnessBasedOnFiles) {
+#endif
+ for (size_t i = 0; i < kNumberOfSampleRates; ++i) {
+ int sample_rate_hz = kSampleRatesHz[i];
+
+ // Prepare detect file.
+ rtc::StringBuilder detect_file_name;
+ detect_file_name << "audio_processing/transient/detect"
+ << (sample_rate_hz / 1000) << "kHz";
+
+ FileWrapper detect_file = FileWrapper::OpenReadOnly(
+ test::ResourcePath(detect_file_name.str(), "dat"));
+
+ bool file_opened = detect_file.is_open();
+ ASSERT_TRUE(file_opened) << "File could not be opened.\n"
+ << detect_file_name.str().c_str();
+
+ // Prepare audio file.
+ rtc::StringBuilder audio_file_name;
+ audio_file_name << "audio_processing/transient/audio"
+ << (sample_rate_hz / 1000) << "kHz";
+
+ FileWrapper audio_file = FileWrapper::OpenReadOnly(
+ test::ResourcePath(audio_file_name.str(), "pcm"));
+
+ // Create detector.
+ TransientDetector detector(sample_rate_hz);
+
+ const size_t buffer_length = sample_rate_hz * ts::kChunkSizeMs / 1000;
+ std::unique_ptr<float[]> buffer(new float[buffer_length]);
+
+ const float kTolerance = 0.02f;
+
+ size_t frames_read = 0;
+
+ while (ReadInt16FromFileToFloatBuffer(&audio_file, buffer_length,
+ buffer.get()) == buffer_length) {
+ ++frames_read;
+
+ float detector_value =
+ detector.Detect(buffer.get(), buffer_length, NULL, 0);
+ double file_value;
+ ASSERT_EQ(1u, ReadDoubleBufferFromFile(&detect_file, 1, &file_value))
+ << "Detect test file is malformed.\n";
+
+ // Compare results with data from the matlab test file.
+ EXPECT_NEAR(file_value, detector_value, kTolerance)
+ << "Frame: " << frames_read;
+ }
+
+ detect_file.Close();
+ audio_file.Close();
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/transient/transient_suppression_test.cc b/third_party/libwebrtc/modules/audio_processing/transient/transient_suppression_test.cc
new file mode 100644
index 0000000000..2d8baf9416
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/transient/transient_suppression_test.cc
@@ -0,0 +1,238 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "absl/flags/flag.h"
+#include "absl/flags/parse.h"
+#include "common_audio/include/audio_util.h"
+#include "modules/audio_processing/agc/agc.h"
+#include "modules/audio_processing/transient/transient_suppressor.h"
+#include "modules/audio_processing/transient/transient_suppressor_impl.h"
+#include "test/gtest.h"
+#include "test/testsupport/file_utils.h"
+
+ABSL_FLAG(std::string, in_file_name, "", "PCM file that contains the signal.");
+ABSL_FLAG(std::string,
+ detection_file_name,
+ "",
+ "PCM file that contains the detection signal.");
+ABSL_FLAG(std::string,
+ reference_file_name,
+ "",
+ "PCM file that contains the reference signal.");
+
+ABSL_FLAG(int,
+ chunk_size_ms,
+ 10,
+ "Time between each chunk of samples in milliseconds.");
+
+ABSL_FLAG(int,
+ sample_rate_hz,
+ 16000,
+ "Sampling frequency of the signal in Hertz.");
+ABSL_FLAG(int,
+ detection_rate_hz,
+ 0,
+ "Sampling frequency of the detection signal in Hertz.");
+
+ABSL_FLAG(int, num_channels, 1, "Number of channels.");
+
+namespace webrtc {
+
+const char kUsage[] =
+ "\nDetects and suppresses transients from file.\n\n"
+ "This application loads the signal from the in_file_name with a specific\n"
+ "num_channels and sample_rate_hz, the detection signal from the\n"
+ "detection_file_name with a specific detection_rate_hz, and the reference\n"
+ "signal from the reference_file_name with sample_rate_hz, divides them\n"
+ "into chunk_size_ms blocks, computes its voice value and depending on the\n"
+ "voice_threshold does the respective restoration. You can always get the\n"
+ "all-voiced or all-unvoiced cases by setting the voice_threshold to 0 or\n"
+ "1 respectively.\n\n";
+
+// Read next buffers from the test files (signed 16-bit host-endian PCM
+// format). audio_buffer has int16 samples, detection_buffer has float samples
+// with range [-32768,32767], and reference_buffer has float samples with range
+// [-1,1]. Return true iff all the buffers were filled completely.
+bool ReadBuffers(FILE* in_file,
+ size_t audio_buffer_size,
+ int num_channels,
+ int16_t* audio_buffer,
+ FILE* detection_file,
+ size_t detection_buffer_size,
+ float* detection_buffer,
+ FILE* reference_file,
+ float* reference_buffer) {
+ std::unique_ptr<int16_t[]> tmpbuf;
+ int16_t* read_ptr = audio_buffer;
+ if (num_channels > 1) {
+ tmpbuf.reset(new int16_t[num_channels * audio_buffer_size]);
+ read_ptr = tmpbuf.get();
+ }
+ if (fread(read_ptr, sizeof(*read_ptr), num_channels * audio_buffer_size,
+ in_file) != num_channels * audio_buffer_size) {
+ return false;
+ }
+ // De-interleave.
+ if (num_channels > 1) {
+ for (int i = 0; i < num_channels; ++i) {
+ for (size_t j = 0; j < audio_buffer_size; ++j) {
+ audio_buffer[i * audio_buffer_size + j] =
+ read_ptr[i + j * num_channels];
+ }
+ }
+ }
+ if (detection_file) {
+ std::unique_ptr<int16_t[]> ibuf(new int16_t[detection_buffer_size]);
+ if (fread(ibuf.get(), sizeof(ibuf[0]), detection_buffer_size,
+ detection_file) != detection_buffer_size)
+ return false;
+ for (size_t i = 0; i < detection_buffer_size; ++i)
+ detection_buffer[i] = ibuf[i];
+ }
+ if (reference_file) {
+ std::unique_ptr<int16_t[]> ibuf(new int16_t[audio_buffer_size]);
+ if (fread(ibuf.get(), sizeof(ibuf[0]), audio_buffer_size, reference_file) !=
+ audio_buffer_size)
+ return false;
+ S16ToFloat(ibuf.get(), audio_buffer_size, reference_buffer);
+ }
+ return true;
+}
+
+// Write a number of samples to an open signed 16-bit host-endian PCM file.
+static void WritePCM(FILE* f,
+ size_t num_samples,
+ int num_channels,
+ const float* buffer) {
+ std::unique_ptr<int16_t[]> ibuf(new int16_t[num_channels * num_samples]);
+ // Interleave.
+ for (int i = 0; i < num_channels; ++i) {
+ for (size_t j = 0; j < num_samples; ++j) {
+ ibuf[i + j * num_channels] = FloatS16ToS16(buffer[i * num_samples + j]);
+ }
+ }
+ fwrite(ibuf.get(), sizeof(ibuf[0]), num_channels * num_samples, f);
+}
+
+// This application tests the transient suppression by producing a processed
+// PCM file, which has to be listened to in order to evaluate the
+// performance.
+// It takes an audio file and its voice gain information, and the suppressor
+// processes it, writing the output file "suppressed_keystrokes.pcm".
+void void_main() {
+ // TODO(aluebs): Remove all FileWrappers.
+ // Prepare the input file.
+ FILE* in_file = fopen(absl::GetFlag(FLAGS_in_file_name).c_str(), "rb");
+ ASSERT_TRUE(in_file != NULL);
+
+ // Prepare the detection file.
+ FILE* detection_file = NULL;
+ if (!absl::GetFlag(FLAGS_detection_file_name).empty()) {
+ detection_file =
+ fopen(absl::GetFlag(FLAGS_detection_file_name).c_str(), "rb");
+ }
+
+ // Prepare the reference file.
+ FILE* reference_file = NULL;
+ if (!absl::GetFlag(FLAGS_reference_file_name).empty()) {
+ reference_file =
+ fopen(absl::GetFlag(FLAGS_reference_file_name).c_str(), "rb");
+ }
+
+ // Prepare the output file.
+ std::string out_file_name = test::OutputPath() + "suppressed_keystrokes.pcm";
+ FILE* out_file = fopen(out_file_name.c_str(), "wb");
+ ASSERT_TRUE(out_file != NULL);
+
+ int detection_rate_hz = absl::GetFlag(FLAGS_detection_rate_hz);
+ if (detection_rate_hz == 0) {
+ detection_rate_hz = absl::GetFlag(FLAGS_sample_rate_hz);
+ }
+
+ Agc agc;
+
+ TransientSuppressorImpl suppressor(TransientSuppressor::VadMode::kDefault,
+ absl::GetFlag(FLAGS_sample_rate_hz),
+ detection_rate_hz,
+ absl::GetFlag(FLAGS_num_channels));
+
+ const size_t audio_buffer_size = absl::GetFlag(FLAGS_chunk_size_ms) *
+ absl::GetFlag(FLAGS_sample_rate_hz) / 1000;
+ const size_t detection_buffer_size =
+ absl::GetFlag(FLAGS_chunk_size_ms) * detection_rate_hz / 1000;
+
+ // int16 and float variants of the same data.
+ std::unique_ptr<int16_t[]> audio_buffer_i(
+ new int16_t[absl::GetFlag(FLAGS_num_channels) * audio_buffer_size]);
+ std::unique_ptr<float[]> audio_buffer_f(
+ new float[absl::GetFlag(FLAGS_num_channels) * audio_buffer_size]);
+
+ std::unique_ptr<float[]> detection_buffer, reference_buffer;
+
+ if (detection_file)
+ detection_buffer.reset(new float[detection_buffer_size]);
+ if (reference_file)
+ reference_buffer.reset(new float[audio_buffer_size]);
+
+ while (ReadBuffers(
+ in_file, audio_buffer_size, absl::GetFlag(FLAGS_num_channels),
+ audio_buffer_i.get(), detection_file, detection_buffer_size,
+ detection_buffer.get(), reference_file, reference_buffer.get())) {
+ agc.Process({audio_buffer_i.get(), audio_buffer_size});
+
+ for (size_t i = 0;
+ i < absl::GetFlag(FLAGS_num_channels) * audio_buffer_size; ++i) {
+ audio_buffer_f[i] = audio_buffer_i[i];
+ }
+
+ suppressor.Suppress(audio_buffer_f.get(), audio_buffer_size,
+ absl::GetFlag(FLAGS_num_channels),
+ detection_buffer.get(), detection_buffer_size,
+ reference_buffer.get(), audio_buffer_size,
+ agc.voice_probability(), true);
+
+ // Write result to out file.
+ WritePCM(out_file, audio_buffer_size, absl::GetFlag(FLAGS_num_channels),
+ audio_buffer_f.get());
+ }
+
+ fclose(in_file);
+ if (detection_file) {
+ fclose(detection_file);
+ }
+ if (reference_file) {
+ fclose(reference_file);
+ }
+ fclose(out_file);
+}
+
+} // namespace webrtc
+
+int main(int argc, char* argv[]) {
+ std::vector<char*> args = absl::ParseCommandLine(argc, argv);
+ if (args.size() != 1) {
+ printf("%s", webrtc::kUsage);
+ return 1;
+ }
+ RTC_CHECK_GT(absl::GetFlag(FLAGS_chunk_size_ms), 0);
+ RTC_CHECK_GT(absl::GetFlag(FLAGS_sample_rate_hz), 0);
+ RTC_CHECK_GT(absl::GetFlag(FLAGS_num_channels), 0);
+
+ webrtc::void_main();
+ return 0;
+}
diff --git a/third_party/libwebrtc/modules/audio_processing/transient/transient_suppressor.h b/third_party/libwebrtc/modules/audio_processing/transient/transient_suppressor.h
new file mode 100644
index 0000000000..ecb3c3baab
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/transient/transient_suppressor.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_TRANSIENT_TRANSIENT_SUPPRESSOR_H_
+#define MODULES_AUDIO_PROCESSING_TRANSIENT_TRANSIENT_SUPPRESSOR_H_
+
+#include <cstddef>
+
+namespace webrtc {
+
+// Detects transients in an audio stream and suppresses them using a simple
+// restoration algorithm that attenuates unexpected spikes in the spectrum.
+class TransientSuppressor {
+ public:
+  // Type of VAD used by the caller to compute the `voice_probability` argument
+  // of `Suppress()`.
+ enum class VadMode {
+ // By default, `TransientSuppressor` assumes that `voice_probability` is
+ // computed by `AgcManagerDirect`.
+ kDefault = 0,
+ // Use this mode when `TransientSuppressor` must assume that
+ // `voice_probability` is computed by the RNN VAD.
+ kRnnVad,
+    // Use this mode to let `TransientSuppressor::Suppress()` ignore
+    // `voice_probability` and behave as if voice information is unavailable
+    // (regardless of the passed value).
+ kNoVad,
+ };
+
+ virtual ~TransientSuppressor() {}
+
+ virtual void Initialize(int sample_rate_hz,
+ int detector_rate_hz,
+ int num_channels) = 0;
+
+  // Processes a `data` chunk and returns it with keystrokes suppressed from
+  // it. The float format is assumed to be int16 ranged. If there is more than
+  // one channel, the chunks are concatenated one after the other in `data`.
+  // `data_length` must be equal to `data_length_`.
+  // `num_channels` must be equal to `num_channels_`.
+  // A sub-band, ideally the higher one, can be used as `detection_data`. If it
+  // is NULL, `data` is used for the detection too. The `detection_data` is
+  // always assumed mono.
+  // If a reference signal (e.g. a keyboard microphone) is available, it can be
+  // passed in as `reference_data`. It is assumed mono and must have the same
+  // length as `data`. NULL is accepted if unavailable.
+ // This suppressor performs better if voice information is available.
+ // `voice_probability` is the probability of voice being present in this chunk
+ // of audio. If voice information is not available, `voice_probability` must
+ // always be set to 1.
+ // `key_pressed` determines if a key was pressed on this audio chunk.
+ // Returns a delayed version of `voice_probability` according to the
+ // algorithmic delay introduced by this method. In this way, the modified
+ // `data` and the returned voice probability will be temporally aligned.
+ virtual float Suppress(float* data,
+ size_t data_length,
+ int num_channels,
+ const float* detection_data,
+ size_t detection_length,
+ const float* reference_data,
+ size_t reference_length,
+ float voice_probability,
+ bool key_pressed) = 0;
+};
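+
+// Minimal call sketch (hypothetical buffers; float samples in int16 range;
+// lengths must match the values passed to `Initialize()`):
+//   float delayed_voice_probability = suppressor->Suppress(
+//       data, data_length, num_channels, /*detection_data=*/nullptr,
+//       detection_length, /*reference_data=*/nullptr, reference_length,
+//       /*voice_probability=*/1.0f, /*key_pressed=*/false);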
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_TRANSIENT_TRANSIENT_SUPPRESSOR_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/transient/transient_suppressor_api_gn/moz.build b/third_party/libwebrtc/modules/audio_processing/transient/transient_suppressor_api_gn/moz.build
new file mode 100644
index 0000000000..24dda85922
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/transient/transient_suppressor_api_gn/moz.build
@@ -0,0 +1,189 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("transient_suppressor_api_gn")
diff --git a/third_party/libwebrtc/modules/audio_processing/transient/transient_suppressor_impl.cc b/third_party/libwebrtc/modules/audio_processing/transient/transient_suppressor_impl.cc
new file mode 100644
index 0000000000..90428464e3
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/transient/transient_suppressor_impl.cc
@@ -0,0 +1,455 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/transient/transient_suppressor_impl.h"
+
+#include <string.h>
+
+#include <algorithm>
+#include <cmath>
+#include <complex>
+#include <deque>
+#include <limits>
+#include <set>
+#include <string>
+
+#include "common_audio/include/audio_util.h"
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+#include "common_audio/third_party/ooura/fft_size_256/fft4g.h"
+#include "modules/audio_processing/transient/common.h"
+#include "modules/audio_processing/transient/transient_detector.h"
+#include "modules/audio_processing/transient/transient_suppressor.h"
+#include "modules/audio_processing/transient/windows_private.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+static const float kMeanIIRCoefficient = 0.5f;
+
+// TODO(aluebs): Check if these values work also for 48kHz.
+static const size_t kMinVoiceBin = 3;
+static const size_t kMaxVoiceBin = 60;
+
+namespace {
+
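+// Inexpensive L1 approximation of the exact magnitude sqrt(a^2 + b^2). The
+// same measure feeds the spectral means, so the approximation is consistent
+// in the peak comparisons below.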
+float ComplexMagnitude(float a, float b) {
+ return std::abs(a) + std::abs(b);
+}
+
+std::string GetVadModeLabel(TransientSuppressor::VadMode vad_mode) {
+ switch (vad_mode) {
+ case TransientSuppressor::VadMode::kDefault:
+ return "default";
+ case TransientSuppressor::VadMode::kRnnVad:
+ return "RNN VAD";
+ case TransientSuppressor::VadMode::kNoVad:
+ return "no VAD";
+ }
+}
+
+} // namespace
+
+TransientSuppressorImpl::TransientSuppressorImpl(VadMode vad_mode,
+ int sample_rate_hz,
+ int detector_rate_hz,
+ int num_channels)
+ : vad_mode_(vad_mode),
+ voice_probability_delay_unit_(/*delay_num_samples=*/0, sample_rate_hz),
+ analyzed_audio_is_silent_(false),
+ data_length_(0),
+ detection_length_(0),
+ analysis_length_(0),
+ buffer_delay_(0),
+ complex_analysis_length_(0),
+ num_channels_(0),
+ window_(NULL),
+ detector_smoothed_(0.f),
+ keypress_counter_(0),
+ chunks_since_keypress_(0),
+ detection_enabled_(false),
+ suppression_enabled_(false),
+ use_hard_restoration_(false),
+ chunks_since_voice_change_(0),
+ seed_(182),
+ using_reference_(false) {
+ RTC_LOG(LS_INFO) << "VAD mode: " << GetVadModeLabel(vad_mode_);
+ Initialize(sample_rate_hz, detector_rate_hz, num_channels);
+}
+
+TransientSuppressorImpl::~TransientSuppressorImpl() {}
+
+void TransientSuppressorImpl::Initialize(int sample_rate_hz,
+ int detection_rate_hz,
+ int num_channels) {
+ RTC_DCHECK(sample_rate_hz == ts::kSampleRate8kHz ||
+ sample_rate_hz == ts::kSampleRate16kHz ||
+ sample_rate_hz == ts::kSampleRate32kHz ||
+ sample_rate_hz == ts::kSampleRate48kHz);
+ RTC_DCHECK(detection_rate_hz == ts::kSampleRate8kHz ||
+ detection_rate_hz == ts::kSampleRate16kHz ||
+ detection_rate_hz == ts::kSampleRate32kHz ||
+ detection_rate_hz == ts::kSampleRate48kHz);
+ RTC_DCHECK_GT(num_channels, 0);
+
+ switch (sample_rate_hz) {
+ case ts::kSampleRate8kHz:
+ analysis_length_ = 128u;
+ window_ = kBlocks80w128;
+ break;
+ case ts::kSampleRate16kHz:
+ analysis_length_ = 256u;
+ window_ = kBlocks160w256;
+ break;
+ case ts::kSampleRate32kHz:
+ analysis_length_ = 512u;
+ window_ = kBlocks320w512;
+ break;
+ case ts::kSampleRate48kHz:
+ analysis_length_ = 1024u;
+ window_ = kBlocks480w1024;
+ break;
+ default:
+ RTC_DCHECK_NOTREACHED();
+ return;
+ }
+
+ detector_.reset(new TransientDetector(detection_rate_hz));
+ data_length_ = sample_rate_hz * ts::kChunkSizeMs / 1000;
+ RTC_DCHECK_LE(data_length_, analysis_length_);
+ buffer_delay_ = analysis_length_ - data_length_;
+
+ voice_probability_delay_unit_.Initialize(/*delay_num_samples=*/buffer_delay_,
+ sample_rate_hz);
+
+ complex_analysis_length_ = analysis_length_ / 2 + 1;
+ RTC_DCHECK_GE(complex_analysis_length_, kMaxVoiceBin);
+ num_channels_ = num_channels;
+ in_buffer_.reset(new float[analysis_length_ * num_channels_]);
+ memset(in_buffer_.get(), 0,
+ analysis_length_ * num_channels_ * sizeof(in_buffer_[0]));
+ detection_length_ = detection_rate_hz * ts::kChunkSizeMs / 1000;
+ detection_buffer_.reset(new float[detection_length_]);
+ memset(detection_buffer_.get(), 0,
+ detection_length_ * sizeof(detection_buffer_[0]));
+ out_buffer_.reset(new float[analysis_length_ * num_channels_]);
+ memset(out_buffer_.get(), 0,
+ analysis_length_ * num_channels_ * sizeof(out_buffer_[0]));
+ // ip[0] must be zero to trigger initialization using rdft().
+ size_t ip_length = 2 + sqrtf(analysis_length_);
+ ip_.reset(new size_t[ip_length]());
+ memset(ip_.get(), 0, ip_length * sizeof(ip_[0]));
+ wfft_.reset(new float[complex_analysis_length_ - 1]);
+ memset(wfft_.get(), 0, (complex_analysis_length_ - 1) * sizeof(wfft_[0]));
+ spectral_mean_.reset(new float[complex_analysis_length_ * num_channels_]);
+ memset(spectral_mean_.get(), 0,
+ complex_analysis_length_ * num_channels_ * sizeof(spectral_mean_[0]));
+ fft_buffer_.reset(new float[analysis_length_ + 2]);
+ memset(fft_buffer_.get(), 0, (analysis_length_ + 2) * sizeof(fft_buffer_[0]));
+ magnitudes_.reset(new float[complex_analysis_length_]);
+ memset(magnitudes_.get(), 0,
+ complex_analysis_length_ * sizeof(magnitudes_[0]));
+ mean_factor_.reset(new float[complex_analysis_length_]);
+
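+  // `mean_factor_` is a double sigmoid with its minimum over the voice bins
+  // [kMinVoiceBin, kMaxVoiceBin]; SoftRestoration() uses it to make peaks in
+  // the voice band harder to attenuate.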
+ static const float kFactorHeight = 10.f;
+ static const float kLowSlope = 1.f;
+ static const float kHighSlope = 0.3f;
+ for (size_t i = 0; i < complex_analysis_length_; ++i) {
+ mean_factor_[i] =
+ kFactorHeight /
+ (1.f + std::exp(kLowSlope * static_cast<int>(i - kMinVoiceBin))) +
+ kFactorHeight /
+ (1.f + std::exp(kHighSlope * static_cast<int>(kMaxVoiceBin - i)));
+ }
+ detector_smoothed_ = 0.f;
+ keypress_counter_ = 0;
+ chunks_since_keypress_ = 0;
+ detection_enabled_ = false;
+ suppression_enabled_ = false;
+ use_hard_restoration_ = false;
+ chunks_since_voice_change_ = 0;
+ seed_ = 182;
+ using_reference_ = false;
+}
+
+float TransientSuppressorImpl::Suppress(float* data,
+ size_t data_length,
+ int num_channels,
+ const float* detection_data,
+ size_t detection_length,
+ const float* reference_data,
+ size_t reference_length,
+ float voice_probability,
+ bool key_pressed) {
+ if (!data || data_length != data_length_ || num_channels != num_channels_ ||
+ detection_length != detection_length_ || voice_probability < 0 ||
+ voice_probability > 1) {
+ // The audio is not modified, so the voice probability is returned as is
+ // (delay not applied).
+ return voice_probability;
+ }
+
+ UpdateKeypress(key_pressed);
+ UpdateBuffers(data);
+
+ if (detection_enabled_) {
+ UpdateRestoration(voice_probability);
+
+ if (!detection_data) {
+ // Use the input data of the first channel if special detection data is
+ // not supplied.
+ detection_data = &in_buffer_[buffer_delay_];
+ }
+
+ float detector_result = detector_->Detect(detection_data, detection_length,
+ reference_data, reference_length);
+ if (detector_result < 0) {
+ // The audio is not modified, so the voice probability is returned as is
+ // (delay not applied).
+ return voice_probability;
+ }
+
+ using_reference_ = detector_->using_reference();
+
+    // `detector_smoothed_` follows `detector_result` when the latter is
+    // increasing, but decays exponentially afterwards, which suppresses the
+    // ringing of keyclicks.
+    float smooth_factor = using_reference_ ? 0.6f : 0.1f;
+ detector_smoothed_ = detector_result >= detector_smoothed_
+ ? detector_result
+ : smooth_factor * detector_smoothed_ +
+ (1 - smooth_factor) * detector_result;
+
+ for (int i = 0; i < num_channels_; ++i) {
+ Suppress(&in_buffer_[i * analysis_length_],
+ &spectral_mean_[i * complex_analysis_length_],
+ &out_buffer_[i * analysis_length_]);
+ }
+ }
+
+  // If the suppression isn't enabled, we use the in buffer to delay the signal
+  // appropriately. This also gives the out buffer time to be refreshed with
+  // new data between detection being enabled and suppression kicking in.
+ for (int i = 0; i < num_channels_; ++i) {
+ memcpy(&data[i * data_length_],
+ suppression_enabled_ ? &out_buffer_[i * analysis_length_]
+ : &in_buffer_[i * analysis_length_],
+ data_length_ * sizeof(*data));
+ }
+
+ // The audio has been modified, return the delayed voice probability.
+ return voice_probability_delay_unit_.Delay(voice_probability);
+}
+
+// This should only be called when detection is enabled. UpdateBuffers() must
+// have been called. At return, `out_buffer_` will be filled with the
+// processed output.
+void TransientSuppressorImpl::Suppress(float* in_ptr,
+ float* spectral_mean,
+ float* out_ptr) {
+ // Go to frequency domain.
+ for (size_t i = 0; i < analysis_length_; ++i) {
+ // TODO(aluebs): Rename windows
+ fft_buffer_[i] = in_ptr[i] * window_[i];
+ }
+
+ WebRtc_rdft(analysis_length_, 1, fft_buffer_.get(), ip_.get(), wfft_.get());
+
+ // Since WebRtc_rdft puts R[n/2] in fft_buffer_[1], we move it to the end
+ // for convenience.
+ fft_buffer_[analysis_length_] = fft_buffer_[1];
+ fft_buffer_[analysis_length_ + 1] = 0.f;
+ fft_buffer_[1] = 0.f;
+
+ for (size_t i = 0; i < complex_analysis_length_; ++i) {
+ magnitudes_[i] =
+ ComplexMagnitude(fft_buffer_[i * 2], fft_buffer_[i * 2 + 1]);
+ }
+ // Restore audio if necessary.
+ if (suppression_enabled_) {
+ if (use_hard_restoration_) {
+ HardRestoration(spectral_mean);
+ } else {
+ SoftRestoration(spectral_mean);
+ }
+ }
+
+ // Update the spectral mean.
+ for (size_t i = 0; i < complex_analysis_length_; ++i) {
+ spectral_mean[i] = (1 - kMeanIIRCoefficient) * spectral_mean[i] +
+ kMeanIIRCoefficient * magnitudes_[i];
+ }
+
+ // Back to time domain.
+ // Put R[n/2] back in fft_buffer_[1].
+ fft_buffer_[1] = fft_buffer_[analysis_length_];
+
+ WebRtc_rdft(analysis_length_, -1, fft_buffer_.get(), ip_.get(), wfft_.get());
+ const float fft_scaling = 2.f / analysis_length_;
+
+ for (size_t i = 0; i < analysis_length_; ++i) {
+ out_ptr[i] += fft_buffer_[i] * window_[i] * fft_scaling;
+ }
+}
+
+void TransientSuppressorImpl::UpdateKeypress(bool key_pressed) {
+  const int kKeypressPenalty = 1000 / ts::kChunkSizeMs;  // 1 second of chunks.
+  const int kIsTypingThreshold = 1000 / ts::kChunkSizeMs;  // 1 second.
+  const int kChunksUntilNotTyping = 4000 / ts::kChunkSizeMs;  // 4 seconds.
+
+ if (key_pressed) {
+ keypress_counter_ += kKeypressPenalty;
+ chunks_since_keypress_ = 0;
+ detection_enabled_ = true;
+ }
+ keypress_counter_ = std::max(0, keypress_counter_ - 1);
+
+ if (keypress_counter_ > kIsTypingThreshold) {
+ if (!suppression_enabled_) {
+ RTC_LOG(LS_INFO) << "[ts] Transient suppression is now enabled.";
+ }
+ suppression_enabled_ = true;
+ keypress_counter_ = 0;
+ }
+
+ if (detection_enabled_ && ++chunks_since_keypress_ > kChunksUntilNotTyping) {
+ if (suppression_enabled_) {
+ RTC_LOG(LS_INFO) << "[ts] Transient suppression is now disabled.";
+ }
+ detection_enabled_ = false;
+ suppression_enabled_ = false;
+ keypress_counter_ = 0;
+ }
+}
+
+void TransientSuppressorImpl::UpdateRestoration(float voice_probability) {
+ bool not_voiced;
+ switch (vad_mode_) {
+ case TransientSuppressor::VadMode::kDefault: {
+ constexpr float kVoiceThreshold = 0.02f;
+ not_voiced = voice_probability < kVoiceThreshold;
+ break;
+ }
+ case TransientSuppressor::VadMode::kRnnVad: {
+ constexpr float kVoiceThreshold = 0.7f;
+ not_voiced = voice_probability < kVoiceThreshold;
+ break;
+ }
+ case TransientSuppressor::VadMode::kNoVad:
+ // Always assume that voice is detected.
+ not_voiced = false;
+ break;
+ }
+
+ if (not_voiced == use_hard_restoration_) {
+ chunks_since_voice_change_ = 0;
+ } else {
+ ++chunks_since_voice_change_;
+
+ // Number of 10 ms frames to wait to transition to and from hard
+ // restoration.
+ constexpr int kHardRestorationOffsetDelay = 3;
+ constexpr int kHardRestorationOnsetDelay = 80;
+
+ if ((use_hard_restoration_ &&
+ chunks_since_voice_change_ > kHardRestorationOffsetDelay) ||
+ (!use_hard_restoration_ &&
+ chunks_since_voice_change_ > kHardRestorationOnsetDelay)) {
+ use_hard_restoration_ = not_voiced;
+ chunks_since_voice_change_ = 0;
+ }
+ }
+}
+
+// Shift buffers to make way for new data. Must be called after
+// `detection_enabled_` is updated by UpdateKeypress().
+void TransientSuppressorImpl::UpdateBuffers(float* data) {
+ // TODO(aluebs): Change to ring buffer.
+ memmove(in_buffer_.get(), &in_buffer_[data_length_],
+ (buffer_delay_ + (num_channels_ - 1) * analysis_length_) *
+ sizeof(in_buffer_[0]));
+ // Copy new chunk to buffer.
+ for (int i = 0; i < num_channels_; ++i) {
+ memcpy(&in_buffer_[buffer_delay_ + i * analysis_length_],
+ &data[i * data_length_], data_length_ * sizeof(*data));
+ }
+ if (detection_enabled_) {
+ // Shift previous chunk in out buffer.
+ memmove(out_buffer_.get(), &out_buffer_[data_length_],
+ (buffer_delay_ + (num_channels_ - 1) * analysis_length_) *
+ sizeof(out_buffer_[0]));
+ // Initialize new chunk in out buffer.
+ for (int i = 0; i < num_channels_; ++i) {
+ memset(&out_buffer_[buffer_delay_ + i * analysis_length_], 0,
+ data_length_ * sizeof(out_buffer_[0]));
+ }
+ }
+}
+
+// Restores the unvoiced signal if a click is present.
+// Attenuates by a certain factor every peak in the `fft_buffer_` that exceeds
+// the spectral mean. The attenuation depends on `detector_smoothed_`.
+// If a restoration takes place, the `magnitudes_` are updated to the new value.
+void TransientSuppressorImpl::HardRestoration(float* spectral_mean) {
+ const float detector_result =
+ 1.f - std::pow(1.f - detector_smoothed_, using_reference_ ? 200.f : 50.f);
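+  // 1 - (1 - x)^N rises steeply for small x, so even a weak smoothed
+  // detection yields strong restoration; the exponent is larger when a
+  // reference signal backs the detection.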
+ // To restore, we get the peaks in the spectrum. If higher than the previous
+ // spectral mean we adjust them.
+ for (size_t i = 0; i < complex_analysis_length_; ++i) {
+ if (magnitudes_[i] > spectral_mean[i] && magnitudes_[i] > 0) {
+ // RandU() generates values on [0, int16::max()]
+ const float phase = 2 * ts::kPi * WebRtcSpl_RandU(&seed_) /
+ std::numeric_limits<int16_t>::max();
+ const float scaled_mean = detector_result * spectral_mean[i];
+
+ fft_buffer_[i * 2] = (1 - detector_result) * fft_buffer_[i * 2] +
+ scaled_mean * cosf(phase);
+ fft_buffer_[i * 2 + 1] = (1 - detector_result) * fft_buffer_[i * 2 + 1] +
+ scaled_mean * sinf(phase);
+ magnitudes_[i] = magnitudes_[i] -
+ detector_result * (magnitudes_[i] - spectral_mean[i]);
+ }
+ }
+}
+
+// Restores the voiced signal if a click is present.
+// Attenuates by a certain factor every peak in the `fft_buffer_` that exceeds
+// the spectral mean and that is lower than some function of the current block
+// frequency mean. The attenuation depends on `detector_smoothed_`.
+// If a restoration takes place, the `magnitudes_` are updated to the new value.
+void TransientSuppressorImpl::SoftRestoration(float* spectral_mean) {
+ // Get the spectral magnitude mean of the current block.
+ float block_frequency_mean = 0;
+ for (size_t i = kMinVoiceBin; i < kMaxVoiceBin; ++i) {
+ block_frequency_mean += magnitudes_[i];
+ }
+ block_frequency_mean /= (kMaxVoiceBin - kMinVoiceBin);
+
+ // To restore, we get the peaks in the spectrum. If higher than the
+ // previous spectral mean and lower than a factor of the block mean
+ // we adjust them. The factor is a double sigmoid that has a minimum in the
+ // voice frequency range (300Hz - 3kHz).
+ for (size_t i = 0; i < complex_analysis_length_; ++i) {
+ if (magnitudes_[i] > spectral_mean[i] && magnitudes_[i] > 0 &&
+ (using_reference_ ||
+ magnitudes_[i] < block_frequency_mean * mean_factor_[i])) {
+ const float new_magnitude =
+ magnitudes_[i] -
+ detector_smoothed_ * (magnitudes_[i] - spectral_mean[i]);
+ const float magnitude_ratio = new_magnitude / magnitudes_[i];
+
+ fft_buffer_[i * 2] *= magnitude_ratio;
+ fft_buffer_[i * 2 + 1] *= magnitude_ratio;
+ magnitudes_[i] = new_magnitude;
+ }
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/transient/transient_suppressor_impl.h b/third_party/libwebrtc/modules/audio_processing/transient/transient_suppressor_impl.h
new file mode 100644
index 0000000000..4005a16b0a
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/transient/transient_suppressor_impl.h
@@ -0,0 +1,115 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_TRANSIENT_TRANSIENT_SUPPRESSOR_IMPL_H_
+#define MODULES_AUDIO_PROCESSING_TRANSIENT_TRANSIENT_SUPPRESSOR_IMPL_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <memory>
+
+#include "modules/audio_processing/transient/transient_suppressor.h"
+#include "modules/audio_processing/transient/voice_probability_delay_unit.h"
+#include "rtc_base/gtest_prod_util.h"
+
+namespace webrtc {
+
+class TransientDetector;
+
+// Detects transients in an audio stream and suppresses them using a simple
+// restoration algorithm that attenuates unexpected spikes in the spectrum.
+class TransientSuppressorImpl : public TransientSuppressor {
+ public:
+ TransientSuppressorImpl(VadMode vad_mode,
+ int sample_rate_hz,
+ int detector_rate_hz,
+ int num_channels);
+ ~TransientSuppressorImpl() override;
+
+ void Initialize(int sample_rate_hz,
+ int detector_rate_hz,
+ int num_channels) override;
+
+ float Suppress(float* data,
+ size_t data_length,
+ int num_channels,
+ const float* detection_data,
+ size_t detection_length,
+ const float* reference_data,
+ size_t reference_length,
+ float voice_probability,
+ bool key_pressed) override;
+
+ private:
+ FRIEND_TEST_ALL_PREFIXES(TransientSuppressorVadModeParametrization,
+ TypingDetectionLogicWorksAsExpectedForMono);
+ void Suppress(float* in_ptr, float* spectral_mean, float* out_ptr);
+
+ void UpdateKeypress(bool key_pressed);
+ void UpdateRestoration(float voice_probability);
+
+ void UpdateBuffers(float* data);
+
+ void HardRestoration(float* spectral_mean);
+ void SoftRestoration(float* spectral_mean);
+
+ const VadMode vad_mode_;
+ VoiceProbabilityDelayUnit voice_probability_delay_unit_;
+
+ std::unique_ptr<TransientDetector> detector_;
+
+ bool analyzed_audio_is_silent_;
+
+ size_t data_length_;
+ size_t detection_length_;
+ size_t analysis_length_;
+ size_t buffer_delay_;
+ size_t complex_analysis_length_;
+ int num_channels_;
+ // Input buffer where the original samples are stored.
+ std::unique_ptr<float[]> in_buffer_;
+ std::unique_ptr<float[]> detection_buffer_;
+ // Output buffer where the restored samples are stored.
+ std::unique_ptr<float[]> out_buffer_;
+
+ // Arrays for fft.
+ std::unique_ptr<size_t[]> ip_;
+ std::unique_ptr<float[]> wfft_;
+
+ std::unique_ptr<float[]> spectral_mean_;
+
+ // Stores the data for the fft.
+ std::unique_ptr<float[]> fft_buffer_;
+
+ std::unique_ptr<float[]> magnitudes_;
+
+ const float* window_;
+
+ std::unique_ptr<float[]> mean_factor_;
+
+ float detector_smoothed_;
+
+ int keypress_counter_;
+ int chunks_since_keypress_;
+ bool detection_enabled_;
+ bool suppression_enabled_;
+
+ bool use_hard_restoration_;
+ int chunks_since_voice_change_;
+
+ uint32_t seed_;
+
+ bool using_reference_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_TRANSIENT_TRANSIENT_SUPPRESSOR_IMPL_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/transient/transient_suppressor_impl_gn/moz.build b/third_party/libwebrtc/modules/audio_processing/transient/transient_suppressor_impl_gn/moz.build
new file mode 100644
index 0000000000..b7f5a0e971
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/transient/transient_suppressor_impl_gn/moz.build
@@ -0,0 +1,216 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/audio_processing/transient/moving_moments.cc",
+ "/third_party/libwebrtc/modules/audio_processing/transient/transient_detector.cc",
+ "/third_party/libwebrtc/modules/audio_processing/transient/transient_suppressor_impl.cc",
+ "/third_party/libwebrtc/modules/audio_processing/transient/wpd_node.cc",
+ "/third_party/libwebrtc/modules/audio_processing/transient/wpd_tree.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("transient_suppressor_impl_gn")
diff --git a/third_party/libwebrtc/modules/audio_processing/transient/transient_suppressor_unittest.cc b/third_party/libwebrtc/modules/audio_processing/transient/transient_suppressor_unittest.cc
new file mode 100644
index 0000000000..ab48504af6
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/transient/transient_suppressor_unittest.cc
@@ -0,0 +1,175 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/transient/transient_suppressor.h"
+
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "modules/audio_processing/transient/common.h"
+#include "modules/audio_processing/transient/transient_suppressor_impl.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+constexpr int kMono = 1;
+
+// Returns the index of the first non-zero sample in `samples`, or
+// `absl::nullopt` if all the samples are zero.
+absl::optional<int> FindFirstNonZeroSample(const std::vector<float>& samples) {
+ for (size_t i = 0; i < samples.size(); ++i) {
+ if (samples[i] != 0.0f) {
+ return i;
+ }
+ }
+ return absl::nullopt;
+}
+
+} // namespace
+
+class TransientSuppressorVadModeParametrization
+ : public ::testing::TestWithParam<TransientSuppressor::VadMode> {};
+
+TEST_P(TransientSuppressorVadModeParametrization,
+ TypingDetectionLogicWorksAsExpectedForMono) {
+ TransientSuppressorImpl ts(GetParam(), ts::kSampleRate16kHz,
+ ts::kSampleRate16kHz, kMono);
+
+ // Each key-press enables detection.
+ EXPECT_FALSE(ts.detection_enabled_);
+ ts.UpdateKeypress(true);
+ EXPECT_TRUE(ts.detection_enabled_);
+
+  // It takes four seconds without any key-press to disable the detection.
+ for (int time_ms = 0; time_ms < 3990; time_ms += ts::kChunkSizeMs) {
+ ts.UpdateKeypress(false);
+ EXPECT_TRUE(ts.detection_enabled_);
+ }
+ ts.UpdateKeypress(false);
+ EXPECT_FALSE(ts.detection_enabled_);
+
+ // Key-presses that are more than a second apart from each other don't enable
+ // suppression.
+ for (int i = 0; i < 100; ++i) {
+ EXPECT_FALSE(ts.suppression_enabled_);
+ ts.UpdateKeypress(true);
+ EXPECT_TRUE(ts.detection_enabled_);
+ EXPECT_FALSE(ts.suppression_enabled_);
+ for (int time_ms = 0; time_ms < 990; time_ms += ts::kChunkSizeMs) {
+ ts.UpdateKeypress(false);
+ EXPECT_TRUE(ts.detection_enabled_);
+ EXPECT_FALSE(ts.suppression_enabled_);
+ }
+ ts.UpdateKeypress(false);
+ }
+
+ // Two consecutive key-presses are enough to enable the suppression.
+ ts.UpdateKeypress(true);
+ EXPECT_FALSE(ts.suppression_enabled_);
+ ts.UpdateKeypress(true);
+ EXPECT_TRUE(ts.suppression_enabled_);
+
+ // Key-presses that are less than a second apart from each other disable
+ // neither the detection nor the suppression.
+ for (int i = 0; i < 100; ++i) {
+ for (int time_ms = 0; time_ms < 1000; time_ms += ts::kChunkSizeMs) {
+ ts.UpdateKeypress(false);
+ EXPECT_TRUE(ts.detection_enabled_);
+ EXPECT_TRUE(ts.suppression_enabled_);
+ }
+ ts.UpdateKeypress(true);
+ EXPECT_TRUE(ts.detection_enabled_);
+ EXPECT_TRUE(ts.suppression_enabled_);
+ }
+
+ // It takes four seconds without any key-press to disable the detection and
+ // suppression.
+ for (int time_ms = 0; time_ms < 3990; time_ms += ts::kChunkSizeMs) {
+ ts.UpdateKeypress(false);
+ EXPECT_TRUE(ts.detection_enabled_);
+ EXPECT_TRUE(ts.suppression_enabled_);
+ }
+ for (int time_ms = 0; time_ms < 1000; time_ms += ts::kChunkSizeMs) {
+ ts.UpdateKeypress(false);
+ EXPECT_FALSE(ts.detection_enabled_);
+ EXPECT_FALSE(ts.suppression_enabled_);
+ }
+}
+
+INSTANTIATE_TEST_SUITE_P(
+ TransientSuppressorImplTest,
+ TransientSuppressorVadModeParametrization,
+ ::testing::Values(TransientSuppressor::VadMode::kDefault,
+ TransientSuppressor::VadMode::kRnnVad,
+ TransientSuppressor::VadMode::kNoVad));
+
+class TransientSuppressorSampleRateParametrization
+ : public ::testing::TestWithParam<int> {};
+
+// Checks that voice probability and processed audio data are temporally aligned
+// after `Suppress()` is called.
+TEST_P(TransientSuppressorSampleRateParametrization,
+ CheckAudioAndVoiceProbabilityTemporallyAligned) {
+ const int sample_rate_hz = GetParam();
+ TransientSuppressorImpl ts(TransientSuppressor::VadMode::kDefault,
+ sample_rate_hz,
+ /*detection_rate_hz=*/sample_rate_hz, kMono);
+
+ const int frame_size = sample_rate_hz * ts::kChunkSizeMs / 1000;
+ std::vector<float> frame(frame_size);
+
+ constexpr int kMaxAttempts = 3;
+ for (int i = 0; i < kMaxAttempts; ++i) {
+ SCOPED_TRACE(i);
+
+ // Call `Suppress()` on frames of non-zero audio samples.
+ std::fill(frame.begin(), frame.end(), 1000.0f);
+ float delayed_voice_probability = ts.Suppress(
+ frame.data(), frame.size(), kMono, /*detection_data=*/nullptr,
+ /*detection_length=*/frame_size, /*reference_data=*/nullptr,
+ /*reference_length=*/frame_size, /*voice_probability=*/1.0f,
+ /*key_pressed=*/false);
+
+ // Detect the algorithmic delay of `TransientSuppressorImpl`.
+ absl::optional<int> frame_delay = FindFirstNonZeroSample(frame);
+
+ // Check that the delayed voice probability is delayed according to the
+ // measured delay.
+ if (frame_delay.has_value()) {
+ if (*frame_delay == 0) {
+ // When the delay is an integer multiple of the frame duration,
+ // `Suppress()` returns a copy of a previously observed voice
+ // probability value.
+ EXPECT_EQ(delayed_voice_probability, 1.0f);
+ } else {
+ // When the delay is fractional, `Suppress()` instead returns an
+ // interpolated value. Since the exact value depends on the
+ // interpolation method, we only check that the delayed voice
+ // probability is non-zero, as it must converge towards the previously
+ // observed value.
+ EXPECT_GT(delayed_voice_probability, 0.0f);
+ }
+ break;
+ } else {
+ // The algorithmic delay is longer than the duration of a single frame.
+ // Until the delay is detected, the delayed voice probability is zero.
+ EXPECT_EQ(delayed_voice_probability, 0.0f);
+ }
+ }
+}
+
+INSTANTIATE_TEST_SUITE_P(TransientSuppressorImplTest,
+ TransientSuppressorSampleRateParametrization,
+ ::testing::Values(ts::kSampleRate8kHz,
+ ts::kSampleRate16kHz,
+ ts::kSampleRate32kHz,
+ ts::kSampleRate48kHz));
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/transient/voice_probability_delay_unit.cc b/third_party/libwebrtc/modules/audio_processing/transient/voice_probability_delay_unit.cc
new file mode 100644
index 0000000000..27b2b42b38
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/transient/voice_probability_delay_unit.cc
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/transient/voice_probability_delay_unit.h"
+
+#include <array>
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+VoiceProbabilityDelayUnit::VoiceProbabilityDelayUnit(int delay_num_samples,
+ int sample_rate_hz) {
+ Initialize(delay_num_samples, sample_rate_hz);
+}
+
+void VoiceProbabilityDelayUnit::Initialize(int delay_num_samples,
+ int sample_rate_hz) {
+ RTC_DCHECK_GE(delay_num_samples, 0);
+ RTC_DCHECK_LE(delay_num_samples, sample_rate_hz / 50)
+ << "The implementation does not support delays greater than 20 ms.";
+ int frame_size = rtc::CheckedDivExact(sample_rate_hz, 100); // 10 ms.
+ if (delay_num_samples <= frame_size) {
+ weights_[0] = 0.0f;
+ weights_[1] = static_cast<float>(delay_num_samples) / frame_size;
+ weights_[2] =
+ static_cast<float>(frame_size - delay_num_samples) / frame_size;
+ } else {
+ delay_num_samples -= frame_size;
+ weights_[0] = static_cast<float>(delay_num_samples) / frame_size;
+ weights_[1] =
+ static_cast<float>(frame_size - delay_num_samples) / frame_size;
+ weights_[2] = 0.0f;
+ }
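+
+ // Worked example (values chosen for illustration): with sample_rate_hz =
+ // 48000 and delay_num_samples = 288 (6 ms), frame_size is 480, so the
+ // weights become {0.0f, 0.6f, 0.4f} and the delayed output interpolates
+ // between the previous observation (weight 0.6) and the current one
+ // (weight 0.4).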
+
+ // Resets the delay unit.
+ last_probabilities_.fill(0.0f);
+}
+
+float VoiceProbabilityDelayUnit::Delay(float voice_probability) {
+ float weighted_probability = weights_[0] * last_probabilities_[0] +
+ weights_[1] * last_probabilities_[1] +
+ weights_[2] * voice_probability;
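+ // Shift the two-sample history: drop the oldest observation and store the
+ // current one for the next call.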
+ last_probabilities_[0] = last_probabilities_[1];
+ last_probabilities_[1] = voice_probability;
+ return weighted_probability;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/transient/voice_probability_delay_unit.h b/third_party/libwebrtc/modules/audio_processing/transient/voice_probability_delay_unit.h
new file mode 100644
index 0000000000..05961663e3
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/transient/voice_probability_delay_unit.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_TRANSIENT_VOICE_PROBABILITY_DELAY_UNIT_H_
+#define MODULES_AUDIO_PROCESSING_TRANSIENT_VOICE_PROBABILITY_DELAY_UNIT_H_
+
+#include <array>
+
+namespace webrtc {
+
+// Iteratively produces a sequence of delayed voice probability values given a
+// fixed delay between 0 and 20 ms and given a sequence of voice probability
+// values observed every 10 ms. Supports fractional delays, i.e., delays that
+// are not an integer multiple of 10 ms. Applies interpolation for fractional
+// delays; otherwise, returns a previously observed value according to the
+// given fixed delay.
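+//
+// Example usage (illustrative sketch; the numeric values are assumptions
+// taken from the unit tests):
+//
+//   // 288 samples at 48 kHz is a fractional 6 ms delay.
+//   VoiceProbabilityDelayUnit delay_unit(/*delay_num_samples=*/288,
+//                                        /*sample_rate_hz=*/48000);
+//   delay_unit.Delay(1.0f);                  // Warms up the history.
+//   float delayed = delay_unit.Delay(0.2f);  // 0.6 * 1.0 + 0.4 * 0.2 = 0.68.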
+class VoiceProbabilityDelayUnit {
+ public:
+ // Ctor. `delay_num_samples` is the delay expressed in number of samples; it
+ // must be non-negative and must not correspond to more than 20 ms.
+ VoiceProbabilityDelayUnit(int delay_num_samples, int sample_rate_hz);
+
+ // Handles delay and sample rate changes and resets the delay unit.
+ void Initialize(int delay_num_samples, int sample_rate_hz);
+
+ // Observes `voice_probability` and returns a delayed voice probability.
+ float Delay(float voice_probability);
+
+ private:
+ std::array<float, 3> weights_;
+ std::array<float, 2> last_probabilities_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_TRANSIENT_VOICE_PROBABILITY_DELAY_UNIT_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/transient/voice_probability_delay_unit_gn/moz.build b/third_party/libwebrtc/modules/audio_processing/transient/voice_probability_delay_unit_gn/moz.build
new file mode 100644
index 0000000000..61917bc8c3
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/transient/voice_probability_delay_unit_gn/moz.build
@@ -0,0 +1,201 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/audio_processing/transient/voice_probability_delay_unit.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("voice_probability_delay_unit_gn")
diff --git a/third_party/libwebrtc/modules/audio_processing/transient/voice_probability_delay_unit_unittest.cc b/third_party/libwebrtc/modules/audio_processing/transient/voice_probability_delay_unit_unittest.cc
new file mode 100644
index 0000000000..04848e6f2c
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/transient/voice_probability_delay_unit_unittest.cc
@@ -0,0 +1,108 @@
+/*
+ * Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/transient/voice_probability_delay_unit.h"
+
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+// Checks that with zero delay, the observed value is immediately returned as
+// delayed value.
+TEST(VoiceProbabilityDelayUnit, NoDelay) {
+ VoiceProbabilityDelayUnit delay_unit(/*delay_num_samples=*/0,
+ /*sample_rate_hz=*/48000);
+ constexpr int kMax = 5;
+ for (int i = 0; i <= kMax; ++i) {
+ SCOPED_TRACE(i);
+ float voice_probability = static_cast<float>(i) / kMax;
+ EXPECT_EQ(voice_probability, delay_unit.Delay(voice_probability));
+ }
+}
+
+// Checks that with integer delays, an exact copy of a previously observed value
+// is returned.
+TEST(VoiceProbabilityDelayUnit, IntegerDelay) {
+ VoiceProbabilityDelayUnit delay_unit_10ms(/*delay_num_samples=*/480,
+ /*sample_rate_hz=*/48000);
+ delay_unit_10ms.Delay(0.125f);
+ EXPECT_EQ(0.125f, delay_unit_10ms.Delay(0.9f));
+
+ VoiceProbabilityDelayUnit delay_unit_20ms(/*delay_num_samples=*/960,
+ /*sample_rate_hz=*/48000);
+ delay_unit_20ms.Delay(0.125f);
+ delay_unit_20ms.Delay(0.8f);
+ EXPECT_EQ(0.125f, delay_unit_20ms.Delay(0.9f));
+}
+
+// Checks that with a fractional delay < 10 ms, interpolation is applied.
+TEST(VoiceProbabilityDelayUnit, FractionalDelayLessThan10ms) {
+ // Create a delay unit with a fractional delay of 6 ms.
+ VoiceProbabilityDelayUnit delay_unit(/*delay_num_samples=*/288,
+ /*sample_rate_hz=*/48000);
+ // frame 0
+ // --------- frame 1
+ // ---------
+ // 0000001111
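+ // With a 6 ms delay at 48 kHz the weights are {0.0f, 0.6f, 0.4f}, so the
+ // second call is expected to return 0.6 * 1.0 + 0.4 * 0.2 = 0.68.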
+ delay_unit.Delay(1.0f);
+ EXPECT_FLOAT_EQ(0.68f, delay_unit.Delay(0.2f));
+}
+
+// Checks that with a fractional delay > 10 ms, interpolation is applied.
+TEST(VoiceProbabilityDelayUnit, FractionalDelayGreaterThan10ms) {
+ // Create a delay unit with a fractional delay of 14 ms.
+ VoiceProbabilityDelayUnit delay_unit(/*delay_num_samples=*/672,
+ /*sample_rate_hz=*/48000);
+ // frame 0
+ // --------- frame 1
+ // --------- frame 2
+ // ---------
+ // 0000111111
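+ // With a 14 ms delay at 48 kHz the weights are {0.4f, 0.6f, 0.0f}, so the
+ // third call is expected to return 0.4 * 1.0 + 0.6 * 0.2 = 0.52.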
+ delay_unit.Delay(1.0f);
+ delay_unit.Delay(0.2f);
+ EXPECT_FLOAT_EQ(0.52f, delay_unit.Delay(1.0f));
+}
+
+// Checks that `Initialize()` resets the delay unit.
+TEST(VoiceProbabilityDelayUnit, InitializeResetsDelayUnit) {
+ VoiceProbabilityDelayUnit delay_unit(/*delay_num_samples=*/960,
+ /*sample_rate_hz=*/48000);
+ delay_unit.Delay(1.0f);
+ delay_unit.Delay(0.9f);
+
+ delay_unit.Initialize(/*delay_num_samples=*/160, /*sample_rate_hz=*/8000);
+ EXPECT_EQ(0.0f, delay_unit.Delay(0.1f));
+ EXPECT_EQ(0.0f, delay_unit.Delay(0.2f));
+ EXPECT_EQ(0.1f, delay_unit.Delay(0.3f));
+}
+
+// Checks that `Initialize()` handles delay changes.
+TEST(VoiceProbabilityDelayUnit, InitializeHandlesDelayChanges) {
+ // Start with a 20 ms delay.
+ VoiceProbabilityDelayUnit delay_unit(/*delay_num_samples=*/960,
+ /*sample_rate_hz=*/48000);
+ delay_unit.Delay(1.0f);
+ delay_unit.Delay(0.9f);
+
+ // Lower the delay to 10 ms.
+ delay_unit.Initialize(/*delay_num_samples=*/80, /*sample_rate_hz=*/8000);
+ EXPECT_EQ(0.0f, delay_unit.Delay(0.1f));
+ EXPECT_EQ(0.1f, delay_unit.Delay(0.2f));
+
+ // Increase the delay to 15 ms.
+ delay_unit.Initialize(/*delay_num_samples=*/120, /*sample_rate_hz=*/8000);
+ EXPECT_EQ(0.0f, delay_unit.Delay(0.1f));
+ EXPECT_EQ(0.05f, delay_unit.Delay(0.2f));
+ EXPECT_EQ(0.15f, delay_unit.Delay(0.3f));
+}
+
+} // namespace
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/transient/windows_private.h b/third_party/libwebrtc/modules/audio_processing/transient/windows_private.h
new file mode 100644
index 0000000000..54e3c25785
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/transient/windows_private.h
@@ -0,0 +1,557 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_TRANSIENT_WINDOWS_PRIVATE_H_
+#define MODULES_AUDIO_PROCESSING_TRANSIENT_WINDOWS_PRIVATE_H_
+
+namespace webrtc {
+
+// Hanning window for 4 ms at 16 kHz.
+static const float kHanning64w128[128] = {
+ 0.00000000000000f, 0.02454122852291f, 0.04906767432742f, 0.07356456359967f,
+ 0.09801714032956f, 0.12241067519922f, 0.14673047445536f, 0.17096188876030f,
+ 0.19509032201613f, 0.21910124015687f, 0.24298017990326f, 0.26671275747490f,
+ 0.29028467725446f, 0.31368174039889f, 0.33688985339222f, 0.35989503653499f,
+ 0.38268343236509f, 0.40524131400499f, 0.42755509343028f, 0.44961132965461f,
+ 0.47139673682600f, 0.49289819222978f, 0.51410274419322f, 0.53499761988710f,
+ 0.55557023301960f, 0.57580819141785f, 0.59569930449243f, 0.61523159058063f,
+ 0.63439328416365f, 0.65317284295378f, 0.67155895484702f, 0.68954054473707f,
+ 0.70710678118655f, 0.72424708295147f, 0.74095112535496f, 0.75720884650648f,
+ 0.77301045336274f, 0.78834642762661f, 0.80320753148064f, 0.81758481315158f,
+ 0.83146961230255f, 0.84485356524971f, 0.85772861000027f, 0.87008699110871f,
+ 0.88192126434835f, 0.89322430119552f, 0.90398929312344f, 0.91420975570353f,
+ 0.92387953251129f, 0.93299279883474f, 0.94154406518302f, 0.94952818059304f,
+ 0.95694033573221f, 0.96377606579544f, 0.97003125319454f, 0.97570213003853f,
+ 0.98078528040323f, 0.98527764238894f, 0.98917650996478f, 0.99247953459871f,
+ 0.99518472667220f, 0.99729045667869f, 0.99879545620517f, 0.99969881869620f,
+ 1.00000000000000f, 0.99969881869620f, 0.99879545620517f, 0.99729045667869f,
+ 0.99518472667220f, 0.99247953459871f, 0.98917650996478f, 0.98527764238894f,
+ 0.98078528040323f, 0.97570213003853f, 0.97003125319454f, 0.96377606579544f,
+ 0.95694033573221f, 0.94952818059304f, 0.94154406518302f, 0.93299279883474f,
+ 0.92387953251129f, 0.91420975570353f, 0.90398929312344f, 0.89322430119552f,
+ 0.88192126434835f, 0.87008699110871f, 0.85772861000027f, 0.84485356524971f,
+ 0.83146961230255f, 0.81758481315158f, 0.80320753148064f, 0.78834642762661f,
+ 0.77301045336274f, 0.75720884650648f, 0.74095112535496f, 0.72424708295147f,
+ 0.70710678118655f, 0.68954054473707f, 0.67155895484702f, 0.65317284295378f,
+ 0.63439328416365f, 0.61523159058063f, 0.59569930449243f, 0.57580819141785f,
+ 0.55557023301960f, 0.53499761988710f, 0.51410274419322f, 0.49289819222978f,
+ 0.47139673682600f, 0.44961132965461f, 0.42755509343028f, 0.40524131400499f,
+ 0.38268343236509f, 0.35989503653499f, 0.33688985339222f, 0.31368174039889f,
+ 0.29028467725446f, 0.26671275747490f, 0.24298017990326f, 0.21910124015687f,
+ 0.19509032201613f, 0.17096188876030f, 0.14673047445536f, 0.12241067519922f,
+ 0.09801714032956f, 0.07356456359967f, 0.04906767432742f, 0.02454122852291f};
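+
+// Note: the values above appear to follow w[n] = sin(pi * n / 128), i.e. a
+// half-sine (square-root Hann) window. A sketch that would regenerate them
+// under that assumption (M_PI from <cmath>):
+//
+//   std::array<float, 128> window;
+//   for (int n = 0; n < 128; ++n) {
+//     window[n] = std::sin(M_PI * n / 128.0);
+//   }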
+
+// Hybrid Hanning & flat window.
+static const float kBlocks80w128[128] = {
+ 0.00000000f, 0.03271908f, 0.06540313f, 0.09801714f, 0.13052619f,
+ 0.16289547f, 0.19509032f, 0.22707626f, 0.25881905f, 0.29028468f,
+ 0.32143947f, 0.35225005f, 0.38268343f, 0.41270703f, 0.44228869f,
+ 0.47139674f, 0.50000000f, 0.52806785f, 0.55557023f, 0.58247770f,
+ 0.60876143f, 0.63439328f, 0.65934582f, 0.68359230f, 0.70710678f,
+ 0.72986407f, 0.75183981f, 0.77301045f, 0.79335334f, 0.81284668f,
+ 0.83146961f, 0.84920218f, 0.86602540f, 0.88192126f, 0.89687274f,
+ 0.91086382f, 0.92387953f, 0.93590593f, 0.94693013f, 0.95694034f,
+ 0.96592583f, 0.97387698f, 0.98078528f, 0.98664333f, 0.99144486f,
+ 0.99518473f, 0.99785892f, 0.99946459f, 1.00000000f, 1.00000000f,
+ 1.00000000f, 1.00000000f, 1.00000000f, 1.00000000f, 1.00000000f,
+ 1.00000000f, 1.00000000f, 1.00000000f, 1.00000000f, 1.00000000f,
+ 1.00000000f, 1.00000000f, 1.00000000f, 1.00000000f, 1.00000000f,
+ 1.00000000f, 1.00000000f, 1.00000000f, 1.00000000f, 1.00000000f,
+ 1.00000000f, 1.00000000f, 1.00000000f, 1.00000000f, 1.00000000f,
+ 1.00000000f, 1.00000000f, 1.00000000f, 1.00000000f, 1.00000000f,
+ 1.00000000f, 0.99946459f, 0.99785892f, 0.99518473f, 0.99144486f,
+ 0.98664333f, 0.98078528f, 0.97387698f, 0.96592583f, 0.95694034f,
+ 0.94693013f, 0.93590593f, 0.92387953f, 0.91086382f, 0.89687274f,
+ 0.88192126f, 0.86602540f, 0.84920218f, 0.83146961f, 0.81284668f,
+ 0.79335334f, 0.77301045f, 0.75183981f, 0.72986407f, 0.70710678f,
+ 0.68359230f, 0.65934582f, 0.63439328f, 0.60876143f, 0.58247770f,
+ 0.55557023f, 0.52806785f, 0.50000000f, 0.47139674f, 0.44228869f,
+ 0.41270703f, 0.38268343f, 0.35225005f, 0.32143947f, 0.29028468f,
+ 0.25881905f, 0.22707626f, 0.19509032f, 0.16289547f, 0.13052619f,
+ 0.09801714f, 0.06540313f, 0.03271908f};
+
+// Hybrid Hanning & flat window.
+static const float kBlocks160w256[256] = {
+ 0.00000000f, 0.01636173f, 0.03271908f, 0.04906767f, 0.06540313f,
+ 0.08172107f, 0.09801714f, 0.11428696f, 0.13052619f, 0.14673047f,
+ 0.16289547f, 0.17901686f, 0.19509032f, 0.21111155f, 0.22707626f,
+ 0.24298018f, 0.25881905f, 0.27458862f, 0.29028468f, 0.30590302f,
+ 0.32143947f, 0.33688985f, 0.35225005f, 0.36751594f, 0.38268343f,
+ 0.39774847f, 0.41270703f, 0.42755509f, 0.44228869f, 0.45690388f,
+ 0.47139674f, 0.48576339f, 0.50000000f, 0.51410274f, 0.52806785f,
+ 0.54189158f, 0.55557023f, 0.56910015f, 0.58247770f, 0.59569930f,
+ 0.60876143f, 0.62166057f, 0.63439328f, 0.64695615f, 0.65934582f,
+ 0.67155895f, 0.68359230f, 0.69544264f, 0.70710678f, 0.71858162f,
+ 0.72986407f, 0.74095113f, 0.75183981f, 0.76252720f, 0.77301045f,
+ 0.78328675f, 0.79335334f, 0.80320753f, 0.81284668f, 0.82226822f,
+ 0.83146961f, 0.84044840f, 0.84920218f, 0.85772861f, 0.86602540f,
+ 0.87409034f, 0.88192126f, 0.88951608f, 0.89687274f, 0.90398929f,
+ 0.91086382f, 0.91749450f, 0.92387953f, 0.93001722f, 0.93590593f,
+ 0.94154407f, 0.94693013f, 0.95206268f, 0.95694034f, 0.96156180f,
+ 0.96592583f, 0.97003125f, 0.97387698f, 0.97746197f, 0.98078528f,
+ 0.98384601f, 0.98664333f, 0.98917651f, 0.99144486f, 0.99344778f,
+ 0.99518473f, 0.99665524f, 0.99785892f, 0.99879546f, 0.99946459f,
+ 0.99986614f, 1.00000000f, 1.00000000f, 1.00000000f, 1.00000000f,
+ 1.00000000f, 1.00000000f, 1.00000000f, 1.00000000f, 1.00000000f,
+ 1.00000000f, 1.00000000f, 1.00000000f, 1.00000000f, 1.00000000f,
+ 1.00000000f, 1.00000000f, 1.00000000f, 1.00000000f, 1.00000000f,
+ 1.00000000f, 1.00000000f, 1.00000000f, 1.00000000f, 1.00000000f,
+ 1.00000000f, 1.00000000f, 1.00000000f, 1.00000000f, 1.00000000f,
+ 1.00000000f, 1.00000000f, 1.00000000f, 1.00000000f, 1.00000000f,
+ 1.00000000f, 1.00000000f, 1.00000000f, 1.00000000f, 1.00000000f,
+ 1.00000000f, 1.00000000f, 1.00000000f, 1.00000000f, 1.00000000f,
+ 1.00000000f, 1.00000000f, 1.00000000f, 1.00000000f, 1.00000000f,
+ 1.00000000f, 1.00000000f, 1.00000000f, 1.00000000f, 1.00000000f,
+ 1.00000000f, 1.00000000f, 1.00000000f, 1.00000000f, 1.00000000f,
+ 1.00000000f, 1.00000000f, 1.00000000f, 1.00000000f, 1.00000000f,
+ 1.00000000f, 0.99986614f, 0.99946459f, 0.99879546f, 0.99785892f,
+ 0.99665524f, 0.99518473f, 0.99344778f, 0.99144486f, 0.98917651f,
+ 0.98664333f, 0.98384601f, 0.98078528f, 0.97746197f, 0.97387698f,
+ 0.97003125f, 0.96592583f, 0.96156180f, 0.95694034f, 0.95206268f,
+ 0.94693013f, 0.94154407f, 0.93590593f, 0.93001722f, 0.92387953f,
+ 0.91749450f, 0.91086382f, 0.90398929f, 0.89687274f, 0.88951608f,
+ 0.88192126f, 0.87409034f, 0.86602540f, 0.85772861f, 0.84920218f,
+ 0.84044840f, 0.83146961f, 0.82226822f, 0.81284668f, 0.80320753f,
+ 0.79335334f, 0.78328675f, 0.77301045f, 0.76252720f, 0.75183981f,
+ 0.74095113f, 0.72986407f, 0.71858162f, 0.70710678f, 0.69544264f,
+ 0.68359230f, 0.67155895f, 0.65934582f, 0.64695615f, 0.63439328f,
+ 0.62166057f, 0.60876143f, 0.59569930f, 0.58247770f, 0.56910015f,
+ 0.55557023f, 0.54189158f, 0.52806785f, 0.51410274f, 0.50000000f,
+ 0.48576339f, 0.47139674f, 0.45690388f, 0.44228869f, 0.42755509f,
+ 0.41270703f, 0.39774847f, 0.38268343f, 0.36751594f, 0.35225005f,
+ 0.33688985f, 0.32143947f, 0.30590302f, 0.29028468f, 0.27458862f,
+ 0.25881905f, 0.24298018f, 0.22707626f, 0.21111155f, 0.19509032f,
+ 0.17901686f, 0.16289547f, 0.14673047f, 0.13052619f, 0.11428696f,
+ 0.09801714f, 0.08172107f, 0.06540313f, 0.04906767f, 0.03271908f,
+ 0.01636173f};
+
+// Hybrid Hanning & flat window: for 20 ms.
+static const float kBlocks320w512[512] = {
+ 0.00000000f, 0.00818114f, 0.01636173f, 0.02454123f, 0.03271908f,
+ 0.04089475f, 0.04906767f, 0.05723732f, 0.06540313f, 0.07356456f,
+ 0.08172107f, 0.08987211f, 0.09801714f, 0.10615561f, 0.11428696f,
+ 0.12241068f, 0.13052619f, 0.13863297f, 0.14673047f, 0.15481816f,
+ 0.16289547f, 0.17096189f, 0.17901686f, 0.18705985f, 0.19509032f,
+ 0.20310773f, 0.21111155f, 0.21910124f, 0.22707626f, 0.23503609f,
+ 0.24298018f, 0.25090801f, 0.25881905f, 0.26671276f, 0.27458862f,
+ 0.28244610f, 0.29028468f, 0.29810383f, 0.30590302f, 0.31368174f,
+ 0.32143947f, 0.32917568f, 0.33688985f, 0.34458148f, 0.35225005f,
+ 0.35989504f, 0.36751594f, 0.37511224f, 0.38268343f, 0.39022901f,
+ 0.39774847f, 0.40524131f, 0.41270703f, 0.42014512f, 0.42755509f,
+ 0.43493645f, 0.44228869f, 0.44961133f, 0.45690388f, 0.46416584f,
+ 0.47139674f, 0.47859608f, 0.48576339f, 0.49289819f, 0.50000000f,
+ 0.50706834f, 0.51410274f, 0.52110274f, 0.52806785f, 0.53499762f,
+ 0.54189158f, 0.54874927f, 0.55557023f, 0.56235401f, 0.56910015f,
+ 0.57580819f, 0.58247770f, 0.58910822f, 0.59569930f, 0.60225052f,
+ 0.60876143f, 0.61523159f, 0.62166057f, 0.62804795f, 0.63439328f,
+ 0.64069616f, 0.64695615f, 0.65317284f, 0.65934582f, 0.66547466f,
+ 0.67155895f, 0.67759830f, 0.68359230f, 0.68954054f, 0.69544264f,
+ 0.70129818f, 0.70710678f, 0.71286806f, 0.71858162f, 0.72424708f,
+ 0.72986407f, 0.73543221f, 0.74095113f, 0.74642045f, 0.75183981f,
+ 0.75720885f, 0.76252720f, 0.76779452f, 0.77301045f, 0.77817464f,
+ 0.78328675f, 0.78834643f, 0.79335334f, 0.79830715f, 0.80320753f,
+ 0.80805415f, 0.81284668f, 0.81758481f, 0.82226822f, 0.82689659f,
+ 0.83146961f, 0.83598698f, 0.84044840f, 0.84485357f, 0.84920218f,
+ 0.85349396f, 0.85772861f, 0.86190585f, 0.86602540f, 0.87008699f,
+ 0.87409034f, 0.87803519f, 0.88192126f, 0.88574831f, 0.88951608f,
+ 0.89322430f, 0.89687274f, 0.90046115f, 0.90398929f, 0.90745693f,
+ 0.91086382f, 0.91420976f, 0.91749450f, 0.92071783f, 0.92387953f,
+ 0.92697940f, 0.93001722f, 0.93299280f, 0.93590593f, 0.93875641f,
+ 0.94154407f, 0.94426870f, 0.94693013f, 0.94952818f, 0.95206268f,
+ 0.95453345f, 0.95694034f, 0.95928317f, 0.96156180f, 0.96377607f,
+ 0.96592583f, 0.96801094f, 0.97003125f, 0.97198664f, 0.97387698f,
+ 0.97570213f, 0.97746197f, 0.97915640f, 0.98078528f, 0.98234852f,
+ 0.98384601f, 0.98527764f, 0.98664333f, 0.98794298f, 0.98917651f,
+ 0.99034383f, 0.99144486f, 0.99247953f, 0.99344778f, 0.99434953f,
+ 0.99518473f, 0.99595331f, 0.99665524f, 0.99729046f, 0.99785892f,
+ 0.99836060f, 0.99879546f, 0.99916346f, 0.99946459f, 0.99969882f,
+ 0.99986614f, 0.99996653f, 1.00000000f, 1.00000000f, 1.00000000f,
+ 1.00000000f, 1.00000000f, 1.00000000f, 1.00000000f, 1.00000000f,
+ 1.00000000f, 1.00000000f, 1.00000000f, 1.00000000f, 1.00000000f,
+ 1.00000000f, 1.00000000f, 1.00000000f, 1.00000000f, 1.00000000f,
+ 1.00000000f, 1.00000000f, 1.00000000f, 1.00000000f, 1.00000000f,
+ 1.00000000f, 1.00000000f, 1.00000000f, 1.00000000f, 1.00000000f,
+ 1.00000000f, 1.00000000f, 1.00000000f, 1.00000000f, 1.00000000f,
+ 1.00000000f, 1.00000000f, 1.00000000f, 1.00000000f, 1.00000000f,
+ 1.00000000f, 1.00000000f, 1.00000000f, 1.00000000f, 1.00000000f,
+ 1.00000000f, 1.00000000f, 1.00000000f, 1.00000000f, 1.00000000f,
+ 1.00000000f, 1.00000000f, 1.00000000f, 1.00000000f, 1.00000000f,
+ 1.00000000f, 1.00000000f, 1.00000000f, 1.00000000f, 1.00000000f,
+ 1.00000000f, 1.00000000f, 1.00000000f, 1.00000000f, 1.00000000f,
+ 1.00000000f, 1.00000000f, 1.00000000f, 1.00000000f, 1.00000000f,
+ 1.00000000f, 1.00000000f, 1.00000000f, 1.00000000f, 1.00000000f,
+ 1.00000000f, 1.00000000f, 1.00000000f, 1.00000000f, 1.00000000f,
+ 1.00000000f, 1.00000000f, 1.00000000f, 1.00000000f, 1.00000000f,
+ 1.00000000f, 1.00000000f, 1.00000000f, 1.00000000f, 1.00000000f,
+ 1.00000000f, 1.00000000f, 1.00000000f, 1.00000000f, 1.00000000f,
+ 1.00000000f, 1.00000000f, 1.00000000f, 1.00000000f, 1.00000000f,
+ 1.00000000f, 1.00000000f, 1.00000000f, 1.00000000f, 1.00000000f,
+ 1.00000000f, 1.00000000f, 1.00000000f, 1.00000000f, 1.00000000f,
+ 1.00000000f, 1.00000000f, 1.00000000f, 1.00000000f, 1.00000000f,
+ 1.00000000f, 1.00000000f, 1.00000000f, 1.00000000f, 1.00000000f,
+ 1.00000000f, 1.00000000f, 1.00000000f, 1.00000000f, 1.00000000f,
+ 1.00000000f, 1.00000000f, 1.00000000f, 1.00000000f, 1.00000000f,
+ 1.00000000f, 0.99996653f, 0.99986614f, 0.99969882f, 0.99946459f,
+ 0.99916346f, 0.99879546f, 0.99836060f, 0.99785892f, 0.99729046f,
+ 0.99665524f, 0.99595331f, 0.99518473f, 0.99434953f, 0.99344778f,
+ 0.99247953f, 0.99144486f, 0.99034383f, 0.98917651f, 0.98794298f,
+ 0.98664333f, 0.98527764f, 0.98384601f, 0.98234852f, 0.98078528f,
+ 0.97915640f, 0.97746197f, 0.97570213f, 0.97387698f, 0.97198664f,
+ 0.97003125f, 0.96801094f, 0.96592583f, 0.96377607f, 0.96156180f,
+ 0.95928317f, 0.95694034f, 0.95453345f, 0.95206268f, 0.94952818f,
+ 0.94693013f, 0.94426870f, 0.94154407f, 0.93875641f, 0.93590593f,
+ 0.93299280f, 0.93001722f, 0.92697940f, 0.92387953f, 0.92071783f,
+ 0.91749450f, 0.91420976f, 0.91086382f, 0.90745693f, 0.90398929f,
+ 0.90046115f, 0.89687274f, 0.89322430f, 0.88951608f, 0.88574831f,
+ 0.88192126f, 0.87803519f, 0.87409034f, 0.87008699f, 0.86602540f,
+ 0.86190585f, 0.85772861f, 0.85349396f, 0.84920218f, 0.84485357f,
+ 0.84044840f, 0.83598698f, 0.83146961f, 0.82689659f, 0.82226822f,
+ 0.81758481f, 0.81284668f, 0.80805415f, 0.80320753f, 0.79830715f,
+ 0.79335334f, 0.78834643f, 0.78328675f, 0.77817464f, 0.77301045f,
+ 0.76779452f, 0.76252720f, 0.75720885f, 0.75183981f, 0.74642045f,
+ 0.74095113f, 0.73543221f, 0.72986407f, 0.72424708f, 0.71858162f,
+ 0.71286806f, 0.70710678f, 0.70129818f, 0.69544264f, 0.68954054f,
+ 0.68359230f, 0.67759830f, 0.67155895f, 0.66547466f, 0.65934582f,
+ 0.65317284f, 0.64695615f, 0.64069616f, 0.63439328f, 0.62804795f,
+ 0.62166057f, 0.61523159f, 0.60876143f, 0.60225052f, 0.59569930f,
+ 0.58910822f, 0.58247770f, 0.57580819f, 0.56910015f, 0.56235401f,
+ 0.55557023f, 0.54874927f, 0.54189158f, 0.53499762f, 0.52806785f,
+ 0.52110274f, 0.51410274f, 0.50706834f, 0.50000000f, 0.49289819f,
+ 0.48576339f, 0.47859608f, 0.47139674f, 0.46416584f, 0.45690388f,
+ 0.44961133f, 0.44228869f, 0.43493645f, 0.42755509f, 0.42014512f,
+ 0.41270703f, 0.40524131f, 0.39774847f, 0.39022901f, 0.38268343f,
+ 0.37511224f, 0.36751594f, 0.35989504f, 0.35225005f, 0.34458148f,
+ 0.33688985f, 0.32917568f, 0.32143947f, 0.31368174f, 0.30590302f,
+ 0.29810383f, 0.29028468f, 0.28244610f, 0.27458862f, 0.26671276f,
+ 0.25881905f, 0.25090801f, 0.24298018f, 0.23503609f, 0.22707626f,
+ 0.21910124f, 0.21111155f, 0.20310773f, 0.19509032f, 0.18705985f,
+ 0.17901686f, 0.17096189f, 0.16289547f, 0.15481816f, 0.14673047f,
+ 0.13863297f, 0.13052619f, 0.12241068f, 0.11428696f, 0.10615561f,
+ 0.09801714f, 0.08987211f, 0.08172107f, 0.07356456f, 0.06540313f,
+ 0.05723732f, 0.04906767f, 0.04089475f, 0.03271908f, 0.02454123f,
+ 0.01636173f, 0.00818114f};
+
+// Hanning window: for 15 ms at 16 kHz, with symmetric zeros.
+static const float kBlocks240w512[512] = {
+ 0.00000000f, 0.00000000f, 0.00000000f, 0.00000000f, 0.00000000f,
+ 0.00000000f, 0.00000000f, 0.00000000f, 0.00000000f, 0.00000000f,
+ 0.00000000f, 0.00000000f, 0.00000000f, 0.00000000f, 0.00000000f,
+ 0.00000000f, 0.00000000f, 0.00654494f, 0.01308960f, 0.01963369f,
+ 0.02617695f, 0.03271908f, 0.03925982f, 0.04579887f, 0.05233596f,
+ 0.05887080f, 0.06540313f, 0.07193266f, 0.07845910f, 0.08498218f,
+ 0.09150162f, 0.09801714f, 0.10452846f, 0.11103531f, 0.11753740f,
+ 0.12403446f, 0.13052620f, 0.13701233f, 0.14349262f, 0.14996676f,
+ 0.15643448f, 0.16289547f, 0.16934951f, 0.17579629f, 0.18223552f,
+ 0.18866697f, 0.19509032f, 0.20150533f, 0.20791170f, 0.21430916f,
+ 0.22069745f, 0.22707628f, 0.23344538f, 0.23980446f, 0.24615330f,
+ 0.25249159f, 0.25881904f, 0.26513544f, 0.27144045f, 0.27773386f,
+ 0.28401536f, 0.29028466f, 0.29654160f, 0.30278578f, 0.30901700f,
+ 0.31523499f, 0.32143945f, 0.32763019f, 0.33380687f, 0.33996925f,
+ 0.34611708f, 0.35225007f, 0.35836795f, 0.36447051f, 0.37055743f,
+ 0.37662852f, 0.38268346f, 0.38872197f, 0.39474389f, 0.40074885f,
+ 0.40673664f, 0.41270703f, 0.41865975f, 0.42459452f, 0.43051112f,
+ 0.43640924f, 0.44228873f, 0.44814920f, 0.45399052f, 0.45981237f,
+ 0.46561453f, 0.47139674f, 0.47715878f, 0.48290035f, 0.48862126f,
+ 0.49432120f, 0.50000000f, 0.50565743f, 0.51129311f, 0.51690692f,
+ 0.52249855f, 0.52806789f, 0.53361452f, 0.53913832f, 0.54463905f,
+ 0.55011642f, 0.55557024f, 0.56100029f, 0.56640625f, 0.57178795f,
+ 0.57714522f, 0.58247769f, 0.58778524f, 0.59306765f, 0.59832460f,
+ 0.60355598f, 0.60876143f, 0.61394083f, 0.61909395f, 0.62422055f,
+ 0.62932038f, 0.63439333f, 0.63943899f, 0.64445734f, 0.64944810f,
+ 0.65441096f, 0.65934587f, 0.66425246f, 0.66913062f, 0.67398012f,
+ 0.67880076f, 0.68359232f, 0.68835455f, 0.69308740f, 0.69779050f,
+ 0.70246369f, 0.70710677f, 0.71171963f, 0.71630198f, 0.72085363f,
+ 0.72537440f, 0.72986406f, 0.73432255f, 0.73874950f, 0.74314487f,
+ 0.74750835f, 0.75183982f, 0.75613910f, 0.76040596f, 0.76464027f,
+ 0.76884186f, 0.77301043f, 0.77714598f, 0.78124821f, 0.78531694f,
+ 0.78935206f, 0.79335338f, 0.79732066f, 0.80125386f, 0.80515265f,
+ 0.80901700f, 0.81284672f, 0.81664157f, 0.82040149f, 0.82412618f,
+ 0.82781565f, 0.83146966f, 0.83508795f, 0.83867061f, 0.84221727f,
+ 0.84572780f, 0.84920216f, 0.85264021f, 0.85604161f, 0.85940641f,
+ 0.86273444f, 0.86602545f, 0.86927933f, 0.87249607f, 0.87567532f,
+ 0.87881714f, 0.88192129f, 0.88498765f, 0.88801610f, 0.89100653f,
+ 0.89395881f, 0.89687276f, 0.89974827f, 0.90258533f, 0.90538365f,
+ 0.90814316f, 0.91086388f, 0.91354549f, 0.91618794f, 0.91879123f,
+ 0.92135513f, 0.92387950f, 0.92636442f, 0.92880958f, 0.93121493f,
+ 0.93358046f, 0.93590593f, 0.93819135f, 0.94043654f, 0.94264150f,
+ 0.94480604f, 0.94693011f, 0.94901365f, 0.95105654f, 0.95305866f,
+ 0.95501995f, 0.95694035f, 0.95881975f, 0.96065807f, 0.96245527f,
+ 0.96421117f, 0.96592581f, 0.96759909f, 0.96923089f, 0.97082120f,
+ 0.97236991f, 0.97387701f, 0.97534233f, 0.97676587f, 0.97814763f,
+ 0.97948742f, 0.98078531f, 0.98204112f, 0.98325491f, 0.98442656f,
+ 0.98555607f, 0.98664331f, 0.98768836f, 0.98869103f, 0.98965138f,
+ 0.99056935f, 0.99144489f, 0.99227792f, 0.99306846f, 0.99381649f,
+ 0.99452192f, 0.99518472f, 0.99580491f, 0.99638247f, 0.99691731f,
+ 0.99740952f, 0.99785894f, 0.99826562f, 0.99862951f, 0.99895066f,
+ 0.99922901f, 0.99946457f, 0.99965733f, 0.99980724f, 0.99991435f,
+ 0.99997860f, 1.00000000f, 0.99997860f, 0.99991435f, 0.99980724f,
+ 0.99965733f, 0.99946457f, 0.99922901f, 0.99895066f, 0.99862951f,
+ 0.99826562f, 0.99785894f, 0.99740946f, 0.99691731f, 0.99638247f,
+ 0.99580491f, 0.99518472f, 0.99452192f, 0.99381644f, 0.99306846f,
+ 0.99227792f, 0.99144489f, 0.99056935f, 0.98965138f, 0.98869103f,
+ 0.98768836f, 0.98664331f, 0.98555607f, 0.98442656f, 0.98325491f,
+ 0.98204112f, 0.98078525f, 0.97948742f, 0.97814757f, 0.97676587f,
+ 0.97534227f, 0.97387695f, 0.97236991f, 0.97082120f, 0.96923089f,
+ 0.96759909f, 0.96592581f, 0.96421117f, 0.96245521f, 0.96065807f,
+ 0.95881969f, 0.95694029f, 0.95501995f, 0.95305860f, 0.95105648f,
+ 0.94901365f, 0.94693011f, 0.94480604f, 0.94264150f, 0.94043654f,
+ 0.93819129f, 0.93590593f, 0.93358046f, 0.93121493f, 0.92880952f,
+ 0.92636436f, 0.92387950f, 0.92135507f, 0.91879123f, 0.91618794f,
+ 0.91354543f, 0.91086382f, 0.90814310f, 0.90538365f, 0.90258527f,
+ 0.89974827f, 0.89687276f, 0.89395875f, 0.89100647f, 0.88801610f,
+ 0.88498759f, 0.88192123f, 0.87881714f, 0.87567532f, 0.87249595f,
+ 0.86927933f, 0.86602539f, 0.86273432f, 0.85940641f, 0.85604161f,
+ 0.85264009f, 0.84920216f, 0.84572780f, 0.84221715f, 0.83867055f,
+ 0.83508795f, 0.83146954f, 0.82781565f, 0.82412612f, 0.82040137f,
+ 0.81664157f, 0.81284660f, 0.80901700f, 0.80515265f, 0.80125374f,
+ 0.79732066f, 0.79335332f, 0.78935200f, 0.78531694f, 0.78124815f,
+ 0.77714586f, 0.77301049f, 0.76884180f, 0.76464021f, 0.76040596f,
+ 0.75613904f, 0.75183970f, 0.74750835f, 0.74314481f, 0.73874938f,
+ 0.73432249f, 0.72986400f, 0.72537428f, 0.72085363f, 0.71630186f,
+ 0.71171951f, 0.70710677f, 0.70246363f, 0.69779032f, 0.69308734f,
+ 0.68835449f, 0.68359220f, 0.67880070f, 0.67398006f, 0.66913044f,
+ 0.66425240f, 0.65934575f, 0.65441096f, 0.64944804f, 0.64445722f,
+ 0.63943905f, 0.63439327f, 0.62932026f, 0.62422055f, 0.61909389f,
+ 0.61394072f, 0.60876143f, 0.60355592f, 0.59832448f, 0.59306765f,
+ 0.58778518f, 0.58247757f, 0.57714522f, 0.57178789f, 0.56640613f,
+ 0.56100023f, 0.55557019f, 0.55011630f, 0.54463905f, 0.53913826f,
+ 0.53361434f, 0.52806783f, 0.52249849f, 0.51690674f, 0.51129305f,
+ 0.50565726f, 0.50000006f, 0.49432117f, 0.48862115f, 0.48290038f,
+ 0.47715873f, 0.47139663f, 0.46561456f, 0.45981231f, 0.45399037f,
+ 0.44814920f, 0.44228864f, 0.43640912f, 0.43051112f, 0.42459446f,
+ 0.41865960f, 0.41270703f, 0.40673658f, 0.40074870f, 0.39474386f,
+ 0.38872188f, 0.38268328f, 0.37662849f, 0.37055734f, 0.36447033f,
+ 0.35836792f, 0.35224995f, 0.34611690f, 0.33996922f, 0.33380675f,
+ 0.32763001f, 0.32143945f, 0.31523487f, 0.30901679f, 0.30278572f,
+ 0.29654145f, 0.29028472f, 0.28401530f, 0.27773371f, 0.27144048f,
+ 0.26513538f, 0.25881892f, 0.25249159f, 0.24615324f, 0.23980433f,
+ 0.23344538f, 0.22707619f, 0.22069728f, 0.21430916f, 0.20791161f,
+ 0.20150517f, 0.19509031f, 0.18866688f, 0.18223536f, 0.17579627f,
+ 0.16934940f, 0.16289529f, 0.15643445f, 0.14996666f, 0.14349243f,
+ 0.13701232f, 0.13052608f, 0.12403426f, 0.11753736f, 0.11103519f,
+ 0.10452849f, 0.09801710f, 0.09150149f, 0.08498220f, 0.07845904f,
+ 0.07193252f, 0.06540315f, 0.05887074f, 0.05233581f, 0.04579888f,
+ 0.03925974f, 0.03271893f, 0.02617695f, 0.01963361f, 0.01308943f,
+ 0.00654493f, 0.00000000f, 0.00000000f, 0.00000000f, 0.00000000f,
+ 0.00000000f, 0.00000000f, 0.00000000f, 0.00000000f, 0.00000000f,
+ 0.00000000f, 0.00000000f, 0.00000000f, 0.00000000f, 0.00000000f,
+ 0.00000000f, 0.00000000f};
+
+// Hanning window: for 30 ms at 16 kHz with a 1024-point FFT, with symmetric zeros.
+static const float kBlocks480w1024[1024] = {
+ 0.00000000f, 0.00000000f, 0.00000000f, 0.00000000f, 0.00000000f,
+ 0.00000000f, 0.00000000f, 0.00000000f, 0.00000000f, 0.00000000f,
+ 0.00000000f, 0.00000000f, 0.00000000f, 0.00000000f, 0.00000000f,
+ 0.00000000f, 0.00000000f, 0.00000000f, 0.00000000f, 0.00000000f,
+ 0.00000000f, 0.00000000f, 0.00000000f, 0.00000000f, 0.00000000f,
+ 0.00000000f, 0.00000000f, 0.00000000f, 0.00000000f, 0.00000000f,
+ 0.00000000f, 0.00000000f, 0.00000000f, 0.00327249f, 0.00654494f,
+ 0.00981732f, 0.01308960f, 0.01636173f, 0.01963369f, 0.02290544f,
+ 0.02617695f, 0.02944817f, 0.03271908f, 0.03598964f, 0.03925982f,
+ 0.04252957f, 0.04579887f, 0.04906768f, 0.05233596f, 0.05560368f,
+ 0.05887080f, 0.06213730f, 0.06540313f, 0.06866825f, 0.07193266f,
+ 0.07519628f, 0.07845910f, 0.08172107f, 0.08498218f, 0.08824237f,
+ 0.09150162f, 0.09475989f, 0.09801714f, 0.10127335f, 0.10452846f,
+ 0.10778246f, 0.11103531f, 0.11428697f, 0.11753740f, 0.12078657f,
+ 0.12403446f, 0.12728101f, 0.13052620f, 0.13376999f, 0.13701233f,
+ 0.14025325f, 0.14349262f, 0.14673047f, 0.14996676f, 0.15320145f,
+ 0.15643448f, 0.15966582f, 0.16289547f, 0.16612339f, 0.16934951f,
+ 0.17257382f, 0.17579629f, 0.17901687f, 0.18223552f, 0.18545224f,
+ 0.18866697f, 0.19187967f, 0.19509032f, 0.19829889f, 0.20150533f,
+ 0.20470962f, 0.20791170f, 0.21111156f, 0.21430916f, 0.21750447f,
+ 0.22069745f, 0.22388805f, 0.22707628f, 0.23026206f, 0.23344538f,
+ 0.23662618f, 0.23980446f, 0.24298020f, 0.24615330f, 0.24932377f,
+ 0.25249159f, 0.25565669f, 0.25881904f, 0.26197866f, 0.26513544f,
+ 0.26828939f, 0.27144045f, 0.27458861f, 0.27773386f, 0.28087610f,
+ 0.28401536f, 0.28715158f, 0.29028466f, 0.29341471f, 0.29654160f,
+ 0.29966527f, 0.30278578f, 0.30590302f, 0.30901700f, 0.31212768f,
+ 0.31523499f, 0.31833893f, 0.32143945f, 0.32453656f, 0.32763019f,
+ 0.33072028f, 0.33380687f, 0.33688986f, 0.33996925f, 0.34304500f,
+ 0.34611708f, 0.34918544f, 0.35225007f, 0.35531089f, 0.35836795f,
+ 0.36142117f, 0.36447051f, 0.36751595f, 0.37055743f, 0.37359497f,
+ 0.37662852f, 0.37965801f, 0.38268346f, 0.38570479f, 0.38872197f,
+ 0.39173502f, 0.39474389f, 0.39774847f, 0.40074885f, 0.40374491f,
+ 0.40673664f, 0.40972406f, 0.41270703f, 0.41568562f, 0.41865975f,
+ 0.42162940f, 0.42459452f, 0.42755508f, 0.43051112f, 0.43346250f,
+ 0.43640924f, 0.43935132f, 0.44228873f, 0.44522133f, 0.44814920f,
+ 0.45107228f, 0.45399052f, 0.45690390f, 0.45981237f, 0.46271592f,
+ 0.46561453f, 0.46850815f, 0.47139674f, 0.47428030f, 0.47715878f,
+ 0.48003215f, 0.48290035f, 0.48576337f, 0.48862126f, 0.49147385f,
+ 0.49432120f, 0.49716330f, 0.50000000f, 0.50283140f, 0.50565743f,
+ 0.50847799f, 0.51129311f, 0.51410276f, 0.51690692f, 0.51970553f,
+ 0.52249855f, 0.52528602f, 0.52806789f, 0.53084403f, 0.53361452f,
+ 0.53637928f, 0.53913832f, 0.54189163f, 0.54463905f, 0.54738063f,
+ 0.55011642f, 0.55284631f, 0.55557024f, 0.55828828f, 0.56100029f,
+ 0.56370628f, 0.56640625f, 0.56910014f, 0.57178795f, 0.57446963f,
+ 0.57714522f, 0.57981455f, 0.58247769f, 0.58513463f, 0.58778524f,
+ 0.59042960f, 0.59306765f, 0.59569931f, 0.59832460f, 0.60094351f,
+ 0.60355598f, 0.60616195f, 0.60876143f, 0.61135441f, 0.61394083f,
+ 0.61652070f, 0.61909395f, 0.62166059f, 0.62422055f, 0.62677383f,
+ 0.62932038f, 0.63186020f, 0.63439333f, 0.63691956f, 0.63943899f,
+ 0.64195162f, 0.64445734f, 0.64695615f, 0.64944810f, 0.65193301f,
+ 0.65441096f, 0.65688187f, 0.65934587f, 0.66180271f, 0.66425246f,
+ 0.66669512f, 0.66913062f, 0.67155898f, 0.67398012f, 0.67639405f,
+ 0.67880076f, 0.68120021f, 0.68359232f, 0.68597710f, 0.68835455f,
+ 0.69072467f, 0.69308740f, 0.69544262f, 0.69779050f, 0.70013082f,
+ 0.70246369f, 0.70478904f, 0.70710677f, 0.70941699f, 0.71171963f,
+ 0.71401459f, 0.71630198f, 0.71858168f, 0.72085363f, 0.72311789f,
+ 0.72537440f, 0.72762316f, 0.72986406f, 0.73209721f, 0.73432255f,
+ 0.73653996f, 0.73874950f, 0.74095118f, 0.74314487f, 0.74533057f,
+ 0.74750835f, 0.74967808f, 0.75183982f, 0.75399351f, 0.75613910f,
+ 0.75827658f, 0.76040596f, 0.76252723f, 0.76464027f, 0.76674515f,
+ 0.76884186f, 0.77093029f, 0.77301043f, 0.77508241f, 0.77714598f,
+ 0.77920127f, 0.78124821f, 0.78328675f, 0.78531694f, 0.78733873f,
+ 0.78935206f, 0.79135692f, 0.79335338f, 0.79534125f, 0.79732066f,
+ 0.79929149f, 0.80125386f, 0.80320752f, 0.80515265f, 0.80708915f,
+ 0.80901700f, 0.81093621f, 0.81284672f, 0.81474853f, 0.81664157f,
+ 0.81852591f, 0.82040149f, 0.82226825f, 0.82412618f, 0.82597536f,
+ 0.82781565f, 0.82964706f, 0.83146966f, 0.83328325f, 0.83508795f,
+ 0.83688378f, 0.83867061f, 0.84044838f, 0.84221727f, 0.84397703f,
+ 0.84572780f, 0.84746957f, 0.84920216f, 0.85092574f, 0.85264021f,
+ 0.85434544f, 0.85604161f, 0.85772866f, 0.85940641f, 0.86107504f,
+ 0.86273444f, 0.86438453f, 0.86602545f, 0.86765707f, 0.86927933f,
+ 0.87089235f, 0.87249607f, 0.87409031f, 0.87567532f, 0.87725097f,
+ 0.87881714f, 0.88037390f, 0.88192129f, 0.88345921f, 0.88498765f,
+ 0.88650668f, 0.88801610f, 0.88951612f, 0.89100653f, 0.89248741f,
+ 0.89395881f, 0.89542055f, 0.89687276f, 0.89831537f, 0.89974827f,
+ 0.90117162f, 0.90258533f, 0.90398932f, 0.90538365f, 0.90676826f,
+ 0.90814316f, 0.90950841f, 0.91086388f, 0.91220951f, 0.91354549f,
+ 0.91487163f, 0.91618794f, 0.91749454f, 0.91879123f, 0.92007810f,
+ 0.92135513f, 0.92262226f, 0.92387950f, 0.92512691f, 0.92636442f,
+ 0.92759192f, 0.92880958f, 0.93001723f, 0.93121493f, 0.93240267f,
+ 0.93358046f, 0.93474817f, 0.93590593f, 0.93705362f, 0.93819135f,
+ 0.93931901f, 0.94043654f, 0.94154406f, 0.94264150f, 0.94372880f,
+ 0.94480604f, 0.94587320f, 0.94693011f, 0.94797695f, 0.94901365f,
+ 0.95004016f, 0.95105654f, 0.95206273f, 0.95305866f, 0.95404440f,
+ 0.95501995f, 0.95598525f, 0.95694035f, 0.95788521f, 0.95881975f,
+ 0.95974404f, 0.96065807f, 0.96156180f, 0.96245527f, 0.96333838f,
+ 0.96421117f, 0.96507370f, 0.96592581f, 0.96676767f, 0.96759909f,
+ 0.96842021f, 0.96923089f, 0.97003126f, 0.97082120f, 0.97160077f,
+ 0.97236991f, 0.97312868f, 0.97387701f, 0.97461486f, 0.97534233f,
+ 0.97605932f, 0.97676587f, 0.97746199f, 0.97814763f, 0.97882277f,
+ 0.97948742f, 0.98014158f, 0.98078531f, 0.98141843f, 0.98204112f,
+ 0.98265332f, 0.98325491f, 0.98384601f, 0.98442656f, 0.98499662f,
+ 0.98555607f, 0.98610497f, 0.98664331f, 0.98717111f, 0.98768836f,
+ 0.98819500f, 0.98869103f, 0.98917651f, 0.98965138f, 0.99011570f,
+ 0.99056935f, 0.99101239f, 0.99144489f, 0.99186671f, 0.99227792f,
+ 0.99267852f, 0.99306846f, 0.99344778f, 0.99381649f, 0.99417448f,
+ 0.99452192f, 0.99485862f, 0.99518472f, 0.99550015f, 0.99580491f,
+ 0.99609905f, 0.99638247f, 0.99665523f, 0.99691731f, 0.99716878f,
+ 0.99740952f, 0.99763954f, 0.99785894f, 0.99806762f, 0.99826562f,
+ 0.99845290f, 0.99862951f, 0.99879545f, 0.99895066f, 0.99909520f,
+ 0.99922901f, 0.99935216f, 0.99946457f, 0.99956632f, 0.99965733f,
+ 0.99973762f, 0.99980724f, 0.99986613f, 0.99991435f, 0.99995178f,
+ 0.99997860f, 0.99999464f, 1.00000000f, 0.99999464f, 0.99997860f,
+ 0.99995178f, 0.99991435f, 0.99986613f, 0.99980724f, 0.99973762f,
+ 0.99965733f, 0.99956632f, 0.99946457f, 0.99935216f, 0.99922901f,
+ 0.99909520f, 0.99895066f, 0.99879545f, 0.99862951f, 0.99845290f,
+ 0.99826562f, 0.99806762f, 0.99785894f, 0.99763954f, 0.99740946f,
+ 0.99716872f, 0.99691731f, 0.99665523f, 0.99638247f, 0.99609905f,
+ 0.99580491f, 0.99550015f, 0.99518472f, 0.99485862f, 0.99452192f,
+ 0.99417448f, 0.99381644f, 0.99344778f, 0.99306846f, 0.99267852f,
+ 0.99227792f, 0.99186671f, 0.99144489f, 0.99101239f, 0.99056935f,
+ 0.99011564f, 0.98965138f, 0.98917651f, 0.98869103f, 0.98819494f,
+ 0.98768836f, 0.98717111f, 0.98664331f, 0.98610497f, 0.98555607f,
+ 0.98499656f, 0.98442656f, 0.98384601f, 0.98325491f, 0.98265326f,
+ 0.98204112f, 0.98141843f, 0.98078525f, 0.98014158f, 0.97948742f,
+ 0.97882277f, 0.97814757f, 0.97746193f, 0.97676587f, 0.97605932f,
+ 0.97534227f, 0.97461486f, 0.97387695f, 0.97312862f, 0.97236991f,
+ 0.97160077f, 0.97082120f, 0.97003126f, 0.96923089f, 0.96842015f,
+ 0.96759909f, 0.96676761f, 0.96592581f, 0.96507365f, 0.96421117f,
+ 0.96333838f, 0.96245521f, 0.96156180f, 0.96065807f, 0.95974404f,
+ 0.95881969f, 0.95788515f, 0.95694029f, 0.95598525f, 0.95501995f,
+ 0.95404440f, 0.95305860f, 0.95206267f, 0.95105648f, 0.95004016f,
+ 0.94901365f, 0.94797695f, 0.94693011f, 0.94587314f, 0.94480604f,
+ 0.94372880f, 0.94264150f, 0.94154406f, 0.94043654f, 0.93931895f,
+ 0.93819129f, 0.93705362f, 0.93590593f, 0.93474817f, 0.93358046f,
+ 0.93240267f, 0.93121493f, 0.93001723f, 0.92880952f, 0.92759192f,
+ 0.92636436f, 0.92512691f, 0.92387950f, 0.92262226f, 0.92135507f,
+ 0.92007804f, 0.91879123f, 0.91749448f, 0.91618794f, 0.91487157f,
+ 0.91354543f, 0.91220951f, 0.91086382f, 0.90950835f, 0.90814310f,
+ 0.90676820f, 0.90538365f, 0.90398932f, 0.90258527f, 0.90117157f,
+ 0.89974827f, 0.89831525f, 0.89687276f, 0.89542055f, 0.89395875f,
+ 0.89248741f, 0.89100647f, 0.88951600f, 0.88801610f, 0.88650662f,
+ 0.88498759f, 0.88345915f, 0.88192123f, 0.88037384f, 0.87881714f,
+ 0.87725091f, 0.87567532f, 0.87409031f, 0.87249595f, 0.87089223f,
+ 0.86927933f, 0.86765701f, 0.86602539f, 0.86438447f, 0.86273432f,
+ 0.86107504f, 0.85940641f, 0.85772860f, 0.85604161f, 0.85434544f,
+ 0.85264009f, 0.85092574f, 0.84920216f, 0.84746951f, 0.84572780f,
+ 0.84397697f, 0.84221715f, 0.84044844f, 0.83867055f, 0.83688372f,
+ 0.83508795f, 0.83328319f, 0.83146954f, 0.82964706f, 0.82781565f,
+ 0.82597530f, 0.82412612f, 0.82226813f, 0.82040137f, 0.81852591f,
+ 0.81664157f, 0.81474847f, 0.81284660f, 0.81093609f, 0.80901700f,
+ 0.80708915f, 0.80515265f, 0.80320752f, 0.80125374f, 0.79929143f,
+ 0.79732066f, 0.79534125f, 0.79335332f, 0.79135686f, 0.78935200f,
+ 0.78733861f, 0.78531694f, 0.78328675f, 0.78124815f, 0.77920121f,
+ 0.77714586f, 0.77508223f, 0.77301049f, 0.77093029f, 0.76884180f,
+ 0.76674509f, 0.76464021f, 0.76252711f, 0.76040596f, 0.75827658f,
+ 0.75613904f, 0.75399339f, 0.75183970f, 0.74967796f, 0.74750835f,
+ 0.74533057f, 0.74314481f, 0.74095106f, 0.73874938f, 0.73653996f,
+ 0.73432249f, 0.73209721f, 0.72986400f, 0.72762305f, 0.72537428f,
+ 0.72311789f, 0.72085363f, 0.71858162f, 0.71630186f, 0.71401453f,
+ 0.71171951f, 0.70941705f, 0.70710677f, 0.70478898f, 0.70246363f,
+ 0.70013070f, 0.69779032f, 0.69544268f, 0.69308734f, 0.69072461f,
+ 0.68835449f, 0.68597704f, 0.68359220f, 0.68120021f, 0.67880070f,
+ 0.67639399f, 0.67398006f, 0.67155886f, 0.66913044f, 0.66669512f,
+ 0.66425240f, 0.66180259f, 0.65934575f, 0.65688181f, 0.65441096f,
+ 0.65193301f, 0.64944804f, 0.64695609f, 0.64445722f, 0.64195150f,
+ 0.63943905f, 0.63691956f, 0.63439327f, 0.63186014f, 0.62932026f,
+ 0.62677372f, 0.62422055f, 0.62166059f, 0.61909389f, 0.61652064f,
+ 0.61394072f, 0.61135429f, 0.60876143f, 0.60616189f, 0.60355592f,
+ 0.60094339f, 0.59832448f, 0.59569913f, 0.59306765f, 0.59042960f,
+ 0.58778518f, 0.58513451f, 0.58247757f, 0.57981461f, 0.57714522f,
+ 0.57446963f, 0.57178789f, 0.56910002f, 0.56640613f, 0.56370628f,
+ 0.56100023f, 0.55828822f, 0.55557019f, 0.55284619f, 0.55011630f,
+ 0.54738069f, 0.54463905f, 0.54189152f, 0.53913826f, 0.53637916f,
+ 0.53361434f, 0.53084403f, 0.52806783f, 0.52528596f, 0.52249849f,
+ 0.51970541f, 0.51690674f, 0.51410276f, 0.51129305f, 0.50847787f,
+ 0.50565726f, 0.50283122f, 0.50000006f, 0.49716327f, 0.49432117f,
+ 0.49147379f, 0.48862115f, 0.48576325f, 0.48290038f, 0.48003212f,
+ 0.47715873f, 0.47428021f, 0.47139663f, 0.46850798f, 0.46561456f,
+ 0.46271589f, 0.45981231f, 0.45690379f, 0.45399037f, 0.45107210f,
+ 0.44814920f, 0.44522130f, 0.44228864f, 0.43935123f, 0.43640912f,
+ 0.43346232f, 0.43051112f, 0.42755505f, 0.42459446f, 0.42162928f,
+ 0.41865960f, 0.41568545f, 0.41270703f, 0.40972400f, 0.40673658f,
+ 0.40374479f, 0.40074870f, 0.39774850f, 0.39474386f, 0.39173496f,
+ 0.38872188f, 0.38570464f, 0.38268328f, 0.37965804f, 0.37662849f,
+ 0.37359491f, 0.37055734f, 0.36751580f, 0.36447033f, 0.36142117f,
+ 0.35836792f, 0.35531086f, 0.35224995f, 0.34918529f, 0.34611690f,
+ 0.34304500f, 0.33996922f, 0.33688980f, 0.33380675f, 0.33072016f,
+ 0.32763001f, 0.32453656f, 0.32143945f, 0.31833887f, 0.31523487f,
+ 0.31212750f, 0.30901679f, 0.30590302f, 0.30278572f, 0.29966521f,
+ 0.29654145f, 0.29341453f, 0.29028472f, 0.28715155f, 0.28401530f,
+ 0.28087601f, 0.27773371f, 0.27458847f, 0.27144048f, 0.26828936f,
+ 0.26513538f, 0.26197854f, 0.25881892f, 0.25565651f, 0.25249159f,
+ 0.24932374f, 0.24615324f, 0.24298008f, 0.23980433f, 0.23662600f,
+ 0.23344538f, 0.23026201f, 0.22707619f, 0.22388794f, 0.22069728f,
+ 0.21750426f, 0.21430916f, 0.21111152f, 0.20791161f, 0.20470949f,
+ 0.20150517f, 0.19829892f, 0.19509031f, 0.19187963f, 0.18866688f,
+ 0.18545210f, 0.18223536f, 0.17901689f, 0.17579627f, 0.17257376f,
+ 0.16934940f, 0.16612324f, 0.16289529f, 0.15966584f, 0.15643445f,
+ 0.15320137f, 0.14996666f, 0.14673033f, 0.14349243f, 0.14025325f,
+ 0.13701232f, 0.13376991f, 0.13052608f, 0.12728085f, 0.12403426f,
+ 0.12078657f, 0.11753736f, 0.11428688f, 0.11103519f, 0.10778230f,
+ 0.10452849f, 0.10127334f, 0.09801710f, 0.09475980f, 0.09150149f,
+ 0.08824220f, 0.08498220f, 0.08172106f, 0.07845904f, 0.07519618f,
+ 0.07193252f, 0.06866808f, 0.06540315f, 0.06213728f, 0.05887074f,
+ 0.05560357f, 0.05233581f, 0.04906749f, 0.04579888f, 0.04252954f,
+ 0.03925974f, 0.03598953f, 0.03271893f, 0.02944798f, 0.02617695f,
+ 0.02290541f, 0.01963361f, 0.01636161f, 0.01308943f, 0.00981712f,
+ 0.00654493f, 0.00327244f, 0.00000000f, 0.00000000f, 0.00000000f,
+ 0.00000000f, 0.00000000f, 0.00000000f, 0.00000000f, 0.00000000f,
+ 0.00000000f, 0.00000000f, 0.00000000f, 0.00000000f, 0.00000000f,
+ 0.00000000f, 0.00000000f, 0.00000000f, 0.00000000f, 0.00000000f,
+ 0.00000000f, 0.00000000f, 0.00000000f, 0.00000000f, 0.00000000f,
+ 0.00000000f, 0.00000000f, 0.00000000f, 0.00000000f, 0.00000000f,
+ 0.00000000f, 0.00000000f, 0.00000000f, 0.00000000f};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_TRANSIENT_WINDOWS_PRIVATE_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/transient/wpd_node.cc b/third_party/libwebrtc/modules/audio_processing/transient/wpd_node.cc
new file mode 100644
index 0000000000..2e0ee7e5b7
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/transient/wpd_node.cc
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/transient/wpd_node.h"
+
+#include <math.h>
+#include <string.h>
+
+#include "common_audio/fir_filter.h"
+#include "common_audio/fir_filter_factory.h"
+#include "modules/audio_processing/transient/dyadic_decimator.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+WPDNode::WPDNode(size_t length,
+ const float* coefficients,
+ size_t coefficients_length)
+ : // The data buffer is sized to hold the parent data so that the node can
+ // contain and filter it.
+ data_(new float[2 * length + 1]),
+ length_(length),
+ filter_(
+ CreateFirFilter(coefficients, coefficients_length, 2 * length + 1)) {
+ RTC_DCHECK_GT(length, 0);
+ RTC_DCHECK(coefficients);
+ RTC_DCHECK_GT(coefficients_length, 0);
+ memset(data_.get(), 0, (2 * length + 1) * sizeof(data_[0]));
+}
+
+WPDNode::~WPDNode() {}
+
+int WPDNode::Update(const float* parent_data, size_t parent_data_length) {
+ if (!parent_data || (parent_data_length / 2) != length_) {
+ return -1;
+ }
+
+ // Filter data.
+ filter_->Filter(parent_data, parent_data_length, data_.get());
+
+ // Decimate data.
+ const bool kOddSequence = true;
+ size_t output_samples = DyadicDecimate(data_.get(), parent_data_length,
+ kOddSequence, data_.get(), length_);
+ if (output_samples != length_) {
+ return -1;
+ }
+
+ // Take the absolute value of each sample.
+ for (size_t i = 0; i < length_; ++i) {
+ data_[i] = fabs(data_[i]);
+ }
+
+ return 0;
+}
+
+int WPDNode::set_data(const float* new_data, size_t length) {
+ if (!new_data || length != length_) {
+ return -1;
+ }
+ memcpy(data_.get(), new_data, length * sizeof(data_[0]));
+ return 0;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/transient/wpd_node.h b/third_party/libwebrtc/modules/audio_processing/transient/wpd_node.h
new file mode 100644
index 0000000000..41614fab0f
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/transient/wpd_node.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_TRANSIENT_WPD_NODE_H_
+#define MODULES_AUDIO_PROCESSING_TRANSIENT_WPD_NODE_H_
+
+#include <memory>
+
+namespace webrtc {
+
+class FIRFilter;
+
+// A single node of a Wavelet Packet Decomposition (WPD) tree.
+class WPDNode {
+ public:
+ // Creates a WPDNode. The data vector will contain zeros. The filter will have
+ // the coefficients provided.
+ WPDNode(size_t length, const float* coefficients, size_t coefficients_length);
+ ~WPDNode();
+
+ // Updates the node data. `parent_data_length` / 2 must be equal to `length_`.
+ // Returns 0 if correct, and -1 otherwise.
+ int Update(const float* parent_data, size_t parent_data_length);
+
+ const float* data() const { return data_.get(); }
+ // Returns 0 if correct, and -1 otherwise.
+ int set_data(const float* new_data, size_t length);
+ size_t length() const { return length_; }
+
+ private:
+ std::unique_ptr<float[]> data_;
+ size_t length_;
+ std::unique_ptr<FIRFilter> filter_;
+};
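+
+// A minimal usage sketch (illustrative only; the coefficient values are the
+// arbitrary ones used in the unit tests):
+//
+//   const float kCoefficients[] = {0.2f, -0.3f, 0.5f, -0.7f, 0.11f};
+//   WPDNode node(5, kCoefficients, 5);
+//   const float parent[10] = {1.f, 2.f, 3.f, 4.f, 5.f,
+//                             6.f, 7.f, 8.f, 9.f, 10.f};
+//   if (node.Update(parent, 10) == 0) {
+//     const float* result = node.data();  // 5 filtered, decimated samples.
+//   }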
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_TRANSIENT_WPD_NODE_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/transient/wpd_node_unittest.cc b/third_party/libwebrtc/modules/audio_processing/transient/wpd_node_unittest.cc
new file mode 100644
index 0000000000..5f9238255c
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/transient/wpd_node_unittest.cc
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/transient/wpd_node.h"
+
+#include <string.h>
+
+#include "test/gtest.h"
+
+namespace webrtc {
+
+static const size_t kDataLength = 5;
+static const float kTolerance = 0.0001f;
+
+static const size_t kParentDataLength = kDataLength * 2;
+static const float kParentData[kParentDataLength] = {1.f, 2.f, 3.f, 4.f, 5.f,
+ 6.f, 7.f, 8.f, 9.f, 10.f};
+
+static const float kCoefficients[] = {0.2f, -0.3f, 0.5f, -0.7f, 0.11f};
+static const size_t kCoefficientsLength =
+ sizeof(kCoefficients) / sizeof(kCoefficients[0]);
+
+TEST(WPDNodeTest, Accessors) {
+ WPDNode node(kDataLength, kCoefficients, kCoefficientsLength);
+ EXPECT_EQ(0, node.set_data(kParentData, kDataLength));
+ EXPECT_EQ(0, memcmp(node.data(), kParentData,
+ kDataLength * sizeof(node.data()[0])));
+}
+
+TEST(WPDNodeTest, UpdateThatOnlyDecimates) {
+ const float kIdentityCoefficient = 1.f;
+ WPDNode node(kDataLength, &kIdentityCoefficient, 1);
+ EXPECT_EQ(0, node.Update(kParentData, kParentDataLength));
+ for (size_t i = 0; i < kDataLength; ++i) {
+ EXPECT_FLOAT_EQ(kParentData[i * 2 + 1], node.data()[i]);
+ }
+}
+
+TEST(WPDNodeTest, UpdateWithArbitraryDataAndArbitraryFilter) {
+ WPDNode node(kDataLength, kCoefficients, kCoefficientsLength);
+ EXPECT_EQ(0, node.Update(kParentData, kParentDataLength));
+ EXPECT_NEAR(0.1f, node.data()[0], kTolerance);
+ EXPECT_NEAR(0.2f, node.data()[1], kTolerance);
+ EXPECT_NEAR(0.18f, node.data()[2], kTolerance);
+ EXPECT_NEAR(0.56f, node.data()[3], kTolerance);
+ EXPECT_NEAR(0.94f, node.data()[4], kTolerance);
+}
+
+TEST(WPDNodeTest, ExpectedErrorReturnValue) {
+ WPDNode node(kDataLength, kCoefficients, kCoefficientsLength);
+ EXPECT_EQ(-1, node.Update(kParentData, kParentDataLength - 1));
+ EXPECT_EQ(-1, node.Update(NULL, kParentDataLength));
+ EXPECT_EQ(-1, node.set_data(kParentData, kDataLength - 1));
+ EXPECT_EQ(-1, node.set_data(NULL, kDataLength));
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/transient/wpd_tree.cc b/third_party/libwebrtc/modules/audio_processing/transient/wpd_tree.cc
new file mode 100644
index 0000000000..c8aa615881
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/transient/wpd_tree.cc
@@ -0,0 +1,118 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/transient/wpd_tree.h"
+
+#include <string.h>
+
+#include "modules/audio_processing/transient/wpd_node.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+WPDTree::WPDTree(size_t data_length,
+ const float* high_pass_coefficients,
+ const float* low_pass_coefficients,
+ size_t coefficients_length,
+ int levels)
+ : data_length_(data_length),
+ levels_(levels),
+ num_nodes_((1 << (levels + 1)) - 1) {
+ RTC_DCHECK_GT(data_length, (static_cast<size_t>(1) << levels));
+ RTC_DCHECK(high_pass_coefficients);
+ RTC_DCHECK(low_pass_coefficients);
+ RTC_DCHECK_GT(levels, 0);
+ // Size is 1 more, so we can use the array as 1-based. nodes_[0] is never
+ // allocated.
+ nodes_.reset(new std::unique_ptr<WPDNode>[num_nodes_ + 1]);
+
+ // Create the root node.
+ const float kRootCoefficient = 1.f; // Identity Coefficient.
+ nodes_[1].reset(new WPDNode(data_length, &kRootCoefficient, 1));
+ // Variables used to create the rest of the nodes.
+ size_t index = 1;
+ size_t index_left_child = 0;
+ size_t index_right_child = 0;
+
+ int num_nodes_at_curr_level = 0;
+
+ // Branch each node at each level to create its children. The last level is
+ // not branched (all the nodes of that level are leaves).
+ for (int current_level = 0; current_level < levels; ++current_level) {
+ num_nodes_at_curr_level = 1 << current_level;
+ for (int i = 0; i < num_nodes_at_curr_level; ++i) {
+ index = (1 << current_level) + i;
+ // Obtain the index of the current node children.
+ index_left_child = index * 2;
+ index_right_child = index_left_child + 1;
+ nodes_[index_left_child].reset(new WPDNode(nodes_[index]->length() / 2,
+ low_pass_coefficients,
+ coefficients_length));
+ nodes_[index_right_child].reset(new WPDNode(nodes_[index]->length() / 2,
+ high_pass_coefficients,
+ coefficients_length));
+ }
+ }
+}
+
+WPDTree::~WPDTree() {}
+
+WPDNode* WPDTree::NodeAt(int level, int index) {
+ if (level < 0 || level > levels_ || index < 0 || index >= 1 << level) {
+ return NULL;
+ }
+
+ return nodes_[(1 << level) + index].get();
+}
+
+int WPDTree::Update(const float* data, size_t data_length) {
+ if (!data || data_length != data_length_) {
+ return -1;
+ }
+
+ // Update the root node.
+ int update_result = nodes_[1]->set_data(data, data_length);
+ if (update_result != 0) {
+ return -1;
+ }
+
+ // Variables used to update the rest of the nodes.
+ size_t index = 1;
+ size_t index_left_child = 0;
+ size_t index_right_child = 0;
+
+ int num_nodes_at_curr_level = 0;
+
+ for (int current_level = 0; current_level < levels_; ++current_level) {
+ num_nodes_at_curr_level = 1 << current_level;
+ for (int i = 0; i < num_nodes_at_curr_level; ++i) {
+ index = (1 << current_level) + i;
+ // Obtain the index of the current node children.
+ index_left_child = index * 2;
+ index_right_child = index_left_child + 1;
+
+ update_result = nodes_[index_left_child]->Update(nodes_[index]->data(),
+ nodes_[index]->length());
+ if (update_result != 0) {
+ return -1;
+ }
+
+ update_result = nodes_[index_right_child]->Update(
+ nodes_[index]->data(), nodes_[index]->length());
+ if (update_result != 0) {
+ return -1;
+ }
+ }
+ }
+
+ return 0;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/transient/wpd_tree.h b/third_party/libwebrtc/modules/audio_processing/transient/wpd_tree.h
new file mode 100644
index 0000000000..13cb8d9c2f
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/transient/wpd_tree.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_TRANSIENT_WPD_TREE_H_
+#define MODULES_AUDIO_PROCESSING_TRANSIENT_WPD_TREE_H_
+
+#include <stddef.h>
+
+#include <memory>
+
+#include "modules/audio_processing/transient/wpd_node.h"
+
+namespace webrtc {
+
+// Tree of a Wavelet Packet Decomposition (WPD).
+//
+// The root node contains all the data provided; for each node in the tree, the
+// left child contains the approximation coefficients extracted from the node,
+// and the right child contains the detail coefficients.
+// The tree preserves its state between calls, so it can be updated repeatedly.
+//
+// The number of nodes in the tree will be 2 ^ (levels + 1) - 1.
+//
+// Implementation details: Since the tree will always be a complete binary
+// tree, it is implemented using a single linear array instead of managing the
+// relationships in each node. For convenience, the array is treated as
+// 1-based (index 0 is unused). Taking that into account, the following
+// formulas apply:
+// Root node index: 1.
+// Node(Level, Index in that level): 2 ^ Level + (Index in that level).
+// Left Child: Current node index * 2.
+// Right Child: Current node index * 2 + 1.
+// Parent: Current Node Index / 2 (Integer division).
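+// Example: the node at (level 2, index 1) is stored at array index
+// 2 ^ 2 + 1 = 5, its children at 10 and 11, and its parent at 5 / 2 = 2.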
+class WPDTree {
+ public:
+ // Creates a WPD tree using the data length and coefficients provided.
+ WPDTree(size_t data_length,
+ const float* high_pass_coefficients,
+ const float* low_pass_coefficients,
+ size_t coefficients_length,
+ int levels);
+ ~WPDTree();
+
+ // Returns the number of nodes at any given level.
+ static int NumberOfNodesAtLevel(int level) { return 1 << level; }
+
+ // Returns a pointer to the node at the given level and index (within that
+ // level).
+ // Level goes from 0 to levels().
+ // Index goes from 0 to NumberOfNodesAtLevel(level) - 1.
+ //
+ // You can use the following formulas to get any node within the tree:
+ // Notation: (Level, Index of node in that level).
+ // Root node: (0/0).
+ // Left Child: (Current node level + 1, Current node index * 2).
+ // Right Child: (Current node level + 1, Current node index * 2 + 1).
+ // Parent: (Current node level - 1, Current node index / 2) (Integer division)
+ //
+ // If level or index are out of bounds the function will return NULL.
+ WPDNode* NodeAt(int level, int index);
+
+ // Updates all the nodes of the tree with the new data. `data_length` must be
+ // the same as the one used when the tree was created.
+ // Returns 0 if correct, and -1 otherwise.
+ int Update(const float* data, size_t data_length);
+
+ // Returns the total number of levels below the root. The root is considered
+ // level 0.
+ int levels() const { return levels_; }
+
+ // Returns the total number of nodes.
+ int num_nodes() const { return num_nodes_; }
+
+ // Returns the total number of leaves.
+ int num_leaves() const { return 1 << levels_; }
+
+ private:
+ size_t data_length_;
+ int levels_;
+ int num_nodes_;
+ std::unique_ptr<std::unique_ptr<WPDNode>[]> nodes_;
+};
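+
+// A minimal usage sketch, assuming the Daubechies 8 filter bank from
+// daubechies_8_wavelet_coeffs.h and 10 ms frames at 16 kHz, as in the unit
+// tests:
+//
+//   WPDTree tree(160, kDaubechies8HighPassCoefficients,
+//                kDaubechies8LowPassCoefficients,
+//                kDaubechies8CoefficientsLength, 3);
+//   float frame[160];  // Filled with audio samples.
+//   if (tree.Update(frame, 160) == 0) {
+//     // Each of the 8 leaves holds 160 >> 3 = 20 coefficients.
+//     const float* leaf0 = tree.NodeAt(3, 0)->data();
+//   }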
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_TRANSIENT_WPD_TREE_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/transient/wpd_tree_unittest.cc b/third_party/libwebrtc/modules/audio_processing/transient/wpd_tree_unittest.cc
new file mode 100644
index 0000000000..bf3ff987d7
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/transient/wpd_tree_unittest.cc
@@ -0,0 +1,177 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/transient/wpd_tree.h"
+
+#include <memory>
+#include <string>
+
+#include "modules/audio_processing/transient/daubechies_8_wavelet_coeffs.h"
+#include "modules/audio_processing/transient/file_utils.h"
+#include "rtc_base/strings/string_builder.h"
+#include "rtc_base/system/file_wrapper.h"
+#include "test/gtest.h"
+#include "test/testsupport/file_utils.h"
+
+namespace webrtc {
+
+TEST(WPDTreeTest, Construction) {
+ const size_t kTestBufferSize = 100;
+ const int kLevels = 5;
+ const int kExpectedNumberOfNodes = (1 << (kLevels + 1)) - 1;
+
+ float test_buffer[kTestBufferSize];
+ memset(test_buffer, 0.f, kTestBufferSize * sizeof(*test_buffer));
+ float test_coefficients[] = {1.f, 2.f, 3.f, 4.f, 5.f};
+ const size_t kTestCoefficientsLength =
+ sizeof(test_coefficients) / sizeof(test_coefficients[0]);
+ WPDTree tree(kTestBufferSize, test_coefficients, test_coefficients,
+ kTestCoefficientsLength, kLevels);
+ ASSERT_EQ(kExpectedNumberOfNodes, tree.num_nodes());
+ // Checks for NodeAt(level, index).
+ int nodes_at_level = 0;
+ for (int level = 0; level <= kLevels; ++level) {
+ nodes_at_level = 1 << level;
+ for (int i = 0; i < nodes_at_level; ++i) {
+ ASSERT_TRUE(NULL != tree.NodeAt(level, i));
+ }
+ // Out of bounds.
+ EXPECT_EQ(NULL, tree.NodeAt(level, -1));
+ EXPECT_EQ(NULL, tree.NodeAt(level, -12));
+ EXPECT_EQ(NULL, tree.NodeAt(level, nodes_at_level));
+ EXPECT_EQ(NULL, tree.NodeAt(level, nodes_at_level + 5));
+ }
+ // Out of bounds.
+ EXPECT_EQ(NULL, tree.NodeAt(-1, 0));
+ EXPECT_EQ(NULL, tree.NodeAt(-12, 0));
+ EXPECT_EQ(NULL, tree.NodeAt(kLevels + 1, 0));
+ EXPECT_EQ(NULL, tree.NodeAt(kLevels + 5, 0));
+ // Checks for Update().
+ EXPECT_EQ(0, tree.Update(test_buffer, kTestBufferSize));
+ EXPECT_EQ(-1, tree.Update(NULL, kTestBufferSize));
+ EXPECT_EQ(-1, tree.Update(test_buffer, kTestBufferSize - 1));
+}
+
+// This test verifies the correctness of the tree by comparing its results
+// with those produced by the Matlab equivalent, which are stored in the
+// Matlab output files.
+// It also writes its results to its own set of files in the out directory.
+// Matlab and output files contain all the results appended in double
+// precision (little endian).
+#if defined(WEBRTC_IOS)
+TEST(WPDTreeTest, DISABLED_CorrectnessBasedOnMatlabFiles) {
+#else
+TEST(WPDTreeTest, CorrectnessBasedOnMatlabFiles) {
+#endif
+ // 10 ms at 16000 Hz.
+ const size_t kTestBufferSize = 160;
+ const int kLevels = 3;
+ const int kLeaves = 1 << kLevels;
+ const size_t kLeavesSamples = kTestBufferSize >> kLevels;
+ // Create a tree with Daubechies 8 wavelet coefficients.
+ WPDTree tree(kTestBufferSize, kDaubechies8HighPassCoefficients,
+ kDaubechies8LowPassCoefficients, kDaubechies8CoefficientsLength,
+ kLevels);
+ // Allocate and open all matlab and out files.
+ FileWrapper matlab_files_data[kLeaves];
+ FileWrapper out_files_data[kLeaves];
+
+ for (int i = 0; i < kLeaves; ++i) {
+ // Matlab files.
+ rtc::StringBuilder matlab_stream;
+ matlab_stream << "audio_processing/transient/wpd" << i;
+ std::string matlab_string = test::ResourcePath(matlab_stream.str(), "dat");
+ matlab_files_data[i] = FileWrapper::OpenReadOnly(matlab_string);
+
+ bool file_opened = matlab_files_data[i].is_open();
+ ASSERT_TRUE(file_opened) << "File could not be opened.\n" << matlab_string;
+
+ // Out files.
+ rtc::StringBuilder out_stream;
+ out_stream << test::OutputPath() << "wpd_" << i << ".out";
+ std::string out_string = out_stream.str();
+
+ out_files_data[i] = FileWrapper::OpenWriteOnly(out_string);
+
+ file_opened = out_files_data[i].is_open();
+ ASSERT_TRUE(file_opened) << "File could not be opened.\n" << out_string;
+ }
+
+ // Prepare the test file.
+ std::string test_file_name = test::ResourcePath(
+ "audio_processing/transient/ajm-macbook-1-spke16m", "pcm");
+
+ FileWrapper test_file = FileWrapper::OpenReadOnly(test_file_name);
+
+ bool file_opened = test_file.is_open();
+ ASSERT_TRUE(file_opened) << "File could not be opened.\n" << test_file_name;
+
+ float test_buffer[kTestBufferSize];
+
+ // Only the first frames of the audio file are tested. The Matlab files also
+ // only contain information about the first frames.
+ const size_t kMaxFramesToTest = 100;
+ const float kTolerance = 0.03f;
+
+ size_t frames_read = 0;
+
+ // Read first buffer from the PCM test file.
+ size_t file_samples_read =
+ ReadInt16FromFileToFloatBuffer(&test_file, kTestBufferSize, test_buffer);
+ while (file_samples_read > 0 && frames_read < kMaxFramesToTest) {
+ ++frames_read;
+
+ if (file_samples_read < kTestBufferSize) {
+ // Pad the rest of the buffer with zeros.
+ for (size_t i = file_samples_read; i < kTestBufferSize; ++i) {
+ test_buffer[i] = 0.0;
+ }
+ }
+ tree.Update(test_buffer, kTestBufferSize);
+ double matlab_buffer[kTestBufferSize];
+
+ // Compare results with data from the matlab test files.
+ for (int i = 0; i < kLeaves; ++i) {
+ // Compare data values
+ size_t matlab_samples_read = ReadDoubleBufferFromFile(
+ &matlab_files_data[i], kLeavesSamples, matlab_buffer);
+
+ ASSERT_EQ(kLeavesSamples, matlab_samples_read)
+ << "Matlab test files are malformed.\n"
+ "File: 3_"
+ << i;
+ // Get output data from the corresponding node
+ const float* node_data = tree.NodeAt(kLevels, i)->data();
+ // Compare with matlab files.
+ for (size_t j = 0; j < kLeavesSamples; ++j) {
+ EXPECT_NEAR(matlab_buffer[j], node_data[j], kTolerance)
+ << "\nLeaf: " << i << "\nSample: " << j
+ << "\nFrame: " << frames_read - 1;
+ }
+
+ // Write results to out files.
+ WriteFloatBufferToFile(&out_files_data[i], kLeavesSamples, node_data);
+ }
+
+ // Read next buffer from the PCM test file.
+ file_samples_read = ReadInt16FromFileToFloatBuffer(
+ &test_file, kTestBufferSize, test_buffer);
+ }
+
+ // Close all matlab and out files.
+ for (int i = 0; i < kLeaves; ++i) {
+ matlab_files_data[i].Close();
+ out_files_data[i].Close();
+ }
+
+ test_file.Close();
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/utility/BUILD.gn b/third_party/libwebrtc/modules/audio_processing/utility/BUILD.gn
new file mode 100644
index 0000000000..4851e77b03
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/utility/BUILD.gn
@@ -0,0 +1,79 @@
+# Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+import("../../../webrtc.gni")
+
+rtc_library("cascaded_biquad_filter") {
+ sources = [
+ "cascaded_biquad_filter.cc",
+ "cascaded_biquad_filter.h",
+ ]
+ deps = [
+ "../../../api:array_view",
+ "../../../rtc_base:checks",
+ ]
+}
+
+rtc_library("legacy_delay_estimator") {
+ sources = [
+ "delay_estimator.cc",
+ "delay_estimator.h",
+ "delay_estimator_internal.h",
+ "delay_estimator_wrapper.cc",
+ "delay_estimator_wrapper.h",
+ ]
+ deps = [ "../../../rtc_base:checks" ]
+}
+
+rtc_library("pffft_wrapper") {
+ visibility = [ "../*" ]
+ sources = [
+ "pffft_wrapper.cc",
+ "pffft_wrapper.h",
+ ]
+ deps = [
+ "../../../api:array_view",
+ "../../../rtc_base:checks",
+ "//third_party/pffft",
+ ]
+}
+
+if (rtc_include_tests) {
+ rtc_library("cascaded_biquad_filter_unittest") {
+ testonly = true
+
+ sources = [ "cascaded_biquad_filter_unittest.cc" ]
+ deps = [
+ ":cascaded_biquad_filter",
+ "../../../test:test_support",
+ "//testing/gtest",
+ ]
+ }
+
+ rtc_library("legacy_delay_estimator_unittest") {
+ testonly = true
+
+ sources = [ "delay_estimator_unittest.cc" ]
+ deps = [
+ ":legacy_delay_estimator",
+ "../../../test:test_support",
+ "//testing/gtest",
+ ]
+ }
+
+ rtc_library("pffft_wrapper_unittest") {
+ testonly = true
+ sources = [ "pffft_wrapper_unittest.cc" ]
+ deps = [
+ ":pffft_wrapper",
+ "../../../test:test_support",
+ "//testing/gtest",
+ "//third_party/pffft",
+ ]
+ }
+}
diff --git a/third_party/libwebrtc/modules/audio_processing/utility/DEPS b/third_party/libwebrtc/modules/audio_processing/utility/DEPS
new file mode 100644
index 0000000000..c72d810b24
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/utility/DEPS
@@ -0,0 +1,3 @@
+include_rules = [
+ "+third_party/pffft",
+]
diff --git a/third_party/libwebrtc/modules/audio_processing/utility/cascaded_biquad_filter.cc b/third_party/libwebrtc/modules/audio_processing/utility/cascaded_biquad_filter.cc
new file mode 100644
index 0000000000..0d236ce0be
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/utility/cascaded_biquad_filter.cc
@@ -0,0 +1,126 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/audio_processing/utility/cascaded_biquad_filter.h"
+
+#include <algorithm>
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+CascadedBiQuadFilter::BiQuadParam::BiQuadParam(std::complex<float> zero,
+ std::complex<float> pole,
+ float gain,
+ bool mirror_zero_along_i_axis)
+ : zero(zero),
+ pole(pole),
+ gain(gain),
+ mirror_zero_along_i_axis(mirror_zero_along_i_axis) {}
+
+CascadedBiQuadFilter::BiQuadParam::BiQuadParam(const BiQuadParam&) = default;
+
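+// The conversion below assumes complex-conjugate zero and pole pairs, giving
+// the transfer function
+//
+//   H(z) = gain * (1 - 2 Re(zero) z^-1 + |zero|^2 z^-2) /
+//                 (1 + a[0] z^-1 + a[1] z^-2),
+//
+// where a[0] = -2 Re(pole) and a[1] = |pole|^2. When
+// `mirror_zero_along_i_axis` is set, the zeros are instead at +/- Re(zero).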
+CascadedBiQuadFilter::BiQuad::BiQuad(
+ const CascadedBiQuadFilter::BiQuadParam& param)
+ : x(), y() {
+ float z_r = std::real(param.zero);
+ float z_i = std::imag(param.zero);
+ float p_r = std::real(param.pole);
+ float p_i = std::imag(param.pole);
+ float gain = param.gain;
+
+ if (param.mirror_zero_along_i_axis) {
+ // Assuming zeroes at z_r and -z_r.
+ RTC_DCHECK(z_i == 0.f);
+ coefficients.b[0] = gain * 1.f;
+ coefficients.b[1] = 0.f;
+ coefficients.b[2] = gain * -(z_r * z_r);
+ } else {
+ // Assuming zeros at (z_r + z_i*i) and (z_r - z_i*i).
+ coefficients.b[0] = gain * 1.f;
+ coefficients.b[1] = gain * -2.f * z_r;
+ coefficients.b[2] = gain * (z_r * z_r + z_i * z_i);
+ }
+
+ // Assuming poles at (p_r + p_i*i) and (p_r - p_i*i).
+ coefficients.a[0] = -2.f * p_r;
+ coefficients.a[1] = p_r * p_r + p_i * p_i;
+}
+
+void CascadedBiQuadFilter::BiQuad::Reset() {
+ x[0] = x[1] = y[0] = y[1] = 0.f;
+}
+
+CascadedBiQuadFilter::CascadedBiQuadFilter(
+ const CascadedBiQuadFilter::BiQuadCoefficients& coefficients,
+ size_t num_biquads)
+ : biquads_(num_biquads, BiQuad(coefficients)) {}
+
+CascadedBiQuadFilter::CascadedBiQuadFilter(
+ const std::vector<CascadedBiQuadFilter::BiQuadParam>& biquad_params) {
+ for (const auto& param : biquad_params) {
+ biquads_.push_back(BiQuad(param));
+ }
+}
+
+CascadedBiQuadFilter::~CascadedBiQuadFilter() = default;
+
+void CascadedBiQuadFilter::Process(rtc::ArrayView<const float> x,
+ rtc::ArrayView<float> y) {
+ if (biquads_.size() > 0) {
+ ApplyBiQuad(x, y, &biquads_[0]);
+ for (size_t k = 1; k < biquads_.size(); ++k) {
+ ApplyBiQuad(y, y, &biquads_[k]);
+ }
+ } else {
+ std::copy(x.begin(), x.end(), y.begin());
+ }
+}
+
+void CascadedBiQuadFilter::Process(rtc::ArrayView<float> y) {
+ for (auto& biquad : biquads_) {
+ ApplyBiQuad(y, y, &biquad);
+ }
+}
+
+void CascadedBiQuadFilter::Reset() {
+ for (auto& biquad : biquads_) {
+ biquad.Reset();
+ }
+}
+
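+// Direct form 1 recursion:
+//   y[n] = b[0] x[n] + b[1] x[n-1] + b[2] x[n-2] - a[0] y[n-1] - a[1] y[n-2].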
+void CascadedBiQuadFilter::ApplyBiQuad(rtc::ArrayView<const float> x,
+ rtc::ArrayView<float> y,
+ CascadedBiQuadFilter::BiQuad* biquad) {
+ RTC_DCHECK_EQ(x.size(), y.size());
+ const float c_a_0 = biquad->coefficients.a[0];
+ const float c_a_1 = biquad->coefficients.a[1];
+ const float c_b_0 = biquad->coefficients.b[0];
+ const float c_b_1 = biquad->coefficients.b[1];
+ const float c_b_2 = biquad->coefficients.b[2];
+ float m_x_0 = biquad->x[0];
+ float m_x_1 = biquad->x[1];
+ float m_y_0 = biquad->y[0];
+ float m_y_1 = biquad->y[1];
+ for (size_t k = 0; k < x.size(); ++k) {
+ const float tmp = x[k];
+ y[k] = c_b_0 * tmp + c_b_1 * m_x_0 + c_b_2 * m_x_1 - c_a_0 * m_y_0 -
+ c_a_1 * m_y_1;
+ m_x_1 = m_x_0;
+ m_x_0 = tmp;
+ m_y_1 = m_y_0;
+ m_y_0 = y[k];
+ }
+ biquad->x[0] = m_x_0;
+ biquad->x[1] = m_x_1;
+ biquad->y[0] = m_y_0;
+ biquad->y[1] = m_y_1;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/utility/cascaded_biquad_filter.h b/third_party/libwebrtc/modules/audio_processing/utility/cascaded_biquad_filter.h
new file mode 100644
index 0000000000..120b52aa57
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/utility/cascaded_biquad_filter.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_UTILITY_CASCADED_BIQUAD_FILTER_H_
+#define MODULES_AUDIO_PROCESSING_UTILITY_CASCADED_BIQUAD_FILTER_H_
+
+#include <stddef.h>
+
+#include <complex>
+#include <vector>
+
+#include "api/array_view.h"
+
+namespace webrtc {
+
+// Applies a number of biquads in a cascaded manner. The filter implementation
+// is direct form 1.
+class CascadedBiQuadFilter {
+ public:
+ struct BiQuadParam {
+ BiQuadParam(std::complex<float> zero,
+ std::complex<float> pole,
+ float gain,
+ bool mirror_zero_along_i_axis = false);
+ explicit BiQuadParam(const BiQuadParam&);
+ std::complex<float> zero;
+ std::complex<float> pole;
+ float gain;
+ bool mirror_zero_along_i_axis;
+ };
+
+ struct BiQuadCoefficients {
+ float b[3];
+ float a[2];
+ };
+
+ struct BiQuad {
+ explicit BiQuad(const BiQuadCoefficients& coefficients)
+ : coefficients(coefficients), x(), y() {}
+ explicit BiQuad(const CascadedBiQuadFilter::BiQuadParam& param);
+ void Reset();
+ BiQuadCoefficients coefficients;
+ float x[2];
+ float y[2];
+ };
+
+ CascadedBiQuadFilter(
+ const CascadedBiQuadFilter::BiQuadCoefficients& coefficients,
+ size_t num_biquads);
+ explicit CascadedBiQuadFilter(
+ const std::vector<CascadedBiQuadFilter::BiQuadParam>& biquad_params);
+ ~CascadedBiQuadFilter();
+ CascadedBiQuadFilter(const CascadedBiQuadFilter&) = delete;
+ CascadedBiQuadFilter& operator=(const CascadedBiQuadFilter&) = delete;
+
+ // Applies the biquads on the values in x in order to form the output in y.
+ void Process(rtc::ArrayView<const float> x, rtc::ArrayView<float> y);
+ // Applies the biquads on the values in y in an in-place manner.
+ void Process(rtc::ArrayView<float> y);
+ // Resets the filter to its initial state.
+ void Reset();
+
+ private:
+ void ApplyBiQuad(rtc::ArrayView<const float> x,
+ rtc::ArrayView<float> y,
+ CascadedBiQuadFilter::BiQuad* biquad);
+
+ std::vector<BiQuad> biquads_;
+};
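+
+// A minimal usage sketch (the transparent coefficients below are
+// placeholders; real values come from a filter design, as in the unit tests):
+//
+//   CascadedBiQuadFilter::BiQuadCoefficients coefficients = {{1.f, 0.f, 0.f},
+//                                                            {0.f, 0.f}};
+//   CascadedBiQuadFilter filter(coefficients, /*num_biquads=*/2);
+//   std::vector<float> audio(160, 1.f);
+//   filter.Process(audio);  // In-place variant.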
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_UTILITY_CASCADED_BIQUAD_FILTER_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/utility/cascaded_biquad_filter_gn/moz.build b/third_party/libwebrtc/modules/audio_processing/utility/cascaded_biquad_filter_gn/moz.build
new file mode 100644
index 0000000000..9147ecb214
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/utility/cascaded_biquad_filter_gn/moz.build
@@ -0,0 +1,201 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/audio_processing/utility/cascaded_biquad_filter.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("cascaded_biquad_filter_gn")
diff --git a/third_party/libwebrtc/modules/audio_processing/utility/cascaded_biquad_filter_unittest.cc b/third_party/libwebrtc/modules/audio_processing/utility/cascaded_biquad_filter_unittest.cc
new file mode 100644
index 0000000000..ff7022dba4
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/utility/cascaded_biquad_filter_unittest.cc
@@ -0,0 +1,157 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/utility/cascaded_biquad_filter.h"
+
+#include <vector>
+
+#include "test/gtest.h"
+
+namespace webrtc {
+
+namespace {
+
+// Coefficients for a second order Butterworth high-pass filter with cutoff
+// frequency 100 Hz.
+const CascadedBiQuadFilter::BiQuadCoefficients kHighPassFilterCoefficients = {
+ {0.97261f, -1.94523f, 0.97261f},
+ {-1.94448f, 0.94598f}};
+
+const CascadedBiQuadFilter::BiQuadCoefficients kTransparentCoefficients = {
+ {1.f, 0.f, 0.f},
+ {0.f, 0.f}};
+
+const CascadedBiQuadFilter::BiQuadCoefficients kBlockingCoefficients = {
+ {0.f, 0.f, 0.f},
+ {0.f, 0.f}};
+
+std::vector<float> CreateInputWithIncreasingValues(size_t vector_length) {
+ std::vector<float> v(vector_length);
+ for (size_t k = 0; k < v.size(); ++k) {
+ v[k] = k;
+ }
+ return v;
+}
+
+} // namespace
+
+// Verifies that the filter applies an effect which removes the input signal.
+// The test also verifies that the in-place Process API call works as intended.
+TEST(CascadedBiquadFilter, BlockingConfiguration) {
+ std::vector<float> values = CreateInputWithIncreasingValues(1000);
+
+ CascadedBiQuadFilter filter(kBlockingCoefficients, 1);
+ filter.Process(values);
+
+ EXPECT_EQ(std::vector<float>(1000, 0.f), values);
+}
+
+// Verifies that the filter is able to form a zero-mean output from a
+// non-zero-mean input signal when coefficients for a high-pass filter are
+// applied. The test also verifies that the filter works with multiple biquads.
+TEST(CascadedBiquadFilter, HighPassConfiguration) {
+ std::vector<float> values(1000);
+ for (size_t k = 0; k < values.size(); ++k) {
+ values[k] = 1.f;
+ }
+
+ CascadedBiQuadFilter filter(kHighPassFilterCoefficients, 2);
+ filter.Process(values);
+
+ for (size_t k = values.size() / 2; k < values.size(); ++k) {
+ EXPECT_NEAR(0.f, values[k], 1e-4);
+ }
+}
+
+// Verifies that the reset functionality works as intended.
+TEST(CascadedBiquadFilter, HighPassConfigurationResetFunctionality) {
+ CascadedBiQuadFilter filter(kHighPassFilterCoefficients, 2);
+
+ std::vector<float> values1(100, 1.f);
+ filter.Process(values1);
+
+ filter.Reset();
+
+ std::vector<float> values2(100, 1.f);
+ filter.Process(values2);
+
+ for (size_t k = 0; k < values1.size(); ++k) {
+ EXPECT_EQ(values1[k], values2[k]);
+ }
+}
+
+// Verifies that the filter is able to produce a transparent effect with no
+// impact on the data when the proper coefficients are applied. The test also
+// verifies that the non-in-place Process API call works as intended.
+TEST(CascadedBiquadFilter, TransparentConfiguration) {
+ const std::vector<float> input = CreateInputWithIncreasingValues(1000);
+ std::vector<float> output(input.size());
+
+ CascadedBiQuadFilter filter(kTransparentCoefficients, 1);
+ filter.Process(input, output);
+
+ EXPECT_EQ(input, output);
+}
+
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+// Verifies that the length check on the input and output works for the
+// non-in-place call.
+TEST(CascadedBiquadFilterDeathTest, InputSizeCheckVerification) {
+ const std::vector<float> input = CreateInputWithIncreasingValues(10);
+ std::vector<float> output(input.size() - 1);
+
+ CascadedBiQuadFilter filter(kTransparentCoefficients, 1);
+ EXPECT_DEATH(filter.Process(input, output), "");
+}
+#endif
+
+// Verifies the conversion from zero, pole, gain to filter coefficients for
+// lowpass filter.
+TEST(CascadedBiquadFilter, BiQuadParamLowPass) {
+ CascadedBiQuadFilter::BiQuadParam param(
+ {-1.0f, 0.0f}, {0.23146901f, 0.39514232f}, 0.1866943331163784f);
+ CascadedBiQuadFilter::BiQuad filter(param);
+ const float epsilon = 1e-6f;
+ EXPECT_NEAR(filter.coefficients.b[0], 0.18669433f, epsilon);
+ EXPECT_NEAR(filter.coefficients.b[1], 0.37338867f, epsilon);
+ EXPECT_NEAR(filter.coefficients.b[2], 0.18669433f, epsilon);
+ EXPECT_NEAR(filter.coefficients.a[0], -0.46293803f, epsilon);
+ EXPECT_NEAR(filter.coefficients.a[1], 0.20971536f, epsilon);
+}
+
+// Verifies the conversion from zero, pole, gain to filter coefficients for
+// highpass filter.
+TEST(CascadedBiquadFilter, BiQuadParamHighPass) {
+ CascadedBiQuadFilter::BiQuadParam param(
+ {1.0f, 0.0f}, {0.72712179f, 0.21296904f}, 0.75707637533388494f);
+ CascadedBiQuadFilter::BiQuad filter(param);
+ const float epsilon = 1e-6f;
+ EXPECT_NEAR(filter.coefficients.b[0], 0.75707638f, epsilon);
+ EXPECT_NEAR(filter.coefficients.b[1], -1.51415275f, epsilon);
+ EXPECT_NEAR(filter.coefficients.b[2], 0.75707638f, epsilon);
+ EXPECT_NEAR(filter.coefficients.a[0], -1.45424359f, epsilon);
+ EXPECT_NEAR(filter.coefficients.a[1], 0.57406192f, epsilon);
+}
+
+// Verifies the conversion from zero, pole, gain to filter coefficients for
+// bandpass filter.
+TEST(CascadedBiquadFilter, BiQuadParamBandPass) {
+ CascadedBiQuadFilter::BiQuadParam param(
+ {1.0f, 0.0f}, {1.11022302e-16f, 0.71381051f}, 0.2452372752527856f, true);
+ CascadedBiQuadFilter::BiQuad filter(param);
+ const float epsilon = 1e-6f;
+ EXPECT_NEAR(filter.coefficients.b[0], 0.24523728f, epsilon);
+ EXPECT_NEAR(filter.coefficients.b[1], 0.f, epsilon);
+ EXPECT_NEAR(filter.coefficients.b[2], -0.24523728f, epsilon);
+ EXPECT_NEAR(filter.coefficients.a[0], -2.22044605e-16f, epsilon);
+ EXPECT_NEAR(filter.coefficients.a[1], 5.09525449e-01f, epsilon);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/utility/delay_estimator.cc b/third_party/libwebrtc/modules/audio_processing/utility/delay_estimator.cc
new file mode 100644
index 0000000000..6868392f6f
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/utility/delay_estimator.cc
@@ -0,0 +1,708 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/utility/delay_estimator.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+#include <algorithm>
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+namespace {
+
+// The number of right shifts for scaling depends linearly on the number of
+// bits in the far-end binary spectrum.
+static const int kShiftsAtZero = 13; // Right shifts at zero binary spectrum.
+static const int kShiftsLinearSlope = 3;
+
+static const int32_t kProbabilityOffset = 1024; // 2 in Q9.
+static const int32_t kProbabilityLowerLimit = 8704; // 17 in Q9.
+static const int32_t kProbabilityMinSpread = 2816; // 5.5 in Q9.
+
+// Robust validation settings
+static const float kHistogramMax = 3000.f;
+static const float kLastHistogramMax = 250.f;
+static const float kMinHistogramThreshold = 1.5f;
+static const int kMinRequiredHits = 10;
+static const int kMaxHitsWhenPossiblyNonCausal = 10;
+static const int kMaxHitsWhenPossiblyCausal = 1000;
+static const float kQ14Scaling = 1.f / (1 << 14); // Scaling by 2^14 to get Q0.
+static const float kFractionSlope = 0.05f;
+static const float kMinFractionWhenPossiblyCausal = 0.5f;
+static const float kMinFractionWhenPossiblyNonCausal = 0.25f;
+
+} // namespace
+
+// Counts and returns the number of set bits in a 32-bit word.
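+// This is the classic parallel ("SWAR") population count with octal masks;
+// for example, BitCount(0xB) returns 3.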
+static int BitCount(uint32_t u32) {
+ uint32_t tmp =
+ u32 - ((u32 >> 1) & 033333333333) - ((u32 >> 2) & 011111111111);
+ tmp = ((tmp + (tmp >> 3)) & 030707070707);
+ tmp = (tmp + (tmp >> 6));
+ tmp = (tmp + (tmp >> 12) + (tmp >> 24)) & 077;
+
+ return ((int)tmp);
+}
+
+// Compares the `binary_vector` with all rows of the `binary_matrix` and counts
+// per row the number of times they have the same value.
+//
+// Inputs:
+// - binary_vector : binary "vector" stored in a 32-bit word
+// - binary_matrix : binary "matrix" stored as a vector of 32-bit words
+// - matrix_size : size of binary "matrix"
+//
+// Output:
+// - bit_counts : "Vector" stored as an int32_t array, containing for each
+// row the number of times the matrix row and the
+// input vector have the same value
+//
+static void BitCountComparison(uint32_t binary_vector,
+ const uint32_t* binary_matrix,
+ int matrix_size,
+ int32_t* bit_counts) {
+ int n = 0;
+
+ // Compare `binary_vector` with all rows of the `binary_matrix`
+ for (; n < matrix_size; n++) {
+ bit_counts[n] = (int32_t)BitCount(binary_vector ^ binary_matrix[n]);
+ }
+}
+
+// Collects necessary statistics for the HistogramBasedValidation(). This
+// function has to be called prior to calling HistogramBasedValidation(). The
+// statistics updated and used by the HistogramBasedValidation() are:
+// 1. the number of `candidate_hits`, which states for how long we have had the
+// same `candidate_delay`
+// 2. the `histogram` of candidate delays over time. This histogram is
+// weighted with respect to a reliability measure and time-varying to cope
+// with possible delay shifts.
+// For further description see commented code.
+//
+// Inputs:
+// - candidate_delay : The delay to validate.
+// - valley_depth_q14 : The cost function has a valley/minimum at the
+// `candidate_delay` location. `valley_depth_q14` is the
+// cost function difference between the minimum and
+// maximum locations. The value is in the Q14 domain.
+// - valley_level_q14 : The cost function value at the minimum, in Q14.
+static void UpdateRobustValidationStatistics(BinaryDelayEstimator* self,
+ int candidate_delay,
+ int32_t valley_depth_q14,
+ int32_t valley_level_q14) {
+ const float valley_depth = valley_depth_q14 * kQ14Scaling;
+ float decrease_in_last_set = valley_depth;
+ const int max_hits_for_slow_change = (candidate_delay < self->last_delay)
+ ? kMaxHitsWhenPossiblyNonCausal
+ : kMaxHitsWhenPossiblyCausal;
+ int i = 0;
+
+ RTC_DCHECK_EQ(self->history_size, self->farend->history_size);
+ // Reset `candidate_hits` if we have a new candidate.
+ if (candidate_delay != self->last_candidate_delay) {
+ self->candidate_hits = 0;
+ self->last_candidate_delay = candidate_delay;
+ }
+ self->candidate_hits++;
+
+ // The `histogram` is updated differently across the bins.
+ // 1. The `candidate_delay` histogram bin is increased with the
+ // `valley_depth`, which is a simple measure of how reliable the
+ // `candidate_delay` is. The histogram is not increased above
+ // `kHistogramMax`.
+ self->histogram[candidate_delay] += valley_depth;
+ if (self->histogram[candidate_delay] > kHistogramMax) {
+ self->histogram[candidate_delay] = kHistogramMax;
+ }
+ // 2. The histogram bins in the neighborhood of `candidate_delay` are
+ // unaffected. The neighborhood is defined as x + {-2, -1, 0, 1}.
+ // 3. The histogram bins in the neighborhood of `last_delay` are decreased
+ // with `decrease_in_last_set`. This value equals the difference between
+ // the cost function values at the locations `candidate_delay` and
+ // `last_delay` until we reach `max_hits_for_slow_change` consecutive hits
+ // at the `candidate_delay`. If we exceed this amount of hits the
+ // `candidate_delay` is a "potential" candidate and we start decreasing
+ // these histogram bins more rapidly with `valley_depth`.
+ if (self->candidate_hits < max_hits_for_slow_change) {
+ decrease_in_last_set =
+ (self->mean_bit_counts[self->compare_delay] - valley_level_q14) *
+ kQ14Scaling;
+ }
+ // 4. All other bins are decreased with `valley_depth`.
+ // TODO(bjornv): Investigate how to make this loop more efficient. Split up
+ // the loop? Remove parts that don't add much.
+ for (i = 0; i < self->history_size; ++i) {
+ int is_in_last_set = (i >= self->last_delay - 2) &&
+ (i <= self->last_delay + 1) && (i != candidate_delay);
+ int is_in_candidate_set =
+ (i >= candidate_delay - 2) && (i <= candidate_delay + 1);
+ self->histogram[i] -=
+ decrease_in_last_set * is_in_last_set +
+ valley_depth * (!is_in_last_set && !is_in_candidate_set);
+ // 5. No histogram bin can go below 0.
+ if (self->histogram[i] < 0) {
+ self->histogram[i] = 0;
+ }
+ }
+}
+
+// Validates the `candidate_delay`, estimated in WebRtc_ProcessBinarySpectrum(),
+// based on a combination of counting concurring hits and a modified histogram
+// of recent delay estimates. In brief, a candidate is valid (returns 1) if it
+// is the most likely according to the histogram. There are a couple of
+// exceptions that are worth mentioning:
+// 1. If the `candidate_delay` < `last_delay` it can be that we are in a
+// non-causal state, breaking a possible echo control algorithm. Hence, we
+// open up for a quicker change by allowing the change even if the
+// `candidate_delay` is not the most likely one according to the histogram.
+// 2. There's a minimum number of hits (kMinRequiredHits) and the histogram
+// value has to reach a minimum (kMinHistogramThreshold) to be valid.
+// 3. The action also depends on the filter length used for echo control.
+// If the delay difference is larger than what the filter can capture, we
+// also move quicker towards a change.
+// For further description see commented code.
+//
+// Input:
+// - candidate_delay : The delay to validate.
+//
+// Return value:
+// - is_histogram_valid : 1 - The `candidate_delay` is valid.
+// 0 - Otherwise.
+static int HistogramBasedValidation(const BinaryDelayEstimator* self,
+ int candidate_delay) {
+ float fraction = 1.f;
+ float histogram_threshold = self->histogram[self->compare_delay];
+ const int delay_difference = candidate_delay - self->last_delay;
+ int is_histogram_valid = 0;
+
+ // The histogram based validation of `candidate_delay` is done by comparing
+ // the `histogram` at bin `candidate_delay` with a `histogram_threshold`.
+ // This `histogram_threshold` equals a `fraction` of the `histogram` at bin
+ // `last_delay`. The `fraction` is a piecewise linear function of the
+ // `delay_difference` between the `candidate_delay` and the `last_delay`
+ // allowing for a quicker move if
+ // i) a potential echo control filter cannot handle these large differences.
+ // ii) keeping `last_delay` instead of updating to `candidate_delay` could
+ // force an echo control into a non-causal state.
+ // We further require the histogram to have reached a minimum value of
+ // `kMinHistogramThreshold`. In addition, we also require the number of
+ // `candidate_hits` to be more than `kMinRequiredHits` to remove spurious
+ // values.
+
+ // Calculate a comparison histogram value (`histogram_threshold`) that is
+ // depending on the distance between the `candidate_delay` and `last_delay`.
+ // TODO(bjornv): How much can we gain by turning the fraction calculation
+ // into tables?
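+ // Example: with `allowed_offset` = 5 and `delay_difference` = 10, the
+ // fraction becomes 1 - 0.05 * (10 - 5) = 0.75.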
+ if (delay_difference > self->allowed_offset) {
+ fraction = 1.f - kFractionSlope * (delay_difference - self->allowed_offset);
+ fraction = (fraction > kMinFractionWhenPossiblyCausal
+ ? fraction
+ : kMinFractionWhenPossiblyCausal);
+ } else if (delay_difference < 0) {
+ fraction =
+ kMinFractionWhenPossiblyNonCausal - kFractionSlope * delay_difference;
+ fraction = (fraction > 1.f ? 1.f : fraction);
+ }
+ histogram_threshold *= fraction;
+ histogram_threshold =
+ (histogram_threshold > kMinHistogramThreshold ? histogram_threshold
+ : kMinHistogramThreshold);
+
+ is_histogram_valid =
+ (self->histogram[candidate_delay] >= histogram_threshold) &&
+ (self->candidate_hits > kMinRequiredHits);
+
+ return is_histogram_valid;
+}
+
+// Performs a robust validation of the `candidate_delay` estimated in
+// WebRtc_ProcessBinarySpectrum(). The algorithm takes the
+// `is_instantaneous_valid` and the `is_histogram_valid` and combines them
+// into a robust validation. The HistogramBasedValidation() has to be called
+// prior to this call.
+// For further description on how the combination is done, see commented code.
+//
+// Inputs:
+// - candidate_delay : The delay to validate.
+// - is_instantaneous_valid : The instantaneous validation performed in
+// WebRtc_ProcessBinarySpectrum().
+// - is_histogram_valid : The histogram based validation.
+//
+// Return value:
+// - is_robust : 1 - The candidate_delay is valid according to a
+// combination of the two inputs.
+// : 0 - Otherwise.
+static int RobustValidation(const BinaryDelayEstimator* self,
+ int candidate_delay,
+ int is_instantaneous_valid,
+ int is_histogram_valid) {
+ int is_robust = 0;
+
+ // The final robust validation is based on the two algorithms; 1) the
+ // `is_instantaneous_valid` and 2) the histogram-based one, with the result
+ // stored in `is_histogram_valid`.
+ // i) Before we actually have a valid estimate (`last_delay` == -2), we say
+ // a candidate is valid if either algorithm states so
+ // (`is_instantaneous_valid` OR `is_histogram_valid`).
+ is_robust =
+ (self->last_delay < 0) && (is_instantaneous_valid || is_histogram_valid);
+ // ii) Otherwise, we need both algorithms to be certain
+ // (`is_instantaneous_valid` AND `is_histogram_valid`)
+ is_robust |= is_instantaneous_valid && is_histogram_valid;
+ // iii) With one exception, i.e., the histogram based algorithm can overrule
+ // the instantaneous one if `is_histogram_valid` = 1 and the histogram
+ // is significantly strong.
+ is_robust |= is_histogram_valid &&
+ (self->histogram[candidate_delay] > self->last_delay_histogram);
+
+ return is_robust;
+}
+
+void WebRtc_FreeBinaryDelayEstimatorFarend(BinaryDelayEstimatorFarend* self) {
+ if (self == NULL) {
+ return;
+ }
+
+ free(self->binary_far_history);
+ self->binary_far_history = NULL;
+
+ free(self->far_bit_counts);
+ self->far_bit_counts = NULL;
+
+ free(self);
+}
+
+BinaryDelayEstimatorFarend* WebRtc_CreateBinaryDelayEstimatorFarend(
+ int history_size) {
+ BinaryDelayEstimatorFarend* self = NULL;
+
+ if (history_size > 1) {
+ // Sanity conditions fulfilled.
+ self = static_cast<BinaryDelayEstimatorFarend*>(
+ malloc(sizeof(BinaryDelayEstimatorFarend)));
+ }
+ if (self == NULL) {
+ return NULL;
+ }
+
+ self->history_size = 0;
+ self->binary_far_history = NULL;
+ self->far_bit_counts = NULL;
+ if (WebRtc_AllocateFarendBufferMemory(self, history_size) == 0) {
+ WebRtc_FreeBinaryDelayEstimatorFarend(self);
+ self = NULL;
+ }
+ return self;
+}
+
+int WebRtc_AllocateFarendBufferMemory(BinaryDelayEstimatorFarend* self,
+ int history_size) {
+ RTC_DCHECK(self);
+ // (Re-)Allocate memory for history buffers.
+ self->binary_far_history = static_cast<uint32_t*>(
+ realloc(self->binary_far_history,
+ history_size * sizeof(*self->binary_far_history)));
+ self->far_bit_counts = static_cast<int*>(realloc(
+ self->far_bit_counts, history_size * sizeof(*self->far_bit_counts)));
+ if ((self->binary_far_history == NULL) || (self->far_bit_counts == NULL)) {
+ history_size = 0;
+ }
+ // Fill with zeros if we have expanded the buffers.
+ if (history_size > self->history_size) {
+ int size_diff = history_size - self->history_size;
+ memset(&self->binary_far_history[self->history_size], 0,
+ sizeof(*self->binary_far_history) * size_diff);
+ memset(&self->far_bit_counts[self->history_size], 0,
+ sizeof(*self->far_bit_counts) * size_diff);
+ }
+ self->history_size = history_size;
+
+ return self->history_size;
+}
+
+void WebRtc_InitBinaryDelayEstimatorFarend(BinaryDelayEstimatorFarend* self) {
+ RTC_DCHECK(self);
+ memset(self->binary_far_history, 0, sizeof(uint32_t) * self->history_size);
+ memset(self->far_bit_counts, 0, sizeof(int) * self->history_size);
+}
+
+void WebRtc_SoftResetBinaryDelayEstimatorFarend(
+ BinaryDelayEstimatorFarend* self,
+ int delay_shift) {
+ int abs_shift = abs(delay_shift);
+ int shift_size = 0;
+ int dest_index = 0;
+ int src_index = 0;
+ int padding_index = 0;
+
+ RTC_DCHECK(self);
+ shift_size = self->history_size - abs_shift;
+ RTC_DCHECK_GT(shift_size, 0);
+ if (delay_shift == 0) {
+ return;
+ } else if (delay_shift > 0) {
+ dest_index = abs_shift;
+ } else if (delay_shift < 0) {
+ src_index = abs_shift;
+ padding_index = shift_size;
+ }
+
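+ // Example: with `history_size` = 5 and `delay_shift` = 2, elements [0, 2]
+ // of each buffer move to [2, 4] and elements [0, 1] are zeroed.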
+ // Shift and zero pad buffers.
+ memmove(&self->binary_far_history[dest_index],
+ &self->binary_far_history[src_index],
+ sizeof(*self->binary_far_history) * shift_size);
+ memset(&self->binary_far_history[padding_index], 0,
+ sizeof(*self->binary_far_history) * abs_shift);
+ memmove(&self->far_bit_counts[dest_index], &self->far_bit_counts[src_index],
+ sizeof(*self->far_bit_counts) * shift_size);
+ memset(&self->far_bit_counts[padding_index], 0,
+ sizeof(*self->far_bit_counts) * abs_shift);
+}
+
+void WebRtc_AddBinaryFarSpectrum(BinaryDelayEstimatorFarend* handle,
+ uint32_t binary_far_spectrum) {
+ RTC_DCHECK(handle);
+ // Shift binary spectrum history and insert current `binary_far_spectrum`.
+ memmove(&(handle->binary_far_history[1]), &(handle->binary_far_history[0]),
+ (handle->history_size - 1) * sizeof(uint32_t));
+ handle->binary_far_history[0] = binary_far_spectrum;
+
+ // Shift history of far-end binary spectrum bit counts and insert bit count
+ // of current `binary_far_spectrum`.
+ memmove(&(handle->far_bit_counts[1]), &(handle->far_bit_counts[0]),
+ (handle->history_size - 1) * sizeof(int));
+ handle->far_bit_counts[0] = BitCount(binary_far_spectrum);
+}
+
+void WebRtc_FreeBinaryDelayEstimator(BinaryDelayEstimator* self) {
+ if (self == NULL) {
+ return;
+ }
+
+ free(self->mean_bit_counts);
+ self->mean_bit_counts = NULL;
+
+ free(self->bit_counts);
+ self->bit_counts = NULL;
+
+ free(self->binary_near_history);
+ self->binary_near_history = NULL;
+
+ free(self->histogram);
+ self->histogram = NULL;
+
+ // BinaryDelayEstimator does not have ownership of `farend`, hence we do not
+ // free the memory here. That should be handled separately by the user.
+ self->farend = NULL;
+
+ free(self);
+}
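+
+// Editorial sketch (not part of the upstream file): because the estimator
+// only borrows `farend`, tear down in this order.
+#if 0  // Illustrative only.
+void TearDownSketch(BinaryDelayEstimator* estimator,
+                    BinaryDelayEstimatorFarend* farend) {
+  WebRtc_FreeBinaryDelayEstimator(estimator);  // Leaves `farend` untouched.
+  WebRtc_FreeBinaryDelayEstimatorFarend(farend);
+}
+#endif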
+
+BinaryDelayEstimator* WebRtc_CreateBinaryDelayEstimator(
+ BinaryDelayEstimatorFarend* farend,
+ int max_lookahead) {
+ BinaryDelayEstimator* self = NULL;
+
+ if ((farend != NULL) && (max_lookahead >= 0)) {
+ // Sanity conditions fulfilled.
+ self = static_cast<BinaryDelayEstimator*>(
+ malloc(sizeof(BinaryDelayEstimator)));
+ }
+ if (self == NULL) {
+ return NULL;
+ }
+
+ self->farend = farend;
+ self->near_history_size = max_lookahead + 1;
+ self->history_size = 0;
+ self->robust_validation_enabled = 0; // Disabled by default.
+ self->allowed_offset = 0;
+
+ self->lookahead = max_lookahead;
+
+ // Allocate memory for spectrum and history buffers.
+ self->mean_bit_counts = NULL;
+ self->bit_counts = NULL;
+ self->histogram = NULL;
+ self->binary_near_history = static_cast<uint32_t*>(
+ malloc((max_lookahead + 1) * sizeof(*self->binary_near_history)));
+ if (self->binary_near_history == NULL ||
+ WebRtc_AllocateHistoryBufferMemory(self, farend->history_size) == 0) {
+ WebRtc_FreeBinaryDelayEstimator(self);
+ self = NULL;
+ }
+
+ return self;
+}
+
+int WebRtc_AllocateHistoryBufferMemory(BinaryDelayEstimator* self,
+ int history_size) {
+ BinaryDelayEstimatorFarend* far = self->farend;
+ // (Re-)Allocate memory for spectrum and history buffers.
+ if (history_size != far->history_size) {
+ // Only update far-end buffers if we need.
+ history_size = WebRtc_AllocateFarendBufferMemory(far, history_size);
+ }
+ // The extra array element in `mean_bit_counts` and `histogram` is a dummy
+ // element only used while `last_delay` == -2, i.e., before we have a valid
+ // estimate.
+ self->mean_bit_counts = static_cast<int32_t*>(
+ realloc(self->mean_bit_counts,
+ (history_size + 1) * sizeof(*self->mean_bit_counts)));
+ self->bit_counts = static_cast<int32_t*>(
+ realloc(self->bit_counts, history_size * sizeof(*self->bit_counts)));
+ self->histogram = static_cast<float*>(
+ realloc(self->histogram, (history_size + 1) * sizeof(*self->histogram)));
+
+ if ((self->mean_bit_counts == NULL) || (self->bit_counts == NULL) ||
+ (self->histogram == NULL)) {
+ history_size = 0;
+ }
+ // Fill with zeros if we have expanded the buffers.
+ if (history_size > self->history_size) {
+ int size_diff = history_size - self->history_size;
+ memset(&self->mean_bit_counts[self->history_size], 0,
+ sizeof(*self->mean_bit_counts) * size_diff);
+ memset(&self->bit_counts[self->history_size], 0,
+ sizeof(*self->bit_counts) * size_diff);
+ memset(&self->histogram[self->history_size], 0,
+ sizeof(*self->histogram) * size_diff);
+ }
+ self->history_size = history_size;
+
+ return self->history_size;
+}
+
+void WebRtc_InitBinaryDelayEstimator(BinaryDelayEstimator* self) {
+ int i = 0;
+ RTC_DCHECK(self);
+
+ memset(self->bit_counts, 0, sizeof(int32_t) * self->history_size);
+ memset(self->binary_near_history, 0,
+ sizeof(uint32_t) * self->near_history_size);
+ for (i = 0; i <= self->history_size; ++i) {
+ self->mean_bit_counts[i] = (20 << 9); // 20 in Q9.
+ self->histogram[i] = 0.f;
+ }
+ self->minimum_probability = kMaxBitCountsQ9; // 32 in Q9.
+ self->last_delay_probability = (int)kMaxBitCountsQ9; // 32 in Q9.
+
+ // Default return value if we're unable to estimate. -1 is used for errors.
+ self->last_delay = -2;
+
+ self->last_candidate_delay = -2;
+ self->compare_delay = self->history_size;
+ self->candidate_hits = 0;
+ self->last_delay_histogram = 0.f;
+}
+
+int WebRtc_SoftResetBinaryDelayEstimator(BinaryDelayEstimator* self,
+ int delay_shift) {
+ int lookahead = 0;
+ RTC_DCHECK(self);
+ lookahead = self->lookahead;
+ self->lookahead -= delay_shift;
+ if (self->lookahead < 0) {
+ self->lookahead = 0;
+ }
+ if (self->lookahead > self->near_history_size - 1) {
+ self->lookahead = self->near_history_size - 1;
+ }
+ return lookahead - self->lookahead;
+}
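+
+// Worked example (editorial): with `lookahead` == 10, a `delay_shift` of 3
+// leaves `lookahead` at 7 and returns 3; a `delay_shift` of 15 clamps
+// `lookahead` at 0 and returns 10.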
+
+int WebRtc_ProcessBinarySpectrum(BinaryDelayEstimator* self,
+ uint32_t binary_near_spectrum) {
+ int i = 0;
+ int candidate_delay = -1;
+ int valid_candidate = 0;
+
+ int32_t value_best_candidate = kMaxBitCountsQ9;
+ int32_t value_worst_candidate = 0;
+ int32_t valley_depth = 0;
+
+ RTC_DCHECK(self);
+ if (self->farend->history_size != self->history_size) {
+    // Non-matching history sizes.
+ return -1;
+ }
+ if (self->near_history_size > 1) {
+ // If we apply lookahead, shift near-end binary spectrum history. Insert
+ // current `binary_near_spectrum` and pull out the delayed one.
+ memmove(&(self->binary_near_history[1]), &(self->binary_near_history[0]),
+ (self->near_history_size - 1) * sizeof(uint32_t));
+ self->binary_near_history[0] = binary_near_spectrum;
+ binary_near_spectrum = self->binary_near_history[self->lookahead];
+ }
+
+ // Compare with delayed spectra and store the `bit_counts` for each delay.
+ BitCountComparison(binary_near_spectrum, self->farend->binary_far_history,
+ self->history_size, self->bit_counts);
+
+ // Update `mean_bit_counts`, which is the smoothed version of `bit_counts`.
+ for (i = 0; i < self->history_size; i++) {
+ // `bit_counts` is constrained to [0, 32], meaning we can smooth with a
+ // factor up to 2^26. We use Q9.
+ int32_t bit_count = (self->bit_counts[i] << 9); // Q9.
+
+    // Update `mean_bit_counts` only when the far-end signal has something to
+    // contribute. If `far_bit_counts` is zero the far-end signal is weak and
+    // we likely have a poor echo condition, hence don't update.
+ if (self->farend->far_bit_counts[i] > 0) {
+ // Make number of right shifts piecewise linear w.r.t. `far_bit_counts`.
+ int shifts = kShiftsAtZero;
+ shifts -= (kShiftsLinearSlope * self->farend->far_bit_counts[i]) >> 4;
+ WebRtc_MeanEstimatorFix(bit_count, shifts, &(self->mean_bit_counts[i]));
+ }
+ }
+
+ // Find `candidate_delay`, `value_best_candidate` and `value_worst_candidate`
+ // of `mean_bit_counts`.
+ for (i = 0; i < self->history_size; i++) {
+ if (self->mean_bit_counts[i] < value_best_candidate) {
+ value_best_candidate = self->mean_bit_counts[i];
+ candidate_delay = i;
+ }
+ if (self->mean_bit_counts[i] > value_worst_candidate) {
+ value_worst_candidate = self->mean_bit_counts[i];
+ }
+ }
+ valley_depth = value_worst_candidate - value_best_candidate;
+
+  // The `value_best_candidate` is a good indicator of the probability of
+  // `candidate_delay` being an accurate delay (a small `value_best_candidate`
+  // means a good binary match). In the following sections we make a decision
+  // whether to update `last_delay` or not.
+  // 1) If the difference in bit counts between the best and the worst delay
+  //    candidates is too small we consider the situation to be unreliable and
+  //    don't update `last_delay`.
+  // 2) If the situation is reliable we update `last_delay` if the value of the
+  //    best candidate delay is less than
+  //    i) an adaptive threshold `minimum_probability`, or
+  //    ii) the corresponding value `last_delay_probability`, updated at this
+  //        time instant.
+
+ // Update `minimum_probability`.
+ if ((self->minimum_probability > kProbabilityLowerLimit) &&
+ (valley_depth > kProbabilityMinSpread)) {
+ // The "hard" threshold can't be lower than 17 (in Q9).
+ // The valley in the curve also has to be distinct, i.e., the
+ // difference between `value_worst_candidate` and `value_best_candidate` has
+ // to be large enough.
+ int32_t threshold = value_best_candidate + kProbabilityOffset;
+ if (threshold < kProbabilityLowerLimit) {
+ threshold = kProbabilityLowerLimit;
+ }
+ if (self->minimum_probability > threshold) {
+ self->minimum_probability = threshold;
+ }
+ }
+ // Update `last_delay_probability`.
+ // We use a Markov type model, i.e., a slowly increasing level over time.
+ self->last_delay_probability++;
+ // Validate `candidate_delay`. We have a reliable instantaneous delay
+ // estimate if
+ // 1) The valley is distinct enough (`valley_depth` > `kProbabilityOffset`)
+ // and
+ // 2) The depth of the valley is deep enough
+ // (`value_best_candidate` < `minimum_probability`)
+ // and deeper than the best estimate so far
+ // (`value_best_candidate` < `last_delay_probability`)
+ valid_candidate = ((valley_depth > kProbabilityOffset) &&
+ ((value_best_candidate < self->minimum_probability) ||
+ (value_best_candidate < self->last_delay_probability)));
+
+ // Check for nonstationary farend signal.
+ const bool non_stationary_farend =
+ std::any_of(self->farend->far_bit_counts,
+ self->farend->far_bit_counts + self->history_size,
+ [](int a) { return a > 0; });
+
+ if (non_stationary_farend) {
+ // Only update the validation statistics when the farend is nonstationary
+ // as the underlying estimates are otherwise frozen.
+ UpdateRobustValidationStatistics(self, candidate_delay, valley_depth,
+ value_best_candidate);
+ }
+
+ if (self->robust_validation_enabled) {
+ int is_histogram_valid = HistogramBasedValidation(self, candidate_delay);
+ valid_candidate = RobustValidation(self, candidate_delay, valid_candidate,
+ is_histogram_valid);
+ }
+
+ // Only update the delay estimate when the farend is nonstationary and when
+ // a valid delay candidate is available.
+ if (non_stationary_farend && valid_candidate) {
+ if (candidate_delay != self->last_delay) {
+ self->last_delay_histogram =
+ (self->histogram[candidate_delay] > kLastHistogramMax
+ ? kLastHistogramMax
+ : self->histogram[candidate_delay]);
+ // Adjust the histogram if we made a change to `last_delay`, though it was
+ // not the most likely one according to the histogram.
+ if (self->histogram[candidate_delay] <
+ self->histogram[self->compare_delay]) {
+ self->histogram[self->compare_delay] = self->histogram[candidate_delay];
+ }
+ }
+ self->last_delay = candidate_delay;
+ if (value_best_candidate < self->last_delay_probability) {
+ self->last_delay_probability = value_best_candidate;
+ }
+ self->compare_delay = self->last_delay;
+ }
+
+ return self->last_delay;
+}
+
+int WebRtc_binary_last_delay(BinaryDelayEstimator* self) {
+ RTC_DCHECK(self);
+ return self->last_delay;
+}
+
+float WebRtc_binary_last_delay_quality(BinaryDelayEstimator* self) {
+ float quality = 0;
+ RTC_DCHECK(self);
+
+ if (self->robust_validation_enabled) {
+ // Simply a linear function of the histogram height at delay estimate.
+ quality = self->histogram[self->compare_delay] / kHistogramMax;
+ } else {
+ // Note that `last_delay_probability` states how deep the minimum of the
+ // cost function is, so it is rather an error probability.
+ quality = (float)(kMaxBitCountsQ9 - self->last_delay_probability) /
+ kMaxBitCountsQ9;
+ if (quality < 0) {
+ quality = 0;
+ }
+ }
+ return quality;
+}
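+
+// Worked example (editorial): with robust validation disabled and
+// `last_delay_probability` == 4096 (8 in Q9), the quality becomes
+// (kMaxBitCountsQ9 - 4096) / kMaxBitCountsQ9 = (16384 - 4096) / 16384 = 0.75.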
+
+void WebRtc_MeanEstimatorFix(int32_t new_value,
+ int factor,
+ int32_t* mean_value) {
+ int32_t diff = new_value - *mean_value;
+
+ // mean_new = mean_value + ((new_value - mean_value) >> factor);
+ if (diff < 0) {
+ diff = -((-diff) >> factor);
+ } else {
+ diff = (diff >> factor);
+ }
+ *mean_value += diff;
+}
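+
+// Worked example (editorial): starting from *mean_value == 0 with
+// new_value == 512 and factor == 2, each call adds a quarter of the remaining
+// difference, so the mean converges as 128, 224, 296, 350, ...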
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/utility/delay_estimator.h b/third_party/libwebrtc/modules/audio_processing/utility/delay_estimator.h
new file mode 100644
index 0000000000..b6fc36a759
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/utility/delay_estimator.h
@@ -0,0 +1,257 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+// Performs delay estimation on binary converted spectra.
+// The return value is 0 for OK and -1 for error, unless otherwise stated.
+
+#ifndef MODULES_AUDIO_PROCESSING_UTILITY_DELAY_ESTIMATOR_H_
+#define MODULES_AUDIO_PROCESSING_UTILITY_DELAY_ESTIMATOR_H_
+
+#include <stdint.h>
+
+namespace webrtc {
+
+static const int32_t kMaxBitCountsQ9 = (32 << 9); // 32 matching bits in Q9.
+
+typedef struct {
+ // Pointer to bit counts.
+ int* far_bit_counts;
+ // Binary history variables.
+ uint32_t* binary_far_history;
+ int history_size;
+} BinaryDelayEstimatorFarend;
+
+typedef struct {
+ // Pointer to bit counts.
+ int32_t* mean_bit_counts;
+ // Array only used locally in ProcessBinarySpectrum() but whose size is
+ // determined at run-time.
+ int32_t* bit_counts;
+
+ // Binary history variables.
+ uint32_t* binary_near_history;
+ int near_history_size;
+ int history_size;
+
+ // Delay estimation variables.
+ int32_t minimum_probability;
+ int last_delay_probability;
+
+ // Delay memory.
+ int last_delay;
+
+ // Robust validation
+ int robust_validation_enabled;
+ int allowed_offset;
+ int last_candidate_delay;
+ int compare_delay;
+ int candidate_hits;
+ float* histogram;
+ float last_delay_histogram;
+
+ // For dynamically changing the lookahead when using SoftReset...().
+ int lookahead;
+
+ // Far-end binary spectrum history buffer etc.
+ BinaryDelayEstimatorFarend* farend;
+} BinaryDelayEstimator;
+
+// Releases the memory allocated by
+// WebRtc_CreateBinaryDelayEstimatorFarend(...).
+// Input:
+// - self : Pointer to the binary delay estimation far-end
+// instance which is the return value of
+// WebRtc_CreateBinaryDelayEstimatorFarend().
+//
+void WebRtc_FreeBinaryDelayEstimatorFarend(BinaryDelayEstimatorFarend* self);
+
+// Allocates the memory needed by the far-end part of the binary delay
+// estimation. The memory needs to be initialized separately through
+// WebRtc_InitBinaryDelayEstimatorFarend(...).
+//
+// Inputs:
+// - history_size : Size of the far-end binary spectrum history.
+//
+// Return value:
+// - BinaryDelayEstimatorFarend*
+// : Created `handle`. If the memory can't be allocated
+// or if any of the input parameters are invalid NULL
+// is returned.
+//
+BinaryDelayEstimatorFarend* WebRtc_CreateBinaryDelayEstimatorFarend(
+ int history_size);
+
+// Re-allocates the buffers.
+//
+// Inputs:
+// - self : Pointer to the binary estimation far-end instance
+// which is the return value of
+// WebRtc_CreateBinaryDelayEstimatorFarend().
+// - history_size : Size of the far-end binary spectrum history.
+//
+// Return value:
+// - history_size : The history size allocated.
+int WebRtc_AllocateFarendBufferMemory(BinaryDelayEstimatorFarend* self,
+ int history_size);
+
+// Initializes the delay estimation far-end instance created with
+// WebRtc_CreateBinaryDelayEstimatorFarend(...).
+//
+// Input:
+// - self : Pointer to the delay estimation far-end instance.
+//
+// Output:
+// - self : Initialized far-end instance.
+//
+void WebRtc_InitBinaryDelayEstimatorFarend(BinaryDelayEstimatorFarend* self);
+
+// Soft resets the delay estimation far-end instance created with
+// WebRtc_CreateBinaryDelayEstimatorFarend(...).
+//
+// Input:
+// - delay_shift : The amount of blocks to shift history buffers.
+//
+void WebRtc_SoftResetBinaryDelayEstimatorFarend(
+ BinaryDelayEstimatorFarend* self,
+ int delay_shift);
+
+// Adds the binary far-end spectrum to the internal far-end history buffer. This
+// spectrum is used as reference when calculating the delay using
+// WebRtc_ProcessBinarySpectrum().
+//
+// Inputs:
+// - self : Pointer to the delay estimation far-end
+// instance.
+// - binary_far_spectrum : Far-end binary spectrum.
+//
+// Output:
+// - self : Updated far-end instance.
+//
+void WebRtc_AddBinaryFarSpectrum(BinaryDelayEstimatorFarend* self,
+ uint32_t binary_far_spectrum);
+
+// Releases the memory allocated by WebRtc_CreateBinaryDelayEstimator(...).
+//
+// Note that BinaryDelayEstimator utilizes BinaryDelayEstimatorFarend, but does
+// not take ownership of it, hence the BinaryDelayEstimator has to be torn down
+// before the far-end.
+//
+// Input:
+// - self : Pointer to the binary delay estimation instance
+// which is the return value of
+// WebRtc_CreateBinaryDelayEstimator().
+//
+void WebRtc_FreeBinaryDelayEstimator(BinaryDelayEstimator* self);
+
+// Allocates the memory needed by the binary delay estimation. The memory needs
+// to be initialized separately through WebRtc_InitBinaryDelayEstimator(...).
+//
+// See WebRtc_CreateDelayEstimator(..) in delay_estimator_wrapper.c for detailed
+// description.
+BinaryDelayEstimator* WebRtc_CreateBinaryDelayEstimator(
+ BinaryDelayEstimatorFarend* farend,
+ int max_lookahead);
+
+// Re-allocates `history_size` dependent buffers. The far-end buffers will be
+// updated at the same time if needed.
+//
+// Input:
+// - self : Pointer to the binary estimation instance which is
+// the return value of
+// WebRtc_CreateBinaryDelayEstimator().
+// - history_size : Size of the history buffers.
+//
+// Return value:
+// - history_size : The history size allocated.
+int WebRtc_AllocateHistoryBufferMemory(BinaryDelayEstimator* self,
+ int history_size);
+
+// Initializes the delay estimation instance created with
+// WebRtc_CreateBinaryDelayEstimator(...).
+//
+// Input:
+// - self : Pointer to the delay estimation instance.
+//
+// Output:
+// - self : Initialized instance.
+//
+void WebRtc_InitBinaryDelayEstimator(BinaryDelayEstimator* self);
+
+// Soft resets the delay estimation instance created with
+// WebRtc_CreateBinaryDelayEstimator(...).
+//
+// Input:
+// - delay_shift : The amount of blocks to shift history buffers.
+//
+// Return value:
+// - actual_shifts : The actual number of shifts performed.
+//
+int WebRtc_SoftResetBinaryDelayEstimator(BinaryDelayEstimator* self,
+ int delay_shift);
+
+// Estimates and returns the delay between the binary far-end and binary near-
+// end spectra. It is assumed the binary far-end spectrum has been added using
+// WebRtc_AddBinaryFarSpectrum() prior to this call. The value will be offset by
+// the lookahead (i.e. the lookahead should be subtracted from the returned
+// value).
+//
+// Inputs:
+// - self : Pointer to the delay estimation instance.
+// - binary_near_spectrum : Near-end binary spectrum of the current block.
+//
+// Output:
+// - self : Updated instance.
+//
+// Return value:
+// - delay : >= 0 - Calculated delay value.
+// -2 - Insufficient data for estimation.
+//
+int WebRtc_ProcessBinarySpectrum(BinaryDelayEstimator* self,
+ uint32_t binary_near_spectrum);
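+
+// Editorial usage sketch (not part of the upstream header); `farend`,
+// `estimator` and `lookahead` are assumed to come from the create calls
+// documented above:
+//
+//   WebRtc_AddBinaryFarSpectrum(farend, far_bits);
+//   int delay = WebRtc_ProcessBinarySpectrum(estimator, near_bits);
+//   if (delay >= 0) {
+//     int causal_delay = delay - lookahead;  // Compensate for the lookahead.
+//   }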
+
+// Returns the last calculated delay updated by the function
+// WebRtc_ProcessBinarySpectrum(...).
+//
+// Input:
+// - self : Pointer to the delay estimation instance.
+//
+// Return value:
+// - delay : >= 0 - Last calculated delay value
+// -2 - Insufficient data for estimation.
+//
+int WebRtc_binary_last_delay(BinaryDelayEstimator* self);
+
+// Returns the estimation quality of the last calculated delay updated by the
+// function WebRtc_ProcessBinarySpectrum(...). The estimation quality is a value
+// in the interval [0, 1]. The higher the value, the better the quality.
+//
+// Return value:
+// - delay_quality : >= 0 - Estimation quality of last calculated
+// delay value.
+float WebRtc_binary_last_delay_quality(BinaryDelayEstimator* self);
+
+// Updates the `mean_value` recursively with a step size of 2^-`factor`. This
+// function is used internally in the Binary Delay Estimator as well as the
+// Fixed point wrapper.
+//
+// Inputs:
+// - new_value : The new value the mean should be updated with.
+// - factor : The step size, in number of right shifts.
+//
+// Input/Output:
+// - mean_value : Pointer to the mean value.
+//
+void WebRtc_MeanEstimatorFix(int32_t new_value,
+ int factor,
+ int32_t* mean_value);
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_UTILITY_DELAY_ESTIMATOR_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/utility/delay_estimator_internal.h b/third_party/libwebrtc/modules/audio_processing/utility/delay_estimator_internal.h
new file mode 100644
index 0000000000..891e20027d
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/utility/delay_estimator_internal.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+// Header file including the delay estimator handle used for testing.
+
+#ifndef MODULES_AUDIO_PROCESSING_UTILITY_DELAY_ESTIMATOR_INTERNAL_H_
+#define MODULES_AUDIO_PROCESSING_UTILITY_DELAY_ESTIMATOR_INTERNAL_H_
+
+#include "modules/audio_processing/utility/delay_estimator.h"
+
+namespace webrtc {
+
+typedef union {
+ float float_;
+ int32_t int32_;
+} SpectrumType;
+
+typedef struct {
+ // Pointers to mean values of spectrum.
+ SpectrumType* mean_far_spectrum;
+ // `mean_far_spectrum` initialization indicator.
+ int far_spectrum_initialized;
+
+ int spectrum_size;
+
+ // Far-end part of binary spectrum based delay estimation.
+ BinaryDelayEstimatorFarend* binary_farend;
+} DelayEstimatorFarend;
+
+typedef struct {
+ // Pointers to mean values of spectrum.
+ SpectrumType* mean_near_spectrum;
+ // `mean_near_spectrum` initialization indicator.
+ int near_spectrum_initialized;
+
+ int spectrum_size;
+
+ // Binary spectrum based delay estimator
+ BinaryDelayEstimator* binary_handle;
+} DelayEstimator;
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_UTILITY_DELAY_ESTIMATOR_INTERNAL_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/utility/delay_estimator_unittest.cc b/third_party/libwebrtc/modules/audio_processing/utility/delay_estimator_unittest.cc
new file mode 100644
index 0000000000..651d836c82
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/utility/delay_estimator_unittest.cc
@@ -0,0 +1,621 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/utility/delay_estimator.h"
+
+#include "modules/audio_processing/utility/delay_estimator_internal.h"
+#include "modules/audio_processing/utility/delay_estimator_wrapper.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+namespace {
+
+enum { kSpectrumSize = 65 };
+// Delay history sizes.
+enum { kMaxDelay = 100 };
+enum { kLookahead = 10 };
+enum { kHistorySize = kMaxDelay + kLookahead };
+// Length of binary spectrum sequence.
+enum { kSequenceLength = 400 };
+
+const int kDifferentHistorySize = 3;
+const int kDifferentLookahead = 1;
+
+const int kEnable[] = {0, 1};
+const size_t kSizeEnable = sizeof(kEnable) / sizeof(*kEnable);
+
+class DelayEstimatorTest : public ::testing::Test {
+ protected:
+ DelayEstimatorTest();
+ void SetUp() override;
+ void TearDown() override;
+
+ void Init();
+ void InitBinary();
+ void VerifyDelay(BinaryDelayEstimator* binary_handle, int offset, int delay);
+ void RunBinarySpectra(BinaryDelayEstimator* binary1,
+ BinaryDelayEstimator* binary2,
+ int near_offset,
+ int lookahead_offset,
+ int far_offset);
+ void RunBinarySpectraTest(int near_offset,
+ int lookahead_offset,
+ int ref_robust_validation,
+ int robust_validation);
+
+ void* handle_;
+ DelayEstimator* self_;
+ void* farend_handle_;
+ DelayEstimatorFarend* farend_self_;
+ BinaryDelayEstimator* binary_;
+ BinaryDelayEstimatorFarend* binary_farend_;
+ int spectrum_size_;
+ // Dummy input spectra.
+ float far_f_[kSpectrumSize];
+ float near_f_[kSpectrumSize];
+ uint16_t far_u16_[kSpectrumSize];
+ uint16_t near_u16_[kSpectrumSize];
+ uint32_t binary_spectrum_[kSequenceLength + kHistorySize];
+};
+
+DelayEstimatorTest::DelayEstimatorTest()
+ : handle_(NULL),
+ self_(NULL),
+ farend_handle_(NULL),
+ farend_self_(NULL),
+ binary_(NULL),
+ binary_farend_(NULL),
+ spectrum_size_(kSpectrumSize) {
+ // Dummy input data are set with more or less arbitrary non-zero values.
+ memset(far_f_, 1, sizeof(far_f_));
+ memset(near_f_, 2, sizeof(near_f_));
+ memset(far_u16_, 1, sizeof(far_u16_));
+ memset(near_u16_, 2, sizeof(near_u16_));
+  // Construct a sequence of binary spectra used to verify the delay estimate.
+  // The `kSequenceLength` has to be long enough for the delay estimation to
+  // leave the initialized state.
+ binary_spectrum_[0] = 1;
+ for (int i = 1; i < (kSequenceLength + kHistorySize); i++) {
+ binary_spectrum_[i] = 3 * binary_spectrum_[i - 1];
+ }
+}
+
+void DelayEstimatorTest::SetUp() {
+ farend_handle_ =
+ WebRtc_CreateDelayEstimatorFarend(kSpectrumSize, kHistorySize);
+ ASSERT_TRUE(farend_handle_ != NULL);
+ farend_self_ = reinterpret_cast<DelayEstimatorFarend*>(farend_handle_);
+ handle_ = WebRtc_CreateDelayEstimator(farend_handle_, kLookahead);
+ ASSERT_TRUE(handle_ != NULL);
+ self_ = reinterpret_cast<DelayEstimator*>(handle_);
+ binary_farend_ = WebRtc_CreateBinaryDelayEstimatorFarend(kHistorySize);
+ ASSERT_TRUE(binary_farend_ != NULL);
+ binary_ = WebRtc_CreateBinaryDelayEstimator(binary_farend_, kLookahead);
+ ASSERT_TRUE(binary_ != NULL);
+}
+
+void DelayEstimatorTest::TearDown() {
+ WebRtc_FreeDelayEstimator(handle_);
+ handle_ = NULL;
+ self_ = NULL;
+ WebRtc_FreeDelayEstimatorFarend(farend_handle_);
+ farend_handle_ = NULL;
+ farend_self_ = NULL;
+ WebRtc_FreeBinaryDelayEstimator(binary_);
+ binary_ = NULL;
+ WebRtc_FreeBinaryDelayEstimatorFarend(binary_farend_);
+ binary_farend_ = NULL;
+}
+
+void DelayEstimatorTest::Init() {
+ // Initialize Delay Estimator
+ EXPECT_EQ(0, WebRtc_InitDelayEstimatorFarend(farend_handle_));
+ EXPECT_EQ(0, WebRtc_InitDelayEstimator(handle_));
+ // Verify initialization.
+ EXPECT_EQ(0, farend_self_->far_spectrum_initialized);
+ EXPECT_EQ(0, self_->near_spectrum_initialized);
+ EXPECT_EQ(-2, WebRtc_last_delay(handle_)); // Delay in initial state.
+ EXPECT_FLOAT_EQ(0, WebRtc_last_delay_quality(handle_)); // Zero quality.
+}
+
+void DelayEstimatorTest::InitBinary() {
+ // Initialize Binary Delay Estimator (far-end part).
+ WebRtc_InitBinaryDelayEstimatorFarend(binary_farend_);
+ // Initialize Binary Delay Estimator
+ WebRtc_InitBinaryDelayEstimator(binary_);
+ // Verify initialization. This does not guarantee a complete check, since
+ // `last_delay` may be equal to -2 before initialization if done on the fly.
+ EXPECT_EQ(-2, binary_->last_delay);
+}
+
+void DelayEstimatorTest::VerifyDelay(BinaryDelayEstimator* binary_handle,
+ int offset,
+ int delay) {
+  // Verify that WebRtc_binary_last_delay() returns the correct delay.
+ EXPECT_EQ(delay, WebRtc_binary_last_delay(binary_handle));
+
+ if (delay != -2) {
+ // Verify correct delay estimate. In the non-causal case the true delay
+ // is equivalent with the `offset`.
+ EXPECT_EQ(offset, delay);
+ }
+}
+
+void DelayEstimatorTest::RunBinarySpectra(BinaryDelayEstimator* binary1,
+ BinaryDelayEstimator* binary2,
+ int near_offset,
+ int lookahead_offset,
+ int far_offset) {
+ int different_validations =
+ binary1->robust_validation_enabled ^ binary2->robust_validation_enabled;
+ WebRtc_InitBinaryDelayEstimatorFarend(binary_farend_);
+ WebRtc_InitBinaryDelayEstimator(binary1);
+ WebRtc_InitBinaryDelayEstimator(binary2);
+ // Verify initialization. This does not guarantee a complete check, since
+ // `last_delay` may be equal to -2 before initialization if done on the fly.
+ EXPECT_EQ(-2, binary1->last_delay);
+ EXPECT_EQ(-2, binary2->last_delay);
+ for (int i = kLookahead; i < (kSequenceLength + kLookahead); i++) {
+ WebRtc_AddBinaryFarSpectrum(binary_farend_,
+ binary_spectrum_[i + far_offset]);
+ int delay_1 = WebRtc_ProcessBinarySpectrum(binary1, binary_spectrum_[i]);
+ int delay_2 = WebRtc_ProcessBinarySpectrum(
+ binary2, binary_spectrum_[i - near_offset]);
+
+ VerifyDelay(binary1, far_offset + kLookahead, delay_1);
+ VerifyDelay(binary2,
+ far_offset + kLookahead + lookahead_offset + near_offset,
+ delay_2);
+ // Expect the two delay estimates to be offset by `lookahead_offset` +
+ // `near_offset` when we have left the initial state.
+ if ((delay_1 != -2) && (delay_2 != -2)) {
+ EXPECT_EQ(delay_1, delay_2 - lookahead_offset - near_offset);
+ }
+ // For the case of identical signals `delay_1` and `delay_2` should match
+ // all the time, unless one of them has robust validation turned on. In
+ // that case the robust validation leaves the initial state faster.
+ if ((near_offset == 0) && (lookahead_offset == 0)) {
+ if (!different_validations) {
+ EXPECT_EQ(delay_1, delay_2);
+ } else {
+ if (binary1->robust_validation_enabled) {
+ EXPECT_GE(delay_1, delay_2);
+ } else {
+ EXPECT_GE(delay_2, delay_1);
+ }
+ }
+ }
+ }
+ // Verify that we have left the initialized state.
+ EXPECT_NE(-2, WebRtc_binary_last_delay(binary1));
+ EXPECT_LT(0, WebRtc_binary_last_delay_quality(binary1));
+ EXPECT_NE(-2, WebRtc_binary_last_delay(binary2));
+ EXPECT_LT(0, WebRtc_binary_last_delay_quality(binary2));
+}
+
+void DelayEstimatorTest::RunBinarySpectraTest(int near_offset,
+ int lookahead_offset,
+ int ref_robust_validation,
+ int robust_validation) {
+ BinaryDelayEstimator* binary2 = WebRtc_CreateBinaryDelayEstimator(
+ binary_farend_, kLookahead + lookahead_offset);
+  // Verify the delay for both causal and non-causal systems. For causal
+  // systems the delay is equivalent to a positive `offset` of the far-end
+  // sequence. For non-causal systems the delay is equivalent to a negative
+  // `offset` of the far-end sequence.
+ binary_->robust_validation_enabled = ref_robust_validation;
+ binary2->robust_validation_enabled = robust_validation;
+ for (int offset = -kLookahead;
+ offset < kMaxDelay - lookahead_offset - near_offset; offset++) {
+ RunBinarySpectra(binary_, binary2, near_offset, lookahead_offset, offset);
+ }
+ WebRtc_FreeBinaryDelayEstimator(binary2);
+ binary2 = NULL;
+ binary_->robust_validation_enabled = 0; // Reset reference.
+}
+
+TEST_F(DelayEstimatorTest, CorrectErrorReturnsOfWrapper) {
+ // In this test we verify correct error returns on invalid API calls.
+
+ // WebRtc_CreateDelayEstimatorFarend() and WebRtc_CreateDelayEstimator()
+ // should return a NULL pointer on invalid input values.
+ // Make sure we have a non-NULL value at start, so we can detect NULL after
+ // create failure.
+ void* handle = farend_handle_;
+ handle = WebRtc_CreateDelayEstimatorFarend(33, kHistorySize);
+ EXPECT_TRUE(handle == NULL);
+ handle = WebRtc_CreateDelayEstimatorFarend(kSpectrumSize, 1);
+ EXPECT_TRUE(handle == NULL);
+
+ handle = handle_;
+ handle = WebRtc_CreateDelayEstimator(NULL, kLookahead);
+ EXPECT_TRUE(handle == NULL);
+ handle = WebRtc_CreateDelayEstimator(farend_handle_, -1);
+ EXPECT_TRUE(handle == NULL);
+
+ // WebRtc_InitDelayEstimatorFarend() and WebRtc_InitDelayEstimator() should
+ // return -1 if we have a NULL pointer as `handle`.
+ EXPECT_EQ(-1, WebRtc_InitDelayEstimatorFarend(NULL));
+ EXPECT_EQ(-1, WebRtc_InitDelayEstimator(NULL));
+
+ // WebRtc_AddFarSpectrumFloat() should return -1 if we have:
+ // 1) NULL pointer as `handle`.
+ // 2) NULL pointer as far-end spectrum.
+ // 3) Incorrect spectrum size.
+ EXPECT_EQ(-1, WebRtc_AddFarSpectrumFloat(NULL, far_f_, spectrum_size_));
+ // Use `farend_handle_` which is properly created at SetUp().
+ EXPECT_EQ(-1,
+ WebRtc_AddFarSpectrumFloat(farend_handle_, NULL, spectrum_size_));
+ EXPECT_EQ(-1, WebRtc_AddFarSpectrumFloat(farend_handle_, far_f_,
+ spectrum_size_ + 1));
+
+ // WebRtc_AddFarSpectrumFix() should return -1 if we have:
+ // 1) NULL pointer as `handle`.
+ // 2) NULL pointer as far-end spectrum.
+ // 3) Incorrect spectrum size.
+ // 4) Too high precision in far-end spectrum (Q-domain > 15).
+ EXPECT_EQ(-1, WebRtc_AddFarSpectrumFix(NULL, far_u16_, spectrum_size_, 0));
+ EXPECT_EQ(-1,
+ WebRtc_AddFarSpectrumFix(farend_handle_, NULL, spectrum_size_, 0));
+ EXPECT_EQ(-1, WebRtc_AddFarSpectrumFix(farend_handle_, far_u16_,
+ spectrum_size_ + 1, 0));
+ EXPECT_EQ(-1, WebRtc_AddFarSpectrumFix(farend_handle_, far_u16_,
+ spectrum_size_, 16));
+
+ // WebRtc_set_history_size() should return -1 if:
+  // 1) `handle` is NULL.
+ // 2) `history_size` <= 1.
+ EXPECT_EQ(-1, WebRtc_set_history_size(NULL, 1));
+ EXPECT_EQ(-1, WebRtc_set_history_size(handle_, 1));
+ // WebRtc_history_size() should return -1 if:
+ // 1) NULL pointer input.
+ EXPECT_EQ(-1, WebRtc_history_size(NULL));
+  // 2) there is a mismatch between history sizes.
+ void* tmp_handle = WebRtc_CreateDelayEstimator(farend_handle_, kHistorySize);
+ EXPECT_EQ(0, WebRtc_InitDelayEstimator(tmp_handle));
+ EXPECT_EQ(kDifferentHistorySize,
+ WebRtc_set_history_size(tmp_handle, kDifferentHistorySize));
+ EXPECT_EQ(kDifferentHistorySize, WebRtc_history_size(tmp_handle));
+ EXPECT_EQ(kHistorySize, WebRtc_set_history_size(handle_, kHistorySize));
+ EXPECT_EQ(-1, WebRtc_history_size(tmp_handle));
+
+ // WebRtc_set_lookahead() should return -1 if we try a value outside the
+  // buffer.
+ EXPECT_EQ(-1, WebRtc_set_lookahead(handle_, kLookahead + 1));
+ EXPECT_EQ(-1, WebRtc_set_lookahead(handle_, -1));
+
+ // WebRtc_set_allowed_offset() should return -1 if we have:
+ // 1) NULL pointer as `handle`.
+ // 2) `allowed_offset` < 0.
+ EXPECT_EQ(-1, WebRtc_set_allowed_offset(NULL, 0));
+ EXPECT_EQ(-1, WebRtc_set_allowed_offset(handle_, -1));
+
+ EXPECT_EQ(-1, WebRtc_get_allowed_offset(NULL));
+
+ // WebRtc_enable_robust_validation() should return -1 if we have:
+ // 1) NULL pointer as `handle`.
+ // 2) Incorrect `enable` value (not 0 or 1).
+ EXPECT_EQ(-1, WebRtc_enable_robust_validation(NULL, kEnable[0]));
+ EXPECT_EQ(-1, WebRtc_enable_robust_validation(handle_, -1));
+ EXPECT_EQ(-1, WebRtc_enable_robust_validation(handle_, 2));
+
+ // WebRtc_is_robust_validation_enabled() should return -1 if we have NULL
+ // pointer as `handle`.
+ EXPECT_EQ(-1, WebRtc_is_robust_validation_enabled(NULL));
+
+ // WebRtc_DelayEstimatorProcessFloat() should return -1 if we have:
+ // 1) NULL pointer as `handle`.
+ // 2) NULL pointer as near-end spectrum.
+ // 3) Incorrect spectrum size.
+  // 4) Non-matching history sizes if multiple delay estimators use the same
+  //    far-end reference.
+ EXPECT_EQ(-1,
+ WebRtc_DelayEstimatorProcessFloat(NULL, near_f_, spectrum_size_));
+ // Use `handle_` which is properly created at SetUp().
+ EXPECT_EQ(-1,
+ WebRtc_DelayEstimatorProcessFloat(handle_, NULL, spectrum_size_));
+ EXPECT_EQ(-1, WebRtc_DelayEstimatorProcessFloat(handle_, near_f_,
+ spectrum_size_ + 1));
+ // `tmp_handle` is already in a non-matching state.
+ EXPECT_EQ(-1, WebRtc_DelayEstimatorProcessFloat(tmp_handle, near_f_,
+ spectrum_size_));
+
+ // WebRtc_DelayEstimatorProcessFix() should return -1 if we have:
+ // 1) NULL pointer as `handle`.
+ // 2) NULL pointer as near-end spectrum.
+ // 3) Incorrect spectrum size.
+ // 4) Too high precision in near-end spectrum (Q-domain > 15).
+  // 5) Non-matching history sizes if multiple delay estimators use the same
+  //    far-end reference.
+ EXPECT_EQ(
+ -1, WebRtc_DelayEstimatorProcessFix(NULL, near_u16_, spectrum_size_, 0));
+ EXPECT_EQ(-1,
+ WebRtc_DelayEstimatorProcessFix(handle_, NULL, spectrum_size_, 0));
+ EXPECT_EQ(-1, WebRtc_DelayEstimatorProcessFix(handle_, near_u16_,
+ spectrum_size_ + 1, 0));
+ EXPECT_EQ(-1, WebRtc_DelayEstimatorProcessFix(handle_, near_u16_,
+ spectrum_size_, 16));
+ // `tmp_handle` is already in a non-matching state.
+ EXPECT_EQ(-1, WebRtc_DelayEstimatorProcessFix(tmp_handle, near_u16_,
+ spectrum_size_, 0));
+ WebRtc_FreeDelayEstimator(tmp_handle);
+
+ // WebRtc_last_delay() should return -1 if we have a NULL pointer as `handle`.
+ EXPECT_EQ(-1, WebRtc_last_delay(NULL));
+
+ // Free any local memory if needed.
+ WebRtc_FreeDelayEstimator(handle);
+}
+
+TEST_F(DelayEstimatorTest, VerifyAllowedOffset) {
+  // `allowed_offset` is set to zero by default.
+ EXPECT_EQ(0, WebRtc_get_allowed_offset(handle_));
+ for (int i = 1; i >= 0; i--) {
+ EXPECT_EQ(0, WebRtc_set_allowed_offset(handle_, i));
+ EXPECT_EQ(i, WebRtc_get_allowed_offset(handle_));
+ Init();
+ // Unaffected over a reset.
+ EXPECT_EQ(i, WebRtc_get_allowed_offset(handle_));
+ }
+}
+
+TEST_F(DelayEstimatorTest, VerifyEnableRobustValidation) {
+ // Disabled by default.
+ EXPECT_EQ(0, WebRtc_is_robust_validation_enabled(handle_));
+ for (size_t i = 0; i < kSizeEnable; ++i) {
+ EXPECT_EQ(0, WebRtc_enable_robust_validation(handle_, kEnable[i]));
+ EXPECT_EQ(kEnable[i], WebRtc_is_robust_validation_enabled(handle_));
+ Init();
+ // Unaffected over a reset.
+ EXPECT_EQ(kEnable[i], WebRtc_is_robust_validation_enabled(handle_));
+ }
+}
+
+TEST_F(DelayEstimatorTest, InitializedSpectrumAfterProcess) {
+  // In this test we verify that the mean spectra are initialized after the
+  // first time we call WebRtc_AddFarSpectrum() and Process() respectively. The
+  // test also verifies that the initialized state is not left for zero
+  // spectra.
+ const float kZerosFloat[kSpectrumSize] = {0.0};
+ const uint16_t kZerosU16[kSpectrumSize] = {0};
+
+ // For floating point operations, process one frame and verify initialization
+ // flag.
+ Init();
+ EXPECT_EQ(0, WebRtc_AddFarSpectrumFloat(farend_handle_, kZerosFloat,
+ spectrum_size_));
+ EXPECT_EQ(0, farend_self_->far_spectrum_initialized);
+ EXPECT_EQ(0,
+ WebRtc_AddFarSpectrumFloat(farend_handle_, far_f_, spectrum_size_));
+ EXPECT_EQ(1, farend_self_->far_spectrum_initialized);
+ EXPECT_EQ(-2, WebRtc_DelayEstimatorProcessFloat(handle_, kZerosFloat,
+ spectrum_size_));
+ EXPECT_EQ(0, self_->near_spectrum_initialized);
+ EXPECT_EQ(
+ -2, WebRtc_DelayEstimatorProcessFloat(handle_, near_f_, spectrum_size_));
+ EXPECT_EQ(1, self_->near_spectrum_initialized);
+
+ // For fixed point operations, process one frame and verify initialization
+ // flag.
+ Init();
+ EXPECT_EQ(0, WebRtc_AddFarSpectrumFix(farend_handle_, kZerosU16,
+ spectrum_size_, 0));
+ EXPECT_EQ(0, farend_self_->far_spectrum_initialized);
+ EXPECT_EQ(
+ 0, WebRtc_AddFarSpectrumFix(farend_handle_, far_u16_, spectrum_size_, 0));
+ EXPECT_EQ(1, farend_self_->far_spectrum_initialized);
+ EXPECT_EQ(-2, WebRtc_DelayEstimatorProcessFix(handle_, kZerosU16,
+ spectrum_size_, 0));
+ EXPECT_EQ(0, self_->near_spectrum_initialized);
+ EXPECT_EQ(-2, WebRtc_DelayEstimatorProcessFix(handle_, near_u16_,
+ spectrum_size_, 0));
+ EXPECT_EQ(1, self_->near_spectrum_initialized);
+}
+
+TEST_F(DelayEstimatorTest, CorrectLastDelay) {
+ // In this test we verify that we get the correct last delay upon valid call.
+ // We simply process the same data until we leave the initialized state
+ // (`last_delay` = -2). Then we compare the Process() output with the
+ // last_delay() call.
+
+ // TODO(bjornv): Update quality values for robust validation.
+ int last_delay = 0;
+ // Floating point operations.
+ Init();
+ for (int i = 0; i < 200; i++) {
+ EXPECT_EQ(
+ 0, WebRtc_AddFarSpectrumFloat(farend_handle_, far_f_, spectrum_size_));
+ last_delay =
+ WebRtc_DelayEstimatorProcessFloat(handle_, near_f_, spectrum_size_);
+ if (last_delay != -2) {
+ EXPECT_EQ(last_delay, WebRtc_last_delay(handle_));
+ if (!WebRtc_is_robust_validation_enabled(handle_)) {
+ EXPECT_FLOAT_EQ(7203.f / kMaxBitCountsQ9,
+ WebRtc_last_delay_quality(handle_));
+ }
+ break;
+ }
+ }
+ // Verify that we have left the initialized state.
+ EXPECT_NE(-2, WebRtc_last_delay(handle_));
+ EXPECT_LT(0, WebRtc_last_delay_quality(handle_));
+
+ // Fixed point operations.
+ Init();
+ for (int i = 0; i < 200; i++) {
+ EXPECT_EQ(0, WebRtc_AddFarSpectrumFix(farend_handle_, far_u16_,
+ spectrum_size_, 0));
+ last_delay =
+ WebRtc_DelayEstimatorProcessFix(handle_, near_u16_, spectrum_size_, 0);
+ if (last_delay != -2) {
+ EXPECT_EQ(last_delay, WebRtc_last_delay(handle_));
+ if (!WebRtc_is_robust_validation_enabled(handle_)) {
+ EXPECT_FLOAT_EQ(7203.f / kMaxBitCountsQ9,
+ WebRtc_last_delay_quality(handle_));
+ }
+ break;
+ }
+ }
+ // Verify that we have left the initialized state.
+ EXPECT_NE(-2, WebRtc_last_delay(handle_));
+ EXPECT_LT(0, WebRtc_last_delay_quality(handle_));
+}
+
+TEST_F(DelayEstimatorTest, CorrectErrorReturnsOfBinaryEstimatorFarend) {
+ // In this test we verify correct output on invalid API calls to the Binary
+ // Delay Estimator (far-end part).
+
+ BinaryDelayEstimatorFarend* binary = binary_farend_;
+  // WebRtc_CreateBinaryDelayEstimatorFarend() should return NULL if the input
+  // history size is less than 2. This is to make sure the buffer shifting
+ // applies properly.
+ // Make sure we have a non-NULL value at start, so we can detect NULL after
+ // create failure.
+ binary = WebRtc_CreateBinaryDelayEstimatorFarend(1);
+ EXPECT_TRUE(binary == NULL);
+}
+
+TEST_F(DelayEstimatorTest, CorrectErrorReturnsOfBinaryEstimator) {
+ // In this test we verify correct output on invalid API calls to the Binary
+ // Delay Estimator.
+
+ BinaryDelayEstimator* binary_handle = binary_;
+  // WebRtc_CreateBinaryDelayEstimator() should return NULL if we have a NULL
+ // pointer as `binary_farend` or invalid input values. Upon failure, the
+ // `binary_handle` should be NULL.
+ // Make sure we have a non-NULL value at start, so we can detect NULL after
+ // create failure.
+ binary_handle = WebRtc_CreateBinaryDelayEstimator(NULL, kLookahead);
+ EXPECT_TRUE(binary_handle == NULL);
+ binary_handle = WebRtc_CreateBinaryDelayEstimator(binary_farend_, -1);
+ EXPECT_TRUE(binary_handle == NULL);
+}
+
+TEST_F(DelayEstimatorTest, MeanEstimatorFix) {
+  // In this test we verify that we update the mean value in the correct
+  // direction only. With "direction" we mean increase or decrease.
+
+ int32_t mean_value = 4000;
+ int32_t mean_value_before = mean_value;
+ int32_t new_mean_value = mean_value * 2;
+
+ // Increasing `mean_value`.
+ WebRtc_MeanEstimatorFix(new_mean_value, 10, &mean_value);
+ EXPECT_LT(mean_value_before, mean_value);
+ EXPECT_GT(new_mean_value, mean_value);
+
+ // Decreasing `mean_value`.
+ new_mean_value = mean_value / 2;
+ mean_value_before = mean_value;
+ WebRtc_MeanEstimatorFix(new_mean_value, 10, &mean_value);
+ EXPECT_GT(mean_value_before, mean_value);
+ EXPECT_LT(new_mean_value, mean_value);
+}
+
+TEST_F(DelayEstimatorTest, ExactDelayEstimateMultipleNearSameSpectrum) {
+ // In this test we verify that we get the correct delay estimates if we shift
+ // the signal accordingly. We create two Binary Delay Estimators and feed them
+ // with the same signals, so they should output the same results.
+ // We verify both causal and non-causal delays.
+ // For these noise free signals, the robust validation should not have an
+ // impact, hence we turn robust validation on/off for both reference and
+ // delayed near end.
+
+ for (size_t i = 0; i < kSizeEnable; ++i) {
+ for (size_t j = 0; j < kSizeEnable; ++j) {
+ RunBinarySpectraTest(0, 0, kEnable[i], kEnable[j]);
+ }
+ }
+}
+
+TEST_F(DelayEstimatorTest, ExactDelayEstimateMultipleNearDifferentSpectrum) {
+ // In this test we use the same setup as above, but we now feed the two Binary
+ // Delay Estimators with different signals, so they should output different
+ // results.
+ // For these noise free signals, the robust validation should not have an
+ // impact, hence we turn robust validation on/off for both reference and
+ // delayed near end.
+
+ const int kNearOffset = 1;
+ for (size_t i = 0; i < kSizeEnable; ++i) {
+ for (size_t j = 0; j < kSizeEnable; ++j) {
+ RunBinarySpectraTest(kNearOffset, 0, kEnable[i], kEnable[j]);
+ }
+ }
+}
+
+TEST_F(DelayEstimatorTest, ExactDelayEstimateMultipleNearDifferentLookahead) {
+ // In this test we use the same setup as above, feeding the two Binary
+ // Delay Estimators with the same signals. The difference is that we create
+ // them with different lookahead.
+ // For these noise free signals, the robust validation should not have an
+ // impact, hence we turn robust validation on/off for both reference and
+ // delayed near end.
+
+ const int kLookaheadOffset = 1;
+ for (size_t i = 0; i < kSizeEnable; ++i) {
+ for (size_t j = 0; j < kSizeEnable; ++j) {
+ RunBinarySpectraTest(0, kLookaheadOffset, kEnable[i], kEnable[j]);
+ }
+ }
+}
+
+TEST_F(DelayEstimatorTest, AllowedOffsetNoImpactWhenRobustValidationDisabled) {
+ // The same setup as in ExactDelayEstimateMultipleNearSameSpectrum with the
+ // difference that `allowed_offset` is set for the reference binary delay
+ // estimator.
+
+ binary_->allowed_offset = 10;
+ RunBinarySpectraTest(0, 0, 0, 0);
+ binary_->allowed_offset = 0; // Reset reference.
+}
+
+TEST_F(DelayEstimatorTest, VerifyLookaheadAtCreate) {
+ void* farend_handle =
+ WebRtc_CreateDelayEstimatorFarend(kSpectrumSize, kMaxDelay);
+ ASSERT_TRUE(farend_handle != NULL);
+ void* handle = WebRtc_CreateDelayEstimator(farend_handle, kLookahead);
+ ASSERT_TRUE(handle != NULL);
+ EXPECT_EQ(kLookahead, WebRtc_lookahead(handle));
+ WebRtc_FreeDelayEstimator(handle);
+ WebRtc_FreeDelayEstimatorFarend(farend_handle);
+}
+
+TEST_F(DelayEstimatorTest, VerifyLookaheadIsSetAndKeptAfterInit) {
+ EXPECT_EQ(kLookahead, WebRtc_lookahead(handle_));
+ EXPECT_EQ(kDifferentLookahead,
+ WebRtc_set_lookahead(handle_, kDifferentLookahead));
+ EXPECT_EQ(kDifferentLookahead, WebRtc_lookahead(handle_));
+ EXPECT_EQ(0, WebRtc_InitDelayEstimatorFarend(farend_handle_));
+ EXPECT_EQ(kDifferentLookahead, WebRtc_lookahead(handle_));
+ EXPECT_EQ(0, WebRtc_InitDelayEstimator(handle_));
+ EXPECT_EQ(kDifferentLookahead, WebRtc_lookahead(handle_));
+}
+
+TEST_F(DelayEstimatorTest, VerifyHistorySizeAtCreate) {
+ EXPECT_EQ(kHistorySize, WebRtc_history_size(handle_));
+}
+
+TEST_F(DelayEstimatorTest, VerifyHistorySizeIsSetAndKeptAfterInit) {
+ EXPECT_EQ(kHistorySize, WebRtc_history_size(handle_));
+ EXPECT_EQ(kDifferentHistorySize,
+ WebRtc_set_history_size(handle_, kDifferentHistorySize));
+ EXPECT_EQ(kDifferentHistorySize, WebRtc_history_size(handle_));
+ EXPECT_EQ(0, WebRtc_InitDelayEstimator(handle_));
+ EXPECT_EQ(kDifferentHistorySize, WebRtc_history_size(handle_));
+ EXPECT_EQ(0, WebRtc_InitDelayEstimatorFarend(farend_handle_));
+ EXPECT_EQ(kDifferentHistorySize, WebRtc_history_size(handle_));
+}
+
+// TODO(bjornv): Add tests for SoftReset...(...).
+
+} // namespace
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/utility/delay_estimator_wrapper.cc b/third_party/libwebrtc/modules/audio_processing/utility/delay_estimator_wrapper.cc
new file mode 100644
index 0000000000..3b1409cc0b
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/utility/delay_estimator_wrapper.cc
@@ -0,0 +1,489 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/utility/delay_estimator_wrapper.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+#include "modules/audio_processing/utility/delay_estimator.h"
+#include "modules/audio_processing/utility/delay_estimator_internal.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+// Only bit `kBandFirst` through bit `kBandLast` are processed and
+// `kBandLast` - `kBandFirst` must be < 32.
+constexpr int kBandFirst = 12;
+constexpr int kBandLast = 43;
+
+static __inline uint32_t SetBit(uint32_t in, int pos) {
+ uint32_t mask = (1 << pos);
+ uint32_t out = (in | mask);
+
+ return out;
+}
+
+// Calculates the mean recursively. Same version as WebRtc_MeanEstimatorFix(),
+// but for float.
+//
+// Inputs:
+// - new_value : New additional value.
+// - scale : Scale for smoothing (should be less than 1.0).
+//
+// Input/Output:
+// - mean_value : Pointer to the mean value for updating.
+//
+static void MeanEstimatorFloat(float new_value,
+ float scale,
+ float* mean_value) {
+ RTC_DCHECK_LT(scale, 1.0f);
+ *mean_value += (new_value - *mean_value) * scale;
+}
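+
+// Editorial note: BinarySpectrumFloat() below uses a scale of 1/64, so each
+// update moves the mean 1/64 of the way towards `new_value`. This matches the
+// fixed-point path, which calls WebRtc_MeanEstimatorFix() with `factor` == 6
+// (a right shift by 6, i.e. division by 64).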
+
+// Computes the binary spectrum by comparing the input `spectrum` with a
+// `threshold_spectrum`. Float and fixed point versions.
+//
+// Inputs:
+// - spectrum : Spectrum of which the binary spectrum should be
+// calculated.
+// - threshold_spectrum : Threshold spectrum with which the input
+// spectrum is compared.
+// Return:
+// - out : Binary spectrum.
+//
+static uint32_t BinarySpectrumFix(const uint16_t* spectrum,
+ SpectrumType* threshold_spectrum,
+ int q_domain,
+ int* threshold_initialized) {
+ int i = kBandFirst;
+ uint32_t out = 0;
+
+ RTC_DCHECK_LT(q_domain, 16);
+
+ if (!(*threshold_initialized)) {
+ // Set the `threshold_spectrum` to half the input `spectrum` as starting
+ // value. This speeds up the convergence.
+ for (i = kBandFirst; i <= kBandLast; i++) {
+ if (spectrum[i] > 0) {
+ // Convert input spectrum from Q(`q_domain`) to Q15.
+ int32_t spectrum_q15 = ((int32_t)spectrum[i]) << (15 - q_domain);
+ threshold_spectrum[i].int32_ = (spectrum_q15 >> 1);
+ *threshold_initialized = 1;
+ }
+ }
+ }
+ for (i = kBandFirst; i <= kBandLast; i++) {
+ // Convert input spectrum from Q(`q_domain`) to Q15.
+ int32_t spectrum_q15 = ((int32_t)spectrum[i]) << (15 - q_domain);
+ // Update the `threshold_spectrum`.
+ WebRtc_MeanEstimatorFix(spectrum_q15, 6, &(threshold_spectrum[i].int32_));
+ // Convert `spectrum` at current frequency bin to a binary value.
+ if (spectrum_q15 > threshold_spectrum[i].int32_) {
+ out = SetBit(out, i - kBandFirst);
+ }
+ }
+
+ return out;
+}
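+
+// Worked example (editorial): a bin value of 100 in Q5 is converted to Q15 as
+// 100 << (15 - 5) == 102400 before thresholding, so spectra delivered in
+// different Q-domains are compared on a common scale.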
+
+static uint32_t BinarySpectrumFloat(const float* spectrum,
+ SpectrumType* threshold_spectrum,
+ int* threshold_initialized) {
+ int i = kBandFirst;
+ uint32_t out = 0;
+ const float kScale = 1 / 64.0;
+
+ if (!(*threshold_initialized)) {
+ // Set the `threshold_spectrum` to half the input `spectrum` as starting
+ // value. This speeds up the convergence.
+ for (i = kBandFirst; i <= kBandLast; i++) {
+ if (spectrum[i] > 0.0f) {
+ threshold_spectrum[i].float_ = (spectrum[i] / 2);
+ *threshold_initialized = 1;
+ }
+ }
+ }
+
+ for (i = kBandFirst; i <= kBandLast; i++) {
+ // Update the `threshold_spectrum`.
+ MeanEstimatorFloat(spectrum[i], kScale, &(threshold_spectrum[i].float_));
+ // Convert `spectrum` at current frequency bin to a binary value.
+ if (spectrum[i] > threshold_spectrum[i].float_) {
+ out = SetBit(out, i - kBandFirst);
+ }
+ }
+
+ return out;
+}
+
+void WebRtc_FreeDelayEstimatorFarend(void* handle) {
+ DelayEstimatorFarend* self = (DelayEstimatorFarend*)handle;
+
+ if (handle == NULL) {
+ return;
+ }
+
+ free(self->mean_far_spectrum);
+ self->mean_far_spectrum = NULL;
+
+ WebRtc_FreeBinaryDelayEstimatorFarend(self->binary_farend);
+ self->binary_farend = NULL;
+
+ free(self);
+}
+
+void* WebRtc_CreateDelayEstimatorFarend(int spectrum_size, int history_size) {
+ DelayEstimatorFarend* self = NULL;
+
+ // Check if the sub band used in the delay estimation is small enough to fit
+ // the binary spectra in a uint32_t.
+ static_assert(kBandLast - kBandFirst < 32, "");
+
+ if (spectrum_size >= kBandLast) {
+ self = static_cast<DelayEstimatorFarend*>(
+ malloc(sizeof(DelayEstimatorFarend)));
+ }
+
+ if (self != NULL) {
+ int memory_fail = 0;
+
+ // Allocate memory for the binary far-end spectrum handling.
+ self->binary_farend = WebRtc_CreateBinaryDelayEstimatorFarend(history_size);
+ memory_fail |= (self->binary_farend == NULL);
+
+ // Allocate memory for spectrum buffers.
+ self->mean_far_spectrum = static_cast<SpectrumType*>(
+ malloc(spectrum_size * sizeof(SpectrumType)));
+ memory_fail |= (self->mean_far_spectrum == NULL);
+
+ self->spectrum_size = spectrum_size;
+
+ if (memory_fail) {
+ WebRtc_FreeDelayEstimatorFarend(self);
+ self = NULL;
+ }
+ }
+
+ return self;
+}
+
+int WebRtc_InitDelayEstimatorFarend(void* handle) {
+ DelayEstimatorFarend* self = (DelayEstimatorFarend*)handle;
+
+ if (self == NULL) {
+ return -1;
+ }
+
+ // Initialize far-end part of binary delay estimator.
+ WebRtc_InitBinaryDelayEstimatorFarend(self->binary_farend);
+
+ // Set averaged far and near end spectra to zero.
+ memset(self->mean_far_spectrum, 0,
+ sizeof(SpectrumType) * self->spectrum_size);
+ // Reset initialization indicators.
+ self->far_spectrum_initialized = 0;
+
+ return 0;
+}
+
+void WebRtc_SoftResetDelayEstimatorFarend(void* handle, int delay_shift) {
+ DelayEstimatorFarend* self = (DelayEstimatorFarend*)handle;
+ RTC_DCHECK(self);
+ WebRtc_SoftResetBinaryDelayEstimatorFarend(self->binary_farend, delay_shift);
+}
+
+int WebRtc_AddFarSpectrumFix(void* handle,
+ const uint16_t* far_spectrum,
+ int spectrum_size,
+ int far_q) {
+ DelayEstimatorFarend* self = (DelayEstimatorFarend*)handle;
+ uint32_t binary_spectrum = 0;
+
+ if (self == NULL) {
+ return -1;
+ }
+ if (far_spectrum == NULL) {
+ // Empty far end spectrum.
+ return -1;
+ }
+ if (spectrum_size != self->spectrum_size) {
+ // Data sizes don't match.
+ return -1;
+ }
+ if (far_q > 15) {
+ // If `far_q` is larger than 15 we cannot guarantee no wrap around.
+ return -1;
+ }
+
+ // Get binary spectrum.
+ binary_spectrum = BinarySpectrumFix(far_spectrum, self->mean_far_spectrum,
+ far_q, &(self->far_spectrum_initialized));
+ WebRtc_AddBinaryFarSpectrum(self->binary_farend, binary_spectrum);
+
+ return 0;
+}
+
+int WebRtc_AddFarSpectrumFloat(void* handle,
+ const float* far_spectrum,
+ int spectrum_size) {
+ DelayEstimatorFarend* self = (DelayEstimatorFarend*)handle;
+ uint32_t binary_spectrum = 0;
+
+ if (self == NULL) {
+ return -1;
+ }
+ if (far_spectrum == NULL) {
+ // Empty far end spectrum.
+ return -1;
+ }
+ if (spectrum_size != self->spectrum_size) {
+ // Data sizes don't match.
+ return -1;
+ }
+
+ // Get binary spectrum.
+ binary_spectrum = BinarySpectrumFloat(far_spectrum, self->mean_far_spectrum,
+ &(self->far_spectrum_initialized));
+ WebRtc_AddBinaryFarSpectrum(self->binary_farend, binary_spectrum);
+
+ return 0;
+}
+
+void WebRtc_FreeDelayEstimator(void* handle) {
+ DelayEstimator* self = (DelayEstimator*)handle;
+
+ if (handle == NULL) {
+ return;
+ }
+
+ free(self->mean_near_spectrum);
+ self->mean_near_spectrum = NULL;
+
+ WebRtc_FreeBinaryDelayEstimator(self->binary_handle);
+ self->binary_handle = NULL;
+
+ free(self);
+}
+
+void* WebRtc_CreateDelayEstimator(void* farend_handle, int max_lookahead) {
+ DelayEstimator* self = NULL;
+ DelayEstimatorFarend* farend = (DelayEstimatorFarend*)farend_handle;
+
+ if (farend_handle != NULL) {
+ self = static_cast<DelayEstimator*>(malloc(sizeof(DelayEstimator)));
+ }
+
+ if (self != NULL) {
+ int memory_fail = 0;
+
+ // Allocate memory for the farend spectrum handling.
+ self->binary_handle =
+ WebRtc_CreateBinaryDelayEstimator(farend->binary_farend, max_lookahead);
+ memory_fail |= (self->binary_handle == NULL);
+
+ // Allocate memory for spectrum buffers.
+ self->mean_near_spectrum = static_cast<SpectrumType*>(
+ malloc(farend->spectrum_size * sizeof(SpectrumType)));
+ memory_fail |= (self->mean_near_spectrum == NULL);
+
+ self->spectrum_size = farend->spectrum_size;
+
+ if (memory_fail) {
+ WebRtc_FreeDelayEstimator(self);
+ self = NULL;
+ }
+ }
+
+ return self;
+}
+
+int WebRtc_InitDelayEstimator(void* handle) {
+ DelayEstimator* self = (DelayEstimator*)handle;
+
+ if (self == NULL) {
+ return -1;
+ }
+
+ // Initialize binary delay estimator.
+ WebRtc_InitBinaryDelayEstimator(self->binary_handle);
+
+ // Set averaged far and near end spectra to zero.
+ memset(self->mean_near_spectrum, 0,
+ sizeof(SpectrumType) * self->spectrum_size);
+ // Reset initialization indicators.
+ self->near_spectrum_initialized = 0;
+
+ return 0;
+}
+
+int WebRtc_SoftResetDelayEstimator(void* handle, int delay_shift) {
+ DelayEstimator* self = (DelayEstimator*)handle;
+ RTC_DCHECK(self);
+ return WebRtc_SoftResetBinaryDelayEstimator(self->binary_handle, delay_shift);
+}
+
+int WebRtc_set_history_size(void* handle, int history_size) {
+ DelayEstimator* self = static_cast<DelayEstimator*>(handle);
+
+ if ((self == NULL) || (history_size <= 1)) {
+ return -1;
+ }
+ return WebRtc_AllocateHistoryBufferMemory(self->binary_handle, history_size);
+}
+
+int WebRtc_history_size(const void* handle) {
+ const DelayEstimator* self = static_cast<const DelayEstimator*>(handle);
+
+ if (self == NULL) {
+ return -1;
+ }
+ if (self->binary_handle->farend->history_size !=
+ self->binary_handle->history_size) {
+    // Non-matching history sizes.
+ return -1;
+ }
+ return self->binary_handle->history_size;
+}
+
+int WebRtc_set_lookahead(void* handle, int lookahead) {
+ DelayEstimator* self = (DelayEstimator*)handle;
+ RTC_DCHECK(self);
+ RTC_DCHECK(self->binary_handle);
+ if ((lookahead > self->binary_handle->near_history_size - 1) ||
+ (lookahead < 0)) {
+ return -1;
+ }
+ self->binary_handle->lookahead = lookahead;
+ return self->binary_handle->lookahead;
+}
+
+int WebRtc_lookahead(void* handle) {
+ DelayEstimator* self = (DelayEstimator*)handle;
+ RTC_DCHECK(self);
+ RTC_DCHECK(self->binary_handle);
+ return self->binary_handle->lookahead;
+}
+
+int WebRtc_set_allowed_offset(void* handle, int allowed_offset) {
+ DelayEstimator* self = (DelayEstimator*)handle;
+
+ if ((self == NULL) || (allowed_offset < 0)) {
+ return -1;
+ }
+ self->binary_handle->allowed_offset = allowed_offset;
+ return 0;
+}
+
+int WebRtc_get_allowed_offset(const void* handle) {
+ const DelayEstimator* self = (const DelayEstimator*)handle;
+
+ if (self == NULL) {
+ return -1;
+ }
+ return self->binary_handle->allowed_offset;
+}
+
+int WebRtc_enable_robust_validation(void* handle, int enable) {
+ DelayEstimator* self = (DelayEstimator*)handle;
+
+ if (self == NULL) {
+ return -1;
+ }
+ if ((enable < 0) || (enable > 1)) {
+ return -1;
+ }
+ RTC_DCHECK(self->binary_handle);
+ self->binary_handle->robust_validation_enabled = enable;
+ return 0;
+}
+
+int WebRtc_is_robust_validation_enabled(const void* handle) {
+ const DelayEstimator* self = (const DelayEstimator*)handle;
+
+ if (self == NULL) {
+ return -1;
+ }
+ return self->binary_handle->robust_validation_enabled;
+}
+
+int WebRtc_DelayEstimatorProcessFix(void* handle,
+ const uint16_t* near_spectrum,
+ int spectrum_size,
+ int near_q) {
+ DelayEstimator* self = (DelayEstimator*)handle;
+ uint32_t binary_spectrum = 0;
+
+ if (self == NULL) {
+ return -1;
+ }
+ if (near_spectrum == NULL) {
+    // Empty near-end spectrum.
+ return -1;
+ }
+ if (spectrum_size != self->spectrum_size) {
+ // Data sizes don't match.
+ return -1;
+ }
+ if (near_q > 15) {
+    // If `near_q` is larger than 15, we cannot guarantee no wrap-around.
+ return -1;
+ }
+
+ // Get binary spectra.
+ binary_spectrum =
+ BinarySpectrumFix(near_spectrum, self->mean_near_spectrum, near_q,
+ &(self->near_spectrum_initialized));
+
+ return WebRtc_ProcessBinarySpectrum(self->binary_handle, binary_spectrum);
+}
+
+int WebRtc_DelayEstimatorProcessFloat(void* handle,
+ const float* near_spectrum,
+ int spectrum_size) {
+ DelayEstimator* self = (DelayEstimator*)handle;
+ uint32_t binary_spectrum = 0;
+
+ if (self == NULL) {
+ return -1;
+ }
+ if (near_spectrum == NULL) {
+    // Empty near-end spectrum.
+ return -1;
+ }
+ if (spectrum_size != self->spectrum_size) {
+ // Data sizes don't match.
+ return -1;
+ }
+
+ // Get binary spectrum.
+ binary_spectrum = BinarySpectrumFloat(near_spectrum, self->mean_near_spectrum,
+ &(self->near_spectrum_initialized));
+
+ return WebRtc_ProcessBinarySpectrum(self->binary_handle, binary_spectrum);
+}
+
+int WebRtc_last_delay(void* handle) {
+ DelayEstimator* self = (DelayEstimator*)handle;
+
+ if (self == NULL) {
+ return -1;
+ }
+
+ return WebRtc_binary_last_delay(self->binary_handle);
+}
+
+float WebRtc_last_delay_quality(void* handle) {
+ DelayEstimator* self = (DelayEstimator*)handle;
+ RTC_DCHECK(self);
+ return WebRtc_binary_last_delay_quality(self->binary_handle);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/utility/delay_estimator_wrapper.h b/third_party/libwebrtc/modules/audio_processing/utility/delay_estimator_wrapper.h
new file mode 100644
index 0000000000..a90cbe31cb
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/utility/delay_estimator_wrapper.h
@@ -0,0 +1,248 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+// Performs delay estimation on a block-by-block basis.
+// Unless otherwise stated, functions return 0 on success and -1 on error.
+
+#ifndef MODULES_AUDIO_PROCESSING_UTILITY_DELAY_ESTIMATOR_WRAPPER_H_
+#define MODULES_AUDIO_PROCESSING_UTILITY_DELAY_ESTIMATOR_WRAPPER_H_
+
+#include <stdint.h>
+
+namespace webrtc {
+
+// Releases the memory allocated by WebRtc_CreateDelayEstimatorFarend(...)
+void WebRtc_FreeDelayEstimatorFarend(void* handle);
+
+// Allocates the memory needed by the far-end part of the delay estimation. The
+// memory needs to be initialized separately through
+// WebRtc_InitDelayEstimatorFarend(...).
+//
+// Inputs:
+// - spectrum_size : Size of the spectrum used both in far-end and
+// near-end. Used to allocate memory for spectrum
+// specific buffers.
+// - history_size : The far-end history buffer size. A change in buffer
+// size can be forced with WebRtc_set_history_size().
+// Note that the maximum delay which can be estimated is
+// determined together with WebRtc_set_lookahead().
+//
+// Return value:
+//      - void*         : Created `handle`. If the memory can't be allocated,
+//                        or if any of the input parameters are invalid, NULL
+//                        is returned.
+void* WebRtc_CreateDelayEstimatorFarend(int spectrum_size, int history_size);
+
+// Initializes the far-end part of the delay estimation instance returned by
+// WebRtc_CreateDelayEstimatorFarend(...)
+int WebRtc_InitDelayEstimatorFarend(void* handle);
+
+// Soft resets the far-end part of the delay estimation instance returned by
+// WebRtc_CreateDelayEstimatorFarend(...).
+// Input:
+//      - delay_shift   : The number of blocks to shift the history buffers.
+void WebRtc_SoftResetDelayEstimatorFarend(void* handle, int delay_shift);
+
+// Adds the far-end spectrum to the far-end history buffer. This spectrum is
+// used as the reference when calculating the delay with
+// WebRtc_DelayEstimatorProcessFix()/Float().
+//
+// Inputs:
+// - far_spectrum : Far-end spectrum.
+// - spectrum_size : The size of the data arrays (same for both far- and
+// near-end).
+// - far_q : The Q-domain of the far-end data.
+//
+// Output:
+// - handle : Updated far-end instance.
+//
+int WebRtc_AddFarSpectrumFix(void* handle,
+ const uint16_t* far_spectrum,
+ int spectrum_size,
+ int far_q);
+
+// See WebRtc_AddFarSpectrumFix() for description.
+int WebRtc_AddFarSpectrumFloat(void* handle,
+ const float* far_spectrum,
+ int spectrum_size);
+
+// Releases the memory allocated by WebRtc_CreateDelayEstimator(...)
+void WebRtc_FreeDelayEstimator(void* handle);
+
+// Allocates the memory needed by the delay estimation. The memory needs to be
+// initialized separately through WebRtc_InitDelayEstimator(...).
+//
+// Inputs:
+// - farend_handle : Pointer to the far-end part of the delay estimation
+// instance created prior to this call using
+// WebRtc_CreateDelayEstimatorFarend().
+//
+// Note that WebRtc_CreateDelayEstimator does not take
+// ownership of `farend_handle`, which has to be torn
+// down properly after this instance.
+//
+// - max_lookahead : Maximum amount of non-causal lookahead allowed. The
+// actual amount of lookahead used can be controlled by
+// WebRtc_set_lookahead(...). The default `lookahead` is
+// set to `max_lookahead` at create time. Use
+// WebRtc_set_lookahead(...) before start if a different
+// value is desired.
+//
+// Using lookahead can detect cases in which a near-end
+// signal occurs before the corresponding far-end signal.
+// It will delay the estimate for the current block by an
+// equal amount, and the returned values will be offset
+// by it.
+//
+// A value of zero is the typical no-lookahead case.
+// This also represents the minimum delay which can be
+// estimated.
+//
+// Note that the effective range of delay estimates is
+// [-`lookahead`,... ,`history_size`-`lookahead`)
+// where `history_size` is set through
+// WebRtc_set_history_size().
+//
+// Return value:
+//      - void*         : Created `handle`. If the memory can't be allocated,
+//                        or if any of the input parameters are invalid, NULL
+//                        is returned.
+void* WebRtc_CreateDelayEstimator(void* farend_handle, int max_lookahead);
+
+// Initializes the delay estimation instance returned by
+// WebRtc_CreateDelayEstimator(...)
+int WebRtc_InitDelayEstimator(void* handle);
+
+// Soft resets the delay estimation instance returned by
+// WebRtc_CreateDelayEstimator(...)
+// Input:
+//      - delay_shift   : The number of blocks to shift the history buffers.
+//
+// Return value:
+// - actual_shifts : The actual number of shifts performed.
+int WebRtc_SoftResetDelayEstimator(void* handle, int delay_shift);
+
+// Sets the effective `history_size` used. Valid values start at 2; at least
+// two delays are needed to compare in order to perform an estimate. If
+// `history_size` is changed, buffers are reallocated, filling in with zeros
+// if necessary.
+// Note that changing `history_size` affects the buffers of both the far-end
+// and the near-end. Hence it is important to set all DelayEstimators that use
+// the same reference far-end to the same `history_size` value.
+// Inputs:
+// - handle : Pointer to the delay estimation instance.
+// - history_size : Effective history size to be used.
+// Return value:
+//  - new_history_size : The new history size used. If the memory could not
+//                       be allocated, 0 is returned.
+int WebRtc_set_history_size(void* handle, int history_size);
+
+// Returns the history_size currently used.
+// Input:
+// - handle : Pointer to the delay estimation instance.
+int WebRtc_history_size(const void* handle);
+
+// Sets the amount of `lookahead` to use. Valid values are [0, max_lookahead]
+// where `max_lookahead` was set at create time through
+// WebRtc_CreateDelayEstimator(...).
+//
+// Input:
+// - handle : Pointer to the delay estimation instance.
+// - lookahead : The amount of lookahead to be used.
+//
+// Return value:
+//      - new_lookahead : The actual amount of lookahead set, unless `handle`
+//                        is a NULL pointer or `lookahead` is invalid, in which
+//                        case -1 is returned.
+int WebRtc_set_lookahead(void* handle, int lookahead);
+
+// Returns the amount of lookahead currently in use.
+// Input:
+// - handle : Pointer to the delay estimation instance.
+int WebRtc_lookahead(void* handle);
+
+// Sets the `allowed_offset` used in the robust validation scheme. If the
+// delay estimator is used in an echo control component, this parameter is
+// related to the filter length. In principle `allowed_offset` should be set to
+// the echo control filter length minus the expected echo duration, i.e., the
+// delay offset the echo control can handle without quality regression. The
+// default value, used if not set manually, is zero. Note that `allowed_offset`
+// has to be non-negative.
+// Inputs:
+// - handle : Pointer to the delay estimation instance.
+// - allowed_offset : The amount of delay offset, measured in partitions,
+// the echo control filter can handle.
+int WebRtc_set_allowed_offset(void* handle, int allowed_offset);
+
+// Returns the `allowed_offset` in number of partitions.
+int WebRtc_get_allowed_offset(const void* handle);
+
+// Enables/disables the robust validation functionality in the delay
+// estimation.
+// This is by default set to disabled at create time. The state is preserved
+// over a reset.
+// Inputs:
+// - handle : Pointer to the delay estimation instance.
+// - enable : Enable (1) or disable (0) this feature.
+int WebRtc_enable_robust_validation(void* handle, int enable);
+
+// Returns 1 if robust validation is enabled and 0 if disabled.
+int WebRtc_is_robust_validation_enabled(const void* handle);
+
+// Estimates and returns the delay between the far-end and near-end blocks. The
+// value will be offset by the lookahead (i.e. the lookahead should be
+// subtracted from the returned value).
+// Inputs:
+// - handle : Pointer to the delay estimation instance.
+// - near_spectrum : Pointer to the near-end spectrum data of the current
+// block.
+// - spectrum_size : The size of the data arrays (same for both far- and
+// near-end).
+// - near_q : The Q-domain of the near-end data.
+//
+// Output:
+// - handle : Updated instance.
+//
+// Return value:
+// - delay : >= 0 - Calculated delay value.
+// -1 - Error.
+// -2 - Insufficient data for estimation.
+int WebRtc_DelayEstimatorProcessFix(void* handle,
+ const uint16_t* near_spectrum,
+ int spectrum_size,
+ int near_q);
+
+// See WebRtc_DelayEstimatorProcessFix() for description.
+int WebRtc_DelayEstimatorProcessFloat(void* handle,
+ const float* near_spectrum,
+ int spectrum_size);
+
+// Returns the last calculated delay updated by
+// WebRtc_DelayEstimatorProcessFix()/Float().
+//
+// Input:
+// - handle : Pointer to the delay estimation instance.
+//
+// Return value:
+// - delay : >= 0 - Last calculated delay value.
+// -1 - Error.
+// -2 - Insufficient data for estimation.
+int WebRtc_last_delay(void* handle);
+
+// Returns the estimation quality/probability of the last calculated delay
+// updated by WebRtc_DelayEstimatorProcessFix()/Float(). The estimation
+// quality is a value in the interval [0, 1]. The higher the value, the better
+// the quality.
+//
+// Return value:
+// - delay_quality : >= 0 - Estimation quality of last calculated delay.
+float WebRtc_last_delay_quality(void* handle);
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_UTILITY_DELAY_ESTIMATOR_WRAPPER_H_
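For reference, a minimal single-block lifecycle sketch for the float API above
(illustrative only: the helper name, the 65-bin spectrum and the 100-block
history are our assumptions, not values mandated by the API, and error
handling is abbreviated):

    #include "modules/audio_processing/utility/delay_estimator_wrapper.h"

    int EstimateDelayForOneBlock(const float* far_spectrum,
                                 const float* near_spectrum) {
      constexpr int kSpectrumSize = 65;  // E.g. a 128-point real FFT.
      constexpr int kHistorySize = 100;  // Far-end history, in blocks.
      constexpr int kMaxLookahead = 0;

      void* farend = webrtc::WebRtc_CreateDelayEstimatorFarend(kSpectrumSize,
                                                               kHistorySize);
      void* estimator =
          webrtc::WebRtc_CreateDelayEstimator(farend, kMaxLookahead);
      if (estimator == nullptr) {  // Also covers farend == nullptr.
        webrtc::WebRtc_FreeDelayEstimatorFarend(farend);
        return -1;
      }
      webrtc::WebRtc_InitDelayEstimatorFarend(farend);
      webrtc::WebRtc_InitDelayEstimator(estimator);

      // Per block: add the far-end reference first, then process the near end.
      webrtc::WebRtc_AddFarSpectrumFloat(farend, far_spectrum, kSpectrumSize);
      const int delay = webrtc::WebRtc_DelayEstimatorProcessFloat(
          estimator, near_spectrum, kSpectrumSize);
      // delay: >= 0 estimated delay in blocks, -1 error, -2 not enough data.

      // The estimator does not own the far-end; free it after the estimator.
      webrtc::WebRtc_FreeDelayEstimator(estimator);
      webrtc::WebRtc_FreeDelayEstimatorFarend(farend);
      return delay;
    }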
diff --git a/third_party/libwebrtc/modules/audio_processing/utility/legacy_delay_estimator_gn/moz.build b/third_party/libwebrtc/modules/audio_processing/utility/legacy_delay_estimator_gn/moz.build
new file mode 100644
index 0000000000..2df791788a
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/utility/legacy_delay_estimator_gn/moz.build
@@ -0,0 +1,202 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/audio_processing/utility/delay_estimator.cc",
+ "/third_party/libwebrtc/modules/audio_processing/utility/delay_estimator_wrapper.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("legacy_delay_estimator_gn")
diff --git a/third_party/libwebrtc/modules/audio_processing/utility/pffft_wrapper.cc b/third_party/libwebrtc/modules/audio_processing/utility/pffft_wrapper.cc
new file mode 100644
index 0000000000..88642fb12b
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/utility/pffft_wrapper.cc
@@ -0,0 +1,135 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/utility/pffft_wrapper.h"
+
+#include "rtc_base/checks.h"
+#include "third_party/pffft/src/pffft.h"
+
+namespace webrtc {
+namespace {
+
+size_t GetBufferSize(size_t fft_size, Pffft::FftType fft_type) {
+ return fft_size * (fft_type == Pffft::FftType::kReal ? 1 : 2);
+}
+
+float* AllocatePffftBuffer(size_t size) {
+ return static_cast<float*>(pffft_aligned_malloc(size * sizeof(float)));
+}
+
+} // namespace
+
+Pffft::FloatBuffer::FloatBuffer(size_t fft_size, FftType fft_type)
+ : size_(GetBufferSize(fft_size, fft_type)),
+ data_(AllocatePffftBuffer(size_)) {}
+
+Pffft::FloatBuffer::~FloatBuffer() {
+ pffft_aligned_free(data_);
+}
+
+rtc::ArrayView<const float> Pffft::FloatBuffer::GetConstView() const {
+ return {data_, size_};
+}
+
+rtc::ArrayView<float> Pffft::FloatBuffer::GetView() {
+ return {data_, size_};
+}
+
+Pffft::Pffft(size_t fft_size, FftType fft_type)
+ : fft_size_(fft_size),
+ fft_type_(fft_type),
+ pffft_status_(pffft_new_setup(
+ fft_size_,
+ fft_type == Pffft::FftType::kReal ? PFFFT_REAL : PFFFT_COMPLEX)),
+ scratch_buffer_(
+ AllocatePffftBuffer(GetBufferSize(fft_size_, fft_type_))) {
+ RTC_DCHECK(pffft_status_);
+ RTC_DCHECK(scratch_buffer_);
+}
+
+Pffft::~Pffft() {
+ pffft_destroy_setup(pffft_status_);
+ pffft_aligned_free(scratch_buffer_);
+}
+
+bool Pffft::IsValidFftSize(size_t fft_size, FftType fft_type) {
+ if (fft_size == 0) {
+ return false;
+ }
+  // PFFFT only supports transforms of inputs whose length N has the form
+  // N = (2^a)*(3^b)*(5^c), where b >= 0, c >= 0, and a >= 5 for the real FFT
+  // or a >= 4 for the complex FFT.
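+  // Examples (illustrative): 96 = 2^5 * 3 is valid for the real FFT (a = 5);
+  // 144 = 2^4 * 3^2 is valid only for the complex FFT (a = 4); a size with
+  // any other prime factor, e.g. 17, leaves n != 1 below and is rejected.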
+ constexpr int kFactors[] = {2, 3, 5};
+ int factorization[] = {0, 0, 0};
+ int n = static_cast<int>(fft_size);
+ for (int i = 0; i < 3; ++i) {
+ while (n % kFactors[i] == 0) {
+ n = n / kFactors[i];
+ factorization[i]++;
+ }
+ }
+ int a_min = (fft_type == Pffft::FftType::kReal) ? 5 : 4;
+ return factorization[0] >= a_min && n == 1;
+}
+
+bool Pffft::IsSimdEnabled() {
+ return pffft_simd_size() > 1;
+}
+
+std::unique_ptr<Pffft::FloatBuffer> Pffft::CreateBuffer() const {
+ // Cannot use make_unique from absl because Pffft is the only friend of
+ // Pffft::FloatBuffer.
+ std::unique_ptr<Pffft::FloatBuffer> buffer(
+ new Pffft::FloatBuffer(fft_size_, fft_type_));
+ return buffer;
+}
+
+void Pffft::ForwardTransform(const FloatBuffer& in,
+ FloatBuffer* out,
+ bool ordered) {
+ RTC_DCHECK_EQ(in.size(), GetBufferSize(fft_size_, fft_type_));
+ RTC_DCHECK_EQ(in.size(), out->size());
+ RTC_DCHECK(scratch_buffer_);
+ if (ordered) {
+ pffft_transform_ordered(pffft_status_, in.const_data(), out->data(),
+ scratch_buffer_, PFFFT_FORWARD);
+ } else {
+ pffft_transform(pffft_status_, in.const_data(), out->data(),
+ scratch_buffer_, PFFFT_FORWARD);
+ }
+}
+
+void Pffft::BackwardTransform(const FloatBuffer& in,
+ FloatBuffer* out,
+ bool ordered) {
+ RTC_DCHECK_EQ(in.size(), GetBufferSize(fft_size_, fft_type_));
+ RTC_DCHECK_EQ(in.size(), out->size());
+ RTC_DCHECK(scratch_buffer_);
+ if (ordered) {
+ pffft_transform_ordered(pffft_status_, in.const_data(), out->data(),
+ scratch_buffer_, PFFFT_BACKWARD);
+ } else {
+ pffft_transform(pffft_status_, in.const_data(), out->data(),
+ scratch_buffer_, PFFFT_BACKWARD);
+ }
+}
+
+void Pffft::FrequencyDomainConvolve(const FloatBuffer& fft_x,
+ const FloatBuffer& fft_y,
+ FloatBuffer* out,
+ float scaling) {
+ RTC_DCHECK_EQ(fft_x.size(), GetBufferSize(fft_size_, fft_type_));
+ RTC_DCHECK_EQ(fft_x.size(), fft_y.size());
+ RTC_DCHECK_EQ(fft_x.size(), out->size());
+ pffft_zconvolve_accumulate(pffft_status_, fft_x.const_data(),
+ fft_y.const_data(), out->data(), scaling);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/utility/pffft_wrapper.h b/third_party/libwebrtc/modules/audio_processing/utility/pffft_wrapper.h
new file mode 100644
index 0000000000..983c2fd1bc
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/utility/pffft_wrapper.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_UTILITY_PFFFT_WRAPPER_H_
+#define MODULES_AUDIO_PROCESSING_UTILITY_PFFFT_WRAPPER_H_
+
+#include <memory>
+
+#include "api/array_view.h"
+
+// Forward declaration.
+struct PFFFT_Setup;
+
+namespace webrtc {
+
+// Pretty-Fast Fast Fourier Transform (PFFFT) wrapper class.
+// Not thread safe.
+class Pffft {
+ public:
+ enum class FftType { kReal, kComplex };
+
+ // 1D floating point buffer used as input/output data type for the FFT ops.
+ // It must be constructed using Pffft::CreateBuffer().
+ class FloatBuffer {
+ public:
+ FloatBuffer(const FloatBuffer&) = delete;
+ FloatBuffer& operator=(const FloatBuffer&) = delete;
+ ~FloatBuffer();
+
+ rtc::ArrayView<const float> GetConstView() const;
+ rtc::ArrayView<float> GetView();
+
+ private:
+ friend class Pffft;
+ FloatBuffer(size_t fft_size, FftType fft_type);
+ const float* const_data() const { return data_; }
+ float* data() { return data_; }
+ size_t size() const { return size_; }
+
+ const size_t size_;
+ float* const data_;
+ };
+
+  // TODO(https://crbug.com/webrtc/9577): Consider adding a factory and making
+  // the ctor private, e.g.
+  //   static std::unique_ptr<Pffft> Create(size_t fft_size, FftType fft_type);
+  //
+  // Ctor. `fft_size` must be a supported size (see Pffft::IsValidFftSize()).
+  // If not supported, the code will crash.
+ Pffft(size_t fft_size, FftType fft_type);
+ Pffft(const Pffft&) = delete;
+ Pffft& operator=(const Pffft&) = delete;
+ ~Pffft();
+
+ // Returns true if the FFT size is supported.
+ static bool IsValidFftSize(size_t fft_size, FftType fft_type);
+
+ // Returns true if SIMD code optimizations are being used.
+ static bool IsSimdEnabled();
+
+ // Creates a buffer of the right size.
+ std::unique_ptr<FloatBuffer> CreateBuffer() const;
+
+ // TODO(https://crbug.com/webrtc/9577): Overload with rtc::ArrayView args.
+ // Computes the forward fast Fourier transform.
+ void ForwardTransform(const FloatBuffer& in, FloatBuffer* out, bool ordered);
+ // Computes the backward fast Fourier transform.
+ void BackwardTransform(const FloatBuffer& in, FloatBuffer* out, bool ordered);
+
+ // Multiplies the frequency components of `fft_x` and `fft_y` and accumulates
+ // them into `out`. The arrays must have been obtained with
+ // ForwardTransform(..., /*ordered=*/false) - i.e., `fft_x` and `fft_y` must
+ // not be ordered.
+ void FrequencyDomainConvolve(const FloatBuffer& fft_x,
+ const FloatBuffer& fft_y,
+ FloatBuffer* out,
+ float scaling = 1.f);
+
+ private:
+ const size_t fft_size_;
+ const FftType fft_type_;
+ PFFFT_Setup* pffft_status_;
+ float* const scratch_buffer_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_UTILITY_PFFFT_WRAPPER_H_
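For reference, a minimal usage sketch of the class above (illustrative only;
the function name is ours and 512 is one arbitrary valid size). As in PFFFT
itself, the transforms are unnormalized, so a forward/backward round trip
scales the signal by the FFT size:

    #include <cstddef>
    #include <memory>

    #include "modules/audio_processing/utility/pffft_wrapper.h"

    void PffftRoundTripSketch() {
      constexpr size_t kFftSize = 512;  // 2^9: satisfies a >= 5 for kReal.
      if (!webrtc::Pffft::IsValidFftSize(kFftSize,
                                         webrtc::Pffft::FftType::kReal)) {
        return;
      }
      webrtc::Pffft fft(kFftSize, webrtc::Pffft::FftType::kReal);
      std::unique_ptr<webrtc::Pffft::FloatBuffer> time_buf = fft.CreateBuffer();
      std::unique_ptr<webrtc::Pffft::FloatBuffer> freq_buf = fft.CreateBuffer();
      rtc::ArrayView<float> samples = time_buf->GetView();
      for (size_t i = 0; i < samples.size(); ++i) {
        samples[i] = (i == 0) ? 1.f : 0.f;  // Unit impulse.
      }
      // Unordered output is fine as long as it stays inside this class, e.g.
      // when fed to BackwardTransform() or FrequencyDomainConvolve().
      fft.ForwardTransform(*time_buf, freq_buf.get(), /*ordered=*/false);
      fft.BackwardTransform(*freq_buf, time_buf.get(), /*ordered=*/false);
      // time_buf now holds the impulse scaled by kFftSize.
    }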
diff --git a/third_party/libwebrtc/modules/audio_processing/utility/pffft_wrapper_gn/moz.build b/third_party/libwebrtc/modules/audio_processing/utility/pffft_wrapper_gn/moz.build
new file mode 100644
index 0000000000..9ecfbdf7f9
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/utility/pffft_wrapper_gn/moz.build
@@ -0,0 +1,201 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/audio_processing/utility/pffft_wrapper.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("pffft_wrapper_gn")
diff --git a/third_party/libwebrtc/modules/audio_processing/utility/pffft_wrapper_unittest.cc b/third_party/libwebrtc/modules/audio_processing/utility/pffft_wrapper_unittest.cc
new file mode 100644
index 0000000000..2ad6849cd4
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/utility/pffft_wrapper_unittest.cc
@@ -0,0 +1,182 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/utility/pffft_wrapper.h"
+
+#include <algorithm>
+#include <cstdlib>
+#include <memory>
+
+#include "test/gtest.h"
+#include "third_party/pffft/src/pffft.h"
+
+namespace webrtc {
+namespace test {
+namespace {
+
+constexpr size_t kMaxValidSizeCheck = 1024;
+
+static constexpr int kFftSizes[] = {
+ 16, 32, 64, 96, 128, 160, 192, 256, 288, 384, 5 * 96, 512,
+ 576, 5 * 128, 800, 864, 1024, 2048, 2592, 4000, 4096, 12000, 36864};
+
+void CreatePffftWrapper(size_t fft_size, Pffft::FftType fft_type) {
+ Pffft pffft_wrapper(fft_size, fft_type);
+}
+
+float* AllocateScratchBuffer(size_t fft_size, bool complex_fft) {
+ return static_cast<float*>(
+ pffft_aligned_malloc(fft_size * (complex_fft ? 2 : 1) * sizeof(float)));
+}
+
+double frand() {
+ return std::rand() / static_cast<double>(RAND_MAX);
+}
+
+void ExpectArrayViewsEquality(rtc::ArrayView<const float> a,
+ rtc::ArrayView<const float> b) {
+ ASSERT_EQ(a.size(), b.size());
+ for (size_t i = 0; i < a.size(); ++i) {
+ SCOPED_TRACE(i);
+ EXPECT_EQ(a[i], b[i]);
+ }
+}
+
+// Compares the output of the PFFFT C++ wrapper to that of the C PFFFT.
+// Bit-exactness is expected.
+void PffftValidateWrapper(size_t fft_size, bool complex_fft) {
+ // Always use the same seed to avoid flakiness.
+ std::srand(0);
+
+ // Init PFFFT.
+ PFFFT_Setup* pffft_status =
+ pffft_new_setup(fft_size, complex_fft ? PFFFT_COMPLEX : PFFFT_REAL);
+ ASSERT_TRUE(pffft_status) << "FFT size (" << fft_size << ") not supported.";
+ size_t num_floats = fft_size * (complex_fft ? 2 : 1);
+ int num_bytes = static_cast<int>(num_floats) * sizeof(float);
+ float* in = static_cast<float*>(pffft_aligned_malloc(num_bytes));
+ float* out = static_cast<float*>(pffft_aligned_malloc(num_bytes));
+ float* scratch = AllocateScratchBuffer(fft_size, complex_fft);
+
+ // Init PFFFT C++ wrapper.
+ Pffft::FftType fft_type =
+ complex_fft ? Pffft::FftType::kComplex : Pffft::FftType::kReal;
+ ASSERT_TRUE(Pffft::IsValidFftSize(fft_size, fft_type));
+ Pffft pffft_wrapper(fft_size, fft_type);
+ auto in_wrapper = pffft_wrapper.CreateBuffer();
+ auto out_wrapper = pffft_wrapper.CreateBuffer();
+
+  // Input and output buffer views.
+ rtc::ArrayView<float> in_view(in, num_floats);
+ rtc::ArrayView<float> out_view(out, num_floats);
+ auto in_wrapper_view = in_wrapper->GetView();
+ EXPECT_EQ(in_wrapper_view.size(), num_floats);
+ auto out_wrapper_view = out_wrapper->GetConstView();
+ EXPECT_EQ(out_wrapper_view.size(), num_floats);
+
+ // Random input data.
+ for (size_t i = 0; i < num_floats; ++i) {
+ in_wrapper_view[i] = in[i] = static_cast<float>(frand() * 2.0 - 1.0);
+ }
+
+ // Forward transform.
+ pffft_transform(pffft_status, in, out, scratch, PFFFT_FORWARD);
+ pffft_wrapper.ForwardTransform(*in_wrapper, out_wrapper.get(),
+ /*ordered=*/false);
+ ExpectArrayViewsEquality(out_view, out_wrapper_view);
+
+ // Copy the FFT results into the input buffers to compute the backward FFT.
+ std::copy(out_view.begin(), out_view.end(), in_view.begin());
+ std::copy(out_wrapper_view.begin(), out_wrapper_view.end(),
+ in_wrapper_view.begin());
+
+ // Backward transform.
+ pffft_transform(pffft_status, in, out, scratch, PFFFT_BACKWARD);
+ pffft_wrapper.BackwardTransform(*in_wrapper, out_wrapper.get(),
+ /*ordered=*/false);
+ ExpectArrayViewsEquality(out_view, out_wrapper_view);
+
+ pffft_destroy_setup(pffft_status);
+ pffft_aligned_free(in);
+ pffft_aligned_free(out);
+ pffft_aligned_free(scratch);
+}
+
+} // namespace
+
+TEST(PffftTest, CreateWrapperWithValidSize) {
+ for (size_t fft_size = 0; fft_size < kMaxValidSizeCheck; ++fft_size) {
+ SCOPED_TRACE(fft_size);
+ if (Pffft::IsValidFftSize(fft_size, Pffft::FftType::kReal)) {
+ CreatePffftWrapper(fft_size, Pffft::FftType::kReal);
+ }
+ if (Pffft::IsValidFftSize(fft_size, Pffft::FftType::kComplex)) {
+ CreatePffftWrapper(fft_size, Pffft::FftType::kComplex);
+ }
+ }
+}
+
+#if !defined(NDEBUG) && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+
+class PffftInvalidSizeDeathTest : public ::testing::Test,
+ public ::testing::WithParamInterface<size_t> {
+};
+
+TEST_P(PffftInvalidSizeDeathTest, DoNotCreateRealWrapper) {
+ size_t fft_size = GetParam();
+ ASSERT_FALSE(Pffft::IsValidFftSize(fft_size, Pffft::FftType::kReal));
+ EXPECT_DEATH(CreatePffftWrapper(fft_size, Pffft::FftType::kReal), "");
+}
+
+TEST_P(PffftInvalidSizeDeathTest, DoNotCreateComplexWrapper) {
+ size_t fft_size = GetParam();
+ ASSERT_FALSE(Pffft::IsValidFftSize(fft_size, Pffft::FftType::kComplex));
+ EXPECT_DEATH(CreatePffftWrapper(fft_size, Pffft::FftType::kComplex), "");
+}
+
+INSTANTIATE_TEST_SUITE_P(PffftTest,
+ PffftInvalidSizeDeathTest,
+ ::testing::Values(17,
+ 33,
+ 65,
+ 97,
+ 129,
+ 161,
+ 193,
+ 257,
+ 289,
+ 385,
+ 481,
+ 513,
+ 577,
+ 641,
+ 801,
+ 865,
+ 1025));
+
+#endif
+
+// TODO(https://crbug.com/webrtc/9577): Enable once SIMD is always enabled.
+TEST(PffftTest, DISABLED_CheckSimd) {
+ EXPECT_TRUE(Pffft::IsSimdEnabled());
+}
+
+TEST(PffftTest, FftBitExactness) {
+ for (int fft_size : kFftSizes) {
+ SCOPED_TRACE(fft_size);
+ if (fft_size != 16) {
+ PffftValidateWrapper(fft_size, /*complex_fft=*/false);
+ }
+ PffftValidateWrapper(fft_size, /*complex_fft=*/true);
+ }
+}
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/vad/BUILD.gn b/third_party/libwebrtc/modules/audio_processing/vad/BUILD.gn
new file mode 100644
index 0000000000..71e079d3a3
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/vad/BUILD.gn
@@ -0,0 +1,69 @@
+# Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+import("../../../webrtc.gni")
+rtc_library("vad") {
+ visibility = [
+ "../*",
+ "../../../rtc_tools:*",
+ ]
+ sources = [
+ "common.h",
+ "gmm.cc",
+ "gmm.h",
+ "noise_gmm_tables.h",
+ "pitch_based_vad.cc",
+ "pitch_based_vad.h",
+ "pitch_internal.cc",
+ "pitch_internal.h",
+ "pole_zero_filter.cc",
+ "pole_zero_filter.h",
+ "standalone_vad.cc",
+ "standalone_vad.h",
+ "vad_audio_proc.cc",
+ "vad_audio_proc.h",
+ "vad_audio_proc_internal.h",
+ "vad_circular_buffer.cc",
+ "vad_circular_buffer.h",
+ "voice_activity_detector.cc",
+ "voice_activity_detector.h",
+ "voice_gmm_tables.h",
+ ]
+ deps = [
+ "../../../audio/utility:audio_frame_operations",
+ "../../../common_audio",
+ "../../../common_audio:common_audio_c",
+ "../../../common_audio/third_party/ooura:fft_size_256",
+ "../../../rtc_base:checks",
+ "../../audio_coding:isac_vad",
+ ]
+}
+
+if (rtc_include_tests) {
+ rtc_library("vad_unittests") {
+ testonly = true
+ sources = [
+ "gmm_unittest.cc",
+ "pitch_based_vad_unittest.cc",
+ "pitch_internal_unittest.cc",
+ "pole_zero_filter_unittest.cc",
+ "standalone_vad_unittest.cc",
+ "vad_audio_proc_unittest.cc",
+ "vad_circular_buffer_unittest.cc",
+ "voice_activity_detector_unittest.cc",
+ ]
+ deps = [
+ ":vad",
+ "../../../common_audio",
+ "../../../test:fileutils",
+ "../../../test:test_support",
+ "//testing/gmock",
+ "//testing/gtest",
+ ]
+ }
+}
diff --git a/third_party/libwebrtc/modules/audio_processing/vad/common.h b/third_party/libwebrtc/modules/audio_processing/vad/common.h
new file mode 100644
index 0000000000..b5a5fb385b
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/vad/common.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_VAD_COMMON_H_
+#define MODULES_AUDIO_PROCESSING_VAD_COMMON_H_
+
+#include <stddef.h>
+
+static const int kSampleRateHz = 16000;
+static const size_t kLength10Ms = kSampleRateHz / 100;
+static const size_t kMaxNumFrames = 4;
+
+struct AudioFeatures {
+ double log_pitch_gain[kMaxNumFrames];
+ double pitch_lag_hz[kMaxNumFrames];
+ double spectral_peak[kMaxNumFrames];
+ double rms[kMaxNumFrames];
+ size_t num_frames;
+ bool silence;
+};
+
+#endif // MODULES_AUDIO_PROCESSING_VAD_COMMON_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/vad/gmm.cc b/third_party/libwebrtc/modules/audio_processing/vad/gmm.cc
new file mode 100644
index 0000000000..3b8764c4d0
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/vad/gmm.cc
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/vad/gmm.h"
+
+#include <math.h>
+
+namespace webrtc {
+
+static const int kMaxDimension = 10;
+
+static void RemoveMean(const double* in,
+ const double* mean_vec,
+ int dimension,
+ double* out) {
+ for (int n = 0; n < dimension; ++n)
+ out[n] = in[n] - mean_vec[n];
+}
+
+static double ComputeExponent(const double* in,
+ const double* covar_inv,
+ int dimension) {
+ double q = 0;
+ for (int i = 0; i < dimension; ++i) {
+ double v = 0;
+ for (int j = 0; j < dimension; j++)
+ v += (*covar_inv++) * in[j];
+ q += v * in[i];
+ }
+ q *= -0.5;
+ return q;
+}
+
+double EvaluateGmm(const double* x, const GmmParameters& gmm_parameters) {
+ if (gmm_parameters.dimension > kMaxDimension) {
+    return -1;  // An invalid pdf value, so the caller can detect this.
+ }
+ double f = 0;
+ double v[kMaxDimension];
+ const double* mean_vec = gmm_parameters.mean;
+ const double* covar_inv = gmm_parameters.covar_inverse;
+
+ for (int n = 0; n < gmm_parameters.num_mixtures; n++) {
+ RemoveMean(x, mean_vec, gmm_parameters.dimension, v);
+ double q = ComputeExponent(v, covar_inv, gmm_parameters.dimension) +
+ gmm_parameters.weight[n];
+ f += exp(q);
+ mean_vec += gmm_parameters.dimension;
+ covar_inv += gmm_parameters.dimension * gmm_parameters.dimension;
+ }
+ return f;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/vad/gmm.h b/third_party/libwebrtc/modules/audio_processing/vad/gmm.h
new file mode 100644
index 0000000000..d9d68ecfdc
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/vad/gmm.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_VAD_GMM_H_
+#define MODULES_AUDIO_PROCESSING_VAD_GMM_H_
+
+namespace webrtc {
+
+// A structure that specifies a GMM.
+// A GMM is formulated as
+// f(x) = w[0] * mixture[0] + w[1] * mixture[1] + ... +
+// w[num_mixtures - 1] * mixture[num_mixtures - 1];
+// where a 'mixture' is a Gaussian density.
+
+struct GmmParameters {
+ // weight[n] = log(w[n]) - `dimension`/2 * log(2*pi) - 1/2 * log(det(cov[n]));
+ // where cov[n] is the covariance matrix of mixture n;
+ const double* weight;
+  // Pointer to the first element of a `num_mixtures`x`dimension` matrix
+  // where the kth row is the mean of the kth mixture.
+  const double* mean;
+  // Pointer to the first element of a `num_mixtures`x`dimension`x`dimension`
+  // 3D-matrix, where the kth 2D-matrix is the inverse of the covariance
+  // matrix of the kth mixture.
+  const double* covar_inverse;
+  // Dimensionality of the mixtures.
+  int dimension;
+  // Number of mixtures.
+  int num_mixtures;
+};
+
+// Evaluates the given GMM, according to `gmm_parameters`, at the given point
+// `x`. If the dimensionality of the given GMM is larger than the maximum
+// dimension accepted by this function, -1 is returned.
+double EvaluateGmm(const double* x, const GmmParameters& gmm_parameters);
+
+} // namespace webrtc
+#endif // MODULES_AUDIO_PROCESSING_VAD_GMM_H_
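The precomputed-weight convention above can be sanity-checked with a single
1-D standard normal: with w = 1, zero mean and unit variance, weight = log(1)
- 1/2*log(2*pi) - 1/2*log(1), so EvaluateGmm() at x = 0 should return the
standard normal pdf at zero, roughly 0.3989. A small illustrative sketch (the
function name is ours, not part of the module):

    #include <math.h>

    #include "modules/audio_processing/vad/gmm.h"

    void CheckStandardNormalPdf() {
      static const double kWeight[1] = {-0.5 * log(2.0 * M_PI)};
      static const double kMean[1] = {0.0};
      static const double kCovarInverse[1] = {1.0};  // Inverse of variance 1.
      webrtc::GmmParameters gmm;
      gmm.weight = kWeight;
      gmm.mean = kMean;
      gmm.covar_inverse = kCovarInverse;
      gmm.dimension = 1;
      gmm.num_mixtures = 1;
      const double x[1] = {0.0};
      const double pdf = webrtc::EvaluateGmm(x, gmm);
      // pdf ~= 1 / sqrt(2 * pi) ~= 0.398942.
      (void)pdf;
    }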
diff --git a/third_party/libwebrtc/modules/audio_processing/vad/gmm_unittest.cc b/third_party/libwebrtc/modules/audio_processing/vad/gmm_unittest.cc
new file mode 100644
index 0000000000..d895afab7b
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/vad/gmm_unittest.cc
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/vad/gmm.h"
+
+#include <math.h>
+
+#include "modules/audio_processing/vad/noise_gmm_tables.h"
+#include "modules/audio_processing/vad/voice_gmm_tables.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+TEST(GmmTest, EvaluateGmm) {
+ GmmParameters noise_gmm;
+ GmmParameters voice_gmm;
+
+ // Setup noise GMM.
+ noise_gmm.dimension = kNoiseGmmDim;
+ noise_gmm.num_mixtures = kNoiseGmmNumMixtures;
+ noise_gmm.weight = kNoiseGmmWeights;
+ noise_gmm.mean = &kNoiseGmmMean[0][0];
+ noise_gmm.covar_inverse = &kNoiseGmmCovarInverse[0][0][0];
+
+ // Setup voice GMM.
+ voice_gmm.dimension = kVoiceGmmDim;
+ voice_gmm.num_mixtures = kVoiceGmmNumMixtures;
+ voice_gmm.weight = kVoiceGmmWeights;
+ voice_gmm.mean = &kVoiceGmmMean[0][0];
+ voice_gmm.covar_inverse = &kVoiceGmmCovarInverse[0][0][0];
+
+  // Test vectors. These are the means of the GMM means.
+ const double kXVoice[kVoiceGmmDim] = {-1.35893162459863, 602.862491970368,
+ 178.022069191324};
+ const double kXNoise[kNoiseGmmDim] = {-2.33443722724409, 2827.97828765184,
+ 141.114178166812};
+
+  // Expected pdf values. These values are computed in MATLAB using EvalGmm.m.
+ const double kPdfNoise = 1.88904409403101e-07;
+ const double kPdfVoice = 1.30453996982266e-06;
+
+  // The relative error should be smaller than the following value.
+ const double kAcceptedRelativeErr = 1e-10;
+
+ // Test Voice.
+ double pdf = EvaluateGmm(kXVoice, voice_gmm);
+ EXPECT_GT(pdf, 0);
+ double relative_error = fabs(pdf - kPdfVoice) / kPdfVoice;
+ EXPECT_LE(relative_error, kAcceptedRelativeErr);
+
+ // Test Noise.
+ pdf = EvaluateGmm(kXNoise, noise_gmm);
+ EXPECT_GT(pdf, 0);
+ relative_error = fabs(pdf - kPdfNoise) / kPdfNoise;
+ EXPECT_LE(relative_error, kAcceptedRelativeErr);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/vad/noise_gmm_tables.h b/third_party/libwebrtc/modules/audio_processing/vad/noise_gmm_tables.h
new file mode 100644
index 0000000000..944a5401cc
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/vad/noise_gmm_tables.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+// GMM tables for inactive segments. Generated by MakeGmmTables.m.
+
+#ifndef MODULES_AUDIO_PROCESSING_VAD_NOISE_GMM_TABLES_H_
+#define MODULES_AUDIO_PROCESSING_VAD_NOISE_GMM_TABLES_H_
+
+namespace webrtc {
+
+static const int kNoiseGmmNumMixtures = 12;
+static const int kNoiseGmmDim = 3;
+
+static const double
+ kNoiseGmmCovarInverse[kNoiseGmmNumMixtures][kNoiseGmmDim][kNoiseGmmDim] = {
+ {{7.36219567592941e+00, 4.83060785179861e-03, 1.23335151497610e-02},
+ {4.83060785179861e-03, 1.65289507047817e-04, -2.41490588169997e-04},
+ {1.23335151497610e-02, -2.41490588169997e-04, 6.59472060689382e-03}},
+ {{8.70265239309140e+00, -5.30636201431086e-04, 5.44014966585347e-03},
+ {-5.30636201431086e-04, 3.11095453521008e-04, -1.86287206836035e-04},
+ {5.44014966585347e-03, -1.86287206836035e-04, 6.29493388790744e-04}},
+ {{4.53467851955055e+00, -3.92977536695197e-03, -2.46521420693317e-03},
+ {-3.92977536695197e-03, 4.94650752632750e-05, -1.08587438501826e-05},
+ {-2.46521420693317e-03, -1.08587438501826e-05, 9.28793975422261e-05}},
+ {{9.26817997114275e-01, -4.03976069276753e-04, -3.56441427392165e-03},
+ {-4.03976069276753e-04, 2.51976251631430e-06, 1.46914206734572e-07},
+ {-3.56441427392165e-03, 1.46914206734572e-07, 8.19914567685373e-05}},
+ {{7.61715986787441e+00, -1.54889041216888e-04, 2.41756280071656e-02},
+ {-1.54889041216888e-04, 3.50282550461672e-07, -6.27251196972490e-06},
+ {2.41756280071656e-02, -6.27251196972490e-06, 1.45061847649872e-02}},
+ {{8.31193642663158e+00, -3.84070508164323e-04, -3.09750630821876e-02},
+ {-3.84070508164323e-04, 3.80433432277336e-07, -1.14321142836636e-06},
+ {-3.09750630821876e-02, -1.14321142836636e-06, 8.35091486289997e-04}},
+ {{9.67283151270894e-01, 5.82465812445039e-05, -3.18350798617053e-03},
+ {5.82465812445039e-05, 2.23762672000318e-07, -7.74196587408623e-07},
+ {-3.18350798617053e-03, -7.74196587408623e-07, 3.85120938338325e-04}},
+ {{8.28066236985388e+00, 5.87634508319763e-05, 6.99303090891743e-03},
+ {5.87634508319763e-05, 2.93746018618058e-07, 3.40843332882272e-07},
+ {6.99303090891743e-03, 3.40843332882272e-07, 1.99379171190344e-04}},
+ {{6.07488998675646e+00, -1.11494526618473e-02, 5.10013111123381e-03},
+ {-1.11494526618473e-02, 6.99238879921751e-04, 5.36718550370870e-05},
+ {5.10013111123381e-03, 5.36718550370870e-05, 5.26909853276753e-04}},
+ {{6.90492021419175e+00, 4.20639355257863e-04, -2.38612752336481e-03},
+ {4.20639355257863e-04, 3.31246767338153e-06, -2.42052288150859e-08},
+ {-2.38612752336481e-03, -2.42052288150859e-08, 4.46608368363412e-04}},
+ {{1.31069150869715e+01, -1.73718583865670e-04, -1.97591814508578e-02},
+ {-1.73718583865670e-04, 2.80451716300124e-07, 9.96570755379865e-07},
+ {-1.97591814508578e-02, 9.96570755379865e-07, 2.41361900868847e-03}},
+ {{4.69566344239814e+00, -2.61077567563690e-04, 5.26359000761433e-03},
+ {-2.61077567563690e-04, 1.82420859823767e-06, -7.83645887541601e-07},
+ {5.26359000761433e-03, -7.83645887541601e-07, 1.33586288288802e-02}}};
+
+static const double kNoiseGmmMean[kNoiseGmmNumMixtures][kNoiseGmmDim] = {
+ {-2.01386094766163e+00, 1.69702162045397e+02, 7.41715804872181e+01},
+ {-1.94684591777290e+00, 1.42398396732668e+02, 1.64186321157831e+02},
+ {-2.29319297562437e+00, 3.86415425589868e+02, 2.13452215267125e+02},
+ {-3.25487177070268e+00, 1.08668712553616e+03, 2.33119949467419e+02},
+ {-2.13159632447467e+00, 4.83821702557717e+03, 6.86786166673740e+01},
+ {-2.26171410780526e+00, 4.79420193982422e+03, 1.53222513286450e+02},
+ {-3.32166740703185e+00, 4.35161135834358e+03, 1.33206448431316e+02},
+ {-2.19290322814343e+00, 3.98325506609408e+03, 2.13249167359934e+02},
+ {-2.02898459255404e+00, 7.37039893155007e+03, 1.12518527491926e+02},
+ {-2.26150236399500e+00, 1.54896745196145e+03, 1.49717357868579e+02},
+ {-2.00417668301790e+00, 3.82434760310304e+03, 1.07438913004312e+02},
+ {-2.30193040814533e+00, 1.43953696546439e+03, 7.04085275122649e+01}};
+
+static const double kNoiseGmmWeights[kNoiseGmmNumMixtures] = {
+ -1.09422832086193e+01, -1.10847897513425e+01, -1.36767587732187e+01,
+ -1.79789356118641e+01, -1.42830169160894e+01, -1.56500228061379e+01,
+ -1.83124990950113e+01, -1.69979436177477e+01, -1.12329424387828e+01,
+ -1.41311785780639e+01, -1.47171861448585e+01, -1.35963362781839e+01};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_VAD_NOISE_GMM_TABLES_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/vad/pitch_based_vad.cc b/third_party/libwebrtc/modules/audio_processing/vad/pitch_based_vad.cc
new file mode 100644
index 0000000000..68e60dc66a
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/vad/pitch_based_vad.cc
@@ -0,0 +1,120 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/vad/pitch_based_vad.h"
+
+#include <string.h>
+
+#include "modules/audio_processing/vad/common.h"
+#include "modules/audio_processing/vad/noise_gmm_tables.h"
+#include "modules/audio_processing/vad/vad_circular_buffer.h"
+#include "modules/audio_processing/vad/voice_gmm_tables.h"
+
+namespace webrtc {
+
+static_assert(kNoiseGmmDim == kVoiceGmmDim,
+ "noise and voice gmm dimension not equal");
+
+// These values should match MATLAB counterparts for unit-tests to pass.
+static const int kPosteriorHistorySize = 500; // 5 sec of 10 ms frames.
+static const double kInitialPriorProbability = 0.3;
+static const int kTransientWidthThreshold = 7;
+static const double kLowProbabilityThreshold = 0.2;
+
+static double LimitProbability(double p) {
+ const double kLimHigh = 0.99;
+ const double kLimLow = 0.01;
+
+ if (p > kLimHigh)
+ p = kLimHigh;
+ else if (p < kLimLow)
+ p = kLimLow;
+ return p;
+}
+
+PitchBasedVad::PitchBasedVad()
+ : p_prior_(kInitialPriorProbability),
+ circular_buffer_(VadCircularBuffer::Create(kPosteriorHistorySize)) {
+ // Setup noise GMM.
+ noise_gmm_.dimension = kNoiseGmmDim;
+ noise_gmm_.num_mixtures = kNoiseGmmNumMixtures;
+ noise_gmm_.weight = kNoiseGmmWeights;
+ noise_gmm_.mean = &kNoiseGmmMean[0][0];
+ noise_gmm_.covar_inverse = &kNoiseGmmCovarInverse[0][0][0];
+
+ // Setup voice GMM.
+ voice_gmm_.dimension = kVoiceGmmDim;
+ voice_gmm_.num_mixtures = kVoiceGmmNumMixtures;
+ voice_gmm_.weight = kVoiceGmmWeights;
+ voice_gmm_.mean = &kVoiceGmmMean[0][0];
+ voice_gmm_.covar_inverse = &kVoiceGmmCovarInverse[0][0][0];
+}
+
+PitchBasedVad::~PitchBasedVad() {}
+
+int PitchBasedVad::VoicingProbability(const AudioFeatures& features,
+ double* p_combined) {
+ double p;
+ double gmm_features[3];
+ double pdf_features_given_voice;
+ double pdf_features_given_noise;
+  // These limits are the same as in the MATLAB implementation
+  // 'VoicingProbGMM()'.
+ const double kLimLowLogPitchGain = -2.0;
+ const double kLimHighLogPitchGain = -0.9;
+ const double kLimLowSpectralPeak = 200;
+ const double kLimHighSpectralPeak = 2000;
+ const double kEps = 1e-12;
+ for (size_t n = 0; n < features.num_frames; n++) {
+ gmm_features[0] = features.log_pitch_gain[n];
+ gmm_features[1] = features.spectral_peak[n];
+ gmm_features[2] = features.pitch_lag_hz[n];
+
+ pdf_features_given_voice = EvaluateGmm(gmm_features, voice_gmm_);
+ pdf_features_given_noise = EvaluateGmm(gmm_features, noise_gmm_);
+
+ if (features.spectral_peak[n] < kLimLowSpectralPeak ||
+ features.spectral_peak[n] > kLimHighSpectralPeak ||
+ features.log_pitch_gain[n] < kLimLowLogPitchGain) {
+ pdf_features_given_voice = kEps * pdf_features_given_noise;
+ } else if (features.log_pitch_gain[n] > kLimHighLogPitchGain) {
+ pdf_features_given_noise = kEps * pdf_features_given_voice;
+ }
+
+ p = p_prior_ * pdf_features_given_voice /
+ (pdf_features_given_voice * p_prior_ +
+ pdf_features_given_noise * (1 - p_prior_));
+
+ p = LimitProbability(p);
+
+ // Combine pitch-based probability with standalone probability, before
+ // updating prior probabilities.
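+ // Assuming the two estimates are independent, the combined probability is
+ // P = p * pc / (p * pc + (1 - p) * (1 - pc)), with pc = p_combined[n].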
+ double prod_active = p * p_combined[n];
+ double prod_inactive = (1 - p) * (1 - p_combined[n]);
+ p_combined[n] = prod_active / (prod_active + prod_inactive);
+
+ if (UpdatePrior(p_combined[n]) < 0)
+ return -1;
+ // Limit prior probability. With a zero prior probability the posterior
+ // probability is always zero.
+ p_prior_ = LimitProbability(p_prior_);
+ }
+ return 0;
+}
+
+int PitchBasedVad::UpdatePrior(double p) {
+ circular_buffer_->Insert(p);
+ if (circular_buffer_->RemoveTransient(kTransientWidthThreshold,
+ kLowProbabilityThreshold) < 0)
+ return -1;
+ p_prior_ = circular_buffer_->Mean();
+ return 0;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/vad/pitch_based_vad.h b/third_party/libwebrtc/modules/audio_processing/vad/pitch_based_vad.h
new file mode 100644
index 0000000000..fa3abc2d28
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/vad/pitch_based_vad.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_VAD_PITCH_BASED_VAD_H_
+#define MODULES_AUDIO_PROCESSING_VAD_PITCH_BASED_VAD_H_
+
+#include <memory>
+
+#include "modules/audio_processing/vad/common.h"
+#include "modules/audio_processing/vad/gmm.h"
+
+namespace webrtc {
+
+class VadCircularBuffer;
+
+// Computes the probability that the input audio frame is active, given the
+// corresponding pitch gain and lag of the frame.
+class PitchBasedVad {
+ public:
+ PitchBasedVad();
+ ~PitchBasedVad();
+
+ // Compute pitch-based voicing probability, given the features.
+ // features: a structure containing features required for computing voicing
+ // probabilities.
+ //
+ // p_combined: an array which contains the combined activity probabilities
+ // computed prior to the call of this function. The method then
+ // computes the voicing probabilities, combines them with the
+ // given values, and returns the results in `p_combined`.
+ int VoicingProbability(const AudioFeatures& features, double* p_combined);
+
+ private:
+ int UpdatePrior(double p);
+
+ // TODO(turajs): Maybe define this at a higher level (maybe as an enum) so
+ // that all the code recognizes it as "no-error".
+ static const int kNoError = 0;
+
+ GmmParameters noise_gmm_;
+ GmmParameters voice_gmm_;
+
+ double p_prior_;
+
+ std::unique_ptr<VadCircularBuffer> circular_buffer_;
+};
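+
+// A minimal usage sketch (illustrative only; assumes `features` was filled by
+// a feature extractor such as VadAudioProc and `p_combined` already holds the
+// stand-alone VAD probabilities for those frames):
+//
+//   PitchBasedVad vad;
+//   if (vad.VoicingProbability(features, p_combined) != 0) {
+//     // Handle the error.
+//   }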
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_VAD_PITCH_BASED_VAD_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/vad/pitch_based_vad_unittest.cc b/third_party/libwebrtc/modules/audio_processing/vad/pitch_based_vad_unittest.cc
new file mode 100644
index 0000000000..4a8331a769
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/vad/pitch_based_vad_unittest.cc
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/vad/pitch_based_vad.h"
+
+#include <math.h>
+#include <stdio.h>
+
+#include <string>
+
+#include "test/gtest.h"
+#include "test/testsupport/file_utils.h"
+
+namespace webrtc {
+
+TEST(PitchBasedVadTest, VoicingProbabilityTest) {
+ std::string spectral_peak_file_name =
+ test::ResourcePath("audio_processing/agc/agc_spectral_peak", "dat");
+ FILE* spectral_peak_file = fopen(spectral_peak_file_name.c_str(), "rb");
+ ASSERT_TRUE(spectral_peak_file != NULL);
+
+ std::string pitch_gain_file_name =
+ test::ResourcePath("audio_processing/agc/agc_pitch_gain", "dat");
+ FILE* pitch_gain_file = fopen(pitch_gain_file_name.c_str(), "rb");
+ ASSERT_TRUE(pitch_gain_file != NULL);
+
+ std::string pitch_lag_file_name =
+ test::ResourcePath("audio_processing/agc/agc_pitch_lag", "dat");
+ FILE* pitch_lag_file = fopen(pitch_lag_file_name.c_str(), "rb");
+ ASSERT_TRUE(pitch_lag_file != NULL);
+
+ std::string voicing_prob_file_name =
+ test::ResourcePath("audio_processing/agc/agc_voicing_prob", "dat");
+ FILE* voicing_prob_file = fopen(voicing_prob_file_name.c_str(), "rb");
+ ASSERT_TRUE(voicing_prob_file != NULL);
+
+ PitchBasedVad vad;
+
+ double reference_activity_probability;
+
+ AudioFeatures audio_features;
+ memset(&audio_features, 0, sizeof(audio_features));
+ audio_features.num_frames = 1;
+ while (fread(audio_features.spectral_peak,
+ sizeof(audio_features.spectral_peak[0]), 1,
+ spectral_peak_file) == 1u) {
+ double p;
+ ASSERT_EQ(1u, fread(audio_features.log_pitch_gain,
+ sizeof(audio_features.log_pitch_gain[0]), 1,
+ pitch_gain_file));
+ ASSERT_EQ(1u,
+ fread(audio_features.pitch_lag_hz,
+ sizeof(audio_features.pitch_lag_hz[0]), 1, pitch_lag_file));
+ ASSERT_EQ(1u, fread(&reference_activity_probability,
+ sizeof(reference_activity_probability), 1,
+ voicing_prob_file));
+
+ p = 0.5; // Initialize to the neutral value for combining probabilities.
+ EXPECT_EQ(0, vad.VoicingProbability(audio_features, &p));
+ EXPECT_NEAR(p, reference_activity_probability, 0.01);
+ }
+
+ fclose(spectral_peak_file);
+ fclose(pitch_gain_file);
+ fclose(pitch_lag_file);
+ fclose(voicing_prob_file);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/vad/pitch_internal.cc b/third_party/libwebrtc/modules/audio_processing/vad/pitch_internal.cc
new file mode 100644
index 0000000000..8f86918644
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/vad/pitch_internal.cc
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/vad/pitch_internal.h"
+
+#include <cmath>
+
+namespace webrtc {
+
+// A 4-to-3 linear interpolation.
+// The interpolation constants are derived as follows:
+// Input pitch parameters are updated every 7.5 ms. Within a 30-ms interval
+// we are interested in the pitch parameters of 0-5 ms, 10-15 ms and 20-25 ms.
+// This is like interpolating 4-to-6 and keeping the odd samples.
+// The reason behind this is that LPC coefficients are computed for the first
+// half of each 10 ms interval.
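+// For example, out[0] interpolates between the last estimate of the previous
+// interval (`old_val`) and in[0], with weights 1/6 and 5/6 respectively.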
+static void PitchInterpolation(double old_val, const double* in, double* out) {
+ out[0] = 1. / 6. * old_val + 5. / 6. * in[0];
+ out[1] = 5. / 6. * in[1] + 1. / 6. * in[2];
+ out[2] = 0.5 * in[2] + 0.5 * in[3];
+}
+
+void GetSubframesPitchParameters(int sampling_rate_hz,
+ double* gains,
+ double* lags,
+ int num_in_frames,
+ int num_out_frames,
+ double* log_old_gain,
+ double* old_lag,
+ double* log_pitch_gain,
+ double* pitch_lag_hz) {
+ // Gain interpolation is in log-domain, also returned in log-domain.
+ for (int n = 0; n < num_in_frames; n++)
+ gains[n] = log(gains[n] + 1e-12);
+
+ // Interpolate lags and gains.
+ PitchInterpolation(*log_old_gain, gains, log_pitch_gain);
+ *log_old_gain = gains[num_in_frames - 1];
+ PitchInterpolation(*old_lag, lags, pitch_lag_hz);
+ *old_lag = lags[num_in_frames - 1];
+
+ // Convert pitch-lags to Hertz.
+ for (int n = 0; n < num_out_frames; n++) {
+ pitch_lag_hz[n] = sampling_rate_hz / pitch_lag_hz[n];
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/vad/pitch_internal.h b/third_party/libwebrtc/modules/audio_processing/vad/pitch_internal.h
new file mode 100644
index 0000000000..e382c1fbde
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/vad/pitch_internal.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_VAD_PITCH_INTERNAL_H_
+#define MODULES_AUDIO_PROCESSING_VAD_PITCH_INTERNAL_H_
+
+namespace webrtc {
+
+// Interpolates the pitch gains and lags, estimated on `num_in_frames`
+// sub-frames, down to `num_out_frames` values aligned with the 10 ms
+// sub-frames. Input gains are converted to the log domain in place and
+// interpolated there; lags are interpolated and converted to Hz using
+// `sampling_rate_hz`. `log_old_gain` and `old_lag` carry the state over from
+// the previous call.
+// TODO(turajs): Be consistent with usage of `sampling_rate_hz` vs
+// `kSamplingFreqHz`.
+void GetSubframesPitchParameters(int sampling_rate_hz,
+ double* gains,
+ double* lags,
+ int num_in_frames,
+ int num_out_frames,
+ double* log_old_gain,
+ double* old_lag,
+ double* log_pitch_gain,
+ double* pitch_lag_hz);
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_VAD_PITCH_INTERNAL_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/vad/pitch_internal_unittest.cc b/third_party/libwebrtc/modules/audio_processing/vad/pitch_internal_unittest.cc
new file mode 100644
index 0000000000..c851421ba7
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/vad/pitch_internal_unittest.cc
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/vad/pitch_internal.h"
+
+#include <math.h>
+
+#include "test/gtest.h"
+
+namespace webrtc {
+
+TEST(PitchInternalTest, test) {
+ const int kSamplingRateHz = 8000;
+ const int kNumInputParameters = 4;
+ const int kNumOutputParameters = 3;
+ // Inputs
+ double log_old_gain = log(0.5);
+ double gains[] = {0.6, 0.2, 0.5, 0.4};
+
+ double old_lag = 70;
+ double lags[] = {90, 111, 122, 50};
+
+ // Expected outputs
+ double expected_log_pitch_gain[] = {-0.541212549898316, -1.45672279045507,
+ -0.80471895621705};
+ double expected_log_old_gain = log(gains[kNumInputParameters - 1]);
+
+ double expected_pitch_lag_hz[] = {92.3076923076923, 70.9010339734121,
+ 93.0232558139535};
+ double expected_old_lag = lags[kNumInputParameters - 1];
+
+ double log_pitch_gain[kNumOutputParameters];
+ double pitch_lag_hz[kNumOutputParameters];
+
+ GetSubframesPitchParameters(kSamplingRateHz, gains, lags, kNumInputParameters,
+ kNumOutputParameters, &log_old_gain, &old_lag,
+ log_pitch_gain, pitch_lag_hz);
+
+ for (int n = 0; n < kNumOutputParameters; n++) {
+ EXPECT_NEAR(pitch_lag_hz[n], expected_pitch_lag_hz[n], 1e-6);
+ EXPECT_NEAR(log_pitch_gain[n], expected_log_pitch_gain[n], 1e-8);
+ }
+ EXPECT_NEAR(old_lag, expected_old_lag, 1e-6);
+ EXPECT_NEAR(log_old_gain, expected_log_old_gain, 1e-8);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/vad/pole_zero_filter.cc b/third_party/libwebrtc/modules/audio_processing/vad/pole_zero_filter.cc
new file mode 100644
index 0000000000..e7a611309c
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/vad/pole_zero_filter.cc
@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/vad/pole_zero_filter.h"
+
+#include <string.h>
+
+#include <algorithm>
+
+namespace webrtc {
+
+PoleZeroFilter* PoleZeroFilter::Create(const float* numerator_coefficients,
+ size_t order_numerator,
+ const float* denominator_coefficients,
+ size_t order_denominator) {
+ if (order_numerator > kMaxFilterOrder ||
+ order_denominator > kMaxFilterOrder || denominator_coefficients[0] == 0 ||
+ numerator_coefficients == NULL || denominator_coefficients == NULL)
+ return NULL;
+ return new PoleZeroFilter(numerator_coefficients, order_numerator,
+ denominator_coefficients, order_denominator);
+}
+
+PoleZeroFilter::PoleZeroFilter(const float* numerator_coefficients,
+ size_t order_numerator,
+ const float* denominator_coefficients,
+ size_t order_denominator)
+ : past_input_(),
+ past_output_(),
+ numerator_coefficients_(),
+ denominator_coefficients_(),
+ order_numerator_(order_numerator),
+ order_denominator_(order_denominator),
+ highest_order_(std::max(order_denominator, order_numerator)) {
+ memcpy(numerator_coefficients_, numerator_coefficients,
+ sizeof(numerator_coefficients_[0]) * (order_numerator_ + 1));
+ memcpy(denominator_coefficients_, denominator_coefficients,
+ sizeof(denominator_coefficients_[0]) * (order_denominator_ + 1));
+
+ if (denominator_coefficients_[0] != 1) {
+ for (size_t n = 0; n <= order_numerator_; n++)
+ numerator_coefficients_[n] /= denominator_coefficients_[0];
+ for (size_t n = 0; n <= order_denominator_; n++)
+ denominator_coefficients_[n] /= denominator_coefficients_[0];
+ }
+}
+
+template <typename T>
+static float FilterArPast(const T* past,
+ size_t order,
+ const float* coefficients) {
+ float sum = 0.0f;
+ size_t past_index = order - 1;
+ for (size_t k = 1; k <= order; k++, past_index--)
+ sum += coefficients[k] * past[past_index];
+ return sum;
+}
+
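+// Filtering implements the direct-form I difference equation
+//   y[n] = sum_{k=0}^{B} b[k] * x[n-k] - sum_{k=1}^{A} a[k] * y[n-k],
+// where `b`/`a` are the numerator/denominator coefficients (normalized in the
+// constructor so that a[0] == 1) and B/A the corresponding filter orders.
+// FilterArPast() above computes one of the two sums over past samples.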
+int PoleZeroFilter::Filter(const int16_t* in,
+ size_t num_input_samples,
+ float* output) {
+ if (in == NULL || output == NULL)
+ return -1;
+ // Filter the first min(`num_input_samples`, `highest_order_`) samples, for
+ // which the required past signal is read from `past_input_`/`past_output_`.
+ const size_t k = std::min(num_input_samples, highest_order_);
+ size_t n;
+ for (n = 0; n < k; n++) {
+ output[n] = in[n] * numerator_coefficients_[0];
+ output[n] += FilterArPast(&past_input_[n], order_numerator_,
+ numerator_coefficients_);
+ output[n] -= FilterArPast(&past_output_[n], order_denominator_,
+ denominator_coefficients_);
+
+ past_input_[n + order_numerator_] = in[n];
+ past_output_[n + order_denominator_] = output[n];
+ }
+ if (highest_order_ < num_input_samples) {
+ for (size_t m = 0; n < num_input_samples; n++, m++) {
+ output[n] = in[n] * numerator_coefficients_[0];
+ output[n] +=
+ FilterArPast(&in[m], order_numerator_, numerator_coefficients_);
+ output[n] -= FilterArPast(&output[m], order_denominator_,
+ denominator_coefficients_);
+ }
+ // Record into the past signal.
+ memcpy(past_input_, &in[num_input_samples - order_numerator_],
+ sizeof(in[0]) * order_numerator_);
+ memcpy(past_output_, &output[num_input_samples - order_denominator_],
+ sizeof(output[0]) * order_denominator_);
+ } else {
+ // Odd case where the length of the input is shorter than the filter order.
+ memmove(past_input_, &past_input_[num_input_samples],
+ order_numerator_ * sizeof(past_input_[0]));
+ memmove(past_output_, &past_output_[num_input_samples],
+ order_denominator_ * sizeof(past_output_[0]));
+ }
+ return 0;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/vad/pole_zero_filter.h b/third_party/libwebrtc/modules/audio_processing/vad/pole_zero_filter.h
new file mode 100644
index 0000000000..11a05114d1
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/vad/pole_zero_filter.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_VAD_POLE_ZERO_FILTER_H_
+#define MODULES_AUDIO_PROCESSING_VAD_POLE_ZERO_FILTER_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+namespace webrtc {
+
+class PoleZeroFilter {
+ public:
+ ~PoleZeroFilter() {}
+
+ static PoleZeroFilter* Create(const float* numerator_coefficients,
+ size_t order_numerator,
+ const float* denominator_coefficients,
+ size_t order_denominator);
+
+ int Filter(const int16_t* in, size_t num_input_samples, float* output);
+
+ private:
+ PoleZeroFilter(const float* numerator_coefficients,
+ size_t order_numerator,
+ const float* denominator_coefficients,
+ size_t order_denominator);
+
+ static const int kMaxFilterOrder = 24;
+
+ int16_t past_input_[kMaxFilterOrder * 2];
+ float past_output_[kMaxFilterOrder * 2];
+
+ float numerator_coefficients_[kMaxFilterOrder + 1];
+ float denominator_coefficients_[kMaxFilterOrder + 1];
+
+ size_t order_numerator_;
+ size_t order_denominator_;
+ size_t highest_order_;
+};
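+
+// A minimal usage sketch (illustrative only; `in` is assumed to hold 80
+// samples, i.e. 10 ms at 8 kHz, and the coefficients come from
+// vad_audio_proc_internal.h):
+//
+//   std::unique_ptr<PoleZeroFilter> filter(PoleZeroFilter::Create(
+//       kCoeffNumerator, kFilterOrder, kCoeffDenominator, kFilterOrder));
+//   float out[80];
+//   if (!filter || filter->Filter(in, 80, out) != 0) {
+//     // Handle the error.
+//   }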
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_VAD_POLE_ZERO_FILTER_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/vad/pole_zero_filter_unittest.cc b/third_party/libwebrtc/modules/audio_processing/vad/pole_zero_filter_unittest.cc
new file mode 100644
index 0000000000..8088b40125
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/vad/pole_zero_filter_unittest.cc
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/vad/pole_zero_filter.h"
+
+#include <math.h>
+#include <stdio.h>
+
+#include <memory>
+
+#include "modules/audio_processing/vad/vad_audio_proc_internal.h"
+#include "test/gtest.h"
+#include "test/testsupport/file_utils.h"
+
+namespace webrtc {
+
+static const int kInputSamples = 50;
+
+static const int16_t kInput[kInputSamples] = {
+ -2136, -7116, 10715, 2464, 3164, 8139, 11393, 24013, -32117, -5544,
+ -27740, 10181, 14190, -24055, -15912, 17393, 6359, -9950, -13894, 32432,
+ -23944, 3437, -8381, 19768, 3087, -19795, -5920, 13310, 1407, 3876,
+ 4059, 3524, -23130, 19121, -27900, -24840, 4089, 21422, -3625, 3015,
+ -11236, 28856, 13424, 6571, -19761, -6361, 15821, -9469, 29727, 32229};
+
+static const float kReferenceOutput[kInputSamples] = {
+ -2082.230472f, -6878.572941f, 10697.090871f, 2358.373952f,
+ 2973.936512f, 7738.580650f, 10690.803213f, 22687.091576f,
+ -32676.684717f, -5879.621684f, -27359.297432f, 10368.735888f,
+ 13994.584604f, -23676.126249f, -15078.250390f, 17818.253338f,
+ 6577.743123f, -9498.369315f, -13073.651079f, 32460.026588f,
+ -23391.849347f, 3953.805667f, -7667.761363f, 19995.153447f,
+ 3185.575477f, -19207.365160f, -5143.103201f, 13756.317237f,
+ 1779.654794f, 4142.269755f, 4209.475034f, 3572.991789f,
+ -22509.089546f, 19307.878964f, -27060.439759f, -23319.042810f,
+ 5547.685267f, 22312.718676f, -2707.309027f, 3852.358490f,
+ -10135.510093f, 29241.509970f, 13394.397233f, 6340.721417f,
+ -19510.207905f, -5908.442086f, 15882.301634f, -9211.335255f,
+ 29253.056735f, 30874.443046f};
+
+class PoleZeroFilterTest : public ::testing::Test {
+ protected:
+ PoleZeroFilterTest()
+ : my_filter_(PoleZeroFilter::Create(kCoeffNumerator,
+ kFilterOrder,
+ kCoeffDenominator,
+ kFilterOrder)) {}
+
+ ~PoleZeroFilterTest() override {}
+
+ void FilterSubframes(int num_subframes);
+
+ private:
+ void TestClean();
+ std::unique_ptr<PoleZeroFilter> my_filter_;
+};
+
+void PoleZeroFilterTest::FilterSubframes(int num_subframes) {
+ float output[kInputSamples];
+ const int num_subframe_samples = kInputSamples / num_subframes;
+ EXPECT_EQ(num_subframe_samples * num_subframes, kInputSamples);
+
+ for (int n = 0; n < num_subframes; n++) {
+ my_filter_->Filter(&kInput[n * num_subframe_samples], num_subframe_samples,
+ &output[n * num_subframe_samples]);
+ }
+ for (int n = 0; n < kInputSamples; n++) {
+ EXPECT_NEAR(output[n], kReferenceOutput[n], 1);
+ }
+}
+
+TEST_F(PoleZeroFilterTest, OneSubframe) {
+ FilterSubframes(1);
+}
+
+TEST_F(PoleZeroFilterTest, TwoSubframes) {
+ FilterSubframes(2);
+}
+
+TEST_F(PoleZeroFilterTest, FiveSubframes) {
+ FilterSubframes(5);
+}
+
+TEST_F(PoleZeroFilterTest, TenSubframes) {
+ FilterSubframes(10);
+}
+
+TEST_F(PoleZeroFilterTest, TwentyFiveSubframes) {
+ FilterSubframes(25);
+}
+
+TEST_F(PoleZeroFilterTest, FiftySubframes) {
+ FilterSubframes(50);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/vad/standalone_vad.cc b/third_party/libwebrtc/modules/audio_processing/vad/standalone_vad.cc
new file mode 100644
index 0000000000..1397668eb4
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/vad/standalone_vad.cc
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/vad/standalone_vad.h"
+
+#include <string.h>
+
+#include "common_audio/vad/include/webrtc_vad.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+static const int kDefaultStandaloneVadMode = 3;
+
+StandaloneVad::StandaloneVad(VadInst* vad)
+ : vad_(vad), buffer_(), index_(0), mode_(kDefaultStandaloneVadMode) {}
+
+StandaloneVad::~StandaloneVad() {
+ WebRtcVad_Free(vad_);
+}
+
+StandaloneVad* StandaloneVad::Create() {
+ VadInst* vad = WebRtcVad_Create();
+ if (!vad)
+ return nullptr;
+
+ int err = WebRtcVad_Init(vad);
+ err |= WebRtcVad_set_mode(vad, kDefaultStandaloneVadMode);
+ if (err != 0) {
+ WebRtcVad_Free(vad);
+ return nullptr;
+ }
+ return new StandaloneVad(vad);
+}
+
+int StandaloneVad::AddAudio(const int16_t* data, size_t length) {
+ if (length != kLength10Ms)
+ return -1;
+
+ if (index_ + length > kLength10Ms * kMaxNum10msFrames) {
+ // Reset the buffer if it's full.
+ // TODO(ajm): Instead, consider just processing every 10 ms frame. Then we
+ // can forgo the buffering.
+ index_ = 0;
+ }
+
+ memcpy(&buffer_[index_], data, sizeof(int16_t) * length);
+ index_ += length;
+ return 0;
+}
+
+int StandaloneVad::GetActivity(double* p, size_t length_p) {
+ if (index_ == 0)
+ return -1;
+
+ const size_t num_frames = index_ / kLength10Ms;
+ if (num_frames > length_p)
+ return -1;
+ RTC_DCHECK_EQ(0, WebRtcVad_ValidRateAndFrameLength(kSampleRateHz, index_));
+
+ int activity = WebRtcVad_Process(vad_, kSampleRateHz, buffer_, index_);
+ if (activity < 0)
+ return -1;
+ else if (activity == 0)
+ p[0] = 0.01; // Arbitrary but small and non-zero.
+ else
+ p[0] = 0.5; // The neutral value when combined with other probabilities.
+ for (size_t n = 1; n < num_frames; n++)
+ p[n] = p[0];
+ // Reset the buffer to start from the beginning.
+ index_ = 0;
+ return activity;
+}
+
+int StandaloneVad::set_mode(int mode) {
+ if (mode < 0 || mode > 3)
+ return -1;
+ if (WebRtcVad_set_mode(vad_, mode) != 0)
+ return -1;
+
+ mode_ = mode;
+ return 0;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/vad/standalone_vad.h b/third_party/libwebrtc/modules/audio_processing/vad/standalone_vad.h
new file mode 100644
index 0000000000..b08463374e
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/vad/standalone_vad.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_VAD_STANDALONE_VAD_H_
+#define MODULES_AUDIO_PROCESSING_VAD_STANDALONE_VAD_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "common_audio/vad/include/webrtc_vad.h"
+#include "modules/audio_processing/vad/common.h"
+
+namespace webrtc {
+
+class StandaloneVad {
+ public:
+ static StandaloneVad* Create();
+ ~StandaloneVad();
+
+ // Outputs
+ // p: a buffer where probabilities are written to.
+ // length_p: number of elements of `p`.
+ //
+ // Return value:
+ // -1: if no audio is stored or the VAD returns an error.
+ // Otherwise the VAD decision is returned: 1 if active, 0 if passive.
+ // In case of error the content of `p` is unchanged.
+ //
+ // Note that due to a high false-positive rate (the VAD decision is active
+ // while the processed audio is just background noise), the stand-alone VAD
+ // is used as a one-sided indicator. The activity probability is 0.5 if the
+ // frame is classified as active, and 0.01 if the audio is classified as
+ // passive. In this way, when probabilities are combined, the effect of the
+ // stand-alone VAD is neutral if the input is classified as active.
+ int GetActivity(double* p, size_t length_p);
+
+ // Expecting 10 ms of 16 kHz audio to be pushed in.
+ int AddAudio(const int16_t* data, size_t length);
+
+ // Set aggressiveness of VAD, 0 is the least aggressive and 3 is the most
+ // aggressive mode. Returns -1 if the input is less than 0 or larger than 3,
+ // otherwise 0 is returned.
+ int set_mode(int mode);
+ // Get the aggressiveness of the current VAD.
+ int mode() const { return mode_; }
+
+ private:
+ explicit StandaloneVad(VadInst* vad);
+
+ static const size_t kMaxNum10msFrames = 3;
+
+ // TODO(turajs): Is there a way to use scoped-pointer here?
+ VadInst* vad_;
+ int16_t buffer_[kMaxNum10msFrames * kLength10Ms];
+ size_t index_;
+ int mode_;
+};
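+
+// A minimal usage sketch (illustrative only; `audio` is assumed to hold three
+// 10 ms frames of 16 kHz audio):
+//
+//   std::unique_ptr<StandaloneVad> vad(StandaloneVad::Create());
+//   for (size_t n = 0; n < 3; n++)
+//     vad->AddAudio(&audio[n * kLength10Ms], kLength10Ms);
+//   double p[3];
+//   int activity = vad->GetActivity(p, 3);  // 1 active, 0 passive, -1 error.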
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_VAD_STANDALONE_VAD_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/vad/standalone_vad_unittest.cc b/third_party/libwebrtc/modules/audio_processing/vad/standalone_vad_unittest.cc
new file mode 100644
index 0000000000..0fa2ed78b1
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/vad/standalone_vad_unittest.cc
@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/vad/standalone_vad.h"
+
+#include <string.h>
+
+#include <memory>
+
+#include "test/gtest.h"
+#include "test/testsupport/file_utils.h"
+
+namespace webrtc {
+
+TEST(StandaloneVadTest, Api) {
+ std::unique_ptr<StandaloneVad> vad(StandaloneVad::Create());
+ int16_t data[kLength10Ms] = {0};
+
+ // Valid frame length (for 32 kHz rate), but not what the VAD is expecting.
+ EXPECT_EQ(-1, vad->AddAudio(data, 320));
+
+ const size_t kMaxNumFrames = 3;
+ double p[kMaxNumFrames];
+ for (size_t n = 0; n < kMaxNumFrames; n++)
+ EXPECT_EQ(0, vad->AddAudio(data, kLength10Ms));
+
+ // Pretend `p` is shorter than it should be.
+ EXPECT_EQ(-1, vad->GetActivity(p, kMaxNumFrames - 1));
+
+ EXPECT_EQ(0, vad->GetActivity(p, kMaxNumFrames));
+
+ // Ask for activity when buffer is empty.
+ EXPECT_EQ(-1, vad->GetActivity(p, kMaxNumFrames));
+
+ // Should reset and result in one buffer.
+ for (size_t n = 0; n < kMaxNumFrames + 1; n++)
+ EXPECT_EQ(0, vad->AddAudio(data, kLength10Ms));
+ EXPECT_EQ(0, vad->GetActivity(p, 1));
+
+ // Wrong modes
+ EXPECT_EQ(-1, vad->set_mode(-1));
+ EXPECT_EQ(-1, vad->set_mode(4));
+
+ // Valid mode.
+ const int kMode = 2;
+ EXPECT_EQ(0, vad->set_mode(kMode));
+ EXPECT_EQ(kMode, vad->mode());
+}
+
+#if defined(WEBRTC_IOS)
+TEST(StandaloneVadTest, DISABLED_ActivityDetection) {
+#else
+TEST(StandaloneVadTest, ActivityDetection) {
+#endif
+ std::unique_ptr<StandaloneVad> vad(StandaloneVad::Create());
+ const size_t kDataLength = kLength10Ms;
+ int16_t data[kDataLength] = {0};
+
+ FILE* pcm_file =
+ fopen(test::ResourcePath("audio_processing/agc/agc_audio", "pcm").c_str(),
+ "rb");
+ ASSERT_TRUE(pcm_file != NULL);
+
+ FILE* reference_file = fopen(
+ test::ResourcePath("audio_processing/agc/agc_vad", "dat").c_str(), "rb");
+ ASSERT_TRUE(reference_file != NULL);
+
+ // Reference activities are prepared with 0 aggressiveness.
+ ASSERT_EQ(0, vad->set_mode(0));
+
+ // Stand-alone VAD can operate on 1, 2 or 3 frames of length 10 ms. The
+ // reference file is created for 30 ms frames.
+ const int kNumVadFramesToProcess = 3;
+ int num_frames = 0;
+ while (fread(data, sizeof(int16_t), kDataLength, pcm_file) == kDataLength) {
+ vad->AddAudio(data, kDataLength);
+ num_frames++;
+ if (num_frames == kNumVadFramesToProcess) {
+ num_frames = 0;
+ int reference_activity;
+ double p[kNumVadFramesToProcess];
+ EXPECT_EQ(1u, fread(&reference_activity, sizeof(reference_activity), 1,
+ reference_file));
+ int activity = vad->GetActivity(p, kNumVadFramesToProcess);
+ EXPECT_EQ(reference_activity, activity);
+ if (activity != 0) {
+ // When active, probabilities are set to 0.5.
+ for (int n = 0; n < kNumVadFramesToProcess; n++)
+ EXPECT_EQ(0.5, p[n]);
+ } else {
+ // When inactive, probabilities are set to 0.01.
+ for (int n = 0; n < kNumVadFramesToProcess; n++)
+ EXPECT_EQ(0.01, p[n]);
+ }
+ }
+ }
+ fclose(reference_file);
+ fclose(pcm_file);
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/vad/vad_audio_proc.cc b/third_party/libwebrtc/modules/audio_processing/vad/vad_audio_proc.cc
new file mode 100644
index 0000000000..aaf8214d7c
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/vad/vad_audio_proc.cc
@@ -0,0 +1,275 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/vad/vad_audio_proc.h"
+
+#include <math.h>
+#include <stdio.h>
+#include <string.h>
+
+#include "common_audio/third_party/ooura/fft_size_256/fft4g.h"
+#include "modules/audio_processing/vad/pitch_internal.h"
+#include "modules/audio_processing/vad/pole_zero_filter.h"
+#include "modules/audio_processing/vad/vad_audio_proc_internal.h"
+#include "rtc_base/checks.h"
+extern "C" {
+#include "modules/audio_coding/codecs/isac/main/source/filter_functions.h"
+#include "modules/audio_coding/codecs/isac/main/source/isac_vad.h"
+#include "modules/audio_coding/codecs/isac/main/source/pitch_estimator.h"
+#include "modules/audio_coding/codecs/isac/main/source/structs.h"
+}
+
+namespace webrtc {
+
+// The following structures are declared anonymous in iSAC's structs.h. To
+// forward declare them, we use this derived class trick.
+struct VadAudioProc::PitchAnalysisStruct : public ::PitchAnalysisStruct {};
+struct VadAudioProc::PreFiltBankstr : public ::PreFiltBankstr {};
+
+static constexpr float kFrequencyResolution =
+ kSampleRateHz / static_cast<float>(VadAudioProc::kDftSize);
+static constexpr int kSilenceRms = 5;
+
+// TODO(turajs): Make a Create or Init for VadAudioProc.
+VadAudioProc::VadAudioProc()
+ : audio_buffer_(),
+ num_buffer_samples_(kNumPastSignalSamples),
+ log_old_gain_(-2),
+ old_lag_(50), // Arbitrary but valid as pitch-lag (in samples).
+ pitch_analysis_handle_(new PitchAnalysisStruct),
+ pre_filter_handle_(new PreFiltBankstr),
+ high_pass_filter_(PoleZeroFilter::Create(kCoeffNumerator,
+ kFilterOrder,
+ kCoeffDenominator,
+ kFilterOrder)) {
+ static_assert(kNumPastSignalSamples + kNumSubframeSamples ==
+ sizeof(kLpcAnalWin) / sizeof(kLpcAnalWin[0]),
+ "lpc analysis window incorrect size");
+ static_assert(kLpcOrder + 1 == sizeof(kCorrWeight) / sizeof(kCorrWeight[0]),
+ "correlation weight incorrect size");
+
+ // TODO(turajs): Are we doing too much in the constructor?
+ float data[kDftSize];
+ // Make FFT to initialize.
+ ip_[0] = 0;
+ WebRtc_rdft(kDftSize, 1, data, ip_, w_fft_);
+ // TODO(turajs): Need to initialize high-pass filter.
+
+ // Initialize iSAC components.
+ WebRtcIsac_InitPreFilterbank(pre_filter_handle_.get());
+ WebRtcIsac_InitPitchAnalysis(pitch_analysis_handle_.get());
+}
+
+VadAudioProc::~VadAudioProc() {}
+
+void VadAudioProc::ResetBuffer() {
+ memcpy(audio_buffer_, &audio_buffer_[kNumSamplesToProcess],
+ sizeof(audio_buffer_[0]) * kNumPastSignalSamples);
+ num_buffer_samples_ = kNumPastSignalSamples;
+}
+
+int VadAudioProc::ExtractFeatures(const int16_t* frame,
+ size_t length,
+ AudioFeatures* features) {
+ features->num_frames = 0;
+ if (length != kNumSubframeSamples) {
+ return -1;
+ }
+
+ // High-pass filter to remove the DC component and very low frequency
+ // content. We have found that this high-pass filtering improves
+ // voiced/non-voiced classification.
+ if (high_pass_filter_->Filter(frame, kNumSubframeSamples,
+ &audio_buffer_[num_buffer_samples_]) != 0) {
+ return -1;
+ }
+
+ num_buffer_samples_ += kNumSubframeSamples;
+ if (num_buffer_samples_ < kBufferLength) {
+ return 0;
+ }
+ RTC_DCHECK_EQ(num_buffer_samples_, kBufferLength);
+ features->num_frames = kNum10msSubframes;
+ features->silence = false;
+
+ Rms(features->rms, kMaxNumFrames);
+ for (size_t i = 0; i < kNum10msSubframes; ++i) {
+ if (features->rms[i] < kSilenceRms) {
+ // PitchAnalysis can cause NaNs in the pitch gain if it's fed silence.
+ // Bail out here instead.
+ features->silence = true;
+ ResetBuffer();
+ return 0;
+ }
+ }
+
+ PitchAnalysis(features->log_pitch_gain, features->pitch_lag_hz,
+ kMaxNumFrames);
+ FindFirstSpectralPeaks(features->spectral_peak, kMaxNumFrames);
+ ResetBuffer();
+ return 0;
+}
+
+// Computes |kLpcOrder + 1| correlation coefficients.
+void VadAudioProc::SubframeCorrelation(double* corr,
+ size_t length_corr,
+ size_t subframe_index) {
+ RTC_DCHECK_GE(length_corr, kLpcOrder + 1);
+ double windowed_audio[kNumSubframeSamples + kNumPastSignalSamples];
+ size_t buffer_index = subframe_index * kNumSubframeSamples;
+
+ for (size_t n = 0; n < kNumSubframeSamples + kNumPastSignalSamples; n++)
+ windowed_audio[n] = audio_buffer_[buffer_index++] * kLpcAnalWin[n];
+
+ WebRtcIsac_AutoCorr(corr, windowed_audio,
+ kNumSubframeSamples + kNumPastSignalSamples, kLpcOrder);
+}
+
+// Compute `kNum10msSubframes` sets of LPC coefficients, one per 10 ms input.
+// The analysis window is 15 ms long and it is centered on the first half of
+// each 10ms sub-frame. This is equivalent to computing LPC coefficients for the
+// first half of each 10 ms subframe.
+void VadAudioProc::GetLpcPolynomials(double* lpc, size_t length_lpc) {
+ RTC_DCHECK_GE(length_lpc, kNum10msSubframes * (kLpcOrder + 1));
+ double corr[kLpcOrder + 1];
+ double reflec_coeff[kLpcOrder];
+ for (size_t i = 0, offset_lpc = 0; i < kNum10msSubframes;
+ i++, offset_lpc += kLpcOrder + 1) {
+ SubframeCorrelation(corr, kLpcOrder + 1, i);
+ // Scaling corr[0] makes Levinson-Durbin a bit more stable.
+ corr[0] *= 1.0001;
+ for (size_t k = 0; k < kLpcOrder + 1; k++) {
+ corr[k] *= kCorrWeight[k];
+ }
+ WebRtcIsac_LevDurb(&lpc[offset_lpc], reflec_coeff, corr, kLpcOrder);
+ }
+}
+
+// Fit a second-order curve to these 3 points and find the location of its
+// extremum. The points are inverted before curve fitting.
+static float QuadraticInterpolation(float prev_val,
+ float curr_val,
+ float next_val) {
+ // Doing the interpolation in |1 / A(z)|^2.
+ float fractional_index = 0;
+ next_val = 1.0f / next_val;
+ prev_val = 1.0f / prev_val;
+ curr_val = 1.0f / curr_val;
+
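+ // The vertex of the parabola through (-1, prev), (0, curr) and (1, next)
+ // lies at x = (prev - next) / (2 * (prev + next - 2 * curr)).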
+ fractional_index =
+ -(next_val - prev_val) * 0.5f / (next_val + prev_val - 2.f * curr_val);
+ RTC_DCHECK_LT(fabs(fractional_index), 1);
+ return fractional_index;
+}
+
+// 1 / A(z), where A(z) is defined by `lpc`, is a model of the spectral
+// envelope of the input signal. The local maximum of the spectral envelope
+// corresponds to the local minimum of A(z). Working on A(z) directly saves
+// one inversion. Furthermore, we find the first local minimum of the
+// magnitude squared of A(z), which saves a square root.
+void VadAudioProc::FindFirstSpectralPeaks(double* f_peak,
+ size_t length_f_peak) {
+ RTC_DCHECK_GE(length_f_peak, kNum10msSubframes);
+ double lpc[kNum10msSubframes * (kLpcOrder + 1)];
+ // For all sub-frames.
+ GetLpcPolynomials(lpc, kNum10msSubframes * (kLpcOrder + 1));
+
+ const size_t kNumDftCoefficients = kDftSize / 2 + 1;
+ float data[kDftSize];
+
+ for (size_t i = 0; i < kNum10msSubframes; i++) {
+ // Convert to float with zero pad.
+ memset(data, 0, sizeof(data));
+ for (size_t n = 0; n < kLpcOrder + 1; n++) {
+ data[n] = static_cast<float>(lpc[i * (kLpcOrder + 1) + n]);
+ }
+ // Transform to frequency domain.
+ WebRtc_rdft(kDftSize, 1, data, ip_, w_fft_);
+
+ size_t index_peak = 0;
+ float prev_magn_sqr = data[0] * data[0];
+ float curr_magn_sqr = data[2] * data[2] + data[3] * data[3];
+ float next_magn_sqr;
+ bool found_peak = false;
+ for (size_t n = 2; n < kNumDftCoefficients - 1; n++) {
+ next_magn_sqr =
+ data[2 * n] * data[2 * n] + data[2 * n + 1] * data[2 * n + 1];
+ if (curr_magn_sqr < prev_magn_sqr && curr_magn_sqr < next_magn_sqr) {
+ found_peak = true;
+ index_peak = n - 1;
+ break;
+ }
+ prev_magn_sqr = curr_magn_sqr;
+ curr_magn_sqr = next_magn_sqr;
+ }
+ float fractional_index = 0;
+ if (!found_peak) {
+ // Checking if |kNumDftCoefficients - 1| is the local minimum.
+ next_magn_sqr = data[1] * data[1];
+ if (curr_magn_sqr < prev_magn_sqr && curr_magn_sqr < next_magn_sqr) {
+ index_peak = kNumDftCoefficients - 1;
+ }
+ } else {
+ // A peak is found, do a simple quadratic interpolation to get a more
+ // accurate estimate of the peak location.
+ fractional_index =
+ QuadraticInterpolation(prev_magn_sqr, curr_magn_sqr, next_magn_sqr);
+ }
+ f_peak[i] = (index_peak + fractional_index) * kFrequencyResolution;
+ }
+}
+
+// Using iSAC functions to estimate pitch gains & lags.
+void VadAudioProc::PitchAnalysis(double* log_pitch_gains,
+ double* pitch_lags_hz,
+ size_t length) {
+ // TODO(turajs): This and the next two constants can be "imported" from
+ // iSAC.
+ RTC_DCHECK_GE(length, kNum10msSubframes);
+ const int kNumPitchSubframes = 4;
+ double gains[kNumPitchSubframes];
+ double lags[kNumPitchSubframes];
+
+ const int kNumSubbandFrameSamples = 240;
+ const int kNumLookaheadSamples = 24;
+
+ float lower[kNumSubbandFrameSamples];
+ float upper[kNumSubbandFrameSamples];
+ double lower_lookahead[kNumSubbandFrameSamples];
+ double upper_lookahead[kNumSubbandFrameSamples];
+ double lower_lookahead_pre_filter[kNumSubbandFrameSamples +
+ kNumLookaheadSamples];
+
+ // Split the signal into lower and upper bands.
+ WebRtcIsac_SplitAndFilterFloat(&audio_buffer_[kNumPastSignalSamples], lower,
+ upper, lower_lookahead, upper_lookahead,
+ pre_filter_handle_.get());
+ WebRtcIsac_PitchAnalysis(lower_lookahead, lower_lookahead_pre_filter,
+ pitch_analysis_handle_.get(), lags, gains);
+
+ // Lags are computed on lower-band signal with sampling rate half of the
+ // input signal.
+ GetSubframesPitchParameters(
+ kSampleRateHz / 2, gains, lags, kNumPitchSubframes, kNum10msSubframes,
+ &log_old_gain_, &old_lag_, log_pitch_gains, pitch_lags_hz);
+}
+
+void VadAudioProc::Rms(double* rms, size_t length_rms) {
+ RTC_DCHECK_GE(length_rms, kNum10msSubframes);
+ size_t offset = kNumPastSignalSamples;
+ for (size_t i = 0; i < kNum10msSubframes; i++) {
+ rms[i] = 0;
+ for (size_t n = 0; n < kNumSubframeSamples; n++, offset++)
+ rms[i] += audio_buffer_[offset] * audio_buffer_[offset];
+ rms[i] = sqrt(rms[i] / kNumSubframeSamples);
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/vad/vad_audio_proc.h b/third_party/libwebrtc/modules/audio_processing/vad/vad_audio_proc.h
new file mode 100644
index 0000000000..cbdd707129
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/vad/vad_audio_proc.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_VAD_VAD_AUDIO_PROC_H_
+#define MODULES_AUDIO_PROCESSING_VAD_VAD_AUDIO_PROC_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <memory>
+
+#include "modules/audio_processing/vad/common.h" // AudioFeatures, kSampleR...
+
+namespace webrtc {
+
+class PoleZeroFilter;
+
+class VadAudioProc {
+ public:
+ // Forward declare iSAC structs.
+ struct PitchAnalysisStruct;
+ struct PreFiltBankstr;
+
+ VadAudioProc();
+ ~VadAudioProc();
+
+ int ExtractFeatures(const int16_t* audio_frame,
+ size_t length,
+ AudioFeatures* audio_features);
+
+ static constexpr size_t kDftSize = 512;
+
+ private:
+ void PitchAnalysis(double* pitch_gains, double* pitch_lags_hz, size_t length);
+ void SubframeCorrelation(double* corr,
+ size_t length_corr,
+ size_t subframe_index);
+ void GetLpcPolynomials(double* lpc, size_t length_lpc);
+ void FindFirstSpectralPeaks(double* f_peak, size_t length_f_peak);
+ void Rms(double* rms, size_t length_rms);
+ void ResetBuffer();
+
+ // To compute the spectral peak we perform LPC analysis to get the spectral
+ // envelope. For every 30 ms we compute 3 spectral peaks, therefore 3 LPC
+ // analyses. LPC is computed over 15 ms of windowed audio: for every 10 ms
+ // sub-frame we need 5 ms of past signal to create the input of the LPC
+ // analysis.
+ static constexpr size_t kNumPastSignalSamples =
+ static_cast<size_t>(kSampleRateHz / 200);
+
+ // TODO(turajs): Maybe define this at a higher level (maybe as an enum) so
+ // that all the code recognizes it as "no-error".
+ static constexpr int kNoError = 0;
+
+ static constexpr size_t kNum10msSubframes = 3;
+ static constexpr size_t kNumSubframeSamples =
+ static_cast<size_t>(kSampleRateHz / 100);
+ // Samples in 30 ms @ given sampling rate.
+ static constexpr size_t kNumSamplesToProcess =
+ size_t{kNum10msSubframes} * kNumSubframeSamples;
+ static constexpr size_t kBufferLength =
+ size_t{kNumPastSignalSamples} + kNumSamplesToProcess;
+ static constexpr size_t kIpLength = kDftSize >> 1;
+ static constexpr size_t kWLength = kDftSize >> 1;
+ static constexpr size_t kLpcOrder = 16;
+
+ size_t ip_[kIpLength];
+ float w_fft_[kWLength];
+
+ // A buffer of 5 ms (past audio) + 30 ms (one iSAC frame).
+ float audio_buffer_[kBufferLength];
+ size_t num_buffer_samples_;
+
+ double log_old_gain_;
+ double old_lag_;
+
+ std::unique_ptr<PitchAnalysisStruct> pitch_analysis_handle_;
+ std::unique_ptr<PreFiltBankstr> pre_filter_handle_;
+ std::unique_ptr<PoleZeroFilter> high_pass_filter_;
+};
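+
+// A minimal usage sketch (illustrative only; `frame` is assumed to hold one
+// 10 ms frame of audio at kSampleRateHz). Features become available once
+// three 10 ms frames have been buffered:
+//
+//   VadAudioProc proc;
+//   AudioFeatures features;
+//   proc.ExtractFeatures(frame, kLength10Ms, &features);
+//   if (features.num_frames > 0 && !features.silence) {
+//     // Use features.log_pitch_gain, features.spectral_peak, ...
+//   }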
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_VAD_VAD_AUDIO_PROC_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/vad/vad_audio_proc_internal.h b/third_party/libwebrtc/modules/audio_processing/vad/vad_audio_proc_internal.h
new file mode 100644
index 0000000000..915524f474
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/vad/vad_audio_proc_internal.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_VAD_VAD_AUDIO_PROC_INTERNAL_H_
+#define MODULES_AUDIO_PROCESSING_VAD_VAD_AUDIO_PROC_INTERNAL_H_
+
+namespace webrtc {
+
+// These values should match MATLAB counterparts for unit-tests to pass.
+static const double kCorrWeight[] = {
+ 1.000000, 0.985000, 0.970225, 0.955672, 0.941337, 0.927217,
+ 0.913308, 0.899609, 0.886115, 0.872823, 0.859730, 0.846834,
+ 0.834132, 0.821620, 0.809296, 0.797156, 0.785199};
+
+static const double kLpcAnalWin[] = {
+ 0.00000000, 0.01314436, 0.02628645, 0.03942400, 0.05255473, 0.06567639,
+ 0.07878670, 0.09188339, 0.10496421, 0.11802689, 0.13106918, 0.14408883,
+ 0.15708358, 0.17005118, 0.18298941, 0.19589602, 0.20876878, 0.22160547,
+ 0.23440387, 0.24716177, 0.25987696, 0.27254725, 0.28517045, 0.29774438,
+ 0.31026687, 0.32273574, 0.33514885, 0.34750406, 0.35979922, 0.37203222,
+ 0.38420093, 0.39630327, 0.40833713, 0.42030043, 0.43219112, 0.44400713,
+ 0.45574642, 0.46740697, 0.47898676, 0.49048379, 0.50189608, 0.51322164,
+ 0.52445853, 0.53560481, 0.54665854, 0.55761782, 0.56848075, 0.57924546,
+ 0.58991008, 0.60047278, 0.61093173, 0.62128512, 0.63153117, 0.64166810,
+ 0.65169416, 0.66160761, 0.67140676, 0.68108990, 0.69065536, 0.70010148,
+ 0.70942664, 0.71862923, 0.72770765, 0.73666033, 0.74548573, 0.75418233,
+ 0.76274862, 0.77118312, 0.77948437, 0.78765094, 0.79568142, 0.80357442,
+ 0.81132858, 0.81894256, 0.82641504, 0.83374472, 0.84093036, 0.84797069,
+ 0.85486451, 0.86161063, 0.86820787, 0.87465511, 0.88095122, 0.88709512,
+ 0.89308574, 0.89892206, 0.90460306, 0.91012776, 0.91549520, 0.92070447,
+ 0.92575465, 0.93064488, 0.93537432, 0.93994213, 0.94434755, 0.94858979,
+ 0.95266814, 0.95658189, 0.96033035, 0.96391289, 0.96732888, 0.97057773,
+ 0.97365889, 0.97657181, 0.97931600, 0.98189099, 0.98429632, 0.98653158,
+ 0.98859639, 0.99049038, 0.99221324, 0.99376466, 0.99514438, 0.99635215,
+ 0.99738778, 0.99825107, 0.99894188, 0.99946010, 0.99980562, 0.99997840,
+ 0.99997840, 0.99980562, 0.99946010, 0.99894188, 0.99825107, 0.99738778,
+ 0.99635215, 0.99514438, 0.99376466, 0.99221324, 0.99049038, 0.98859639,
+ 0.98653158, 0.98429632, 0.98189099, 0.97931600, 0.97657181, 0.97365889,
+ 0.97057773, 0.96732888, 0.96391289, 0.96033035, 0.95658189, 0.95266814,
+ 0.94858979, 0.94434755, 0.93994213, 0.93537432, 0.93064488, 0.92575465,
+ 0.92070447, 0.91549520, 0.91012776, 0.90460306, 0.89892206, 0.89308574,
+ 0.88709512, 0.88095122, 0.87465511, 0.86820787, 0.86161063, 0.85486451,
+ 0.84797069, 0.84093036, 0.83374472, 0.82641504, 0.81894256, 0.81132858,
+ 0.80357442, 0.79568142, 0.78765094, 0.77948437, 0.77118312, 0.76274862,
+ 0.75418233, 0.74548573, 0.73666033, 0.72770765, 0.71862923, 0.70942664,
+ 0.70010148, 0.69065536, 0.68108990, 0.67140676, 0.66160761, 0.65169416,
+ 0.64166810, 0.63153117, 0.62128512, 0.61093173, 0.60047278, 0.58991008,
+ 0.57924546, 0.56848075, 0.55761782, 0.54665854, 0.53560481, 0.52445853,
+ 0.51322164, 0.50189608, 0.49048379, 0.47898676, 0.46740697, 0.45574642,
+ 0.44400713, 0.43219112, 0.42030043, 0.40833713, 0.39630327, 0.38420093,
+ 0.37203222, 0.35979922, 0.34750406, 0.33514885, 0.32273574, 0.31026687,
+ 0.29774438, 0.28517045, 0.27254725, 0.25987696, 0.24716177, 0.23440387,
+ 0.22160547, 0.20876878, 0.19589602, 0.18298941, 0.17005118, 0.15708358,
+ 0.14408883, 0.13106918, 0.11802689, 0.10496421, 0.09188339, 0.07878670,
+ 0.06567639, 0.05255473, 0.03942400, 0.02628645, 0.01314436, 0.00000000};
+
+static const size_t kFilterOrder = 2;
+static const float kCoeffNumerator[kFilterOrder + 1] = {0.974827f, -1.949650f,
+ 0.974827f};
+static const float kCoeffDenominator[kFilterOrder + 1] = {1.0f, -1.971999f,
+ 0.972457f};
+
+static_assert(kFilterOrder + 1 ==
+ sizeof(kCoeffNumerator) / sizeof(kCoeffNumerator[0]),
+ "numerator coefficients incorrect size");
+static_assert(kFilterOrder + 1 ==
+ sizeof(kCoeffDenominator) / sizeof(kCoeffDenominator[0]),
+ "denominator coefficients incorrect size");
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_VAD_VAD_AUDIO_PROC_INTERNAL_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/vad/vad_audio_proc_unittest.cc b/third_party/libwebrtc/modules/audio_processing/vad/vad_audio_proc_unittest.cc
new file mode 100644
index 0000000000..0afed84c35
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/vad/vad_audio_proc_unittest.cc
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+// We don't test the values of pitch gains and lags as they are created by
+// iSAC routines. However, interpolation of pitch gains and lags is done in a
+// separate class and has its own unit-test.
+
+#include "modules/audio_processing/vad/vad_audio_proc.h"
+
+#include <math.h>
+#include <stdio.h>
+
+#include <string>
+
+#include "modules/audio_processing/vad/common.h"
+#include "test/gtest.h"
+#include "test/testsupport/file_utils.h"
+
+namespace webrtc {
+
+TEST(AudioProcessingTest, DISABLED_ComputingFirstSpectralPeak) {
+ VadAudioProc audioproc;
+
+ std::string peak_file_name =
+ test::ResourcePath("audio_processing/agc/agc_spectral_peak", "dat");
+ FILE* peak_file = fopen(peak_file_name.c_str(), "rb");
+ ASSERT_TRUE(peak_file != NULL);
+
+ std::string pcm_file_name =
+ test::ResourcePath("audio_processing/agc/agc_audio", "pcm");
+ FILE* pcm_file = fopen(pcm_file_name.c_str(), "rb");
+ ASSERT_TRUE(pcm_file != NULL);
+
+ // Read 10 ms audio in each iteration.
+ const size_t kDataLength = kLength10Ms;
+ int16_t data[kDataLength] = {0};
+ AudioFeatures features;
+ double sp[kMaxNumFrames];
+ while (fread(data, sizeof(int16_t), kDataLength, pcm_file) == kDataLength) {
+ audioproc.ExtractFeatures(data, kDataLength, &features);
+ if (features.num_frames > 0) {
+ ASSERT_LT(features.num_frames, kMaxNumFrames);
+ // Read reference values.
+ const size_t num_frames = features.num_frames;
+ ASSERT_EQ(num_frames, fread(sp, sizeof(sp[0]), num_frames, peak_file));
+ for (size_t n = 0; n < features.num_frames; n++)
+ EXPECT_NEAR(features.spectral_peak[n], sp[n], 3);
+ }
+ }
+
+ fclose(peak_file);
+ fclose(pcm_file);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/vad/vad_circular_buffer.cc b/third_party/libwebrtc/modules/audio_processing/vad/vad_circular_buffer.cc
new file mode 100644
index 0000000000..31f14d7f64
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/vad/vad_circular_buffer.cc
@@ -0,0 +1,135 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/vad/vad_circular_buffer.h"
+
+#include <stdlib.h>
+
+namespace webrtc {
+
+VadCircularBuffer::VadCircularBuffer(int buffer_size)
+ : buffer_(new double[buffer_size]),
+ is_full_(false),
+ index_(0),
+ buffer_size_(buffer_size),
+ sum_(0) {}
+
+VadCircularBuffer::~VadCircularBuffer() {}
+
+void VadCircularBuffer::Reset() {
+ is_full_ = false;
+ index_ = 0;
+ sum_ = 0;
+}
+
+VadCircularBuffer* VadCircularBuffer::Create(int buffer_size) {
+ if (buffer_size <= 0)
+ return NULL;
+ return new VadCircularBuffer(buffer_size);
+}
+
+double VadCircularBuffer::Oldest() const {
+ if (!is_full_)
+ return buffer_[0];
+ else
+ return buffer_[index_];
+}
+
+double VadCircularBuffer::Mean() {
+ double m;
+ if (is_full_) {
+ m = sum_ / buffer_size_;
+ } else {
+ if (index_ > 0)
+ m = sum_ / index_;
+ else
+ m = 0;
+ }
+ return m;
+}
+
+void VadCircularBuffer::Insert(double value) {
+ if (is_full_) {
+ sum_ -= buffer_[index_];
+ }
+ sum_ += value;
+ buffer_[index_] = value;
+ index_++;
+ if (index_ >= buffer_size_) {
+ is_full_ = true;
+ index_ = 0;
+ }
+}
+
+int VadCircularBuffer::BufferLevel() {
+ if (is_full_)
+ return buffer_size_;
+ return index_;
+}
+
+int VadCircularBuffer::Get(int index, double* value) const {
+ int err = ConvertToLinearIndex(&index);
+ if (err < 0)
+ return -1;
+ *value = buffer_[index];
+ return 0;
+}
+
+int VadCircularBuffer::Set(int index, double value) {
+ int err = ConvertToLinearIndex(&index);
+ if (err < 0)
+ return -1;
+
+ sum_ -= buffer_[index];
+ buffer_[index] = value;
+ sum_ += value;
+ return 0;
+}
+
+int VadCircularBuffer::ConvertToLinearIndex(int* index) const {
+ if (*index < 0 || *index >= buffer_size_)
+ return -1;
+
+ if (!is_full_ && *index >= index_)
+ return -1;
+
+ *index = index_ - 1 - *index;
+ if (*index < 0)
+ *index += buffer_size_;
+ return 0;
+}
+
+int VadCircularBuffer::RemoveTransient(int width_threshold,
+ double val_threshold) {
+ if (!is_full_ && index_ < width_threshold + 2)
+ return 0;
+
+ int index_1 = 0;
+ int index_2 = width_threshold + 1;
+ double v = 0;
+ if (Get(index_1, &v) < 0)
+ return -1;
+ if (v < val_threshold) {
+ Set(index_1, 0);
+ int index;
+ for (index = index_2; index > index_1; index--) {
+ if (Get(index, &v) < 0)
+ return -1;
+ if (v < val_threshold)
+ break;
+ }
+ for (; index > index_1; index--) {
+ if (Set(index, 0.0) < 0)
+ return -1;
+ }
+ }
+ return 0;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/vad/vad_circular_buffer.h b/third_party/libwebrtc/modules/audio_processing/vad/vad_circular_buffer.h
new file mode 100644
index 0000000000..c1806f9e83
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/vad/vad_circular_buffer.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_VAD_VAD_CIRCULAR_BUFFER_H_
+#define MODULES_AUDIO_PROCESSING_VAD_VAD_CIRCULAR_BUFFER_H_
+
+#include <memory>
+
+namespace webrtc {
+
+// A circular buffer tailored to the needs of this project. It stores the last
+// K samples of the input and keeps track of their mean.
+//
+// It is used in class "PitchBasedVad" to keep track of posterior
+// probabilities in the past few seconds. The posterior probabilities are used
+// to recursively update prior probabilities.
+class VadCircularBuffer {
+ public:
+ static VadCircularBuffer* Create(int buffer_size);
+ ~VadCircularBuffer();
+
+ // If buffer is wrapped around.
+ bool is_full() const { return is_full_; }
+ // Get the oldest entry in the buffer.
+ double Oldest() const;
+ // Insert new value into the buffer.
+ void Insert(double value);
+ // Reset buffer, forget the past, start fresh.
+ void Reset();
+
+ // The mean value of the elements in the buffer. The return value is zero if
+ // buffer is empty, i.e. no value is inserted.
+ double Mean();
+ // Remove transients. If the values exceed `val_threshold` for a period
+ // shorter than or equal to `width_threshold`, then that period is considered
+ // transient and set to zero.
+ int RemoveTransient(int width_threshold, double val_threshold);
+
+ private:
+ explicit VadCircularBuffer(int buffer_size);
+  // Get previous values. `index = 0` corresponds to the most recent
+  // insertion, `index = 1` is the one before the most recent insertion, and
+  // so on.
+ int Get(int index, double* value) const;
+ // Set a given position to `value`. `index` is interpreted as above.
+ int Set(int index, double value);
+ // Return the number of valid elements in the buffer.
+ int BufferLevel();
+
+  // Convert an index as interpreted by Get() to the corresponding linear
+  // index into `buffer_`.
+ int ConvertToLinearIndex(int* index) const;
+
+ std::unique_ptr<double[]> buffer_;
+ bool is_full_;
+ int index_;
+ int buffer_size_;
+ double sum_;
+};
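+
+// Usage (illustrative sketch):
+//   std::unique_ptr<VadCircularBuffer> buffer(VadCircularBuffer::Create(10));
+//   buffer->Insert(0.7);
+//   buffer->Insert(0.4);
+//   double mean = buffer->Mean();  // 0.55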
+
+} // namespace webrtc
+#endif // MODULES_AUDIO_PROCESSING_VAD_VAD_CIRCULAR_BUFFER_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/vad/vad_circular_buffer_unittest.cc b/third_party/libwebrtc/modules/audio_processing/vad/vad_circular_buffer_unittest.cc
new file mode 100644
index 0000000000..efbd70d9d9
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/vad/vad_circular_buffer_unittest.cc
@@ -0,0 +1,134 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/vad/vad_circular_buffer.h"
+
+#include <stdio.h>
+
+#include <memory>
+
+#include "test/gtest.h"
+
+namespace webrtc {
+
+static const int kWidthThreshold = 7;
+static const double kValThreshold = 1.0;
+static const int kLongBuffSize = 100;
+static const int kShortBuffSize = 10;
+
+static void InsertSequentially(int k, VadCircularBuffer* circular_buffer) {
+ double mean_val;
+ for (int n = 1; n <= k; n++) {
+    EXPECT_FALSE(circular_buffer->is_full());
+ circular_buffer->Insert(n);
+ mean_val = circular_buffer->Mean();
+ EXPECT_EQ((n + 1.0) / 2., mean_val);
+ }
+}
+
+static void Insert(double value,
+ int num_insertion,
+ VadCircularBuffer* circular_buffer) {
+ for (int n = 0; n < num_insertion; n++)
+ circular_buffer->Insert(value);
+}
+
+static void InsertZeros(int num_zeros, VadCircularBuffer* circular_buffer) {
+ Insert(0.0, num_zeros, circular_buffer);
+}
+
+TEST(VadCircularBufferTest, GeneralTest) {
+ std::unique_ptr<VadCircularBuffer> circular_buffer(
+ VadCircularBuffer::Create(kShortBuffSize));
+ double mean_val;
+
+ // Mean should return zero if nothing is inserted.
+ mean_val = circular_buffer->Mean();
+ EXPECT_DOUBLE_EQ(0.0, mean_val);
+ InsertSequentially(kShortBuffSize, circular_buffer.get());
+
+ // Should be full.
+ EXPECT_TRUE(circular_buffer->is_full());
+ // Correct update after being full.
+ for (int n = 1; n < kShortBuffSize; n++) {
+ circular_buffer->Insert(n);
+ mean_val = circular_buffer->Mean();
+ EXPECT_DOUBLE_EQ((kShortBuffSize + 1.) / 2., mean_val);
+ EXPECT_TRUE(circular_buffer->is_full());
+ }
+
+ // Check reset. This should be like starting fresh.
+ circular_buffer->Reset();
+ mean_val = circular_buffer->Mean();
+ EXPECT_DOUBLE_EQ(0, mean_val);
+ InsertSequentially(kShortBuffSize, circular_buffer.get());
+ EXPECT_TRUE(circular_buffer->is_full());
+}
+
+TEST(VadCircularBufferTest, TransientsRemoval) {
+ std::unique_ptr<VadCircularBuffer> circular_buffer(
+ VadCircularBuffer::Create(kLongBuffSize));
+ // Let the first transient be in wrap-around.
+ InsertZeros(kLongBuffSize - kWidthThreshold / 2, circular_buffer.get());
+
+ double push_val = kValThreshold;
+ double mean_val;
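+  // Each iteration pushes k samples at the threshold value followed by a
+  // zero; RemoveTransient() should wipe the run and restore a zero mean.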
+ for (int k = kWidthThreshold; k >= 1; k--) {
+ Insert(push_val, k, circular_buffer.get());
+ circular_buffer->Insert(0);
+ mean_val = circular_buffer->Mean();
+ EXPECT_DOUBLE_EQ(k * push_val / kLongBuffSize, mean_val);
+ circular_buffer->RemoveTransient(kWidthThreshold, kValThreshold);
+ mean_val = circular_buffer->Mean();
+ EXPECT_DOUBLE_EQ(0, mean_val);
+ }
+}
+
+TEST(VadCircularBufferTest, TransientDetection) {
+ std::unique_ptr<VadCircularBuffer> circular_buffer(
+ VadCircularBuffer::Create(kLongBuffSize));
+ // Let the first transient be in wrap-around.
+ int num_insertion = kLongBuffSize - kWidthThreshold / 2;
+ InsertZeros(num_insertion, circular_buffer.get());
+
+ double push_val = 2;
+ // This is longer than a transient and shouldn't be removed.
+ int num_non_zero_elements = kWidthThreshold + 1;
+ Insert(push_val, num_non_zero_elements, circular_buffer.get());
+
+ double mean_val = circular_buffer->Mean();
+ EXPECT_DOUBLE_EQ(num_non_zero_elements * push_val / kLongBuffSize, mean_val);
+ circular_buffer->Insert(0);
+ EXPECT_EQ(0,
+ circular_buffer->RemoveTransient(kWidthThreshold, kValThreshold));
+ mean_val = circular_buffer->Mean();
+ EXPECT_DOUBLE_EQ(num_non_zero_elements * push_val / kLongBuffSize, mean_val);
+
+  // A transient right after a non-transient should be removed, leaving the
+  // mean unchanged.
+ num_insertion = 3;
+ Insert(push_val, num_insertion, circular_buffer.get());
+ circular_buffer->Insert(0);
+ EXPECT_EQ(0,
+ circular_buffer->RemoveTransient(kWidthThreshold, kValThreshold));
+ mean_val = circular_buffer->Mean();
+ EXPECT_DOUBLE_EQ(num_non_zero_elements * push_val / kLongBuffSize, mean_val);
+
+  // The last input exceeds the threshold; although the sequence is short, it
+  // shouldn't be considered a transient.
+ Insert(push_val, num_insertion, circular_buffer.get());
+ num_non_zero_elements += num_insertion;
+ EXPECT_EQ(0,
+ circular_buffer->RemoveTransient(kWidthThreshold, kValThreshold));
+ mean_val = circular_buffer->Mean();
+ EXPECT_DOUBLE_EQ(num_non_zero_elements * push_val / kLongBuffSize, mean_val);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/vad/vad_gn/moz.build b/third_party/libwebrtc/modules/audio_processing/vad/vad_gn/moz.build
new file mode 100644
index 0000000000..8c94a1281f
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/vad/vad_gn/moz.build
@@ -0,0 +1,219 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/audio_processing/vad/gmm.cc",
+ "/third_party/libwebrtc/modules/audio_processing/vad/pitch_based_vad.cc",
+ "/third_party/libwebrtc/modules/audio_processing/vad/pitch_internal.cc",
+ "/third_party/libwebrtc/modules/audio_processing/vad/pole_zero_filter.cc",
+ "/third_party/libwebrtc/modules/audio_processing/vad/standalone_vad.cc",
+ "/third_party/libwebrtc/modules/audio_processing/vad/vad_audio_proc.cc",
+ "/third_party/libwebrtc/modules/audio_processing/vad/vad_circular_buffer.cc",
+ "/third_party/libwebrtc/modules/audio_processing/vad/voice_activity_detector.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("vad_gn")
diff --git a/third_party/libwebrtc/modules/audio_processing/vad/voice_activity_detector.cc b/third_party/libwebrtc/modules/audio_processing/vad/voice_activity_detector.cc
new file mode 100644
index 0000000000..02023d6a72
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/vad/voice_activity_detector.cc
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/vad/voice_activity_detector.h"
+
+#include <algorithm>
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace {
+
+const size_t kNumChannels = 1;
+
+const double kDefaultVoiceValue = 1.0;
+const double kNeutralProbability = 0.5;
+const double kLowProbability = 0.01;
+
+} // namespace
+
+VoiceActivityDetector::VoiceActivityDetector()
+ : last_voice_probability_(kDefaultVoiceValue),
+ standalone_vad_(StandaloneVad::Create()) {}
+
+VoiceActivityDetector::~VoiceActivityDetector() = default;
+
+// Because ISAC uses a different chunk length, `chunkwise_voice_probabilities_`
+// and `chunkwise_rms_` are updated when there is new data and cleared
+// otherwise.
+void VoiceActivityDetector::ProcessChunk(const int16_t* audio,
+ size_t length,
+ int sample_rate_hz) {
+ RTC_DCHECK_EQ(length, sample_rate_hz / 100);
+ // TODO(bugs.webrtc.org/7494): Remove resampling and force 16 kHz audio.
+ // Resample to the required rate.
+ const int16_t* resampled_ptr = audio;
+ if (sample_rate_hz != kSampleRateHz) {
+ RTC_CHECK_EQ(
+ resampler_.ResetIfNeeded(sample_rate_hz, kSampleRateHz, kNumChannels),
+ 0);
+ resampler_.Push(audio, length, resampled_, kLength10Ms, length);
+ resampled_ptr = resampled_;
+ }
+ RTC_DCHECK_EQ(length, kLength10Ms);
+
+ // Each chunk needs to be passed into `standalone_vad_`, because internally it
+ // buffers the audio and processes it all at once when GetActivity() is
+ // called.
+ RTC_CHECK_EQ(standalone_vad_->AddAudio(resampled_ptr, length), 0);
+
+ audio_processing_.ExtractFeatures(resampled_ptr, length, &features_);
+
+ chunkwise_voice_probabilities_.resize(features_.num_frames);
+ chunkwise_rms_.resize(features_.num_frames);
+ std::copy(features_.rms, features_.rms + chunkwise_rms_.size(),
+ chunkwise_rms_.begin());
+ if (features_.num_frames > 0) {
+ if (features_.silence) {
+ // The other features are invalid, so set the voice probabilities to an
+ // arbitrary low value.
+ std::fill(chunkwise_voice_probabilities_.begin(),
+ chunkwise_voice_probabilities_.end(), kLowProbability);
+ } else {
+ std::fill(chunkwise_voice_probabilities_.begin(),
+ chunkwise_voice_probabilities_.end(), kNeutralProbability);
+ RTC_CHECK_GE(
+ standalone_vad_->GetActivity(&chunkwise_voice_probabilities_[0],
+ chunkwise_voice_probabilities_.size()),
+ 0);
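+      // The pitch-based VAD then refines the standalone estimates using the
+      // extracted audio features.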
+ RTC_CHECK_GE(pitch_based_vad_.VoicingProbability(
+ features_, &chunkwise_voice_probabilities_[0]),
+ 0);
+ }
+ last_voice_probability_ = chunkwise_voice_probabilities_.back();
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/vad/voice_activity_detector.h b/third_party/libwebrtc/modules/audio_processing/vad/voice_activity_detector.h
new file mode 100644
index 0000000000..92b9a8c208
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/vad/voice_activity_detector.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_VAD_VOICE_ACTIVITY_DETECTOR_H_
+#define MODULES_AUDIO_PROCESSING_VAD_VOICE_ACTIVITY_DETECTOR_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <memory>
+#include <vector>
+
+#include "common_audio/resampler/include/resampler.h"
+#include "modules/audio_processing/vad/common.h"
+#include "modules/audio_processing/vad/pitch_based_vad.h"
+#include "modules/audio_processing/vad/standalone_vad.h"
+#include "modules/audio_processing/vad/vad_audio_proc.h"
+
+namespace webrtc {
+
+// A Voice Activity Detector (VAD) that combines the voice probability from the
+// StandaloneVad and PitchBasedVad to get a more robust estimation.
+class VoiceActivityDetector {
+ public:
+ VoiceActivityDetector();
+ ~VoiceActivityDetector();
+
+ // Processes each audio chunk and estimates the voice probability.
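+  // `length` must equal sample_rate_hz / 100, i.e. exactly one 10 ms chunk.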
+ // TODO(bugs.webrtc.org/7494): Switch to rtc::ArrayView and remove
+ // `sample_rate_hz`.
+ void ProcessChunk(const int16_t* audio, size_t length, int sample_rate_hz);
+
+  // Returns a vector of voice probabilities, one per processed chunk. It can
+  // be empty while data is buffered internally, but it catches up afterwards,
+  // returning multiple values at once.
+ const std::vector<double>& chunkwise_voice_probabilities() const {
+ return chunkwise_voice_probabilities_;
+ }
+
+ // Returns a vector of RMS values for each chunk. It has the same length as
+ // chunkwise_voice_probabilities().
+ const std::vector<double>& chunkwise_rms() const { return chunkwise_rms_; }
+
+  // Returns the most recent voice probability, independent of the internal
+  // chunking, though it lags the input by a few chunks.
+ float last_voice_probability() const { return last_voice_probability_; }
+
+ private:
+ // TODO(aluebs): Change these to float.
+ std::vector<double> chunkwise_voice_probabilities_;
+ std::vector<double> chunkwise_rms_;
+
+ float last_voice_probability_;
+
+ Resampler resampler_;
+ VadAudioProc audio_processing_;
+
+ std::unique_ptr<StandaloneVad> standalone_vad_;
+ PitchBasedVad pitch_based_vad_;
+
+ int16_t resampled_[kLength10Ms];
+ AudioFeatures features_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_VAD_VOICE_ACTIVITY_DETECTOR_H_
diff --git a/third_party/libwebrtc/modules/audio_processing/vad/voice_activity_detector_unittest.cc b/third_party/libwebrtc/modules/audio_processing/vad/voice_activity_detector_unittest.cc
new file mode 100644
index 0000000000..80f21c8db0
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/vad/voice_activity_detector_unittest.cc
@@ -0,0 +1,168 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/vad/voice_activity_detector.h"
+
+#include <algorithm>
+#include <vector>
+
+#include "test/gtest.h"
+#include "test/testsupport/file_utils.h"
+
+namespace webrtc {
+namespace {
+
+const int kStartTimeSec = 16;
+const float kMeanSpeechProbability = 0.3f;
+const float kMaxNoiseProbability = 0.1f;
+const size_t kNumChunks = 300u;
+const size_t kNumChunksPerIsacBlock = 3;
+
+void GenerateNoise(std::vector<int16_t>* data) {
+ for (size_t i = 0; i < data->size(); ++i) {
+    // std::rand() returns a value in [0, RAND_MAX]; the implicit conversion
+    // to int16_t wraps it into the 16-bit range, which is fine for noise.
+ (*data)[i] = std::rand();
+ }
+}
+
+} // namespace
+
+TEST(VoiceActivityDetectorTest, ConstructorSetsDefaultValues) {
+ const float kDefaultVoiceValue = 1.f;
+
+ VoiceActivityDetector vad;
+
+ std::vector<double> p = vad.chunkwise_voice_probabilities();
+ std::vector<double> rms = vad.chunkwise_rms();
+
+ EXPECT_EQ(p.size(), 0u);
+ EXPECT_EQ(rms.size(), 0u);
+
+ EXPECT_FLOAT_EQ(vad.last_voice_probability(), kDefaultVoiceValue);
+}
+
+TEST(VoiceActivityDetectorTest, Speech16kHzHasHighVoiceProbabilities) {
+ const int kSampleRateHz = 16000;
+ const int kLength10Ms = kSampleRateHz / 100;
+
+ VoiceActivityDetector vad;
+
+ std::vector<int16_t> data(kLength10Ms);
+ float mean_probability = 0.f;
+
+ FILE* pcm_file =
+ fopen(test::ResourcePath("audio_processing/transient/audio16kHz", "pcm")
+ .c_str(),
+ "rb");
+ ASSERT_TRUE(pcm_file != nullptr);
+ // The silences in the file are skipped to get a more robust voice probability
+ // for speech.
+ ASSERT_EQ(fseek(pcm_file, kStartTimeSec * kSampleRateHz * sizeof(data[0]),
+ SEEK_SET),
+ 0);
+
+ size_t num_chunks = 0;
+ while (fread(&data[0], sizeof(data[0]), data.size(), pcm_file) ==
+ data.size()) {
+ vad.ProcessChunk(&data[0], data.size(), kSampleRateHz);
+
+ mean_probability += vad.last_voice_probability();
+
+ ++num_chunks;
+ }
+
+ mean_probability /= num_chunks;
+
+ EXPECT_GT(mean_probability, kMeanSpeechProbability);
+}
+
+TEST(VoiceActivityDetectorTest, Speech32kHzHasHighVoiceProbabilities) {
+ const int kSampleRateHz = 32000;
+ const int kLength10Ms = kSampleRateHz / 100;
+
+ VoiceActivityDetector vad;
+
+ std::vector<int16_t> data(kLength10Ms);
+ float mean_probability = 0.f;
+
+ FILE* pcm_file =
+ fopen(test::ResourcePath("audio_processing/transient/audio32kHz", "pcm")
+ .c_str(),
+ "rb");
+ ASSERT_TRUE(pcm_file != nullptr);
+ // The silences in the file are skipped to get a more robust voice probability
+ // for speech.
+ ASSERT_EQ(fseek(pcm_file, kStartTimeSec * kSampleRateHz * sizeof(data[0]),
+ SEEK_SET),
+ 0);
+
+ size_t num_chunks = 0;
+ while (fread(&data[0], sizeof(data[0]), data.size(), pcm_file) ==
+ data.size()) {
+ vad.ProcessChunk(&data[0], data.size(), kSampleRateHz);
+
+ mean_probability += vad.last_voice_probability();
+
+ ++num_chunks;
+ }
+
+ mean_probability /= num_chunks;
+
+ EXPECT_GT(mean_probability, kMeanSpeechProbability);
+}
+
+TEST(VoiceActivityDetectorTest, Noise16kHzHasLowVoiceProbabilities) {
+ VoiceActivityDetector vad;
+
+ std::vector<int16_t> data(kLength10Ms);
+ float max_probability = 0.f;
+
+ std::srand(42);
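+  // A fixed seed keeps the generated noise deterministic across runs.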
+
+ for (size_t i = 0; i < kNumChunks; ++i) {
+ GenerateNoise(&data);
+
+ vad.ProcessChunk(&data[0], data.size(), kSampleRateHz);
+
+    // Before `vad` has enough data to process an ISAC block, it returns the
+    // default value, 1.f, which would ruin the `max_probability` value.
+ if (i > kNumChunksPerIsacBlock) {
+ max_probability = std::max(max_probability, vad.last_voice_probability());
+ }
+ }
+
+ EXPECT_LT(max_probability, kMaxNoiseProbability);
+}
+
+TEST(VoiceActivityDetectorTest, Noise32kHzHasLowVoiceProbabilities) {
+ VoiceActivityDetector vad;
+
+ std::vector<int16_t> data(2 * kLength10Ms);
+ float max_probability = 0.f;
+
+ std::srand(42);
+
+ for (size_t i = 0; i < kNumChunks; ++i) {
+ GenerateNoise(&data);
+
+ vad.ProcessChunk(&data[0], data.size(), 2 * kSampleRateHz);
+
+    // Before `vad` has enough data to process an ISAC block, it returns the
+    // default value, 1.f, which would ruin the `max_probability` value.
+ if (i > kNumChunksPerIsacBlock) {
+ max_probability = std::max(max_probability, vad.last_voice_probability());
+ }
+ }
+
+ EXPECT_LT(max_probability, kMaxNoiseProbability);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_processing/vad/voice_gmm_tables.h b/third_party/libwebrtc/modules/audio_processing/vad/voice_gmm_tables.h
new file mode 100644
index 0000000000..ef4ad7e21e
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_processing/vad/voice_gmm_tables.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+// GMM tables for active segments. Generated by MakeGmmTables.m.
+
+#ifndef MODULES_AUDIO_PROCESSING_VAD_VOICE_GMM_TABLES_H_
+#define MODULES_AUDIO_PROCESSING_VAD_VOICE_GMM_TABLES_H_
+
+static const int kVoiceGmmNumMixtures = 12;
+static const int kVoiceGmmDim = 3;
+
+static const double
+ kVoiceGmmCovarInverse[kVoiceGmmNumMixtures][kVoiceGmmDim][kVoiceGmmDim] = {
+ {{1.83673825579513e+00, -8.09791637570095e-04, 4.60106414365986e-03},
+ {-8.09791637570095e-04, 8.89351738394608e-04, -9.80188953277734e-04},
+ {4.60106414365986e-03, -9.80188953277734e-04, 1.38706060206582e-03}},
+ {{6.76228912850703e+01, -1.98893120119660e-02, -3.53548357253551e-03},
+ {-1.98893120119660e-02, 3.96216858500530e-05, -4.08492938394097e-05},
+ {-3.53548357253551e-03, -4.08492938394097e-05, 9.31864352856416e-04}},
+ {{9.98612435944558e+00, -5.27880954316893e-03, -6.30342541619017e-03},
+ {-5.27880954316893e-03, 4.54359480225226e-05, 6.30804591626044e-05},
+ {-6.30342541619017e-03, 6.30804591626044e-05, 5.36466441382942e-04}},
+ {{3.39917474216349e+01, -1.56213579433191e-03, -4.01459014990225e-02},
+ {-1.56213579433191e-03, 6.40415424897724e-05, 6.20076342427833e-05},
+ {-4.01459014990225e-02, 6.20076342427833e-05, 3.51199070103063e-03}},
+ {{1.34545062271428e+01, -7.94513610147144e-03, -5.34401019341728e-02},
+ {-7.94513610147144e-03, 1.16511820098649e-04, 4.66063702069293e-05},
+ {-5.34401019341728e-02, 4.66063702069293e-05, 2.72354323774163e-03}},
+ {{1.08557844314806e+02, -1.54885805673668e-02, -1.88029692674851e-02},
+ {-1.54885805673668e-02, 1.16404042786406e-04, 6.45579292702802e-06},
+ {-1.88029692674851e-02, 6.45579292702802e-06, 4.32330478391416e-04}},
+ {{8.22940066541450e+01, -1.15903110231303e-02, -4.92166764865343e-02},
+ {-1.15903110231303e-02, 7.42510742165261e-05, 3.73007314191290e-06},
+ {-4.92166764865343e-02, 3.73007314191290e-06, 3.64005221593244e-03}},
+ {{2.31133605685660e+00, -7.83261568950254e-04, 7.45744012346313e-04},
+ {-7.83261568950254e-04, 1.29460648214142e-05, -2.22774455093730e-06},
+ {7.45744012346313e-04, -2.22774455093730e-06, 1.05117294093010e-04}},
+ {{3.78767849189611e+02, 1.57759761011568e-03, -2.08551217988774e-02},
+ {1.57759761011568e-03, 4.76066236886865e-05, -2.33977412299324e-05},
+ {-2.08551217988774e-02, -2.33977412299324e-05, 5.24261005371196e-04}},
+ {{6.98580096506135e-01, -5.13850255217378e-04, -4.01124551717056e-04},
+ {-5.13850255217378e-04, 1.40501021984840e-06, -2.09496928716569e-06},
+ {-4.01124551717056e-04, -2.09496928716569e-06, 2.82879357740037e-04}},
+ {{2.62770945162399e+00, -2.31825753241430e-03, -5.30447217466318e-03},
+ {-2.31825753241430e-03, 4.59108572227649e-05, 7.67631886355405e-05},
+ {-5.30447217466318e-03, 7.67631886355405e-05, 2.28521601674098e-03}},
+ {{1.89940391362152e+02, -4.23280856852379e-03, -2.70608873541399e-02},
+ {-4.23280856852379e-03, 6.77547582742563e-05, 2.69154203800467e-05},
+ {-2.70608873541399e-02, 2.69154203800467e-05, 3.88574543373470e-03}}};
+
+static const double kVoiceGmmMean[kVoiceGmmNumMixtures][kVoiceGmmDim] = {
+ {-2.15020241646536e+00, 4.97079062999877e+02, 4.77078119504505e+02},
+ {-8.92097680029190e-01, 5.92064964199921e+02, 1.81045145941059e+02},
+ {-1.29435784144398e+00, 4.98450293410611e+02, 1.71991263804064e+02},
+ {-1.03925228397884e+00, 4.99511274321571e+02, 1.05838336539105e+02},
+ {-1.29229047206129e+00, 4.15026762566707e+02, 1.12861119017125e+02},
+ {-7.88748114599810e-01, 4.48739336688113e+02, 1.89784216956337e+02},
+ {-8.77777402332642e-01, 4.86620285054533e+02, 1.13477708016491e+02},
+ {-2.06465957063057e+00, 6.33385049870607e+02, 2.32758546796149e+02},
+ {-6.98893789231685e-01, 5.93622051503385e+02, 1.92536982473203e+02},
+ {-2.55901217508894e+00, 1.55914919756205e+03, 1.39769980835570e+02},
+ {-1.92070024165837e+00, 4.87983940444185e+02, 1.02745468128289e+02},
+ {-7.29187507662854e-01, 5.22717685022855e+02, 1.16377942283991e+02}};
+
+static const double kVoiceGmmWeights[kVoiceGmmNumMixtures] = {
+ -1.39789694361035e+01, -1.19527720202104e+01, -1.32396317929055e+01,
+ -1.09436815209238e+01, -1.13440027478149e+01, -1.12200721834504e+01,
+ -1.02537324043693e+01, -1.60789861938302e+01, -1.03394494048344e+01,
+ -1.83207938586818e+01, -1.31186044948288e+01, -9.52479998673554e+00};
+#endif // MODULES_AUDIO_PROCESSING_VAD_VOICE_GMM_TABLES_H_
diff --git a/third_party/libwebrtc/modules/congestion_controller/BUILD.gn b/third_party/libwebrtc/modules/congestion_controller/BUILD.gn
new file mode 100644
index 0000000000..774fc84d67
--- /dev/null
+++ b/third_party/libwebrtc/modules/congestion_controller/BUILD.gn
@@ -0,0 +1,73 @@
+# Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+import("../../webrtc.gni")
+
+config("bwe_test_logging") {
+ if (rtc_enable_bwe_test_logging) {
+ defines = [ "BWE_TEST_LOGGING_COMPILE_TIME_ENABLE=1" ]
+ } else {
+ defines = [ "BWE_TEST_LOGGING_COMPILE_TIME_ENABLE=0" ]
+ }
+}
+
+rtc_library("congestion_controller") {
+ visibility = [ "*" ]
+ configs += [ ":bwe_test_logging" ]
+ sources = [
+ "include/receive_side_congestion_controller.h",
+ "receive_side_congestion_controller.cc",
+ "remb_throttler.cc",
+ "remb_throttler.h",
+ ]
+
+ deps = [
+ "../../api/transport:field_trial_based_config",
+ "../../api/transport:network_control",
+ "../../api/units:data_rate",
+ "../../api/units:time_delta",
+ "../../api/units:timestamp",
+ "../../rtc_base:logging",
+ "../../rtc_base:macromagic",
+ "../../rtc_base/synchronization:mutex",
+ "../pacing",
+ "../remote_bitrate_estimator",
+ "../rtp_rtcp:rtp_rtcp_format",
+ ]
+
+ if (!build_with_mozilla) {
+ deps += [ "../../rtc_base" ]
+ }
+}
+
+if (rtc_include_tests && !build_with_chromium) {
+ rtc_library("congestion_controller_unittests") {
+ testonly = true
+
+ sources = [
+ "receive_side_congestion_controller_unittest.cc",
+ "remb_throttler_unittest.cc",
+ ]
+ deps = [
+ ":congestion_controller",
+ "../../api/test/network_emulation",
+ "../../api/test/network_emulation:create_cross_traffic",
+ "../../api/units:data_rate",
+ "../../api/units:time_delta",
+ "../../api/units:timestamp",
+ "../../system_wrappers",
+ "../../test:test_support",
+ "../../test/scenario",
+ "../pacing",
+ "goog_cc:estimators",
+ "goog_cc:goog_cc_unittests",
+ "pcc:pcc_unittests",
+ "rtp:congestion_controller_unittests",
+ ]
+ }
+}
diff --git a/third_party/libwebrtc/modules/congestion_controller/DEPS b/third_party/libwebrtc/modules/congestion_controller/DEPS
new file mode 100644
index 0000000000..2ed9952e22
--- /dev/null
+++ b/third_party/libwebrtc/modules/congestion_controller/DEPS
@@ -0,0 +1,5 @@
+include_rules = [
+ "+logging/rtc_event_log",
+ "+system_wrappers",
+ "+video",
+]
diff --git a/third_party/libwebrtc/modules/congestion_controller/OWNERS b/third_party/libwebrtc/modules/congestion_controller/OWNERS
new file mode 100644
index 0000000000..c74790f876
--- /dev/null
+++ b/third_party/libwebrtc/modules/congestion_controller/OWNERS
@@ -0,0 +1,6 @@
+stefan@webrtc.org
+terelius@webrtc.org
+philipel@webrtc.org
+mflodman@webrtc.org
+yinwa@webrtc.org
+perkj@webrtc.org
diff --git a/third_party/libwebrtc/modules/congestion_controller/congestion_controller_gn/moz.build b/third_party/libwebrtc/modules/congestion_controller/congestion_controller_gn/moz.build
new file mode 100644
index 0000000000..17ab56bb0e
--- /dev/null
+++ b/third_party/libwebrtc/modules/congestion_controller/congestion_controller_gn/moz.build
@@ -0,0 +1,216 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["BWE_TEST_LOGGING_COMPILE_TIME_ENABLE"] = "0"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/congestion_controller/receive_side_congestion_controller.cc",
+ "/third_party/libwebrtc/modules/congestion_controller/remb_throttler.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "GLESv2",
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "dl",
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("congestion_controller_gn")
diff --git a/third_party/libwebrtc/modules/congestion_controller/goog_cc/BUILD.gn b/third_party/libwebrtc/modules/congestion_controller/goog_cc/BUILD.gn
new file mode 100644
index 0000000000..ff95ef7f9a
--- /dev/null
+++ b/third_party/libwebrtc/modules/congestion_controller/goog_cc/BUILD.gn
@@ -0,0 +1,367 @@
+# Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+import("../../../webrtc.gni")
+
+config("bwe_test_logging") {
+ if (rtc_enable_bwe_test_logging) {
+ defines = [ "BWE_TEST_LOGGING_COMPILE_TIME_ENABLE=1" ]
+ } else {
+ defines = [ "BWE_TEST_LOGGING_COMPILE_TIME_ENABLE=0" ]
+ }
+}
+
+rtc_library("goog_cc") {
+ configs += [ ":bwe_test_logging" ]
+ sources = [
+ "goog_cc_network_control.cc",
+ "goog_cc_network_control.h",
+ ]
+
+ deps = [
+ ":alr_detector",
+ ":delay_based_bwe",
+ ":estimators",
+ ":probe_controller",
+ ":pushback_controller",
+ ":send_side_bwe",
+ "../../../api:field_trials_view",
+ "../../../api:network_state_predictor_api",
+ "../../../api/rtc_event_log",
+ "../../../api/transport:field_trial_based_config",
+ "../../../api/transport:network_control",
+ "../../../api/units:data_rate",
+ "../../../api/units:data_size",
+ "../../../api/units:time_delta",
+ "../../../api/units:timestamp",
+ "../../../logging:rtc_event_bwe",
+ "../../../logging:rtc_event_pacing",
+ "../../../rtc_base:checks",
+ "../../../rtc_base:logging",
+ "../../../rtc_base/experiments:alr_experiment",
+ "../../../rtc_base/experiments:field_trial_parser",
+ "../../../rtc_base/experiments:rate_control_settings",
+ "../../../system_wrappers",
+ "../../remote_bitrate_estimator",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/strings",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+}
+
+rtc_library("link_capacity_estimator") {
+ sources = [
+ "link_capacity_estimator.cc",
+ "link_capacity_estimator.h",
+ ]
+ deps = [
+ "../../../api/units:data_rate",
+ "../../../rtc_base:safe_minmax",
+ ]
+ absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ]
+}
+
+rtc_library("pushback_controller") {
+ sources = [
+ "congestion_window_pushback_controller.cc",
+ "congestion_window_pushback_controller.h",
+ ]
+ deps = [
+ "../../../api:field_trials_view",
+ "../../../api/transport:network_control",
+ "../../../api/units:data_size",
+ "../../../rtc_base:checks",
+ "../../../rtc_base/experiments:rate_control_settings",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/strings",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+}
+
+rtc_library("alr_detector") {
+ sources = [
+ "alr_detector.cc",
+ "alr_detector.h",
+ ]
+ deps = [
+ "../../../api:field_trials_view",
+ "../../../api/rtc_event_log",
+ "../../../api/transport:field_trial_based_config",
+ "../../../logging:rtc_event_pacing",
+ "../../../rtc_base:checks",
+ "../../../rtc_base:safe_conversions",
+ "../../../rtc_base:timeutils",
+ "../../../rtc_base/experiments:alr_experiment",
+ "../../../rtc_base/experiments:field_trial_parser",
+ "../../pacing:interval_budget",
+ ]
+ absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ]
+}
+rtc_library("estimators") {
+ configs += [ ":bwe_test_logging" ]
+ sources = [
+ "acknowledged_bitrate_estimator.cc",
+ "acknowledged_bitrate_estimator.h",
+ "acknowledged_bitrate_estimator_interface.cc",
+ "acknowledged_bitrate_estimator_interface.h",
+ "bitrate_estimator.cc",
+ "bitrate_estimator.h",
+ "delay_increase_detector_interface.h",
+ "probe_bitrate_estimator.cc",
+ "probe_bitrate_estimator.h",
+ "robust_throughput_estimator.cc",
+ "robust_throughput_estimator.h",
+ "trendline_estimator.cc",
+ "trendline_estimator.h",
+ ]
+
+ deps = [
+ "../../../api:field_trials_view",
+ "../../../api:network_state_predictor_api",
+ "../../../api/rtc_event_log",
+ "../../../api/transport:network_control",
+ "../../../api/units:data_rate",
+ "../../../api/units:data_size",
+ "../../../api/units:time_delta",
+ "../../../api/units:timestamp",
+ "../../../logging:rtc_event_bwe",
+ "../../../rtc_base:checks",
+ "../../../rtc_base:logging",
+ "../../../rtc_base:macromagic",
+ "../../../rtc_base:rtc_numerics",
+ "../../../rtc_base:safe_conversions",
+ "../../../rtc_base:safe_minmax",
+ "../../../rtc_base/experiments:field_trial_parser",
+ "../../remote_bitrate_estimator",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/strings",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+}
+
+rtc_library("loss_based_bwe_v2") {
+ sources = [
+ "loss_based_bwe_v2.cc",
+ "loss_based_bwe_v2.h",
+ ]
+ deps = [
+ "../../../api:array_view",
+ "../../../api:field_trials_view",
+ "../../../api:network_state_predictor_api",
+ "../../../api/transport:network_control",
+ "../../../api/units:data_rate",
+ "../../../api/units:data_size",
+ "../../../api/units:time_delta",
+ "../../../api/units:timestamp",
+ "../../../rtc_base:logging",
+ "../../../rtc_base/experiments:field_trial_parser",
+ "../../remote_bitrate_estimator",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/algorithm:container",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+}
+
+rtc_library("loss_based_bwe_v1") {
+ configs += [ ":bwe_test_logging" ]
+ sources = [
+ "loss_based_bandwidth_estimation.cc",
+ "loss_based_bandwidth_estimation.h",
+ ]
+ deps = [
+ "../../../api:field_trials_view",
+ "../../../api/transport:network_control",
+ "../../../api/units:data_rate",
+ "../../../api/units:time_delta",
+ "../../../api/units:timestamp",
+ "../../../rtc_base:checks",
+ "../../../rtc_base/experiments:field_trial_parser",
+ ]
+ absl_deps = [ "//third_party/abseil-cpp/absl/strings" ]
+}
+
+rtc_library("send_side_bwe") {
+ configs += [ ":bwe_test_logging" ]
+ sources = [
+ "send_side_bandwidth_estimation.cc",
+ "send_side_bandwidth_estimation.h",
+ ]
+ deps = [
+ ":loss_based_bwe_v1",
+ ":loss_based_bwe_v2",
+ "../../../api:field_trials_view",
+ "../../../api:network_state_predictor_api",
+ "../../../api/rtc_event_log",
+ "../../../api/transport:network_control",
+ "../../../api/units:data_rate",
+ "../../../api/units:time_delta",
+ "../../../api/units:timestamp",
+ "../../../logging:rtc_event_bwe",
+ "../../../rtc_base:checks",
+ "../../../rtc_base:logging",
+ "../../../rtc_base/experiments:field_trial_parser",
+ "../../../system_wrappers:field_trial",
+ "../../../system_wrappers:metrics",
+ "../../remote_bitrate_estimator",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/strings",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+}
+
+rtc_library("delay_based_bwe") {
+ configs += [ ":bwe_test_logging" ]
+ sources = [
+ "delay_based_bwe.cc",
+ "delay_based_bwe.h",
+ "inter_arrival_delta.cc",
+ "inter_arrival_delta.h",
+ ]
+
+ deps = [
+ ":estimators",
+ "../../../api:field_trials_view",
+ "../../../api:network_state_predictor_api",
+ "../../../api/rtc_event_log",
+ "../../../api/transport:network_control",
+ "../../../api/units:time_delta",
+ "../../../api/units:timestamp",
+ "../../../logging:rtc_event_bwe",
+ "../../../rtc_base:checks",
+ "../../../rtc_base:logging",
+ "../../../rtc_base:race_checker",
+ "../../../rtc_base/experiments:field_trial_parser",
+ "../../../system_wrappers:metrics",
+ "../../pacing",
+ "../../remote_bitrate_estimator",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/strings",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+}
+
+rtc_library("probe_controller") {
+ sources = [
+ "probe_controller.cc",
+ "probe_controller.h",
+ ]
+
+ deps = [
+ "../../../api:field_trials_view",
+ "../../../api/rtc_event_log",
+ "../../../api/transport:network_control",
+ "../../../api/units:data_rate",
+ "../../../api/units:time_delta",
+ "../../../api/units:timestamp",
+ "../../../logging:rtc_event_bwe",
+ "../../../logging:rtc_event_pacing",
+ "../../../rtc_base:checks",
+ "../../../rtc_base:logging",
+ "../../../rtc_base:macromagic",
+ "../../../rtc_base:safe_conversions",
+ "../../../rtc_base/experiments:field_trial_parser",
+ "../../../system_wrappers:metrics",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/base:core_headers",
+ "//third_party/abseil-cpp/absl/strings",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+}
+
+if (rtc_include_tests) {
+ rtc_library("test_goog_cc_printer") {
+ testonly = true
+ sources = [
+ "test/goog_cc_printer.cc",
+ "test/goog_cc_printer.h",
+ ]
+ deps = [
+ ":alr_detector",
+ ":delay_based_bwe",
+ ":estimators",
+ ":goog_cc",
+ "../../../api/rtc_event_log",
+ "../../../api/transport:goog_cc",
+ "../../../api/transport:network_control",
+ "../../../api/units:timestamp",
+ "../../../rtc_base:checks",
+ "../../../test/logging:log_writer",
+ "../../remote_bitrate_estimator",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/strings",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+ }
+ if (!build_with_chromium) {
+ rtc_library("goog_cc_unittests") {
+ testonly = true
+
+ sources = [
+ "acknowledged_bitrate_estimator_unittest.cc",
+ "alr_detector_unittest.cc",
+ "congestion_window_pushback_controller_unittest.cc",
+ "delay_based_bwe_unittest.cc",
+ "delay_based_bwe_unittest_helper.cc",
+ "delay_based_bwe_unittest_helper.h",
+ "goog_cc_network_control_unittest.cc",
+ "loss_based_bwe_v2_test.cc",
+ "probe_bitrate_estimator_unittest.cc",
+ "probe_controller_unittest.cc",
+ "robust_throughput_estimator_unittest.cc",
+ "send_side_bandwidth_estimation_unittest.cc",
+ "trendline_estimator_unittest.cc",
+ ]
+ deps = [
+ ":alr_detector",
+ ":delay_based_bwe",
+ ":estimators",
+ ":goog_cc",
+ ":loss_based_bwe_v2",
+ ":probe_controller",
+ ":pushback_controller",
+ ":send_side_bwe",
+ "../../../api:field_trials_view",
+ "../../../api:network_state_predictor_api",
+ "../../../api/rtc_event_log",
+ "../../../api/test/network_emulation",
+ "../../../api/test/network_emulation:create_cross_traffic",
+ "../../../api/transport:field_trial_based_config",
+ "../../../api/transport:goog_cc",
+ "../../../api/transport:network_control",
+ "../../../api/units:data_rate",
+ "../../../api/units:data_size",
+ "../../../api/units:time_delta",
+ "../../../api/units:timestamp",
+ "../../../logging:mocks",
+ "../../../logging:rtc_event_bwe",
+ "../../../rtc_base:checks",
+ "../../../rtc_base:logging",
+ "../../../rtc_base:random",
+ "../../../rtc_base:rtc_base_tests_utils",
+ "../../../rtc_base:stringutils",
+ "../../../rtc_base/experiments:alr_experiment",
+ "../../../system_wrappers",
+ "../../../test:explicit_key_value_config",
+ "../../../test:field_trial",
+ "../../../test:test_support",
+ "../../../test/scenario",
+ "../../pacing",
+ "//testing/gmock",
+ ]
+ absl_deps = [ "//third_party/abseil-cpp/absl/strings:strings" ]
+ }
+ }
+}
diff --git a/third_party/libwebrtc/modules/congestion_controller/goog_cc/acknowledged_bitrate_estimator.cc b/third_party/libwebrtc/modules/congestion_controller/goog_cc/acknowledged_bitrate_estimator.cc
new file mode 100644
index 0000000000..08b42a8168
--- /dev/null
+++ b/third_party/libwebrtc/modules/congestion_controller/goog_cc/acknowledged_bitrate_estimator.cc
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/congestion_controller/goog_cc/acknowledged_bitrate_estimator.h"
+
+#include <stddef.h>
+
+#include <algorithm>
+#include <memory>
+#include <utility>
+
+#include "rtc_base/checks.h"
+#include "rtc_base/numerics/safe_conversions.h"
+
+namespace webrtc {
+
+AcknowledgedBitrateEstimator::AcknowledgedBitrateEstimator(
+ const FieldTrialsView* key_value_config)
+ : AcknowledgedBitrateEstimator(
+ key_value_config,
+ std::make_unique<BitrateEstimator>(key_value_config)) {}
+
+AcknowledgedBitrateEstimator::~AcknowledgedBitrateEstimator() {}
+
+AcknowledgedBitrateEstimator::AcknowledgedBitrateEstimator(
+ const FieldTrialsView* key_value_config,
+ std::unique_ptr<BitrateEstimator> bitrate_estimator)
+ : in_alr_(false), bitrate_estimator_(std::move(bitrate_estimator)) {}
+
+void AcknowledgedBitrateEstimator::IncomingPacketFeedbackVector(
+ const std::vector<PacketResult>& packet_feedback_vector) {
+ RTC_DCHECK(std::is_sorted(packet_feedback_vector.begin(),
+ packet_feedback_vector.end(),
+ PacketResult::ReceiveTimeOrder()));
+ for (const auto& packet : packet_feedback_vector) {
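+    // Leaving ALR (the application-limited region) is typically followed by
+    // a rapid rate ramp-up, so tell the estimator to expect a fast change.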
+ if (alr_ended_time_ && packet.sent_packet.send_time > *alr_ended_time_) {
+ bitrate_estimator_->ExpectFastRateChange();
+ alr_ended_time_.reset();
+ }
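+    // `prior_unacked_data` adds payload that was sent without transport
+    // feedback (e.g. audio without transport-wide sequence numbers), keeping
+    // the estimate closer to the true send volume.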
+ DataSize acknowledged_estimate = packet.sent_packet.size;
+ acknowledged_estimate += packet.sent_packet.prior_unacked_data;
+ bitrate_estimator_->Update(packet.receive_time, acknowledged_estimate,
+ in_alr_);
+ }
+}
+
+absl::optional<DataRate> AcknowledgedBitrateEstimator::bitrate() const {
+ return bitrate_estimator_->bitrate();
+}
+
+absl::optional<DataRate> AcknowledgedBitrateEstimator::PeekRate() const {
+ return bitrate_estimator_->PeekRate();
+}
+
+void AcknowledgedBitrateEstimator::SetAlrEndedTime(Timestamp alr_ended_time) {
+ alr_ended_time_.emplace(alr_ended_time);
+}
+
+void AcknowledgedBitrateEstimator::SetAlr(bool in_alr) {
+ in_alr_ = in_alr;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/congestion_controller/goog_cc/acknowledged_bitrate_estimator.h b/third_party/libwebrtc/modules/congestion_controller/goog_cc/acknowledged_bitrate_estimator.h
new file mode 100644
index 0000000000..d10846ab3a
--- /dev/null
+++ b/third_party/libwebrtc/modules/congestion_controller/goog_cc/acknowledged_bitrate_estimator.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_CONGESTION_CONTROLLER_GOOG_CC_ACKNOWLEDGED_BITRATE_ESTIMATOR_H_
+#define MODULES_CONGESTION_CONTROLLER_GOOG_CC_ACKNOWLEDGED_BITRATE_ESTIMATOR_H_
+
+#include <memory>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/field_trials_view.h"
+#include "api/transport/network_types.h"
+#include "api/units/data_rate.h"
+#include "modules/congestion_controller/goog_cc/acknowledged_bitrate_estimator_interface.h"
+#include "modules/congestion_controller/goog_cc/bitrate_estimator.h"
+
+namespace webrtc {
+
+class AcknowledgedBitrateEstimator
+ : public AcknowledgedBitrateEstimatorInterface {
+ public:
+ AcknowledgedBitrateEstimator(
+ const FieldTrialsView* key_value_config,
+ std::unique_ptr<BitrateEstimator> bitrate_estimator);
+
+ explicit AcknowledgedBitrateEstimator(
+ const FieldTrialsView* key_value_config);
+ ~AcknowledgedBitrateEstimator() override;
+
+ void IncomingPacketFeedbackVector(
+ const std::vector<PacketResult>& packet_feedback_vector) override;
+ absl::optional<DataRate> bitrate() const override;
+ absl::optional<DataRate> PeekRate() const override;
+ void SetAlr(bool in_alr) override;
+ void SetAlrEndedTime(Timestamp alr_ended_time) override;
+
+ private:
+ absl::optional<Timestamp> alr_ended_time_;
+ bool in_alr_;
+ std::unique_ptr<BitrateEstimator> bitrate_estimator_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_CONGESTION_CONTROLLER_GOOG_CC_ACKNOWLEDGED_BITRATE_ESTIMATOR_H_
diff --git a/third_party/libwebrtc/modules/congestion_controller/goog_cc/acknowledged_bitrate_estimator_interface.cc b/third_party/libwebrtc/modules/congestion_controller/goog_cc/acknowledged_bitrate_estimator_interface.cc
new file mode 100644
index 0000000000..c043353a7a
--- /dev/null
+++ b/third_party/libwebrtc/modules/congestion_controller/goog_cc/acknowledged_bitrate_estimator_interface.cc
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/congestion_controller/goog_cc/acknowledged_bitrate_estimator_interface.h"
+
+#include <algorithm>
+
+#include "api/units/time_delta.h"
+#include "modules/congestion_controller/goog_cc/acknowledged_bitrate_estimator.h"
+#include "modules/congestion_controller/goog_cc/robust_throughput_estimator.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+constexpr char RobustThroughputEstimatorSettings::kKey[];
+
+RobustThroughputEstimatorSettings::RobustThroughputEstimatorSettings(
+ const FieldTrialsView* key_value_config) {
+ Parser()->Parse(
+ key_value_config->Lookup(RobustThroughputEstimatorSettings::kKey));
+ if (window_packets < 10 || 1000 < window_packets) {
+ RTC_LOG(LS_WARNING) << "Window size must be between 10 and 1000 packets";
+ window_packets = 20;
+ }
+ if (max_window_packets < 10 || 1000 < max_window_packets) {
+ RTC_LOG(LS_WARNING)
+ << "Max window size must be between 10 and 1000 packets";
+ max_window_packets = 500;
+ }
+ max_window_packets = std::max(max_window_packets, window_packets);
+
+ if (required_packets < 10 || 1000 < required_packets) {
+ RTC_LOG(LS_WARNING) << "Required number of initial packets must be between "
+ "10 and 1000 packets";
+ required_packets = 10;
+ }
+ required_packets = std::min(required_packets, window_packets);
+
+ if (min_window_duration < TimeDelta::Millis(100) ||
+ TimeDelta::Millis(3000) < min_window_duration) {
+ RTC_LOG(LS_WARNING) << "Window duration must be between 100 and 3000 ms";
+ min_window_duration = TimeDelta::Millis(750);
+ }
+ if (max_window_duration < TimeDelta::Seconds(1) ||
+ TimeDelta::Seconds(15) < max_window_duration) {
+ RTC_LOG(LS_WARNING) << "Max window duration must be between 1 and 15 s";
+ max_window_duration = TimeDelta::Seconds(5);
+ }
+ min_window_duration = std::min(min_window_duration, max_window_duration);
+
+ if (unacked_weight < 0.0 || 1.0 < unacked_weight) {
+ RTC_LOG(LS_WARNING)
+ << "Weight for prior unacked size must be between 0 and 1.";
+ unacked_weight = 1.0;
+ }
+}
+
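+// Parses a comma-separated key:value list from the field-trial string, e.g.
+// (illustrative) "enabled:true,window_packets:30,window_duration:500ms".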
+std::unique_ptr<StructParametersParser>
+RobustThroughputEstimatorSettings::Parser() {
+ return StructParametersParser::Create(
+ "enabled", &enabled, //
+ "window_packets", &window_packets, //
+ "max_window_packets", &max_window_packets, //
+ "window_duration", &min_window_duration, //
+ "max_window_duration", &max_window_duration, //
+ "required_packets", &required_packets, //
+ "unacked_weight", &unacked_weight);
+}
+
+AcknowledgedBitrateEstimatorInterface::
+ ~AcknowledgedBitrateEstimatorInterface() {}
+
+std::unique_ptr<AcknowledgedBitrateEstimatorInterface>
+AcknowledgedBitrateEstimatorInterface::Create(
+ const FieldTrialsView* key_value_config) {
+ RobustThroughputEstimatorSettings simplified_estimator_settings(
+ key_value_config);
+ if (simplified_estimator_settings.enabled) {
+ return std::make_unique<RobustThroughputEstimator>(
+ simplified_estimator_settings);
+ }
+ return std::make_unique<AcknowledgedBitrateEstimator>(key_value_config);
+}
+
+} // namespace webrtc
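
The factory above keys off the `WebRTC-Bwe-RobustThroughputEstimatorSettings` field trial. As a minimal sketch of how a caller might exercise it, assuming the test helpers used in the unit tests below (`ScopedFieldTrials`, `FieldTrialBasedConfig`) and a hypothetical trial value and function name:

#include <memory>

#include "api/transport/field_trial_based_config.h"
#include "modules/congestion_controller/goog_cc/acknowledged_bitrate_estimator_interface.h"
#include "test/field_trial.h"

void SketchCreateEstimator() {
  // Hypothetical trial value: with enabled:true, Create() should return a
  // RobustThroughputEstimator; without the trial it falls back to
  // AcknowledgedBitrateEstimator.
  webrtc::test::ScopedFieldTrials trials(
      "WebRTC-Bwe-RobustThroughputEstimatorSettings/"
      "enabled:true,window_packets:30/");
  webrtc::FieldTrialBasedConfig config;
  std::unique_ptr<webrtc::AcknowledgedBitrateEstimatorInterface> estimator =
      webrtc::AcknowledgedBitrateEstimatorInterface::Create(&config);
}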
diff --git a/third_party/libwebrtc/modules/congestion_controller/goog_cc/acknowledged_bitrate_estimator_interface.h b/third_party/libwebrtc/modules/congestion_controller/goog_cc/acknowledged_bitrate_estimator_interface.h
new file mode 100644
index 0000000000..515af1efc9
--- /dev/null
+++ b/third_party/libwebrtc/modules/congestion_controller/goog_cc/acknowledged_bitrate_estimator_interface.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_CONGESTION_CONTROLLER_GOOG_CC_ACKNOWLEDGED_BITRATE_ESTIMATOR_INTERFACE_H_
+#define MODULES_CONGESTION_CONTROLLER_GOOG_CC_ACKNOWLEDGED_BITRATE_ESTIMATOR_INTERFACE_H_
+
+#include <stddef.h>
+
+#include <memory>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/field_trials_view.h"
+#include "api/transport/network_types.h"
+#include "api/units/data_rate.h"
+#include "api/units/time_delta.h"
+#include "api/units/timestamp.h"
+#include "rtc_base/experiments/struct_parameters_parser.h"
+
+namespace webrtc {
+
+struct RobustThroughputEstimatorSettings {
+ static constexpr char kKey[] = "WebRTC-Bwe-RobustThroughputEstimatorSettings";
+
+ RobustThroughputEstimatorSettings() = delete;
+ explicit RobustThroughputEstimatorSettings(
+ const FieldTrialsView* key_value_config);
+
+ bool enabled = false; // Set to true to use RobustThroughputEstimator.
+
+  // The estimator keeps the smallest window containing at least
+  // `window_packets` packets and at least the packets received during the
+  // last `min_window_duration`.
+  // (This means that it may store more than `window_packets` at high bitrates,
+  // and a longer duration than `min_window_duration` at low bitrates.)
+  // However, it will never store more than `max_window_packets` (for
+  // performance reasons), and never cover a longer duration than
+  // `max_window_duration` (to avoid very old packets influencing the
+  // estimate, e.g. when sending is paused).
+ unsigned window_packets = 20;
+ unsigned max_window_packets = 500;
+ TimeDelta min_window_duration = TimeDelta::Seconds(1);
+ TimeDelta max_window_duration = TimeDelta::Seconds(5);
+
+ // The estimator window requires at least `required_packets` packets
+ // to produce an estimate.
+ unsigned required_packets = 10;
+
+ // If audio packets aren't included in allocation (i.e. the
+ // estimated available bandwidth is divided only among the video
+ // streams), then `unacked_weight` should be set to 0.
+ // If audio packets are included in allocation, but not in bandwidth
+ // estimation (i.e. they don't have transport-wide sequence numbers,
+ // but we nevertheless divide the estimated available bandwidth among
+ // both audio and video streams), then `unacked_weight` should be set to 1.
+ // If all packets have transport-wide sequence numbers, then the value
+ // of `unacked_weight` doesn't matter.
+ double unacked_weight = 1.0;
+
+ std::unique_ptr<StructParametersParser> Parser();
+};
+
+class AcknowledgedBitrateEstimatorInterface {
+ public:
+ static std::unique_ptr<AcknowledgedBitrateEstimatorInterface> Create(
+ const FieldTrialsView* key_value_config);
+ virtual ~AcknowledgedBitrateEstimatorInterface();
+
+ virtual void IncomingPacketFeedbackVector(
+ const std::vector<PacketResult>& packet_feedback_vector) = 0;
+ virtual absl::optional<DataRate> bitrate() const = 0;
+ virtual absl::optional<DataRate> PeekRate() const = 0;
+ virtual void SetAlr(bool in_alr) = 0;
+ virtual void SetAlrEndedTime(Timestamp alr_ended_time) = 0;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_CONGESTION_CONTROLLER_GOOG_CC_ACKNOWLEDGED_BITRATE_ESTIMATOR_INTERFACE_H_
diff --git a/third_party/libwebrtc/modules/congestion_controller/goog_cc/acknowledged_bitrate_estimator_unittest.cc b/third_party/libwebrtc/modules/congestion_controller/goog_cc/acknowledged_bitrate_estimator_unittest.cc
new file mode 100644
index 0000000000..e5b733b119
--- /dev/null
+++ b/third_party/libwebrtc/modules/congestion_controller/goog_cc/acknowledged_bitrate_estimator_unittest.cc
@@ -0,0 +1,136 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/congestion_controller/goog_cc/acknowledged_bitrate_estimator.h"
+
+#include <memory>
+#include <utility>
+
+#include "api/transport/field_trial_based_config.h"
+#include "rtc_base/fake_clock.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+using ::testing::_;
+using ::testing::InSequence;
+using ::testing::NiceMock;
+using ::testing::Return;
+
+namespace webrtc {
+
+namespace {
+
+constexpr int64_t kFirstArrivalTimeMs = 10;
+constexpr int64_t kFirstSendTimeMs = 10;
+constexpr uint16_t kSequenceNumber = 1;
+constexpr size_t kPayloadSize = 10;
+
+class MockBitrateEstimator : public BitrateEstimator {
+ public:
+ using BitrateEstimator::BitrateEstimator;
+ MOCK_METHOD(void,
+ Update,
+ (Timestamp at_time, DataSize data_size, bool in_alr),
+ (override));
+ MOCK_METHOD(absl::optional<DataRate>, bitrate, (), (const, override));
+ MOCK_METHOD(void, ExpectFastRateChange, (), (override));
+};
+
+struct AcknowledgedBitrateEstimatorTestStates {
+ FieldTrialBasedConfig field_trial_config;
+ std::unique_ptr<AcknowledgedBitrateEstimator> acknowledged_bitrate_estimator;
+ MockBitrateEstimator* mock_bitrate_estimator;
+};
+
+AcknowledgedBitrateEstimatorTestStates CreateTestStates() {
+ AcknowledgedBitrateEstimatorTestStates states;
+ auto mock_bitrate_estimator =
+ std::make_unique<MockBitrateEstimator>(&states.field_trial_config);
+ states.mock_bitrate_estimator = mock_bitrate_estimator.get();
+ states.acknowledged_bitrate_estimator =
+ std::make_unique<AcknowledgedBitrateEstimator>(
+ &states.field_trial_config, std::move(mock_bitrate_estimator));
+ return states;
+}
+
+std::vector<PacketResult> CreateFeedbackVector() {
+ std::vector<PacketResult> packet_feedback_vector(2);
+ packet_feedback_vector[0].receive_time =
+ Timestamp::Millis(kFirstArrivalTimeMs);
+ packet_feedback_vector[0].sent_packet.send_time =
+ Timestamp::Millis(kFirstSendTimeMs);
+ packet_feedback_vector[0].sent_packet.sequence_number = kSequenceNumber;
+ packet_feedback_vector[0].sent_packet.size = DataSize::Bytes(kPayloadSize);
+ packet_feedback_vector[1].receive_time =
+ Timestamp::Millis(kFirstArrivalTimeMs + 10);
+ packet_feedback_vector[1].sent_packet.send_time =
+ Timestamp::Millis(kFirstSendTimeMs + 10);
+ packet_feedback_vector[1].sent_packet.sequence_number = kSequenceNumber;
+ packet_feedback_vector[1].sent_packet.size =
+ DataSize::Bytes(kPayloadSize + 10);
+ return packet_feedback_vector;
+}
+
+} // anonymous namespace
+
+TEST(TestAcknowledgedBitrateEstimator, UpdateBandwidth) {
+ auto states = CreateTestStates();
+ auto packet_feedback_vector = CreateFeedbackVector();
+ {
+ InSequence dummy;
+ EXPECT_CALL(*states.mock_bitrate_estimator,
+ Update(packet_feedback_vector[0].receive_time,
+ packet_feedback_vector[0].sent_packet.size,
+ /*in_alr*/ false))
+ .Times(1);
+ EXPECT_CALL(*states.mock_bitrate_estimator,
+ Update(packet_feedback_vector[1].receive_time,
+ packet_feedback_vector[1].sent_packet.size,
+ /*in_alr*/ false))
+ .Times(1);
+ }
+ states.acknowledged_bitrate_estimator->IncomingPacketFeedbackVector(
+ packet_feedback_vector);
+}
+
+TEST(TestAcknowledgedBitrateEstimator, ExpectFastRateChangeWhenLeftAlr) {
+ auto states = CreateTestStates();
+ auto packet_feedback_vector = CreateFeedbackVector();
+ {
+ InSequence dummy;
+ EXPECT_CALL(*states.mock_bitrate_estimator,
+ Update(packet_feedback_vector[0].receive_time,
+ packet_feedback_vector[0].sent_packet.size,
+ /*in_alr*/ false))
+ .Times(1);
+ EXPECT_CALL(*states.mock_bitrate_estimator, ExpectFastRateChange())
+ .Times(1);
+ EXPECT_CALL(*states.mock_bitrate_estimator,
+ Update(packet_feedback_vector[1].receive_time,
+ packet_feedback_vector[1].sent_packet.size,
+ /*in_alr*/ false))
+ .Times(1);
+ }
+ states.acknowledged_bitrate_estimator->SetAlrEndedTime(
+ Timestamp::Millis(kFirstArrivalTimeMs + 1));
+ states.acknowledged_bitrate_estimator->IncomingPacketFeedbackVector(
+ packet_feedback_vector);
+}
+
+TEST(TestAcknowledgedBitrateEstimator, ReturnBitrate) {
+ auto states = CreateTestStates();
+ absl::optional<DataRate> return_value = DataRate::KilobitsPerSec(42);
+ EXPECT_CALL(*states.mock_bitrate_estimator, bitrate())
+ .Times(1)
+ .WillOnce(Return(return_value));
+ EXPECT_EQ(return_value, states.acknowledged_bitrate_estimator->bitrate());
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/congestion_controller/goog_cc/alr_detector.cc b/third_party/libwebrtc/modules/congestion_controller/goog_cc/alr_detector.cc
new file mode 100644
index 0000000000..f1e649b7cd
--- /dev/null
+++ b/third_party/libwebrtc/modules/congestion_controller/goog_cc/alr_detector.cc
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/congestion_controller/goog_cc/alr_detector.h"
+
+#include <cstdint>
+#include <cstdio>
+#include <memory>
+
+#include "api/rtc_event_log/rtc_event.h"
+#include "api/rtc_event_log/rtc_event_log.h"
+#include "logging/rtc_event_log/events/rtc_event_alr_state.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "rtc_base/time_utils.h"
+
+namespace webrtc {
+
+namespace {
+AlrDetectorConfig GetConfigFromTrials(const FieldTrialsView* key_value_config) {
+ RTC_CHECK(AlrExperimentSettings::MaxOneFieldTrialEnabled(*key_value_config));
+ absl::optional<AlrExperimentSettings> experiment_settings =
+ AlrExperimentSettings::CreateFromFieldTrial(
+ *key_value_config,
+ AlrExperimentSettings::kScreenshareProbingBweExperimentName);
+ if (!experiment_settings) {
+ experiment_settings = AlrExperimentSettings::CreateFromFieldTrial(
+ *key_value_config,
+ AlrExperimentSettings::kStrictPacingAndProbingExperimentName);
+ }
+ AlrDetectorConfig conf;
+ if (experiment_settings) {
+ conf.bandwidth_usage_ratio =
+ experiment_settings->alr_bandwidth_usage_percent / 100.0;
+ conf.start_budget_level_ratio =
+ experiment_settings->alr_start_budget_level_percent / 100.0;
+ conf.stop_budget_level_ratio =
+ experiment_settings->alr_stop_budget_level_percent / 100.0;
+ }
+ conf.Parser()->Parse(
+ key_value_config->Lookup("WebRTC-AlrDetectorParameters"));
+ return conf;
+}
+} // namespace
+
+std::unique_ptr<StructParametersParser> AlrDetectorConfig::Parser() {
+ return StructParametersParser::Create( //
+ "bw_usage", &bandwidth_usage_ratio, //
+ "start", &start_budget_level_ratio, //
+ "stop", &stop_budget_level_ratio);
+}
+
+AlrDetector::AlrDetector(AlrDetectorConfig config, RtcEventLog* event_log)
+ : conf_(config), alr_budget_(0, true), event_log_(event_log) {}
+
+AlrDetector::AlrDetector(const FieldTrialsView* key_value_config)
+ : AlrDetector(GetConfigFromTrials(key_value_config), nullptr) {}
+
+AlrDetector::AlrDetector(const FieldTrialsView* key_value_config,
+ RtcEventLog* event_log)
+ : AlrDetector(GetConfigFromTrials(key_value_config), event_log) {}
+AlrDetector::~AlrDetector() {}
+
+void AlrDetector::OnBytesSent(size_t bytes_sent, int64_t send_time_ms) {
+ if (!last_send_time_ms_.has_value()) {
+ last_send_time_ms_ = send_time_ms;
+    // Since the duration for sending the bytes is unknown, return without
+    // updating the ALR state.
+ return;
+ }
+ int64_t delta_time_ms = send_time_ms - *last_send_time_ms_;
+ last_send_time_ms_ = send_time_ms;
+
+ alr_budget_.UseBudget(bytes_sent);
+ alr_budget_.IncreaseBudget(delta_time_ms);
+ bool state_changed = false;
+ if (alr_budget_.budget_ratio() > conf_.start_budget_level_ratio &&
+ !alr_started_time_ms_) {
+ alr_started_time_ms_.emplace(rtc::TimeMillis());
+ state_changed = true;
+ } else if (alr_budget_.budget_ratio() < conf_.stop_budget_level_ratio &&
+ alr_started_time_ms_) {
+ state_changed = true;
+ alr_started_time_ms_.reset();
+ }
+ if (event_log_ && state_changed) {
+ event_log_->Log(
+ std::make_unique<RtcEventAlrState>(alr_started_time_ms_.has_value()));
+ }
+}
+
+void AlrDetector::SetEstimatedBitrate(int bitrate_bps) {
+ RTC_DCHECK(bitrate_bps);
+ int target_rate_kbps =
+ static_cast<double>(bitrate_bps) * conf_.bandwidth_usage_ratio / 1000;
+ alr_budget_.set_target_rate_kbps(target_rate_kbps);
+}
+
+absl::optional<int64_t> AlrDetector::GetApplicationLimitedRegionStartTime()
+ const {
+ return alr_started_time_ms_;
+}
+
+} // namespace webrtc
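
To make the budget arithmetic above concrete, here is a worked example under the default config; the numbers and the helper name are illustrative only:

void SketchAlrBudget() {
  // With a 300 kbps estimate and bandwidth_usage_ratio 0.65, the budget
  // drains at 195 kbps, exactly as SetEstimatedBitrate computes:
  int target_rate_kbps = static_cast<int>(300000 * 0.65 / 1000);  // == 195
  // Sending at ~20% of the estimate (60 kbps) fills the budget, so
  // budget_ratio() climbs above start_budget_level_ratio (0.80) and ALR
  // starts; sending at ~100% drains it below stop_budget_level_ratio (0.50)
  // and ALR ends.
  (void)target_rate_kbps;
}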
diff --git a/third_party/libwebrtc/modules/congestion_controller/goog_cc/alr_detector.h b/third_party/libwebrtc/modules/congestion_controller/goog_cc/alr_detector.h
new file mode 100644
index 0000000000..5e7a3e1075
--- /dev/null
+++ b/third_party/libwebrtc/modules/congestion_controller/goog_cc/alr_detector.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_CONGESTION_CONTROLLER_GOOG_CC_ALR_DETECTOR_H_
+#define MODULES_CONGESTION_CONTROLLER_GOOG_CC_ALR_DETECTOR_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <memory>
+
+#include "absl/types/optional.h"
+#include "api/field_trials_view.h"
+#include "modules/pacing/interval_budget.h"
+#include "rtc_base/experiments/alr_experiment.h"
+#include "rtc_base/experiments/struct_parameters_parser.h"
+
+namespace webrtc {
+
+class RtcEventLog;
+
+struct AlrDetectorConfig {
+  // Sent traffic ratio as a function of network capacity used to determine
+  // the application-limited region. The ALR region starts when the budget
+  // level rises above `start_budget_level_ratio` and ends when it falls
+  // below `stop_budget_level_ratio`. NOTE: This is intentionally
+  // conservative at the moment until BW adjustment of the application
+  // limited region is fine tuned.
+ double bandwidth_usage_ratio = 0.65;
+ double start_budget_level_ratio = 0.80;
+ double stop_budget_level_ratio = 0.50;
+ std::unique_ptr<StructParametersParser> Parser();
+};
+// The application-limited region detector uses signals of elapsed time and
+// bytes sent to estimate whether network traffic is currently limited by
+// the application's ability to generate traffic.
+//
+// AlrDetector provides a signal that can be used to adjust the bandwidth
+// estimate.
+// Note: This class is not thread-safe.
+class AlrDetector {
+ public:
+ AlrDetector(AlrDetectorConfig config, RtcEventLog* event_log);
+ explicit AlrDetector(const FieldTrialsView* key_value_config);
+ AlrDetector(const FieldTrialsView* key_value_config, RtcEventLog* event_log);
+ ~AlrDetector();
+
+ void OnBytesSent(size_t bytes_sent, int64_t send_time_ms);
+
+ // Set current estimated bandwidth.
+ void SetEstimatedBitrate(int bitrate_bps);
+
+  // Returns the time in milliseconds when the current application-limited
+  // region started, or an empty result if the sender is not currently
+  // application-limited.
+ absl::optional<int64_t> GetApplicationLimitedRegionStartTime() const;
+
+ private:
+ friend class GoogCcStatePrinter;
+ const AlrDetectorConfig conf_;
+
+ absl::optional<int64_t> last_send_time_ms_;
+
+ IntervalBudget alr_budget_;
+ absl::optional<int64_t> alr_started_time_ms_;
+
+ RtcEventLog* event_log_;
+};
+} // namespace webrtc
+
+#endif // MODULES_CONGESTION_CONTROLLER_GOOG_CC_ALR_DETECTOR_H_
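
A minimal usage sketch of the interface above; the helper name and traffic numbers are assumptions, chosen so that 75 bytes every 10 ms is roughly 60 kbps, i.e. 20% of the configured estimate, which should enter ALR under the default thresholds:

#include "modules/congestion_controller/goog_cc/alr_detector.h"

void SketchAlrDetectorUsage() {
  webrtc::AlrDetectorConfig config;  // defaults: 0.65 / 0.80 / 0.50
  webrtc::AlrDetector detector(config, /*event_log=*/nullptr);
  detector.SetEstimatedBitrate(300000);  // 300 kbps estimate
  int64_t now_ms = 0;
  for (int i = 0; i < 200; ++i) {
    now_ms += 10;
    detector.OnBytesSent(/*bytes_sent=*/75, now_ms);  // ~60 kbps
  }
  // Expected to hold a start time under the default thresholds.
  bool in_alr = detector.GetApplicationLimitedRegionStartTime().has_value();
  (void)in_alr;
}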
diff --git a/third_party/libwebrtc/modules/congestion_controller/goog_cc/alr_detector_gn/moz.build b/third_party/libwebrtc/modules/congestion_controller/goog_cc/alr_detector_gn/moz.build
new file mode 100644
index 0000000000..622fd6f57c
--- /dev/null
+++ b/third_party/libwebrtc/modules/congestion_controller/goog_cc/alr_detector_gn/moz.build
@@ -0,0 +1,205 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/congestion_controller/goog_cc/alr_detector.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("alr_detector_gn")
diff --git a/third_party/libwebrtc/modules/congestion_controller/goog_cc/alr_detector_unittest.cc b/third_party/libwebrtc/modules/congestion_controller/goog_cc/alr_detector_unittest.cc
new file mode 100644
index 0000000000..eac19d0081
--- /dev/null
+++ b/third_party/libwebrtc/modules/congestion_controller/goog_cc/alr_detector_unittest.cc
@@ -0,0 +1,206 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/congestion_controller/goog_cc/alr_detector.h"
+
+#include "api/transport/field_trial_based_config.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/experiments/alr_experiment.h"
+#include "test/field_trial.h"
+#include "test/gtest.h"
+
+namespace {
+
+constexpr int kEstimatedBitrateBps = 300000;
+
+} // namespace
+
+namespace webrtc {
+namespace {
+class SimulateOutgoingTrafficIn {
+ public:
+ explicit SimulateOutgoingTrafficIn(AlrDetector* alr_detector,
+ int64_t* timestamp_ms)
+ : alr_detector_(alr_detector), timestamp_ms_(timestamp_ms) {
+ RTC_CHECK(alr_detector_);
+ }
+
+ SimulateOutgoingTrafficIn& ForTimeMs(int time_ms) {
+ interval_ms_ = time_ms;
+ ProduceTraffic();
+ return *this;
+ }
+
+ SimulateOutgoingTrafficIn& AtPercentOfEstimatedBitrate(int usage_percentage) {
+ usage_percentage_.emplace(usage_percentage);
+ ProduceTraffic();
+ return *this;
+ }
+
+ private:
+ void ProduceTraffic() {
+ if (!interval_ms_ || !usage_percentage_)
+ return;
+ const int kTimeStepMs = 10;
+ for (int t = 0; t < *interval_ms_; t += kTimeStepMs) {
+ *timestamp_ms_ += kTimeStepMs;
+ alr_detector_->OnBytesSent(kEstimatedBitrateBps * *usage_percentage_ *
+ kTimeStepMs / (8 * 100 * 1000),
+ *timestamp_ms_);
+ }
+ int remainder_ms = *interval_ms_ % kTimeStepMs;
+ if (remainder_ms > 0) {
+ *timestamp_ms_ += kTimeStepMs;
+ alr_detector_->OnBytesSent(kEstimatedBitrateBps * *usage_percentage_ *
+ remainder_ms / (8 * 100 * 1000),
+ *timestamp_ms_);
+ }
+ }
+ AlrDetector* const alr_detector_;
+ int64_t* timestamp_ms_;
+ absl::optional<int> interval_ms_;
+ absl::optional<int> usage_percentage_;
+};
+} // namespace
+
+TEST(AlrDetectorTest, AlrDetection) {
+ FieldTrialBasedConfig field_trials;
+ int64_t timestamp_ms = 1000;
+ AlrDetector alr_detector(&field_trials);
+ alr_detector.SetEstimatedBitrate(kEstimatedBitrateBps);
+
+ // Start in non-ALR state.
+ EXPECT_FALSE(alr_detector.GetApplicationLimitedRegionStartTime());
+
+ // Stay in non-ALR state when usage is close to 100%.
+ SimulateOutgoingTrafficIn(&alr_detector, &timestamp_ms)
+ .ForTimeMs(1000)
+ .AtPercentOfEstimatedBitrate(90);
+ EXPECT_FALSE(alr_detector.GetApplicationLimitedRegionStartTime());
+
+  // Verify that ALR starts when the send rate drops to 20% of the estimate.
+ SimulateOutgoingTrafficIn(&alr_detector, &timestamp_ms)
+ .ForTimeMs(1500)
+ .AtPercentOfEstimatedBitrate(20);
+ EXPECT_TRUE(alr_detector.GetApplicationLimitedRegionStartTime());
+
+ // Verify that ALR ends when usage is above 65%.
+ SimulateOutgoingTrafficIn(&alr_detector, &timestamp_ms)
+ .ForTimeMs(4000)
+ .AtPercentOfEstimatedBitrate(100);
+ EXPECT_FALSE(alr_detector.GetApplicationLimitedRegionStartTime());
+}
+
+TEST(AlrDetectorTest, ShortSpike) {
+ FieldTrialBasedConfig field_trials;
+ int64_t timestamp_ms = 1000;
+ AlrDetector alr_detector(&field_trials);
+ alr_detector.SetEstimatedBitrate(kEstimatedBitrateBps);
+ // Start in non-ALR state.
+ EXPECT_FALSE(alr_detector.GetApplicationLimitedRegionStartTime());
+
+  // Verify that ALR starts when the send rate drops to 20% of the estimate.
+ SimulateOutgoingTrafficIn(&alr_detector, &timestamp_ms)
+ .ForTimeMs(1000)
+ .AtPercentOfEstimatedBitrate(20);
+ EXPECT_TRUE(alr_detector.GetApplicationLimitedRegionStartTime());
+
+ // Verify that we stay in ALR region even after a short bitrate spike.
+ SimulateOutgoingTrafficIn(&alr_detector, &timestamp_ms)
+ .ForTimeMs(100)
+ .AtPercentOfEstimatedBitrate(150);
+ EXPECT_TRUE(alr_detector.GetApplicationLimitedRegionStartTime());
+
+ // ALR ends when usage is above 65%.
+ SimulateOutgoingTrafficIn(&alr_detector, &timestamp_ms)
+ .ForTimeMs(3000)
+ .AtPercentOfEstimatedBitrate(100);
+ EXPECT_FALSE(alr_detector.GetApplicationLimitedRegionStartTime());
+}
+
+TEST(AlrDetectorTest, BandwidthEstimateChanges) {
+ FieldTrialBasedConfig field_trials;
+ int64_t timestamp_ms = 1000;
+ AlrDetector alr_detector(&field_trials);
+ alr_detector.SetEstimatedBitrate(kEstimatedBitrateBps);
+
+ // Start in non-ALR state.
+ EXPECT_FALSE(alr_detector.GetApplicationLimitedRegionStartTime());
+
+ // ALR starts when bitrate drops below 20%.
+ SimulateOutgoingTrafficIn(&alr_detector, &timestamp_ms)
+ .ForTimeMs(1000)
+ .AtPercentOfEstimatedBitrate(20);
+ EXPECT_TRUE(alr_detector.GetApplicationLimitedRegionStartTime());
+
+  // When the bandwidth estimate drops, the detector should stay in ALR mode
+  // and quit it shortly afterwards as the sender continues sending the same
+  // amount of traffic. This is necessary to ensure that ProbeController can
+  // still react to the BWE drop by initiating a new probe.
+ alr_detector.SetEstimatedBitrate(kEstimatedBitrateBps / 5);
+ EXPECT_TRUE(alr_detector.GetApplicationLimitedRegionStartTime());
+ SimulateOutgoingTrafficIn(&alr_detector, &timestamp_ms)
+ .ForTimeMs(1000)
+ .AtPercentOfEstimatedBitrate(50);
+ EXPECT_FALSE(alr_detector.GetApplicationLimitedRegionStartTime());
+}
+
+TEST(AlrDetectorTest, ParseControlFieldTrial) {
+ webrtc::test::ScopedFieldTrials scoped_field_trial(
+ "WebRTC-ProbingScreenshareBwe/Control/");
+ absl::optional<AlrExperimentSettings> parsed_params =
+ AlrExperimentSettings::CreateFromFieldTrial(
+ FieldTrialBasedConfig(), "WebRTC-ProbingScreenshareBwe");
+ EXPECT_FALSE(static_cast<bool>(parsed_params));
+}
+
+TEST(AlrDetectorTest, ParseActiveFieldTrial) {
+ webrtc::test::ScopedFieldTrials scoped_field_trial(
+ "WebRTC-ProbingScreenshareBwe/1.1,2875,85,20,-20,1/");
+ absl::optional<AlrExperimentSettings> parsed_params =
+ AlrExperimentSettings::CreateFromFieldTrial(
+ FieldTrialBasedConfig(), "WebRTC-ProbingScreenshareBwe");
+ ASSERT_TRUE(static_cast<bool>(parsed_params));
+ EXPECT_EQ(1.1f, parsed_params->pacing_factor);
+ EXPECT_EQ(2875, parsed_params->max_paced_queue_time);
+ EXPECT_EQ(85, parsed_params->alr_bandwidth_usage_percent);
+ EXPECT_EQ(20, parsed_params->alr_start_budget_level_percent);
+ EXPECT_EQ(-20, parsed_params->alr_stop_budget_level_percent);
+ EXPECT_EQ(1, parsed_params->group_id);
+}
+
+TEST(AlrDetectorTest, ParseAlrSpecificFieldTrial) {
+ webrtc::test::ScopedFieldTrials scoped_field_trial(
+ "WebRTC-AlrDetectorParameters/"
+ "bw_usage:90%,start:0%,stop:-10%/");
+ FieldTrialBasedConfig field_trials;
+ AlrDetector alr_detector(&field_trials);
+ int64_t timestamp_ms = 1000;
+ alr_detector.SetEstimatedBitrate(kEstimatedBitrateBps);
+
+ // Start in non-ALR state.
+ EXPECT_FALSE(alr_detector.GetApplicationLimitedRegionStartTime());
+
+ // ALR does not start at 100% utilization.
+ SimulateOutgoingTrafficIn(&alr_detector, &timestamp_ms)
+ .ForTimeMs(1000)
+ .AtPercentOfEstimatedBitrate(100);
+ EXPECT_FALSE(alr_detector.GetApplicationLimitedRegionStartTime());
+
+  // ALR does start at 85% utilization: the previous phase overused by 10%,
+  // so at 85% (5% under the 90% target) it should take about 2 s for the
+  // budget to climb back to the 0% start level.
+ SimulateOutgoingTrafficIn(&alr_detector, &timestamp_ms)
+ .ForTimeMs(2100)
+ .AtPercentOfEstimatedBitrate(85);
+ EXPECT_TRUE(alr_detector.GetApplicationLimitedRegionStartTime());
+}
+
+} // namespace webrtc
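
The per-step byte count used by the SimulateOutgoingTrafficIn helper follows directly from the bitrate arithmetic; a worked instance (hypothetical helper name) at 90% of the 300 kbps estimate:

#include <cstdint>

int64_t BytesPerStepAt90Percent() {
  // 300000 bps * 0.90 = 270000 bps = 33750 bytes/s, so each 10 ms step
  // carries 337 bytes after integer truncation.
  return int64_t{300000} * 90 * 10 / (8 * 100 * 1000);  // == 337
}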
diff --git a/third_party/libwebrtc/modules/congestion_controller/goog_cc/bitrate_estimator.cc b/third_party/libwebrtc/modules/congestion_controller/goog_cc/bitrate_estimator.cc
new file mode 100644
index 0000000000..9c68e48886
--- /dev/null
+++ b/third_party/libwebrtc/modules/congestion_controller/goog_cc/bitrate_estimator.cc
@@ -0,0 +1,166 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/congestion_controller/goog_cc/bitrate_estimator.h"
+
+#include <stdio.h>
+
+#include <algorithm>
+#include <cmath>
+#include <string>
+
+#include "api/units/data_rate.h"
+#include "modules/remote_bitrate_estimator/test/bwe_test_logging.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+namespace {
+constexpr int kInitialRateWindowMs = 500;
+constexpr int kRateWindowMs = 150;
+constexpr int kMinRateWindowMs = 150;
+constexpr int kMaxRateWindowMs = 1000;
+
+const char kBweThroughputWindowConfig[] = "WebRTC-BweThroughputWindowConfig";
+
+} // namespace
+
+BitrateEstimator::BitrateEstimator(const FieldTrialsView* key_value_config)
+ : sum_(0),
+ initial_window_ms_("initial_window_ms",
+ kInitialRateWindowMs,
+ kMinRateWindowMs,
+ kMaxRateWindowMs),
+ noninitial_window_ms_("window_ms",
+ kRateWindowMs,
+ kMinRateWindowMs,
+ kMaxRateWindowMs),
+ uncertainty_scale_("scale", 10.0),
+ uncertainty_scale_in_alr_("scale_alr", uncertainty_scale_),
+ small_sample_uncertainty_scale_("scale_small", uncertainty_scale_),
+ small_sample_threshold_("small_thresh", DataSize::Zero()),
+ uncertainty_symmetry_cap_("symmetry_cap", DataRate::Zero()),
+ estimate_floor_("floor", DataRate::Zero()),
+ current_window_ms_(0),
+ prev_time_ms_(-1),
+ bitrate_estimate_kbps_(-1.0f),
+ bitrate_estimate_var_(50.0f) {
+ // E.g WebRTC-BweThroughputWindowConfig/initial_window_ms:350,window_ms:250/
+ ParseFieldTrial(
+ {&initial_window_ms_, &noninitial_window_ms_, &uncertainty_scale_,
+ &uncertainty_scale_in_alr_, &small_sample_uncertainty_scale_,
+ &small_sample_threshold_, &uncertainty_symmetry_cap_, &estimate_floor_},
+ key_value_config->Lookup(kBweThroughputWindowConfig));
+}
+
+BitrateEstimator::~BitrateEstimator() = default;
+
+void BitrateEstimator::Update(Timestamp at_time, DataSize amount, bool in_alr) {
+ int rate_window_ms = noninitial_window_ms_.Get();
+ // We use a larger window at the beginning to get a more stable sample that
+ // we can use to initialize the estimate.
+ if (bitrate_estimate_kbps_ < 0.f)
+ rate_window_ms = initial_window_ms_.Get();
+ bool is_small_sample = false;
+ float bitrate_sample_kbps = UpdateWindow(at_time.ms(), amount.bytes(),
+ rate_window_ms, &is_small_sample);
+ if (bitrate_sample_kbps < 0.0f)
+ return;
+ if (bitrate_estimate_kbps_ < 0.0f) {
+ // This is the very first sample we get. Use it to initialize the estimate.
+ bitrate_estimate_kbps_ = bitrate_sample_kbps;
+ return;
+ }
+  // Optionally use higher uncertainty for very small samples (to avoid
+  // dropping the estimate) and for samples obtained in ALR.
+ float scale = uncertainty_scale_;
+ if (is_small_sample && bitrate_sample_kbps < bitrate_estimate_kbps_) {
+ scale = small_sample_uncertainty_scale_;
+ } else if (in_alr && bitrate_sample_kbps < bitrate_estimate_kbps_) {
+ // Optionally use higher uncertainty for samples obtained during ALR.
+ scale = uncertainty_scale_in_alr_;
+ }
+ // Define the sample uncertainty as a function of how far away it is from the
+ // current estimate. With low values of uncertainty_symmetry_cap_ we add more
+ // uncertainty to increases than to decreases. For higher values we approach
+ // symmetry.
+ float sample_uncertainty =
+ scale * std::abs(bitrate_estimate_kbps_ - bitrate_sample_kbps) /
+ (bitrate_estimate_kbps_ +
+ std::min(bitrate_sample_kbps,
+ uncertainty_symmetry_cap_.Get().kbps<float>()));
+
+ float sample_var = sample_uncertainty * sample_uncertainty;
+  // Update a Bayesian estimate of the rate, weighting it lower if the sample
+  // uncertainty is large.
+  // The bitrate estimate uncertainty is increased with each update to model
+  // that the bitrate changes over time.
+ float pred_bitrate_estimate_var = bitrate_estimate_var_ + 5.f;
+ bitrate_estimate_kbps_ = (sample_var * bitrate_estimate_kbps_ +
+ pred_bitrate_estimate_var * bitrate_sample_kbps) /
+ (sample_var + pred_bitrate_estimate_var);
+ bitrate_estimate_kbps_ =
+ std::max(bitrate_estimate_kbps_, estimate_floor_.Get().kbps<float>());
+ bitrate_estimate_var_ = sample_var * pred_bitrate_estimate_var /
+ (sample_var + pred_bitrate_estimate_var);
+ BWE_TEST_LOGGING_PLOT(1, "acknowledged_bitrate", at_time.ms(),
+ bitrate_estimate_kbps_ * 1000);
+}
+
+float BitrateEstimator::UpdateWindow(int64_t now_ms,
+ int bytes,
+ int rate_window_ms,
+ bool* is_small_sample) {
+ RTC_DCHECK(is_small_sample != nullptr);
+ // Reset if time moves backwards.
+ if (now_ms < prev_time_ms_) {
+ prev_time_ms_ = -1;
+ sum_ = 0;
+ current_window_ms_ = 0;
+ }
+ if (prev_time_ms_ >= 0) {
+ current_window_ms_ += now_ms - prev_time_ms_;
+ // Reset if nothing has been received for more than a full window.
+ if (now_ms - prev_time_ms_ > rate_window_ms) {
+ sum_ = 0;
+ current_window_ms_ %= rate_window_ms;
+ }
+ }
+ prev_time_ms_ = now_ms;
+ float bitrate_sample = -1.0f;
+ if (current_window_ms_ >= rate_window_ms) {
+ *is_small_sample = sum_ < small_sample_threshold_->bytes();
+ bitrate_sample = 8.0f * sum_ / static_cast<float>(rate_window_ms);
+ current_window_ms_ -= rate_window_ms;
+ sum_ = 0;
+ }
+ sum_ += bytes;
+ return bitrate_sample;
+}
+
+absl::optional<DataRate> BitrateEstimator::bitrate() const {
+ if (bitrate_estimate_kbps_ < 0.f)
+ return absl::nullopt;
+ return DataRate::KilobitsPerSec(bitrate_estimate_kbps_);
+}
+
+absl::optional<DataRate> BitrateEstimator::PeekRate() const {
+ if (current_window_ms_ > 0)
+ return DataSize::Bytes(sum_) / TimeDelta::Millis(current_window_ms_);
+ return absl::nullopt;
+}
+
+void BitrateEstimator::ExpectFastRateChange() {
+ // By setting the bitrate-estimate variance to a higher value we allow the
+ // bitrate to change fast for the next few samples.
+ bitrate_estimate_var_ += 200;
+}
+
+} // namespace webrtc
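
A worked instance of the Bayesian update in Update() above, with made-up numbers, the default uncertainty scale of 10, and the default zero symmetry cap (so the denominator reduces to the current estimate); the function name is hypothetical:

#include <cmath>

void SketchBayesianStep() {
  float est = 500.f, var = 50.f;  // prior estimate (kbps) and its variance
  float sample = 400.f;           // new throughput sample (kbps)
  float uncertainty = 10.f * std::abs(est - sample) / est;  // == 2.0
  float sample_var = uncertainty * uncertainty;             // == 4.0
  float pred_var = var + 5.f;                               // == 55.0
  est = (sample_var * est + pred_var * sample) / (sample_var + pred_var);
  // est ~= 406.8: the sample dominates because its variance (4.0) is far
  // smaller than the predicted estimate variance (55.0).
  var = sample_var * pred_var / (sample_var + pred_var);    // ~= 3.7
}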
diff --git a/third_party/libwebrtc/modules/congestion_controller/goog_cc/bitrate_estimator.h b/third_party/libwebrtc/modules/congestion_controller/goog_cc/bitrate_estimator.h
new file mode 100644
index 0000000000..a6f985800e
--- /dev/null
+++ b/third_party/libwebrtc/modules/congestion_controller/goog_cc/bitrate_estimator.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_CONGESTION_CONTROLLER_GOOG_CC_BITRATE_ESTIMATOR_H_
+#define MODULES_CONGESTION_CONTROLLER_GOOG_CC_BITRATE_ESTIMATOR_H_
+
+#include <stdint.h>
+
+#include "absl/types/optional.h"
+#include "api/field_trials_view.h"
+#include "api/units/data_rate.h"
+#include "api/units/timestamp.h"
+#include "rtc_base/experiments/field_trial_parser.h"
+
+namespace webrtc {
+
+// Computes a Bayesian estimate of the throughput given acks containing
+// the arrival time and payload size. Samples which are far from the current
+// estimate or are based on few packets are given a smaller weight, as they
+// are considered to be more likely to have been caused by, e.g., delay spikes
+// unrelated to congestion.
+class BitrateEstimator {
+ public:
+ explicit BitrateEstimator(const FieldTrialsView* key_value_config);
+ virtual ~BitrateEstimator();
+ virtual void Update(Timestamp at_time, DataSize amount, bool in_alr);
+
+ virtual absl::optional<DataRate> bitrate() const;
+ absl::optional<DataRate> PeekRate() const;
+
+ virtual void ExpectFastRateChange();
+
+ private:
+ float UpdateWindow(int64_t now_ms,
+ int bytes,
+ int rate_window_ms,
+ bool* is_small_sample);
+ int sum_;
+ FieldTrialConstrained<int> initial_window_ms_;
+ FieldTrialConstrained<int> noninitial_window_ms_;
+ FieldTrialParameter<double> uncertainty_scale_;
+ FieldTrialParameter<double> uncertainty_scale_in_alr_;
+ FieldTrialParameter<double> small_sample_uncertainty_scale_;
+ FieldTrialParameter<DataSize> small_sample_threshold_;
+ FieldTrialParameter<DataRate> uncertainty_symmetry_cap_;
+ FieldTrialParameter<DataRate> estimate_floor_;
+ int64_t current_window_ms_;
+ int64_t prev_time_ms_;
+ float bitrate_estimate_kbps_;
+ float bitrate_estimate_var_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_CONGESTION_CONTROLLER_GOOG_CC_BITRATE_ESTIMATOR_H_
diff --git a/third_party/libwebrtc/modules/congestion_controller/goog_cc/congestion_window_pushback_controller.cc b/third_party/libwebrtc/modules/congestion_controller/goog_cc/congestion_window_pushback_controller.cc
new file mode 100644
index 0000000000..2f188f30ca
--- /dev/null
+++ b/third_party/libwebrtc/modules/congestion_controller/goog_cc/congestion_window_pushback_controller.cc
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/congestion_controller/goog_cc/congestion_window_pushback_controller.h"
+
+#include <inttypes.h>
+#include <stdio.h>
+
+#include <algorithm>
+#include <string>
+
+#include "absl/strings/match.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/experiments/rate_control_settings.h"
+
+namespace webrtc {
+
+CongestionWindowPushbackController::CongestionWindowPushbackController(
+ const FieldTrialsView* key_value_config)
+ : add_pacing_(
+ absl::StartsWith(key_value_config->Lookup(
+ "WebRTC-AddPacingToCongestionWindowPushback"),
+ "Enabled")),
+ min_pushback_target_bitrate_bps_(
+ RateControlSettings::ParseFromKeyValueConfig(key_value_config)
+ .CongestionWindowMinPushbackTargetBitrateBps()),
+ current_data_window_(
+ RateControlSettings::ParseFromKeyValueConfig(key_value_config)
+ .CongestionWindowInitialDataWindow()) {}
+
+void CongestionWindowPushbackController::UpdateOutstandingData(
+ int64_t outstanding_bytes) {
+ outstanding_bytes_ = outstanding_bytes;
+}
+void CongestionWindowPushbackController::UpdatePacingQueue(
+ int64_t pacing_bytes) {
+ pacing_bytes_ = pacing_bytes;
+}
+
+void CongestionWindowPushbackController::SetDataWindow(DataSize data_window) {
+ current_data_window_ = data_window;
+}
+
+uint32_t CongestionWindowPushbackController::UpdateTargetBitrate(
+ uint32_t bitrate_bps) {
+ if (!current_data_window_ || current_data_window_->IsZero())
+ return bitrate_bps;
+ int64_t total_bytes = outstanding_bytes_;
+ if (add_pacing_)
+ total_bytes += pacing_bytes_;
+ double fill_ratio =
+ total_bytes / static_cast<double>(current_data_window_->bytes());
+ if (fill_ratio > 1.5) {
+ encoding_rate_ratio_ *= 0.9;
+ } else if (fill_ratio > 1) {
+ encoding_rate_ratio_ *= 0.95;
+ } else if (fill_ratio < 0.1) {
+ encoding_rate_ratio_ = 1.0;
+ } else {
+ encoding_rate_ratio_ *= 1.05;
+ encoding_rate_ratio_ = std::min(encoding_rate_ratio_, 1.0);
+ }
+ uint32_t adjusted_target_bitrate_bps =
+ static_cast<uint32_t>(bitrate_bps * encoding_rate_ratio_);
+
+  // Do not adjust below the minimum pushback bitrate, but do honor the
+  // original estimate if it is already below that minimum.
+ bitrate_bps = adjusted_target_bitrate_bps < min_pushback_target_bitrate_bps_
+ ? std::min(bitrate_bps, min_pushback_target_bitrate_bps_)
+ : adjusted_target_bitrate_bps;
+ return bitrate_bps;
+}
+
+} // namespace webrtc
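
The fill-ratio ladder in UpdateTargetBitrate() can be traced with the numbers from the FullCongestionWindow unit test below; a sketch with a hypothetical helper name:

#include <cstdint>

void SketchPushbackStep() {
  // 100 kB outstanding against a 50 kB window hits the > 1.5 branch.
  double fill_ratio = 100000 / 50000.0;      // == 2.0
  double encoding_rate_ratio = 1.0 * 0.9;    // ratio after one update
  uint32_t target =
      static_cast<uint32_t>(80000 * encoding_rate_ratio);  // == 72000
  (void)fill_ratio;
  (void)target;
}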
diff --git a/third_party/libwebrtc/modules/congestion_controller/goog_cc/congestion_window_pushback_controller.h b/third_party/libwebrtc/modules/congestion_controller/goog_cc/congestion_window_pushback_controller.h
new file mode 100644
index 0000000000..ea9ed97c3d
--- /dev/null
+++ b/third_party/libwebrtc/modules/congestion_controller/goog_cc/congestion_window_pushback_controller.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_CONGESTION_CONTROLLER_GOOG_CC_CONGESTION_WINDOW_PUSHBACK_CONTROLLER_H_
+#define MODULES_CONGESTION_CONTROLLER_GOOG_CC_CONGESTION_WINDOW_PUSHBACK_CONTROLLER_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "absl/types/optional.h"
+#include "api/field_trials_view.h"
+#include "api/units/data_size.h"
+
+namespace webrtc {
+
+// This class enables pushback from the congestion window directly to the
+// video encoder. When the congestion window is filling up, the video encoder
+// target bitrate is reduced accordingly to accommodate the network change.
+// To avoid pausing video too frequently, a minimum encoder target bitrate
+// threshold is used to prevent video pauses due to a full congestion window.
+class CongestionWindowPushbackController {
+ public:
+ explicit CongestionWindowPushbackController(
+ const FieldTrialsView* key_value_config);
+ void UpdateOutstandingData(int64_t outstanding_bytes);
+ void UpdatePacingQueue(int64_t pacing_bytes);
+ uint32_t UpdateTargetBitrate(uint32_t bitrate_bps);
+ void SetDataWindow(DataSize data_window);
+
+ private:
+ const bool add_pacing_;
+ const uint32_t min_pushback_target_bitrate_bps_;
+ absl::optional<DataSize> current_data_window_;
+ int64_t outstanding_bytes_ = 0;
+ int64_t pacing_bytes_ = 0;
+ double encoding_rate_ratio_ = 1.0;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_CONGESTION_CONTROLLER_GOOG_CC_CONGESTION_WINDOW_PUSHBACK_CONTROLLER_H_
diff --git a/third_party/libwebrtc/modules/congestion_controller/goog_cc/congestion_window_pushback_controller_unittest.cc b/third_party/libwebrtc/modules/congestion_controller/goog_cc/congestion_window_pushback_controller_unittest.cc
new file mode 100644
index 0000000000..62dde02323
--- /dev/null
+++ b/third_party/libwebrtc/modules/congestion_controller/goog_cc/congestion_window_pushback_controller_unittest.cc
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/congestion_controller/goog_cc/congestion_window_pushback_controller.h"
+
+#include <memory>
+
+#include "api/transport/field_trial_based_config.h"
+#include "test/field_trial.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+using ::testing::_;
+
+namespace webrtc {
+namespace test {
+
+class CongestionWindowPushbackControllerTest : public ::testing::Test {
+ public:
+ CongestionWindowPushbackControllerTest() {
+ cwnd_controller_.reset(
+ new CongestionWindowPushbackController(&field_trial_config_));
+ }
+
+ protected:
+ FieldTrialBasedConfig field_trial_config_;
+
+ std::unique_ptr<CongestionWindowPushbackController> cwnd_controller_;
+};
+
+TEST_F(CongestionWindowPushbackControllerTest, FullCongestionWindow) {
+ cwnd_controller_->UpdateOutstandingData(100000);
+ cwnd_controller_->SetDataWindow(DataSize::Bytes(50000));
+
+ uint32_t bitrate_bps = 80000;
+ bitrate_bps = cwnd_controller_->UpdateTargetBitrate(bitrate_bps);
+ EXPECT_EQ(72000u, bitrate_bps);
+
+ cwnd_controller_->SetDataWindow(DataSize::Bytes(50000));
+ bitrate_bps = cwnd_controller_->UpdateTargetBitrate(bitrate_bps);
+ EXPECT_EQ(static_cast<uint32_t>(72000 * 0.9 * 0.9), bitrate_bps);
+}
+
+TEST_F(CongestionWindowPushbackControllerTest, NormalCongestionWindow) {
+ cwnd_controller_->UpdateOutstandingData(199999);
+ cwnd_controller_->SetDataWindow(DataSize::Bytes(200000));
+
+ uint32_t bitrate_bps = 80000;
+ bitrate_bps = cwnd_controller_->UpdateTargetBitrate(bitrate_bps);
+ EXPECT_EQ(80000u, bitrate_bps);
+}
+
+TEST_F(CongestionWindowPushbackControllerTest, LowBitrate) {
+ cwnd_controller_->UpdateOutstandingData(100000);
+ cwnd_controller_->SetDataWindow(DataSize::Bytes(50000));
+
+ uint32_t bitrate_bps = 35000;
+ bitrate_bps = cwnd_controller_->UpdateTargetBitrate(bitrate_bps);
+ EXPECT_EQ(static_cast<uint32_t>(35000 * 0.9), bitrate_bps);
+
+ cwnd_controller_->SetDataWindow(DataSize::Bytes(20000));
+ bitrate_bps = cwnd_controller_->UpdateTargetBitrate(bitrate_bps);
+ EXPECT_EQ(30000u, bitrate_bps);
+}
+
+TEST_F(CongestionWindowPushbackControllerTest, NoPushbackOnDataWindowUnset) {
+ cwnd_controller_->UpdateOutstandingData(1e8); // Large number
+
+ uint32_t bitrate_bps = 80000;
+ bitrate_bps = cwnd_controller_->UpdateTargetBitrate(bitrate_bps);
+ EXPECT_EQ(80000u, bitrate_bps);
+}
+
+TEST_F(CongestionWindowPushbackControllerTest, PushbackOnInitialDataWindow) {
+ test::ScopedFieldTrials trials("WebRTC-CongestionWindow/InitWin:100000/");
+ cwnd_controller_.reset(
+ new CongestionWindowPushbackController(&field_trial_config_));
+ cwnd_controller_->UpdateOutstandingData(1e8); // Large number
+
+ uint32_t bitrate_bps = 80000;
+ bitrate_bps = cwnd_controller_->UpdateTargetBitrate(bitrate_bps);
+ EXPECT_GT(80000u, bitrate_bps);
+}
+
+TEST_F(CongestionWindowPushbackControllerTest, PushbackDropFrame) {
+ test::ScopedFieldTrials trials("WebRTC-CongestionWindow/DropFrame:true/");
+ cwnd_controller_.reset(
+ new CongestionWindowPushbackController(&field_trial_config_));
+ cwnd_controller_->UpdateOutstandingData(1e8); // Large number
+ cwnd_controller_->SetDataWindow(DataSize::Bytes(50000));
+
+ uint32_t bitrate_bps = 80000;
+ bitrate_bps = cwnd_controller_->UpdateTargetBitrate(bitrate_bps);
+ EXPECT_GT(80000u, bitrate_bps);
+}
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/congestion_controller/goog_cc/delay_based_bwe.cc b/third_party/libwebrtc/modules/congestion_controller/goog_cc/delay_based_bwe.cc
new file mode 100644
index 0000000000..07ac599148
--- /dev/null
+++ b/third_party/libwebrtc/modules/congestion_controller/goog_cc/delay_based_bwe.cc
@@ -0,0 +1,305 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/congestion_controller/goog_cc/delay_based_bwe.h"
+
+#include <algorithm>
+#include <cstdint>
+#include <cstdio>
+#include <memory>
+#include <string>
+#include <utility>
+
+#include "absl/strings/match.h"
+#include "api/rtc_event_log/rtc_event.h"
+#include "api/rtc_event_log/rtc_event_log.h"
+#include "api/units/time_delta.h"
+#include "logging/rtc_event_log/events/rtc_event_bwe_update_delay_based.h"
+#include "modules/congestion_controller/goog_cc/trendline_estimator.h"
+#include "modules/remote_bitrate_estimator/include/bwe_defines.h"
+#include "modules/remote_bitrate_estimator/test/bwe_test_logging.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "system_wrappers/include/metrics.h"
+
+namespace webrtc {
+namespace {
+constexpr TimeDelta kStreamTimeOut = TimeDelta::Seconds(2);
+constexpr TimeDelta kSendTimeGroupLength = TimeDelta::Millis(5);
+
+// This ssrc is used to fulfill the current API but will be removed
+// after the API has been changed.
+constexpr uint32_t kFixedSsrc = 0;
+} // namespace
+
+constexpr char BweSeparateAudioPacketsSettings::kKey[];
+
+BweSeparateAudioPacketsSettings::BweSeparateAudioPacketsSettings(
+ const FieldTrialsView* key_value_config) {
+ Parser()->Parse(
+ key_value_config->Lookup(BweSeparateAudioPacketsSettings::kKey));
+}
+
+std::unique_ptr<StructParametersParser>
+BweSeparateAudioPacketsSettings::Parser() {
+ return StructParametersParser::Create( //
+ "enabled", &enabled, //
+ "packet_threshold", &packet_threshold, //
+ "time_threshold", &time_threshold);
+}
+
+DelayBasedBwe::Result::Result()
+ : updated(false),
+ probe(false),
+ target_bitrate(DataRate::Zero()),
+ recovered_from_overuse(false) {}
+
+DelayBasedBwe::DelayBasedBwe(const FieldTrialsView* key_value_config,
+ RtcEventLog* event_log,
+ NetworkStatePredictor* network_state_predictor)
+ : event_log_(event_log),
+ key_value_config_(key_value_config),
+ separate_audio_(key_value_config),
+ audio_packets_since_last_video_(0),
+ last_video_packet_recv_time_(Timestamp::MinusInfinity()),
+ network_state_predictor_(network_state_predictor),
+ video_delay_detector_(
+ new TrendlineEstimator(key_value_config_, network_state_predictor_)),
+ audio_delay_detector_(
+ new TrendlineEstimator(key_value_config_, network_state_predictor_)),
+ active_delay_detector_(video_delay_detector_.get()),
+ last_seen_packet_(Timestamp::MinusInfinity()),
+ uma_recorded_(false),
+ rate_control_(key_value_config, /*send_side=*/true),
+ prev_bitrate_(DataRate::Zero()),
+ prev_state_(BandwidthUsage::kBwNormal) {
+  RTC_LOG(LS_INFO)
+      << "Initialized DelayBasedBwe with separate audio overuse detection "
+      << separate_audio_.Parser()->Encode();
+}
+
+DelayBasedBwe::~DelayBasedBwe() {}
+
+DelayBasedBwe::Result DelayBasedBwe::IncomingPacketFeedbackVector(
+ const TransportPacketsFeedback& msg,
+ absl::optional<DataRate> acked_bitrate,
+ absl::optional<DataRate> probe_bitrate,
+ absl::optional<NetworkStateEstimate> network_estimate,
+ bool in_alr) {
+ RTC_DCHECK_RUNS_SERIALIZED(&network_race_);
+
+ auto packet_feedback_vector = msg.SortedByReceiveTime();
+ // TODO(holmer): An empty feedback vector here likely means that
+ // all acks were too late and that the send time history had
+ // timed out. We should reduce the rate when this occurs.
+ if (packet_feedback_vector.empty()) {
+ RTC_LOG(LS_WARNING) << "Very late feedback received.";
+ return DelayBasedBwe::Result();
+ }
+
+ if (!uma_recorded_) {
+ RTC_HISTOGRAM_ENUMERATION(kBweTypeHistogram,
+ BweNames::kSendSideTransportSeqNum,
+ BweNames::kBweNamesMax);
+ uma_recorded_ = true;
+ }
+ bool delayed_feedback = true;
+ bool recovered_from_overuse = false;
+ BandwidthUsage prev_detector_state = active_delay_detector_->State();
+ for (const auto& packet_feedback : packet_feedback_vector) {
+ delayed_feedback = false;
+ IncomingPacketFeedback(packet_feedback, msg.feedback_time);
+ if (prev_detector_state == BandwidthUsage::kBwUnderusing &&
+ active_delay_detector_->State() == BandwidthUsage::kBwNormal) {
+ recovered_from_overuse = true;
+ }
+ prev_detector_state = active_delay_detector_->State();
+ }
+
+ if (delayed_feedback) {
+    // TODO(bugs.webrtc.org/10125): Design a better mechanism to safeguard
+    // against building very large network queues.
+ return Result();
+ }
+ rate_control_.SetInApplicationLimitedRegion(in_alr);
+ rate_control_.SetNetworkStateEstimate(network_estimate);
+ return MaybeUpdateEstimate(acked_bitrate, probe_bitrate,
+ std::move(network_estimate),
+ recovered_from_overuse, in_alr, msg.feedback_time);
+}
+
+void DelayBasedBwe::IncomingPacketFeedback(const PacketResult& packet_feedback,
+ Timestamp at_time) {
+ // Reset if the stream has timed out.
+ if (last_seen_packet_.IsInfinite() ||
+ at_time - last_seen_packet_ > kStreamTimeOut) {
+ video_inter_arrival_delta_ =
+ std::make_unique<InterArrivalDelta>(kSendTimeGroupLength);
+ audio_inter_arrival_delta_ =
+ std::make_unique<InterArrivalDelta>(kSendTimeGroupLength);
+
+ video_delay_detector_.reset(
+ new TrendlineEstimator(key_value_config_, network_state_predictor_));
+ audio_delay_detector_.reset(
+ new TrendlineEstimator(key_value_config_, network_state_predictor_));
+ active_delay_detector_ = video_delay_detector_.get();
+ }
+ last_seen_packet_ = at_time;
+
+ // As an alternative to ignoring small packets, we can separate audio and
+ // video packets for overuse detection.
+ DelayIncreaseDetectorInterface* delay_detector_for_packet =
+ video_delay_detector_.get();
+ if (separate_audio_.enabled) {
+ if (packet_feedback.sent_packet.audio) {
+ delay_detector_for_packet = audio_delay_detector_.get();
+ audio_packets_since_last_video_++;
+ if (audio_packets_since_last_video_ > separate_audio_.packet_threshold &&
+ packet_feedback.receive_time - last_video_packet_recv_time_ >
+ separate_audio_.time_threshold) {
+ active_delay_detector_ = audio_delay_detector_.get();
+ }
+ } else {
+ audio_packets_since_last_video_ = 0;
+ last_video_packet_recv_time_ =
+ std::max(last_video_packet_recv_time_, packet_feedback.receive_time);
+ active_delay_detector_ = video_delay_detector_.get();
+ }
+ }
+ DataSize packet_size = packet_feedback.sent_packet.size;
+
+ TimeDelta send_delta = TimeDelta::Zero();
+ TimeDelta recv_delta = TimeDelta::Zero();
+ int size_delta = 0;
+
+ InterArrivalDelta* inter_arrival_for_packet =
+ (separate_audio_.enabled && packet_feedback.sent_packet.audio)
+ ? audio_inter_arrival_delta_.get()
+ : video_inter_arrival_delta_.get();
+ bool calculated_deltas = inter_arrival_for_packet->ComputeDeltas(
+ packet_feedback.sent_packet.send_time, packet_feedback.receive_time,
+ at_time, packet_size.bytes(), &send_delta, &recv_delta, &size_delta);
+
+ delay_detector_for_packet->Update(recv_delta.ms<double>(),
+ send_delta.ms<double>(),
+ packet_feedback.sent_packet.send_time.ms(),
+ packet_feedback.receive_time.ms(),
+ packet_size.bytes(), calculated_deltas);
+}
+
+DataRate DelayBasedBwe::TriggerOveruse(Timestamp at_time,
+ absl::optional<DataRate> link_capacity) {
+ RateControlInput input(BandwidthUsage::kBwOverusing, link_capacity);
+ return rate_control_.Update(&input, at_time);
+}
+
+DelayBasedBwe::Result DelayBasedBwe::MaybeUpdateEstimate(
+ absl::optional<DataRate> acked_bitrate,
+ absl::optional<DataRate> probe_bitrate,
+ absl::optional<NetworkStateEstimate> state_estimate,
+ bool recovered_from_overuse,
+ bool in_alr,
+ Timestamp at_time) {
+ Result result;
+
+ // Currently overusing the bandwidth.
+ if (active_delay_detector_->State() == BandwidthUsage::kBwOverusing) {
+ if (acked_bitrate &&
+ rate_control_.TimeToReduceFurther(at_time, *acked_bitrate)) {
+ result.updated =
+ UpdateEstimate(at_time, acked_bitrate, &result.target_bitrate);
+ } else if (!acked_bitrate && rate_control_.ValidEstimate() &&
+ rate_control_.InitialTimeToReduceFurther(at_time)) {
+ // Overusing before we have a measured acknowledged bitrate. Reduce send
+ // rate by 50% every 200 ms.
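+      // For example, a 1 Mbps estimate with no acked bitrate drops to
+      // 500 kbps after 200 ms and to 250 kbps after 400 ms, until a measured
+      // rate becomes available.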
+ // TODO(tschumim): Improve this and/or the acknowledged bitrate estimator
+ // so that we (almost) always have a bitrate estimate.
+ rate_control_.SetEstimate(rate_control_.LatestEstimate() / 2, at_time);
+ result.updated = true;
+ result.probe = false;
+ result.target_bitrate = rate_control_.LatestEstimate();
+ }
+ } else {
+ if (probe_bitrate) {
+ result.probe = true;
+ result.updated = true;
+ rate_control_.SetEstimate(*probe_bitrate, at_time);
+ result.target_bitrate = rate_control_.LatestEstimate();
+ } else {
+ result.updated =
+ UpdateEstimate(at_time, acked_bitrate, &result.target_bitrate);
+ result.recovered_from_overuse = recovered_from_overuse;
+ }
+ }
+ BandwidthUsage detector_state = active_delay_detector_->State();
+ if ((result.updated && prev_bitrate_ != result.target_bitrate) ||
+ detector_state != prev_state_) {
+ DataRate bitrate = result.updated ? result.target_bitrate : prev_bitrate_;
+
+ BWE_TEST_LOGGING_PLOT(1, "target_bitrate_bps", at_time.ms(), bitrate.bps());
+
+ if (event_log_) {
+ event_log_->Log(std::make_unique<RtcEventBweUpdateDelayBased>(
+ bitrate.bps(), detector_state));
+ }
+
+ prev_bitrate_ = bitrate;
+ prev_state_ = detector_state;
+ }
+
+ result.delay_detector_state = detector_state;
+ return result;
+}
+
+bool DelayBasedBwe::UpdateEstimate(Timestamp at_time,
+ absl::optional<DataRate> acked_bitrate,
+ DataRate* target_rate) {
+ const RateControlInput input(active_delay_detector_->State(), acked_bitrate);
+ *target_rate = rate_control_.Update(&input, at_time);
+ return rate_control_.ValidEstimate();
+}
+
+void DelayBasedBwe::OnRttUpdate(TimeDelta avg_rtt) {
+ rate_control_.SetRtt(avg_rtt);
+}
+
+bool DelayBasedBwe::LatestEstimate(std::vector<uint32_t>* ssrcs,
+ DataRate* bitrate) const {
+ // Currently accessed from both the process thread (see
+ // ModuleRtpRtcpImpl::Process()) and the configuration thread (see
+ // Call::GetStats()). Should in the future only be accessed from a single
+ // thread.
+ RTC_DCHECK(ssrcs);
+ RTC_DCHECK(bitrate);
+ if (!rate_control_.ValidEstimate())
+ return false;
+
+ *ssrcs = {kFixedSsrc};
+ *bitrate = rate_control_.LatestEstimate();
+ return true;
+}
+
+void DelayBasedBwe::SetStartBitrate(DataRate start_bitrate) {
+ RTC_LOG(LS_INFO) << "BWE Setting start bitrate to: "
+ << ToString(start_bitrate);
+ rate_control_.SetStartBitrate(start_bitrate);
+}
+
+void DelayBasedBwe::SetMinBitrate(DataRate min_bitrate) {
+ // Called from both the configuration thread and the network thread. Shouldn't
+ // be called from the network thread in the future.
+ rate_control_.SetMinBitrate(min_bitrate);
+}
+
+TimeDelta DelayBasedBwe::GetExpectedBwePeriod() const {
+ return rate_control_.GetExpectedBandwidthPeriod();
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/congestion_controller/goog_cc/delay_based_bwe.h b/third_party/libwebrtc/modules/congestion_controller/goog_cc/delay_based_bwe.h
new file mode 100644
index 0000000000..21dff35735
--- /dev/null
+++ b/third_party/libwebrtc/modules/congestion_controller/goog_cc/delay_based_bwe.h
@@ -0,0 +1,132 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_CONGESTION_CONTROLLER_GOOG_CC_DELAY_BASED_BWE_H_
+#define MODULES_CONGESTION_CONTROLLER_GOOG_CC_DELAY_BASED_BWE_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <memory>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/field_trials_view.h"
+#include "api/network_state_predictor.h"
+#include "api/transport/network_types.h"
+#include "modules/congestion_controller/goog_cc/delay_increase_detector_interface.h"
+#include "modules/congestion_controller/goog_cc/inter_arrival_delta.h"
+#include "modules/congestion_controller/goog_cc/probe_bitrate_estimator.h"
+#include "modules/remote_bitrate_estimator/aimd_rate_control.h"
+#include "modules/remote_bitrate_estimator/inter_arrival.h"
+#include "rtc_base/experiments/struct_parameters_parser.h"
+#include "rtc_base/race_checker.h"
+
+namespace webrtc {
+class RtcEventLog;
+
+struct BweSeparateAudioPacketsSettings {
+ static constexpr char kKey[] = "WebRTC-Bwe-SeparateAudioPackets";
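+  // Illustrative trial string (assuming the standard StructParametersParser
+  // key:value syntax):
+  //   "WebRTC-Bwe-SeparateAudioPackets/enabled:true,packet_threshold:8,time_threshold:100ms/"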
+
+ BweSeparateAudioPacketsSettings() = default;
+ explicit BweSeparateAudioPacketsSettings(
+ const FieldTrialsView* key_value_config);
+
+ bool enabled = false;
+ int packet_threshold = 10;
+ TimeDelta time_threshold = TimeDelta::Seconds(1);
+
+ std::unique_ptr<StructParametersParser> Parser();
+};
+
+class DelayBasedBwe {
+ public:
+ struct Result {
+ Result();
+ ~Result() = default;
+ bool updated;
+ bool probe;
+ DataRate target_bitrate = DataRate::Zero();
+ bool recovered_from_overuse;
+ BandwidthUsage delay_detector_state;
+ };
+
+ explicit DelayBasedBwe(const FieldTrialsView* key_value_config,
+ RtcEventLog* event_log,
+ NetworkStatePredictor* network_state_predictor);
+
+ DelayBasedBwe() = delete;
+ DelayBasedBwe(const DelayBasedBwe&) = delete;
+ DelayBasedBwe& operator=(const DelayBasedBwe&) = delete;
+
+ virtual ~DelayBasedBwe();
+
+ Result IncomingPacketFeedbackVector(
+ const TransportPacketsFeedback& msg,
+ absl::optional<DataRate> acked_bitrate,
+ absl::optional<DataRate> probe_bitrate,
+ absl::optional<NetworkStateEstimate> network_estimate,
+ bool in_alr);
+ void OnRttUpdate(TimeDelta avg_rtt);
+ bool LatestEstimate(std::vector<uint32_t>* ssrcs, DataRate* bitrate) const;
+ void SetStartBitrate(DataRate start_bitrate);
+ void SetMinBitrate(DataRate min_bitrate);
+ TimeDelta GetExpectedBwePeriod() const;
+ DataRate TriggerOveruse(Timestamp at_time,
+ absl::optional<DataRate> link_capacity);
+ DataRate last_estimate() const { return prev_bitrate_; }
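+
+  // Typical usage (sketch): construct one instance per connection, feed each
+  // TransportPacketsFeedback to IncomingPacketFeedbackVector(), and act on
+  // Result::target_bitrate whenever Result::updated is true.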
+
+ private:
+ friend class GoogCcStatePrinter;
+ void IncomingPacketFeedback(const PacketResult& packet_feedback,
+ Timestamp at_time);
+ Result MaybeUpdateEstimate(
+ absl::optional<DataRate> acked_bitrate,
+ absl::optional<DataRate> probe_bitrate,
+ absl::optional<NetworkStateEstimate> state_estimate,
+ bool recovered_from_overuse,
+ bool in_alr,
+ Timestamp at_time);
+ // Updates the current remote rate estimate and returns true if a valid
+ // estimate exists.
+ bool UpdateEstimate(Timestamp at_time,
+ absl::optional<DataRate> acked_bitrate,
+ DataRate* target_rate);
+
+ rtc::RaceChecker network_race_;
+ RtcEventLog* const event_log_;
+ const FieldTrialsView* const key_value_config_;
+
+ // Alternatively, run two separate overuse detectors for audio and video,
+ // and fall back to the audio one if we haven't seen a video packet in a
+ // while.
+ BweSeparateAudioPacketsSettings separate_audio_;
+ int64_t audio_packets_since_last_video_;
+ Timestamp last_video_packet_recv_time_;
+
+ NetworkStatePredictor* network_state_predictor_;
+ std::unique_ptr<InterArrival> video_inter_arrival_;
+ std::unique_ptr<InterArrivalDelta> video_inter_arrival_delta_;
+ std::unique_ptr<DelayIncreaseDetectorInterface> video_delay_detector_;
+ std::unique_ptr<InterArrival> audio_inter_arrival_;
+ std::unique_ptr<InterArrivalDelta> audio_inter_arrival_delta_;
+ std::unique_ptr<DelayIncreaseDetectorInterface> audio_delay_detector_;
+ DelayIncreaseDetectorInterface* active_delay_detector_;
+
+ Timestamp last_seen_packet_;
+ bool uma_recorded_;
+ AimdRateControl rate_control_;
+ DataRate prev_bitrate_;
+ BandwidthUsage prev_state_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_CONGESTION_CONTROLLER_GOOG_CC_DELAY_BASED_BWE_H_
diff --git a/third_party/libwebrtc/modules/congestion_controller/goog_cc/delay_based_bwe_gn/moz.build b/third_party/libwebrtc/modules/congestion_controller/goog_cc/delay_based_bwe_gn/moz.build
new file mode 100644
index 0000000000..ac2e6ec0c5
--- /dev/null
+++ b/third_party/libwebrtc/modules/congestion_controller/goog_cc/delay_based_bwe_gn/moz.build
@@ -0,0 +1,216 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["BWE_TEST_LOGGING_COMPILE_TIME_ENABLE"] = "0"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/congestion_controller/goog_cc/delay_based_bwe.cc",
+ "/third_party/libwebrtc/modules/congestion_controller/goog_cc/inter_arrival_delta.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "GLESv2",
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "dl",
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("delay_based_bwe_gn")
diff --git a/third_party/libwebrtc/modules/congestion_controller/goog_cc/delay_based_bwe_unittest.cc b/third_party/libwebrtc/modules/congestion_controller/goog_cc/delay_based_bwe_unittest.cc
new file mode 100644
index 0000000000..b7dc6aae47
--- /dev/null
+++ b/third_party/libwebrtc/modules/congestion_controller/goog_cc/delay_based_bwe_unittest.cc
@@ -0,0 +1,309 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/congestion_controller/goog_cc/delay_based_bwe.h"
+
+#include <string>
+
+#include "api/transport/network_types.h"
+#include "modules/congestion_controller/goog_cc/acknowledged_bitrate_estimator.h"
+#include "modules/congestion_controller/goog_cc/delay_based_bwe_unittest_helper.h"
+#include "system_wrappers/include/clock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+namespace {
+constexpr int kNumProbesCluster0 = 5;
+constexpr int kNumProbesCluster1 = 8;
+const PacedPacketInfo kPacingInfo0(0, kNumProbesCluster0, 2000);
+const PacedPacketInfo kPacingInfo1(1, kNumProbesCluster1, 4000);
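+// PacedPacketInfo arguments: the probe cluster id, the minimum number of
+// probes, and the minimum number of bytes the cluster should send.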
+constexpr float kTargetUtilizationFraction = 0.95f;
+} // namespace
+
+TEST_F(DelayBasedBweTest, ProbeDetection) {
+ int64_t now_ms = clock_.TimeInMilliseconds();
+
+ // First burst sent at 8 * 1000 / 10 = 800 kbps.
+ for (int i = 0; i < kNumProbesCluster0; ++i) {
+ clock_.AdvanceTimeMilliseconds(10);
+ now_ms = clock_.TimeInMilliseconds();
+ IncomingFeedback(now_ms, now_ms, 1000, kPacingInfo0);
+ }
+ EXPECT_TRUE(bitrate_observer_.updated());
+
+ // Second burst sent at 8 * 1000 / 5 = 1600 kbps.
+ for (int i = 0; i < kNumProbesCluster1; ++i) {
+ clock_.AdvanceTimeMilliseconds(5);
+ now_ms = clock_.TimeInMilliseconds();
+ IncomingFeedback(now_ms, now_ms, 1000, kPacingInfo1);
+ }
+
+ EXPECT_TRUE(bitrate_observer_.updated());
+ EXPECT_GT(bitrate_observer_.latest_bitrate(), 1500000u);
+}
+
+TEST_F(DelayBasedBweTest, ProbeDetectionNonPacedPackets) {
+ int64_t now_ms = clock_.TimeInMilliseconds();
+ // First burst sent at 8 * 1000 / 10 = 800 kbps, but with every other packet
+ // not being paced which could mess things up.
+ for (int i = 0; i < kNumProbesCluster0; ++i) {
+ clock_.AdvanceTimeMilliseconds(5);
+ now_ms = clock_.TimeInMilliseconds();
+ IncomingFeedback(now_ms, now_ms, 1000, kPacingInfo0);
+ // Non-paced packet, arriving 5 ms after.
+ clock_.AdvanceTimeMilliseconds(5);
+ IncomingFeedback(now_ms, now_ms, 100, PacedPacketInfo());
+ }
+
+ EXPECT_TRUE(bitrate_observer_.updated());
+ EXPECT_GT(bitrate_observer_.latest_bitrate(), 800000u);
+}
+
+TEST_F(DelayBasedBweTest, ProbeDetectionFasterArrival) {
+ int64_t now_ms = clock_.TimeInMilliseconds();
+ // First burst sent at 8 * 1000 / 10 = 800 kbps.
+ // Arriving at 8 * 1000 / 5 = 1600 kbps.
+ int64_t send_time_ms = 0;
+ for (int i = 0; i < kNumProbesCluster0; ++i) {
+ clock_.AdvanceTimeMilliseconds(1);
+ send_time_ms += 10;
+ now_ms = clock_.TimeInMilliseconds();
+ IncomingFeedback(now_ms, send_time_ms, 1000, kPacingInfo0);
+ }
+
+ EXPECT_FALSE(bitrate_observer_.updated());
+}
+
+TEST_F(DelayBasedBweTest, ProbeDetectionSlowerArrival) {
+ int64_t now_ms = clock_.TimeInMilliseconds();
+ // First burst sent at 8 * 1000 / 5 = 1600 kbps.
+ // Arriving at 8 * 1000 / 7 = 1142 kbps.
+ // Since the receive rate is significantly below the send rate, we expect to
+ // use 95% of the estimated capacity.
+ int64_t send_time_ms = 0;
+ for (int i = 0; i < kNumProbesCluster1; ++i) {
+ clock_.AdvanceTimeMilliseconds(7);
+ send_time_ms += 5;
+ now_ms = clock_.TimeInMilliseconds();
+ IncomingFeedback(now_ms, send_time_ms, 1000, kPacingInfo1);
+ }
+
+ EXPECT_TRUE(bitrate_observer_.updated());
+ EXPECT_NEAR(bitrate_observer_.latest_bitrate(),
+ kTargetUtilizationFraction * 1140000u, 10000u);
+}
+
+TEST_F(DelayBasedBweTest, ProbeDetectionSlowerArrivalHighBitrate) {
+ int64_t now_ms = clock_.TimeInMilliseconds();
+ // Burst sent at 8 * 1000 / 1 = 8000 kbps.
+ // Arriving at 8 * 1000 / 2 = 4000 kbps.
+ // Since the receive rate is significantly below the send rate, we expect to
+ // use 95% of the estimated capacity.
+ int64_t send_time_ms = 0;
+ for (int i = 0; i < kNumProbesCluster1; ++i) {
+ clock_.AdvanceTimeMilliseconds(2);
+ send_time_ms += 1;
+ now_ms = clock_.TimeInMilliseconds();
+ IncomingFeedback(now_ms, send_time_ms, 1000, kPacingInfo1);
+ }
+
+ EXPECT_TRUE(bitrate_observer_.updated());
+ EXPECT_NEAR(bitrate_observer_.latest_bitrate(),
+ kTargetUtilizationFraction * 4000000u, 10000u);
+}
+
+TEST_F(DelayBasedBweTest, GetExpectedBwePeriodMs) {
+ auto default_interval = bitrate_estimator_->GetExpectedBwePeriod();
+ EXPECT_GT(default_interval.ms(), 0);
+ CapacityDropTestHelper(1, true, 333, 0);
+ auto interval = bitrate_estimator_->GetExpectedBwePeriod();
+ EXPECT_GT(interval.ms(), 0);
+ EXPECT_NE(interval.ms(), default_interval.ms());
+}
+
+TEST_F(DelayBasedBweTest, InitialBehavior) {
+ InitialBehaviorTestHelper(730000);
+}
+
+TEST_F(DelayBasedBweTest, RateIncreaseReordering) {
+ RateIncreaseReorderingTestHelper(730000);
+}
+TEST_F(DelayBasedBweTest, RateIncreaseRtpTimestamps) {
+ RateIncreaseRtpTimestampsTestHelper(622);
+}
+
+TEST_F(DelayBasedBweTest, CapacityDropOneStream) {
+ CapacityDropTestHelper(1, false, 300, 0);
+}
+
+TEST_F(DelayBasedBweTest, CapacityDropPosOffsetChange) {
+ CapacityDropTestHelper(1, false, 867, 30000);
+}
+
+TEST_F(DelayBasedBweTest, CapacityDropNegOffsetChange) {
+ CapacityDropTestHelper(1, false, 933, -30000);
+}
+
+TEST_F(DelayBasedBweTest, CapacityDropOneStreamWrap) {
+ CapacityDropTestHelper(1, true, 333, 0);
+}
+
+TEST_F(DelayBasedBweTest, TestTimestampGrouping) {
+ TestTimestampGroupingTestHelper();
+}
+
+TEST_F(DelayBasedBweTest, TestShortTimeoutAndWrap) {
+ // Simulate a client leaving and rejoining the call after 35 seconds. This
+ // will make abs send time wrap, so if streams aren't timed out properly
+ // the next 30 seconds of packets will be out of order.
+ TestWrappingHelper(35);
+}
+
+TEST_F(DelayBasedBweTest, TestLongTimeoutAndWrap) {
+  // Simulate a client leaving and rejoining the call some multiple of
+  // 64 seconds later. This will cause a zero difference in abs send times due
+ // to the wrap, but a big difference in arrival time, if streams aren't
+ // properly timed out.
+ TestWrappingHelper(10 * 64);
+}
+
+TEST_F(DelayBasedBweTest, TestInitialOveruse) {
+ const DataRate kStartBitrate = DataRate::KilobitsPerSec(300);
+ const DataRate kInitialCapacity = DataRate::KilobitsPerSec(200);
+ const uint32_t kDummySsrc = 0;
+ // High FPS to ensure that we send a lot of packets in a short time.
+ const int kFps = 90;
+
+ stream_generator_->AddStream(new test::RtpStream(kFps, kStartBitrate.bps()));
+ stream_generator_->set_capacity_bps(kInitialCapacity.bps());
+
+ // Needed to initialize the AimdRateControl.
+ bitrate_estimator_->SetStartBitrate(kStartBitrate);
+
+ // Produce 30 frames (in 1/3 second) and give them to the estimator.
+ int64_t bitrate_bps = kStartBitrate.bps();
+ bool seen_overuse = false;
+ for (int i = 0; i < 30; ++i) {
+ bool overuse = GenerateAndProcessFrame(kDummySsrc, bitrate_bps);
+ // The purpose of this test is to ensure that we back down even if we don't
+ // have any acknowledged bitrate estimate yet. Hence, if the test works
+ // as expected, we should not have a measured bitrate yet.
+ EXPECT_FALSE(acknowledged_bitrate_estimator_->bitrate().has_value());
+ if (overuse) {
+ EXPECT_TRUE(bitrate_observer_.updated());
+ EXPECT_NEAR(bitrate_observer_.latest_bitrate(), kStartBitrate.bps() / 2,
+ 15000);
+ bitrate_bps = bitrate_observer_.latest_bitrate();
+ seen_overuse = true;
+ break;
+ } else if (bitrate_observer_.updated()) {
+ bitrate_bps = bitrate_observer_.latest_bitrate();
+ bitrate_observer_.Reset();
+ }
+ }
+ EXPECT_TRUE(seen_overuse);
+ EXPECT_NEAR(bitrate_observer_.latest_bitrate(), kStartBitrate.bps() / 2,
+ 15000);
+}
+
+TEST_F(DelayBasedBweTest, TestTimestampPrecisionHandling) {
+ // This test does some basic checks to make sure that timestamps with higher
+ // than millisecond precision are handled properly and do not cause any
+  // problems in the estimator. Specifically, as previously reported in
+  // webrtc:14023 and described in more detail there, rounding to the nearest
+  // millisecond caused a discrepancy in the accumulated delay. This led to
+  // false-positive overuse detection.
+ // Technical details of the test:
+ // Send times(ms): 0.000, 9.725, 20.000, 29.725, 40.000, 49.725, ...
+ // Recv times(ms): 0.500, 10.000, 20.500, 30.000, 40.500, 50.000, ...
+ // Send deltas(ms): 9.750, 10.250, 9.750, 10.250, 9.750, ...
+ // Recv deltas(ms): 9.500, 10.500, 9.500, 10.500, 9.500, ...
+ // There is no delay building up between the send times and the receive times,
+  // therefore this case should never lead to an overuse detection. However, if
+  // the time deltas were accidentally rounded to the nearest millisecond, then
+  // all the send deltas would be equal to 10 ms while some recv deltas would
+  // round up to 11 ms, which would create a false impression of delay build-up.
+ uint32_t last_bitrate = bitrate_observer_.latest_bitrate();
+ for (int i = 0; i < 1000; ++i) {
+ clock_.AdvanceTimeMicroseconds(500);
+ IncomingFeedback(clock_.CurrentTime(),
+ clock_.CurrentTime() - TimeDelta::Micros(500), 1000,
+ PacedPacketInfo());
+ clock_.AdvanceTimeMicroseconds(9500);
+ IncomingFeedback(clock_.CurrentTime(),
+ clock_.CurrentTime() - TimeDelta::Micros(250), 1000,
+ PacedPacketInfo());
+ clock_.AdvanceTimeMicroseconds(10000);
+
+ // The bitrate should never decrease in this test.
+ EXPECT_LE(last_bitrate, bitrate_observer_.latest_bitrate());
+ last_bitrate = bitrate_observer_.latest_bitrate();
+ }
+}
+
+class DelayBasedBweTestWithBackoffTimeoutExperiment : public DelayBasedBweTest {
+ public:
+ DelayBasedBweTestWithBackoffTimeoutExperiment()
+ : DelayBasedBweTest(
+ "WebRTC-BweAimdRateControlConfig/initial_backoff_interval:200ms/") {
+ }
+};
+
+// This test subsumes and improves DelayBasedBweTest.TestInitialOveruse above.
+TEST_F(DelayBasedBweTestWithBackoffTimeoutExperiment, TestInitialOveruse) {
+ const DataRate kStartBitrate = DataRate::KilobitsPerSec(300);
+ const DataRate kInitialCapacity = DataRate::KilobitsPerSec(200);
+ const uint32_t kDummySsrc = 0;
+ // High FPS to ensure that we send a lot of packets in a short time.
+ const int kFps = 90;
+
+ stream_generator_->AddStream(new test::RtpStream(kFps, kStartBitrate.bps()));
+ stream_generator_->set_capacity_bps(kInitialCapacity.bps());
+
+ // Needed to initialize the AimdRateControl.
+ bitrate_estimator_->SetStartBitrate(kStartBitrate);
+
+ // Produce 30 frames (in 1/3 second) and give them to the estimator.
+ int64_t bitrate_bps = kStartBitrate.bps();
+ bool seen_overuse = false;
+ for (int frames = 0; frames < 30 && !seen_overuse; ++frames) {
+ bool overuse = GenerateAndProcessFrame(kDummySsrc, bitrate_bps);
+ // The purpose of this test is to ensure that we back down even if we don't
+ // have any acknowledged bitrate estimate yet. Hence, if the test works
+ // as expected, we should not have a measured bitrate yet.
+ EXPECT_FALSE(acknowledged_bitrate_estimator_->bitrate().has_value());
+ if (overuse) {
+ EXPECT_TRUE(bitrate_observer_.updated());
+ EXPECT_NEAR(bitrate_observer_.latest_bitrate(), kStartBitrate.bps() / 2,
+ 15000);
+ bitrate_bps = bitrate_observer_.latest_bitrate();
+ seen_overuse = true;
+ } else if (bitrate_observer_.updated()) {
+ bitrate_bps = bitrate_observer_.latest_bitrate();
+ bitrate_observer_.Reset();
+ }
+ }
+ EXPECT_TRUE(seen_overuse);
+ // Continue generating an additional 15 frames (equivalent to 167 ms) and
+ // verify that we don't back down further.
+ for (int frames = 0; frames < 15 && seen_overuse; ++frames) {
+ bool overuse = GenerateAndProcessFrame(kDummySsrc, bitrate_bps);
+ EXPECT_FALSE(overuse);
+ if (bitrate_observer_.updated()) {
+ bitrate_bps = bitrate_observer_.latest_bitrate();
+ EXPECT_GE(bitrate_bps, kStartBitrate.bps() / 2 - 15000);
+ EXPECT_LE(bitrate_bps, kInitialCapacity.bps() + 15000);
+ bitrate_observer_.Reset();
+ }
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/congestion_controller/goog_cc/delay_based_bwe_unittest_helper.cc b/third_party/libwebrtc/modules/congestion_controller/goog_cc/delay_based_bwe_unittest_helper.cc
new file mode 100644
index 0000000000..8618a7814e
--- /dev/null
+++ b/third_party/libwebrtc/modules/congestion_controller/goog_cc/delay_based_bwe_unittest_helper.cc
@@ -0,0 +1,529 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/congestion_controller/goog_cc/delay_based_bwe_unittest_helper.h"
+
+#include <algorithm>
+#include <cstdint>
+#include <memory>
+
+#include "absl/strings/string_view.h"
+#include "modules/congestion_controller/goog_cc/delay_based_bwe.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+constexpr size_t kMtu = 1200;
+constexpr uint32_t kAcceptedBitrateErrorBps = 50000;
+
+// Number of packets needed before we have a valid estimate.
+constexpr int kNumInitialPackets = 2;
+
+constexpr int kInitialProbingPackets = 5;
+
+namespace test {
+
+void TestBitrateObserver::OnReceiveBitrateChanged(uint32_t bitrate) {
+ latest_bitrate_ = bitrate;
+ updated_ = true;
+}
+
+RtpStream::RtpStream(int fps, int bitrate_bps)
+ : fps_(fps), bitrate_bps_(bitrate_bps), next_rtp_time_(0) {
+ RTC_CHECK_GT(fps_, 0);
+}
+
+// Generates a new frame for this stream. If called too soon after the
+// previous frame, no frame will be generated. The frame is split into
+// packets.
+int64_t RtpStream::GenerateFrame(int64_t time_now_us,
+ std::vector<PacketResult>* packets) {
+ if (time_now_us < next_rtp_time_) {
+ return next_rtp_time_;
+ }
+ RTC_CHECK(packets != NULL);
+ size_t bits_per_frame = (bitrate_bps_ + fps_ / 2) / fps_;
+ size_t n_packets =
+ std::max<size_t>((bits_per_frame + 4 * kMtu) / (8 * kMtu), 1u);
+ size_t payload_size = (bits_per_frame + 4 * n_packets) / (8 * n_packets);
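+  // Example: at 300 kbps and 30 fps, bits_per_frame = 10000 and n_packets = 1,
+  // so each frame becomes a single packet of (10000 + 4) / 8 = 1250 bytes;
+  // round-to-nearest packetization can thus slightly exceed kMtu.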
+ for (size_t i = 0; i < n_packets; ++i) {
+ PacketResult packet;
+ packet.sent_packet.send_time =
+ Timestamp::Micros(time_now_us + kSendSideOffsetUs);
+ packet.sent_packet.size = DataSize::Bytes(payload_size);
+ packets->push_back(packet);
+ }
+ next_rtp_time_ = time_now_us + (1000000 + fps_ / 2) / fps_;
+ return next_rtp_time_;
+}
+
+// The send-side time when the next frame can be generated.
+int64_t RtpStream::next_rtp_time() const {
+ return next_rtp_time_;
+}
+
+void RtpStream::set_bitrate_bps(int bitrate_bps) {
+ ASSERT_GE(bitrate_bps, 0);
+ bitrate_bps_ = bitrate_bps;
+}
+
+int RtpStream::bitrate_bps() const {
+ return bitrate_bps_;
+}
+
+bool RtpStream::Compare(const std::unique_ptr<RtpStream>& lhs,
+ const std::unique_ptr<RtpStream>& rhs) {
+ return lhs->next_rtp_time_ < rhs->next_rtp_time_;
+}
+
+StreamGenerator::StreamGenerator(int capacity, int64_t time_now)
+ : capacity_(capacity), prev_arrival_time_us_(time_now) {}
+
+StreamGenerator::~StreamGenerator() = default;
+
+// Add a new stream.
+void StreamGenerator::AddStream(RtpStream* stream) {
+ streams_.push_back(std::unique_ptr<RtpStream>(stream));
+}
+
+// Set the link capacity.
+void StreamGenerator::set_capacity_bps(int capacity_bps) {
+ ASSERT_GT(capacity_bps, 0);
+ capacity_ = capacity_bps;
+}
+
+// Divides `bitrate_bps` among all streams. The allocated bitrate per stream
+// is decided by the current allocation ratios.
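+// E.g. retargeting streams currently at 300 kbps and 100 kbps to a total of
+// 800 kbps yields 600 kbps and 200 kbps, preserving the 3:1 ratio.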
+void StreamGenerator::SetBitrateBps(int bitrate_bps) {
+ ASSERT_GE(streams_.size(), 0u);
+ int total_bitrate_before = 0;
+ for (const auto& stream : streams_) {
+ total_bitrate_before += stream->bitrate_bps();
+ }
+ int64_t bitrate_before = 0;
+ int total_bitrate_after = 0;
+ for (const auto& stream : streams_) {
+ bitrate_before += stream->bitrate_bps();
+ int64_t bitrate_after =
+ (bitrate_before * bitrate_bps + total_bitrate_before / 2) /
+ total_bitrate_before;
+ stream->set_bitrate_bps(bitrate_after - total_bitrate_after);
+ total_bitrate_after += stream->bitrate_bps();
+ }
+ ASSERT_EQ(bitrate_before, total_bitrate_before);
+ EXPECT_EQ(total_bitrate_after, bitrate_bps);
+}
+
+// TODO(holmer): Break out the channel simulation part from this class to make
+// it possible to simulate different types of channels.
+int64_t StreamGenerator::GenerateFrame(std::vector<PacketResult>* packets,
+ int64_t time_now_us) {
+ RTC_CHECK(packets != NULL);
+ RTC_CHECK(packets->empty());
+ RTC_CHECK_GT(capacity_, 0);
+ auto it =
+ std::min_element(streams_.begin(), streams_.end(), RtpStream::Compare);
+ (*it)->GenerateFrame(time_now_us, packets);
+ for (PacketResult& packet : *packets) {
+ int capacity_bpus = capacity_ / 1000;
+ int64_t required_network_time_us =
+ (8 * 1000 * packet.sent_packet.size.bytes() + capacity_bpus / 2) /
+ capacity_bpus;
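+    // E.g. a 1250 byte packet over a 1 Mbps link (capacity_bpus = 1000)
+    // occupies the channel for (8 * 1000 * 1250 + 500) / 1000 = 10000 us.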
+ prev_arrival_time_us_ =
+ std::max(time_now_us + required_network_time_us,
+ prev_arrival_time_us_ + required_network_time_us);
+ packet.receive_time = Timestamp::Micros(prev_arrival_time_us_);
+ }
+ it = std::min_element(streams_.begin(), streams_.end(), RtpStream::Compare);
+ return std::max((*it)->next_rtp_time(), time_now_us);
+}
+} // namespace test
+
+DelayBasedBweTest::DelayBasedBweTest() : DelayBasedBweTest("") {}
+
+DelayBasedBweTest::DelayBasedBweTest(absl::string_view field_trial_string)
+ : field_trial(
+ std::make_unique<test::ScopedFieldTrials>(field_trial_string)),
+ clock_(100000000),
+ acknowledged_bitrate_estimator_(
+ AcknowledgedBitrateEstimatorInterface::Create(&field_trial_config_)),
+ probe_bitrate_estimator_(new ProbeBitrateEstimator(nullptr)),
+ bitrate_estimator_(
+ new DelayBasedBwe(&field_trial_config_, nullptr, nullptr)),
+ stream_generator_(new test::StreamGenerator(1e6, // Capacity.
+ clock_.TimeInMicroseconds())),
+ arrival_time_offset_ms_(0),
+ first_update_(true) {}
+
+DelayBasedBweTest::~DelayBasedBweTest() {}
+
+void DelayBasedBweTest::AddDefaultStream() {
+ stream_generator_->AddStream(new test::RtpStream(30, 3e5));
+}
+
+const uint32_t DelayBasedBweTest::kDefaultSsrc = 0;
+
+void DelayBasedBweTest::IncomingFeedback(int64_t arrival_time_ms,
+ int64_t send_time_ms,
+ size_t payload_size) {
+ IncomingFeedback(arrival_time_ms, send_time_ms, payload_size,
+ PacedPacketInfo());
+}
+
+void DelayBasedBweTest::IncomingFeedback(int64_t arrival_time_ms,
+ int64_t send_time_ms,
+ size_t payload_size,
+ const PacedPacketInfo& pacing_info) {
+ RTC_CHECK_GE(arrival_time_ms + arrival_time_offset_ms_, 0);
+ IncomingFeedback(Timestamp::Millis(arrival_time_ms + arrival_time_offset_ms_),
+ Timestamp::Millis(send_time_ms), payload_size, pacing_info);
+}
+
+void DelayBasedBweTest::IncomingFeedback(Timestamp receive_time,
+ Timestamp send_time,
+ size_t payload_size,
+ const PacedPacketInfo& pacing_info) {
+ PacketResult packet;
+ packet.receive_time = receive_time;
+ packet.sent_packet.send_time = send_time;
+ packet.sent_packet.size = DataSize::Bytes(payload_size);
+ packet.sent_packet.pacing_info = pacing_info;
+ if (packet.sent_packet.pacing_info.probe_cluster_id !=
+ PacedPacketInfo::kNotAProbe)
+ probe_bitrate_estimator_->HandleProbeAndEstimateBitrate(packet);
+
+ TransportPacketsFeedback msg;
+ msg.feedback_time = Timestamp::Millis(clock_.TimeInMilliseconds());
+ msg.packet_feedbacks.push_back(packet);
+ acknowledged_bitrate_estimator_->IncomingPacketFeedbackVector(
+ msg.SortedByReceiveTime());
+ DelayBasedBwe::Result result =
+ bitrate_estimator_->IncomingPacketFeedbackVector(
+ msg, acknowledged_bitrate_estimator_->bitrate(),
+ probe_bitrate_estimator_->FetchAndResetLastEstimatedBitrate(),
+ /*network_estimate*/ absl::nullopt, /*in_alr*/ false);
+ if (result.updated) {
+ bitrate_observer_.OnReceiveBitrateChanged(result.target_bitrate.bps());
+ }
+}
+
+// Generates a frame of packets belonging to a stream at a given bitrate and
+// with a given ssrc. The stream is pushed through a very simple simulated
+// network, and is then given to the receive-side bandwidth estimator.
+// Returns true if an over-use was seen, false otherwise.
+// TestBitrateObserver::updated() should be used to check for any changes in
+// the target bitrate after the call to this function.
+bool DelayBasedBweTest::GenerateAndProcessFrame(uint32_t ssrc,
+ uint32_t bitrate_bps) {
+ stream_generator_->SetBitrateBps(bitrate_bps);
+ std::vector<PacketResult> packets;
+
+ int64_t next_time_us =
+ stream_generator_->GenerateFrame(&packets, clock_.TimeInMicroseconds());
+ if (packets.empty())
+ return false;
+
+ bool overuse = false;
+ bitrate_observer_.Reset();
+ clock_.AdvanceTimeMicroseconds(packets.back().receive_time.us() -
+ clock_.TimeInMicroseconds());
+ for (auto& packet : packets) {
+ RTC_CHECK_GE(packet.receive_time.ms() + arrival_time_offset_ms_, 0);
+ packet.receive_time += TimeDelta::Millis(arrival_time_offset_ms_);
+
+ if (packet.sent_packet.pacing_info.probe_cluster_id !=
+ PacedPacketInfo::kNotAProbe)
+ probe_bitrate_estimator_->HandleProbeAndEstimateBitrate(packet);
+ }
+
+ acknowledged_bitrate_estimator_->IncomingPacketFeedbackVector(packets);
+ TransportPacketsFeedback msg;
+ msg.packet_feedbacks = packets;
+ msg.feedback_time = Timestamp::Millis(clock_.TimeInMilliseconds());
+
+ DelayBasedBwe::Result result =
+ bitrate_estimator_->IncomingPacketFeedbackVector(
+ msg, acknowledged_bitrate_estimator_->bitrate(),
+ probe_bitrate_estimator_->FetchAndResetLastEstimatedBitrate(),
+ /*network_estimate*/ absl::nullopt, /*in_alr*/ false);
+ if (result.updated) {
+ bitrate_observer_.OnReceiveBitrateChanged(result.target_bitrate.bps());
+ if (!first_update_ && result.target_bitrate.bps() < bitrate_bps)
+ overuse = true;
+ first_update_ = false;
+ }
+
+ clock_.AdvanceTimeMicroseconds(next_time_us - clock_.TimeInMicroseconds());
+ return overuse;
+}
+
+// Run the bandwidth estimator with a stream of at most `max_number_of_frames`
+// frames, or until it reaches `target_bitrate`.
+// Can for instance be used to run the estimator for some time to get it
+// into a steady state.
+uint32_t DelayBasedBweTest::SteadyStateRun(uint32_t ssrc,
+ int max_number_of_frames,
+ uint32_t start_bitrate,
+ uint32_t min_bitrate,
+ uint32_t max_bitrate,
+ uint32_t target_bitrate) {
+ uint32_t bitrate_bps = start_bitrate;
+ bool bitrate_update_seen = false;
+  // Produce up to `max_number_of_frames` frames and give them to the
+  // estimator.
+ for (int i = 0; i < max_number_of_frames; ++i) {
+ bool overuse = GenerateAndProcessFrame(ssrc, bitrate_bps);
+ if (overuse) {
+ EXPECT_LT(bitrate_observer_.latest_bitrate(), max_bitrate);
+ EXPECT_GT(bitrate_observer_.latest_bitrate(), min_bitrate);
+ bitrate_bps = bitrate_observer_.latest_bitrate();
+ bitrate_update_seen = true;
+ } else if (bitrate_observer_.updated()) {
+ bitrate_bps = bitrate_observer_.latest_bitrate();
+ bitrate_observer_.Reset();
+ }
+ if (bitrate_update_seen && bitrate_bps > target_bitrate) {
+ break;
+ }
+ }
+ EXPECT_TRUE(bitrate_update_seen);
+ return bitrate_bps;
+}
+
+void DelayBasedBweTest::InitialBehaviorTestHelper(
+ uint32_t expected_converge_bitrate) {
+ const int kFramerate = 50; // 50 fps to avoid rounding errors.
+ const int kFrameIntervalMs = 1000 / kFramerate;
+ const PacedPacketInfo kPacingInfo(0, 5, 5000);
+ DataRate bitrate = DataRate::Zero();
+ int64_t send_time_ms = 0;
+ std::vector<uint32_t> ssrcs;
+ EXPECT_FALSE(bitrate_estimator_->LatestEstimate(&ssrcs, &bitrate));
+ EXPECT_EQ(0u, ssrcs.size());
+ clock_.AdvanceTimeMilliseconds(1000);
+ EXPECT_FALSE(bitrate_estimator_->LatestEstimate(&ssrcs, &bitrate));
+ EXPECT_FALSE(bitrate_observer_.updated());
+ bitrate_observer_.Reset();
+ clock_.AdvanceTimeMilliseconds(1000);
+ // Inserting packets for 5 seconds to get a valid estimate.
+ for (int i = 0; i < 5 * kFramerate + 1 + kNumInitialPackets; ++i) {
+    // NOTE: If the following line is moved inside the if statement below,
+    // this test won't work on Windows release bots.
+ PacedPacketInfo pacing_info =
+ i < kInitialProbingPackets ? kPacingInfo : PacedPacketInfo();
+
+ if (i == kNumInitialPackets) {
+ EXPECT_FALSE(bitrate_estimator_->LatestEstimate(&ssrcs, &bitrate));
+ EXPECT_EQ(0u, ssrcs.size());
+ EXPECT_FALSE(bitrate_observer_.updated());
+ bitrate_observer_.Reset();
+ }
+ IncomingFeedback(clock_.TimeInMilliseconds(), send_time_ms, kMtu,
+ pacing_info);
+ clock_.AdvanceTimeMilliseconds(1000 / kFramerate);
+ send_time_ms += kFrameIntervalMs;
+ }
+ EXPECT_TRUE(bitrate_estimator_->LatestEstimate(&ssrcs, &bitrate));
+ ASSERT_EQ(1u, ssrcs.size());
+ EXPECT_EQ(kDefaultSsrc, ssrcs.front());
+ EXPECT_NEAR(expected_converge_bitrate, bitrate.bps(),
+ kAcceptedBitrateErrorBps);
+ EXPECT_TRUE(bitrate_observer_.updated());
+ bitrate_observer_.Reset();
+ EXPECT_EQ(bitrate_observer_.latest_bitrate(), bitrate.bps());
+}
+
+void DelayBasedBweTest::RateIncreaseReorderingTestHelper(
+ uint32_t expected_bitrate_bps) {
+ const int kFramerate = 50; // 50 fps to avoid rounding errors.
+ const int kFrameIntervalMs = 1000 / kFramerate;
+ const PacedPacketInfo kPacingInfo(0, 5, 5000);
+ int64_t send_time_ms = 0;
+ // Inserting packets for five seconds to get a valid estimate.
+ for (int i = 0; i < 5 * kFramerate + 1 + kNumInitialPackets; ++i) {
+    // NOTE: If the following line is moved inside the if statement below,
+    // this test won't work on Windows release bots.
+ PacedPacketInfo pacing_info =
+ i < kInitialProbingPackets ? kPacingInfo : PacedPacketInfo();
+
+ // TODO(sprang): Remove this hack once the single stream estimator is gone,
+ // as it doesn't do anything in Process().
+ if (i == kNumInitialPackets) {
+ // Process after we have enough frames to get a valid input rate estimate.
+
+ EXPECT_FALSE(bitrate_observer_.updated()); // No valid estimate.
+ }
+ IncomingFeedback(clock_.TimeInMilliseconds(), send_time_ms, kMtu,
+ pacing_info);
+ clock_.AdvanceTimeMilliseconds(kFrameIntervalMs);
+ send_time_ms += kFrameIntervalMs;
+ }
+ EXPECT_TRUE(bitrate_observer_.updated());
+ EXPECT_NEAR(expected_bitrate_bps, bitrate_observer_.latest_bitrate(),
+ kAcceptedBitrateErrorBps);
+ for (int i = 0; i < 10; ++i) {
+ clock_.AdvanceTimeMilliseconds(2 * kFrameIntervalMs);
+ send_time_ms += 2 * kFrameIntervalMs;
+ IncomingFeedback(clock_.TimeInMilliseconds(), send_time_ms, 1000);
+ IncomingFeedback(clock_.TimeInMilliseconds(),
+ send_time_ms - kFrameIntervalMs, 1000);
+ }
+ EXPECT_TRUE(bitrate_observer_.updated());
+ EXPECT_NEAR(expected_bitrate_bps, bitrate_observer_.latest_bitrate(),
+ kAcceptedBitrateErrorBps);
+}
+
+// Make sure we initially increase the bitrate as expected.
+void DelayBasedBweTest::RateIncreaseRtpTimestampsTestHelper(
+ int expected_iterations) {
+  // This threshold corresponds approximately to increasing the bitrate
+  // according to bitrate(i) = 1.04 * bitrate(i-1) + 1000
+  // until bitrate(i) > 500000, with bitrate(1) ~= 30000.
+ uint32_t bitrate_bps = 30000;
+ int iterations = 0;
+ AddDefaultStream();
+ // Feed the estimator with a stream of packets and verify that it reaches
+ // 500 kbps at the expected time.
+ while (bitrate_bps < 5e5) {
+ bool overuse = GenerateAndProcessFrame(kDefaultSsrc, bitrate_bps);
+ if (overuse) {
+ EXPECT_GT(bitrate_observer_.latest_bitrate(), bitrate_bps);
+ bitrate_bps = bitrate_observer_.latest_bitrate();
+ bitrate_observer_.Reset();
+ } else if (bitrate_observer_.updated()) {
+ bitrate_bps = bitrate_observer_.latest_bitrate();
+ bitrate_observer_.Reset();
+ }
+ ++iterations;
+ }
+ ASSERT_EQ(expected_iterations, iterations);
+}
+
+void DelayBasedBweTest::CapacityDropTestHelper(
+ int number_of_streams,
+ bool wrap_time_stamp,
+ uint32_t expected_bitrate_drop_delta,
+ int64_t receiver_clock_offset_change_ms) {
+ const int kFramerate = 30;
+ const int kStartBitrate = 900e3;
+ const int kMinExpectedBitrate = 800e3;
+ const int kMaxExpectedBitrate = 1100e3;
+ const uint32_t kInitialCapacityBps = 1000e3;
+ const uint32_t kReducedCapacityBps = 500e3;
+
+ int steady_state_time = 0;
+ if (number_of_streams <= 1) {
+ steady_state_time = 10;
+ AddDefaultStream();
+ } else {
+ steady_state_time = 10 * number_of_streams;
+ int bitrate_sum = 0;
+ int kBitrateDenom = number_of_streams * (number_of_streams - 1);
+ for (int i = 0; i < number_of_streams; i++) {
+      // The first stream gets half the available bitrate, while the rest
+      // share the remaining half, i.e. 1/2 = Sum[n/(N*(N-1))] for n=1..N-1
+      // (rounded up).
+ int bitrate = kStartBitrate / 2;
+ if (i > 0) {
+ bitrate = (kStartBitrate * i + kBitrateDenom / 2) / kBitrateDenom;
+ }
+ stream_generator_->AddStream(new test::RtpStream(kFramerate, bitrate));
+ bitrate_sum += bitrate;
+ }
+ ASSERT_EQ(bitrate_sum, kStartBitrate);
+ }
+
+ // Run in steady state to make the estimator converge.
+ stream_generator_->set_capacity_bps(kInitialCapacityBps);
+ uint32_t bitrate_bps = SteadyStateRun(
+ kDefaultSsrc, steady_state_time * kFramerate, kStartBitrate,
+ kMinExpectedBitrate, kMaxExpectedBitrate, kInitialCapacityBps);
+ EXPECT_NEAR(kInitialCapacityBps, bitrate_bps, 180000u);
+ bitrate_observer_.Reset();
+
+ // Add an offset to make sure the BWE can handle it.
+ arrival_time_offset_ms_ += receiver_clock_offset_change_ms;
+
+ // Reduce the capacity and verify the decrease time.
+ stream_generator_->set_capacity_bps(kReducedCapacityBps);
+ int64_t overuse_start_time = clock_.TimeInMilliseconds();
+ int64_t bitrate_drop_time = -1;
+ for (int i = 0; i < 100 * number_of_streams; ++i) {
+ GenerateAndProcessFrame(kDefaultSsrc, bitrate_bps);
+ if (bitrate_drop_time == -1 &&
+ bitrate_observer_.latest_bitrate() <= kReducedCapacityBps) {
+ bitrate_drop_time = clock_.TimeInMilliseconds();
+ }
+ if (bitrate_observer_.updated())
+ bitrate_bps = bitrate_observer_.latest_bitrate();
+ }
+
+ EXPECT_NEAR(expected_bitrate_drop_delta,
+ bitrate_drop_time - overuse_start_time, 33);
+}
+
+void DelayBasedBweTest::TestTimestampGroupingTestHelper() {
+ const int kFramerate = 50; // 50 fps to avoid rounding errors.
+ const int kFrameIntervalMs = 1000 / kFramerate;
+ int64_t send_time_ms = 0;
+ // Initial set of frames to increase the bitrate. 6 seconds to have enough
+ // time for the first estimate to be generated and for Process() to be called.
+ for (int i = 0; i <= 6 * kFramerate; ++i) {
+ IncomingFeedback(clock_.TimeInMilliseconds(), send_time_ms, 1000);
+
+ clock_.AdvanceTimeMilliseconds(kFrameIntervalMs);
+ send_time_ms += kFrameIntervalMs;
+ }
+ EXPECT_TRUE(bitrate_observer_.updated());
+ EXPECT_GE(bitrate_observer_.latest_bitrate(), 400000u);
+
+ // Insert batches of frames which were sent very close in time. Also simulate
+ // capacity over-use to see that we back off correctly.
+ const int kTimestampGroupLength = 15;
+ for (int i = 0; i < 100; ++i) {
+ for (int j = 0; j < kTimestampGroupLength; ++j) {
+      // Insert `kTimestampGroupLength` frames with just 1 timestamp tick in
+      // between. They should be treated as part of the same group by the
+      // estimator.
+ IncomingFeedback(clock_.TimeInMilliseconds(), send_time_ms, 100);
+ clock_.AdvanceTimeMilliseconds(kFrameIntervalMs / kTimestampGroupLength);
+ send_time_ms += 1;
+ }
+ // Increase time until next batch to simulate over-use.
+ clock_.AdvanceTimeMilliseconds(10);
+ send_time_ms += kFrameIntervalMs - kTimestampGroupLength;
+ }
+ EXPECT_TRUE(bitrate_observer_.updated());
+ // Should have reduced the estimate.
+ EXPECT_LT(bitrate_observer_.latest_bitrate(), 400000u);
+}
+
+void DelayBasedBweTest::TestWrappingHelper(int silence_time_s) {
+ const int kFramerate = 100;
+ const int kFrameIntervalMs = 1000 / kFramerate;
+ int64_t send_time_ms = 0;
+
+ for (size_t i = 0; i < 3000; ++i) {
+ IncomingFeedback(clock_.TimeInMilliseconds(), send_time_ms, 1000);
+ clock_.AdvanceTimeMilliseconds(kFrameIntervalMs);
+ send_time_ms += kFrameIntervalMs;
+ }
+ DataRate bitrate_before = DataRate::Zero();
+ std::vector<uint32_t> ssrcs;
+ bitrate_estimator_->LatestEstimate(&ssrcs, &bitrate_before);
+
+ clock_.AdvanceTimeMilliseconds(silence_time_s * 1000);
+ send_time_ms += silence_time_s * 1000;
+
+ for (size_t i = 0; i < 24; ++i) {
+ IncomingFeedback(clock_.TimeInMilliseconds(), send_time_ms, 1000);
+ clock_.AdvanceTimeMilliseconds(2 * kFrameIntervalMs);
+ send_time_ms += kFrameIntervalMs;
+ }
+ DataRate bitrate_after = DataRate::Zero();
+ bitrate_estimator_->LatestEstimate(&ssrcs, &bitrate_after);
+ EXPECT_LT(bitrate_after, bitrate_before);
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/congestion_controller/goog_cc/delay_based_bwe_unittest_helper.h b/third_party/libwebrtc/modules/congestion_controller/goog_cc/delay_based_bwe_unittest_helper.h
new file mode 100644
index 0000000000..d56fe892d5
--- /dev/null
+++ b/third_party/libwebrtc/modules/congestion_controller/goog_cc/delay_based_bwe_unittest_helper.h
@@ -0,0 +1,189 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_CONGESTION_CONTROLLER_GOOG_CC_DELAY_BASED_BWE_UNITTEST_HELPER_H_
+#define MODULES_CONGESTION_CONTROLLER_GOOG_CC_DELAY_BASED_BWE_UNITTEST_HELPER_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "absl/strings/string_view.h"
+#include "api/transport/field_trial_based_config.h"
+#include "api/transport/network_types.h"
+#include "modules/congestion_controller/goog_cc/acknowledged_bitrate_estimator.h"
+#include "modules/congestion_controller/goog_cc/delay_based_bwe.h"
+#include "system_wrappers/include/clock.h"
+#include "test/field_trial.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace test {
+
+class TestBitrateObserver {
+ public:
+ TestBitrateObserver() : updated_(false), latest_bitrate_(0) {}
+ ~TestBitrateObserver() {}
+
+ void OnReceiveBitrateChanged(uint32_t bitrate);
+
+ void Reset() { updated_ = false; }
+
+ bool updated() const { return updated_; }
+
+ uint32_t latest_bitrate() const { return latest_bitrate_; }
+
+ private:
+ bool updated_;
+ uint32_t latest_bitrate_;
+};
+
+class RtpStream {
+ public:
+ enum { kSendSideOffsetUs = 1000000 };
+
+ RtpStream(int fps, int bitrate_bps);
+
+ RtpStream(const RtpStream&) = delete;
+ RtpStream& operator=(const RtpStream&) = delete;
+
+ // Generates a new frame for this stream. If called too soon after the
+ // previous frame, no frame will be generated. The frame is split into
+ // packets.
+ int64_t GenerateFrame(int64_t time_now_us,
+ std::vector<PacketResult>* packets);
+
+ // The send-side time when the next frame can be generated.
+ int64_t next_rtp_time() const;
+
+ void set_bitrate_bps(int bitrate_bps);
+
+ int bitrate_bps() const;
+
+ static bool Compare(const std::unique_ptr<RtpStream>& lhs,
+ const std::unique_ptr<RtpStream>& rhs);
+
+ private:
+ int fps_;
+ int bitrate_bps_;
+ int64_t next_rtp_time_;
+};
+
+class StreamGenerator {
+ public:
+ StreamGenerator(int capacity, int64_t time_now);
+ ~StreamGenerator();
+
+ StreamGenerator(const StreamGenerator&) = delete;
+ StreamGenerator& operator=(const StreamGenerator&) = delete;
+
+ // Add a new stream.
+ void AddStream(RtpStream* stream);
+
+ // Set the link capacity.
+ void set_capacity_bps(int capacity_bps);
+
+ // Divides `bitrate_bps` among all streams. The allocated bitrate per stream
+  // is decided by the current allocation ratios.
+ void SetBitrateBps(int bitrate_bps);
+
+ // Set the RTP timestamp offset for the stream identified by `ssrc`.
+ void set_rtp_timestamp_offset(uint32_t ssrc, uint32_t offset);
+
+ // TODO(holmer): Break out the channel simulation part from this class to make
+ // it possible to simulate different types of channels.
+ int64_t GenerateFrame(std::vector<PacketResult>* packets,
+ int64_t time_now_us);
+
+ private:
+ // Capacity of the simulated channel in bits per second.
+ int capacity_;
+ // The time when the last packet arrived.
+ int64_t prev_arrival_time_us_;
+ // All streams being transmitted on this simulated channel.
+ std::vector<std::unique_ptr<RtpStream>> streams_;
+};
+} // namespace test
+
+class DelayBasedBweTest : public ::testing::Test {
+ public:
+ DelayBasedBweTest();
+ explicit DelayBasedBweTest(absl::string_view field_trial_string);
+ ~DelayBasedBweTest() override;
+
+ protected:
+ void AddDefaultStream();
+
+ // Helpers to insert a single packet into the delay-based BWE.
+ void IncomingFeedback(int64_t arrival_time_ms,
+ int64_t send_time_ms,
+ size_t payload_size);
+ void IncomingFeedback(int64_t arrival_time_ms,
+ int64_t send_time_ms,
+ size_t payload_size,
+ const PacedPacketInfo& pacing_info);
+ void IncomingFeedback(Timestamp receive_time,
+ Timestamp send_time,
+ size_t payload_size,
+ const PacedPacketInfo& pacing_info);
+
+ // Generates a frame of packets belonging to a stream at a given bitrate and
+ // with a given ssrc. The stream is pushed through a very simple simulated
+ // network, and is then given to the receive-side bandwidth estimator.
+ // Returns true if an over-use was seen, false otherwise.
+  // TestBitrateObserver::updated() should be used to check for any changes in
+  // the target bitrate after the call to this function.
+ bool GenerateAndProcessFrame(uint32_t ssrc, uint32_t bitrate_bps);
+
+ // Run the bandwidth estimator with a stream of `number_of_frames` frames, or
+ // until it reaches `target_bitrate`.
+ // Can for instance be used to run the estimator for some time to get it
+ // into a steady state.
+ uint32_t SteadyStateRun(uint32_t ssrc,
+ int number_of_frames,
+ uint32_t start_bitrate,
+ uint32_t min_bitrate,
+ uint32_t max_bitrate,
+ uint32_t target_bitrate);
+
+ void TestTimestampGroupingTestHelper();
+
+ void TestWrappingHelper(int silence_time_s);
+
+ void InitialBehaviorTestHelper(uint32_t expected_converge_bitrate);
+ void RateIncreaseReorderingTestHelper(uint32_t expected_bitrate);
+ void RateIncreaseRtpTimestampsTestHelper(int expected_iterations);
+ void CapacityDropTestHelper(int number_of_streams,
+ bool wrap_time_stamp,
+ uint32_t expected_bitrate_drop_delta,
+ int64_t receiver_clock_offset_change_ms);
+
+ static const uint32_t kDefaultSsrc;
+ FieldTrialBasedConfig field_trial_config_;
+
+ std::unique_ptr<test::ScopedFieldTrials>
+ field_trial; // Must be initialized first.
+ SimulatedClock clock_; // Time at the receiver.
+ test::TestBitrateObserver bitrate_observer_;
+ std::unique_ptr<AcknowledgedBitrateEstimatorInterface>
+ acknowledged_bitrate_estimator_;
+ const std::unique_ptr<ProbeBitrateEstimator> probe_bitrate_estimator_;
+ std::unique_ptr<DelayBasedBwe> bitrate_estimator_;
+ std::unique_ptr<test::StreamGenerator> stream_generator_;
+ int64_t arrival_time_offset_ms_;
+ bool first_update_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_CONGESTION_CONTROLLER_GOOG_CC_DELAY_BASED_BWE_UNITTEST_HELPER_H_
diff --git a/third_party/libwebrtc/modules/congestion_controller/goog_cc/delay_increase_detector_interface.h b/third_party/libwebrtc/modules/congestion_controller/goog_cc/delay_increase_detector_interface.h
new file mode 100644
index 0000000000..fc12cff7d5
--- /dev/null
+++ b/third_party/libwebrtc/modules/congestion_controller/goog_cc/delay_increase_detector_interface.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef MODULES_CONGESTION_CONTROLLER_GOOG_CC_DELAY_INCREASE_DETECTOR_INTERFACE_H_
+#define MODULES_CONGESTION_CONTROLLER_GOOG_CC_DELAY_INCREASE_DETECTOR_INTERFACE_H_
+
+#include <stdint.h>
+
+#include "api/network_state_predictor.h"
+
+namespace webrtc {
+
+class DelayIncreaseDetectorInterface {
+ public:
+ DelayIncreaseDetectorInterface() {}
+ virtual ~DelayIncreaseDetectorInterface() {}
+
+ DelayIncreaseDetectorInterface(const DelayIncreaseDetectorInterface&) =
+ delete;
+ DelayIncreaseDetectorInterface& operator=(
+ const DelayIncreaseDetectorInterface&) = delete;
+
+ // Update the detector with a new sample. The deltas should represent deltas
+ // between timestamp groups as defined by the InterArrival class.
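+  // In DelayBasedBwe the concrete implementation used for both the audio and
+  // video detectors is the TrendlineEstimator.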
+ virtual void Update(double recv_delta_ms,
+ double send_delta_ms,
+ int64_t send_time_ms,
+ int64_t arrival_time_ms,
+ size_t packet_size,
+ bool calculated_deltas) = 0;
+
+ virtual BandwidthUsage State() const = 0;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_CONGESTION_CONTROLLER_GOOG_CC_DELAY_INCREASE_DETECTOR_INTERFACE_H_
diff --git a/third_party/libwebrtc/modules/congestion_controller/goog_cc/estimators_gn/moz.build b/third_party/libwebrtc/modules/congestion_controller/goog_cc/estimators_gn/moz.build
new file mode 100644
index 0000000000..c2bee724dd
--- /dev/null
+++ b/third_party/libwebrtc/modules/congestion_controller/goog_cc/estimators_gn/moz.build
@@ -0,0 +1,220 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["BWE_TEST_LOGGING_COMPILE_TIME_ENABLE"] = "0"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/congestion_controller/goog_cc/acknowledged_bitrate_estimator.cc",
+ "/third_party/libwebrtc/modules/congestion_controller/goog_cc/acknowledged_bitrate_estimator_interface.cc",
+ "/third_party/libwebrtc/modules/congestion_controller/goog_cc/bitrate_estimator.cc",
+ "/third_party/libwebrtc/modules/congestion_controller/goog_cc/probe_bitrate_estimator.cc",
+ "/third_party/libwebrtc/modules/congestion_controller/goog_cc/robust_throughput_estimator.cc",
+ "/third_party/libwebrtc/modules/congestion_controller/goog_cc/trendline_estimator.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "GLESv2",
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "dl",
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("estimators_gn")
diff --git a/third_party/libwebrtc/modules/congestion_controller/goog_cc/goog_cc_gn/moz.build b/third_party/libwebrtc/modules/congestion_controller/goog_cc/goog_cc_gn/moz.build
new file mode 100644
index 0000000000..ea12fe5d8f
--- /dev/null
+++ b/third_party/libwebrtc/modules/congestion_controller/goog_cc/goog_cc_gn/moz.build
@@ -0,0 +1,215 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["BWE_TEST_LOGGING_COMPILE_TIME_ENABLE"] = "0"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/congestion_controller/goog_cc/goog_cc_network_control.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "GLESv2",
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "dl",
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("goog_cc_gn")
diff --git a/third_party/libwebrtc/modules/congestion_controller/goog_cc/goog_cc_network_control.cc b/third_party/libwebrtc/modules/congestion_controller/goog_cc/goog_cc_network_control.cc
new file mode 100644
index 0000000000..87d16c0980
--- /dev/null
+++ b/third_party/libwebrtc/modules/congestion_controller/goog_cc/goog_cc_network_control.cc
@@ -0,0 +1,720 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/congestion_controller/goog_cc/goog_cc_network_control.h"
+
+#include <inttypes.h>
+#include <stdio.h>
+
+#include <algorithm>
+#include <cstdint>
+#include <memory>
+#include <numeric>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "absl/strings/match.h"
+#include "api/units/data_rate.h"
+#include "api/units/time_delta.h"
+#include "logging/rtc_event_log/events/rtc_event_remote_estimate.h"
+#include "modules/congestion_controller/goog_cc/alr_detector.h"
+#include "modules/congestion_controller/goog_cc/probe_controller.h"
+#include "modules/remote_bitrate_estimator/include/bwe_defines.h"
+#include "modules/remote_bitrate_estimator/test/bwe_test_logging.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+namespace {
+// From RTCPSender video report interval.
+constexpr TimeDelta kLossUpdateInterval = TimeDelta::Millis(1000);
+
+// Pacing-rate relative to our target send rate.
+// Multiplicative factor that is applied to the target bitrate to calculate
+// the number of bytes that can be transmitted per interval.
+// Increasing this factor will result in lower delays in cases of bitrate
+// overshoots from the encoder.
+constexpr float kDefaultPaceMultiplier = 2.5f;
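+// For illustration (hypothetical numbers): with this default multiplier of
+// 2.5 and a 1000 kbps target, GetPacingRates() below yields a 2500 kbps
+// pacing rate, i.e. a one-second pacing window of 2500 kbit ~= 312500 bytes.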
+
+// If the probe result is far below the current throughput estimate, it's
+// unlikely that the probe is accurate, so we don't want to drop too far.
+// However, if we actually are overusing, we want to drop to something slightly
+// below the current throughput estimate to drain the network queues.
+constexpr double kProbeDropThroughputFraction = 0.85;
+
+int64_t GetBpsOrDefault(const absl::optional<DataRate>& rate,
+ int64_t fallback_bps) {
+ if (rate && rate->IsFinite()) {
+ return rate->bps();
+ } else {
+ return fallback_bps;
+ }
+}
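+
+// Example (hypothetical values): GetBpsOrDefault(absl::nullopt, -1) returns
+// -1, and so does a non-finite rate such as DataRate::PlusInfinity(), while
+// GetBpsOrDefault(DataRate::KilobitsPerSec(300), -1) returns 300000.
+// ResetConstraints() relies on the -1 fallback to signal an unset starting
+// rate to the ProbeController.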
+
+bool IsEnabled(const FieldTrialsView* config, absl::string_view key) {
+ return absl::StartsWith(config->Lookup(key), "Enabled");
+}
+
+bool IsNotDisabled(const FieldTrialsView* config, absl::string_view key) {
+ return !absl::StartsWith(config->Lookup(key), "Disabled");
+}
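+
+// For illustration (hypothetical trial strings): a Lookup() result of
+// "Enabled-1,window:350" makes IsEnabled() true, since only the "Enabled"
+// prefix is matched; an empty string (trial not configured) leaves
+// IsEnabled() false but IsNotDisabled() true, which makes the
+// IsNotDisabled() defaults below opt-out rather than opt-in.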
+} // namespace
+
+GoogCcNetworkController::GoogCcNetworkController(NetworkControllerConfig config,
+ GoogCcConfig goog_cc_config)
+ : key_value_config_(config.key_value_config ? config.key_value_config
+ : &trial_based_config_),
+ event_log_(config.event_log),
+ packet_feedback_only_(goog_cc_config.feedback_only),
+ safe_reset_on_route_change_("Enabled"),
+ safe_reset_acknowledged_rate_("ack"),
+ use_min_allocatable_as_lower_bound_(
+ IsNotDisabled(key_value_config_, "WebRTC-Bwe-MinAllocAsLowerBound")),
+ ignore_probes_lower_than_network_estimate_(IsNotDisabled(
+ key_value_config_,
+ "WebRTC-Bwe-IgnoreProbesLowerThanNetworkStateEstimate")),
+ limit_probes_lower_than_throughput_estimate_(
+ IsEnabled(key_value_config_,
+ "WebRTC-Bwe-LimitProbesLowerThanThroughputEstimate")),
+ rate_control_settings_(
+ RateControlSettings::ParseFromKeyValueConfig(key_value_config_)),
+ loss_based_stable_rate_(
+ IsEnabled(key_value_config_, "WebRTC-Bwe-LossBasedStableRate")),
+ pace_at_max_of_bwe_and_lower_link_capacity_(
+ IsEnabled(key_value_config_,
+ "WebRTC-Bwe-PaceAtMaxOfBweAndLowerLinkCapacity")),
+ pace_at_loss_based_bwe_when_loss_(
+ IsEnabled(key_value_config_, "WebRTC-Bwe-PaceAtLossBaseBweWhenLoss")),
+ probe_controller_(
+ new ProbeController(key_value_config_, config.event_log)),
+ congestion_window_pushback_controller_(
+ rate_control_settings_.UseCongestionWindowPushback()
+ ? std::make_unique<CongestionWindowPushbackController>(
+ key_value_config_)
+ : nullptr),
+ bandwidth_estimation_(
+ std::make_unique<SendSideBandwidthEstimation>(key_value_config_,
+ event_log_)),
+ alr_detector_(
+ std::make_unique<AlrDetector>(key_value_config_, config.event_log)),
+ probe_bitrate_estimator_(new ProbeBitrateEstimator(config.event_log)),
+ network_estimator_(std::move(goog_cc_config.network_state_estimator)),
+ network_state_predictor_(
+ std::move(goog_cc_config.network_state_predictor)),
+ delay_based_bwe_(new DelayBasedBwe(key_value_config_,
+ event_log_,
+ network_state_predictor_.get())),
+ acknowledged_bitrate_estimator_(
+ AcknowledgedBitrateEstimatorInterface::Create(key_value_config_)),
+ initial_config_(config),
+ last_loss_based_target_rate_(*config.constraints.starting_rate),
+ last_pushback_target_rate_(last_loss_based_target_rate_),
+ last_stable_target_rate_(last_loss_based_target_rate_),
+ pacing_factor_(config.stream_based_config.pacing_factor.value_or(
+ kDefaultPaceMultiplier)),
+ min_total_allocated_bitrate_(
+ config.stream_based_config.min_total_allocated_bitrate.value_or(
+ DataRate::Zero())),
+ max_padding_rate_(config.stream_based_config.max_padding_rate.value_or(
+ DataRate::Zero())),
+ max_total_allocated_bitrate_(DataRate::Zero()) {
+ RTC_DCHECK(config.constraints.at_time.IsFinite());
+ ParseFieldTrial(
+ {&safe_reset_on_route_change_, &safe_reset_acknowledged_rate_},
+ key_value_config_->Lookup("WebRTC-Bwe-SafeResetOnRouteChange"));
+ if (delay_based_bwe_)
+ delay_based_bwe_->SetMinBitrate(kCongestionControllerMinBitrate);
+}
+
+GoogCcNetworkController::~GoogCcNetworkController() {}
+
+NetworkControlUpdate GoogCcNetworkController::OnNetworkAvailability(
+ NetworkAvailability msg) {
+ NetworkControlUpdate update;
+ update.probe_cluster_configs = probe_controller_->OnNetworkAvailability(msg);
+ return update;
+}
+
+NetworkControlUpdate GoogCcNetworkController::OnNetworkRouteChange(
+ NetworkRouteChange msg) {
+ if (safe_reset_on_route_change_) {
+ absl::optional<DataRate> estimated_bitrate;
+ if (safe_reset_acknowledged_rate_) {
+ estimated_bitrate = acknowledged_bitrate_estimator_->bitrate();
+ if (!estimated_bitrate)
+ estimated_bitrate = acknowledged_bitrate_estimator_->PeekRate();
+ } else {
+ estimated_bitrate = bandwidth_estimation_->target_rate();
+ }
+ if (estimated_bitrate) {
+ if (msg.constraints.starting_rate) {
+ msg.constraints.starting_rate =
+ std::min(*msg.constraints.starting_rate, *estimated_bitrate);
+ } else {
+ msg.constraints.starting_rate = estimated_bitrate;
+ }
+ }
+ }
+
+ acknowledged_bitrate_estimator_ =
+ AcknowledgedBitrateEstimatorInterface::Create(key_value_config_);
+ probe_bitrate_estimator_.reset(new ProbeBitrateEstimator(event_log_));
+ if (network_estimator_)
+ network_estimator_->OnRouteChange(msg);
+ delay_based_bwe_.reset(new DelayBasedBwe(key_value_config_, event_log_,
+ network_state_predictor_.get()));
+ bandwidth_estimation_->OnRouteChange();
+ probe_controller_->Reset(msg.at_time.ms());
+ NetworkControlUpdate update;
+ update.probe_cluster_configs = ResetConstraints(msg.constraints);
+ MaybeTriggerOnNetworkChanged(&update, msg.at_time);
+ return update;
+}
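+
+// For illustration (hypothetical rates): with the SafeResetOnRouteChange
+// trial enabled and an acknowledged rate of 200 kbps at the time of the
+// route change, a requested starting rate of 800 kbps is capped to 200 kbps
+// above before the estimators are rebuilt, avoiding an initial overshoot
+// onto the new path.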
+
+NetworkControlUpdate GoogCcNetworkController::OnProcessInterval(
+ ProcessInterval msg) {
+ NetworkControlUpdate update;
+ if (initial_config_) {
+ update.probe_cluster_configs =
+ ResetConstraints(initial_config_->constraints);
+ update.pacer_config = GetPacingRates(msg.at_time);
+
+ if (initial_config_->stream_based_config.requests_alr_probing) {
+ probe_controller_->EnablePeriodicAlrProbing(
+ *initial_config_->stream_based_config.requests_alr_probing);
+ }
+ absl::optional<DataRate> total_bitrate =
+ initial_config_->stream_based_config.max_total_allocated_bitrate;
+ if (total_bitrate) {
+ auto probes = probe_controller_->OnMaxTotalAllocatedBitrate(
+ total_bitrate->bps(), msg.at_time.ms());
+ update.probe_cluster_configs.insert(update.probe_cluster_configs.end(),
+ probes.begin(), probes.end());
+
+ max_total_allocated_bitrate_ = *total_bitrate;
+ }
+ initial_config_.reset();
+ }
+ if (congestion_window_pushback_controller_ && msg.pacer_queue) {
+ congestion_window_pushback_controller_->UpdatePacingQueue(
+ msg.pacer_queue->bytes());
+ }
+ bandwidth_estimation_->UpdateEstimate(msg.at_time);
+ absl::optional<int64_t> start_time_ms =
+ alr_detector_->GetApplicationLimitedRegionStartTime();
+ probe_controller_->SetAlrStartTimeMs(start_time_ms);
+
+ auto probes = probe_controller_->Process(msg.at_time.ms());
+ update.probe_cluster_configs.insert(update.probe_cluster_configs.end(),
+ probes.begin(), probes.end());
+
+ if (rate_control_settings_.UseCongestionWindow() &&
+ last_packet_received_time_.IsFinite() && !feedback_max_rtts_.empty()) {
+ UpdateCongestionWindowSize();
+ }
+ if (congestion_window_pushback_controller_ && current_data_window_) {
+ congestion_window_pushback_controller_->SetDataWindow(
+ *current_data_window_);
+ } else {
+ update.congestion_window = current_data_window_;
+ }
+ MaybeTriggerOnNetworkChanged(&update, msg.at_time);
+ return update;
+}
+
+NetworkControlUpdate GoogCcNetworkController::OnRemoteBitrateReport(
+ RemoteBitrateReport msg) {
+ if (packet_feedback_only_) {
+ RTC_LOG(LS_ERROR) << "Received REMB for packet feedback only GoogCC";
+ return NetworkControlUpdate();
+ }
+ bandwidth_estimation_->UpdateReceiverEstimate(msg.receive_time,
+ msg.bandwidth);
+ BWE_TEST_LOGGING_PLOT(1, "REMB_kbps", msg.receive_time.ms(),
+ msg.bandwidth.bps() / 1000);
+ return NetworkControlUpdate();
+}
+
+NetworkControlUpdate GoogCcNetworkController::OnRoundTripTimeUpdate(
+ RoundTripTimeUpdate msg) {
+ if (packet_feedback_only_ || msg.smoothed)
+ return NetworkControlUpdate();
+ RTC_DCHECK(!msg.round_trip_time.IsZero());
+ if (delay_based_bwe_)
+ delay_based_bwe_->OnRttUpdate(msg.round_trip_time);
+ bandwidth_estimation_->UpdateRtt(msg.round_trip_time, msg.receive_time);
+ return NetworkControlUpdate();
+}
+
+NetworkControlUpdate GoogCcNetworkController::OnSentPacket(
+ SentPacket sent_packet) {
+ alr_detector_->OnBytesSent(sent_packet.size.bytes(),
+ sent_packet.send_time.ms());
+ acknowledged_bitrate_estimator_->SetAlr(
+ alr_detector_->GetApplicationLimitedRegionStartTime().has_value());
+
+ if (!first_packet_sent_) {
+ first_packet_sent_ = true;
+ // Initialize feedback time to send time to allow estimation of RTT until
+ // first feedback is received.
+ bandwidth_estimation_->UpdatePropagationRtt(sent_packet.send_time,
+ TimeDelta::Zero());
+ }
+ bandwidth_estimation_->OnSentPacket(sent_packet);
+
+ if (congestion_window_pushback_controller_) {
+ congestion_window_pushback_controller_->UpdateOutstandingData(
+ sent_packet.data_in_flight.bytes());
+ NetworkControlUpdate update;
+ MaybeTriggerOnNetworkChanged(&update, sent_packet.send_time);
+ return update;
+ } else {
+ return NetworkControlUpdate();
+ }
+}
+
+NetworkControlUpdate GoogCcNetworkController::OnReceivedPacket(
+ ReceivedPacket received_packet) {
+ last_packet_received_time_ = received_packet.receive_time;
+ return NetworkControlUpdate();
+}
+
+NetworkControlUpdate GoogCcNetworkController::OnStreamsConfig(
+ StreamsConfig msg) {
+ NetworkControlUpdate update;
+ if (msg.requests_alr_probing) {
+ probe_controller_->EnablePeriodicAlrProbing(*msg.requests_alr_probing);
+ }
+ if (msg.max_total_allocated_bitrate &&
+ *msg.max_total_allocated_bitrate != max_total_allocated_bitrate_) {
+ if (rate_control_settings_.TriggerProbeOnMaxAllocatedBitrateChange()) {
+ update.probe_cluster_configs =
+ probe_controller_->OnMaxTotalAllocatedBitrate(
+ msg.max_total_allocated_bitrate->bps(), msg.at_time.ms());
+ } else {
+ probe_controller_->SetMaxBitrate(msg.max_total_allocated_bitrate->bps());
+ }
+ max_total_allocated_bitrate_ = *msg.max_total_allocated_bitrate;
+ }
+ bool pacing_changed = false;
+ if (msg.pacing_factor && *msg.pacing_factor != pacing_factor_) {
+ pacing_factor_ = *msg.pacing_factor;
+ pacing_changed = true;
+ }
+ if (msg.min_total_allocated_bitrate &&
+ *msg.min_total_allocated_bitrate != min_total_allocated_bitrate_) {
+ min_total_allocated_bitrate_ = *msg.min_total_allocated_bitrate;
+ pacing_changed = true;
+
+ if (use_min_allocatable_as_lower_bound_) {
+ ClampConstraints();
+ delay_based_bwe_->SetMinBitrate(min_data_rate_);
+ bandwidth_estimation_->SetMinMaxBitrate(min_data_rate_, max_data_rate_);
+ }
+ }
+ if (msg.max_padding_rate && *msg.max_padding_rate != max_padding_rate_) {
+ max_padding_rate_ = *msg.max_padding_rate;
+ pacing_changed = true;
+ }
+
+ if (pacing_changed)
+ update.pacer_config = GetPacingRates(msg.at_time);
+ return update;
+}
+
+NetworkControlUpdate GoogCcNetworkController::OnTargetRateConstraints(
+ TargetRateConstraints constraints) {
+ NetworkControlUpdate update;
+ update.probe_cluster_configs = ResetConstraints(constraints);
+ MaybeTriggerOnNetworkChanged(&update, constraints.at_time);
+ return update;
+}
+
+void GoogCcNetworkController::ClampConstraints() {
+ // TODO(holmer): We should make sure the default bitrates are set to 10 kbps,
+ // and that we don't try to set the min bitrate to 0 from any applications.
+ // The congestion controller should allow a min bitrate of 0.
+ min_data_rate_ = std::max(min_target_rate_, kCongestionControllerMinBitrate);
+ if (use_min_allocatable_as_lower_bound_) {
+ min_data_rate_ = std::max(min_data_rate_, min_total_allocated_bitrate_);
+ }
+ if (max_data_rate_ < min_data_rate_) {
+ RTC_LOG(LS_WARNING) << "max bitrate smaller than min bitrate";
+ max_data_rate_ = min_data_rate_;
+ }
+ if (starting_rate_ && starting_rate_ < min_data_rate_) {
+ RTC_LOG(LS_WARNING) << "start bitrate smaller than min bitrate";
+ starting_rate_ = min_data_rate_;
+ }
+}
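+
+// Worked example (hypothetical rates): with min_target_rate_ = 0,
+// min_total_allocated_bitrate_ = 30 kbps and the lower-bound trial enabled,
+// min_data_rate_ becomes max(max(0, kCongestionControllerMinBitrate),
+// 30 kbps) = 30 kbps; a configured max of 20 kbps and a starting rate of
+// 10 kbps would then both be raised to 30 kbps by the checks above.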
+
+std::vector<ProbeClusterConfig> GoogCcNetworkController::ResetConstraints(
+ TargetRateConstraints new_constraints) {
+ min_target_rate_ = new_constraints.min_data_rate.value_or(DataRate::Zero());
+ max_data_rate_ =
+ new_constraints.max_data_rate.value_or(DataRate::PlusInfinity());
+ starting_rate_ = new_constraints.starting_rate;
+ ClampConstraints();
+
+ bandwidth_estimation_->SetBitrates(starting_rate_, min_data_rate_,
+ max_data_rate_, new_constraints.at_time);
+
+ if (starting_rate_)
+ delay_based_bwe_->SetStartBitrate(*starting_rate_);
+ delay_based_bwe_->SetMinBitrate(min_data_rate_);
+
+ return probe_controller_->SetBitrates(
+ min_data_rate_.bps(), GetBpsOrDefault(starting_rate_, -1),
+ max_data_rate_.bps_or(-1), new_constraints.at_time.ms());
+}
+
+NetworkControlUpdate GoogCcNetworkController::OnTransportLossReport(
+ TransportLossReport msg) {
+ if (packet_feedback_only_)
+ return NetworkControlUpdate();
+ int64_t total_packets_delta =
+ msg.packets_received_delta + msg.packets_lost_delta;
+ bandwidth_estimation_->UpdatePacketsLost(
+ msg.packets_lost_delta, total_packets_delta, msg.receive_time);
+ return NetworkControlUpdate();
+}
+
+void GoogCcNetworkController::UpdateCongestionWindowSize() {
+ TimeDelta min_feedback_max_rtt = TimeDelta::Millis(
+ *std::min_element(feedback_max_rtts_.begin(), feedback_max_rtts_.end()));
+
+ const DataSize kMinCwnd = DataSize::Bytes(2 * 1500);
+ TimeDelta time_window =
+ min_feedback_max_rtt +
+ TimeDelta::Millis(
+ rate_control_settings_.GetCongestionWindowAdditionalTimeMs());
+
+ DataSize data_window = last_loss_based_target_rate_ * time_window;
+ if (current_data_window_) {
+ data_window =
+ std::max(kMinCwnd, (data_window + current_data_window_.value()) / 2);
+ } else {
+ data_window = std::max(kMinCwnd, data_window);
+ }
+ current_data_window_ = data_window;
+}
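+
+// Worked example (hypothetical values): with a minimum feedback RTT of
+// 100 ms, no additional window time, and a loss-based target of 1200 kbps,
+// the first window is 1200 kbps * 100 ms = 120 kbit = 15000 bytes (above
+// kMinCwnd = 3000 bytes); later updates average the new value with the
+// previous window, smoothing out RTT spikes.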
+
+NetworkControlUpdate GoogCcNetworkController::OnTransportPacketsFeedback(
+ TransportPacketsFeedback report) {
+ if (report.packet_feedbacks.empty()) {
+    // TODO(bugs.webrtc.org/10125): Design a better mechanism to safeguard
+    // against building very large network queues.
+ return NetworkControlUpdate();
+ }
+
+ if (congestion_window_pushback_controller_) {
+ congestion_window_pushback_controller_->UpdateOutstandingData(
+ report.data_in_flight.bytes());
+ }
+ TimeDelta max_feedback_rtt = TimeDelta::MinusInfinity();
+ TimeDelta min_propagation_rtt = TimeDelta::PlusInfinity();
+ Timestamp max_recv_time = Timestamp::MinusInfinity();
+
+ std::vector<PacketResult> feedbacks = report.ReceivedWithSendInfo();
+ for (const auto& feedback : feedbacks)
+ max_recv_time = std::max(max_recv_time, feedback.receive_time);
+
+ for (const auto& feedback : feedbacks) {
+ TimeDelta feedback_rtt =
+ report.feedback_time - feedback.sent_packet.send_time;
+ TimeDelta min_pending_time = feedback.receive_time - max_recv_time;
+ TimeDelta propagation_rtt = feedback_rtt - min_pending_time;
+ max_feedback_rtt = std::max(max_feedback_rtt, feedback_rtt);
+ min_propagation_rtt = std::min(min_propagation_rtt, propagation_rtt);
+ }
+
+ if (max_feedback_rtt.IsFinite()) {
+ feedback_max_rtts_.push_back(max_feedback_rtt.ms());
+ const size_t kMaxFeedbackRttWindow = 32;
+ if (feedback_max_rtts_.size() > kMaxFeedbackRttWindow)
+ feedback_max_rtts_.pop_front();
+ // TODO(srte): Use time since last unacknowledged packet.
+ bandwidth_estimation_->UpdatePropagationRtt(report.feedback_time,
+ min_propagation_rtt);
+ }
+ if (packet_feedback_only_) {
+ if (!feedback_max_rtts_.empty()) {
+ int64_t sum_rtt_ms = std::accumulate(feedback_max_rtts_.begin(),
+ feedback_max_rtts_.end(), 0);
+ int64_t mean_rtt_ms = sum_rtt_ms / feedback_max_rtts_.size();
+ if (delay_based_bwe_)
+ delay_based_bwe_->OnRttUpdate(TimeDelta::Millis(mean_rtt_ms));
+ }
+
+ TimeDelta feedback_min_rtt = TimeDelta::PlusInfinity();
+ for (const auto& packet_feedback : feedbacks) {
+ TimeDelta pending_time = packet_feedback.receive_time - max_recv_time;
+ TimeDelta rtt = report.feedback_time -
+ packet_feedback.sent_packet.send_time - pending_time;
+ // Value used for predicting NACK round trip time in FEC controller.
+ feedback_min_rtt = std::min(rtt, feedback_min_rtt);
+ }
+ if (feedback_min_rtt.IsFinite()) {
+ bandwidth_estimation_->UpdateRtt(feedback_min_rtt, report.feedback_time);
+ }
+
+ expected_packets_since_last_loss_update_ +=
+ report.PacketsWithFeedback().size();
+ for (const auto& packet_feedback : report.PacketsWithFeedback()) {
+ if (!packet_feedback.IsReceived())
+ lost_packets_since_last_loss_update_ += 1;
+ }
+ if (report.feedback_time > next_loss_update_) {
+ next_loss_update_ = report.feedback_time + kLossUpdateInterval;
+ bandwidth_estimation_->UpdatePacketsLost(
+ lost_packets_since_last_loss_update_,
+ expected_packets_since_last_loss_update_, report.feedback_time);
+ expected_packets_since_last_loss_update_ = 0;
+ lost_packets_since_last_loss_update_ = 0;
+ }
+ }
+ absl::optional<int64_t> alr_start_time =
+ alr_detector_->GetApplicationLimitedRegionStartTime();
+
+ if (previously_in_alr_ && !alr_start_time.has_value()) {
+ int64_t now_ms = report.feedback_time.ms();
+ acknowledged_bitrate_estimator_->SetAlrEndedTime(report.feedback_time);
+ probe_controller_->SetAlrEndedTimeMs(now_ms);
+ }
+ previously_in_alr_ = alr_start_time.has_value();
+ acknowledged_bitrate_estimator_->IncomingPacketFeedbackVector(
+ report.SortedByReceiveTime());
+ auto acknowledged_bitrate = acknowledged_bitrate_estimator_->bitrate();
+ bandwidth_estimation_->SetAcknowledgedRate(acknowledged_bitrate,
+ report.feedback_time);
+ for (const auto& feedback : report.SortedByReceiveTime()) {
+ if (feedback.sent_packet.pacing_info.probe_cluster_id !=
+ PacedPacketInfo::kNotAProbe) {
+ probe_bitrate_estimator_->HandleProbeAndEstimateBitrate(feedback);
+ }
+ }
+
+ if (network_estimator_) {
+ network_estimator_->OnTransportPacketsFeedback(report);
+ auto prev_estimate = estimate_;
+ estimate_ = network_estimator_->GetCurrentEstimate();
+ // TODO(srte): Make OnTransportPacketsFeedback signal whether the state
+ // changed to avoid the need for this check.
+ if (estimate_ && (!prev_estimate || estimate_->last_feed_time !=
+ prev_estimate->last_feed_time)) {
+ event_log_->Log(std::make_unique<RtcEventRemoteEstimate>(
+ estimate_->link_capacity_lower, estimate_->link_capacity_upper));
+ }
+ }
+ absl::optional<DataRate> probe_bitrate =
+ probe_bitrate_estimator_->FetchAndResetLastEstimatedBitrate();
+ if (ignore_probes_lower_than_network_estimate_ && probe_bitrate &&
+ estimate_ && *probe_bitrate < delay_based_bwe_->last_estimate() &&
+ *probe_bitrate < estimate_->link_capacity_lower) {
+ probe_bitrate.reset();
+ }
+ if (limit_probes_lower_than_throughput_estimate_ && probe_bitrate &&
+ acknowledged_bitrate) {
+ // Limit the backoff to something slightly below the acknowledged
+ // bitrate. ("Slightly below" because we want to drain the queues
+ // if we are actually overusing.)
+ // The acknowledged bitrate shouldn't normally be higher than the delay
+ // based estimate, but it could happen e.g. due to packet bursts or
+ // encoder overshoot. We use std::min to ensure that a probe result
+ // below the current BWE never causes an increase.
+ DataRate limit =
+ std::min(delay_based_bwe_->last_estimate(),
+ *acknowledged_bitrate * kProbeDropThroughputFraction);
+ probe_bitrate = std::max(*probe_bitrate, limit);
+ }
+
+ NetworkControlUpdate update;
+ bool recovered_from_overuse = false;
+
+ DelayBasedBwe::Result result;
+ result = delay_based_bwe_->IncomingPacketFeedbackVector(
+ report, acknowledged_bitrate, probe_bitrate, estimate_,
+ alr_start_time.has_value());
+
+ if (result.updated) {
+ if (result.probe) {
+ bandwidth_estimation_->SetSendBitrate(result.target_bitrate,
+ report.feedback_time);
+ }
+ // Since SetSendBitrate now resets the delay-based estimate, we have to
+ // call UpdateDelayBasedEstimate after SetSendBitrate.
+ bandwidth_estimation_->UpdateDelayBasedEstimate(report.feedback_time,
+ result.target_bitrate);
+ // Update the estimate in the ProbeController, in case we want to probe.
+ MaybeTriggerOnNetworkChanged(&update, report.feedback_time);
+ }
+ bandwidth_estimation_->UpdateLossBasedEstimator(report,
+ result.delay_detector_state);
+ recovered_from_overuse = result.recovered_from_overuse;
+
+ if (recovered_from_overuse) {
+ probe_controller_->SetAlrStartTimeMs(alr_start_time);
+ auto probes = probe_controller_->RequestProbe(report.feedback_time.ms());
+ update.probe_cluster_configs.insert(update.probe_cluster_configs.end(),
+ probes.begin(), probes.end());
+ }
+
+  // A missing valid RTT may simply mean that send-side BWE isn't used, in
+  // which case we don't try to limit the outstanding packets.
+ if (rate_control_settings_.UseCongestionWindow() &&
+ max_feedback_rtt.IsFinite()) {
+ UpdateCongestionWindowSize();
+ }
+ if (congestion_window_pushback_controller_ && current_data_window_) {
+ congestion_window_pushback_controller_->SetDataWindow(
+ *current_data_window_);
+ } else {
+ update.congestion_window = current_data_window_;
+ }
+
+ return update;
+}
+
+NetworkControlUpdate GoogCcNetworkController::OnNetworkStateEstimate(
+ NetworkStateEstimate msg) {
+ estimate_ = msg;
+ return NetworkControlUpdate();
+}
+
+NetworkControlUpdate GoogCcNetworkController::GetNetworkState(
+ Timestamp at_time) const {
+ NetworkControlUpdate update;
+ update.target_rate = TargetTransferRate();
+ update.target_rate->network_estimate.at_time = at_time;
+ update.target_rate->network_estimate.loss_rate_ratio =
+ last_estimated_fraction_loss_.value_or(0) / 255.0;
+ update.target_rate->network_estimate.round_trip_time =
+ last_estimated_round_trip_time_;
+ update.target_rate->network_estimate.bwe_period =
+ delay_based_bwe_->GetExpectedBwePeriod();
+
+ update.target_rate->at_time = at_time;
+ update.target_rate->target_rate = last_pushback_target_rate_;
+ update.target_rate->stable_target_rate =
+ bandwidth_estimation_->GetEstimatedLinkCapacity();
+ update.pacer_config = GetPacingRates(at_time);
+ update.congestion_window = current_data_window_;
+ return update;
+}
+
+void GoogCcNetworkController::MaybeTriggerOnNetworkChanged(
+ NetworkControlUpdate* update,
+ Timestamp at_time) {
+ uint8_t fraction_loss = bandwidth_estimation_->fraction_loss();
+ TimeDelta round_trip_time = bandwidth_estimation_->round_trip_time();
+ DataRate loss_based_target_rate = bandwidth_estimation_->target_rate();
+ DataRate pushback_target_rate = loss_based_target_rate;
+
+ BWE_TEST_LOGGING_PLOT(1, "fraction_loss_%", at_time.ms(),
+ (fraction_loss * 100) / 256);
+ BWE_TEST_LOGGING_PLOT(1, "rtt_ms", at_time.ms(), round_trip_time.ms());
+ BWE_TEST_LOGGING_PLOT(1, "Target_bitrate_kbps", at_time.ms(),
+ loss_based_target_rate.kbps());
+
+ double cwnd_reduce_ratio = 0.0;
+ if (congestion_window_pushback_controller_) {
+ int64_t pushback_rate =
+ congestion_window_pushback_controller_->UpdateTargetBitrate(
+ loss_based_target_rate.bps());
+ pushback_rate = std::max<int64_t>(bandwidth_estimation_->GetMinBitrate(),
+ pushback_rate);
+ pushback_target_rate = DataRate::BitsPerSec(pushback_rate);
+ if (rate_control_settings_.UseCongestionWindowDropFrameOnly()) {
+ cwnd_reduce_ratio = static_cast<double>(loss_based_target_rate.bps() -
+ pushback_target_rate.bps()) /
+ loss_based_target_rate.bps();
+ }
+ }
+ DataRate stable_target_rate =
+ bandwidth_estimation_->GetEstimatedLinkCapacity();
+ if (loss_based_stable_rate_) {
+ stable_target_rate = std::min(stable_target_rate, loss_based_target_rate);
+ } else {
+ stable_target_rate = std::min(stable_target_rate, pushback_target_rate);
+ }
+
+ if ((loss_based_target_rate != last_loss_based_target_rate_) ||
+ (fraction_loss != last_estimated_fraction_loss_) ||
+ (round_trip_time != last_estimated_round_trip_time_) ||
+ (pushback_target_rate != last_pushback_target_rate_) ||
+ (stable_target_rate != last_stable_target_rate_)) {
+ last_loss_based_target_rate_ = loss_based_target_rate;
+ last_pushback_target_rate_ = pushback_target_rate;
+ last_estimated_fraction_loss_ = fraction_loss;
+ last_estimated_round_trip_time_ = round_trip_time;
+ last_stable_target_rate_ = stable_target_rate;
+
+ alr_detector_->SetEstimatedBitrate(loss_based_target_rate.bps());
+
+ TimeDelta bwe_period = delay_based_bwe_->GetExpectedBwePeriod();
+
+ TargetTransferRate target_rate_msg;
+ target_rate_msg.at_time = at_time;
+ if (rate_control_settings_.UseCongestionWindowDropFrameOnly()) {
+ target_rate_msg.target_rate = loss_based_target_rate;
+ target_rate_msg.cwnd_reduce_ratio = cwnd_reduce_ratio;
+ } else {
+ target_rate_msg.target_rate = pushback_target_rate;
+ }
+ target_rate_msg.stable_target_rate = stable_target_rate;
+ target_rate_msg.network_estimate.at_time = at_time;
+ target_rate_msg.network_estimate.round_trip_time = round_trip_time;
+ target_rate_msg.network_estimate.loss_rate_ratio = fraction_loss / 255.0f;
+ target_rate_msg.network_estimate.bwe_period = bwe_period;
+
+ update->target_rate = target_rate_msg;
+
+ auto probes = probe_controller_->SetEstimatedBitrate(
+ loss_based_target_rate.bps(), at_time.ms());
+ update->probe_cluster_configs.insert(update->probe_cluster_configs.end(),
+ probes.begin(), probes.end());
+ update->pacer_config = GetPacingRates(at_time);
+
+ RTC_LOG(LS_VERBOSE) << "bwe " << at_time.ms() << " pushback_target_bps="
+ << last_pushback_target_rate_.bps()
+ << " estimate_bps=" << loss_based_target_rate.bps();
+ }
+}
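+
+// Note on units (for illustration): fraction_loss is a Q8 value in
+// [0, 255], so e.g. fraction_loss = 26 maps to a loss_rate_ratio of
+// 26 / 255.0 ~= 0.102, i.e. roughly 10% loss reported in the
+// TargetTransferRate message.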
+
+PacerConfig GoogCcNetworkController::GetPacingRates(Timestamp at_time) const {
+ // Pacing rate is based on target rate before congestion window pushback,
+ // because we don't want to build queues in the pacer when pushback occurs.
+ DataRate pacing_rate = DataRate::Zero();
+ if ((pace_at_max_of_bwe_and_lower_link_capacity_ ||
+ (pace_at_loss_based_bwe_when_loss_ &&
+ last_loss_based_target_rate_ >= delay_based_bwe_->last_estimate())) &&
+ estimate_) {
+ pacing_rate =
+ std::max({min_total_allocated_bitrate_, estimate_->link_capacity_lower,
+ last_loss_based_target_rate_}) *
+ pacing_factor_;
+ } else {
+ pacing_rate =
+ std::max(min_total_allocated_bitrate_, last_loss_based_target_rate_) *
+ pacing_factor_;
+ }
+ DataRate padding_rate =
+ std::min(max_padding_rate_, last_pushback_target_rate_);
+ PacerConfig msg;
+ msg.at_time = at_time;
+ msg.time_window = TimeDelta::Seconds(1);
+ msg.data_window = pacing_rate * msg.time_window;
+ msg.pad_window = padding_rate * msg.time_window;
+ return msg;
+}
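+
+// For illustration (hypothetical rates): with the
+// PaceAtMaxOfBweAndLowerLinkCapacity trial enabled, a network-estimate
+// link_capacity_lower of 900 kbps, a loss-based target of 600 kbps, a
+// 300 kbps minimum allocation and the default 2.5 factor, the pacing rate
+// becomes max(300, 900, 600) kbps * 2.5 = 2250 kbps, keeping the pacer from
+// throttling below what the estimator believes the link can carry.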
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/congestion_controller/goog_cc/goog_cc_network_control.h b/third_party/libwebrtc/modules/congestion_controller/goog_cc/goog_cc_network_control.h
new file mode 100644
index 0000000000..884b572740
--- /dev/null
+++ b/third_party/libwebrtc/modules/congestion_controller/goog_cc/goog_cc_network_control.h
@@ -0,0 +1,150 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_CONGESTION_CONTROLLER_GOOG_CC_GOOG_CC_NETWORK_CONTROL_H_
+#define MODULES_CONGESTION_CONTROLLER_GOOG_CC_GOOG_CC_NETWORK_CONTROL_H_
+
+#include <stdint.h>
+
+#include <deque>
+#include <memory>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/field_trials_view.h"
+#include "api/network_state_predictor.h"
+#include "api/rtc_event_log/rtc_event_log.h"
+#include "api/transport/field_trial_based_config.h"
+#include "api/transport/network_control.h"
+#include "api/transport/network_types.h"
+#include "api/units/data_rate.h"
+#include "api/units/data_size.h"
+#include "api/units/timestamp.h"
+#include "modules/congestion_controller/goog_cc/acknowledged_bitrate_estimator_interface.h"
+#include "modules/congestion_controller/goog_cc/alr_detector.h"
+#include "modules/congestion_controller/goog_cc/congestion_window_pushback_controller.h"
+#include "modules/congestion_controller/goog_cc/delay_based_bwe.h"
+#include "modules/congestion_controller/goog_cc/probe_controller.h"
+#include "modules/congestion_controller/goog_cc/send_side_bandwidth_estimation.h"
+#include "rtc_base/experiments/field_trial_parser.h"
+#include "rtc_base/experiments/rate_control_settings.h"
+
+namespace webrtc {
+struct GoogCcConfig {
+ std::unique_ptr<NetworkStateEstimator> network_state_estimator = nullptr;
+ std::unique_ptr<NetworkStatePredictor> network_state_predictor = nullptr;
+ bool feedback_only = false;
+};
+
+class GoogCcNetworkController : public NetworkControllerInterface {
+ public:
+ GoogCcNetworkController(NetworkControllerConfig config,
+ GoogCcConfig goog_cc_config);
+
+ GoogCcNetworkController() = delete;
+ GoogCcNetworkController(const GoogCcNetworkController&) = delete;
+ GoogCcNetworkController& operator=(const GoogCcNetworkController&) = delete;
+
+ ~GoogCcNetworkController() override;
+
+ // NetworkControllerInterface
+ NetworkControlUpdate OnNetworkAvailability(NetworkAvailability msg) override;
+ NetworkControlUpdate OnNetworkRouteChange(NetworkRouteChange msg) override;
+ NetworkControlUpdate OnProcessInterval(ProcessInterval msg) override;
+ NetworkControlUpdate OnRemoteBitrateReport(RemoteBitrateReport msg) override;
+ NetworkControlUpdate OnRoundTripTimeUpdate(RoundTripTimeUpdate msg) override;
+ NetworkControlUpdate OnSentPacket(SentPacket msg) override;
+ NetworkControlUpdate OnReceivedPacket(ReceivedPacket msg) override;
+ NetworkControlUpdate OnStreamsConfig(StreamsConfig msg) override;
+ NetworkControlUpdate OnTargetRateConstraints(
+ TargetRateConstraints msg) override;
+ NetworkControlUpdate OnTransportLossReport(TransportLossReport msg) override;
+ NetworkControlUpdate OnTransportPacketsFeedback(
+ TransportPacketsFeedback msg) override;
+ NetworkControlUpdate OnNetworkStateEstimate(
+ NetworkStateEstimate msg) override;
+
+ NetworkControlUpdate GetNetworkState(Timestamp at_time) const;
+
+ private:
+ friend class GoogCcStatePrinter;
+ std::vector<ProbeClusterConfig> ResetConstraints(
+ TargetRateConstraints new_constraints);
+ void ClampConstraints();
+ void MaybeTriggerOnNetworkChanged(NetworkControlUpdate* update,
+ Timestamp at_time);
+ void UpdateCongestionWindowSize();
+ PacerConfig GetPacingRates(Timestamp at_time) const;
+ const FieldTrialBasedConfig trial_based_config_;
+
+ const FieldTrialsView* const key_value_config_;
+ RtcEventLog* const event_log_;
+ const bool packet_feedback_only_;
+ FieldTrialFlag safe_reset_on_route_change_;
+ FieldTrialFlag safe_reset_acknowledged_rate_;
+ const bool use_min_allocatable_as_lower_bound_;
+ const bool ignore_probes_lower_than_network_estimate_;
+ const bool limit_probes_lower_than_throughput_estimate_;
+ const RateControlSettings rate_control_settings_;
+ const bool loss_based_stable_rate_;
+ const bool pace_at_max_of_bwe_and_lower_link_capacity_;
+ const bool pace_at_loss_based_bwe_when_loss_;
+
+ const std::unique_ptr<ProbeController> probe_controller_;
+ const std::unique_ptr<CongestionWindowPushbackController>
+ congestion_window_pushback_controller_;
+
+ std::unique_ptr<SendSideBandwidthEstimation> bandwidth_estimation_;
+ std::unique_ptr<AlrDetector> alr_detector_;
+ std::unique_ptr<ProbeBitrateEstimator> probe_bitrate_estimator_;
+ std::unique_ptr<NetworkStateEstimator> network_estimator_;
+ std::unique_ptr<NetworkStatePredictor> network_state_predictor_;
+ std::unique_ptr<DelayBasedBwe> delay_based_bwe_;
+ std::unique_ptr<AcknowledgedBitrateEstimatorInterface>
+ acknowledged_bitrate_estimator_;
+
+ absl::optional<NetworkControllerConfig> initial_config_;
+
+ DataRate min_target_rate_ = DataRate::Zero();
+ DataRate min_data_rate_ = DataRate::Zero();
+ DataRate max_data_rate_ = DataRate::PlusInfinity();
+ absl::optional<DataRate> starting_rate_;
+
+ bool first_packet_sent_ = false;
+
+ absl::optional<NetworkStateEstimate> estimate_;
+
+ Timestamp next_loss_update_ = Timestamp::MinusInfinity();
+ int lost_packets_since_last_loss_update_ = 0;
+ int expected_packets_since_last_loss_update_ = 0;
+
+ std::deque<int64_t> feedback_max_rtts_;
+
+ DataRate last_loss_based_target_rate_;
+ DataRate last_pushback_target_rate_;
+ DataRate last_stable_target_rate_;
+
+ absl::optional<uint8_t> last_estimated_fraction_loss_ = 0;
+ TimeDelta last_estimated_round_trip_time_ = TimeDelta::PlusInfinity();
+ Timestamp last_packet_received_time_ = Timestamp::MinusInfinity();
+
+ double pacing_factor_;
+ DataRate min_total_allocated_bitrate_;
+ DataRate max_padding_rate_;
+ DataRate max_total_allocated_bitrate_;
+
+ bool previously_in_alr_ = false;
+
+ absl::optional<DataSize> current_data_window_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_CONGESTION_CONTROLLER_GOOG_CC_GOOG_CC_NETWORK_CONTROL_H_
diff --git a/third_party/libwebrtc/modules/congestion_controller/goog_cc/goog_cc_network_control_unittest.cc b/third_party/libwebrtc/modules/congestion_controller/goog_cc/goog_cc_network_control_unittest.cc
new file mode 100644
index 0000000000..8ba556c20e
--- /dev/null
+++ b/third_party/libwebrtc/modules/congestion_controller/goog_cc/goog_cc_network_control_unittest.cc
@@ -0,0 +1,958 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <queue>
+
+#include "absl/strings/string_view.h"
+#include "api/test/network_emulation/create_cross_traffic.h"
+#include "api/test/network_emulation/cross_traffic.h"
+#include "api/transport/goog_cc_factory.h"
+#include "api/transport/network_types.h"
+#include "api/units/data_rate.h"
+#include "logging/rtc_event_log/mock/mock_rtc_event_log.h"
+#include "test/field_trial.h"
+#include "test/gtest.h"
+#include "test/scenario/scenario.h"
+
+using ::testing::NiceMock;
+
+namespace webrtc {
+namespace test {
+namespace {
+// Count dips from a constant high bandwidth level within a short window.
+int CountBandwidthDips(std::queue<DataRate> bandwidth_history,
+ DataRate threshold) {
+  if (bandwidth_history.empty())
+    return 0;
+ DataRate first = bandwidth_history.front();
+ bandwidth_history.pop();
+
+ int dips = 0;
+ bool state_high = true;
+ while (!bandwidth_history.empty()) {
+ if (bandwidth_history.front() + threshold < first && state_high) {
+ ++dips;
+ state_high = false;
+ } else if (bandwidth_history.front() == first) {
+ state_high = true;
+ } else if (bandwidth_history.front() > first) {
+      // If this is toggling, we will catch it later when front becomes first.
+ state_high = false;
+ }
+ bandwidth_history.pop();
+ }
+ return dips;
+}
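+
+// Worked example (hypothetical history, in kbps): for {1000, 1000, 600,
+// 1000, 500} with a 100 kbps threshold, 1000 is taken as the reference
+// level, the drops to 600 and 500 are each counted once, and the return to
+// 1000 in between re-arms the counter, so the function returns 2.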
+GoogCcNetworkControllerFactory CreateFeedbackOnlyFactory() {
+ GoogCcFactoryConfig config;
+ config.feedback_only = true;
+ return GoogCcNetworkControllerFactory(std::move(config));
+}
+
+const uint32_t kInitialBitrateKbps = 60;
+const DataRate kInitialBitrate = DataRate::KilobitsPerSec(kInitialBitrateKbps);
+const float kDefaultPacingRate = 2.5f;
+
+CallClient* CreateVideoSendingClient(
+ Scenario* s,
+ CallClientConfig config,
+ std::vector<EmulatedNetworkNode*> send_link,
+ std::vector<EmulatedNetworkNode*> return_link) {
+ auto* client = s->CreateClient("send", std::move(config));
+ auto* route = s->CreateRoutes(client, send_link,
+ s->CreateClient("return", CallClientConfig()),
+ return_link);
+ s->CreateVideoStream(route->forward(), VideoStreamConfig());
+ return client;
+}
+
+NetworkRouteChange CreateRouteChange(
+ Timestamp time,
+ absl::optional<DataRate> start_rate = absl::nullopt,
+ absl::optional<DataRate> min_rate = absl::nullopt,
+ absl::optional<DataRate> max_rate = absl::nullopt) {
+ NetworkRouteChange route_change;
+ route_change.at_time = time;
+ route_change.constraints.at_time = time;
+ route_change.constraints.min_data_rate = min_rate;
+ route_change.constraints.max_data_rate = max_rate;
+ route_change.constraints.starting_rate = start_rate;
+ return route_change;
+}
+
+PacketResult CreatePacketResult(Timestamp arrival_time,
+ Timestamp send_time,
+ size_t payload_size,
+ PacedPacketInfo pacing_info) {
+ PacketResult packet_result;
+ packet_result.sent_packet = SentPacket();
+ packet_result.sent_packet.send_time = send_time;
+ packet_result.sent_packet.size = DataSize::Bytes(payload_size);
+ packet_result.sent_packet.pacing_info = pacing_info;
+ packet_result.receive_time = arrival_time;
+ return packet_result;
+}
+
+// Simulate sending packets and receiving transport feedback during
+// `runtime_ms`.
+absl::optional<DataRate> PacketTransmissionAndFeedbackBlock(
+ NetworkControllerInterface* controller,
+ int64_t runtime_ms,
+ int64_t delay,
+ Timestamp& current_time) {
+ NetworkControlUpdate update;
+ absl::optional<DataRate> target_bitrate;
+ int64_t delay_buildup = 0;
+ int64_t start_time_ms = current_time.ms();
+ while (current_time.ms() - start_time_ms < runtime_ms) {
+ constexpr size_t kPayloadSize = 1000;
+ PacketResult packet =
+ CreatePacketResult(current_time + TimeDelta::Millis(delay_buildup),
+ current_time, kPayloadSize, PacedPacketInfo());
+ delay_buildup += delay;
+ update = controller->OnSentPacket(packet.sent_packet);
+ if (update.target_rate) {
+ target_bitrate = update.target_rate->target_rate;
+ }
+ TransportPacketsFeedback feedback;
+ feedback.feedback_time = packet.receive_time;
+ feedback.packet_feedbacks.push_back(packet);
+ update = controller->OnTransportPacketsFeedback(feedback);
+ if (update.target_rate) {
+ target_bitrate = update.target_rate->target_rate;
+ }
+ current_time += TimeDelta::Millis(50);
+ update = controller->OnProcessInterval({.at_time = current_time});
+ if (update.target_rate) {
+ target_bitrate = update.target_rate->target_rate;
+ }
+ }
+ return target_bitrate;
+}
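+
+// For illustration (hypothetical arguments): calling this helper with
+// runtime_ms = 6000 and delay = 50 makes each fed-back packet arrive 50 ms
+// later than the previous one, mimicking a growing network queue so the
+// delay-based estimator backs off; delay = 0 instead lets the estimate ramp
+// up toward the configured maximum.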
+
+// Scenarios:
+
+void UpdatesTargetRateBasedOnLinkCapacity(absl::string_view test_name = "") {
+ ScopedFieldTrials trial("WebRTC-SendSideBwe-WithOverhead/Enabled/");
+ auto factory = CreateFeedbackOnlyFactory();
+ Scenario s("googcc_unit/target_capacity" + std::string(test_name), false);
+ CallClientConfig config;
+ config.transport.cc_factory = &factory;
+ config.transport.rates.min_rate = DataRate::KilobitsPerSec(10);
+ config.transport.rates.max_rate = DataRate::KilobitsPerSec(1500);
+ config.transport.rates.start_rate = DataRate::KilobitsPerSec(300);
+ auto send_net = s.CreateMutableSimulationNode([](NetworkSimulationConfig* c) {
+ c->bandwidth = DataRate::KilobitsPerSec(500);
+ c->delay = TimeDelta::Millis(100);
+ c->loss_rate = 0.0;
+ });
+ auto ret_net = s.CreateMutableSimulationNode(
+ [](NetworkSimulationConfig* c) { c->delay = TimeDelta::Millis(100); });
+ StatesPrinter* truth = s.CreatePrinter(
+ "send.truth.txt", TimeDelta::PlusInfinity(), {send_net->ConfigPrinter()});
+
+ auto* client = CreateVideoSendingClient(&s, config, {send_net->node()},
+ {ret_net->node()});
+
+ truth->PrintRow();
+ s.RunFor(TimeDelta::Seconds(25));
+ truth->PrintRow();
+ EXPECT_NEAR(client->target_rate().kbps(), 450, 100);
+
+ send_net->UpdateConfig([](NetworkSimulationConfig* c) {
+ c->bandwidth = DataRate::KilobitsPerSec(800);
+ c->delay = TimeDelta::Millis(100);
+ });
+
+ truth->PrintRow();
+ s.RunFor(TimeDelta::Seconds(20));
+ truth->PrintRow();
+ EXPECT_NEAR(client->target_rate().kbps(), 750, 150);
+
+ send_net->UpdateConfig([](NetworkSimulationConfig* c) {
+ c->bandwidth = DataRate::KilobitsPerSec(100);
+ c->delay = TimeDelta::Millis(200);
+ });
+ ret_net->UpdateConfig(
+ [](NetworkSimulationConfig* c) { c->delay = TimeDelta::Millis(200); });
+
+ truth->PrintRow();
+ s.RunFor(TimeDelta::Seconds(50));
+ truth->PrintRow();
+ EXPECT_NEAR(client->target_rate().kbps(), 90, 25);
+}
+
+DataRate RunRembDipScenario(absl::string_view test_name) {
+ Scenario s(test_name);
+ NetworkSimulationConfig net_conf;
+ net_conf.bandwidth = DataRate::KilobitsPerSec(2000);
+ net_conf.delay = TimeDelta::Millis(50);
+ auto* client = s.CreateClient("send", [&](CallClientConfig* c) {
+ c->transport.rates.start_rate = DataRate::KilobitsPerSec(1000);
+ });
+ auto send_net = {s.CreateSimulationNode(net_conf)};
+ auto ret_net = {s.CreateSimulationNode(net_conf)};
+ auto* route = s.CreateRoutes(
+ client, send_net, s.CreateClient("return", CallClientConfig()), ret_net);
+ s.CreateVideoStream(route->forward(), VideoStreamConfig());
+
+ s.RunFor(TimeDelta::Seconds(10));
+ EXPECT_GT(client->send_bandwidth().kbps(), 1500);
+
+ DataRate RembLimit = DataRate::KilobitsPerSec(250);
+ client->SetRemoteBitrate(RembLimit);
+ s.RunFor(TimeDelta::Seconds(1));
+ EXPECT_EQ(client->send_bandwidth(), RembLimit);
+
+ DataRate RembLimitLifted = DataRate::KilobitsPerSec(10000);
+ client->SetRemoteBitrate(RembLimitLifted);
+ s.RunFor(TimeDelta::Seconds(10));
+
+ return client->send_bandwidth();
+}
+
+} // namespace
+
+class NetworkControllerTestFixture {
+ public:
+ NetworkControllerTestFixture() : factory_() {}
+
+ std::unique_ptr<NetworkControllerInterface> CreateController() {
+ NetworkControllerConfig config = InitialConfig();
+ std::unique_ptr<NetworkControllerInterface> controller =
+ factory_.Create(config);
+ return controller;
+ }
+
+ private:
+ NetworkControllerConfig InitialConfig(
+ int starting_bandwidth_kbps = kInitialBitrateKbps,
+ int min_data_rate_kbps = 0,
+ int max_data_rate_kbps = 5 * kInitialBitrateKbps) {
+ NetworkControllerConfig config;
+ config.constraints.at_time = Timestamp::Zero();
+ config.constraints.min_data_rate =
+ DataRate::KilobitsPerSec(min_data_rate_kbps);
+ config.constraints.max_data_rate =
+ DataRate::KilobitsPerSec(max_data_rate_kbps);
+ config.constraints.starting_rate =
+ DataRate::KilobitsPerSec(starting_bandwidth_kbps);
+ config.event_log = &event_log_;
+ return config;
+ }
+
+ NiceMock<MockRtcEventLog> event_log_;
+ GoogCcNetworkControllerFactory factory_;
+};
+
+TEST(GoogCcNetworkControllerTest, InitializeTargetRateOnFirstProcessInterval) {
+ NetworkControllerTestFixture fixture;
+ std::unique_ptr<NetworkControllerInterface> controller =
+ fixture.CreateController();
+
+ NetworkControlUpdate update =
+ controller->OnProcessInterval({.at_time = Timestamp::Millis(123456)});
+
+ EXPECT_EQ(update.target_rate->target_rate, kInitialBitrate);
+ EXPECT_EQ(update.pacer_config->data_rate(),
+ kInitialBitrate * kDefaultPacingRate);
+ EXPECT_EQ(update.probe_cluster_configs[0].target_data_rate,
+ kInitialBitrate * 3);
+ EXPECT_EQ(update.probe_cluster_configs[1].target_data_rate,
+ kInitialBitrate * 5);
+}
+
+TEST(GoogCcNetworkControllerTest, ReactsToChangedNetworkConditions) {
+ NetworkControllerTestFixture fixture;
+ std::unique_ptr<NetworkControllerInterface> controller =
+ fixture.CreateController();
+ Timestamp current_time = Timestamp::Millis(123);
+ NetworkControlUpdate update =
+ controller->OnProcessInterval({.at_time = current_time});
+ update = controller->OnRemoteBitrateReport(
+ {.receive_time = current_time, .bandwidth = kInitialBitrate * 2});
+
+ current_time += TimeDelta::Millis(25);
+ update = controller->OnProcessInterval({.at_time = current_time});
+ EXPECT_EQ(update.target_rate->target_rate, kInitialBitrate * 2);
+ EXPECT_EQ(update.pacer_config->data_rate(),
+ kInitialBitrate * 2 * kDefaultPacingRate);
+
+ update = controller->OnRemoteBitrateReport(
+ {.receive_time = current_time, .bandwidth = kInitialBitrate});
+ current_time += TimeDelta::Millis(25);
+ update = controller->OnProcessInterval({.at_time = current_time});
+ EXPECT_EQ(update.target_rate->target_rate, kInitialBitrate);
+ EXPECT_EQ(update.pacer_config->data_rate(),
+ kInitialBitrate * kDefaultPacingRate);
+}
+
+TEST(GoogCcNetworkControllerTest, OnNetworkRouteChanged) {
+ NetworkControllerTestFixture fixture;
+ std::unique_ptr<NetworkControllerInterface> controller =
+ fixture.CreateController();
+ Timestamp current_time = Timestamp::Millis(123);
+ DataRate new_bitrate = DataRate::BitsPerSec(200000);
+ NetworkControlUpdate update = controller->OnNetworkRouteChange(
+ CreateRouteChange(current_time, new_bitrate));
+ EXPECT_EQ(update.target_rate->target_rate, new_bitrate);
+ EXPECT_EQ(update.pacer_config->data_rate(), new_bitrate * kDefaultPacingRate);
+ EXPECT_EQ(update.probe_cluster_configs.size(), 2u);
+
+ // If the bitrate is reset to -1, the new starting bitrate will be
+ // the minimum default bitrate.
+ const DataRate kDefaultMinBitrate = DataRate::KilobitsPerSec(5);
+ update = controller->OnNetworkRouteChange(CreateRouteChange(current_time));
+ EXPECT_EQ(update.target_rate->target_rate, kDefaultMinBitrate);
+ EXPECT_NEAR(update.pacer_config->data_rate().bps<double>(),
+ kDefaultMinBitrate.bps<double>() * kDefaultPacingRate, 10);
+ EXPECT_EQ(update.probe_cluster_configs.size(), 2u);
+}
+
+TEST(GoogCcNetworkControllerTest, ProbeOnRouteChange) {
+ NetworkControllerTestFixture fixture;
+ std::unique_ptr<NetworkControllerInterface> controller =
+ fixture.CreateController();
+ Timestamp current_time = Timestamp::Millis(123);
+ NetworkControlUpdate update = controller->OnNetworkRouteChange(
+ CreateRouteChange(current_time, 2 * kInitialBitrate, DataRate::Zero(),
+ 20 * kInitialBitrate));
+
+ EXPECT_TRUE(update.pacer_config.has_value());
+ EXPECT_EQ(update.target_rate->target_rate, kInitialBitrate * 2);
+ EXPECT_EQ(update.probe_cluster_configs.size(), 2u);
+ EXPECT_EQ(update.probe_cluster_configs[0].target_data_rate,
+ kInitialBitrate * 6);
+ EXPECT_EQ(update.probe_cluster_configs[1].target_data_rate,
+ kInitialBitrate * 12);
+
+ update = controller->OnProcessInterval({.at_time = current_time});
+}
+
+// Bandwidth estimation is updated when feedback is received.
+// Feedback that shows an increasing delay causes the estimate to be reduced.
+TEST(GoogCcNetworkControllerTest, UpdatesDelayBasedEstimate) {
+ NetworkControllerTestFixture fixture;
+ std::unique_ptr<NetworkControllerInterface> controller =
+ fixture.CreateController();
+ const int64_t kRunTimeMs = 6000;
+ Timestamp current_time = Timestamp::Millis(123);
+
+  // The test must run and insert packets/feedback long enough that the
+  // BWE computes a valid estimate. This is first done in an environment which
+  // simulates no bandwidth limitation, and therefore no built-up delay.
+ absl::optional<DataRate> target_bitrate_before_delay =
+ PacketTransmissionAndFeedbackBlock(controller.get(), kRunTimeMs, 0,
+ current_time);
+ ASSERT_TRUE(target_bitrate_before_delay.has_value());
+
+ // Repeat, but this time with a building delay, and make sure that the
+ // estimation is adjusted downwards.
+ absl::optional<DataRate> target_bitrate_after_delay =
+ PacketTransmissionAndFeedbackBlock(controller.get(), kRunTimeMs, 50,
+ current_time);
+ EXPECT_LT(*target_bitrate_after_delay, *target_bitrate_before_delay);
+}
+
+TEST(GoogCcNetworkControllerTest, PaceAtMaxOfLowerLinkCapacityAndBwe) {
+ ScopedFieldTrials trial(
+ "WebRTC-Bwe-PaceAtMaxOfBweAndLowerLinkCapacity/Enabled/");
+ NetworkControllerTestFixture fixture;
+ std::unique_ptr<NetworkControllerInterface> controller =
+ fixture.CreateController();
+ Timestamp current_time = Timestamp::Millis(123);
+ NetworkControlUpdate update =
+ controller->OnProcessInterval({.at_time = current_time});
+ current_time += TimeDelta::Millis(100);
+ NetworkStateEstimate network_estimate = {.link_capacity_lower =
+ 10 * kInitialBitrate};
+ update = controller->OnNetworkStateEstimate(network_estimate);
+  // OnNetworkStateEstimate does not by itself trigger processing of a new
+  // estimate, so add a dummy loss report to force a BWE update in the next
+  // process interval.
+ TransportLossReport loss_report;
+ loss_report.start_time = current_time;
+ loss_report.end_time = current_time;
+ loss_report.receive_time = current_time;
+ loss_report.packets_received_delta = 50;
+ loss_report.packets_lost_delta = 1;
+ update = controller->OnTransportLossReport(loss_report);
+ update = controller->OnProcessInterval({.at_time = current_time});
+ ASSERT_TRUE(update.pacer_config);
+ ASSERT_TRUE(update.target_rate);
+ ASSERT_LT(update.target_rate->target_rate,
+ network_estimate.link_capacity_lower);
+ EXPECT_EQ(update.pacer_config->data_rate().kbps(),
+ network_estimate.link_capacity_lower.kbps() * kDefaultPacingRate);
+
+ current_time += TimeDelta::Millis(100);
+ // Set a low link capacity estimate and verify that pacing rate is set
+ // relative to loss based/delay based estimate.
+ network_estimate = {.link_capacity_lower = 0.5 * kInitialBitrate};
+ update = controller->OnNetworkStateEstimate(network_estimate);
+ // Again, we need to inject a dummy loss report to trigger an update of the
+ // BWE in the next process interval.
+ loss_report.start_time = current_time;
+ loss_report.end_time = current_time;
+ loss_report.receive_time = current_time;
+ loss_report.packets_received_delta = 50;
+ loss_report.packets_lost_delta = 0;
+ update = controller->OnTransportLossReport(loss_report);
+ update = controller->OnProcessInterval({.at_time = current_time});
+ ASSERT_TRUE(update.target_rate);
+ ASSERT_GT(update.target_rate->target_rate,
+ network_estimate.link_capacity_lower);
+ EXPECT_EQ(update.pacer_config->data_rate().kbps(),
+ update.target_rate->target_rate.kbps() * kDefaultPacingRate);
+}
+
+// Tests that congestion window pushback kicks in on network delay.
+TEST(GoogCcScenario, CongestionWindowPushbackOnNetworkDelay) {
+ auto factory = CreateFeedbackOnlyFactory();
+ ScopedFieldTrials trial(
+ "WebRTC-CongestionWindow/QueueSize:800,MinBitrate:30000/");
+ Scenario s("googcc_unit/cwnd_on_delay", false);
+ auto send_net =
+ s.CreateMutableSimulationNode([=](NetworkSimulationConfig* c) {
+ c->bandwidth = DataRate::KilobitsPerSec(1000);
+ c->delay = TimeDelta::Millis(100);
+ });
+ auto ret_net = s.CreateSimulationNode(
+ [](NetworkSimulationConfig* c) { c->delay = TimeDelta::Millis(100); });
+ CallClientConfig config;
+ config.transport.cc_factory = &factory;
+ // Start high so bandwidth drop has max effect.
+ config.transport.rates.start_rate = DataRate::KilobitsPerSec(300);
+ config.transport.rates.max_rate = DataRate::KilobitsPerSec(2000);
+ config.transport.rates.min_rate = DataRate::KilobitsPerSec(10);
+
+ auto* client = CreateVideoSendingClient(&s, std::move(config),
+ {send_net->node()}, {ret_net});
+
+ s.RunFor(TimeDelta::Seconds(10));
+ send_net->PauseTransmissionUntil(s.Now() + TimeDelta::Seconds(10));
+ s.RunFor(TimeDelta::Seconds(3));
+
+ // After 3 seconds without feedback from any sent packets, we expect that the
+ // target rate is reduced to the minimum pushback threshold
+ // kDefaultMinPushbackTargetBitrateBps, which is defined as 30 kbps in
+ // congestion_window_pushback_controller.
+ EXPECT_LT(client->target_rate().kbps(), 40);
+}
+
+// Tests that congestion window pushback with frame dropping kicks in on
+// network delay.
+TEST(GoogCcScenario, CongestionWindowPushbackDropFrameOnNetworkDelay) {
+ auto factory = CreateFeedbackOnlyFactory();
+ ScopedFieldTrials trial(
+ "WebRTC-CongestionWindow/QueueSize:800,MinBitrate:30000,DropFrame:true/");
+ Scenario s("googcc_unit/cwnd_on_delay", false);
+ auto send_net =
+ s.CreateMutableSimulationNode([=](NetworkSimulationConfig* c) {
+ c->bandwidth = DataRate::KilobitsPerSec(1000);
+ c->delay = TimeDelta::Millis(100);
+ });
+ auto ret_net = s.CreateSimulationNode(
+ [](NetworkSimulationConfig* c) { c->delay = TimeDelta::Millis(100); });
+ CallClientConfig config;
+ config.transport.cc_factory = &factory;
+ // Start high so bandwidth drop has max effect.
+ config.transport.rates.start_rate = DataRate::KilobitsPerSec(300);
+ config.transport.rates.max_rate = DataRate::KilobitsPerSec(2000);
+ config.transport.rates.min_rate = DataRate::KilobitsPerSec(10);
+
+ auto* client = CreateVideoSendingClient(&s, std::move(config),
+ {send_net->node()}, {ret_net});
+
+ s.RunFor(TimeDelta::Seconds(10));
+ send_net->PauseTransmissionUntil(s.Now() + TimeDelta::Seconds(10));
+ s.RunFor(TimeDelta::Seconds(3));
+
+  // Since DropFrame is enabled, after 3 seconds without feedback from any
+  // sent packets we expect the congestion window to drop frames instead of
+  // reducing the target rate.
+ EXPECT_GT(client->target_rate().kbps(), 300);
+}
+
+TEST(GoogCcScenario, PaddingRateLimitedByCongestionWindowInTrial) {
+ ScopedFieldTrials trial(
+ "WebRTC-CongestionWindow/QueueSize:200,MinBitrate:30000/");
+
+ Scenario s("googcc_unit/padding_limited", false);
+ auto send_net =
+ s.CreateMutableSimulationNode([=](NetworkSimulationConfig* c) {
+ c->bandwidth = DataRate::KilobitsPerSec(1000);
+ c->delay = TimeDelta::Millis(100);
+ });
+ auto ret_net = s.CreateSimulationNode(
+ [](NetworkSimulationConfig* c) { c->delay = TimeDelta::Millis(100); });
+ CallClientConfig config;
+ // Start high so bandwidth drop has max effect.
+ config.transport.rates.start_rate = DataRate::KilobitsPerSec(1000);
+ config.transport.rates.max_rate = DataRate::KilobitsPerSec(2000);
+ auto* client = s.CreateClient("send", config);
+ auto* route =
+ s.CreateRoutes(client, {send_net->node()},
+ s.CreateClient("return", CallClientConfig()), {ret_net});
+ VideoStreamConfig video;
+ video.stream.pad_to_rate = config.transport.rates.max_rate;
+ s.CreateVideoStream(route->forward(), video);
+
+ // Run for a few seconds to allow the controller to stabilize.
+ s.RunFor(TimeDelta::Seconds(10));
+
+ // Check that padding rate matches target rate.
+ EXPECT_NEAR(client->padding_rate().kbps(), client->target_rate().kbps(), 1);
+
+ // Check this is also the case when congestion window pushback kicks in.
+ send_net->PauseTransmissionUntil(s.Now() + TimeDelta::Seconds(1));
+ EXPECT_NEAR(client->padding_rate().kbps(), client->target_rate().kbps(), 1);
+}
+
+TEST(GoogCcScenario, LimitsToFloorIfRttIsHighInTrial) {
+ // The field trial limits maximum RTT to 2 seconds, higher RTT means that the
+ // controller backs off until it reaches the minimum configured bitrate. This
+ // allows the RTT to recover faster than the regular control mechanism would
+ // achieve.
+ const DataRate kBandwidthFloor = DataRate::KilobitsPerSec(50);
+ ScopedFieldTrials trial("WebRTC-Bwe-MaxRttLimit/limit:2s,floor:" +
+ std::to_string(kBandwidthFloor.kbps()) + "kbps/");
+ // In the test case, we limit the capacity and add a cross traffic packet
+ // burst that blocks media from being sent. This causes the RTT to quickly
+ // increase above the threshold in the trial.
+ const DataRate kLinkCapacity = DataRate::KilobitsPerSec(100);
+ const TimeDelta kBufferBloatDuration = TimeDelta::Seconds(10);
+ Scenario s("googcc_unit/limit_trial", false);
+ auto send_net = s.CreateSimulationNode([=](NetworkSimulationConfig* c) {
+ c->bandwidth = kLinkCapacity;
+ c->delay = TimeDelta::Millis(100);
+ });
+ auto ret_net = s.CreateSimulationNode(
+ [](NetworkSimulationConfig* c) { c->delay = TimeDelta::Millis(100); });
+ CallClientConfig config;
+ config.transport.rates.start_rate = kLinkCapacity;
+
+ auto* client = CreateVideoSendingClient(&s, config, {send_net}, {ret_net});
+ // Run for a few seconds to allow the controller to stabilize.
+ s.RunFor(TimeDelta::Seconds(10));
+ const DataSize kBloatPacketSize = DataSize::Bytes(1000);
+ const int kBloatPacketCount =
+ static_cast<int>(kBufferBloatDuration * kLinkCapacity / kBloatPacketSize);
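+  // With a 100 kbps link and 1000-byte packets this works out to 125 packets,
+  // i.e. 10 seconds' worth of data.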
+ // This will cause the RTT to be large for a while.
+ s.TriggerPacketBurst({send_net}, kBloatPacketCount, kBloatPacketSize.bytes());
+ // Wait to allow the high RTT to be detected and acted upon.
+ s.RunFor(TimeDelta::Seconds(6));
+ // By now the target rate should have dropped to the minimum configured rate.
+ EXPECT_NEAR(client->target_rate().kbps(), kBandwidthFloor.kbps(), 5);
+}
+
+TEST(GoogCcScenario, UpdatesTargetRateBasedOnLinkCapacity) {
+ UpdatesTargetRateBasedOnLinkCapacity();
+}
+
+TEST(GoogCcScenario, StableEstimateDoesNotVaryInSteadyState) {
+ auto factory = CreateFeedbackOnlyFactory();
+ Scenario s("googcc_unit/stable_target", false);
+ CallClientConfig config;
+ config.transport.cc_factory = &factory;
+ NetworkSimulationConfig net_conf;
+ net_conf.bandwidth = DataRate::KilobitsPerSec(500);
+ net_conf.delay = TimeDelta::Millis(100);
+ auto send_net = s.CreateSimulationNode(net_conf);
+ auto ret_net = s.CreateSimulationNode(net_conf);
+
+ auto* client = CreateVideoSendingClient(&s, config, {send_net}, {ret_net});
+ // Run for a while to allow the estimate to stabilize.
+ s.RunFor(TimeDelta::Seconds(30));
+ DataRate min_stable_target = DataRate::PlusInfinity();
+ DataRate max_stable_target = DataRate::MinusInfinity();
+ DataRate min_target = DataRate::PlusInfinity();
+ DataRate max_target = DataRate::MinusInfinity();
+
+ // Measure variation in steady state.
+ for (int i = 0; i < 20; ++i) {
+ auto stable_target_rate = client->stable_target_rate();
+ auto target_rate = client->target_rate();
+ EXPECT_LE(stable_target_rate, target_rate);
+
+ min_stable_target = std::min(min_stable_target, stable_target_rate);
+ max_stable_target = std::max(max_stable_target, stable_target_rate);
+ min_target = std::min(min_target, target_rate);
+ max_target = std::max(max_target, target_rate);
+ s.RunFor(TimeDelta::Seconds(1));
+ }
+  // We expect the target rate to drop by at least 15% (the default backoff).
+ EXPECT_LT(min_target / max_target, 0.85);
+  // We expect the stable target to be more stable than the immediate one.
+ EXPECT_GE(min_stable_target / max_stable_target, min_target / max_target);
+}
+
+TEST(GoogCcScenario, LossBasedControlUpdatesTargetRateBasedOnLinkCapacity) {
+ ScopedFieldTrials trial("WebRTC-Bwe-LossBasedControl/Enabled/");
+ // TODO(srte): Should the behavior be unaffected at low loss rates?
+ UpdatesTargetRateBasedOnLinkCapacity("_loss_based");
+}
+
+TEST(GoogCcScenario, LossBasedControlDoesModestBackoffToHighLoss) {
+ ScopedFieldTrials trial("WebRTC-Bwe-LossBasedControl/Enabled/");
+ Scenario s("googcc_unit/high_loss_channel", false);
+ CallClientConfig config;
+ config.transport.rates.min_rate = DataRate::KilobitsPerSec(10);
+ config.transport.rates.max_rate = DataRate::KilobitsPerSec(1500);
+ config.transport.rates.start_rate = DataRate::KilobitsPerSec(300);
+ auto send_net = s.CreateSimulationNode([](NetworkSimulationConfig* c) {
+ c->bandwidth = DataRate::KilobitsPerSec(2000);
+ c->delay = TimeDelta::Millis(200);
+ c->loss_rate = 0.1;
+ });
+ auto ret_net = s.CreateSimulationNode(
+ [](NetworkSimulationConfig* c) { c->delay = TimeDelta::Millis(200); });
+
+ auto* client = CreateVideoSendingClient(&s, config, {send_net}, {ret_net});
+
+ s.RunFor(TimeDelta::Seconds(120));
+ // Without LossBasedControl trial, bandwidth drops to ~10 kbps.
+ EXPECT_GT(client->target_rate().kbps(), 100);
+}
+
+DataRate AverageBitrateAfterCrossInducedLoss(absl::string_view name) {
+ Scenario s(name, false);
+ NetworkSimulationConfig net_conf;
+ net_conf.bandwidth = DataRate::KilobitsPerSec(1000);
+ net_conf.delay = TimeDelta::Millis(100);
+  // The short queue length means that sudden TCP traffic spikes will induce
+  // loss. A 25-packet limit corresponds to roughly 200 ms of buffering at
+  // 1000 kbps with 1000-byte packets. Such limited buffers are common on, for
+  // instance, Wi-Fi routers.
+ net_conf.packet_queue_length_limit = 25;
+
+ auto send_net = {s.CreateSimulationNode(net_conf)};
+ auto ret_net = {s.CreateSimulationNode(net_conf)};
+
+ auto* client = s.CreateClient("send", CallClientConfig());
+ auto* callee = s.CreateClient("return", CallClientConfig());
+ auto* route = s.CreateRoutes(client, send_net, callee, ret_net);
+ // TODO(srte): Make this work with RTX enabled or remove it.
+ auto* video = s.CreateVideoStream(route->forward(), [](VideoStreamConfig* c) {
+ c->stream.use_rtx = false;
+ });
+ s.RunFor(TimeDelta::Seconds(10));
+ for (int i = 0; i < 4; ++i) {
+ // Sends TCP cross traffic inducing loss.
+ auto* tcp_traffic = s.net()->StartCrossTraffic(CreateFakeTcpCrossTraffic(
+ s.net()->CreateRoute(send_net), s.net()->CreateRoute(ret_net),
+ FakeTcpConfig()));
+ s.RunFor(TimeDelta::Seconds(2));
+    // Allow the congestion controller to recover.
+ s.net()->StopCrossTraffic(tcp_traffic);
+ s.RunFor(TimeDelta::Seconds(20));
+ }
+
+  // Query the video stats from within the expected runtime environment (i.e.
+  // the task queue that belongs to the CallClient, not the Scenario task
+  // queue that we're currently on).
+ VideoReceiveStreamInterface::Stats video_receive_stats;
+ auto* video_stream = video->receive();
+ callee->SendTask([&video_stream, &video_receive_stats]() {
+ video_receive_stats = video_stream->GetStats();
+ });
+ return DataSize::Bytes(
+ video_receive_stats.rtp_stats.packet_counter.TotalBytes()) /
+ s.TimeSinceStart();
+}
+
+TEST(GoogCcScenario, LossBasedRecoversFasterAfterCrossInducedLoss) {
+  // The first run acts as a reference for the second, showing that without
+  // the trial we have worse behavior.
+ DataRate average_bitrate_without_loss_based =
+ AverageBitrateAfterCrossInducedLoss("googcc_unit/no_cross_loss_based");
+
+ // We recover bitrate better when subject to loss spikes from cross traffic
+ // when loss based controller is used.
+ ScopedFieldTrials trial("WebRTC-Bwe-LossBasedControl/Enabled/");
+ DataRate average_bitrate_with_loss_based =
+ AverageBitrateAfterCrossInducedLoss("googcc_unit/cross_loss_based");
+
+ EXPECT_GE(average_bitrate_with_loss_based,
+ average_bitrate_without_loss_based * 1.05);
+}
+
+TEST(GoogCcScenario, LossBasedEstimatorCapsRateAtModerateLoss) {
+ ScopedFieldTrials trial("WebRTC-Bwe-LossBasedControl/Enabled/");
+ Scenario s("googcc_unit/moderate_loss_channel", false);
+ CallClientConfig config;
+ config.transport.rates.min_rate = DataRate::KilobitsPerSec(10);
+ config.transport.rates.max_rate = DataRate::KilobitsPerSec(5000);
+ config.transport.rates.start_rate = DataRate::KilobitsPerSec(1000);
+
+ NetworkSimulationConfig network;
+ network.bandwidth = DataRate::KilobitsPerSec(2000);
+ network.delay = TimeDelta::Millis(100);
+ // 3% loss rate is in the moderate loss rate region at 2000 kbps, limiting the
+ // bitrate increase.
+ network.loss_rate = 0.03;
+ auto send_net = s.CreateMutableSimulationNode(network);
+ auto* client = s.CreateClient("send", std::move(config));
+ auto* route = s.CreateRoutes(client, {send_net->node()},
+ s.CreateClient("return", CallClientConfig()),
+ {s.CreateSimulationNode(network)});
+ s.CreateVideoStream(route->forward(), VideoStreamConfig());
+ // Allow the controller to stabilize at the lower bitrate.
+ s.RunFor(TimeDelta::Seconds(1));
+ // This increase in capacity would cause the target bitrate to increase to
+ // over 4000 kbps without LossBasedControl.
+ send_net->UpdateConfig([](NetworkSimulationConfig* c) {
+ c->bandwidth = DataRate::KilobitsPerSec(5000);
+ });
+ s.RunFor(TimeDelta::Seconds(20));
+ // Using LossBasedControl, the bitrate will not increase over 2500 kbps since
+ // we have detected moderate loss.
+ EXPECT_LT(client->target_rate().kbps(), 2500);
+}
+
+TEST(GoogCcScenario, MaintainsLowRateInSafeResetTrial) {
+ const DataRate kLinkCapacity = DataRate::KilobitsPerSec(200);
+ const DataRate kStartRate = DataRate::KilobitsPerSec(300);
+
+ ScopedFieldTrials trial("WebRTC-Bwe-SafeResetOnRouteChange/Enabled/");
+ Scenario s("googcc_unit/safe_reset_low");
+ auto* send_net = s.CreateSimulationNode([&](NetworkSimulationConfig* c) {
+ c->bandwidth = kLinkCapacity;
+ c->delay = TimeDelta::Millis(10);
+ });
+ auto* client = s.CreateClient("send", [&](CallClientConfig* c) {
+ c->transport.rates.start_rate = kStartRate;
+ });
+ auto* route = s.CreateRoutes(
+ client, {send_net}, s.CreateClient("return", CallClientConfig()),
+ {s.CreateSimulationNode(NetworkSimulationConfig())});
+ s.CreateVideoStream(route->forward(), VideoStreamConfig());
+ // Allow the controller to stabilize.
+ s.RunFor(TimeDelta::Millis(500));
+ EXPECT_NEAR(client->send_bandwidth().kbps(), kLinkCapacity.kbps(), 50);
+ s.ChangeRoute(route->forward(), {send_net});
+ // Allow new settings to propagate.
+ s.RunFor(TimeDelta::Millis(100));
+ // Under the trial, the target should be unchanged for low rates.
+ EXPECT_NEAR(client->send_bandwidth().kbps(), kLinkCapacity.kbps(), 50);
+}
+
+TEST(GoogCcScenario, CutsHighRateInSafeResetTrial) {
+ const DataRate kLinkCapacity = DataRate::KilobitsPerSec(1000);
+ const DataRate kStartRate = DataRate::KilobitsPerSec(300);
+
+ ScopedFieldTrials trial("WebRTC-Bwe-SafeResetOnRouteChange/Enabled/");
+ Scenario s("googcc_unit/safe_reset_high_cut");
+ auto send_net = s.CreateSimulationNode([&](NetworkSimulationConfig* c) {
+ c->bandwidth = kLinkCapacity;
+ c->delay = TimeDelta::Millis(50);
+ });
+ auto* client = s.CreateClient("send", [&](CallClientConfig* c) {
+ c->transport.rates.start_rate = kStartRate;
+ });
+ auto* route = s.CreateRoutes(
+ client, {send_net}, s.CreateClient("return", CallClientConfig()),
+ {s.CreateSimulationNode(NetworkSimulationConfig())});
+ s.CreateVideoStream(route->forward(), VideoStreamConfig());
+ // Allow the controller to stabilize.
+ s.RunFor(TimeDelta::Millis(500));
+ EXPECT_NEAR(client->send_bandwidth().kbps(), kLinkCapacity.kbps(), 300);
+ s.ChangeRoute(route->forward(), {send_net});
+ // Allow new settings to propagate.
+ s.RunFor(TimeDelta::Millis(50));
+ // Under the trial, the target should be reset from high values.
+ EXPECT_NEAR(client->send_bandwidth().kbps(), kStartRate.kbps(), 30);
+}
+
+TEST(GoogCcScenario, DetectsHighRateInSafeResetTrial) {
+ ScopedFieldTrials trial(
+ "WebRTC-Bwe-SafeResetOnRouteChange/Enabled,ack/"
+ "WebRTC-SendSideBwe-WithOverhead/Enabled/");
+ const DataRate kInitialLinkCapacity = DataRate::KilobitsPerSec(200);
+ const DataRate kNewLinkCapacity = DataRate::KilobitsPerSec(800);
+ const DataRate kStartRate = DataRate::KilobitsPerSec(300);
+
+ Scenario s("googcc_unit/safe_reset_high_detect");
+ auto* initial_net = s.CreateSimulationNode([&](NetworkSimulationConfig* c) {
+ c->bandwidth = kInitialLinkCapacity;
+ c->delay = TimeDelta::Millis(50);
+ });
+ auto* new_net = s.CreateSimulationNode([&](NetworkSimulationConfig* c) {
+ c->bandwidth = kNewLinkCapacity;
+ c->delay = TimeDelta::Millis(50);
+ });
+ auto* client = s.CreateClient("send", [&](CallClientConfig* c) {
+ c->transport.rates.start_rate = kStartRate;
+ });
+ auto* route = s.CreateRoutes(
+ client, {initial_net}, s.CreateClient("return", CallClientConfig()),
+ {s.CreateSimulationNode(NetworkSimulationConfig())});
+ s.CreateVideoStream(route->forward(), VideoStreamConfig());
+ // Allow the controller to stabilize.
+ s.RunFor(TimeDelta::Millis(2000));
+ EXPECT_NEAR(client->send_bandwidth().kbps(), kInitialLinkCapacity.kbps(), 50);
+ s.ChangeRoute(route->forward(), {new_net});
+ // Allow new settings to propagate, but not probes to be received.
+ s.RunFor(TimeDelta::Millis(50));
+ // Under the field trial, the target rate should be unchanged since it's lower
+ // than the starting rate.
+ EXPECT_NEAR(client->send_bandwidth().kbps(), kInitialLinkCapacity.kbps(), 50);
+ // However, probing should have made us detect the higher rate.
+  // NOTE: This test causes a high loss rate, and the loss-based estimator
+  // reduces the bitrate, making the test fail if we wait longer than one
+  // second here.
+ s.RunFor(TimeDelta::Millis(1000));
+ EXPECT_GT(client->send_bandwidth().kbps(), kNewLinkCapacity.kbps() - 300);
+}
+
+TEST(GoogCcScenario, TargetRateReducedOnPacingBufferBuildupInTrial) {
+ // Configure strict pacing to ensure build-up.
+ ScopedFieldTrials trial(
+ "WebRTC-CongestionWindow/QueueSize:100,MinBitrate:30000/"
+ "WebRTC-Video-Pacing/factor:1.0/"
+ "WebRTC-AddPacingToCongestionWindowPushback/Enabled/");
+
+ const DataRate kLinkCapacity = DataRate::KilobitsPerSec(1000);
+ const DataRate kStartRate = DataRate::KilobitsPerSec(1000);
+
+ Scenario s("googcc_unit/pacing_buffer_buildup");
+ auto* net = s.CreateSimulationNode([&](NetworkSimulationConfig* c) {
+ c->bandwidth = kLinkCapacity;
+ c->delay = TimeDelta::Millis(50);
+ });
+ auto* client = s.CreateClient("send", [&](CallClientConfig* c) {
+ c->transport.rates.start_rate = kStartRate;
+ });
+ auto* route = s.CreateRoutes(
+ client, {net}, s.CreateClient("return", CallClientConfig()),
+ {s.CreateSimulationNode(NetworkSimulationConfig())});
+ s.CreateVideoStream(route->forward(), VideoStreamConfig());
+ // Allow some time for the buffer to build up.
+ s.RunFor(TimeDelta::Seconds(5));
+
+ // Without trial, pacer delay reaches ~250 ms.
+ EXPECT_LT(client->GetStats().pacer_delay_ms, 150);
+}
+
+TEST(GoogCcScenario, NoBandwidthTogglingInLossControlTrial) {
+ ScopedFieldTrials trial("WebRTC-Bwe-LossBasedControl/Enabled/");
+ Scenario s("googcc_unit/no_toggling");
+ auto* send_net = s.CreateSimulationNode([&](NetworkSimulationConfig* c) {
+ c->bandwidth = DataRate::KilobitsPerSec(2000);
+ c->loss_rate = 0.2;
+ c->delay = TimeDelta::Millis(10);
+ });
+
+ auto* client = s.CreateClient("send", [&](CallClientConfig* c) {
+ c->transport.rates.start_rate = DataRate::KilobitsPerSec(300);
+ });
+ auto* route = s.CreateRoutes(
+ client, {send_net}, s.CreateClient("return", CallClientConfig()),
+ {s.CreateSimulationNode(NetworkSimulationConfig())});
+ s.CreateVideoStream(route->forward(), VideoStreamConfig());
+ // Allow the controller to initialize.
+ s.RunFor(TimeDelta::Millis(250));
+
+ std::queue<DataRate> bandwidth_history;
+ const TimeDelta step = TimeDelta::Millis(50);
+ for (TimeDelta time = TimeDelta::Zero(); time < TimeDelta::Millis(2000);
+ time += step) {
+ s.RunFor(step);
+ const TimeDelta window = TimeDelta::Millis(500);
+ if (bandwidth_history.size() >= window / step)
+ bandwidth_history.pop();
+ bandwidth_history.push(client->send_bandwidth());
+ EXPECT_LT(
+ CountBandwidthDips(bandwidth_history, DataRate::KilobitsPerSec(100)),
+ 2);
+ }
+}
+
+TEST(GoogCcScenario, NoRttBackoffCollapseWhenVideoStops) {
+ ScopedFieldTrials trial("WebRTC-Bwe-MaxRttLimit/limit:2s/");
+ Scenario s("googcc_unit/rttbackoff_video_stop");
+ auto* send_net = s.CreateSimulationNode([&](NetworkSimulationConfig* c) {
+ c->bandwidth = DataRate::KilobitsPerSec(2000);
+ c->delay = TimeDelta::Millis(100);
+ });
+
+ auto* client = s.CreateClient("send", [&](CallClientConfig* c) {
+ c->transport.rates.start_rate = DataRate::KilobitsPerSec(1000);
+ });
+ auto* route = s.CreateRoutes(
+ client, {send_net}, s.CreateClient("return", CallClientConfig()),
+ {s.CreateSimulationNode(NetworkSimulationConfig())});
+ auto* video = s.CreateVideoStream(route->forward(), VideoStreamConfig());
+ // Allow the controller to initialize, then stop video.
+ s.RunFor(TimeDelta::Seconds(1));
+ video->send()->Stop();
+ s.RunFor(TimeDelta::Seconds(4));
+ EXPECT_GT(client->send_bandwidth().kbps(), 1000);
+}
+
+TEST(GoogCcScenario, NoCrashOnVeryLateFeedback) {
+ Scenario s;
+ auto ret_net = s.CreateMutableSimulationNode(NetworkSimulationConfig());
+ auto* route = s.CreateRoutes(
+ s.CreateClient("send", CallClientConfig()),
+ {s.CreateSimulationNode(NetworkSimulationConfig())},
+ s.CreateClient("return", CallClientConfig()), {ret_net->node()});
+ auto* video = s.CreateVideoStream(route->forward(), VideoStreamConfig());
+ s.RunFor(TimeDelta::Seconds(5));
+  // Delay feedback by several minutes. This will cause the send-time history
+  // for the packets to be removed, as long as kSendTimeHistoryWindow is
+  // configured for a shorter time span.
+ ret_net->PauseTransmissionUntil(s.Now() + TimeDelta::Seconds(300));
+  // Stop the video stream while waiting, to save test execution time.
+ video->send()->Stop();
+ s.RunFor(TimeDelta::Seconds(299));
+  // Restart the stream to add new packets to the history, which causes old
+  // packets to be removed.
+ video->send()->Start();
+  // Run until the delayed packets are received. We expect this to complete
+  // without causing any runtime failures.
+ s.RunFor(TimeDelta::Seconds(2));
+}
+
+TEST(GoogCcScenario, IsFairToTCP) {
+ Scenario s("googcc_unit/tcp_fairness");
+ NetworkSimulationConfig net_conf;
+ net_conf.bandwidth = DataRate::KilobitsPerSec(1000);
+ net_conf.delay = TimeDelta::Millis(50);
+ auto* client = s.CreateClient("send", [&](CallClientConfig* c) {
+ c->transport.rates.start_rate = DataRate::KilobitsPerSec(1000);
+ });
+ auto send_net = {s.CreateSimulationNode(net_conf)};
+ auto ret_net = {s.CreateSimulationNode(net_conf)};
+ auto* route = s.CreateRoutes(
+ client, send_net, s.CreateClient("return", CallClientConfig()), ret_net);
+ s.CreateVideoStream(route->forward(), VideoStreamConfig());
+ s.net()->StartCrossTraffic(CreateFakeTcpCrossTraffic(
+ s.net()->CreateRoute(send_net), s.net()->CreateRoute(ret_net),
+ FakeTcpConfig()));
+ s.RunFor(TimeDelta::Seconds(10));
+
+  // Currently we only test the upper limit, since in practice we back off
+  // quite a lot in this scenario. If this behavior is fixed, we should add a
+  // lower bound to ensure it stays fixed.
+ EXPECT_LT(client->send_bandwidth().kbps(), 750);
+}
+
+TEST(GoogCcScenario, FastRampupOnRembCapLifted) {
+ DataRate final_estimate =
+ RunRembDipScenario("googcc_unit/default_fast_rampup_on_remb_cap_lifted");
+ EXPECT_GT(final_estimate.kbps(), 1500);
+}
+
+TEST(GoogCcScenario, SlowRampupOnRembCapLiftedWithFieldTrial) {
+ ScopedFieldTrials trial("WebRTC-Bwe-ReceiverLimitCapsOnly/Disabled/");
+ DataRate final_estimate =
+ RunRembDipScenario("googcc_unit/legacy_slow_rampup_on_remb_cap_lifted");
+ EXPECT_LT(final_estimate.kbps(), 1000);
+}
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/congestion_controller/goog_cc/inter_arrival_delta.cc b/third_party/libwebrtc/modules/congestion_controller/goog_cc/inter_arrival_delta.cc
new file mode 100644
index 0000000000..ecd5742272
--- /dev/null
+++ b/third_party/libwebrtc/modules/congestion_controller/goog_cc/inter_arrival_delta.cc
@@ -0,0 +1,140 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/congestion_controller/goog_cc/inter_arrival_delta.h"
+
+#include <algorithm>
+
+#include "api/units/time_delta.h"
+#include "api/units/timestamp.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+static constexpr TimeDelta kBurstDeltaThreshold = TimeDelta::Millis(5);
+static constexpr TimeDelta kMaxBurstDuration = TimeDelta::Millis(100);
+constexpr TimeDelta InterArrivalDelta::kArrivalTimeOffsetThreshold;
+
+InterArrivalDelta::InterArrivalDelta(TimeDelta send_time_group_length)
+ : send_time_group_length_(send_time_group_length),
+ current_timestamp_group_(),
+ prev_timestamp_group_(),
+ num_consecutive_reordered_packets_(0) {}
+
+bool InterArrivalDelta::ComputeDeltas(Timestamp send_time,
+ Timestamp arrival_time,
+ Timestamp system_time,
+ size_t packet_size,
+ TimeDelta* send_time_delta,
+ TimeDelta* arrival_time_delta,
+ int* packet_size_delta) {
+ bool calculated_deltas = false;
+ if (current_timestamp_group_.IsFirstPacket()) {
+    // We don't have enough data to update the filter, so we store it until we
+    // have two groups of data to process.
+ current_timestamp_group_.send_time = send_time;
+ current_timestamp_group_.first_send_time = send_time;
+ current_timestamp_group_.first_arrival = arrival_time;
+ } else if (current_timestamp_group_.first_send_time > send_time) {
+ // Reordered packet.
+ return false;
+ } else if (NewTimestampGroup(arrival_time, send_time)) {
+    // First packet of a later send burst; the previous group's sample is ready.
+ if (prev_timestamp_group_.complete_time.IsFinite()) {
+ *send_time_delta =
+ current_timestamp_group_.send_time - prev_timestamp_group_.send_time;
+ *arrival_time_delta = current_timestamp_group_.complete_time -
+ prev_timestamp_group_.complete_time;
+
+ TimeDelta system_time_delta = current_timestamp_group_.last_system_time -
+ prev_timestamp_group_.last_system_time;
+
+ if (*arrival_time_delta - system_time_delta >=
+ kArrivalTimeOffsetThreshold) {
+ RTC_LOG(LS_WARNING)
+ << "The arrival time clock offset has changed (diff = "
+ << arrival_time_delta->ms() - system_time_delta.ms()
+ << " ms), resetting.";
+ Reset();
+ return false;
+ }
+ if (*arrival_time_delta < TimeDelta::Zero()) {
+ // The group of packets has been reordered since receiving its local
+ // arrival timestamp.
+ ++num_consecutive_reordered_packets_;
+ if (num_consecutive_reordered_packets_ >= kReorderedResetThreshold) {
+ RTC_LOG(LS_WARNING)
+ << "Packets between send burst arrived out of order, resetting."
+            << " arrival_time_delta " << arrival_time_delta->ms()
+ << " send time delta " << send_time_delta->ms();
+ Reset();
+ }
+ return false;
+ } else {
+ num_consecutive_reordered_packets_ = 0;
+ }
+ *packet_size_delta = static_cast<int>(current_timestamp_group_.size) -
+ static_cast<int>(prev_timestamp_group_.size);
+ calculated_deltas = true;
+ }
+ prev_timestamp_group_ = current_timestamp_group_;
+    // The new group is now the current group.
+ current_timestamp_group_.first_send_time = send_time;
+ current_timestamp_group_.send_time = send_time;
+ current_timestamp_group_.first_arrival = arrival_time;
+ current_timestamp_group_.size = 0;
+ } else {
+ current_timestamp_group_.send_time =
+ std::max(current_timestamp_group_.send_time, send_time);
+ }
+  // Accumulate the group size.
+ current_timestamp_group_.size += packet_size;
+ current_timestamp_group_.complete_time = arrival_time;
+ current_timestamp_group_.last_system_time = system_time;
+
+ return calculated_deltas;
+}
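+
+// A minimal usage sketch (hypothetical caller, not part of this file),
+// assuming a 5 ms send-time group length and a `packet` struct and `clock`
+// with the members used below:
+//
+//   InterArrivalDelta inter_arrival(TimeDelta::Millis(5));
+//   TimeDelta send_delta, arrival_delta;
+//   int size_delta;
+//   if (inter_arrival.ComputeDeltas(packet.send_time, packet.arrival_time,
+//                                   clock.CurrentTime(), packet.size,
+//                                   &send_delta, &arrival_delta,
+//                                   &size_delta)) {
+//     // arrival_delta - send_delta approximates the change in queuing delay
+//     // between consecutive packet groups.
+//   }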
+
+// Assumes that `timestamp` is not reordered compared to
+// `current_timestamp_group_`.
+bool InterArrivalDelta::NewTimestampGroup(Timestamp arrival_time,
+ Timestamp send_time) const {
+ if (current_timestamp_group_.IsFirstPacket()) {
+ return false;
+ } else if (BelongsToBurst(arrival_time, send_time)) {
+ return false;
+ } else {
+ return send_time - current_timestamp_group_.first_send_time >
+ send_time_group_length_;
+ }
+}
+
+bool InterArrivalDelta::BelongsToBurst(Timestamp arrival_time,
+ Timestamp send_time) const {
+ RTC_DCHECK(current_timestamp_group_.complete_time.IsFinite());
+ TimeDelta arrival_time_delta =
+ arrival_time - current_timestamp_group_.complete_time;
+ TimeDelta send_time_delta = send_time - current_timestamp_group_.send_time;
+ if (send_time_delta.IsZero())
+ return true;
+ TimeDelta propagation_delta = arrival_time_delta - send_time_delta;
+ if (propagation_delta < TimeDelta::Zero() &&
+ arrival_time_delta <= kBurstDeltaThreshold &&
+ arrival_time - current_timestamp_group_.first_arrival < kMaxBurstDuration)
+ return true;
+ return false;
+}
+
+void InterArrivalDelta::Reset() {
+ num_consecutive_reordered_packets_ = 0;
+ current_timestamp_group_ = SendTimeGroup();
+ prev_timestamp_group_ = SendTimeGroup();
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/congestion_controller/goog_cc/inter_arrival_delta.h b/third_party/libwebrtc/modules/congestion_controller/goog_cc/inter_arrival_delta.h
new file mode 100644
index 0000000000..0617e34cdd
--- /dev/null
+++ b/third_party/libwebrtc/modules/congestion_controller/goog_cc/inter_arrival_delta.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_CONGESTION_CONTROLLER_GOOG_CC_INTER_ARRIVAL_DELTA_H_
+#define MODULES_CONGESTION_CONTROLLER_GOOG_CC_INTER_ARRIVAL_DELTA_H_
+
+#include "api/units/time_delta.h"
+#include "api/units/timestamp.h"
+
+namespace webrtc {
+
+// Helper class to compute the inter-arrival time delta and the size delta
+// between two send bursts. This code is branched from
+// modules/remote_bitrate_estimator/inter_arrival.
+class InterArrivalDelta {
+ public:
+  // After this many packet groups have been received out of order,
+  // InterArrivalDelta will reset, assuming that the clocks have made a jump.
+ static constexpr int kReorderedResetThreshold = 3;
+ static constexpr TimeDelta kArrivalTimeOffsetThreshold =
+ TimeDelta::Seconds(3);
+
+  // A send-time group is defined as all packets with a send time that is at
+  // most `send_time_group_length` newer than the first send time in that
+  // group.
+ explicit InterArrivalDelta(TimeDelta send_time_group_length);
+
+ InterArrivalDelta() = delete;
+ InterArrivalDelta(const InterArrivalDelta&) = delete;
+ InterArrivalDelta& operator=(const InterArrivalDelta&) = delete;
+
+  // This function returns true if a delta was computed, or false if the
+  // current group is still incomplete or if only one group has been completed.
+  // `send_time` is the send time.
+  // `arrival_time` is the time at which the packet arrived.
+  // `packet_size` is the size of the packet.
+  // `send_time_delta` (output) is the computed send-time delta.
+  // `arrival_time_delta` (output) is the computed arrival-time delta.
+  // `packet_size_delta` (output) is the computed size delta.
+ bool ComputeDeltas(Timestamp send_time,
+ Timestamp arrival_time,
+ Timestamp system_time,
+ size_t packet_size,
+ TimeDelta* send_time_delta,
+ TimeDelta* arrival_time_delta,
+ int* packet_size_delta);
+
+ private:
+ struct SendTimeGroup {
+ SendTimeGroup()
+ : size(0),
+ first_send_time(Timestamp::MinusInfinity()),
+ send_time(Timestamp::MinusInfinity()),
+ first_arrival(Timestamp::MinusInfinity()),
+ complete_time(Timestamp::MinusInfinity()),
+ last_system_time(Timestamp::MinusInfinity()) {}
+
+ bool IsFirstPacket() const { return complete_time.IsInfinite(); }
+
+ size_t size;
+ Timestamp first_send_time;
+ Timestamp send_time;
+ Timestamp first_arrival;
+ Timestamp complete_time;
+ Timestamp last_system_time;
+ };
+
+ // Returns true if the last packet was the end of the current batch and the
+ // packet with `send_time` is the first of a new batch.
+ bool NewTimestampGroup(Timestamp arrival_time, Timestamp send_time) const;
+
+ bool BelongsToBurst(Timestamp arrival_time, Timestamp send_time) const;
+
+ void Reset();
+
+ const TimeDelta send_time_group_length_;
+ SendTimeGroup current_timestamp_group_;
+ SendTimeGroup prev_timestamp_group_;
+ int num_consecutive_reordered_packets_;
+};
+} // namespace webrtc
+
+#endif // MODULES_CONGESTION_CONTROLLER_GOOG_CC_INTER_ARRIVAL_DELTA_H_
diff --git a/third_party/libwebrtc/modules/congestion_controller/goog_cc/link_capacity_estimator.cc b/third_party/libwebrtc/modules/congestion_controller/goog_cc/link_capacity_estimator.cc
new file mode 100644
index 0000000000..9fd537a422
--- /dev/null
+++ b/third_party/libwebrtc/modules/congestion_controller/goog_cc/link_capacity_estimator.cc
@@ -0,0 +1,77 @@
+/*
+ * Copyright 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/congestion_controller/goog_cc/link_capacity_estimator.h"
+
+#include <algorithm>
+
+#include "rtc_base/numerics/safe_minmax.h"
+
+namespace webrtc {
+LinkCapacityEstimator::LinkCapacityEstimator() {}
+
+DataRate LinkCapacityEstimator::UpperBound() const {
+ if (estimate_kbps_.has_value())
+ return DataRate::KilobitsPerSec(estimate_kbps_.value() +
+ 3 * deviation_estimate_kbps());
+ return DataRate::Infinity();
+}
+
+DataRate LinkCapacityEstimator::LowerBound() const {
+ if (estimate_kbps_.has_value())
+ return DataRate::KilobitsPerSec(
+ std::max(0.0, estimate_kbps_.value() - 3 * deviation_estimate_kbps()));
+ return DataRate::Zero();
+}
+
+void LinkCapacityEstimator::Reset() {
+ estimate_kbps_.reset();
+}
+
+void LinkCapacityEstimator::OnOveruseDetected(DataRate acknowledged_rate) {
+ Update(acknowledged_rate, 0.05);
+}
+
+void LinkCapacityEstimator::OnProbeRate(DataRate probe_rate) {
+ Update(probe_rate, 0.5);
+}
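+
+// Note that probe samples use a much larger smoothing factor (0.5) than
+// overuse samples (0.05), so a single probe result moves the estimate half
+// of the way towards the sampled rate.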
+
+void LinkCapacityEstimator::Update(DataRate capacity_sample, double alpha) {
+ double sample_kbps = capacity_sample.kbps();
+ if (!estimate_kbps_.has_value()) {
+ estimate_kbps_ = sample_kbps;
+ } else {
+ estimate_kbps_ = (1 - alpha) * estimate_kbps_.value() + alpha * sample_kbps;
+ }
+  // Estimate the variance of the link capacity estimate, normalized by the
+  // estimate itself.
+ const double norm = std::max(estimate_kbps_.value(), 1.0);
+ double error_kbps = estimate_kbps_.value() - sample_kbps;
+ deviation_kbps_ =
+ (1 - alpha) * deviation_kbps_ + alpha * error_kbps * error_kbps / norm;
+  // Since the deviation is normalized by the estimate, these bounds map to
+  // standard deviations of sqrt(0.4 * 500) ~= 14 kbit/s and
+  // sqrt(2.5 * 500) ~= 35 kbit/s at an estimate of 500 kbit/s.
+ deviation_kbps_ = rtc::SafeClamp(deviation_kbps_, 0.4f, 2.5f);
+}
+
+bool LinkCapacityEstimator::has_estimate() const {
+ return estimate_kbps_.has_value();
+}
+
+DataRate LinkCapacityEstimator::estimate() const {
+ return DataRate::KilobitsPerSec(*estimate_kbps_);
+}
+
+double LinkCapacityEstimator::deviation_estimate_kbps() const {
+  // Calculate the bitrate standard deviation from the normalized variance
+  // and the current estimate. The standard deviation is only used when
+  // `estimate_kbps_` has a value.
+ return sqrt(deviation_kbps_ * estimate_kbps_.value());
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/congestion_controller/goog_cc/link_capacity_estimator.h b/third_party/libwebrtc/modules/congestion_controller/goog_cc/link_capacity_estimator.h
new file mode 100644
index 0000000000..aa23491d9d
--- /dev/null
+++ b/third_party/libwebrtc/modules/congestion_controller/goog_cc/link_capacity_estimator.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef MODULES_CONGESTION_CONTROLLER_GOOG_CC_LINK_CAPACITY_ESTIMATOR_H_
+#define MODULES_CONGESTION_CONTROLLER_GOOG_CC_LINK_CAPACITY_ESTIMATOR_H_
+
+#include "absl/types/optional.h"
+#include "api/units/data_rate.h"
+
+namespace webrtc {
+class LinkCapacityEstimator {
+ public:
+ LinkCapacityEstimator();
+ DataRate UpperBound() const;
+ DataRate LowerBound() const;
+ void Reset();
+ void OnOveruseDetected(DataRate acknowledged_rate);
+ void OnProbeRate(DataRate probe_rate);
+ bool has_estimate() const;
+ DataRate estimate() const;
+
+ private:
+ friend class GoogCcStatePrinter;
+ void Update(DataRate capacity_sample, double alpha);
+
+ double deviation_estimate_kbps() const;
+ absl::optional<double> estimate_kbps_;
+ double deviation_kbps_ = 0.4;
+};
+} // namespace webrtc
+
+#endif // MODULES_CONGESTION_CONTROLLER_GOOG_CC_LINK_CAPACITY_ESTIMATOR_H_
diff --git a/third_party/libwebrtc/modules/congestion_controller/goog_cc/link_capacity_estimator_gn/moz.build b/third_party/libwebrtc/modules/congestion_controller/goog_cc/link_capacity_estimator_gn/moz.build
new file mode 100644
index 0000000000..b153a0e354
--- /dev/null
+++ b/third_party/libwebrtc/modules/congestion_controller/goog_cc/link_capacity_estimator_gn/moz.build
@@ -0,0 +1,201 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/congestion_controller/goog_cc/link_capacity_estimator.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("link_capacity_estimator_gn")
diff --git a/third_party/libwebrtc/modules/congestion_controller/goog_cc/loss_based_bandwidth_estimation.cc b/third_party/libwebrtc/modules/congestion_controller/goog_cc/loss_based_bandwidth_estimation.cc
new file mode 100644
index 0000000000..7524c84d92
--- /dev/null
+++ b/third_party/libwebrtc/modules/congestion_controller/goog_cc/loss_based_bandwidth_estimation.cc
@@ -0,0 +1,260 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/congestion_controller/goog_cc/loss_based_bandwidth_estimation.h"
+
+#include <algorithm>
+#include <string>
+#include <vector>
+
+#include "absl/strings/match.h"
+#include "api/units/data_rate.h"
+#include "api/units/time_delta.h"
+
+namespace webrtc {
+namespace {
+const char kBweLossBasedControl[] = "WebRTC-Bwe-LossBasedControl";
+
+// Since RTCP feedback is expected to be sent at roughly 1 s intervals, a 5 s
+// gap indicates a channel outage.
+constexpr TimeDelta kMaxRtcpFeedbackInterval = TimeDelta::Millis(5000);
+
+// Increase slower when RTT is high.
+double GetIncreaseFactor(const LossBasedControlConfig& config, TimeDelta rtt) {
+ // Clamp the RTT
+ if (rtt < config.increase_low_rtt) {
+ rtt = config.increase_low_rtt;
+ } else if (rtt > config.increase_high_rtt) {
+ rtt = config.increase_high_rtt;
+ }
+ auto rtt_range = config.increase_high_rtt.Get() - config.increase_low_rtt;
+ if (rtt_range <= TimeDelta::Zero()) {
+ RTC_DCHECK_NOTREACHED(); // Only on misconfiguration.
+ return config.min_increase_factor;
+ }
+ auto rtt_offset = rtt - config.increase_low_rtt;
+ auto relative_offset = std::max(0.0, std::min(rtt_offset / rtt_range, 1.0));
+ auto factor_range = config.max_increase_factor - config.min_increase_factor;
+ return config.min_increase_factor + (1 - relative_offset) * factor_range;
+}
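+
+// Worked example with the default trial values below (min_incr = 1.02,
+// max_incr = 1.08, incr_low_rtt = 200 ms, incr_high_rtt = 800 ms): an RTT of
+// 350 ms gives relative_offset = (350 - 200) / (800 - 200) = 0.25, so the
+// increase factor is 1.02 + (1 - 0.25) * 0.06 = 1.065.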
+
+double LossFromBitrate(DataRate bitrate,
+ DataRate loss_bandwidth_balance,
+ double exponent) {
+ if (loss_bandwidth_balance >= bitrate)
+ return 1.0;
+ return pow(loss_bandwidth_balance / bitrate, exponent);
+}
+
+DataRate BitrateFromLoss(double loss,
+ DataRate loss_bandwidth_balance,
+ double exponent) {
+ if (exponent <= 0) {
+ RTC_DCHECK_NOTREACHED();
+ return DataRate::Infinity();
+ }
+ if (loss < 1e-5)
+ return DataRate::Infinity();
+ return loss_bandwidth_balance * pow(loss, -1.0 / exponent);
+}
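+
+// LossFromBitrate() and BitrateFromLoss() are inverses of each other: with a
+// balance of 4 kbps and exponent 0.5, a 4% loss rate maps to
+// 4 * 0.04^(-1/0.5) = 4 * 625 = 2500 kbps, and LossFromBitrate(2500 kbps,
+// 4 kbps, 0.5) recovers (4 / 2500)^0.5 = 0.04.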
+
+double ExponentialUpdate(TimeDelta window, TimeDelta interval) {
+ // Use the convention that exponential window length (which is really
+ // infinite) is the time it takes to dampen to 1/e.
+ if (window <= TimeDelta::Zero()) {
+ RTC_DCHECK_NOTREACHED();
+ return 1.0f;
+ }
+ return 1.0f - exp(interval / window * -1.0);
+}
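+
+// For example, with an 800 ms window and a 200 ms update interval, the
+// returned weight is 1 - e^(-200/800) ~= 0.22, so each update moves the
+// average about 22% of the way towards the new sample.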
+
+bool IsEnabled(const webrtc::FieldTrialsView& key_value_config,
+ absl::string_view name) {
+ return absl::StartsWith(key_value_config.Lookup(name), "Enabled");
+}
+
+} // namespace
+
+LossBasedControlConfig::LossBasedControlConfig(
+ const FieldTrialsView* key_value_config)
+ : enabled(IsEnabled(*key_value_config, kBweLossBasedControl)),
+ min_increase_factor("min_incr", 1.02),
+ max_increase_factor("max_incr", 1.08),
+ increase_low_rtt("incr_low_rtt", TimeDelta::Millis(200)),
+ increase_high_rtt("incr_high_rtt", TimeDelta::Millis(800)),
+ decrease_factor("decr", 0.99),
+ loss_window("loss_win", TimeDelta::Millis(800)),
+ loss_max_window("loss_max_win", TimeDelta::Millis(800)),
+ acknowledged_rate_max_window("ackrate_max_win", TimeDelta::Millis(800)),
+ increase_offset("incr_offset", DataRate::BitsPerSec(1000)),
+ loss_bandwidth_balance_increase("balance_incr",
+ DataRate::KilobitsPerSec(0.5)),
+ loss_bandwidth_balance_decrease("balance_decr",
+ DataRate::KilobitsPerSec(4)),
+ loss_bandwidth_balance_reset("balance_reset",
+ DataRate::KilobitsPerSec(0.1)),
+ loss_bandwidth_balance_exponent("exponent", 0.5),
+ allow_resets("resets", false),
+ decrease_interval("decr_intvl", TimeDelta::Millis(300)),
+ loss_report_timeout("timeout", TimeDelta::Millis(6000)) {
+ ParseFieldTrial(
+ {&min_increase_factor, &max_increase_factor, &increase_low_rtt,
+ &increase_high_rtt, &decrease_factor, &loss_window, &loss_max_window,
+ &acknowledged_rate_max_window, &increase_offset,
+ &loss_bandwidth_balance_increase, &loss_bandwidth_balance_decrease,
+ &loss_bandwidth_balance_reset, &loss_bandwidth_balance_exponent,
+ &allow_resets, &decrease_interval, &loss_report_timeout},
+ key_value_config->Lookup(kBweLossBasedControl));
+}
+LossBasedControlConfig::LossBasedControlConfig(const LossBasedControlConfig&) =
+ default;
+LossBasedControlConfig::~LossBasedControlConfig() = default;
+
+LossBasedBandwidthEstimation::LossBasedBandwidthEstimation(
+ const FieldTrialsView* key_value_config)
+ : config_(key_value_config),
+ average_loss_(0),
+ average_loss_max_(0),
+ loss_based_bitrate_(DataRate::Zero()),
+ acknowledged_bitrate_max_(DataRate::Zero()),
+ acknowledged_bitrate_last_update_(Timestamp::MinusInfinity()),
+ time_last_decrease_(Timestamp::MinusInfinity()),
+ has_decreased_since_last_loss_report_(false),
+ last_loss_packet_report_(Timestamp::MinusInfinity()),
+ last_loss_ratio_(0) {}
+
+void LossBasedBandwidthEstimation::UpdateLossStatistics(
+ const std::vector<PacketResult>& packet_results,
+ Timestamp at_time) {
+ if (packet_results.empty()) {
+ RTC_DCHECK_NOTREACHED();
+ return;
+ }
+ int loss_count = 0;
+ for (const auto& pkt : packet_results) {
+ loss_count += !pkt.IsReceived() ? 1 : 0;
+ }
+ last_loss_ratio_ = static_cast<double>(loss_count) / packet_results.size();
+ const TimeDelta time_passed = last_loss_packet_report_.IsFinite()
+ ? at_time - last_loss_packet_report_
+ : TimeDelta::Seconds(1);
+ last_loss_packet_report_ = at_time;
+ has_decreased_since_last_loss_report_ = false;
+
+ average_loss_ += ExponentialUpdate(config_.loss_window, time_passed) *
+ (last_loss_ratio_ - average_loss_);
+ if (average_loss_ > average_loss_max_) {
+ average_loss_max_ = average_loss_;
+ } else {
+ average_loss_max_ +=
+ ExponentialUpdate(config_.loss_max_window, time_passed) *
+ (average_loss_ - average_loss_max_);
+ }
+}
+
+void LossBasedBandwidthEstimation::UpdateAcknowledgedBitrate(
+ DataRate acknowledged_bitrate,
+ Timestamp at_time) {
+ const TimeDelta time_passed =
+ acknowledged_bitrate_last_update_.IsFinite()
+ ? at_time - acknowledged_bitrate_last_update_
+ : TimeDelta::Seconds(1);
+ acknowledged_bitrate_last_update_ = at_time;
+ if (acknowledged_bitrate > acknowledged_bitrate_max_) {
+ acknowledged_bitrate_max_ = acknowledged_bitrate;
+ } else {
+ acknowledged_bitrate_max_ -=
+ ExponentialUpdate(config_.acknowledged_rate_max_window, time_passed) *
+ (acknowledged_bitrate_max_ - acknowledged_bitrate);
+ }
+}
+
+DataRate LossBasedBandwidthEstimation::Update(Timestamp at_time,
+ DataRate min_bitrate,
+ DataRate wanted_bitrate,
+ TimeDelta last_round_trip_time) {
+ if (loss_based_bitrate_.IsZero()) {
+ loss_based_bitrate_ = wanted_bitrate;
+ }
+
+ // Only increase if loss has been low for some time.
+ const double loss_estimate_for_increase = average_loss_max_;
+ // Avoid multiple decreases from averaging over one loss spike.
+ const double loss_estimate_for_decrease =
+ std::min(average_loss_, last_loss_ratio_);
+ const bool allow_decrease =
+ !has_decreased_since_last_loss_report_ &&
+ (at_time - time_last_decrease_ >=
+ last_round_trip_time + config_.decrease_interval);
+ // If packet loss reports are too old, don't increase the bitrate.
+ const bool loss_report_valid =
+ at_time - last_loss_packet_report_ < 1.2 * kMaxRtcpFeedbackInterval;
+
+ if (loss_report_valid && config_.allow_resets &&
+ loss_estimate_for_increase < loss_reset_threshold()) {
+ loss_based_bitrate_ = wanted_bitrate;
+ } else if (loss_report_valid &&
+ loss_estimate_for_increase < loss_increase_threshold()) {
+ // Increase the bitrate by an RTT-adaptive ratio.
+ DataRate new_increased_bitrate =
+ min_bitrate * GetIncreaseFactor(config_, last_round_trip_time) +
+ config_.increase_offset;
+ // The bitrate that would make the loss "just high enough".
+ const DataRate new_increased_bitrate_cap = BitrateFromLoss(
+ loss_estimate_for_increase, config_.loss_bandwidth_balance_increase,
+ config_.loss_bandwidth_balance_exponent);
+ new_increased_bitrate =
+ std::min(new_increased_bitrate, new_increased_bitrate_cap);
+ loss_based_bitrate_ = std::max(new_increased_bitrate, loss_based_bitrate_);
+ } else if (loss_estimate_for_decrease > loss_decrease_threshold() &&
+ allow_decrease) {
+ // The bitrate that would make the loss "just acceptable".
+ const DataRate new_decreased_bitrate_floor = BitrateFromLoss(
+ loss_estimate_for_decrease, config_.loss_bandwidth_balance_decrease,
+ config_.loss_bandwidth_balance_exponent);
+ DataRate new_decreased_bitrate =
+ std::max(decreased_bitrate(), new_decreased_bitrate_floor);
+ if (new_decreased_bitrate < loss_based_bitrate_) {
+ time_last_decrease_ = at_time;
+ has_decreased_since_last_loss_report_ = true;
+ loss_based_bitrate_ = new_decreased_bitrate;
+ }
+ }
+ return loss_based_bitrate_;
+}
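+
+// Editorial note: Update() above makes a three-way decision. When recent
+// loss is below the reset threshold (and resets are allowed), the
+// estimate snaps back to `wanted_bitrate`; below the increase threshold,
+// it ramps up by an RTT-adaptive factor plus an offset, capped at the
+// rate that would make the observed loss "just high enough"; above the
+// decrease threshold, it backs off to `decrease_factor` times the
+// maximum acknowledged bitrate, floored at the rate that would make the
+// loss "just acceptable".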
+
+void LossBasedBandwidthEstimation::Initialize(DataRate bitrate) {
+ loss_based_bitrate_ = bitrate;
+ average_loss_ = 0;
+ average_loss_max_ = 0;
+}
+
+double LossBasedBandwidthEstimation::loss_reset_threshold() const {
+ return LossFromBitrate(loss_based_bitrate_,
+ config_.loss_bandwidth_balance_reset,
+ config_.loss_bandwidth_balance_exponent);
+}
+
+double LossBasedBandwidthEstimation::loss_increase_threshold() const {
+ return LossFromBitrate(loss_based_bitrate_,
+ config_.loss_bandwidth_balance_increase,
+ config_.loss_bandwidth_balance_exponent);
+}
+
+double LossBasedBandwidthEstimation::loss_decrease_threshold() const {
+ return LossFromBitrate(loss_based_bitrate_,
+ config_.loss_bandwidth_balance_decrease,
+ config_.loss_bandwidth_balance_exponent);
+}
+
+DataRate LossBasedBandwidthEstimation::decreased_bitrate() const {
+ return config_.decrease_factor * acknowledged_bitrate_max_;
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/congestion_controller/goog_cc/loss_based_bandwidth_estimation.h b/third_party/libwebrtc/modules/congestion_controller/goog_cc/loss_based_bandwidth_estimation.h
new file mode 100644
index 0000000000..9f69caba89
--- /dev/null
+++ b/third_party/libwebrtc/modules/congestion_controller/goog_cc/loss_based_bandwidth_estimation.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_CONGESTION_CONTROLLER_GOOG_CC_LOSS_BASED_BANDWIDTH_ESTIMATION_H_
+#define MODULES_CONGESTION_CONTROLLER_GOOG_CC_LOSS_BASED_BANDWIDTH_ESTIMATION_H_
+
+#include <vector>
+
+#include "api/field_trials_view.h"
+#include "api/transport/network_types.h"
+#include "api/units/data_rate.h"
+#include "api/units/time_delta.h"
+#include "api/units/timestamp.h"
+#include "rtc_base/experiments/field_trial_parser.h"
+
+namespace webrtc {
+
+struct LossBasedControlConfig {
+ explicit LossBasedControlConfig(const FieldTrialsView* key_value_config);
+ LossBasedControlConfig(const LossBasedControlConfig&);
+ LossBasedControlConfig& operator=(const LossBasedControlConfig&) = default;
+ ~LossBasedControlConfig();
+ bool enabled;
+ FieldTrialParameter<double> min_increase_factor;
+ FieldTrialParameter<double> max_increase_factor;
+ FieldTrialParameter<TimeDelta> increase_low_rtt;
+ FieldTrialParameter<TimeDelta> increase_high_rtt;
+ FieldTrialParameter<double> decrease_factor;
+ FieldTrialParameter<TimeDelta> loss_window;
+ FieldTrialParameter<TimeDelta> loss_max_window;
+ FieldTrialParameter<TimeDelta> acknowledged_rate_max_window;
+ FieldTrialParameter<DataRate> increase_offset;
+ FieldTrialParameter<DataRate> loss_bandwidth_balance_increase;
+ FieldTrialParameter<DataRate> loss_bandwidth_balance_decrease;
+ FieldTrialParameter<DataRate> loss_bandwidth_balance_reset;
+ FieldTrialParameter<double> loss_bandwidth_balance_exponent;
+ FieldTrialParameter<bool> allow_resets;
+ FieldTrialParameter<TimeDelta> decrease_interval;
+ FieldTrialParameter<TimeDelta> loss_report_timeout;
+};
+
+// Estimates an upper BWE limit based on loss.
+// It requires knowledge about lost packets and acknowledged bitrate.
+// I.e., this class requires transport feedback.
+class LossBasedBandwidthEstimation {
+ public:
+ explicit LossBasedBandwidthEstimation(
+ const FieldTrialsView* key_value_config);
+ // Returns the new estimate.
+ DataRate Update(Timestamp at_time,
+ DataRate min_bitrate,
+ DataRate wanted_bitrate,
+ TimeDelta last_round_trip_time);
+ void UpdateAcknowledgedBitrate(DataRate acknowledged_bitrate,
+ Timestamp at_time);
+ void Initialize(DataRate bitrate);
+ bool Enabled() const { return config_.enabled; }
+ // Returns true if LossBasedBandwidthEstimation is enabled and has
+ // received loss statistics, i.e., it requires transport feedback.
+ bool InUse() const {
+ return Enabled() && last_loss_packet_report_.IsFinite();
+ }
+ void UpdateLossStatistics(const std::vector<PacketResult>& packet_results,
+ Timestamp at_time);
+ DataRate GetEstimate() const { return loss_based_bitrate_; }
+
+ private:
+ friend class GoogCcStatePrinter;
+ void Reset(DataRate bitrate);
+ double loss_increase_threshold() const;
+ double loss_decrease_threshold() const;
+ double loss_reset_threshold() const;
+
+ DataRate decreased_bitrate() const;
+
+ const LossBasedControlConfig config_;
+ double average_loss_;
+ double average_loss_max_;
+ DataRate loss_based_bitrate_;
+ DataRate acknowledged_bitrate_max_;
+ Timestamp acknowledged_bitrate_last_update_;
+ Timestamp time_last_decrease_;
+ bool has_decreased_since_last_loss_report_;
+ Timestamp last_loss_packet_report_;
+ double last_loss_ratio_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_CONGESTION_CONTROLLER_GOOG_CC_LOSS_BASED_BANDWIDTH_ESTIMATION_H_
diff --git a/third_party/libwebrtc/modules/congestion_controller/goog_cc/loss_based_bwe_v1_gn/moz.build b/third_party/libwebrtc/modules/congestion_controller/goog_cc/loss_based_bwe_v1_gn/moz.build
new file mode 100644
index 0000000000..e3394c9420
--- /dev/null
+++ b/third_party/libwebrtc/modules/congestion_controller/goog_cc/loss_based_bwe_v1_gn/moz.build
@@ -0,0 +1,206 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["BWE_TEST_LOGGING_COMPILE_TIME_ENABLE"] = "0"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+SOURCES += [
+ "/third_party/libwebrtc/modules/congestion_controller/goog_cc/loss_based_bandwidth_estimation.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("loss_based_bwe_v1_gn")
diff --git a/third_party/libwebrtc/modules/congestion_controller/goog_cc/loss_based_bwe_v2.cc b/third_party/libwebrtc/modules/congestion_controller/goog_cc/loss_based_bwe_v2.cc
new file mode 100644
index 0000000000..88ee8e4bf1
--- /dev/null
+++ b/third_party/libwebrtc/modules/congestion_controller/goog_cc/loss_based_bwe_v2.cc
@@ -0,0 +1,962 @@
+/*
+ * Copyright 2021 The WebRTC project authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/congestion_controller/goog_cc/loss_based_bwe_v2.h"
+
+#include <algorithm>
+#include <cmath>
+#include <complex>
+#include <cstddef>
+#include <cstdlib>
+#include <limits>
+#include <utility>
+#include <vector>
+
+#include "absl/algorithm/container.h"
+#include "absl/types/optional.h"
+#include "api/array_view.h"
+#include "api/field_trials_view.h"
+#include "api/network_state_predictor.h"
+#include "api/transport/network_types.h"
+#include "api/units/data_rate.h"
+#include "api/units/data_size.h"
+#include "api/units/time_delta.h"
+#include "api/units/timestamp.h"
+#include "modules/remote_bitrate_estimator/include/bwe_defines.h"
+#include "rtc_base/experiments/field_trial_list.h"
+#include "rtc_base/experiments/field_trial_parser.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+namespace {
+
+bool IsValid(DataRate datarate) {
+ return datarate.IsFinite();
+}
+
+bool IsValid(Timestamp timestamp) {
+ return timestamp.IsFinite();
+}
+
+struct PacketResultsSummary {
+ int num_packets = 0;
+ int num_lost_packets = 0;
+ DataSize total_size = DataSize::Zero();
+ Timestamp first_send_time = Timestamp::PlusInfinity();
+ Timestamp last_send_time = Timestamp::MinusInfinity();
+};
+
+// If `packet_results` is empty, returns a `PacketResultsSummary` with
+// `first_send_time` set to `PlusInfinity` and `last_send_time` set to
+// `MinusInfinity`.
+PacketResultsSummary GetPacketResultsSummary(
+ rtc::ArrayView<const PacketResult> packet_results) {
+ PacketResultsSummary packet_results_summary;
+
+ packet_results_summary.num_packets = packet_results.size();
+ for (const PacketResult& packet : packet_results) {
+ if (!packet.IsReceived()) {
+ packet_results_summary.num_lost_packets++;
+ }
+ packet_results_summary.total_size += packet.sent_packet.size;
+ packet_results_summary.first_send_time = std::min(
+ packet_results_summary.first_send_time, packet.sent_packet.send_time);
+ packet_results_summary.last_send_time = std::max(
+ packet_results_summary.last_send_time, packet.sent_packet.send_time);
+ }
+
+ return packet_results_summary;
+}
+
+double GetLossProbability(double inherent_loss,
+ DataRate loss_limited_bandwidth,
+ DataRate sending_rate) {
+ if (inherent_loss < 0.0 || inherent_loss > 1.0) {
+ RTC_LOG(LS_WARNING) << "The inherent loss must be in [0,1]: "
+ << inherent_loss;
+ inherent_loss = std::min(std::max(inherent_loss, 0.0), 1.0);
+ }
+ if (!sending_rate.IsFinite()) {
+ RTC_LOG(LS_WARNING) << "The sending rate must be finite: "
+ << ToString(sending_rate);
+ }
+ if (!loss_limited_bandwidth.IsFinite()) {
+ RTC_LOG(LS_WARNING) << "The loss limited bandwidth must be finite: "
+ << ToString(loss_limited_bandwidth);
+ }
+
+ double loss_probability = inherent_loss;
+ if (IsValid(sending_rate) && IsValid(loss_limited_bandwidth) &&
+ (sending_rate > loss_limited_bandwidth)) {
+ loss_probability += (1 - inherent_loss) *
+ (sending_rate - loss_limited_bandwidth) / sending_rate;
+ }
+ return std::min(std::max(loss_probability, 1.0e-6), 1.0 - 1.0e-6);
+}
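+
+// Editorial illustration (not part of the original source): with
+// inherent_loss = 0.01, loss_limited_bandwidth = 500 kbps, and
+// sending_rate = 600 kbps, the congestion term adds
+// (1 - 0.01) * (600 - 500) / 600, roughly 0.165, for a predicted loss
+// probability of about 0.175; sending at or below 500 kbps leaves only
+// the inherent 0.01 (clamped away from exactly 0 and 1).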
+
+} // namespace
+
+LossBasedBweV2::LossBasedBweV2(const FieldTrialsView* key_value_config)
+ : config_(CreateConfig(key_value_config)) {
+ if (!config_.has_value()) {
+ RTC_LOG(LS_VERBOSE) << "The configuration does not specify that the "
+ "estimator should be enabled, disabling it.";
+ return;
+ }
+ if (!IsConfigValid()) {
+ RTC_LOG(LS_WARNING)
+ << "The configuration is not valid, disabling the estimator.";
+ config_.reset();
+ return;
+ }
+
+ current_estimate_.inherent_loss = config_->initial_inherent_loss_estimate;
+ observations_.resize(config_->observation_window_size);
+ temporal_weights_.resize(config_->observation_window_size);
+ instant_upper_bound_temporal_weights_.resize(
+ config_->observation_window_size);
+ CalculateTemporalWeights();
+}
+
+bool LossBasedBweV2::IsEnabled() const {
+ return config_.has_value();
+}
+
+bool LossBasedBweV2::IsReady() const {
+ return IsEnabled() && IsValid(current_estimate_.loss_limited_bandwidth) &&
+ num_observations_ > 0;
+}
+
+DataRate LossBasedBweV2::GetBandwidthEstimate(
+ DataRate delay_based_limit) const {
+ if (!IsReady()) {
+ if (!IsEnabled()) {
+ RTC_LOG(LS_WARNING)
+ << "The estimator must be enabled before it can be used.";
+ } else {
+ if (!IsValid(current_estimate_.loss_limited_bandwidth)) {
+ RTC_LOG(LS_WARNING)
+ << "The estimator must be initialized before it can be used.";
+ }
+ if (num_observations_ <= 0) {
+ RTC_LOG(LS_WARNING) << "The estimator must receive enough loss "
+ "statistics before it can be used.";
+ }
+ }
+ return IsValid(delay_based_limit) ? delay_based_limit
+ : DataRate::PlusInfinity();
+ }
+
+ if (delay_based_limit.IsFinite()) {
+ return std::min({current_estimate_.loss_limited_bandwidth,
+ GetInstantUpperBound(), delay_based_limit});
+ } else {
+ return std::min(current_estimate_.loss_limited_bandwidth,
+ GetInstantUpperBound());
+ }
+}
+
+void LossBasedBweV2::SetAcknowledgedBitrate(DataRate acknowledged_bitrate) {
+ if (IsValid(acknowledged_bitrate)) {
+ acknowledged_bitrate_ = acknowledged_bitrate;
+ } else {
+ RTC_LOG(LS_WARNING) << "The acknowledged bitrate must be finite: "
+ << ToString(acknowledged_bitrate);
+ }
+}
+
+void LossBasedBweV2::SetBandwidthEstimate(DataRate bandwidth_estimate) {
+ if (IsValid(bandwidth_estimate)) {
+ current_estimate_.loss_limited_bandwidth = bandwidth_estimate;
+ } else {
+ RTC_LOG(LS_WARNING) << "The bandwidth estimate must be finite: "
+ << ToString(bandwidth_estimate);
+ }
+}
+
+void LossBasedBweV2::UpdateBandwidthEstimate(
+ rtc::ArrayView<const PacketResult> packet_results,
+ DataRate delay_based_estimate,
+ BandwidthUsage delay_detector_state) {
+ if (!IsEnabled()) {
+ RTC_LOG(LS_WARNING)
+ << "The estimator must be enabled before it can be used.";
+ return;
+ }
+ if (packet_results.empty()) {
+ RTC_LOG(LS_VERBOSE)
+ << "The estimate cannot be updated without any loss statistics.";
+ return;
+ }
+
+ if (!PushBackObservation(packet_results, delay_detector_state)) {
+ return;
+ }
+
+ if (!IsValid(current_estimate_.loss_limited_bandwidth)) {
+ RTC_LOG(LS_VERBOSE)
+ << "The estimator must be initialized before it can be used.";
+ return;
+ }
+
+ ChannelParameters best_candidate = current_estimate_;
+ double objective_max = std::numeric_limits<double>::lowest();
+ for (ChannelParameters candidate : GetCandidates(delay_based_estimate)) {
+ NewtonsMethodUpdate(candidate);
+
+ const double candidate_objective = GetObjective(candidate);
+ if (candidate_objective > objective_max) {
+ objective_max = candidate_objective;
+ best_candidate = candidate;
+ }
+ }
+ if (best_candidate.loss_limited_bandwidth <
+ current_estimate_.loss_limited_bandwidth) {
+ last_time_estimate_reduced_ = last_send_time_most_recent_observation_;
+ }
+
+ // Do not increase the estimate if the average reported loss is greater
+ // than the best candidate's inherent loss.
+ if (GetAverageReportedLossRatio() > best_candidate.inherent_loss &&
+ config_->not_increase_if_inherent_loss_less_than_average_loss &&
+ current_estimate_.loss_limited_bandwidth <
+ best_candidate.loss_limited_bandwidth) {
+ best_candidate.loss_limited_bandwidth =
+ current_estimate_.loss_limited_bandwidth;
+ }
+
+ // Bound the estimate increase if:
+ // 1. The estimate is limited due to loss, and
+ // 2. The estimate was increased less than `delayed_increase_window`
+ //    ago, and
+ // 3. The best candidate is greater than bandwidth_limit_in_current_window.
+ if (limited_due_to_loss_candidate_ &&
+ recovering_after_loss_timestamp_.IsFinite() &&
+ recovering_after_loss_timestamp_ + config_->delayed_increase_window >
+ last_send_time_most_recent_observation_ &&
+ best_candidate.loss_limited_bandwidth >
+ bandwidth_limit_in_current_window_) {
+ best_candidate.loss_limited_bandwidth = bandwidth_limit_in_current_window_;
+ }
+ limited_due_to_loss_candidate_ =
+ delay_based_estimate.IsFinite() &&
+ best_candidate.loss_limited_bandwidth < delay_based_estimate;
+
+ if (limited_due_to_loss_candidate_ &&
+ (recovering_after_loss_timestamp_.IsInfinite() ||
+ recovering_after_loss_timestamp_ + config_->delayed_increase_window <
+ last_send_time_most_recent_observation_)) {
+ bandwidth_limit_in_current_window_ = std::max(
+ kCongestionControllerMinBitrate,
+ best_candidate.loss_limited_bandwidth * config_->max_increase_factor);
+ recovering_after_loss_timestamp_ = last_send_time_most_recent_observation_;
+ }
+
+ current_estimate_ = best_candidate;
+}
+
+// Returns a `LossBasedBweV2::Config` iff the `key_value_config` specifies a
+// configuration for the `LossBasedBweV2` which is explicitly enabled.
+absl::optional<LossBasedBweV2::Config> LossBasedBweV2::CreateConfig(
+ const FieldTrialsView* key_value_config) {
+ FieldTrialParameter<bool> enabled("Enabled", false);
+ FieldTrialParameter<double> bandwidth_rampup_upper_bound_factor(
+ "BwRampupUpperBoundFactor", 1.1);
+ FieldTrialParameter<double> rampup_acceleration_max_factor(
+ "BwRampupAccelMaxFactor", 0.0);
+ FieldTrialParameter<TimeDelta> rampup_acceleration_maxout_time(
+ "BwRampupAccelMaxoutTime", TimeDelta::Seconds(60));
+ FieldTrialList<double> candidate_factors("CandidateFactors",
+ {1.05, 1.0, 0.95});
+ FieldTrialParameter<double> higher_bandwidth_bias_factor("HigherBwBiasFactor",
+ 0.00001);
+ FieldTrialParameter<double> higher_log_bandwidth_bias_factor(
+ "HigherLogBwBiasFactor", 0.001);
+ FieldTrialParameter<double> inherent_loss_lower_bound(
+ "InherentLossLowerBound", 1.0e-3);
+ FieldTrialParameter<double> loss_threshold_of_high_bandwidth_preference(
+ "LossThresholdOfHighBandwidthPreference", 0.99);
+ FieldTrialParameter<double> bandwidth_preference_smoothing_factor(
+ "BandwidthPreferenceSmoothingFactor", 0.002);
+ FieldTrialParameter<DataRate> inherent_loss_upper_bound_bandwidth_balance(
+ "InherentLossUpperBoundBwBalance", DataRate::KilobitsPerSec(15.0));
+ FieldTrialParameter<double> inherent_loss_upper_bound_offset(
+ "InherentLossUpperBoundOffset", 0.05);
+ FieldTrialParameter<double> initial_inherent_loss_estimate(
+ "InitialInherentLossEstimate", 0.01);
+ FieldTrialParameter<int> newton_iterations("NewtonIterations", 1);
+ FieldTrialParameter<double> newton_step_size("NewtonStepSize", 0.5);
+ FieldTrialParameter<bool> append_acknowledged_rate_candidate(
+ "AckedRateCandidate", true);
+ FieldTrialParameter<bool> append_delay_based_estimate_candidate(
+ "DelayBasedCandidate", false);
+ FieldTrialParameter<TimeDelta> observation_duration_lower_bound(
+ "ObservationDurationLowerBound", TimeDelta::Seconds(1));
+ FieldTrialParameter<int> observation_window_size("ObservationWindowSize", 20);
+ FieldTrialParameter<double> sending_rate_smoothing_factor(
+ "SendingRateSmoothingFactor", 0.0);
+ FieldTrialParameter<double> instant_upper_bound_temporal_weight_factor(
+ "InstantUpperBoundTemporalWeightFactor", 0.99);
+ FieldTrialParameter<DataRate> instant_upper_bound_bandwidth_balance(
+ "InstantUpperBoundBwBalance", DataRate::KilobitsPerSec(15.0));
+ FieldTrialParameter<double> instant_upper_bound_loss_offset(
+ "InstantUpperBoundLossOffset", 0.05);
+ FieldTrialParameter<double> temporal_weight_factor("TemporalWeightFactor",
+ 0.99);
+ FieldTrialParameter<double> bandwidth_backoff_lower_bound_factor(
+ "BwBackoffLowerBoundFactor", 1.0);
+ FieldTrialParameter<bool> trendline_integration_enabled(
+ "TrendlineIntegrationEnabled", false);
+ FieldTrialParameter<int> trendline_observations_window_size(
+ "TrendlineObservationsWindowSize", 20);
+ FieldTrialParameter<double> max_increase_factor("MaxIncreaseFactor", 1000.0);
+ FieldTrialParameter<TimeDelta> delayed_increase_window(
+ "DelayedIncreaseWindow", TimeDelta::Millis(300));
+ FieldTrialParameter<bool> use_acked_bitrate_only_when_overusing(
+ "UseAckedBitrateOnlyWhenOverusing", false);
+ FieldTrialParameter<bool>
+ not_increase_if_inherent_loss_less_than_average_loss(
+ "NotIncreaseIfInherentLossLessThanAverageLoss", false);
+
+ if (key_value_config) {
+ ParseFieldTrial({&enabled,
+ &bandwidth_rampup_upper_bound_factor,
+ &rampup_acceleration_max_factor,
+ &rampup_acceleration_maxout_time,
+ &candidate_factors,
+ &higher_bandwidth_bias_factor,
+ &higher_log_bandwidth_bias_factor,
+ &inherent_loss_lower_bound,
+ &loss_threshold_of_high_bandwidth_preference,
+ &bandwidth_preference_smoothing_factor,
+ &inherent_loss_upper_bound_bandwidth_balance,
+ &inherent_loss_upper_bound_offset,
+ &initial_inherent_loss_estimate,
+ &newton_iterations,
+ &newton_step_size,
+ &append_acknowledged_rate_candidate,
+ &append_delay_based_estimate_candidate,
+ &observation_duration_lower_bound,
+ &observation_window_size,
+ &sending_rate_smoothing_factor,
+ &instant_upper_bound_temporal_weight_factor,
+ &instant_upper_bound_bandwidth_balance,
+ &instant_upper_bound_loss_offset,
+ &temporal_weight_factor,
+ &bandwidth_backoff_lower_bound_factor,
+ &trendline_integration_enabled,
+ &trendline_observations_window_size,
+ &max_increase_factor,
+ &delayed_increase_window,
+ &use_acked_bitrate_only_when_overusing,
+ &not_increase_if_inherent_loss_less_than_average_loss},
+ key_value_config->Lookup("WebRTC-Bwe-LossBasedBweV2"));
+ }
+
+ absl::optional<Config> config;
+ if (!enabled.Get()) {
+ return config;
+ }
+ config.emplace(Config());
+ config->bandwidth_rampup_upper_bound_factor =
+ bandwidth_rampup_upper_bound_factor.Get();
+ config->rampup_acceleration_max_factor = rampup_acceleration_max_factor.Get();
+ config->rampup_acceleration_maxout_time =
+ rampup_acceleration_maxout_time.Get();
+ config->candidate_factors = candidate_factors.Get();
+ config->higher_bandwidth_bias_factor = higher_bandwidth_bias_factor.Get();
+ config->higher_log_bandwidth_bias_factor =
+ higher_log_bandwidth_bias_factor.Get();
+ config->inherent_loss_lower_bound = inherent_loss_lower_bound.Get();
+ config->loss_threshold_of_high_bandwidth_preference =
+ loss_threshold_of_high_bandwidth_preference.Get();
+ config->bandwidth_preference_smoothing_factor =
+ bandwidth_preference_smoothing_factor.Get();
+ config->inherent_loss_upper_bound_bandwidth_balance =
+ inherent_loss_upper_bound_bandwidth_balance.Get();
+ config->inherent_loss_upper_bound_offset =
+ inherent_loss_upper_bound_offset.Get();
+ config->initial_inherent_loss_estimate = initial_inherent_loss_estimate.Get();
+ config->newton_iterations = newton_iterations.Get();
+ config->newton_step_size = newton_step_size.Get();
+ config->append_acknowledged_rate_candidate =
+ append_acknowledged_rate_candidate.Get();
+ config->append_delay_based_estimate_candidate =
+ append_delay_based_estimate_candidate.Get();
+ config->observation_duration_lower_bound =
+ observation_duration_lower_bound.Get();
+ config->observation_window_size = observation_window_size.Get();
+ config->sending_rate_smoothing_factor = sending_rate_smoothing_factor.Get();
+ config->instant_upper_bound_temporal_weight_factor =
+ instant_upper_bound_temporal_weight_factor.Get();
+ config->instant_upper_bound_bandwidth_balance =
+ instant_upper_bound_bandwidth_balance.Get();
+ config->instant_upper_bound_loss_offset =
+ instant_upper_bound_loss_offset.Get();
+ config->temporal_weight_factor = temporal_weight_factor.Get();
+ config->bandwidth_backoff_lower_bound_factor =
+ bandwidth_backoff_lower_bound_factor.Get();
+ config->trendline_integration_enabled = trendline_integration_enabled.Get();
+ config->trendline_observations_window_size =
+ trendline_observations_window_size.Get();
+ config->max_increase_factor = max_increase_factor.Get();
+ config->delayed_increase_window = delayed_increase_window.Get();
+ config->use_acked_bitrate_only_when_overusing =
+ use_acked_bitrate_only_when_overusing.Get();
+ config->not_increase_if_inherent_loss_less_than_average_loss =
+ not_increase_if_inherent_loss_less_than_average_loss.Get();
+ return config;
+}
+
+bool LossBasedBweV2::IsConfigValid() const {
+ if (!config_.has_value()) {
+ return false;
+ }
+
+ bool valid = true;
+
+ if (config_->bandwidth_rampup_upper_bound_factor <= 1.0) {
+ RTC_LOG(LS_WARNING)
+ << "The bandwidth rampup upper bound factor must be greater than 1: "
+ << config_->bandwidth_rampup_upper_bound_factor;
+ valid = false;
+ }
+ if (config_->rampup_acceleration_max_factor < 0.0) {
+ RTC_LOG(LS_WARNING)
+ << "The rampup acceleration max factor must be non-negative.: "
+ << config_->rampup_acceleration_max_factor;
+ valid = false;
+ }
+ if (config_->rampup_acceleration_maxout_time <= TimeDelta::Zero()) {
+ RTC_LOG(LS_WARNING)
+ << "The rampup acceleration maxout time must be above zero: "
+ << config_->rampup_acceleration_maxout_time.seconds();
+ valid = false;
+ }
+ for (double candidate_factor : config_->candidate_factors) {
+ if (candidate_factor <= 0.0) {
+ RTC_LOG(LS_WARNING) << "All candidate factors must be greater than zero: "
+ << candidate_factor;
+ valid = false;
+ }
+ }
+
+ // Ensure that the configuration allows generation of at least one candidate
+ // other than the current estimate.
+ if (!config_->append_acknowledged_rate_candidate &&
+ !config_->append_delay_based_estimate_candidate &&
+ !absl::c_any_of(config_->candidate_factors,
+ [](double cf) { return cf != 1.0; })) {
+ RTC_LOG(LS_WARNING)
+ << "The configuration does not allow generating candidates. Specify "
+ "a candidate factor other than 1.0, allow the acknowledged rate "
+ "to be a candidate, and/or allow the delay based estimate to be a "
+ "candidate.";
+ valid = false;
+ }
+
+ if (config_->higher_bandwidth_bias_factor < 0.0) {
+ RTC_LOG(LS_WARNING)
+ << "The higher bandwidth bias factor must be non-negative: "
+ << config_->higher_bandwidth_bias_factor;
+ valid = false;
+ }
+ if (config_->inherent_loss_lower_bound < 0.0 ||
+ config_->inherent_loss_lower_bound >= 1.0) {
+ RTC_LOG(LS_WARNING) << "The inherent loss lower bound must be in [0, 1): "
+ << config_->inherent_loss_lower_bound;
+ valid = false;
+ }
+ if (config_->loss_threshold_of_high_bandwidth_preference < 0.0 ||
+ config_->loss_threshold_of_high_bandwidth_preference >= 1.0) {
+ RTC_LOG(LS_WARNING)
+ << "The loss threshold of high bandwidth preference must be in [0, 1): "
+ << config_->loss_threshold_of_high_bandwidth_preference;
+ valid = false;
+ }
+ if (config_->bandwidth_preference_smoothing_factor <= 0.0 ||
+ config_->bandwidth_preference_smoothing_factor > 1.0) {
+ RTC_LOG(LS_WARNING)
+ << "The bandwidth preference smoothing factor must be in (0, 1]: "
+ << config_->bandwidth_preference_smoothing_factor;
+ valid = false;
+ }
+ if (config_->inherent_loss_upper_bound_bandwidth_balance <=
+ DataRate::Zero()) {
+ RTC_LOG(LS_WARNING)
+ << "The inherent loss upper bound bandwidth balance "
+ "must be positive: "
+ << ToString(config_->inherent_loss_upper_bound_bandwidth_balance);
+ valid = false;
+ }
+ if (config_->inherent_loss_upper_bound_offset <
+ config_->inherent_loss_lower_bound ||
+ config_->inherent_loss_upper_bound_offset >= 1.0) {
+ RTC_LOG(LS_WARNING) << "The inherent loss upper bound must be greater "
+ "than or equal to the inherent "
+ "loss lower bound, which is "
+ << config_->inherent_loss_lower_bound
+ << ", and less than 1: "
+ << config_->inherent_loss_upper_bound_offset;
+ valid = false;
+ }
+ if (config_->initial_inherent_loss_estimate < 0.0 ||
+ config_->initial_inherent_loss_estimate >= 1.0) {
+ RTC_LOG(LS_WARNING)
+ << "The initial inherent loss estimate must be in [0, 1): "
+ << config_->initial_inherent_loss_estimate;
+ valid = false;
+ }
+ if (config_->newton_iterations <= 0) {
+ RTC_LOG(LS_WARNING) << "The number of Newton iterations must be positive: "
+ << config_->newton_iterations;
+ valid = false;
+ }
+ if (config_->newton_step_size <= 0.0) {
+ RTC_LOG(LS_WARNING) << "The Newton step size must be positive: "
+ << config_->newton_step_size;
+ valid = false;
+ }
+ if (config_->observation_duration_lower_bound <= TimeDelta::Zero()) {
+ RTC_LOG(LS_WARNING)
+ << "The observation duration lower bound must be positive: "
+ << ToString(config_->observation_duration_lower_bound);
+ valid = false;
+ }
+ if (config_->observation_window_size < 2) {
+ RTC_LOG(LS_WARNING) << "The observation window size must be at least 2: "
+ << config_->observation_window_size;
+ valid = false;
+ }
+ if (config_->sending_rate_smoothing_factor < 0.0 ||
+ config_->sending_rate_smoothing_factor >= 1.0) {
+ RTC_LOG(LS_WARNING)
+ << "The sending rate smoothing factor must be in [0, 1): "
+ << config_->sending_rate_smoothing_factor;
+ valid = false;
+ }
+ if (config_->instant_upper_bound_temporal_weight_factor <= 0.0 ||
+ config_->instant_upper_bound_temporal_weight_factor > 1.0) {
+ RTC_LOG(LS_WARNING)
+ << "The instant upper bound temporal weight factor must be in (0, 1]"
+ << config_->instant_upper_bound_temporal_weight_factor;
+ valid = false;
+ }
+ if (config_->instant_upper_bound_bandwidth_balance <= DataRate::Zero()) {
+ RTC_LOG(LS_WARNING)
+ << "The instant upper bound bandwidth balance must be positive: "
+ << ToString(config_->instant_upper_bound_bandwidth_balance);
+ valid = false;
+ }
+ if (config_->instant_upper_bound_loss_offset < 0.0 ||
+ config_->instant_upper_bound_loss_offset >= 1.0) {
+ RTC_LOG(LS_WARNING)
+ << "The instant upper bound loss offset must be in [0, 1): "
+ << config_->instant_upper_bound_loss_offset;
+ valid = false;
+ }
+ if (config_->temporal_weight_factor <= 0.0 ||
+ config_->temporal_weight_factor > 1.0) {
+ RTC_LOG(LS_WARNING) << "The temporal weight factor must be in (0, 1]: "
+ << config_->temporal_weight_factor;
+ valid = false;
+ }
+ if (config_->bandwidth_backoff_lower_bound_factor > 1.0) {
+ RTC_LOG(LS_WARNING)
+ << "The bandwidth backoff lower bound factor must not be greater than "
+ "1: "
+ << config_->bandwidth_backoff_lower_bound_factor;
+ valid = false;
+ }
+ if (config_->trendline_observations_window_size < 2) {
+ RTC_LOG(LS_WARNING) << "The trendline window size must be at least 2: "
+ << config_->trendline_observations_window_size;
+ valid = false;
+ }
+ if (config_->max_increase_factor <= 0.0) {
+ RTC_LOG(LS_WARNING) << "The maximum increase factor must be positive: "
+ << config_->max_increase_factor;
+ valid = false;
+ }
+ if (config_->delayed_increase_window <= TimeDelta::Zero()) {
+ RTC_LOG(LS_WARNING) << "The delayed increase window must be positive: "
+ << config_->delayed_increase_window.ms();
+ valid = false;
+ }
+ return valid;
+}
+
+double LossBasedBweV2::GetAverageReportedLossRatio() const {
+ if (num_observations_ <= 0) {
+ return 0.0;
+ }
+
+ int num_packets = 0;
+ int num_lost_packets = 0;
+ for (const Observation& observation : observations_) {
+ if (!observation.IsInitialized()) {
+ continue;
+ }
+
+ double instant_temporal_weight =
+ instant_upper_bound_temporal_weights_[(num_observations_ - 1) -
+ observation.id];
+ num_packets += instant_temporal_weight * observation.num_packets;
+ num_lost_packets += instant_temporal_weight * observation.num_lost_packets;
+ }
+
+ return static_cast<double>(num_lost_packets) / num_packets;
+}
+
+DataRate LossBasedBweV2::GetCandidateBandwidthUpperBound(
+ DataRate delay_based_estimate) const {
+ DataRate candidate_bandwidth_upper_bound = DataRate::PlusInfinity();
+ if (limited_due_to_loss_candidate_) {
+ candidate_bandwidth_upper_bound = bandwidth_limit_in_current_window_;
+ }
+
+ if (config_->trendline_integration_enabled) {
+ candidate_bandwidth_upper_bound =
+ std::min(GetInstantUpperBound(), candidate_bandwidth_upper_bound);
+ if (IsValid(delay_based_estimate)) {
+ candidate_bandwidth_upper_bound =
+ std::min(delay_based_estimate, candidate_bandwidth_upper_bound);
+ }
+ }
+
+ if (!acknowledged_bitrate_.has_value())
+ return candidate_bandwidth_upper_bound;
+
+ candidate_bandwidth_upper_bound =
+ IsValid(candidate_bandwidth_upper_bound)
+ ? std::min(candidate_bandwidth_upper_bound,
+ config_->bandwidth_rampup_upper_bound_factor *
+ (*acknowledged_bitrate_))
+ : config_->bandwidth_rampup_upper_bound_factor *
+ (*acknowledged_bitrate_);
+
+ if (config_->rampup_acceleration_max_factor > 0.0) {
+ const TimeDelta time_since_bandwidth_reduced = std::min(
+ config_->rampup_acceleration_maxout_time,
+ std::max(TimeDelta::Zero(), last_send_time_most_recent_observation_ -
+ last_time_estimate_reduced_));
+ const double rampup_acceleration = config_->rampup_acceleration_max_factor *
+ time_since_bandwidth_reduced /
+ config_->rampup_acceleration_maxout_time;
+
+ candidate_bandwidth_upper_bound +=
+ rampup_acceleration * (*acknowledged_bitrate_);
+ }
+ return candidate_bandwidth_upper_bound;
+}
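+
+// Editorial note: when an acknowledged bitrate is available, the upper
+// bound above is BwRampupUpperBoundFactor times that bitrate, relaxed
+// further by an acceleration term that grows linearly with the time
+// since the estimate was last reduced and saturates after
+// BwRampupAccelMaxoutTime.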
+
+std::vector<LossBasedBweV2::ChannelParameters> LossBasedBweV2::GetCandidates(
+ DataRate delay_based_estimate) const {
+ std::vector<DataRate> bandwidths;
+ bool can_increase_bitrate = TrendlineEsimateAllowBitrateIncrease();
+ for (double candidate_factor : config_->candidate_factors) {
+ if (!can_increase_bitrate && candidate_factor > 1.0) {
+ continue;
+ }
+ bandwidths.push_back(candidate_factor *
+ current_estimate_.loss_limited_bandwidth);
+ }
+
+ if (acknowledged_bitrate_.has_value() &&
+ config_->append_acknowledged_rate_candidate &&
+ TrendlineEsimateAllowEmergencyBackoff()) {
+ bandwidths.push_back(*acknowledged_bitrate_ *
+ config_->bandwidth_backoff_lower_bound_factor);
+ }
+
+ if (IsValid(delay_based_estimate) &&
+ config_->append_delay_based_estimate_candidate) {
+ if (can_increase_bitrate &&
+ delay_based_estimate > current_estimate_.loss_limited_bandwidth) {
+ bandwidths.push_back(delay_based_estimate);
+ }
+ }
+
+ const DataRate candidate_bandwidth_upper_bound =
+ GetCandidateBandwidthUpperBound(delay_based_estimate);
+
+ std::vector<ChannelParameters> candidates;
+ candidates.resize(bandwidths.size());
+ for (size_t i = 0; i < bandwidths.size(); ++i) {
+ ChannelParameters candidate = current_estimate_;
+ if (config_->trendline_integration_enabled) {
+ candidate.loss_limited_bandwidth =
+ std::min(bandwidths[i], candidate_bandwidth_upper_bound);
+ } else {
+ candidate.loss_limited_bandwidth = std::min(
+ bandwidths[i], std::max(current_estimate_.loss_limited_bandwidth,
+ candidate_bandwidth_upper_bound));
+ }
+ candidate.inherent_loss = GetFeasibleInherentLoss(candidate);
+ candidates[i] = candidate;
+ }
+ return candidates;
+}
+
+LossBasedBweV2::Derivatives LossBasedBweV2::GetDerivatives(
+ const ChannelParameters& channel_parameters) const {
+ Derivatives derivatives;
+
+ for (const Observation& observation : observations_) {
+ if (!observation.IsInitialized()) {
+ continue;
+ }
+
+ double loss_probability = GetLossProbability(
+ channel_parameters.inherent_loss,
+ channel_parameters.loss_limited_bandwidth, observation.sending_rate);
+
+ double temporal_weight =
+ temporal_weights_[(num_observations_ - 1) - observation.id];
+
+ derivatives.first +=
+ temporal_weight *
+ ((observation.num_lost_packets / loss_probability) -
+ (observation.num_received_packets / (1.0 - loss_probability)));
+ derivatives.second -=
+ temporal_weight *
+ ((observation.num_lost_packets / std::pow(loss_probability, 2)) +
+ (observation.num_received_packets /
+ std::pow(1.0 - loss_probability, 2)));
+ }
+
+ if (derivatives.second >= 0.0) {
+ RTC_LOG(LS_ERROR) << "The second derivative is mathematically guaranteed "
+ "to be negative but is "
+ << derivatives.second << ".";
+ derivatives.second = -1.0e-6;
+ }
+
+ return derivatives;
+}
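+
+// Editorial note: `first` and `second` above are the derivatives of the
+// temporally weighted Bernoulli log-likelihood
+//   sum_i w_i * (lost_i * log(p_i) + received_i * log(1 - p_i)),
+// treating each observation's loss probability p_i as moving one-for-one
+// with the candidate's inherent loss. Every term of `second` is
+// non-positive, so a non-negative sum can only come from numerical
+// error, hence the fallback to -1.0e-6.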
+
+double LossBasedBweV2::GetFeasibleInherentLoss(
+ const ChannelParameters& channel_parameters) const {
+ return std::min(
+ std::max(channel_parameters.inherent_loss,
+ config_->inherent_loss_lower_bound),
+ GetInherentLossUpperBound(channel_parameters.loss_limited_bandwidth));
+}
+
+double LossBasedBweV2::GetInherentLossUpperBound(DataRate bandwidth) const {
+ if (bandwidth.IsZero()) {
+ return 1.0;
+ }
+
+ double inherent_loss_upper_bound =
+ config_->inherent_loss_upper_bound_offset +
+ config_->inherent_loss_upper_bound_bandwidth_balance / bandwidth;
+ return std::min(inherent_loss_upper_bound, 1.0);
+}
+
+double LossBasedBweV2::AdjustBiasFactor(double loss_rate,
+ double bias_factor) const {
+ return bias_factor *
+ (config_->loss_threshold_of_high_bandwidth_preference - loss_rate) /
+ (config_->bandwidth_preference_smoothing_factor +
+ std::abs(config_->loss_threshold_of_high_bandwidth_preference -
+ loss_rate));
+}
+
+double LossBasedBweV2::GetHighBandwidthBias(DataRate bandwidth) const {
+ if (IsValid(bandwidth)) {
+ const double average_reported_loss_ratio = GetAverageReportedLossRatio();
+ return AdjustBiasFactor(average_reported_loss_ratio,
+ config_->higher_bandwidth_bias_factor) *
+ bandwidth.kbps() +
+ AdjustBiasFactor(average_reported_loss_ratio,
+ config_->higher_log_bandwidth_bias_factor) *
+ std::log(1.0 + bandwidth.kbps());
+ }
+ return 0.0;
+}
+
+double LossBasedBweV2::GetObjective(
+ const ChannelParameters& channel_parameters) const {
+ double objective = 0.0;
+
+ const double high_bandwidth_bias =
+ GetHighBandwidthBias(channel_parameters.loss_limited_bandwidth);
+
+ for (const Observation& observation : observations_) {
+ if (!observation.IsInitialized()) {
+ continue;
+ }
+
+ double loss_probability = GetLossProbability(
+ channel_parameters.inherent_loss,
+ channel_parameters.loss_limited_bandwidth, observation.sending_rate);
+
+ double temporal_weight =
+ temporal_weights_[(num_observations_ - 1) - observation.id];
+
+ objective +=
+ temporal_weight *
+ ((observation.num_lost_packets * std::log(loss_probability)) +
+ (observation.num_received_packets * std::log(1.0 - loss_probability)));
+ objective +=
+ temporal_weight * high_bandwidth_bias * observation.num_packets;
+ }
+
+ return objective;
+}
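+
+// Editorial note: the objective above is the temporally weighted
+// log-likelihood of the observed loss pattern plus a bias that grows
+// with the candidate bandwidth; the bias stays positive while the
+// average reported loss is below LossThresholdOfHighBandwidthPreference,
+// nudging the candidate search towards higher rates under light loss.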
+
+DataRate LossBasedBweV2::GetSendingRate(
+ DataRate instantaneous_sending_rate) const {
+ if (num_observations_ <= 0) {
+ return instantaneous_sending_rate;
+ }
+
+ const int most_recent_observation_idx =
+ (num_observations_ - 1) % config_->observation_window_size;
+ const Observation& most_recent_observation =
+ observations_[most_recent_observation_idx];
+ DataRate sending_rate_previous_observation =
+ most_recent_observation.sending_rate;
+
+ return config_->sending_rate_smoothing_factor *
+ sending_rate_previous_observation +
+ (1.0 - config_->sending_rate_smoothing_factor) *
+ instantaneous_sending_rate;
+}
+
+DataRate LossBasedBweV2::GetInstantUpperBound() const {
+ return cached_instant_upper_bound_.value_or(DataRate::PlusInfinity());
+}
+
+void LossBasedBweV2::CalculateInstantUpperBound() {
+ DataRate instant_limit = DataRate::PlusInfinity();
+ const double average_reported_loss_ratio = GetAverageReportedLossRatio();
+ if (average_reported_loss_ratio > config_->instant_upper_bound_loss_offset) {
+ instant_limit = config_->instant_upper_bound_bandwidth_balance /
+ (average_reported_loss_ratio -
+ config_->instant_upper_bound_loss_offset);
+ }
+ cached_instant_upper_bound_ = instant_limit;
+}
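+
+// Editorial illustration (not part of the original source): with the
+// default InstantUpperBoundBwBalance of 15 kbps and loss offset of 0.05,
+// an average reported loss ratio of 0.10 caps the estimate at
+// 15 / (0.10 - 0.05) = 300 kbps, a ratio of 0.06 allows up to 1500 kbps,
+// and at or below 5% loss the cap is infinite.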
+
+void LossBasedBweV2::CalculateTemporalWeights() {
+ for (int i = 0; i < config_->observation_window_size; ++i) {
+ temporal_weights_[i] = std::pow(config_->temporal_weight_factor, i);
+ instant_upper_bound_temporal_weights_[i] =
+ std::pow(config_->instant_upper_bound_temporal_weight_factor, i);
+ }
+}
+
+void LossBasedBweV2::NewtonsMethodUpdate(
+ ChannelParameters& channel_parameters) const {
+ if (num_observations_ <= 0) {
+ return;
+ }
+
+ for (int i = 0; i < config_->newton_iterations; ++i) {
+ const Derivatives derivatives = GetDerivatives(channel_parameters);
+ channel_parameters.inherent_loss -=
+ config_->newton_step_size * derivatives.first / derivatives.second;
+ channel_parameters.inherent_loss =
+ GetFeasibleInherentLoss(channel_parameters);
+ }
+}
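+
+// Editorial note: each iteration above is a damped Newton step,
+//   inherent_loss -= newton_step_size * first / second,
+// which ascends the objective (whose second derivative is forced
+// negative) towards its maximum and then projects the result back into
+// the feasible loss interval.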
+
+bool LossBasedBweV2::TrendlineEsimateAllowBitrateIncrease() const {
+ if (!config_->trendline_integration_enabled) {
+ return true;
+ }
+
+ for (const auto& detector_state : delay_detector_states_) {
+ if (detector_state == BandwidthUsage::kBwOverusing ||
+ detector_state == BandwidthUsage::kBwUnderusing) {
+ return false;
+ }
+ }
+ return true;
+}
+
+bool LossBasedBweV2::TrendlineEsimateAllowEmergencyBackoff() const {
+ if (!config_->trendline_integration_enabled) {
+ return true;
+ }
+
+ if (!config_->use_acked_bitrate_only_when_overusing) {
+ return true;
+ }
+
+ for (const auto& detector_state : delay_detector_states_) {
+ if (detector_state == BandwidthUsage::kBwOverusing) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+bool LossBasedBweV2::PushBackObservation(
+ rtc::ArrayView<const PacketResult> packet_results,
+ BandwidthUsage delay_detector_state) {
+ delay_detector_states_.push_front(delay_detector_state);
+ if (static_cast<int>(delay_detector_states_.size()) >
+ config_->trendline_observations_window_size) {
+ delay_detector_states_.pop_back();
+ }
+
+ if (packet_results.empty()) {
+ return false;
+ }
+
+ PacketResultsSummary packet_results_summary =
+ GetPacketResultsSummary(packet_results);
+
+ partial_observation_.num_packets += packet_results_summary.num_packets;
+ partial_observation_.num_lost_packets +=
+ packet_results_summary.num_lost_packets;
+ partial_observation_.size += packet_results_summary.total_size;
+
+ // This is the first packet report we have received.
+ if (!IsValid(last_send_time_most_recent_observation_)) {
+ last_send_time_most_recent_observation_ =
+ packet_results_summary.first_send_time;
+ }
+
+ const Timestamp last_send_time = packet_results_summary.last_send_time;
+ const TimeDelta observation_duration =
+ last_send_time - last_send_time_most_recent_observation_;
+ // Too small to be meaningful.
+ if (observation_duration <= TimeDelta::Zero() ||
+ (observation_duration < config_->observation_duration_lower_bound &&
+ (delay_detector_state != BandwidthUsage::kBwOverusing ||
+ !config_->trendline_integration_enabled))) {
+ return false;
+ }
+
+ last_send_time_most_recent_observation_ = last_send_time;
+
+ Observation observation;
+ observation.num_packets = partial_observation_.num_packets;
+ observation.num_lost_packets = partial_observation_.num_lost_packets;
+ observation.num_received_packets =
+ observation.num_packets - observation.num_lost_packets;
+ observation.sending_rate =
+ GetSendingRate(partial_observation_.size / observation_duration);
+ observation.id = num_observations_++;
+ observations_[observation.id % config_->observation_window_size] =
+ observation;
+
+ partial_observation_ = PartialObservation();
+
+ CalculateInstantUpperBound();
+ return true;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/congestion_controller/goog_cc/loss_based_bwe_v2.h b/third_party/libwebrtc/modules/congestion_controller/goog_cc/loss_based_bwe_v2.h
new file mode 100644
index 0000000000..fdfb440c72
--- /dev/null
+++ b/third_party/libwebrtc/modules/congestion_controller/goog_cc/loss_based_bwe_v2.h
@@ -0,0 +1,175 @@
+/*
+ * Copyright 2021 The WebRTC project authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_CONGESTION_CONTROLLER_GOOG_CC_LOSS_BASED_BWE_V2_H_
+#define MODULES_CONGESTION_CONTROLLER_GOOG_CC_LOSS_BASED_BWE_V2_H_
+
+#include <cstddef>
+#include <deque>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/array_view.h"
+#include "api/field_trials_view.h"
+#include "api/network_state_predictor.h"
+#include "api/transport/network_types.h"
+#include "api/units/data_rate.h"
+#include "api/units/data_size.h"
+#include "api/units/time_delta.h"
+#include "api/units/timestamp.h"
+
+namespace webrtc {
+
+class LossBasedBweV2 {
+ public:
+ // Creates a disabled `LossBasedBweV2` if the
+ // `key_value_config` is not valid.
+ explicit LossBasedBweV2(const FieldTrialsView* key_value_config);
+
+ LossBasedBweV2(const LossBasedBweV2&) = delete;
+ LossBasedBweV2& operator=(const LossBasedBweV2&) = delete;
+
+ ~LossBasedBweV2() = default;
+
+ bool IsEnabled() const;
+ // Returns true iff a BWE can be calculated, i.e., the estimator has been
+ // initialized with a BWE and then has received enough `PacketResult`s.
+ bool IsReady() const;
+
+ // Returns `DataRate::PlusInfinity` if no BWE can be calculated.
+ DataRate GetBandwidthEstimate(DataRate delay_based_limit) const;
+
+ void SetAcknowledgedBitrate(DataRate acknowledged_bitrate);
+ void SetBandwidthEstimate(DataRate bandwidth_estimate);
+
+ void UpdateBandwidthEstimate(
+ rtc::ArrayView<const PacketResult> packet_results,
+ DataRate delay_based_estimate,
+ BandwidthUsage delay_detector_state);
+
+ private:
+ struct ChannelParameters {
+ double inherent_loss = 0.0;
+ DataRate loss_limited_bandwidth = DataRate::MinusInfinity();
+ };
+
+ struct Config {
+ double bandwidth_rampup_upper_bound_factor = 0.0;
+ double rampup_acceleration_max_factor = 0.0;
+ TimeDelta rampup_acceleration_maxout_time = TimeDelta::Zero();
+ std::vector<double> candidate_factors;
+ double higher_bandwidth_bias_factor = 0.0;
+ double higher_log_bandwidth_bias_factor = 0.0;
+ double inherent_loss_lower_bound = 0.0;
+ double loss_threshold_of_high_bandwidth_preference = 0.0;
+ double bandwidth_preference_smoothing_factor = 0.0;
+ DataRate inherent_loss_upper_bound_bandwidth_balance =
+ DataRate::MinusInfinity();
+ double inherent_loss_upper_bound_offset = 0.0;
+ double initial_inherent_loss_estimate = 0.0;
+ int newton_iterations = 0;
+ double newton_step_size = 0.0;
+ bool append_acknowledged_rate_candidate = true;
+ bool append_delay_based_estimate_candidate = false;
+ TimeDelta observation_duration_lower_bound = TimeDelta::Zero();
+ int observation_window_size = 0;
+ double sending_rate_smoothing_factor = 0.0;
+ double instant_upper_bound_temporal_weight_factor = 0.0;
+ DataRate instant_upper_bound_bandwidth_balance = DataRate::MinusInfinity();
+ double instant_upper_bound_loss_offset = 0.0;
+ double temporal_weight_factor = 0.0;
+ double bandwidth_backoff_lower_bound_factor = 0.0;
+ bool trendline_integration_enabled = false;
+ int trendline_observations_window_size = 0;
+ double max_increase_factor = 0.0;
+ TimeDelta delayed_increase_window = TimeDelta::Zero();
+ bool use_acked_bitrate_only_when_overusing = false;
+ bool not_increase_if_inherent_loss_less_than_average_loss = false;
+ };
+
+ struct Derivatives {
+ double first = 0.0;
+ double second = 0.0;
+ };
+
+ struct Observation {
+ bool IsInitialized() const { return id != -1; }
+
+ int num_packets = 0;
+ int num_lost_packets = 0;
+ int num_received_packets = 0;
+ DataRate sending_rate = DataRate::MinusInfinity();
+ int id = -1;
+ };
+
+ struct PartialObservation {
+ int num_packets = 0;
+ int num_lost_packets = 0;
+ DataSize size = DataSize::Zero();
+ };
+
+ static absl::optional<Config> CreateConfig(
+ const FieldTrialsView* key_value_config);
+ bool IsConfigValid() const;
+
+ // Returns `0.0` if not enough loss statistics have been received.
+ double GetAverageReportedLossRatio() const;
+ std::vector<ChannelParameters> GetCandidates(
+ DataRate delay_based_estimate) const;
+ DataRate GetCandidateBandwidthUpperBound(DataRate delay_based_estimate) const;
+ Derivatives GetDerivatives(const ChannelParameters& channel_parameters) const;
+ double GetFeasibleInherentLoss(
+ const ChannelParameters& channel_parameters) const;
+ double GetInherentLossUpperBound(DataRate bandwidth) const;
+ double AdjustBiasFactor(double loss_rate, double bias_factor) const;
+ double GetHighBandwidthBias(DataRate bandwidth) const;
+ double GetObjective(const ChannelParameters& channel_parameters) const;
+ DataRate GetSendingRate(DataRate instantaneous_sending_rate) const;
+ DataRate GetInstantUpperBound() const;
+ void CalculateInstantUpperBound();
+
+ void CalculateTemporalWeights();
+ void NewtonsMethodUpdate(ChannelParameters& channel_parameters) const;
+
+ // Returns false if a kBwOverusing or kBwUnderusing state exists in the
+ // window.
+ bool TrendlineEsimateAllowBitrateIncrease() const;
+
+ // Returns true if there exists an overusing state in the window.
+ bool TrendlineEsimateAllowEmergencyBackoff() const;
+
+ // Returns false if no observation was created.
+ bool PushBackObservation(rtc::ArrayView<const PacketResult> packet_results,
+ BandwidthUsage delay_detector_state);
+ void UpdateTrendlineEstimator(
+ const std::vector<PacketResult>& packet_feedbacks,
+ Timestamp at_time);
+ void UpdateDelayDetector(BandwidthUsage delay_detector_state);
+
+ absl::optional<DataRate> acknowledged_bitrate_;
+ absl::optional<Config> config_;
+ ChannelParameters current_estimate_;
+ int num_observations_ = 0;
+ std::vector<Observation> observations_;
+ PartialObservation partial_observation_;
+ Timestamp last_send_time_most_recent_observation_ = Timestamp::PlusInfinity();
+ Timestamp last_time_estimate_reduced_ = Timestamp::MinusInfinity();
+ absl::optional<DataRate> cached_instant_upper_bound_;
+ std::vector<double> instant_upper_bound_temporal_weights_;
+ std::vector<double> temporal_weights_;
+ std::deque<BandwidthUsage> delay_detector_states_;
+ Timestamp recovering_after_loss_timestamp_ = Timestamp::MinusInfinity();
+ DataRate bandwidth_limit_in_current_window_ = DataRate::PlusInfinity();
+ bool limited_due_to_loss_candidate_ = false;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_CONGESTION_CONTROLLER_GOOG_CC_LOSS_BASED_BWE_V2_H_
diff --git a/third_party/libwebrtc/modules/congestion_controller/goog_cc/loss_based_bwe_v2_gn/moz.build b/third_party/libwebrtc/modules/congestion_controller/goog_cc/loss_based_bwe_v2_gn/moz.build
new file mode 100644
index 0000000000..4064872b72
--- /dev/null
+++ b/third_party/libwebrtc/modules/congestion_controller/goog_cc/loss_based_bwe_v2_gn/moz.build
@@ -0,0 +1,214 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/congestion_controller/goog_cc/loss_based_bwe_v2.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "GLESv2",
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "dl",
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("loss_based_bwe_v2_gn")
diff --git a/third_party/libwebrtc/modules/congestion_controller/goog_cc/loss_based_bwe_v2_test.cc b/third_party/libwebrtc/modules/congestion_controller/goog_cc/loss_based_bwe_v2_test.cc
new file mode 100644
index 0000000000..9dc6144217
--- /dev/null
+++ b/third_party/libwebrtc/modules/congestion_controller/goog_cc/loss_based_bwe_v2_test.cc
@@ -0,0 +1,921 @@
+/*
+ * Copyright 2021 The WebRTC project authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/congestion_controller/goog_cc/loss_based_bwe_v2.h"
+
+#include <string>
+#include <vector>
+
+#include "api/network_state_predictor.h"
+#include "api/transport/network_types.h"
+#include "api/units/data_rate.h"
+#include "api/units/data_size.h"
+#include "api/units/time_delta.h"
+#include "api/units/timestamp.h"
+#include "rtc_base/strings/string_builder.h"
+#include "test/explicit_key_value_config.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+namespace {
+
+using ::webrtc::test::ExplicitKeyValueConfig;
+
+constexpr TimeDelta kObservationDurationLowerBound = TimeDelta::Millis(200);
+constexpr TimeDelta kDelayedIncreaseWindow = TimeDelta::Millis(300);
+constexpr double kMaxIncreaseFactor = 1.5;
+
+class LossBasedBweV2Test : public ::testing::TestWithParam<bool> {
+ protected:
+ std::string Config(bool enabled,
+ bool valid,
+ bool trendline_integration_enabled) {
+ char buffer[1024];
+ rtc::SimpleStringBuilder config_string(buffer);
+
+ config_string << "WebRTC-Bwe-LossBasedBweV2/";
+
+ if (enabled) {
+ config_string << "Enabled:true";
+ } else {
+ config_string << "Enabled:false";
+ }
+
+ if (valid) {
+ config_string << ",BwRampupUpperBoundFactor:1.2";
+ } else {
+ config_string << ",BwRampupUpperBoundFactor:0.0";
+ }
+
+ if (trendline_integration_enabled) {
+ config_string << ",TrendlineIntegrationEnabled:true";
+ } else {
+ config_string << ",TrendlineIntegrationEnabled:false";
+ }
+
+ config_string
+ << ",CandidateFactors:1.1|1.0|0.95,HigherBwBiasFactor:0.01,"
+ "DelayBasedCandidate:true,"
+ "InherentLossLowerBound:0.001,InherentLossUpperBoundBwBalance:"
+ "14kbps,"
+ "InherentLossUpperBoundOffset:0.9,InitialInherentLossEstimate:0.01,"
+ "NewtonIterations:2,NewtonStepSize:0.4,ObservationWindowSize:15,"
+ "SendingRateSmoothingFactor:0.01,"
+ "InstantUpperBoundTemporalWeightFactor:0.97,"
+ "InstantUpperBoundBwBalance:90kbps,"
+ "InstantUpperBoundLossOffset:0.1,TemporalWeightFactor:0.98";
+
+ config_string.AppendFormat(
+ ",ObservationDurationLowerBound:%dms",
+ static_cast<int>(kObservationDurationLowerBound.ms()));
+ config_string.AppendFormat(",MaxIncreaseFactor:%f", kMaxIncreaseFactor);
+ config_string.AppendFormat(",DelayedIncreaseWindow:%dms",
+ static_cast<int>(kDelayedIncreaseWindow.ms()));
+
+ config_string << "/";
+
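+    // With enabled=true, valid=true and trendline_integration_enabled=false,
+    // the assembled string has the form (wrapped here for readability; the
+    // actual string contains no line breaks):
+    //   "WebRTC-Bwe-LossBasedBweV2/Enabled:true,BwRampupUpperBoundFactor:1.2,
+    //    TrendlineIntegrationEnabled:false,CandidateFactors:1.1|1.0|0.95,...,
+    //    ObservationDurationLowerBound:200ms,MaxIncreaseFactor:1.500000,
+    //    DelayedIncreaseWindow:300ms/"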
+ return config_string.str();
+ }
+
+ std::vector<PacketResult> CreatePacketResultsWithReceivedPackets(
+ Timestamp first_packet_timestamp) {
+ std::vector<PacketResult> enough_feedback(2);
+ enough_feedback[0].sent_packet.size = DataSize::Bytes(15'000);
+ enough_feedback[1].sent_packet.size = DataSize::Bytes(15'000);
+ enough_feedback[0].sent_packet.send_time = first_packet_timestamp;
+ enough_feedback[1].sent_packet.send_time =
+ first_packet_timestamp + kObservationDurationLowerBound;
+ enough_feedback[0].receive_time =
+ first_packet_timestamp + kObservationDurationLowerBound;
+ enough_feedback[1].receive_time =
+ first_packet_timestamp + 2 * kObservationDurationLowerBound;
+ return enough_feedback;
+ }
+
+ std::vector<PacketResult> CreatePacketResultsWith10pLossRate(
+ Timestamp first_packet_timestamp) {
+ std::vector<PacketResult> enough_feedback(10);
+ enough_feedback[0].sent_packet.size = DataSize::Bytes(15'000);
+ for (unsigned i = 0; i < enough_feedback.size(); ++i) {
+ enough_feedback[i].sent_packet.size = DataSize::Bytes(15'000);
+ enough_feedback[i].sent_packet.send_time =
+ first_packet_timestamp +
+ static_cast<int>(i) * kObservationDurationLowerBound;
+ enough_feedback[i].receive_time =
+ first_packet_timestamp +
+ static_cast<int>(i + 1) * kObservationDurationLowerBound;
+ }
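+    // The last of the ten packets is never received, yielding a 10% loss
+    // rate.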
+ enough_feedback[9].receive_time = Timestamp::PlusInfinity();
+ return enough_feedback;
+ }
+
+ std::vector<PacketResult> CreatePacketResultsWith50pLossRate(
+ Timestamp first_packet_timestamp) {
+ std::vector<PacketResult> enough_feedback(2);
+ enough_feedback[0].sent_packet.size = DataSize::Bytes(15'000);
+ enough_feedback[1].sent_packet.size = DataSize::Bytes(15'000);
+ enough_feedback[0].sent_packet.send_time = first_packet_timestamp;
+ enough_feedback[1].sent_packet.send_time =
+ first_packet_timestamp + kObservationDurationLowerBound;
+ enough_feedback[0].receive_time =
+ first_packet_timestamp + kObservationDurationLowerBound;
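+    // The second packet is never received, yielding a 50% loss rate.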
+ enough_feedback[1].receive_time = Timestamp::PlusInfinity();
+ return enough_feedback;
+ }
+
+ std::vector<PacketResult> CreatePacketResultsWith100pLossRate(
+ Timestamp first_packet_timestamp) {
+ std::vector<PacketResult> enough_feedback(2);
+ enough_feedback[0].sent_packet.size = DataSize::Bytes(15'000);
+ enough_feedback[1].sent_packet.size = DataSize::Bytes(15'000);
+ enough_feedback[0].sent_packet.send_time = first_packet_timestamp;
+ enough_feedback[1].sent_packet.send_time =
+ first_packet_timestamp + kObservationDurationLowerBound;
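+    // Neither packet is received, yielding a 100% loss rate.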
+ enough_feedback[0].receive_time = Timestamp::PlusInfinity();
+ enough_feedback[1].receive_time = Timestamp::PlusInfinity();
+ return enough_feedback;
+ }
+};
+
+TEST_P(LossBasedBweV2Test, EnabledWhenGivenValidConfigurationValues) {
+ ExplicitKeyValueConfig key_value_config(
+ Config(/*enabled=*/true, /*valid=*/true,
+ /*trendline_integration_enabled=*/GetParam()));
+ LossBasedBweV2 loss_based_bandwidth_estimator(&key_value_config);
+
+ EXPECT_TRUE(loss_based_bandwidth_estimator.IsEnabled());
+}
+
+TEST_P(LossBasedBweV2Test, DisabledWhenGivenDisabledConfiguration) {
+ ExplicitKeyValueConfig key_value_config(
+ Config(/*enabled=*/false, /*valid=*/true,
+ /*trendline_integration_enabled=*/GetParam()));
+ LossBasedBweV2 loss_based_bandwidth_estimator(&key_value_config);
+
+ EXPECT_FALSE(loss_based_bandwidth_estimator.IsEnabled());
+}
+
+TEST_P(LossBasedBweV2Test, DisabledWhenGivenNonValidConfigurationValues) {
+ ExplicitKeyValueConfig key_value_config(
+ Config(/*enabled=*/true, /*valid=*/false,
+ /*trendline_integration_enabled=*/GetParam()));
+ LossBasedBweV2 loss_based_bandwidth_estimator(&key_value_config);
+
+ EXPECT_FALSE(loss_based_bandwidth_estimator.IsEnabled());
+}
+
+TEST_P(LossBasedBweV2Test, DisabledWhenGivenNonPositiveCandidateFactor) {
+ ExplicitKeyValueConfig key_value_config_negative_candidate_factor(
+ "WebRTC-Bwe-LossBasedBweV2/Enabled:true,CandidateFactors:-1.3|1.1/");
+ LossBasedBweV2 loss_based_bandwidth_estimator_1(
+ &key_value_config_negative_candidate_factor);
+ EXPECT_FALSE(loss_based_bandwidth_estimator_1.IsEnabled());
+
+ ExplicitKeyValueConfig key_value_config_zero_candidate_factor(
+ "WebRTC-Bwe-LossBasedBweV2/Enabled:true,CandidateFactors:0.0|1.1/");
+ LossBasedBweV2 loss_based_bandwidth_estimator_2(
+ &key_value_config_zero_candidate_factor);
+ EXPECT_FALSE(loss_based_bandwidth_estimator_2.IsEnabled());
+}
+
+TEST_P(LossBasedBweV2Test,
+ DisabledWhenGivenConfigurationThatDoesNotAllowGeneratingCandidates) {
+ ExplicitKeyValueConfig key_value_config(
+ "WebRTC-Bwe-LossBasedBweV2/"
+ "Enabled:true,CandidateFactors:1.0,AckedRateCandidate:false,"
+ "DelayBasedCandidate:false/");
+ LossBasedBweV2 loss_based_bandwidth_estimator(&key_value_config);
+ EXPECT_FALSE(loss_based_bandwidth_estimator.IsEnabled());
+}
+
+TEST_P(LossBasedBweV2Test, ReturnsDelayBasedEstimateWhenDisabled) {
+ ExplicitKeyValueConfig key_value_config(
+ Config(/*enabled=*/false, /*valid=*/true,
+ /*trendline_integration_enabled=*/GetParam()));
+ LossBasedBweV2 loss_based_bandwidth_estimator(&key_value_config);
+
+ EXPECT_EQ(loss_based_bandwidth_estimator.GetBandwidthEstimate(
+ /*delay_based_limit=*/DataRate::KilobitsPerSec(100)),
+ DataRate::KilobitsPerSec(100));
+}
+
+TEST_P(LossBasedBweV2Test,
+       ReturnsDelayBasedEstimateWhenGivenNonValidConfigurationValues) {
+ ExplicitKeyValueConfig key_value_config(
+ Config(/*enabled=*/true, /*valid=*/false,
+ /*trendline_integration_enabled=*/GetParam()));
+ LossBasedBweV2 loss_based_bandwidth_estimator(&key_value_config);
+
+ EXPECT_EQ(loss_based_bandwidth_estimator.GetBandwidthEstimate(
+ /*delay_based_limit=*/DataRate::KilobitsPerSec(100)),
+ DataRate::KilobitsPerSec(100));
+}
+
+TEST_P(LossBasedBweV2Test,
+ BandwidthEstimateGivenInitializationAndThenFeedback) {
+ std::vector<PacketResult> enough_feedback =
+ CreatePacketResultsWithReceivedPackets(
+ /*first_packet_timestamp=*/Timestamp::Zero());
+
+ ExplicitKeyValueConfig key_value_config(
+ Config(/*enabled=*/true, /*valid=*/true,
+ /*trendline_integration_enabled=*/GetParam()));
+ LossBasedBweV2 loss_based_bandwidth_estimator(&key_value_config);
+
+ loss_based_bandwidth_estimator.SetBandwidthEstimate(
+ DataRate::KilobitsPerSec(600));
+ loss_based_bandwidth_estimator.UpdateBandwidthEstimate(
+ enough_feedback, DataRate::PlusInfinity(), BandwidthUsage::kBwNormal);
+
+ EXPECT_TRUE(loss_based_bandwidth_estimator.IsReady());
+ EXPECT_TRUE(
+ loss_based_bandwidth_estimator
+ .GetBandwidthEstimate(/*delay_based_limit=*/DataRate::PlusInfinity())
+ .IsFinite());
+}
+
+TEST_P(LossBasedBweV2Test, NoBandwidthEstimateGivenNoInitialization) {
+ std::vector<PacketResult> enough_feedback =
+ CreatePacketResultsWithReceivedPackets(
+ /*first_packet_timestamp=*/Timestamp::Zero());
+ ExplicitKeyValueConfig key_value_config(
+ Config(/*enabled=*/true, /*valid=*/true,
+ /*trendline_integration_enabled=*/GetParam()));
+ LossBasedBweV2 loss_based_bandwidth_estimator(&key_value_config);
+
+ loss_based_bandwidth_estimator.UpdateBandwidthEstimate(
+ enough_feedback, DataRate::PlusInfinity(), BandwidthUsage::kBwNormal);
+
+ EXPECT_FALSE(loss_based_bandwidth_estimator.IsReady());
+ EXPECT_TRUE(
+ loss_based_bandwidth_estimator
+ .GetBandwidthEstimate(/*delay_based_limit=*/DataRate::PlusInfinity())
+ .IsPlusInfinity());
+}
+
+TEST_P(LossBasedBweV2Test, NoBandwidthEstimateGivenNotEnoughFeedback) {
+ // Create packet results where the observation duration is less than the lower
+ // bound.
+ PacketResult not_enough_feedback[2];
+ not_enough_feedback[0].sent_packet.size = DataSize::Bytes(15'000);
+ not_enough_feedback[1].sent_packet.size = DataSize::Bytes(15'000);
+ not_enough_feedback[0].sent_packet.send_time = Timestamp::Zero();
+ not_enough_feedback[1].sent_packet.send_time =
+ Timestamp::Zero() + kObservationDurationLowerBound / 2;
+ not_enough_feedback[0].receive_time =
+ Timestamp::Zero() + kObservationDurationLowerBound / 2;
+ not_enough_feedback[1].receive_time =
+ Timestamp::Zero() + kObservationDurationLowerBound;
+
+ ExplicitKeyValueConfig key_value_config(
+ Config(/*enabled=*/true, /*valid=*/true,
+ /*trendline_integration_enabled=*/GetParam()));
+ LossBasedBweV2 loss_based_bandwidth_estimator(&key_value_config);
+
+ loss_based_bandwidth_estimator.SetBandwidthEstimate(
+ DataRate::KilobitsPerSec(600));
+
+ EXPECT_FALSE(loss_based_bandwidth_estimator.IsReady());
+ EXPECT_TRUE(
+ loss_based_bandwidth_estimator
+ .GetBandwidthEstimate(/*delay_based_limit=*/DataRate::PlusInfinity())
+ .IsPlusInfinity());
+
+ loss_based_bandwidth_estimator.UpdateBandwidthEstimate(
+ not_enough_feedback, DataRate::PlusInfinity(), BandwidthUsage::kBwNormal);
+
+ EXPECT_FALSE(loss_based_bandwidth_estimator.IsReady());
+ EXPECT_TRUE(
+ loss_based_bandwidth_estimator
+ .GetBandwidthEstimate(/*delay_based_limit=*/DataRate::PlusInfinity())
+ .IsPlusInfinity());
+}
+
+TEST_P(LossBasedBweV2Test,
+ SetValueIsTheEstimateUntilAdditionalFeedbackHasBeenReceived) {
+ std::vector<PacketResult> enough_feedback_1 =
+ CreatePacketResultsWithReceivedPackets(
+ /*first_packet_timestamp=*/Timestamp::Zero());
+ std::vector<PacketResult> enough_feedback_2 =
+ CreatePacketResultsWithReceivedPackets(
+ /*first_packet_timestamp=*/Timestamp::Zero() +
+ 2 * kObservationDurationLowerBound);
+
+ ExplicitKeyValueConfig key_value_config(
+ Config(/*enabled=*/true, /*valid=*/true,
+ /*trendline_integration_enabled=*/GetParam()));
+ LossBasedBweV2 loss_based_bandwidth_estimator(&key_value_config);
+
+ loss_based_bandwidth_estimator.SetBandwidthEstimate(
+ DataRate::KilobitsPerSec(600));
+ loss_based_bandwidth_estimator.UpdateBandwidthEstimate(
+ enough_feedback_1, DataRate::PlusInfinity(), BandwidthUsage::kBwNormal);
+
+ EXPECT_NE(loss_based_bandwidth_estimator.GetBandwidthEstimate(
+ /*delay_based_limit=*/DataRate::PlusInfinity()),
+ DataRate::KilobitsPerSec(600));
+
+ loss_based_bandwidth_estimator.SetBandwidthEstimate(
+ DataRate::KilobitsPerSec(600));
+
+ EXPECT_EQ(loss_based_bandwidth_estimator.GetBandwidthEstimate(
+ /*delay_based_limit=*/DataRate::PlusInfinity()),
+ DataRate::KilobitsPerSec(600));
+
+ loss_based_bandwidth_estimator.UpdateBandwidthEstimate(
+ enough_feedback_2, DataRate::PlusInfinity(), BandwidthUsage::kBwNormal);
+
+ EXPECT_NE(loss_based_bandwidth_estimator.GetBandwidthEstimate(
+ /*delay_based_limit=*/DataRate::PlusInfinity()),
+ DataRate::KilobitsPerSec(600));
+}
+
+TEST_P(LossBasedBweV2Test,
+ SetAcknowledgedBitrateOnlyAffectsTheBweWhenAdditionalFeedbackIsGiven) {
+ std::vector<PacketResult> enough_feedback_1 =
+ CreatePacketResultsWithReceivedPackets(
+ /*first_packet_timestamp=*/Timestamp::Zero());
+ std::vector<PacketResult> enough_feedback_2 =
+ CreatePacketResultsWithReceivedPackets(
+ /*first_packet_timestamp=*/Timestamp::Zero() +
+ 2 * kObservationDurationLowerBound);
+
+ ExplicitKeyValueConfig key_value_config(
+ Config(/*enabled=*/true, /*valid=*/true,
+ /*trendline_integration_enabled=*/GetParam()));
+ LossBasedBweV2 loss_based_bandwidth_estimator_1(&key_value_config);
+ LossBasedBweV2 loss_based_bandwidth_estimator_2(&key_value_config);
+
+ loss_based_bandwidth_estimator_1.SetBandwidthEstimate(
+ DataRate::KilobitsPerSec(600));
+ loss_based_bandwidth_estimator_2.SetBandwidthEstimate(
+ DataRate::KilobitsPerSec(600));
+ loss_based_bandwidth_estimator_1.UpdateBandwidthEstimate(
+ enough_feedback_1, DataRate::PlusInfinity(), BandwidthUsage::kBwNormal);
+ loss_based_bandwidth_estimator_2.UpdateBandwidthEstimate(
+ enough_feedback_1, DataRate::PlusInfinity(), BandwidthUsage::kBwNormal);
+
+ EXPECT_EQ(loss_based_bandwidth_estimator_1.GetBandwidthEstimate(
+ /*delay_based_limit=*/DataRate::PlusInfinity()),
+ DataRate::KilobitsPerSec(660));
+
+ loss_based_bandwidth_estimator_1.SetAcknowledgedBitrate(
+ DataRate::KilobitsPerSec(600));
+
+ EXPECT_EQ(loss_based_bandwidth_estimator_1.GetBandwidthEstimate(
+ /*delay_based_limit=*/DataRate::PlusInfinity()),
+ DataRate::KilobitsPerSec(660));
+
+ loss_based_bandwidth_estimator_1.UpdateBandwidthEstimate(
+ enough_feedback_2, DataRate::PlusInfinity(), BandwidthUsage::kBwNormal);
+ loss_based_bandwidth_estimator_2.UpdateBandwidthEstimate(
+ enough_feedback_2, DataRate::PlusInfinity(), BandwidthUsage::kBwNormal);
+
+ EXPECT_NE(loss_based_bandwidth_estimator_1.GetBandwidthEstimate(
+ /*delay_based_limit=*/DataRate::PlusInfinity()),
+ loss_based_bandwidth_estimator_2.GetBandwidthEstimate(
+ /*delay_based_limit=*/DataRate::PlusInfinity()));
+}
+
+TEST_P(LossBasedBweV2Test,
+ BandwidthEstimateIsCappedToBeTcpFairGivenTooHighLossRate) {
+ std::vector<PacketResult> enough_feedback_no_received_packets =
+ CreatePacketResultsWith100pLossRate(
+ /*first_packet_timestamp=*/Timestamp::Zero());
+
+ ExplicitKeyValueConfig key_value_config(
+ Config(/*enabled=*/true, /*valid=*/true,
+ /*trendline_integration_enabled=*/GetParam()));
+ LossBasedBweV2 loss_based_bandwidth_estimator(&key_value_config);
+
+ loss_based_bandwidth_estimator.SetBandwidthEstimate(
+ DataRate::KilobitsPerSec(600));
+ loss_based_bandwidth_estimator.UpdateBandwidthEstimate(
+ enough_feedback_no_received_packets, DataRate::PlusInfinity(),
+ BandwidthUsage::kBwNormal);
+
+ EXPECT_EQ(loss_based_bandwidth_estimator.GetBandwidthEstimate(
+ /*delay_based_limit=*/DataRate::PlusInfinity()),
+ DataRate::KilobitsPerSec(100));
+}
+
+TEST_P(LossBasedBweV2Test, BandwidthEstimateNotIncreaseWhenNetworkUnderusing) {
+ if (!GetParam()) {
+ GTEST_SKIP() << "This test should run only if "
+ "trendline_integration_enabled is enabled";
+ }
+ std::vector<PacketResult> enough_feedback_1 =
+ CreatePacketResultsWithReceivedPackets(
+ /*first_packet_timestamp=*/Timestamp::Zero());
+ std::vector<PacketResult> enough_feedback_2 =
+ CreatePacketResultsWithReceivedPackets(
+ /*first_packet_timestamp=*/Timestamp::Zero() +
+ 2 * kObservationDurationLowerBound);
+
+ ExplicitKeyValueConfig key_value_config(
+ Config(/*enabled=*/true, /*valid=*/true,
+ /*trendline_integration_enabled=*/GetParam()));
+ LossBasedBweV2 loss_based_bandwidth_estimator(&key_value_config);
+
+ loss_based_bandwidth_estimator.SetBandwidthEstimate(
+ DataRate::KilobitsPerSec(600));
+ loss_based_bandwidth_estimator.UpdateBandwidthEstimate(
+ enough_feedback_1, DataRate::PlusInfinity(),
+ BandwidthUsage::kBwUnderusing);
+ EXPECT_LE(loss_based_bandwidth_estimator.GetBandwidthEstimate(
+ /*delay_based_limit=*/DataRate::PlusInfinity()),
+ DataRate::KilobitsPerSec(600));
+ loss_based_bandwidth_estimator.UpdateBandwidthEstimate(
+ enough_feedback_2, DataRate::PlusInfinity(), BandwidthUsage::kBwNormal);
+ EXPECT_LE(loss_based_bandwidth_estimator.GetBandwidthEstimate(
+ /*delay_based_limit=*/DataRate::PlusInfinity()),
+ DataRate::KilobitsPerSec(600));
+}
+
+// When the network is in the normal state, the estimate can increase but
+// never exceed the delay-based estimate.
+TEST_P(LossBasedBweV2Test,
+ BandwidthEstimateCappedByDelayBasedEstimateWhenNetworkNormal) {
+  // Create two sets of packet results where the network is in the normal
+  // state: 100% of the packets are received and there is no delay increase.
+ std::vector<PacketResult> enough_feedback_1 =
+ CreatePacketResultsWithReceivedPackets(
+ /*first_packet_timestamp=*/Timestamp::Zero());
+ std::vector<PacketResult> enough_feedback_2 =
+ CreatePacketResultsWithReceivedPackets(
+ /*first_packet_timestamp=*/Timestamp::Zero() +
+ 2 * kObservationDurationLowerBound);
+ ExplicitKeyValueConfig key_value_config(
+ Config(/*enabled=*/true, /*valid=*/true,
+ /*trendline_integration_enabled=*/GetParam()));
+ LossBasedBweV2 loss_based_bandwidth_estimator(&key_value_config);
+
+ loss_based_bandwidth_estimator.SetBandwidthEstimate(
+ DataRate::KilobitsPerSec(600));
+ loss_based_bandwidth_estimator.UpdateBandwidthEstimate(
+ enough_feedback_1, DataRate::PlusInfinity(), BandwidthUsage::kBwNormal);
+  // If the delay-based estimate is infinite, the loss-based estimate
+  // increases and is not bounded by it.
+ EXPECT_GT(loss_based_bandwidth_estimator.GetBandwidthEstimate(
+ /*delay_based_limit=*/DataRate::PlusInfinity()),
+ DataRate::KilobitsPerSec(600));
+ loss_based_bandwidth_estimator.UpdateBandwidthEstimate(
+ enough_feedback_2, DataRate::PlusInfinity(), BandwidthUsage::kBwNormal);
+  // If the delay-based estimate is finite, the loss-based estimate is
+  // bounded by it.
+ EXPECT_EQ(loss_based_bandwidth_estimator.GetBandwidthEstimate(
+ /*delay_based_limit=*/DataRate::KilobitsPerSec(500)),
+ DataRate::KilobitsPerSec(500));
+}
+
+// When loss based bwe receives a strong signal of overuse and an increase in
+// the loss rate, it should use the acked bitrate for emergency backoff.
+TEST_P(LossBasedBweV2Test, UseAckedBitrateForEmergencyBackOff) {
+  // Create two sets of packet results: the first with a 50% loss rate, the
+  // second with a 100% loss rate.
+ std::vector<PacketResult> enough_feedback_1 =
+ CreatePacketResultsWith50pLossRate(
+ /*first_packet_timestamp=*/Timestamp::Zero());
+ std::vector<PacketResult> enough_feedback_2 =
+ CreatePacketResultsWith100pLossRate(
+ /*first_packet_timestamp=*/Timestamp::Zero() +
+ 2 * kObservationDurationLowerBound);
+
+ ExplicitKeyValueConfig key_value_config(
+ Config(/*enabled=*/true, /*valid=*/true,
+ /*trendline_integration_enabled=*/GetParam()));
+ LossBasedBweV2 loss_based_bandwidth_estimator(&key_value_config);
+
+ loss_based_bandwidth_estimator.SetBandwidthEstimate(
+ DataRate::KilobitsPerSec(600));
+ DataRate acked_bitrate = DataRate::KilobitsPerSec(300);
+ loss_based_bandwidth_estimator.SetAcknowledgedBitrate(acked_bitrate);
+  // Update the estimate while the network is overusing with a 50% loss rate.
+ loss_based_bandwidth_estimator.UpdateBandwidthEstimate(
+ enough_feedback_1, DataRate::PlusInfinity(),
+ BandwidthUsage::kBwOverusing);
+  // Update the estimate again while the network keeps overusing, now with a
+  // 100% loss rate.
+ loss_based_bandwidth_estimator.UpdateBandwidthEstimate(
+ enough_feedback_2, DataRate::PlusInfinity(),
+ BandwidthUsage::kBwOverusing);
+  // The estimated bitrate is now backed off based on the acked bitrate.
+ EXPECT_LE(loss_based_bandwidth_estimator.GetBandwidthEstimate(
+ /*delay_based_limit=*/DataRate::PlusInfinity()),
+ acked_bitrate);
+}
+
+// When receiving the same packet feedback, loss based bwe ignores the feedback
+// and returns the current estimate.
+TEST_P(LossBasedBweV2Test, NoBweChangeIfObservationDurationUnchanged) {
+ std::vector<PacketResult> enough_feedback_1 =
+ CreatePacketResultsWithReceivedPackets(
+ /*first_packet_timestamp=*/Timestamp::Zero());
+ ExplicitKeyValueConfig key_value_config(
+ Config(/*enabled=*/true, /*valid=*/true,
+ /*trendline_integration_enabled=*/GetParam()));
+ LossBasedBweV2 loss_based_bandwidth_estimator(&key_value_config);
+ loss_based_bandwidth_estimator.SetBandwidthEstimate(
+ DataRate::KilobitsPerSec(600));
+ loss_based_bandwidth_estimator.SetAcknowledgedBitrate(
+ DataRate::KilobitsPerSec(300));
+
+ loss_based_bandwidth_estimator.UpdateBandwidthEstimate(
+ enough_feedback_1, DataRate::PlusInfinity(), BandwidthUsage::kBwNormal);
+ DataRate estimate_1 = loss_based_bandwidth_estimator.GetBandwidthEstimate(
+ /*delay_based_limit=*/DataRate::PlusInfinity());
+
+ // Use the same feedback and check if the estimate is unchanged.
+ loss_based_bandwidth_estimator.UpdateBandwidthEstimate(
+ enough_feedback_1, DataRate::PlusInfinity(), BandwidthUsage::kBwNormal);
+ DataRate estimate_2 = loss_based_bandwidth_estimator.GetBandwidthEstimate(
+ /*delay_based_limit=*/DataRate::PlusInfinity());
+ EXPECT_EQ(estimate_2, estimate_1);
+}
+
+// When receiving feedback of packets that were sent within an observation
+// duration, and network is in the normal state, loss based bwe returns the
+// current estimate.
+TEST_P(LossBasedBweV2Test,
+ NoBweChangeIfObservationDurationIsSmallAndNetworkNormal) {
+ std::vector<PacketResult> enough_feedback_1 =
+ CreatePacketResultsWithReceivedPackets(
+ /*first_packet_timestamp=*/Timestamp::Zero());
+ std::vector<PacketResult> enough_feedback_2 =
+ CreatePacketResultsWithReceivedPackets(
+ /*first_packet_timestamp=*/Timestamp::Zero() +
+ kObservationDurationLowerBound - TimeDelta::Millis(1));
+ ExplicitKeyValueConfig key_value_config(
+ Config(/*enabled=*/true, /*valid=*/true,
+ /*trendline_integration_enabled=*/GetParam()));
+ LossBasedBweV2 loss_based_bandwidth_estimator(&key_value_config);
+ loss_based_bandwidth_estimator.SetBandwidthEstimate(
+ DataRate::KilobitsPerSec(600));
+ loss_based_bandwidth_estimator.UpdateBandwidthEstimate(
+ enough_feedback_1, DataRate::PlusInfinity(), BandwidthUsage::kBwNormal);
+ DataRate estimate_1 = loss_based_bandwidth_estimator.GetBandwidthEstimate(
+ /*delay_based_limit=*/DataRate::PlusInfinity());
+
+ loss_based_bandwidth_estimator.UpdateBandwidthEstimate(
+ enough_feedback_2, DataRate::PlusInfinity(), BandwidthUsage::kBwNormal);
+ DataRate estimate_2 = loss_based_bandwidth_estimator.GetBandwidthEstimate(
+ /*delay_based_limit=*/DataRate::PlusInfinity());
+ EXPECT_EQ(estimate_2, estimate_1);
+}
+
+// When receiving feedback of packets that were sent within an observation
+// duration, and network is in the underusing state, loss based bwe returns the
+// current estimate.
+TEST_P(LossBasedBweV2Test,
+ NoBweIncreaseIfObservationDurationIsSmallAndNetworkUnderusing) {
+ std::vector<PacketResult> enough_feedback_1 =
+ CreatePacketResultsWithReceivedPackets(
+ /*first_packet_timestamp=*/Timestamp::Zero());
+ std::vector<PacketResult> enough_feedback_2 =
+ CreatePacketResultsWithReceivedPackets(
+ /*first_packet_timestamp=*/Timestamp::Zero() +
+ kObservationDurationLowerBound - TimeDelta::Millis(1));
+ ExplicitKeyValueConfig key_value_config(
+ Config(/*enabled=*/true, /*valid=*/true,
+ /*trendline_integration_enabled=*/GetParam()));
+ LossBasedBweV2 loss_based_bandwidth_estimator(&key_value_config);
+ loss_based_bandwidth_estimator.SetBandwidthEstimate(
+ DataRate::KilobitsPerSec(600));
+ loss_based_bandwidth_estimator.UpdateBandwidthEstimate(
+ enough_feedback_1, DataRate::PlusInfinity(), BandwidthUsage::kBwNormal);
+ DataRate estimate_1 = loss_based_bandwidth_estimator.GetBandwidthEstimate(
+ /*delay_based_limit=*/DataRate::PlusInfinity());
+
+ loss_based_bandwidth_estimator.UpdateBandwidthEstimate(
+ enough_feedback_2, DataRate::PlusInfinity(),
+ BandwidthUsage::kBwUnderusing);
+ DataRate estimate_2 = loss_based_bandwidth_estimator.GetBandwidthEstimate(
+ /*delay_based_limit=*/DataRate::PlusInfinity());
+ EXPECT_LE(estimate_2, estimate_1);
+}
+
+// When receiving feedback of packets that were sent within an observation
+// duration, network is overusing, and trendline integration is enabled, loss
+// based bwe updates its estimate.
+TEST_P(LossBasedBweV2Test,
+ UpdateEstimateIfObservationDurationIsSmallAndNetworkOverusing) {
+ if (!GetParam()) {
+ GTEST_SKIP() << "This test should run only if "
+ "trendline_integration_enabled is enabled";
+ }
+ std::vector<PacketResult> enough_feedback_1 =
+ CreatePacketResultsWith50pLossRate(
+ /*first_packet_timestamp=*/Timestamp::Zero());
+ std::vector<PacketResult> enough_feedback_2 =
+ CreatePacketResultsWith100pLossRate(
+ /*first_packet_timestamp=*/Timestamp::Zero() +
+ kObservationDurationLowerBound - TimeDelta::Millis(1));
+ ExplicitKeyValueConfig key_value_config(
+ Config(/*enabled=*/true, /*valid=*/true,
+ /*trendline_integration_enabled=*/GetParam()));
+ LossBasedBweV2 loss_based_bandwidth_estimator(&key_value_config);
+
+ loss_based_bandwidth_estimator.SetBandwidthEstimate(
+ DataRate::KilobitsPerSec(600));
+ loss_based_bandwidth_estimator.SetAcknowledgedBitrate(
+ DataRate::KilobitsPerSec(300));
+ loss_based_bandwidth_estimator.UpdateBandwidthEstimate(
+ enough_feedback_1, DataRate::PlusInfinity(), BandwidthUsage::kBwNormal);
+ DataRate estimate_1 = loss_based_bandwidth_estimator.GetBandwidthEstimate(
+ /*delay_based_limit=*/DataRate::PlusInfinity());
+
+ loss_based_bandwidth_estimator.UpdateBandwidthEstimate(
+ enough_feedback_2, DataRate::PlusInfinity(),
+ BandwidthUsage::kBwOverusing);
+ DataRate estimate_2 = loss_based_bandwidth_estimator.GetBandwidthEstimate(
+ /*delay_based_limit=*/DataRate::PlusInfinity());
+ EXPECT_LT(estimate_2, estimate_1);
+}
+
+TEST_P(LossBasedBweV2Test,
+ IncreaseToDelayBasedEstimateIfNoLossOrDelayIncrease) {
+ std::vector<PacketResult> enough_feedback_1 =
+ CreatePacketResultsWithReceivedPackets(
+ /*first_packet_timestamp=*/Timestamp::Zero());
+ std::vector<PacketResult> enough_feedback_2 =
+ CreatePacketResultsWithReceivedPackets(
+ /*first_packet_timestamp=*/Timestamp::Zero() +
+ 2 * kObservationDurationLowerBound);
+ ExplicitKeyValueConfig key_value_config(
+ Config(/*enabled=*/true, /*valid=*/true,
+ /*trendline_integration_enabled=*/GetParam()));
+ LossBasedBweV2 loss_based_bandwidth_estimator(&key_value_config);
+ DataRate delay_based_estimate = DataRate::KilobitsPerSec(5000);
+ loss_based_bandwidth_estimator.SetBandwidthEstimate(
+ DataRate::KilobitsPerSec(600));
+
+ loss_based_bandwidth_estimator.UpdateBandwidthEstimate(
+ enough_feedback_1, delay_based_estimate, BandwidthUsage::kBwNormal);
+ EXPECT_EQ(
+ loss_based_bandwidth_estimator.GetBandwidthEstimate(delay_based_estimate),
+ delay_based_estimate);
+ loss_based_bandwidth_estimator.UpdateBandwidthEstimate(
+ enough_feedback_2, delay_based_estimate, BandwidthUsage::kBwNormal);
+ EXPECT_EQ(
+ loss_based_bandwidth_estimator.GetBandwidthEstimate(delay_based_estimate),
+ delay_based_estimate);
+}
+
+// After loss based bwe backs off, the next estimate is capped by
+// MaxIncreaseFactor * current estimate.
+TEST_P(LossBasedBweV2Test,
+ IncreaseByMaxIncreaseFactorAfterLossBasedBweBacksOff) {
+ std::vector<PacketResult> enough_feedback_1 =
+ CreatePacketResultsWithReceivedPackets(
+ /*first_packet_timestamp=*/Timestamp::Zero());
+ std::vector<PacketResult> enough_feedback_2 =
+ CreatePacketResultsWithReceivedPackets(
+ /*first_packet_timestamp=*/Timestamp::Zero() +
+ kObservationDurationLowerBound);
+ ExplicitKeyValueConfig key_value_config(
+ Config(/*enabled=*/true, /*valid=*/true,
+ /*trendline_integration_enabled=*/GetParam()));
+ LossBasedBweV2 loss_based_bandwidth_estimator(&key_value_config);
+ DataRate delay_based_estimate = DataRate::KilobitsPerSec(5000);
+
+ loss_based_bandwidth_estimator.SetBandwidthEstimate(
+ DataRate::KilobitsPerSec(600));
+ loss_based_bandwidth_estimator.SetAcknowledgedBitrate(
+ DataRate::KilobitsPerSec(300));
+ loss_based_bandwidth_estimator.UpdateBandwidthEstimate(
+ enough_feedback_1, delay_based_estimate, BandwidthUsage::kBwNormal);
+ DataRate estimate_1 =
+ loss_based_bandwidth_estimator.GetBandwidthEstimate(delay_based_estimate);
+ // Increase the acknowledged bitrate to make sure that the estimate is not
+ // capped too low.
+ loss_based_bandwidth_estimator.SetAcknowledgedBitrate(
+ DataRate::KilobitsPerSec(5000));
+ loss_based_bandwidth_estimator.UpdateBandwidthEstimate(
+ enough_feedback_2, delay_based_estimate, BandwidthUsage::kBwNormal);
+
+ // The estimate is capped by current_estimate * kMaxIncreaseFactor because it
+ // recently backed off.
+ DataRate estimate_2 =
+ loss_based_bandwidth_estimator.GetBandwidthEstimate(delay_based_estimate);
+ EXPECT_EQ(estimate_2, estimate_1 * kMaxIncreaseFactor);
+ EXPECT_LE(estimate_2, delay_based_estimate);
+}
+
+// After loss based bwe backs off, the estimate is bounded during the delayed
+// window.
+TEST_P(LossBasedBweV2Test,
+ EstimateBitrateIsBoundedDuringDelayedWindowAfterLossBasedBweBacksOff) {
+ std::vector<PacketResult> enough_feedback_1 =
+ CreatePacketResultsWithReceivedPackets(
+ /*first_packet_timestamp=*/Timestamp::Zero());
+ std::vector<PacketResult> enough_feedback_2 =
+ CreatePacketResultsWith50pLossRate(
+ /*first_packet_timestamp=*/Timestamp::Zero() +
+ kDelayedIncreaseWindow - TimeDelta::Millis(2));
+ std::vector<PacketResult> enough_feedback_3 =
+ CreatePacketResultsWithReceivedPackets(
+ /*first_packet_timestamp=*/Timestamp::Zero() +
+ kDelayedIncreaseWindow - TimeDelta::Millis(1));
+ ExplicitKeyValueConfig key_value_config(
+ Config(/*enabled=*/true, /*valid=*/true,
+ /*trendline_integration_enabled=*/GetParam()));
+ LossBasedBweV2 loss_based_bandwidth_estimator(&key_value_config);
+ DataRate delay_based_estimate = DataRate::KilobitsPerSec(5000);
+
+ loss_based_bandwidth_estimator.SetBandwidthEstimate(
+ DataRate::KilobitsPerSec(600));
+ loss_based_bandwidth_estimator.SetAcknowledgedBitrate(
+ DataRate::KilobitsPerSec(300));
+ loss_based_bandwidth_estimator.UpdateBandwidthEstimate(
+ enough_feedback_1, delay_based_estimate, BandwidthUsage::kBwNormal);
+ // Increase the acknowledged bitrate to make sure that the estimate is not
+ // capped too low.
+ loss_based_bandwidth_estimator.SetAcknowledgedBitrate(
+ DataRate::KilobitsPerSec(5000));
+ loss_based_bandwidth_estimator.UpdateBandwidthEstimate(
+ enough_feedback_2, delay_based_estimate, BandwidthUsage::kBwNormal);
+
+ // The estimate is capped by current_estimate * kMaxIncreaseFactor because
+ // it recently backed off.
+ DataRate estimate_2 =
+ loss_based_bandwidth_estimator.GetBandwidthEstimate(delay_based_estimate);
+
+ loss_based_bandwidth_estimator.UpdateBandwidthEstimate(
+ enough_feedback_3, delay_based_estimate, BandwidthUsage::kBwNormal);
+ // The latest estimate is the same as the previous estimate since the sent
+ // packets were sent within the DelayedIncreaseWindow.
+ EXPECT_EQ(
+ loss_based_bandwidth_estimator.GetBandwidthEstimate(delay_based_estimate),
+ estimate_2);
+}
+
+// The estimate is not bounded after the delayed increase window.
+TEST_P(LossBasedBweV2Test, KeepIncreasingEstimateAfterDelayedIncreaseWindow) {
+ std::vector<PacketResult> enough_feedback_1 =
+ CreatePacketResultsWithReceivedPackets(
+ /*first_packet_timestamp=*/Timestamp::Zero());
+ std::vector<PacketResult> enough_feedback_2 =
+ CreatePacketResultsWithReceivedPackets(
+ /*first_packet_timestamp=*/Timestamp::Zero() +
+ kDelayedIncreaseWindow - TimeDelta::Millis(1));
+ std::vector<PacketResult> enough_feedback_3 =
+ CreatePacketResultsWithReceivedPackets(
+ /*first_packet_timestamp=*/Timestamp::Zero() +
+ kDelayedIncreaseWindow + TimeDelta::Millis(1));
+ ExplicitKeyValueConfig key_value_config(
+ Config(/*enabled=*/true, /*valid=*/true,
+ /*trendline_integration_enabled=*/GetParam()));
+ LossBasedBweV2 loss_based_bandwidth_estimator(&key_value_config);
+ DataRate delay_based_estimate = DataRate::KilobitsPerSec(5000);
+
+ loss_based_bandwidth_estimator.SetBandwidthEstimate(
+ DataRate::KilobitsPerSec(600));
+ loss_based_bandwidth_estimator.SetAcknowledgedBitrate(
+ DataRate::KilobitsPerSec(300));
+ loss_based_bandwidth_estimator.UpdateBandwidthEstimate(
+ enough_feedback_1, delay_based_estimate, BandwidthUsage::kBwNormal);
+ // Increase the acknowledged bitrate to make sure that the estimate is not
+ // capped too low.
+ loss_based_bandwidth_estimator.SetAcknowledgedBitrate(
+ DataRate::KilobitsPerSec(5000));
+ loss_based_bandwidth_estimator.UpdateBandwidthEstimate(
+ enough_feedback_2, delay_based_estimate, BandwidthUsage::kBwNormal);
+
+ // The estimate is capped by current_estimate * kMaxIncreaseFactor because it
+ // recently backed off.
+ DataRate estimate_2 =
+ loss_based_bandwidth_estimator.GetBandwidthEstimate(delay_based_estimate);
+
+ loss_based_bandwidth_estimator.UpdateBandwidthEstimate(
+ enough_feedback_3, delay_based_estimate, BandwidthUsage::kBwNormal);
+ // The estimate can continue increasing after the DelayedIncreaseWindow.
+ EXPECT_GE(
+ loss_based_bandwidth_estimator.GetBandwidthEstimate(delay_based_estimate),
+ estimate_2);
+}
+
+TEST_P(LossBasedBweV2Test, NotIncreaseIfInherentLossLessThanAverageLoss) {
+ ExplicitKeyValueConfig key_value_config(
+ "WebRTC-Bwe-LossBasedBweV2/"
+ "Enabled:true,CandidateFactors:1.2,AckedRateCandidate:false,"
+ "ObservationWindowSize:2,"
+ "DelayBasedCandidate:true,InstantUpperBoundBwBalance:100kbps,"
+ "ObservationDurationLowerBound:200ms,"
+ "NotIncreaseIfInherentLossLessThanAverageLoss:true/");
+ LossBasedBweV2 loss_based_bandwidth_estimator(&key_value_config);
+ DataRate delay_based_estimate = DataRate::KilobitsPerSec(5000);
+
+ loss_based_bandwidth_estimator.SetBandwidthEstimate(
+ DataRate::KilobitsPerSec(600));
+
+ std::vector<PacketResult> enough_feedback_10p_loss_1 =
+ CreatePacketResultsWith10pLossRate(
+ /*first_packet_timestamp=*/Timestamp::Zero());
+ loss_based_bandwidth_estimator.UpdateBandwidthEstimate(
+ enough_feedback_10p_loss_1, delay_based_estimate,
+ BandwidthUsage::kBwNormal);
+
+ std::vector<PacketResult> enough_feedback_10p_loss_2 =
+ CreatePacketResultsWith10pLossRate(
+ /*first_packet_timestamp=*/Timestamp::Zero() +
+ kObservationDurationLowerBound);
+ loss_based_bandwidth_estimator.UpdateBandwidthEstimate(
+ enough_feedback_10p_loss_2, delay_based_estimate,
+ BandwidthUsage::kBwNormal);
+
+  // Do not increase the bitrate because the inherent loss is less than the
+  // average loss.
+ EXPECT_EQ(
+ loss_based_bandwidth_estimator.GetBandwidthEstimate(delay_based_estimate),
+ DataRate::KilobitsPerSec(600));
+}
+
+TEST_P(LossBasedBweV2Test,
+ SelectHighBandwidthCandidateIfLossRateIsLessThanThreshold) {
+ ExplicitKeyValueConfig key_value_config(
+ "WebRTC-Bwe-LossBasedBweV2/"
+ "Enabled:true,CandidateFactors:1.2|0.8,AckedRateCandidate:false,"
+ "ObservationWindowSize:2,"
+ "DelayBasedCandidate:true,InstantUpperBoundBwBalance:100kbps,"
+ "ObservationDurationLowerBound:200ms,HigherBwBiasFactor:1000,"
+ "HigherLogBwBiasFactor:1000,LossThresholdOfHighBandwidthPreference:0."
+ "20/");
+ LossBasedBweV2 loss_based_bandwidth_estimator(&key_value_config);
+ DataRate delay_based_estimate = DataRate::KilobitsPerSec(5000);
+
+ loss_based_bandwidth_estimator.SetBandwidthEstimate(
+ DataRate::KilobitsPerSec(600));
+
+ std::vector<PacketResult> enough_feedback_10p_loss_1 =
+ CreatePacketResultsWith10pLossRate(
+ /*first_packet_timestamp=*/Timestamp::Zero());
+ loss_based_bandwidth_estimator.UpdateBandwidthEstimate(
+ enough_feedback_10p_loss_1, delay_based_estimate,
+ BandwidthUsage::kBwNormal);
+
+ std::vector<PacketResult> enough_feedback_10p_loss_2 =
+ CreatePacketResultsWith10pLossRate(
+ /*first_packet_timestamp=*/Timestamp::Zero() +
+ kObservationDurationLowerBound);
+ loss_based_bandwidth_estimator.UpdateBandwidthEstimate(
+ enough_feedback_10p_loss_2, delay_based_estimate,
+ BandwidthUsage::kBwNormal);
+
+  // Because LossThresholdOfHighBandwidthPreference is 20% and the average
+  // loss is only 10%, the bandwidth estimate should increase.
+ EXPECT_GT(
+ loss_based_bandwidth_estimator.GetBandwidthEstimate(delay_based_estimate),
+ DataRate::KilobitsPerSec(600));
+}
+
+TEST_P(LossBasedBweV2Test,
+       SelectLowBandwidthCandidateIfLossRateIsHigherThanThreshold) {
+ ExplicitKeyValueConfig key_value_config(
+ "WebRTC-Bwe-LossBasedBweV2/"
+ "Enabled:true,CandidateFactors:1.2|0.8,AckedRateCandidate:false,"
+ "ObservationWindowSize:2,"
+ "DelayBasedCandidate:true,InstantUpperBoundBwBalance:100kbps,"
+ "ObservationDurationLowerBound:200ms,HigherBwBiasFactor:1000,"
+ "HigherLogBwBiasFactor:1000,LossThresholdOfHighBandwidthPreference:0."
+ "05/");
+ LossBasedBweV2 loss_based_bandwidth_estimator(&key_value_config);
+ DataRate delay_based_estimate = DataRate::KilobitsPerSec(5000);
+
+ loss_based_bandwidth_estimator.SetBandwidthEstimate(
+ DataRate::KilobitsPerSec(600));
+
+ std::vector<PacketResult> enough_feedback_10p_loss_1 =
+ CreatePacketResultsWith10pLossRate(
+ /*first_packet_timestamp=*/Timestamp::Zero());
+ loss_based_bandwidth_estimator.UpdateBandwidthEstimate(
+ enough_feedback_10p_loss_1, delay_based_estimate,
+ BandwidthUsage::kBwNormal);
+
+ std::vector<PacketResult> enough_feedback_10p_loss_2 =
+ CreatePacketResultsWith10pLossRate(
+ /*first_packet_timestamp=*/Timestamp::Zero() +
+ kObservationDurationLowerBound);
+ loss_based_bandwidth_estimator.UpdateBandwidthEstimate(
+ enough_feedback_10p_loss_2, delay_based_estimate,
+ BandwidthUsage::kBwNormal);
+
+  // Because LossThresholdOfHighBandwidthPreference is 5% and the average
+  // loss is 10%, the bandwidth estimate should decrease.
+ EXPECT_LT(
+ loss_based_bandwidth_estimator.GetBandwidthEstimate(delay_based_estimate),
+ DataRate::KilobitsPerSec(600));
+}
+
+INSTANTIATE_TEST_SUITE_P(LossBasedBweV2Tests,
+ LossBasedBweV2Test,
+ ::testing::Bool());
+
+} // namespace
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/congestion_controller/goog_cc/probe_bitrate_estimator.cc b/third_party/libwebrtc/modules/congestion_controller/goog_cc/probe_bitrate_estimator.cc
new file mode 100644
index 0000000000..a94f653157
--- /dev/null
+++ b/third_party/libwebrtc/modules/congestion_controller/goog_cc/probe_bitrate_estimator.cc
@@ -0,0 +1,201 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/congestion_controller/goog_cc/probe_bitrate_estimator.h"
+
+#include <algorithm>
+#include <memory>
+
+#include "api/rtc_event_log/rtc_event_log.h"
+#include "logging/rtc_event_log/events/rtc_event_probe_result_failure.h"
+#include "logging/rtc_event_log/events/rtc_event_probe_result_success.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/numerics/safe_conversions.h"
+
+namespace webrtc {
+namespace {
+// The minimum fraction of probes we need to receive feedback about in order
+// to have a valid estimate.
+constexpr double kMinReceivedProbesRatio = .80;
+
+// The minimum fraction of bytes we need to receive feedback about in order
+// to have a valid estimate.
+constexpr double kMinReceivedBytesRatio = .80;
+
+// The maximum |receive rate| / |send rate| ratio for a valid estimate.
+constexpr float kMaxValidRatio = 2.0f;
+
+// The minimum |receive rate| / |send rate| ratio assuming that the link is
+// not saturated, i.e. we assume that we will receive at least
+// kMinRatioForUnsaturatedLink * |send rate| if |send rate| is less than the
+// link capacity.
+constexpr float kMinRatioForUnsaturatedLink = 0.9f;
+
+// The target utilization of the link. If we know true link capacity
+// we'd like to send at 95% of that rate.
+constexpr float kTargetUtilizationFraction = 0.95f;
+
+// The maximum time period over which the cluster history is retained.
+// This is also the maximum time period beyond which a probing burst is not
+// expected to last.
+constexpr TimeDelta kMaxClusterHistory = TimeDelta::Seconds(1);
+
+// The maximum time interval between first and the last probe on a cluster
+// on the sender side as well as the receive side.
+constexpr TimeDelta kMaxProbeInterval = TimeDelta::Seconds(1);
+
+} // namespace
+
+ProbeBitrateEstimator::ProbeBitrateEstimator(RtcEventLog* event_log)
+ : event_log_(event_log) {}
+
+ProbeBitrateEstimator::~ProbeBitrateEstimator() = default;
+
+absl::optional<DataRate> ProbeBitrateEstimator::HandleProbeAndEstimateBitrate(
+ const PacketResult& packet_feedback) {
+ int cluster_id = packet_feedback.sent_packet.pacing_info.probe_cluster_id;
+ RTC_DCHECK_NE(cluster_id, PacedPacketInfo::kNotAProbe);
+
+ EraseOldClusters(packet_feedback.receive_time);
+
+ AggregatedCluster* cluster = &clusters_[cluster_id];
+
+ if (packet_feedback.sent_packet.send_time < cluster->first_send) {
+ cluster->first_send = packet_feedback.sent_packet.send_time;
+ }
+ if (packet_feedback.sent_packet.send_time > cluster->last_send) {
+ cluster->last_send = packet_feedback.sent_packet.send_time;
+ cluster->size_last_send = packet_feedback.sent_packet.size;
+ }
+ if (packet_feedback.receive_time < cluster->first_receive) {
+ cluster->first_receive = packet_feedback.receive_time;
+ cluster->size_first_receive = packet_feedback.sent_packet.size;
+ }
+ if (packet_feedback.receive_time > cluster->last_receive) {
+ cluster->last_receive = packet_feedback.receive_time;
+ }
+ cluster->size_total += packet_feedback.sent_packet.size;
+ cluster->num_probes += 1;
+
+ RTC_DCHECK_GT(
+ packet_feedback.sent_packet.pacing_info.probe_cluster_min_probes, 0);
+ RTC_DCHECK_GT(packet_feedback.sent_packet.pacing_info.probe_cluster_min_bytes,
+ 0);
+
+ int min_probes =
+ packet_feedback.sent_packet.pacing_info.probe_cluster_min_probes *
+ kMinReceivedProbesRatio;
+ DataSize min_size =
+ DataSize::Bytes(
+ packet_feedback.sent_packet.pacing_info.probe_cluster_min_bytes) *
+ kMinReceivedBytesRatio;
+ if (cluster->num_probes < min_probes || cluster->size_total < min_size)
+ return absl::nullopt;
+
+ TimeDelta send_interval = cluster->last_send - cluster->first_send;
+ TimeDelta receive_interval = cluster->last_receive - cluster->first_receive;
+
+ if (send_interval <= TimeDelta::Zero() || send_interval > kMaxProbeInterval ||
+ receive_interval <= TimeDelta::Zero() ||
+ receive_interval > kMaxProbeInterval) {
+ RTC_LOG(LS_INFO) << "Probing unsuccessful, invalid send/receive interval"
+ " [cluster id: "
+ << cluster_id
+ << "] [send interval: " << ToString(send_interval)
+ << "]"
+ " [receive interval: "
+ << ToString(receive_interval) << "]";
+ if (event_log_) {
+ event_log_->Log(std::make_unique<RtcEventProbeResultFailure>(
+ cluster_id, ProbeFailureReason::kInvalidSendReceiveInterval));
+ }
+ return absl::nullopt;
+ }
+  // Since the `send_interval` does not include the time it takes to actually
+  // send the last packet, the size of the last sent packet should not be
+  // included when calculating the send bitrate.
+ RTC_DCHECK_GT(cluster->size_total, cluster->size_last_send);
+ DataSize send_size = cluster->size_total - cluster->size_last_send;
+ DataRate send_rate = send_size / send_interval;
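+  // For example, four 1000-byte probes sent at t = 0, 10, 20 and 30 ms give
+  // send_interval = 30 ms and send_size = 3000 bytes (the last packet is
+  // excluded), i.e. send_rate = 3000 bytes / 30 ms = 800 kbps.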
+
+  // Since the `receive_interval` does not include the time it takes to
+  // actually receive the first packet, the size of the first received packet
+  // should not be included when calculating the receive bitrate.
+ RTC_DCHECK_GT(cluster->size_total, cluster->size_first_receive);
+ DataSize receive_size = cluster->size_total - cluster->size_first_receive;
+ DataRate receive_rate = receive_size / receive_interval;
+
+ double ratio = receive_rate / send_rate;
+ if (ratio > kMaxValidRatio) {
+ RTC_LOG(LS_INFO) << "Probing unsuccessful, receive/send ratio too high"
+ " [cluster id: "
+ << cluster_id << "] [send: " << ToString(send_size)
+ << " / " << ToString(send_interval) << " = "
+ << ToString(send_rate)
+ << "]"
+ " [receive: "
+ << ToString(receive_size) << " / "
+ << ToString(receive_interval) << " = "
+ << ToString(receive_rate)
+ << " ]"
+ " [ratio: "
+ << ToString(receive_rate) << " / " << ToString(send_rate)
+ << " = " << ratio << " > kMaxValidRatio ("
+ << kMaxValidRatio << ")]";
+ if (event_log_) {
+ event_log_->Log(std::make_unique<RtcEventProbeResultFailure>(
+ cluster_id, ProbeFailureReason::kInvalidSendReceiveRatio));
+ }
+ return absl::nullopt;
+ }
+ RTC_LOG(LS_INFO) << "Probing successful"
+ " [cluster id: "
+ << cluster_id << "] [send: " << ToString(send_size) << " / "
+ << ToString(send_interval) << " = " << ToString(send_rate)
+ << " ]"
+ " [receive: "
+ << ToString(receive_size) << " / "
+ << ToString(receive_interval) << " = "
+ << ToString(receive_rate) << "]";
+
+ DataRate res = std::min(send_rate, receive_rate);
+ // If we're receiving at significantly lower bitrate than we were sending at,
+ // it suggests that we've found the true capacity of the link. In this case,
+ // set the target bitrate slightly lower to not immediately overuse.
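+  // For example, with send_rate = 800 kbps and receive_rate = 320 kbps,
+  // 320 < 0.9 * 800 holds, so res = 0.95 * 320 kbps = 304 kbps.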
+ if (receive_rate < kMinRatioForUnsaturatedLink * send_rate) {
+ RTC_DCHECK_GT(send_rate, receive_rate);
+ res = kTargetUtilizationFraction * receive_rate;
+ }
+ if (event_log_) {
+ event_log_->Log(
+ std::make_unique<RtcEventProbeResultSuccess>(cluster_id, res.bps()));
+ }
+ estimated_data_rate_ = res;
+ return estimated_data_rate_;
+}
+
+absl::optional<DataRate>
+ProbeBitrateEstimator::FetchAndResetLastEstimatedBitrate() {
+ absl::optional<DataRate> estimated_data_rate = estimated_data_rate_;
+ estimated_data_rate_.reset();
+ return estimated_data_rate;
+}
+
+void ProbeBitrateEstimator::EraseOldClusters(Timestamp timestamp) {
+ for (auto it = clusters_.begin(); it != clusters_.end();) {
+ if (it->second.last_receive + kMaxClusterHistory < timestamp) {
+ it = clusters_.erase(it);
+ } else {
+ ++it;
+ }
+ }
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/congestion_controller/goog_cc/probe_bitrate_estimator.h b/third_party/libwebrtc/modules/congestion_controller/goog_cc/probe_bitrate_estimator.h
new file mode 100644
index 0000000000..d5a523b7f3
--- /dev/null
+++ b/third_party/libwebrtc/modules/congestion_controller/goog_cc/probe_bitrate_estimator.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_CONGESTION_CONTROLLER_GOOG_CC_PROBE_BITRATE_ESTIMATOR_H_
+#define MODULES_CONGESTION_CONTROLLER_GOOG_CC_PROBE_BITRATE_ESTIMATOR_H_
+
+#include <limits>
+#include <map>
+
+#include "absl/types/optional.h"
+#include "api/transport/network_types.h"
+#include "api/units/data_rate.h"
+
+namespace webrtc {
+class RtcEventLog;
+
+class ProbeBitrateEstimator {
+ public:
+ explicit ProbeBitrateEstimator(RtcEventLog* event_log);
+ ~ProbeBitrateEstimator();
+
+ // Should be called for every probe packet we receive feedback about.
+ // Returns the estimated bitrate if the probe completes a valid cluster.
+ absl::optional<DataRate> HandleProbeAndEstimateBitrate(
+ const PacketResult& packet_feedback);
+
+ absl::optional<DataRate> FetchAndResetLastEstimatedBitrate();
+
+ private:
+ struct AggregatedCluster {
+ int num_probes = 0;
+ Timestamp first_send = Timestamp::PlusInfinity();
+ Timestamp last_send = Timestamp::MinusInfinity();
+ Timestamp first_receive = Timestamp::PlusInfinity();
+ Timestamp last_receive = Timestamp::MinusInfinity();
+ DataSize size_last_send = DataSize::Zero();
+ DataSize size_first_receive = DataSize::Zero();
+ DataSize size_total = DataSize::Zero();
+ };
+
+ // Erases old cluster data that was seen before `timestamp`.
+ void EraseOldClusters(Timestamp timestamp);
+
+ std::map<int, AggregatedCluster> clusters_;
+ RtcEventLog* const event_log_;
+ absl::optional<DataRate> estimated_data_rate_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_CONGESTION_CONTROLLER_GOOG_CC_PROBE_BITRATE_ESTIMATOR_H_
diff --git a/third_party/libwebrtc/modules/congestion_controller/goog_cc/probe_bitrate_estimator_unittest.cc b/third_party/libwebrtc/modules/congestion_controller/goog_cc/probe_bitrate_estimator_unittest.cc
new file mode 100644
index 0000000000..6b4146d2bf
--- /dev/null
+++ b/third_party/libwebrtc/modules/congestion_controller/goog_cc/probe_bitrate_estimator_unittest.cc
@@ -0,0 +1,228 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/congestion_controller/goog_cc/probe_bitrate_estimator.h"
+
+#include <stddef.h>
+
+#include "api/transport/network_types.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+namespace {
+constexpr int kDefaultMinProbes = 5;
+constexpr int kDefaultMinBytes = 5000;
+constexpr float kTargetUtilizationFraction = 0.95f;
+} // anonymous namespace
+
+class TestProbeBitrateEstimator : public ::testing::Test {
+ public:
+ TestProbeBitrateEstimator() : probe_bitrate_estimator_(nullptr) {}
+
+ // TODO(philipel): Use PacedPacketInfo when ProbeBitrateEstimator is rewritten
+ // to use that information.
+ void AddPacketFeedback(int probe_cluster_id,
+ size_t size_bytes,
+ int64_t send_time_ms,
+ int64_t arrival_time_ms,
+ int min_probes = kDefaultMinProbes,
+ int min_bytes = kDefaultMinBytes) {
+ const Timestamp kReferenceTime = Timestamp::Seconds(1000);
+ PacketResult feedback;
+ feedback.sent_packet.send_time =
+ kReferenceTime + TimeDelta::Millis(send_time_ms);
+ feedback.sent_packet.size = DataSize::Bytes(size_bytes);
+ feedback.sent_packet.pacing_info =
+ PacedPacketInfo(probe_cluster_id, min_probes, min_bytes);
+ feedback.receive_time = kReferenceTime + TimeDelta::Millis(arrival_time_ms);
+ measured_data_rate_ =
+ probe_bitrate_estimator_.HandleProbeAndEstimateBitrate(feedback);
+ }
+
+ protected:
+ absl::optional<DataRate> measured_data_rate_;
+ ProbeBitrateEstimator probe_bitrate_estimator_;
+};
+
+TEST_F(TestProbeBitrateEstimator, OneCluster) {
+ AddPacketFeedback(0, 1000, 0, 10);
+ AddPacketFeedback(0, 1000, 10, 20);
+ AddPacketFeedback(0, 1000, 20, 30);
+ AddPacketFeedback(0, 1000, 30, 40);
+
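+ // Four 1000-byte probes; since the last packet's size is excluded on the
+ // send side and the first packet's size on the receive side, 3000 bytes
+ // cross 30 ms in each direction: 800 kbps.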
+ EXPECT_NEAR(measured_data_rate_->bps(), 800000, 10);
+}
+
+TEST_F(TestProbeBitrateEstimator, OneClusterTooFewProbes) {
+ AddPacketFeedback(0, 2000, 0, 10);
+ AddPacketFeedback(0, 2000, 10, 20);
+ AddPacketFeedback(0, 2000, 20, 30);
+
+ EXPECT_FALSE(measured_data_rate_);
+}
+
+TEST_F(TestProbeBitrateEstimator, OneClusterTooFewBytes) {
+ const int kMinBytes = 6000;
+ AddPacketFeedback(0, 800, 0, 10, kDefaultMinProbes, kMinBytes);
+ AddPacketFeedback(0, 800, 10, 20, kDefaultMinProbes, kMinBytes);
+ AddPacketFeedback(0, 800, 20, 30, kDefaultMinProbes, kMinBytes);
+ AddPacketFeedback(0, 800, 30, 40, kDefaultMinProbes, kMinBytes);
+ AddPacketFeedback(0, 800, 40, 50, kDefaultMinProbes, kMinBytes);
+
+ EXPECT_FALSE(measured_data_rate_);
+}
+
+TEST_F(TestProbeBitrateEstimator, SmallCluster) {
+ const int kMinBytes = 1000;
+ AddPacketFeedback(0, 150, 0, 10, kDefaultMinProbes, kMinBytes);
+ AddPacketFeedback(0, 150, 10, 20, kDefaultMinProbes, kMinBytes);
+ AddPacketFeedback(0, 150, 20, 30, kDefaultMinProbes, kMinBytes);
+ AddPacketFeedback(0, 150, 30, 40, kDefaultMinProbes, kMinBytes);
+ AddPacketFeedback(0, 150, 40, 50, kDefaultMinProbes, kMinBytes);
+ AddPacketFeedback(0, 150, 50, 60, kDefaultMinProbes, kMinBytes);
+ EXPECT_NEAR(measured_data_rate_->bps(), 120000, 10);
+}
+
+TEST_F(TestProbeBitrateEstimator, LargeCluster) {
+ const int kMinProbes = 30;
+ const int kMinBytes = 312500;
+
+ int64_t send_time = 0;
+ int64_t receive_time = 5;
+ for (int i = 0; i < 25; ++i) {
+ AddPacketFeedback(0, 12500, send_time, receive_time, kMinProbes, kMinBytes);
+ ++send_time;
+ ++receive_time;
+ }
+ EXPECT_NEAR(measured_data_rate_->bps(), 100000000, 10);
+}
+
+TEST_F(TestProbeBitrateEstimator, FastReceive) {
+ AddPacketFeedback(0, 1000, 0, 15);
+ AddPacketFeedback(0, 1000, 10, 30);
+ AddPacketFeedback(0, 1000, 20, 35);
+ AddPacketFeedback(0, 1000, 30, 40);
+
+ EXPECT_NEAR(measured_data_rate_->bps(), 800000, 10);
+}
+
+TEST_F(TestProbeBitrateEstimator, TooFastReceive) {
+ AddPacketFeedback(0, 1000, 0, 19);
+ AddPacketFeedback(0, 1000, 10, 22);
+ AddPacketFeedback(0, 1000, 20, 25);
+ AddPacketFeedback(0, 1000, 40, 27);
+
+ EXPECT_FALSE(measured_data_rate_);
+}
+
+TEST_F(TestProbeBitrateEstimator, SlowReceive) {
+ AddPacketFeedback(0, 1000, 0, 10);
+ AddPacketFeedback(0, 1000, 10, 40);
+ AddPacketFeedback(0, 1000, 20, 70);
+ AddPacketFeedback(0, 1000, 30, 85);
+ // Expected send rate = 800 kbps, expected receive rate = 320 kbps.
+
+ EXPECT_NEAR(measured_data_rate_->bps(), kTargetUtilizationFraction * 320000,
+ 10);
+}
+
+TEST_F(TestProbeBitrateEstimator, BurstReceive) {
+ AddPacketFeedback(0, 1000, 0, 50);
+ AddPacketFeedback(0, 1000, 10, 50);
+ AddPacketFeedback(0, 1000, 20, 50);
+ AddPacketFeedback(0, 1000, 40, 50);
+
+ EXPECT_FALSE(measured_data_rate_);
+}
+
+TEST_F(TestProbeBitrateEstimator, MultipleClusters) {
+ AddPacketFeedback(0, 1000, 0, 10);
+ AddPacketFeedback(0, 1000, 10, 20);
+ AddPacketFeedback(0, 1000, 20, 30);
+ AddPacketFeedback(0, 1000, 40, 60);
+ // Expected send rate = 600 kbps, expected receive rate = 480 kbps.
+ EXPECT_NEAR(measured_data_rate_->bps(), kTargetUtilizationFraction * 480000,
+ 10);
+
+ AddPacketFeedback(0, 1000, 50, 60);
+ // Expected send rate = 640 kbps, expected receive rate = 640 kbps.
+ EXPECT_NEAR(measured_data_rate_->bps(), 640000, 10);
+
+ AddPacketFeedback(1, 1000, 60, 70);
+ AddPacketFeedback(1, 1000, 65, 77);
+ AddPacketFeedback(1, 1000, 70, 84);
+ AddPacketFeedback(1, 1000, 75, 90);
+ // Expected send rate = 1600 kbps, expected receive rate = 1200 kbps.
+
+ EXPECT_NEAR(measured_data_rate_->bps(), kTargetUtilizationFraction * 1200000,
+ 10);
+}
+
+TEST_F(TestProbeBitrateEstimator, IgnoreOldClusters) {
+ AddPacketFeedback(0, 1000, 0, 10);
+ AddPacketFeedback(0, 1000, 10, 20);
+ AddPacketFeedback(0, 1000, 20, 30);
+
+ AddPacketFeedback(1, 1000, 60, 70);
+ AddPacketFeedback(1, 1000, 65, 77);
+ AddPacketFeedback(1, 1000, 70, 84);
+ AddPacketFeedback(1, 1000, 75, 90);
+ // Expected send rate = 1600 kbps, expected receive rate = 1200 kbps.
+
+ EXPECT_NEAR(measured_data_rate_->bps(), kTargetUtilizationFraction * 1200000,
+ 10);
+
+ // Feedback for cluster 0 coming in 6 s later; its data has been erased as
+ // too old by then.
+ AddPacketFeedback(0, 1000, 40 + 6000, 60 + 6000);
+
+ EXPECT_FALSE(measured_data_rate_);
+}
+
+TEST_F(TestProbeBitrateEstimator, IgnoreSizeLastSendPacket) {
+ AddPacketFeedback(0, 1000, 0, 10);
+ AddPacketFeedback(0, 1000, 10, 20);
+ AddPacketFeedback(0, 1000, 20, 30);
+ AddPacketFeedback(0, 1000, 30, 40);
+ AddPacketFeedback(0, 1500, 40, 50);
+ // Expected send rate = 800 kbps, expected receive rate = 900 kbps.
+
+ EXPECT_NEAR(measured_data_rate_->bps(), 800000, 10);
+}
+
+TEST_F(TestProbeBitrateEstimator, IgnoreSizeFirstReceivePacket) {
+ AddPacketFeedback(0, 1500, 0, 10);
+ AddPacketFeedback(0, 1000, 10, 20);
+ AddPacketFeedback(0, 1000, 20, 30);
+ AddPacketFeedback(0, 1000, 30, 40);
+ // Expected send rate = 933 kbps, expected receive rate = 800 kbps.
+
+ EXPECT_NEAR(measured_data_rate_->bps(), kTargetUtilizationFraction * 800000,
+ 10);
+}
+
+TEST_F(TestProbeBitrateEstimator, NoLastEstimatedBitrateBps) {
+ EXPECT_FALSE(probe_bitrate_estimator_.FetchAndResetLastEstimatedBitrate());
+}
+
+TEST_F(TestProbeBitrateEstimator, FetchLastEstimatedBitrateBps) {
+ AddPacketFeedback(0, 1000, 0, 10);
+ AddPacketFeedback(0, 1000, 10, 20);
+ AddPacketFeedback(0, 1000, 20, 30);
+ AddPacketFeedback(0, 1000, 30, 40);
+
+ auto estimated_bitrate =
+ probe_bitrate_estimator_.FetchAndResetLastEstimatedBitrate();
+ EXPECT_TRUE(estimated_bitrate);
+ EXPECT_NEAR(estimated_bitrate->bps(), 800000, 10);
+ EXPECT_FALSE(probe_bitrate_estimator_.FetchAndResetLastEstimatedBitrate());
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/congestion_controller/goog_cc/probe_controller.cc b/third_party/libwebrtc/modules/congestion_controller/goog_cc/probe_controller.cc
new file mode 100644
index 0000000000..e70f2b310c
--- /dev/null
+++ b/third_party/libwebrtc/modules/congestion_controller/goog_cc/probe_controller.cc
@@ -0,0 +1,442 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/congestion_controller/goog_cc/probe_controller.h"
+
+#include <algorithm>
+#include <initializer_list>
+#include <memory>
+#include <string>
+
+#include "absl/strings/match.h"
+#include "api/units/data_rate.h"
+#include "api/units/time_delta.h"
+#include "api/units/timestamp.h"
+#include "logging/rtc_event_log/events/rtc_event_probe_cluster_created.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "system_wrappers/include/metrics.h"
+
+namespace webrtc {
+
+namespace {
+// Maximum waiting time from the time of initiating probing to getting
+// the measured results back.
+constexpr int64_t kMaxWaitingTimeForProbingResultMs = 1000;
+
+// Value of `min_bitrate_to_probe_further_bps_` that indicates
+// further probing is disabled.
+constexpr int kExponentialProbingDisabled = 0;
+
+// Default probing bitrate limit. Applied only when the application didn't
+// specify max bitrate.
+constexpr int64_t kDefaultMaxProbingBitrateBps = 5000000;
+
+// If the bitrate drops to a factor `kBitrateDropThreshold` or lower
+// and we recover within `kBitrateDropTimeoutMs`, then we'll send
+// a probe at a fraction `kProbeFractionAfterDrop` of the original bitrate.
+constexpr double kBitrateDropThreshold = 0.66;
+constexpr int kBitrateDropTimeoutMs = 5000;
+constexpr double kProbeFractionAfterDrop = 0.85;
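+// E.g. if a 1000 kbps estimate drops below 660 kbps and recovers within the
+// timeout, the suggested probe rate is 850 kbps.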
+
+// Timeout for probing after leaving ALR. If the bitrate drops significantly
+// (as determined by the delay based estimator) and we leave ALR, then we will
+// send a probe if we recover within `kAlrEndedTimeoutMs` ms.
+constexpr int kAlrEndedTimeoutMs = 3000;
+
+// This is a limit on how often probing can be done when there is a BW
+// drop detected in ALR.
+constexpr int64_t kMinTimeBetweenAlrProbesMs = 5000;
+
+// The expected uncertainty of probe result (as a fraction of the target probe
+// bitrate). Used to avoid probing if the probe bitrate is close to our current
+// estimate.
+constexpr double kProbeUncertainty = 0.05;
+
+// Use probing to recover faster after large bitrate estimate drops.
+constexpr char kBweRapidRecoveryExperiment[] =
+ "WebRTC-BweRapidRecoveryExperiment";
+
+void MaybeLogProbeClusterCreated(RtcEventLog* event_log,
+ const ProbeClusterConfig& probe) {
+ RTC_DCHECK(event_log);
+ if (!event_log) {
+ return;
+ }
+
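+ // E.g. a 1 Mbps probe with a 15 ms target duration yields
+ // 1000000 * 15 / 8000 = 1875 bytes.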
+ size_t min_bytes = static_cast<size_t>(probe.target_data_rate.bps() *
+ probe.target_duration.ms() / 8000);
+ event_log->Log(std::make_unique<RtcEventProbeClusterCreated>(
+ probe.id, probe.target_data_rate.bps(), probe.target_probe_count,
+ min_bytes));
+}
+
+} // namespace
+
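+// Field trial strings use the format
+// "WebRTC-Bwe-ProbingConfiguration/p1:3.0,p2:6.0,step_size:2/"; keys that are
+// absent keep the defaults below.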
+ProbeControllerConfig::ProbeControllerConfig(
+ const FieldTrialsView* key_value_config)
+ : first_exponential_probe_scale("p1", 3.0),
+ second_exponential_probe_scale("p2", 6.0),
+ further_exponential_probe_scale("step_size", 2),
+ further_probe_threshold("further_probe_threshold", 0.7),
+ alr_probing_interval("alr_interval", TimeDelta::Seconds(5)),
+ alr_probe_scale("alr_scale", 2),
+ first_allocation_probe_scale("alloc_p1", 1),
+ second_allocation_probe_scale("alloc_p2", 2),
+ allocation_allow_further_probing("alloc_probe_further", false),
+ allocation_probe_max("alloc_probe_max", DataRate::PlusInfinity()),
+ min_probe_packets_sent("min_probe_packets_sent", 5),
+ min_probe_duration("min_probe_duration", TimeDelta::Millis(15)) {
+ ParseFieldTrial(
+ {&first_exponential_probe_scale, &second_exponential_probe_scale,
+ &further_exponential_probe_scale, &further_probe_threshold,
+ &alr_probing_interval, &alr_probe_scale, &first_allocation_probe_scale,
+ &second_allocation_probe_scale, &allocation_allow_further_probing},
+ key_value_config->Lookup("WebRTC-Bwe-ProbingConfiguration"));
+
+ // Specialized keys overriding subsets of WebRTC-Bwe-ProbingConfiguration
+ ParseFieldTrial(
+ {&first_exponential_probe_scale, &second_exponential_probe_scale},
+ key_value_config->Lookup("WebRTC-Bwe-InitialProbing"));
+ ParseFieldTrial({&further_exponential_probe_scale, &further_probe_threshold},
+ key_value_config->Lookup("WebRTC-Bwe-ExponentialProbing"));
+ ParseFieldTrial({&alr_probing_interval, &alr_probe_scale},
+ key_value_config->Lookup("WebRTC-Bwe-AlrProbing"));
+ ParseFieldTrial(
+ {&first_allocation_probe_scale, &second_allocation_probe_scale,
+ &allocation_allow_further_probing, &allocation_probe_max},
+ key_value_config->Lookup("WebRTC-Bwe-AllocationProbing"));
+ ParseFieldTrial({&min_probe_packets_sent, &min_probe_duration},
+ key_value_config->Lookup("WebRTC-Bwe-ProbingBehavior"));
+}
+
+ProbeControllerConfig::ProbeControllerConfig(const ProbeControllerConfig&) =
+ default;
+ProbeControllerConfig::~ProbeControllerConfig() = default;
+
+ProbeController::ProbeController(const FieldTrialsView* key_value_config,
+ RtcEventLog* event_log)
+ : enable_periodic_alr_probing_(false),
+ in_rapid_recovery_experiment_(absl::StartsWith(
+ key_value_config->Lookup(kBweRapidRecoveryExperiment),
+ "Enabled")),
+ event_log_(event_log),
+ config_(ProbeControllerConfig(key_value_config)) {
+ Reset(0);
+}
+
+ProbeController::~ProbeController() {}
+
+std::vector<ProbeClusterConfig> ProbeController::SetBitrates(
+ int64_t min_bitrate_bps,
+ int64_t start_bitrate_bps,
+ int64_t max_bitrate_bps,
+ int64_t at_time_ms) {
+ if (start_bitrate_bps > 0) {
+ start_bitrate_bps_ = start_bitrate_bps;
+ estimated_bitrate_bps_ = start_bitrate_bps;
+ } else if (start_bitrate_bps_ == 0) {
+ start_bitrate_bps_ = min_bitrate_bps;
+ }
+
+ // The reason we use the variable `old_max_bitrate_bps` is because we
+ // need to set `max_bitrate_bps_` before we call InitiateProbing.
+ int64_t old_max_bitrate_bps = max_bitrate_bps_;
+ max_bitrate_bps_ = max_bitrate_bps;
+
+ switch (state_) {
+ case State::kInit:
+ if (network_available_)
+ return InitiateExponentialProbing(at_time_ms);
+ break;
+
+ case State::kWaitingForProbingResult:
+ break;
+
+ case State::kProbingComplete:
+ // If the new max bitrate is higher than both the old max bitrate and the
+ // estimate then initiate probing.
+ if (estimated_bitrate_bps_ != 0 &&
+ old_max_bitrate_bps < max_bitrate_bps_ &&
+ estimated_bitrate_bps_ < max_bitrate_bps_) {
+ // The assumption is that if we jump more than 20% in the bandwidth
+ // estimate or if the bandwidth estimate is within 90% of the new
+ // max bitrate then the probing attempt was successful.
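+ // E.g. with a 1 Mbps estimate and a new 2 Mbps max, the threshold is
+ // min(1.2 Mbps, 1.8 Mbps) = 1.2 Mbps.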
+ mid_call_probing_success_threshold_ =
+ std::min(estimated_bitrate_bps_ * 1.2, max_bitrate_bps_ * 0.9);
+ mid_call_probing_waiting_for_result_ = true;
+ mid_call_probing_bitrate_bps_ = max_bitrate_bps_;
+
+ RTC_HISTOGRAM_COUNTS_10000("WebRTC.BWE.MidCallProbing.Initiated",
+ max_bitrate_bps_ / 1000);
+
+ return InitiateProbing(at_time_ms, {max_bitrate_bps_}, false);
+ }
+ break;
+ }
+ return std::vector<ProbeClusterConfig>();
+}
+
+std::vector<ProbeClusterConfig> ProbeController::OnMaxTotalAllocatedBitrate(
+ int64_t max_total_allocated_bitrate,
+ int64_t at_time_ms) {
+ const bool in_alr = alr_start_time_ms_.has_value();
+ const bool allow_allocation_probe = in_alr;
+
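+ // Probe toward a changed allocation only while in ALR and only when the
+ // allocation exceeds the current estimate; otherwise just record the new
+ // value.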
+ if (state_ == State::kProbingComplete &&
+ max_total_allocated_bitrate != max_total_allocated_bitrate_ &&
+ estimated_bitrate_bps_ != 0 &&
+ (max_bitrate_bps_ <= 0 || estimated_bitrate_bps_ < max_bitrate_bps_) &&
+ estimated_bitrate_bps_ < max_total_allocated_bitrate &&
+ allow_allocation_probe) {
+ max_total_allocated_bitrate_ = max_total_allocated_bitrate;
+
+ if (!config_.first_allocation_probe_scale)
+ return std::vector<ProbeClusterConfig>();
+
+ DataRate first_probe_rate =
+ DataRate::BitsPerSec(max_total_allocated_bitrate) *
+ config_.first_allocation_probe_scale.Value();
+ DataRate probe_cap = config_.allocation_probe_max.Get();
+ first_probe_rate = std::min(first_probe_rate, probe_cap);
+ std::vector<int64_t> probes = {first_probe_rate.bps()};
+ if (config_.second_allocation_probe_scale) {
+ DataRate second_probe_rate =
+ DataRate::BitsPerSec(max_total_allocated_bitrate) *
+ config_.second_allocation_probe_scale.Value();
+ second_probe_rate = std::min(second_probe_rate, probe_cap);
+ if (second_probe_rate > first_probe_rate)
+ probes.push_back(second_probe_rate.bps());
+ }
+ return InitiateProbing(at_time_ms, probes,
+ config_.allocation_allow_further_probing.Get());
+ }
+ max_total_allocated_bitrate_ = max_total_allocated_bitrate;
+ return std::vector<ProbeClusterConfig>();
+}
+
+std::vector<ProbeClusterConfig> ProbeController::OnNetworkAvailability(
+ NetworkAvailability msg) {
+ network_available_ = msg.network_available;
+
+ if (!network_available_ && state_ == State::kWaitingForProbingResult) {
+ state_ = State::kProbingComplete;
+ min_bitrate_to_probe_further_bps_ = kExponentialProbingDisabled;
+ }
+
+ if (network_available_ && state_ == State::kInit && start_bitrate_bps_ > 0)
+ return InitiateExponentialProbing(msg.at_time.ms());
+ return std::vector<ProbeClusterConfig>();
+}
+
+std::vector<ProbeClusterConfig> ProbeController::InitiateExponentialProbing(
+ int64_t at_time_ms) {
+ RTC_DCHECK(network_available_);
+ RTC_DCHECK(state_ == State::kInit);
+ RTC_DCHECK_GT(start_bitrate_bps_, 0);
+
+ // With the default config (p2 = 6, further_probe_threshold = 0.7) and a
+ // 300 kbps start bitrate, probing at 1.8 Mbps (6x 300 kbps) gives a
+ // threshold of 0.7 * 1.8 Mbps = 1.26 Mbps to continue probing.
+ std::vector<int64_t> probes = {static_cast<int64_t>(
+ config_.first_exponential_probe_scale * start_bitrate_bps_)};
+ if (config_.second_exponential_probe_scale) {
+ probes.push_back(config_.second_exponential_probe_scale.Value() *
+ start_bitrate_bps_);
+ }
+ return InitiateProbing(at_time_ms, probes, true);
+}
+
+std::vector<ProbeClusterConfig> ProbeController::SetEstimatedBitrate(
+ int64_t bitrate_bps,
+ int64_t at_time_ms) {
+ if (mid_call_probing_waiting_for_result_ &&
+ bitrate_bps >= mid_call_probing_success_threshold_) {
+ RTC_HISTOGRAM_COUNTS_10000("WebRTC.BWE.MidCallProbing.Success",
+ mid_call_probing_bitrate_bps_ / 1000);
+ RTC_HISTOGRAM_COUNTS_10000("WebRTC.BWE.MidCallProbing.ProbedKbps",
+ bitrate_bps / 1000);
+ mid_call_probing_waiting_for_result_ = false;
+ }
+ std::vector<ProbeClusterConfig> pending_probes;
+ if (state_ == State::kWaitingForProbingResult) {
+ // Continue probing if probing results indicate channel has greater
+ // capacity.
+ RTC_LOG(LS_INFO) << "Measured bitrate: " << bitrate_bps
+ << " Minimum to probe further: "
+ << min_bitrate_to_probe_further_bps_;
+
+ if (min_bitrate_to_probe_further_bps_ != kExponentialProbingDisabled &&
+ bitrate_bps > min_bitrate_to_probe_further_bps_) {
+ pending_probes = InitiateProbing(
+ at_time_ms,
+ {static_cast<int64_t>(config_.further_exponential_probe_scale *
+ bitrate_bps)},
+ true);
+ }
+ }
+
+ if (bitrate_bps < kBitrateDropThreshold * estimated_bitrate_bps_) {
+ time_of_last_large_drop_ms_ = at_time_ms;
+ bitrate_before_last_large_drop_bps_ = estimated_bitrate_bps_;
+ }
+
+ estimated_bitrate_bps_ = bitrate_bps;
+ return pending_probes;
+}
+
+void ProbeController::EnablePeriodicAlrProbing(bool enable) {
+ enable_periodic_alr_probing_ = enable;
+}
+
+void ProbeController::SetAlrStartTimeMs(
+ absl::optional<int64_t> alr_start_time_ms) {
+ alr_start_time_ms_ = alr_start_time_ms;
+}
+void ProbeController::SetAlrEndedTimeMs(int64_t alr_end_time_ms) {
+ alr_end_time_ms_.emplace(alr_end_time_ms);
+}
+
+std::vector<ProbeClusterConfig> ProbeController::RequestProbe(
+ int64_t at_time_ms) {
+ // Called once we have returned to normal state after a large drop in
+ // estimated bandwidth. The current response is to initiate a single probe
+ // session (if not already probing) at the previous bitrate.
+ //
+ // If the probe session fails, the assumption is that this drop was a
+ // real one from a competing flow or a network change.
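+ //
+ // E.g. a drop from 500 kbps to 250 kbps suggests a probe at
+ // 0.85 * 500 = 425 kbps, with a minimum expected result of
+ // 0.95 * 425 = ~404 kbps, which exceeds the 250 kbps estimate.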
+ bool in_alr = alr_start_time_ms_.has_value();
+ bool alr_ended_recently =
+ (alr_end_time_ms_.has_value() &&
+ at_time_ms - alr_end_time_ms_.value() < kAlrEndedTimeoutMs);
+ if (in_alr || alr_ended_recently || in_rapid_recovery_experiment_) {
+ if (state_ == State::kProbingComplete) {
+ uint32_t suggested_probe_bps =
+ kProbeFractionAfterDrop * bitrate_before_last_large_drop_bps_;
+ uint32_t min_expected_probe_result_bps =
+ (1 - kProbeUncertainty) * suggested_probe_bps;
+ int64_t time_since_drop_ms = at_time_ms - time_of_last_large_drop_ms_;
+ int64_t time_since_probe_ms = at_time_ms - last_bwe_drop_probing_time_ms_;
+ if (min_expected_probe_result_bps > estimated_bitrate_bps_ &&
+ time_since_drop_ms < kBitrateDropTimeoutMs &&
+ time_since_probe_ms > kMinTimeBetweenAlrProbesMs) {
+ RTC_LOG(LS_INFO) << "Detected big bandwidth drop, start probing.";
+ // Track how often we probe in response to bandwidth drop in ALR.
+ RTC_HISTOGRAM_COUNTS_10000(
+ "WebRTC.BWE.BweDropProbingIntervalInS",
+ (at_time_ms - last_bwe_drop_probing_time_ms_) / 1000);
+ last_bwe_drop_probing_time_ms_ = at_time_ms;
+ return InitiateProbing(at_time_ms, {suggested_probe_bps}, false);
+ }
+ }
+ }
+ return std::vector<ProbeClusterConfig>();
+}
+
+void ProbeController::SetMaxBitrate(int64_t max_bitrate_bps) {
+ max_bitrate_bps_ = max_bitrate_bps;
+}
+
+void ProbeController::Reset(int64_t at_time_ms) {
+ network_available_ = true;
+ state_ = State::kInit;
+ min_bitrate_to_probe_further_bps_ = kExponentialProbingDisabled;
+ time_last_probing_initiated_ms_ = 0;
+ estimated_bitrate_bps_ = 0;
+ start_bitrate_bps_ = 0;
+ max_bitrate_bps_ = 0;
+ int64_t now_ms = at_time_ms;
+ last_bwe_drop_probing_time_ms_ = now_ms;
+ alr_end_time_ms_.reset();
+ mid_call_probing_waiting_for_result_ = false;
+ time_of_last_large_drop_ms_ = now_ms;
+ bitrate_before_last_large_drop_bps_ = 0;
+ max_total_allocated_bitrate_ = 0;
+}
+
+std::vector<ProbeClusterConfig> ProbeController::Process(int64_t at_time_ms) {
+ if (at_time_ms - time_last_probing_initiated_ms_ >
+ kMaxWaitingTimeForProbingResultMs) {
+ mid_call_probing_waiting_for_result_ = false;
+
+ if (state_ == State::kWaitingForProbingResult) {
+ RTC_LOG(LS_INFO) << "kWaitingForProbingResult: timeout";
+ state_ = State::kProbingComplete;
+ min_bitrate_to_probe_further_bps_ = kExponentialProbingDisabled;
+ }
+ }
+
+ if (enable_periodic_alr_probing_ && state_ == State::kProbingComplete) {
+ // Probe bandwidth periodically when in ALR state.
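+ // With the default config this initiates a probe at alr_probe_scale (2x)
+ // the current estimate once alr_probing_interval (5 s) has passed in ALR
+ // since the last probe (or since ALR started, whichever is later).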
+ if (alr_start_time_ms_ && estimated_bitrate_bps_ > 0) {
+ int64_t next_probe_time_ms =
+ std::max(*alr_start_time_ms_, time_last_probing_initiated_ms_) +
+ config_.alr_probing_interval->ms();
+ if (at_time_ms >= next_probe_time_ms) {
+ return InitiateProbing(at_time_ms,
+ {static_cast<int64_t>(estimated_bitrate_bps_ *
+ config_.alr_probe_scale)},
+ true);
+ }
+ }
+ }
+ return std::vector<ProbeClusterConfig>();
+}
+
+std::vector<ProbeClusterConfig> ProbeController::InitiateProbing(
+ int64_t now_ms,
+ std::vector<int64_t> bitrates_to_probe,
+ bool probe_further) {
+ int64_t max_probe_bitrate_bps =
+ max_bitrate_bps_ > 0 ? max_bitrate_bps_ : kDefaultMaxProbingBitrateBps;
+ if (max_total_allocated_bitrate_ > 0) {
+ // If a max allocated bitrate has been configured, allow probing up to 2x
+ // that rate. This allows some overhead to account for bursty streams,
+ // which otherwise would have to ramp up when the overshoot is already in
+ // progress.
+ // It also avoids minor quality reduction caused by probes often being
+ // received at slightly less than the target probe bitrate.
+ max_probe_bitrate_bps =
+ std::min(max_probe_bitrate_bps, max_total_allocated_bitrate_ * 2);
+ }
+
+ std::vector<ProbeClusterConfig> pending_probes;
+ for (int64_t bitrate : bitrates_to_probe) {
+ RTC_DCHECK_GT(bitrate, 0);
+
+ if (bitrate > max_probe_bitrate_bps) {
+ bitrate = max_probe_bitrate_bps;
+ probe_further = false;
+ }
+
+ ProbeClusterConfig config;
+ config.at_time = Timestamp::Millis(now_ms);
+ config.target_data_rate =
+ DataRate::BitsPerSec(rtc::dchecked_cast<int>(bitrate));
+ config.target_duration = config_.min_probe_duration;
+ config.target_probe_count = config_.min_probe_packets_sent;
+ config.id = next_probe_cluster_id_;
+ next_probe_cluster_id_++;
+ MaybeLogProbeClusterCreated(event_log_, config);
+ pending_probes.push_back(config);
+ }
+ time_last_probing_initiated_ms_ = now_ms;
+ if (probe_further) {
+ state_ = State::kWaitingForProbingResult;
+ min_bitrate_to_probe_further_bps_ =
+ bitrates_to_probe.back() * config_.further_probe_threshold;
+ } else {
+ state_ = State::kProbingComplete;
+ min_bitrate_to_probe_further_bps_ = kExponentialProbingDisabled;
+ }
+ return pending_probes;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/congestion_controller/goog_cc/probe_controller.h b/third_party/libwebrtc/modules/congestion_controller/goog_cc/probe_controller.h
new file mode 100644
index 0000000000..d71c045d2b
--- /dev/null
+++ b/third_party/libwebrtc/modules/congestion_controller/goog_cc/probe_controller.h
@@ -0,0 +1,156 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_CONGESTION_CONTROLLER_GOOG_CC_PROBE_CONTROLLER_H_
+#define MODULES_CONGESTION_CONTROLLER_GOOG_CC_PROBE_CONTROLLER_H_
+
+#include <stdint.h>
+
+#include <initializer_list>
+#include <vector>
+
+#include "absl/base/attributes.h"
+#include "absl/types/optional.h"
+#include "api/field_trials_view.h"
+#include "api/rtc_event_log/rtc_event_log.h"
+#include "api/transport/network_control.h"
+#include "api/units/data_rate.h"
+#include "rtc_base/experiments/field_trial_parser.h"
+
+namespace webrtc {
+
+struct ProbeControllerConfig {
+ explicit ProbeControllerConfig(const FieldTrialsView* key_value_config);
+ ProbeControllerConfig(const ProbeControllerConfig&);
+ ProbeControllerConfig& operator=(const ProbeControllerConfig&) = default;
+ ~ProbeControllerConfig();
+
+ // These parameters configure the initial probes. First we send one or two
+ // probes of sizes p1 * start_bitrate_bps_ and p2 * start_bitrate_bps_.
+ // Then whenever we get a bitrate estimate of at least further_probe_threshold
+ // times the size of the last sent probe we'll send another one of size
+ // step_size times the new estimate.
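+ // With the defaults (p1 = 3.0, p2 = 6.0) and a 300 kbps start bitrate, the
+ // initial probes are sent at 900 kbps and 1800 kbps.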
+ FieldTrialParameter<double> first_exponential_probe_scale;
+ FieldTrialOptional<double> second_exponential_probe_scale;
+ FieldTrialParameter<double> further_exponential_probe_scale;
+ FieldTrialParameter<double> further_probe_threshold;
+
+ // Configures how often we send ALR probes and how big they are.
+ FieldTrialParameter<TimeDelta> alr_probing_interval;
+ FieldTrialParameter<double> alr_probe_scale;
+
+ // Configures the probes emitted by changes to the allocated bitrate.
+ FieldTrialOptional<double> first_allocation_probe_scale;
+ FieldTrialOptional<double> second_allocation_probe_scale;
+ FieldTrialFlag allocation_allow_further_probing;
+ FieldTrialParameter<DataRate> allocation_probe_max;
+
+ // The minimum number of probing packets used.
+ FieldTrialParameter<int> min_probe_packets_sent;
+ // The minimum probing duration.
+ FieldTrialParameter<TimeDelta> min_probe_duration;
+};
+
+// This class controls initiation of probing to estimate initial channel
+// capacity. There is also support for probing during a session when max
+// bitrate is adjusted by an application.
+class ProbeController {
+ public:
+ explicit ProbeController(const FieldTrialsView* key_value_config,
+ RtcEventLog* event_log);
+ ~ProbeController();
+
+ ProbeController(const ProbeController&) = delete;
+ ProbeController& operator=(const ProbeController&) = delete;
+
+ ABSL_MUST_USE_RESULT std::vector<ProbeClusterConfig> SetBitrates(
+ int64_t min_bitrate_bps,
+ int64_t start_bitrate_bps,
+ int64_t max_bitrate_bps,
+ int64_t at_time_ms);
+
+ // The total bitrate, as opposed to the max bitrate, is the sum of the
+ // configured bitrates for all active streams.
+ ABSL_MUST_USE_RESULT std::vector<ProbeClusterConfig>
+ OnMaxTotalAllocatedBitrate(int64_t max_total_allocated_bitrate,
+ int64_t at_time_ms);
+
+ ABSL_MUST_USE_RESULT std::vector<ProbeClusterConfig> OnNetworkAvailability(
+ NetworkAvailability msg);
+
+ ABSL_MUST_USE_RESULT std::vector<ProbeClusterConfig> SetEstimatedBitrate(
+ int64_t bitrate_bps,
+ int64_t at_time_ms);
+
+ void EnablePeriodicAlrProbing(bool enable);
+
+ void SetAlrStartTimeMs(absl::optional<int64_t> alr_start_time);
+ void SetAlrEndedTimeMs(int64_t alr_end_time);
+
+ ABSL_MUST_USE_RESULT std::vector<ProbeClusterConfig> RequestProbe(
+ int64_t at_time_ms);
+
+ // Sets a new maximum probing bitrate, without generating a new probe cluster.
+ void SetMaxBitrate(int64_t max_bitrate_bps);
+
+ // Resets the ProbeController to a state equivalent to as if it was just
+ // created EXCEPT for `enable_periodic_alr_probing_`.
+ void Reset(int64_t at_time_ms);
+
+ ABSL_MUST_USE_RESULT std::vector<ProbeClusterConfig> Process(
+ int64_t at_time_ms);
+
+ private:
+ enum class State {
+ // Initial state where no probing has been triggered yet.
+ kInit,
+ // Waiting for probing results to continue further probing.
+ kWaitingForProbingResult,
+ // Probing is complete.
+ kProbingComplete,
+ };
+
+ ABSL_MUST_USE_RESULT std::vector<ProbeClusterConfig>
+ InitiateExponentialProbing(int64_t at_time_ms);
+ ABSL_MUST_USE_RESULT std::vector<ProbeClusterConfig> InitiateProbing(
+ int64_t now_ms,
+ std::vector<int64_t> bitrates_to_probe,
+ bool probe_further);
+
+ bool network_available_;
+ State state_;
+ int64_t min_bitrate_to_probe_further_bps_;
+ int64_t time_last_probing_initiated_ms_;
+ int64_t estimated_bitrate_bps_;
+ int64_t start_bitrate_bps_;
+ int64_t max_bitrate_bps_;
+ int64_t last_bwe_drop_probing_time_ms_;
+ absl::optional<int64_t> alr_start_time_ms_;
+ absl::optional<int64_t> alr_end_time_ms_;
+ bool enable_periodic_alr_probing_;
+ int64_t time_of_last_large_drop_ms_;
+ int64_t bitrate_before_last_large_drop_bps_;
+ int64_t max_total_allocated_bitrate_;
+
+ const bool in_rapid_recovery_experiment_;
+ // For WebRTC.BWE.MidCallProbing.* metric.
+ bool mid_call_probing_waiting_for_result_;
+ int64_t mid_call_probing_bitrate_bps_;
+ int64_t mid_call_probing_success_threshold_;
+ RtcEventLog* event_log_;
+
+ int32_t next_probe_cluster_id_ = 1;
+
+ ProbeControllerConfig config_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_CONGESTION_CONTROLLER_GOOG_CC_PROBE_CONTROLLER_H_
diff --git a/third_party/libwebrtc/modules/congestion_controller/goog_cc/probe_controller_gn/moz.build b/third_party/libwebrtc/modules/congestion_controller/goog_cc/probe_controller_gn/moz.build
new file mode 100644
index 0000000000..e337176f21
--- /dev/null
+++ b/third_party/libwebrtc/modules/congestion_controller/goog_cc/probe_controller_gn/moz.build
@@ -0,0 +1,205 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/congestion_controller/goog_cc/probe_controller.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("probe_controller_gn")
diff --git a/third_party/libwebrtc/modules/congestion_controller/goog_cc/probe_controller_unittest.cc b/third_party/libwebrtc/modules/congestion_controller/goog_cc/probe_controller_unittest.cc
new file mode 100644
index 0000000000..23329ee041
--- /dev/null
+++ b/third_party/libwebrtc/modules/congestion_controller/goog_cc/probe_controller_unittest.cc
@@ -0,0 +1,449 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/congestion_controller/goog_cc/probe_controller.h"
+
+#include <memory>
+
+#include "api/transport/field_trial_based_config.h"
+#include "api/transport/network_types.h"
+#include "api/units/data_rate.h"
+#include "api/units/timestamp.h"
+#include "logging/rtc_event_log/mock/mock_rtc_event_log.h"
+#include "rtc_base/logging.h"
+#include "system_wrappers/include/clock.h"
+#include "test/explicit_key_value_config.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+using ::testing::_;
+using ::testing::AtLeast;
+using ::testing::Field;
+using ::testing::Matcher;
+using ::testing::NiceMock;
+using ::testing::Return;
+
+namespace webrtc {
+namespace test {
+
+namespace {
+
+constexpr int kMinBitrateBps = 100;
+constexpr int kStartBitrateBps = 300;
+constexpr int kMaxBitrateBps = 10000;
+
+constexpr int kExponentialProbingTimeoutMs = 5000;
+
+constexpr int kAlrProbeInterval = 5000;
+constexpr int kAlrEndedTimeoutMs = 3000;
+constexpr int kBitrateDropTimeoutMs = 5000;
+} // namespace
+
+class ProbeControllerFixture {
+ public:
+ explicit ProbeControllerFixture(absl::string_view field_trials = "")
+ : field_trial_config_(field_trials), clock_(100000000L) {}
+
+ std::unique_ptr<ProbeController> CreateController() {
+ return std::make_unique<ProbeController>(&field_trial_config_,
+ &mock_rtc_event_log);
+ }
+
+ Timestamp CurrentTime() { return clock_.CurrentTime(); }
+ int64_t NowMs() { return clock_.TimeInMilliseconds(); }
+ void AdvanceTimeMilliseconds(int64_t delta_ms) {
+ clock_.AdvanceTimeMilliseconds(delta_ms);
+ }
+
+ ExplicitKeyValueConfig field_trial_config_;
+ SimulatedClock clock_;
+ NiceMock<MockRtcEventLog> mock_rtc_event_log;
+};
+
+TEST(ProbeControllerTest, InitiatesProbingAtStart) {
+ ProbeControllerFixture fixture;
+ std::unique_ptr<ProbeController> probe_controller =
+ fixture.CreateController();
+
+ auto probes = probe_controller->SetBitrates(kMinBitrateBps, kStartBitrateBps,
+ kMaxBitrateBps, fixture.NowMs());
+ EXPECT_GE(probes.size(), 2u);
+}
+
+TEST(ProbeControllerTest, SetsDefaultTargetDurationAndTargetProbeCount) {
+ ProbeControllerFixture fixture;
+ std::unique_ptr<ProbeController> probe_controller =
+ fixture.CreateController();
+ std::vector<ProbeClusterConfig> probes = probe_controller->SetBitrates(
+ kMinBitrateBps, kStartBitrateBps, kMaxBitrateBps, fixture.NowMs());
+ ASSERT_GE(probes.size(), 2u);
+
+ EXPECT_EQ(probes[0].target_duration, TimeDelta::Millis(15));
+ EXPECT_EQ(probes[0].target_probe_count, 5);
+}
+
+TEST(ProbeControllerTest,
+ FieldTrialsOverrideDefaultTargetDurationAndTargetProbeCount) {
+ ProbeControllerFixture fixture(
+ "WebRTC-Bwe-ProbingBehavior/"
+ "min_probe_packets_sent:2,min_probe_duration:123ms/");
+ std::unique_ptr<ProbeController> probe_controller =
+ fixture.CreateController();
+ std::vector<ProbeClusterConfig> probes = probe_controller->SetBitrates(
+ kMinBitrateBps, kStartBitrateBps, kMaxBitrateBps, fixture.NowMs());
+ ASSERT_GE(probes.size(), 2u);
+
+ EXPECT_EQ(probes[0].target_duration, TimeDelta::Millis(123));
+ EXPECT_EQ(probes[0].target_probe_count, 2);
+}
+
+TEST(ProbeControllerTest, ProbeOnlyWhenNetworkIsUp) {
+ ProbeControllerFixture fixture;
+ std::unique_ptr<ProbeController> probe_controller =
+ fixture.CreateController();
+ auto probes = probe_controller->OnNetworkAvailability(
+ {.at_time = fixture.CurrentTime(), .network_available = false});
+ probes = probe_controller->SetBitrates(kMinBitrateBps, kStartBitrateBps,
+ kMaxBitrateBps, fixture.NowMs());
+ EXPECT_EQ(probes.size(), 0u);
+ probes = probe_controller->OnNetworkAvailability(
+ {.at_time = fixture.CurrentTime(), .network_available = true});
+ EXPECT_GE(probes.size(), 2u);
+}
+
+TEST(ProbeControllerTest, InitiatesProbingOnMaxBitrateIncrease) {
+ ProbeControllerFixture fixture;
+ std::unique_ptr<ProbeController> probe_controller =
+ fixture.CreateController();
+ auto probes = probe_controller->SetBitrates(kMinBitrateBps, kStartBitrateBps,
+ kMaxBitrateBps, fixture.NowMs());
+ // Long enough to time out exponential probing.
+ fixture.AdvanceTimeMilliseconds(kExponentialProbingTimeoutMs);
+ probes =
+ probe_controller->SetEstimatedBitrate(kStartBitrateBps, fixture.NowMs());
+ probes = probe_controller->Process(fixture.NowMs());
+ probes = probe_controller->SetBitrates(kMinBitrateBps, kStartBitrateBps,
+ kMaxBitrateBps + 100, fixture.NowMs());
+ EXPECT_EQ(probes.size(), 1u);
+ EXPECT_EQ(probes[0].target_data_rate.bps(), kMaxBitrateBps + 100);
+}
+
+TEST(ProbeControllerTest, ProbesOnMaxBitrateIncreaseOnlyWhenInAlr) {
+ ProbeControllerFixture fixture;
+ std::unique_ptr<ProbeController> probe_controller =
+ fixture.CreateController();
+ auto probes = probe_controller->SetBitrates(kMinBitrateBps, kStartBitrateBps,
+ kMaxBitrateBps, fixture.NowMs());
+ probes = probe_controller->SetEstimatedBitrate(kMaxBitrateBps - 1,
+ fixture.NowMs());
+
+ // Wait long enough to time out exponential probing.
+ fixture.AdvanceTimeMilliseconds(kExponentialProbingTimeoutMs);
+ probes = probe_controller->Process(fixture.NowMs());
+ EXPECT_EQ(probes.size(), 0u);
+
+ // Probe when in alr.
+ probe_controller->SetAlrStartTimeMs(fixture.NowMs());
+ probes = probe_controller->OnMaxTotalAllocatedBitrate(kMaxBitrateBps + 1,
+ fixture.NowMs());
+ EXPECT_EQ(probes.size(), 2u);
+
+ // Do not probe when not in alr.
+ probe_controller->SetAlrStartTimeMs(absl::nullopt);
+ probes = probe_controller->OnMaxTotalAllocatedBitrate(kMaxBitrateBps + 2,
+ fixture.NowMs());
+ EXPECT_TRUE(probes.empty());
+}
+
+TEST(ProbeControllerTest, InitiatesProbingOnMaxBitrateIncreaseAtMaxBitrate) {
+ ProbeControllerFixture fixture;
+ std::unique_ptr<ProbeController> probe_controller =
+ fixture.CreateController();
+ auto probes = probe_controller->SetBitrates(kMinBitrateBps, kStartBitrateBps,
+ kMaxBitrateBps, fixture.NowMs());
+ // Long enough to time out exponential probing.
+ fixture.AdvanceTimeMilliseconds(kExponentialProbingTimeoutMs);
+ probes =
+ probe_controller->SetEstimatedBitrate(kStartBitrateBps, fixture.NowMs());
+ probes = probe_controller->Process(fixture.NowMs());
+ probes =
+ probe_controller->SetEstimatedBitrate(kMaxBitrateBps, fixture.NowMs());
+ probes = probe_controller->SetBitrates(kMinBitrateBps, kStartBitrateBps,
+ kMaxBitrateBps + 100, fixture.NowMs());
+ EXPECT_EQ(probes.size(), 1u);
+ EXPECT_EQ(probes[0].target_data_rate.bps(), kMaxBitrateBps + 100);
+}
+
+TEST(ProbeControllerTest, TestExponentialProbing) {
+ ProbeControllerFixture fixture;
+ std::unique_ptr<ProbeController> probe_controller =
+ fixture.CreateController();
+ auto probes = probe_controller->SetBitrates(kMinBitrateBps, kStartBitrateBps,
+ kMaxBitrateBps, fixture.NowMs());
+
+ // Repeated probe should only be sent when estimated bitrate climbs above
+ // 0.7 * 6 * kStartBitrateBps = 1260.
+ probes = probe_controller->SetEstimatedBitrate(1000, fixture.NowMs());
+ EXPECT_EQ(probes.size(), 0u);
+
+ probes = probe_controller->SetEstimatedBitrate(1800, fixture.NowMs());
+ EXPECT_EQ(probes.size(), 1u);
+ EXPECT_EQ(probes[0].target_data_rate.bps(), 2 * 1800);
+}
+
+TEST(ProbeControllerTest, TestExponentialProbingTimeout) {
+ ProbeControllerFixture fixture;
+ std::unique_ptr<ProbeController> probe_controller =
+ fixture.CreateController();
+ auto probes = probe_controller->SetBitrates(kMinBitrateBps, kStartBitrateBps,
+ kMaxBitrateBps, fixture.NowMs());
+ // Advance far enough to cause a time out in waiting for probing result.
+ fixture.AdvanceTimeMilliseconds(kExponentialProbingTimeoutMs);
+ probes = probe_controller->Process(fixture.NowMs());
+
+ probes = probe_controller->SetEstimatedBitrate(1800, fixture.NowMs());
+ EXPECT_EQ(probes.size(), 0u);
+}
+
+TEST(ProbeControllerTest, RequestProbeInAlr) {
+ ProbeControllerFixture fixture;
+ std::unique_ptr<ProbeController> probe_controller =
+ fixture.CreateController();
+ auto probes = probe_controller->SetBitrates(kMinBitrateBps, kStartBitrateBps,
+ kMaxBitrateBps, fixture.NowMs());
+ EXPECT_GE(probes.size(), 2u);
+ probes = probe_controller->SetEstimatedBitrate(500, fixture.NowMs());
+
+ probe_controller->SetAlrStartTimeMs(fixture.NowMs());
+ fixture.AdvanceTimeMilliseconds(kAlrProbeInterval + 1);
+ probes = probe_controller->Process(fixture.NowMs());
+ probes = probe_controller->SetEstimatedBitrate(250, fixture.NowMs());
+ probes = probe_controller->RequestProbe(fixture.NowMs());
+
+ EXPECT_EQ(probes.size(), 1u);
+ EXPECT_EQ(probes[0].target_data_rate.bps(), 0.85 * 500);
+}
+
+TEST(ProbeControllerTest, RequestProbeWhenAlrEndedRecently) {
+ ProbeControllerFixture fixture;
+ std::unique_ptr<ProbeController> probe_controller =
+ fixture.CreateController();
+ auto probes = probe_controller->SetBitrates(kMinBitrateBps, kStartBitrateBps,
+ kMaxBitrateBps, fixture.NowMs());
+ EXPECT_EQ(probes.size(), 2u);
+ probes = probe_controller->SetEstimatedBitrate(500, fixture.NowMs());
+
+ probe_controller->SetAlrStartTimeMs(absl::nullopt);
+ fixture.AdvanceTimeMilliseconds(kAlrProbeInterval + 1);
+ probes = probe_controller->Process(fixture.NowMs());
+ probes = probe_controller->SetEstimatedBitrate(250, fixture.NowMs());
+ probe_controller->SetAlrEndedTimeMs(fixture.NowMs());
+ fixture.AdvanceTimeMilliseconds(kAlrEndedTimeoutMs - 1);
+ probes = probe_controller->RequestProbe(fixture.NowMs());
+
+ EXPECT_EQ(probes.size(), 1u);
+ EXPECT_EQ(probes[0].target_data_rate.bps(), 0.85 * 500);
+}
+
+TEST(ProbeControllerTest, RequestProbeWhenAlrNotEndedRecently) {
+ ProbeControllerFixture fixture;
+ std::unique_ptr<ProbeController> probe_controller =
+ fixture.CreateController();
+ auto probes = probe_controller->SetBitrates(kMinBitrateBps, kStartBitrateBps,
+ kMaxBitrateBps, fixture.NowMs());
+ EXPECT_EQ(probes.size(), 2u);
+ probes = probe_controller->SetEstimatedBitrate(500, fixture.NowMs());
+
+ probe_controller->SetAlrStartTimeMs(absl::nullopt);
+ fixture.AdvanceTimeMilliseconds(kAlrProbeInterval + 1);
+ probes = probe_controller->Process(fixture.NowMs());
+ probes = probe_controller->SetEstimatedBitrate(250, fixture.NowMs());
+ probe_controller->SetAlrEndedTimeMs(fixture.NowMs());
+ fixture.AdvanceTimeMilliseconds(kAlrEndedTimeoutMs + 1);
+ probes = probe_controller->RequestProbe(fixture.NowMs());
+ EXPECT_EQ(probes.size(), 0u);
+}
+
+TEST(ProbeControllerTest, RequestProbeWhenBweDropNotRecent) {
+ ProbeControllerFixture fixture;
+ std::unique_ptr<ProbeController> probe_controller =
+ fixture.CreateController();
+ auto probes = probe_controller->SetBitrates(kMinBitrateBps, kStartBitrateBps,
+ kMaxBitrateBps, fixture.NowMs());
+ EXPECT_EQ(probes.size(), 2u);
+ probes = probe_controller->SetEstimatedBitrate(500, fixture.NowMs());
+
+ probe_controller->SetAlrStartTimeMs(fixture.NowMs());
+ fixture.AdvanceTimeMilliseconds(kAlrProbeInterval + 1);
+ probes = probe_controller->Process(fixture.NowMs());
+ probes = probe_controller->SetEstimatedBitrate(250, fixture.NowMs());
+ fixture.AdvanceTimeMilliseconds(kBitrateDropTimeoutMs + 1);
+ probes = probe_controller->RequestProbe(fixture.NowMs());
+ EXPECT_EQ(probes.size(), 0u);
+}
+
+TEST(ProbeControllerTest, PeriodicProbing) {
+ ProbeControllerFixture fixture;
+ std::unique_ptr<ProbeController> probe_controller =
+ fixture.CreateController();
+ probe_controller->EnablePeriodicAlrProbing(true);
+ auto probes = probe_controller->SetBitrates(kMinBitrateBps, kStartBitrateBps,
+ kMaxBitrateBps, fixture.NowMs());
+ EXPECT_EQ(probes.size(), 2u);
+ probes = probe_controller->SetEstimatedBitrate(500, fixture.NowMs());
+
+ int64_t start_time = fixture.NowMs();
+
+ // Expect the controller to send a new probe after 5s has passed.
+ probe_controller->SetAlrStartTimeMs(start_time);
+ fixture.AdvanceTimeMilliseconds(5000);
+ probes = probe_controller->Process(fixture.NowMs());
+ EXPECT_EQ(probes.size(), 1u);
+ EXPECT_EQ(probes[0].target_data_rate.bps(), 1000);
+
+ probes = probe_controller->SetEstimatedBitrate(500, fixture.NowMs());
+
+ // The following probe should be sent at 10s into ALR.
+ probe_controller->SetAlrStartTimeMs(start_time);
+ fixture.AdvanceTimeMilliseconds(4000);
+ probes = probe_controller->Process(fixture.NowMs());
+ probes = probe_controller->SetEstimatedBitrate(500, fixture.NowMs());
+ EXPECT_EQ(probes.size(), 0u);
+
+ probe_controller->SetAlrStartTimeMs(start_time);
+ fixture.AdvanceTimeMilliseconds(1000);
+ probes = probe_controller->Process(fixture.NowMs());
+ EXPECT_EQ(probes.size(), 1u);
+ probes = probe_controller->SetEstimatedBitrate(500, fixture.NowMs());
+ EXPECT_EQ(probes.size(), 0u);
+}
+
+TEST(ProbeControllerTest, PeriodicProbingAfterReset) {
+ ProbeControllerFixture fixture;
+ std::unique_ptr<ProbeController> probe_controller =
+ fixture.CreateController();
+ int64_t alr_start_time = fixture.NowMs();
+
+ probe_controller->SetAlrStartTimeMs(alr_start_time);
+ probe_controller->EnablePeriodicAlrProbing(true);
+ auto probes = probe_controller->SetBitrates(kMinBitrateBps, kStartBitrateBps,
+ kMaxBitrateBps, fixture.NowMs());
+ probe_controller->Reset(fixture.NowMs());
+
+ fixture.AdvanceTimeMilliseconds(10000);
+ probes = probe_controller->Process(fixture.NowMs());
+ // Since bitrates are not yet set, no probe is sent even though we are in
+ // ALR mode.
+ EXPECT_EQ(probes.size(), 0u);
+
+ probes = probe_controller->SetBitrates(kMinBitrateBps, kStartBitrateBps,
+ kMaxBitrateBps, fixture.NowMs());
+ EXPECT_EQ(probes.size(), 2u);
+
+ // Make sure we use `kStartBitrateBps` as the estimated bitrate
+ // until SetEstimatedBitrate is called with an updated estimate.
+ fixture.AdvanceTimeMilliseconds(10000);
+ probes = probe_controller->Process(fixture.NowMs());
+ EXPECT_EQ(probes.size(), 1u);
+ EXPECT_EQ(probes[0].target_data_rate.bps(), kStartBitrateBps * 2);
+}
+
+TEST(ProbeControllerTest, TestExponentialProbingOverflow) {
+ ProbeControllerFixture fixture;
+ std::unique_ptr<ProbeController> probe_controller =
+ fixture.CreateController();
+ const int64_t kMbpsMultiplier = 1000000;
+ auto probes =
+ probe_controller->SetBitrates(kMinBitrateBps, 10 * kMbpsMultiplier,
+ 100 * kMbpsMultiplier, fixture.NowMs());
+ // Verify that probe bitrate is capped at the specified max bitrate.
+ probes = probe_controller->SetEstimatedBitrate(60 * kMbpsMultiplier,
+ fixture.NowMs());
+ EXPECT_EQ(probes.size(), 1u);
+ EXPECT_EQ(probes[0].target_data_rate.bps(), 100 * kMbpsMultiplier);
+ // Verify that repeated probes aren't sent.
+ probes = probe_controller->SetEstimatedBitrate(100 * kMbpsMultiplier,
+ fixture.NowMs());
+ EXPECT_EQ(probes.size(), 0u);
+}
+
+TEST(ProbeControllerTest, TestAllocatedBitrateCap) {
+ ProbeControllerFixture fixture;
+ std::unique_ptr<ProbeController> probe_controller =
+ fixture.CreateController();
+ const int64_t kMbpsMultiplier = 1000000;
+ const int64_t kMaxBitrateBps = 100 * kMbpsMultiplier;
+ auto probes = probe_controller->SetBitrates(
+ kMinBitrateBps, 10 * kMbpsMultiplier, kMaxBitrateBps, fixture.NowMs());
+
+ // Configure ALR for periodic probing.
+ probe_controller->EnablePeriodicAlrProbing(true);
+ int64_t alr_start_time = fixture.NowMs();
+ probe_controller->SetAlrStartTimeMs(alr_start_time);
+
+ int64_t estimated_bitrate_bps = kMaxBitrateBps / 10;
+ probes = probe_controller->SetEstimatedBitrate(estimated_bitrate_bps,
+ fixture.NowMs());
+
+ // Set a max allocated bitrate below the current estimate.
+ int64_t max_allocated_bps = estimated_bitrate_bps - 1 * kMbpsMultiplier;
+ probes = probe_controller->OnMaxTotalAllocatedBitrate(max_allocated_bps,
+ fixture.NowMs());
+ EXPECT_TRUE(probes.empty()); // No probe since lower than current max.
+
+ // Periodic probes (e.g. the ALR probe here) are capped at 2x the max
+ // allocation limit.
+ fixture.AdvanceTimeMilliseconds(5000);
+ probes = probe_controller->Process(fixture.NowMs());
+ EXPECT_EQ(probes.size(), 1u);
+ EXPECT_EQ(probes[0].target_data_rate.bps(), 2 * max_allocated_bps);
+
+ // Remove allocation limit.
+ EXPECT_TRUE(
+ probe_controller->OnMaxTotalAllocatedBitrate(0, fixture.NowMs()).empty());
+ fixture.AdvanceTimeMilliseconds(5000);
+ probes = probe_controller->Process(fixture.NowMs());
+ EXPECT_EQ(probes.size(), 1u);
+ EXPECT_EQ(probes[0].target_data_rate.bps(), estimated_bitrate_bps * 2);
+}
+
+TEST(ProbeControllerTest, ConfigurableProbingFieldTrial) {
+ ProbeControllerFixture fixture(
+ "WebRTC-Bwe-ProbingConfiguration/"
+ "p1:2,p2:5,step_size:3,further_probe_threshold:0.8,"
+ "alloc_p1:2,alloc_p2/");
+ std::unique_ptr<ProbeController> probe_controller =
+ fixture.CreateController();
+
+ auto probes = probe_controller->SetBitrates(kMinBitrateBps, kStartBitrateBps,
+ 5000000, fixture.NowMs());
+ EXPECT_EQ(probes.size(), 2u);
+ EXPECT_EQ(probes[0].target_data_rate.bps(), 600);
+ EXPECT_EQ(probes[1].target_data_rate.bps(), 1500);
+
+ // Repeated probe should only be sent when estimated bitrate climbs above
+ // 0.8 * 5 * kStartBitrateBps = 1200.
+ probes = probe_controller->SetEstimatedBitrate(1100, fixture.NowMs());
+ EXPECT_EQ(probes.size(), 0u);
+
+ probes = probe_controller->SetEstimatedBitrate(1250, fixture.NowMs());
+ EXPECT_EQ(probes.size(), 1u);
+ EXPECT_EQ(probes[0].target_data_rate.bps(), 3 * 1250);
+
+ fixture.AdvanceTimeMilliseconds(5000);
+ probes = probe_controller->Process(fixture.NowMs());
+
+ probe_controller->SetAlrStartTimeMs(fixture.NowMs());
+ probes =
+ probe_controller->OnMaxTotalAllocatedBitrate(200000, fixture.NowMs());
+ EXPECT_EQ(probes.size(), 1u);
+ EXPECT_EQ(probes[0].target_data_rate.bps(), 400000);
+}
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/congestion_controller/goog_cc/pushback_controller_gn/moz.build b/third_party/libwebrtc/modules/congestion_controller/goog_cc/pushback_controller_gn/moz.build
new file mode 100644
index 0000000000..b7b6673170
--- /dev/null
+++ b/third_party/libwebrtc/modules/congestion_controller/goog_cc/pushback_controller_gn/moz.build
@@ -0,0 +1,205 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/congestion_controller/goog_cc/congestion_window_pushback_controller.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("pushback_controller_gn")
diff --git a/third_party/libwebrtc/modules/congestion_controller/goog_cc/robust_throughput_estimator.cc b/third_party/libwebrtc/modules/congestion_controller/goog_cc/robust_throughput_estimator.cc
new file mode 100644
index 0000000000..792a93d41e
--- /dev/null
+++ b/third_party/libwebrtc/modules/congestion_controller/goog_cc/robust_throughput_estimator.cc
@@ -0,0 +1,189 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/congestion_controller/goog_cc/robust_throughput_estimator.h"
+
+#include <stddef.h>
+
+#include <algorithm>
+#include <utility>
+
+#include "api/units/data_rate.h"
+#include "api/units/data_size.h"
+#include "api/units/time_delta.h"
+#include "api/units/timestamp.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+RobustThroughputEstimator::RobustThroughputEstimator(
+ const RobustThroughputEstimatorSettings& settings)
+ : settings_(settings),
+ latest_discarded_send_time_(Timestamp::MinusInfinity()) {
+ RTC_DCHECK(settings.enabled);
+}
+
+RobustThroughputEstimator::~RobustThroughputEstimator() {}
+
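+// Returns true when the oldest packet should be evicted: the window holds
+// more than `max_window_packets`, spans more than `max_window_duration`, or
+// holds more than the target `window_packets` while already spanning more
+// than `min_window_duration`.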
+bool RobustThroughputEstimator::FirstPacketOutsideWindow() {
+ if (window_.empty())
+ return false;
+ if (window_.size() > settings_.max_window_packets)
+ return true;
+ TimeDelta current_window_duration =
+ window_.back().receive_time - window_.front().receive_time;
+ if (current_window_duration > settings_.max_window_duration)
+ return true;
+ if (window_.size() > settings_.window_packets &&
+ current_window_duration > settings_.min_window_duration) {
+ return true;
+ }
+ return false;
+}
+
+void RobustThroughputEstimator::IncomingPacketFeedbackVector(
+ const std::vector<PacketResult>& packet_feedback_vector) {
+ RTC_DCHECK(std::is_sorted(packet_feedback_vector.begin(),
+ packet_feedback_vector.end(),
+ PacketResult::ReceiveTimeOrder()));
+ for (const auto& packet : packet_feedback_vector) {
+ // Ignore packets without valid send or receive times.
+ // (This should not happen in production since lost packets are filtered
+ // out before passing the feedback vector to the throughput estimator.
+ // However, explicitly handling this case makes the estimator more robust
+ // and avoids a hard-to-detect bad state.)
+ if (packet.receive_time.IsInfinite() ||
+ packet.sent_packet.send_time.IsInfinite()) {
+ continue;
+ }
+
+ // Insert the new packet.
+ window_.push_back(packet);
+ window_.back().sent_packet.prior_unacked_data =
+ window_.back().sent_packet.prior_unacked_data *
+ settings_.unacked_weight;
+ // In most cases, receive timestamps should already be in order, but in the
+ // rare case where feedback packets have been reordered, we do some swaps to
+ // ensure that the window is sorted.
+ for (size_t i = window_.size() - 1;
+ i > 0 && window_[i].receive_time < window_[i - 1].receive_time; i--) {
+ std::swap(window_[i], window_[i - 1]);
+ }
+ }
+
+ // Remove old packets.
+ while (FirstPacketOutsideWindow()) {
+ latest_discarded_send_time_ = std::max(
+ latest_discarded_send_time_, window_.front().sent_packet.send_time);
+ window_.pop_front();
+ }
+}
+
+absl::optional<DataRate> RobustThroughputEstimator::bitrate() const {
+ if (window_.empty() || window_.size() < settings_.required_packets)
+ return absl::nullopt;
+
+ TimeDelta largest_recv_gap(TimeDelta::Zero());
+ TimeDelta second_largest_recv_gap(TimeDelta::Zero());
+ for (size_t i = 1; i < window_.size(); i++) {
+ // Find receive time gaps.
+ TimeDelta gap = window_[i].receive_time - window_[i - 1].receive_time;
+ if (gap > largest_recv_gap) {
+ second_largest_recv_gap = largest_recv_gap;
+ largest_recv_gap = gap;
+ } else if (gap > second_largest_recv_gap) {
+ second_largest_recv_gap = gap;
+ }
+ }
+
+ Timestamp first_send_time = Timestamp::PlusInfinity();
+ Timestamp last_send_time = Timestamp::MinusInfinity();
+ Timestamp first_recv_time = Timestamp::PlusInfinity();
+ Timestamp last_recv_time = Timestamp::MinusInfinity();
+ DataSize recv_size = DataSize::Bytes(0);
+ DataSize send_size = DataSize::Bytes(0);
+ DataSize first_recv_size = DataSize::Bytes(0);
+ DataSize last_send_size = DataSize::Bytes(0);
+ size_t num_sent_packets_in_window = 0;
+ for (const auto& packet : window_) {
+ if (packet.receive_time < first_recv_time) {
+ first_recv_time = packet.receive_time;
+ first_recv_size =
+ packet.sent_packet.size + packet.sent_packet.prior_unacked_data;
+ }
+ last_recv_time = std::max(last_recv_time, packet.receive_time);
+ recv_size += packet.sent_packet.size;
+ recv_size += packet.sent_packet.prior_unacked_data;
+
+ if (packet.sent_packet.send_time < latest_discarded_send_time_) {
+      // If we have dropped packets from the window that were sent after
+      // this packet, then this packet was reordered. Exclude it from
+      // the send rate computation (its send time may be very far in the
+      // past, which would lead to underestimating the send rate).
+      // However, excluding packets creates a risk that we end up without
+      // any packets left to compute a send rate from.
+ continue;
+ }
+ if (packet.sent_packet.send_time > last_send_time) {
+ last_send_time = packet.sent_packet.send_time;
+ last_send_size =
+ packet.sent_packet.size + packet.sent_packet.prior_unacked_data;
+ }
+ first_send_time = std::min(first_send_time, packet.sent_packet.send_time);
+
+ send_size += packet.sent_packet.size;
+ send_size += packet.sent_packet.prior_unacked_data;
+ ++num_sent_packets_in_window;
+ }
+
+  // Suppose a packet of size S is sent every T milliseconds.
+  // A window of N packets would contain N*S bytes, but the time difference
+  // between the first and the last packet would only be (N-1)*T. Thus, we
+  // need to remove the size of one packet to get the correct rate of S/T.
+  // Which packet to remove (if the packets have varying sizes)
+  // depends on the network model.
+  // Suppose that two packets with sizes s1 and s2 are received at times t1
+  // and t2, respectively. If the packets were transmitted back to back over
+  // a bottleneck with rate capacity r, then we'd expect t2 = t1 + s2 / r.
+  // Thus, r = s2 / (t2 - t1), so the size of the first packet doesn't affect
+  // the difference between t1 and t2.
+  // Analogously, if the first packet is sent at time t1 and the sender
+  // paces the packets at rate r, then the second packet can be sent at time
+  // t2 = t1 + s1 / r. Thus, the send rate estimate r = s1 / (t2 - t1) doesn't
+  // depend on the size of the last packet.
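+  // Worked example with illustrative numbers: five 1000-byte packets
+  // received 10 ms apart span 40 ms. Counting all 5000 bytes over 40 ms
+  // would give 125000 bytes/s; removing one packet's 1000 bytes gives the
+  // true rate of 4000 bytes / 40 ms = 100000 bytes/s.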
+ recv_size -= first_recv_size;
+ send_size -= last_send_size;
+
+ // Remove the largest gap by replacing it by the second largest gap.
+ // This is to ensure that spurious "delay spikes" (i.e. when the
+ // network stops transmitting packets for a short period, followed
+ // by a burst of delayed packets), don't cause the estimate to drop.
+ // This could cause an overestimation, which we guard against by
+ // never returning an estimate above the send rate.
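+  // Worked example with illustrative numbers: 20 packets nominally 10 ms
+  // apart with one 250 ms spike gap span 18 * 10 + 250 = 430 ms. Replacing
+  // the largest gap with the second largest yields 430 - 250 + 10 = 190 ms,
+  // as if the spike had not occurred.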
+ RTC_DCHECK(first_recv_time.IsFinite());
+ RTC_DCHECK(last_recv_time.IsFinite());
+ TimeDelta recv_duration = (last_recv_time - first_recv_time) -
+ largest_recv_gap + second_largest_recv_gap;
+ recv_duration = std::max(recv_duration, TimeDelta::Millis(1));
+
+ if (num_sent_packets_in_window < settings_.required_packets) {
+ // Too few send times to calculate a reliable send rate.
+ return recv_size / recv_duration;
+ }
+
+ RTC_DCHECK(first_send_time.IsFinite());
+ RTC_DCHECK(last_send_time.IsFinite());
+ TimeDelta send_duration = last_send_time - first_send_time;
+ send_duration = std::max(send_duration, TimeDelta::Millis(1));
+
+ return std::min(send_size / send_duration, recv_size / recv_duration);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/congestion_controller/goog_cc/robust_throughput_estimator.h b/third_party/libwebrtc/modules/congestion_controller/goog_cc/robust_throughput_estimator.h
new file mode 100644
index 0000000000..9d89856496
--- /dev/null
+++ b/third_party/libwebrtc/modules/congestion_controller/goog_cc/robust_throughput_estimator.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_CONGESTION_CONTROLLER_GOOG_CC_ROBUST_THROUGHPUT_ESTIMATOR_H_
+#define MODULES_CONGESTION_CONTROLLER_GOOG_CC_ROBUST_THROUGHPUT_ESTIMATOR_H_
+
+#include <deque>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/transport/network_types.h"
+#include "api/units/data_rate.h"
+#include "api/units/timestamp.h"
+#include "modules/congestion_controller/goog_cc/acknowledged_bitrate_estimator_interface.h"
+
+namespace webrtc {
+
+class RobustThroughputEstimator : public AcknowledgedBitrateEstimatorInterface {
+ public:
+ explicit RobustThroughputEstimator(
+ const RobustThroughputEstimatorSettings& settings);
+ ~RobustThroughputEstimator() override;
+
+ void IncomingPacketFeedbackVector(
+ const std::vector<PacketResult>& packet_feedback_vector) override;
+
+ absl::optional<DataRate> bitrate() const override;
+
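+  // PeekRate() simply forwards to bitrate(), and the ALR notifications below
+  // are no-ops: this estimator keeps no application-limited-region state.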
+ absl::optional<DataRate> PeekRate() const override { return bitrate(); }
+ void SetAlr(bool /*in_alr*/) override {}
+ void SetAlrEndedTime(Timestamp /*alr_ended_time*/) override {}
+
+ private:
+ bool FirstPacketOutsideWindow();
+
+ const RobustThroughputEstimatorSettings settings_;
+ std::deque<PacketResult> window_;
+ Timestamp latest_discarded_send_time_ = Timestamp::MinusInfinity();
+};
+
+} // namespace webrtc
+
+#endif // MODULES_CONGESTION_CONTROLLER_GOOG_CC_ROBUST_THROUGHPUT_ESTIMATOR_H_
diff --git a/third_party/libwebrtc/modules/congestion_controller/goog_cc/robust_throughput_estimator_unittest.cc b/third_party/libwebrtc/modules/congestion_controller/goog_cc/robust_throughput_estimator_unittest.cc
new file mode 100644
index 0000000000..95ac525640
--- /dev/null
+++ b/third_party/libwebrtc/modules/congestion_controller/goog_cc/robust_throughput_estimator_unittest.cc
@@ -0,0 +1,427 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/congestion_controller/goog_cc/robust_throughput_estimator.h"
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <algorithm>
+#include <memory>
+
+#include "absl/strings/string_view.h"
+#include "api/units/data_size.h"
+#include "api/units/time_delta.h"
+#include "api/units/timestamp.h"
+#include "test/explicit_key_value_config.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+RobustThroughputEstimatorSettings CreateRobustThroughputEstimatorSettings(
+ absl::string_view field_trial_string) {
+ test::ExplicitKeyValueConfig trials(field_trial_string);
+ RobustThroughputEstimatorSettings settings(&trials);
+ return settings;
+}
+
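+// Test helper that fabricates packet feedback. Send and receive timestamps
+// advance on independent clocks at the requested rates; the estimator only
+// uses differences within each clock, so the offset between the two clocks
+// is arbitrary.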
+class FeedbackGenerator {
+ public:
+ std::vector<PacketResult> CreateFeedbackVector(size_t number_of_packets,
+ DataSize packet_size,
+ DataRate send_rate,
+ DataRate recv_rate) {
+ std::vector<PacketResult> packet_feedback_vector(number_of_packets);
+ for (size_t i = 0; i < number_of_packets; i++) {
+ packet_feedback_vector[i].sent_packet.send_time = send_clock_;
+ packet_feedback_vector[i].sent_packet.sequence_number = sequence_number_;
+ packet_feedback_vector[i].sent_packet.size = packet_size;
+ send_clock_ += packet_size / send_rate;
+ recv_clock_ += packet_size / recv_rate;
+ sequence_number_ += 1;
+ packet_feedback_vector[i].receive_time = recv_clock_;
+ }
+ return packet_feedback_vector;
+ }
+
+ Timestamp CurrentReceiveClock() { return recv_clock_; }
+
+ void AdvanceReceiveClock(TimeDelta delta) { recv_clock_ += delta; }
+
+ void AdvanceSendClock(TimeDelta delta) { send_clock_ += delta; }
+
+ private:
+ Timestamp send_clock_ = Timestamp::Millis(100000);
+ Timestamp recv_clock_ = Timestamp::Millis(10000);
+ uint16_t sequence_number_ = 100;
+};
+
+TEST(RobustThroughputEstimatorTest, InitialEstimate) {
+ FeedbackGenerator feedback_generator;
+ RobustThroughputEstimator throughput_estimator(
+ CreateRobustThroughputEstimatorSettings(
+ "WebRTC-Bwe-RobustThroughputEstimatorSettings/"
+ "enabled:true/"));
+ DataRate send_rate(DataRate::BytesPerSec(100000));
+ DataRate recv_rate(DataRate::BytesPerSec(100000));
+
+ // No estimate until the estimator has enough data.
+ std::vector<PacketResult> packet_feedback =
+ feedback_generator.CreateFeedbackVector(9, DataSize::Bytes(1000),
+ send_rate, recv_rate);
+ throughput_estimator.IncomingPacketFeedbackVector(packet_feedback);
+ EXPECT_FALSE(throughput_estimator.bitrate().has_value());
+
+ // Estimate once `required_packets` packets have been received.
+ packet_feedback = feedback_generator.CreateFeedbackVector(
+ 1, DataSize::Bytes(1000), send_rate, recv_rate);
+ throughput_estimator.IncomingPacketFeedbackVector(packet_feedback);
+ auto throughput = throughput_estimator.bitrate();
+ EXPECT_EQ(throughput, send_rate);
+
+ // Estimate remains stable when send and receive rates are stable.
+ packet_feedback = feedback_generator.CreateFeedbackVector(
+ 15, DataSize::Bytes(1000), send_rate, recv_rate);
+ throughput_estimator.IncomingPacketFeedbackVector(packet_feedback);
+ throughput = throughput_estimator.bitrate();
+ EXPECT_EQ(throughput, send_rate);
+}
+
+TEST(RobustThroughputEstimatorTest, EstimateAdapts) {
+ FeedbackGenerator feedback_generator;
+ RobustThroughputEstimator throughput_estimator(
+ CreateRobustThroughputEstimatorSettings(
+ "WebRTC-Bwe-RobustThroughputEstimatorSettings/"
+ "enabled:true/"));
+
+ // 1 second, 800kbps, estimate is stable.
+ DataRate send_rate(DataRate::BytesPerSec(100000));
+ DataRate recv_rate(DataRate::BytesPerSec(100000));
+ for (int i = 0; i < 10; ++i) {
+ std::vector<PacketResult> packet_feedback =
+ feedback_generator.CreateFeedbackVector(10, DataSize::Bytes(1000),
+ send_rate, recv_rate);
+ throughput_estimator.IncomingPacketFeedbackVector(packet_feedback);
+ auto throughput = throughput_estimator.bitrate();
+ EXPECT_EQ(throughput, send_rate);
+ }
+
+ // 1 second, 1600kbps, estimate increases
+ send_rate = DataRate::BytesPerSec(200000);
+ recv_rate = DataRate::BytesPerSec(200000);
+ for (int i = 0; i < 20; ++i) {
+ std::vector<PacketResult> packet_feedback =
+ feedback_generator.CreateFeedbackVector(10, DataSize::Bytes(1000),
+ send_rate, recv_rate);
+ throughput_estimator.IncomingPacketFeedbackVector(packet_feedback);
+ auto throughput = throughput_estimator.bitrate();
+ ASSERT_TRUE(throughput.has_value());
+ EXPECT_GE(throughput.value(), DataRate::BytesPerSec(100000));
+ EXPECT_LE(throughput.value(), send_rate);
+ }
+
+ // 1 second, 1600kbps, estimate is stable
+ for (int i = 0; i < 20; ++i) {
+ std::vector<PacketResult> packet_feedback =
+ feedback_generator.CreateFeedbackVector(10, DataSize::Bytes(1000),
+ send_rate, recv_rate);
+ throughput_estimator.IncomingPacketFeedbackVector(packet_feedback);
+ auto throughput = throughput_estimator.bitrate();
+ EXPECT_EQ(throughput, send_rate);
+ }
+
+ // 1 second, 400kbps, estimate decreases
+ send_rate = DataRate::BytesPerSec(50000);
+ recv_rate = DataRate::BytesPerSec(50000);
+ for (int i = 0; i < 5; ++i) {
+ std::vector<PacketResult> packet_feedback =
+ feedback_generator.CreateFeedbackVector(10, DataSize::Bytes(1000),
+ send_rate, recv_rate);
+ throughput_estimator.IncomingPacketFeedbackVector(packet_feedback);
+ auto throughput = throughput_estimator.bitrate();
+ ASSERT_TRUE(throughput.has_value());
+ EXPECT_LE(throughput.value(), DataRate::BytesPerSec(200000));
+ EXPECT_GE(throughput.value(), send_rate);
+ }
+
+ // 1 second, 400kbps, estimate is stable
+ send_rate = DataRate::BytesPerSec(50000);
+ recv_rate = DataRate::BytesPerSec(50000);
+ for (int i = 0; i < 5; ++i) {
+ std::vector<PacketResult> packet_feedback =
+ feedback_generator.CreateFeedbackVector(10, DataSize::Bytes(1000),
+ send_rate, recv_rate);
+ throughput_estimator.IncomingPacketFeedbackVector(packet_feedback);
+ auto throughput = throughput_estimator.bitrate();
+ EXPECT_EQ(throughput, send_rate);
+ }
+}
+
+TEST(RobustThroughputEstimatorTest, CappedByReceiveRate) {
+ FeedbackGenerator feedback_generator;
+ RobustThroughputEstimator throughput_estimator(
+ CreateRobustThroughputEstimatorSettings(
+ "WebRTC-Bwe-RobustThroughputEstimatorSettings/"
+ "enabled:true/"));
+ DataRate send_rate(DataRate::BytesPerSec(100000));
+ DataRate recv_rate(DataRate::BytesPerSec(25000));
+
+ std::vector<PacketResult> packet_feedback =
+ feedback_generator.CreateFeedbackVector(20, DataSize::Bytes(1000),
+ send_rate, recv_rate);
+ throughput_estimator.IncomingPacketFeedbackVector(packet_feedback);
+ auto throughput = throughput_estimator.bitrate();
+ ASSERT_TRUE(throughput.has_value());
+ EXPECT_NEAR(throughput.value().bytes_per_sec<double>(),
+ recv_rate.bytes_per_sec<double>(),
+ 0.05 * recv_rate.bytes_per_sec<double>()); // Allow 5% error
+}
+
+TEST(RobustThroughputEstimatorTest, CappedBySendRate) {
+ FeedbackGenerator feedback_generator;
+ RobustThroughputEstimator throughput_estimator(
+ CreateRobustThroughputEstimatorSettings(
+ "WebRTC-Bwe-RobustThroughputEstimatorSettings/"
+ "enabled:true/"));
+ DataRate send_rate(DataRate::BytesPerSec(50000));
+ DataRate recv_rate(DataRate::BytesPerSec(100000));
+
+ std::vector<PacketResult> packet_feedback =
+ feedback_generator.CreateFeedbackVector(20, DataSize::Bytes(1000),
+ send_rate, recv_rate);
+ throughput_estimator.IncomingPacketFeedbackVector(packet_feedback);
+ auto throughput = throughput_estimator.bitrate();
+ ASSERT_TRUE(throughput.has_value());
+ EXPECT_NEAR(throughput.value().bytes_per_sec<double>(),
+ send_rate.bytes_per_sec<double>(),
+ 0.05 * send_rate.bytes_per_sec<double>()); // Allow 5% error
+}
+
+TEST(RobustThroughputEstimatorTest, DelaySpike) {
+ FeedbackGenerator feedback_generator;
+ // This test uses a 500ms window to amplify the effect
+ // of a delay spike.
+ RobustThroughputEstimator throughput_estimator(
+ CreateRobustThroughputEstimatorSettings(
+ "WebRTC-Bwe-RobustThroughputEstimatorSettings/"
+ "enabled:true,window_duration:500ms/"));
+ DataRate send_rate(DataRate::BytesPerSec(100000));
+ DataRate recv_rate(DataRate::BytesPerSec(100000));
+
+ std::vector<PacketResult> packet_feedback =
+ feedback_generator.CreateFeedbackVector(20, DataSize::Bytes(1000),
+ send_rate, recv_rate);
+ throughput_estimator.IncomingPacketFeedbackVector(packet_feedback);
+ auto throughput = throughput_estimator.bitrate();
+ EXPECT_EQ(throughput, send_rate);
+
+ // Delay spike. 25 packets sent, but none received.
+ feedback_generator.AdvanceReceiveClock(TimeDelta::Millis(250));
+
+ // Deliver all of the packets during the next 50 ms. (During this time,
+ // we'll have sent an additional 5 packets, so we need to receive 30
+ // packets at 1000 bytes each in 50 ms, i.e. 600000 bytes per second).
+ recv_rate = DataRate::BytesPerSec(600000);
+ // Estimate should not drop.
+ for (int i = 0; i < 30; ++i) {
+ packet_feedback = feedback_generator.CreateFeedbackVector(
+ 1, DataSize::Bytes(1000), send_rate, recv_rate);
+ throughput_estimator.IncomingPacketFeedbackVector(packet_feedback);
+ throughput = throughput_estimator.bitrate();
+ ASSERT_TRUE(throughput.has_value());
+ EXPECT_NEAR(throughput.value().bytes_per_sec<double>(),
+ send_rate.bytes_per_sec<double>(),
+ 0.05 * send_rate.bytes_per_sec<double>()); // Allow 5% error
+ }
+
+ // Delivery at normal rate. When the packets received before the gap
+  // have left the estimator's window, the receive rate will be high, but the
+ // estimate should be capped by the send rate.
+ recv_rate = DataRate::BytesPerSec(100000);
+ for (int i = 0; i < 20; ++i) {
+ packet_feedback = feedback_generator.CreateFeedbackVector(
+ 5, DataSize::Bytes(1000), send_rate, recv_rate);
+ throughput_estimator.IncomingPacketFeedbackVector(packet_feedback);
+ throughput = throughput_estimator.bitrate();
+ ASSERT_TRUE(throughput.has_value());
+ EXPECT_NEAR(throughput.value().bytes_per_sec<double>(),
+ send_rate.bytes_per_sec<double>(),
+ 0.05 * send_rate.bytes_per_sec<double>()); // Allow 5% error
+ }
+}
+
+TEST(RobustThroughputEstimatorTest, HighLoss) {
+ FeedbackGenerator feedback_generator;
+ RobustThroughputEstimator throughput_estimator(
+ CreateRobustThroughputEstimatorSettings(
+ "WebRTC-Bwe-RobustThroughputEstimatorSettings/"
+ "enabled:true/"));
+ DataRate send_rate(DataRate::BytesPerSec(100000));
+ DataRate recv_rate(DataRate::BytesPerSec(100000));
+
+ std::vector<PacketResult> packet_feedback =
+ feedback_generator.CreateFeedbackVector(20, DataSize::Bytes(1000),
+ send_rate, recv_rate);
+
+ // 50% loss
+ for (size_t i = 0; i < packet_feedback.size(); i++) {
+ if (i % 2 == 1) {
+ packet_feedback[i].receive_time = Timestamp::PlusInfinity();
+ }
+ }
+
+ std::sort(packet_feedback.begin(), packet_feedback.end(),
+ PacketResult::ReceiveTimeOrder());
+ throughput_estimator.IncomingPacketFeedbackVector(packet_feedback);
+ auto throughput = throughput_estimator.bitrate();
+ ASSERT_TRUE(throughput.has_value());
+ EXPECT_NEAR(throughput.value().bytes_per_sec<double>(),
+ send_rate.bytes_per_sec<double>() / 2,
+ 0.05 * send_rate.bytes_per_sec<double>() / 2); // Allow 5% error
+}
+
+TEST(RobustThroughputEstimatorTest, ReorderedFeedback) {
+ FeedbackGenerator feedback_generator;
+ RobustThroughputEstimator throughput_estimator(
+ CreateRobustThroughputEstimatorSettings(
+ "WebRTC-Bwe-RobustThroughputEstimatorSettings/"
+ "enabled:true/"));
+ DataRate send_rate(DataRate::BytesPerSec(100000));
+ DataRate recv_rate(DataRate::BytesPerSec(100000));
+
+ std::vector<PacketResult> packet_feedback =
+ feedback_generator.CreateFeedbackVector(20, DataSize::Bytes(1000),
+ send_rate, recv_rate);
+ throughput_estimator.IncomingPacketFeedbackVector(packet_feedback);
+ auto throughput = throughput_estimator.bitrate();
+ EXPECT_EQ(throughput, send_rate);
+
+ std::vector<PacketResult> delayed_feedback =
+ feedback_generator.CreateFeedbackVector(10, DataSize::Bytes(1000),
+ send_rate, recv_rate);
+ packet_feedback = feedback_generator.CreateFeedbackVector(
+ 10, DataSize::Bytes(1000), send_rate, recv_rate);
+
+ // Since we're missing some feedback, it's expected that the
+ // estimate will drop.
+ throughput_estimator.IncomingPacketFeedbackVector(packet_feedback);
+ throughput = throughput_estimator.bitrate();
+ ASSERT_TRUE(throughput.has_value());
+ EXPECT_LT(throughput.value(), send_rate);
+
+ // But it should completely recover as soon as we get the feedback.
+ throughput_estimator.IncomingPacketFeedbackVector(delayed_feedback);
+ throughput = throughput_estimator.bitrate();
+ EXPECT_EQ(throughput, send_rate);
+
+  // It should then remain stable (as if the feedback hadn't been reordered).
+ for (int i = 0; i < 10; ++i) {
+ packet_feedback = feedback_generator.CreateFeedbackVector(
+ 15, DataSize::Bytes(1000), send_rate, recv_rate);
+ throughput_estimator.IncomingPacketFeedbackVector(packet_feedback);
+ throughput = throughput_estimator.bitrate();
+ EXPECT_EQ(throughput, send_rate);
+ }
+}
+
+TEST(RobustThroughputEstimatorTest, DeepReordering) {
+ FeedbackGenerator feedback_generator;
+ // This test uses a 500ms window to amplify the
+ // effect of reordering.
+ RobustThroughputEstimator throughput_estimator(
+ CreateRobustThroughputEstimatorSettings(
+ "WebRTC-Bwe-RobustThroughputEstimatorSettings/"
+ "enabled:true,window_duration:500ms/"));
+ DataRate send_rate(DataRate::BytesPerSec(100000));
+ DataRate recv_rate(DataRate::BytesPerSec(100000));
+
+ std::vector<PacketResult> delayed_packets =
+ feedback_generator.CreateFeedbackVector(1, DataSize::Bytes(1000),
+ send_rate, recv_rate);
+
+ for (int i = 0; i < 10; i++) {
+ std::vector<PacketResult> packet_feedback =
+ feedback_generator.CreateFeedbackVector(10, DataSize::Bytes(1000),
+ send_rate, recv_rate);
+ throughput_estimator.IncomingPacketFeedbackVector(packet_feedback);
+ auto throughput = throughput_estimator.bitrate();
+ EXPECT_EQ(throughput, send_rate);
+ }
+
+ // Delayed packet arrives ~1 second after it should have.
+ // Since the window is 500 ms, the delayed packet was sent ~500
+ // ms before the second oldest packet. However, the send rate
+ // should not drop.
+ delayed_packets.front().receive_time =
+ feedback_generator.CurrentReceiveClock();
+ throughput_estimator.IncomingPacketFeedbackVector(delayed_packets);
+ auto throughput = throughput_estimator.bitrate();
+ ASSERT_TRUE(throughput.has_value());
+ EXPECT_NEAR(throughput.value().bytes_per_sec<double>(),
+ send_rate.bytes_per_sec<double>(),
+ 0.05 * send_rate.bytes_per_sec<double>()); // Allow 5% error
+
+  // Throughput should stay stable.
+ for (int i = 0; i < 10; i++) {
+ std::vector<PacketResult> packet_feedback =
+ feedback_generator.CreateFeedbackVector(10, DataSize::Bytes(1000),
+ send_rate, recv_rate);
+ throughput_estimator.IncomingPacketFeedbackVector(packet_feedback);
+ auto throughput = throughput_estimator.bitrate();
+ ASSERT_TRUE(throughput.has_value());
+ EXPECT_NEAR(throughput.value().bytes_per_sec<double>(),
+ send_rate.bytes_per_sec<double>(),
+ 0.05 * send_rate.bytes_per_sec<double>()); // Allow 5% error
+ }
+}
+
+TEST(RobustThroughputEstimatorTest, StreamPausedAndResumed) {
+ FeedbackGenerator feedback_generator;
+ RobustThroughputEstimator throughput_estimator(
+ CreateRobustThroughputEstimatorSettings(
+ "WebRTC-Bwe-RobustThroughputEstimatorSettings/"
+ "enabled:true/"));
+ DataRate send_rate(DataRate::BytesPerSec(100000));
+ DataRate recv_rate(DataRate::BytesPerSec(100000));
+
+ std::vector<PacketResult> packet_feedback =
+ feedback_generator.CreateFeedbackVector(20, DataSize::Bytes(1000),
+ send_rate, recv_rate);
+ throughput_estimator.IncomingPacketFeedbackVector(packet_feedback);
+ auto throughput = throughput_estimator.bitrate();
+ EXPECT_TRUE(throughput.has_value());
+ double expected_bytes_per_sec = 100 * 1000.0;
+ EXPECT_NEAR(throughput.value().bytes_per_sec<double>(),
+ expected_bytes_per_sec,
+ 0.05 * expected_bytes_per_sec); // Allow 5% error
+
+ // No packets sent or feedback received for 60s.
+ feedback_generator.AdvanceSendClock(TimeDelta::Seconds(60));
+ feedback_generator.AdvanceReceiveClock(TimeDelta::Seconds(60));
+
+ // Resume sending packets at the same rate as before. The estimate
+ // will initially be invalid, due to lack of recent data.
+ packet_feedback = feedback_generator.CreateFeedbackVector(
+ 5, DataSize::Bytes(1000), send_rate, recv_rate);
+ throughput_estimator.IncomingPacketFeedbackVector(packet_feedback);
+ throughput = throughput_estimator.bitrate();
+ EXPECT_FALSE(throughput.has_value());
+
+  // But it should be back at the normal level once we have enough data.
+ for (int i = 0; i < 4; ++i) {
+ packet_feedback = feedback_generator.CreateFeedbackVector(
+ 5, DataSize::Bytes(1000), send_rate, recv_rate);
+ throughput_estimator.IncomingPacketFeedbackVector(packet_feedback);
+ throughput = throughput_estimator.bitrate();
+ EXPECT_EQ(throughput, send_rate);
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/congestion_controller/goog_cc/send_side_bandwidth_estimation.cc b/third_party/libwebrtc/modules/congestion_controller/goog_cc/send_side_bandwidth_estimation.cc
new file mode 100644
index 0000000000..cc117b998a
--- /dev/null
+++ b/third_party/libwebrtc/modules/congestion_controller/goog_cc/send_side_bandwidth_estimation.cc
@@ -0,0 +1,680 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/congestion_controller/goog_cc/send_side_bandwidth_estimation.h"
+
+#include <algorithm>
+#include <cstdio>
+#include <limits>
+#include <memory>
+#include <string>
+
+#include "absl/strings/match.h"
+#include "api/field_trials_view.h"
+#include "api/network_state_predictor.h"
+#include "api/rtc_event_log/rtc_event.h"
+#include "api/rtc_event_log/rtc_event_log.h"
+#include "api/units/data_rate.h"
+#include "api/units/time_delta.h"
+#include "logging/rtc_event_log/events/rtc_event_bwe_update_loss_based.h"
+#include "modules/congestion_controller/goog_cc/loss_based_bwe_v2.h"
+#include "modules/remote_bitrate_estimator/include/bwe_defines.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "system_wrappers/include/field_trial.h"
+#include "system_wrappers/include/metrics.h"
+
+namespace webrtc {
+namespace {
+constexpr TimeDelta kBweIncreaseInterval = TimeDelta::Millis(1000);
+constexpr TimeDelta kBweDecreaseInterval = TimeDelta::Millis(300);
+constexpr TimeDelta kStartPhase = TimeDelta::Millis(2000);
+constexpr TimeDelta kBweConverganceTime = TimeDelta::Millis(20000);
+constexpr int kLimitNumPackets = 20;
+constexpr DataRate kDefaultMaxBitrate = DataRate::BitsPerSec(1000000000);
+constexpr TimeDelta kLowBitrateLogPeriod = TimeDelta::Millis(10000);
+constexpr TimeDelta kRtcEventLogPeriod = TimeDelta::Millis(5000);
+// Expecting that RTCP feedback is sent uniformly within [0.5, 1.5]s intervals.
+constexpr TimeDelta kMaxRtcpFeedbackInterval = TimeDelta::Millis(5000);
+
+constexpr float kDefaultLowLossThreshold = 0.02f;
+constexpr float kDefaultHighLossThreshold = 0.1f;
+constexpr DataRate kDefaultBitrateThreshold = DataRate::Zero();
+
+struct UmaRampUpMetric {
+ const char* metric_name;
+ int bitrate_kbps;
+};
+
+const UmaRampUpMetric kUmaRampupMetrics[] = {
+ {"WebRTC.BWE.RampUpTimeTo500kbpsInMs", 500},
+ {"WebRTC.BWE.RampUpTimeTo1000kbpsInMs", 1000},
+ {"WebRTC.BWE.RampUpTimeTo2000kbpsInMs", 2000}};
+const size_t kNumUmaRampupMetrics =
+ sizeof(kUmaRampupMetrics) / sizeof(kUmaRampupMetrics[0]);
+
+const char kBweLosExperiment[] = "WebRTC-BweLossExperiment";
+
+bool BweLossExperimentIsEnabled() {
+ std::string experiment_string =
+ webrtc::field_trial::FindFullName(kBweLosExperiment);
+ // The experiment is enabled iff the field trial string begins with "Enabled".
+ return absl::StartsWith(experiment_string, "Enabled");
+}
+
+bool ReadBweLossExperimentParameters(float* low_loss_threshold,
+ float* high_loss_threshold,
+ uint32_t* bitrate_threshold_kbps) {
+ RTC_DCHECK(low_loss_threshold);
+ RTC_DCHECK(high_loss_threshold);
+ RTC_DCHECK(bitrate_threshold_kbps);
+ std::string experiment_string =
+ webrtc::field_trial::FindFullName(kBweLosExperiment);
+ int parsed_values =
+ sscanf(experiment_string.c_str(), "Enabled-%f,%f,%u", low_loss_threshold,
+ high_loss_threshold, bitrate_threshold_kbps);
+ if (parsed_values == 3) {
+ RTC_CHECK_GT(*low_loss_threshold, 0.0f)
+ << "Loss threshold must be greater than 0.";
+ RTC_CHECK_LE(*low_loss_threshold, 1.0f)
+ << "Loss threshold must be less than or equal to 1.";
+ RTC_CHECK_GT(*high_loss_threshold, 0.0f)
+ << "Loss threshold must be greater than 0.";
+ RTC_CHECK_LE(*high_loss_threshold, 1.0f)
+ << "Loss threshold must be less than or equal to 1.";
+ RTC_CHECK_LE(*low_loss_threshold, *high_loss_threshold)
+ << "The low loss threshold must be less than or equal to the high loss "
+ "threshold.";
+ RTC_CHECK_GE(*bitrate_threshold_kbps, 0)
+ << "Bitrate threshold can't be negative.";
+ RTC_CHECK_LT(*bitrate_threshold_kbps,
+ std::numeric_limits<int>::max() / 1000)
+ << "Bitrate must be smaller enough to avoid overflows.";
+ return true;
+ }
+ RTC_LOG(LS_WARNING) << "Failed to parse parameters for BweLossExperiment "
+ "experiment from field trial string. Using default.";
+ *low_loss_threshold = kDefaultLowLossThreshold;
+ *high_loss_threshold = kDefaultHighLossThreshold;
+ *bitrate_threshold_kbps = kDefaultBitrateThreshold.kbps();
+ return false;
+}
+} // namespace
+
+LinkCapacityTracker::LinkCapacityTracker()
+ : tracking_rate("rate", TimeDelta::Seconds(10)) {
+ ParseFieldTrial({&tracking_rate},
+ field_trial::FindFullName("WebRTC-Bwe-LinkCapacity"));
+}
+
+LinkCapacityTracker::~LinkCapacityTracker() {}
+
+void LinkCapacityTracker::UpdateDelayBasedEstimate(
+ Timestamp at_time,
+ DataRate delay_based_bitrate) {
+ if (delay_based_bitrate < last_delay_based_estimate_) {
+ capacity_estimate_bps_ =
+ std::min(capacity_estimate_bps_, delay_based_bitrate.bps<double>());
+ last_link_capacity_update_ = at_time;
+ }
+ last_delay_based_estimate_ = delay_based_bitrate;
+}
+
+void LinkCapacityTracker::OnStartingRate(DataRate start_rate) {
+ if (last_link_capacity_update_.IsInfinite())
+ capacity_estimate_bps_ = start_rate.bps<double>();
+}
+
+void LinkCapacityTracker::OnRateUpdate(absl::optional<DataRate> acknowledged,
+ DataRate target,
+ Timestamp at_time) {
+ if (!acknowledged)
+ return;
+ DataRate acknowledged_target = std::min(*acknowledged, target);
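+  // The estimate is only raised here, via an exponential moving average with
+  // alpha = exp(-delta / tracking_rate): the longer since the last update,
+  // the smaller alpha and the more weight the new acknowledged sample gets.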
+ if (acknowledged_target.bps() > capacity_estimate_bps_) {
+ TimeDelta delta = at_time - last_link_capacity_update_;
+ double alpha = delta.IsFinite() ? exp(-(delta / tracking_rate.Get())) : 0;
+ capacity_estimate_bps_ = alpha * capacity_estimate_bps_ +
+ (1 - alpha) * acknowledged_target.bps<double>();
+ }
+ last_link_capacity_update_ = at_time;
+}
+
+void LinkCapacityTracker::OnRttBackoff(DataRate backoff_rate,
+ Timestamp at_time) {
+ capacity_estimate_bps_ =
+ std::min(capacity_estimate_bps_, backoff_rate.bps<double>());
+ last_link_capacity_update_ = at_time;
+}
+
+DataRate LinkCapacityTracker::estimate() const {
+ return DataRate::BitsPerSec(capacity_estimate_bps_);
+}
+
+RttBasedBackoff::RttBasedBackoff(const FieldTrialsView* key_value_config)
+ : disabled_("Disabled"),
+ configured_limit_("limit", TimeDelta::Seconds(3)),
+ drop_fraction_("fraction", 0.8),
+ drop_interval_("interval", TimeDelta::Seconds(1)),
+ bandwidth_floor_("floor", DataRate::KilobitsPerSec(5)),
+ rtt_limit_(TimeDelta::PlusInfinity()),
+ // By initializing this to plus infinity, we make sure that we never
+ // trigger rtt backoff unless packet feedback is enabled.
+ last_propagation_rtt_update_(Timestamp::PlusInfinity()),
+ last_propagation_rtt_(TimeDelta::Zero()),
+ last_packet_sent_(Timestamp::MinusInfinity()) {
+ ParseFieldTrial({&disabled_, &configured_limit_, &drop_fraction_,
+ &drop_interval_, &bandwidth_floor_},
+ key_value_config->Lookup("WebRTC-Bwe-MaxRttLimit"));
+ if (!disabled_) {
+ rtt_limit_ = configured_limit_.Get();
+ }
+}
+
+void RttBasedBackoff::UpdatePropagationRtt(Timestamp at_time,
+ TimeDelta propagation_rtt) {
+ last_propagation_rtt_update_ = at_time;
+ last_propagation_rtt_ = propagation_rtt;
+}
+
+TimeDelta RttBasedBackoff::CorrectedRtt(Timestamp at_time) const {
+ TimeDelta time_since_rtt = at_time - last_propagation_rtt_update_;
+  // Avoid timeout when no packets are being sent.
+  TimeDelta time_since_packet_sent = at_time - last_packet_sent_;
+  TimeDelta timeout_correction =
+      std::max(time_since_rtt - time_since_packet_sent, TimeDelta::Zero());
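+  // E.g. if the latest propagation RTT sample is 5 s old but the last packet
+  // was sent 4.5 s ago, only the 0.5 s spent sending without a fresh sample
+  // counts as staleness on top of the last measured RTT.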
+ return timeout_correction + last_propagation_rtt_;
+}
+
+RttBasedBackoff::~RttBasedBackoff() = default;
+
+SendSideBandwidthEstimation::SendSideBandwidthEstimation(
+ const FieldTrialsView* key_value_config,
+ RtcEventLog* event_log)
+ : rtt_backoff_(key_value_config),
+ lost_packets_since_last_loss_update_(0),
+ expected_packets_since_last_loss_update_(0),
+ current_target_(DataRate::Zero()),
+ last_logged_target_(DataRate::Zero()),
+ min_bitrate_configured_(kCongestionControllerMinBitrate),
+ max_bitrate_configured_(kDefaultMaxBitrate),
+ last_low_bitrate_log_(Timestamp::MinusInfinity()),
+ has_decreased_since_last_fraction_loss_(false),
+ last_loss_feedback_(Timestamp::MinusInfinity()),
+ last_loss_packet_report_(Timestamp::MinusInfinity()),
+ last_fraction_loss_(0),
+ last_logged_fraction_loss_(0),
+ last_round_trip_time_(TimeDelta::Zero()),
+ receiver_limit_(DataRate::PlusInfinity()),
+ delay_based_limit_(DataRate::PlusInfinity()),
+ time_last_decrease_(Timestamp::MinusInfinity()),
+ first_report_time_(Timestamp::MinusInfinity()),
+ initially_lost_packets_(0),
+ bitrate_at_2_seconds_(DataRate::Zero()),
+ uma_update_state_(kNoUpdate),
+ uma_rtt_state_(kNoUpdate),
+ rampup_uma_stats_updated_(kNumUmaRampupMetrics, false),
+ event_log_(event_log),
+ last_rtc_event_log_(Timestamp::MinusInfinity()),
+ low_loss_threshold_(kDefaultLowLossThreshold),
+ high_loss_threshold_(kDefaultHighLossThreshold),
+ bitrate_threshold_(kDefaultBitrateThreshold),
+ loss_based_bandwidth_estimator_v1_(key_value_config),
+ loss_based_bandwidth_estimator_v2_(key_value_config),
+ disable_receiver_limit_caps_only_("Disabled") {
+ RTC_DCHECK(event_log);
+ if (BweLossExperimentIsEnabled()) {
+ uint32_t bitrate_threshold_kbps;
+ if (ReadBweLossExperimentParameters(&low_loss_threshold_,
+ &high_loss_threshold_,
+ &bitrate_threshold_kbps)) {
+ RTC_LOG(LS_INFO) << "Enabled BweLossExperiment with parameters "
+ << low_loss_threshold_ << ", " << high_loss_threshold_
+ << ", " << bitrate_threshold_kbps;
+ bitrate_threshold_ = DataRate::KilobitsPerSec(bitrate_threshold_kbps);
+ }
+ }
+ ParseFieldTrial({&disable_receiver_limit_caps_only_},
+ key_value_config->Lookup("WebRTC-Bwe-ReceiverLimitCapsOnly"));
+}
+
+SendSideBandwidthEstimation::~SendSideBandwidthEstimation() {}
+
+void SendSideBandwidthEstimation::OnRouteChange() {
+ lost_packets_since_last_loss_update_ = 0;
+ expected_packets_since_last_loss_update_ = 0;
+ current_target_ = DataRate::Zero();
+ min_bitrate_configured_ = kCongestionControllerMinBitrate;
+ max_bitrate_configured_ = kDefaultMaxBitrate;
+ last_low_bitrate_log_ = Timestamp::MinusInfinity();
+ has_decreased_since_last_fraction_loss_ = false;
+ last_loss_feedback_ = Timestamp::MinusInfinity();
+ last_loss_packet_report_ = Timestamp::MinusInfinity();
+ last_fraction_loss_ = 0;
+ last_logged_fraction_loss_ = 0;
+ last_round_trip_time_ = TimeDelta::Zero();
+ receiver_limit_ = DataRate::PlusInfinity();
+ delay_based_limit_ = DataRate::PlusInfinity();
+ time_last_decrease_ = Timestamp::MinusInfinity();
+ first_report_time_ = Timestamp::MinusInfinity();
+ initially_lost_packets_ = 0;
+ bitrate_at_2_seconds_ = DataRate::Zero();
+ uma_update_state_ = kNoUpdate;
+ uma_rtt_state_ = kNoUpdate;
+ last_rtc_event_log_ = Timestamp::MinusInfinity();
+}
+
+void SendSideBandwidthEstimation::SetBitrates(
+ absl::optional<DataRate> send_bitrate,
+ DataRate min_bitrate,
+ DataRate max_bitrate,
+ Timestamp at_time) {
+ SetMinMaxBitrate(min_bitrate, max_bitrate);
+ if (send_bitrate) {
+ link_capacity_.OnStartingRate(*send_bitrate);
+ SetSendBitrate(*send_bitrate, at_time);
+ }
+}
+
+void SendSideBandwidthEstimation::SetSendBitrate(DataRate bitrate,
+ Timestamp at_time) {
+ RTC_DCHECK_GT(bitrate, DataRate::Zero());
+ // Reset to avoid being capped by the estimate.
+ delay_based_limit_ = DataRate::PlusInfinity();
+ UpdateTargetBitrate(bitrate, at_time);
+ // Clear last sent bitrate history so the new value can be used directly
+ // and not capped.
+ min_bitrate_history_.clear();
+}
+
+void SendSideBandwidthEstimation::SetMinMaxBitrate(DataRate min_bitrate,
+ DataRate max_bitrate) {
+ min_bitrate_configured_ =
+ std::max(min_bitrate, kCongestionControllerMinBitrate);
+ if (max_bitrate > DataRate::Zero() && max_bitrate.IsFinite()) {
+ max_bitrate_configured_ = std::max(min_bitrate_configured_, max_bitrate);
+ } else {
+ max_bitrate_configured_ = kDefaultMaxBitrate;
+ }
+}
+
+int SendSideBandwidthEstimation::GetMinBitrate() const {
+ return min_bitrate_configured_.bps<int>();
+}
+
+DataRate SendSideBandwidthEstimation::target_rate() const {
+ DataRate target = current_target_;
+ if (!disable_receiver_limit_caps_only_)
+ target = std::min(target, receiver_limit_);
+ return std::max(min_bitrate_configured_, target);
+}
+
+DataRate SendSideBandwidthEstimation::GetEstimatedLinkCapacity() const {
+ return link_capacity_.estimate();
+}
+
+void SendSideBandwidthEstimation::UpdateReceiverEstimate(Timestamp at_time,
+ DataRate bandwidth) {
+ // TODO(srte): Ensure caller passes PlusInfinity, not zero, to represent no
+ // limitation.
+ receiver_limit_ = bandwidth.IsZero() ? DataRate::PlusInfinity() : bandwidth;
+ ApplyTargetLimits(at_time);
+}
+
+void SendSideBandwidthEstimation::UpdateDelayBasedEstimate(Timestamp at_time,
+ DataRate bitrate) {
+ link_capacity_.UpdateDelayBasedEstimate(at_time, bitrate);
+ // TODO(srte): Ensure caller passes PlusInfinity, not zero, to represent no
+ // limitation.
+ delay_based_limit_ = bitrate.IsZero() ? DataRate::PlusInfinity() : bitrate;
+ ApplyTargetLimits(at_time);
+}
+
+void SendSideBandwidthEstimation::SetAcknowledgedRate(
+ absl::optional<DataRate> acknowledged_rate,
+ Timestamp at_time) {
+ acknowledged_rate_ = acknowledged_rate;
+ if (!acknowledged_rate.has_value()) {
+ return;
+ }
+ if (LossBasedBandwidthEstimatorV1Enabled()) {
+ loss_based_bandwidth_estimator_v1_.UpdateAcknowledgedBitrate(
+ *acknowledged_rate, at_time);
+ }
+ if (LossBasedBandwidthEstimatorV2Enabled()) {
+ loss_based_bandwidth_estimator_v2_.SetAcknowledgedBitrate(
+ *acknowledged_rate);
+ }
+}
+
+void SendSideBandwidthEstimation::UpdateLossBasedEstimator(
+ const TransportPacketsFeedback& report,
+ BandwidthUsage delay_detector_state) {
+ if (LossBasedBandwidthEstimatorV1Enabled()) {
+ loss_based_bandwidth_estimator_v1_.UpdateLossStatistics(
+ report.packet_feedbacks, report.feedback_time);
+ }
+ if (LossBasedBandwidthEstimatorV2Enabled()) {
+ loss_based_bandwidth_estimator_v2_.UpdateBandwidthEstimate(
+ report.packet_feedbacks, delay_based_limit_, delay_detector_state);
+ }
+}
+
+void SendSideBandwidthEstimation::UpdatePacketsLost(int64_t packets_lost,
+ int64_t number_of_packets,
+ Timestamp at_time) {
+ last_loss_feedback_ = at_time;
+ if (first_report_time_.IsInfinite())
+ first_report_time_ = at_time;
+
+ // Check sequence number diff and weight loss report
+ if (number_of_packets > 0) {
+ int64_t expected =
+ expected_packets_since_last_loss_update_ + number_of_packets;
+
+ // Don't generate a loss rate until it can be based on enough packets.
+ if (expected < kLimitNumPackets) {
+ // Accumulate reports.
+ expected_packets_since_last_loss_update_ = expected;
+ lost_packets_since_last_loss_update_ += packets_lost;
+ return;
+ }
+
+ has_decreased_since_last_fraction_loss_ = false;
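+    // E.g. 5 packets lost out of 25 expected gives (5 << 8) / 25 = 51,
+    // i.e. a loss fraction of 51/256, roughly 20%.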
+ int64_t lost_q8 =
+ std::max<int64_t>(lost_packets_since_last_loss_update_ + packets_lost,
+ 0)
+ << 8;
+ last_fraction_loss_ = std::min<int>(lost_q8 / expected, 255);
+
+ // Reset accumulators.
+ lost_packets_since_last_loss_update_ = 0;
+ expected_packets_since_last_loss_update_ = 0;
+ last_loss_packet_report_ = at_time;
+ UpdateEstimate(at_time);
+ }
+
+ UpdateUmaStatsPacketsLost(at_time, packets_lost);
+}
+
+void SendSideBandwidthEstimation::UpdateUmaStatsPacketsLost(Timestamp at_time,
+ int packets_lost) {
+ DataRate bitrate_kbps =
+ DataRate::KilobitsPerSec((current_target_.bps() + 500) / 1000);
+ for (size_t i = 0; i < kNumUmaRampupMetrics; ++i) {
+ if (!rampup_uma_stats_updated_[i] &&
+ bitrate_kbps.kbps() >= kUmaRampupMetrics[i].bitrate_kbps) {
+ RTC_HISTOGRAMS_COUNTS_100000(i, kUmaRampupMetrics[i].metric_name,
+ (at_time - first_report_time_).ms());
+ rampup_uma_stats_updated_[i] = true;
+ }
+ }
+ if (IsInStartPhase(at_time)) {
+ initially_lost_packets_ += packets_lost;
+ } else if (uma_update_state_ == kNoUpdate) {
+ uma_update_state_ = kFirstDone;
+ bitrate_at_2_seconds_ = bitrate_kbps;
+ RTC_HISTOGRAM_COUNTS("WebRTC.BWE.InitiallyLostPackets",
+ initially_lost_packets_, 0, 100, 50);
+ RTC_HISTOGRAM_COUNTS("WebRTC.BWE.InitialBandwidthEstimate",
+ bitrate_at_2_seconds_.kbps(), 0, 2000, 50);
+ } else if (uma_update_state_ == kFirstDone &&
+ at_time - first_report_time_ >= kBweConverganceTime) {
+ uma_update_state_ = kDone;
+ int bitrate_diff_kbps = std::max(
+ bitrate_at_2_seconds_.kbps<int>() - bitrate_kbps.kbps<int>(), 0);
+ RTC_HISTOGRAM_COUNTS("WebRTC.BWE.InitialVsConvergedDiff", bitrate_diff_kbps,
+ 0, 2000, 50);
+ }
+}
+
+void SendSideBandwidthEstimation::UpdateRtt(TimeDelta rtt, Timestamp at_time) {
+ // Update RTT if we were able to compute an RTT based on this RTCP.
+ // FlexFEC doesn't send RTCP SR, which means we won't be able to compute RTT.
+ if (rtt > TimeDelta::Zero())
+ last_round_trip_time_ = rtt;
+
+ if (!IsInStartPhase(at_time) && uma_rtt_state_ == kNoUpdate) {
+ uma_rtt_state_ = kDone;
+ RTC_HISTOGRAM_COUNTS("WebRTC.BWE.InitialRtt", rtt.ms<int>(), 0, 2000, 50);
+ }
+}
+
+void SendSideBandwidthEstimation::UpdateEstimate(Timestamp at_time) {
+ if (rtt_backoff_.CorrectedRtt(at_time) > rtt_backoff_.rtt_limit_) {
+ if (at_time - time_last_decrease_ >= rtt_backoff_.drop_interval_ &&
+ current_target_ > rtt_backoff_.bandwidth_floor_) {
+ time_last_decrease_ = at_time;
+ DataRate new_bitrate =
+ std::max(current_target_ * rtt_backoff_.drop_fraction_,
+ rtt_backoff_.bandwidth_floor_.Get());
+ link_capacity_.OnRttBackoff(new_bitrate, at_time);
+ UpdateTargetBitrate(new_bitrate, at_time);
+ return;
+ }
+ // TODO(srte): This is likely redundant in most cases.
+ ApplyTargetLimits(at_time);
+ return;
+ }
+
+ // We trust the REMB and/or delay-based estimate during the first 2 seconds if
+ // we haven't had any packet loss reported, to allow startup bitrate probing.
+ if (last_fraction_loss_ == 0 && IsInStartPhase(at_time)) {
+ DataRate new_bitrate = current_target_;
+ // TODO(srte): We should not allow the new_bitrate to be larger than the
+ // receiver limit here.
+ if (receiver_limit_.IsFinite())
+ new_bitrate = std::max(receiver_limit_, new_bitrate);
+ if (delay_based_limit_.IsFinite())
+ new_bitrate = std::max(delay_based_limit_, new_bitrate);
+ if (LossBasedBandwidthEstimatorV1Enabled()) {
+ loss_based_bandwidth_estimator_v1_.Initialize(new_bitrate);
+ }
+ if (LossBasedBandwidthEstimatorV2Enabled()) {
+ loss_based_bandwidth_estimator_v2_.SetBandwidthEstimate(new_bitrate);
+ }
+
+ if (new_bitrate != current_target_) {
+ min_bitrate_history_.clear();
+ if (LossBasedBandwidthEstimatorV1Enabled()) {
+ min_bitrate_history_.push_back(std::make_pair(at_time, new_bitrate));
+ } else {
+ min_bitrate_history_.push_back(
+ std::make_pair(at_time, current_target_));
+ }
+ UpdateTargetBitrate(new_bitrate, at_time);
+ return;
+ }
+ }
+ UpdateMinHistory(at_time);
+ if (last_loss_packet_report_.IsInfinite()) {
+ // No feedback received.
+ // TODO(srte): This is likely redundant in most cases.
+ ApplyTargetLimits(at_time);
+ return;
+ }
+
+ if (LossBasedBandwidthEstimatorV1ReadyForUse()) {
+ DataRate new_bitrate = loss_based_bandwidth_estimator_v1_.Update(
+ at_time, min_bitrate_history_.front().second, delay_based_limit_,
+ last_round_trip_time_);
+ UpdateTargetBitrate(new_bitrate, at_time);
+ return;
+ }
+
+ if (LossBasedBandwidthEstimatorV2ReadyForUse()) {
+ DataRate new_bitrate =
+ loss_based_bandwidth_estimator_v2_.GetBandwidthEstimate(
+ delay_based_limit_);
+ UpdateTargetBitrate(new_bitrate, at_time);
+ return;
+ }
+
+ TimeDelta time_since_loss_packet_report = at_time - last_loss_packet_report_;
+ if (time_since_loss_packet_report < 1.2 * kMaxRtcpFeedbackInterval) {
+ // We only care about loss above a given bitrate threshold.
+ float loss = last_fraction_loss_ / 256.0f;
+ // We only make decisions based on loss when the bitrate is above a
+ // threshold. This is a crude way of handling loss which is uncorrelated
+ // to congestion.
+ if (current_target_ < bitrate_threshold_ || loss <= low_loss_threshold_) {
+ // Loss < 2%: Increase rate by 8% of the min bitrate in the last
+ // kBweIncreaseInterval.
+ // Note that by remembering the bitrate over the last second one can
+      // ramp up one second faster than if only allowed to start ramping
+      // at 8% per second now. E.g.:
+      // If sending a constant 100kbps it can ramp up immediately to 108kbps
+ // whenever a receiver report is received with lower packet loss.
+ // If instead one would do: current_bitrate_ *= 1.08^(delta time),
+ // it would take over one second since the lower packet loss to achieve
+ // 108kbps.
+ DataRate new_bitrate = DataRate::BitsPerSec(
+ min_bitrate_history_.front().second.bps() * 1.08 + 0.5);
+
+ // Add 1 kbps extra, just to make sure that we do not get stuck
+ // (gives a little extra increase at low rates, negligible at higher
+ // rates).
+ new_bitrate += DataRate::BitsPerSec(1000);
+ UpdateTargetBitrate(new_bitrate, at_time);
+ return;
+ } else if (current_target_ > bitrate_threshold_) {
+ if (loss <= high_loss_threshold_) {
+ // Loss between 2% - 10%: Do nothing.
+ } else {
+ // Loss > 10%: Limit the rate decreases to once a kBweDecreaseInterval
+ // + rtt.
+ if (!has_decreased_since_last_fraction_loss_ &&
+ (at_time - time_last_decrease_) >=
+ (kBweDecreaseInterval + last_round_trip_time_)) {
+ time_last_decrease_ = at_time;
+
+ // Reduce rate:
+ // newRate = rate * (1 - 0.5*lossRate);
+ // where packetLoss = 256*lossRate;
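+          // E.g. last_fraction_loss_ = 51 (about 20% loss) multiplies the
+          // rate by (512 - 51) / 512, about 0.90, i.e. a 10% reduction.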
+ DataRate new_bitrate = DataRate::BitsPerSec(
+ (current_target_.bps() *
+ static_cast<double>(512 - last_fraction_loss_)) /
+ 512.0);
+ has_decreased_since_last_fraction_loss_ = true;
+ UpdateTargetBitrate(new_bitrate, at_time);
+ return;
+ }
+ }
+ }
+ }
+ // TODO(srte): This is likely redundant in most cases.
+ ApplyTargetLimits(at_time);
+}
+
+void SendSideBandwidthEstimation::UpdatePropagationRtt(
+ Timestamp at_time,
+ TimeDelta propagation_rtt) {
+ rtt_backoff_.UpdatePropagationRtt(at_time, propagation_rtt);
+}
+
+void SendSideBandwidthEstimation::OnSentPacket(const SentPacket& sent_packet) {
+ // Only feedback-triggering packets will be reported here.
+ rtt_backoff_.last_packet_sent_ = sent_packet.send_time;
+}
+
+bool SendSideBandwidthEstimation::IsInStartPhase(Timestamp at_time) const {
+ return first_report_time_.IsInfinite() ||
+ at_time - first_report_time_ < kStartPhase;
+}
+
+void SendSideBandwidthEstimation::UpdateMinHistory(Timestamp at_time) {
+ // Remove old data points from history.
+  // Since history precision is in ms, add one ms so the bitrate can still
+  // increase even if the timestamp is off by as little as 0.5 ms.
+ while (!min_bitrate_history_.empty() &&
+ at_time - min_bitrate_history_.front().first + TimeDelta::Millis(1) >
+ kBweIncreaseInterval) {
+ min_bitrate_history_.pop_front();
+ }
+
+ // Typical minimum sliding-window algorithm: Pop values higher than current
+ // bitrate before pushing it.
+ while (!min_bitrate_history_.empty() &&
+ current_target_ <= min_bitrate_history_.back().second) {
+ min_bitrate_history_.pop_back();
+ }
+
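+  // After the pops above, rates in the deque increase from front to back,
+  // so front() always holds the minimum target over the last
+  // kBweIncreaseInterval.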
+ min_bitrate_history_.push_back(std::make_pair(at_time, current_target_));
+}
+
+DataRate SendSideBandwidthEstimation::GetUpperLimit() const {
+ DataRate upper_limit = delay_based_limit_;
+ if (disable_receiver_limit_caps_only_)
+ upper_limit = std::min(upper_limit, receiver_limit_);
+ return std::min(upper_limit, max_bitrate_configured_);
+}
+
+void SendSideBandwidthEstimation::MaybeLogLowBitrateWarning(DataRate bitrate,
+ Timestamp at_time) {
+ if (at_time - last_low_bitrate_log_ > kLowBitrateLogPeriod) {
+ RTC_LOG(LS_WARNING) << "Estimated available bandwidth " << ToString(bitrate)
+ << " is below configured min bitrate "
+ << ToString(min_bitrate_configured_) << ".";
+ last_low_bitrate_log_ = at_time;
+ }
+}
+
+void SendSideBandwidthEstimation::MaybeLogLossBasedEvent(Timestamp at_time) {
+ if (current_target_ != last_logged_target_ ||
+ last_fraction_loss_ != last_logged_fraction_loss_ ||
+ at_time - last_rtc_event_log_ > kRtcEventLogPeriod) {
+ event_log_->Log(std::make_unique<RtcEventBweUpdateLossBased>(
+ current_target_.bps(), last_fraction_loss_,
+ expected_packets_since_last_loss_update_));
+ last_logged_fraction_loss_ = last_fraction_loss_;
+ last_logged_target_ = current_target_;
+ last_rtc_event_log_ = at_time;
+ }
+}
+
+void SendSideBandwidthEstimation::UpdateTargetBitrate(DataRate new_bitrate,
+ Timestamp at_time) {
+ new_bitrate = std::min(new_bitrate, GetUpperLimit());
+ if (new_bitrate < min_bitrate_configured_) {
+ MaybeLogLowBitrateWarning(new_bitrate, at_time);
+ new_bitrate = min_bitrate_configured_;
+ }
+ current_target_ = new_bitrate;
+ MaybeLogLossBasedEvent(at_time);
+ link_capacity_.OnRateUpdate(acknowledged_rate_, current_target_, at_time);
+}
+
+void SendSideBandwidthEstimation::ApplyTargetLimits(Timestamp at_time) {
+ UpdateTargetBitrate(current_target_, at_time);
+}
+
+bool SendSideBandwidthEstimation::LossBasedBandwidthEstimatorV1Enabled() const {
+ return loss_based_bandwidth_estimator_v1_.Enabled() &&
+ !LossBasedBandwidthEstimatorV2Enabled();
+}
+
+bool SendSideBandwidthEstimation::LossBasedBandwidthEstimatorV1ReadyForUse()
+ const {
+ return LossBasedBandwidthEstimatorV1Enabled() &&
+ loss_based_bandwidth_estimator_v1_.InUse();
+}
+
+bool SendSideBandwidthEstimation::LossBasedBandwidthEstimatorV2Enabled() const {
+ return loss_based_bandwidth_estimator_v2_.IsEnabled();
+}
+
+bool SendSideBandwidthEstimation::LossBasedBandwidthEstimatorV2ReadyForUse()
+ const {
+ return LossBasedBandwidthEstimatorV2Enabled() &&
+ loss_based_bandwidth_estimator_v2_.IsReady();
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/congestion_controller/goog_cc/send_side_bandwidth_estimation.h b/third_party/libwebrtc/modules/congestion_controller/goog_cc/send_side_bandwidth_estimation.h
new file mode 100644
index 0000000000..d79d285c19
--- /dev/null
+++ b/third_party/libwebrtc/modules/congestion_controller/goog_cc/send_side_bandwidth_estimation.h
@@ -0,0 +1,206 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ *
+ * Bitrate added for FEC and NACK is handled outside this class.
+ */
+
+#ifndef MODULES_CONGESTION_CONTROLLER_GOOG_CC_SEND_SIDE_BANDWIDTH_ESTIMATION_H_
+#define MODULES_CONGESTION_CONTROLLER_GOOG_CC_SEND_SIDE_BANDWIDTH_ESTIMATION_H_
+
+#include <stdint.h>
+
+#include <deque>
+#include <utility>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/field_trials_view.h"
+#include "api/network_state_predictor.h"
+#include "api/transport/network_types.h"
+#include "api/units/data_rate.h"
+#include "api/units/time_delta.h"
+#include "api/units/timestamp.h"
+#include "modules/congestion_controller/goog_cc/loss_based_bandwidth_estimation.h"
+#include "modules/congestion_controller/goog_cc/loss_based_bwe_v2.h"
+#include "rtc_base/experiments/field_trial_parser.h"
+
+namespace webrtc {
+
+class RtcEventLog;
+
+class LinkCapacityTracker {
+ public:
+ LinkCapacityTracker();
+ ~LinkCapacityTracker();
+ // Call when a new delay-based estimate is available.
+ void UpdateDelayBasedEstimate(Timestamp at_time,
+ DataRate delay_based_bitrate);
+ void OnStartingRate(DataRate start_rate);
+ void OnRateUpdate(absl::optional<DataRate> acknowledged,
+ DataRate target,
+ Timestamp at_time);
+ void OnRttBackoff(DataRate backoff_rate, Timestamp at_time);
+ DataRate estimate() const;
+
+ private:
+ FieldTrialParameter<TimeDelta> tracking_rate;
+ double capacity_estimate_bps_ = 0;
+ Timestamp last_link_capacity_update_ = Timestamp::MinusInfinity();
+ DataRate last_delay_based_estimate_ = DataRate::PlusInfinity();
+};
+
+class RttBasedBackoff {
+ public:
+ explicit RttBasedBackoff(const FieldTrialsView* key_value_config);
+ ~RttBasedBackoff();
+ void UpdatePropagationRtt(Timestamp at_time, TimeDelta propagation_rtt);
+ TimeDelta CorrectedRtt(Timestamp at_time) const;
+
+ FieldTrialFlag disabled_;
+ FieldTrialParameter<TimeDelta> configured_limit_;
+ FieldTrialParameter<double> drop_fraction_;
+ FieldTrialParameter<TimeDelta> drop_interval_;
+ FieldTrialParameter<DataRate> bandwidth_floor_;
+
+ public:
+ TimeDelta rtt_limit_;
+ Timestamp last_propagation_rtt_update_;
+ TimeDelta last_propagation_rtt_;
+ Timestamp last_packet_sent_;
+};
+
+class SendSideBandwidthEstimation {
+ public:
+ SendSideBandwidthEstimation() = delete;
+ SendSideBandwidthEstimation(const FieldTrialsView* key_value_config,
+ RtcEventLog* event_log);
+ ~SendSideBandwidthEstimation();
+
+ void OnRouteChange();
+
+ DataRate target_rate() const;
+ uint8_t fraction_loss() const { return last_fraction_loss_; }
+ TimeDelta round_trip_time() const { return last_round_trip_time_; }
+
+ DataRate GetEstimatedLinkCapacity() const;
+ // Call periodically to update the estimate.
+ void UpdateEstimate(Timestamp at_time);
+ void OnSentPacket(const SentPacket& sent_packet);
+ void UpdatePropagationRtt(Timestamp at_time, TimeDelta propagation_rtt);
+
+ // Call when we receive an RTCP message with TMMBR or REMB.
+ void UpdateReceiverEstimate(Timestamp at_time, DataRate bandwidth);
+
+ // Call when a new delay-based estimate is available.
+ void UpdateDelayBasedEstimate(Timestamp at_time, DataRate bitrate);
+
+ // Call when we receive an RTCP message with a ReceiveBlock.
+ void UpdatePacketsLost(int64_t packets_lost,
+ int64_t number_of_packets,
+ Timestamp at_time);
+
+ // Call when we receive an RTCP message with a ReceiveBlock.
+ void UpdateRtt(TimeDelta rtt, Timestamp at_time);
+
+ void SetBitrates(absl::optional<DataRate> send_bitrate,
+ DataRate min_bitrate,
+ DataRate max_bitrate,
+ Timestamp at_time);
+ void SetSendBitrate(DataRate bitrate, Timestamp at_time);
+ void SetMinMaxBitrate(DataRate min_bitrate, DataRate max_bitrate);
+ int GetMinBitrate() const;
+ void SetAcknowledgedRate(absl::optional<DataRate> acknowledged_rate,
+ Timestamp at_time);
+ void UpdateLossBasedEstimator(const TransportPacketsFeedback& report,
+ BandwidthUsage delay_detector_state);
+
+ private:
+ friend class GoogCcStatePrinter;
+
+ enum UmaState { kNoUpdate, kFirstDone, kDone };
+
+ bool IsInStartPhase(Timestamp at_time) const;
+
+ void UpdateUmaStatsPacketsLost(Timestamp at_time, int packets_lost);
+
+ // Updates history of min bitrates.
+ // After this method returns, min_bitrate_history_.front().second contains
+ // the minimum bitrate used during the last kBweIncreaseIntervalMs.
+ void UpdateMinHistory(Timestamp at_time);
+
+ // Gets the upper limit for the target bitrate. This is the minimum of the
+ // delay based limit, the receiver limit and the configured max bitrate.
+ DataRate GetUpperLimit() const;
+ // Prints a warning about a low `bitrate` if sufficiently long time has
+ // passed since the last warning.
+ void MaybeLogLowBitrateWarning(DataRate bitrate, Timestamp at_time);
+ // Stores an update to the event log if the loss rate has changed, the target
+ // has changed, or sufficient time has passed since last stored event.
+ void MaybeLogLossBasedEvent(Timestamp at_time);
+
+ // Caps `bitrate` to [min_bitrate_configured_, max_bitrate_configured_],
+ // sets `current_target_` to the capped value and updates the event log.
+ void UpdateTargetBitrate(DataRate bitrate, Timestamp at_time);
+ // Applies lower and upper bounds to the current target rate.
+ // TODO(srte): This seems to be called even when limits haven't changed;
+ // that should be cleaned up.
+ void ApplyTargetLimits(Timestamp at_time);
+
+ bool LossBasedBandwidthEstimatorV1Enabled() const;
+ bool LossBasedBandwidthEstimatorV2Enabled() const;
+
+ bool LossBasedBandwidthEstimatorV1ReadyForUse() const;
+ bool LossBasedBandwidthEstimatorV2ReadyForUse() const;
+
+ RttBasedBackoff rtt_backoff_;
+ LinkCapacityTracker link_capacity_;
+
+ std::deque<std::pair<Timestamp, DataRate>> min_bitrate_history_;
+
+ // Filters for incoming loss reports.
+ int lost_packets_since_last_loss_update_;
+ int expected_packets_since_last_loss_update_;
+
+ absl::optional<DataRate> acknowledged_rate_;
+ DataRate current_target_;
+ DataRate last_logged_target_;
+ DataRate min_bitrate_configured_;
+ DataRate max_bitrate_configured_;
+ Timestamp last_low_bitrate_log_;
+
+ bool has_decreased_since_last_fraction_loss_;
+ Timestamp last_loss_feedback_;
+ Timestamp last_loss_packet_report_;
+ uint8_t last_fraction_loss_;
+ uint8_t last_logged_fraction_loss_;
+ TimeDelta last_round_trip_time_;
+
+ // The max bitrate as set by the receiver in the call. This is typically
+ // signalled using the REMB RTCP message and is used when we don't have any
+ // send side delay based estimate.
+ DataRate receiver_limit_;
+ DataRate delay_based_limit_;
+ Timestamp time_last_decrease_;
+ Timestamp first_report_time_;
+ int initially_lost_packets_;
+ DataRate bitrate_at_2_seconds_;
+ UmaState uma_update_state_;
+ UmaState uma_rtt_state_;
+ std::vector<bool> rampup_uma_stats_updated_;
+ RtcEventLog* const event_log_;
+ Timestamp last_rtc_event_log_;
+ float low_loss_threshold_;
+ float high_loss_threshold_;
+ DataRate bitrate_threshold_;
+ LossBasedBandwidthEstimation loss_based_bandwidth_estimator_v1_;
+ LossBasedBweV2 loss_based_bandwidth_estimator_v2_;
+ FieldTrialFlag disable_receiver_limit_caps_only_;
+};
+} // namespace webrtc
+#endif // MODULES_CONGESTION_CONTROLLER_GOOG_CC_SEND_SIDE_BANDWIDTH_ESTIMATION_H_
diff --git a/third_party/libwebrtc/modules/congestion_controller/goog_cc/send_side_bandwidth_estimation_unittest.cc b/third_party/libwebrtc/modules/congestion_controller/goog_cc/send_side_bandwidth_estimation_unittest.cc
new file mode 100644
index 0000000000..17d1aa1ada
--- /dev/null
+++ b/third_party/libwebrtc/modules/congestion_controller/goog_cc/send_side_bandwidth_estimation_unittest.cc
@@ -0,0 +1,206 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/congestion_controller/goog_cc/send_side_bandwidth_estimation.h"
+
+#include "api/rtc_event_log/rtc_event.h"
+#include "logging/rtc_event_log/events/rtc_event_bwe_update_loss_based.h"
+#include "logging/rtc_event_log/mock/mock_rtc_event_log.h"
+#include "test/explicit_key_value_config.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+MATCHER(LossBasedBweUpdateWithBitrateOnly, "") {
+ if (arg->GetType() != RtcEvent::Type::BweUpdateLossBased) {
+ return false;
+ }
+ auto bwe_event = static_cast<RtcEventBweUpdateLossBased*>(arg);
+ return bwe_event->bitrate_bps() > 0 && bwe_event->fraction_loss() == 0;
+}
+
+MATCHER(LossBasedBweUpdateWithBitrateAndLossFraction, "") {
+ if (arg->GetType() != RtcEvent::Type::BweUpdateLossBased) {
+ return false;
+ }
+ auto bwe_event = static_cast<RtcEventBweUpdateLossBased*>(arg);
+ return bwe_event->bitrate_bps() > 0 && bwe_event->fraction_loss() > 0;
+}
+
+void TestProbing(bool use_delay_based) {
+ ::testing::NiceMock<MockRtcEventLog> event_log;
+ test::ExplicitKeyValueConfig key_value_config("");
+ SendSideBandwidthEstimation bwe(&key_value_config, &event_log);
+ int64_t now_ms = 0;
+ bwe.SetMinMaxBitrate(DataRate::BitsPerSec(100000),
+ DataRate::BitsPerSec(1500000));
+ bwe.SetSendBitrate(DataRate::BitsPerSec(200000), Timestamp::Millis(now_ms));
+
+ const int kRembBps = 1000000;
+ const int kSecondRembBps = kRembBps + 500000;
+
+ bwe.UpdatePacketsLost(/*packets_lost=*/0, /*number_of_packets=*/1,
+ Timestamp::Millis(now_ms));
+ bwe.UpdateRtt(TimeDelta::Millis(50), Timestamp::Millis(now_ms));
+
+ // Initial REMB applies immediately.
+ if (use_delay_based) {
+ bwe.UpdateDelayBasedEstimate(Timestamp::Millis(now_ms),
+ DataRate::BitsPerSec(kRembBps));
+ } else {
+ bwe.UpdateReceiverEstimate(Timestamp::Millis(now_ms),
+ DataRate::BitsPerSec(kRembBps));
+ }
+ bwe.UpdateEstimate(Timestamp::Millis(now_ms));
+ EXPECT_EQ(kRembBps, bwe.target_rate().bps());
+
+ // Second REMB doesn't apply immediately.
+ now_ms += 2001;
+ if (use_delay_based) {
+ bwe.UpdateDelayBasedEstimate(Timestamp::Millis(now_ms),
+ DataRate::BitsPerSec(kSecondRembBps));
+ } else {
+ bwe.UpdateReceiverEstimate(Timestamp::Millis(now_ms),
+ DataRate::BitsPerSec(kSecondRembBps));
+ }
+ bwe.UpdateEstimate(Timestamp::Millis(now_ms));
+ EXPECT_EQ(kRembBps, bwe.target_rate().bps());
+}
+
+TEST(SendSideBweTest, InitialRembWithProbing) {
+ TestProbing(false);
+}
+
+TEST(SendSideBweTest, InitialDelayBasedBweWithProbing) {
+ TestProbing(true);
+}
+
+TEST(SendSideBweTest, DoesntReapplyBitrateDecreaseWithoutFollowingRemb) {
+ MockRtcEventLog event_log;
+ EXPECT_CALL(event_log, LogProxy(LossBasedBweUpdateWithBitrateOnly()))
+ .Times(1);
+ EXPECT_CALL(event_log,
+ LogProxy(LossBasedBweUpdateWithBitrateAndLossFraction()))
+ .Times(1);
+ test::ExplicitKeyValueConfig key_value_config("");
+ SendSideBandwidthEstimation bwe(&key_value_config, &event_log);
+ static const int kMinBitrateBps = 100000;
+ static const int kInitialBitrateBps = 1000000;
+ int64_t now_ms = 1000;
+ bwe.SetMinMaxBitrate(DataRate::BitsPerSec(kMinBitrateBps),
+ DataRate::BitsPerSec(1500000));
+ bwe.SetSendBitrate(DataRate::BitsPerSec(kInitialBitrateBps),
+ Timestamp::Millis(now_ms));
+
+ static const uint8_t kFractionLoss = 128;
+ static const int64_t kRttMs = 50;
+ now_ms += 10000;
+
+ EXPECT_EQ(kInitialBitrateBps, bwe.target_rate().bps());
+ EXPECT_EQ(0, bwe.fraction_loss());
+ EXPECT_EQ(0, bwe.round_trip_time().ms());
+
+ // Signal heavy loss to go down in bitrate.
+ bwe.UpdatePacketsLost(/*packets_lost=*/50, /*number_of_packets=*/100,
+ Timestamp::Millis(now_ms));
+ bwe.UpdateRtt(TimeDelta::Millis(kRttMs), Timestamp::Millis(now_ms));
+
+ // Trigger an update one second later so that it is not rate limited.
+ now_ms += 1000;
+ bwe.UpdateEstimate(Timestamp::Millis(now_ms));
+ EXPECT_LT(bwe.target_rate().bps(), kInitialBitrateBps);
+ // Verify that the obtained bitrate isn't hitting the min bitrate, or this
+ // test doesn't make sense. If this ever happens, update the thresholds or
+ // loss rates so that it doesn't hit min bitrate after one bitrate update.
+ EXPECT_GT(bwe.target_rate().bps(), kMinBitrateBps);
+ EXPECT_EQ(kFractionLoss, bwe.fraction_loss());
+ EXPECT_EQ(kRttMs, bwe.round_trip_time().ms());
+
+ // Triggering an update shouldn't apply further downgrade nor upgrade since
+ // there's no intermediate receiver block received indicating whether this is
+ // currently good or not.
+ int last_bitrate_bps = bwe.target_rate().bps();
+ // Trigger an update one second later so that it is not rate limited (but it
+ // still shouldn't update).
+ now_ms += 1000;
+ bwe.UpdateEstimate(Timestamp::Millis(now_ms));
+
+ EXPECT_EQ(last_bitrate_bps, bwe.target_rate().bps());
+ // The old loss rate should still be applied though.
+ EXPECT_EQ(kFractionLoss, bwe.fraction_loss());
+ EXPECT_EQ(kRttMs, bwe.round_trip_time().ms());
+}
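+
+// Note on the expected value above: 50 packets lost out of 100 yields
+// fraction_loss() == 128, which pins down the representation: the loss
+// fraction is reported in Q8 (fraction * 256, here 0.5 * 256 = 128) to fit a
+// uint8_t, matching the fraction-lost field of RTCP receiver reports.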
+
+TEST(SendSideBweTest, SettingSendBitrateOverridesDelayBasedEstimate) {
+ ::testing::NiceMock<MockRtcEventLog> event_log;
+ test::ExplicitKeyValueConfig key_value_config("");
+ SendSideBandwidthEstimation bwe(&key_value_config, &event_log);
+ static const int kMinBitrateBps = 10000;
+ static const int kMaxBitrateBps = 10000000;
+ static const int kInitialBitrateBps = 300000;
+ static const int kDelayBasedBitrateBps = 350000;
+ static const int kForcedHighBitrate = 2500000;
+
+ int64_t now_ms = 0;
+
+ bwe.SetMinMaxBitrate(DataRate::BitsPerSec(kMinBitrateBps),
+ DataRate::BitsPerSec(kMaxBitrateBps));
+ bwe.SetSendBitrate(DataRate::BitsPerSec(kInitialBitrateBps),
+ Timestamp::Millis(now_ms));
+
+ bwe.UpdateDelayBasedEstimate(Timestamp::Millis(now_ms),
+ DataRate::BitsPerSec(kDelayBasedBitrateBps));
+ bwe.UpdateEstimate(Timestamp::Millis(now_ms));
+ EXPECT_GE(bwe.target_rate().bps(), kInitialBitrateBps);
+ EXPECT_LE(bwe.target_rate().bps(), kDelayBasedBitrateBps);
+
+ bwe.SetSendBitrate(DataRate::BitsPerSec(kForcedHighBitrate),
+ Timestamp::Millis(now_ms));
+ EXPECT_EQ(bwe.target_rate().bps(), kForcedHighBitrate);
+}
+
+TEST(RttBasedBackoff, DefaultEnabled) {
+ test::ExplicitKeyValueConfig key_value_config("");
+ RttBasedBackoff rtt_backoff(&key_value_config);
+ EXPECT_TRUE(rtt_backoff.rtt_limit_.IsFinite());
+}
+
+TEST(RttBasedBackoff, CanBeDisabled) {
+ test::ExplicitKeyValueConfig key_value_config(
+ "WebRTC-Bwe-MaxRttLimit/Disabled/");
+ RttBasedBackoff rtt_backoff(&key_value_config);
+ EXPECT_TRUE(rtt_backoff.rtt_limit_.IsPlusInfinity());
+}
+
+TEST(SendSideBweTest, FractionLossIsNotOverflowed) {
+ MockRtcEventLog event_log;
+ test::ExplicitKeyValueConfig key_value_config("");
+ SendSideBandwidthEstimation bwe(&key_value_config, &event_log);
+ static const int kMinBitrateBps = 100000;
+ static const int kInitialBitrateBps = 1000000;
+ int64_t now_ms = 1000;
+ bwe.SetMinMaxBitrate(DataRate::BitsPerSec(kMinBitrateBps),
+ DataRate::BitsPerSec(1500000));
+ bwe.SetSendBitrate(DataRate::BitsPerSec(kInitialBitrateBps),
+ Timestamp::Millis(now_ms));
+
+ now_ms += 10000;
+
+ EXPECT_EQ(kInitialBitrateBps, bwe.target_rate().bps());
+ EXPECT_EQ(0, bwe.fraction_loss());
+
+ // Signal negative loss.
+ bwe.UpdatePacketsLost(/*packets_lost=*/-1, /*number_of_packets=*/100,
+ Timestamp::Millis(now_ms));
+ EXPECT_EQ(0, bwe.fraction_loss());
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/congestion_controller/goog_cc/send_side_bwe_gn/moz.build b/third_party/libwebrtc/modules/congestion_controller/goog_cc/send_side_bwe_gn/moz.build
new file mode 100644
index 0000000000..742df43bbb
--- /dev/null
+++ b/third_party/libwebrtc/modules/congestion_controller/goog_cc/send_side_bwe_gn/moz.build
@@ -0,0 +1,215 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["BWE_TEST_LOGGING_COMPILE_TIME_ENABLE"] = "0"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+SOURCES += [
+ "/third_party/libwebrtc/modules/congestion_controller/goog_cc/send_side_bandwidth_estimation.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "GLESv2",
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "dl",
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("send_side_bwe_gn")
diff --git a/third_party/libwebrtc/modules/congestion_controller/goog_cc/test/goog_cc_printer.cc b/third_party/libwebrtc/modules/congestion_controller/goog_cc/test/goog_cc_printer.cc
new file mode 100644
index 0000000000..6a8849ed6d
--- /dev/null
+++ b/third_party/libwebrtc/modules/congestion_controller/goog_cc/test/goog_cc_printer.cc
@@ -0,0 +1,200 @@
+/*
+ * Copyright 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/congestion_controller/goog_cc/test/goog_cc_printer.h"
+
+#include <math.h>
+
+#include <utility>
+
+#include "absl/strings/string_view.h"
+#include "absl/types/optional.h"
+#include "modules/congestion_controller/goog_cc/alr_detector.h"
+#include "modules/congestion_controller/goog_cc/delay_based_bwe.h"
+#include "modules/congestion_controller/goog_cc/trendline_estimator.h"
+#include "modules/remote_bitrate_estimator/aimd_rate_control.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace {
+void WriteTypedValue(RtcEventLogOutput* out, int value) {
+ LogWriteFormat(out, "%i", value);
+}
+void WriteTypedValue(RtcEventLogOutput* out, double value) {
+ LogWriteFormat(out, "%.6f", value);
+}
+void WriteTypedValue(RtcEventLogOutput* out, absl::optional<DataRate> value) {
+ LogWriteFormat(out, "%.0f", value ? value->bytes_per_sec<double>() : NAN);
+}
+void WriteTypedValue(RtcEventLogOutput* out, absl::optional<DataSize> value) {
+ LogWriteFormat(out, "%.0f", value ? value->bytes<double>() : NAN);
+}
+void WriteTypedValue(RtcEventLogOutput* out, absl::optional<TimeDelta> value) {
+ LogWriteFormat(out, "%.3f", value ? value->seconds<double>() : NAN);
+}
+void WriteTypedValue(RtcEventLogOutput* out, absl::optional<Timestamp> value) {
+ LogWriteFormat(out, "%.3f", value ? value->seconds<double>() : NAN);
+}
+
+template <typename F>
+class TypedFieldLogger : public FieldLogger {
+ public:
+ TypedFieldLogger(absl::string_view name, F&& getter)
+ : name_(name), getter_(std::forward<F>(getter)) {}
+ const std::string& name() const override { return name_; }
+ void WriteValue(RtcEventLogOutput* out) override {
+ WriteTypedValue(out, getter_());
+ }
+
+ private:
+ std::string name_;
+ F getter_;
+};
+
+template <typename F>
+FieldLogger* Log(absl::string_view name, F&& getter) {
+ return new TypedFieldLogger<F>(name, std::forward<F>(getter));
+}
+
+} // namespace
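+
+// Illustration: Log() above type-erases any getter whose return type has a
+// matching WriteTypedValue() overload. A minimal hypothetical use, given some
+// RtcEventLogOutput* out (the real callers below hand ownership to
+// unique_ptr):
+//
+//   FieldLogger* rtt = Log("rtt", [] {
+//     return absl::optional<TimeDelta>(TimeDelta::Millis(50));
+//   });
+//   rtt->WriteValue(out);  // Writes "0.050" via the TimeDelta overload.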
+GoogCcStatePrinter::GoogCcStatePrinter() {
+ for (auto* logger : CreateLoggers()) {
+ loggers_.emplace_back(logger);
+ }
+}
+
+std::deque<FieldLogger*> GoogCcStatePrinter::CreateLoggers() {
+ auto stable_estimate = [this] {
+ return DataRate::KilobitsPerSec(
+ controller_->delay_based_bwe_->rate_control_.link_capacity_
+ .estimate_kbps_.value_or(-INFINITY));
+ };
+ auto rate_control_state = [this] {
+ return static_cast<int>(
+ controller_->delay_based_bwe_->rate_control_.rate_control_state_);
+ };
+ auto trend = [this] {
+ return reinterpret_cast<TrendlineEstimator*>(
+ controller_->delay_based_bwe_->active_delay_detector_);
+ };
+ auto acknowledged_rate = [this] {
+ return controller_->acknowledged_bitrate_estimator_->bitrate();
+ };
+ auto loss_cont = [&] {
+ return &controller_->bandwidth_estimation_
+ ->loss_based_bandwidth_estimator_v1_;
+ };
+ std::deque<FieldLogger*> loggers({
+ Log("time", [=] { return target_.at_time; }),
+ Log("rtt", [=] { return target_.network_estimate.round_trip_time; }),
+ Log("target", [=] { return target_.target_rate; }),
+ Log("stable_target", [=] { return target_.stable_target_rate; }),
+ Log("pacing", [=] { return pacing_.data_rate(); }),
+ Log("padding", [=] { return pacing_.pad_rate(); }),
+ Log("window", [=] { return congestion_window_; }),
+ Log("rate_control_state", [=] { return rate_control_state(); }),
+ Log("stable_estimate", [=] { return stable_estimate(); }),
+ Log("trendline", [=] { return trend()->prev_trend_; }),
+ Log("trendline_modified_offset",
+ [=] { return trend()->prev_modified_trend_; }),
+ Log("trendline_offset_threshold", [=] { return trend()->threshold_; }),
+ Log("acknowledged_rate", [=] { return acknowledged_rate(); }),
+ Log("est_capacity", [=] { return est_.link_capacity; }),
+ Log("est_capacity_dev", [=] { return est_.link_capacity_std_dev; }),
+ Log("est_capacity_min", [=] { return est_.link_capacity_min; }),
+ Log("est_cross_traffic", [=] { return est_.cross_traffic_ratio; }),
+ Log("est_cross_delay", [=] { return est_.cross_delay_rate; }),
+ Log("est_spike_delay", [=] { return est_.spike_delay_rate; }),
+ Log("est_pre_buffer", [=] { return est_.pre_link_buffer_delay; }),
+ Log("est_post_buffer", [=] { return est_.post_link_buffer_delay; }),
+ Log("est_propagation", [=] { return est_.propagation_delay; }),
+ Log("loss_ratio", [=] { return loss_cont()->last_loss_ratio_; }),
+ Log("loss_average", [=] { return loss_cont()->average_loss_; }),
+ Log("loss_average_max", [=] { return loss_cont()->average_loss_max_; }),
+ Log("loss_thres_inc",
+ [=] { return loss_cont()->loss_increase_threshold(); }),
+ Log("loss_thres_dec",
+ [=] { return loss_cont()->loss_decrease_threshold(); }),
+ Log("loss_dec_rate", [=] { return loss_cont()->decreased_bitrate(); }),
+ Log("loss_based_rate", [=] { return loss_cont()->loss_based_bitrate_; }),
+ Log("loss_ack_rate",
+ [=] { return loss_cont()->acknowledged_bitrate_max_; }),
+ Log("data_window", [=] { return controller_->current_data_window_; }),
+ Log("pushback_target",
+ [=] { return controller_->last_pushback_target_rate_; }),
+ });
+ return loggers;
+}
+GoogCcStatePrinter::~GoogCcStatePrinter() = default;
+
+void GoogCcStatePrinter::PrintHeaders(RtcEventLogOutput* log) {
+ int ix = 0;
+ for (const auto& logger : loggers_) {
+ if (ix++)
+ log->Write(" ");
+ log->Write(logger->name());
+ }
+ log->Write("\n");
+ log->Flush();
+}
+
+void GoogCcStatePrinter::PrintState(RtcEventLogOutput* log,
+ GoogCcNetworkController* controller,
+ Timestamp at_time) {
+ controller_ = controller;
+ auto state_update = controller_->GetNetworkState(at_time);
+ target_ = state_update.target_rate.value();
+ pacing_ = state_update.pacer_config.value();
+ if (state_update.congestion_window)
+ congestion_window_ = *state_update.congestion_window;
+ if (controller_->network_estimator_) {
+ est_ = controller_->network_estimator_->GetCurrentEstimate().value_or(
+ NetworkStateEstimate());
+ }
+
+ int ix = 0;
+ for (const auto& logger : loggers_) {
+ if (ix++)
+ log->Write(" ");
+ logger->WriteValue(log);
+ }
+
+ log->Write("\n");
+ log->Flush();
+}
+
+GoogCcDebugFactory::GoogCcDebugFactory()
+ : GoogCcDebugFactory(GoogCcFactoryConfig()) {}
+
+GoogCcDebugFactory::GoogCcDebugFactory(GoogCcFactoryConfig config)
+ : GoogCcNetworkControllerFactory(std::move(config)) {}
+
+std::unique_ptr<NetworkControllerInterface> GoogCcDebugFactory::Create(
+ NetworkControllerConfig config) {
+ RTC_CHECK(controller_ == nullptr);
+ auto controller = GoogCcNetworkControllerFactory::Create(config);
+ controller_ = static_cast<GoogCcNetworkController*>(controller.get());
+ return controller;
+}
+
+void GoogCcDebugFactory::PrintState(const Timestamp at_time) {
+ if (controller_ && log_writer_) {
+ printer_.PrintState(log_writer_.get(), controller_, at_time);
+ }
+}
+
+void GoogCcDebugFactory::AttachWriter(
+ std::unique_ptr<RtcEventLogOutput> log_writer) {
+ if (log_writer) {
+ log_writer_ = std::move(log_writer);
+ printer_.PrintHeaders(log_writer_.get());
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/congestion_controller/goog_cc/test/goog_cc_printer.h b/third_party/libwebrtc/modules/congestion_controller/goog_cc/test/goog_cc_printer.h
new file mode 100644
index 0000000000..16fa657e71
--- /dev/null
+++ b/third_party/libwebrtc/modules/congestion_controller/goog_cc/test/goog_cc_printer.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef MODULES_CONGESTION_CONTROLLER_GOOG_CC_TEST_GOOG_CC_PRINTER_H_
+#define MODULES_CONGESTION_CONTROLLER_GOOG_CC_TEST_GOOG_CC_PRINTER_H_
+
+#include <deque>
+#include <memory>
+#include <string>
+
+#include "api/rtc_event_log/rtc_event_log.h"
+#include "api/transport/goog_cc_factory.h"
+#include "api/transport/network_control.h"
+#include "api/transport/network_types.h"
+#include "api/units/timestamp.h"
+#include "modules/congestion_controller/goog_cc/goog_cc_network_control.h"
+#include "test/logging/log_writer.h"
+
+namespace webrtc {
+
+class FieldLogger {
+ public:
+ virtual ~FieldLogger() = default;
+ virtual const std::string& name() const = 0;
+ virtual void WriteValue(RtcEventLogOutput* out) = 0;
+};
+
+class GoogCcStatePrinter {
+ public:
+ GoogCcStatePrinter();
+ GoogCcStatePrinter(const GoogCcStatePrinter&) = delete;
+ GoogCcStatePrinter& operator=(const GoogCcStatePrinter&) = delete;
+ ~GoogCcStatePrinter();
+
+ void PrintHeaders(RtcEventLogOutput* log);
+ void PrintState(RtcEventLogOutput* log,
+ GoogCcNetworkController* controller,
+ Timestamp at_time);
+
+ private:
+ std::deque<FieldLogger*> CreateLoggers();
+ std::deque<std::unique_ptr<FieldLogger>> loggers_;
+
+ GoogCcNetworkController* controller_ = nullptr;
+ TargetTransferRate target_;
+ PacerConfig pacing_;
+ DataSize congestion_window_ = DataSize::PlusInfinity();
+ NetworkStateEstimate est_;
+};
+
+class GoogCcDebugFactory : public GoogCcNetworkControllerFactory {
+ public:
+ GoogCcDebugFactory();
+ explicit GoogCcDebugFactory(GoogCcFactoryConfig config);
+ std::unique_ptr<NetworkControllerInterface> Create(
+ NetworkControllerConfig config) override;
+
+ void PrintState(Timestamp at_time);
+
+ void AttachWriter(std::unique_ptr<RtcEventLogOutput> log_writer);
+
+ private:
+ GoogCcStatePrinter printer_;
+ GoogCcNetworkController* controller_ = nullptr;
+ std::unique_ptr<RtcEventLogOutput> log_writer_;
+};
+} // namespace webrtc
+
+#endif // MODULES_CONGESTION_CONTROLLER_GOOG_CC_TEST_GOOG_CC_PRINTER_H_
diff --git a/third_party/libwebrtc/modules/congestion_controller/goog_cc/trendline_estimator.cc b/third_party/libwebrtc/modules/congestion_controller/goog_cc/trendline_estimator.cc
new file mode 100644
index 0000000000..88182d4f80
--- /dev/null
+++ b/third_party/libwebrtc/modules/congestion_controller/goog_cc/trendline_estimator.cc
@@ -0,0 +1,332 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/congestion_controller/goog_cc/trendline_estimator.h"
+
+#include <math.h>
+
+#include <algorithm>
+#include <string>
+
+#include "absl/strings/match.h"
+#include "absl/types/optional.h"
+#include "api/network_state_predictor.h"
+#include "modules/remote_bitrate_estimator/test/bwe_test_logging.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/experiments/struct_parameters_parser.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/numerics/safe_minmax.h"
+
+namespace webrtc {
+
+namespace {
+
+// Parameters for linear least squares fit of regression line to noisy data.
+constexpr double kDefaultTrendlineSmoothingCoeff = 0.9;
+constexpr double kDefaultTrendlineThresholdGain = 4.0;
+const char kBweWindowSizeInPacketsExperiment[] =
+ "WebRTC-BweWindowSizeInPackets";
+
+size_t ReadTrendlineFilterWindowSize(const FieldTrialsView* key_value_config) {
+ std::string experiment_string =
+ key_value_config->Lookup(kBweWindowSizeInPacketsExperiment);
+ size_t window_size;
+ int parsed_values =
+ sscanf(experiment_string.c_str(), "Enabled-%zu", &window_size);
+ if (parsed_values == 1) {
+ if (window_size > 1)
+ return window_size;
+ RTC_LOG(LS_WARNING) << "Window size must be greater than 1.";
+ }
+ RTC_LOG(LS_WARNING) << "Failed to parse parameters for BweWindowSizeInPackets"
+ " experiment from field trial string. Using default.";
+ return TrendlineEstimatorSettings::kDefaultTrendlineWindowSize;
+}
+
+absl::optional<double> LinearFitSlope(
+ const std::deque<TrendlineEstimator::PacketTiming>& packets) {
+ RTC_DCHECK_GE(packets.size(), 2);
+ // Compute the "center of mass".
+ double sum_x = 0;
+ double sum_y = 0;
+ for (const auto& packet : packets) {
+ sum_x += packet.arrival_time_ms;
+ sum_y += packet.smoothed_delay_ms;
+ }
+ double x_avg = sum_x / packets.size();
+ double y_avg = sum_y / packets.size();
+ // Compute the slope k = \sum (x_i-x_avg)(y_i-y_avg) / \sum (x_i-x_avg)^2
+ double numerator = 0;
+ double denominator = 0;
+ for (const auto& packet : packets) {
+ double x = packet.arrival_time_ms;
+ double y = packet.smoothed_delay_ms;
+ numerator += (x - x_avg) * (y - y_avg);
+ denominator += (x - x_avg) * (x - x_avg);
+ }
+ if (denominator == 0)
+ return absl::nullopt;
+ return numerator / denominator;
+}
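+
+// Worked instance of the least-squares slope above: for the points
+// (0,0), (1,1), (2,2), x_avg = y_avg = 1, numerator = (-1)(-1) + 0 + (1)(1)
+// = 2 and denominator = 2, so the fitted slope is 1.0. As a standalone
+// sketch:
+//
+//   std::deque<TrendlineEstimator::PacketTiming> pts = {
+//       {0.0, 0.0, 0.0}, {1.0, 1.0, 0.0}, {2.0, 2.0, 0.0}};
+//   // LinearFitSlope(pts).value() == 1.0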
+
+absl::optional<double> ComputeSlopeCap(
+ const std::deque<TrendlineEstimator::PacketTiming>& packets,
+ const TrendlineEstimatorSettings& settings) {
+ RTC_DCHECK(1 <= settings.beginning_packets &&
+ settings.beginning_packets < packets.size());
+ RTC_DCHECK(1 <= settings.end_packets &&
+ settings.end_packets < packets.size());
+ RTC_DCHECK(settings.beginning_packets + settings.end_packets <=
+ packets.size());
+ TrendlineEstimator::PacketTiming early = packets[0];
+ for (size_t i = 1; i < settings.beginning_packets; ++i) {
+ if (packets[i].raw_delay_ms < early.raw_delay_ms)
+ early = packets[i];
+ }
+ size_t late_start = packets.size() - settings.end_packets;
+ TrendlineEstimator::PacketTiming late = packets[late_start];
+ for (size_t i = late_start + 1; i < packets.size(); ++i) {
+ if (packets[i].raw_delay_ms < late.raw_delay_ms)
+ late = packets[i];
+ }
+ if (late.arrival_time_ms - early.arrival_time_ms < 1) {
+ return absl::nullopt;
+ }
+ return (late.raw_delay_ms - early.raw_delay_ms) /
+ (late.arrival_time_ms - early.arrival_time_ms) +
+ settings.cap_uncertainty;
+}
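+
+// Worked instance of the cap above: with the smallest early raw delay 10 ms
+// at t = 0 ms and the smallest late raw delay 16 ms at t = 200 ms, the cap is
+// (16 - 10) / (200 - 0) + cap_uncertainty = 0.03 + cap_uncertainty, i.e. the
+// average delay growth per ms between the two minima plus a safety margin.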
+
+constexpr double kMaxAdaptOffsetMs = 15.0;
+constexpr double kOverUsingTimeThreshold = 10;
+constexpr int kMinNumDeltas = 60;
+constexpr int kDeltaCounterMax = 1000;
+
+} // namespace
+
+constexpr char TrendlineEstimatorSettings::kKey[];
+
+TrendlineEstimatorSettings::TrendlineEstimatorSettings(
+ const FieldTrialsView* key_value_config) {
+ if (absl::StartsWith(
+ key_value_config->Lookup(kBweWindowSizeInPacketsExperiment),
+ "Enabled")) {
+ window_size = ReadTrendlineFilterWindowSize(key_value_config);
+ }
+ Parser()->Parse(key_value_config->Lookup(TrendlineEstimatorSettings::kKey));
+ if (window_size < 10 || 200 < window_size) {
+ RTC_LOG(LS_WARNING) << "Window size must be between 10 and 200 packets";
+ window_size = kDefaultTrendlineWindowSize;
+ }
+ if (enable_cap) {
+ if (beginning_packets < 1 || end_packets < 1 ||
+ beginning_packets > window_size || end_packets > window_size) {
+ RTC_LOG(LS_WARNING) << "Size of beginning and end must be between 1 and "
+ << window_size;
+ enable_cap = false;
+ beginning_packets = end_packets = 0;
+ cap_uncertainty = 0.0;
+ }
+ if (beginning_packets + end_packets > window_size) {
+ RTC_LOG(LS_WARNING)
+ << "Size of beginning plus end can't exceed the window size";
+ enable_cap = false;
+ beginning_packets = end_packets = 0;
+ cap_uncertainty = 0.0;
+ }
+ if (cap_uncertainty < 0.0 || 0.025 < cap_uncertainty) {
+ RTC_LOG(LS_WARNING) << "Cap uncertainty must be between 0 and 0.025";
+ cap_uncertainty = 0.0;
+ }
+ }
+}
+
+std::unique_ptr<StructParametersParser> TrendlineEstimatorSettings::Parser() {
+ return StructParametersParser::Create("sort", &enable_sort, //
+ "cap", &enable_cap, //
+ "beginning_packets",
+ &beginning_packets, //
+ "end_packets", &end_packets, //
+ "cap_uncertainty", &cap_uncertainty, //
+ "window_size", &window_size);
+}
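+
+// Illustration, assuming the standard "Name/params/" field-trial string
+// format used by the tests in this patch: the parser consumes comma-separated
+// key:value pairs, e.g.
+//
+//   test::ExplicitKeyValueConfig config(
+//       "WebRTC-Bwe-TrendlineEstimatorSettings/sort:true,window_size:15/");
+//   TrendlineEstimatorSettings settings(&config);
+//   // settings.enable_sort == true, settings.window_size == 15; the
+//   // remaining fields keep their defaults.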
+
+TrendlineEstimator::TrendlineEstimator(
+ const FieldTrialsView* key_value_config,
+ NetworkStatePredictor* network_state_predictor)
+ : settings_(key_value_config),
+ smoothing_coef_(kDefaultTrendlineSmoothingCoeff),
+ threshold_gain_(kDefaultTrendlineThresholdGain),
+ num_of_deltas_(0),
+ first_arrival_time_ms_(-1),
+ accumulated_delay_(0),
+ smoothed_delay_(0),
+ delay_hist_(),
+ k_up_(0.0087),
+ k_down_(0.039),
+ overusing_time_threshold_(kOverUsingTimeThreshold),
+ threshold_(12.5),
+ prev_modified_trend_(NAN),
+ last_update_ms_(-1),
+ prev_trend_(0.0),
+ time_over_using_(-1),
+ overuse_counter_(0),
+ hypothesis_(BandwidthUsage::kBwNormal),
+ hypothesis_predicted_(BandwidthUsage::kBwNormal),
+ network_state_predictor_(network_state_predictor) {
+ RTC_LOG(LS_INFO)
+ << "Using Trendline filter for delay change estimation with settings "
+ << settings_.Parser()->Encode() << " and "
+ << (network_state_predictor_ ? "injected" : "no")
+ << " network state predictor";
+}
+
+TrendlineEstimator::~TrendlineEstimator() {}
+
+void TrendlineEstimator::UpdateTrendline(double recv_delta_ms,
+ double send_delta_ms,
+ int64_t send_time_ms,
+ int64_t arrival_time_ms,
+ size_t packet_size) {
+ const double delta_ms = recv_delta_ms - send_delta_ms;
+ ++num_of_deltas_;
+ num_of_deltas_ = std::min(num_of_deltas_, kDeltaCounterMax);
+ if (first_arrival_time_ms_ == -1)
+ first_arrival_time_ms_ = arrival_time_ms;
+
+ // Exponentially weighted moving average (EWMA) filter.
+ accumulated_delay_ += delta_ms;
+ BWE_TEST_LOGGING_PLOT(1, "accumulated_delay_ms", arrival_time_ms,
+ accumulated_delay_);
+ smoothed_delay_ = smoothing_coef_ * smoothed_delay_ +
+ (1 - smoothing_coef_) * accumulated_delay_;
+ BWE_TEST_LOGGING_PLOT(1, "smoothed_delay_ms", arrival_time_ms,
+ smoothed_delay_);
+
+ // Maintain the packet window.
+ delay_hist_.emplace_back(
+ static_cast<double>(arrival_time_ms - first_arrival_time_ms_),
+ smoothed_delay_, accumulated_delay_);
+ if (settings_.enable_sort) {
+ for (size_t i = delay_hist_.size() - 1;
+ i > 0 &&
+ delay_hist_[i].arrival_time_ms < delay_hist_[i - 1].arrival_time_ms;
+ --i) {
+ std::swap(delay_hist_[i], delay_hist_[i - 1]);
+ }
+ }
+ if (delay_hist_.size() > settings_.window_size)
+ delay_hist_.pop_front();
+
+ // Simple linear regression.
+ double trend = prev_trend_;
+ if (delay_hist_.size() == settings_.window_size) {
+ // Update the trend if it is possible to fit a line to the data. The delay
+ // trend can be seen as an estimate of (send_rate - capacity)/capacity.
+ // 0 < trend < 1 -> the delay increases, queues are filling up
+ // trend == 0 -> the delay does not change
+ // trend < 0 -> the delay decreases, queues are being emptied
+ trend = LinearFitSlope(delay_hist_).value_or(trend);
+ if (settings_.enable_cap) {
+ absl::optional<double> cap = ComputeSlopeCap(delay_hist_, settings_);
+ // We only use the cap to filter out overuse detections, not
+ // to detect additional underuses.
+ if (trend >= 0 && cap.has_value() && trend > cap.value()) {
+ trend = cap.value();
+ }
+ }
+ }
+ BWE_TEST_LOGGING_PLOT(1, "trendline_slope", arrival_time_ms, trend);
+
+ Detect(trend, send_delta_ms, arrival_time_ms);
+}
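+
+// Note on the smoothing above: it is an exponentially weighted moving
+// average. With smoothing_coef_ = 0.9, each new accumulated delay contributes
+// 10% immediately and its weight decays geometrically. For a constant
+// accumulated delay of 10 ms starting from zero:
+//
+//   double smoothed = 0.0;
+//   for (int i = 0; i < 3; ++i)
+//     smoothed = 0.9 * smoothed + 0.1 * 10.0;
+//   // smoothed == 2.71 after three updates, converging towards 10.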
+
+void TrendlineEstimator::Update(double recv_delta_ms,
+ double send_delta_ms,
+ int64_t send_time_ms,
+ int64_t arrival_time_ms,
+ size_t packet_size,
+ bool calculated_deltas) {
+ if (calculated_deltas) {
+ UpdateTrendline(recv_delta_ms, send_delta_ms, send_time_ms, arrival_time_ms,
+ packet_size);
+ }
+ if (network_state_predictor_) {
+ hypothesis_predicted_ = network_state_predictor_->Update(
+ send_time_ms, arrival_time_ms, hypothesis_);
+ }
+}
+
+BandwidthUsage TrendlineEstimator::State() const {
+ return network_state_predictor_ ? hypothesis_predicted_ : hypothesis_;
+}
+
+void TrendlineEstimator::Detect(double trend, double ts_delta, int64_t now_ms) {
+ if (num_of_deltas_ < 2) {
+ hypothesis_ = BandwidthUsage::kBwNormal;
+ return;
+ }
+ const double modified_trend =
+ std::min(num_of_deltas_, kMinNumDeltas) * trend * threshold_gain_;
+ prev_modified_trend_ = modified_trend;
+ BWE_TEST_LOGGING_PLOT(1, "T", now_ms, modified_trend);
+ BWE_TEST_LOGGING_PLOT(1, "threshold", now_ms, threshold_);
+ if (modified_trend > threshold_) {
+ if (time_over_using_ == -1) {
+ // Initialize the timer. Assume that we've been
+ // over-using half of the time since the previous
+ // sample.
+ time_over_using_ = ts_delta / 2;
+ } else {
+ // Increment timer
+ time_over_using_ += ts_delta;
+ }
+ overuse_counter_++;
+ if (time_over_using_ > overusing_time_threshold_ && overuse_counter_ > 1) {
+ if (trend >= prev_trend_) {
+ time_over_using_ = 0;
+ overuse_counter_ = 0;
+ hypothesis_ = BandwidthUsage::kBwOverusing;
+ }
+ }
+ } else if (modified_trend < -threshold_) {
+ time_over_using_ = -1;
+ overuse_counter_ = 0;
+ hypothesis_ = BandwidthUsage::kBwUnderusing;
+ } else {
+ time_over_using_ = -1;
+ overuse_counter_ = 0;
+ hypothesis_ = BandwidthUsage::kBwNormal;
+ }
+ prev_trend_ = trend;
+ UpdateThreshold(modified_trend, now_ms);
+}
+
+void TrendlineEstimator::UpdateThreshold(double modified_trend,
+ int64_t now_ms) {
+ if (last_update_ms_ == -1)
+ last_update_ms_ = now_ms;
+
+ if (fabs(modified_trend) > threshold_ + kMaxAdaptOffsetMs) {
+ // Avoid adapting the threshold to big latency spikes caused, e.g., by a
+ // sudden capacity drop.
+ last_update_ms_ = now_ms;
+ return;
+ }
+
+ const double k = fabs(modified_trend) < threshold_ ? k_down_ : k_up_;
+ const int64_t kMaxTimeDeltaMs = 100;
+ int64_t time_delta_ms = std::min(now_ms - last_update_ms_, kMaxTimeDeltaMs);
+ threshold_ += k * (fabs(modified_trend) - threshold_) * time_delta_ms;
+ threshold_ = rtc::SafeClamp(threshold_, 6.f, 600.f);
+ last_update_ms_ = now_ms;
+}
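+
+// Worked adaptation step for the update above: with threshold_ = 12.5,
+// |modified_trend| = 20 (above the threshold, so k = k_up_ = 0.0087, and
+// below the spike guard of 12.5 + 15) and time_delta_ms = 100, the threshold
+// rises by 0.0087 * (20 - 12.5) * 100 = 6.525 to 19.025. When
+// |modified_trend| drops below the threshold it decays faster via
+// k_down_ = 0.039, and the clamp keeps it within [6, 600].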
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/congestion_controller/goog_cc/trendline_estimator.h b/third_party/libwebrtc/modules/congestion_controller/goog_cc/trendline_estimator.h
new file mode 100644
index 0000000000..ffda25df74
--- /dev/null
+++ b/third_party/libwebrtc/modules/congestion_controller/goog_cc/trendline_estimator.h
@@ -0,0 +1,125 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef MODULES_CONGESTION_CONTROLLER_GOOG_CC_TRENDLINE_ESTIMATOR_H_
+#define MODULES_CONGESTION_CONTROLLER_GOOG_CC_TRENDLINE_ESTIMATOR_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <deque>
+#include <memory>
+#include <utility>
+
+#include "api/field_trials_view.h"
+#include "api/network_state_predictor.h"
+#include "modules/congestion_controller/goog_cc/delay_increase_detector_interface.h"
+#include "rtc_base/experiments/struct_parameters_parser.h"
+
+namespace webrtc {
+
+struct TrendlineEstimatorSettings {
+ static constexpr char kKey[] = "WebRTC-Bwe-TrendlineEstimatorSettings";
+ static constexpr unsigned kDefaultTrendlineWindowSize = 20;
+
+ TrendlineEstimatorSettings() = delete;
+ explicit TrendlineEstimatorSettings(const FieldTrialsView* key_value_config);
+
+ // Sort the packets in the window. This should be redundant, and in that
+ // case it costs almost nothing.
+ bool enable_sort = false;
+
+ // Cap the trendline slope based on the minimum delay seen
+ // in the beginning_packets and end_packets respectively.
+ bool enable_cap = false;
+ unsigned beginning_packets = 7;
+ unsigned end_packets = 7;
+ double cap_uncertainty = 0.0;
+
+ // Size (in packets) of the window.
+ unsigned window_size = kDefaultTrendlineWindowSize;
+
+ std::unique_ptr<StructParametersParser> Parser();
+};
+
+class TrendlineEstimator : public DelayIncreaseDetectorInterface {
+ public:
+ TrendlineEstimator(const FieldTrialsView* key_value_config,
+ NetworkStatePredictor* network_state_predictor);
+
+ ~TrendlineEstimator() override;
+
+ TrendlineEstimator(const TrendlineEstimator&) = delete;
+ TrendlineEstimator& operator=(const TrendlineEstimator&) = delete;
+
+ // Update the estimator with a new sample. The deltas should represent deltas
+ // between timestamp groups as defined by the InterArrival class.
+ void Update(double recv_delta_ms,
+ double send_delta_ms,
+ int64_t send_time_ms,
+ int64_t arrival_time_ms,
+ size_t packet_size,
+ bool calculated_deltas) override;
+
+ void UpdateTrendline(double recv_delta_ms,
+ double send_delta_ms,
+ int64_t send_time_ms,
+ int64_t arrival_time_ms,
+ size_t packet_size);
+
+ BandwidthUsage State() const override;
+
+ struct PacketTiming {
+ PacketTiming(double arrival_time_ms,
+ double smoothed_delay_ms,
+ double raw_delay_ms)
+ : arrival_time_ms(arrival_time_ms),
+ smoothed_delay_ms(smoothed_delay_ms),
+ raw_delay_ms(raw_delay_ms) {}
+ double arrival_time_ms;
+ double smoothed_delay_ms;
+ double raw_delay_ms;
+ };
+
+ private:
+ friend class GoogCcStatePrinter;
+ void Detect(double trend, double ts_delta, int64_t now_ms);
+
+ void UpdateThreshold(double modified_offset, int64_t now_ms);
+
+ // Parameters.
+ TrendlineEstimatorSettings settings_;
+ const double smoothing_coef_;
+ const double threshold_gain_;
+ // Number of deltas seen so far; used by the adaptive threshold.
+ int num_of_deltas_;
+ // Keep the arrival times small by using the change from the first packet.
+ int64_t first_arrival_time_ms_;
+ // Exponentially weighted moving average (EWMA) filtering.
+ double accumulated_delay_;
+ double smoothed_delay_;
+ // Linear least squares regression.
+ std::deque<PacketTiming> delay_hist_;
+
+ const double k_up_;
+ const double k_down_;
+ double overusing_time_threshold_;
+ double threshold_;
+ double prev_modified_trend_;
+ int64_t last_update_ms_;
+ double prev_trend_;
+ double time_over_using_;
+ int overuse_counter_;
+ BandwidthUsage hypothesis_;
+ BandwidthUsage hypothesis_predicted_;
+ NetworkStatePredictor* network_state_predictor_;
+};
+} // namespace webrtc
+
+#endif // MODULES_CONGESTION_CONTROLLER_GOOG_CC_TRENDLINE_ESTIMATOR_H_
diff --git a/third_party/libwebrtc/modules/congestion_controller/goog_cc/trendline_estimator_unittest.cc b/third_party/libwebrtc/modules/congestion_controller/goog_cc/trendline_estimator_unittest.cc
new file mode 100644
index 0000000000..b0195abdf5
--- /dev/null
+++ b/third_party/libwebrtc/modules/congestion_controller/goog_cc/trendline_estimator_unittest.cc
@@ -0,0 +1,151 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/congestion_controller/goog_cc/trendline_estimator.h"
+
+#include <algorithm>
+#include <numeric>
+#include <vector>
+
+#include "api/transport/field_trial_based_config.h"
+#include "rtc_base/random.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+class PacketTimeGenerator {
+ public:
+ PacketTimeGenerator(int64_t initial_clock, double time_between_packets)
+ : initial_clock_(initial_clock),
+ time_between_packets_(time_between_packets),
+ packets_(0) {}
+ int64_t operator()() {
+ return initial_clock_ + time_between_packets_ * packets_++;
+ }
+
+ private:
+ const int64_t initial_clock_;
+ const double time_between_packets_;
+ size_t packets_;
+};
+
+class TrendlineEstimatorTest : public testing::Test {
+ public:
+ TrendlineEstimatorTest()
+ : send_times(kPacketCount),
+ recv_times(kPacketCount),
+ packet_sizes(kPacketCount),
+ config(),
+ estimator(&config, nullptr),
+ count(1) {
+ std::fill(packet_sizes.begin(), packet_sizes.end(), kPacketSizeBytes);
+ }
+
+ void RunTestUntilStateChange() {
+ RTC_DCHECK_EQ(send_times.size(), kPacketCount);
+ RTC_DCHECK_EQ(recv_times.size(), kPacketCount);
+ RTC_DCHECK_EQ(packet_sizes.size(), kPacketCount);
+ RTC_DCHECK_GE(count, 1);
+ RTC_DCHECK_LT(count, kPacketCount);
+
+ auto initial_state = estimator.State();
+ for (; count < kPacketCount; count++) {
+ double recv_delta = recv_times[count] - recv_times[count - 1];
+ double send_delta = send_times[count] - send_times[count - 1];
+ estimator.Update(recv_delta, send_delta, send_times[count],
+ recv_times[count], packet_sizes[count], true);
+ if (estimator.State() != initial_state) {
+ return;
+ }
+ }
+ }
+
+ protected:
+ const size_t kPacketCount = 25;
+ const size_t kPacketSizeBytes = 1200;
+ std::vector<int64_t> send_times;
+ std::vector<int64_t> recv_times;
+ std::vector<size_t> packet_sizes;
+ const FieldTrialBasedConfig config;
+ TrendlineEstimator estimator;
+ size_t count;
+};
+} // namespace
+
+TEST_F(TrendlineEstimatorTest, Normal) {
+ PacketTimeGenerator send_time_generator(123456789 /*initial clock*/,
+ 20 /*20 ms between sent packets*/);
+ std::generate(send_times.begin(), send_times.end(), send_time_generator);
+
+ PacketTimeGenerator recv_time_generator(987654321 /*initial clock*/,
+ 20 /*delivered at the same pace*/);
+ std::generate(recv_times.begin(), recv_times.end(), recv_time_generator);
+
+ EXPECT_EQ(estimator.State(), BandwidthUsage::kBwNormal);
+ RunTestUntilStateChange();
+ EXPECT_EQ(estimator.State(), BandwidthUsage::kBwNormal);
+ EXPECT_EQ(count, kPacketCount); // All packets processed
+}
+
+TEST_F(TrendlineEstimatorTest, Overusing) {
+ PacketTimeGenerator send_time_generator(123456789 /*initial clock*/,
+ 20 /*20 ms between sent packets*/);
+ std::generate(send_times.begin(), send_times.end(), send_time_generator);
+
+ PacketTimeGenerator recv_time_generator(987654321 /*initial clock*/,
+ 1.1 * 20 /*10% slower delivery*/);
+ std::generate(recv_times.begin(), recv_times.end(), recv_time_generator);
+
+ EXPECT_EQ(estimator.State(), BandwidthUsage::kBwNormal);
+ RunTestUntilStateChange();
+ EXPECT_EQ(estimator.State(), BandwidthUsage::kBwOverusing);
+ RunTestUntilStateChange();
+ EXPECT_EQ(estimator.State(), BandwidthUsage::kBwOverusing);
+ EXPECT_EQ(count, kPacketCount); // All packets processed
+}
+
+TEST_F(TrendlineEstimatorTest, Underusing) {
+ PacketTimeGenerator send_time_generator(123456789 /*initial clock*/,
+ 20 /*20 ms between sent packets*/);
+ std::generate(send_times.begin(), send_times.end(), send_time_generator);
+
+ PacketTimeGenerator recv_time_generator(987654321 /*initial clock*/,
+ 0.85 * 20 /*15% faster delivery*/);
+ std::generate(recv_times.begin(), recv_times.end(), recv_time_generator);
+
+ EXPECT_EQ(estimator.State(), BandwidthUsage::kBwNormal);
+ RunTestUntilStateChange();
+ EXPECT_EQ(estimator.State(), BandwidthUsage::kBwUnderusing);
+ RunTestUntilStateChange();
+ EXPECT_EQ(estimator.State(), BandwidthUsage::kBwUnderusing);
+ EXPECT_EQ(count, kPacketCount); // All packets processed
+}
+
+TEST_F(TrendlineEstimatorTest, IncludesSmallPacketsByDefault) {
+ PacketTimeGenerator send_time_generator(123456789 /*initial clock*/,
+ 20 /*20 ms between sent packets*/);
+ std::generate(send_times.begin(), send_times.end(), send_time_generator);
+
+ PacketTimeGenerator recv_time_generator(987654321 /*initial clock*/,
+ 1.1 * 20 /*10% slower delivery*/);
+ std::generate(recv_times.begin(), recv_times.end(), recv_time_generator);
+
+ std::fill(packet_sizes.begin(), packet_sizes.end(), 100);
+
+ EXPECT_EQ(estimator.State(), BandwidthUsage::kBwNormal);
+ RunTestUntilStateChange();
+ EXPECT_EQ(estimator.State(), BandwidthUsage::kBwOverusing);
+ RunTestUntilStateChange();
+ EXPECT_EQ(estimator.State(), BandwidthUsage::kBwOverusing);
+ EXPECT_EQ(count, kPacketCount); // All packets processed
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/congestion_controller/include/receive_side_congestion_controller.h b/third_party/libwebrtc/modules/congestion_controller/include/receive_side_congestion_controller.h
new file mode 100644
index 0000000000..96ee8a6e3d
--- /dev/null
+++ b/third_party/libwebrtc/modules/congestion_controller/include/receive_side_congestion_controller.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_CONGESTION_CONTROLLER_INCLUDE_RECEIVE_SIDE_CONGESTION_CONTROLLER_H_
+#define MODULES_CONGESTION_CONTROLLER_INCLUDE_RECEIVE_SIDE_CONGESTION_CONTROLLER_H_
+
+#include <memory>
+#include <vector>
+
+#include "api/transport/field_trial_based_config.h"
+#include "api/transport/network_control.h"
+#include "api/units/data_rate.h"
+#include "api/units/time_delta.h"
+#include "modules/congestion_controller/remb_throttler.h"
+#include "modules/pacing/packet_router.h"
+#include "modules/remote_bitrate_estimator/remote_estimator_proxy.h"
+#include "rtc_base/synchronization/mutex.h"
+#include "rtc_base/thread_annotations.h"
+
+namespace webrtc {
+class RemoteBitrateEstimator;
+
+// This class represents the congestion control state for receive
+// streams. For send side bandwidth estimation, it simply relays feedback
+// for each received RTP packet back to the sender, while for receive side
+// bandwidth estimation, it performs the estimation locally and sends the
+// results back to the sender.
+class ReceiveSideCongestionController : public CallStatsObserver {
+ public:
+ ReceiveSideCongestionController(
+ Clock* clock,
+ RemoteEstimatorProxy::TransportFeedbackSender feedback_sender,
+ RembThrottler::RembSender remb_sender,
+ NetworkStateEstimator* network_state_estimator);
+
+ ~ReceiveSideCongestionController() override {}
+
+ virtual void OnReceivedPacket(int64_t arrival_time_ms,
+ size_t payload_size,
+ const RTPHeader& header);
+
+ void SetSendPeriodicFeedback(bool send_periodic_feedback);
+
+ // Implements CallStatsObserver.
+ void OnRttUpdate(int64_t avg_rtt_ms, int64_t max_rtt_ms) override;
+
+ // This is the send bitrate, used to control the rate of feedback messages.
+ void OnBitrateChanged(int bitrate_bps);
+
+ // Ensures the remote party is notified, via RTCP REMB, of a receive bitrate
+ // no larger than `bitrate`.
+ void SetMaxDesiredReceiveBitrate(DataRate bitrate);
+
+ void SetTransportOverhead(DataSize overhead_per_packet);
+
+ // Returns latest receive side bandwidth estimation.
+ // Returns zero if receive side bandwidth estimation is unavailable.
+ DataRate LatestReceiveSideEstimate() const;
+
+ // Removes stream from receive side bandwidth estimation.
+ // No-op if receive side BWE is not used or the stream doesn't participate
+ // in it.
+ void RemoveStream(uint32_t ssrc);
+
+ // Runs periodic tasks if it is time to run them; returns the time until the
+ // next call to `MaybeProcess` will have work to do.
+ TimeDelta MaybeProcess();
+
+ private:
+ void PickEstimatorFromHeader(const RTPHeader& header)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+ void PickEstimator() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+
+ Clock& clock_;
+ const FieldTrialBasedConfig field_trial_config_;
+ RembThrottler remb_throttler_;
+ RemoteEstimatorProxy remote_estimator_proxy_;
+
+ mutable Mutex mutex_;
+ std::unique_ptr<RemoteBitrateEstimator> rbe_ RTC_GUARDED_BY(mutex_);
+ bool using_absolute_send_time_ RTC_GUARDED_BY(mutex_);
+ uint32_t packets_since_absolute_send_time_ RTC_GUARDED_BY(mutex_);
+};
+
+} // namespace webrtc
+
+#endif // MODULES_CONGESTION_CONTROLLER_INCLUDE_RECEIVE_SIDE_CONGESTION_CONTROLLER_H_
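Editor's note: a minimal usage sketch, not part of the patch. It assumes compilation inside this WebRTC tree; the function and namespace names are illustrative. Construction takes the two outgoing callbacks (transport-wide feedback and REMB), each received RTP packet is reported via OnReceivedPacket(), and MaybeProcess() is polled for periodic work. Variadic generic lambdas are used so the sketch does not restate the exact std::function signatures from remote_estimator_proxy.h and remb_throttler.h.

#include <cstddef>

#include "api/rtp_headers.h"
#include "api/units/data_rate.h"
#include "api/units/time_delta.h"
#include "modules/congestion_controller/include/receive_side_congestion_controller.h"
#include "system_wrappers/include/clock.h"

namespace example {

webrtc::TimeDelta OnRtpPacket(webrtc::Clock* clock,
                              const webrtc::RTPHeader& header,
                              size_t payload_size) {
  // Generic lambdas convert to whatever callable types the constructor
  // expects; real code would forward these to its RTCP sender.
  auto send_feedback = [](auto&&...) { /* send transport-wide feedback */ };
  auto send_remb = [](auto&&...) { /* send RTCP REMB */ };
  webrtc::ReceiveSideCongestionController controller(
      clock, send_feedback, send_remb, /*network_state_estimator=*/nullptr);
  // Never ask the remote side to send more than 2.5 Mbps.
  controller.SetMaxDesiredReceiveBitrate(
      webrtc::DataRate::KilobitsPerSec(2500));
  controller.OnReceivedPacket(clock->TimeInMilliseconds(), payload_size,
                              header);
  return controller.MaybeProcess();  // Time until the next poll is useful.
}

}  // namespace example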
diff --git a/third_party/libwebrtc/modules/congestion_controller/pcc/BUILD.gn b/third_party/libwebrtc/modules/congestion_controller/pcc/BUILD.gn
new file mode 100644
index 0000000000..85b12b3771
--- /dev/null
+++ b/third_party/libwebrtc/modules/congestion_controller/pcc/BUILD.gn
@@ -0,0 +1,123 @@
+# Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+import("../../../webrtc.gni")
+
+rtc_library("pcc") {
+ sources = [
+ "pcc_factory.cc",
+ "pcc_factory.h",
+ ]
+ deps = [
+ ":pcc_controller",
+ "../../../api/transport:network_control",
+ "../../../api/units:time_delta",
+ ]
+}
+
+rtc_library("pcc_controller") {
+ sources = [
+ "pcc_network_controller.cc",
+ "pcc_network_controller.h",
+ ]
+ deps = [
+ ":bitrate_controller",
+ ":monitor_interval",
+ ":rtt_tracker",
+ "../../../api/transport:network_control",
+ "../../../api/units:data_rate",
+ "../../../api/units:data_size",
+ "../../../api/units:time_delta",
+ "../../../api/units:timestamp",
+ "../../../rtc_base:checks",
+ "../../../rtc_base:random",
+ ]
+ absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ]
+}
+
+rtc_library("monitor_interval") {
+ sources = [
+ "monitor_interval.cc",
+ "monitor_interval.h",
+ ]
+ deps = [
+ "../../../api/transport:network_control",
+ "../../../api/units:data_rate",
+ "../../../api/units:data_size",
+ "../../../api/units:time_delta",
+ "../../../api/units:timestamp",
+ "../../../rtc_base:logging",
+ ]
+}
+
+rtc_library("rtt_tracker") {
+ sources = [
+ "rtt_tracker.cc",
+ "rtt_tracker.h",
+ ]
+ deps = [
+ "../../../api/transport:network_control",
+ "../../../api/units:time_delta",
+ "../../../api/units:timestamp",
+ ]
+}
+
+rtc_library("utility_function") {
+ sources = [
+ "utility_function.cc",
+ "utility_function.h",
+ ]
+ deps = [
+ ":monitor_interval",
+ "../../../api/transport:network_control",
+ "../../../api/units:data_rate",
+ "../../../rtc_base:checks",
+ ]
+}
+
+rtc_library("bitrate_controller") {
+ sources = [
+ "bitrate_controller.cc",
+ "bitrate_controller.h",
+ ]
+ deps = [
+ ":monitor_interval",
+ ":utility_function",
+ "../../../api/transport:network_control",
+ "../../../api/units:data_rate",
+ ]
+ absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ]
+}
+
+if (rtc_include_tests && !build_with_chromium) {
+ rtc_library("pcc_unittests") {
+ testonly = true
+ sources = [
+ "bitrate_controller_unittest.cc",
+ "monitor_interval_unittest.cc",
+ "pcc_network_controller_unittest.cc",
+ "rtt_tracker_unittest.cc",
+ "utility_function_unittest.cc",
+ ]
+ deps = [
+ ":bitrate_controller",
+ ":monitor_interval",
+ ":pcc",
+ ":pcc_controller",
+ ":rtt_tracker",
+ ":utility_function",
+ "../../../api/transport:network_control",
+ "../../../api/units:data_rate",
+ "../../../api/units:data_size",
+ "../../../api/units:time_delta",
+ "../../../api/units:timestamp",
+ "../../../test:test_support",
+ "../../../test/scenario",
+ ]
+ }
+}
diff --git a/third_party/libwebrtc/modules/congestion_controller/pcc/bitrate_controller.cc b/third_party/libwebrtc/modules/congestion_controller/pcc/bitrate_controller.cc
new file mode 100644
index 0000000000..16b8e6966f
--- /dev/null
+++ b/third_party/libwebrtc/modules/congestion_controller/pcc/bitrate_controller.cc
@@ -0,0 +1,139 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/congestion_controller/pcc/bitrate_controller.h"
+
+#include <algorithm>
+#include <cmath>
+#include <cstdlib>
+#include <memory>
+#include <utility>
+#include <vector>
+
+
+namespace webrtc {
+namespace pcc {
+
+PccBitrateController::PccBitrateController(double initial_conversion_factor,
+ double initial_dynamic_boundary,
+ double dynamic_boundary_increment,
+ double rtt_gradient_coefficient,
+ double loss_coefficient,
+ double throughput_coefficient,
+ double throughput_power,
+ double rtt_gradient_threshold,
+ double delay_gradient_negative_bound)
+ : PccBitrateController(initial_conversion_factor,
+ initial_dynamic_boundary,
+ dynamic_boundary_increment,
+ std::make_unique<ModifiedVivaceUtilityFunction>(
+ rtt_gradient_coefficient,
+ loss_coefficient,
+ throughput_coefficient,
+ throughput_power,
+ rtt_gradient_threshold,
+ delay_gradient_negative_bound)) {}
+
+PccBitrateController::PccBitrateController(
+ double initial_conversion_factor,
+ double initial_dynamic_boundary,
+ double dynamic_boundary_increment,
+ std::unique_ptr<PccUtilityFunctionInterface> utility_function)
+ : consecutive_boundary_adjustments_number_(0),
+ initial_dynamic_boundary_(initial_dynamic_boundary),
+ dynamic_boundary_increment_(dynamic_boundary_increment),
+ utility_function_(std::move(utility_function)),
+ step_size_adjustments_number_(0),
+ initial_conversion_factor_(initial_conversion_factor) {}
+
+PccBitrateController::~PccBitrateController() = default;
+
+double PccBitrateController::ComputeStepSize(double utility_gradient) {
+ // Track the number of consecutive step-size adjustments.
+ if (utility_gradient > 0) {
+ step_size_adjustments_number_ =
+ std::max<int64_t>(step_size_adjustments_number_ + 1, 1);
+ } else if (utility_gradient < 0) {
+ step_size_adjustments_number_ =
+ std::min<int64_t>(step_size_adjustments_number_ - 1, -1);
+ } else {
+ step_size_adjustments_number_ = 0;
+ }
+ // Computes step size amplifier.
+ int64_t step_size_amplifier = 1;
+ if (std::abs(step_size_adjustments_number_) <= 3) {
+ step_size_amplifier =
+ std::max<int64_t>(std::abs(step_size_adjustments_number_), 1);
+ } else {
+ step_size_amplifier = 2 * std::abs(step_size_adjustments_number_) - 3;
+ }
+ return step_size_amplifier * initial_conversion_factor_;
+}
+
+double PccBitrateController::ApplyDynamicBoundary(double rate_change,
+ double bitrate) {
+ double rate_change_abs = std::abs(rate_change);
+ int64_t rate_change_sign = (rate_change > 0) ? 1 : -1;
+ if (consecutive_boundary_adjustments_number_ * rate_change_sign < 0) {
+ consecutive_boundary_adjustments_number_ = 0;
+ }
+ double dynamic_change_boundary =
+ initial_dynamic_boundary_ +
+ std::abs(consecutive_boundary_adjustments_number_) *
+ dynamic_boundary_increment_;
+ double boundary = bitrate * dynamic_change_boundary;
+ if (rate_change_abs > boundary) {
+ consecutive_boundary_adjustments_number_ += rate_change_sign;
+ return boundary * rate_change_sign;
+ }
+ // The rate change is smaller than the boundary. Shrink the boundary to the
+ // smallest one that still allows the change.
+ while (rate_change_abs <= boundary &&
+ consecutive_boundary_adjustments_number_ * rate_change_sign > 0) {
+ consecutive_boundary_adjustments_number_ -= rate_change_sign;
+ dynamic_change_boundary =
+ initial_dynamic_boundary_ +
+ std::abs(consecutive_boundary_adjustments_number_) *
+ dynamic_boundary_increment_;
+ boundary = bitrate * dynamic_change_boundary;
+ }
+ consecutive_boundary_adjustments_number_ += rate_change_sign;
+ return rate_change;
+}
+
+absl::optional<DataRate>
+PccBitrateController::ComputeRateUpdateForSlowStartMode(
+ const PccMonitorInterval& monitor_interval) {
+ double utility_value = utility_function_->Compute(monitor_interval);
+ if (previous_utility_.has_value() && utility_value <= previous_utility_) {
+ return absl::nullopt;
+ }
+ previous_utility_ = utility_value;
+ return monitor_interval.GetTargetSendingRate();
+}
+
+DataRate PccBitrateController::ComputeRateUpdateForOnlineLearningMode(
+ const std::vector<PccMonitorInterval>& intervals,
+ DataRate bandwidth_estimate) {
+ double first_utility = utility_function_->Compute(intervals[0]);
+ double second_utility = utility_function_->Compute(intervals[1]);
+ double first_bitrate_bps = intervals[0].GetTargetSendingRate().bps();
+ double second_bitrate_bps = intervals[1].GetTargetSendingRate().bps();
+ double gradient = (first_utility - second_utility) /
+ (first_bitrate_bps - second_bitrate_bps);
+ double rate_change_bps = gradient * ComputeStepSize(gradient); // delta_r
+ rate_change_bps =
+ ApplyDynamicBoundary(rate_change_bps, bandwidth_estimate.bps());
+ return DataRate::BitsPerSec(
+ std::max(0.0, bandwidth_estimate.bps() + rate_change_bps));
+}
+
+} // namespace pcc
+} // namespace webrtc
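Editor's note: a standalone sketch, not part of the patch, of the step-size schedule that ComputeStepSize() implements. A run of same-sign utility gradients produces amplifiers 1, 2, 3 and then 2*n - 3 (5, 7, 9, ...), so rate probing accelerates the longer the utility keeps improving in one direction; a sign flip or a zero gradient resets the run.

#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <cstdlib>

// Mirrors PccBitrateController::ComputeStepSize() for illustration.
double StepSize(int64_t& adjustments, double utility_gradient,
                double conversion_factor) {
  if (utility_gradient > 0) {
    adjustments = std::max<int64_t>(adjustments + 1, 1);
  } else if (utility_gradient < 0) {
    adjustments = std::min<int64_t>(adjustments - 1, -1);
  } else {
    adjustments = 0;
  }
  const int64_t amplifier =
      std::abs(adjustments) <= 3
          ? std::max<int64_t>(std::abs(adjustments), 1)
          : 2 * std::abs(adjustments) - 3;
  return amplifier * conversion_factor;
}

int main() {
  int64_t run = 0;
  for (int i = 0; i < 6; ++i) {
    std::printf("%g ", StepSize(run, /*utility_gradient=*/1.0,
                                /*conversion_factor=*/1.0));
  }
  std::printf("\n");  // Prints: 1 2 3 5 7 9
  return 0;
}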
diff --git a/third_party/libwebrtc/modules/congestion_controller/pcc/bitrate_controller.h b/third_party/libwebrtc/modules/congestion_controller/pcc/bitrate_controller.h
new file mode 100644
index 0000000000..fadeea1b55
--- /dev/null
+++ b/third_party/libwebrtc/modules/congestion_controller/pcc/bitrate_controller.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_CONGESTION_CONTROLLER_PCC_BITRATE_CONTROLLER_H_
+#define MODULES_CONGESTION_CONTROLLER_PCC_BITRATE_CONTROLLER_H_
+
+#include <stdint.h>
+
+#include <memory>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/units/data_rate.h"
+#include "modules/congestion_controller/pcc/monitor_interval.h"
+#include "modules/congestion_controller/pcc/utility_function.h"
+
+namespace webrtc {
+namespace pcc {
+
+class PccBitrateController {
+ public:
+ PccBitrateController(double initial_conversion_factor,
+ double initial_dynamic_boundary,
+ double dynamic_boundary_increment,
+ double rtt_gradient_coefficient,
+ double loss_coefficient,
+ double throughput_coefficient,
+ double throughput_power,
+ double rtt_gradient_threshold,
+ double delay_gradient_negative_bound);
+
+ PccBitrateController(
+ double initial_conversion_factor,
+ double initial_dynamic_boundary,
+ double dynamic_boundary_increment,
+ std::unique_ptr<PccUtilityFunctionInterface> utility_function);
+
+ absl::optional<DataRate> ComputeRateUpdateForSlowStartMode(
+ const PccMonitorInterval& monitor_interval);
+
+ DataRate ComputeRateUpdateForOnlineLearningMode(
+ const std::vector<PccMonitorInterval>& block,
+ DataRate bandwidth_estimate);
+
+ ~PccBitrateController();
+
+ private:
+ double ApplyDynamicBoundary(double rate_change, double bitrate);
+ double ComputeStepSize(double utility_gradient);
+
+ // Dynamic boundary variables:
+ int64_t consecutive_boundary_adjustments_number_;
+ const double initial_dynamic_boundary_;
+ const double dynamic_boundary_increment_;
+
+ const std::unique_ptr<PccUtilityFunctionInterface> utility_function_;
+ // Step Size variables:
+ int64_t step_size_adjustments_number_;
+ const double initial_conversion_factor_;
+
+ absl::optional<double> previous_utility_;
+};
+
+} // namespace pcc
+} // namespace webrtc
+
+#endif // MODULES_CONGESTION_CONTROLLER_PCC_BITRATE_CONTROLLER_H_
diff --git a/third_party/libwebrtc/modules/congestion_controller/pcc/bitrate_controller_unittest.cc b/third_party/libwebrtc/modules/congestion_controller/pcc/bitrate_controller_unittest.cc
new file mode 100644
index 0000000000..957d99b1de
--- /dev/null
+++ b/third_party/libwebrtc/modules/congestion_controller/pcc/bitrate_controller_unittest.cc
@@ -0,0 +1,303 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/congestion_controller/pcc/bitrate_controller.h"
+
+#include <memory>
+#include <utility>
+
+#include "modules/congestion_controller/pcc/monitor_interval.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace pcc {
+namespace test {
+namespace {
+constexpr double kInitialConversionFactor = 1;
+constexpr double kInitialDynamicBoundary = 0.05;
+constexpr double kDynamicBoundaryIncrement = 0.1;
+
+constexpr double kDelayGradientCoefficient = 900;
+constexpr double kLossCoefficient = 11.35;
+constexpr double kThroughputCoefficient = 500 * 1000;
+constexpr double kThroughputPower = 0.99;
+constexpr double kDelayGradientThreshold = 0.01;
+constexpr double kDelayGradientNegativeBound = 10;
+
+const DataRate kTargetSendingRate = DataRate::KilobitsPerSec(300);
+const double kEpsilon = 0.05;
+const Timestamp kStartTime = Timestamp::Micros(0);
+const TimeDelta kPacketsDelta = TimeDelta::Millis(1);
+const TimeDelta kIntervalDuration = TimeDelta::Millis(1000);
+const TimeDelta kDefaultRtt = TimeDelta::Millis(1000);
+const DataSize kDefaultDataSize = DataSize::Bytes(100);
+
+std::vector<PacketResult> CreatePacketResults(
+ const std::vector<Timestamp>& packets_send_times,
+ const std::vector<Timestamp>& packets_received_times = {},
+ const std::vector<DataSize>& packets_sizes = {}) {
+ std::vector<PacketResult> packet_results;
+ PacketResult packet_result;
+ SentPacket sent_packet;
+ for (size_t i = 0; i < packets_send_times.size(); ++i) {
+ sent_packet.send_time = packets_send_times[i];
+ if (packets_sizes.empty()) {
+ sent_packet.size = kDefaultDataSize;
+ } else {
+ sent_packet.size = packets_sizes[i];
+ }
+ packet_result.sent_packet = sent_packet;
+ if (packets_received_times.empty()) {
+ packet_result.receive_time = packets_send_times[i] + kDefaultRtt;
+ } else {
+ packet_result.receive_time = packets_received_times[i];
+ }
+ packet_results.push_back(packet_result);
+ }
+ return packet_results;
+}
+
+class MockUtilityFunction : public PccUtilityFunctionInterface {
+ public:
+ MOCK_METHOD(double,
+ Compute,
+ (const PccMonitorInterval& monitor_interval),
+ (const, override));
+};
+
+} // namespace
+
+TEST(PccBitrateControllerTest, IncreaseRateWhenNoChangesForTestBitrates) {
+ PccBitrateController bitrate_controller(
+ kInitialConversionFactor, kInitialDynamicBoundary,
+ kDynamicBoundaryIncrement, kDelayGradientCoefficient, kLossCoefficient,
+ kThroughputCoefficient, kThroughputPower, kDelayGradientThreshold,
+ kDelayGradientNegativeBound);
+ VivaceUtilityFunction utility_function(
+ kDelayGradientCoefficient, kLossCoefficient, kThroughputCoefficient,
+ kThroughputPower, kDelayGradientThreshold, kDelayGradientNegativeBound);
+ std::vector<PccMonitorInterval> monitor_block{
+ PccMonitorInterval(kTargetSendingRate * (1 + kEpsilon), kStartTime,
+ kIntervalDuration),
+ PccMonitorInterval(kTargetSendingRate * (1 - kEpsilon),
+ kStartTime + kIntervalDuration, kIntervalDuration)};
+ monitor_block[0].OnPacketsFeedback(
+ CreatePacketResults({kStartTime + kPacketsDelta,
+ kStartTime + kIntervalDuration + kPacketsDelta,
+ kStartTime + 3 * kIntervalDuration},
+ {}, {}));
+ monitor_block[1].OnPacketsFeedback(
+ CreatePacketResults({kStartTime + kPacketsDelta,
+ kStartTime + kIntervalDuration + kPacketsDelta,
+ kStartTime + 3 * kIntervalDuration},
+ {}, {}));
+ // For both monitor intervals there was no change in the RTT gradient or
+ // in packet loss. Since the only difference is the sending rate, the
+ // congestion controller should choose the higher sending rate.
+ EXPECT_GT(bitrate_controller
+ .ComputeRateUpdateForOnlineLearningMode(monitor_block,
+ kTargetSendingRate)
+ .bps(),
+ kTargetSendingRate.bps());
+}
+
+TEST(PccBitrateControllerTest, NoChangesWhenUtilityFunctionDoesntChange) {
+ std::unique_ptr<MockUtilityFunction> mock_utility_function =
+ std::make_unique<MockUtilityFunction>();
+ EXPECT_CALL(*mock_utility_function, Compute(::testing::_))
+ .Times(2)
+ .WillOnce(::testing::Return(100))
+ .WillOnce(::testing::Return(100));
+
+ PccBitrateController bitrate_controller(
+ kInitialConversionFactor, kInitialDynamicBoundary,
+ kDynamicBoundaryIncrement, std::move(mock_utility_function));
+ std::vector<PccMonitorInterval> monitor_block{
+ PccMonitorInterval(kTargetSendingRate * (1 + kEpsilon), kStartTime,
+ kIntervalDuration),
+ PccMonitorInterval(kTargetSendingRate * (1 - kEpsilon),
+ kStartTime + kIntervalDuration, kIntervalDuration)};
+ // Complete feedback collection within the monitor intervals.
+ monitor_block[0].OnPacketsFeedback(
+ CreatePacketResults({kStartTime + 3 * kIntervalDuration}, {}, {}));
+ monitor_block[1].OnPacketsFeedback(
+ CreatePacketResults({kStartTime + 3 * kIntervalDuration}, {}, {}));
+ // The mocked utility function returns the same value for both monitor
+ // intervals, so the computed gradient is zero and the sending rate should
+ // not change.
+ EXPECT_EQ(bitrate_controller
+ .ComputeRateUpdateForOnlineLearningMode(monitor_block,
+ kTargetSendingRate)
+ .bps(),
+ kTargetSendingRate.bps());
+}
+
+TEST(PccBitrateControllerTest, NoBoundaryWhenSmallGradient) {
+ std::unique_ptr<MockUtilityFunction> mock_utility_function =
+ std::make_unique<MockUtilityFunction>();
+ constexpr double kFirstMonitorIntervalUtility = 0;
+ const double kSecondMonitorIntervalUtility =
+ 2 * kTargetSendingRate.bps() * kEpsilon;
+
+ EXPECT_CALL(*mock_utility_function, Compute(::testing::_))
+ .Times(2)
+ .WillOnce(::testing::Return(kFirstMonitorIntervalUtility))
+ .WillOnce(::testing::Return(kSecondMonitorIntervalUtility));
+
+ PccBitrateController bitrate_controller(
+ kInitialConversionFactor, kInitialDynamicBoundary,
+ kDynamicBoundaryIncrement, std::move(mock_utility_function));
+ std::vector<PccMonitorInterval> monitor_block{
+ PccMonitorInterval(kTargetSendingRate * (1 + kEpsilon), kStartTime,
+ kIntervalDuration),
+ PccMonitorInterval(kTargetSendingRate * (1 - kEpsilon),
+ kStartTime + kIntervalDuration, kIntervalDuration)};
+ // Complete feedback collection within the monitor intervals.
+ monitor_block[0].OnPacketsFeedback(
+ CreatePacketResults({kStartTime + 3 * kIntervalDuration}, {}, {}));
+ monitor_block[1].OnPacketsFeedback(
+ CreatePacketResults({kStartTime + 3 * kIntervalDuration}, {}, {}));
+
+ double gradient =
+ (kFirstMonitorIntervalUtility - kSecondMonitorIntervalUtility) /
+ (kTargetSendingRate.bps() * 2 * kEpsilon);
+ // When the gradient is small we don't hit the dynamic boundary.
+ EXPECT_EQ(bitrate_controller
+ .ComputeRateUpdateForOnlineLearningMode(monitor_block,
+ kTargetSendingRate)
+ .bps(),
+ kTargetSendingRate.bps() + kInitialConversionFactor * gradient);
+}
+
+TEST(PccBitrateControllerTest, FaceBoundaryWhenLargeGradient) {
+ std::unique_ptr<MockUtilityFunction> mock_utility_function =
+ std::make_unique<MockUtilityFunction>();
+ constexpr double kFirstMonitorIntervalUtility = 0;
+ const double kSecondMonitorIntervalUtility =
+ 10 * kInitialDynamicBoundary * kTargetSendingRate.bps() * 2 *
+ kTargetSendingRate.bps() * kEpsilon;
+
+ EXPECT_CALL(*mock_utility_function, Compute(::testing::_))
+ .Times(4)
+ .WillOnce(::testing::Return(kFirstMonitorIntervalUtility))
+ .WillOnce(::testing::Return(kSecondMonitorIntervalUtility))
+ .WillOnce(::testing::Return(kFirstMonitorIntervalUtility))
+ .WillOnce(::testing::Return(kSecondMonitorIntervalUtility));
+
+ PccBitrateController bitrate_controller(
+ kInitialConversionFactor, kInitialDynamicBoundary,
+ kDynamicBoundaryIncrement, std::move(mock_utility_function));
+ std::vector<PccMonitorInterval> monitor_block{
+ PccMonitorInterval(kTargetSendingRate * (1 + kEpsilon), kStartTime,
+ kIntervalDuration),
+ PccMonitorInterval(kTargetSendingRate * (1 - kEpsilon),
+ kStartTime + kIntervalDuration, kIntervalDuration)};
+ // Complete feedback collection within the monitor intervals.
+ monitor_block[0].OnPacketsFeedback(
+ CreatePacketResults({kStartTime + 3 * kIntervalDuration}, {}, {}));
+ monitor_block[1].OnPacketsFeedback(
+ CreatePacketResults({kStartTime + 3 * kIntervalDuration}, {}, {}));
+ // The utility function gradient is too big, so we hit the dynamic boundary.
+ EXPECT_EQ(bitrate_controller.ComputeRateUpdateForOnlineLearningMode(
+ monitor_block, kTargetSendingRate),
+ kTargetSendingRate * (1 - kInitialDynamicBoundary));
+ // When we hit the dynamic boundary a second time in the same direction,
+ // the boundary should increase.
+ EXPECT_EQ(bitrate_controller
+ .ComputeRateUpdateForOnlineLearningMode(monitor_block,
+ kTargetSendingRate)
+ .bps(),
+ kTargetSendingRate.bps() *
+ (1 - kInitialDynamicBoundary - kDynamicBoundaryIncrement));
+}
+
+TEST(PccBitrateControllerTest, SlowStartMode) {
+ std::unique_ptr<MockUtilityFunction> mock_utility_function =
+ std::make_unique<MockUtilityFunction>();
+ constexpr double kFirstUtilityFunction = 1000;
+ EXPECT_CALL(*mock_utility_function, Compute(::testing::_))
+ .Times(4)
+ // For the first three calls we expect to stay in SLOW_START mode and
+ // double the sending rate, since the utility function keeps increasing.
+ // On the last call the utility function decreases, which means we should
+ // not double the sending rate and should exit SLOW_START mode.
+ .WillOnce(::testing::Return(kFirstUtilityFunction))
+ .WillOnce(::testing::Return(kFirstUtilityFunction + 1))
+ .WillOnce(::testing::Return(kFirstUtilityFunction + 2))
+ .WillOnce(::testing::Return(kFirstUtilityFunction + 1));
+
+ PccBitrateController bitrate_controller(
+ kInitialConversionFactor, kInitialDynamicBoundary,
+ kDynamicBoundaryIncrement, std::move(mock_utility_function));
+ std::vector<PccMonitorInterval> monitor_block{PccMonitorInterval(
+ 2 * kTargetSendingRate, kStartTime, kIntervalDuration)};
+ // Complete feedback collection within the monitor intervals.
+ monitor_block[0].OnPacketsFeedback(
+ CreatePacketResults({kStartTime + 3 * kIntervalDuration}, {}, {}));
+ EXPECT_EQ(
+ bitrate_controller.ComputeRateUpdateForSlowStartMode(monitor_block[0]),
+ kTargetSendingRate * 2);
+ EXPECT_EQ(
+ bitrate_controller.ComputeRateUpdateForSlowStartMode(monitor_block[0]),
+ kTargetSendingRate * 2);
+ EXPECT_EQ(
+ bitrate_controller.ComputeRateUpdateForSlowStartMode(monitor_block[0]),
+ kTargetSendingRate * 2);
+ EXPECT_EQ(
+ bitrate_controller.ComputeRateUpdateForSlowStartMode(monitor_block[0]),
+ absl::nullopt);
+}
+
+TEST(PccBitrateControllerTest, StepSizeIncrease) {
+ std::unique_ptr<MockUtilityFunction> mock_utility_function =
+ std::make_unique<MockUtilityFunction>();
+ constexpr double kFirstMiUtilityFunction = 0;
+ const double kSecondMiUtilityFunction =
+ 2 * kTargetSendingRate.bps() * kEpsilon;
+
+ EXPECT_CALL(*mock_utility_function, Compute(::testing::_))
+ .Times(4)
+ .WillOnce(::testing::Return(kFirstMiUtilityFunction))
+ .WillOnce(::testing::Return(kSecondMiUtilityFunction))
+ .WillOnce(::testing::Return(kFirstMiUtilityFunction))
+ .WillOnce(::testing::Return(kSecondMiUtilityFunction));
+ std::vector<PccMonitorInterval> monitor_block{
+ PccMonitorInterval(kTargetSendingRate * (1 + kEpsilon), kStartTime,
+ kIntervalDuration),
+ PccMonitorInterval(kTargetSendingRate * (1 - kEpsilon),
+ kStartTime + kIntervalDuration, kIntervalDuration)};
+ // Complete feedback collection within the monitor intervals.
+ monitor_block[0].OnPacketsFeedback(
+ CreatePacketResults({kStartTime + 3 * kIntervalDuration}, {}, {}));
+ monitor_block[1].OnPacketsFeedback(
+ CreatePacketResults({kStartTime + 3 * kIntervalDuration}, {}, {}));
+
+ double gradient = (kFirstMiUtilityFunction - kSecondMiUtilityFunction) /
+ (kTargetSendingRate.bps() * 2 * kEpsilon);
+ PccBitrateController bitrate_controller(
+ kInitialConversionFactor, kInitialDynamicBoundary,
+ kDynamicBoundaryIncrement, std::move(mock_utility_function));
+ // If we keep moving in the same direction, the step size should increase.
+ EXPECT_EQ(bitrate_controller
+ .ComputeRateUpdateForOnlineLearningMode(monitor_block,
+ kTargetSendingRate)
+ .bps(),
+ kTargetSendingRate.bps() + kInitialConversionFactor * gradient);
+ EXPECT_EQ(bitrate_controller
+ .ComputeRateUpdateForOnlineLearningMode(monitor_block,
+ kTargetSendingRate)
+ .bps(),
+ kTargetSendingRate.bps() + 2 * kInitialConversionFactor * gradient);
+}
+
+} // namespace test
+} // namespace pcc
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/congestion_controller/pcc/monitor_interval.cc b/third_party/libwebrtc/modules/congestion_controller/pcc/monitor_interval.cc
new file mode 100644
index 0000000000..de1e2d5e69
--- /dev/null
+++ b/third_party/libwebrtc/modules/congestion_controller/pcc/monitor_interval.cc
@@ -0,0 +1,135 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/congestion_controller/pcc/monitor_interval.h"
+
+#include <stddef.h>
+
+#include <cmath>
+
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+namespace pcc {
+
+PccMonitorInterval::PccMonitorInterval(DataRate target_sending_rate,
+ Timestamp start_time,
+ TimeDelta duration)
+ : target_sending_rate_(target_sending_rate),
+ start_time_(start_time),
+ interval_duration_(duration),
+ received_packets_size_(DataSize::Zero()),
+ feedback_collection_done_(false) {}
+
+PccMonitorInterval::~PccMonitorInterval() = default;
+
+PccMonitorInterval::PccMonitorInterval(const PccMonitorInterval& other) =
+ default;
+
+void PccMonitorInterval::OnPacketsFeedback(
+ const std::vector<PacketResult>& packets_results) {
+ for (const PacketResult& packet_result : packets_results) {
+ if (packet_result.sent_packet.send_time <= start_time_) {
+ continue;
+ }
+ // Here we assume that if some packets are reordered with packets sent
+ // after the end of the monitor interval, then they are lost. (Otherwise
+ // it is not clear how long we should wait for packet feedback to arrive.)
+ if (packet_result.sent_packet.send_time >
+ start_time_ + interval_duration_) {
+ feedback_collection_done_ = true;
+ return;
+ }
+ if (!packet_result.IsReceived()) {
+ lost_packets_sent_time_.push_back(packet_result.sent_packet.send_time);
+ } else {
+ received_packets_.push_back(
+ {packet_result.receive_time - packet_result.sent_packet.send_time,
+ packet_result.sent_packet.send_time});
+ received_packets_size_ += packet_result.sent_packet.size;
+ }
+ }
+}
+
+// For the formula used in computations see formula for "slope" in the second
+// method:
+// https://www.johndcook.com/blog/2008/10/20/comparing-two-ways-to-fit-a-line-to-data/
+double PccMonitorInterval::ComputeDelayGradient(
+ double delay_gradient_threshold) const {
+ // Early return to prevent division by 0 in case all packets are sent at the
+ // same time.
+ if (received_packets_.empty() || received_packets_.front().sent_time ==
+ received_packets_.back().sent_time) {
+ return 0;
+ }
+ double sum_times = 0;
+ for (const ReceivedPacket& packet : received_packets_) {
+ double time_delta_us =
+ (packet.sent_time - received_packets_[0].sent_time).us();
+ sum_times += time_delta_us;
+ }
+ double sum_squared_scaled_time_deltas = 0;
+ double sum_scaled_time_delta_dot_delay = 0;
+ for (const ReceivedPacket& packet : received_packets_) {
+ double time_delta_us =
+ (packet.sent_time - received_packets_[0].sent_time).us();
+ double delay = packet.delay.us();
+ double scaled_time_delta_us =
+ time_delta_us - sum_times / received_packets_.size();
+ sum_squared_scaled_time_deltas +=
+ scaled_time_delta_us * scaled_time_delta_us;
+ sum_scaled_time_delta_dot_delay += scaled_time_delta_us * delay;
+ }
+ double rtt_gradient =
+ sum_scaled_time_delta_dot_delay / sum_squared_scaled_time_deltas;
+ if (std::abs(rtt_gradient) < delay_gradient_threshold)
+ rtt_gradient = 0;
+ return rtt_gradient;
+}
+
+bool PccMonitorInterval::IsFeedbackCollectionDone() const {
+ return feedback_collection_done_;
+}
+
+Timestamp PccMonitorInterval::GetEndTime() const {
+ return start_time_ + interval_duration_;
+}
+
+double PccMonitorInterval::GetLossRate() const {
+ size_t packets_lost = lost_packets_sent_time_.size();
+ size_t packets_received = received_packets_.size();
+ if (packets_lost == 0)
+ return 0;
+ return static_cast<double>(packets_lost) / (packets_lost + packets_received);
+}
+
+DataRate PccMonitorInterval::GetTargetSendingRate() const {
+ return target_sending_rate_;
+}
+
+DataRate PccMonitorInterval::GetTransmittedPacketsRate() const {
+ if (received_packets_.empty()) {
+ return target_sending_rate_;
+ }
+ Timestamp receive_time_of_first_packet =
+ received_packets_.front().sent_time + received_packets_.front().delay;
+ Timestamp receive_time_of_last_packet =
+ received_packets_.back().sent_time + received_packets_.back().delay;
+ if (receive_time_of_first_packet == receive_time_of_last_packet) {
+ RTC_LOG(LS_WARNING)
+ << "All packets in monitor interval were received at the same time.";
+ return target_sending_rate_;
+ }
+ return received_packets_size_ /
+ (receive_time_of_last_packet - receive_time_of_first_packet);
+}
+
+} // namespace pcc
+} // namespace webrtc
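Editor's note: a standalone sketch, not part of the patch, of the least-squares slope that ComputeDelayGradient() computes over (send time, one-way delay) pairs: slope = sum((t_i - mean_t) * d_i) / sum((t_i - mean_t)^2). The sample data below is made up; delays that grow by 1 us per 1 us of send time give a gradient of exactly 1.

#include <cstddef>
#include <cstdio>
#include <vector>

double DelaySlope(const std::vector<double>& send_times_us,
                  const std::vector<double>& delays_us) {
  double mean_t = 0;
  for (double t : send_times_us) mean_t += t;
  mean_t /= send_times_us.size();
  double numerator = 0;
  double denominator = 0;
  for (std::size_t i = 0; i < send_times_us.size(); ++i) {
    const double centered = send_times_us[i] - mean_t;  // t_i - mean_t
    numerator += centered * delays_us[i];
    denominator += centered * centered;
  }
  return numerator / denominator;
}

int main() {
  // Three packets sent 1 ms apart whose delay grows in lockstep.
  std::printf("%f\n", DelaySlope({0, 1000, 2000}, {100, 1100, 2100}));
  return 0;  // Prints 1.000000.
}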
diff --git a/third_party/libwebrtc/modules/congestion_controller/pcc/monitor_interval.h b/third_party/libwebrtc/modules/congestion_controller/pcc/monitor_interval.h
new file mode 100644
index 0000000000..51bd0f068a
--- /dev/null
+++ b/third_party/libwebrtc/modules/congestion_controller/pcc/monitor_interval.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_CONGESTION_CONTROLLER_PCC_MONITOR_INTERVAL_H_
+#define MODULES_CONGESTION_CONTROLLER_PCC_MONITOR_INTERVAL_H_
+
+#include <vector>
+
+#include "api/transport/network_types.h"
+#include "api/units/data_rate.h"
+#include "api/units/data_size.h"
+#include "api/units/time_delta.h"
+#include "api/units/timestamp.h"
+
+namespace webrtc {
+namespace pcc {
+
+// PCC divides time into consecutive monitor intervals, which are used to
+// test the performance consequences of sending at a certain rate.
+class PccMonitorInterval {
+ public:
+ PccMonitorInterval(DataRate target_sending_rate,
+ Timestamp start_time,
+ TimeDelta duration);
+ ~PccMonitorInterval();
+ PccMonitorInterval(const PccMonitorInterval& other);
+ void OnPacketsFeedback(const std::vector<PacketResult>& packets_results);
+ // Returns true once complete information about the packets is available.
+ // Note that this only happens after receiving feedback for the first
+ // packet sent after the end of the monitor interval. If no such event
+ // occurs, we simply stay in the same state.
+ bool IsFeedbackCollectionDone() const;
+ Timestamp GetEndTime() const;
+
+ double GetLossRate() const;
+ // Estimates the gradient using linear regression on the 2-dimensional
+ // dataset of (packet delay, time of sampling) pairs.
+ double ComputeDelayGradient(double delay_gradient_threshold) const;
+ DataRate GetTargetSendingRate() const;
+ // How fast receiving side gets packets.
+ DataRate GetTransmittedPacketsRate() const;
+
+ private:
+ struct ReceivedPacket {
+ TimeDelta delay;
+ Timestamp sent_time;
+ };
+ // Target bitrate used to generate and pace the outgoing packets.
+ // The actually sent bitrate might not match the target exactly.
+ DataRate target_sending_rate_;
+ // The start time is excluded from the interval; the end time is included.
+ Timestamp start_time_;
+ TimeDelta interval_duration_;
+ // The vectors below are updated as feedback is received.
+ std::vector<ReceivedPacket> received_packets_;
+ std::vector<Timestamp> lost_packets_sent_time_;
+ DataSize received_packets_size_;
+ bool feedback_collection_done_;
+};
+
+} // namespace pcc
+} // namespace webrtc
+
+#endif // MODULES_CONGESTION_CONTROLLER_PCC_MONITOR_INTERVAL_H_
diff --git a/third_party/libwebrtc/modules/congestion_controller/pcc/monitor_interval_unittest.cc b/third_party/libwebrtc/modules/congestion_controller/pcc/monitor_interval_unittest.cc
new file mode 100644
index 0000000000..aaff57bd2a
--- /dev/null
+++ b/third_party/libwebrtc/modules/congestion_controller/pcc/monitor_interval_unittest.cc
@@ -0,0 +1,190 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/congestion_controller/pcc/monitor_interval.h"
+
+#include <stddef.h>
+
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace pcc {
+namespace test {
+namespace {
+const DataRate kTargetSendingRate = DataRate::KilobitsPerSec(300);
+const Timestamp kStartTime = Timestamp::Micros(0);
+const TimeDelta kPacketsDelta = TimeDelta::Millis(1);
+const TimeDelta kIntervalDuration = TimeDelta::Millis(100);
+const TimeDelta kDefaultDelay = TimeDelta::Millis(100);
+const DataSize kDefaultPacketSize = DataSize::Bytes(100);
+constexpr double kDelayGradientThreshold = 0.01;
+
+std::vector<PacketResult> CreatePacketResults(
+ const std::vector<Timestamp>& packets_send_times,
+ const std::vector<Timestamp>& packets_received_times = {},
+ const std::vector<DataSize>& packets_sizes = {}) {
+ std::vector<PacketResult> packet_results;
+ for (size_t i = 0; i < packets_send_times.size(); ++i) {
+ SentPacket sent_packet;
+ sent_packet.send_time = packets_send_times[i];
+ if (packets_sizes.empty()) {
+ sent_packet.size = kDefaultPacketSize;
+ } else {
+ sent_packet.size = packets_sizes[i];
+ }
+ PacketResult packet_result;
+ packet_result.sent_packet = sent_packet;
+ if (packets_received_times.empty()) {
+ packet_result.receive_time = packets_send_times[i] + kDefaultDelay;
+ } else {
+ packet_result.receive_time = packets_received_times[i];
+ }
+ packet_results.push_back(packet_result);
+ }
+ return packet_results;
+}
+
+} // namespace
+
+TEST(PccMonitorIntervalTest, InitialValuesAreEqualToOnesSetInConstructor) {
+ PccMonitorInterval interval{kTargetSendingRate, kStartTime,
+ kIntervalDuration};
+ EXPECT_EQ(interval.IsFeedbackCollectionDone(), false);
+ EXPECT_EQ(interval.GetEndTime(), kStartTime + kIntervalDuration);
+ EXPECT_EQ(interval.GetTargetSendingRate(), kTargetSendingRate);
+}
+
+TEST(PccMonitorIntervalTest, IndicatesDoneWhenFeedbackReceivedAfterInterval) {
+ PccMonitorInterval interval{kTargetSendingRate, kStartTime,
+ kIntervalDuration};
+ interval.OnPacketsFeedback(CreatePacketResults({kStartTime}));
+ EXPECT_EQ(interval.IsFeedbackCollectionDone(), false);
+ interval.OnPacketsFeedback(
+ CreatePacketResults({kStartTime, kStartTime + kIntervalDuration}));
+ EXPECT_EQ(interval.IsFeedbackCollectionDone(), false);
+ interval.OnPacketsFeedback(CreatePacketResults(
+ {kStartTime + kIntervalDuration, kStartTime + 2 * kIntervalDuration}));
+ EXPECT_EQ(interval.IsFeedbackCollectionDone(), true);
+}
+
+TEST(PccMonitorIntervalTest, LossRateIsOneThirdIfLostOnePacketOutOfThree) {
+ PccMonitorInterval interval{kTargetSendingRate, kStartTime,
+ kIntervalDuration};
+ std::vector<Timestamp> start_times = {
+ kStartTime, kStartTime + 0.1 * kIntervalDuration,
+ kStartTime + 0.5 * kIntervalDuration, kStartTime + kIntervalDuration,
+ kStartTime + 2 * kIntervalDuration};
+ std::vector<Timestamp> end_times = {
+ kStartTime + 2 * kIntervalDuration, kStartTime + 2 * kIntervalDuration,
+ Timestamp::PlusInfinity(), kStartTime + 2 * kIntervalDuration,
+ kStartTime + 4 * kIntervalDuration};
+ std::vector<DataSize> packet_sizes = {
+ kDefaultPacketSize, 2 * kDefaultPacketSize, 3 * kDefaultPacketSize,
+ 4 * kDefaultPacketSize, 5 * kDefaultPacketSize};
+ std::vector<PacketResult> packet_results =
+ CreatePacketResults(start_times, end_times, packet_sizes);
+ interval.OnPacketsFeedback(packet_results);
+ EXPECT_EQ(interval.IsFeedbackCollectionDone(), true);
+
+ EXPECT_DOUBLE_EQ(interval.GetLossRate(), 1. / 3);
+}
+
+TEST(PccMonitorIntervalTest, DelayGradientIsZeroIfNoChangeInPacketDelay) {
+ PccMonitorInterval monitor_interval(kTargetSendingRate, kStartTime,
+ kIntervalDuration);
+ monitor_interval.OnPacketsFeedback(CreatePacketResults(
+ {kStartTime + kPacketsDelta, kStartTime + 2 * kPacketsDelta,
+ kStartTime + 3 * kPacketsDelta, kStartTime + 2 * kIntervalDuration},
+ {kStartTime + kDefaultDelay, Timestamp::PlusInfinity(),
+ kStartTime + kDefaultDelay + 2 * kPacketsDelta,
+ Timestamp::PlusInfinity()},
+ {}));
+ // Delay gradient should be zero, because both received packets have the
+ // same one-way delay.
+ EXPECT_DOUBLE_EQ(
+ monitor_interval.ComputeDelayGradient(kDelayGradientThreshold), 0);
+}
+
+TEST(PccMonitorIntervalTest,
+ DelayGradientIsZeroWhenOnePacketSentInMonitorInterval) {
+ PccMonitorInterval monitor_interval(kTargetSendingRate, kStartTime,
+ kIntervalDuration);
+ monitor_interval.OnPacketsFeedback(CreatePacketResults(
+ {kStartTime + kPacketsDelta, kStartTime + 2 * kIntervalDuration},
+ {kStartTime + kDefaultDelay, kStartTime + 3 * kIntervalDuration}, {}));
+ // Only one received packet belongs to the monitor interval, so the delay
+ // gradient should be zero in this case.
+ EXPECT_DOUBLE_EQ(
+ monitor_interval.ComputeDelayGradient(kDelayGradientThreshold), 0);
+}
+
+TEST(PccMonitorIntervalTest, DelayGradientIsOne) {
+ PccMonitorInterval monitor_interval(kTargetSendingRate, kStartTime,
+ kIntervalDuration);
+ monitor_interval.OnPacketsFeedback(CreatePacketResults(
+ {kStartTime + kPacketsDelta, kStartTime + 2 * kPacketsDelta,
+ kStartTime + 3 * kPacketsDelta, kStartTime + 3 * kIntervalDuration},
+ {kStartTime + kDefaultDelay, Timestamp::PlusInfinity(),
+ kStartTime + 4 * kPacketsDelta + kDefaultDelay,
+ kStartTime + 3 * kIntervalDuration},
+ {}));
+ EXPECT_DOUBLE_EQ(
+ monitor_interval.ComputeDelayGradient(kDelayGradientThreshold), 1);
+}
+
+TEST(PccMonitorIntervalTest, DelayGradientIsMinusOne) {
+ PccMonitorInterval monitor_interval(kTargetSendingRate, kStartTime,
+ kIntervalDuration);
+ monitor_interval.OnPacketsFeedback(CreatePacketResults(
+ {kStartTime + kPacketsDelta, kStartTime + 2 * kPacketsDelta,
+ kStartTime + 5 * kPacketsDelta, kStartTime + 2 * kIntervalDuration},
+ {kStartTime + kDefaultDelay, Timestamp::PlusInfinity(),
+ kStartTime + kDefaultDelay, kStartTime + 3 * kIntervalDuration},
+ {}));
+ EXPECT_DOUBLE_EQ(
+ monitor_interval.ComputeDelayGradient(kDelayGradientThreshold), -1);
+}
+
+TEST(PccMonitorIntervalTest,
+ DelayGradientIsZeroIfSmallerThanGradientThreshold) {
+ PccMonitorInterval monitor_interval(kTargetSendingRate, kStartTime,
+ kIntervalDuration);
+ monitor_interval.OnPacketsFeedback(CreatePacketResults(
+ {kStartTime + kPacketsDelta, kStartTime + kPacketsDelta,
+ kStartTime + 102 * kPacketsDelta, kStartTime + 2 * kIntervalDuration},
+ {kStartTime + kDefaultDelay, Timestamp::PlusInfinity(),
+ kStartTime + kDefaultDelay + kPacketsDelta,
+ kStartTime + 3 * kIntervalDuration},
+ {}));
+ // The delay gradient is less than 0.01 and hence is treated as zero.
+ EXPECT_DOUBLE_EQ(
+ monitor_interval.ComputeDelayGradient(kDelayGradientThreshold), 0);
+}
+
+TEST(PccMonitorIntervalTest,
+ DelayGradientIsZeroWhenAllPacketsSentAtTheSameTime) {
+ PccMonitorInterval monitor_interval(kTargetSendingRate, kStartTime,
+ kIntervalDuration);
+ monitor_interval.OnPacketsFeedback(CreatePacketResults(
+ {kStartTime + kPacketsDelta, kStartTime + kPacketsDelta,
+ kStartTime + kPacketsDelta, kStartTime + 2 * kIntervalDuration},
+ {kStartTime + kDefaultDelay, Timestamp::PlusInfinity(),
+ kStartTime + kDefaultDelay + kPacketsDelta,
+ kStartTime + 3 * kIntervalDuration},
+ {}));
+ // If all packets were sent at the same time, then delay gradient should be
+ // zero.
+ EXPECT_DOUBLE_EQ(
+ monitor_interval.ComputeDelayGradient(kDelayGradientThreshold), 0);
+}
+
+} // namespace test
+} // namespace pcc
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/congestion_controller/pcc/pcc_factory.cc b/third_party/libwebrtc/modules/congestion_controller/pcc/pcc_factory.cc
new file mode 100644
index 0000000000..c35c6e8ab2
--- /dev/null
+++ b/third_party/libwebrtc/modules/congestion_controller/pcc/pcc_factory.cc
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/congestion_controller/pcc/pcc_factory.h"
+
+#include <memory>
+
+#include "modules/congestion_controller/pcc/pcc_network_controller.h"
+
+namespace webrtc {
+
+PccNetworkControllerFactory::PccNetworkControllerFactory() {}
+
+std::unique_ptr<NetworkControllerInterface> PccNetworkControllerFactory::Create(
+ NetworkControllerConfig config) {
+ return std::make_unique<pcc::PccNetworkController>(config);
+}
+
+TimeDelta PccNetworkControllerFactory::GetProcessInterval() const {
+ return TimeDelta::PlusInfinity();
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/congestion_controller/pcc/pcc_factory.h b/third_party/libwebrtc/modules/congestion_controller/pcc/pcc_factory.h
new file mode 100644
index 0000000000..bb70d7a499
--- /dev/null
+++ b/third_party/libwebrtc/modules/congestion_controller/pcc/pcc_factory.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_CONGESTION_CONTROLLER_PCC_PCC_FACTORY_H_
+#define MODULES_CONGESTION_CONTROLLER_PCC_PCC_FACTORY_H_
+
+#include <memory>
+
+#include "api/transport/network_control.h"
+#include "api/units/time_delta.h"
+
+namespace webrtc {
+
+class PccNetworkControllerFactory : public NetworkControllerFactoryInterface {
+ public:
+ PccNetworkControllerFactory();
+ std::unique_ptr<NetworkControllerInterface> Create(
+ NetworkControllerConfig config) override;
+ TimeDelta GetProcessInterval() const override;
+};
+} // namespace webrtc
+
+#endif // MODULES_CONGESTION_CONTROLLER_PCC_PCC_FACTORY_H_
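Editor's note: a usage sketch, not part of the patch. The factory is PCC's injection point: anything that accepts a NetworkControllerFactoryInterface (in this tree the call configuration carries such a pointer) can swap the default congestion controller for PCC without touching the rest of the pipeline. GetProcessInterval() returning PlusInfinity() means PCC requests no fixed-period process callbacks and is driven by packet events instead.

#include <memory>

#include "api/transport/network_control.h"
#include "modules/congestion_controller/pcc/pcc_factory.h"

std::unique_ptr<webrtc::NetworkControllerInterface> MakePccController(
    webrtc::NetworkControllerConfig config) {
  webrtc::PccNetworkControllerFactory factory;
  // The returned controller implements the generic NetworkControllerInterface,
  // so callers interact with it the same way as with the default controller.
  return factory.Create(config);
}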
diff --git a/third_party/libwebrtc/modules/congestion_controller/pcc/pcc_network_controller.cc b/third_party/libwebrtc/modules/congestion_controller/pcc/pcc_network_controller.cc
new file mode 100644
index 0000000000..8653470955
--- /dev/null
+++ b/third_party/libwebrtc/modules/congestion_controller/pcc/pcc_network_controller.cc
@@ -0,0 +1,391 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/congestion_controller/pcc/pcc_network_controller.h"
+
+#include <algorithm>
+
+#include "absl/types/optional.h"
+#include "api/units/data_size.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace pcc {
+namespace {
+constexpr int64_t kInitialRttMs = 200;
+constexpr int64_t kInitialBandwidthKbps = 300;
+constexpr double kMonitorIntervalDurationRatio = 1;
+constexpr double kDefaultSamplingStep = 0.05;
+constexpr double kTimeoutRatio = 2;
+constexpr double kAlphaForRtt = 0.9;
+constexpr double kSlowStartModeIncrease = 1.5;
+
+constexpr double kAlphaForPacketInterval = 0.9;
+constexpr int64_t kMinPacketsNumberPerInterval = 20;
+const TimeDelta kMinDurationOfMonitorInterval = TimeDelta::Millis(50);
+const TimeDelta kStartupDuration = TimeDelta::Millis(500);
+constexpr double kMinRateChangeBps = 4000;
+constexpr DataRate kMinRateHaveMultiplicativeRateChange = DataRate::BitsPerSec(
+ static_cast<int64_t>(kMinRateChangeBps / kDefaultSamplingStep));
+
+// Bitrate controller constants.
+constexpr double kInitialConversionFactor = 5;
+constexpr double kInitialDynamicBoundary = 0.1;
+constexpr double kDynamicBoundaryIncrement = 0.1;
+// Utility function parameters.
+constexpr double kRttGradientCoefficientBps = 0.005;
+constexpr double kLossCoefficientBps = 10;
+constexpr double kThroughputCoefficient = 0.001;
+constexpr double kThroughputPower = 0.9;
+constexpr double kRttGradientThreshold = 0.01;
+constexpr double kDelayGradientNegativeBound = 0.1;
+
+constexpr int64_t kNumberOfPacketsToKeep = 20;
+const uint64_t kRandomSeed = 100;
+} // namespace
+
+PccNetworkController::PccNetworkController(NetworkControllerConfig config)
+ : start_time_(Timestamp::PlusInfinity()),
+ last_sent_packet_time_(Timestamp::PlusInfinity()),
+ smoothed_packets_sending_interval_(TimeDelta::Zero()),
+ mode_(Mode::kStartup),
+ default_bandwidth_(DataRate::KilobitsPerSec(kInitialBandwidthKbps)),
+ bandwidth_estimate_(default_bandwidth_),
+ rtt_tracker_(TimeDelta::Millis(kInitialRttMs), kAlphaForRtt),
+ monitor_interval_timeout_(TimeDelta::Millis(kInitialRttMs) *
+ kTimeoutRatio),
+ monitor_interval_length_strategy_(MonitorIntervalLengthStrategy::kFixed),
+ monitor_interval_duration_ratio_(kMonitorIntervalDurationRatio),
+ sampling_step_(kDefaultSamplingStep),
+ monitor_interval_timeout_ratio_(kTimeoutRatio),
+ min_packets_number_per_interval_(kMinPacketsNumberPerInterval),
+ bitrate_controller_(kInitialConversionFactor,
+ kInitialDynamicBoundary,
+ kDynamicBoundaryIncrement,
+ kRttGradientCoefficientBps,
+ kLossCoefficientBps,
+ kThroughputCoefficient,
+ kThroughputPower,
+ kRttGradientThreshold,
+ kDelayGradientNegativeBound),
+ monitor_intervals_duration_(TimeDelta::Zero()),
+ complete_feedback_monitor_interval_number_(0),
+ random_generator_(kRandomSeed) {
+ if (config.constraints.starting_rate) {
+ default_bandwidth_ = *config.constraints.starting_rate;
+ bandwidth_estimate_ = default_bandwidth_;
+ }
+}
+
+PccNetworkController::~PccNetworkController() {}
+
+NetworkControlUpdate PccNetworkController::CreateRateUpdate(
+ Timestamp at_time) const {
+ DataRate sending_rate = DataRate::Zero();
+ if (monitor_intervals_.empty() ||
+ (monitor_intervals_.size() >= monitor_intervals_bitrates_.size() &&
+ at_time >= monitor_intervals_.back().GetEndTime())) {
+ sending_rate = bandwidth_estimate_;
+ } else {
+ sending_rate = monitor_intervals_.back().GetTargetSendingRate();
+ }
+ // Assemble the update now that the sending rate is computed.
+ NetworkControlUpdate update;
+
+ // Set up target rate to encoder.
+ TargetTransferRate target_rate_msg;
+ target_rate_msg.at_time = at_time;
+ target_rate_msg.network_estimate.at_time = at_time;
+ target_rate_msg.network_estimate.round_trip_time = rtt_tracker_.GetRtt();
+ // TODO(koloskova): Add correct estimate.
+ target_rate_msg.network_estimate.loss_rate_ratio = 0;
+ target_rate_msg.network_estimate.bwe_period =
+ monitor_interval_duration_ratio_ * rtt_tracker_.GetRtt();
+
+ target_rate_msg.target_rate = sending_rate;
+ update.target_rate = target_rate_msg;
+
+ // Set up pacing/padding target rate.
+ PacerConfig pacer_config;
+ pacer_config.at_time = at_time;
+ pacer_config.time_window = TimeDelta::Millis(1);
+ pacer_config.data_window = sending_rate * pacer_config.time_window;
+ pacer_config.pad_window = sending_rate * pacer_config.time_window;
+
+ update.pacer_config = pacer_config;
+ return update;
+}
+
+NetworkControlUpdate PccNetworkController::OnSentPacket(SentPacket msg) {
+ // Start a new monitor interval if the previous one has finished.
+ // Monitor interval is initialized in OnProcessInterval function.
+ if (start_time_.IsInfinite()) {
+ start_time_ = msg.send_time;
+ monitor_intervals_duration_ = kStartupDuration;
+ monitor_intervals_bitrates_ = {bandwidth_estimate_};
+ monitor_intervals_.emplace_back(bandwidth_estimate_, msg.send_time,
+ monitor_intervals_duration_);
+ complete_feedback_monitor_interval_number_ = 0;
+ }
+ if (last_sent_packet_time_.IsFinite()) {
+ smoothed_packets_sending_interval_ =
+ (msg.send_time - last_sent_packet_time_) * kAlphaForPacketInterval +
+ (1 - kAlphaForPacketInterval) * smoothed_packets_sending_interval_;
+ }
+ last_sent_packet_time_ = msg.send_time;
+ if (!monitor_intervals_.empty() &&
+ msg.send_time >= monitor_intervals_.back().GetEndTime() &&
+ monitor_intervals_bitrates_.size() > monitor_intervals_.size()) {
+ // Start new monitor interval.
+ monitor_intervals_.emplace_back(
+ monitor_intervals_bitrates_[monitor_intervals_.size()], msg.send_time,
+ monitor_intervals_duration_);
+ }
+ if (IsTimeoutExpired(msg.send_time)) {
+ DataSize received_size = DataSize::Zero();
+ for (size_t i = 1; i < last_received_packets_.size(); ++i) {
+ received_size += last_received_packets_[i].sent_packet.size;
+ }
+ TimeDelta sending_time = TimeDelta::Zero();
+ if (last_received_packets_.size() > 0)
+ sending_time = last_received_packets_.back().receive_time -
+ last_received_packets_.front().receive_time;
+ DataRate receiving_rate = bandwidth_estimate_;
+ if (sending_time > TimeDelta::Zero())
+ receiving_rate = received_size / sending_time;
+ bandwidth_estimate_ =
+ std::min<DataRate>(bandwidth_estimate_ * 0.5, receiving_rate);
+ if (mode_ == Mode::kSlowStart)
+ mode_ = Mode::kOnlineLearning;
+ }
+ if (mode_ == Mode::kStartup &&
+ msg.send_time - start_time_ >= kStartupDuration) {
+ DataSize received_size = DataSize::Zero();
+ for (size_t i = 1; i < last_received_packets_.size(); ++i) {
+ received_size += last_received_packets_[i].sent_packet.size;
+ }
+ TimeDelta sending_time = TimeDelta::Zero();
+ if (last_received_packets_.size() > 0)
+ sending_time = last_received_packets_.back().receive_time -
+ last_received_packets_.front().receive_time;
+ DataRate receiving_rate = bandwidth_estimate_;
+ if (sending_time > TimeDelta::Zero())
+ receiving_rate = received_size / sending_time;
+ bandwidth_estimate_ = receiving_rate;
+ monitor_intervals_.clear();
+ mode_ = Mode::kSlowStart;
+ monitor_intervals_duration_ = ComputeMonitorIntervalsDuration();
+ monitor_intervals_bitrates_ = {bandwidth_estimate_};
+ monitor_intervals_.emplace_back(bandwidth_estimate_, msg.send_time,
+ monitor_intervals_duration_);
+ bandwidth_estimate_ = bandwidth_estimate_ * (1 / kSlowStartModeIncrease);
+ complete_feedback_monitor_interval_number_ = 0;
+ return CreateRateUpdate(msg.send_time);
+ }
+ if (IsFeedbackCollectionDone() || IsTimeoutExpired(msg.send_time)) {
+ // Create new monitor intervals.
+ monitor_intervals_.clear();
+ monitor_interval_timeout_ =
+ rtt_tracker_.GetRtt() * monitor_interval_timeout_ratio_;
+ monitor_intervals_duration_ = ComputeMonitorIntervalsDuration();
+ complete_feedback_monitor_interval_number_ = 0;
+ // Compute bitrates and start first monitor interval.
+ if (mode_ == Mode::kSlowStart) {
+ monitor_intervals_bitrates_ = {kSlowStartModeIncrease *
+ bandwidth_estimate_};
+ monitor_intervals_.emplace_back(
+ kSlowStartModeIncrease * bandwidth_estimate_, msg.send_time,
+ monitor_intervals_duration_);
+ } else {
+ RTC_DCHECK(mode_ == Mode::kOnlineLearning || mode_ == Mode::kDoubleCheck);
+ monitor_intervals_.clear();
+ int64_t sign = 2 * (random_generator_.Rand(0, 1) % 2) - 1;
+ RTC_DCHECK_GE(sign, -1);
+ RTC_DCHECK_LE(sign, 1);
+ if (bandwidth_estimate_ >= kMinRateHaveMultiplicativeRateChange) {
+ monitor_intervals_bitrates_ = {
+ bandwidth_estimate_ * (1 + sign * sampling_step_),
+ bandwidth_estimate_ * (1 - sign * sampling_step_)};
+ } else {
+ monitor_intervals_bitrates_ = {
+ DataRate::BitsPerSec(std::max<double>(
+ bandwidth_estimate_.bps() + sign * kMinRateChangeBps, 0)),
+ DataRate::BitsPerSec(std::max<double>(
+ bandwidth_estimate_.bps() - sign * kMinRateChangeBps, 0))};
+ }
+ monitor_intervals_.emplace_back(monitor_intervals_bitrates_[0],
+ msg.send_time,
+ monitor_intervals_duration_);
+ }
+ }
+ return CreateRateUpdate(msg.send_time);
+}
+
+TimeDelta PccNetworkController::ComputeMonitorIntervalsDuration() const {
+ TimeDelta monitor_intervals_duration = TimeDelta::Zero();
+ if (monitor_interval_length_strategy_ ==
+ MonitorIntervalLengthStrategy::kAdaptive) {
+ monitor_intervals_duration = std::max(
+ rtt_tracker_.GetRtt() * monitor_interval_duration_ratio_,
+ smoothed_packets_sending_interval_ * min_packets_number_per_interval_);
+ } else {
+ RTC_DCHECK(monitor_interval_length_strategy_ ==
+ MonitorIntervalLengthStrategy::kFixed);
+ monitor_intervals_duration =
+ smoothed_packets_sending_interval_ * min_packets_number_per_interval_;
+ }
+ monitor_intervals_duration =
+ std::max(kMinDurationOfMonitorInterval, monitor_intervals_duration);
+ return monitor_intervals_duration;
+}
+
+bool PccNetworkController::IsTimeoutExpired(Timestamp current_time) const {
+ if (complete_feedback_monitor_interval_number_ >= monitor_intervals_.size()) {
+ return false;
+ }
+ return current_time -
+ monitor_intervals_[complete_feedback_monitor_interval_number_]
+ .GetEndTime() >=
+ monitor_interval_timeout_;
+}
+
+bool PccNetworkController::IsFeedbackCollectionDone() const {
+ return complete_feedback_monitor_interval_number_ >=
+ monitor_intervals_bitrates_.size();
+}
+
+NetworkControlUpdate PccNetworkController::OnTransportPacketsFeedback(
+ TransportPacketsFeedback msg) {
+ if (msg.packet_feedbacks.empty())
+ return NetworkControlUpdate();
+ // Save packets to the last_received_packets_ deque.
+ for (const PacketResult& packet_result : msg.ReceivedWithSendInfo()) {
+ last_received_packets_.push_back(packet_result);
+ }
+ while (last_received_packets_.size() > kNumberOfPacketsToKeep) {
+ last_received_packets_.pop_front();
+ }
+ rtt_tracker_.OnPacketsFeedback(msg.PacketsWithFeedback(), msg.feedback_time);
+ // Skip the rate update when online-learning mode has just started but the
+ // corresponding monitor intervals have not been started yet.
+ if (mode_ == Mode::kOnlineLearning &&
+ monitor_intervals_bitrates_.size() < 2) {
+ return NetworkControlUpdate();
+ }
+ if (!IsFeedbackCollectionDone() && !monitor_intervals_.empty()) {
+ while (complete_feedback_monitor_interval_number_ <
+ monitor_intervals_.size()) {
+ monitor_intervals_[complete_feedback_monitor_interval_number_]
+ .OnPacketsFeedback(msg.PacketsWithFeedback());
+ if (!monitor_intervals_[complete_feedback_monitor_interval_number_]
+ .IsFeedbackCollectionDone())
+ break;
+ ++complete_feedback_monitor_interval_number_;
+ }
+ }
+ if (IsFeedbackCollectionDone()) {
+ if (mode_ == Mode::kDoubleCheck) {
+ mode_ = Mode::kOnlineLearning;
+ } else if (NeedDoubleCheckMeasurments()) {
+ mode_ = Mode::kDoubleCheck;
+ }
+ if (mode_ != Mode::kDoubleCheck)
+ UpdateSendingRateAndMode();
+ }
+ return NetworkControlUpdate();
+}
+
+bool PccNetworkController::NeedDoubleCheckMeasurments() const {
+ if (mode_ == Mode::kSlowStart) {
+ return false;
+ }
+ double first_loss_rate = monitor_intervals_[0].GetLossRate();
+ double second_loss_rate = monitor_intervals_[1].GetLossRate();
+ DataRate first_bitrate = monitor_intervals_[0].GetTargetSendingRate();
+ DataRate second_bitrate = monitor_intervals_[1].GetTargetSendingRate();
+ if ((first_bitrate.bps() - second_bitrate.bps()) *
+ (first_loss_rate - second_loss_rate) <
+ 0) {
+ return true;
+ }
+ return false;
+}
+
+void PccNetworkController::UpdateSendingRateAndMode() {
+ if (monitor_intervals_.empty() || !IsFeedbackCollectionDone()) {
+ return;
+ }
+ if (mode_ == Mode::kSlowStart) {
+ DataRate old_bandwidth_estimate = bandwidth_estimate_;
+ bandwidth_estimate_ =
+ bitrate_controller_
+ .ComputeRateUpdateForSlowStartMode(monitor_intervals_[0])
+ .value_or(bandwidth_estimate_);
+ if (bandwidth_estimate_ <= old_bandwidth_estimate)
+ mode_ = Mode::kOnlineLearning;
+ } else {
+ RTC_DCHECK(mode_ == Mode::kOnlineLearning);
+ bandwidth_estimate_ =
+ bitrate_controller_.ComputeRateUpdateForOnlineLearningMode(
+ monitor_intervals_, bandwidth_estimate_);
+ }
+}
+
+NetworkControlUpdate PccNetworkController::OnNetworkAvailability(
+ NetworkAvailability msg) {
+ return NetworkControlUpdate();
+}
+
+NetworkControlUpdate PccNetworkController::OnNetworkRouteChange(
+ NetworkRouteChange msg) {
+ return NetworkControlUpdate();
+}
+
+NetworkControlUpdate PccNetworkController::OnProcessInterval(
+ ProcessInterval msg) {
+ return CreateRateUpdate(msg.at_time);
+}
+
+NetworkControlUpdate PccNetworkController::OnTargetRateConstraints(
+ TargetRateConstraints msg) {
+ return NetworkControlUpdate();
+}
+
+NetworkControlUpdate PccNetworkController::OnRemoteBitrateReport(
+ RemoteBitrateReport) {
+ return NetworkControlUpdate();
+}
+
+NetworkControlUpdate PccNetworkController::OnRoundTripTimeUpdate(
+ RoundTripTimeUpdate) {
+ return NetworkControlUpdate();
+}
+
+NetworkControlUpdate PccNetworkController::OnTransportLossReport(
+ TransportLossReport) {
+ return NetworkControlUpdate();
+}
+
+NetworkControlUpdate PccNetworkController::OnStreamsConfig(StreamsConfig msg) {
+ return NetworkControlUpdate();
+}
+
+NetworkControlUpdate PccNetworkController::OnReceivedPacket(
+ ReceivedPacket msg) {
+ return NetworkControlUpdate();
+}
+
+NetworkControlUpdate PccNetworkController::OnNetworkStateEstimate(
+ NetworkStateEstimate msg) {
+ return NetworkControlUpdate();
+}
+
+} // namespace pcc
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/congestion_controller/pcc/pcc_network_controller.h b/third_party/libwebrtc/modules/congestion_controller/pcc/pcc_network_controller.h
new file mode 100644
index 0000000000..e5f65dd7d9
--- /dev/null
+++ b/third_party/libwebrtc/modules/congestion_controller/pcc/pcc_network_controller.h
@@ -0,0 +1,125 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_CONGESTION_CONTROLLER_PCC_PCC_NETWORK_CONTROLLER_H_
+#define MODULES_CONGESTION_CONTROLLER_PCC_PCC_NETWORK_CONTROLLER_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <deque>
+#include <vector>
+
+#include "api/transport/network_control.h"
+#include "api/transport/network_types.h"
+#include "api/units/data_rate.h"
+#include "api/units/time_delta.h"
+#include "api/units/timestamp.h"
+#include "modules/congestion_controller/pcc/bitrate_controller.h"
+#include "modules/congestion_controller/pcc/monitor_interval.h"
+#include "modules/congestion_controller/pcc/rtt_tracker.h"
+#include "rtc_base/random.h"
+
+namespace webrtc {
+namespace pcc {
+
+// PCC (Performance-oriented Congestion Control) Vivace is a congestion
+// control algorithm based on online (convex) optimization, as used in
+// machine learning.
+// It divides time into consecutive Monitor Intervals (MIs) that test the
+// sending rates r(1 + eps) and r(1 - eps) around the current sending rate r.
+// At the end of each MI it computes a utility function that transforms the
+// observed performance statistics into a numerical value, then updates the
+// current sending rate by gradient ascent to maximize that utility.
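+//
+// A sketch of the idea, for orientation only (the actual update lives in
+// PccBitrateController and may differ in detail): with utility U and
+// sampling step eps,
+//   r_next = r + step * (U(r * (1 + eps)) - U(r * (1 - eps))) / (2 * eps * r)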
+class PccNetworkController : public NetworkControllerInterface {
+ public:
+ enum class Mode {
+ kStartup,
+    // In slow start, PCC doubles the sending rate each monitor interval.
+ kSlowStart,
+    // After the first decrease in the utility function, PCC exits slow
+    // start and enters the online learning phase.
+ kOnlineLearning,
+    // If sending at the lower rate resulted in higher packet loss, the
+    // measurements are unreliable and need to be double-checked.
+ kDoubleCheck
+ };
+
+ enum class MonitorIntervalLengthStrategy {
+    // Adaptive: the monitor interval length is proportional to the RTT.
+ kAdaptive,
+    // Fixed: the monitor interval length equals the time needed to send a
+    // predefined number of packets (kMinPacketsNumberPerInterval).
+ kFixed
+ };
+
+ explicit PccNetworkController(NetworkControllerConfig config);
+ ~PccNetworkController() override;
+
+ // NetworkControllerInterface
+ NetworkControlUpdate OnNetworkAvailability(NetworkAvailability msg) override;
+ NetworkControlUpdate OnNetworkRouteChange(NetworkRouteChange msg) override;
+ NetworkControlUpdate OnProcessInterval(ProcessInterval msg) override;
+ NetworkControlUpdate OnSentPacket(SentPacket msg) override;
+ NetworkControlUpdate OnTargetRateConstraints(
+ TargetRateConstraints msg) override;
+ NetworkControlUpdate OnTransportPacketsFeedback(
+ TransportPacketsFeedback msg) override;
+
+ // Part of remote bitrate estimation api, not implemented for PCC
+ NetworkControlUpdate OnStreamsConfig(StreamsConfig msg) override;
+ NetworkControlUpdate OnRemoteBitrateReport(RemoteBitrateReport msg) override;
+ NetworkControlUpdate OnRoundTripTimeUpdate(RoundTripTimeUpdate msg) override;
+ NetworkControlUpdate OnTransportLossReport(TransportLossReport msg) override;
+ NetworkControlUpdate OnReceivedPacket(ReceivedPacket msg) override;
+ NetworkControlUpdate OnNetworkStateEstimate(
+ NetworkStateEstimate msg) override;
+
+ private:
+ void UpdateSendingRateAndMode();
+ NetworkControlUpdate CreateRateUpdate(Timestamp at_time) const;
+ TimeDelta ComputeMonitorIntervalsDuration() const;
+  bool NeedDoubleCheckMeasurements() const;
+ bool IsTimeoutExpired(Timestamp current_time) const;
+ bool IsFeedbackCollectionDone() const;
+
+ Timestamp start_time_;
+ Timestamp last_sent_packet_time_;
+ TimeDelta smoothed_packets_sending_interval_;
+ Mode mode_;
+
+ // Default value used for initializing bandwidth.
+ DataRate default_bandwidth_;
+ // Current estimate r.
+ DataRate bandwidth_estimate_;
+
+ RttTracker rtt_tracker_;
+ TimeDelta monitor_interval_timeout_;
+ const MonitorIntervalLengthStrategy monitor_interval_length_strategy_;
+ const double monitor_interval_duration_ratio_;
+ const double sampling_step_; // Epsilon.
+ const double monitor_interval_timeout_ratio_;
+ const int64_t min_packets_number_per_interval_;
+
+ PccBitrateController bitrate_controller_;
+
+ std::vector<PccMonitorInterval> monitor_intervals_;
+ std::vector<DataRate> monitor_intervals_bitrates_;
+ TimeDelta monitor_intervals_duration_;
+ size_t complete_feedback_monitor_interval_number_;
+
+ webrtc::Random random_generator_;
+ std::deque<PacketResult> last_received_packets_;
+};
+
+} // namespace pcc
+} // namespace webrtc
+
+#endif // MODULES_CONGESTION_CONTROLLER_PCC_PCC_NETWORK_CONTROLLER_H_
diff --git a/third_party/libwebrtc/modules/congestion_controller/pcc/pcc_network_controller_unittest.cc b/third_party/libwebrtc/modules/congestion_controller/pcc/pcc_network_controller_unittest.cc
new file mode 100644
index 0000000000..c98680c785
--- /dev/null
+++ b/third_party/libwebrtc/modules/congestion_controller/pcc/pcc_network_controller_unittest.cc
@@ -0,0 +1,119 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/congestion_controller/pcc/pcc_network_controller.h"
+
+#include <memory>
+
+#include "modules/congestion_controller/pcc/pcc_factory.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "test/scenario/scenario.h"
+
+using ::testing::AllOf;
+using ::testing::Field;
+using ::testing::Ge;
+using ::testing::Le;
+using ::testing::Matcher;
+using ::testing::Property;
+
+namespace webrtc {
+namespace test {
+namespace {
+
+const DataRate kInitialBitrate = DataRate::KilobitsPerSec(60);
+const Timestamp kDefaultStartTime = Timestamp::Millis(10000000);
+
+constexpr double kDataRateMargin = 0.20;
+constexpr double kMinDataRateFactor = 1 - kDataRateMargin;
+constexpr double kMaxDataRateFactor = 1 + kDataRateMargin;
+inline Matcher<TargetTransferRate> TargetRateCloseTo(DataRate rate) {
+ DataRate min_data_rate = rate * kMinDataRateFactor;
+ DataRate max_data_rate = rate * kMaxDataRateFactor;
+ return Field(&TargetTransferRate::target_rate,
+ AllOf(Ge(min_data_rate), Le(max_data_rate)));
+}
+
+NetworkControllerConfig InitialConfig(
+ int starting_bandwidth_kbps = kInitialBitrate.kbps(),
+ int min_data_rate_kbps = 0,
+ int max_data_rate_kbps = 5 * kInitialBitrate.kbps()) {
+ NetworkControllerConfig config;
+ config.constraints.at_time = kDefaultStartTime;
+ config.constraints.min_data_rate =
+ DataRate::KilobitsPerSec(min_data_rate_kbps);
+ config.constraints.max_data_rate =
+ DataRate::KilobitsPerSec(max_data_rate_kbps);
+ config.constraints.starting_rate =
+ DataRate::KilobitsPerSec(starting_bandwidth_kbps);
+ return config;
+}
+
+ProcessInterval InitialProcessInterval() {
+ ProcessInterval process_interval;
+ process_interval.at_time = kDefaultStartTime;
+ return process_interval;
+}
+
+} // namespace
+
+TEST(PccNetworkControllerTest, SendsConfigurationOnFirstProcess) {
+  auto controller =
+      std::make_unique<pcc::PccNetworkController>(InitialConfig());
+
+  NetworkControlUpdate update =
+      controller->OnProcessInterval(InitialProcessInterval());
+ EXPECT_THAT(*update.target_rate, TargetRateCloseTo(kInitialBitrate));
+ EXPECT_THAT(*update.pacer_config,
+ Property(&PacerConfig::data_rate, Ge(kInitialBitrate)));
+}
+
+TEST(PccNetworkControllerTest, UpdatesTargetSendRate) {
+ PccNetworkControllerFactory factory;
+ Scenario s("pcc_unit/updates_rate", false);
+ CallClientConfig config;
+ config.transport.cc_factory = &factory;
+ config.transport.rates.min_rate = DataRate::KilobitsPerSec(10);
+ config.transport.rates.max_rate = DataRate::KilobitsPerSec(1500);
+ config.transport.rates.start_rate = DataRate::KilobitsPerSec(300);
+ auto send_net = s.CreateMutableSimulationNode([](NetworkSimulationConfig* c) {
+ c->bandwidth = DataRate::KilobitsPerSec(500);
+ c->delay = TimeDelta::Millis(100);
+ });
+ auto ret_net = s.CreateMutableSimulationNode(
+ [](NetworkSimulationConfig* c) { c->delay = TimeDelta::Millis(100); });
+
+ auto* client = s.CreateClient("send", config);
+ auto* route = s.CreateRoutes(client, {send_net->node()},
+ s.CreateClient("return", CallClientConfig()),
+ {ret_net->node()});
+ VideoStreamConfig video;
+ video.stream.use_rtx = false;
+ s.CreateVideoStream(route->forward(), video);
+ s.RunFor(TimeDelta::Seconds(30));
+ EXPECT_NEAR(client->target_rate().kbps(), 450, 100);
+ send_net->UpdateConfig([](NetworkSimulationConfig* c) {
+ c->bandwidth = DataRate::KilobitsPerSec(800);
+ c->delay = TimeDelta::Millis(100);
+ });
+ s.RunFor(TimeDelta::Seconds(20));
+ EXPECT_NEAR(client->target_rate().kbps(), 750, 150);
+ send_net->UpdateConfig([](NetworkSimulationConfig* c) {
+ c->bandwidth = DataRate::KilobitsPerSec(200);
+ c->delay = TimeDelta::Millis(200);
+ });
+ ret_net->UpdateConfig(
+ [](NetworkSimulationConfig* c) { c->delay = TimeDelta::Millis(200); });
+ s.RunFor(TimeDelta::Seconds(35));
+ EXPECT_NEAR(client->target_rate().kbps(), 170, 50);
+}
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/congestion_controller/pcc/rtt_tracker.cc b/third_party/libwebrtc/modules/congestion_controller/pcc/rtt_tracker.cc
new file mode 100644
index 0000000000..af9dc8f11b
--- /dev/null
+++ b/third_party/libwebrtc/modules/congestion_controller/pcc/rtt_tracker.cc
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/congestion_controller/pcc/rtt_tracker.h"
+
+#include <algorithm>
+
+namespace webrtc {
+namespace pcc {
+
+RttTracker::RttTracker(TimeDelta initial_rtt, double alpha)
+ : rtt_estimate_(initial_rtt), alpha_(alpha) {}
+
+void RttTracker::OnPacketsFeedback(
+ const std::vector<PacketResult>& packet_feedbacks,
+ Timestamp feedback_received_time) {
+ TimeDelta packet_rtt = TimeDelta::MinusInfinity();
+ for (const PacketResult& packet_result : packet_feedbacks) {
+ if (!packet_result.IsReceived())
+ continue;
+ packet_rtt = std::max<TimeDelta>(
+ packet_rtt,
+ feedback_received_time - packet_result.sent_packet.send_time);
+ }
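+  // Exponential smoothing towards the largest RTT sample in this feedback:
+  // rtt <- (1 - alpha) * rtt + alpha * sample.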
+ if (packet_rtt.IsFinite())
+ rtt_estimate_ = (1 - alpha_) * rtt_estimate_ + alpha_ * packet_rtt;
+}
+
+TimeDelta RttTracker::GetRtt() const {
+ return rtt_estimate_;
+}
+
+} // namespace pcc
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/congestion_controller/pcc/rtt_tracker.h b/third_party/libwebrtc/modules/congestion_controller/pcc/rtt_tracker.h
new file mode 100644
index 0000000000..94033cd511
--- /dev/null
+++ b/third_party/libwebrtc/modules/congestion_controller/pcc/rtt_tracker.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_CONGESTION_CONTROLLER_PCC_RTT_TRACKER_H_
+#define MODULES_CONGESTION_CONTROLLER_PCC_RTT_TRACKER_H_
+
+#include <vector>
+
+#include "api/transport/network_types.h"
+#include "api/units/time_delta.h"
+#include "api/units/timestamp.h"
+
+namespace webrtc {
+namespace pcc {
+
+class RttTracker {
+ public:
+ RttTracker(TimeDelta initial_rtt, double alpha);
+ // Updates RTT estimate.
+ void OnPacketsFeedback(const std::vector<PacketResult>& packet_feedbacks,
+ Timestamp feedback_received_time);
+ TimeDelta GetRtt() const;
+
+ private:
+ TimeDelta rtt_estimate_;
+ double alpha_;
+};
+
+} // namespace pcc
+} // namespace webrtc
+
+#endif // MODULES_CONGESTION_CONTROLLER_PCC_RTT_TRACKER_H_
diff --git a/third_party/libwebrtc/modules/congestion_controller/pcc/rtt_tracker_unittest.cc b/third_party/libwebrtc/modules/congestion_controller/pcc/rtt_tracker_unittest.cc
new file mode 100644
index 0000000000..7d90e86822
--- /dev/null
+++ b/third_party/libwebrtc/modules/congestion_controller/pcc/rtt_tracker_unittest.cc
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/congestion_controller/pcc/rtt_tracker.h"
+
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace pcc {
+namespace test {
+namespace {
+const TimeDelta kInitialRtt = TimeDelta::Micros(10);
+constexpr double kAlpha = 0.9;
+const Timestamp kStartTime = Timestamp::Seconds(0);
+
+PacketResult GetPacketWithRtt(TimeDelta rtt) {
+ SentPacket packet;
+ packet.send_time = kStartTime;
+ PacketResult packet_result;
+ packet_result.sent_packet = packet;
+ if (rtt.IsFinite()) {
+ packet_result.receive_time = kStartTime + rtt;
+ } else {
+ packet_result.receive_time = Timestamp::PlusInfinity();
+ }
+ return packet_result;
+}
+} // namespace
+
+TEST(PccRttTrackerTest, InitialValue) {
+ RttTracker tracker{kInitialRtt, kAlpha};
+ EXPECT_EQ(kInitialRtt, tracker.GetRtt());
+ for (int i = 0; i < 100; ++i) {
+ tracker.OnPacketsFeedback({GetPacketWithRtt(kInitialRtt)},
+ kStartTime + kInitialRtt);
+ }
+ EXPECT_EQ(kInitialRtt, tracker.GetRtt());
+}
+
+TEST(PccRttTrackerTest, DoNothingWhenPacketIsLost) {
+ RttTracker tracker{kInitialRtt, kAlpha};
+ tracker.OnPacketsFeedback({GetPacketWithRtt(TimeDelta::PlusInfinity())},
+ kStartTime + kInitialRtt);
+ EXPECT_EQ(tracker.GetRtt(), kInitialRtt);
+}
+
+TEST(PccRttTrackerTest, ChangeInRtt) {
+ RttTracker tracker{kInitialRtt, kAlpha};
+ const TimeDelta kNewRtt = TimeDelta::Micros(100);
+ tracker.OnPacketsFeedback({GetPacketWithRtt(kNewRtt)}, kStartTime + kNewRtt);
+ EXPECT_GT(tracker.GetRtt(), kInitialRtt);
+ EXPECT_LE(tracker.GetRtt(), kNewRtt);
+ for (int i = 0; i < 100; ++i) {
+ tracker.OnPacketsFeedback({GetPacketWithRtt(kNewRtt)},
+ kStartTime + kNewRtt);
+ }
+ const TimeDelta absolute_error = TimeDelta::Micros(1);
+ EXPECT_NEAR(tracker.GetRtt().us(), kNewRtt.us(), absolute_error.us());
+ EXPECT_LE(tracker.GetRtt(), kNewRtt);
+}
+
+} // namespace test
+} // namespace pcc
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/congestion_controller/pcc/utility_function.cc b/third_party/libwebrtc/modules/congestion_controller/pcc/utility_function.cc
new file mode 100644
index 0000000000..006a2fccd9
--- /dev/null
+++ b/third_party/libwebrtc/modules/congestion_controller/pcc/utility_function.cc
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/congestion_controller/pcc/utility_function.h"
+
+#include <algorithm>
+#include <cmath>
+
+#include "api/units/data_rate.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace pcc {
+
+VivaceUtilityFunction::VivaceUtilityFunction(
+ double delay_gradient_coefficient,
+ double loss_coefficient,
+ double throughput_coefficient,
+ double throughput_power,
+ double delay_gradient_threshold,
+ double delay_gradient_negative_bound)
+ : delay_gradient_coefficient_(delay_gradient_coefficient),
+ loss_coefficient_(loss_coefficient),
+ throughput_power_(throughput_power),
+ throughput_coefficient_(throughput_coefficient),
+ delay_gradient_threshold_(delay_gradient_threshold),
+ delay_gradient_negative_bound_(delay_gradient_negative_bound) {
+ RTC_DCHECK_GE(delay_gradient_negative_bound_, 0);
+}
+
+double VivaceUtilityFunction::Compute(
+ const PccMonitorInterval& monitor_interval) const {
+ RTC_DCHECK(monitor_interval.IsFeedbackCollectionDone());
+ double bitrate = monitor_interval.GetTargetSendingRate().bps();
+ double loss_rate = monitor_interval.GetLossRate();
+ double rtt_gradient =
+ monitor_interval.ComputeDelayGradient(delay_gradient_threshold_);
+ rtt_gradient = std::max(rtt_gradient, -delay_gradient_negative_bound_);
+ return (throughput_coefficient_ * std::pow(bitrate, throughput_power_)) -
+ (delay_gradient_coefficient_ * bitrate * rtt_gradient) -
+ (loss_coefficient_ * bitrate * loss_rate);
+}
+
+VivaceUtilityFunction::~VivaceUtilityFunction() = default;
+
+ModifiedVivaceUtilityFunction::ModifiedVivaceUtilityFunction(
+ double delay_gradient_coefficient,
+ double loss_coefficient,
+ double throughput_coefficient,
+ double throughput_power,
+ double delay_gradient_threshold,
+ double delay_gradient_negative_bound)
+ : delay_gradient_coefficient_(delay_gradient_coefficient),
+ loss_coefficient_(loss_coefficient),
+ throughput_power_(throughput_power),
+ throughput_coefficient_(throughput_coefficient),
+ delay_gradient_threshold_(delay_gradient_threshold),
+ delay_gradient_negative_bound_(delay_gradient_negative_bound) {
+ RTC_DCHECK_GE(delay_gradient_negative_bound_, 0);
+}
+
+double ModifiedVivaceUtilityFunction::Compute(
+ const PccMonitorInterval& monitor_interval) const {
+ RTC_DCHECK(monitor_interval.IsFeedbackCollectionDone());
+ double bitrate = monitor_interval.GetTargetSendingRate().bps();
+ double loss_rate = monitor_interval.GetLossRate();
+ double rtt_gradient =
+ monitor_interval.ComputeDelayGradient(delay_gradient_threshold_);
+ rtt_gradient = std::max(rtt_gradient, -delay_gradient_negative_bound_);
+ return (throughput_coefficient_ * std::pow(bitrate, throughput_power_) *
+ bitrate) -
+ (delay_gradient_coefficient_ * bitrate * bitrate * rtt_gradient) -
+ (loss_coefficient_ * bitrate * bitrate * loss_rate);
+}
+
+ModifiedVivaceUtilityFunction::~ModifiedVivaceUtilityFunction() = default;
+
+} // namespace pcc
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/congestion_controller/pcc/utility_function.h b/third_party/libwebrtc/modules/congestion_controller/pcc/utility_function.h
new file mode 100644
index 0000000000..98bb0744c1
--- /dev/null
+++ b/third_party/libwebrtc/modules/congestion_controller/pcc/utility_function.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_CONGESTION_CONTROLLER_PCC_UTILITY_FUNCTION_H_
+#define MODULES_CONGESTION_CONTROLLER_PCC_UTILITY_FUNCTION_H_
+
+#include "modules/congestion_controller/pcc/monitor_interval.h"
+
+namespace webrtc {
+namespace pcc {
+
+// The utility function is used by PCC to transform the performance statistics
+// (sending rate, loss rate, packet latency) gathered during one monitor
+// interval into a numerical value.
+// https://www.usenix.org/conference/nsdi18/presentation/dong
+class PccUtilityFunctionInterface {
+ public:
+ virtual double Compute(const PccMonitorInterval& monitor_interval) const = 0;
+ virtual ~PccUtilityFunctionInterface() = default;
+};
+
+// The Vivace utility function was proposed in the paper "PCC Vivace:
+// Online-Learning Congestion Control" by Mo Dong et al.
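+// With the coefficients below, Compute() evaluates (mirroring
+// utility_function.cc):
+//   U(r) = throughput_coefficient * r^throughput_power
+//          - delay_gradient_coefficient * r * rtt_gradient
+//          - loss_coefficient * r * loss_rate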
+class VivaceUtilityFunction : public PccUtilityFunctionInterface {
+ public:
+ VivaceUtilityFunction(double delay_gradient_coefficient,
+ double loss_coefficient,
+ double throughput_coefficient,
+ double throughput_power,
+ double delay_gradient_threshold,
+ double delay_gradient_negative_bound);
+ double Compute(const PccMonitorInterval& monitor_interval) const override;
+ ~VivaceUtilityFunction() override;
+
+ private:
+ const double delay_gradient_coefficient_;
+ const double loss_coefficient_;
+ const double throughput_power_;
+ const double throughput_coefficient_;
+ const double delay_gradient_threshold_;
+ const double delay_gradient_negative_bound_;
+};
+
+// This utility function was obtained by tuning the Vivace utility function.
+// The main difference is that the gradient of the modified utility function
+// (as well as the rate updates) scales proportionally to the sending rate,
+// which leads to better performance in the single-sender case.
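+// In Compute() this shows up as an extra factor of the rate r in every term:
+//   U(r) = throughput_coefficient * r^throughput_power * r
+//          - delay_gradient_coefficient * r^2 * rtt_gradient
+//          - loss_coefficient * r^2 * loss_rate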
+class ModifiedVivaceUtilityFunction : public PccUtilityFunctionInterface {
+ public:
+ ModifiedVivaceUtilityFunction(double delay_gradient_coefficient,
+ double loss_coefficient,
+ double throughput_coefficient,
+ double throughput_power,
+ double delay_gradient_threshold,
+ double delay_gradient_negative_bound);
+ double Compute(const PccMonitorInterval& monitor_interval) const override;
+ ~ModifiedVivaceUtilityFunction() override;
+
+ private:
+ const double delay_gradient_coefficient_;
+ const double loss_coefficient_;
+ const double throughput_power_;
+ const double throughput_coefficient_;
+ const double delay_gradient_threshold_;
+ const double delay_gradient_negative_bound_;
+};
+
+} // namespace pcc
+} // namespace webrtc
+
+#endif // MODULES_CONGESTION_CONTROLLER_PCC_UTILITY_FUNCTION_H_
diff --git a/third_party/libwebrtc/modules/congestion_controller/pcc/utility_function_unittest.cc b/third_party/libwebrtc/modules/congestion_controller/pcc/utility_function_unittest.cc
new file mode 100644
index 0000000000..19b2d15920
--- /dev/null
+++ b/third_party/libwebrtc/modules/congestion_controller/pcc/utility_function_unittest.cc
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/congestion_controller/pcc/utility_function.h"
+
+#include <stddef.h>
+
+#include <cmath>
+#include <type_traits>
+#include <vector>
+
+#include "api/transport/network_types.h"
+#include "api/units/data_rate.h"
+#include "api/units/data_size.h"
+#include "api/units/time_delta.h"
+#include "api/units/timestamp.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace pcc {
+namespace test {
+namespace {
+constexpr double kLossCoefficient = 11.35;
+constexpr double kThroughputPower = 0.9;
+constexpr double kThroughputCoefficient = 1;
+constexpr double kDelayGradientNegativeBound = 10;
+
+const Timestamp kStartTime = Timestamp::Micros(0);
+const TimeDelta kPacketsDelta = TimeDelta::Millis(1);
+const TimeDelta kIntervalDuration = TimeDelta::Millis(100);
+const DataRate kSendingBitrate = DataRate::BitsPerSec(1000);
+
+const DataSize kDefaultDataSize = DataSize::Bytes(100);
+const TimeDelta kDefaultDelay = TimeDelta::Millis(100);
+
+std::vector<PacketResult> CreatePacketResults(
+ const std::vector<Timestamp>& packets_send_times,
+ const std::vector<Timestamp>& packets_received_times = {},
+ const std::vector<DataSize>& packets_sizes = {}) {
+ std::vector<PacketResult> packet_results;
+ PacketResult packet_result;
+ SentPacket sent_packet;
+ for (size_t i = 0; i < packets_send_times.size(); ++i) {
+ sent_packet.send_time = packets_send_times[i];
+ if (packets_sizes.empty()) {
+ sent_packet.size = kDefaultDataSize;
+ } else {
+ sent_packet.size = packets_sizes[i];
+ }
+ packet_result.sent_packet = sent_packet;
+ if (packets_received_times.empty()) {
+ packet_result.receive_time = packets_send_times[i] + kDefaultDelay;
+ } else {
+ packet_result.receive_time = packets_received_times[i];
+ }
+ packet_results.push_back(packet_result);
+ }
+ return packet_results;
+}
+
+} // namespace
+
+TEST(PccVivaceUtilityFunctionTest,
+ UtilityIsThroughputTermIfAllRestCoefficientsAreZero) {
+ VivaceUtilityFunction utility_function(0, 0, kThroughputCoefficient,
+ kThroughputPower, 0,
+ kDelayGradientNegativeBound);
+ PccMonitorInterval monitor_interval(kSendingBitrate, kStartTime,
+ kIntervalDuration);
+ monitor_interval.OnPacketsFeedback(CreatePacketResults(
+ {kStartTime + kPacketsDelta, kStartTime + 2 * kPacketsDelta,
+ kStartTime + 3 * kPacketsDelta, kStartTime + 2 * kIntervalDuration},
+ {kStartTime + kPacketsDelta + kDefaultDelay, Timestamp::PlusInfinity(),
+ kStartTime + kDefaultDelay + 3 * kPacketsDelta,
+ Timestamp::PlusInfinity()},
+ {kDefaultDataSize, kDefaultDataSize, kDefaultDataSize,
+ kDefaultDataSize}));
+ EXPECT_DOUBLE_EQ(utility_function.Compute(monitor_interval),
+ kThroughputCoefficient *
+ std::pow(kSendingBitrate.bps(), kThroughputPower));
+}
+
+TEST(PccVivaceUtilityFunctionTest,
+ LossTermIsNonZeroIfLossCoefficientIsNonZero) {
+ VivaceUtilityFunction utility_function(
+ 0, kLossCoefficient, kThroughputCoefficient, kThroughputPower, 0,
+ kDelayGradientNegativeBound);
+ PccMonitorInterval monitor_interval(kSendingBitrate, kStartTime,
+ kIntervalDuration);
+ monitor_interval.OnPacketsFeedback(CreatePacketResults(
+ {kStartTime + kPacketsDelta, kStartTime + 2 * kPacketsDelta,
+ kStartTime + 5 * kPacketsDelta, kStartTime + 2 * kIntervalDuration},
+ {kStartTime + kDefaultDelay, Timestamp::PlusInfinity(),
+ kStartTime + kDefaultDelay, kStartTime + 3 * kIntervalDuration},
+ {}));
+ // The second packet was lost.
+ EXPECT_DOUBLE_EQ(utility_function.Compute(monitor_interval),
+ kThroughputCoefficient *
+ std::pow(kSendingBitrate.bps(), kThroughputPower) -
+ kLossCoefficient * kSendingBitrate.bps() *
+ monitor_interval.GetLossRate());
+}
+
+} // namespace test
+} // namespace pcc
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/congestion_controller/receive_side_congestion_controller.cc b/third_party/libwebrtc/modules/congestion_controller/receive_side_congestion_controller.cc
new file mode 100644
index 0000000000..4f238835e4
--- /dev/null
+++ b/third_party/libwebrtc/modules/congestion_controller/receive_side_congestion_controller.cc
@@ -0,0 +1,135 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/congestion_controller/include/receive_side_congestion_controller.h"
+
+#include "api/units/data_rate.h"
+#include "modules/pacing/packet_router.h"
+#include "modules/remote_bitrate_estimator/include/bwe_defines.h"
+#include "modules/remote_bitrate_estimator/remote_bitrate_estimator_abs_send_time.h"
+#include "modules/remote_bitrate_estimator/remote_bitrate_estimator_single_stream.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+namespace {
+constexpr uint32_t kTimeOffsetSwitchThreshold = 30;
+} // namespace
+
+void ReceiveSideCongestionController::OnRttUpdate(int64_t avg_rtt_ms,
+ int64_t max_rtt_ms) {
+ MutexLock lock(&mutex_);
+ rbe_->OnRttUpdate(avg_rtt_ms, max_rtt_ms);
+}
+
+void ReceiveSideCongestionController::RemoveStream(uint32_t ssrc) {
+ MutexLock lock(&mutex_);
+ rbe_->RemoveStream(ssrc);
+}
+
+DataRate ReceiveSideCongestionController::LatestReceiveSideEstimate() const {
+ MutexLock lock(&mutex_);
+ return rbe_->LatestEstimate();
+}
+
+void ReceiveSideCongestionController::PickEstimatorFromHeader(
+ const RTPHeader& header) {
+ if (header.extension.hasAbsoluteSendTime) {
+ // If we see AST in header, switch RBE strategy immediately.
+ if (!using_absolute_send_time_) {
+ RTC_LOG(LS_INFO)
+ << "WrappingBitrateEstimator: Switching to absolute send time RBE.";
+ using_absolute_send_time_ = true;
+ PickEstimator();
+ }
+ packets_since_absolute_send_time_ = 0;
+ } else {
+    // Without AST we wait for a few packets before falling back to the
+    // transmission time offset (TOF) RBE.
+ if (using_absolute_send_time_) {
+ ++packets_since_absolute_send_time_;
+ if (packets_since_absolute_send_time_ >= kTimeOffsetSwitchThreshold) {
+ RTC_LOG(LS_INFO)
+ << "WrappingBitrateEstimator: Switching to transmission "
+ "time offset RBE.";
+ using_absolute_send_time_ = false;
+ PickEstimator();
+ }
+ }
+ }
+}
+
+// Instantiate RBE for Time Offset or Absolute Send Time extensions.
+void ReceiveSideCongestionController::PickEstimator() {
+ if (using_absolute_send_time_) {
+ rbe_ = std::make_unique<RemoteBitrateEstimatorAbsSendTime>(&remb_throttler_,
+ &clock_);
+ } else {
+ rbe_ = std::make_unique<RemoteBitrateEstimatorSingleStream>(
+ &remb_throttler_, &clock_);
+ }
+}
+
+ReceiveSideCongestionController::ReceiveSideCongestionController(
+ Clock* clock,
+ RemoteEstimatorProxy::TransportFeedbackSender feedback_sender,
+ RembThrottler::RembSender remb_sender,
+ NetworkStateEstimator* network_state_estimator)
+ : clock_(*clock),
+ remb_throttler_(std::move(remb_sender), clock),
+ remote_estimator_proxy_(std::move(feedback_sender),
+ &field_trial_config_,
+ network_state_estimator),
+ rbe_(new RemoteBitrateEstimatorSingleStream(&remb_throttler_, clock)),
+ using_absolute_send_time_(false),
+ packets_since_absolute_send_time_(0) {}
+
+void ReceiveSideCongestionController::OnReceivedPacket(
+ int64_t arrival_time_ms,
+ size_t payload_size,
+ const RTPHeader& header) {
+ remote_estimator_proxy_.IncomingPacket(arrival_time_ms, payload_size, header);
+ if (!header.extension.hasTransportSequenceNumber) {
+    // Without transport-wide feedback, use receive-side BWE.
+ MutexLock lock(&mutex_);
+ PickEstimatorFromHeader(header);
+ rbe_->IncomingPacket(arrival_time_ms, payload_size, header);
+ }
+}
+
+void ReceiveSideCongestionController::SetSendPeriodicFeedback(
+ bool send_periodic_feedback) {
+ remote_estimator_proxy_.SetSendPeriodicFeedback(send_periodic_feedback);
+}
+
+void ReceiveSideCongestionController::OnBitrateChanged(int bitrate_bps) {
+ remote_estimator_proxy_.OnBitrateChanged(bitrate_bps);
+}
+
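+// Runs the receive-side bitrate estimator and the feedback proxy, returning
+// the delay until the next call is needed (never negative).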
+TimeDelta ReceiveSideCongestionController::MaybeProcess() {
+ Timestamp now = clock_.CurrentTime();
+  TimeDelta time_until_rbe;
+  {
+    MutexLock lock(&mutex_);
+    time_until_rbe = rbe_->Process();
+  }
+ TimeDelta time_until_rep = remote_estimator_proxy_.Process(now);
+ TimeDelta time_until = std::min(time_until_rbe, time_until_rep);
+ return std::max(time_until, TimeDelta::Zero());
+}
+
+void ReceiveSideCongestionController::SetMaxDesiredReceiveBitrate(
+ DataRate bitrate) {
+ remb_throttler_.SetMaxDesiredReceiveBitrate(bitrate);
+}
+
+void ReceiveSideCongestionController::SetTransportOverhead(
+ DataSize overhead_per_packet) {
+ remote_estimator_proxy_.SetTransportOverhead(overhead_per_packet);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/congestion_controller/receive_side_congestion_controller_unittest.cc b/third_party/libwebrtc/modules/congestion_controller/receive_side_congestion_controller_unittest.cc
new file mode 100644
index 0000000000..f2fd6d11d7
--- /dev/null
+++ b/third_party/libwebrtc/modules/congestion_controller/receive_side_congestion_controller_unittest.cc
@@ -0,0 +1,126 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/congestion_controller/include/receive_side_congestion_controller.h"
+
+#include "api/test/network_emulation/create_cross_traffic.h"
+#include "api/test/network_emulation/cross_traffic.h"
+#include "modules/pacing/packet_router.h"
+#include "system_wrappers/include/clock.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "test/scenario/scenario.h"
+
+using ::testing::_;
+using ::testing::AtLeast;
+using ::testing::ElementsAre;
+using ::testing::MockFunction;
+
+namespace webrtc {
+
+namespace {
+
+// Helper to convert some time format to the resolution used in the absolute
+// send time header extension, rounded to the nearest unit. `t` is the time to
+// convert, in some resolution. `denom` is the value to divide `t` by to get
+// whole seconds, e.g. `denom` = 1000 if `t` is in milliseconds.
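+// The extension carries a 24-bit 6.18 fixed-point value in seconds, hence the
+// shift by 18 and the 0x00ffffff mask.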
+uint32_t AbsSendTime(int64_t t, int64_t denom) {
+ return (((t << 18) + (denom >> 1)) / denom) & 0x00fffffful;
+}
+
+const uint32_t kInitialBitrateBps = 60000;
+
+} // namespace
+
+namespace test {
+
+TEST(ReceiveSideCongestionControllerTest, SendsRembWithAbsSendTime) {
+ MockFunction<void(std::vector<std::unique_ptr<rtcp::RtcpPacket>>)>
+ feedback_sender;
+ MockFunction<void(uint64_t, std::vector<uint32_t>)> remb_sender;
+ SimulatedClock clock_(123456);
+
+ ReceiveSideCongestionController controller(
+ &clock_, feedback_sender.AsStdFunction(), remb_sender.AsStdFunction(),
+ nullptr);
+
+ size_t payload_size = 1000;
+ RTPHeader header;
+ header.ssrc = 0x11eb21c;
+ header.extension.hasAbsoluteSendTime = true;
+
+ EXPECT_CALL(remb_sender, Call(_, ElementsAre(header.ssrc))).Times(AtLeast(1));
+
+ for (int i = 0; i < 10; ++i) {
+ clock_.AdvanceTimeMilliseconds((1000 * payload_size) / kInitialBitrateBps);
+ int64_t now_ms = clock_.TimeInMilliseconds();
+ header.extension.absoluteSendTime = AbsSendTime(now_ms, 1000);
+ controller.OnReceivedPacket(now_ms, payload_size, header);
+ }
+}
+
+TEST(ReceiveSideCongestionControllerTest,
+ SendsRembAfterSetMaxDesiredReceiveBitrate) {
+ MockFunction<void(std::vector<std::unique_ptr<rtcp::RtcpPacket>>)>
+ feedback_sender;
+ MockFunction<void(uint64_t, std::vector<uint32_t>)> remb_sender;
+ SimulatedClock clock_(123456);
+
+ ReceiveSideCongestionController controller(
+ &clock_, feedback_sender.AsStdFunction(), remb_sender.AsStdFunction(),
+ nullptr);
+ EXPECT_CALL(remb_sender, Call(123, _));
+ controller.SetMaxDesiredReceiveBitrate(DataRate::BitsPerSec(123));
+}
+
+TEST(ReceiveSideCongestionControllerTest, ConvergesToCapacity) {
+ Scenario s("receive_cc_unit/converge");
+ NetworkSimulationConfig net_conf;
+ net_conf.bandwidth = DataRate::KilobitsPerSec(1000);
+ net_conf.delay = TimeDelta::Millis(50);
+ auto* client = s.CreateClient("send", [&](CallClientConfig* c) {
+ c->transport.rates.start_rate = DataRate::KilobitsPerSec(300);
+ });
+
+ auto* route = s.CreateRoutes(client, {s.CreateSimulationNode(net_conf)},
+ s.CreateClient("return", CallClientConfig()),
+ {s.CreateSimulationNode(net_conf)});
+ VideoStreamConfig video;
+ video.stream.packet_feedback = false;
+ s.CreateVideoStream(route->forward(), video);
+ s.RunFor(TimeDelta::Seconds(30));
+ EXPECT_NEAR(client->send_bandwidth().kbps(), 900, 150);
+}
+
+TEST(ReceiveSideCongestionControllerTest, IsFairToTCP) {
+ Scenario s("receive_cc_unit/tcp_fairness");
+ NetworkSimulationConfig net_conf;
+ net_conf.bandwidth = DataRate::KilobitsPerSec(1000);
+ net_conf.delay = TimeDelta::Millis(50);
+ auto* client = s.CreateClient("send", [&](CallClientConfig* c) {
+ c->transport.rates.start_rate = DataRate::KilobitsPerSec(1000);
+ });
+ auto send_net = {s.CreateSimulationNode(net_conf)};
+ auto ret_net = {s.CreateSimulationNode(net_conf)};
+ auto* route = s.CreateRoutes(
+ client, send_net, s.CreateClient("return", CallClientConfig()), ret_net);
+ VideoStreamConfig video;
+ video.stream.packet_feedback = false;
+ s.CreateVideoStream(route->forward(), video);
+ s.net()->StartCrossTraffic(CreateFakeTcpCrossTraffic(
+ s.net()->CreateRoute(send_net), s.net()->CreateRoute(ret_net),
+ FakeTcpConfig()));
+ s.RunFor(TimeDelta::Seconds(30));
+  // For some reason we get outcompeted by TCP here; this should probably be
+  // fixed, and a lower bound should be added to the test.
+ EXPECT_LT(client->send_bandwidth().kbps(), 750);
+}
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/congestion_controller/remb_throttler.cc b/third_party/libwebrtc/modules/congestion_controller/remb_throttler.cc
new file mode 100644
index 0000000000..fcc30af9a8
--- /dev/null
+++ b/third_party/libwebrtc/modules/congestion_controller/remb_throttler.cc
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/congestion_controller/remb_throttler.h"
+
+#include <algorithm>
+#include <utility>
+
+namespace webrtc {
+
+namespace {
+constexpr TimeDelta kRembSendInterval = TimeDelta::Millis(200);
+} // namespace
+
+RembThrottler::RembThrottler(RembSender remb_sender, Clock* clock)
+ : remb_sender_(std::move(remb_sender)),
+ clock_(clock),
+ last_remb_time_(Timestamp::MinusInfinity()),
+ last_send_remb_bitrate_(DataRate::PlusInfinity()),
+ max_remb_bitrate_(DataRate::PlusInfinity()) {}
+
+void RembThrottler::OnReceiveBitrateChanged(const std::vector<uint32_t>& ssrcs,
+ uint32_t bitrate_bps) {
+ DataRate receive_bitrate = DataRate::BitsPerSec(bitrate_bps);
+ Timestamp now = clock_->CurrentTime();
+ {
+ MutexLock lock(&mutex_);
+    // Percent threshold: a new REMB is sent immediately only if the estimate
+    // dropped by more than this margin; smaller changes are throttled.
+ const int64_t kSendThresholdPercent = 103;
+ if (receive_bitrate * kSendThresholdPercent / 100 >
+ last_send_remb_bitrate_ &&
+ now < last_remb_time_ + kRembSendInterval) {
+ return;
+ }
+ last_remb_time_ = now;
+ last_send_remb_bitrate_ = receive_bitrate;
+ receive_bitrate = std::min(last_send_remb_bitrate_, max_remb_bitrate_);
+ }
+ remb_sender_(receive_bitrate.bps(), ssrcs);
+}
+
+void RembThrottler::SetMaxDesiredReceiveBitrate(DataRate bitrate) {
+ Timestamp now = clock_->CurrentTime();
+ {
+ MutexLock lock(&mutex_);
+ max_remb_bitrate_ = bitrate;
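+    // Skip the extra REMB if one was sent recently and already respects the
+    // new cap; OnReceiveBitrateChanged() caps future estimates anyway.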
+ if (now - last_remb_time_ < kRembSendInterval &&
+ !last_send_remb_bitrate_.IsZero() &&
+ last_send_remb_bitrate_ <= max_remb_bitrate_) {
+ return;
+ }
+ }
+ remb_sender_(bitrate.bps(), /*ssrcs=*/{});
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/congestion_controller/remb_throttler.h b/third_party/libwebrtc/modules/congestion_controller/remb_throttler.h
new file mode 100644
index 0000000000..85292cbc09
--- /dev/null
+++ b/third_party/libwebrtc/modules/congestion_controller/remb_throttler.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef MODULES_CONGESTION_CONTROLLER_REMB_THROTTLER_H_
+#define MODULES_CONGESTION_CONTROLLER_REMB_THROTTLER_H_
+
+#include <functional>
+#include <vector>
+
+#include "api/units/data_rate.h"
+#include "api/units/time_delta.h"
+#include "api/units/timestamp.h"
+#include "modules/remote_bitrate_estimator/include/remote_bitrate_estimator.h"
+#include "rtc_base/synchronization/mutex.h"
+
+namespace webrtc {
+
+// RembThrottler is a helper class for throttling RTCP REMB messages.
+// Small changes to the received BWE are rate-limited to one REMB per 200 ms.
+class RembThrottler : public RemoteBitrateObserver {
+ public:
+ using RembSender =
+ std::function<void(int64_t bitrate_bps, std::vector<uint32_t> ssrcs)>;
+ RembThrottler(RembSender remb_sender, Clock* clock);
+
+ // Ensures the remote party is notified of the receive bitrate no larger than
+ // `bitrate` using RTCP REMB.
+ void SetMaxDesiredReceiveBitrate(DataRate bitrate);
+
+  // Implements RemoteBitrateObserver.
+ // Called every time there is a new bitrate estimate for a receive channel
+ // group. This call will trigger a new RTCP REMB packet if the bitrate
+ // estimate has decreased or if no RTCP REMB packet has been sent for
+ // a certain time interval.
+ void OnReceiveBitrateChanged(const std::vector<uint32_t>& ssrcs,
+ uint32_t bitrate_bps) override;
+
+ private:
+ const RembSender remb_sender_;
+ Clock* const clock_;
+ mutable Mutex mutex_;
+ Timestamp last_remb_time_ RTC_GUARDED_BY(mutex_);
+ DataRate last_send_remb_bitrate_ RTC_GUARDED_BY(mutex_);
+ DataRate max_remb_bitrate_ RTC_GUARDED_BY(mutex_);
+};
+
+} // namespace webrtc
+#endif // MODULES_CONGESTION_CONTROLLER_REMB_THROTTLER_H_
diff --git a/third_party/libwebrtc/modules/congestion_controller/remb_throttler_unittest.cc b/third_party/libwebrtc/modules/congestion_controller/remb_throttler_unittest.cc
new file mode 100644
index 0000000000..3f8df8a7bb
--- /dev/null
+++ b/third_party/libwebrtc/modules/congestion_controller/remb_throttler_unittest.cc
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/congestion_controller/remb_throttler.h"
+
+#include <vector>
+
+#include "api/units/data_rate.h"
+#include "api/units/time_delta.h"
+#include "system_wrappers/include/clock.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+using ::testing::_;
+using ::testing::MockFunction;
+
+TEST(RembThrottlerTest, CallRembSenderOnFirstReceiveBitrateChange) {
+ SimulatedClock clock(Timestamp::Zero());
+ MockFunction<void(uint64_t, std::vector<uint32_t>)> remb_sender;
+ RembThrottler remb_throttler(remb_sender.AsStdFunction(), &clock);
+
+ EXPECT_CALL(remb_sender, Call(12345, std::vector<uint32_t>({1, 2, 3})));
+ remb_throttler.OnReceiveBitrateChanged({1, 2, 3}, /*bitrate_bps=*/12345);
+}
+
+TEST(RembThrottlerTest, ThrottlesSmallReceiveBitrateDecrease) {
+ SimulatedClock clock(Timestamp::Zero());
+ MockFunction<void(uint64_t, std::vector<uint32_t>)> remb_sender;
+ RembThrottler remb_throttler(remb_sender.AsStdFunction(), &clock);
+
+ EXPECT_CALL(remb_sender, Call);
+ remb_throttler.OnReceiveBitrateChanged({1, 2, 3}, /*bitrate_bps=*/12346);
+ clock.AdvanceTime(TimeDelta::Millis(100));
+ remb_throttler.OnReceiveBitrateChanged({1, 2, 3}, /*bitrate_bps=*/12345);
+
+ EXPECT_CALL(remb_sender, Call(12345, _));
+ clock.AdvanceTime(TimeDelta::Millis(101));
+ remb_throttler.OnReceiveBitrateChanged({1, 2, 3}, /*bitrate_bps=*/12345);
+}
+
+TEST(RembThrottlerTest, DoNotThrottleLargeReceiveBitrateDecrease) {
+ SimulatedClock clock(Timestamp::Zero());
+ MockFunction<void(uint64_t, std::vector<uint32_t>)> remb_sender;
+ RembThrottler remb_throttler(remb_sender.AsStdFunction(), &clock);
+
+ EXPECT_CALL(remb_sender, Call(2345, _));
+ EXPECT_CALL(remb_sender, Call(1234, _));
+ remb_throttler.OnReceiveBitrateChanged({1, 2, 3}, /*bitrate_bps=*/2345);
+ clock.AdvanceTime(TimeDelta::Millis(1));
+ remb_throttler.OnReceiveBitrateChanged({1, 2, 3}, /*bitrate_bps=*/1234);
+}
+
+TEST(RembThrottlerTest, ThrottlesReceiveBitrateIncrease) {
+ SimulatedClock clock(Timestamp::Zero());
+ MockFunction<void(uint64_t, std::vector<uint32_t>)> remb_sender;
+ RembThrottler remb_throttler(remb_sender.AsStdFunction(), &clock);
+
+ EXPECT_CALL(remb_sender, Call);
+ remb_throttler.OnReceiveBitrateChanged({1, 2, 3}, /*bitrate_bps=*/1234);
+ clock.AdvanceTime(TimeDelta::Millis(100));
+ remb_throttler.OnReceiveBitrateChanged({1, 2, 3}, /*bitrate_bps=*/2345);
+
+  // Updates more than 200 ms after the previous REMB are not throttled.
+ EXPECT_CALL(remb_sender, Call(2345, _));
+ clock.AdvanceTime(TimeDelta::Millis(101));
+ remb_throttler.OnReceiveBitrateChanged({1, 2, 3}, /*bitrate_bps=*/2345);
+}
+
+TEST(RembThrottlerTest, CallRembSenderOnSetMaxDesiredReceiveBitrate) {
+ SimulatedClock clock(Timestamp::Zero());
+ MockFunction<void(uint64_t, std::vector<uint32_t>)> remb_sender;
+ RembThrottler remb_throttler(remb_sender.AsStdFunction(), &clock);
+ EXPECT_CALL(remb_sender, Call(1234, _));
+ remb_throttler.SetMaxDesiredReceiveBitrate(DataRate::BitsPerSec(1234));
+}
+
+TEST(RembThrottlerTest, CallRembSenderWithMinOfMaxDesiredAndOnReceivedBitrate) {
+ SimulatedClock clock(Timestamp::Zero());
+ MockFunction<void(uint64_t, std::vector<uint32_t>)> remb_sender;
+ RembThrottler remb_throttler(remb_sender.AsStdFunction(), &clock);
+
+ EXPECT_CALL(remb_sender, Call(1234, _));
+ remb_throttler.OnReceiveBitrateChanged({1, 2, 3}, /*bitrate_bps=*/1234);
+ clock.AdvanceTime(TimeDelta::Millis(1));
+ remb_throttler.SetMaxDesiredReceiveBitrate(DataRate::BitsPerSec(4567));
+
+ clock.AdvanceTime(TimeDelta::Millis(200));
+ EXPECT_CALL(remb_sender, Call(4567, _));
+ remb_throttler.OnReceiveBitrateChanged({1, 2, 3}, /*bitrate_bps=*/5678);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/congestion_controller/rtp/BUILD.gn b/third_party/libwebrtc/modules/congestion_controller/rtp/BUILD.gn
new file mode 100644
index 0000000000..7057cdfe42
--- /dev/null
+++ b/third_party/libwebrtc/modules/congestion_controller/rtp/BUILD.gn
@@ -0,0 +1,104 @@
+# Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+import("../../../webrtc.gni")
+
+config("bwe_test_logging") {
+ if (rtc_enable_bwe_test_logging) {
+ defines = [ "BWE_TEST_LOGGING_COMPILE_TIME_ENABLE=1" ]
+ } else {
+ defines = [ "BWE_TEST_LOGGING_COMPILE_TIME_ENABLE=0" ]
+ }
+}
+
+rtc_library("control_handler") {
+ visibility = [ "*" ]
+ sources = [
+ "control_handler.cc",
+ "control_handler.h",
+ ]
+
+ deps = [
+ "../../../api:sequence_checker",
+ "../../../api/transport:network_control",
+ "../../../api/units:data_rate",
+ "../../../api/units:data_size",
+ "../../../api/units:time_delta",
+ "../../../rtc_base:checks",
+ "../../../rtc_base:logging",
+ "../../../rtc_base:safe_conversions",
+ "../../../rtc_base:safe_minmax",
+ "../../../rtc_base/system:no_unique_address",
+ "../../../system_wrappers:field_trial",
+ "../../pacing",
+ ]
+ absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ]
+
+ if (!build_with_mozilla) {
+ deps += [ "../../../rtc_base" ]
+ }
+}
+rtc_library("transport_feedback") {
+ visibility = [ "*" ]
+ sources = [
+ "transport_feedback_adapter.cc",
+ "transport_feedback_adapter.h",
+ "transport_feedback_demuxer.cc",
+ "transport_feedback_demuxer.h",
+ ]
+
+ deps = [
+ "../..:module_api_public",
+ "../../../api:sequence_checker",
+ "../../../api/transport:network_control",
+ "../../../api/units:data_size",
+ "../../../api/units:timestamp",
+ "../../../rtc_base",
+ "../../../rtc_base:checks",
+ "../../../rtc_base:logging",
+ "../../../rtc_base:macromagic",
+ "../../../rtc_base/network:sent_packet",
+ "../../../rtc_base/synchronization:mutex",
+ "../../../rtc_base/system:no_unique_address",
+ "../../../system_wrappers",
+ "../../../system_wrappers:field_trial",
+ "../../rtp_rtcp:rtp_rtcp_format",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/algorithm:container",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+}
+
+if (rtc_include_tests) {
+ rtc_library("congestion_controller_unittests") {
+ testonly = true
+
+ sources = [
+ "transport_feedback_adapter_unittest.cc",
+ "transport_feedback_demuxer_unittest.cc",
+ ]
+ deps = [
+ ":transport_feedback",
+ "../:congestion_controller",
+ "../../../api/transport:network_control",
+ "../../../logging:mocks",
+ "../../../rtc_base",
+ "../../../rtc_base:checks",
+ "../../../rtc_base:safe_conversions",
+ "../../../rtc_base/network:sent_packet",
+ "../../../system_wrappers",
+ "../../../test:field_trial",
+ "../../../test:test_support",
+ "../../pacing",
+ "../../remote_bitrate_estimator",
+ "../../rtp_rtcp:rtp_rtcp_format",
+ "//testing/gmock",
+ ]
+ }
+}
diff --git a/third_party/libwebrtc/modules/congestion_controller/rtp/control_handler.cc b/third_party/libwebrtc/modules/congestion_controller/rtp/control_handler.cc
new file mode 100644
index 0000000000..ffa373aeba
--- /dev/null
+++ b/third_party/libwebrtc/modules/congestion_controller/rtp/control_handler.cc
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/congestion_controller/rtp/control_handler.h"
+
+#include <algorithm>
+#include <vector>
+
+#include "api/units/data_rate.h"
+#include "modules/pacing/pacing_controller.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "rtc_base/numerics/safe_minmax.h"
+#include "system_wrappers/include/field_trial.h"
+
+namespace webrtc {
+namespace {
+
+// By default, pacer emergency stops encoder when buffer reaches a high level.
+bool IsPacerEmergencyStopDisabled() {
+ return field_trial::IsEnabled("WebRTC-DisablePacerEmergencyStop");
+}
+
+} // namespace
+CongestionControlHandler::CongestionControlHandler()
+ : disable_pacer_emergency_stop_(IsPacerEmergencyStopDisabled()) {
+ sequenced_checker_.Detach();
+}
+
+CongestionControlHandler::~CongestionControlHandler() {}
+
+void CongestionControlHandler::SetTargetRate(
+ TargetTransferRate new_target_rate) {
+ RTC_DCHECK_RUN_ON(&sequenced_checker_);
+ RTC_CHECK(new_target_rate.at_time.IsFinite());
+ last_incoming_ = new_target_rate;
+}
+
+void CongestionControlHandler::SetNetworkAvailability(bool network_available) {
+ RTC_DCHECK_RUN_ON(&sequenced_checker_);
+ network_available_ = network_available;
+}
+
+void CongestionControlHandler::SetPacerQueue(TimeDelta expected_queue_time) {
+ RTC_DCHECK_RUN_ON(&sequenced_checker_);
+ pacer_expected_queue_ms_ = expected_queue_time.ms();
+}
+
+absl::optional<TargetTransferRate> CongestionControlHandler::GetUpdate() {
+ RTC_DCHECK_RUN_ON(&sequenced_checker_);
+ if (!last_incoming_.has_value())
+ return absl::nullopt;
+ TargetTransferRate new_outgoing = *last_incoming_;
+ DataRate log_target_rate = new_outgoing.target_rate;
+ bool pause_encoding = false;
+ if (!network_available_) {
+ pause_encoding = true;
+ } else if (!disable_pacer_emergency_stop_ &&
+ pacer_expected_queue_ms_ >
+ PacingController::kMaxExpectedQueueLength.ms()) {
+ pause_encoding = true;
+ }
+ if (pause_encoding)
+ new_outgoing.target_rate = DataRate::Zero();
+ if (!last_reported_ ||
+ last_reported_->target_rate != new_outgoing.target_rate ||
+ (!new_outgoing.target_rate.IsZero() &&
+ (last_reported_->network_estimate.loss_rate_ratio !=
+ new_outgoing.network_estimate.loss_rate_ratio ||
+ last_reported_->network_estimate.round_trip_time !=
+ new_outgoing.network_estimate.round_trip_time))) {
+ if (encoder_paused_in_last_report_ != pause_encoding)
+ RTC_LOG(LS_INFO) << "Bitrate estimate state changed, BWE: "
+ << ToString(log_target_rate) << ".";
+ encoder_paused_in_last_report_ = pause_encoding;
+ last_reported_ = new_outgoing;
+ return new_outgoing;
+ }
+ return absl::nullopt;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/congestion_controller/rtp/control_handler.h b/third_party/libwebrtc/modules/congestion_controller/rtp/control_handler.h
new file mode 100644
index 0000000000..d8e7263a02
--- /dev/null
+++ b/third_party/libwebrtc/modules/congestion_controller/rtp/control_handler.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_CONGESTION_CONTROLLER_RTP_CONTROL_HANDLER_H_
+#define MODULES_CONGESTION_CONTROLLER_RTP_CONTROL_HANDLER_H_
+
+#include <stdint.h>
+
+#include "absl/types/optional.h"
+#include "api/sequence_checker.h"
+#include "api/transport/network_types.h"
+#include "api/units/data_size.h"
+#include "api/units/time_delta.h"
+#include "rtc_base/system/no_unique_address.h"
+
+namespace webrtc {
+// This is used to observe the network controller state and route calls to
+// the proper handler. It also keeps cached values for safe asynchronous use.
+// This makes sure that things running on the worker queue can't access state
+// in RtpTransportControllerSend, which would risk causing data race on
+// destruction unless members are properly ordered.
+class CongestionControlHandler {
+ public:
+ CongestionControlHandler();
+ ~CongestionControlHandler();
+
+ CongestionControlHandler(const CongestionControlHandler&) = delete;
+ CongestionControlHandler& operator=(const CongestionControlHandler&) = delete;
+
+ void SetTargetRate(TargetTransferRate new_target_rate);
+ void SetNetworkAvailability(bool network_available);
+ void SetPacerQueue(TimeDelta expected_queue_time);
+ absl::optional<TargetTransferRate> GetUpdate();
+
+ private:
+ absl::optional<TargetTransferRate> last_incoming_;
+ absl::optional<TargetTransferRate> last_reported_;
+ bool network_available_ = true;
+ bool encoder_paused_in_last_report_ = false;
+
+ const bool disable_pacer_emergency_stop_;
+ int64_t pacer_expected_queue_ms_ = 0;
+
+ RTC_NO_UNIQUE_ADDRESS SequenceChecker sequenced_checker_;
+};
+} // namespace webrtc
+#endif // MODULES_CONGESTION_CONTROLLER_RTP_CONTROL_HANDLER_H_
diff --git a/third_party/libwebrtc/modules/congestion_controller/rtp/control_handler_gn/moz.build b/third_party/libwebrtc/modules/congestion_controller/rtp/control_handler_gn/moz.build
new file mode 100644
index 0000000000..b81ef2d22a
--- /dev/null
+++ b/third_party/libwebrtc/modules/congestion_controller/rtp/control_handler_gn/moz.build
@@ -0,0 +1,214 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/congestion_controller/rtp/control_handler.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "GLESv2",
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "dl",
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("control_handler_gn")
diff --git a/third_party/libwebrtc/modules/congestion_controller/rtp/transport_feedback_adapter.cc b/third_party/libwebrtc/modules/congestion_controller/rtp/transport_feedback_adapter.cc
new file mode 100644
index 0000000000..d4cc915fd1
--- /dev/null
+++ b/third_party/libwebrtc/modules/congestion_controller/rtp/transport_feedback_adapter.cc
@@ -0,0 +1,275 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/congestion_controller/rtp/transport_feedback_adapter.h"
+
+#include <stdlib.h>
+
+#include <algorithm>
+#include <cmath>
+#include <utility>
+
+#include "absl/algorithm/container.h"
+#include "api/units/timestamp.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/transport_feedback.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+constexpr TimeDelta kSendTimeHistoryWindow = TimeDelta::Seconds(60);
+
+void InFlightBytesTracker::AddInFlightPacketBytes(
+ const PacketFeedback& packet) {
+ RTC_DCHECK(packet.sent.send_time.IsFinite());
+ auto it = in_flight_data_.find(packet.network_route);
+ if (it != in_flight_data_.end()) {
+ it->second += packet.sent.size;
+ } else {
+ in_flight_data_.insert({packet.network_route, packet.sent.size});
+ }
+}
+
+void InFlightBytesTracker::RemoveInFlightPacketBytes(
+ const PacketFeedback& packet) {
+ if (packet.sent.send_time.IsInfinite())
+ return;
+ auto it = in_flight_data_.find(packet.network_route);
+ if (it != in_flight_data_.end()) {
+ RTC_DCHECK_GE(it->second, packet.sent.size);
+ it->second -= packet.sent.size;
+ if (it->second.IsZero())
+ in_flight_data_.erase(it);
+ }
+}
+
+DataSize InFlightBytesTracker::GetOutstandingData(
+ const rtc::NetworkRoute& network_route) const {
+ auto it = in_flight_data_.find(network_route);
+ if (it != in_flight_data_.end()) {
+ return it->second;
+ } else {
+ return DataSize::Zero();
+ }
+}
+
+// Comparator providing a consistent ordering for maps keyed on NetworkRoute.
+bool InFlightBytesTracker::NetworkRouteComparator::operator()(
+ const rtc::NetworkRoute& a,
+ const rtc::NetworkRoute& b) const {
+ if (a.local.network_id() != b.local.network_id())
+ return a.local.network_id() < b.local.network_id();
+ if (a.remote.network_id() != b.remote.network_id())
+ return a.remote.network_id() < b.remote.network_id();
+
+ if (a.local.adapter_id() != b.local.adapter_id())
+ return a.local.adapter_id() < b.local.adapter_id();
+ if (a.remote.adapter_id() != b.remote.adapter_id())
+ return a.remote.adapter_id() < b.remote.adapter_id();
+
+ if (a.local.uses_turn() != b.local.uses_turn())
+ return a.local.uses_turn() < b.local.uses_turn();
+ if (a.remote.uses_turn() != b.remote.uses_turn())
+ return a.remote.uses_turn() < b.remote.uses_turn();
+
+ return a.connected < b.connected;
+}
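+
+// The chain above is a hand-rolled lexicographic compare over the fields
+// that identify a route. An equivalent formulation with std::make_tuple
+// (illustrative sketch only; same accessors as above):
+//
+//   auto key = [](const rtc::NetworkRoute& r) {
+//     return std::make_tuple(r.local.network_id(), r.remote.network_id(),
+//                            r.local.adapter_id(), r.remote.adapter_id(),
+//                            r.local.uses_turn(), r.remote.uses_turn(),
+//                            r.connected);
+//   };
+//   return key(a) < key(b);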
+
+TransportFeedbackAdapter::TransportFeedbackAdapter() = default;
+
+void TransportFeedbackAdapter::AddPacket(const RtpPacketSendInfo& packet_info,
+ size_t overhead_bytes,
+ Timestamp creation_time) {
+ PacketFeedback packet;
+ packet.creation_time = creation_time;
+ packet.sent.sequence_number =
+ seq_num_unwrapper_.Unwrap(packet_info.transport_sequence_number);
+ packet.sent.size = DataSize::Bytes(packet_info.length + overhead_bytes);
+ packet.sent.audio = packet_info.packet_type == RtpPacketMediaType::kAudio;
+ packet.network_route = network_route_;
+ packet.sent.pacing_info = packet_info.pacing_info;
+
+ while (!history_.empty() &&
+ creation_time - history_.begin()->second.creation_time >
+ kSendTimeHistoryWindow) {
+ // TODO(sprang): Warn if erasing (too many) old items?
+ if (history_.begin()->second.sent.sequence_number > last_ack_seq_num_)
+ in_flight_.RemoveInFlightPacketBytes(history_.begin()->second);
+ history_.erase(history_.begin());
+ }
+ history_.insert(std::make_pair(packet.sent.sequence_number, packet));
+}
+
+absl::optional<SentPacket> TransportFeedbackAdapter::ProcessSentPacket(
+ const rtc::SentPacket& sent_packet) {
+ auto send_time = Timestamp::Millis(sent_packet.send_time_ms);
+ // TODO(srte): Only use one way to indicate that packet feedback is used.
+ if (sent_packet.info.included_in_feedback || sent_packet.packet_id != -1) {
+ int64_t unwrapped_seq_num =
+ seq_num_unwrapper_.Unwrap(sent_packet.packet_id);
+ auto it = history_.find(unwrapped_seq_num);
+ if (it != history_.end()) {
+ bool packet_retransmit = it->second.sent.send_time.IsFinite();
+ it->second.sent.send_time = send_time;
+ last_send_time_ = std::max(last_send_time_, send_time);
+ // TODO(srte): Don't do this on retransmit.
+ if (!pending_untracked_size_.IsZero()) {
+ if (send_time < last_untracked_send_time_)
+ RTC_LOG(LS_WARNING)
+ << "appending acknowledged data for out of order packet. (Diff: "
+ << ToString(last_untracked_send_time_ - send_time) << " ms.)";
+ it->second.sent.prior_unacked_data += pending_untracked_size_;
+ pending_untracked_size_ = DataSize::Zero();
+ }
+ if (!packet_retransmit) {
+ if (it->second.sent.sequence_number > last_ack_seq_num_)
+ in_flight_.AddInFlightPacketBytes(it->second);
+ it->second.sent.data_in_flight = GetOutstandingData();
+ return it->second.sent;
+ }
+ }
+ } else if (sent_packet.info.included_in_allocation) {
+ if (send_time < last_send_time_) {
+ RTC_LOG(LS_WARNING) << "ignoring untracked data for out of order packet.";
+ }
+ pending_untracked_size_ +=
+ DataSize::Bytes(sent_packet.info.packet_size_bytes);
+ last_untracked_send_time_ = std::max(last_untracked_send_time_, send_time);
+ }
+ return absl::nullopt;
+}
+
+absl::optional<TransportPacketsFeedback>
+TransportFeedbackAdapter::ProcessTransportFeedback(
+ const rtcp::TransportFeedback& feedback,
+ Timestamp feedback_receive_time) {
+ if (feedback.GetPacketStatusCount() == 0) {
+ RTC_LOG(LS_INFO) << "Empty transport feedback packet received.";
+ return absl::nullopt;
+ }
+
+ TransportPacketsFeedback msg;
+ msg.feedback_time = feedback_receive_time;
+
+ msg.prior_in_flight = in_flight_.GetOutstandingData(network_route_);
+ msg.packet_feedbacks =
+ ProcessTransportFeedbackInner(feedback, feedback_receive_time);
+ if (msg.packet_feedbacks.empty())
+ return absl::nullopt;
+
+ auto it = history_.find(last_ack_seq_num_);
+ if (it != history_.end()) {
+ msg.first_unacked_send_time = it->second.sent.send_time;
+ }
+ msg.data_in_flight = in_flight_.GetOutstandingData(network_route_);
+
+ return msg;
+}
+
+void TransportFeedbackAdapter::SetNetworkRoute(
+ const rtc::NetworkRoute& network_route) {
+ network_route_ = network_route;
+}
+
+DataSize TransportFeedbackAdapter::GetOutstandingData() const {
+ return in_flight_.GetOutstandingData(network_route_);
+}
+
+std::vector<PacketResult>
+TransportFeedbackAdapter::ProcessTransportFeedbackInner(
+ const rtcp::TransportFeedback& feedback,
+ Timestamp feedback_receive_time) {
+ // Add timestamp deltas to a local time base selected on first packet arrival.
+ // This won't be the true time base, but makes it easier to manually inspect
+ // timestamps.
+ if (last_timestamp_.IsInfinite()) {
+ current_offset_ = feedback_receive_time;
+ } else {
+ // TODO(srte): We shouldn't need to do rounding here.
+ const TimeDelta delta = feedback.GetBaseDelta(last_timestamp_)
+ .RoundDownTo(TimeDelta::Millis(1));
+ // Protect against assigning current_offset_ a negative value.
+ if (delta < Timestamp::Zero() - current_offset_) {
+ RTC_LOG(LS_WARNING) << "Unexpected feedback timestamp received.";
+ current_offset_ = feedback_receive_time;
+ } else {
+ current_offset_ += delta;
+ }
+ }
+ last_timestamp_ = feedback.BaseTime();
+
+ std::vector<PacketResult> packet_result_vector;
+ packet_result_vector.reserve(feedback.GetPacketStatusCount());
+
+ size_t failed_lookups = 0;
+ size_t ignored = 0;
+ TimeDelta packet_offset = TimeDelta::Zero();
+ for (const auto& packet : feedback.GetAllPackets()) {
+ int64_t seq_num = seq_num_unwrapper_.Unwrap(packet.sequence_number());
+
+ if (seq_num > last_ack_seq_num_) {
+ // Starts at history_.begin() if last_ack_seq_num_ < 0, since any valid
+ // sequence number is >= 0.
+ for (auto it = history_.upper_bound(last_ack_seq_num_);
+ it != history_.upper_bound(seq_num); ++it) {
+ in_flight_.RemoveInFlightPacketBytes(it->second);
+ }
+ last_ack_seq_num_ = seq_num;
+ }
+
+ auto it = history_.find(seq_num);
+ if (it == history_.end()) {
+ ++failed_lookups;
+ continue;
+ }
+
+ if (it->second.sent.send_time.IsInfinite()) {
+ // TODO(srte): Fix the tests that make this happen and make this a
+ // DCHECK.
+ RTC_DLOG(LS_ERROR)
+ << "Received feedback before packet was indicated as sent";
+ continue;
+ }
+
+ PacketFeedback packet_feedback = it->second;
+ if (packet.received()) {
+ packet_offset += packet.delta();
+ packet_feedback.receive_time =
+ current_offset_ + packet_offset.RoundDownTo(TimeDelta::Millis(1));
+ // Note: Lost packets are not removed from history because they might be
+ // reported as received in a later feedback message.
+ history_.erase(it);
+ }
+ if (packet_feedback.network_route == network_route_) {
+ PacketResult result;
+ result.sent_packet = packet_feedback.sent;
+ result.receive_time = packet_feedback.receive_time;
+ packet_result_vector.push_back(result);
+ } else {
+ ++ignored;
+ }
+ }
+
+ if (failed_lookups > 0) {
+ RTC_LOG(LS_WARNING) << "Failed to lookup send time for " << failed_lookups
+ << " packet" << (failed_lookups > 1 ? "s" : "")
+ << ". Send time history too small?";
+ }
+ if (ignored > 0) {
+ RTC_LOG(LS_INFO) << "Ignoring " << ignored
+ << " packets because they were sent on a different route.";
+ }
+
+ return packet_result_vector;
+}
+
+} // namespace webrtc
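+
+// Illustrative sketch of the receive-time reconstruction performed in
+// ProcessTransportFeedbackInner above: the adapter anchors current_offset_
+// at the local arrival time of the first feedback message, then advances it
+// by the base delta carried in each later message, so all reconstructed
+// receive times share one consistent (if arbitrary) time base.
+//
+//   Timestamp offset = first_feedback_receive_time;           // Anchor.
+//   offset += next_feedback.GetBaseDelta(prev_base_time);     // Per message.
+//   Timestamp receive_time = offset + accumulated_packet_deltas;  // Per packet.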
diff --git a/third_party/libwebrtc/modules/congestion_controller/rtp/transport_feedback_adapter.h b/third_party/libwebrtc/modules/congestion_controller/rtp/transport_feedback_adapter.h
new file mode 100644
index 0000000000..f9f939db9c
--- /dev/null
+++ b/third_party/libwebrtc/modules/congestion_controller/rtp/transport_feedback_adapter.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_CONGESTION_CONTROLLER_RTP_TRANSPORT_FEEDBACK_ADAPTER_H_
+#define MODULES_CONGESTION_CONTROLLER_RTP_TRANSPORT_FEEDBACK_ADAPTER_H_
+
+#include <deque>
+#include <map>
+#include <utility>
+#include <vector>
+
+#include "api/sequence_checker.h"
+#include "api/transport/network_types.h"
+#include "api/units/timestamp.h"
+#include "modules/include/module_common_types_public.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "rtc_base/network/sent_packet.h"
+#include "rtc_base/network_route.h"
+#include "rtc_base/thread_annotations.h"
+
+namespace webrtc {
+
+struct PacketFeedback {
+ PacketFeedback() = default;
+ // Time corresponding to when this object was created.
+ Timestamp creation_time = Timestamp::MinusInfinity();
+ SentPacket sent;
+ // Time corresponding to when the packet was received. Timestamped with the
+ // receiver's clock. For unreceived packet, Timestamp::PlusInfinity() is
+ // used.
+ Timestamp receive_time = Timestamp::PlusInfinity();
+
+ // The network route that this packet is associated with.
+ rtc::NetworkRoute network_route;
+};
+
+class InFlightBytesTracker {
+ public:
+ void AddInFlightPacketBytes(const PacketFeedback& packet);
+ void RemoveInFlightPacketBytes(const PacketFeedback& packet);
+ DataSize GetOutstandingData(const rtc::NetworkRoute& network_route) const;
+
+ private:
+ struct NetworkRouteComparator {
+ bool operator()(const rtc::NetworkRoute& a,
+ const rtc::NetworkRoute& b) const;
+ };
+ std::map<rtc::NetworkRoute, DataSize, NetworkRouteComparator> in_flight_data_;
+};
+
+class TransportFeedbackAdapter {
+ public:
+ TransportFeedbackAdapter();
+
+ void AddPacket(const RtpPacketSendInfo& packet_info,
+ size_t overhead_bytes,
+ Timestamp creation_time);
+ absl::optional<SentPacket> ProcessSentPacket(
+ const rtc::SentPacket& sent_packet);
+
+ absl::optional<TransportPacketsFeedback> ProcessTransportFeedback(
+ const rtcp::TransportFeedback& feedback,
+ Timestamp feedback_receive_time);
+
+ void SetNetworkRoute(const rtc::NetworkRoute& network_route);
+
+ DataSize GetOutstandingData() const;
+
+ private:
+ enum class SendTimeHistoryStatus { kNotAdded, kOk, kDuplicate };
+
+ std::vector<PacketResult> ProcessTransportFeedbackInner(
+ const rtcp::TransportFeedback& feedback,
+ Timestamp feedback_receive_time);
+
+ DataSize pending_untracked_size_ = DataSize::Zero();
+ Timestamp last_send_time_ = Timestamp::MinusInfinity();
+ Timestamp last_untracked_send_time_ = Timestamp::MinusInfinity();
+ SequenceNumberUnwrapper seq_num_unwrapper_;
+ std::map<int64_t, PacketFeedback> history_;
+
+ // Sequence numbers are never negative; -1 is used as the initial value
+ // since it is always less than any real sequence number.
+ int64_t last_ack_seq_num_ = -1;
+ InFlightBytesTracker in_flight_;
+
+ Timestamp current_offset_ = Timestamp::MinusInfinity();
+ Timestamp last_timestamp_ = Timestamp::MinusInfinity();
+
+ rtc::NetworkRoute network_route_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_CONGESTION_CONTROLLER_RTP_TRANSPORT_FEEDBACK_ADAPTER_H_
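+
+// Note on the unwrapper above: transport-wide sequence numbers are 16 bits
+// and wrap frequently, so the history is keyed on the unwrapped 64-bit
+// value. A minimal sketch (behavior assumed from
+// modules/include/module_common_types_public.h):
+//
+//   SequenceNumberUnwrapper unwrapper;
+//   int64_t a = unwrapper.Unwrap(65535);
+//   int64_t b = unwrapper.Unwrap(0);  // Continues past the wrap: b == a + 1.
+//   RTC_DCHECK_GT(b, a);              // Monotonic across wraparound.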
diff --git a/third_party/libwebrtc/modules/congestion_controller/rtp/transport_feedback_adapter_unittest.cc b/third_party/libwebrtc/modules/congestion_controller/rtp/transport_feedback_adapter_unittest.cc
new file mode 100644
index 0000000000..14a2b13831
--- /dev/null
+++ b/third_party/libwebrtc/modules/congestion_controller/rtp/transport_feedback_adapter_unittest.cc
@@ -0,0 +1,407 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/congestion_controller/rtp/transport_feedback_adapter.h"
+
+#include <limits>
+#include <memory>
+#include <vector>
+
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/transport_feedback.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "system_wrappers/include/clock.h"
+#include "test/field_trial.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+using ::testing::_;
+using ::testing::Invoke;
+
+namespace webrtc {
+
+namespace {
+constexpr uint32_t kSsrc = 8492;
+const PacedPacketInfo kPacingInfo0(0, 5, 2000);
+const PacedPacketInfo kPacingInfo1(1, 8, 4000);
+const PacedPacketInfo kPacingInfo2(2, 14, 7000);
+const PacedPacketInfo kPacingInfo3(3, 20, 10000);
+const PacedPacketInfo kPacingInfo4(4, 22, 10000);
+
+void ComparePacketFeedbackVectors(const std::vector<PacketResult>& truth,
+ const std::vector<PacketResult>& input) {
+ ASSERT_EQ(truth.size(), input.size());
+ size_t len = truth.size();
+ // truth contains the input data for the test, and input is what will be
+ // sent to the bandwidth estimator. truth[x].receive_time is used to
+ // populate the transport feedback messages. As these times may be changed
+ // (because of resolution limits in the packets, and because of the time
+ // base adjustment performed by the TransportFeedbackAdapter at the first
+ // packet), truth[x].receive_time and input[x].receive_time may not be
+ // equal. However, the difference must be the same for all x.
+ TimeDelta arrival_time_delta = truth[0].receive_time - input[0].receive_time;
+ for (size_t i = 0; i < len; ++i) {
+ RTC_CHECK(truth[i].IsReceived());
+ if (input[i].IsReceived()) {
+ EXPECT_EQ(truth[i].receive_time - input[i].receive_time,
+ arrival_time_delta);
+ }
+ EXPECT_EQ(truth[i].sent_packet.send_time, input[i].sent_packet.send_time);
+ EXPECT_EQ(truth[i].sent_packet.sequence_number,
+ input[i].sent_packet.sequence_number);
+ EXPECT_EQ(truth[i].sent_packet.size, input[i].sent_packet.size);
+ EXPECT_EQ(truth[i].sent_packet.pacing_info,
+ input[i].sent_packet.pacing_info);
+ }
+}
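+
+// Example of the invariant checked above (hypothetical numbers): if the
+// adapter re-bases arrival times, truth receive times {100 ms, 110 ms} may
+// come back in input as {60 ms, 70 ms}; the per-packet difference (here
+// 40 ms) must then be constant across the whole vector.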
+
+PacketResult CreatePacket(int64_t receive_time_ms,
+ int64_t send_time_ms,
+ int64_t sequence_number,
+ size_t payload_size,
+ const PacedPacketInfo& pacing_info) {
+ PacketResult res;
+ res.receive_time = Timestamp::Millis(receive_time_ms);
+ res.sent_packet.send_time = Timestamp::Millis(send_time_ms);
+ res.sent_packet.sequence_number = sequence_number;
+ res.sent_packet.size = DataSize::Bytes(payload_size);
+ res.sent_packet.pacing_info = pacing_info;
+ return res;
+}
+
+class MockStreamFeedbackObserver : public webrtc::StreamFeedbackObserver {
+ public:
+ MOCK_METHOD(void,
+ OnPacketFeedbackVector,
+ (std::vector<StreamPacketInfo> packet_feedback_vector),
+ (override));
+};
+
+} // namespace
+
+class TransportFeedbackAdapterTest : public ::testing::Test {
+ public:
+ TransportFeedbackAdapterTest() : clock_(0) {}
+
+ virtual ~TransportFeedbackAdapterTest() {}
+
+ virtual void SetUp() { adapter_.reset(new TransportFeedbackAdapter()); }
+
+ virtual void TearDown() { adapter_.reset(); }
+
+ protected:
+ void OnReceivedEstimatedBitrate(uint32_t bitrate) {}
+
+ void OnReceivedRtcpReceiverReport(const ReportBlockList& report_blocks,
+ int64_t rtt,
+ int64_t now_ms) {}
+
+ void OnSentPacket(const PacketResult& packet_feedback) {
+ RtpPacketSendInfo packet_info;
+ packet_info.media_ssrc = kSsrc;
+ packet_info.transport_sequence_number =
+ packet_feedback.sent_packet.sequence_number;
+ packet_info.rtp_sequence_number = 0;
+ packet_info.length = packet_feedback.sent_packet.size.bytes();
+ packet_info.pacing_info = packet_feedback.sent_packet.pacing_info;
+ packet_info.packet_type = RtpPacketMediaType::kVideo;
+ adapter_->AddPacket(RtpPacketSendInfo(packet_info), 0u,
+ clock_.CurrentTime());
+ adapter_->ProcessSentPacket(rtc::SentPacket(
+ packet_feedback.sent_packet.sequence_number,
+ packet_feedback.sent_packet.send_time.ms(), rtc::PacketInfo()));
+ }
+
+ SimulatedClock clock_;
+ std::unique_ptr<TransportFeedbackAdapter> adapter_;
+};
+
+TEST_F(TransportFeedbackAdapterTest, AdaptsFeedbackAndPopulatesSendTimes) {
+ std::vector<PacketResult> packets;
+ packets.push_back(CreatePacket(100, 200, 0, 1500, kPacingInfo0));
+ packets.push_back(CreatePacket(110, 210, 1, 1500, kPacingInfo0));
+ packets.push_back(CreatePacket(120, 220, 2, 1500, kPacingInfo0));
+ packets.push_back(CreatePacket(130, 230, 3, 1500, kPacingInfo1));
+ packets.push_back(CreatePacket(140, 240, 4, 1500, kPacingInfo1));
+
+ for (const auto& packet : packets)
+ OnSentPacket(packet);
+
+ rtcp::TransportFeedback feedback;
+ feedback.SetBase(packets[0].sent_packet.sequence_number,
+ packets[0].receive_time);
+
+ for (const auto& packet : packets) {
+ EXPECT_TRUE(feedback.AddReceivedPacket(packet.sent_packet.sequence_number,
+ packet.receive_time));
+ }
+
+ feedback.Build();
+
+ auto result =
+ adapter_->ProcessTransportFeedback(feedback, clock_.CurrentTime());
+ ComparePacketFeedbackVectors(packets, result->packet_feedbacks);
+}
+
+TEST_F(TransportFeedbackAdapterTest, FeedbackVectorReportsUnreceived) {
+ std::vector<PacketResult> sent_packets = {
+ CreatePacket(100, 220, 0, 1500, kPacingInfo0),
+ CreatePacket(110, 210, 1, 1500, kPacingInfo0),
+ CreatePacket(120, 220, 2, 1500, kPacingInfo0),
+ CreatePacket(130, 230, 3, 1500, kPacingInfo0),
+ CreatePacket(140, 240, 4, 1500, kPacingInfo0),
+ CreatePacket(150, 250, 5, 1500, kPacingInfo0),
+ CreatePacket(160, 260, 6, 1500, kPacingInfo0)};
+
+ for (const auto& packet : sent_packets)
+ OnSentPacket(packet);
+
+ // Note: Important to include the last packet, as only unreceived packets in
+ // between received packets can be inferred.
+ std::vector<PacketResult> received_packets = {
+ sent_packets[0], sent_packets[2], sent_packets[6]};
+
+ rtcp::TransportFeedback feedback;
+ feedback.SetBase(received_packets[0].sent_packet.sequence_number,
+ received_packets[0].receive_time);
+
+ for (const auto& packet : received_packets) {
+ EXPECT_TRUE(feedback.AddReceivedPacket(packet.sent_packet.sequence_number,
+ packet.receive_time));
+ }
+
+ feedback.Build();
+
+ auto res = adapter_->ProcessTransportFeedback(feedback, clock_.CurrentTime());
+ ComparePacketFeedbackVectors(sent_packets, res->packet_feedbacks);
+}
+
+TEST_F(TransportFeedbackAdapterTest, HandlesDroppedPackets) {
+ std::vector<PacketResult> packets;
+ packets.push_back(CreatePacket(100, 200, 0, 1500, kPacingInfo0));
+ packets.push_back(CreatePacket(110, 210, 1, 1500, kPacingInfo1));
+ packets.push_back(CreatePacket(120, 220, 2, 1500, kPacingInfo2));
+ packets.push_back(CreatePacket(130, 230, 3, 1500, kPacingInfo3));
+ packets.push_back(CreatePacket(140, 240, 4, 1500, kPacingInfo4));
+
+ const uint16_t kSendSideDropBefore = 1;
+ const uint16_t kReceiveSideDropAfter = 3;
+
+ for (const auto& packet : packets) {
+ if (packet.sent_packet.sequence_number >= kSendSideDropBefore)
+ OnSentPacket(packet);
+ }
+
+ rtcp::TransportFeedback feedback;
+ feedback.SetBase(packets[0].sent_packet.sequence_number,
+ packets[0].receive_time);
+
+ for (const auto& packet : packets) {
+ if (packet.sent_packet.sequence_number <= kReceiveSideDropAfter) {
+ EXPECT_TRUE(feedback.AddReceivedPacket(packet.sent_packet.sequence_number,
+ packet.receive_time));
+ }
+ }
+
+ feedback.Build();
+
+ std::vector<PacketResult> expected_packets(
+ packets.begin() + kSendSideDropBefore,
+ packets.begin() + kReceiveSideDropAfter + 1);
+ // Packets that have timed out on the send side have lost their send-side
+ // information and will not be reported to observers, since we cannot tell
+ // whether they were sent on the same network route.
+
+ auto res = adapter_->ProcessTransportFeedback(feedback, clock_.CurrentTime());
+ ComparePacketFeedbackVectors(expected_packets, res->packet_feedbacks);
+}
+
+TEST_F(TransportFeedbackAdapterTest, SendTimeWrapsBothWays) {
+ TimeDelta kHighArrivalTime =
+ rtcp::TransportFeedback::kDeltaTick * (1 << 8) * ((1 << 23) - 1);
+ std::vector<PacketResult> packets;
+ packets.push_back(CreatePacket(kHighArrivalTime.ms() + 64, 210, 0, 1500,
+ PacedPacketInfo()));
+ packets.push_back(CreatePacket(kHighArrivalTime.ms() - 64, 210, 1, 1500,
+ PacedPacketInfo()));
+ packets.push_back(
+ CreatePacket(kHighArrivalTime.ms(), 220, 2, 1500, PacedPacketInfo()));
+
+ for (const auto& packet : packets)
+ OnSentPacket(packet);
+
+ for (size_t i = 0; i < packets.size(); ++i) {
+ std::unique_ptr<rtcp::TransportFeedback> feedback(
+ new rtcp::TransportFeedback());
+ feedback->SetBase(packets[i].sent_packet.sequence_number,
+ packets[i].receive_time);
+
+ EXPECT_TRUE(feedback->AddReceivedPacket(
+ packets[i].sent_packet.sequence_number, packets[i].receive_time));
+
+ rtc::Buffer raw_packet = feedback->Build();
+ feedback = rtcp::TransportFeedback::ParseFrom(raw_packet.data(),
+ raw_packet.size());
+
+ std::vector<PacketResult> expected_packets;
+ expected_packets.push_back(packets[i]);
+
+ auto res = adapter_->ProcessTransportFeedback(*feedback.get(),
+ clock_.CurrentTime());
+ ComparePacketFeedbackVectors(expected_packets, res->packet_feedbacks);
+ }
+}
+
+TEST_F(TransportFeedbackAdapterTest, HandlesArrivalReordering) {
+ std::vector<PacketResult> packets;
+ packets.push_back(CreatePacket(120, 200, 0, 1500, kPacingInfo0));
+ packets.push_back(CreatePacket(110, 210, 1, 1500, kPacingInfo0));
+ packets.push_back(CreatePacket(100, 220, 2, 1500, kPacingInfo0));
+
+ for (const auto& packet : packets)
+ OnSentPacket(packet);
+
+ rtcp::TransportFeedback feedback;
+ feedback.SetBase(packets[0].sent_packet.sequence_number,
+ packets[0].receive_time);
+
+ for (const auto& packet : packets) {
+ EXPECT_TRUE(feedback.AddReceivedPacket(packet.sent_packet.sequence_number,
+ packet.receive_time));
+ }
+
+ feedback.Build();
+
+ // The adapter keeps the packets ordered by sequence number (which is itself
+ // assigned by the order of transmission). Reordering by some other
+ // criterion, e.g. arrival time, is up to the observers.
+ auto res = adapter_->ProcessTransportFeedback(feedback, clock_.CurrentTime());
+ ComparePacketFeedbackVectors(packets, res->packet_feedbacks);
+}
+
+TEST_F(TransportFeedbackAdapterTest, TimestampDeltas) {
+ std::vector<PacketResult> sent_packets;
+ // TODO(srte): Consider using us resolution in the constants.
+ const TimeDelta kSmallDelta = (rtcp::TransportFeedback::kDeltaTick * 0xFF)
+ .RoundDownTo(TimeDelta::Millis(1));
+ const TimeDelta kLargePositiveDelta = (rtcp::TransportFeedback::kDeltaTick *
+ std::numeric_limits<int16_t>::max())
+ .RoundDownTo(TimeDelta::Millis(1));
+ const TimeDelta kLargeNegativeDelta = (rtcp::TransportFeedback::kDeltaTick *
+ std::numeric_limits<int16_t>::min())
+ .RoundDownTo(TimeDelta::Millis(1));
+
+ PacketResult packet_feedback;
+ packet_feedback.sent_packet.sequence_number = 1;
+ packet_feedback.sent_packet.send_time = Timestamp::Millis(100);
+ packet_feedback.receive_time = Timestamp::Millis(200);
+ packet_feedback.sent_packet.size = DataSize::Bytes(1500);
+ sent_packets.push_back(packet_feedback);
+
+ // TODO(srte): This rounding maintains previous behavior, but should not be
+ // required.
+ packet_feedback.sent_packet.send_time += kSmallDelta;
+ packet_feedback.receive_time += kSmallDelta;
+ ++packet_feedback.sent_packet.sequence_number;
+ sent_packets.push_back(packet_feedback);
+
+ packet_feedback.sent_packet.send_time += kLargePositiveDelta;
+ packet_feedback.receive_time += kLargePositiveDelta;
+ ++packet_feedback.sent_packet.sequence_number;
+ sent_packets.push_back(packet_feedback);
+
+ packet_feedback.sent_packet.send_time += kLargeNegativeDelta;
+ packet_feedback.receive_time += kLargeNegativeDelta;
+ ++packet_feedback.sent_packet.sequence_number;
+ sent_packets.push_back(packet_feedback);
+
+ // Too large a delta - will need two feedback messages.
+ packet_feedback.sent_packet.send_time +=
+ kLargePositiveDelta + TimeDelta::Millis(1);
+ packet_feedback.receive_time += kLargePositiveDelta + TimeDelta::Millis(1);
+ ++packet_feedback.sent_packet.sequence_number;
+
+ // Packets will be added to send history.
+ for (const auto& packet : sent_packets)
+ OnSentPacket(packet);
+ OnSentPacket(packet_feedback);
+
+ // Create expected feedback and send into adapter.
+ std::unique_ptr<rtcp::TransportFeedback> feedback(
+ new rtcp::TransportFeedback());
+ feedback->SetBase(sent_packets[0].sent_packet.sequence_number,
+ sent_packets[0].receive_time);
+
+ for (const auto& packet : sent_packets) {
+ EXPECT_TRUE(feedback->AddReceivedPacket(packet.sent_packet.sequence_number,
+ packet.receive_time));
+ }
+ EXPECT_FALSE(
+ feedback->AddReceivedPacket(packet_feedback.sent_packet.sequence_number,
+ packet_feedback.receive_time));
+
+ rtc::Buffer raw_packet = feedback->Build();
+ feedback =
+ rtcp::TransportFeedback::ParseFrom(raw_packet.data(), raw_packet.size());
+
+ std::vector<PacketResult> received_feedback;
+
+ EXPECT_TRUE(feedback.get() != nullptr);
+ auto res =
+ adapter_->ProcessTransportFeedback(*feedback.get(), clock_.CurrentTime());
+ ComparePacketFeedbackVectors(sent_packets, res->packet_feedbacks);
+
+ // Create a new feedback message and add the trailing item.
+ feedback.reset(new rtcp::TransportFeedback());
+ feedback->SetBase(packet_feedback.sent_packet.sequence_number,
+ packet_feedback.receive_time);
+ EXPECT_TRUE(
+ feedback->AddReceivedPacket(packet_feedback.sent_packet.sequence_number,
+ packet_feedback.receive_time));
+ raw_packet = feedback->Build();
+ feedback =
+ rtcp::TransportFeedback::ParseFrom(raw_packet.data(), raw_packet.size());
+
+ EXPECT_TRUE(feedback.get() != nullptr);
+ {
+ auto res = adapter_->ProcessTransportFeedback(*feedback.get(),
+ clock_.CurrentTime());
+ std::vector<PacketResult> expected_packets;
+ expected_packets.push_back(packet_feedback);
+ ComparePacketFeedbackVectors(expected_packets, res->packet_feedbacks);
+ }
+}
+
+TEST_F(TransportFeedbackAdapterTest, IgnoreDuplicatePacketSentCalls) {
+ auto packet = CreatePacket(100, 200, 0, 1500, kPacingInfo0);
+
+ // Add a packet and then mark it as sent.
+ RtpPacketSendInfo packet_info;
+ packet_info.media_ssrc = kSsrc;
+ packet_info.transport_sequence_number = packet.sent_packet.sequence_number;
+ packet_info.length = packet.sent_packet.size.bytes();
+ packet_info.pacing_info = packet.sent_packet.pacing_info;
+ packet_info.packet_type = RtpPacketMediaType::kVideo;
+ adapter_->AddPacket(packet_info, 0u, clock_.CurrentTime());
+ absl::optional<SentPacket> sent_packet = adapter_->ProcessSentPacket(
+ rtc::SentPacket(packet.sent_packet.sequence_number,
+ packet.sent_packet.send_time.ms(), rtc::PacketInfo()));
+ EXPECT_TRUE(sent_packet.has_value());
+
+ // Call ProcessSentPacket() again with the same sequence number. This packet
+ // has already been marked as sent and the call should be ignored.
+ absl::optional<SentPacket> duplicate_packet = adapter_->ProcessSentPacket(
+ rtc::SentPacket(packet.sent_packet.sequence_number,
+ packet.sent_packet.send_time.ms(), rtc::PacketInfo()));
+ EXPECT_FALSE(duplicate_packet.has_value());
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/congestion_controller/rtp/transport_feedback_demuxer.cc b/third_party/libwebrtc/modules/congestion_controller/rtp/transport_feedback_demuxer.cc
new file mode 100644
index 0000000000..50987b2302
--- /dev/null
+++ b/third_party/libwebrtc/modules/congestion_controller/rtp/transport_feedback_demuxer.cc
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/congestion_controller/rtp/transport_feedback_demuxer.h"
+#include "absl/algorithm/container.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/transport_feedback.h"
+
+namespace webrtc {
+namespace {
+static const size_t kMaxPacketsInHistory = 5000;
+}
+
+TransportFeedbackDemuxer::TransportFeedbackDemuxer() {
+ // In case the construction thread is different from where the registration
+ // and callbacks occur, detach from the construction thread.
+ observer_checker_.Detach();
+}
+
+void TransportFeedbackDemuxer::RegisterStreamFeedbackObserver(
+ std::vector<uint32_t> ssrcs,
+ StreamFeedbackObserver* observer) {
+ RTC_DCHECK_RUN_ON(&observer_checker_);
+ RTC_DCHECK(observer);
+ RTC_DCHECK(absl::c_find_if(observers_, [=](const auto& pair) {
+ return pair.second == observer;
+ }) == observers_.end());
+ observers_.push_back({ssrcs, observer});
+}
+
+void TransportFeedbackDemuxer::DeRegisterStreamFeedbackObserver(
+ StreamFeedbackObserver* observer) {
+ RTC_DCHECK_RUN_ON(&observer_checker_);
+ RTC_DCHECK(observer);
+ const auto it = absl::c_find_if(
+ observers_, [=](const auto& pair) { return pair.second == observer; });
+ RTC_DCHECK(it != observers_.end());
+ observers_.erase(it);
+}
+
+void TransportFeedbackDemuxer::AddPacket(const RtpPacketSendInfo& packet_info) {
+ RTC_DCHECK_RUN_ON(&observer_checker_);
+
+ StreamFeedbackObserver::StreamPacketInfo info;
+ info.ssrc = packet_info.media_ssrc;
+ info.rtp_sequence_number = packet_info.rtp_sequence_number;
+ info.received = false;
+ info.is_retransmission =
+ packet_info.packet_type == RtpPacketMediaType::kRetransmission;
+ history_.insert(
+ {seq_num_unwrapper_.Unwrap(packet_info.transport_sequence_number), info});
+
+ while (history_.size() > kMaxPacketsInHistory) {
+ history_.erase(history_.begin());
+ }
+}
+
+void TransportFeedbackDemuxer::OnTransportFeedback(
+ const rtcp::TransportFeedback& feedback) {
+ RTC_DCHECK_RUN_ON(&observer_checker_);
+
+ std::vector<StreamFeedbackObserver::StreamPacketInfo> stream_feedbacks;
+ for (const auto& packet : feedback.GetAllPackets()) {
+ int64_t seq_num =
+ seq_num_unwrapper_.UnwrapWithoutUpdate(packet.sequence_number());
+ auto it = history_.find(seq_num);
+ if (it != history_.end()) {
+ auto packet_info = it->second;
+ packet_info.received = packet.received();
+ stream_feedbacks.push_back(std::move(packet_info));
+ if (packet.received())
+ history_.erase(it);
+ }
+ }
+
+ for (auto& observer : observers_) {
+ std::vector<StreamFeedbackObserver::StreamPacketInfo> selected_feedback;
+ for (const auto& packet_info : stream_feedbacks) {
+ if (absl::c_count(observer.first, packet_info.ssrc) > 0) {
+ selected_feedback.push_back(packet_info);
+ }
+ }
+ if (!selected_feedback.empty()) {
+ observer.second->OnPacketFeedbackVector(std::move(selected_feedback));
+ }
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/congestion_controller/rtp/transport_feedback_demuxer.h b/third_party/libwebrtc/modules/congestion_controller/rtp/transport_feedback_demuxer.h
new file mode 100644
index 0000000000..7f4f5750d2
--- /dev/null
+++ b/third_party/libwebrtc/modules/congestion_controller/rtp/transport_feedback_demuxer.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef MODULES_CONGESTION_CONTROLLER_RTP_TRANSPORT_FEEDBACK_DEMUXER_H_
+#define MODULES_CONGESTION_CONTROLLER_RTP_TRANSPORT_FEEDBACK_DEMUXER_H_
+
+#include <map>
+#include <utility>
+#include <vector>
+
+#include "api/sequence_checker.h"
+#include "modules/include/module_common_types_public.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "rtc_base/system/no_unique_address.h"
+
+namespace webrtc {
+
+// Implementation of StreamFeedbackProvider that provides a way for
+// implementations of StreamFeedbackObserver to register for feedback callbacks
+// for a given set of SSRCs.
+// Registration methods need to be called from the same execution context
+// (thread or task queue) and callbacks to
+// StreamFeedbackObserver::OnPacketFeedbackVector will be made in that same
+// context.
+// TODO(tommi): This appears to be the only implementation of this interface.
+// Do we need the interface?
+class TransportFeedbackDemuxer final : public StreamFeedbackProvider {
+ public:
+ TransportFeedbackDemuxer();
+
+ // Implements StreamFeedbackProvider interface
+ void RegisterStreamFeedbackObserver(
+ std::vector<uint32_t> ssrcs,
+ StreamFeedbackObserver* observer) override;
+ void DeRegisterStreamFeedbackObserver(
+ StreamFeedbackObserver* observer) override;
+ void AddPacket(const RtpPacketSendInfo& packet_info);
+ void OnTransportFeedback(const rtcp::TransportFeedback& feedback);
+
+ private:
+ RTC_NO_UNIQUE_ADDRESS SequenceChecker observer_checker_;
+ SequenceNumberUnwrapper seq_num_unwrapper_ RTC_GUARDED_BY(&observer_checker_);
+ std::map<int64_t, StreamFeedbackObserver::StreamPacketInfo> history_
+ RTC_GUARDED_BY(&observer_checker_);
+
+ // Maps a set of SSRCs to the corresponding observer. Vectors are used rather
+ // than set/map to ensure that the processing order is consistent independent
+ // of the randomized SSRCs.
+ std::vector<std::pair<std::vector<uint32_t>, StreamFeedbackObserver*>>
+ observers_ RTC_GUARDED_BY(&observer_checker_);
+};
+} // namespace webrtc
+
+#endif // MODULES_CONGESTION_CONTROLLER_RTP_TRANSPORT_FEEDBACK_DEMUXER_H_
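+
+// Minimal wiring sketch (hypothetical call sites; in production the demuxer
+// is driven by the transport controller): an observer sees only feedback
+// for the SSRCs it registered.
+//
+//   TransportFeedbackDemuxer demuxer;
+//   MyObserver observer;  // Implements StreamFeedbackObserver (assumed).
+//   demuxer.RegisterStreamFeedbackObserver({ssrc}, &observer);
+//   demuxer.AddPacket(packet_info);         // Before each send.
+//   demuxer.OnTransportFeedback(feedback);  // On each RTCP feedback message.
+//   demuxer.DeRegisterStreamFeedbackObserver(&observer);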
diff --git a/third_party/libwebrtc/modules/congestion_controller/rtp/transport_feedback_demuxer_unittest.cc b/third_party/libwebrtc/modules/congestion_controller/rtp/transport_feedback_demuxer_unittest.cc
new file mode 100644
index 0000000000..52d8018bff
--- /dev/null
+++ b/third_party/libwebrtc/modules/congestion_controller/rtp/transport_feedback_demuxer_unittest.cc
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/congestion_controller/rtp/transport_feedback_demuxer.h"
+
+#include "modules/rtp_rtcp/source/rtcp_packet/transport_feedback.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+using ::testing::AllOf;
+using ::testing::ElementsAre;
+using ::testing::Field;
+using PacketInfo = StreamFeedbackObserver::StreamPacketInfo;
+
+static constexpr uint32_t kSsrc = 8492;
+
+class MockStreamFeedbackObserver : public webrtc::StreamFeedbackObserver {
+ public:
+ MOCK_METHOD(void,
+ OnPacketFeedbackVector,
+ (std::vector<StreamPacketInfo> packet_feedback_vector),
+ (override));
+};
+
+RtpPacketSendInfo CreatePacket(uint32_t ssrc,
+ uint16_t rtp_sequence_number,
+ int64_t transport_sequence_number,
+ bool is_retransmission) {
+ RtpPacketSendInfo res;
+ res.media_ssrc = ssrc;
+ res.transport_sequence_number = transport_sequence_number;
+ res.rtp_sequence_number = rtp_sequence_number;
+ res.packet_type = is_retransmission ? RtpPacketMediaType::kRetransmission
+ : RtpPacketMediaType::kVideo;
+ return res;
+}
+} // namespace
+
+TEST(TransportFeedbackDemuxerTest, ObserverSanity) {
+ TransportFeedbackDemuxer demuxer;
+ MockStreamFeedbackObserver mock;
+ demuxer.RegisterStreamFeedbackObserver({kSsrc}, &mock);
+
+ const uint16_t kRtpStartSeq = 55;
+ const int64_t kTransportStartSeq = 1;
+ demuxer.AddPacket(CreatePacket(kSsrc, kRtpStartSeq, kTransportStartSeq,
+ /*is_retransmission=*/false));
+ demuxer.AddPacket(CreatePacket(kSsrc, kRtpStartSeq + 1,
+ kTransportStartSeq + 1,
+ /*is_retransmission=*/false));
+ demuxer.AddPacket(CreatePacket(
+ kSsrc, kRtpStartSeq + 2, kTransportStartSeq + 2, /*is_retransmission=*/true));
+
+ rtcp::TransportFeedback feedback;
+ feedback.SetBase(kTransportStartSeq, Timestamp::Millis(1));
+ ASSERT_TRUE(
+ feedback.AddReceivedPacket(kTransportStartSeq, Timestamp::Millis(1)));
+ // Drop middle packet.
+ ASSERT_TRUE(
+ feedback.AddReceivedPacket(kTransportStartSeq + 2, Timestamp::Millis(3)));
+
+ EXPECT_CALL(
+ mock, OnPacketFeedbackVector(ElementsAre(
+ AllOf(Field(&PacketInfo::received, true),
+ Field(&PacketInfo::ssrc, kSsrc),
+ Field(&PacketInfo::rtp_sequence_number, kRtpStartSeq),
+ Field(&PacketInfo::is_retransmission, false)),
+ AllOf(Field(&PacketInfo::received, false),
+ Field(&PacketInfo::ssrc, kSsrc),
+ Field(&PacketInfo::rtp_sequence_number, kRtpStartSeq + 1),
+ Field(&PacketInfo::is_retransmission, false)),
+ AllOf(Field(&PacketInfo::received, true),
+ Field(&PacketInfo::ssrc, kSsrc),
+ Field(&PacketInfo::rtp_sequence_number, kRtpStartSeq + 2),
+ Field(&PacketInfo::is_retransmission, true)))));
+ demuxer.OnTransportFeedback(feedback);
+
+ demuxer.DeRegisterStreamFeedbackObserver(&mock);
+
+ demuxer.AddPacket(
+ CreatePacket(kSsrc, kRtpStartSeq + 3, kTransportStartSeq + 3, false));
+ rtcp::TransportFeedback second_feedback;
+ second_feedback.SetBase(kTransportStartSeq + 3, Timestamp::Millis(4));
+ ASSERT_TRUE(second_feedback.AddReceivedPacket(kTransportStartSeq + 3,
+ Timestamp::Millis(4)));
+
+ EXPECT_CALL(mock, OnPacketFeedbackVector).Times(0);
+ demuxer.OnTransportFeedback(second_feedback);
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/congestion_controller/rtp/transport_feedback_gn/moz.build b/third_party/libwebrtc/modules/congestion_controller/rtp/transport_feedback_gn/moz.build
new file mode 100644
index 0000000000..b4d2de8176
--- /dev/null
+++ b/third_party/libwebrtc/modules/congestion_controller/rtp/transport_feedback_gn/moz.build
@@ -0,0 +1,215 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/congestion_controller/rtp/transport_feedback_adapter.cc",
+ "/third_party/libwebrtc/modules/congestion_controller/rtp/transport_feedback_demuxer.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "GLESv2",
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "dl",
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("transport_feedback_gn")
diff --git a/third_party/libwebrtc/modules/desktop_capture/BUILD.gn b/third_party/libwebrtc/modules/desktop_capture/BUILD.gn
new file mode 100644
index 0000000000..8010db0fba
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/BUILD.gn
@@ -0,0 +1,752 @@
+# Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+import("//build/config/linux/gtk/gtk.gni")
+import("//build/config/linux/pkg_config.gni")
+import("//build/config/ui.gni")
+import("//tools/generate_stubs/rules.gni")
+import("../../webrtc.gni")
+
+use_desktop_capture_differ_sse2 = target_cpu == "x86" || target_cpu == "x64"
+
+config("x11_config") {
+ if (rtc_use_x11_extensions) {
+ defines = [ "WEBRTC_USE_X11" ]
+ }
+}
+
+rtc_library("primitives") {
+ visibility = [ "*" ]
+ sources = [
+ "desktop_capture_types.h",
+ "desktop_frame.cc",
+ "desktop_frame.h",
+ "desktop_geometry.cc",
+ "desktop_geometry.h",
+ "desktop_region.cc",
+ "desktop_region.h",
+ "shared_desktop_frame.cc",
+ "shared_desktop_frame.h",
+ "shared_memory.cc",
+ "shared_memory.h",
+ ]
+
+ deps = [
+ "../../api:scoped_refptr",
+ "../../rtc_base:checks",
+ "../../rtc_base:refcount",
+ "../../rtc_base/system:rtc_export",
+ "//third_party/libyuv",
+ ]
+ if (build_with_mozilla) {
+ deps -= [ "//third_party/libyuv" ]
+ include_dirs = [
+ "/media/libyuv",
+ "/media/libyuv/libyuv/include",
+ ]
+ }
+
+ if (!build_with_mozilla) {
+ deps += [ "../../rtc_base" ] # TODO(kjellander): Cleanup in
+ # bugs.webrtc.org/3806.
+ }
+}
+
+if (rtc_include_tests) {
+ rtc_library("desktop_capture_modules_tests") {
+ testonly = true
+
+ defines = []
+ sources = []
+ deps = [
+ ":desktop_capture",
+ "../../api:function_view",
+ "../../api:scoped_refptr",
+ "../../rtc_base:checks",
+ "../../rtc_base:logging",
+ "../../rtc_base:platform_thread",
+ "../../rtc_base:random",
+ "../../rtc_base:timeutils",
+ ]
+ if (rtc_desktop_capture_supported) {
+ deps += [
+ ":desktop_capture_mock",
+ ":primitives",
+ ":screen_drawer",
+ "../../rtc_base",
+ "../../rtc_base/third_party/base64",
+ "../../system_wrappers",
+ "../../test:test_support",
+ "../../test:video_test_support",
+ ]
+ sources += [
+ "screen_capturer_integration_test.cc",
+ "screen_drawer_unittest.cc",
+ "window_finder_unittest.cc",
+ ]
+
+ if ((is_linux || is_chromeos) && rtc_use_pipewire) {
+ configs += [ ":gio" ]
+ }
+
+ public_configs = [ ":x11_config" ]
+
+ if (is_win) {
+ deps += [ "../../rtc_base/win:windows_version" ]
+ }
+ }
+ }
+
+ rtc_library("desktop_capture_unittests") {
+ testonly = true
+
+ defines = []
+ sources = [
+ "blank_detector_desktop_capturer_wrapper_unittest.cc",
+ "cropped_desktop_frame_unittest.cc",
+ "desktop_and_cursor_composer_unittest.cc",
+ "desktop_capturer_differ_wrapper_unittest.cc",
+ "desktop_frame_rotation_unittest.cc",
+ "desktop_frame_unittest.cc",
+ "desktop_geometry_unittest.cc",
+ "desktop_region_unittest.cc",
+ "differ_block_unittest.cc",
+ "fallback_desktop_capturer_wrapper_unittest.cc",
+ "mouse_cursor_monitor_unittest.cc",
+ "rgba_color_unittest.cc",
+ "test_utils.cc",
+ "test_utils.h",
+ "test_utils_unittest.cc",
+ ]
+
+ if ((is_linux || is_chromeos) && rtc_use_pipewire) {
+ configs += [ ":gio" ]
+ }
+
+ deps = [
+ ":desktop_capture",
+ ":desktop_capture_mock",
+ ":primitives",
+ "../../rtc_base:checks",
+ "../../rtc_base:logging",
+ "../../rtc_base:macromagic",
+ "../../rtc_base:random",
+ "../../rtc_base:timeutils",
+
+ # TODO(bugs.webrtc.org/9987): Remove this dep on rtc_base:rtc_base once
+ # rtc_base:threading is fully defined.
+ "../../rtc_base:rtc_base",
+ "../../rtc_base:task_queue_for_test",
+ "../../rtc_base:threading",
+ "../../system_wrappers",
+ "../../test:test_support",
+ ]
+
+ if (is_win) {
+ sources += [
+ "win/cursor_unittest.cc",
+ "win/cursor_unittest_resources.h",
+ "win/cursor_unittest_resources.rc",
+ "win/screen_capture_utils_unittest.cc",
+ "win/screen_capturer_win_directx_unittest.cc",
+ "win/test_support/test_window.cc",
+ "win/test_support/test_window.h",
+ "win/window_capture_utils_unittest.cc",
+ ]
+ deps += [
+ "../../rtc_base/win:scoped_com_initializer",
+ "../../rtc_base/win:windows_version",
+ ]
+ }
+
+ if (rtc_desktop_capture_supported) {
+ sources += [
+ "screen_capturer_helper_unittest.cc",
+ "screen_capturer_unittest.cc",
+ "window_capturer_unittest.cc",
+ ]
+ if (is_mac) {
+ sources += [ "screen_capturer_mac_unittest.cc" ]
+ }
+ if (rtc_enable_win_wgc) {
+ sources += [
+ "win/wgc_capture_source_unittest.cc",
+ "win/wgc_capturer_win_unittest.cc",
+ ]
+ }
+ deps += [
+ ":desktop_capture_mock",
+ "../../system_wrappers:metrics",
+ ]
+ public_configs = [ ":x11_config" ]
+ }
+ }
+
+ rtc_library("screen_drawer") {
+ testonly = true
+
+ sources = [
+ "screen_drawer.cc",
+ "screen_drawer.h",
+ ]
+
+ if (is_linux || is_chromeos) {
+ sources += [ "screen_drawer_linux.cc" ]
+ libs = [ "X11" ]
+ }
+
+ if (is_mac) {
+ sources += [ "screen_drawer_mac.cc" ]
+ }
+
+ if (is_win) {
+ sources += [ "screen_drawer_win.cc" ]
+ }
+
+ deps = [
+ ":desktop_capture",
+ ":primitives",
+ "../../api:scoped_refptr",
+ "../../rtc_base:checks",
+ "../../rtc_base:logging",
+ "../../system_wrappers",
+ ]
+ absl_deps = [ "//third_party/abseil-cpp/absl/strings" ]
+
+ if (is_posix || is_fuchsia) {
+ sources += [
+ "screen_drawer_lock_posix.cc",
+ "screen_drawer_lock_posix.h",
+ ]
+ }
+ }
+
+ rtc_library("desktop_capture_mock") {
+ testonly = true
+
+ sources = [
+ "mock_desktop_capturer_callback.cc",
+ "mock_desktop_capturer_callback.h",
+ ]
+
+ if ((is_linux || is_chromeos) && rtc_use_pipewire) {
+ configs += [ ":gio" ]
+ }
+
+ deps = [
+ ":desktop_capture",
+ ":primitives",
+ "../../test:test_support",
+ ]
+ }
+}
+
+if (is_linux || is_chromeos) {
+ if (rtc_use_pipewire) {
+ defines = [ "WEBRTC_USE_PIPEWIRE" ]
+ if (!build_with_mozilla) {
+ pkg_config("gio") {
+ packages = [
+ "gio-2.0",
+ "gio-unix-2.0",
+ ]
+ }
+
+ pkg_config("pipewire") {
+ packages = [ "libpipewire-0.3" ]
+ if (!rtc_link_pipewire) {
+ ignore_libs = true
+ }
+ }
+ }
+
+    if (!build_with_mozilla) {
+      pkg_config("gbm") {
+        packages = [ "gbm" ]
+      }
+      pkg_config("egl") {
+        packages = [ "egl" ]
+      }
+      pkg_config("epoxy") {
+        packages = [ "epoxy" ]
+        ignore_libs = true
+      }
+      pkg_config("libdrm") {
+        packages = [ "libdrm" ]
+        if (!rtc_link_pipewire) {
+          ignore_libs = true
+        }
+      }
+    }
+
+ if (!rtc_link_pipewire) {
+ # When libpipewire is not directly linked, use stubs to allow for dlopening of
+ # the binary.
+ if (!build_with_mozilla) {
+ generate_stubs("pipewire_stubs") {
+ configs = [
+ "../../:common_config",
+ ":pipewire",
+ ":libdrm",
+ ]
+ deps = [ "../../rtc_base" ]
+ extra_header = "linux/wayland/pipewire_stub_header.fragment"
+ logging_function = "RTC_LOG(LS_VERBOSE)"
+ logging_include = "rtc_base/logging.h"
+ output_name = "linux/wayland/pipewire_stubs"
+ path_from_source = "modules/desktop_capture/linux/wayland"
+ sigs = [
+ "linux/wayland/pipewire.sigs",
+ "linux/wayland/drm.sigs",
+ ]
+ if (!build_with_chromium) {
+ macro_include = "rtc_base/system/no_cfi_icall.h"
+ macro_deps = [ "../../rtc_base/system:no_cfi_icall" ]
+ }
+ }
+ }
+ }
+
+ config("pipewire_config") {
+ if (!build_with_mozilla) {
+ defines = [ "WEBRTC_USE_PIPEWIRE" ]
+ }
+ if (!rtc_link_pipewire) {
+ defines += [ "WEBRTC_DLOPEN_PIPEWIRE" ]
+ }
+
+ # Chromecast build config overrides `WEBRTC_USE_PIPEWIRE` even when
+ # `rtc_use_pipewire` is not set, which causes pipewire_config to not be
+ # included in targets. More details in: webrtc:13898
+ if (is_linux && !is_castos) {
+ defines += [ "WEBRTC_USE_GIO" ]
+ }
+ }
+ }
+}
+
+rtc_library("desktop_capture") {
+ visibility = [ "*" ]
+ defines = []
+ include_dirs = []
+ if (build_with_mozilla) {
+ include_dirs += [ "/media/libyuv/libyuv/include" ]
+ }
+ deps = []
+ public_configs = [ ":x11_config" ]
+ sources = [
+ "blank_detector_desktop_capturer_wrapper.cc",
+ "blank_detector_desktop_capturer_wrapper.h",
+ "cropped_desktop_frame.cc",
+ "cropped_desktop_frame.h",
+ "cropping_window_capturer.cc",
+ "cropping_window_capturer.h",
+ "desktop_and_cursor_composer.cc",
+ "desktop_and_cursor_composer.h",
+ "desktop_capture_metrics_helper.cc",
+ "desktop_capture_metrics_helper.h",
+ "desktop_capture_options.cc",
+ "desktop_capture_options.h",
+ "desktop_capturer.cc",
+ "desktop_capturer.h",
+ "desktop_capturer_differ_wrapper.cc",
+ "desktop_capturer_differ_wrapper.h",
+ "desktop_capturer_wrapper.cc",
+ "desktop_capturer_wrapper.h",
+ "desktop_frame_generator.cc",
+ "desktop_frame_generator.h",
+ "desktop_frame_rotation.cc",
+ "desktop_frame_rotation.h",
+ "differ_block.cc",
+ "differ_block.h",
+ "fake_desktop_capturer.cc",
+ "fake_desktop_capturer.h",
+ "fallback_desktop_capturer_wrapper.cc",
+ "fallback_desktop_capturer_wrapper.h",
+ "full_screen_application_handler.cc",
+ "full_screen_application_handler.h",
+ "full_screen_window_detector.cc",
+ "full_screen_window_detector.h",
+ "mouse_cursor.cc",
+ "mouse_cursor.h",
+ "mouse_cursor_monitor.h",
+ "resolution_tracker.cc",
+ "resolution_tracker.h",
+ "rgba_color.cc",
+ "rgba_color.h",
+ "screen_capture_frame_queue.h",
+ "screen_capturer_helper.cc",
+ "screen_capturer_helper.h",
+ "window_finder.cc",
+ "window_finder.h",
+ ]
+ if (is_linux && !is_castos && rtc_use_pipewire) {
+ sources += [ "desktop_capture_metadata.h" ]
+ }
+ if (is_mac) {
+ sources += [
+ "mac/desktop_configuration.h",
+ "mac/desktop_configuration_monitor.cc",
+ "mac/desktop_configuration_monitor.h",
+ "mac/full_screen_mac_application_handler.cc",
+ "mac/full_screen_mac_application_handler.h",
+ "mac/window_list_utils.cc",
+ "mac/window_list_utils.h",
+ ]
+ deps += [ ":desktop_capture_objc" ]
+ }
+ if (rtc_use_x11_extensions || rtc_use_pipewire) {
+ include_dirs += [ "/third_party/libwebrtc/third_party/pipewire" ]
+ sources += [
+ "mouse_cursor_monitor_linux.cc",
+ "screen_capturer_linux.cc",
+ "window_capturer_linux.cc",
+ ]
+ }
+
+ if (rtc_use_x11_extensions) {
+ sources += [
+ "linux/x11/mouse_cursor_monitor_x11.cc",
+ "linux/x11/mouse_cursor_monitor_x11.h",
+ "linux/x11/screen_capturer_x11.cc",
+ "linux/x11/screen_capturer_x11.h",
+ "linux/x11/shared_x_display.cc",
+ "linux/x11/shared_x_display.h",
+ "linux/x11/window_capturer_x11.cc",
+ "linux/x11/window_capturer_x11.h",
+ "linux/x11/window_finder_x11.cc",
+ "linux/x11/window_finder_x11.h",
+ "linux/x11/window_list_utils.cc",
+ "linux/x11/window_list_utils.h",
+ "linux/x11/x_atom_cache.cc",
+ "linux/x11/x_atom_cache.h",
+ "linux/x11/x_error_trap.cc",
+ "linux/x11/x_error_trap.h",
+ "linux/x11/x_server_pixel_buffer.cc",
+ "linux/x11/x_server_pixel_buffer.h",
+ "linux/x11/x_window_property.cc",
+ "linux/x11/x_window_property.h",
+ ]
+ libs = [
+ "X11",
+ "Xcomposite",
+ "Xdamage",
+ "Xext",
+ "Xfixes",
+ "Xrender",
+ "Xrandr",
+ "Xtst",
+ ]
+ }
+
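+  # Platforms with no real capturer implementation fall back to the null
+  # capturers below.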
+ if (!is_win && !is_mac && !rtc_use_x11_extensions && !rtc_use_pipewire &&
+ !is_fuchsia) {
+ sources += [
+ "mouse_cursor_monitor_null.cc",
+ "screen_capturer_null.cc",
+ "window_capturer_null.cc",
+ ]
+ }
+
+ deps += [
+ ":primitives",
+ "../../api:function_view",
+ "../../api:make_ref_counted",
+ "../../api:refcountedbase",
+ "../../api:scoped_refptr",
+ "../../api:sequence_checker",
+ "../../rtc_base:checks",
+ "../../rtc_base:event_tracer",
+ "../../rtc_base:logging",
+ "../../rtc_base:macromagic",
+ "../../rtc_base:random",
+ "../../rtc_base:stringutils",
+ "../../rtc_base:timeutils",
+ "../../rtc_base/synchronization:mutex",
+ "../../rtc_base/system:arch",
+ "../../rtc_base/system:no_unique_address",
+ "../../rtc_base/system:rtc_export",
+ "../../system_wrappers",
+ "../../system_wrappers:metrics",
+ ]
+
+ if (is_fuchsia) {
+ sources += [
+ "mouse_cursor_monitor_null.cc",
+ "screen_capturer_fuchsia.cc",
+ "screen_capturer_fuchsia.h",
+ "window_capturer_null.cc",
+ ]
+ deps += [
+ "../../rtc_base:divide_round",
+ "//third_party/fuchsia-sdk/sdk/fidl/fuchsia.sysmem",
+ "//third_party/fuchsia-sdk/sdk/fidl/fuchsia.ui.composition",
+ "//third_party/fuchsia-sdk/sdk/fidl/fuchsia.ui.scenic",
+ "//third_party/fuchsia-sdk/sdk/pkg/scenic_cpp",
+ "//third_party/fuchsia-sdk/sdk/pkg/sys_cpp",
+ ]
+ }
+
+ if (is_win) {
+ sources += [
+ "cropping_window_capturer_win.cc",
+ "desktop_frame_win.cc",
+ "desktop_frame_win.h",
+ "mouse_cursor_monitor_win.cc",
+ "screen_capturer_win.cc",
+ "win/cursor.cc",
+ "win/cursor.h",
+ "win/d3d_device.cc",
+ "win/d3d_device.h",
+ "win/desktop.cc",
+ "win/desktop.h",
+ "win/desktop_capture_utils.cc",
+ "win/desktop_capture_utils.h",
+ "win/display_configuration_monitor.cc",
+ "win/display_configuration_monitor.h",
+ "win/dxgi_adapter_duplicator.cc",
+ "win/dxgi_adapter_duplicator.h",
+ "win/dxgi_context.cc",
+ "win/dxgi_context.h",
+ "win/dxgi_duplicator_controller.cc",
+ "win/dxgi_duplicator_controller.h",
+ "win/dxgi_frame.cc",
+ "win/dxgi_frame.h",
+ "win/dxgi_output_duplicator.cc",
+ "win/dxgi_output_duplicator.h",
+ "win/dxgi_texture.cc",
+ "win/dxgi_texture.h",
+ "win/dxgi_texture_mapping.cc",
+ "win/dxgi_texture_mapping.h",
+ "win/dxgi_texture_staging.cc",
+ "win/dxgi_texture_staging.h",
+ "win/full_screen_win_application_handler.cc",
+ "win/full_screen_win_application_handler.h",
+ "win/scoped_gdi_object.h",
+ "win/scoped_thread_desktop.cc",
+ "win/scoped_thread_desktop.h",
+ "win/screen_capture_utils.cc",
+ "win/screen_capture_utils.h",
+ "win/screen_capturer_win_directx.cc",
+ "win/screen_capturer_win_directx.h",
+ "win/screen_capturer_win_gdi.cc",
+ "win/screen_capturer_win_gdi.h",
+ "win/screen_capturer_win_magnifier.cc",
+ "win/screen_capturer_win_magnifier.h",
+ "win/selected_window_context.cc",
+ "win/selected_window_context.h",
+ "win/window_capture_utils.cc",
+ "win/window_capture_utils.h",
+ "win/window_capturer_win_gdi.cc",
+ "win/window_capturer_win_gdi.h",
+ "window_capturer_win.cc",
+ "window_finder_win.cc",
+ "window_finder_win.h",
+ ]
+ libs = [
+ "d3d11.lib",
+ "dxgi.lib",
+ ]
+ deps += [
+ "../../rtc_base:win32",
+ "../../rtc_base/win:create_direct3d_device",
+ "../../rtc_base/win:get_activation_factory",
+ "../../rtc_base/win:windows_version",
+ ]
+ }
+
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/memory",
+ "//third_party/abseil-cpp/absl/strings",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+
+ if (rtc_use_x11_extensions) {
+ deps += [ "../../rtc_base:sanitizer" ]
+ }
+
+ if (!build_with_mozilla) {
+ deps += [ "//third_party/libyuv" ]
+ } else {
+ include_dirs += [
+ "/media/libyuv",
+ "/media/libyuv/libyuv/include",
+ "/third_party/pipewire",
+ ]
+ }
+
+ if (use_desktop_capture_differ_sse2) {
+ deps += [ ":desktop_capture_differ_sse2" ]
+ }
+
+ if (rtc_use_pipewire) {
+ if (!build_with_mozilla) {
+ sources += [
+ "linux/wayland/base_capturer_pipewire.cc",
+ "linux/wayland/base_capturer_pipewire.h",
+ "linux/wayland/egl_dmabuf.cc",
+ "linux/wayland/egl_dmabuf.h",
+ "linux/wayland/mouse_cursor_monitor_pipewire.cc",
+ "linux/wayland/mouse_cursor_monitor_pipewire.h",
+ "linux/wayland/portal_request_response.h",
+ "linux/wayland/restore_token_manager.cc",
+ "linux/wayland/restore_token_manager.h",
+ "linux/wayland/scoped_glib.cc",
+ "linux/wayland/scoped_glib.h",
+ "linux/wayland/screen_capture_portal_interface.cc",
+ "linux/wayland/screen_capture_portal_interface.h",
+ "linux/wayland/screencast_portal.cc",
+ "linux/wayland/screencast_portal.h",
+ "linux/wayland/screencast_stream_utils.cc",
+ "linux/wayland/screencast_stream_utils.h",
+ "linux/wayland/shared_screencast_stream.cc",
+ "linux/wayland/shared_screencast_stream.h",
+ "linux/wayland/xdg_desktop_portal_utils.cc",
+ "linux/wayland/xdg_desktop_portal_utils.h",
+ "linux/wayland/xdg_session_details.h",
+ ]
+ } else {
+ sources += [
+ "linux/wayland/moz_base_capturer_pipewire.cc",
+ "linux/wayland/moz_base_capturer_pipewire.h",
+ ]
+ }
+
+ if (!build_with_mozilla) {
+ configs += [
+ ":gio",
+ ":pipewire",
+ ":gbm",
+ ":egl",
+ ":epoxy",
+ ":libdrm",
+ ]
+ }
+
+ if (!rtc_link_pipewire) {
+ if (!build_with_mozilla) {
+ deps += [ ":pipewire_stubs" ]
+ }
+
+ if (!build_with_mozilla) {
+ configs += [
+ ":gio",
+ ":pipewire",
+ ]
+ } else {
+ defines += [ "WEBRTC_USE_PIPEWIRE" ]
+ include_dirs += [ "/third_party/pipewire" ]
+ }
+ }
+
+ if (!build_with_mozilla) {
+ public_configs += [ ":pipewire_config" ]
+ }
+
+ deps += [ "../../rtc_base:sanitizer" ]
+ }
+
+ if (rtc_enable_win_wgc) {
+ sources += [
+ "win/wgc_capture_session.cc",
+ "win/wgc_capture_session.h",
+ "win/wgc_capture_source.cc",
+ "win/wgc_capture_source.h",
+ "win/wgc_capturer_win.cc",
+ "win/wgc_capturer_win.h",
+ "win/wgc_desktop_frame.cc",
+ "win/wgc_desktop_frame.h",
+ ]
+ libs += [ "dwmapi.lib" ]
+ deps += [
+ "../../rtc_base:rtc_event",
+ "../../rtc_base/win:hstring",
+ ]
+ }
+}
+
+if (is_mac) {
+ rtc_library("desktop_capture_objc") {
+    # This target needs to be separate from ":desktop_capture" because that
+    # one holds the C++ part while this one holds the Obj-C++ part; together
+    # they make up the "desktop_capture" functionality.
+    # This split by programming language introduces a dependency cycle
+    # between ":desktop_capture" and ":desktop_capture_objc".
+    # To break it, ":desktop_capture_objc" shares some .h files with
+    # ":desktop_capture", but external targets that need one of these
+    # headers should depend on ":desktop_capture" and treat this target
+    # as private.
+ visibility = [ ":desktop_capture" ]
+ sources = [
+ "desktop_capture_options.h",
+ "desktop_capturer.h",
+ "full_screen_application_handler.h",
+ "full_screen_window_detector.h",
+ "mac/desktop_configuration.h",
+ "mac/desktop_configuration.mm",
+ "mac/desktop_configuration_monitor.h",
+ "mac/desktop_frame_cgimage.h",
+ "mac/desktop_frame_cgimage.mm",
+ "mac/desktop_frame_iosurface.h",
+ "mac/desktop_frame_iosurface.mm",
+ "mac/desktop_frame_provider.h",
+ "mac/desktop_frame_provider.mm",
+ "mac/screen_capturer_mac.h",
+ "mac/screen_capturer_mac.mm",
+ "mac/window_list_utils.h",
+ "mouse_cursor.h",
+ "mouse_cursor_monitor.h",
+ "mouse_cursor_monitor_mac.mm",
+ "screen_capture_frame_queue.h",
+ "screen_capturer_darwin.mm",
+ "screen_capturer_helper.h",
+ "window_capturer_mac.mm",
+ "window_finder.h",
+ "window_finder_mac.h",
+ "window_finder_mac.mm",
+ ]
+ deps = [
+ ":primitives",
+ "../../api:function_view",
+ "../../api:refcountedbase",
+ "../../api:scoped_refptr",
+ "../../api:sequence_checker",
+ "../../rtc_base",
+ "../../rtc_base:checks",
+ "../../rtc_base:event_tracer",
+ "../../rtc_base:logging",
+ "../../rtc_base:macromagic",
+ "../../rtc_base:timeutils",
+ "../../rtc_base/synchronization:mutex",
+ "../../rtc_base/system:rtc_export",
+ "../../sdk:helpers_objc",
+ ]
+ frameworks = [
+ "AppKit.framework",
+ "IOKit.framework",
+ "IOSurface.framework",
+ ]
+ }
+}
+
+if (use_desktop_capture_differ_sse2) {
+  # This has to be a separate target because its sources must be compiled
+  # with SSE2 enabled.
+ rtc_library("desktop_capture_differ_sse2") {
+ visibility = [ ":*" ]
+ sources = [
+ "differ_vector_sse2.cc",
+ "differ_vector_sse2.h",
+ ]
+
+ if (is_posix || is_fuchsia) {
+ cflags = [ "-msse2" ]
+ }
+ }
+}
diff --git a/third_party/libwebrtc/modules/desktop_capture/DEPS b/third_party/libwebrtc/modules/desktop_capture/DEPS
new file mode 100644
index 0000000000..8c894c4430
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/DEPS
@@ -0,0 +1,19 @@
+include_rules = [
+ "+system_wrappers",
+ "+third_party/libyuv",
+]
+
+specific_include_rules = {
+ "desktop_frame_cgimage\.h": [
+ "+sdk/objc",
+ ],
+ "desktop_frame_iosurface\.h": [
+ "+sdk/objc",
+ ],
+ "desktop_frame_provider\.h": [
+ "+sdk/objc",
+ ],
+ "screen_capturer_mac\.mm": [
+ "+sdk/objc",
+ ],
+}
diff --git a/third_party/libwebrtc/modules/desktop_capture/OWNERS b/third_party/libwebrtc/modules/desktop_capture/OWNERS
new file mode 100644
index 0000000000..e3bc32ee5c
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/OWNERS
@@ -0,0 +1,2 @@
+alcooper@chromium.org
+mfoltz@chromium.org
diff --git a/third_party/libwebrtc/modules/desktop_capture/blank_detector_desktop_capturer_wrapper.cc b/third_party/libwebrtc/modules/desktop_capture/blank_detector_desktop_capturer_wrapper.cc
new file mode 100644
index 0000000000..8e56ffc3fd
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/blank_detector_desktop_capturer_wrapper.cc
@@ -0,0 +1,139 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/blank_detector_desktop_capturer_wrapper.h"
+
+#include <stdint.h>
+
+#include <utility>
+
+#include "modules/desktop_capture/desktop_geometry.h"
+#include "modules/desktop_capture/desktop_region.h"
+#include "rtc_base/checks.h"
+#include "system_wrappers/include/metrics.h"
+
+namespace webrtc {
+
+BlankDetectorDesktopCapturerWrapper::BlankDetectorDesktopCapturerWrapper(
+ std::unique_ptr<DesktopCapturer> capturer,
+ RgbaColor blank_pixel,
+ bool check_per_capture)
+ : capturer_(std::move(capturer)),
+ blank_pixel_(blank_pixel),
+ check_per_capture_(check_per_capture) {
+ RTC_DCHECK(capturer_);
+}
+
+BlankDetectorDesktopCapturerWrapper::~BlankDetectorDesktopCapturerWrapper() =
+ default;
+
+void BlankDetectorDesktopCapturerWrapper::Start(
+ DesktopCapturer::Callback* callback) {
+ callback_ = callback;
+ capturer_->Start(this);
+}
+
+void BlankDetectorDesktopCapturerWrapper::SetSharedMemoryFactory(
+ std::unique_ptr<SharedMemoryFactory> shared_memory_factory) {
+ capturer_->SetSharedMemoryFactory(std::move(shared_memory_factory));
+}
+
+void BlankDetectorDesktopCapturerWrapper::CaptureFrame() {
+ RTC_DCHECK(callback_);
+ capturer_->CaptureFrame();
+}
+
+void BlankDetectorDesktopCapturerWrapper::SetExcludedWindow(WindowId window) {
+ capturer_->SetExcludedWindow(window);
+}
+
+bool BlankDetectorDesktopCapturerWrapper::GetSourceList(SourceList* sources) {
+ return capturer_->GetSourceList(sources);
+}
+
+bool BlankDetectorDesktopCapturerWrapper::SelectSource(SourceId id) {
+ if (check_per_capture_) {
+    // If we start capturing a new source, we must reset these members so we
+    // don't short-circuit the blank detection logic.
+ is_first_frame_ = true;
+ non_blank_frame_received_ = false;
+ }
+
+ return capturer_->SelectSource(id);
+}
+
+bool BlankDetectorDesktopCapturerWrapper::FocusOnSelectedSource() {
+ return capturer_->FocusOnSelectedSource();
+}
+
+bool BlankDetectorDesktopCapturerWrapper::IsOccluded(const DesktopVector& pos) {
+ return capturer_->IsOccluded(pos);
+}
+
+void BlankDetectorDesktopCapturerWrapper::OnCaptureResult(
+ Result result,
+ std::unique_ptr<DesktopFrame> frame) {
+ RTC_DCHECK(callback_);
+ if (result != Result::SUCCESS || non_blank_frame_received_) {
+ callback_->OnCaptureResult(result, std::move(frame));
+ return;
+ }
+
+ if (!frame) {
+    // The capturer may deliver an empty frame; the blank detector treats it
+    // as a blank frame.
+ callback_->OnCaptureResult(Result::ERROR_TEMPORARY,
+ std::unique_ptr<DesktopFrame>());
+ return;
+ }
+
+  // If nothing has changed in the current frame, we do not need to check it
+  // again.
+ if (!frame->updated_region().is_empty() || is_first_frame_) {
+ last_frame_is_blank_ = IsBlankFrame(*frame);
+ is_first_frame_ = false;
+ }
+ RTC_HISTOGRAM_BOOLEAN("WebRTC.DesktopCapture.BlankFrameDetected",
+ last_frame_is_blank_);
+ if (!last_frame_is_blank_) {
+ non_blank_frame_received_ = true;
+ callback_->OnCaptureResult(Result::SUCCESS, std::move(frame));
+ return;
+ }
+
+ callback_->OnCaptureResult(Result::ERROR_TEMPORARY,
+ std::unique_ptr<DesktopFrame>());
+}
+
+bool BlankDetectorDesktopCapturerWrapper::IsBlankFrame(
+ const DesktopFrame& frame) const {
+  // We sample roughly one pixel out of every 105 (about 7,490 samples for a
+  // frame with 1024 x 768 resolution).
+ for (int i = 0; i < frame.size().width() * frame.size().height(); i += 105) {
+ const int x = i % frame.size().width();
+ const int y = i / frame.size().width();
+ if (!IsBlankPixel(frame, x, y)) {
+ return false;
+ }
+ }
+
+  // Also verify the pixel at the center of the frame.
+ return IsBlankPixel(frame, frame.size().width() / 2,
+ frame.size().height() / 2);
+}
+
+bool BlankDetectorDesktopCapturerWrapper::IsBlankPixel(
+ const DesktopFrame& frame,
+ int x,
+ int y) const {
+ uint8_t* pixel_data = frame.GetFrameDataAtPos(DesktopVector(x, y));
+ return RgbaColor(pixel_data) == blank_pixel_;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/blank_detector_desktop_capturer_wrapper.h b/third_party/libwebrtc/modules/desktop_capture/blank_detector_desktop_capturer_wrapper.h
new file mode 100644
index 0000000000..d10f9cf725
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/blank_detector_desktop_capturer_wrapper.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_DESKTOP_CAPTURE_BLANK_DETECTOR_DESKTOP_CAPTURER_WRAPPER_H_
+#define MODULES_DESKTOP_CAPTURE_BLANK_DETECTOR_DESKTOP_CAPTURER_WRAPPER_H_
+
+#include <memory>
+
+#include "modules/desktop_capture/desktop_capture_types.h"
+#include "modules/desktop_capture/desktop_capturer.h"
+#include "modules/desktop_capture/desktop_frame.h"
+#include "modules/desktop_capture/rgba_color.h"
+#include "modules/desktop_capture/shared_memory.h"
+
+namespace webrtc {
+
+// A DesktopCapturer wrapper that inspects the frames produced by its owned
+// DesktopCapturer implementation. If the sampled pixels of a captured frame
+// are all equal to the blank pixel, this wrapper reports ERROR_TEMPORARY
+// instead of delivering the frame. Failures from the wrapped capturer are
+// forwarded unchanged.
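+//
+// A minimal usage sketch (illustrative only; `options` and `callback` are
+// assumed to be supplied by the caller):
+//
+//   auto wrapper = std::make_unique<BlankDetectorDesktopCapturerWrapper>(
+//       DesktopCapturer::CreateWindowCapturer(options),
+//       RgbaColor(0, 0, 0, 0));
+//   wrapper->Start(&callback);
+//   wrapper->CaptureFrame();  // A blank frame arrives as ERROR_TEMPORARY.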
+class BlankDetectorDesktopCapturerWrapper final
+ : public DesktopCapturer,
+ public DesktopCapturer::Callback {
+ public:
+  // Creates a BlankDetectorDesktopCapturerWrapper, which takes ownership of
+  // `capturer`. `blank_pixel` is the unmodified color returned by the
+  // `capturer`.
+ BlankDetectorDesktopCapturerWrapper(std::unique_ptr<DesktopCapturer> capturer,
+ RgbaColor blank_pixel,
+ bool check_per_capture = false);
+ ~BlankDetectorDesktopCapturerWrapper() override;
+
+ // DesktopCapturer interface.
+ void Start(DesktopCapturer::Callback* callback) override;
+ void SetSharedMemoryFactory(
+ std::unique_ptr<SharedMemoryFactory> shared_memory_factory) override;
+ void CaptureFrame() override;
+ void SetExcludedWindow(WindowId window) override;
+ bool GetSourceList(SourceList* sources) override;
+ bool SelectSource(SourceId id) override;
+ bool FocusOnSelectedSource() override;
+ bool IsOccluded(const DesktopVector& pos) override;
+
+ private:
+ // DesktopCapturer::Callback interface.
+ void OnCaptureResult(Result result,
+ std::unique_ptr<DesktopFrame> frame) override;
+
+ bool IsBlankFrame(const DesktopFrame& frame) const;
+
+  // Returns whether the pixel at (x, y) equals `blank_pixel_`.
+ bool IsBlankPixel(const DesktopFrame& frame, int x, int y) const;
+
+ const std::unique_ptr<DesktopCapturer> capturer_;
+ const RgbaColor blank_pixel_;
+
+ // Whether a non-blank frame has been received.
+ bool non_blank_frame_received_ = false;
+
+ // Whether the last frame is blank.
+ bool last_frame_is_blank_ = false;
+
+ // Whether current frame is the first frame.
+ bool is_first_frame_ = true;
+
+  // Whether blank inspection is performed on every capture instead of once
+  // per selected screen or window.
+ bool check_per_capture_ = false;
+
+ DesktopCapturer::Callback* callback_ = nullptr;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_DESKTOP_CAPTURE_BLANK_DETECTOR_DESKTOP_CAPTURER_WRAPPER_H_
diff --git a/third_party/libwebrtc/modules/desktop_capture/blank_detector_desktop_capturer_wrapper_unittest.cc b/third_party/libwebrtc/modules/desktop_capture/blank_detector_desktop_capturer_wrapper_unittest.cc
new file mode 100644
index 0000000000..25a81edd89
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/blank_detector_desktop_capturer_wrapper_unittest.cc
@@ -0,0 +1,165 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/blank_detector_desktop_capturer_wrapper.h"
+
+#include <memory>
+#include <utility>
+
+#include "modules/desktop_capture/desktop_capturer.h"
+#include "modules/desktop_capture/desktop_frame.h"
+#include "modules/desktop_capture/desktop_frame_generator.h"
+#include "modules/desktop_capture/desktop_geometry.h"
+#include "modules/desktop_capture/desktop_region.h"
+#include "modules/desktop_capture/fake_desktop_capturer.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+class BlankDetectorDesktopCapturerWrapperTest
+ : public ::testing::Test,
+ public DesktopCapturer::Callback {
+ public:
+ BlankDetectorDesktopCapturerWrapperTest();
+ ~BlankDetectorDesktopCapturerWrapperTest() override;
+
+ protected:
+ void PerfTest(DesktopCapturer* capturer);
+
+ const int frame_width_ = 1024;
+ const int frame_height_ = 768;
+ std::unique_ptr<BlankDetectorDesktopCapturerWrapper> wrapper_;
+ DesktopCapturer* capturer_ = nullptr;
+ BlackWhiteDesktopFramePainter painter_;
+ int num_frames_captured_ = 0;
+ DesktopCapturer::Result last_result_ = DesktopCapturer::Result::SUCCESS;
+ std::unique_ptr<DesktopFrame> last_frame_;
+
+ private:
+ // DesktopCapturer::Callback interface.
+ void OnCaptureResult(DesktopCapturer::Result result,
+ std::unique_ptr<DesktopFrame> frame) override;
+
+ PainterDesktopFrameGenerator frame_generator_;
+};
+
+BlankDetectorDesktopCapturerWrapperTest::
+ BlankDetectorDesktopCapturerWrapperTest() {
+ frame_generator_.size()->set(frame_width_, frame_height_);
+ frame_generator_.set_desktop_frame_painter(&painter_);
+ std::unique_ptr<DesktopCapturer> capturer(new FakeDesktopCapturer());
+ FakeDesktopCapturer* fake_capturer =
+ static_cast<FakeDesktopCapturer*>(capturer.get());
+ fake_capturer->set_frame_generator(&frame_generator_);
+ capturer_ = fake_capturer;
+ wrapper_.reset(new BlankDetectorDesktopCapturerWrapper(
+ std::move(capturer), RgbaColor(0, 0, 0, 0)));
+ wrapper_->Start(this);
+}
+
+BlankDetectorDesktopCapturerWrapperTest::
+ ~BlankDetectorDesktopCapturerWrapperTest() = default;
+
+void BlankDetectorDesktopCapturerWrapperTest::OnCaptureResult(
+ DesktopCapturer::Result result,
+ std::unique_ptr<DesktopFrame> frame) {
+ last_result_ = result;
+ last_frame_ = std::move(frame);
+ num_frames_captured_++;
+}
+
+void BlankDetectorDesktopCapturerWrapperTest::PerfTest(
+ DesktopCapturer* capturer) {
+ for (int i = 0; i < 10000; i++) {
+ capturer->CaptureFrame();
+ ASSERT_EQ(num_frames_captured_, i + 1);
+ }
+}
+
+TEST_F(BlankDetectorDesktopCapturerWrapperTest, ShouldDetectBlankFrame) {
+ wrapper_->CaptureFrame();
+ ASSERT_EQ(num_frames_captured_, 1);
+ ASSERT_EQ(last_result_, DesktopCapturer::Result::ERROR_TEMPORARY);
+ ASSERT_FALSE(last_frame_);
+}
+
+TEST_F(BlankDetectorDesktopCapturerWrapperTest, ShouldPassBlankDetection) {
+ painter_.updated_region()->AddRect(DesktopRect::MakeXYWH(0, 0, 100, 100));
+ wrapper_->CaptureFrame();
+ ASSERT_EQ(num_frames_captured_, 1);
+ ASSERT_EQ(last_result_, DesktopCapturer::Result::SUCCESS);
+ ASSERT_TRUE(last_frame_);
+
+ painter_.updated_region()->AddRect(
+ DesktopRect::MakeXYWH(frame_width_ - 100, frame_height_ - 100, 100, 100));
+ wrapper_->CaptureFrame();
+ ASSERT_EQ(num_frames_captured_, 2);
+ ASSERT_EQ(last_result_, DesktopCapturer::Result::SUCCESS);
+ ASSERT_TRUE(last_frame_);
+
+ painter_.updated_region()->AddRect(
+ DesktopRect::MakeXYWH(0, frame_height_ - 100, 100, 100));
+ wrapper_->CaptureFrame();
+ ASSERT_EQ(num_frames_captured_, 3);
+ ASSERT_EQ(last_result_, DesktopCapturer::Result::SUCCESS);
+ ASSERT_TRUE(last_frame_);
+
+ painter_.updated_region()->AddRect(
+ DesktopRect::MakeXYWH(frame_width_ - 100, 0, 100, 100));
+ wrapper_->CaptureFrame();
+ ASSERT_EQ(num_frames_captured_, 4);
+ ASSERT_EQ(last_result_, DesktopCapturer::Result::SUCCESS);
+ ASSERT_TRUE(last_frame_);
+
+ painter_.updated_region()->AddRect(DesktopRect::MakeXYWH(
+ (frame_width_ >> 1) - 50, (frame_height_ >> 1) - 50, 100, 100));
+ wrapper_->CaptureFrame();
+ ASSERT_EQ(num_frames_captured_, 5);
+ ASSERT_EQ(last_result_, DesktopCapturer::Result::SUCCESS);
+ ASSERT_TRUE(last_frame_);
+}
+
+TEST_F(BlankDetectorDesktopCapturerWrapperTest,
+ ShouldNotCheckAfterANonBlankFrameReceived) {
+ wrapper_->CaptureFrame();
+ ASSERT_EQ(num_frames_captured_, 1);
+ ASSERT_EQ(last_result_, DesktopCapturer::Result::ERROR_TEMPORARY);
+ ASSERT_FALSE(last_frame_);
+
+ painter_.updated_region()->AddRect(
+ DesktopRect::MakeXYWH(frame_width_ - 100, 0, 100, 100));
+ wrapper_->CaptureFrame();
+ ASSERT_EQ(num_frames_captured_, 2);
+ ASSERT_EQ(last_result_, DesktopCapturer::Result::SUCCESS);
+ ASSERT_TRUE(last_frame_);
+
+ for (int i = 0; i < 100; i++) {
+ wrapper_->CaptureFrame();
+ ASSERT_EQ(num_frames_captured_, i + 3);
+ ASSERT_EQ(last_result_, DesktopCapturer::Result::SUCCESS);
+ ASSERT_TRUE(last_frame_);
+ }
+}
+
+// Using BlankDetectorDesktopCapturerWrapper has no perceptible impact,
+// i.e. less than 0.2 ms per frame:
+// [ OK ] DISABLED_Performance (10210 ms)
+// [ OK ] DISABLED_PerformanceComparison (8791 ms)
+TEST_F(BlankDetectorDesktopCapturerWrapperTest, DISABLED_Performance) {
+ PerfTest(wrapper_.get());
+}
+
+TEST_F(BlankDetectorDesktopCapturerWrapperTest,
+ DISABLED_PerformanceComparison) {
+ capturer_->Start(this);
+ PerfTest(capturer_);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/cropped_desktop_frame.cc b/third_party/libwebrtc/modules/desktop_capture/cropped_desktop_frame.cc
new file mode 100644
index 0000000000..54488b7d62
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/cropped_desktop_frame.cc
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/cropped_desktop_frame.h"
+
+#include <memory>
+#include <utility>
+
+#include "modules/desktop_capture/desktop_region.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+// A DesktopFrame that is a sub-rect of another DesktopFrame.
+class CroppedDesktopFrame : public DesktopFrame {
+ public:
+ CroppedDesktopFrame(std::unique_ptr<DesktopFrame> frame,
+ const DesktopRect& rect);
+
+ CroppedDesktopFrame(const CroppedDesktopFrame&) = delete;
+ CroppedDesktopFrame& operator=(const CroppedDesktopFrame&) = delete;
+
+ private:
+ const std::unique_ptr<DesktopFrame> frame_;
+};
+
+std::unique_ptr<DesktopFrame> CreateCroppedDesktopFrame(
+ std::unique_ptr<DesktopFrame> frame,
+ const DesktopRect& rect) {
+ RTC_DCHECK(frame);
+
+ DesktopRect intersection = DesktopRect::MakeSize(frame->size());
+ intersection.IntersectWith(rect);
+ if (intersection.is_empty()) {
+ return nullptr;
+ }
+
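+  // The crop region has the same size as the frame, so return the frame
+  // unmodified rather than allocating a wrapper.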
+ if (frame->size().equals(rect.size())) {
+ return frame;
+ }
+
+ return std::unique_ptr<DesktopFrame>(
+ new CroppedDesktopFrame(std::move(frame), intersection));
+}
+
+CroppedDesktopFrame::CroppedDesktopFrame(std::unique_ptr<DesktopFrame> frame,
+ const DesktopRect& rect)
+ : DesktopFrame(rect.size(),
+ frame->stride(),
+ frame->GetFrameDataAtPos(rect.top_left()),
+ frame->shared_memory()),
+ frame_(std::move(frame)) {
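+  // Carry over the frame metadata from the wrapped frame, then express the
+  // top-left position and the updated region in the crop's coordinates.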
+ MoveFrameInfoFrom(frame_.get());
+ set_top_left(frame_->top_left().add(rect.top_left()));
+ mutable_updated_region()->IntersectWith(rect);
+ mutable_updated_region()->Translate(-rect.left(), -rect.top());
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/cropped_desktop_frame.h b/third_party/libwebrtc/modules/desktop_capture/cropped_desktop_frame.h
new file mode 100644
index 0000000000..5c672c7d32
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/cropped_desktop_frame.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_DESKTOP_CAPTURE_CROPPED_DESKTOP_FRAME_H_
+#define MODULES_DESKTOP_CAPTURE_CROPPED_DESKTOP_FRAME_H_
+
+#include <memory>
+
+#include "modules/desktop_capture/desktop_frame.h"
+#include "modules/desktop_capture/desktop_geometry.h"
+#include "rtc_base/system/rtc_export.h"
+
+namespace webrtc {
+
+// Creates a DesktopFrame that contains only the area of `rect` within the
+// original `frame`.
+// `frame` must not be nullptr. `rect` is in `frame` coordinates, i.e.
+// `frame`->top_left() does not affect the area of `rect`.
+// Returns nullptr if `rect` does not intersect the bounds of `frame`;
+// otherwise the result is cropped to their intersection.
+std::unique_ptr<DesktopFrame> RTC_EXPORT
+CreateCroppedDesktopFrame(std::unique_ptr<DesktopFrame> frame,
+ const DesktopRect& rect);
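+//
+// Illustrative sketch (the concrete sizes mirror this module's unit tests
+// and are assumptions, not part of the API):
+//
+//   std::unique_ptr<DesktopFrame> frame =
+//       std::make_unique<BasicDesktopFrame>(DesktopSize(10, 20));
+//   std::unique_ptr<DesktopFrame> cropped = CreateCroppedDesktopFrame(
+//       std::move(frame), DesktopRect::MakeLTRB(1, 2, 9, 19));
+//   // cropped->size() is 8x17 and cropped->top_left() is (1, 2).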
+
+} // namespace webrtc
+
+#endif // MODULES_DESKTOP_CAPTURE_CROPPED_DESKTOP_FRAME_H_
diff --git a/third_party/libwebrtc/modules/desktop_capture/cropped_desktop_frame_unittest.cc b/third_party/libwebrtc/modules/desktop_capture/cropped_desktop_frame_unittest.cc
new file mode 100644
index 0000000000..9becf69636
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/cropped_desktop_frame_unittest.cc
@@ -0,0 +1,115 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/cropped_desktop_frame.h"
+
+#include <memory>
+#include <utility>
+
+#include "modules/desktop_capture/desktop_frame.h"
+#include "modules/desktop_capture/shared_desktop_frame.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+std::unique_ptr<DesktopFrame> CreateTestFrame() {
+ return std::make_unique<BasicDesktopFrame>(DesktopSize(10, 20));
+}
+
+TEST(CroppedDesktopFrameTest, DoNotCreateWrapperIfSizeIsNotChanged) {
+ std::unique_ptr<DesktopFrame> original = CreateTestFrame();
+  // Owned by `original` and then by `cropped`.
+ DesktopFrame* raw_original = original.get();
+ std::unique_ptr<DesktopFrame> cropped = CreateCroppedDesktopFrame(
+ std::move(original), DesktopRect::MakeWH(10, 20));
+ ASSERT_EQ(cropped.get(), raw_original);
+}
+
+TEST(CroppedDesktopFrameTest, CropWhenPartiallyOutOfBounds) {
+ std::unique_ptr<DesktopFrame> cropped =
+ CreateCroppedDesktopFrame(CreateTestFrame(), DesktopRect::MakeWH(11, 10));
+ ASSERT_NE(nullptr, cropped);
+ ASSERT_EQ(cropped->size().width(), 10);
+ ASSERT_EQ(cropped->size().height(), 10);
+ ASSERT_EQ(cropped->top_left().x(), 0);
+ ASSERT_EQ(cropped->top_left().y(), 0);
+}
+
+TEST(CroppedDesktopFrameTest, ReturnNullIfCropRegionIsOutOfBounds) {
+ std::unique_ptr<DesktopFrame> frame = CreateTestFrame();
+ frame->set_top_left(DesktopVector(100, 200));
+ ASSERT_EQ(nullptr,
+ CreateCroppedDesktopFrame(
+ std::move(frame), DesktopRect::MakeLTRB(101, 203, 109, 218)));
+}
+
+TEST(CroppedDesktopFrameTest, CropASubArea) {
+ std::unique_ptr<DesktopFrame> cropped = CreateCroppedDesktopFrame(
+ CreateTestFrame(), DesktopRect::MakeLTRB(1, 2, 9, 19));
+ ASSERT_EQ(cropped->size().width(), 8);
+ ASSERT_EQ(cropped->size().height(), 17);
+ ASSERT_EQ(cropped->top_left().x(), 1);
+ ASSERT_EQ(cropped->top_left().y(), 2);
+}
+
+TEST(CroppedDesktopFrameTest, SetTopLeft) {
+ std::unique_ptr<DesktopFrame> frame = CreateTestFrame();
+ frame->set_top_left(DesktopVector(100, 200));
+ frame = CreateCroppedDesktopFrame(std::move(frame),
+ DesktopRect::MakeLTRB(1, 3, 9, 18));
+ ASSERT_EQ(frame->size().width(), 8);
+ ASSERT_EQ(frame->size().height(), 15);
+ ASSERT_EQ(frame->top_left().x(), 101);
+ ASSERT_EQ(frame->top_left().y(), 203);
+}
+
+TEST(CroppedDesktopFrameTest, InitializedWithZeros) {
+ std::unique_ptr<DesktopFrame> frame = CreateTestFrame();
+ const DesktopVector frame_origin = frame->top_left();
+ const DesktopSize frame_size = frame->size();
+ std::unique_ptr<DesktopFrame> cropped = CreateCroppedDesktopFrame(
+ std::move(frame), DesktopRect::MakeOriginSize(frame_origin, frame_size));
+ for (int j = 0; j < cropped->size().height(); ++j) {
+ for (int i = 0; i < cropped->stride(); ++i) {
+ ASSERT_EQ(cropped->data()[i + j * cropped->stride()], 0);
+ }
+ }
+}
+
+TEST(CroppedDesktopFrameTest, IccProfile) {
+ const uint8_t fake_icc_profile_data_array[] = {0x1a, 0x00, 0x2b, 0x00,
+ 0x3c, 0x00, 0x4d};
+ const std::vector<uint8_t> icc_profile(
+ fake_icc_profile_data_array,
+ fake_icc_profile_data_array + sizeof(fake_icc_profile_data_array));
+
+ std::unique_ptr<DesktopFrame> frame = CreateTestFrame();
+ EXPECT_EQ(frame->icc_profile().size(), 0UL);
+
+ frame->set_icc_profile(icc_profile);
+ EXPECT_EQ(frame->icc_profile().size(), 7UL);
+ EXPECT_EQ(frame->icc_profile(), icc_profile);
+
+ frame = CreateCroppedDesktopFrame(std::move(frame),
+ DesktopRect::MakeLTRB(2, 2, 8, 18));
+ EXPECT_EQ(frame->icc_profile().size(), 7UL);
+ EXPECT_EQ(frame->icc_profile(), icc_profile);
+
+ std::unique_ptr<SharedDesktopFrame> shared =
+ SharedDesktopFrame::Wrap(std::move(frame));
+ EXPECT_EQ(shared->icc_profile().size(), 7UL);
+ EXPECT_EQ(shared->icc_profile(), icc_profile);
+
+ std::unique_ptr<DesktopFrame> shared_other = shared->Share();
+ EXPECT_EQ(shared_other->icc_profile().size(), 7UL);
+ EXPECT_EQ(shared_other->icc_profile(), icc_profile);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/cropping_window_capturer.cc b/third_party/libwebrtc/modules/desktop_capture/cropping_window_capturer.cc
new file mode 100644
index 0000000000..5e0faaade9
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/cropping_window_capturer.cc
@@ -0,0 +1,135 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/cropping_window_capturer.h"
+
+#include <stddef.h>
+
+#include <utility>
+
+#include "modules/desktop_capture/cropped_desktop_frame.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+CroppingWindowCapturer::CroppingWindowCapturer(
+ const DesktopCaptureOptions& options)
+ : options_(options),
+ callback_(NULL),
+ window_capturer_(DesktopCapturer::CreateRawWindowCapturer(options)),
+ selected_window_(kNullWindowId),
+ excluded_window_(kNullWindowId) {}
+
+CroppingWindowCapturer::~CroppingWindowCapturer() {}
+
+void CroppingWindowCapturer::Start(DesktopCapturer::Callback* callback) {
+ callback_ = callback;
+ window_capturer_->Start(callback);
+}
+
+void CroppingWindowCapturer::SetSharedMemoryFactory(
+ std::unique_ptr<SharedMemoryFactory> shared_memory_factory) {
+ window_capturer_->SetSharedMemoryFactory(std::move(shared_memory_factory));
+}
+
+void CroppingWindowCapturer::CaptureFrame() {
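+  // Capture the whole screen and crop when the selected window is known to
+  // be on top; otherwise fall back to the plain window capturer.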
+ if (ShouldUseScreenCapturer()) {
+ if (!screen_capturer_.get()) {
+ screen_capturer_ = DesktopCapturer::CreateRawScreenCapturer(options_);
+ if (excluded_window_) {
+ screen_capturer_->SetExcludedWindow(excluded_window_);
+ }
+ screen_capturer_->Start(this);
+ }
+ screen_capturer_->CaptureFrame();
+ } else {
+ window_capturer_->CaptureFrame();
+ }
+}
+
+void CroppingWindowCapturer::SetExcludedWindow(WindowId window) {
+ excluded_window_ = window;
+ if (screen_capturer_.get()) {
+ screen_capturer_->SetExcludedWindow(window);
+ }
+}
+
+bool CroppingWindowCapturer::GetSourceList(SourceList* sources) {
+ return window_capturer_->GetSourceList(sources);
+}
+
+bool CroppingWindowCapturer::SelectSource(SourceId id) {
+ if (window_capturer_->SelectSource(id)) {
+ selected_window_ = id;
+ return true;
+ }
+ return false;
+}
+
+bool CroppingWindowCapturer::FocusOnSelectedSource() {
+ return window_capturer_->FocusOnSelectedSource();
+}
+
+void CroppingWindowCapturer::OnCaptureResult(
+ DesktopCapturer::Result result,
+ std::unique_ptr<DesktopFrame> screen_frame) {
+ if (!ShouldUseScreenCapturer()) {
+ RTC_LOG(LS_INFO) << "Window no longer on top when ScreenCapturer finishes";
+ window_capturer_->CaptureFrame();
+ return;
+ }
+
+ if (result != Result::SUCCESS) {
+ RTC_LOG(LS_WARNING) << "ScreenCapturer failed to capture a frame";
+ callback_->OnCaptureResult(result, nullptr);
+ return;
+ }
+
+ DesktopRect window_rect = GetWindowRectInVirtualScreen();
+ if (window_rect.is_empty()) {
+ RTC_LOG(LS_WARNING) << "Window rect is empty";
+ callback_->OnCaptureResult(Result::ERROR_TEMPORARY, nullptr);
+ return;
+ }
+
+ std::unique_ptr<DesktopFrame> cropped_frame =
+ CreateCroppedDesktopFrame(std::move(screen_frame), window_rect);
+
+ if (!cropped_frame) {
+ RTC_LOG(LS_WARNING) << "Window is outside of the captured display";
+ callback_->OnCaptureResult(Result::ERROR_TEMPORARY, nullptr);
+ return;
+ }
+
+ callback_->OnCaptureResult(Result::SUCCESS, std::move(cropped_frame));
+}
+
+bool CroppingWindowCapturer::IsOccluded(const DesktopVector& pos) {
+ // Returns true if either capturer returns true.
+ if (window_capturer_->IsOccluded(pos)) {
+ return true;
+ }
+ if (screen_capturer_ != nullptr && screen_capturer_->IsOccluded(pos)) {
+ return true;
+ }
+ return false;
+}
+
+#if !defined(WEBRTC_WIN)
+// CroppingWindowCapturer is implemented only for Windows. On other platforms
+// the regular window capturer is used.
+// static
+std::unique_ptr<DesktopCapturer> CroppingWindowCapturer::CreateCapturer(
+ const DesktopCaptureOptions& options) {
+ return DesktopCapturer::CreateWindowCapturer(options);
+}
+#endif
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/cropping_window_capturer.h b/third_party/libwebrtc/modules/desktop_capture/cropping_window_capturer.h
new file mode 100644
index 0000000000..56478030b1
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/cropping_window_capturer.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_DESKTOP_CAPTURE_CROPPING_WINDOW_CAPTURER_H_
+#define MODULES_DESKTOP_CAPTURE_CROPPING_WINDOW_CAPTURER_H_
+
+#include <memory>
+
+#include "modules/desktop_capture/desktop_capture_options.h"
+#include "modules/desktop_capture/desktop_capture_types.h"
+#include "modules/desktop_capture/desktop_capturer.h"
+#include "modules/desktop_capture/desktop_frame.h"
+#include "modules/desktop_capture/desktop_geometry.h"
+#include "modules/desktop_capture/shared_memory.h"
+#include "rtc_base/system/rtc_export.h"
+
+namespace webrtc {
+
+// WindowCapturer implementation that uses a screen capturer to capture the
+// whole screen and crops the video frame to the window area when the captured
+// window is on top.
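+//
+// A sketch of typical use (illustrative; `options` is a caller-provided
+// DesktopCaptureOptions, `window_id` the source to capture, and `callback`
+// a DesktopCapturer::Callback implementation):
+//
+//   std::unique_ptr<DesktopCapturer> capturer =
+//       CroppingWindowCapturer::CreateCapturer(options);
+//   capturer->SelectSource(window_id);
+//   capturer->Start(&callback);
+//   capturer->CaptureFrame();  // Crops from a screen capture when possible.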
+class RTC_EXPORT CroppingWindowCapturer : public DesktopCapturer,
+ public DesktopCapturer::Callback {
+ public:
+ static std::unique_ptr<DesktopCapturer> CreateCapturer(
+ const DesktopCaptureOptions& options);
+
+ ~CroppingWindowCapturer() override;
+
+ // DesktopCapturer implementation.
+ void Start(DesktopCapturer::Callback* callback) override;
+ void SetSharedMemoryFactory(
+ std::unique_ptr<SharedMemoryFactory> shared_memory_factory) override;
+ void CaptureFrame() override;
+ void SetExcludedWindow(WindowId window) override;
+ bool GetSourceList(SourceList* sources) override;
+ bool SelectSource(SourceId id) override;
+ bool FocusOnSelectedSource() override;
+ bool IsOccluded(const DesktopVector& pos) override;
+
+ // DesktopCapturer::Callback implementation, passed to `screen_capturer_` to
+ // intercept the capture result.
+ void OnCaptureResult(DesktopCapturer::Result result,
+ std::unique_ptr<DesktopFrame> frame) override;
+
+ protected:
+ explicit CroppingWindowCapturer(const DesktopCaptureOptions& options);
+
+ // The platform implementation should override these methods.
+
+ // Returns true if it is OK to capture the whole screen and crop to the
+ // selected window, i.e. the selected window is opaque, rectangular, and not
+ // occluded.
+ virtual bool ShouldUseScreenCapturer() = 0;
+
+ // Returns the window area relative to the top left of the virtual screen
+ // within the bounds of the virtual screen. This function should return the
+ // DesktopRect in full desktop coordinates, i.e. the top-left monitor starts
+ // from (0, 0).
+ virtual DesktopRect GetWindowRectInVirtualScreen() = 0;
+
+ WindowId selected_window() const { return selected_window_; }
+ WindowId excluded_window() const { return excluded_window_; }
+ DesktopCapturer* window_capturer() const { return window_capturer_.get(); }
+
+ private:
+ DesktopCaptureOptions options_;
+ DesktopCapturer::Callback* callback_;
+ std::unique_ptr<DesktopCapturer> window_capturer_;
+ std::unique_ptr<DesktopCapturer> screen_capturer_;
+ SourceId selected_window_;
+ WindowId excluded_window_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_DESKTOP_CAPTURE_CROPPING_WINDOW_CAPTURER_H_
diff --git a/third_party/libwebrtc/modules/desktop_capture/cropping_window_capturer_win.cc b/third_party/libwebrtc/modules/desktop_capture/cropping_window_capturer_win.cc
new file mode 100644
index 0000000000..64d9219e24
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/cropping_window_capturer_win.cc
@@ -0,0 +1,327 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/cropping_window_capturer.h"
+#include "modules/desktop_capture/desktop_capturer_differ_wrapper.h"
+#include "modules/desktop_capture/win/screen_capture_utils.h"
+#include "modules/desktop_capture/win/selected_window_context.h"
+#include "modules/desktop_capture/win/window_capture_utils.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/trace_event.h"
+#include "rtc_base/win/windows_version.h"
+
+namespace webrtc {
+
+namespace {
+
+// Used to pass input data when verifying that the selected window is on top.
+struct TopWindowVerifierContext : public SelectedWindowContext {
+ TopWindowVerifierContext(HWND selected_window,
+ HWND excluded_window,
+ DesktopRect selected_window_rect,
+ WindowCaptureHelperWin* window_capture_helper)
+ : SelectedWindowContext(selected_window,
+ selected_window_rect,
+ window_capture_helper),
+ excluded_window(excluded_window) {
+ RTC_DCHECK_NE(selected_window, excluded_window);
+ }
+
+ // Determines whether the selected window is on top (not occluded by any
+ // windows except for those it owns or any excluded window).
+ bool IsTopWindow() {
+ if (!IsSelectedWindowValid()) {
+ return false;
+ }
+
+ // Enumerate all top-level windows above the selected window in Z-order,
+ // checking whether any overlaps it. This uses FindWindowEx rather than
+ // EnumWindows because the latter excludes certain system windows (e.g. the
+ // Start menu & other taskbar menus) that should be detected here to avoid
+ // inadvertent capture.
+ int num_retries = 0;
+ while (true) {
+ HWND hwnd = nullptr;
+ while ((hwnd = FindWindowEx(nullptr, hwnd, nullptr, nullptr))) {
+ if (hwnd == selected_window()) {
+ // Windows are enumerated in top-down Z-order, so we can stop
+ // enumerating upon reaching the selected window & report it's on top.
+ return true;
+ }
+
+ // Ignore the excluded window.
+ if (hwnd == excluded_window) {
+ continue;
+ }
+
+ // Ignore windows that aren't visible on the current desktop.
+ if (!window_capture_helper()->IsWindowVisibleOnCurrentDesktop(hwnd)) {
+ continue;
+ }
+
+        // Ignore Chrome notification windows, in particular the notification
+        // for the ongoing window sharing. Notes:
+        // - This only works for notifications from Chrome, not other apps.
+        // - All notifications from Chrome are ignored.
+        // - Part or all of a notification window may still be cropped into
+        //   the capture of the target window if the two overlap.
+ if (window_capture_helper()->IsWindowChromeNotification(hwnd)) {
+ continue;
+ }
+
+ // Ignore windows owned by the selected window since we want to capture
+ // them.
+ if (IsWindowOwnedBySelectedWindow(hwnd)) {
+ continue;
+ }
+
+ // Check whether this window intersects with the selected window.
+ if (IsWindowOverlappingSelectedWindow(hwnd)) {
+ // If intersection is not empty, the selected window is not on top.
+ return false;
+ }
+ }
+
+ DWORD lastError = GetLastError();
+ if (lastError == ERROR_SUCCESS) {
+ // The enumeration completed successfully without finding the selected
+ // window (which may have been closed).
+ RTC_LOG(LS_WARNING) << "Failed to find selected window (only expected "
+ "if it was closed)";
+ RTC_DCHECK(!IsWindow(selected_window()));
+ return false;
+ } else if (lastError == ERROR_INVALID_WINDOW_HANDLE) {
+ // This error may occur if a window is closed around the time it's
+ // enumerated; retry the enumeration in this case up to 10 times
+ // (this should be a rare race & unlikely to recur).
+ if (++num_retries <= 10) {
+ RTC_LOG(LS_WARNING) << "Enumeration failed due to race with a window "
+ "closing; retrying - retry #"
+ << num_retries;
+ continue;
+ } else {
+ RTC_LOG(LS_ERROR)
+ << "Exhausted retry allowance around window enumeration failures "
+ "due to races with windows closing";
+ }
+ }
+
+      // The enumeration failed with an unexpected error (or with more repeats
+      // of an infrequent but expected error than anticipated). After logging
+      // this and firing an assert when enabled, report that the selected
+      // window isn't topmost to avoid inadvertently capturing other windows.
+ RTC_LOG(LS_ERROR) << "Failed to enumerate windows: " << lastError;
+ RTC_DCHECK_NOTREACHED();
+ return false;
+ }
+ }
+
+ const HWND excluded_window;
+};
+
+class CroppingWindowCapturerWin : public CroppingWindowCapturer {
+ public:
+ explicit CroppingWindowCapturerWin(const DesktopCaptureOptions& options)
+ : CroppingWindowCapturer(options),
+ enumerate_current_process_windows_(
+ options.enumerate_current_process_windows()),
+ full_screen_window_detector_(options.full_screen_window_detector()) {}
+
+ void CaptureFrame() override;
+
+ private:
+ bool ShouldUseScreenCapturer() override;
+ DesktopRect GetWindowRectInVirtualScreen() override;
+
+  // Returns either the user-selected source id or the source id provided by
+  // FullScreenWindowDetector.
+ WindowId GetWindowToCapture() const;
+
+ // The region from GetWindowRgn in the desktop coordinate if the region is
+ // rectangular, or the rect from GetWindowRect if the region is not set.
+ DesktopRect window_region_rect_;
+
+ WindowCaptureHelperWin window_capture_helper_;
+
+ bool enumerate_current_process_windows_;
+
+ rtc::scoped_refptr<FullScreenWindowDetector> full_screen_window_detector_;
+};
+
+void CroppingWindowCapturerWin::CaptureFrame() {
+ DesktopCapturer* win_capturer = window_capturer();
+ if (win_capturer) {
+ // Feed the actual list of windows into full screen window detector.
+ if (full_screen_window_detector_) {
+ full_screen_window_detector_->UpdateWindowListIfNeeded(
+ selected_window(), [this](DesktopCapturer::SourceList* sources) {
+ // Get the list of top level windows, including ones with empty
+ // title. win_capturer_->GetSourceList can't be used here
+ // cause it filters out the windows with empty titles and
+ // it uses responsiveness check which could lead to performance
+ // issues.
+ SourceList result;
+ int window_list_flags =
+ enumerate_current_process_windows_
+ ? GetWindowListFlags::kNone
+ : GetWindowListFlags::kIgnoreCurrentProcessWindows;
+
+ if (!webrtc::GetWindowList(window_list_flags, &result))
+ return false;
+
+          // Filter out windows not visible on the current desktop.
+ auto it = std::remove_if(
+ result.begin(), result.end(), [this](const auto& source) {
+ HWND hwnd = reinterpret_cast<HWND>(source.id);
+ return !window_capture_helper_
+ .IsWindowVisibleOnCurrentDesktop(hwnd);
+ });
+ result.erase(it, result.end());
+
+ sources->swap(result);
+ return true;
+ });
+ }
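+    // Re-select the source in case the detector found a full-screen window
+    // that replaces the selected one.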
+ win_capturer->SelectSource(GetWindowToCapture());
+ }
+
+ CroppingWindowCapturer::CaptureFrame();
+}
+
+bool CroppingWindowCapturerWin::ShouldUseScreenCapturer() {
+ if (rtc::rtc_win::GetVersion() < rtc::rtc_win::Version::VERSION_WIN8 &&
+ window_capture_helper_.IsAeroEnabled()) {
+ return false;
+ }
+
+ const HWND selected = reinterpret_cast<HWND>(GetWindowToCapture());
+ // Check if the window is visible on current desktop.
+ if (!window_capture_helper_.IsWindowVisibleOnCurrentDesktop(selected)) {
+ return false;
+ }
+
+ // Check if the window is a translucent layered window.
+ const LONG window_ex_style = GetWindowLong(selected, GWL_EXSTYLE);
+ if (window_ex_style & WS_EX_LAYERED) {
+ COLORREF color_ref_key = 0;
+ BYTE alpha = 0;
+ DWORD flags = 0;
+
+    // GetLayeredWindowAttributes fails if the window was set up with
+    // UpdateLayeredWindow; we have no way to know the opacity of the window
+    // in that case. This happens for Sticky Notes (crbug/412726).
+ if (!GetLayeredWindowAttributes(selected, &color_ref_key, &alpha, &flags))
+ return false;
+
+    // UpdateLayeredWindow is the only way to set per-pixel alpha and will
+    // cause the previous GetLayeredWindowAttributes to fail, so we only need
+    // to check the window-wide color key or alpha.
+ if ((flags & LWA_COLORKEY) || ((flags & LWA_ALPHA) && (alpha < 255))) {
+ return false;
+ }
+ }
+
+ if (!GetWindowRect(selected, &window_region_rect_)) {
+ return false;
+ }
+
+ DesktopRect content_rect;
+ if (!GetWindowContentRect(selected, &content_rect)) {
+ return false;
+ }
+
+ DesktopRect region_rect;
+ // Get the window region and check if it is rectangular.
+ const int region_type =
+ GetWindowRegionTypeWithBoundary(selected, &region_rect);
+
+ // Do not use the screen capturer if the region is empty or not rectangular.
+ if (region_type == COMPLEXREGION || region_type == NULLREGION) {
+ return false;
+ }
+
+ if (region_type == SIMPLEREGION) {
+ // The `region_rect` returned from GetRgnBox() is always in window
+ // coordinate.
+ region_rect.Translate(window_region_rect_.left(),
+ window_region_rect_.top());
+ // MSDN: The window region determines the area *within* the window where the
+ // system permits drawing.
+ // https://msdn.microsoft.com/en-us/library/windows/desktop/dd144950(v=vs.85).aspx.
+ //
+    // `region_rect` should always be inside `window_region_rect_`, so after
+    // the intersection `window_region_rect_` == `region_rect`. If so, what is
+    // the point of the intersections? Why can't we retrieve
+    // `window_region_rect_` directly from GetWindowRegionTypeWithBoundary()?
+ // TODO(zijiehe): Figure out the purpose of these intersections.
+ window_region_rect_.IntersectWith(region_rect);
+ content_rect.IntersectWith(region_rect);
+ }
+
+  // Check if the client area is outside the screen area. When the window is
+  // maximized, only its client area is visible on the screen and the border
+  // is hidden, so we use `content_rect` here.
+ if (!GetFullscreenRect().ContainsRect(content_rect)) {
+ return false;
+ }
+
+ // Check if the window is occluded by any other window, excluding child
+ // windows, context menus, and `excluded_window_`. `content_rect` is
+ // preferred; see the comments on IsWindowIntersectWithSelectedWindow().
+ TopWindowVerifierContext context(selected,
+ reinterpret_cast<HWND>(excluded_window()),
+ content_rect, &window_capture_helper_);
+ return context.IsTopWindow();
+}
+
+DesktopRect CroppingWindowCapturerWin::GetWindowRectInVirtualScreen() {
+ TRACE_EVENT0("webrtc",
+ "CroppingWindowCapturerWin::GetWindowRectInVirtualScreen");
+ DesktopRect window_rect;
+ HWND hwnd = reinterpret_cast<HWND>(GetWindowToCapture());
+ if (!GetCroppedWindowRect(hwnd, /*avoid_cropping_border*/ false, &window_rect,
+ /*original_rect*/ nullptr)) {
+ RTC_LOG(LS_WARNING) << "Failed to get window info: " << GetLastError();
+ return window_rect;
+ }
+ window_rect.IntersectWith(window_region_rect_);
+
+ // Convert `window_rect` to be relative to the top-left of the virtual screen.
+ DesktopRect screen_rect(GetFullscreenRect());
+ window_rect.IntersectWith(screen_rect);
+ window_rect.Translate(-screen_rect.left(), -screen_rect.top());
+ return window_rect;
+}
+
+WindowId CroppingWindowCapturerWin::GetWindowToCapture() const {
+ const auto selected_source = selected_window();
+ const auto full_screen_source =
+ full_screen_window_detector_
+ ? full_screen_window_detector_->FindFullScreenWindow(selected_source)
+ : 0;
+ return full_screen_source ? full_screen_source : selected_source;
+}
+
+} // namespace
+
+// static
+std::unique_ptr<DesktopCapturer> CroppingWindowCapturer::CreateCapturer(
+ const DesktopCaptureOptions& options) {
+ std::unique_ptr<DesktopCapturer> capturer(
+ new CroppingWindowCapturerWin(options));
+ if (capturer && options.detect_updated_region()) {
+ capturer.reset(new DesktopCapturerDifferWrapper(std::move(capturer)));
+ }
+
+ return capturer;
+}
+
+} // namespace webrtc
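The layered-window test in ShouldUseScreenCapturer() above is subtle enough to
merit a standalone illustration. A minimal sketch of the same check, assuming
only <windows.h>; `IsWindowCaptureOpaque` is a hypothetical name, not part of
this patch:

    #include <windows.h>

    // Returns true when `hwnd` can be captured via the screen path: either it
    // is not a layered window, or its window-wide attributes say it is fully
    // opaque. Mirrors the WS_EX_LAYERED branch in the diff above.
    bool IsWindowCaptureOpaque(HWND hwnd) {
      const LONG ex_style = GetWindowLong(hwnd, GWL_EXSTYLE);
      if (!(ex_style & WS_EX_LAYERED))
        return true;  // Not layered, hence opaque.
      COLORREF color_key = 0;
      BYTE alpha = 0;
      DWORD flags = 0;
      // Fails for windows driven by UpdateLayeredWindow (per-pixel alpha),
      // whose opacity is unknowable; treat those as non-opaque.
      if (!GetLayeredWindowAttributes(hwnd, &color_key, &alpha, &flags))
        return false;
      return !(flags & LWA_COLORKEY) && !((flags & LWA_ALPHA) && alpha < 255);
    }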
diff --git a/third_party/libwebrtc/modules/desktop_capture/desktop_and_cursor_composer.cc b/third_party/libwebrtc/modules/desktop_capture/desktop_and_cursor_composer.cc
new file mode 100644
index 0000000000..dd688ac5f2
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/desktop_and_cursor_composer.cc
@@ -0,0 +1,260 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/desktop_and_cursor_composer.h"
+
+#include <stdint.h>
+#include <string.h>
+
+#include <memory>
+#include <utility>
+
+#include "modules/desktop_capture/desktop_capturer.h"
+#include "modules/desktop_capture/desktop_frame.h"
+#include "modules/desktop_capture/mouse_cursor.h"
+#include "modules/desktop_capture/mouse_cursor_monitor.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+namespace {
+
+// Helper function that blends one image into another. The source image must
+// be premultiplied by its alpha channel. The destination is assumed to be
+// opaque.
+void AlphaBlend(uint8_t* dest,
+ int dest_stride,
+ const uint8_t* src,
+ int src_stride,
+ const DesktopSize& size) {
+ for (int y = 0; y < size.height(); ++y) {
+ for (int x = 0; x < size.width(); ++x) {
+ uint32_t base_alpha = 255 - src[x * DesktopFrame::kBytesPerPixel + 3];
+ if (base_alpha == 255) {
+ continue;
+ } else if (base_alpha == 0) {
+ memcpy(dest + x * DesktopFrame::kBytesPerPixel,
+ src + x * DesktopFrame::kBytesPerPixel,
+ DesktopFrame::kBytesPerPixel);
+ } else {
+ dest[x * DesktopFrame::kBytesPerPixel] =
+ dest[x * DesktopFrame::kBytesPerPixel] * base_alpha / 255 +
+ src[x * DesktopFrame::kBytesPerPixel];
+ dest[x * DesktopFrame::kBytesPerPixel + 1] =
+ dest[x * DesktopFrame::kBytesPerPixel + 1] * base_alpha / 255 +
+ src[x * DesktopFrame::kBytesPerPixel + 1];
+ dest[x * DesktopFrame::kBytesPerPixel + 2] =
+ dest[x * DesktopFrame::kBytesPerPixel + 2] * base_alpha / 255 +
+ src[x * DesktopFrame::kBytesPerPixel + 2];
+ }
+ }
+ src += src_stride;
+ dest += dest_stride;
+ }
+}
+
+// DesktopFrame wrapper that draws mouse on a frame and restores original
+// content before releasing the underlying frame.
+class DesktopFrameWithCursor : public DesktopFrame {
+ public:
+ // Takes ownership of `frame`.
+ DesktopFrameWithCursor(std::unique_ptr<DesktopFrame> frame,
+ const MouseCursor& cursor,
+ const DesktopVector& position,
+ const DesktopRect& previous_cursor_rect,
+ bool cursor_changed);
+ ~DesktopFrameWithCursor() override;
+
+ DesktopFrameWithCursor(const DesktopFrameWithCursor&) = delete;
+ DesktopFrameWithCursor& operator=(const DesktopFrameWithCursor&) = delete;
+
+ DesktopRect cursor_rect() const { return cursor_rect_; }
+
+ private:
+ const std::unique_ptr<DesktopFrame> original_frame_;
+
+ DesktopVector restore_position_;
+ std::unique_ptr<DesktopFrame> restore_frame_;
+ DesktopRect cursor_rect_;
+};
+
+DesktopFrameWithCursor::DesktopFrameWithCursor(
+ std::unique_ptr<DesktopFrame> frame,
+ const MouseCursor& cursor,
+ const DesktopVector& position,
+ const DesktopRect& previous_cursor_rect,
+ bool cursor_changed)
+ : DesktopFrame(frame->size(),
+ frame->stride(),
+ frame->data(),
+ frame->shared_memory()),
+ original_frame_(std::move(frame)) {
+ MoveFrameInfoFrom(original_frame_.get());
+
+ DesktopVector image_pos = position.subtract(cursor.hotspot());
+ cursor_rect_ = DesktopRect::MakeSize(cursor.image()->size());
+ cursor_rect_.Translate(image_pos);
+ DesktopVector cursor_origin = cursor_rect_.top_left();
+ cursor_rect_.IntersectWith(DesktopRect::MakeSize(size()));
+
+ if (!previous_cursor_rect.equals(cursor_rect_)) {
+ mutable_updated_region()->AddRect(cursor_rect_);
+ // TODO(crbug:1323241) Update this code to properly handle the case where
+ // |previous_cursor_rect| is outside of the boundaries of |frame|.
+ // Any boundary check has to account for the fact that
+ // |previous_cursor_rect| may be in DIPs or in physical pixels, depending on
+ // the platform we're running on.
+ mutable_updated_region()->AddRect(previous_cursor_rect);
+ } else if (cursor_changed) {
+ mutable_updated_region()->AddRect(cursor_rect_);
+ }
+
+ if (cursor_rect_.is_empty())
+ return;
+
+ // Copy original screen content under cursor to `restore_frame_`.
+ restore_position_ = cursor_rect_.top_left();
+ restore_frame_.reset(new BasicDesktopFrame(cursor_rect_.size()));
+ restore_frame_->CopyPixelsFrom(*this, cursor_rect_.top_left(),
+ DesktopRect::MakeSize(restore_frame_->size()));
+
+ // Blit the cursor.
+ uint8_t* cursor_rect_data =
+ reinterpret_cast<uint8_t*>(data()) + cursor_rect_.top() * stride() +
+ cursor_rect_.left() * DesktopFrame::kBytesPerPixel;
+ DesktopVector origin_shift = cursor_rect_.top_left().subtract(cursor_origin);
+ AlphaBlend(cursor_rect_data, stride(),
+ cursor.image()->data() +
+ origin_shift.y() * cursor.image()->stride() +
+ origin_shift.x() * DesktopFrame::kBytesPerPixel,
+ cursor.image()->stride(), cursor_rect_.size());
+}
+
+DesktopFrameWithCursor::~DesktopFrameWithCursor() {
+ // Restore original content of the frame.
+ if (restore_frame_) {
+ DesktopRect target_rect = DesktopRect::MakeSize(restore_frame_->size());
+ target_rect.Translate(restore_position_);
+ CopyPixelsFrom(restore_frame_->data(), restore_frame_->stride(),
+ target_rect);
+ }
+}
+
+} // namespace
+
+DesktopAndCursorComposer::DesktopAndCursorComposer(
+ std::unique_ptr<DesktopCapturer> desktop_capturer,
+ const DesktopCaptureOptions& options)
+ : DesktopAndCursorComposer(desktop_capturer.release(),
+ MouseCursorMonitor::Create(options).release()) {}
+
+DesktopAndCursorComposer::DesktopAndCursorComposer(
+ DesktopCapturer* desktop_capturer,
+ MouseCursorMonitor* mouse_monitor)
+ : desktop_capturer_(desktop_capturer), mouse_monitor_(mouse_monitor) {
+ RTC_DCHECK(desktop_capturer_);
+}
+
+DesktopAndCursorComposer::~DesktopAndCursorComposer() = default;
+
+std::unique_ptr<DesktopAndCursorComposer>
+DesktopAndCursorComposer::CreateWithoutMouseCursorMonitor(
+ std::unique_ptr<DesktopCapturer> desktop_capturer) {
+ return std::unique_ptr<DesktopAndCursorComposer>(
+ new DesktopAndCursorComposer(desktop_capturer.release(), nullptr));
+}
+
+void DesktopAndCursorComposer::Start(DesktopCapturer::Callback* callback) {
+ callback_ = callback;
+ if (mouse_monitor_)
+ mouse_monitor_->Init(this, MouseCursorMonitor::SHAPE_AND_POSITION);
+ desktop_capturer_->Start(this);
+}
+
+void DesktopAndCursorComposer::SetSharedMemoryFactory(
+ std::unique_ptr<SharedMemoryFactory> shared_memory_factory) {
+ desktop_capturer_->SetSharedMemoryFactory(std::move(shared_memory_factory));
+}
+
+void DesktopAndCursorComposer::CaptureFrame() {
+ if (mouse_monitor_)
+ mouse_monitor_->Capture();
+ desktop_capturer_->CaptureFrame();
+}
+
+void DesktopAndCursorComposer::SetExcludedWindow(WindowId window) {
+ desktop_capturer_->SetExcludedWindow(window);
+}
+
+bool DesktopAndCursorComposer::GetSourceList(SourceList* sources) {
+ return desktop_capturer_->GetSourceList(sources);
+}
+
+bool DesktopAndCursorComposer::SelectSource(SourceId id) {
+ return desktop_capturer_->SelectSource(id);
+}
+
+bool DesktopAndCursorComposer::FocusOnSelectedSource() {
+ return desktop_capturer_->FocusOnSelectedSource();
+}
+
+bool DesktopAndCursorComposer::IsOccluded(const DesktopVector& pos) {
+ return desktop_capturer_->IsOccluded(pos);
+}
+
+#if defined(WEBRTC_USE_GIO)
+DesktopCaptureMetadata DesktopAndCursorComposer::GetMetadata() {
+ return desktop_capturer_->GetMetadata();
+}
+#endif // defined(WEBRTC_USE_GIO)
+
+void DesktopAndCursorComposer::OnCaptureResult(
+ DesktopCapturer::Result result,
+ std::unique_ptr<DesktopFrame> frame) {
+ if (frame && cursor_) {
+ if (!frame->may_contain_cursor() &&
+ frame->rect().Contains(cursor_position_) &&
+ !desktop_capturer_->IsOccluded(cursor_position_)) {
+ DesktopVector relative_position =
+ cursor_position_.subtract(frame->top_left());
+#if defined(WEBRTC_MAC) || defined(CHROMEOS)
+ // On macOS, logical (DIP) and physical coordinates are used
+ // interchangeably. For example, on a Retina monitor the captured cursor
+ // has its size in physical pixels (2x) but its location in logical (DIP)
+ // pixels. This causes problems when the desktop mixes Retina and
+ // non-Retina monitors. So we use DIP pixels for all location info and
+ // scale `relative_position` by the scale factor of the current frame.
+ const float scale = frame->scale_factor();
+ relative_position.set(relative_position.x() * scale,
+ relative_position.y() * scale);
+#endif
+ auto frame_with_cursor = std::make_unique<DesktopFrameWithCursor>(
+ std::move(frame), *cursor_, relative_position, previous_cursor_rect_,
+ cursor_changed_);
+ previous_cursor_rect_ = frame_with_cursor->cursor_rect();
+ cursor_changed_ = false;
+ frame = std::move(frame_with_cursor);
+ frame->set_may_contain_cursor(true);
+ }
+ }
+
+ callback_->OnCaptureResult(result, std::move(frame));
+}
+
+void DesktopAndCursorComposer::OnMouseCursor(MouseCursor* cursor) {
+ cursor_changed_ = true;
+ cursor_.reset(cursor);
+}
+
+void DesktopAndCursorComposer::OnMouseCursorPosition(
+ const DesktopVector& position) {
+ cursor_position_ = position;
+}
+
+} // namespace webrtc
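AlphaBlend() above expects the source to be premultiplied by its alpha. A
worked single-pixel example of the same arithmetic, runnable on its own (the
pixel values are made up for illustration):

    #include <cstdint>
    #include <cstdio>

    int main() {
      // B, G, R, A. Destination is opaque gray; source is 60%-opaque red,
      // already premultiplied (0.6 * 255 = 153 in the red and alpha bytes).
      uint8_t dest[4] = {100, 100, 100, 255};
      const uint8_t src[4] = {0, 0, 153, 153};
      const uint32_t base_alpha = 255 - src[3];  // 102: weight kept from dest.
      for (int c = 0; c < 3; ++c)
        dest[c] = static_cast<uint8_t>(dest[c] * base_alpha / 255 + src[c]);
      std::printf("B=%d G=%d R=%d\n", dest[0], dest[1], dest[2]);
      // Prints B=40 G=40 R=193, matching AlphaBlend()'s per-channel formula.
      return 0;
    }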
diff --git a/third_party/libwebrtc/modules/desktop_capture/desktop_and_cursor_composer.h b/third_party/libwebrtc/modules/desktop_capture/desktop_and_cursor_composer.h
new file mode 100644
index 0000000000..a078b3eeef
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/desktop_and_cursor_composer.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_DESKTOP_CAPTURE_DESKTOP_AND_CURSOR_COMPOSER_H_
+#define MODULES_DESKTOP_CAPTURE_DESKTOP_AND_CURSOR_COMPOSER_H_
+
+#include <memory>
+#if defined(WEBRTC_USE_GIO)
+#include "modules/desktop_capture/desktop_capture_metadata.h"
+#endif // defined(WEBRTC_USE_GIO)
+#include "modules/desktop_capture/desktop_capture_options.h"
+#include "modules/desktop_capture/desktop_capture_types.h"
+#include "modules/desktop_capture/desktop_capturer.h"
+#include "modules/desktop_capture/desktop_frame.h"
+#include "modules/desktop_capture/desktop_geometry.h"
+#include "modules/desktop_capture/mouse_cursor.h"
+#include "modules/desktop_capture/mouse_cursor_monitor.h"
+#include "modules/desktop_capture/shared_memory.h"
+#include "rtc_base/system/rtc_export.h"
+
+namespace webrtc {
+
+// A wrapper for DesktopCapturer that also captures the mouse cursor using the
+// specified MouseCursorMonitor and renders it onto the generated frames.
+class RTC_EXPORT DesktopAndCursorComposer
+ : public DesktopCapturer,
+ public DesktopCapturer::Callback,
+ public MouseCursorMonitor::Callback {
+ public:
+ // Creates a new composer that captures mouse cursor using
+ // MouseCursorMonitor::Create(options) and renders it into the frames
+ // generated by `desktop_capturer`.
+ DesktopAndCursorComposer(std::unique_ptr<DesktopCapturer> desktop_capturer,
+ const DesktopCaptureOptions& options);
+
+ ~DesktopAndCursorComposer() override;
+
+ DesktopAndCursorComposer(const DesktopAndCursorComposer&) = delete;
+ DesktopAndCursorComposer& operator=(const DesktopAndCursorComposer&) = delete;
+
+ // Creates a new composer that relies on an external source for cursor shape
+ // and position information via the MouseCursorMonitor::Callback interface.
+ static std::unique_ptr<DesktopAndCursorComposer>
+ CreateWithoutMouseCursorMonitor(
+ std::unique_ptr<DesktopCapturer> desktop_capturer);
+
+ // DesktopCapturer interface.
+ void Start(DesktopCapturer::Callback* callback) override;
+ void SetSharedMemoryFactory(
+ std::unique_ptr<SharedMemoryFactory> shared_memory_factory) override;
+ void CaptureFrame() override;
+ void SetExcludedWindow(WindowId window) override;
+ bool GetSourceList(SourceList* sources) override;
+ bool SelectSource(SourceId id) override;
+ bool FocusOnSelectedSource() override;
+ bool IsOccluded(const DesktopVector& pos) override;
+#if defined(WEBRTC_USE_GIO)
+ DesktopCaptureMetadata GetMetadata() override;
+#endif // defined(WEBRTC_USE_GIO)
+
+ // MouseCursorMonitor::Callback interface.
+ void OnMouseCursor(MouseCursor* cursor) override;
+ void OnMouseCursorPosition(const DesktopVector& position) override;
+
+ private:
+ // Allows test cases to use a fake MouseCursorMonitor implementation.
+ friend class DesktopAndCursorComposerTest;
+
+ // Constructor that both the deprecated and the new public constructors
+ // delegate to; it also lets test cases supply a fake MouseCursorMonitor
+ // implementation.
+ DesktopAndCursorComposer(DesktopCapturer* desktop_capturer,
+ MouseCursorMonitor* mouse_monitor);
+
+ // DesktopCapturer::Callback interface.
+ void OnCaptureResult(DesktopCapturer::Result result,
+ std::unique_ptr<DesktopFrame> frame) override;
+
+ const std::unique_ptr<DesktopCapturer> desktop_capturer_;
+ const std::unique_ptr<MouseCursorMonitor> mouse_monitor_;
+
+ DesktopCapturer::Callback* callback_;
+
+ std::unique_ptr<MouseCursor> cursor_;
+ DesktopVector cursor_position_;
+ DesktopRect previous_cursor_rect_;
+ bool cursor_changed_ = false;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_DESKTOP_CAPTURE_DESKTOP_AND_CURSOR_COMPOSER_H_
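For orientation, a hedged usage sketch of the class declared above, assuming
the factory methods from desktop_capturer.h; the callback is whatever the
embedder implements, and the helper name here is illustrative:

    #include <memory>
    #include <utility>

    #include "modules/desktop_capture/desktop_and_cursor_composer.h"
    #include "modules/desktop_capture/desktop_capture_options.h"
    #include "modules/desktop_capture/desktop_capturer.h"

    // Wrap a screen capturer so captured frames include the mouse cursor.
    void CaptureOneFrameWithCursor(webrtc::DesktopCapturer::Callback* callback) {
      auto options = webrtc::DesktopCaptureOptions::CreateDefault();
      std::unique_ptr<webrtc::DesktopCapturer> screen =
          webrtc::DesktopCapturer::CreateScreenCapturer(options);
      webrtc::DesktopAndCursorComposer composer(std::move(screen), options);
      composer.Start(callback);  // Also starts the internal MouseCursorMonitor.
      composer.CaptureFrame();   // Frame arrives via callback->OnCaptureResult().
    }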
diff --git a/third_party/libwebrtc/modules/desktop_capture/desktop_and_cursor_composer_unittest.cc b/third_party/libwebrtc/modules/desktop_capture/desktop_and_cursor_composer_unittest.cc
new file mode 100644
index 0000000000..c26dc208ac
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/desktop_and_cursor_composer_unittest.cc
@@ -0,0 +1,479 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/desktop_and_cursor_composer.h"
+
+#include <stdint.h>
+#include <string.h>
+
+#include <memory>
+#include <utility>
+#include <vector>
+
+#include "modules/desktop_capture/desktop_capturer.h"
+#include "modules/desktop_capture/desktop_frame.h"
+#include "modules/desktop_capture/mouse_cursor.h"
+#include "modules/desktop_capture/shared_desktop_frame.h"
+#include "rtc_base/arraysize.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+namespace {
+
+using testing::ElementsAre;
+
+const int kFrameXCoord = 100;
+const int kFrameYCoord = 200;
+const int kScreenWidth = 100;
+const int kScreenHeight = 100;
+const int kCursorWidth = 10;
+const int kCursorHeight = 10;
+
+const int kTestCursorSize = 3;
+const uint32_t kTestCursorData[kTestCursorSize][kTestCursorSize] = {
+ {
+ 0xffffffff,
+ 0x99990000,
+ 0xaa222222,
+ },
+ {
+ 0x88008800,
+ 0xaa0000aa,
+ 0xaa333333,
+ },
+ {
+ 0x00000000,
+ 0xaa0000aa,
+ 0xaa333333,
+ },
+};
+
+uint32_t GetFakeFramePixelValue(const DesktopVector& p) {
+ uint32_t r = 100 + p.x();
+ uint32_t g = 100 + p.y();
+ uint32_t b = 100 + p.x() + p.y();
+ return b + (g << 8) + (r << 16) + 0xff000000;
+}
+
+uint32_t GetFramePixel(const DesktopFrame& frame, const DesktopVector& pos) {
+ return *reinterpret_cast<uint32_t*>(frame.GetFrameDataAtPos(pos));
+}
+
+// Blends two pixel values, taking alpha into account.
+uint32_t BlendPixels(uint32_t dest, uint32_t src) {
+ uint8_t alpha = 255 - ((src & 0xff000000) >> 24);
+ uint32_t r =
+ ((dest & 0x00ff0000) >> 16) * alpha / 255 + ((src & 0x00ff0000) >> 16);
+ uint32_t g =
+ ((dest & 0x0000ff00) >> 8) * alpha / 255 + ((src & 0x0000ff00) >> 8);
+ uint32_t b = (dest & 0x000000ff) * alpha / 255 + (src & 0x000000ff);
+ return b + (g << 8) + (r << 16) + 0xff000000;
+}
+
+DesktopFrame* CreateTestFrame(int width = kScreenWidth,
+ int height = kScreenHeight) {
+ DesktopFrame* frame = new BasicDesktopFrame(DesktopSize(width, height));
+ uint32_t* data = reinterpret_cast<uint32_t*>(frame->data());
+ for (int y = 0; y < height; ++y) {
+ for (int x = 0; x < width; ++x) {
+ *(data++) = GetFakeFramePixelValue(DesktopVector(x, y));
+ }
+ }
+ return frame;
+}
+
+MouseCursor* CreateTestCursor(DesktopVector hotspot) {
+ std::unique_ptr<DesktopFrame> image(
+ new BasicDesktopFrame(DesktopSize(kCursorWidth, kCursorHeight)));
+ uint32_t* data = reinterpret_cast<uint32_t*>(image->data());
+ // Set a kTestCursorSize x kTestCursorSize block of pixels near the hotspot
+ // and leave all others blank.
+ for (int y = 0; y < kTestCursorSize; ++y) {
+ for (int x = 0; x < kTestCursorSize; ++x) {
+ data[(hotspot.y() + y) * kCursorWidth + (hotspot.x() + x)] =
+ kTestCursorData[y][x];
+ }
+ }
+ return new MouseCursor(image.release(), hotspot);
+}
+
+class FakeScreenCapturer : public DesktopCapturer {
+ public:
+ FakeScreenCapturer() {}
+
+ void Start(Callback* callback) override { callback_ = callback; }
+
+ void CaptureFrame() override {
+ callback_->OnCaptureResult(
+ next_frame_ ? Result::SUCCESS : Result::ERROR_TEMPORARY,
+ std::move(next_frame_));
+ }
+
+ void SetNextFrame(std::unique_ptr<DesktopFrame> next_frame) {
+ next_frame_ = std::move(next_frame);
+ }
+
+ bool IsOccluded(const DesktopVector& pos) override { return is_occluded_; }
+
+ void set_is_occluded(bool value) { is_occluded_ = value; }
+
+ private:
+ Callback* callback_ = nullptr;
+
+ std::unique_ptr<DesktopFrame> next_frame_;
+ bool is_occluded_ = false;
+};
+
+class FakeMouseMonitor : public MouseCursorMonitor {
+ public:
+ FakeMouseMonitor() : changed_(true) {}
+
+ void SetState(CursorState state, const DesktopVector& pos) {
+ state_ = state;
+ position_ = pos;
+ }
+
+ void SetHotspot(const DesktopVector& hotspot) {
+ if (!hotspot_.equals(hotspot))
+ changed_ = true;
+ hotspot_ = hotspot;
+ }
+
+ void Init(Callback* callback, Mode mode) { callback_ = callback; }
+
+ void Capture() override {
+ if (changed_) {
+ callback_->OnMouseCursor(CreateTestCursor(hotspot_));
+ }
+ callback_->OnMouseCursorPosition(position_);
+ }
+
+ private:
+ Callback* callback_;
+ CursorState state_;
+ DesktopVector position_;
+ DesktopVector hotspot_;
+ bool changed_;
+};
+
+void VerifyFrame(const DesktopFrame& frame,
+ MouseCursorMonitor::CursorState state,
+ const DesktopVector& pos) {
+ // Verify that the cursor is blended at `pos` when `state` is INSIDE, and
+ // that all other pixels retain their original values.
+ DesktopRect image_rect =
+ DesktopRect::MakeWH(kTestCursorSize, kTestCursorSize);
+ image_rect.Translate(pos);
+
+ for (int y = 0; y < kScreenHeight; ++y) {
+ for (int x = 0; x < kScreenWidth; ++x) {
+ DesktopVector p(x, y);
+ if (state == MouseCursorMonitor::INSIDE && image_rect.Contains(p)) {
+ EXPECT_EQ(BlendPixels(GetFakeFramePixelValue(p),
+ kTestCursorData[y - pos.y()][x - pos.x()]),
+ GetFramePixel(frame, p));
+ } else {
+ EXPECT_EQ(GetFakeFramePixelValue(p), GetFramePixel(frame, p));
+ }
+ }
+ }
+}
+
+} // namespace
+
+bool operator==(const DesktopRect& left, const DesktopRect& right) {
+ return left.equals(right);
+}
+
+std::ostream& operator<<(std::ostream& out, const DesktopRect& rect) {
+ out << "{" << rect.left() << "+" << rect.top() << "-" << rect.width() << "x"
+ << rect.height() << "}";
+ return out;
+}
+
+class DesktopAndCursorComposerTest : public ::testing::Test,
+ public DesktopCapturer::Callback {
+ public:
+ explicit DesktopAndCursorComposerTest(bool include_cursor = true)
+ : fake_screen_(new FakeScreenCapturer()),
+ fake_cursor_(include_cursor ? new FakeMouseMonitor() : nullptr),
+ blender_(fake_screen_, fake_cursor_) {
+ blender_.Start(this);
+ }
+
+ // DesktopCapturer::Callback interface
+ void OnCaptureResult(DesktopCapturer::Result result,
+ std::unique_ptr<DesktopFrame> frame) override {
+ frame_ = std::move(frame);
+ }
+
+ protected:
+ // Owned by `blender_`.
+ FakeScreenCapturer* fake_screen_;
+ FakeMouseMonitor* fake_cursor_;
+
+ DesktopAndCursorComposer blender_;
+ std::unique_ptr<DesktopFrame> frame_;
+};
+
+class DesktopAndCursorComposerNoCursorMonitorTest
+ : public DesktopAndCursorComposerTest {
+ public:
+ DesktopAndCursorComposerNoCursorMonitorTest()
+ : DesktopAndCursorComposerTest(false) {}
+};
+
+TEST_F(DesktopAndCursorComposerTest, CursorShouldBeIgnoredIfNoFrameCaptured) {
+ struct {
+ int x, y;
+ int hotspot_x, hotspot_y;
+ bool inside;
+ } tests[] = {
+ {0, 0, 0, 0, true}, {50, 50, 0, 0, true}, {100, 50, 0, 0, true},
+ {50, 100, 0, 0, true}, {100, 100, 0, 0, true}, {0, 0, 2, 5, true},
+ {1, 1, 2, 5, true}, {50, 50, 2, 5, true}, {100, 100, 2, 5, true},
+ {0, 0, 5, 2, true}, {50, 50, 5, 2, true}, {100, 100, 5, 2, true},
+ {0, 0, 0, 0, false},
+ };
+
+ for (size_t i = 0; i < arraysize(tests); i++) {
+ SCOPED_TRACE(i);
+
+ DesktopVector hotspot(tests[i].hotspot_x, tests[i].hotspot_y);
+ fake_cursor_->SetHotspot(hotspot);
+
+ MouseCursorMonitor::CursorState state = tests[i].inside
+ ? MouseCursorMonitor::INSIDE
+ : MouseCursorMonitor::OUTSIDE;
+ DesktopVector pos(tests[i].x, tests[i].y);
+ fake_cursor_->SetState(state, pos);
+
+ std::unique_ptr<SharedDesktopFrame> frame(
+ SharedDesktopFrame::Wrap(CreateTestFrame()));
+
+ blender_.CaptureFrame();
+ // If the capturer captured nothing, the cursor should be ignored, no
+ // matter its state or position.
+ EXPECT_EQ(frame_, nullptr);
+ }
+}
+
+TEST_F(DesktopAndCursorComposerTest, CursorShouldBeIgnoredIfFrameMayContainIt) {
+ // We can't use a shared frame because we need to detect modifications
+ // compared to a control.
+ std::unique_ptr<DesktopFrame> control_frame(CreateTestFrame());
+ control_frame->set_top_left(DesktopVector(kFrameXCoord, kFrameYCoord));
+
+ struct {
+ int x;
+ int y;
+ bool may_contain_cursor;
+ } tests[] = {
+ {100, 200, true},
+ {100, 200, false},
+ {150, 250, true},
+ {150, 250, false},
+ };
+
+ for (size_t i = 0; i < arraysize(tests); i++) {
+ SCOPED_TRACE(i);
+
+ std::unique_ptr<DesktopFrame> frame(CreateTestFrame());
+ frame->set_top_left(DesktopVector(kFrameXCoord, kFrameYCoord));
+ frame->set_may_contain_cursor(tests[i].may_contain_cursor);
+ fake_screen_->SetNextFrame(std::move(frame));
+
+ const DesktopVector abs_pos(tests[i].x, tests[i].y);
+ fake_cursor_->SetState(MouseCursorMonitor::INSIDE, abs_pos);
+ blender_.CaptureFrame();
+
+ // If the frame may already have contained the cursor, then `CaptureFrame()`
+ // should not have modified it, so it should be the same as the control.
+ EXPECT_TRUE(frame_);
+ const DesktopVector rel_pos(abs_pos.subtract(control_frame->top_left()));
+ if (tests[i].may_contain_cursor) {
+ EXPECT_EQ(
+ *reinterpret_cast<uint32_t*>(frame_->GetFrameDataAtPos(rel_pos)),
+ *reinterpret_cast<uint32_t*>(
+ control_frame->GetFrameDataAtPos(rel_pos)));
+
+ } else {
+ // `CaptureFrame()` should have modified the frame to have the cursor.
+ EXPECT_NE(
+ *reinterpret_cast<uint32_t*>(frame_->GetFrameDataAtPos(rel_pos)),
+ *reinterpret_cast<uint32_t*>(
+ control_frame->GetFrameDataAtPos(rel_pos)));
+ EXPECT_TRUE(frame_->may_contain_cursor());
+ }
+ }
+}
+
+TEST_F(DesktopAndCursorComposerTest,
+ CursorShouldBeIgnoredIfItIsOutOfDesktopFrame) {
+ std::unique_ptr<SharedDesktopFrame> frame(
+ SharedDesktopFrame::Wrap(CreateTestFrame()));
+ frame->set_top_left(DesktopVector(kFrameXCoord, kFrameYCoord));
+ // The frame covers (100, 200) - (200, 300).
+
+ struct {
+ int x;
+ int y;
+ } tests[] = {
+ {0, 0}, {50, 50}, {50, 150}, {100, 150}, {50, 200},
+ {99, 200}, {100, 199}, {200, 300}, {200, 299}, {199, 300},
+ {-1, -1}, {-10000, -10000}, {10000, 10000},
+ };
+ for (size_t i = 0; i < arraysize(tests); i++) {
+ SCOPED_TRACE(i);
+
+ fake_screen_->SetNextFrame(frame->Share());
+ // The CursorState is ignored when using absolute cursor position.
+ fake_cursor_->SetState(MouseCursorMonitor::OUTSIDE,
+ DesktopVector(tests[i].x, tests[i].y));
+ blender_.CaptureFrame();
+ VerifyFrame(*frame_, MouseCursorMonitor::OUTSIDE, DesktopVector(0, 0));
+ }
+}
+
+TEST_F(DesktopAndCursorComposerTest, IsOccludedShouldBeConsidered) {
+ std::unique_ptr<SharedDesktopFrame> frame(
+ SharedDesktopFrame::Wrap(CreateTestFrame()));
+ frame->set_top_left(DesktopVector(kFrameXCoord, kFrameYCoord));
+ // The frame covers (100, 200) - (200, 300).
+
+ struct {
+ int x;
+ int y;
+ } tests[] = {
+ {100, 200}, {101, 200}, {100, 201}, {101, 201}, {150, 250}, {199, 299},
+ };
+ fake_screen_->set_is_occluded(true);
+ for (size_t i = 0; i < arraysize(tests); i++) {
+ SCOPED_TRACE(i);
+
+ fake_screen_->SetNextFrame(frame->Share());
+ // The CursorState is ignored when using absolute cursor position.
+ fake_cursor_->SetState(MouseCursorMonitor::OUTSIDE,
+ DesktopVector(tests[i].x, tests[i].y));
+ blender_.CaptureFrame();
+ VerifyFrame(*frame_, MouseCursorMonitor::OUTSIDE, DesktopVector());
+ }
+}
+
+TEST_F(DesktopAndCursorComposerTest, CursorIncluded) {
+ std::unique_ptr<SharedDesktopFrame> frame(
+ SharedDesktopFrame::Wrap(CreateTestFrame()));
+ frame->set_top_left(DesktopVector(kFrameXCoord, kFrameYCoord));
+ // The frame covers (100, 200) - (200, 300).
+
+ struct {
+ int x;
+ int y;
+ } tests[] = {
+ {100, 200}, {101, 200}, {100, 201}, {101, 201}, {150, 250}, {199, 299},
+ };
+ for (size_t i = 0; i < arraysize(tests); i++) {
+ SCOPED_TRACE(i);
+
+ const DesktopVector abs_pos(tests[i].x, tests[i].y);
+ const DesktopVector rel_pos(abs_pos.subtract(frame->top_left()));
+
+ fake_screen_->SetNextFrame(frame->Share());
+ // The CursorState is ignored when using absolute cursor position.
+ fake_cursor_->SetState(MouseCursorMonitor::OUTSIDE, abs_pos);
+ blender_.CaptureFrame();
+ VerifyFrame(*frame_, MouseCursorMonitor::INSIDE, rel_pos);
+
+ // Verify that the cursor is erased before the frame buffer is returned to
+ // the screen capturer.
+ frame_.reset();
+ VerifyFrame(*frame, MouseCursorMonitor::OUTSIDE, DesktopVector());
+ }
+}
+
+TEST_F(DesktopAndCursorComposerNoCursorMonitorTest,
+ UpdatedRegionIncludesOldAndNewCursorRectsIfMoved) {
+ std::unique_ptr<SharedDesktopFrame> frame(
+ SharedDesktopFrame::Wrap(CreateTestFrame()));
+ DesktopRect first_cursor_rect;
+ {
+ // Block to scope test_cursor; OnMouseCursor() takes ownership of it.
+ MouseCursor* test_cursor = CreateTestCursor(DesktopVector(0, 0));
+ first_cursor_rect = DesktopRect::MakeSize(test_cursor->image()->size());
+ blender_.OnMouseCursor(test_cursor);
+ }
+ blender_.OnMouseCursorPosition(DesktopVector(0, 0));
+ fake_screen_->SetNextFrame(frame->Share());
+ blender_.CaptureFrame();
+
+ DesktopVector cursor_move_offset(1, 1);
+ DesktopRect second_cursor_rect = first_cursor_rect;
+ second_cursor_rect.Translate(cursor_move_offset);
+ blender_.OnMouseCursorPosition(cursor_move_offset);
+ fake_screen_->SetNextFrame(frame->Share());
+ blender_.CaptureFrame();
+
+ EXPECT_TRUE(frame->updated_region().is_empty());
+ DesktopRegion expected_region;
+ expected_region.AddRect(first_cursor_rect);
+ expected_region.AddRect(second_cursor_rect);
+ EXPECT_TRUE(frame_->updated_region().Equals(expected_region));
+}
+
+TEST_F(DesktopAndCursorComposerNoCursorMonitorTest,
+ UpdatedRegionIncludesOldAndNewCursorRectsIfShapeChanged) {
+ std::unique_ptr<SharedDesktopFrame> frame(
+ SharedDesktopFrame::Wrap(CreateTestFrame()));
+ DesktopRect first_cursor_rect;
+ {
+ // Block to scope test_cursor; OnMouseCursor() takes ownership of it.
+ MouseCursor* test_cursor = CreateTestCursor(DesktopVector(0, 0));
+ first_cursor_rect = DesktopRect::MakeSize(test_cursor->image()->size());
+ blender_.OnMouseCursor(test_cursor);
+ }
+ blender_.OnMouseCursorPosition(DesktopVector(0, 0));
+ fake_screen_->SetNextFrame(frame->Share());
+ blender_.CaptureFrame();
+
+ // Create a second cursor, the same shape as the first. Since the code doesn't
+ // compare the cursor pixels, this is sufficient, and avoids needing two test
+ // cursor bitmaps.
+ DesktopRect second_cursor_rect;
+ {
+ MouseCursor* test_cursor = CreateTestCursor(DesktopVector(0, 0));
+ second_cursor_rect = DesktopRect::MakeSize(test_cursor->image()->size());
+ blender_.OnMouseCursor(test_cursor);
+ }
+ fake_screen_->SetNextFrame(frame->Share());
+ blender_.CaptureFrame();
+
+ EXPECT_TRUE(frame->updated_region().is_empty());
+ DesktopRegion expected_region;
+ expected_region.AddRect(first_cursor_rect);
+ expected_region.AddRect(second_cursor_rect);
+ EXPECT_TRUE(frame_->updated_region().Equals(expected_region));
+}
+
+TEST_F(DesktopAndCursorComposerNoCursorMonitorTest,
+ UpdatedRegionUnchangedIfCursorUnchanged) {
+ std::unique_ptr<SharedDesktopFrame> frame(
+ SharedDesktopFrame::Wrap(CreateTestFrame()));
+ blender_.OnMouseCursor(CreateTestCursor(DesktopVector(0, 0)));
+ blender_.OnMouseCursorPosition(DesktopVector(0, 0));
+ fake_screen_->SetNextFrame(frame->Share());
+ blender_.CaptureFrame();
+ fake_screen_->SetNextFrame(frame->Share());
+ blender_.CaptureFrame();
+
+ EXPECT_TRUE(frame->updated_region().is_empty());
+ EXPECT_TRUE(frame_->updated_region().is_empty());
+}
+
+} // namespace webrtc
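The fixture above makes new tests cheap to add. A hypothetical extra test in
the same pattern (not part of the patch), checking that the cursor is blended
at the frame origin:

    TEST_F(DesktopAndCursorComposerTest, CursorBlendedAtOrigin) {
      // The hotspot defaults to (0, 0), so the cursor rect starts at the
      // cursor position itself.
      fake_cursor_->SetState(MouseCursorMonitor::INSIDE, DesktopVector(0, 0));
      fake_screen_->SetNextFrame(
          std::unique_ptr<DesktopFrame>(CreateTestFrame()));
      blender_.CaptureFrame();
      ASSERT_TRUE(frame_);
      VerifyFrame(*frame_, MouseCursorMonitor::INSIDE, DesktopVector(0, 0));
    }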
diff --git a/third_party/libwebrtc/modules/desktop_capture/desktop_capture_differ_sse2_gn/moz.build b/third_party/libwebrtc/modules/desktop_capture/desktop_capture_differ_sse2_gn/moz.build
new file mode 100644
index 0000000000..1d9b39c90e
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/desktop_capture_differ_sse2_gn/moz.build
@@ -0,0 +1,142 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_AVX2"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/desktop_capture/differ_vector_sse2.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+Library("desktop_capture_differ_sse2_gn")
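The -msse2 flag above exists because differ_vector_sse2.cc uses SSE2
intrinsics. A hedged sketch of that style of code — the real implementation
may differ, and the function name here is illustrative:

    #include <emmintrin.h>  // SSE2 intrinsics; needs the -msse2 flag above.
    #include <cstdint>

    // Returns true when two 16-byte rows are identical, one compare per row.
    bool RowsEqualSse2(const uint8_t* a, const uint8_t* b) {
      const __m128i va = _mm_loadu_si128(reinterpret_cast<const __m128i*>(a));
      const __m128i vb = _mm_loadu_si128(reinterpret_cast<const __m128i*>(b));
      const __m128i eq = _mm_cmpeq_epi8(va, vb);  // 0xff per equal byte lane.
      return _mm_movemask_epi8(eq) == 0xffff;     // All 16 lanes matched.
    }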
diff --git a/third_party/libwebrtc/modules/desktop_capture/desktop_capture_gn/moz.build b/third_party/libwebrtc/modules/desktop_capture/desktop_capture_gn/moz.build
new file mode 100644
index 0000000000..8631744ab1
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/desktop_capture_gn/moz.build
@@ -0,0 +1,439 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+if CONFIG["MOZ_WIDGET_TOOLKIT"] == "gtk":
+ CXXFLAGS += CONFIG["MOZ_GTK3_CFLAGS"]
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/media/libyuv/",
+ "/media/libyuv/libyuv/include/",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/third_party/pipewire/",
+ "/tools/profiler/public"
+]
+
+SOURCES += [
+ "/third_party/libwebrtc/modules/desktop_capture/fallback_desktop_capturer_wrapper.cc"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/desktop_capture/blank_detector_desktop_capturer_wrapper.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/cropped_desktop_frame.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/cropping_window_capturer.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/desktop_and_cursor_composer.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/desktop_capture_metrics_helper.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/desktop_capture_options.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/desktop_capturer.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/desktop_capturer_differ_wrapper.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/desktop_capturer_wrapper.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/desktop_frame_generator.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/desktop_frame_rotation.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/differ_block.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/fake_desktop_capturer.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/full_screen_application_handler.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/full_screen_window_detector.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/mouse_cursor.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/resolution_tracker.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/rgba_color.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/screen_capturer_helper.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/window_finder.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/desktop_capture/mac/desktop_configuration_monitor.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/mac/full_screen_mac_application_handler.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/mac/window_list_utils.cc"
+ ]
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "rt"
+ ]
+
+ UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/desktop_capture/mouse_cursor_monitor_linux.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/screen_capturer_linux.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/window_capturer_linux.cc"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["WEBRTC_USE_X11"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "X11",
+ "Xcomposite",
+ "Xdamage",
+ "Xext",
+ "Xfixes",
+ "Xrandr",
+ "Xrender",
+ "Xtst"
+ ]
+
+ UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/desktop_capture/linux/x11/mouse_cursor_monitor_x11.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/linux/x11/screen_capturer_x11.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/linux/x11/shared_x_display.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/linux/x11/window_capturer_x11.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/linux/x11/window_finder_x11.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/linux/x11/window_list_utils.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/linux/x11/x_atom_cache.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/linux/x11/x_error_trap.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/linux/x11/x_server_pixel_buffer.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/linux/x11/x_window_property.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/mouse_cursor_monitor_linux.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/screen_capturer_linux.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/window_capturer_linux.cc"
+ ]
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "d3d11",
+ "dxgi",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+ SOURCES += [
+ "/third_party/libwebrtc/modules/desktop_capture/win/screen_capturer_win_gdi.cc"
+ ]
+
+ UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/desktop_capture/cropping_window_capturer_win.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/desktop_frame_win.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/mouse_cursor_monitor_win.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/screen_capturer_win.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/win/cursor.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/win/d3d_device.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/win/desktop.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/win/desktop_capture_utils.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/win/display_configuration_monitor.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/win/dxgi_adapter_duplicator.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/win/dxgi_context.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/win/dxgi_duplicator_controller.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/win/dxgi_frame.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/win/dxgi_output_duplicator.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/win/dxgi_texture.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/win/dxgi_texture_mapping.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/win/dxgi_texture_staging.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/win/full_screen_win_application_handler.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/win/scoped_thread_desktop.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/win/screen_capture_utils.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/win/screen_capturer_win_directx.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/win/screen_capturer_win_magnifier.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/win/selected_window_context.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/win/window_capture_utils.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/win/window_capturer_win_gdi.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/window_capturer_win.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/window_finder_win.cc"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+ DEFINES["WEBRTC_USE_PIPEWIRE"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+ SOURCES += [
+ "/third_party/libwebrtc/modules/desktop_capture/linux/wayland/moz_base_capturer_pipewire.cc"
+ ]
+
+if CONFIG["CPU_ARCH"] == "ppc64":
+
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_USE_X11"] = True
+
+ OS_LIBS += [
+ "X11",
+ "Xcomposite",
+ "Xdamage",
+ "Xext",
+ "Xfixes",
+ "Xrandr",
+ "Xrender",
+ "Xtst"
+ ]
+
+ UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/desktop_capture/linux/x11/mouse_cursor_monitor_x11.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/linux/x11/screen_capturer_x11.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/linux/x11/shared_x_display.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/linux/x11/window_capturer_x11.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/linux/x11/window_finder_x11.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/linux/x11/window_list_utils.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/linux/x11/x_atom_cache.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/linux/x11/x_error_trap.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/linux/x11/x_server_pixel_buffer.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/linux/x11/x_window_property.cc"
+ ]
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_USE_PIPEWIRE"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+ SOURCES += [
+ "/third_party/libwebrtc/modules/desktop_capture/linux/wayland/moz_base_capturer_pipewire.cc"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_USE_PIPEWIRE"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+ SOURCES += [
+ "/third_party/libwebrtc/modules/desktop_capture/linux/wayland/moz_base_capturer_pipewire.cc"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_USE_PIPEWIRE"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+ SOURCES += [
+ "/third_party/libwebrtc/modules/desktop_capture/linux/wayland/moz_base_capturer_pipewire.cc"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_USE_X11"] = True
+
+ OS_LIBS += [
+ "X11",
+ "Xcomposite",
+ "Xdamage",
+ "Xext",
+ "Xfixes",
+ "Xrandr",
+ "Xrender",
+ "Xtst"
+ ]
+
+ UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/desktop_capture/linux/x11/mouse_cursor_monitor_x11.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/linux/x11/screen_capturer_x11.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/linux/x11/shared_x_display.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/linux/x11/window_capturer_x11.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/linux/x11/window_finder_x11.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/linux/x11/window_list_utils.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/linux/x11/x_atom_cache.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/linux/x11/x_error_trap.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/linux/x11/x_server_pixel_buffer.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/linux/x11/x_window_property.cc"
+ ]
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_USE_X11"] = True
+
+ OS_LIBS += [
+ "X11",
+ "Xcomposite",
+ "Xdamage",
+ "Xext",
+ "Xfixes",
+ "Xrandr",
+ "Xrender",
+ "Xtst"
+ ]
+
+ UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/desktop_capture/linux/x11/mouse_cursor_monitor_x11.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/linux/x11/screen_capturer_x11.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/linux/x11/shared_x_display.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/linux/x11/window_capturer_x11.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/linux/x11/window_finder_x11.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/linux/x11/window_list_utils.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/linux/x11/x_atom_cache.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/linux/x11/x_error_trap.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/linux/x11/x_server_pixel_buffer.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/linux/x11/x_window_property.cc"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_USE_X11"] = True
+
+ OS_LIBS += [
+ "X11",
+ "Xcomposite",
+ "Xdamage",
+ "Xext",
+ "Xfixes",
+ "Xrandr",
+ "Xrender",
+ "Xtst"
+ ]
+
+ UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/desktop_capture/linux/x11/mouse_cursor_monitor_x11.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/linux/x11/screen_capturer_x11.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/linux/x11/shared_x_display.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/linux/x11/window_capturer_x11.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/linux/x11/window_finder_x11.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/linux/x11/window_list_utils.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/linux/x11/x_atom_cache.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/linux/x11/x_error_trap.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/linux/x11/x_server_pixel_buffer.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/linux/x11/x_window_property.cc"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_USE_X11"] = True
+
+ OS_LIBS += [
+ "X11",
+ "Xcomposite",
+ "Xdamage",
+ "Xext",
+ "Xfixes",
+ "Xrandr",
+ "Xrender",
+ "Xtst"
+ ]
+
+ UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/desktop_capture/linux/x11/mouse_cursor_monitor_x11.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/linux/x11/screen_capturer_x11.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/linux/x11/shared_x_display.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/linux/x11/window_capturer_x11.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/linux/x11/window_finder_x11.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/linux/x11/window_list_utils.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/linux/x11/x_atom_cache.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/linux/x11/x_error_trap.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/linux/x11/x_server_pixel_buffer.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/linux/x11/x_window_property.cc"
+ ]
+
+Library("desktop_capture_gn")
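The platform defines above (WEBRTC_USE_PIPEWIRE, WEBRTC_USE_X11, ...) gate
which capturer backends get compiled in. A hedged sketch of the dispatch they
enable, assuming the upstream PipeWire and X11 capturer headers are available;
the function name is illustrative and the real logic lives in
screen_capturer_linux.cc:

    #include <memory>

    #include "modules/desktop_capture/desktop_capture_options.h"
    #include "modules/desktop_capture/desktop_capturer.h"

    namespace webrtc {

    std::unique_ptr<DesktopCapturer> CreateLinuxScreenCapturerSketch(
        const DesktopCaptureOptions& options) {
    #if defined(WEBRTC_USE_PIPEWIRE)
      if (options.allow_pipewire())  // Wayland sessions go through PipeWire.
        return BaseCapturerPipeWire::CreateRawScreenCapturer(options);
    #endif
    #if defined(WEBRTC_USE_X11)
      return ScreenCapturerX11::CreateRawScreenCapturer(options);
    #else
      return nullptr;  // No backend compiled in.
    #endif
    }

    }  // namespace webrtc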
diff --git a/third_party/libwebrtc/modules/desktop_capture/desktop_capture_metadata.h b/third_party/libwebrtc/modules/desktop_capture/desktop_capture_metadata.h
new file mode 100644
index 0000000000..faca156e33
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/desktop_capture_metadata.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_DESKTOP_CAPTURE_DESKTOP_CAPTURE_METADATA_H_
+#define MODULES_DESKTOP_CAPTURE_DESKTOP_CAPTURE_METADATA_H_
+
+#if defined(WEBRTC_USE_GIO)
+#include "modules/desktop_capture/linux/wayland/xdg_session_details.h"
+#endif // defined(WEBRTC_USE_GIO)
+
+namespace webrtc {
+
+// Container for the metadata associated with a desktop capturer.
+struct DesktopCaptureMetadata {
+#if defined(WEBRTC_USE_GIO)
+ // Details about the XDG desktop session handle (used by wayland
+ // implementation in remoting)
+ xdg_portal::SessionDetails session_details;
+#endif // defined(WEBRTC_USE_GIO)
+};
+
+} // namespace webrtc
+
+#endif // MODULES_DESKTOP_CAPTURE_DESKTOP_CAPTURE_METADATA_H_
diff --git a/third_party/libwebrtc/modules/desktop_capture/desktop_capture_metrics_helper.cc b/third_party/libwebrtc/modules/desktop_capture/desktop_capture_metrics_helper.cc
new file mode 100644
index 0000000000..6b741ef4bb
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/desktop_capture_metrics_helper.cc
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/desktop_capture_metrics_helper.h"
+
+#include "modules/desktop_capture/desktop_capture_types.h"
+#include "system_wrappers/include/metrics.h"
+
+namespace webrtc {
+namespace {
+// This enum is logged via UMA so entries should not be reordered or have their
+// values changed. This should also be kept in sync with the values in the
+// DesktopCapturerId namespace.
+enum class SequentialDesktopCapturerId {
+ kUnknown = 0,
+ kWgcCapturerWin = 1,
+ kScreenCapturerWinMagnifier = 2,
+ kWindowCapturerWinGdi = 3,
+ kScreenCapturerWinGdi = 4,
+ kScreenCapturerWinDirectx = 5,
+ kMaxValue = kScreenCapturerWinDirectx
+};
+} // namespace
+
+void RecordCapturerImpl(uint32_t capturer_id) {
+ SequentialDesktopCapturerId sequential_id;
+ switch (capturer_id) {
+ case DesktopCapturerId::kWgcCapturerWin:
+ sequential_id = SequentialDesktopCapturerId::kWgcCapturerWin;
+ break;
+ case DesktopCapturerId::kScreenCapturerWinMagnifier:
+ sequential_id = SequentialDesktopCapturerId::kScreenCapturerWinMagnifier;
+ break;
+ case DesktopCapturerId::kWindowCapturerWinGdi:
+ sequential_id = SequentialDesktopCapturerId::kWindowCapturerWinGdi;
+ break;
+ case DesktopCapturerId::kScreenCapturerWinGdi:
+ sequential_id = SequentialDesktopCapturerId::kScreenCapturerWinGdi;
+ break;
+ case DesktopCapturerId::kScreenCapturerWinDirectx:
+ sequential_id = SequentialDesktopCapturerId::kScreenCapturerWinDirectx;
+ break;
+ case DesktopCapturerId::kUnknown:
+ default:
+ sequential_id = SequentialDesktopCapturerId::kUnknown;
+ }
+ RTC_HISTOGRAM_ENUMERATION(
+ "WebRTC.DesktopCapture.Win.DesktopCapturerImpl",
+ static_cast<int>(sequential_id),
+ static_cast<int>(SequentialDesktopCapturerId::kMaxValue));
+}
+
+} // namespace webrtc
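A brief usage sketch: a Windows capturer would record its implementation once,
typically at creation time (the call site and wrapper function here are
illustrative, not from the patch):

    #include "modules/desktop_capture/desktop_capture_metrics_helper.h"
    #include "modules/desktop_capture/desktop_capture_types.h"

    // Called when the DirectX screen capturer is chosen as the backend.
    void RecordDirectxCapturerChosen() {
      webrtc::RecordCapturerImpl(
          webrtc::DesktopCapturerId::kScreenCapturerWinDirectx);
      // Maps to SequentialDesktopCapturerId::kScreenCapturerWinDirectx (5)
      // and logs it to WebRTC.DesktopCapture.Win.DesktopCapturerImpl.
    }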
diff --git a/third_party/libwebrtc/modules/desktop_capture/desktop_capture_metrics_helper.h b/third_party/libwebrtc/modules/desktop_capture/desktop_capture_metrics_helper.h
new file mode 100644
index 0000000000..37542b84bb
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/desktop_capture_metrics_helper.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_DESKTOP_CAPTURE_DESKTOP_CAPTURE_METRICS_HELPER_H_
+#define MODULES_DESKTOP_CAPTURE_DESKTOP_CAPTURE_METRICS_HELPER_H_
+
+#include <stdint.h>
+
+namespace webrtc {
+
+void RecordCapturerImpl(uint32_t capturer_id);
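+
+// A hypothetical call site, shown only as a sketch: a Windows capturer
+// implementation could record its id once when capture starts, e.g.
+//   RecordCapturerImpl(DesktopCapturerId::kScreenCapturerWinDirectx);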
+
+} // namespace webrtc
+
+#endif // MODULES_DESKTOP_CAPTURE_DESKTOP_CAPTURE_METRICS_HELPER_H_
diff --git a/third_party/libwebrtc/modules/desktop_capture/desktop_capture_objc_gn/moz.build b/third_party/libwebrtc/modules/desktop_capture/desktop_capture_objc_gn/moz.build
new file mode 100644
index 0000000000..e884abb54d
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/desktop_capture_objc_gn/moz.build
@@ -0,0 +1,72 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+CMMFLAGS += [
+ "-fobjc-arc"
+]
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_AVX2"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MAC"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+DEFINES["WEBRTC_POSIX"] = True
+DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+DEFINES["__STDC_CONSTANT_MACROS"] = True
+DEFINES["__STDC_FORMAT_MACROS"] = True
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/sdk/objc/",
+ "/third_party/libwebrtc/sdk/objc/base/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/desktop_capture/mac/desktop_configuration.mm",
+ "/third_party/libwebrtc/modules/desktop_capture/mac/desktop_frame_cgimage.mm",
+ "/third_party/libwebrtc/modules/desktop_capture/mac/desktop_frame_iosurface.mm",
+ "/third_party/libwebrtc/modules/desktop_capture/mac/desktop_frame_provider.mm",
+ "/third_party/libwebrtc/modules/desktop_capture/mac/screen_capturer_mac.mm",
+ "/third_party/libwebrtc/modules/desktop_capture/mouse_cursor_monitor_mac.mm",
+ "/third_party/libwebrtc/modules/desktop_capture/screen_capturer_darwin.mm",
+ "/third_party/libwebrtc/modules/desktop_capture/window_capturer_mac.mm",
+ "/third_party/libwebrtc/modules/desktop_capture/window_finder_mac.mm"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+Library("desktop_capture_objc_gn")
diff --git a/third_party/libwebrtc/modules/desktop_capture/desktop_capture_options.cc b/third_party/libwebrtc/modules/desktop_capture/desktop_capture_options.cc
new file mode 100644
index 0000000000..767dbfefa5
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/desktop_capture_options.cc
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/desktop_capture_options.h"
+
+#include "api/make_ref_counted.h"
+
+#if defined(WEBRTC_MAC) && !defined(WEBRTC_IOS)
+#include "modules/desktop_capture/mac/full_screen_mac_application_handler.h"
+#elif defined(WEBRTC_WIN)
+#include "modules/desktop_capture/win/full_screen_win_application_handler.h"
+#endif
+#if defined(WEBRTC_USE_PIPEWIRE) && !defined(WEBRTC_MOZILLA_BUILD)
+#include "modules/desktop_capture/linux/wayland/shared_screencast_stream.h"
+#endif
+
+namespace webrtc {
+
+DesktopCaptureOptions::DesktopCaptureOptions() {}
+DesktopCaptureOptions::DesktopCaptureOptions(
+ const DesktopCaptureOptions& options) = default;
+DesktopCaptureOptions::DesktopCaptureOptions(DesktopCaptureOptions&& options) =
+ default;
+DesktopCaptureOptions::~DesktopCaptureOptions() {}
+
+DesktopCaptureOptions& DesktopCaptureOptions::operator=(
+ const DesktopCaptureOptions& options) = default;
+DesktopCaptureOptions& DesktopCaptureOptions::operator=(
+ DesktopCaptureOptions&& options) = default;
+
+// static
+DesktopCaptureOptions DesktopCaptureOptions::CreateDefault() {
+ DesktopCaptureOptions result;
+#if defined(WEBRTC_USE_X11)
+ result.set_x_display(SharedXDisplay::CreateDefault());
+#endif
+#if defined(WEBRTC_USE_PIPEWIRE) && !defined(WEBRTC_MOZILLA_BUILD)
+ result.set_screencast_stream(SharedScreenCastStream::CreateDefault());
+#endif
+#if defined(WEBRTC_MAC) && !defined(WEBRTC_IOS)
+ result.set_configuration_monitor(
+ rtc::make_ref_counted<DesktopConfigurationMonitor>());
+ result.set_full_screen_window_detector(
+ rtc::make_ref_counted<FullScreenWindowDetector>(
+ CreateFullScreenMacApplicationHandler));
+#elif defined(WEBRTC_WIN)
+ result.set_full_screen_window_detector(
+ rtc::make_ref_counted<FullScreenWindowDetector>(
+ CreateFullScreenWinApplicationHandler));
+#endif
+ return result;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/desktop_capture_options.h b/third_party/libwebrtc/modules/desktop_capture/desktop_capture_options.h
new file mode 100644
index 0000000000..c11efdb615
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/desktop_capture_options.h
@@ -0,0 +1,244 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef MODULES_DESKTOP_CAPTURE_DESKTOP_CAPTURE_OPTIONS_H_
+#define MODULES_DESKTOP_CAPTURE_DESKTOP_CAPTURE_OPTIONS_H_
+
+#include "api/scoped_refptr.h"
+#include "rtc_base/system/rtc_export.h"
+
+#if defined(WEBRTC_USE_X11)
+#include "modules/desktop_capture/linux/x11/shared_x_display.h"
+#endif
+
+#if defined(WEBRTC_USE_PIPEWIRE) && !defined(WEBRTC_MOZILLA_BUILD)
+#include "modules/desktop_capture/linux/wayland/shared_screencast_stream.h"
+#endif
+
+#if defined(WEBRTC_MAC) && !defined(WEBRTC_IOS)
+#include "modules/desktop_capture/mac/desktop_configuration_monitor.h"
+#endif
+
+#include "modules/desktop_capture/full_screen_window_detector.h"
+
+namespace webrtc {
+
+// An object that stores initialization parameters for screen and window
+// capturers.
+class RTC_EXPORT DesktopCaptureOptions {
+ public:
+ // Returns an instance of DesktopCaptureOptions with default parameters. On
+ // Linux it also initializes the X window connection; x_display() will be set
+ // to null if the X11 connection failed (e.g. DISPLAY isn't set).
+ static DesktopCaptureOptions CreateDefault();
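+ //
+ // A minimal usage sketch (illustrative only; the option value below is an
+ // assumption, not a default):
+ //   DesktopCaptureOptions options = DesktopCaptureOptions::CreateDefault();
+ //   options.set_detect_updated_region(true);
+ //   std::unique_ptr<DesktopCapturer> capturer =
+ //       DesktopCapturer::CreateScreenCapturer(options);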
+
+ DesktopCaptureOptions();
+ DesktopCaptureOptions(const DesktopCaptureOptions& options);
+ DesktopCaptureOptions(DesktopCaptureOptions&& options);
+ ~DesktopCaptureOptions();
+
+ DesktopCaptureOptions& operator=(const DesktopCaptureOptions& options);
+ DesktopCaptureOptions& operator=(DesktopCaptureOptions&& options);
+
+#if defined(WEBRTC_USE_X11)
+ const rtc::scoped_refptr<SharedXDisplay>& x_display() const {
+ return x_display_;
+ }
+ void set_x_display(rtc::scoped_refptr<SharedXDisplay> x_display) {
+ x_display_ = x_display;
+ }
+#endif
+
+#if defined(WEBRTC_MAC) && !defined(WEBRTC_IOS)
+ // TODO(zijiehe): Move both DesktopConfigurationMonitor and
+ // FullScreenChromeWindowDetector out of DesktopCaptureOptions. It's not
+ // reasonable for external consumers to set these two parameters.
+ const rtc::scoped_refptr<DesktopConfigurationMonitor>& configuration_monitor()
+ const {
+ return configuration_monitor_;
+ }
+ // If nullptr is set, ScreenCapturer won't work and WindowCapturer may return
+ // inaccurate results from the IsOccluded() function.
+ void set_configuration_monitor(
+ rtc::scoped_refptr<DesktopConfigurationMonitor> m) {
+ configuration_monitor_ = m;
+ }
+
+ bool allow_iosurface() const { return allow_iosurface_; }
+ void set_allow_iosurface(bool allow) { allow_iosurface_ = allow; }
+#endif
+
+ const rtc::scoped_refptr<FullScreenWindowDetector>&
+ full_screen_window_detector() const {
+ return full_screen_window_detector_;
+ }
+ void set_full_screen_window_detector(
+ rtc::scoped_refptr<FullScreenWindowDetector> detector) {
+ full_screen_window_detector_ = detector;
+ }
+
+ // Flag indicating that the capturer should use screen change notifications.
+ // Enables/disables use of XDAMAGE in the X11 capturer.
+ bool use_update_notifications() const { return use_update_notifications_; }
+ void set_use_update_notifications(bool use_update_notifications) {
+ use_update_notifications_ = use_update_notifications;
+ }
+
+ // Flag indicating if desktop effects (e.g. Aero) should be disabled when the
+ // capturer is active. Currently used only on Windows.
+ bool disable_effects() const { return disable_effects_; }
+ void set_disable_effects(bool disable_effects) {
+ disable_effects_ = disable_effects;
+ }
+
+ // Flag that should be set if the consumer uses updated_region() and the
+ // capturer should try to provide correct updated_region() for the frames it
+ // generates (e.g. by comparing each frame with the previous one).
+ bool detect_updated_region() const { return detect_updated_region_; }
+ void set_detect_updated_region(bool detect_updated_region) {
+ detect_updated_region_ = detect_updated_region;
+ }
+
+#if defined(WEBRTC_WIN)
+ // Enumerating windows owned by the current process on Windows has some
+ // complications due to `GetWindowText*()` APIs potentially causing a
+ // deadlock (see the comments in the `GetWindowListHandler()` function in
+ // window_capture_utils.cc for more details on the deadlock).
+ // To avoid this issue, consumers can either ensure that the thread that runs
+ // their message loop never waits on `GetSourceList()`, or they can set this
+ // flag to false which will prevent windows running in the current process
+ // from being enumerated and included in the results. Consumers can still
+ // provide the WindowId for their own windows to `SelectSource()` and capture
+ // them.
+ bool enumerate_current_process_windows() const {
+ return enumerate_current_process_windows_;
+ }
+ void set_enumerate_current_process_windows(
+ bool enumerate_current_process_windows) {
+ enumerate_current_process_windows_ = enumerate_current_process_windows;
+ }
+
+ bool allow_use_magnification_api() const {
+ return allow_use_magnification_api_;
+ }
+ void set_allow_use_magnification_api(bool allow) {
+ allow_use_magnification_api_ = allow;
+ }
+ // Flag indicating whether the DirectX-based capturer is allowed. This
+ // capturer works on Windows 7 with the Platform Update, and on Windows 8 or
+ // later.
+ bool allow_directx_capturer() const { return allow_directx_capturer_; }
+ void set_allow_directx_capturer(bool enabled) {
+ allow_directx_capturer_ = enabled;
+ }
+
+ // Flag that may be set to allow use of the cropping window capturer (which
+ // captures the screen and crops it to the window region in some cases). An
+ // advantage of using this is significantly higher capture frame rates than
+ // capturing the window directly. A disadvantage of using this is the
+ // possibility of capturing unrelated content (e.g. overlapping windows that
+ // aren't detected properly, or neighboring regions when moving/resizing the
+ // captured window). Note: this flag influences the behavior of calls to
+ // DesktopCapturer::CreateWindowCapturer; calls to
+ // CroppingWindowCapturer::CreateCapturer ignore the flag (treat it as true).
+ bool allow_cropping_window_capturer() const {
+ return allow_cropping_window_capturer_;
+ }
+ void set_allow_cropping_window_capturer(bool allow) {
+ allow_cropping_window_capturer_ = allow;
+ }
+
+#if defined(RTC_ENABLE_WIN_WGC)
+ // This flag enables the WGC capturer for both window and screen capture.
+ // This capturer should offer similar or better performance than the cropping
+ // capturer without the disadvantages listed above. However, the WGC capturer
+ // is only available on Windows 10 version 1809 (Redstone 5) and up. This flag
+ // will have no effect on older versions.
+ // If set, and running a supported version of Win10, this flag will take
+ // precedence over the cropping, directx, and magnification flags.
+ bool allow_wgc_capturer() const { return allow_wgc_capturer_; }
+ void set_allow_wgc_capturer(bool allow) { allow_wgc_capturer_ = allow; }
+
+ // This flag enables the WGC capturer as a fallback capturer.
+ // The flag is useful when the primary capturer (e.g. WindowCapturerWinGdi) is
+ // unreliable on certain devices where WGC is supported but not used by
+ // default.
+ bool allow_wgc_capturer_fallback() const {
+ return allow_wgc_capturer_fallback_;
+ }
+ void set_allow_wgc_capturer_fallback(bool allow) {
+ allow_wgc_capturer_fallback_ = allow;
+ }
+#endif // defined(RTC_ENABLE_WIN_WGC)
+#endif // defined(WEBRTC_WIN)
+
+#if defined(WEBRTC_USE_PIPEWIRE)
+ bool allow_pipewire() const { return allow_pipewire_; }
+ void set_allow_pipewire(bool allow) { allow_pipewire_ = allow; }
+
+#if !defined(WEBRTC_MOZILLA_BUILD)
+ const rtc::scoped_refptr<SharedScreenCastStream>& screencast_stream() const {
+ return screencast_stream_;
+ }
+ void set_screencast_stream(
+ rtc::scoped_refptr<SharedScreenCastStream> stream) {
+ screencast_stream_ = stream;
+ }
+
+ void set_width(uint32_t width) { width_ = width; }
+ uint32_t get_width() const { return width_; }
+
+ void set_height(uint32_t height) { height_ = height; }
+ uint32_t get_height() const { return height_; }
+#endif
+#endif
+
+ private:
+#if defined(WEBRTC_USE_X11)
+ rtc::scoped_refptr<SharedXDisplay> x_display_;
+#endif
+#if defined(WEBRTC_USE_PIPEWIRE) && !defined(WEBRTC_MOZILLA_BUILD)
+ // An instance of the shared PipeWire ScreenCast stream that is shared
+ // between BaseCapturerPipeWire and MouseCursorMonitorPipeWire, because
+ // cursor information is sent together with the screen content.
+ rtc::scoped_refptr<SharedScreenCastStream> screencast_stream_;
+#endif
+#if defined(WEBRTC_MAC) && !defined(WEBRTC_IOS)
+ rtc::scoped_refptr<DesktopConfigurationMonitor> configuration_monitor_;
+ bool allow_iosurface_ = false;
+#endif
+
+ rtc::scoped_refptr<FullScreenWindowDetector> full_screen_window_detector_;
+
+#if defined(WEBRTC_WIN)
+ bool enumerate_current_process_windows_ = true;
+ bool allow_use_magnification_api_ = false;
+ bool allow_directx_capturer_ = false;
+ bool allow_cropping_window_capturer_ = false;
+#if defined(RTC_ENABLE_WIN_WGC)
+ bool allow_wgc_capturer_ = false;
+ bool allow_wgc_capturer_fallback_ = false;
+#endif
+#endif
+#if defined(WEBRTC_USE_X11)
+ bool use_update_notifications_ = false;
+#else
+ bool use_update_notifications_ = true;
+#endif
+ bool disable_effects_ = true;
+ bool detect_updated_region_ = false;
+#if defined(WEBRTC_USE_PIPEWIRE)
+ bool allow_pipewire_ = false;
+ uint32_t width_ = 0;
+ uint32_t height_ = 0;
+#endif
+};
+
+} // namespace webrtc
+
+#endif // MODULES_DESKTOP_CAPTURE_DESKTOP_CAPTURE_OPTIONS_H_
diff --git a/third_party/libwebrtc/modules/desktop_capture/desktop_capture_types.h b/third_party/libwebrtc/modules/desktop_capture/desktop_capture_types.h
new file mode 100644
index 0000000000..94be2fb68b
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/desktop_capture_types.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_DESKTOP_CAPTURE_DESKTOP_CAPTURE_TYPES_H_
+#define MODULES_DESKTOP_CAPTURE_DESKTOP_CAPTURE_TYPES_H_
+
+#ifndef XP_WIN
+#include <sys/types.h> // pid_t
+#endif
+#include <stdint.h>
+
+#ifdef XP_WIN
+// Moving this into the global namespace, matching what used to be in
+// video_capture_defines.h.
+typedef int pid_t;
+#endif // XP_WIN
+
+namespace webrtc {
+
+enum class CaptureType { kWindow, kScreen };
+
+// Type used to identify windows on the desktop. Values are platform-specific:
+// - On Windows: HWND cast to intptr_t.
+// - On Linux (with X11): X11 Window (unsigned long) type cast to intptr_t.
+// - On OSX: integer window number.
+typedef intptr_t WindowId;
+
+const WindowId kNullWindowId = 0;
+
+// Type used to identify screens on the desktop. Values are platform-specific:
+// - On Windows: integer display device index.
+// - On OSX: CGDirectDisplayID cast to intptr_t.
+// - On Linux (with X11): TBD.
+// - On ChromeOS: display::Display::id() is an int64_t.
+// On Windows, ScreenId is implementation dependent: sending a ScreenId from one
+// implementation to another usually won't work correctly.
+#if defined(CHROMEOS)
+ typedef int64_t ScreenId;
+#else
+ typedef intptr_t ScreenId;
+#endif
+
+// The screen id that corresponds to all screens combined together.
+const ScreenId kFullDesktopScreenId = -1;
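+// (Typical usage, stated as an assumption rather than a contract of this
+// header: a consumer passes kFullDesktopScreenId to
+// DesktopCapturer::SelectSource() to capture all screens as a single frame.)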
+
+const ScreenId kInvalidScreenId = -2;
+
+typedef intptr_t ProcessId;
+const ProcessId DesktopProcessId = 0;
+
+// Integers to attach to each DesktopFrame to differentiate the generator of
+// the frame. The entries in this namespace should remain in sync with the
+// SequentialDesktopCapturerId enum, which is logged via UMA.
+// `kScreenCapturerWinGdi` and `kScreenCapturerWinDirectx` values are preserved
+// to maintain compatibility.
+namespace DesktopCapturerId {
+constexpr uint32_t CreateFourCC(char a, char b, char c, char d) {
+ return ((static_cast<uint32_t>(a)) | (static_cast<uint32_t>(b) << 8) |
+ (static_cast<uint32_t>(c) << 16) | (static_cast<uint32_t>(d) << 24));
+}
+
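+// For example, kScreenCapturerWinGdi below evaluates to
+// 'G' | 'D' << 8 | 'I' << 16 | ' ' << 24 == 0x20494447, i.e. the bytes
+// "GDI " in little-endian order.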
+constexpr uint32_t kUnknown = 0;
+constexpr uint32_t kWgcCapturerWin = 1;
+constexpr uint32_t kScreenCapturerWinMagnifier = 2;
+constexpr uint32_t kWindowCapturerWinGdi = 3;
+constexpr uint32_t kScreenCapturerWinGdi = CreateFourCC('G', 'D', 'I', ' ');
+constexpr uint32_t kScreenCapturerWinDirectx = CreateFourCC('D', 'X', 'G', 'I');
+} // namespace DesktopCapturerId
+
+} // namespace webrtc
+
+#endif // MODULES_DESKTOP_CAPTURE_DESKTOP_CAPTURE_TYPES_H_
diff --git a/third_party/libwebrtc/modules/desktop_capture/desktop_capturer.cc b/third_party/libwebrtc/modules/desktop_capture/desktop_capturer.cc
new file mode 100644
index 0000000000..9022652ec2
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/desktop_capturer.cc
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/desktop_capturer.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+#include <cstring>
+#include <utility>
+
+#include "modules/desktop_capture/cropping_window_capturer.h"
+#include "modules/desktop_capture/desktop_capture_options.h"
+#include "modules/desktop_capture/desktop_capturer_differ_wrapper.h"
+
+#if defined(RTC_ENABLE_WIN_WGC)
+#include "modules/desktop_capture/win/wgc_capturer_win.h"
+#include "rtc_base/win/windows_version.h"
+#endif // defined(RTC_ENABLE_WIN_WGC)
+
+#if defined(WEBRTC_USE_PIPEWIRE) || defined(WEBRTC_USE_X11)
+#include <gtk/gtk.h>
+#include <gtk/gtkx.h>
+#endif
+
+namespace webrtc {
+
+DesktopCapturer::~DesktopCapturer() = default;
+
+void DesktopCapturer::SetSharedMemoryFactory(
+ std::unique_ptr<SharedMemoryFactory> shared_memory_factory) {}
+
+void DesktopCapturer::SetExcludedWindow(WindowId window) {}
+
+bool DesktopCapturer::GetSourceList(SourceList* sources) {
+ return true;
+}
+
+bool DesktopCapturer::SelectSource(SourceId id) {
+ return false;
+}
+
+bool DesktopCapturer::FocusOnSelectedSource() {
+ return false;
+}
+
+bool DesktopCapturer::IsOccluded(const DesktopVector& pos) {
+ return false;
+}
+
+// static
+std::unique_ptr<DesktopCapturer> DesktopCapturer::CreateWindowCapturer(
+ const DesktopCaptureOptions& options) {
+#if defined(RTC_ENABLE_WIN_WGC)
+ if (options.allow_wgc_capturer() && IsWgcSupported(CaptureType::kWindow)) {
+ return WgcCapturerWin::CreateRawWindowCapturer(options);
+ }
+#endif // defined(RTC_ENABLE_WIN_WGC)
+
+#if defined(WEBRTC_WIN)
+ if (options.allow_cropping_window_capturer()) {
+ return CroppingWindowCapturer::CreateCapturer(options);
+ }
+#endif // defined(WEBRTC_WIN)
+
+ std::unique_ptr<DesktopCapturer> capturer = CreateRawWindowCapturer(options);
+ if (capturer && options.detect_updated_region()) {
+ capturer.reset(new DesktopCapturerDifferWrapper(std::move(capturer)));
+ }
+
+ return capturer;
+}
+
+// static
+std::unique_ptr<DesktopCapturer> DesktopCapturer::CreateScreenCapturer(
+ const DesktopCaptureOptions& options) {
+#if defined(RTC_ENABLE_WIN_WGC)
+ if (options.allow_wgc_capturer() && IsWgcSupported(CaptureType::kScreen)) {
+ return WgcCapturerWin::CreateRawScreenCapturer(options);
+ }
+#endif // defined(RTC_ENABLE_WIN_WGC)
+
+ std::unique_ptr<DesktopCapturer> capturer = CreateRawScreenCapturer(options);
+ if (capturer && options.detect_updated_region()) {
+ capturer.reset(new DesktopCapturerDifferWrapper(std::move(capturer)));
+ }
+
+ return capturer;
+}
+
+// static
+std::unique_ptr<DesktopCapturer> DesktopCapturer::CreateTabCapturer(
+ const DesktopCaptureOptions& options) {
+ std::unique_ptr<DesktopCapturer> capturer = CreateRawTabCapturer(options);
+ if (capturer && options.detect_updated_region()) {
+ capturer.reset(new DesktopCapturerDifferWrapper(std::move(capturer)));
+ }
+
+ return capturer;
+}
+
+#if defined(WEBRTC_USE_PIPEWIRE) || defined(WEBRTC_USE_X11)
+bool DesktopCapturer::IsRunningUnderWayland() {
+ const char* xdg_session_type = getenv("XDG_SESSION_TYPE");
+ if (!xdg_session_type || strncmp(xdg_session_type, "wayland", 7) != 0)
+ return false;
+
+ if (!(getenv("WAYLAND_DISPLAY")))
+ return false;
+
+ return true;
+}
+#endif // defined(WEBRTC_USE_PIPEWIRE) || defined(WEBRTC_USE_X11)
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/desktop_capturer.h b/third_party/libwebrtc/modules/desktop_capture/desktop_capturer.h
new file mode 100644
index 0000000000..6e9b95e214
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/desktop_capturer.h
@@ -0,0 +1,188 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_DESKTOP_CAPTURE_DESKTOP_CAPTURER_H_
+#define MODULES_DESKTOP_CAPTURE_DESKTOP_CAPTURER_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <memory>
+#include <string>
+#include <type_traits>
+#include <vector>
+
+#if defined(WEBRTC_USE_GIO)
+#include "modules/desktop_capture/desktop_capture_metadata.h"
+#endif // defined(WEBRTC_USE_GIO)
+#include "modules/desktop_capture/desktop_capture_types.h"
+#include "modules/desktop_capture/desktop_frame.h"
+#include "modules/desktop_capture/shared_memory.h"
+#include "rtc_base/system/rtc_export.h"
+
+namespace webrtc {
+
+class DesktopCaptureOptions;
+class DesktopFrame;
+
+// Abstract interface for screen and window capturers.
+class RTC_EXPORT DesktopCapturer {
+ public:
+ enum class Result {
+ // The frame was captured successfully.
+ SUCCESS,
+
+ // There was a temporary error. The caller should continue calling
+ // CaptureFrame(), in the expectation that it will eventually recover.
+ ERROR_TEMPORARY,
+
+ // Capture has failed and will keep failing if the caller tries calling
+ // CaptureFrame() again.
+ ERROR_PERMANENT,
+
+ MAX_VALUE = ERROR_PERMANENT
+ };
+
+ // Interface that must be implemented by the DesktopCapturer consumers.
+ class Callback {
+ public:
+ // Called after a frame has been captured. `frame` is not nullptr if and
+ // only if `result` is SUCCESS.
+ virtual void OnCaptureResult(Result result,
+ std::unique_ptr<DesktopFrame> frame) = 0;
+
+ protected:
+ virtual ~Callback() {}
+ };
+
+#if defined(CHROMEOS)
+ typedef int64_t SourceId;
+#else
+ typedef intptr_t SourceId;
+#endif
+
+ static_assert(std::is_same<SourceId, ScreenId>::value,
+ "SourceId should be a same type as ScreenId.");
+
+ struct Source {
+ // The unique id representing a Source of the current DesktopCapturer.
+ SourceId id;
+ pid_t pid;
+
+ // Title of the window or screen in UTF-8 encoding, may be empty. This field
+ // should not be used to identify a source.
+ std::string title;
+ };
+
+ typedef std::vector<Source> SourceList;
+
+ virtual ~DesktopCapturer();
+
+ // Called at the beginning of a capturing session. `callback` must remain
+ // valid until capturer is destroyed.
+ virtual void Start(Callback* callback) = 0;
+
+ // Sets SharedMemoryFactory that will be used to create buffers for the
+ // captured frames. The factory can be invoked on a thread other than the one
+ // where CaptureFrame() is called. It will be destroyed on the same thread.
+ // Shared memory is currently supported only by some DesktopCapturer
+ // implementations.
+ virtual void SetSharedMemoryFactory(
+ std::unique_ptr<SharedMemoryFactory> shared_memory_factory);
+
+ // Captures the next frame and invokes the callback provided to Start().
+ // Pending capture requests are canceled when DesktopCapturer is deleted.
+ virtual void CaptureFrame() = 0;
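+
+ // A minimal consumer sketch (the `FrameSink` name and timer wording are
+ // illustrative assumptions, not part of this API):
+ //   class FrameSink : public DesktopCapturer::Callback {
+ //    public:
+ //     void OnCaptureResult(Result result,
+ //                          std::unique_ptr<DesktopFrame> frame) override {
+ //       if (result == Result::SUCCESS) { /* consume `frame` */ }
+ //     }
+ //   };
+ //   FrameSink sink;
+ //   capturer->Start(&sink);
+ //   capturer->CaptureFrame(); // Repeat, e.g. on a timer tick.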
+
+ // Sets the window to be excluded from the captured image in future
+ // Capture calls. Used to exclude the screenshare notification window for
+ // screen capturing.
+ virtual void SetExcludedWindow(WindowId window);
+
+ // TODO(zijiehe): Following functions should be pure virtual. The default
+ // implementations are for backward compatibility only. Remove default
+ // implementations once all DesktopCapturer implementations in Chromium have
+ // implemented these functions.
+
+ // Gets a list of sources the current capturer supports. Returns false in case of
+ // a failure.
+ // For DesktopCapturer implementations that capture screens, this function
+ // should return monitors.
+ // For DesktopCapturer implementations that capture windows, this function
+ // should only return root windows owned by applications.
+ virtual bool GetSourceList(SourceList* sources);
+
+ // Selects a source to be captured. Returns false in case of a failure (e.g.
+ // if there is no source with the specified type and id).
+ virtual bool SelectSource(SourceId id);
+
+ // Brings the selected source to the front and sets the input focus on it.
+ // Returns false in case of a failure, if no source has been selected, or if
+ // the implementation does not support this functionality.
+ virtual bool FocusOnSelectedSource();
+
+ // Returns true if the `pos` on the selected source is covered by other
+ // elements on the display, and is not visible to the users.
+ // `pos` is in full desktop coordinates, i.e. the top-left monitor always
+ // starts from (0, 0).
+ // The return value is undefined if `pos` is outside the bounds of the source.
+ virtual bool IsOccluded(const DesktopVector& pos);
+
+ // Creates a DesktopCapturer instance that captures windows.
+ static std::unique_ptr<DesktopCapturer> CreateWindowCapturer(
+ const DesktopCaptureOptions& options);
+
+ // Creates a DesktopCapturer instance that captures screens.
+ static std::unique_ptr<DesktopCapturer> CreateScreenCapturer(
+ const DesktopCaptureOptions& options);
+
+ // Creates a DesktopCapturer instance that captures tabs.
+ static std::unique_ptr<DesktopCapturer> CreateTabCapturer(
+ const DesktopCaptureOptions& options);
+
+#if defined(WEBRTC_USE_PIPEWIRE) || defined(WEBRTC_USE_X11)
+ static bool IsRunningUnderWayland();
+
+ virtual void UpdateResolution(uint32_t width, uint32_t height) {}
+#endif // defined(WEBRTC_USE_PIPEWIRE) || defined(WEBRTC_USE_X11)
+
+#if defined(WEBRTC_USE_GIO)
+ // Returns implementation-specific metadata about the capturer. Classes can
+ // choose to override it or use the default implementation, which returns
+ // empty metadata.
+ virtual DesktopCaptureMetadata GetMetadata() { return {}; }
+#endif // defined(WEBRTC_USE_GIO)
+
+ protected:
+ // CroppingWindowCapturer needs to create raw capturers without wrappers, so
+ // the following two functions are protected.
+
+ // Creates a platform-specific DesktopCapturer instance that captures
+ // windows.
+ static std::unique_ptr<DesktopCapturer> CreateRawWindowCapturer(
+ const DesktopCaptureOptions& options);
+
+ // Creates a platform-specific DesktopCapturer instance that captures
+ // screens.
+ static std::unique_ptr<DesktopCapturer> CreateRawScreenCapturer(
+ const DesktopCaptureOptions& options);
+
+ // Creates a platform-specific DesktopCapturer instance that captures
+ // apps.
+ static std::unique_ptr<DesktopCapturer> CreateRawAppCapturer(
+ const DesktopCaptureOptions& options);
+
+ // Creates a DesktopCapturer instance that captures tabs.
+ static std::unique_ptr<DesktopCapturer> CreateRawTabCapturer(
+ const DesktopCaptureOptions& options);
+};
+
+} // namespace webrtc
+
+#endif // MODULES_DESKTOP_CAPTURE_DESKTOP_CAPTURER_H_
diff --git a/third_party/libwebrtc/modules/desktop_capture/desktop_capturer_differ_wrapper.cc b/third_party/libwebrtc/modules/desktop_capture/desktop_capturer_differ_wrapper.cc
new file mode 100644
index 0000000000..77543e4060
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/desktop_capturer_differ_wrapper.cc
@@ -0,0 +1,232 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/desktop_capturer_differ_wrapper.h"
+
+#include <stdint.h>
+#include <string.h>
+
+#include <utility>
+
+#include "modules/desktop_capture/desktop_geometry.h"
+#include "modules/desktop_capture/desktop_region.h"
+#include "modules/desktop_capture/differ_block.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/time_utils.h"
+
+namespace webrtc {
+
+namespace {
+
+// Returns true if the (0, 0) - (`width`, `height`) region in `old_buffer` and
+// `new_buffer` differs. `width` should be less than 32
+// (defined by kBlockSize), otherwise BlockDifference() should be used.
+bool PartialBlockDifference(const uint8_t* old_buffer,
+ const uint8_t* new_buffer,
+ int width,
+ int height,
+ int stride) {
+ RTC_DCHECK_LT(width, kBlockSize);
+ const int width_bytes = width * DesktopFrame::kBytesPerPixel;
+ for (int i = 0; i < height; i++) {
+ if (memcmp(old_buffer, new_buffer, width_bytes) != 0) {
+ return true;
+ }
+ old_buffer += stride;
+ new_buffer += stride;
+ }
+ return false;
+}
+
+// Compares the block columns in the range [`left`, `right`), over the rows in
+// the range [`top`, `bottom`), starting from `old_buffer` and `new_buffer`,
+// and outputs the updated regions into `output`. `stride` is the
+// DesktopFrame::stride().
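+// As a worked example (assuming kBlockSize == 32): for a 100-pixel-wide
+// range, block_count == (100 - 1) / 32 == 3 and
+// last_block_width == 100 - 3 * 32 == 4, so three full block-columns are
+// compared with BlockDifference() and the trailing 4-pixel partial
+// block-column with PartialBlockDifference().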
+void CompareRow(const uint8_t* old_buffer,
+ const uint8_t* new_buffer,
+ const int left,
+ const int right,
+ const int top,
+ const int bottom,
+ const int stride,
+ DesktopRegion* const output) {
+ const int block_x_offset = kBlockSize * DesktopFrame::kBytesPerPixel;
+ const int width = right - left;
+ const int height = bottom - top;
+ const int block_count = (width - 1) / kBlockSize;
+ const int last_block_width = width - block_count * kBlockSize;
+ RTC_DCHECK_GT(last_block_width, 0);
+ RTC_DCHECK_LE(last_block_width, kBlockSize);
+
+ // The first block-column in a continuous dirty area in current block-row.
+ int first_dirty_x_block = -1;
+
+ // We always need to add the dirty area into `output` in the last block, so
+ // handle it separately.
+ for (int x = 0; x < block_count; x++) {
+ if (BlockDifference(old_buffer, new_buffer, height, stride)) {
+ if (first_dirty_x_block == -1) {
+ // This is the first dirty block in a continuous dirty area.
+ first_dirty_x_block = x;
+ }
+ } else if (first_dirty_x_block != -1) {
+ // The block on the left is the last dirty block in a continuous
+ // dirty area.
+ output->AddRect(
+ DesktopRect::MakeLTRB(first_dirty_x_block * kBlockSize + left, top,
+ x * kBlockSize + left, bottom));
+ first_dirty_x_block = -1;
+ }
+ old_buffer += block_x_offset;
+ new_buffer += block_x_offset;
+ }
+
+ bool last_block_diff;
+ if (last_block_width < kBlockSize) {
+ // The last one is a partial block.
+ last_block_diff = PartialBlockDifference(old_buffer, new_buffer,
+ last_block_width, height, stride);
+ } else {
+ last_block_diff = BlockDifference(old_buffer, new_buffer, height, stride);
+ }
+ if (last_block_diff) {
+ if (first_dirty_x_block == -1) {
+ first_dirty_x_block = block_count;
+ }
+ output->AddRect(DesktopRect::MakeLTRB(
+ first_dirty_x_block * kBlockSize + left, top, right, bottom));
+ } else if (first_dirty_x_block != -1) {
+ output->AddRect(
+ DesktopRect::MakeLTRB(first_dirty_x_block * kBlockSize + left, top,
+ block_count * kBlockSize + left, bottom));
+ }
+}
+
+// Compares `rect` area in `old_frame` and `new_frame`, and outputs dirty
+// regions into `output`.
+void CompareFrames(const DesktopFrame& old_frame,
+ const DesktopFrame& new_frame,
+ DesktopRect rect,
+ DesktopRegion* const output) {
+ RTC_DCHECK(old_frame.size().equals(new_frame.size()));
+ RTC_DCHECK_EQ(old_frame.stride(), new_frame.stride());
+ rect.IntersectWith(DesktopRect::MakeSize(old_frame.size()));
+
+ const int y_block_count = (rect.height() - 1) / kBlockSize;
+ const int last_y_block_height = rect.height() - y_block_count * kBlockSize;
+ // Offset from the start of one block-row to the next.
+ const int block_y_stride = old_frame.stride() * kBlockSize;
+ const uint8_t* prev_block_row_start =
+ old_frame.GetFrameDataAtPos(rect.top_left());
+ const uint8_t* curr_block_row_start =
+ new_frame.GetFrameDataAtPos(rect.top_left());
+
+ int top = rect.top();
+ // The last row may have a different height, so we handle it separately.
+ for (int y = 0; y < y_block_count; y++) {
+ CompareRow(prev_block_row_start, curr_block_row_start, rect.left(),
+ rect.right(), top, top + kBlockSize, old_frame.stride(), output);
+ top += kBlockSize;
+ prev_block_row_start += block_y_stride;
+ curr_block_row_start += block_y_stride;
+ }
+ CompareRow(prev_block_row_start, curr_block_row_start, rect.left(),
+ rect.right(), top, top + last_y_block_height, old_frame.stride(),
+ output);
+}
+
+} // namespace
+
+DesktopCapturerDifferWrapper::DesktopCapturerDifferWrapper(
+ std::unique_ptr<DesktopCapturer> base_capturer)
+ : base_capturer_(std::move(base_capturer)) {
+ RTC_DCHECK(base_capturer_);
+}
+
+DesktopCapturerDifferWrapper::~DesktopCapturerDifferWrapper() {}
+
+void DesktopCapturerDifferWrapper::Start(DesktopCapturer::Callback* callback) {
+ callback_ = callback;
+ base_capturer_->Start(this);
+}
+
+void DesktopCapturerDifferWrapper::SetSharedMemoryFactory(
+ std::unique_ptr<SharedMemoryFactory> shared_memory_factory) {
+ base_capturer_->SetSharedMemoryFactory(std::move(shared_memory_factory));
+}
+
+void DesktopCapturerDifferWrapper::CaptureFrame() {
+ base_capturer_->CaptureFrame();
+}
+
+void DesktopCapturerDifferWrapper::SetExcludedWindow(WindowId window) {
+ base_capturer_->SetExcludedWindow(window);
+}
+
+bool DesktopCapturerDifferWrapper::GetSourceList(SourceList* sources) {
+ return base_capturer_->GetSourceList(sources);
+}
+
+bool DesktopCapturerDifferWrapper::SelectSource(SourceId id) {
+ return base_capturer_->SelectSource(id);
+}
+
+bool DesktopCapturerDifferWrapper::FocusOnSelectedSource() {
+ return base_capturer_->FocusOnSelectedSource();
+}
+
+bool DesktopCapturerDifferWrapper::IsOccluded(const DesktopVector& pos) {
+ return base_capturer_->IsOccluded(pos);
+}
+
+#if defined(WEBRTC_USE_GIO)
+DesktopCaptureMetadata DesktopCapturerDifferWrapper::GetMetadata() {
+ return base_capturer_->GetMetadata();
+}
+#endif // defined(WEBRTC_USE_GIO)
+
+void DesktopCapturerDifferWrapper::OnCaptureResult(
+ Result result,
+ std::unique_ptr<DesktopFrame> input_frame) {
+ int64_t start_time_nanos = rtc::TimeNanos();
+ if (!input_frame) {
+ callback_->OnCaptureResult(result, nullptr);
+ return;
+ }
+ RTC_DCHECK(result == Result::SUCCESS);
+
+ std::unique_ptr<SharedDesktopFrame> frame =
+ SharedDesktopFrame::Wrap(std::move(input_frame));
+ if (last_frame_ && (last_frame_->size().width() != frame->size().width() ||
+ last_frame_->size().height() != frame->size().height() ||
+ last_frame_->stride() != frame->stride())) {
+ last_frame_.reset();
+ }
+
+ if (last_frame_) {
+ DesktopRegion hints;
+ hints.Swap(frame->mutable_updated_region());
+ for (DesktopRegion::Iterator it(hints); !it.IsAtEnd(); it.Advance()) {
+ CompareFrames(*last_frame_, *frame, it.rect(),
+ frame->mutable_updated_region());
+ }
+ } else {
+ frame->mutable_updated_region()->SetRect(
+ DesktopRect::MakeSize(frame->size()));
+ }
+ last_frame_ = frame->Share();
+
+ frame->set_capture_time_ms(frame->capture_time_ms() +
+ (rtc::TimeNanos() - start_time_nanos) /
+ rtc::kNumNanosecsPerMillisec);
+ callback_->OnCaptureResult(result, std::move(frame));
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/desktop_capturer_differ_wrapper.h b/third_party/libwebrtc/modules/desktop_capture/desktop_capturer_differ_wrapper.h
new file mode 100644
index 0000000000..6ebb5d7bc3
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/desktop_capturer_differ_wrapper.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_DESKTOP_CAPTURE_DESKTOP_CAPTURER_DIFFER_WRAPPER_H_
+#define MODULES_DESKTOP_CAPTURE_DESKTOP_CAPTURER_DIFFER_WRAPPER_H_
+
+#include <memory>
+#if defined(WEBRTC_USE_GIO)
+#include "modules/desktop_capture/desktop_capture_metadata.h"
+#endif // defined(WEBRTC_USE_GIO)
+#include "modules/desktop_capture/desktop_capture_types.h"
+#include "modules/desktop_capture/desktop_capturer.h"
+#include "modules/desktop_capture/desktop_frame.h"
+#include "modules/desktop_capture/desktop_geometry.h"
+#include "modules/desktop_capture/shared_desktop_frame.h"
+#include "modules/desktop_capture/shared_memory.h"
+#include "rtc_base/system/rtc_export.h"
+
+namespace webrtc {
+
+// DesktopCapturer wrapper that calculates updated_region() by comparing frames
+// content. This class always expects the underlying DesktopCapturer
+// implementation to return a superset of the updated regions in the
+// DesktopFrame. If a DesktopCapturer implementation does not know the updated
+// region, it should set updated_region() to the full frame.
+//
+// This class marks the entire frame as updated if the frame size or frame stride
+// has been changed.
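+//
+// Note: DesktopCapturer::CreateWindowCapturer(), CreateScreenCapturer() and
+// CreateTabCapturer() apply this wrapper automatically when
+// DesktopCaptureOptions::detect_updated_region() is set (see
+// desktop_capturer.cc).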
+class RTC_EXPORT DesktopCapturerDifferWrapper
+ : public DesktopCapturer,
+ public DesktopCapturer::Callback {
+ public:
+ // Creates a DesktopCapturerDifferWrapper with a DesktopCapturer
+ // implementation, and takes its ownership.
+ explicit DesktopCapturerDifferWrapper(
+ std::unique_ptr<DesktopCapturer> base_capturer);
+
+ ~DesktopCapturerDifferWrapper() override;
+
+ // DesktopCapturer interface.
+ void Start(DesktopCapturer::Callback* callback) override;
+ void SetSharedMemoryFactory(
+ std::unique_ptr<SharedMemoryFactory> shared_memory_factory) override;
+ void CaptureFrame() override;
+ void SetExcludedWindow(WindowId window) override;
+ bool GetSourceList(SourceList* screens) override;
+ bool SelectSource(SourceId id) override;
+ bool FocusOnSelectedSource() override;
+ bool IsOccluded(const DesktopVector& pos) override;
+#if defined(WEBRTC_USE_GIO)
+ DesktopCaptureMetadata GetMetadata() override;
+#endif // defined(WEBRTC_USE_GIO)
+ private:
+ // DesktopCapturer::Callback interface.
+ void OnCaptureResult(Result result,
+ std::unique_ptr<DesktopFrame> frame) override;
+
+ const std::unique_ptr<DesktopCapturer> base_capturer_;
+ DesktopCapturer::Callback* callback_;
+ std::unique_ptr<SharedDesktopFrame> last_frame_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_DESKTOP_CAPTURE_DESKTOP_CAPTURER_DIFFER_WRAPPER_H_
diff --git a/third_party/libwebrtc/modules/desktop_capture/desktop_capturer_differ_wrapper_unittest.cc b/third_party/libwebrtc/modules/desktop_capture/desktop_capturer_differ_wrapper_unittest.cc
new file mode 100644
index 0000000000..9ccef3cc10
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/desktop_capturer_differ_wrapper_unittest.cc
@@ -0,0 +1,291 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/desktop_capturer_differ_wrapper.h"
+
+#include <initializer_list>
+#include <memory>
+#include <utility>
+#include <vector>
+
+#include "modules/desktop_capture/desktop_geometry.h"
+#include "modules/desktop_capture/desktop_region.h"
+#include "modules/desktop_capture/differ_block.h"
+#include "modules/desktop_capture/fake_desktop_capturer.h"
+#include "modules/desktop_capture/mock_desktop_capturer_callback.h"
+#include "rtc_base/random.h"
+#include "rtc_base/time_utils.h"
+#include "system_wrappers/include/cpu_features_wrapper.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+namespace {
+
+// Compares and asserts that `frame`.updated_region() equals `rects`. This
+// function does not care about the order of the `rects` and it does not expect
+// DesktopRegion to return an exact area for each rectangle in `rects`.
+template <template <typename, typename...> class T = std::initializer_list,
+ typename... Rect>
+void AssertUpdatedRegionIs(const DesktopFrame& frame,
+ const T<DesktopRect, Rect...>& rects) {
+ DesktopRegion region;
+ for (const auto& rect : rects) {
+ region.AddRect(rect);
+ }
+ ASSERT_TRUE(frame.updated_region().Equals(region));
+}
+
+// Compares and asserts that `frame`.updated_region() covers all rectangles in
+// `rects`, but does not cover areas other than a kBlockSize expansion. This
+// function does not care about the order of the `rects`, and it does not expect
+// DesktopRegion to return an exact area of each rectangle in `rects`.
+template <template <typename, typename...> class T = std::initializer_list,
+ typename... Rect>
+void AssertUpdatedRegionCovers(const DesktopFrame& frame,
+ const T<DesktopRect, Rect...>& rects) {
+ DesktopRegion region;
+ for (const auto& rect : rects) {
+ region.AddRect(rect);
+ }
+
+ // The intersection of `rects` and `frame`.updated_region() should be
+ // `rects`, i.e. `frame`.updated_region() should be a superset of `rects`.
+ DesktopRegion intersect(region);
+ intersect.IntersectWith(frame.updated_region());
+ ASSERT_TRUE(region.Equals(intersect));
+
+ // The difference between `rects` and `frame`.updated_region() should not
+ // cover any area whose width and height are both larger than twice
+ // kBlockSize.
+ //
+ // An explanation of the 'twice' kBlockSize (indeed kBlockSize * 2 - 2) bound
+ // follows.
+ // (Each block in the following grid is an 8 x 8 pixel area. X means the real
+ // updated area, m means the updated area marked by
+ // DesktopCapturerDifferWrapper.)
+ // +---+---+---+---+---+---+---+---+
+ // | X | m | m | m | m | m | m | m |
+ // +---+---+---+---+---+---+---+---+
+ // | m | m | m | m | m | m | m | m |
+ // +---+---+---+---+---+---+---+---+
+ // | m | m | m | m | m | m | m | m |
+ // +---+---+---+---+---+---+---+---+
+ // | m | m | m | m | m | m | m | X |
+ // +---+---+---+---+---+---+---+---+
+ // The top-left [0, 0] - [8, 8] and bottom-right [56, 24] - [64, 32] blocks of
+ // this area are updated. But since DesktopCapturerDifferWrapper compares
+ // 32 x 32 blocks by default, this entire area is marked as updated. So the
+ // [8, 8] - [56, 32] area is expected to be covered in the difference.
+ //
+ // But if [0, 0] - [8, 8] and [64, 24] - [72, 32] blocks are updated,
+ // +---+---+---+---+---+---+---+---+---+---+---+---+
+ // | X | m | m | m | | | | | m | m | m | m |
+ // +---+---+---+---+---+---+---+---+---+---+---+---+
+ // | m | m | m | m | | | | | m | m | m | m |
+ // +---+---+---+---+---+---+---+---+---+---+---+---+
+ // | m | m | m | m | | | | | m | m | m | m |
+ // +---+---+---+---+---+---+---+---+---+---+---+---+
+ // | m | m | m | m | | | | | X | m | m | m |
+ // +---+---+---+---+---+---+---+---+---+---+---+---+
+ // the [8, 8] - [64, 32] area is not expected to be covered in the difference,
+ // as DesktopCapturerDifferWrapper should only mark [0, 0] - [32, 32] and
+ // [64, 0] - [96, 32] as updated.
+ DesktopRegion differ(frame.updated_region());
+ differ.Subtract(region);
+ for (DesktopRegion::Iterator it(differ); !it.IsAtEnd(); it.Advance()) {
+ ASSERT_TRUE(it.rect().width() <= kBlockSize * 2 - 2 ||
+ it.rect().height() <= kBlockSize * 2 - 2);
+ }
+}
+
+// Executes DesktopCapturerDifferWrapper::CaptureFrame() and compares its output
+// DesktopFrame::updated_region() with `updated_region` if `check_result` is
+// true. If `exactly_match` is true, AssertUpdatedRegionIs() will be used,
+// otherwise AssertUpdatedRegionCovers() will be used.
+template <template <typename, typename...> class T = std::initializer_list,
+ typename... Rect>
+void ExecuteDifferWrapperCase(BlackWhiteDesktopFramePainter* frame_painter,
+ DesktopCapturerDifferWrapper* capturer,
+ MockDesktopCapturerCallback* callback,
+ const T<DesktopRect, Rect...>& updated_region,
+ bool check_result,
+ bool exactly_match) {
+ EXPECT_CALL(*callback, OnCaptureResultPtr(DesktopCapturer::Result::SUCCESS,
+ ::testing::_))
+ .Times(1)
+ .WillOnce(
+ ::testing::Invoke([&updated_region, check_result, exactly_match](
+ DesktopCapturer::Result result,
+ std::unique_ptr<DesktopFrame>* frame) {
+ ASSERT_EQ(result, DesktopCapturer::Result::SUCCESS);
+ if (check_result) {
+ if (exactly_match) {
+ AssertUpdatedRegionIs(**frame, updated_region);
+ } else {
+ AssertUpdatedRegionCovers(**frame, updated_region);
+ }
+ }
+ }));
+ for (const auto& rect : updated_region) {
+ frame_painter->updated_region()->AddRect(rect);
+ }
+ capturer->CaptureFrame();
+}
+
+// Executes a DesktopCapturerDifferWrapper::CaptureFrame(). If updated_region()
+// is not set, this function will reset the DesktopCapturerDifferWrapper's
+// internal DesktopFrame to black.
+void ExecuteCapturer(DesktopCapturerDifferWrapper* capturer,
+ MockDesktopCapturerCallback* callback) {
+ EXPECT_CALL(*callback, OnCaptureResultPtr(DesktopCapturer::Result::SUCCESS,
+ ::testing::_))
+ .Times(1);
+ capturer->CaptureFrame();
+}
+
+void ExecuteDifferWrapperTest(bool with_hints,
+ bool enlarge_updated_region,
+ bool random_updated_region,
+ bool check_result) {
+ const bool updated_region_should_exactly_match =
+ with_hints && !enlarge_updated_region && !random_updated_region;
+ BlackWhiteDesktopFramePainter frame_painter;
+ PainterDesktopFrameGenerator frame_generator;
+ frame_generator.set_desktop_frame_painter(&frame_painter);
+ std::unique_ptr<FakeDesktopCapturer> fake(new FakeDesktopCapturer());
+ fake->set_frame_generator(&frame_generator);
+ DesktopCapturerDifferWrapper capturer(std::move(fake));
+ MockDesktopCapturerCallback callback;
+ frame_generator.set_provide_updated_region_hints(with_hints);
+ frame_generator.set_enlarge_updated_region(enlarge_updated_region);
+ frame_generator.set_add_random_updated_region(random_updated_region);
+
+ capturer.Start(&callback);
+
+ EXPECT_CALL(callback, OnCaptureResultPtr(DesktopCapturer::Result::SUCCESS,
+ ::testing::_))
+ .Times(1)
+ .WillOnce(::testing::Invoke([](DesktopCapturer::Result result,
+ std::unique_ptr<DesktopFrame>* frame) {
+ ASSERT_EQ(result, DesktopCapturer::Result::SUCCESS);
+ AssertUpdatedRegionIs(**frame,
+ {DesktopRect::MakeSize((*frame)->size())});
+ }));
+ capturer.CaptureFrame();
+
+ ExecuteDifferWrapperCase(&frame_painter, &capturer, &callback,
+ {DesktopRect::MakeLTRB(100, 100, 200, 200),
+ DesktopRect::MakeLTRB(300, 300, 400, 400)},
+ check_result, updated_region_should_exactly_match);
+ ExecuteCapturer(&capturer, &callback);
+
+ ExecuteDifferWrapperCase(
+ &frame_painter, &capturer, &callback,
+ {DesktopRect::MakeLTRB(0, 0, 40, 40),
+ DesktopRect::MakeLTRB(0, frame_generator.size()->height() - 40, 40,
+ frame_generator.size()->height()),
+ DesktopRect::MakeLTRB(frame_generator.size()->width() - 40, 0,
+ frame_generator.size()->width(), 40),
+ DesktopRect::MakeLTRB(frame_generator.size()->width() - 40,
+ frame_generator.size()->height() - 40,
+ frame_generator.size()->width(),
+ frame_generator.size()->height())},
+ check_result, updated_region_should_exactly_match);
+
+ Random random(rtc::TimeMillis());
+ // Fuzzing tests.
+ for (int i = 0; i < 1000; i++) {
+ if (enlarge_updated_region) {
+ frame_generator.set_enlarge_range(random.Rand(1, 50));
+ }
+ frame_generator.size()->set(random.Rand(500, 2000), random.Rand(500, 2000));
+ ExecuteCapturer(&capturer, &callback);
+ std::vector<DesktopRect> updated_region;
+ for (int j = random.Rand(50); j >= 0; j--) {
+ // At least a 1 x 1 updated region.
+ const int left = random.Rand(0, frame_generator.size()->width() - 2);
+ const int top = random.Rand(0, frame_generator.size()->height() - 2);
+ const int right = random.Rand(left + 1, frame_generator.size()->width());
+ const int bottom = random.Rand(top + 1, frame_generator.size()->height());
+ updated_region.push_back(DesktopRect::MakeLTRB(left, top, right, bottom));
+ }
+ ExecuteDifferWrapperCase(&frame_painter, &capturer, &callback,
+ updated_region, check_result,
+ updated_region_should_exactly_match);
+ }
+}
+
+} // namespace
+
+TEST(DesktopCapturerDifferWrapperTest, CaptureWithoutHints) {
+ ExecuteDifferWrapperTest(false, false, false, true);
+}
+
+TEST(DesktopCapturerDifferWrapperTest, CaptureWithHints) {
+ ExecuteDifferWrapperTest(true, false, false, true);
+}
+
+TEST(DesktopCapturerDifferWrapperTest, CaptureWithEnlargedHints) {
+ ExecuteDifferWrapperTest(true, true, false, true);
+}
+
+TEST(DesktopCapturerDifferWrapperTest, CaptureWithRandomHints) {
+ ExecuteDifferWrapperTest(true, false, true, true);
+}
+
+TEST(DesktopCapturerDifferWrapperTest, CaptureWithEnlargedAndRandomHints) {
+ ExecuteDifferWrapperTest(true, true, true, true);
+}
+
+// When hints are provided, DesktopCapturerDifferWrapper performs slightly
+// better in the current configuration, though not significantly. The
+// following is the result of one run.
+// [ RUN ] DISABLED_CaptureWithoutHintsPerf
+// [ OK ] DISABLED_CaptureWithoutHintsPerf (7118 ms)
+// [ RUN ] DISABLED_CaptureWithHintsPerf
+// [ OK ] DISABLED_CaptureWithHintsPerf (5580 ms)
+// [ RUN ] DISABLED_CaptureWithEnlargedHintsPerf
+// [ OK ] DISABLED_CaptureWithEnlargedHintsPerf (5974 ms)
+// [ RUN ] DISABLED_CaptureWithRandomHintsPerf
+// [ OK ] DISABLED_CaptureWithRandomHintsPerf (6184 ms)
+// [ RUN ] DISABLED_CaptureWithEnlargedAndRandomHintsPerf
+// [ OK ] DISABLED_CaptureWithEnlargedAndRandomHintsPerf (6347 ms)
+TEST(DesktopCapturerDifferWrapperTest, DISABLED_CaptureWithoutHintsPerf) {
+ int64_t started = rtc::TimeMillis();
+ ExecuteDifferWrapperTest(false, false, false, false);
+ ASSERT_LE(rtc::TimeMillis() - started, 15000);
+}
+
+TEST(DesktopCapturerDifferWrapperTest, DISABLED_CaptureWithHintsPerf) {
+ int64_t started = rtc::TimeMillis();
+ ExecuteDifferWrapperTest(true, false, false, false);
+ ASSERT_LE(rtc::TimeMillis() - started, 15000);
+}
+
+TEST(DesktopCapturerDifferWrapperTest, DISABLED_CaptureWithEnlargedHintsPerf) {
+ int64_t started = rtc::TimeMillis();
+ ExecuteDifferWrapperTest(true, true, false, false);
+ ASSERT_LE(rtc::TimeMillis() - started, 15000);
+}
+
+TEST(DesktopCapturerDifferWrapperTest, DISABLED_CaptureWithRandomHintsPerf) {
+ int64_t started = rtc::TimeMillis();
+ ExecuteDifferWrapperTest(true, false, true, false);
+ ASSERT_LE(rtc::TimeMillis() - started, 15000);
+}
+
+TEST(DesktopCapturerDifferWrapperTest,
+ DISABLED_CaptureWithEnlargedAndRandomHintsPerf) {
+ int64_t started = rtc::TimeMillis();
+ ExecuteDifferWrapperTest(true, true, true, false);
+ ASSERT_LE(rtc::TimeMillis() - started, 15000);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/desktop_capturer_wrapper.cc b/third_party/libwebrtc/modules/desktop_capture/desktop_capturer_wrapper.cc
new file mode 100644
index 0000000000..4bbdd6c94f
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/desktop_capturer_wrapper.cc
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/desktop_capturer_wrapper.h"
+
+#include <utility>
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+DesktopCapturerWrapper::DesktopCapturerWrapper(
+ std::unique_ptr<DesktopCapturer> base_capturer)
+ : base_capturer_(std::move(base_capturer)) {
+ RTC_DCHECK(base_capturer_);
+}
+
+DesktopCapturerWrapper::~DesktopCapturerWrapper() = default;
+
+void DesktopCapturerWrapper::Start(Callback* callback) {
+ base_capturer_->Start(callback);
+}
+
+void DesktopCapturerWrapper::SetSharedMemoryFactory(
+ std::unique_ptr<SharedMemoryFactory> shared_memory_factory) {
+ base_capturer_->SetSharedMemoryFactory(std::move(shared_memory_factory));
+}
+
+void DesktopCapturerWrapper::CaptureFrame() {
+ base_capturer_->CaptureFrame();
+}
+
+void DesktopCapturerWrapper::SetExcludedWindow(WindowId window) {
+ base_capturer_->SetExcludedWindow(window);
+}
+
+bool DesktopCapturerWrapper::GetSourceList(SourceList* sources) {
+ return base_capturer_->GetSourceList(sources);
+}
+
+bool DesktopCapturerWrapper::SelectSource(SourceId id) {
+ return base_capturer_->SelectSource(id);
+}
+
+bool DesktopCapturerWrapper::FocusOnSelectedSource() {
+ return base_capturer_->FocusOnSelectedSource();
+}
+
+bool DesktopCapturerWrapper::IsOccluded(const DesktopVector& pos) {
+ return base_capturer_->IsOccluded(pos);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/desktop_capturer_wrapper.h b/third_party/libwebrtc/modules/desktop_capture/desktop_capturer_wrapper.h
new file mode 100644
index 0000000000..e0f50d79e1
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/desktop_capturer_wrapper.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_DESKTOP_CAPTURE_DESKTOP_CAPTURER_WRAPPER_H_
+#define MODULES_DESKTOP_CAPTURE_DESKTOP_CAPTURER_WRAPPER_H_
+
+#include <memory>
+
+#include "modules/desktop_capture/desktop_capture_types.h"
+#include "modules/desktop_capture/desktop_capturer.h"
+#include "modules/desktop_capture/desktop_geometry.h"
+#include "modules/desktop_capture/shared_memory.h"
+
+namespace webrtc {
+
+// Wraps a DesktopCapturer and forwards all the function calls to it.
+class DesktopCapturerWrapper : public DesktopCapturer {
+ public:
+ explicit DesktopCapturerWrapper(
+ std::unique_ptr<DesktopCapturer> base_capturer);
+ ~DesktopCapturerWrapper() override;
+
+  // DesktopCapturer interface implementation.
+ void Start(Callback* callback) override;
+ void SetSharedMemoryFactory(
+ std::unique_ptr<SharedMemoryFactory> shared_memory_factory) override;
+ void CaptureFrame() override;
+ void SetExcludedWindow(WindowId window) override;
+ bool GetSourceList(SourceList* sources) override;
+ bool SelectSource(SourceId id) override;
+ bool FocusOnSelectedSource() override;
+ bool IsOccluded(const DesktopVector& pos) override;
+
+ protected:
+ // Guaranteed to be valid.
+ const std::unique_ptr<DesktopCapturer> base_capturer_;
+};
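+
+// Illustrative sketch (not part of this header): a subclass only needs to
+// override the calls it wants to intercept; everything else keeps being
+// forwarded to `base_capturer_`. `CountingCapturer` and `frames_requested_`
+// are hypothetical names used for illustration only.
+//
+//   class CountingCapturer : public DesktopCapturerWrapper {
+//    public:
+//     explicit CountingCapturer(std::unique_ptr<DesktopCapturer> base)
+//         : DesktopCapturerWrapper(std::move(base)) {}
+//     void CaptureFrame() override {
+//       ++frames_requested_;  // Observe the call, then forward it.
+//       DesktopCapturerWrapper::CaptureFrame();
+//     }
+//
+//    private:
+//     int frames_requested_ = 0;
+//   };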
+
+} // namespace webrtc
+
+#endif // MODULES_DESKTOP_CAPTURE_DESKTOP_CAPTURER_WRAPPER_H_
diff --git a/third_party/libwebrtc/modules/desktop_capture/desktop_frame.cc b/third_party/libwebrtc/modules/desktop_capture/desktop_frame.cc
new file mode 100644
index 0000000000..3e7b4770c6
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/desktop_frame.cc
@@ -0,0 +1,209 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/desktop_frame.h"
+
+#include <string.h>
+
+#include <cmath>
+#include <memory>
+#include <utility>
+
+#include "modules/desktop_capture/desktop_capture_types.h"
+#include "modules/desktop_capture/desktop_geometry.h"
+#include "rtc_base/checks.h"
+#include "libyuv/include/libyuv/planar_functions.h"
+
+namespace webrtc {
+
+DesktopFrame::DesktopFrame(DesktopSize size,
+ int stride,
+ uint8_t* data,
+ SharedMemory* shared_memory)
+ : data_(data),
+ shared_memory_(shared_memory),
+ size_(size),
+ stride_(stride),
+ capture_time_ms_(0),
+ capturer_id_(DesktopCapturerId::kUnknown) {
+ RTC_DCHECK(size_.width() >= 0);
+ RTC_DCHECK(size_.height() >= 0);
+}
+
+DesktopFrame::~DesktopFrame() = default;
+
+void DesktopFrame::CopyPixelsFrom(const uint8_t* src_buffer,
+ int src_stride,
+ const DesktopRect& dest_rect) {
+ RTC_CHECK(DesktopRect::MakeSize(size()).ContainsRect(dest_rect));
+
+ uint8_t* dest = GetFrameDataAtPos(dest_rect.top_left());
+  // TODO(crbug.com/1330019): Temporary workaround for a known libyuv crash
+  // when the height or width is 0. Remove this workaround once the upstream
+  // libyuv fix has been merged.
+ if (dest_rect.width() && dest_rect.height()) {
+ libyuv::CopyPlane(src_buffer, src_stride, dest, stride(),
+ DesktopFrame::kBytesPerPixel * dest_rect.width(),
+ dest_rect.height());
+ }
+}
+
+void DesktopFrame::CopyPixelsFrom(const DesktopFrame& src_frame,
+ const DesktopVector& src_pos,
+ const DesktopRect& dest_rect) {
+ RTC_CHECK(DesktopRect::MakeSize(src_frame.size())
+ .ContainsRect(
+ DesktopRect::MakeOriginSize(src_pos, dest_rect.size())));
+
+ CopyPixelsFrom(src_frame.GetFrameDataAtPos(src_pos), src_frame.stride(),
+ dest_rect);
+}
+
+bool DesktopFrame::CopyIntersectingPixelsFrom(const DesktopFrame& src_frame,
+ double horizontal_scale,
+ double vertical_scale) {
+ const DesktopVector& origin = top_left();
+ const DesktopVector& src_frame_origin = src_frame.top_left();
+
+ DesktopVector src_frame_offset = src_frame_origin.subtract(origin);
+
+ // Determine the intersection, first adjusting its origin to account for any
+ // DPI scaling.
+ DesktopRect intersection_rect = src_frame.rect();
+ if (horizontal_scale != 1.0 || vertical_scale != 1.0) {
+ DesktopVector origin_adjustment(
+ static_cast<int>(
+ std::round((horizontal_scale - 1.0) * src_frame_offset.x())),
+ static_cast<int>(
+ std::round((vertical_scale - 1.0) * src_frame_offset.y())));
+
+ intersection_rect.Translate(origin_adjustment);
+
+ src_frame_offset = src_frame_offset.add(origin_adjustment);
+ }
+
+ intersection_rect.IntersectWith(rect());
+ if (intersection_rect.is_empty()) {
+ return false;
+ }
+
+ // Translate the intersection rect to be relative to the outer rect.
+ intersection_rect.Translate(-origin.x(), -origin.y());
+
+ // Determine source position for the copy (offsets of outer frame from
+ // source origin, if positive).
+ int32_t src_pos_x = std::max(0, -src_frame_offset.x());
+ int32_t src_pos_y = std::max(0, -src_frame_offset.y());
+
+ CopyPixelsFrom(src_frame, DesktopVector(src_pos_x, src_pos_y),
+ intersection_rect);
+ return true;
+}
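+
+// Worked example (illustrative): with this frame at (0, 0), `src_frame` at
+// (10, 0) and horizontal_scale == 2.0, src_frame_offset starts as (10, 0) and
+// origin_adjustment becomes ((2.0 - 1.0) * 10, 0) == (10, 0), so the source
+// is treated as if it sat at (20, 0); the offset is doubled, matching the
+// scale-factor contract documented in desktop_frame.h.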
+
+DesktopRect DesktopFrame::rect() const {
+ const float scale = scale_factor();
+ // Only scale the size.
+ return DesktopRect::MakeXYWH(top_left().x(), top_left().y(),
+ size().width() / scale, size().height() / scale);
+}
+
+float DesktopFrame::scale_factor() const {
+ float scale = 1.0f;
+
+#if defined(WEBRTC_MAC) || defined(CHROMEOS)
+  // This block is limited to macOS and ChromeOS because, at least on Windows,
+  // logical and physical pixels are the same; see http://crbug.com/948362.
+ if (!dpi().is_zero() && dpi().x() == dpi().y())
+ scale = dpi().x() / kStandardDPI;
+#endif
+
+ return scale;
+}
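+
+// Worked example (illustrative): on the platforms where the branch above is
+// compiled in, a frame with dpi() == (192, 192) yields scale_factor() ==
+// 192 / kStandardDPI == 2.0, so rect() reports half of size() in DIPs.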
+
+uint8_t* DesktopFrame::GetFrameDataAtPos(const DesktopVector& pos) const {
+ return data() + stride() * pos.y() + DesktopFrame::kBytesPerPixel * pos.x();
+}
+
+void DesktopFrame::CopyFrameInfoFrom(const DesktopFrame& other) {
+ set_dpi(other.dpi());
+ set_capture_time_ms(other.capture_time_ms());
+ set_capturer_id(other.capturer_id());
+ *mutable_updated_region() = other.updated_region();
+ set_top_left(other.top_left());
+ set_icc_profile(other.icc_profile());
+}
+
+void DesktopFrame::MoveFrameInfoFrom(DesktopFrame* other) {
+ set_dpi(other->dpi());
+ set_capture_time_ms(other->capture_time_ms());
+ set_capturer_id(other->capturer_id());
+ mutable_updated_region()->Swap(other->mutable_updated_region());
+ set_top_left(other->top_left());
+ set_icc_profile(other->icc_profile());
+}
+
+BasicDesktopFrame::BasicDesktopFrame(DesktopSize size)
+ : DesktopFrame(size,
+ kBytesPerPixel * size.width(),
+ new uint8_t[kBytesPerPixel * size.width() * size.height()](),
+ nullptr) {}
+
+BasicDesktopFrame::~BasicDesktopFrame() {
+ delete[] data_;
+}
+
+// static
+DesktopFrame* BasicDesktopFrame::CopyOf(const DesktopFrame& frame) {
+ DesktopFrame* result = new BasicDesktopFrame(frame.size());
+  // TODO(crbug.com/1330019): Temporary workaround for a known libyuv crash
+  // when the height or width is 0. Remove this workaround once the upstream
+  // libyuv fix has been merged.
+ if (frame.size().width() && frame.size().height()) {
+ libyuv::CopyPlane(frame.data(), frame.stride(), result->data(),
+ result->stride(), frame.size().width() * kBytesPerPixel,
+ frame.size().height());
+ }
+ result->CopyFrameInfoFrom(frame);
+ return result;
+}
+
+// static
+std::unique_ptr<DesktopFrame> SharedMemoryDesktopFrame::Create(
+ DesktopSize size,
+ SharedMemoryFactory* shared_memory_factory) {
+ RTC_DCHECK(shared_memory_factory);
+
+ size_t buffer_size = size.height() * size.width() * kBytesPerPixel;
+ std::unique_ptr<SharedMemory> shared_memory =
+ shared_memory_factory->CreateSharedMemory(buffer_size);
+ if (!shared_memory)
+ return nullptr;
+
+ return std::make_unique<SharedMemoryDesktopFrame>(
+ size, size.width() * kBytesPerPixel, std::move(shared_memory));
+}
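+
+// Illustrative call site (assumed; `SomeSharedMemoryFactory` is a
+// hypothetical platform-specific implementation, not defined here):
+//
+//   SomeSharedMemoryFactory factory;
+//   std::unique_ptr<DesktopFrame> frame =
+//       SharedMemoryDesktopFrame::Create(DesktopSize(1920, 1080), &factory);
+//   if (!frame) {
+//     // The factory failed to allocate 1920 * 1080 * 4 bytes.
+//   }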
+
+SharedMemoryDesktopFrame::SharedMemoryDesktopFrame(DesktopSize size,
+ int stride,
+ SharedMemory* shared_memory)
+ : DesktopFrame(size,
+ stride,
+ reinterpret_cast<uint8_t*>(shared_memory->data()),
+ shared_memory) {}
+
+SharedMemoryDesktopFrame::SharedMemoryDesktopFrame(
+ DesktopSize size,
+ int stride,
+ std::unique_ptr<SharedMemory> shared_memory)
+ : SharedMemoryDesktopFrame(size, stride, shared_memory.release()) {}
+
+SharedMemoryDesktopFrame::~SharedMemoryDesktopFrame() {
+ delete shared_memory_;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/desktop_frame.h b/third_party/libwebrtc/modules/desktop_capture/desktop_frame.h
new file mode 100644
index 0000000000..3ee1867e70
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/desktop_frame.h
@@ -0,0 +1,227 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_DESKTOP_CAPTURE_DESKTOP_FRAME_H_
+#define MODULES_DESKTOP_CAPTURE_DESKTOP_FRAME_H_
+
+#include <stdint.h>
+
+#include <memory>
+#include <vector>
+
+#include "modules/desktop_capture/desktop_geometry.h"
+#include "modules/desktop_capture/desktop_region.h"
+#include "modules/desktop_capture/shared_memory.h"
+#include "rtc_base/system/rtc_export.h"
+
+namespace webrtc {
+
+const float kStandardDPI = 96.0f;
+
+// DesktopFrame represents a video frame captured from the screen.
+class RTC_EXPORT DesktopFrame {
+ public:
+ // DesktopFrame objects always hold BGRA data.
+ static const int kBytesPerPixel = 4;
+
+ virtual ~DesktopFrame();
+
+ DesktopFrame(const DesktopFrame&) = delete;
+ DesktopFrame& operator=(const DesktopFrame&) = delete;
+
+  // Returns the rectangle in full desktop coordinates, i.e. the area covered
+  // from top_left() to top_left() + size() / scale_factor().
+ DesktopRect rect() const;
+
+ // Returns the scale factor from DIPs to physical pixels of the frame.
+ // Assumes same scale in both X and Y directions at present.
+ float scale_factor() const;
+
+ // Size of the frame. In physical coordinates, mapping directly from the
+ // underlying buffer.
+ const DesktopSize& size() const { return size_; }
+
+  // The top-left of the frame in full desktop coordinates. E.g. the top-left
+  // monitor should start from (0, 0). The desktop coordinates may be scaled
+  // by the OS, but this is always consistent with the MouseCursorMonitor.
+ const DesktopVector& top_left() const { return top_left_; }
+ void set_top_left(const DesktopVector& top_left) { top_left_ = top_left; }
+
+ // Distance in the buffer between two neighboring rows in bytes.
+ int stride() const { return stride_; }
+
+ // Data buffer used for the frame.
+ uint8_t* data() const { return data_; }
+
+ // SharedMemory used for the buffer or NULL if memory is allocated on the
+ // heap. The result is guaranteed to be deleted only after the frame is
+ // deleted (classes that inherit from DesktopFrame must ensure it).
+ SharedMemory* shared_memory() const { return shared_memory_; }
+
+ // Indicates region of the screen that has changed since the previous frame.
+ const DesktopRegion& updated_region() const { return updated_region_; }
+ DesktopRegion* mutable_updated_region() { return &updated_region_; }
+
+ // DPI of the screen being captured. May be set to zero, e.g. if DPI is
+ // unknown.
+ const DesktopVector& dpi() const { return dpi_; }
+ void set_dpi(const DesktopVector& dpi) { dpi_ = dpi; }
+
+ // Indicates if this frame may have the mouse cursor in it. Capturers that
+ // support cursor capture may set this to true. If the cursor was
+ // outside of the captured area, this may be true even though the cursor is
+ // not in the image.
+ bool may_contain_cursor() const { return may_contain_cursor_; }
+ void set_may_contain_cursor(bool may_contain_cursor) {
+ may_contain_cursor_ = may_contain_cursor;
+ }
+
+ // Time taken to capture the frame in milliseconds.
+ int64_t capture_time_ms() const { return capture_time_ms_; }
+ void set_capture_time_ms(int64_t time_ms) { capture_time_ms_ = time_ms; }
+
+  // Copies pixels from a buffer or another frame. `dest_rect` must lie
+  // within the bounds of this frame.
+ void CopyPixelsFrom(const uint8_t* src_buffer,
+ int src_stride,
+ const DesktopRect& dest_rect);
+ void CopyPixelsFrom(const DesktopFrame& src_frame,
+ const DesktopVector& src_pos,
+ const DesktopRect& dest_rect);
+
+ // Copies pixels from another frame, with the copied & overwritten regions
+ // representing the intersection between the two frames. Returns true if
+ // pixels were copied, or false if there's no intersection. The scale factors
+ // represent the ratios between pixel space & offset coordinate space (e.g.
+ // 2.0 would indicate the frames are scaled down by 50% for display, so any
+ // offset between their origins should be doubled).
+ bool CopyIntersectingPixelsFrom(const DesktopFrame& src_frame,
+ double horizontal_scale,
+ double vertical_scale);
+
+ // A helper to return the data pointer of a frame at the specified position.
+ uint8_t* GetFrameDataAtPos(const DesktopVector& pos) const;
+
+  // The DesktopCapturer implementation which generates the current
+  // DesktopFrame.
+ // Not all DesktopCapturer implementations set this field; it's set to
+ // kUnknown by default.
+ uint32_t capturer_id() const { return capturer_id_; }
+ void set_capturer_id(uint32_t capturer_id) { capturer_id_ = capturer_id; }
+
+  // Copies various information from `other`. Anything initialized in the
+  // constructor is not copied.
+ // This function is usually used when sharing a source DesktopFrame with
+ // several clients: the original DesktopFrame should be kept unchanged. For
+ // example, BasicDesktopFrame::CopyOf() and SharedDesktopFrame::Share().
+ void CopyFrameInfoFrom(const DesktopFrame& other);
+
+  // Moves various information from `other`. Anything initialized in the
+  // constructor is not moved. Unlike CopyFrameInfoFrom(), this function uses
+  // swap or a move constructor to avoid copying data. It won't break `other`,
+  // but some of its information may be missing after this operation, e.g.
+  // other->updated_region_.
+ // This function is usually used when wrapping a DesktopFrame: the wrapper
+ // instance takes the ownership of `other`, so other components cannot access
+ // `other` anymore. For example, CroppedDesktopFrame and
+ // DesktopFrameWithCursor.
+ void MoveFrameInfoFrom(DesktopFrame* other);
+
+  // Set and get the ICC profile of the frame data pixels. Useful for building
+  // a ColorSpace object in clients of the webrtc library, such as chromium.
+  // The format of an ICC profile is defined in the following specification:
+  // http://www.color.org/specification/ICC1v43_2010-12.pdf.
+ const std::vector<uint8_t>& icc_profile() const { return icc_profile_; }
+ void set_icc_profile(const std::vector<uint8_t>& icc_profile) {
+ icc_profile_ = icc_profile;
+ }
+
+ protected:
+ DesktopFrame(DesktopSize size,
+ int stride,
+ uint8_t* data,
+ SharedMemory* shared_memory);
+
+ // Ownership of the buffers is defined by the classes that inherit from this
+ // class. They must guarantee that the buffer is not deleted before the frame
+ // is deleted.
+ uint8_t* const data_;
+ SharedMemory* const shared_memory_;
+
+ private:
+ const DesktopSize size_;
+ const int stride_;
+
+ DesktopRegion updated_region_;
+ DesktopVector top_left_;
+ DesktopVector dpi_;
+ bool may_contain_cursor_ = false;
+ int64_t capture_time_ms_;
+ uint32_t capturer_id_;
+ std::vector<uint8_t> icc_profile_;
+};
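+
+// Illustrative consumer-side sketch (assumed typical usage, not part of the
+// API defined above): walking the dirty rectangles of a captured frame.
+//
+//   void ForEachDirtyRow(const DesktopFrame& frame) {
+//     for (DesktopRegion::Iterator it(frame.updated_region()); !it.IsAtEnd();
+//          it.Advance()) {
+//       const DesktopRect& r = it.rect();
+//       const uint8_t* row = frame.GetFrameDataAtPos(r.top_left());
+//       for (int y = 0; y < r.height(); ++y, row += frame.stride()) {
+//         // `row` points at r.width() * DesktopFrame::kBytesPerPixel bytes
+//         // of BGRA data for this dirty row.
+//       }
+//     }
+//   }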
+
+// A DesktopFrame that stores data in the heap.
+class RTC_EXPORT BasicDesktopFrame : public DesktopFrame {
+ public:
+ // The entire data buffer used for the frame is initialized with zeros.
+ explicit BasicDesktopFrame(DesktopSize size);
+
+ ~BasicDesktopFrame() override;
+
+ BasicDesktopFrame(const BasicDesktopFrame&) = delete;
+ BasicDesktopFrame& operator=(const BasicDesktopFrame&) = delete;
+
+ // Creates a BasicDesktopFrame that contains copy of `frame`.
+ // TODO(zijiehe): Return std::unique_ptr<DesktopFrame>
+ static DesktopFrame* CopyOf(const DesktopFrame& frame);
+};
+
+// A DesktopFrame that stores data in shared memory.
+class RTC_EXPORT SharedMemoryDesktopFrame : public DesktopFrame {
+ public:
+ // May return nullptr if `shared_memory_factory` failed to create a
+ // SharedMemory instance.
+ // `shared_memory_factory` should not be nullptr.
+ static std::unique_ptr<DesktopFrame> Create(
+ DesktopSize size,
+ SharedMemoryFactory* shared_memory_factory);
+
+ // Takes ownership of `shared_memory`.
+ // Deprecated, use the next constructor.
+ SharedMemoryDesktopFrame(DesktopSize size,
+ int stride,
+ SharedMemory* shared_memory);
+
+ // Preferred.
+ SharedMemoryDesktopFrame(DesktopSize size,
+ int stride,
+ std::unique_ptr<SharedMemory> shared_memory);
+
+ ~SharedMemoryDesktopFrame() override;
+
+ SharedMemoryDesktopFrame(const SharedMemoryDesktopFrame&) = delete;
+ SharedMemoryDesktopFrame& operator=(const SharedMemoryDesktopFrame&) = delete;
+
+ private:
+  // Avoids an unexpected order of parameter evaluation.
+  // Executing both std::unique_ptr<T>::operator->() and
+  // std::unique_ptr<T>::release() in the member initializer list is not safe:
+  // depending on the order of parameter evaluation,
+  // std::unique_ptr<T>::operator->() may trigger an assertion failure if it
+  // is evaluated after std::unique_ptr<T>::release(). By delegating to this
+  // constructor, std::unique_ptr<T>::operator->() is not involved at all.
+ SharedMemoryDesktopFrame(DesktopRect rect,
+ int stride,
+ SharedMemory* shared_memory);
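+  //
+  // For illustration only, the unsafe pattern this constructor avoids would
+  // look like:
+  //
+  //   SharedMemoryDesktopFrame(DesktopSize size, int stride,
+  //                            std::unique_ptr<SharedMemory> shm)
+  //       : DesktopFrame(size, stride,
+  //                      reinterpret_cast<uint8_t*>(shm->data()),  // (1)
+  //                      shm.release()) {}                         // (2)
+  //
+  // If the compiler evaluates (2) before (1), `shm` is already null by the
+  // time operator->() runs.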
+};
+
+} // namespace webrtc
+
+#endif // MODULES_DESKTOP_CAPTURE_DESKTOP_FRAME_H_
diff --git a/third_party/libwebrtc/modules/desktop_capture/desktop_frame_generator.cc b/third_party/libwebrtc/modules/desktop_capture/desktop_frame_generator.cc
new file mode 100644
index 0000000000..b5dfc28e46
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/desktop_frame_generator.cc
@@ -0,0 +1,184 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/desktop_frame_generator.h"
+
+#include <stdint.h>
+#include <string.h>
+
+#include <memory>
+
+#include "modules/desktop_capture/rgba_color.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/random.h"
+#include "rtc_base/time_utils.h"
+
+namespace webrtc {
+
+namespace {
+
+// Applies `updated_region` to `frame`'s updated region. If
+// `enlarge_updated_region` is true, this function will randomly enlarge each
+// DesktopRect in `updated_region`, but the enlarged DesktopRegion won't
+// exceed frame->size(). If `add_random_updated_region` is true, several
+// random rectangles will also be added to `frame`'s updated region.
+void SetUpdatedRegion(DesktopFrame* frame,
+ const DesktopRegion& updated_region,
+ bool enlarge_updated_region,
+ int enlarge_range,
+ bool add_random_updated_region) {
+ const DesktopRect screen_rect = DesktopRect::MakeSize(frame->size());
+ Random random(rtc::TimeMicros());
+ frame->mutable_updated_region()->Clear();
+ for (DesktopRegion::Iterator it(updated_region); !it.IsAtEnd();
+ it.Advance()) {
+ DesktopRect rect = it.rect();
+ if (enlarge_updated_region && enlarge_range > 0) {
+ rect.Extend(random.Rand(enlarge_range), random.Rand(enlarge_range),
+ random.Rand(enlarge_range), random.Rand(enlarge_range));
+ rect.IntersectWith(screen_rect);
+ }
+ frame->mutable_updated_region()->AddRect(rect);
+ }
+
+ if (add_random_updated_region) {
+ for (int i = random.Rand(10); i >= 0; i--) {
+ // At least a 1 x 1 updated region.
+ const int left = random.Rand(0, frame->size().width() - 2);
+ const int top = random.Rand(0, frame->size().height() - 2);
+ const int right = random.Rand(left + 1, frame->size().width());
+ const int bottom = random.Rand(top + 1, frame->size().height());
+ frame->mutable_updated_region()->AddRect(
+ DesktopRect::MakeLTRB(left, top, right, bottom));
+ }
+ }
+}
+
+// Paints the pixels in `rect` of `frame` with `rgba_color`.
+void PaintRect(DesktopFrame* frame, DesktopRect rect, RgbaColor rgba_color) {
+ static_assert(DesktopFrame::kBytesPerPixel == sizeof(uint32_t),
+ "kBytesPerPixel should be 4.");
+ RTC_DCHECK_GE(frame->size().width(), rect.right());
+ RTC_DCHECK_GE(frame->size().height(), rect.bottom());
+ uint32_t color = rgba_color.ToUInt32();
+ uint8_t* row = frame->GetFrameDataAtPos(rect.top_left());
+ for (int i = 0; i < rect.height(); i++) {
+ uint32_t* column = reinterpret_cast<uint32_t*>(row);
+ for (int j = 0; j < rect.width(); j++) {
+ column[j] = color;
+ }
+ row += frame->stride();
+ }
+}
+
+// Paints the pixels in `region` of `frame` with `rgba_color`.
+void PaintRegion(DesktopFrame* frame,
+ DesktopRegion* region,
+ RgbaColor rgba_color) {
+ region->IntersectWith(DesktopRect::MakeSize(frame->size()));
+ for (DesktopRegion::Iterator it(*region); !it.IsAtEnd(); it.Advance()) {
+ PaintRect(frame, it.rect(), rgba_color);
+ }
+}
+
+} // namespace
+
+DesktopFrameGenerator::DesktopFrameGenerator() {}
+DesktopFrameGenerator::~DesktopFrameGenerator() {}
+
+DesktopFramePainter::DesktopFramePainter() {}
+DesktopFramePainter::~DesktopFramePainter() {}
+
+PainterDesktopFrameGenerator::PainterDesktopFrameGenerator()
+ : size_(1024, 768),
+ return_frame_(true),
+ provide_updated_region_hints_(false),
+ enlarge_updated_region_(false),
+ enlarge_range_(20),
+ add_random_updated_region_(false),
+ painter_(nullptr) {}
+PainterDesktopFrameGenerator::~PainterDesktopFrameGenerator() {}
+
+std::unique_ptr<DesktopFrame> PainterDesktopFrameGenerator::GetNextFrame(
+ SharedMemoryFactory* factory) {
+ if (!return_frame_) {
+ return nullptr;
+ }
+
+ std::unique_ptr<DesktopFrame> frame = std::unique_ptr<DesktopFrame>(
+ factory ? SharedMemoryDesktopFrame::Create(size_, factory).release()
+ : new BasicDesktopFrame(size_));
+ if (painter_) {
+ DesktopRegion updated_region;
+ if (!painter_->Paint(frame.get(), &updated_region)) {
+ return nullptr;
+ }
+
+ if (provide_updated_region_hints_) {
+ SetUpdatedRegion(frame.get(), updated_region, enlarge_updated_region_,
+ enlarge_range_, add_random_updated_region_);
+ } else {
+ frame->mutable_updated_region()->SetRect(
+ DesktopRect::MakeSize(frame->size()));
+ }
+ }
+
+ return frame;
+}
+
+DesktopSize* PainterDesktopFrameGenerator::size() {
+ return &size_;
+}
+
+void PainterDesktopFrameGenerator::set_return_frame(bool return_frame) {
+ return_frame_ = return_frame;
+}
+
+void PainterDesktopFrameGenerator::set_provide_updated_region_hints(
+ bool provide_updated_region_hints) {
+ provide_updated_region_hints_ = provide_updated_region_hints;
+}
+
+void PainterDesktopFrameGenerator::set_enlarge_updated_region(
+ bool enlarge_updated_region) {
+ enlarge_updated_region_ = enlarge_updated_region;
+}
+
+void PainterDesktopFrameGenerator::set_enlarge_range(int enlarge_range) {
+ enlarge_range_ = enlarge_range;
+}
+
+void PainterDesktopFrameGenerator::set_add_random_updated_region(
+ bool add_random_updated_region) {
+ add_random_updated_region_ = add_random_updated_region;
+}
+
+void PainterDesktopFrameGenerator::set_desktop_frame_painter(
+ DesktopFramePainter* painter) {
+ painter_ = painter;
+}
+
+BlackWhiteDesktopFramePainter::BlackWhiteDesktopFramePainter() {}
+BlackWhiteDesktopFramePainter::~BlackWhiteDesktopFramePainter() {}
+
+DesktopRegion* BlackWhiteDesktopFramePainter::updated_region() {
+ return &updated_region_;
+}
+
+bool BlackWhiteDesktopFramePainter::Paint(DesktopFrame* frame,
+ DesktopRegion* updated_region) {
+ RTC_DCHECK(updated_region->is_empty());
+ memset(frame->data(), 0, frame->stride() * frame->size().height());
+ PaintRegion(frame, &updated_region_, RgbaColor(0xFFFFFFFF));
+ updated_region_.Swap(updated_region);
+ return true;
+}
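+
+// Illustrative wiring (assumed typical test usage of the classes above):
+//
+//   PainterDesktopFrameGenerator generator;
+//   BlackWhiteDesktopFramePainter painter;
+//   generator.set_desktop_frame_painter(&painter);
+//   // Mark a 16x16 area to be painted white on the next frame.
+//   painter.updated_region()->AddRect(DesktopRect::MakeWH(16, 16));
+//   // Passing a null factory makes the generator allocate on the heap.
+//   std::unique_ptr<DesktopFrame> frame = generator.GetNextFrame(nullptr);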
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/desktop_frame_generator.h b/third_party/libwebrtc/modules/desktop_capture/desktop_frame_generator.h
new file mode 100644
index 0000000000..3dedee9344
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/desktop_frame_generator.h
@@ -0,0 +1,121 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_DESKTOP_CAPTURE_DESKTOP_FRAME_GENERATOR_H_
+#define MODULES_DESKTOP_CAPTURE_DESKTOP_FRAME_GENERATOR_H_
+
+#include <memory>
+
+#include "modules/desktop_capture/desktop_frame.h"
+#include "modules/desktop_capture/desktop_geometry.h"
+#include "modules/desktop_capture/desktop_region.h"
+#include "modules/desktop_capture/shared_memory.h"
+
+namespace webrtc {
+
+// An interface to generate a DesktopFrame.
+class DesktopFrameGenerator {
+ public:
+ DesktopFrameGenerator();
+ virtual ~DesktopFrameGenerator();
+
+ virtual std::unique_ptr<DesktopFrame> GetNextFrame(
+ SharedMemoryFactory* factory) = 0;
+};
+
+// An interface to paint a DesktopFrame. This interface is used by
+// PainterDesktopFrameGenerator.
+class DesktopFramePainter {
+ public:
+ DesktopFramePainter();
+ virtual ~DesktopFramePainter();
+
+ virtual bool Paint(DesktopFrame* frame, DesktopRegion* updated_region) = 0;
+};
+
+// An implementation of DesktopFrameGenerator that takes care of the
+// DesktopFrame size, filling updated_region(), etc., but leaves the real
+// painting work to a DesktopFramePainter implementation.
+class PainterDesktopFrameGenerator final : public DesktopFrameGenerator {
+ public:
+ PainterDesktopFrameGenerator();
+ ~PainterDesktopFrameGenerator() override;
+
+ std::unique_ptr<DesktopFrame> GetNextFrame(
+ SharedMemoryFactory* factory) override;
+
+  // Sets the size of the frame which will be returned by the next
+  // GetNextFrame() call.
+ DesktopSize* size();
+
+  // Decides whether this generator returns a frame on the next GetNextFrame()
+  // call. If return_frame_ is true, the generator will create a frame
+  // according to both size_ and the SharedMemoryFactory input, and use the
+  // Paint() function to paint it.
+ void set_return_frame(bool return_frame);
+
+  // Decides whether the generated frame carries accurate updated_region()
+  // hints. If this field is false, the whole frame is marked as updated.
+ void set_provide_updated_region_hints(bool provide_updated_region_hints);
+
+  // Decides whether this generator randomly enlarges the updated regions in
+  // the DesktopFrame. Set this field to true to simulate inaccurate updated
+  // regions reported by OS APIs.
+ void set_enlarge_updated_region(bool enlarge_updated_region);
+
+  // The range by which to enlarge an updated region if
+  // `enlarge_updated_region_` is true.
+ // If this field is less than zero, it will be treated as zero, and
+ // `enlarge_updated_region_` will be ignored.
+ void set_enlarge_range(int enlarge_range);
+
+  // Decides whether this generator randomly adds some updated regions to the
+  // DesktopFrame. Set this field to true to simulate inaccurate updated
+  // regions reported by OS APIs.
+ void set_add_random_updated_region(bool add_random_updated_region);
+
+  // Sets the painter object to do the real painting work. If no `painter_`
+  // has been set on this instance, the DesktopFrame returned by GetNextFrame()
+  // will be left in an undefined but valid state.
+  // PainterDesktopFrameGenerator does not take ownership of `painter`.
+ void set_desktop_frame_painter(DesktopFramePainter* painter);
+
+ private:
+ DesktopSize size_;
+ bool return_frame_;
+ bool provide_updated_region_hints_;
+ bool enlarge_updated_region_;
+ int enlarge_range_;
+ bool add_random_updated_region_;
+ DesktopFramePainter* painter_;
+};
+
+// An implementation of DesktopFramePainter that paints white on
+// mutable_updated_region(), and black elsewhere.
+class BlackWhiteDesktopFramePainter final : public DesktopFramePainter {
+ public:
+ BlackWhiteDesktopFramePainter();
+ ~BlackWhiteDesktopFramePainter() override;
+
+  // The white regions of the frame which will be returned in the next Paint()
+  // call. BlackWhiteDesktopFramePainter will draw a black frame, with white
+  // in the updated_region_. Each Paint() call will consume updated_region_.
+ DesktopRegion* updated_region();
+
+ // DesktopFramePainter interface.
+ bool Paint(DesktopFrame* frame, DesktopRegion* updated_region) override;
+
+ private:
+ DesktopRegion updated_region_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_DESKTOP_CAPTURE_DESKTOP_FRAME_GENERATOR_H_
diff --git a/third_party/libwebrtc/modules/desktop_capture/desktop_frame_rotation.cc b/third_party/libwebrtc/modules/desktop_capture/desktop_frame_rotation.cc
new file mode 100644
index 0000000000..6e4e42708e
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/desktop_frame_rotation.cc
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/desktop_frame_rotation.h"
+
+#include "rtc_base/checks.h"
+#include "libyuv/include/libyuv/rotate_argb.h"
+
+namespace webrtc {
+
+namespace {
+
+libyuv::RotationMode ToLibyuvRotationMode(Rotation rotation) {
+ switch (rotation) {
+ case Rotation::CLOCK_WISE_0:
+ return libyuv::kRotate0;
+ case Rotation::CLOCK_WISE_90:
+ return libyuv::kRotate90;
+ case Rotation::CLOCK_WISE_180:
+ return libyuv::kRotate180;
+ case Rotation::CLOCK_WISE_270:
+ return libyuv::kRotate270;
+ }
+ RTC_DCHECK_NOTREACHED();
+ return libyuv::kRotate0;
+}
+
+DesktopRect RotateAndOffsetRect(DesktopRect rect,
+ DesktopSize size,
+ Rotation rotation,
+ DesktopVector offset) {
+ DesktopRect result = RotateRect(rect, size, rotation);
+ result.Translate(offset);
+ return result;
+}
+
+} // namespace
+
+Rotation ReverseRotation(Rotation rotation) {
+ switch (rotation) {
+ case Rotation::CLOCK_WISE_0:
+ return rotation;
+ case Rotation::CLOCK_WISE_90:
+ return Rotation::CLOCK_WISE_270;
+ case Rotation::CLOCK_WISE_180:
+ return Rotation::CLOCK_WISE_180;
+ case Rotation::CLOCK_WISE_270:
+ return Rotation::CLOCK_WISE_90;
+ }
+ RTC_DCHECK_NOTREACHED();
+ return Rotation::CLOCK_WISE_0;
+}
+
+DesktopSize RotateSize(DesktopSize size, Rotation rotation) {
+ switch (rotation) {
+ case Rotation::CLOCK_WISE_0:
+ case Rotation::CLOCK_WISE_180:
+ return size;
+ case Rotation::CLOCK_WISE_90:
+ case Rotation::CLOCK_WISE_270:
+ return DesktopSize(size.height(), size.width());
+ }
+ RTC_DCHECK_NOTREACHED();
+ return DesktopSize();
+}
+
+DesktopRect RotateRect(DesktopRect rect, DesktopSize size, Rotation rotation) {
+ switch (rotation) {
+ case Rotation::CLOCK_WISE_0:
+ return rect;
+ case Rotation::CLOCK_WISE_90:
+ return DesktopRect::MakeXYWH(size.height() - rect.bottom(), rect.left(),
+ rect.height(), rect.width());
+ case Rotation::CLOCK_WISE_180:
+ return DesktopRect::MakeXYWH(size.width() - rect.right(),
+ size.height() - rect.bottom(), rect.width(),
+ rect.height());
+ case Rotation::CLOCK_WISE_270:
+ return DesktopRect::MakeXYWH(rect.top(), size.width() - rect.right(),
+ rect.height(), rect.width());
+ }
+ RTC_DCHECK_NOTREACHED();
+ return DesktopRect();
+}
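+
+// Worked example (illustrative): rotating rect (x=1, y=1, w=3, h=1) within a
+// 5x3 frame by CLOCK_WISE_90 yields MakeXYWH(size.height() - rect.bottom(),
+// rect.left(), rect.height(), rect.width()) == MakeXYWH(3 - 2, 1, 1, 3): a
+// 1x3 rect at (1, 1) in the rotated 3x5 frame, as exercised by the
+// PartialCopyRect3x5 unittest.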
+
+void RotateDesktopFrame(const DesktopFrame& source,
+ const DesktopRect& source_rect,
+ const Rotation& rotation,
+ const DesktopVector& target_offset,
+ DesktopFrame* target) {
+ RTC_DCHECK(target);
+ RTC_DCHECK(DesktopRect::MakeSize(source.size()).ContainsRect(source_rect));
+ // The rectangle in `target`.
+ const DesktopRect target_rect =
+ RotateAndOffsetRect(source_rect, source.size(), rotation, target_offset);
+ RTC_DCHECK(DesktopRect::MakeSize(target->size()).ContainsRect(target_rect));
+
+ if (target_rect.is_empty()) {
+ return;
+ }
+
+ int result = libyuv::ARGBRotate(
+ source.GetFrameDataAtPos(source_rect.top_left()), source.stride(),
+ target->GetFrameDataAtPos(target_rect.top_left()), target->stride(),
+ source_rect.width(), source_rect.height(),
+ ToLibyuvRotationMode(rotation));
+ RTC_DCHECK_EQ(result, 0);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/desktop_frame_rotation.h b/third_party/libwebrtc/modules/desktop_capture/desktop_frame_rotation.h
new file mode 100644
index 0000000000..6b51b2f883
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/desktop_frame_rotation.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_DESKTOP_CAPTURE_DESKTOP_FRAME_ROTATION_H_
+#define MODULES_DESKTOP_CAPTURE_DESKTOP_FRAME_ROTATION_H_
+
+#include "modules/desktop_capture/desktop_frame.h"
+#include "modules/desktop_capture/desktop_geometry.h"
+
+namespace webrtc {
+
+// Represents the rotation of a DesktopFrame.
+enum class Rotation {
+ CLOCK_WISE_0,
+ CLOCK_WISE_90,
+ CLOCK_WISE_180,
+ CLOCK_WISE_270,
+};
+
+// Rotates the input DesktopFrame `source` and copies the pixels in the
+// unrotated rectangle `source_rect` into the target rectangle of another
+// DesktopFrame `target`. The target rectangle is the rotated `source_rect`
+// plus `target_offset`. `rotation` specifies the `source` to `target`
+// rotation. `source_rect` is in `source` coordinates; `target_offset` is in
+// `target` coordinates. This function triggers a check failure if `source`
+// does not cover `source_rect`, or if `target` does not cover the rotated
+// `source_rect`.
+void RotateDesktopFrame(const DesktopFrame& source,
+ const DesktopRect& source_rect,
+ const Rotation& rotation,
+ const DesktopVector& target_offset,
+ DesktopFrame* target);
+
+// Returns a reverse rotation of `rotation`.
+Rotation ReverseRotation(Rotation rotation);
+
+// Returns a rotated DesktopSize of `size`.
+DesktopSize RotateSize(DesktopSize size, Rotation rotation);
+
+// Returns a rotated DesktopRect of `rect`. `size` is the size of the
+// DesktopFrame to which `rect` belongs.
+DesktopRect RotateRect(DesktopRect rect, DesktopSize size, Rotation rotation);
+
+} // namespace webrtc
+
+#endif // MODULES_DESKTOP_CAPTURE_DESKTOP_FRAME_ROTATION_H_
diff --git a/third_party/libwebrtc/modules/desktop_capture/desktop_frame_rotation_unittest.cc b/third_party/libwebrtc/modules/desktop_capture/desktop_frame_rotation_unittest.cc
new file mode 100644
index 0000000000..782ca63e61
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/desktop_frame_rotation_unittest.cc
@@ -0,0 +1,449 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/desktop_frame_rotation.h"
+
+#include <stdint.h>
+
+#include "modules/desktop_capture/desktop_frame.h"
+#include "modules/desktop_capture/desktop_region.h"
+#include "modules/desktop_capture/test_utils.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+namespace {
+
+// A DesktopFrame implementation which stores data in an external int array.
+class ArrayDesktopFrame : public DesktopFrame {
+ public:
+ ArrayDesktopFrame(DesktopSize size, uint32_t* data);
+ ~ArrayDesktopFrame() override;
+};
+
+ArrayDesktopFrame::ArrayDesktopFrame(DesktopSize size, uint32_t* data)
+ : DesktopFrame(size,
+ size.width() * kBytesPerPixel,
+ reinterpret_cast<uint8_t*>(data),
+ nullptr) {}
+
+ArrayDesktopFrame::~ArrayDesktopFrame() = default;
+
+} // namespace
+
+TEST(DesktopFrameRotationTest, CopyRect3x4) {
+ // A DesktopFrame of 4-pixel width by 3-pixel height.
+ static uint32_t frame_pixels[] = {
+ 0, 1, 2, 3, //
+ 4, 5, 6, 7, //
+ 8, 9, 10, 11, //
+ };
+ ArrayDesktopFrame frame(DesktopSize(4, 3), frame_pixels);
+
+ {
+ BasicDesktopFrame target(DesktopSize(4, 3));
+ RotateDesktopFrame(frame, DesktopRect::MakeSize(frame.size()),
+ Rotation::CLOCK_WISE_0, DesktopVector(), &target);
+ ASSERT_TRUE(DesktopFrameDataEquals(frame, target));
+ }
+
+  // After rotating clockwise by 90 degrees.
+ {
+ static uint32_t expected_pixels[] = {
+ 8, 4, 0, //
+ 9, 5, 1, //
+ 10, 6, 2, //
+ 11, 7, 3, //
+ };
+ ArrayDesktopFrame expected(DesktopSize(3, 4), expected_pixels);
+
+ BasicDesktopFrame target(DesktopSize(3, 4));
+ RotateDesktopFrame(frame, DesktopRect::MakeSize(frame.size()),
+ Rotation::CLOCK_WISE_90, DesktopVector(), &target);
+ ASSERT_TRUE(DesktopFrameDataEquals(target, expected));
+ }
+
+  // After rotating clockwise by 180 degrees.
+ {
+ static uint32_t expected_pixels[] = {
+ 11, 10, 9, 8, //
+ 7, 6, 5, 4, //
+ 3, 2, 1, 0, //
+ };
+ ArrayDesktopFrame expected(DesktopSize(4, 3), expected_pixels);
+
+ BasicDesktopFrame target(DesktopSize(4, 3));
+ RotateDesktopFrame(frame, DesktopRect::MakeSize(frame.size()),
+ Rotation::CLOCK_WISE_180, DesktopVector(), &target);
+ ASSERT_TRUE(DesktopFrameDataEquals(target, expected));
+ }
+
+  // After rotating clockwise by 270 degrees.
+ {
+ static uint32_t expected_pixels[] = {
+ 3, 7, 11, //
+ 2, 6, 10, //
+ 1, 5, 9, //
+ 0, 4, 8, //
+ };
+ ArrayDesktopFrame expected(DesktopSize(3, 4), expected_pixels);
+
+ BasicDesktopFrame target(DesktopSize(3, 4));
+ RotateDesktopFrame(frame, DesktopRect::MakeSize(frame.size()),
+ Rotation::CLOCK_WISE_270, DesktopVector(), &target);
+ ASSERT_TRUE(DesktopFrameDataEquals(target, expected));
+ }
+}
+
+TEST(DesktopFrameRotationTest, CopyRect3x5) {
+ // A DesktopFrame of 5-pixel width by 3-pixel height.
+ static uint32_t frame_pixels[] = {
+ 0, 1, 2, 3, 4, //
+ 5, 6, 7, 8, 9, //
+ 10, 11, 12, 13, 14, //
+ };
+ ArrayDesktopFrame frame(DesktopSize(5, 3), frame_pixels);
+
+ {
+ BasicDesktopFrame target(DesktopSize(5, 3));
+ RotateDesktopFrame(frame, DesktopRect::MakeSize(frame.size()),
+ Rotation::CLOCK_WISE_0, DesktopVector(), &target);
+ ASSERT_TRUE(DesktopFrameDataEquals(target, frame));
+ }
+
+  // After rotating clockwise by 90 degrees.
+ {
+ static uint32_t expected_pixels[] = {
+ 10, 5, 0, //
+ 11, 6, 1, //
+ 12, 7, 2, //
+ 13, 8, 3, //
+ 14, 9, 4, //
+ };
+ ArrayDesktopFrame expected(DesktopSize(3, 5), expected_pixels);
+
+ BasicDesktopFrame target(DesktopSize(3, 5));
+ RotateDesktopFrame(frame, DesktopRect::MakeSize(frame.size()),
+ Rotation::CLOCK_WISE_90, DesktopVector(), &target);
+ ASSERT_TRUE(DesktopFrameDataEquals(target, expected));
+ }
+
+  // After rotating clockwise by 180 degrees.
+ {
+ static uint32_t expected_pixels[]{
+ 14, 13, 12, 11, 10, //
+ 9, 8, 7, 6, 5, //
+ 4, 3, 2, 1, 0, //
+ };
+ ArrayDesktopFrame expected(DesktopSize(5, 3), expected_pixels);
+
+ BasicDesktopFrame target(DesktopSize(5, 3));
+ RotateDesktopFrame(frame, DesktopRect::MakeSize(frame.size()),
+ Rotation::CLOCK_WISE_180, DesktopVector(), &target);
+ ASSERT_TRUE(DesktopFrameDataEquals(target, expected));
+ }
+
+  // After rotating clockwise by 270 degrees.
+ {
+ static uint32_t expected_pixels[] = {
+ 4, 9, 14, //
+ 3, 8, 13, //
+ 2, 7, 12, //
+ 1, 6, 11, //
+ 0, 5, 10, //
+ };
+ ArrayDesktopFrame expected(DesktopSize(3, 5), expected_pixels);
+
+ BasicDesktopFrame target(DesktopSize(3, 5));
+ RotateDesktopFrame(frame, DesktopRect::MakeSize(frame.size()),
+ Rotation::CLOCK_WISE_270, DesktopVector(), &target);
+ ASSERT_TRUE(DesktopFrameDataEquals(target, expected));
+ }
+}
+
+TEST(DesktopFrameRotationTest, PartialCopyRect3x5) {
+ // A DesktopFrame of 5-pixel width by 3-pixel height.
+ static uint32_t frame_pixels[] = {
+ 0, 1, 2, 3, 4, //
+ 5, 6, 7, 8, 9, //
+ 10, 11, 12, 13, 14, //
+ };
+ ArrayDesktopFrame frame(DesktopSize(5, 3), frame_pixels);
+
+ {
+ static uint32_t expected_pixels[] = {
+ 0, 0, 0, 0, 0, //
+ 0, 6, 7, 8, 0, //
+ 0, 0, 0, 0, 0, //
+ };
+ ArrayDesktopFrame expected(DesktopSize(5, 3), expected_pixels);
+
+ BasicDesktopFrame target(DesktopSize(5, 3));
+ ClearDesktopFrame(&target);
+ RotateDesktopFrame(frame, DesktopRect::MakeXYWH(1, 1, 3, 1),
+ Rotation::CLOCK_WISE_0, DesktopVector(), &target);
+ ASSERT_TRUE(DesktopFrameDataEquals(target, expected));
+ }
+
+ {
+ static uint32_t expected_pixels[] = {
+ 0, 1, 2, 3, 0, //
+ 0, 6, 7, 8, 0, //
+ 0, 0, 0, 0, 0, //
+ };
+ ArrayDesktopFrame expected(DesktopSize(5, 3), expected_pixels);
+
+ BasicDesktopFrame target(DesktopSize(5, 3));
+ ClearDesktopFrame(&target);
+ RotateDesktopFrame(frame, DesktopRect::MakeXYWH(1, 0, 3, 2),
+ Rotation::CLOCK_WISE_0, DesktopVector(), &target);
+ ASSERT_TRUE(DesktopFrameDataEquals(target, expected));
+ }
+
+  // After rotating clockwise by 90 degrees.
+ {
+ static uint32_t expected_pixels[] = {
+ 0, 0, 0, //
+ 0, 6, 0, //
+ 0, 7, 0, //
+ 0, 8, 0, //
+ 0, 0, 0, //
+ };
+ ArrayDesktopFrame expected(DesktopSize(3, 5), expected_pixels);
+
+ BasicDesktopFrame target(DesktopSize(3, 5));
+ ClearDesktopFrame(&target);
+ RotateDesktopFrame(frame, DesktopRect::MakeXYWH(1, 1, 3, 1),
+ Rotation::CLOCK_WISE_90, DesktopVector(), &target);
+ ASSERT_TRUE(DesktopFrameDataEquals(target, expected));
+ }
+
+ {
+ static uint32_t expected_pixels[] = {
+ 0, 0, 0, //
+ 11, 6, 0, //
+ 12, 7, 0, //
+ 13, 8, 0, //
+ 0, 0, 0, //
+ };
+ ArrayDesktopFrame expected(DesktopSize(3, 5), expected_pixels);
+
+ BasicDesktopFrame target(DesktopSize(3, 5));
+ ClearDesktopFrame(&target);
+ RotateDesktopFrame(frame, DesktopRect::MakeXYWH(1, 1, 3, 2),
+ Rotation::CLOCK_WISE_90, DesktopVector(), &target);
+ ASSERT_TRUE(DesktopFrameDataEquals(target, expected));
+ }
+
+  // After rotating clockwise by 180 degrees.
+ {
+ static uint32_t expected_pixels[] = {
+ 0, 0, 0, 0, 0, //
+ 0, 8, 7, 6, 0, //
+ 0, 0, 0, 0, 0, //
+ };
+ ArrayDesktopFrame expected(DesktopSize(5, 3), expected_pixels);
+
+ BasicDesktopFrame target(DesktopSize(5, 3));
+ ClearDesktopFrame(&target);
+ RotateDesktopFrame(frame, DesktopRect::MakeXYWH(1, 1, 3, 1),
+ Rotation::CLOCK_WISE_180, DesktopVector(), &target);
+ ASSERT_TRUE(DesktopFrameDataEquals(target, expected));
+ }
+
+ {
+ static uint32_t expected_pixels[] = {
+ 0, 13, 12, 11, 0, //
+ 0, 8, 7, 6, 0, //
+ 0, 0, 0, 0, 0, //
+ };
+ ArrayDesktopFrame expected(DesktopSize(5, 3), expected_pixels);
+
+ BasicDesktopFrame target(DesktopSize(5, 3));
+ ClearDesktopFrame(&target);
+ RotateDesktopFrame(frame, DesktopRect::MakeXYWH(1, 1, 3, 2),
+ Rotation::CLOCK_WISE_180, DesktopVector(), &target);
+ ASSERT_TRUE(DesktopFrameDataEquals(target, expected));
+ }
+
+  // After rotating clockwise by 270 degrees.
+ {
+ static uint32_t expected_pixels[] = {
+ 0, 0, 0, //
+ 0, 8, 0, //
+ 0, 7, 0, //
+ 0, 6, 0, //
+ 0, 0, 0, //
+ };
+ ArrayDesktopFrame expected(DesktopSize(3, 5), expected_pixels);
+
+ BasicDesktopFrame target(DesktopSize(3, 5));
+ ClearDesktopFrame(&target);
+ RotateDesktopFrame(frame, DesktopRect::MakeXYWH(1, 1, 3, 1),
+ Rotation::CLOCK_WISE_270, DesktopVector(), &target);
+ ASSERT_TRUE(DesktopFrameDataEquals(target, expected));
+ }
+
+ {
+ static uint32_t expected_pixels[] = {
+ 0, 0, 0, //
+ 3, 8, 0, //
+ 2, 7, 0, //
+ 1, 6, 0, //
+ 0, 0, 0, //
+ };
+ ArrayDesktopFrame expected(DesktopSize(3, 5), expected_pixels);
+
+ BasicDesktopFrame target(DesktopSize(3, 5));
+ ClearDesktopFrame(&target);
+ RotateDesktopFrame(frame, DesktopRect::MakeXYWH(1, 0, 3, 2),
+ Rotation::CLOCK_WISE_270, DesktopVector(), &target);
+ ASSERT_TRUE(DesktopFrameDataEquals(target, expected));
+ }
+}
+
+TEST(DesktopFrameRotationTest, WithOffset) {
+ // A DesktopFrame of 4-pixel width by 3-pixel height.
+ static uint32_t frame_pixels[] = {
+ 0, 1, 2, 3, //
+ 4, 5, 6, 7, //
+ 8, 9, 10, 11, //
+ };
+ ArrayDesktopFrame frame(DesktopSize(4, 3), frame_pixels);
+
+ {
+ static uint32_t expected_pixels[] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, //
+ 0, 0, 1, 2, 3, 0, 0, 0, //
+ 0, 4, 5, 6, 7, 0, 0, 0, //
+ 0, 8, 9, 10, 11, 0, 0, 0, //
+ 0, 0, 0, 0, 0, 0, 0, 0, //
+ 0, 0, 0, 0, 0, 0, 0, 0, //
+ };
+ ArrayDesktopFrame expected(DesktopSize(8, 6), expected_pixels);
+
+ BasicDesktopFrame target(DesktopSize(8, 6));
+ ClearDesktopFrame(&target);
+ RotateDesktopFrame(frame, DesktopRect::MakeSize(frame.size()),
+ Rotation::CLOCK_WISE_0, DesktopVector(1, 1), &target);
+ ASSERT_TRUE(DesktopFrameDataEquals(target, expected));
+ target.mutable_updated_region()->Subtract(
+ DesktopRect::MakeOriginSize(DesktopVector(1, 1), frame.size()));
+ ASSERT_TRUE(target.updated_region().is_empty());
+ }
+
+ {
+ static uint32_t expected_pixels[] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, //
+ 0, 11, 10, 9, 8, 0, 0, 0, //
+ 0, 7, 6, 5, 4, 0, 0, 0, //
+ 0, 3, 2, 1, 0, 0, 0, 0, //
+ 0, 0, 0, 0, 0, 0, 0, 0, //
+ 0, 0, 0, 0, 0, 0, 0, 0, //
+ };
+ ArrayDesktopFrame expected(DesktopSize(8, 6), expected_pixels);
+
+ BasicDesktopFrame target(DesktopSize(8, 6));
+ ClearDesktopFrame(&target);
+ RotateDesktopFrame(frame, DesktopRect::MakeSize(frame.size()),
+ Rotation::CLOCK_WISE_180, DesktopVector(1, 1), &target);
+ ASSERT_TRUE(DesktopFrameDataEquals(target, expected));
+ target.mutable_updated_region()->Subtract(
+ DesktopRect::MakeOriginSize(DesktopVector(1, 1), frame.size()));
+ ASSERT_TRUE(target.updated_region().is_empty());
+ }
+
+ {
+ static uint32_t expected_pixels[] = {
+ 0, 0, 0, 0, 0, 0, //
+ 0, 8, 4, 0, 0, 0, //
+ 0, 9, 5, 1, 0, 0, //
+ 0, 10, 6, 2, 0, 0, //
+ 0, 11, 7, 3, 0, 0, //
+ 0, 0, 0, 0, 0, 0, //
+ 0, 0, 0, 0, 0, 0, //
+ 0, 0, 0, 0, 0, 0, //
+ };
+ ArrayDesktopFrame expected(DesktopSize(6, 8), expected_pixels);
+
+ BasicDesktopFrame target(DesktopSize(6, 8));
+ ClearDesktopFrame(&target);
+ RotateDesktopFrame(frame, DesktopRect::MakeSize(frame.size()),
+ Rotation::CLOCK_WISE_90, DesktopVector(1, 1), &target);
+ ASSERT_TRUE(DesktopFrameDataEquals(target, expected));
+ target.mutable_updated_region()->Subtract(
+ DesktopRect::MakeXYWH(1, 1, 3, 4));
+ ASSERT_TRUE(target.updated_region().is_empty());
+ }
+
+ {
+ static uint32_t expected_pixels[] = {
+ 0, 0, 0, 0, 0, 0, //
+ 0, 3, 7, 11, 0, 0, //
+ 0, 2, 6, 10, 0, 0, //
+ 0, 1, 5, 9, 0, 0, //
+ 0, 0, 4, 8, 0, 0, //
+ 0, 0, 0, 0, 0, 0, //
+ 0, 0, 0, 0, 0, 0, //
+ 0, 0, 0, 0, 0, 0, //
+ };
+ ArrayDesktopFrame expected(DesktopSize(6, 8), expected_pixels);
+
+ BasicDesktopFrame target(DesktopSize(6, 8));
+ ClearDesktopFrame(&target);
+ RotateDesktopFrame(frame, DesktopRect::MakeSize(frame.size()),
+ Rotation::CLOCK_WISE_270, DesktopVector(1, 1), &target);
+ ASSERT_TRUE(DesktopFrameDataEquals(target, expected));
+ target.mutable_updated_region()->Subtract(
+ DesktopRect::MakeXYWH(1, 1, 3, 4));
+ ASSERT_TRUE(target.updated_region().is_empty());
+ }
+}
+
+// On a typical machine (Intel(R) Xeon(R) E5-1650 v3 @ 3.50GHz), with O2
+// optimization, the following case takes ~1.4s to finish, i.e. entirely
+// rotating one 2048 x 1536 frame, a resolution large enough to cover most
+// desktop users, takes around 14ms.
+TEST(DesktopFrameRotationTest, DISABLED_PerformanceTest) {
+ BasicDesktopFrame frame(DesktopSize(2048, 1536));
+ BasicDesktopFrame target(DesktopSize(1536, 2048));
+ BasicDesktopFrame target2(DesktopSize(2048, 1536));
+ for (int i = 0; i < 100; i++) {
+ RotateDesktopFrame(frame, DesktopRect::MakeSize(frame.size()),
+ Rotation::CLOCK_WISE_90, DesktopVector(), &target);
+ RotateDesktopFrame(frame, DesktopRect::MakeSize(frame.size()),
+ Rotation::CLOCK_WISE_270, DesktopVector(), &target);
+ RotateDesktopFrame(frame, DesktopRect::MakeSize(frame.size()),
+ Rotation::CLOCK_WISE_0, DesktopVector(), &target2);
+ RotateDesktopFrame(frame, DesktopRect::MakeSize(frame.size()),
+ Rotation::CLOCK_WISE_180, DesktopVector(), &target2);
+ }
+}
+
+// On a typical machine (Intel(R) Xeon(R) E5-1650 v3 @ 3.50GHz), with O2
+// optimization, the following case takes ~6.7s to finish, i.e. entirely
+// rotating one 4096 x 3072 frame takes around 67ms.
+TEST(DesktopFrameRotationTest, DISABLED_PerformanceTestOnLargeScreen) {
+ BasicDesktopFrame frame(DesktopSize(4096, 3072));
+ BasicDesktopFrame target(DesktopSize(3072, 4096));
+ BasicDesktopFrame target2(DesktopSize(4096, 3072));
+ for (int i = 0; i < 100; i++) {
+ RotateDesktopFrame(frame, DesktopRect::MakeSize(frame.size()),
+ Rotation::CLOCK_WISE_90, DesktopVector(), &target);
+ RotateDesktopFrame(frame, DesktopRect::MakeSize(frame.size()),
+ Rotation::CLOCK_WISE_270, DesktopVector(), &target);
+ RotateDesktopFrame(frame, DesktopRect::MakeSize(frame.size()),
+ Rotation::CLOCK_WISE_0, DesktopVector(), &target2);
+ RotateDesktopFrame(frame, DesktopRect::MakeSize(frame.size()),
+ Rotation::CLOCK_WISE_180, DesktopVector(), &target2);
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/desktop_frame_unittest.cc b/third_party/libwebrtc/modules/desktop_capture/desktop_frame_unittest.cc
new file mode 100644
index 0000000000..ce0cbb45f5
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/desktop_frame_unittest.cc
@@ -0,0 +1,336 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/desktop_frame.h"
+
+#include <string.h>
+
+#include <memory>
+
+#include "modules/desktop_capture/desktop_region.h"
+#include "modules/desktop_capture/test_utils.h"
+#include "rtc_base/arraysize.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+namespace {
+
+std::unique_ptr<DesktopFrame> CreateTestFrame(DesktopRect rect,
+ int pixels_value) {
+ DesktopSize size = rect.size();
+ auto frame = std::make_unique<BasicDesktopFrame>(size);
+ frame->set_top_left(rect.top_left());
+ memset(frame->data(), pixels_value, frame->stride() * size.height());
+ return frame;
+}
+
+struct TestData {
+ const char* description;
+ DesktopRect dest_frame_rect;
+ DesktopRect src_frame_rect;
+ double horizontal_scale;
+ double vertical_scale;
+ DesktopRect expected_overlap_rect;
+};
+
+void RunTest(const TestData& test) {
+ // Copy a source frame with all bits set into a dest frame with none set.
+ auto dest_frame = CreateTestFrame(test.dest_frame_rect, 0);
+ auto src_frame = CreateTestFrame(test.src_frame_rect, 0xff);
+
+ dest_frame->CopyIntersectingPixelsFrom(
+ *src_frame, test.horizontal_scale, test.vertical_scale);
+
+ // Translate the expected overlap rect to be relative to the dest frame/rect.
+ DesktopVector dest_frame_origin = test.dest_frame_rect.top_left();
+ DesktopRect relative_expected_overlap_rect = test.expected_overlap_rect;
+ relative_expected_overlap_rect.Translate(-dest_frame_origin.x(),
+ -dest_frame_origin.y());
+
+ // Confirm bits are now set in the dest frame if & only if they fall in the
+ // expected range.
+ for (int y = 0; y < dest_frame->size().height(); ++y) {
+ SCOPED_TRACE(y);
+
+ for (int x = 0; x < dest_frame->size().width(); ++x) {
+ SCOPED_TRACE(x);
+
+ DesktopVector point(x, y);
+ uint8_t* data = dest_frame->GetFrameDataAtPos(point);
+ uint32_t pixel_value = *reinterpret_cast<uint32_t*>(data);
+ bool was_copied = pixel_value == 0xffffffff;
+ ASSERT_TRUE(was_copied || pixel_value == 0);
+
+ bool expected_to_be_copied =
+ relative_expected_overlap_rect.Contains(point);
+
+ ASSERT_EQ(was_copied, expected_to_be_copied);
+ }
+ }
+}
+
+void RunTests(const TestData* tests, int num_tests) {
+ for (int i = 0; i < num_tests; i++) {
+ const TestData& test = tests[i];
+
+ SCOPED_TRACE(test.description);
+
+ RunTest(test);
+ }
+}
+
+} // namespace
+
+TEST(DesktopFrameTest, CopyIntersectingPixelsMatchingRects) {
+ const TestData tests[] = {
+ {"0 origin",
+ DesktopRect::MakeXYWH(0, 0, 2, 2),
+ DesktopRect::MakeXYWH(0, 0, 2, 2),
+ 1.0, 1.0,
+ DesktopRect::MakeXYWH(0, 0, 2, 2)},
+
+ {"Negative origin",
+ DesktopRect::MakeXYWH(-1, -1, 2, 2),
+ DesktopRect::MakeXYWH(-1, -1, 2, 2),
+ 1.0, 1.0,
+ DesktopRect::MakeXYWH(-1, -1, 2, 2)}
+ };
+
+ RunTests(tests, arraysize(tests));
+}
+
+TEST(DesktopFrameTest, CopyIntersectingPixelsMatchingRectsScaled) {
+ // The scale factors shouldn't affect matching rects (they're only applied
+ // to any difference between the origins)
+ const TestData tests[] = {
+ {"0 origin 2x",
+ DesktopRect::MakeXYWH(0, 0, 2, 2),
+ DesktopRect::MakeXYWH(0, 0, 2, 2),
+ 2.0, 2.0,
+ DesktopRect::MakeXYWH(0, 0, 2, 2)},
+
+ {"0 origin 0.5x",
+ DesktopRect::MakeXYWH(0, 0, 2, 2),
+ DesktopRect::MakeXYWH(0, 0, 2, 2),
+ 0.5, 0.5,
+ DesktopRect::MakeXYWH(0, 0, 2, 2)},
+
+ {"Negative origin 2x",
+ DesktopRect::MakeXYWH(-1, -1, 2, 2),
+ DesktopRect::MakeXYWH(-1, -1, 2, 2),
+ 2.0, 2.0,
+ DesktopRect::MakeXYWH(-1, -1, 2, 2)},
+
+ {"Negative origin 0.5x",
+ DesktopRect::MakeXYWH(-1, -1, 2, 2),
+ DesktopRect::MakeXYWH(-1, -1, 2, 2),
+ 0.5, 0.5,
+ DesktopRect::MakeXYWH(-1, -1, 2, 2)}
+ };
+
+ RunTests(tests, arraysize(tests));
+}
+
+TEST(DesktopFrameTest, CopyIntersectingPixelsFullyContainedRects) {
+ const TestData tests[] = {
+ {"0 origin top left",
+ DesktopRect::MakeXYWH(0, 0, 2, 2),
+ DesktopRect::MakeXYWH(0, 0, 1, 1),
+ 1.0, 1.0,
+ DesktopRect::MakeXYWH(0, 0, 1, 1)},
+
+ {"0 origin bottom right",
+ DesktopRect::MakeXYWH(0, 0, 2, 2),
+ DesktopRect::MakeXYWH(1, 1, 1, 1),
+ 1.0, 1.0,
+ DesktopRect::MakeXYWH(1, 1, 1, 1)},
+
+ {"Negative origin bottom left",
+ DesktopRect::MakeXYWH(-1, -1, 2, 2),
+ DesktopRect::MakeXYWH(-1, 0, 1, 1),
+ 1.0, 1.0,
+ DesktopRect::MakeXYWH(-1, 0, 1, 1)}
+ };
+
+ RunTests(tests, arraysize(tests));
+}
+
+TEST(DesktopFrameTest, CopyIntersectingPixelsFullyContainedRectsScaled) {
+ const TestData tests[] = {
+ {"0 origin top left 2x",
+ DesktopRect::MakeXYWH(0, 0, 2, 2),
+ DesktopRect::MakeXYWH(0, 0, 1, 1),
+ 2.0, 2.0,
+ DesktopRect::MakeXYWH(0, 0, 1, 1)},
+
+ {"0 origin top left 0.5x",
+ DesktopRect::MakeXYWH(0, 0, 2, 2),
+ DesktopRect::MakeXYWH(0, 0, 1, 1),
+ 0.5, 0.5,
+ DesktopRect::MakeXYWH(0, 0, 1, 1)},
+
+ {"0 origin bottom left 2x",
+ DesktopRect::MakeXYWH(0, 0, 4, 4),
+ DesktopRect::MakeXYWH(1, 1, 2, 2),
+ 2.0, 2.0,
+ DesktopRect::MakeXYWH(2, 2, 2, 2)},
+
+ {"0 origin bottom middle 2x/1x",
+ DesktopRect::MakeXYWH(0, 0, 4, 3),
+ DesktopRect::MakeXYWH(1, 1, 2, 2),
+ 2.0, 1.0,
+ DesktopRect::MakeXYWH(2, 1, 2, 2)},
+
+ {"0 origin middle 0.5x",
+ DesktopRect::MakeXYWH(0, 0, 3, 3),
+ DesktopRect::MakeXYWH(2, 2, 1, 1),
+ 0.5, 0.5,
+ DesktopRect::MakeXYWH(1, 1, 1, 1)},
+
+ {"Negative origin bottom left 2x",
+ DesktopRect::MakeXYWH(-1, -1, 3, 3),
+ DesktopRect::MakeXYWH(-1, 0, 1, 1),
+ 2.0, 2.0,
+ DesktopRect::MakeXYWH(-1, 1, 1, 1)},
+
+ {"Negative origin near middle 0.5x",
+ DesktopRect::MakeXYWH(-2, -2, 2, 2),
+ DesktopRect::MakeXYWH(0, 0, 1, 1),
+ 0.5, 0.5,
+ DesktopRect::MakeXYWH(-1, -1, 1, 1)}
+ };
+
+ RunTests(tests, arraysize(tests));
+}
+
+TEST(DesktopFrameTest, CopyIntersectingPixelsPartiallyContainedRects) {
+ const TestData tests[] = {
+ {"Top left",
+ DesktopRect::MakeXYWH(0, 0, 2, 2),
+ DesktopRect::MakeXYWH(-1, -1, 2, 2),
+ 1.0, 1.0,
+ DesktopRect::MakeXYWH(0, 0, 1, 1)},
+
+ {"Top right",
+ DesktopRect::MakeXYWH(0, 0, 2, 2),
+ DesktopRect::MakeXYWH(1, -1, 2, 2),
+ 1.0, 1.0,
+ DesktopRect::MakeXYWH(1, 0, 1, 1)},
+
+ {"Bottom right",
+ DesktopRect::MakeXYWH(0, 0, 2, 2),
+ DesktopRect::MakeXYWH(1, 1, 2, 2),
+ 1.0, 1.0,
+ DesktopRect::MakeXYWH(1, 1, 1, 1)},
+
+ {"Bottom left",
+ DesktopRect::MakeXYWH(0, 0, 2, 2),
+ DesktopRect::MakeXYWH(-1, 1, 2, 2),
+ 1.0, 1.0,
+ DesktopRect::MakeXYWH(0, 1, 1, 1)}
+ };
+
+ RunTests(tests, arraysize(tests));
+}
+
+TEST(DesktopFrameTest, CopyIntersectingPixelsPartiallyContainedRectsScaled) {
+ const TestData tests[] = {
+ {"Top left 2x",
+ DesktopRect::MakeXYWH(0, 0, 2, 2),
+ DesktopRect::MakeXYWH(-1, -1, 3, 3),
+ 2.0, 2.0,
+ DesktopRect::MakeXYWH(0, 0, 1, 1)},
+
+ {"Top right 0.5x",
+ DesktopRect::MakeXYWH(0, 0, 2, 2),
+ DesktopRect::MakeXYWH(2, -2, 2, 2),
+ 0.5, 0.5,
+ DesktopRect::MakeXYWH(1, 0, 1, 1)},
+
+ {"Bottom right 2x",
+ DesktopRect::MakeXYWH(0, 0, 3, 3),
+ DesktopRect::MakeXYWH(-1, 1, 3, 3),
+ 2.0, 2.0,
+ DesktopRect::MakeXYWH(0, 2, 1, 1)},
+
+ {"Bottom left 0.5x",
+ DesktopRect::MakeXYWH(0, 0, 2, 2),
+ DesktopRect::MakeXYWH(-2, 2, 2, 2),
+ 0.5, 0.5,
+ DesktopRect::MakeXYWH(0, 1, 1, 1)}
+ };
+
+ RunTests(tests, arraysize(tests));
+}
+
+TEST(DesktopFrameTest, CopyIntersectingPixelsUncontainedRects) {
+ const TestData tests[] = {
+ {"Left",
+ DesktopRect::MakeXYWH(0, 0, 2, 2),
+ DesktopRect::MakeXYWH(-1, 0, 1, 2),
+ 1.0, 1.0,
+ DesktopRect::MakeXYWH(0, 0, 0, 0)},
+
+ {"Top",
+ DesktopRect::MakeXYWH(0, 0, 2, 2),
+ DesktopRect::MakeXYWH(0, -1, 2, 1),
+ 1.0, 1.0,
+ DesktopRect::MakeXYWH(0, 0, 0, 0)},
+
+ {"Right",
+ DesktopRect::MakeXYWH(0, 0, 2, 2),
+ DesktopRect::MakeXYWH(2, 0, 1, 2),
+ 1.0, 1.0,
+ DesktopRect::MakeXYWH(0, 0, 0, 0)},
+
+ {"Bottom",
+ DesktopRect::MakeXYWH(0, 0, 2, 2),
+ DesktopRect::MakeXYWH(0, 2, 2, 1),
+ 1.0, 1.0,
+ DesktopRect::MakeXYWH(0, 0, 0, 0)}
+ };
+
+ RunTests(tests, arraysize(tests));
+}
+
+TEST(DesktopFrameTest, CopyIntersectingPixelsUncontainedRectsScaled) {
+ const TestData tests[] = {
+ {"Left 2x",
+ DesktopRect::MakeXYWH(0, 0, 2, 2),
+ DesktopRect::MakeXYWH(-1, 0, 2, 2),
+ 2.0, 2.0,
+ DesktopRect::MakeXYWH(0, 0, 0, 0)},
+
+ {"Top 0.5x",
+ DesktopRect::MakeXYWH(0, 0, 2, 2),
+ DesktopRect::MakeXYWH(0, -2, 2, 1),
+ 0.5, 0.5,
+ DesktopRect::MakeXYWH(0, 0, 0, 0)},
+
+ {"Right 2x",
+ DesktopRect::MakeXYWH(0, 0, 2, 2),
+ DesktopRect::MakeXYWH(1, 0, 1, 2),
+ 2.0, 2.0,
+ DesktopRect::MakeXYWH(0, 0, 0, 0)},
+
+ {"Bottom 0.5x",
+ DesktopRect::MakeXYWH(0, 0, 2, 2),
+ DesktopRect::MakeXYWH(0, 4, 2, 1),
+ 0.5, 0.5,
+ DesktopRect::MakeXYWH(0, 0, 0, 0)}
+ };
+
+ RunTests(tests, arraysize(tests));
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/desktop_frame_win.cc b/third_party/libwebrtc/modules/desktop_capture/desktop_frame_win.cc
new file mode 100644
index 0000000000..262ebbdec0
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/desktop_frame_win.cc
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/desktop_frame_win.h"
+
+#include <utility>
+
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+DesktopFrameWin::DesktopFrameWin(DesktopSize size,
+ int stride,
+ uint8_t* data,
+ std::unique_ptr<SharedMemory> shared_memory,
+ HBITMAP bitmap)
+ : DesktopFrame(size, stride, data, shared_memory.get()),
+ bitmap_(bitmap),
+ owned_shared_memory_(std::move(shared_memory)) {}
+
+DesktopFrameWin::~DesktopFrameWin() {
+ DeleteObject(bitmap_);
+}
+
+// static
+std::unique_ptr<DesktopFrameWin> DesktopFrameWin::Create(
+ DesktopSize size,
+ SharedMemoryFactory* shared_memory_factory,
+ HDC hdc) {
+ int bytes_per_row = size.width() * kBytesPerPixel;
+ int buffer_size = bytes_per_row * size.height();
+
+  // Describe a device-independent bitmap (DIB) that is the size of the desktop.
+ BITMAPINFO bmi = {};
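+  // A negative biHeight requests a top-down DIB, i.e. the first scanline is
+  // the top row of the image.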
+ bmi.bmiHeader.biHeight = -size.height();
+ bmi.bmiHeader.biWidth = size.width();
+ bmi.bmiHeader.biPlanes = 1;
+ bmi.bmiHeader.biBitCount = DesktopFrameWin::kBytesPerPixel * 8;
+ bmi.bmiHeader.biSize = sizeof(bmi.bmiHeader);
+ bmi.bmiHeader.biSizeImage = bytes_per_row * size.height();
+
+ std::unique_ptr<SharedMemory> shared_memory;
+ HANDLE section_handle = nullptr;
+ if (shared_memory_factory) {
+ shared_memory = shared_memory_factory->CreateSharedMemory(buffer_size);
+ if (!shared_memory) {
+ RTC_LOG(LS_WARNING) << "Failed to allocate shared memory";
+ return nullptr;
+ }
+ section_handle = shared_memory->handle();
+ }
+ void* data = nullptr;
+ HBITMAP bitmap =
+ CreateDIBSection(hdc, &bmi, DIB_RGB_COLORS, &data, section_handle, 0);
+ if (!bitmap) {
+ RTC_LOG(LS_WARNING) << "Failed to allocate new window frame "
+ << GetLastError();
+ return nullptr;
+ }
+
+ return std::unique_ptr<DesktopFrameWin>(
+ new DesktopFrameWin(size, bytes_per_row, reinterpret_cast<uint8_t*>(data),
+ std::move(shared_memory), bitmap));
+}
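+
+// A minimal usage sketch (illustration only; assumes a valid screen DC that
+// the caller releases afterwards):
+//
+//   HDC hdc = GetDC(nullptr);
+//   std::unique_ptr<DesktopFrameWin> frame = DesktopFrameWin::Create(
+//       DesktopSize(640, 480), /*shared_memory_factory=*/nullptr, hdc);
+//   if (frame) {
+//     // frame->data() points at the 32 bpp top-down DIB pixels and
+//     // frame->stride() equals width * kBytesPerPixel.
+//   }
+//   ReleaseDC(nullptr, hdc);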
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/desktop_frame_win.h b/third_party/libwebrtc/modules/desktop_capture/desktop_frame_win.h
new file mode 100644
index 0000000000..f8faad6777
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/desktop_frame_win.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_DESKTOP_CAPTURE_DESKTOP_FRAME_WIN_H_
+#define MODULES_DESKTOP_CAPTURE_DESKTOP_FRAME_WIN_H_
+
+#include <windows.h>
+
+#include <memory>
+
+#include "modules/desktop_capture/desktop_frame.h"
+
+namespace webrtc {
+
+// DesktopFrame implementation used by screen and window captures on Windows.
+// Frame data is stored in a GDI bitmap.
+class DesktopFrameWin : public DesktopFrame {
+ public:
+ ~DesktopFrameWin() override;
+
+ DesktopFrameWin(const DesktopFrameWin&) = delete;
+ DesktopFrameWin& operator=(const DesktopFrameWin&) = delete;
+
+ static std::unique_ptr<DesktopFrameWin>
+ Create(DesktopSize size, SharedMemoryFactory* shared_memory_factory, HDC hdc);
+
+ HBITMAP bitmap() { return bitmap_; }
+
+ private:
+ DesktopFrameWin(DesktopSize size,
+ int stride,
+ uint8_t* data,
+ std::unique_ptr<SharedMemory> shared_memory,
+ HBITMAP bitmap);
+
+ HBITMAP bitmap_;
+ std::unique_ptr<SharedMemory> owned_shared_memory_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_DESKTOP_CAPTURE_DESKTOP_FRAME_WIN_H_
diff --git a/third_party/libwebrtc/modules/desktop_capture/desktop_geometry.cc b/third_party/libwebrtc/modules/desktop_capture/desktop_geometry.cc
new file mode 100644
index 0000000000..e0a5d7af83
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/desktop_geometry.cc
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/desktop_geometry.h"
+
+#include <algorithm>
+#include <cmath>
+
+namespace webrtc {
+
+bool DesktopRect::Contains(const DesktopVector& point) const {
+ return point.x() >= left() && point.x() < right() && point.y() >= top() &&
+ point.y() < bottom();
+}
+
+bool DesktopRect::ContainsRect(const DesktopRect& rect) const {
+ return rect.left() >= left() && rect.right() <= right() &&
+ rect.top() >= top() && rect.bottom() <= bottom();
+}
+
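+// Note (illustration): rects that do not intersect normalize to the empty
+// rect (0, 0, 0, 0); e.g. intersecting MakeXYWH(0, 0, 2, 2) with
+// MakeXYWH(5, 5, 2, 2).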
+void DesktopRect::IntersectWith(const DesktopRect& rect) {
+ left_ = std::max(left(), rect.left());
+ top_ = std::max(top(), rect.top());
+ right_ = std::min(right(), rect.right());
+ bottom_ = std::min(bottom(), rect.bottom());
+ if (is_empty()) {
+ left_ = 0;
+ top_ = 0;
+ right_ = 0;
+ bottom_ = 0;
+ }
+}
+
+void DesktopRect::UnionWith(const DesktopRect& rect) {
+ if (is_empty()) {
+ *this = rect;
+ return;
+ }
+
+ if (rect.is_empty()) {
+ return;
+ }
+
+ left_ = std::min(left(), rect.left());
+ top_ = std::min(top(), rect.top());
+ right_ = std::max(right(), rect.right());
+ bottom_ = std::max(bottom(), rect.bottom());
+}
+
+void DesktopRect::Translate(int32_t dx, int32_t dy) {
+ left_ += dx;
+ top_ += dy;
+ right_ += dx;
+ bottom_ += dy;
+}
+
+void DesktopRect::Extend(int32_t left_offset,
+ int32_t top_offset,
+ int32_t right_offset,
+ int32_t bottom_offset) {
+ left_ -= left_offset;
+ top_ -= top_offset;
+ right_ += right_offset;
+ bottom_ += bottom_offset;
+}
+
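+// For example (illustration): MakeXYWH(100, 100, 100, 100).Scale(1.1, 0.9)
+// yields MakeXYWH(100, 100, 110, 90); the origin stays fixed while the new
+// width and height are rounded to the nearest integer.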
+void DesktopRect::Scale(double horizontal, double vertical) {
+ right_ += static_cast<int>(std::round(width() * (horizontal - 1)));
+ bottom_ += static_cast<int>(std::round(height() * (vertical - 1)));
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/desktop_geometry.h b/third_party/libwebrtc/modules/desktop_capture/desktop_geometry.h
new file mode 100644
index 0000000000..691455df57
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/desktop_geometry.h
@@ -0,0 +1,169 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_DESKTOP_CAPTURE_DESKTOP_GEOMETRY_H_
+#define MODULES_DESKTOP_CAPTURE_DESKTOP_GEOMETRY_H_
+
+#include <stdint.h>
+
+#include "rtc_base/system/rtc_export.h"
+
+namespace webrtc {
+
+// A vector in 2D integer space; e.g. it can be used to represent screen DPI.
+class DesktopVector {
+ public:
+ DesktopVector() : x_(0), y_(0) {}
+ DesktopVector(int32_t x, int32_t y) : x_(x), y_(y) {}
+
+ int32_t x() const { return x_; }
+ int32_t y() const { return y_; }
+ bool is_zero() const { return x_ == 0 && y_ == 0; }
+
+ bool equals(const DesktopVector& other) const {
+ return x_ == other.x_ && y_ == other.y_;
+ }
+
+ void set(int32_t x, int32_t y) {
+ x_ = x;
+ y_ = y;
+ }
+
+ DesktopVector add(const DesktopVector& other) const {
+ return DesktopVector(x() + other.x(), y() + other.y());
+ }
+ DesktopVector subtract(const DesktopVector& other) const {
+ return DesktopVector(x() - other.x(), y() - other.y());
+ }
+
+ DesktopVector operator-() const { return DesktopVector(-x_, -y_); }
+
+ private:
+ int32_t x_;
+ int32_t y_;
+};
+
+// Type used to represent screen/window size.
+class DesktopSize {
+ public:
+ DesktopSize() : width_(0), height_(0) {}
+ DesktopSize(int32_t width, int32_t height) : width_(width), height_(height) {}
+
+ int32_t width() const { return width_; }
+ int32_t height() const { return height_; }
+
+ bool is_empty() const { return width_ <= 0 || height_ <= 0; }
+
+ bool equals(const DesktopSize& other) const {
+ return width_ == other.width_ && height_ == other.height_;
+ }
+
+ void set(int32_t width, int32_t height) {
+ width_ = width;
+ height_ = height;
+ }
+
+ private:
+ int32_t width_;
+ int32_t height_;
+};
+
+// Represents a rectangle on the screen.
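+// Stored as (left, top, right, bottom) edges; for example (illustration),
+// MakeXYWH(1, 2, 3, 4) denotes the same rectangle as MakeLTRB(1, 2, 4, 6).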
+class RTC_EXPORT DesktopRect {
+ public:
+ static DesktopRect MakeSize(const DesktopSize& size) {
+ return DesktopRect(0, 0, size.width(), size.height());
+ }
+ static DesktopRect MakeWH(int32_t width, int32_t height) {
+ return DesktopRect(0, 0, width, height);
+ }
+ static DesktopRect MakeXYWH(int32_t x,
+ int32_t y,
+ int32_t width,
+ int32_t height) {
+ return DesktopRect(x, y, x + width, y + height);
+ }
+ static DesktopRect MakeLTRB(int32_t left,
+ int32_t top,
+ int32_t right,
+ int32_t bottom) {
+ return DesktopRect(left, top, right, bottom);
+ }
+ static DesktopRect MakeOriginSize(const DesktopVector& origin,
+ const DesktopSize& size) {
+ return MakeXYWH(origin.x(), origin.y(), size.width(), size.height());
+ }
+
+ DesktopRect() : left_(0), top_(0), right_(0), bottom_(0) {}
+
+ int32_t left() const { return left_; }
+ int32_t top() const { return top_; }
+ int32_t right() const { return right_; }
+ int32_t bottom() const { return bottom_; }
+ int32_t width() const { return right_ - left_; }
+ int32_t height() const { return bottom_ - top_; }
+
+ void set_width(int32_t width) { right_ = left_ + width; }
+ void set_height(int32_t height) { bottom_ = top_ + height; }
+
+ DesktopVector top_left() const { return DesktopVector(left_, top_); }
+ DesktopSize size() const { return DesktopSize(width(), height()); }
+
+ bool is_empty() const { return left_ >= right_ || top_ >= bottom_; }
+
+ bool equals(const DesktopRect& other) const {
+ return left_ == other.left_ && top_ == other.top_ &&
+ right_ == other.right_ && bottom_ == other.bottom_;
+ }
+
+ // Returns true if `point` lies within the rectangle boundaries.
+ bool Contains(const DesktopVector& point) const;
+
+ // Returns true if `rect` lies within the boundaries of this rectangle.
+ bool ContainsRect(const DesktopRect& rect) const;
+
+ // Finds intersection with `rect`.
+ void IntersectWith(const DesktopRect& rect);
+
+  // Extends the rectangle to cover `rect`. If `this` is empty, replaces `this`
+  // with `rect`; if `rect` is empty, this function has no effect.
+ void UnionWith(const DesktopRect& rect);
+
+ // Adds (dx, dy) to the position of the rectangle.
+ void Translate(int32_t dx, int32_t dy);
+ void Translate(DesktopVector d) { Translate(d.x(), d.y()); }
+
+ // Enlarges current DesktopRect by subtracting `left_offset` and `top_offset`
+ // from `left_` and `top_`, and adding `right_offset` and `bottom_offset` to
+ // `right_` and `bottom_`. This function does not normalize the result, so
+ // `left_` and `top_` may be less than zero or larger than `right_` and
+ // `bottom_`.
+ void Extend(int32_t left_offset,
+ int32_t top_offset,
+ int32_t right_offset,
+ int32_t bottom_offset);
+
+  // Scales the current DesktopRect. This function does not affect `top_` and
+  // `left_`.
+ void Scale(double horizontal, double vertical);
+
+ private:
+ DesktopRect(int32_t left, int32_t top, int32_t right, int32_t bottom)
+ : left_(left), top_(top), right_(right), bottom_(bottom) {}
+
+ int32_t left_;
+ int32_t top_;
+ int32_t right_;
+ int32_t bottom_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_DESKTOP_CAPTURE_DESKTOP_GEOMETRY_H_
diff --git a/third_party/libwebrtc/modules/desktop_capture/desktop_geometry_unittest.cc b/third_party/libwebrtc/modules/desktop_capture/desktop_geometry_unittest.cc
new file mode 100644
index 0000000000..f4a07fa46b
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/desktop_geometry_unittest.cc
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/desktop_geometry.h"
+
+#include "test/gtest.h"
+
+namespace webrtc {
+
+TEST(DesktopRectTest, UnionBetweenTwoNonEmptyRects) {
+ DesktopRect rect = DesktopRect::MakeLTRB(1, 1, 2, 2);
+ rect.UnionWith(DesktopRect::MakeLTRB(-2, -2, -1, -1));
+ ASSERT_TRUE(rect.equals(DesktopRect::MakeLTRB(-2, -2, 2, 2)));
+}
+
+TEST(DesktopRectTest, UnionWithEmptyRect) {
+ DesktopRect rect = DesktopRect::MakeWH(1, 1);
+ rect.UnionWith(DesktopRect());
+ ASSERT_TRUE(rect.equals(DesktopRect::MakeWH(1, 1)));
+
+ rect = DesktopRect::MakeXYWH(1, 1, 2, 2);
+ rect.UnionWith(DesktopRect());
+ ASSERT_TRUE(rect.equals(DesktopRect::MakeXYWH(1, 1, 2, 2)));
+
+ rect = DesktopRect::MakeXYWH(1, 1, 2, 2);
+ rect.UnionWith(DesktopRect::MakeXYWH(3, 3, 0, 0));
+ ASSERT_TRUE(rect.equals(DesktopRect::MakeXYWH(1, 1, 2, 2)));
+}
+
+TEST(DesktopRectTest, EmptyRectUnionWithNonEmptyOne) {
+ DesktopRect rect;
+ rect.UnionWith(DesktopRect::MakeWH(1, 1));
+ ASSERT_TRUE(rect.equals(DesktopRect::MakeWH(1, 1)));
+
+ rect = DesktopRect();
+ rect.UnionWith(DesktopRect::MakeXYWH(1, 1, 2, 2));
+ ASSERT_TRUE(rect.equals(DesktopRect::MakeXYWH(1, 1, 2, 2)));
+
+ rect = DesktopRect::MakeXYWH(3, 3, 0, 0);
+ rect.UnionWith(DesktopRect::MakeXYWH(1, 1, 2, 2));
+ ASSERT_TRUE(rect.equals(DesktopRect::MakeXYWH(1, 1, 2, 2)));
+}
+
+TEST(DesktopRectTest, EmptyRectUnionWithEmptyOne) {
+ DesktopRect rect;
+ rect.UnionWith(DesktopRect());
+ ASSERT_TRUE(rect.is_empty());
+
+ rect = DesktopRect::MakeXYWH(1, 1, 0, 0);
+ rect.UnionWith(DesktopRect());
+ ASSERT_TRUE(rect.is_empty());
+
+ rect = DesktopRect();
+ rect.UnionWith(DesktopRect::MakeXYWH(1, 1, 0, 0));
+ ASSERT_TRUE(rect.is_empty());
+
+ rect = DesktopRect::MakeXYWH(1, 1, 0, 0);
+ rect.UnionWith(DesktopRect::MakeXYWH(-1, -1, 0, 0));
+ ASSERT_TRUE(rect.is_empty());
+}
+
+TEST(DesktopRectTest, Scale) {
+ DesktopRect rect = DesktopRect::MakeXYWH(100, 100, 100, 100);
+ rect.Scale(1.1, 1.1);
+ ASSERT_EQ(rect.top(), 100);
+ ASSERT_EQ(rect.left(), 100);
+ ASSERT_EQ(rect.width(), 110);
+ ASSERT_EQ(rect.height(), 110);
+
+ rect = DesktopRect::MakeXYWH(100, 100, 100, 100);
+ rect.Scale(0.01, 0.01);
+ ASSERT_EQ(rect.top(), 100);
+ ASSERT_EQ(rect.left(), 100);
+ ASSERT_EQ(rect.width(), 1);
+ ASSERT_EQ(rect.height(), 1);
+
+ rect = DesktopRect::MakeXYWH(100, 100, 100, 100);
+ rect.Scale(1.1, 0.9);
+ ASSERT_EQ(rect.top(), 100);
+ ASSERT_EQ(rect.left(), 100);
+ ASSERT_EQ(rect.width(), 110);
+ ASSERT_EQ(rect.height(), 90);
+
+ rect = DesktopRect::MakeXYWH(0, 0, 100, 100);
+ rect.Scale(1.1, 1.1);
+ ASSERT_EQ(rect.top(), 0);
+ ASSERT_EQ(rect.left(), 0);
+ ASSERT_EQ(rect.width(), 110);
+ ASSERT_EQ(rect.height(), 110);
+
+ rect = DesktopRect::MakeXYWH(0, 100, 100, 100);
+ rect.Scale(1.1, 1.1);
+ ASSERT_EQ(rect.top(), 100);
+ ASSERT_EQ(rect.left(), 0);
+ ASSERT_EQ(rect.width(), 110);
+ ASSERT_EQ(rect.height(), 110);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/desktop_region.cc b/third_party/libwebrtc/modules/desktop_capture/desktop_region.cc
new file mode 100644
index 0000000000..2c87c11eb3
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/desktop_region.cc
@@ -0,0 +1,567 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/desktop_region.h"
+
+#include <algorithm>
+#include <utility>
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+DesktopRegion::RowSpan::RowSpan(int32_t left, int32_t right)
+ : left(left), right(right) {}
+
+DesktopRegion::Row::Row(const Row&) = default;
+DesktopRegion::Row::Row(Row&&) = default;
+
+DesktopRegion::Row::Row(int32_t top, int32_t bottom)
+ : top(top), bottom(bottom) {}
+
+DesktopRegion::Row::~Row() {}
+
+DesktopRegion::DesktopRegion() {}
+
+DesktopRegion::DesktopRegion(const DesktopRect& rect) {
+ AddRect(rect);
+}
+
+DesktopRegion::DesktopRegion(const DesktopRect* rects, int count) {
+ AddRects(rects, count);
+}
+
+DesktopRegion::DesktopRegion(const DesktopRegion& other) {
+ *this = other;
+}
+
+DesktopRegion::~DesktopRegion() {
+ Clear();
+}
+
+DesktopRegion& DesktopRegion::operator=(const DesktopRegion& other) {
+ Clear();
+ rows_ = other.rows_;
+ for (Rows::iterator it = rows_.begin(); it != rows_.end(); ++it) {
+ // Copy each row.
+ Row* row = it->second;
+ it->second = new Row(*row);
+ }
+ return *this;
+}
+
+bool DesktopRegion::Equals(const DesktopRegion& region) const {
+  // Iterate over the rows of the two regions and compare each row.
+ Rows::const_iterator it1 = rows_.begin();
+ Rows::const_iterator it2 = region.rows_.begin();
+ while (it1 != rows_.end()) {
+ if (it2 == region.rows_.end() || it1->first != it2->first ||
+ it1->second->top != it2->second->top ||
+ it1->second->bottom != it2->second->bottom ||
+ it1->second->spans != it2->second->spans) {
+ return false;
+ }
+ ++it1;
+ ++it2;
+ }
+ return it2 == region.rows_.end();
+}
+
+void DesktopRegion::Clear() {
+ for (Rows::iterator row = rows_.begin(); row != rows_.end(); ++row) {
+ delete row->second;
+ }
+ rows_.clear();
+}
+
+void DesktopRegion::SetRect(const DesktopRect& rect) {
+ Clear();
+ AddRect(rect);
+}
+
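+// For example (illustration): adding LTRB(0, 0, 10, 10) and then
+// LTRB(5, 5, 15, 15) leaves the region stored as three rows:
+//   [0, 5)   with span [0, 10)
+//   [5, 10)  with span [0, 15)
+//   [10, 15) with span [5, 15)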
+void DesktopRegion::AddRect(const DesktopRect& rect) {
+ if (rect.is_empty())
+ return;
+
+ // Top of the part of the `rect` that hasn't been inserted yet. Increased as
+ // we iterate over the rows until it reaches `rect.bottom()`.
+ int top = rect.top();
+
+ // Iterate over all rows that may intersect with `rect` and add new rows when
+ // necessary.
+ Rows::iterator row = rows_.upper_bound(top);
+ while (top < rect.bottom()) {
+ if (row == rows_.end() || top < row->second->top) {
+ // If `top` is above the top of the current `row` then add a new row above
+ // the current one.
+ int32_t bottom = rect.bottom();
+ if (row != rows_.end() && row->second->top < bottom)
+ bottom = row->second->top;
+ row = rows_.insert(row, Rows::value_type(bottom, new Row(top, bottom)));
+ } else if (top > row->second->top) {
+ // If the `top` falls in the middle of the `row` then split `row` into
+ // two, at `top`, and leave `row` referring to the lower of the two,
+ // ready to insert a new span into.
+ RTC_DCHECK_LE(top, row->second->bottom);
+ Rows::iterator new_row = rows_.insert(
+ row, Rows::value_type(top, new Row(row->second->top, top)));
+ row->second->top = top;
+ new_row->second->spans = row->second->spans;
+ }
+
+ if (rect.bottom() < row->second->bottom) {
+      // If the bottom of the `rect` falls in the middle of the `row`, split
+      // `row` into two at `rect.bottom()`, and leave `row` referring to the
+      // upper of the two, ready to insert a new span into.
+ Rows::iterator new_row = rows_.insert(
+ row, Rows::value_type(rect.bottom(), new Row(top, rect.bottom())));
+ row->second->top = rect.bottom();
+ new_row->second->spans = row->second->spans;
+ row = new_row;
+ }
+
+ // Add a new span to the current row.
+ AddSpanToRow(row->second, rect.left(), rect.right());
+ top = row->second->bottom;
+
+ MergeWithPrecedingRow(row);
+
+ // Move to the next row.
+ ++row;
+ }
+
+ if (row != rows_.end())
+ MergeWithPrecedingRow(row);
+}
+
+void DesktopRegion::AddRects(const DesktopRect* rects, int count) {
+ for (int i = 0; i < count; ++i) {
+ AddRect(rects[i]);
+ }
+}
+
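+// For example (illustration): adjacent rows [0, 10) and [10, 20) that both
+// hold the single span [0, 5) are merged into one row [0, 20).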
+void DesktopRegion::MergeWithPrecedingRow(Rows::iterator row) {
+ RTC_DCHECK(row != rows_.end());
+
+ if (row != rows_.begin()) {
+ Rows::iterator previous_row = row;
+ previous_row--;
+
+ // If `row` and `previous_row` are next to each other and contain the same
+ // set of spans then they can be merged.
+ if (previous_row->second->bottom == row->second->top &&
+ previous_row->second->spans == row->second->spans) {
+ row->second->top = previous_row->second->top;
+ delete previous_row->second;
+ rows_.erase(previous_row);
+ }
+ }
+}
+
+void DesktopRegion::AddRegion(const DesktopRegion& region) {
+ // TODO(sergeyu): This function is not optimized - potentially it can iterate
+ // over rows of the two regions similar to how it works in Intersect().
+ for (Iterator it(region); !it.IsAtEnd(); it.Advance()) {
+ AddRect(it.rect());
+ }
+}
+
+void DesktopRegion::Intersect(const DesktopRegion& region1,
+ const DesktopRegion& region2) {
+ Clear();
+
+ Rows::const_iterator it1 = region1.rows_.begin();
+ Rows::const_iterator end1 = region1.rows_.end();
+ Rows::const_iterator it2 = region2.rows_.begin();
+ Rows::const_iterator end2 = region2.rows_.end();
+ if (it1 == end1 || it2 == end2)
+ return;
+
+ while (it1 != end1 && it2 != end2) {
+ // Arrange for `it1` to always be the top-most of the rows.
+ if (it2->second->top < it1->second->top) {
+ std::swap(it1, it2);
+ std::swap(end1, end2);
+ }
+
+ // Skip `it1` if it doesn't intersect `it2` at all.
+ if (it1->second->bottom <= it2->second->top) {
+ ++it1;
+ continue;
+ }
+
+    // The top of `it1` is at or above the top of `it2`, so the top of the
+    // intersection is always the top of `it2`.
+ int32_t top = it2->second->top;
+ int32_t bottom = std::min(it1->second->bottom, it2->second->bottom);
+
+ Rows::iterator new_row = rows_.insert(
+ rows_.end(), Rows::value_type(bottom, new Row(top, bottom)));
+ IntersectRows(it1->second->spans, it2->second->spans,
+ &new_row->second->spans);
+ if (new_row->second->spans.empty()) {
+ delete new_row->second;
+ rows_.erase(new_row);
+ } else {
+ MergeWithPrecedingRow(new_row);
+ }
+
+ // If `it1` was completely consumed, move to the next one.
+ if (it1->second->bottom == bottom)
+ ++it1;
+ // If `it2` was completely consumed, move to the next one.
+ if (it2->second->bottom == bottom)
+ ++it2;
+ }
+}
+
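+// For example (illustration): intersecting the span sets {[0, 10)} and
+// {[5, 15)} produces {[5, 10)}.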
+// static
+void DesktopRegion::IntersectRows(const RowSpanSet& set1,
+ const RowSpanSet& set2,
+ RowSpanSet* output) {
+ RowSpanSet::const_iterator it1 = set1.begin();
+ RowSpanSet::const_iterator end1 = set1.end();
+ RowSpanSet::const_iterator it2 = set2.begin();
+ RowSpanSet::const_iterator end2 = set2.end();
+ RTC_DCHECK(it1 != end1 && it2 != end2);
+
+ do {
+ // Arrange for `it1` to always be the left-most of the spans.
+ if (it2->left < it1->left) {
+ std::swap(it1, it2);
+ std::swap(end1, end2);
+ }
+
+ // Skip `it1` if it doesn't intersect `it2` at all.
+ if (it1->right <= it2->left) {
+ ++it1;
+ continue;
+ }
+
+ int32_t left = it2->left;
+ int32_t right = std::min(it1->right, it2->right);
+ RTC_DCHECK_LT(left, right);
+
+ output->push_back(RowSpan(left, right));
+
+ // If `it1` was completely consumed, move to the next one.
+ if (it1->right == right)
+ ++it1;
+ // If `it2` was completely consumed, move to the next one.
+ if (it2->right == right)
+ ++it2;
+ } while (it1 != end1 && it2 != end2);
+}
+
+void DesktopRegion::IntersectWith(const DesktopRegion& region) {
+ DesktopRegion old_region;
+ Swap(&old_region);
+ Intersect(old_region, region);
+}
+
+void DesktopRegion::IntersectWith(const DesktopRect& rect) {
+ DesktopRegion region;
+ region.AddRect(rect);
+ IntersectWith(region);
+}
+
+void DesktopRegion::Subtract(const DesktopRegion& region) {
+ if (region.rows_.empty())
+ return;
+
+ // `row_b` refers to the current row being subtracted.
+ Rows::const_iterator row_b = region.rows_.begin();
+
+ // Current vertical position at which subtraction is happening.
+ int top = row_b->second->top;
+
+ // `row_a` refers to the current row we are subtracting from. Skip all rows
+ // above `top`.
+ Rows::iterator row_a = rows_.upper_bound(top);
+
+ // Step through rows of the both regions subtracting content of `row_b` from
+ // `row_a`.
+ while (row_a != rows_.end() && row_b != region.rows_.end()) {
+    // Skip `row_a` if it doesn't intersect with `row_b`.
+ if (row_a->second->bottom <= top) {
+ // Each output row is merged with previously-processed rows before further
+ // rows are processed.
+ MergeWithPrecedingRow(row_a);
+ ++row_a;
+ continue;
+ }
+
+ if (top > row_a->second->top) {
+ // If `top` falls in the middle of `row_a` then split `row_a` into two, at
+ // `top`, and leave `row_a` referring to the lower of the two, ready to
+ // subtract spans from.
+ RTC_DCHECK_LE(top, row_a->second->bottom);
+ Rows::iterator new_row = rows_.insert(
+ row_a, Rows::value_type(top, new Row(row_a->second->top, top)));
+ row_a->second->top = top;
+ new_row->second->spans = row_a->second->spans;
+ } else if (top < row_a->second->top) {
+ // If the `top` is above `row_a` then skip the range between `top` and
+ // top of `row_a` because it's empty.
+ top = row_a->second->top;
+ if (top >= row_b->second->bottom) {
+ ++row_b;
+ if (row_b != region.rows_.end())
+ top = row_b->second->top;
+ continue;
+ }
+ }
+
+ if (row_b->second->bottom < row_a->second->bottom) {
+      // If the bottom of `row_b` falls in the middle of `row_a`, split `row_a`
+      // into two at `row_b->second->bottom`, and leave `row_a` referring to
+      // the upper of the two, ready to subtract spans from.
+ int bottom = row_b->second->bottom;
+ Rows::iterator new_row =
+ rows_.insert(row_a, Rows::value_type(bottom, new Row(top, bottom)));
+ row_a->second->top = bottom;
+ new_row->second->spans = row_a->second->spans;
+ row_a = new_row;
+ }
+
+    // At this point the vertical range covered by `row_a` lies within the
+    // range covered by `row_b`. Subtract `row_b` spans from `row_a`.
+ RowSpanSet new_spans;
+ SubtractRows(row_a->second->spans, row_b->second->spans, &new_spans);
+ new_spans.swap(row_a->second->spans);
+ top = row_a->second->bottom;
+
+ if (top >= row_b->second->bottom) {
+ ++row_b;
+ if (row_b != region.rows_.end())
+ top = row_b->second->top;
+ }
+
+ // Check if the row is empty after subtraction and delete it. Otherwise move
+ // to the next one.
+ if (row_a->second->spans.empty()) {
+ Rows::iterator row_to_delete = row_a;
+ ++row_a;
+ delete row_to_delete->second;
+ rows_.erase(row_to_delete);
+ } else {
+ MergeWithPrecedingRow(row_a);
+ ++row_a;
+ }
+ }
+
+ if (row_a != rows_.end())
+ MergeWithPrecedingRow(row_a);
+}
+
+void DesktopRegion::Subtract(const DesktopRect& rect) {
+ DesktopRegion region;
+ region.AddRect(rect);
+ Subtract(region);
+}
+
+void DesktopRegion::Translate(int32_t dx, int32_t dy) {
+ Rows new_rows;
+
+ for (Rows::iterator it = rows_.begin(); it != rows_.end(); ++it) {
+ Row* row = it->second;
+
+ row->top += dy;
+ row->bottom += dy;
+
+ if (dx != 0) {
+ // Translate each span.
+ for (RowSpanSet::iterator span = row->spans.begin();
+ span != row->spans.end(); ++span) {
+ span->left += dx;
+ span->right += dx;
+ }
+ }
+
+ if (dy != 0)
+ new_rows.insert(new_rows.end(), Rows::value_type(row->bottom, row));
+ }
+
+ if (dy != 0)
+ new_rows.swap(rows_);
+}
+
+void DesktopRegion::Swap(DesktopRegion* region) {
+ rows_.swap(region->rows_);
+}
+
+// static
+bool DesktopRegion::CompareSpanRight(const RowSpan& r, int32_t value) {
+ return r.right < value;
+}
+
+// static
+bool DesktopRegion::CompareSpanLeft(const RowSpan& r, int32_t value) {
+ return r.left < value;
+}
+
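+// For example (illustration): adding the span [5, 15) to a row holding
+// [0, 8) and [12, 20) coalesces all three into the single span [0, 20).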
+// static
+void DesktopRegion::AddSpanToRow(Row* row, int32_t left, int32_t right) {
+ // First check if the new span is located to the right of all existing spans.
+ // This is an optimization to avoid binary search in the case when rectangles
+ // are inserted sequentially from left to right.
+ if (row->spans.empty() || left > row->spans.back().right) {
+ row->spans.push_back(RowSpan(left, right));
+ return;
+ }
+
+ // Find the first span that ends at or after `left`.
+ RowSpanSet::iterator start = std::lower_bound(
+ row->spans.begin(), row->spans.end(), left, CompareSpanRight);
+ RTC_DCHECK(start < row->spans.end());
+
+ // Find the first span that starts after `right`.
+ RowSpanSet::iterator end =
+ std::lower_bound(start, row->spans.end(), right + 1, CompareSpanLeft);
+ if (end == row->spans.begin()) {
+ // There are no overlaps. Just insert the new span at the beginning.
+ row->spans.insert(row->spans.begin(), RowSpan(left, right));
+ return;
+ }
+
+  // Move end to the left, so that it points to the last span that starts at
+  // or before `right`.
+ end--;
+
+ // At this point [start, end] is the range of spans that intersect with the
+ // new one.
+ if (end < start) {
+ // There are no overlaps. Just insert the new span at the correct position.
+ row->spans.insert(start, RowSpan(left, right));
+ return;
+ }
+
+ left = std::min(left, start->left);
+ right = std::max(right, end->right);
+
+ // Replace range [start, end] with the new span.
+ *start = RowSpan(left, right);
+ ++start;
+ ++end;
+ if (start < end)
+ row->spans.erase(start, end);
+}
+
+// static
+bool DesktopRegion::IsSpanInRow(const Row& row, const RowSpan& span) {
+ // Find the first span that starts at or after `span.left` and then check if
+ // it's the same span.
+ RowSpanSet::const_iterator it = std::lower_bound(
+ row.spans.begin(), row.spans.end(), span.left, CompareSpanLeft);
+ return it != row.spans.end() && *it == span;
+}
+
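+// For example (illustration): subtracting {[5, 10)} from {[0, 20)} produces
+// {[0, 5), [10, 20)}.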
+// static
+void DesktopRegion::SubtractRows(const RowSpanSet& set_a,
+ const RowSpanSet& set_b,
+ RowSpanSet* output) {
+ RTC_DCHECK(!set_a.empty() && !set_b.empty());
+
+ RowSpanSet::const_iterator it_b = set_b.begin();
+
+ // Iterate over all spans in `set_a` adding parts of it that do not intersect
+ // with `set_b` to the `output`.
+ for (RowSpanSet::const_iterator it_a = set_a.begin(); it_a != set_a.end();
+ ++it_a) {
+ // If there is no intersection then append the current span and continue.
+ if (it_b == set_b.end() || it_a->right < it_b->left) {
+ output->push_back(*it_a);
+ continue;
+ }
+
+ // Iterate over `set_b` spans that may intersect with `it_a`.
+ int pos = it_a->left;
+ while (it_b != set_b.end() && it_b->left < it_a->right) {
+ if (it_b->left > pos)
+ output->push_back(RowSpan(pos, it_b->left));
+ if (it_b->right > pos) {
+ pos = it_b->right;
+ if (pos >= it_a->right)
+ break;
+ }
+ ++it_b;
+ }
+ if (pos < it_a->right)
+ output->push_back(RowSpan(pos, it_a->right));
+ }
+}
+
+DesktopRegion::Iterator::Iterator(const DesktopRegion& region)
+ : region_(region),
+ row_(region.rows_.begin()),
+ previous_row_(region.rows_.end()) {
+ if (!IsAtEnd()) {
+ RTC_DCHECK_GT(row_->second->spans.size(), 0);
+ row_span_ = row_->second->spans.begin();
+ UpdateCurrentRect();
+ }
+}
+
+DesktopRegion::Iterator::~Iterator() {}
+
+bool DesktopRegion::Iterator::IsAtEnd() const {
+ return row_ == region_.rows_.end();
+}
+
+void DesktopRegion::Iterator::Advance() {
+ RTC_DCHECK(!IsAtEnd());
+
+ while (true) {
+ ++row_span_;
+ if (row_span_ == row_->second->spans.end()) {
+ previous_row_ = row_;
+ ++row_;
+ if (row_ != region_.rows_.end()) {
+ RTC_DCHECK_GT(row_->second->spans.size(), 0);
+ row_span_ = row_->second->spans.begin();
+ }
+ }
+
+ if (IsAtEnd())
+ return;
+
+ // If the same span exists on the previous row then skip it, as we've
+ // already returned this span merged into the previous one, via
+ // UpdateCurrentRect().
+ if (previous_row_ != region_.rows_.end() &&
+ previous_row_->second->bottom == row_->second->top &&
+ IsSpanInRow(*previous_row_->second, *row_span_)) {
+ continue;
+ }
+
+ break;
+ }
+
+ RTC_DCHECK(!IsAtEnd());
+ UpdateCurrentRect();
+}
+
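+// For example (illustration): for a region holding rows [0, 10) with spans
+// {[0, 5), [7, 9)} and [10, 20) with span {[0, 5)}, the iterator returns the
+// merged rect MakeLTRB(0, 0, 5, 20) followed by MakeLTRB(7, 0, 9, 10).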
+void DesktopRegion::Iterator::UpdateCurrentRect() {
+ // Merge the current rectangle with the matching spans from later rows.
+ int bottom;
+ Rows::const_iterator bottom_row = row_;
+ Rows::const_iterator previous;
+ do {
+ bottom = bottom_row->second->bottom;
+ previous = bottom_row;
+ ++bottom_row;
+ } while (bottom_row != region_.rows_.end() &&
+ previous->second->bottom == bottom_row->second->top &&
+ IsSpanInRow(*bottom_row->second, *row_span_));
+ rect_ = DesktopRect::MakeLTRB(row_span_->left, row_->second->top,
+ row_span_->right, bottom);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/desktop_region.h b/third_party/libwebrtc/modules/desktop_capture/desktop_region.h
new file mode 100644
index 0000000000..ae9d8a0ba9
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/desktop_region.h
@@ -0,0 +1,169 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_DESKTOP_CAPTURE_DESKTOP_REGION_H_
+#define MODULES_DESKTOP_CAPTURE_DESKTOP_REGION_H_
+
+#include <stdint.h>
+
+#include <map>
+#include <vector>
+
+#include "modules/desktop_capture/desktop_geometry.h"
+#include "rtc_base/system/rtc_export.h"
+
+namespace webrtc {
+
+// DesktopRegion represents a region of the screen or window.
+//
+// Internally each region is stored as a set of rows, where each row contains
+// one or more horizontal spans sharing that row's vertical extent.
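+//
+// For example (illustration), the region built from MakeXYWH(0, 0, 10, 20)
+// and MakeXYWH(10, 0, 10, 10) is stored as two rows: [0, 10) holding the
+// span [0, 20), and [10, 20) holding the span [0, 10).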
+class RTC_EXPORT DesktopRegion {
+ private:
+ // The following private types need to be declared first because they are used
+ // in the public Iterator.
+
+  // RowSpan represents a horizontal span within a single row.
+ struct RowSpan {
+ RowSpan(int32_t left, int32_t right);
+
+ // Used by std::vector<>.
+ bool operator==(const RowSpan& that) const {
+ return left == that.left && right == that.right;
+ }
+
+ int32_t left;
+ int32_t right;
+ };
+
+ typedef std::vector<RowSpan> RowSpanSet;
+
+  // Row represents a single row of a region. A row is a set of rectangles
+  // that share the same vertical position.
+ struct Row {
+ Row(const Row&);
+ Row(Row&&);
+ Row(int32_t top, int32_t bottom);
+ ~Row();
+
+ int32_t top;
+ int32_t bottom;
+
+ RowSpanSet spans;
+ };
+
+  // Type used to store the list of rows in the region. The bottom position of
+  // a row is used as the key so that rows are always ordered by their
+  // position. The map stores pointers to make Translate() more efficient.
+ typedef std::map<int, Row*> Rows;
+
+ public:
+ // Iterator that can be used to iterate over rectangles of a DesktopRegion.
+ // The region must not be mutated while the iterator is used.
+ class RTC_EXPORT Iterator {
+ public:
+ explicit Iterator(const DesktopRegion& target);
+ ~Iterator();
+
+ bool IsAtEnd() const;
+ void Advance();
+
+ const DesktopRect& rect() const { return rect_; }
+
+ private:
+ const DesktopRegion& region_;
+
+ // Updates `rect_` based on the current `row_` and `row_span_`. If
+ // `row_span_` matches spans on consecutive rows then they are also merged
+ // into `rect_`, to generate more efficient output.
+ void UpdateCurrentRect();
+
+ Rows::const_iterator row_;
+ Rows::const_iterator previous_row_;
+ RowSpanSet::const_iterator row_span_;
+ DesktopRect rect_;
+ };
+
+ DesktopRegion();
+ explicit DesktopRegion(const DesktopRect& rect);
+ DesktopRegion(const DesktopRect* rects, int count);
+ DesktopRegion(const DesktopRegion& other);
+ ~DesktopRegion();
+
+ DesktopRegion& operator=(const DesktopRegion& other);
+
+ bool is_empty() const { return rows_.empty(); }
+
+ bool Equals(const DesktopRegion& region) const;
+
+ // Reset the region to be empty.
+ void Clear();
+
+ // Reset region to contain just `rect`.
+ void SetRect(const DesktopRect& rect);
+
+ // Adds specified rect(s) or region to the region.
+ void AddRect(const DesktopRect& rect);
+ void AddRects(const DesktopRect* rects, int count);
+ void AddRegion(const DesktopRegion& region);
+
+ // Finds intersection of two regions and stores them in the current region.
+ void Intersect(const DesktopRegion& region1, const DesktopRegion& region2);
+
+ // Same as above but intersects content of the current region with `region`.
+ void IntersectWith(const DesktopRegion& region);
+
+ // Clips the region by the `rect`.
+ void IntersectWith(const DesktopRect& rect);
+
+ // Subtracts `region` from the current content of the region.
+ void Subtract(const DesktopRegion& region);
+
+ // Subtracts `rect` from the current content of the region.
+ void Subtract(const DesktopRect& rect);
+
+ // Adds (dx, dy) to the position of the region.
+ void Translate(int32_t dx, int32_t dy);
+
+ void Swap(DesktopRegion* region);
+
+ private:
+  // Comparison functions used for std::lower_bound(). Compare the left or
+  // right edge with a given `value`.
+ static bool CompareSpanLeft(const RowSpan& r, int32_t value);
+ static bool CompareSpanRight(const RowSpan& r, int32_t value);
+
+ // Adds a new span to the row, coalescing spans if necessary.
+ static void AddSpanToRow(Row* row, int32_t left, int32_t right);
+
+ // Returns true if the `span` exists in the given `row`.
+  static bool IsSpanInRow(const Row& row, const RowSpan& span);
+
+ // Calculates the intersection of two sets of spans.
+ static void IntersectRows(const RowSpanSet& set1,
+ const RowSpanSet& set2,
+ RowSpanSet* output);
+
+ static void SubtractRows(const RowSpanSet& set_a,
+ const RowSpanSet& set_b,
+ RowSpanSet* output);
+
+ // Merges `row` with the row above it if they contain the same spans. Doesn't
+ // do anything if called with `row` set to rows_.begin() (i.e. first row of
+ // the region). If the rows were merged `row` remains a valid iterator to the
+ // merged row.
+ void MergeWithPrecedingRow(Rows::iterator row);
+
+ Rows rows_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_DESKTOP_CAPTURE_DESKTOP_REGION_H_
diff --git a/third_party/libwebrtc/modules/desktop_capture/desktop_region_unittest.cc b/third_party/libwebrtc/modules/desktop_capture/desktop_region_unittest.cc
new file mode 100644
index 0000000000..b8bd78e990
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/desktop_region_unittest.cc
@@ -0,0 +1,834 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/desktop_region.h"
+
+#include <stdlib.h>
+
+#include <algorithm>
+#include <cstdint>
+
+#include "test/gtest.h"
+
+namespace webrtc {
+
+namespace {
+
+int RadmonInt(int max) {
+ return (rand() / 256) % max;
+}
+
+void CompareRegion(const DesktopRegion& region,
+ const DesktopRect rects[],
+ int rects_size) {
+ DesktopRegion::Iterator it(region);
+ for (int i = 0; i < rects_size; ++i) {
+ SCOPED_TRACE(i);
+ ASSERT_FALSE(it.IsAtEnd());
+ EXPECT_TRUE(it.rect().equals(rects[i]))
+ << it.rect().left() << "-" << it.rect().right() << "."
+ << it.rect().top() << "-" << it.rect().bottom() << " "
+ << rects[i].left() << "-" << rects[i].right() << "." << rects[i].top()
+ << "-" << rects[i].bottom();
+ it.Advance();
+ }
+ EXPECT_TRUE(it.IsAtEnd());
+}
+
+} // namespace
+
+// Verify that regions are empty when created.
+TEST(DesktopRegionTest, Empty) {
+ DesktopRegion r;
+ CompareRegion(r, NULL, 0);
+}
+
+// Verify that empty rectangles are ignored.
+TEST(DesktopRegionTest, AddEmpty) {
+ DesktopRegion r;
+ DesktopRect rect = DesktopRect::MakeXYWH(1, 2, 0, 0);
+ r.AddRect(rect);
+ CompareRegion(r, NULL, 0);
+}
+
+// Verify that regions with a single rectangle are handled properly.
+TEST(DesktopRegionTest, SingleRect) {
+ DesktopRegion r;
+ DesktopRect rect = DesktopRect::MakeXYWH(1, 2, 3, 4);
+ r.AddRect(rect);
+ CompareRegion(r, &rect, 1);
+}
+
+// Verify that non-overlapping rectangles are not merged.
+TEST(DesktopRegionTest, NonOverlappingRects) {
+ struct Case {
+ int count;
+ DesktopRect rects[4];
+ } cases[] = {
+ {1, {DesktopRect::MakeXYWH(10, 10, 10, 10)}},
+ {2,
+ {DesktopRect::MakeXYWH(10, 10, 10, 10),
+ DesktopRect::MakeXYWH(30, 10, 10, 15)}},
+ {2,
+ {DesktopRect::MakeXYWH(10, 10, 10, 10),
+ DesktopRect::MakeXYWH(10, 30, 10, 5)}},
+ {3,
+ {DesktopRect::MakeXYWH(10, 10, 10, 9),
+ DesktopRect::MakeXYWH(30, 10, 15, 10),
+ DesktopRect::MakeXYWH(10, 30, 8, 10)}},
+ {4,
+ {DesktopRect::MakeXYWH(0, 0, 30, 10),
+ DesktopRect::MakeXYWH(40, 0, 10, 30),
+ DesktopRect::MakeXYWH(0, 20, 10, 30),
+ DesktopRect::MakeXYWH(20, 40, 30, 10)}},
+ {4,
+ {DesktopRect::MakeXYWH(0, 0, 10, 100),
+ DesktopRect::MakeXYWH(20, 10, 30, 10),
+ DesktopRect::MakeXYWH(20, 30, 30, 10),
+ DesktopRect::MakeXYWH(20, 50, 30, 10)}},
+ };
+
+ for (size_t i = 0; i < (sizeof(cases) / sizeof(Case)); ++i) {
+ SCOPED_TRACE(i);
+
+ DesktopRegion r;
+
+ for (int j = 0; j < cases[i].count; ++j) {
+ r.AddRect(cases[i].rects[j]);
+ }
+ CompareRegion(r, cases[i].rects, cases[i].count);
+
+ SCOPED_TRACE("Reverse");
+
+ // Try inserting rects in reverse order.
+ r.Clear();
+ for (int j = cases[i].count - 1; j >= 0; --j) {
+ r.AddRect(cases[i].rects[j]);
+ }
+ CompareRegion(r, cases[i].rects, cases[i].count);
+ }
+}
+
+TEST(DesktopRegionTest, TwoRects) {
+ struct Case {
+ DesktopRect input_rect1;
+ DesktopRect input_rect2;
+ int expected_count;
+ DesktopRect expected_rects[3];
+ } cases[] = {
+ // Touching rectangles that merge into one.
+ {DesktopRect::MakeLTRB(100, 100, 200, 200),
+ DesktopRect::MakeLTRB(0, 100, 100, 200),
+ 1,
+ {DesktopRect::MakeLTRB(0, 100, 200, 200)}},
+ {DesktopRect::MakeLTRB(100, 100, 200, 200),
+ DesktopRect::MakeLTRB(100, 0, 200, 100),
+ 1,
+ {DesktopRect::MakeLTRB(100, 0, 200, 200)}},
+
+ // Rectangles touching on the vertical edge.
+ {DesktopRect::MakeLTRB(100, 100, 200, 200),
+ DesktopRect::MakeLTRB(0, 150, 100, 250),
+ 3,
+ {DesktopRect::MakeLTRB(100, 100, 200, 150),
+ DesktopRect::MakeLTRB(0, 150, 200, 200),
+ DesktopRect::MakeLTRB(0, 200, 100, 250)}},
+ {DesktopRect::MakeLTRB(100, 100, 200, 200),
+ DesktopRect::MakeLTRB(0, 50, 100, 150),
+ 3,
+ {DesktopRect::MakeLTRB(0, 50, 100, 100),
+ DesktopRect::MakeLTRB(0, 100, 200, 150),
+ DesktopRect::MakeLTRB(100, 150, 200, 200)}},
+ {DesktopRect::MakeLTRB(100, 100, 200, 200),
+ DesktopRect::MakeLTRB(0, 120, 100, 180),
+ 3,
+ {DesktopRect::MakeLTRB(100, 100, 200, 120),
+ DesktopRect::MakeLTRB(0, 120, 200, 180),
+ DesktopRect::MakeLTRB(100, 180, 200, 200)}},
+
+ // Rectangles touching on the horizontal edge.
+ {DesktopRect::MakeLTRB(100, 100, 200, 200),
+ DesktopRect::MakeLTRB(150, 0, 250, 100),
+ 2,
+ {DesktopRect::MakeLTRB(150, 0, 250, 100),
+ DesktopRect::MakeLTRB(100, 100, 200, 200)}},
+ {DesktopRect::MakeLTRB(100, 100, 200, 200),
+ DesktopRect::MakeLTRB(50, 0, 150, 100),
+ 2,
+ {DesktopRect::MakeLTRB(50, 0, 150, 100),
+ DesktopRect::MakeLTRB(100, 100, 200, 200)}},
+ {DesktopRect::MakeLTRB(100, 100, 200, 200),
+ DesktopRect::MakeLTRB(120, 0, 180, 100),
+ 2,
+ {DesktopRect::MakeLTRB(120, 0, 180, 100),
+ DesktopRect::MakeLTRB(100, 100, 200, 200)}},
+
+ // Overlapping rectangles.
+ {DesktopRect::MakeLTRB(100, 100, 200, 200),
+ DesktopRect::MakeLTRB(50, 50, 150, 150),
+ 3,
+ {DesktopRect::MakeLTRB(50, 50, 150, 100),
+ DesktopRect::MakeLTRB(50, 100, 200, 150),
+ DesktopRect::MakeLTRB(100, 150, 200, 200)}},
+ {DesktopRect::MakeLTRB(100, 100, 200, 200),
+ DesktopRect::MakeLTRB(150, 50, 250, 150),
+ 3,
+ {DesktopRect::MakeLTRB(150, 50, 250, 100),
+ DesktopRect::MakeLTRB(100, 100, 250, 150),
+ DesktopRect::MakeLTRB(100, 150, 200, 200)}},
+ {DesktopRect::MakeLTRB(100, 100, 200, 200),
+ DesktopRect::MakeLTRB(0, 120, 150, 180),
+ 3,
+ {DesktopRect::MakeLTRB(100, 100, 200, 120),
+ DesktopRect::MakeLTRB(0, 120, 200, 180),
+ DesktopRect::MakeLTRB(100, 180, 200, 200)}},
+ {DesktopRect::MakeLTRB(100, 100, 200, 200),
+ DesktopRect::MakeLTRB(120, 0, 180, 150),
+ 2,
+ {DesktopRect::MakeLTRB(120, 0, 180, 100),
+ DesktopRect::MakeLTRB(100, 100, 200, 200)}},
+ {DesktopRect::MakeLTRB(100, 0, 200, 300),
+ DesktopRect::MakeLTRB(0, 100, 300, 200),
+ 3,
+ {DesktopRect::MakeLTRB(100, 0, 200, 100),
+ DesktopRect::MakeLTRB(0, 100, 300, 200),
+ DesktopRect::MakeLTRB(100, 200, 200, 300)}},
+
+ // One rectangle enclosing another.
+ {DesktopRect::MakeLTRB(100, 100, 200, 200),
+ DesktopRect::MakeLTRB(150, 150, 180, 180),
+ 1,
+ {DesktopRect::MakeLTRB(100, 100, 200, 200)}},
+ {DesktopRect::MakeLTRB(100, 100, 200, 200),
+ DesktopRect::MakeLTRB(100, 100, 180, 180),
+ 1,
+ {DesktopRect::MakeLTRB(100, 100, 200, 200)}},
+ {DesktopRect::MakeLTRB(100, 100, 200, 200),
+ DesktopRect::MakeLTRB(150, 150, 200, 200),
+ 1,
+ {DesktopRect::MakeLTRB(100, 100, 200, 200)}},
+ };
+
+ for (size_t i = 0; i < (sizeof(cases) / sizeof(Case)); ++i) {
+ SCOPED_TRACE(i);
+
+ DesktopRegion r;
+
+ r.AddRect(cases[i].input_rect1);
+ r.AddRect(cases[i].input_rect2);
+ CompareRegion(r, cases[i].expected_rects, cases[i].expected_count);
+
+ SCOPED_TRACE("Reverse");
+
+ // Run the same test with rectangles inserted in reverse order.
+ r.Clear();
+ r.AddRect(cases[i].input_rect2);
+ r.AddRect(cases[i].input_rect1);
+ CompareRegion(r, cases[i].expected_rects, cases[i].expected_count);
+ }
+}
+
+// Verify that DesktopRegion::AddSpanToRow() works correctly by creating a row
+// of non-overlapping rectangles and inserting an overlapping rectangle into
+// the row at different positions. The result is verified by building a map of
+// the region in an array and comparing it with the expected values.
+TEST(DesktopRegionTest, SameRow) {
+ const int kMapWidth = 50;
+ const int kLastRectSizes[] = {3, 27};
+
+ DesktopRegion base_region;
+ bool base_map[kMapWidth] = {
+ false,
+ };
+
+ base_region.AddRect(DesktopRect::MakeXYWH(5, 0, 5, 1));
+ std::fill_n(base_map + 5, 5, true);
+ base_region.AddRect(DesktopRect::MakeXYWH(15, 0, 5, 1));
+ std::fill_n(base_map + 15, 5, true);
+ base_region.AddRect(DesktopRect::MakeXYWH(25, 0, 5, 1));
+ std::fill_n(base_map + 25, 5, true);
+ base_region.AddRect(DesktopRect::MakeXYWH(35, 0, 5, 1));
+ std::fill_n(base_map + 35, 5, true);
+ base_region.AddRect(DesktopRect::MakeXYWH(45, 0, 5, 1));
+ std::fill_n(base_map + 45, 5, true);
+
+ for (size_t i = 0; i < sizeof(kLastRectSizes) / sizeof(kLastRectSizes[0]);
+ i++) {
+ int last_rect_size = kLastRectSizes[i];
+ for (int x = 0; x < kMapWidth - last_rect_size; x++) {
+ SCOPED_TRACE(x);
+
+ DesktopRegion r = base_region;
+ r.AddRect(DesktopRect::MakeXYWH(x, 0, last_rect_size, 1));
+
+ bool expected_map[kMapWidth];
+ std::copy(base_map, base_map + kMapWidth, expected_map);
+ std::fill_n(expected_map + x, last_rect_size, true);
+
+ bool map[kMapWidth] = {
+ false,
+ };
+
+ int pos = -1;
+ for (DesktopRegion::Iterator it(r); !it.IsAtEnd(); it.Advance()) {
+ EXPECT_GT(it.rect().left(), pos);
+ pos = it.rect().right();
+ std::fill_n(map + it.rect().left(), it.rect().width(), true);
+ }
+
+ EXPECT_TRUE(std::equal(map, map + kMapWidth, expected_map));
+ }
+ }
+}
+
+TEST(DesktopRegionTest, ComplexRegions) {
+ struct Case {
+ int input_count;
+ DesktopRect input_rects[4];
+ int expected_count;
+ DesktopRect expected_rects[6];
+ } cases[] = {
+ {3,
+ {
+ DesktopRect::MakeLTRB(100, 100, 200, 200),
+ DesktopRect::MakeLTRB(0, 100, 100, 200),
+ DesktopRect::MakeLTRB(310, 110, 320, 120),
+ },
+ 2,
+ {DesktopRect::MakeLTRB(0, 100, 200, 200),
+ DesktopRect::MakeLTRB(310, 110, 320, 120)}},
+ {3,
+ {DesktopRect::MakeLTRB(100, 100, 200, 200),
+ DesktopRect::MakeLTRB(50, 50, 150, 150),
+ DesktopRect::MakeLTRB(300, 125, 350, 175)},
+ 4,
+ {DesktopRect::MakeLTRB(50, 50, 150, 100),
+ DesktopRect::MakeLTRB(50, 100, 200, 150),
+ DesktopRect::MakeLTRB(300, 125, 350, 175),
+ DesktopRect::MakeLTRB(100, 150, 200, 200)}},
+ {4,
+ {DesktopRect::MakeLTRB(0, 0, 30, 30),
+ DesktopRect::MakeLTRB(10, 10, 40, 40),
+ DesktopRect::MakeLTRB(20, 20, 50, 50),
+ DesktopRect::MakeLTRB(50, 0, 65, 15)},
+ 6,
+ {DesktopRect::MakeLTRB(0, 0, 30, 10),
+ DesktopRect::MakeLTRB(50, 0, 65, 15),
+ DesktopRect::MakeLTRB(0, 10, 40, 20),
+ DesktopRect::MakeLTRB(0, 20, 50, 30),
+ DesktopRect::MakeLTRB(10, 30, 50, 40),
+ DesktopRect::MakeLTRB(20, 40, 50, 50)}},
+ {3,
+ {DesktopRect::MakeLTRB(10, 10, 40, 20),
+ DesktopRect::MakeLTRB(10, 30, 40, 40),
+ DesktopRect::MakeLTRB(10, 20, 40, 30)},
+ 1,
+ {DesktopRect::MakeLTRB(10, 10, 40, 40)}},
+ };
+
+ for (size_t i = 0; i < (sizeof(cases) / sizeof(Case)); ++i) {
+ SCOPED_TRACE(i);
+
+ DesktopRegion r;
+ r.AddRects(cases[i].input_rects, cases[i].input_count);
+ CompareRegion(r, cases[i].expected_rects, cases[i].expected_count);
+
+ // Try inserting rectangles in reverse order.
+ r.Clear();
+ for (int j = cases[i].input_count - 1; j >= 0; --j) {
+ r.AddRect(cases[i].input_rects[j]);
+ }
+ CompareRegion(r, cases[i].expected_rects, cases[i].expected_count);
+ }
+}
+
+TEST(DesktopRegionTest, Equals) {
+ struct Region {
+ int count;
+ DesktopRect rects[4];
+ int id;
+ } regions[] = {
+ // Same region with one of the rectangles 1 pixel wider/taller.
+ {2,
+ {DesktopRect::MakeLTRB(0, 100, 200, 200),
+ DesktopRect::MakeLTRB(310, 110, 320, 120)},
+ 0},
+ {2,
+ {DesktopRect::MakeLTRB(0, 100, 201, 200),
+ DesktopRect::MakeLTRB(310, 110, 320, 120)},
+ 1},
+ {2,
+ {DesktopRect::MakeLTRB(0, 100, 200, 201),
+ DesktopRect::MakeLTRB(310, 110, 320, 120)},
+ 2},
+
+ // Same region with one of the rectangles shifted horizontally and
+ // vertically.
+ {4,
+ {DesktopRect::MakeLTRB(0, 0, 30, 30),
+ DesktopRect::MakeLTRB(10, 10, 40, 40),
+ DesktopRect::MakeLTRB(20, 20, 50, 50),
+ DesktopRect::MakeLTRB(50, 0, 65, 15)},
+ 3},
+ {4,
+ {DesktopRect::MakeLTRB(0, 0, 30, 30),
+ DesktopRect::MakeLTRB(10, 10, 40, 40),
+ DesktopRect::MakeLTRB(20, 20, 50, 50),
+ DesktopRect::MakeLTRB(50, 1, 65, 16)},
+ 4},
+ {4,
+ {DesktopRect::MakeLTRB(0, 0, 30, 30),
+ DesktopRect::MakeLTRB(10, 10, 40, 40),
+ DesktopRect::MakeLTRB(20, 20, 50, 50),
+ DesktopRect::MakeLTRB(51, 0, 66, 15)},
+ 5},
+
+ // Same region defined by a different set of rectangles - one of the
+ // rectangle is split horizontally into two.
+ {3,
+ {DesktopRect::MakeLTRB(100, 100, 200, 200),
+ DesktopRect::MakeLTRB(50, 50, 150, 150),
+ DesktopRect::MakeLTRB(300, 125, 350, 175)},
+ 6},
+ {4,
+ {DesktopRect::MakeLTRB(100, 100, 200, 200),
+ DesktopRect::MakeLTRB(50, 50, 100, 150),
+ DesktopRect::MakeLTRB(100, 50, 150, 150),
+ DesktopRect::MakeLTRB(300, 125, 350, 175)},
+ 6},
+
+ // Rectangle region defined by a set of rectangles that merge into one.
+ {3,
+ {DesktopRect::MakeLTRB(10, 10, 40, 20),
+ DesktopRect::MakeLTRB(10, 30, 40, 40),
+ DesktopRect::MakeLTRB(10, 20, 40, 30)},
+ 7},
+ {1, {DesktopRect::MakeLTRB(10, 10, 40, 40)}, 7},
+ };
+ int kTotalRegions = sizeof(regions) / sizeof(Region);
+
+ for (int i = 0; i < kTotalRegions; ++i) {
+ SCOPED_TRACE(i);
+
+ DesktopRegion r1(regions[i].rects, regions[i].count);
+ for (int j = 0; j < kTotalRegions; ++j) {
+ SCOPED_TRACE(j);
+
+ DesktopRegion r2(regions[j].rects, regions[j].count);
+ EXPECT_EQ(regions[i].id == regions[j].id, r1.Equals(r2));
+ }
+ }
+}
+
+TEST(DesktopRegionTest, Translate) {
+ struct Case {
+ int input_count;
+ DesktopRect input_rects[4];
+ int dx;
+ int dy;
+ int expected_count;
+ DesktopRect expected_rects[5];
+ } cases[] = {
+ {3,
+ {DesktopRect::MakeLTRB(0, 0, 30, 30),
+ DesktopRect::MakeLTRB(10, 10, 40, 40),
+ DesktopRect::MakeLTRB(20, 20, 50, 50)},
+ 3,
+ 5,
+ 5,
+ {DesktopRect::MakeLTRB(3, 5, 33, 15),
+ DesktopRect::MakeLTRB(3, 15, 43, 25),
+ DesktopRect::MakeLTRB(3, 25, 53, 35),
+ DesktopRect::MakeLTRB(13, 35, 53, 45),
+ DesktopRect::MakeLTRB(23, 45, 53, 55)}},
+ };
+
+ for (size_t i = 0; i < (sizeof(cases) / sizeof(Case)); ++i) {
+ SCOPED_TRACE(i);
+
+ DesktopRegion r(cases[i].input_rects, cases[i].input_count);
+ r.Translate(cases[i].dx, cases[i].dy);
+ CompareRegion(r, cases[i].expected_rects, cases[i].expected_count);
+ }
+}
+
+TEST(DesktopRegionTest, Intersect) {
+ struct Case {
+ int input1_count;
+ DesktopRect input1_rects[4];
+ int input2_count;
+ DesktopRect input2_rects[4];
+ int expected_count;
+ DesktopRect expected_rects[5];
+ } cases[] = {
+ {1,
+ {DesktopRect::MakeLTRB(0, 0, 100, 100)},
+ 1,
+ {DesktopRect::MakeLTRB(50, 50, 150, 150)},
+ 1,
+ {DesktopRect::MakeLTRB(50, 50, 100, 100)}},
+
+ {1,
+ {DesktopRect::MakeLTRB(100, 0, 200, 300)},
+ 1,
+ {DesktopRect::MakeLTRB(0, 100, 300, 200)},
+ 1,
+ {DesktopRect::MakeLTRB(100, 100, 200, 200)}},
+
+ {1,
+ {DesktopRect::MakeLTRB(0, 0, 100, 100)},
+ 2,
+ {DesktopRect::MakeLTRB(50, 10, 150, 30),
+ DesktopRect::MakeLTRB(50, 30, 160, 50)},
+ 1,
+ {DesktopRect::MakeLTRB(50, 10, 100, 50)}},
+
+ {1,
+ {DesktopRect::MakeLTRB(0, 0, 100, 100)},
+ 2,
+ {DesktopRect::MakeLTRB(50, 10, 150, 30),
+ DesktopRect::MakeLTRB(50, 30, 90, 50)},
+ 2,
+ {DesktopRect::MakeLTRB(50, 10, 100, 30),
+ DesktopRect::MakeLTRB(50, 30, 90, 50)}},
+ {1,
+ {DesktopRect::MakeLTRB(0, 0, 100, 100)},
+ 1,
+ {DesktopRect::MakeLTRB(100, 50, 200, 200)},
+ 0,
+ {}},
+ };
+
+ for (size_t i = 0; i < (sizeof(cases) / sizeof(Case)); ++i) {
+ SCOPED_TRACE(i);
+
+ DesktopRegion r1(cases[i].input1_rects, cases[i].input1_count);
+ DesktopRegion r2(cases[i].input2_rects, cases[i].input2_count);
+
+ DesktopRegion r;
+ r.Intersect(r1, r2);
+
+ CompareRegion(r, cases[i].expected_rects, cases[i].expected_count);
+ }
+}
+
+TEST(DesktopRegionTest, Subtract) {
+ struct Case {
+ int input1_count;
+ DesktopRect input1_rects[4];
+ int input2_count;
+ DesktopRect input2_rects[4];
+ int expected_count;
+ DesktopRect expected_rects[5];
+ } cases[] = {
+ // Subtract one rect from another.
+ {1,
+ {DesktopRect::MakeLTRB(0, 0, 100, 100)},
+ 1,
+ {DesktopRect::MakeLTRB(50, 50, 150, 150)},
+ 2,
+ {DesktopRect::MakeLTRB(0, 0, 100, 50),
+ DesktopRect::MakeLTRB(0, 50, 50, 100)}},
+
+ {1,
+ {DesktopRect::MakeLTRB(0, 0, 100, 100)},
+ 1,
+ {DesktopRect::MakeLTRB(-50, -50, 50, 50)},
+ 2,
+ {DesktopRect::MakeLTRB(50, 0, 100, 50),
+ DesktopRect::MakeLTRB(0, 50, 100, 100)}},
+
+ {1,
+ {DesktopRect::MakeLTRB(0, 0, 100, 100)},
+ 1,
+ {DesktopRect::MakeLTRB(-50, 50, 50, 150)},
+ 2,
+ {DesktopRect::MakeLTRB(0, 0, 100, 50),
+ DesktopRect::MakeLTRB(50, 50, 100, 100)}},
+
+ {1,
+ {DesktopRect::MakeLTRB(0, 0, 100, 100)},
+ 1,
+ {DesktopRect::MakeLTRB(50, 50, 150, 70)},
+ 3,
+ {DesktopRect::MakeLTRB(0, 0, 100, 50),
+ DesktopRect::MakeLTRB(0, 50, 50, 70),
+ DesktopRect::MakeLTRB(0, 70, 100, 100)}},
+
+ {1,
+ {DesktopRect::MakeLTRB(0, 0, 100, 100)},
+ 1,
+ {DesktopRect::MakeLTRB(50, 50, 70, 70)},
+ 4,
+ {DesktopRect::MakeLTRB(0, 0, 100, 50),
+ DesktopRect::MakeLTRB(0, 50, 50, 70),
+ DesktopRect::MakeLTRB(70, 50, 100, 70),
+ DesktopRect::MakeLTRB(0, 70, 100, 100)}},
+
+ // Empty result.
+ {1,
+ {DesktopRect::MakeLTRB(0, 0, 100, 100)},
+ 1,
+ {DesktopRect::MakeLTRB(0, 0, 100, 100)},
+ 0,
+ {}},
+
+ {1,
+ {DesktopRect::MakeLTRB(0, 0, 100, 100)},
+ 1,
+ {DesktopRect::MakeLTRB(-10, -10, 110, 110)},
+ 0,
+ {}},
+
+ {2,
+ {DesktopRect::MakeLTRB(0, 0, 100, 100),
+ DesktopRect::MakeLTRB(50, 50, 150, 150)},
+ 2,
+ {DesktopRect::MakeLTRB(0, 0, 100, 100),
+ DesktopRect::MakeLTRB(50, 50, 150, 150)},
+ 0,
+ {}},
+
+ // One rect out of disjoint set.
+ {3,
+ {DesktopRect::MakeLTRB(0, 0, 10, 10),
+ DesktopRect::MakeLTRB(20, 20, 30, 30),
+ DesktopRect::MakeLTRB(40, 0, 50, 10)},
+ 1,
+ {DesktopRect::MakeLTRB(20, 20, 30, 30)},
+ 2,
+ {DesktopRect::MakeLTRB(0, 0, 10, 10),
+ DesktopRect::MakeLTRB(40, 0, 50, 10)}},
+
+ // Row merging.
+ {3,
+ {DesktopRect::MakeLTRB(0, 0, 100, 50),
+ DesktopRect::MakeLTRB(0, 50, 150, 70),
+ DesktopRect::MakeLTRB(0, 70, 100, 100)},
+ 1,
+ {DesktopRect::MakeLTRB(100, 50, 150, 70)},
+ 1,
+ {DesktopRect::MakeLTRB(0, 0, 100, 100)}},
+
+ // No-op subtraction.
+ {1,
+ {DesktopRect::MakeLTRB(0, 0, 100, 100)},
+ 1,
+ {DesktopRect::MakeLTRB(100, 0, 200, 100)},
+ 1,
+ {DesktopRect::MakeLTRB(0, 0, 100, 100)}},
+
+ {1,
+ {DesktopRect::MakeLTRB(0, 0, 100, 100)},
+ 1,
+ {DesktopRect::MakeLTRB(-100, 0, 0, 100)},
+ 1,
+ {DesktopRect::MakeLTRB(0, 0, 100, 100)}},
+
+ {1,
+ {DesktopRect::MakeLTRB(0, 0, 100, 100)},
+ 1,
+ {DesktopRect::MakeLTRB(0, 100, 0, 200)},
+ 1,
+ {DesktopRect::MakeLTRB(0, 0, 100, 100)}},
+
+ {1,
+ {DesktopRect::MakeLTRB(0, 0, 100, 100)},
+ 1,
+ {DesktopRect::MakeLTRB(0, -100, 100, 0)},
+ 1,
+ {DesktopRect::MakeLTRB(0, 0, 100, 100)}},
+ };
+
+ for (size_t i = 0; i < (sizeof(cases) / sizeof(Case)); ++i) {
+ SCOPED_TRACE(i);
+
+ DesktopRegion r1(cases[i].input1_rects, cases[i].input1_count);
+ DesktopRegion r2(cases[i].input2_rects, cases[i].input2_count);
+
+ r1.Subtract(r2);
+
+ CompareRegion(r1, cases[i].expected_rects, cases[i].expected_count);
+ }
+}
+
+// Verify that DesktopRegion::SubtractRows() works correctly by creating a row
+// of non-overlapping rectangles and subtracting a set of rectangles. The
+// result is verified by building a map of the region in an array and
+// comparing it with the expected values.
+TEST(DesktopRegionTest, SubtractRectOnSameRow) {
+ const int kMapWidth = 50;
+
+ struct SpanSet {
+ int count;
+ struct Range {
+ int start;
+ int end;
+ } spans[3];
+ } span_sets[] = {
+ {1, {{0, 3}}},
+ {1, {{0, 5}}},
+ {1, {{0, 7}}},
+ {1, {{0, 12}}},
+ {3, {{0, 3}, {4, 5}, {6, 16}}},
+ };
+
+ DesktopRegion base_region;
+ bool base_map[kMapWidth] = {
+ false,
+ };
+
+ base_region.AddRect(DesktopRect::MakeXYWH(5, 0, 5, 1));
+ std::fill_n(base_map + 5, 5, true);
+ base_region.AddRect(DesktopRect::MakeXYWH(15, 0, 5, 1));
+ std::fill_n(base_map + 15, 5, true);
+ base_region.AddRect(DesktopRect::MakeXYWH(25, 0, 5, 1));
+ std::fill_n(base_map + 25, 5, true);
+ base_region.AddRect(DesktopRect::MakeXYWH(35, 0, 5, 1));
+ std::fill_n(base_map + 35, 5, true);
+ base_region.AddRect(DesktopRect::MakeXYWH(45, 0, 5, 1));
+ std::fill_n(base_map + 45, 5, true);
+
+ for (size_t i = 0; i < sizeof(span_sets) / sizeof(span_sets[0]); i++) {
+ SCOPED_TRACE(i);
+ SpanSet& span_set = span_sets[i];
+ int span_set_end = span_set.spans[span_set.count - 1].end;
+ for (int x = 0; x < kMapWidth - span_set_end; ++x) {
+ SCOPED_TRACE(x);
+
+ DesktopRegion r = base_region;
+
+ bool expected_map[kMapWidth];
+ std::copy(base_map, base_map + kMapWidth, expected_map);
+
+ DesktopRegion region2;
+ for (int span = 0; span < span_set.count; span++) {
+ std::fill_n(x + expected_map + span_set.spans[span].start,
+ span_set.spans[span].end - span_set.spans[span].start,
+ false);
+ region2.AddRect(DesktopRect::MakeLTRB(x + span_set.spans[span].start, 0,
+ x + span_set.spans[span].end, 1));
+ }
+ r.Subtract(region2);
+
+ bool map[kMapWidth] = {
+ false,
+ };
+
+ int pos = -1;
+ for (DesktopRegion::Iterator it(r); !it.IsAtEnd(); it.Advance()) {
+ EXPECT_GT(it.rect().left(), pos);
+ pos = it.rect().right();
+ std::fill_n(map + it.rect().left(), it.rect().width(), true);
+ }
+
+ EXPECT_TRUE(std::equal(map, map + kMapWidth, expected_map));
+ }
+ }
+}
+
+// Verify that DesktopRegion::Subtract() works correctly by creating a column
+// of non-overlapping rectangles and subtracting a set of rectangles in the
+// same column. The result is verified by building a map of the region in an
+// array and comparing it with the expected values.
+TEST(DesktopRegionTest, SubtractRectOnSameCol) {
+ const int kMapHeight = 50;
+
+ struct SpanSet {
+ int count;
+ struct Range {
+ int start;
+ int end;
+ } spans[3];
+ } span_sets[] = {
+ {1, {{0, 3}}},
+ {1, {{0, 5}}},
+ {1, {{0, 7}}},
+ {1, {{0, 12}}},
+ {3, {{0, 3}, {4, 5}, {6, 16}}},
+ };
+
+ DesktopRegion base_region;
+ bool base_map[kMapHeight] = {
+ false,
+ };
+
+ base_region.AddRect(DesktopRect::MakeXYWH(0, 5, 1, 5));
+ std::fill_n(base_map + 5, 5, true);
+ base_region.AddRect(DesktopRect::MakeXYWH(0, 15, 1, 5));
+ std::fill_n(base_map + 15, 5, true);
+ base_region.AddRect(DesktopRect::MakeXYWH(0, 25, 1, 5));
+ std::fill_n(base_map + 25, 5, true);
+ base_region.AddRect(DesktopRect::MakeXYWH(0, 35, 1, 5));
+ std::fill_n(base_map + 35, 5, true);
+ base_region.AddRect(DesktopRect::MakeXYWH(0, 45, 1, 5));
+ std::fill_n(base_map + 45, 5, true);
+
+ for (size_t i = 0; i < sizeof(span_sets) / sizeof(span_sets[0]); i++) {
+ SCOPED_TRACE(i);
+ SpanSet& span_set = span_sets[i];
+ int span_set_end = span_set.spans[span_set.count - 1].end;
+ for (int y = 0; y < kMapHeight - span_set_end; ++y) {
+ SCOPED_TRACE(y);
+
+ DesktopRegion r = base_region;
+
+ bool expected_map[kMapHeight];
+ std::copy(base_map, base_map + kMapHeight, expected_map);
+
+ DesktopRegion region2;
+ for (int span = 0; span < span_set.count; span++) {
+ std::fill_n(y + expected_map + span_set.spans[span].start,
+ span_set.spans[span].end - span_set.spans[span].start,
+ false);
+ region2.AddRect(DesktopRect::MakeLTRB(0, y + span_set.spans[span].start,
+ 1, y + span_set.spans[span].end));
+ }
+ r.Subtract(region2);
+
+ bool map[kMapHeight] = {
+ false,
+ };
+
+ int pos = -1;
+ for (DesktopRegion::Iterator it(r); !it.IsAtEnd(); it.Advance()) {
+ EXPECT_GT(it.rect().top(), pos);
+ pos = it.rect().bottom();
+ std::fill_n(map + it.rect().top(), it.rect().height(), true);
+ }
+
+ for (int j = 0; j < kMapHeight; j++) {
+ EXPECT_EQ(expected_map[j], map[j]) << "j = " << j;
+ }
+ }
+ }
+}
+
+TEST(DesktopRegionTest, DISABLED_Performance) {
+ for (int c = 0; c < 1000; ++c) {
+ DesktopRegion r;
+ for (int i = 0; i < 10; ++i) {
+ r.AddRect(
+ DesktopRect::MakeXYWH(RadmonInt(1000), RadmonInt(1000), 200, 200));
+ }
+
+ for (int i = 0; i < 1000; ++i) {
+ r.AddRect(DesktopRect::MakeXYWH(RadmonInt(1000), RadmonInt(1000),
+ 5 + RadmonInt(10) * 5,
+ 5 + RadmonInt(10) * 5));
+ }
+
+ // Iterate over the rectangles.
+ for (DesktopRegion::Iterator it(r); !it.IsAtEnd(); it.Advance()) {
+ }
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/differ_block.cc b/third_party/libwebrtc/modules/desktop_capture/differ_block.cc
new file mode 100644
index 0000000000..54ee0829ea
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/differ_block.cc
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/differ_block.h"
+
+#include <string.h>
+
+#include "modules/desktop_capture/differ_vector_sse2.h"
+#include "rtc_base/system/arch.h"
+#include "system_wrappers/include/cpu_features_wrapper.h"
+
+namespace webrtc {
+
+namespace {
+
+bool VectorDifference_C(const uint8_t* image1, const uint8_t* image2) {
+ return memcmp(image1, image2, kBlockSize * kBytesPerPixel) != 0;
+}
+
+} // namespace
+
+bool VectorDifference(const uint8_t* image1, const uint8_t* image2) {
+ static bool (*diff_proc)(const uint8_t*, const uint8_t*) = nullptr;
+
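+ // Lazily select the comparison routine on the first call and cache it in
+ // the function-scope static above for all subsequent calls.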
+ if (!diff_proc) {
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+ // For x86 processors, check if SSE2 is supported.
+ bool have_sse2 = GetCPUInfo(kSSE2) != 0;
+ if (have_sse2 && kBlockSize == 32) {
+ diff_proc = &VectorDifference_SSE2_W32;
+ } else if (have_sse2 && kBlockSize == 16) {
+ diff_proc = &VectorDifference_SSE2_W16;
+ } else {
+ diff_proc = &VectorDifference_C;
+ }
+#else
+ // For other processors, always use C version.
+ // TODO(hclam): Implement a NEON version.
+ diff_proc = &VectorDifference_C;
+#endif
+ }
+
+ return diff_proc(image1, image2);
+}
+
+bool BlockDifference(const uint8_t* image1,
+ const uint8_t* image2,
+ int height,
+ int stride) {
+ for (int i = 0; i < height; i++) {
+ if (VectorDifference(image1, image2)) {
+ return true;
+ }
+ image1 += stride;
+ image2 += stride;
+ }
+ return false;
+}
+
+bool BlockDifference(const uint8_t* image1, const uint8_t* image2, int stride) {
+ return BlockDifference(image1, image2, kBlockSize, stride);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/differ_block.h b/third_party/libwebrtc/modules/desktop_capture/differ_block.h
new file mode 100644
index 0000000000..6c71e214e9
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/differ_block.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_DESKTOP_CAPTURE_DIFFER_BLOCK_H_
+#define MODULES_DESKTOP_CAPTURE_DIFFER_BLOCK_H_
+
+#include <stdint.h>
+
+namespace webrtc {
+
+// Size (in pixels) of each square block used for diffing. This must be a
+// multiple of sizeof(uint64)/8.
+const int kBlockSize = 32;
+
+// Format: BGRA 32 bit.
+const int kBytesPerPixel = 4;
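+
+// Hence one block row spans kBlockSize * kBytesPerPixel = 32 * 4 = 128 bytes,
+// and a full block occupies 32 * 128 = 4096 bytes.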
+
+// Low level function to compare 2 vectors of pixels of size kBlockSize. Returns
+// whether the blocks differ.
+bool VectorDifference(const uint8_t* image1, const uint8_t* image2);
+
+// Low level function to compare 2 blocks of pixels of size
+// (kBlockSize, `height`). Returns whether the blocks differ.
+bool BlockDifference(const uint8_t* image1,
+ const uint8_t* image2,
+ int height,
+ int stride);
+
+// Low level function to compare 2 blocks of pixels of size
+// (kBlockSize, kBlockSize). Returns whether the blocks differ.
+bool BlockDifference(const uint8_t* image1, const uint8_t* image2, int stride);
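+
+// A minimal usage sketch (illustrative only; `frame1`, `frame2`, `x`, `y` and
+// a shared `stride` are assumed by this comment, not defined in this header):
+// compare the block whose top-left pixel is (x, y) in two BGRA frames.
+//
+//   const uint8_t* a = frame1 + y * stride + x * kBytesPerPixel;
+//   const uint8_t* b = frame2 + y * stride + x * kBytesPerPixel;
+//   bool changed = BlockDifference(a, b, stride);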
+
+} // namespace webrtc
+
+#endif // MODULES_DESKTOP_CAPTURE_DIFFER_BLOCK_H_
diff --git a/third_party/libwebrtc/modules/desktop_capture/differ_block_unittest.cc b/third_party/libwebrtc/modules/desktop_capture/differ_block_unittest.cc
new file mode 100644
index 0000000000..aa454c872d
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/differ_block_unittest.cc
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/differ_block.h"
+
+#include <string.h>
+
+#include "test/gtest.h"
+
+namespace webrtc {
+
+// Run 900 times to mimic 1280x720.
+// TODO(fbarchard): Remove benchmark once performance is non-issue.
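+// A 1280x720 frame covers (1280 / 32) * (720 / 32) = 40 * 22.5 = 900 blocks.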
+static const int kTimesToRun = 900;
+
+static void GenerateData(uint8_t* data, int size) {
+ for (int i = 0; i < size; ++i) {
+ data[i] = i;
+ }
+}
+
+// Memory buffer large enough for 2 blocks aligned to 16 bytes.
+static const int kSizeOfBlock = kBlockSize * kBlockSize * kBytesPerPixel;
+uint8_t block_buffer[kSizeOfBlock * 2 + 16];
+
+void PrepareBuffers(uint8_t*& block1, uint8_t*& block2) {
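+ // Round the buffer start up to the next 16-byte boundary so both blocks are
+ // 16-byte aligned; block2 is an identical copy placed immediately after
+ // block1 (kSizeOfBlock is a multiple of 16, so block2 stays aligned).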
+ block1 = reinterpret_cast<uint8_t*>(
+ (reinterpret_cast<uintptr_t>(&block_buffer[0]) + 15) & ~15);
+ GenerateData(block1, kSizeOfBlock);
+ block2 = block1 + kSizeOfBlock;
+ memcpy(block2, block1, kSizeOfBlock);
+}
+
+TEST(BlockDifferenceTestSame, BlockDifference) {
+ uint8_t* block1;
+ uint8_t* block2;
+ PrepareBuffers(block1, block2);
+
+ // These blocks should match.
+ for (int i = 0; i < kTimesToRun; ++i) {
+ int result = BlockDifference(block1, block2, kBlockSize * kBytesPerPixel);
+ EXPECT_EQ(0, result);
+ }
+}
+
+TEST(BlockDifferenceTestLast, BlockDifference) {
+ uint8_t* block1;
+ uint8_t* block2;
+ PrepareBuffers(block1, block2);
+ block2[kSizeOfBlock - 2] += 1;
+
+ for (int i = 0; i < kTimesToRun; ++i) {
+ int result = BlockDifference(block1, block2, kBlockSize * kBytesPerPixel);
+ EXPECT_EQ(1, result);
+ }
+}
+
+TEST(BlockDifferenceTestMid, BlockDifference) {
+ uint8_t* block1;
+ uint8_t* block2;
+ PrepareBuffers(block1, block2);
+ block2[kSizeOfBlock / 2 + 1] += 1;
+
+ for (int i = 0; i < kTimesToRun; ++i) {
+ int result = BlockDifference(block1, block2, kBlockSize * kBytesPerPixel);
+ EXPECT_EQ(1, result);
+ }
+}
+
+TEST(BlockDifferenceTestFirst, BlockDifference) {
+ uint8_t* block1;
+ uint8_t* block2;
+ PrepareBuffers(block1, block2);
+ block2[0] += 1;
+
+ for (int i = 0; i < kTimesToRun; ++i) {
+ int result = BlockDifference(block1, block2, kBlockSize * kBytesPerPixel);
+ EXPECT_EQ(1, result);
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/differ_vector_sse2.cc b/third_party/libwebrtc/modules/desktop_capture/differ_vector_sse2.cc
new file mode 100644
index 0000000000..1c8b602d71
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/differ_vector_sse2.cc
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/differ_vector_sse2.h"
+
+#if defined(_MSC_VER)
+#include <intrin.h>
+#else
+#include <emmintrin.h>
+#include <mmintrin.h>
+#endif
+
+namespace webrtc {
+
+extern bool VectorDifference_SSE2_W16(const uint8_t* image1,
+ const uint8_t* image2) {
+ __m128i acc = _mm_setzero_si128();
+ __m128i v0;
+ __m128i v1;
+ __m128i sad;
+ const __m128i* i1 = reinterpret_cast<const __m128i*>(image1);
+ const __m128i* i2 = reinterpret_cast<const __m128i*>(image2);
+ v0 = _mm_loadu_si128(i1);
+ v1 = _mm_loadu_si128(i2);
+ sad = _mm_sad_epu8(v0, v1);
+ acc = _mm_adds_epu16(acc, sad);
+ v0 = _mm_loadu_si128(i1 + 1);
+ v1 = _mm_loadu_si128(i2 + 1);
+ sad = _mm_sad_epu8(v0, v1);
+ acc = _mm_adds_epu16(acc, sad);
+ v0 = _mm_loadu_si128(i1 + 2);
+ v1 = _mm_loadu_si128(i2 + 2);
+ sad = _mm_sad_epu8(v0, v1);
+ acc = _mm_adds_epu16(acc, sad);
+ v0 = _mm_loadu_si128(i1 + 3);
+ v1 = _mm_loadu_si128(i2 + 3);
+ sad = _mm_sad_epu8(v0, v1);
+ acc = _mm_adds_epu16(acc, sad);
+
+ // This essentially means sad = acc >> 64. We only care about the lower 16
+ // bits.
+ sad = _mm_shuffle_epi32(acc, 0xEE);
+ sad = _mm_adds_epu16(sad, acc);
+ return _mm_cvtsi128_si32(sad) != 0;
+}
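+
+// A scalar sketch of the reduction above (for exposition only): _mm_sad_epu8
+// leaves one 16-bit sum of absolute byte differences in each 64-bit half of
+// `acc`, so the 0xEE shuffle plus the final add amount to:
+//
+//   uint64_t total = acc_low_half + acc_high_half;  // hypothetical scalars
+//   return total != 0;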
+
+extern bool VectorDifference_SSE2_W32(const uint8_t* image1,
+ const uint8_t* image2) {
+ __m128i acc = _mm_setzero_si128();
+ __m128i v0;
+ __m128i v1;
+ __m128i sad;
+ const __m128i* i1 = reinterpret_cast<const __m128i*>(image1);
+ const __m128i* i2 = reinterpret_cast<const __m128i*>(image2);
+ v0 = _mm_loadu_si128(i1);
+ v1 = _mm_loadu_si128(i2);
+ sad = _mm_sad_epu8(v0, v1);
+ acc = _mm_adds_epu16(acc, sad);
+ v0 = _mm_loadu_si128(i1 + 1);
+ v1 = _mm_loadu_si128(i2 + 1);
+ sad = _mm_sad_epu8(v0, v1);
+ acc = _mm_adds_epu16(acc, sad);
+ v0 = _mm_loadu_si128(i1 + 2);
+ v1 = _mm_loadu_si128(i2 + 2);
+ sad = _mm_sad_epu8(v0, v1);
+ acc = _mm_adds_epu16(acc, sad);
+ v0 = _mm_loadu_si128(i1 + 3);
+ v1 = _mm_loadu_si128(i2 + 3);
+ sad = _mm_sad_epu8(v0, v1);
+ acc = _mm_adds_epu16(acc, sad);
+ v0 = _mm_loadu_si128(i1 + 4);
+ v1 = _mm_loadu_si128(i2 + 4);
+ sad = _mm_sad_epu8(v0, v1);
+ acc = _mm_adds_epu16(acc, sad);
+ v0 = _mm_loadu_si128(i1 + 5);
+ v1 = _mm_loadu_si128(i2 + 5);
+ sad = _mm_sad_epu8(v0, v1);
+ acc = _mm_adds_epu16(acc, sad);
+ v0 = _mm_loadu_si128(i1 + 6);
+ v1 = _mm_loadu_si128(i2 + 6);
+ sad = _mm_sad_epu8(v0, v1);
+ acc = _mm_adds_epu16(acc, sad);
+ v0 = _mm_loadu_si128(i1 + 7);
+ v1 = _mm_loadu_si128(i2 + 7);
+ sad = _mm_sad_epu8(v0, v1);
+ acc = _mm_adds_epu16(acc, sad);
+
+ // This essentially means sad = acc >> 64. We only care about the lower 16
+ // bits.
+ sad = _mm_shuffle_epi32(acc, 0xEE);
+ sad = _mm_adds_epu16(sad, acc);
+ return _mm_cvtsi128_si32(sad) != 0;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/differ_vector_sse2.h b/third_party/libwebrtc/modules/desktop_capture/differ_vector_sse2.h
new file mode 100644
index 0000000000..a3c297eb9d
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/differ_vector_sse2.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+// This header file is used only by differ_block.cc. It defines the SSE2
+// routines for finding vector differences.
+
+#ifndef MODULES_DESKTOP_CAPTURE_DIFFER_VECTOR_SSE2_H_
+#define MODULES_DESKTOP_CAPTURE_DIFFER_VECTOR_SSE2_H_
+
+#include <stdint.h>
+
+namespace webrtc {
+
+// Find vector difference of dimension 16.
+extern bool VectorDifference_SSE2_W16(const uint8_t* image1,
+ const uint8_t* image2);
+
+// Find vector difference of dimension 32.
+extern bool VectorDifference_SSE2_W32(const uint8_t* image1,
+ const uint8_t* image2);
+
+} // namespace webrtc
+
+#endif // MODULES_DESKTOP_CAPTURE_DIFFER_VECTOR_SSE2_H_
diff --git a/third_party/libwebrtc/modules/desktop_capture/fake_desktop_capturer.cc b/third_party/libwebrtc/modules/desktop_capture/fake_desktop_capturer.cc
new file mode 100644
index 0000000000..67149bfcb9
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/fake_desktop_capturer.cc
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/fake_desktop_capturer.h"
+
+#include <utility>
+
+#include "modules/desktop_capture/desktop_capture_types.h"
+#include "modules/desktop_capture/desktop_frame.h"
+
+namespace webrtc {
+
+FakeDesktopCapturer::FakeDesktopCapturer() = default;
+FakeDesktopCapturer::~FakeDesktopCapturer() = default;
+
+void FakeDesktopCapturer::set_result(DesktopCapturer::Result result) {
+ result_ = result;
+}
+
+int FakeDesktopCapturer::num_frames_captured() const {
+ return num_frames_captured_;
+}
+
+int FakeDesktopCapturer::num_capture_attempts() const {
+ return num_capture_attempts_;
+}
+
+// Uses the provided `generator` as the DesktopFrameGenerator;
+// FakeDesktopCapturer does not take ownership of `generator`.
+void FakeDesktopCapturer::set_frame_generator(
+ DesktopFrameGenerator* generator) {
+ generator_ = generator;
+}
+
+void FakeDesktopCapturer::Start(DesktopCapturer::Callback* callback) {
+ callback_ = callback;
+}
+
+void FakeDesktopCapturer::CaptureFrame() {
+ num_capture_attempts_++;
+ if (generator_) {
+ if (result_ != DesktopCapturer::Result::SUCCESS) {
+ callback_->OnCaptureResult(result_, nullptr);
+ return;
+ }
+
+ std::unique_ptr<DesktopFrame> frame(
+ generator_->GetNextFrame(shared_memory_factory_.get()));
+ if (frame) {
+ num_frames_captured_++;
+ callback_->OnCaptureResult(result_, std::move(frame));
+ } else {
+ callback_->OnCaptureResult(DesktopCapturer::Result::ERROR_TEMPORARY,
+ nullptr);
+ }
+ return;
+ }
+ callback_->OnCaptureResult(DesktopCapturer::Result::ERROR_PERMANENT, nullptr);
+}
+
+void FakeDesktopCapturer::SetSharedMemoryFactory(
+ std::unique_ptr<SharedMemoryFactory> shared_memory_factory) {
+ shared_memory_factory_ = std::move(shared_memory_factory);
+}
+
+bool FakeDesktopCapturer::GetSourceList(DesktopCapturer::SourceList* sources) {
+ sources->push_back({kWindowId, 1, "A-Fake-DesktopCapturer-Window"});
+ sources->push_back({kScreenId, 1});
+ return true;
+}
+
+bool FakeDesktopCapturer::SelectSource(DesktopCapturer::SourceId id) {
+ return id == kWindowId || id == kScreenId || id == kFullDesktopScreenId;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/fake_desktop_capturer.h b/third_party/libwebrtc/modules/desktop_capture/fake_desktop_capturer.h
new file mode 100644
index 0000000000..086e6df0e2
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/fake_desktop_capturer.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_DESKTOP_CAPTURE_FAKE_DESKTOP_CAPTURER_H_
+#define MODULES_DESKTOP_CAPTURE_FAKE_DESKTOP_CAPTURER_H_
+
+#include <memory>
+
+#include "modules/desktop_capture/desktop_capturer.h"
+#include "modules/desktop_capture/desktop_frame_generator.h"
+#include "modules/desktop_capture/shared_memory.h"
+#include "rtc_base/system/rtc_export.h"
+
+namespace webrtc {
+
+// A fake implementation of DesktopCapturer or its derived interfaces to
+// generate DesktopFrame instances for testing purposes.
+//
+// Consumers can provide a FrameGenerator instance to generate the DesktopFrame
+// returned by each CaptureFrame() call.
+// If no FrameGenerator is provided, FakeDesktopCapturer always returns a
+// nullptr DesktopFrame.
+//
+// Double buffering is guaranteed by the FrameGenerator: the FrameGenerator
+// implementations in desktop_frame_generator.h create a new instance of
+// DesktopFrame each time, so buffers are never reused between captures.
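+//
+// A minimal usage sketch (assumes a PainterDesktopFrameGenerator from
+// desktop_frame_generator.h and a caller-provided DesktopCapturer::Callback
+// implementation named `callback`):
+//
+//   PainterDesktopFrameGenerator generator;
+//   FakeDesktopCapturer capturer;
+//   capturer.set_frame_generator(&generator);
+//   capturer.Start(&callback);
+//   capturer.CaptureFrame();  // Triggers callback.OnCaptureResult(...).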
+class RTC_EXPORT FakeDesktopCapturer : public DesktopCapturer {
+ public:
+ FakeDesktopCapturer();
+ ~FakeDesktopCapturer() override;
+
+ // Decides the result that will be returned in the next CaptureFrame()
+ // callback.
+ void set_result(DesktopCapturer::Result result);
+
+ // Uses the provided `generator` as the DesktopFrameGenerator;
+ // FakeDesktopCapturer does not take ownership of `generator`.
+ void set_frame_generator(DesktopFrameGenerator* generator);
+
+ // Count of DesktopFrame instances that have been returned by this instance.
+ // This count is never negative.
+ int num_frames_captured() const;
+
+ // Count of CaptureFrame() calls that have been made. This count is never
+ // negative.
+ int num_capture_attempts() const;
+
+ // DesktopCapturer interface
+ void Start(DesktopCapturer::Callback* callback) override;
+ void CaptureFrame() override;
+ void SetSharedMemoryFactory(
+ std::unique_ptr<SharedMemoryFactory> shared_memory_factory) override;
+ bool GetSourceList(DesktopCapturer::SourceList* sources) override;
+ bool SelectSource(DesktopCapturer::SourceId id) override;
+
+ private:
+ static constexpr DesktopCapturer::SourceId kWindowId = 1378277495;
+ static constexpr DesktopCapturer::SourceId kScreenId = 1378277496;
+
+ DesktopCapturer::Callback* callback_ = nullptr;
+ std::unique_ptr<SharedMemoryFactory> shared_memory_factory_;
+ DesktopCapturer::Result result_ = Result::SUCCESS;
+ DesktopFrameGenerator* generator_ = nullptr;
+ int num_frames_captured_ = 0;
+ int num_capture_attempts_ = 0;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_DESKTOP_CAPTURE_FAKE_DESKTOP_CAPTURER_H_
diff --git a/third_party/libwebrtc/modules/desktop_capture/fallback_desktop_capturer_wrapper.cc b/third_party/libwebrtc/modules/desktop_capture/fallback_desktop_capturer_wrapper.cc
new file mode 100644
index 0000000000..61fdb416f2
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/fallback_desktop_capturer_wrapper.cc
@@ -0,0 +1,183 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/fallback_desktop_capturer_wrapper.h"
+
+#include <stddef.h>
+
+#include <utility>
+
+#include "api/sequence_checker.h"
+#include "rtc_base/checks.h"
+#include "system_wrappers/include/metrics.h"
+
+namespace webrtc {
+
+namespace {
+
+// An implementation that shares a single SharedMemoryFactory between
+// DesktopCapturer instances. This class is designed for synchronous
+// DesktopCapturer implementations only.
+class SharedMemoryFactoryProxy : public SharedMemoryFactory {
+ public:
+ // Users should maintain the lifetime of `factory` to ensure it outlives the
+ // current instance.
+ static std::unique_ptr<SharedMemoryFactory> Create(
+ SharedMemoryFactory* factory);
+ ~SharedMemoryFactoryProxy() override;
+
+ // Forwards CreateSharedMemory() calls to `factory_`. Users should always
+ // call this function on one thread, and should not call it after the
+ // SharedMemoryFactory from which the current instance was created has been
+ // destroyed.
+ std::unique_ptr<SharedMemory> CreateSharedMemory(size_t size) override;
+
+ private:
+ explicit SharedMemoryFactoryProxy(SharedMemoryFactory* factory);
+
+ SharedMemoryFactory* factory_ = nullptr;
+ SequenceChecker thread_checker_;
+};
+
+} // namespace
+
+SharedMemoryFactoryProxy::SharedMemoryFactoryProxy(
+ SharedMemoryFactory* factory) {
+ RTC_DCHECK(factory);
+ factory_ = factory;
+}
+
+// static
+std::unique_ptr<SharedMemoryFactory> SharedMemoryFactoryProxy::Create(
+ SharedMemoryFactory* factory) {
+ return std::unique_ptr<SharedMemoryFactory>(
+ new SharedMemoryFactoryProxy(factory));
+}
+
+SharedMemoryFactoryProxy::~SharedMemoryFactoryProxy() = default;
+
+std::unique_ptr<SharedMemory> SharedMemoryFactoryProxy::CreateSharedMemory(
+ size_t size) {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ return factory_->CreateSharedMemory(size);
+}
+
+FallbackDesktopCapturerWrapper::FallbackDesktopCapturerWrapper(
+ std::unique_ptr<DesktopCapturer> main_capturer,
+ std::unique_ptr<DesktopCapturer> secondary_capturer)
+ : main_capturer_(std::move(main_capturer)),
+ secondary_capturer_(std::move(secondary_capturer)) {
+ RTC_DCHECK(main_capturer_);
+ RTC_DCHECK(secondary_capturer_);
+}
+
+FallbackDesktopCapturerWrapper::~FallbackDesktopCapturerWrapper() = default;
+
+void FallbackDesktopCapturerWrapper::Start(
+ DesktopCapturer::Callback* callback) {
+ callback_ = callback;
+ // FallbackDesktopCapturerWrapper catches the callback of the main capturer
+ // and checks its result to decide whether the secondary capturer should be
+ // involved.
+ main_capturer_->Start(this);
+ // For the secondary capturer we have no further backup plan, so
+ // FallbackDesktopCapturerWrapper won't check its result; it reports
+ // directly to the input `callback`.
+ secondary_capturer_->Start(callback);
+}
+
+void FallbackDesktopCapturerWrapper::SetSharedMemoryFactory(
+ std::unique_ptr<SharedMemoryFactory> shared_memory_factory) {
+ shared_memory_factory_ = std::move(shared_memory_factory);
+ if (shared_memory_factory_) {
+ main_capturer_->SetSharedMemoryFactory(
+ SharedMemoryFactoryProxy::Create(shared_memory_factory_.get()));
+ secondary_capturer_->SetSharedMemoryFactory(
+ SharedMemoryFactoryProxy::Create(shared_memory_factory_.get()));
+ } else {
+ main_capturer_->SetSharedMemoryFactory(
+ std::unique_ptr<SharedMemoryFactory>());
+ secondary_capturer_->SetSharedMemoryFactory(
+ std::unique_ptr<SharedMemoryFactory>());
+ }
+}
+
+void FallbackDesktopCapturerWrapper::CaptureFrame() {
+ RTC_DCHECK(callback_);
+ if (main_capturer_permanent_error_) {
+ secondary_capturer_->CaptureFrame();
+ } else {
+ main_capturer_->CaptureFrame();
+ }
+}
+
+void FallbackDesktopCapturerWrapper::SetExcludedWindow(WindowId window) {
+ main_capturer_->SetExcludedWindow(window);
+ secondary_capturer_->SetExcludedWindow(window);
+}
+
+bool FallbackDesktopCapturerWrapper::GetSourceList(SourceList* sources) {
+ if (main_capturer_permanent_error_) {
+ return secondary_capturer_->GetSourceList(sources);
+ }
+ return main_capturer_->GetSourceList(sources);
+}
+
+bool FallbackDesktopCapturerWrapper::SelectSource(SourceId id) {
+ if (main_capturer_permanent_error_) {
+ return secondary_capturer_->SelectSource(id);
+ }
+ const bool main_capturer_result = main_capturer_->SelectSource(id);
+ RTC_HISTOGRAM_BOOLEAN(
+ "WebRTC.DesktopCapture.PrimaryCapturerSelectSourceError",
+ main_capturer_result);
+ if (!main_capturer_result) {
+ main_capturer_permanent_error_ = true;
+ }
+
+ return secondary_capturer_->SelectSource(id);
+}
+
+bool FallbackDesktopCapturerWrapper::FocusOnSelectedSource() {
+ if (main_capturer_permanent_error_) {
+ return secondary_capturer_->FocusOnSelectedSource();
+ }
+ return main_capturer_->FocusOnSelectedSource() ||
+ secondary_capturer_->FocusOnSelectedSource();
+}
+
+bool FallbackDesktopCapturerWrapper::IsOccluded(const DesktopVector& pos) {
+ // Returns true if either capturer returns true.
+ if (main_capturer_permanent_error_) {
+ return secondary_capturer_->IsOccluded(pos);
+ }
+ return main_capturer_->IsOccluded(pos) ||
+ secondary_capturer_->IsOccluded(pos);
+}
+
+void FallbackDesktopCapturerWrapper::OnCaptureResult(
+ Result result,
+ std::unique_ptr<DesktopFrame> frame) {
+ RTC_DCHECK(callback_);
+ RTC_HISTOGRAM_BOOLEAN("WebRTC.DesktopCapture.PrimaryCapturerError",
+ result != Result::SUCCESS);
+ RTC_HISTOGRAM_BOOLEAN("WebRTC.DesktopCapture.PrimaryCapturerPermanentError",
+ result == Result::ERROR_PERMANENT);
+ if (result == Result::SUCCESS) {
+ callback_->OnCaptureResult(result, std::move(frame));
+ return;
+ }
+
+ if (result == Result::ERROR_PERMANENT) {
+ main_capturer_permanent_error_ = true;
+ }
+ secondary_capturer_->CaptureFrame();
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/fallback_desktop_capturer_wrapper.h b/third_party/libwebrtc/modules/desktop_capture/fallback_desktop_capturer_wrapper.h
new file mode 100644
index 0000000000..2855eae7ee
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/fallback_desktop_capturer_wrapper.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_DESKTOP_CAPTURE_FALLBACK_DESKTOP_CAPTURER_WRAPPER_H_
+#define MODULES_DESKTOP_CAPTURE_FALLBACK_DESKTOP_CAPTURER_WRAPPER_H_
+
+#include <memory>
+
+#include "modules/desktop_capture/desktop_capture_types.h"
+#include "modules/desktop_capture/desktop_capturer.h"
+#include "modules/desktop_capture/desktop_frame.h"
+#include "modules/desktop_capture/desktop_geometry.h"
+#include "modules/desktop_capture/shared_memory.h"
+
+namespace webrtc {
+
+// A DesktopCapturer wrapper that owns two DesktopCapturer implementations. If
+// the main DesktopCapturer fails, it uses the secondary one instead. The two
+// capturers are expected to return the same SourceList, with each SourceId
+// carrying the same meaning; otherwise FallbackDesktopCapturerWrapper may
+// return frames from different sources. Using asynchronous DesktopCapturer
+// implementations with SharedMemoryFactory is not supported, and may result
+// in a crash or an assertion failure.
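+//
+// A minimal construction sketch (CreateMainCapturer() and
+// CreateSecondaryCapturer() are hypothetical factories returning
+// std::unique_ptr<DesktopCapturer>):
+//
+//   FallbackDesktopCapturerWrapper wrapper(CreateMainCapturer(),
+//                                          CreateSecondaryCapturer());
+//   wrapper.Start(&callback);
+//   wrapper.CaptureFrame();  // Falls back to the secondary capturer after a
+//                            // permanent main-capturer error.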
+class FallbackDesktopCapturerWrapper final : public DesktopCapturer,
+ public DesktopCapturer::Callback {
+ public:
+ FallbackDesktopCapturerWrapper(
+ std::unique_ptr<DesktopCapturer> main_capturer,
+ std::unique_ptr<DesktopCapturer> secondary_capturer);
+ ~FallbackDesktopCapturerWrapper() override;
+
+ // DesktopCapturer interface.
+ void Start(DesktopCapturer::Callback* callback) override;
+ void SetSharedMemoryFactory(
+ std::unique_ptr<SharedMemoryFactory> shared_memory_factory) override;
+ void CaptureFrame() override;
+ void SetExcludedWindow(WindowId window) override;
+ bool GetSourceList(SourceList* sources) override;
+ bool SelectSource(SourceId id) override;
+ bool FocusOnSelectedSource() override;
+ bool IsOccluded(const DesktopVector& pos) override;
+
+ private:
+ // DesktopCapturer::Callback interface.
+ void OnCaptureResult(Result result,
+ std::unique_ptr<DesktopFrame> frame) override;
+
+ const std::unique_ptr<DesktopCapturer> main_capturer_;
+ const std::unique_ptr<DesktopCapturer> secondary_capturer_;
+ std::unique_ptr<SharedMemoryFactory> shared_memory_factory_;
+ bool main_capturer_permanent_error_ = false;
+ DesktopCapturer::Callback* callback_ = nullptr;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_DESKTOP_CAPTURE_FALLBACK_DESKTOP_CAPTURER_WRAPPER_H_
diff --git a/third_party/libwebrtc/modules/desktop_capture/fallback_desktop_capturer_wrapper_unittest.cc b/third_party/libwebrtc/modules/desktop_capture/fallback_desktop_capturer_wrapper_unittest.cc
new file mode 100644
index 0000000000..de66386434
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/fallback_desktop_capturer_wrapper_unittest.cc
@@ -0,0 +1,207 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/fallback_desktop_capturer_wrapper.h"
+
+#include <stddef.h>
+
+#include <memory>
+#include <utility>
+#include <vector>
+
+#include "modules/desktop_capture/desktop_capturer.h"
+#include "modules/desktop_capture/desktop_frame_generator.h"
+#include "modules/desktop_capture/fake_desktop_capturer.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+namespace {
+
+std::unique_ptr<DesktopCapturer> CreateDesktopCapturer(
+ PainterDesktopFrameGenerator* frame_generator) {
+ std::unique_ptr<FakeDesktopCapturer> capturer(new FakeDesktopCapturer());
+ capturer->set_frame_generator(frame_generator);
+ return std::move(capturer);
+}
+
+class FakeSharedMemory : public SharedMemory {
+ public:
+ explicit FakeSharedMemory(size_t size);
+ ~FakeSharedMemory() override;
+
+ private:
+ static int next_id_;
+};
+
+// static
+int FakeSharedMemory::next_id_ = 0;
+
+FakeSharedMemory::FakeSharedMemory(size_t size)
+ : SharedMemory(new char[size], size, 0, next_id_++) {}
+
+FakeSharedMemory::~FakeSharedMemory() {
+ delete[] static_cast<char*>(data_);
+}
+
+class FakeSharedMemoryFactory : public SharedMemoryFactory {
+ public:
+ FakeSharedMemoryFactory() = default;
+ ~FakeSharedMemoryFactory() override = default;
+
+ std::unique_ptr<SharedMemory> CreateSharedMemory(size_t size) override;
+};
+
+std::unique_ptr<SharedMemory> FakeSharedMemoryFactory::CreateSharedMemory(
+ size_t size) {
+ return std::unique_ptr<SharedMemory>(new FakeSharedMemory(size));
+}
+
+} // namespace
+
+class FallbackDesktopCapturerWrapperTest : public ::testing::Test,
+ public DesktopCapturer::Callback {
+ public:
+ FallbackDesktopCapturerWrapperTest();
+ ~FallbackDesktopCapturerWrapperTest() override = default;
+
+ protected:
+ std::vector<std::pair<DesktopCapturer::Result, bool>> results_;
+ FakeDesktopCapturer* main_capturer_ = nullptr;
+ FakeDesktopCapturer* secondary_capturer_ = nullptr;
+ std::unique_ptr<FallbackDesktopCapturerWrapper> wrapper_;
+
+ private:
+ // DesktopCapturer::Callback interface
+ void OnCaptureResult(DesktopCapturer::Result result,
+ std::unique_ptr<DesktopFrame> frame) override;
+ PainterDesktopFrameGenerator frame_generator_;
+};
+
+FallbackDesktopCapturerWrapperTest::FallbackDesktopCapturerWrapperTest() {
+ frame_generator_.size()->set(1024, 768);
+ std::unique_ptr<DesktopCapturer> main_capturer =
+ CreateDesktopCapturer(&frame_generator_);
+ std::unique_ptr<DesktopCapturer> secondary_capturer =
+ CreateDesktopCapturer(&frame_generator_);
+ main_capturer_ = static_cast<FakeDesktopCapturer*>(main_capturer.get());
+ secondary_capturer_ =
+ static_cast<FakeDesktopCapturer*>(secondary_capturer.get());
+ wrapper_.reset(new FallbackDesktopCapturerWrapper(
+ std::move(main_capturer), std::move(secondary_capturer)));
+ wrapper_->Start(this);
+}
+
+void FallbackDesktopCapturerWrapperTest::OnCaptureResult(
+ DesktopCapturer::Result result,
+ std::unique_ptr<DesktopFrame> frame) {
+ results_.emplace_back(result, !!frame);
+}
+
+TEST_F(FallbackDesktopCapturerWrapperTest, MainNeverFailed) {
+ wrapper_->CaptureFrame();
+ ASSERT_EQ(main_capturer_->num_capture_attempts(), 1);
+ ASSERT_EQ(main_capturer_->num_frames_captured(), 1);
+ ASSERT_EQ(secondary_capturer_->num_capture_attempts(), 0);
+ ASSERT_EQ(secondary_capturer_->num_frames_captured(), 0);
+ ASSERT_EQ(results_.size(), 1U);
+ ASSERT_EQ(results_[0],
+ std::make_pair(DesktopCapturer::Result::SUCCESS, true));
+}
+
+TEST_F(FallbackDesktopCapturerWrapperTest, MainFailedTemporarily) {
+ wrapper_->CaptureFrame();
+ main_capturer_->set_result(DesktopCapturer::Result::ERROR_TEMPORARY);
+ wrapper_->CaptureFrame();
+ main_capturer_->set_result(DesktopCapturer::Result::SUCCESS);
+ wrapper_->CaptureFrame();
+
+ ASSERT_EQ(main_capturer_->num_capture_attempts(), 3);
+ ASSERT_EQ(main_capturer_->num_frames_captured(), 2);
+ ASSERT_EQ(secondary_capturer_->num_capture_attempts(), 1);
+ ASSERT_EQ(secondary_capturer_->num_frames_captured(), 1);
+ ASSERT_EQ(results_.size(), 3U);
+ for (int i = 0; i < 3; i++) {
+ ASSERT_EQ(results_[i],
+ std::make_pair(DesktopCapturer::Result::SUCCESS, true));
+ }
+}
+
+TEST_F(FallbackDesktopCapturerWrapperTest, MainFailedPermanently) {
+ wrapper_->CaptureFrame();
+ main_capturer_->set_result(DesktopCapturer::Result::ERROR_PERMANENT);
+ wrapper_->CaptureFrame();
+ main_capturer_->set_result(DesktopCapturer::Result::SUCCESS);
+ wrapper_->CaptureFrame();
+
+ ASSERT_EQ(main_capturer_->num_capture_attempts(), 2);
+ ASSERT_EQ(main_capturer_->num_frames_captured(), 1);
+ ASSERT_EQ(secondary_capturer_->num_capture_attempts(), 2);
+ ASSERT_EQ(secondary_capturer_->num_frames_captured(), 2);
+ ASSERT_EQ(results_.size(), 3U);
+ for (int i = 0; i < 3; i++) {
+ ASSERT_EQ(results_[i],
+ std::make_pair(DesktopCapturer::Result::SUCCESS, true));
+ }
+}
+
+TEST_F(FallbackDesktopCapturerWrapperTest, BothFailed) {
+ wrapper_->CaptureFrame();
+ main_capturer_->set_result(DesktopCapturer::Result::ERROR_PERMANENT);
+ wrapper_->CaptureFrame();
+ main_capturer_->set_result(DesktopCapturer::Result::SUCCESS);
+ wrapper_->CaptureFrame();
+ secondary_capturer_->set_result(DesktopCapturer::Result::ERROR_TEMPORARY);
+ wrapper_->CaptureFrame();
+ secondary_capturer_->set_result(DesktopCapturer::Result::ERROR_PERMANENT);
+ wrapper_->CaptureFrame();
+ wrapper_->CaptureFrame();
+
+ ASSERT_EQ(main_capturer_->num_capture_attempts(), 2);
+ ASSERT_EQ(main_capturer_->num_frames_captured(), 1);
+ ASSERT_EQ(secondary_capturer_->num_capture_attempts(), 5);
+ ASSERT_EQ(secondary_capturer_->num_frames_captured(), 2);
+ ASSERT_EQ(results_.size(), 6U);
+ for (int i = 0; i < 3; i++) {
+ ASSERT_EQ(results_[i],
+ std::make_pair(DesktopCapturer::Result::SUCCESS, true));
+ }
+ ASSERT_EQ(results_[3],
+ std::make_pair(DesktopCapturer::Result::ERROR_TEMPORARY, false));
+ ASSERT_EQ(results_[4],
+ std::make_pair(DesktopCapturer::Result::ERROR_PERMANENT, false));
+ ASSERT_EQ(results_[5],
+ std::make_pair(DesktopCapturer::Result::ERROR_PERMANENT, false));
+}
+
+TEST_F(FallbackDesktopCapturerWrapperTest, WithSharedMemory) {
+ wrapper_->SetSharedMemoryFactory(
+ std::unique_ptr<SharedMemoryFactory>(new FakeSharedMemoryFactory()));
+ wrapper_->CaptureFrame();
+ main_capturer_->set_result(DesktopCapturer::Result::ERROR_TEMPORARY);
+ wrapper_->CaptureFrame();
+ main_capturer_->set_result(DesktopCapturer::Result::SUCCESS);
+ wrapper_->CaptureFrame();
+ main_capturer_->set_result(DesktopCapturer::Result::ERROR_PERMANENT);
+ wrapper_->CaptureFrame();
+ wrapper_->CaptureFrame();
+
+ ASSERT_EQ(main_capturer_->num_capture_attempts(), 4);
+ ASSERT_EQ(main_capturer_->num_frames_captured(), 2);
+ ASSERT_EQ(secondary_capturer_->num_capture_attempts(), 3);
+ ASSERT_EQ(secondary_capturer_->num_frames_captured(), 3);
+ ASSERT_EQ(results_.size(), 5U);
+ for (int i = 0; i < 5; i++) {
+ ASSERT_EQ(results_[i],
+ std::make_pair(DesktopCapturer::Result::SUCCESS, true));
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/full_screen_application_handler.cc b/third_party/libwebrtc/modules/desktop_capture/full_screen_application_handler.cc
new file mode 100644
index 0000000000..e0975570ba
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/full_screen_application_handler.cc
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/full_screen_application_handler.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+FullScreenApplicationHandler::FullScreenApplicationHandler(
+ DesktopCapturer::SourceId sourceId)
+ : source_id_(sourceId) {}
+
+DesktopCapturer::SourceId FullScreenApplicationHandler::FindFullScreenWindow(
+ const DesktopCapturer::SourceList&,
+ int64_t) const {
+ return 0;
+}
+
+DesktopCapturer::SourceId FullScreenApplicationHandler::GetSourceId() const {
+ return source_id_;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/full_screen_application_handler.h b/third_party/libwebrtc/modules/desktop_capture/full_screen_application_handler.h
new file mode 100644
index 0000000000..b7e097a474
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/full_screen_application_handler.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_DESKTOP_CAPTURE_FULL_SCREEN_APPLICATION_HANDLER_H_
+#define MODULES_DESKTOP_CAPTURE_FULL_SCREEN_APPLICATION_HANDLER_H_
+
+#include <memory>
+
+#include "modules/desktop_capture/desktop_capturer.h"
+
+namespace webrtc {
+
+// Base class for application-specific handlers that check the criteria for
+// switching to full-screen mode and, if possible, find the full-screen window
+// to share. Intended to be created and owned by a platform-specific
+// FullScreenWindowDetector.
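+//
+// A subclassing sketch (MyAppHandler is hypothetical):
+//
+//   class MyAppHandler : public FullScreenApplicationHandler {
+//    public:
+//     using FullScreenApplicationHandler::FullScreenApplicationHandler;
+//     DesktopCapturer::SourceId FindFullScreenWindow(
+//         const DesktopCapturer::SourceList& window_list,
+//         int64_t timestamp) const override {
+//       return 0;  // Or the id of the detected full-screen window.
+//     }
+//   };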
+class FullScreenApplicationHandler {
+ public:
+ virtual ~FullScreenApplicationHandler() {}
+
+ FullScreenApplicationHandler(const FullScreenApplicationHandler&) = delete;
+ FullScreenApplicationHandler& operator=(const FullScreenApplicationHandler&) =
+ delete;
+
+ explicit FullScreenApplicationHandler(DesktopCapturer::SourceId sourceId);
+
+ // Returns the full-screen window in place of the original window if all the
+ // criteria are met, or 0 if no such window is found.
+ virtual DesktopCapturer::SourceId FindFullScreenWindow(
+ const DesktopCapturer::SourceList& window_list,
+ int64_t timestamp) const;
+
+ // Returns the source id of the original window associated with this
+ // FullScreenApplicationHandler.
+ DesktopCapturer::SourceId GetSourceId() const;
+
+ private:
+ const DesktopCapturer::SourceId source_id_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_DESKTOP_CAPTURE_FULL_SCREEN_APPLICATION_HANDLER_H_
diff --git a/third_party/libwebrtc/modules/desktop_capture/full_screen_window_detector.cc b/third_party/libwebrtc/modules/desktop_capture/full_screen_window_detector.cc
new file mode 100644
index 0000000000..d0bc9c7ca6
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/full_screen_window_detector.cc
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/full_screen_window_detector.h"
+#include "modules/desktop_capture/full_screen_application_handler.h"
+#include "rtc_base/time_utils.h"
+
+namespace webrtc {
+
+FullScreenWindowDetector::FullScreenWindowDetector(
+ ApplicationHandlerFactory application_handler_factory)
+ : application_handler_factory_(application_handler_factory),
+ last_update_time_ms_(0),
+ previous_source_id_(0),
+ no_handler_source_id_(0) {}
+
+DesktopCapturer::SourceId FullScreenWindowDetector::FindFullScreenWindow(
+ DesktopCapturer::SourceId original_source_id) {
+ if (app_handler_ == nullptr ||
+ app_handler_->GetSourceId() != original_source_id) {
+ return 0;
+ }
+ return app_handler_->FindFullScreenWindow(window_list_, last_update_time_ms_);
+}
+
+void FullScreenWindowDetector::UpdateWindowListIfNeeded(
+ DesktopCapturer::SourceId original_source_id,
+ rtc::FunctionView<bool(DesktopCapturer::SourceList*)> get_sources) {
+ const bool skip_update = previous_source_id_ != original_source_id;
+ previous_source_id_ = original_source_id;
+
+ // This is an attempt to avoid redundantly creating an application handler
+ // when an instance of WindowCapturer is used to generate a thumbnail to show
+ // in the picker by calling SelectSource() and CaptureFrame() for every
+ // available source.
+ if (skip_update) {
+ return;
+ }
+
+ CreateApplicationHandlerIfNeeded(original_source_id);
+ if (app_handler_ == nullptr) {
+ // There is no FullScreenApplicationHandler specific to the current
+ // application.
+ return;
+ }
+
+ constexpr int64_t kUpdateIntervalMs = 500;
+
+ if ((rtc::TimeMillis() - last_update_time_ms_) <= kUpdateIntervalMs) {
+ return;
+ }
+
+ DesktopCapturer::SourceList window_list;
+ if (get_sources(&window_list)) {
+ last_update_time_ms_ = rtc::TimeMillis();
+ window_list_.swap(window_list);
+ }
+}
+
+void FullScreenWindowDetector::CreateApplicationHandlerIfNeeded(
+ DesktopCapturer::SourceId source_id) {
+ if (no_handler_source_id_ == source_id) {
+ return;
+ }
+
+ if (app_handler_ == nullptr || app_handler_->GetSourceId() != source_id) {
+ app_handler_ = application_handler_factory_
+ ? application_handler_factory_(source_id)
+ : nullptr;
+ }
+
+ if (app_handler_ == nullptr) {
+ no_handler_source_id_ = source_id;
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/full_screen_window_detector.h b/third_party/libwebrtc/modules/desktop_capture/full_screen_window_detector.h
new file mode 100644
index 0000000000..998b720d90
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/full_screen_window_detector.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_DESKTOP_CAPTURE_FULL_SCREEN_WINDOW_DETECTOR_H_
+#define MODULES_DESKTOP_CAPTURE_FULL_SCREEN_WINDOW_DETECTOR_H_
+
+#include <memory>
+
+#include "api/function_view.h"
+#include "api/ref_counted_base.h"
+#include "api/scoped_refptr.h"
+#include "modules/desktop_capture/desktop_capturer.h"
+#include "modules/desktop_capture/full_screen_application_handler.h"
+
+namespace webrtc {
+
+// This is a way to handle the switch to full-screen mode for an application
+// in some specific cases:
+// - Chrome on MacOS creates a new window in full-screen mode to
+//   show a tab full-screen and minimizes the old window.
+// - PowerPoint creates new windows in full-screen mode when the user goes to
+//   presentation mode (Slide Show Window, Presentation Window).
+//
+// To continue capturing in these cases, we try to find the new full-screen
+// window using criteria provided by application specific
+// FullScreenApplicationHandler.
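+//
+// A periodic-usage sketch from inside a window capturer (`detector_` and
+// `selected_window_` are illustrative member names):
+//
+//   detector_->UpdateWindowListIfNeeded(
+//       selected_window_,
+//       [this](DesktopCapturer::SourceList* list) {
+//         return GetSourceList(list);
+//       });
+//   DesktopCapturer::SourceId id =
+//       detector_->FindFullScreenWindow(selected_window_);
+//   if (id != 0) {
+//     // Capture `id` instead of `selected_window_`.
+//   }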
+
+class FullScreenWindowDetector
+ : public rtc::RefCountedNonVirtual<FullScreenWindowDetector> {
+ public:
+ using ApplicationHandlerFactory =
+ std::function<std::unique_ptr<FullScreenApplicationHandler>(
+ DesktopCapturer::SourceId sourceId)>;
+
+ FullScreenWindowDetector(
+ ApplicationHandlerFactory application_handler_factory);
+
+ FullScreenWindowDetector(const FullScreenWindowDetector&) = delete;
+ FullScreenWindowDetector& operator=(const FullScreenWindowDetector&) = delete;
+
+ // Returns the full-screen window in place of the original window if all the
+ // criteria provided by FullScreenApplicationHandler are met, or 0 if no such
+// window is found.
+ DesktopCapturer::SourceId FindFullScreenWindow(
+ DesktopCapturer::SourceId original_source_id);
+
+ // The caller should call this function periodically; the implementation
+ // will update its internal state no more often than twice per second.
+ void UpdateWindowListIfNeeded(
+ DesktopCapturer::SourceId original_source_id,
+ rtc::FunctionView<bool(DesktopCapturer::SourceList*)> get_sources);
+
+ static rtc::scoped_refptr<FullScreenWindowDetector>
+ CreateFullScreenWindowDetector();
+
+ protected:
+ std::unique_ptr<FullScreenApplicationHandler> app_handler_;
+
+ private:
+ void CreateApplicationHandlerIfNeeded(DesktopCapturer::SourceId source_id);
+
+ ApplicationHandlerFactory application_handler_factory_;
+
+ int64_t last_update_time_ms_;
+ DesktopCapturer::SourceId previous_source_id_;
+
+ // Saves the source id when CreateApplicationHandlerIfNeeded() fails to
+ // create a handler, to avoid redundant attempts to create it again.
+ DesktopCapturer::SourceId no_handler_source_id_;
+
+ DesktopCapturer::SourceList window_list_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_DESKTOP_CAPTURE_FULL_SCREEN_WINDOW_DETECTOR_H_
diff --git a/third_party/libwebrtc/modules/desktop_capture/linux/wayland/base_capturer_pipewire.cc b/third_party/libwebrtc/modules/desktop_capture/linux/wayland/base_capturer_pipewire.cc
new file mode 100644
index 0000000000..e9158bf0cc
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/linux/wayland/base_capturer_pipewire.cc
@@ -0,0 +1,158 @@
+/*
+ * Copyright 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/linux/wayland/base_capturer_pipewire.h"
+
+#include "modules/desktop_capture/desktop_capture_options.h"
+#include "modules/desktop_capture/desktop_capturer.h"
+#include "modules/desktop_capture/linux/wayland/restore_token_manager.h"
+#include "modules/desktop_capture/linux/wayland/xdg_desktop_portal_utils.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/random.h"
+#include "rtc_base/time_utils.h"
+
+namespace webrtc {
+
+namespace {
+
+using xdg_portal::RequestResponse;
+using xdg_portal::ScreenCapturePortalInterface;
+using xdg_portal::SessionDetails;
+
+} // namespace
+
+BaseCapturerPipeWire::BaseCapturerPipeWire(const DesktopCaptureOptions& options)
+ : BaseCapturerPipeWire(
+ options,
+ std::make_unique<ScreenCastPortal>(
+ ScreenCastPortal::CaptureSourceType::kAnyScreenContent,
+ this)) {
+ is_screencast_portal_ = true;
+}
+
+BaseCapturerPipeWire::BaseCapturerPipeWire(
+ const DesktopCaptureOptions& options,
+ std::unique_ptr<ScreenCapturePortalInterface> portal)
+ : options_(options),
+ is_screencast_portal_(false),
+ portal_(std::move(portal)) {
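+ // Pick a random non-zero source id to hand out from GetSourceList(); see
+ // the comment there for why any valid id works with the portal-driven flow.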
+ Random random(rtc::TimeMicros());
+ source_id_ = static_cast<SourceId>(random.Rand(1, INT_MAX));
+}
+
+BaseCapturerPipeWire::~BaseCapturerPipeWire() {}
+
+void BaseCapturerPipeWire::OnScreenCastRequestResult(RequestResponse result,
+ uint32_t stream_node_id,
+ int fd) {
+ if (result != RequestResponse::kSuccess ||
+ !options_.screencast_stream()->StartScreenCastStream(
+ stream_node_id, fd, options_.get_width(), options_.get_height())) {
+ capturer_failed_ = true;
+ RTC_LOG(LS_ERROR) << "ScreenCastPortal failed: "
+ << static_cast<uint>(result);
+ } else if (ScreenCastPortal* screencast_portal = GetScreenCastPortal()) {
+ if (!screencast_portal->RestoreToken().empty()) {
+ RestoreTokenManager::GetInstance().AddToken(
+ source_id_, screencast_portal->RestoreToken());
+ }
+ }
+}
+
+void BaseCapturerPipeWire::OnScreenCastSessionClosed() {
+ if (!capturer_failed_) {
+ options_.screencast_stream()->StopScreenCastStream();
+ }
+}
+
+void BaseCapturerPipeWire::UpdateResolution(uint32_t width, uint32_t height) {
+ if (!capturer_failed_) {
+ options_.screencast_stream()->UpdateScreenCastStreamResolution(width,
+ height);
+ }
+}
+
+void BaseCapturerPipeWire::Start(Callback* callback) {
+ RTC_DCHECK(!callback_);
+ RTC_DCHECK(callback);
+
+ callback_ = callback;
+
+ if (ScreenCastPortal* screencast_portal = GetScreenCastPortal()) {
+ screencast_portal->SetPersistMode(
+ ScreenCastPortal::PersistMode::kTransient);
+ if (selected_source_id_) {
+ screencast_portal->SetRestoreToken(
+ RestoreTokenManager::GetInstance().TakeToken(selected_source_id_));
+ }
+ }
+
+ portal_->Start();
+}
+
+void BaseCapturerPipeWire::CaptureFrame() {
+ if (capturer_failed_) {
+ callback_->OnCaptureResult(Result::ERROR_PERMANENT, nullptr);
+ return;
+ }
+
+ std::unique_ptr<DesktopFrame> frame =
+ options_.screencast_stream()->CaptureFrame();
+
+ if (!frame || !frame->data()) {
+ callback_->OnCaptureResult(Result::ERROR_TEMPORARY, nullptr);
+ return;
+ }
+
+ // TODO(julien.isorce): http://crbug.com/945468. Set the icc profile on
+ // the frame, see ScreenCapturerX11::CaptureFrame.
+
+ callback_->OnCaptureResult(Result::SUCCESS, std::move(frame));
+}
+
+// Keep in sync with defines at browser/actors/WebRTCParent.jsm
+// With PipeWire we can't select which system resource is shared, so
+// we don't create a window/screen list. Instead we place these constants
+// as the window name/id so that frontend code can identify the PipeWire
+// backend and does not try to create a screen/window preview.
+
+#define PIPEWIRE_ID 0xaffffff
+#define PIPEWIRE_NAME "####_PIPEWIRE_PORTAL_####"
+
+bool BaseCapturerPipeWire::GetSourceList(SourceList* sources) {
+ RTC_DCHECK(sources->size() == 0);
+ // List of available screens is already presented by the xdg-desktop-portal,
+ // so we just need a (valid) source id for any callers to pass around, even
+ // though it doesn't mean anything to us. Until the user selects a source in
+ // xdg-desktop-portal we'll just end up returning empty frames. Note that "0"
+ // is often treated as a null/placeholder id, so we shouldn't use that.
+ // TODO(https://crbug.com/1297671): Reconsider type of ID when plumbing
+ // token that will enable stream re-use.
+ sources->push_back({source_id_});
+ return true;
+}
+
+bool BaseCapturerPipeWire::SelectSource(SourceId id) {
+ // Screen selection is handled by the xdg-desktop-portal.
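+  // Remember the id so Start() can later look up a restore token for it.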
+ selected_source_id_ = id;
+ return id == PIPEWIRE_ID;
+}
+
+SessionDetails BaseCapturerPipeWire::GetSessionDetails() {
+ return portal_->GetSessionDetails();
+}
+
+ScreenCastPortal* BaseCapturerPipeWire::GetScreenCastPortal() {
+ return is_screencast_portal_ ? static_cast<ScreenCastPortal*>(portal_.get())
+ : nullptr;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/linux/wayland/base_capturer_pipewire.h b/third_party/libwebrtc/modules/desktop_capture/linux/wayland/base_capturer_pipewire.h
new file mode 100644
index 0000000000..6e18f4d11b
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/linux/wayland/base_capturer_pipewire.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_DESKTOP_CAPTURE_LINUX_WAYLAND_BASE_CAPTURER_PIPEWIRE_H_
+#define MODULES_DESKTOP_CAPTURE_LINUX_WAYLAND_BASE_CAPTURER_PIPEWIRE_H_
+
+#include <memory>
+
+#include "modules/desktop_capture/desktop_capture_options.h"
+#include "modules/desktop_capture/desktop_capturer.h"
+#include "modules/desktop_capture/linux/wayland/portal_request_response.h"
+#include "modules/desktop_capture/linux/wayland/screen_capture_portal_interface.h"
+#include "modules/desktop_capture/linux/wayland/screencast_portal.h"
+#include "modules/desktop_capture/linux/wayland/shared_screencast_stream.h"
+#include "modules/desktop_capture/linux/wayland/xdg_desktop_portal_utils.h"
+#include "modules/desktop_capture/linux/wayland/xdg_session_details.h"
+
+namespace webrtc {
+
+class BaseCapturerPipeWire : public DesktopCapturer,
+ public ScreenCastPortal::PortalNotifier {
+ public:
+ explicit BaseCapturerPipeWire(const DesktopCaptureOptions& options);
+ BaseCapturerPipeWire(
+ const DesktopCaptureOptions& options,
+ std::unique_ptr<xdg_portal::ScreenCapturePortalInterface> portal);
+ ~BaseCapturerPipeWire() override;
+
+ BaseCapturerPipeWire(const BaseCapturerPipeWire&) = delete;
+ BaseCapturerPipeWire& operator=(const BaseCapturerPipeWire&) = delete;
+
+ // DesktopCapturer interface.
+ void Start(Callback* delegate) override;
+ void CaptureFrame() override;
+ bool GetSourceList(SourceList* sources) override;
+ bool SelectSource(SourceId id) override;
+
+ // ScreenCastPortal::PortalNotifier interface.
+ void OnScreenCastRequestResult(xdg_portal::RequestResponse result,
+ uint32_t stream_node_id,
+ int fd) override;
+ void OnScreenCastSessionClosed() override;
+ void UpdateResolution(uint32_t width, uint32_t height) override;
+
+ xdg_portal::SessionDetails GetSessionDetails();
+
+ private:
+ ScreenCastPortal* GetScreenCastPortal();
+
+ DesktopCaptureOptions options_ = {};
+ Callback* callback_ = nullptr;
+ bool capturer_failed_ = false;
+ bool is_screencast_portal_ = false;
+
+ // SourceId that is selected using SelectSource() and that we previously
+ // returned in GetSourceList(). This should be a SourceId that has a restore
+  // token associated with it and can be restored if we have the required
+  // version of xdg-desktop-portal.
+ SourceId selected_source_id_ = 0;
+  // SourceId we randomly generate and return in GetSourceList() as the
+  // available source; it later gets a restore token assigned so that the
+  // stream can be restored via SelectSource().
+ SourceId source_id_ = 0;
+ std::unique_ptr<xdg_portal::ScreenCapturePortalInterface> portal_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_DESKTOP_CAPTURE_LINUX_WAYLAND_BASE_CAPTURER_PIPEWIRE_H_
diff --git a/third_party/libwebrtc/modules/desktop_capture/linux/wayland/drm.sigs b/third_party/libwebrtc/modules/desktop_capture/linux/wayland/drm.sigs
new file mode 100644
index 0000000000..226979fe16
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/linux/wayland/drm.sigs
@@ -0,0 +1,11 @@
+// Copyright 2021 The WebRTC project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+//------------------------------------------------
+// Functions from DRM used in capturer code.
+//------------------------------------------------
+
+// xf86drm.h
+int drmGetDevices2(uint32_t flags, drmDevicePtr devices[], int max_devices);
+void drmFreeDevices(drmDevicePtr devices[], int count);
diff --git a/third_party/libwebrtc/modules/desktop_capture/linux/wayland/egl_dmabuf.cc b/third_party/libwebrtc/modules/desktop_capture/linux/wayland/egl_dmabuf.cc
new file mode 100644
index 0000000000..5bbd5d7aba
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/linux/wayland/egl_dmabuf.cc
@@ -0,0 +1,703 @@
+/*
+ * Copyright 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/linux/wayland/egl_dmabuf.h"
+
+#include <asm/ioctl.h>
+#include <dlfcn.h>
+#include <fcntl.h>
+#include <libdrm/drm_fourcc.h>
+#include <linux/types.h>
+#include <spa/param/video/format-utils.h>
+#include <unistd.h>
+#include <xf86drm.h>
+
+#include "absl/memory/memory.h"
+#include "absl/types/optional.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/sanitizer.h"
+#include "rtc_base/string_encode.h"
+
+namespace webrtc {
+
+// EGL
+typedef EGLBoolean (*eglBindAPI_func)(EGLenum api);
+typedef EGLContext (*eglCreateContext_func)(EGLDisplay dpy,
+ EGLConfig config,
+ EGLContext share_context,
+ const EGLint* attrib_list);
+typedef EGLBoolean (*eglDestroyContext_func)(EGLDisplay display,
+ EGLContext context);
+typedef EGLBoolean (*eglTerminate_func)(EGLDisplay display);
+typedef EGLImageKHR (*eglCreateImageKHR_func)(EGLDisplay dpy,
+ EGLContext ctx,
+ EGLenum target,
+ EGLClientBuffer buffer,
+ const EGLint* attrib_list);
+typedef EGLBoolean (*eglDestroyImageKHR_func)(EGLDisplay dpy,
+ EGLImageKHR image);
+typedef EGLint (*eglGetError_func)(void);
+typedef void* (*eglGetProcAddress_func)(const char*);
+typedef EGLDisplay (*eglGetPlatformDisplayEXT_func)(EGLenum platform,
+ void* native_display,
+ const EGLint* attrib_list);
+typedef EGLDisplay (*eglGetPlatformDisplay_func)(EGLenum platform,
+ void* native_display,
+ const EGLAttrib* attrib_list);
+
+typedef EGLBoolean (*eglInitialize_func)(EGLDisplay dpy,
+ EGLint* major,
+ EGLint* minor);
+typedef EGLBoolean (*eglMakeCurrent_func)(EGLDisplay dpy,
+ EGLSurface draw,
+ EGLSurface read,
+ EGLContext ctx);
+typedef EGLBoolean (*eglQueryDmaBufFormatsEXT_func)(EGLDisplay dpy,
+ EGLint max_formats,
+ EGLint* formats,
+ EGLint* num_formats);
+typedef EGLBoolean (*eglQueryDmaBufModifiersEXT_func)(EGLDisplay dpy,
+ EGLint format,
+ EGLint max_modifiers,
+ EGLuint64KHR* modifiers,
+ EGLBoolean* external_only,
+ EGLint* num_modifiers);
+typedef const char* (*eglQueryString_func)(EGLDisplay dpy, EGLint name);
+typedef void (*glEGLImageTargetTexture2DOES_func)(GLenum target,
+ GLeglImageOES image);
+
+// These don't follow the WebRTC naming convention, which would be e.g.
+// egl_bind_api instead of EglBindAPI; instead they are named after the
+// exported functions they map to, for consistency.
+eglBindAPI_func EglBindAPI = nullptr;
+eglCreateContext_func EglCreateContext = nullptr;
+eglDestroyContext_func EglDestroyContext = nullptr;
+eglTerminate_func EglTerminate = nullptr;
+eglCreateImageKHR_func EglCreateImageKHR = nullptr;
+eglDestroyImageKHR_func EglDestroyImageKHR = nullptr;
+eglGetError_func EglGetError = nullptr;
+eglGetProcAddress_func EglGetProcAddress = nullptr;
+eglGetPlatformDisplayEXT_func EglGetPlatformDisplayEXT = nullptr;
+eglGetPlatformDisplay_func EglGetPlatformDisplay = nullptr;
+eglInitialize_func EglInitialize = nullptr;
+eglMakeCurrent_func EglMakeCurrent = nullptr;
+eglQueryDmaBufFormatsEXT_func EglQueryDmaBufFormatsEXT = nullptr;
+eglQueryDmaBufModifiersEXT_func EglQueryDmaBufModifiersEXT = nullptr;
+eglQueryString_func EglQueryString = nullptr;
+glEGLImageTargetTexture2DOES_func GlEGLImageTargetTexture2DOES = nullptr;
+
+// GL
+typedef void (*glBindTexture_func)(GLenum target, GLuint texture);
+typedef void (*glDeleteTextures_func)(GLsizei n, const GLuint* textures);
+typedef void (*glGenTextures_func)(GLsizei n, GLuint* textures);
+typedef GLenum (*glGetError_func)(void);
+typedef const GLubyte* (*glGetString_func)(GLenum name);
+typedef void (*glGetTexImage_func)(GLenum target,
+ GLint level,
+ GLenum format,
+ GLenum type,
+ void* pixels);
+typedef void (*glTexParameteri_func)(GLenum target, GLenum pname, GLint param);
+typedef void* (*glXGetProcAddressARB_func)(const char*);
+
+// Named after the exported functions they map to, for the same reason as
+// the EGL entry points above.
+glBindTexture_func GlBindTexture = nullptr;
+glDeleteTextures_func GlDeleteTextures = nullptr;
+glGenTextures_func GlGenTextures = nullptr;
+glGetError_func GlGetError = nullptr;
+glGetString_func GlGetString = nullptr;
+glGetTexImage_func GlGetTexImage = nullptr;
+glTexParameteri_func GlTexParameteri = nullptr;
+glXGetProcAddressARB_func GlXGetProcAddressARB = nullptr;
+
+static std::string FormatGLError(GLenum err) {
+ switch (err) {
+ case GL_NO_ERROR:
+ return "GL_NO_ERROR";
+ case GL_INVALID_ENUM:
+ return "GL_INVALID_ENUM";
+ case GL_INVALID_VALUE:
+ return "GL_INVALID_VALUE";
+ case GL_INVALID_OPERATION:
+ return "GL_INVALID_OPERATION";
+ case GL_STACK_OVERFLOW:
+ return "GL_STACK_OVERFLOW";
+ case GL_STACK_UNDERFLOW:
+ return "GL_STACK_UNDERFLOW";
+ case GL_OUT_OF_MEMORY:
+ return "GL_OUT_OF_MEMORY";
+ default:
+ return "GL error code: " + std::to_string(err);
+ }
+}
+
+static std::string FormatEGLError(EGLint err) {
+ switch (err) {
+ case EGL_NOT_INITIALIZED:
+ return "EGL_NOT_INITIALIZED";
+ case EGL_BAD_ACCESS:
+ return "EGL_BAD_ACCESS";
+ case EGL_BAD_ALLOC:
+ return "EGL_BAD_ALLOC";
+ case EGL_BAD_ATTRIBUTE:
+ return "EGL_BAD_ATTRIBUTE";
+ case EGL_BAD_CONTEXT:
+ return "EGL_BAD_CONTEXT";
+ case EGL_BAD_CONFIG:
+ return "EGL_BAD_CONFIG";
+ case EGL_BAD_CURRENT_SURFACE:
+ return "EGL_BAD_CURRENT_SURFACE";
+ case EGL_BAD_DISPLAY:
+ return "EGL_BAD_DISPLAY";
+ case EGL_BAD_SURFACE:
+ return "EGL_BAD_SURFACE";
+ case EGL_BAD_MATCH:
+ return "EGL_BAD_MATCH";
+ case EGL_BAD_PARAMETER:
+ return "EGL_BAD_PARAMETER";
+ case EGL_BAD_NATIVE_PIXMAP:
+ return "EGL_BAD_NATIVE_PIXMAP";
+ case EGL_BAD_NATIVE_WINDOW:
+ return "EGL_BAD_NATIVE_WINDOW";
+ case EGL_CONTEXT_LOST:
+ return "EGL_CONTEXT_LOST";
+ default:
+ return "EGL error code: " + std::to_string(err);
+ }
+}
+
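+// SPA video formats are named after their byte order in memory, while DRM
+// fourccs describe a packed little-endian value, so e.g. SPA RGBA (bytes
+// R, G, B, A) maps to DRM_FORMAT_ABGR8888.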
+static uint32_t SpaPixelFormatToDrmFormat(uint32_t spa_format) {
+ switch (spa_format) {
+ case SPA_VIDEO_FORMAT_RGBA:
+ return DRM_FORMAT_ABGR8888;
+ case SPA_VIDEO_FORMAT_RGBx:
+ return DRM_FORMAT_XBGR8888;
+ case SPA_VIDEO_FORMAT_BGRA:
+ return DRM_FORMAT_ARGB8888;
+ case SPA_VIDEO_FORMAT_BGRx:
+ return DRM_FORMAT_XRGB8888;
+ default:
+ return DRM_FORMAT_INVALID;
+ }
+}
+
+static void CloseLibrary(void*& library) {
+ if (library) {
+ dlclose(library);
+ library = nullptr;
+ }
+}
+
+static void* g_lib_egl = nullptr;
+
+RTC_NO_SANITIZE("cfi-icall")
+static bool OpenEGL() {
+ g_lib_egl = dlopen("libEGL.so.1", RTLD_NOW | RTLD_GLOBAL);
+ if (g_lib_egl) {
+ EglGetProcAddress =
+ (eglGetProcAddress_func)dlsym(g_lib_egl, "eglGetProcAddress");
+ return EglGetProcAddress;
+ }
+
+ return false;
+}
+
+RTC_NO_SANITIZE("cfi-icall")
+static bool LoadEGL() {
+ if (OpenEGL()) {
+ EglBindAPI = (eglBindAPI_func)EglGetProcAddress("eglBindAPI");
+ EglCreateContext =
+ (eglCreateContext_func)EglGetProcAddress("eglCreateContext");
+ EglDestroyContext =
+ (eglDestroyContext_func)EglGetProcAddress("eglDestroyContext");
+ EglTerminate = (eglTerminate_func)EglGetProcAddress("eglTerminate");
+ EglCreateImageKHR =
+ (eglCreateImageKHR_func)EglGetProcAddress("eglCreateImageKHR");
+ EglDestroyImageKHR =
+ (eglDestroyImageKHR_func)EglGetProcAddress("eglDestroyImageKHR");
+ EglGetError = (eglGetError_func)EglGetProcAddress("eglGetError");
+ EglGetPlatformDisplayEXT = (eglGetPlatformDisplayEXT_func)EglGetProcAddress(
+ "eglGetPlatformDisplayEXT");
+ EglGetPlatformDisplay =
+ (eglGetPlatformDisplay_func)EglGetProcAddress("eglGetPlatformDisplay");
+ EglInitialize = (eglInitialize_func)EglGetProcAddress("eglInitialize");
+ EglMakeCurrent = (eglMakeCurrent_func)EglGetProcAddress("eglMakeCurrent");
+ EglQueryString = (eglQueryString_func)EglGetProcAddress("eglQueryString");
+ GlEGLImageTargetTexture2DOES =
+ (glEGLImageTargetTexture2DOES_func)EglGetProcAddress(
+ "glEGLImageTargetTexture2DOES");
+
+ return EglBindAPI && EglCreateContext && EglCreateImageKHR &&
+ EglTerminate && EglDestroyContext && EglDestroyImageKHR &&
+ EglGetError && EglGetPlatformDisplayEXT && EglGetPlatformDisplay &&
+ EglInitialize && EglMakeCurrent && EglQueryString &&
+ GlEGLImageTargetTexture2DOES;
+ }
+
+ return false;
+}
+
+static void* g_lib_gl = nullptr;
+
+RTC_NO_SANITIZE("cfi-icall")
+static bool OpenGL() {
+ std::vector<std::string> names = {"libGL.so.1", "libGL.so"};
+ for (const std::string& name : names) {
+ g_lib_gl = dlopen(name.c_str(), RTLD_NOW | RTLD_GLOBAL);
+ if (g_lib_gl) {
+ GlXGetProcAddressARB =
+ (glXGetProcAddressARB_func)dlsym(g_lib_gl, "glXGetProcAddressARB");
+ return GlXGetProcAddressARB;
+ }
+ }
+
+ return false;
+}
+
+RTC_NO_SANITIZE("cfi-icall")
+static bool LoadGL() {
+ if (OpenGL()) {
+ GlGetString = (glGetString_func)GlXGetProcAddressARB("glGetString");
+ if (!GlGetString) {
+ return false;
+ }
+
+ GlBindTexture = (glBindTexture_func)GlXGetProcAddressARB("glBindTexture");
+ GlDeleteTextures =
+ (glDeleteTextures_func)GlXGetProcAddressARB("glDeleteTextures");
+ GlGenTextures = (glGenTextures_func)GlXGetProcAddressARB("glGenTextures");
+ GlGetError = (glGetError_func)GlXGetProcAddressARB("glGetError");
+ GlGetTexImage = (glGetTexImage_func)GlXGetProcAddressARB("glGetTexImage");
+ GlTexParameteri =
+ (glTexParameteri_func)GlXGetProcAddressARB("glTexParameteri");
+
+ return GlBindTexture && GlDeleteTextures && GlGenTextures && GlGetError &&
+ GlGetTexImage && GlTexParameteri;
+ }
+
+ return false;
+}
+
+RTC_NO_SANITIZE("cfi-icall")
+EglDmaBuf::EglDmaBuf() {
+ if (!LoadEGL()) {
+ RTC_LOG(LS_ERROR) << "Unable to load EGL entry functions.";
+ CloseLibrary(g_lib_egl);
+ return;
+ }
+
+ if (!LoadGL()) {
+ RTC_LOG(LS_ERROR) << "Failed to load OpenGL entry functions.";
+ CloseLibrary(g_lib_gl);
+ return;
+ }
+
+ if (!GetClientExtensions(EGL_NO_DISPLAY, EGL_EXTENSIONS)) {
+ return;
+ }
+
+ bool has_platform_base_ext = false;
+ bool has_platform_gbm_ext = false;
+ bool has_khr_platform_gbm_ext = false;
+
+ for (const auto& extension : egl_.extensions) {
+ if (extension == "EGL_EXT_platform_base") {
+ has_platform_base_ext = true;
+ continue;
+ } else if (extension == "EGL_MESA_platform_gbm") {
+ has_platform_gbm_ext = true;
+ continue;
+ } else if (extension == "EGL_KHR_platform_gbm") {
+ has_khr_platform_gbm_ext = true;
+ continue;
+ }
+ }
+
+ if (!has_platform_base_ext || !has_platform_gbm_ext ||
+ !has_khr_platform_gbm_ext) {
+    RTC_LOG(LS_ERROR) << "One of the required EGL extensions is missing";
+ return;
+ }
+
+ egl_.display = EglGetPlatformDisplay(EGL_PLATFORM_WAYLAND_KHR,
+ (void*)EGL_DEFAULT_DISPLAY, nullptr);
+
+ if (egl_.display == EGL_NO_DISPLAY) {
+    RTC_LOG(LS_ERROR) << "Failed to obtain the default EGL display: "
+                      << FormatEGLError(EglGetError()) << "\n"
+                      << "Defaulting to the first available render node";
+ absl::optional<std::string> render_node = GetRenderNode();
+ if (!render_node) {
+ return;
+ }
+
+ drm_fd_ = open(render_node->c_str(), O_RDWR);
+
+ if (drm_fd_ < 0) {
+ RTC_LOG(LS_ERROR) << "Failed to open drm render node: "
+ << strerror(errno);
+ return;
+ }
+
+ gbm_device_ = gbm_create_device(drm_fd_);
+
+ if (!gbm_device_) {
+ RTC_LOG(LS_ERROR) << "Cannot create GBM device: " << strerror(errno);
+ close(drm_fd_);
+ return;
+ }
+
+ // Use eglGetPlatformDisplayEXT() to get the display pointer
+ // if the implementation supports it.
+ egl_.display =
+ EglGetPlatformDisplayEXT(EGL_PLATFORM_GBM_KHR, gbm_device_, nullptr);
+ }
+
+ if (egl_.display == EGL_NO_DISPLAY) {
+    RTC_LOG(LS_ERROR) << "Error obtaining the EGL display: "
+ << FormatEGLError(EglGetError());
+ return;
+ }
+
+ EGLint major, minor;
+ if (EglInitialize(egl_.display, &major, &minor) == EGL_FALSE) {
+ RTC_LOG(LS_ERROR) << "Error during eglInitialize: "
+ << FormatEGLError(EglGetError());
+ return;
+ }
+
+ if (EglBindAPI(EGL_OPENGL_API) == EGL_FALSE) {
+ RTC_LOG(LS_ERROR) << "bind OpenGL API failed";
+ return;
+ }
+
+ egl_.context =
+ EglCreateContext(egl_.display, nullptr, EGL_NO_CONTEXT, nullptr);
+
+ if (egl_.context == EGL_NO_CONTEXT) {
+ RTC_LOG(LS_ERROR) << "Couldn't create EGL context: "
+                      << FormatEGLError(EglGetError());
+ return;
+ }
+
+ if (!GetClientExtensions(egl_.display, EGL_EXTENSIONS)) {
+ return;
+ }
+
+ bool has_image_dma_buf_import_modifiers_ext = false;
+
+ for (const auto& extension : egl_.extensions) {
+ if (extension == "EGL_EXT_image_dma_buf_import") {
+ has_image_dma_buf_import_ext_ = true;
+ continue;
+ } else if (extension == "EGL_EXT_image_dma_buf_import_modifiers") {
+ has_image_dma_buf_import_modifiers_ext = true;
+ continue;
+ }
+ }
+
+ if (has_image_dma_buf_import_ext_ && has_image_dma_buf_import_modifiers_ext) {
+ EglQueryDmaBufFormatsEXT = (eglQueryDmaBufFormatsEXT_func)EglGetProcAddress(
+ "eglQueryDmaBufFormatsEXT");
+ EglQueryDmaBufModifiersEXT =
+ (eglQueryDmaBufModifiersEXT_func)EglGetProcAddress(
+ "eglQueryDmaBufModifiersEXT");
+ }
+
+  RTC_LOG(LS_INFO) << "EGL initialization succeeded";
+ egl_initialized_ = true;
+}
+
+RTC_NO_SANITIZE("cfi-icall")
+EglDmaBuf::~EglDmaBuf() {
+ if (gbm_device_) {
+ gbm_device_destroy(gbm_device_);
+ close(drm_fd_);
+ }
+
+ if (egl_.context != EGL_NO_CONTEXT) {
+ EglDestroyContext(egl_.display, egl_.context);
+ }
+
+ if (egl_.display != EGL_NO_DISPLAY) {
+ EglTerminate(egl_.display);
+ }
+
+  // BUG: crbug.com/1290566
+  // Closing libEGL.so.1 with the NVidia drivers causes a crash; calling
+  // EglGetPlatformDisplayEXT() alone is already enough to trigger it.
+  // libepoxy and glad apparently don't dlclose() the library either.
+  // CloseLibrary(g_lib_egl);
+  // CloseLibrary(g_lib_gl);
+}
+
+RTC_NO_SANITIZE("cfi-icall")
+bool EglDmaBuf::GetClientExtensions(EGLDisplay dpy, EGLint name) {
+ // Get the list of client extensions
+ const char* client_extensions_cstring = EglQueryString(dpy, name);
+ if (!client_extensions_cstring) {
+ // If eglQueryString() returned NULL, the implementation doesn't support
+ // EGL_EXT_client_extensions. Expect an EGL_BAD_DISPLAY error.
+ RTC_LOG(LS_ERROR) << "No client extensions defined! "
+ << FormatEGLError(EglGetError());
+ return false;
+ }
+
+ std::vector<absl::string_view> client_extensions =
+ rtc::split(client_extensions_cstring, ' ');
+ for (const auto& extension : client_extensions) {
+ egl_.extensions.push_back(std::string(extension));
+ }
+
+ return true;
+}
+
+RTC_NO_SANITIZE("cfi-icall")
+std::unique_ptr<uint8_t[]> EglDmaBuf::ImageFromDmaBuf(
+ const DesktopSize& size,
+ uint32_t format,
+ const std::vector<PlaneData>& plane_datas,
+ uint64_t modifier) {
+ std::unique_ptr<uint8_t[]> src;
+
+ if (!egl_initialized_) {
+ return src;
+ }
+
+  if (plane_datas.empty()) {
+ RTC_LOG(LS_ERROR) << "Failed to process buffer: invalid number of planes";
+ return src;
+ }
+
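+  // 6 size/format attributes, up to 10 attributes per plane (fd, offset,
+  // pitch, and optionally the two halves of the modifier) for 4 planes, plus
+  // the terminating EGL_NONE: 6 + 4 * 10 + 1 = 47.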
+ EGLint attribs[47];
+ int atti = 0;
+
+ attribs[atti++] = EGL_WIDTH;
+ attribs[atti++] = static_cast<EGLint>(size.width());
+ attribs[atti++] = EGL_HEIGHT;
+ attribs[atti++] = static_cast<EGLint>(size.height());
+ attribs[atti++] = EGL_LINUX_DRM_FOURCC_EXT;
+ attribs[atti++] = SpaPixelFormatToDrmFormat(format);
+
+ if (plane_datas.size() > 0) {
+ attribs[atti++] = EGL_DMA_BUF_PLANE0_FD_EXT;
+ attribs[atti++] = plane_datas[0].fd;
+ attribs[atti++] = EGL_DMA_BUF_PLANE0_OFFSET_EXT;
+ attribs[atti++] = plane_datas[0].offset;
+ attribs[atti++] = EGL_DMA_BUF_PLANE0_PITCH_EXT;
+ attribs[atti++] = plane_datas[0].stride;
+
+ if (modifier != DRM_FORMAT_MOD_INVALID) {
+ attribs[atti++] = EGL_DMA_BUF_PLANE0_MODIFIER_LO_EXT;
+ attribs[atti++] = modifier & 0xFFFFFFFF;
+ attribs[atti++] = EGL_DMA_BUF_PLANE0_MODIFIER_HI_EXT;
+ attribs[atti++] = modifier >> 32;
+ }
+ }
+
+ if (plane_datas.size() > 1) {
+ attribs[atti++] = EGL_DMA_BUF_PLANE1_FD_EXT;
+ attribs[atti++] = plane_datas[1].fd;
+ attribs[atti++] = EGL_DMA_BUF_PLANE1_OFFSET_EXT;
+ attribs[atti++] = plane_datas[1].offset;
+ attribs[atti++] = EGL_DMA_BUF_PLANE1_PITCH_EXT;
+ attribs[atti++] = plane_datas[1].stride;
+
+ if (modifier != DRM_FORMAT_MOD_INVALID) {
+ attribs[atti++] = EGL_DMA_BUF_PLANE1_MODIFIER_LO_EXT;
+ attribs[atti++] = modifier & 0xFFFFFFFF;
+ attribs[atti++] = EGL_DMA_BUF_PLANE1_MODIFIER_HI_EXT;
+ attribs[atti++] = modifier >> 32;
+ }
+ }
+
+ if (plane_datas.size() > 2) {
+ attribs[atti++] = EGL_DMA_BUF_PLANE2_FD_EXT;
+ attribs[atti++] = plane_datas[2].fd;
+ attribs[atti++] = EGL_DMA_BUF_PLANE2_OFFSET_EXT;
+ attribs[atti++] = plane_datas[2].offset;
+ attribs[atti++] = EGL_DMA_BUF_PLANE2_PITCH_EXT;
+ attribs[atti++] = plane_datas[2].stride;
+
+ if (modifier != DRM_FORMAT_MOD_INVALID) {
+ attribs[atti++] = EGL_DMA_BUF_PLANE2_MODIFIER_LO_EXT;
+ attribs[atti++] = modifier & 0xFFFFFFFF;
+ attribs[atti++] = EGL_DMA_BUF_PLANE2_MODIFIER_HI_EXT;
+ attribs[atti++] = modifier >> 32;
+ }
+ }
+
+ if (plane_datas.size() > 3) {
+ attribs[atti++] = EGL_DMA_BUF_PLANE3_FD_EXT;
+ attribs[atti++] = plane_datas[3].fd;
+ attribs[atti++] = EGL_DMA_BUF_PLANE3_OFFSET_EXT;
+ attribs[atti++] = plane_datas[3].offset;
+ attribs[atti++] = EGL_DMA_BUF_PLANE3_PITCH_EXT;
+ attribs[atti++] = plane_datas[3].stride;
+
+ if (modifier != DRM_FORMAT_MOD_INVALID) {
+ attribs[atti++] = EGL_DMA_BUF_PLANE3_MODIFIER_LO_EXT;
+ attribs[atti++] = modifier & 0xFFFFFFFF;
+ attribs[atti++] = EGL_DMA_BUF_PLANE3_MODIFIER_HI_EXT;
+ attribs[atti++] = modifier >> 32;
+ }
+ }
+
+ attribs[atti++] = EGL_NONE;
+
+ // bind context to render thread
+ EglMakeCurrent(egl_.display, EGL_NO_SURFACE, EGL_NO_SURFACE, egl_.context);
+
+ // create EGL image from attribute list
+ EGLImageKHR image = EglCreateImageKHR(
+ egl_.display, EGL_NO_CONTEXT, EGL_LINUX_DMA_BUF_EXT, nullptr, attribs);
+
+ if (image == EGL_NO_IMAGE) {
+ RTC_LOG(LS_ERROR) << "Failed to record frame: Error creating EGLImage - "
+ << FormatEGLError(EglGetError());
+ return src;
+ }
+
+ // create GL 2D texture for framebuffer
+ GLuint texture;
+ GlGenTextures(1, &texture);
+ GlTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
+ GlTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
+ GlTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+ GlTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+ GlBindTexture(GL_TEXTURE_2D, texture);
+ GlEGLImageTargetTexture2DOES(GL_TEXTURE_2D, image);
+
+ src = std::make_unique<uint8_t[]>(plane_datas[0].stride * size.height());
+
+ GLenum gl_format = GL_BGRA;
+ switch (format) {
+ case SPA_VIDEO_FORMAT_RGBx:
+ gl_format = GL_RGBA;
+ break;
+ case SPA_VIDEO_FORMAT_RGBA:
+ gl_format = GL_RGBA;
+ break;
+ case SPA_VIDEO_FORMAT_BGRx:
+ gl_format = GL_BGRA;
+ break;
+ default:
+ gl_format = GL_BGRA;
+ break;
+ }
+ GlGetTexImage(GL_TEXTURE_2D, 0, gl_format, GL_UNSIGNED_BYTE, src.get());
+
+  if (GlGetError()) {
+    RTC_LOG(LS_ERROR) << "Failed to get image from DMA buffer.";
+    GlDeleteTextures(1, &texture);
+    EglDestroyImageKHR(egl_.display, image);
+    return src;
+  }
+
+ GlDeleteTextures(1, &texture);
+ EglDestroyImageKHR(egl_.display, image);
+
+ return src;
+}
+
+RTC_NO_SANITIZE("cfi-icall")
+std::vector<uint64_t> EglDmaBuf::QueryDmaBufModifiers(uint32_t format) {
+ if (!egl_initialized_) {
+ return {};
+ }
+
+  // Explicit modifiers are not supported; return just DRM_FORMAT_MOD_INVALID,
+  // as we can still use modifier-less DMA-BUFs if we have the required
+  // extension.
+ if (EglQueryDmaBufFormatsEXT == nullptr ||
+ EglQueryDmaBufModifiersEXT == nullptr) {
+ return has_image_dma_buf_import_ext_
+ ? std::vector<uint64_t>{DRM_FORMAT_MOD_INVALID}
+ : std::vector<uint64_t>{};
+ }
+
+ uint32_t drm_format = SpaPixelFormatToDrmFormat(format);
+  // Should never happen, as we control the list of supported formats.
+ RTC_DCHECK(drm_format != DRM_FORMAT_INVALID);
+
+ EGLint count = 0;
+ EGLBoolean success =
+ EglQueryDmaBufFormatsEXT(egl_.display, 0, nullptr, &count);
+
+ if (!success || !count) {
+ RTC_LOG(LS_WARNING) << "Cannot query the number of formats.";
+ return {DRM_FORMAT_MOD_INVALID};
+ }
+
+ std::vector<uint32_t> formats(count);
+ if (!EglQueryDmaBufFormatsEXT(egl_.display, count,
+ reinterpret_cast<EGLint*>(formats.data()),
+ &count)) {
+ RTC_LOG(LS_WARNING) << "Cannot query a list of formats.";
+ return {DRM_FORMAT_MOD_INVALID};
+ }
+
+ if (std::find(formats.begin(), formats.end(), drm_format) == formats.end()) {
+ RTC_LOG(LS_WARNING) << "Format " << drm_format
+ << " not supported for modifiers.";
+ return {DRM_FORMAT_MOD_INVALID};
+ }
+
+ success = EglQueryDmaBufModifiersEXT(egl_.display, drm_format, 0, nullptr,
+ nullptr, &count);
+
+ if (!success || !count) {
+ RTC_LOG(LS_WARNING) << "Cannot query the number of modifiers.";
+ return {DRM_FORMAT_MOD_INVALID};
+ }
+
+ std::vector<uint64_t> modifiers(count);
+ if (!EglQueryDmaBufModifiersEXT(egl_.display, drm_format, count,
+ modifiers.data(), nullptr, &count)) {
+ RTC_LOG(LS_WARNING) << "Cannot query a list of modifiers.";
+ }
+
+ // Support modifier-less buffers
+ modifiers.push_back(DRM_FORMAT_MOD_INVALID);
+ return modifiers;
+}
+
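+// Returns the first available DRM render node (e.g. /dev/dri/renderD128),
+// used to create a GBM device when no default EGL display is available.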
+absl::optional<std::string> EglDmaBuf::GetRenderNode() {
+ int max_devices = drmGetDevices2(0, nullptr, 0);
+ if (max_devices <= 0) {
+ RTC_LOG(LS_ERROR) << "drmGetDevices2() has not found any devices (errno="
+ << -max_devices << ")";
+ return absl::nullopt;
+ }
+
+ std::vector<drmDevicePtr> devices(max_devices);
+ int ret = drmGetDevices2(0, devices.data(), max_devices);
+ if (ret < 0) {
+ RTC_LOG(LS_ERROR) << "drmGetDevices2() returned an error " << ret;
+ return absl::nullopt;
+ }
+
+ std::string render_node;
+
+ for (const drmDevicePtr& device : devices) {
+ if (device->available_nodes & (1 << DRM_NODE_RENDER)) {
+ render_node = device->nodes[DRM_NODE_RENDER];
+ break;
+ }
+ }
+
+ drmFreeDevices(devices.data(), ret);
+ return render_node;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/linux/wayland/egl_dmabuf.h b/third_party/libwebrtc/modules/desktop_capture/linux/wayland/egl_dmabuf.h
new file mode 100644
index 0000000000..f1d96b2f80
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/linux/wayland/egl_dmabuf.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_DESKTOP_CAPTURE_LINUX_WAYLAND_EGL_DMABUF_H_
+#define MODULES_DESKTOP_CAPTURE_LINUX_WAYLAND_EGL_DMABUF_H_
+
+#include <epoxy/egl.h>
+#include <epoxy/gl.h>
+#include <gbm.h>
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "modules/desktop_capture/desktop_geometry.h"
+
+namespace webrtc {
+
+class EglDmaBuf {
+ public:
+ struct EGLStruct {
+ std::vector<std::string> extensions;
+ EGLDisplay display = EGL_NO_DISPLAY;
+ EGLContext context = EGL_NO_CONTEXT;
+ };
+
+ struct PlaneData {
+ int32_t fd;
+ uint32_t stride;
+ uint32_t offset;
+ };
+
+ EglDmaBuf();
+ ~EglDmaBuf();
+
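+  // Maps the DMA-BUF planes into an EGLImage, samples it into a GL texture
+  // and returns a copy of the pixels, or nullptr on failure.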
+ std::unique_ptr<uint8_t[]> ImageFromDmaBuf(
+ const DesktopSize& size,
+ uint32_t format,
+ const std::vector<PlaneData>& plane_datas,
+ uint64_t modifiers);
+ std::vector<uint64_t> QueryDmaBufModifiers(uint32_t format);
+
+ bool IsEglInitialized() const { return egl_initialized_; }
+
+ private:
+ bool GetClientExtensions(EGLDisplay dpy, EGLint name);
+
+ bool egl_initialized_ = false;
+ bool has_image_dma_buf_import_ext_ = false;
+ int32_t drm_fd_ = -1; // for GBM buffer mmap
+ gbm_device* gbm_device_ = nullptr; // for passed GBM buffer retrieval
+
+ EGLStruct egl_;
+
+ absl::optional<std::string> GetRenderNode();
+};
+
+} // namespace webrtc
+
+#endif // MODULES_DESKTOP_CAPTURE_LINUX_WAYLAND_EGL_DMABUF_H_
diff --git a/third_party/libwebrtc/modules/desktop_capture/linux/wayland/mouse_cursor_monitor_pipewire.cc b/third_party/libwebrtc/modules/desktop_capture/linux/wayland/mouse_cursor_monitor_pipewire.cc
new file mode 100644
index 0000000000..3d33b0fbb8
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/linux/wayland/mouse_cursor_monitor_pipewire.cc
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/linux/wayland/mouse_cursor_monitor_pipewire.h"
+
+#include <utility>
+
+#include "modules/desktop_capture/desktop_capture_options.h"
+#include "modules/desktop_capture/desktop_capturer.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+MouseCursorMonitorPipeWire::MouseCursorMonitorPipeWire(
+ const DesktopCaptureOptions& options)
+ : options_(options) {
+ sequence_checker_.Detach();
+}
+
+MouseCursorMonitorPipeWire::~MouseCursorMonitorPipeWire() {}
+
+void MouseCursorMonitorPipeWire::Init(Callback* callback, Mode mode) {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+ RTC_DCHECK(!callback_);
+ RTC_DCHECK(callback);
+
+ callback_ = callback;
+ mode_ = mode;
+}
+
+void MouseCursorMonitorPipeWire::Capture() {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+ RTC_DCHECK(callback_);
+
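+  // Cursor data is delivered out of band by the shared screencast stream;
+  // ownership of the MouseCursor is transferred to the callback.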
+ std::unique_ptr<MouseCursor> mouse_cursor =
+ options_.screencast_stream()->CaptureCursor();
+
+ if (mouse_cursor && mouse_cursor->image()->data()) {
+ callback_->OnMouseCursor(mouse_cursor.release());
+ }
+
+ if (mode_ == SHAPE_AND_POSITION) {
+ absl::optional<DesktopVector> mouse_cursor_position =
+ options_.screencast_stream()->CaptureCursorPosition();
+ if (mouse_cursor_position) {
+ callback_->OnMouseCursorPosition(mouse_cursor_position.value());
+ }
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/linux/wayland/mouse_cursor_monitor_pipewire.h b/third_party/libwebrtc/modules/desktop_capture/linux/wayland/mouse_cursor_monitor_pipewire.h
new file mode 100644
index 0000000000..da670bece9
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/linux/wayland/mouse_cursor_monitor_pipewire.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_DESKTOP_CAPTURE_LINUX_WAYLAND_MOUSE_CURSOR_MONITOR_PIPEWIRE_H_
+#define MODULES_DESKTOP_CAPTURE_LINUX_WAYLAND_MOUSE_CURSOR_MONITOR_PIPEWIRE_H_
+
+#include <memory>
+
+#include "api/scoped_refptr.h"
+#include "api/sequence_checker.h"
+#include "modules/desktop_capture/desktop_capture_options.h"
+#include "modules/desktop_capture/desktop_capture_types.h"
+#include "modules/desktop_capture/linux/wayland/shared_screencast_stream.h"
+#include "modules/desktop_capture/mouse_cursor.h"
+#include "modules/desktop_capture/mouse_cursor_monitor.h"
+#include "rtc_base/system/no_unique_address.h"
+
+namespace webrtc {
+
+class MouseCursorMonitorPipeWire : public MouseCursorMonitor {
+ public:
+ explicit MouseCursorMonitorPipeWire(const DesktopCaptureOptions& options);
+ ~MouseCursorMonitorPipeWire() override;
+
+ // MouseCursorMonitor:
+ void Init(Callback* callback, Mode mode) override;
+ void Capture() override;
+
+ DesktopCaptureOptions options_ RTC_GUARDED_BY(sequence_checker_);
+ Callback* callback_ RTC_GUARDED_BY(sequence_checker_) = nullptr;
+ Mode mode_ RTC_GUARDED_BY(sequence_checker_) = SHAPE_AND_POSITION;
+ RTC_NO_UNIQUE_ADDRESS SequenceChecker sequence_checker_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_DESKTOP_CAPTURE_LINUX_WAYLAND_MOUSE_CURSOR_MONITOR_PIPEWIRE_H_
diff --git a/third_party/libwebrtc/modules/desktop_capture/linux/wayland/moz_base_capturer_pipewire.cc b/third_party/libwebrtc/modules/desktop_capture/linux/wayland/moz_base_capturer_pipewire.cc
new file mode 100644
index 0000000000..9bd7cec7ff
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/linux/wayland/moz_base_capturer_pipewire.cc
@@ -0,0 +1,1081 @@
+/*
+ * Copyright 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/linux/wayland/moz_base_capturer_pipewire.h"
+
+#include <gio/gunixfdlist.h>
+#include <glib-object.h>
+#include <spa/param/format-utils.h>
+#include <spa/param/props.h>
+
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <sys/syscall.h>
+
+#include <cstring>
+#include <memory>
+#include <utility>
+
+#include "absl/memory/memory.h"
+#include "modules/desktop_capture/desktop_capture_options.h"
+#include "modules/desktop_capture/desktop_capturer.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+#if defined(WEBRTC_DLOPEN_PIPEWIRE)
+#include "modules/desktop_capture/linux/pipewire_stubs.h"
+using modules_desktop_capture_linux::InitializeStubs;
+using modules_desktop_capture_linux::kModulePipewire03;
+using modules_desktop_capture_linux::StubPathMap;
+#endif // defined(WEBRTC_DLOPEN_PIPEWIRE)
+
+namespace webrtc {
+
+const char kDesktopBusName[] = "org.freedesktop.portal.Desktop";
+const char kDesktopObjectPath[] = "/org/freedesktop/portal/desktop";
+const char kDesktopRequestObjectPath[] =
+ "/org/freedesktop/portal/desktop/request";
+const char kSessionInterfaceName[] = "org.freedesktop.portal.Session";
+const char kRequestInterfaceName[] = "org.freedesktop.portal.Request";
+const char kScreenCastInterfaceName[] = "org.freedesktop.portal.ScreenCast";
+
+const int kBytesPerPixel = 4;
+
+#if defined(WEBRTC_DLOPEN_PIPEWIRE)
+const char kPipeWireLib[] = "libpipewire-0.3.so.0";
+#endif
+
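+// Local copies of the DMA-BUF synchronization ABI from <linux/dma-buf.h>, so
+// that building doesn't require recent kernel headers.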
+struct dma_buf_sync {
+ uint64_t flags;
+};
+#define DMA_BUF_SYNC_READ (1 << 0)
+#define DMA_BUF_SYNC_START (0 << 2)
+#define DMA_BUF_SYNC_END (1 << 2)
+#define DMA_BUF_BASE 'b'
+#define DMA_BUF_IOCTL_SYNC _IOW(DMA_BUF_BASE, 0, struct dma_buf_sync)
+
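+// Wraps DMA_BUF_IOCTL_SYNC (retrying on EINTR) so that CPU reads of a
+// DMA-BUF can be bracketed between DMA_BUF_SYNC_START and DMA_BUF_SYNC_END.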
+static void SyncDmaBuf(int fd, uint64_t start_or_end) {
+ struct dma_buf_sync sync = {0};
+
+ sync.flags = start_or_end | DMA_BUF_SYNC_READ;
+
+ while (true) {
+ int ret;
+ ret = ioctl(fd, DMA_BUF_IOCTL_SYNC, &sync);
+ if (ret == -1 && errno == EINTR) {
+ continue;
+ } else if (ret == -1) {
+ RTC_LOG(LS_ERROR) << "Failed to synchronize DMA buffer: "
+ << g_strerror(errno);
+ break;
+ } else {
+ break;
+ }
+ }
+}
+
+class ScopedBuf {
+ public:
+ ScopedBuf() {}
+ ScopedBuf(unsigned char* map, int map_size, bool is_dma_buf, int fd)
+ : map_(map), map_size_(map_size), is_dma_buf_(is_dma_buf), fd_(fd) {}
+ ~ScopedBuf() {
+ if (map_ != MAP_FAILED) {
+ if (is_dma_buf_) {
+ SyncDmaBuf(fd_, DMA_BUF_SYNC_END);
+ }
+ munmap(map_, map_size_);
+ }
+ }
+
+ operator bool() { return map_ != MAP_FAILED; }
+
+ void initialize(unsigned char* map, int map_size, bool is_dma_buf, int fd) {
+ map_ = map;
+ map_size_ = map_size;
+ is_dma_buf_ = is_dma_buf;
+ fd_ = fd;
+ }
+
+ unsigned char* get() { return map_; }
+
+ protected:
+  unsigned char* map_ = static_cast<unsigned char*>(MAP_FAILED);
+  int map_size_ = 0;
+  bool is_dma_buf_ = false;
+  int fd_ = -1;
+};
+
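+// Minimal scope guard for GLib/GObject types. The primary template's
+// destructor is deliberately unreachable so that every wrapped type must
+// provide an explicit specialization below.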
+template <class T>
+class Scoped {
+ public:
+ Scoped() {}
+ explicit Scoped(T* val) { ptr_ = val; }
+ ~Scoped() { RTC_DCHECK_NOTREACHED(); }
+
+ T* operator->() { return ptr_; }
+
+ bool operator!() { return ptr_ == nullptr; }
+
+ T* get() { return ptr_; }
+
+ T** receive() {
+ RTC_CHECK(!ptr_);
+ return &ptr_;
+ }
+
+ Scoped& operator=(T* val) {
+ ptr_ = val;
+ return *this;
+ }
+
+ protected:
+ T* ptr_ = nullptr;
+};
+
+template <>
+Scoped<GError>::~Scoped() {
+ if (ptr_) {
+ g_error_free(ptr_);
+ }
+}
+
+template <>
+Scoped<gchar>::~Scoped() {
+ if (ptr_) {
+ g_free(ptr_);
+ }
+}
+
+template <>
+Scoped<GVariant>::~Scoped() {
+ if (ptr_) {
+ g_variant_unref(ptr_);
+ }
+}
+
+template <>
+Scoped<GVariantIter>::~Scoped() {
+ if (ptr_) {
+ g_variant_iter_free(ptr_);
+ }
+}
+
+template <>
+Scoped<GDBusMessage>::~Scoped() {
+ if (ptr_) {
+ g_object_unref(ptr_);
+ }
+}
+
+template <>
+Scoped<GUnixFDList>::~Scoped() {
+ if (ptr_) {
+ g_object_unref(ptr_);
+ }
+}
+
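+// static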
+void BaseCapturerPipeWire::OnCoreError(void* data,
+ uint32_t id,
+ int seq,
+ int res,
+ const char* message) {
+ BaseCapturerPipeWire* that = static_cast<BaseCapturerPipeWire*>(data);
+ RTC_DCHECK(that);
+
+ RTC_LOG(LS_ERROR) << "PipeWire remote error: " << message;
+}
+
+// static
+void BaseCapturerPipeWire::OnStreamStateChanged(void* data,
+ pw_stream_state old_state,
+ pw_stream_state state,
+ const char* error_message) {
+ BaseCapturerPipeWire* that = static_cast<BaseCapturerPipeWire*>(data);
+ RTC_DCHECK(that);
+
+ switch (state) {
+ case PW_STREAM_STATE_ERROR:
+ RTC_LOG(LS_ERROR) << "PipeWire stream state error: " << error_message;
+ break;
+ case PW_STREAM_STATE_PAUSED:
+ case PW_STREAM_STATE_STREAMING:
+ case PW_STREAM_STATE_UNCONNECTED:
+ case PW_STREAM_STATE_CONNECTING:
+ break;
+ }
+}
+
+// static
+void BaseCapturerPipeWire::OnStreamParamChanged(void* data,
+ uint32_t id,
+ const struct spa_pod* format) {
+ BaseCapturerPipeWire* that = static_cast<BaseCapturerPipeWire*>(data);
+ RTC_DCHECK(that);
+
+ RTC_LOG(LS_INFO) << "PipeWire stream format changed.";
+
+ if (!format || id != SPA_PARAM_Format) {
+ return;
+ }
+
+ spa_format_video_raw_parse(format, &that->spa_video_format_);
+
+ auto width = that->spa_video_format_.size.width;
+ auto height = that->spa_video_format_.size.height;
+  // kBytesPerPixel must be fully qualified to be able to build in the
+  // non-unified environment, see Bug 1725145.
+ auto stride = SPA_ROUND_UP_N(width * BasicDesktopFrame::kBytesPerPixel, 4);
+ auto size = height * stride;
+
+ that->desktop_size_ = DesktopSize(width, height);
+
+ uint8_t buffer[1024] = {};
+ auto builder = spa_pod_builder{buffer, sizeof(buffer)};
+
+ // Setup buffers and meta header for new format.
+ const struct spa_pod* params[3];
+ params[0] = reinterpret_cast<spa_pod*>(spa_pod_builder_add_object(
+ &builder, SPA_TYPE_OBJECT_ParamBuffers, SPA_PARAM_Buffers,
+ SPA_PARAM_BUFFERS_dataType,
+      SPA_POD_CHOICE_FLAGS_Int((1 << SPA_DATA_MemPtr) |
+                               (1 << SPA_DATA_MemFd)),
+ SPA_PARAM_BUFFERS_size, SPA_POD_Int(size), SPA_PARAM_BUFFERS_stride,
+ SPA_POD_Int(stride), SPA_PARAM_BUFFERS_buffers,
+ SPA_POD_CHOICE_RANGE_Int(8, 1, 32)));
+ params[1] = reinterpret_cast<spa_pod*>(spa_pod_builder_add_object(
+ &builder, SPA_TYPE_OBJECT_ParamMeta, SPA_PARAM_Meta, SPA_PARAM_META_type,
+ SPA_POD_Id(SPA_META_Header), SPA_PARAM_META_size,
+ SPA_POD_Int(sizeof(struct spa_meta_header))));
+ params[2] = reinterpret_cast<spa_pod*>(spa_pod_builder_add_object(
+ &builder, SPA_TYPE_OBJECT_ParamMeta, SPA_PARAM_Meta, SPA_PARAM_META_type,
+ SPA_POD_Id(SPA_META_VideoCrop), SPA_PARAM_META_size,
+ SPA_POD_Int(sizeof(struct spa_meta_region))));
+ pw_stream_update_params(that->pw_stream_, params, 3);
+}
+
+// static
+void BaseCapturerPipeWire::OnStreamProcess(void* data) {
+ BaseCapturerPipeWire* that = static_cast<BaseCapturerPipeWire*>(data);
+ RTC_DCHECK(that);
+
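+  // Drain the queue, keeping only the most recent buffer and returning older
+  // ones to PipeWire immediately, so we always process the freshest frame.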
+ struct pw_buffer* next_buffer;
+ struct pw_buffer* buffer = nullptr;
+
+ next_buffer = pw_stream_dequeue_buffer(that->pw_stream_);
+ while (next_buffer) {
+ buffer = next_buffer;
+ next_buffer = pw_stream_dequeue_buffer(that->pw_stream_);
+
+ if (next_buffer) {
+ pw_stream_queue_buffer(that->pw_stream_, buffer);
+ }
+ }
+
+ if (!buffer) {
+ return;
+ }
+
+ that->HandleBuffer(buffer);
+
+ pw_stream_queue_buffer(that->pw_stream_, buffer);
+}
+
+BaseCapturerPipeWire::BaseCapturerPipeWire(CaptureSourceType source_type)
+ : capture_source_type_(source_type) {}
+
+BaseCapturerPipeWire::~BaseCapturerPipeWire() {
+ if (pw_main_loop_) {
+ pw_thread_loop_stop(pw_main_loop_);
+ }
+
+ if (pw_stream_) {
+ pw_stream_destroy(pw_stream_);
+ }
+
+ if (pw_core_) {
+ pw_core_disconnect(pw_core_);
+ }
+
+ if (pw_context_) {
+ pw_context_destroy(pw_context_);
+ }
+
+ if (pw_main_loop_) {
+ pw_thread_loop_destroy(pw_main_loop_);
+ }
+
+ if (start_request_signal_id_) {
+ g_dbus_connection_signal_unsubscribe(connection_, start_request_signal_id_);
+ }
+ if (sources_request_signal_id_) {
+ g_dbus_connection_signal_unsubscribe(connection_,
+ sources_request_signal_id_);
+ }
+ if (session_request_signal_id_) {
+ g_dbus_connection_signal_unsubscribe(connection_,
+ session_request_signal_id_);
+ }
+
+ if (session_handle_) {
+ Scoped<GDBusMessage> message(g_dbus_message_new_method_call(
+ kDesktopBusName, session_handle_, kSessionInterfaceName, "Close"));
+ if (message.get()) {
+ Scoped<GError> error;
+ g_dbus_connection_send_message(connection_, message.get(),
+ G_DBUS_SEND_MESSAGE_FLAGS_NONE,
+ /*out_serial=*/nullptr, error.receive());
+ if (error.get()) {
+ RTC_LOG(LS_ERROR) << "Failed to close the session: " << error->message;
+ }
+ }
+ }
+
+ g_free(start_handle_);
+ g_free(sources_handle_);
+ g_free(session_handle_);
+ g_free(portal_handle_);
+
+ if (cancellable_) {
+ g_cancellable_cancel(cancellable_);
+ g_object_unref(cancellable_);
+ cancellable_ = nullptr;
+ }
+
+ if (proxy_) {
+ g_object_unref(proxy_);
+ proxy_ = nullptr;
+ }
+
+ if (pw_fd_ != -1) {
+ close(pw_fd_);
+ }
+}
+
+void BaseCapturerPipeWire::InitPortal() {
+ cancellable_ = g_cancellable_new();
+ g_dbus_proxy_new_for_bus(
+ G_BUS_TYPE_SESSION, G_DBUS_PROXY_FLAGS_NONE, /*info=*/nullptr,
+ kDesktopBusName, kDesktopObjectPath, kScreenCastInterfaceName,
+ cancellable_,
+ reinterpret_cast<GAsyncReadyCallback>(OnProxyRequested), this);
+}
+
+void BaseCapturerPipeWire::InitPipeWire() {
+#if defined(WEBRTC_DLOPEN_PIPEWIRE)
+ StubPathMap paths;
+
+ // Check if the PipeWire library is available.
+ paths[kModulePipewire03].push_back(kPipeWireLib);
+ if (!InitializeStubs(paths)) {
+ RTC_LOG(LS_ERROR) << "Failed to load the PipeWire library and symbols.";
+ portal_init_failed_ = true;
+ return;
+ }
+#endif // defined(WEBRTC_DLOPEN_PIPEWIRE)
+
+  pw_init(/*argc=*/nullptr, /*argv=*/nullptr);
+
+ pw_main_loop_ = pw_thread_loop_new("pipewire-main-loop", nullptr);
+
+ pw_thread_loop_lock(pw_main_loop_);
+
+ pw_context_ =
+ pw_context_new(pw_thread_loop_get_loop(pw_main_loop_), nullptr, 0);
+ if (!pw_context_) {
+ RTC_LOG(LS_ERROR) << "Failed to create PipeWire context";
+ return;
+ }
+
+ pw_core_ = pw_context_connect_fd(pw_context_, pw_fd_, nullptr, 0);
+ if (!pw_core_) {
+ RTC_LOG(LS_ERROR) << "Failed to connect PipeWire context";
+ return;
+ }
+
+  // Initialize the event handlers for both the remote core and the stream.
+ pw_core_events_.version = PW_VERSION_CORE_EVENTS;
+ pw_core_events_.error = &OnCoreError;
+
+ pw_stream_events_.version = PW_VERSION_STREAM_EVENTS;
+ pw_stream_events_.state_changed = &OnStreamStateChanged;
+ pw_stream_events_.param_changed = &OnStreamParamChanged;
+ pw_stream_events_.process = &OnStreamProcess;
+
+ pw_core_add_listener(pw_core_, &spa_core_listener_, &pw_core_events_, this);
+
+ pw_stream_ = CreateReceivingStream();
+ if (!pw_stream_) {
+ RTC_LOG(LS_ERROR) << "Failed to create PipeWire stream";
+ return;
+ }
+
+ if (pw_thread_loop_start(pw_main_loop_) < 0) {
+ RTC_LOG(LS_ERROR) << "Failed to start main PipeWire loop";
+ portal_init_failed_ = true;
+ }
+
+ pw_thread_loop_unlock(pw_main_loop_);
+
+ RTC_LOG(LS_INFO) << "PipeWire remote opened.";
+}
+
+pw_stream* BaseCapturerPipeWire::CreateReceivingStream() {
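+  // Accept any video size between 1x1 and the largest representable
+  // rectangle; the actual size is negotiated in OnStreamParamChanged().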
+ spa_rectangle pwMinScreenBounds = spa_rectangle{1, 1};
+ spa_rectangle pwMaxScreenBounds = spa_rectangle{UINT32_MAX, UINT32_MAX};
+
+ pw_properties* reuseProps =
+ pw_properties_new_string("pipewire.client.reuse=1");
+ auto stream = pw_stream_new(pw_core_, "webrtc-consume-stream", reuseProps);
+
+ uint8_t buffer[1024] = {};
+ const spa_pod* params[1];
+ spa_pod_builder builder = spa_pod_builder{buffer, sizeof(buffer)};
+
+ params[0] = reinterpret_cast<spa_pod*>(spa_pod_builder_add_object(
+ &builder, SPA_TYPE_OBJECT_Format, SPA_PARAM_EnumFormat,
+ SPA_FORMAT_mediaType, SPA_POD_Id(SPA_MEDIA_TYPE_video),
+ SPA_FORMAT_mediaSubtype, SPA_POD_Id(SPA_MEDIA_SUBTYPE_raw),
+ SPA_FORMAT_VIDEO_format,
+ SPA_POD_CHOICE_ENUM_Id(5, SPA_VIDEO_FORMAT_BGRx, SPA_VIDEO_FORMAT_RGBx,
+ SPA_VIDEO_FORMAT_RGBA, SPA_VIDEO_FORMAT_BGRx,
+ SPA_VIDEO_FORMAT_BGRA),
+ SPA_FORMAT_VIDEO_size,
+ SPA_POD_CHOICE_RANGE_Rectangle(&pwMinScreenBounds, &pwMinScreenBounds,
+ &pwMaxScreenBounds),
+ 0));
+
+ pw_stream_add_listener(stream, &spa_stream_listener_, &pw_stream_events_,
+ this);
+ if (pw_stream_connect(stream, PW_DIRECTION_INPUT, pw_stream_node_id_,
+ PW_STREAM_FLAG_AUTOCONNECT, params, 1) != 0) {
+ RTC_LOG(LS_ERROR) << "Could not connect receiving stream.";
+ portal_init_failed_ = true;
+ return nullptr;
+ }
+
+ return stream;
+}
+
+void BaseCapturerPipeWire::HandleBuffer(pw_buffer* buffer) {
+ spa_buffer* spaBuffer = buffer->buffer;
+ ScopedBuf map;
+ uint8_t* src = nullptr;
+
+ if (spaBuffer->datas[0].chunk->size == 0) {
+ RTC_LOG(LS_ERROR) << "Failed to get video stream: Zero size.";
+ return;
+ }
+
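+  // MemFd and DmaBuf buffers need to be mmap()ed first (DMA-BUF reads are
+  // additionally bracketed by DMA_BUF_IOCTL_SYNC), while MemPtr buffers can
+  // be read directly.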
+ if (spaBuffer->datas[0].type == SPA_DATA_MemFd ||
+ spaBuffer->datas[0].type == SPA_DATA_DmaBuf) {
+ map.initialize(
+ static_cast<uint8_t*>(
+ mmap(nullptr,
+ spaBuffer->datas[0].maxsize + spaBuffer->datas[0].mapoffset,
+ PROT_READ, MAP_PRIVATE, spaBuffer->datas[0].fd, 0)),
+ spaBuffer->datas[0].maxsize + spaBuffer->datas[0].mapoffset,
+ spaBuffer->datas[0].type == SPA_DATA_DmaBuf,
+ spaBuffer->datas[0].fd);
+
+ if (!map) {
+ RTC_LOG(LS_ERROR) << "Failed to mmap the memory: "
+ << std::strerror(errno);
+ return;
+ }
+
+ if (spaBuffer->datas[0].type == SPA_DATA_DmaBuf) {
+ SyncDmaBuf(spaBuffer->datas[0].fd, DMA_BUF_SYNC_START);
+ }
+
+ src = SPA_MEMBER(map.get(), spaBuffer->datas[0].mapoffset, uint8_t);
+ } else if (spaBuffer->datas[0].type == SPA_DATA_MemPtr) {
+ src = static_cast<uint8_t*>(spaBuffer->datas[0].data);
+ }
+
+ if (!src) {
+ return;
+ }
+
+ struct spa_meta_region* video_metadata =
+ static_cast<struct spa_meta_region*>(spa_buffer_find_meta_data(
+ spaBuffer, SPA_META_VideoCrop, sizeof(*video_metadata)));
+
+  // If the video size from the metadata is bigger than the actual video
+  // stream size, either the metadata is wrong or we would have to up-scale
+  // the video; in both cases just quit now.
+ if (video_metadata && (video_metadata->region.size.width >
+ static_cast<uint32_t>(desktop_size_.width()) ||
+ video_metadata->region.size.height >
+ static_cast<uint32_t>(desktop_size_.height()))) {
+ RTC_LOG(LS_ERROR) << "Stream metadata sizes are wrong!";
+ return;
+ }
+
+  // Use the video metadata when its size is set and smaller than the video
+  // stream size, in which case the frame needs to be cropped.
+ bool video_metadata_use = false;
+
+ const struct spa_rectangle* video_metadata_size =
+ video_metadata ? &video_metadata->region.size : nullptr;
+
+ if (video_metadata_size && video_metadata_size->width != 0 &&
+ video_metadata_size->height != 0 &&
+ (static_cast<int>(video_metadata_size->width) < desktop_size_.width() ||
+ static_cast<int>(video_metadata_size->height) <
+ desktop_size_.height())) {
+ video_metadata_use = true;
+ }
+
+ DesktopSize video_size_prev = video_size_;
+ if (video_metadata_use) {
+ video_size_ =
+ DesktopSize(video_metadata_size->width, video_metadata_size->height);
+ } else {
+ video_size_ = desktop_size_;
+ }
+
+ webrtc::MutexLock lock(&current_frame_lock_);
+ if (!current_frame_ || !video_size_.equals(video_size_prev)) {
+    current_frame_ = std::make_unique<uint8_t[]>(
+        video_size_.width() * video_size_.height() *
+        BasicDesktopFrame::kBytesPerPixel);
+ }
+
+  const int32_t dst_stride =
+      video_size_.width() * BasicDesktopFrame::kBytesPerPixel;
+  const int32_t src_stride = spaBuffer->datas[0].chunk->stride;
+
+  if (src_stride !=
+      (desktop_size_.width() * BasicDesktopFrame::kBytesPerPixel)) {
+    RTC_LOG(LS_ERROR) << "Got buffer with stride different from screen "
+                      << "stride: " << src_stride << " != "
+                      << (desktop_size_.width() *
+                          BasicDesktopFrame::kBytesPerPixel);
+ portal_init_failed_ = true;
+
+ return;
+ }
+
+ // Adjust source content based on metadata video position
+ if (video_metadata_use &&
+ (video_metadata->region.position.y + video_size_.height() <=
+ desktop_size_.height())) {
+ src += src_stride * video_metadata->region.position.y;
+ }
+ const int x_offset =
+ video_metadata_use &&
+ (video_metadata->region.position.x + video_size_.width() <=
+ desktop_size_.width())
+ ? video_metadata->region.position.x * BasicDesktopFrame::kBytesPerPixel
+ : 0;
+
+ uint8_t* dst = current_frame_.get();
+ for (int i = 0; i < video_size_.height(); ++i) {
+ // Adjust source content based on crop video position if needed
+ src += x_offset;
+ std::memcpy(dst, src, dst_stride);
+    // If both sides decided to go with the RGBx format we need to convert it
+    // to BGRx to match the color format expected by WebRTC.
+ if (spa_video_format_.format == SPA_VIDEO_FORMAT_RGBx ||
+ spa_video_format_.format == SPA_VIDEO_FORMAT_RGBA) {
+ ConvertRGBxToBGRx(dst, dst_stride);
+ }
+ src += src_stride - x_offset;
+ dst += dst_stride;
+ }
+}
+
+void BaseCapturerPipeWire::ConvertRGBxToBGRx(uint8_t* frame, uint32_t size) {
+ // Change color format for KDE KWin which uses RGBx and not BGRx
+ for (uint32_t i = 0; i < size; i += 4) {
+ uint8_t tempR = frame[i];
+ uint8_t tempB = frame[i + 2];
+ frame[i] = tempB;
+ frame[i + 2] = tempR;
+ }
+}
+
+guint BaseCapturerPipeWire::SetupRequestResponseSignal(
+ const gchar* object_path,
+ GDBusSignalCallback callback) {
+ return g_dbus_connection_signal_subscribe(
+ connection_, kDesktopBusName, kRequestInterfaceName, "Response",
+ object_path, /*arg0=*/nullptr, G_DBUS_SIGNAL_FLAGS_NO_MATCH_RULE,
+ callback, this, /*user_data_free_func=*/nullptr);
+}
+
+// static
+void BaseCapturerPipeWire::OnProxyRequested(GObject* /*object*/,
+ GAsyncResult* result,
+ gpointer user_data) {
+ BaseCapturerPipeWire* that = static_cast<BaseCapturerPipeWire*>(user_data);
+ RTC_DCHECK(that);
+
+ Scoped<GError> error;
+ GDBusProxy* proxy = g_dbus_proxy_new_finish(result, error.receive());
+ if (!proxy) {
+ if (g_error_matches(error.get(), G_IO_ERROR, G_IO_ERROR_CANCELLED))
+ return;
+ RTC_LOG(LS_ERROR) << "Failed to create a proxy for the screen cast portal: "
+ << error->message;
+ that->portal_init_failed_ = true;
+ return;
+ }
+ that->proxy_ = proxy;
+ that->connection_ = g_dbus_proxy_get_connection(that->proxy_);
+
+ RTC_LOG(LS_INFO) << "Created proxy for the screen cast portal.";
+ that->SessionRequest();
+}
+
+// static
+gchar* BaseCapturerPipeWire::PrepareSignalHandle(GDBusConnection* connection,
+ const gchar* token) {
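+  // Per the Request portal documentation, the handle is built from the
+  // caller's unique bus name with the leading ':' stripped and '.' replaced
+  // by '_', followed by the client-provided token.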
+ Scoped<gchar> sender(
+ g_strdup(g_dbus_connection_get_unique_name(connection) + 1));
+ for (int i = 0; sender.get()[i]; i++) {
+ if (sender.get()[i] == '.') {
+ sender.get()[i] = '_';
+ }
+ }
+
+ gchar* handle = g_strconcat(kDesktopRequestObjectPath, "/", sender.get(), "/",
+ token, /*end of varargs*/ nullptr);
+
+ return handle;
+}
+
+void BaseCapturerPipeWire::SessionRequest() {
+ GVariantBuilder builder;
+ Scoped<gchar> variant_string;
+
+ g_variant_builder_init(&builder, G_VARIANT_TYPE_VARDICT);
+ variant_string =
+ g_strdup_printf("webrtc_session%d", g_random_int_range(0, G_MAXINT));
+ g_variant_builder_add(&builder, "{sv}", "session_handle_token",
+ g_variant_new_string(variant_string.get()));
+ variant_string = g_strdup_printf("webrtc%d", g_random_int_range(0, G_MAXINT));
+ g_variant_builder_add(&builder, "{sv}", "handle_token",
+ g_variant_new_string(variant_string.get()));
+
+ portal_handle_ = PrepareSignalHandle(connection_, variant_string.get());
+ session_request_signal_id_ = SetupRequestResponseSignal(
+ portal_handle_, OnSessionRequestResponseSignal);
+
+ RTC_LOG(LS_INFO) << "Screen cast session requested.";
+ g_dbus_proxy_call(
+ proxy_, "CreateSession", g_variant_new("(a{sv})", &builder),
+ G_DBUS_CALL_FLAGS_NONE, /*timeout=*/-1, cancellable_,
+ reinterpret_cast<GAsyncReadyCallback>(OnSessionRequested), this);
+}
+
+// static
+void BaseCapturerPipeWire::OnSessionRequested(GDBusProxy* proxy,
+ GAsyncResult* result,
+ gpointer user_data) {
+ BaseCapturerPipeWire* that = static_cast<BaseCapturerPipeWire*>(user_data);
+ RTC_DCHECK(that);
+
+ Scoped<GError> error;
+ Scoped<GVariant> variant(
+ g_dbus_proxy_call_finish(proxy, result, error.receive()));
+ if (!variant) {
+ if (g_error_matches(error.get(), G_IO_ERROR, G_IO_ERROR_CANCELLED))
+ return;
+ RTC_LOG(LS_ERROR) << "Failed to create a screen cast session: "
+ << error->message;
+ that->portal_init_failed_ = true;
+ return;
+ }
+ RTC_LOG(LS_INFO) << "Initializing the screen cast session.";
+
+ Scoped<gchar> handle;
+  g_variant_get_child(variant.get(), 0, "o", handle.receive());
+ if (!handle) {
+ RTC_LOG(LS_ERROR) << "Failed to initialize the screen cast session.";
+ if (that->session_request_signal_id_) {
+ g_dbus_connection_signal_unsubscribe(that->connection_,
+ that->session_request_signal_id_);
+ that->session_request_signal_id_ = 0;
+ }
+ that->portal_init_failed_ = true;
+ return;
+ }
+
+ RTC_LOG(LS_INFO) << "Subscribing to the screen cast session.";
+}
+
+// static
+void BaseCapturerPipeWire::OnSessionRequestResponseSignal(
+ GDBusConnection* connection,
+ const gchar* sender_name,
+ const gchar* object_path,
+ const gchar* interface_name,
+ const gchar* signal_name,
+ GVariant* parameters,
+ gpointer user_data) {
+ BaseCapturerPipeWire* that = static_cast<BaseCapturerPipeWire*>(user_data);
+ RTC_DCHECK(that);
+
+ RTC_LOG(LS_INFO)
+ << "Received response for the screen cast session subscription.";
+
+ guint32 portal_response;
+ Scoped<GVariant> response_data;
+ g_variant_get(parameters, "(u@a{sv})", &portal_response,
+ response_data.receive());
+ g_variant_lookup(response_data.get(), "session_handle", "s",
+ &that->session_handle_);
+
+ if (!that->session_handle_ || portal_response) {
+ RTC_LOG(LS_ERROR)
+ << "Failed to request the screen cast session subscription.";
+ that->portal_init_failed_ = true;
+ return;
+ }
+
+ that->SourcesRequest();
+}
+
+void BaseCapturerPipeWire::SourcesRequest() {
+ GVariantBuilder builder;
+ Scoped<gchar> variant_string;
+
+ g_variant_builder_init(&builder, G_VARIANT_TYPE_VARDICT);
+ // We want to record monitor content.
+ g_variant_builder_add(
+ &builder, "{sv}", "types",
+ g_variant_new_uint32(static_cast<uint32_t>(capture_source_type_)));
+ // We don't want to allow selection of multiple sources.
+ g_variant_builder_add(&builder, "{sv}", "multiple",
+ g_variant_new_boolean(false));
+
+ Scoped<GVariant> variant(
+ g_dbus_proxy_get_cached_property(proxy_, "AvailableCursorModes"));
+ if (variant.get()) {
+ uint32_t modes = 0;
+ g_variant_get(variant.get(), "u", &modes);
+    // Request the mouse cursor to be embedded in the stream, otherwise it is
+    // hidden by default. Make the request only if this mode is advertised by
+    // the portal implementation.
+ if (modes &
+ static_cast<uint32_t>(BaseCapturerPipeWire::CursorMode::kEmbedded)) {
+ g_variant_builder_add(&builder, "{sv}", "cursor_mode",
+ g_variant_new_uint32(static_cast<uint32_t>(
+ BaseCapturerPipeWire::CursorMode::kEmbedded)));
+ }
+ }
+
+ variant_string = g_strdup_printf("webrtc%d", g_random_int_range(0, G_MAXINT));
+ g_variant_builder_add(&builder, "{sv}", "handle_token",
+ g_variant_new_string(variant_string.get()));
+
+ sources_handle_ = PrepareSignalHandle(connection_, variant_string.get());
+ sources_request_signal_id_ = SetupRequestResponseSignal(
+ sources_handle_, OnSourcesRequestResponseSignal);
+
+ RTC_LOG(LS_INFO) << "Requesting sources from the screen cast session.";
+ g_dbus_proxy_call(
+ proxy_, "SelectSources",
+ g_variant_new("(oa{sv})", session_handle_, &builder),
+ G_DBUS_CALL_FLAGS_NONE, /*timeout=*/-1, cancellable_,
+ reinterpret_cast<GAsyncReadyCallback>(OnSourcesRequested), this);
+}
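+
+// For illustration (editor's sketch), the options built above serialize
+// roughly as:
+//   { "types": <uint32 1>,        // e.g. monitors only
+//     "multiple": <false>,
+//     "cursor_mode": <uint32 2>,  // embedded, if advertised
+//     "handle_token": <"webrtc42"> }
+// with the token value randomized per request.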
+
+// static
+void BaseCapturerPipeWire::OnSourcesRequested(GDBusProxy *proxy,
+ GAsyncResult* result,
+ gpointer user_data) {
+ BaseCapturerPipeWire* that = static_cast<BaseCapturerPipeWire*>(user_data);
+ RTC_DCHECK(that);
+
+ Scoped<GError> error;
+ Scoped<GVariant> variant(
+ g_dbus_proxy_call_finish(proxy, result, error.receive()));
+ if (!variant) {
+ if (g_error_matches(error.get(), G_IO_ERROR, G_IO_ERROR_CANCELLED))
+ return;
+ RTC_LOG(LS_ERROR) << "Failed to request the sources: " << error->message;
+ that->portal_init_failed_ = true;
+ return;
+ }
+
+ RTC_LOG(LS_INFO) << "Sources requested from the screen cast session.";
+
+ Scoped<gchar> handle;
+ g_variant_get_child(variant.get(), 0, "o", handle.receive());
+ if (!handle) {
+ RTC_LOG(LS_ERROR) << "Failed to initialize the screen cast session.";
+ if (that->sources_request_signal_id_) {
+ g_dbus_connection_signal_unsubscribe(that->connection_,
+ that->sources_request_signal_id_);
+ that->sources_request_signal_id_ = 0;
+ }
+ that->portal_init_failed_ = true;
+ return;
+ }
+
+ RTC_LOG(LS_INFO) << "Subscribed to sources signal.";
+}
+
+// static
+void BaseCapturerPipeWire::OnSourcesRequestResponseSignal(
+ GDBusConnection* connection,
+ const gchar* sender_name,
+ const gchar* object_path,
+ const gchar* interface_name,
+ const gchar* signal_name,
+ GVariant* parameters,
+ gpointer user_data) {
+ BaseCapturerPipeWire* that = static_cast<BaseCapturerPipeWire*>(user_data);
+ RTC_DCHECK(that);
+
+ RTC_LOG(LS_INFO) << "Received sources signal from session.";
+
+ guint32 portal_response;
+ g_variant_get(parameters, "(u@a{sv})", &portal_response, nullptr);
+ if (portal_response) {
+ RTC_LOG(LS_ERROR)
+ << "Failed to select sources for the screen cast session.";
+ that->portal_init_failed_ = true;
+ return;
+ }
+
+ that->StartRequest();
+}
+
+void BaseCapturerPipeWire::StartRequest() {
+ GVariantBuilder builder;
+ Scoped<gchar> variant_string;
+
+ g_variant_builder_init(&builder, G_VARIANT_TYPE_VARDICT);
+ variant_string = g_strdup_printf("webrtc%d", g_random_int_range(0, G_MAXINT));
+ g_variant_builder_add(&builder, "{sv}", "handle_token",
+ g_variant_new_string(variant_string.get()));
+
+ start_handle_ = PrepareSignalHandle(connection_, variant_string.get());
+ start_request_signal_id_ =
+ SetupRequestResponseSignal(start_handle_, OnStartRequestResponseSignal);
+
+ // "Identifier for the application window", this is Wayland, so not "x11:...".
+ const gchar parent_window[] = "";
+
+ RTC_LOG(LS_INFO) << "Starting the screen cast session.";
+ g_dbus_proxy_call(
+ proxy_, "Start",
+ g_variant_new("(osa{sv})", session_handle_, parent_window, &builder),
+ G_DBUS_CALL_FLAGS_NONE, /*timeout=*/-1, cancellable_,
+ reinterpret_cast<GAsyncReadyCallback>(OnStartRequested), this);
+}
+
+// static
+void BaseCapturerPipeWire::OnStartRequested(GDBusProxy *proxy,
+ GAsyncResult* result,
+ gpointer user_data) {
+ BaseCapturerPipeWire* that = static_cast<BaseCapturerPipeWire*>(user_data);
+ RTC_DCHECK(that);
+
+ Scoped<GError> error;
+ Scoped<GVariant> variant(
+ g_dbus_proxy_call_finish(proxy, result, error.receive()));
+ if (!variant) {
+ if (g_error_matches(error.get(), G_IO_ERROR, G_IO_ERROR_CANCELLED))
+ return;
+ RTC_LOG(LS_ERROR) << "Failed to start the screen cast session: "
+ << error->message;
+ that->portal_init_failed_ = true;
+ return;
+ }
+
+ RTC_LOG(LS_INFO) << "Initializing the start of the screen cast session.";
+
+ Scoped<gchar> handle;
+ g_variant_get_child(variant.get(), 0, "o", handle.receive());
+ if (!handle) {
+ RTC_LOG(LS_ERROR)
+ << "Failed to initialize the start of the screen cast session.";
+ if (that->start_request_signal_id_) {
+ g_dbus_connection_signal_unsubscribe(that->connection_,
+ that->start_request_signal_id_);
+ that->start_request_signal_id_ = 0;
+ }
+ that->portal_init_failed_ = true;
+ return;
+ }
+
+ RTC_LOG(LS_INFO) << "Subscribed to the start signal.";
+}
+
+// static
+void BaseCapturerPipeWire::OnStartRequestResponseSignal(
+ GDBusConnection* connection,
+ const gchar* sender_name,
+ const gchar* object_path,
+ const gchar* interface_name,
+ const gchar* signal_name,
+ GVariant* parameters,
+ gpointer user_data) {
+ BaseCapturerPipeWire* that = static_cast<BaseCapturerPipeWire*>(user_data);
+ RTC_DCHECK(that);
+
+ RTC_LOG(LS_INFO) << "Start signal received.";
+ guint32 portal_response;
+ Scoped<GVariant> response_data;
+ Scoped<GVariantIter> iter;
+ g_variant_get(parameters, "(u@a{sv})", &portal_response,
+ response_data.receive());
+ if (portal_response || !response_data) {
+ RTC_LOG(LS_ERROR) << "Failed to start the screen cast session.";
+ that->portal_init_failed_ = true;
+ return;
+ }
+
+ // Array of PipeWire streams. See
+ // https://github.com/flatpak/xdg-desktop-portal/blob/master/data/org.freedesktop.portal.ScreenCast.xml
+ // documentation for <method name="Start">.
+ if (g_variant_lookup(response_data.get(), "streams", "a(ua{sv})",
+ iter.receive())) {
+ Scoped<GVariant> variant;
+
+ while (g_variant_iter_next(iter.get(), "@(ua{sv})", variant.receive())) {
+ guint32 stream_id;
+ guint32 type;
+ Scoped<GVariant> options;
+
+ g_variant_get(variant.get(), "(u@a{sv})", &stream_id, options.receive());
+ RTC_DCHECK(options.get());
+
+ if (g_variant_lookup(options.get(), "source_type", "u", &type)) {
+ that->capture_source_type_ =
+ static_cast<BaseCapturerPipeWire::CaptureSourceType>(type);
+ }
+
+ that->pw_stream_node_id_ = stream_id;
+
+ break;
+ }
+ }
+
+ that->OpenPipeWireRemote();
+}
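+
+// Editor's sketch of a typical Start response payload:
+//   (0, { "streams": <[ (42, { "source_type": <uint32 1>,
+//                              "size": <(1920, 1080)> }) ]> })
+// Only the first stream is consumed above, which matches the
+// "multiple": false option sent in SourcesRequest().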
+
+void BaseCapturerPipeWire::OpenPipeWireRemote() {
+ GVariantBuilder builder;
+ g_variant_builder_init(&builder, G_VARIANT_TYPE_VARDICT);
+
+ RTC_LOG(LS_INFO) << "Opening the PipeWire remote.";
+
+ g_dbus_proxy_call_with_unix_fd_list(
+ proxy_, "OpenPipeWireRemote",
+ g_variant_new("(oa{sv})", session_handle_, &builder),
+ G_DBUS_CALL_FLAGS_NONE, /*timeout=*/-1, /*fd_list=*/nullptr,
+ cancellable_,
+ reinterpret_cast<GAsyncReadyCallback>(OnOpenPipeWireRemoteRequested),
+ this);
+}
+
+// static
+void BaseCapturerPipeWire::OnOpenPipeWireRemoteRequested(
+ GDBusProxy *proxy,
+ GAsyncResult* result,
+ gpointer user_data) {
+ BaseCapturerPipeWire* that = static_cast<BaseCapturerPipeWire*>(user_data);
+ RTC_DCHECK(that);
+
+ Scoped<GError> error;
+ Scoped<GUnixFDList> outlist;
+ Scoped<GVariant> variant(g_dbus_proxy_call_with_unix_fd_list_finish(
+ proxy, outlist.receive(), result, error.receive()));
+ if (!variant) {
+ if (g_error_matches(error.get(), G_IO_ERROR, G_IO_ERROR_CANCELLED))
+ return;
+ RTC_LOG(LS_ERROR) << "Failed to open the PipeWire remote: "
+ << error->message;
+ that->portal_init_failed_ = true;
+ return;
+ }
+
+ gint32 index;
+ g_variant_get(variant.get(), "(h)", &index);
+
+ if ((that->pw_fd_ =
+ g_unix_fd_list_get(outlist.get(), index, error.receive())) == -1) {
+ RTC_LOG(LS_ERROR) << "Failed to get file descriptor from the list: "
+ << error->message;
+ that->portal_init_failed_ = true;
+ return;
+ }
+
+ that->InitPipeWire();
+}
+
+void BaseCapturerPipeWire::Start(Callback* callback) {
+ RTC_DCHECK(!callback_);
+ RTC_DCHECK(callback);
+
+ InitPortal();
+
+ callback_ = callback;
+}
+
+void BaseCapturerPipeWire::CaptureFrame() {
+ if (portal_init_failed_) {
+ callback_->OnCaptureResult(Result::ERROR_PERMANENT, nullptr);
+ return;
+ }
+
+ webrtc::MutexLock lock(&current_frame_lock_);
+ if (!current_frame_) {
+ callback_->OnCaptureResult(Result::ERROR_TEMPORARY, nullptr);
+ return;
+ }
+
+ DesktopSize frame_size = video_size_;
+
+  std::unique_ptr<DesktopFrame> result(new BasicDesktopFrame(frame_size));
+  // The captured buffer is tightly packed, so the source stride is
+  // width * kBytesPerPixel.
+  result->CopyPixelsFrom(
+      current_frame_.get(),
+      frame_size.width() * BasicDesktopFrame::kBytesPerPixel,
+      DesktopRect::MakeWH(frame_size.width(), frame_size.height()));
+
+ // TODO(julien.isorce): http://crbug.com/945468. Set the icc profile on the
+ // frame, see ScreenCapturerX11::CaptureFrame.
+
+ callback_->OnCaptureResult(Result::SUCCESS, std::move(result));
+}
+
+// Keep in sync with the defines in browser/actors/WebRTCParent.jsm.
+// With PipeWire we cannot select which system resource is shared, so we do
+// not build a window/screen list. Instead we expose these constants as the
+// window name/id so that frontend code can identify the PipeWire backend and
+// skip creating a screen/window preview.
+
+#define PIPEWIRE_ID 0xaffffff
+#define PIPEWIRE_NAME "####_PIPEWIRE_PORTAL_####"
+
+bool BaseCapturerPipeWire::GetSourceList(SourceList* sources) {
+ sources->push_back({PIPEWIRE_ID, 0, PIPEWIRE_NAME});
+ return true;
+}
+
+bool BaseCapturerPipeWire::SelectSource(SourceId id) {
+ // Screen selection is handled by the xdg-desktop-portal.
+ return id == PIPEWIRE_ID;
+}
+
+// static
+std::unique_ptr<DesktopCapturer> BaseCapturerPipeWire::CreateRawCapturer(
+ const DesktopCaptureOptions& options) {
+ return std::make_unique<BaseCapturerPipeWire>(
+ BaseCapturerPipeWire::CaptureSourceType::kAny);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/linux/wayland/moz_base_capturer_pipewire.h b/third_party/libwebrtc/modules/desktop_capture/linux/wayland/moz_base_capturer_pipewire.h
new file mode 100644
index 0000000000..a3d43336fd
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/linux/wayland/moz_base_capturer_pipewire.h
@@ -0,0 +1,182 @@
+/*
+ * Copyright 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_DESKTOP_CAPTURE_LINUX_BASE_CAPTURER_PIPEWIRE_H_
+#define MODULES_DESKTOP_CAPTURE_LINUX_BASE_CAPTURER_PIPEWIRE_H_
+#include <gio/gio.h>
+// PipeWire headers use the GNU `typeof` keyword; map it to `__typeof__` so
+// they compile without GNU language extensions enabled.
+#define typeof __typeof__
+#include <pipewire/pipewire.h>
+#include <spa/param/video/format-utils.h>
+#include <spa/utils/result.h>
+
+#include "absl/types/optional.h"
+#include "modules/desktop_capture/desktop_capture_options.h"
+#include "modules/desktop_capture/desktop_capturer.h"
+#include "rtc_base/synchronization/mutex.h"
+
+namespace webrtc {
+
+class BaseCapturerPipeWire : public DesktopCapturer {
+ public:
+ // Values are set based on source type property in
+ // xdg-desktop-portal/screencast
+ // https://github.com/flatpak/xdg-desktop-portal/blob/master/data/org.freedesktop.portal.ScreenCast.xml
+ enum class CaptureSourceType : uint32_t {
+ kScreen = 0b01,
+ kWindow = 0b10,
+ kAny = 0b11
+ };
+
+ enum class CursorMode : uint32_t {
+ kHidden = 0b01,
+ kEmbedded = 0b10,
+ kMetadata = 0b100
+ };
+
+ explicit BaseCapturerPipeWire(CaptureSourceType source_type);
+ ~BaseCapturerPipeWire() override;
+
+ BaseCapturerPipeWire(const BaseCapturerPipeWire&) = delete;
+ BaseCapturerPipeWire& operator=(const BaseCapturerPipeWire&) = delete;
+
+ static std::unique_ptr<DesktopCapturer> CreateRawCapturer(
+ const DesktopCaptureOptions& options);
+
+ // DesktopCapturer interface.
+ void Start(Callback* delegate) override;
+ void CaptureFrame() override;
+ bool GetSourceList(SourceList* sources) override;
+ bool SelectSource(SourceId id) override;
+
+ private:
+ // PipeWire types -->
+ struct pw_context* pw_context_ = nullptr;
+ struct pw_core* pw_core_ = nullptr;
+ struct pw_stream* pw_stream_ = nullptr;
+ struct pw_thread_loop* pw_main_loop_ = nullptr;
+
+ spa_hook spa_core_listener_;
+ spa_hook spa_stream_listener_;
+
+ // event handlers
+ pw_core_events pw_core_events_ = {};
+ pw_stream_events pw_stream_events_ = {};
+
+ struct spa_video_info_raw spa_video_format_;
+
+ guint32 pw_stream_node_id_ = 0;
+ gint32 pw_fd_ = -1;
+
+ CaptureSourceType capture_source_type_ =
+ BaseCapturerPipeWire::CaptureSourceType::kScreen;
+
+ // <-- end of PipeWire types
+
+ GDBusConnection* connection_ = nullptr;
+ GDBusProxy* proxy_ = nullptr;
+ GCancellable *cancellable_ = nullptr;
+ gchar* portal_handle_ = nullptr;
+ gchar* session_handle_ = nullptr;
+ gchar* sources_handle_ = nullptr;
+ gchar* start_handle_ = nullptr;
+ guint session_request_signal_id_ = 0;
+ guint sources_request_signal_id_ = 0;
+ guint start_request_signal_id_ = 0;
+
+ DesktopSize video_size_;
+ DesktopSize desktop_size_ = {};
+ DesktopCaptureOptions options_ = {};
+
+ webrtc::Mutex current_frame_lock_;
+ std::unique_ptr<uint8_t[]> current_frame_;
+ Callback* callback_ = nullptr;
+
+ bool portal_init_failed_ = false;
+
+ void InitPortal();
+ void InitPipeWire();
+ void InitPipeWireTypes();
+
+ pw_stream* CreateReceivingStream();
+ void HandleBuffer(pw_buffer* buffer);
+
+ void ConvertRGBxToBGRx(uint8_t* frame, uint32_t size);
+
+ static void OnCoreError(void* data,
+ uint32_t id,
+ int seq,
+ int res,
+ const char* message);
+ static void OnStreamParamChanged(void* data,
+ uint32_t id,
+ const struct spa_pod* format);
+ static void OnStreamStateChanged(void* data,
+ pw_stream_state old_state,
+ pw_stream_state state,
+ const char* error_message);
+
+ static void OnStreamProcess(void* data);
+ static void OnNewBuffer(void* data, uint32_t id);
+
+ guint SetupRequestResponseSignal(const gchar* object_path,
+ GDBusSignalCallback callback);
+
+ static void OnProxyRequested(GObject* object,
+ GAsyncResult* result,
+ gpointer user_data);
+
+ static gchar* PrepareSignalHandle(GDBusConnection* connection,
+ const gchar* token);
+
+ void SessionRequest();
+ static void OnSessionRequested(GDBusProxy *proxy,
+ GAsyncResult* result,
+ gpointer user_data);
+ static void OnSessionRequestResponseSignal(GDBusConnection* connection,
+ const gchar* sender_name,
+ const gchar* object_path,
+ const gchar* interface_name,
+ const gchar* signal_name,
+ GVariant* parameters,
+ gpointer user_data);
+
+ void SourcesRequest();
+ static void OnSourcesRequested(GDBusProxy *proxy,
+ GAsyncResult* result,
+ gpointer user_data);
+ static void OnSourcesRequestResponseSignal(GDBusConnection* connection,
+ const gchar* sender_name,
+ const gchar* object_path,
+ const gchar* interface_name,
+ const gchar* signal_name,
+ GVariant* parameters,
+ gpointer user_data);
+
+ void StartRequest();
+ static void OnStartRequested(GDBusProxy *proxy,
+ GAsyncResult* result,
+ gpointer user_data);
+ static void OnStartRequestResponseSignal(GDBusConnection* connection,
+ const gchar* sender_name,
+ const gchar* object_path,
+ const gchar* interface_name,
+ const gchar* signal_name,
+ GVariant* parameters,
+ gpointer user_data);
+
+ void OpenPipeWireRemote();
+ static void OnOpenPipeWireRemoteRequested(GDBusProxy *proxy,
+ GAsyncResult* result,
+ gpointer user_data);
+};
+
+} // namespace webrtc
+
+#endif // MODULES_DESKTOP_CAPTURE_LINUX_BASE_CAPTURER_PIPEWIRE_H_
diff --git a/third_party/libwebrtc/modules/desktop_capture/linux/wayland/pipewire.sigs b/third_party/libwebrtc/modules/desktop_capture/linux/wayland/pipewire.sigs
new file mode 100644
index 0000000000..06a97b8f29
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/linux/wayland/pipewire.sigs
@@ -0,0 +1,50 @@
+// Copyright 2018 The WebRTC project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+//------------------------------------------------
+// Functions from PipeWire used in capturer code.
+//------------------------------------------------
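+//
+// Editor's note: files like this are consumed by a generate_stubs-style
+// script (as in Chromium), which emits dlopen()/dlsym() wrappers for each
+// signature below so libpipewire can be loaded lazily at runtime instead of
+// being a hard link-time dependency.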
+
+// core.h
+int pw_core_disconnect(pw_core *core);
+
+// loop.h
+void pw_loop_destroy(pw_loop *loop);
+pw_loop * pw_loop_new(const spa_dict *props);
+
+
+// pipewire.h
+void pw_init(int *argc, char **argv[]);
+const char* pw_get_library_version();
+
+// properties.h
+pw_properties * pw_properties_new_string(const char *args);
+
+// stream.h
+void pw_stream_add_listener(pw_stream *stream, spa_hook *listener, const pw_stream_events *events, void *data);
+int pw_stream_connect(pw_stream *stream, enum pw_direction direction, uint32_t target_id, enum pw_stream_flags flags, const spa_pod **params, uint32_t n_params);
+int pw_stream_disconnect(pw_stream *stream);
+pw_buffer *pw_stream_dequeue_buffer(pw_stream *stream);
+void pw_stream_destroy(pw_stream *stream);
+pw_stream * pw_stream_new(pw_core *core, const char *name, pw_properties *props);
+int pw_stream_queue_buffer(pw_stream *stream, pw_buffer *buffer);
+int pw_stream_set_active(pw_stream *stream, bool active);
+int pw_stream_update_params(pw_stream *stream, const spa_pod **params, uint32_t n_params);
+
+// thread-loop.h
+void pw_thread_loop_destroy(pw_thread_loop *loop);
+pw_thread_loop * pw_thread_loop_new(const char *name, const spa_dict *props);
+int pw_thread_loop_start(pw_thread_loop *loop);
+void pw_thread_loop_stop(pw_thread_loop *loop);
+void pw_thread_loop_lock(pw_thread_loop *loop);
+void pw_thread_loop_unlock(pw_thread_loop *loop);
+pw_loop * pw_thread_loop_get_loop(pw_thread_loop *loop);
+void pw_thread_loop_signal(pw_thread_loop *loop, bool wait_for_accept);
+void pw_thread_loop_wait(pw_thread_loop *loop);
+
+// context.h
+void pw_context_destroy(pw_context *context);
+pw_context *pw_context_new(pw_loop *main_loop, pw_properties *props, size_t user_data_size);
+pw_core * pw_context_connect(pw_context *context, pw_properties *properties, size_t user_data_size);
+pw_core * pw_context_connect_fd(pw_context *context, int fd, pw_properties *properties, size_t user_data_size);
diff --git a/third_party/libwebrtc/modules/desktop_capture/linux/wayland/pipewire_stub_header.fragment b/third_party/libwebrtc/modules/desktop_capture/linux/wayland/pipewire_stub_header.fragment
new file mode 100644
index 0000000000..06ae18dfd4
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/linux/wayland/pipewire_stub_header.fragment
@@ -0,0 +1,9 @@
+// The extra include header needed in the generated stub file for defining
+// various PipeWire types.
+
+extern "C" {
+
+#include <pipewire/pipewire.h>
+
+#include <xf86drm.h>
+}
diff --git a/third_party/libwebrtc/modules/desktop_capture/linux/wayland/portal_request_response.h b/third_party/libwebrtc/modules/desktop_capture/linux/wayland/portal_request_response.h
new file mode 100644
index 0000000000..dde9ac5eff
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/linux/wayland/portal_request_response.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_DESKTOP_CAPTURE_LINUX_WAYLAND_PORTAL_REQUEST_RESPONSE_H_
+#define MODULES_DESKTOP_CAPTURE_LINUX_WAYLAND_PORTAL_REQUEST_RESPONSE_H_
+
+namespace webrtc {
+namespace xdg_portal {
+
+// Contains type of responses that can be observed when making a request to
+// a desktop portal interface.
+enum class RequestResponse {
+ // Unknown, the initialized status.
+ kUnknown,
+ // Success, the request is carried out.
+ kSuccess,
+ // The user cancelled the interaction.
+ kUserCancelled,
+ // The user interaction was ended in some other way.
+ kError,
+
+ kMaxValue = kError,
+};
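+
+// For reference (editor's note): xdg-desktop-portal encodes these as the
+// leading 'u' of every Response signal, with 0 -> kSuccess,
+// 1 -> kUserCancelled and 2 -> kError; see
+// RequestResponseFromPortalResponse() in xdg_desktop_portal_utils.h.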
+
+} // namespace xdg_portal
+} // namespace webrtc
+#endif // MODULES_DESKTOP_CAPTURE_LINUX_WAYLAND_PORTAL_REQUEST_RESPONSE_H_
diff --git a/third_party/libwebrtc/modules/desktop_capture/linux/wayland/restore_token_manager.cc b/third_party/libwebrtc/modules/desktop_capture/linux/wayland/restore_token_manager.cc
new file mode 100644
index 0000000000..cc626d3065
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/linux/wayland/restore_token_manager.cc
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/linux/wayland/restore_token_manager.h"
+
+namespace webrtc {
+
+// static
+RestoreTokenManager& RestoreTokenManager::GetInstance() {
+ static webrtc::RestoreTokenManager* manager = new RestoreTokenManager();
+ return *manager;
+}
+
+void RestoreTokenManager::AddToken(DesktopCapturer::SourceId id,
+ const std::string& token) {
+ restore_tokens_.insert({id, token});
+}
+
+std::string RestoreTokenManager::TakeToken(DesktopCapturer::SourceId id) {
+ std::string token = restore_tokens_[id];
+  // Remove the token, as it cannot be used again.
+ restore_tokens_.erase(id);
+ return token;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/linux/wayland/restore_token_manager.h b/third_party/libwebrtc/modules/desktop_capture/linux/wayland/restore_token_manager.h
new file mode 100644
index 0000000000..37c9a39cac
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/linux/wayland/restore_token_manager.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_DESKTOP_CAPTURE_LINUX_WAYLAND_RESTORE_TOKEN_MANAGER_H_
+#define MODULES_DESKTOP_CAPTURE_LINUX_WAYLAND_RESTORE_TOKEN_MANAGER_H_
+
+#include <mutex>
+#include <string>
+#include <unordered_map>
+
+#include "modules/desktop_capture/desktop_capturer.h"
+
+namespace webrtc {
+
+class RestoreTokenManager {
+ public:
+ RestoreTokenManager(const RestoreTokenManager& manager) = delete;
+ RestoreTokenManager& operator=(const RestoreTokenManager& manager) = delete;
+
+ static RestoreTokenManager& GetInstance();
+
+ void AddToken(DesktopCapturer::SourceId id, const std::string& token);
+ std::string TakeToken(DesktopCapturer::SourceId id);
+
+ private:
+ RestoreTokenManager() = default;
+ ~RestoreTokenManager() = default;
+
+ std::unordered_map<DesktopCapturer::SourceId, std::string> restore_tokens_;
+};
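+
+// A minimal usage sketch (editor's illustration; `source_id` and `token` are
+// placeholders):
+//
+//   RestoreTokenManager::GetInstance().AddToken(source_id, token);
+//   ...
+//   std::string t = RestoreTokenManager::GetInstance().TakeToken(source_id);
+//   // TakeToken() removes the entry; a second call returns an empty string.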
+
+} // namespace webrtc
+
+#endif // MODULES_DESKTOP_CAPTURE_LINUX_WAYLAND_RESTORE_TOKEN_MANAGER_H_
diff --git a/third_party/libwebrtc/modules/desktop_capture/linux/wayland/scoped_glib.cc b/third_party/libwebrtc/modules/desktop_capture/linux/wayland/scoped_glib.cc
new file mode 100644
index 0000000000..0d9a87d7fd
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/linux/wayland/scoped_glib.cc
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/linux/wayland/scoped_glib.h"
+
+namespace webrtc {
+
+template <>
+Scoped<GError>::~Scoped() {
+ if (ptr_) {
+ g_error_free(ptr_);
+ }
+}
+
+template <>
+Scoped<char>::~Scoped() {
+ if (ptr_) {
+ g_free(ptr_);
+ }
+}
+
+template <>
+Scoped<GVariant>::~Scoped() {
+ if (ptr_) {
+ g_variant_unref(ptr_);
+ }
+}
+
+template <>
+Scoped<GVariantIter>::~Scoped() {
+ if (ptr_) {
+ g_variant_iter_free(ptr_);
+ }
+}
+
+template <>
+Scoped<GDBusMessage>::~Scoped() {
+ if (ptr_) {
+ g_object_unref(ptr_);
+ }
+}
+
+template <>
+Scoped<GUnixFDList>::~Scoped() {
+ if (ptr_) {
+ g_object_unref(ptr_);
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/linux/wayland/scoped_glib.h b/third_party/libwebrtc/modules/desktop_capture/linux/wayland/scoped_glib.h
new file mode 100644
index 0000000000..908bd6f77d
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/linux/wayland/scoped_glib.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_DESKTOP_CAPTURE_LINUX_WAYLAND_SCOPED_GLIB_H_
+#define MODULES_DESKTOP_CAPTURE_LINUX_WAYLAND_SCOPED_GLIB_H_
+
+#include <gio/gio.h>
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+template <class T>
+class Scoped {
+ public:
+ Scoped() {}
+ explicit Scoped(T* val) { ptr_ = val; }
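+  // The primary template deliberately has no usable destructor: only the
+  // specializations declared below know how to free their pointer type, so
+  // destroying any other Scoped<T> trips the DCHECK.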
+ ~Scoped() { RTC_DCHECK_NOTREACHED(); }
+
+ T* operator->() const { return ptr_; }
+
+ explicit operator bool() const { return ptr_ != nullptr; }
+
+ bool operator!() const { return ptr_ == nullptr; }
+
+ T* get() const { return ptr_; }
+
+ T** receive() {
+ RTC_CHECK(!ptr_);
+ return &ptr_;
+ }
+
+ Scoped& operator=(T* val) {
+ RTC_DCHECK(val);
+ ptr_ = val;
+ return *this;
+ }
+
+ protected:
+ T* ptr_ = nullptr;
+};
+
+template <>
+Scoped<GError>::~Scoped();
+template <>
+Scoped<char>::~Scoped();
+template <>
+Scoped<GVariant>::~Scoped();
+template <>
+Scoped<GVariantIter>::~Scoped();
+template <>
+Scoped<GDBusMessage>::~Scoped();
+template <>
+Scoped<GUnixFDList>::~Scoped();
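+
+// Typical receive() pattern (editor's sketch):
+//
+//   Scoped<GError> error;
+//   Scoped<GVariant> reply(
+//       g_dbus_proxy_call_finish(proxy, result, error.receive()));
+//   if (!reply) { /* inspect error->message */ }
+//
+// receive() exposes the address of the owned pointer for GLib-style
+// out-parameters. Note that operator=() overwrites without freeing, so a
+// Scoped should be assigned at most once per owned value.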
+
+} // namespace webrtc
+
+#endif // MODULES_DESKTOP_CAPTURE_LINUX_WAYLAND_SCOPED_GLIB_H_
diff --git a/third_party/libwebrtc/modules/desktop_capture/linux/wayland/screen_capture_portal_interface.cc b/third_party/libwebrtc/modules/desktop_capture/linux/wayland/screen_capture_portal_interface.cc
new file mode 100644
index 0000000000..02d9d2e806
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/linux/wayland/screen_capture_portal_interface.cc
@@ -0,0 +1,127 @@
+/*
+ * Copyright 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/desktop_capture/linux/wayland/screen_capture_portal_interface.h"
+#include "modules/desktop_capture/linux/wayland/xdg_desktop_portal_utils.h"
+
+#include <string>
+
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+namespace xdg_portal {
+
+void ScreenCapturePortalInterface::RequestSessionUsingProxy(
+ GAsyncResult* result) {
+ Scoped<GError> error;
+ GDBusProxy* proxy = g_dbus_proxy_new_finish(result, error.receive());
+ if (!proxy) {
+    // Ignore the error caused by the user cancelling the request via
+    // `cancellable_`.
+ if (g_error_matches(error.get(), G_IO_ERROR, G_IO_ERROR_CANCELLED))
+ return;
+ RTC_LOG(LS_ERROR) << "Failed to get a proxy for the portal: "
+ << error->message;
+ OnPortalDone(RequestResponse::kError);
+ return;
+ }
+
+ RTC_LOG(LS_INFO) << "Successfully created proxy for the portal.";
+ RequestSession(proxy);
+}
+
+void ScreenCapturePortalInterface::OnSessionRequestResult(
+ GDBusProxy* proxy,
+ GAsyncResult* result) {
+ Scoped<GError> error;
+ Scoped<GVariant> variant(
+ g_dbus_proxy_call_finish(proxy, result, error.receive()));
+ if (!variant) {
+    // Ignore the error caused by the user cancelling the request via
+    // `cancellable_`.
+ if (g_error_matches(error.get(), G_IO_ERROR, G_IO_ERROR_CANCELLED))
+ return;
+ RTC_LOG(LS_ERROR) << "Failed to request session: " << error->message;
+ OnPortalDone(RequestResponse::kError);
+ return;
+ }
+
+ RTC_LOG(LS_INFO) << "Initializing the session.";
+
+ Scoped<char> handle;
+ g_variant_get_child(variant.get(), /*index=*/0, /*format_string=*/"o",
+ &handle);
+ if (!handle) {
+ RTC_LOG(LS_ERROR) << "Failed to initialize the session.";
+ OnPortalDone(RequestResponse::kError);
+ return;
+ }
+}
+
+void ScreenCapturePortalInterface::RegisterSessionClosedSignalHandler(
+ const SessionClosedSignalHandler session_close_signal_handler,
+ GVariant* parameters,
+ GDBusConnection* connection,
+ std::string& session_handle,
+ guint& session_closed_signal_id) {
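+  // Default to 2 ("ended some other way") so a malformed reply is treated as
+  // an error rather than success.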
+ uint32_t portal_response = 2;
+ Scoped<GVariant> response_data;
+ g_variant_get(parameters, /*format_string=*/"(u@a{sv})", &portal_response,
+ response_data.receive());
+
+ if (RequestResponseFromPortalResponse(portal_response) !=
+ RequestResponse::kSuccess) {
+ RTC_LOG(LS_ERROR) << "Failed to request the session subscription.";
+ OnPortalDone(RequestResponse::kError);
+ return;
+ }
+
+ Scoped<GVariant> g_session_handle(
+ g_variant_lookup_value(response_data.get(), /*key=*/"session_handle",
+ /*expected_type=*/nullptr));
+ session_handle = g_variant_get_string(
+ /*value=*/g_session_handle.get(), /*length=*/nullptr);
+
+ if (session_handle.empty()) {
+ RTC_LOG(LS_ERROR) << "Could not get session handle despite valid response";
+ OnPortalDone(RequestResponse::kError);
+ return;
+ }
+
+ session_closed_signal_id = g_dbus_connection_signal_subscribe(
+ connection, kDesktopBusName, kSessionInterfaceName, /*member=*/"Closed",
+ session_handle.c_str(), /*arg0=*/nullptr, G_DBUS_SIGNAL_FLAGS_NONE,
+ session_close_signal_handler, this, /*user_data_free_func=*/nullptr);
+}
+
+void ScreenCapturePortalInterface::OnStartRequestResult(GDBusProxy* proxy,
+ GAsyncResult* result) {
+ Scoped<GError> error;
+ Scoped<GVariant> variant(
+ g_dbus_proxy_call_finish(proxy, result, error.receive()));
+ if (!variant) {
+ if (g_error_matches(error.get(), G_IO_ERROR, G_IO_ERROR_CANCELLED))
+ return;
+ RTC_LOG(LS_ERROR) << "Failed to start the portal session: "
+ << error->message;
+ OnPortalDone(RequestResponse::kError);
+ return;
+ }
+
+ Scoped<char> handle;
+ g_variant_get_child(variant.get(), 0, "o", handle.receive());
+ if (!handle) {
+ RTC_LOG(LS_ERROR) << "Failed to initialize the start portal session.";
+ OnPortalDone(RequestResponse::kError);
+ return;
+ }
+
+ RTC_LOG(LS_INFO) << "Subscribed to the start signal.";
+}
+
+} // namespace xdg_portal
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/linux/wayland/screen_capture_portal_interface.h b/third_party/libwebrtc/modules/desktop_capture/linux/wayland/screen_capture_portal_interface.h
new file mode 100644
index 0000000000..775ed1facc
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/linux/wayland/screen_capture_portal_interface.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_DESKTOP_CAPTURE_LINUX_WAYLAND_SCREEN_CAPTURE_PORTAL_INTERFACE_H_
+#define MODULES_DESKTOP_CAPTURE_LINUX_WAYLAND_SCREEN_CAPTURE_PORTAL_INTERFACE_H_
+
+#include <gio/gio.h>
+
+#include <string>
+
+#include "modules/desktop_capture/linux/wayland/portal_request_response.h"
+#include "modules/desktop_capture/linux/wayland/scoped_glib.h"
+#include "modules/desktop_capture/linux/wayland/xdg_desktop_portal_utils.h"
+#include "modules/desktop_capture/linux/wayland/xdg_session_details.h"
+
+namespace webrtc {
+namespace xdg_portal {
+
+using SessionClosedSignalHandler = void (*)(GDBusConnection*,
+ const char*,
+ const char*,
+ const char*,
+ const char*,
+ GVariant*,
+ gpointer);
+
+// A base class for XDG desktop portals that can capture desktop/screen.
+// Note: downstream clients inherit from this class so it is advisable to
+// provide a default implementation of any new virtual methods that may be added
+// to this class.
+class ScreenCapturePortalInterface {
+ public:
+ virtual ~ScreenCapturePortalInterface() {}
+ // Gets details about the session such as session handle.
+ virtual xdg_portal::SessionDetails GetSessionDetails() { return {}; }
+ // Starts the portal setup.
+ virtual void Start() {}
+ // Notifies observers about the success/fail state of the portal
+ // request/response.
+ virtual void OnPortalDone(xdg_portal::RequestResponse result) {}
+ // Sends a create session request to the portal.
+ virtual void RequestSession(GDBusProxy* proxy) {}
+
+ // Following methods should not be made virtual as they share a common
+ // implementation between portals.
+
+ // Requests portal session using the proxy object.
+ void RequestSessionUsingProxy(GAsyncResult* result);
+ // Handles the session request result.
+ void OnSessionRequestResult(GDBusProxy* proxy, GAsyncResult* result);
+ // Subscribes to session close signal and sets up a handler for it.
+ void RegisterSessionClosedSignalHandler(
+ const SessionClosedSignalHandler session_close_signal_handler,
+ GVariant* parameters,
+ GDBusConnection* connection,
+ std::string& session_handle,
+ guint& session_closed_signal_id);
+ // Handles the result of session start request.
+ void OnStartRequestResult(GDBusProxy* proxy, GAsyncResult* result);
+};
+
+} // namespace xdg_portal
+} // namespace webrtc
+
+#endif // MODULES_DESKTOP_CAPTURE_LINUX_WAYLAND_SCREEN_CAPTURE_PORTAL_INTERFACE_H_
diff --git a/third_party/libwebrtc/modules/desktop_capture/linux/wayland/screencast_portal.cc b/third_party/libwebrtc/modules/desktop_capture/linux/wayland/screencast_portal.cc
new file mode 100644
index 0000000000..8e45af7e24
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/linux/wayland/screencast_portal.cc
@@ -0,0 +1,455 @@
+/*
+ * Copyright 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/linux/wayland/screencast_portal.h"
+
+#include <gio/gunixfdlist.h>
+#include <glib-object.h>
+
+#include "modules/desktop_capture/linux/wayland/scoped_glib.h"
+#include "modules/desktop_capture/linux/wayland/xdg_desktop_portal_utils.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+namespace {
+
+using xdg_portal::kScreenCastInterfaceName;
+using xdg_portal::PrepareSignalHandle;
+using xdg_portal::RequestResponse;
+using xdg_portal::RequestSessionProxy;
+using xdg_portal::SetupRequestResponseSignal;
+using xdg_portal::SetupSessionRequestHandlers;
+using xdg_portal::StartSessionRequest;
+using xdg_portal::TearDownSession;
+using xdg_portal::RequestResponseFromPortalResponse;
+
+} // namespace
+
+ScreenCastPortal::ScreenCastPortal(
+ ScreenCastPortal::CaptureSourceType source_type,
+ PortalNotifier* notifier)
+ : ScreenCastPortal(source_type,
+ notifier,
+ OnProxyRequested,
+ OnSourcesRequestResponseSignal,
+ this) {}
+
+ScreenCastPortal::ScreenCastPortal(
+ CaptureSourceType source_type,
+ PortalNotifier* notifier,
+ ProxyRequestResponseHandler proxy_request_response_handler,
+ SourcesRequestResponseSignalHandler sources_request_response_signal_handler,
+ gpointer user_data)
+ : notifier_(notifier),
+ capture_source_type_(source_type),
+ proxy_request_response_handler_(proxy_request_response_handler),
+ sources_request_response_signal_handler_(
+ sources_request_response_signal_handler),
+ user_data_(user_data) {}
+
+ScreenCastPortal::~ScreenCastPortal() {
+ Cleanup();
+}
+
+void ScreenCastPortal::Cleanup() {
+ UnsubscribeSignalHandlers();
+ TearDownSession(std::move(session_handle_), proxy_, cancellable_,
+ connection_);
+ session_handle_ = "";
+ cancellable_ = nullptr;
+ proxy_ = nullptr;
+
+ if (pw_fd_ != -1) {
+ close(pw_fd_);
+ }
+}
+
+void ScreenCastPortal::UnsubscribeSignalHandlers() {
+ if (start_request_signal_id_) {
+ g_dbus_connection_signal_unsubscribe(connection_, start_request_signal_id_);
+ start_request_signal_id_ = 0;
+ }
+
+ if (sources_request_signal_id_) {
+ g_dbus_connection_signal_unsubscribe(connection_,
+ sources_request_signal_id_);
+ sources_request_signal_id_ = 0;
+ }
+
+ if (session_request_signal_id_) {
+ g_dbus_connection_signal_unsubscribe(connection_,
+ session_request_signal_id_);
+ session_request_signal_id_ = 0;
+ }
+}
+
+void ScreenCastPortal::SetSessionDetails(
+ const xdg_portal::SessionDetails& session_details) {
+ if (session_details.proxy) {
+ proxy_ = session_details.proxy;
+ connection_ = g_dbus_proxy_get_connection(proxy_);
+ }
+ if (session_details.cancellable) {
+ cancellable_ = session_details.cancellable;
+ }
+ if (!session_details.session_handle.empty()) {
+ session_handle_ = session_details.session_handle;
+ }
+ if (session_details.pipewire_stream_node_id) {
+ pw_stream_node_id_ = session_details.pipewire_stream_node_id;
+ }
+}
+
+void ScreenCastPortal::Start() {
+ cancellable_ = g_cancellable_new();
+ RequestSessionProxy(kScreenCastInterfaceName, proxy_request_response_handler_,
+ cancellable_, this);
+}
+
+xdg_portal::SessionDetails ScreenCastPortal::GetSessionDetails() {
+ return {}; // No-op
+}
+
+void ScreenCastPortal::OnPortalDone(RequestResponse result) {
+ notifier_->OnScreenCastRequestResult(result, pw_stream_node_id_, pw_fd_);
+ if (result != RequestResponse::kSuccess) {
+ Cleanup();
+ }
+}
+
+// static
+void ScreenCastPortal::OnProxyRequested(GObject* gobject,
+ GAsyncResult* result,
+ gpointer user_data) {
+ static_cast<ScreenCastPortal*>(user_data)->RequestSessionUsingProxy(result);
+}
+
+void ScreenCastPortal::RequestSession(GDBusProxy* proxy) {
+ proxy_ = proxy;
+ connection_ = g_dbus_proxy_get_connection(proxy_);
+ SetupSessionRequestHandlers(
+ "webrtc", OnSessionRequested, OnSessionRequestResponseSignal, connection_,
+ proxy_, cancellable_, portal_handle_, session_request_signal_id_, this);
+}
+
+// static
+void ScreenCastPortal::OnSessionRequested(GDBusProxy* proxy,
+ GAsyncResult* result,
+ gpointer user_data) {
+ static_cast<ScreenCastPortal*>(user_data)->OnSessionRequestResult(proxy,
+ result);
+}
+
+// static
+void ScreenCastPortal::OnSessionRequestResponseSignal(
+ GDBusConnection* connection,
+ const char* sender_name,
+ const char* object_path,
+ const char* interface_name,
+ const char* signal_name,
+ GVariant* parameters,
+ gpointer user_data) {
+ ScreenCastPortal* that = static_cast<ScreenCastPortal*>(user_data);
+ RTC_DCHECK(that);
+ that->RegisterSessionClosedSignalHandler(
+ OnSessionClosedSignal, parameters, that->connection_,
+ that->session_handle_, that->session_closed_signal_id_);
+
+  // Do not continue if we did not get a session_handle back. The call above
+  // already notifies the capturer of the failure, but without this check we
+  // would issue the follow-up request anyway and crash on it.
+ if (!that->session_handle_.empty()) {
+ that->SourcesRequest();
+ }
+}
+
+// static
+void ScreenCastPortal::OnSessionClosedSignal(GDBusConnection* connection,
+ const char* sender_name,
+ const char* object_path,
+ const char* interface_name,
+ const char* signal_name,
+ GVariant* parameters,
+ gpointer user_data) {
+ ScreenCastPortal* that = static_cast<ScreenCastPortal*>(user_data);
+ RTC_DCHECK(that);
+
+ RTC_LOG(LS_INFO) << "Received closed signal from session.";
+
+ that->notifier_->OnScreenCastSessionClosed();
+
+ // Unsubscribe from the signal and free the session handle to avoid calling
+ // Session::Close from the destructor since it's already closed
+ g_dbus_connection_signal_unsubscribe(that->connection_,
+ that->session_closed_signal_id_);
+}
+
+void ScreenCastPortal::SourcesRequest() {
+ GVariantBuilder builder;
+ Scoped<char> variant_string;
+
+ g_variant_builder_init(&builder, G_VARIANT_TYPE_VARDICT);
+  // Request the source types (monitor and/or window) we want to capture.
+ g_variant_builder_add(
+ &builder, "{sv}", "types",
+ g_variant_new_uint32(static_cast<uint32_t>(capture_source_type_)));
+ // We don't want to allow selection of multiple sources.
+ g_variant_builder_add(&builder, "{sv}", "multiple",
+ g_variant_new_boolean(false));
+
+ Scoped<GVariant> cursorModesVariant(
+ g_dbus_proxy_get_cached_property(proxy_, "AvailableCursorModes"));
+ if (cursorModesVariant.get()) {
+ uint32_t modes = 0;
+ g_variant_get(cursorModesVariant.get(), "u", &modes);
+ // Make request only if this mode is advertised by the portal
+ // implementation.
+ if (modes & static_cast<uint32_t>(cursor_mode_)) {
+ g_variant_builder_add(
+ &builder, "{sv}", "cursor_mode",
+ g_variant_new_uint32(static_cast<uint32_t>(cursor_mode_)));
+ }
+ }
+
+ Scoped<GVariant> versionVariant(
+ g_dbus_proxy_get_cached_property(proxy_, "version"));
+ if (versionVariant.get()) {
+ uint32_t version = 0;
+ g_variant_get(versionVariant.get(), "u", &version);
+ // Make request only if xdg-desktop-portal has required API version
+ if (version >= 4) {
+ g_variant_builder_add(
+ &builder, "{sv}", "persist_mode",
+ g_variant_new_uint32(static_cast<uint32_t>(persist_mode_)));
+ if (!restore_token_.empty()) {
+ g_variant_builder_add(&builder, "{sv}", "restore_token",
+ g_variant_new_string(restore_token_.c_str()));
+ }
+ }
+ }
+
+ variant_string = g_strdup_printf("webrtc%d", g_random_int_range(0, G_MAXINT));
+ g_variant_builder_add(&builder, "{sv}", "handle_token",
+ g_variant_new_string(variant_string.get()));
+
+ sources_handle_ = PrepareSignalHandle(variant_string.get(), connection_);
+ sources_request_signal_id_ = SetupRequestResponseSignal(
+ sources_handle_.c_str(), sources_request_response_signal_handler_,
+ user_data_, connection_);
+
+ RTC_LOG(LS_INFO) << "Requesting sources from the screen cast session.";
+ g_dbus_proxy_call(
+ proxy_, "SelectSources",
+ g_variant_new("(oa{sv})", session_handle_.c_str(), &builder),
+ G_DBUS_CALL_FLAGS_NONE, /*timeout=*/-1, cancellable_,
+ reinterpret_cast<GAsyncReadyCallback>(OnSourcesRequested), this);
+}
+
+// static
+void ScreenCastPortal::OnSourcesRequested(GDBusProxy* proxy,
+ GAsyncResult* result,
+ gpointer user_data) {
+ ScreenCastPortal* that = static_cast<ScreenCastPortal*>(user_data);
+ RTC_DCHECK(that);
+
+ Scoped<GError> error;
+ Scoped<GVariant> variant(
+ g_dbus_proxy_call_finish(proxy, result, error.receive()));
+ if (!variant) {
+ if (g_error_matches(error.get(), G_IO_ERROR, G_IO_ERROR_CANCELLED))
+ return;
+ RTC_LOG(LS_ERROR) << "Failed to request the sources: " << error->message;
+ that->OnPortalDone(RequestResponse::kError);
+ return;
+ }
+
+ RTC_LOG(LS_INFO) << "Sources requested from the screen cast session.";
+
+ Scoped<char> handle;
+ g_variant_get_child(variant.get(), 0, "o", handle.receive());
+ if (!handle) {
+ RTC_LOG(LS_ERROR) << "Failed to initialize the screen cast session.";
+ if (that->sources_request_signal_id_) {
+ g_dbus_connection_signal_unsubscribe(that->connection_,
+ that->sources_request_signal_id_);
+ that->sources_request_signal_id_ = 0;
+ }
+ that->OnPortalDone(RequestResponse::kError);
+ return;
+ }
+
+ RTC_LOG(LS_INFO) << "Subscribed to sources signal.";
+}
+
+// static
+void ScreenCastPortal::OnSourcesRequestResponseSignal(
+ GDBusConnection* connection,
+ const char* sender_name,
+ const char* object_path,
+ const char* interface_name,
+ const char* signal_name,
+ GVariant* parameters,
+ gpointer user_data) {
+ ScreenCastPortal* that = static_cast<ScreenCastPortal*>(user_data);
+ RTC_DCHECK(that);
+
+ RTC_LOG(LS_INFO) << "Received sources signal from session.";
+
+ uint32_t portal_response;
+ g_variant_get(parameters, "(u@a{sv})", &portal_response, nullptr);
+ if (portal_response) {
+ RTC_LOG(LS_ERROR)
+ << "Failed to select sources for the screen cast session.";
+ that->OnPortalDone(RequestResponse::kError);
+ return;
+ }
+
+ that->StartRequest();
+}
+
+void ScreenCastPortal::StartRequest() {
+ StartSessionRequest("webrtc", session_handle_, OnStartRequestResponseSignal,
+ OnStartRequested, proxy_, connection_, cancellable_,
+ start_request_signal_id_, start_handle_, this);
+}
+
+// static
+void ScreenCastPortal::OnStartRequested(GDBusProxy* proxy,
+ GAsyncResult* result,
+ gpointer user_data) {
+ static_cast<ScreenCastPortal*>(user_data)->OnStartRequestResult(proxy,
+ result);
+}
+
+// static
+void ScreenCastPortal::OnStartRequestResponseSignal(GDBusConnection* connection,
+ const char* sender_name,
+ const char* object_path,
+ const char* interface_name,
+ const char* signal_name,
+ GVariant* parameters,
+ gpointer user_data) {
+ ScreenCastPortal* that = static_cast<ScreenCastPortal*>(user_data);
+ RTC_DCHECK(that);
+
+ RTC_LOG(LS_INFO) << "Start signal received.";
+ uint32_t portal_response;
+ Scoped<GVariant> response_data;
+ Scoped<GVariantIter> iter;
+ Scoped<char> restore_token;
+ g_variant_get(parameters, "(u@a{sv})", &portal_response,
+ response_data.receive());
+ if (portal_response || !response_data) {
+ RTC_LOG(LS_ERROR) << "Failed to start the screen cast session.";
+ that->OnPortalDone(RequestResponseFromPortalResponse(portal_response));
+ return;
+ }
+
+ // Array of PipeWire streams. See
+ // https://github.com/flatpak/xdg-desktop-portal/blob/master/data/org.freedesktop.portal.ScreenCast.xml
+ // documentation for <method name="Start">.
+ if (g_variant_lookup(response_data.get(), "streams", "a(ua{sv})",
+ iter.receive())) {
+ Scoped<GVariant> variant;
+
+ while (g_variant_iter_next(iter.get(), "@(ua{sv})", variant.receive())) {
+ uint32_t stream_id;
+ uint32_t type;
+ Scoped<GVariant> options;
+
+ g_variant_get(variant.get(), "(u@a{sv})", &stream_id, options.receive());
+ RTC_DCHECK(options.get());
+
+ if (g_variant_lookup(options.get(), "source_type", "u", &type)) {
+ that->capture_source_type_ =
+ static_cast<ScreenCastPortal::CaptureSourceType>(type);
+ }
+
+ that->pw_stream_node_id_ = stream_id;
+
+ break;
+ }
+ }
+
+ if (g_variant_lookup(response_data.get(), "restore_token", "s",
+ restore_token.receive())) {
+ that->restore_token_ = restore_token.get();
+ }
+
+ that->OpenPipeWireRemote();
+}
+
+uint32_t ScreenCastPortal::pipewire_stream_node_id() {
+ return pw_stream_node_id_;
+}
+
+void ScreenCastPortal::SetPersistMode(ScreenCastPortal::PersistMode mode) {
+ persist_mode_ = mode;
+}
+
+void ScreenCastPortal::SetRestoreToken(const std::string& token) {
+ restore_token_ = token;
+}
+
+std::string ScreenCastPortal::RestoreToken() const {
+ return restore_token_;
+}
+
+void ScreenCastPortal::OpenPipeWireRemote() {
+ GVariantBuilder builder;
+ g_variant_builder_init(&builder, G_VARIANT_TYPE_VARDICT);
+
+ RTC_LOG(LS_INFO) << "Opening the PipeWire remote.";
+
+ g_dbus_proxy_call_with_unix_fd_list(
+ proxy_, "OpenPipeWireRemote",
+ g_variant_new("(oa{sv})", session_handle_.c_str(), &builder),
+ G_DBUS_CALL_FLAGS_NONE, /*timeout=*/-1, /*fd_list=*/nullptr, cancellable_,
+ reinterpret_cast<GAsyncReadyCallback>(OnOpenPipeWireRemoteRequested),
+ this);
+}
+
+// static
+void ScreenCastPortal::OnOpenPipeWireRemoteRequested(GDBusProxy* proxy,
+ GAsyncResult* result,
+ gpointer user_data) {
+ ScreenCastPortal* that = static_cast<ScreenCastPortal*>(user_data);
+ RTC_DCHECK(that);
+
+ Scoped<GError> error;
+ Scoped<GUnixFDList> outlist;
+ Scoped<GVariant> variant(g_dbus_proxy_call_with_unix_fd_list_finish(
+ proxy, outlist.receive(), result, error.receive()));
+ if (!variant) {
+ if (g_error_matches(error.get(), G_IO_ERROR, G_IO_ERROR_CANCELLED))
+ return;
+ RTC_LOG(LS_ERROR) << "Failed to open the PipeWire remote: "
+ << error->message;
+ that->OnPortalDone(RequestResponse::kError);
+ return;
+ }
+
+ int32_t index;
+ g_variant_get(variant.get(), "(h)", &index);
+
+ that->pw_fd_ = g_unix_fd_list_get(outlist.get(), index, error.receive());
+
+ if (that->pw_fd_ == -1) {
+ RTC_LOG(LS_ERROR) << "Failed to get file descriptor from the list: "
+ << error->message;
+ that->OnPortalDone(RequestResponse::kError);
+ return;
+ }
+
+ that->OnPortalDone(RequestResponse::kSuccess);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/linux/wayland/screencast_portal.h b/third_party/libwebrtc/modules/desktop_capture/linux/wayland/screencast_portal.h
new file mode 100644
index 0000000000..7970710c41
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/linux/wayland/screencast_portal.h
@@ -0,0 +1,213 @@
+/*
+ * Copyright 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_DESKTOP_CAPTURE_LINUX_WAYLAND_SCREENCAST_PORTAL_H_
+#define MODULES_DESKTOP_CAPTURE_LINUX_WAYLAND_SCREENCAST_PORTAL_H_
+
+#include <gio/gio.h>
+
+#include <string>
+
+#include "modules/desktop_capture/linux/wayland/portal_request_response.h"
+#include "modules/desktop_capture/linux/wayland/screen_capture_portal_interface.h"
+#include "modules/desktop_capture/linux/wayland/xdg_desktop_portal_utils.h"
+#include "modules/desktop_capture/linux/wayland/xdg_session_details.h"
+
+namespace webrtc {
+
+class ScreenCastPortal : public xdg_portal::ScreenCapturePortalInterface {
+ public:
+ using ProxyRequestResponseHandler = void (*)(GObject* object,
+ GAsyncResult* result,
+ gpointer user_data);
+
+ using SourcesRequestResponseSignalHandler =
+ void (*)(GDBusConnection* connection,
+ const char* sender_name,
+ const char* object_path,
+ const char* interface_name,
+ const char* signal_name,
+ GVariant* parameters,
+ gpointer user_data);
+
+ // Values are set based on source type property in
+ // xdg-desktop-portal/screencast
+ // https://github.com/flatpak/xdg-desktop-portal/blob/master/data/org.freedesktop.portal.ScreenCast.xml
+ enum class CaptureSourceType : uint32_t {
+ kScreen = 0b01,
+ kWindow = 0b10,
+ kAnyScreenContent = kScreen | kWindow
+ };
+
+ // Values are set based on cursor mode property in
+ // xdg-desktop-portal/screencast
+ // https://github.com/flatpak/xdg-desktop-portal/blob/master/data/org.freedesktop.portal.ScreenCast.xml
+ enum class CursorMode : uint32_t {
+ // Mouse cursor will not be included in any form
+ kHidden = 0b01,
+ // Mouse cursor will be part of the screen content
+ kEmbedded = 0b10,
+    // Mouse cursor information will be sent separately in the form of metadata
+ kMetadata = 0b100
+ };
+
+ // Values are set based on persist mode property in
+ // xdg-desktop-portal/screencast
+ // https://github.com/flatpak/xdg-desktop-portal/blob/master/data/org.freedesktop.portal.ScreenCast.xml
+ enum class PersistMode : uint32_t {
+    // Do not allow the stream to be restored
+ kDoNotPersist = 0b00,
+ // The restore token is valid as long as the application is alive. It's
+ // stored in memory and revoked when the application closes its DBus
+ // connection
+ kTransient = 0b01,
+    // The restore token is stored on disk and is valid until the user manually
+ // revokes it
+ kPersistent = 0b10
+ };
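+
+  // Editor's sketch of the restore flow these modes enable (`portal` and the
+  // token variable are illustrative):
+  //
+  //   portal->SetPersistMode(ScreenCastPortal::PersistMode::kTransient);
+  //   portal->SetRestoreToken(saved_token);  // optional, from a prior run
+  //   portal->Start();
+  //   ...
+  //   saved_token = portal->RestoreToken();  // keep for the next session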
+
+ // Interface that must be implemented by the ScreenCastPortal consumers.
+ class PortalNotifier {
+ public:
+ virtual void OnScreenCastRequestResult(xdg_portal::RequestResponse result,
+ uint32_t stream_node_id,
+ int fd) = 0;
+ virtual void OnScreenCastSessionClosed() = 0;
+
+ protected:
+ PortalNotifier() = default;
+ virtual ~PortalNotifier() = default;
+ };
+
+ explicit ScreenCastPortal(ScreenCastPortal::CaptureSourceType source_type,
+ PortalNotifier* notifier);
+ explicit ScreenCastPortal(
+ CaptureSourceType source_type,
+ PortalNotifier* notifier,
+ ProxyRequestResponseHandler proxy_request_response_handler,
+ SourcesRequestResponseSignalHandler
+ sources_request_response_signal_handler,
+ gpointer user_data);
+ ~ScreenCastPortal();
+
+  // Initializes ScreenCastPortal with a series of DBus calls that try to
+  // obtain all the required information, such as the PipeWire file descriptor
+  // and the PipeWire stream node ID.
+  //
+  // The PortalNotifier is told whether the communication with
+  // xdg-desktop-portal succeeded; only then can all the required information
+  // for continuing to work with PipeWire be retrieved.
+ void Start() override;
+ xdg_portal::SessionDetails GetSessionDetails() override;
+
+ // Method to notify the reason for failure of a portal request.
+ void OnPortalDone(xdg_portal::RequestResponse result) override;
+
+ // Sends a create session request to the portal.
+ void RequestSession(GDBusProxy* proxy) override;
+ void Cleanup();
+
+  // Methods used by the remote desktop portal to set up a session shared with
+  // the screen cast portal.
+ void SetSessionDetails(const xdg_portal::SessionDetails& session_details);
+ uint32_t pipewire_stream_node_id();
+ void SourcesRequest();
+ void OpenPipeWireRemote();
+
+ // ScreenCast specific methods for stream restoration
+ void SetPersistMode(ScreenCastPortal::PersistMode mode);
+ void SetRestoreToken(const std::string& token);
+ std::string RestoreToken() const;
+
+ private:
+ PortalNotifier* notifier_;
+
+  // The PipeWire stream node ID of the stream we will connect to.
+  uint32_t pw_stream_node_id_ = 0;
+  // A file descriptor of the PipeWire socket.
+  int pw_fd_ = -1;
+  // A restore token that can be used to restore a previous session.
+  std::string restore_token_;
+
+ CaptureSourceType capture_source_type_ =
+ ScreenCastPortal::CaptureSourceType::kScreen;
+
+ CursorMode cursor_mode_ = ScreenCastPortal::CursorMode::kMetadata;
+
+ PersistMode persist_mode_ = ScreenCastPortal::PersistMode::kDoNotPersist;
+
+ ProxyRequestResponseHandler proxy_request_response_handler_;
+ SourcesRequestResponseSignalHandler sources_request_response_signal_handler_;
+ gpointer user_data_;
+
+ GDBusConnection* connection_ = nullptr;
+ GDBusProxy* proxy_ = nullptr;
+ GCancellable* cancellable_ = nullptr;
+ std::string portal_handle_;
+ std::string session_handle_;
+ std::string sources_handle_;
+ std::string start_handle_;
+ guint session_request_signal_id_ = 0;
+ guint sources_request_signal_id_ = 0;
+ guint start_request_signal_id_ = 0;
+ guint session_closed_signal_id_ = 0;
+
+ void UnsubscribeSignalHandlers();
+ static void OnProxyRequested(GObject* object,
+ GAsyncResult* result,
+ gpointer user_data);
+ static void OnSessionRequested(GDBusProxy* proxy,
+ GAsyncResult* result,
+ gpointer user_data);
+ static void OnSessionRequestResponseSignal(GDBusConnection* connection,
+ const char* sender_name,
+ const char* object_path,
+ const char* interface_name,
+ const char* signal_name,
+ GVariant* parameters,
+ gpointer user_data);
+ static void OnSessionClosedSignal(GDBusConnection* connection,
+ const char* sender_name,
+ const char* object_path,
+ const char* interface_name,
+ const char* signal_name,
+ GVariant* parameters,
+ gpointer user_data);
+ static void OnSourcesRequested(GDBusProxy* proxy,
+ GAsyncResult* result,
+ gpointer user_data);
+ static void OnSourcesRequestResponseSignal(GDBusConnection* connection,
+ const char* sender_name,
+ const char* object_path,
+ const char* interface_name,
+ const char* signal_name,
+ GVariant* parameters,
+ gpointer user_data);
+
+ void StartRequest();
+ static void OnStartRequested(GDBusProxy* proxy,
+ GAsyncResult* result,
+ gpointer user_data);
+ static void OnStartRequestResponseSignal(GDBusConnection* connection,
+ const char* sender_name,
+ const char* object_path,
+ const char* interface_name,
+ const char* signal_name,
+ GVariant* parameters,
+ gpointer user_data);
+
+ static void OnOpenPipeWireRemoteRequested(GDBusProxy* proxy,
+ GAsyncResult* result,
+ gpointer user_data);
+};
+
+} // namespace webrtc
+
+#endif // MODULES_DESKTOP_CAPTURE_LINUX_WAYLAND_SCREENCAST_PORTAL_H_
diff --git a/third_party/libwebrtc/modules/desktop_capture/linux/wayland/screencast_stream_utils.cc b/third_party/libwebrtc/modules/desktop_capture/linux/wayland/screencast_stream_utils.cc
new file mode 100644
index 0000000000..dc0784791d
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/linux/wayland/screencast_stream_utils.cc
@@ -0,0 +1,132 @@
+/*
+ * Copyright 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/linux/wayland/screencast_stream_utils.h"
+
+#include <libdrm/drm_fourcc.h>
+#include <pipewire/pipewire.h>
+#include <spa/param/video/format-utils.h>
+
+#include <string>
+
+#include "rtc_base/string_to_number.h"
+
+#if !PW_CHECK_VERSION(0, 3, 29)
+#define SPA_POD_PROP_FLAG_MANDATORY (1u << 3)
+#endif
+#if !PW_CHECK_VERSION(0, 3, 33)
+#define SPA_POD_PROP_FLAG_DONT_FIXATE (1u << 4)
+#endif
+
+namespace webrtc {
+
+PipeWireThreadLoopLock::PipeWireThreadLoopLock(pw_thread_loop* loop)
+ : loop_(loop) {
+ pw_thread_loop_lock(loop_);
+}
+
+PipeWireThreadLoopLock::~PipeWireThreadLoopLock() {
+ pw_thread_loop_unlock(loop_);
+}
+
+PipeWireVersion PipeWireVersion::Parse(const absl::string_view& version) {
+ std::vector<absl::string_view> parsed_version = rtc::split(version, '.');
+
+ if (parsed_version.size() != 3) {
+ return {};
+ }
+
+ absl::optional<int> major = rtc::StringToNumber<int>(parsed_version.at(0));
+ absl::optional<int> minor = rtc::StringToNumber<int>(parsed_version.at(1));
+ absl::optional<int> micro = rtc::StringToNumber<int>(parsed_version.at(2));
+
+ // Return an invalid version if we failed to parse it
+ if (!major || !minor || !micro) {
+ return {};
+ }
+
+ return {major.value(), minor.value(), micro.value()};
+}
+
+bool PipeWireVersion::operator>=(const PipeWireVersion& other) {
+ if (!major && !minor && !micro) {
+ return false;
+ }
+
+ return std::tie(major, minor, micro) >=
+ std::tie(other.major, other.minor, other.micro);
+}
+
+bool PipeWireVersion::operator<=(const PipeWireVersion& other) {
+ if (!major && !minor && !micro) {
+ return false;
+ }
+
+ return std::tie(major, minor, micro) <=
+ std::tie(other.major, other.minor, other.micro);
+}
+
+spa_pod* BuildFormat(spa_pod_builder* builder,
+ uint32_t format,
+ const std::vector<uint64_t>& modifiers,
+ const struct spa_rectangle* resolution) {
+ spa_pod_frame frames[2];
+ spa_rectangle pw_min_screen_bounds = spa_rectangle{1, 1};
+ spa_rectangle pw_max_screen_bounds = spa_rectangle{UINT32_MAX, UINT32_MAX};
+
+ spa_pod_builder_push_object(builder, &frames[0], SPA_TYPE_OBJECT_Format,
+ SPA_PARAM_EnumFormat);
+ spa_pod_builder_add(builder, SPA_FORMAT_mediaType,
+ SPA_POD_Id(SPA_MEDIA_TYPE_video), 0);
+ spa_pod_builder_add(builder, SPA_FORMAT_mediaSubtype,
+ SPA_POD_Id(SPA_MEDIA_SUBTYPE_raw), 0);
+ spa_pod_builder_add(builder, SPA_FORMAT_VIDEO_format, SPA_POD_Id(format), 0);
+
+ if (modifiers.size()) {
+ if (modifiers.size() == 1 && modifiers[0] == DRM_FORMAT_MOD_INVALID) {
+ spa_pod_builder_prop(builder, SPA_FORMAT_VIDEO_modifier,
+ SPA_POD_PROP_FLAG_MANDATORY);
+ spa_pod_builder_long(builder, modifiers[0]);
+ } else {
+ spa_pod_builder_prop(
+ builder, SPA_FORMAT_VIDEO_modifier,
+ SPA_POD_PROP_FLAG_MANDATORY | SPA_POD_PROP_FLAG_DONT_FIXATE);
+ spa_pod_builder_push_choice(builder, &frames[1], SPA_CHOICE_Enum, 0);
+
+ // Add all modifiers from the array
+ bool first = true;
+ for (int64_t val : modifiers) {
+ spa_pod_builder_long(builder, val);
+ // Add the first modifier twice as the very first value is the default
+ // option
+ if (first) {
+ spa_pod_builder_long(builder, val);
+ first = false;
+ }
+ }
+ spa_pod_builder_pop(builder, &frames[1]);
+ }
+ }
+
+ if (resolution) {
+ spa_pod_builder_add(builder, SPA_FORMAT_VIDEO_size,
+ SPA_POD_Rectangle(resolution), 0);
+ } else {
+ spa_pod_builder_add(builder, SPA_FORMAT_VIDEO_size,
+ SPA_POD_CHOICE_RANGE_Rectangle(&pw_min_screen_bounds,
+ &pw_min_screen_bounds,
+ &pw_max_screen_bounds),
+ 0);
+ }
+
+ return static_cast<spa_pod*>(spa_pod_builder_pop(builder, &frames[0]));
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/linux/wayland/screencast_stream_utils.h b/third_party/libwebrtc/modules/desktop_capture/linux/wayland/screencast_stream_utils.h
new file mode 100644
index 0000000000..70262c2e39
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/linux/wayland/screencast_stream_utils.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_DESKTOP_CAPTURE_LINUX_WAYLAND_SCREENCAST_STREAM_UTILS_H_
+#define MODULES_DESKTOP_CAPTURE_LINUX_WAYLAND_SCREENCAST_STREAM_UTILS_H_
+
+#include <stdint.h>
+
+#include <string>
+#include <vector>
+
+#include "rtc_base/string_encode.h"
+
+struct pw_thread_loop;
+struct spa_pod;
+struct spa_pod_builder;
+struct spa_rectangle;
+
+namespace webrtc {
+
+// Locks the pw_thread_loop for the duration of the current scope
+class PipeWireThreadLoopLock {
+ public:
+ explicit PipeWireThreadLoopLock(pw_thread_loop* loop);
+ ~PipeWireThreadLoopLock();
+
+ private:
+ pw_thread_loop* const loop_;
+};
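+
+// A minimal usage sketch (illustrative only; `loop` stands for an already
+// created pw_thread_loop):
+//   {
+//     PipeWireThreadLoopLock lock(loop);
+//     // ... calls that require the thread loop to be locked ...
+//   }  // The loop is unlocked again when `lock` goes out of scope.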
+
+struct PipeWireVersion {
+ static PipeWireVersion Parse(const absl::string_view& version);
+
+ // Returns whether the current version is newer than or equal to `other`.
+ bool operator>=(const PipeWireVersion& other);
+ // Returns whether the current version is older than or equal to `other`.
+ bool operator<=(const PipeWireVersion& other);
+
+ int major = 0;
+ int minor = 0;
+ int micro = 0;
+};
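+
+// A usage sketch (illustrative only):
+//   PipeWireVersion version = PipeWireVersion::Parse("0.3.40");
+//   if (version >= PipeWireVersion{0, 3, 33}) {
+//     // DMA-BUF modifiers can be negotiated with this server.
+//   }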
+
+// Returns a spa_pod used to build the PipeWire stream format from the given
+// arguments. Modifiers are optional and, when present, they will be used
+// with the SPA_POD_PROP_FLAG_MANDATORY and SPA_POD_PROP_FLAG_DONT_FIXATE
+// flags.
+spa_pod* BuildFormat(spa_pod_builder* builder,
+ uint32_t format,
+ const std::vector<uint64_t>& modifiers,
+ const struct spa_rectangle* resolution);
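+
+// A minimal sketch of how BuildFormat() is typically driven (illustrative
+// only; SPA_VIDEO_FORMAT_BGRA comes from PipeWire's spa headers):
+//   uint8_t buffer[2048] = {};
+//   spa_pod_builder builder = spa_pod_builder{buffer, sizeof(buffer)};
+//   spa_pod* format = BuildFormat(&builder, SPA_VIDEO_FORMAT_BGRA,
+//                                 /*modifiers=*/{}, /*resolution=*/nullptr);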
+
+} // namespace webrtc
+
+#endif // MODULES_DESKTOP_CAPTURE_LINUX_WAYLAND_SCREENCAST_STREAM_UTILS_H_
diff --git a/third_party/libwebrtc/modules/desktop_capture/linux/wayland/shared_screencast_stream.cc b/third_party/libwebrtc/modules/desktop_capture/linux/wayland/shared_screencast_stream.cc
new file mode 100644
index 0000000000..5cbeaee9bf
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/linux/wayland/shared_screencast_stream.cc
@@ -0,0 +1,870 @@
+/*
+ * Copyright 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/linux/wayland/shared_screencast_stream.h"
+
+#include <fcntl.h>
+#include <libdrm/drm_fourcc.h>
+#include <pipewire/pipewire.h>
+#include <spa/param/video/format-utils.h>
+#include <sys/mman.h>
+
+#include <vector>
+
+#include "absl/memory/memory.h"
+#include "modules/desktop_capture/linux/wayland/egl_dmabuf.h"
+#include "modules/desktop_capture/linux/wayland/screencast_stream_utils.h"
+#include "modules/desktop_capture/screen_capture_frame_queue.h"
+#include "modules/desktop_capture/shared_desktop_frame.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/sanitizer.h"
+#include "rtc_base/synchronization/mutex.h"
+
+#if defined(WEBRTC_DLOPEN_PIPEWIRE)
+#include "modules/desktop_capture/linux/wayland/pipewire_stubs.h"
+using modules_desktop_capture_linux_wayland::InitializeStubs;
+using modules_desktop_capture_linux_wayland::kModuleDrm;
+using modules_desktop_capture_linux_wayland::kModulePipewire;
+using modules_desktop_capture_linux_wayland::StubPathMap;
+#endif // defined(WEBRTC_DLOPEN_PIPEWIRE)
+
+namespace webrtc {
+
+const int kBytesPerPixel = 4;
+
+#if defined(WEBRTC_DLOPEN_PIPEWIRE)
+const char kPipeWireLib[] = "libpipewire-0.3.so.0";
+const char kDrmLib[] = "libdrm.so.2";
+#endif
+
+constexpr int kCursorBpp = 4;
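+// Size of the cursor metadata blob for a w x h cursor bitmap: the two fixed
+// metadata headers plus w * h * kCursorBpp bytes of pixel data. For example,
+// CursorMetaSize(64, 64) reserves room for a 64x64, 4-byte-per-pixel bitmap.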
+constexpr int CursorMetaSize(int w, int h) {
+ return (sizeof(struct spa_meta_cursor) + sizeof(struct spa_meta_bitmap) +
+ w * h * kCursorBpp);
+}
+
+constexpr PipeWireVersion kDmaBufMinVersion = {0, 3, 24};
+constexpr PipeWireVersion kDmaBufModifierMinVersion = {0, 3, 33};
+constexpr PipeWireVersion kDropSingleModifierMinVersion = {0, 3, 40};
+
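+// RAII wrapper around an mmap()ed buffer; the mapping is released with
+// munmap() when the wrapper goes out of scope.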
+class ScopedBuf {
+ public:
+ ScopedBuf() {}
+ ScopedBuf(uint8_t* map, int map_size, int fd)
+ : map_(map), map_size_(map_size), fd_(fd) {}
+ ~ScopedBuf() {
+ if (map_ != MAP_FAILED) {
+ munmap(map_, map_size_);
+ }
+ }
+
+ explicit operator bool() { return map_ != MAP_FAILED; }
+
+ void initialize(uint8_t* map, int map_size, int fd) {
+ map_ = map;
+ map_size_ = map_size;
+ fd_ = fd;
+ }
+
+ uint8_t* get() { return map_; }
+
+ protected:
+ uint8_t* map_ = static_cast<uint8_t*>(MAP_FAILED);
+ int map_size_;
+ int fd_;
+};
+
+class SharedScreenCastStreamPrivate {
+ public:
+ SharedScreenCastStreamPrivate();
+ ~SharedScreenCastStreamPrivate();
+
+ bool StartScreenCastStream(uint32_t stream_node_id,
+ int fd,
+ uint32_t width = 0,
+ uint32_t height = 0);
+ void UpdateScreenCastStreamResolution(uint32_t width, uint32_t height);
+ void StopScreenCastStream();
+ std::unique_ptr<DesktopFrame> CaptureFrame();
+ std::unique_ptr<MouseCursor> CaptureCursor();
+ DesktopVector CaptureCursorPosition();
+
+ private:
+ uint32_t pw_stream_node_id_ = 0;
+
+ DesktopSize stream_size_ = {};
+ DesktopSize frame_size_;
+
+ webrtc::Mutex queue_lock_;
+ ScreenCaptureFrameQueue<SharedDesktopFrame> queue_
+ RTC_GUARDED_BY(&queue_lock_);
+ std::unique_ptr<MouseCursor> mouse_cursor_;
+ DesktopVector mouse_cursor_position_ = DesktopVector(-1, -1);
+
+ int64_t modifier_;
+ std::unique_ptr<EglDmaBuf> egl_dmabuf_;
+ // List of modifiers we queried as supported by the graphics card/driver
+ std::vector<uint64_t> modifiers_;
+
+ // PipeWire types
+ struct pw_context* pw_context_ = nullptr;
+ struct pw_core* pw_core_ = nullptr;
+ struct pw_stream* pw_stream_ = nullptr;
+ struct pw_thread_loop* pw_main_loop_ = nullptr;
+ struct spa_source* renegotiate_ = nullptr;
+
+ spa_hook spa_core_listener_;
+ spa_hook spa_stream_listener_;
+
+ // A number used to verify all previous methods and the resulting
+ // events have been handled.
+ int server_version_sync_ = 0;
+ // Version of the running PipeWire server we communicate with
+ PipeWireVersion pw_server_version_;
+ // Version of the library used to run our code
+ PipeWireVersion pw_client_version_;
+
+ // Resolution parameters.
+ uint32_t width_ = 0;
+ uint32_t height_ = 0;
+ webrtc::Mutex resolution_lock_;
+ // Resolution changes are processed during buffer processing.
+ bool pending_resolution_change_ RTC_GUARDED_BY(&resolution_lock_) = false;
+
+ // event handlers
+ pw_core_events pw_core_events_ = {};
+ pw_stream_events pw_stream_events_ = {};
+
+ struct spa_video_info_raw spa_video_format_;
+
+ void ProcessBuffer(pw_buffer* buffer);
+ void ConvertRGBxToBGRx(uint8_t* frame, uint32_t size);
+
+ // PipeWire callbacks
+ static void OnCoreError(void* data,
+ uint32_t id,
+ int seq,
+ int res,
+ const char* message);
+ static void OnCoreDone(void* user_data, uint32_t id, int seq);
+ static void OnCoreInfo(void* user_data, const pw_core_info* info);
+ static void OnStreamParamChanged(void* data,
+ uint32_t id,
+ const struct spa_pod* format);
+ static void OnStreamStateChanged(void* data,
+ pw_stream_state old_state,
+ pw_stream_state state,
+ const char* error_message);
+ static void OnStreamProcess(void* data);
+ // This will be invoked in case we fail to process a DMA-BUF PipeWire buffer
+ // using the negotiated stream parameters (modifier). We will drop the
+ // modifier we failed to use and try a different one, or fall back to shared
+ // memory buffers.
+ static void OnRenegotiateFormat(void* data, uint64_t);
+};
+
+void SharedScreenCastStreamPrivate::OnCoreError(void* data,
+ uint32_t id,
+ int seq,
+ int res,
+ const char* message) {
+ SharedScreenCastStreamPrivate* that =
+ static_cast<SharedScreenCastStreamPrivate*>(data);
+ RTC_DCHECK(that);
+
+ RTC_LOG(LS_ERROR) << "PipeWire remote error: " << message;
+}
+
+void SharedScreenCastStreamPrivate::OnCoreInfo(void* data,
+ const pw_core_info* info) {
+ SharedScreenCastStreamPrivate* stream =
+ static_cast<SharedScreenCastStreamPrivate*>(data);
+ RTC_DCHECK(stream);
+
+ stream->pw_server_version_ = PipeWireVersion::Parse(info->version);
+}
+
+void SharedScreenCastStreamPrivate::OnCoreDone(void* data,
+ uint32_t id,
+ int seq) {
+ const SharedScreenCastStreamPrivate* stream =
+ static_cast<SharedScreenCastStreamPrivate*>(data);
+ RTC_DCHECK(stream);
+
+ if (id == PW_ID_CORE && stream->server_version_sync_ == seq) {
+ pw_thread_loop_signal(stream->pw_main_loop_, false);
+ }
+}
+
+// static
+void SharedScreenCastStreamPrivate::OnStreamStateChanged(
+ void* data,
+ pw_stream_state old_state,
+ pw_stream_state state,
+ const char* error_message) {
+ SharedScreenCastStreamPrivate* that =
+ static_cast<SharedScreenCastStreamPrivate*>(data);
+ RTC_DCHECK(that);
+
+ switch (state) {
+ case PW_STREAM_STATE_ERROR:
+ RTC_LOG(LS_ERROR) << "PipeWire stream state error: " << error_message;
+ break;
+ case PW_STREAM_STATE_PAUSED:
+ case PW_STREAM_STATE_STREAMING:
+ case PW_STREAM_STATE_UNCONNECTED:
+ case PW_STREAM_STATE_CONNECTING:
+ break;
+ }
+}
+
+// static
+void SharedScreenCastStreamPrivate::OnStreamParamChanged(
+ void* data,
+ uint32_t id,
+ const struct spa_pod* format) {
+ SharedScreenCastStreamPrivate* that =
+ static_cast<SharedScreenCastStreamPrivate*>(data);
+ RTC_DCHECK(that);
+
+ RTC_LOG(LS_INFO) << "PipeWire stream format changed.";
+ if (!format || id != SPA_PARAM_Format) {
+ return;
+ }
+
+ spa_format_video_raw_parse(format, &that->spa_video_format_);
+
+ auto width = that->spa_video_format_.size.width;
+ auto height = that->spa_video_format_.size.height;
+ auto stride = SPA_ROUND_UP_N(width * kBytesPerPixel, 4);
+ auto size = height * stride;
+
+ that->stream_size_ = DesktopSize(width, height);
+
+ uint8_t buffer[1024] = {};
+ auto builder = spa_pod_builder{buffer, sizeof(buffer)};
+
+ // Setup buffers and meta header for new format.
+
+ // When SPA_FORMAT_VIDEO_modifier is present we can use DMA-BUFs as
+ // the server announces support for it.
+ // See https://github.com/PipeWire/pipewire/blob/master/doc/dma-buf.dox
+ const bool has_modifier =
+ spa_pod_find_prop(format, nullptr, SPA_FORMAT_VIDEO_modifier);
+ that->modifier_ =
+ has_modifier ? that->spa_video_format_.modifier : DRM_FORMAT_MOD_INVALID;
+ std::vector<const spa_pod*> params;
+ const int buffer_types =
+ has_modifier || (that->pw_server_version_ >= kDmaBufMinVersion)
+ ? (1 << SPA_DATA_DmaBuf) | (1 << SPA_DATA_MemFd) |
+ (1 << SPA_DATA_MemPtr)
+ : (1 << SPA_DATA_MemFd) | (1 << SPA_DATA_MemPtr);
+
+ params.push_back(reinterpret_cast<spa_pod*>(spa_pod_builder_add_object(
+ &builder, SPA_TYPE_OBJECT_ParamBuffers, SPA_PARAM_Buffers,
+ SPA_PARAM_BUFFERS_size, SPA_POD_Int(size), SPA_PARAM_BUFFERS_stride,
+ SPA_POD_Int(stride), SPA_PARAM_BUFFERS_buffers,
+ SPA_POD_CHOICE_RANGE_Int(8, 1, 32), SPA_PARAM_BUFFERS_dataType,
+ SPA_POD_CHOICE_FLAGS_Int(buffer_types))));
+ params.push_back(reinterpret_cast<spa_pod*>(spa_pod_builder_add_object(
+ &builder, SPA_TYPE_OBJECT_ParamMeta, SPA_PARAM_Meta, SPA_PARAM_META_type,
+ SPA_POD_Id(SPA_META_Header), SPA_PARAM_META_size,
+ SPA_POD_Int(sizeof(struct spa_meta_header)))));
+ params.push_back(reinterpret_cast<spa_pod*>(spa_pod_builder_add_object(
+ &builder, SPA_TYPE_OBJECT_ParamMeta, SPA_PARAM_Meta, SPA_PARAM_META_type,
+ SPA_POD_Id(SPA_META_VideoCrop), SPA_PARAM_META_size,
+ SPA_POD_Int(sizeof(struct spa_meta_region)))));
+ params.push_back(reinterpret_cast<spa_pod*>(spa_pod_builder_add_object(
+ &builder, SPA_TYPE_OBJECT_ParamMeta, SPA_PARAM_Meta, SPA_PARAM_META_type,
+ SPA_POD_Id(SPA_META_Cursor), SPA_PARAM_META_size,
+ SPA_POD_CHOICE_RANGE_Int(CursorMetaSize(64, 64), CursorMetaSize(1, 1),
+ CursorMetaSize(384, 384)))));
+ params.push_back(reinterpret_cast<spa_pod*>(spa_pod_builder_add_object(
+ &builder, SPA_TYPE_OBJECT_ParamMeta, SPA_PARAM_Meta, SPA_PARAM_META_type,
+ SPA_POD_Id(SPA_META_VideoDamage), SPA_PARAM_META_size,
+ SPA_POD_CHOICE_RANGE_Int(sizeof(struct spa_meta_region) * 16,
+ sizeof(struct spa_meta_region) * 1,
+ sizeof(struct spa_meta_region) * 16))));
+
+ pw_stream_update_params(that->pw_stream_, params.data(), params.size());
+}
+
+// static
+void SharedScreenCastStreamPrivate::OnStreamProcess(void* data) {
+ SharedScreenCastStreamPrivate* that =
+ static_cast<SharedScreenCastStreamPrivate*>(data);
+ RTC_DCHECK(that);
+
+ struct pw_buffer* next_buffer;
+ struct pw_buffer* buffer = nullptr;
+
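+ // Drain the queue and keep only the most recent buffer; every earlier
+ // buffer is handed straight back to PipeWire unprocessed.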
+ next_buffer = pw_stream_dequeue_buffer(that->pw_stream_);
+ while (next_buffer) {
+ buffer = next_buffer;
+ next_buffer = pw_stream_dequeue_buffer(that->pw_stream_);
+
+ if (next_buffer) {
+ pw_stream_queue_buffer(that->pw_stream_, buffer);
+ }
+ }
+
+ if (!buffer) {
+ return;
+ }
+
+ that->ProcessBuffer(buffer);
+
+ pw_stream_queue_buffer(that->pw_stream_, buffer);
+}
+
+void SharedScreenCastStreamPrivate::OnRenegotiateFormat(void* data, uint64_t) {
+ SharedScreenCastStreamPrivate* that =
+ static_cast<SharedScreenCastStreamPrivate*>(data);
+ RTC_DCHECK(that);
+
+ {
+ PipeWireThreadLoopLock thread_loop_lock(that->pw_main_loop_);
+
+ uint8_t buffer[2048] = {};
+
+ spa_pod_builder builder = spa_pod_builder{buffer, sizeof(buffer)};
+
+ std::vector<const spa_pod*> params;
+ struct spa_rectangle resolution =
+ SPA_RECTANGLE(that->width_, that->height_);
+
+ webrtc::MutexLock lock(&that->resolution_lock_);
+ for (uint32_t format : {SPA_VIDEO_FORMAT_BGRA, SPA_VIDEO_FORMAT_RGBA,
+ SPA_VIDEO_FORMAT_BGRx, SPA_VIDEO_FORMAT_RGBx}) {
+ if (!that->modifiers_.empty()) {
+ params.push_back(BuildFormat(
+ &builder, format, that->modifiers_,
+ that->pending_resolution_change_ ? &resolution : nullptr));
+ }
+ params.push_back(BuildFormat(
+ &builder, format, /*modifiers=*/{},
+ that->pending_resolution_change_ ? &resolution : nullptr));
+ }
+
+ pw_stream_update_params(that->pw_stream_, params.data(), params.size());
+ that->pending_resolution_change_ = false;
+ }
+}
+
+SharedScreenCastStreamPrivate::SharedScreenCastStreamPrivate() {}
+
+SharedScreenCastStreamPrivate::~SharedScreenCastStreamPrivate() {
+ if (pw_main_loop_) {
+ pw_thread_loop_stop(pw_main_loop_);
+ }
+
+ if (pw_stream_) {
+ pw_stream_destroy(pw_stream_);
+ }
+
+ if (pw_core_) {
+ pw_core_disconnect(pw_core_);
+ }
+
+ if (pw_context_) {
+ pw_context_destroy(pw_context_);
+ }
+
+ if (pw_main_loop_) {
+ pw_thread_loop_destroy(pw_main_loop_);
+ }
+}
+
+RTC_NO_SANITIZE("cfi-icall")
+bool SharedScreenCastStreamPrivate::StartScreenCastStream(
+ uint32_t stream_node_id,
+ int fd,
+ uint32_t width,
+ uint32_t height) {
+ width_ = width;
+ height_ = height;
+#if defined(WEBRTC_DLOPEN_PIPEWIRE)
+ StubPathMap paths;
+
+ // Check if the PipeWire and DRM libraries are available.
+ paths[kModulePipewire].push_back(kPipeWireLib);
+ paths[kModuleDrm].push_back(kDrmLib);
+
+ if (!InitializeStubs(paths)) {
+ RTC_LOG(LS_ERROR)
+ << "One of the following libraries is missing on your system:\n"
+ << " - PipeWire (" << kPipeWireLib << ")\n"
+ << " - drm (" << kDrmLib << ")";
+ return false;
+ }
+#endif // defined(WEBRTC_DLOPEN_PIPEWIRE)
+ egl_dmabuf_ = std::make_unique<EglDmaBuf>();
+
+ pw_stream_node_id_ = stream_node_id;
+
+ pw_init(/*argc=*/nullptr, /*argv=*/nullptr);
+
+ pw_main_loop_ = pw_thread_loop_new("pipewire-main-loop", nullptr);
+
+ pw_context_ =
+ pw_context_new(pw_thread_loop_get_loop(pw_main_loop_), nullptr, 0);
+ if (!pw_context_) {
+ RTC_LOG(LS_ERROR) << "Failed to create PipeWire context";
+ return false;
+ }
+
+ if (pw_thread_loop_start(pw_main_loop_) < 0) {
+ RTC_LOG(LS_ERROR) << "Failed to start main PipeWire loop";
+ return false;
+ }
+
+ pw_client_version_ = PipeWireVersion::Parse(pw_get_library_version());
+
+ // Initialize the event handlers, both remote-end (core) and stream-related.
+ pw_core_events_.version = PW_VERSION_CORE_EVENTS;
+ pw_core_events_.info = &OnCoreInfo;
+ pw_core_events_.done = &OnCoreDone;
+ pw_core_events_.error = &OnCoreError;
+
+ pw_stream_events_.version = PW_VERSION_STREAM_EVENTS;
+ pw_stream_events_.state_changed = &OnStreamStateChanged;
+ pw_stream_events_.param_changed = &OnStreamParamChanged;
+ pw_stream_events_.process = &OnStreamProcess;
+
+ {
+ PipeWireThreadLoopLock thread_loop_lock(pw_main_loop_);
+
+ if (fd >= 0) {
+ pw_core_ = pw_context_connect_fd(
+ pw_context_, fcntl(fd, F_DUPFD_CLOEXEC, /*lowest_fd=*/0), nullptr, 0);
+ } else {
+ pw_core_ = pw_context_connect(pw_context_, nullptr, 0);
+ }
+
+ if (!pw_core_) {
+ RTC_LOG(LS_ERROR) << "Failed to connect PipeWire context";
+ return false;
+ }
+
+ pw_core_add_listener(pw_core_, &spa_core_listener_, &pw_core_events_, this);
+
+ // Add an event that can later be invoked by pw_loop_signal_event()
+ renegotiate_ = pw_loop_add_event(pw_thread_loop_get_loop(pw_main_loop_),
+ OnRenegotiateFormat, this);
+
+ server_version_sync_ =
+ pw_core_sync(pw_core_, PW_ID_CORE, server_version_sync_);
+
+ pw_thread_loop_wait(pw_main_loop_);
+
+ pw_properties* reuseProps =
+ pw_properties_new_string("pipewire.client.reuse=1");
+ pw_stream_ = pw_stream_new(pw_core_, "webrtc-consume-stream", reuseProps);
+
+ if (!pw_stream_) {
+ RTC_LOG(LS_ERROR) << "Failed to create PipeWire stream";
+ return false;
+ }
+
+ pw_stream_add_listener(pw_stream_, &spa_stream_listener_,
+ &pw_stream_events_, this);
+ uint8_t buffer[2048] = {};
+
+ spa_pod_builder builder = spa_pod_builder{buffer, sizeof(buffer)};
+
+ std::vector<const spa_pod*> params;
+ const bool has_required_pw_client_version =
+ pw_client_version_ >= kDmaBufModifierMinVersion;
+ const bool has_required_pw_server_version =
+ pw_server_version_ >= kDmaBufModifierMinVersion;
+ struct spa_rectangle resolution;
+ bool set_resolution = false;
+ if (width && height) {
+ resolution = SPA_RECTANGLE(width, height);
+ set_resolution = true;
+ }
+ for (uint32_t format : {SPA_VIDEO_FORMAT_BGRA, SPA_VIDEO_FORMAT_RGBA,
+ SPA_VIDEO_FORMAT_BGRx, SPA_VIDEO_FORMAT_RGBx}) {
+ // Modifiers can be used with PipeWire >= 0.3.33
+ if (has_required_pw_client_version && has_required_pw_server_version) {
+ modifiers_ = egl_dmabuf_->QueryDmaBufModifiers(format);
+
+ if (!modifiers_.empty()) {
+ params.push_back(BuildFormat(&builder, format, modifiers_,
+ set_resolution ? &resolution : nullptr));
+ }
+ }
+
+ params.push_back(BuildFormat(&builder, format, /*modifiers=*/{},
+ set_resolution ? &resolution : nullptr));
+ }
+
+ if (pw_stream_connect(pw_stream_, PW_DIRECTION_INPUT, pw_stream_node_id_,
+ PW_STREAM_FLAG_AUTOCONNECT, params.data(),
+ params.size()) != 0) {
+ RTC_LOG(LS_ERROR) << "Could not connect receiving stream.";
+ return false;
+ }
+
+ RTC_LOG(LS_INFO) << "PipeWire remote opened.";
+ }
+ return true;
+}
+
+RTC_NO_SANITIZE("cfi-icall")
+void SharedScreenCastStreamPrivate::UpdateScreenCastStreamResolution(
+ uint32_t width,
+ uint32_t height) {
+ if (!width || !height) {
+ RTC_LOG(LS_WARNING) << "Bad resolution specified: " << width << "x"
+ << height;
+ return;
+ }
+ if (!pw_main_loop_) {
+ RTC_LOG(LS_WARNING) << "No main pipewire loop, ignoring resolution change";
+ return;
+ }
+ if (!renegotiate_) {
+ RTC_LOG(LS_WARNING) << "Can not renegotiate stream params, ignoring "
+ << "resolution change";
+ return;
+ }
+ if (width_ != width || height_ != height) {
+ width_ = width;
+ height_ = height;
+ {
+ webrtc::MutexLock lock(&resolution_lock_);
+ pending_resolution_change_ = true;
+ }
+ pw_loop_signal_event(pw_thread_loop_get_loop(pw_main_loop_), renegotiate_);
+ }
+}
+
+void SharedScreenCastStreamPrivate::StopScreenCastStream() {
+ if (pw_stream_) {
+ pw_stream_disconnect(pw_stream_);
+ }
+}
+
+std::unique_ptr<DesktopFrame> SharedScreenCastStreamPrivate::CaptureFrame() {
+ webrtc::MutexLock lock(&queue_lock_);
+
+ if (!queue_.current_frame()) {
+ return std::unique_ptr<DesktopFrame>{};
+ }
+
+ std::unique_ptr<SharedDesktopFrame> frame = queue_.current_frame()->Share();
+ return std::move(frame);
+}
+
+std::unique_ptr<MouseCursor> SharedScreenCastStreamPrivate::CaptureCursor() {
+ if (!mouse_cursor_) {
+ return nullptr;
+ }
+
+ return std::move(mouse_cursor_);
+}
+
+DesktopVector SharedScreenCastStreamPrivate::CaptureCursorPosition() {
+ return mouse_cursor_position_;
+}
+
+RTC_NO_SANITIZE("cfi-icall")
+void SharedScreenCastStreamPrivate::ProcessBuffer(pw_buffer* buffer) {
+ spa_buffer* spa_buffer = buffer->buffer;
+ ScopedBuf map;
+ std::unique_ptr<uint8_t[]> src_unique_ptr;
+ uint8_t* src = nullptr;
+
+ // Try to update the mouse cursor first, because it can be the only
+ // information carried by the buffer
+ {
+ const struct spa_meta_cursor* cursor =
+ static_cast<struct spa_meta_cursor*>(spa_buffer_find_meta_data(
+ spa_buffer, SPA_META_Cursor, sizeof(*cursor)));
+ if (cursor && spa_meta_cursor_is_valid(cursor)) {
+ struct spa_meta_bitmap* bitmap = nullptr;
+
+ if (cursor->bitmap_offset)
+ bitmap =
+ SPA_MEMBER(cursor, cursor->bitmap_offset, struct spa_meta_bitmap);
+
+ if (bitmap && bitmap->size.width > 0 && bitmap->size.height > 0) {
+ const uint8_t* bitmap_data =
+ SPA_MEMBER(bitmap, bitmap->offset, uint8_t);
+ BasicDesktopFrame* mouse_frame = new BasicDesktopFrame(
+ DesktopSize(bitmap->size.width, bitmap->size.height));
+ mouse_frame->CopyPixelsFrom(
+ bitmap_data, bitmap->stride,
+ DesktopRect::MakeWH(bitmap->size.width, bitmap->size.height));
+ mouse_cursor_ = std::make_unique<MouseCursor>(
+ mouse_frame, DesktopVector(cursor->hotspot.x, cursor->hotspot.y));
+ }
+ mouse_cursor_position_.set(cursor->position.x, cursor->position.y);
+ }
+ }
+
+ if (spa_buffer->datas[0].chunk->size == 0) {
+ return;
+ }
+
+ if (spa_buffer->datas[0].type == SPA_DATA_MemFd) {
+ map.initialize(
+ static_cast<uint8_t*>(
+ mmap(nullptr,
+ spa_buffer->datas[0].maxsize + spa_buffer->datas[0].mapoffset,
+ PROT_READ, MAP_PRIVATE, spa_buffer->datas[0].fd, 0)),
+ spa_buffer->datas[0].maxsize + spa_buffer->datas[0].mapoffset,
+ spa_buffer->datas[0].fd);
+
+ if (!map) {
+ RTC_LOG(LS_ERROR) << "Failed to mmap the memory: "
+ << std::strerror(errno);
+ return;
+ }
+
+ src = SPA_MEMBER(map.get(), spa_buffer->datas[0].mapoffset, uint8_t);
+ } else if (spa_buffer->datas[0].type == SPA_DATA_DmaBuf) {
+ const uint n_planes = spa_buffer->n_datas;
+
+ if (!n_planes) {
+ return;
+ }
+
+ std::vector<EglDmaBuf::PlaneData> plane_datas;
+ for (uint32_t i = 0; i < n_planes; ++i) {
+ EglDmaBuf::PlaneData data = {
+ static_cast<int32_t>(spa_buffer->datas[i].fd),
+ static_cast<uint32_t>(spa_buffer->datas[i].chunk->stride),
+ static_cast<uint32_t>(spa_buffer->datas[i].chunk->offset)};
+ plane_datas.push_back(data);
+ }
+
+ // When importing DMA-BUFs, we use the stride (number of bytes from one row
+ // of pixels in the buffer) provided by PipeWire. The stride from PipeWire
+ // is given by the graphics driver, and some drivers might add additional
+ // padding for memory layout optimizations, so the stride is not always
+ // equal to BYTES_PER_PIXEL x WIDTH. This is fine, because during the import
+ // we will use OpenGL and the same graphics driver, so it will be able to
+ // work with the stride it provided. Later on, however, when we work with
+ // images we get from DMA-BUFs, we will need to update the stride to be
+ // equal to BYTES_PER_PIXEL x WIDTH, as that's the size of the DesktopFrame
+ // we allocate for each captured frame.
+ src_unique_ptr = egl_dmabuf_->ImageFromDmaBuf(
+ stream_size_, spa_video_format_.format, plane_datas, modifier_);
+ if (src_unique_ptr) {
+ src = src_unique_ptr.get();
+ } else {
+ RTC_LOG(LS_ERROR) << "Dropping DMA-BUF modifier: " << modifier_
+ << " and trying to renegotiate stream parameters";
+
+ if (pw_server_version_ >= kDropSingleModifierMinVersion) {
+ modifiers_.erase(
+ std::remove(modifiers_.begin(), modifiers_.end(), modifier_),
+ modifiers_.end());
+ } else {
+ modifiers_.clear();
+ }
+
+ pw_loop_signal_event(pw_thread_loop_get_loop(pw_main_loop_),
+ renegotiate_);
+ return;
+ }
+ } else if (spa_buffer->datas[0].type == SPA_DATA_MemPtr) {
+ src = static_cast<uint8_t*>(spa_buffer->datas[0].data);
+ }
+
+ if (!src) {
+ return;
+ }
+
+ // Use SPA_META_VideoCrop metadata to get the frame size. KDE and GNOME
+ // handle screen/window sharing differently. KDE/KWin doesn't use
+ // SPA_META_VideoCrop metadata and, when sharing a window, always sets the
+ // stream size to the size of the window. With that we just allocate the
+ // DesktopFrame using the size of the stream itself. GNOME/Mutter always
+ // sets the stream size to the size of the whole screen, even when sharing
+ // a window. To get the real window size we have to use SPA_META_VideoCrop
+ // metadata. This gives us the size we need in order to allocate the
+ // DesktopFrame.
+
+ struct spa_meta_region* videocrop_metadata =
+ static_cast<struct spa_meta_region*>(spa_buffer_find_meta_data(
+ spa_buffer, SPA_META_VideoCrop, sizeof(*videocrop_metadata)));
+
+ // If the video size from the metadata is bigger than the actual video
+ // stream size, the metadata is wrong or we would have to up-scale the
+ // video; in both cases just quit now.
+ if (videocrop_metadata &&
+ (videocrop_metadata->region.size.width >
+ static_cast<uint32_t>(stream_size_.width()) ||
+ videocrop_metadata->region.size.height >
+ static_cast<uint32_t>(stream_size_.height()))) {
+ RTC_LOG(LS_ERROR) << "Stream metadata sizes are wrong!";
+ return;
+ }
+
+ // Use SPA_META_VideoCrop metadata to get the DesktopFrame size in case
+ // a window is shared and it represents just a small portion of the
+ // stream itself. This is used, for example, with GNOME (Mutter), where
+ // the stream has the size of the whole screen, but we only care about
+ // the smaller portion representing the window inside.
+ bool videocrop_metadata_use = false;
+ const struct spa_rectangle* videocrop_metadata_size =
+ videocrop_metadata ? &videocrop_metadata->region.size : nullptr;
+
+ if (videocrop_metadata_size && videocrop_metadata_size->width != 0 &&
+ videocrop_metadata_size->height != 0 &&
+ (static_cast<int>(videocrop_metadata_size->width) <
+ stream_size_.width() ||
+ static_cast<int>(videocrop_metadata_size->height) <
+ stream_size_.height())) {
+ videocrop_metadata_use = true;
+ }
+
+ if (videocrop_metadata_use) {
+ frame_size_ = DesktopSize(videocrop_metadata_size->width,
+ videocrop_metadata_size->height);
+ } else {
+ frame_size_ = stream_size_;
+ }
+
+ // Get the position of the video crop within the stream. Just double-check
+ // that the position doesn't exceed the size of the stream itself. NOTE:
+ // Currently it looks like there is no implementation using this.
+ uint32_t y_offset =
+ videocrop_metadata_use &&
+ (videocrop_metadata->region.position.y + frame_size_.height() <=
+ stream_size_.height())
+ ? videocrop_metadata->region.position.y
+ : 0;
+ uint32_t x_offset =
+ videocrop_metadata_use &&
+ (videocrop_metadata->region.position.x + frame_size_.width() <=
+ stream_size_.width())
+ ? videocrop_metadata->region.position.x
+ : 0;
+
+ const uint32_t stream_stride = kBytesPerPixel * stream_size_.width();
+ uint32_t buffer_stride = spa_buffer->datas[0].chunk->stride;
+ uint32_t src_stride = buffer_stride;
+
+ if (spa_buffer->datas[0].type == SPA_DATA_DmaBuf &&
+ buffer_stride > stream_stride) {
+ // When DMA-BUFs are used, the stride we get in spa_buffer might contain
+ // additional padding, but after we import the buffer the original stride
+ // is no longer relevant and we should just calculate it based on the
+ // stream width. For more context see https://crbug.com/1333304.
+ src_stride = stream_stride;
+ }
+
+ uint8_t* updated_src =
+ src + (src_stride * y_offset) + (kBytesPerPixel * x_offset);
+
+ webrtc::MutexLock lock(&queue_lock_);
+
+ // Move to the next frame if the current one is being used and shared
+ if (queue_.current_frame() && queue_.current_frame()->IsShared()) {
+ queue_.MoveToNextFrame();
+ if (queue_.current_frame() && queue_.current_frame()->IsShared()) {
+ RTC_LOG(LS_WARNING)
+ << "Failed to process PipeWire buffer: no available frame";
+ return;
+ }
+ }
+
+ if (!queue_.current_frame() ||
+ !queue_.current_frame()->size().equals(frame_size_)) {
+ std::unique_ptr<DesktopFrame> frame(new BasicDesktopFrame(
+ DesktopSize(frame_size_.width(), frame_size_.height())));
+ queue_.ReplaceCurrentFrame(SharedDesktopFrame::Wrap(std::move(frame)));
+ }
+
+ queue_.current_frame()->CopyPixelsFrom(
+ updated_src, (src_stride - (kBytesPerPixel * x_offset)),
+ DesktopRect::MakeWH(frame_size_.width(), frame_size_.height()));
+
+ if (spa_video_format_.format == SPA_VIDEO_FORMAT_RGBx ||
+ spa_video_format_.format == SPA_VIDEO_FORMAT_RGBA) {
+ uint8_t* tmp_src = queue_.current_frame()->data();
+ for (int i = 0; i < frame_size_.height(); ++i) {
+ // If both sides decided to go with the RGBx format we need to convert
+ // it to BGRx to match color format expected by WebRTC.
+ ConvertRGBxToBGRx(tmp_src, queue_.current_frame()->stride());
+ tmp_src += queue_.current_frame()->stride();
+ }
+ }
+
+ queue_.current_frame()->mutable_updated_region()->SetRect(
+ DesktopRect::MakeSize(queue_.current_frame()->size()));
+}
+
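+// Swaps the R and B channels of every 4-byte pixel in place: e.g. the RGBx
+// bytes {0x11, 0x22, 0x33, 0xff} become the BGRx bytes {0x33, 0x22, 0x11,
+// 0xff}.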
+void SharedScreenCastStreamPrivate::ConvertRGBxToBGRx(uint8_t* frame,
+ uint32_t size) {
+ for (uint32_t i = 0; i < size; i += 4) {
+ uint8_t tempR = frame[i];
+ uint8_t tempB = frame[i + 2];
+ frame[i] = tempB;
+ frame[i + 2] = tempR;
+ }
+}
+
+SharedScreenCastStream::SharedScreenCastStream()
+ : private_(std::make_unique<SharedScreenCastStreamPrivate>()) {}
+
+SharedScreenCastStream::~SharedScreenCastStream() {}
+
+rtc::scoped_refptr<SharedScreenCastStream>
+SharedScreenCastStream::CreateDefault() {
+ // Explicit new, to access non-public constructor.
+ return rtc::scoped_refptr(new SharedScreenCastStream());
+}
+
+bool SharedScreenCastStream::StartScreenCastStream(uint32_t stream_node_id) {
+ return private_->StartScreenCastStream(stream_node_id, -1);
+}
+
+bool SharedScreenCastStream::StartScreenCastStream(uint32_t stream_node_id,
+ int fd,
+ uint32_t width,
+ uint32_t height) {
+ return private_->StartScreenCastStream(stream_node_id, fd, width, height);
+}
+
+void SharedScreenCastStream::UpdateScreenCastStreamResolution(uint32_t width,
+ uint32_t height) {
+ private_->UpdateScreenCastStreamResolution(width, height);
+}
+
+void SharedScreenCastStream::StopScreenCastStream() {
+ private_->StopScreenCastStream();
+}
+
+std::unique_ptr<DesktopFrame> SharedScreenCastStream::CaptureFrame() {
+ return private_->CaptureFrame();
+}
+
+std::unique_ptr<MouseCursor> SharedScreenCastStream::CaptureCursor() {
+ return private_->CaptureCursor();
+}
+
+absl::optional<DesktopVector> SharedScreenCastStream::CaptureCursorPosition() {
+ DesktopVector position = private_->CaptureCursorPosition();
+
+ // Consider only (x >= 0 and y >= 0) a valid position
+ if (position.x() < 0 || position.y() < 0) {
+ return absl::nullopt;
+ }
+
+ return position;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/linux/wayland/shared_screencast_stream.h b/third_party/libwebrtc/modules/desktop_capture/linux/wayland/shared_screencast_stream.h
new file mode 100644
index 0000000000..66a3f45bdb
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/linux/wayland/shared_screencast_stream.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_DESKTOP_CAPTURE_LINUX_WAYLAND_SHARED_SCREENCAST_STREAM_H_
+#define MODULES_DESKTOP_CAPTURE_LINUX_WAYLAND_SHARED_SCREENCAST_STREAM_H_
+
+#include <memory>
+
+#include "absl/types/optional.h"
+#include "api/ref_counted_base.h"
+#include "api/scoped_refptr.h"
+#include "modules/desktop_capture/desktop_frame.h"
+#include "modules/desktop_capture/mouse_cursor.h"
+#include "rtc_base/system/rtc_export.h"
+
+namespace webrtc {
+
+class SharedScreenCastStreamPrivate;
+
+class RTC_EXPORT SharedScreenCastStream
+ : public rtc::RefCountedNonVirtual<SharedScreenCastStream> {
+ public:
+ static rtc::scoped_refptr<SharedScreenCastStream> CreateDefault();
+
+ bool StartScreenCastStream(uint32_t stream_node_id);
+ bool StartScreenCastStream(uint32_t stream_node_id,
+ int fd,
+ uint32_t width = 0,
+ uint32_t height = 0);
+ void UpdateScreenCastStreamResolution(uint32_t width, uint32_t height);
+ void StopScreenCastStream();
+
+ // The functions below return the most recent information we get from a
+ // PipeWire buffer on each Process() callback. This assumes that we
+ // managed to successfully connect to a PipeWire stream provided by the
+ // compositor (based on stream parameters). The cursor data are obtained
+ // from spa_meta_cursor stream metadata and therefore the cursor is not
+ // part of the actual screen/window frame.
+
+ // Returns the most recent screen/window frame we obtained from a PipeWire
+ // buffer. Will return an empty frame in case we didn't manage to get a
+ // frame from a PipeWire buffer.
+ std::unique_ptr<DesktopFrame> CaptureFrame();
+
+ // Returns the most recent mouse cursor image. Will return nullptr in case
+ // we didn't manage to get a cursor from a PipeWire buffer. NOTE: the
+ // cursor image is not necessarily updated on every cursor location change;
+ // it is only updated when the cursor shape changes.
+ std::unique_ptr<MouseCursor> CaptureCursor();
+
+ // Returns the most recent mouse cursor position. Will not return a value
+ // in case we didn't manage to get it from a PipeWire buffer.
+ absl::optional<DesktopVector> CaptureCursorPosition();
+
+ ~SharedScreenCastStream();
+
+ protected:
+ SharedScreenCastStream();
+
+ private:
+ SharedScreenCastStream(const SharedScreenCastStream&) = delete;
+ SharedScreenCastStream& operator=(const SharedScreenCastStream&) = delete;
+
+ std::unique_ptr<SharedScreenCastStreamPrivate> private_;
+};
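+
+// A minimal usage sketch (illustrative only; `node_id` and `fd` stand for
+// values obtained from an established XDG portal session):
+//   rtc::scoped_refptr<SharedScreenCastStream> stream =
+//       SharedScreenCastStream::CreateDefault();
+//   if (stream->StartScreenCastStream(node_id, fd)) {
+//     std::unique_ptr<DesktopFrame> frame = stream->CaptureFrame();
+//   }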
+
+} // namespace webrtc
+
+#endif // MODULES_DESKTOP_CAPTURE_LINUX_WAYLAND_SHARED_SCREENCAST_STREAM_H_
diff --git a/third_party/libwebrtc/modules/desktop_capture/linux/wayland/xdg_desktop_portal_utils.cc b/third_party/libwebrtc/modules/desktop_capture/linux/wayland/xdg_desktop_portal_utils.cc
new file mode 100644
index 0000000000..75dbf2bdf3
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/linux/wayland/xdg_desktop_portal_utils.cc
@@ -0,0 +1,195 @@
+/*
+ * Copyright 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/desktop_capture/linux/wayland/xdg_desktop_portal_utils.h"
+
+#include <string>
+
+#include "absl/strings/string_view.h"
+#include "modules/desktop_capture/linux/wayland/scoped_glib.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+namespace xdg_portal {
+
+std::string RequestResponseToString(RequestResponse request) {
+ switch (request) {
+ case RequestResponse::kUnknown:
+ return "kUnknown";
+ case RequestResponse::kSuccess:
+ return "kSuccess";
+ case RequestResponse::kUserCancelled:
+ return "kUserCancelled";
+ case RequestResponse::kError:
+ return "kError";
+ default:
+ return "Uknown";
+ }
+}
+
+RequestResponse RequestResponseFromPortalResponse(uint32_t portal_response) {
+ // See:
+ // https://docs.flatpak.org/en/latest/portal-api-reference.html#gdbus-signal-org-freedesktop-portal-Request.Response
+ switch (portal_response) {
+ case 0:
+ return RequestResponse::kSuccess;
+ case 1:
+ return RequestResponse::kUserCancelled;
+ case 2:
+ return RequestResponse::kError;
+ default:
+ return RequestResponse::kUnknown;
+ }
+}
+
+std::string PrepareSignalHandle(absl::string_view token,
+ GDBusConnection* connection) {
+ Scoped<char> sender(
+ g_strdup(g_dbus_connection_get_unique_name(connection) + 1));
+ for (int i = 0; sender.get()[i]; ++i) {
+ if (sender.get()[i] == '.') {
+ sender.get()[i] = '_';
+ }
+ }
+ Scoped<char> handle(
+ g_strconcat(kDesktopRequestObjectPath, "/", sender.get(), "/",
+ std::string(token).c_str(), /*end of varargs*/ nullptr));
+ return handle.get();
+}
+
+uint32_t SetupRequestResponseSignal(absl::string_view object_path,
+ const GDBusSignalCallback callback,
+ gpointer user_data,
+ GDBusConnection* connection) {
+ return g_dbus_connection_signal_subscribe(
+ connection, kDesktopBusName, kRequestInterfaceName, "Response",
+ std::string(object_path).c_str(), /*arg0=*/nullptr,
+ G_DBUS_SIGNAL_FLAGS_NO_MATCH_RULE, callback, user_data,
+ /*user_data_free_func=*/nullptr);
+}
+
+void RequestSessionProxy(absl::string_view interface_name,
+ const ProxyRequestCallback proxy_request_callback,
+ GCancellable* cancellable,
+ gpointer user_data) {
+ g_dbus_proxy_new_for_bus(
+ G_BUS_TYPE_SESSION, G_DBUS_PROXY_FLAGS_NONE, /*info=*/nullptr,
+ kDesktopBusName, kDesktopObjectPath, std::string(interface_name).c_str(),
+ cancellable,
+ reinterpret_cast<GAsyncReadyCallback>(proxy_request_callback), user_data);
+}
+
+void SetupSessionRequestHandlers(
+ absl::string_view portal_prefix,
+ const SessionRequestCallback session_request_callback,
+ const SessionRequestResponseSignalHandler request_response_signal_handler,
+ GDBusConnection* connection,
+ GDBusProxy* proxy,
+ GCancellable* cancellable,
+ std::string& portal_handle,
+ guint& session_request_signal_id,
+ gpointer user_data) {
+ GVariantBuilder builder;
+ Scoped<char> variant_string;
+
+ g_variant_builder_init(&builder, G_VARIANT_TYPE_VARDICT);
+ variant_string =
+ g_strdup_printf("%.*s_session%d", static_cast<int>(portal_prefix.size()),
+ portal_prefix.data(), g_random_int_range(0, G_MAXINT));
+ g_variant_builder_add(&builder, "{sv}", "session_handle_token",
+ g_variant_new_string(variant_string.get()));
+
+ variant_string =
+ g_strdup_printf("%.*s_%d", static_cast<int>(portal_prefix.size()),
+ portal_prefix.data(), g_random_int_range(0, G_MAXINT));
+ g_variant_builder_add(&builder, "{sv}", "handle_token",
+ g_variant_new_string(variant_string.get()));
+
+ portal_handle = PrepareSignalHandle(variant_string.get(), connection);
+ session_request_signal_id = SetupRequestResponseSignal(
+ portal_handle.c_str(), request_response_signal_handler, user_data,
+ connection);
+
+ RTC_LOG(LS_INFO) << "Desktop session requested.";
+ g_dbus_proxy_call(
+ proxy, "CreateSession", g_variant_new("(a{sv})", &builder),
+ G_DBUS_CALL_FLAGS_NONE, /*timeout=*/-1, cancellable,
+ reinterpret_cast<GAsyncReadyCallback>(session_request_callback),
+ user_data);
+}
+
+void StartSessionRequest(
+ absl::string_view prefix,
+ absl::string_view session_handle,
+ const StartRequestResponseSignalHandler signal_handler,
+ const SessionStartRequestedHandler session_started_handler,
+ GDBusProxy* proxy,
+ GDBusConnection* connection,
+ GCancellable* cancellable,
+ guint& start_request_signal_id,
+ std::string& start_handle,
+ gpointer user_data) {
+ GVariantBuilder builder;
+ Scoped<char> variant_string;
+
+ g_variant_builder_init(&builder, G_VARIANT_TYPE_VARDICT);
+ variant_string =
+ g_strdup_printf("%.*s%d", static_cast<int>(prefix.size()), prefix.data(),
+ g_random_int_range(0, G_MAXINT));
+ g_variant_builder_add(&builder, "{sv}", "handle_token",
+ g_variant_new_string(variant_string.get()));
+
+ start_handle = PrepareSignalHandle(variant_string.get(), connection);
+ start_request_signal_id = SetupRequestResponseSignal(
+ start_handle.c_str(), signal_handler, user_data, connection);
+
+ // "Identifier for the application window", this is Wayland, so not "x11:...".
+ const char parent_window[] = "";
+
+ RTC_LOG(LS_INFO) << "Starting the portal session.";
+ g_dbus_proxy_call(
+ proxy, "Start",
+ g_variant_new("(osa{sv})", std::string(session_handle).c_str(),
+ parent_window, &builder),
+ G_DBUS_CALL_FLAGS_NONE, /*timeout=*/-1, cancellable,
+ reinterpret_cast<GAsyncReadyCallback>(session_started_handler),
+ user_data);
+}
+
+void TearDownSession(absl::string_view session_handle,
+ GDBusProxy* proxy,
+ GCancellable* cancellable,
+ GDBusConnection* connection) {
+ if (!session_handle.empty()) {
+ Scoped<GDBusMessage> message(g_dbus_message_new_method_call(
+ kDesktopBusName, std::string(session_handle).c_str(),
+ kSessionInterfaceName, "Close"));
+ if (message.get()) {
+ Scoped<GError> error;
+ g_dbus_connection_send_message(connection, message.get(),
+ G_DBUS_SEND_MESSAGE_FLAGS_NONE,
+ /*out_serial=*/nullptr, error.receive());
+ if (error.get()) {
+ RTC_LOG(LS_ERROR) << "Failed to close the session: " << error->message;
+ }
+ }
+ }
+
+ if (cancellable) {
+ g_cancellable_cancel(cancellable);
+ g_object_unref(cancellable);
+ }
+
+ if (proxy) {
+ g_object_unref(proxy);
+ }
+}
+
+} // namespace xdg_portal
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/linux/wayland/xdg_desktop_portal_utils.h b/third_party/libwebrtc/modules/desktop_capture/linux/wayland/xdg_desktop_portal_utils.h
new file mode 100644
index 0000000000..f6ac92b5d1
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/linux/wayland/xdg_desktop_portal_utils.h
@@ -0,0 +1,111 @@
+/*
+ * Copyright 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_DESKTOP_CAPTURE_LINUX_WAYLAND_XDG_DESKTOP_PORTAL_UTILS_H_
+#define MODULES_DESKTOP_CAPTURE_LINUX_WAYLAND_XDG_DESKTOP_PORTAL_UTILS_H_
+
+#include <gio/gio.h>
+#include <stdint.h>
+
+#include <string>
+#include <vector>
+
+#include "absl/strings/string_view.h"
+#include "modules/desktop_capture/linux/wayland/portal_request_response.h"
+#include "modules/desktop_capture/linux/wayland/scoped_glib.h"
+#include "modules/desktop_capture/linux/wayland/xdg_session_details.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+namespace xdg_portal {
+
+constexpr char kDesktopBusName[] = "org.freedesktop.portal.Desktop";
+constexpr char kDesktopObjectPath[] = "/org/freedesktop/portal/desktop";
+constexpr char kDesktopRequestObjectPath[] =
+ "/org/freedesktop/portal/desktop/request";
+constexpr char kSessionInterfaceName[] = "org.freedesktop.portal.Session";
+constexpr char kRequestInterfaceName[] = "org.freedesktop.portal.Request";
+constexpr char kScreenCastInterfaceName[] = "org.freedesktop.portal.ScreenCast";
+
+using ProxyRequestCallback = void (*)(GObject*, GAsyncResult*, gpointer);
+using SessionRequestCallback = void (*)(GDBusProxy*, GAsyncResult*, gpointer);
+using SessionRequestResponseSignalHandler = void (*)(GDBusConnection*,
+ const char*,
+ const char*,
+ const char*,
+ const char*,
+ GVariant*,
+ gpointer);
+using StartRequestResponseSignalHandler = void (*)(GDBusConnection*,
+ const char*,
+ const char*,
+ const char*,
+ const char*,
+ GVariant*,
+ gpointer);
+using SessionStartRequestedHandler = void (*)(GDBusProxy*,
+ GAsyncResult*,
+ gpointer);
+
+std::string RequestResponseToString(RequestResponse request);
+
+RequestResponse RequestResponseFromPortalResponse(uint32_t portal_response);
+
+// Returns a string path for signal handle based on the provided connection and
+// token.
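+// For example (illustrative values), a connection whose unique name is
+// ":1.42" combined with the token "webrtc_123" yields
+// "/org/freedesktop/portal/desktop/request/1_42/webrtc_123".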
+std::string PrepareSignalHandle(absl::string_view token,
+ GDBusConnection* connection);
+
+// Sets up the callback to execute when a response signal is received for the
+// given object.
+uint32_t SetupRequestResponseSignal(absl::string_view object_path,
+ const GDBusSignalCallback callback,
+ gpointer user_data,
+ GDBusConnection* connection);
+
+void RequestSessionProxy(absl::string_view interface_name,
+ const ProxyRequestCallback proxy_request_callback,
+ GCancellable* cancellable,
+ gpointer user_data);
+
+void SetupSessionRequestHandlers(
+ absl::string_view portal_prefix,
+ const SessionRequestCallback session_request_callback,
+ const SessionRequestResponseSignalHandler request_response_signal_handler,
+ GDBusConnection* connection,
+ GDBusProxy* proxy,
+ GCancellable* cancellable,
+ std::string& portal_handle,
+ guint& session_request_signal_id,
+ gpointer user_data);
+
+void StartSessionRequest(
+ absl::string_view prefix,
+ absl::string_view session_handle,
+ const StartRequestResponseSignalHandler signal_handler,
+ const SessionStartRequestedHandler session_started_handler,
+ GDBusProxy* proxy,
+ GDBusConnection* connection,
+ GCancellable* cancellable,
+ guint& start_request_signal_id,
+ std::string& start_handle,
+ gpointer user_data);
+
+// Tears down the portal session and cleans up related objects.
+void TearDownSession(absl::string_view session_handle,
+ GDBusProxy* proxy,
+ GCancellable* cancellable,
+ GDBusConnection* connection);
+
+} // namespace xdg_portal
+} // namespace webrtc
+
+#endif // MODULES_DESKTOP_CAPTURE_LINUX_WAYLAND_XDG_DESKTOP_PORTAL_UTILS_H_
diff --git a/third_party/libwebrtc/modules/desktop_capture/linux/wayland/xdg_session_details.h b/third_party/libwebrtc/modules/desktop_capture/linux/wayland/xdg_session_details.h
new file mode 100644
index 0000000000..b70ac4aa59
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/linux/wayland/xdg_session_details.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_DESKTOP_CAPTURE_LINUX_WAYLAND_XDG_SESSION_DETAILS_H_
+#define MODULES_DESKTOP_CAPTURE_LINUX_WAYLAND_XDG_SESSION_DETAILS_H_
+
+#include <gio/gio.h>
+
+#include <string>
+
+namespace webrtc {
+namespace xdg_portal {
+
+// Details of the session associated with an XDG desktop portal session.
+// Portal API calls can be invoked by utilizing the information here.
+struct SessionDetails {
+ GDBusProxy* proxy = nullptr;
+ GCancellable* cancellable = nullptr;
+ std::string session_handle;
+ uint32_t pipewire_stream_node_id = 0;
+};
+
+} // namespace xdg_portal
+} // namespace webrtc
+
+#endif // MODULES_DESKTOP_CAPTURE_LINUX_WAYLAND_XDG_SESSION_DETAILS_H_
diff --git a/third_party/libwebrtc/modules/desktop_capture/linux/x11/mouse_cursor_monitor_x11.cc b/third_party/libwebrtc/modules/desktop_capture/linux/x11/mouse_cursor_monitor_x11.cc
new file mode 100644
index 0000000000..12f554df79
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/linux/x11/mouse_cursor_monitor_x11.cc
@@ -0,0 +1,255 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/linux/x11/mouse_cursor_monitor_x11.h"
+
+#include <X11/Xlib.h>
+#include <X11/extensions/Xfixes.h>
+#include <X11/extensions/xfixeswire.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#include <algorithm>
+#include <memory>
+
+#include "modules/desktop_capture/desktop_capture_options.h"
+#include "modules/desktop_capture/desktop_capture_types.h"
+#include "modules/desktop_capture/desktop_frame.h"
+#include "modules/desktop_capture/desktop_geometry.h"
+#include "modules/desktop_capture/linux/x11/x_error_trap.h"
+#include "modules/desktop_capture/mouse_cursor.h"
+#include "modules/desktop_capture/mouse_cursor_monitor.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace {
+
+// WindowCapturer returns window IDs of X11 windows with the WM_STATE
+// attribute. These windows may not be immediate children of the root window,
+// because window managers may re-parent them to add decorations. However,
+// XQueryPointer() expects to be passed children of the root. This function
+// walks up the window hierarchy to find the root child that corresponds
+// to `window`.
+Window GetTopLevelWindow(Display* display, Window window) {
+ webrtc::XErrorTrap error_trap(display);
+ while (true) {
+ // If the window is in WithdrawnState then look at all of its children.
+ ::Window root, parent;
+ ::Window* children;
+ unsigned int num_children;
+ if (!XQueryTree(display, window, &root, &parent, &children,
+ &num_children)) {
+ RTC_LOG(LS_ERROR) << "Failed to query for child windows although window"
+ "does not have a valid WM_STATE.";
+ return None;
+ }
+ if (children)
+ XFree(children);
+
+ if (parent == root)
+ break;
+
+ window = parent;
+ }
+
+ return window;
+}
+
+} // namespace
+
+namespace webrtc {
+
+MouseCursorMonitorX11::MouseCursorMonitorX11(
+ const DesktopCaptureOptions& options,
+ Window window)
+ : x_display_(options.x_display()),
+ callback_(NULL),
+ mode_(SHAPE_AND_POSITION),
+ window_(window),
+ have_xfixes_(false),
+ xfixes_event_base_(-1),
+ xfixes_error_base_(-1) {
+ // Set a default initial cursor shape in case XFixes is not present.
+ const int kSize = 5;
+ std::unique_ptr<DesktopFrame> default_cursor(
+ new BasicDesktopFrame(DesktopSize(kSize, kSize)));
+ const uint8_t pixels[kSize * kSize] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff,
+ 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+ uint8_t* ptr = default_cursor->data();
+ for (int y = 0; y < kSize; ++y) {
+ for (int x = 0; x < kSize; ++x) {
+ *ptr++ = pixels[kSize * y + x];
+ *ptr++ = pixels[kSize * y + x];
+ *ptr++ = pixels[kSize * y + x];
+ *ptr++ = 0xff;
+ }
+ }
+ DesktopVector hotspot(2, 2);
+ cursor_shape_.reset(new MouseCursor(default_cursor.release(), hotspot));
+}
+
+MouseCursorMonitorX11::~MouseCursorMonitorX11() {
+ if (have_xfixes_) {
+ x_display_->RemoveEventHandler(xfixes_event_base_ + XFixesCursorNotify,
+ this);
+ }
+}
+
+void MouseCursorMonitorX11::Init(Callback* callback, Mode mode) {
+ // Init() can be called only when not already started
+ RTC_DCHECK(!callback_);
+ RTC_DCHECK(callback);
+
+ callback_ = callback;
+ mode_ = mode;
+
+ have_xfixes_ =
+ XFixesQueryExtension(display(), &xfixes_event_base_, &xfixes_error_base_);
+
+ if (have_xfixes_) {
+ // Register for changes to the cursor shape.
+ XErrorTrap error_trap(display());
+ XFixesSelectCursorInput(display(), window_, XFixesDisplayCursorNotifyMask);
+ x_display_->AddEventHandler(xfixes_event_base_ + XFixesCursorNotify, this);
+
+ CaptureCursor();
+ } else {
+ RTC_LOG(LS_INFO) << "X server does not support XFixes.";
+ }
+}
+
+void MouseCursorMonitorX11::Capture() {
+ RTC_DCHECK(callback_);
+
+ // Process X11 events in case XFixes has sent a cursor notification.
+ x_display_->ProcessPendingXEvents();
+
+ // `cursor_shape_` is set only if we were notified of a cursor shape change.
+ if (cursor_shape_.get())
+ callback_->OnMouseCursor(cursor_shape_.release());
+
+ // Get cursor position if necessary.
+ if (mode_ == SHAPE_AND_POSITION) {
+ int root_x;
+ int root_y;
+ int win_x;
+ int win_y;
+ Window root_window;
+ Window child_window;
+ unsigned int mask;
+
+ XErrorTrap error_trap(display());
+ Bool result = XQueryPointer(display(), window_, &root_window, &child_window,
+ &root_x, &root_y, &win_x, &win_y, &mask);
+ CursorState state;
+ if (!result || error_trap.GetLastErrorAndDisable() != 0) {
+ state = OUTSIDE;
+ } else {
+ // In screen mode (window_ == root_window) the mouse is always inside.
+ // XQueryPointer() sets `child_window` to None if the cursor is outside
+ // `window_`.
+ state =
+ (window_ == root_window || child_window != None) ? INSIDE : OUTSIDE;
+ }
+
+ // As the comments to GetTopLevelWindow() above indicate, in window capture,
+ // the cursor position capture happens in `window_`, while the frame capture
+ // happens in `child_window`. These two windows are not always the same, as
+ // the window manager may add decorations to `window_`. So translate the
+ // coordinates in `window_` to the coordinate space of `child_window`.
+ if (window_ != root_window && state == INSIDE) {
+ int translated_x, translated_y;
+ Window unused;
+ if (XTranslateCoordinates(display(), window_, child_window, win_x, win_y,
+ &translated_x, &translated_y, &unused)) {
+ win_x = translated_x;
+ win_y = translated_y;
+ }
+ }
+
+    // X11 coordinates always start at (0, 0), so no further translation is
+    // needed here.
+ callback_->OnMouseCursorPosition(DesktopVector(root_x, root_y));
+ }
+}
+
+bool MouseCursorMonitorX11::HandleXEvent(const XEvent& event) {
+ if (have_xfixes_ && event.type == xfixes_event_base_ + XFixesCursorNotify) {
+ const XFixesCursorNotifyEvent* cursor_event =
+ reinterpret_cast<const XFixesCursorNotifyEvent*>(&event);
+ if (cursor_event->subtype == XFixesDisplayCursorNotify) {
+ CaptureCursor();
+ }
+ // Return false, even if the event has been handled, because there might be
+ // other listeners for cursor notifications.
+ }
+ return false;
+}
+
+void MouseCursorMonitorX11::CaptureCursor() {
+ RTC_DCHECK(have_xfixes_);
+
+ XFixesCursorImage* img;
+ {
+ XErrorTrap error_trap(display());
+ img = XFixesGetCursorImage(display());
+ if (!img || error_trap.GetLastErrorAndDisable() != 0)
+ return;
+ }
+
+ std::unique_ptr<DesktopFrame> image(
+ new BasicDesktopFrame(DesktopSize(img->width, img->height)));
+
+ // Xlib stores 32-bit data in longs, even if longs are 64-bits long.
+ unsigned long* src = img->pixels; // NOLINT(runtime/int)
+ uint32_t* dst = reinterpret_cast<uint32_t*>(image->data());
+ uint32_t* dst_end = dst + (img->width * img->height);
+ while (dst < dst_end) {
+ *dst++ = static_cast<uint32_t>(*src++);
+ }
+
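+  // Clamp the hotspot to the cursor bounds in case the server reports an
+  // out-of-range position.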
+ DesktopVector hotspot(std::min(img->width, img->xhot),
+ std::min(img->height, img->yhot));
+
+ XFree(img);
+
+ cursor_shape_.reset(new MouseCursor(image.release(), hotspot));
+}
+
+// static
+MouseCursorMonitor* MouseCursorMonitorX11::CreateForWindow(
+ const DesktopCaptureOptions& options,
+ WindowId window) {
+ if (!options.x_display())
+ return NULL;
+ window = GetTopLevelWindow(options.x_display()->display(), window);
+ if (window == None)
+ return NULL;
+ return new MouseCursorMonitorX11(options, window);
+}
+
+MouseCursorMonitor* MouseCursorMonitorX11::CreateForScreen(
+ const DesktopCaptureOptions& options,
+ ScreenId screen) {
+ if (!options.x_display())
+ return NULL;
+ WindowId window = DefaultRootWindow(options.x_display()->display());
+ return new MouseCursorMonitorX11(options, window);
+}
+
+std::unique_ptr<MouseCursorMonitor> MouseCursorMonitorX11::Create(
+ const DesktopCaptureOptions& options) {
+ return std::unique_ptr<MouseCursorMonitor>(
+ CreateForScreen(options, kFullDesktopScreenId));
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/linux/x11/mouse_cursor_monitor_x11.h b/third_party/libwebrtc/modules/desktop_capture/linux/x11/mouse_cursor_monitor_x11.h
new file mode 100644
index 0000000000..980d254a0a
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/linux/x11/mouse_cursor_monitor_x11.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_DESKTOP_CAPTURE_LINUX_X11_MOUSE_CURSOR_MONITOR_X11_H_
+#define MODULES_DESKTOP_CAPTURE_LINUX_X11_MOUSE_CURSOR_MONITOR_X11_H_
+
+#include <X11/X.h>
+
+#include <memory>
+
+#include "api/scoped_refptr.h"
+#include "modules/desktop_capture/desktop_capture_options.h"
+#include "modules/desktop_capture/desktop_capture_types.h"
+#include "modules/desktop_capture/linux/x11/shared_x_display.h"
+#include "modules/desktop_capture/mouse_cursor.h"
+#include "modules/desktop_capture/mouse_cursor_monitor.h"
+
+namespace webrtc {
+
+class MouseCursorMonitorX11 : public MouseCursorMonitor,
+ public SharedXDisplay::XEventHandler {
+ public:
+ MouseCursorMonitorX11(const DesktopCaptureOptions& options, Window window);
+ ~MouseCursorMonitorX11() override;
+
+ static MouseCursorMonitor* CreateForWindow(
+ const DesktopCaptureOptions& options,
+ WindowId window);
+ static MouseCursorMonitor* CreateForScreen(
+ const DesktopCaptureOptions& options,
+ ScreenId screen);
+ static std::unique_ptr<MouseCursorMonitor> Create(
+ const DesktopCaptureOptions& options);
+
+ void Init(Callback* callback, Mode mode) override;
+ void Capture() override;
+
+ private:
+ // SharedXDisplay::XEventHandler interface.
+ bool HandleXEvent(const XEvent& event) override;
+
+ Display* display() { return x_display_->display(); }
+
+ // Captures current cursor shape and stores it in `cursor_shape_`.
+ void CaptureCursor();
+
+ rtc::scoped_refptr<SharedXDisplay> x_display_;
+ Callback* callback_;
+ Mode mode_;
+ Window window_;
+
+ bool have_xfixes_;
+ int xfixes_event_base_;
+ int xfixes_error_base_;
+
+ std::unique_ptr<MouseCursor> cursor_shape_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_DESKTOP_CAPTURE_LINUX_X11_MOUSE_CURSOR_MONITOR_X11_H_
diff --git a/third_party/libwebrtc/modules/desktop_capture/linux/x11/screen_capturer_x11.cc b/third_party/libwebrtc/modules/desktop_capture/linux/x11/screen_capturer_x11.cc
new file mode 100644
index 0000000000..684838bee5
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/linux/x11/screen_capturer_x11.cc
@@ -0,0 +1,512 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/linux/x11/screen_capturer_x11.h"
+
+#include <X11/Xlib.h>
+#include <X11/extensions/Xdamage.h>
+#include <X11/extensions/Xfixes.h>
+#include <X11/extensions/damagewire.h>
+#include <dlfcn.h>
+#include <stdint.h>
+#include <string.h>
+
+#include <memory>
+#include <utility>
+
+#include "modules/desktop_capture/desktop_capture_options.h"
+#include "modules/desktop_capture/desktop_capturer.h"
+#include "modules/desktop_capture/desktop_frame.h"
+#include "modules/desktop_capture/desktop_geometry.h"
+#include "modules/desktop_capture/linux/x11/x_server_pixel_buffer.h"
+#include "modules/desktop_capture/screen_capture_frame_queue.h"
+#include "modules/desktop_capture/screen_capturer_helper.h"
+#include "modules/desktop_capture/shared_desktop_frame.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/sanitizer.h"
+#include "rtc_base/time_utils.h"
+#include "rtc_base/trace_event.h"
+
+namespace webrtc {
+
+ScreenCapturerX11::ScreenCapturerX11() {
+ helper_.SetLogGridSize(4);
+}
+
+ScreenCapturerX11::~ScreenCapturerX11() {
+ options_.x_display()->RemoveEventHandler(ConfigureNotify, this);
+ if (use_damage_) {
+ options_.x_display()->RemoveEventHandler(damage_event_base_ + XDamageNotify,
+ this);
+ }
+ if (use_randr_) {
+ options_.x_display()->RemoveEventHandler(
+ randr_event_base_ + RRScreenChangeNotify, this);
+ }
+ DeinitXlib();
+}
+
+bool ScreenCapturerX11::Init(const DesktopCaptureOptions& options) {
+ TRACE_EVENT0("webrtc", "ScreenCapturerX11::Init");
+ options_ = options;
+
+ atom_cache_ = std::make_unique<XAtomCache>(display());
+
+ root_window_ = RootWindow(display(), DefaultScreen(display()));
+ if (root_window_ == BadValue) {
+ RTC_LOG(LS_ERROR) << "Unable to get the root window";
+ DeinitXlib();
+ return false;
+ }
+
+ gc_ = XCreateGC(display(), root_window_, 0, NULL);
+ if (gc_ == NULL) {
+ RTC_LOG(LS_ERROR) << "Unable to get graphics context";
+ DeinitXlib();
+ return false;
+ }
+
+ options_.x_display()->AddEventHandler(ConfigureNotify, this);
+
+ // Check for XFixes extension. This is required for cursor shape
+ // notifications, and for our use of XDamage.
+ if (XFixesQueryExtension(display(), &xfixes_event_base_,
+ &xfixes_error_base_)) {
+ has_xfixes_ = true;
+ } else {
+ RTC_LOG(LS_INFO) << "X server does not support XFixes.";
+ }
+
+ // Register for changes to the dimensions of the root window.
+ XSelectInput(display(), root_window_, StructureNotifyMask);
+
+ if (!x_server_pixel_buffer_.Init(atom_cache_.get(),
+ DefaultRootWindow(display()))) {
+ RTC_LOG(LS_ERROR) << "Failed to initialize pixel buffer.";
+ return false;
+ }
+
+ if (options_.use_update_notifications()) {
+ InitXDamage();
+ }
+
+ InitXrandr();
+
+ // Default source set here so that selected_monitor_rect_ is sized correctly.
+ SelectSource(kFullDesktopScreenId);
+
+ return true;
+}
+
+void ScreenCapturerX11::InitXDamage() {
+ // Our use of XDamage requires XFixes.
+ if (!has_xfixes_) {
+ return;
+ }
+
+ // Check for XDamage extension.
+ if (!XDamageQueryExtension(display(), &damage_event_base_,
+ &damage_error_base_)) {
+ RTC_LOG(LS_INFO) << "X server does not support XDamage.";
+ return;
+ }
+
+ // TODO(lambroslambrou): Disable DAMAGE in situations where it is known
+ // to fail, such as when Desktop Effects are enabled, with graphics
+ // drivers (nVidia, ATI) that fail to report DAMAGE notifications
+ // properly.
+
+ // Request notifications every time the screen becomes damaged.
+ damage_handle_ =
+ XDamageCreate(display(), root_window_, XDamageReportNonEmpty);
+ if (!damage_handle_) {
+ RTC_LOG(LS_ERROR) << "Unable to initialize XDamage.";
+ return;
+ }
+
+ // Create an XFixes server-side region to collate damage into.
+ damage_region_ = XFixesCreateRegion(display(), 0, 0);
+ if (!damage_region_) {
+ XDamageDestroy(display(), damage_handle_);
+ RTC_LOG(LS_ERROR) << "Unable to create XFixes region.";
+ return;
+ }
+
+ options_.x_display()->AddEventHandler(damage_event_base_ + XDamageNotify,
+ this);
+
+ use_damage_ = true;
+ RTC_LOG(LS_INFO) << "Using XDamage extension.";
+}
+
+RTC_NO_SANITIZE("cfi-icall")
+void ScreenCapturerX11::InitXrandr() {
+ int major_version = 0;
+ int minor_version = 0;
+ int error_base_ignored = 0;
+ if (XRRQueryExtension(display(), &randr_event_base_, &error_base_ignored) &&
+ XRRQueryVersion(display(), &major_version, &minor_version)) {
+ if (major_version > 1 || (major_version == 1 && minor_version >= 5)) {
+ // Dynamically link XRRGetMonitors and XRRFreeMonitors as a workaround
+ // to avoid a dependency issue with Debian 8.
+ get_monitors_ = reinterpret_cast<get_monitors_func>(
+ dlsym(RTLD_DEFAULT, "XRRGetMonitors"));
+ free_monitors_ = reinterpret_cast<free_monitors_func>(
+ dlsym(RTLD_DEFAULT, "XRRFreeMonitors"));
+ if (get_monitors_ && free_monitors_) {
+ use_randr_ = true;
+ RTC_LOG(LS_INFO) << "Using XRandR extension v" << major_version << '.'
+ << minor_version << '.';
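+      // The third argument (`get_active`) is true, so only active monitors
+      // are returned.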
+ monitors_ =
+ get_monitors_(display(), root_window_, true, &num_monitors_);
+
+ // Register for screen change notifications
+ XRRSelectInput(display(), root_window_, RRScreenChangeNotifyMask);
+ options_.x_display()->AddEventHandler(
+ randr_event_base_ + RRScreenChangeNotify, this);
+ } else {
+ RTC_LOG(LS_ERROR) << "Unable to link XRandR monitor functions.";
+ }
+ } else {
+ RTC_LOG(LS_ERROR) << "XRandR entension is older than v1.5.";
+ }
+ } else {
+ RTC_LOG(LS_ERROR) << "X server does not support XRandR.";
+ }
+}
+
+RTC_NO_SANITIZE("cfi-icall")
+void ScreenCapturerX11::UpdateMonitors() {
+ if (monitors_) {
+ free_monitors_(monitors_);
+ monitors_ = nullptr;
+ }
+
+ monitors_ = get_monitors_(display(), root_window_, true, &num_monitors_);
+
+ if (selected_monitor_name_) {
+ if (selected_monitor_name_ == static_cast<Atom>(kFullDesktopScreenId)) {
+ selected_monitor_rect_ =
+ DesktopRect::MakeSize(x_server_pixel_buffer_.window_size());
+ return;
+ }
+
+ for (int i = 0; i < num_monitors_; ++i) {
+ XRRMonitorInfo& m = monitors_[i];
+ if (selected_monitor_name_ == m.name) {
+ RTC_LOG(LS_INFO) << "XRandR monitor " << m.name << " rect updated.";
+ selected_monitor_rect_ =
+ DesktopRect::MakeXYWH(m.x, m.y, m.width, m.height);
+ const auto& pixel_buffer_rect = x_server_pixel_buffer_.window_rect();
+ if (!pixel_buffer_rect.ContainsRect(selected_monitor_rect_)) {
+ // This is never expected to happen, but crop the rectangle anyway
+ // just in case the server returns inconsistent information.
+ // CaptureScreen() expects `selected_monitor_rect_` to lie within
+ // the pixel-buffer's rectangle.
+ RTC_LOG(LS_WARNING)
+ << "Cropping selected monitor rect to fit the pixel-buffer.";
+ selected_monitor_rect_.IntersectWith(pixel_buffer_rect);
+ }
+ return;
+ }
+ }
+
+    // The selected monitor is no longer connected.
+ RTC_LOG(LS_INFO) << "XRandR selected monitor " << selected_monitor_name_
+ << " lost.";
+ selected_monitor_rect_ = DesktopRect::MakeWH(0, 0);
+ }
+}
+
+void ScreenCapturerX11::Start(Callback* callback) {
+ RTC_DCHECK(!callback_);
+ RTC_DCHECK(callback);
+
+ callback_ = callback;
+}
+
+void ScreenCapturerX11::CaptureFrame() {
+ TRACE_EVENT0("webrtc", "ScreenCapturerX11::CaptureFrame");
+ int64_t capture_start_time_nanos = rtc::TimeNanos();
+
+ queue_.MoveToNextFrame();
+ if (queue_.current_frame() && queue_.current_frame()->IsShared()) {
+ RTC_DLOG(LS_WARNING) << "Overwriting frame that is still shared.";
+ }
+
+ // Process XEvents for XDamage and cursor shape tracking.
+ options_.x_display()->ProcessPendingXEvents();
+
+ // ProcessPendingXEvents() may call ScreenConfigurationChanged() which
+ // reinitializes `x_server_pixel_buffer_`. Check if the pixel buffer is still
+  // in good shape.
+ if (!x_server_pixel_buffer_.is_initialized()) {
+ // We failed to initialize pixel buffer.
+ RTC_LOG(LS_ERROR) << "Pixel buffer is not initialized.";
+ callback_->OnCaptureResult(Result::ERROR_PERMANENT, nullptr);
+ return;
+ }
+
+ // Allocate the current frame buffer only if it is not already allocated.
+ // Note that we can't reallocate other buffers at this point, since the caller
+ // may still be reading from them.
+ if (!queue_.current_frame()) {
+ std::unique_ptr<DesktopFrame> frame(
+ new BasicDesktopFrame(selected_monitor_rect_.size()));
+
+ // We set the top-left of the frame so the mouse cursor will be composited
+ // properly, and our frame buffer will not be overrun while blitting.
+ frame->set_top_left(selected_monitor_rect_.top_left());
+ queue_.ReplaceCurrentFrame(SharedDesktopFrame::Wrap(std::move(frame)));
+ }
+
+ std::unique_ptr<DesktopFrame> result = CaptureScreen();
+ if (!result) {
+ RTC_LOG(LS_WARNING) << "Temporarily failed to capture screen.";
+ callback_->OnCaptureResult(Result::ERROR_TEMPORARY, nullptr);
+ return;
+ }
+
+ last_invalid_region_ = result->updated_region();
+ result->set_capture_time_ms((rtc::TimeNanos() - capture_start_time_nanos) /
+ rtc::kNumNanosecsPerMillisec);
+ callback_->OnCaptureResult(Result::SUCCESS, std::move(result));
+}
+
+bool ScreenCapturerX11::GetSourceList(SourceList* sources) {
+ RTC_DCHECK(sources->size() == 0);
+ if (!use_randr_) {
+ sources->push_back({});
+ return true;
+ }
+
+ // Ensure that `monitors_` is updated with changes that may have happened
+ // between calls to GetSourceList().
+ options_.x_display()->ProcessPendingXEvents();
+
+ for (int i = 0; i < num_monitors_; ++i) {
+ XRRMonitorInfo& m = monitors_[i];
+ char* monitor_title = XGetAtomName(display(), m.name);
+
+    // Note: `name` is an X11 Atom used to identify the monitor.
+ sources->push_back({static_cast<SourceId>(m.name), 0, monitor_title});
+ XFree(monitor_title);
+ }
+
+ return true;
+}
+
+bool ScreenCapturerX11::SelectSource(SourceId id) {
+ // Prevent the reuse of any frame buffers allocated for a previously selected
+ // source. This is required to stop crashes, or old data from appearing in
+  // a captured frame, when the new source is sized differently than the source
+ // that was selected at the time a reused frame buffer was created.
+ queue_.Reset();
+
+ if (!use_randr_ || id == kFullDesktopScreenId) {
+ selected_monitor_name_ = kFullDesktopScreenId;
+ selected_monitor_rect_ =
+ DesktopRect::MakeSize(x_server_pixel_buffer_.window_size());
+ return true;
+ }
+
+ for (int i = 0; i < num_monitors_; ++i) {
+ if (id == static_cast<SourceId>(monitors_[i].name)) {
+ RTC_LOG(LS_INFO) << "XRandR selected source: " << id;
+ XRRMonitorInfo& m = monitors_[i];
+ selected_monitor_name_ = m.name;
+ selected_monitor_rect_ =
+ DesktopRect::MakeXYWH(m.x, m.y, m.width, m.height);
+ const auto& pixel_buffer_rect = x_server_pixel_buffer_.window_rect();
+ if (!pixel_buffer_rect.ContainsRect(selected_monitor_rect_)) {
+ RTC_LOG(LS_WARNING)
+ << "Cropping selected monitor rect to fit the pixel-buffer.";
+ selected_monitor_rect_.IntersectWith(pixel_buffer_rect);
+ }
+ return true;
+ }
+ }
+ return false;
+}
+
+bool ScreenCapturerX11::HandleXEvent(const XEvent& event) {
+ if (use_damage_ && (event.type == damage_event_base_ + XDamageNotify)) {
+ const XDamageNotifyEvent* damage_event =
+ reinterpret_cast<const XDamageNotifyEvent*>(&event);
+ if (damage_event->damage != damage_handle_)
+ return false;
+ RTC_DCHECK(damage_event->level == XDamageReportNonEmpty);
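+    // The accumulated damage region is fetched and cleared later, in
+    // CaptureScreen() via XDamageSubtract(); here we only consume the event.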
+ return true;
+ } else if (use_randr_ &&
+ event.type == randr_event_base_ + RRScreenChangeNotify) {
+ XRRUpdateConfiguration(const_cast<XEvent*>(&event));
+ UpdateMonitors();
+ RTC_LOG(LS_INFO) << "XRandR screen change event received.";
+ return false;
+ } else if (event.type == ConfigureNotify) {
+ ScreenConfigurationChanged();
+ return false;
+ }
+ return false;
+}
+
+std::unique_ptr<DesktopFrame> ScreenCapturerX11::CaptureScreen() {
+ std::unique_ptr<SharedDesktopFrame> frame = queue_.current_frame()->Share();
+ RTC_DCHECK(selected_monitor_rect_.size().equals(frame->size()));
+ RTC_DCHECK(selected_monitor_rect_.top_left().equals(frame->top_left()));
+
+ // Pass the screen size to the helper, so it can clip the invalid region if it
+ // expands that region to a grid. Note that the helper operates in the
+ // DesktopFrame coordinate system where the top-left pixel is (0, 0), even for
+ // a monitor with non-zero offset relative to `x_server_pixel_buffer_`.
+ helper_.set_size_most_recent(frame->size());
+
+ // In the DAMAGE case, ensure the frame is up-to-date with the previous frame
+ // if any. If there isn't a previous frame, that means a screen-resolution
+ // change occurred, and `invalid_rects` will be updated to include the whole
+ // screen.
+ if (use_damage_ && queue_.previous_frame())
+ SynchronizeFrame();
+
+ DesktopRegion* updated_region = frame->mutable_updated_region();
+
+ x_server_pixel_buffer_.Synchronize();
+ if (use_damage_ && queue_.previous_frame()) {
+ // Atomically fetch and clear the damage region.
+ XDamageSubtract(display(), damage_handle_, None, damage_region_);
+ int rects_num = 0;
+ XRectangle bounds;
+ XRectangle* rects = XFixesFetchRegionAndBounds(display(), damage_region_,
+ &rects_num, &bounds);
+ for (int i = 0; i < rects_num; ++i) {
+ auto damage_rect = DesktopRect::MakeXYWH(rects[i].x, rects[i].y,
+ rects[i].width, rects[i].height);
+
+      // Damage-regions are relative to `x_server_pixel_buffer_`, so convert the
+ // region to DesktopFrame coordinates where the top-left is always (0, 0),
+ // before adding to the frame's updated_region. `helper_` also operates in
+ // DesktopFrame coordinates, and it will take care of cropping away any
+ // damage-regions that lie outside the selected monitor.
+ damage_rect.Translate(-frame->top_left());
+ updated_region->AddRect(damage_rect);
+ }
+ XFree(rects);
+ helper_.InvalidateRegion(*updated_region);
+
+ // Capture the damaged portions of the desktop.
+ helper_.TakeInvalidRegion(updated_region);
+
+ for (DesktopRegion::Iterator it(*updated_region); !it.IsAtEnd();
+ it.Advance()) {
+ auto rect = it.rect();
+ rect.Translate(frame->top_left());
+ if (!x_server_pixel_buffer_.CaptureRect(rect, frame.get()))
+ return nullptr;
+ }
+ } else {
+ // Doing full-screen polling, or this is the first capture after a
+    // screen-resolution change. In either case, we need a full-screen capture.
+ if (!x_server_pixel_buffer_.CaptureRect(selected_monitor_rect_,
+ frame.get())) {
+ return nullptr;
+ }
+ updated_region->SetRect(DesktopRect::MakeSize(frame->size()));
+ }
+
+ return std::move(frame);
+}
+
+void ScreenCapturerX11::ScreenConfigurationChanged() {
+ TRACE_EVENT0("webrtc", "ScreenCapturerX11::ScreenConfigurationChanged");
+ // Make sure the frame buffers will be reallocated.
+ queue_.Reset();
+
+ helper_.ClearInvalidRegion();
+ if (!x_server_pixel_buffer_.Init(atom_cache_.get(),
+ DefaultRootWindow(display()))) {
+ RTC_LOG(LS_ERROR) << "Failed to initialize pixel buffer after screen "
+ "configuration change.";
+ }
+
+ if (use_randr_) {
+ // Adding/removing RANDR monitors can generate a ConfigureNotify event
+ // without generating any RRScreenChangeNotify event. So it is important to
+ // update the monitors here even if the screen resolution hasn't changed.
+ UpdateMonitors();
+ } else {
+ selected_monitor_rect_ =
+ DesktopRect::MakeSize(x_server_pixel_buffer_.window_size());
+ }
+}
+
+void ScreenCapturerX11::SynchronizeFrame() {
+ // Synchronize the current buffer with the previous one since we do not
+  // capture the entire desktop. Note that the encoder may be reading from the
+  // previous buffer at this time, so thread access complaints are false
+  // positives.
+
+ // TODO(hclam): We can reduce the amount of copying here by subtracting
+ // `capturer_helper_`s region from `last_invalid_region_`.
+ // http://crbug.com/92354
+ RTC_DCHECK(queue_.previous_frame());
+
+ DesktopFrame* current = queue_.current_frame();
+ DesktopFrame* last = queue_.previous_frame();
+ RTC_DCHECK(current != last);
+ for (DesktopRegion::Iterator it(last_invalid_region_); !it.IsAtEnd();
+ it.Advance()) {
+ const DesktopRect& r = it.rect();
+ current->CopyPixelsFrom(*last, r.top_left(), r);
+ }
+}
+
+RTC_NO_SANITIZE("cfi-icall")
+void ScreenCapturerX11::DeinitXlib() {
+ if (monitors_) {
+ free_monitors_(monitors_);
+ monitors_ = nullptr;
+ }
+
+ if (gc_) {
+ XFreeGC(display(), gc_);
+ gc_ = nullptr;
+ }
+
+ x_server_pixel_buffer_.Release();
+
+ if (display()) {
+ if (damage_handle_) {
+ XDamageDestroy(display(), damage_handle_);
+ damage_handle_ = 0;
+ }
+
+ if (damage_region_) {
+ XFixesDestroyRegion(display(), damage_region_);
+ damage_region_ = 0;
+ }
+ }
+}
+
+// static
+std::unique_ptr<DesktopCapturer> ScreenCapturerX11::CreateRawScreenCapturer(
+ const DesktopCaptureOptions& options) {
+ if (!options.x_display())
+ return nullptr;
+
+ std::unique_ptr<ScreenCapturerX11> capturer(new ScreenCapturerX11());
+  if (!capturer->Init(options)) {
+ return nullptr;
+ }
+
+ return std::move(capturer);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/linux/x11/screen_capturer_x11.h b/third_party/libwebrtc/modules/desktop_capture/linux/x11/screen_capturer_x11.h
new file mode 100644
index 0000000000..d2a437aaa2
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/linux/x11/screen_capturer_x11.h
@@ -0,0 +1,147 @@
+/*
+ * Copyright 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_DESKTOP_CAPTURE_LINUX_X11_SCREEN_CAPTURER_X11_H_
+#define MODULES_DESKTOP_CAPTURE_LINUX_X11_SCREEN_CAPTURER_X11_H_
+
+#include <X11/X.h>
+#include <X11/Xlib.h>
+#include <X11/extensions/Xdamage.h>
+#include <X11/extensions/Xfixes.h>
+#include <X11/extensions/Xrandr.h>
+
+#include <memory>
+
+#include "modules/desktop_capture/desktop_capture_options.h"
+#include "modules/desktop_capture/desktop_capturer.h"
+#include "modules/desktop_capture/desktop_frame.h"
+#include "modules/desktop_capture/desktop_region.h"
+#include "modules/desktop_capture/linux/x11/shared_x_display.h"
+#include "modules/desktop_capture/linux/x11/x_atom_cache.h"
+#include "modules/desktop_capture/linux/x11/x_server_pixel_buffer.h"
+#include "modules/desktop_capture/screen_capture_frame_queue.h"
+#include "modules/desktop_capture/screen_capturer_helper.h"
+#include "modules/desktop_capture/shared_desktop_frame.h"
+
+namespace webrtc {
+
+// A class to perform video frame capturing for Linux on X11.
+//
+// If XDamage is used, this class sets DesktopFrame::updated_region() according
+// to the areas reported by XDamage. Otherwise this class does not compute
+// DesktopFrame::updated_region(); the field is always set to the entire frame
+// rectangle. ScreenCapturerDifferWrapper should be used if that functionality
+// is necessary.
+class ScreenCapturerX11 : public DesktopCapturer,
+ public SharedXDisplay::XEventHandler {
+ public:
+ ScreenCapturerX11();
+ ~ScreenCapturerX11() override;
+
+ ScreenCapturerX11(const ScreenCapturerX11&) = delete;
+ ScreenCapturerX11& operator=(const ScreenCapturerX11&) = delete;
+
+ static std::unique_ptr<DesktopCapturer> CreateRawScreenCapturer(
+ const DesktopCaptureOptions& options);
+
+ // TODO(ajwong): Do we really want this to be synchronous?
+ bool Init(const DesktopCaptureOptions& options);
+
+ // DesktopCapturer interface.
+ void Start(Callback* delegate) override;
+ void CaptureFrame() override;
+ bool GetSourceList(SourceList* sources) override;
+ bool SelectSource(SourceId id) override;
+
+ private:
+ Display* display() { return options_.x_display()->display(); }
+
+ // SharedXDisplay::XEventHandler interface.
+ bool HandleXEvent(const XEvent& event) override;
+
+ void InitXDamage();
+ void InitXrandr();
+ void UpdateMonitors();
+
+ // Capture screen pixels to the current buffer in the queue. In the DAMAGE
+ // case, the ScreenCapturerHelper already holds the list of invalid rectangles
+ // from HandleXEvent(). In the non-DAMAGE case, this captures the
+ // whole screen, then calculates some invalid rectangles that include any
+ // differences between this and the previous capture.
+ std::unique_ptr<DesktopFrame> CaptureScreen();
+
+ // Called when the screen configuration is changed.
+ void ScreenConfigurationChanged();
+
+  // Synchronizes the current buffer with the previous one, by copying pixels
+  // from the area of `last_invalid_region_`.
+  // Note this only works on the assumption that kNumBuffers == 2, as
+  // `last_invalid_region_` holds the differences between the previous buffer
+  // and the one prior to that (which will then be the current buffer).
+ void SynchronizeFrame();
+
+ void DeinitXlib();
+
+ DesktopCaptureOptions options_;
+
+ Callback* callback_ = nullptr;
+
+ // X11 graphics context.
+ GC gc_ = nullptr;
+ Window root_window_ = BadValue;
+
+ // XRandR 1.5 monitors.
+ bool use_randr_ = false;
+ int randr_event_base_ = 0;
+ XRRMonitorInfo* monitors_ = nullptr;
+ int num_monitors_ = 0;
+ DesktopRect selected_monitor_rect_;
+  // `selected_monitor_name_` will be changed to kFullDesktopScreenId by a
+  // call to SelectSource() at the end of Init(), because
+  // `selected_monitor_rect_` should be updated as well. Setting it to
+  // kFullDesktopScreenId here might be misleading.
+ Atom selected_monitor_name_ = 0;
+ typedef XRRMonitorInfo* (*get_monitors_func)(Display*, Window, Bool, int*);
+ typedef void (*free_monitors_func)(XRRMonitorInfo*);
+ get_monitors_func get_monitors_ = nullptr;
+ free_monitors_func free_monitors_ = nullptr;
+
+ // XFixes.
+ bool has_xfixes_ = false;
+ int xfixes_event_base_ = -1;
+ int xfixes_error_base_ = -1;
+
+ // XDamage information.
+ bool use_damage_ = false;
+ Damage damage_handle_ = 0;
+ int damage_event_base_ = -1;
+ int damage_error_base_ = -1;
+ XserverRegion damage_region_ = 0;
+
+ // Access to the X Server's pixel buffer.
+ XServerPixelBuffer x_server_pixel_buffer_;
+
+ // A thread-safe list of invalid rectangles, and the size of the most
+ // recently captured screen.
+ ScreenCapturerHelper helper_;
+
+ // Queue of the frames buffers.
+ ScreenCaptureFrameQueue<SharedDesktopFrame> queue_;
+
+ // Invalid region from the previous capture. This is used to synchronize the
+ // current with the last buffer used.
+ DesktopRegion last_invalid_region_;
+
+ std::unique_ptr<XAtomCache> atom_cache_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_DESKTOP_CAPTURE_LINUX_X11_SCREEN_CAPTURER_X11_H_
diff --git a/third_party/libwebrtc/modules/desktop_capture/linux/x11/shared_x_display.cc b/third_party/libwebrtc/modules/desktop_capture/linux/x11/shared_x_display.cc
new file mode 100644
index 0000000000..b7849508b0
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/linux/x11/shared_x_display.cc
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/linux/x11/shared_x_display.h"
+
+#include <X11/Xlib.h>
+#include <X11/extensions/XTest.h>
+
+#include <algorithm>
+
+#include "absl/strings/string_view.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+SharedXDisplay::SharedXDisplay(Display* display) : display_(display) {
+ RTC_DCHECK(display_);
+}
+
+SharedXDisplay::~SharedXDisplay() {
+ RTC_DCHECK(event_handlers_.empty());
+ XCloseDisplay(display_);
+}
+
+// static
+rtc::scoped_refptr<SharedXDisplay> SharedXDisplay::Create(
+ absl::string_view display_name) {
+ Display* display = XOpenDisplay(
+ display_name.empty() ? NULL : std::string(display_name).c_str());
+ if (!display) {
+ RTC_LOG(LS_ERROR) << "Unable to open display";
+ return nullptr;
+ }
+ return rtc::scoped_refptr<SharedXDisplay>(new SharedXDisplay(display));
+}
+
+// static
+rtc::scoped_refptr<SharedXDisplay> SharedXDisplay::CreateDefault() {
+ return Create(std::string());
+}
+
+void SharedXDisplay::AddEventHandler(int type, XEventHandler* handler) {
+ event_handlers_[type].push_back(handler);
+}
+
+void SharedXDisplay::RemoveEventHandler(int type, XEventHandler* handler) {
+ EventHandlersMap::iterator handlers = event_handlers_.find(type);
+ if (handlers == event_handlers_.end())
+ return;
+
+ std::vector<XEventHandler*>::iterator new_end =
+ std::remove(handlers->second.begin(), handlers->second.end(), handler);
+ handlers->second.erase(new_end, handlers->second.end());
+
+ // Check if no handlers left for this event.
+ if (handlers->second.empty())
+ event_handlers_.erase(handlers);
+}
+
+void SharedXDisplay::ProcessPendingXEvents() {
+ // Hold reference to `this` to prevent it from being destroyed while
+ // processing events.
+ rtc::scoped_refptr<SharedXDisplay> self(this);
+
+ // Find the number of events that are outstanding "now." We don't just loop
+ // on XPending because we want to guarantee this terminates.
+ int events_to_process = XPending(display());
+ XEvent e;
+
+ for (int i = 0; i < events_to_process; i++) {
+ XNextEvent(display(), &e);
+ EventHandlersMap::iterator handlers = event_handlers_.find(e.type);
+ if (handlers == event_handlers_.end())
+ continue;
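+    // Dispatch to the registered handlers in order; stop as soon as one
+    // reports the event as handled.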
+ for (std::vector<XEventHandler*>::iterator it = handlers->second.begin();
+ it != handlers->second.end(); ++it) {
+ if ((*it)->HandleXEvent(e))
+ break;
+ }
+ }
+}
+
+void SharedXDisplay::IgnoreXServerGrabs() {
+ int test_event_base = 0;
+ int test_error_base = 0;
+ int major = 0;
+ int minor = 0;
+ if (XTestQueryExtension(display(), &test_event_base, &test_error_base, &major,
+ &minor)) {
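+    // Make this client impervious to server grabs by other clients, so that
+    // capture keeps working while, e.g., a menu or screensaver holds a grab.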
+ XTestGrabControl(display(), true);
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/linux/x11/shared_x_display.h b/third_party/libwebrtc/modules/desktop_capture/linux/x11/shared_x_display.h
new file mode 100644
index 0000000000..084da80167
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/linux/x11/shared_x_display.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_DESKTOP_CAPTURE_LINUX_X11_SHARED_X_DISPLAY_H_
+#define MODULES_DESKTOP_CAPTURE_LINUX_X11_SHARED_X_DISPLAY_H_
+
+#include <map>
+#include <vector>
+
+#include "absl/strings/string_view.h"
+#include "api/ref_counted_base.h"
+#include "api/scoped_refptr.h"
+#include "rtc_base/system/rtc_export.h"
+
+// Including Xlib.h will involve evil defines (Bool, Status, True, False), which
+// easily conflict with other headers.
+typedef struct _XDisplay Display;
+typedef union _XEvent XEvent;
+
+namespace webrtc {
+
+// A ref-counted object to store XDisplay connection.
+class RTC_EXPORT SharedXDisplay
+ : public rtc::RefCountedNonVirtual<SharedXDisplay> {
+ public:
+ class XEventHandler {
+ public:
+ virtual ~XEventHandler() {}
+
+ // Processes XEvent. Returns true if the event has been handled.
+ virtual bool HandleXEvent(const XEvent& event) = 0;
+ };
+
+ // Creates a new X11 Display for the `display_name`. NULL is returned if X11
+ // connection failed. Equivalent to CreateDefault() when `display_name` is
+ // empty.
+ static rtc::scoped_refptr<SharedXDisplay> Create(
+ absl::string_view display_name);
+
+ // Creates X11 Display connection for the default display (e.g. specified in
+ // DISPLAY). NULL is returned if X11 connection failed.
+ static rtc::scoped_refptr<SharedXDisplay> CreateDefault();
+
+ Display* display() { return display_; }
+
+ // Adds a new event `handler` for XEvent's of `type`.
+ void AddEventHandler(int type, XEventHandler* handler);
+
+ // Removes event `handler` added using `AddEventHandler`. Doesn't do anything
+ // if `handler` is not registered.
+ void RemoveEventHandler(int type, XEventHandler* handler);
+
+ // Processes pending XEvents, calling corresponding event handlers.
+ void ProcessPendingXEvents();
+
+ void IgnoreXServerGrabs();
+
+ ~SharedXDisplay();
+
+ SharedXDisplay(const SharedXDisplay&) = delete;
+ SharedXDisplay& operator=(const SharedXDisplay&) = delete;
+
+ protected:
+ // Takes ownership of `display`.
+ explicit SharedXDisplay(Display* display);
+
+ private:
+ typedef std::map<int, std::vector<XEventHandler*> > EventHandlersMap;
+
+ Display* display_;
+
+ EventHandlersMap event_handlers_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_DESKTOP_CAPTURE_LINUX_X11_SHARED_X_DISPLAY_H_
diff --git a/third_party/libwebrtc/modules/desktop_capture/linux/x11/window_capturer_x11.cc b/third_party/libwebrtc/modules/desktop_capture/linux/x11/window_capturer_x11.cc
new file mode 100644
index 0000000000..3015a474ff
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/linux/x11/window_capturer_x11.cc
@@ -0,0 +1,255 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/linux/x11/window_capturer_x11.h"
+
+#include <X11/Xutil.h>
+#include <X11/extensions/Xcomposite.h>
+#include <X11/extensions/composite.h>
+#include <string.h>
+
+#include <memory>
+#include <string>
+#include <utility>
+
+#include "api/scoped_refptr.h"
+#include "modules/desktop_capture/desktop_capture_types.h"
+#include "modules/desktop_capture/desktop_frame.h"
+#include "modules/desktop_capture/desktop_region.h"
+#include "modules/desktop_capture/linux/x11/shared_x_display.h"
+#include "modules/desktop_capture/linux/x11/window_finder_x11.h"
+#include "modules/desktop_capture/linux/x11/window_list_utils.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/trace_event.h"
+
+namespace webrtc {
+
+WindowCapturerX11::WindowCapturerX11(const DesktopCaptureOptions& options)
+ : x_display_(options.x_display()),
+ atom_cache_(display()),
+ window_finder_(&atom_cache_) {
+ int event_base, error_base, major_version, minor_version;
+ if (XCompositeQueryExtension(display(), &event_base, &error_base) &&
+ XCompositeQueryVersion(display(), &major_version, &minor_version) &&
+ // XCompositeNameWindowPixmap() requires version 0.2
+ (major_version > 0 || minor_version >= 2)) {
+ has_composite_extension_ = true;
+ } else {
+ RTC_LOG(LS_INFO) << "Xcomposite extension not available or too old.";
+ }
+
+ x_display_->AddEventHandler(ConfigureNotify, this);
+}
+
+WindowCapturerX11::~WindowCapturerX11() {
+ x_display_->RemoveEventHandler(ConfigureNotify, this);
+}
+
+bool WindowCapturerX11::GetSourceList(SourceList* sources) {
+ return GetWindowList(&atom_cache_, [this, sources](::Window window) {
+ Source w;
+ w.id = window;
+ w.pid = (pid_t)GetWindowProcessID(window);
+ if (this->GetWindowTitle(window, &w.title)) {
+ sources->push_back(w);
+ }
+ return true;
+ });
+}
+
+bool WindowCapturerX11::SelectSource(SourceId id) {
+ if (!x_server_pixel_buffer_.Init(&atom_cache_, id))
+ return false;
+
+ // Tell the X server to send us window resizing events.
+ XSelectInput(display(), id, StructureNotifyMask);
+
+ selected_window_ = id;
+
+ // In addition to needing X11 server-side support for Xcomposite, it actually
+ // needs to be turned on for the window. If the user has modern
+ // hardware/drivers but isn't using a compositing window manager, that won't
+ // be the case. Here we automatically turn it on.
+
+  // Redirect drawing to an offscreen buffer (i.e., turn on compositing). X11
+ // remembers who has requested this and will turn it off for us when we exit.
+ XCompositeRedirectWindow(display(), id, CompositeRedirectAutomatic);
+
+ return true;
+}
+
+bool WindowCapturerX11::FocusOnSelectedSource() {
+ if (!selected_window_)
+ return false;
+
+ unsigned int num_children;
+ ::Window* children;
+ ::Window parent;
+ ::Window root;
+ // Find the root window to pass event to.
+ int status = XQueryTree(display(), selected_window_, &root, &parent,
+ &children, &num_children);
+ if (status == 0) {
+ RTC_LOG(LS_ERROR) << "Failed to query for the root window.";
+ return false;
+ }
+
+ if (children)
+ XFree(children);
+
+ XRaiseWindow(display(), selected_window_);
+
+ // Some window managers (e.g., metacity in GNOME) consider it illegal to
+ // raise a window without also giving it input focus with
+ // _NET_ACTIVE_WINDOW, so XRaiseWindow() on its own isn't enough.
+ Atom atom = XInternAtom(display(), "_NET_ACTIVE_WINDOW", True);
+ if (atom != None) {
+ XEvent xev;
+ xev.xclient.type = ClientMessage;
+ xev.xclient.serial = 0;
+ xev.xclient.send_event = True;
+ xev.xclient.window = selected_window_;
+ xev.xclient.message_type = atom;
+
+ // The format member is set to 8, 16, or 32 and specifies whether the
+ // data should be viewed as a list of bytes, shorts, or longs.
+ xev.xclient.format = 32;
+
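+    // Leaving the data fields zeroed gives a source indication of 0, which
+    // EWMH defines as a request coming from a legacy application.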
+ memset(xev.xclient.data.l, 0, sizeof(xev.xclient.data.l));
+
+ XSendEvent(display(), root, False,
+ SubstructureRedirectMask | SubstructureNotifyMask, &xev);
+ }
+ XFlush(display());
+ return true;
+}
+
+void WindowCapturerX11::Start(Callback* callback) {
+ RTC_DCHECK(!callback_);
+ RTC_DCHECK(callback);
+
+ callback_ = callback;
+}
+
+void WindowCapturerX11::CaptureFrame() {
+ TRACE_EVENT0("webrtc", "WindowCapturerX11::CaptureFrame");
+ x_display_->ProcessPendingXEvents();
+
+ if (!x_server_pixel_buffer_.IsWindowValid()) {
+ RTC_LOG(LS_ERROR) << "The window is no longer valid.";
+ callback_->OnCaptureResult(Result::ERROR_PERMANENT, nullptr);
+ return;
+ }
+
+ if (!has_composite_extension_) {
+    // Without the Xcomposite extension we could capture only when the whole
+    // window is visible on screen and not covered by any other window. This
+    // is not something we want, so just bail out.
+ RTC_LOG(LS_ERROR) << "No Xcomposite extension detected.";
+ callback_->OnCaptureResult(Result::ERROR_PERMANENT, nullptr);
+ return;
+ }
+
+ if (GetWindowState(&atom_cache_, selected_window_) == IconicState) {
+    // Window is minimized. Return a 1x1 frame, the same as OSX/Win does.
+ std::unique_ptr<DesktopFrame> frame(
+ new BasicDesktopFrame(DesktopSize(1, 1)));
+ callback_->OnCaptureResult(Result::SUCCESS, std::move(frame));
+ return;
+ }
+
+ std::unique_ptr<DesktopFrame> frame(
+ new BasicDesktopFrame(x_server_pixel_buffer_.window_size()));
+
+ x_server_pixel_buffer_.Synchronize();
+ if (!x_server_pixel_buffer_.CaptureRect(DesktopRect::MakeSize(frame->size()),
+ frame.get())) {
+ RTC_LOG(LS_WARNING) << "Temporarily failed to capture winodw.";
+ callback_->OnCaptureResult(Result::ERROR_TEMPORARY, nullptr);
+ return;
+ }
+
+ frame->mutable_updated_region()->SetRect(
+ DesktopRect::MakeSize(frame->size()));
+ frame->set_top_left(x_server_pixel_buffer_.window_rect().top_left());
+
+ callback_->OnCaptureResult(Result::SUCCESS, std::move(frame));
+}
+
+bool WindowCapturerX11::IsOccluded(const DesktopVector& pos) {
+ return window_finder_.GetWindowUnderPoint(pos) !=
+ static_cast<WindowId>(selected_window_);
+}
+
+bool WindowCapturerX11::HandleXEvent(const XEvent& event) {
+ if (event.type == ConfigureNotify) {
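+    // ConfigureNotify also fires for stacking-order changes, so reinitialize
+    // the pixel buffer only if the window geometry actually changed.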
+ XConfigureEvent xce = event.xconfigure;
+ if (xce.window == selected_window_) {
+ if (!DesktopRectFromXAttributes(xce).equals(
+ x_server_pixel_buffer_.window_rect())) {
+ if (!x_server_pixel_buffer_.Init(&atom_cache_, selected_window_)) {
+ RTC_LOG(LS_ERROR)
+ << "Failed to initialize pixel buffer after resizing.";
+ }
+ }
+ }
+ }
+
+ // Always returns false, so other observers can still receive the events.
+ return false;
+}
+
+bool WindowCapturerX11::GetWindowTitle(::Window window, std::string* title) {
+ int status;
+ bool result = false;
+ XTextProperty window_name;
+ window_name.value = nullptr;
+ if (window) {
+ status = XGetWMName(display(), window, &window_name);
+ if (status && window_name.value && window_name.nitems) {
+ int cnt;
+ char** list = nullptr;
+ status =
+ Xutf8TextPropertyToTextList(display(), &window_name, &list, &cnt);
+ if (status >= Success && cnt && *list) {
+ if (cnt > 1) {
+ RTC_LOG(LS_INFO) << "Window has " << cnt
+ << " text properties, only using the first one.";
+ }
+ *title = *list;
+ result = true;
+ }
+ if (list)
+ XFreeStringList(list);
+ }
+ if (window_name.value)
+ XFree(window_name.value);
+ }
+ return result;
+}
+
+int WindowCapturerX11::GetWindowProcessID(::Window window) {
+ // Get _NET_WM_PID property of the window.
+ Atom process_atom = XInternAtom(display(), "_NET_WM_PID", True);
+ XWindowProperty<uint32_t> process_id(display(), window, process_atom);
+
+ return process_id.is_valid() ? *process_id.data() : 0;
+}
+
+// static
+std::unique_ptr<DesktopCapturer> WindowCapturerX11::CreateRawWindowCapturer(
+ const DesktopCaptureOptions& options) {
+ if (!options.x_display())
+ return nullptr;
+ return std::unique_ptr<DesktopCapturer>(new WindowCapturerX11(options));
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/linux/x11/window_capturer_x11.h b/third_party/libwebrtc/modules/desktop_capture/linux/x11/window_capturer_x11.h
new file mode 100644
index 0000000000..cfd29eca66
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/linux/x11/window_capturer_x11.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_DESKTOP_CAPTURE_LINUX_X11_WINDOW_CAPTURER_X11_H_
+#define MODULES_DESKTOP_CAPTURE_LINUX_X11_WINDOW_CAPTURER_X11_H_
+
+#include <X11/X.h>
+#include <X11/Xlib.h>
+
+#include <memory>
+#include <string>
+
+#include "api/scoped_refptr.h"
+#include "modules/desktop_capture/desktop_capture_options.h"
+#include "modules/desktop_capture/desktop_capturer.h"
+#include "modules/desktop_capture/desktop_geometry.h"
+#include "modules/desktop_capture/linux/x11/shared_x_display.h"
+#include "modules/desktop_capture/linux/x11/x_window_property.h"
+#include "modules/desktop_capture/linux/x11/window_finder_x11.h"
+#include "modules/desktop_capture/linux/x11/x_atom_cache.h"
+#include "modules/desktop_capture/linux/x11/x_server_pixel_buffer.h"
+
+namespace webrtc {
+
+class WindowCapturerX11 : public DesktopCapturer,
+ public SharedXDisplay::XEventHandler {
+ public:
+ explicit WindowCapturerX11(const DesktopCaptureOptions& options);
+ ~WindowCapturerX11() override;
+
+ WindowCapturerX11(const WindowCapturerX11&) = delete;
+ WindowCapturerX11& operator=(const WindowCapturerX11&) = delete;
+
+ static std::unique_ptr<DesktopCapturer> CreateRawWindowCapturer(
+ const DesktopCaptureOptions& options);
+
+ // DesktopCapturer interface.
+ void Start(Callback* callback) override;
+ void CaptureFrame() override;
+ bool GetSourceList(SourceList* sources) override;
+ bool SelectSource(SourceId id) override;
+ bool FocusOnSelectedSource() override;
+ bool IsOccluded(const DesktopVector& pos) override;
+
+ // SharedXDisplay::XEventHandler interface.
+ bool HandleXEvent(const XEvent& event) override;
+
+ private:
+ Display* display() { return x_display_->display(); }
+
+ // Returns window title for the specified X `window`.
+ bool GetWindowTitle(::Window window, std::string* title);
+
+ // Returns the id of the owning process.
+ int GetWindowProcessID(::Window window);
+
+ Callback* callback_ = nullptr;
+
+ rtc::scoped_refptr<SharedXDisplay> x_display_;
+
+ bool has_composite_extension_ = false;
+
+ ::Window selected_window_ = 0;
+ XServerPixelBuffer x_server_pixel_buffer_;
+ XAtomCache atom_cache_;
+ WindowFinderX11 window_finder_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_DESKTOP_CAPTURE_LINUX_X11_WINDOW_CAPTURER_X11_H_
diff --git a/third_party/libwebrtc/modules/desktop_capture/linux/x11/window_finder_x11.cc b/third_party/libwebrtc/modules/desktop_capture/linux/x11/window_finder_x11.cc
new file mode 100644
index 0000000000..dec17ab51f
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/linux/x11/window_finder_x11.cc
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/linux/x11/window_finder_x11.h"
+
+#include <X11/X.h>
+
+#include <memory>
+
+#include "modules/desktop_capture/linux/x11/window_list_utils.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+WindowFinderX11::WindowFinderX11(XAtomCache* cache) : cache_(cache) {
+ RTC_DCHECK(cache_);
+}
+
+WindowFinderX11::~WindowFinderX11() = default;
+
+WindowId WindowFinderX11::GetWindowUnderPoint(DesktopVector point) {
+ WindowId id = kNullWindowId;
+ GetWindowList(cache_, [&id, this, point](::Window window) {
+ DesktopRect rect;
+ if (GetWindowRect(this->cache_->display(), window, &rect) &&
+ rect.Contains(point)) {
+ id = window;
+ return false;
+ }
+ return true;
+ });
+ return id;
+}
+
+// static
+std::unique_ptr<WindowFinder> WindowFinder::Create(
+ const WindowFinder::Options& options) {
+ if (options.cache == nullptr) {
+ return nullptr;
+ }
+
+ return std::make_unique<WindowFinderX11>(options.cache);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/linux/x11/window_finder_x11.h b/third_party/libwebrtc/modules/desktop_capture/linux/x11/window_finder_x11.h
new file mode 100644
index 0000000000..91de876417
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/linux/x11/window_finder_x11.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_DESKTOP_CAPTURE_LINUX_X11_WINDOW_FINDER_X11_H_
+#define MODULES_DESKTOP_CAPTURE_LINUX_X11_WINDOW_FINDER_X11_H_
+
+#include "modules/desktop_capture/window_finder.h"
+
+namespace webrtc {
+
+class XAtomCache;
+
+// The implementation of WindowFinder for X11.
+class WindowFinderX11 final : public WindowFinder {
+ public:
+ explicit WindowFinderX11(XAtomCache* cache);
+ ~WindowFinderX11() override;
+
+ // WindowFinder implementation.
+ WindowId GetWindowUnderPoint(DesktopVector point) override;
+
+ private:
+ XAtomCache* const cache_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_DESKTOP_CAPTURE_LINUX_X11_WINDOW_FINDER_X11_H_
diff --git a/third_party/libwebrtc/modules/desktop_capture/linux/x11/window_list_utils.cc b/third_party/libwebrtc/modules/desktop_capture/linux/x11/window_list_utils.cc
new file mode 100644
index 0000000000..ff2d467e29
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/linux/x11/window_list_utils.cc
@@ -0,0 +1,198 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/linux/x11/window_list_utils.h"
+
+#include <X11/Xlib.h>
+#include <X11/Xutil.h>
+#include <string.h>
+
+#include <algorithm>
+
+#include "modules/desktop_capture/linux/x11/x_error_trap.h"
+#include "modules/desktop_capture/linux/x11/x_window_property.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+namespace {
+
+class DeferXFree {
+ public:
+ explicit DeferXFree(void* data) : data_(data) {}
+ ~DeferXFree();
+
+ private:
+ void* const data_;
+};
+
+DeferXFree::~DeferXFree() {
+ if (data_)
+ XFree(data_);
+}
+
+// Iterates through the `window` hierarchy to find the first visible window,
+// i.e. one that has the WM_STATE property set to NormalState.
+// See http://tronche.com/gui/x/icccm/sec-4.html#s-4.1.3.1 .
+::Window GetApplicationWindow(XAtomCache* cache, ::Window window) {
+ int32_t state = GetWindowState(cache, window);
+ if (state == NormalState) {
+ // Window has WM_STATE==NormalState. Return it.
+ return window;
+ } else if (state == IconicState) {
+    // Window is minimized. Skip it.
+ return 0;
+ }
+
+ RTC_DCHECK_EQ(state, WithdrawnState);
+ // If the window is in WithdrawnState then look at all of its children.
+ ::Window root, parent;
+ ::Window* children;
+ unsigned int num_children;
+ if (!XQueryTree(cache->display(), window, &root, &parent, &children,
+ &num_children)) {
+    RTC_LOG(LS_ERROR) << "Failed to query for child windows although window "
+                         "does not have a valid WM_STATE.";
+ return 0;
+ }
+ ::Window app_window = 0;
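+  // Recurse depth-first into the children; the first subtree that yields a
+  // visible application window wins.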
+ for (unsigned int i = 0; i < num_children; ++i) {
+ app_window = GetApplicationWindow(cache, children[i]);
+ if (app_window)
+ break;
+ }
+
+ if (children)
+ XFree(children);
+ return app_window;
+}
+
+// Returns true if the `window` is a desktop element.
+bool IsDesktopElement(XAtomCache* cache, ::Window window) {
+ RTC_DCHECK(cache);
+ if (window == 0)
+ return false;
+
+ // First look for _NET_WM_WINDOW_TYPE. The standard
+ // (http://standards.freedesktop.org/wm-spec/latest/ar01s05.html#id2760306)
+ // says this hint *should* be present on all windows, and we use the existence
+ // of _NET_WM_WINDOW_TYPE_NORMAL in the property to indicate a window is not
+ // a desktop element (that is, only "normal" windows should be shareable).
+ XWindowProperty<uint32_t> window_type(cache->display(), window,
+ cache->WindowType());
+ if (window_type.is_valid() && window_type.size() > 0) {
+ uint32_t* end = window_type.data() + window_type.size();
+ bool is_normal =
+ (end != std::find(window_type.data(), end, cache->WindowTypeNormal()));
+ return !is_normal;
+ }
+
+  // Fall back on using the WM_CLASS hint.
+ XClassHint class_hint;
+ Status status = XGetClassHint(cache->display(), window, &class_hint);
+ if (status == 0) {
+ // No hints, assume this is a normal application window.
+ return false;
+ }
+
+ DeferXFree free_res_name(class_hint.res_name);
+ DeferXFree free_res_class(class_hint.res_class);
+ return strcmp("gnome-panel", class_hint.res_name) == 0 ||
+ strcmp("desktop_window", class_hint.res_name) == 0;
+}
+
+} // namespace
+
+int32_t GetWindowState(XAtomCache* cache, ::Window window) {
+ // Get WM_STATE property of the window.
+ XWindowProperty<uint32_t> window_state(cache->display(), window,
+ cache->WmState());
+
+  // WM_STATE is considered to be set to WithdrawnState when it is missing.
+ return window_state.is_valid() ? *window_state.data() : WithdrawnState;
+}
+
+bool GetWindowList(XAtomCache* cache,
+ rtc::FunctionView<bool(::Window)> on_window) {
+ RTC_DCHECK(cache);
+ RTC_DCHECK(on_window);
+ ::Display* const display = cache->display();
+
+ int failed_screens = 0;
+ const int num_screens = XScreenCount(display);
+ for (int screen = 0; screen < num_screens; screen++) {
+ ::Window root_window = XRootWindow(display, screen);
+ ::Window parent;
+ ::Window* children;
+ unsigned int num_children;
+ {
+ XErrorTrap error_trap(display);
+ if (XQueryTree(display, root_window, &root_window, &parent, &children,
+ &num_children) == 0 ||
+ error_trap.GetLastErrorAndDisable() != 0) {
+ failed_screens++;
+ RTC_LOG(LS_ERROR) << "Failed to query for child windows for screen "
+ << screen;
+ continue;
+ }
+ }
+
+ DeferXFree free_children(children);
+
+ for (unsigned int i = 0; i < num_children; i++) {
+      // Iterate in reverse order to return windows from front to back.
+ ::Window app_window =
+ GetApplicationWindow(cache, children[num_children - 1 - i]);
+ if (app_window && !IsDesktopElement(cache, app_window)) {
+ if (!on_window(app_window)) {
+ return true;
+ }
+ }
+ }
+ }
+
+ return failed_screens < num_screens;
+}
+
+bool GetWindowRect(::Display* display,
+ ::Window window,
+ DesktopRect* rect,
+ XWindowAttributes* attributes /* = nullptr */) {
+ XWindowAttributes local_attributes;
+ int offset_x;
+ int offset_y;
+ if (attributes == nullptr) {
+ attributes = &local_attributes;
+ }
+
+ {
+ XErrorTrap error_trap(display);
+ if (!XGetWindowAttributes(display, window, attributes) ||
+ error_trap.GetLastErrorAndDisable() != 0) {
+ return false;
+ }
+ }
+ *rect = DesktopRectFromXAttributes(*attributes);
+
+ {
+ XErrorTrap error_trap(display);
+ ::Window child;
+ if (!XTranslateCoordinates(display, window, attributes->root, -rect->left(),
+ -rect->top(), &offset_x, &offset_y, &child) ||
+ error_trap.GetLastErrorAndDisable() != 0) {
+ return false;
+ }
+ }
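+  // Translating by the computed offset moves the rectangle into root-window
+  // (system) coordinates.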
+ rect->Translate(offset_x, offset_y);
+ return true;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/linux/x11/window_list_utils.h b/third_party/libwebrtc/modules/desktop_capture/linux/x11/window_list_utils.h
new file mode 100644
index 0000000000..923842df14
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/linux/x11/window_list_utils.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_DESKTOP_CAPTURE_LINUX_X11_WINDOW_LIST_UTILS_H_
+#define MODULES_DESKTOP_CAPTURE_LINUX_X11_WINDOW_LIST_UTILS_H_
+
+#include <X11/X.h>
+#include <X11/Xlib.h>
+#include <stdint.h>
+
+#include "api/function_view.h"
+#include "modules/desktop_capture/desktop_geometry.h"
+#include "modules/desktop_capture/linux/x11/x_atom_cache.h"
+
+namespace webrtc {
+
+// Synchronously iterates all on-screen windows in `cache`.display() in
+// decreasing z-order and passes them one-by-one to the `on_window` function
+// before GetWindowList() returns. If `on_window` returns false, this function
+// ignores the remaining windows and returns immediately. GetWindowList()
+// returns false if native APIs failed. If multiple screens are attached to
+// the `display`, this function returns false only when native APIs failed on
+// all screens. Menus, panels and minimized windows will be ignored.
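+//
+// A minimal usage sketch (illustrative only; assumes `cache` is a valid
+// XAtomCache*):
+//
+//   GetWindowList(cache, [](::Window window) {
+//     // Inspect `window`; return true to keep enumerating.
+//     return true;
+//   });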
+bool GetWindowList(XAtomCache* cache,
+ rtc::FunctionView<bool(::Window)> on_window);
+
+// Returns WM_STATE property of the `window`. This function returns
+// WithdrawnState if the `window` is missing.
+int32_t GetWindowState(XAtomCache* cache, ::Window window);
+
+// Returns the rectangle of the `window` in the coordinates of `display`. This
+// function returns false if native APIs failed. If `attributes` is provided, it
+// will be filled with the attributes of `window`. The `rect` is in system
+// coordinates, i.e. the primary monitor always starts at (0, 0).
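+//
+// Illustrative usage (assumes `display` and `window` are valid):
+//
+//   DesktopRect rect;
+//   if (GetWindowRect(display, window, &rect)) {
+//     // `rect` now holds the window bounds in system coordinates.
+//   }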
+bool GetWindowRect(::Display* display,
+ ::Window window,
+ DesktopRect* rect,
+ XWindowAttributes* attributes = nullptr);
+
+// Creates a DesktopRect from `attributes`.
+template <typename T>
+DesktopRect DesktopRectFromXAttributes(const T& attributes) {
+ return DesktopRect::MakeXYWH(attributes.x, attributes.y, attributes.width,
+ attributes.height);
+}
+
+} // namespace webrtc
+
+#endif // MODULES_DESKTOP_CAPTURE_LINUX_X11_WINDOW_LIST_UTILS_H_
diff --git a/third_party/libwebrtc/modules/desktop_capture/linux/x11/x_atom_cache.cc b/third_party/libwebrtc/modules/desktop_capture/linux/x11/x_atom_cache.cc
new file mode 100644
index 0000000000..157ba8b8fd
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/linux/x11/x_atom_cache.cc
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/linux/x11/x_atom_cache.h"
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+XAtomCache::XAtomCache(::Display* display) : display_(display) {
+ RTC_DCHECK(display_);
+}
+
+XAtomCache::~XAtomCache() = default;
+
+::Display* XAtomCache::display() const {
+ return display_;
+}
+
+Atom XAtomCache::WmState() {
+ return CreateIfNotExist(&wm_state_, "WM_STATE");
+}
+
+Atom XAtomCache::WindowType() {
+ return CreateIfNotExist(&window_type_, "_NET_WM_WINDOW_TYPE");
+}
+
+Atom XAtomCache::WindowTypeNormal() {
+ return CreateIfNotExist(&window_type_normal_, "_NET_WM_WINDOW_TYPE_NORMAL");
+}
+
+Atom XAtomCache::IccProfile() {
+ return CreateIfNotExist(&icc_profile_, "_ICC_PROFILE");
+}
+
+Atom XAtomCache::CreateIfNotExist(Atom* atom, const char* name) {
+ RTC_DCHECK(atom);
+ if (*atom == None) {
+ *atom = XInternAtom(display(), name, True);
+ }
+ return *atom;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/linux/x11/x_atom_cache.h b/third_party/libwebrtc/modules/desktop_capture/linux/x11/x_atom_cache.h
new file mode 100644
index 0000000000..39d957e98b
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/linux/x11/x_atom_cache.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_DESKTOP_CAPTURE_LINUX_X11_X_ATOM_CACHE_H_
+#define MODULES_DESKTOP_CAPTURE_LINUX_X11_X_ATOM_CACHE_H_
+
+#include <X11/X.h>
+#include <X11/Xlib.h>
+
+namespace webrtc {
+
+// A cache of X11 Atoms. Each Atom is created (interned) on demand.
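+//
+// Illustrative usage (assumes `display` is an open ::Display*):
+//
+//   XAtomCache cache(display);
+//   Atom wm_state = cache.WmState();  // Interned on first call, then cached.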
+class XAtomCache final {
+ public:
+ explicit XAtomCache(::Display* display);
+ ~XAtomCache();
+
+ ::Display* display() const;
+
+ Atom WmState();
+ Atom WindowType();
+ Atom WindowTypeNormal();
+ Atom IccProfile();
+
+ private:
+  // If `*atom` is None, this function uses XInternAtom() to retrieve an Atom.
+ Atom CreateIfNotExist(Atom* atom, const char* name);
+
+ ::Display* const display_;
+ Atom wm_state_ = None;
+ Atom window_type_ = None;
+ Atom window_type_normal_ = None;
+ Atom icc_profile_ = None;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_DESKTOP_CAPTURE_LINUX_X11_X_ATOM_CACHE_H_
diff --git a/third_party/libwebrtc/modules/desktop_capture/linux/x11/x_error_trap.cc b/third_party/libwebrtc/modules/desktop_capture/linux/x11/x_error_trap.cc
new file mode 100644
index 0000000000..3314dd286c
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/linux/x11/x_error_trap.cc
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/linux/x11/x_error_trap.h"
+
+#include <stddef.h>
+
+#include <limits>
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+Bool XErrorTrap::XServerErrorHandler(Display* display, xReply* rep,
+ char* /* buf */, int /* len */,
+ XPointer data) {
+ XErrorTrap* self = reinterpret_cast<XErrorTrap*>(data);
+ if (rep->generic.type != X_Error ||
+ // Overflow-safe last_request_read <= last_ignored_request_ for skipping
+ // async replies from requests before XErrorTrap was created.
+ self->last_ignored_request_ - display->last_request_read <
+ std::numeric_limits<unsigned long>::max() >> 1)
+ return False;
+ self->last_xserver_error_code_ = rep->error.errorCode;
+ return True;
+}
+
+XErrorTrap::XErrorTrap(Display* display)
+ : display_(display),
+ last_xserver_error_code_(0),
+ enabled_(true) {
+ // Use async_handlers instead of XSetErrorHandler(). async_handlers can
+ // remain in place and then be safely removed at the right time even if a
+ // handler change happens concurrently on another thread. async_handlers
+ // are processed first and so can prevent errors reaching the global
+ // XSetErrorHandler handler. They also will not see errors from or affect
+ // handling of errors on other Displays, which may be processed on other
+ // threads.
+ LockDisplay(display);
+ async_handler_.next = display->async_handlers;
+ async_handler_.handler = XServerErrorHandler;
+ async_handler_.data = reinterpret_cast<XPointer>(this);
+ display->async_handlers = &async_handler_;
+ last_ignored_request_ = display->request;
+ UnlockDisplay(display);
+}
+
+int XErrorTrap::GetLastErrorAndDisable() {
+  RTC_DCHECK(enabled_);
+ enabled_ = false;
+ LockDisplay(display_);
+ DeqAsyncHandler(display_, &async_handler_);
+ UnlockDisplay(display_);
+ return last_xserver_error_code_;
+}
+
+XErrorTrap::~XErrorTrap() {
+ if (enabled_)
+ GetLastErrorAndDisable();
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/linux/x11/x_error_trap.h b/third_party/libwebrtc/modules/desktop_capture/linux/x11/x_error_trap.h
new file mode 100644
index 0000000000..df7e86bf03
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/linux/x11/x_error_trap.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_DESKTOP_CAPTURE_LINUX_X11_X_ERROR_TRAP_H_
+#define MODULES_DESKTOP_CAPTURE_LINUX_X11_X_ERROR_TRAP_H_
+
+#include <X11/Xlibint.h>
+#undef max // Xlibint.h defines this and it breaks std::max
+#undef min // Xlibint.h defines this and it breaks std::min
+
+namespace webrtc {
+
+// Helper class that registers an X Window error handler. Caller can use
+// GetLastErrorAndDisable() to get the last error that was caught, if any.
+// An XErrorTrap may be constructed on any thread, but errors are collected
+// from all threads, so `display` should be used only on one thread.
+// Other Displays are unaffected.
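+//
+// Typical usage, as a sketch (assumes `display` and `window` are valid):
+//
+//   {
+//     XErrorTrap error_trap(display);
+//     XMapWindow(display, window);  // Any Xlib request that may fail.
+//     XSync(display, False);
+//     if (error_trap.GetLastErrorAndDisable() != 0) {
+//       // Handle the trapped X error.
+//     }
+//   }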
+class XErrorTrap {
+ public:
+ explicit XErrorTrap(Display* display);
+ ~XErrorTrap();
+
+ XErrorTrap(const XErrorTrap&) = delete;
+ XErrorTrap& operator=(const XErrorTrap&) = delete;
+
+  // Returns the last error and unregisters the error handler.
+  // Must not be called more than once.
+ int GetLastErrorAndDisable();
+
+ private:
+ static Bool XServerErrorHandler(Display* display, xReply* rep,
+ char* /* buf */, int /* len */,
+ XPointer data);
+
+ _XAsyncHandler async_handler_;
+ Display* display_;
+ unsigned long last_ignored_request_;
+ int last_xserver_error_code_;
+ bool enabled_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_DESKTOP_CAPTURE_LINUX_X11_X_ERROR_TRAP_H_
diff --git a/third_party/libwebrtc/modules/desktop_capture/linux/x11/x_server_pixel_buffer.cc b/third_party/libwebrtc/modules/desktop_capture/linux/x11/x_server_pixel_buffer.cc
new file mode 100644
index 0000000000..fd6fc7daf4
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/linux/x11/x_server_pixel_buffer.cc
@@ -0,0 +1,379 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/linux/x11/x_server_pixel_buffer.h"
+
+#include <X11/Xutil.h>
+#include <stdint.h>
+#include <string.h>
+#include <sys/ipc.h>
+#include <sys/shm.h>
+
+#include "modules/desktop_capture/desktop_frame.h"
+#include "modules/desktop_capture/linux/x11/window_list_utils.h"
+#include "modules/desktop_capture/linux/x11/x_error_trap.h"
+#include "modules/desktop_capture/linux/x11/x_window_property.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+namespace {
+
+// Returns the number of bits `mask` has to be shifted left so that its
+// most-significant set bit becomes the most-significant bit of the word.
+// When `mask` is 0 the function returns 31.
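+// For example, MaskToShift(0x00ff0000) returns 8: shifting the mask left by
+// 8 bits moves its highest set bit into bit 31.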
+uint32_t MaskToShift(uint32_t mask) {
+ int shift = 0;
+ if ((mask & 0xffff0000u) == 0) {
+ mask <<= 16;
+ shift += 16;
+ }
+ if ((mask & 0xff000000u) == 0) {
+ mask <<= 8;
+ shift += 8;
+ }
+ if ((mask & 0xf0000000u) == 0) {
+ mask <<= 4;
+ shift += 4;
+ }
+ if ((mask & 0xc0000000u) == 0) {
+ mask <<= 2;
+ shift += 2;
+ }
+ if ((mask & 0x80000000u) == 0)
+ shift += 1;
+
+ return shift;
+}
+
+// Returns true if `image` is in RGB format.
+bool IsXImageRGBFormat(XImage* image) {
+ return image->bits_per_pixel == 32 && image->red_mask == 0xff0000 &&
+ image->green_mask == 0xff00 && image->blue_mask == 0xff;
+}
+
+// We expose two forms of blitting to handle variations in the pixel format.
+// In FastBlit(), the operation is effectively a memcpy.
+void FastBlit(XImage* x_image,
+ uint8_t* src_pos,
+ const DesktopRect& rect,
+ DesktopFrame* frame) {
+ RTC_DCHECK_LE(frame->top_left().x(), rect.left());
+ RTC_DCHECK_LE(frame->top_left().y(), rect.top());
+
+ int src_stride = x_image->bytes_per_line;
+ int dst_x = rect.left() - frame->top_left().x();
+ int dst_y = rect.top() - frame->top_left().y();
+
+ uint8_t* dst_pos = frame->data() + frame->stride() * dst_y;
+ dst_pos += dst_x * DesktopFrame::kBytesPerPixel;
+
+ int height = rect.height();
+ int row_bytes = rect.width() * DesktopFrame::kBytesPerPixel;
+ for (int y = 0; y < height; ++y) {
+ memcpy(dst_pos, src_pos, row_bytes);
+ src_pos += src_stride;
+ dst_pos += frame->stride();
+ }
+}
+
+void SlowBlit(XImage* x_image,
+ uint8_t* src_pos,
+ const DesktopRect& rect,
+ DesktopFrame* frame) {
+ RTC_DCHECK_LE(frame->top_left().x(), rect.left());
+ RTC_DCHECK_LE(frame->top_left().y(), rect.top());
+
+ int src_stride = x_image->bytes_per_line;
+ int dst_x = rect.left() - frame->top_left().x();
+ int dst_y = rect.top() - frame->top_left().y();
+ int width = rect.width(), height = rect.height();
+
+ uint32_t red_mask = x_image->red_mask;
+  uint32_t green_mask = x_image->green_mask;
+ uint32_t blue_mask = x_image->blue_mask;
+
+ uint32_t red_shift = MaskToShift(red_mask);
+ uint32_t green_shift = MaskToShift(green_mask);
+ uint32_t blue_shift = MaskToShift(blue_mask);
+
+ int bits_per_pixel = x_image->bits_per_pixel;
+
+ uint8_t* dst_pos = frame->data() + frame->stride() * dst_y;
+ dst_pos += dst_x * DesktopFrame::kBytesPerPixel;
+ // TODO(hclam): Optimize, perhaps using MMX code or by converting to
+ // YUV directly.
+ // TODO(sergeyu): This code doesn't handle XImage byte order properly and
+ // won't work with 24bpp images. Fix it.
+ for (int y = 0; y < height; y++) {
+ uint32_t* dst_pos_32 = reinterpret_cast<uint32_t*>(dst_pos);
+ uint32_t* src_pos_32 = reinterpret_cast<uint32_t*>(src_pos);
+ uint16_t* src_pos_16 = reinterpret_cast<uint16_t*>(src_pos);
+ for (int x = 0; x < width; x++) {
+ // Dereference through an appropriately-aligned pointer.
+ uint32_t pixel;
+ if (bits_per_pixel == 32) {
+ pixel = src_pos_32[x];
+ } else if (bits_per_pixel == 16) {
+ pixel = src_pos_16[x];
+ } else {
+ pixel = src_pos[x];
+ }
+ uint32_t r = (pixel & red_mask) << red_shift;
+ uint32_t g = (pixel & green_mask) << green_shift;
+ uint32_t b = (pixel & blue_mask) << blue_shift;
+ // Write as 32-bit RGB.
+ dst_pos_32[x] =
+ ((r >> 8) & 0xff0000) | ((g >> 16) & 0xff00) | ((b >> 24) & 0xff);
+ }
+ dst_pos += frame->stride();
+ src_pos += src_stride;
+ }
+}
+
+} // namespace
+
+XServerPixelBuffer::XServerPixelBuffer() {}
+
+XServerPixelBuffer::~XServerPixelBuffer() {
+ Release();
+}
+
+void XServerPixelBuffer::Release() {
+ if (x_image_) {
+ XDestroyImage(x_image_);
+ x_image_ = nullptr;
+ }
+ if (x_shm_image_) {
+ XDestroyImage(x_shm_image_);
+ x_shm_image_ = nullptr;
+ }
+ if (shm_pixmap_) {
+ XFreePixmap(display_, shm_pixmap_);
+ shm_pixmap_ = 0;
+ }
+ if (shm_gc_) {
+ XFreeGC(display_, shm_gc_);
+ shm_gc_ = nullptr;
+ }
+
+ ReleaseSharedMemorySegment();
+
+ window_ = 0;
+}
+
+void XServerPixelBuffer::ReleaseSharedMemorySegment() {
+ if (!shm_segment_info_)
+ return;
+ if (shm_segment_info_->shmaddr != nullptr)
+ shmdt(shm_segment_info_->shmaddr);
+ if (shm_segment_info_->shmid != -1)
+ shmctl(shm_segment_info_->shmid, IPC_RMID, 0);
+ delete shm_segment_info_;
+ shm_segment_info_ = nullptr;
+}
+
+bool XServerPixelBuffer::Init(XAtomCache* cache, Window window) {
+ Release();
+ display_ = cache->display();
+
+ XWindowAttributes attributes;
+ if (!GetWindowRect(display_, window, &window_rect_, &attributes)) {
+ return false;
+ }
+
+ if (cache->IccProfile() != None) {
+ // `window` is the root window when doing screen capture.
+ XWindowProperty<uint8_t> icc_profile_property(cache->display(), window,
+ cache->IccProfile());
+ if (icc_profile_property.is_valid() && icc_profile_property.size() > 0) {
+ icc_profile_ = std::vector<uint8_t>(
+ icc_profile_property.data(),
+ icc_profile_property.data() + icc_profile_property.size());
+ } else {
+ RTC_LOG(LS_WARNING) << "Failed to get icc profile";
+ }
+ }
+
+ window_ = window;
+ InitShm(attributes);
+
+ return true;
+}
+
+void XServerPixelBuffer::InitShm(const XWindowAttributes& attributes) {
+ Visual* default_visual = attributes.visual;
+ int default_depth = attributes.depth;
+
+ int major, minor;
+ Bool have_pixmaps;
+ if (!XShmQueryVersion(display_, &major, &minor, &have_pixmaps)) {
+ // Shared memory not supported. CaptureRect will use the XImage API instead.
+ return;
+ }
+
+ bool using_shm = false;
+ shm_segment_info_ = new XShmSegmentInfo;
+ shm_segment_info_->shmid = -1;
+ shm_segment_info_->shmaddr = nullptr;
+ shm_segment_info_->readOnly = False;
+ x_shm_image_ = XShmCreateImage(display_, default_visual, default_depth,
+ ZPixmap, 0, shm_segment_info_,
+ window_rect_.width(), window_rect_.height());
+ if (x_shm_image_) {
+ shm_segment_info_->shmid =
+ shmget(IPC_PRIVATE, x_shm_image_->bytes_per_line * x_shm_image_->height,
+ IPC_CREAT | 0600);
+ if (shm_segment_info_->shmid != -1) {
+ void* shmat_result = shmat(shm_segment_info_->shmid, 0, 0);
+ if (shmat_result != reinterpret_cast<void*>(-1)) {
+ shm_segment_info_->shmaddr = reinterpret_cast<char*>(shmat_result);
+ x_shm_image_->data = shm_segment_info_->shmaddr;
+
+ XErrorTrap error_trap(display_);
+ using_shm = XShmAttach(display_, shm_segment_info_);
+ XSync(display_, False);
+ if (error_trap.GetLastErrorAndDisable() != 0)
+ using_shm = false;
+ if (using_shm) {
+ RTC_LOG(LS_VERBOSE)
+ << "Using X shared memory segment " << shm_segment_info_->shmid;
+ }
+ }
+ } else {
+ RTC_LOG(LS_WARNING) << "Failed to get shared memory segment. "
+ "Performance may be degraded.";
+ }
+ }
+
+ if (!using_shm) {
+ RTC_LOG(LS_WARNING)
+ << "Not using shared memory. Performance may be degraded.";
+ ReleaseSharedMemorySegment();
+ return;
+ }
+
+ if (have_pixmaps)
+ have_pixmaps = InitPixmaps(default_depth);
+
+ shmctl(shm_segment_info_->shmid, IPC_RMID, 0);
+ shm_segment_info_->shmid = -1;
+
+ RTC_LOG(LS_VERBOSE) << "Using X shared memory extension v" << major << "."
+ << minor << " with" << (have_pixmaps ? "" : "out")
+ << " pixmaps.";
+}
+
+bool XServerPixelBuffer::InitPixmaps(int depth) {
+ if (XShmPixmapFormat(display_) != ZPixmap)
+ return false;
+
+ {
+ XErrorTrap error_trap(display_);
+ shm_pixmap_ = XShmCreatePixmap(
+ display_, window_, shm_segment_info_->shmaddr, shm_segment_info_,
+ window_rect_.width(), window_rect_.height(), depth);
+ XSync(display_, False);
+ if (error_trap.GetLastErrorAndDisable() != 0) {
+      // `shm_pixmap_` is not valid because the request was not processed
+      // by the X server, so zero it.
+ shm_pixmap_ = 0;
+ return false;
+ }
+ }
+
+ {
+ XErrorTrap error_trap(display_);
+ XGCValues shm_gc_values;
+ shm_gc_values.subwindow_mode = IncludeInferiors;
+ shm_gc_values.graphics_exposures = False;
+ shm_gc_ = XCreateGC(display_, window_,
+ GCSubwindowMode | GCGraphicsExposures, &shm_gc_values);
+ XSync(display_, False);
+ if (error_trap.GetLastErrorAndDisable() != 0) {
+ XFreePixmap(display_, shm_pixmap_);
+ shm_pixmap_ = 0;
+      shm_gc_ = nullptr;  // See shm_pixmap_ comment above.
+ return false;
+ }
+ }
+
+ return true;
+}
+
+bool XServerPixelBuffer::IsWindowValid() const {
+ XWindowAttributes attributes;
+ {
+ XErrorTrap error_trap(display_);
+ if (!XGetWindowAttributes(display_, window_, &attributes) ||
+ error_trap.GetLastErrorAndDisable() != 0) {
+ return false;
+ }
+ }
+ return true;
+}
+
+void XServerPixelBuffer::Synchronize() {
+ if (shm_segment_info_ && !shm_pixmap_) {
+ // XShmGetImage can fail if the display is being reconfigured.
+ XErrorTrap error_trap(display_);
+ // XShmGetImage fails if the window is partially out of screen.
+ xshm_get_image_succeeded_ =
+ XShmGetImage(display_, window_, x_shm_image_, 0, 0, AllPlanes);
+ }
+}
+
+bool XServerPixelBuffer::CaptureRect(const DesktopRect& rect,
+ DesktopFrame* frame) {
+ RTC_DCHECK_LE(rect.right(), window_rect_.width());
+ RTC_DCHECK_LE(rect.bottom(), window_rect_.height());
+
+ XImage* image;
+ uint8_t* data;
+
+ if (shm_segment_info_ && (shm_pixmap_ || xshm_get_image_succeeded_)) {
+ if (shm_pixmap_) {
+ XCopyArea(display_, window_, shm_pixmap_, shm_gc_, rect.left(),
+ rect.top(), rect.width(), rect.height(), rect.left(),
+ rect.top());
+ XSync(display_, False);
+ }
+
+ image = x_shm_image_;
+ data = reinterpret_cast<uint8_t*>(image->data) +
+ rect.top() * image->bytes_per_line +
+ rect.left() * image->bits_per_pixel / 8;
+
+ } else {
+ if (x_image_)
+ XDestroyImage(x_image_);
+ x_image_ = XGetImage(display_, window_, rect.left(), rect.top(),
+ rect.width(), rect.height(), AllPlanes, ZPixmap);
+ if (!x_image_)
+ return false;
+
+ image = x_image_;
+ data = reinterpret_cast<uint8_t*>(image->data);
+ }
+
+ if (IsXImageRGBFormat(image)) {
+ FastBlit(image, data, rect, frame);
+ } else {
+ SlowBlit(image, data, rect, frame);
+ }
+
+ if (!icc_profile_.empty())
+ frame->set_icc_profile(icc_profile_);
+
+ return true;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/linux/x11/x_server_pixel_buffer.h b/third_party/libwebrtc/modules/desktop_capture/linux/x11/x_server_pixel_buffer.h
new file mode 100644
index 0000000000..38af3a3e76
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/linux/x11/x_server_pixel_buffer.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+// Don't include this file in any .h files because it pulls in some X headers.
+
+#ifndef MODULES_DESKTOP_CAPTURE_LINUX_X11_X_SERVER_PIXEL_BUFFER_H_
+#define MODULES_DESKTOP_CAPTURE_LINUX_X11_X_SERVER_PIXEL_BUFFER_H_
+
+#include <X11/Xutil.h>
+#include <X11/extensions/XShm.h>
+
+#include <memory>
+#include <vector>
+
+#include "modules/desktop_capture/desktop_geometry.h"
+
+namespace webrtc {
+
+class DesktopFrame;
+class XAtomCache;
+
+// A class to allow the X server's pixel buffer to be accessed as efficiently
+// as possible.
+class XServerPixelBuffer {
+ public:
+ XServerPixelBuffer();
+ ~XServerPixelBuffer();
+
+ XServerPixelBuffer(const XServerPixelBuffer&) = delete;
+ XServerPixelBuffer& operator=(const XServerPixelBuffer&) = delete;
+
+ void Release();
+
+ // Allocate (or reallocate) the pixel buffer for `window`. Returns false in
+ // case of an error (e.g. window doesn't exist).
+ bool Init(XAtomCache* cache, Window window);
+
+ bool is_initialized() { return window_ != 0; }
+
+ // Returns the size of the window the buffer was initialized for.
+ DesktopSize window_size() { return window_rect_.size(); }
+
+ // Returns the rectangle of the window the buffer was initialized for.
+ const DesktopRect& window_rect() { return window_rect_; }
+
+ // Returns true if the window can be found.
+ bool IsWindowValid() const;
+
+ // If shared memory is being used without pixmaps, synchronize this pixel
+ // buffer with the root window contents (otherwise, this is a no-op).
+ // This is to avoid doing a full-screen capture for each individual
+ // rectangle in the capture list, when it only needs to be done once at the
+ // beginning.
+ void Synchronize();
+
+  // Captures the specified rectangle and stores it in `frame`. In the case
+  // where the full-screen data has already been captured by Synchronize(),
+  // this reads from that data without issuing another capture. The caller
+  // must ensure that `rect` is not larger than window_size().
+ bool CaptureRect(const DesktopRect& rect, DesktopFrame* frame);
+
+ private:
+ void ReleaseSharedMemorySegment();
+
+ void InitShm(const XWindowAttributes& attributes);
+ bool InitPixmaps(int depth);
+
+ Display* display_ = nullptr;
+ Window window_ = 0;
+ DesktopRect window_rect_;
+ XImage* x_image_ = nullptr;
+ XShmSegmentInfo* shm_segment_info_ = nullptr;
+ XImage* x_shm_image_ = nullptr;
+ Pixmap shm_pixmap_ = 0;
+ GC shm_gc_ = nullptr;
+ bool xshm_get_image_succeeded_ = false;
+ std::vector<uint8_t> icc_profile_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_DESKTOP_CAPTURE_LINUX_X11_X_SERVER_PIXEL_BUFFER_H_
diff --git a/third_party/libwebrtc/modules/desktop_capture/linux/x11/x_window_property.cc b/third_party/libwebrtc/modules/desktop_capture/linux/x11/x_window_property.cc
new file mode 100644
index 0000000000..5e16dac404
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/linux/x11/x_window_property.cc
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/linux/x11/x_window_property.h"
+
+namespace webrtc {
+
+XWindowPropertyBase::XWindowPropertyBase(Display* display,
+ Window window,
+ Atom property,
+ int expected_size) {
+ const int kBitsPerByte = 8;
+ Atom actual_type;
+ int actual_format;
+ unsigned long bytes_after; // NOLINT: type required by XGetWindowProperty
+ int status = XGetWindowProperty(display, window, property, 0L, ~0L, False,
+ AnyPropertyType, &actual_type, &actual_format,
+ &size_, &bytes_after, &data_);
+ if (status != Success) {
+ data_ = nullptr;
+ return;
+ }
+ if ((expected_size * kBitsPerByte) != actual_format) {
+ size_ = 0;
+ return;
+ }
+
+ is_valid_ = true;
+}
+
+XWindowPropertyBase::~XWindowPropertyBase() {
+ if (data_)
+ XFree(data_);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/linux/x11/x_window_property.h b/third_party/libwebrtc/modules/desktop_capture/linux/x11/x_window_property.h
new file mode 100644
index 0000000000..28dfb97311
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/linux/x11/x_window_property.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_DESKTOP_CAPTURE_LINUX_X11_X_WINDOW_PROPERTY_H_
+#define MODULES_DESKTOP_CAPTURE_LINUX_X11_X_WINDOW_PROPERTY_H_
+
+#include <X11/X.h>
+#include <X11/Xlib.h>
+
+namespace webrtc {
+
+class XWindowPropertyBase {
+ public:
+ XWindowPropertyBase(Display* display,
+ Window window,
+ Atom property,
+ int expected_size);
+ virtual ~XWindowPropertyBase();
+
+ XWindowPropertyBase(const XWindowPropertyBase&) = delete;
+ XWindowPropertyBase& operator=(const XWindowPropertyBase&) = delete;
+
+  // True if the property value was fetched successfully.
+ bool is_valid() const { return is_valid_; }
+
+ // Size and value of the property.
+ size_t size() const { return size_; }
+
+ protected:
+ unsigned char* data_ = nullptr;
+
+ private:
+ bool is_valid_ = false;
+ unsigned long size_ = 0; // NOLINT: type required by XGetWindowProperty
+};
+
+// Convenience wrapper for XGetWindowProperty() results.
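+//
+// Illustrative usage (assumes `display`, `window` and `property` are valid):
+//
+//   XWindowProperty<uint32_t> prop(display, window, property);
+//   if (prop.is_valid() && prop.size() > 0) {
+//     uint32_t first_value = *prop.data();
+//   }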
+template <class PropertyType>
+class XWindowProperty : public XWindowPropertyBase {
+ public:
+ XWindowProperty(Display* display, const Window window, const Atom property)
+ : XWindowPropertyBase(display, window, property, sizeof(PropertyType)) {}
+ ~XWindowProperty() override = default;
+
+ XWindowProperty(const XWindowProperty&) = delete;
+ XWindowProperty& operator=(const XWindowProperty&) = delete;
+
+ const PropertyType* data() const {
+ return reinterpret_cast<PropertyType*>(data_);
+ }
+ PropertyType* data() { return reinterpret_cast<PropertyType*>(data_); }
+};
+
+} // namespace webrtc
+
+#endif // MODULES_DESKTOP_CAPTURE_LINUX_X11_X_WINDOW_PROPERTY_H_
diff --git a/third_party/libwebrtc/modules/desktop_capture/mac/desktop_configuration.h b/third_party/libwebrtc/modules/desktop_capture/mac/desktop_configuration.h
new file mode 100644
index 0000000000..2ad5474e44
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/mac/desktop_configuration.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_DESKTOP_CAPTURE_MAC_DESKTOP_CONFIGURATION_H_
+#define MODULES_DESKTOP_CAPTURE_MAC_DESKTOP_CONFIGURATION_H_
+
+#include <ApplicationServices/ApplicationServices.h>
+
+#include <vector>
+
+#include "modules/desktop_capture/desktop_geometry.h"
+#include "rtc_base/system/rtc_export.h"
+
+namespace webrtc {
+
+// Describes the configuration of a specific display.
+struct MacDisplayConfiguration {
+ MacDisplayConfiguration();
+ MacDisplayConfiguration(const MacDisplayConfiguration& other);
+ MacDisplayConfiguration(MacDisplayConfiguration&& other);
+ ~MacDisplayConfiguration();
+
+ MacDisplayConfiguration& operator=(const MacDisplayConfiguration& other);
+ MacDisplayConfiguration& operator=(MacDisplayConfiguration&& other);
+
+ // Cocoa identifier for this display.
+ CGDirectDisplayID id = 0;
+
+ // Bounds of this display in Density-Independent Pixels (DIPs).
+ DesktopRect bounds;
+
+ // Bounds of this display in physical pixels.
+ DesktopRect pixel_bounds;
+
+ // Scale factor from DIPs to physical pixels.
+ float dip_to_pixel_scale = 1.0f;
+
+ // Display type, built-in or external.
+ bool is_builtin;
+};
+
+typedef std::vector<MacDisplayConfiguration> MacDisplayConfigurations;
+
+// Describes the configuration of the whole desktop.
+struct RTC_EXPORT MacDesktopConfiguration {
+ // Used to request bottom-up or top-down coordinates.
+ enum Origin { BottomLeftOrigin, TopLeftOrigin };
+
+ MacDesktopConfiguration();
+ MacDesktopConfiguration(const MacDesktopConfiguration& other);
+ MacDesktopConfiguration(MacDesktopConfiguration&& other);
+ ~MacDesktopConfiguration();
+
+ MacDesktopConfiguration& operator=(const MacDesktopConfiguration& other);
+ MacDesktopConfiguration& operator=(MacDesktopConfiguration&& other);
+
+ // Returns the desktop & display configurations.
+ // If BottomLeftOrigin is used, the output is in Cocoa-style "bottom-up"
+ // (the origin is the bottom-left of the primary monitor, and coordinates
+  // increase as you move up the screen). Otherwise, the configuration is
+  // converted to the top-left coordinate system used by Windows and X11.
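+  //
+  // For example, to get top-down coordinates (a sketch):
+  //
+  //   MacDesktopConfiguration config = MacDesktopConfiguration::GetCurrent(
+  //       MacDesktopConfiguration::TopLeftOrigin);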
+ static MacDesktopConfiguration GetCurrent(Origin origin);
+
+ // Returns true if the given desktop configuration equals this one.
+ bool Equals(const MacDesktopConfiguration& other);
+
+ // If `id` corresponds to the built-in display, return its configuration,
+ // otherwise return the configuration for the display with the specified id,
+ // or nullptr if no such display exists.
+ const MacDisplayConfiguration* FindDisplayConfigurationById(
+ CGDirectDisplayID id);
+
+ // Bounds of the desktop excluding monitors with DPI settings different from
+ // the main monitor. In Density-Independent Pixels (DIPs).
+ DesktopRect bounds;
+
+ // Same as bounds, but expressed in physical pixels.
+ DesktopRect pixel_bounds;
+
+ // Scale factor from DIPs to physical pixels.
+ float dip_to_pixel_scale = 1.0f;
+
+ // Configurations of the displays making up the desktop area.
+ MacDisplayConfigurations displays;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_DESKTOP_CAPTURE_MAC_DESKTOP_CONFIGURATION_H_
diff --git a/third_party/libwebrtc/modules/desktop_capture/mac/desktop_configuration.mm b/third_party/libwebrtc/modules/desktop_capture/mac/desktop_configuration.mm
new file mode 100644
index 0000000000..3b888c37c2
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/mac/desktop_configuration.mm
@@ -0,0 +1,179 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/mac/desktop_configuration.h"
+
+#include <math.h>
+#include <algorithm>
+#include <Cocoa/Cocoa.h>
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+namespace {
+
+DesktopRect NSRectToDesktopRect(const NSRect& ns_rect) {
+ return DesktopRect::MakeLTRB(
+ static_cast<int>(floor(ns_rect.origin.x)),
+ static_cast<int>(floor(ns_rect.origin.y)),
+ static_cast<int>(ceil(ns_rect.origin.x + ns_rect.size.width)),
+ static_cast<int>(ceil(ns_rect.origin.y + ns_rect.size.height)));
+}
+
+// Inverts the position of `rect` from bottom-up coordinates to top-down,
+// relative to `bounds`.
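+// For example, with `bounds` of height 1080 and a 200x50 `rect` at y = 100,
+// the result keeps the same x, width and height, and y becomes
+// 1080 - (100 + 50) = 930.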
+void InvertRectYOrigin(const DesktopRect& bounds,
+ DesktopRect* rect) {
+ RTC_DCHECK_EQ(bounds.top(), 0);
+ *rect = DesktopRect::MakeXYWH(
+ rect->left(), bounds.bottom() - rect->bottom(),
+ rect->width(), rect->height());
+}
+
+MacDisplayConfiguration GetConfigurationForScreen(NSScreen* screen) {
+ MacDisplayConfiguration display_config;
+
+ // Fetch the NSScreenNumber, which is also the CGDirectDisplayID.
+ NSDictionary* device_description = [screen deviceDescription];
+ display_config.id = static_cast<CGDirectDisplayID>(
+ [[device_description objectForKey:@"NSScreenNumber"] intValue]);
+
+ // Determine the display's logical & physical dimensions.
+ NSRect ns_bounds = [screen frame];
+ display_config.bounds = NSRectToDesktopRect(ns_bounds);
+
+  // If the host is running Mac OS X 10.7 or later, query the scaling factor
+  // between logical and physical (aka "backing") pixels; otherwise assume 1:1.
+ if ([screen respondsToSelector:@selector(backingScaleFactor)] &&
+ [screen respondsToSelector:@selector(convertRectToBacking:)]) {
+ display_config.dip_to_pixel_scale = [screen backingScaleFactor];
+ NSRect ns_pixel_bounds = [screen convertRectToBacking: ns_bounds];
+ display_config.pixel_bounds = NSRectToDesktopRect(ns_pixel_bounds);
+ } else {
+ display_config.pixel_bounds = display_config.bounds;
+ }
+
+ // Determine if the display is built-in or external.
+ display_config.is_builtin = CGDisplayIsBuiltin(display_config.id);
+
+ return display_config;
+}
+
+} // namespace
+
+MacDisplayConfiguration::MacDisplayConfiguration() = default;
+MacDisplayConfiguration::MacDisplayConfiguration(
+ const MacDisplayConfiguration& other) = default;
+MacDisplayConfiguration::MacDisplayConfiguration(
+ MacDisplayConfiguration&& other) = default;
+MacDisplayConfiguration::~MacDisplayConfiguration() = default;
+
+MacDisplayConfiguration& MacDisplayConfiguration::operator=(
+ const MacDisplayConfiguration& other) = default;
+MacDisplayConfiguration& MacDisplayConfiguration::operator=(
+ MacDisplayConfiguration&& other) = default;
+
+MacDesktopConfiguration::MacDesktopConfiguration() = default;
+MacDesktopConfiguration::MacDesktopConfiguration(
+ const MacDesktopConfiguration& other) = default;
+MacDesktopConfiguration::MacDesktopConfiguration(
+ MacDesktopConfiguration&& other) = default;
+MacDesktopConfiguration::~MacDesktopConfiguration() = default;
+
+MacDesktopConfiguration& MacDesktopConfiguration::operator=(
+ const MacDesktopConfiguration& other) = default;
+MacDesktopConfiguration& MacDesktopConfiguration::operator=(
+ MacDesktopConfiguration&& other) = default;
+
+// static
+MacDesktopConfiguration MacDesktopConfiguration::GetCurrent(Origin origin) {
+ MacDesktopConfiguration desktop_config;
+
+ NSArray* screens = [NSScreen screens];
+ RTC_DCHECK(screens);
+
+  // Iterate over the monitors, adding the primary monitor and any monitor
+  // whose DPI matches that of the primary monitor.
+ for (NSUInteger i = 0; i < [screens count]; ++i) {
+ MacDisplayConfiguration display_config =
+ GetConfigurationForScreen([screens objectAtIndex: i]);
+
+ if (i == 0)
+ desktop_config.dip_to_pixel_scale = display_config.dip_to_pixel_scale;
+
+ // Cocoa uses bottom-up coordinates, so if the caller wants top-down then
+ // we need to invert the positions of secondary monitors relative to the
+ // primary one (the primary monitor's position is (0,0) in both systems).
+ if (i > 0 && origin == TopLeftOrigin) {
+ InvertRectYOrigin(desktop_config.displays[0].bounds,
+ &display_config.bounds);
+      // `display_bounds` is density dependent, so we need to convert the
+      // primary monitor's position into the secondary monitor's density
+      // context.
+ float scaling_factor = display_config.dip_to_pixel_scale /
+ desktop_config.displays[0].dip_to_pixel_scale;
+ DesktopRect primary_bounds = DesktopRect::MakeLTRB(
+ desktop_config.displays[0].pixel_bounds.left() * scaling_factor,
+ desktop_config.displays[0].pixel_bounds.top() * scaling_factor,
+ desktop_config.displays[0].pixel_bounds.right() * scaling_factor,
+ desktop_config.displays[0].pixel_bounds.bottom() * scaling_factor);
+ InvertRectYOrigin(primary_bounds, &display_config.pixel_bounds);
+ }
+
+ // Add the display to the configuration.
+ desktop_config.displays.push_back(display_config);
+
+ // Update the desktop bounds to account for this display, unless the current
+ // display uses different DPI settings.
+ if (display_config.dip_to_pixel_scale ==
+ desktop_config.dip_to_pixel_scale) {
+ desktop_config.bounds.UnionWith(display_config.bounds);
+ desktop_config.pixel_bounds.UnionWith(display_config.pixel_bounds);
+ }
+ }
+
+ return desktop_config;
+}
+
+// For convenience of comparing MacDisplayConfigurations in
+// MacDesktopConfiguration::Equals.
+bool operator==(const MacDisplayConfiguration& left,
+ const MacDisplayConfiguration& right) {
+ return left.id == right.id &&
+ left.bounds.equals(right.bounds) &&
+ left.pixel_bounds.equals(right.pixel_bounds) &&
+ left.dip_to_pixel_scale == right.dip_to_pixel_scale;
+}
+
+bool MacDesktopConfiguration::Equals(const MacDesktopConfiguration& other) {
+ return bounds.equals(other.bounds) &&
+ pixel_bounds.equals(other.pixel_bounds) &&
+ dip_to_pixel_scale == other.dip_to_pixel_scale &&
+ displays == other.displays;
+}
+
+const MacDisplayConfiguration*
+MacDesktopConfiguration::FindDisplayConfigurationById(
+ CGDirectDisplayID id) {
+ bool is_builtin = CGDisplayIsBuiltin(id);
+ for (MacDisplayConfigurations::const_iterator it = displays.begin();
+ it != displays.end(); ++it) {
+    // MacBook Pro models with both discrete and integrated graphics cards
+    // switch between them automatically by default. When the system switches
+    // from the discrete to the integrated GPU, the display ID of the built-in
+    // display changes, which would otherwise stop screen capture. So keep
+    // capturing the built-in display even if its display ID changes.
+    if ((is_builtin && it->is_builtin) || (!is_builtin && it->id == id))
+      return &(*it);
+ }
+ return NULL;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/mac/desktop_configuration_monitor.cc b/third_party/libwebrtc/modules/desktop_capture/mac/desktop_configuration_monitor.cc
new file mode 100644
index 0000000000..048a679ecc
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/mac/desktop_configuration_monitor.cc
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/mac/desktop_configuration_monitor.h"
+
+#include "modules/desktop_capture/mac/desktop_configuration.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/trace_event.h"
+
+namespace webrtc {
+
+DesktopConfigurationMonitor::DesktopConfigurationMonitor() {
+ CGError err = CGDisplayRegisterReconfigurationCallback(
+ DesktopConfigurationMonitor::DisplaysReconfiguredCallback, this);
+ if (err != kCGErrorSuccess)
+ RTC_LOG(LS_ERROR) << "CGDisplayRegisterReconfigurationCallback " << err;
+ MutexLock lock(&desktop_configuration_lock_);
+ desktop_configuration_ = MacDesktopConfiguration::GetCurrent(
+ MacDesktopConfiguration::TopLeftOrigin);
+}
+
+DesktopConfigurationMonitor::~DesktopConfigurationMonitor() {
+ CGError err = CGDisplayRemoveReconfigurationCallback(
+ DesktopConfigurationMonitor::DisplaysReconfiguredCallback, this);
+ if (err != kCGErrorSuccess)
+ RTC_LOG(LS_ERROR) << "CGDisplayRemoveReconfigurationCallback " << err;
+}
+
+MacDesktopConfiguration DesktopConfigurationMonitor::desktop_configuration() {
+ MutexLock lock(&desktop_configuration_lock_);
+ return desktop_configuration_;
+}
+
+// static
+// This method may be called on any system thread.
+void DesktopConfigurationMonitor::DisplaysReconfiguredCallback(
+ CGDirectDisplayID display,
+ CGDisplayChangeSummaryFlags flags,
+ void* user_parameter) {
+ DesktopConfigurationMonitor* monitor =
+ reinterpret_cast<DesktopConfigurationMonitor*>(user_parameter);
+ monitor->DisplaysReconfigured(display, flags);
+}
+
+void DesktopConfigurationMonitor::DisplaysReconfigured(
+ CGDirectDisplayID display,
+ CGDisplayChangeSummaryFlags flags) {
+ TRACE_EVENT0("webrtc", "DesktopConfigurationMonitor::DisplaysReconfigured");
+ RTC_LOG(LS_INFO) << "DisplaysReconfigured: "
+ "DisplayID "
+ << display << "; ChangeSummaryFlags " << flags;
+
+ if (flags & kCGDisplayBeginConfigurationFlag) {
+ reconfiguring_displays_.insert(display);
+ return;
+ }
+
+ reconfiguring_displays_.erase(display);
+ if (reconfiguring_displays_.empty()) {
+ MutexLock lock(&desktop_configuration_lock_);
+ desktop_configuration_ = MacDesktopConfiguration::GetCurrent(
+ MacDesktopConfiguration::TopLeftOrigin);
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/mac/desktop_configuration_monitor.h b/third_party/libwebrtc/modules/desktop_capture/mac/desktop_configuration_monitor.h
new file mode 100644
index 0000000000..747295a538
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/mac/desktop_configuration_monitor.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_DESKTOP_CAPTURE_MAC_DESKTOP_CONFIGURATION_MONITOR_H_
+#define MODULES_DESKTOP_CAPTURE_MAC_DESKTOP_CONFIGURATION_MONITOR_H_
+
+#include <ApplicationServices/ApplicationServices.h>
+
+#include <memory>
+#include <set>
+
+#include "api/ref_counted_base.h"
+#include "modules/desktop_capture/mac/desktop_configuration.h"
+#include "rtc_base/synchronization/mutex.h"
+
+namespace webrtc {
+
+// This class synchronizes capturing with display reconfiguration across
+// threads and provides access to the up-to-date MacDesktopConfiguration.
+class DesktopConfigurationMonitor final
+ : public rtc::RefCountedNonVirtual<DesktopConfigurationMonitor> {
+ public:
+ DesktopConfigurationMonitor();
+ ~DesktopConfigurationMonitor();
+
+ DesktopConfigurationMonitor(const DesktopConfigurationMonitor&) = delete;
+ DesktopConfigurationMonitor& operator=(const DesktopConfigurationMonitor&) =
+ delete;
+
+ // Returns the current desktop configuration.
+ MacDesktopConfiguration desktop_configuration();
+
+ private:
+ static void DisplaysReconfiguredCallback(CGDirectDisplayID display,
+ CGDisplayChangeSummaryFlags flags,
+ void* user_parameter);
+ void DisplaysReconfigured(CGDirectDisplayID display,
+ CGDisplayChangeSummaryFlags flags);
+
+ Mutex desktop_configuration_lock_;
+ MacDesktopConfiguration desktop_configuration_
+ RTC_GUARDED_BY(&desktop_configuration_lock_);
+ std::set<CGDirectDisplayID> reconfiguring_displays_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_DESKTOP_CAPTURE_MAC_DESKTOP_CONFIGURATION_MONITOR_H_
diff --git a/third_party/libwebrtc/modules/desktop_capture/mac/desktop_frame_cgimage.h b/third_party/libwebrtc/modules/desktop_capture/mac/desktop_frame_cgimage.h
new file mode 100644
index 0000000000..d6279f9b36
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/mac/desktop_frame_cgimage.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_DESKTOP_CAPTURE_MAC_DESKTOP_FRAME_CGIMAGE_H_
+#define MODULES_DESKTOP_CAPTURE_MAC_DESKTOP_FRAME_CGIMAGE_H_
+
+#include <CoreGraphics/CoreGraphics.h>
+
+#include <memory>
+
+#include "modules/desktop_capture/desktop_frame.h"
+#include "sdk/objc/helpers/scoped_cftyperef.h"
+
+namespace webrtc {
+
+class DesktopFrameCGImage final : public DesktopFrame {
+ public:
+ // Create an image containing a snapshot of the display at the time this is
+ // being called.
+ static std::unique_ptr<DesktopFrameCGImage> CreateForDisplay(
+ CGDirectDisplayID display_id);
+
+  // Create an image containing a snapshot of the given window at the time this
+ // is being called. This also works when the window is overlapped or in
+ // another workspace.
+ static std::unique_ptr<DesktopFrameCGImage> CreateForWindow(
+ CGWindowID window_id);
+
+ ~DesktopFrameCGImage() override;
+
+ DesktopFrameCGImage(const DesktopFrameCGImage&) = delete;
+ DesktopFrameCGImage& operator=(const DesktopFrameCGImage&) = delete;
+
+ private:
+ static std::unique_ptr<DesktopFrameCGImage> CreateFromCGImage(
+ rtc::ScopedCFTypeRef<CGImageRef> cg_image);
+
+ // This constructor expects `cg_image` to hold a non-null CGImageRef.
+ DesktopFrameCGImage(DesktopSize size,
+ int stride,
+ uint8_t* data,
+ rtc::ScopedCFTypeRef<CGImageRef> cg_image,
+ rtc::ScopedCFTypeRef<CFDataRef> cg_data);
+
+ const rtc::ScopedCFTypeRef<CGImageRef> cg_image_;
+ const rtc::ScopedCFTypeRef<CFDataRef> cg_data_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_DESKTOP_CAPTURE_MAC_DESKTOP_FRAME_CGIMAGE_H_
diff --git a/third_party/libwebrtc/modules/desktop_capture/mac/desktop_frame_cgimage.mm b/third_party/libwebrtc/modules/desktop_capture/mac/desktop_frame_cgimage.mm
new file mode 100644
index 0000000000..0fb69b272d
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/mac/desktop_frame_cgimage.mm
@@ -0,0 +1,108 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/mac/desktop_frame_cgimage.h"
+
+#include <AvailabilityMacros.h>
+
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+// static
+std::unique_ptr<DesktopFrameCGImage> DesktopFrameCGImage::CreateForDisplay(
+ CGDirectDisplayID display_id) {
+ // Create an image containing a snapshot of the display.
+ rtc::ScopedCFTypeRef<CGImageRef> cg_image(CGDisplayCreateImage(display_id));
+ if (!cg_image) {
+ return nullptr;
+ }
+
+ return DesktopFrameCGImage::CreateFromCGImage(cg_image);
+}
+
+// static
+std::unique_ptr<DesktopFrameCGImage> DesktopFrameCGImage::CreateForWindow(CGWindowID window_id) {
+ rtc::ScopedCFTypeRef<CGImageRef> cg_image(
+ CGWindowListCreateImage(CGRectNull,
+ kCGWindowListOptionIncludingWindow,
+ window_id,
+ kCGWindowImageBoundsIgnoreFraming));
+ if (!cg_image) {
+ return nullptr;
+ }
+
+ return DesktopFrameCGImage::CreateFromCGImage(cg_image);
+}
+
+// static
+std::unique_ptr<DesktopFrameCGImage> DesktopFrameCGImage::CreateFromCGImage(
+ rtc::ScopedCFTypeRef<CGImageRef> cg_image) {
+ // Verify that the image has 32-bit depth.
+ int bits_per_pixel = CGImageGetBitsPerPixel(cg_image.get());
+ if (bits_per_pixel / 8 != DesktopFrame::kBytesPerPixel) {
+ RTC_LOG(LS_ERROR) << "CGDisplayCreateImage() returned imaged with " << bits_per_pixel
+ << " bits per pixel. Only 32-bit depth is supported.";
+ return nullptr;
+ }
+
+ // Request access to the raw pixel data via the image's DataProvider.
+ CGDataProviderRef cg_provider = CGImageGetDataProvider(cg_image.get());
+ RTC_DCHECK(cg_provider);
+
+ // CGDataProviderCopyData returns a new data object containing a copy of the provider’s
+ // data.
+ rtc::ScopedCFTypeRef<CFDataRef> cg_data(CGDataProviderCopyData(cg_provider));
+ RTC_DCHECK(cg_data);
+
+ // CFDataGetBytePtr returns a read-only pointer to the bytes of a CFData object.
+ uint8_t* data = const_cast<uint8_t*>(CFDataGetBytePtr(cg_data.get()));
+ RTC_DCHECK(data);
+
+ DesktopSize size(CGImageGetWidth(cg_image.get()), CGImageGetHeight(cg_image.get()));
+ int stride = CGImageGetBytesPerRow(cg_image.get());
+
+ std::unique_ptr<DesktopFrameCGImage> frame(
+ new DesktopFrameCGImage(size, stride, data, cg_image, cg_data));
+
+ CGColorSpaceRef cg_color_space = CGImageGetColorSpace(cg_image.get());
+ if (cg_color_space) {
+#if !defined(MAC_OS_X_VERSION_10_13) || MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_13
+ rtc::ScopedCFTypeRef<CFDataRef> cf_icc_profile(CGColorSpaceCopyICCProfile(cg_color_space));
+#else
+ rtc::ScopedCFTypeRef<CFDataRef> cf_icc_profile(CGColorSpaceCopyICCData(cg_color_space));
+#endif
+ if (cf_icc_profile) {
+ const uint8_t* data_as_byte =
+ reinterpret_cast<const uint8_t*>(CFDataGetBytePtr(cf_icc_profile.get()));
+ const size_t data_size = CFDataGetLength(cf_icc_profile.get());
+ if (data_as_byte && data_size > 0) {
+ frame->set_icc_profile(std::vector<uint8_t>(data_as_byte, data_as_byte + data_size));
+ }
+ }
+ }
+
+ return frame;
+}
+
+DesktopFrameCGImage::DesktopFrameCGImage(DesktopSize size,
+ int stride,
+ uint8_t* data,
+ rtc::ScopedCFTypeRef<CGImageRef> cg_image,
+ rtc::ScopedCFTypeRef<CFDataRef> cg_data)
+ : DesktopFrame(size, stride, data, nullptr), cg_image_(cg_image), cg_data_(cg_data) {
+ RTC_DCHECK(cg_image_);
+ RTC_DCHECK(cg_data_);
+}
+
+DesktopFrameCGImage::~DesktopFrameCGImage() = default;
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/mac/desktop_frame_iosurface.h b/third_party/libwebrtc/modules/desktop_capture/mac/desktop_frame_iosurface.h
new file mode 100644
index 0000000000..73da0f693c
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/mac/desktop_frame_iosurface.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_DESKTOP_CAPTURE_MAC_DESKTOP_FRAME_IOSURFACE_H_
+#define MODULES_DESKTOP_CAPTURE_MAC_DESKTOP_FRAME_IOSURFACE_H_
+
+#include <CoreGraphics/CoreGraphics.h>
+#include <IOSurface/IOSurface.h>
+
+#include <memory>
+
+#include "modules/desktop_capture/desktop_frame.h"
+#include "sdk/objc/helpers/scoped_cftyperef.h"
+
+namespace webrtc {
+
+class DesktopFrameIOSurface final : public DesktopFrame {
+ public:
+  // Locks an IOSurfaceRef containing a snapshot of a display. Returns
+  // nullptr if locking fails.
+ static std::unique_ptr<DesktopFrameIOSurface> Wrap(
+ rtc::ScopedCFTypeRef<IOSurfaceRef> io_surface);
+
+ ~DesktopFrameIOSurface() override;
+
+ DesktopFrameIOSurface(const DesktopFrameIOSurface&) = delete;
+ DesktopFrameIOSurface& operator=(const DesktopFrameIOSurface&) = delete;
+
+ private:
+ // This constructor expects `io_surface` to hold a non-null IOSurfaceRef.
+ explicit DesktopFrameIOSurface(rtc::ScopedCFTypeRef<IOSurfaceRef> io_surface);
+
+ const rtc::ScopedCFTypeRef<IOSurfaceRef> io_surface_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_DESKTOP_CAPTURE_MAC_DESKTOP_FRAME_IOSURFACE_H_
diff --git a/third_party/libwebrtc/modules/desktop_capture/mac/desktop_frame_iosurface.mm b/third_party/libwebrtc/modules/desktop_capture/mac/desktop_frame_iosurface.mm
new file mode 100644
index 0000000000..b59b319db9
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/mac/desktop_frame_iosurface.mm
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/mac/desktop_frame_iosurface.h"
+
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+// static
+std::unique_ptr<DesktopFrameIOSurface> DesktopFrameIOSurface::Wrap(
+ rtc::ScopedCFTypeRef<IOSurfaceRef> io_surface) {
+ if (!io_surface) {
+ return nullptr;
+ }
+
+ IOSurfaceIncrementUseCount(io_surface.get());
+ IOReturn status = IOSurfaceLock(io_surface.get(), kIOSurfaceLockReadOnly, nullptr);
+ if (status != kIOReturnSuccess) {
+ RTC_LOG(LS_ERROR) << "Failed to lock the IOSurface with status " << status;
+ IOSurfaceDecrementUseCount(io_surface.get());
+ return nullptr;
+ }
+
+ // Verify that the image has 32-bit depth.
+ int bytes_per_pixel = IOSurfaceGetBytesPerElement(io_surface.get());
+ if (bytes_per_pixel != DesktopFrame::kBytesPerPixel) {
+ RTC_LOG(LS_ERROR) << "CGDisplayStream handler returned IOSurface with " << (8 * bytes_per_pixel)
+ << " bits per pixel. Only 32-bit depth is supported.";
+ IOSurfaceUnlock(io_surface.get(), kIOSurfaceLockReadOnly, nullptr);
+ IOSurfaceDecrementUseCount(io_surface.get());
+ return nullptr;
+ }
+
+ return std::unique_ptr<DesktopFrameIOSurface>(new DesktopFrameIOSurface(io_surface));
+}
+
+DesktopFrameIOSurface::DesktopFrameIOSurface(rtc::ScopedCFTypeRef<IOSurfaceRef> io_surface)
+ : DesktopFrame(
+ DesktopSize(IOSurfaceGetWidth(io_surface.get()), IOSurfaceGetHeight(io_surface.get())),
+ IOSurfaceGetBytesPerRow(io_surface.get()),
+ static_cast<uint8_t*>(IOSurfaceGetBaseAddress(io_surface.get())),
+ nullptr),
+ io_surface_(io_surface) {
+ RTC_DCHECK(io_surface_);
+}
+
+DesktopFrameIOSurface::~DesktopFrameIOSurface() {
+ IOSurfaceUnlock(io_surface_.get(), kIOSurfaceLockReadOnly, nullptr);
+ IOSurfaceDecrementUseCount(io_surface_.get());
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/mac/desktop_frame_provider.h b/third_party/libwebrtc/modules/desktop_capture/mac/desktop_frame_provider.h
new file mode 100644
index 0000000000..aad28d2f30
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/mac/desktop_frame_provider.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_DESKTOP_CAPTURE_MAC_DESKTOP_FRAME_PROVIDER_H_
+#define MODULES_DESKTOP_CAPTURE_MAC_DESKTOP_FRAME_PROVIDER_H_
+
+#include <CoreGraphics/CoreGraphics.h>
+#include <IOSurface/IOSurface.h>
+
+#include <map>
+#include <memory>
+
+#include "api/sequence_checker.h"
+#include "modules/desktop_capture/shared_desktop_frame.h"
+#include "sdk/objc/helpers/scoped_cftyperef.h"
+
+namespace webrtc {
+
+class DesktopFrameProvider {
+ public:
+ explicit DesktopFrameProvider(bool allow_iosurface);
+ ~DesktopFrameProvider();
+
+ DesktopFrameProvider(const DesktopFrameProvider&) = delete;
+ DesktopFrameProvider& operator=(const DesktopFrameProvider&) = delete;
+
+  // The caller takes ownership of the returned desktop frame. Returns null if
+  // `display_id` is invalid or no frame is ready. Note that this function
+  // does not remove the frame from the internal container; the caller has to
+  // call Release().
+ std::unique_ptr<DesktopFrame> TakeLatestFrameForDisplay(
+ CGDirectDisplayID display_id);
+
+ // OS sends the latest IOSurfaceRef through
+ // CGDisplayStreamFrameAvailableHandler callback; we store it here.
+ void InvalidateIOSurface(CGDirectDisplayID display_id,
+ rtc::ScopedCFTypeRef<IOSurfaceRef> io_surface);
+
+ // Expected to be called before stopping the CGDisplayStreamRef streams.
+ void Release();
+
+ private:
+ SequenceChecker thread_checker_;
+ const bool allow_iosurface_;
+
+  // Most recent IOSurface that contains a capture of the matching display.
+ std::map<CGDirectDisplayID, std::unique_ptr<SharedDesktopFrame>> io_surfaces_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_DESKTOP_CAPTURE_MAC_DESKTOP_FRAME_PROVIDER_H_
diff --git a/third_party/libwebrtc/modules/desktop_capture/mac/desktop_frame_provider.mm b/third_party/libwebrtc/modules/desktop_capture/mac/desktop_frame_provider.mm
new file mode 100644
index 0000000000..009504a22b
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/mac/desktop_frame_provider.mm
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/mac/desktop_frame_provider.h"
+
+#include <utility>
+
+#include "modules/desktop_capture/mac/desktop_frame_cgimage.h"
+#include "modules/desktop_capture/mac/desktop_frame_iosurface.h"
+
+namespace webrtc {
+
+DesktopFrameProvider::DesktopFrameProvider(bool allow_iosurface)
+ : allow_iosurface_(allow_iosurface) {
+ thread_checker_.Detach();
+}
+
+DesktopFrameProvider::~DesktopFrameProvider() {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+
+ Release();
+}
+
+std::unique_ptr<DesktopFrame> DesktopFrameProvider::TakeLatestFrameForDisplay(
+ CGDirectDisplayID display_id) {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+
+ if (!allow_iosurface_ || !io_surfaces_[display_id]) {
+    // Regenerate a snapshot. If IOSurface capture is enabled, the cached
+    // entry stays empty until the stream handler has been called.
+ return DesktopFrameCGImage::CreateForDisplay(display_id);
+ }
+
+ return io_surfaces_[display_id]->Share();
+}
+
+void DesktopFrameProvider::InvalidateIOSurface(CGDirectDisplayID display_id,
+ rtc::ScopedCFTypeRef<IOSurfaceRef> io_surface) {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+
+ if (!allow_iosurface_) {
+ return;
+ }
+
+ std::unique_ptr<DesktopFrameIOSurface> desktop_frame_iosurface =
+ DesktopFrameIOSurface::Wrap(io_surface);
+
+ io_surfaces_[display_id] = desktop_frame_iosurface ?
+ SharedDesktopFrame::Wrap(std::move(desktop_frame_iosurface)) :
+ nullptr;
+}
+
+void DesktopFrameProvider::Release() {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+
+ if (!allow_iosurface_) {
+ return;
+ }
+
+ io_surfaces_.clear();
+}
+
+} // namespace webrtc
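
As a sketch of the intended call pattern (assuming, per the SequenceChecker, that everything below runs on the capture thread; `ProviderFlowSketch` is a hypothetical helper, not part of the patch):

#include <CoreGraphics/CoreGraphics.h>

#include <memory>

#include "modules/desktop_capture/mac/desktop_frame_provider.h"

void ProviderFlowSketch(CGDirectDisplayID display,
                        rtc::ScopedCFTypeRef<IOSurfaceRef> io_surface) {
  webrtc::DesktopFrameProvider provider(/*allow_iosurface=*/true);

  // Stream-handler side: cache the newest surface for `display`. A failed
  // wrap simply clears the cached entry for that display.
  provider.InvalidateIOSurface(display, io_surface);

  // Capture side: a shared copy of the cached frame, or a fresh CGImage
  // snapshot when no IOSurface has arrived for `display` yet.
  std::unique_ptr<webrtc::DesktopFrame> frame =
      provider.TakeLatestFrameForDisplay(display);

  // Before stopping the CGDisplayStream streams: drop all cached surfaces.
  provider.Release();
}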
diff --git a/third_party/libwebrtc/modules/desktop_capture/mac/full_screen_mac_application_handler.cc b/third_party/libwebrtc/modules/desktop_capture/mac/full_screen_mac_application_handler.cc
new file mode 100644
index 0000000000..45cd3223d2
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/mac/full_screen_mac_application_handler.cc
@@ -0,0 +1,238 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/mac/full_screen_mac_application_handler.h"
+
+#include <libproc.h>
+
+#include <algorithm>
+#include <functional>
+#include <string>
+
+#include "absl/strings/match.h"
+#include "absl/strings/string_view.h"
+#include "api/function_view.h"
+#include "modules/desktop_capture/mac/window_list_utils.h"
+
+namespace webrtc {
+namespace {
+
+static constexpr const char* kPowerPointSlideShowTitles[] = {
+ "PowerPoint-Bildschirmpräsentation",
+ "Προβολή παρουσίασης PowerPoint",
+ "PowerPoint スライド ショー",
+ "PowerPoint Slide Show",
+ "PowerPoint 幻灯片放映",
+ "Presentación de PowerPoint",
+ "PowerPoint-slideshow",
+ "Presentazione di PowerPoint",
+ "Prezentácia programu PowerPoint",
+ "Apresentação do PowerPoint",
+ "PowerPoint-bildspel",
+ "Prezentace v aplikaci PowerPoint",
+ "PowerPoint 슬라이드 쇼",
+ "PowerPoint-lysbildefremvisning",
+ "PowerPoint-vetítés",
+ "PowerPoint Slayt Gösterisi",
+ "Pokaz slajdów programu PowerPoint",
+ "PowerPoint 投影片放映",
+ "Демонстрация PowerPoint",
+ "Diaporama PowerPoint",
+ "PowerPoint-diaesitys",
+ "Peragaan Slide PowerPoint",
+ "PowerPoint-diavoorstelling",
+ "การนำเสนอสไลด์ PowerPoint",
+ "Apresentação de slides do PowerPoint",
+ "הצגת שקופיות של PowerPoint",
+ "عرض شرائح في PowerPoint"};
+
+class FullScreenMacApplicationHandler : public FullScreenApplicationHandler {
+ public:
+ using TitlePredicate =
+ std::function<bool(absl::string_view, absl::string_view)>;
+
+ FullScreenMacApplicationHandler(DesktopCapturer::SourceId sourceId,
+ TitlePredicate title_predicate,
+ bool ignore_original_window)
+ : FullScreenApplicationHandler(sourceId),
+ title_predicate_(title_predicate),
+ owner_pid_(GetWindowOwnerPid(sourceId)),
+ ignore_original_window_(ignore_original_window) {}
+
+ protected:
+ using CachePredicate =
+ rtc::FunctionView<bool(const DesktopCapturer::Source&)>;
+
+ void InvalidateCacheIfNeeded(const DesktopCapturer::SourceList& source_list,
+ int64_t timestamp,
+ CachePredicate predicate) const {
+ if (timestamp != cache_timestamp_) {
+ cache_sources_.clear();
+ std::copy_if(source_list.begin(), source_list.end(),
+ std::back_inserter(cache_sources_), predicate);
+ cache_timestamp_ = timestamp;
+ }
+ }
+
+ WindowId FindFullScreenWindowWithSamePid(
+ const DesktopCapturer::SourceList& source_list,
+ int64_t timestamp) const {
+ InvalidateCacheIfNeeded(source_list, timestamp,
+ [&](const DesktopCapturer::Source& src) {
+ return src.id != GetSourceId() &&
+ GetWindowOwnerPid(src.id) == owner_pid_;
+ });
+ if (cache_sources_.empty())
+ return kCGNullWindowID;
+
+ const auto original_window = GetSourceId();
+ const std::string title = GetWindowTitle(original_window);
+
+    // We can ignore any window with an empty title: regardless of the type
+    // of application, it is impossible to verify that a full-screen window
+    // and the original window are related to the same document.
+ if (title.empty())
+ return kCGNullWindowID;
+
+ MacDesktopConfiguration desktop_config =
+ MacDesktopConfiguration::GetCurrent(
+ MacDesktopConfiguration::TopLeftOrigin);
+
+ const auto it = std::find_if(
+ cache_sources_.begin(), cache_sources_.end(),
+ [&](const DesktopCapturer::Source& src) {
+ const std::string window_title = GetWindowTitle(src.id);
+
+ if (window_title.empty())
+ return false;
+
+ if (title_predicate_ && !title_predicate_(title, window_title))
+ return false;
+
+ return IsWindowFullScreen(desktop_config, src.id);
+ });
+
+    return it != cache_sources_.end() ? it->id : kCGNullWindowID;
+ }
+
+ DesktopCapturer::SourceId FindFullScreenWindow(
+ const DesktopCapturer::SourceList& source_list,
+ int64_t timestamp) const override {
+ return !ignore_original_window_ && IsWindowOnScreen(GetSourceId())
+ ? 0
+ : FindFullScreenWindowWithSamePid(source_list, timestamp);
+ }
+
+ protected:
+ const TitlePredicate title_predicate_;
+ const int owner_pid_;
+ const bool ignore_original_window_;
+ mutable int64_t cache_timestamp_ = 0;
+ mutable DesktopCapturer::SourceList cache_sources_;
+};
+
+bool equal_title_predicate(absl::string_view original_title,
+ absl::string_view title) {
+ return original_title == title;
+}
+
+bool slide_show_title_predicate(absl::string_view original_title,
+ absl::string_view title) {
+ if (title.find(original_title) == absl::string_view::npos)
+ return false;
+
+ for (const char* pp_slide_title : kPowerPointSlideShowTitles) {
+ if (absl::StartsWith(title, pp_slide_title))
+ return true;
+ }
+ return false;
+}
+
+class OpenOfficeApplicationHandler : public FullScreenMacApplicationHandler {
+ public:
+ OpenOfficeApplicationHandler(DesktopCapturer::SourceId sourceId)
+ : FullScreenMacApplicationHandler(sourceId, nullptr, false) {}
+
+ DesktopCapturer::SourceId FindFullScreenWindow(
+ const DesktopCapturer::SourceList& source_list,
+ int64_t timestamp) const override {
+ InvalidateCacheIfNeeded(source_list, timestamp,
+ [&](const DesktopCapturer::Source& src) {
+ return GetWindowOwnerPid(src.id) == owner_pid_;
+ });
+
+ const auto original_window = GetSourceId();
+ const std::string original_title = GetWindowTitle(original_window);
+
+    // Check that there is only one document window; otherwise it is not
+    // possible to reliably match a document window to the slide show window,
+    // which has an empty title.
+ if (std::any_of(cache_sources_.begin(), cache_sources_.end(),
+ [&original_title](const DesktopCapturer::Source& src) {
+ return src.title.length() && src.title != original_title;
+ })) {
+ return kCGNullWindowID;
+ }
+
+ MacDesktopConfiguration desktop_config =
+ MacDesktopConfiguration::GetCurrent(
+ MacDesktopConfiguration::TopLeftOrigin);
+
+    // Look for the slide show window: it must be a full-screen window with
+    // an empty title.
+ const auto slide_show_window = std::find_if(
+ cache_sources_.begin(), cache_sources_.end(), [&](const auto& src) {
+ return src.title.empty() &&
+ IsWindowFullScreen(desktop_config, src.id);
+ });
+
+ if (slide_show_window == cache_sources_.end()) {
+ return kCGNullWindowID;
+ }
+
+ return slide_show_window->id;
+ }
+};
+
+} // namespace
+
+std::unique_ptr<FullScreenApplicationHandler>
+CreateFullScreenMacApplicationHandler(DesktopCapturer::SourceId sourceId) {
+ std::unique_ptr<FullScreenApplicationHandler> result;
+ int pid = GetWindowOwnerPid(sourceId);
+ char buffer[PROC_PIDPATHINFO_MAXSIZE];
+ int path_length = proc_pidpath(pid, buffer, sizeof(buffer));
+ if (path_length > 0) {
+ const char* last_slash = strrchr(buffer, '/');
+ const std::string name{last_slash ? last_slash + 1 : buffer};
+ const std::string owner_name = GetWindowOwnerName(sourceId);
+ FullScreenMacApplicationHandler::TitlePredicate predicate = nullptr;
+ bool ignore_original_window = false;
+ if (name.find("Google Chrome") == 0 || name == "Chromium") {
+ predicate = equal_title_predicate;
+ } else if (name == "Microsoft PowerPoint") {
+ predicate = slide_show_title_predicate;
+ ignore_original_window = true;
+ } else if (name == "Keynote") {
+ predicate = equal_title_predicate;
+ } else if (owner_name == "OpenOffice") {
+ return std::make_unique<OpenOfficeApplicationHandler>(sourceId);
+ }
+
+ if (predicate) {
+ result.reset(new FullScreenMacApplicationHandler(sourceId, predicate,
+ ignore_original_window));
+ }
+ }
+
+ return result;
+}
+
+} // namespace webrtc
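
To illustrate the PowerPoint matching rule, a small sketch with hypothetical window titles. The predicate itself lives in the anonymous namespace above, so this is illustrative rather than linkable as-is.

#include <cassert>

#include "absl/strings/string_view.h"

// Forward declaration mirroring the predicate defined above.
bool slide_show_title_predicate(absl::string_view original_title,
                                absl::string_view title);

void SlideShowPredicateSketch() {
  absl::string_view original = "Quarterly Review.pptx";
  // Matches: starts with the localized prefix "PowerPoint Slide Show" and
  // contains the original document window's title.
  assert(slide_show_title_predicate(
      original, "PowerPoint Slide Show - [Quarterly Review.pptx]"));
  // Rejected: the full-screen title does not contain the document title.
  assert(!slide_show_title_predicate(original, "PowerPoint Slide Show"));
}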
diff --git a/third_party/libwebrtc/modules/desktop_capture/mac/full_screen_mac_application_handler.h b/third_party/libwebrtc/modules/desktop_capture/mac/full_screen_mac_application_handler.h
new file mode 100644
index 0000000000..f795a22030
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/mac/full_screen_mac_application_handler.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_DESKTOP_CAPTURE_MAC_FULL_SCREEN_MAC_APPLICATION_HANDLER_H_
+#define MODULES_DESKTOP_CAPTURE_MAC_FULL_SCREEN_MAC_APPLICATION_HANDLER_H_
+
+#include <memory>
+#include "modules/desktop_capture/full_screen_application_handler.h"
+
+namespace webrtc {
+
+std::unique_ptr<FullScreenApplicationHandler>
+CreateFullScreenMacApplicationHandler(DesktopCapturer::SourceId sourceId);
+
+} // namespace webrtc
+
+#endif // MODULES_DESKTOP_CAPTURE_MAC_FULL_SCREEN_MAC_APPLICATION_HANDLER_H_
diff --git a/third_party/libwebrtc/modules/desktop_capture/mac/screen_capturer_mac.h b/third_party/libwebrtc/modules/desktop_capture/mac/screen_capturer_mac.h
new file mode 100644
index 0000000000..7be05cc639
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/mac/screen_capturer_mac.h
@@ -0,0 +1,128 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_DESKTOP_CAPTURE_MAC_SCREEN_CAPTURER_MAC_H_
+#define MODULES_DESKTOP_CAPTURE_MAC_SCREEN_CAPTURER_MAC_H_
+
+#include <CoreGraphics/CoreGraphics.h>
+
+#include <memory>
+#include <vector>
+
+#include "api/sequence_checker.h"
+#include "modules/desktop_capture/desktop_capture_options.h"
+#include "modules/desktop_capture/desktop_capturer.h"
+#include "modules/desktop_capture/desktop_frame.h"
+#include "modules/desktop_capture/desktop_geometry.h"
+#include "modules/desktop_capture/desktop_region.h"
+#include "modules/desktop_capture/mac/desktop_configuration.h"
+#include "modules/desktop_capture/mac/desktop_configuration_monitor.h"
+#include "modules/desktop_capture/mac/desktop_frame_provider.h"
+#include "modules/desktop_capture/screen_capture_frame_queue.h"
+#include "modules/desktop_capture/screen_capturer_helper.h"
+#include "modules/desktop_capture/shared_desktop_frame.h"
+
+namespace webrtc {
+
+class DisplayStreamManager;
+
+// A class to perform video frame capturing for Mac.
+class ScreenCapturerMac final : public DesktopCapturer {
+ public:
+ ScreenCapturerMac(
+ rtc::scoped_refptr<DesktopConfigurationMonitor> desktop_config_monitor,
+ bool detect_updated_region,
+ bool allow_iosurface);
+ ~ScreenCapturerMac() override;
+
+ ScreenCapturerMac(const ScreenCapturerMac&) = delete;
+ ScreenCapturerMac& operator=(const ScreenCapturerMac&) = delete;
+
+ // TODO(julien.isorce): Remove Init() or make it private.
+ bool Init();
+
+ // DesktopCapturer interface.
+ void Start(Callback* callback) override;
+ void CaptureFrame() override;
+ void SetExcludedWindow(WindowId window) override;
+ bool GetSourceList(SourceList* screens) override;
+ bool SelectSource(SourceId id) override;
+
+ private:
+ // Returns false if the selected screen is no longer valid.
+ bool CgBlit(const DesktopFrame& frame, const DesktopRegion& region);
+
+ // Called when the screen configuration is changed.
+ void ScreenConfigurationChanged();
+
+ bool RegisterRefreshAndMoveHandlers();
+ void UnregisterRefreshAndMoveHandlers();
+
+ void ScreenRefresh(CGDirectDisplayID display_id,
+ CGRectCount count,
+ const CGRect* rect_array,
+ DesktopVector display_origin,
+ IOSurfaceRef io_surface);
+ void ReleaseBuffers();
+
+ std::unique_ptr<DesktopFrame> CreateFrame();
+
+ const bool detect_updated_region_;
+
+ Callback* callback_ = nullptr;
+
+  // Queue of the frame buffers.
+ ScreenCaptureFrameQueue<SharedDesktopFrame> queue_;
+
+ // Current display configuration.
+ MacDesktopConfiguration desktop_config_;
+
+ // Currently selected display, or 0 if the full desktop is selected. On OS X
+ // 10.6 and before, this is always 0.
+ CGDirectDisplayID current_display_ = 0;
+
+ // The physical pixel bounds of the current screen.
+ DesktopRect screen_pixel_bounds_;
+
+ // The dip to physical pixel scale of the current screen.
+ float dip_to_pixel_scale_ = 1.0f;
+
+ // A thread-safe list of invalid rectangles, and the size of the most
+ // recently captured screen.
+ ScreenCapturerHelper helper_;
+
+ // Contains an invalid region from the previous capture.
+ DesktopRegion last_invalid_region_;
+
+ // Monitoring display reconfiguration.
+ rtc::scoped_refptr<DesktopConfigurationMonitor> desktop_config_monitor_;
+
+ CGWindowID excluded_window_ = 0;
+
+ // List of streams, one per screen.
+ std::vector<CGDisplayStreamRef> display_streams_;
+
+  // Container holding the latest snapshot state per display.
+ DesktopFrameProvider desktop_frame_provider_;
+
+  // Start, CaptureFrame and the destructor have to be called on the same
+  // thread.
+ SequenceChecker thread_checker_;
+
+  // Used to force CaptureFrame to update its screen configuration
+  // and re-register event handlers. This ensures that these updates
+  // occur on the ScreenCapture thread. Read and written from
+  // both the VideoCapture thread and the ScreenCapture thread.
+  // Protected by desktop_config_monitor_.
+ bool update_screen_configuration_ = false;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_DESKTOP_CAPTURE_MAC_SCREEN_CAPTURER_MAC_H_
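
A minimal driver sketch for this class, assuming a DesktopConfigurationMonitor is already available; per the comments above, Start(), CaptureFrame() and the destructor must all run on the same thread. `NullCallback` and `CaptureOnceSketch` are hypothetical names, not part of the patch.

#include <memory>

#include "modules/desktop_capture/mac/screen_capturer_mac.h"

class NullCallback : public webrtc::DesktopCapturer::Callback {
 public:
  void OnCaptureResult(webrtc::DesktopCapturer::Result result,
                       std::unique_ptr<webrtc::DesktopFrame> frame) override {
    // Consume `frame` here; it is non-null only on Result::SUCCESS.
  }
};

void CaptureOnceSketch(
    rtc::scoped_refptr<webrtc::DesktopConfigurationMonitor> monitor) {
  webrtc::ScreenCapturerMac capturer(monitor,
                                     /*detect_updated_region=*/false,
                                     /*allow_iosurface=*/true);
  if (!capturer.Init()) return;

  NullCallback callback;        // Must outlive the capturer's use of it.
  capturer.Start(&callback);    // Registers the CGDisplayStream handlers.
  capturer.CaptureFrame();      // Delivers one frame via OnCaptureResult().
}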
diff --git a/third_party/libwebrtc/modules/desktop_capture/mac/screen_capturer_mac.mm b/third_party/libwebrtc/modules/desktop_capture/mac/screen_capturer_mac.mm
new file mode 100644
index 0000000000..cad0c5b65b
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/mac/screen_capturer_mac.mm
@@ -0,0 +1,552 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <utility>
+
+#include "modules/desktop_capture/mac/screen_capturer_mac.h"
+
+#include "modules/desktop_capture/mac/desktop_frame_provider.h"
+#include "modules/desktop_capture/mac/window_list_utils.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/time_utils.h"
+#include "rtc_base/trace_event.h"
+#include "sdk/objc/helpers/scoped_cftyperef.h"
+
+namespace webrtc {
+
+namespace {
+
+// Scales all coordinates of a rect by a specified factor, flooring the near
+// edges and ceiling the far edges so that partially covered pixels are kept.
+DesktopRect ScaleAndRoundCGRect(const CGRect& rect, float scale) {
+ return DesktopRect::MakeLTRB(static_cast<int>(floor(rect.origin.x * scale)),
+ static_cast<int>(floor(rect.origin.y * scale)),
+ static_cast<int>(ceil((rect.origin.x + rect.size.width) * scale)),
+ static_cast<int>(ceil((rect.origin.y + rect.size.height) * scale)));
+}
+
+// Copy pixels in the `rect` from `src_plane` to `dest_plane`. `rect` should be
+// relative to the origin of `src_plane` and `dest_plane`.
+void CopyRect(const uint8_t* src_plane,
+ int src_plane_stride,
+ uint8_t* dest_plane,
+ int dest_plane_stride,
+ int bytes_per_pixel,
+ const DesktopRect& rect) {
+ // Get the address of the starting point.
+ const int src_y_offset = src_plane_stride * rect.top();
+ const int dest_y_offset = dest_plane_stride * rect.top();
+ const int x_offset = bytes_per_pixel * rect.left();
+ src_plane += src_y_offset + x_offset;
+ dest_plane += dest_y_offset + x_offset;
+
+ // Copy pixels in the rectangle line by line.
+ const int bytes_per_line = bytes_per_pixel * rect.width();
+ const int height = rect.height();
+ for (int i = 0; i < height; ++i) {
+ memcpy(dest_plane, src_plane, bytes_per_line);
+ src_plane += src_plane_stride;
+ dest_plane += dest_plane_stride;
+ }
+}
+
+// Returns an array of CGWindowID for all the on-screen windows except
+// `window_to_exclude`, or NULL if that window is not found or the lookup
+// fails. The caller should release the returned CFArrayRef.
+CFArrayRef CreateWindowListWithExclusion(CGWindowID window_to_exclude) {
+ if (!window_to_exclude) return nullptr;
+
+ CFArrayRef all_windows =
+ CGWindowListCopyWindowInfo(kCGWindowListOptionOnScreenOnly, kCGNullWindowID);
+ if (!all_windows) return nullptr;
+
+ CFMutableArrayRef returned_array =
+ CFArrayCreateMutable(nullptr, CFArrayGetCount(all_windows), nullptr);
+
+ bool found = false;
+ for (CFIndex i = 0; i < CFArrayGetCount(all_windows); ++i) {
+ CFDictionaryRef window =
+ reinterpret_cast<CFDictionaryRef>(CFArrayGetValueAtIndex(all_windows, i));
+
+ CGWindowID id = GetWindowId(window);
+ if (id == window_to_exclude) {
+ found = true;
+ continue;
+ }
+ CFArrayAppendValue(returned_array, reinterpret_cast<void*>(id));
+ }
+ CFRelease(all_windows);
+
+ if (!found) {
+ CFRelease(returned_array);
+ returned_array = nullptr;
+ }
+ return returned_array;
+}
+
+// Returns the bounds of `window` in physical pixels, enlarged by a small
+// amount on all four edges to account for border/shadow effects.
+DesktopRect GetExcludedWindowPixelBounds(CGWindowID window, float dip_to_pixel_scale) {
+  // The number of pixels to add to the actual window bounds to account for
+  // the border/shadow effects.
+ static const int kBorderEffectSize = 20;
+  CGRect rect = CGRectZero;  // Stay defined even if the lookup below fails.
+ CGWindowID ids[1];
+ ids[0] = window;
+
+ CFArrayRef window_id_array =
+ CFArrayCreate(nullptr, reinterpret_cast<const void**>(&ids), 1, nullptr);
+ CFArrayRef window_array = CGWindowListCreateDescriptionFromArray(window_id_array);
+
+ if (CFArrayGetCount(window_array) > 0) {
+ CFDictionaryRef win =
+ reinterpret_cast<CFDictionaryRef>(CFArrayGetValueAtIndex(window_array, 0));
+ CFDictionaryRef bounds_ref =
+ reinterpret_cast<CFDictionaryRef>(CFDictionaryGetValue(win, kCGWindowBounds));
+ CGRectMakeWithDictionaryRepresentation(bounds_ref, &rect);
+ }
+
+ CFRelease(window_id_array);
+ CFRelease(window_array);
+
+ rect.origin.x -= kBorderEffectSize;
+ rect.origin.y -= kBorderEffectSize;
+ rect.size.width += kBorderEffectSize * 2;
+ rect.size.height += kBorderEffectSize * 2;
+ // `rect` is in DIP, so convert to physical pixels.
+ return ScaleAndRoundCGRect(rect, dip_to_pixel_scale);
+}
+
+// Create an image of the given region using the given `window_list`.
+// `pixel_bounds` should be in the primary display's coordinate in physical
+// pixels.
+rtc::ScopedCFTypeRef<CGImageRef> CreateExcludedWindowRegionImage(const DesktopRect& pixel_bounds,
+ float dip_to_pixel_scale,
+ CFArrayRef window_list) {
+ CGRect window_bounds;
+ // The origin is in DIP while the size is in physical pixels. That's what
+ // CGWindowListCreateImageFromArray expects.
+ window_bounds.origin.x = pixel_bounds.left() / dip_to_pixel_scale;
+ window_bounds.origin.y = pixel_bounds.top() / dip_to_pixel_scale;
+ window_bounds.size.width = pixel_bounds.width();
+ window_bounds.size.height = pixel_bounds.height();
+
+ return rtc::ScopedCFTypeRef<CGImageRef>(
+ CGWindowListCreateImageFromArray(window_bounds, window_list, kCGWindowImageDefault));
+}
+
+} // namespace
+
+ScreenCapturerMac::ScreenCapturerMac(
+ rtc::scoped_refptr<DesktopConfigurationMonitor> desktop_config_monitor,
+ bool detect_updated_region,
+ bool allow_iosurface)
+ : detect_updated_region_(detect_updated_region),
+ desktop_config_monitor_(desktop_config_monitor),
+ desktop_frame_provider_(allow_iosurface) {
+ RTC_LOG(LS_INFO) << "Allow IOSurface: " << allow_iosurface;
+ thread_checker_.Detach();
+}
+
+ScreenCapturerMac::~ScreenCapturerMac() {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ ReleaseBuffers();
+ UnregisterRefreshAndMoveHandlers();
+}
+
+bool ScreenCapturerMac::Init() {
+ TRACE_EVENT0("webrtc", "ScreenCapturerMac::Init");
+ desktop_config_ = desktop_config_monitor_->desktop_configuration();
+ return true;
+}
+
+void ScreenCapturerMac::ReleaseBuffers() {
+ // The buffers might be in use by the encoder, so don't delete them here.
+ // Instead, mark them as "needs update"; next time the buffers are used by
+ // the capturer, they will be recreated if necessary.
+ queue_.Reset();
+}
+
+void ScreenCapturerMac::Start(Callback* callback) {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ RTC_DCHECK(!callback_);
+ RTC_DCHECK(callback);
+ TRACE_EVENT_INSTANT1(
+ "webrtc", "ScreenCapturermac::Start", "target display id ", current_display_);
+
+ callback_ = callback;
+ update_screen_configuration_ = false;
+ // Start and operate CGDisplayStream handler all from capture thread.
+ if (!RegisterRefreshAndMoveHandlers()) {
+ RTC_LOG(LS_ERROR) << "Failed to register refresh and move handlers.";
+ callback_->OnCaptureResult(Result::ERROR_PERMANENT, nullptr);
+ return;
+ }
+ ScreenConfigurationChanged();
+}
+
+void ScreenCapturerMac::CaptureFrame() {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+  TRACE_EVENT0("webrtc", "ScreenCapturerMac::CaptureFrame");
+ int64_t capture_start_time_nanos = rtc::TimeNanos();
+
+ queue_.MoveToNextFrame();
+ if (queue_.current_frame() && queue_.current_frame()->IsShared()) {
+ RTC_DLOG(LS_WARNING) << "Overwriting frame that is still shared.";
+ }
+
+ MacDesktopConfiguration new_config = desktop_config_monitor_->desktop_configuration();
+ if (update_screen_configuration_ || !desktop_config_.Equals(new_config)) {
+ update_screen_configuration_ = false;
+ desktop_config_ = new_config;
+    // If the display configuration has changed then refresh capturer data
+ // structures. Occasionally, the refresh and move handlers are lost when
+ // the screen mode changes, so re-register them here.
+ UnregisterRefreshAndMoveHandlers();
+ if (!RegisterRefreshAndMoveHandlers()) {
+ RTC_LOG(LS_ERROR) << "Failed to register refresh and move handlers.";
+ callback_->OnCaptureResult(Result::ERROR_PERMANENT, nullptr);
+ return;
+ }
+ ScreenConfigurationChanged();
+ }
+
+  // When the screen is zoomed in/out, macOS only updates the rects currently
+  // visible on screen, with locations relative to the current on-screen
+  // top-left. This causes problems when we copy the dirty regions to the
+  // captured image, so we invalidate the whole screen to copy all of the
+  // screen contents.
+  // With the CGImage method, the zooming is ignored and the whole screen
+  // contents are captured as before.
+  // With the IOSurface method, the zoomed screen contents are captured.
+ if (UAZoomEnabled()) {
+ helper_.InvalidateScreen(screen_pixel_bounds_.size());
+ }
+
+ DesktopRegion region;
+ helper_.TakeInvalidRegion(&region);
+
+ // If the current buffer is from an older generation then allocate a new one.
+ // Note that we can't reallocate other buffers at this point, since the caller
+ // may still be reading from them.
+ if (!queue_.current_frame()) queue_.ReplaceCurrentFrame(SharedDesktopFrame::Wrap(CreateFrame()));
+
+ DesktopFrame* current_frame = queue_.current_frame();
+
+ if (!CgBlit(*current_frame, region)) {
+ callback_->OnCaptureResult(Result::ERROR_PERMANENT, nullptr);
+ return;
+ }
+ std::unique_ptr<DesktopFrame> new_frame = queue_.current_frame()->Share();
+ if (detect_updated_region_) {
+ *new_frame->mutable_updated_region() = region;
+ } else {
+ new_frame->mutable_updated_region()->AddRect(DesktopRect::MakeSize(new_frame->size()));
+ }
+
+ if (current_display_) {
+ const MacDisplayConfiguration* config =
+ desktop_config_.FindDisplayConfigurationById(current_display_);
+ if (config) {
+ new_frame->set_top_left(
+ config->bounds.top_left().subtract(desktop_config_.bounds.top_left()));
+ }
+ }
+
+ helper_.set_size_most_recent(new_frame->size());
+
+ new_frame->set_capture_time_ms((rtc::TimeNanos() - capture_start_time_nanos) /
+ rtc::kNumNanosecsPerMillisec);
+ callback_->OnCaptureResult(Result::SUCCESS, std::move(new_frame));
+}
+
+void ScreenCapturerMac::SetExcludedWindow(WindowId window) {
+ excluded_window_ = window;
+}
+
+bool ScreenCapturerMac::GetSourceList(SourceList* screens) {
+ RTC_DCHECK(screens->size() == 0);
+
+ for (MacDisplayConfigurations::iterator it = desktop_config_.displays.begin();
+ it != desktop_config_.displays.end();
+ ++it) {
+ Source value = {it->id, 0, std::string()};
+ screens->push_back(value);
+ }
+ return true;
+}
+
+bool ScreenCapturerMac::SelectSource(SourceId id) {
+ if (id == kFullDesktopScreenId) {
+ current_display_ = 0;
+ } else {
+ const MacDisplayConfiguration* config =
+ desktop_config_.FindDisplayConfigurationById(static_cast<CGDirectDisplayID>(id));
+ if (!config) return false;
+ current_display_ = config->id;
+ }
+
+ ScreenConfigurationChanged();
+ return true;
+}
+
+bool ScreenCapturerMac::CgBlit(const DesktopFrame& frame, const DesktopRegion& region) {
+  // If not all of the screen region is dirty, copy the entire contents of the
+  // previous capture buffer as the base to capture over.
+ if (queue_.previous_frame() && !region.Equals(DesktopRegion(screen_pixel_bounds_))) {
+ memcpy(frame.data(), queue_.previous_frame()->data(), frame.stride() * frame.size().height());
+ }
+
+ MacDisplayConfigurations displays_to_capture;
+ if (current_display_) {
+ // Capturing a single screen. Note that the screen id may change when
+ // screens are added or removed.
+ const MacDisplayConfiguration* config =
+ desktop_config_.FindDisplayConfigurationById(current_display_);
+ if (config) {
+ displays_to_capture.push_back(*config);
+ } else {
+ RTC_LOG(LS_ERROR) << "The selected screen cannot be found for capturing.";
+ return false;
+ }
+ } else {
+ // Capturing the whole desktop.
+ displays_to_capture = desktop_config_.displays;
+ }
+
+ // Create the window list once for all displays.
+ CFArrayRef window_list = CreateWindowListWithExclusion(excluded_window_);
+
+ for (size_t i = 0; i < displays_to_capture.size(); ++i) {
+ const MacDisplayConfiguration& display_config = displays_to_capture[i];
+
+ // Capturing mixed-DPI on one surface is hard, so we only return displays
+ // that match the "primary" display's DPI. The primary display is always
+ // the first in the list.
+ if (i > 0 && display_config.dip_to_pixel_scale != displays_to_capture[0].dip_to_pixel_scale) {
+ continue;
+ }
+ // Determine the display's position relative to the desktop, in pixels.
+ DesktopRect display_bounds = display_config.pixel_bounds;
+ display_bounds.Translate(-screen_pixel_bounds_.left(), -screen_pixel_bounds_.top());
+
+    // Determine which parts of the blit region, if any, lie within the monitor.
+ DesktopRegion copy_region = region;
+ copy_region.IntersectWith(display_bounds);
+ if (copy_region.is_empty()) continue;
+
+ // Translate the region to be copied into display-relative coordinates.
+ copy_region.Translate(-display_bounds.left(), -display_bounds.top());
+
+ DesktopRect excluded_window_bounds;
+ rtc::ScopedCFTypeRef<CGImageRef> excluded_image;
+ if (excluded_window_ && window_list) {
+      // Get the region of the excluded window relative to the primary display.
+ excluded_window_bounds =
+ GetExcludedWindowPixelBounds(excluded_window_, display_config.dip_to_pixel_scale);
+ excluded_window_bounds.IntersectWith(display_config.pixel_bounds);
+
+ // Create the image under the excluded window first, because it's faster
+      // than capturing the whole display.
+ if (!excluded_window_bounds.is_empty()) {
+ excluded_image = CreateExcludedWindowRegionImage(
+ excluded_window_bounds, display_config.dip_to_pixel_scale, window_list);
+ }
+ }
+
+ std::unique_ptr<DesktopFrame> frame_source =
+ desktop_frame_provider_.TakeLatestFrameForDisplay(display_config.id);
+ if (!frame_source) {
+ continue;
+ }
+
+ const uint8_t* display_base_address = frame_source->data();
+ int src_bytes_per_row = frame_source->stride();
+ RTC_DCHECK(display_base_address);
+
+ // `frame_source` size may be different from display_bounds in case the screen was
+ // resized recently.
+ copy_region.IntersectWith(frame_source->rect());
+
+ // Copy the dirty region from the display buffer into our desktop buffer.
+ uint8_t* out_ptr = frame.GetFrameDataAtPos(display_bounds.top_left());
+ for (DesktopRegion::Iterator it(copy_region); !it.IsAtEnd(); it.Advance()) {
+ CopyRect(display_base_address,
+ src_bytes_per_row,
+ out_ptr,
+ frame.stride(),
+ DesktopFrame::kBytesPerPixel,
+ it.rect());
+ }
+
+ if (excluded_image) {
+ CGDataProviderRef provider = CGImageGetDataProvider(excluded_image.get());
+ rtc::ScopedCFTypeRef<CFDataRef> excluded_image_data(CGDataProviderCopyData(provider));
+ RTC_DCHECK(excluded_image_data);
+ display_base_address = CFDataGetBytePtr(excluded_image_data.get());
+ src_bytes_per_row = CGImageGetBytesPerRow(excluded_image.get());
+
+ // Translate the bounds relative to the desktop, because `frame` data
+ // starts from the desktop top-left corner.
+ DesktopRect window_bounds_relative_to_desktop(excluded_window_bounds);
+ window_bounds_relative_to_desktop.Translate(-screen_pixel_bounds_.left(),
+ -screen_pixel_bounds_.top());
+
+ DesktopRect rect_to_copy = DesktopRect::MakeSize(excluded_window_bounds.size());
+ rect_to_copy.IntersectWith(DesktopRect::MakeWH(CGImageGetWidth(excluded_image.get()),
+ CGImageGetHeight(excluded_image.get())));
+
+ if (CGImageGetBitsPerPixel(excluded_image.get()) / 8 == DesktopFrame::kBytesPerPixel) {
+ CopyRect(display_base_address,
+ src_bytes_per_row,
+ frame.GetFrameDataAtPos(window_bounds_relative_to_desktop.top_left()),
+ frame.stride(),
+ DesktopFrame::kBytesPerPixel,
+ rect_to_copy);
+ }
+ }
+ }
+ if (window_list) CFRelease(window_list);
+ return true;
+}
+
+void ScreenCapturerMac::ScreenConfigurationChanged() {
+ if (current_display_) {
+ const MacDisplayConfiguration* config =
+ desktop_config_.FindDisplayConfigurationById(current_display_);
+ screen_pixel_bounds_ = config ? config->pixel_bounds : DesktopRect();
+ dip_to_pixel_scale_ = config ? config->dip_to_pixel_scale : 1.0f;
+ } else {
+ screen_pixel_bounds_ = desktop_config_.pixel_bounds;
+ dip_to_pixel_scale_ = desktop_config_.dip_to_pixel_scale;
+ }
+
+ // Release existing buffers, which will be of the wrong size.
+ ReleaseBuffers();
+
+ // Clear the dirty region, in case the display is down-sizing.
+ helper_.ClearInvalidRegion();
+
+ // Re-mark the entire desktop as dirty.
+ helper_.InvalidateScreen(screen_pixel_bounds_.size());
+
+ // Make sure the frame buffers will be reallocated.
+ queue_.Reset();
+}
+
+bool ScreenCapturerMac::RegisterRefreshAndMoveHandlers() {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ desktop_config_ = desktop_config_monitor_->desktop_configuration();
+ for (const auto& config : desktop_config_.displays) {
+ size_t pixel_width = config.pixel_bounds.width();
+ size_t pixel_height = config.pixel_bounds.height();
+ if (pixel_width == 0 || pixel_height == 0) continue;
+ CGDirectDisplayID display_id = config.id;
+ DesktopVector display_origin = config.pixel_bounds.top_left();
+
+ CGDisplayStreamFrameAvailableHandler handler = ^(CGDisplayStreamFrameStatus status,
+ uint64_t display_time,
+ IOSurfaceRef frame_surface,
+ CGDisplayStreamUpdateRef updateRef) {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ if (status == kCGDisplayStreamFrameStatusStopped) return;
+
+ // Only pay attention to frame updates.
+ if (status != kCGDisplayStreamFrameStatusFrameComplete) return;
+
+ size_t count = 0;
+ const CGRect* rects =
+ CGDisplayStreamUpdateGetRects(updateRef, kCGDisplayStreamUpdateDirtyRects, &count);
+ if (count != 0) {
+ // According to CGDisplayStream.h, it's safe to call
+ // CGDisplayStreamStop() from within the callback.
+ ScreenRefresh(display_id, count, rects, display_origin, frame_surface);
+ }
+ };
+
+ rtc::ScopedCFTypeRef<CFDictionaryRef> properties_dict(
+ CFDictionaryCreate(kCFAllocatorDefault,
+ (const void* []){kCGDisplayStreamShowCursor},
+ (const void* []){kCFBooleanFalse},
+ 1,
+ &kCFTypeDictionaryKeyCallBacks,
+ &kCFTypeDictionaryValueCallBacks));
+
+ CGDisplayStreamRef display_stream = CGDisplayStreamCreate(
+ display_id, pixel_width, pixel_height, 'BGRA', properties_dict.get(), handler);
+
+    if (display_stream) {
+      CGError error = CGDisplayStreamStart(display_stream);
+      if (error != kCGErrorSuccess) {
+        // Release the stream that failed to start so it does not leak.
+        CFRelease(display_stream);
+        return false;
+      }
+
+ CFRunLoopSourceRef source = CGDisplayStreamGetRunLoopSource(display_stream);
+ CFRunLoopAddSource(CFRunLoopGetCurrent(), source, kCFRunLoopCommonModes);
+ display_streams_.push_back(display_stream);
+ }
+ }
+
+ return true;
+}
+
+void ScreenCapturerMac::UnregisterRefreshAndMoveHandlers() {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+
+ for (CGDisplayStreamRef stream : display_streams_) {
+ CFRunLoopSourceRef source = CGDisplayStreamGetRunLoopSource(stream);
+ CFRunLoopRemoveSource(CFRunLoopGetCurrent(), source, kCFRunLoopCommonModes);
+ CGDisplayStreamStop(stream);
+ CFRelease(stream);
+ }
+ display_streams_.clear();
+
+ // Release obsolete io surfaces.
+ desktop_frame_provider_.Release();
+}
+
+void ScreenCapturerMac::ScreenRefresh(CGDirectDisplayID display_id,
+ CGRectCount count,
+ const CGRect* rect_array,
+ DesktopVector display_origin,
+ IOSurfaceRef io_surface) {
+ if (screen_pixel_bounds_.is_empty()) ScreenConfigurationChanged();
+
+ // The refresh rects are in display coordinates. We want to translate to
+ // framebuffer coordinates. If a specific display is being captured, then no
+ // change is necessary. If all displays are being captured, then we want to
+ // translate by the origin of the display.
+ DesktopVector translate_vector;
+ if (!current_display_) translate_vector = display_origin;
+
+ DesktopRegion region;
+ for (CGRectCount i = 0; i < count; ++i) {
+ // All rects are already in physical pixel coordinates.
+ DesktopRect rect = DesktopRect::MakeXYWH(rect_array[i].origin.x,
+ rect_array[i].origin.y,
+ rect_array[i].size.width,
+ rect_array[i].size.height);
+
+ rect.Translate(translate_vector);
+
+ region.AddRect(rect);
+ }
+  // Always have the latest IOSurface in place before invalidating a region.
+ // See https://bugs.chromium.org/p/webrtc/issues/detail?id=8652 for details.
+ desktop_frame_provider_.InvalidateIOSurface(
+ display_id, rtc::ScopedCFTypeRef<IOSurfaceRef>(io_surface, rtc::RetainPolicy::RETAIN));
+ helper_.InvalidateRegion(region);
+}
+
+std::unique_ptr<DesktopFrame> ScreenCapturerMac::CreateFrame() {
+ std::unique_ptr<DesktopFrame> frame(new BasicDesktopFrame(screen_pixel_bounds_.size()));
+ frame->set_dpi(
+ DesktopVector(kStandardDPI * dip_to_pixel_scale_, kStandardDPI * dip_to_pixel_scale_));
+ return frame;
+}
+
+} // namespace webrtc
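
One detail worth spelling out from the helpers above: ScaleAndRoundCGRect() floors the near edges and ceils the far edges, so partially covered pixels stay inside the result. A worked sketch with illustrative values (`ScaleAndRoundSketch` is a hypothetical name; the helper itself sits in the anonymous namespace above):

#include <cassert>

#include "modules/desktop_capture/desktop_geometry.h"

void ScaleAndRoundSketch() {
  // A 10x10 DIP rect at (3, 3) scaled by a 1.5 dip-to-pixel factor:
  //   left/top     = floor(3 * 1.5)        = 4
  //   right/bottom = ceil((3 + 10) * 1.5)  = 20  (19.5 rounded up)
  webrtc::DesktopRect expected =
      webrtc::DesktopRect::MakeLTRB(4, 4, 20, 20);
  // The result is a 16x16 pixel rect fully covering the scaled source rect.
  assert(expected.width() == 16 && expected.height() == 16);
}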
diff --git a/third_party/libwebrtc/modules/desktop_capture/mac/window_list_utils.cc b/third_party/libwebrtc/modules/desktop_capture/mac/window_list_utils.cc
new file mode 100644
index 0000000000..989ec7ea54
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/mac/window_list_utils.cc
@@ -0,0 +1,430 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/mac/window_list_utils.h"
+
+#include <ApplicationServices/ApplicationServices.h>
+
+#include <algorithm>
+#include <cmath>
+#include <iterator>
+#include <limits>
+#include <list>
+#include <map>
+#include <memory>
+#include <utility>
+
+#include "rtc_base/checks.h"
+
+static_assert(static_cast<webrtc::WindowId>(kCGNullWindowID) ==
+ webrtc::kNullWindowId,
+ "kNullWindowId needs to equal to kCGNullWindowID.");
+
+namespace webrtc {
+
+namespace {
+
+// WindowName of the status indicator dot shown since Monterey in the taskbar.
+// Testing on 12.2.1 shows this is independent of system language setting.
+const CFStringRef kStatusIndicator = CFSTR("StatusIndicator");
+const CFStringRef kStatusIndicatorOwnerName = CFSTR("Window Server");
+
+bool ToUtf8(const CFStringRef str16, std::string* str8) {
+ size_t maxlen = CFStringGetMaximumSizeForEncoding(CFStringGetLength(str16),
+ kCFStringEncodingUTF8) +
+ 1;
+ std::unique_ptr<char[]> buffer(new char[maxlen]);
+ if (!buffer ||
+ !CFStringGetCString(str16, buffer.get(), maxlen, kCFStringEncodingUTF8)) {
+ return false;
+ }
+ str8->assign(buffer.get());
+ return true;
+}
+
+// Get CFDictionaryRef from `id` and call `on_window` against it. This function
+// returns false if native APIs fail, typically it indicates that the `id` does
+// not represent a window. `on_window` will not be called if false is returned
+// from this function.
+bool GetWindowRef(CGWindowID id,
+ rtc::FunctionView<void(CFDictionaryRef)> on_window) {
+ RTC_DCHECK(on_window);
+
+  // TODO(zijiehe): `id` is a 32-bit integer, so casting its address to an
+  // array of pointers does not seem safe enough. Maybe we should create
+  //   const void* arr[] = {
+  //     reinterpret_cast<const void*>(id),
+  //   };
+ CFArrayRef window_id_array =
+ CFArrayCreate(NULL, reinterpret_cast<const void**>(&id), 1, NULL);
+ CFArrayRef window_array =
+ CGWindowListCreateDescriptionFromArray(window_id_array);
+
+ bool result = false;
+ // TODO(zijiehe): CFArrayGetCount(window_array) should always return 1.
+ // Otherwise, we should treat it as failure.
+ if (window_array && CFArrayGetCount(window_array)) {
+ on_window(reinterpret_cast<CFDictionaryRef>(
+ CFArrayGetValueAtIndex(window_array, 0)));
+ result = true;
+ }
+
+ if (window_array) {
+ CFRelease(window_array);
+ }
+ CFRelease(window_id_array);
+ return result;
+}
+
+} // namespace
+
+bool GetWindowList(rtc::FunctionView<bool(CFDictionaryRef)> on_window,
+ bool ignore_minimized,
+ bool only_zero_layer) {
+ RTC_DCHECK(on_window);
+
+ // Only get on screen, non-desktop windows.
+ // According to
+ // https://developer.apple.com/documentation/coregraphics/cgwindowlistoption/1454105-optiononscreenonly
+  // , when kCGWindowListOptionOnScreenOnly is used, windows are returned in
+  // decreasing z-order.
+ CFArrayRef window_array = CGWindowListCopyWindowInfo(
+ kCGWindowListOptionOnScreenOnly | kCGWindowListExcludeDesktopElements,
+ kCGNullWindowID);
+ if (!window_array)
+ return false;
+
+ MacDesktopConfiguration desktop_config = MacDesktopConfiguration::GetCurrent(
+ MacDesktopConfiguration::TopLeftOrigin);
+
+  // Check windows to make sure they have an id and a title, and (when
+  // `only_zero_layer` is set) that they use window layer 0.
+ CFIndex count = CFArrayGetCount(window_array);
+ for (CFIndex i = 0; i < count; i++) {
+ CFDictionaryRef window = reinterpret_cast<CFDictionaryRef>(
+ CFArrayGetValueAtIndex(window_array, i));
+ if (!window) {
+ continue;
+ }
+
+ CFNumberRef window_id = reinterpret_cast<CFNumberRef>(
+ CFDictionaryGetValue(window, kCGWindowNumber));
+ if (!window_id) {
+ continue;
+ }
+
+ CFNumberRef window_layer = reinterpret_cast<CFNumberRef>(
+ CFDictionaryGetValue(window, kCGWindowLayer));
+ if (!window_layer) {
+ continue;
+ }
+
+ // Skip windows with layer!=0 (menu, dock).
+ int layer;
+ if (!CFNumberGetValue(window_layer, kCFNumberIntType, &layer)) {
+ continue;
+ }
+ if (only_zero_layer && layer != 0) {
+ continue;
+ }
+
+ // Skip windows that are minimized and not full screen.
+ if (ignore_minimized && !IsWindowOnScreen(window) &&
+ !IsWindowFullScreen(desktop_config, window)) {
+ continue;
+ }
+
+ // If window title is empty, only consider it if it is either on screen or
+ // fullscreen.
+ CFStringRef window_title = reinterpret_cast<CFStringRef>(
+ CFDictionaryGetValue(window, kCGWindowName));
+ if (!window_title && !IsWindowOnScreen(window) &&
+ !IsWindowFullScreen(desktop_config, window)) {
+ continue;
+ }
+
+ CFStringRef window_owner_name = reinterpret_cast<CFStringRef>(
+ CFDictionaryGetValue(window, kCGWindowOwnerName));
+    // Ignore the red dot status indicator shown in the status bar. Unlike the
+    // rest of the system UI it has a window_layer of 0, so it would otherwise
+    // be included. See crbug.com/1297731.
+ if (window_title && CFEqual(window_title, kStatusIndicator) &&
+ window_owner_name &&
+ CFEqual(window_owner_name, kStatusIndicatorOwnerName)) {
+ continue;
+ }
+
+ if (!on_window(window)) {
+ break;
+ }
+ }
+
+ CFRelease(window_array);
+ return true;
+}
+
+bool GetWindowList(DesktopCapturer::SourceList* windows,
+ bool ignore_minimized,
+ bool only_zero_layer) {
+  // Use a std::list so that iterators are preserved upon insertion and
+  // deletion.
+ std::list<DesktopCapturer::Source> sources;
+ std::map<int, std::list<DesktopCapturer::Source>::const_iterator> pid_itr_map;
+ const bool ret = GetWindowList(
+ [&sources, &pid_itr_map](CFDictionaryRef window) {
+ WindowId window_id = GetWindowId(window);
+ if (window_id != kNullWindowId) {
+ const std::string title = GetWindowTitle(window);
+ const int pid = GetWindowOwnerPid(window);
+          // Check if a window for the same pid has already been inserted.
+ std::map<int,
+ std::list<DesktopCapturer::Source>::const_iterator>::iterator
+ itr = pid_itr_map.find(pid);
+
+ // Only consider empty titles if the app has no other window with a
+ // proper title.
+ if (title.empty()) {
+ std::string owner_name = GetWindowOwnerName(window);
+
+              // At this time we do not know whether there will be other
+              // windows for the same pid unless they have already been
+              // inserted, hence the check in the map. Also skip the window
+              // if the owner name is empty.
+ if (!owner_name.empty() && (itr == pid_itr_map.end())) {
+ sources.push_back(DesktopCapturer::Source{window_id, pid, owner_name});
+ RTC_DCHECK(!sources.empty());
+ // Get an iterator on the last valid element in the source list.
+ std::list<DesktopCapturer::Source>::const_iterator last_source =
+ --sources.end();
+ pid_itr_map.insert(
+ std::pair<int,
+ std::list<DesktopCapturer::Source>::const_iterator>(
+ pid, last_source));
+ }
+ } else {
+ sources.push_back(DesktopCapturer::Source{window_id, pid, title});
+            // Once the window with the empty title has been removed, no other
+            // empty-titled windows are allowed for the same pid.
+ if (itr != pid_itr_map.end() && (itr->second != sources.end())) {
+ sources.erase(itr->second);
+            // std::list::end() never changes during the lifetime of that
+            // list.
+ itr->second = sources.end();
+ }
+ }
+ }
+ return true;
+ },
+ ignore_minimized, only_zero_layer);
+
+ if (!ret)
+ return false;
+
+ RTC_DCHECK(windows);
+ windows->reserve(windows->size() + sources.size());
+ std::copy(std::begin(sources), std::end(sources),
+ std::back_inserter(*windows));
+
+ return true;
+}
+
+// Returns true if the window is occupying a full screen.
+bool IsWindowFullScreen(const MacDesktopConfiguration& desktop_config,
+ CFDictionaryRef window) {
+ bool fullscreen = false;
+ CFDictionaryRef bounds_ref = reinterpret_cast<CFDictionaryRef>(
+ CFDictionaryGetValue(window, kCGWindowBounds));
+
+ CGRect bounds;
+ if (bounds_ref &&
+ CGRectMakeWithDictionaryRepresentation(bounds_ref, &bounds)) {
+ for (MacDisplayConfigurations::const_iterator it =
+ desktop_config.displays.begin();
+ it != desktop_config.displays.end(); it++) {
+ if (it->bounds.equals(
+ DesktopRect::MakeXYWH(bounds.origin.x, bounds.origin.y,
+ bounds.size.width, bounds.size.height))) {
+ fullscreen = true;
+ break;
+ }
+ }
+ }
+
+ return fullscreen;
+}
+
+bool IsWindowFullScreen(const MacDesktopConfiguration& desktop_config,
+ CGWindowID id) {
+ bool fullscreen = false;
+ GetWindowRef(id, [&](CFDictionaryRef window) {
+ fullscreen = IsWindowFullScreen(desktop_config, window);
+ });
+ return fullscreen;
+}
+
+bool IsWindowOnScreen(CFDictionaryRef window) {
+ CFBooleanRef on_screen = reinterpret_cast<CFBooleanRef>(
+ CFDictionaryGetValue(window, kCGWindowIsOnscreen));
+ return on_screen != NULL && CFBooleanGetValue(on_screen);
+}
+
+bool IsWindowOnScreen(CGWindowID id) {
+  bool on_screen = false;
+ if (GetWindowRef(id, [&on_screen](CFDictionaryRef window) {
+ on_screen = IsWindowOnScreen(window);
+ })) {
+ return on_screen;
+ }
+ return false;
+}
+
+std::string GetWindowTitle(CFDictionaryRef window) {
+ CFStringRef title = reinterpret_cast<CFStringRef>(
+ CFDictionaryGetValue(window, kCGWindowName));
+ std::string result;
+ if (title && ToUtf8(title, &result)) {
+ return result;
+ }
+
+ return std::string();
+}
+
+std::string GetWindowTitle(CGWindowID id) {
+ std::string title;
+ if (GetWindowRef(id, [&title](CFDictionaryRef window) {
+ title = GetWindowTitle(window);
+ })) {
+ return title;
+ }
+ return std::string();
+}
+
+std::string GetWindowOwnerName(CFDictionaryRef window) {
+ CFStringRef owner_name = reinterpret_cast<CFStringRef>(
+ CFDictionaryGetValue(window, kCGWindowOwnerName));
+ std::string result;
+ if (owner_name && ToUtf8(owner_name, &result)) {
+ return result;
+ }
+ return std::string();
+}
+
+std::string GetWindowOwnerName(CGWindowID id) {
+ std::string owner_name;
+ if (GetWindowRef(id, [&owner_name](CFDictionaryRef window) {
+ owner_name = GetWindowOwnerName(window);
+ })) {
+ return owner_name;
+ }
+ return std::string();
+}
+
+WindowId GetWindowId(CFDictionaryRef window) {
+ CFNumberRef window_id = reinterpret_cast<CFNumberRef>(
+ CFDictionaryGetValue(window, kCGWindowNumber));
+ if (!window_id) {
+ return kNullWindowId;
+ }
+
+  // Note: WindowId is 64-bit on a 64-bit system, but CGWindowID is always
+  // 32-bit. CFNumberGetValue() with kCFNumberIntType fills only 32 bits, so
+  // we read into a CGWindowID and let the return value widen.
+ CGWindowID id;
+ if (!CFNumberGetValue(window_id, kCFNumberIntType, &id)) {
+ return kNullWindowId;
+ }
+
+ return id;
+}
+
+int GetWindowOwnerPid(CFDictionaryRef window) {
+ CFNumberRef window_pid = reinterpret_cast<CFNumberRef>(
+ CFDictionaryGetValue(window, kCGWindowOwnerPID));
+ if (!window_pid) {
+ return 0;
+ }
+
+ int pid;
+ if (!CFNumberGetValue(window_pid, kCFNumberIntType, &pid)) {
+ return 0;
+ }
+
+ return pid;
+}
+
+int GetWindowOwnerPid(CGWindowID id) {
+  int pid = 0;
+ if (GetWindowRef(id, [&pid](CFDictionaryRef window) {
+ pid = GetWindowOwnerPid(window);
+ })) {
+ return pid;
+ }
+ return 0;
+}
+
+float GetScaleFactorAtPosition(const MacDesktopConfiguration& desktop_config,
+ DesktopVector position) {
+  // Find the DIP to physical pixel scale for the screen that contains
+  // `position`.
+ for (auto it = desktop_config.displays.begin();
+ it != desktop_config.displays.end(); ++it) {
+ if (it->bounds.Contains(position)) {
+ return it->dip_to_pixel_scale;
+ }
+ }
+ return 1;
+}
+
+float GetWindowScaleFactor(CGWindowID id, DesktopSize size) {
+ DesktopRect window_bounds = GetWindowBounds(id);
+ float scale = 1.0f;
+
+ if (!window_bounds.is_empty() && !size.is_empty()) {
+    // Use floating-point division; both operands are integers.
+    float scale_x = static_cast<float>(size.width()) / window_bounds.width();
+    float scale_y = static_cast<float>(size.height()) / window_bounds.height();
+    // Currently the scale in the X and Y directions must be the same.
+ if ((std::fabs(scale_x - scale_y) <=
+ std::numeric_limits<float>::epsilon() * std::max(scale_x, scale_y)) &&
+ scale_x > 0.0f) {
+ scale = scale_x;
+ }
+ }
+
+ return scale;
+}
+
+DesktopRect GetWindowBounds(CFDictionaryRef window) {
+ CFDictionaryRef window_bounds = reinterpret_cast<CFDictionaryRef>(
+ CFDictionaryGetValue(window, kCGWindowBounds));
+ if (!window_bounds) {
+ return DesktopRect();
+ }
+
+ CGRect gc_window_rect;
+ if (!CGRectMakeWithDictionaryRepresentation(window_bounds, &gc_window_rect)) {
+ return DesktopRect();
+ }
+
+ return DesktopRect::MakeXYWH(gc_window_rect.origin.x, gc_window_rect.origin.y,
+ gc_window_rect.size.width,
+ gc_window_rect.size.height);
+}
+
+DesktopRect GetWindowBounds(CGWindowID id) {
+ DesktopRect result;
+ if (GetWindowRef(id, [&result](CFDictionaryRef window) {
+ result = GetWindowBounds(window);
+ })) {
+ return result;
+ }
+ return DesktopRect();
+}
+
+} // namespace webrtc
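
A usage sketch for the FunctionView-based overload (`LogCapturableWindows` is a hypothetical helper): the lambda is invoked once per window in decreasing z-order, and returning false stops the enumeration early.

#include "modules/desktop_capture/mac/window_list_utils.h"
#include "rtc_base/logging.h"

void LogCapturableWindows() {
  webrtc::GetWindowList(
      [](CFDictionaryRef window) {
        RTC_LOG(LS_INFO) << "window " << webrtc::GetWindowId(window) << ": "
                         << webrtc::GetWindowTitle(window);
        return true;  // Keep enumerating; false would stop early.
      },
      /*ignore_minimized=*/true,
      /*only_zero_layer=*/true);
}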
diff --git a/third_party/libwebrtc/modules/desktop_capture/mac/window_list_utils.h b/third_party/libwebrtc/modules/desktop_capture/mac/window_list_utils.h
new file mode 100644
index 0000000000..a9b1e7007c
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/mac/window_list_utils.h
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_DESKTOP_CAPTURE_MAC_WINDOW_LIST_UTILS_H_
+#define MODULES_DESKTOP_CAPTURE_MAC_WINDOW_LIST_UTILS_H_
+
+#include <ApplicationServices/ApplicationServices.h>
+
+#include <string>
+#include "api/function_view.h"
+#include "modules/desktop_capture/desktop_capture_types.h"
+#include "modules/desktop_capture/desktop_capturer.h"
+#include "modules/desktop_capture/desktop_geometry.h"
+#include "modules/desktop_capture/mac/desktop_configuration.h"
+
+namespace webrtc {
+
+// Iterates all on-screen windows in decreasing z-order and sends them
+// one-by-one to `on_window` function. If `on_window` returns false, this
+// function returns immediately. GetWindowList() returns false if native APIs
+// failed. Menus, dock (if `only_zero_layer`), minimized windows (if
+// `ignore_minimized` is true) and any windows which do not have a valid window
+// id or title will be ignored.
+bool GetWindowList(rtc::FunctionView<bool(CFDictionaryRef)> on_window,
+ bool ignore_minimized,
+ bool only_zero_layer);
+
+// Another helper function to get the on-screen windows.
+bool GetWindowList(DesktopCapturer::SourceList* windows,
+ bool ignore_minimized,
+ bool only_zero_layer);
+
+// Returns true if the window is occupying a full screen.
+bool IsWindowFullScreen(const MacDesktopConfiguration& desktop_config,
+ CFDictionaryRef window);
+
+// Returns true if the window is occupying a full screen.
+bool IsWindowFullScreen(const MacDesktopConfiguration& desktop_config,
+ CGWindowID id);
+
+// Returns true if the `window` is on screen. This function returns false if
+// native APIs fail.
+bool IsWindowOnScreen(CFDictionaryRef window);
+
+// Returns true if the window is on screen. This function returns false if
+// native APIs fail or `id` cannot be found.
+bool IsWindowOnScreen(CGWindowID id);
+
+// Returns utf-8 encoded title of `window`. If `window` is not a window or no
+// valid title can be retrieved, this function returns an empty string.
+std::string GetWindowTitle(CFDictionaryRef window);
+
+// Returns utf-8 encoded title of window `id`. If `id` cannot be found or no
+// valid title can be retrieved, this function returns an empty string.
+std::string GetWindowTitle(CGWindowID id);
+
+// Returns utf-8 encoded owner name of `window`. If `window` is not a window or
+// if no valid owner name can be retrieved, returns an empty string.
+std::string GetWindowOwnerName(CFDictionaryRef window);
+
+// Returns utf-8 encoded owner name of the given window `id`. If `id` cannot be
+// found or if no valid owner name can be retrieved, returns an empty string.
+std::string GetWindowOwnerName(CGWindowID id);
+
+// Returns id of `window`. If `window` is not a window or the window id cannot
+// be retrieved, this function returns kNullWindowId.
+WindowId GetWindowId(CFDictionaryRef window);
+
+// Returns the pid of the process owning `window`. Return 0 if `window` is not
+// a window or no valid owner can be retrieved.
+int GetWindowOwnerPid(CFDictionaryRef window);
+
+// Returns the pid of the process owning the window `id`. Return 0 if `id`
+// cannot be found or no valid owner can be retrieved.
+int GetWindowOwnerPid(CGWindowID id);
+
+// Returns the DIP to physical pixel scale at `position`. `position` is in
+// *unscaled* system coordinates, i.e. it is device-independent and the primary
+// monitor starts from (0, 0). If `position` is outside of the system display,
+// this function returns 1.
+float GetScaleFactorAtPosition(const MacDesktopConfiguration& desktop_config,
+ DesktopVector position);
+
+// Returns the DIP to physical pixel scale factor of the window with `id`.
+// The bounds of the window with `id` are in DIP coordinates, while `size` is
+// the CGImage size of the window with `id` in physical coordinates; comparing
+// them gives the current scale factor.
+// If the window overlaps multiple monitors, the OS decides which monitor the
+// window is displayed on and applies that monitor's scale factor to the
+// window, so this method still works.
+float GetWindowScaleFactor(CGWindowID id, DesktopSize size);
+
+// Returns the bounds of `window`. If `window` is not a window or the bounds
+// cannot be retrieved, this function returns an empty DesktopRect. The returned
+// DesktopRect is in system coordinate, i.e. the primary monitor always starts
+// from (0, 0).
+// Deprecated: This function should be avoided in favor of the overload with
+// MacDesktopConfiguration.
+DesktopRect GetWindowBounds(CFDictionaryRef window);
+
+// Returns the bounds of window with `id`. If `id` does not represent a window
+// or the bounds cannot be retrieved, this function returns an empty
+// DesktopRect. The returned DesktopRect is in system coordinates.
+// Deprecated: This function should be avoided in favor of the overload with
+// MacDesktopConfiguration.
+DesktopRect GetWindowBounds(CGWindowID id);
+
+} // namespace webrtc
+
+#endif // MODULES_DESKTOP_CAPTURE_MAC_WINDOW_LIST_UTILS_H_
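
The comment on GetWindowScaleFactor() packs a small calculation into prose. Here is a minimal sketch of that arithmetic, outside the patch: ComputeScaleFactor and its parameter names are hypothetical; in the real code the DIP bounds come from the window list and the pixel size from the captured CGImage.

#include "modules/desktop_capture/desktop_geometry.h"

// Sketch only: derives the DIP-to-physical scale the way the comment on
// GetWindowScaleFactor() describes, by comparing DIP bounds with pixel size.
float ComputeScaleFactor(webrtc::DesktopRect dip_bounds,
                         webrtc::DesktopSize pixel_size) {
  if (dip_bounds.width() <= 0 || dip_bounds.height() <= 0)
    return 1.0f;  // Mirror the "return 1 on failure" convention used above.
  // A window spanning several monitors is assigned to one of them by the OS,
  // so a single width ratio is well defined.
  return static_cast<float>(pixel_size.width()) / dip_bounds.width();
}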
diff --git a/third_party/libwebrtc/modules/desktop_capture/mock_desktop_capturer_callback.cc b/third_party/libwebrtc/modules/desktop_capture/mock_desktop_capturer_callback.cc
new file mode 100644
index 0000000000..de77d99e18
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/mock_desktop_capturer_callback.cc
@@ -0,0 +1,23 @@
+/* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/mock_desktop_capturer_callback.h"
+
+namespace webrtc {
+
+MockDesktopCapturerCallback::MockDesktopCapturerCallback() = default;
+MockDesktopCapturerCallback::~MockDesktopCapturerCallback() = default;
+
+void MockDesktopCapturerCallback::OnCaptureResult(
+ DesktopCapturer::Result result,
+ std::unique_ptr<DesktopFrame> frame) {
+ OnCaptureResultPtr(result, &frame);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/mock_desktop_capturer_callback.h b/third_party/libwebrtc/modules/desktop_capture/mock_desktop_capturer_callback.h
new file mode 100644
index 0000000000..08de5ad737
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/mock_desktop_capturer_callback.h
@@ -0,0 +1,39 @@
+/* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_DESKTOP_CAPTURE_MOCK_DESKTOP_CAPTURER_CALLBACK_H_
+#define MODULES_DESKTOP_CAPTURE_MOCK_DESKTOP_CAPTURER_CALLBACK_H_
+
+#include <memory>
+
+#include "modules/desktop_capture/desktop_capturer.h"
+#include "test/gmock.h"
+
+namespace webrtc {
+
+class MockDesktopCapturerCallback : public DesktopCapturer::Callback {
+ public:
+ MockDesktopCapturerCallback();
+ ~MockDesktopCapturerCallback() override;
+
+ MockDesktopCapturerCallback(const MockDesktopCapturerCallback&) = delete;
+ MockDesktopCapturerCallback& operator=(const MockDesktopCapturerCallback&) =
+ delete;
+
+ MOCK_METHOD(void,
+ OnCaptureResultPtr,
+ (DesktopCapturer::Result result,
+ std::unique_ptr<DesktopFrame>* frame));
+ void OnCaptureResult(DesktopCapturer::Result result,
+ std::unique_ptr<DesktopFrame> frame) final override;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_DESKTOP_CAPTURE_MOCK_DESKTOP_CAPTURER_CALLBACK_H_
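
One note on the OnCaptureResult()/OnCaptureResultPtr() indirection above: gmock cannot mock a method that takes a move-only std::unique_ptr by value, so the real callback forwards to a mockable proxy that takes a pointer instead. A minimal usage sketch (ExpectOneSuccessfulCapture is a hypothetical helper, not part of the patch):

#include "modules/desktop_capture/mock_desktop_capturer_callback.h"
#include "test/gmock.h"

// Sketch only: expectations go on the pointer-taking proxy, while production
// code still calls the regular OnCaptureResult() entry point.
void ExpectOneSuccessfulCapture(
    webrtc::MockDesktopCapturerCallback* callback) {
  EXPECT_CALL(*callback,
              OnCaptureResultPtr(webrtc::DesktopCapturer::Result::SUCCESS,
                                 testing::_))
      .Times(1);
}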
diff --git a/third_party/libwebrtc/modules/desktop_capture/mouse_cursor.cc b/third_party/libwebrtc/modules/desktop_capture/mouse_cursor.cc
new file mode 100644
index 0000000000..e826552b0f
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/mouse_cursor.cc
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/mouse_cursor.h"
+
+#include "modules/desktop_capture/desktop_frame.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+MouseCursor::MouseCursor() {}
+
+MouseCursor::MouseCursor(DesktopFrame* image, const DesktopVector& hotspot)
+ : image_(image), hotspot_(hotspot) {
+ RTC_DCHECK(0 <= hotspot_.x() && hotspot_.x() <= image_->size().width());
+ RTC_DCHECK(0 <= hotspot_.y() && hotspot_.y() <= image_->size().height());
+}
+
+MouseCursor::~MouseCursor() {}
+
+// static
+MouseCursor* MouseCursor::CopyOf(const MouseCursor& cursor) {
+ return cursor.image()
+ ? new MouseCursor(BasicDesktopFrame::CopyOf(*cursor.image()),
+ cursor.hotspot())
+ : new MouseCursor();
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/mouse_cursor.h b/third_party/libwebrtc/modules/desktop_capture/mouse_cursor.h
new file mode 100644
index 0000000000..2dd793179b
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/mouse_cursor.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_DESKTOP_CAPTURE_MOUSE_CURSOR_H_
+#define MODULES_DESKTOP_CAPTURE_MOUSE_CURSOR_H_
+
+#include <memory>
+
+#include "modules/desktop_capture/desktop_frame.h"
+#include "modules/desktop_capture/desktop_geometry.h"
+#include "rtc_base/system/rtc_export.h"
+
+namespace webrtc {
+
+class RTC_EXPORT MouseCursor {
+ public:
+ MouseCursor();
+
+ // Takes ownership of `image`. `hotspot` must be within `image` boundaries.
+ MouseCursor(DesktopFrame* image, const DesktopVector& hotspot);
+
+ ~MouseCursor();
+
+ MouseCursor(const MouseCursor&) = delete;
+ MouseCursor& operator=(const MouseCursor&) = delete;
+
+ static MouseCursor* CopyOf(const MouseCursor& cursor);
+
+ void set_image(DesktopFrame* image) { image_.reset(image); }
+ const DesktopFrame* image() const { return image_.get(); }
+
+ void set_hotspot(const DesktopVector& hotspot) { hotspot_ = hotspot; }
+ const DesktopVector& hotspot() const { return hotspot_; }
+
+ private:
+ std::unique_ptr<DesktopFrame> image_;
+ DesktopVector hotspot_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_DESKTOP_CAPTURE_MOUSE_CURSOR_H_
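
Since both the constructor and CopyOf() traffic in raw pointers with ownership transfer, call sites typically wrap the results in std::unique_ptr right away. A minimal sketch (both helper names are hypothetical):

#include <memory>

#include "modules/desktop_capture/desktop_frame.h"
#include "modules/desktop_capture/desktop_geometry.h"
#include "modules/desktop_capture/mouse_cursor.h"

// Sketch only: MouseCursor takes ownership of the raw frame pointer, so
// release() is the correct hand-off; the hotspot is DCHECKed to lie within
// the image bounds.
std::unique_ptr<webrtc::MouseCursor> MakeCursor(
    std::unique_ptr<webrtc::DesktopFrame> image,
    const webrtc::DesktopVector& hotspot) {
  return std::make_unique<webrtc::MouseCursor>(image.release(), hotspot);
}

// Sketch only: CopyOf() returns a raw pointer that the caller owns.
std::unique_ptr<webrtc::MouseCursor> MakeCopy(const webrtc::MouseCursor& src) {
  return std::unique_ptr<webrtc::MouseCursor>(
      webrtc::MouseCursor::CopyOf(src));
}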
diff --git a/third_party/libwebrtc/modules/desktop_capture/mouse_cursor_monitor.h b/third_party/libwebrtc/modules/desktop_capture/mouse_cursor_monitor.h
new file mode 100644
index 0000000000..e88a5b7201
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/mouse_cursor_monitor.h
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_DESKTOP_CAPTURE_MOUSE_CURSOR_MONITOR_H_
+#define MODULES_DESKTOP_CAPTURE_MOUSE_CURSOR_MONITOR_H_
+
+#include <memory>
+
+#include "modules/desktop_capture/desktop_capture_types.h"
+#include "modules/desktop_capture/desktop_geometry.h"
+#include "rtc_base/system/rtc_export.h"
+
+namespace webrtc {
+
+class DesktopCaptureOptions;
+class DesktopFrame;
+class MouseCursor;
+
+// Captures mouse shape and position.
+class MouseCursorMonitor {
+ public:
+ // Deprecated: CursorState will not be provided.
+ enum CursorState {
+    // Cursor is on top of the window, including window decorations.
+ INSIDE,
+
+ // Cursor is outside of the window.
+ OUTSIDE,
+ };
+
+ enum Mode {
+    // Capture only the shape of the mouse cursor, not its position.
+ SHAPE_ONLY,
+
+    // Capture both the mouse cursor shape and its position.
+ SHAPE_AND_POSITION,
+ };
+
+ // Callback interface used to pass current mouse cursor position and shape.
+ class Callback {
+ public:
+ // Called in response to Capture() when the cursor shape has changed. Must
+ // take ownership of `cursor`.
+ virtual void OnMouseCursor(MouseCursor* cursor) = 0;
+
+    // Called in response to Capture(). `position` indicates the cursor
+    // position relative to the `window` specified in the constructor.
+ // Deprecated: use the following overload instead.
+ virtual void OnMouseCursorPosition(CursorState state,
+ const DesktopVector& position) {}
+
+    // Called in response to Capture(). `position` indicates the cursor's
+    // absolute position on the system in fullscreen coordinates, i.e. the
+    // top-left monitor always starts from (0, 0).
+    // The coordinates of the position are controlled by the OS, but they are
+    // always consistent with DesktopFrame.rect().top_left().
+    // TODO(zijiehe): Ensure all implementations return the absolute position.
+    // TODO(zijiehe): Currently this overload works correctly only when
+    // capturing the mouse cursor against the full desktop.
+ virtual void OnMouseCursorPosition(const DesktopVector& position) {}
+
+ protected:
+ virtual ~Callback() {}
+ };
+
+ virtual ~MouseCursorMonitor() {}
+
+ // Creates a capturer that notifies of mouse cursor events while the cursor is
+ // over the specified window.
+ //
+ // Deprecated: use Create() function.
+ static MouseCursorMonitor* CreateForWindow(
+ const DesktopCaptureOptions& options,
+ WindowId window);
+
+ // Creates a capturer that monitors the mouse cursor shape and position over
+ // the specified screen.
+ //
+ // Deprecated: use Create() function.
+ static RTC_EXPORT MouseCursorMonitor* CreateForScreen(
+ const DesktopCaptureOptions& options,
+ ScreenId screen);
+
+ // Creates a capturer that monitors the mouse cursor shape and position across
+ // the entire desktop. The capturer ensures that the top-left monitor starts
+ // from (0, 0).
+ static std::unique_ptr<MouseCursorMonitor> Create(
+ const DesktopCaptureOptions& options);
+
+  // Initializes the monitor with the `callback`, which must remain valid
+  // until the capturer is destroyed.
+ virtual void Init(Callback* callback, Mode mode) = 0;
+
+  // Captures the current cursor shape and position (depending on the `mode`
+  // passed to Init()). Calls Callback::OnMouseCursor() if the cursor shape
+  // has changed since the last call (or when Capture() is called for the
+  // first time), and then Callback::OnMouseCursorPosition() if the mode is
+  // set to SHAPE_AND_POSITION.
+ virtual void Capture() = 0;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_DESKTOP_CAPTURE_MOUSE_CURSOR_MONITOR_H_
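
To see the Init()/Capture() contract in one place, a minimal Callback sketch that takes ownership of the shape, as OnMouseCursor() requires, and records the latest position. CursorLogger is hypothetical; the unit test later in this patch exercises the same flow through gtest.

#include <memory>

#include "modules/desktop_capture/desktop_geometry.h"
#include "modules/desktop_capture/mouse_cursor.h"
#include "modules/desktop_capture/mouse_cursor_monitor.h"

class CursorLogger : public webrtc::MouseCursorMonitor::Callback {
 public:
  void OnMouseCursor(webrtc::MouseCursor* cursor) override {
    cursor_.reset(cursor);  // Required: the callback owns `cursor`.
  }
  void OnMouseCursorPosition(const webrtc::DesktopVector& position) override {
    position_ = position;  // Only delivered in SHAPE_AND_POSITION mode.
  }

 private:
  std::unique_ptr<webrtc::MouseCursor> cursor_;
  webrtc::DesktopVector position_;
};

With a CursorLogger logger, Init(&logger, SHAPE_AND_POSITION) is called once; each Capture() then produces at most one OnMouseCursor() call and, in SHAPE_AND_POSITION mode, one OnMouseCursorPosition() call.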
diff --git a/third_party/libwebrtc/modules/desktop_capture/mouse_cursor_monitor_linux.cc b/third_party/libwebrtc/modules/desktop_capture/mouse_cursor_monitor_linux.cc
new file mode 100644
index 0000000000..6162970074
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/mouse_cursor_monitor_linux.cc
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <memory>
+
+#include "modules/desktop_capture/desktop_capture_types.h"
+#include "modules/desktop_capture/mouse_cursor_monitor.h"
+
+#if defined(WEBRTC_USE_X11)
+#include "modules/desktop_capture/linux/x11/mouse_cursor_monitor_x11.h"
+#endif // defined(WEBRTC_USE_X11)
+
+#if defined(WEBRTC_USE_PIPEWIRE) && !defined(WEBRTC_MOZILLA_BUILD)
+#include "modules/desktop_capture/linux/wayland/mouse_cursor_monitor_pipewire.h"
+#endif // defined(WEBRTC_USE_PIPEWIRE)
+
+namespace webrtc {
+
+// static
+MouseCursorMonitor* MouseCursorMonitor::CreateForWindow(
+ const DesktopCaptureOptions& options,
+ WindowId window) {
+#if defined(WEBRTC_USE_X11)
+ return MouseCursorMonitorX11::CreateForWindow(options, window);
+#else
+ return nullptr;
+#endif // defined(WEBRTC_USE_X11)
+}
+
+// static
+MouseCursorMonitor* MouseCursorMonitor::CreateForScreen(
+ const DesktopCaptureOptions& options,
+ ScreenId screen) {
+#if defined(WEBRTC_USE_X11)
+ return MouseCursorMonitorX11::CreateForScreen(options, screen);
+#else
+ return nullptr;
+#endif // defined(WEBRTC_USE_X11)
+}
+
+// static
+std::unique_ptr<MouseCursorMonitor> MouseCursorMonitor::Create(
+ const DesktopCaptureOptions& options) {
+#if defined(WEBRTC_USE_PIPEWIRE) && !defined(WEBRTC_MOZILLA_BUILD)
+ if (options.allow_pipewire() && DesktopCapturer::IsRunningUnderWayland() &&
+ options.screencast_stream()) {
+ return std::make_unique<MouseCursorMonitorPipeWire>(options);
+ }
+#endif // defined(WEBRTC_USE_PIPEWIRE)
+
+#if defined(WEBRTC_USE_X11)
+ return MouseCursorMonitorX11::Create(options);
+#else
+ return nullptr;
+#endif // defined(WEBRTC_USE_X11)
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/mouse_cursor_monitor_mac.mm b/third_party/libwebrtc/modules/desktop_capture/mouse_cursor_monitor_mac.mm
new file mode 100644
index 0000000000..512103ab5e
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/mouse_cursor_monitor_mac.mm
@@ -0,0 +1,213 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/mouse_cursor_monitor.h"
+
+#include <memory>
+
+#include <ApplicationServices/ApplicationServices.h>
+#include <Cocoa/Cocoa.h>
+#include <CoreFoundation/CoreFoundation.h>
+
+#include "api/scoped_refptr.h"
+#include "modules/desktop_capture/desktop_capture_options.h"
+#include "modules/desktop_capture/desktop_capture_types.h"
+#include "modules/desktop_capture/desktop_frame.h"
+#include "modules/desktop_capture/mac/desktop_configuration.h"
+#include "modules/desktop_capture/mac/desktop_configuration_monitor.h"
+#include "modules/desktop_capture/mac/window_list_utils.h"
+#include "modules/desktop_capture/mouse_cursor.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+namespace {
+CGImageRef CreateScaledCGImage(CGImageRef image, int width, int height) {
+ // Create context, keeping original image properties.
+ CGColorSpaceRef colorspace = CGImageGetColorSpace(image);
+ CGContextRef context = CGBitmapContextCreate(nullptr,
+ width,
+ height,
+ CGImageGetBitsPerComponent(image),
+ width * DesktopFrame::kBytesPerPixel,
+ colorspace,
+ CGImageGetBitmapInfo(image));
+
+ if (!context) return nil;
+
+ // Draw image to context, resizing it.
+ CGContextDrawImage(context, CGRectMake(0, 0, width, height), image);
+ // Extract resulting image from context.
+ CGImageRef imgRef = CGBitmapContextCreateImage(context);
+ CGContextRelease(context);
+
+ return imgRef;
+}
+} // namespace
+
+class MouseCursorMonitorMac : public MouseCursorMonitor {
+ public:
+ MouseCursorMonitorMac(const DesktopCaptureOptions& options,
+ CGWindowID window_id,
+ ScreenId screen_id);
+ ~MouseCursorMonitorMac() override;
+
+ void Init(Callback* callback, Mode mode) override;
+ void Capture() override;
+
+ private:
+ static void DisplaysReconfiguredCallback(CGDirectDisplayID display,
+ CGDisplayChangeSummaryFlags flags,
+ void *user_parameter);
+ void DisplaysReconfigured(CGDirectDisplayID display,
+ CGDisplayChangeSummaryFlags flags);
+
+ void CaptureImage(float scale);
+
+ rtc::scoped_refptr<DesktopConfigurationMonitor> configuration_monitor_;
+ CGWindowID window_id_;
+ ScreenId screen_id_;
+ Callback* callback_ = NULL;
+ Mode mode_;
+ __strong NSImage* last_cursor_ = NULL;
+};
+
+MouseCursorMonitorMac::MouseCursorMonitorMac(const DesktopCaptureOptions& options,
+ CGWindowID window_id,
+ ScreenId screen_id)
+ : configuration_monitor_(options.configuration_monitor()),
+ window_id_(window_id),
+ screen_id_(screen_id),
+ mode_(SHAPE_AND_POSITION) {
+ RTC_DCHECK(window_id == kCGNullWindowID || screen_id == kInvalidScreenId);
+}
+
+MouseCursorMonitorMac::~MouseCursorMonitorMac() {}
+
+void MouseCursorMonitorMac::Init(Callback* callback, Mode mode) {
+ RTC_DCHECK(!callback_);
+ RTC_DCHECK(callback);
+
+ callback_ = callback;
+ mode_ = mode;
+}
+
+void MouseCursorMonitorMac::Capture() {
+ RTC_DCHECK(callback_);
+
+ CGEventRef event = CGEventCreate(NULL);
+ CGPoint gc_position = CGEventGetLocation(event);
+ CFRelease(event);
+
+ DesktopVector position(gc_position.x, gc_position.y);
+
+ MacDesktopConfiguration configuration =
+ configuration_monitor_->desktop_configuration();
+ float scale = GetScaleFactorAtPosition(configuration, position);
+
+ CaptureImage(scale);
+
+ if (mode_ != SHAPE_AND_POSITION)
+ return;
+
+  // Always report the cursor position in DIP pixels.
+ callback_->OnMouseCursorPosition(
+ position.subtract(configuration.bounds.top_left()));
+}
+
+void MouseCursorMonitorMac::CaptureImage(float scale) {
+ NSCursor* nscursor = [NSCursor currentSystemCursor];
+
+ NSImage* nsimage = [nscursor image];
+ if (nsimage == nil || !nsimage.isValid) {
+ return;
+ }
+ NSSize nssize = [nsimage size]; // DIP size
+
+  // No need to capture the cursor image if it's unchanged since the last
+  // capture.
+  if (last_cursor_ &&
+      [[nsimage TIFFRepresentation] isEqual:[last_cursor_ TIFFRepresentation]])
+    return;
+ last_cursor_ = nsimage;
+
+ DesktopSize size(round(nssize.width * scale),
+ round(nssize.height * scale)); // Pixel size
+ NSPoint nshotspot = [nscursor hotSpot];
+ DesktopVector hotspot(
+ std::max(0,
+ std::min(size.width(), static_cast<int>(nshotspot.x * scale))),
+ std::max(0,
+ std::min(size.height(), static_cast<int>(nshotspot.y * scale))));
+ CGImageRef cg_image =
+ [nsimage CGImageForProposedRect:NULL context:nil hints:nil];
+ if (!cg_image)
+ return;
+
+  // Before 10.12, OSX may report a 1x cursor on a Retina screen. (See
+  // crbug.com/632995.) After 10.12, OSX may report a 2x cursor on a
+  // non-Retina screen. (See crbug.com/671436.) So scale the cursor if needed.
+ CGImageRef scaled_cg_image = nil;
+ if (CGImageGetWidth(cg_image) != static_cast<size_t>(size.width())) {
+ scaled_cg_image = CreateScaledCGImage(cg_image, size.width(), size.height());
+ if (scaled_cg_image != nil) {
+ cg_image = scaled_cg_image;
+ }
+ }
+ if (CGImageGetBitsPerPixel(cg_image) != DesktopFrame::kBytesPerPixel * 8 ||
+ CGImageGetWidth(cg_image) != static_cast<size_t>(size.width()) ||
+ CGImageGetBitsPerComponent(cg_image) != 8) {
+ if (scaled_cg_image != nil) CGImageRelease(scaled_cg_image);
+ return;
+ }
+
+ CGDataProviderRef provider = CGImageGetDataProvider(cg_image);
+ CFDataRef image_data_ref = CGDataProviderCopyData(provider);
+ if (image_data_ref == NULL) {
+ if (scaled_cg_image != nil) CGImageRelease(scaled_cg_image);
+ return;
+ }
+
+ const uint8_t* src_data =
+ reinterpret_cast<const uint8_t*>(CFDataGetBytePtr(image_data_ref));
+
+ // Create a MouseCursor that describes the cursor and pass it to
+ // the client.
+ std::unique_ptr<DesktopFrame> image(
+ new BasicDesktopFrame(DesktopSize(size.width(), size.height())));
+
+ int src_stride = CGImageGetBytesPerRow(cg_image);
+ image->CopyPixelsFrom(src_data, src_stride, DesktopRect::MakeSize(size));
+
+ CFRelease(image_data_ref);
+ if (scaled_cg_image != nil) CGImageRelease(scaled_cg_image);
+
+ std::unique_ptr<MouseCursor> cursor(
+ new MouseCursor(image.release(), hotspot));
+
+ callback_->OnMouseCursor(cursor.release());
+}
+
+MouseCursorMonitor* MouseCursorMonitor::CreateForWindow(
+ const DesktopCaptureOptions& options, WindowId window) {
+ return new MouseCursorMonitorMac(options, window, kInvalidScreenId);
+}
+
+MouseCursorMonitor* MouseCursorMonitor::CreateForScreen(
+ const DesktopCaptureOptions& options,
+ ScreenId screen) {
+ return new MouseCursorMonitorMac(options, kCGNullWindowID, screen);
+}
+
+std::unique_ptr<MouseCursorMonitor> MouseCursorMonitor::Create(
+ const DesktopCaptureOptions& options) {
+ return std::unique_ptr<MouseCursorMonitor>(
+ CreateForScreen(options, kFullDesktopScreenId));
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/mouse_cursor_monitor_null.cc b/third_party/libwebrtc/modules/desktop_capture/mouse_cursor_monitor_null.cc
new file mode 100644
index 0000000000..ab1bc2fa33
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/mouse_cursor_monitor_null.cc
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stddef.h>
+
+#include <memory>
+
+#include "modules/desktop_capture/desktop_capture_types.h"
+#include "modules/desktop_capture/mouse_cursor_monitor.h"
+
+namespace webrtc {
+
+MouseCursorMonitor* MouseCursorMonitor::CreateForWindow(
+ const DesktopCaptureOptions& options,
+ WindowId window) {
+ return NULL;
+}
+
+MouseCursorMonitor* MouseCursorMonitor::CreateForScreen(
+ const DesktopCaptureOptions& options,
+ ScreenId screen) {
+ return NULL;
+}
+
+std::unique_ptr<MouseCursorMonitor> MouseCursorMonitor::Create(
+ const DesktopCaptureOptions& options) {
+ return std::unique_ptr<MouseCursorMonitor>(
+ CreateForScreen(options, kFullDesktopScreenId));
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/mouse_cursor_monitor_unittest.cc b/third_party/libwebrtc/modules/desktop_capture/mouse_cursor_monitor_unittest.cc
new file mode 100644
index 0000000000..f771276a2b
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/mouse_cursor_monitor_unittest.cc
@@ -0,0 +1,128 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/mouse_cursor_monitor.h"
+
+#include <stddef.h>
+
+#include <memory>
+
+#include "modules/desktop_capture/desktop_capture_options.h"
+#include "modules/desktop_capture/desktop_capturer.h"
+#include "modules/desktop_capture/desktop_frame.h"
+#include "modules/desktop_capture/mouse_cursor.h"
+#include "rtc_base/checks.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+class MouseCursorMonitorTest : public ::testing::Test,
+ public MouseCursorMonitor::Callback {
+ public:
+ MouseCursorMonitorTest() : position_received_(false) {}
+
+ // MouseCursorMonitor::Callback interface
+ void OnMouseCursor(MouseCursor* cursor_image) override {
+ cursor_image_.reset(cursor_image);
+ }
+
+ void OnMouseCursorPosition(const DesktopVector& position) override {
+ position_ = position;
+ position_received_ = true;
+ }
+
+ protected:
+ std::unique_ptr<MouseCursor> cursor_image_;
+ DesktopVector position_;
+ bool position_received_;
+};
+
+// TODO(sergeyu): On Mac we need to initialize NSApplication before running the
+// tests. Figure out how to do that without breaking other tests in
+// modules_unittests and enable these tests on Mac.
+// https://code.google.com/p/webrtc/issues/detail?id=2532
+//
+// Disabled on Windows due to flake, see:
+// https://code.google.com/p/webrtc/issues/detail?id=3408
+// Disabled on Linux due to flake, see:
+// https://code.google.com/p/webrtc/issues/detail?id=3245
+#if !defined(WEBRTC_MAC) && !defined(WEBRTC_WIN) && !defined(WEBRTC_LINUX)
+#define MAYBE(x) x
+#else
+#define MAYBE(x) DISABLED_##x
+#endif
+
+TEST_F(MouseCursorMonitorTest, MAYBE(FromScreen)) {
+ std::unique_ptr<MouseCursorMonitor> capturer(
+ MouseCursorMonitor::CreateForScreen(
+ DesktopCaptureOptions::CreateDefault(),
+ webrtc::kFullDesktopScreenId));
+ RTC_DCHECK(capturer.get());
+ capturer->Init(this, MouseCursorMonitor::SHAPE_AND_POSITION);
+ capturer->Capture();
+
+ EXPECT_TRUE(cursor_image_.get());
+ EXPECT_GE(cursor_image_->hotspot().x(), 0);
+ EXPECT_LE(cursor_image_->hotspot().x(),
+ cursor_image_->image()->size().width());
+ EXPECT_GE(cursor_image_->hotspot().y(), 0);
+ EXPECT_LE(cursor_image_->hotspot().y(),
+ cursor_image_->image()->size().height());
+
+ EXPECT_TRUE(position_received_);
+}
+
+TEST_F(MouseCursorMonitorTest, MAYBE(FromWindow)) {
+ DesktopCaptureOptions options = DesktopCaptureOptions::CreateDefault();
+
+ // First get list of windows.
+ std::unique_ptr<DesktopCapturer> window_capturer(
+ DesktopCapturer::CreateWindowCapturer(options));
+
+ // If window capturing is not supported then skip this test.
+ if (!window_capturer.get())
+ return;
+
+ DesktopCapturer::SourceList sources;
+ EXPECT_TRUE(window_capturer->GetSourceList(&sources));
+
+ // Iterate over all windows and try capturing mouse cursor for each of them.
+ for (size_t i = 0; i < sources.size(); ++i) {
+ cursor_image_.reset();
+ position_received_ = false;
+
+ std::unique_ptr<MouseCursorMonitor> capturer(
+ MouseCursorMonitor::CreateForWindow(
+ DesktopCaptureOptions::CreateDefault(), sources[i].id));
+ RTC_DCHECK(capturer.get());
+
+ capturer->Init(this, MouseCursorMonitor::SHAPE_AND_POSITION);
+ capturer->Capture();
+
+ EXPECT_TRUE(cursor_image_.get());
+ EXPECT_TRUE(position_received_);
+ }
+}
+
+// Make sure that OnMouseCursorPosition() is not called in the SHAPE_ONLY mode.
+TEST_F(MouseCursorMonitorTest, MAYBE(ShapeOnly)) {
+ std::unique_ptr<MouseCursorMonitor> capturer(
+ MouseCursorMonitor::CreateForScreen(
+ DesktopCaptureOptions::CreateDefault(),
+ webrtc::kFullDesktopScreenId));
+ RTC_DCHECK(capturer.get());
+ capturer->Init(this, MouseCursorMonitor::SHAPE_ONLY);
+ capturer->Capture();
+
+ EXPECT_TRUE(cursor_image_.get());
+ EXPECT_FALSE(position_received_);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/mouse_cursor_monitor_win.cc b/third_party/libwebrtc/modules/desktop_capture/mouse_cursor_monitor_win.cc
new file mode 100644
index 0000000000..c22425b5a2
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/mouse_cursor_monitor_win.cc
@@ -0,0 +1,222 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <string.h>
+
+#include <memory>
+
+#include "modules/desktop_capture/desktop_capture_types.h"
+#include "modules/desktop_capture/desktop_frame.h"
+#include "modules/desktop_capture/desktop_geometry.h"
+#include "modules/desktop_capture/mouse_cursor.h"
+#include "modules/desktop_capture/mouse_cursor_monitor.h"
+#include "modules/desktop_capture/win/cursor.h"
+#include "modules/desktop_capture/win/screen_capture_utils.h"
+#include "modules/desktop_capture/win/window_capture_utils.h"
+#include "rtc_base/logging.h"
+
+#include <windows.h>
+
+namespace webrtc {
+
+namespace {
+
+bool IsSameCursorShape(const CURSORINFO& left, const CURSORINFO& right) {
+  // If the cursors are not showing, we do not care about the hCursor handle.
+ return left.flags == right.flags &&
+ (left.flags != CURSOR_SHOWING || left.hCursor == right.hCursor);
+}
+
+} // namespace
+
+class MouseCursorMonitorWin : public MouseCursorMonitor {
+ public:
+ explicit MouseCursorMonitorWin(HWND window);
+ explicit MouseCursorMonitorWin(ScreenId screen);
+ ~MouseCursorMonitorWin() override;
+
+ void Init(Callback* callback, Mode mode) override;
+ void Capture() override;
+
+ private:
+ // Get the rect of the currently selected screen, relative to the primary
+ // display's top-left. If the screen is disabled or disconnected, or any error
+ // happens, an empty rect is returned.
+ DesktopRect GetScreenRect();
+
+ HWND window_;
+ ScreenId screen_;
+
+ Callback* callback_;
+ Mode mode_;
+
+ HDC desktop_dc_;
+
+ // The last CURSORINFO (converted to MouseCursor) we have sent to the client.
+ CURSORINFO last_cursor_;
+};
+
+MouseCursorMonitorWin::MouseCursorMonitorWin(HWND window)
+ : window_(window),
+ screen_(kInvalidScreenId),
+ callback_(NULL),
+ mode_(SHAPE_AND_POSITION),
+ desktop_dc_(NULL) {
+ memset(&last_cursor_, 0, sizeof(CURSORINFO));
+}
+
+MouseCursorMonitorWin::MouseCursorMonitorWin(ScreenId screen)
+ : window_(NULL),
+ screen_(screen),
+ callback_(NULL),
+ mode_(SHAPE_AND_POSITION),
+ desktop_dc_(NULL) {
+ RTC_DCHECK_GE(screen, kFullDesktopScreenId);
+ memset(&last_cursor_, 0, sizeof(CURSORINFO));
+}
+
+MouseCursorMonitorWin::~MouseCursorMonitorWin() {
+ if (desktop_dc_)
+ ReleaseDC(NULL, desktop_dc_);
+}
+
+void MouseCursorMonitorWin::Init(Callback* callback, Mode mode) {
+ RTC_DCHECK(!callback_);
+ RTC_DCHECK(callback);
+ RTC_DCHECK(IsGUIThread(false));
+
+ callback_ = callback;
+ mode_ = mode;
+
+ desktop_dc_ = GetDC(NULL);
+}
+
+void MouseCursorMonitorWin::Capture() {
+// TODO: Bug 1666266. Commented out to pass new tests added in bug 1634044.
+// RTC_DCHECK(IsGUIThread(false));
+ RTC_DCHECK(callback_);
+
+ CURSORINFO cursor_info;
+ cursor_info.cbSize = sizeof(CURSORINFO);
+ if (!GetCursorInfo(&cursor_info)) {
+ RTC_LOG_F(LS_ERROR) << "Unable to get cursor info. Error = "
+ << GetLastError();
+ return;
+ }
+
+ if (!IsSameCursorShape(cursor_info, last_cursor_)) {
+      // Mozilla - CURSOR_SUPPRESSED is Windows 8 and above, so the symbol
+      // does not seem to be visible here.
+ if (cursor_info.flags != CURSOR_SHOWING) {
+ // The cursor is intentionally hidden now, send an empty bitmap.
+ last_cursor_ = cursor_info;
+ callback_->OnMouseCursor(new MouseCursor(
+ new BasicDesktopFrame(DesktopSize()), DesktopVector()));
+ } else {
+      // According to MSDN https://goo.gl/u6gyuC, HCURSOR instances returned
+      // by functions other than CreateCursor do not need to be actively
+      // destroyed. And the CloseHandle function (https://goo.gl/ja5ycW) does
+      // not close a cursor, so we assume an HCURSOR does not need to be
+      // closed.
+ if (cursor_info.flags == 0) {
+        // The host machine does not have a hardware mouse attached; we will
+        // send a default one instead.
+        // Note: Windows automatically caches the cursor resource, so we do
+        // not need to cache the result of LoadCursor.
+ cursor_info.hCursor = LoadCursor(nullptr, IDC_ARROW);
+ }
+ std::unique_ptr<MouseCursor> cursor(
+ CreateMouseCursorFromHCursor(desktop_dc_, cursor_info.hCursor));
+ if (cursor) {
+ last_cursor_ = cursor_info;
+ callback_->OnMouseCursor(cursor.release());
+ }
+ }
+ }
+
+ if (mode_ != SHAPE_AND_POSITION)
+ return;
+
+  // CURSORINFO::ptScreenPos is in full desktop coordinates.
+ DesktopVector position(cursor_info.ptScreenPos.x, cursor_info.ptScreenPos.y);
+ bool inside = cursor_info.flags == CURSOR_SHOWING;
+
+ if (window_) {
+ DesktopRect original_rect;
+ DesktopRect cropped_rect;
+ if (!GetCroppedWindowRect(window_, /*avoid_cropping_border*/ false,
+ &cropped_rect, &original_rect)) {
+ position.set(0, 0);
+ inside = false;
+ } else {
+ if (inside) {
+ HWND windowUnderCursor = WindowFromPoint(cursor_info.ptScreenPos);
+ inside = windowUnderCursor
+ ? (window_ == GetAncestor(windowUnderCursor, GA_ROOT))
+ : false;
+ }
+ position = position.subtract(cropped_rect.top_left());
+ }
+ } else {
+ RTC_DCHECK_NE(screen_, kInvalidScreenId);
+ DesktopRect rect = GetScreenRect();
+ if (inside)
+ inside = rect.Contains(position);
+ position = position.subtract(rect.top_left());
+ }
+
+ callback_->OnMouseCursorPosition(position);
+}
+
+DesktopRect MouseCursorMonitorWin::GetScreenRect() {
+ RTC_DCHECK(IsGUIThread(false));
+ RTC_DCHECK_NE(screen_, kInvalidScreenId);
+ if (screen_ == kFullDesktopScreenId) {
+ return DesktopRect::MakeXYWH(GetSystemMetrics(SM_XVIRTUALSCREEN),
+ GetSystemMetrics(SM_YVIRTUALSCREEN),
+ GetSystemMetrics(SM_CXVIRTUALSCREEN),
+ GetSystemMetrics(SM_CYVIRTUALSCREEN));
+ }
+ DISPLAY_DEVICE device;
+ device.cb = sizeof(device);
+ BOOL result = EnumDisplayDevices(NULL, screen_, &device, 0);
+ if (!result)
+ return DesktopRect();
+
+ DEVMODE device_mode;
+ device_mode.dmSize = sizeof(device_mode);
+ device_mode.dmDriverExtra = 0;
+ result = EnumDisplaySettingsEx(device.DeviceName, ENUM_CURRENT_SETTINGS,
+ &device_mode, 0);
+ if (!result)
+ return DesktopRect();
+
+ return DesktopRect::MakeXYWH(
+ device_mode.dmPosition.x, device_mode.dmPosition.y,
+ device_mode.dmPelsWidth, device_mode.dmPelsHeight);
+}
+
+MouseCursorMonitor* MouseCursorMonitor::CreateForWindow(
+ const DesktopCaptureOptions& options,
+ WindowId window) {
+ return new MouseCursorMonitorWin(reinterpret_cast<HWND>(window));
+}
+
+MouseCursorMonitor* MouseCursorMonitor::CreateForScreen(
+ const DesktopCaptureOptions& options,
+ ScreenId screen) {
+ return new MouseCursorMonitorWin(screen);
+}
+
+std::unique_ptr<MouseCursorMonitor> MouseCursorMonitor::Create(
+ const DesktopCaptureOptions& options) {
+ return std::unique_ptr<MouseCursorMonitor>(
+ CreateForScreen(options, kFullDesktopScreenId));
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/primitives_gn/moz.build b/third_party/libwebrtc/modules/desktop_capture/primitives_gn/moz.build
new file mode 100644
index 0000000000..c0ef33e2b3
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/primitives_gn/moz.build
@@ -0,0 +1,185 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/media/libyuv/",
+ "/media/libyuv/libyuv/include/",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/desktop_capture/desktop_frame.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/desktop_geometry.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/desktop_region.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/shared_desktop_frame.cc",
+ "/third_party/libwebrtc/modules/desktop_capture/shared_memory.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "ppc64":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+Library("primitives_gn")
diff --git a/third_party/libwebrtc/modules/desktop_capture/resolution_tracker.cc b/third_party/libwebrtc/modules/desktop_capture/resolution_tracker.cc
new file mode 100644
index 0000000000..9639d627fa
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/resolution_tracker.cc
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/resolution_tracker.h"
+
+namespace webrtc {
+
+bool ResolutionTracker::SetResolution(DesktopSize size) {
+ if (!initialized_) {
+ initialized_ = true;
+ last_size_ = size;
+ return false;
+ }
+
+ if (last_size_.equals(size)) {
+ return false;
+ }
+
+ last_size_ = size;
+ return true;
+}
+
+void ResolutionTracker::Reset() {
+ initialized_ = false;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/resolution_tracker.h b/third_party/libwebrtc/modules/desktop_capture/resolution_tracker.h
new file mode 100644
index 0000000000..8fe9d61862
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/resolution_tracker.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_DESKTOP_CAPTURE_RESOLUTION_TRACKER_H_
+#define MODULES_DESKTOP_CAPTURE_RESOLUTION_TRACKER_H_
+
+#include "modules/desktop_capture/desktop_geometry.h"
+
+namespace webrtc {
+
+class ResolutionTracker final {
+ public:
+  // Sets the resolution to `size`. Returns true if a previous size was
+  // recorded and differs from `size`.
+ bool SetResolution(DesktopSize size);
+
+ // Resets to the initial state.
+ void Reset();
+
+ private:
+ DesktopSize last_size_;
+ bool initialized_ = false;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_DESKTOP_CAPTURE_RESOLUTION_TRACKER_H_
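
A typical use of this tracker in a capturer, sketched below (OnFrameSizeKnown is a hypothetical helper): cached frames are dropped whenever the source resolution changes between captures.

#include "modules/desktop_capture/desktop_geometry.h"
#include "modules/desktop_capture/resolution_tracker.h"

// Sketch only: SetResolution() returns true only on a *change*, never for the
// first recorded size, so the reset path is skipped on the initial capture.
void OnFrameSizeKnown(webrtc::ResolutionTracker& tracker,
                      webrtc::DesktopSize size) {
  if (tracker.SetResolution(size)) {
    // Resolution changed since the last capture: invalidate cached frames
    // here (e.g. reset a ScreenCaptureFrameQueue).
  }
}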
diff --git a/third_party/libwebrtc/modules/desktop_capture/rgba_color.cc b/third_party/libwebrtc/modules/desktop_capture/rgba_color.cc
new file mode 100644
index 0000000000..362928a474
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/rgba_color.cc
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/rgba_color.h"
+
+#include "rtc_base/system/arch.h"
+
+namespace webrtc {
+
+namespace {
+
+bool AlphaEquals(uint8_t i, uint8_t j) {
+  // On Linux and on Windows 8 or earlier, capturer APIs return '0' for the
+  // alpha channel, while on Windows 10 they return '255'. As a workaround,
+  // treat 0 and 255 as equal.
+}
+
+} // namespace
+
+RgbaColor::RgbaColor(uint8_t blue, uint8_t green, uint8_t red, uint8_t alpha) {
+ this->blue = blue;
+ this->green = green;
+ this->red = red;
+ this->alpha = alpha;
+}
+
+RgbaColor::RgbaColor(uint8_t blue, uint8_t green, uint8_t red)
+ : RgbaColor(blue, green, red, 0xff) {}
+
+RgbaColor::RgbaColor(const uint8_t* bgra)
+ : RgbaColor(bgra[0], bgra[1], bgra[2], bgra[3]) {}
+
+RgbaColor::RgbaColor(uint32_t bgra)
+ : RgbaColor(reinterpret_cast<uint8_t*>(&bgra)) {}
+
+bool RgbaColor::operator==(const RgbaColor& right) const {
+ return blue == right.blue && green == right.green && red == right.red &&
+ AlphaEquals(alpha, right.alpha);
+}
+
+bool RgbaColor::operator!=(const RgbaColor& right) const {
+ return !(*this == right);
+}
+
+uint32_t RgbaColor::ToUInt32() const {
+#if defined(WEBRTC_ARCH_LITTLE_ENDIAN)
+ return blue | (green << 8) | (red << 16) | (alpha << 24);
+#else
+ return (blue << 24) | (green << 16) | (red << 8) | alpha;
+#endif
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/rgba_color.h b/third_party/libwebrtc/modules/desktop_capture/rgba_color.h
new file mode 100644
index 0000000000..2b13430a45
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/rgba_color.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_DESKTOP_CAPTURE_RGBA_COLOR_H_
+#define MODULES_DESKTOP_CAPTURE_RGBA_COLOR_H_
+
+#include <stdint.h>
+
+#include "modules/desktop_capture/desktop_frame.h"
+
+namespace webrtc {
+
+// A four-byte structure to store a color in BGRA format. This structure also
+// provides constructors to create it from a uint8_t array, e.g.
+// DesktopFrame::data(). It always uses BGRA order for internal storage to
+// match DesktopFrame::data().
+struct RgbaColor final {
+ // Creates a color with BGRA channels.
+ RgbaColor(uint8_t blue, uint8_t green, uint8_t red, uint8_t alpha);
+
+  // Creates a color with BGR channels and sets the alpha channel to 255
+  // (opaque).
+ RgbaColor(uint8_t blue, uint8_t green, uint8_t red);
+
+  // Creates a color from four bytes in BGRA order, i.e. DesktopFrame::data().
+ explicit RgbaColor(const uint8_t* bgra);
+
+  // Creates a color from BGRA channels packed in a uint32_t. Consumers should
+  // make sure the memory order of the uint32_t is always BGRA from left to
+  // right, regardless of system endianness. This function creates an
+  // equivalent RgbaColor instance from the ToUInt32() result of another
+  // RgbaColor instance.
+ explicit RgbaColor(uint32_t bgra);
+
+  // Returns true if `this` and `right` are the same color.
+ bool operator==(const RgbaColor& right) const;
+
+ // Returns true if `this` and `right` are different colors.
+ bool operator!=(const RgbaColor& right) const;
+
+ uint32_t ToUInt32() const;
+
+ uint8_t blue;
+ uint8_t green;
+ uint8_t red;
+ uint8_t alpha;
+};
+static_assert(
+ DesktopFrame::kBytesPerPixel == sizeof(RgbaColor),
+ "A pixel in DesktopFrame should be safe to be represented by a RgbaColor");
+
+} // namespace webrtc
+
+#endif // MODULES_DESKTOP_CAPTURE_RGBA_COLOR_H_
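
The point of ToUInt32() is that the bytes in memory are always B, G, R, A from low to high address, matching DesktopFrame::data(); that is why the implementation branches on endianness. A minimal sketch demonstrating the guarantee (PixelMatchesColor is hypothetical):

#include <cstdint>
#include <cstring>

#include "modules/desktop_capture/rgba_color.h"

// Sketch only: compares a BGRA pixel from DesktopFrame::data() with a packed
// color byte-for-byte. Note that RgbaColor(pixel) == color would additionally
// apply the alpha workaround in AlphaEquals().
bool PixelMatchesColor(const uint8_t* pixel, const webrtc::RgbaColor& color) {
  uint32_t packed = color.ToUInt32();
  uint8_t bytes[4];
  std::memcpy(bytes, &packed, 4);  // bytes[0..3] == B, G, R, A on any endian.
  return std::memcmp(pixel, bytes, 4) == 0;
}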
diff --git a/third_party/libwebrtc/modules/desktop_capture/rgba_color_unittest.cc b/third_party/libwebrtc/modules/desktop_capture/rgba_color_unittest.cc
new file mode 100644
index 0000000000..48e40594b1
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/rgba_color_unittest.cc
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/rgba_color.h"
+
+#include <cstdint>
+#include <vector>
+
+#include "test/gtest.h"
+
+namespace webrtc {
+
+TEST(RgbaColorTest, ConvertFromAndToUInt32) {
+ static const std::vector<uint32_t> cases{
+ 0, 1000, 2693, 3725, 4097, 12532,
+ 19902, 27002, 27723, 30944, 65535, 65536,
+ 231194, 255985, 322871, 883798, 9585200, 12410056,
+ 12641940, 30496970, 105735668, 110117847, 482769275, 542368468,
+ 798173396, 2678656711, 3231043200, UINT32_MAX,
+ };
+
+ for (uint32_t value : cases) {
+ RgbaColor left(value);
+ ASSERT_EQ(left.ToUInt32(), value);
+ RgbaColor right(left);
+ ASSERT_EQ(left.ToUInt32(), right.ToUInt32());
+ }
+}
+
+TEST(RgbaColorTest, AlphaChannelEquality) {
+ RgbaColor left(10, 10, 10, 0);
+ RgbaColor right(10, 10, 10, 255);
+ ASSERT_EQ(left, right);
+ right.alpha = 128;
+ ASSERT_NE(left, right);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/screen_capture_frame_queue.h b/third_party/libwebrtc/modules/desktop_capture/screen_capture_frame_queue.h
new file mode 100644
index 0000000000..46e19da77e
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/screen_capture_frame_queue.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_DESKTOP_CAPTURE_SCREEN_CAPTURE_FRAME_QUEUE_H_
+#define MODULES_DESKTOP_CAPTURE_SCREEN_CAPTURE_FRAME_QUEUE_H_
+
+#include <memory>
+
+namespace webrtc {
+
+// Represents a queue of reusable video frames. Provides access to the
+// 'current' frame - the frame that the caller is working with at the moment -
+// and to the 'previous' frame - the predecessor of the current frame, swapped
+// out by a MoveToNextFrame() call, if any.
+//
+// The caller is expected to (re)allocate frames if current_frame() returns
+// NULL. The caller can mark all frames in the queue for reallocation (when,
+// say, frame dimensions change). The queue records which frames need
+// updating, and the caller can query this.
+//
+// The frame consumer is expected never to hold more than kQueueLength frames
+// created by this queue, and should release the earliest one before trying to
+// capture a new frame (i.e. before MoveToNextFrame() is called).
+template <typename FrameType>
+class ScreenCaptureFrameQueue {
+ public:
+ ScreenCaptureFrameQueue() = default;
+ ~ScreenCaptureFrameQueue() = default;
+
+ ScreenCaptureFrameQueue(const ScreenCaptureFrameQueue&) = delete;
+ ScreenCaptureFrameQueue& operator=(const ScreenCaptureFrameQueue&) = delete;
+
+ // Moves to the next frame in the queue, moving the 'current' frame to become
+ // the 'previous' one.
+ void MoveToNextFrame() { current_ = (current_ + 1) % kQueueLength; }
+
+ // Replaces the current frame with a new one allocated by the caller. The
+ // existing frame (if any) is destroyed. Takes ownership of `frame`.
+ void ReplaceCurrentFrame(std::unique_ptr<FrameType> frame) {
+ frames_[current_] = std::move(frame);
+ }
+
+  // Marks all frames obsolete and resets the previous frame pointer. The
+  // queue drops its references to the frames; frames that the caller still
+  // holds remain accessible to the caller.
+ void Reset() {
+ for (int i = 0; i < kQueueLength; i++) {
+ frames_[i].reset();
+ }
+ current_ = 0;
+ }
+
+ FrameType* current_frame() const { return frames_[current_].get(); }
+
+ FrameType* previous_frame() const {
+ return frames_[(current_ + kQueueLength - 1) % kQueueLength].get();
+ }
+
+ private:
+ // Index of the current frame.
+ int current_ = 0;
+
+ static const int kQueueLength = 2;
+ std::unique_ptr<FrameType> frames_[kQueueLength];
+};
+
+} // namespace webrtc
+
+#endif // MODULES_DESKTOP_CAPTURE_SCREEN_CAPTURE_FRAME_QUEUE_H_
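
A minimal sketch of the double-buffered pattern described above, assuming BasicDesktopFrame as the frame type (CaptureOnce is hypothetical):

#include <memory>

#include "modules/desktop_capture/desktop_frame.h"
#include "modules/desktop_capture/screen_capture_frame_queue.h"

// Sketch only: rotate the queue, (re)allocate the current slot if it is empty
// or the wrong size, then paint into it and diff against the previous frame.
void CaptureOnce(
    webrtc::ScreenCaptureFrameQueue<webrtc::BasicDesktopFrame>& queue,
    webrtc::DesktopSize size) {
  queue.MoveToNextFrame();  // 'current' becomes 'previous'.
  if (!queue.current_frame() || !queue.current_frame()->size().equals(size)) {
    queue.ReplaceCurrentFrame(
        std::make_unique<webrtc::BasicDesktopFrame>(size));
  }
  // Paint into queue.current_frame(); queue.previous_frame() may be null for
  // the first two captures, so guard any diffing against it.
}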
diff --git a/third_party/libwebrtc/modules/desktop_capture/screen_capturer_darwin.mm b/third_party/libwebrtc/modules/desktop_capture/screen_capturer_darwin.mm
new file mode 100644
index 0000000000..d5a7bb0522
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/screen_capturer_darwin.mm
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <memory>
+
+#include "modules/desktop_capture/mac/screen_capturer_mac.h"
+
+namespace webrtc {
+
+// static
+std::unique_ptr<DesktopCapturer> DesktopCapturer::CreateRawScreenCapturer(
+ const DesktopCaptureOptions& options) {
+ if (!options.configuration_monitor()) {
+ return nullptr;
+ }
+
+ std::unique_ptr<ScreenCapturerMac> capturer(new ScreenCapturerMac(
+ options.configuration_monitor(), options.detect_updated_region(), options.allow_iosurface()));
+  if (!capturer->Init()) {
+ return nullptr;
+ }
+
+ return capturer;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/screen_capturer_fuchsia.cc b/third_party/libwebrtc/modules/desktop_capture/screen_capturer_fuchsia.cc
new file mode 100644
index 0000000000..c0ad841c05
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/screen_capturer_fuchsia.cc
@@ -0,0 +1,415 @@
+/*
+ * Copyright 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/screen_capturer_fuchsia.h"
+
+#include <fuchsia/sysmem/cpp/fidl.h>
+#include <fuchsia/ui/composition/cpp/fidl.h>
+#include <fuchsia/ui/scenic/cpp/fidl.h>
+#include <lib/sys/cpp/component_context.h>
+
+#include <algorithm>
+#include <cstdint>
+#include <memory>
+#include <string>
+#include <utility>
+
+#include "modules/desktop_capture/blank_detector_desktop_capturer_wrapper.h"
+#include "modules/desktop_capture/desktop_capture_options.h"
+#include "modules/desktop_capture/desktop_capture_types.h"
+#include "modules/desktop_capture/desktop_capturer.h"
+#include "modules/desktop_capture/desktop_frame.h"
+#include "modules/desktop_capture/desktop_geometry.h"
+#include "modules/desktop_capture/fallback_desktop_capturer_wrapper.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/numerics/divide_round.h"
+#include "rtc_base/time_utils.h"
+
+namespace webrtc {
+
+namespace {
+
+static constexpr uint32_t kMinBufferCount = 2;
+static constexpr uint32_t kFuchsiaBytesPerPixel = 4;
+static constexpr DesktopCapturer::SourceId kFuchsiaScreenId = 1;
+// 500 milliseconds
+static constexpr zx::duration kEventDelay = zx::msec(500);
+static constexpr fuchsia::sysmem::ColorSpaceType kSRGBColorSpace =
+ fuchsia::sysmem::ColorSpaceType::SRGB;
+static constexpr fuchsia::sysmem::PixelFormatType kBGRA32PixelFormatType =
+ fuchsia::sysmem::PixelFormatType::BGRA32;
+
+// Rounds `value` up to the closest multiple of `multiple`.
+size_t RoundUpToMultiple(size_t value, size_t multiple) {
+ return DivideRoundUp(value, multiple) * multiple;
+}
+
+} // namespace
+
+std::unique_ptr<DesktopCapturer> DesktopCapturer::CreateRawScreenCapturer(
+ const DesktopCaptureOptions& options) {
+ if (ScreenCapturerFuchsia::CheckRequirements()) {
+ std::unique_ptr<ScreenCapturerFuchsia> capturer(
+ new ScreenCapturerFuchsia());
+ return capturer;
+ }
+ return nullptr;
+}
+
+ScreenCapturerFuchsia::ScreenCapturerFuchsia()
+ : component_context_(
+ sys::ComponentContext::CreateAndServeOutgoingDirectory()) {
+ RTC_DCHECK(CheckRequirements());
+}
+
+ScreenCapturerFuchsia::~ScreenCapturerFuchsia() {
+  // Unmap the virtual-memory-mapped pointers.
+ uint32_t virt_mem_bytes =
+ buffer_collection_info_.settings.buffer_settings.size_bytes;
+ for (uint32_t buffer_index = 0;
+ buffer_index < buffer_collection_info_.buffer_count; buffer_index++) {
+ uintptr_t address =
+ reinterpret_cast<uintptr_t>(virtual_memory_mapped_addrs_[buffer_index]);
+ zx_status_t status = zx::vmar::root_self()->unmap(address, virt_mem_bytes);
+ RTC_DCHECK(status == ZX_OK);
+ }
+}
+
+// TODO(fxbug.dev/100303): Remove this function when Flatland is the only API.
+bool ScreenCapturerFuchsia::CheckRequirements() {
+ std::unique_ptr<sys::ComponentContext> component_context =
+ sys::ComponentContext::CreateAndServeOutgoingDirectory();
+ fuchsia::ui::scenic::ScenicSyncPtr scenic;
+ zx_status_t status = component_context->svc()->Connect(scenic.NewRequest());
+ if (status != ZX_OK) {
+ RTC_LOG(LS_ERROR) << "Failed to connect to Scenic: " << status;
+ return false;
+ }
+
+ bool scenic_uses_flatland = false;
+ scenic->UsesFlatland(&scenic_uses_flatland);
+ if (!scenic_uses_flatland) {
+ RTC_LOG(LS_ERROR) << "Screen capture not supported without Flatland.";
+ }
+
+ return scenic_uses_flatland;
+}
+
+void ScreenCapturerFuchsia::Start(Callback* callback) {
+ RTC_DCHECK(!callback_);
+ RTC_DCHECK(callback);
+ callback_ = callback;
+
+ fatal_error_ = false;
+
+ SetupBuffers();
+}
+
+void ScreenCapturerFuchsia::CaptureFrame() {
+ if (fatal_error_) {
+ callback_->OnCaptureResult(Result::ERROR_PERMANENT, nullptr);
+ return;
+ }
+
+ int64_t capture_start_time_nanos = rtc::TimeNanos();
+
+ zx::event event;
+ zx::event dup;
+ zx_status_t status = zx::event::create(0, &event);
+ if (status != ZX_OK) {
+ RTC_LOG(LS_ERROR) << "Failed to create event: " << status;
+ callback_->OnCaptureResult(Result::ERROR_TEMPORARY, nullptr);
+ return;
+ }
+ event.duplicate(ZX_RIGHT_SAME_RIGHTS, &dup);
+
+ fuchsia::ui::composition::GetNextFrameArgs next_frame_args;
+ next_frame_args.set_event(std::move(dup));
+
+ fuchsia::ui::composition::ScreenCapture_GetNextFrame_Result result;
+ screen_capture_->GetNextFrame(std::move(next_frame_args), &result);
+ if (result.is_err()) {
+ RTC_LOG(LS_ERROR) << "fuchsia.ui.composition.GetNextFrame() failed: "
+ << result.err() << "\n";
+ callback_->OnCaptureResult(Result::ERROR_TEMPORARY, nullptr);
+ return;
+ }
+
+ status = event.wait_one(ZX_EVENT_SIGNALED, zx::deadline_after(kEventDelay),
+ nullptr);
+ if (status != ZX_OK) {
+ RTC_LOG(LS_ERROR) << "Timed out waiting for ScreenCapture to render frame: "
+ << status;
+ callback_->OnCaptureResult(Result::ERROR_TEMPORARY, nullptr);
+ return;
+ }
+ uint32_t buffer_index = result.response().buffer_id();
+
+ // TODO(bugs.webrtc.org/14097): Use SharedMemoryDesktopFrame and
+ // ScreenCaptureFrameQueue
+ std::unique_ptr<BasicDesktopFrame> frame(
+ new BasicDesktopFrame(DesktopSize(width_, height_)));
+
+ uint32_t pixels_per_row = GetPixelsPerRow(
+ buffer_collection_info_.settings.image_format_constraints);
+ uint32_t stride = kFuchsiaBytesPerPixel * pixels_per_row;
+ frame->CopyPixelsFrom(virtual_memory_mapped_addrs_[buffer_index], stride,
+ DesktopRect::MakeWH(width_, height_));
+
+ fuchsia::ui::composition::ScreenCapture_ReleaseFrame_Result release_result;
+ screen_capture_->ReleaseFrame(buffer_index, &release_result);
+ if (release_result.is_err()) {
+ RTC_LOG(LS_ERROR) << "fuchsia.ui.composition.ReleaseFrame() failed: "
+ << release_result.err();
+ }
+
+ int capture_time_ms = (rtc::TimeNanos() - capture_start_time_nanos) /
+ rtc::kNumNanosecsPerMillisec;
+ frame->set_capture_time_ms(capture_time_ms);
+ callback_->OnCaptureResult(Result::SUCCESS, std::move(frame));
+}
+
+bool ScreenCapturerFuchsia::GetSourceList(SourceList* screens) {
+ RTC_DCHECK(screens->size() == 0);
+  // Fuchsia only supports a single-monitor display at this point.
+ screens->push_back({kFuchsiaScreenId, std::string("Fuchsia monitor")});
+ return true;
+}
+
+bool ScreenCapturerFuchsia::SelectSource(SourceId id) {
+ if (id == kFuchsiaScreenId || id == kFullDesktopScreenId) {
+ return true;
+ }
+ return false;
+}
+
+fuchsia::sysmem::BufferCollectionConstraints
+ScreenCapturerFuchsia::GetBufferConstraints() {
+ fuchsia::sysmem::BufferCollectionConstraints constraints;
+ constraints.usage.cpu =
+ fuchsia::sysmem::cpuUsageRead | fuchsia::sysmem::cpuUsageWrite;
+ constraints.min_buffer_count = kMinBufferCount;
+
+ constraints.has_buffer_memory_constraints = true;
+ constraints.buffer_memory_constraints.ram_domain_supported = true;
+ constraints.buffer_memory_constraints.cpu_domain_supported = true;
+
+ constraints.image_format_constraints_count = 1;
+ fuchsia::sysmem::ImageFormatConstraints& image_constraints =
+ constraints.image_format_constraints[0];
+ image_constraints.color_spaces_count = 1;
+ image_constraints.color_space[0] =
+ fuchsia::sysmem::ColorSpace{.type = kSRGBColorSpace};
+ image_constraints.pixel_format.type = kBGRA32PixelFormatType;
+ image_constraints.pixel_format.has_format_modifier = true;
+ image_constraints.pixel_format.format_modifier.value =
+ fuchsia::sysmem::FORMAT_MODIFIER_LINEAR;
+
+ image_constraints.required_min_coded_width = width_;
+ image_constraints.required_min_coded_height = height_;
+ image_constraints.required_max_coded_width = width_;
+ image_constraints.required_max_coded_height = height_;
+
+ image_constraints.bytes_per_row_divisor = kFuchsiaBytesPerPixel;
+
+ return constraints;
+}
+
+void ScreenCapturerFuchsia::SetupBuffers() {
+ fuchsia::ui::scenic::ScenicSyncPtr scenic;
+ zx_status_t status = component_context_->svc()->Connect(scenic.NewRequest());
+ if (status != ZX_OK) {
+ fatal_error_ = true;
+ RTC_LOG(LS_ERROR) << "Failed to connect to Scenic: " << status;
+ return;
+ }
+
+ fuchsia::ui::gfx::DisplayInfo display_info;
+ status = scenic->GetDisplayInfo(&display_info);
+ if (status != ZX_OK) {
+ fatal_error_ = true;
+ RTC_LOG(LS_ERROR) << "Failed to connect to get display dimensions: "
+ << status;
+ return;
+ }
+ width_ = display_info.width_in_px;
+ height_ = display_info.height_in_px;
+
+ status = component_context_->svc()->Connect(sysmem_allocator_.NewRequest());
+ if (status != ZX_OK) {
+ fatal_error_ = true;
+ RTC_LOG(LS_ERROR) << "Failed to connect to Sysmem Allocator: " << status;
+ return;
+ }
+
+ fuchsia::sysmem::BufferCollectionTokenSyncPtr sysmem_token;
+ status =
+ sysmem_allocator_->AllocateSharedCollection(sysmem_token.NewRequest());
+ if (status != ZX_OK) {
+ fatal_error_ = true;
+ RTC_LOG(LS_ERROR)
+ << "fuchsia.sysmem.Allocator.AllocateSharedCollection() failed: "
+ << status;
+ return;
+ }
+
+ fuchsia::sysmem::BufferCollectionTokenSyncPtr flatland_token;
+ status = sysmem_token->Duplicate(ZX_RIGHT_SAME_RIGHTS,
+ flatland_token.NewRequest());
+ if (status != ZX_OK) {
+ fatal_error_ = true;
+ RTC_LOG(LS_ERROR)
+ << "fuchsia.sysmem.BufferCollectionToken.Duplicate() failed: "
+ << status;
+ return;
+ }
+
+ status = sysmem_token->Sync();
+ if (status != ZX_OK) {
+ fatal_error_ = true;
+ RTC_LOG(LS_ERROR) << "fuchsia.sysmem.BufferCollectionToken.Sync() failed: "
+ << status;
+ return;
+ }
+
+ status = sysmem_allocator_->BindSharedCollection(std::move(sysmem_token),
+ collection_.NewRequest());
+ if (status != ZX_OK) {
+ fatal_error_ = true;
+ RTC_LOG(LS_ERROR)
+ << "fuchsia.sysmem.Allocator.BindSharedCollection() failed: " << status;
+ return;
+ }
+
+ status = collection_->SetConstraints(/*has_constraints=*/true,
+ GetBufferConstraints());
+ if (status != ZX_OK) {
+ fatal_error_ = true;
+ RTC_LOG(LS_ERROR)
+ << "fuchsia.sysmem.BufferCollection.SetConstraints() failed: "
+ << status;
+ return;
+ }
+
+ fuchsia::ui::composition::BufferCollectionImportToken import_token;
+ fuchsia::ui::composition::BufferCollectionExportToken export_token;
+ status = zx::eventpair::create(0, &export_token.value, &import_token.value);
+ if (status != ZX_OK) {
+ fatal_error_ = true;
+ RTC_LOG(LS_ERROR)
+ << "Failed to create BufferCollection import and export tokens: "
+ << status;
+ return;
+ }
+
+ status = component_context_->svc()->Connect(flatland_allocator_.NewRequest());
+ if (status != ZX_OK) {
+ fatal_error_ = true;
+ RTC_LOG(LS_ERROR) << "Failed to connect to Flatland Allocator: " << status;
+ return;
+ }
+
+ fuchsia::ui::composition::RegisterBufferCollectionArgs buffer_collection_args;
+ buffer_collection_args.set_export_token(std::move(export_token));
+ buffer_collection_args.set_buffer_collection_token(std::move(flatland_token));
+ buffer_collection_args.set_usage(
+ fuchsia::ui::composition::RegisterBufferCollectionUsage::SCREENSHOT);
+
+ fuchsia::ui::composition::Allocator_RegisterBufferCollection_Result
+ buffer_collection_result;
+ flatland_allocator_->RegisterBufferCollection(
+ std::move(buffer_collection_args), &buffer_collection_result);
+ if (buffer_collection_result.is_err()) {
+ fatal_error_ = true;
+ RTC_LOG(LS_ERROR) << "fuchsia.ui.composition.Allocator."
+ "RegisterBufferCollection() failed.";
+ return;
+ }
+
+ zx_status_t allocation_status;
+ status = collection_->WaitForBuffersAllocated(&allocation_status,
+ &buffer_collection_info_);
+ if (status != ZX_OK) {
+ fatal_error_ = true;
+ RTC_LOG(LS_ERROR) << "Failed to wait for buffer collection info: "
+ << status;
+ return;
+ }
+ if (allocation_status != ZX_OK) {
+ fatal_error_ = true;
+ RTC_LOG(LS_ERROR) << "Failed to allocate buffer collection: " << status;
+ return;
+ }
+ status = collection_->Close();
+ if (status != ZX_OK) {
+ fatal_error_ = true;
+ RTC_LOG(LS_ERROR) << "Failed to close buffer collection token: " << status;
+ return;
+ }
+
+ status = component_context_->svc()->Connect(screen_capture_.NewRequest());
+ if (status != ZX_OK) {
+ fatal_error_ = true;
+ RTC_LOG(LS_ERROR) << "Failed to connect to Screen Capture: " << status;
+ return;
+ }
+
+ // Configure buffers in ScreenCapture client.
+ fuchsia::ui::composition::ScreenCaptureConfig configure_args;
+ configure_args.set_import_token(std::move(import_token));
+ configure_args.set_buffer_count(buffer_collection_info_.buffer_count);
+ configure_args.set_size({width_, height_});
+
+ fuchsia::ui::composition::ScreenCapture_Configure_Result configure_result;
+ screen_capture_->Configure(std::move(configure_args), &configure_result);
+ if (configure_result.is_err()) {
+ fatal_error_ = true;
+ RTC_LOG(LS_ERROR)
+ << "fuchsia.ui.composition.ScreenCapture.Configure() failed: "
+ << configure_result.err();
+ return;
+ }
+
+ // We have a collection of virtual memory objects which the ScreenCapture
+ // client will write the frame data to when requested. We map each of these
+ // onto a pointer stored in virtual_memory_mapped_addrs_ which we can use to
+ // access this data.
+ uint32_t virt_mem_bytes =
+ buffer_collection_info_.settings.buffer_settings.size_bytes;
+ RTC_DCHECK(virt_mem_bytes > 0);
+ for (uint32_t buffer_index = 0;
+ buffer_index < buffer_collection_info_.buffer_count; buffer_index++) {
+ const zx::vmo& virt_mem = buffer_collection_info_.buffers[buffer_index].vmo;
+ virtual_memory_mapped_addrs_[buffer_index] = nullptr;
+ auto status = zx::vmar::root_self()->map(
+ ZX_VM_PERM_READ, /*vmar_offset*/ 0, virt_mem,
+ /*vmo_offset*/ 0, virt_mem_bytes,
+ reinterpret_cast<uintptr_t*>(
+ &virtual_memory_mapped_addrs_[buffer_index]));
+ if (status != ZX_OK) {
+ fatal_error_ = true;
+ RTC_LOG(LS_ERROR) << "Failed to map virtual memory: " << status;
+ return;
+ }
+ }
+}
+
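+// Worked example for GetPixelsPerRow() below (added for clarity; the values
+// are illustrative only): with width_ = 1000, min_bytes_per_row = 0 and
+// bytes_per_row_divisor = 64, the stride is RoundUpToMultiple(4000, 64) =
+// 4032 bytes, i.e. 1008 pixels per row, 8 of which are padding beyond the
+// visible width.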
+uint32_t ScreenCapturerFuchsia::GetPixelsPerRow(
+ const fuchsia::sysmem::ImageFormatConstraints& constraints) {
+ uint32_t stride = RoundUpToMultiple(
+ std::max(constraints.min_bytes_per_row, width_ * kFuchsiaBytesPerPixel),
+ constraints.bytes_per_row_divisor);
+ uint32_t pixels_per_row = stride / kFuchsiaBytesPerPixel;
+
+ return pixels_per_row;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/screen_capturer_fuchsia.h b/third_party/libwebrtc/modules/desktop_capture/screen_capturer_fuchsia.h
new file mode 100644
index 0000000000..444930963f
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/screen_capturer_fuchsia.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_DESKTOP_CAPTURE_SCREEN_CAPTURER_FUCHSIA_H_
+#define MODULES_DESKTOP_CAPTURE_SCREEN_CAPTURER_FUCHSIA_H_
+
+#include <fuchsia/sysmem/cpp/fidl.h>
+#include <fuchsia/ui/composition/cpp/fidl.h>
+#include <lib/sys/cpp/component_context.h>
+
+#include <memory>
+#include <unordered_map>
+
+#include "modules/desktop_capture/desktop_capture_options.h"
+#include "modules/desktop_capture/desktop_capturer.h"
+#include "modules/desktop_capture/desktop_frame.h"
+
+namespace webrtc {
+
+class ScreenCapturerFuchsia final : public DesktopCapturer {
+ public:
+ ScreenCapturerFuchsia();
+ ~ScreenCapturerFuchsia() override;
+
+ static bool CheckRequirements();
+
+ // DesktopCapturer interface.
+ void Start(Callback* callback) override;
+ void CaptureFrame() override;
+ bool GetSourceList(SourceList* screens) override;
+ bool SelectSource(SourceId id) override;
+
+ private:
+ fuchsia::sysmem::BufferCollectionConstraints GetBufferConstraints();
+ void SetupBuffers();
+ uint32_t GetPixelsPerRow(
+ const fuchsia::sysmem::ImageFormatConstraints& constraints);
+
+ Callback* callback_ = nullptr;
+
+ std::unique_ptr<sys::ComponentContext> component_context_;
+ fuchsia::sysmem::AllocatorSyncPtr sysmem_allocator_;
+ fuchsia::ui::composition::AllocatorSyncPtr flatland_allocator_;
+ fuchsia::ui::composition::ScreenCaptureSyncPtr screen_capture_;
+ fuchsia::sysmem::BufferCollectionSyncPtr collection_;
+ fuchsia::sysmem::BufferCollectionInfo_2 buffer_collection_info_;
+ std::unordered_map<uint32_t, uint8_t*> virtual_memory_mapped_addrs_;
+
+  bool fatal_error_ = false;
+
+  // Dimensions of the screen we are capturing.
+ uint32_t width_;
+ uint32_t height_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_DESKTOP_CAPTURE_SCREEN_CAPTURER_FUCHSIA_H_
diff --git a/third_party/libwebrtc/modules/desktop_capture/screen_capturer_helper.cc b/third_party/libwebrtc/modules/desktop_capture/screen_capturer_helper.cc
new file mode 100644
index 0000000000..f8261a90b0
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/screen_capturer_helper.cc
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/screen_capturer_helper.h"
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+void ScreenCapturerHelper::ClearInvalidRegion() {
+ MutexLock scoped_invalid_region_lock(&invalid_region_mutex_);
+ invalid_region_.Clear();
+}
+
+void ScreenCapturerHelper::InvalidateRegion(
+ const DesktopRegion& invalid_region) {
+ MutexLock scoped_invalid_region_lock(&invalid_region_mutex_);
+ invalid_region_.AddRegion(invalid_region);
+}
+
+void ScreenCapturerHelper::InvalidateScreen(const DesktopSize& size) {
+ MutexLock scoped_invalid_region_lock(&invalid_region_mutex_);
+ invalid_region_.AddRect(DesktopRect::MakeSize(size));
+}
+
+void ScreenCapturerHelper::TakeInvalidRegion(DesktopRegion* invalid_region) {
+ invalid_region->Clear();
+
+ {
+ MutexLock scoped_invalid_region_lock(&invalid_region_mutex_);
+ invalid_region->Swap(&invalid_region_);
+ }
+
+ if (log_grid_size_ > 0) {
+ DesktopRegion expanded_region;
+ ExpandToGrid(*invalid_region, log_grid_size_, &expanded_region);
+ expanded_region.Swap(invalid_region);
+
+ invalid_region->IntersectWith(DesktopRect::MakeSize(size_most_recent_));
+ }
+}
+
+void ScreenCapturerHelper::SetLogGridSize(int log_grid_size) {
+ log_grid_size_ = log_grid_size;
+}
+
+const DesktopSize& ScreenCapturerHelper::size_most_recent() const {
+ return size_most_recent_;
+}
+
+void ScreenCapturerHelper::set_size_most_recent(const DesktopSize& size) {
+ size_most_recent_ = size;
+}
+
+// Returns the largest multiple of `n` that is <= `x`.
+// `n` must be a power of 2. `nMask` is ~(`n` - 1).
+static int DownToMultiple(int x, int nMask) {
+ return (x & nMask);
+}
+
+// Returns the smallest multiple of `n` that is >= `x`.
+// `n` must be a power of 2. `nMask` is ~(`n` - 1).
+static int UpToMultiple(int x, int n, int nMask) {
+ return ((x + n - 1) & nMask);
+}
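+
+// Example for the two helpers above (added for clarity; the values are
+// illustrative only): with n = 8 and nMask = ~7, DownToMultiple(13, ~7) == 8
+// and UpToMultiple(13, 8, ~7) == 16.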
+
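+// For example (added for clarity; values illustrative): with log_grid_size = 4
+// (a 16-pixel grid), the rect LTRB(3, 5, 7, 9) expands to LTRB(0, 0, 16, 16).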
+void ScreenCapturerHelper::ExpandToGrid(const DesktopRegion& region,
+ int log_grid_size,
+ DesktopRegion* result) {
+ RTC_DCHECK_GE(log_grid_size, 1);
+ int grid_size = 1 << log_grid_size;
+ int grid_size_mask = ~(grid_size - 1);
+
+ result->Clear();
+ for (DesktopRegion::Iterator it(region); !it.IsAtEnd(); it.Advance()) {
+ int left = DownToMultiple(it.rect().left(), grid_size_mask);
+ int right = UpToMultiple(it.rect().right(), grid_size, grid_size_mask);
+ int top = DownToMultiple(it.rect().top(), grid_size_mask);
+ int bottom = UpToMultiple(it.rect().bottom(), grid_size, grid_size_mask);
+ result->AddRect(DesktopRect::MakeLTRB(left, top, right, bottom));
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/screen_capturer_helper.h b/third_party/libwebrtc/modules/desktop_capture/screen_capturer_helper.h
new file mode 100644
index 0000000000..cd7fa689c0
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/screen_capturer_helper.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_DESKTOP_CAPTURE_SCREEN_CAPTURER_HELPER_H_
+#define MODULES_DESKTOP_CAPTURE_SCREEN_CAPTURER_HELPER_H_
+
+#include <memory>
+
+#include "modules/desktop_capture/desktop_geometry.h"
+#include "modules/desktop_capture/desktop_region.h"
+#include "rtc_base/synchronization/mutex.h"
+#include "rtc_base/thread_annotations.h"
+
+namespace webrtc {
+
+// ScreenCapturerHelper is intended to be used by an implementation of the
+// ScreenCapturer interface. It maintains a thread-safe invalid region, and
+// the size of the most recently captured screen, on behalf of the
+// ScreenCapturer that owns it.
+class ScreenCapturerHelper {
+ public:
+ ScreenCapturerHelper() = default;
+ ~ScreenCapturerHelper() = default;
+
+ ScreenCapturerHelper(const ScreenCapturerHelper&) = delete;
+ ScreenCapturerHelper& operator=(const ScreenCapturerHelper&) = delete;
+
+ // Clear out the invalid region.
+ void ClearInvalidRegion();
+
+ // Invalidate the specified region.
+ void InvalidateRegion(const DesktopRegion& invalid_region);
+
+ // Invalidate the entire screen, of a given size.
+ void InvalidateScreen(const DesktopSize& size);
+
+  // Copies the current invalid region to `invalid_region` and clears the
+  // invalid region storage for the next frame.
+ void TakeInvalidRegion(DesktopRegion* invalid_region);
+
+ // Access the size of the most recently captured screen.
+ const DesktopSize& size_most_recent() const;
+ void set_size_most_recent(const DesktopSize& size);
+
+ // Lossy compression can result in color values leaking between pixels in one
+ // block. If part of a block changes, then unchanged parts of that block can
+ // be changed in the compressed output. So we need to re-render an entire
+ // block whenever part of the block changes.
+ //
+ // If `log_grid_size` is >= 1, then this function makes TakeInvalidRegion()
+ // produce an invalid region expanded so that its vertices lie on a grid of
+ // size 2 ^ `log_grid_size`. The expanded region is then clipped to the size
+ // of the most recently captured screen, as previously set by
+ // set_size_most_recent().
+ // If `log_grid_size` is <= 0, then the invalid region is not expanded.
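+  //
+  // For example (added for clarity; values illustrative): after
+  // SetLogGridSize(1), invalidating the 1x1 rect at (7, 7) makes
+  // TakeInvalidRegion() return the 2x2 rect at (6, 6).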
+ void SetLogGridSize(int log_grid_size);
+
+ // Expands a region so that its vertices all lie on a grid.
+ // The grid size must be >= 2, so `log_grid_size` must be >= 1.
+ static void ExpandToGrid(const DesktopRegion& region,
+ int log_grid_size,
+ DesktopRegion* result);
+
+ private:
+ // A region that has been manually invalidated (through InvalidateRegion).
+ // These will be returned as dirty_region in the capture data during the next
+ // capture.
+ DesktopRegion invalid_region_ RTC_GUARDED_BY(invalid_region_mutex_);
+
+ // A lock protecting `invalid_region_` across threads.
+ Mutex invalid_region_mutex_;
+
+ // The size of the most recently captured screen.
+ DesktopSize size_most_recent_;
+
+ // The log (base 2) of the size of the grid to which the invalid region is
+ // expanded.
+ // If the value is <= 0, then the invalid region is not expanded to a grid.
+ int log_grid_size_ = 0;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_DESKTOP_CAPTURE_SCREEN_CAPTURER_HELPER_H_
diff --git a/third_party/libwebrtc/modules/desktop_capture/screen_capturer_helper_unittest.cc b/third_party/libwebrtc/modules/desktop_capture/screen_capturer_helper_unittest.cc
new file mode 100644
index 0000000000..165bbe42de
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/screen_capturer_helper_unittest.cc
@@ -0,0 +1,193 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/screen_capturer_helper.h"
+
+#include "test/gtest.h"
+
+namespace webrtc {
+
+class ScreenCapturerHelperTest : public ::testing::Test {
+ protected:
+ ScreenCapturerHelper capturer_helper_;
+};
+
+TEST_F(ScreenCapturerHelperTest, ClearInvalidRegion) {
+ DesktopRegion region(DesktopRect::MakeXYWH(1, 2, 3, 4));
+ capturer_helper_.InvalidateRegion(region);
+ capturer_helper_.ClearInvalidRegion();
+ capturer_helper_.TakeInvalidRegion(&region);
+ EXPECT_TRUE(region.is_empty());
+}
+
+TEST_F(ScreenCapturerHelperTest, InvalidateRegion) {
+ DesktopRegion region;
+ capturer_helper_.TakeInvalidRegion(&region);
+ EXPECT_TRUE(region.is_empty());
+
+ region.SetRect(DesktopRect::MakeXYWH(1, 2, 3, 4));
+ capturer_helper_.InvalidateRegion(region);
+ capturer_helper_.TakeInvalidRegion(&region);
+ EXPECT_TRUE(DesktopRegion(DesktopRect::MakeXYWH(1, 2, 3, 4)).Equals(region));
+
+ capturer_helper_.InvalidateRegion(
+ DesktopRegion(DesktopRect::MakeXYWH(1, 2, 3, 4)));
+ capturer_helper_.InvalidateRegion(
+ DesktopRegion(DesktopRect::MakeXYWH(4, 2, 3, 4)));
+ capturer_helper_.TakeInvalidRegion(&region);
+ EXPECT_TRUE(DesktopRegion(DesktopRect::MakeXYWH(1, 2, 6, 4)).Equals(region));
+}
+
+TEST_F(ScreenCapturerHelperTest, InvalidateScreen) {
+ DesktopRegion region;
+ capturer_helper_.InvalidateScreen(DesktopSize(12, 34));
+ capturer_helper_.TakeInvalidRegion(&region);
+ EXPECT_TRUE(DesktopRegion(DesktopRect::MakeWH(12, 34)).Equals(region));
+}
+
+TEST_F(ScreenCapturerHelperTest, SizeMostRecent) {
+ EXPECT_TRUE(capturer_helper_.size_most_recent().is_empty());
+ capturer_helper_.set_size_most_recent(DesktopSize(12, 34));
+ EXPECT_TRUE(DesktopSize(12, 34).equals(capturer_helper_.size_most_recent()));
+}
+
+TEST_F(ScreenCapturerHelperTest, SetLogGridSize) {
+ capturer_helper_.set_size_most_recent(DesktopSize(10, 10));
+
+ DesktopRegion region;
+ capturer_helper_.TakeInvalidRegion(&region);
+ EXPECT_TRUE(DesktopRegion().Equals(region));
+
+ capturer_helper_.InvalidateRegion(
+ DesktopRegion(DesktopRect::MakeXYWH(7, 7, 1, 1)));
+ capturer_helper_.TakeInvalidRegion(&region);
+ EXPECT_TRUE(DesktopRegion(DesktopRect::MakeXYWH(7, 7, 1, 1)).Equals(region));
+
+ capturer_helper_.SetLogGridSize(-1);
+ capturer_helper_.InvalidateRegion(
+ DesktopRegion(DesktopRect::MakeXYWH(7, 7, 1, 1)));
+ capturer_helper_.TakeInvalidRegion(&region);
+ EXPECT_TRUE(DesktopRegion(DesktopRect::MakeXYWH(7, 7, 1, 1)).Equals(region));
+
+ capturer_helper_.SetLogGridSize(0);
+ capturer_helper_.InvalidateRegion(
+ DesktopRegion(DesktopRect::MakeXYWH(7, 7, 1, 1)));
+ capturer_helper_.TakeInvalidRegion(&region);
+ EXPECT_TRUE(DesktopRegion(DesktopRect::MakeXYWH(7, 7, 1, 1)).Equals(region));
+
+ capturer_helper_.SetLogGridSize(1);
+ capturer_helper_.InvalidateRegion(
+ DesktopRegion(DesktopRect::MakeXYWH(7, 7, 1, 1)));
+ capturer_helper_.TakeInvalidRegion(&region);
+
+ EXPECT_TRUE(DesktopRegion(DesktopRect::MakeXYWH(6, 6, 2, 2)).Equals(region));
+
+ capturer_helper_.SetLogGridSize(2);
+ capturer_helper_.InvalidateRegion(
+ DesktopRegion(DesktopRect::MakeXYWH(7, 7, 1, 1)));
+ capturer_helper_.TakeInvalidRegion(&region);
+ EXPECT_TRUE(DesktopRegion(DesktopRect::MakeXYWH(4, 4, 4, 4)).Equals(region));
+
+ capturer_helper_.SetLogGridSize(0);
+ capturer_helper_.InvalidateRegion(
+ DesktopRegion(DesktopRect::MakeXYWH(7, 7, 1, 1)));
+ capturer_helper_.TakeInvalidRegion(&region);
+ EXPECT_TRUE(DesktopRegion(DesktopRect::MakeXYWH(7, 7, 1, 1)).Equals(region));
+}
+
+void TestExpandRegionToGrid(const DesktopRegion& region,
+ int log_grid_size,
+ const DesktopRegion& expanded_region_expected) {
+ DesktopRegion expanded_region1;
+ ScreenCapturerHelper::ExpandToGrid(region, log_grid_size, &expanded_region1);
+ EXPECT_TRUE(expanded_region_expected.Equals(expanded_region1));
+
+ DesktopRegion expanded_region2;
+ ScreenCapturerHelper::ExpandToGrid(expanded_region1, log_grid_size,
+ &expanded_region2);
+ EXPECT_TRUE(expanded_region1.Equals(expanded_region2));
+}
+
+void TestExpandRectToGrid(int l,
+ int t,
+ int r,
+ int b,
+ int log_grid_size,
+ int lExpanded,
+ int tExpanded,
+ int rExpanded,
+ int bExpanded) {
+ TestExpandRegionToGrid(DesktopRegion(DesktopRect::MakeLTRB(l, t, r, b)),
+ log_grid_size,
+ DesktopRegion(DesktopRect::MakeLTRB(
+ lExpanded, tExpanded, rExpanded, bExpanded)));
+}
+
+TEST_F(ScreenCapturerHelperTest, ExpandToGrid) {
+ const int kLogGridSize = 4;
+ const int kGridSize = 1 << kLogGridSize;
+ for (int i = -2; i <= 2; i++) {
+ int x = i * kGridSize;
+ for (int j = -2; j <= 2; j++) {
+ int y = j * kGridSize;
+ TestExpandRectToGrid(x + 0, y + 0, x + 1, y + 1, kLogGridSize, x + 0,
+ y + 0, x + kGridSize, y + kGridSize);
+ TestExpandRectToGrid(x + 0, y + kGridSize - 1, x + 1, y + kGridSize,
+ kLogGridSize, x + 0, y + 0, x + kGridSize,
+ y + kGridSize);
+ TestExpandRectToGrid(x + kGridSize - 1, y + kGridSize - 1, x + kGridSize,
+ y + kGridSize, kLogGridSize, x + 0, y + 0,
+ x + kGridSize, y + kGridSize);
+ TestExpandRectToGrid(x + kGridSize - 1, y + 0, x + kGridSize, y + 1,
+ kLogGridSize, x + 0, y + 0, x + kGridSize,
+ y + kGridSize);
+ TestExpandRectToGrid(x - 1, y + 0, x + 1, y + 1, kLogGridSize,
+ x - kGridSize, y + 0, x + kGridSize, y + kGridSize);
+ TestExpandRectToGrid(x - 1, y - 1, x + 1, y + 0, kLogGridSize,
+ x - kGridSize, y - kGridSize, x + kGridSize, y);
+ TestExpandRectToGrid(x + 0, y - 1, x + 1, y + 1, kLogGridSize, x,
+ y - kGridSize, x + kGridSize, y + kGridSize);
+ TestExpandRectToGrid(x - 1, y - 1, x + 0, y + 1, kLogGridSize,
+ x - kGridSize, y - kGridSize, x, y + kGridSize);
+
+ // Construct a region consisting of 3 pixels and verify that it's expanded
+ // properly to 3 squares that are kGridSize by kGridSize.
+ for (int q = 0; q < 4; ++q) {
+ DesktopRegion region;
+ DesktopRegion expanded_region_expected;
+
+ if (q != 0) {
+ region.AddRect(DesktopRect::MakeXYWH(x - 1, y - 1, 1, 1));
+ expanded_region_expected.AddRect(DesktopRect::MakeXYWH(
+ x - kGridSize, y - kGridSize, kGridSize, kGridSize));
+ }
+ if (q != 1) {
+ region.AddRect(DesktopRect::MakeXYWH(x, y - 1, 1, 1));
+ expanded_region_expected.AddRect(
+ DesktopRect::MakeXYWH(x, y - kGridSize, kGridSize, kGridSize));
+ }
+ if (q != 2) {
+ region.AddRect(DesktopRect::MakeXYWH(x - 1, y, 1, 1));
+ expanded_region_expected.AddRect(
+ DesktopRect::MakeXYWH(x - kGridSize, y, kGridSize, kGridSize));
+ }
+ if (q != 3) {
+ region.AddRect(DesktopRect::MakeXYWH(x, y, 1, 1));
+ expanded_region_expected.AddRect(
+ DesktopRect::MakeXYWH(x, y, kGridSize, kGridSize));
+ }
+
+ TestExpandRegionToGrid(region, kLogGridSize, expanded_region_expected);
+ }
+ }
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/screen_capturer_integration_test.cc b/third_party/libwebrtc/modules/desktop_capture/screen_capturer_integration_test.cc
new file mode 100644
index 0000000000..b33427ad42
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/screen_capturer_integration_test.cc
@@ -0,0 +1,380 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <string.h>
+
+#include <algorithm>
+#include <initializer_list>
+#include <iostream> // TODO(zijiehe): Remove once the flakiness has been resolved.
+#include <memory>
+#include <utility>
+
+// TODO(zijiehe): Remove once the flakiness has been resolved.
+#include "modules/desktop_capture/desktop_capture_options.h"
+#include "modules/desktop_capture/desktop_capturer.h"
+#include "modules/desktop_capture/desktop_frame.h"
+#include "modules/desktop_capture/desktop_region.h"
+#include "modules/desktop_capture/mock_desktop_capturer_callback.h"
+#include "modules/desktop_capture/rgba_color.h"
+#include "modules/desktop_capture/screen_drawer.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/third_party/base64/base64.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+#if defined(WEBRTC_WIN)
+#include "modules/desktop_capture/win/screen_capturer_win_directx.h"
+#include "rtc_base/win/windows_version.h"
+#endif // defined(WEBRTC_WIN)
+
+using ::testing::_;
+
+namespace webrtc {
+
+namespace {
+
+ACTION_P2(SaveCaptureResult, result, dest) {
+ *result = arg0;
+ *dest = std::move(*arg1);
+}
+
+// Returns true if color in `rect` of `frame` is `color`.
+bool ArePixelsColoredBy(const DesktopFrame& frame,
+ DesktopRect rect,
+ RgbaColor color,
+ bool may_partially_draw) {
+ if (!may_partially_draw) {
+ // updated_region() should cover the painted area.
+ DesktopRegion updated_region(frame.updated_region());
+ updated_region.IntersectWith(rect);
+ if (!updated_region.Equals(DesktopRegion(rect))) {
+ return false;
+ }
+ }
+
+ // Color in the `rect` should be `color`.
+ uint8_t* row = frame.GetFrameDataAtPos(rect.top_left());
+ for (int i = 0; i < rect.height(); i++) {
+ uint8_t* column = row;
+ for (int j = 0; j < rect.width(); j++) {
+ if (color != RgbaColor(column)) {
+ return false;
+ }
+ column += DesktopFrame::kBytesPerPixel;
+ }
+ row += frame.stride();
+ }
+ return true;
+}
+
+} // namespace
+
+class ScreenCapturerIntegrationTest : public ::testing::Test {
+ public:
+ void SetUp() override {
+ capturer_ = DesktopCapturer::CreateScreenCapturer(
+ DesktopCaptureOptions::CreateDefault());
+ }
+
+ protected:
+ void TestCaptureUpdatedRegion(
+ std::initializer_list<DesktopCapturer*> capturers) {
+ RTC_DCHECK(capturers.size() > 0);
+// A large enough area for the tests, which should be able to be fulfilled
+// by most systems.
+#if defined(WEBRTC_WIN)
+  // On Windows, an interesting warning window may pop up randomly. The root
+  // cause is still under investigation, so reduce the test area to work
+  // around it. Bug: https://bugs.chromium.org/p/webrtc/issues/detail?id=6666.
+ const int kTestArea = 416;
+#else
+ const int kTestArea = 512;
+#endif
+ const int kRectSize = 32;
+ std::unique_ptr<ScreenDrawer> drawer = ScreenDrawer::Create();
+ if (!drawer || drawer->DrawableRegion().is_empty()) {
+ RTC_LOG(LS_WARNING)
+ << "No ScreenDrawer implementation for current platform.";
+ return;
+ }
+ if (drawer->DrawableRegion().width() < kTestArea ||
+ drawer->DrawableRegion().height() < kTestArea) {
+ RTC_LOG(LS_WARNING)
+ << "ScreenDrawer::DrawableRegion() is too small for the "
+ "CaptureUpdatedRegion tests.";
+ return;
+ }
+
+ for (DesktopCapturer* capturer : capturers) {
+ capturer->Start(&callback_);
+ }
+
+ // Draw a set of `kRectSize` by `kRectSize` rectangles at (`i`, `i`), or
+ // `i` by `i` rectangles at (`kRectSize`, `kRectSize`). One of (controlled
+ // by `c`) its primary colors is `i`, and the other two are 0x7f. So we
+ // won't draw a black or white rectangle.
+ for (int c = 0; c < 3; c++) {
+ // A fixed size rectangle.
+ for (int i = 0; i < kTestArea - kRectSize; i += 16) {
+ DesktopRect rect = DesktopRect::MakeXYWH(i, i, kRectSize, kRectSize);
+ rect.Translate(drawer->DrawableRegion().top_left());
+ RgbaColor color((c == 0 ? (i & 0xff) : 0x7f),
+ (c == 1 ? (i & 0xff) : 0x7f),
+ (c == 2 ? (i & 0xff) : 0x7f));
+ // Fail fast.
+ ASSERT_NO_FATAL_FAILURE(
+ TestCaptureOneFrame(capturers, drawer.get(), rect, color));
+ }
+
+ // A variable-size rectangle.
+ for (int i = 0; i < kTestArea - kRectSize; i += 16) {
+ DesktopRect rect = DesktopRect::MakeXYWH(kRectSize, kRectSize, i, i);
+ rect.Translate(drawer->DrawableRegion().top_left());
+ RgbaColor color((c == 0 ? (i & 0xff) : 0x7f),
+ (c == 1 ? (i & 0xff) : 0x7f),
+ (c == 2 ? (i & 0xff) : 0x7f));
+ // Fail fast.
+ ASSERT_NO_FATAL_FAILURE(
+ TestCaptureOneFrame(capturers, drawer.get(), rect, color));
+ }
+ }
+ }
+
+ void TestCaptureUpdatedRegion() {
+ TestCaptureUpdatedRegion({capturer_.get()});
+ }
+
+#if defined(WEBRTC_WIN)
+  // Enables allow_directx_capturer in DesktopCaptureOptions, but lets
+  // DesktopCapturer::CreateScreenCapturer() decide whether a DirectX
+  // capturer should be used.
+ void MaybeCreateDirectxCapturer() {
+ DesktopCaptureOptions options(DesktopCaptureOptions::CreateDefault());
+ options.set_allow_directx_capturer(true);
+ capturer_ = DesktopCapturer::CreateScreenCapturer(options);
+ }
+
+ bool CreateDirectxCapturer() {
+ if (!ScreenCapturerWinDirectx::IsSupported()) {
+ RTC_LOG(LS_WARNING) << "Directx capturer is not supported";
+ return false;
+ }
+
+ MaybeCreateDirectxCapturer();
+ return true;
+ }
+
+ void CreateMagnifierCapturer() {
+ DesktopCaptureOptions options(DesktopCaptureOptions::CreateDefault());
+ options.set_allow_use_magnification_api(true);
+ capturer_ = DesktopCapturer::CreateScreenCapturer(options);
+ }
+#endif // defined(WEBRTC_WIN)
+
+ std::unique_ptr<DesktopCapturer> capturer_;
+ MockDesktopCapturerCallback callback_;
+
+ private:
+  // Repeatedly captures frames with each of `capturers`, up to 600 rounds
+  // (typically 30 seconds), until every capturer has captured a `color`
+  // rectangle at `rect`. This function uses `drawer`->WaitForPendingDraws()
+  // between two attempts to wait for the screen to update.
+ void TestCaptureOneFrame(std::vector<DesktopCapturer*> capturers,
+ ScreenDrawer* drawer,
+ DesktopRect rect,
+ RgbaColor color) {
+ const int wait_capture_round = 600;
+ drawer->Clear();
+ size_t succeeded_capturers = 0;
+ for (int i = 0; i < wait_capture_round; i++) {
+ drawer->DrawRectangle(rect, color);
+ drawer->WaitForPendingDraws();
+ for (size_t j = 0; j < capturers.size(); j++) {
+ if (capturers[j] == nullptr) {
+          // This capturer has already captured the rectangle we drew, so we
+          // won't test it again. (DesktopCapturer should return an empty
+          // updated_region() if no update is detected.)
+ continue;
+ }
+ std::unique_ptr<DesktopFrame> frame = CaptureFrame(capturers[j]);
+ if (!frame) {
+ // CaptureFrame() has triggered an assertion failure already, we only
+ // need to return here.
+ return;
+ }
+
+ if (ArePixelsColoredBy(*frame, rect, color,
+ drawer->MayDrawIncompleteShapes())) {
+ capturers[j] = nullptr;
+ succeeded_capturers++;
+ }
+        // The following else-if statement is for debugging purposes only and
+        // should be removed once the flakiness of
+        // ScreenCapturerIntegrationTest has been resolved.
+ else if (i == wait_capture_round - 1) {
+ std::string result;
+ rtc::Base64::EncodeFromArray(
+ frame->data(), frame->size().height() * frame->stride(), &result);
+ std::cout << frame->size().width() << " x " << frame->size().height()
+ << std::endl;
+          // Split the entire string (which can be over 4 MB) into several
+          // lines to keep the browser from hanging when viewing the log.
+ static const size_t kLineLength = 32768;
+ const char* result_end = result.c_str() + result.length();
+ for (const char* it = result.c_str(); it < result_end;
+ it += kLineLength) {
+ const size_t max_length = result_end - it;
+ std::cout << std::string(it, std::min(kLineLength, max_length))
+ << std::endl;
+ }
+ std::cout << "Failed to capture rectangle " << rect.left() << " x "
+ << rect.top() << " - " << rect.right() << " x "
+ << rect.bottom() << " with color ("
+ << static_cast<int>(color.red) << ", "
+ << static_cast<int>(color.green) << ", "
+ << static_cast<int>(color.blue) << ", "
+ << static_cast<int>(color.alpha) << ")" << std::endl;
+ ASSERT_TRUE(false) << "ScreenCapturerIntegrationTest may be flaky. "
+ "Please kindly FYI the broken link to "
+ "zijiehe@chromium.org for investigation. If "
+ "the failure continually happens, but I have "
+ "not responded as quick as expected, disable "
+ "*all* tests in "
+ "screen_capturer_integration_test.cc to "
+ "unblock other developers.";
+ }
+ }
+
+ if (succeeded_capturers == capturers.size()) {
+ break;
+ }
+ }
+
+ ASSERT_EQ(succeeded_capturers, capturers.size());
+ }
+
+ // Expects `capturer` to successfully capture a frame, and returns it.
+ std::unique_ptr<DesktopFrame> CaptureFrame(DesktopCapturer* capturer) {
+ for (int i = 0; i < 10; i++) {
+ std::unique_ptr<DesktopFrame> frame;
+ DesktopCapturer::Result result;
+ EXPECT_CALL(callback_, OnCaptureResultPtr(_, _))
+ .WillOnce(SaveCaptureResult(&result, &frame));
+ capturer->CaptureFrame();
+ ::testing::Mock::VerifyAndClearExpectations(&callback_);
+ if (result == DesktopCapturer::Result::SUCCESS) {
+ EXPECT_TRUE(frame);
+ return frame;
+ } else {
+ EXPECT_FALSE(frame);
+ }
+ }
+
+ EXPECT_TRUE(false);
+ return nullptr;
+ }
+};
+
+#if defined(WEBRTC_WIN)
+// ScreenCapturerWinGdi randomly returns a blank screen; the root cause is
+// still unknown. Bug:
+// https://bugs.chromium.org/p/webrtc/issues/detail?id=6843.
+#define MAYBE_CaptureUpdatedRegion DISABLED_CaptureUpdatedRegion
+#else
+#define MAYBE_CaptureUpdatedRegion CaptureUpdatedRegion
+#endif
+TEST_F(ScreenCapturerIntegrationTest, MAYBE_CaptureUpdatedRegion) {
+ TestCaptureUpdatedRegion();
+}
+
+#if defined(WEBRTC_WIN)
+// ScreenCapturerWinGdi randomly returns a blank screen; the root cause is
+// still unknown. Bug:
+// https://bugs.chromium.org/p/webrtc/issues/detail?id=6843.
+#define MAYBE_TwoCapturers DISABLED_TwoCapturers
+#else
+#define MAYBE_TwoCapturers TwoCapturers
+#endif
+TEST_F(ScreenCapturerIntegrationTest, MAYBE_TwoCapturers) {
+ std::unique_ptr<DesktopCapturer> capturer2 = std::move(capturer_);
+ SetUp();
+ TestCaptureUpdatedRegion({capturer_.get(), capturer2.get()});
+}
+
+#if defined(WEBRTC_WIN)
+
+// Windows cannot capture contents on VMs hosted in GCE. See bug
+// https://bugs.chromium.org/p/webrtc/issues/detail?id=8153.
+TEST_F(ScreenCapturerIntegrationTest,
+ DISABLED_CaptureUpdatedRegionWithDirectxCapturer) {
+ if (!CreateDirectxCapturer()) {
+ return;
+ }
+
+ TestCaptureUpdatedRegion();
+}
+
+TEST_F(ScreenCapturerIntegrationTest, DISABLED_TwoDirectxCapturers) {
+ if (!CreateDirectxCapturer()) {
+ return;
+ }
+
+ std::unique_ptr<DesktopCapturer> capturer2 = std::move(capturer_);
+ RTC_CHECK(CreateDirectxCapturer());
+ TestCaptureUpdatedRegion({capturer_.get(), capturer2.get()});
+}
+
+TEST_F(ScreenCapturerIntegrationTest,
+ DISABLED_CaptureUpdatedRegionWithMagnifierCapturer) {
+  // On Windows 8 or later, the magnifier APIs return a frame with a border in
+  // the test environment, so disable these tests.
+  // Bug: https://bugs.chromium.org/p/webrtc/issues/detail?id=6844
+  // TODO(zijiehe): Find the root cause of the border and the failure, which
+  // cannot be reproduced on my dev machine.
+ if (rtc::rtc_win::GetVersion() >= rtc::rtc_win::Version::VERSION_WIN8) {
+ return;
+ }
+ CreateMagnifierCapturer();
+ TestCaptureUpdatedRegion();
+}
+
+TEST_F(ScreenCapturerIntegrationTest, DISABLED_TwoMagnifierCapturers) {
+  // On Windows 8 or later, the magnifier APIs return a frame with a border in
+  // the test environment, so disable these tests.
+  // Bug: https://bugs.chromium.org/p/webrtc/issues/detail?id=6844
+  // TODO(zijiehe): Find the root cause of the border and the failure, which
+  // cannot be reproduced on my dev machine.
+ if (rtc::rtc_win::GetVersion() >= rtc::rtc_win::Version::VERSION_WIN8) {
+ return;
+ }
+ CreateMagnifierCapturer();
+ std::unique_ptr<DesktopCapturer> capturer2 = std::move(capturer_);
+ CreateMagnifierCapturer();
+ TestCaptureUpdatedRegion({capturer_.get(), capturer2.get()});
+}
+
+TEST_F(ScreenCapturerIntegrationTest,
+ DISABLED_MaybeCaptureUpdatedRegionWithDirectxCapturer) {
+ if (rtc::rtc_win::GetVersion() < rtc::rtc_win::Version::VERSION_WIN8) {
+    // ScreenCapturerWinGdi randomly returns a blank screen; the root cause is
+    // still unknown. Bug:
+    // https://bugs.chromium.org/p/webrtc/issues/detail?id=6843.
+    // On Windows 7 or earlier, MaybeCreateDirectxCapturer() always creates a
+    // GDI capturer.
+    return;
+  }
+  // Even if the DirectX capturer is not supported on the current system, we
+  // should still be able to select a usable capturer.
+ MaybeCreateDirectxCapturer();
+ TestCaptureUpdatedRegion();
+}
+
+#endif // defined(WEBRTC_WIN)
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/screen_capturer_linux.cc b/third_party/libwebrtc/modules/desktop_capture/screen_capturer_linux.cc
new file mode 100644
index 0000000000..d9f2795130
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/screen_capturer_linux.cc
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <memory>
+
+#include "modules/desktop_capture/desktop_capture_options.h"
+#include "modules/desktop_capture/desktop_capturer.h"
+
+#if defined(WEBRTC_USE_PIPEWIRE)
+#if defined(WEBRTC_MOZILLA_BUILD)
+#include "modules/desktop_capture/linux/wayland/moz_base_capturer_pipewire.h"
+#else
+#include "modules/desktop_capture/linux/wayland/base_capturer_pipewire.h"
+#endif
+#endif // defined(WEBRTC_USE_PIPEWIRE)
+
+#if defined(WEBRTC_USE_X11)
+#include "modules/desktop_capture/linux/x11/screen_capturer_x11.h"
+#endif // defined(WEBRTC_USE_X11)
+
+namespace webrtc {
+
+// static
+std::unique_ptr<DesktopCapturer> DesktopCapturer::CreateRawScreenCapturer(
+ const DesktopCaptureOptions& options) {
+#if defined(WEBRTC_USE_PIPEWIRE)
+ if (options.allow_pipewire() && DesktopCapturer::IsRunningUnderWayland()) {
+#if defined(WEBRTC_MOZILLA_BUILD)
+ return BaseCapturerPipeWire::CreateRawCapturer(options);
+#else
+ return std::make_unique<BaseCapturerPipeWire>(options);
+#endif
+ }
+#endif // defined(WEBRTC_USE_PIPEWIRE)
+
+#if defined(WEBRTC_USE_X11)
+ return ScreenCapturerX11::CreateRawScreenCapturer(options);
+#else
+ return nullptr;
+#endif // defined(WEBRTC_USE_X11)
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/screen_capturer_mac.mm b/third_party/libwebrtc/modules/desktop_capture/screen_capturer_mac.mm
new file mode 100644
index 0000000000..285086ffa6
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/screen_capturer_mac.mm
@@ -0,0 +1,766 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stddef.h>
+
+#include <memory>
+#include <set>
+#include <utility>
+
+#include <ApplicationServices/ApplicationServices.h>
+#include <Cocoa/Cocoa.h>
+#include <CoreGraphics/CoreGraphics.h>
+
+#include "modules/desktop_capture/desktop_capture_options.h"
+#include "modules/desktop_capture/desktop_capturer.h"
+#include "modules/desktop_capture/desktop_frame.h"
+#include "modules/desktop_capture/desktop_geometry.h"
+#include "modules/desktop_capture/desktop_region.h"
+#include "modules/desktop_capture/mac/desktop_configuration.h"
+#include "modules/desktop_capture/mac/desktop_configuration_monitor.h"
+#include "modules/desktop_capture/mac/scoped_pixel_buffer_object.h"
+#include "modules/desktop_capture/screen_capture_frame_queue.h"
+#include "modules/desktop_capture/screen_capturer_helper.h"
+#include "modules/desktop_capture/shared_desktop_frame.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/constructormagic.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/macutils.h"
+#include "rtc_base/timeutils.h"
+
+namespace webrtc {
+
+namespace {
+
+// CGDisplayStreamRefs need to be destroyed asynchronously after receiving a
+// kCGDisplayStreamFrameStatusStopped callback from CoreGraphics. This may
+// happen after the ScreenCapturerMac has been destroyed. DisplayStreamManager
+// is responsible for destroying all extant CGDisplayStreamRefs, and will
+// destroy itself once it's done.
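+//
+// Typical lifetime (summary added for clarity): the capturer saves each
+// stream with SaveStream(), stops streams via UnregisterActiveStreams(), and
+// calls PrepareForSelfDestruction() from its destructor; after that point the
+// manager deletes itself once DestroyStream() has run for every saved stream.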
+class DisplayStreamManager {
+ public:
+ int GetUniqueId() { return ++unique_id_generator_; }
+ void DestroyStream(int unique_id) {
+ auto it = display_stream_wrappers_.find(unique_id);
+ RTC_CHECK(it != display_stream_wrappers_.end());
+ RTC_CHECK(!it->second.active);
+ CFRelease(it->second.stream);
+ display_stream_wrappers_.erase(it);
+
+ if (ready_for_self_destruction_ && display_stream_wrappers_.empty())
+ delete this;
+ }
+
+ void SaveStream(int unique_id,
+ CGDisplayStreamRef stream) {
+ RTC_CHECK(unique_id <= unique_id_generator_);
+ DisplayStreamWrapper wrapper;
+ wrapper.stream = stream;
+ display_stream_wrappers_[unique_id] = wrapper;
+ }
+
+ void UnregisterActiveStreams() {
+ for (auto& pair : display_stream_wrappers_) {
+ DisplayStreamWrapper& wrapper = pair.second;
+ if (wrapper.active) {
+ wrapper.active = false;
+ CFRunLoopSourceRef source =
+ CGDisplayStreamGetRunLoopSource(wrapper.stream);
+ CFRunLoopRemoveSource(CFRunLoopGetCurrent(), source,
+ kCFRunLoopCommonModes);
+ CGDisplayStreamStop(wrapper.stream);
+ }
+ }
+ }
+
+ void PrepareForSelfDestruction() {
+ ready_for_self_destruction_ = true;
+
+ if (display_stream_wrappers_.empty())
+ delete this;
+ }
+
+ // Once the DisplayStreamManager is ready for destruction, the
+ // ScreenCapturerMac is no longer present. Any updates should be ignored.
+ bool ShouldIgnoreUpdates() { return ready_for_self_destruction_; }
+
+ private:
+ struct DisplayStreamWrapper {
+ // The registered CGDisplayStreamRef.
+ CGDisplayStreamRef stream = nullptr;
+
+ // Set to false when the stream has been stopped. An asynchronous callback
+ // from CoreGraphics will let us destroy the CGDisplayStreamRef.
+ bool active = true;
+ };
+
+ std::map<int, DisplayStreamWrapper> display_stream_wrappers_;
+ int unique_id_generator_ = 0;
+ bool ready_for_self_destruction_ = false;
+};
+
+// Standard Mac displays have 72dpi, but we report 96dpi for
+// consistency with Windows and Linux.
+const int kStandardDPI = 96;
+
+// Scales all coordinates of a rect by a specified factor.
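+// For example (added for clarity; the values are illustrative only): scaling
+// the CGRect {(1.25, 2.5), 3.5 x 4.25} by 2.0 yields
+// DesktopRect::MakeLTRB(2, 5, 10, 14); the edges are rounded outward so the
+// scaled rect always covers the original.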
+DesktopRect ScaleAndRoundCGRect(const CGRect& rect, float scale) {
+ return DesktopRect::MakeLTRB(
+ static_cast<int>(floor(rect.origin.x * scale)),
+ static_cast<int>(floor(rect.origin.y * scale)),
+ static_cast<int>(ceil((rect.origin.x + rect.size.width) * scale)),
+ static_cast<int>(ceil((rect.origin.y + rect.size.height) * scale)));
+}
+
+// Copies pixels in the |rect| from |src_plane| to |dest_plane|. |rect| should
+// be relative to the origin of |src_plane| and |dest_plane|.
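+// For example (added for clarity; the values are illustrative only): with
+// bytes_per_pixel = 4 and rect = DesktopRect::MakeXYWH(10, 5, 16, 2), this
+// copies two 64-byte runs, each starting 40 bytes into rows 5 and 6.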
+void CopyRect(const uint8_t* src_plane,
+ int src_plane_stride,
+ uint8_t* dest_plane,
+ int dest_plane_stride,
+ int bytes_per_pixel,
+ const DesktopRect& rect) {
+ // Get the address of the starting point.
+ const int src_y_offset = src_plane_stride * rect.top();
+ const int dest_y_offset = dest_plane_stride * rect.top();
+ const int x_offset = bytes_per_pixel * rect.left();
+ src_plane += src_y_offset + x_offset;
+ dest_plane += dest_y_offset + x_offset;
+
+ // Copy pixels in the rectangle line by line.
+ const int bytes_per_line = bytes_per_pixel * rect.width();
+ const int height = rect.height();
+ for (int i = 0 ; i < height; ++i) {
+ memcpy(dest_plane, src_plane, bytes_per_line);
+ src_plane += src_plane_stride;
+ dest_plane += dest_plane_stride;
+ }
+}
+
+// Returns an array of CGWindowID for all the on-screen windows except
+// |window_to_exclude|, or NULL if the window is not found or it fails. The
+// caller should release the returned CFArrayRef.
+CFArrayRef CreateWindowListWithExclusion(CGWindowID window_to_exclude) {
+ if (!window_to_exclude)
+ return nullptr;
+
+ CFArrayRef all_windows = CGWindowListCopyWindowInfo(
+ kCGWindowListOptionOnScreenOnly, kCGNullWindowID);
+ if (!all_windows)
+ return nullptr;
+
+ CFMutableArrayRef returned_array =
+ CFArrayCreateMutable(nullptr, CFArrayGetCount(all_windows), nullptr);
+
+ bool found = false;
+ for (CFIndex i = 0; i < CFArrayGetCount(all_windows); ++i) {
+ CFDictionaryRef window = reinterpret_cast<CFDictionaryRef>(
+ CFArrayGetValueAtIndex(all_windows, i));
+
+ CFNumberRef id_ref = reinterpret_cast<CFNumberRef>(
+ CFDictionaryGetValue(window, kCGWindowNumber));
+
+ CGWindowID id;
+ CFNumberGetValue(id_ref, kCFNumberIntType, &id);
+ if (id == window_to_exclude) {
+ found = true;
+ continue;
+ }
+ CFArrayAppendValue(returned_array, reinterpret_cast<void *>(id));
+ }
+ CFRelease(all_windows);
+
+ if (!found) {
+ CFRelease(returned_array);
+ returned_array = nullptr;
+ }
+ return returned_array;
+}
+
+// Returns the bounds of |window| in physical pixels, enlarged by a small amount
+// on four edges to take account of the border/shadow effects.
+DesktopRect GetExcludedWindowPixelBounds(CGWindowID window,
+ float dip_to_pixel_scale) {
+ // The amount of pixels to add to the actual window bounds to take into
+ // account of the border/shadow effects.
+ static const int kBorderEffectSize = 20;
+ CGRect rect;
+ CGWindowID ids[1];
+ ids[0] = window;
+
+ CFArrayRef window_id_array =
+ CFArrayCreate(nullptr, reinterpret_cast<const void**>(&ids), 1, nullptr);
+ CFArrayRef window_array =
+ CGWindowListCreateDescriptionFromArray(window_id_array);
+
+ if (CFArrayGetCount(window_array) > 0) {
+ CFDictionaryRef window = reinterpret_cast<CFDictionaryRef>(
+ CFArrayGetValueAtIndex(window_array, 0));
+ CFDictionaryRef bounds_ref = reinterpret_cast<CFDictionaryRef>(
+ CFDictionaryGetValue(window, kCGWindowBounds));
+ CGRectMakeWithDictionaryRepresentation(bounds_ref, &rect);
+ }
+
+ CFRelease(window_id_array);
+ CFRelease(window_array);
+
+ rect.origin.x -= kBorderEffectSize;
+ rect.origin.y -= kBorderEffectSize;
+ rect.size.width += kBorderEffectSize * 2;
+ rect.size.height += kBorderEffectSize * 2;
+ // |rect| is in DIP, so convert to physical pixels.
+ return ScaleAndRoundCGRect(rect, dip_to_pixel_scale);
+}
+
+// Create an image of the given region using the given |window_list|.
+// |pixel_bounds| should be in the primary display's coordinate in physical
+// pixels. The caller should release the returned CGImageRef and CFDataRef.
+CGImageRef CreateExcludedWindowRegionImage(const DesktopRect& pixel_bounds,
+ float dip_to_pixel_scale,
+ CFArrayRef window_list) {
+ CGRect window_bounds;
+ // The origin is in DIP while the size is in physical pixels. That's what
+ // CGWindowListCreateImageFromArray expects.
+ window_bounds.origin.x = pixel_bounds.left() / dip_to_pixel_scale;
+ window_bounds.origin.y = pixel_bounds.top() / dip_to_pixel_scale;
+ window_bounds.size.width = pixel_bounds.width();
+ window_bounds.size.height = pixel_bounds.height();
+
+ return CGWindowListCreateImageFromArray(
+ window_bounds, window_list, kCGWindowImageDefault);
+}
+
+// A class to perform video frame capturing for mac.
+class ScreenCapturerMac : public DesktopCapturer {
+ public:
+ explicit ScreenCapturerMac(
+ rtc::scoped_refptr<DesktopConfigurationMonitor> desktop_config_monitor,
+ bool detect_updated_region);
+ ~ScreenCapturerMac() override;
+
+ bool Init();
+
+ // DesktopCapturer interface.
+ void Start(Callback* callback) override;
+ void CaptureFrame() override;
+ void SetExcludedWindow(WindowId window) override;
+ bool GetSourceList(SourceList* screens) override;
+ bool SelectSource(SourceId id) override;
+
+ private:
+ // Returns false if the selected screen is no longer valid.
+ bool CgBlit(const DesktopFrame& frame, const DesktopRegion& region);
+
+ // Called when the screen configuration is changed.
+ void ScreenConfigurationChanged();
+
+ bool RegisterRefreshAndMoveHandlers();
+ void UnregisterRefreshAndMoveHandlers();
+
+ void ScreenRefresh(CGRectCount count,
+ const CGRect *rect_array,
+ DesktopVector display_origin);
+ void ReleaseBuffers();
+
+ std::unique_ptr<DesktopFrame> CreateFrame();
+
+ const bool detect_updated_region_;
+
+ Callback* callback_ = nullptr;
+
+ ScopedPixelBufferObject pixel_buffer_object_;
+
+ // Queue of the frames buffers.
+ ScreenCaptureFrameQueue<SharedDesktopFrame> queue_;
+
+ // Current display configuration.
+ MacDesktopConfiguration desktop_config_;
+
+ // Currently selected display, or 0 if the full desktop is selected. On OS X
+ // 10.6 and before, this is always 0.
+ CGDirectDisplayID current_display_ = 0;
+
+ // The physical pixel bounds of the current screen.
+ DesktopRect screen_pixel_bounds_;
+
+ // The dip to physical pixel scale of the current screen.
+ float dip_to_pixel_scale_ = 1.0f;
+
+ // A thread-safe list of invalid rectangles, and the size of the most
+ // recently captured screen.
+ ScreenCapturerHelper helper_;
+
+ // Contains an invalid region from the previous capture.
+ DesktopRegion last_invalid_region_;
+
+ // Monitoring display reconfiguration.
+ rtc::scoped_refptr<DesktopConfigurationMonitor> desktop_config_monitor_;
+
+ CGWindowID excluded_window_ = 0;
+
+ // A self-owned object that will destroy itself after ScreenCapturerMac and
+  // all display streams have been destroyed.
+ DisplayStreamManager* display_stream_manager_;
+
+  // Used to force CaptureFrame to update its screen configuration
+  // and to reregister event handlers. This ensures that those updates
+  // occur on the ScreenCapture thread. Read and written from
+  // both the VideoCapture thread and the ScreenCapture thread.
+  // Protected by desktop_config_monitor_.
+ bool update_screen_configuration_ = false;
+
+ RTC_DISALLOW_COPY_AND_ASSIGN(ScreenCapturerMac);
+};
+
+// DesktopFrame wrapper that flips wrapped frame upside down by inverting
+// stride.
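+// (Note added for clarity: data() points at the start of the wrapped frame's
+// last row, and the negative stride makes each successive row pointer step
+// one row upward, so the image reads flipped without copying any pixels.)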
+class InvertedDesktopFrame : public DesktopFrame {
+ public:
+ InvertedDesktopFrame(std::unique_ptr<DesktopFrame> frame)
+ : DesktopFrame(
+ frame->size(),
+ -frame->stride(),
+ frame->data() + (frame->size().height() - 1) * frame->stride(),
+ frame->shared_memory()) {
+ original_frame_ = std::move(frame);
+ MoveFrameInfoFrom(original_frame_.get());
+ }
+ ~InvertedDesktopFrame() override {}
+
+ private:
+ std::unique_ptr<DesktopFrame> original_frame_;
+
+ RTC_DISALLOW_COPY_AND_ASSIGN(InvertedDesktopFrame);
+};
+
+ScreenCapturerMac::ScreenCapturerMac(
+ rtc::scoped_refptr<DesktopConfigurationMonitor> desktop_config_monitor,
+ bool detect_updated_region)
+ : detect_updated_region_(detect_updated_region),
+ desktop_config_monitor_(desktop_config_monitor) {
+ display_stream_manager_ = new DisplayStreamManager;
+}
+
+ScreenCapturerMac::~ScreenCapturerMac() {
+ ReleaseBuffers();
+ UnregisterRefreshAndMoveHandlers();
+ display_stream_manager_->PrepareForSelfDestruction();
+}
+
+bool ScreenCapturerMac::Init() {
+ desktop_config_monitor_->Lock();
+ desktop_config_ = desktop_config_monitor_->desktop_configuration();
+ desktop_config_monitor_->Unlock();
+ if (!RegisterRefreshAndMoveHandlers()) {
+ return false;
+ }
+ ScreenConfigurationChanged();
+ return true;
+}
+
+void ScreenCapturerMac::ReleaseBuffers() {
+ // The buffers might be in use by the encoder, so don't delete them here.
+ // Instead, mark them as "needs update"; next time the buffers are used by
+ // the capturer, they will be recreated if necessary.
+ queue_.Reset();
+}
+
+void ScreenCapturerMac::Start(Callback* callback) {
+ assert(!callback_);
+ assert(callback);
+
+ callback_ = callback;
+ desktop_config_monitor_->Lock();
+ update_screen_configuration_ = true;
+ desktop_config_monitor_->Unlock();
+}
+
+void ScreenCapturerMac::CaptureFrame() {
+ int64_t capture_start_time_nanos = rtc::TimeNanos();
+
+  // Spin the RunLoop for 1/100th of a second, handling at most one source.
+ CFRunLoopRunInMode(kCFRunLoopDefaultMode, 0.01, true);
+
+ queue_.MoveToNextFrame();
+ RTC_DCHECK(!queue_.current_frame() || !queue_.current_frame()->IsShared());
+
+ desktop_config_monitor_->Lock();
+ MacDesktopConfiguration new_config =
+ desktop_config_monitor_->desktop_configuration();
+ if (update_screen_configuration_ || !desktop_config_.Equals(new_config)) {
+ update_screen_configuration_ = false;
+ desktop_config_ = new_config;
+    // If the display configuration has changed then refresh capturer data
+ // structures. Occasionally, the refresh and move handlers are lost when
+ // the screen mode changes, so re-register them here.
+ UnregisterRefreshAndMoveHandlers();
+ RegisterRefreshAndMoveHandlers();
+ ScreenConfigurationChanged();
+ }
+
+ DesktopRegion region;
+ helper_.TakeInvalidRegion(&region);
+
+ // If the current buffer is from an older generation then allocate a new one.
+ // Note that we can't reallocate other buffers at this point, since the caller
+ // may still be reading from them.
+ if (!queue_.current_frame())
+ queue_.ReplaceCurrentFrame(SharedDesktopFrame::Wrap(CreateFrame()));
+
+ DesktopFrame* current_frame = queue_.current_frame();
+
+ if (!CgBlit(*current_frame, region)) {
+ desktop_config_monitor_->Unlock();
+ callback_->OnCaptureResult(Result::ERROR_PERMANENT, nullptr);
+ return;
+ }
+ std::unique_ptr<DesktopFrame> new_frame = queue_.current_frame()->Share();
+ if (detect_updated_region_) {
+ *new_frame->mutable_updated_region() = region;
+ } else {
+ new_frame->mutable_updated_region()->AddRect(
+ DesktopRect::MakeSize(new_frame->size()));
+ }
+
+ if (current_display_) {
+ const MacDisplayConfiguration* config =
+ desktop_config_.FindDisplayConfigurationById(current_display_);
+ if (config) {
+ new_frame->set_top_left(config->bounds.top_left().subtract(
+ desktop_config_.bounds.top_left()));
+ }
+ }
+
+ helper_.set_size_most_recent(new_frame->size());
+
+ // Signal that we are done capturing data from the display framebuffer,
+ // and accessing display structures.
+ desktop_config_monitor_->Unlock();
+
+ new_frame->set_capture_time_ms((rtc::TimeNanos() - capture_start_time_nanos) /
+ rtc::kNumNanosecsPerMillisec);
+ callback_->OnCaptureResult(Result::SUCCESS, std::move(new_frame));
+}
+
+void ScreenCapturerMac::SetExcludedWindow(WindowId window) {
+ excluded_window_ = window;
+}
+
+bool ScreenCapturerMac::GetSourceList(SourceList* screens) {
+ assert(screens->size() == 0);
+
+ for (const auto& display : desktop_config_.displays) {
+ screens->push_back({display.id});
+ }
+ return true;
+}
+
+bool ScreenCapturerMac::SelectSource(SourceId id) {
+ if (id == kFullDesktopScreenId) {
+ current_display_ = 0;
+ } else {
+ const MacDisplayConfiguration* config =
+ desktop_config_.FindDisplayConfigurationById(
+ static_cast<CGDirectDisplayID>(id));
+ if (!config)
+ return false;
+ current_display_ = config->id;
+ }
+
+ ScreenConfigurationChanged();
+ return true;
+}
+
+bool ScreenCapturerMac::CgBlit(const DesktopFrame& frame, const DesktopRegion& region) {
+ // Copy the entire contents of the previous capture buffer, to capture over.
+ // TODO(wez): Get rid of this as per crbug.com/145064, or implement
+ // crbug.com/92354.
+ if (queue_.previous_frame()) {
+ memcpy(frame.data(), queue_.previous_frame()->data(),
+ frame.stride() * frame.size().height());
+ }
+
+ MacDisplayConfigurations displays_to_capture;
+ if (current_display_) {
+ // Capturing a single screen. Note that the screen id may change when
+ // screens are added or removed.
+ const MacDisplayConfiguration* config =
+ desktop_config_.FindDisplayConfigurationById(current_display_);
+ if (config) {
+ displays_to_capture.push_back(*config);
+ } else {
+ RTC_LOG(LS_ERROR) << "The selected screen cannot be found for capturing.";
+ return false;
+ }
+ } else {
+ // Capturing the whole desktop.
+ displays_to_capture = desktop_config_.displays;
+ }
+
+ // Create the window list once for all displays.
+ CFArrayRef window_list = CreateWindowListWithExclusion(excluded_window_);
+
+ for (size_t i = 0; i < displays_to_capture.size(); ++i) {
+ const MacDisplayConfiguration& display_config = displays_to_capture[i];
+
+ // Capturing mixed-DPI on one surface is hard, so we only capture displays
+ // whose DPI matches the "primary" display's. The primary display is always
+ // the first in the list.
+ if (i > 0 && display_config.dip_to_pixel_scale !=
+ displays_to_capture[0].dip_to_pixel_scale) {
+ continue;
+ }
+ // Determine the display's position relative to the desktop, in pixels.
+ DesktopRect display_bounds = display_config.pixel_bounds;
+ display_bounds.Translate(-screen_pixel_bounds_.left(),
+ -screen_pixel_bounds_.top());
+
+ // Determine which parts of the blit region, if any, lie within the monitor.
+ DesktopRegion copy_region = region;
+ copy_region.IntersectWith(display_bounds);
+ if (copy_region.is_empty())
+ continue;
+
+ // Translate the region to be copied into display-relative coordinates.
+ copy_region.Translate(-display_bounds.left(), -display_bounds.top());
+
+ DesktopRect excluded_window_bounds;
+ CGImageRef excluded_image = nullptr;
+ if (excluded_window_ && window_list) {
+ // Get the region of the excluded window relative to the primary display.
+ excluded_window_bounds = GetExcludedWindowPixelBounds(
+ excluded_window_, display_config.dip_to_pixel_scale);
+ excluded_window_bounds.IntersectWith(display_config.pixel_bounds);
+
+ // Create the image under the excluded window first, because it's faster
+ // than capturing the whole display.
+ if (!excluded_window_bounds.is_empty()) {
+ excluded_image = CreateExcludedWindowRegionImage(
+ excluded_window_bounds, display_config.dip_to_pixel_scale,
+ window_list);
+ }
+ }
+
+ // Create an image containing a snapshot of the display.
+ CGImageRef image = CGDisplayCreateImage(display_config.id);
+ if (!image) {
+ if (excluded_image)
+ CFRelease(excluded_image);
+ continue;
+ }
+
+ // Verify that the image has 32-bit depth.
+ int bits_per_pixel = CGImageGetBitsPerPixel(image);
+ if (bits_per_pixel / 8 != DesktopFrame::kBytesPerPixel) {
+ RTC_LOG(LS_ERROR) << "CGDisplayCreateImage() returned imaged with " << bits_per_pixel
+ << " bits per pixel. Only 32-bit depth is supported.";
+ CFRelease(image);
+ if (excluded_image)
+ CFRelease(excluded_image);
+ return false;
+ }
+
+ // Request access to the raw pixel data via the image's DataProvider.
+ CGDataProviderRef provider = CGImageGetDataProvider(image);
+ CFDataRef data = CGDataProviderCopyData(provider);
+ assert(data);
+
+ const uint8_t* display_base_address = CFDataGetBytePtr(data);
+ int src_bytes_per_row = CGImageGetBytesPerRow(image);
+
+ // |image| size may be different from display_bounds in case the screen was
+ // resized recently.
+ copy_region.IntersectWith(
+ DesktopRect::MakeWH(CGImageGetWidth(image), CGImageGetHeight(image)));
+
+ // Copy the dirty region from the display buffer into our desktop buffer.
+ uint8_t* out_ptr = frame.GetFrameDataAtPos(display_bounds.top_left());
+ for (DesktopRegion::Iterator i(copy_region); !i.IsAtEnd(); i.Advance()) {
+ CopyRect(display_base_address, src_bytes_per_row, out_ptr, frame.stride(),
+ DesktopFrame::kBytesPerPixel, i.rect());
+ }
+
+ CFRelease(data);
+ CFRelease(image);
+
+ if (excluded_image) {
+ CGDataProviderRef provider = CGImageGetDataProvider(excluded_image);
+ CFDataRef excluded_image_data = CGDataProviderCopyData(provider);
+ assert(excluded_image_data);
+ display_base_address = CFDataGetBytePtr(excluded_image_data);
+ src_bytes_per_row = CGImageGetBytesPerRow(excluded_image);
+
+ // Translate the bounds relative to the desktop, because |frame| data
+ // starts from the desktop top-left corner.
+ DesktopRect window_bounds_relative_to_desktop(excluded_window_bounds);
+ window_bounds_relative_to_desktop.Translate(-screen_pixel_bounds_.left(),
+ -screen_pixel_bounds_.top());
+
+ DesktopRect rect_to_copy =
+ DesktopRect::MakeSize(excluded_window_bounds.size());
+ rect_to_copy.IntersectWith(DesktopRect::MakeWH(
+ CGImageGetWidth(excluded_image), CGImageGetHeight(excluded_image)));
+
+ if (CGImageGetBitsPerPixel(excluded_image) / 8 ==
+ DesktopFrame::kBytesPerPixel) {
+ CopyRect(display_base_address, src_bytes_per_row,
+ frame.GetFrameDataAtPos(
+ window_bounds_relative_to_desktop.top_left()),
+ frame.stride(), DesktopFrame::kBytesPerPixel, rect_to_copy);
+ }
+
+ CFRelease(excluded_image_data);
+ CFRelease(excluded_image);
+ }
+ }
+ if (window_list)
+ CFRelease(window_list);
+ return true;
+}
+
+void ScreenCapturerMac::ScreenConfigurationChanged() {
+ if (current_display_) {
+ const MacDisplayConfiguration* config =
+ desktop_config_.FindDisplayConfigurationById(current_display_);
+ screen_pixel_bounds_ = config ? config->pixel_bounds : DesktopRect();
+ dip_to_pixel_scale_ = config ? config->dip_to_pixel_scale : 1.0f;
+ } else {
+ screen_pixel_bounds_ = desktop_config_.pixel_bounds;
+ dip_to_pixel_scale_ = desktop_config_.dip_to_pixel_scale;
+ }
+
+ // Release existing buffers, which will be of the wrong size.
+ ReleaseBuffers();
+
+ // Clear the dirty region, in case the display is down-sizing.
+ helper_.ClearInvalidRegion();
+
+ // Re-mark the entire desktop as dirty.
+ helper_.InvalidateScreen(screen_pixel_bounds_.size());
+
+ // Make sure the frame buffers will be reallocated.
+ queue_.Reset();
+}
+
+bool ScreenCapturerMac::RegisterRefreshAndMoveHandlers() {
+ desktop_config_ = desktop_config_monitor_->desktop_configuration();
+ for (const auto& config : desktop_config_.displays) {
+ size_t pixel_width = config.pixel_bounds.width();
+ size_t pixel_height = config.pixel_bounds.height();
+ if (pixel_width == 0 || pixel_height == 0)
+ continue;
+ // Using a local variable forces the block to capture the raw pointer.
+ DisplayStreamManager* manager = display_stream_manager_;
+ int unique_id = manager->GetUniqueId();
+ CGDirectDisplayID display_id = config.id;
+ DesktopVector display_origin = config.pixel_bounds.top_left();
+
+ CGDisplayStreamFrameAvailableHandler handler =
+ ^(CGDisplayStreamFrameStatus status, uint64_t display_time,
+ IOSurfaceRef frame_surface, CGDisplayStreamUpdateRef updateRef) {
+ if (status == kCGDisplayStreamFrameStatusStopped) {
+ manager->DestroyStream(unique_id);
+ return;
+ }
+
+ if (manager->ShouldIgnoreUpdates())
+ return;
+
+ // Only pay attention to frame updates.
+ if (status != kCGDisplayStreamFrameStatusFrameComplete)
+ return;
+
+ size_t count = 0;
+ const CGRect* rects = CGDisplayStreamUpdateGetRects(
+ updateRef, kCGDisplayStreamUpdateDirtyRects, &count);
+ if (count != 0) {
+ // According to CGDisplayStream.h, it's safe to call
+ // CGDisplayStreamStop() from within the callback.
+ ScreenRefresh(count, rects, display_origin);
+ }
+ };
+ CGDisplayStreamRef display_stream = CGDisplayStreamCreate(
+ display_id, pixel_width, pixel_height, 'BGRA', nullptr, handler);
+
+ if (display_stream) {
+ CGError error = CGDisplayStreamStart(display_stream);
+ if (error != kCGErrorSuccess)
+ return false;
+
+ CFRunLoopSourceRef source =
+ CGDisplayStreamGetRunLoopSource(display_stream);
+ CFRunLoopAddSource(CFRunLoopGetCurrent(), source, kCFRunLoopCommonModes);
+ display_stream_manager_->SaveStream(unique_id, display_stream);
+ }
+ }
+
+ return true;
+}
+
+void ScreenCapturerMac::UnregisterRefreshAndMoveHandlers() {
+ display_stream_manager_->UnregisterActiveStreams();
+}
+
+void ScreenCapturerMac::ScreenRefresh(CGRectCount count,
+ const CGRect* rect_array,
+ DesktopVector display_origin) {
+ if (screen_pixel_bounds_.is_empty())
+ ScreenConfigurationChanged();
+
+ // The refresh rects are in display coordinates. We want to translate to
+ // framebuffer coordinates. If a specific display is being captured, then no
+ // change is necessary. If all displays are being captured, then we want to
+ // translate by the origin of the display.
+ DesktopVector translate_vector;
+ if (!current_display_)
+ translate_vector = display_origin;
+
+ DesktopRegion region;
+ for (CGRectCount i = 0; i < count; ++i) {
+ // All rects are already in physical pixel coordinates.
+ DesktopRect rect = DesktopRect::MakeXYWH(
+ rect_array[i].origin.x, rect_array[i].origin.y,
+ rect_array[i].size.width, rect_array[i].size.height);
+
+ rect.Translate(translate_vector);
+
+ region.AddRect(rect);
+ }
+
+ helper_.InvalidateRegion(region);
+}
+
+std::unique_ptr<DesktopFrame> ScreenCapturerMac::CreateFrame() {
+ std::unique_ptr<DesktopFrame> frame(
+ new BasicDesktopFrame(screen_pixel_bounds_.size()));
+ frame->set_dpi(DesktopVector(kStandardDPI * dip_to_pixel_scale_,
+ kStandardDPI * dip_to_pixel_scale_));
+ return frame;
+}
+
+} // namespace
+
+// static
+std::unique_ptr<DesktopCapturer> DesktopCapturer::CreateRawScreenCapturer(
+ const DesktopCaptureOptions& options) {
+ if (!options.configuration_monitor())
+ return nullptr;
+
+ std::unique_ptr<ScreenCapturerMac> capturer(new ScreenCapturerMac(
+ options.configuration_monitor(), options.detect_updated_region()));
+ if (!capturer->Init()) {
+ return nullptr;
+ }
+
+ return capturer;
+}
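+
+// A minimal usage sketch of this factory (illustrative only; MyCallback
+// stands for any DesktopCapturer::Callback implementation):
+//
+//   DesktopCaptureOptions options = DesktopCaptureOptions::CreateDefault();
+//   std::unique_ptr<DesktopCapturer> capturer =
+//       DesktopCapturer::CreateRawScreenCapturer(options);
+//   MyCallback callback;  // Receives OnCaptureResult() for every frame.
+//   capturer->Start(&callback);
+//   capturer->CaptureFrame();  // Call once per frame, on the capture thread.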
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/screen_capturer_mac_unittest.cc b/third_party/libwebrtc/modules/desktop_capture/screen_capturer_mac_unittest.cc
new file mode 100644
index 0000000000..96e844066a
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/screen_capturer_mac_unittest.cc
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <ApplicationServices/ApplicationServices.h>
+
+#include <memory>
+#include <ostream>
+
+#include "modules/desktop_capture/desktop_capture_options.h"
+#include "modules/desktop_capture/desktop_capturer.h"
+#include "modules/desktop_capture/desktop_frame.h"
+#include "modules/desktop_capture/desktop_geometry.h"
+#include "modules/desktop_capture/desktop_region.h"
+#include "modules/desktop_capture/mac/desktop_configuration.h"
+#include "modules/desktop_capture/mock_desktop_capturer_callback.h"
+#include "test/gtest.h"
+
+using ::testing::_;
+using ::testing::AnyNumber;
+using ::testing::Invoke;
+using ::testing::Return;
+
+namespace webrtc {
+
+class ScreenCapturerMacTest : public ::testing::Test {
+ public:
+ // Verifies that the whole screen is initially dirty.
+ void CaptureDoneCallback1(DesktopCapturer::Result result,
+ std::unique_ptr<DesktopFrame>* frame);
+
+ // Verifies that a rectangle explicitly marked as dirty is propagated
+ // correctly.
+ void CaptureDoneCallback2(DesktopCapturer::Result result,
+ std::unique_ptr<DesktopFrame>* frame);
+
+ protected:
+ void SetUp() override {
+ capturer_ = DesktopCapturer::CreateScreenCapturer(
+ DesktopCaptureOptions::CreateDefault());
+ }
+
+ std::unique_ptr<DesktopCapturer> capturer_;
+ MockDesktopCapturerCallback callback_;
+};
+
+void ScreenCapturerMacTest::CaptureDoneCallback1(
+ DesktopCapturer::Result result,
+ std::unique_ptr<DesktopFrame>* frame) {
+ EXPECT_EQ(result, DesktopCapturer::Result::SUCCESS);
+
+ MacDesktopConfiguration config = MacDesktopConfiguration::GetCurrent(
+ MacDesktopConfiguration::BottomLeftOrigin);
+
+ // Verify that the region contains the full frame.
+ DesktopRegion::Iterator it((*frame)->updated_region());
+ EXPECT_TRUE(!it.IsAtEnd() && it.rect().equals(config.pixel_bounds));
+}
+
+void ScreenCapturerMacTest::CaptureDoneCallback2(
+ DesktopCapturer::Result result,
+ std::unique_ptr<DesktopFrame>* frame) {
+ EXPECT_EQ(result, DesktopCapturer::Result::SUCCESS);
+
+ MacDesktopConfiguration config = MacDesktopConfiguration::GetCurrent(
+ MacDesktopConfiguration::BottomLeftOrigin);
+ int width = config.pixel_bounds.width();
+ int height = config.pixel_bounds.height();
+
+ EXPECT_EQ(width, (*frame)->size().width());
+ EXPECT_EQ(height, (*frame)->size().height());
+ EXPECT_TRUE((*frame)->data() != NULL);
+ // Depending on the capture method, the screen may be flipped or not, so
+ // the stride may be positive or negative.
+ EXPECT_EQ(static_cast<int>(sizeof(uint32_t) * width),
+ abs((*frame)->stride()));
+}
+
+TEST_F(ScreenCapturerMacTest, Capture) {
+ EXPECT_CALL(callback_,
+ OnCaptureResultPtr(DesktopCapturer::Result::SUCCESS, _))
+ .Times(2)
+ .WillOnce(Invoke(this, &ScreenCapturerMacTest::CaptureDoneCallback1))
+ .WillOnce(Invoke(this, &ScreenCapturerMacTest::CaptureDoneCallback2));
+
+ SCOPED_TRACE("");
+ capturer_->Start(&callback_);
+
+ // Check that we get an initial full-screen update.
+ capturer_->CaptureFrame();
+
+ // Check that subsequent dirty rects are propagated correctly.
+ capturer_->CaptureFrame();
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/screen_capturer_null.cc b/third_party/libwebrtc/modules/desktop_capture/screen_capturer_null.cc
new file mode 100644
index 0000000000..6b1ccb322e
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/screen_capturer_null.cc
@@ -0,0 +1,21 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/desktop_capturer.h"
+
+namespace webrtc {
+
+// static
+std::unique_ptr<DesktopCapturer> DesktopCapturer::CreateRawScreenCapturer(
+ const DesktopCaptureOptions& options) {
+ return nullptr;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/screen_capturer_unittest.cc b/third_party/libwebrtc/modules/desktop_capture/screen_capturer_unittest.cc
new file mode 100644
index 0000000000..8f5fe631f1
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/screen_capturer_unittest.cc
@@ -0,0 +1,224 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <memory>
+
+#include "modules/desktop_capture/desktop_capture_options.h"
+#include "modules/desktop_capture/desktop_capturer.h"
+#include "modules/desktop_capture/desktop_frame.h"
+#include "modules/desktop_capture/desktop_region.h"
+#include "modules/desktop_capture/mock_desktop_capturer_callback.h"
+#include "rtc_base/logging.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+#if defined(WEBRTC_WIN)
+#include "modules/desktop_capture/win/screen_capturer_win_directx.h"
+#endif // defined(WEBRTC_WIN)
+
+using ::testing::_;
+
+const int kTestSharedMemoryId = 123;
+
+namespace webrtc {
+
+class ScreenCapturerTest : public ::testing::Test {
+ public:
+ void SetUp() override {
+ capturer_ = DesktopCapturer::CreateScreenCapturer(
+ DesktopCaptureOptions::CreateDefault());
+ ASSERT_TRUE(capturer_);
+ }
+
+ protected:
+#if defined(WEBRTC_WIN)
+ // Enable allow_directx_capturer in DesktopCaptureOptions, but let
+ // DesktopCapturer::CreateScreenCapturer decide whether a DirectX capturer
+ // should be used.
+ void MaybeCreateDirectxCapturer() {
+ DesktopCaptureOptions options(DesktopCaptureOptions::CreateDefault());
+ options.set_allow_directx_capturer(true);
+ capturer_ = DesktopCapturer::CreateScreenCapturer(options);
+ }
+
+ bool CreateDirectxCapturer() {
+ if (!ScreenCapturerWinDirectx::IsSupported()) {
+ RTC_LOG(LS_WARNING) << "Directx capturer is not supported";
+ return false;
+ }
+
+ MaybeCreateDirectxCapturer();
+ return true;
+ }
+
+ void CreateMagnifierCapturer() {
+ DesktopCaptureOptions options(DesktopCaptureOptions::CreateDefault());
+ options.set_allow_use_magnification_api(true);
+ capturer_ = DesktopCapturer::CreateScreenCapturer(options);
+ }
+#endif // defined(WEBRTC_WIN)
+
+ std::unique_ptr<DesktopCapturer> capturer_;
+ MockDesktopCapturerCallback callback_;
+};
+
+class FakeSharedMemory : public SharedMemory {
+ public:
+ FakeSharedMemory(char* buffer, size_t size)
+ : SharedMemory(buffer, size, 0, kTestSharedMemoryId), buffer_(buffer) {}
+ ~FakeSharedMemory() override { delete[] buffer_; }
+
+ FakeSharedMemory(const FakeSharedMemory&) = delete;
+ FakeSharedMemory& operator=(const FakeSharedMemory&) = delete;
+
+ private:
+ char* buffer_;
+};
+
+class FakeSharedMemoryFactory : public SharedMemoryFactory {
+ public:
+ FakeSharedMemoryFactory() {}
+ ~FakeSharedMemoryFactory() override {}
+
+ FakeSharedMemoryFactory(const FakeSharedMemoryFactory&) = delete;
+ FakeSharedMemoryFactory& operator=(const FakeSharedMemoryFactory&) = delete;
+
+ std::unique_ptr<SharedMemory> CreateSharedMemory(size_t size) override {
+ return std::unique_ptr<SharedMemory>(
+ new FakeSharedMemory(new char[size], size));
+ }
+};
+
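+// gMock action used below: dereferences the mocked call's second argument
+// (arg1, a std::unique_ptr<DesktopFrame>*) and moves the frame into the
+// pointer bound as `dest`.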
+ACTION_P(SaveUniquePtrArg, dest) {
+ *dest = std::move(*arg1);
+}
+
+// TODO(bugs.webrtc.org/12950): Re-enable when libc++ issue is fixed.
+#if defined(WEBRTC_LINUX) && defined(MEMORY_SANITIZER)
+#define MAYBE_GetScreenListAndSelectScreen DISABLED_GetScreenListAndSelectScreen
+#else
+#define MAYBE_GetScreenListAndSelectScreen GetScreenListAndSelectScreen
+#endif
+TEST_F(ScreenCapturerTest, MAYBE_GetScreenListAndSelectScreen) {
+ webrtc::DesktopCapturer::SourceList screens;
+ EXPECT_TRUE(capturer_->GetSourceList(&screens));
+ for (const auto& screen : screens) {
+ EXPECT_TRUE(capturer_->SelectSource(screen.id));
+ }
+}
+
+// Flaky on Linux. See: crbug.com/webrtc/7830
+#if defined(WEBRTC_LINUX)
+#define MAYBE_StartCapturer DISABLED_StartCapturer
+#else
+#define MAYBE_StartCapturer StartCapturer
+#endif
+TEST_F(ScreenCapturerTest, MAYBE_StartCapturer) {
+ capturer_->Start(&callback_);
+}
+
+#if defined(WEBRTC_LINUX)
+#define MAYBE_Capture DISABLED_Capture
+#else
+#define MAYBE_Capture Capture
+#endif
+TEST_F(ScreenCapturerTest, MAYBE_Capture) {
+ // Assume that Start() treats the screen as invalid initially.
+ std::unique_ptr<DesktopFrame> frame;
+ EXPECT_CALL(callback_,
+ OnCaptureResultPtr(DesktopCapturer::Result::SUCCESS, _))
+ .WillOnce(SaveUniquePtrArg(&frame));
+
+ capturer_->Start(&callback_);
+ capturer_->CaptureFrame();
+
+ ASSERT_TRUE(frame);
+ EXPECT_GT(frame->size().width(), 0);
+ EXPECT_GT(frame->size().height(), 0);
+ EXPECT_GE(frame->stride(),
+ frame->size().width() * DesktopFrame::kBytesPerPixel);
+ EXPECT_TRUE(frame->shared_memory() == NULL);
+
+ // Verify that the region contains the whole screen.
+ EXPECT_FALSE(frame->updated_region().is_empty());
+ DesktopRegion::Iterator it(frame->updated_region());
+ ASSERT_TRUE(!it.IsAtEnd());
+ EXPECT_TRUE(it.rect().equals(DesktopRect::MakeSize(frame->size())));
+ it.Advance();
+ EXPECT_TRUE(it.IsAtEnd());
+}
+
+#if defined(WEBRTC_WIN)
+
+TEST_F(ScreenCapturerTest, UseSharedBuffers) {
+ std::unique_ptr<DesktopFrame> frame;
+ EXPECT_CALL(callback_,
+ OnCaptureResultPtr(DesktopCapturer::Result::SUCCESS, _))
+ .WillOnce(SaveUniquePtrArg(&frame));
+
+ capturer_->Start(&callback_);
+ capturer_->SetSharedMemoryFactory(
+ std::unique_ptr<SharedMemoryFactory>(new FakeSharedMemoryFactory()));
+ capturer_->CaptureFrame();
+
+ ASSERT_TRUE(frame);
+ ASSERT_TRUE(frame->shared_memory());
+ EXPECT_EQ(frame->shared_memory()->id(), kTestSharedMemoryId);
+}
+
+TEST_F(ScreenCapturerTest, UseMagnifier) {
+ CreateMagnifierCapturer();
+ std::unique_ptr<DesktopFrame> frame;
+ EXPECT_CALL(callback_,
+ OnCaptureResultPtr(DesktopCapturer::Result::SUCCESS, _))
+ .WillOnce(SaveUniquePtrArg(&frame));
+
+ capturer_->Start(&callback_);
+ capturer_->CaptureFrame();
+ ASSERT_TRUE(frame);
+}
+
+TEST_F(ScreenCapturerTest, UseDirectxCapturer) {
+ if (!CreateDirectxCapturer()) {
+ return;
+ }
+
+ std::unique_ptr<DesktopFrame> frame;
+ EXPECT_CALL(callback_,
+ OnCaptureResultPtr(DesktopCapturer::Result::SUCCESS, _))
+ .WillOnce(SaveUniquePtrArg(&frame));
+
+ capturer_->Start(&callback_);
+ capturer_->CaptureFrame();
+ ASSERT_TRUE(frame);
+}
+
+TEST_F(ScreenCapturerTest, UseDirectxCapturerWithSharedBuffers) {
+ if (!CreateDirectxCapturer()) {
+ return;
+ }
+
+ std::unique_ptr<DesktopFrame> frame;
+ EXPECT_CALL(callback_,
+ OnCaptureResultPtr(DesktopCapturer::Result::SUCCESS, _))
+ .WillOnce(SaveUniquePtrArg(&frame));
+
+ capturer_->Start(&callback_);
+ capturer_->SetSharedMemoryFactory(
+ std::unique_ptr<SharedMemoryFactory>(new FakeSharedMemoryFactory()));
+ capturer_->CaptureFrame();
+ ASSERT_TRUE(frame);
+ ASSERT_TRUE(frame->shared_memory());
+ EXPECT_EQ(frame->shared_memory()->id(), kTestSharedMemoryId);
+}
+
+#endif // defined(WEBRTC_WIN)
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/screen_capturer_win.cc b/third_party/libwebrtc/modules/desktop_capture/screen_capturer_win.cc
new file mode 100644
index 0000000000..b5935dc316
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/screen_capturer_win.cc
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <memory>
+#include <utility>
+
+#include "modules/desktop_capture/blank_detector_desktop_capturer_wrapper.h"
+#include "modules/desktop_capture/desktop_capture_options.h"
+#include "modules/desktop_capture/desktop_capturer.h"
+#include "modules/desktop_capture/fallback_desktop_capturer_wrapper.h"
+#include "modules/desktop_capture/rgba_color.h"
+#include "modules/desktop_capture/win/screen_capturer_win_directx.h"
+#include "modules/desktop_capture/win/screen_capturer_win_gdi.h"
+#include "modules/desktop_capture/win/screen_capturer_win_magnifier.h"
+
+namespace webrtc {
+
+namespace {
+
+std::unique_ptr<DesktopCapturer> CreateScreenCapturerWinDirectx() {
+ std::unique_ptr<DesktopCapturer> capturer(new ScreenCapturerWinDirectx());
+ capturer.reset(new BlankDetectorDesktopCapturerWrapper(
+ std::move(capturer), RgbaColor(0, 0, 0, 0)));
+ return capturer;
+}
+
+} // namespace
+
+// static
+std::unique_ptr<DesktopCapturer> DesktopCapturer::CreateRawScreenCapturer(
+ const DesktopCaptureOptions& options) {
+ std::unique_ptr<DesktopCapturer> capturer(new ScreenCapturerWinGdi(options));
+ if (options.allow_directx_capturer()) {
+ // Keep `dxgi_duplicator_controller` alive in this scope to ensure the
+ // DxgiDuplicatorController singleton is not unloaded.
+ auto dxgi_duplicator_controller = DxgiDuplicatorController::Instance();
+ if (ScreenCapturerWinDirectx::IsSupported()) {
+ capturer.reset(new FallbackDesktopCapturerWrapper(
+ CreateScreenCapturerWinDirectx(), std::move(capturer)));
+ }
+ }
+
+ if (options.allow_use_magnification_api()) {
+ // ScreenCapturerWinMagnifier does not work on Windows XP or earlier, nor on
+ // 64-bit-only Windows, and it may randomly crash on multi-screen systems.
+ // So we may need to fall back to the original capturer.
+ capturer.reset(new FallbackDesktopCapturerWrapper(
+ std::unique_ptr<DesktopCapturer>(new ScreenCapturerWinMagnifier()),
+ std::move(capturer)));
+ }
+
+ return capturer;
+}
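+
+// With both options enabled, the resulting wrapper chain is, outermost first:
+// Fallback(Magnifier, Fallback(BlankDetector(DirectX), GDI)), where each
+// FallbackDesktopCapturerWrapper tries its primary capturer before handing
+// off to its secondary one.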
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/screen_drawer.cc b/third_party/libwebrtc/modules/desktop_capture/screen_drawer.cc
new file mode 100644
index 0000000000..6460f19f65
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/screen_drawer.cc
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/screen_drawer.h"
+
+namespace webrtc {
+
+namespace {
+std::unique_ptr<ScreenDrawerLock> g_screen_drawer_lock;
+} // namespace
+
+ScreenDrawerLock::ScreenDrawerLock() = default;
+ScreenDrawerLock::~ScreenDrawerLock() = default;
+
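+// Each ScreenDrawer instance holds the platform lock for its entire lifetime,
+// so at most one drawer is active at a time, across processes.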
+ScreenDrawer::ScreenDrawer() {
+ g_screen_drawer_lock = ScreenDrawerLock::Create();
+}
+
+ScreenDrawer::~ScreenDrawer() {
+ g_screen_drawer_lock.reset();
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/screen_drawer.h b/third_party/libwebrtc/modules/desktop_capture/screen_drawer.h
new file mode 100644
index 0000000000..ad7c0ad8d1
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/screen_drawer.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_DESKTOP_CAPTURE_SCREEN_DRAWER_H_
+#define MODULES_DESKTOP_CAPTURE_SCREEN_DRAWER_H_
+
+#include "modules/desktop_capture/desktop_capture_types.h"
+#include "modules/desktop_capture/desktop_geometry.h"
+#include "modules/desktop_capture/rgba_color.h"
+
+namespace webrtc {
+
+// A cross-process lock to ensure only one ScreenDrawer can be used at a
+// time.
+class ScreenDrawerLock {
+ public:
+ virtual ~ScreenDrawerLock();
+
+ static std::unique_ptr<ScreenDrawerLock> Create();
+
+ protected:
+ ScreenDrawerLock();
+};
+
+// A set of basic platform-dependent functions to draw various shapes on the
+// screen.
+class ScreenDrawer {
+ public:
+ // Creates a ScreenDrawer for the current platform; returns nullptr if no
+ // ScreenDrawer implementation is available.
+ // If the implementation cannot guarantee two ScreenDrawer instances won't
+ // impact each other, this function may block current thread until another
+ // ScreenDrawer has been destroyed.
+ static std::unique_ptr<ScreenDrawer> Create();
+
+ ScreenDrawer();
+ virtual ~ScreenDrawer();
+
+ // Returns the region inside which the DrawRectangle() function is expected
+ // to work, in capturer coordinates (assuming ScreenCapturer::SelectScreen has
+ // not been called). This region may exclude regions of the screen reserved by
+ // the OS for things like menu bars or app launchers. The DesktopRect is in
+ // system coordinates, i.e. the primary monitor always starts from (0, 0).
+ virtual DesktopRect DrawableRegion() = 0;
+
+ // Draws a rectangle to cover `rect` with `color`. Note, rect.bottom() and
+ // rect.right() two lines are not included. The part of `rect` which is out of
+ // DrawableRegion() will be ignored.
+ virtual void DrawRectangle(DesktopRect rect, RgbaColor color) = 0;
+
+ // Clears all content on the screen by filling the area with black.
+ virtual void Clear() = 0;
+
+ // Blocks the current thread until the OS finishes previous DrawRectangle()
+ // actions. ScreenCapturer should be able to capture the changes after this
+ // function finishes.
+ virtual void WaitForPendingDraws() = 0;
+
+ // Returns true if incomplete shapes from previous draw actions may still be
+ // on the screen after a WaitForPendingDraws() call. I.e. although the
+ // complete shapes will eventually be drawn on the screen, due to OS
+ // limitations they may sometimes appear only partially.
+ virtual bool MayDrawIncompleteShapes() = 0;
+
+ // Returns the id of the drawer window. This function returns kNullWindowId if
+ // the implementation does not draw on a window of the system.
+ virtual WindowId window_id() const = 0;
+};
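+
+// A minimal usage sketch (illustrative only; Create() may return nullptr on
+// platforms without an implementation):
+//
+//   std::unique_ptr<ScreenDrawer> drawer = ScreenDrawer::Create();
+//   if (drawer) {
+//     drawer->DrawRectangle(DesktopRect::MakeXYWH(10, 10, 100, 100),
+//                           RgbaColor(255, 0, 0));
+//     drawer->WaitForPendingDraws();  // A capturer should now see the change.
+//   }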
+
+} // namespace webrtc
+
+#endif // MODULES_DESKTOP_CAPTURE_SCREEN_DRAWER_H_
diff --git a/third_party/libwebrtc/modules/desktop_capture/screen_drawer_linux.cc b/third_party/libwebrtc/modules/desktop_capture/screen_drawer_linux.cc
new file mode 100644
index 0000000000..fce036b4aa
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/screen_drawer_linux.cc
@@ -0,0 +1,185 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <X11/X.h>
+#include <X11/Xlib.h>
+#include <string.h>
+
+#include <memory>
+
+#include "api/scoped_refptr.h"
+#include "modules/desktop_capture/desktop_capture_types.h"
+#include "modules/desktop_capture/desktop_geometry.h"
+#include "modules/desktop_capture/linux/x11/shared_x_display.h"
+#include "modules/desktop_capture/rgba_color.h"
+#include "modules/desktop_capture/screen_drawer.h"
+#include "modules/desktop_capture/screen_drawer_lock_posix.h"
+#include "rtc_base/checks.h"
+#include "system_wrappers/include/sleep.h"
+
+namespace webrtc {
+
+namespace {
+
+// A ScreenDrawer implementation for X11.
+class ScreenDrawerLinux : public ScreenDrawer {
+ public:
+ ScreenDrawerLinux();
+ ~ScreenDrawerLinux() override;
+
+ // ScreenDrawer interface.
+ DesktopRect DrawableRegion() override;
+ void DrawRectangle(DesktopRect rect, RgbaColor color) override;
+ void Clear() override;
+ void WaitForPendingDraws() override;
+ bool MayDrawIncompleteShapes() override;
+ WindowId window_id() const override;
+
+ private:
+ // Brings the window to the front. This can help to avoid interference from
+ // other windows or shadow effects.
+ void BringToFront();
+
+ rtc::scoped_refptr<SharedXDisplay> display_;
+ int screen_num_;
+ DesktopRect rect_;
+ Window window_;
+ GC context_;
+ Colormap colormap_;
+};
+
+ScreenDrawerLinux::ScreenDrawerLinux() {
+ display_ = SharedXDisplay::CreateDefault();
+ RTC_CHECK(display_.get());
+ screen_num_ = DefaultScreen(display_->display());
+ XWindowAttributes root_attributes;
+ if (!XGetWindowAttributes(display_->display(),
+ RootWindow(display_->display(), screen_num_),
+ &root_attributes)) {
+ RTC_DCHECK_NOTREACHED() << "Failed to get root window size.";
+ }
+ window_ = XCreateSimpleWindow(
+ display_->display(), RootWindow(display_->display(), screen_num_), 0, 0,
+ root_attributes.width, root_attributes.height, 0,
+ BlackPixel(display_->display(), screen_num_),
+ BlackPixel(display_->display(), screen_num_));
+ XSelectInput(display_->display(), window_, StructureNotifyMask);
+ XMapWindow(display_->display(), window_);
+ while (true) {
+ XEvent event;
+ XNextEvent(display_->display(), &event);
+ if (event.type == MapNotify) {
+ break;
+ }
+ }
+ XFlush(display_->display());
+ Window child;
+ int x, y;
+ if (!XTranslateCoordinates(display_->display(), window_,
+ RootWindow(display_->display(), screen_num_), 0, 0,
+ &x, &y, &child)) {
+ RTC_DCHECK_NOTREACHED() << "Failed to get window position.";
+ }
+ // Some window managers do not allow a window to cover two or more monitors.
+ // So if the window is on the first monitor of a two-monitor system, the
+ // second half won't be able to show up without changing the WM configuration,
+ // and DrawableRegion() will not be accurate.
+ rect_ = DesktopRect::MakeLTRB(x, y, root_attributes.width,
+ root_attributes.height);
+ context_ = DefaultGC(display_->display(), screen_num_);
+ colormap_ = DefaultColormap(display_->display(), screen_num_);
+ BringToFront();
+ // Wait for window animations.
+ SleepMs(200);
+}
+
+ScreenDrawerLinux::~ScreenDrawerLinux() {
+ XUnmapWindow(display_->display(), window_);
+ XDestroyWindow(display_->display(), window_);
+}
+
+DesktopRect ScreenDrawerLinux::DrawableRegion() {
+ return rect_;
+}
+
+void ScreenDrawerLinux::DrawRectangle(DesktopRect rect, RgbaColor color) {
+ rect.Translate(-rect_.left(), -rect_.top());
+ XColor xcolor;
+ // X11 does not support Alpha.
+ // X11 uses 16 bits for each primary color, so expand each 8-bit channel to
+ // 16 bits by replicating it in the high and low bytes (e.g. 0xAB becomes
+ // 0xABAB); this avoids a mismatch with the color later returned by the
+ // capturer.
+ xcolor.red = (color.red << 8) + color.red;
+ xcolor.green = (color.green << 8) + color.green;
+ xcolor.blue = (color.blue << 8) + color.blue;
+ xcolor.flags = DoRed | DoGreen | DoBlue;
+ XAllocColor(display_->display(), colormap_, &xcolor);
+ XSetForeground(display_->display(), context_, xcolor.pixel);
+ XFillRectangle(display_->display(), window_, context_, rect.left(),
+ rect.top(), rect.width(), rect.height());
+ XFlush(display_->display());
+}
+
+void ScreenDrawerLinux::Clear() {
+ DrawRectangle(rect_, RgbaColor(0, 0, 0));
+}
+
+// TODO(zijiehe): Find the right signal from X11 to indicate the finish of all
+// pending paintings.
+void ScreenDrawerLinux::WaitForPendingDraws() {
+ SleepMs(50);
+}
+
+bool ScreenDrawerLinux::MayDrawIncompleteShapes() {
+ return true;
+}
+
+WindowId ScreenDrawerLinux::window_id() const {
+ return window_;
+}
+
+void ScreenDrawerLinux::BringToFront() {
+ Atom state_above = XInternAtom(display_->display(), "_NET_WM_STATE_ABOVE", 1);
+ Atom window_state = XInternAtom(display_->display(), "_NET_WM_STATE", 1);
+ if (state_above == None || window_state == None) {
+ // Fall back to XRaiseWindow; it's not reliable when two windows both try to
+ // raise themselves to the top.
+ XRaiseWindow(display_->display(), window_);
+ return;
+ }
+
+ XEvent event;
+ memset(&event, 0, sizeof(event));
+ event.type = ClientMessage;
+ event.xclient.window = window_;
+ event.xclient.message_type = window_state;
+ event.xclient.format = 32;
+ event.xclient.data.l[0] = 1; // _NET_WM_STATE_ADD
+ event.xclient.data.l[1] = state_above;
+ XSendEvent(display_->display(), RootWindow(display_->display(), screen_num_),
+ False, SubstructureRedirectMask | SubstructureNotifyMask, &event);
+}
+
+} // namespace
+
+// static
+std::unique_ptr<ScreenDrawerLock> ScreenDrawerLock::Create() {
+ return std::make_unique<ScreenDrawerLockPosix>();
+}
+
+// static
+std::unique_ptr<ScreenDrawer> ScreenDrawer::Create() {
+ if (SharedXDisplay::CreateDefault().get()) {
+ return std::make_unique<ScreenDrawerLinux>();
+ }
+ return nullptr;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/screen_drawer_lock_posix.cc b/third_party/libwebrtc/modules/desktop_capture/screen_drawer_lock_posix.cc
new file mode 100644
index 0000000000..28cb501fe7
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/screen_drawer_lock_posix.cc
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/screen_drawer_lock_posix.h"
+
+#include <fcntl.h>
+#include <sys/stat.h>
+
+#include "absl/strings/string_view.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+namespace {
+
+// A UUID used as the name of the semaphore.
+static constexpr char kSemaphoreName[] = "GSDL54fe5552804711e6a7253f429a";
+
+} // namespace
+
+ScreenDrawerLockPosix::ScreenDrawerLockPosix()
+ : ScreenDrawerLockPosix(kSemaphoreName) {}
+
+ScreenDrawerLockPosix::ScreenDrawerLockPosix(const char* name) {
+ semaphore_ = sem_open(name, O_CREAT, S_IRWXU | S_IRWXG | S_IRWXO, 1);
+ if (semaphore_ == SEM_FAILED) {
+ RTC_LOG_ERRNO(LS_ERROR) << "Failed to create named semaphore with " << name;
+ RTC_DCHECK_NOTREACHED();
+ }
+
+ sem_wait(semaphore_);
+}
+
+ScreenDrawerLockPosix::~ScreenDrawerLockPosix() {
+ if (semaphore_ == SEM_FAILED) {
+ return;
+ }
+
+ sem_post(semaphore_);
+ sem_close(semaphore_);
+ // Calling sem_unlink() on a named semaphore does not wait for other clients
+ // to release the sem_t. So if a new process starts, it will sem_open() a
+ // different kernel object with the same name, which eventually breaks the
+ // cross-process lock.
+}
+
+// static
+void ScreenDrawerLockPosix::Unlink(absl::string_view name) {
+ sem_unlink(std::string(name).c_str());
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/screen_drawer_lock_posix.h b/third_party/libwebrtc/modules/desktop_capture/screen_drawer_lock_posix.h
new file mode 100644
index 0000000000..13899b2d75
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/screen_drawer_lock_posix.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_DESKTOP_CAPTURE_SCREEN_DRAWER_LOCK_POSIX_H_
+#define MODULES_DESKTOP_CAPTURE_SCREEN_DRAWER_LOCK_POSIX_H_
+
+#include <semaphore.h>
+
+#include "absl/strings/string_view.h"
+#include "modules/desktop_capture/screen_drawer.h"
+
+namespace webrtc {
+
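+// RAII over a named POSIX semaphore: the constructor blocks until it acquires
+// the cross-process lock, and the destructor releases it.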
+class ScreenDrawerLockPosix final : public ScreenDrawerLock {
+ public:
+ ScreenDrawerLockPosix();
+ // Provides a name other than the default one, for tests only.
+ explicit ScreenDrawerLockPosix(const char* name);
+ ~ScreenDrawerLockPosix() override;
+
+ // Unlinks the named semaphore actively. This will remove the sem_t object
+ // from the system and allow others to create a different sem_t object with
+ // the same name.
+ static void Unlink(absl::string_view name);
+
+ private:
+ sem_t* semaphore_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_DESKTOP_CAPTURE_SCREEN_DRAWER_LOCK_POSIX_H_
diff --git a/third_party/libwebrtc/modules/desktop_capture/screen_drawer_mac.cc b/third_party/libwebrtc/modules/desktop_capture/screen_drawer_mac.cc
new file mode 100644
index 0000000000..17719e4439
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/screen_drawer_mac.cc
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+// TODO(zijiehe): Implement ScreenDrawerMac
+
+#include <memory>
+
+#include "modules/desktop_capture/screen_drawer.h"
+#include "modules/desktop_capture/screen_drawer_lock_posix.h"
+
+namespace webrtc {
+
+// static
+std::unique_ptr<ScreenDrawerLock> ScreenDrawerLock::Create() {
+ return std::make_unique<ScreenDrawerLockPosix>();
+}
+
+// static
+std::unique_ptr<ScreenDrawer> ScreenDrawer::Create() {
+ return nullptr;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/screen_drawer_unittest.cc b/third_party/libwebrtc/modules/desktop_capture/screen_drawer_unittest.cc
new file mode 100644
index 0000000000..584770dbf8
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/screen_drawer_unittest.cc
@@ -0,0 +1,160 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/screen_drawer.h"
+
+#include <stdint.h>
+
+#include <atomic>
+#include <memory>
+
+#include "api/function_view.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/platform_thread.h"
+#include "rtc_base/random.h"
+#include "rtc_base/time_utils.h"
+#include "system_wrappers/include/sleep.h"
+#include "test/gtest.h"
+
+#if defined(WEBRTC_POSIX)
+#include "modules/desktop_capture/screen_drawer_lock_posix.h"
+#endif
+
+namespace webrtc {
+
+namespace {
+
+void TestScreenDrawerLock(
+ rtc::FunctionView<std::unique_ptr<ScreenDrawerLock>()> ctor) {
+ constexpr int kLockDurationMs = 100;
+
+ std::atomic<bool> created(false);
+ std::atomic<bool> ready(false);
+
+ class Task {
+ public:
+ Task(std::atomic<bool>* created,
+ const std::atomic<bool>& ready,
+ rtc::FunctionView<std::unique_ptr<ScreenDrawerLock>()> ctor)
+ : created_(created), ready_(ready), ctor_(ctor) {}
+
+ ~Task() = default;
+
+ void RunTask() {
+ std::unique_ptr<ScreenDrawerLock> lock = ctor_();
+ ASSERT_TRUE(!!lock);
+ created_->store(true);
+ // Wait for the main thread to pick up the created_ signal.
+ while (!ready_.load()) {
+ SleepMs(1);
+ }
+ // At this point, the main thread should begin to create a second lock. It's
+ // still possible that the second lock won't be created before the following
+ // sleep has finished, but the possibility is significantly reduced.
+ const int64_t current_ms = rtc::TimeMillis();
+ // SleepMs() may return early. See
+ // https://cs.chromium.org/chromium/src/third_party/webrtc/system_wrappers/include/sleep.h?rcl=4a604c80cecce18aff6fc5e16296d04675312d83&l=20
+ // But we need to ensure at least 100 ms has been passed before unlocking
+ // `lock`.
+ while (rtc::TimeMillis() - current_ms < kLockDurationMs) {
+ SleepMs(kLockDurationMs - (rtc::TimeMillis() - current_ms));
+ }
+ }
+
+ private:
+ std::atomic<bool>* const created_;
+ const std::atomic<bool>& ready_;
+ const rtc::FunctionView<std::unique_ptr<ScreenDrawerLock>()> ctor_;
+ } task(&created, ready, ctor);
+
+ auto lock_thread = rtc::PlatformThread::SpawnJoinable(
+ [&task] { task.RunTask(); }, "lock_thread");
+
+ // Wait for the first lock in Task::RunTask() to be created.
+ // TODO(zijiehe): Find a better solution to wait for the creation of the first
+ // lock. See
+ // https://chromium-review.googlesource.com/c/607688/13/webrtc/modules/desktop_capture/screen_drawer_unittest.cc
+ while (!created.load()) {
+ SleepMs(1);
+ }
+
+ const int64_t start_ms = rtc::TimeMillis();
+ ready.store(true);
+ // This is unlikely to fail, but just in case the current thread is too laggy
+ // and causes the SleepMs() in RunTask() to finish before we create another
+ // lock.
+ ASSERT_GT(kLockDurationMs, rtc::TimeMillis() - start_ms);
+ ctor();
+ ASSERT_LE(kLockDurationMs, rtc::TimeMillis() - start_ms);
+}
+
+} // namespace
+
+// These are a set of manual test cases, as we do not have an automatic way to
+// detect whether a ScreenDrawer on a certain platform works well without
+// ScreenCapturer(s). So you may execute these test cases with
+// --gtest_also_run_disabled_tests --gtest_filter=ScreenDrawerTest.*.
+TEST(ScreenDrawerTest, DISABLED_DrawRectangles) {
+ std::unique_ptr<ScreenDrawer> drawer = ScreenDrawer::Create();
+ if (!drawer) {
+ RTC_LOG(LS_WARNING)
+ << "No ScreenDrawer implementation for current platform.";
+ return;
+ }
+
+ if (drawer->DrawableRegion().is_empty()) {
+ RTC_LOG(LS_WARNING)
+ << "ScreenDrawer of current platform does not provide a "
+ "non-empty DrawableRegion().";
+ return;
+ }
+
+ DesktopRect rect = drawer->DrawableRegion();
+ Random random(rtc::TimeMicros());
+ for (int i = 0; i < 100; i++) {
+ // Make sure we at least draw one pixel.
+ int left = random.Rand(rect.left(), rect.right() - 2);
+ int top = random.Rand(rect.top(), rect.bottom() - 2);
+ drawer->DrawRectangle(
+ DesktopRect::MakeLTRB(left, top, random.Rand(left + 1, rect.right()),
+ random.Rand(top + 1, rect.bottom())),
+ RgbaColor(random.Rand<uint8_t>(), random.Rand<uint8_t>(),
+ random.Rand<uint8_t>(), random.Rand<uint8_t>()));
+
+ if (i == 50) {
+ SleepMs(10000);
+ }
+ }
+
+ SleepMs(10000);
+}
+
+#if defined(THREAD_SANITIZER) // bugs.webrtc.org/10019
+#define MAYBE_TwoScreenDrawerLocks DISABLED_TwoScreenDrawerLocks
+#else
+#define MAYBE_TwoScreenDrawerLocks TwoScreenDrawerLocks
+#endif
+TEST(ScreenDrawerTest, MAYBE_TwoScreenDrawerLocks) {
+#if defined(WEBRTC_POSIX)
+ // ScreenDrawerLockPosix won't be able to unlink the named semaphore. So use a
+ // different semaphore name here to avoid deadlock.
+ const char* semaphore_name = "GSDL8784541a812011e788ff67427b";
+ ScreenDrawerLockPosix::Unlink(semaphore_name);
+
+ TestScreenDrawerLock([semaphore_name]() {
+ return std::make_unique<ScreenDrawerLockPosix>(semaphore_name);
+ });
+#elif defined(WEBRTC_WIN)
+ TestScreenDrawerLock([]() { return ScreenDrawerLock::Create(); });
+#endif
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/screen_drawer_win.cc b/third_party/libwebrtc/modules/desktop_capture/screen_drawer_win.cc
new file mode 100644
index 0000000000..7cf634fe89
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/screen_drawer_win.cc
@@ -0,0 +1,209 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <windows.h>
+
+#include <memory>
+
+#include "modules/desktop_capture/screen_drawer.h"
+#include "system_wrappers/include/sleep.h"
+
+namespace webrtc {
+
+namespace {
+
+static constexpr TCHAR kMutexName[] =
+ TEXT("Local\\ScreenDrawerWin-da834f82-8044-11e6-ac81-73dcdd1c1869");
+
+class ScreenDrawerLockWin : public ScreenDrawerLock {
+ public:
+ ScreenDrawerLockWin();
+ ~ScreenDrawerLockWin() override;
+
+ private:
+ HANDLE mutex_;
+};
+
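+// Spins until this process is the creator of the named mutex; if the mutex
+// already exists, another drawer holds the lock, so close the handle and
+// retry after a delay.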
+ScreenDrawerLockWin::ScreenDrawerLockWin() {
+ while (true) {
+ mutex_ = CreateMutex(NULL, FALSE, kMutexName);
+ if (GetLastError() != ERROR_ALREADY_EXISTS && mutex_ != NULL) {
+ break;
+ } else {
+ if (mutex_) {
+ CloseHandle(mutex_);
+ }
+ SleepMs(1000);
+ }
+ }
+}
+
+ScreenDrawerLockWin::~ScreenDrawerLockWin() {
+ CloseHandle(mutex_);
+}
+
+DesktopRect GetScreenRect() {
+ HDC hdc = GetDC(NULL);
+ DesktopRect rect = DesktopRect::MakeWH(GetDeviceCaps(hdc, HORZRES),
+ GetDeviceCaps(hdc, VERTRES));
+ ReleaseDC(NULL, hdc);
+ return rect;
+}
+
+HWND CreateDrawerWindow(DesktopRect rect) {
+ HWND hwnd = CreateWindowA(
+ "STATIC", "DrawerWindow", WS_POPUPWINDOW | WS_VISIBLE, rect.left(),
+ rect.top(), rect.width(), rect.height(), NULL, NULL, NULL, NULL);
+ SetForegroundWindow(hwnd);
+ return hwnd;
+}
+
+COLORREF ColorToRef(RgbaColor color) {
+ // Windows device context does not support alpha.
+ return RGB(color.red, color.green, color.blue);
+}
+
+// A ScreenDrawer implementation for Windows.
+class ScreenDrawerWin : public ScreenDrawer {
+ public:
+ ScreenDrawerWin();
+ ~ScreenDrawerWin() override;
+
+ // ScreenDrawer interface.
+ DesktopRect DrawableRegion() override;
+ void DrawRectangle(DesktopRect rect, RgbaColor color) override;
+ void Clear() override;
+ void WaitForPendingDraws() override;
+ bool MayDrawIncompleteShapes() override;
+ WindowId window_id() const override;
+
+ private:
+ // Brings the window to the front. This can help to avoid interference from
+ // other windows or shadow effects.
+ void BringToFront();
+
+ // Draw a line with `color`.
+ void DrawLine(DesktopVector start, DesktopVector end, RgbaColor color);
+
+ // Draw a dot with `color`.
+ void DrawDot(DesktopVector vect, RgbaColor color);
+
+ const DesktopRect rect_;
+ HWND window_;
+ HDC hdc_;
+};
+
+ScreenDrawerWin::ScreenDrawerWin()
+ : ScreenDrawer(),
+ rect_(GetScreenRect()),
+ window_(CreateDrawerWindow(rect_)),
+ hdc_(GetWindowDC(window_)) {
+ // We do not need to handle any messages for `window_`, so prevent Windows
+ // from applying its window-ghosting behavior.
+ DisableProcessWindowsGhosting();
+
+ // Always use stock pen (DC_PEN) and brush (DC_BRUSH).
+ SelectObject(hdc_, GetStockObject(DC_PEN));
+ SelectObject(hdc_, GetStockObject(DC_BRUSH));
+ BringToFront();
+}
+
+ScreenDrawerWin::~ScreenDrawerWin() {
+ ReleaseDC(NULL, hdc_);
+ DestroyWindow(window_);
+ // Unfortunately there is no EnableProcessWindowsGhosting() API.
+}
+
+DesktopRect ScreenDrawerWin::DrawableRegion() {
+ return rect_;
+}
+
+void ScreenDrawerWin::DrawRectangle(DesktopRect rect, RgbaColor color) {
+ if (rect.width() == 1 && rect.height() == 1) {
+ // Rectangle() cannot draw a 1x1-pixel rectangle, so draw a dot instead.
+ DrawDot(rect.top_left(), color);
+ return;
+ }
+
+ if (rect.width() == 1 || rect.height() == 1) {
+ // Rectangle() cannot draw a rectangle that is only 1 pixel wide or tall, so
+ // draw a line instead.
+ DrawLine(rect.top_left(), DesktopVector(rect.right(), rect.bottom()),
+ color);
+ return;
+ }
+
+ SetDCBrushColor(hdc_, ColorToRef(color));
+ SetDCPenColor(hdc_, ColorToRef(color));
+ Rectangle(hdc_, rect.left(), rect.top(), rect.right(), rect.bottom());
+}
+
+void ScreenDrawerWin::Clear() {
+ DrawRectangle(rect_, RgbaColor(0, 0, 0));
+}
+
+// TODO(zijiehe): Find the right signal to indicate the finish of all pending
+// paintings.
+void ScreenDrawerWin::WaitForPendingDraws() {
+ BringToFront();
+ SleepMs(50);
+}
+
+bool ScreenDrawerWin::MayDrawIncompleteShapes() {
+ return true;
+}
+
+WindowId ScreenDrawerWin::window_id() const {
+ return reinterpret_cast<WindowId>(window_);
+}
+
+void ScreenDrawerWin::DrawLine(DesktopVector start,
+ DesktopVector end,
+ RgbaColor color) {
+ POINT points[2];
+ points[0].x = start.x();
+ points[0].y = start.y();
+ points[1].x = end.x();
+ points[1].y = end.y();
+ SetDCPenColor(hdc_, ColorToRef(color));
+ Polyline(hdc_, points, 2);
+}
+
+void ScreenDrawerWin::DrawDot(DesktopVector vect, RgbaColor color) {
+ SetPixel(hdc_, vect.x(), vect.y(), ColorToRef(color));
+}
+
+void ScreenDrawerWin::BringToFront() {
+ if (SetWindowPos(window_, HWND_TOPMOST, 0, 0, 0, 0,
+ SWP_NOMOVE | SWP_NOSIZE) != FALSE) {
+ return;
+ }
+
+ long ex_style = GetWindowLong(window_, GWL_EXSTYLE);
+ ex_style |= WS_EX_TOPMOST;
+ if (SetWindowLong(window_, GWL_EXSTYLE, ex_style) != 0) {
+ return;
+ }
+
+ BringWindowToTop(window_);
+}
+
+} // namespace
+
+// static
+std::unique_ptr<ScreenDrawerLock> ScreenDrawerLock::Create() {
+ return std::unique_ptr<ScreenDrawerLock>(new ScreenDrawerLockWin());
+}
+
+// static
+std::unique_ptr<ScreenDrawer> ScreenDrawer::Create() {
+ return std::unique_ptr<ScreenDrawer>(new ScreenDrawerWin());
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/shared_desktop_frame.cc b/third_party/libwebrtc/modules/desktop_capture/shared_desktop_frame.cc
new file mode 100644
index 0000000000..e374038cbc
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/shared_desktop_frame.cc
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/shared_desktop_frame.h"
+
+#include <memory>
+#include <type_traits>
+#include <utility>
+
+namespace webrtc {
+
+SharedDesktopFrame::~SharedDesktopFrame() {}
+
+// static
+std::unique_ptr<SharedDesktopFrame> SharedDesktopFrame::Wrap(
+ std::unique_ptr<DesktopFrame> desktop_frame) {
+ return std::unique_ptr<SharedDesktopFrame>(new SharedDesktopFrame(
+ rtc::scoped_refptr<Core>(new Core(std::move(desktop_frame)))));
+}
+
+SharedDesktopFrame* SharedDesktopFrame::Wrap(DesktopFrame* desktop_frame) {
+ return Wrap(std::unique_ptr<DesktopFrame>(desktop_frame)).release();
+}
+
+DesktopFrame* SharedDesktopFrame::GetUnderlyingFrame() {
+ return core_->get();
+}
+
+bool SharedDesktopFrame::ShareFrameWith(const SharedDesktopFrame& other) const {
+ return core_->get() == other.core_->get();
+}
+
+std::unique_ptr<SharedDesktopFrame> SharedDesktopFrame::Share() {
+ std::unique_ptr<SharedDesktopFrame> result(new SharedDesktopFrame(core_));
+ result->CopyFrameInfoFrom(*this);
+ return result;
+}
+
+bool SharedDesktopFrame::IsShared() {
+ return !core_->HasOneRef();
+}
+
+SharedDesktopFrame::SharedDesktopFrame(rtc::scoped_refptr<Core> core)
+ : DesktopFrame((*core)->size(),
+ (*core)->stride(),
+ (*core)->data(),
+ (*core)->shared_memory()),
+ core_(core) {
+ CopyFrameInfoFrom(*(core_->get()));
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/shared_desktop_frame.h b/third_party/libwebrtc/modules/desktop_capture/shared_desktop_frame.h
new file mode 100644
index 0000000000..c6f52247f4
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/shared_desktop_frame.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_DESKTOP_CAPTURE_SHARED_DESKTOP_FRAME_H_
+#define MODULES_DESKTOP_CAPTURE_SHARED_DESKTOP_FRAME_H_
+
+#include <memory>
+
+#include "api/scoped_refptr.h"
+#include "modules/desktop_capture/desktop_frame.h"
+#include "rtc_base/ref_counted_object.h"
+#include "rtc_base/system/rtc_export.h"
+
+namespace webrtc {
+
+// SharedDesktopFrame is a DesktopFrame that may have multiple instances all
+// sharing the same buffer.
+class RTC_EXPORT SharedDesktopFrame final : public DesktopFrame {
+ public:
+ ~SharedDesktopFrame() override;
+
+ SharedDesktopFrame(const SharedDesktopFrame&) = delete;
+ SharedDesktopFrame& operator=(const SharedDesktopFrame&) = delete;
+
+ static std::unique_ptr<SharedDesktopFrame> Wrap(
+ std::unique_ptr<DesktopFrame> desktop_frame);
+
+ // Deprecated.
+ // TODO(sergeyu): remove this method.
+ static SharedDesktopFrame* Wrap(DesktopFrame* desktop_frame);
+
+ // Deprecated. Clients do not need to know the underlying DesktopFrame
+ // instance.
+ // TODO(zijiehe): Remove this method.
+ // Returns the underlying instance of DesktopFrame.
+ DesktopFrame* GetUnderlyingFrame();
+
+ // Returns whether `this` and `other` share the underlying DesktopFrame.
+ bool ShareFrameWith(const SharedDesktopFrame& other) const;
+
+ // Creates a clone of this object.
+ std::unique_ptr<SharedDesktopFrame> Share();
+
+ // Checks if the frame is currently shared. If it returns false it's
+ // guaranteed that there are no clones of the object.
+ bool IsShared();
+
+ private:
+ typedef rtc::FinalRefCountedObject<std::unique_ptr<DesktopFrame>> Core;
+
+ SharedDesktopFrame(rtc::scoped_refptr<Core> core);
+
+ const rtc::scoped_refptr<Core> core_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_DESKTOP_CAPTURE_SHARED_DESKTOP_FRAME_H_
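An illustrative sketch, not part of the change, of how the SharedDesktopFrame
API above is typically used: wrap a heap-allocated frame once, then hand out
cheap clones that alias the same pixel buffer. BasicDesktopFrame comes from
modules/desktop_capture/desktop_frame.h.

#include <memory>
#include <utility>

#include "modules/desktop_capture/desktop_frame.h"
#include "modules/desktop_capture/desktop_geometry.h"
#include "modules/desktop_capture/shared_desktop_frame.h"

void ShareFrameExample() {
  // Take ownership of a plain frame and wrap it in a shared one.
  std::unique_ptr<webrtc::DesktopFrame> frame(
      new webrtc::BasicDesktopFrame(webrtc::DesktopSize(640, 480)));
  std::unique_ptr<webrtc::SharedDesktopFrame> shared =
      webrtc::SharedDesktopFrame::Wrap(std::move(frame));

  // The clone aliases the same pixel buffer; only frame metadata is copied,
  // so shared->IsShared() and shared->ShareFrameWith(*clone) are both true.
  std::unique_ptr<webrtc::SharedDesktopFrame> clone = shared->Share();

  clone.reset();  // Dropping the clone releases one reference;
                  // shared->IsShared() is false again.
}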
diff --git a/third_party/libwebrtc/modules/desktop_capture/shared_memory.cc b/third_party/libwebrtc/modules/desktop_capture/shared_memory.cc
new file mode 100644
index 0000000000..b4ff78b2a0
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/shared_memory.cc
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/shared_memory.h"
+
+namespace webrtc {
+
+#if defined(WEBRTC_WIN)
+const SharedMemory::Handle SharedMemory::kInvalidHandle = NULL;
+#else
+const SharedMemory::Handle SharedMemory::kInvalidHandle = -1;
+#endif
+
+SharedMemory::SharedMemory(void* data, size_t size, Handle handle, int id)
+ : data_(data), size_(size), handle_(handle), id_(id) {}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/shared_memory.h b/third_party/libwebrtc/modules/desktop_capture/shared_memory.h
new file mode 100644
index 0000000000..a7add4447b
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/shared_memory.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_DESKTOP_CAPTURE_SHARED_MEMORY_H_
+#define MODULES_DESKTOP_CAPTURE_SHARED_MEMORY_H_
+
+#include <stddef.h>
+
+#if defined(WEBRTC_WIN)
+// Forward declare HANDLE in a windows.h compatible way so that we can avoid
+// including windows.h.
+typedef void* HANDLE;
+#endif
+
+#include <memory>
+
+#include "rtc_base/system/rtc_export.h"
+
+namespace webrtc {
+
+// SharedMemory is a base class for shared memory. It stores all required
+// parameters of the buffer, but doesn't have any logic to allocate or destroy
+// the actual buffer. DesktopCapturer consumers that need to use shared memory
+// for video frames must extend this class with creation and destruction logic
+// specific for the target platform and then call
+// DesktopCapturer::SetSharedMemoryFactory().
+class RTC_EXPORT SharedMemory {
+ public:
+#if defined(WEBRTC_WIN)
+ typedef HANDLE Handle;
+ static const Handle kInvalidHandle;
+#else
+ typedef int Handle;
+ static const Handle kInvalidHandle;
+#endif
+
+ void* data() const { return data_; }
+ size_t size() const { return size_; }
+
+ // Platform-specific handle of the buffer.
+ Handle handle() const { return handle_; }
+
+  // Integer identifier that can be used by consumers of the DesktopCapturer
+  // interface to identify shared memory buffers they created.
+ int id() const { return id_; }
+
+ virtual ~SharedMemory() {}
+
+ SharedMemory(const SharedMemory&) = delete;
+ SharedMemory& operator=(const SharedMemory&) = delete;
+
+ protected:
+ SharedMemory(void* data, size_t size, Handle handle, int id);
+
+ void* const data_;
+ const size_t size_;
+ const Handle handle_;
+ const int id_;
+};
+
+// Interface used to create SharedMemory instances.
+class SharedMemoryFactory {
+ public:
+ SharedMemoryFactory() {}
+ virtual ~SharedMemoryFactory() {}
+
+ SharedMemoryFactory(const SharedMemoryFactory&) = delete;
+ SharedMemoryFactory& operator=(const SharedMemoryFactory&) = delete;
+
+ virtual std::unique_ptr<SharedMemory> CreateSharedMemory(size_t size) = 0;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_DESKTOP_CAPTURE_SHARED_MEMORY_H_
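A minimal sketch, not part of the change, of what a SharedMemoryFactory
implementation looks like. The heap-backed classes below are invented for
illustration; a real implementation would use platform shared memory (e.g.
CreateFileMapping on Windows or shm_open on POSIX) and a valid handle.

#include <cstdlib>
#include <memory>

#include "modules/desktop_capture/shared_memory.h"

namespace example {

// Hypothetical: backs the buffer with plain heap memory, so the handle stays
// kInvalidHandle and the buffer is not actually shareable across processes.
class HeapSharedMemory : public webrtc::SharedMemory {
 public:
  explicit HeapSharedMemory(size_t size)
      : webrtc::SharedMemory(std::malloc(size),
                             size,
                             webrtc::SharedMemory::kInvalidHandle,
                             /*id=*/0) {}
  ~HeapSharedMemory() override { std::free(data()); }
};

class HeapSharedMemoryFactory : public webrtc::SharedMemoryFactory {
 public:
  std::unique_ptr<webrtc::SharedMemory> CreateSharedMemory(
      size_t size) override {
    return std::make_unique<HeapSharedMemory>(size);
  }
};

}  // namespace example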
diff --git a/third_party/libwebrtc/modules/desktop_capture/test_utils.cc b/third_party/libwebrtc/modules/desktop_capture/test_utils.cc
new file mode 100644
index 0000000000..9483bf41ea
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/test_utils.cc
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/test_utils.h"
+
+#include <stdint.h>
+#include <string.h>
+
+#include "modules/desktop_capture/desktop_geometry.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+void ClearDesktopFrame(DesktopFrame* frame) {
+ RTC_DCHECK(frame);
+ uint8_t* data = frame->data();
+ for (int i = 0; i < frame->size().height(); i++) {
+ memset(data, 0, frame->size().width() * DesktopFrame::kBytesPerPixel);
+ data += frame->stride();
+ }
+}
+
+bool DesktopFrameDataEquals(const DesktopFrame& left,
+ const DesktopFrame& right) {
+ if (!left.size().equals(right.size())) {
+ return false;
+ }
+
+ const uint8_t* left_array = left.data();
+ const uint8_t* right_array = right.data();
+ for (int i = 0; i < left.size().height(); i++) {
+ if (memcmp(left_array, right_array,
+ DesktopFrame::kBytesPerPixel * left.size().width()) != 0) {
+ return false;
+ }
+ left_array += left.stride();
+ right_array += right.stride();
+ }
+
+ return true;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/test_utils.h b/third_party/libwebrtc/modules/desktop_capture/test_utils.h
new file mode 100644
index 0000000000..8669fecba3
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/test_utils.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_DESKTOP_CAPTURE_TEST_UTILS_H_
+#define MODULES_DESKTOP_CAPTURE_TEST_UTILS_H_
+
+#include "modules/desktop_capture/desktop_frame.h"
+
+namespace webrtc {
+
+// Clears a DesktopFrame `frame` by setting its data() to 0.
+void ClearDesktopFrame(DesktopFrame* frame);
+
+// Compares size() and data() of two DesktopFrames `left` and `right`.
+bool DesktopFrameDataEquals(const DesktopFrame& left,
+ const DesktopFrame& right);
+
+} // namespace webrtc
+
+#endif // MODULES_DESKTOP_CAPTURE_TEST_UTILS_H_
diff --git a/third_party/libwebrtc/modules/desktop_capture/test_utils_unittest.cc b/third_party/libwebrtc/modules/desktop_capture/test_utils_unittest.cc
new file mode 100644
index 0000000000..c1326f01cc
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/test_utils_unittest.cc
@@ -0,0 +1,110 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/test_utils.h"
+
+#include <stdint.h>
+
+#include "modules/desktop_capture/desktop_geometry.h"
+#include "modules/desktop_capture/rgba_color.h"
+#include "rtc_base/checks.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+namespace {
+
+void PaintDesktopFrame(DesktopFrame* frame,
+ DesktopVector pos,
+ RgbaColor color) {
+ ASSERT_TRUE(frame);
+ ASSERT_TRUE(DesktopRect::MakeSize(frame->size()).Contains(pos));
+ *reinterpret_cast<uint32_t*>(frame->GetFrameDataAtPos(pos)) =
+ color.ToUInt32();
+}
+
+// A DesktopFrame implementation that stores its data on the heap, but with
+// the stride doubled.
+class DoubleSizeDesktopFrame : public DesktopFrame {
+ public:
+ explicit DoubleSizeDesktopFrame(DesktopSize size);
+ ~DoubleSizeDesktopFrame() override;
+};
+
+DoubleSizeDesktopFrame::DoubleSizeDesktopFrame(DesktopSize size)
+ : DesktopFrame(
+ size,
+ kBytesPerPixel * size.width() * 2,
+ new uint8_t[kBytesPerPixel * size.width() * size.height() * 2],
+ nullptr) {}
+
+DoubleSizeDesktopFrame::~DoubleSizeDesktopFrame() {
+ delete[] data_;
+}
+
+} // namespace
+
+TEST(TestUtilsTest, BasicDataEqualsCases) {
+ BasicDesktopFrame frame(DesktopSize(4, 4));
+ for (int i = 0; i < 4; i++) {
+ for (int j = 0; j < 4; j++) {
+ PaintDesktopFrame(&frame, DesktopVector(i, j), RgbaColor(4U * j + i));
+ }
+ }
+
+ ASSERT_TRUE(DesktopFrameDataEquals(frame, frame));
+ BasicDesktopFrame other(DesktopSize(4, 4));
+ for (int i = 0; i < 4; i++) {
+ for (int j = 0; j < 4; j++) {
+ PaintDesktopFrame(&other, DesktopVector(i, j), RgbaColor(4U * j + i));
+ }
+ }
+ ASSERT_TRUE(DesktopFrameDataEquals(frame, other));
+ PaintDesktopFrame(&other, DesktopVector(2, 2), RgbaColor(0U));
+ ASSERT_FALSE(DesktopFrameDataEquals(frame, other));
+}
+
+TEST(TestUtilsTest, DifferentSizeShouldNotEqual) {
+ BasicDesktopFrame frame(DesktopSize(4, 4));
+ for (int i = 0; i < 4; i++) {
+ for (int j = 0; j < 4; j++) {
+ PaintDesktopFrame(&frame, DesktopVector(i, j), RgbaColor(4U * j + i));
+ }
+ }
+
+ BasicDesktopFrame other(DesktopSize(2, 8));
+ for (int i = 0; i < 2; i++) {
+ for (int j = 0; j < 8; j++) {
+ PaintDesktopFrame(&other, DesktopVector(i, j), RgbaColor(2U * j + i));
+ }
+ }
+
+ ASSERT_FALSE(DesktopFrameDataEquals(frame, other));
+}
+
+TEST(TestUtilsTest, DifferentStrideShouldBeComparable) {
+ BasicDesktopFrame frame(DesktopSize(4, 4));
+ for (int i = 0; i < 4; i++) {
+ for (int j = 0; j < 4; j++) {
+ PaintDesktopFrame(&frame, DesktopVector(i, j), RgbaColor(4U * j + i));
+ }
+ }
+
+ ASSERT_TRUE(DesktopFrameDataEquals(frame, frame));
+ DoubleSizeDesktopFrame other(DesktopSize(4, 4));
+ for (int i = 0; i < 4; i++) {
+ for (int j = 0; j < 4; j++) {
+ PaintDesktopFrame(&other, DesktopVector(i, j), RgbaColor(4U * j + i));
+ }
+ }
+ ASSERT_TRUE(DesktopFrameDataEquals(frame, other));
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/win/cursor.cc b/third_party/libwebrtc/modules/desktop_capture/win/cursor.cc
new file mode 100644
index 0000000000..1d645098e2
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/win/cursor.cc
@@ -0,0 +1,233 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/win/cursor.h"
+
+#include <algorithm>
+#include <memory>
+
+#include "modules/desktop_capture/desktop_frame.h"
+#include "modules/desktop_capture/desktop_geometry.h"
+#include "modules/desktop_capture/mouse_cursor.h"
+#include "modules/desktop_capture/win/scoped_gdi_object.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/system/arch.h"
+
+namespace webrtc {
+
+namespace {
+
+#if defined(WEBRTC_ARCH_LITTLE_ENDIAN)
+
+#define RGBA(r, g, b, a) \
+ ((((a) << 24) & 0xff000000) | (((b) << 16) & 0xff0000) | \
+ (((g) << 8) & 0xff00) | ((r)&0xff))
+
+#else // !defined(WEBRTC_ARCH_LITTLE_ENDIAN)
+
+#define RGBA(r, g, b, a) \
+ ((((r) << 24) & 0xff000000) | (((g) << 16) & 0xff0000) | \
+ (((b) << 8) & 0xff00) | ((a)&0xff))
+
+#endif // !defined(WEBRTC_ARCH_LITTLE_ENDIAN)
+
+const int kBytesPerPixel = DesktopFrame::kBytesPerPixel;
+
+// Pixel colors used when generating cursor outlines.
+const uint32_t kPixelRgbaBlack = RGBA(0, 0, 0, 0xff);
+const uint32_t kPixelRgbaWhite = RGBA(0xff, 0xff, 0xff, 0xff);
+const uint32_t kPixelRgbaTransparent = RGBA(0, 0, 0, 0);
+
+const uint32_t kPixelRgbWhite = RGB(0xff, 0xff, 0xff);
+
+// Expands the cursor shape to add a white outline for visibility against
+// dark backgrounds.
+void AddCursorOutline(int width, int height, uint32_t* data) {
+ for (int y = 0; y < height; y++) {
+ for (int x = 0; x < width; x++) {
+      // If this is a transparent pixel (bgr == 0 and alpha == 0), check the
+ // neighbor pixels to see if this should be changed to an outline pixel.
+ if (*data == kPixelRgbaTransparent) {
+ // Change to white pixel if any neighbors (top, bottom, left, right)
+ // are black.
+ if ((y > 0 && data[-width] == kPixelRgbaBlack) ||
+ (y < height - 1 && data[width] == kPixelRgbaBlack) ||
+ (x > 0 && data[-1] == kPixelRgbaBlack) ||
+ (x < width - 1 && data[1] == kPixelRgbaBlack)) {
+ *data = kPixelRgbaWhite;
+ }
+ }
+ data++;
+ }
+ }
+}
+
+// Premultiplies RGB components of the pixel data in the given image by
+// the corresponding alpha components.
+void AlphaMul(uint32_t* data, int width, int height) {
+ static_assert(sizeof(uint32_t) == kBytesPerPixel,
+ "size of uint32 should be the number of bytes per pixel");
+
+ for (uint32_t* data_end = data + width * height; data != data_end; ++data) {
+ RGBQUAD* from = reinterpret_cast<RGBQUAD*>(data);
+ RGBQUAD* to = reinterpret_cast<RGBQUAD*>(data);
+ to->rgbBlue =
+ (static_cast<uint16_t>(from->rgbBlue) * from->rgbReserved) / 0xff;
+ to->rgbGreen =
+ (static_cast<uint16_t>(from->rgbGreen) * from->rgbReserved) / 0xff;
+ to->rgbRed =
+ (static_cast<uint16_t>(from->rgbRed) * from->rgbReserved) / 0xff;
+ }
+}
+
+// Scans a 32bpp bitmap looking for any pixels with non-zero alpha component.
+// Returns true if non-zero alpha is found. `stride` is expressed in pixels.
+bool HasAlphaChannel(const uint32_t* data, int stride, int width, int height) {
+ const RGBQUAD* plane = reinterpret_cast<const RGBQUAD*>(data);
+ for (int y = 0; y < height; ++y) {
+ for (int x = 0; x < width; ++x) {
+ if (plane->rgbReserved != 0)
+ return true;
+ plane += 1;
+ }
+ plane += stride - width;
+ }
+
+ return false;
+}
+
+} // namespace
+
+MouseCursor* CreateMouseCursorFromHCursor(HDC dc, HCURSOR cursor) {
+ ICONINFO iinfo;
+ if (!GetIconInfo(cursor, &iinfo)) {
+ RTC_LOG_F(LS_ERROR) << "Unable to get cursor icon info. Error = "
+ << GetLastError();
+ return NULL;
+ }
+
+ int hotspot_x = iinfo.xHotspot;
+ int hotspot_y = iinfo.yHotspot;
+
+ // Make sure the bitmaps will be freed.
+ win::ScopedBitmap scoped_mask(iinfo.hbmMask);
+ win::ScopedBitmap scoped_color(iinfo.hbmColor);
+ bool is_color = iinfo.hbmColor != NULL;
+
+ // Get `scoped_mask` dimensions.
+ BITMAP bitmap_info;
+ if (!GetObject(scoped_mask, sizeof(bitmap_info), &bitmap_info)) {
+ RTC_LOG_F(LS_ERROR) << "Unable to get bitmap info. Error = "
+ << GetLastError();
+ return NULL;
+ }
+
+ int width = bitmap_info.bmWidth;
+ int height = bitmap_info.bmHeight;
+ std::unique_ptr<uint32_t[]> mask_data(new uint32_t[width * height]);
+
+ // Get pixel data from `scoped_mask` converting it to 32bpp along the way.
+ // GetDIBits() sets the alpha component of every pixel to 0.
+ BITMAPV5HEADER bmi = {0};
+ bmi.bV5Size = sizeof(bmi);
+ bmi.bV5Width = width;
+ bmi.bV5Height = -height; // request a top-down bitmap.
+ bmi.bV5Planes = 1;
+ bmi.bV5BitCount = kBytesPerPixel * 8;
+ bmi.bV5Compression = BI_RGB;
+ bmi.bV5AlphaMask = 0xff000000;
+ bmi.bV5CSType = LCS_WINDOWS_COLOR_SPACE;
+ bmi.bV5Intent = LCS_GM_BUSINESS;
+ if (!GetDIBits(dc, scoped_mask, 0, height, mask_data.get(),
+ reinterpret_cast<BITMAPINFO*>(&bmi), DIB_RGB_COLORS)) {
+ RTC_LOG_F(LS_ERROR) << "Unable to get bitmap bits. Error = "
+ << GetLastError();
+ return NULL;
+ }
+
+ uint32_t* mask_plane = mask_data.get();
+ std::unique_ptr<DesktopFrame> image(
+ new BasicDesktopFrame(DesktopSize(width, height)));
+ bool has_alpha = false;
+
+ if (is_color) {
+ image.reset(new BasicDesktopFrame(DesktopSize(width, height)));
+ // Get the pixels from the color bitmap.
+ if (!GetDIBits(dc, scoped_color, 0, height, image->data(),
+ reinterpret_cast<BITMAPINFO*>(&bmi), DIB_RGB_COLORS)) {
+ RTC_LOG_F(LS_ERROR) << "Unable to get bitmap bits. Error = "
+ << GetLastError();
+ return NULL;
+ }
+
+    // GetDIBits() does not provide any indication whether the bitmap has an
+    // alpha channel, so we use HasAlphaChannel() below to find out.
+ has_alpha = HasAlphaChannel(reinterpret_cast<uint32_t*>(image->data()),
+ width, width, height);
+ } else {
+ // For non-color cursors, the mask contains both an AND and an XOR mask and
+ // the height includes both. Thus, the width is correct, but we need to
+ // divide by 2 to get the correct mask height.
+ height /= 2;
+
+ image.reset(new BasicDesktopFrame(DesktopSize(width, height)));
+
+ // The XOR mask becomes the color bitmap.
+ memcpy(image->data(), mask_plane + (width * height),
+ image->stride() * height);
+ }
+
+  // Reconstruct transparency from the mask if the color image does not have
+  // an alpha channel.
+ if (!has_alpha) {
+ bool add_outline = false;
+ uint32_t* dst = reinterpret_cast<uint32_t*>(image->data());
+ uint32_t* mask = mask_plane;
+ for (int y = 0; y < height; y++) {
+ for (int x = 0; x < width; x++) {
+ // The two bitmaps combine as follows:
+ // mask color Windows Result Our result RGB Alpha
+ // 0 00 Black Black 00 ff
+ // 0 ff White White ff ff
+ // 1 00 Screen Transparent 00 00
+ // 1 ff Reverse-screen Black 00 ff
+ //
+ // Since we don't support XOR cursors, we replace the "Reverse Screen"
+ // with black. In this case, we also add an outline around the cursor
+ // so that it is visible against a dark background.
+ if (*mask == kPixelRgbWhite) {
+ if (*dst != 0) {
+ add_outline = true;
+ *dst = kPixelRgbaBlack;
+ } else {
+ *dst = kPixelRgbaTransparent;
+ }
+ } else {
+ *dst = kPixelRgbaBlack ^ *dst;
+ }
+
+ ++dst;
+ ++mask;
+ }
+ }
+ if (add_outline) {
+ AddCursorOutline(width, height,
+ reinterpret_cast<uint32_t*>(image->data()));
+ }
+ }
+
+ // Pre-multiply the resulting pixels since MouseCursor uses premultiplied
+ // images.
+ AlphaMul(reinterpret_cast<uint32_t*>(image->data()), width, height);
+
+ return new MouseCursor(image.release(), DesktopVector(hotspot_x, hotspot_y));
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/win/cursor.h b/third_party/libwebrtc/modules/desktop_capture/win/cursor.h
new file mode 100644
index 0000000000..54d78164a3
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/win/cursor.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_DESKTOP_CAPTURE_WIN_CURSOR_H_
+#define MODULES_DESKTOP_CAPTURE_WIN_CURSOR_H_
+
+#include <windows.h>
+
+namespace webrtc {
+
+class MouseCursor;
+
+// Converts an HCURSOR into a `MouseCursor` instance.
+MouseCursor* CreateMouseCursorFromHCursor(HDC dc, HCURSOR cursor);
+
+} // namespace webrtc
+
+#endif // MODULES_DESKTOP_CAPTURE_WIN_CURSOR_H_
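A hedged usage sketch, not part of the change: convert the cursor currently
shown on screen, roughly the way the mouse-cursor monitor in this module does
it. CURSORINFO and GetCursorInfo are standard Win32 APIs; error handling is
trimmed to the essentials.

#include <windows.h>

#include <memory>

#include "modules/desktop_capture/mouse_cursor.h"
#include "modules/desktop_capture/win/cursor.h"

std::unique_ptr<webrtc::MouseCursor> CaptureCurrentCursor() {
  CURSORINFO cursor_info;
  cursor_info.cbSize = sizeof(CURSORINFO);
  if (!GetCursorInfo(&cursor_info) || !(cursor_info.flags & CURSOR_SHOWING))
    return nullptr;

  // A screen DC is needed for the GetDIBits() calls inside the converter.
  HDC dc = GetDC(nullptr);
  std::unique_ptr<webrtc::MouseCursor> cursor(
      webrtc::CreateMouseCursorFromHCursor(dc, cursor_info.hCursor));
  ReleaseDC(nullptr, dc);
  // On success the image is 32bpp premultiplied, per AlphaMul() above.
  return cursor;
}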
diff --git a/third_party/libwebrtc/modules/desktop_capture/win/cursor_test_data/1_24bpp.cur b/third_party/libwebrtc/modules/desktop_capture/win/cursor_test_data/1_24bpp.cur
new file mode 100644
index 0000000000..27702b825c
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/win/cursor_test_data/1_24bpp.cur
Binary files differ
diff --git a/third_party/libwebrtc/modules/desktop_capture/win/cursor_test_data/1_32bpp.cur b/third_party/libwebrtc/modules/desktop_capture/win/cursor_test_data/1_32bpp.cur
new file mode 100644
index 0000000000..7e0d8596da
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/win/cursor_test_data/1_32bpp.cur
Binary files differ
diff --git a/third_party/libwebrtc/modules/desktop_capture/win/cursor_test_data/1_8bpp.cur b/third_party/libwebrtc/modules/desktop_capture/win/cursor_test_data/1_8bpp.cur
new file mode 100644
index 0000000000..fefb09e1a1
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/win/cursor_test_data/1_8bpp.cur
Binary files differ
diff --git a/third_party/libwebrtc/modules/desktop_capture/win/cursor_test_data/2_1bpp.cur b/third_party/libwebrtc/modules/desktop_capture/win/cursor_test_data/2_1bpp.cur
new file mode 100644
index 0000000000..4f8a094f31
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/win/cursor_test_data/2_1bpp.cur
Binary files differ
diff --git a/third_party/libwebrtc/modules/desktop_capture/win/cursor_test_data/2_32bpp.cur b/third_party/libwebrtc/modules/desktop_capture/win/cursor_test_data/2_32bpp.cur
new file mode 100644
index 0000000000..ac9cdbfbb3
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/win/cursor_test_data/2_32bpp.cur
Binary files differ
diff --git a/third_party/libwebrtc/modules/desktop_capture/win/cursor_test_data/3_32bpp.cur b/third_party/libwebrtc/modules/desktop_capture/win/cursor_test_data/3_32bpp.cur
new file mode 100644
index 0000000000..efdbee5415
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/win/cursor_test_data/3_32bpp.cur
Binary files differ
diff --git a/third_party/libwebrtc/modules/desktop_capture/win/cursor_test_data/3_4bpp.cur b/third_party/libwebrtc/modules/desktop_capture/win/cursor_test_data/3_4bpp.cur
new file mode 100644
index 0000000000..9678d55446
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/win/cursor_test_data/3_4bpp.cur
Binary files differ
diff --git a/third_party/libwebrtc/modules/desktop_capture/win/cursor_unittest.cc b/third_party/libwebrtc/modules/desktop_capture/win/cursor_unittest.cc
new file mode 100644
index 0000000000..23f5d89571
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/win/cursor_unittest.cc
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/win/cursor.h"
+
+#include <memory>
+
+#include "modules/desktop_capture/desktop_frame.h"
+#include "modules/desktop_capture/desktop_geometry.h"
+#include "modules/desktop_capture/mouse_cursor.h"
+#include "modules/desktop_capture/win/cursor_unittest_resources.h"
+#include "modules/desktop_capture/win/scoped_gdi_object.h"
+#include "test/gmock.h"
+
+namespace webrtc {
+
+namespace {
+
+// Loads `left` from resources, converts it to a `MouseCursor` instance and
+// compares pixels with `right`. Returns true if the MouseCursor bits match
+// `right`, which must be a 32bpp cursor with an alpha channel.
+bool ConvertToMouseShapeAndCompare(unsigned left, unsigned right) {
+ HMODULE instance = GetModuleHandle(NULL);
+
+ // Load `left` from the EXE module's resources.
+ win::ScopedCursor cursor(reinterpret_cast<HCURSOR>(
+ LoadImage(instance, MAKEINTRESOURCE(left), IMAGE_CURSOR, 0, 0, 0)));
+ EXPECT_TRUE(cursor != NULL);
+
+ // Convert `cursor` to `mouse_shape`.
+ HDC dc = GetDC(NULL);
+ std::unique_ptr<MouseCursor> mouse_shape(
+ CreateMouseCursorFromHCursor(dc, cursor));
+ ReleaseDC(NULL, dc);
+
+ EXPECT_TRUE(mouse_shape.get());
+
+ // Load `right`.
+ cursor.Set(reinterpret_cast<HCURSOR>(
+ LoadImage(instance, MAKEINTRESOURCE(right), IMAGE_CURSOR, 0, 0, 0)));
+
+ ICONINFO iinfo;
+ EXPECT_TRUE(GetIconInfo(cursor, &iinfo));
+ EXPECT_TRUE(iinfo.hbmColor);
+
+ // Make sure the bitmaps will be freed.
+ win::ScopedBitmap scoped_mask(iinfo.hbmMask);
+ win::ScopedBitmap scoped_color(iinfo.hbmColor);
+
+ // Get `scoped_color` dimensions.
+ BITMAP bitmap_info;
+ EXPECT_TRUE(GetObject(scoped_color, sizeof(bitmap_info), &bitmap_info));
+
+ int width = bitmap_info.bmWidth;
+ int height = bitmap_info.bmHeight;
+ EXPECT_TRUE(DesktopSize(width, height).equals(mouse_shape->image()->size()));
+
+ // Get the pixels from `scoped_color`.
+ int size = width * height;
+ std::unique_ptr<uint32_t[]> data(new uint32_t[size]);
+ EXPECT_TRUE(GetBitmapBits(scoped_color, size * sizeof(uint32_t), data.get()));
+
+ // Compare the 32bpp image in `mouse_shape` with the one loaded from `right`.
+ return memcmp(data.get(), mouse_shape->image()->data(),
+ size * sizeof(uint32_t)) == 0;
+}
+
+} // namespace
+
+TEST(MouseCursorTest, MatchCursors) {
+ EXPECT_TRUE(
+ ConvertToMouseShapeAndCompare(IDD_CURSOR1_24BPP, IDD_CURSOR1_32BPP));
+
+ EXPECT_TRUE(
+ ConvertToMouseShapeAndCompare(IDD_CURSOR1_8BPP, IDD_CURSOR1_32BPP));
+
+ EXPECT_TRUE(
+ ConvertToMouseShapeAndCompare(IDD_CURSOR2_1BPP, IDD_CURSOR2_32BPP));
+
+ EXPECT_TRUE(
+ ConvertToMouseShapeAndCompare(IDD_CURSOR3_4BPP, IDD_CURSOR3_32BPP));
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/win/cursor_unittest_resources.h b/third_party/libwebrtc/modules/desktop_capture/win/cursor_unittest_resources.h
new file mode 100644
index 0000000000..f583554d68
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/win/cursor_unittest_resources.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_DESKTOP_CAPTURE_WIN_CURSOR_UNITTEST_RESOURCES_H_
+#define MODULES_DESKTOP_CAPTURE_WIN_CURSOR_UNITTEST_RESOURCES_H_
+
+#define IDD_CURSOR1_24BPP 101
+#define IDD_CURSOR1_32BPP 102
+#define IDD_CURSOR1_8BPP 103
+
+#define IDD_CURSOR2_1BPP 104
+#define IDD_CURSOR2_32BPP 105
+
+#define IDD_CURSOR3_4BPP 106
+#define IDD_CURSOR3_32BPP 107
+
+#endif // MODULES_DESKTOP_CAPTURE_WIN_CURSOR_UNITTEST_RESOURCES_H_
diff --git a/third_party/libwebrtc/modules/desktop_capture/win/cursor_unittest_resources.rc b/third_party/libwebrtc/modules/desktop_capture/win/cursor_unittest_resources.rc
new file mode 100644
index 0000000000..90073791c9
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/win/cursor_unittest_resources.rc
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/win/cursor_unittest_resources.h"
+
+// These cursors are matched with their less than 32bpp counterparts below.
+IDD_CURSOR1_32BPP CURSOR "cursor_test_data/1_32bpp.cur"
+IDD_CURSOR2_32BPP CURSOR "cursor_test_data/2_32bpp.cur"
+IDD_CURSOR3_32BPP CURSOR "cursor_test_data/3_32bpp.cur"
+
+// Matches IDD_CURSOR1_32BPP.
+IDD_CURSOR1_24BPP CURSOR "cursor_test_data/1_24bpp.cur"
+
+// Matches IDD_CURSOR1_32BPP.
+IDD_CURSOR1_8BPP CURSOR "cursor_test_data/1_8bpp.cur"
+
+// Matches IDD_CURSOR2_32BPP.
+IDD_CURSOR2_1BPP CURSOR "cursor_test_data/2_1bpp.cur"
+
+// Matches IDD_CURSOR3_32BPP.
+IDD_CURSOR3_4BPP CURSOR "cursor_test_data/3_4bpp.cur"
diff --git a/third_party/libwebrtc/modules/desktop_capture/win/d3d_device.cc b/third_party/libwebrtc/modules/desktop_capture/win/d3d_device.cc
new file mode 100644
index 0000000000..3d46117501
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/win/d3d_device.cc
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/win/d3d_device.h"
+
+#include <utility>
+
+#include "modules/desktop_capture/win/desktop_capture_utils.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+using Microsoft::WRL::ComPtr;
+
+D3dDevice::D3dDevice() = default;
+D3dDevice::D3dDevice(const D3dDevice& other) = default;
+D3dDevice::D3dDevice(D3dDevice&& other) = default;
+D3dDevice::~D3dDevice() = default;
+
+bool D3dDevice::Initialize(const ComPtr<IDXGIAdapter>& adapter) {
+ dxgi_adapter_ = adapter;
+ if (!dxgi_adapter_) {
+ RTC_LOG(LS_WARNING) << "An empty IDXGIAdapter instance has been received.";
+ return false;
+ }
+
+ D3D_FEATURE_LEVEL feature_level;
+ // Default feature levels contain D3D 9.1 through D3D 11.0.
+ _com_error error = D3D11CreateDevice(
+ adapter.Get(), D3D_DRIVER_TYPE_UNKNOWN, nullptr,
+ D3D11_CREATE_DEVICE_BGRA_SUPPORT | D3D11_CREATE_DEVICE_SINGLETHREADED,
+ nullptr, 0, D3D11_SDK_VERSION, d3d_device_.GetAddressOf(), &feature_level,
+ context_.GetAddressOf());
+ if (error.Error() != S_OK || !d3d_device_ || !context_) {
+ RTC_LOG(LS_WARNING) << "D3D11CreateDevice returned: "
+ << desktop_capture::utils::ComErrorToString(error);
+ return false;
+ }
+
+ if (feature_level < D3D_FEATURE_LEVEL_11_0) {
+ RTC_LOG(LS_WARNING)
+ << "D3D11CreateDevice returned an instance without DirectX 11 support, "
+ << "level " << feature_level << ". Following initialization may fail.";
+ // D3D_FEATURE_LEVEL_11_0 is not officially documented on MSDN to be a
+ // requirement of Dxgi duplicator APIs.
+ }
+
+ error = d3d_device_.As(&dxgi_device_);
+ if (error.Error() != S_OK || !dxgi_device_) {
+ RTC_LOG(LS_WARNING)
+ << "ID3D11Device is not an implementation of IDXGIDevice, "
+ << "this usually means the system does not support DirectX "
+ << "11. Error received: "
+ << desktop_capture::utils::ComErrorToString(error);
+ return false;
+ }
+
+ return true;
+}
+
+// static
+std::vector<D3dDevice> D3dDevice::EnumDevices() {
+ ComPtr<IDXGIFactory1> factory;
+ _com_error error =
+ CreateDXGIFactory1(__uuidof(IDXGIFactory1),
+ reinterpret_cast<void**>(factory.GetAddressOf()));
+ if (error.Error() != S_OK || !factory) {
+ RTC_LOG(LS_WARNING) << "Cannot create IDXGIFactory1: "
+ << desktop_capture::utils::ComErrorToString(error);
+ return std::vector<D3dDevice>();
+ }
+
+ std::vector<D3dDevice> result;
+ for (int i = 0;; i++) {
+ ComPtr<IDXGIAdapter> adapter;
+ error = factory->EnumAdapters(i, adapter.GetAddressOf());
+ if (error.Error() == S_OK) {
+ D3dDevice device;
+ if (device.Initialize(adapter)) {
+ result.push_back(std::move(device));
+ }
+ } else if (error.Error() == DXGI_ERROR_NOT_FOUND) {
+ break;
+ } else {
+ RTC_LOG(LS_WARNING)
+ << "IDXGIFactory1::EnumAdapters returned an unexpected error: "
+ << desktop_capture::utils::ComErrorToString(error);
+ }
+ }
+ return result;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/win/d3d_device.h b/third_party/libwebrtc/modules/desktop_capture/win/d3d_device.h
new file mode 100644
index 0000000000..aeb9d1823a
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/win/d3d_device.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_DESKTOP_CAPTURE_WIN_D3D_DEVICE_H_
+#define MODULES_DESKTOP_CAPTURE_WIN_D3D_DEVICE_H_
+
+#include <comdef.h>
+#include <d3d11.h>
+#include <dxgi.h>
+#include <wrl/client.h>
+
+#include <vector>
+
+namespace webrtc {
+
+// A wrapper of ID3D11Device and its corresponding context and IDXGIAdapter.
+// This class represents one video card in the system.
+class D3dDevice {
+ public:
+ D3dDevice(const D3dDevice& other);
+ D3dDevice(D3dDevice&& other);
+ ~D3dDevice();
+
+ ID3D11Device* d3d_device() const { return d3d_device_.Get(); }
+
+ ID3D11DeviceContext* context() const { return context_.Get(); }
+
+ IDXGIDevice* dxgi_device() const { return dxgi_device_.Get(); }
+
+ IDXGIAdapter* dxgi_adapter() const { return dxgi_adapter_.Get(); }
+
+  // Returns all D3dDevice instances on the system. Returns an empty vector if
+  // anything goes wrong.
+ static std::vector<D3dDevice> EnumDevices();
+
+ private:
+ // Instances of D3dDevice should only be created by EnumDevices() static
+ // function.
+ D3dDevice();
+
+ // Initializes the D3dDevice from an IDXGIAdapter.
+ bool Initialize(const Microsoft::WRL::ComPtr<IDXGIAdapter>& adapter);
+
+ Microsoft::WRL::ComPtr<ID3D11Device> d3d_device_;
+ Microsoft::WRL::ComPtr<ID3D11DeviceContext> context_;
+ Microsoft::WRL::ComPtr<IDXGIDevice> dxgi_device_;
+ Microsoft::WRL::ComPtr<IDXGIAdapter> dxgi_adapter_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_DESKTOP_CAPTURE_WIN_D3D_DEVICE_H_
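For context, a minimal sketch, not part of the change, of enumerating the
system's video cards through this wrapper:

#include <vector>

#include "modules/desktop_capture/win/d3d_device.h"
#include "rtc_base/logging.h"

void LogD3dDevices() {
  std::vector<webrtc::D3dDevice> devices = webrtc::D3dDevice::EnumDevices();
  if (devices.empty()) {
    RTC_LOG(LS_WARNING) << "No usable Direct3D 11 device found.";
    return;
  }
  for (const webrtc::D3dDevice& device : devices) {
    // dxgi_adapter() is non-null for every device returned by EnumDevices().
    DXGI_ADAPTER_DESC desc;
    if (device.dxgi_adapter()->GetDesc(&desc) == S_OK) {
      RTC_LOG(LS_INFO) << "Adapter LUID: " << desc.AdapterLuid.HighPart << "/"
                       << desc.AdapterLuid.LowPart;
    }
  }
}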
diff --git a/third_party/libwebrtc/modules/desktop_capture/win/desktop.cc b/third_party/libwebrtc/modules/desktop_capture/win/desktop.cc
new file mode 100644
index 0000000000..4a671dd9ae
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/win/desktop.cc
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/win/desktop.h"
+
+#include <vector>
+
+#include "rtc_base/logging.h"
+#include "rtc_base/string_utils.h"
+
+namespace webrtc {
+
+Desktop::Desktop(HDESK desktop, bool own) : desktop_(desktop), own_(own) {}
+
+Desktop::~Desktop() {
+ if (own_ && desktop_ != NULL) {
+ if (!::CloseDesktop(desktop_)) {
+ RTC_LOG(LS_ERROR) << "Failed to close the owned desktop handle: "
+ << GetLastError();
+ }
+ }
+}
+
+bool Desktop::GetName(std::wstring* desktop_name_out) const {
+ if (desktop_ == NULL)
+ return false;
+
+ DWORD length = 0;
+ int rv = GetUserObjectInformationW(desktop_, UOI_NAME, NULL, 0, &length);
+ if (rv || GetLastError() != ERROR_INSUFFICIENT_BUFFER)
+ abort();
+
+ length /= sizeof(WCHAR);
+ std::vector<WCHAR> buffer(length);
+ if (!GetUserObjectInformationW(desktop_, UOI_NAME, &buffer[0],
+ length * sizeof(WCHAR), &length)) {
+ RTC_LOG(LS_ERROR) << "Failed to query the desktop name: " << GetLastError();
+ return false;
+ }
+
+ desktop_name_out->assign(&buffer[0], length / sizeof(WCHAR));
+ return true;
+}
+
+bool Desktop::IsSame(const Desktop& other) const {
+ std::wstring name;
+ if (!GetName(&name))
+ return false;
+
+ std::wstring other_name;
+ if (!other.GetName(&other_name))
+ return false;
+
+ return name == other_name;
+}
+
+bool Desktop::SetThreadDesktop() const {
+ if (!::SetThreadDesktop(desktop_)) {
+ RTC_LOG(LS_ERROR) << "Failed to assign the desktop to the current thread: "
+ << GetLastError();
+ return false;
+ }
+
+ return true;
+}
+
+Desktop* Desktop::GetDesktop(const WCHAR* desktop_name) {
+ ACCESS_MASK desired_access = DESKTOP_CREATEMENU | DESKTOP_CREATEWINDOW |
+ DESKTOP_ENUMERATE | DESKTOP_HOOKCONTROL |
+ DESKTOP_WRITEOBJECTS | DESKTOP_READOBJECTS |
+ DESKTOP_SWITCHDESKTOP | GENERIC_WRITE;
+ HDESK desktop = OpenDesktopW(desktop_name, 0, FALSE, desired_access);
+ if (desktop == NULL) {
+ RTC_LOG(LS_ERROR) << "Failed to open the desktop '"
+ << rtc::ToUtf8(desktop_name) << "': " << GetLastError();
+ return NULL;
+ }
+
+ return new Desktop(desktop, true);
+}
+
+Desktop* Desktop::GetInputDesktop() {
+ HDESK desktop = OpenInputDesktop(
+ 0, FALSE, GENERIC_READ | GENERIC_WRITE | GENERIC_EXECUTE);
+ if (desktop == NULL)
+ return NULL;
+
+ return new Desktop(desktop, true);
+}
+
+Desktop* Desktop::GetThreadDesktop() {
+ HDESK desktop = ::GetThreadDesktop(GetCurrentThreadId());
+ if (desktop == NULL) {
+ RTC_LOG(LS_ERROR)
+ << "Failed to retrieve the handle of the desktop assigned to "
+ "the current thread: "
+ << GetLastError();
+ return NULL;
+ }
+
+ return new Desktop(desktop, false);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/win/desktop.h b/third_party/libwebrtc/modules/desktop_capture/win/desktop.h
new file mode 100644
index 0000000000..01bed8592d
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/win/desktop.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_DESKTOP_CAPTURE_WIN_DESKTOP_H_
+#define MODULES_DESKTOP_CAPTURE_WIN_DESKTOP_H_
+
+#include <windows.h>
+
+#include <string>
+
+#include "rtc_base/system/rtc_export.h"
+
+namespace webrtc {
+
+class RTC_EXPORT Desktop {
+ public:
+ ~Desktop();
+
+ Desktop(const Desktop&) = delete;
+ Desktop& operator=(const Desktop&) = delete;
+
+  // Returns the name of the desktop represented by the object. Returns false
+  // if querying the name failed for any reason.
+ bool GetName(std::wstring* desktop_name_out) const;
+
+ // Returns true if `other` has the same name as this desktop. Returns false
+ // in any other case including failing Win32 APIs and uninitialized desktop
+ // handles.
+ bool IsSame(const Desktop& other) const;
+
+  // Assigns the desktop to the current thread. Returns false if the operation
+ // failed for any reason.
+ bool SetThreadDesktop() const;
+
+ // Returns the desktop by its name or NULL if an error occurs.
+ static Desktop* GetDesktop(const wchar_t* desktop_name);
+
+ // Returns the desktop currently receiving user input or NULL if an error
+ // occurs.
+ static Desktop* GetInputDesktop();
+
+ // Returns the desktop currently assigned to the calling thread or NULL if
+ // an error occurs.
+ static Desktop* GetThreadDesktop();
+
+ private:
+ Desktop(HDESK desktop, bool own);
+
+ // The desktop handle.
+ HDESK desktop_;
+
+ // True if `desktop_` must be closed on teardown.
+ bool own_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_DESKTOP_CAPTURE_WIN_DESKTOP_H_
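A hedged sketch, not part of the change, of the typical use: keep a capture
thread on the desktop that is receiving user input (which changes across the
login screen and UAC prompts) by reassigning the thread when needed.

#include <memory>

#include "modules/desktop_capture/win/desktop.h"

bool SwitchToInputDesktopIfNeeded() {
  std::unique_ptr<webrtc::Desktop> current(
      webrtc::Desktop::GetThreadDesktop());
  std::unique_ptr<webrtc::Desktop> input(webrtc::Desktop::GetInputDesktop());
  if (!current || !input)
    return false;
  if (current->IsSame(*input))
    return true;  // Already on the desktop receiving user input.
  return input->SetThreadDesktop();
}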
diff --git a/third_party/libwebrtc/modules/desktop_capture/win/desktop_capture_utils.cc b/third_party/libwebrtc/modules/desktop_capture/win/desktop_capture_utils.cc
new file mode 100644
index 0000000000..476ddc4aba
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/win/desktop_capture_utils.cc
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/win/desktop_capture_utils.h"
+
+#include "rtc_base/strings/string_builder.h"
+
+namespace webrtc {
+namespace desktop_capture {
+namespace utils {
+
+// Generates a human-readable string from a COM error.
+std::string ComErrorToString(const _com_error& error) {
+ char buffer[1024];
+ rtc::SimpleStringBuilder string_builder(buffer);
+ // Use _bstr_t to simplify the wchar to char conversion for ErrorMessage().
+ _bstr_t error_message(error.ErrorMessage());
+ string_builder.AppendFormat("HRESULT: 0x%08X, Message: %s", error.Error(),
+ static_cast<const char*>(error_message));
+ return string_builder.str();
+}
+
+} // namespace utils
+} // namespace desktop_capture
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/win/desktop_capture_utils.h b/third_party/libwebrtc/modules/desktop_capture/win/desktop_capture_utils.h
new file mode 100644
index 0000000000..ebf31419ce
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/win/desktop_capture_utils.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_DESKTOP_CAPTURE_WIN_DESKTOP_CAPTURE_UTILS_H_
+#define MODULES_DESKTOP_CAPTURE_WIN_DESKTOP_CAPTURE_UTILS_H_
+
+#include <comdef.h>
+
+#include <string>
+
+namespace webrtc {
+namespace desktop_capture {
+namespace utils {
+
+// Generates a human-readable string from a COM error.
+std::string ComErrorToString(const _com_error& error);
+
+} // namespace utils
+} // namespace desktop_capture
+} // namespace webrtc
+
+#endif // MODULES_DESKTOP_CAPTURE_WIN_DESKTOP_CAPTURE_UTILS_H_
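Usage sketch, not part of the change: any HRESULT can be wrapped in a
_com_error for logging, which is how the capturers in this directory use it.

#include <comdef.h>

#include "modules/desktop_capture/win/desktop_capture_utils.h"
#include "rtc_base/logging.h"

void LogComFailure(HRESULT hr) {
  _com_error error(hr);
  RTC_LOG(LS_WARNING) << "Operation failed: "
                      << webrtc::desktop_capture::utils::ComErrorToString(
                             error);
}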
diff --git a/third_party/libwebrtc/modules/desktop_capture/win/display_configuration_monitor.cc b/third_party/libwebrtc/modules/desktop_capture/win/display_configuration_monitor.cc
new file mode 100644
index 0000000000..52d89214d8
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/win/display_configuration_monitor.cc
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/win/display_configuration_monitor.h"
+
+#include "modules/desktop_capture/win/screen_capture_utils.h"
+
+namespace webrtc {
+
+bool DisplayConfigurationMonitor::IsChanged() {
+ DesktopRect rect = GetFullscreenRect();
+ if (!initialized_) {
+ initialized_ = true;
+ rect_ = rect;
+ return false;
+ }
+
+ if (rect.equals(rect_)) {
+ return false;
+ }
+
+ rect_ = rect;
+ return true;
+}
+
+void DisplayConfigurationMonitor::Reset() {
+ initialized_ = false;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/win/display_configuration_monitor.h b/third_party/libwebrtc/modules/desktop_capture/win/display_configuration_monitor.h
new file mode 100644
index 0000000000..39c211cfbe
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/win/display_configuration_monitor.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_DESKTOP_CAPTURE_WIN_DISPLAY_CONFIGURATION_MONITOR_H_
+#define MODULES_DESKTOP_CAPTURE_WIN_DISPLAY_CONFIGURATION_MONITOR_H_
+
+#include "modules/desktop_capture/desktop_geometry.h"
+
+namespace webrtc {
+
+// A passive monitor that detects display configuration changes on a Windows
+// system.
+// TODO(zijiehe): Also check for pixel format changes.
+class DisplayConfigurationMonitor {
+ public:
+  // Checks whether the display configuration has changed since the last
+  // IsChanged() call. This function won't return true the first time it is
+  // called after construction or a Reset() call.
+ bool IsChanged();
+
+ // Resets to the initial state.
+ void Reset();
+
+ private:
+ DesktopRect rect_;
+ bool initialized_ = false;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_DESKTOP_CAPTURE_WIN_DISPLAY_CONFIGURATION_MONITOR_H_
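A minimal sketch, not part of the change, of the intended polling pattern:
check once per capture iteration and rebuild capture resources on a change.
ReinitializeCapture() below is a hypothetical placeholder.

#include "modules/desktop_capture/win/display_configuration_monitor.h"

void ReinitializeCapture();  // Hypothetical: tear down and rebuild resources.

void OnCaptureIteration(webrtc::DisplayConfigurationMonitor* monitor) {
  // IsChanged() returns false on the first call after construction or
  // Reset(); afterwards it compares against the previously seen rect.
  if (monitor->IsChanged()) {
    ReinitializeCapture();
  }
}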
diff --git a/third_party/libwebrtc/modules/desktop_capture/win/dxgi_adapter_duplicator.cc b/third_party/libwebrtc/modules/desktop_capture/win/dxgi_adapter_duplicator.cc
new file mode 100644
index 0000000000..88ec4e25bf
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/win/dxgi_adapter_duplicator.cc
@@ -0,0 +1,185 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/win/dxgi_adapter_duplicator.h"
+
+#include <comdef.h>
+#include <dxgi.h>
+
+#include <algorithm>
+
+#include "modules/desktop_capture/win/desktop_capture_utils.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+using Microsoft::WRL::ComPtr;
+
+namespace {
+
+bool IsValidRect(const RECT& rect) {
+ return rect.right > rect.left && rect.bottom > rect.top;
+}
+
+} // namespace
+
+DxgiAdapterDuplicator::DxgiAdapterDuplicator(const D3dDevice& device)
+ : device_(device) {}
+DxgiAdapterDuplicator::DxgiAdapterDuplicator(DxgiAdapterDuplicator&&) = default;
+DxgiAdapterDuplicator::~DxgiAdapterDuplicator() = default;
+
+bool DxgiAdapterDuplicator::Initialize() {
+ if (DoInitialize()) {
+ return true;
+ }
+ duplicators_.clear();
+ return false;
+}
+
+bool DxgiAdapterDuplicator::DoInitialize() {
+ for (int i = 0;; i++) {
+ ComPtr<IDXGIOutput> output;
+ _com_error error =
+ device_.dxgi_adapter()->EnumOutputs(i, output.GetAddressOf());
+ if (error.Error() == DXGI_ERROR_NOT_FOUND) {
+ break;
+ }
+
+ if (error.Error() == DXGI_ERROR_NOT_CURRENTLY_AVAILABLE) {
+ RTC_LOG(LS_WARNING) << "IDXGIAdapter::EnumOutputs returned "
+ << "NOT_CURRENTLY_AVAILABLE. This may happen when "
+ << "running in session 0.";
+ break;
+ }
+
+ if (error.Error() != S_OK || !output) {
+ RTC_LOG(LS_WARNING) << "IDXGIAdapter::EnumOutputs returned an unexpected "
+ << "result: "
+ << desktop_capture::utils::ComErrorToString(error);
+ continue;
+ }
+
+ DXGI_OUTPUT_DESC desc;
+ error = output->GetDesc(&desc);
+ if (error.Error() == S_OK) {
+ if (desc.AttachedToDesktop && IsValidRect(desc.DesktopCoordinates)) {
+ ComPtr<IDXGIOutput1> output1;
+ error = output.As(&output1);
+ if (error.Error() != S_OK || !output1) {
+ RTC_LOG(LS_WARNING)
+ << "Failed to convert IDXGIOutput to IDXGIOutput1, this usually "
+ << "means the system does not support DirectX 11";
+ continue;
+ }
+ DxgiOutputDuplicator duplicator(device_, output1, desc);
+ if (!duplicator.Initialize()) {
+ RTC_LOG(LS_WARNING) << "Failed to initialize DxgiOutputDuplicator on "
+ << "output " << i;
+ continue;
+ }
+
+ duplicators_.push_back(std::move(duplicator));
+ desktop_rect_.UnionWith(duplicators_.back().desktop_rect());
+ } else {
+ RTC_LOG(LS_ERROR) << (desc.AttachedToDesktop ? "Attached" : "Detached")
+ << " output " << i << " ("
+ << desc.DesktopCoordinates.top << ", "
+ << desc.DesktopCoordinates.left << ") - ("
+ << desc.DesktopCoordinates.bottom << ", "
+ << desc.DesktopCoordinates.right << ") is ignored.";
+ }
+ } else {
+ RTC_LOG(LS_WARNING) << "Failed to get output description of device " << i
+ << ", ignore.";
+ }
+ }
+
+ if (duplicators_.empty()) {
+ RTC_LOG(LS_WARNING)
+ << "Cannot initialize any DxgiOutputDuplicator instance.";
+ }
+
+ return !duplicators_.empty();
+}
+
+void DxgiAdapterDuplicator::Setup(Context* context) {
+ RTC_DCHECK(context->contexts.empty());
+ context->contexts.resize(duplicators_.size());
+ for (size_t i = 0; i < duplicators_.size(); i++) {
+ duplicators_[i].Setup(&context->contexts[i]);
+ }
+}
+
+void DxgiAdapterDuplicator::Unregister(const Context* const context) {
+ RTC_DCHECK_EQ(context->contexts.size(), duplicators_.size());
+ for (size_t i = 0; i < duplicators_.size(); i++) {
+ duplicators_[i].Unregister(&context->contexts[i]);
+ }
+}
+
+bool DxgiAdapterDuplicator::Duplicate(Context* context,
+ SharedDesktopFrame* target) {
+ RTC_DCHECK_EQ(context->contexts.size(), duplicators_.size());
+ for (size_t i = 0; i < duplicators_.size(); i++) {
+ if (!duplicators_[i].Duplicate(&context->contexts[i],
+ duplicators_[i].desktop_rect().top_left(),
+ target)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+bool DxgiAdapterDuplicator::DuplicateMonitor(Context* context,
+ int monitor_id,
+ SharedDesktopFrame* target) {
+ RTC_DCHECK_GE(monitor_id, 0);
+ RTC_DCHECK_LT(monitor_id, duplicators_.size());
+ RTC_DCHECK_EQ(context->contexts.size(), duplicators_.size());
+ return duplicators_[monitor_id].Duplicate(&context->contexts[monitor_id],
+ DesktopVector(), target);
+}
+
+DesktopRect DxgiAdapterDuplicator::ScreenRect(int id) const {
+ RTC_DCHECK_GE(id, 0);
+ RTC_DCHECK_LT(id, duplicators_.size());
+ return duplicators_[id].desktop_rect();
+}
+
+const std::string& DxgiAdapterDuplicator::GetDeviceName(int id) const {
+ RTC_DCHECK_GE(id, 0);
+ RTC_DCHECK_LT(id, duplicators_.size());
+ return duplicators_[id].device_name();
+}
+
+int DxgiAdapterDuplicator::screen_count() const {
+ return static_cast<int>(duplicators_.size());
+}
+
+int64_t DxgiAdapterDuplicator::GetNumFramesCaptured() const {
+ int64_t min = INT64_MAX;
+ for (const auto& duplicator : duplicators_) {
+ min = std::min(min, duplicator.num_frames_captured());
+ }
+
+ return min;
+}
+
+void DxgiAdapterDuplicator::TranslateRect(const DesktopVector& position) {
+ desktop_rect_.Translate(position);
+ RTC_DCHECK_GE(desktop_rect_.left(), 0);
+ RTC_DCHECK_GE(desktop_rect_.top(), 0);
+ for (auto& duplicator : duplicators_) {
+ duplicator.TranslateRect(position);
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/win/dxgi_adapter_duplicator.h b/third_party/libwebrtc/modules/desktop_capture/win/dxgi_adapter_duplicator.h
new file mode 100644
index 0000000000..5931b51f9e
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/win/dxgi_adapter_duplicator.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_DESKTOP_CAPTURE_WIN_DXGI_ADAPTER_DUPLICATOR_H_
+#define MODULES_DESKTOP_CAPTURE_WIN_DXGI_ADAPTER_DUPLICATOR_H_
+
+#include <wrl/client.h>
+
+#include <vector>
+
+#include "modules/desktop_capture/desktop_geometry.h"
+#include "modules/desktop_capture/shared_desktop_frame.h"
+#include "modules/desktop_capture/win/d3d_device.h"
+#include "modules/desktop_capture/win/dxgi_context.h"
+#include "modules/desktop_capture/win/dxgi_output_duplicator.h"
+
+namespace webrtc {
+
+// A container of DxgiOutputDuplicators to duplicate monitors attached to a
+// single video card.
+class DxgiAdapterDuplicator {
+ public:
+ using Context = DxgiAdapterContext;
+
+ // Creates an instance of DxgiAdapterDuplicator from a D3dDevice. Only
+ // DxgiDuplicatorController can create an instance.
+ explicit DxgiAdapterDuplicator(const D3dDevice& device);
+
+ // Move constructor, to make it possible to store instances of
+ // DxgiAdapterDuplicator in std::vector<>.
+ DxgiAdapterDuplicator(DxgiAdapterDuplicator&& other);
+
+ ~DxgiAdapterDuplicator();
+
+ // Initializes the DxgiAdapterDuplicator from a D3dDevice.
+ bool Initialize();
+
+ // Sequentially calls Duplicate function of all the DxgiOutputDuplicator
+ // instances owned by this instance, and writes into `target`.
+ bool Duplicate(Context* context, SharedDesktopFrame* target);
+
+ // Captures one monitor and writes into `target`. `monitor_id` should be
+ // between [0, screen_count()).
+ bool DuplicateMonitor(Context* context,
+ int monitor_id,
+ SharedDesktopFrame* target);
+
+ // Returns desktop rect covered by this DxgiAdapterDuplicator.
+ DesktopRect desktop_rect() const { return desktop_rect_; }
+
+  // Returns the rect of one screen owned by this DxgiAdapterDuplicator. `id`
+ // should be between [0, screen_count()).
+ DesktopRect ScreenRect(int id) const;
+
+ // Returns the device name of one screen owned by this DxgiAdapterDuplicator
+ // in utf8 encoding. `id` should be between [0, screen_count()).
+ const std::string& GetDeviceName(int id) const;
+
+ // Returns the count of screens owned by this DxgiAdapterDuplicator. These
+  // screens can be retrieved by an integer in the range of
+ // [0, screen_count()).
+ int screen_count() const;
+
+ void Setup(Context* context);
+
+ void Unregister(const Context* const context);
+
+ // The minimum num_frames_captured() returned by `duplicators_`.
+ int64_t GetNumFramesCaptured() const;
+
+ // Moves `desktop_rect_` and all underlying `duplicators_`. See
+ // DxgiDuplicatorController::TranslateRect().
+ void TranslateRect(const DesktopVector& position);
+
+ private:
+ bool DoInitialize();
+
+ const D3dDevice device_;
+ std::vector<DxgiOutputDuplicator> duplicators_;
+ DesktopRect desktop_rect_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_DESKTOP_CAPTURE_WIN_DXGI_ADAPTER_DUPLICATOR_H_
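A hedged sketch, not part of the change, of the per-adapter lifecycle this
interface implies; in the real code DxgiDuplicatorController owns these
instances and drives the calls.

#include "modules/desktop_capture/shared_desktop_frame.h"
#include "modules/desktop_capture/win/dxgi_adapter_duplicator.h"

bool CaptureAdapterOnce(webrtc::DxgiAdapterDuplicator* adapter,
                        webrtc::SharedDesktopFrame* target) {
  if (!adapter->Initialize())
    return false;

  // Each consumer keeps one Context per adapter; Setup() sizes it to match
  // the adapter's outputs.
  webrtc::DxgiAdapterDuplicator::Context context;
  adapter->Setup(&context);

  // Duplicates every attached output into `target` at its desktop offset.
  bool ok = adapter->Duplicate(&context, target);

  adapter->Unregister(&context);
  return ok;
}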
diff --git a/third_party/libwebrtc/modules/desktop_capture/win/dxgi_context.cc b/third_party/libwebrtc/modules/desktop_capture/win/dxgi_context.cc
new file mode 100644
index 0000000000..c18b238f03
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/win/dxgi_context.cc
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/win/dxgi_context.h"
+
+#include "modules/desktop_capture/win/dxgi_duplicator_controller.h"
+
+namespace webrtc {
+
+DxgiAdapterContext::DxgiAdapterContext() = default;
+DxgiAdapterContext::DxgiAdapterContext(const DxgiAdapterContext& context) =
+ default;
+DxgiAdapterContext::~DxgiAdapterContext() = default;
+
+DxgiFrameContext::DxgiFrameContext() = default;
+
+DxgiFrameContext::~DxgiFrameContext() {
+ Reset();
+}
+
+void DxgiFrameContext::Reset() {
+ DxgiDuplicatorController::Instance()->Unregister(this);
+ controller_id = 0;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/win/dxgi_context.h b/third_party/libwebrtc/modules/desktop_capture/win/dxgi_context.h
new file mode 100644
index 0000000000..59c2112db5
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/win/dxgi_context.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_DESKTOP_CAPTURE_WIN_DXGI_CONTEXT_H_
+#define MODULES_DESKTOP_CAPTURE_WIN_DXGI_CONTEXT_H_
+
+#include <vector>
+
+#include "modules/desktop_capture/desktop_region.h"
+
+namespace webrtc {
+
+// A DxgiOutputContext stores the status of a single DxgiFrame of
+// DxgiOutputDuplicator.
+struct DxgiOutputContext final {
+ // The updated region DxgiOutputDuplicator::DetectUpdatedRegion() outputted
+ // during the last Duplicate() call. It is always relative to (0, 0).
+ DesktopRegion updated_region;
+};
+
+// A DxgiAdapterContext stores the status of a single DxgiFrame of
+// DxgiAdapterDuplicator.
+struct DxgiAdapterContext final {
+ DxgiAdapterContext();
+ DxgiAdapterContext(const DxgiAdapterContext& other);
+ ~DxgiAdapterContext();
+
+ // The child DxgiOutputContexts belonging to this DxgiAdapterContext.
+ std::vector<DxgiOutputContext> contexts;
+};
+
+// A DxgiFrameContext stores the status of a single DxgiFrame of
+// DxgiDuplicatorController.
+struct DxgiFrameContext final {
+ public:
+ DxgiFrameContext();
+ // Unregisters this Context instance from DxgiDuplicatorController during
+ // destruction.
+ ~DxgiFrameContext();
+
+ // Reset current Context, so it will be reinitialized next time.
+ void Reset();
+
+ // A Context has exactly the same `controller_id` as
+ // DxgiDuplicatorController, to ensure it has been correctly set up after
+ // each DxgiDuplicatorController::Initialize().
+ int controller_id = 0;
+
+ // The child DxgiAdapterContexts belonging to this DxgiFrameContext.
+ std::vector<DxgiAdapterContext> contexts;
+};
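+
+// A hypothetical context tree for a system with two adapters owning one and
+// two outputs respectively:
+//
+//   DxgiFrameContext
+//     contexts[0]: DxgiAdapterContext (adapter 0)
+//       contexts[0]: DxgiOutputContext (output 0)
+//     contexts[1]: DxgiAdapterContext (adapter 1)
+//       contexts[0]: DxgiOutputContext (output 0)
+//       contexts[1]: DxgiOutputContext (output 1)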
+
+} // namespace webrtc
+
+#endif // MODULES_DESKTOP_CAPTURE_WIN_DXGI_CONTEXT_H_
diff --git a/third_party/libwebrtc/modules/desktop_capture/win/dxgi_duplicator_controller.cc b/third_party/libwebrtc/modules/desktop_capture/win/dxgi_duplicator_controller.cc
new file mode 100644
index 0000000000..a776896f6c
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/win/dxgi_duplicator_controller.cc
@@ -0,0 +1,514 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/win/dxgi_duplicator_controller.h"
+
+#include <windows.h>
+
+#include <algorithm>
+#include <string>
+
+#include "modules/desktop_capture/desktop_capture_types.h"
+#include "modules/desktop_capture/win/dxgi_frame.h"
+#include "modules/desktop_capture/win/screen_capture_utils.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/time_utils.h"
+#include "system_wrappers/include/sleep.h"
+
+namespace webrtc {
+
+namespace {
+
+constexpr DWORD kInvalidSessionId = 0xFFFFFFFF;
+
+DWORD GetCurrentSessionId() {
+ DWORD session_id = kInvalidSessionId;
+ if (!::ProcessIdToSessionId(::GetCurrentProcessId(), &session_id)) {
+ RTC_LOG(LS_WARNING)
+ << "Failed to retrieve current session Id, current binary "
+ "may not have the required privilege.";
+ }
+ return session_id;
+}
+
+bool IsConsoleSession() {
+ return WTSGetActiveConsoleSessionId() == GetCurrentSessionId();
+}
+
+} // namespace
+
+// static
+std::string DxgiDuplicatorController::ResultName(
+ DxgiDuplicatorController::Result result) {
+ switch (result) {
+ case Result::SUCCEEDED:
+ return "Succeeded";
+ case Result::UNSUPPORTED_SESSION:
+ return "Unsupported session";
+ case Result::FRAME_PREPARE_FAILED:
+ return "Frame preparation failed";
+ case Result::INITIALIZATION_FAILED:
+ return "Initialization failed";
+ case Result::DUPLICATION_FAILED:
+ return "Duplication failed";
+ case Result::INVALID_MONITOR_ID:
+ return "Invalid monitor id";
+ default:
+ return "Unknown error";
+ }
+}
+
+// static
+rtc::scoped_refptr<DxgiDuplicatorController>
+DxgiDuplicatorController::Instance() {
+ // The static instance won't be deleted, to ensure it can be used by other
+ // threads even during program exit.
+ static DxgiDuplicatorController* instance = new DxgiDuplicatorController();
+ return rtc::scoped_refptr<DxgiDuplicatorController>(instance);
+}
+
+// static
+bool DxgiDuplicatorController::IsCurrentSessionSupported() {
+ DWORD current_session_id = GetCurrentSessionId();
+ return current_session_id != kInvalidSessionId && current_session_id != 0;
+}
+
+DxgiDuplicatorController::DxgiDuplicatorController() : refcount_(0) {}
+
+void DxgiDuplicatorController::AddRef() {
+ int refcount = (++refcount_);
+ RTC_DCHECK(refcount > 0);
+}
+
+void DxgiDuplicatorController::Release() {
+ int refcount = (--refcount_);
+ RTC_DCHECK(refcount >= 0);
+ if (refcount == 0) {
+ RTC_LOG(LS_WARNING) << "Count of references reaches zero, "
+ "DxgiDuplicatorController will be unloaded.";
+ Unload();
+ }
+}
+
+bool DxgiDuplicatorController::IsSupported() {
+ MutexLock lock(&mutex_);
+ return Initialize();
+}
+
+bool DxgiDuplicatorController::RetrieveD3dInfo(D3dInfo* info) {
+ bool result = false;
+ {
+ MutexLock lock(&mutex_);
+ result = Initialize();
+ *info = d3d_info_;
+ }
+ if (!result) {
+ RTC_LOG(LS_WARNING) << "Failed to initialize DXGI components, the D3dInfo "
+ "retrieved may not be accurate or may be out of date.";
+ }
+ return result;
+}
+
+DxgiDuplicatorController::Result DxgiDuplicatorController::Duplicate(
+ DxgiFrame* frame) {
+ return DoDuplicate(frame, -1);
+}
+
+DxgiDuplicatorController::Result DxgiDuplicatorController::DuplicateMonitor(
+ DxgiFrame* frame,
+ int monitor_id) {
+ RTC_DCHECK_GE(monitor_id, 0);
+ return DoDuplicate(frame, monitor_id);
+}
+
+DesktopVector DxgiDuplicatorController::dpi() {
+ MutexLock lock(&mutex_);
+ if (Initialize()) {
+ return dpi_;
+ }
+ return DesktopVector();
+}
+
+int DxgiDuplicatorController::ScreenCount() {
+ MutexLock lock(&mutex_);
+ if (Initialize()) {
+ return ScreenCountUnlocked();
+ }
+ return 0;
+}
+
+bool DxgiDuplicatorController::GetDeviceNames(
+ std::vector<std::string>* output) {
+ MutexLock lock(&mutex_);
+ if (Initialize()) {
+ GetDeviceNamesUnlocked(output);
+ return true;
+ }
+ return false;
+}
+
+DxgiDuplicatorController::Result DxgiDuplicatorController::DoDuplicate(
+ DxgiFrame* frame,
+ int monitor_id) {
+ RTC_DCHECK(frame);
+ MutexLock lock(&mutex_);
+
+ // The DXGI components and APIs do not update the screen resolution without
+ // a reinitialization. So we use the GetDC() function to retrieve the screen
+ // resolution and decide whether the DXGI components need to be
+ // reinitialized.
+ // If the screen resolution changed, it's very likely the next Duplicate()
+ // call will fail because of a missing monitor or because the frame size is
+ // not large enough to store the output. So we reinitialize the DXGI
+ // components in-place to avoid a capture failure.
+ // But there is no guarantee the GetDC() function returns the same resolution
+ // as the DXGI APIs, so we still rely on the DXGI components to return the
+ // output frame size.
+ // TODO(zijiehe): Confirm whether IDXGIOutput::GetDesc() and
+ // IDXGIOutputDuplication::GetDesc() can detect the resolution change without
+ // reinitialization.
+ if (display_configuration_monitor_.IsChanged()) {
+ Deinitialize();
+ }
+
+ if (!Initialize()) {
+ if (succeeded_duplications_ == 0 && !IsCurrentSessionSupported()) {
+ RTC_LOG(LS_WARNING) << "Current binary is running in session 0. DXGI "
+ "components cannot be initialized.";
+ return Result::UNSUPPORTED_SESSION;
+ }
+
+ // Cannot initialize COM components now; the display mode may be changing.
+ return Result::INITIALIZATION_FAILED;
+ }
+
+ if (!frame->Prepare(SelectedDesktopSize(monitor_id), monitor_id)) {
+ return Result::FRAME_PREPARE_FAILED;
+ }
+
+ frame->frame()->mutable_updated_region()->Clear();
+
+ if (DoDuplicateUnlocked(frame->context(), monitor_id, frame->frame())) {
+ succeeded_duplications_++;
+ return Result::SUCCEEDED;
+ }
+ if (monitor_id >= ScreenCountUnlocked()) {
+ // It's a user error to provide a `monitor_id` no less than the screen
+ // count. We do not need to deinitialize.
+ return Result::INVALID_MONITOR_ID;
+ }
+
+ // If the `monitor_id` is valid, but DoDuplicateUnlocked() failed, something
+ // must be wrong with the capturer APIs. We should Deinitialize().
+ Deinitialize();
+ return Result::DUPLICATION_FAILED;
+}
+
+void DxgiDuplicatorController::Unload() {
+ MutexLock lock(&mutex_);
+ Deinitialize();
+}
+
+void DxgiDuplicatorController::Unregister(const Context* const context) {
+ MutexLock lock(&mutex_);
+ if (ContextExpired(context)) {
+ // The Context has not been set up after the most recent initialization, so
+ // it cannot have been registered with the duplicators.
+ }
+ for (size_t i = 0; i < duplicators_.size(); i++) {
+ duplicators_[i].Unregister(&context->contexts[i]);
+ }
+}
+
+bool DxgiDuplicatorController::Initialize() {
+ if (!duplicators_.empty()) {
+ return true;
+ }
+
+ if (DoInitialize()) {
+ return true;
+ }
+ Deinitialize();
+ return false;
+}
+
+bool DxgiDuplicatorController::DoInitialize() {
+ RTC_DCHECK(desktop_rect_.is_empty());
+ RTC_DCHECK(duplicators_.empty());
+
+ d3d_info_.min_feature_level = static_cast<D3D_FEATURE_LEVEL>(0);
+ d3d_info_.max_feature_level = static_cast<D3D_FEATURE_LEVEL>(0);
+
+ std::vector<D3dDevice> devices = D3dDevice::EnumDevices();
+ if (devices.empty()) {
+ RTC_LOG(LS_WARNING) << "No D3dDevice found.";
+ return false;
+ }
+
+ for (size_t i = 0; i < devices.size(); i++) {
+ D3D_FEATURE_LEVEL feature_level =
+ devices[i].d3d_device()->GetFeatureLevel();
+ if (d3d_info_.max_feature_level == 0 ||
+ feature_level > d3d_info_.max_feature_level) {
+ d3d_info_.max_feature_level = feature_level;
+ }
+ if (d3d_info_.min_feature_level == 0 ||
+ feature_level < d3d_info_.min_feature_level) {
+ d3d_info_.min_feature_level = feature_level;
+ }
+
+ DxgiAdapterDuplicator duplicator(devices[i]);
+ // There may be several video cards on the system, and some of them may not
+ // support IDXGIOutputDuplication. But they should not prevent others from
+ // taking effect, so we should continue trying the other adapters. This
+ // usually happens when a non-official virtual adapter is installed on the
+ // system.
+ if (!duplicator.Initialize()) {
+ RTC_LOG(LS_WARNING) << "Failed to initialize DxgiAdapterDuplicator on "
+ "adapter "
+ << i;
+ continue;
+ }
+ RTC_DCHECK(!duplicator.desktop_rect().is_empty());
+ duplicators_.push_back(std::move(duplicator));
+
+ desktop_rect_.UnionWith(duplicators_.back().desktop_rect());
+ }
+ TranslateRect();
+
+ HDC hdc = GetDC(nullptr);
+ // Keep the old DPI value if GetDC() failed.
+ if (hdc) {
+ dpi_.set(GetDeviceCaps(hdc, LOGPIXELSX), GetDeviceCaps(hdc, LOGPIXELSY));
+ ReleaseDC(nullptr, hdc);
+ }
+
+ identity_++;
+
+ if (duplicators_.empty()) {
+ RTC_LOG(LS_WARNING)
+ << "Cannot initialize any DxgiAdapterDuplicator instance.";
+ }
+
+ return !duplicators_.empty();
+}
+
+void DxgiDuplicatorController::Deinitialize() {
+ desktop_rect_ = DesktopRect();
+ duplicators_.clear();
+ display_configuration_monitor_.Reset();
+}
+
+bool DxgiDuplicatorController::ContextExpired(
+ const Context* const context) const {
+ RTC_DCHECK(context);
+ return context->controller_id != identity_ ||
+ context->contexts.size() != duplicators_.size();
+}
+
+void DxgiDuplicatorController::Setup(Context* context) {
+ if (ContextExpired(context)) {
+ RTC_DCHECK(context);
+ context->contexts.clear();
+ context->contexts.resize(duplicators_.size());
+ for (size_t i = 0; i < duplicators_.size(); i++) {
+ duplicators_[i].Setup(&context->contexts[i]);
+ }
+ context->controller_id = identity_;
+ }
+}
+
+bool DxgiDuplicatorController::DoDuplicateUnlocked(Context* context,
+ int monitor_id,
+ SharedDesktopFrame* target) {
+ Setup(context);
+
+ if (!EnsureFrameCaptured(context, target)) {
+ return false;
+ }
+
+ bool result = false;
+ if (monitor_id < 0) {
+ // Capture entire screen.
+ result = DoDuplicateAll(context, target);
+ } else {
+ result = DoDuplicateOne(context, monitor_id, target);
+ }
+
+ if (result) {
+ target->set_dpi(dpi_);
+ return true;
+ }
+
+ return false;
+}
+
+bool DxgiDuplicatorController::DoDuplicateAll(Context* context,
+ SharedDesktopFrame* target) {
+ for (size_t i = 0; i < duplicators_.size(); i++) {
+ if (!duplicators_[i].Duplicate(&context->contexts[i], target)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+bool DxgiDuplicatorController::DoDuplicateOne(Context* context,
+ int monitor_id,
+ SharedDesktopFrame* target) {
+ RTC_DCHECK(monitor_id >= 0);
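+ // `monitor_id` indexes the flattened list of screens across all adapters.
+ // For example (hypothetical setup): with two adapters owning 2 and 3 screens
+ // respectively, `monitor_id` 3 maps to the second screen (id 1) of the
+ // second adapter.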
+ for (size_t i = 0; i < duplicators_.size() && i < context->contexts.size();
+ i++) {
+ if (monitor_id >= duplicators_[i].screen_count()) {
+ monitor_id -= duplicators_[i].screen_count();
+ } else {
+ if (duplicators_[i].DuplicateMonitor(&context->contexts[i], monitor_id,
+ target)) {
+ target->set_top_left(duplicators_[i].ScreenRect(monitor_id).top_left());
+ return true;
+ }
+ return false;
+ }
+ }
+ return false;
+}
+
+int64_t DxgiDuplicatorController::GetNumFramesCaptured() const {
+ int64_t min = INT64_MAX;
+ for (const auto& duplicator : duplicators_) {
+ min = std::min(min, duplicator.GetNumFramesCaptured());
+ }
+
+ return min;
+}
+
+DesktopSize DxgiDuplicatorController::desktop_size() const {
+ return desktop_rect_.size();
+}
+
+DesktopRect DxgiDuplicatorController::ScreenRect(int id) const {
+ RTC_DCHECK(id >= 0);
+ for (size_t i = 0; i < duplicators_.size(); i++) {
+ if (id >= duplicators_[i].screen_count()) {
+ id -= duplicators_[i].screen_count();
+ } else {
+ return duplicators_[i].ScreenRect(id);
+ }
+ }
+ return DesktopRect();
+}
+
+int DxgiDuplicatorController::ScreenCountUnlocked() const {
+ int result = 0;
+ for (auto& duplicator : duplicators_) {
+ result += duplicator.screen_count();
+ }
+ return result;
+}
+
+void DxgiDuplicatorController::GetDeviceNamesUnlocked(
+ std::vector<std::string>* output) const {
+ RTC_DCHECK(output);
+ for (auto& duplicator : duplicators_) {
+ for (int i = 0; i < duplicator.screen_count(); i++) {
+ output->push_back(duplicator.GetDeviceName(i));
+ }
+ }
+}
+
+DesktopSize DxgiDuplicatorController::SelectedDesktopSize(
+ int monitor_id) const {
+ if (monitor_id < 0) {
+ return desktop_size();
+ }
+
+ return ScreenRect(monitor_id).size();
+}
+
+bool DxgiDuplicatorController::EnsureFrameCaptured(Context* context,
+ SharedDesktopFrame* target) {
+ // On a modern system, the FPS / monitor refresh rate is usually larger than
+ // or equal to 60. So 17 milliseconds is enough to capture at least one frame.
+ const int64_t ms_per_frame = 17;
+ // Skip frames to ensure a full frame refresh has occurred and the DXGI
+ // machinery is producing frames before this function returns.
+ int64_t frames_to_skip = 1;
+ // The total timeout in milliseconds for this function. If we cannot get
+ // enough frames during this interval, this function returns false, which
+ // causes the DXGI components to be reinitialized. This usually should not
+ // happen unless the system is switching the display mode when this function
+ // is called. 500 milliseconds should be enough for ~30 frames.
+ const int64_t timeout_ms = 500;
+
+ if (GetNumFramesCaptured() == 0 && !IsConsoleSession()) {
+ // When capturing a console session, waiting for a single frame is
+ // sufficient to ensure that DXGI output duplication is working. When the
+ // session is not attached to the console, it has been observed that DXGI
+ // may produce up to 4 frames (typically 1-2 though) before stopping. When
+ // this condition occurs, no errors are returned from the output duplication
+ // API; it simply appears that nothing is changing on the screen. Thus, for
+ // detached sessions, we need to capture a few extra frames before we can be
+ // confident that output duplication was initialized properly.
+ frames_to_skip = 5;
+ }
+
+ if (GetNumFramesCaptured() >= frames_to_skip) {
+ return true;
+ }
+
+ std::unique_ptr<SharedDesktopFrame> fallback_frame;
+ SharedDesktopFrame* shared_frame = nullptr;
+ if (target->size().width() >= desktop_size().width() &&
+ target->size().height() >= desktop_size().height()) {
+ // `target` is large enough to cover the entire screen, so we do not need to
+ // use `fallback_frame`.
+ shared_frame = target;
+ } else {
+ fallback_frame = SharedDesktopFrame::Wrap(
+ std::unique_ptr<DesktopFrame>(new BasicDesktopFrame(desktop_size())));
+ shared_frame = fallback_frame.get();
+ }
+
+ const int64_t start_ms = rtc::TimeMillis();
+ while (GetNumFramesCaptured() < frames_to_skip) {
+ if (!DoDuplicateAll(context, shared_frame)) {
+ return false;
+ }
+
+ // Calling DoDuplicateAll() may change the number of frames captured.
+ if (GetNumFramesCaptured() >= frames_to_skip) {
+ break;
+ }
+
+ if (rtc::TimeMillis() - start_ms > timeout_ms) {
+ RTC_LOG(LS_ERROR) << "Failed to capture " << frames_to_skip
+ << " frames "
+ "within "
+ << timeout_ms << " milliseconds.";
+ return false;
+ }
+
+ // Sleep `ms_per_frame` before attempting to capture the next frame to
+ // ensure the video adapter has time to update the screen.
+ webrtc::SleepMs(ms_per_frame);
+ }
+ return true;
+}
+
+void DxgiDuplicatorController::TranslateRect() {
+ const DesktopVector position =
+ DesktopVector().subtract(desktop_rect_.top_left());
+ desktop_rect_.Translate(position);
+ for (auto& duplicator : duplicators_) {
+ duplicator.TranslateRect(position);
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/win/dxgi_duplicator_controller.h b/third_party/libwebrtc/modules/desktop_capture/win/dxgi_duplicator_controller.h
new file mode 100644
index 0000000000..88c2939187
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/win/dxgi_duplicator_controller.h
@@ -0,0 +1,253 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_DESKTOP_CAPTURE_WIN_DXGI_DUPLICATOR_CONTROLLER_H_
+#define MODULES_DESKTOP_CAPTURE_WIN_DXGI_DUPLICATOR_CONTROLLER_H_
+
+#include <d3dcommon.h>
+
+#include <atomic>
+#include <string>
+#include <vector>
+
+#include "api/scoped_refptr.h"
+#include "modules/desktop_capture/desktop_geometry.h"
+#include "modules/desktop_capture/shared_desktop_frame.h"
+#include "modules/desktop_capture/win/d3d_device.h"
+#include "modules/desktop_capture/win/display_configuration_monitor.h"
+#include "modules/desktop_capture/win/dxgi_adapter_duplicator.h"
+#include "modules/desktop_capture/win/dxgi_context.h"
+#include "modules/desktop_capture/win/dxgi_frame.h"
+#include "rtc_base/synchronization/mutex.h"
+#include "rtc_base/system/rtc_export.h"
+
+namespace webrtc {
+
+// A controller for all the objects we need to call Windows DirectX capture
+// APIs. It's a singleton because only one IDXGIOutputDuplication instance per
+// monitor is allowed per application.
+//
+// Consumers should create a DxgiDuplicatorController::Context and keep it
+// throughout their lifetime, and pass it when calling Duplicate(). Consumers
+// can also call IsSupported() to determine whether the system supports DXGI
+// duplicator or not. If a previous IsSupported() function call returns true,
+// but a later Duplicate() returns false, this usually means the display mode is
+// changing. Consumers should retry after a while. (Typically 50 milliseconds,
+// but this may vary with hardware performance.)
+// The underlying DxgiOutputDuplicators may take an additional reference on the
+// frame passed in to the Duplicate methods so that they can guarantee delivery
+// of new frames when requested, since if there have been no updates to the
+// surface, they may be unable to capture a frame.
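+//
+// A minimal usage sketch (hypothetical consumer code; error handling and retry
+// timing are simplified):
+//
+//   DxgiFrame frame(/*factory=*/nullptr);
+//   auto controller = DxgiDuplicatorController::Instance();
+//   DxgiDuplicatorController::Result result = controller->Duplicate(&frame);
+//   if (result == DxgiDuplicatorController::Result::SUCCEEDED) {
+//     SharedDesktopFrame* captured = frame.frame();
+//     // Consume `captured`; it stays owned by `frame`.
+//   } else {
+//     // The display mode may be changing; wait ~50 ms and retry.
+//   }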
+class RTC_EXPORT DxgiDuplicatorController {
+ public:
+ using Context = DxgiFrameContext;
+
+ // A collection of D3d information we are interested in, which may impact
+ // capturer performance or reliability.
+ struct D3dInfo {
+ // Each video adapter has its own D3D_FEATURE_LEVEL, so this structure
+ // contains the minimum and maximum D3D_FEATURE_LEVELs the current system
+ // supports.
+ // Both fields can be 0, the default value, which indicates that no valid
+ // D3D_FEATURE_LEVEL has been retrieved from the underlying OS APIs.
+ D3D_FEATURE_LEVEL min_feature_level;
+ D3D_FEATURE_LEVEL max_feature_level;
+
+ // TODO(zijiehe): Add more fields, such as manufacturer name, mode, driver
+ // version.
+ };
+
+ enum class Result {
+ SUCCEEDED,
+ UNSUPPORTED_SESSION,
+ FRAME_PREPARE_FAILED,
+ INITIALIZATION_FAILED,
+ DUPLICATION_FAILED,
+ INVALID_MONITOR_ID,
+ };
+
+ // Converts `result` into a user-friendly string representation. The return
+ // value should not be used to identify error types.
+ static std::string ResultName(Result result);
+
+ // Returns the singleton instance of DxgiDuplicatorController.
+ static rtc::scoped_refptr<DxgiDuplicatorController> Instance();
+
+ // See ScreenCapturerWinDirectx::IsCurrentSessionSupported().
+ static bool IsCurrentSessionSupported();
+
+ // All the following public functions implicitly call the Initialize()
+ // function.
+
+ // Detects whether the system supports the DXGI based capturer.
+ bool IsSupported();
+
+ // Returns a copy of the D3dInfo composed by the last Initialize() call. This
+ // function always copies the latest information into `info`. But if the
+ // function returns false, the information in `info` may not be accurate.
+ bool RetrieveD3dInfo(D3dInfo* info);
+
+ // Captures the current screen and writes into `frame`. May retain a
+ // reference to `frame`'s underlying |SharedDesktopFrame|.
+ // TODO(zijiehe): Windows cannot guarantee the frames returned by each
+ // IDXGIOutputDuplication are synchronized. Since we are using a totally
+ // different threading model than the one Windows suggests, it's hard to
+ // synchronize them manually. We should find a way to do it.
+ Result Duplicate(DxgiFrame* frame);
+
+ // Captures one monitor and writes into `frame`. `monitor_id` should be
+ // >= 0. If `monitor_id` is no less than the total screen count of all the
+ // duplicators, this function returns Result::INVALID_MONITOR_ID. May retain
+ // a reference to `frame`'s underlying |SharedDesktopFrame|.
+ Result DuplicateMonitor(DxgiFrame* frame, int monitor_id);
+
+ // Returns the DPI of the current system. Returns an empty DesktopVector if
+ // the system does not support the DXGI based capturer.
+ DesktopVector dpi();
+
+ // Returns the count of screens on the system. These screens can be retrieved
+ // by an integer in the range of [0, ScreenCount()). If the system does not
+ // support the DXGI based capturer, this function returns 0.
+ int ScreenCount();
+
+ // Returns the device names of all screens on the system in utf8 encoding.
+ // These screens can be retrieved by an integer in the range of
+ // [0, output->size()). If the system does not support the DXGI based
+ // capturer, this function returns false.
+ bool GetDeviceNames(std::vector<std::string>* output);
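+
+ // For example (a hypothetical caller): the indices of the names returned by
+ // GetDeviceNames() are valid `monitor_id` values for DuplicateMonitor():
+ //
+ //   std::vector<std::string> names;
+ //   if (controller->GetDeviceNames(&names)) {
+ //     // names[i] corresponds to monitor_id == static_cast<int>(i).
+ //   }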
+
+ private:
+ // DxgiFrameContext calls the private Unregister(Context*) function in
+ // Reset().
+ friend void DxgiFrameContext::Reset();
+
+ // scoped_refptr<DxgiDuplicatorController> accesses private AddRef() and
+ // Release() functions.
+ friend class rtc::scoped_refptr<DxgiDuplicatorController>;
+
+ // A private constructor to ensure consumers use
+ // DxgiDuplicatorController::Instance().
+ DxgiDuplicatorController();
+
+ // Not implemented: The singleton DxgiDuplicatorController instance should not
+ // be deleted.
+ ~DxgiDuplicatorController();
+
+ // RefCountedInterface implementations.
+ void AddRef();
+ void Release();
+
+ // Does the real duplication work. Set `monitor_id` < 0 to capture the entire
+ // screen. This function calls Initialize(), and if the duplication fails, it
+ // calls Deinitialize() to ensure the DXGI components can be reinitialized
+ // next time.
+ Result DoDuplicate(DxgiFrame* frame, int monitor_id);
+
+ // Unloads all the DXGI components and releases the resources. This function
+ // wraps Deinitialize() with `mutex_`.
+ void Unload();
+
+ // Unregisters Context from this instance and all DxgiAdapterDuplicator(s)
+ // it owns.
+ void Unregister(const Context* const context);
+
+ // All functions below should be called with `mutex_` locked, and only after
+ // a successful Initialize().
+
+ // If the current instance has not been initialized, executes DoInitialize()
+ // and returns its result. Otherwise directly returns true. This function may
+ // call Deinitialize() if initialization fails.
+ bool Initialize() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+
+ // Does the real initialization work; this function should only be called
+ // from Initialize().
+ bool DoInitialize() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+
+ // Clears all COM components referenced by this instance, so the next
+ // Duplicate() call will eventually initialize this instance again.
+ void Deinitialize() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+
+ // A helper function to check whether a Context has expired.
+ bool ContextExpired(const Context* const context) const
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+
+ // Updates Context if needed.
+ void Setup(Context* context) RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+
+ bool DoDuplicateUnlocked(Context* context,
+ int monitor_id,
+ SharedDesktopFrame* target)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+
+ // Captures all monitors.
+ bool DoDuplicateAll(Context* context, SharedDesktopFrame* target)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+
+ // Captures one monitor.
+ bool DoDuplicateOne(Context* context,
+ int monitor_id,
+ SharedDesktopFrame* target)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+
+ // The minimum GetNumFramesCaptured() returned by `duplicators_`.
+ int64_t GetNumFramesCaptured() const RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+
+ // Returns a DesktopSize covering the entire `desktop_rect_`.
+ DesktopSize desktop_size() const RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+
+ // Returns the size of one screen. `id` should be >= 0. If the system does
+ // not support the DXGI based capturer, or `id` is no less than the total
+ // screen count of all the duplicators, this function returns an empty
+ // DesktopRect.
+ DesktopRect ScreenRect(int id) const RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+
+ int ScreenCountUnlocked() const RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+
+ void GetDeviceNamesUnlocked(std::vector<std::string>* output) const
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+
+ // Returns the desktop size of the selected screen `monitor_id`. Set
+ // `monitor_id` < 0 to return the size of the entire desktop.
+ DesktopSize SelectedDesktopSize(int monitor_id) const
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+
+ // Retries DoDuplicateAll() several times until GetNumFramesCaptured() is
+ // large enough. Returns false if DoDuplicateAll() returns false, or
+ // GetNumFramesCaptured() never reaches the requirement.
+ // According to http://crbug.com/682112, the DXGI capturer returns a black
+ // frame during the first several capture attempts.
+ bool EnsureFrameCaptured(Context* context, SharedDesktopFrame* target)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+
+ // Moves `desktop_rect_` and all underlying `duplicators_`, putting the
+ // top-left corner of the desktop at (0, 0). This is necessary because
+ // DXGI_OUTPUT_DESC may return negative coordinates. Called from
+ // DoInitialize() after all DxgiAdapterDuplicator and DxgiOutputDuplicator
+ // instances are initialized.
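+ // For example (hypothetical layout): with a primary monitor at
+ // (0, 0) - (1920, 1080) and a secondary monitor at (-1600, 0) - (0, 900),
+ // the union rect (-1600, 0) - (1920, 1080) becomes (0, 0) - (3520, 1080)
+ // after translation, and the primary monitor then starts at (1600, 0).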
+ void TranslateRect() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+
+ // The count of references that are currently "living".
+ std::atomic_int refcount_;
+
+ // This lock must be locked whenever accessing any of the following objects.
+ Mutex mutex_;
+
+ // A self-incremented integer to compare with the one in Context. It ensures
+ // a Context instance is always initialized after DxgiDuplicatorController.
+ int identity_ RTC_GUARDED_BY(mutex_) = 0;
+ DesktopRect desktop_rect_ RTC_GUARDED_BY(mutex_);
+ DesktopVector dpi_ RTC_GUARDED_BY(mutex_);
+ std::vector<DxgiAdapterDuplicator> duplicators_ RTC_GUARDED_BY(mutex_);
+ D3dInfo d3d_info_ RTC_GUARDED_BY(mutex_);
+ DisplayConfigurationMonitor display_configuration_monitor_
+ RTC_GUARDED_BY(mutex_);
+ // The number of successful duplications that have been performed.
+ uint32_t succeeded_duplications_ RTC_GUARDED_BY(mutex_) = 0;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_DESKTOP_CAPTURE_WIN_DXGI_DUPLICATOR_CONTROLLER_H_
diff --git a/third_party/libwebrtc/modules/desktop_capture/win/dxgi_frame.cc b/third_party/libwebrtc/modules/desktop_capture/win/dxgi_frame.cc
new file mode 100644
index 0000000000..13d5b4b62e
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/win/dxgi_frame.cc
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/win/dxgi_frame.h"
+
+#include <string.h>
+
+#include <utility>
+
+#include "modules/desktop_capture/desktop_frame.h"
+#include "modules/desktop_capture/win/dxgi_duplicator_controller.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+DxgiFrame::DxgiFrame(SharedMemoryFactory* factory) : factory_(factory) {}
+
+DxgiFrame::~DxgiFrame() = default;
+
+bool DxgiFrame::Prepare(DesktopSize size, DesktopCapturer::SourceId source_id) {
+ if (source_id != source_id_) {
+ // Once the source has been changed, the entire source should be copied.
+ source_id_ = source_id;
+ context_.Reset();
+ }
+
+ if (resolution_tracker_.SetResolution(size)) {
+ // Once the output size has changed, recreate the SharedDesktopFrame.
+ frame_.reset();
+ }
+
+ if (!frame_) {
+ std::unique_ptr<DesktopFrame> frame;
+ if (factory_) {
+ frame = SharedMemoryDesktopFrame::Create(size, factory_);
+
+ if (!frame) {
+ RTC_LOG(LS_WARNING) << "DxgiFrame cannot create a new DesktopFrame.";
+ return false;
+ }
+
+ // The DirectX capturer won't paint each pixel in the frame due to its
+ // one-capturer-per-monitor design. So once a new frame is created, we
+ // should clear it to avoid leftover image data from a previous frame
+ // remaining in it. See http://crbug.com/708766.
+ RTC_DCHECK_EQ(frame->stride(),
+ frame->size().width() * DesktopFrame::kBytesPerPixel);
+ memset(frame->data(), 0, frame->stride() * frame->size().height());
+ } else {
+ frame.reset(new BasicDesktopFrame(size));
+ }
+
+ frame_ = SharedDesktopFrame::Wrap(std::move(frame));
+ }
+
+ return !!frame_;
+}
+
+SharedDesktopFrame* DxgiFrame::frame() const {
+ RTC_DCHECK(frame_);
+ return frame_.get();
+}
+
+DxgiFrame::Context* DxgiFrame::context() {
+ RTC_DCHECK(frame_);
+ return &context_;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/win/dxgi_frame.h b/third_party/libwebrtc/modules/desktop_capture/win/dxgi_frame.h
new file mode 100644
index 0000000000..6a9ce868a7
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/win/dxgi_frame.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_DESKTOP_CAPTURE_WIN_DXGI_FRAME_H_
+#define MODULES_DESKTOP_CAPTURE_WIN_DXGI_FRAME_H_
+
+#include <memory>
+#include <vector>
+
+#include "modules/desktop_capture/desktop_capture_types.h"
+#include "modules/desktop_capture/desktop_capturer.h"
+#include "modules/desktop_capture/desktop_geometry.h"
+#include "modules/desktop_capture/resolution_tracker.h"
+#include "modules/desktop_capture/shared_desktop_frame.h"
+#include "modules/desktop_capture/shared_memory.h"
+#include "modules/desktop_capture/win/dxgi_context.h"
+
+namespace webrtc {
+
+class DxgiDuplicatorController;
+
+// A pair of a SharedDesktopFrame and a DxgiDuplicatorController::Context for
+// the client of DxgiDuplicatorController.
+class DxgiFrame final {
+ public:
+ using Context = DxgiFrameContext;
+
+ // DxgiFrame does not take ownership of `factory`; consumers should ensure it
+ // outlives this instance. nullptr is acceptable.
+ explicit DxgiFrame(SharedMemoryFactory* factory);
+ ~DxgiFrame();
+
+ // Should not be called if Prepare() has not been executed or returned false.
+ SharedDesktopFrame* frame() const;
+
+ private:
+ // Allows DxgiDuplicatorController to access the Prepare() and context()
+ // functions, as well as the Context class.
+ friend class DxgiDuplicatorController;
+
+ // Prepares the current instance with the desktop size and source id.
+ bool Prepare(DesktopSize size, DesktopCapturer::SourceId source_id);
+
+ // Should not be called if Prepare() has not been executed or returned false.
+ Context* context();
+
+ SharedMemoryFactory* const factory_;
+ ResolutionTracker resolution_tracker_;
+ DesktopCapturer::SourceId source_id_ = kFullDesktopScreenId;
+ std::unique_ptr<SharedDesktopFrame> frame_;
+ Context context_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_DESKTOP_CAPTURE_WIN_DXGI_FRAME_H_
diff --git a/third_party/libwebrtc/modules/desktop_capture/win/dxgi_output_duplicator.cc b/third_party/libwebrtc/modules/desktop_capture/win/dxgi_output_duplicator.cc
new file mode 100644
index 0000000000..caad6f43d1
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/win/dxgi_output_duplicator.cc
@@ -0,0 +1,390 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/win/dxgi_output_duplicator.h"
+
+#include <dxgi.h>
+#include <dxgiformat.h>
+#include <string.h>
+#include <unknwn.h>
+#include <windows.h>
+
+#include <algorithm>
+
+#include "modules/desktop_capture/win/desktop_capture_utils.h"
+#include "modules/desktop_capture/win/dxgi_texture_mapping.h"
+#include "modules/desktop_capture/win/dxgi_texture_staging.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/string_utils.h"
+#include "rtc_base/win32.h"
+
+namespace webrtc {
+
+using Microsoft::WRL::ComPtr;
+
+namespace {
+
+// Timeout for AcquireNextFrame() call.
+// DxgiDuplicatorController leverages external components to do the capture
+// scheduling. So here DxgiOutputDuplicator does not need to actively wait for a
+// new frame.
+const int kAcquireTimeoutMs = 0;
+
+DesktopRect RECTToDesktopRect(const RECT& rect) {
+ return DesktopRect::MakeLTRB(rect.left, rect.top, rect.right, rect.bottom);
+}
+
+Rotation DxgiRotationToRotation(DXGI_MODE_ROTATION rotation) {
+ switch (rotation) {
+ case DXGI_MODE_ROTATION_IDENTITY:
+ case DXGI_MODE_ROTATION_UNSPECIFIED:
+ return Rotation::CLOCK_WISE_0;
+ case DXGI_MODE_ROTATION_ROTATE90:
+ return Rotation::CLOCK_WISE_90;
+ case DXGI_MODE_ROTATION_ROTATE180:
+ return Rotation::CLOCK_WISE_180;
+ case DXGI_MODE_ROTATION_ROTATE270:
+ return Rotation::CLOCK_WISE_270;
+ }
+
+ RTC_DCHECK_NOTREACHED();
+ return Rotation::CLOCK_WISE_0;
+}
+
+} // namespace
+
+DxgiOutputDuplicator::DxgiOutputDuplicator(const D3dDevice& device,
+ const ComPtr<IDXGIOutput1>& output,
+ const DXGI_OUTPUT_DESC& desc)
+ : device_(device),
+ output_(output),
+ device_name_(rtc::ToUtf8(desc.DeviceName)),
+ desktop_rect_(RECTToDesktopRect(desc.DesktopCoordinates)) {
+ RTC_DCHECK(output_);
+ RTC_DCHECK(!desktop_rect_.is_empty());
+ RTC_DCHECK_GT(desktop_rect_.width(), 0);
+ RTC_DCHECK_GT(desktop_rect_.height(), 0);
+}
+
+DxgiOutputDuplicator::DxgiOutputDuplicator(DxgiOutputDuplicator&& other) =
+ default;
+
+DxgiOutputDuplicator::~DxgiOutputDuplicator() {
+ if (duplication_) {
+ duplication_->ReleaseFrame();
+ }
+ texture_.reset();
+}
+
+bool DxgiOutputDuplicator::Initialize() {
+ if (DuplicateOutput()) {
+ if (desc_.DesktopImageInSystemMemory) {
+ texture_.reset(new DxgiTextureMapping(duplication_.Get()));
+ } else {
+ texture_.reset(new DxgiTextureStaging(device_));
+ }
+ return true;
+ } else {
+ duplication_.Reset();
+ return false;
+ }
+}
+
+bool DxgiOutputDuplicator::DuplicateOutput() {
+ RTC_DCHECK(!duplication_);
+ _com_error error =
+ output_->DuplicateOutput(static_cast<IUnknown*>(device_.d3d_device()),
+ duplication_.GetAddressOf());
+ if (error.Error() != S_OK || !duplication_) {
+ RTC_LOG(LS_WARNING) << "Failed to duplicate output from IDXGIOutput1: "
+ << desktop_capture::utils::ComErrorToString(error);
+ return false;
+ }
+
+ memset(&desc_, 0, sizeof(desc_));
+ duplication_->GetDesc(&desc_);
+ if (desc_.ModeDesc.Format != DXGI_FORMAT_B8G8R8A8_UNORM) {
+ RTC_LOG(LS_ERROR) << "IDXGIOutputDuplication does not use RGBA (8 bit) "
+ << "format, which is required by downstream components, "
+ << "format is " << desc_.ModeDesc.Format;
+ return false;
+ }
+
+ if (static_cast<int>(desc_.ModeDesc.Width) != desktop_rect_.width() ||
+ static_cast<int>(desc_.ModeDesc.Height) != desktop_rect_.height()) {
+ RTC_LOG(LS_ERROR)
+ << "IDXGIOutputDuplication does not return the same size as its "
+ << "IDXGIOutput1, size returned by IDXGIOutputDuplication is "
+ << desc_.ModeDesc.Width << " x " << desc_.ModeDesc.Height
+ << ", size returned by IDXGIOutput1 is " << desktop_rect_.width()
+ << " x " << desktop_rect_.height();
+ return false;
+ }
+
+ rotation_ = DxgiRotationToRotation(desc_.Rotation);
+ unrotated_size_ = RotateSize(desktop_size(), ReverseRotation(rotation_));
+
+ return true;
+}
+
+bool DxgiOutputDuplicator::ReleaseFrame() {
+ RTC_DCHECK(duplication_);
+ _com_error error = duplication_->ReleaseFrame();
+ if (error.Error() != S_OK) {
+ RTC_LOG(LS_ERROR) << "Failed to release frame from IDXGIOutputDuplication: "
+ << desktop_capture::utils::ComErrorToString(error);
+ return false;
+ }
+ return true;
+}
+
+bool DxgiOutputDuplicator::Duplicate(Context* context,
+ DesktopVector offset,
+ SharedDesktopFrame* target) {
+ RTC_DCHECK(duplication_);
+ RTC_DCHECK(texture_);
+ RTC_DCHECK(target);
+ if (!DesktopRect::MakeSize(target->size())
+ .ContainsRect(GetTranslatedDesktopRect(offset))) {
+ // `target` size is not large enough to cover the current output region.
+ return false;
+ }
+
+ DXGI_OUTDUPL_FRAME_INFO frame_info;
+ memset(&frame_info, 0, sizeof(frame_info));
+ ComPtr<IDXGIResource> resource;
+ _com_error error = duplication_->AcquireNextFrame(
+ kAcquireTimeoutMs, &frame_info, resource.GetAddressOf());
+ if (error.Error() != S_OK && error.Error() != DXGI_ERROR_WAIT_TIMEOUT) {
+ RTC_LOG(LS_ERROR) << "Failed to capture frame: "
+ << desktop_capture::utils::ComErrorToString(error);
+ return false;
+ }
+
+ // We need to merge the updated region with the one from the context, but
+ // only spread the updated region from the current frame. So keep a copy of
+ // the updated region from the context here. The `updated_region` always
+ // starts from (0, 0).
+ DesktopRegion updated_region;
+ updated_region.Swap(&context->updated_region);
+ if (error.Error() == S_OK && frame_info.AccumulatedFrames > 0 && resource) {
+ DetectUpdatedRegion(frame_info, &context->updated_region);
+ SpreadContextChange(context);
+ if (!texture_->CopyFrom(frame_info, resource.Get())) {
+ return false;
+ }
+ updated_region.AddRegion(context->updated_region);
+ // TODO(zijiehe): Figure out why clearing context->updated_region here
+ // triggers screen flickering.
+
+ const DesktopFrame& source = texture_->AsDesktopFrame();
+ if (rotation_ != Rotation::CLOCK_WISE_0) {
+ for (DesktopRegion::Iterator it(updated_region); !it.IsAtEnd();
+ it.Advance()) {
+ // The `updated_region` returned by Windows is rotated, but the `source`
+ // frame is not. So we need to rotate it back.
+ const DesktopRect source_rect =
+ RotateRect(it.rect(), desktop_size(), ReverseRotation(rotation_));
+ RotateDesktopFrame(source, source_rect, rotation_, offset, target);
+ }
+ } else {
+ for (DesktopRegion::Iterator it(updated_region); !it.IsAtEnd();
+ it.Advance()) {
+ // The DesktopRect in `target` starts from `offset`.
+ DesktopRect dest_rect = it.rect();
+ dest_rect.Translate(offset);
+ target->CopyPixelsFrom(source, it.rect().top_left(), dest_rect);
+ }
+ }
+ last_frame_ = target->Share();
+ last_frame_offset_ = offset;
+ updated_region.Translate(offset.x(), offset.y());
+ target->mutable_updated_region()->AddRegion(updated_region);
+ num_frames_captured_++;
+ return texture_->Release() && ReleaseFrame();
+ }
+
+ if (last_frame_) {
+ // No change since the last frame, or AcquireNextFrame() timed out, so we
+ // export the last frame to the target.
+ for (DesktopRegion::Iterator it(updated_region); !it.IsAtEnd();
+ it.Advance()) {
+ // The DesktopRect in `source` starts from `last_frame_offset_`.
+ DesktopRect source_rect = it.rect();
+ // The DesktopRect in `target` starts from `offset`.
+ DesktopRect target_rect = source_rect;
+ source_rect.Translate(last_frame_offset_);
+ target_rect.Translate(offset);
+ target->CopyPixelsFrom(*last_frame_, source_rect.top_left(), target_rect);
+ }
+ updated_region.Translate(offset.x(), offset.y());
+ target->mutable_updated_region()->AddRegion(updated_region);
+ } else {
+ // If we were at the very first frame and capturing failed, the
+ // context->updated_region should be kept unchanged for the next attempt.
+ context->updated_region.Swap(&updated_region);
+ }
+ // If AcquireNextFrame() failed with a timeout error, we do not need to
+ // release the frame.
+ return error.Error() == DXGI_ERROR_WAIT_TIMEOUT || ReleaseFrame();
+}
+
+DesktopRect DxgiOutputDuplicator::GetTranslatedDesktopRect(
+ DesktopVector offset) const {
+ DesktopRect result(DesktopRect::MakeSize(desktop_size()));
+ result.Translate(offset);
+ return result;
+}
+
+DesktopRect DxgiOutputDuplicator::GetUntranslatedDesktopRect() const {
+ return DesktopRect::MakeSize(desktop_size());
+}
+
+void DxgiOutputDuplicator::DetectUpdatedRegion(
+ const DXGI_OUTDUPL_FRAME_INFO& frame_info,
+ DesktopRegion* updated_region) {
+ if (DoDetectUpdatedRegion(frame_info, updated_region)) {
+ // Make sure that even if a region returned by the Windows API is outside of
+ // desktop_rect_, we still won't export it to the target DesktopFrame.
+ updated_region->IntersectWith(GetUntranslatedDesktopRect());
+ } else {
+ updated_region->SetRect(GetUntranslatedDesktopRect());
+ }
+}
+
+bool DxgiOutputDuplicator::DoDetectUpdatedRegion(
+ const DXGI_OUTDUPL_FRAME_INFO& frame_info,
+ DesktopRegion* updated_region) {
+ RTC_DCHECK(updated_region);
+ updated_region->Clear();
+ if (frame_info.TotalMetadataBufferSize == 0) {
+ // This should not happen, since frame_info.AccumulatedFrames > 0.
+ RTC_LOG(LS_ERROR) << "frame_info.AccumulatedFrames > 0, "
+ << "but TotalMetadataBufferSize == 0";
+ return false;
+ }
+
+ if (metadata_.size() < frame_info.TotalMetadataBufferSize) {
+ metadata_.clear(); // Avoid data copy
+ metadata_.resize(frame_info.TotalMetadataBufferSize);
+ }
+
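+ // The metadata buffer is packed as all move rectangles followed by all dirty
+ // rectangles. The `buff_size` written back by GetFrameMoveRects() marks where
+ // the move-rect block ends and the dirty-rect block begins.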
+ UINT buff_size = 0;
+ DXGI_OUTDUPL_MOVE_RECT* move_rects =
+ reinterpret_cast<DXGI_OUTDUPL_MOVE_RECT*>(metadata_.data());
+ size_t move_rects_count = 0;
+ _com_error error = duplication_->GetFrameMoveRects(
+ static_cast<UINT>(metadata_.size()), move_rects, &buff_size);
+ if (error.Error() != S_OK) {
+ RTC_LOG(LS_ERROR) << "Failed to get move rectangles: "
+ << desktop_capture::utils::ComErrorToString(error);
+ return false;
+ }
+ move_rects_count = buff_size / sizeof(DXGI_OUTDUPL_MOVE_RECT);
+
+ RECT* dirty_rects = reinterpret_cast<RECT*>(metadata_.data() + buff_size);
+ size_t dirty_rects_count = 0;
+ error = duplication_->GetFrameDirtyRects(
+ static_cast<UINT>(metadata_.size()) - buff_size, dirty_rects, &buff_size);
+ if (error.Error() != S_OK) {
+ RTC_LOG(LS_ERROR) << "Failed to get dirty rectangles: "
+ << desktop_capture::utils::ComErrorToString(error);
+ return false;
+ }
+ dirty_rects_count = buff_size / sizeof(RECT);
+
+ while (move_rects_count > 0) {
+ // The DirectX capture API may randomly return unmoved move_rects, which
+ // should be skipped to avoid unnecessarily wasting diffing and encoding
+ // resources.
+ // Measured with the testing application it2me_standalone_host_main, this
+ // check reduces the average capture time by 0.375% (4.07 -> 4.055) and the
+ // average encode time by 0.313% (8.042 -> 8.016) without other impacts.
+ if (move_rects->SourcePoint.x != move_rects->DestinationRect.left ||
+ move_rects->SourcePoint.y != move_rects->DestinationRect.top) {
+ updated_region->AddRect(
+ RotateRect(DesktopRect::MakeXYWH(move_rects->SourcePoint.x,
+ move_rects->SourcePoint.y,
+ move_rects->DestinationRect.right -
+ move_rects->DestinationRect.left,
+ move_rects->DestinationRect.bottom -
+ move_rects->DestinationRect.top),
+ unrotated_size_, rotation_));
+ updated_region->AddRect(
+ RotateRect(DesktopRect::MakeLTRB(move_rects->DestinationRect.left,
+ move_rects->DestinationRect.top,
+ move_rects->DestinationRect.right,
+ move_rects->DestinationRect.bottom),
+ unrotated_size_, rotation_));
+ } else {
+ RTC_LOG(LS_INFO) << "Unmoved move_rect detected, ["
+ << move_rects->DestinationRect.left << ", "
+ << move_rects->DestinationRect.top << "] - ["
+ << move_rects->DestinationRect.right << ", "
+ << move_rects->DestinationRect.bottom << "].";
+ }
+ move_rects++;
+ move_rects_count--;
+ }
+
+ while (dirty_rects_count > 0) {
+ updated_region->AddRect(RotateRect(
+ DesktopRect::MakeLTRB(dirty_rects->left, dirty_rects->top,
+ dirty_rects->right, dirty_rects->bottom),
+ unrotated_size_, rotation_));
+ dirty_rects++;
+ dirty_rects_count--;
+ }
+
+ return true;
+}
+
+void DxgiOutputDuplicator::Setup(Context* context) {
+ RTC_DCHECK(context->updated_region.is_empty());
+ // Always copy the entire monitor during the first Duplicate() call.
+ context->updated_region.AddRect(GetUntranslatedDesktopRect());
+ RTC_DCHECK(std::find(contexts_.begin(), contexts_.end(), context) ==
+ contexts_.end());
+ contexts_.push_back(context);
+}
+
+void DxgiOutputDuplicator::Unregister(const Context* const context) {
+ auto it = std::find(contexts_.begin(), contexts_.end(), context);
+ RTC_DCHECK(it != contexts_.end());
+ contexts_.erase(it);
+}
+
+void DxgiOutputDuplicator::SpreadContextChange(const Context* const source) {
+ for (Context* dest : contexts_) {
+ RTC_DCHECK(dest);
+ if (dest != source) {
+ dest->updated_region.AddRegion(source->updated_region);
+ }
+ }
+}
+
+DesktopSize DxgiOutputDuplicator::desktop_size() const {
+ return desktop_rect_.size();
+}
+
+int64_t DxgiOutputDuplicator::num_frames_captured() const {
+#if !defined(NDEBUG)
+ RTC_DCHECK_EQ(!!last_frame_, num_frames_captured_ > 0);
+#endif
+ return num_frames_captured_;
+}
+
+void DxgiOutputDuplicator::TranslateRect(const DesktopVector& position) {
+ desktop_rect_.Translate(position);
+ RTC_DCHECK_GE(desktop_rect_.left(), 0);
+ RTC_DCHECK_GE(desktop_rect_.top(), 0);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/win/dxgi_output_duplicator.h b/third_party/libwebrtc/modules/desktop_capture/win/dxgi_output_duplicator.h
new file mode 100644
index 0000000000..df15fe566e
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/win/dxgi_output_duplicator.h
@@ -0,0 +1,149 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_DESKTOP_CAPTURE_WIN_DXGI_OUTPUT_DUPLICATOR_H_
+#define MODULES_DESKTOP_CAPTURE_WIN_DXGI_OUTPUT_DUPLICATOR_H_
+
+#include <comdef.h>
+#include <dxgi.h>
+#include <dxgi1_2.h>
+#include <wrl/client.h>
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "modules/desktop_capture/desktop_frame_rotation.h"
+#include "modules/desktop_capture/desktop_geometry.h"
+#include "modules/desktop_capture/desktop_region.h"
+#include "modules/desktop_capture/shared_desktop_frame.h"
+#include "modules/desktop_capture/win/d3d_device.h"
+#include "modules/desktop_capture/win/dxgi_context.h"
+#include "modules/desktop_capture/win/dxgi_texture.h"
+#include "rtc_base/thread_annotations.h"
+
+namespace webrtc {
+
+// Duplicates the content of one IDXGIOutput, i.e. one monitor attached to one
+// video card. None of the functions in this class are thread-safe.
+class DxgiOutputDuplicator {
+ public:
+ using Context = DxgiOutputContext;
+
+ // Creates an instance of DxgiOutputDuplicator from a D3dDevice and one of
+ // its IDXGIOutput1s. The caller must maintain the lifetime of `device` to
+ // make sure it outlives this instance. Only DxgiAdapterDuplicator can create
+ // an instance.
+ DxgiOutputDuplicator(const D3dDevice& device,
+ const Microsoft::WRL::ComPtr<IDXGIOutput1>& output,
+ const DXGI_OUTPUT_DESC& desc);
+
+ // Move constructor, to allow instances of this class to be stored in
+ // std::vector<>.
+ DxgiOutputDuplicator(DxgiOutputDuplicator&& other);
+
+ // Destructs this instance. We need to make sure texture_ has been released
+ // before duplication_.
+ ~DxgiOutputDuplicator();
+
+ // Initializes the duplication_ object.
+ bool Initialize();
+
+ // Copies the content of the current IDXGIOutput into `target`. To improve
+ // performance, this function copies only the regions merged from
+ // `context`->updated_region and DetectUpdatedRegion(). The `offset` decides
+ // where in `target` the content should be copied to, i.e. this function
+ // copies the content into the rectangle from (offset.x(), offset.y()) to
+ // (offset.x() + desktop_rect_.width(), offset.y() + desktop_rect_.height()).
+ // Returns false in case of a failure.
+ // May retain a reference to `target` so that a "captured" frame can be
+ // returned in the event that a new frame is not ready to be captured yet.
+ // (In other words, if the call to IDXGIOutputDuplication::AcquireNextFrame
+ // indicates that there is no new frame yet, this is usually because no
+ // updates have occurred to the frame.)
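+ // For example (hypothetical geometry): with `offset` (1920, 0) and a
+ // 1920x1080 output, the content is copied into the target rectangle
+ // (1920, 0) - (3840, 1080); this is how side-by-side monitors are stitched
+ // into one desktop-sized frame.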
+ bool Duplicate(Context* context,
+ DesktopVector offset,
+ SharedDesktopFrame* target);
+
+ // Returns the desktop rect covered by this DxgiOutputDuplicator.
+ DesktopRect desktop_rect() const { return desktop_rect_; }
+
+ // Returns the device name from DXGI_OUTPUT_DESC in utf8 encoding.
+ const std::string& device_name() const { return device_name_; }
+
+ void Setup(Context* context);
+
+ void Unregister(const Context* const context);
+
+ // How many frames have been captured by this DxgiOutputDuplicator.
+ int64_t num_frames_captured() const;
+
+ // Moves `desktop_rect_`. See DxgiDuplicatorController::TranslateRect().
+ void TranslateRect(const DesktopVector& position);
+
+ private:
+ // Calls DoDetectUpdatedRegion(). If it fails, this function sets
+ // `updated_region` to the entire GetUntranslatedDesktopRect().
+ void DetectUpdatedRegion(const DXGI_OUTDUPL_FRAME_INFO& frame_info,
+ DesktopRegion* updated_region);
+
+ // Returns the untranslated updated region, which is directly returned by the
+ // Windows APIs. Returns false in case of a failure.
+ bool DoDetectUpdatedRegion(const DXGI_OUTDUPL_FRAME_INFO& frame_info,
+ DesktopRegion* updated_region);
+
+ bool ReleaseFrame();
+
+ // Initializes the duplication_ instance. Expects duplication_ to be empty.
+ // Returns false if the system does not support IDXGIOutputDuplication.
+ bool DuplicateOutput();
+
+ // Returns a DesktopRect with the same size as desktop_size(), but translated
+ // by `offset`.
+ DesktopRect GetTranslatedDesktopRect(DesktopVector offset) const;
+
+ // Returns a DesktopRect with the same size as desktop_size(), but starting
+ // from (0, 0).
+ DesktopRect GetUntranslatedDesktopRect() const;
+
+ // Spreads changes from `context` to other registered Context(s) in
+ // contexts_.
+ void SpreadContextChange(const Context* const context);
+
+ // Returns the size of the desktop rectangle this instance represents.
+ DesktopSize desktop_size() const;
+
+ const D3dDevice device_;
+ const Microsoft::WRL::ComPtr<IDXGIOutput1> output_;
+ const std::string device_name_;
+ DesktopRect desktop_rect_;
+ Microsoft::WRL::ComPtr<IDXGIOutputDuplication> duplication_;
+ DXGI_OUTDUPL_DESC desc_;
+ std::vector<uint8_t> metadata_;
+ std::unique_ptr<DxgiTexture> texture_;
+ Rotation rotation_;
+ DesktopSize unrotated_size_;
+
+ // After each AcquireNextFrame() call, the updated_region of every other
+ // active Context needs to be updated, since those Contexts have missed the
+ // change this time. During their next Duplicate() call, their updated_region
+ // will be merged and copied.
+ std::vector<Context*> contexts_;
+
+ // The last full frame of this output and its offset. If AcquireNextFrame()
+ // failed because of a timeout, i.e. no update, we can copy the content from
+ // `last_frame_`.
+ std::unique_ptr<SharedDesktopFrame> last_frame_;
+ DesktopVector last_frame_offset_;
+
+ int64_t num_frames_captured_ = 0;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_DESKTOP_CAPTURE_WIN_DXGI_OUTPUT_DUPLICATOR_H_
diff --git a/third_party/libwebrtc/modules/desktop_capture/win/dxgi_texture.cc b/third_party/libwebrtc/modules/desktop_capture/win/dxgi_texture.cc
new file mode 100644
index 0000000000..b8f5b81f90
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/win/dxgi_texture.cc
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/win/dxgi_texture.h"
+
+#include <comdef.h>
+#include <d3d11.h>
+#include <wrl/client.h>
+
+#include "modules/desktop_capture/desktop_region.h"
+#include "modules/desktop_capture/win/desktop_capture_utils.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+using Microsoft::WRL::ComPtr;
+
+namespace webrtc {
+
+namespace {
+
+class DxgiDesktopFrame : public DesktopFrame {
+ public:
+ explicit DxgiDesktopFrame(const DxgiTexture& texture)
+ : DesktopFrame(texture.desktop_size(),
+ texture.pitch(),
+ texture.bits(),
+ nullptr) {}
+
+ ~DxgiDesktopFrame() override = default;
+};
+
+} // namespace
+
+DxgiTexture::DxgiTexture() = default;
+DxgiTexture::~DxgiTexture() = default;
+
+bool DxgiTexture::CopyFrom(const DXGI_OUTDUPL_FRAME_INFO& frame_info,
+ IDXGIResource* resource) {
+ RTC_DCHECK_GT(frame_info.AccumulatedFrames, 0);
+ RTC_DCHECK(resource);
+ ComPtr<ID3D11Texture2D> texture;
+ _com_error error = resource->QueryInterface(
+ __uuidof(ID3D11Texture2D),
+ reinterpret_cast<void**>(texture.GetAddressOf()));
+ if (error.Error() != S_OK || !texture) {
+ RTC_LOG(LS_ERROR) << "Failed to convert IDXGIResource to ID3D11Texture2D: "
+ << desktop_capture::utils::ComErrorToString(error);
+ return false;
+ }
+
+ D3D11_TEXTURE2D_DESC desc = {0};
+ texture->GetDesc(&desc);
+ desktop_size_.set(desc.Width, desc.Height);
+
+ return CopyFromTexture(frame_info, texture.Get());
+}
+
+const DesktopFrame& DxgiTexture::AsDesktopFrame() {
+ if (!frame_) {
+ frame_.reset(new DxgiDesktopFrame(*this));
+ }
+ return *frame_;
+}
+
+bool DxgiTexture::Release() {
+ frame_.reset();
+ return DoRelease();
+}
+
+DXGI_MAPPED_RECT* DxgiTexture::rect() {
+ return &rect_;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/win/dxgi_texture.h b/third_party/libwebrtc/modules/desktop_capture/win/dxgi_texture.h
new file mode 100644
index 0000000000..a663b95a04
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/win/dxgi_texture.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_DESKTOP_CAPTURE_WIN_DXGI_TEXTURE_H_
+#define MODULES_DESKTOP_CAPTURE_WIN_DXGI_TEXTURE_H_
+
+#include <d3d11.h>
+#include <dxgi1_2.h>
+
+#include <memory>
+
+#include "modules/desktop_capture/desktop_frame.h"
+#include "modules/desktop_capture/desktop_geometry.h"
+
+namespace webrtc {
+
+class DesktopRegion;
+
+// A texture copied or mapped from a DXGI_OUTDUPL_FRAME_INFO and IDXGIResource.
+class DxgiTexture {
+ public:
+  // Creates a DxgiTexture instance, which represents the `desktop_size` area
+  // of the entire screen -- usually a single monitor on the system.
+ DxgiTexture();
+
+ virtual ~DxgiTexture();
+
+  // Copies selected regions of a frame represented by frame_info and
+  // resource. Returns false if anything goes wrong.
+ bool CopyFrom(const DXGI_OUTDUPL_FRAME_INFO& frame_info,
+ IDXGIResource* resource);
+
+ const DesktopSize& desktop_size() const { return desktop_size_; }
+
+ uint8_t* bits() const { return static_cast<uint8_t*>(rect_.pBits); }
+
+ int pitch() const { return static_cast<int>(rect_.Pitch); }
+
+  // Releases the resource currently held by this instance. Returns false if
+  // anything goes wrong; this instance should not be used again after such a
+  // failure. bits(), pitch() and AsDesktopFrame() are only valid after a
+  // successful CopyFrom() call and before the next Release() call.
+ bool Release();
+
+  // Returns a DesktopFrame snapshot of this DxgiTexture instance. The
+  // DesktopFrame is only meant for copying this texture's content into
+  // another DesktopFrame, and it should not outlive its DxgiTexture instance.
+ const DesktopFrame& AsDesktopFrame();
+
+ protected:
+ DXGI_MAPPED_RECT* rect();
+
+ virtual bool CopyFromTexture(const DXGI_OUTDUPL_FRAME_INFO& frame_info,
+ ID3D11Texture2D* texture) = 0;
+
+ virtual bool DoRelease() = 0;
+
+ private:
+ DXGI_MAPPED_RECT rect_ = {0};
+ DesktopSize desktop_size_;
+ std::unique_ptr<DesktopFrame> frame_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_DESKTOP_CAPTURE_WIN_DXGI_TEXTURE_H_
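
A minimal usage sketch of the contract documented above: bits(), pitch() and AsDesktopFrame() are only meaningful between a successful CopyFrom() and the matching Release(). The inputs here (`texture`, `frame_info`, `resource`, `target`) are assumed to come from an AcquireNextFrame() call and caller-owned storage elsewhere; this is not code from the diff.

#include "modules/desktop_capture/desktop_frame.h"
#include "modules/desktop_capture/win/dxgi_texture.h"

// Sketch only; error handling beyond the boolean results is elided.
bool CopyOneFrame(webrtc::DxgiTexture& texture,
                  const DXGI_OUTDUPL_FRAME_INFO& frame_info,
                  IDXGIResource* resource,
                  webrtc::DesktopFrame& target) {
  if (!texture.CopyFrom(frame_info, resource)) {
    return false;
  }
  // bits(), pitch() and AsDesktopFrame() are valid only in this window.
  target.CopyPixelsFrom(
      texture.AsDesktopFrame(), webrtc::DesktopVector(),
      webrtc::DesktopRect::MakeSize(texture.desktop_size()));
  // After Release(), the mapped memory must not be touched again.
  return texture.Release();
}
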
diff --git a/third_party/libwebrtc/modules/desktop_capture/win/dxgi_texture_mapping.cc b/third_party/libwebrtc/modules/desktop_capture/win/dxgi_texture_mapping.cc
new file mode 100644
index 0000000000..7ecf1adc61
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/win/dxgi_texture_mapping.cc
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/win/dxgi_texture_mapping.h"
+
+#include <comdef.h>
+#include <dxgi.h>
+#include <dxgi1_2.h>
+
+#include "modules/desktop_capture/win/desktop_capture_utils.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+DxgiTextureMapping::DxgiTextureMapping(IDXGIOutputDuplication* duplication)
+ : duplication_(duplication) {
+ RTC_DCHECK(duplication_);
+}
+
+DxgiTextureMapping::~DxgiTextureMapping() = default;
+
+bool DxgiTextureMapping::CopyFromTexture(
+ const DXGI_OUTDUPL_FRAME_INFO& frame_info,
+ ID3D11Texture2D* texture) {
+ RTC_DCHECK_GT(frame_info.AccumulatedFrames, 0);
+ RTC_DCHECK(texture);
+ *rect() = {0};
+ _com_error error = duplication_->MapDesktopSurface(rect());
+ if (error.Error() != S_OK) {
+ *rect() = {0};
+ RTC_LOG(LS_ERROR)
+ << "Failed to map the IDXGIOutputDuplication to a bitmap: "
+ << desktop_capture::utils::ComErrorToString(error);
+ return false;
+ }
+
+ return true;
+}
+
+bool DxgiTextureMapping::DoRelease() {
+ _com_error error = duplication_->UnMapDesktopSurface();
+ if (error.Error() != S_OK) {
+ RTC_LOG(LS_ERROR) << "Failed to unmap the IDXGIOutputDuplication: "
+ << desktop_capture::utils::ComErrorToString(error);
+ return false;
+ }
+ return true;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/win/dxgi_texture_mapping.h b/third_party/libwebrtc/modules/desktop_capture/win/dxgi_texture_mapping.h
new file mode 100644
index 0000000000..71f00b99ab
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/win/dxgi_texture_mapping.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_DESKTOP_CAPTURE_WIN_DXGI_TEXTURE_MAPPING_H_
+#define MODULES_DESKTOP_CAPTURE_WIN_DXGI_TEXTURE_MAPPING_H_
+
+#include <d3d11.h>
+#include <dxgi1_2.h>
+
+#include "modules/desktop_capture/desktop_geometry.h"
+#include "modules/desktop_capture/desktop_region.h"
+#include "modules/desktop_capture/win/dxgi_texture.h"
+
+namespace webrtc {
+
+// A DxgiTexture which directly maps the bitmap from an IDXGIResource. This
+// class is used when DXGI_OUTDUPL_DESC.DesktopImageInSystemMemory is true.
+// (This usually means the video card shares main memory with the CPU instead
+// of having its own dedicated memory.)
+class DxgiTextureMapping : public DxgiTexture {
+ public:
+ // Creates a DxgiTextureMapping instance. Caller must maintain the lifetime
+ // of input `duplication` to make sure it outlives this instance.
+ explicit DxgiTextureMapping(IDXGIOutputDuplication* duplication);
+
+ ~DxgiTextureMapping() override;
+
+ protected:
+ bool CopyFromTexture(const DXGI_OUTDUPL_FRAME_INFO& frame_info,
+ ID3D11Texture2D* texture) override;
+
+ bool DoRelease() override;
+
+ private:
+ IDXGIOutputDuplication* const duplication_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_DESKTOP_CAPTURE_WIN_DXGI_TEXTURE_MAPPING_H_
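
The choice between this mapping implementation and the staging one (dxgi_texture_staging.h, below) hinges on DXGI_OUTDUPL_DESC.DesktopImageInSystemMemory, as the class comment explains. A hedged sketch of that selection follows; the real decision lives in the duplicator code, not in this header, so `duplication`, `device` and `desc` are assumed to be initialized by the caller.

#include <memory>

#include "modules/desktop_capture/win/dxgi_texture_mapping.h"
#include "modules/desktop_capture/win/dxgi_texture_staging.h"

// Sketch only: the inputs are assumed to be already initialized.
std::unique_ptr<webrtc::DxgiTexture> CreateTexture(
    IDXGIOutputDuplication* duplication,
    const webrtc::D3dDevice& device,
    const DXGI_OUTDUPL_DESC& desc) {
  if (desc.DesktopImageInSystemMemory) {
    // Frames already live in system memory: map them directly.
    return std::make_unique<webrtc::DxgiTextureMapping>(duplication);
  }
  // Frames live in video memory: copy through a CPU-accessible staging
  // texture first.
  return std::make_unique<webrtc::DxgiTextureStaging>(device);
}
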
diff --git a/third_party/libwebrtc/modules/desktop_capture/win/dxgi_texture_staging.cc b/third_party/libwebrtc/modules/desktop_capture/win/dxgi_texture_staging.cc
new file mode 100644
index 0000000000..17e8518a7d
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/win/dxgi_texture_staging.cc
@@ -0,0 +1,132 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/win/dxgi_texture_staging.h"
+
+#include <comdef.h>
+#include <dxgi.h>
+#include <dxgi1_2.h>
+#include <unknwn.h>
+
+#include "modules/desktop_capture/win/desktop_capture_utils.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "system_wrappers/include/metrics.h"
+
+using Microsoft::WRL::ComPtr;
+
+namespace webrtc {
+
+DxgiTextureStaging::DxgiTextureStaging(const D3dDevice& device)
+ : device_(device) {}
+
+DxgiTextureStaging::~DxgiTextureStaging() = default;
+
+bool DxgiTextureStaging::InitializeStage(ID3D11Texture2D* texture) {
+ RTC_DCHECK(texture);
+ D3D11_TEXTURE2D_DESC desc = {0};
+ texture->GetDesc(&desc);
+
+ desc.ArraySize = 1;
+ desc.BindFlags = 0;
+ desc.CPUAccessFlags = D3D11_CPU_ACCESS_READ;
+ desc.MipLevels = 1;
+ desc.MiscFlags = 0;
+ desc.SampleDesc.Count = 1;
+ desc.SampleDesc.Quality = 0;
+ desc.Usage = D3D11_USAGE_STAGING;
+ if (stage_) {
+ AssertStageAndSurfaceAreSameObject();
+ D3D11_TEXTURE2D_DESC current_desc;
+ stage_->GetDesc(&current_desc);
+ const bool recreate_needed =
+ (memcmp(&desc, &current_desc, sizeof(D3D11_TEXTURE2D_DESC)) != 0);
+ RTC_HISTOGRAM_BOOLEAN("WebRTC.DesktopCapture.StagingTextureRecreate",
+ recreate_needed);
+ if (!recreate_needed) {
+ return true;
+ }
+
+    // The descriptions are not consistent; we need to create a new
+    // ID3D11Texture2D instance.
+ stage_.Reset();
+ surface_.Reset();
+ } else {
+ RTC_DCHECK(!surface_);
+ }
+
+ _com_error error = device_.d3d_device()->CreateTexture2D(
+ &desc, nullptr, stage_.GetAddressOf());
+ if (error.Error() != S_OK || !stage_) {
+ RTC_LOG(LS_ERROR) << "Failed to create a new ID3D11Texture2D as stage: "
+ << desktop_capture::utils::ComErrorToString(error);
+ return false;
+ }
+
+ error = stage_.As(&surface_);
+ if (error.Error() != S_OK || !surface_) {
+ RTC_LOG(LS_ERROR) << "Failed to convert ID3D11Texture2D to IDXGISurface: "
+ << desktop_capture::utils::ComErrorToString(error);
+ return false;
+ }
+
+ return true;
+}
+
+void DxgiTextureStaging::AssertStageAndSurfaceAreSameObject() {
+ ComPtr<IUnknown> left;
+ ComPtr<IUnknown> right;
+ bool left_result = SUCCEEDED(stage_.As(&left));
+ bool right_result = SUCCEEDED(surface_.As(&right));
+ RTC_DCHECK(left_result);
+ RTC_DCHECK(right_result);
+ RTC_DCHECK(left.Get() == right.Get());
+}
+
+bool DxgiTextureStaging::CopyFromTexture(
+ const DXGI_OUTDUPL_FRAME_INFO& frame_info,
+ ID3D11Texture2D* texture) {
+ RTC_DCHECK_GT(frame_info.AccumulatedFrames, 0);
+ RTC_DCHECK(texture);
+
+  // AcquireNextFrame returns a CPU-inaccessible IDXGIResource, so we need to
+  // copy it to a CPU-accessible staging ID3D11Texture2D.
+ if (!InitializeStage(texture)) {
+ return false;
+ }
+
+ device_.context()->CopyResource(static_cast<ID3D11Resource*>(stage_.Get()),
+ static_cast<ID3D11Resource*>(texture));
+
+ *rect() = {0};
+ _com_error error = surface_->Map(rect(), DXGI_MAP_READ);
+ if (error.Error() != S_OK) {
+ *rect() = {0};
+ RTC_LOG(LS_ERROR) << "Failed to map the IDXGISurface to a bitmap: "
+ << desktop_capture::utils::ComErrorToString(error);
+ return false;
+ }
+
+ return true;
+}
+
+bool DxgiTextureStaging::DoRelease() {
+ _com_error error = surface_->Unmap();
+ if (error.Error() != S_OK) {
+ stage_.Reset();
+ surface_.Reset();
+ }
+  // In staging mode we only need to recreate the ID3D11Texture2D instance,
+  // which happens during the next CopyFrom() call, so this function always
+  // returns true.
+ return true;
+}
+
+} // namespace webrtc
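
Stripped of the WebRTC plumbing, CopyFromTexture() above is the standard D3D11 staging-readback pattern: create a D3D11_USAGE_STAGING copy target, CopyResource() into it, then map it for CPU reads. (This file maps via IDXGISurface::Map rather than ID3D11DeviceContext::Map, but the effect is the same.) A minimal self-contained sketch, assuming a single-mip 32-bit source texture:

#include <cstdint>
#include <d3d11.h>
#include <wrl/client.h>

// Sketch only; `device`, `context` and `src` are assumed to exist, and
// format/mip-count checks are elided.
bool ReadTopLeftPixel(ID3D11Device* device,
                      ID3D11DeviceContext* context,
                      ID3D11Texture2D* src,
                      uint32_t* out_pixel) {
  D3D11_TEXTURE2D_DESC desc;
  src->GetDesc(&desc);
  desc.Usage = D3D11_USAGE_STAGING;             // CPU-readable copy target.
  desc.BindFlags = 0;
  desc.CPUAccessFlags = D3D11_CPU_ACCESS_READ;
  desc.MiscFlags = 0;

  Microsoft::WRL::ComPtr<ID3D11Texture2D> stage;
  if (FAILED(device->CreateTexture2D(&desc, nullptr, &stage)))
    return false;

  context->CopyResource(stage.Get(), src);      // GPU -> staging copy.

  D3D11_MAPPED_SUBRESOURCE mapped;
  if (FAILED(context->Map(stage.Get(), 0, D3D11_MAP_READ, 0, &mapped)))
    return false;
  *out_pixel = *static_cast<const uint32_t*>(mapped.pData);
  context->Unmap(stage.Get(), 0);
  return true;
}
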
diff --git a/third_party/libwebrtc/modules/desktop_capture/win/dxgi_texture_staging.h b/third_party/libwebrtc/modules/desktop_capture/win/dxgi_texture_staging.h
new file mode 100644
index 0000000000..e8c2af6662
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/win/dxgi_texture_staging.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_DESKTOP_CAPTURE_WIN_DXGI_TEXTURE_STAGING_H_
+#define MODULES_DESKTOP_CAPTURE_WIN_DXGI_TEXTURE_STAGING_H_
+
+#include <d3d11.h>
+#include <dxgi1_2.h>
+#include <wrl/client.h>
+
+#include "modules/desktop_capture/desktop_geometry.h"
+#include "modules/desktop_capture/desktop_region.h"
+#include "modules/desktop_capture/win/d3d_device.h"
+#include "modules/desktop_capture/win/dxgi_texture.h"
+
+namespace webrtc {
+
+// A pair of an ID3D11Texture2D and an IDXGISurface. We need an ID3D11Texture2D
+// instance to copy the GPU texture into RAM, but an IDXGISurface instance to
+// map the texture into a bitmap buffer. These two instances point to the same
+// object.
+//
+// An ID3D11Texture2D is created by an ID3D11Device, so a DxgiTexture cannot be
+// shared between two DxgiAdapterDuplicators.
+class DxgiTextureStaging : public DxgiTexture {
+ public:
+ // Creates a DxgiTextureStaging instance. Caller must maintain the lifetime
+ // of input device to make sure it outlives this instance.
+ explicit DxgiTextureStaging(const D3dDevice& device);
+
+ ~DxgiTextureStaging() override;
+
+ protected:
+ // Copies selected regions of a frame represented by frame_info and texture.
+ // Returns false if anything wrong.
+ bool CopyFromTexture(const DXGI_OUTDUPL_FRAME_INFO& frame_info,
+ ID3D11Texture2D* texture) override;
+
+ bool DoRelease() override;
+
+ private:
+  // Initializes stage_ from a CPU-inaccessible IDXGIResource. Returns false
+  // if a Windows API call fails, or if the size of the texture is not
+  // consistent with desktop_rect_.
+ bool InitializeStage(ID3D11Texture2D* texture);
+
+  // Makes sure stage_ and surface_ are always pointing to the same object.
+ // We need an ID3D11Texture2D instance for
+ // ID3D11DeviceContext::CopySubresourceRegion, but an IDXGISurface for
+ // IDXGISurface::Map.
+ void AssertStageAndSurfaceAreSameObject();
+
+ const DesktopRect desktop_rect_;
+ const D3dDevice device_;
+ Microsoft::WRL::ComPtr<ID3D11Texture2D> stage_;
+ Microsoft::WRL::ComPtr<IDXGISurface> surface_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_DESKTOP_CAPTURE_WIN_DXGI_TEXTURE_STAGING_H_
diff --git a/third_party/libwebrtc/modules/desktop_capture/win/full_screen_win_application_handler.cc b/third_party/libwebrtc/modules/desktop_capture/win/full_screen_win_application_handler.cc
new file mode 100644
index 0000000000..c6143ef785
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/win/full_screen_win_application_handler.cc
@@ -0,0 +1,296 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/win/full_screen_win_application_handler.h"
+
+#include <algorithm>
+#if defined(WEBRTC_MOZILLA_BUILD)
+#include <cwctype>
+#endif
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "absl/strings/ascii.h"
+#include "absl/strings/match.h"
+#include "modules/desktop_capture/win/screen_capture_utils.h"
+#include "modules/desktop_capture/win/window_capture_utils.h"
+#include "rtc_base/arraysize.h"
+#include "rtc_base/logging.h" // For RTC_LOG_GLE
+#include "rtc_base/string_utils.h"
+
+namespace webrtc {
+namespace {
+
+// Utility function to verify that `window` has a class name equal to
+// `class_name`.
+bool CheckWindowClassName(HWND window, const wchar_t* class_name) {
+ const size_t classNameLength = wcslen(class_name);
+
+ // https://docs.microsoft.com/en-us/windows/win32/api/winuser/ns-winuser-wndclassa
+  // says the lpszClassName field in WNDCLASS is limited to 256 characters, so
+  // we don't need a buffer bigger than that.
+ constexpr size_t kMaxClassNameLength = 256;
+ WCHAR buffer[kMaxClassNameLength];
+
+ const int length = ::GetClassNameW(window, buffer, kMaxClassNameLength);
+ if (length <= 0)
+ return false;
+
+ if (static_cast<size_t>(length) != classNameLength)
+ return false;
+ return wcsncmp(buffer, class_name, classNameLength) == 0;
+}
+
+std::string WindowText(HWND window) {
+ size_t len = ::GetWindowTextLength(window);
+ if (len == 0)
+ return std::string();
+
+ std::vector<wchar_t> buffer(len + 1, 0);
+ size_t copied = ::GetWindowTextW(window, buffer.data(), buffer.size());
+ if (copied == 0)
+ return std::string();
+ return rtc::ToUtf8(buffer.data(), copied);
+}
+
+DWORD WindowProcessId(HWND window) {
+ DWORD dwProcessId = 0;
+ ::GetWindowThreadProcessId(window, &dwProcessId);
+ return dwProcessId;
+}
+
+std::wstring FileNameFromPath(const std::wstring& path) {
+ auto found = path.rfind(L"\\");
+ if (found == std::string::npos)
+ return path;
+ return path.substr(found + 1);
+}
+
+// Returns the windows which belong to the given process id.
+// `sources` is the full list of available windows.
+// `processId` is a process identifier (window owner).
+// `window_to_exclude` is a window to be excluded from the result.
+DesktopCapturer::SourceList GetProcessWindows(
+ const DesktopCapturer::SourceList& sources,
+ DWORD processId,
+ HWND window_to_exclude) {
+ DesktopCapturer::SourceList result;
+ std::copy_if(sources.begin(), sources.end(), std::back_inserter(result),
+ [&](DesktopCapturer::Source source) {
+ const HWND source_hwnd = reinterpret_cast<HWND>(source.id);
+ return window_to_exclude != source_hwnd &&
+ WindowProcessId(source_hwnd) == processId;
+ });
+ return result;
+}
+
+class FullScreenPowerPointHandler : public FullScreenApplicationHandler {
+ public:
+ explicit FullScreenPowerPointHandler(DesktopCapturer::SourceId sourceId)
+ : FullScreenApplicationHandler(sourceId) {}
+
+ ~FullScreenPowerPointHandler() override {}
+
+ DesktopCapturer::SourceId FindFullScreenWindow(
+ const DesktopCapturer::SourceList& window_list,
+ int64_t timestamp) const override {
+ if (window_list.empty())
+ return 0;
+
+ HWND original_window = reinterpret_cast<HWND>(GetSourceId());
+ DWORD process_id = WindowProcessId(original_window);
+
+ DesktopCapturer::SourceList powerpoint_windows =
+ GetProcessWindows(window_list, process_id, original_window);
+
+ if (powerpoint_windows.empty())
+ return 0;
+
+ if (GetWindowType(original_window) != WindowType::kEditor)
+ return 0;
+
+ const auto original_document = GetDocumentFromEditorTitle(original_window);
+
+ for (const auto& source : powerpoint_windows) {
+ HWND window = reinterpret_cast<HWND>(source.id);
+
+      // Look for a slide show window for the same document.
+ if (GetWindowType(window) != WindowType::kSlideShow ||
+ GetDocumentFromSlideShowTitle(window) != original_document) {
+ continue;
+ }
+
+ return source.id;
+ }
+
+ return 0;
+ }
+
+ private:
+ enum class WindowType { kEditor, kSlideShow, kOther };
+
+ WindowType GetWindowType(HWND window) const {
+ if (IsEditorWindow(window))
+ return WindowType::kEditor;
+ else if (IsSlideShowWindow(window))
+ return WindowType::kSlideShow;
+ else
+ return WindowType::kOther;
+ }
+
+ constexpr static char kDocumentTitleSeparator[] = " - ";
+
+ std::string GetDocumentFromEditorTitle(HWND window) const {
+ std::string title = WindowText(window);
+ auto position = title.find(kDocumentTitleSeparator);
+ return std::string(absl::StripAsciiWhitespace(
+ absl::string_view(title).substr(0, position)));
+ }
+
+ std::string GetDocumentFromSlideShowTitle(HWND window) const {
+ std::string title = WindowText(window);
+ auto left_pos = title.find(kDocumentTitleSeparator);
+ auto right_pos = title.rfind(kDocumentTitleSeparator);
+ constexpr size_t kSeparatorLength = arraysize(kDocumentTitleSeparator) - 1;
+ if (left_pos == std::string::npos || right_pos == std::string::npos)
+ return title;
+
+ if (right_pos > left_pos + kSeparatorLength) {
+ auto result_len = right_pos - left_pos - kSeparatorLength;
+ auto document = absl::string_view(title).substr(
+ left_pos + kSeparatorLength, result_len);
+ return std::string(absl::StripAsciiWhitespace(document));
+ } else {
+ auto document = absl::string_view(title).substr(
+ left_pos + kSeparatorLength, std::wstring::npos);
+ return std::string(absl::StripAsciiWhitespace(document));
+ }
+ }
+
+ bool IsEditorWindow(HWND window) const {
+ return CheckWindowClassName(window, L"PPTFrameClass");
+ }
+
+ bool IsSlideShowWindow(HWND window) const {
+ const LONG style = ::GetWindowLong(window, GWL_STYLE);
+ const bool min_box = WS_MINIMIZEBOX & style;
+ const bool max_box = WS_MAXIMIZEBOX & style;
+ return !min_box && !max_box;
+ }
+};
+
+class OpenOfficeApplicationHandler : public FullScreenApplicationHandler {
+ public:
+ explicit OpenOfficeApplicationHandler(DesktopCapturer::SourceId sourceId)
+ : FullScreenApplicationHandler(sourceId) {}
+
+ DesktopCapturer::SourceId FindFullScreenWindow(
+ const DesktopCapturer::SourceList& window_list,
+ int64_t timestamp) const override {
+ if (window_list.empty())
+ return 0;
+
+ DWORD process_id = WindowProcessId(reinterpret_cast<HWND>(GetSourceId()));
+
+ DesktopCapturer::SourceList app_windows =
+ GetProcessWindows(window_list, process_id, nullptr);
+
+ DesktopCapturer::SourceList document_windows;
+ std::copy_if(
+ app_windows.begin(), app_windows.end(),
+ std::back_inserter(document_windows),
+ [this](const DesktopCapturer::Source& x) { return IsEditorWindow(x); });
+
+    // Check that we have exactly one document window; otherwise it is not
+    // possible to reliably match a document window to a slide show window,
+    // which has an empty title.
+ if (document_windows.size() != 1) {
+ return 0;
+ }
+
+    // Check whether the document window has been selected as the source.
+ if (document_windows.front().id != GetSourceId()) {
+ return 0;
+ }
+
+ // Check if we have a slide show window.
+ auto slide_show_window =
+ std::find_if(app_windows.begin(), app_windows.end(),
+ [this](const DesktopCapturer::Source& x) {
+ return IsSlideShowWindow(x);
+ });
+
+ if (slide_show_window == app_windows.end())
+ return 0;
+
+ return slide_show_window->id;
+ }
+
+ private:
+ bool IsEditorWindow(const DesktopCapturer::Source& source) const {
+ if (source.title.empty()) {
+ return false;
+ }
+
+ return CheckWindowClassName(reinterpret_cast<HWND>(source.id), L"SALFRAME");
+ }
+
+ bool IsSlideShowWindow(const DesktopCapturer::Source& source) const {
+    // Check the title to filter out the Presenter Control window, which
+    // shares a window class with the Slide Show window but has a non-empty
+    // title.
+ if (!source.title.empty()) {
+ return false;
+ }
+
+ return CheckWindowClassName(reinterpret_cast<HWND>(source.id),
+ L"SALTMPSUBFRAME");
+ }
+};
+
+std::wstring GetPathByWindowId(HWND window_id) {
+ DWORD process_id = WindowProcessId(window_id);
+ HANDLE process =
+ ::OpenProcess(PROCESS_QUERY_LIMITED_INFORMATION, FALSE, process_id);
+ if (process == NULL)
+ return L"";
+ DWORD path_len = MAX_PATH;
+ WCHAR path[MAX_PATH];
+ std::wstring result;
+ if (::QueryFullProcessImageNameW(process, 0, path, &path_len))
+ result = std::wstring(path, path_len);
+ else
+ RTC_LOG_GLE(LS_ERROR) << "QueryFullProcessImageName failed.";
+
+ ::CloseHandle(process);
+ return result;
+}
+
+} // namespace
+
+std::unique_ptr<FullScreenApplicationHandler>
+CreateFullScreenWinApplicationHandler(DesktopCapturer::SourceId source_id) {
+ std::unique_ptr<FullScreenApplicationHandler> result;
+ HWND hwnd = reinterpret_cast<HWND>(source_id);
+ std::wstring exe_path = GetPathByWindowId(hwnd);
+ std::wstring file_name = FileNameFromPath(exe_path);
+ std::transform(file_name.begin(), file_name.end(), file_name.begin(),
+ std::towupper);
+
+ if (file_name == L"POWERPNT.EXE") {
+ result = std::make_unique<FullScreenPowerPointHandler>(source_id);
+ } else if (file_name == L"SOFFICE.BIN" &&
+ absl::EndsWith(WindowText(hwnd), "OpenOffice Impress")) {
+ result = std::make_unique<OpenOfficeApplicationHandler>(source_id);
+ }
+
+ return result;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/win/full_screen_win_application_handler.h b/third_party/libwebrtc/modules/desktop_capture/win/full_screen_win_application_handler.h
new file mode 100644
index 0000000000..c97cbe252b
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/win/full_screen_win_application_handler.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_DESKTOP_CAPTURE_WIN_FULL_SCREEN_WIN_APPLICATION_HANDLER_H_
+#define MODULES_DESKTOP_CAPTURE_WIN_FULL_SCREEN_WIN_APPLICATION_HANDLER_H_
+
+#include <memory>
+#include "modules/desktop_capture/full_screen_application_handler.h"
+
+namespace webrtc {
+
+std::unique_ptr<FullScreenApplicationHandler>
+CreateFullScreenWinApplicationHandler(DesktopCapturer::SourceId sourceId);
+
+} // namespace webrtc
+
+#endif // MODULES_DESKTOP_CAPTURE_WIN_FULL_SCREEN_WIN_APPLICATION_HANDLER_H_
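
A sketch of how a window capturer might consult the factory above before each capture. `current_windows` is assumed to come from a capturer's GetSourceList(), and the surrounding capturer code is not shown in this diff.

#include <cstdint>
#include <memory>

#include "modules/desktop_capture/win/full_screen_win_application_handler.h"

// Sketch only: `selected`, `current_windows` and `now_ns` are assumed inputs.
webrtc::DesktopCapturer::SourceId ResolveCaptureTarget(
    webrtc::DesktopCapturer::SourceId selected,
    const webrtc::DesktopCapturer::SourceList& current_windows,
    int64_t now_ns) {
  std::unique_ptr<webrtc::FullScreenApplicationHandler> handler =
      webrtc::CreateFullScreenWinApplicationHandler(selected);
  if (!handler) {
    // Not PowerPoint or Impress: keep capturing the originally selected
    // window.
    return selected;
  }
  webrtc::DesktopCapturer::SourceId slide_show =
      handler->FindFullScreenWindow(current_windows, now_ns);
  // FindFullScreenWindow() returns 0 when no slide-show window matches.
  return slide_show ? slide_show : selected;
}
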
diff --git a/third_party/libwebrtc/modules/desktop_capture/win/scoped_gdi_object.h b/third_party/libwebrtc/modules/desktop_capture/win/scoped_gdi_object.h
new file mode 100644
index 0000000000..2b01941e20
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/win/scoped_gdi_object.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_DESKTOP_CAPTURE_WIN_SCOPED_GDI_HANDLE_H_
+#define MODULES_DESKTOP_CAPTURE_WIN_SCOPED_GDI_HANDLE_H_
+
+#include <windows.h>
+
+namespace webrtc {
+namespace win {
+
+// Scoper for GDI objects.
+template <class T, class Traits>
+class ScopedGDIObject {
+ public:
+ ScopedGDIObject() : handle_(NULL) {}
+ explicit ScopedGDIObject(T object) : handle_(object) {}
+
+ ~ScopedGDIObject() { Traits::Close(handle_); }
+
+ ScopedGDIObject(const ScopedGDIObject&) = delete;
+ ScopedGDIObject& operator=(const ScopedGDIObject&) = delete;
+
+ T Get() { return handle_; }
+
+ void Set(T object) {
+ if (handle_ && object != handle_)
+ Traits::Close(handle_);
+ handle_ = object;
+ }
+
+ ScopedGDIObject& operator=(T object) {
+ Set(object);
+ return *this;
+ }
+
+ T release() {
+ T object = handle_;
+ handle_ = NULL;
+ return object;
+ }
+
+ operator T() { return handle_; }
+
+ private:
+ T handle_;
+};
+
+// The traits class that uses DeleteObject() to close a handle.
+template <typename T>
+class DeleteObjectTraits {
+ public:
+ DeleteObjectTraits() = delete;
+ DeleteObjectTraits(const DeleteObjectTraits&) = delete;
+ DeleteObjectTraits& operator=(const DeleteObjectTraits&) = delete;
+
+ // Closes the handle.
+ static void Close(T handle) {
+ if (handle)
+ DeleteObject(handle);
+ }
+};
+
+// The traits class that uses DestroyCursor() to close a handle.
+class DestroyCursorTraits {
+ public:
+ DestroyCursorTraits() = delete;
+ DestroyCursorTraits(const DestroyCursorTraits&) = delete;
+ DestroyCursorTraits& operator=(const DestroyCursorTraits&) = delete;
+
+ // Closes the handle.
+ static void Close(HCURSOR handle) {
+ if (handle)
+ DestroyCursor(handle);
+ }
+};
+
+typedef ScopedGDIObject<HBITMAP, DeleteObjectTraits<HBITMAP> > ScopedBitmap;
+typedef ScopedGDIObject<HCURSOR, DestroyCursorTraits> ScopedCursor;
+
+} // namespace win
+} // namespace webrtc
+
+#endif // MODULES_DESKTOP_CAPTURE_WIN_SCOPED_GDI_HANDLE_H_
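
A brief usage sketch of the RAII wrappers above; the bitmap parameters are arbitrary illustrative values.

#include <windows.h>

#include "modules/desktop_capture/win/scoped_gdi_object.h"

void ScopedBitmapExample() {
  // The scoper takes ownership; DeleteObject() runs when `bitmap` leaves
  // scope.
  webrtc::win::ScopedBitmap bitmap(CreateBitmap(32, 32, 1, 32, nullptr));
  if (!bitmap.Get())
    return;
  // Implicit conversion lets the wrapper be passed to APIs taking HBITMAP.
  HBITMAP raw = bitmap;
  (void)raw;
  // bitmap.release() would instead hand ownership back to the caller.
}
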
diff --git a/third_party/libwebrtc/modules/desktop_capture/win/scoped_thread_desktop.cc b/third_party/libwebrtc/modules/desktop_capture/win/scoped_thread_desktop.cc
new file mode 100644
index 0000000000..22e8e7bc8f
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/win/scoped_thread_desktop.cc
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/win/scoped_thread_desktop.h"
+
+#include "modules/desktop_capture/win/desktop.h"
+
+namespace webrtc {
+
+ScopedThreadDesktop::ScopedThreadDesktop()
+ : initial_(Desktop::GetThreadDesktop()) {}
+
+ScopedThreadDesktop::~ScopedThreadDesktop() {
+ Revert();
+}
+
+bool ScopedThreadDesktop::IsSame(const Desktop& desktop) {
+ if (assigned_.get() != NULL) {
+ return assigned_->IsSame(desktop);
+ } else {
+ return initial_->IsSame(desktop);
+ }
+}
+
+void ScopedThreadDesktop::Revert() {
+ if (assigned_.get() != NULL) {
+ initial_->SetThreadDesktop();
+ assigned_.reset();
+ }
+}
+
+bool ScopedThreadDesktop::SetThreadDesktop(Desktop* desktop) {
+ Revert();
+
+ std::unique_ptr<Desktop> scoped_desktop(desktop);
+
+ if (initial_->IsSame(*desktop))
+ return true;
+
+ if (!desktop->SetThreadDesktop())
+ return false;
+
+ assigned_.reset(scoped_desktop.release());
+ return true;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/win/scoped_thread_desktop.h b/third_party/libwebrtc/modules/desktop_capture/win/scoped_thread_desktop.h
new file mode 100644
index 0000000000..98f151a46c
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/win/scoped_thread_desktop.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_DESKTOP_CAPTURE_WIN_SCOPED_THREAD_DESKTOP_H_
+#define MODULES_DESKTOP_CAPTURE_WIN_SCOPED_THREAD_DESKTOP_H_
+
+#include <windows.h>
+
+#include <memory>
+
+#include "rtc_base/system/rtc_export.h"
+
+namespace webrtc {
+
+class Desktop;
+
+class RTC_EXPORT ScopedThreadDesktop {
+ public:
+ ScopedThreadDesktop();
+ ~ScopedThreadDesktop();
+
+ ScopedThreadDesktop(const ScopedThreadDesktop&) = delete;
+ ScopedThreadDesktop& operator=(const ScopedThreadDesktop&) = delete;
+
+ // Returns true if `desktop` has the same desktop name as the currently
+ // assigned desktop (if assigned) or as the initial desktop (if not assigned).
+ // Returns false in any other case including failing Win32 APIs and
+ // uninitialized desktop handles.
+ bool IsSame(const Desktop& desktop);
+
+ // Reverts the calling thread to use the initial desktop.
+ void Revert();
+
+ // Assigns `desktop` to be the calling thread. Returns true if the thread has
+ // been switched to `desktop` successfully. Takes ownership of `desktop`.
+ bool SetThreadDesktop(Desktop* desktop);
+
+ private:
+  // The desktop handle assigned to the calling thread by SetThreadDesktop().
+ std::unique_ptr<Desktop> assigned_;
+
+ // The desktop handle assigned to the calling thread at creation.
+ std::unique_ptr<Desktop> initial_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_DESKTOP_CAPTURE_WIN_SCOPED_THREAD_DESKTOP_H_
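
A sketch of the intended call pattern, assuming the Desktop class from desktop.h (not shown in this diff) exposes a static GetInputDesktop() returning a heap-allocated Desktop.

#include "modules/desktop_capture/win/desktop.h"
#include "modules/desktop_capture/win/scoped_thread_desktop.h"

void SwitchToInputDesktopSketch() {
  webrtc::ScopedThreadDesktop desktop_scope;
  // GetInputDesktop() is assumed from desktop.h. SetThreadDesktop() takes
  // ownership of the pointer whether or not a switch is needed.
  webrtc::Desktop* input = webrtc::Desktop::GetInputDesktop();
  if (input) {
    desktop_scope.SetThreadDesktop(input);
  }
  // ... capture work on the input desktop ...
}  // ~ScopedThreadDesktop() reverts to the initial desktop.
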
diff --git a/third_party/libwebrtc/modules/desktop_capture/win/screen_capture_utils.cc b/third_party/libwebrtc/modules/desktop_capture/win/screen_capture_utils.cc
new file mode 100644
index 0000000000..1dc2918d08
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/win/screen_capture_utils.cc
@@ -0,0 +1,184 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/win/screen_capture_utils.h"
+
+#include <windows.h>
+
+#include <string>
+#include <vector>
+
+#include "modules/desktop_capture/desktop_capturer.h"
+#include "modules/desktop_capture/desktop_geometry.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/string_utils.h"
+#include "rtc_base/win32.h"
+
+namespace webrtc {
+
+bool HasActiveDisplay() {
+ DesktopCapturer::SourceList screens;
+
+ return GetScreenList(&screens) && !screens.empty();
+}
+
+bool GetScreenList(DesktopCapturer::SourceList* screens,
+ std::vector<std::string>* device_names /* = nullptr */) {
+ RTC_DCHECK(screens->empty());
+ RTC_DCHECK(!device_names || device_names->empty());
+
+ BOOL enum_result = TRUE;
+ for (int device_index = 0;; ++device_index) {
+ DISPLAY_DEVICEW device;
+ device.cb = sizeof(device);
+ enum_result = EnumDisplayDevicesW(NULL, device_index, &device, 0);
+
+ // `enum_result` is 0 if we have enumerated all devices.
+ if (!enum_result) {
+ break;
+ }
+
+ // We only care about active displays.
+ if (!(device.StateFlags & DISPLAY_DEVICE_ACTIVE)) {
+ continue;
+ }
+
+ screens->push_back({device_index, 0, std::string()});
+ if (device_names) {
+ device_names->push_back(rtc::ToUtf8(device.DeviceName));
+ }
+ }
+ return true;
+}
+
+bool GetHmonitorFromDeviceIndex(const DesktopCapturer::SourceId device_index,
+ HMONITOR* hmonitor) {
+  // A device index of `kFullDesktopScreenId` or -1 represents all screens;
+  // an HMONITOR of 0 indicates the same.
+ if (device_index == kFullDesktopScreenId) {
+ *hmonitor = 0;
+ return true;
+ }
+
+ std::wstring device_key;
+ if (!IsScreenValid(device_index, &device_key)) {
+ return false;
+ }
+
+ DesktopRect screen_rect = GetScreenRect(device_index, device_key);
+ if (screen_rect.is_empty()) {
+ return false;
+ }
+
+ RECT rect = {screen_rect.left(), screen_rect.top(), screen_rect.right(),
+ screen_rect.bottom()};
+
+ HMONITOR monitor = MonitorFromRect(&rect, MONITOR_DEFAULTTONULL);
+ if (monitor == NULL) {
+ RTC_LOG(LS_WARNING) << "No HMONITOR found for supplied device index.";
+ return false;
+ }
+
+ *hmonitor = monitor;
+ return true;
+}
+
+bool IsMonitorValid(const HMONITOR monitor) {
+ // An HMONITOR of 0 refers to a virtual monitor that spans all physical
+ // monitors.
+ if (monitor == 0) {
+ // There is a bug in a Windows OS API that causes a crash when capturing if
+ // there are no active displays. We must ensure there is an active display
+ // before returning true.
+ if (!HasActiveDisplay())
+ return false;
+
+ return true;
+ }
+
+ MONITORINFO monitor_info;
+ monitor_info.cbSize = sizeof(MONITORINFO);
+ return GetMonitorInfoA(monitor, &monitor_info);
+}
+
+DesktopRect GetMonitorRect(const HMONITOR monitor) {
+ MONITORINFO monitor_info;
+ monitor_info.cbSize = sizeof(MONITORINFO);
+ if (!GetMonitorInfoA(monitor, &monitor_info)) {
+ return DesktopRect();
+ }
+
+ return DesktopRect::MakeLTRB(
+ monitor_info.rcMonitor.left, monitor_info.rcMonitor.top,
+ monitor_info.rcMonitor.right, monitor_info.rcMonitor.bottom);
+}
+
+bool IsScreenValid(const DesktopCapturer::SourceId screen,
+ std::wstring* device_key) {
+ if (screen == kFullDesktopScreenId) {
+ *device_key = L"";
+ return true;
+ }
+
+ DISPLAY_DEVICEW device;
+ device.cb = sizeof(device);
+ BOOL enum_result = EnumDisplayDevicesW(NULL, screen, &device, 0);
+ if (enum_result) {
+ *device_key = device.DeviceKey;
+ }
+
+ return !!enum_result;
+}
+
+DesktopRect GetFullscreenRect() {
+ return DesktopRect::MakeXYWH(GetSystemMetrics(SM_XVIRTUALSCREEN),
+ GetSystemMetrics(SM_YVIRTUALSCREEN),
+ GetSystemMetrics(SM_CXVIRTUALSCREEN),
+ GetSystemMetrics(SM_CYVIRTUALSCREEN));
+}
+
+DesktopRect GetScreenRect(const DesktopCapturer::SourceId screen,
+ const std::wstring& device_key) {
+ RTC_DCHECK(IsGUIThread(false));
+ if (screen == kFullDesktopScreenId) {
+ return GetFullscreenRect();
+ }
+
+ DISPLAY_DEVICEW device;
+ device.cb = sizeof(device);
+ BOOL result = EnumDisplayDevicesW(NULL, screen, &device, 0);
+ if (!result) {
+ return DesktopRect();
+ }
+
+ // Verifies the device index still maps to the same display device, to make
+ // sure we are capturing the same device when devices are added or removed.
+ // DeviceKey is documented as reserved, but it actually contains the registry
+ // key for the device and is unique for each monitor, while DeviceID is not.
+ if (device_key != device.DeviceKey) {
+ return DesktopRect();
+ }
+
+ DEVMODEW device_mode;
+ device_mode.dmSize = sizeof(device_mode);
+ device_mode.dmDriverExtra = 0;
+ result = EnumDisplaySettingsExW(device.DeviceName, ENUM_CURRENT_SETTINGS,
+ &device_mode, 0);
+ if (!result) {
+ return DesktopRect();
+ }
+
+ return DesktopRect::MakeXYWH(
+ device_mode.dmPosition.x, device_mode.dmPosition.y,
+ device_mode.dmPelsWidth, device_mode.dmPelsHeight);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/win/screen_capture_utils.h b/third_party/libwebrtc/modules/desktop_capture/win/screen_capture_utils.h
new file mode 100644
index 0000000000..97bfe816d8
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/win/screen_capture_utils.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_DESKTOP_CAPTURE_WIN_SCREEN_CAPTURE_UTILS_H_
+#define MODULES_DESKTOP_CAPTURE_WIN_SCREEN_CAPTURE_UTILS_H_
+
+#if defined(WEBRTC_WIN)
+// Forward declare HMONITOR in a windows.h compatible way so that we can avoid
+// including windows.h.
+#define WEBRTC_DECLARE_HANDLE(name) \
+  struct name##__;                  \
+  typedef struct name##__* name
+WEBRTC_DECLARE_HANDLE(HMONITOR);
+#undef WEBRTC_DECLARE_HANDLE
+#endif
+
+#include <string>
+#include <vector>
+
+#include "modules/desktop_capture/desktop_capturer.h"
+#include "rtc_base/system/rtc_export.h"
+
+namespace webrtc {
+
+// Returns true if the system has at least one active display.
+bool HasActiveDisplay();
+
+// Outputs the list of active screens into `screens`. Returns true on success,
+// or false if it fails to enumerate the display devices. If `device_names` is
+// provided, it is filled with the DISPLAY_DEVICE.DeviceName values in UTF-8
+// encoding. If this function returns true, consumers can assume that
+// `screens`[i] and `device_names`[i] refer to the same monitor on the system.
+bool GetScreenList(DesktopCapturer::SourceList* screens,
+ std::vector<std::string>* device_names = nullptr);
+
+// Converts a device index (as returned by `GetScreenList`) into an
+// HMONITOR.
+bool GetHmonitorFromDeviceIndex(DesktopCapturer::SourceId device_index,
+ HMONITOR* hmonitor);
+
+// Returns true if `monitor` represents a valid display
+// monitor. Consumers should recheck the validity of HMONITORs before use if a
+// WM_DISPLAYCHANGE message has been received.
+bool IsMonitorValid(HMONITOR monitor);
+
+// Returns the rect of the monitor identified by `monitor`, relative to the
+// primary display's top-left. On failure, returns an empty rect.
+DesktopRect GetMonitorRect(HMONITOR monitor);
+
+// Returns true if `screen` is a valid screen. The screen device key is
+// returned through `device_key` if the screen is valid. The device key can be
+// used in GetScreenRect to verify the screen matches the previously obtained
+// id.
+bool IsScreenValid(DesktopCapturer::SourceId screen, std::wstring* device_key);
+
+// Gets the rect of the entire system in the system coordinate system, i.e.
+// the primary monitor always starts at (0, 0).
+
+// Gets the rect of the screen identified by `screen`, relative to the primary
+// display's top-left. If the screen device key does not match `device_key`,
+// the screen does not exist, or any other error occurs, an empty rect is
+// returned.
+RTC_EXPORT DesktopRect GetScreenRect(DesktopCapturer::SourceId screen,
+ const std::wstring& device_key);
+
+} // namespace webrtc
+
+#endif // MODULES_DESKTOP_CAPTURE_WIN_SCREEN_CAPTURE_UTILS_H_
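
A usage sketch tying the helpers above together: enumerate the active screens, then resolve each one's bounds via its device key. (GetScreenRect() must be called on a GUI thread, per the DCHECK in its implementation.)

#include <string>
#include <vector>

#include "modules/desktop_capture/win/screen_capture_utils.h"
#include "rtc_base/logging.h"

void LogScreenRects() {
  webrtc::DesktopCapturer::SourceList screens;
  std::vector<std::string> device_names;
  if (!webrtc::GetScreenList(&screens, &device_names))
    return;
  for (size_t i = 0; i < screens.size(); ++i) {
    std::wstring device_key;
    if (!webrtc::IsScreenValid(screens[i].id, &device_key))
      continue;  // The device may have been unplugged since enumeration.
    webrtc::DesktopRect rect =
        webrtc::GetScreenRect(screens[i].id, device_key);
    RTC_LOG(LS_INFO) << device_names[i] << ": " << rect.width() << "x"
                     << rect.height();
  }
}
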
diff --git a/third_party/libwebrtc/modules/desktop_capture/win/screen_capture_utils_unittest.cc b/third_party/libwebrtc/modules/desktop_capture/win/screen_capture_utils_unittest.cc
new file mode 100644
index 0000000000..2e58c6b164
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/win/screen_capture_utils_unittest.cc
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/win/screen_capture_utils.h"
+
+#include <string>
+#include <vector>
+
+#include "modules/desktop_capture/desktop_capture_types.h"
+#include "modules/desktop_capture/desktop_capturer.h"
+#include "rtc_base/logging.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+TEST(ScreenCaptureUtilsTest, GetScreenList) {
+ DesktopCapturer::SourceList screens;
+ std::vector<std::string> device_names;
+
+ ASSERT_TRUE(GetScreenList(&screens));
+ screens.clear();
+ ASSERT_TRUE(GetScreenList(&screens, &device_names));
+
+ ASSERT_EQ(screens.size(), device_names.size());
+}
+
+TEST(ScreenCaptureUtilsTest, DeviceIndexToHmonitor) {
+ DesktopCapturer::SourceList screens;
+ ASSERT_TRUE(GetScreenList(&screens));
+ if (screens.empty()) {
+ RTC_LOG(LS_INFO)
+ << "Skip ScreenCaptureUtilsTest on systems with no monitors.";
+ GTEST_SKIP();
+ }
+
+ HMONITOR hmonitor;
+ ASSERT_TRUE(GetHmonitorFromDeviceIndex(screens[0].id, &hmonitor));
+ ASSERT_TRUE(IsMonitorValid(hmonitor));
+}
+
+TEST(ScreenCaptureUtilsTest, FullScreenDeviceIndexToHmonitor) {
+ if (!HasActiveDisplay()) {
+ RTC_LOG(LS_INFO)
+ << "Skip ScreenCaptureUtilsTest on systems with no monitors.";
+ GTEST_SKIP();
+ }
+
+ HMONITOR hmonitor;
+ ASSERT_TRUE(GetHmonitorFromDeviceIndex(kFullDesktopScreenId, &hmonitor));
+ ASSERT_EQ(hmonitor, static_cast<HMONITOR>(0));
+ ASSERT_TRUE(IsMonitorValid(hmonitor));
+}
+
+TEST(ScreenCaptureUtilsTest, NoMonitors) {
+ if (HasActiveDisplay()) {
+ RTC_LOG(LS_INFO) << "Skip ScreenCaptureUtilsTest designed specifically for "
+ "systems with no monitors";
+ GTEST_SKIP();
+ }
+
+ HMONITOR hmonitor;
+ ASSERT_TRUE(GetHmonitorFromDeviceIndex(kFullDesktopScreenId, &hmonitor));
+ ASSERT_EQ(hmonitor, static_cast<HMONITOR>(0));
+
+ // The monitor should be invalid since the system has no attached displays.
+ ASSERT_FALSE(IsMonitorValid(hmonitor));
+}
+
+TEST(ScreenCaptureUtilsTest, InvalidDeviceIndexToHmonitor) {
+ HMONITOR hmonitor;
+ ASSERT_FALSE(GetHmonitorFromDeviceIndex(kInvalidScreenId, &hmonitor));
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/win/screen_capturer_win_directx.cc b/third_party/libwebrtc/modules/desktop_capture/win/screen_capturer_win_directx.cc
new file mode 100644
index 0000000000..efa763993a
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/win/screen_capturer_win_directx.cc
@@ -0,0 +1,230 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/win/screen_capturer_win_directx.h"
+
+#include <algorithm>
+#include <memory>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "modules/desktop_capture/desktop_capture_metrics_helper.h"
+#include "modules/desktop_capture/desktop_capture_types.h"
+#include "modules/desktop_capture/desktop_frame.h"
+#include "modules/desktop_capture/win/screen_capture_utils.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/time_utils.h"
+#include "rtc_base/trace_event.h"
+#include "system_wrappers/include/metrics.h"
+
+namespace webrtc {
+
+using Microsoft::WRL::ComPtr;
+
+// static
+bool ScreenCapturerWinDirectx::IsSupported() {
+ // Forwards IsSupported() function call to DxgiDuplicatorController.
+ return DxgiDuplicatorController::Instance()->IsSupported();
+}
+
+// static
+bool ScreenCapturerWinDirectx::RetrieveD3dInfo(D3dInfo* info) {
+ // Forwards SupportedFeatureLevels() function call to
+ // DxgiDuplicatorController.
+ return DxgiDuplicatorController::Instance()->RetrieveD3dInfo(info);
+}
+
+// static
+bool ScreenCapturerWinDirectx::IsCurrentSessionSupported() {
+ return DxgiDuplicatorController::IsCurrentSessionSupported();
+}
+
+// static
+bool ScreenCapturerWinDirectx::GetScreenListFromDeviceNames(
+ const std::vector<std::string>& device_names,
+ DesktopCapturer::SourceList* screens) {
+ RTC_DCHECK(screens->empty());
+
+ DesktopCapturer::SourceList gdi_screens;
+ std::vector<std::string> gdi_names;
+ if (!GetScreenList(&gdi_screens, &gdi_names)) {
+ return false;
+ }
+
+ RTC_DCHECK_EQ(gdi_screens.size(), gdi_names.size());
+
+ ScreenId max_screen_id = -1;
+ for (const DesktopCapturer::Source& screen : gdi_screens) {
+ max_screen_id = std::max(max_screen_id, screen.id);
+ }
+
+ for (const auto& device_name : device_names) {
+ const auto it = std::find(gdi_names.begin(), gdi_names.end(), device_name);
+ if (it == gdi_names.end()) {
+      // `device_name` was not found in gdi_names, so assign it a new id
+      // beyond max_screen_id.
+ max_screen_id++;
+ screens->push_back({max_screen_id});
+ } else {
+ screens->push_back({gdi_screens[it - gdi_names.begin()]});
+ }
+ }
+
+ return true;
+}
+
+// static
+int ScreenCapturerWinDirectx::GetIndexFromScreenId(
+ ScreenId id,
+ const std::vector<std::string>& device_names) {
+ DesktopCapturer::SourceList screens;
+ if (!GetScreenListFromDeviceNames(device_names, &screens)) {
+ return -1;
+ }
+
+ RTC_DCHECK_EQ(device_names.size(), screens.size());
+
+ for (size_t i = 0; i < screens.size(); i++) {
+ if (screens[i].id == id) {
+ return static_cast<int>(i);
+ }
+ }
+
+ return -1;
+}
+
+ScreenCapturerWinDirectx::ScreenCapturerWinDirectx()
+ : controller_(DxgiDuplicatorController::Instance()) {}
+
+ScreenCapturerWinDirectx::~ScreenCapturerWinDirectx() = default;
+
+void ScreenCapturerWinDirectx::Start(Callback* callback) {
+ RTC_DCHECK(!callback_);
+ RTC_DCHECK(callback);
+ RecordCapturerImpl(DesktopCapturerId::kScreenCapturerWinDirectx);
+
+ callback_ = callback;
+}
+
+void ScreenCapturerWinDirectx::SetSharedMemoryFactory(
+ std::unique_ptr<SharedMemoryFactory> shared_memory_factory) {
+ shared_memory_factory_ = std::move(shared_memory_factory);
+}
+
+void ScreenCapturerWinDirectx::CaptureFrame() {
+ RTC_DCHECK(callback_);
+ TRACE_EVENT0("webrtc", "ScreenCapturerWinDirectx::CaptureFrame");
+
+ int64_t capture_start_time_nanos = rtc::TimeNanos();
+
+ // Note that the [] operator will create the ScreenCaptureFrameQueue if it
+ // doesn't exist, so this is safe.
+ ScreenCaptureFrameQueue<DxgiFrame>& frames =
+ frame_queue_map_[current_screen_id_];
+
+ frames.MoveToNextFrame();
+
+ if (!frames.current_frame()) {
+ frames.ReplaceCurrentFrame(
+ std::make_unique<DxgiFrame>(shared_memory_factory_.get()));
+ }
+
+ DxgiDuplicatorController::Result result;
+ if (current_screen_id_ == kFullDesktopScreenId) {
+ result = controller_->Duplicate(frames.current_frame());
+ } else {
+ result = controller_->DuplicateMonitor(frames.current_frame(),
+ current_screen_id_);
+ }
+
+ using DuplicateResult = DxgiDuplicatorController::Result;
+ if (result != DuplicateResult::SUCCEEDED) {
+ RTC_LOG(LS_ERROR) << "DxgiDuplicatorController failed to capture desktop, "
+ "error code "
+ << DxgiDuplicatorController::ResultName(result);
+ }
+ switch (result) {
+ case DuplicateResult::UNSUPPORTED_SESSION: {
+ RTC_LOG(LS_ERROR)
+ << "Current binary is running on a session not supported "
+ "by DirectX screen capturer.";
+ callback_->OnCaptureResult(Result::ERROR_PERMANENT, nullptr);
+ break;
+ }
+ case DuplicateResult::FRAME_PREPARE_FAILED: {
+ RTC_LOG(LS_ERROR) << "Failed to allocate a new DesktopFrame.";
+ // This usually means we do not have enough memory or SharedMemoryFactory
+ // cannot work correctly.
+ callback_->OnCaptureResult(Result::ERROR_PERMANENT, nullptr);
+ break;
+ }
+ case DuplicateResult::INVALID_MONITOR_ID: {
+ RTC_LOG(LS_ERROR) << "Invalid monitor id " << current_screen_id_;
+ callback_->OnCaptureResult(Result::ERROR_PERMANENT, nullptr);
+ break;
+ }
+ case DuplicateResult::INITIALIZATION_FAILED:
+ case DuplicateResult::DUPLICATION_FAILED: {
+ callback_->OnCaptureResult(Result::ERROR_TEMPORARY, nullptr);
+ break;
+ }
+ case DuplicateResult::SUCCEEDED: {
+ std::unique_ptr<DesktopFrame> frame =
+ frames.current_frame()->frame()->Share();
+
+ int capture_time_ms = (rtc::TimeNanos() - capture_start_time_nanos) /
+ rtc::kNumNanosecsPerMillisec;
+ RTC_HISTOGRAM_COUNTS_1000(
+ "WebRTC.DesktopCapture.Win.DirectXCapturerFrameTime",
+ capture_time_ms);
+ frame->set_capture_time_ms(capture_time_ms);
+ frame->set_capturer_id(DesktopCapturerId::kScreenCapturerWinDirectx);
+
+ // TODO(julien.isorce): http://crbug.com/945468. Set the icc profile on
+ // the frame, see WindowCapturerMac::CaptureFrame.
+
+ callback_->OnCaptureResult(Result::SUCCESS, std::move(frame));
+ break;
+ }
+ }
+}
+
+bool ScreenCapturerWinDirectx::GetSourceList(SourceList* sources) {
+ std::vector<std::string> device_names;
+ if (!controller_->GetDeviceNames(&device_names)) {
+ return false;
+ }
+
+ return GetScreenListFromDeviceNames(device_names, sources);
+}
+
+bool ScreenCapturerWinDirectx::SelectSource(SourceId id) {
+ if (id == kFullDesktopScreenId) {
+ current_screen_id_ = id;
+ return true;
+ }
+
+ std::vector<std::string> device_names;
+ if (!controller_->GetDeviceNames(&device_names)) {
+ return false;
+ }
+
+  int index = GetIndexFromScreenId(id, device_names);
+ if (index == -1) {
+ return false;
+ }
+
+ current_screen_id_ = index;
+ return true;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/win/screen_capturer_win_directx.h b/third_party/libwebrtc/modules/desktop_capture/win/screen_capturer_win_directx.h
new file mode 100644
index 0000000000..801a0632fc
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/win/screen_capturer_win_directx.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_DESKTOP_CAPTURE_WIN_SCREEN_CAPTURER_WIN_DIRECTX_H_
+#define MODULES_DESKTOP_CAPTURE_WIN_SCREEN_CAPTURER_WIN_DIRECTX_H_
+
+#include <d3dcommon.h>
+
+#include <memory>
+#include <unordered_map>
+#include <vector>
+
+#include "api/scoped_refptr.h"
+#include "modules/desktop_capture/desktop_capture_options.h"
+#include "modules/desktop_capture/desktop_capturer.h"
+#include "modules/desktop_capture/desktop_region.h"
+#include "modules/desktop_capture/screen_capture_frame_queue.h"
+#include "modules/desktop_capture/win/dxgi_duplicator_controller.h"
+#include "modules/desktop_capture/win/dxgi_frame.h"
+#include "rtc_base/system/rtc_export.h"
+
+namespace webrtc {
+
+// ScreenCapturerWinDirectx captures 32bit RGBA using DirectX.
+class RTC_EXPORT ScreenCapturerWinDirectx : public DesktopCapturer {
+ public:
+ using D3dInfo = DxgiDuplicatorController::D3dInfo;
+
+ // Whether the system supports DirectX based capturing.
+ static bool IsSupported();
+
+  // Returns the most recent D3dInfo composed by
+  // DxgiDuplicatorController::Initialize(). This function implicitly calls
+  // DxgiDuplicatorController::Initialize() if the controller has not been
+  // initialized, and it returns false with the output parameter left
+  // unchanged if DxgiDuplicatorController::Initialize() failed.
+  // The D3dInfo may change with the hardware configuration even without
+  // restarting the hardware and software (see https://goo.gl/OOCppq), so
+  // consumers should not cache the result returned by this function.
+ static bool RetrieveD3dInfo(D3dInfo* info);
+
+  // Whether the current process is running in a Windows session supported by
+  // ScreenCapturerWinDirectx.
+  // Using ScreenCapturerWinDirectx in unsupported sessions usually fails, but
+  // this behavior may vary across Windows versions, so consumers can always
+  // try the IsSupported() function as well.
+ static bool IsCurrentSessionSupported();
+
+  // Matches `device_names` against the result of GetScreenList() and creates
+  // a new SourceList containing only the entries in `device_names`. If this
+  // function returns true, consumers can assume that `device_names`.size()
+  // equals `screens`->size() and that `device_names`[i] and `screens`[i]
+  // indicate the same monitor on the system.
+ // Public for test only.
+ static bool GetScreenListFromDeviceNames(
+ const std::vector<std::string>& device_names,
+ DesktopCapturer::SourceList* screens);
+
+  // Matches `id` against the result of GetScreenListFromDeviceNames() and
+  // returns the index of the corresponding entry in `device_names`. Returns
+  // -1 if `id` cannot be found.
+ // Public for test only.
+ static int GetIndexFromScreenId(ScreenId id,
+ const std::vector<std::string>& device_names);
+
+ explicit ScreenCapturerWinDirectx();
+
+ ~ScreenCapturerWinDirectx() override;
+
+ ScreenCapturerWinDirectx(const ScreenCapturerWinDirectx&) = delete;
+ ScreenCapturerWinDirectx& operator=(const ScreenCapturerWinDirectx&) = delete;
+
+ // DesktopCapturer implementation.
+ void Start(Callback* callback) override;
+ void SetSharedMemoryFactory(
+ std::unique_ptr<SharedMemoryFactory> shared_memory_factory) override;
+ void CaptureFrame() override;
+ bool GetSourceList(SourceList* sources) override;
+ bool SelectSource(SourceId id) override;
+
+ private:
+ const rtc::scoped_refptr<DxgiDuplicatorController> controller_;
+
+ // The underlying DxgiDuplicators may retain a reference to the frames that
+ // we ask them to duplicate so that they can continue returning valid frames
+ // in the event that the target has not been updated. Thus, we need to ensure
+ // that we have a separate frame queue for each source id, so that these held
+ // frames don't get overwritten with the data from another Duplicator/monitor.
+ std::unordered_map<SourceId, ScreenCaptureFrameQueue<DxgiFrame>>
+ frame_queue_map_;
+ std::unique_ptr<SharedMemoryFactory> shared_memory_factory_;
+ Callback* callback_ = nullptr;
+ SourceId current_screen_id_ = kFullDesktopScreenId;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_DESKTOP_CAPTURE_WIN_SCREEN_CAPTURER_WIN_DIRECTX_H_
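
A minimal sketch of driving the capturer once. A real client pumps CaptureFrame() periodically from a single thread, and the callback must outlive the capturer's use of it; the logging body is illustrative.

#include <memory>

#include "modules/desktop_capture/desktop_capture_types.h"
#include "modules/desktop_capture/win/screen_capturer_win_directx.h"
#include "rtc_base/logging.h"

class LoggingCallback : public webrtc::DesktopCapturer::Callback {
 public:
  void OnCaptureResult(webrtc::DesktopCapturer::Result result,
                       std::unique_ptr<webrtc::DesktopFrame> frame) override {
    if (result == webrtc::DesktopCapturer::Result::SUCCESS) {
      RTC_LOG(LS_INFO) << "Captured " << frame->size().width() << "x"
                       << frame->size().height();
    }
  }
};

void CaptureOnce() {
  if (!webrtc::ScreenCapturerWinDirectx::IsSupported())
    return;
  webrtc::ScreenCapturerWinDirectx capturer;
  LoggingCallback callback;
  capturer.Start(&callback);
  capturer.SelectSource(webrtc::kFullDesktopScreenId);
  capturer.CaptureFrame();  // Result is delivered via OnCaptureResult().
}
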
diff --git a/third_party/libwebrtc/modules/desktop_capture/win/screen_capturer_win_directx_unittest.cc b/third_party/libwebrtc/modules/desktop_capture/win/screen_capturer_win_directx_unittest.cc
new file mode 100644
index 0000000000..c9f46f782c
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/win/screen_capturer_win_directx_unittest.cc
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/win/screen_capturer_win_directx.h"
+
+#include <string>
+#include <vector>
+
+#include "modules/desktop_capture/desktop_capturer.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+// This test cannot ensure GetScreenListFromDeviceNames() won't reorder the
+// devices in its output, since the device name is missing.
+TEST(ScreenCaptureUtilsTest, GetScreenListFromDeviceNamesAndGetIndex) {
+ const std::vector<std::string> device_names = {
+ "\\\\.\\DISPLAY0",
+ "\\\\.\\DISPLAY1",
+ "\\\\.\\DISPLAY2",
+ };
+ DesktopCapturer::SourceList screens;
+ ASSERT_TRUE(ScreenCapturerWinDirectx::GetScreenListFromDeviceNames(
+ device_names, &screens));
+ ASSERT_EQ(device_names.size(), screens.size());
+
+ for (size_t i = 0; i < screens.size(); i++) {
+ ASSERT_EQ(ScreenCapturerWinDirectx::GetIndexFromScreenId(screens[i].id,
+ device_names),
+ static_cast<int>(i));
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/win/screen_capturer_win_gdi.cc b/third_party/libwebrtc/modules/desktop_capture/win/screen_capturer_win_gdi.cc
new file mode 100644
index 0000000000..4ab864b4b9
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/win/screen_capturer_win_gdi.cc
@@ -0,0 +1,243 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/win/screen_capturer_win_gdi.h"
+
+#include <utility>
+
+#include "modules/desktop_capture/desktop_capture_metrics_helper.h"
+#include "modules/desktop_capture/desktop_capture_options.h"
+#include "modules/desktop_capture/desktop_capture_types.h"
+#include "modules/desktop_capture/desktop_frame.h"
+#include "modules/desktop_capture/desktop_frame_win.h"
+#include "modules/desktop_capture/desktop_region.h"
+#include "modules/desktop_capture/mouse_cursor.h"
+#include "modules/desktop_capture/win/cursor.h"
+#include "modules/desktop_capture/win/desktop.h"
+#include "modules/desktop_capture/win/screen_capture_utils.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/time_utils.h"
+#include "rtc_base/trace_event.h"
+#include "system_wrappers/include/metrics.h"
+
+namespace webrtc {
+
+namespace {
+
+// Constants from dwmapi.h.
+const UINT DWM_EC_DISABLECOMPOSITION = 0;
+const UINT DWM_EC_ENABLECOMPOSITION = 1;
+
+const wchar_t kDwmapiLibraryName[] = L"dwmapi.dll";
+
+} // namespace
+
+ScreenCapturerWinGdi::ScreenCapturerWinGdi(
+ const DesktopCaptureOptions& options) {
+ if (options.disable_effects()) {
+ // Load dwmapi.dll dynamically since it is not available on XP.
+ if (!dwmapi_library_)
+ dwmapi_library_ = LoadLibraryW(kDwmapiLibraryName);
+
+ if (dwmapi_library_) {
+ composition_func_ = reinterpret_cast<DwmEnableCompositionFunc>(
+ GetProcAddress(dwmapi_library_, "DwmEnableComposition"));
+      composition_enabled_func_ =
+          reinterpret_cast<DwmIsCompositionEnabledFunc>(
+              GetProcAddress(dwmapi_library_, "DwmIsCompositionEnabled"));
+ }
+ }
+}
+
+ScreenCapturerWinGdi::~ScreenCapturerWinGdi() {
+ if (desktop_dc_)
+ ReleaseDC(NULL, desktop_dc_);
+ if (memory_dc_)
+ DeleteDC(memory_dc_);
+
+ // Restore Aero.
+ if (composition_func_)
+ (*composition_func_)(DWM_EC_ENABLECOMPOSITION);
+
+ if (dwmapi_library_)
+ FreeLibrary(dwmapi_library_);
+}
+
+void ScreenCapturerWinGdi::SetSharedMemoryFactory(
+ std::unique_ptr<SharedMemoryFactory> shared_memory_factory) {
+ shared_memory_factory_ = std::move(shared_memory_factory);
+}
+
+void ScreenCapturerWinGdi::CaptureFrame() {
+ TRACE_EVENT0("webrtc", "ScreenCapturerWinGdi::CaptureFrame");
+ int64_t capture_start_time_nanos = rtc::TimeNanos();
+
+ queue_.MoveToNextFrame();
+ if (queue_.current_frame() && queue_.current_frame()->IsShared()) {
+ RTC_DLOG(LS_WARNING) << "Overwriting frame that is still shared.";
+ }
+
+ // Make sure the GDI capture resources are up-to-date.
+ PrepareCaptureResources();
+
+ if (!CaptureImage()) {
+ RTC_LOG(LS_WARNING) << "Failed to capture screen by GDI.";
+ callback_->OnCaptureResult(Result::ERROR_TEMPORARY, nullptr);
+ return;
+ }
+
+ // Emit the current frame.
+ std::unique_ptr<DesktopFrame> frame = queue_.current_frame()->Share();
+ frame->set_dpi(DesktopVector(GetDeviceCaps(desktop_dc_, LOGPIXELSX),
+ GetDeviceCaps(desktop_dc_, LOGPIXELSY)));
+ frame->mutable_updated_region()->SetRect(
+ DesktopRect::MakeSize(frame->size()));
+
+ int capture_time_ms = (rtc::TimeNanos() - capture_start_time_nanos) /
+ rtc::kNumNanosecsPerMillisec;
+ RTC_HISTOGRAM_COUNTS_1000(
+ "WebRTC.DesktopCapture.Win.ScreenGdiCapturerFrameTime", capture_time_ms);
+ frame->set_capture_time_ms(capture_time_ms);
+ frame->set_capturer_id(DesktopCapturerId::kScreenCapturerWinGdi);
+ callback_->OnCaptureResult(Result::SUCCESS, std::move(frame));
+}
+
+bool ScreenCapturerWinGdi::GetSourceList(SourceList* sources) {
+ return webrtc::GetScreenList(sources);
+}
+
+bool ScreenCapturerWinGdi::SelectSource(SourceId id) {
+ bool valid = IsScreenValid(id, &current_device_key_);
+ if (valid)
+ current_screen_id_ = id;
+ return valid;
+}
+
+void ScreenCapturerWinGdi::Start(Callback* callback) {
+ RTC_DCHECK(!callback_);
+ RTC_DCHECK(callback);
+ RecordCapturerImpl(DesktopCapturerId::kScreenCapturerWinGdi);
+
+ callback_ = callback;
+
+ // Vote to disable Aero composited desktop effects while capturing. Windows
+ // will restore Aero automatically if the process exits. This has no effect
+ // under Windows 8 or higher. See crbug.com/124018.
+ if (composition_func_)
+ (*composition_func_)(DWM_EC_DISABLECOMPOSITION);
+}
+
+void ScreenCapturerWinGdi::PrepareCaptureResources() {
+ // Switch to the desktop receiving user input if different from the current
+ // one.
+ std::unique_ptr<Desktop> input_desktop(Desktop::GetInputDesktop());
+ if (input_desktop && !desktop_.IsSame(*input_desktop)) {
+ // Release GDI resources otherwise SetThreadDesktop will fail.
+ if (desktop_dc_) {
+ ReleaseDC(NULL, desktop_dc_);
+ desktop_dc_ = nullptr;
+ }
+
+ if (memory_dc_) {
+ DeleteDC(memory_dc_);
+ memory_dc_ = nullptr;
+ }
+
+    // If SetThreadDesktop() fails, the thread is still assigned a desktop,
+    // so we can continue capturing screen bits, just from the wrong desktop.
+ desktop_.SetThreadDesktop(input_desktop.release());
+
+ // Re-assert our vote to disable Aero.
+ // See crbug.com/124018 and crbug.com/129906.
+ if (composition_func_) {
+ (*composition_func_)(DWM_EC_DISABLECOMPOSITION);
+ }
+ }
+
+ // If the display configurations have changed then recreate GDI resources.
+ if (display_configuration_monitor_.IsChanged()) {
+ if (desktop_dc_) {
+ ReleaseDC(NULL, desktop_dc_);
+ desktop_dc_ = nullptr;
+ }
+ if (memory_dc_) {
+ DeleteDC(memory_dc_);
+ memory_dc_ = nullptr;
+ }
+ }
+
+ if (!desktop_dc_) {
+ RTC_DCHECK(!memory_dc_);
+
+ // Create GDI device contexts to capture from the desktop into memory.
+ desktop_dc_ = GetDC(nullptr);
+ RTC_CHECK(desktop_dc_);
+ memory_dc_ = CreateCompatibleDC(desktop_dc_);
+ RTC_CHECK(memory_dc_);
+
+ // Make sure the frame buffers will be reallocated.
+ queue_.Reset();
+ }
+}
+
+bool ScreenCapturerWinGdi::CaptureImage() {
+ RTC_DCHECK(IsGUIThread(false));
+ DesktopRect screen_rect =
+ GetScreenRect(current_screen_id_, current_device_key_);
+ if (screen_rect.is_empty()) {
+ RTC_LOG(LS_WARNING) << "Failed to get screen rect.";
+ return false;
+ }
+
+ DesktopSize size = screen_rect.size();
+  // If the current buffer is from an older generation (its size no longer
+  // matches the screen), allocate a new one. Note that we can't reallocate
+  // other buffers at this point, since the caller may still be reading from
+  // them.
+ if (!queue_.current_frame() ||
+ !queue_.current_frame()->size().equals(screen_rect.size())) {
+ RTC_DCHECK(desktop_dc_);
+ RTC_DCHECK(memory_dc_);
+
+ std::unique_ptr<DesktopFrame> buffer = DesktopFrameWin::Create(
+ size, shared_memory_factory_.get(), desktop_dc_);
+ if (!buffer) {
+ RTC_LOG(LS_WARNING) << "Failed to create frame buffer.";
+ return false;
+ }
+ queue_.ReplaceCurrentFrame(SharedDesktopFrame::Wrap(std::move(buffer)));
+ }
+ queue_.current_frame()->set_top_left(
+ screen_rect.top_left().subtract(GetFullscreenRect().top_left()));
+
+ // Select the target bitmap into the memory dc and copy the rect from desktop
+ // to memory.
+ DesktopFrameWin* current = static_cast<DesktopFrameWin*>(
+ queue_.current_frame()->GetUnderlyingFrame());
+ HGDIOBJ previous_object = SelectObject(memory_dc_, current->bitmap());
+ if (!previous_object || previous_object == HGDI_ERROR) {
+ RTC_LOG(LS_WARNING) << "Failed to select current bitmap into memery dc.";
+ return false;
+ }
+
+ bool result = (BitBlt(memory_dc_, 0, 0, screen_rect.width(),
+ screen_rect.height(), desktop_dc_, screen_rect.left(),
+ screen_rect.top(), SRCCOPY | CAPTUREBLT) != FALSE);
+ if (!result) {
+ RTC_LOG_GLE(LS_WARNING) << "BitBlt failed";
+ }
+
+  // Select back the previously selected object so that the device context
+  // can be destroyed independently of the bitmap if needed.
+ SelectObject(memory_dc_, previous_object);
+
+ return result;
+}
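+
+// For reference, the bare GDI pattern the capture path above implements
+// (an illustrative sketch with error handling omitted; `w`, `h`, `x`, `y`
+// are assumed screen dimensions and origin):
+//
+//   HDC screen_dc = GetDC(nullptr);
+//   HDC mem_dc = CreateCompatibleDC(screen_dc);
+//   HBITMAP bitmap = CreateCompatibleBitmap(screen_dc, w, h);
+//   HGDIOBJ old = SelectObject(mem_dc, bitmap);
+//   BitBlt(mem_dc, 0, 0, w, h, screen_dc, x, y, SRCCOPY | CAPTUREBLT);
+//   SelectObject(mem_dc, old);  // Detach before tearing down the DC.
+//   DeleteDC(mem_dc);
+//   ReleaseDC(nullptr, screen_dc);
+//
+// (The real code allocates the bitmap through DesktopFrameWin instead, so the
+// pixels land in a DIB section the frame owns.)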
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/win/screen_capturer_win_gdi.h b/third_party/libwebrtc/modules/desktop_capture/win/screen_capturer_win_gdi.h
new file mode 100644
index 0000000000..87c1ecfc8c
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/win/screen_capturer_win_gdi.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_DESKTOP_CAPTURE_WIN_SCREEN_CAPTURER_WIN_GDI_H_
+#define MODULES_DESKTOP_CAPTURE_WIN_SCREEN_CAPTURER_WIN_GDI_H_
+
+#include <windows.h>
+
+#include <memory>
+
+#include "modules/desktop_capture/desktop_capturer.h"
+#include "modules/desktop_capture/screen_capture_frame_queue.h"
+#include "modules/desktop_capture/shared_desktop_frame.h"
+#include "modules/desktop_capture/win/display_configuration_monitor.h"
+#include "modules/desktop_capture/win/scoped_thread_desktop.h"
+
+namespace webrtc {
+
+// ScreenCapturerWinGdi captures 32-bit RGB using GDI.
+//
+// ScreenCapturerWinGdi is double-buffered as required by ScreenCapturer.
+// This class does not detect DesktopFrame::updated_region(); the field is
+// always set to the entire frame rectangle. ScreenCapturerDifferWrapper
+// should be used if that functionality is necessary.
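+//
+// Illustrative usage (a sketch, not part of the original header; `callback`
+// is an assumed DesktopCapturer::Callback implementation):
+//
+//   DesktopCaptureOptions options = DesktopCaptureOptions::CreateDefault();
+//   ScreenCapturerWinGdi capturer(options);
+//   capturer.Start(&callback);
+//   capturer.SelectSource(kFullDesktopScreenId);
+//   capturer.CaptureFrame();  // Delivers a frame via OnCaptureResult().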
+class ScreenCapturerWinGdi : public DesktopCapturer {
+ public:
+ explicit ScreenCapturerWinGdi(const DesktopCaptureOptions& options);
+ ~ScreenCapturerWinGdi() override;
+
+ ScreenCapturerWinGdi(const ScreenCapturerWinGdi&) = delete;
+ ScreenCapturerWinGdi& operator=(const ScreenCapturerWinGdi&) = delete;
+
+ // Overridden from ScreenCapturer:
+ void Start(Callback* callback) override;
+ void SetSharedMemoryFactory(
+ std::unique_ptr<SharedMemoryFactory> shared_memory_factory) override;
+ void CaptureFrame() override;
+ bool GetSourceList(SourceList* sources) override;
+ bool SelectSource(SourceId id) override;
+
+ private:
+ typedef HRESULT(WINAPI* DwmEnableCompositionFunc)(UINT);
+ typedef HRESULT(WINAPI* DwmIsCompositionEnabledFunc)(BOOL*);
+
+ // Make sure that the device contexts match the screen configuration.
+ void PrepareCaptureResources();
+
+ // Captures the current screen contents into the current buffer. Returns true
+ // if succeeded.
+ bool CaptureImage();
+
+ // Capture the current cursor shape.
+ void CaptureCursor();
+
+ Callback* callback_ = nullptr;
+ std::unique_ptr<SharedMemoryFactory> shared_memory_factory_;
+ SourceId current_screen_id_ = kFullDesktopScreenId;
+ std::wstring current_device_key_;
+
+ ScopedThreadDesktop desktop_;
+
+ // GDI resources used for screen capture.
+ HDC desktop_dc_ = NULL;
+ HDC memory_dc_ = NULL;
+
+  // Queue of frame buffers.
+ ScreenCaptureFrameQueue<SharedDesktopFrame> queue_;
+
+ DisplayConfigurationMonitor display_configuration_monitor_;
+
+ HMODULE dwmapi_library_ = NULL;
+ DwmEnableCompositionFunc composition_func_ = nullptr;
+  DwmIsCompositionEnabledFunc composition_enabled_func_ = nullptr;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_DESKTOP_CAPTURE_WIN_SCREEN_CAPTURER_WIN_GDI_H_
diff --git a/third_party/libwebrtc/modules/desktop_capture/win/screen_capturer_win_magnifier.cc b/third_party/libwebrtc/modules/desktop_capture/win/screen_capturer_win_magnifier.cc
new file mode 100644
index 0000000000..ce747e0141
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/win/screen_capturer_win_magnifier.cc
@@ -0,0 +1,398 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/win/screen_capturer_win_magnifier.h"
+
+#include <utility>
+
+#include "modules/desktop_capture/desktop_capture_metrics_helper.h"
+#include "modules/desktop_capture/desktop_capture_options.h"
+#include "modules/desktop_capture/desktop_capture_types.h"
+#include "modules/desktop_capture/desktop_frame.h"
+#include "modules/desktop_capture/desktop_frame_win.h"
+#include "modules/desktop_capture/desktop_region.h"
+#include "modules/desktop_capture/mouse_cursor.h"
+#include "modules/desktop_capture/win/cursor.h"
+#include "modules/desktop_capture/win/desktop.h"
+#include "modules/desktop_capture/win/screen_capture_utils.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/time_utils.h"
+#include "system_wrappers/include/metrics.h"
+
+namespace webrtc {
+
+namespace {
+DWORD GetTlsIndex() {
+ static const DWORD tls_index = TlsAlloc();
+ RTC_DCHECK(tls_index != TLS_OUT_OF_INDEXES);
+ return tls_index;
+}
+
+} // namespace
+
+// kMagnifierWindowClass has to be "Magnifier" according to the Magnification
+// API. The other strings can be anything.
+static wchar_t kMagnifierHostClass[] = L"ScreenCapturerWinMagnifierHost";
+static wchar_t kHostWindowName[] = L"MagnifierHost";
+static wchar_t kMagnifierWindowClass[] = L"Magnifier";
+static wchar_t kMagnifierWindowName[] = L"MagnifierWindow";
+
+ScreenCapturerWinMagnifier::ScreenCapturerWinMagnifier() = default;
+ScreenCapturerWinMagnifier::~ScreenCapturerWinMagnifier() {
+ // DestroyWindow must be called before MagUninitialize. magnifier_window_ is
+ // destroyed automatically when host_window_ is destroyed.
+ if (host_window_) {
+ DestroyWindow(host_window_);
+ host_window_ = NULL;
+ }
+
+ if (magnifier_initialized_) {
+ mag_uninitialize_func_();
+ magnifier_initialized_ = false;
+ }
+
+ if (mag_lib_handle_) {
+ FreeLibrary(mag_lib_handle_);
+ mag_lib_handle_ = NULL;
+ }
+
+ if (desktop_dc_) {
+ ReleaseDC(NULL, desktop_dc_);
+ desktop_dc_ = NULL;
+ }
+}
+
+void ScreenCapturerWinMagnifier::Start(Callback* callback) {
+ RTC_DCHECK(!callback_);
+ RTC_DCHECK(callback);
+ RecordCapturerImpl(DesktopCapturerId::kScreenCapturerWinMagnifier);
+
+ callback_ = callback;
+
+ if (!InitializeMagnifier()) {
+ RTC_LOG_F(LS_WARNING) << "Magnifier initialization failed.";
+ }
+}
+
+void ScreenCapturerWinMagnifier::SetSharedMemoryFactory(
+ std::unique_ptr<SharedMemoryFactory> shared_memory_factory) {
+ shared_memory_factory_ = std::move(shared_memory_factory);
+}
+
+void ScreenCapturerWinMagnifier::CaptureFrame() {
+ RTC_DCHECK(callback_);
+ if (!magnifier_initialized_) {
+ RTC_LOG_F(LS_WARNING) << "Magnifier initialization failed.";
+ callback_->OnCaptureResult(Result::ERROR_PERMANENT, nullptr);
+ return;
+ }
+
+ int64_t capture_start_time_nanos = rtc::TimeNanos();
+
+ // Switch to the desktop receiving user input if different from the current
+ // one.
+ std::unique_ptr<Desktop> input_desktop(Desktop::GetInputDesktop());
+  if (input_desktop && !desktop_.IsSame(*input_desktop)) {
+ // Release GDI resources otherwise SetThreadDesktop will fail.
+ if (desktop_dc_) {
+ ReleaseDC(NULL, desktop_dc_);
+ desktop_dc_ = NULL;
+ }
+    // If SetThreadDesktop() fails, the thread is still assigned a desktop,
+    // so we can continue capturing screen bits, just from the wrong desktop.
+ desktop_.SetThreadDesktop(input_desktop.release());
+ }
+
+ DesktopRect rect = GetScreenRect(current_screen_id_, current_device_key_);
+ queue_.MoveToNextFrame();
+ CreateCurrentFrameIfNecessary(rect.size());
+  // CaptureImage may fail in some situations, e.g. Windows 8 Metro mode, so
+  // defer to the fallback capturer if the magnifier capturer did not work.
+ if (!CaptureImage(rect)) {
+ RTC_LOG_F(LS_WARNING) << "Magnifier capturer failed to capture a frame.";
+ callback_->OnCaptureResult(Result::ERROR_PERMANENT, nullptr);
+ return;
+ }
+
+ // Emit the current frame.
+ std::unique_ptr<DesktopFrame> frame = queue_.current_frame()->Share();
+ frame->set_dpi(DesktopVector(GetDeviceCaps(desktop_dc_, LOGPIXELSX),
+ GetDeviceCaps(desktop_dc_, LOGPIXELSY)));
+ frame->mutable_updated_region()->SetRect(
+ DesktopRect::MakeSize(frame->size()));
+
+ int capture_time_ms = (rtc::TimeNanos() - capture_start_time_nanos) /
+ rtc::kNumNanosecsPerMillisec;
+ RTC_HISTOGRAM_COUNTS_1000(
+ "WebRTC.DesktopCapture.Win.MagnifierCapturerFrameTime", capture_time_ms);
+ frame->set_capture_time_ms(capture_time_ms);
+ frame->set_capturer_id(DesktopCapturerId::kScreenCapturerWinMagnifier);
+ callback_->OnCaptureResult(Result::SUCCESS, std::move(frame));
+}
+
+bool ScreenCapturerWinMagnifier::GetSourceList(SourceList* sources) {
+ return webrtc::GetScreenList(sources);
+}
+
+bool ScreenCapturerWinMagnifier::SelectSource(SourceId id) {
+ if (IsScreenValid(id, &current_device_key_)) {
+ current_screen_id_ = id;
+ return true;
+ }
+
+ return false;
+}
+
+void ScreenCapturerWinMagnifier::SetExcludedWindow(WindowId excluded_window) {
+ excluded_window_ = (HWND)excluded_window;
+ if (excluded_window_ && magnifier_initialized_) {
+ set_window_filter_list_func_(magnifier_window_, MW_FILTERMODE_EXCLUDE, 1,
+ &excluded_window_);
+ }
+}
+
+bool ScreenCapturerWinMagnifier::CaptureImage(const DesktopRect& rect) {
+ RTC_DCHECK(magnifier_initialized_);
+
+ // Set the magnifier control to cover the captured rect. The content of the
+ // magnifier control will be the captured image.
+ BOOL result = SetWindowPos(magnifier_window_, NULL, rect.left(), rect.top(),
+ rect.width(), rect.height(), 0);
+ if (!result) {
+ RTC_LOG_F(LS_WARNING) << "Failed to call SetWindowPos: " << GetLastError()
+ << ". Rect = {" << rect.left() << ", " << rect.top()
+ << ", " << rect.right() << ", " << rect.bottom()
+ << "}";
+ return false;
+ }
+
+ magnifier_capture_succeeded_ = false;
+
+ RECT native_rect = {rect.left(), rect.top(), rect.right(), rect.bottom()};
+
+ TlsSetValue(GetTlsIndex(), this);
+ // OnCaptured will be called via OnMagImageScalingCallback and fill in the
+ // frame before set_window_source_func_ returns.
+ result = set_window_source_func_(magnifier_window_, native_rect);
+
+ if (!result) {
+ RTC_LOG_F(LS_WARNING) << "Failed to call MagSetWindowSource: "
+ << GetLastError() << ". Rect = {" << rect.left()
+ << ", " << rect.top() << ", " << rect.right() << ", "
+ << rect.bottom() << "}";
+ return false;
+ }
+
+ return magnifier_capture_succeeded_;
+}
+
+BOOL ScreenCapturerWinMagnifier::OnMagImageScalingCallback(
+ HWND hwnd,
+ void* srcdata,
+ MAGIMAGEHEADER srcheader,
+ void* destdata,
+ MAGIMAGEHEADER destheader,
+ RECT unclipped,
+ RECT clipped,
+ HRGN dirty) {
+ ScreenCapturerWinMagnifier* owner =
+ reinterpret_cast<ScreenCapturerWinMagnifier*>(TlsGetValue(GetTlsIndex()));
+ TlsSetValue(GetTlsIndex(), nullptr);
+ owner->OnCaptured(srcdata, srcheader);
+
+ return TRUE;
+}
+
+// TODO(zijiehe): These functions are available on Windows Vista and later, so
+// we no longer need LoadLibrary and GetProcAddress. Use regular includes and
+// direct function calls instead of a dynamically loaded library.
+bool ScreenCapturerWinMagnifier::InitializeMagnifier() {
+ RTC_DCHECK(!magnifier_initialized_);
+
+ if (GetSystemMetrics(SM_CMONITORS) != 1) {
+ // Do not try to use the magnifier in multi-screen setup (where the API
+ // crashes sometimes).
+ RTC_LOG_F(LS_WARNING) << "Magnifier capturer cannot work on multi-screen "
+ "system.";
+ return false;
+ }
+
+ desktop_dc_ = GetDC(nullptr);
+
+ mag_lib_handle_ = LoadLibraryW(L"Magnification.dll");
+ if (!mag_lib_handle_)
+ return false;
+
+ // Initialize Magnification API function pointers.
+ mag_initialize_func_ = reinterpret_cast<MagInitializeFunc>(
+ GetProcAddress(mag_lib_handle_, "MagInitialize"));
+ mag_uninitialize_func_ = reinterpret_cast<MagUninitializeFunc>(
+ GetProcAddress(mag_lib_handle_, "MagUninitialize"));
+ set_window_source_func_ = reinterpret_cast<MagSetWindowSourceFunc>(
+ GetProcAddress(mag_lib_handle_, "MagSetWindowSource"));
+ set_window_filter_list_func_ = reinterpret_cast<MagSetWindowFilterListFunc>(
+ GetProcAddress(mag_lib_handle_, "MagSetWindowFilterList"));
+ set_image_scaling_callback_func_ =
+ reinterpret_cast<MagSetImageScalingCallbackFunc>(
+ GetProcAddress(mag_lib_handle_, "MagSetImageScalingCallback"));
+
+ if (!mag_initialize_func_ || !mag_uninitialize_func_ ||
+ !set_window_source_func_ || !set_window_filter_list_func_ ||
+ !set_image_scaling_callback_func_) {
+ RTC_LOG_F(LS_WARNING) << "Failed to initialize ScreenCapturerWinMagnifier: "
+ "library functions missing.";
+ return false;
+ }
+
+ BOOL result = mag_initialize_func_();
+ if (!result) {
+ RTC_LOG_F(LS_WARNING) << "Failed to initialize ScreenCapturerWinMagnifier: "
+ "error from MagInitialize "
+ << GetLastError();
+ return false;
+ }
+
+ HMODULE hInstance = nullptr;
+ result =
+ GetModuleHandleExA(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS |
+ GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT,
+ reinterpret_cast<char*>(&DefWindowProc), &hInstance);
+ if (!result) {
+ mag_uninitialize_func_();
+ RTC_LOG_F(LS_WARNING) << "Failed to initialize ScreenCapturerWinMagnifier: "
+ "error from GetModulehandleExA "
+ << GetLastError();
+ return false;
+ }
+
+  // Register the host window class. See the MSDN documentation of the
+  // Magnification API for more information.
+ WNDCLASSEXW wcex = {};
+ wcex.cbSize = sizeof(WNDCLASSEX);
+ wcex.lpfnWndProc = &DefWindowProc;
+ wcex.hInstance = hInstance;
+ wcex.hCursor = LoadCursor(nullptr, IDC_ARROW);
+ wcex.lpszClassName = kMagnifierHostClass;
+
+ // Ignore the error which may happen when the class is already registered.
+ RegisterClassExW(&wcex);
+
+ // Create the host window.
+ host_window_ =
+ CreateWindowExW(WS_EX_LAYERED, kMagnifierHostClass, kHostWindowName, 0, 0,
+ 0, 0, 0, nullptr, nullptr, hInstance, nullptr);
+ if (!host_window_) {
+ mag_uninitialize_func_();
+ RTC_LOG_F(LS_WARNING) << "Failed to initialize ScreenCapturerWinMagnifier: "
+ "error from creating host window "
+ << GetLastError();
+ return false;
+ }
+
+ // Create the magnifier control.
+ magnifier_window_ = CreateWindowW(kMagnifierWindowClass, kMagnifierWindowName,
+ WS_CHILD | WS_VISIBLE, 0, 0, 0, 0,
+ host_window_, nullptr, hInstance, nullptr);
+ if (!magnifier_window_) {
+ mag_uninitialize_func_();
+ RTC_LOG_F(LS_WARNING) << "Failed to initialize ScreenCapturerWinMagnifier: "
+ "error from creating magnifier window "
+ << GetLastError();
+ return false;
+ }
+
+ // Hide the host window.
+ ShowWindow(host_window_, SW_HIDE);
+
+ // Set the scaling callback to receive captured image.
+ result = set_image_scaling_callback_func_(
+ magnifier_window_,
+ &ScreenCapturerWinMagnifier::OnMagImageScalingCallback);
+ if (!result) {
+ mag_uninitialize_func_();
+ RTC_LOG_F(LS_WARNING) << "Failed to initialize ScreenCapturerWinMagnifier: "
+ "error from MagSetImageScalingCallback "
+ << GetLastError();
+ return false;
+ }
+
+ if (excluded_window_) {
+ result = set_window_filter_list_func_(
+ magnifier_window_, MW_FILTERMODE_EXCLUDE, 1, &excluded_window_);
+ if (!result) {
+ mag_uninitialize_func_();
+ RTC_LOG_F(LS_WARNING)
+ << "Failed to initialize ScreenCapturerWinMagnifier: "
+ "error from MagSetWindowFilterList "
+ << GetLastError();
+ return false;
+ }
+ }
+
+ magnifier_initialized_ = true;
+ return true;
+}
+
+void ScreenCapturerWinMagnifier::OnCaptured(void* data,
+ const MAGIMAGEHEADER& header) {
+ DesktopFrame* current_frame = queue_.current_frame();
+
+ // Verify the format.
+ // TODO(jiayl): support capturing sources with pixel formats other than RGBA.
+ int captured_bytes_per_pixel = header.cbSize / header.width / header.height;
+ if (header.format != GUID_WICPixelFormat32bppRGBA ||
+ header.width != static_cast<UINT>(current_frame->size().width()) ||
+ header.height != static_cast<UINT>(current_frame->size().height()) ||
+ header.stride != static_cast<UINT>(current_frame->stride()) ||
+ captured_bytes_per_pixel != DesktopFrame::kBytesPerPixel) {
+    RTC_LOG_F(LS_WARNING)
+        << "Output format does not match the captured format: width = "
+        << header.width << ", height = " << header.height
+        << ", stride = " << header.stride << ", bpp = "
+        << captured_bytes_per_pixel << ", pixel format RGBA ? "
+        << (header.format == GUID_WICPixelFormat32bppRGBA) << ".";
+ return;
+ }
+
+ // Copy the data into the frame.
+ current_frame->CopyPixelsFrom(
+ reinterpret_cast<uint8_t*>(data), header.stride,
+ DesktopRect::MakeXYWH(0, 0, header.width, header.height));
+
+ magnifier_capture_succeeded_ = true;
+}
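+
+// Worked example of the format check above (illustrative): for a 300x200
+// RGBA capture, header.cbSize = 300 * 200 * 4 = 240000 bytes, so
+// captured_bytes_per_pixel = 240000 / 300 / 200 = 4, matching
+// DesktopFrame::kBytesPerPixel.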
+
+void ScreenCapturerWinMagnifier::CreateCurrentFrameIfNecessary(
+ const DesktopSize& size) {
+  // If the current buffer is from an older generation (its size no longer
+  // matches), allocate a new one. Note that we can't reallocate other buffers
+  // at this point, since the caller may still be reading from them.
+ if (!queue_.current_frame() || !queue_.current_frame()->size().equals(size)) {
+ std::unique_ptr<DesktopFrame> frame =
+ shared_memory_factory_
+ ? SharedMemoryDesktopFrame::Create(size,
+ shared_memory_factory_.get())
+ : std::unique_ptr<DesktopFrame>(new BasicDesktopFrame(size));
+ queue_.ReplaceCurrentFrame(SharedDesktopFrame::Wrap(std::move(frame)));
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/win/screen_capturer_win_magnifier.h b/third_party/libwebrtc/modules/desktop_capture/win/screen_capturer_win_magnifier.h
new file mode 100644
index 0000000000..07c5b1e9e6
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/win/screen_capturer_win_magnifier.h
@@ -0,0 +1,140 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_DESKTOP_CAPTURE_WIN_SCREEN_CAPTURER_WIN_MAGNIFIER_H_
+#define MODULES_DESKTOP_CAPTURE_WIN_SCREEN_CAPTURER_WIN_MAGNIFIER_H_
+
+#include <magnification.h>
+#include <wincodec.h>
+#include <windows.h>
+
+#include <memory>
+
+#include "modules/desktop_capture/desktop_capturer.h"
+#include "modules/desktop_capture/screen_capture_frame_queue.h"
+#include "modules/desktop_capture/screen_capturer_helper.h"
+#include "modules/desktop_capture/shared_desktop_frame.h"
+#include "modules/desktop_capture/win/scoped_thread_desktop.h"
+
+namespace webrtc {
+
+class DesktopFrame;
+class DesktopRect;
+
+// Captures the screen using the Magnification API to support window exclusion.
+// Each capturer must run on a dedicated thread because it uses thread-local
+// storage for redirecting the library callback. Also, the thread must have a
+// UI message loop to handle the window messages for the magnifier window.
+//
+// This class does not detect DesktopFrame::updated_region(); the field is
+// always set to the entire frame rectangle. ScreenCapturerDifferWrapper
+// should be used if that functionality is necessary.
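+//
+// Illustrative sketch (not part of the original header) of the TLS round-trip
+// used to reach the capturer from the static Magnification callback;
+// GetTlsIndex() lives in the implementation file, the members are from this
+// class:
+//
+//   TlsSetValue(GetTlsIndex(), this);
+//   set_window_source_func_(magnifier_window_, native_rect);
+//   // ...which synchronously invokes OnMagImageScalingCallback, where:
+//   auto* owner = reinterpret_cast<ScreenCapturerWinMagnifier*>(
+//       TlsGetValue(GetTlsIndex()));
+//   owner->OnCaptured(srcdata, srcheader);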
+class ScreenCapturerWinMagnifier : public DesktopCapturer {
+ public:
+ ScreenCapturerWinMagnifier();
+ ~ScreenCapturerWinMagnifier() override;
+
+ ScreenCapturerWinMagnifier(const ScreenCapturerWinMagnifier&) = delete;
+ ScreenCapturerWinMagnifier& operator=(const ScreenCapturerWinMagnifier&) =
+ delete;
+
+ // Overridden from ScreenCapturer:
+ void Start(Callback* callback) override;
+ void SetSharedMemoryFactory(
+ std::unique_ptr<SharedMemoryFactory> shared_memory_factory) override;
+ void CaptureFrame() override;
+ bool GetSourceList(SourceList* screens) override;
+ bool SelectSource(SourceId id) override;
+ void SetExcludedWindow(WindowId window) override;
+
+ private:
+ typedef BOOL(WINAPI* MagImageScalingCallback)(HWND hwnd,
+ void* srcdata,
+ MAGIMAGEHEADER srcheader,
+ void* destdata,
+ MAGIMAGEHEADER destheader,
+ RECT unclipped,
+ RECT clipped,
+ HRGN dirty);
+ typedef BOOL(WINAPI* MagInitializeFunc)(void);
+ typedef BOOL(WINAPI* MagUninitializeFunc)(void);
+ typedef BOOL(WINAPI* MagSetWindowSourceFunc)(HWND hwnd, RECT rect);
+ typedef BOOL(WINAPI* MagSetWindowFilterListFunc)(HWND hwnd,
+ DWORD dwFilterMode,
+ int count,
+ HWND* pHWND);
+ typedef BOOL(WINAPI* MagSetImageScalingCallbackFunc)(
+ HWND hwnd,
+ MagImageScalingCallback callback);
+
+ static BOOL WINAPI OnMagImageScalingCallback(HWND hwnd,
+ void* srcdata,
+ MAGIMAGEHEADER srcheader,
+ void* destdata,
+ MAGIMAGEHEADER destheader,
+ RECT unclipped,
+ RECT clipped,
+ HRGN dirty);
+
+  // Captures the screen within `rect` in desktop coordinates. Returns true on
+  // success.
+  // It can only capture the primary screen for now. The magnification library
+  // crashes under some screen configurations (e.g. secondary screen on top of
+  // primary screen) if it tries to capture a non-primary screen. The caller
+  // must make sure not to call it on non-primary screens.
+ bool CaptureImage(const DesktopRect& rect);
+
+ // Helper method for setting up the magnifier control. Returns true if
+ // succeeded.
+ bool InitializeMagnifier();
+
+ // Called by OnMagImageScalingCallback to output captured data.
+ void OnCaptured(void* data, const MAGIMAGEHEADER& header);
+
+ // Makes sure the current frame exists and matches `size`.
+ void CreateCurrentFrameIfNecessary(const DesktopSize& size);
+
+ Callback* callback_ = nullptr;
+ std::unique_ptr<SharedMemoryFactory> shared_memory_factory_;
+ ScreenId current_screen_id_ = kFullDesktopScreenId;
+ std::wstring current_device_key_;
+ HWND excluded_window_ = NULL;
+
+  // Queue of frame buffers.
+ ScreenCaptureFrameQueue<SharedDesktopFrame> queue_;
+
+ ScopedThreadDesktop desktop_;
+
+ // Used for getting the screen dpi.
+ HDC desktop_dc_ = NULL;
+
+ HMODULE mag_lib_handle_ = NULL;
+ MagInitializeFunc mag_initialize_func_ = nullptr;
+ MagUninitializeFunc mag_uninitialize_func_ = nullptr;
+ MagSetWindowSourceFunc set_window_source_func_ = nullptr;
+ MagSetWindowFilterListFunc set_window_filter_list_func_ = nullptr;
+ MagSetImageScalingCallbackFunc set_image_scaling_callback_func_ = nullptr;
+
+ // The hidden window hosting the magnifier control.
+ HWND host_window_ = NULL;
+ // The magnifier control that captures the screen.
+ HWND magnifier_window_ = NULL;
+
+ // True if the magnifier control has been successfully initialized.
+ bool magnifier_initialized_ = false;
+
+ // True if the last OnMagImageScalingCallback was called and handled
+ // successfully. Reset at the beginning of each CaptureImage call.
+ bool magnifier_capture_succeeded_ = true;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_DESKTOP_CAPTURE_WIN_SCREEN_CAPTURER_WIN_MAGNIFIER_H_
diff --git a/third_party/libwebrtc/modules/desktop_capture/win/selected_window_context.cc b/third_party/libwebrtc/modules/desktop_capture/win/selected_window_context.cc
new file mode 100644
index 0000000000..398ea1e53a
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/win/selected_window_context.cc
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/win/selected_window_context.h"
+
+namespace webrtc {
+
+SelectedWindowContext::SelectedWindowContext(
+ HWND selected_window,
+ DesktopRect selected_window_rect,
+ WindowCaptureHelperWin* window_capture_helper)
+ : selected_window_(selected_window),
+ selected_window_rect_(selected_window_rect),
+ window_capture_helper_(window_capture_helper) {
+ selected_window_thread_id_ =
+ GetWindowThreadProcessId(selected_window, &selected_window_process_id_);
+}
+
+bool SelectedWindowContext::IsSelectedWindowValid() const {
+ return selected_window_thread_id_ != 0;
+}
+
+bool SelectedWindowContext::IsWindowOwnedBySelectedWindow(HWND hwnd) const {
+ // This check works for drop-down menus & dialog pop-up windows.
+ if (GetAncestor(hwnd, GA_ROOTOWNER) == selected_window_) {
+ return true;
+ }
+
+  // Assume that all other windows are unrelated to the selected window.
+  // This will cause some windows that are actually related to be missed,
+  // e.g. context menus and tool-tips, but avoids the risk of capturing
+  // unrelated windows. Using heuristics such as matching the thread and
+  // process IDs suffers from false positives, e.g. in multi-document
+  // applications.
+
+ return false;
+}
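+
+// Example (illustrative): for a modal dialog `dlg` whose owner chain roots at
+// the selected window, GetAncestor(dlg, GA_ROOTOWNER) returns
+// selected_window_, so IsWindowOwnedBySelectedWindow(dlg) is true; a tool-tip
+// spawned without that owner relationship is reported as unrelated.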
+
+bool SelectedWindowContext::IsWindowOverlappingSelectedWindow(HWND hwnd) const {
+ return window_capture_helper_->AreWindowsOverlapping(hwnd, selected_window_,
+ selected_window_rect_);
+}
+
+HWND SelectedWindowContext::selected_window() const {
+ return selected_window_;
+}
+
+WindowCaptureHelperWin* SelectedWindowContext::window_capture_helper() const {
+ return window_capture_helper_;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/win/selected_window_context.h b/third_party/libwebrtc/modules/desktop_capture/win/selected_window_context.h
new file mode 100644
index 0000000000..99e38e3fa2
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/win/selected_window_context.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_DESKTOP_CAPTURE_WIN_SELECTED_WINDOW_CONTEXT_H_
+#define MODULES_DESKTOP_CAPTURE_WIN_SELECTED_WINDOW_CONTEXT_H_
+
+#include <windows.h>
+
+#include "modules/desktop_capture/desktop_geometry.h"
+#include "modules/desktop_capture/win/window_capture_utils.h"
+
+namespace webrtc {
+
+class SelectedWindowContext {
+ public:
+ SelectedWindowContext(HWND selected_window,
+ DesktopRect selected_window_rect,
+ WindowCaptureHelperWin* window_capture_helper);
+
+ bool IsSelectedWindowValid() const;
+
+ bool IsWindowOwnedBySelectedWindow(HWND hwnd) const;
+ bool IsWindowOverlappingSelectedWindow(HWND hwnd) const;
+
+ HWND selected_window() const;
+ WindowCaptureHelperWin* window_capture_helper() const;
+
+ private:
+ const HWND selected_window_;
+ const DesktopRect selected_window_rect_;
+ WindowCaptureHelperWin* const window_capture_helper_;
+ DWORD selected_window_thread_id_;
+ DWORD selected_window_process_id_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_DESKTOP_CAPTURE_WIN_SELECTED_WINDOW_CONTEXT_H_
diff --git a/third_party/libwebrtc/modules/desktop_capture/win/test_support/test_window.cc b/third_party/libwebrtc/modules/desktop_capture/win/test_support/test_window.cc
new file mode 100644
index 0000000000..c07ff74aa5
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/win/test_support/test_window.cc
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/win/test_support/test_window.h"
+
+namespace webrtc {
+namespace {
+
+const WCHAR kWindowClass[] = L"DesktopCaptureTestWindowClass";
+const int kWindowHeight = 200;
+const int kWindowWidth = 300;
+
+LRESULT CALLBACK WindowProc(HWND hwnd,
+ UINT msg,
+ WPARAM w_param,
+ LPARAM l_param) {
+ switch (msg) {
+ case WM_PAINT:
+ PAINTSTRUCT paint_struct;
+ HDC hdc = BeginPaint(hwnd, &paint_struct);
+
+ // Paint the window so the color is consistent and we can inspect the
+ // pixels in tests and know what to expect.
+ FillRect(hdc, &paint_struct.rcPaint,
+ CreateSolidBrush(RGB(kTestWindowRValue, kTestWindowGValue,
+ kTestWindowBValue)));
+
+ EndPaint(hwnd, &paint_struct);
+ }
+ return DefWindowProc(hwnd, msg, w_param, l_param);
+}
+
+} // namespace
+
+WindowInfo CreateTestWindow(const WCHAR* window_title,
+ const int height,
+ const int width,
+ const LONG extended_styles) {
+ WindowInfo info;
+ ::GetModuleHandleExW(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS |
+ GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT,
+ reinterpret_cast<LPCWSTR>(&WindowProc),
+ &info.window_instance);
+
+ WNDCLASSEXW wcex;
+ memset(&wcex, 0, sizeof(wcex));
+ wcex.cbSize = sizeof(wcex);
+ wcex.style = CS_HREDRAW | CS_VREDRAW;
+ wcex.hInstance = info.window_instance;
+ wcex.lpfnWndProc = &WindowProc;
+ wcex.lpszClassName = kWindowClass;
+ info.window_class = ::RegisterClassExW(&wcex);
+
+ // Use the default height and width if the caller did not supply the optional
+ // height and width parameters, or if they supplied invalid values.
+ int window_height = height <= 0 ? kWindowHeight : height;
+ int window_width = width <= 0 ? kWindowWidth : width;
+ info.hwnd =
+ ::CreateWindowExW(extended_styles, kWindowClass, window_title,
+ WS_OVERLAPPEDWINDOW, CW_USEDEFAULT, CW_USEDEFAULT,
+ window_width, window_height, /*parent_window=*/nullptr,
+ /*menu_bar=*/nullptr, info.window_instance,
+ /*additional_params=*/nullptr);
+
+ ::ShowWindow(info.hwnd, SW_SHOWNORMAL);
+ ::UpdateWindow(info.hwnd);
+ return info;
+}
+
+void ResizeTestWindow(const HWND hwnd, const int width, const int height) {
+ // SWP_NOMOVE results in the x and y params being ignored.
+ ::SetWindowPos(hwnd, HWND_TOP, /*x-coord=*/0, /*y-coord=*/0, width, height,
+ SWP_SHOWWINDOW | SWP_NOMOVE);
+ ::UpdateWindow(hwnd);
+}
+
+void MoveTestWindow(const HWND hwnd, const int x, const int y) {
+ // SWP_NOSIZE results in the width and height params being ignored.
+ ::SetWindowPos(hwnd, HWND_TOP, x, y, /*width=*/0, /*height=*/0,
+ SWP_SHOWWINDOW | SWP_NOSIZE);
+ ::UpdateWindow(hwnd);
+}
+
+void MinimizeTestWindow(const HWND hwnd) {
+ ::ShowWindow(hwnd, SW_MINIMIZE);
+}
+
+void UnminimizeTestWindow(const HWND hwnd) {
+ ::OpenIcon(hwnd);
+}
+
+void DestroyTestWindow(WindowInfo info) {
+ ::DestroyWindow(info.hwnd);
+ ::UnregisterClass(MAKEINTATOM(info.window_class), info.window_instance);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/win/test_support/test_window.h b/third_party/libwebrtc/modules/desktop_capture/win/test_support/test_window.h
new file mode 100644
index 0000000000..b055da7ccd
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/win/test_support/test_window.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef MODULES_DESKTOP_CAPTURE_WIN_TEST_SUPPORT_TEST_WINDOW_H_
+#define MODULES_DESKTOP_CAPTURE_WIN_TEST_SUPPORT_TEST_WINDOW_H_
+
+#ifndef WIN32_LEAN_AND_MEAN
+#define WIN32_LEAN_AND_MEAN
+#endif
+#include <windows.h>
+
+namespace webrtc {
+
+typedef unsigned char uint8_t;
+
+// Define an arbitrary color for the test window with unique R, G, and B values
+// so consumers can verify captured content in tests.
+const uint8_t kTestWindowRValue = 191;
+const uint8_t kTestWindowGValue = 99;
+const uint8_t kTestWindowBValue = 12;
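+
+// Example check against a captured 32-bit frame (an illustrative sketch;
+// assumes the usual BGRA byte order of DesktopFrame and a gtest context):
+//
+//   const uint8_t* pixel = frame.data();
+//   EXPECT_EQ(pixel[0], kTestWindowBValue);
+//   EXPECT_EQ(pixel[1], kTestWindowGValue);
+//   EXPECT_EQ(pixel[2], kTestWindowRValue);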
+
+struct WindowInfo {
+ HWND hwnd;
+ HINSTANCE window_instance;
+ ATOM window_class;
+};
+
+WindowInfo CreateTestWindow(const WCHAR* window_title,
+ int height = 0,
+ int width = 0,
+ LONG extended_styles = 0);
+
+void ResizeTestWindow(HWND hwnd, int width, int height);
+
+void MoveTestWindow(HWND hwnd, int x, int y);
+
+void MinimizeTestWindow(HWND hwnd);
+
+void UnminimizeTestWindow(HWND hwnd);
+
+void DestroyTestWindow(WindowInfo info);
+
+} // namespace webrtc
+
+#endif // MODULES_DESKTOP_CAPTURE_WIN_TEST_SUPPORT_TEST_WINDOW_H_
diff --git a/third_party/libwebrtc/modules/desktop_capture/win/wgc_capture_session.cc b/third_party/libwebrtc/modules/desktop_capture/win/wgc_capture_session.cc
new file mode 100644
index 0000000000..0ff2db27c7
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/win/wgc_capture_session.cc
@@ -0,0 +1,423 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/win/wgc_capture_session.h"
+
+#include <DispatcherQueue.h>
+#include <windows.graphics.capture.interop.h>
+#include <windows.graphics.directX.direct3d11.interop.h>
+#include <windows.graphics.h>
+#include <wrl/client.h>
+#include <wrl/event.h>
+
+#include <memory>
+#include <utility>
+#include <vector>
+
+#include "modules/desktop_capture/win/wgc_desktop_frame.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/time_utils.h"
+#include "rtc_base/win/create_direct3d_device.h"
+#include "rtc_base/win/get_activation_factory.h"
+#include "system_wrappers/include/metrics.h"
+
+using Microsoft::WRL::ComPtr;
+namespace WGC = ABI::Windows::Graphics::Capture;
+
+namespace webrtc {
+namespace {
+
+// We must use a BGRA pixel format that has 4 bytes per pixel, as required by
+// the DesktopFrame interface.
+constexpr auto kPixelFormat = ABI::Windows::Graphics::DirectX::
+ DirectXPixelFormat::DirectXPixelFormat_B8G8R8A8UIntNormalized;
+
+// The maximum time `GetFrame` will wait for a frame to arrive, if we don't have
+// any in the pool.
+constexpr int kMaxWaitForFrameMs = 50;
+constexpr int kMaxWaitForFirstFrameMs = 500;
+
+// These values are persisted to logs. Entries should not be renumbered and
+// numeric values should never be reused.
+enum class StartCaptureResult {
+ kSuccess = 0,
+ kSourceClosed = 1,
+ kAddClosedFailed = 2,
+ kDxgiDeviceCastFailed = 3,
+ kD3dDelayLoadFailed = 4,
+ kD3dDeviceCreationFailed = 5,
+ kFramePoolActivationFailed = 6,
+ // kFramePoolCastFailed = 7, (deprecated)
+ // kGetItemSizeFailed = 8, (deprecated)
+ kCreateFramePoolFailed = 9,
+ kCreateCaptureSessionFailed = 10,
+ kStartCaptureFailed = 11,
+ kMaxValue = kStartCaptureFailed
+};
+
+// These values are persisted to logs. Entries should not be renumbered and
+// numeric values should never be reused.
+enum class GetFrameResult {
+ kSuccess = 0,
+ kItemClosed = 1,
+ kTryGetNextFrameFailed = 2,
+ kFrameDropped = 3,
+ kGetSurfaceFailed = 4,
+ kDxgiInterfaceAccessFailed = 5,
+ kTexture2dCastFailed = 6,
+ kCreateMappedTextureFailed = 7,
+ kMapFrameFailed = 8,
+ kGetContentSizeFailed = 9,
+ kResizeMappedTextureFailed = 10,
+ kRecreateFramePoolFailed = 11,
+ kMaxValue = kRecreateFramePoolFailed
+};
+
+void RecordStartCaptureResult(StartCaptureResult error) {
+ RTC_HISTOGRAM_ENUMERATION(
+ "WebRTC.DesktopCapture.Win.WgcCaptureSessionStartResult",
+ static_cast<int>(error), static_cast<int>(StartCaptureResult::kMaxValue));
+}
+
+void RecordGetFrameResult(GetFrameResult error) {
+ RTC_HISTOGRAM_ENUMERATION(
+ "WebRTC.DesktopCapture.Win.WgcCaptureSessionGetFrameResult",
+ static_cast<int>(error), static_cast<int>(GetFrameResult::kMaxValue));
+}
+
+} // namespace
+
+WgcCaptureSession::WgcCaptureSession(ComPtr<ID3D11Device> d3d11_device,
+ ComPtr<WGC::IGraphicsCaptureItem> item,
+ ABI::Windows::Graphics::SizeInt32 size)
+ : d3d11_device_(std::move(d3d11_device)),
+ item_(std::move(item)),
+      size_(size) {}
+
+WgcCaptureSession::~WgcCaptureSession() {
+ RemoveEventHandlers();
+}
+
+HRESULT WgcCaptureSession::StartCapture() {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+ RTC_DCHECK(!is_capture_started_);
+
+ if (item_closed_) {
+ RTC_LOG(LS_ERROR) << "The target source has been closed.";
+ RecordStartCaptureResult(StartCaptureResult::kSourceClosed);
+ return E_ABORT;
+ }
+
+ RTC_DCHECK(d3d11_device_);
+ RTC_DCHECK(item_);
+
+ // Listen for the Closed event, to detect if the source we are capturing is
+ // closed (e.g. application window is closed or monitor is disconnected). If
+ // it is, we should abort the capture.
+ item_closed_token_ = std::make_unique<EventRegistrationToken>();
+ auto closed_handler =
+ Microsoft::WRL::Callback<ABI::Windows::Foundation::ITypedEventHandler<
+ WGC::GraphicsCaptureItem*, IInspectable*>>(
+ this, &WgcCaptureSession::OnItemClosed);
+ HRESULT hr =
+ item_->add_Closed(closed_handler.Get(), item_closed_token_.get());
+ if (FAILED(hr)) {
+ RecordStartCaptureResult(StartCaptureResult::kAddClosedFailed);
+ return hr;
+ }
+
+ ComPtr<IDXGIDevice> dxgi_device;
+ hr = d3d11_device_->QueryInterface(IID_PPV_ARGS(&dxgi_device));
+ if (FAILED(hr)) {
+ RecordStartCaptureResult(StartCaptureResult::kDxgiDeviceCastFailed);
+ return hr;
+ }
+
+ if (!ResolveCoreWinRTDirect3DDelayload()) {
+ RecordStartCaptureResult(StartCaptureResult::kD3dDelayLoadFailed);
+ return E_FAIL;
+ }
+
+ hr = CreateDirect3DDeviceFromDXGIDevice(dxgi_device.Get(), &direct3d_device_);
+ if (FAILED(hr)) {
+ RecordStartCaptureResult(StartCaptureResult::kD3dDeviceCreationFailed);
+ return hr;
+ }
+
+ ComPtr<WGC::IDirect3D11CaptureFramePoolStatics> frame_pool_statics;
+ hr = GetActivationFactory<
+ WGC::IDirect3D11CaptureFramePoolStatics,
+ RuntimeClass_Windows_Graphics_Capture_Direct3D11CaptureFramePool>(
+ &frame_pool_statics);
+ if (FAILED(hr)) {
+ RecordStartCaptureResult(StartCaptureResult::kFramePoolActivationFailed);
+ return hr;
+ }
+
+ hr = frame_pool_statics->Create(direct3d_device_.Get(), kPixelFormat,
+ kNumBuffers, size_, &frame_pool_);
+ if (FAILED(hr)) {
+ RecordStartCaptureResult(StartCaptureResult::kCreateFramePoolFailed);
+ return hr;
+ }
+
+ frames_in_pool_ = 0;
+
+ // Because `WgcCapturerWin` created a `DispatcherQueue`, and we created
+ // `frame_pool_` via `Create`, the `FrameArrived` event will be delivered on
+ // the current thread.
+ frame_arrived_token_ = std::make_unique<EventRegistrationToken>();
+ auto frame_arrived_handler =
+ Microsoft::WRL::Callback<ABI::Windows::Foundation::ITypedEventHandler<
+ WGC::Direct3D11CaptureFramePool*, IInspectable*>>(
+ this, &WgcCaptureSession::OnFrameArrived);
+ hr = frame_pool_->add_FrameArrived(frame_arrived_handler.Get(),
+ frame_arrived_token_.get());
+
+ hr = frame_pool_->CreateCaptureSession(item_.Get(), &session_);
+ if (FAILED(hr)) {
+ RecordStartCaptureResult(StartCaptureResult::kCreateCaptureSessionFailed);
+ return hr;
+ }
+
+ hr = session_->StartCapture();
+ if (FAILED(hr)) {
+ RTC_LOG(LS_ERROR) << "Failed to start CaptureSession: " << hr;
+ RecordStartCaptureResult(StartCaptureResult::kStartCaptureFailed);
+ return hr;
+ }
+
+ RecordStartCaptureResult(StartCaptureResult::kSuccess);
+
+ is_capture_started_ = true;
+ return hr;
+}
+
+HRESULT WgcCaptureSession::GetFrame(
+ std::unique_ptr<DesktopFrame>* output_frame) {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+
+ if (item_closed_) {
+ RTC_LOG(LS_ERROR) << "The target source has been closed.";
+ RecordGetFrameResult(GetFrameResult::kItemClosed);
+ return E_ABORT;
+ }
+
+ RTC_DCHECK(is_capture_started_);
+
+ if (frames_in_pool_ < 1)
+ wait_for_frame_event_.Wait(first_frame_ ? kMaxWaitForFirstFrameMs
+ : kMaxWaitForFrameMs);
+
+ ComPtr<WGC::IDirect3D11CaptureFrame> capture_frame;
+ HRESULT hr = frame_pool_->TryGetNextFrame(&capture_frame);
+ if (FAILED(hr)) {
+ RTC_LOG(LS_ERROR) << "TryGetNextFrame failed: " << hr;
+ RecordGetFrameResult(GetFrameResult::kTryGetNextFrameFailed);
+ return hr;
+ }
+
+ if (!capture_frame) {
+ RecordGetFrameResult(GetFrameResult::kFrameDropped);
+ return hr;
+ }
+
+ first_frame_ = false;
+ --frames_in_pool_;
+
+ // We need to get `capture_frame` as an `ID3D11Texture2D` so that we can get
+ // the raw image data in the format required by the `DesktopFrame` interface.
+ ComPtr<ABI::Windows::Graphics::DirectX::Direct3D11::IDirect3DSurface>
+ d3d_surface;
+ hr = capture_frame->get_Surface(&d3d_surface);
+ if (FAILED(hr)) {
+ RecordGetFrameResult(GetFrameResult::kGetSurfaceFailed);
+ return hr;
+ }
+
+ ComPtr<Windows::Graphics::DirectX::Direct3D11::IDirect3DDxgiInterfaceAccess>
+ direct3DDxgiInterfaceAccess;
+ hr = d3d_surface->QueryInterface(IID_PPV_ARGS(&direct3DDxgiInterfaceAccess));
+ if (FAILED(hr)) {
+ RecordGetFrameResult(GetFrameResult::kDxgiInterfaceAccessFailed);
+ return hr;
+ }
+
+ ComPtr<ID3D11Texture2D> texture_2D;
+ hr = direct3DDxgiInterfaceAccess->GetInterface(IID_PPV_ARGS(&texture_2D));
+ if (FAILED(hr)) {
+ RecordGetFrameResult(GetFrameResult::kTexture2dCastFailed);
+ return hr;
+ }
+
+ if (!mapped_texture_) {
+ hr = CreateMappedTexture(texture_2D);
+ if (FAILED(hr)) {
+ RecordGetFrameResult(GetFrameResult::kCreateMappedTextureFailed);
+ return hr;
+ }
+ }
+
+ // We need to copy `texture_2D` into `mapped_texture_` as the latter has the
+ // D3D11_CPU_ACCESS_READ flag set, which lets us access the image data.
+ // Otherwise it would only be readable by the GPU.
+ ComPtr<ID3D11DeviceContext> d3d_context;
+ d3d11_device_->GetImmediateContext(&d3d_context);
+
+ ABI::Windows::Graphics::SizeInt32 new_size;
+ hr = capture_frame->get_ContentSize(&new_size);
+ if (FAILED(hr)) {
+ RecordGetFrameResult(GetFrameResult::kGetContentSizeFailed);
+ return hr;
+ }
+
+ // If the size changed, we must resize `mapped_texture_` and `frame_pool_` to
+ // fit the new size. This must be done before `CopySubresourceRegion` so that
+ // the textures are the same size.
+ if (size_.Height != new_size.Height || size_.Width != new_size.Width) {
+ hr = CreateMappedTexture(texture_2D, new_size.Width, new_size.Height);
+ if (FAILED(hr)) {
+ RecordGetFrameResult(GetFrameResult::kResizeMappedTextureFailed);
+ return hr;
+ }
+
+ hr = frame_pool_->Recreate(direct3d_device_.Get(), kPixelFormat,
+ kNumBuffers, new_size);
+ if (FAILED(hr)) {
+ RecordGetFrameResult(GetFrameResult::kRecreateFramePoolFailed);
+ return hr;
+ }
+ }
+
+ // If the size has changed since the last capture, we must be sure to use
+ // the smaller dimensions. Otherwise we might overrun our buffer, or
+ // read stale data from the last frame.
+ int image_height = std::min(size_.Height, new_size.Height);
+ int image_width = std::min(size_.Width, new_size.Width);
+
+ D3D11_BOX copy_region;
+ copy_region.left = 0;
+ copy_region.top = 0;
+ copy_region.right = image_width;
+ copy_region.bottom = image_height;
+ // Our textures are 2D so we just want one "slice" of the box.
+ copy_region.front = 0;
+ copy_region.back = 1;
+ d3d_context->CopySubresourceRegion(mapped_texture_.Get(),
+ /*dst_subresource_index=*/0, /*dst_x=*/0,
+ /*dst_y=*/0, /*dst_z=*/0, texture_2D.Get(),
+ /*src_subresource_index=*/0, &copy_region);
+
+ D3D11_MAPPED_SUBRESOURCE map_info;
+ hr = d3d_context->Map(mapped_texture_.Get(), /*subresource_index=*/0,
+ D3D11_MAP_READ, /*D3D11_MAP_FLAG_DO_NOT_WAIT=*/0,
+ &map_info);
+ if (FAILED(hr)) {
+ RecordGetFrameResult(GetFrameResult::kMapFrameFailed);
+ return hr;
+ }
+
+ int row_data_length = image_width * DesktopFrame::kBytesPerPixel;
+
+ // Make a copy of the data pointed to by `map_info.pData` so we are free to
+ // unmap our texture.
+ uint8_t* src_data = static_cast<uint8_t*>(map_info.pData);
+ std::vector<uint8_t> image_data;
+ image_data.resize(image_height * row_data_length);
+ uint8_t* image_data_ptr = image_data.data();
+ for (int i = 0; i < image_height; i++) {
+ memcpy(image_data_ptr, src_data, row_data_length);
+ image_data_ptr += row_data_length;
+ src_data += map_info.RowPitch;
+ }
+
+ d3d_context->Unmap(mapped_texture_.Get(), 0);
+
+ // Transfer ownership of `image_data` to the output_frame.
+ DesktopSize size(image_width, image_height);
+ *output_frame = std::make_unique<WgcDesktopFrame>(size, row_data_length,
+ std::move(image_data));
+
+ size_ = new_size;
+ RecordGetFrameResult(GetFrameResult::kSuccess);
+ return hr;
+}
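+
+// Illustrative numbers for the row-by-row copy above: a 1920-wide BGRA frame
+// has row_data_length = 1920 * 4 = 7680 bytes, while map_info.RowPitch may be
+// padded by the driver (e.g. to 7936); copying row by row drops the padding
+// so the resulting DesktopFrame is tightly packed.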
+
+HRESULT WgcCaptureSession::CreateMappedTexture(
+ ComPtr<ID3D11Texture2D> src_texture,
+ UINT width,
+ UINT height) {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+
+ D3D11_TEXTURE2D_DESC src_desc;
+ src_texture->GetDesc(&src_desc);
+ D3D11_TEXTURE2D_DESC map_desc;
+ map_desc.Width = width == 0 ? src_desc.Width : width;
+ map_desc.Height = height == 0 ? src_desc.Height : height;
+ map_desc.MipLevels = src_desc.MipLevels;
+ map_desc.ArraySize = src_desc.ArraySize;
+ map_desc.Format = src_desc.Format;
+ map_desc.SampleDesc = src_desc.SampleDesc;
+ map_desc.Usage = D3D11_USAGE_STAGING;
+ map_desc.BindFlags = 0;
+ map_desc.CPUAccessFlags = D3D11_CPU_ACCESS_READ;
+ map_desc.MiscFlags = 0;
+ return d3d11_device_->CreateTexture2D(&map_desc, nullptr, &mapped_texture_);
+}
+
+HRESULT WgcCaptureSession::OnFrameArrived(
+ WGC::IDirect3D11CaptureFramePool* sender,
+ IInspectable* event_args) {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+ RTC_DCHECK_LT(frames_in_pool_, kNumBuffers);
+ ++frames_in_pool_;
+ wait_for_frame_event_.Set();
+ return S_OK;
+}
+
+HRESULT WgcCaptureSession::OnItemClosed(WGC::IGraphicsCaptureItem* sender,
+ IInspectable* event_args) {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+
+ RTC_LOG(LS_INFO) << "Capture target has been closed.";
+ item_closed_ = true;
+ is_capture_started_ = false;
+
+ RemoveEventHandlers();
+
+ mapped_texture_ = nullptr;
+ session_ = nullptr;
+ frame_pool_ = nullptr;
+ direct3d_device_ = nullptr;
+ item_ = nullptr;
+ d3d11_device_ = nullptr;
+
+ return S_OK;
+}
+
+void WgcCaptureSession::RemoveEventHandlers() {
+ HRESULT hr;
+ if (frame_pool_ && frame_arrived_token_) {
+ hr = frame_pool_->remove_FrameArrived(*frame_arrived_token_);
+ frame_arrived_token_.reset();
+ if (FAILED(hr)) {
+ RTC_LOG(LS_WARNING) << "Failed to remove FrameArrived event handler: "
+ << hr;
+ }
+ }
+ if (item_ && item_closed_token_) {
+ hr = item_->remove_Closed(*item_closed_token_);
+ item_closed_token_.reset();
+ if (FAILED(hr))
+ RTC_LOG(LS_WARNING) << "Failed to remove Closed event handler: " << hr;
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/win/wgc_capture_session.h b/third_party/libwebrtc/modules/desktop_capture/win/wgc_capture_session.h
new file mode 100644
index 0000000000..27d412baf9
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/win/wgc_capture_session.h
@@ -0,0 +1,137 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_DESKTOP_CAPTURE_WIN_WGC_CAPTURE_SESSION_H_
+#define MODULES_DESKTOP_CAPTURE_WIN_WGC_CAPTURE_SESSION_H_
+
+#include <d3d11.h>
+#include <windows.graphics.capture.h>
+#include <windows.graphics.h>
+#include <wrl/client.h>
+
+#include <memory>
+
+#include "api/sequence_checker.h"
+#include "modules/desktop_capture/desktop_capture_options.h"
+#include "modules/desktop_capture/win/wgc_capture_source.h"
+#include "rtc_base/event.h"
+
+namespace webrtc {
+
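+// Illustrative usage (a sketch, not part of the original header; assumes the
+// caller already owns a D3D11 device and a GraphicsCaptureItem of known size):
+//
+//   WgcCaptureSession session(d3d11_device, item, size);
+//   if (SUCCEEDED(session.StartCapture())) {
+//     std::unique_ptr<DesktopFrame> frame;
+//     session.GetFrame(&frame);  // May wait briefly if the pool is empty.
+//   }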
+class WgcCaptureSession final {
+ public:
+ WgcCaptureSession(
+ Microsoft::WRL::ComPtr<ID3D11Device> d3d11_device,
+ Microsoft::WRL::ComPtr<
+ ABI::Windows::Graphics::Capture::IGraphicsCaptureItem> item,
+ ABI::Windows::Graphics::SizeInt32 size);
+
+ // Disallow copy and assign.
+ WgcCaptureSession(const WgcCaptureSession&) = delete;
+ WgcCaptureSession& operator=(const WgcCaptureSession&) = delete;
+
+ ~WgcCaptureSession();
+
+ HRESULT StartCapture();
+
+ // Returns a frame from the frame pool, if any are present.
+ HRESULT GetFrame(std::unique_ptr<DesktopFrame>* output_frame);
+
+ bool IsCaptureStarted() const {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+ return is_capture_started_;
+ }
+
+  // We keep 2 buffers in the frame pool to balance frame staleness against
+  // having to wait too frequently for frames to arrive. Too many buffers will
+  // lead to high latency, and too few will lead to poor performance.
+  // We make this public for tests.
+ static constexpr int kNumBuffers = 2;
+
+ private:
+  // Initializes `mapped_texture_` with the properties of `src_texture`, but
+  // overrides the properties required for CPU read-back, e.g. it sets the
+  // D3D11_CPU_ACCESS_READ flag. Also has optional parameters for what size
+  // `mapped_texture_` should be; if they aren't provided, we will use the
+  // size of `src_texture`.
+ HRESULT CreateMappedTexture(
+ Microsoft::WRL::ComPtr<ID3D11Texture2D> src_texture,
+ UINT width = 0,
+ UINT height = 0);
+
+ // Event handler for `item_`'s Closed event.
+ HRESULT OnItemClosed(
+ ABI::Windows::Graphics::Capture::IGraphicsCaptureItem* sender,
+ IInspectable* event_args);
+
+ // Event handler for `frame_pool_`'s FrameArrived event.
+ HRESULT OnFrameArrived(
+ ABI::Windows::Graphics::Capture::IDirect3D11CaptureFramePool* sender,
+ IInspectable* event_args);
+
+ void RemoveEventHandlers();
+
+ // We wait on this event in `GetFrame` if there are no frames in the pool.
+ // `OnFrameArrived` will set the event so we can proceed.
+ rtc::Event wait_for_frame_event_;
+ int frames_in_pool_;
+
+ // We're willing to wait for a frame a little longer if it's the first one.
+ bool first_frame_ = true;
+
+ std::unique_ptr<EventRegistrationToken> frame_arrived_token_;
+ std::unique_ptr<EventRegistrationToken> item_closed_token_;
+
+ // A Direct3D11 Device provided by the caller. We use this to create an
+ // IDirect3DDevice, and also to create textures that will hold the image data.
+ Microsoft::WRL::ComPtr<ID3D11Device> d3d11_device_;
+
+  // This item represents what we are capturing; we use it to create the
+  // capture session and to listen for the Closed event.
+ Microsoft::WRL::ComPtr<ABI::Windows::Graphics::Capture::IGraphicsCaptureItem>
+ item_;
+
+ // The IDirect3DDevice is necessary to instantiate the frame pool.
+ Microsoft::WRL::ComPtr<
+ ABI::Windows::Graphics::DirectX::Direct3D11::IDirect3DDevice>
+ direct3d_device_;
+
+  // The frame pool is where frames are deposited during capture; we retrieve
+  // them from it with TryGetNextFrame().
+ Microsoft::WRL::ComPtr<
+ ABI::Windows::Graphics::Capture::IDirect3D11CaptureFramePool>
+ frame_pool_;
+
+ // This texture holds the final image data. We made it a member so we can
+ // reuse it, instead of having to create a new texture every time we grab a
+ // frame.
+ Microsoft::WRL::ComPtr<ID3D11Texture2D> mapped_texture_;
+
+ // This is the size of `mapped_texture_` and the buffers in `frame_pool_`. We
+ // store this as a member so we can compare it to the size of incoming frames
+ // and resize if necessary.
+ ABI::Windows::Graphics::SizeInt32 size_;
+
+  // The capture session lets us set properties about the capture before it
+  // starts, such as whether to capture the mouse cursor, and it lets us tell
+  // WGC to start capturing frames.
+ Microsoft::WRL::ComPtr<
+ ABI::Windows::Graphics::Capture::IGraphicsCaptureSession>
+ session_;
+
+ bool item_closed_ = false;
+ bool is_capture_started_ = false;
+
+ SequenceChecker sequence_checker_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_DESKTOP_CAPTURE_WIN_WGC_CAPTURE_SESSION_H_
diff --git a/third_party/libwebrtc/modules/desktop_capture/win/wgc_capture_source.cc b/third_party/libwebrtc/modules/desktop_capture/win/wgc_capture_source.cc
new file mode 100644
index 0000000000..24e6129ec7
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/win/wgc_capture_source.cc
@@ -0,0 +1,218 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/win/wgc_capture_source.h"
+
+#include <dwmapi.h>
+#include <windows.graphics.capture.interop.h>
+#include <windows.h>
+
+#include <utility>
+
+#include "modules/desktop_capture/win/screen_capture_utils.h"
+#include "modules/desktop_capture/win/window_capture_utils.h"
+#include "rtc_base/win/get_activation_factory.h"
+
+using Microsoft::WRL::ComPtr;
+namespace WGC = ABI::Windows::Graphics::Capture;
+
+namespace webrtc {
+
+WgcCaptureSource::WgcCaptureSource(DesktopCapturer::SourceId source_id)
+ : source_id_(source_id) {}
+WgcCaptureSource::~WgcCaptureSource() = default;
+
+bool WgcCaptureSource::IsCapturable() {
+ // If we can create a capture item, then we can capture it. Unfortunately,
+ // we can't cache this item because it may be created in a different COM
+  // apartment than the one where capture will eventually start.
+ ComPtr<WGC::IGraphicsCaptureItem> item;
+ return SUCCEEDED(CreateCaptureItem(&item));
+}
+
+bool WgcCaptureSource::FocusOnSource() {
+ return false;
+}
+
+ABI::Windows::Graphics::SizeInt32 WgcCaptureSource::GetSize() {
+ if (!item_)
+ return {0, 0};
+
+ ABI::Windows::Graphics::SizeInt32 item_size;
+ HRESULT hr = item_->get_Size(&item_size);
+ if (FAILED(hr))
+ return {0, 0};
+
+ return item_size;
+}
+
+HRESULT WgcCaptureSource::GetCaptureItem(
+ ComPtr<WGC::IGraphicsCaptureItem>* result) {
+ HRESULT hr = S_OK;
+ if (!item_)
+ hr = CreateCaptureItem(&item_);
+
+ *result = item_;
+ return hr;
+}
+
+WgcCaptureSourceFactory::~WgcCaptureSourceFactory() = default;
+
+WgcWindowSourceFactory::WgcWindowSourceFactory() = default;
+WgcWindowSourceFactory::~WgcWindowSourceFactory() = default;
+
+std::unique_ptr<WgcCaptureSource> WgcWindowSourceFactory::CreateCaptureSource(
+ DesktopCapturer::SourceId source_id) {
+ return std::make_unique<WgcWindowSource>(source_id);
+}
+
+WgcScreenSourceFactory::WgcScreenSourceFactory() = default;
+WgcScreenSourceFactory::~WgcScreenSourceFactory() = default;
+
+std::unique_ptr<WgcCaptureSource> WgcScreenSourceFactory::CreateCaptureSource(
+ DesktopCapturer::SourceId source_id) {
+ return std::make_unique<WgcScreenSource>(source_id);
+}
+
+WgcWindowSource::WgcWindowSource(DesktopCapturer::SourceId source_id)
+ : WgcCaptureSource(source_id) {}
+WgcWindowSource::~WgcWindowSource() = default;
+
+DesktopVector WgcWindowSource::GetTopLeft() {
+ DesktopRect window_rect;
+ if (!GetWindowRect(reinterpret_cast<HWND>(GetSourceId()), &window_rect))
+ return DesktopVector();
+
+ return window_rect.top_left();
+}
+
+ABI::Windows::Graphics::SizeInt32 WgcWindowSource::GetSize() {
+ RECT window_rect;
+ HRESULT hr = ::DwmGetWindowAttribute(
+ reinterpret_cast<HWND>(GetSourceId()), DWMWA_EXTENDED_FRAME_BOUNDS,
+ reinterpret_cast<void*>(&window_rect), sizeof(window_rect));
+ if (FAILED(hr))
+ return WgcCaptureSource::GetSize();
+
+ return {window_rect.right - window_rect.left,
+ window_rect.bottom - window_rect.top};
+}
+
+bool WgcWindowSource::IsCapturable() {
+ if (!IsWindowValidAndVisible(reinterpret_cast<HWND>(GetSourceId())))
+ return false;
+
+ return WgcCaptureSource::IsCapturable();
+}
+
+bool WgcWindowSource::FocusOnSource() {
+ if (!IsWindowValidAndVisible(reinterpret_cast<HWND>(GetSourceId())))
+ return false;
+
+ return ::BringWindowToTop(reinterpret_cast<HWND>(GetSourceId())) &&
+ ::SetForegroundWindow(reinterpret_cast<HWND>(GetSourceId()));
+}
+
+HRESULT WgcWindowSource::CreateCaptureItem(
+ ComPtr<WGC::IGraphicsCaptureItem>* result) {
+ if (!ResolveCoreWinRTDelayload())
+ return E_FAIL;
+
+ ComPtr<IGraphicsCaptureItemInterop> interop;
+ HRESULT hr = GetActivationFactory<
+ IGraphicsCaptureItemInterop,
+ RuntimeClass_Windows_Graphics_Capture_GraphicsCaptureItem>(&interop);
+ if (FAILED(hr))
+ return hr;
+
+ ComPtr<WGC::IGraphicsCaptureItem> item;
+ hr = interop->CreateForWindow(reinterpret_cast<HWND>(GetSourceId()),
+ IID_PPV_ARGS(&item));
+ if (FAILED(hr))
+ return hr;
+
+ if (!item)
+ return E_HANDLE;
+
+ *result = std::move(item);
+ return hr;
+}
+
+WgcScreenSource::WgcScreenSource(DesktopCapturer::SourceId source_id)
+ : WgcCaptureSource(source_id) {
+ // Getting the HMONITOR could fail if the source_id is invalid. In that case,
+ // we leave hmonitor_ uninitialized and `IsCapturable()` will fail.
+ HMONITOR hmon;
+ if (GetHmonitorFromDeviceIndex(GetSourceId(), &hmon))
+ hmonitor_ = hmon;
+}
+
+WgcScreenSource::~WgcScreenSource() = default;
+
+DesktopVector WgcScreenSource::GetTopLeft() {
+ if (!hmonitor_)
+ return DesktopVector();
+
+ return GetMonitorRect(*hmonitor_).top_left();
+}
+
+ABI::Windows::Graphics::SizeInt32 WgcScreenSource::GetSize() {
+ ABI::Windows::Graphics::SizeInt32 size = WgcCaptureSource::GetSize();
+ if (!hmonitor_ || (size.Width != 0 && size.Height != 0))
+ return size;
+
+ DesktopRect rect = GetMonitorRect(*hmonitor_);
+ return {rect.width(), rect.height()};
+}
+
+bool WgcScreenSource::IsCapturable() {
+ if (!hmonitor_)
+ return false;
+
+ if (!IsMonitorValid(*hmonitor_))
+ return false;
+
+ return WgcCaptureSource::IsCapturable();
+}
+
+HRESULT WgcScreenSource::CreateCaptureItem(
+ ComPtr<WGC::IGraphicsCaptureItem>* result) {
+ if (!hmonitor_)
+ return E_ABORT;
+
+ if (!ResolveCoreWinRTDelayload())
+ return E_FAIL;
+
+ ComPtr<IGraphicsCaptureItemInterop> interop;
+ HRESULT hr = GetActivationFactory<
+ IGraphicsCaptureItemInterop,
+ RuntimeClass_Windows_Graphics_Capture_GraphicsCaptureItem>(&interop);
+ if (FAILED(hr))
+ return hr;
+
+ // Ensure the monitor is still valid (hasn't disconnected) before trying to
+ // create the item. On versions of Windows before Win11, `CreateForMonitor`
+ // will crash if no displays are connected.
+ if (!IsMonitorValid(hmonitor_.value()))
+ return E_ABORT;
+
+ ComPtr<WGC::IGraphicsCaptureItem> item;
+ hr = interop->CreateForMonitor(*hmonitor_, IID_PPV_ARGS(&item));
+ if (FAILED(hr))
+ return hr;
+
+ if (!item)
+ return E_HANDLE;
+
+ *result = std::move(item);
+ return hr;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/win/wgc_capture_source.h b/third_party/libwebrtc/modules/desktop_capture/win/wgc_capture_source.h
new file mode 100644
index 0000000000..d1275b6168
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/win/wgc_capture_source.h
@@ -0,0 +1,141 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_DESKTOP_CAPTURE_WIN_WGC_CAPTURE_SOURCE_H_
+#define MODULES_DESKTOP_CAPTURE_WIN_WGC_CAPTURE_SOURCE_H_
+
+#include <windows.graphics.capture.h>
+#include <windows.graphics.h>
+#include <wrl/client.h>
+
+#include <memory>
+
+#include "absl/types/optional.h"
+#include "modules/desktop_capture/desktop_capturer.h"
+#include "modules/desktop_capture/desktop_geometry.h"
+
+namespace webrtc {
+
+// Abstract class to represent the source that WGC-based capturers capture
+// from. Could represent an application window or a screen. Consumers should use
+// the appropriate Wgc*SourceFactory class to create WgcCaptureSource objects
+// of the appropriate type.
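+//
+// For example (illustrative sketch only):
+//
+//   WgcWindowSourceFactory factory;
+//   std::unique_ptr<WgcCaptureSource> source =
+//       factory.CreateCaptureSource(source_id);
+//   if (source->IsCapturable()) {
+//     // Hand the source to the capturer.
+//   }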
+class WgcCaptureSource {
+ public:
+ explicit WgcCaptureSource(DesktopCapturer::SourceId source_id);
+ virtual ~WgcCaptureSource();
+
+ virtual DesktopVector GetTopLeft() = 0;
+ virtual bool IsCapturable();
+ virtual bool FocusOnSource();
+ virtual ABI::Windows::Graphics::SizeInt32 GetSize();
+ HRESULT GetCaptureItem(
+ Microsoft::WRL::ComPtr<
+ ABI::Windows::Graphics::Capture::IGraphicsCaptureItem>* result);
+ DesktopCapturer::SourceId GetSourceId() { return source_id_; }
+
+ protected:
+ virtual HRESULT CreateCaptureItem(
+ Microsoft::WRL::ComPtr<
+ ABI::Windows::Graphics::Capture::IGraphicsCaptureItem>* result) = 0;
+
+ private:
+ Microsoft::WRL::ComPtr<ABI::Windows::Graphics::Capture::IGraphicsCaptureItem>
+ item_;
+ const DesktopCapturer::SourceId source_id_;
+};
+
+class WgcCaptureSourceFactory {
+ public:
+ virtual ~WgcCaptureSourceFactory();
+
+ virtual std::unique_ptr<WgcCaptureSource> CreateCaptureSource(
+ DesktopCapturer::SourceId) = 0;
+};
+
+class WgcWindowSourceFactory final : public WgcCaptureSourceFactory {
+ public:
+ WgcWindowSourceFactory();
+
+ // Disallow copy and assign.
+ WgcWindowSourceFactory(const WgcWindowSourceFactory&) = delete;
+ WgcWindowSourceFactory& operator=(const WgcWindowSourceFactory&) = delete;
+
+ ~WgcWindowSourceFactory() override;
+
+ std::unique_ptr<WgcCaptureSource> CreateCaptureSource(
+ DesktopCapturer::SourceId) override;
+};
+
+class WgcScreenSourceFactory final : public WgcCaptureSourceFactory {
+ public:
+ WgcScreenSourceFactory();
+
+ WgcScreenSourceFactory(const WgcScreenSourceFactory&) = delete;
+ WgcScreenSourceFactory& operator=(const WgcScreenSourceFactory&) = delete;
+
+ ~WgcScreenSourceFactory() override;
+
+ std::unique_ptr<WgcCaptureSource> CreateCaptureSource(
+ DesktopCapturer::SourceId) override;
+};
+
+// Class for capturing application windows.
+class WgcWindowSource final : public WgcCaptureSource {
+ public:
+ explicit WgcWindowSource(DesktopCapturer::SourceId source_id);
+
+ WgcWindowSource(const WgcWindowSource&) = delete;
+ WgcWindowSource& operator=(const WgcWindowSource&) = delete;
+
+ ~WgcWindowSource() override;
+
+ DesktopVector GetTopLeft() override;
+ ABI::Windows::Graphics::SizeInt32 GetSize() override;
+ bool IsCapturable() override;
+ bool FocusOnSource() override;
+
+ private:
+ HRESULT CreateCaptureItem(
+ Microsoft::WRL::ComPtr<
+ ABI::Windows::Graphics::Capture::IGraphicsCaptureItem>* result)
+ override;
+};
+
+// Class for capturing screens/monitors/displays.
+class WgcScreenSource final : public WgcCaptureSource {
+ public:
+ explicit WgcScreenSource(DesktopCapturer::SourceId source_id);
+
+ WgcScreenSource(const WgcScreenSource&) = delete;
+ WgcScreenSource& operator=(const WgcScreenSource&) = delete;
+
+ ~WgcScreenSource() override;
+
+ DesktopVector GetTopLeft() override;
+ ABI::Windows::Graphics::SizeInt32 GetSize() override;
+ bool IsCapturable() override;
+
+ private:
+ HRESULT CreateCaptureItem(
+ Microsoft::WRL::ComPtr<
+ ABI::Windows::Graphics::Capture::IGraphicsCaptureItem>* result)
+ override;
+
+ // To maintain compatibility with other capturers, this class accepts a
+  // device index as its SourceId. However, WGC requires an HMONITOR to
+ // describe which screen to capture. So, we internally convert the supplied
+ // device index into an HMONITOR when `IsCapturable()` is called.
+ absl::optional<HMONITOR> hmonitor_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_DESKTOP_CAPTURE_WIN_WGC_CAPTURE_SOURCE_H_
diff --git a/third_party/libwebrtc/modules/desktop_capture/win/wgc_capture_source_unittest.cc b/third_party/libwebrtc/modules/desktop_capture/win/wgc_capture_source_unittest.cc
new file mode 100644
index 0000000000..dc37ec2e0d
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/win/wgc_capture_source_unittest.cc
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/win/wgc_capture_source.h"
+
+#include <windows.graphics.capture.h>
+#include <wrl/client.h>
+
+#include <utility>
+
+#include "modules/desktop_capture/desktop_capture_types.h"
+#include "modules/desktop_capture/desktop_geometry.h"
+#include "modules/desktop_capture/win/screen_capture_utils.h"
+#include "modules/desktop_capture/win/test_support/test_window.h"
+#include "modules/desktop_capture/win/wgc_capturer_win.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/win/scoped_com_initializer.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+const WCHAR kWindowTitle[] = L"WGC Capture Source Test Window";
+
+const int kFirstXCoord = 25;
+const int kFirstYCoord = 50;
+const int kSecondXCoord = 50;
+const int kSecondYCoord = 75;
+
+} // namespace
+
+class WgcCaptureSourceTest : public ::testing::TestWithParam<CaptureType> {
+ public:
+ void SetUp() override {
+ com_initializer_ =
+ std::make_unique<ScopedCOMInitializer>(ScopedCOMInitializer::kMTA);
+ ASSERT_TRUE(com_initializer_->Succeeded());
+ }
+
+ void TearDown() override {
+ if (window_open_) {
+ DestroyTestWindow(window_info_);
+ }
+ }
+
+ void SetUpForWindowSource() {
+ window_info_ = CreateTestWindow(kWindowTitle);
+ window_open_ = true;
+ source_id_ = reinterpret_cast<DesktopCapturer::SourceId>(window_info_.hwnd);
+ source_factory_ = std::make_unique<WgcWindowSourceFactory>();
+ }
+
+ void SetUpForScreenSource() {
+ source_id_ = kFullDesktopScreenId;
+ source_factory_ = std::make_unique<WgcScreenSourceFactory>();
+ }
+
+ protected:
+ std::unique_ptr<ScopedCOMInitializer> com_initializer_;
+ std::unique_ptr<WgcCaptureSourceFactory> source_factory_;
+ std::unique_ptr<WgcCaptureSource> source_;
+ DesktopCapturer::SourceId source_id_;
+ WindowInfo window_info_;
+ bool window_open_ = false;
+};
+
+// Window specific test
+TEST_F(WgcCaptureSourceTest, WindowPosition) {
+ if (!IsWgcSupported(CaptureType::kWindow)) {
+ RTC_LOG(LS_INFO)
+ << "Skipping WgcCapturerWinTests on unsupported platforms.";
+ GTEST_SKIP();
+ }
+
+ SetUpForWindowSource();
+ source_ = source_factory_->CreateCaptureSource(source_id_);
+ ASSERT_TRUE(source_);
+ EXPECT_EQ(source_->GetSourceId(), source_id_);
+
+ MoveTestWindow(window_info_.hwnd, kFirstXCoord, kFirstYCoord);
+ DesktopVector source_vector = source_->GetTopLeft();
+ EXPECT_EQ(source_vector.x(), kFirstXCoord);
+ EXPECT_EQ(source_vector.y(), kFirstYCoord);
+
+ MoveTestWindow(window_info_.hwnd, kSecondXCoord, kSecondYCoord);
+ source_vector = source_->GetTopLeft();
+ EXPECT_EQ(source_vector.x(), kSecondXCoord);
+ EXPECT_EQ(source_vector.y(), kSecondYCoord);
+}
+
+// Screen specific test
+TEST_F(WgcCaptureSourceTest, ScreenPosition) {
+ if (!IsWgcSupported(CaptureType::kScreen)) {
+ RTC_LOG(LS_INFO)
+ << "Skipping WgcCapturerWinTests on unsupported platforms.";
+ GTEST_SKIP();
+ }
+
+ SetUpForScreenSource();
+ source_ = source_factory_->CreateCaptureSource(source_id_);
+ ASSERT_TRUE(source_);
+ EXPECT_EQ(source_id_, source_->GetSourceId());
+
+ DesktopRect screen_rect = GetFullscreenRect();
+ DesktopVector source_vector = source_->GetTopLeft();
+ EXPECT_EQ(source_vector.x(), screen_rect.left());
+ EXPECT_EQ(source_vector.y(), screen_rect.top());
+}
+
+// Source agnostic test
+TEST_P(WgcCaptureSourceTest, CreateSource) {
+ if (!IsWgcSupported(GetParam())) {
+ RTC_LOG(LS_INFO)
+ << "Skipping WgcCapturerWinTests on unsupported platforms.";
+ GTEST_SKIP();
+ }
+
+ if (GetParam() == CaptureType::kWindow) {
+ SetUpForWindowSource();
+ } else {
+ SetUpForScreenSource();
+ }
+
+ source_ = source_factory_->CreateCaptureSource(source_id_);
+ ASSERT_TRUE(source_);
+ EXPECT_EQ(source_id_, source_->GetSourceId());
+ EXPECT_TRUE(source_->IsCapturable());
+
+ Microsoft::WRL::ComPtr<ABI::Windows::Graphics::Capture::IGraphicsCaptureItem>
+ item;
+ EXPECT_TRUE(SUCCEEDED(source_->GetCaptureItem(&item)));
+ EXPECT_TRUE(item);
+}
+
+INSTANTIATE_TEST_SUITE_P(SourceAgnostic,
+ WgcCaptureSourceTest,
+ ::testing::Values(CaptureType::kWindow,
+ CaptureType::kScreen));
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/win/wgc_capturer_win.cc b/third_party/libwebrtc/modules/desktop_capture/win/wgc_capturer_win.cc
new file mode 100644
index 0000000000..ce5eb6b31f
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/win/wgc_capturer_win.cc
@@ -0,0 +1,363 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/win/wgc_capturer_win.h"
+
+#include <DispatcherQueue.h>
+#include <windows.foundation.metadata.h>
+#include <windows.graphics.capture.h>
+
+#include <utility>
+
+#include "modules/desktop_capture/desktop_capture_metrics_helper.h"
+#include "modules/desktop_capture/desktop_capture_types.h"
+#include "modules/desktop_capture/win/wgc_desktop_frame.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/time_utils.h"
+#include "rtc_base/win/get_activation_factory.h"
+#include "rtc_base/win/hstring.h"
+#include "rtc_base/win/windows_version.h"
+#include "system_wrappers/include/metrics.h"
+
+namespace WGC = ABI::Windows::Graphics::Capture;
+using Microsoft::WRL::ComPtr;
+
+namespace webrtc {
+
+namespace {
+
+constexpr wchar_t kCoreMessagingDll[] = L"CoreMessaging.dll";
+
+constexpr wchar_t kWgcSessionType[] =
+ L"Windows.Graphics.Capture.GraphicsCaptureSession";
+constexpr wchar_t kApiContract[] = L"Windows.Foundation.UniversalApiContract";
+constexpr UINT16 kRequiredApiContractVersion = 8;
+
+enum class WgcCapturerResult {
+ kSuccess = 0,
+ kNoDirect3dDevice = 1,
+ kNoSourceSelected = 2,
+ kItemCreationFailure = 3,
+ kSessionStartFailure = 4,
+ kGetFrameFailure = 5,
+ kFrameDropped = 6,
+ kCreateDispatcherQueueFailure = 7,
+ kMaxValue = kCreateDispatcherQueueFailure
+};
+
+void RecordWgcCapturerResult(WgcCapturerResult error) {
+ RTC_HISTOGRAM_ENUMERATION("WebRTC.DesktopCapture.Win.WgcCapturerResult",
+ static_cast<int>(error),
+ static_cast<int>(WgcCapturerResult::kMaxValue));
+}
+
+} // namespace
+
+bool IsWgcSupported(CaptureType capture_type) {
+ if (!HasActiveDisplay()) {
+ // There is a bug in `CreateForMonitor` that causes a crash if there are no
+ // active displays. The crash was fixed in Win11, but we are still unable
+ // to capture screens without an active display.
+ if (capture_type == CaptureType::kScreen)
+ return false;
+
+ // There is a bug in the DWM (Desktop Window Manager) that prevents it from
+ // providing image data if there are no displays attached. This was fixed in
+ // Windows 11.
+    if (rtc_win::GetVersion() < rtc_win::Version::VERSION_WIN11)
+ return false;
+ }
+
+ // A bug in the WGC API `CreateForMonitor` prevents capturing the entire
+  // virtual screen (all monitors simultaneously); this was fixed in 20H1.
+  // Since we can't assert that we won't be asked to capture the entire
+  // virtual screen, we report unsupported so we can fall back to another
+  // capturer.
+ if (capture_type == CaptureType::kScreen &&
+      rtc_win::GetVersion() < rtc_win::Version::VERSION_WIN10_20H1) {
+ return false;
+ }
+
+ if (!ResolveCoreWinRTDelayload())
+ return false;
+
+  // We need to check if the WGC APIs are present on the system. Certain SKUs
+ // of Windows ship without these APIs.
+ ComPtr<ABI::Windows::Foundation::Metadata::IApiInformationStatics>
+ api_info_statics;
+ HRESULT hr = GetActivationFactory<
+ ABI::Windows::Foundation::Metadata::IApiInformationStatics,
+ RuntimeClass_Windows_Foundation_Metadata_ApiInformation>(
+ &api_info_statics);
+ if (FAILED(hr))
+ return false;
+
+ HSTRING api_contract;
+ hr = webrtc::CreateHstring(kApiContract, wcslen(kApiContract), &api_contract);
+ if (FAILED(hr))
+ return false;
+
+ boolean is_api_present;
+ hr = api_info_statics->IsApiContractPresentByMajor(
+ api_contract, kRequiredApiContractVersion, &is_api_present);
+ webrtc::DeleteHstring(api_contract);
+ if (FAILED(hr) || !is_api_present)
+ return false;
+
+ HSTRING wgc_session_type;
+ hr = webrtc::CreateHstring(kWgcSessionType, wcslen(kWgcSessionType),
+ &wgc_session_type);
+ if (FAILED(hr))
+ return false;
+
+ boolean is_type_present;
+ hr = api_info_statics->IsTypePresent(wgc_session_type, &is_type_present);
+ webrtc::DeleteHstring(wgc_session_type);
+ if (FAILED(hr) || !is_type_present)
+ return false;
+
+ // If the APIs are present, we need to check that they are supported.
+ ComPtr<WGC::IGraphicsCaptureSessionStatics> capture_session_statics;
+ hr = GetActivationFactory<
+ WGC::IGraphicsCaptureSessionStatics,
+ RuntimeClass_Windows_Graphics_Capture_GraphicsCaptureSession>(
+ &capture_session_statics);
+ if (FAILED(hr))
+ return false;
+
+ boolean is_supported;
+ hr = capture_session_statics->IsSupported(&is_supported);
+ if (FAILED(hr) || !is_supported)
+ return false;
+
+ return true;
+}
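+
+// Callers are expected to gate capturer selection on IsWgcSupported() so they
+// can fall back to another implementation where WGC is unavailable. A sketch
+// (illustrative only; `CreateFallbackScreenCapturer` is a hypothetical
+// placeholder, not an API defined in this file):
+//
+//   std::unique_ptr<DesktopCapturer> capturer =
+//       IsWgcSupported(CaptureType::kScreen)
+//           ? WgcCapturerWin::CreateRawScreenCapturer(options)
+//           : CreateFallbackScreenCapturer(options);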
+
+WgcCapturerWin::WgcCapturerWin(
+ std::unique_ptr<WgcCaptureSourceFactory> source_factory,
+ std::unique_ptr<SourceEnumerator> source_enumerator,
+ bool allow_delayed_capturable_check)
+ : source_factory_(std::move(source_factory)),
+ source_enumerator_(std::move(source_enumerator)),
+ allow_delayed_capturable_check_(allow_delayed_capturable_check) {
+ if (!core_messaging_library_)
+ core_messaging_library_ = LoadLibraryW(kCoreMessagingDll);
+
+ if (core_messaging_library_) {
+ create_dispatcher_queue_controller_func_ =
+ reinterpret_cast<CreateDispatcherQueueControllerFunc>(GetProcAddress(
+ core_messaging_library_, "CreateDispatcherQueueController"));
+ }
+}
+
+WgcCapturerWin::~WgcCapturerWin() {
+ if (core_messaging_library_)
+ FreeLibrary(core_messaging_library_);
+}
+
+// static
+std::unique_ptr<DesktopCapturer> WgcCapturerWin::CreateRawWindowCapturer(
+ const DesktopCaptureOptions& options,
+ bool allow_delayed_capturable_check) {
+ return std::make_unique<WgcCapturerWin>(
+ std::make_unique<WgcWindowSourceFactory>(),
+ std::make_unique<WindowEnumerator>(
+ options.enumerate_current_process_windows()),
+ allow_delayed_capturable_check);
+}
+
+// static
+std::unique_ptr<DesktopCapturer> WgcCapturerWin::CreateRawScreenCapturer(
+ const DesktopCaptureOptions& options) {
+ return std::make_unique<WgcCapturerWin>(
+ std::make_unique<WgcScreenSourceFactory>(),
+ std::make_unique<ScreenEnumerator>(), false);
+}
+
+bool WgcCapturerWin::GetSourceList(SourceList* sources) {
+ return source_enumerator_->FindAllSources(sources);
+}
+
+bool WgcCapturerWin::SelectSource(DesktopCapturer::SourceId id) {
+ capture_source_ = source_factory_->CreateCaptureSource(id);
+ if (allow_delayed_capturable_check_)
+ return true;
+
+ return capture_source_->IsCapturable();
+}
+
+bool WgcCapturerWin::FocusOnSelectedSource() {
+ if (!capture_source_)
+ return false;
+
+ return capture_source_->FocusOnSource();
+}
+
+void WgcCapturerWin::Start(Callback* callback) {
+ RTC_DCHECK(!callback_);
+ RTC_DCHECK(callback);
+ RecordCapturerImpl(DesktopCapturerId::kWgcCapturerWin);
+
+ callback_ = callback;
+
+ // Create a Direct3D11 device to share amongst the WgcCaptureSessions. Many
+  // parameters are nullptr as the implementation uses defaults that work well
+  // for us.
+ HRESULT hr = D3D11CreateDevice(
+ /*adapter=*/nullptr, D3D_DRIVER_TYPE_HARDWARE,
+ /*software_rasterizer=*/nullptr, D3D11_CREATE_DEVICE_BGRA_SUPPORT,
+ /*feature_levels=*/nullptr, /*feature_levels_size=*/0, D3D11_SDK_VERSION,
+ &d3d11_device_, /*feature_level=*/nullptr, /*device_context=*/nullptr);
+ if (hr == DXGI_ERROR_UNSUPPORTED) {
+    // If a hardware device could not be created, fall back to WARP, which is
+    // a high-speed software rasterizer.
+ hr = D3D11CreateDevice(
+ /*adapter=*/nullptr, D3D_DRIVER_TYPE_WARP,
+ /*software_rasterizer=*/nullptr, D3D11_CREATE_DEVICE_BGRA_SUPPORT,
+ /*feature_levels=*/nullptr, /*feature_levels_size=*/0,
+ D3D11_SDK_VERSION, &d3d11_device_, /*feature_level=*/nullptr,
+ /*device_context=*/nullptr);
+ }
+
+ if (FAILED(hr)) {
+ RTC_LOG(LS_ERROR) << "Failed to create D3D11Device: " << hr;
+ }
+}
+
+void WgcCapturerWin::CaptureFrame() {
+ RTC_DCHECK(callback_);
+
+ if (!capture_source_) {
+ RTC_LOG(LS_ERROR) << "Source hasn't been selected";
+ callback_->OnCaptureResult(DesktopCapturer::Result::ERROR_PERMANENT,
+ /*frame=*/nullptr);
+ RecordWgcCapturerResult(WgcCapturerResult::kNoSourceSelected);
+ return;
+ }
+
+ if (!d3d11_device_) {
+ RTC_LOG(LS_ERROR) << "No D3D11D3evice, cannot capture.";
+ callback_->OnCaptureResult(DesktopCapturer::Result::ERROR_PERMANENT,
+ /*frame=*/nullptr);
+ RecordWgcCapturerResult(WgcCapturerResult::kNoDirect3dDevice);
+ return;
+ }
+
+ if (allow_delayed_capturable_check_ && !capture_source_->IsCapturable()) {
+ RTC_LOG(LS_ERROR) << "Source is not capturable.";
+ callback_->OnCaptureResult(DesktopCapturer::Result::ERROR_PERMANENT,
+ /*frame=*/nullptr);
+ return;
+ }
+
+ HRESULT hr;
+ if (!dispatcher_queue_created_) {
+ // Set the apartment type to NONE because this thread should already be COM
+ // initialized.
+ DispatcherQueueOptions options{
+ sizeof(DispatcherQueueOptions),
+ DISPATCHERQUEUE_THREAD_TYPE::DQTYPE_THREAD_CURRENT,
+ DISPATCHERQUEUE_THREAD_APARTMENTTYPE::DQTAT_COM_NONE};
+ ComPtr<ABI::Windows::System::IDispatcherQueueController> queue_controller;
+ hr = create_dispatcher_queue_controller_func_(options, &queue_controller);
+
+ // If there is already a DispatcherQueue on this thread, that is fine. Its
+ // lifetime is tied to the thread's, and as long as the thread has one, even
+ // if we didn't create it, the capture session's events will be delivered on
+ // this thread.
+ if (FAILED(hr) && hr != RPC_E_WRONG_THREAD) {
+ RecordWgcCapturerResult(WgcCapturerResult::kCreateDispatcherQueueFailure);
+ callback_->OnCaptureResult(DesktopCapturer::Result::ERROR_PERMANENT,
+ /*frame=*/nullptr);
+ } else {
+ dispatcher_queue_created_ = true;
+ }
+ }
+
+ int64_t capture_start_time_nanos = rtc::TimeNanos();
+
+ WgcCaptureSession* capture_session = nullptr;
+ std::map<SourceId, WgcCaptureSession>::iterator session_iter =
+ ongoing_captures_.find(capture_source_->GetSourceId());
+ if (session_iter == ongoing_captures_.end()) {
+ ComPtr<WGC::IGraphicsCaptureItem> item;
+ hr = capture_source_->GetCaptureItem(&item);
+ if (FAILED(hr)) {
+ RTC_LOG(LS_ERROR) << "Failed to create a GraphicsCaptureItem: " << hr;
+ callback_->OnCaptureResult(DesktopCapturer::Result::ERROR_PERMANENT,
+ /*frame=*/nullptr);
+ RecordWgcCapturerResult(WgcCapturerResult::kItemCreationFailure);
+ return;
+ }
+
+ std::pair<std::map<SourceId, WgcCaptureSession>::iterator, bool>
+ iter_success_pair = ongoing_captures_.emplace(
+ std::piecewise_construct,
+ std::forward_as_tuple(capture_source_->GetSourceId()),
+ std::forward_as_tuple(d3d11_device_, item,
+ capture_source_->GetSize()));
+ RTC_DCHECK(iter_success_pair.second);
+ capture_session = &iter_success_pair.first->second;
+ } else {
+ capture_session = &session_iter->second;
+ }
+
+ if (!capture_session->IsCaptureStarted()) {
+ hr = capture_session->StartCapture();
+ if (FAILED(hr)) {
+ RTC_LOG(LS_ERROR) << "Failed to start capture: " << hr;
+ ongoing_captures_.erase(capture_source_->GetSourceId());
+ callback_->OnCaptureResult(DesktopCapturer::Result::ERROR_PERMANENT,
+ /*frame=*/nullptr);
+ RecordWgcCapturerResult(WgcCapturerResult::kSessionStartFailure);
+ return;
+ }
+ }
+
+ std::unique_ptr<DesktopFrame> frame;
+ hr = capture_session->GetFrame(&frame);
+ if (FAILED(hr)) {
+ RTC_LOG(LS_ERROR) << "GetFrame failed: " << hr;
+ ongoing_captures_.erase(capture_source_->GetSourceId());
+ callback_->OnCaptureResult(DesktopCapturer::Result::ERROR_PERMANENT,
+ /*frame=*/nullptr);
+ RecordWgcCapturerResult(WgcCapturerResult::kGetFrameFailure);
+ return;
+ }
+
+ if (!frame) {
+ callback_->OnCaptureResult(DesktopCapturer::Result::ERROR_TEMPORARY,
+ /*frame=*/nullptr);
+ RecordWgcCapturerResult(WgcCapturerResult::kFrameDropped);
+ return;
+ }
+
+ int capture_time_ms = (rtc::TimeNanos() - capture_start_time_nanos) /
+ rtc::kNumNanosecsPerMillisec;
+ RTC_HISTOGRAM_COUNTS_1000("WebRTC.DesktopCapture.Win.WgcCapturerFrameTime",
+ capture_time_ms);
+ frame->set_capture_time_ms(capture_time_ms);
+ frame->set_capturer_id(DesktopCapturerId::kWgcCapturerWin);
+ frame->set_may_contain_cursor(true);
+ frame->set_top_left(capture_source_->GetTopLeft());
+ RecordWgcCapturerResult(WgcCapturerResult::kSuccess);
+ callback_->OnCaptureResult(DesktopCapturer::Result::SUCCESS,
+ std::move(frame));
+}
+
+bool WgcCapturerWin::IsSourceBeingCaptured(DesktopCapturer::SourceId id) {
+ std::map<DesktopCapturer::SourceId, WgcCaptureSession>::iterator
+ session_iter = ongoing_captures_.find(id);
+ if (session_iter == ongoing_captures_.end())
+ return false;
+
+ return session_iter->second.IsCaptureStarted();
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/win/wgc_capturer_win.h b/third_party/libwebrtc/modules/desktop_capture/win/wgc_capturer_win.h
new file mode 100644
index 0000000000..d9ee9d3fc6
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/win/wgc_capturer_win.h
@@ -0,0 +1,166 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_DESKTOP_CAPTURE_WIN_WGC_CAPTURER_WIN_H_
+#define MODULES_DESKTOP_CAPTURE_WIN_WGC_CAPTURER_WIN_H_
+
+#include <DispatcherQueue.h>
+#include <d3d11.h>
+#include <wrl/client.h>
+
+#include <map>
+#include <memory>
+
+#include "modules/desktop_capture/desktop_capture_options.h"
+#include "modules/desktop_capture/desktop_capturer.h"
+#include "modules/desktop_capture/win/screen_capture_utils.h"
+#include "modules/desktop_capture/win/wgc_capture_session.h"
+#include "modules/desktop_capture/win/wgc_capture_source.h"
+#include "modules/desktop_capture/win/window_capture_utils.h"
+
+namespace webrtc {
+
+// Checks if the WGC API is present and supported on the system.
+bool IsWgcSupported(CaptureType capture_type);
+
+// WgcCapturerWin is initialized with an implementation of this base class,
+// which it uses to find capturable sources of a particular type. This way,
+// WgcCapturerWin can remain source-agnostic.
+class SourceEnumerator {
+ public:
+ virtual ~SourceEnumerator() = default;
+
+ virtual bool FindAllSources(DesktopCapturer::SourceList* sources) = 0;
+};
+
+class WindowEnumerator final : public SourceEnumerator {
+ public:
+ explicit WindowEnumerator(bool enumerate_current_process_windows)
+ : enumerate_current_process_windows_(enumerate_current_process_windows) {}
+
+ WindowEnumerator(const WindowEnumerator&) = delete;
+ WindowEnumerator& operator=(const WindowEnumerator&) = delete;
+
+ ~WindowEnumerator() override = default;
+
+ bool FindAllSources(DesktopCapturer::SourceList* sources) override {
+ // WGC fails to capture windows with the WS_EX_TOOLWINDOW style, so we
+    // provide it as a filter to ensure windows with that style are not returned.
+ return window_capture_helper_.EnumerateCapturableWindows(
+ sources, enumerate_current_process_windows_, WS_EX_TOOLWINDOW);
+ }
+
+ private:
+ WindowCaptureHelperWin window_capture_helper_;
+ bool enumerate_current_process_windows_;
+};
+
+class ScreenEnumerator final : public SourceEnumerator {
+ public:
+ ScreenEnumerator() = default;
+
+ ScreenEnumerator(const ScreenEnumerator&) = delete;
+ ScreenEnumerator& operator=(const ScreenEnumerator&) = delete;
+
+ ~ScreenEnumerator() override = default;
+
+ bool FindAllSources(DesktopCapturer::SourceList* sources) override {
+ return webrtc::GetScreenList(sources);
+ }
+};
+
+// A capturer that uses the Windows.Graphics.Capture APIs. It is suitable for
+// both window and screen capture (but only one type per instance). Consumers
+// should not instantiate this class directly, instead they should use
+// `CreateRawWindowCapturer()` or `CreateRawScreenCapturer()` to receive a
+// capturer appropriate for the type of source they want to capture.
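+//
+// A typical capture loop looks roughly like this (illustrative sketch; error
+// handling omitted, and `callback` implements DesktopCapturer::Callback):
+//
+//   auto capturer = WgcCapturerWin::CreateRawWindowCapturer(options);
+//   capturer->SelectSource(source_id);
+//   capturer->Start(&callback);
+//   while (capturing)
+//     capturer->CaptureFrame();  // Delivers the result synchronously to
+//                                // `callback` before returning.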
+class WgcCapturerWin : public DesktopCapturer {
+ public:
+ WgcCapturerWin(std::unique_ptr<WgcCaptureSourceFactory> source_factory,
+ std::unique_ptr<SourceEnumerator> source_enumerator,
+ bool allow_delayed_capturable_check);
+
+ WgcCapturerWin(const WgcCapturerWin&) = delete;
+ WgcCapturerWin& operator=(const WgcCapturerWin&) = delete;
+
+ ~WgcCapturerWin() override;
+
+ static std::unique_ptr<DesktopCapturer> CreateRawWindowCapturer(
+ const DesktopCaptureOptions& options,
+ bool allow_delayed_capturable_check = false);
+
+ static std::unique_ptr<DesktopCapturer> CreateRawScreenCapturer(
+ const DesktopCaptureOptions& options);
+
+ // DesktopCapturer interface.
+ bool GetSourceList(SourceList* sources) override;
+ bool SelectSource(SourceId id) override;
+ bool FocusOnSelectedSource() override;
+ void Start(Callback* callback) override;
+ void CaptureFrame() override;
+
+ // Used in WgcCapturerTests.
+ bool IsSourceBeingCaptured(SourceId id);
+
+ private:
+ typedef HRESULT(WINAPI* CreateDispatcherQueueControllerFunc)(
+ DispatcherQueueOptions,
+ ABI::Windows::System::IDispatcherQueueController**);
+
+ // We need to either create or ensure that someone else created a
+ // `DispatcherQueue` on the current thread so that events will be delivered
+ // on the current thread rather than an arbitrary thread. A
+ // `DispatcherQueue`'s lifetime is tied to the thread's, and we don't post
+ // any work to it, so we don't need to hold a reference.
+ bool dispatcher_queue_created_ = false;
+
+ // Statically linking to CoreMessaging.lib is disallowed in Chromium, so we
+ // load it at runtime.
+ HMODULE core_messaging_library_ = NULL;
+ CreateDispatcherQueueControllerFunc create_dispatcher_queue_controller_func_ =
+ nullptr;
+
+ // Factory to create a WgcCaptureSource for us whenever SelectSource is
+ // called. Initialized at construction with a source-specific implementation.
+ std::unique_ptr<WgcCaptureSourceFactory> source_factory_;
+
+ // The source enumerator helps us find capturable sources of the appropriate
+ // type. Initialized at construction with a source-specific implementation.
+ std::unique_ptr<SourceEnumerator> source_enumerator_;
+
+ // The WgcCaptureSource represents the source we are capturing. It tells us
+ // if the source is capturable and it creates the GraphicsCaptureItem for us.
+ std::unique_ptr<WgcCaptureSource> capture_source_;
+
+ // A map of all the sources we are capturing and the associated
+ // WgcCaptureSession. Frames for the current source (indicated via
+ // SelectSource) will be retrieved from the appropriate session when
+ // requested via CaptureFrame.
+ // This helps us efficiently capture multiple sources (e.g. when consumers
+ // are trying to display a list of available capture targets with thumbnails).
+ std::map<SourceId, WgcCaptureSession> ongoing_captures_;
+
+ // The callback that we deliver frames to, synchronously, before CaptureFrame
+ // returns.
+ Callback* callback_ = nullptr;
+
+  // WgcCaptureSource::IsCapturable() is expensive to run, so the caller can
+  // delay the capturable check until CaptureFrame() is called, e.g. when
+  // WgcCapturerWin is used as a fallback capturer.
+ bool allow_delayed_capturable_check_ = false;
+
+ // A Direct3D11 device that is shared amongst the WgcCaptureSessions, who
+ // require one to perform the capture.
+ Microsoft::WRL::ComPtr<::ID3D11Device> d3d11_device_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_DESKTOP_CAPTURE_WIN_WGC_CAPTURER_WIN_H_
diff --git a/third_party/libwebrtc/modules/desktop_capture/win/wgc_capturer_win_unittest.cc b/third_party/libwebrtc/modules/desktop_capture/win/wgc_capturer_win_unittest.cc
new file mode 100644
index 0000000000..a7b656fcfc
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/win/wgc_capturer_win_unittest.cc
@@ -0,0 +1,572 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/win/wgc_capturer_win.h"
+
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "modules/desktop_capture/desktop_capture_options.h"
+#include "modules/desktop_capture/desktop_capture_types.h"
+#include "modules/desktop_capture/desktop_capturer.h"
+#include "modules/desktop_capture/win/test_support/test_window.h"
+#include "modules/desktop_capture/win/wgc_capture_session.h"
+#include "modules/desktop_capture/win/window_capture_utils.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/task_queue_for_test.h"
+#include "rtc_base/thread.h"
+#include "rtc_base/time_utils.h"
+#include "rtc_base/win/scoped_com_initializer.h"
+#include "rtc_base/win/windows_version.h"
+#include "system_wrappers/include/metrics.h"
+#include "system_wrappers/include/sleep.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+constexpr char kWindowThreadName[] = "wgc_capturer_test_window_thread";
+constexpr WCHAR kWindowTitle[] = L"WGC Capturer Test Window";
+
+constexpr char kCapturerImplHistogram[] =
+ "WebRTC.DesktopCapture.Win.DesktopCapturerImpl";
+
+constexpr char kCapturerResultHistogram[] =
+ "WebRTC.DesktopCapture.Win.WgcCapturerResult";
+constexpr int kSuccess = 0;
+constexpr int kSessionStartFailure = 4;
+
+constexpr char kCaptureSessionResultHistogram[] =
+ "WebRTC.DesktopCapture.Win.WgcCaptureSessionStartResult";
+constexpr int kSourceClosed = 1;
+
+constexpr char kCaptureTimeHistogram[] =
+ "WebRTC.DesktopCapture.Win.WgcCapturerFrameTime";
+
+// The capturer keeps `kNumBuffers` in its frame pool, so we need to request
+// that many frames to clear those out. The next frame will have the new size
+// (if the size has changed) so we will resize the frame pool at this point.
+// Then, we need to clear any frames that may have been delivered to the frame
+// pool before the resize. Finally, the next frame will be guaranteed to be the
+// new size.
+constexpr int kNumCapturesToFlushBuffers =
+ WgcCaptureSession::kNumBuffers * 2 + 1;
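+// For example, with kNumBuffers == 2 this works out to 5 captures: two to
+// drain the old-size buffers, one that observes the new size and triggers the
+// frame pool resize, and two more to drain anything delivered before the
+// resize took effect.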
+
+constexpr int kSmallWindowWidth = 200;
+constexpr int kSmallWindowHeight = 100;
+constexpr int kMediumWindowWidth = 300;
+constexpr int kMediumWindowHeight = 200;
+constexpr int kLargeWindowWidth = 400;
+constexpr int kLargeWindowHeight = 500;
+
+// The size of the image we capture is slightly smaller than the actual size of
+// the window.
+constexpr int kWindowWidthSubtrahend = 14;
+constexpr int kWindowHeightSubtrahend = 7;
+
+// Custom message constants so we can direct our thread to close windows and
+// quit running.
+constexpr UINT kDestroyWindow = WM_APP;
+constexpr UINT kQuitRunning = WM_APP + 1;
+
+// When testing changes to real windows, the effects (close or resize)
+// sometimes don't happen immediately. We want to keep trying until we see the
+// effect, but only for a reasonable amount of time.
+constexpr int kMaxTries = 50;
+
+} // namespace
+
+class WgcCapturerWinTest : public ::testing::TestWithParam<CaptureType>,
+ public DesktopCapturer::Callback {
+ public:
+ void SetUp() override {
+ com_initializer_ =
+ std::make_unique<ScopedCOMInitializer>(ScopedCOMInitializer::kMTA);
+ EXPECT_TRUE(com_initializer_->Succeeded());
+
+ if (!IsWgcSupported(GetParam())) {
+ RTC_LOG(LS_INFO)
+ << "Skipping WgcCapturerWinTests on unsupported platforms.";
+ GTEST_SKIP();
+ }
+ }
+
+ void SetUpForWindowCapture(int window_width = kMediumWindowWidth,
+ int window_height = kMediumWindowHeight) {
+ capturer_ = WgcCapturerWin::CreateRawWindowCapturer(
+ DesktopCaptureOptions::CreateDefault());
+ CreateWindowOnSeparateThread(window_width, window_height);
+ StartWindowThreadMessageLoop();
+ source_id_ = GetTestWindowIdFromSourceList();
+ }
+
+ void SetUpForScreenCapture() {
+ capturer_ = WgcCapturerWin::CreateRawScreenCapturer(
+ DesktopCaptureOptions::CreateDefault());
+ source_id_ = GetScreenIdFromSourceList();
+ }
+
+ void TearDown() override {
+ if (window_open_) {
+ CloseTestWindow();
+ }
+ }
+
+ // The window must live on a separate thread so that we can run a message pump
+ // without blocking the test thread. This is necessary if we are interested in
+ // having GraphicsCaptureItem events (i.e. the Closed event) fire, and it more
+ // closely resembles how capture works in the wild.
+ void CreateWindowOnSeparateThread(int window_width, int window_height) {
+ window_thread_ = rtc::Thread::Create();
+ window_thread_->SetName(kWindowThreadName, nullptr);
+ window_thread_->Start();
+ SendTask(window_thread_.get(), [this, window_width, window_height]() {
+ window_thread_id_ = GetCurrentThreadId();
+ window_info_ =
+ CreateTestWindow(kWindowTitle, window_height, window_width);
+ window_open_ = true;
+
+ while (!IsWindowResponding(window_info_.hwnd)) {
+ RTC_LOG(LS_INFO) << "Waiting for test window to become responsive in "
+ "WgcWindowCaptureTest.";
+ }
+
+ while (!IsWindowValidAndVisible(window_info_.hwnd)) {
+ RTC_LOG(LS_INFO) << "Waiting for test window to be visible in "
+ "WgcWindowCaptureTest.";
+ }
+ });
+
+ ASSERT_TRUE(window_thread_->RunningForTest());
+ ASSERT_FALSE(window_thread_->IsCurrent());
+ }
+
+ void StartWindowThreadMessageLoop() {
+ window_thread_->PostTask([this]() {
+ MSG msg;
+ BOOL gm;
+ while ((gm = ::GetMessage(&msg, NULL, 0, 0)) != 0 && gm != -1) {
+ ::DispatchMessage(&msg);
+ if (msg.message == kDestroyWindow) {
+ DestroyTestWindow(window_info_);
+ }
+ if (msg.message == kQuitRunning) {
+ PostQuitMessage(0);
+ }
+ }
+ });
+ }
+
+ void CloseTestWindow() {
+ ::PostThreadMessage(window_thread_id_, kDestroyWindow, 0, 0);
+ ::PostThreadMessage(window_thread_id_, kQuitRunning, 0, 0);
+ window_thread_->Stop();
+ window_open_ = false;
+ }
+
+ DesktopCapturer::SourceId GetTestWindowIdFromSourceList() {
+ // Frequently, the test window will not show up in GetSourceList because it
+    // was created too recently. Since we are confident the window will be
+    // found eventually, we loop here until we find it.
+ intptr_t src_id = 0;
+ do {
+ DesktopCapturer::SourceList sources;
+ EXPECT_TRUE(capturer_->GetSourceList(&sources));
+ auto it = std::find_if(
+ sources.begin(), sources.end(),
+ [&](const DesktopCapturer::Source& src) {
+ return src.id == reinterpret_cast<intptr_t>(window_info_.hwnd);
+ });
+
+ if (it != sources.end())
+ src_id = it->id;
+ } while (src_id != reinterpret_cast<intptr_t>(window_info_.hwnd));
+
+ return src_id;
+ }
+
+ DesktopCapturer::SourceId GetScreenIdFromSourceList() {
+ DesktopCapturer::SourceList sources;
+ EXPECT_TRUE(capturer_->GetSourceList(&sources));
+ EXPECT_GT(sources.size(), 0ULL);
+ return sources[0].id;
+ }
+
+ void DoCapture(int num_captures = 1) {
+ // Capture the requested number of frames. We expect the first capture to
+    // always succeed. If we're asked for multiple frames, we do expect to see
+    // a couple of dropped frames due to resizing the window.
+ const int max_tries = num_captures == 1 ? 1 : kMaxTries;
+ int success_count = 0;
+ for (int i = 0; success_count < num_captures && i < max_tries; i++) {
+ capturer_->CaptureFrame();
+ if (result_ == DesktopCapturer::Result::ERROR_PERMANENT)
+ break;
+ if (result_ == DesktopCapturer::Result::SUCCESS)
+ success_count++;
+ }
+
+ total_successful_captures_ += success_count;
+ EXPECT_EQ(success_count, num_captures);
+ EXPECT_EQ(result_, DesktopCapturer::Result::SUCCESS);
+ EXPECT_TRUE(frame_);
+ EXPECT_GE(metrics::NumEvents(kCapturerResultHistogram, kSuccess),
+ total_successful_captures_);
+ }
+
+ void ValidateFrame(int expected_width, int expected_height) {
+ EXPECT_EQ(frame_->size().width(), expected_width - kWindowWidthSubtrahend);
+ EXPECT_EQ(frame_->size().height(),
+ expected_height - kWindowHeightSubtrahend);
+
+ // Verify the buffer contains as much data as it should.
+ int data_length = frame_->stride() * frame_->size().height();
+
+ // The first and last pixel should have the same color because they will be
+ // from the border of the window.
+ // Pixels have 4 bytes of data so the whole pixel needs a uint32_t to fit.
+    uint32_t first_pixel = *reinterpret_cast<const uint32_t*>(frame_->data());
+    uint32_t last_pixel = *reinterpret_cast<const uint32_t*>(
+        frame_->data() + data_length - DesktopFrame::kBytesPerPixel);
+ EXPECT_EQ(first_pixel, last_pixel);
+
+ // Let's also check a pixel from the middle of the content area, which the
+ // test window will paint a consistent color for us to verify.
+ uint8_t* middle_pixel = frame_->data() + (data_length / 2);
+
+ int sub_pixel_offset = DesktopFrame::kBytesPerPixel / 4;
+ EXPECT_EQ(*middle_pixel, kTestWindowBValue);
+ middle_pixel += sub_pixel_offset;
+ EXPECT_EQ(*middle_pixel, kTestWindowGValue);
+ middle_pixel += sub_pixel_offset;
+ EXPECT_EQ(*middle_pixel, kTestWindowRValue);
+ middle_pixel += sub_pixel_offset;
+
+ // The window is opaque so we expect 0xFF for the Alpha channel.
+ EXPECT_EQ(*middle_pixel, 0xFF);
+ }
+
+ // DesktopCapturer::Callback interface
+ // The capturer synchronously invokes this method before `CaptureFrame()`
+ // returns.
+ void OnCaptureResult(DesktopCapturer::Result result,
+ std::unique_ptr<DesktopFrame> frame) override {
+ result_ = result;
+ frame_ = std::move(frame);
+ }
+
+ protected:
+ std::unique_ptr<ScopedCOMInitializer> com_initializer_;
+ DWORD window_thread_id_;
+ std::unique_ptr<rtc::Thread> window_thread_;
+ WindowInfo window_info_;
+ intptr_t source_id_;
+ bool window_open_ = false;
+ DesktopCapturer::Result result_;
+ int total_successful_captures_ = 0;
+ std::unique_ptr<DesktopFrame> frame_;
+ std::unique_ptr<DesktopCapturer> capturer_;
+};
+
+TEST_P(WgcCapturerWinTest, SelectValidSource) {
+ if (GetParam() == CaptureType::kWindow) {
+ SetUpForWindowCapture();
+ } else {
+ SetUpForScreenCapture();
+ }
+
+ EXPECT_TRUE(capturer_->SelectSource(source_id_));
+}
+
+TEST_P(WgcCapturerWinTest, SelectInvalidSource) {
+ if (GetParam() == CaptureType::kWindow) {
+ capturer_ = WgcCapturerWin::CreateRawWindowCapturer(
+ DesktopCaptureOptions::CreateDefault());
+ source_id_ = kNullWindowId;
+ } else {
+ capturer_ = WgcCapturerWin::CreateRawScreenCapturer(
+ DesktopCaptureOptions::CreateDefault());
+ source_id_ = kInvalidScreenId;
+ }
+
+ EXPECT_FALSE(capturer_->SelectSource(source_id_));
+}
+
+TEST_P(WgcCapturerWinTest, Capture) {
+ if (GetParam() == CaptureType::kWindow) {
+ SetUpForWindowCapture();
+ } else {
+ SetUpForScreenCapture();
+ }
+
+ EXPECT_TRUE(capturer_->SelectSource(source_id_));
+
+ capturer_->Start(this);
+ EXPECT_GE(metrics::NumEvents(kCapturerImplHistogram,
+ DesktopCapturerId::kWgcCapturerWin),
+ 1);
+
+ DoCapture();
+ EXPECT_GT(frame_->size().width(), 0);
+ EXPECT_GT(frame_->size().height(), 0);
+}
+
+TEST_P(WgcCapturerWinTest, CaptureTime) {
+ if (GetParam() == CaptureType::kWindow) {
+ SetUpForWindowCapture();
+ } else {
+ SetUpForScreenCapture();
+ }
+
+ EXPECT_TRUE(capturer_->SelectSource(source_id_));
+ capturer_->Start(this);
+
+ int64_t start_time;
+ start_time = rtc::TimeNanos();
+ capturer_->CaptureFrame();
+
+ int capture_time_ms =
+ (rtc::TimeNanos() - start_time) / rtc::kNumNanosecsPerMillisec;
+ EXPECT_EQ(result_, DesktopCapturer::Result::SUCCESS);
+ EXPECT_TRUE(frame_);
+
+  // The test may measure the time slightly differently than the capturer, so
+  // we just check that it's within 5 ms.
+ EXPECT_NEAR(frame_->capture_time_ms(), capture_time_ms, 5);
+ EXPECT_GE(
+ metrics::NumEvents(kCaptureTimeHistogram, frame_->capture_time_ms()), 1);
+}
+
+INSTANTIATE_TEST_SUITE_P(SourceAgnostic,
+ WgcCapturerWinTest,
+ ::testing::Values(CaptureType::kWindow,
+ CaptureType::kScreen));
+
+TEST(WgcCapturerNoMonitorTest, NoMonitors) {
+ ScopedCOMInitializer com_initializer(ScopedCOMInitializer::kMTA);
+ EXPECT_TRUE(com_initializer.Succeeded());
+ if (HasActiveDisplay()) {
+ RTC_LOG(LS_INFO) << "Skip WgcCapturerWinTest designed specifically for "
+ "systems with no monitors";
+ GTEST_SKIP();
+ }
+
+ // A bug in `CreateForMonitor` prevents screen capture when no displays are
+ // attached.
+ EXPECT_FALSE(IsWgcSupported(CaptureType::kScreen));
+
+ // A bug in the DWM (Desktop Window Manager) prevents it from providing image
+ // data if there are no displays attached. This was fixed in Windows 11.
+  if (rtc_win::GetVersion() < rtc_win::Version::VERSION_WIN11)
+ EXPECT_FALSE(IsWgcSupported(CaptureType::kWindow));
+ else
+ EXPECT_TRUE(IsWgcSupported(CaptureType::kWindow));
+}
+
+class WgcCapturerMonitorTest : public WgcCapturerWinTest {
+ public:
+  void SetUp() override {
+ com_initializer_ =
+ std::make_unique<ScopedCOMInitializer>(ScopedCOMInitializer::kMTA);
+ EXPECT_TRUE(com_initializer_->Succeeded());
+
+ if (!IsWgcSupported(CaptureType::kScreen)) {
+ RTC_LOG(LS_INFO)
+ << "Skipping WgcCapturerWinTests on unsupported platforms.";
+ GTEST_SKIP();
+ }
+ }
+};
+
+TEST_F(WgcCapturerMonitorTest, FocusOnMonitor) {
+ SetUpForScreenCapture();
+ EXPECT_TRUE(capturer_->SelectSource(0));
+
+ // You can't set focus on a monitor.
+ EXPECT_FALSE(capturer_->FocusOnSelectedSource());
+}
+
+TEST_F(WgcCapturerMonitorTest, CaptureAllMonitors) {
+ SetUpForScreenCapture();
+ EXPECT_TRUE(capturer_->SelectSource(kFullDesktopScreenId));
+
+ capturer_->Start(this);
+ DoCapture();
+ EXPECT_GT(frame_->size().width(), 0);
+ EXPECT_GT(frame_->size().height(), 0);
+}
+
+class WgcCapturerWindowTest : public WgcCapturerWinTest {
+ public:
+  void SetUp() override {
+ com_initializer_ =
+ std::make_unique<ScopedCOMInitializer>(ScopedCOMInitializer::kMTA);
+ EXPECT_TRUE(com_initializer_->Succeeded());
+
+ if (!IsWgcSupported(CaptureType::kWindow)) {
+ RTC_LOG(LS_INFO)
+ << "Skipping WgcCapturerWinTests on unsupported platforms.";
+ GTEST_SKIP();
+ }
+ }
+};
+
+TEST_F(WgcCapturerWindowTest, FocusOnWindow) {
+ capturer_ = WgcCapturerWin::CreateRawWindowCapturer(
+ DesktopCaptureOptions::CreateDefault());
+ window_info_ = CreateTestWindow(kWindowTitle);
+  source_id_ = GetTestWindowIdFromSourceList();
+
+ EXPECT_TRUE(capturer_->SelectSource(source_id_));
+ EXPECT_TRUE(capturer_->FocusOnSelectedSource());
+
+ HWND hwnd = reinterpret_cast<HWND>(source_id_);
+ EXPECT_EQ(hwnd, ::GetActiveWindow());
+ EXPECT_EQ(hwnd, ::GetForegroundWindow());
+ EXPECT_EQ(hwnd, ::GetFocus());
+ DestroyTestWindow(window_info_);
+}
+
+TEST_F(WgcCapturerWindowTest, SelectMinimizedWindow) {
+ SetUpForWindowCapture();
+ MinimizeTestWindow(reinterpret_cast<HWND>(source_id_));
+ EXPECT_FALSE(capturer_->SelectSource(source_id_));
+
+ UnminimizeTestWindow(reinterpret_cast<HWND>(source_id_));
+ EXPECT_TRUE(capturer_->SelectSource(source_id_));
+}
+
+TEST_F(WgcCapturerWindowTest, SelectClosedWindow) {
+ SetUpForWindowCapture();
+ EXPECT_TRUE(capturer_->SelectSource(source_id_));
+
+ CloseTestWindow();
+ EXPECT_FALSE(capturer_->SelectSource(source_id_));
+}
+
+TEST_F(WgcCapturerWindowTest, UnsupportedWindowStyle) {
+ // Create a window with the WS_EX_TOOLWINDOW style, which WGC does not
+ // support.
+ window_info_ = CreateTestWindow(kWindowTitle, kMediumWindowWidth,
+ kMediumWindowHeight, WS_EX_TOOLWINDOW);
+ capturer_ = WgcCapturerWin::CreateRawWindowCapturer(
+ DesktopCaptureOptions::CreateDefault());
+ DesktopCapturer::SourceList sources;
+ EXPECT_TRUE(capturer_->GetSourceList(&sources));
+ auto it = std::find_if(
+ sources.begin(), sources.end(), [&](const DesktopCapturer::Source& src) {
+ return src.id == reinterpret_cast<intptr_t>(window_info_.hwnd);
+ });
+
+ // We should not find the window, since we filter for unsupported styles.
+ EXPECT_EQ(it, sources.end());
+ DestroyTestWindow(window_info_);
+}
+
+TEST_F(WgcCapturerWindowTest, IncreaseWindowSizeMidCapture) {
+ SetUpForWindowCapture(kSmallWindowWidth, kSmallWindowHeight);
+ EXPECT_TRUE(capturer_->SelectSource(source_id_));
+
+ capturer_->Start(this);
+ DoCapture();
+ ValidateFrame(kSmallWindowWidth, kSmallWindowHeight);
+
+ ResizeTestWindow(window_info_.hwnd, kSmallWindowWidth, kMediumWindowHeight);
+ DoCapture(kNumCapturesToFlushBuffers);
+ ValidateFrame(kSmallWindowWidth, kMediumWindowHeight);
+
+ ResizeTestWindow(window_info_.hwnd, kLargeWindowWidth, kMediumWindowHeight);
+ DoCapture(kNumCapturesToFlushBuffers);
+ ValidateFrame(kLargeWindowWidth, kMediumWindowHeight);
+}
+
+TEST_F(WgcCapturerWindowTest, ReduceWindowSizeMidCapture) {
+ SetUpForWindowCapture(kLargeWindowWidth, kLargeWindowHeight);
+ EXPECT_TRUE(capturer_->SelectSource(source_id_));
+
+ capturer_->Start(this);
+ DoCapture();
+ ValidateFrame(kLargeWindowWidth, kLargeWindowHeight);
+
+ ResizeTestWindow(window_info_.hwnd, kLargeWindowWidth, kMediumWindowHeight);
+ DoCapture(kNumCapturesToFlushBuffers);
+ ValidateFrame(kLargeWindowWidth, kMediumWindowHeight);
+
+ ResizeTestWindow(window_info_.hwnd, kSmallWindowWidth, kMediumWindowHeight);
+ DoCapture(kNumCapturesToFlushBuffers);
+ ValidateFrame(kSmallWindowWidth, kMediumWindowHeight);
+}
+
+TEST_F(WgcCapturerWindowTest, MinimizeWindowMidCapture) {
+ SetUpForWindowCapture();
+ EXPECT_TRUE(capturer_->SelectSource(source_id_));
+
+ capturer_->Start(this);
+
+  // Minimize the window; capture should continue but return temporary
+  // errors.
+ MinimizeTestWindow(window_info_.hwnd);
+ for (int i = 0; i < 5; ++i) {
+ capturer_->CaptureFrame();
+ EXPECT_EQ(result_, DesktopCapturer::Result::ERROR_TEMPORARY);
+ }
+
+ // Reopen the window and the capture should continue normally.
+ UnminimizeTestWindow(window_info_.hwnd);
+ DoCapture();
+ // We can't verify the window size here because the test window does not
+ // repaint itself after it is unminimized, but capturing successfully is still
+ // a good test.
+}
+
+TEST_F(WgcCapturerWindowTest, CloseWindowMidCapture) {
+ SetUpForWindowCapture();
+ EXPECT_TRUE(capturer_->SelectSource(source_id_));
+
+ capturer_->Start(this);
+ DoCapture();
+ ValidateFrame(kMediumWindowWidth, kMediumWindowHeight);
+
+ CloseTestWindow();
+
+  // We need to pump our message queue so the Closed event will be delivered
+  // to the capturer's event handler. If we are too early and the Closed event
+  // hasn't arrived yet, we keep trying until the capturer receives it and
+  // stops.
+ auto* wgc_capturer = static_cast<WgcCapturerWin*>(capturer_.get());
+ MSG msg;
+ for (int i = 0;
+ wgc_capturer->IsSourceBeingCaptured(source_id_) && i < kMaxTries; ++i) {
+ // Unlike GetMessage, PeekMessage will not hang if there are no messages in
+ // the queue.
+ PeekMessage(&msg, 0, 0, 0, PM_REMOVE);
+ SleepMs(1);
+ }
+
+ EXPECT_FALSE(wgc_capturer->IsSourceBeingCaptured(source_id_));
+
+ // The frame pool can buffer `kNumBuffers` frames. We must consume these
+ // and then make one more call to CaptureFrame before we expect to see the
+ // failure.
+ int num_tries = 0;
+ do {
+ capturer_->CaptureFrame();
+ } while (result_ == DesktopCapturer::Result::SUCCESS &&
+ ++num_tries <= WgcCaptureSession::kNumBuffers);
+
+ EXPECT_GE(metrics::NumEvents(kCapturerResultHistogram, kSessionStartFailure),
+ 1);
+ EXPECT_GE(metrics::NumEvents(kCaptureSessionResultHistogram, kSourceClosed),
+ 1);
+ EXPECT_EQ(result_, DesktopCapturer::Result::ERROR_PERMANENT);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/win/wgc_desktop_frame.cc b/third_party/libwebrtc/modules/desktop_capture/win/wgc_desktop_frame.cc
new file mode 100644
index 0000000000..dd9009120b
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/win/wgc_desktop_frame.cc
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/win/wgc_desktop_frame.h"
+
+#include <utility>
+
+namespace webrtc {
+
+WgcDesktopFrame::WgcDesktopFrame(DesktopSize size,
+ int stride,
+ std::vector<uint8_t>&& image_data)
+ : DesktopFrame(size, stride, image_data.data(), nullptr),
+ image_data_(std::move(image_data)) {}
+
+WgcDesktopFrame::~WgcDesktopFrame() = default;
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/win/wgc_desktop_frame.h b/third_party/libwebrtc/modules/desktop_capture/win/wgc_desktop_frame.h
new file mode 100644
index 0000000000..0a671cf2f8
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/win/wgc_desktop_frame.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_DESKTOP_CAPTURE_WIN_WGC_DESKTOP_FRAME_H_
+#define MODULES_DESKTOP_CAPTURE_WIN_WGC_DESKTOP_FRAME_H_
+
+#include <d3d11.h>
+#include <wrl/client.h>
+
+#include <memory>
+#include <vector>
+
+#include "modules/desktop_capture/desktop_frame.h"
+#include "modules/desktop_capture/desktop_geometry.h"
+
+namespace webrtc {
+
+// DesktopFrame implementation used by capturers that use the
+// Windows.Graphics.Capture API.
+class WgcDesktopFrame final : public DesktopFrame {
+ public:
+ // WgcDesktopFrame receives an rvalue reference to the `image_data` vector
+ // so that it can take ownership of it (and avoid a copy).
+ WgcDesktopFrame(DesktopSize size,
+ int stride,
+ std::vector<uint8_t>&& image_data);
+
+ WgcDesktopFrame(const WgcDesktopFrame&) = delete;
+ WgcDesktopFrame& operator=(const WgcDesktopFrame&) = delete;
+
+ ~WgcDesktopFrame() override;
+
+ private:
+ std::vector<uint8_t> image_data_;
+};
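+
+// Usage sketch (illustrative only; the buffer sizing below assumes a 32-bit
+// BGRA frame and is not part of this change):
+//
+//   std::vector<uint8_t> data(size.width() * size.height() *
+//                             DesktopFrame::kBytesPerPixel);
+//   const int stride = size.width() * DesktopFrame::kBytesPerPixel;
+//   WgcDesktopFrame frame(size, stride, std::move(data));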
+
+} // namespace webrtc
+
+#endif // MODULES_DESKTOP_CAPTURE_WIN_WGC_DESKTOP_FRAME_H_
diff --git a/third_party/libwebrtc/modules/desktop_capture/win/window_capture_utils.cc b/third_party/libwebrtc/modules/desktop_capture/win/window_capture_utils.cc
new file mode 100644
index 0000000000..d58c02e17c
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/win/window_capture_utils.cc
@@ -0,0 +1,486 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/win/window_capture_utils.h"
+
+// Just for the DWMWINDOWATTRIBUTE enums (DWMWA_CLOAKED).
+#include <dwmapi.h>
+
+#include <algorithm>
+
+#include "modules/desktop_capture/win/scoped_gdi_object.h"
+#include "rtc_base/arraysize.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/string_utils.h"
+#include "rtc_base/win/windows_version.h"
+
+namespace webrtc {
+
+namespace {
+
+struct GetWindowListParams {
+ GetWindowListParams(int flags,
+ LONG ex_style_filters,
+ DesktopCapturer::SourceList* result)
+ : ignore_untitled(flags & GetWindowListFlags::kIgnoreUntitled),
+ ignore_unresponsive(flags & GetWindowListFlags::kIgnoreUnresponsive),
+ ignore_current_process_windows(
+ flags & GetWindowListFlags::kIgnoreCurrentProcessWindows),
+ ex_style_filters(ex_style_filters),
+ result(result) {}
+ const bool ignore_untitled;
+ const bool ignore_unresponsive;
+ const bool ignore_current_process_windows;
+ const LONG ex_style_filters;
+ DesktopCapturer::SourceList* const result;
+};
+
+bool IsWindowOwnedByCurrentProcess(HWND hwnd) {
+ DWORD process_id;
+ GetWindowThreadProcessId(hwnd, &process_id);
+ return process_id == GetCurrentProcessId();
+}
+
+BOOL CALLBACK GetWindowListHandler(HWND hwnd, LPARAM param) {
+ GetWindowListParams* params = reinterpret_cast<GetWindowListParams*>(param);
+ DesktopCapturer::SourceList* list = params->result;
+
+ // Skip invisible and minimized windows
+ if (!IsWindowVisible(hwnd) || IsIconic(hwnd)) {
+ return TRUE;
+ }
+
+  // Skip windows that are not present in the taskbar, namely owned windows
+  // that don't have the app window style set.
+ HWND owner = GetWindow(hwnd, GW_OWNER);
+ LONG exstyle = GetWindowLong(hwnd, GWL_EXSTYLE);
+ if (owner && !(exstyle & WS_EX_APPWINDOW)) {
+ return TRUE;
+ }
+
+ // Filter out windows that match the extended styles the caller has specified,
+ // e.g. WS_EX_TOOLWINDOW for capturers that don't support overlay windows.
+ if (exstyle & params->ex_style_filters) {
+ return TRUE;
+ }
+
+ if (params->ignore_unresponsive && !IsWindowResponding(hwnd)) {
+ return TRUE;
+ }
+
+ DesktopCapturer::Source window;
+ window.id = reinterpret_cast<WindowId>(hwnd);
+
+ DWORD pid;
+ GetWindowThreadProcessId(hwnd, &pid);
+ window.pid = static_cast<pid_t>(pid);
+
+ // GetWindowText* are potentially blocking operations if `hwnd` is
+ // owned by the current process. The APIs will send messages to the window's
+ // message loop, and if the message loop is waiting on this operation we will
+ // enter a deadlock.
+ // https://docs.microsoft.com/en-us/windows/win32/api/winuser/nf-winuser-getwindowtexta#remarks
+ //
+ // To help consumers avoid this, there is a DesktopCaptureOption to ignore
+ // windows owned by the current process. Consumers should either ensure that
+ // the thread running their message loop never waits on this operation, or use
+ // the option to exclude these windows from the source list.
+ bool owned_by_current_process = IsWindowOwnedByCurrentProcess(hwnd);
+ if (owned_by_current_process && params->ignore_current_process_windows) {
+ return TRUE;
+ }
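+
+  // Consumer-side sketch (illustrative; assumes a DesktopCaptureOptions
+  // setter mirroring the getter used elsewhere in this change):
+  //
+  //   DesktopCaptureOptions options = DesktopCaptureOptions::CreateDefault();
+  //   options.set_enumerate_current_process_windows(false);
+  //
+  // which makes capturers pass kIgnoreCurrentProcessWindows and sidestep the
+  // GetWindowText* deadlock described above.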
+
+ // Even if consumers request to enumerate windows owned by the current
+ // process, we should not call GetWindowText* on unresponsive windows owned by
+ // the current process because we will hang. Unfortunately, we could still
+ // hang if the window becomes unresponsive after this check, hence the option
+ // to avoid these completely.
+ if (!owned_by_current_process || IsWindowResponding(hwnd)) {
+ const size_t kTitleLength = 500;
+ WCHAR window_title[kTitleLength] = L"";
+ if (GetWindowTextLength(hwnd) != 0 &&
+ GetWindowTextW(hwnd, window_title, kTitleLength) > 0) {
+ window.title = rtc::ToUtf8(window_title);
+ }
+ }
+
+  // Skip a window if we failed to convert its title or the title is empty.
+ if (params->ignore_untitled && window.title.empty())
+ return TRUE;
+
+ // Capture the window class name, to allow specific window classes to be
+ // skipped.
+ //
+ // https://docs.microsoft.com/en-us/windows/win32/api/winuser/ns-winuser-wndclassa
+  // says the lpszClassName field in WNDCLASS is limited to 256 characters, so
+  // we don't need a buffer bigger than that.
+ const size_t kMaxClassNameLength = 256;
+ WCHAR class_name[kMaxClassNameLength] = L"";
+ const int class_name_length =
+ GetClassNameW(hwnd, class_name, kMaxClassNameLength);
+ if (class_name_length < 1)
+ return TRUE;
+
+ // Skip Program Manager window.
+ if (wcscmp(class_name, L"Progman") == 0)
+ return TRUE;
+
+  // Skip the Start button window on Windows Vista and Windows 7.
+  // On Windows 8, 8.1, and 10 the Start button is not a top-level window, so
+  // it will not be examined here.
+ if (wcscmp(class_name, L"Button") == 0)
+ return TRUE;
+
+ list->push_back(window);
+
+ return TRUE;
+}
+
+} // namespace
+
+// Prefix used to match the window class for Chrome windows.
+const wchar_t kChromeWindowClassPrefix[] = L"Chrome_WidgetWin_";
+
+// The hidden taskbar will leave a 2 pixel margin on the screen.
+const int kHiddenTaskbarMarginOnScreen = 2;
+
+bool GetWindowRect(HWND window, DesktopRect* result) {
+ RECT rect;
+ if (!::GetWindowRect(window, &rect)) {
+ return false;
+ }
+ *result = DesktopRect::MakeLTRB(rect.left, rect.top, rect.right, rect.bottom);
+ return true;
+}
+
+bool GetCroppedWindowRect(HWND window,
+ bool avoid_cropping_border,
+ DesktopRect* cropped_rect,
+ DesktopRect* original_rect) {
+ DesktopRect window_rect;
+ if (!GetWindowRect(window, &window_rect)) {
+ return false;
+ }
+
+ if (original_rect) {
+ *original_rect = window_rect;
+ }
+ *cropped_rect = window_rect;
+
+ bool is_maximized = false;
+ if (!IsWindowMaximized(window, &is_maximized)) {
+ return false;
+ }
+
+  // As of Windows 8, transparent resize borders are added by the OS at the
+  // left/bottom/right sides of a resizable window. If the cropped window
+  // doesn't remove these borders, the background will be exposed a bit.
+ if (rtc::rtc_win::GetVersion() >= rtc::rtc_win::Version::VERSION_WIN8 ||
+ is_maximized) {
+ // Only apply this cropping to windows with a resize border (otherwise,
+ // it'd clip the edges of captured pop-up windows without this border).
+ LONG style = GetWindowLong(window, GWL_STYLE);
+ if (style & WS_THICKFRAME || style & DS_MODALFRAME) {
+ int width = GetSystemMetrics(SM_CXSIZEFRAME);
+ int bottom_height = GetSystemMetrics(SM_CYSIZEFRAME);
+ const int visible_border_height = GetSystemMetrics(SM_CYBORDER);
+ int top_height = visible_border_height;
+
+ // If requested, avoid cropping the visible window border. This is used
+ // for pop-up windows to include their border, but not for the outermost
+ // window (where a partially-transparent border may expose the
+ // background a bit).
+ if (avoid_cropping_border) {
+ width = std::max(0, width - GetSystemMetrics(SM_CXBORDER));
+ bottom_height = std::max(0, bottom_height - visible_border_height);
+ top_height = 0;
+ }
+ cropped_rect->Extend(-width, -top_height, -width, -bottom_height);
+ }
+ }
+
+ return true;
+}
+
+bool GetWindowContentRect(HWND window, DesktopRect* result) {
+ if (!GetWindowRect(window, result)) {
+ return false;
+ }
+
+ RECT rect;
+ if (!::GetClientRect(window, &rect)) {
+ return false;
+ }
+
+ const int width = rect.right - rect.left;
+  // GetClientRect() is not expected to return a larger area than
+  // GetWindowRect().
+ if (width > 0 && width < result->width()) {
+    // - GetClientRect() always sets the left / top of RECT to 0. So we need
+    //   to estimate the border width from GetClientRect() and GetWindowRect().
+    // - The border width of a window varies according to the window type.
+    // - GetClientRect() excludes the title bar, which should be considered
+    //   part of the content and included in the captured frame. So we always
+    //   estimate the border width according to the window width.
+    // - We assume a window has the same border width on each side.
+    // So we shrink half of the width difference from all four sides.
+ const int shrink = ((width - result->width()) / 2);
+ // When `shrink` is negative, DesktopRect::Extend() shrinks itself.
+ result->Extend(shrink, 0, shrink, 0);
+    // Usually this should not happen; it's just in case we have received a
+    // strange window that has only left and right borders.
+ if (result->height() > shrink * 2) {
+ result->Extend(0, shrink, 0, shrink);
+ }
+ RTC_DCHECK(!result->is_empty());
+ }
+
+ return true;
+}
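+
+// Worked example for the border estimate above (illustrative numbers): if
+// GetWindowRect() reports a 1000 px wide window and GetClientRect() reports a
+// 984 px wide client area, then shrink = (984 - 1000) / 2 = -8 and
+// Extend(-8, 0, -8, 0) trims the assumed 8 px border from the left and right
+// sides (and likewise from top/bottom when the height check passes).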
+
+int GetWindowRegionTypeWithBoundary(HWND window, DesktopRect* result) {
+ win::ScopedGDIObject<HRGN, win::DeleteObjectTraits<HRGN>> scoped_hrgn(
+ CreateRectRgn(0, 0, 0, 0));
+ const int region_type = GetWindowRgn(window, scoped_hrgn.Get());
+
+ if (region_type == SIMPLEREGION) {
+ RECT rect;
+ GetRgnBox(scoped_hrgn.Get(), &rect);
+ *result =
+ DesktopRect::MakeLTRB(rect.left, rect.top, rect.right, rect.bottom);
+ }
+ return region_type;
+}
+
+bool GetDcSize(HDC hdc, DesktopSize* size) {
+ win::ScopedGDIObject<HGDIOBJ, win::DeleteObjectTraits<HGDIOBJ>> scoped_hgdi(
+ GetCurrentObject(hdc, OBJ_BITMAP));
+ BITMAP bitmap;
+ memset(&bitmap, 0, sizeof(BITMAP));
+ if (GetObject(scoped_hgdi.Get(), sizeof(BITMAP), &bitmap) == 0) {
+ return false;
+ }
+ size->set(bitmap.bmWidth, bitmap.bmHeight);
+ return true;
+}
+
+bool IsWindowMaximized(HWND window, bool* result) {
+ WINDOWPLACEMENT placement;
+ memset(&placement, 0, sizeof(WINDOWPLACEMENT));
+ placement.length = sizeof(WINDOWPLACEMENT);
+ if (!::GetWindowPlacement(window, &placement)) {
+ return false;
+ }
+
+ *result = (placement.showCmd == SW_SHOWMAXIMIZED);
+ return true;
+}
+
+bool IsWindowValidAndVisible(HWND window) {
+ return IsWindow(window) && IsWindowVisible(window) && !IsIconic(window);
+}
+
+bool IsWindowResponding(HWND window) {
+  // 50 ms is chosen to tolerate a system under heavy load while not delaying
+  // window enumeration considerably.
+ const UINT uTimeoutMs = 50;
+ return SendMessageTimeout(window, WM_NULL, 0, 0, SMTO_ABORTIFHUNG, uTimeoutMs,
+ nullptr);
+}
+
+bool GetWindowList(int flags,
+ DesktopCapturer::SourceList* windows,
+ LONG ex_style_filters) {
+ GetWindowListParams params(flags, ex_style_filters, windows);
+ return ::EnumWindows(&GetWindowListHandler,
+ reinterpret_cast<LPARAM>(&params)) != 0;
+}
+
+// WindowCaptureHelperWin implementation.
+WindowCaptureHelperWin::WindowCaptureHelperWin() {
+ // Try to load dwmapi.dll dynamically since it is not available on XP.
+ dwmapi_library_ = LoadLibraryW(L"dwmapi.dll");
+ if (dwmapi_library_) {
+ func_ = reinterpret_cast<DwmIsCompositionEnabledFunc>(
+ GetProcAddress(dwmapi_library_, "DwmIsCompositionEnabled"));
+ dwm_get_window_attribute_func_ =
+ reinterpret_cast<DwmGetWindowAttributeFunc>(
+ GetProcAddress(dwmapi_library_, "DwmGetWindowAttribute"));
+ }
+
+ if (rtc::rtc_win::GetVersion() >= rtc::rtc_win::Version::VERSION_WIN10) {
+ if (FAILED(::CoCreateInstance(__uuidof(VirtualDesktopManager), nullptr,
+ CLSCTX_ALL,
+ IID_PPV_ARGS(&virtual_desktop_manager_)))) {
+      RTC_LOG(LS_WARNING) << "Failed to create VirtualDesktopManager instance";
+ }
+ }
+}
+
+WindowCaptureHelperWin::~WindowCaptureHelperWin() {
+ if (dwmapi_library_) {
+ FreeLibrary(dwmapi_library_);
+ }
+}
+
+bool WindowCaptureHelperWin::IsAeroEnabled() {
+ BOOL result = FALSE;
+ if (func_) {
+ func_(&result);
+ }
+ return result != FALSE;
+}
+
+// This is just a best guess at identifying a notification window. Chrome uses
+// the native Windows framework for showing notifications. So far what we know
+// about such a window includes: no title, a class name with the prefix
+// "Chrome_WidgetWin_", and certain extended styles.
+bool WindowCaptureHelperWin::IsWindowChromeNotification(HWND hwnd) {
+ const size_t kTitleLength = 32;
+ WCHAR window_title[kTitleLength];
+ GetWindowTextW(hwnd, window_title, kTitleLength);
+ if (wcsnlen_s(window_title, kTitleLength) != 0) {
+ return false;
+ }
+
+ const size_t kClassLength = 256;
+ WCHAR class_name[kClassLength];
+ const int class_name_length = GetClassNameW(hwnd, class_name, kClassLength);
+ if (class_name_length < 1 ||
+ wcsncmp(class_name, kChromeWindowClassPrefix,
+ wcsnlen_s(kChromeWindowClassPrefix, kClassLength)) != 0) {
+ return false;
+ }
+
+ const LONG exstyle = GetWindowLong(hwnd, GWL_EXSTYLE);
+ if ((exstyle & WS_EX_NOACTIVATE) && (exstyle & WS_EX_TOOLWINDOW) &&
+ (exstyle & WS_EX_TOPMOST)) {
+ return true;
+ }
+
+ return false;
+}
+
+// `content_rect` is preferred because:
+// 1. WindowCapturerWinGdi uses a GDI capturer, which cannot capture DX
+//    output, so ScreenCapturer should be used as much as possible to avoid
+//    uncapturable cases. Note: lots of new applications use DX output
+//    (hardware acceleration) to improve performance, and that output cannot
+//    be captured by WindowCapturerWinGdi. See bug http://crbug.com/741770.
+// 2. WindowCapturerWinGdi is still useful because we do not want to expose
+//    the content of other windows if the target window is covered by them.
+// 3. Shadows and borders should not be considered "content" of other windows
+//    because they do not expose any useful information.
+//
+// So we can bear the false-negative cases (the target window is covered by
+// the borders or shadow of other windows, but we have not detected it) in
+// favor of using ScreenCapturer, rather than let the false-positive cases
+// (the target window is only covered by the borders or shadow of other
+// windows, but we treat it as overlapping) impact the user experience.
+bool WindowCaptureHelperWin::AreWindowsOverlapping(
+ HWND hwnd,
+ HWND selected_hwnd,
+ const DesktopRect& selected_window_rect) {
+ DesktopRect content_rect;
+ if (!GetWindowContentRect(hwnd, &content_rect)) {
+ // Bail out if failed to get the window area.
+ return true;
+ }
+ content_rect.IntersectWith(selected_window_rect);
+
+ if (content_rect.is_empty()) {
+ return false;
+ }
+
+  // When the taskbar is automatically hidden, it leaves a 2 pixel margin on
+  // the screen, which will overlap a maximized selected window that uses the
+  // full screen area. Since there is no solid way to identify a hidden
+  // taskbar window, we make an exemption here when the overlap with a
+  // maximized window is 2 px by the full screen width or height.
+ bool is_maximized = false;
+ IsWindowMaximized(selected_hwnd, &is_maximized);
+ bool overlaps_hidden_horizontal_taskbar =
+ selected_window_rect.width() == content_rect.width() &&
+ content_rect.height() == kHiddenTaskbarMarginOnScreen;
+ bool overlaps_hidden_vertical_taskbar =
+ selected_window_rect.height() == content_rect.height() &&
+ content_rect.width() == kHiddenTaskbarMarginOnScreen;
+ if (is_maximized && (overlaps_hidden_horizontal_taskbar ||
+ overlaps_hidden_vertical_taskbar)) {
+ return false;
+ }
+
+ return true;
+}
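+
+// Worked example for the hidden-taskbar exemption above (illustrative
+// numbers): with a 1920x1080 maximized window and an auto-hidden horizontal
+// taskbar, the taskbar sliver intersects the window in a 1920x2 strip, so
+// `overlaps_hidden_horizontal_taskbar` is true and the windows are not
+// treated as overlapping.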
+
+bool WindowCaptureHelperWin::IsWindowOnCurrentDesktop(HWND hwnd) {
+ // Make sure the window is on the current virtual desktop.
+ if (virtual_desktop_manager_) {
+ BOOL on_current_desktop;
+ if (SUCCEEDED(virtual_desktop_manager_->IsWindowOnCurrentVirtualDesktop(
+ hwnd, &on_current_desktop)) &&
+ !on_current_desktop) {
+ return false;
+ }
+ }
+ return true;
+}
+
+bool WindowCaptureHelperWin::IsWindowVisibleOnCurrentDesktop(HWND hwnd) {
+ return IsWindowValidAndVisible(hwnd) && IsWindowOnCurrentDesktop(hwnd) &&
+ !IsWindowCloaked(hwnd);
+}
+
+// A cloaked window is composited but not visible to the user.
+// Example: Cortana or the Action Center when collapsed.
+bool WindowCaptureHelperWin::IsWindowCloaked(HWND hwnd) {
+ if (!dwm_get_window_attribute_func_) {
+ // Does not apply.
+ return false;
+ }
+
+ int res = 0;
+ if (dwm_get_window_attribute_func_(hwnd, DWMWA_CLOAKED, &res, sizeof(res)) !=
+ S_OK) {
+ // Cannot tell so assume not cloaked for backward compatibility.
+ return false;
+ }
+
+ return res != 0;
+}
+
+bool WindowCaptureHelperWin::EnumerateCapturableWindows(
+ DesktopCapturer::SourceList* results,
+ bool enumerate_current_process_windows,
+ LONG ex_style_filters) {
+ int flags = (GetWindowListFlags::kIgnoreUntitled |
+ GetWindowListFlags::kIgnoreUnresponsive);
+ if (!enumerate_current_process_windows) {
+ flags |= GetWindowListFlags::kIgnoreCurrentProcessWindows;
+ }
+
+ if (!webrtc::GetWindowList(flags, results, ex_style_filters)) {
+ return false;
+ }
+
+ for (auto it = results->begin(); it != results->end();) {
+ if (!IsWindowVisibleOnCurrentDesktop(reinterpret_cast<HWND>(it->id))) {
+ it = results->erase(it);
+ } else {
+ ++it;
+ }
+ }
+
+ return true;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/win/window_capture_utils.h b/third_party/libwebrtc/modules/desktop_capture/win/window_capture_utils.h
new file mode 100644
index 0000000000..caea07958d
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/win/window_capture_utils.h
@@ -0,0 +1,136 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_DESKTOP_CAPTURE_WIN_WINDOW_CAPTURE_UTILS_H_
+#define MODULES_DESKTOP_CAPTURE_WIN_WINDOW_CAPTURE_UTILS_H_
+
+#include <shlobj.h>
+#include <windows.h>
+#include <wrl/client.h>
+
+#include "modules/desktop_capture/desktop_capturer.h"
+#include "modules/desktop_capture/desktop_geometry.h"
+
+namespace webrtc {
+
+// Outputs the window rect. The returned DesktopRect is in system coordinates,
+// i.e. the primary monitor on the system always starts from (0, 0). This
+// function returns false if native APIs fail.
+bool GetWindowRect(HWND window, DesktopRect* result);
+
+// Outputs the window rect, with the left/right/bottom frame border cropped if
+// the window is maximized or has a transparent resize border.
+// `avoid_cropping_border` may be set to true to avoid cropping the visible
+// border when cropping any resize border.
+// `cropped_rect` is the cropped rect relative to the
+// desktop. `original_rect` is the original rect returned from GetWindowRect.
+// Returns true if all API calls succeeded. The returned DesktopRect is in
+// system coordinates, i.e. the primary monitor on the system always starts from
+// (0, 0). `original_rect` can be nullptr.
+//
+// TODO(zijiehe): Move this function to CroppingWindowCapturerWin after it has
+// been removed from MouseCursorMonitorWin.
+// This function should only be used by CroppingWindowCapturerWin. Instead, a
+// DesktopRect CropWindowRect(const DesktopRect& rect)
+// utility function should be added to help CroppingWindowCapturerWin and
+// WindowCapturerWinGdi crop out the borders or shadow according to their
+// scenarios; this function is too generic and easy to misuse.
+bool GetCroppedWindowRect(HWND window,
+ bool avoid_cropping_border,
+ DesktopRect* cropped_rect,
+ DesktopRect* original_rect);
+
+// Retrieves the rectangle of the content area of `window`. Usually it contains
+// title bar and window client area, but borders or shadow are excluded. The
+// returned DesktopRect is in system coordinates, i.e. the primary monitor on
+// the system always starts from (0, 0). This function returns false if native
+// APIs fail.
+bool GetWindowContentRect(HWND window, DesktopRect* result);
+
+// Returns the region type of the `window` and fills `result` with the region
+// of `window` if the region type is SIMPLEREGION.
+int GetWindowRegionTypeWithBoundary(HWND window, DesktopRect* result);
+
+// Retrieves the size of the `hdc`. This function returns false if native APIs
+// fail.
+bool GetDcSize(HDC hdc, DesktopSize* size);
+
+// Retrieves whether the `window` is maximized and stores in `result`. This
+// function returns false if native APIs fail.
+bool IsWindowMaximized(HWND window, bool* result);
+
+// Checks that the HWND is for a valid window, that window's visibility state is
+// visible, and that it is not minimized.
+bool IsWindowValidAndVisible(HWND window);
+
+// Checks if a window responds to a message within 50ms.
+bool IsWindowResponding(HWND window);
+
+enum GetWindowListFlags {
+ kNone = 0x00,
+ kIgnoreUntitled = 1 << 0,
+ kIgnoreUnresponsive = 1 << 1,
+ kIgnoreCurrentProcessWindows = 1 << 2,
+};
+
+// Retrieves the list of top-level windows on the screen.
+// Some windows will be ignored:
+// - Those that are invisible or minimized.
+// - Program Manager & Start menu.
+// - [with kIgnoreUntitled] windows with no title.
+// - [with kIgnoreUnresponsive] windows that are unresponsive.
+// - [with kIgnoreCurrentProcessWindows] windows owned by the current process.
+// - Any windows with extended styles that match `ex_style_filters`.
+// Returns false if native APIs failed.
+bool GetWindowList(int flags,
+ DesktopCapturer::SourceList* windows,
+ LONG ex_style_filters = 0);
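+
+// Example (illustrative only): enumerate titled, responsive windows while
+// excluding tool windows:
+//
+//   DesktopCapturer::SourceList sources;
+//   GetWindowList(GetWindowListFlags::kIgnoreUntitled |
+//                     GetWindowListFlags::kIgnoreUnresponsive,
+//                 &sources, WS_EX_TOOLWINDOW);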
+
+typedef HRESULT(WINAPI* DwmIsCompositionEnabledFunc)(BOOL* enabled);
+typedef HRESULT(WINAPI* DwmGetWindowAttributeFunc)(HWND hwnd,
+ DWORD flag,
+ PVOID result_ptr,
+ DWORD result_size);
+class WindowCaptureHelperWin {
+ public:
+ WindowCaptureHelperWin();
+ ~WindowCaptureHelperWin();
+
+ WindowCaptureHelperWin(const WindowCaptureHelperWin&) = delete;
+ WindowCaptureHelperWin& operator=(const WindowCaptureHelperWin&) = delete;
+
+ bool IsAeroEnabled();
+ bool IsWindowChromeNotification(HWND hwnd);
+ bool AreWindowsOverlapping(HWND hwnd,
+ HWND selected_hwnd,
+ const DesktopRect& selected_window_rect);
+ bool IsWindowOnCurrentDesktop(HWND hwnd);
+ bool IsWindowVisibleOnCurrentDesktop(HWND hwnd);
+ bool IsWindowCloaked(HWND hwnd);
+
+ // The optional `ex_style_filters` parameter allows callers to provide
+ // extended window styles (e.g. WS_EX_TOOLWINDOW) and prevent windows that
+ // match from being included in `results`.
+ bool EnumerateCapturableWindows(DesktopCapturer::SourceList* results,
+ bool enumerate_current_process_windows,
+ LONG ex_style_filters = 0);
+
+ private:
+ HMODULE dwmapi_library_ = nullptr;
+ DwmIsCompositionEnabledFunc func_ = nullptr;
+ DwmGetWindowAttributeFunc dwm_get_window_attribute_func_ = nullptr;
+
+ // Only used on Win10+.
+ Microsoft::WRL::ComPtr<IVirtualDesktopManager> virtual_desktop_manager_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_DESKTOP_CAPTURE_WIN_WINDOW_CAPTURE_UTILS_H_
diff --git a/third_party/libwebrtc/modules/desktop_capture/win/window_capture_utils_unittest.cc b/third_party/libwebrtc/modules/desktop_capture/win/window_capture_utils_unittest.cc
new file mode 100644
index 0000000000..137440b09e
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/win/window_capture_utils_unittest.cc
@@ -0,0 +1,153 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/win/window_capture_utils.h"
+
+#include <winuser.h>
+
+#include <algorithm>
+#include <memory>
+#include <mutex>
+
+#include "modules/desktop_capture/desktop_capturer.h"
+#include "modules/desktop_capture/win/test_support/test_window.h"
+#include "rtc_base/task_queue_for_test.h"
+#include "rtc_base/thread.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+const char kWindowThreadName[] = "window_capture_utils_test_thread";
+const WCHAR kWindowTitle[] = L"Window Capture Utils Test";
+
+std::unique_ptr<rtc::Thread> SetUpUnresponsiveWindow(std::mutex& mtx,
+ WindowInfo& info) {
+ std::unique_ptr<rtc::Thread> window_thread;
+ window_thread = rtc::Thread::Create();
+ window_thread->SetName(kWindowThreadName, nullptr);
+ window_thread->Start();
+
+ SendTask(window_thread.get(), [&] { info = CreateTestWindow(kWindowTitle); });
+
+  // Intentionally create a deadlock to cause the window to become
+  // unresponsive: this thread holds `mtx`, so the task posted below blocks on
+  // `mtx.lock()` and stalls the window thread until the test calls
+  // `mtx.unlock()`.
+ mtx.lock();
+ window_thread->PostTask([&mtx]() {
+ mtx.lock();
+ mtx.unlock();
+ });
+
+ return window_thread;
+}
+
+} // namespace
+
+TEST(WindowCaptureUtilsTest, GetWindowList) {
+ WindowInfo info = CreateTestWindow(kWindowTitle);
+ DesktopCapturer::SourceList window_list;
+ ASSERT_TRUE(GetWindowList(GetWindowListFlags::kNone, &window_list));
+ EXPECT_GT(window_list.size(), 0ULL);
+ EXPECT_NE(std::find_if(window_list.begin(), window_list.end(),
+ [&info](DesktopCapturer::Source window) {
+ return reinterpret_cast<HWND>(window.id) ==
+ info.hwnd;
+ }),
+ window_list.end());
+ DestroyTestWindow(info);
+}
+
+TEST(WindowCaptureUtilsTest, IncludeUnresponsiveWindows) {
+ std::mutex mtx;
+ WindowInfo info;
+ std::unique_ptr<rtc::Thread> window_thread =
+ SetUpUnresponsiveWindow(mtx, info);
+
+ EXPECT_FALSE(IsWindowResponding(info.hwnd));
+
+ DesktopCapturer::SourceList window_list;
+ ASSERT_TRUE(GetWindowList(GetWindowListFlags::kNone, &window_list));
+ EXPECT_GT(window_list.size(), 0ULL);
+ EXPECT_NE(std::find_if(window_list.begin(), window_list.end(),
+ [&info](DesktopCapturer::Source window) {
+ return reinterpret_cast<HWND>(window.id) ==
+ info.hwnd;
+ }),
+ window_list.end());
+
+ mtx.unlock();
+ SendTask(window_thread.get(), [&info]() { DestroyTestWindow(info); });
+ window_thread->Stop();
+}
+
+TEST(WindowCaptureUtilsTest, IgnoreUnresponsiveWindows) {
+ std::mutex mtx;
+ WindowInfo info;
+ std::unique_ptr<rtc::Thread> window_thread =
+ SetUpUnresponsiveWindow(mtx, info);
+
+ EXPECT_FALSE(IsWindowResponding(info.hwnd));
+
+ DesktopCapturer::SourceList window_list;
+ ASSERT_TRUE(
+ GetWindowList(GetWindowListFlags::kIgnoreUnresponsive, &window_list));
+ EXPECT_EQ(std::find_if(window_list.begin(), window_list.end(),
+ [&info](DesktopCapturer::Source window) {
+ return reinterpret_cast<HWND>(window.id) ==
+ info.hwnd;
+ }),
+ window_list.end());
+
+ mtx.unlock();
+ SendTask(window_thread.get(), [&info]() { DestroyTestWindow(info); });
+ window_thread->Stop();
+}
+
+TEST(WindowCaptureUtilsTest, IncludeUntitledWindows) {
+ WindowInfo info = CreateTestWindow(L"");
+ DesktopCapturer::SourceList window_list;
+ ASSERT_TRUE(GetWindowList(GetWindowListFlags::kNone, &window_list));
+ EXPECT_GT(window_list.size(), 0ULL);
+ EXPECT_NE(std::find_if(window_list.begin(), window_list.end(),
+ [&info](DesktopCapturer::Source window) {
+ return reinterpret_cast<HWND>(window.id) ==
+ info.hwnd;
+ }),
+ window_list.end());
+ DestroyTestWindow(info);
+}
+
+TEST(WindowCaptureUtilsTest, IgnoreUntitledWindows) {
+ WindowInfo info = CreateTestWindow(L"");
+ DesktopCapturer::SourceList window_list;
+ ASSERT_TRUE(GetWindowList(GetWindowListFlags::kIgnoreUntitled, &window_list));
+ EXPECT_EQ(std::find_if(window_list.begin(), window_list.end(),
+ [&info](DesktopCapturer::Source window) {
+ return reinterpret_cast<HWND>(window.id) ==
+ info.hwnd;
+ }),
+ window_list.end());
+ DestroyTestWindow(info);
+}
+
+TEST(WindowCaptureUtilsTest, IgnoreCurrentProcessWindows) {
+ WindowInfo info = CreateTestWindow(kWindowTitle);
+ DesktopCapturer::SourceList window_list;
+ ASSERT_TRUE(GetWindowList(GetWindowListFlags::kIgnoreCurrentProcessWindows,
+ &window_list));
+ EXPECT_EQ(std::find_if(window_list.begin(), window_list.end(),
+ [&info](DesktopCapturer::Source window) {
+ return reinterpret_cast<HWND>(window.id) ==
+ info.hwnd;
+ }),
+ window_list.end());
+ DestroyTestWindow(info);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/win/window_capturer_win_gdi.cc b/third_party/libwebrtc/modules/desktop_capture/win/window_capturer_win_gdi.cc
new file mode 100644
index 0000000000..6fd3a4db6e
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/win/window_capturer_win_gdi.cc
@@ -0,0 +1,400 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/win/window_capturer_win_gdi.h"
+
+#include <cmath>
+#include <map>
+#include <memory>
+#include <utility>
+#include <vector>
+
+#include "modules/desktop_capture/cropped_desktop_frame.h"
+#include "modules/desktop_capture/desktop_capture_metrics_helper.h"
+#include "modules/desktop_capture/desktop_capture_types.h"
+#include "modules/desktop_capture/desktop_capturer.h"
+#include "modules/desktop_capture/desktop_frame_win.h"
+#include "modules/desktop_capture/win/screen_capture_utils.h"
+#include "modules/desktop_capture/win/selected_window_context.h"
+#include "rtc_base/arraysize.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/string_utils.h"
+#include "rtc_base/time_utils.h"
+#include "rtc_base/trace_event.h"
+#include "rtc_base/win/windows_version.h"
+#include "system_wrappers/include/metrics.h"
+
+namespace webrtc {
+
+// Used to pass input/output data during the EnumWindows call to collect
+// owned/pop-up windows that should be captured.
+struct OwnedWindowCollectorContext : public SelectedWindowContext {
+ OwnedWindowCollectorContext(HWND selected_window,
+ DesktopRect selected_window_rect,
+ WindowCaptureHelperWin* window_capture_helper,
+ std::vector<HWND>* owned_windows)
+ : SelectedWindowContext(selected_window,
+ selected_window_rect,
+ window_capture_helper),
+ owned_windows(owned_windows) {}
+
+ std::vector<HWND>* owned_windows;
+};
+
+// Called via EnumWindows for each root window; adds owned/pop-up windows that
+// should be captured to a vector it's passed.
+BOOL CALLBACK OwnedWindowCollector(HWND hwnd, LPARAM param) {
+ OwnedWindowCollectorContext* context =
+ reinterpret_cast<OwnedWindowCollectorContext*>(param);
+ if (hwnd == context->selected_window()) {
+ // Windows are enumerated in top-down z-order, so we can stop enumerating
+ // upon reaching the selected window.
+ return FALSE;
+ }
+
+ // Skip windows that aren't visible pop-up windows.
+ if (!(GetWindowLong(hwnd, GWL_STYLE) & WS_POPUP) ||
+ !context->window_capture_helper()->IsWindowVisibleOnCurrentDesktop(
+ hwnd)) {
+ return TRUE;
+ }
+
+ // Owned windows that intersect the selected window should be captured.
+ if (context->IsWindowOwnedBySelectedWindow(hwnd) &&
+ context->IsWindowOverlappingSelectedWindow(hwnd)) {
+ // Skip windows that draw shadows around menus. These "SysShadow" windows
+ // would otherwise be captured as solid black bars with no transparency
+ // gradient (since this capturer doesn't detect / respect variations in the
+ // window alpha channel). Any other semi-transparent owned windows will be
+ // captured fully-opaque. This seems preferable to excluding them (at least
+ // when they have content aside from a solid fill color / visual adornment;
+ // e.g. some tooltips have the transparent style set).
+ if (GetWindowLong(hwnd, GWL_EXSTYLE) & WS_EX_TRANSPARENT) {
+ const WCHAR kSysShadow[] = L"SysShadow";
+ const size_t kClassLength = arraysize(kSysShadow);
+ WCHAR class_name[kClassLength];
+ const int class_name_length =
+ GetClassNameW(hwnd, class_name, kClassLength);
+ if (class_name_length == kClassLength - 1 &&
+ wcscmp(class_name, kSysShadow) == 0) {
+ return TRUE;
+ }
+ }
+
+ context->owned_windows->push_back(hwnd);
+ }
+
+ return TRUE;
+}
+
+WindowCapturerWinGdi::WindowCapturerWinGdi(
+ bool enumerate_current_process_windows)
+ : enumerate_current_process_windows_(enumerate_current_process_windows) {}
+WindowCapturerWinGdi::~WindowCapturerWinGdi() {}
+
+bool WindowCapturerWinGdi::GetSourceList(SourceList* sources) {
+ if (!window_capture_helper_.EnumerateCapturableWindows(
+ sources, enumerate_current_process_windows_))
+ return false;
+
+ std::map<HWND, DesktopSize> new_map;
+ for (const auto& item : *sources) {
+ HWND hwnd = reinterpret_cast<HWND>(item.id);
+ new_map[hwnd] = window_size_map_[hwnd];
+ }
+ window_size_map_.swap(new_map);
+
+ return true;
+}
+
+bool WindowCapturerWinGdi::SelectSource(SourceId id) {
+ HWND window = reinterpret_cast<HWND>(id);
+ if (!IsWindowValidAndVisible(window))
+ return false;
+
+ window_ = window;
+ // When a window is not in the map, window_size_map_[window] will create an
+ // item with DesktopSize (0, 0).
+ previous_size_ = window_size_map_[window];
+ return true;
+}
+
+bool WindowCapturerWinGdi::FocusOnSelectedSource() {
+ if (!window_)
+ return false;
+
+ if (!IsWindowValidAndVisible(window_))
+ return false;
+
+ return BringWindowToTop(window_) && SetForegroundWindow(window_);
+}
+
+bool WindowCapturerWinGdi::IsOccluded(const DesktopVector& pos) {
+ DesktopVector sys_pos = pos.add(GetFullscreenRect().top_left());
+ HWND hwnd =
+ reinterpret_cast<HWND>(window_finder_.GetWindowUnderPoint(sys_pos));
+
+ return hwnd != window_ &&
+ std::find(owned_windows_.begin(), owned_windows_.end(), hwnd) ==
+ owned_windows_.end();
+}
+
+void WindowCapturerWinGdi::Start(Callback* callback) {
+ RTC_DCHECK(!callback_);
+ RTC_DCHECK(callback);
+ RecordCapturerImpl(DesktopCapturerId::kWindowCapturerWinGdi);
+
+ callback_ = callback;
+}
+
+void WindowCapturerWinGdi::CaptureFrame() {
+ RTC_DCHECK(callback_);
+ int64_t capture_start_time_nanos = rtc::TimeNanos();
+
+ CaptureResults results = CaptureFrame(/*capture_owned_windows*/ true);
+ if (!results.frame) {
+ callback_->OnCaptureResult(Result::ERROR_TEMPORARY, nullptr);
+ return;
+ }
+
+ int capture_time_ms = (rtc::TimeNanos() - capture_start_time_nanos) /
+ rtc::kNumNanosecsPerMillisec;
+ RTC_HISTOGRAM_COUNTS_1000(
+ "WebRTC.DesktopCapture.Win.WindowGdiCapturerFrameTime", capture_time_ms);
+ results.frame->set_capture_time_ms(capture_time_ms);
+ results.frame->set_capturer_id(DesktopCapturerId::kWindowCapturerWinGdi);
+ callback_->OnCaptureResult(results.result, std::move(results.frame));
+}
+
+WindowCapturerWinGdi::CaptureResults WindowCapturerWinGdi::CaptureFrame(
+ bool capture_owned_windows) {
+ TRACE_EVENT0("webrtc", "WindowCapturerWinGdi::CaptureFrame");
+
+ if (!window_) {
+ RTC_LOG(LS_ERROR) << "Window hasn't been selected: " << GetLastError();
+ return {Result::ERROR_PERMANENT, nullptr};
+ }
+
+ // Stop capturing if the window has been closed.
+ if (!IsWindow(window_)) {
+ RTC_LOG(LS_ERROR) << "Target window has been closed.";
+ return {Result::ERROR_PERMANENT, nullptr};
+ }
+
+ // Determine the window region excluding any resize border, and including
+ // any visible border if capturing an owned window / dialog. (Don't include
+ // any visible border for the selected window for consistency with
+ // CroppingWindowCapturerWin, which would expose a bit of the background
+ // through the partially-transparent border.)
+ const bool avoid_cropping_border = !capture_owned_windows;
+ DesktopRect cropped_rect;
+ DesktopRect original_rect;
+
+ if (!GetCroppedWindowRect(window_, avoid_cropping_border, &cropped_rect,
+ &original_rect)) {
+ RTC_LOG(LS_WARNING) << "Failed to get drawable window area: "
+ << GetLastError();
+ return {Result::ERROR_TEMPORARY, nullptr};
+ }
+
+  // Return a 1x1 black frame if the window is minimized or invisible on the
+  // current desktop, to match behavior on Mac. The window can be temporarily
+  // invisible during the transition of full screen mode on/off.
+ if (original_rect.is_empty() ||
+ !window_capture_helper_.IsWindowVisibleOnCurrentDesktop(window_)) {
+ std::unique_ptr<DesktopFrame> frame(
+ new BasicDesktopFrame(DesktopSize(1, 1)));
+
+ previous_size_ = frame->size();
+ window_size_map_[window_] = previous_size_;
+ return {Result::SUCCESS, std::move(frame)};
+ }
+
+ HDC window_dc = GetWindowDC(window_);
+ if (!window_dc) {
+ RTC_LOG(LS_WARNING) << "Failed to get window DC: " << GetLastError();
+ return {Result::ERROR_TEMPORARY, nullptr};
+ }
+
+ DesktopRect unscaled_cropped_rect = cropped_rect;
+ double horizontal_scale = 1.0;
+ double vertical_scale = 1.0;
+
+ DesktopSize window_dc_size;
+ if (GetDcSize(window_dc, &window_dc_size)) {
+ // The `window_dc_size` is used to detect the scaling of the original
+ // window. If the application does not support high-DPI settings, it will
+ // be scaled by Windows according to the scaling setting.
+ // https://www.google.com/search?q=windows+scaling+settings&ie=UTF-8
+ // So the size of the `window_dc`, i.e. the bitmap we can retrieve from
+ // PrintWindow() or BitBlt() function, will be smaller than
+ // `original_rect` and `cropped_rect`. Part of the captured desktop frame
+ // will be black. See
+ // bug https://bugs.chromium.org/p/webrtc/issues/detail?id=8112 for
+ // details.
+
+ // If `window_dc_size` is smaller than `window_rect`, let's resize both
+ // `original_rect` and `cropped_rect` according to the scaling factor.
+ // This will adjust the width and height of the two rects.
+ horizontal_scale =
+ static_cast<double>(window_dc_size.width()) / original_rect.width();
+ vertical_scale =
+ static_cast<double>(window_dc_size.height()) / original_rect.height();
+ original_rect.Scale(horizontal_scale, vertical_scale);
+ cropped_rect.Scale(horizontal_scale, vertical_scale);
+
+ // Translate `cropped_rect` to the left so that its position within
+ // `original_rect` remains accurate after scaling.
+ // See crbug.com/1083527 for more info.
+ int translate_left = static_cast<int>(std::round(
+ (cropped_rect.left() - original_rect.left()) * (horizontal_scale - 1)));
+ int translate_top = static_cast<int>(std::round(
+ (cropped_rect.top() - original_rect.top()) * (vertical_scale - 1)));
+ cropped_rect.Translate(translate_left, translate_top);
+ }
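+
+  // Worked example (illustrative numbers): if `original_rect` is 1000 px wide
+  // but the DC bitmap is only 800 px wide, `horizontal_scale` is 0.8 and both
+  // rects are scaled down to match what PrintWindow()/BitBlt() can actually
+  // deliver; the Translate() above then keeps `cropped_rect` aligned within
+  // the scaled `original_rect`.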
+
+ std::unique_ptr<DesktopFrameWin> frame(
+ DesktopFrameWin::Create(original_rect.size(), nullptr, window_dc));
+ if (!frame.get()) {
+ RTC_LOG(LS_WARNING) << "Failed to create frame.";
+ ReleaseDC(window_, window_dc);
+ return {Result::ERROR_TEMPORARY, nullptr};
+ }
+
+ HDC mem_dc = CreateCompatibleDC(window_dc);
+ HGDIOBJ previous_object = SelectObject(mem_dc, frame->bitmap());
+ BOOL result = FALSE;
+
+  // When desktop composition (Aero) is enabled, each window is rendered to a
+  // private buffer, allowing BitBlt() to get the window content even if the
+  // window is occluded. PrintWindow() is slower but allows rendering the
+  // window contents to an off-screen device context when Aero is not
+  // available. PrintWindow() is not supported by some applications.
+  //
+  // If Aero is enabled, we prefer BitBlt() because it's faster and avoids
+  // window flickering. Otherwise, we prefer PrintWindow() because BitBlt() may
+  // render occluding windows on top of the desired window.
+  //
+  // When composition is enabled, the DC returned by GetWindowDC() doesn't
+  // always have the window frame rendered correctly. Windows renders it only
+  // once and then caches the result between captures. We work around this by
+  // calling PrintWindow() whenever the window size changes, including the
+  // first capture - it somehow affects what we get from BitBlt() on subsequent
+  // captures.
+ //
+ // For Windows 8.1 and later, we want to always use PrintWindow when the
+ // cropping screen capturer falls back to the window capturer. I.e.
+ // on Windows 8.1 and later, PrintWindow is only used when the window is
+ // occluded. When the window is not occluded, it is much faster to capture
+ // the screen and to crop it to the window position and size.
+ if (rtc::rtc_win::GetVersion() >= rtc::rtc_win::Version::VERSION_WIN8) {
+    // Special flag that makes PrintWindow work on Windows 8.1 and later.
+    // Indeed certain apps (e.g. those using DirectComposition rendering) can't
+    // be captured using BitBlt or PrintWindow without this flag. Note that on
+    // Windows 8.0 this flag is not supported, so the block below will fall
+    // back to the other call to PrintWindow. It seems to be very tricky to
+    // detect Windows 8.0 vs 8.1, so a try/fallback is more appropriate here.
+ const UINT flags = PW_RENDERFULLCONTENT;
+ result = PrintWindow(window_, mem_dc, flags);
+ }
+
+ if (!result && (!window_capture_helper_.IsAeroEnabled() ||
+ !previous_size_.equals(frame->size()))) {
+ result = PrintWindow(window_, mem_dc, 0);
+ }
+
+  // Aero is enabled or PrintWindow() failed; fall back to BitBlt().
+ if (!result) {
+ result = BitBlt(mem_dc, 0, 0, frame->size().width(), frame->size().height(),
+ window_dc, 0, 0, SRCCOPY);
+ }
+
+ SelectObject(mem_dc, previous_object);
+ DeleteDC(mem_dc);
+ ReleaseDC(window_, window_dc);
+
+ previous_size_ = frame->size();
+ window_size_map_[window_] = previous_size_;
+
+ frame->mutable_updated_region()->SetRect(
+ DesktopRect::MakeSize(frame->size()));
+ frame->set_top_left(
+ original_rect.top_left().subtract(GetFullscreenRect().top_left()));
+
+ if (!result) {
+ RTC_LOG(LS_ERROR) << "Both PrintWindow() and BitBlt() failed.";
+ return {Result::ERROR_TEMPORARY, nullptr};
+ }
+
+ // Rect for the data is relative to the first pixel of the frame.
+ cropped_rect.Translate(-original_rect.left(), -original_rect.top());
+ std::unique_ptr<DesktopFrame> cropped_frame =
+ CreateCroppedDesktopFrame(std::move(frame), cropped_rect);
+ RTC_DCHECK(cropped_frame);
+
+ if (capture_owned_windows) {
+ // If any owned/pop-up windows overlap the selected window, capture them
+ // and copy/composite their contents into the frame.
+ owned_windows_.clear();
+ OwnedWindowCollectorContext context(window_, unscaled_cropped_rect,
+ &window_capture_helper_,
+ &owned_windows_);
+
+ if (context.IsSelectedWindowValid()) {
+ EnumWindows(OwnedWindowCollector, reinterpret_cast<LPARAM>(&context));
+
+ if (!owned_windows_.empty()) {
+ if (!owned_window_capturer_) {
+ owned_window_capturer_ = std::make_unique<WindowCapturerWinGdi>(
+ enumerate_current_process_windows_);
+ }
+
+        // Owned windows are stored in top-down z-order, so this iterates in
+        // reverse to capture / draw them in bottom-up z-order.
+ for (auto it = owned_windows_.rbegin(); it != owned_windows_.rend();
+ it++) {
+ HWND hwnd = *it;
+ if (owned_window_capturer_->SelectSource(
+ reinterpret_cast<SourceId>(hwnd))) {
+ CaptureResults results = owned_window_capturer_->CaptureFrame(
+ /*capture_owned_windows*/ false);
+
+ if (results.result != DesktopCapturer::Result::SUCCESS) {
+ // Simply log any error capturing an owned/pop-up window without
+ // bubbling it up to the caller (an expected error here is that
+ // the owned/pop-up window was closed; any unexpected errors won't
+ // fail the outer capture).
+ RTC_LOG(LS_INFO) << "Capturing owned window failed (previous "
+ "error/warning pertained to that)";
+ } else {
+              // Copy / composite the captured frame into the outer frame.
+              // This may no-op if they no longer intersect (if the owned
+              // window was moved outside the owner bounds since it was
+              // scheduled for capture).
+ cropped_frame->CopyIntersectingPixelsFrom(
+ *results.frame, horizontal_scale, vertical_scale);
+ }
+ }
+ }
+ }
+ }
+ }
+
+ return {Result::SUCCESS, std::move(cropped_frame)};
+}
+
+// static
+std::unique_ptr<DesktopCapturer> WindowCapturerWinGdi::CreateRawWindowCapturer(
+ const DesktopCaptureOptions& options) {
+ return std::unique_ptr<DesktopCapturer>(
+ new WindowCapturerWinGdi(options.enumerate_current_process_windows()));
+}
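+
+// Usage sketch (illustrative only; `source_id` and `my_callback` are
+// placeholders, not part of this change):
+//
+//   auto capturer = WindowCapturerWinGdi::CreateRawWindowCapturer(
+//       DesktopCaptureOptions::CreateDefault());
+//   capturer->SelectSource(source_id);  // id obtained via GetSourceList().
+//   capturer->Start(&my_callback);      // my_callback implements Callback.
+//   capturer->CaptureFrame();           // delivered via OnCaptureResult().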
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/win/window_capturer_win_gdi.h b/third_party/libwebrtc/modules/desktop_capture/win/window_capturer_win_gdi.h
new file mode 100644
index 0000000000..bf94dfe192
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/win/window_capturer_win_gdi.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_DESKTOP_CAPTURE_WIN_WINDOW_CAPTURER_WIN_GDI_H_
+#define MODULES_DESKTOP_CAPTURE_WIN_WINDOW_CAPTURER_WIN_GDI_H_
+
+#include <map>
+#include <memory>
+#include <vector>
+
+#include "modules/desktop_capture/desktop_capture_options.h"
+#include "modules/desktop_capture/desktop_capturer.h"
+#include "modules/desktop_capture/win/window_capture_utils.h"
+#include "modules/desktop_capture/window_finder_win.h"
+
+namespace webrtc {
+
+class WindowCapturerWinGdi : public DesktopCapturer {
+ public:
+ explicit WindowCapturerWinGdi(bool enumerate_current_process_windows);
+
+ // Disallow copy and assign
+ WindowCapturerWinGdi(const WindowCapturerWinGdi&) = delete;
+ WindowCapturerWinGdi& operator=(const WindowCapturerWinGdi&) = delete;
+
+ ~WindowCapturerWinGdi() override;
+
+ static std::unique_ptr<DesktopCapturer> CreateRawWindowCapturer(
+ const DesktopCaptureOptions& options);
+
+ // DesktopCapturer interface.
+ void Start(Callback* callback) override;
+ void CaptureFrame() override;
+ bool GetSourceList(SourceList* sources) override;
+ bool SelectSource(SourceId id) override;
+ bool FocusOnSelectedSource() override;
+ bool IsOccluded(const DesktopVector& pos) override;
+
+ private:
+ struct CaptureResults {
+ Result result;
+ std::unique_ptr<DesktopFrame> frame;
+ };
+
+ CaptureResults CaptureFrame(bool capture_owned_windows);
+
+ Callback* callback_ = nullptr;
+
+  // HWND for the currently selected window, or nullptr if no window is
+  // selected.
+ HWND window_ = nullptr;
+
+ DesktopSize previous_size_;
+
+ WindowCaptureHelperWin window_capture_helper_;
+
+ bool enumerate_current_process_windows_;
+
+  // This map is used to avoid flickering when SelectSource() calls are
+  // interleaved with CaptureFrame() calls.
+ std::map<HWND, DesktopSize> window_size_map_;
+
+ WindowFinderWin window_finder_;
+
+ std::vector<HWND> owned_windows_;
+ std::unique_ptr<WindowCapturerWinGdi> owned_window_capturer_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_DESKTOP_CAPTURE_WIN_WINDOW_CAPTURER_WIN_GDI_H_
diff --git a/third_party/libwebrtc/modules/desktop_capture/window_capturer_linux.cc b/third_party/libwebrtc/modules/desktop_capture/window_capturer_linux.cc
new file mode 100644
index 0000000000..b2b1e376ad
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/window_capturer_linux.cc
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <memory>
+
+#include "modules/desktop_capture/desktop_capture_options.h"
+#include "modules/desktop_capture/desktop_capturer.h"
+
+#if defined(WEBRTC_USE_PIPEWIRE)
+#if defined(WEBRTC_MOZILLA_BUILD)
+#include "modules/desktop_capture/linux/wayland/moz_base_capturer_pipewire.h"
+#else
+#include "modules/desktop_capture/linux/wayland/base_capturer_pipewire.h"
+#endif
+#endif // defined(WEBRTC_USE_PIPEWIRE)
+
+#if defined(WEBRTC_USE_X11)
+#include "modules/desktop_capture/linux/x11/window_capturer_x11.h"
+#endif // defined(WEBRTC_USE_X11)
+
+namespace webrtc {
+
+// static
+std::unique_ptr<DesktopCapturer> DesktopCapturer::CreateRawWindowCapturer(
+ const DesktopCaptureOptions& options) {
+#if defined(WEBRTC_USE_PIPEWIRE)
+ if (options.allow_pipewire() && DesktopCapturer::IsRunningUnderWayland()) {
+#if defined(WEBRTC_MOZILLA_BUILD)
+ return BaseCapturerPipeWire::CreateRawCapturer(options);
+#else
+ return std::make_unique<BaseCapturerPipeWire>(options);
+#endif
+ }
+#endif // defined(WEBRTC_USE_PIPEWIRE)
+
+#if defined(WEBRTC_USE_X11)
+ return WindowCapturerX11::CreateRawWindowCapturer(options);
+#else
+ return nullptr;
+#endif // defined(WEBRTC_USE_X11)
+}
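+
+// Backend selection summary (descriptive note, not part of the original
+// change): PipeWire is preferred when compiled in and the session runs under
+// Wayland; otherwise the X11 capturer is used when available, and nullptr is
+// returned when neither backend was built.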
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/window_capturer_mac.mm b/third_party/libwebrtc/modules/desktop_capture/window_capturer_mac.mm
new file mode 100644
index 0000000000..882498bc34
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/window_capturer_mac.mm
@@ -0,0 +1,211 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <ApplicationServices/ApplicationServices.h>
+#include <Cocoa/Cocoa.h>
+#include <CoreFoundation/CoreFoundation.h>
+
+#include <utility>
+
+#include "api/scoped_refptr.h"
+#include "modules/desktop_capture/desktop_capture_options.h"
+#include "modules/desktop_capture/desktop_capturer.h"
+#include "modules/desktop_capture/desktop_frame.h"
+#include "modules/desktop_capture/mac/desktop_configuration.h"
+#include "modules/desktop_capture/mac/desktop_configuration_monitor.h"
+#include "modules/desktop_capture/mac/desktop_frame_cgimage.h"
+#include "modules/desktop_capture/mac/window_list_utils.h"
+#include "modules/desktop_capture/window_finder_mac.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/trace_event.h"
+
+namespace webrtc {
+
+namespace {
+
+// Returns true if the window exists.
+bool IsWindowValid(CGWindowID id) {
+ CFArrayRef window_id_array =
+ CFArrayCreate(nullptr, reinterpret_cast<const void**>(&id), 1, nullptr);
+ CFArrayRef window_array =
+ CGWindowListCreateDescriptionFromArray(window_id_array);
+ bool valid = window_array && CFArrayGetCount(window_array);
+ CFRelease(window_id_array);
+ CFRelease(window_array);
+
+ return valid;
+}
+
+class WindowCapturerMac : public DesktopCapturer {
+ public:
+ explicit WindowCapturerMac(
+ rtc::scoped_refptr<FullScreenWindowDetector> full_screen_window_detector,
+ rtc::scoped_refptr<DesktopConfigurationMonitor> configuration_monitor);
+ ~WindowCapturerMac() override;
+
+ WindowCapturerMac(const WindowCapturerMac&) = delete;
+ WindowCapturerMac& operator=(const WindowCapturerMac&) = delete;
+
+ // DesktopCapturer interface.
+ void Start(Callback* callback) override;
+ void CaptureFrame() override;
+ bool GetSourceList(SourceList* sources) override;
+ bool SelectSource(SourceId id) override;
+ bool FocusOnSelectedSource() override;
+ bool IsOccluded(const DesktopVector& pos) override;
+
+ private:
+ Callback* callback_ = nullptr;
+
+ // The window being captured.
+ CGWindowID window_id_ = 0;
+
+ rtc::scoped_refptr<FullScreenWindowDetector> full_screen_window_detector_;
+
+ const rtc::scoped_refptr<DesktopConfigurationMonitor> configuration_monitor_;
+
+ WindowFinderMac window_finder_;
+};
+
+WindowCapturerMac::WindowCapturerMac(
+ rtc::scoped_refptr<FullScreenWindowDetector> full_screen_window_detector,
+ rtc::scoped_refptr<DesktopConfigurationMonitor> configuration_monitor)
+ : full_screen_window_detector_(std::move(full_screen_window_detector)),
+ configuration_monitor_(std::move(configuration_monitor)),
+ window_finder_(configuration_monitor_) {}
+
+WindowCapturerMac::~WindowCapturerMac() {}
+
+bool WindowCapturerMac::GetSourceList(SourceList* sources) {
+ return webrtc::GetWindowList(sources, true, true);
+}
+
+bool WindowCapturerMac::SelectSource(SourceId id) {
+ if (!IsWindowValid(id))
+ return false;
+ window_id_ = id;
+ return true;
+}
+
+bool WindowCapturerMac::FocusOnSelectedSource() {
+ if (!window_id_)
+ return false;
+
+ CGWindowID ids[1];
+ ids[0] = window_id_;
+ CFArrayRef window_id_array =
+ CFArrayCreate(nullptr, reinterpret_cast<const void**>(&ids), 1, nullptr);
+
+ CFArrayRef window_array =
+ CGWindowListCreateDescriptionFromArray(window_id_array);
+ if (!window_array || 0 == CFArrayGetCount(window_array)) {
+ // Could not find the window. It might have been closed.
+ RTC_LOG(LS_INFO) << "Window not found";
+ CFRelease(window_id_array);
+ if (window_array)
+ CFRelease(window_array);
+ return false;
+ }
+
+ CFDictionaryRef window = reinterpret_cast<CFDictionaryRef>(
+ CFArrayGetValueAtIndex(window_array, 0));
+ CFNumberRef pid_ref = reinterpret_cast<CFNumberRef>(
+ CFDictionaryGetValue(window, kCGWindowOwnerPID));
+
+ int pid;
+ CFNumberGetValue(pid_ref, kCFNumberIntType, &pid);
+
+ // TODO(jiayl): this will bring the process main window to the front. We
+ // should find a way to bring only the window to the front.
+ bool result =
+ [[NSRunningApplication runningApplicationWithProcessIdentifier: pid]
+ activateWithOptions: NSApplicationActivateIgnoringOtherApps];
+
+ CFRelease(window_id_array);
+ CFRelease(window_array);
+ return result;
+}
+
+bool WindowCapturerMac::IsOccluded(const DesktopVector& pos) {
+ DesktopVector sys_pos = pos;
+ if (configuration_monitor_) {
+ auto configuration = configuration_monitor_->desktop_configuration();
+ sys_pos = pos.add(configuration.bounds.top_left());
+ }
+ return window_finder_.GetWindowUnderPoint(sys_pos) != window_id_;
+}
+
+void WindowCapturerMac::Start(Callback* callback) {
+ RTC_DCHECK(!callback_);
+ RTC_DCHECK(callback);
+
+ callback_ = callback;
+}
+
+void WindowCapturerMac::CaptureFrame() {
+ TRACE_EVENT0("webrtc", "WindowCapturerMac::CaptureFrame");
+
+ if (!IsWindowValid(window_id_)) {
+ RTC_LOG(LS_ERROR) << "The window is not valid any longer.";
+ callback_->OnCaptureResult(Result::ERROR_PERMANENT, nullptr);
+ return;
+ }
+
+ CGWindowID on_screen_window = window_id_;
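+ // If the selected window has gone full screen (e.g. a slideshow), its
+ // content may be drawn by a separate full-screen window; when the detector
+ // finds one, capture that window instead.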
+ if (full_screen_window_detector_) {
+ full_screen_window_detector_->UpdateWindowListIfNeeded(
+ window_id_, [](DesktopCapturer::SourceList* sources) {
+ // Not using webrtc::GetWindowList(sources, true, false) here,
+ // since it would not include an untitled window in the result
+ // alongside a titled window owned by the same pid.
+ return webrtc::GetWindowList(
+ [sources](CFDictionaryRef window) {
+ WindowId window_id = GetWindowId(window);
+ int pid = GetWindowOwnerPid(window);
+ if (window_id != kNullWindowId) {
+ sources->push_back(DesktopCapturer::Source{window_id, pid, GetWindowTitle(window)});
+ }
+ return true;
+ },
+ true,
+ false);
+ });
+
+ CGWindowID full_screen_window = full_screen_window_detector_->FindFullScreenWindow(window_id_);
+
+ if (full_screen_window != kCGNullWindowID) on_screen_window = full_screen_window;
+ }
+
+ std::unique_ptr<DesktopFrame> frame = DesktopFrameCGImage::CreateForWindow(on_screen_window);
+ if (!frame) {
+ RTC_LOG(LS_WARNING) << "Temporarily failed to capture window.";
+ callback_->OnCaptureResult(Result::ERROR_TEMPORARY, nullptr);
+ return;
+ }
+
+ frame->mutable_updated_region()->SetRect(
+ DesktopRect::MakeSize(frame->size()));
+ frame->set_top_left(GetWindowBounds(on_screen_window).top_left());
+
+ float scale_factor = GetWindowScaleFactor(window_id_, frame->size());
+ frame->set_dpi(DesktopVector(kStandardDPI * scale_factor, kStandardDPI * scale_factor));
+
+ callback_->OnCaptureResult(Result::SUCCESS, std::move(frame));
+}
+
+} // namespace
+
+// static
+std::unique_ptr<DesktopCapturer> DesktopCapturer::CreateRawWindowCapturer(
+ const DesktopCaptureOptions& options) {
+ return std::unique_ptr<DesktopCapturer>(new WindowCapturerMac(
+ options.full_screen_window_detector(), options.configuration_monitor()));
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/window_capturer_null.cc b/third_party/libwebrtc/modules/desktop_capture/window_capturer_null.cc
new file mode 100644
index 0000000000..6da2a76691
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/window_capturer_null.cc
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/desktop_capturer.h"
+#include "modules/desktop_capture/desktop_frame.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+namespace {
+
+class WindowCapturerNull : public DesktopCapturer {
+ public:
+ WindowCapturerNull();
+ ~WindowCapturerNull() override;
+
+ WindowCapturerNull(const WindowCapturerNull&) = delete;
+ WindowCapturerNull& operator=(const WindowCapturerNull&) = delete;
+
+ // DesktopCapturer interface.
+ void Start(Callback* callback) override;
+ void CaptureFrame() override;
+ bool GetSourceList(SourceList* sources) override;
+ bool SelectSource(SourceId id) override;
+
+ private:
+ Callback* callback_ = nullptr;
+};
+
+WindowCapturerNull::WindowCapturerNull() {}
+WindowCapturerNull::~WindowCapturerNull() {}
+
+bool WindowCapturerNull::GetSourceList(SourceList* sources) {
+ // Not implemented yet.
+ return false;
+}
+
+bool WindowCapturerNull::SelectSource(SourceId id) {
+ // Not implemented yet.
+ return false;
+}
+
+void WindowCapturerNull::Start(Callback* callback) {
+ RTC_DCHECK(!callback_);
+ RTC_DCHECK(callback);
+
+ callback_ = callback;
+}
+
+void WindowCapturerNull::CaptureFrame() {
+ // Not implemented yet.
+ callback_->OnCaptureResult(Result::ERROR_TEMPORARY, nullptr);
+}
+
+} // namespace
+
+// static
+std::unique_ptr<DesktopCapturer> DesktopCapturer::CreateRawWindowCapturer(
+ const DesktopCaptureOptions& options) {
+ return std::unique_ptr<DesktopCapturer>(new WindowCapturerNull());
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/window_capturer_unittest.cc b/third_party/libwebrtc/modules/desktop_capture/window_capturer_unittest.cc
new file mode 100644
index 0000000000..519c04601b
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/window_capturer_unittest.cc
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <memory>
+#include <string>
+#include <utility>
+
+#include "modules/desktop_capture/desktop_capture_options.h"
+#include "modules/desktop_capture/desktop_capturer.h"
+#include "modules/desktop_capture/desktop_frame.h"
+#include "modules/desktop_capture/desktop_geometry.h"
+#include "rtc_base/checks.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+class WindowCapturerTest : public ::testing::Test,
+ public DesktopCapturer::Callback {
+ public:
+ void SetUp() override {
+ capturer_ = DesktopCapturer::CreateWindowCapturer(
+ DesktopCaptureOptions::CreateDefault());
+ ASSERT_TRUE(capturer_);
+ }
+
+ void TearDown() override {}
+
+ // DesktopCapturer::Callback interface
+ void OnCaptureResult(DesktopCapturer::Result result,
+ std::unique_ptr<DesktopFrame> frame) override {
+ frame_ = std::move(frame);
+ }
+
+ protected:
+ std::unique_ptr<DesktopCapturer> capturer_;
+ std::unique_ptr<DesktopFrame> frame_;
+};
+
+// Verify that we can enumerate windows.
+// TODO(bugs.webrtc.org/12950): Re-enable when libc++ issue is fixed
+#if defined(WEBRTC_LINUX) && defined(MEMORY_SANITIZER)
+#define MAYBE_Enumerate DISABLED_Enumerate
+#else
+#define MAYBE_Enumerate Enumerate
+#endif
+TEST_F(WindowCapturerTest, MAYBE_Enumerate) {
+ DesktopCapturer::SourceList sources;
+ EXPECT_TRUE(capturer_->GetSourceList(&sources));
+
+ // Verify that window titles are set.
+ for (auto it = sources.begin(); it != sources.end(); ++it) {
+ EXPECT_FALSE(it->title.empty());
+ }
+}
+
+// Flaky on Linux. See: crbug.com/webrtc/7830.
+// Failing on macOS 11: See bugs.webrtc.org/12801
+#if defined(WEBRTC_LINUX) || defined(WEBRTC_MAC)
+#define MAYBE_Capture DISABLED_Capture
+#else
+#define MAYBE_Capture Capture
+#endif
+// Verify we can capture a window.
+//
+// TODO(sergeyu): Currently this test just looks at the windows that already
+// exist. Ideally it should create a test window and capture from it, but there
+// is no easy cross-platform way to create new windows (potentially we could
+// have a Python script showing a Tk dialog, but the launching code would
+// differ between platforms).
+TEST_F(WindowCapturerTest, MAYBE_Capture) {
+ DesktopCapturer::SourceList sources;
+ capturer_->Start(this);
+ EXPECT_TRUE(capturer_->GetSourceList(&sources));
+
+ // Verify that we can select and capture each window.
+ for (auto it = sources.begin(); it != sources.end(); ++it) {
+ frame_.reset();
+ if (capturer_->SelectSource(it->id)) {
+ capturer_->CaptureFrame();
+ }
+
+ // If we failed to capture a window make sure it no longer exists.
+ if (!frame_.get()) {
+ DesktopCapturer::SourceList new_list;
+ EXPECT_TRUE(capturer_->GetSourceList(&new_list));
+ for (auto new_list_it = new_list.begin(); new_list_it != new_list.end();
+ ++new_list_it) {
+ EXPECT_FALSE(it->id == new_list_it->id);
+ }
+ continue;
+ }
+
+ EXPECT_GT(frame_->size().width(), 0);
+ EXPECT_GT(frame_->size().height(), 0);
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/window_capturer_win.cc b/third_party/libwebrtc/modules/desktop_capture/window_capturer_win.cc
new file mode 100644
index 0000000000..f289746e30
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/window_capturer_win.cc
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/desktop_capture_options.h"
+#include "modules/desktop_capture/desktop_capturer.h"
+#include "modules/desktop_capture/win/window_capturer_win_gdi.h"
+
+#if defined(RTC_ENABLE_WIN_WGC)
+#include "modules/desktop_capture/blank_detector_desktop_capturer_wrapper.h"
+#include "modules/desktop_capture/fallback_desktop_capturer_wrapper.h"
+#include "modules/desktop_capture/win/wgc_capturer_win.h"
+#include "rtc_base/win/windows_version.h"
+#endif // defined(RTC_ENABLE_WIN_WGC)
+
+namespace webrtc {
+
+// static
+std::unique_ptr<DesktopCapturer> DesktopCapturer::CreateRawWindowCapturer(
+ const DesktopCaptureOptions& options) {
+ std::unique_ptr<DesktopCapturer> capturer(
+ WindowCapturerWinGdi::CreateRawWindowCapturer(options));
+#if defined(RTC_ENABLE_WIN_WGC)
+ if (options.allow_wgc_capturer_fallback() &&
+ rtc::rtc_win::GetVersion() >= rtc::rtc_win::Version::VERSION_WIN11) {
+ // The BlankDetector capturer will report an error when it detects failed
+ // GDI rendering; the Fallback capturer will then retry the capture with
+ // WGC.
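+ // The resulting chain (illustrative sketch of the wrapping below) is:
+ // FallbackDesktopCapturerWrapper(
+ // main: BlankDetectorDesktopCapturerWrapper(GDI capturer),
+ // secondary: WgcCapturerWin window capturer).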
+ capturer = std::make_unique<BlankDetectorDesktopCapturerWrapper>(
+ std::move(capturer), RgbaColor(0, 0, 0, 0),
+ /*check_per_capture*/ true);
+
+ capturer = std::make_unique<FallbackDesktopCapturerWrapper>(
+ std::move(capturer),
+ WgcCapturerWin::CreateRawWindowCapturer(
+ options, /*allow_delayed_capturable_check*/ true));
+ }
+#endif // defined(RTC_ENABLE_WIN_WGC)
+ return capturer;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/window_finder.cc b/third_party/libwebrtc/modules/desktop_capture/window_finder.cc
new file mode 100644
index 0000000000..86127d4c05
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/window_finder.cc
@@ -0,0 +1,20 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/window_finder.h"
+
+namespace webrtc {
+
+WindowFinder::Options::Options() = default;
+WindowFinder::Options::~Options() = default;
+WindowFinder::Options::Options(const WindowFinder::Options& other) = default;
+WindowFinder::Options::Options(WindowFinder::Options&& other) = default;
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/window_finder.h b/third_party/libwebrtc/modules/desktop_capture/window_finder.h
new file mode 100644
index 0000000000..99e3cce559
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/window_finder.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_DESKTOP_CAPTURE_WINDOW_FINDER_H_
+#define MODULES_DESKTOP_CAPTURE_WINDOW_FINDER_H_
+
+#include <memory>
+
+#include "api/scoped_refptr.h"
+#include "modules/desktop_capture/desktop_capture_types.h"
+#include "modules/desktop_capture/desktop_geometry.h"
+
+#if defined(WEBRTC_MAC) && !defined(WEBRTC_IOS)
+#include "modules/desktop_capture/mac/desktop_configuration_monitor.h"
+#endif
+
+namespace webrtc {
+
+#if defined(WEBRTC_USE_X11)
+class XAtomCache;
+#endif
+
+// An interface to return the id of the visible window under a certain point.
+class WindowFinder {
+ public:
+ WindowFinder() = default;
+ virtual ~WindowFinder() = default;
+
+ // Returns the id of the visible window under `point`. This function returns
+ // kNullWindowId if no window is under `point` and the platform does not have
+ // a "root window" concept, i.e. the visible area under `point` is the
+ // desktop. `point` is always in system coordinates, i.e. the primary monitor
+ // always starts from (0, 0).
+ virtual WindowId GetWindowUnderPoint(DesktopVector point) = 0;
+
+ struct Options final {
+ Options();
+ ~Options();
+ Options(const Options& other);
+ Options(Options&& other);
+
+#if defined(WEBRTC_USE_X11)
+ XAtomCache* cache = nullptr;
+#endif
+#if defined(WEBRTC_MAC) && !defined(WEBRTC_IOS)
+ rtc::scoped_refptr<DesktopConfigurationMonitor> configuration_monitor;
+#endif
+ };
+
+ // Creates a platform-independent WindowFinder implementation. This function
+ // returns nullptr if `options` does not contain enough information or
+ // WindowFinder does not support current platform.
+ static std::unique_ptr<WindowFinder> Create(const Options& options);
+};
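+// Illustrative usage sketch (hedged; on X11 builds Create() is expected to
+// need a non-null Options::cache, otherwise it returns nullptr):
+//   WindowFinder::Options options;
+//   std::unique_ptr<WindowFinder> finder = WindowFinder::Create(options);
+//   WindowId id = finder ? finder->GetWindowUnderPoint(DesktopVector(10, 10))
+//                        : kNullWindowId;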
+
+} // namespace webrtc
+
+#endif // MODULES_DESKTOP_CAPTURE_WINDOW_FINDER_H_
diff --git a/third_party/libwebrtc/modules/desktop_capture/window_finder_mac.h b/third_party/libwebrtc/modules/desktop_capture/window_finder_mac.h
new file mode 100644
index 0000000000..988dd497dd
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/window_finder_mac.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_DESKTOP_CAPTURE_WINDOW_FINDER_MAC_H_
+#define MODULES_DESKTOP_CAPTURE_WINDOW_FINDER_MAC_H_
+
+#include "api/scoped_refptr.h"
+#include "modules/desktop_capture/window_finder.h"
+
+namespace webrtc {
+
+class DesktopConfigurationMonitor;
+
+// The implementation of WindowFinder for macOS.
+class WindowFinderMac final : public WindowFinder {
+ public:
+ explicit WindowFinderMac(
+ rtc::scoped_refptr<DesktopConfigurationMonitor> configuration_monitor);
+ ~WindowFinderMac() override;
+
+ // WindowFinder implementation.
+ WindowId GetWindowUnderPoint(DesktopVector point) override;
+
+ private:
+ const rtc::scoped_refptr<DesktopConfigurationMonitor> configuration_monitor_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_DESKTOP_CAPTURE_WINDOW_FINDER_MAC_H_
diff --git a/third_party/libwebrtc/modules/desktop_capture/window_finder_mac.mm b/third_party/libwebrtc/modules/desktop_capture/window_finder_mac.mm
new file mode 100644
index 0000000000..e1d0316c79
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/window_finder_mac.mm
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/window_finder_mac.h"
+
+#include <CoreFoundation/CoreFoundation.h>
+
+#include <memory>
+#include <utility>
+
+#include "modules/desktop_capture/mac/desktop_configuration.h"
+#include "modules/desktop_capture/mac/desktop_configuration_monitor.h"
+#include "modules/desktop_capture/mac/window_list_utils.h"
+
+namespace webrtc {
+
+WindowFinderMac::WindowFinderMac(
+ rtc::scoped_refptr<DesktopConfigurationMonitor> configuration_monitor)
+ : configuration_monitor_(std::move(configuration_monitor)) {}
+WindowFinderMac::~WindowFinderMac() = default;
+
+WindowId WindowFinderMac::GetWindowUnderPoint(DesktopVector point) {
+ WindowId id = kNullWindowId;
+ GetWindowList(
+ [&id, point](CFDictionaryRef window) {
+ DesktopRect bounds;
+ bounds = GetWindowBounds(window);
+ if (bounds.Contains(point)) {
+ id = GetWindowId(window);
+ return false;
+ }
+ return true;
+ },
+ true,
+ true);
+ return id;
+}
+
+// static
+std::unique_ptr<WindowFinder> WindowFinder::Create(
+ const WindowFinder::Options& options) {
+ return std::make_unique<WindowFinderMac>(options.configuration_monitor);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/window_finder_unittest.cc b/third_party/libwebrtc/modules/desktop_capture/window_finder_unittest.cc
new file mode 100644
index 0000000000..ac13f124d3
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/window_finder_unittest.cc
@@ -0,0 +1,178 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/window_finder.h"
+
+#include <stdint.h>
+
+#include <memory>
+
+#include "api/scoped_refptr.h"
+#include "modules/desktop_capture/desktop_geometry.h"
+#include "modules/desktop_capture/screen_drawer.h"
+#include "rtc_base/logging.h"
+#include "test/gtest.h"
+
+#if defined(WEBRTC_USE_X11)
+#include "modules/desktop_capture/linux/x11/shared_x_display.h"
+#include "modules/desktop_capture/linux/x11/x_atom_cache.h"
+#endif
+
+#if defined(WEBRTC_WIN)
+#include <windows.h>
+
+#include "modules/desktop_capture/win/window_capture_utils.h"
+#include "modules/desktop_capture/window_finder_win.h"
+#endif
+
+namespace webrtc {
+
+namespace {
+
+#if defined(WEBRTC_WIN)
+// ScreenDrawerWin does not have a message loop, so it's unresponsive to user
+// input. WindowFinderWin cannot detect this kind of unresponsive window.
+// Instead, the console window is used to test WindowFinderWin.
+TEST(WindowFinderTest, FindConsoleWindow) {
+ // Creates a ScreenDrawer to prevent this test from conflicting with
+ // ScreenCapturerIntegrationTest: both tests require their windows to be in
+ // the foreground.
+ //
+ // In ScreenCapturer-related tests, this is controlled by ScreenDrawer,
+ // which has a global lock to ensure that only one ScreenDrawer window is
+ // active. So even though we do not use ScreenDrawer for the Windows test,
+ // creating an instance blocks ScreenCapturer-related tests until this test
+ // finishes.
+ //
+ // Usually the test framework should take care of this "isolated test"
+ // requirement, but unfortunately WebRTC trybots do not support it.
+ std::unique_ptr<ScreenDrawer> drawer = ScreenDrawer::Create();
+ const int kMaxSize = 10000;
+ // Enlarges current console window.
+ system("mode 1000,1000");
+ const HWND console_window = GetConsoleWindow();
+ // Ensures that current console window is visible.
+ ShowWindow(console_window, SW_MAXIMIZE);
+ // Moves the window to the top-left of the display.
+ MoveWindow(console_window, 0, 0, kMaxSize, kMaxSize, true);
+
+ bool should_restore_notopmost =
+ (GetWindowLong(console_window, GWL_EXSTYLE) & WS_EX_TOPMOST) == 0;
+
+ // Brings console window to top.
+ SetWindowPos(console_window, HWND_TOPMOST, 0, 0, 0, 0,
+ SWP_NOMOVE | SWP_NOSIZE);
+ BringWindowToTop(console_window);
+
+ bool success = false;
+ WindowFinderWin finder;
+ for (int i = 0; i < kMaxSize; i++) {
+ const DesktopVector spot(i, i);
+ const HWND id = reinterpret_cast<HWND>(finder.GetWindowUnderPoint(spot));
+ if (id == console_window) {
+ success = true;
+ break;
+ }
+ }
+ if (should_restore_notopmost)
+ SetWindowPos(console_window, HWND_NOTOPMOST, 0, 0, 0, 0,
+ SWP_NOMOVE | SWP_NOSIZE | SWP_NOACTIVATE);
+
+ if (!success)
+ FAIL();
+}
+
+#else
+TEST(WindowFinderTest, FindDrawerWindow) {
+ WindowFinder::Options options;
+#if defined(WEBRTC_USE_X11)
+ std::unique_ptr<XAtomCache> cache;
+ const auto shared_x_display = SharedXDisplay::CreateDefault();
+ if (shared_x_display) {
+ cache = std::make_unique<XAtomCache>(shared_x_display->display());
+ options.cache = cache.get();
+ }
+#endif
+ std::unique_ptr<WindowFinder> finder = WindowFinder::Create(options);
+ if (!finder) {
+ RTC_LOG(LS_WARNING)
+ << "No WindowFinder implementation for current platform.";
+ return;
+ }
+
+ std::unique_ptr<ScreenDrawer> drawer = ScreenDrawer::Create();
+ if (!drawer) {
+ RTC_LOG(LS_WARNING)
+ << "No ScreenDrawer implementation for current platform.";
+ return;
+ }
+
+ if (drawer->window_id() == kNullWindowId) {
+ // TODO(zijiehe): WindowFinderTest can use a dedicated window without
+ // relying on ScreenDrawer.
+ RTC_LOG(LS_WARNING)
+ << "ScreenDrawer implementation for current platform does "
+ "create a window.";
+ return;
+ }
+
+ // ScreenDrawer may not be able to bring the window to the top. So we test
+ // several spots; at least one of them should succeed.
+ const DesktopRect region = drawer->DrawableRegion();
+ if (region.is_empty()) {
+ RTC_LOG(LS_WARNING)
+ << "ScreenDrawer::DrawableRegion() is too small for the "
+ "WindowFinderTest.";
+ return;
+ }
+
+ for (int i = 0; i < region.width(); i++) {
+ const DesktopVector spot(
+ region.left() + i, region.top() + i * region.height() / region.width());
+ const WindowId id = finder->GetWindowUnderPoint(spot);
+ if (id == drawer->window_id()) {
+ return;
+ }
+ }
+
+ FAIL();
+}
+#endif
+
+TEST(WindowFinderTest, ShouldReturnNullWindowIfSpotIsOutOfScreen) {
+ WindowFinder::Options options;
+#if defined(WEBRTC_USE_X11)
+ std::unique_ptr<XAtomCache> cache;
+ const auto shared_x_display = SharedXDisplay::CreateDefault();
+ if (shared_x_display) {
+ cache = std::make_unique<XAtomCache>(shared_x_display->display());
+ options.cache = cache.get();
+ }
+#endif
+ std::unique_ptr<WindowFinder> finder = WindowFinder::Create(options);
+ if (!finder) {
+ RTC_LOG(LS_WARNING)
+ << "No WindowFinder implementation for current platform.";
+ return;
+ }
+
+ ASSERT_EQ(kNullWindowId,
+ finder->GetWindowUnderPoint(DesktopVector(INT16_MAX, INT16_MAX)));
+ ASSERT_EQ(kNullWindowId,
+ finder->GetWindowUnderPoint(DesktopVector(INT16_MAX, INT16_MIN)));
+ ASSERT_EQ(kNullWindowId,
+ finder->GetWindowUnderPoint(DesktopVector(INT16_MIN, INT16_MAX)));
+ ASSERT_EQ(kNullWindowId,
+ finder->GetWindowUnderPoint(DesktopVector(INT16_MIN, INT16_MIN)));
+}
+
+} // namespace
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/window_finder_win.cc b/third_party/libwebrtc/modules/desktop_capture/window_finder_win.cc
new file mode 100644
index 0000000000..a8c3d39e19
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/window_finder_win.cc
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/window_finder_win.h"
+
+#include <windows.h>
+
+#include <memory>
+
+namespace webrtc {
+
+WindowFinderWin::WindowFinderWin() = default;
+WindowFinderWin::~WindowFinderWin() = default;
+
+WindowId WindowFinderWin::GetWindowUnderPoint(DesktopVector point) {
+ HWND window = WindowFromPoint(POINT{point.x(), point.y()});
+ if (!window) {
+ return kNullWindowId;
+ }
+
+ // The difference between GA_ROOTOWNER and GA_ROOT can be found at
+ // https://groups.google.com/a/chromium.org/forum/#!topic/chromium-dev/Hirr_DkuZdw.
+ // In short, we should use GA_ROOT, since we only care about the root window,
+ // not the owner.
+ window = GetAncestor(window, GA_ROOT);
+ if (!window) {
+ return kNullWindowId;
+ }
+
+ return reinterpret_cast<WindowId>(window);
+}
+
+// static
+std::unique_ptr<WindowFinder> WindowFinder::Create(
+ const WindowFinder::Options& options) {
+ return std::make_unique<WindowFinderWin>();
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/desktop_capture/window_finder_win.h b/third_party/libwebrtc/modules/desktop_capture/window_finder_win.h
new file mode 100644
index 0000000000..a04e7e1aae
--- /dev/null
+++ b/third_party/libwebrtc/modules/desktop_capture/window_finder_win.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_DESKTOP_CAPTURE_WINDOW_FINDER_WIN_H_
+#define MODULES_DESKTOP_CAPTURE_WINDOW_FINDER_WIN_H_
+
+#include "modules/desktop_capture/window_finder.h"
+
+namespace webrtc {
+
+// The implementation of WindowFinder for Windows.
+class WindowFinderWin final : public WindowFinder {
+ public:
+ WindowFinderWin();
+ ~WindowFinderWin() override;
+
+ // WindowFinder implementation.
+ WindowId GetWindowUnderPoint(DesktopVector point) override;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_DESKTOP_CAPTURE_WINDOW_FINDER_WIN_H_
diff --git a/third_party/libwebrtc/modules/include/module_common_types.h b/third_party/libwebrtc/modules/include/module_common_types.h
new file mode 100644
index 0000000000..8e4e5465fa
--- /dev/null
+++ b/third_party/libwebrtc/modules/include/module_common_types.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_INCLUDE_MODULE_COMMON_TYPES_H_
+#define MODULES_INCLUDE_MODULE_COMMON_TYPES_H_
+
+#include <stdint.h>
+
+#include <vector>
+
+namespace webrtc {
+
+// Interface used by the CallStats class to distribute call statistics.
+// Callbacks will be triggered as soon as the class has been registered to a
+// CallStats object using RegisterStatsObserver.
+class CallStatsObserver {
+ public:
+ virtual void OnRttUpdate(int64_t avg_rtt_ms, int64_t max_rtt_ms) = 0;
+
+ virtual ~CallStatsObserver() {}
+};
+
+// Interface used by NackModule and JitterBuffer.
+class NackSender {
+ public:
+ // If `buffering_allowed`, other feedback messages (e.g. key frame requests)
+ // may be added to the same outgoing feedback message. In that case, it's up
+ // to the user of the interface to ensure that when all buffer-able messages
+ // have been added, the feedback message is triggered.
+ virtual void SendNack(const std::vector<uint16_t>& sequence_numbers,
+ bool buffering_allowed) = 0;
+
+ protected:
+ virtual ~NackSender() {}
+};
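+// For example (illustrative): a receiver may call
+// SendNack({5, 6, 7}, /*buffering_allowed=*/true) while also queuing a key
+// frame request, then trigger one compound feedback message carrying both.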
+
+// Interface used by NackModule and JitterBuffer.
+class KeyFrameRequestSender {
+ public:
+ virtual void RequestKeyFrame() = 0;
+
+ protected:
+ virtual ~KeyFrameRequestSender() {}
+};
+
+// Interface used by LossNotificationController to communicate to RtpRtcp.
+class LossNotificationSender {
+ public:
+ virtual ~LossNotificationSender() {}
+
+ virtual void SendLossNotification(uint16_t last_decoded_seq_num,
+ uint16_t last_received_seq_num,
+ bool decodability_flag,
+ bool buffering_allowed) = 0;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_INCLUDE_MODULE_COMMON_TYPES_H_
diff --git a/third_party/libwebrtc/modules/include/module_common_types_public.h b/third_party/libwebrtc/modules/include/module_common_types_public.h
new file mode 100644
index 0000000000..345e45ce12
--- /dev/null
+++ b/third_party/libwebrtc/modules/include/module_common_types_public.h
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_INCLUDE_MODULE_COMMON_TYPES_PUBLIC_H_
+#define MODULES_INCLUDE_MODULE_COMMON_TYPES_PUBLIC_H_
+
+#include <limits>
+
+#include "absl/types/optional.h"
+
+namespace webrtc {
+
+template <typename U>
+inline bool IsNewer(U value, U prev_value) {
+ static_assert(!std::numeric_limits<U>::is_signed, "U must be unsigned");
+ // kBreakpoint is the half-way mark for the type U. For instance, for a
+ // uint16_t it will be 0x8000, and for a uint32_t, it will be 0x80000000.
+ constexpr U kBreakpoint = (std::numeric_limits<U>::max() >> 1) + 1;
+ // Distinguish between elements that are exactly kBreakpoint apart: if
+ // t1 > t2 and |t1 - t2| == kBreakpoint, then IsNewer(t1, t2) is true and
+ // IsNewer(t2, t1) is false, rather than both being false.
+ if (value - prev_value == kBreakpoint) {
+ return value > prev_value;
+ }
+ return value != prev_value &&
+ static_cast<U>(value - prev_value) < kBreakpoint;
+}
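+// Worked example for U = uint16_t (illustrative):
+// IsNewer(0x0002, 0xFFFE) is true (forward wrap: 0x0002 - 0xFFFE == 4);
+// IsNewer(0xFFFE, 0x0002) is false (delta 0xFFFC >= kBreakpoint);
+// IsNewer(0x8000, 0x0000) is true while IsNewer(0x0000, 0x8000) is false,
+// per the exact-kBreakpoint tie-break above.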
+
+// Utility class to unwrap a number to a larger type. The numbers will never be
+// unwrapped to a negative value.
+template <typename U>
+class Unwrapper {
+ static_assert(!std::numeric_limits<U>::is_signed, "U must be unsigned");
+ static_assert(std::numeric_limits<U>::max() <=
+ std::numeric_limits<uint32_t>::max(),
+ "U must not be wider than 32 bits");
+
+ public:
+ // Get the unwrapped value, but don't update the internal state.
+ int64_t UnwrapWithoutUpdate(U value) const {
+ if (!last_value_)
+ return value;
+
+ constexpr int64_t kMaxPlusOne =
+ static_cast<int64_t>(std::numeric_limits<U>::max()) + 1;
+
+ U cropped_last = static_cast<U>(*last_value_);
+ int64_t delta = value - cropped_last;
+ if (IsNewer(value, cropped_last)) {
+ if (delta < 0)
+ delta += kMaxPlusOne; // Wrap forwards.
+ } else if (delta > 0 && (*last_value_ + delta - kMaxPlusOne) >= 0) {
+ // If value is older but delta is positive, this is a backwards
+ // wrap-around. However, don't wrap backwards past 0 (unwrapped).
+ delta -= kMaxPlusOne;
+ }
+
+ return *last_value_ + delta;
+ }
+
+ // Only update the internal state to the specified last (unwrapped) value.
+ void UpdateLast(int64_t last_value) { last_value_ = last_value; }
+
+ // Unwrap the value and update the internal state.
+ int64_t Unwrap(U value) {
+ int64_t unwrapped = UnwrapWithoutUpdate(value);
+ UpdateLast(unwrapped);
+ return unwrapped;
+ }
+
+ private:
+ absl::optional<int64_t> last_value_;
+};
+
+using SequenceNumberUnwrapper = Unwrapper<uint16_t>;
+using TimestampUnwrapper = Unwrapper<uint32_t>;
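+// Illustrative trace: a SequenceNumberUnwrapper fed 0xFFFE and then 0x0001
+// returns 65534 and then 65537; the forward wrap is absorbed into the wider
+// int64_t range instead of jumping back near zero.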
+
+// NB: Doesn't fulfill strict weak ordering requirements.
+// Mustn't be used as std::map Compare function.
+inline bool IsNewerSequenceNumber(uint16_t sequence_number,
+ uint16_t prev_sequence_number) {
+ return IsNewer(sequence_number, prev_sequence_number);
+}
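+// Concrete illustration of the caveat above: the relation is not transitive.
+// IsNewerSequenceNumber(0x6000, 0x0000) and
+// IsNewerSequenceNumber(0xC000, 0x6000) are both true, yet
+// IsNewerSequenceNumber(0xC000, 0x0000) is false.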
+
+// NB: Doesn't fulfill strict weak ordering requirements.
+// Mustn't be used as std::map Compare function.
+inline bool IsNewerTimestamp(uint32_t timestamp, uint32_t prev_timestamp) {
+ return IsNewer(timestamp, prev_timestamp);
+}
+
+inline uint16_t LatestSequenceNumber(uint16_t sequence_number1,
+ uint16_t sequence_number2) {
+ return IsNewerSequenceNumber(sequence_number1, sequence_number2)
+ ? sequence_number1
+ : sequence_number2;
+}
+
+inline uint32_t LatestTimestamp(uint32_t timestamp1, uint32_t timestamp2) {
+ return IsNewerTimestamp(timestamp1, timestamp2) ? timestamp1 : timestamp2;
+}
+
+} // namespace webrtc
+#endif // MODULES_INCLUDE_MODULE_COMMON_TYPES_PUBLIC_H_
diff --git a/third_party/libwebrtc/modules/include/module_fec_types.h b/third_party/libwebrtc/modules/include/module_fec_types.h
new file mode 100644
index 0000000000..fa35342764
--- /dev/null
+++ b/third_party/libwebrtc/modules/include/module_fec_types.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_INCLUDE_MODULE_FEC_TYPES_H_
+#define MODULES_INCLUDE_MODULE_FEC_TYPES_H_
+
+namespace webrtc {
+
+// Types for the FEC packet masks. The type `kFecMaskRandom` is based on a
+// random loss model. The type `kFecMaskBursty` is based on a bursty/consecutive
+// loss model. The packet masks are defined in
+// modules/rtp_rtcp/fec_private_tables_random(bursty).h
+enum FecMaskType {
+ kFecMaskRandom,
+ kFecMaskBursty,
+};
+
+// Struct containing forward error correction settings.
+struct FecProtectionParams {
+ int fec_rate = 0;
+ int max_fec_frames = 0;
+ FecMaskType fec_mask_type = FecMaskType::kFecMaskRandom;
+};
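+// Minimal configuration sketch (values illustrative; fec_rate is assumed to
+// use the 0-255 protection-factor convention of the FEC implementation):
+//   FecProtectionParams params;
+//   params.fec_rate = 26;  // roughly 10% protection
+//   params.max_fec_frames = 3;
+//   params.fec_mask_type = kFecMaskBursty;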
+
+} // namespace webrtc
+
+#endif // MODULES_INCLUDE_MODULE_FEC_TYPES_H_
diff --git a/third_party/libwebrtc/modules/module_api_gn/moz.build b/third_party/libwebrtc/modules/module_api_gn/moz.build
new file mode 100644
index 0000000000..9cb0e4d620
--- /dev/null
+++ b/third_party/libwebrtc/modules/module_api_gn/moz.build
@@ -0,0 +1,189 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("module_api_gn")
diff --git a/third_party/libwebrtc/modules/module_api_public_gn/moz.build b/third_party/libwebrtc/modules/module_api_public_gn/moz.build
new file mode 100644
index 0000000000..03b9f6c4e5
--- /dev/null
+++ b/third_party/libwebrtc/modules/module_api_public_gn/moz.build
@@ -0,0 +1,189 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("module_api_public_gn")
diff --git a/third_party/libwebrtc/modules/module_common_types_unittest.cc b/third_party/libwebrtc/modules/module_common_types_unittest.cc
new file mode 100644
index 0000000000..0979af38e6
--- /dev/null
+++ b/third_party/libwebrtc/modules/module_common_types_unittest.cc
@@ -0,0 +1,224 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/include/module_common_types.h"
+
+#include "modules/include/module_common_types_public.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+TEST(IsNewerSequenceNumber, Equal) {
+ EXPECT_FALSE(IsNewerSequenceNumber(0x0001, 0x0001));
+}
+
+TEST(IsNewerSequenceNumber, NoWrap) {
+ EXPECT_TRUE(IsNewerSequenceNumber(0xFFFF, 0xFFFE));
+ EXPECT_TRUE(IsNewerSequenceNumber(0x0001, 0x0000));
+ EXPECT_TRUE(IsNewerSequenceNumber(0x0100, 0x00FF));
+}
+
+TEST(IsNewerSequenceNumber, ForwardWrap) {
+ EXPECT_TRUE(IsNewerSequenceNumber(0x0000, 0xFFFF));
+ EXPECT_TRUE(IsNewerSequenceNumber(0x0000, 0xFF00));
+ EXPECT_TRUE(IsNewerSequenceNumber(0x00FF, 0xFFFF));
+ EXPECT_TRUE(IsNewerSequenceNumber(0x00FF, 0xFF00));
+}
+
+TEST(IsNewerSequenceNumber, BackwardWrap) {
+ EXPECT_FALSE(IsNewerSequenceNumber(0xFFFF, 0x0000));
+ EXPECT_FALSE(IsNewerSequenceNumber(0xFF00, 0x0000));
+ EXPECT_FALSE(IsNewerSequenceNumber(0xFFFF, 0x00FF));
+ EXPECT_FALSE(IsNewerSequenceNumber(0xFF00, 0x00FF));
+}
+
+TEST(IsNewerSequenceNumber, HalfWayApart) {
+ EXPECT_TRUE(IsNewerSequenceNumber(0x8000, 0x0000));
+ EXPECT_FALSE(IsNewerSequenceNumber(0x0000, 0x8000));
+}
+
+TEST(IsNewerTimestamp, Equal) {
+ EXPECT_FALSE(IsNewerTimestamp(0x00000001, 0x00000001));
+}
+
+TEST(IsNewerTimestamp, NoWrap) {
+ EXPECT_TRUE(IsNewerTimestamp(0xFFFFFFFF, 0xFFFFFFFE));
+ EXPECT_TRUE(IsNewerTimestamp(0x00000001, 0x00000000));
+ EXPECT_TRUE(IsNewerTimestamp(0x00010000, 0x0000FFFF));
+}
+
+TEST(IsNewerTimestamp, ForwardWrap) {
+ EXPECT_TRUE(IsNewerTimestamp(0x00000000, 0xFFFFFFFF));
+ EXPECT_TRUE(IsNewerTimestamp(0x00000000, 0xFFFF0000));
+ EXPECT_TRUE(IsNewerTimestamp(0x0000FFFF, 0xFFFFFFFF));
+ EXPECT_TRUE(IsNewerTimestamp(0x0000FFFF, 0xFFFF0000));
+}
+
+TEST(IsNewerTimestamp, BackwardWrap) {
+ EXPECT_FALSE(IsNewerTimestamp(0xFFFFFFFF, 0x00000000));
+ EXPECT_FALSE(IsNewerTimestamp(0xFFFF0000, 0x00000000));
+ EXPECT_FALSE(IsNewerTimestamp(0xFFFFFFFF, 0x0000FFFF));
+ EXPECT_FALSE(IsNewerTimestamp(0xFFFF0000, 0x0000FFFF));
+}
+
+TEST(IsNewerTimestamp, HalfWayApart) {
+ EXPECT_TRUE(IsNewerTimestamp(0x80000000, 0x00000000));
+ EXPECT_FALSE(IsNewerTimestamp(0x00000000, 0x80000000));
+}
+
+TEST(LatestSequenceNumber, NoWrap) {
+ EXPECT_EQ(0xFFFFu, LatestSequenceNumber(0xFFFF, 0xFFFE));
+ EXPECT_EQ(0x0001u, LatestSequenceNumber(0x0001, 0x0000));
+ EXPECT_EQ(0x0100u, LatestSequenceNumber(0x0100, 0x00FF));
+
+ EXPECT_EQ(0xFFFFu, LatestSequenceNumber(0xFFFE, 0xFFFF));
+ EXPECT_EQ(0x0001u, LatestSequenceNumber(0x0000, 0x0001));
+ EXPECT_EQ(0x0100u, LatestSequenceNumber(0x00FF, 0x0100));
+}
+
+TEST(LatestSequenceNumber, Wrap) {
+ EXPECT_EQ(0x0000u, LatestSequenceNumber(0x0000, 0xFFFF));
+ EXPECT_EQ(0x0000u, LatestSequenceNumber(0x0000, 0xFF00));
+ EXPECT_EQ(0x00FFu, LatestSequenceNumber(0x00FF, 0xFFFF));
+ EXPECT_EQ(0x00FFu, LatestSequenceNumber(0x00FF, 0xFF00));
+
+ EXPECT_EQ(0x0000u, LatestSequenceNumber(0xFFFF, 0x0000));
+ EXPECT_EQ(0x0000u, LatestSequenceNumber(0xFF00, 0x0000));
+ EXPECT_EQ(0x00FFu, LatestSequenceNumber(0xFFFF, 0x00FF));
+ EXPECT_EQ(0x00FFu, LatestSequenceNumber(0xFF00, 0x00FF));
+}
+
+TEST(LatestTimestamp, NoWrap) {
+ EXPECT_EQ(0xFFFFFFFFu, LatestTimestamp(0xFFFFFFFF, 0xFFFFFFFE));
+ EXPECT_EQ(0x00000001u, LatestTimestamp(0x00000001, 0x00000000));
+ EXPECT_EQ(0x00010000u, LatestTimestamp(0x00010000, 0x0000FFFF));
+}
+
+TEST(LatestTimestamp, Wrap) {
+ EXPECT_EQ(0x00000000u, LatestTimestamp(0x00000000, 0xFFFFFFFF));
+ EXPECT_EQ(0x00000000u, LatestTimestamp(0x00000000, 0xFFFF0000));
+ EXPECT_EQ(0x0000FFFFu, LatestTimestamp(0x0000FFFF, 0xFFFFFFFF));
+ EXPECT_EQ(0x0000FFFFu, LatestTimestamp(0x0000FFFF, 0xFFFF0000));
+
+ EXPECT_EQ(0x00000000u, LatestTimestamp(0xFFFFFFFF, 0x00000000));
+ EXPECT_EQ(0x00000000u, LatestTimestamp(0xFFFF0000, 0x00000000));
+ EXPECT_EQ(0x0000FFFFu, LatestTimestamp(0xFFFFFFFF, 0x0000FFFF));
+ EXPECT_EQ(0x0000FFFFu, LatestTimestamp(0xFFFF0000, 0x0000FFFF));
+}
+
+TEST(SequenceNumberUnwrapper, Limits) {
+ SequenceNumberUnwrapper unwrapper;
+
+ EXPECT_EQ(0, unwrapper.Unwrap(0));
+ EXPECT_EQ(0x8000, unwrapper.Unwrap(0x8000));
+ // Delta is exactly 0x8000 but current is lower than input, wrap backwards.
+ EXPECT_EQ(0, unwrapper.Unwrap(0));
+
+ EXPECT_EQ(0x8000, unwrapper.Unwrap(0x8000));
+ EXPECT_EQ(0xFFFF, unwrapper.Unwrap(0xFFFF));
+ EXPECT_EQ(0x10000, unwrapper.Unwrap(0));
+ EXPECT_EQ(0xFFFF, unwrapper.Unwrap(0xFFFF));
+ EXPECT_EQ(0x8000, unwrapper.Unwrap(0x8000));
+ EXPECT_EQ(0, unwrapper.Unwrap(0));
+
+ // Don't allow negative values.
+ EXPECT_EQ(0xFFFF, unwrapper.Unwrap(0xFFFF));
+}
+
+TEST(SequenceNumberUnwrapper, ForwardWraps) {
+ int64_t seq = 0;
+ SequenceNumberUnwrapper unwrapper;
+
+ const int kMaxIncrease = 0x8000 - 1;
+ const int kNumWraps = 4;
+ for (int i = 0; i < kNumWraps * 2; ++i) {
+ int64_t unwrapped = unwrapper.Unwrap(static_cast<uint16_t>(seq & 0xFFFF));
+ EXPECT_EQ(seq, unwrapped);
+ seq += kMaxIncrease;
+ }
+
+ unwrapper.UpdateLast(0);
+ for (int seq = 0; seq < kNumWraps * 0xFFFF; ++seq) {
+ int64_t unwrapped = unwrapper.Unwrap(static_cast<uint16_t>(seq & 0xFFFF));
+ EXPECT_EQ(seq, unwrapped);
+ }
+}
+
+TEST(SequenceNumberUnwrapper, BackwardWraps) {
+ SequenceNumberUnwrapper unwrapper;
+
+ const int kMaxDecrease = 0x8000 - 1;
+ const int kNumWraps = 4;
+ int64_t seq = kNumWraps * 2 * kMaxDecrease;
+ unwrapper.UpdateLast(seq);
+ for (int i = kNumWraps * 2; i >= 0; --i) {
+ int64_t unwrapped = unwrapper.Unwrap(static_cast<uint16_t>(seq & 0xFFFF));
+ EXPECT_EQ(seq, unwrapped);
+ seq -= kMaxDecrease;
+ }
+
+ seq = kNumWraps * 0xFFFF;
+ unwrapper.UpdateLast(seq);
+ for (; seq >= 0; --seq) {
+ int64_t unwrapped = unwrapper.Unwrap(static_cast<uint16_t>(seq & 0xFFFF));
+ EXPECT_EQ(seq, unwrapped);
+ }
+}
+
+TEST(TimestampUnwrapper, Limits) {
+ TimestampUnwrapper unwrapper;
+
+ EXPECT_EQ(0, unwrapper.Unwrap(0));
+ EXPECT_EQ(0x80000000, unwrapper.Unwrap(0x80000000));
+ // Delta is exactly 0x80000000 but current is lower than input, wrap
+ // backwards.
+ EXPECT_EQ(0, unwrapper.Unwrap(0));
+
+ EXPECT_EQ(0x80000000, unwrapper.Unwrap(0x80000000));
+ EXPECT_EQ(0xFFFFFFFF, unwrapper.Unwrap(0xFFFFFFFF));
+ EXPECT_EQ(0x100000000, unwrapper.Unwrap(0x00000000));
+ EXPECT_EQ(0xFFFFFFFF, unwrapper.Unwrap(0xFFFFFFFF));
+ EXPECT_EQ(0x80000000, unwrapper.Unwrap(0x80000000));
+ EXPECT_EQ(0, unwrapper.Unwrap(0));
+
+ // Don't allow negative values.
+ EXPECT_EQ(0xFFFFFFFF, unwrapper.Unwrap(0xFFFFFFFF));
+}
+
+TEST(TimestampUnwrapper, ForwardWraps) {
+ int64_t ts = 0;
+ TimestampUnwrapper unwrapper;
+
+ const int64_t kMaxIncrease = 0x80000000 - 1;
+ const int kNumWraps = 4;
+ for (int i = 0; i < kNumWraps * 2; ++i) {
+ int64_t unwrapped =
+ unwrapper.Unwrap(static_cast<uint32_t>(ts & 0xFFFFFFFF));
+ EXPECT_EQ(ts, unwrapped);
+ ts += kMaxIncrease;
+ }
+}
+
+TEST(TimestampUnwrapper, BackwardWraps) {
+ TimestampUnwrapper unwrapper;
+
+ const int64_t kMaxDecrease = 0x80000000 - 1;
+ const int kNumWraps = 4;
+ int64_t ts = kNumWraps * 2 * kMaxDecrease;
+ unwrapper.UpdateLast(ts);
+ for (int i = 0; i <= kNumWraps * 2; ++i) {
+ int64_t unwrapped =
+ unwrapper.Unwrap(static_cast<uint32_t>(ts & 0xFFFFFFFF));
+ EXPECT_EQ(ts, unwrapped);
+ ts -= kMaxDecrease;
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/module_fec_api_gn/moz.build b/third_party/libwebrtc/modules/module_fec_api_gn/moz.build
new file mode 100644
index 0000000000..4c59791a10
--- /dev/null
+++ b/third_party/libwebrtc/modules/module_fec_api_gn/moz.build
@@ -0,0 +1,189 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("module_fec_api_gn")
diff --git a/third_party/libwebrtc/modules/pacing/BUILD.gn b/third_party/libwebrtc/modules/pacing/BUILD.gn
new file mode 100644
index 0000000000..3cea1a3fa8
--- /dev/null
+++ b/third_party/libwebrtc/modules/pacing/BUILD.gn
@@ -0,0 +1,119 @@
+# Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+import("../../webrtc.gni")
+
+rtc_library("pacing") {
+ # Client code SHOULD NOT USE THIS TARGET, but for now it needs to be public
+ # because there exists client code that uses it.
+ # TODO(bugs.webrtc.org/9808): Move to private visibility as soon as that
+ # client code gets updated.
+ visibility = [ "*" ]
+ sources = [
+ "bitrate_prober.cc",
+ "bitrate_prober.h",
+ "pacing_controller.cc",
+ "pacing_controller.h",
+ "packet_router.cc",
+ "packet_router.h",
+ "prioritized_packet_queue.cc",
+ "prioritized_packet_queue.h",
+ "round_robin_packet_queue.cc",
+ "round_robin_packet_queue.h",
+ "rtp_packet_pacer.h",
+ "task_queue_paced_sender.cc",
+ "task_queue_paced_sender.h",
+ ]
+
+ deps = [
+ ":interval_budget",
+ "../../api:field_trials_view",
+ "../../api:field_trials_view",
+ "../../api:function_view",
+ "../../api:sequence_checker",
+ "../../api/rtc_event_log",
+ "../../api/task_queue:task_queue",
+ "../../api/transport:field_trial_based_config",
+ "../../api/transport:network_control",
+ "../../api/units:data_rate",
+ "../../api/units:data_size",
+ "../../api/units:time_delta",
+ "../../api/units:timestamp",
+ "../../logging:rtc_event_bwe",
+ "../../logging:rtc_event_pacing",
+ "../../rtc_base:checks",
+ "../../rtc_base:event_tracer",
+ "../../rtc_base:location",
+ "../../rtc_base:logging",
+ "../../rtc_base:macromagic",
+ "../../rtc_base:rtc_numerics",
+ "../../rtc_base:rtc_task_queue",
+ "../../rtc_base:timeutils",
+ "../../rtc_base/experiments:field_trial_parser",
+ "../../rtc_base/synchronization:mutex",
+ "../../rtc_base/system:unused",
+ "../../system_wrappers",
+ "../../system_wrappers:metrics",
+ "../rtp_rtcp",
+ "../rtp_rtcp:rtp_rtcp_format",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/memory",
+ "//third_party/abseil-cpp/absl/strings",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+}
+
+rtc_library("interval_budget") {
+ sources = [
+ "interval_budget.cc",
+ "interval_budget.h",
+ ]
+
+ deps = [
+ "../../rtc_base:checks",
+ "../../rtc_base:safe_conversions",
+ ]
+}
+
+if (rtc_include_tests) {
+ rtc_library("pacing_unittests") {
+ testonly = true
+
+ sources = [
+ "bitrate_prober_unittest.cc",
+ "interval_budget_unittest.cc",
+ "pacing_controller_unittest.cc",
+ "packet_router_unittest.cc",
+ "prioritized_packet_queue_unittest.cc",
+ "round_robin_packet_queue_unittest.cc",
+ "task_queue_paced_sender_unittest.cc",
+ ]
+ deps = [
+ ":interval_budget",
+ ":pacing",
+ "../../api/task_queue:task_queue",
+ "../../api/transport:network_control",
+ "../../api/units:data_rate",
+ "../../api/units:time_delta",
+ "../../api/units:timestamp",
+ "../../rtc_base:checks",
+ "../../rtc_base:rtc_base_tests_utils",
+ "../../rtc_base/experiments:alr_experiment",
+ "../../system_wrappers",
+ "../../test:explicit_key_value_config",
+ "../../test:scoped_key_value_config",
+ "../../test:test_support",
+ "../../test/time_controller:time_controller",
+ "../rtp_rtcp",
+ "../rtp_rtcp:mock_rtp_rtcp",
+ "../rtp_rtcp:rtp_rtcp_format",
+ ]
+ absl_deps = [ "//third_party/abseil-cpp/absl/functional:any_invocable" ]
+ }
+}
diff --git a/third_party/libwebrtc/modules/pacing/DEPS b/third_party/libwebrtc/modules/pacing/DEPS
new file mode 100644
index 0000000000..42f3dfcb14
--- /dev/null
+++ b/third_party/libwebrtc/modules/pacing/DEPS
@@ -0,0 +1,6 @@
+include_rules = [
+ "+system_wrappers",
+ # Avoid directly using field_trial. Instead use FieldTrialsView.
+ "-system_wrappers/include/field_trial.h",
+ "+logging/rtc_event_log"
+]
diff --git a/third_party/libwebrtc/modules/pacing/OWNERS b/third_party/libwebrtc/modules/pacing/OWNERS
new file mode 100644
index 0000000000..0a77688b1e
--- /dev/null
+++ b/third_party/libwebrtc/modules/pacing/OWNERS
@@ -0,0 +1,6 @@
+stefan@webrtc.org
+mflodman@webrtc.org
+asapersson@webrtc.org
+philipel@webrtc.org
+srte@webrtc.org
+sprang@webrtc.org
diff --git a/third_party/libwebrtc/modules/pacing/bitrate_prober.cc b/third_party/libwebrtc/modules/pacing/bitrate_prober.cc
new file mode 100644
index 0000000000..ec9182fca2
--- /dev/null
+++ b/third_party/libwebrtc/modules/pacing/bitrate_prober.cc
@@ -0,0 +1,204 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/pacing/bitrate_prober.h"
+
+#include <algorithm>
+
+#include "absl/memory/memory.h"
+#include "api/rtc_event_log/rtc_event.h"
+#include "api/rtc_event_log/rtc_event_log.h"
+#include "logging/rtc_event_log/events/rtc_event_probe_cluster_created.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "system_wrappers/include/metrics.h"
+
+namespace webrtc {
+
+namespace {
+// The minimum probe packet size is scaled with the bitrate we're probing at.
+// This constant caps that scaled minimum, so at high bitrates the minimum
+// probe packet size is 200 bytes.
+constexpr DataSize kMinProbePacketSize = DataSize::Bytes(200);
+
+constexpr TimeDelta kProbeClusterTimeout = TimeDelta::Seconds(5);
+
+} // namespace
+
+BitrateProberConfig::BitrateProberConfig(
+ const FieldTrialsView* key_value_config)
+ : min_probe_delta("min_probe_delta", TimeDelta::Millis(1)),
+ max_probe_delay("max_probe_delay", TimeDelta::Millis(10)) {
+ ParseFieldTrial({&min_probe_delta, &max_probe_delay},
+ key_value_config->Lookup("WebRTC-Bwe-ProbingBehavior"));
+}
+
+BitrateProber::~BitrateProber() {
+ RTC_HISTOGRAM_COUNTS_1000("WebRTC.BWE.Probing.TotalProbeClustersRequested",
+ total_probe_count_);
+ RTC_HISTOGRAM_COUNTS_1000("WebRTC.BWE.Probing.TotalFailedProbeClusters",
+ total_failed_probe_count_);
+}
+
+BitrateProber::BitrateProber(const FieldTrialsView& field_trials)
+ : probing_state_(ProbingState::kDisabled),
+ next_probe_time_(Timestamp::PlusInfinity()),
+ total_probe_count_(0),
+ total_failed_probe_count_(0),
+ config_(&field_trials) {
+ SetEnabled(true);
+}
+
+void BitrateProber::SetEnabled(bool enable) {
+ if (enable) {
+ if (probing_state_ == ProbingState::kDisabled) {
+ probing_state_ = ProbingState::kInactive;
+ RTC_LOG(LS_INFO) << "Bandwidth probing enabled, set to inactive";
+ }
+ } else {
+ probing_state_ = ProbingState::kDisabled;
+ RTC_LOG(LS_INFO) << "Bandwidth probing disabled";
+ }
+}
+
+void BitrateProber::OnIncomingPacket(DataSize packet_size) {
+ // Don't initialize probing unless we have something large enough to start
+ // probing.
+ if (probing_state_ == ProbingState::kInactive && !clusters_.empty() &&
+ packet_size >= std::min(RecommendedMinProbeSize(), kMinProbePacketSize)) {
+ // Send next probe right away.
+ next_probe_time_ = Timestamp::MinusInfinity();
+ probing_state_ = ProbingState::kActive;
+ }
+}
+
+void BitrateProber::CreateProbeCluster(
+ const ProbeClusterConfig& cluster_config) {
+ RTC_DCHECK(probing_state_ != ProbingState::kDisabled);
+
+ total_probe_count_++;
+ while (!clusters_.empty() &&
+ cluster_config.at_time - clusters_.front().requested_at >
+ kProbeClusterTimeout) {
+ clusters_.pop();
+ total_failed_probe_count_++;
+ }
+
+ ProbeCluster cluster;
+ cluster.requested_at = cluster_config.at_time;
+ cluster.pace_info.probe_cluster_min_probes =
+ cluster_config.target_probe_count;
+ cluster.pace_info.probe_cluster_min_bytes =
+ (cluster_config.target_data_rate * cluster_config.target_duration)
+ .bytes();
+ RTC_DCHECK_GE(cluster.pace_info.probe_cluster_min_bytes, 0);
+ cluster.pace_info.send_bitrate_bps = cluster_config.target_data_rate.bps();
+ cluster.pace_info.probe_cluster_id = cluster_config.id;
+ clusters_.push(cluster);
+
+ RTC_LOG(LS_INFO) << "Probe cluster (bitrate:min bytes:min packets): ("
+ << cluster.pace_info.send_bitrate_bps << ":"
+ << cluster.pace_info.probe_cluster_min_bytes << ":"
+ << cluster.pace_info.probe_cluster_min_probes << ")";
+ // If we are already probing, continue to do so. Otherwise set it to
+ // kInactive and wait for OnIncomingPacket to start the probing.
+ if (probing_state_ != ProbingState::kActive)
+ probing_state_ = ProbingState::kInactive;
+}
+
+Timestamp BitrateProber::NextProbeTime(Timestamp now) const {
+ // Probing is not active or probing is already complete.
+ if (probing_state_ != ProbingState::kActive || clusters_.empty()) {
+ return Timestamp::PlusInfinity();
+ }
+
+ return next_probe_time_;
+}
+
+absl::optional<PacedPacketInfo> BitrateProber::CurrentCluster(Timestamp now) {
+ if (clusters_.empty() || probing_state_ != ProbingState::kActive) {
+ return absl::nullopt;
+ }
+
+ if (next_probe_time_.IsFinite() &&
+ now - next_probe_time_ > config_.max_probe_delay.Get()) {
+ RTC_DLOG(LS_WARNING) << "Probe delay too high"
+ " (next_ms:"
+ << next_probe_time_.ms() << ", now_ms: " << now.ms()
+ << "), discarding probe cluster.";
+ clusters_.pop();
+ if (clusters_.empty()) {
+ probing_state_ = ProbingState::kSuspended;
+ return absl::nullopt;
+ }
+ }
+
+ PacedPacketInfo info = clusters_.front().pace_info;
+ info.probe_cluster_bytes_sent = clusters_.front().sent_bytes;
+ return info;
+}
+
+// Probe size is recommended based on the probe bitrate required. We choose
+// a minimum of twice the `min_probe_delta` interval to allow scheduling to
+// be feasible.
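+// For example, probing at 10 Mbps with the default `min_probe_delta` of 1 ms
+// yields a recommended minimum of 2 * 10'000'000 bps * 1 ms = 2500 bytes.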
+DataSize BitrateProber::RecommendedMinProbeSize() const {
+ if (clusters_.empty()) {
+ return DataSize::Zero();
+ }
+ DataRate send_rate =
+ DataRate::BitsPerSec(clusters_.front().pace_info.send_bitrate_bps);
+ return 2 * send_rate * config_.min_probe_delta;
+}
+
+void BitrateProber::ProbeSent(Timestamp now, DataSize size) {
+ RTC_DCHECK(probing_state_ == ProbingState::kActive);
+ RTC_DCHECK(!size.IsZero());
+
+ if (!clusters_.empty()) {
+ ProbeCluster* cluster = &clusters_.front();
+ if (cluster->sent_probes == 0) {
+ RTC_DCHECK(cluster->started_at.IsInfinite());
+ cluster->started_at = now;
+ }
+ cluster->sent_bytes += size.bytes<int>();
+ cluster->sent_probes += 1;
+ next_probe_time_ = CalculateNextProbeTime(*cluster);
+ if (cluster->sent_bytes >= cluster->pace_info.probe_cluster_min_bytes &&
+ cluster->sent_probes >= cluster->pace_info.probe_cluster_min_probes) {
+ RTC_HISTOGRAM_COUNTS_100000("WebRTC.BWE.Probing.ProbeClusterSizeInBytes",
+ cluster->sent_bytes);
+ RTC_HISTOGRAM_COUNTS_100("WebRTC.BWE.Probing.ProbesPerCluster",
+ cluster->sent_probes);
+ RTC_HISTOGRAM_COUNTS_10000("WebRTC.BWE.Probing.TimePerProbeCluster",
+ (now - cluster->started_at).ms());
+
+ clusters_.pop();
+ }
+ if (clusters_.empty()) {
+ probing_state_ = ProbingState::kSuspended;
+ }
+ }
+}
+
+Timestamp BitrateProber::CalculateNextProbeTime(
+ const ProbeCluster& cluster) const {
+ RTC_CHECK_GT(cluster.pace_info.send_bitrate_bps, 0);
+ RTC_CHECK(cluster.started_at.IsFinite());
+
+  // Compute the time delta from the cluster start so that the probe bitrate
+  // stays close to the target bitrate.
+ DataSize sent_bytes = DataSize::Bytes(cluster.sent_bytes);
+ DataRate send_bitrate =
+ DataRate::BitsPerSec(cluster.pace_info.send_bitrate_bps);
+ TimeDelta delta = sent_bytes / send_bitrate;
+ return cluster.started_at + delta;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/pacing/bitrate_prober.h b/third_party/libwebrtc/modules/pacing/bitrate_prober.h
new file mode 100644
index 0000000000..d38da7f841
--- /dev/null
+++ b/third_party/libwebrtc/modules/pacing/bitrate_prober.h
@@ -0,0 +1,123 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_PACING_BITRATE_PROBER_H_
+#define MODULES_PACING_BITRATE_PROBER_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <queue>
+
+#include "api/transport/field_trial_based_config.h"
+#include "api/transport/network_types.h"
+#include "rtc_base/experiments/field_trial_parser.h"
+
+namespace webrtc {
+class RtcEventLog;
+
+struct BitrateProberConfig {
+ explicit BitrateProberConfig(const FieldTrialsView* key_value_config);
+ BitrateProberConfig(const BitrateProberConfig&) = default;
+ BitrateProberConfig& operator=(const BitrateProberConfig&) = default;
+ ~BitrateProberConfig() = default;
+
+ // A minimum interval between probes to allow scheduling to be feasible.
+ FieldTrialParameter<TimeDelta> min_probe_delta;
+ // Maximum amount of time each probe can be delayed.
+ FieldTrialParameter<TimeDelta> max_probe_delay;
+};
+
+// Note that this class isn't thread-safe by itself and therefore relies
+// on being protected by the caller.
+class BitrateProber {
+ public:
+ explicit BitrateProber(const FieldTrialsView& field_trials);
+ ~BitrateProber();
+
+ void SetEnabled(bool enable);
+
+ // Returns true if the prober is in a probing session, i.e., it currently
+ // wants packets to be sent out according to the time returned by
+  // NextProbeTime().
+ bool is_probing() const { return probing_state_ == ProbingState::kActive; }
+
+ // Initializes a new probing session if the prober is allowed to probe. Does
+ // not initialize the prober unless the packet size is large enough to probe
+ // with.
+ void OnIncomingPacket(DataSize packet_size);
+
+ // Create a cluster used to probe.
+ void CreateProbeCluster(const ProbeClusterConfig& cluster_config);
+ // Returns the time at which the next probe should be sent to get accurate
+ // probing. If probing is not desired at this time, Timestamp::PlusInfinity()
+ // will be returned.
+ // TODO(bugs.webrtc.org/11780): Remove `now` argument when old mode is gone.
+ Timestamp NextProbeTime(Timestamp now) const;
+
+ // Information about the current probing cluster.
+ absl::optional<PacedPacketInfo> CurrentCluster(Timestamp now);
+
+ // Returns the minimum number of bytes that the prober recommends for
+ // the next probe, or zero if not probing.
+ DataSize RecommendedMinProbeSize() const;
+
+ // Called to report to the prober that a probe has been sent. In case of
+ // multiple packets per probe, this call would be made at the end of sending
+  // the last packet in the probe. `size` is the total size of all packets
+  // in the probe.
+ void ProbeSent(Timestamp now, DataSize size);
+
+ private:
+ enum class ProbingState {
+    // Probing will never be triggered in this state.
+ kDisabled,
+ // Probing is enabled and ready to trigger on the first packet arrival.
+ kInactive,
+ // Probe cluster is filled with the set of data rates to be probed and
+ // probes are being sent.
+ kActive,
+ // Probing is enabled, but currently suspended until an explicit trigger
+ // to start probing again.
+ kSuspended,
+ };
+
+ // A probe cluster consists of a set of probes. Each probe in turn can be
+ // divided into a number of packets to accommodate the MTU on the network.
+ struct ProbeCluster {
+ PacedPacketInfo pace_info;
+
+ int sent_probes = 0;
+ int sent_bytes = 0;
+ Timestamp requested_at = Timestamp::MinusInfinity();
+ Timestamp started_at = Timestamp::MinusInfinity();
+ int retries = 0;
+ };
+
+ Timestamp CalculateNextProbeTime(const ProbeCluster& cluster) const;
+
+ ProbingState probing_state_;
+
+  // Pending probe clusters. The send bitrate of the cluster at the front of
+  // the queue is used to compute the delta to the next probe, based on the
+  // size and send time of the previous probe packet.
+ std::queue<ProbeCluster> clusters_;
+
+ // Time the next probe should be sent when in kActive state.
+ Timestamp next_probe_time_;
+
+ int total_probe_count_;
+ int total_failed_probe_count_;
+
+ BitrateProberConfig config_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_PACING_BITRATE_PROBER_H_
diff --git a/third_party/libwebrtc/modules/pacing/bitrate_prober_unittest.cc b/third_party/libwebrtc/modules/pacing/bitrate_prober_unittest.cc
new file mode 100644
index 0000000000..dd9bfe482b
--- /dev/null
+++ b/third_party/libwebrtc/modules/pacing/bitrate_prober_unittest.cc
@@ -0,0 +1,287 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/pacing/bitrate_prober.h"
+
+#include <algorithm>
+
+#include "api/units/data_rate.h"
+#include "test/explicit_key_value_config.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+TEST(BitrateProberTest, VerifyStatesAndTimeBetweenProbes) {
+ const FieldTrialBasedConfig config;
+ BitrateProber prober(config);
+ EXPECT_FALSE(prober.is_probing());
+
+ Timestamp now = Timestamp::Zero();
+ const Timestamp start_time = now;
+ EXPECT_EQ(prober.NextProbeTime(now), Timestamp::PlusInfinity());
+
+ const DataRate kTestBitrate1 = DataRate::KilobitsPerSec(900);
+ const DataRate kTestBitrate2 = DataRate::KilobitsPerSec(1800);
+ const int kClusterSize = 5;
+ const DataSize kProbeSize = DataSize::Bytes(1000);
+ const TimeDelta kMinProbeDuration = TimeDelta::Millis(15);
+
+ prober.CreateProbeCluster({.at_time = now,
+ .target_data_rate = kTestBitrate1,
+ .target_duration = TimeDelta::Millis(15),
+ .target_probe_count = 5,
+ .id = 0});
+ prober.CreateProbeCluster({.at_time = now,
+ .target_data_rate = kTestBitrate2,
+ .target_duration = TimeDelta::Millis(15),
+ .target_probe_count = 5,
+ .id = 1});
+ EXPECT_FALSE(prober.is_probing());
+
+ prober.OnIncomingPacket(kProbeSize);
+ EXPECT_TRUE(prober.is_probing());
+ EXPECT_EQ(0, prober.CurrentCluster(now)->probe_cluster_id);
+
+ // First packet should probe as soon as possible.
+ EXPECT_EQ(Timestamp::MinusInfinity(), prober.NextProbeTime(now));
+
+ for (int i = 0; i < kClusterSize; ++i) {
+ now = std::max(now, prober.NextProbeTime(now));
+ EXPECT_EQ(now, std::max(now, prober.NextProbeTime(now)));
+ EXPECT_EQ(0, prober.CurrentCluster(now)->probe_cluster_id);
+ prober.ProbeSent(now, kProbeSize);
+ }
+
+ EXPECT_GE(now - start_time, kMinProbeDuration);
+  // Verify that the actual bitrate is within 10% of the target.
+ DataRate bitrate = kProbeSize * (kClusterSize - 1) / (now - start_time);
+ EXPECT_GT(bitrate, kTestBitrate1 * 0.9);
+ EXPECT_LT(bitrate, kTestBitrate1 * 1.1);
+
+ now = std::max(now, prober.NextProbeTime(now));
+ Timestamp probe2_started = now;
+
+ for (int i = 0; i < kClusterSize; ++i) {
+ now = std::max(now, prober.NextProbeTime(now));
+ EXPECT_EQ(now, std::max(now, prober.NextProbeTime(now)));
+ EXPECT_EQ(1, prober.CurrentCluster(now)->probe_cluster_id);
+ prober.ProbeSent(now, kProbeSize);
+ }
+
+  // Verify that the actual bitrate is within 10% of the target.
+ TimeDelta duration = now - probe2_started;
+ EXPECT_GE(duration, kMinProbeDuration);
+ bitrate = (kProbeSize * (kClusterSize - 1)) / duration;
+ EXPECT_GT(bitrate, kTestBitrate2 * 0.9);
+ EXPECT_LT(bitrate, kTestBitrate2 * 1.1);
+
+ EXPECT_EQ(prober.NextProbeTime(now), Timestamp::PlusInfinity());
+ EXPECT_FALSE(prober.is_probing());
+}
+
+TEST(BitrateProberTest, DoesntProbeWithoutRecentPackets) {
+ const FieldTrialBasedConfig config;
+ BitrateProber prober(config);
+ const DataSize kProbeSize = DataSize::Bytes(1000);
+
+ Timestamp now = Timestamp::Zero();
+ EXPECT_EQ(prober.NextProbeTime(now), Timestamp::PlusInfinity());
+
+ prober.CreateProbeCluster({.at_time = now,
+ .target_data_rate = DataRate::KilobitsPerSec(900),
+ .target_duration = TimeDelta::Millis(15),
+ .target_probe_count = 5,
+ .id = 0});
+ EXPECT_FALSE(prober.is_probing());
+
+ prober.OnIncomingPacket(kProbeSize);
+ EXPECT_TRUE(prober.is_probing());
+ EXPECT_EQ(now, std::max(now, prober.NextProbeTime(now)));
+ prober.ProbeSent(now, kProbeSize);
+}
+
+TEST(BitrateProberTest, DiscardsDelayedProbes) {
+ const TimeDelta kMaxProbeDelay = TimeDelta::Millis(3);
+ const test::ExplicitKeyValueConfig trials(
+ "WebRTC-Bwe-ProbingBehavior/"
+ "abort_delayed_probes:1,"
+ "max_probe_delay:3ms/");
+ BitrateProber prober(trials);
+ const DataSize kProbeSize = DataSize::Bytes(1000);
+
+ Timestamp now = Timestamp::Zero();
+
+ // Add two probe clusters.
+ prober.CreateProbeCluster({.at_time = now,
+ .target_data_rate = DataRate::KilobitsPerSec(900),
+ .target_duration = TimeDelta::Millis(15),
+ .target_probe_count = 5,
+ .id = 0});
+
+ prober.OnIncomingPacket(kProbeSize);
+ EXPECT_TRUE(prober.is_probing());
+ EXPECT_EQ(prober.CurrentCluster(now)->probe_cluster_id, 0);
+ // Advance to first probe time and indicate sent probe.
+ now = std::max(now, prober.NextProbeTime(now));
+ prober.ProbeSent(now, kProbeSize);
+
+ // Advance time 1ms past timeout for the next probe.
+ Timestamp next_probe_time = prober.NextProbeTime(now);
+ EXPECT_GT(next_probe_time, now);
+ now += next_probe_time - now + kMaxProbeDelay + TimeDelta::Millis(1);
+
+ // Still indicates the time we wanted to probe at.
+ EXPECT_EQ(prober.NextProbeTime(now), next_probe_time);
+ // First and only cluster removed due to timeout.
+ EXPECT_FALSE(prober.CurrentCluster(now).has_value());
+}
+
+TEST(BitrateProberTest, DoesntInitializeProbingForSmallPackets) {
+ const FieldTrialBasedConfig config;
+ BitrateProber prober(config);
+
+ prober.SetEnabled(true);
+ EXPECT_FALSE(prober.is_probing());
+
+ prober.OnIncomingPacket(DataSize::Bytes(100));
+ EXPECT_FALSE(prober.is_probing());
+}
+
+TEST(BitrateProberTest, VerifyProbeSizeOnHighBitrate) {
+ const FieldTrialBasedConfig config;
+ BitrateProber prober(config);
+
+ const DataRate kHighBitrate = DataRate::KilobitsPerSec(10000); // 10 Mbps
+
+ prober.CreateProbeCluster({.at_time = Timestamp::Zero(),
+ .target_data_rate = kHighBitrate,
+ .target_duration = TimeDelta::Millis(15),
+ .target_probe_count = 5,
+ .id = 0});
+ // Probe size should ensure a minimum of 1 ms interval.
+ EXPECT_GT(prober.RecommendedMinProbeSize(),
+ kHighBitrate * TimeDelta::Millis(1));
+}
+
+TEST(BitrateProberTest, MinimumNumberOfProbingPackets) {
+ const FieldTrialBasedConfig config;
+ BitrateProber prober(config);
+ // Even when probing at a low bitrate we expect a minimum number
+ // of packets to be sent.
+ const DataRate kBitrate = DataRate::KilobitsPerSec(100);
+ const DataSize kPacketSize = DataSize::Bytes(1000);
+
+ Timestamp now = Timestamp::Zero();
+ prober.CreateProbeCluster({.at_time = Timestamp::Zero(),
+ .target_data_rate = kBitrate,
+ .target_duration = TimeDelta::Millis(15),
+ .target_probe_count = 5,
+ .id = 0});
+
+ prober.OnIncomingPacket(kPacketSize);
+ for (int i = 0; i < 5; ++i) {
+ EXPECT_TRUE(prober.is_probing());
+ prober.ProbeSent(now, kPacketSize);
+ }
+
+ EXPECT_FALSE(prober.is_probing());
+}
+
+TEST(BitrateProberTest, ScaleBytesUsedForProbing) {
+ const FieldTrialBasedConfig config;
+ BitrateProber prober(config);
+ const DataRate kBitrate = DataRate::KilobitsPerSec(10000); // 10 Mbps.
+ const DataSize kPacketSize = DataSize::Bytes(1000);
+ const DataSize kExpectedDataSent = kBitrate * TimeDelta::Millis(15);
+
+ Timestamp now = Timestamp::Zero();
+ prober.CreateProbeCluster({.at_time = Timestamp::Zero(),
+ .target_data_rate = kBitrate,
+ .target_duration = TimeDelta::Millis(15),
+ .target_probe_count = 5,
+ .id = 0});
+ prober.OnIncomingPacket(kPacketSize);
+ DataSize data_sent = DataSize::Zero();
+ while (data_sent < kExpectedDataSent) {
+ ASSERT_TRUE(prober.is_probing());
+ prober.ProbeSent(now, kPacketSize);
+ data_sent += kPacketSize;
+ }
+
+ EXPECT_FALSE(prober.is_probing());
+}
+
+TEST(BitrateProberTest, HighBitrateProbing) {
+ const FieldTrialBasedConfig config;
+ BitrateProber prober(config);
+ const DataRate kBitrate = DataRate::KilobitsPerSec(1000000); // 1 Gbps.
+ const DataSize kPacketSize = DataSize::Bytes(1000);
+ const DataSize kExpectedDataSent = kBitrate * TimeDelta::Millis(15);
+
+ Timestamp now = Timestamp::Zero();
+ prober.CreateProbeCluster({.at_time = Timestamp::Zero(),
+ .target_data_rate = kBitrate,
+ .target_duration = TimeDelta::Millis(15),
+ .target_probe_count = 5,
+ .id = 0});
+ prober.OnIncomingPacket(kPacketSize);
+ DataSize data_sent = DataSize::Zero();
+ while (data_sent < kExpectedDataSent) {
+ ASSERT_TRUE(prober.is_probing());
+ prober.ProbeSent(now, kPacketSize);
+ data_sent += kPacketSize;
+ }
+
+ EXPECT_FALSE(prober.is_probing());
+}
+
+TEST(BitrateProberTest, ProbeClusterTimeout) {
+ const FieldTrialBasedConfig config;
+ BitrateProber prober(config);
+ const DataRate kBitrate = DataRate::KilobitsPerSec(300);
+ const DataSize kSmallPacketSize = DataSize::Bytes(20);
+ // Expecting two probe clusters of 5 packets each.
+ const DataSize kExpectedDataSent = kSmallPacketSize * 2 * 5;
+ const TimeDelta kTimeout = TimeDelta::Millis(5000);
+
+ Timestamp now = Timestamp::Zero();
+ prober.CreateProbeCluster({.at_time = now,
+ .target_data_rate = kBitrate,
+ .target_duration = TimeDelta::Millis(15),
+ .target_probe_count = 5,
+ .id = 0});
+ prober.OnIncomingPacket(kSmallPacketSize);
+ EXPECT_FALSE(prober.is_probing());
+ now += kTimeout;
+ prober.CreateProbeCluster({.at_time = now,
+ .target_data_rate = kBitrate / 10,
+ .target_duration = TimeDelta::Millis(15),
+ .target_probe_count = 5,
+ .id = 1});
+ prober.OnIncomingPacket(kSmallPacketSize);
+ EXPECT_FALSE(prober.is_probing());
+ now += TimeDelta::Millis(1);
+ prober.CreateProbeCluster({.at_time = now,
+ .target_data_rate = kBitrate / 10,
+ .target_duration = TimeDelta::Millis(15),
+ .target_probe_count = 5,
+ .id = 2});
+ prober.OnIncomingPacket(kSmallPacketSize);
+ EXPECT_TRUE(prober.is_probing());
+ DataSize data_sent = DataSize::Zero();
+ while (data_sent < kExpectedDataSent) {
+ ASSERT_TRUE(prober.is_probing());
+ prober.ProbeSent(now, kSmallPacketSize);
+ data_sent += kSmallPacketSize;
+ }
+
+ EXPECT_FALSE(prober.is_probing());
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/pacing/g3doc/index.md b/third_party/libwebrtc/modules/pacing/g3doc/index.md
new file mode 100644
index 0000000000..edc548a8b4
--- /dev/null
+++ b/third_party/libwebrtc/modules/pacing/g3doc/index.md
@@ -0,0 +1,164 @@
+<?% config.freshness.reviewed = '2021-04-12' %?>
+<?% config.freshness.owner = 'sprang' %?>
+
+# Paced Sending
+
+The paced sender, often referred to as just the "pacer", is a part of the WebRTC
+RTP stack used primarily to smooth the flow of packets sent onto the network.
+
+## Background
+
+Consider a video stream at 5Mbps and 30fps. In an ideal world, each frame would
+be ~21kB large and packetized into 18 RTP packets. While the average bitrate
+over, say, a one-second sliding window would be a correct 5Mbps, on a shorter
+time scale it can be seen as a burst of 167Mbps every 33ms, each followed by a
+32ms silent period. Further, it is quite common that video encoders overshoot
+the target frame size in case of sudden movement, especially when
+screensharing. Frames 10x or even 100x larger than the ideal size are an all
+too real scenario. These packet bursts can cause several issues, such as
+congesting networks and causing buffer bloat or even packet loss. Most sessions
+have more than one media stream, e.g. a video and an audio track. If you put a
+frame on the wire in one go and those packets take 100ms to reach the other
+side, you have now blocked any audio packets from reaching the remote end in
+time as well.
+
+The paced sender solves this by having a buffer in which media is queued, and
+then using a _leaky bucket_ algorithm to pace the packets onto the network. The
+buffer contains separate FIFO streams for all media tracks so that e.g. audio
+can be prioritized over video - and equal-priority streams can be sent in a
+round-robin fashion to avoid any one stream blocking others.
+
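+As a rough illustration, the leaky-bucket idea amounts to a "media debt"
+counter: sending a packet adds debt, elapsed time drains it at the pacing
+rate, and the next packet may only go out once the debt has drained. Below is
+a minimal sketch under those assumptions - hypothetical names, not the actual
+`PacingController` code:
+
+```c++
+#include <algorithm>
+#include <cstdint>
+
+// Hypothetical sketch of debt-based pacing.
+struct LeakyBucketSketch {
+  int64_t debt_bytes = 0;
+  int64_t pacing_rate_bps = 5'000'000;  // 5 Mbps, illustrative.
+
+  // Elapsed time drains the debt at the pacing rate.
+  void OnTimeElapsed(int64_t delta_ms) {
+    int64_t drained_bytes = pacing_rate_bps * delta_ms / 8000;
+    debt_bytes = std::max<int64_t>(0, debt_bytes - drained_bytes);
+  }
+
+  // The next packet may be sent once the previous debt has drained.
+  bool CanSend() const { return debt_bytes == 0; }
+
+  // Sending adds the packet size to the debt.
+  void OnPacketSent(int64_t size_bytes) { debt_bytes += size_bytes; }
+};
+```
+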
+Since the pacer is in control of the bitrate sent on the wire, it is also used
+to generate padding in cases where a minimum send rate is required - and to
+generate packet trains if bitrate probing is used.
+
+## Life of a Packet
+
+The typical path for media packets when using the paced sender looks something
+like this:
+
+1. `RTPSenderVideo` or `RTPSenderAudio` packetizes media into RTP packets.
+2. The packets are sent to the [RTPSender] class for transmission.
+3. The pacer is called via [RtpPacketSender] interface to enqueue the packet
+ batch.
+4. The packets are put into a queue within the pacer awaiting opportune moments
+ to send them.
+5. At a calculated time, the pacer calls the `PacingController::PacketSender()`
+ callback method, normally implemented by the [PacketRouter] class.
+6. The router forwards the packet to the correct RTP module based on the
+   packet's SSRC, where the `RTPSenderEgress` class applies final time
+   stamping, potentially records it for retransmission, etc.
+7. The packet is sent to the low-level `Transport` interface, after which it
+   is out of the pacer's scope.
+
+Asynchronously to this, the estimated available send bandwidth is determined -
+and the target send rate is set on the `RtpPacketPacer` via the `void
+SetPacingRates(DataRate pacing_rate, DataRate padding_rate)` method.
+
+## Packet Prioritization
+
+The pacer prioritizes packets based on two criteria:
+
+* Packet type, with most to least prioritized:
+ 1. Audio
+ 2. Retransmissions
+ 3. Video and FEC
+ 4. Padding
+* Enqueue order
+
+The enqueue order is enforced on a per stream (SSRC) basis. Given equal
+priority, the [RoundRobinPacketQueue] alternates between media streams to ensure
+no stream needlessly blocks others.
+
+## Implementations
+
+The main class to use is called [TaskQueuePacedSender]. It uses a task queue to
+manage thread safety and schedule delayed tasks, but delegates most of the
+actual work to the `PacingController` class. This way, it's possible to develop
+a custom pacer with a different scheduling mechanism - but retain the same
+pacing logic.
+
+## The Packet Router
+
+An adjacent component called [PacketRouter] is used to route packets coming out
+of the pacer and into the correct RTP module. It has the following functions:
+
+* The `SendPacket` method looks up an RTP module with an SSRC corresponding to
+ the packet for further routing to the network.
+* If send-side bandwidth estimation is used, it populates the transport-wide
+ sequence number extension.
+* Generates padding. Modules supporting payload-based padding are prioritized,
+ with the last module to have sent media always being the first choice.
+* Returns any generated FEC after having sent media.
+* Forwards REMB and/or TransportFeedback messages to suitable RTP modules.
+
+At present, FEC is generated on a per-SSRC basis, so it is always returned from
+an RTP module after sending media. Hopefully one day we will support covering
+multiple streams with a single FlexFEC stream - and the packet router is the
+likely place for that FEC generator to live. It may even be used for FEC padding
+as an alternative to RTX.
+
+## The API
+
+This section outlines the classes and methods relevant to a few different use
+cases of the pacer.
+
+### Packet sending
+
+For sending packets, use
+`RtpPacketSender::EnqueuePackets(std::vector<std::unique_ptr<RtpPacketToSend>>
+packets)`. The pacer takes a `PacingController::PacketSender` as a constructor
+argument; this callback is used when it's time to actually send packets.
+
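+A sketch of the enqueue path (the `Packetize()` helper and the variable names
+are hypothetical, not part of the API):
+
+```c++
+// Media is packetized into RTP packets, then handed to the pacer queue.
+std::vector<std::unique_ptr<RtpPacketToSend>> packets =
+    Packetize(encoded_frame);
+rtp_packet_sender->EnqueuePackets(std::move(packets));
+// When it is time to send, the pacer invokes the
+// PacingController::PacketSender callback with the dequeued packets.
+```
+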
+### Send rates
+
+To control the send rate, use `void SetPacingRates(DataRate pacing_rate,
+DataRate padding_rate)`. If the packet queue becomes empty and the send rate
+drops below `padding_rate`, the pacer will request padding packets from the
+`PacketRouter`.
+
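+For example (the rates are illustrative):
+
+```c++
+// Pace media at 5 Mbps; if the queue runs dry, keep at least 800 kbps on the
+// wire using padding packets.
+pacer->SetPacingRates(DataRate::KilobitsPerSec(5000),
+                      DataRate::KilobitsPerSec(800));
+```
+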
+In order to completely suspend/resume sending data (e.g. due to network
+availability), use the `Pause()` and `Resume()` methods.
+
+The specified pacing rate may be overridden in some cases, e.g. due to extreme
+encoder overshoot. Use `void SetQueueTimeLimit(TimeDelta limit)` to specify the
+longest time you want packets to spend waiting in the pacer queue (pausing
+excluded). The actual send rate may then be increased past the `pacing_rate` to
+try to make the _average_ queue time less than that requested limit. The
+rationale for this is that if the send queue is, say, longer than three
+seconds, it's better to risk packet loss and then try to recover using a
+key-frame rather than cause severe delays.
+
+### Bandwidth estimation
+
+If the bandwidth estimator supports bandwidth probing, it may request a cluster
+of packets to be sent at a specified rate in order to gauge if this causes
+increased delay/loss on the network. Use the `void CreateProbeCluster(...)`
+method - packets sent via the `PacketRouter` will be marked with the
+corresponding `cluster_id` in the attached `PacedPacketInfo` struct.
+
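+A sketch of requesting a probe, using the `CreateProbeCluster(DataRate, int)`
+overload declared on `PacingController` in this module (the values are
+illustrative):
+
+```c++
+// Send a short train of packets at 1 Mbps, tagged with cluster id 1, so the
+// bandwidth estimator can observe the resulting delay/loss.
+pacing_controller->CreateProbeCluster(DataRate::KilobitsPerSec(1000),
+                                      /*cluster_id=*/1);
+```
+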
+If congestion window pushback is used, the state can be updated using
+`SetCongestionWindow()` and `UpdateOutstandingData()`.
+
+A few more methods control how we pace:
+
+* `SetAccountForAudioPackets()` determines if audio packets count into
+  bandwidth consumed.
+* `SetIncludeOverhead()` determines if the entire RTP packet size counts into
+  bandwidth used (otherwise just media payload).
+* `SetTransportOverhead()` sets an additional data size consumed per packet,
+  representing e.g. UDP/IP headers.
+
+### Stats
+
+Several methods are used to gather statistics on pacer state:
+
+* `OldestPacketWaitTime()` time since the oldest packet in the queue was
+ added.
+* `QueueSizeData()` total bytes currently in the queue.
+* `FirstSentPacketTime()` absolute time the first packet was sent.
+* `ExpectedQueueTime()` total bytes in the queue divided by the send rate.
+
+[RTPSender]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/rtp_rtcp/source/rtp_sender.h;drc=77ee8542dd35d5143b5788ddf47fb7cdb96eb08e
+[RtpPacketSender]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/rtp_rtcp/include/rtp_packet_sender.h;drc=ea55b0872f14faab23a4e5dbcb6956369c8ed5dc
+[RtpPacketPacer]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/pacing/rtp_packet_pacer.h;drc=e7bc3a347760023dd4840cf6ebdd1e6c8592f4d7
+[PacketRouter]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/pacing/packet_router.h;drc=3d2210876e31d0bb5c7de88b27fd02ceb1f4e03e
+[TaskQueuePacedSender]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/pacing/task_queue_paced_sender.h;drc=5051693ada61bc7b78855c6fb3fa87a0394fa813
+[RoundRobinPacketQueue]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/pacing/round_robin_packet_queue.h;drc=b571ff48f8fe07678da5a854cd6c3f5dde02855f
diff --git a/third_party/libwebrtc/modules/pacing/interval_budget.cc b/third_party/libwebrtc/modules/pacing/interval_budget.cc
new file mode 100644
index 0000000000..321ca46be4
--- /dev/null
+++ b/third_party/libwebrtc/modules/pacing/interval_budget.cc
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/pacing/interval_budget.h"
+
+#include <algorithm>
+
+#include "rtc_base/numerics/safe_conversions.h"
+
+namespace webrtc {
+namespace {
+constexpr int64_t kWindowMs = 500;
+}
+
+IntervalBudget::IntervalBudget(int initial_target_rate_kbps)
+ : IntervalBudget(initial_target_rate_kbps, false) {}
+
+IntervalBudget::IntervalBudget(int initial_target_rate_kbps,
+ bool can_build_up_underuse)
+ : bytes_remaining_(0), can_build_up_underuse_(can_build_up_underuse) {
+ set_target_rate_kbps(initial_target_rate_kbps);
+}
+
+void IntervalBudget::set_target_rate_kbps(int target_rate_kbps) {
+ target_rate_kbps_ = target_rate_kbps;
+ max_bytes_in_budget_ = (kWindowMs * target_rate_kbps_) / 8;
+ bytes_remaining_ = std::min(std::max(-max_bytes_in_budget_, bytes_remaining_),
+ max_bytes_in_budget_);
+}
+
+void IntervalBudget::IncreaseBudget(int64_t delta_time_ms) {
+ int64_t bytes = target_rate_kbps_ * delta_time_ms / 8;
+ if (bytes_remaining_ < 0 || can_build_up_underuse_) {
+    // We either overused last interval (negative remainder) or are allowed
+    // to build up underuse, so add this interval's budget to the remainder.
+ bytes_remaining_ = std::min(bytes_remaining_ + bytes, max_bytes_in_budget_);
+ } else {
+ // If we underused last interval we can't use it this interval.
+ bytes_remaining_ = std::min(bytes, max_bytes_in_budget_);
+ }
+}
+
+void IntervalBudget::UseBudget(size_t bytes) {
+ bytes_remaining_ = std::max(bytes_remaining_ - static_cast<int>(bytes),
+ -max_bytes_in_budget_);
+}
+
+size_t IntervalBudget::bytes_remaining() const {
+ return rtc::saturated_cast<size_t>(std::max<int64_t>(0, bytes_remaining_));
+}
+
+double IntervalBudget::budget_ratio() const {
+ if (max_bytes_in_budget_ == 0)
+ return 0.0;
+ return static_cast<double>(bytes_remaining_) / max_bytes_in_budget_;
+}
+
+int IntervalBudget::target_rate_kbps() const {
+ return target_rate_kbps_;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/pacing/interval_budget.h b/third_party/libwebrtc/modules/pacing/interval_budget.h
new file mode 100644
index 0000000000..faeb1d8fc4
--- /dev/null
+++ b/third_party/libwebrtc/modules/pacing/interval_budget.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_PACING_INTERVAL_BUDGET_H_
+#define MODULES_PACING_INTERVAL_BUDGET_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+namespace webrtc {
+
+// TODO(tschumim): Refactor IntervalBudget so that we can set an under- and
+// over-use budget in ms.
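+// Example (hypothetical usage): with a 100 kbps target rate, 50 ms of elapsed
+// time adds 100 * 50 / 8 = 625 bytes of budget; sending a 625-byte packet
+// then drains the budget back to zero.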
+class IntervalBudget {
+ public:
+ explicit IntervalBudget(int initial_target_rate_kbps);
+ IntervalBudget(int initial_target_rate_kbps, bool can_build_up_underuse);
+ void set_target_rate_kbps(int target_rate_kbps);
+
+ // TODO(tschumim): Unify IncreaseBudget and UseBudget to one function.
+ void IncreaseBudget(int64_t delta_time_ms);
+ void UseBudget(size_t bytes);
+
+ size_t bytes_remaining() const;
+ double budget_ratio() const;
+ int target_rate_kbps() const;
+
+ private:
+ int target_rate_kbps_;
+ int64_t max_bytes_in_budget_;
+ int64_t bytes_remaining_;
+ bool can_build_up_underuse_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_PACING_INTERVAL_BUDGET_H_
diff --git a/third_party/libwebrtc/modules/pacing/interval_budget_gn/moz.build b/third_party/libwebrtc/modules/pacing/interval_budget_gn/moz.build
new file mode 100644
index 0000000000..f03ff5ebcd
--- /dev/null
+++ b/third_party/libwebrtc/modules/pacing/interval_budget_gn/moz.build
@@ -0,0 +1,201 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/pacing/interval_budget.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("interval_budget_gn")
diff --git a/third_party/libwebrtc/modules/pacing/interval_budget_unittest.cc b/third_party/libwebrtc/modules/pacing/interval_budget_unittest.cc
new file mode 100644
index 0000000000..e182d35510
--- /dev/null
+++ b/third_party/libwebrtc/modules/pacing/interval_budget_unittest.cc
@@ -0,0 +1,123 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/pacing/interval_budget.h"
+
+#include "test/gtest.h"
+
+namespace webrtc {
+
+namespace {
+constexpr int kWindowMs = 500;
+constexpr int kBitrateKbps = 100;
+constexpr bool kCanBuildUpUnderuse = true;
+constexpr bool kCanNotBuildUpUnderuse = false;
+size_t TimeToBytes(int bitrate_kbps, int time_ms) {
+ return static_cast<size_t>(bitrate_kbps * time_ms / 8);
+}
+} // namespace
+
+TEST(IntervalBudgetTest, InitialState) {
+ IntervalBudget interval_budget(kBitrateKbps);
+ EXPECT_DOUBLE_EQ(interval_budget.budget_ratio(), 0.0);
+ EXPECT_EQ(interval_budget.bytes_remaining(), 0u);
+}
+
+TEST(IntervalBudgetTest, Underuse) {
+ IntervalBudget interval_budget(kBitrateKbps);
+ int delta_time_ms = 50;
+ interval_budget.IncreaseBudget(delta_time_ms);
+ EXPECT_DOUBLE_EQ(interval_budget.budget_ratio(),
+ kWindowMs / static_cast<double>(100 * delta_time_ms));
+ EXPECT_EQ(interval_budget.bytes_remaining(),
+ TimeToBytes(kBitrateKbps, delta_time_ms));
+}
+
+TEST(IntervalBudgetTest, DontUnderuseMoreThanMaxWindow) {
+ IntervalBudget interval_budget(kBitrateKbps);
+ int delta_time_ms = 1000;
+ interval_budget.IncreaseBudget(delta_time_ms);
+ EXPECT_DOUBLE_EQ(interval_budget.budget_ratio(), 1.0);
+ EXPECT_EQ(interval_budget.bytes_remaining(),
+ TimeToBytes(kBitrateKbps, kWindowMs));
+}
+
+TEST(IntervalBudgetTest, DontUnderuseMoreThanMaxWindowWhenChangeBitrate) {
+ IntervalBudget interval_budget(kBitrateKbps);
+ int delta_time_ms = kWindowMs / 2;
+ interval_budget.IncreaseBudget(delta_time_ms);
+ interval_budget.set_target_rate_kbps(kBitrateKbps / 10);
+ EXPECT_DOUBLE_EQ(interval_budget.budget_ratio(), 1.0);
+ EXPECT_EQ(interval_budget.bytes_remaining(),
+ TimeToBytes(kBitrateKbps / 10, kWindowMs));
+}
+
+TEST(IntervalBudgetTest, BalanceChangeOnBitrateChange) {
+ IntervalBudget interval_budget(kBitrateKbps);
+ int delta_time_ms = kWindowMs;
+ interval_budget.IncreaseBudget(delta_time_ms);
+ interval_budget.set_target_rate_kbps(kBitrateKbps * 2);
+ EXPECT_DOUBLE_EQ(interval_budget.budget_ratio(), 0.5);
+ EXPECT_EQ(interval_budget.bytes_remaining(),
+ TimeToBytes(kBitrateKbps, kWindowMs));
+}
+
+TEST(IntervalBudgetTest, Overuse) {
+ IntervalBudget interval_budget(kBitrateKbps);
+ int overuse_time_ms = 50;
+ int used_bytes = TimeToBytes(kBitrateKbps, overuse_time_ms);
+ interval_budget.UseBudget(used_bytes);
+ EXPECT_DOUBLE_EQ(interval_budget.budget_ratio(),
+ -kWindowMs / static_cast<double>(100 * overuse_time_ms));
+ EXPECT_EQ(interval_budget.bytes_remaining(), 0u);
+}
+
+TEST(IntervalBudgetTest, DontOveruseMoreThanMaxWindow) {
+ IntervalBudget interval_budget(kBitrateKbps);
+ int overuse_time_ms = 1000;
+ int used_bytes = TimeToBytes(kBitrateKbps, overuse_time_ms);
+ interval_budget.UseBudget(used_bytes);
+ EXPECT_DOUBLE_EQ(interval_budget.budget_ratio(), -1.0);
+ EXPECT_EQ(interval_budget.bytes_remaining(), 0u);
+}
+
+TEST(IntervalBudgetTest, CanBuildUpUnderuseWhenConfigured) {
+ IntervalBudget interval_budget(kBitrateKbps, kCanBuildUpUnderuse);
+ int delta_time_ms = 50;
+ interval_budget.IncreaseBudget(delta_time_ms);
+ EXPECT_DOUBLE_EQ(interval_budget.budget_ratio(),
+ kWindowMs / static_cast<double>(100 * delta_time_ms));
+ EXPECT_EQ(interval_budget.bytes_remaining(),
+ TimeToBytes(kBitrateKbps, delta_time_ms));
+
+ interval_budget.IncreaseBudget(delta_time_ms);
+ EXPECT_DOUBLE_EQ(interval_budget.budget_ratio(),
+ 2 * kWindowMs / static_cast<double>(100 * delta_time_ms));
+ EXPECT_EQ(interval_budget.bytes_remaining(),
+ TimeToBytes(kBitrateKbps, 2 * delta_time_ms));
+}
+
+TEST(IntervalBudgetTest, CanNotBuildUpUnderuseWhenConfigured) {
+ IntervalBudget interval_budget(kBitrateKbps, kCanNotBuildUpUnderuse);
+ int delta_time_ms = 50;
+ interval_budget.IncreaseBudget(delta_time_ms);
+ EXPECT_DOUBLE_EQ(interval_budget.budget_ratio(),
+ kWindowMs / static_cast<double>(100 * delta_time_ms));
+ EXPECT_EQ(interval_budget.bytes_remaining(),
+ TimeToBytes(kBitrateKbps, delta_time_ms));
+
+ interval_budget.IncreaseBudget(delta_time_ms);
+ EXPECT_DOUBLE_EQ(interval_budget.budget_ratio(),
+ kWindowMs / static_cast<double>(100 * delta_time_ms));
+ EXPECT_EQ(interval_budget.bytes_remaining(),
+ TimeToBytes(kBitrateKbps, delta_time_ms));
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/pacing/pacing_controller.cc b/third_party/libwebrtc/modules/pacing/pacing_controller.cc
new file mode 100644
index 0000000000..cdd908c9f8
--- /dev/null
+++ b/third_party/libwebrtc/modules/pacing/pacing_controller.cc
@@ -0,0 +1,681 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/pacing/pacing_controller.h"
+
+#include <algorithm>
+#include <memory>
+#include <utility>
+#include <vector>
+
+#include "absl/strings/match.h"
+#include "modules/pacing/bitrate_prober.h"
+#include "modules/pacing/interval_budget.h"
+#include "modules/pacing/prioritized_packet_queue.h"
+#include "modules/pacing/round_robin_packet_queue.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/experiments/field_trial_parser.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/time_utils.h"
+#include "system_wrappers/include/clock.h"
+
+namespace webrtc {
+namespace {
+// Time limit between packet bursts.
+constexpr TimeDelta kDefaultMinPacketLimit = TimeDelta::Millis(5);
+constexpr TimeDelta kCongestedPacketInterval = TimeDelta::Millis(500);
+// TODO(sprang): Consider dropping this limit.
+// The maximum debt level, in terms of time; debt is capped at this value when
+// sending packets.
+constexpr TimeDelta kMaxDebtInTime = TimeDelta::Millis(500);
+constexpr TimeDelta kMaxElapsedTime = TimeDelta::Seconds(2);
+constexpr TimeDelta kTargetPaddingDuration = TimeDelta::Millis(5);
+
+bool IsDisabled(const FieldTrialsView& field_trials, absl::string_view key) {
+ return absl::StartsWith(field_trials.Lookup(key), "Disabled");
+}
+
+bool IsEnabled(const FieldTrialsView& field_trials, absl::string_view key) {
+ return absl::StartsWith(field_trials.Lookup(key), "Enabled");
+}
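+
+// E.g. with the field trial string "WebRTC-Pacer-PadInSilence/Enabled/"
+// registered, IsEnabled(field_trials, "WebRTC-Pacer-PadInSilence") returns
+// true and IsDisabled() returns false for the same key.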
+
+std::unique_ptr<PacingController::PacketQueue> CreatePacketQueue(
+ const FieldTrialsView& field_trials,
+ Timestamp creation_time) {
+ if (field_trials.IsDisabled("WebRTC-Pacer-UsePrioritizedPacketQueue")) {
+ return std::make_unique<RoundRobinPacketQueue>(creation_time);
+ }
+ return std::make_unique<PrioritizedPacketQueue>(creation_time);
+}
+
+} // namespace
+
+const TimeDelta PacingController::kMaxExpectedQueueLength =
+ TimeDelta::Millis(2000);
+const float PacingController::kDefaultPaceMultiplier = 2.5f;
+const TimeDelta PacingController::kPausedProcessInterval =
+ kCongestedPacketInterval;
+const TimeDelta PacingController::kMinSleepTime = TimeDelta::Millis(1);
+const TimeDelta PacingController::kMaxEarlyProbeProcessing =
+ TimeDelta::Millis(1);
+
+PacingController::PacingController(Clock* clock,
+ PacketSender* packet_sender,
+ const FieldTrialsView& field_trials)
+ : clock_(clock),
+ packet_sender_(packet_sender),
+ field_trials_(field_trials),
+ drain_large_queues_(
+ !IsDisabled(field_trials_, "WebRTC-Pacer-DrainQueue")),
+ send_padding_if_silent_(
+ IsEnabled(field_trials_, "WebRTC-Pacer-PadInSilence")),
+ pace_audio_(IsEnabled(field_trials_, "WebRTC-Pacer-BlockAudio")),
+ ignore_transport_overhead_(
+ IsEnabled(field_trials_, "WebRTC-Pacer-IgnoreTransportOverhead")),
+ min_packet_limit_(kDefaultMinPacketLimit),
+ transport_overhead_per_packet_(DataSize::Zero()),
+ send_burst_interval_(TimeDelta::Zero()),
+ last_timestamp_(clock_->CurrentTime()),
+ paused_(false),
+ media_debt_(DataSize::Zero()),
+ padding_debt_(DataSize::Zero()),
+ pacing_rate_(DataRate::Zero()),
+ adjusted_media_rate_(DataRate::Zero()),
+ padding_rate_(DataRate::Zero()),
+ prober_(field_trials_),
+ probing_send_failure_(false),
+ last_process_time_(clock->CurrentTime()),
+ last_send_time_(last_process_time_),
+ seen_first_packet_(false),
+ packet_queue_(CreatePacketQueue(field_trials_, last_process_time_)),
+ congested_(false),
+ queue_time_limit_(kMaxExpectedQueueLength),
+ account_for_audio_(false),
+ include_overhead_(false) {
+ if (!drain_large_queues_) {
+ RTC_LOG(LS_WARNING) << "Pacer queues will not be drained,"
+ "pushback experiment must be enabled.";
+ }
+ FieldTrialParameter<int> min_packet_limit_ms("", min_packet_limit_.ms());
+ ParseFieldTrial({&min_packet_limit_ms},
+ field_trials_.Lookup("WebRTC-Pacer-MinPacketLimitMs"));
+ min_packet_limit_ = TimeDelta::Millis(min_packet_limit_ms.Get());
+ UpdateBudgetWithElapsedTime(min_packet_limit_);
+}
+
+PacingController::~PacingController() = default;
+
+void PacingController::CreateProbeCluster(DataRate bitrate, int cluster_id) {
+ prober_.CreateProbeCluster({.at_time = CurrentTime(),
+ .target_data_rate = bitrate,
+ .target_duration = TimeDelta::Millis(15),
+ .target_probe_count = 5,
+ .id = cluster_id});
+}
+
+void PacingController::CreateProbeClusters(
+ rtc::ArrayView<const ProbeClusterConfig> probe_cluster_configs) {
+ for (const ProbeClusterConfig probe_cluster_config : probe_cluster_configs) {
+ prober_.CreateProbeCluster(probe_cluster_config);
+ }
+}
+
+void PacingController::Pause() {
+ if (!paused_)
+ RTC_LOG(LS_INFO) << "PacedSender paused.";
+ paused_ = true;
+ packet_queue_->SetPauseState(true, CurrentTime());
+}
+
+void PacingController::Resume() {
+ if (paused_)
+ RTC_LOG(LS_INFO) << "PacedSender resumed.";
+ paused_ = false;
+ packet_queue_->SetPauseState(false, CurrentTime());
+}
+
+bool PacingController::IsPaused() const {
+ return paused_;
+}
+
+void PacingController::SetCongested(bool congested) {
+ if (congested_ && !congested) {
+ UpdateBudgetWithElapsedTime(UpdateTimeAndGetElapsed(CurrentTime()));
+ }
+ congested_ = congested;
+}
+
+bool PacingController::IsProbing() const {
+ return prober_.is_probing();
+}
+
+Timestamp PacingController::CurrentTime() const {
+ Timestamp time = clock_->CurrentTime();
+ if (time < last_timestamp_) {
+ RTC_LOG(LS_WARNING)
+ << "Non-monotonic clock behavior observed. Previous timestamp: "
+ << last_timestamp_.ms() << ", new timestamp: " << time.ms();
+ RTC_DCHECK_GE(time, last_timestamp_);
+ time = last_timestamp_;
+ }
+ last_timestamp_ = time;
+ return time;
+}
+
+void PacingController::SetProbingEnabled(bool enabled) {
+ RTC_CHECK(!seen_first_packet_);
+ prober_.SetEnabled(enabled);
+}
+
+void PacingController::SetPacingRates(DataRate pacing_rate,
+ DataRate padding_rate) {
+ static constexpr DataRate kMaxRate = DataRate::KilobitsPerSec(100'000);
+ RTC_CHECK_GT(pacing_rate, DataRate::Zero());
+ RTC_CHECK_GE(padding_rate, DataRate::Zero());
+ if (padding_rate > pacing_rate) {
+ RTC_LOG(LS_WARNING) << "Padding rate " << padding_rate.kbps()
+ << "kbps is higher than the pacing rate "
+ << pacing_rate.kbps() << "kbps, capping.";
+ padding_rate = pacing_rate;
+ }
+
+ if (pacing_rate > kMaxRate || padding_rate > kMaxRate) {
+ RTC_LOG(LS_WARNING) << "Very high pacing rates ( > " << kMaxRate.kbps()
+ << " kbps) configured: pacing = " << pacing_rate.kbps()
+ << " kbps, padding = " << padding_rate.kbps()
+ << " kbps.";
+ }
+ pacing_rate_ = pacing_rate;
+ padding_rate_ = padding_rate;
+ MaybeUpdateMediaRateDueToLongQueue(CurrentTime());
+
+ RTC_LOG(LS_VERBOSE) << "bwe:pacer_updated pacing_kbps=" << pacing_rate_.kbps()
+ << " padding_budget_kbps=" << padding_rate.kbps();
+}
+
+void PacingController::EnqueuePacket(std::unique_ptr<RtpPacketToSend> packet) {
+ RTC_DCHECK(pacing_rate_ > DataRate::Zero())
+ << "SetPacingRate must be called before InsertPacket.";
+ RTC_CHECK(packet->packet_type());
+
+ prober_.OnIncomingPacket(DataSize::Bytes(packet->payload_size()));
+
+ const Timestamp now = CurrentTime();
+ if (packet_queue_->Empty()) {
+    // If the queue is empty, we need to "fast-forward" the last process time,
+    // so that time passed while the queue was empty is not used as budget for
+    // sending the first new packet.
+ Timestamp target_process_time = now;
+ Timestamp next_send_time = NextSendTime();
+ if (next_send_time.IsFinite()) {
+ // There was already a valid planned send time, such as a keep-alive.
+ // Use that as last process time only if it's prior to now.
+ target_process_time = std::min(now, next_send_time);
+ }
+ UpdateBudgetWithElapsedTime(UpdateTimeAndGetElapsed(target_process_time));
+ }
+ packet_queue_->Push(now, std::move(packet));
+ seen_first_packet_ = true;
+
+ // Queue length has increased, check if we need to change the pacing rate.
+ MaybeUpdateMediaRateDueToLongQueue(now);
+}
+
+void PacingController::SetAccountForAudioPackets(bool account_for_audio) {
+ account_for_audio_ = account_for_audio;
+}
+
+void PacingController::SetIncludeOverhead() {
+ include_overhead_ = true;
+}
+
+void PacingController::SetTransportOverhead(DataSize overhead_per_packet) {
+ if (ignore_transport_overhead_)
+ return;
+ transport_overhead_per_packet_ = overhead_per_packet;
+}
+
+void PacingController::SetSendBurstInterval(TimeDelta burst_interval) {
+ send_burst_interval_ = burst_interval;
+}
+
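+// E.g. 10 kilobytes of queued payload at an adjusted media rate of 800 kbps
+// gives an expected queue time of 100 ms.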
+TimeDelta PacingController::ExpectedQueueTime() const {
+ RTC_DCHECK_GT(adjusted_media_rate_, DataRate::Zero());
+ return QueueSizeData() / adjusted_media_rate_;
+}
+
+size_t PacingController::QueueSizePackets() const {
+ return rtc::checked_cast<size_t>(packet_queue_->SizeInPackets());
+}
+
+const std::array<int, kNumMediaTypes>&
+PacingController::SizeInPacketsPerRtpPacketMediaType() const {
+ return packet_queue_->SizeInPacketsPerRtpPacketMediaType();
+}
+
+DataSize PacingController::QueueSizeData() const {
+ DataSize size = packet_queue_->SizeInPayloadBytes();
+ if (include_overhead_) {
+ size += static_cast<int64_t>(packet_queue_->SizeInPackets()) *
+ transport_overhead_per_packet_;
+ }
+ return size;
+}
+
+DataSize PacingController::CurrentBufferLevel() const {
+ return std::max(media_debt_, padding_debt_);
+}
+
+absl::optional<Timestamp> PacingController::FirstSentPacketTime() const {
+ return first_sent_packet_time_;
+}
+
+Timestamp PacingController::OldestPacketEnqueueTime() const {
+ return packet_queue_->OldestEnqueueTime();
+}
+
+TimeDelta PacingController::UpdateTimeAndGetElapsed(Timestamp now) {
+ // If no previous processing, or last process was "in the future" because of
+ // early probe processing, then there is no elapsed time to add budget for.
+ if (last_process_time_.IsMinusInfinity() || now < last_process_time_) {
+ return TimeDelta::Zero();
+ }
+ TimeDelta elapsed_time = now - last_process_time_;
+ last_process_time_ = now;
+ if (elapsed_time > kMaxElapsedTime) {
+ RTC_LOG(LS_WARNING) << "Elapsed time (" << elapsed_time.ms()
+ << " ms) longer than expected, limiting to "
+ << kMaxElapsedTime.ms();
+ elapsed_time = kMaxElapsedTime;
+ }
+ return elapsed_time;
+}
+
+bool PacingController::ShouldSendKeepalive(Timestamp now) const {
+ if (send_padding_if_silent_ || paused_ || congested_ || !seen_first_packet_) {
+ // We send a padding packet every 500 ms to ensure we won't get stuck in
+ // congested state due to no feedback being received.
+ if (now - last_send_time_ >= kCongestedPacketInterval) {
+ return true;
+ }
+ }
+ return false;
+}
+
+Timestamp PacingController::NextSendTime() const {
+ const Timestamp now = CurrentTime();
+ Timestamp next_send_time = Timestamp::PlusInfinity();
+
+ if (paused_) {
+ return last_send_time_ + kPausedProcessInterval;
+ }
+
+ // If probing is active, that always takes priority.
+ if (prober_.is_probing() && !probing_send_failure_) {
+ Timestamp probe_time = prober_.NextProbeTime(now);
+ if (!probe_time.IsPlusInfinity()) {
+ return probe_time.IsMinusInfinity() ? now : probe_time;
+ }
+ }
+
+  // Audio is not paced: if the leading packet is audio, its target send time
+  // is the time at which it was enqueued.
+ Timestamp unpaced_audio_time =
+ pace_audio_ ? Timestamp::PlusInfinity()
+ : packet_queue_->LeadingAudioPacketEnqueueTime();
+ if (unpaced_audio_time.IsFinite()) {
+ return unpaced_audio_time;
+ }
+
+ if (congested_ || !seen_first_packet_) {
+ // We need to at least send keep-alive packets with some interval.
+ return last_send_time_ + kCongestedPacketInterval;
+ }
+
+ if (adjusted_media_rate_ > DataRate::Zero() && !packet_queue_->Empty()) {
+    // If packets are allowed to be sent in a burst, the debt is allowed to
+    // grow up to one packet more than what can be sent during
+    // `send_burst_interval_`.
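+    // E.g. 500 bytes of media debt at an adjusted media rate of 1 Mbps gives
+    // a drain_time of 4 ms: with a send_burst_interval_ of 5 ms the packet
+    // may be sent immediately, otherwise 4 ms after last_process_time_.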
+ TimeDelta drain_time = media_debt_ / adjusted_media_rate_;
+ next_send_time =
+ last_process_time_ +
+ ((send_burst_interval_ > drain_time) ? TimeDelta::Zero() : drain_time);
+ } else if (padding_rate_ > DataRate::Zero() && packet_queue_->Empty()) {
+ // If we _don't_ have pending packets, check how long until we have
+ // bandwidth for padding packets. Both media and padding debts must
+ // have been drained to do this.
+ RTC_DCHECK_GT(adjusted_media_rate_, DataRate::Zero());
+ TimeDelta drain_time = std::max(media_debt_ / adjusted_media_rate_,
+ padding_debt_ / padding_rate_);
+
+ if (drain_time.IsZero() &&
+ (!media_debt_.IsZero() || !padding_debt_.IsZero())) {
+      // We have a non-zero debt, but the drain time is smaller than the tick
+      // size of TimeDelta; round it up to the smallest possible non-zero
+      // delta.
+ drain_time = TimeDelta::Micros(1);
+ }
+ next_send_time = last_process_time_ + drain_time;
+ } else {
+ // Nothing to do.
+ next_send_time = last_process_time_ + kPausedProcessInterval;
+ }
+
+ if (send_padding_if_silent_) {
+ next_send_time =
+ std::min(next_send_time, last_send_time_ + kPausedProcessInterval);
+ }
+
+ return next_send_time;
+}
+
+void PacingController::ProcessPackets() {
+ const Timestamp now = CurrentTime();
+ Timestamp target_send_time = now;
+
+ if (ShouldSendKeepalive(now)) {
+ DataSize keepalive_data_sent = DataSize::Zero();
+    // We cannot send padding unless a normal packet has first been sent; if
+    // we do, timestamps get messed up.
+ if (seen_first_packet_) {
+ std::vector<std::unique_ptr<RtpPacketToSend>> keepalive_packets =
+ packet_sender_->GeneratePadding(DataSize::Bytes(1));
+ for (auto& packet : keepalive_packets) {
+ keepalive_data_sent +=
+ DataSize::Bytes(packet->payload_size() + packet->padding_size());
+ packet_sender_->SendPacket(std::move(packet), PacedPacketInfo());
+      for (auto& fec_packet : packet_sender_->FetchFec()) {
+        EnqueuePacket(std::move(fec_packet));
+      }
+ }
+ }
+ OnPacketSent(RtpPacketMediaType::kPadding, keepalive_data_sent, now);
+ }
+
+ if (paused_) {
+ return;
+ }
+
+ TimeDelta early_execute_margin =
+ prober_.is_probing() ? kMaxEarlyProbeProcessing : TimeDelta::Zero();
+
+ target_send_time = NextSendTime();
+ if (now + early_execute_margin < target_send_time) {
+    // We are too early, but if the queue is empty we still allow draining
+    // some debt. Probes are allowed to be processed up to
+    // kMaxEarlyProbeProcessing early.
+ UpdateBudgetWithElapsedTime(UpdateTimeAndGetElapsed(now));
+ return;
+ }
+
+ TimeDelta elapsed_time = UpdateTimeAndGetElapsed(target_send_time);
+
+ if (elapsed_time > TimeDelta::Zero()) {
+ UpdateBudgetWithElapsedTime(elapsed_time);
+ }
+
+ PacedPacketInfo pacing_info;
+ DataSize recommended_probe_size = DataSize::Zero();
+ bool is_probing = prober_.is_probing();
+ if (is_probing) {
+ // Probe timing is sensitive, and handled explicitly by BitrateProber, so
+ // use actual send time rather than target.
+ pacing_info = prober_.CurrentCluster(now).value_or(PacedPacketInfo());
+ if (pacing_info.probe_cluster_id != PacedPacketInfo::kNotAProbe) {
+ recommended_probe_size = prober_.RecommendedMinProbeSize();
+ RTC_DCHECK_GT(recommended_probe_size, DataSize::Zero());
+ } else {
+ // No valid probe cluster returned, probe might have timed out.
+ is_probing = false;
+ }
+ }
+
+ DataSize data_sent = DataSize::Zero();
+  // Circuit breaker, making sure the main loop doesn't run forever.
+ static constexpr int kMaxIterations = 1 << 16;
+ int iteration = 0;
+ int packets_sent = 0;
+ int padding_packets_generated = 0;
+ for (; iteration < kMaxIterations; ++iteration) {
+    // Fetch the next packet, as long as the queue is not empty and the
+    // budget is not exhausted.
+ std::unique_ptr<RtpPacketToSend> rtp_packet =
+ GetPendingPacket(pacing_info, target_send_time, now);
+ if (rtp_packet == nullptr) {
+ // No packet available to send, check if we should send padding.
+ DataSize padding_to_add = PaddingToAdd(recommended_probe_size, data_sent);
+ if (padding_to_add > DataSize::Zero()) {
+ std::vector<std::unique_ptr<RtpPacketToSend>> padding_packets =
+ packet_sender_->GeneratePadding(padding_to_add);
+ if (!padding_packets.empty()) {
+ padding_packets_generated += padding_packets.size();
+ for (auto& packet : padding_packets) {
+ EnqueuePacket(std::move(packet));
+ }
+ // Continue loop to send the padding that was just added.
+ continue;
+ } else {
+ // Can't generate padding, still update padding budget for next send
+ // time.
+ UpdatePaddingBudgetWithSentData(padding_to_add);
+ }
+ }
+ // Can't fetch new packet and no padding to send, exit send loop.
+ break;
+ } else {
+ RTC_DCHECK(rtp_packet);
+ RTC_DCHECK(rtp_packet->packet_type().has_value());
+ const RtpPacketMediaType packet_type = *rtp_packet->packet_type();
+ DataSize packet_size = DataSize::Bytes(rtp_packet->payload_size() +
+ rtp_packet->padding_size());
+
+ if (include_overhead_) {
+ packet_size += DataSize::Bytes(rtp_packet->headers_size()) +
+ transport_overhead_per_packet_;
+ }
+
+ packet_sender_->SendPacket(std::move(rtp_packet), pacing_info);
+ for (auto& packet : packet_sender_->FetchFec()) {
+ EnqueuePacket(std::move(packet));
+ }
+ data_sent += packet_size;
+ ++packets_sent;
+
+ // Send done, update send time.
+ OnPacketSent(packet_type, packet_size, now);
+
+ if (is_probing) {
+ pacing_info.probe_cluster_bytes_sent += packet_size.bytes();
+ // If we are currently probing, we need to stop the send loop when we
+ // have reached the send target.
+ if (data_sent >= recommended_probe_size) {
+ break;
+ }
+ }
+
+      // Update target send time in case there are more packets that we are
+      // late in processing.
+ target_send_time = NextSendTime();
+ if (target_send_time > now) {
+ // Exit loop if not probing.
+ if (!is_probing) {
+ break;
+ }
+ target_send_time = now;
+ }
+ UpdateBudgetWithElapsedTime(UpdateTimeAndGetElapsed(target_send_time));
+ }
+ }
+
+ if (iteration >= kMaxIterations) {
+ // Circuit break activated. Log warning, adjust send time and return.
+ // TODO(sprang): Consider completely clearing state.
+ RTC_LOG(LS_ERROR) << "PacingController exceeded max iterations in "
+ "send-loop: packets sent = "
+ << packets_sent << ", padding packets generated = "
+ << padding_packets_generated
+ << ", bytes sent = " << data_sent.bytes();
+ last_send_time_ = now;
+ last_process_time_ = now;
+ return;
+ }
+
+ if (is_probing) {
+ probing_send_failure_ = data_sent == DataSize::Zero();
+ if (!probing_send_failure_) {
+ prober_.ProbeSent(CurrentTime(), data_sent);
+ }
+ }
+
+  // Queue length has probably decreased, check if the pacing rate needs to be
+  // updated.
+ // Poll the time again, since we might have enqueued new fec/padding packets
+ // with a later timestamp than `now`.
+ MaybeUpdateMediaRateDueToLongQueue(CurrentTime());
+}
+
+DataSize PacingController::PaddingToAdd(DataSize recommended_probe_size,
+ DataSize data_sent) const {
+ if (!packet_queue_->Empty()) {
+ // Actual payload available, no need to add padding.
+ return DataSize::Zero();
+ }
+
+ if (congested_) {
+ // Don't add padding if congested, even if requested for probing.
+ return DataSize::Zero();
+ }
+
+ if (!seen_first_packet_) {
+    // We cannot send padding unless a normal packet has first been sent; if
+    // we do, timestamps get messed up.
+ return DataSize::Zero();
+ }
+
+ if (!recommended_probe_size.IsZero()) {
+ if (recommended_probe_size > data_sent) {
+ return recommended_probe_size - data_sent;
+ }
+ return DataSize::Zero();
+ }
+
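+  // If all padding debt has been paid off, request enough padding to cover
+  // kTargetPaddingDuration at the padding rate, e.g. 5 ms at 800 kbps is
+  // 500 bytes.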
+ if (padding_rate_ > DataRate::Zero() && padding_debt_ == DataSize::Zero()) {
+ return kTargetPaddingDuration * padding_rate_;
+ }
+ return DataSize::Zero();
+}
+
+std::unique_ptr<RtpPacketToSend> PacingController::GetPendingPacket(
+ const PacedPacketInfo& pacing_info,
+ Timestamp target_send_time,
+ Timestamp now) {
+ const bool is_probe =
+ pacing_info.probe_cluster_id != PacedPacketInfo::kNotAProbe;
+ // If first packet in probe, insert a small padding packet so we have a
+ // more reliable start window for the rate estimation.
+ if (is_probe && pacing_info.probe_cluster_bytes_sent == 0) {
+ auto padding = packet_sender_->GeneratePadding(DataSize::Bytes(1));
+ // If no RTP modules sending media are registered, we may not get a
+ // padding packet back.
+ if (!padding.empty()) {
+      // We should never get more than one padding packet back for a
+      // requested size of 1 byte.
+ RTC_DCHECK_EQ(padding.size(), 1u);
+ return std::move(padding[0]);
+ }
+ }
+
+ if (packet_queue_->Empty()) {
+ return nullptr;
+ }
+
+ // First, check if there is any reason _not_ to send the next queued packet.
+
+ // Unpaced audio packets and probes are exempted from send checks.
+ bool unpaced_audio_packet =
+ !pace_audio_ && packet_queue_->LeadingAudioPacketEnqueueTime().IsFinite();
+ if (!unpaced_audio_packet && !is_probe) {
+ if (congested_) {
+ // Don't send anything if congested.
+ return nullptr;
+ }
+
+ if (now <= target_send_time && send_burst_interval_.IsZero()) {
+      // We allow sending slightly early if we think that we would actually
+      // have been able to, had we been right on time - i.e. the current debt
+      // is not more than would have been reduced to zero at the target send
+      // time. If packets are allowed to be sent in a burst, they are also
+      // allowed to be sent early.
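+      // E.g. 250 bytes of media debt at an adjusted media rate of 2 Mbps
+      // gives a flush_time of 1 ms, so the packet may leave up to 1 ms ahead
+      // of target_send_time.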
+ TimeDelta flush_time = media_debt_ / adjusted_media_rate_;
+ if (now + flush_time > target_send_time) {
+ return nullptr;
+ }
+ }
+ }
+
+ return packet_queue_->Pop();
+}
+
+void PacingController::OnPacketSent(RtpPacketMediaType packet_type,
+ DataSize packet_size,
+ Timestamp send_time) {
+ if (!first_sent_packet_time_ && packet_type != RtpPacketMediaType::kPadding) {
+ first_sent_packet_time_ = send_time;
+ }
+
+ bool audio_packet = packet_type == RtpPacketMediaType::kAudio;
+ if ((!audio_packet || account_for_audio_) && packet_size > DataSize::Zero()) {
+ UpdateBudgetWithSentData(packet_size);
+ }
+
+ last_send_time_ = send_time;
+}
+
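+// Elapsed time pays down both debts at their respective rates, e.g. 5 ms of
+// elapsed time at an adjusted media rate of 1 Mbps forgives
+// min(media_debt_, 625 bytes) of media debt.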
+void PacingController::UpdateBudgetWithElapsedTime(TimeDelta delta) {
+ media_debt_ -= std::min(media_debt_, adjusted_media_rate_ * delta);
+ padding_debt_ -= std::min(padding_debt_, padding_rate_ * delta);
+}
+
+void PacingController::UpdateBudgetWithSentData(DataSize size) {
+ media_debt_ += size;
+ media_debt_ = std::min(media_debt_, adjusted_media_rate_ * kMaxDebtInTime);
+ UpdatePaddingBudgetWithSentData(size);
+}
+
+void PacingController::UpdatePaddingBudgetWithSentData(DataSize size) {
+ padding_debt_ += size;
+ padding_debt_ = std::min(padding_debt_, padding_rate_ * kMaxDebtInTime);
+}
+
+void PacingController::SetQueueTimeLimit(TimeDelta limit) {
+ queue_time_limit_ = limit;
+}
+
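+// Worked example: with 50 kilobytes queued, a 2 s queue time limit and an
+// average queue time of 1.5 s already accrued, the queue must drain within
+// 500 ms, which requires at least 800 kbps; if that exceeds pacing_rate_, the
+// adjusted media rate is raised to match.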
+void PacingController::MaybeUpdateMediaRateDueToLongQueue(Timestamp now) {
+ adjusted_media_rate_ = pacing_rate_;
+ if (!drain_large_queues_) {
+ return;
+ }
+
+ DataSize queue_size_data = QueueSizeData();
+ if (queue_size_data > DataSize::Zero()) {
+    // Assuming equal size packets and input/output rate, the average packet
+    // has `avg_time_left` to get `queue_size_data` out of the queue, if the
+    // time constraint is to be met. Determine the bitrate needed for that.
+ packet_queue_->UpdateAverageQueueTime(now);
+ TimeDelta avg_time_left =
+ std::max(TimeDelta::Millis(1),
+ queue_time_limit_ - packet_queue_->AverageQueueTime());
+ DataRate min_rate_needed = queue_size_data / avg_time_left;
+ if (min_rate_needed > pacing_rate_) {
+ adjusted_media_rate_ = min_rate_needed;
+ RTC_LOG(LS_VERBOSE) << "bwe:large_pacing_queue pacing_rate_kbps="
+ << pacing_rate_.kbps();
+ }
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/pacing/pacing_controller.h b/third_party/libwebrtc/modules/pacing/pacing_controller.h
new file mode 100644
index 0000000000..c0a69266a0
--- /dev/null
+++ b/third_party/libwebrtc/modules/pacing/pacing_controller.h
@@ -0,0 +1,273 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_PACING_PACING_CONTROLLER_H_
+#define MODULES_PACING_PACING_CONTROLLER_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <array>
+#include <atomic>
+#include <memory>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/field_trials_view.h"
+#include "api/function_view.h"
+#include "api/transport/field_trial_based_config.h"
+#include "api/transport/network_types.h"
+#include "modules/pacing/bitrate_prober.h"
+#include "modules/pacing/interval_budget.h"
+#include "modules/pacing/rtp_packet_pacer.h"
+#include "modules/rtp_rtcp/include/rtp_packet_sender.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "modules/rtp_rtcp/source/rtp_packet_to_send.h"
+#include "rtc_base/experiments/field_trial_parser.h"
+#include "rtc_base/thread_annotations.h"
+
+namespace webrtc {
+
+// This class implements a leaky-bucket packet pacing algorithm. It handles the
+// logic of determining which packets to send when, but the actual timing of
+// the processing is done externally (e.g. RtpPacketPacer). Furthermore, the
+// forwarding of packets when they are ready to be sent is also handled
+// externally, via the PacingController::PacketSender interface.
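+//
+// A rough usage sketch (illustrative only; `MyPacketSender` stands in for any
+// PacketSender implementation, and production code schedules ProcessPackets()
+// from a task queue rather than a busy loop):
+//
+//   MyPacketSender sender;
+//   FieldTrialBasedConfig field_trials;
+//   PacingController pacer(Clock::GetRealTimeClock(), &sender, field_trials);
+//   pacer.SetPacingRates(DataRate::KilobitsPerSec(800), DataRate::Zero());
+//   pacer.EnqueuePacket(std::move(rtp_packet));
+//   // ... wait until pacer.NextSendTime(), then:
+//   pacer.ProcessPackets();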
+class PacingController {
+ public:
+ class PacketSender {
+ public:
+ virtual ~PacketSender() = default;
+ virtual void SendPacket(std::unique_ptr<RtpPacketToSend> packet,
+ const PacedPacketInfo& cluster_info) = 0;
+ // Should be called after each call to SendPacket().
+ virtual std::vector<std::unique_ptr<RtpPacketToSend>> FetchFec() = 0;
+ virtual std::vector<std::unique_ptr<RtpPacketToSend>> GeneratePadding(
+ DataSize size) = 0;
+ };
+
+  // Interface for a class handling storage and prioritization of packets
+  // pending to be sent by the pacer.
+ // Note that for the methods taking a Timestamp as parameter, the parameter
+ // will never decrease between two subsequent calls.
+ class PacketQueue {
+ public:
+ virtual ~PacketQueue() = default;
+
+ virtual void Push(Timestamp enqueue_time,
+ std::unique_ptr<RtpPacketToSend> packet) = 0;
+ virtual std::unique_ptr<RtpPacketToSend> Pop() = 0;
+
+ virtual int SizeInPackets() const = 0;
+ bool Empty() const { return SizeInPackets() == 0; }
+ virtual DataSize SizeInPayloadBytes() const = 0;
+
+ // Total packets in the queue per media type (RtpPacketMediaType values are
+ // used as lookup index).
+ virtual const std::array<int, kNumMediaTypes>&
+ SizeInPacketsPerRtpPacketMediaType() const = 0;
+
+ // If the next packet, that would be returned by Pop() if called
+ // now, is an audio packet this method returns the enqueue time
+ // of that packet. If queue is empty or top packet is not audio,
+ // returns Timestamp::MinusInfinity().
+ virtual Timestamp LeadingAudioPacketEnqueueTime() const = 0;
+
+ // Enqueue time of the oldest packet in the queue,
+ // Timestamp::MinusInfinity() if queue is empty.
+ virtual Timestamp OldestEnqueueTime() const = 0;
+
+    // Average queue time for the packets currently in the queue.
+    // The queuing time is calculated from Push() to the last
+    // UpdateAverageQueueTime() call - with any time spent in a paused state
+    // subtracted. Returns TimeDelta::Zero() for an empty queue.
+ virtual TimeDelta AverageQueueTime() const = 0;
+
+    // Called during packet processing or when the pause state changes. Since
+    // the AverageQueueTime() method does not look at the wall time, this
+    // method needs to be called before querying queue time.
+ virtual void UpdateAverageQueueTime(Timestamp now) = 0;
+
+ // Set the pause state, while `paused` is true queuing time is not counted.
+ virtual void SetPauseState(bool paused, Timestamp now) = 0;
+ };
+
+  // Expected max pacer delay. If ExpectedQueueTime() is higher than this
+  // value, the packet producers should wait (e.g. drop frames rather than
+  // encoding them). The bitrate sent may temporarily exceed the target set by
+  // SetPacingRates() so that this limit will be upheld.
+ static const TimeDelta kMaxExpectedQueueLength;
+ // Pacing-rate relative to our target send rate.
+ // Multiplicative factor that is applied to the target bitrate to calculate
+ // the number of bytes that can be transmitted per interval.
+ // Increasing this factor will result in lower delays in cases of bitrate
+ // overshoots from the encoder.
+ static const float kDefaultPaceMultiplier;
+  // If no media or paused, wake up at least every `kPausedProcessInterval` in
+  // order to send a keep-alive packet so we don't get stuck in a bad state
+  // due to lack of feedback.
+ static const TimeDelta kPausedProcessInterval;
+
+ static const TimeDelta kMinSleepTime;
+
+  // Allow probes to be processed slightly ahead of the intended send time.
+  // Currently set to 1ms as this is intended to allow times to be rounded
+  // down to the nearest millisecond.
+ static const TimeDelta kMaxEarlyProbeProcessing;
+
+ PacingController(Clock* clock,
+ PacketSender* packet_sender,
+ const FieldTrialsView& field_trials);
+
+ ~PacingController();
+
+ // Adds the packet to the queue and calls PacketRouter::SendPacket() when
+ // it's time to send.
+ void EnqueuePacket(std::unique_ptr<RtpPacketToSend> packet);
+
+ // ABSL_DEPRECATED("Use CreateProbeClusters instead")
+ void CreateProbeCluster(DataRate bitrate, int cluster_id);
+ void CreateProbeClusters(
+ rtc::ArrayView<const ProbeClusterConfig> probe_cluster_configs);
+
+ void Pause(); // Temporarily pause all sending.
+ void Resume(); // Resume sending packets.
+ bool IsPaused() const;
+
+ void SetCongested(bool congested);
+
+ // Sets the pacing rates. Must be called once before packets can be sent.
+ void SetPacingRates(DataRate pacing_rate, DataRate padding_rate);
+ DataRate pacing_rate() const { return adjusted_media_rate_; }
+
+  // Currently audio traffic is not accounted for by the pacer and is passed
+  // through. With the introduction of audio BWE, audio traffic will be
+  // accounted for in the pacer budget calculation. The audio traffic will
+  // still be injected at high priority.
+ void SetAccountForAudioPackets(bool account_for_audio);
+ void SetIncludeOverhead();
+
+ void SetTransportOverhead(DataSize overhead_per_packet);
+  // The pacer is allowed to send enqueued packets in bursts and can build up
+  // a packet "debt" that corresponds to approximately the send rate during
+  // `burst_interval`.
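+  // E.g. a 40 ms burst interval at a 1 Mbps pacing rate allows roughly 5
+  // kilobytes to be sent back-to-back before pacing resumes.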
+ void SetSendBurstInterval(TimeDelta burst_interval);
+
+ // Returns the time when the oldest packet was queued.
+ Timestamp OldestPacketEnqueueTime() const;
+
+ // Number of packets in the pacer queue.
+ size_t QueueSizePackets() const;
+ // Number of packets in the pacer queue per media type (RtpPacketMediaType
+ // values are used as lookup index).
+ const std::array<int, kNumMediaTypes>& SizeInPacketsPerRtpPacketMediaType()
+ const;
+  // Total size of the packets in the pacer queue.
+ DataSize QueueSizeData() const;
+
+ // Current buffer level, i.e. max of media and padding debt.
+ DataSize CurrentBufferLevel() const;
+
+ // Returns the time when the first packet was sent.
+ absl::optional<Timestamp> FirstSentPacketTime() const;
+
+  // Returns the time it will take to send the current packets in the queue,
+  // given the current size and bitrate, ignoring priority.
+ TimeDelta ExpectedQueueTime() const;
+
+ void SetQueueTimeLimit(TimeDelta limit);
+
+  // Enable bitrate probing. Enabled by default, mostly here to simplify
+  // testing. Must be called before any packets are sent to have an effect.
+ void SetProbingEnabled(bool enabled);
+
+ // Returns the next time we expect ProcessPackets() to be called.
+ Timestamp NextSendTime() const;
+
+ // Check queue of pending packets and send them or padding packets, if budget
+ // is available.
+ void ProcessPackets();
+
+ bool IsProbing() const;
+
+ private:
+ TimeDelta UpdateTimeAndGetElapsed(Timestamp now);
+ bool ShouldSendKeepalive(Timestamp now) const;
+
+ // Updates the number of bytes that can be sent for the next time interval.
+ void UpdateBudgetWithElapsedTime(TimeDelta delta);
+ void UpdateBudgetWithSentData(DataSize size);
+ void UpdatePaddingBudgetWithSentData(DataSize size);
+
+ DataSize PaddingToAdd(DataSize recommended_probe_size,
+ DataSize data_sent) const;
+
+ std::unique_ptr<RtpPacketToSend> GetPendingPacket(
+ const PacedPacketInfo& pacing_info,
+ Timestamp target_send_time,
+ Timestamp now);
+ void OnPacketSent(RtpPacketMediaType packet_type,
+ DataSize packet_size,
+ Timestamp send_time);
+ void MaybeUpdateMediaRateDueToLongQueue(Timestamp now);
+
+ Timestamp CurrentTime() const;
+
+ Clock* const clock_;
+ PacketSender* const packet_sender_;
+ const FieldTrialsView& field_trials_;
+
+ const bool drain_large_queues_;
+ const bool send_padding_if_silent_;
+ const bool pace_audio_;
+ const bool ignore_transport_overhead_;
+
+ TimeDelta min_packet_limit_;
+ DataSize transport_overhead_per_packet_;
+ TimeDelta send_burst_interval_;
+
+ // TODO(webrtc:9716): Remove this when we are certain clocks are monotonic.
+ // The last millisecond timestamp returned by `clock_`.
+ mutable Timestamp last_timestamp_;
+ bool paused_;
+
+ // Amount of outstanding data for media and padding.
+ DataSize media_debt_;
+ DataSize padding_debt_;
+
+ // The target pacing rate, signaled via SetPacingRates().
+ DataRate pacing_rate_;
+  // The media send rate, which might be adjusted from pacing_rate_, e.g. if
+  // the pacing queue is growing too long.
+ DataRate adjusted_media_rate_;
+  // The padding target rate. We aim to fill up to this rate with padding for
+  // whatever bandwidth is not already used by media.
+ DataRate padding_rate_;
+
+ BitrateProber prober_;
+ bool probing_send_failure_;
+
+ Timestamp last_process_time_;
+ Timestamp last_send_time_;
+ absl::optional<Timestamp> first_sent_packet_time_;
+ bool seen_first_packet_;
+
+ std::unique_ptr<PacketQueue> packet_queue_;
+
+ bool congested_;
+
+ TimeDelta queue_time_limit_;
+ bool account_for_audio_;
+ bool include_overhead_;
+};
+} // namespace webrtc
+
+#endif // MODULES_PACING_PACING_CONTROLLER_H_
diff --git a/third_party/libwebrtc/modules/pacing/pacing_controller_unittest.cc b/third_party/libwebrtc/modules/pacing/pacing_controller_unittest.cc
new file mode 100644
index 0000000000..0248f93ae1
--- /dev/null
+++ b/third_party/libwebrtc/modules/pacing/pacing_controller_unittest.cc
@@ -0,0 +1,2065 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/pacing/pacing_controller.h"
+
+#include <algorithm>
+#include <list>
+#include <memory>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "api/transport/network_types.h"
+#include "api/units/data_rate.h"
+#include "api/units/time_delta.h"
+#include "modules/pacing/packet_router.h"
+#include "system_wrappers/include/clock.h"
+#include "test/explicit_key_value_config.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+using ::testing::_;
+using ::testing::Field;
+using ::testing::Pointee;
+using ::testing::Property;
+using ::testing::Return;
+using ::testing::WithoutArgs;
+
+using ::webrtc::test::ExplicitKeyValueConfig;
+
+namespace webrtc {
+namespace {
+constexpr DataRate kFirstClusterRate = DataRate::KilobitsPerSec(900);
+constexpr DataRate kSecondClusterRate = DataRate::KilobitsPerSec(1800);
+
+// The error stems from truncating the time interval of probe packets to integer
+// values. This results in probing slightly higher than the target bitrate.
+// For 1.8 Mbps, this comes to be about 120 kbps with 1200 probe packets.
+constexpr DataRate kProbingErrorMargin = DataRate::KilobitsPerSec(150);
+
+const float kPaceMultiplier = 2.5f;
+
+constexpr uint32_t kAudioSsrc = 12345;
+constexpr uint32_t kVideoSsrc = 234565;
+
+constexpr DataRate kTargetRate = DataRate::KilobitsPerSec(800);
+
+std::unique_ptr<RtpPacketToSend> BuildPacket(RtpPacketMediaType type,
+ uint32_t ssrc,
+ uint16_t sequence_number,
+ int64_t capture_time_ms,
+ size_t size) {
+ auto packet = std::make_unique<RtpPacketToSend>(nullptr);
+ packet->set_packet_type(type);
+ packet->SetSsrc(ssrc);
+ packet->SetSequenceNumber(sequence_number);
+ packet->set_capture_time(Timestamp::Millis(capture_time_ms));
+ packet->SetPayloadSize(size);
+ return packet;
+}
+
+class MediaStream {
+ public:
+ MediaStream(SimulatedClock& clock,
+ RtpPacketMediaType type,
+ uint32_t ssrc,
+ size_t packet_size)
+ : clock_(clock), type_(type), ssrc_(ssrc), packet_size_(packet_size) {}
+
+ std::unique_ptr<RtpPacketToSend> BuildNextPacket() {
+ return BuildPacket(type_, ssrc_, seq_num_++, clock_.TimeInMilliseconds(),
+ packet_size_);
+ }
+ std::unique_ptr<RtpPacketToSend> BuildNextPacket(size_t size) {
+ return BuildPacket(type_, ssrc_, seq_num_++, clock_.TimeInMilliseconds(),
+ size);
+ }
+
+ private:
+ SimulatedClock& clock_;
+ const RtpPacketMediaType type_;
+ const uint32_t ssrc_;
+ const size_t packet_size_;
+ uint16_t seq_num_ = 1000;
+};
+
+// Mock callback proxy, where both the new and old APIs redirect to common
+// mock methods that focus on core aspects.
+class MockPacingControllerCallback : public PacingController::PacketSender {
+ public:
+ void SendPacket(std::unique_ptr<RtpPacketToSend> packet,
+ const PacedPacketInfo& cluster_info) override {
+ SendPacket(packet->Ssrc(), packet->SequenceNumber(),
+ packet->capture_time().ms(),
+ packet->packet_type() == RtpPacketMediaType::kRetransmission,
+ packet->packet_type() == RtpPacketMediaType::kPadding);
+ }
+
+ std::vector<std::unique_ptr<RtpPacketToSend>> GeneratePadding(
+ DataSize target_size) override {
+ std::vector<std::unique_ptr<RtpPacketToSend>> ret;
+ size_t padding_size = SendPadding(target_size.bytes());
+ if (padding_size > 0) {
+ auto packet = std::make_unique<RtpPacketToSend>(nullptr);
+ packet->SetPayloadSize(padding_size);
+ packet->set_packet_type(RtpPacketMediaType::kPadding);
+ ret.emplace_back(std::move(packet));
+ }
+ return ret;
+ }
+
+ MOCK_METHOD(void,
+ SendPacket,
+ (uint32_t ssrc,
+ uint16_t sequence_number,
+ int64_t capture_timestamp,
+ bool retransmission,
+ bool padding));
+ MOCK_METHOD(std::vector<std::unique_ptr<RtpPacketToSend>>,
+ FetchFec,
+ (),
+ (override));
+ MOCK_METHOD(size_t, SendPadding, (size_t target_size));
+};
+
+// Mock callback implementing the raw api.
+class MockPacketSender : public PacingController::PacketSender {
+ public:
+ MOCK_METHOD(void,
+ SendPacket,
+ (std::unique_ptr<RtpPacketToSend> packet,
+ const PacedPacketInfo& cluster_info),
+ (override));
+ MOCK_METHOD(std::vector<std::unique_ptr<RtpPacketToSend>>,
+ FetchFec,
+ (),
+ (override));
+
+ MOCK_METHOD(std::vector<std::unique_ptr<RtpPacketToSend>>,
+ GeneratePadding,
+ (DataSize target_size),
+ (override));
+};
+
+class PacingControllerPadding : public PacingController::PacketSender {
+ public:
+ static const size_t kPaddingPacketSize = 224;
+
+ PacingControllerPadding() : padding_sent_(0), total_bytes_sent_(0) {}
+
+ void SendPacket(std::unique_ptr<RtpPacketToSend> packet,
+ const PacedPacketInfo& pacing_info) override {
+ total_bytes_sent_ += packet->payload_size();
+ }
+
+ std::vector<std::unique_ptr<RtpPacketToSend>> FetchFec() override {
+ return {};
+ }
+
+ std::vector<std::unique_ptr<RtpPacketToSend>> GeneratePadding(
+ DataSize target_size) override {
+ size_t num_packets =
+ (target_size.bytes() + kPaddingPacketSize - 1) / kPaddingPacketSize;
+ std::vector<std::unique_ptr<RtpPacketToSend>> packets;
+ for (size_t i = 0; i < num_packets; ++i) {
+ packets.emplace_back(std::make_unique<RtpPacketToSend>(nullptr));
+ packets.back()->SetPadding(kPaddingPacketSize);
+ packets.back()->set_packet_type(RtpPacketMediaType::kPadding);
+ padding_sent_ += kPaddingPacketSize;
+ }
+ return packets;
+ }
+
+ size_t padding_sent() { return padding_sent_; }
+ size_t total_bytes_sent() { return total_bytes_sent_; }
+
+ private:
+ size_t padding_sent_;
+ size_t total_bytes_sent_;
+};
+
+class PacingControllerProbing : public PacingController::PacketSender {
+ public:
+ PacingControllerProbing() : packets_sent_(0), padding_sent_(0) {}
+
+ void SendPacket(std::unique_ptr<RtpPacketToSend> packet,
+ const PacedPacketInfo& pacing_info) override {
+ if (packet->packet_type() != RtpPacketMediaType::kPadding) {
+ ++packets_sent_;
+ }
+ last_pacing_info_ = pacing_info;
+ }
+
+ std::vector<std::unique_ptr<RtpPacketToSend>> FetchFec() override {
+ return {};
+ }
+
+ std::vector<std::unique_ptr<RtpPacketToSend>> GeneratePadding(
+ DataSize target_size) override {
+    // From RTPSender:
+    // The max in RFC 3550 is 255 bytes; we limit it to a multiple of 32
+    // bytes for SRTP.
+ const DataSize kMaxPadding = DataSize::Bytes(224);
+
+ std::vector<std::unique_ptr<RtpPacketToSend>> packets;
+ while (target_size > DataSize::Zero()) {
+ DataSize padding_size = std::min(kMaxPadding, target_size);
+ packets.emplace_back(std::make_unique<RtpPacketToSend>(nullptr));
+ packets.back()->SetPadding(padding_size.bytes());
+ packets.back()->set_packet_type(RtpPacketMediaType::kPadding);
+ padding_sent_ += padding_size.bytes();
+ target_size -= padding_size;
+ }
+ return packets;
+ }
+
+ int packets_sent() const { return packets_sent_; }
+ int padding_sent() const { return padding_sent_; }
+ int total_packets_sent() const { return packets_sent_ + padding_sent_; }
+ PacedPacketInfo last_pacing_info() const { return last_pacing_info_; }
+
+ private:
+ int packets_sent_;
+ int padding_sent_;
+ PacedPacketInfo last_pacing_info_;
+};
+
+class PacingControllerTest : public ::testing::Test {
+ protected:
+ PacingControllerTest() : clock_(123456), trials_("") {}
+
+ void SendAndExpectPacket(PacingController* pacer,
+ RtpPacketMediaType type,
+ uint32_t ssrc,
+ uint16_t sequence_number,
+ int64_t capture_time_ms,
+ size_t size) {
+ pacer->EnqueuePacket(
+ BuildPacket(type, ssrc, sequence_number, capture_time_ms, size));
+
+ EXPECT_CALL(callback_,
+ SendPacket(ssrc, sequence_number, capture_time_ms,
+ type == RtpPacketMediaType::kRetransmission, false));
+ }
+
+ void AdvanceTimeUntil(webrtc::Timestamp time) {
+ Timestamp now = clock_.CurrentTime();
+ clock_.AdvanceTime(std::max(TimeDelta::Zero(), time - now));
+ }
+
+ void ConsumeInitialBudget(PacingController* pacer) {
+ const uint32_t kSsrc = 54321;
+ uint16_t sequence_number = 1234;
+ int64_t capture_time_ms = clock_.TimeInMilliseconds();
+ const size_t kPacketSize = 250;
+
+ EXPECT_TRUE(pacer->OldestPacketEnqueueTime().IsInfinite());
+
+    // Due to the multiplicative factor we can send 5 packets during a send
+    // interval: (network capacity * multiplier) / (8 bits per byte *
+    // packet size * #send intervals per second).
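+    // With kTargetRate = 800 kbps, kPaceMultiplier = 2.5 and 5 ms intervals:
+    // 800'000 * 2.5 / (8 * 250 * 200) = 5 packets.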
+ const size_t packets_to_send_per_interval =
+ kTargetRate.bps() * kPaceMultiplier / (8 * kPacketSize * 200);
+ for (size_t i = 0; i < packets_to_send_per_interval; ++i) {
+ SendAndExpectPacket(pacer, RtpPacketMediaType::kVideo, kSsrc,
+ sequence_number++, capture_time_ms, kPacketSize);
+ }
+
+ while (pacer->QueueSizePackets() > 0) {
+ AdvanceTimeUntil(pacer->NextSendTime());
+ pacer->ProcessPackets();
+ }
+ }
+
+ SimulatedClock clock_;
+
+ MediaStream audio_ = MediaStream(clock_,
+ /*type*/ RtpPacketMediaType::kAudio,
+ /*ssrc*/ kAudioSsrc,
+ /*packet_size*/ 100);
+ MediaStream video_ = MediaStream(clock_,
+ /*type*/ RtpPacketMediaType::kVideo,
+ /*ssrc*/ kVideoSsrc,
+ /*packet_size*/ 1000);
+
+ ::testing::NiceMock<MockPacingControllerCallback> callback_;
+ ExplicitKeyValueConfig trials_;
+};
+
+TEST_F(PacingControllerTest, DefaultNoPaddingInSilence) {
+ const test::ExplicitKeyValueConfig trials("");
+ PacingController pacer(&clock_, &callback_, trials);
+ pacer.SetPacingRates(kTargetRate, DataRate::Zero());
+ // Video packet to reset last send time and provide padding data.
+ pacer.EnqueuePacket(video_.BuildNextPacket());
+ EXPECT_CALL(callback_, SendPacket).Times(1);
+ clock_.AdvanceTimeMilliseconds(5);
+ pacer.ProcessPackets();
+ EXPECT_CALL(callback_, SendPadding).Times(0);
+ // Waiting 500 ms should not trigger sending of padding.
+ clock_.AdvanceTimeMilliseconds(500);
+ pacer.ProcessPackets();
+}
+
+TEST_F(PacingControllerTest, PaddingInSilenceWithTrial) {
+ const test::ExplicitKeyValueConfig trials(
+ "WebRTC-Pacer-PadInSilence/Enabled/");
+ PacingController pacer(&clock_, &callback_, trials);
+ pacer.SetPacingRates(kTargetRate, DataRate::Zero());
+ // Video packet to reset last send time and provide padding data.
+ pacer.EnqueuePacket(video_.BuildNextPacket());
+ EXPECT_CALL(callback_, SendPacket).Times(2);
+ clock_.AdvanceTimeMilliseconds(5);
+ pacer.ProcessPackets();
+ EXPECT_CALL(callback_, SendPadding).WillOnce(Return(1000));
+ // Waiting 500 ms should trigger sending of padding.
+ clock_.AdvanceTimeMilliseconds(500);
+ pacer.ProcessPackets();
+}
+
+TEST_F(PacingControllerTest, CongestionWindowAffectsAudioInTrial) {
+ const test::ExplicitKeyValueConfig trials("WebRTC-Pacer-BlockAudio/Enabled/");
+ EXPECT_CALL(callback_, SendPadding).Times(0);
+ PacingController pacer(&clock_, &callback_, trials);
+ pacer.SetPacingRates(DataRate::KilobitsPerSec(10000), DataRate::Zero());
+ // Video packet fills congestion window.
+ pacer.EnqueuePacket(video_.BuildNextPacket());
+ EXPECT_CALL(callback_, SendPacket).Times(1);
+ AdvanceTimeUntil(pacer.NextSendTime());
+ pacer.ProcessPackets();
+ pacer.SetCongested(true);
+ // Audio packet blocked due to congestion.
+ pacer.EnqueuePacket(audio_.BuildNextPacket());
+ EXPECT_CALL(callback_, SendPacket).Times(0);
+ // Forward time to where we send keep-alive.
+ EXPECT_CALL(callback_, SendPadding(1)).Times(2);
+ AdvanceTimeUntil(pacer.NextSendTime());
+ pacer.ProcessPackets();
+ AdvanceTimeUntil(pacer.NextSendTime());
+ pacer.ProcessPackets();
+ // Audio packet unblocked when congestion window clear.
+ ::testing::Mock::VerifyAndClearExpectations(&callback_);
+ pacer.SetCongested(false);
+ EXPECT_CALL(callback_, SendPacket).Times(1);
+ AdvanceTimeUntil(pacer.NextSendTime());
+ pacer.ProcessPackets();
+}
+
+TEST_F(PacingControllerTest, DefaultCongestionWindowDoesNotAffectAudio) {
+ EXPECT_CALL(callback_, SendPadding).Times(0);
+ const test::ExplicitKeyValueConfig trials("");
+ PacingController pacer(&clock_, &callback_, trials);
+ pacer.SetPacingRates(DataRate::BitsPerSec(10000000), DataRate::Zero());
+ // Video packet fills congestion window.
+ pacer.EnqueuePacket(video_.BuildNextPacket());
+ EXPECT_CALL(callback_, SendPacket).Times(1);
+ AdvanceTimeUntil(pacer.NextSendTime());
+ pacer.ProcessPackets();
+ pacer.SetCongested(true);
+ // Audio not blocked due to congestion.
+ pacer.EnqueuePacket(audio_.BuildNextPacket());
+ EXPECT_CALL(callback_, SendPacket).Times(1);
+ AdvanceTimeUntil(pacer.NextSendTime());
+ pacer.ProcessPackets();
+}
+
+TEST_F(PacingControllerTest, BudgetAffectsAudioInTrial) {
+ ExplicitKeyValueConfig trials("WebRTC-Pacer-BlockAudio/Enabled/");
+ PacingController pacer(&clock_, &callback_, trials);
+ const size_t kPacketSize = 1000;
+ const int kProcessIntervalsPerSecond = 1000 / 5;
+ DataRate pacing_rate =
+ DataRate::BitsPerSec(kPacketSize / 3 * 8 * kProcessIntervalsPerSecond);
+ pacer.SetPacingRates(pacing_rate, DataRate::Zero());
+ // Video fills budget for following process periods.
+ pacer.EnqueuePacket(video_.BuildNextPacket(kPacketSize));
+ EXPECT_CALL(callback_, SendPacket).Times(1);
+ AdvanceTimeUntil(pacer.NextSendTime());
+ pacer.ProcessPackets();
+ // Audio packet blocked due to budget limit.
+ pacer.EnqueuePacket(audio_.BuildNextPacket());
+ Timestamp wait_start_time = clock_.CurrentTime();
+ Timestamp wait_end_time = Timestamp::MinusInfinity();
+ EXPECT_CALL(callback_, SendPacket).WillOnce(WithoutArgs([&]() {
+ wait_end_time = clock_.CurrentTime();
+ }));
+ while (!wait_end_time.IsFinite()) {
+ AdvanceTimeUntil(pacer.NextSendTime());
+ pacer.ProcessPackets();
+ }
+ const TimeDelta expected_wait_time =
+ DataSize::Bytes(kPacketSize) / pacing_rate;
+ // Verify delay is near expectation, within timing margin.
+ EXPECT_LT(((wait_end_time - wait_start_time) - expected_wait_time).Abs(),
+ PacingController::kMinSleepTime);
+}
+
+TEST_F(PacingControllerTest, DefaultBudgetDoesNotAffectAudio) {
+ const size_t kPacketSize = 1000;
+ EXPECT_CALL(callback_, SendPadding).Times(0);
+ const test::ExplicitKeyValueConfig trials("");
+ PacingController pacer(&clock_, &callback_, trials);
+ const int kProcessIntervalsPerSecond = 1000 / 5;
+ pacer.SetPacingRates(
+ DataRate::BitsPerSec(kPacketSize / 3 * 8 * kProcessIntervalsPerSecond),
+ DataRate::Zero());
+ // Video fills budget for following process periods.
+ pacer.EnqueuePacket(video_.BuildNextPacket(kPacketSize));
+ EXPECT_CALL(callback_, SendPacket).Times(1);
+ AdvanceTimeUntil(pacer.NextSendTime());
+ pacer.ProcessPackets();
+ // Audio packet not blocked due to budget limit.
+ EXPECT_CALL(callback_, SendPacket).Times(1);
+ pacer.EnqueuePacket(audio_.BuildNextPacket());
+ AdvanceTimeUntil(pacer.NextSendTime());
+ pacer.ProcessPackets();
+}
+
+TEST_F(PacingControllerTest, FirstSentPacketTimeIsSet) {
+ const Timestamp kStartTime = clock_.CurrentTime();
+ auto pacer = std::make_unique<PacingController>(&clock_, &callback_, trials_);
+ pacer->SetPacingRates(kTargetRate * kPaceMultiplier, DataRate::Zero());
+
+ // No packet sent.
+ EXPECT_FALSE(pacer->FirstSentPacketTime().has_value());
+ pacer->EnqueuePacket(video_.BuildNextPacket());
+ AdvanceTimeUntil(pacer->NextSendTime());
+ pacer->ProcessPackets();
+ EXPECT_EQ(kStartTime, pacer->FirstSentPacketTime());
+}
+
+TEST_F(PacingControllerTest, QueueAndPacePackets) {
+ const uint32_t kSsrc = 12345;
+ uint16_t sequence_number = 1234;
+  const DataSize kPacketSize = DataSize::Bytes(250);
+ const TimeDelta kSendInterval = TimeDelta::Millis(5);
+
+ // Due to the multiplicative factor we can send 5 packets during a 5ms send
+ // interval. (send interval * network capacity * multiplier / packet size)
+  const size_t kPacketsToSend = (kSendInterval * kTargetRate).bytes() *
+                                kPaceMultiplier / kPacketSize.bytes();
+ auto pacer = std::make_unique<PacingController>(&clock_, &callback_, trials_);
+ pacer->SetPacingRates(kTargetRate * kPaceMultiplier, DataRate::Zero());
+
+ for (size_t i = 0; i < kPacketsToSend; ++i) {
+    SendAndExpectPacket(pacer.get(), RtpPacketMediaType::kVideo, kSsrc,
+                        sequence_number++, clock_.TimeInMilliseconds(),
+                        kPacketSize.bytes());
+ }
+ EXPECT_CALL(callback_, SendPadding).Times(0);
+
+ // Enqueue one extra packet.
+ int64_t queued_packet_timestamp = clock_.TimeInMilliseconds();
+  pacer->EnqueuePacket(BuildPacket(RtpPacketMediaType::kVideo, kSsrc,
+                                   sequence_number, queued_packet_timestamp,
+                                   kPacketSize.bytes()));
+ EXPECT_EQ(kPacketsToSend + 1, pacer->QueueSizePackets());
+
+ // Send packets until the initial kPacketsToSend packets are done.
+ Timestamp start_time = clock_.CurrentTime();
+ while (pacer->QueueSizePackets() > 1) {
+ AdvanceTimeUntil(pacer->NextSendTime());
+ pacer->ProcessPackets();
+ }
+ EXPECT_LT(clock_.CurrentTime() - start_time, kSendInterval);
+
+ // Proceed till last packet can be sent.
+ EXPECT_CALL(callback_, SendPacket(kSsrc, sequence_number,
+ queued_packet_timestamp, false, false))
+ .Times(1);
+ AdvanceTimeUntil(pacer->NextSendTime());
+ pacer->ProcessPackets();
+ EXPECT_GE(clock_.CurrentTime() - start_time, kSendInterval);
+ EXPECT_EQ(pacer->QueueSizePackets(), 0u);
+}
+
+TEST_F(PacingControllerTest, PaceQueuedPackets) {
+ uint32_t ssrc = 12345;
+ uint16_t sequence_number = 1234;
+ const size_t kPacketSize = 250;
+ auto pacer = std::make_unique<PacingController>(&clock_, &callback_, trials_);
+ pacer->SetPacingRates(kTargetRate * kPaceMultiplier, DataRate::Zero());
+
+  // Due to the multiplicative factor we can send 5 packets during a send
+  // interval: (network capacity * multiplier) / (8 bits per byte *
+  // packet size * #send intervals per second).
+ const size_t packets_to_send_per_interval =
+ kTargetRate.bps() * kPaceMultiplier / (8 * kPacketSize * 200);
+ for (size_t i = 0; i < packets_to_send_per_interval; ++i) {
+ SendAndExpectPacket(pacer.get(), RtpPacketMediaType::kVideo, ssrc,
+ sequence_number++, clock_.TimeInMilliseconds(),
+ kPacketSize);
+ }
+
+ for (size_t j = 0; j < packets_to_send_per_interval * 10; ++j) {
+ pacer->EnqueuePacket(BuildPacket(RtpPacketMediaType::kVideo, ssrc,
+ sequence_number++,
+ clock_.TimeInMilliseconds(), kPacketSize));
+ }
+ EXPECT_EQ(packets_to_send_per_interval + packets_to_send_per_interval * 10,
+ pacer->QueueSizePackets());
+
+ while (pacer->QueueSizePackets() > packets_to_send_per_interval * 10) {
+ AdvanceTimeUntil(pacer->NextSendTime());
+ pacer->ProcessPackets();
+ }
+ EXPECT_EQ(pacer->QueueSizePackets(), packets_to_send_per_interval * 10);
+ EXPECT_CALL(callback_, SendPadding).Times(0);
+
+ EXPECT_CALL(callback_, SendPacket(ssrc, _, _, false, false))
+ .Times(pacer->QueueSizePackets());
+ const TimeDelta expected_pace_time =
+ DataSize::Bytes(pacer->QueueSizePackets() * kPacketSize) /
+ (kPaceMultiplier * kTargetRate);
+ Timestamp start_time = clock_.CurrentTime();
+ while (pacer->QueueSizePackets() > 0) {
+ AdvanceTimeUntil(pacer->NextSendTime());
+ pacer->ProcessPackets();
+ }
+ const TimeDelta actual_pace_time = clock_.CurrentTime() - start_time;
+ EXPECT_LT((actual_pace_time - expected_pace_time).Abs(),
+ PacingController::kMinSleepTime);
+
+ EXPECT_EQ(0u, pacer->QueueSizePackets());
+ AdvanceTimeUntil(pacer->NextSendTime());
+ EXPECT_EQ(0u, pacer->QueueSizePackets());
+ pacer->ProcessPackets();
+
+  // Send some more packets, just to show that we can.
+ for (size_t i = 0; i < packets_to_send_per_interval; ++i) {
+ SendAndExpectPacket(pacer.get(), RtpPacketMediaType::kVideo, ssrc,
+ sequence_number++, clock_.TimeInMilliseconds(), 250);
+ }
+ EXPECT_EQ(packets_to_send_per_interval, pacer->QueueSizePackets());
+ for (size_t i = 0; i < packets_to_send_per_interval; ++i) {
+ AdvanceTimeUntil(pacer->NextSendTime());
+ pacer->ProcessPackets();
+ }
+ EXPECT_EQ(0u, pacer->QueueSizePackets());
+}
+
+TEST_F(PacingControllerTest, RepeatedRetransmissionsAllowed) {
+ auto pacer = std::make_unique<PacingController>(&clock_, &callback_, trials_);
+ pacer->SetPacingRates(kTargetRate * kPaceMultiplier, DataRate::Zero());
+
+ // Send one packet, then two retransmissions of that packet.
+ for (size_t i = 0; i < 3; i++) {
+ constexpr uint32_t ssrc = 333;
+ constexpr uint16_t sequence_number = 444;
+ constexpr size_t bytes = 250;
+ bool is_retransmission = (i != 0); // Original followed by retransmissions.
+ SendAndExpectPacket(pacer.get(),
+ is_retransmission ? RtpPacketMediaType::kRetransmission
+ : RtpPacketMediaType::kVideo,
+ ssrc, sequence_number, clock_.TimeInMilliseconds(),
+ bytes);
+ clock_.AdvanceTimeMilliseconds(5);
+ }
+ while (pacer->QueueSizePackets() > 0) {
+ AdvanceTimeUntil(pacer->NextSendTime());
+ pacer->ProcessPackets();
+ }
+}
+
+TEST_F(PacingControllerTest,
+ CanQueuePacketsWithSameSequenceNumberOnDifferentSsrcs) {
+ uint32_t ssrc = 12345;
+ uint16_t sequence_number = 1234;
+ auto pacer = std::make_unique<PacingController>(&clock_, &callback_, trials_);
+ pacer->SetPacingRates(kTargetRate * kPaceMultiplier, DataRate::Zero());
+
+ SendAndExpectPacket(pacer.get(), RtpPacketMediaType::kVideo, ssrc,
+ sequence_number, clock_.TimeInMilliseconds(), 250);
+
+ // Expect packet on second ssrc to be queued and sent as well.
+ SendAndExpectPacket(pacer.get(), RtpPacketMediaType::kVideo, ssrc + 1,
+ sequence_number, clock_.TimeInMilliseconds(), 250);
+
+ clock_.AdvanceTimeMilliseconds(1000);
+ while (pacer->QueueSizePackets() > 0) {
+ AdvanceTimeUntil(pacer->NextSendTime());
+ pacer->ProcessPackets();
+ }
+}
+
+TEST_F(PacingControllerTest, Padding) {
+ uint32_t ssrc = 12345;
+ uint16_t sequence_number = 1234;
+ const size_t kPacketSize = 250;
+ auto pacer = std::make_unique<PacingController>(&clock_, &callback_, trials_);
+ pacer->SetPacingRates(kTargetRate * kPaceMultiplier, kTargetRate);
+
+ const size_t kPacketsToSend = 20;
+ for (size_t i = 0; i < kPacketsToSend; ++i) {
+ SendAndExpectPacket(pacer.get(), RtpPacketMediaType::kVideo, ssrc,
+ sequence_number++, clock_.TimeInMilliseconds(),
+ kPacketSize);
+ }
+ const TimeDelta expected_pace_time =
+ DataSize::Bytes(pacer->QueueSizePackets() * kPacketSize) /
+ (kPaceMultiplier * kTargetRate);
+ EXPECT_CALL(callback_, SendPadding).Times(0);
+ // Only the media packets should be sent.
+ Timestamp start_time = clock_.CurrentTime();
+ while (pacer->QueueSizePackets() > 0) {
+ AdvanceTimeUntil(pacer->NextSendTime());
+ pacer->ProcessPackets();
+ }
+ const TimeDelta actual_pace_time = clock_.CurrentTime() - start_time;
+ EXPECT_LE((actual_pace_time - expected_pace_time).Abs(),
+ PacingController::kMinSleepTime);
+
+ // Pacing media happens at 2.5x, but padding was configured with 1.0x
+ // factor. We have to wait until the padding debt is gone before we start
+ // sending padding.
+ const TimeDelta time_to_padding_debt_free =
+ (expected_pace_time * kPaceMultiplier) - actual_pace_time;
+ clock_.AdvanceTime(time_to_padding_debt_free -
+ PacingController::kMinSleepTime);
+ pacer->ProcessPackets();
+
+ // Send 10 padding packets.
+ const size_t kPaddingPacketsToSend = 10;
+ DataSize padding_sent = DataSize::Zero();
+ size_t packets_sent = 0;
+ Timestamp first_send_time = Timestamp::MinusInfinity();
+ Timestamp last_send_time = Timestamp::MinusInfinity();
+
+ EXPECT_CALL(callback_, SendPadding)
+ .Times(kPaddingPacketsToSend)
+ .WillRepeatedly([&](size_t target_size) {
+ ++packets_sent;
+ if (packets_sent < kPaddingPacketsToSend) {
+ // Don't count bytes of last packet, instead just
+ // use this as the time the last packet finished
+ // sending.
+ padding_sent += DataSize::Bytes(target_size);
+ }
+ if (first_send_time.IsInfinite()) {
+ first_send_time = clock_.CurrentTime();
+ } else {
+ last_send_time = clock_.CurrentTime();
+ }
+ return target_size;
+ });
+ EXPECT_CALL(callback_, SendPacket(_, _, _, false, true))
+ .Times(kPaddingPacketsToSend);
+
+ while (packets_sent < kPaddingPacketsToSend) {
+ AdvanceTimeUntil(pacer->NextSendTime());
+ pacer->ProcessPackets();
+ }
+
+ // Verify rate of sent padding.
+ TimeDelta padding_duration = last_send_time - first_send_time;
+ DataRate padding_rate = padding_sent / padding_duration;
+ EXPECT_EQ(padding_rate, kTargetRate);
+}
+
+TEST_F(PacingControllerTest, NoPaddingBeforeNormalPacket) {
+ auto pacer = std::make_unique<PacingController>(&clock_, &callback_, trials_);
+ pacer->SetPacingRates(kTargetRate * kPaceMultiplier, kTargetRate);
+
+ EXPECT_CALL(callback_, SendPadding).Times(0);
+
+ pacer->ProcessPackets();
+ AdvanceTimeUntil(pacer->NextSendTime());
+
+ pacer->ProcessPackets();
+ AdvanceTimeUntil(pacer->NextSendTime());
+
+ uint32_t ssrc = 12345;
+ uint16_t sequence_number = 1234;
+ int64_t capture_time_ms = 56789;
+
+ SendAndExpectPacket(pacer.get(), RtpPacketMediaType::kVideo, ssrc,
+ sequence_number++, capture_time_ms, 250);
+ bool padding_sent = false;
+ EXPECT_CALL(callback_, SendPadding).WillOnce([&](size_t padding) {
+ padding_sent = true;
+ return padding;
+ });
+ EXPECT_CALL(callback_, SendPacket(_, _, _, _, true)).Times(1);
+ while (!padding_sent) {
+ AdvanceTimeUntil(pacer->NextSendTime());
+ pacer->ProcessPackets();
+ }
+}
+
+TEST_F(PacingControllerTest, VerifyAverageBitrateVaryingMediaPayload) {
+ uint32_t ssrc = 12345;
+ uint16_t sequence_number = 1234;
+ int64_t capture_time_ms = 56789;
+ const TimeDelta kAveragingWindowLength = TimeDelta::Seconds(10);
+ PacingControllerPadding callback;
+ auto pacer = std::make_unique<PacingController>(&clock_, &callback, trials_);
+ pacer->SetProbingEnabled(false);
+ pacer->SetPacingRates(kTargetRate * kPaceMultiplier, kTargetRate);
+
+ Timestamp start_time = clock_.CurrentTime();
+ size_t media_bytes = 0;
+ while (clock_.CurrentTime() - start_time < kAveragingWindowLength) {
+    // Maybe add new media packets corresponding to the expected send rate.
+ int rand_value = rand(); // NOLINT (rand_r instead of rand)
+ while (
+ media_bytes <
+ (kTargetRate * (clock_.CurrentTime() - start_time)).bytes<size_t>()) {
+      size_t media_payload = rand_value % 400 + 800; // [800, 1199] bytes.
+ pacer->EnqueuePacket(BuildPacket(RtpPacketMediaType::kVideo, ssrc,
+ sequence_number++, capture_time_ms,
+ media_payload));
+ media_bytes += media_payload;
+ }
+
+ AdvanceTimeUntil(pacer->NextSendTime());
+ pacer->ProcessPackets();
+ }
+
+ EXPECT_NEAR(
+ kTargetRate.bps(),
+ (DataSize::Bytes(callback.total_bytes_sent()) / kAveragingWindowLength)
+ .bps(),
+      (kTargetRate * 0.01 /* 1% error margin */).bps());
+}
+
+TEST_F(PacingControllerTest, Priority) {
+ uint32_t ssrc_low_priority = 12345;
+ uint32_t ssrc = 12346;
+ uint16_t sequence_number = 1234;
+ int64_t capture_time_ms = 56789;
+ int64_t capture_time_ms_low_priority = 1234567;
+ auto pacer = std::make_unique<PacingController>(&clock_, &callback_, trials_);
+ pacer->SetPacingRates(kTargetRate * kPaceMultiplier, DataRate::Zero());
+
+ ConsumeInitialBudget(pacer.get());
+
+ // Expect normal and low priority to be queued and high to pass through.
+ pacer->EnqueuePacket(BuildPacket(RtpPacketMediaType::kVideo,
+ ssrc_low_priority, sequence_number++,
+ capture_time_ms_low_priority, 250));
+
+ const size_t packets_to_send_per_interval =
+ kTargetRate.bps() * kPaceMultiplier / (8 * 250 * 200);
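+  // I.e. bits per second times the pace multiplier, divided by the packet
+  // size in bits and the 200 5-ms send intervals per second. Assuming, for
+  // illustration, kTargetRate = 800 kbps and kPaceMultiplier = 2.5 (the
+  // actual constants are defined earlier in this file), this gives
+  // 800000 * 2.5 / (8 * 250 * 200) = 5 packets.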
+ for (size_t i = 0; i < packets_to_send_per_interval; ++i) {
+ pacer->EnqueuePacket(BuildPacket(RtpPacketMediaType::kRetransmission, ssrc,
+ sequence_number++, capture_time_ms, 250));
+ }
+ pacer->EnqueuePacket(BuildPacket(RtpPacketMediaType::kAudio, ssrc,
+ sequence_number++, capture_time_ms, 250));
+
+ // Expect all high and normal priority to be sent out first.
+ EXPECT_CALL(callback_, SendPadding).Times(0);
+ EXPECT_CALL(callback_, SendPacket(ssrc, _, capture_time_ms, _, _))
+ .Times(packets_to_send_per_interval + 1);
+
+ while (pacer->QueueSizePackets() > 1) {
+ AdvanceTimeUntil(pacer->NextSendTime());
+ pacer->ProcessPackets();
+ }
+
+ EXPECT_EQ(1u, pacer->QueueSizePackets());
+
+ EXPECT_CALL(callback_, SendPacket(ssrc_low_priority, _,
+ capture_time_ms_low_priority, _, _));
+ AdvanceTimeUntil(pacer->NextSendTime());
+ pacer->ProcessPackets();
+}
+
+TEST_F(PacingControllerTest, RetransmissionPriority) {
+ uint32_t ssrc = 12345;
+ uint16_t sequence_number = 1234;
+ int64_t capture_time_ms = 45678;
+ int64_t capture_time_ms_retransmission = 56789;
+ auto pacer = std::make_unique<PacingController>(&clock_, &callback_, trials_);
+ pacer->SetPacingRates(kTargetRate * kPaceMultiplier, DataRate::Zero());
+
+  // Due to the multiplicative factor we can send 5 packets during a send
+  // interval: (network capacity * multiplier) / (8 bits per byte * packet
+  // size * #send intervals per second).
+ const size_t packets_to_send_per_interval =
+ kTargetRate.bps() * kPaceMultiplier / (8 * 250 * 200);
+ pacer->ProcessPackets();
+ EXPECT_EQ(0u, pacer->QueueSizePackets());
+
+ // Alternate retransmissions and normal packets.
+ for (size_t i = 0; i < packets_to_send_per_interval; ++i) {
+ pacer->EnqueuePacket(BuildPacket(RtpPacketMediaType::kVideo, ssrc,
+ sequence_number++, capture_time_ms, 250));
+ pacer->EnqueuePacket(BuildPacket(RtpPacketMediaType::kRetransmission, ssrc,
+ sequence_number++,
+ capture_time_ms_retransmission, 250));
+ }
+ EXPECT_EQ(2 * packets_to_send_per_interval, pacer->QueueSizePackets());
+
+ // Expect all retransmissions to be sent out first despite having a later
+ // capture time.
+ EXPECT_CALL(callback_, SendPadding).Times(0);
+ EXPECT_CALL(callback_, SendPacket(_, _, _, false, _)).Times(0);
+ EXPECT_CALL(callback_,
+ SendPacket(ssrc, _, capture_time_ms_retransmission, true, _))
+ .Times(packets_to_send_per_interval);
+
+ while (pacer->QueueSizePackets() > packets_to_send_per_interval) {
+ AdvanceTimeUntil(pacer->NextSendTime());
+ pacer->ProcessPackets();
+ }
+ EXPECT_EQ(packets_to_send_per_interval, pacer->QueueSizePackets());
+
+ // Expect the remaining (non-retransmission) packets to be sent.
+ EXPECT_CALL(callback_, SendPadding).Times(0);
+ EXPECT_CALL(callback_, SendPacket(_, _, _, true, _)).Times(0);
+ EXPECT_CALL(callback_, SendPacket(ssrc, _, capture_time_ms, false, _))
+ .Times(packets_to_send_per_interval);
+
+ while (pacer->QueueSizePackets() > 0) {
+ AdvanceTimeUntil(pacer->NextSendTime());
+ pacer->ProcessPackets();
+ }
+ EXPECT_EQ(0u, pacer->QueueSizePackets());
+}
+
+TEST_F(PacingControllerTest, HighPrioDoesntAffectBudget) {
+ const size_t kPacketSize = 250;
+ uint32_t ssrc = 12346;
+ uint16_t sequence_number = 1234;
+ int64_t capture_time_ms = 56789;
+ auto pacer = std::make_unique<PacingController>(&clock_, &callback_, trials_);
+ pacer->SetPacingRates(kTargetRate * kPaceMultiplier, DataRate::Zero());
+
+  // As high prio packets don't affect the budget, we should be able to send
+  // a high number of them at once.
+ const size_t kNumAudioPackets = 25;
+ for (size_t i = 0; i < kNumAudioPackets; ++i) {
+ SendAndExpectPacket(pacer.get(), RtpPacketMediaType::kAudio, ssrc,
+ sequence_number++, capture_time_ms, kPacketSize);
+ }
+ pacer->ProcessPackets();
+  // Low prio packets do affect the budget.
+  // Due to the multiplicative factor we can send 5 packets during a send
+  // interval: (network capacity * multiplier) / (8 bits per byte * packet
+  // size * #send intervals per second).
+ const size_t kPacketsToSendPerInterval =
+ kTargetRate.bps() * kPaceMultiplier / (8 * kPacketSize * 200);
+ for (size_t i = 0; i < kPacketsToSendPerInterval; ++i) {
+ SendAndExpectPacket(pacer.get(), RtpPacketMediaType::kVideo, ssrc,
+ sequence_number++, clock_.TimeInMilliseconds(),
+ kPacketSize);
+ }
+
+ // Send all packets and measure pace time.
+ Timestamp start_time = clock_.CurrentTime();
+ while (pacer->QueueSizePackets() > 0) {
+ AdvanceTimeUntil(pacer->NextSendTime());
+ pacer->ProcessPackets();
+ }
+
+ // Measure pacing time. Expect only low-prio packets to affect this.
+ TimeDelta pacing_time = clock_.CurrentTime() - start_time;
+ TimeDelta expected_pacing_time =
+ DataSize::Bytes(kPacketsToSendPerInterval * kPacketSize) /
+ (kTargetRate * kPaceMultiplier);
+ EXPECT_NEAR(pacing_time.us<double>(), expected_pacing_time.us<double>(),
+ PacingController::kMinSleepTime.us<double>());
+}
+
+TEST_F(PacingControllerTest, SendsOnlyPaddingWhenCongested) {
+ uint32_t ssrc = 202020;
+ uint16_t sequence_number = 1000;
+ int kPacketSize = 250;
+ auto pacer = std::make_unique<PacingController>(&clock_, &callback_, trials_);
+ pacer->SetPacingRates(kTargetRate * kPaceMultiplier, DataRate::Zero());
+
+ // Send an initial packet so we have a last send time.
+ SendAndExpectPacket(pacer.get(), RtpPacketMediaType::kVideo, ssrc,
+ sequence_number++, clock_.TimeInMilliseconds(),
+ kPacketSize);
+ AdvanceTimeUntil(pacer->NextSendTime());
+ pacer->ProcessPackets();
+ ::testing::Mock::VerifyAndClearExpectations(&callback_);
+
+  // Set the congested state; we should not send anything until the 500 ms
+  // since-last-send limit for keep-alives triggers.
+ EXPECT_CALL(callback_, SendPacket).Times(0);
+ EXPECT_CALL(callback_, SendPadding).Times(0);
+ pacer->SetCongested(true);
+ size_t blocked_packets = 0;
+ int64_t expected_time_until_padding = 500;
+ while (expected_time_until_padding > 5) {
+ pacer->EnqueuePacket(BuildPacket(RtpPacketMediaType::kVideo, ssrc,
+ sequence_number++,
+ clock_.TimeInMilliseconds(), kPacketSize));
+ blocked_packets++;
+ clock_.AdvanceTimeMilliseconds(5);
+ pacer->ProcessPackets();
+ expected_time_until_padding -= 5;
+ }
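+  // The loop above enqueues one packet per 5 ms tick while congested,
+  // counting down from 500 ms to just above 5 ms, i.e.
+  // (500 - 10) / 5 + 1 = 99 packets, all of which should still be queued.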
+
+ ::testing::Mock::VerifyAndClearExpectations(&callback_);
+ EXPECT_CALL(callback_, SendPadding(1)).WillOnce(Return(1));
+ EXPECT_CALL(callback_, SendPacket(_, _, _, _, true)).Times(1);
+ clock_.AdvanceTimeMilliseconds(5);
+ pacer->ProcessPackets();
+ EXPECT_EQ(blocked_packets, pacer->QueueSizePackets());
+}
+
+TEST_F(PacingControllerTest, DoesNotAllowOveruseAfterCongestion) {
+ uint32_t ssrc = 202020;
+ uint16_t seq_num = 1000;
+ int size = 1000;
+ auto now_ms = [this] { return clock_.TimeInMilliseconds(); };
+ auto pacer = std::make_unique<PacingController>(&clock_, &callback_, trials_);
+ pacer->SetPacingRates(kTargetRate * kPaceMultiplier, DataRate::Zero());
+ EXPECT_CALL(callback_, SendPadding).Times(0);
+ // The pacing rate is low enough that the budget should not allow two packets
+ // to be sent in a row.
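+  // (400 * 8 * 1000 / 5 = 640000 bps = 80000 bytes/s, so each 1000-byte
+  // packet costs 12.5 ms of budget, while the process ticks below are only
+  // 5 ms apart.)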
+ pacer->SetPacingRates(DataRate::BitsPerSec(400 * 8 * 1000 / 5),
+ DataRate::Zero());
+ // Not yet budget limited or congested, packet is sent.
+ pacer->EnqueuePacket(
+ BuildPacket(RtpPacketMediaType::kVideo, ssrc, seq_num++, now_ms(), size));
+ EXPECT_CALL(callback_, SendPacket).Times(1);
+ clock_.AdvanceTimeMilliseconds(5);
+ pacer->ProcessPackets();
+ // Packet blocked due to congestion.
+ pacer->SetCongested(true);
+ pacer->EnqueuePacket(
+ BuildPacket(RtpPacketMediaType::kVideo, ssrc, seq_num++, now_ms(), size));
+ EXPECT_CALL(callback_, SendPacket).Times(0);
+ clock_.AdvanceTimeMilliseconds(5);
+ pacer->ProcessPackets();
+ // Packet blocked due to congestion.
+ pacer->EnqueuePacket(
+ BuildPacket(RtpPacketMediaType::kVideo, ssrc, seq_num++, now_ms(), size));
+ EXPECT_CALL(callback_, SendPacket).Times(0);
+ clock_.AdvanceTimeMilliseconds(5);
+ pacer->ProcessPackets();
+ // Congestion removed and budget has recovered, packet is sent.
+ pacer->EnqueuePacket(
+ BuildPacket(RtpPacketMediaType::kVideo, ssrc, seq_num++, now_ms(), size));
+ EXPECT_CALL(callback_, SendPacket).Times(1);
+ clock_.AdvanceTimeMilliseconds(5);
+ pacer->SetCongested(false);
+ pacer->ProcessPackets();
+  // Should be blocked due to budget limitation now that congestion has been
+  // removed.
+ pacer->EnqueuePacket(
+ BuildPacket(RtpPacketMediaType::kVideo, ssrc, seq_num++, now_ms(), size));
+ EXPECT_CALL(callback_, SendPacket).Times(0);
+ clock_.AdvanceTimeMilliseconds(5);
+ pacer->ProcessPackets();
+}
+
+TEST_F(PacingControllerTest, Pause) {
+ uint32_t ssrc_low_priority = 12345;
+ uint32_t ssrc = 12346;
+ uint32_t ssrc_high_priority = 12347;
+ uint16_t sequence_number = 1234;
+ auto pacer = std::make_unique<PacingController>(&clock_, &callback_, trials_);
+ pacer->SetPacingRates(kTargetRate * kPaceMultiplier, DataRate::Zero());
+
+ EXPECT_TRUE(pacer->OldestPacketEnqueueTime().IsInfinite());
+
+ ConsumeInitialBudget(pacer.get());
+
+ pacer->Pause();
+
+ int64_t capture_time_ms = clock_.TimeInMilliseconds();
+ const size_t packets_to_send_per_interval =
+ kTargetRate.bps() * kPaceMultiplier / (8 * 250 * 200);
+ for (size_t i = 0; i < packets_to_send_per_interval; ++i) {
+ pacer->EnqueuePacket(BuildPacket(RtpPacketMediaType::kVideo,
+ ssrc_low_priority, sequence_number++,
+ capture_time_ms, 250));
+ pacer->EnqueuePacket(BuildPacket(RtpPacketMediaType::kRetransmission, ssrc,
+ sequence_number++, capture_time_ms, 250));
+ pacer->EnqueuePacket(BuildPacket(RtpPacketMediaType::kAudio,
+ ssrc_high_priority, sequence_number++,
+ capture_time_ms, 250));
+ }
+ clock_.AdvanceTimeMilliseconds(10000);
+ int64_t second_capture_time_ms = clock_.TimeInMilliseconds();
+ for (size_t i = 0; i < packets_to_send_per_interval; ++i) {
+ pacer->EnqueuePacket(BuildPacket(RtpPacketMediaType::kVideo,
+ ssrc_low_priority, sequence_number++,
+ second_capture_time_ms, 250));
+ pacer->EnqueuePacket(BuildPacket(RtpPacketMediaType::kRetransmission, ssrc,
+ sequence_number++, second_capture_time_ms,
+ 250));
+ pacer->EnqueuePacket(BuildPacket(RtpPacketMediaType::kAudio,
+ ssrc_high_priority, sequence_number++,
+ second_capture_time_ms, 250));
+ }
+
+ // Expect everything to be queued.
+ EXPECT_EQ(capture_time_ms, pacer->OldestPacketEnqueueTime().ms());
+
+ // Process triggers keep-alive packet.
+ EXPECT_CALL(callback_, SendPadding).WillOnce([](size_t padding) {
+ return padding;
+ });
+ EXPECT_CALL(callback_, SendPacket(_, _, _, _, true)).Times(1);
+ pacer->ProcessPackets();
+
+ // Verify no packets sent for the rest of the paused process interval.
+ const TimeDelta kProcessInterval = TimeDelta::Millis(5);
+ TimeDelta expected_time_until_send = PacingController::kPausedProcessInterval;
+ EXPECT_CALL(callback_, SendPadding).Times(0);
+ while (expected_time_until_send >= kProcessInterval) {
+ pacer->ProcessPackets();
+ clock_.AdvanceTime(kProcessInterval);
+ expected_time_until_send -= kProcessInterval;
+ }
+
+ // New keep-alive packet.
+ ::testing::Mock::VerifyAndClearExpectations(&callback_);
+ EXPECT_CALL(callback_, SendPadding).WillOnce([](size_t padding) {
+ return padding;
+ });
+ EXPECT_CALL(callback_, SendPacket(_, _, _, _, true)).Times(1);
+ clock_.AdvanceTime(kProcessInterval);
+ pacer->ProcessPackets();
+ ::testing::Mock::VerifyAndClearExpectations(&callback_);
+
+ // Expect high prio packets to come out first followed by normal
+ // prio packets and low prio packets (all in capture order).
+ {
+ ::testing::InSequence sequence;
+ EXPECT_CALL(callback_,
+ SendPacket(ssrc_high_priority, _, capture_time_ms, _, _))
+ .Times(packets_to_send_per_interval);
+ EXPECT_CALL(callback_,
+ SendPacket(ssrc_high_priority, _, second_capture_time_ms, _, _))
+ .Times(packets_to_send_per_interval);
+
+ for (size_t i = 0; i < packets_to_send_per_interval; ++i) {
+ EXPECT_CALL(callback_, SendPacket(ssrc, _, capture_time_ms, _, _))
+ .Times(1);
+ }
+ for (size_t i = 0; i < packets_to_send_per_interval; ++i) {
+ EXPECT_CALL(callback_, SendPacket(ssrc, _, second_capture_time_ms, _, _))
+ .Times(1);
+ }
+ for (size_t i = 0; i < packets_to_send_per_interval; ++i) {
+ EXPECT_CALL(callback_,
+ SendPacket(ssrc_low_priority, _, capture_time_ms, _, _))
+ .Times(1);
+ }
+ for (size_t i = 0; i < packets_to_send_per_interval; ++i) {
+ EXPECT_CALL(callback_, SendPacket(ssrc_low_priority, _,
+ second_capture_time_ms, _, _))
+ .Times(1);
+ }
+ }
+ pacer->Resume();
+ while (pacer->QueueSizePackets() > 0) {
+ AdvanceTimeUntil(pacer->NextSendTime());
+ pacer->ProcessPackets();
+ }
+
+ EXPECT_TRUE(pacer->OldestPacketEnqueueTime().IsInfinite());
+}
+
+TEST_F(PacingControllerTest, InactiveFromStart) {
+  // Recreate the pacer without the initial time forwarding.
+ auto pacer = std::make_unique<PacingController>(&clock_, &callback_, trials_);
+ pacer->SetProbingEnabled(false);
+ pacer->SetPacingRates(kTargetRate * kPaceMultiplier, kTargetRate);
+
+ // No packets sent, there should be no keep-alives sent either.
+ EXPECT_CALL(callback_, SendPadding).Times(0);
+ EXPECT_CALL(callback_, SendPacket).Times(0);
+ pacer->ProcessPackets();
+
+ const Timestamp start_time = clock_.CurrentTime();
+
+  // Determine the margin needed so we can advance to the last possible
+  // moment that will not cause a process event.
+ const TimeDelta time_margin =
+ PacingController::kMinSleepTime + TimeDelta::Micros(1);
+
+ EXPECT_EQ(pacer->NextSendTime() - start_time,
+ PacingController::kPausedProcessInterval);
+ clock_.AdvanceTime(PacingController::kPausedProcessInterval - time_margin);
+ pacer->ProcessPackets();
+ EXPECT_EQ(pacer->NextSendTime() - start_time,
+ PacingController::kPausedProcessInterval);
+
+ clock_.AdvanceTime(time_margin);
+ pacer->ProcessPackets();
+ EXPECT_EQ(pacer->NextSendTime() - start_time,
+ 2 * PacingController::kPausedProcessInterval);
+}
+
+TEST_F(PacingControllerTest, QueueTimeGrowsOverTime) {
+ uint32_t ssrc = 12346;
+ uint16_t sequence_number = 1234;
+ auto pacer = std::make_unique<PacingController>(&clock_, &callback_, trials_);
+ pacer->SetPacingRates(kTargetRate * kPaceMultiplier, DataRate::Zero());
+ EXPECT_TRUE(pacer->OldestPacketEnqueueTime().IsInfinite());
+
+ pacer->SetPacingRates(DataRate::BitsPerSec(30000 * kPaceMultiplier),
+ DataRate::Zero());
+ SendAndExpectPacket(pacer.get(), RtpPacketMediaType::kVideo, ssrc,
+ sequence_number, clock_.TimeInMilliseconds(), 1200);
+
+ clock_.AdvanceTimeMilliseconds(500);
+ EXPECT_EQ(clock_.TimeInMilliseconds() - 500,
+ pacer->OldestPacketEnqueueTime().ms());
+ pacer->ProcessPackets();
+ EXPECT_TRUE(pacer->OldestPacketEnqueueTime().IsInfinite());
+}
+
+TEST_F(PacingControllerTest, ProbingWithInsertedPackets) {
+ const size_t kPacketSize = 1200;
+ const int kInitialBitrateBps = 300000;
+ uint32_t ssrc = 12346;
+ uint16_t sequence_number = 1234;
+
+ PacingControllerProbing packet_sender;
+ auto pacer =
+ std::make_unique<PacingController>(&clock_, &packet_sender, trials_);
+ std::vector<ProbeClusterConfig> probe_clusters = {
+ {.at_time = clock_.CurrentTime(),
+ .target_data_rate = kFirstClusterRate,
+ .target_duration = TimeDelta::Millis(15),
+ .target_probe_count = 5,
+ .id = 0},
+ {.at_time = clock_.CurrentTime(),
+ .target_data_rate = kSecondClusterRate,
+ .target_duration = TimeDelta::Millis(15),
+ .target_probe_count = 5,
+ .id = 1}};
+ pacer->CreateProbeClusters(probe_clusters);
+ pacer->SetPacingRates(
+ DataRate::BitsPerSec(kInitialBitrateBps * kPaceMultiplier),
+ DataRate::Zero());
+
+ for (int i = 0; i < 10; ++i) {
+ pacer->EnqueuePacket(BuildPacket(RtpPacketMediaType::kVideo, ssrc,
+ sequence_number++,
+ clock_.TimeInMilliseconds(), kPacketSize));
+ }
+
+ int64_t start = clock_.TimeInMilliseconds();
+ while (packet_sender.packets_sent() < 5) {
+ AdvanceTimeUntil(pacer->NextSendTime());
+ pacer->ProcessPackets();
+ }
+ int packets_sent = packet_sender.packets_sent();
+  // Validate first cluster bitrate. Note that we have to account for the
+  // number of intervals, hence (packets_sent - 1) for the first cluster.
+ EXPECT_NEAR((packets_sent - 1) * kPacketSize * 8000 /
+ (clock_.TimeInMilliseconds() - start),
+ kFirstClusterRate.bps(), kProbingErrorMargin.bps());
+ // Probing always starts with a small padding packet.
+ EXPECT_EQ(1, packet_sender.padding_sent());
+
+ AdvanceTimeUntil(pacer->NextSendTime());
+ start = clock_.TimeInMilliseconds();
+ while (packet_sender.packets_sent() < 10) {
+ AdvanceTimeUntil(pacer->NextSendTime());
+ pacer->ProcessPackets();
+ }
+ packets_sent = packet_sender.packets_sent() - packets_sent;
+ // Validate second cluster bitrate.
+ EXPECT_NEAR((packets_sent - 1) * kPacketSize * 8000 /
+ (clock_.TimeInMilliseconds() - start),
+ kSecondClusterRate.bps(), kProbingErrorMargin.bps());
+}
+
+TEST_F(PacingControllerTest, SkipsProbesWhenProcessIntervalTooLarge) {
+ const size_t kPacketSize = 1200;
+ const int kInitialBitrateBps = 300000;
+ const uint32_t ssrc = 12346;
+ const int kProbeClusterId = 3;
+
+ uint16_t sequence_number = 1234;
+
+ PacingControllerProbing packet_sender;
+
+ const test::ExplicitKeyValueConfig trials(
+ "WebRTC-Bwe-ProbingBehavior/max_probe_delay:2ms/");
+ auto pacer =
+ std::make_unique<PacingController>(&clock_, &packet_sender, trials);
+ pacer->SetPacingRates(
+ DataRate::BitsPerSec(kInitialBitrateBps * kPaceMultiplier),
+ DataRate::BitsPerSec(kInitialBitrateBps));
+
+ for (int i = 0; i < 10; ++i) {
+ pacer->EnqueuePacket(BuildPacket(RtpPacketMediaType::kVideo, ssrc,
+ sequence_number++,
+ clock_.TimeInMilliseconds(), kPacketSize));
+ }
+ while (pacer->QueueSizePackets() > 0) {
+ AdvanceTimeUntil(pacer->NextSendTime());
+ pacer->ProcessPackets();
+ }
+
+ // Probe at a very high rate.
+ std::vector<ProbeClusterConfig> probe_clusters = {
+ {.at_time = clock_.CurrentTime(),
+       .target_data_rate = DataRate::KilobitsPerSec(10000), // 10 Mbps.
+ .target_duration = TimeDelta::Millis(15),
+ .target_probe_count = 5,
+ .id = kProbeClusterId}};
+ pacer->CreateProbeClusters(probe_clusters);
+
+ // We need one packet to start the probe.
+ pacer->EnqueuePacket(BuildPacket(RtpPacketMediaType::kVideo, ssrc,
+ sequence_number++,
+ clock_.TimeInMilliseconds(), kPacketSize));
+ const int packets_sent_before_probe = packet_sender.packets_sent();
+ AdvanceTimeUntil(pacer->NextSendTime());
+ pacer->ProcessPackets();
+ EXPECT_EQ(packet_sender.packets_sent(), packets_sent_before_probe + 1);
+
+ // Figure out how long between probe packets.
+ Timestamp start_time = clock_.CurrentTime();
+ AdvanceTimeUntil(pacer->NextSendTime());
+ TimeDelta time_between_probes = clock_.CurrentTime() - start_time;
+  // Advance exactly that distance again.
+ clock_.AdvanceTime(time_between_probes);
+
+ // Send second probe packet.
+ pacer->EnqueuePacket(BuildPacket(RtpPacketMediaType::kVideo, ssrc,
+ sequence_number++,
+ clock_.TimeInMilliseconds(), kPacketSize));
+ pacer->ProcessPackets();
+ EXPECT_EQ(packet_sender.packets_sent(), packets_sent_before_probe + 2);
+ PacedPacketInfo last_pacing_info = packet_sender.last_pacing_info();
+ EXPECT_EQ(last_pacing_info.probe_cluster_id, kProbeClusterId);
+
+ // We're exactly where we should be for the next probe.
+ const Timestamp probe_time = clock_.CurrentTime();
+ EXPECT_EQ(pacer->NextSendTime(), clock_.CurrentTime());
+
+ BitrateProberConfig probing_config(&trials);
+ EXPECT_GT(probing_config.max_probe_delay.Get(), TimeDelta::Zero());
+ // Advance to within max probe delay, should still return same target.
+ clock_.AdvanceTime(probing_config.max_probe_delay.Get());
+ EXPECT_EQ(pacer->NextSendTime(), probe_time);
+
+ // Too high probe delay, drop it!
+ clock_.AdvanceTime(TimeDelta::Micros(1));
+
+ int packets_sent_before_timeout = packet_sender.total_packets_sent();
+  // Expected next process time is unchanged, but calling ProcessPackets()
+  // should not generate new packets.
+ EXPECT_EQ(pacer->NextSendTime(), probe_time);
+ pacer->ProcessPackets();
+ EXPECT_EQ(packet_sender.total_packets_sent(), packets_sent_before_timeout);
+
+ // Next packet sent is not part of probe.
+ AdvanceTimeUntil(pacer->NextSendTime());
+ pacer->ProcessPackets();
+ const int expected_probe_id = PacedPacketInfo::kNotAProbe;
+ EXPECT_EQ(packet_sender.last_pacing_info().probe_cluster_id,
+ expected_probe_id);
+}
+
+TEST_F(PacingControllerTest, ProbingWithPaddingSupport) {
+ const size_t kPacketSize = 1200;
+ const int kInitialBitrateBps = 300000;
+ uint32_t ssrc = 12346;
+ uint16_t sequence_number = 1234;
+
+ PacingControllerProbing packet_sender;
+ auto pacer =
+ std::make_unique<PacingController>(&clock_, &packet_sender, trials_);
+ std::vector<ProbeClusterConfig> probe_clusters = {
+ {.at_time = clock_.CurrentTime(),
+ .target_data_rate = kFirstClusterRate,
+ .target_duration = TimeDelta::Millis(15),
+ .target_probe_count = 5,
+ .id = 0}};
+ pacer->CreateProbeClusters(probe_clusters);
+
+ pacer->SetPacingRates(
+ DataRate::BitsPerSec(kInitialBitrateBps * kPaceMultiplier),
+ DataRate::Zero());
+
+ for (int i = 0; i < 3; ++i) {
+ pacer->EnqueuePacket(BuildPacket(RtpPacketMediaType::kVideo, ssrc,
+ sequence_number++,
+ clock_.TimeInMilliseconds(), kPacketSize));
+ }
+
+ int64_t start = clock_.TimeInMilliseconds();
+ int process_count = 0;
+ while (process_count < 5) {
+ AdvanceTimeUntil(pacer->NextSendTime());
+ pacer->ProcessPackets();
+ ++process_count;
+ }
+ int packets_sent = packet_sender.packets_sent();
+ int padding_sent = packet_sender.padding_sent();
+ EXPECT_GT(packets_sent, 0);
+ EXPECT_GT(padding_sent, 0);
+  // Note that the number of intervals here for kPacketSize is packets_sent
+  // (not packets_sent - 1), since padding is sent within the same cluster.
+ EXPECT_NEAR((packets_sent * kPacketSize * 8000 + padding_sent) /
+ (clock_.TimeInMilliseconds() - start),
+ kFirstClusterRate.bps(), kProbingErrorMargin.bps());
+}
+
+TEST_F(PacingControllerTest, PaddingOveruse) {
+ uint32_t ssrc = 12346;
+ uint16_t sequence_number = 1234;
+ const size_t kPacketSize = 1200;
+ auto pacer = std::make_unique<PacingController>(&clock_, &callback_, trials_);
+ pacer->SetPacingRates(kTargetRate * kPaceMultiplier, DataRate::Zero());
+
+ // Initially no padding rate.
+ pacer->ProcessPackets();
+ pacer->SetPacingRates(DataRate::BitsPerSec(60000 * kPaceMultiplier),
+ DataRate::Zero());
+
+ SendAndExpectPacket(pacer.get(), RtpPacketMediaType::kVideo, ssrc,
+ sequence_number++, clock_.TimeInMilliseconds(),
+ kPacketSize);
+ pacer->ProcessPackets();
+
+ // Add 30kbit padding. When increasing budget, media budget will increase from
+ // negative (overuse) while padding budget will increase from 0.
+ clock_.AdvanceTimeMilliseconds(5);
+ pacer->SetPacingRates(DataRate::BitsPerSec(60000 * kPaceMultiplier),
+ DataRate::BitsPerSec(30000));
+
+ SendAndExpectPacket(pacer.get(), RtpPacketMediaType::kVideo, ssrc,
+ sequence_number++, clock_.TimeInMilliseconds(),
+ kPacketSize);
+ EXPECT_LT(TimeDelta::Millis(5), pacer->ExpectedQueueTime());
+ // Don't send padding if queue is non-empty, even if padding budget > 0.
+ EXPECT_CALL(callback_, SendPadding).Times(0);
+ AdvanceTimeUntil(pacer->NextSendTime());
+ pacer->ProcessPackets();
+}
+
+TEST_F(PacingControllerTest, ProbeClusterId) {
+ MockPacketSender callback;
+ uint32_t ssrc = 12346;
+ uint16_t sequence_number = 1234;
+ const size_t kPacketSize = 1200;
+
+ auto pacer = std::make_unique<PacingController>(&clock_, &callback, trials_);
+ pacer->CreateProbeClusters(std::vector<ProbeClusterConfig>(
+ {{.at_time = clock_.CurrentTime(),
+ .target_data_rate = kFirstClusterRate,
+ .target_duration = TimeDelta::Millis(15),
+ .target_probe_count = 5,
+ .id = 0},
+ {.at_time = clock_.CurrentTime(),
+ .target_data_rate = kSecondClusterRate,
+ .target_duration = TimeDelta::Millis(15),
+ .target_probe_count = 5,
+ .id = 1}}));
+ pacer->SetPacingRates(kTargetRate * kPaceMultiplier, kTargetRate);
+ pacer->SetProbingEnabled(true);
+ for (int i = 0; i < 10; ++i) {
+ pacer->EnqueuePacket(BuildPacket(RtpPacketMediaType::kVideo, ssrc,
+ sequence_number++,
+ clock_.TimeInMilliseconds(), kPacketSize));
+ }
+
+ // First probing cluster.
+ EXPECT_CALL(callback,
+ SendPacket(_, Field(&PacedPacketInfo::probe_cluster_id, 0)))
+ .Times(5);
+
+ for (int i = 0; i < 5; ++i) {
+ AdvanceTimeUntil(pacer->NextSendTime());
+ pacer->ProcessPackets();
+ }
+
+ // Second probing cluster.
+ EXPECT_CALL(callback,
+ SendPacket(_, Field(&PacedPacketInfo::probe_cluster_id, 1)))
+ .Times(5);
+
+ for (int i = 0; i < 5; ++i) {
+ AdvanceTimeUntil(pacer->NextSendTime());
+ pacer->ProcessPackets();
+ }
+
+ // Needed for the Field comparer below.
+ const int kNotAProbe = PacedPacketInfo::kNotAProbe;
+ // No more probing packets.
+ EXPECT_CALL(callback, GeneratePadding).WillOnce([&](DataSize padding_size) {
+ std::vector<std::unique_ptr<RtpPacketToSend>> padding_packets;
+ padding_packets.emplace_back(
+ BuildPacket(RtpPacketMediaType::kPadding, ssrc, sequence_number++,
+ clock_.TimeInMilliseconds(), padding_size.bytes()));
+ return padding_packets;
+ });
+ bool non_probe_packet_seen = false;
+ EXPECT_CALL(callback, SendPacket)
+ .WillOnce([&](std::unique_ptr<RtpPacketToSend> packet,
+ const PacedPacketInfo& cluster_info) {
+ EXPECT_EQ(cluster_info.probe_cluster_id, kNotAProbe);
+ non_probe_packet_seen = true;
+ });
+ while (!non_probe_packet_seen) {
+ AdvanceTimeUntil(pacer->NextSendTime());
+ pacer->ProcessPackets();
+ }
+}
+
+TEST_F(PacingControllerTest, OwnedPacketPrioritizedOnType) {
+ MockPacketSender callback;
+ uint32_t ssrc = 123;
+
+ auto pacer = std::make_unique<PacingController>(&clock_, &callback, trials_);
+ pacer->SetPacingRates(kTargetRate * kPaceMultiplier, DataRate::Zero());
+
+  // Insert a packet of each type, from low to high priority. Since priority
+  // is weighted higher than insertion order, these should come out of the
+  // pacer in reverse order, with the exception of FEC and video.
+
+ for (RtpPacketMediaType type :
+ {RtpPacketMediaType::kPadding,
+ RtpPacketMediaType::kForwardErrorCorrection, RtpPacketMediaType::kVideo,
+ RtpPacketMediaType::kRetransmission, RtpPacketMediaType::kAudio}) {
+ pacer->EnqueuePacket(BuildPacket(type, ++ssrc, /*sequence_number=*/123,
+ clock_.TimeInMilliseconds(),
+ /*size=*/150));
+ }
+
+ ::testing::InSequence seq;
+ EXPECT_CALL(callback,
+ SendPacket(Pointee(Property(&RtpPacketToSend::packet_type,
+ RtpPacketMediaType::kAudio)),
+ _));
+ EXPECT_CALL(callback,
+ SendPacket(Pointee(Property(&RtpPacketToSend::packet_type,
+ RtpPacketMediaType::kRetransmission)),
+ _));
+
+ // FEC and video actually have the same priority, so will come out in
+ // insertion order.
+ EXPECT_CALL(
+ callback,
+ SendPacket(Pointee(Property(&RtpPacketToSend::packet_type,
+ RtpPacketMediaType::kForwardErrorCorrection)),
+ _));
+ EXPECT_CALL(callback,
+ SendPacket(Pointee(Property(&RtpPacketToSend::packet_type,
+ RtpPacketMediaType::kVideo)),
+ _));
+
+ EXPECT_CALL(callback,
+ SendPacket(Pointee(Property(&RtpPacketToSend::packet_type,
+ RtpPacketMediaType::kPadding)),
+ _));
+
+ while (pacer->QueueSizePackets() > 0) {
+ AdvanceTimeUntil(pacer->NextSendTime());
+ pacer->ProcessPackets();
+ }
+}
+
+TEST_F(PacingControllerTest, SmallFirstProbePacket) {
+ MockPacketSender callback;
+ auto pacer = std::make_unique<PacingController>(&clock_, &callback, trials_);
+ std::vector<ProbeClusterConfig> probe_clusters = {
+ {.at_time = clock_.CurrentTime(),
+ .target_data_rate = kFirstClusterRate,
+ .target_duration = TimeDelta::Millis(15),
+ .target_probe_count = 5,
+ .id = 0}};
+ pacer->CreateProbeClusters(probe_clusters);
+
+ pacer->SetPacingRates(kTargetRate * kPaceMultiplier, DataRate::Zero());
+
+ // Add high prio media.
+ pacer->EnqueuePacket(audio_.BuildNextPacket(234));
+
+ // Expect small padding packet to be requested.
+ EXPECT_CALL(callback, GeneratePadding(DataSize::Bytes(1)))
+ .WillOnce([&](DataSize padding_size) {
+ std::vector<std::unique_ptr<RtpPacketToSend>> padding_packets;
+ padding_packets.emplace_back(
+ BuildPacket(RtpPacketMediaType::kPadding, kAudioSsrc, 1,
+ clock_.TimeInMilliseconds(), 1));
+ return padding_packets;
+ });
+
+ size_t packets_sent = 0;
+ bool media_seen = false;
+ EXPECT_CALL(callback, SendPacket)
+ .Times(::testing::AnyNumber())
+ .WillRepeatedly([&](std::unique_ptr<RtpPacketToSend> packet,
+ const PacedPacketInfo& cluster_info) {
+ if (packets_sent == 0) {
+ EXPECT_EQ(packet->packet_type(), RtpPacketMediaType::kPadding);
+ } else {
+ if (packet->packet_type() == RtpPacketMediaType::kAudio) {
+ media_seen = true;
+ }
+ }
+ packets_sent++;
+ });
+ while (!media_seen) {
+ pacer->ProcessPackets();
+ clock_.AdvanceTimeMilliseconds(5);
+ }
+}
+
+TEST_F(PacingControllerTest, TaskLate) {
+ // Set a low send rate to more easily test timing issues.
+ DataRate kSendRate = DataRate::KilobitsPerSec(30);
+ auto pacer = std::make_unique<PacingController>(&clock_, &callback_, trials_);
+ pacer->SetPacingRates(kSendRate, DataRate::Zero());
+
+ // Add four packets of equal size and priority.
+ pacer->EnqueuePacket(video_.BuildNextPacket(1000));
+ pacer->EnqueuePacket(video_.BuildNextPacket(1000));
+ pacer->EnqueuePacket(video_.BuildNextPacket(1000));
+ pacer->EnqueuePacket(video_.BuildNextPacket(1000));
+
+ // Process packets, only first should be sent.
+ EXPECT_CALL(callback_, SendPacket).Times(1);
+ pacer->ProcessPackets();
+
+ Timestamp next_send_time = pacer->NextSendTime();
+  // Determine the time between packets (ca 62 ms).
+ const TimeDelta time_between_packets = next_send_time - clock_.CurrentTime();
+
+ // Simulate a late process call, executed just before we allow sending the
+ // fourth packet.
+ const TimeDelta kOffset = TimeDelta::Millis(1);
+ clock_.AdvanceTime((time_between_packets * 3) - kOffset);
+
+ EXPECT_CALL(callback_, SendPacket).Times(2);
+ pacer->ProcessPackets();
+
+  // Check that the next scheduled send time is ca 1 ms away.
+ next_send_time = pacer->NextSendTime();
+ const TimeDelta time_left = next_send_time - clock_.CurrentTime();
+ EXPECT_EQ(time_left.RoundTo(TimeDelta::Millis(1)), kOffset);
+
+ clock_.AdvanceTime(time_left);
+ EXPECT_CALL(callback_, SendPacket);
+ pacer->ProcessPackets();
+}
+
+TEST_F(PacingControllerTest, NoProbingWhilePaused) {
+ uint32_t ssrc = 12345;
+ uint16_t sequence_number = 1234;
+ auto pacer = std::make_unique<PacingController>(&clock_, &callback_, trials_);
+ pacer->SetProbingEnabled(true);
+ pacer->SetPacingRates(kTargetRate * kPaceMultiplier, DataRate::Zero());
+ pacer->CreateProbeClusters(std::vector<ProbeClusterConfig>(
+ {{.at_time = clock_.CurrentTime(),
+ .target_data_rate = kFirstClusterRate,
+ .target_duration = TimeDelta::Millis(15),
+ .target_probe_count = 5,
+ .id = 0},
+ {.at_time = clock_.CurrentTime(),
+ .target_data_rate = kSecondClusterRate,
+ .target_duration = TimeDelta::Millis(15),
+ .target_probe_count = 5,
+ .id = 1}}));
+
+  // Send at least one packet so probing can initiate.
+ SendAndExpectPacket(pacer.get(), RtpPacketMediaType::kVideo, ssrc,
+ sequence_number, clock_.TimeInMilliseconds(), 250);
+ while (pacer->QueueSizePackets() > 0) {
+ AdvanceTimeUntil(pacer->NextSendTime());
+ pacer->ProcessPackets();
+ }
+
+ // Trigger probing.
+ std::vector<ProbeClusterConfig> probe_clusters = {
+ {.at_time = clock_.CurrentTime(),
+ .target_data_rate = DataRate::KilobitsPerSec(10000), // 10 Mbps.
+ .target_duration = TimeDelta::Millis(15),
+ .target_probe_count = 5,
+ .id = 3}};
+ pacer->CreateProbeClusters(probe_clusters);
+
+ // Time to next send time should be small.
+ EXPECT_LT(pacer->NextSendTime() - clock_.CurrentTime(),
+ PacingController::kPausedProcessInterval);
+
+ // Pause pacer, time to next send time should now be the pause process
+ // interval.
+ pacer->Pause();
+
+ EXPECT_EQ(pacer->NextSendTime() - clock_.CurrentTime(),
+ PacingController::kPausedProcessInterval);
+}
+
+TEST_F(PacingControllerTest, AudioNotPacedEvenWhenAccountedFor) {
+ const uint32_t kSsrc = 12345;
+ uint16_t sequence_number = 1234;
+ const size_t kPacketSize = 123;
+ auto pacer = std::make_unique<PacingController>(&clock_, &callback_, trials_);
+
+  // Account for audio - so that audio packets can cause pushback on other
+  // types such as video. Audio packets should still be passed through
+  // immediately, though ("WebRTC-Pacer-BlockAudio" needs to be enabled in
+  // order to pace audio packets).
+ pacer->SetAccountForAudioPackets(true);
+
+ // Set pacing rate to 1 packet/s, no padding.
+ pacer->SetPacingRates(DataSize::Bytes(kPacketSize) / TimeDelta::Seconds(1),
+ DataRate::Zero());
+
+ // Add and send an audio packet.
+ SendAndExpectPacket(pacer.get(), RtpPacketMediaType::kAudio, kSsrc,
+ sequence_number++, clock_.TimeInMilliseconds(),
+ kPacketSize);
+ pacer->ProcessPackets();
+
+ // Advance time, add another audio packet and process. It should be sent
+ // immediately.
+ clock_.AdvanceTimeMilliseconds(5);
+ SendAndExpectPacket(pacer.get(), RtpPacketMediaType::kAudio, kSsrc,
+ sequence_number++, clock_.TimeInMilliseconds(),
+ kPacketSize);
+ pacer->ProcessPackets();
+}
+
+TEST_F(PacingControllerTest,
+ PaddingResumesAfterSaturationEvenWithConcurrentAudio) {
+ const uint32_t kSsrc = 12345;
+ const DataRate kPacingDataRate = DataRate::KilobitsPerSec(125);
+ const DataRate kPaddingDataRate = DataRate::KilobitsPerSec(100);
+ const TimeDelta kMaxBufferInTime = TimeDelta::Millis(500);
+ const DataSize kPacketSize = DataSize::Bytes(130);
+ const TimeDelta kAudioPacketInterval = TimeDelta::Millis(20);
+
+  // In this test, we first send a burst of video in order to saturate the
+  // padding debt level.
+  // We then proceed to send audio at a bitrate that is slightly lower than
+  // the padding rate, meaning there will be a period with audio but no
+  // padding sent while the debt is draining; after that, audio and padding
+  // will be interleaved.
+
+ // Verify both with and without accounting for audio.
+ for (bool account_for_audio : {false, true}) {
+ uint16_t sequence_number = 1234;
+ MockPacketSender callback;
+ EXPECT_CALL(callback, SendPacket).Times(::testing::AnyNumber());
+ auto pacer =
+ std::make_unique<PacingController>(&clock_, &callback, trials_);
+ pacer->SetAccountForAudioPackets(account_for_audio);
+
+ // First, saturate the padding budget.
+ pacer->SetPacingRates(kPacingDataRate, kPaddingDataRate);
+
+ const TimeDelta kPaddingSaturationTime =
+ kMaxBufferInTime * kPaddingDataRate /
+ (kPacingDataRate - kPaddingDataRate);
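+    // With the constants above: 500 ms * 100 / (125 - 100) = 2 s of
+    // full-rate video, i.e. 2 s * 15625 bytes/s = 31250 bytes (27 packets
+    // of 1200 bytes), is needed to saturate the padding debt.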
+ const DataSize kVideoToSend = kPaddingSaturationTime * kPacingDataRate;
+ const DataSize kVideoPacketSize = DataSize::Bytes(1200);
+ DataSize video_sent = DataSize::Zero();
+ while (video_sent < kVideoToSend) {
+ pacer->EnqueuePacket(
+ BuildPacket(RtpPacketMediaType::kVideo, kSsrc, sequence_number++,
+ clock_.TimeInMilliseconds(), kVideoPacketSize.bytes()));
+ video_sent += kVideoPacketSize;
+ }
+ while (pacer->QueueSizePackets() > 0) {
+ AdvanceTimeUntil(pacer->NextSendTime());
+ pacer->ProcessPackets();
+ }
+
+ // Add a stream of audio packets at a rate slightly lower than the padding
+ // rate, once the padding debt is paid off we expect padding to be
+ // generated.
+ pacer->SetPacingRates(kPacingDataRate, kPaddingDataRate);
+ bool padding_seen = false;
+ EXPECT_CALL(callback, GeneratePadding).WillOnce([&](DataSize padding_size) {
+ padding_seen = true;
+ std::vector<std::unique_ptr<RtpPacketToSend>> padding_packets;
+ padding_packets.emplace_back(
+ BuildPacket(RtpPacketMediaType::kPadding, kSsrc, sequence_number++,
+ clock_.TimeInMilliseconds(), padding_size.bytes()));
+ return padding_packets;
+ });
+
+ Timestamp start_time = clock_.CurrentTime();
+ Timestamp last_audio_time = start_time;
+ while (!padding_seen) {
+ Timestamp now = clock_.CurrentTime();
+ Timestamp next_send_time = pacer->NextSendTime();
+ TimeDelta sleep_time =
+ std::min(next_send_time, last_audio_time + kAudioPacketInterval) -
+ now;
+ clock_.AdvanceTime(sleep_time);
+ while (clock_.CurrentTime() >= last_audio_time + kAudioPacketInterval) {
+ pacer->EnqueuePacket(
+ BuildPacket(RtpPacketMediaType::kAudio, kSsrc, sequence_number++,
+ clock_.TimeInMilliseconds(), kPacketSize.bytes()));
+ last_audio_time += kAudioPacketInterval;
+ }
+ pacer->ProcessPackets();
+ }
+
+ // Verify how long it took to drain the padding debt. Allow 2% error margin.
+ const DataRate kAudioDataRate = kPacketSize / kAudioPacketInterval;
+ const TimeDelta expected_drain_time =
+ account_for_audio ? (kMaxBufferInTime * kPaddingDataRate /
+ (kPaddingDataRate - kAudioDataRate))
+ : kMaxBufferInTime;
+ const TimeDelta actual_drain_time = clock_.CurrentTime() - start_time;
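+    // With the constants above, kAudioDataRate = 130 bytes / 20 ms = 52 kbps,
+    // so when accounting for audio the debt drains at only 100 - 52 = 48 kbps:
+    // 500 ms * 100 / 48 is roughly 1042 ms, versus 500 ms when audio is not
+    // accounted for.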
+ EXPECT_NEAR(actual_drain_time.ms(), expected_drain_time.ms(),
+ expected_drain_time.ms() * 0.02)
+ << " where account_for_audio = "
+ << (account_for_audio ? "true" : "false");
+ }
+}
+
+TEST_F(PacingControllerTest, AccountsForAudioEnqueueTime) {
+ const uint32_t kSsrc = 12345;
+ const DataRate kPacingDataRate = DataRate::KilobitsPerSec(125);
+ const DataRate kPaddingDataRate = DataRate::Zero();
+ const DataSize kPacketSize = DataSize::Bytes(130);
+ const TimeDelta kPacketPacingTime = kPacketSize / kPacingDataRate;
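+  // With these constants, kPacketPacingTime = 130 bytes / 15625 bytes/s =
+  // 8.32 ms.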
+  uint32_t sequence_number = 1;
+ auto pacer = std::make_unique<PacingController>(&clock_, &callback_, trials_);
+ // Audio not paced, but still accounted for in budget.
+ pacer->SetAccountForAudioPackets(true);
+ pacer->SetPacingRates(kPacingDataRate, kPaddingDataRate);
+
+  // Enqueue two audio packets and advance the clock to where one packet
+  // should already have drained the buffer, had they been sent immediately.
+  SendAndExpectPacket(pacer.get(), RtpPacketMediaType::kAudio, kSsrc,
+                      sequence_number++, clock_.TimeInMilliseconds(),
+                      kPacketSize.bytes());
+  SendAndExpectPacket(pacer.get(), RtpPacketMediaType::kAudio, kSsrc,
+                      sequence_number++, clock_.TimeInMilliseconds(),
+                      kPacketSize.bytes());
+ clock_.AdvanceTime(kPacketPacingTime);
+ // Now process and make sure both packets were sent.
+ pacer->ProcessPackets();
+ ::testing::Mock::VerifyAndClearExpectations(&callback_);
+
+  // Add a video packet. It can't be sent until the debt from the audio
+  // packets has been drained.
+  pacer->EnqueuePacket(
+      BuildPacket(RtpPacketMediaType::kVideo, kSsrc + 1, sequence_number++,
+                  clock_.TimeInMilliseconds(), kPacketSize.bytes()));
+ EXPECT_EQ(pacer->NextSendTime() - clock_.CurrentTime(), kPacketPacingTime);
+}
+
+TEST_F(PacingControllerTest, NextSendTimeAccountsForPadding) {
+ const uint32_t kSsrc = 12345;
+ const DataRate kPacingDataRate = DataRate::KilobitsPerSec(125);
+ const DataSize kPacketSize = DataSize::Bytes(130);
+ const TimeDelta kPacketPacingTime = kPacketSize / kPacingDataRate;
+  uint32_t sequence_number = 1;
+ auto pacer = std::make_unique<PacingController>(&clock_, &callback_, trials_);
+
+ // Start with no padding.
+ pacer->SetPacingRates(kPacingDataRate, DataRate::Zero());
+
+ // Send a single packet.
+  SendAndExpectPacket(pacer.get(), RtpPacketMediaType::kVideo, kSsrc,
+                      sequence_number++, clock_.TimeInMilliseconds(),
+                      kPacketSize.bytes());
+ pacer->ProcessPackets();
+ ::testing::Mock::VerifyAndClearExpectations(&callback_);
+
+ // With current conditions, no need to wake until next keep-alive.
+ EXPECT_EQ(pacer->NextSendTime() - clock_.CurrentTime(),
+ PacingController::kPausedProcessInterval);
+
+  // Enqueue a new packet; it can't be sent until the previous buffer has
+  // drained.
+  SendAndExpectPacket(pacer.get(), RtpPacketMediaType::kVideo, kSsrc,
+                      sequence_number++, clock_.TimeInMilliseconds(),
+                      kPacketSize.bytes());
+ EXPECT_EQ(pacer->NextSendTime() - clock_.CurrentTime(), kPacketPacingTime);
+ clock_.AdvanceTime(kPacketPacingTime);
+ pacer->ProcessPackets();
+ ::testing::Mock::VerifyAndClearExpectations(&callback_);
+
+ // With current conditions, again no need to wake until next keep-alive.
+ EXPECT_EQ(pacer->NextSendTime() - clock_.CurrentTime(),
+ PacingController::kPausedProcessInterval);
+
+ // Set a non-zero padding rate. Padding also can't be sent until
+ // previous debt has cleared. Since padding was disabled before, there
+ // currently is no padding debt.
+ pacer->SetPacingRates(kPacingDataRate, kPacingDataRate / 2);
+ EXPECT_EQ(pacer->NextSendTime() - clock_.CurrentTime(), kPacketPacingTime);
+
+ // Advance time, expect padding.
+ EXPECT_CALL(callback_, SendPadding).WillOnce(Return(kPacketSize.bytes()));
+ clock_.AdvanceTime(kPacketPacingTime);
+ pacer->ProcessPackets();
+ ::testing::Mock::VerifyAndClearExpectations(&callback_);
+
+ // Since padding rate is half of pacing rate, next time we can send
+ // padding is double the packet pacing time.
+ EXPECT_EQ(pacer->NextSendTime() - clock_.CurrentTime(),
+ kPacketPacingTime * 2);
+
+  // Insert a packet to be sent; this takes precedence again.
+  pacer->EnqueuePacket(
+      BuildPacket(RtpPacketMediaType::kVideo, kSsrc, sequence_number++,
+                  clock_.TimeInMilliseconds(), kPacketSize.bytes()));
+ EXPECT_EQ(pacer->NextSendTime() - clock_.CurrentTime(), kPacketPacingTime);
+}
+
+TEST_F(PacingControllerTest, PaddingTargetAccountsForPaddingRate) {
+ // Target size for a padding packet is 5ms * padding rate.
+ const TimeDelta kPaddingTarget = TimeDelta::Millis(5);
+ srand(0);
+ // Need to initialize PacingController after we initialize clock.
+ auto pacer = std::make_unique<PacingController>(&clock_, &callback_, trials_);
+
+ const uint32_t kSsrc = 12345;
+ const DataRate kPacingDataRate = DataRate::KilobitsPerSec(125);
+ const DataSize kPacketSize = DataSize::Bytes(130);
+
+  uint32_t sequence_number = 1;
+
+ // Start with pacing and padding rate equal.
+ pacer->SetPacingRates(kPacingDataRate, kPacingDataRate);
+
+ // Send a single packet.
+  SendAndExpectPacket(pacer.get(), RtpPacketMediaType::kVideo, kSsrc,
+                      sequence_number++, clock_.TimeInMilliseconds(),
+                      kPacketSize.bytes());
+ AdvanceTimeUntil(pacer->NextSendTime());
+ pacer->ProcessPackets();
+ ::testing::Mock::VerifyAndClearExpectations(&callback_);
+
+ size_t expected_padding_target_bytes =
+ (kPaddingTarget * kPacingDataRate).bytes();
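+  // With the rates above, that is 5 ms * 15625 bytes/s = 78 bytes (truncated
+  // to whole bytes).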
+ EXPECT_CALL(callback_, SendPadding(expected_padding_target_bytes))
+ .WillOnce(Return(expected_padding_target_bytes));
+ AdvanceTimeUntil(pacer->NextSendTime());
+ pacer->ProcessPackets();
+
+ // Half the padding rate - expect half the padding target.
+ pacer->SetPacingRates(kPacingDataRate, kPacingDataRate / 2);
+ EXPECT_CALL(callback_, SendPadding(expected_padding_target_bytes / 2))
+ .WillOnce(Return(expected_padding_target_bytes / 2));
+ AdvanceTimeUntil(pacer->NextSendTime());
+ pacer->ProcessPackets();
+}
+
+TEST_F(PacingControllerTest, SendsFecPackets) {
+ const uint32_t kSsrc = 12345;
+ const uint32_t kFlexSsrc = 54321;
+ uint16_t sequence_number = 1234;
+ uint16_t flexfec_sequence_number = 4321;
+ const size_t kPacketSize = 123;
+ auto pacer = std::make_unique<PacingController>(&clock_, &callback_, trials_);
+
+ // Set pacing rate to 1000 packet/s, no padding.
+ pacer->SetPacingRates(
+ DataSize::Bytes(1000 * kPacketSize) / TimeDelta::Seconds(1),
+ DataRate::Zero());
+
+ int64_t now = clock_.TimeInMilliseconds();
+ pacer->EnqueuePacket(BuildPacket(RtpPacketMediaType::kVideo, kSsrc,
+ sequence_number, now, kPacketSize));
+ EXPECT_CALL(callback_, SendPacket(kSsrc, sequence_number, now, false, false));
+ EXPECT_CALL(callback_, FetchFec).WillOnce([&]() {
+ EXPECT_CALL(callback_, SendPacket(kFlexSsrc, flexfec_sequence_number, now,
+ false, false));
+ EXPECT_CALL(callback_, FetchFec);
+ std::vector<std::unique_ptr<RtpPacketToSend>> fec_packets;
+ fec_packets.push_back(
+ BuildPacket(RtpPacketMediaType::kForwardErrorCorrection, kFlexSsrc,
+ flexfec_sequence_number, now, kPacketSize));
+ return fec_packets;
+ });
+ AdvanceTimeUntil(pacer->NextSendTime());
+ pacer->ProcessPackets();
+ AdvanceTimeUntil(pacer->NextSendTime());
+ pacer->ProcessPackets();
+}
+
+TEST_F(PacingControllerTest, GapInPacingDoesntAccumulateBudget) {
+ const uint32_t kSsrc = 12345;
+ uint16_t sequence_number = 1234;
+  const DataSize kPacketSize = DataSize::Bytes(250);
+ const TimeDelta kPacketSendTime = TimeDelta::Millis(15);
+ auto pacer = std::make_unique<PacingController>(&clock_, &callback_, trials_);
+
+  pacer->SetPacingRates(kPacketSize / kPacketSendTime,
+ /*padding_rate=*/DataRate::Zero());
+
+ // Send an initial packet.
+  SendAndExpectPacket(pacer.get(), RtpPacketMediaType::kVideo, kSsrc,
+                      sequence_number++, clock_.TimeInMilliseconds(),
+                      kPacketSize.bytes());
+ pacer->ProcessPackets();
+ ::testing::Mock::VerifyAndClearExpectations(&callback_);
+
+ // Advance time kPacketSendTime past where the media debt should be 0.
+ clock_.AdvanceTime(2 * kPacketSendTime);
+
+  // Enqueue two new packets. Expect only one to be sent on ProcessPackets().
+  pacer->EnqueuePacket(
+      BuildPacket(RtpPacketMediaType::kVideo, kSsrc, sequence_number + 1,
+                  clock_.TimeInMilliseconds(), kPacketSize.bytes()));
+  pacer->EnqueuePacket(
+      BuildPacket(RtpPacketMediaType::kVideo, kSsrc, sequence_number + 2,
+                  clock_.TimeInMilliseconds(), kPacketSize.bytes()));
+ EXPECT_CALL(callback_, SendPacket(kSsrc, sequence_number + 1,
+ clock_.TimeInMilliseconds(), false, false));
+ pacer->ProcessPackets();
+}
+
+TEST_F(PacingControllerTest, HandlesSubMicrosecondSendIntervals) {
+ static constexpr DataSize kPacketSize = DataSize::Bytes(1);
+ static constexpr TimeDelta kPacketSendTime = TimeDelta::Micros(1);
+ auto pacer = std::make_unique<PacingController>(&clock_, &callback_, trials_);
+
+ // Set pacing rate such that a packet is sent in 0.5us.
+ pacer->SetPacingRates(/*pacing_rate=*/2 * kPacketSize / kPacketSendTime,
+ /*padding_rate=*/DataRate::Zero());
+
+  // Enqueue three packets; the first two should be sent immediately, while
+  // the third should cause a non-zero delta to the next process time.
+ EXPECT_CALL(callback_, SendPacket).Times(2);
+ for (int i = 0; i < 3; ++i) {
+ pacer->EnqueuePacket(BuildPacket(
+ RtpPacketMediaType::kVideo, /*ssrc=*/12345, /*sequence_number=*/i,
+ clock_.TimeInMilliseconds(), kPacketSize.bytes()));
+ }
+ pacer->ProcessPackets();
+
+ EXPECT_GT(pacer->NextSendTime(), clock_.CurrentTime());
+}
+
+TEST_F(PacingControllerTest, HandlesSubMicrosecondPaddingInterval) {
+ static constexpr DataSize kPacketSize = DataSize::Bytes(1);
+ static constexpr TimeDelta kPacketSendTime = TimeDelta::Micros(1);
+ auto pacer = std::make_unique<PacingController>(&clock_, &callback_, trials_);
+
+ // Set both pacing and padding rates to 1 byte per 0.5us.
+ pacer->SetPacingRates(/*pacing_rate=*/2 * kPacketSize / kPacketSendTime,
+ /*padding_rate=*/2 * kPacketSize / kPacketSendTime);
+
+ // Enqueue and send one packet.
+ EXPECT_CALL(callback_, SendPacket);
+ pacer->EnqueuePacket(BuildPacket(
+ RtpPacketMediaType::kVideo, /*ssrc=*/12345, /*sequence_number=*/1234,
+ clock_.TimeInMilliseconds(), kPacketSize.bytes()));
+ pacer->ProcessPackets();
+
+  // The padding debt is now 1 byte, and the pacing time for that is lower
+  // than the precision of a Timestamp tick. Make sure the pacer still
+  // indicates that a non-zero sleep time is needed until the next process.
+ EXPECT_GT(pacer->NextSendTime(), clock_.CurrentTime());
+}
+
+TEST_F(PacingControllerTest, SendsPacketsInBurstImmediately) {
+ constexpr TimeDelta kMaxDelay = TimeDelta::Millis(20);
+ PacingController pacer(&clock_, &callback_, trials_);
+ pacer.SetSendBurstInterval(kMaxDelay);
+ pacer.SetPacingRates(DataRate::BytesPerSec(10000), DataRate::Zero());
+
+  // Max allowed send burst size is 10000 * 20 / 1000 = 200 bytes.
+ pacer.EnqueuePacket(video_.BuildNextPacket(100));
+ pacer.EnqueuePacket(video_.BuildNextPacket(100));
+ pacer.EnqueuePacket(video_.BuildNextPacket(100));
+ pacer.ProcessPackets();
+ EXPECT_EQ(pacer.QueueSizePackets(), 1u);
+ EXPECT_EQ(pacer.NextSendTime(), clock_.CurrentTime() + kMaxDelay);
+
+ AdvanceTimeUntil(pacer.NextSendTime());
+ pacer.ProcessPackets();
+ EXPECT_EQ(pacer.QueueSizePackets(), 0u);
+}
+
+TEST_F(PacingControllerTest, SendsPacketsInBurstEvenIfNotEnqueuedAtSameTime) {
+ constexpr TimeDelta kMaxDelay = TimeDelta::Millis(20);
+ PacingController pacer(&clock_, &callback_, trials_);
+ pacer.SetSendBurstInterval(kMaxDelay);
+ pacer.SetPacingRates(DataRate::BytesPerSec(10000), DataRate::Zero());
+ pacer.EnqueuePacket(video_.BuildNextPacket(200));
+ EXPECT_EQ(pacer.NextSendTime(), clock_.CurrentTime());
+ pacer.ProcessPackets();
+ clock_.AdvanceTime(TimeDelta::Millis(1));
+ pacer.EnqueuePacket(video_.BuildNextPacket(200));
+ EXPECT_EQ(pacer.NextSendTime(), clock_.CurrentTime());
+ pacer.ProcessPackets();
+ EXPECT_EQ(pacer.QueueSizePackets(), 0u);
+}
+
+TEST_F(PacingControllerTest, RespectsTargetRateWhenSendingPacketsInBursts) {
+ PacingController pacer(&clock_, &callback_, trials_);
+ pacer.SetSendBurstInterval(TimeDelta::Millis(20));
+ pacer.SetAccountForAudioPackets(true);
+ pacer.SetPacingRates(DataRate::KilobitsPerSec(1000), DataRate::Zero());
+ Timestamp start_time = clock_.CurrentTime();
+  // Inject 100 packets of 1000 bytes each over 100 ms.
+  // Expect only 1 Mbps * 100 ms / (8 * 1000 bytes) = 12.5, i.e. 12 packets,
+  // to be sent. Packets are sent in bursts. Each burst is 3 packets *
+  // 1000 bytes at 1 Mbps = 24 ms long. Thus, expect 4 bursts.
+ EXPECT_CALL(callback_, SendPacket).Times(12);
+ int number_of_bursts = 0;
+ while (clock_.CurrentTime() < start_time + TimeDelta::Millis(100)) {
+ pacer.EnqueuePacket(video_.BuildNextPacket(1000));
+ pacer.EnqueuePacket(video_.BuildNextPacket(1000));
+ pacer.EnqueuePacket(video_.BuildNextPacket(1000));
+ pacer.EnqueuePacket(video_.BuildNextPacket(1000));
+ pacer.EnqueuePacket(video_.BuildNextPacket(1000));
+ if (pacer.NextSendTime() <= clock_.CurrentTime()) {
+ pacer.ProcessPackets();
+ ++number_of_bursts;
+ }
+ clock_.AdvanceTime(TimeDelta::Millis(5));
+ }
+ EXPECT_EQ(pacer.QueueSizePackets(), 88u);
+ EXPECT_EQ(number_of_bursts, 4);
+}
+
+TEST_F(PacingControllerTest, RespectsQueueTimeLimit) {
+ static constexpr DataSize kPacketSize = DataSize::Bytes(100);
+ static constexpr DataRate kNominalPacingRate = DataRate::KilobitsPerSec(200);
+ static constexpr TimeDelta kPacketPacingTime =
+ kPacketSize / kNominalPacingRate;
+ static constexpr TimeDelta kQueueTimeLimit = TimeDelta::Millis(1000);
+
+ PacingController pacer(&clock_, &callback_, trials_);
+ pacer.SetPacingRates(kNominalPacingRate, /*padding_rate=*/DataRate::Zero());
+ pacer.SetQueueTimeLimit(kQueueTimeLimit);
+
+ // Fill pacer up to queue time limit.
+ static constexpr int kNumPackets = kQueueTimeLimit / kPacketPacingTime;
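+  // With the constants above, kPacketPacingTime = 100 bytes / 25000 bytes/s =
+  // 4 ms, so kNumPackets = 1000 ms / 4 ms = 250.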
+ for (int i = 0; i < kNumPackets; ++i) {
+ pacer.EnqueuePacket(video_.BuildNextPacket(kPacketSize.bytes()));
+ }
+ EXPECT_EQ(pacer.ExpectedQueueTime(), kQueueTimeLimit);
+ EXPECT_EQ(pacer.pacing_rate(), kNominalPacingRate);
+
+ // Double the amount of packets in the queue, the queue time limit should
+ // effectively double the pacing rate in response.
+ for (int i = 0; i < kNumPackets; ++i) {
+ pacer.EnqueuePacket(video_.BuildNextPacket(kPacketSize.bytes()));
+ }
+ EXPECT_EQ(pacer.ExpectedQueueTime(), kQueueTimeLimit);
+ EXPECT_EQ(pacer.pacing_rate(), 2 * kNominalPacingRate);
+
+ // Send all the packets, should take as long as the queue time limit.
+ Timestamp start_time = clock_.CurrentTime();
+ while (pacer.QueueSizePackets() > 0) {
+ AdvanceTimeUntil(pacer.NextSendTime());
+ pacer.ProcessPackets();
+ }
+ EXPECT_EQ(clock_.CurrentTime() - start_time, kQueueTimeLimit);
+
+ // We're back in a normal state - pacing rate should be back to previous
+ // levels.
+ EXPECT_EQ(pacer.pacing_rate(), kNominalPacingRate);
+}
+
+} // namespace
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/pacing/pacing_gn/moz.build b/third_party/libwebrtc/modules/pacing/pacing_gn/moz.build
new file mode 100644
index 0000000000..eb2187efdc
--- /dev/null
+++ b/third_party/libwebrtc/modules/pacing/pacing_gn/moz.build
@@ -0,0 +1,222 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+SOURCES += [
+ "/third_party/libwebrtc/modules/pacing/prioritized_packet_queue.cc"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/pacing/bitrate_prober.cc",
+ "/third_party/libwebrtc/modules/pacing/pacing_controller.cc",
+ "/third_party/libwebrtc/modules/pacing/packet_router.cc",
+ "/third_party/libwebrtc/modules/pacing/round_robin_packet_queue.cc",
+ "/third_party/libwebrtc/modules/pacing/task_queue_paced_sender.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "GLESv2",
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "dl",
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("pacing_gn")
diff --git a/third_party/libwebrtc/modules/pacing/packet_router.cc b/third_party/libwebrtc/modules/pacing/packet_router.cc
new file mode 100644
index 0000000000..a09f191bbd
--- /dev/null
+++ b/third_party/libwebrtc/modules/pacing/packet_router.cc
@@ -0,0 +1,335 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/pacing/packet_router.h"
+
+#include <algorithm>
+#include <cstdint>
+#include <limits>
+#include <memory>
+#include <utility>
+
+#include "absl/types/optional.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "modules/rtp_rtcp/source/rtcp_packet.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/transport_feedback.h"
+#include "modules/rtp_rtcp/source/rtp_rtcp_interface.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/system/unused.h"
+#include "rtc_base/time_utils.h"
+#include "rtc_base/trace_event.h"
+
+namespace webrtc {
+
+PacketRouter::PacketRouter() : PacketRouter(0) {}
+
+PacketRouter::PacketRouter(uint16_t start_transport_seq)
+ : last_send_module_(nullptr),
+ active_remb_module_(nullptr),
+ transport_seq_(start_transport_seq) {}
+
+PacketRouter::~PacketRouter() {
+ RTC_DCHECK(send_modules_map_.empty());
+ RTC_DCHECK(send_modules_list_.empty());
+ RTC_DCHECK(rtcp_feedback_senders_.empty());
+ RTC_DCHECK(sender_remb_candidates_.empty());
+ RTC_DCHECK(receiver_remb_candidates_.empty());
+ RTC_DCHECK(active_remb_module_ == nullptr);
+}
+
+void PacketRouter::AddSendRtpModule(RtpRtcpInterface* rtp_module,
+ bool remb_candidate) {
+ MutexLock lock(&modules_mutex_);
+
+ AddSendRtpModuleToMap(rtp_module, rtp_module->SSRC());
+ if (absl::optional<uint32_t> rtx_ssrc = rtp_module->RtxSsrc()) {
+ AddSendRtpModuleToMap(rtp_module, *rtx_ssrc);
+ }
+ if (absl::optional<uint32_t> flexfec_ssrc = rtp_module->FlexfecSsrc()) {
+ AddSendRtpModuleToMap(rtp_module, *flexfec_ssrc);
+ }
+
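+  // Prefer this module for padding generation: RTX payload padding is more
+  // useful than plain padding packets (see GeneratePadding()).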
+ if (rtp_module->SupportsRtxPayloadPadding()) {
+ last_send_module_ = rtp_module;
+ }
+
+ if (remb_candidate) {
+ AddRembModuleCandidate(rtp_module, /* media_sender = */ true);
+ }
+}
+
+void PacketRouter::AddSendRtpModuleToMap(RtpRtcpInterface* rtp_module,
+ uint32_t ssrc) {
+ RTC_DCHECK(send_modules_map_.find(ssrc) == send_modules_map_.end());
+
+ // Signal to module that the pacer thread is attached and can send packets.
+ rtp_module->OnPacketSendingThreadSwitched();
+
+ // Always keep the audio modules at the back of the list, so that when we
+ // iterate over the modules in order to find one that can send padding we
+  // will prioritize video. This is important to make sure padding packets
+  // are counted into the bandwidth estimate properly.
+ if (rtp_module->IsAudioConfigured()) {
+ send_modules_list_.push_back(rtp_module);
+ } else {
+ send_modules_list_.push_front(rtp_module);
+ }
+ send_modules_map_[ssrc] = rtp_module;
+}
+
+void PacketRouter::RemoveSendRtpModuleFromMap(uint32_t ssrc) {
+ auto kv = send_modules_map_.find(ssrc);
+ RTC_DCHECK(kv != send_modules_map_.end());
+ send_modules_list_.remove(kv->second);
+ send_modules_map_.erase(kv);
+}
+
+void PacketRouter::RemoveSendRtpModule(RtpRtcpInterface* rtp_module) {
+ MutexLock lock(&modules_mutex_);
+ MaybeRemoveRembModuleCandidate(rtp_module, /* media_sender = */ true);
+
+ RemoveSendRtpModuleFromMap(rtp_module->SSRC());
+ if (absl::optional<uint32_t> rtx_ssrc = rtp_module->RtxSsrc()) {
+ RemoveSendRtpModuleFromMap(*rtx_ssrc);
+ }
+ if (absl::optional<uint32_t> flexfec_ssrc = rtp_module->FlexfecSsrc()) {
+ RemoveSendRtpModuleFromMap(*flexfec_ssrc);
+ }
+
+ if (last_send_module_ == rtp_module) {
+ last_send_module_ = nullptr;
+ }
+ rtp_module->OnPacketSendingThreadSwitched();
+}
+
+void PacketRouter::AddReceiveRtpModule(RtcpFeedbackSenderInterface* rtcp_sender,
+ bool remb_candidate) {
+ MutexLock lock(&modules_mutex_);
+ RTC_DCHECK(std::find(rtcp_feedback_senders_.begin(),
+ rtcp_feedback_senders_.end(),
+ rtcp_sender) == rtcp_feedback_senders_.end());
+
+ rtcp_feedback_senders_.push_back(rtcp_sender);
+
+ if (remb_candidate) {
+ AddRembModuleCandidate(rtcp_sender, /* media_sender = */ false);
+ }
+}
+
+void PacketRouter::RemoveReceiveRtpModule(
+ RtcpFeedbackSenderInterface* rtcp_sender) {
+ MutexLock lock(&modules_mutex_);
+ MaybeRemoveRembModuleCandidate(rtcp_sender, /* media_sender = */ false);
+ auto it = std::find(rtcp_feedback_senders_.begin(),
+ rtcp_feedback_senders_.end(), rtcp_sender);
+ RTC_DCHECK(it != rtcp_feedback_senders_.end());
+ rtcp_feedback_senders_.erase(it);
+}
+
+void PacketRouter::SendPacket(std::unique_ptr<RtpPacketToSend> packet,
+ const PacedPacketInfo& cluster_info) {
+ TRACE_EVENT2(TRACE_DISABLED_BY_DEFAULT("webrtc"), "PacketRouter::SendPacket",
+ "sequence_number", packet->SequenceNumber(), "rtp_timestamp",
+ packet->Timestamp());
+
+ MutexLock lock(&modules_mutex_);
+ // With the new pacer code path, transport sequence numbers are only set here,
+ // on the pacer thread. Therefore we don't need atomics/synchronization.
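+  // The extension is written tentatively with the next number; `transport_seq_`
+  // itself is only advanced once the RTP module accepts the packet, so a
+  // failed send does not consume a sequence number.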
+ bool assign_transport_sequence_number =
+ packet->HasExtension<TransportSequenceNumber>();
+ if (assign_transport_sequence_number) {
+ packet->SetExtension<TransportSequenceNumber>((transport_seq_ + 1) &
+ 0xFFFF);
+ }
+
+ uint32_t ssrc = packet->Ssrc();
+ auto kv = send_modules_map_.find(ssrc);
+ if (kv == send_modules_map_.end()) {
+ RTC_LOG(LS_WARNING)
+ << "Failed to send packet, matching RTP module not found "
+ "or transport error. SSRC = "
+ << packet->Ssrc() << ", sequence number " << packet->SequenceNumber();
+ return;
+ }
+
+ RtpRtcpInterface* rtp_module = kv->second;
+ if (!rtp_module->TrySendPacket(packet.get(), cluster_info)) {
+ RTC_LOG(LS_WARNING) << "Failed to send packet, rejected by RTP module.";
+ return;
+ }
+
+ // Sending succeeded.
+
+ if (assign_transport_sequence_number) {
+ ++transport_seq_;
+ }
+
+ if (rtp_module->SupportsRtxPayloadPadding()) {
+ // This is now the last module to send media, and has the desired
+    // properties needed for payload-based padding. Cache it for later use.
+ last_send_module_ = rtp_module;
+ }
+
+ for (auto& packet : rtp_module->FetchFecPackets()) {
+ pending_fec_packets_.push_back(std::move(packet));
+ }
+}
+
+std::vector<std::unique_ptr<RtpPacketToSend>> PacketRouter::FetchFec() {
+ MutexLock lock(&modules_mutex_);
+ std::vector<std::unique_ptr<RtpPacketToSend>> fec_packets =
+ std::move(pending_fec_packets_);
+ pending_fec_packets_.clear();
+ return fec_packets;
+}
+
+std::vector<std::unique_ptr<RtpPacketToSend>> PacketRouter::GeneratePadding(
+ DataSize size) {
+ TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("webrtc"),
+ "PacketRouter::GeneratePadding", "bytes", size.bytes());
+
+ MutexLock lock(&modules_mutex_);
+  // First try the last RTP module to have sent media. This increases the
+  // chance that any payload-based padding will be useful, as padding will be
+  // somewhat distributed over modules according to the packet rate, even if it
+ // will be more skewed towards the highest bitrate stream. At the very least
+ // this prevents sending payload padding on a disabled stream where it's
+ // guaranteed not to be useful.
+ std::vector<std::unique_ptr<RtpPacketToSend>> padding_packets;
+ if (last_send_module_ != nullptr &&
+ last_send_module_->SupportsRtxPayloadPadding()) {
+ padding_packets = last_send_module_->GeneratePadding(size.bytes());
+ }
+
+ if (padding_packets.empty()) {
+    // Iterate over all send modules. Video modules will be at the front
+ // and so will be prioritized. This is important since audio packets may not
+ // be taken into account by the bandwidth estimator, e.g. in FF.
+ for (RtpRtcpInterface* rtp_module : send_modules_list_) {
+ if (rtp_module->SupportsPadding()) {
+ padding_packets = rtp_module->GeneratePadding(size.bytes());
+ if (!padding_packets.empty()) {
+ last_send_module_ = rtp_module;
+ break;
+ }
+ }
+ }
+ }
+
+ for (auto& packet : padding_packets) {
+ RTC_UNUSED(packet);
+ TRACE_EVENT2(TRACE_DISABLED_BY_DEFAULT("webrtc"),
+ "PacketRouter::GeneratePadding::Loop", "sequence_number",
+ packet->SequenceNumber(), "rtp_timestamp",
+ packet->Timestamp());
+ }
+
+ return padding_packets;
+}
+
+uint16_t PacketRouter::CurrentTransportSequenceNumber() const {
+ MutexLock lock(&modules_mutex_);
+ return transport_seq_ & 0xFFFF;
+}
+
+void PacketRouter::SendRemb(int64_t bitrate_bps, std::vector<uint32_t> ssrcs) {
+ MutexLock lock(&modules_mutex_);
+
+ if (!active_remb_module_) {
+ return;
+ }
+
+ // The Add* and Remove* methods above ensure that REMB is disabled on all
+  // other modules, because otherwise they would send REMB with stale info.
+ active_remb_module_->SetRemb(bitrate_bps, std::move(ssrcs));
+}
+
+void PacketRouter::SendCombinedRtcpPacket(
+ std::vector<std::unique_ptr<rtcp::RtcpPacket>> packets) {
+ MutexLock lock(&modules_mutex_);
+
+ // Prefer send modules.
+ for (RtpRtcpInterface* rtp_module : send_modules_list_) {
+ if (rtp_module->RTCP() == RtcpMode::kOff) {
+ continue;
+ }
+ rtp_module->SendCombinedRtcpPacket(std::move(packets));
+ return;
+ }
+
+ if (rtcp_feedback_senders_.empty()) {
+ return;
+ }
+ auto* rtcp_sender = rtcp_feedback_senders_[0];
+ rtcp_sender->SendCombinedRtcpPacket(std::move(packets));
+}
+
+void PacketRouter::AddRembModuleCandidate(
+ RtcpFeedbackSenderInterface* candidate_module,
+ bool media_sender) {
+ RTC_DCHECK(candidate_module);
+ std::vector<RtcpFeedbackSenderInterface*>& candidates =
+ media_sender ? sender_remb_candidates_ : receiver_remb_candidates_;
+ RTC_DCHECK(std::find(candidates.cbegin(), candidates.cend(),
+ candidate_module) == candidates.cend());
+ candidates.push_back(candidate_module);
+ DetermineActiveRembModule();
+}
+
+void PacketRouter::MaybeRemoveRembModuleCandidate(
+ RtcpFeedbackSenderInterface* candidate_module,
+ bool media_sender) {
+ RTC_DCHECK(candidate_module);
+ std::vector<RtcpFeedbackSenderInterface*>& candidates =
+ media_sender ? sender_remb_candidates_ : receiver_remb_candidates_;
+ auto it = std::find(candidates.begin(), candidates.end(), candidate_module);
+
+ if (it == candidates.end()) {
+ return; // Function called due to removal of non-REMB-candidate module.
+ }
+
+ if (*it == active_remb_module_) {
+ UnsetActiveRembModule();
+ }
+ candidates.erase(it);
+ DetermineActiveRembModule();
+}
+
+void PacketRouter::UnsetActiveRembModule() {
+ RTC_CHECK(active_remb_module_);
+ active_remb_module_->UnsetRemb();
+ active_remb_module_ = nullptr;
+}
+
+void PacketRouter::DetermineActiveRembModule() {
+ // Sender modules take precedence over receiver modules, because SRs (sender
+  // reports) are sent more frequently than RRs (receiver reports).
+ // When adding the first sender module, we should change the active REMB
+ // module to be that. Otherwise, we remain with the current active module.
+
+ RtcpFeedbackSenderInterface* new_active_remb_module;
+
+ if (!sender_remb_candidates_.empty()) {
+ new_active_remb_module = sender_remb_candidates_.front();
+ } else if (!receiver_remb_candidates_.empty()) {
+ new_active_remb_module = receiver_remb_candidates_.front();
+ } else {
+ new_active_remb_module = nullptr;
+ }
+
+ if (new_active_remb_module != active_remb_module_ && active_remb_module_) {
+ UnsetActiveRembModule();
+ }
+
+ active_remb_module_ = new_active_remb_module;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/pacing/packet_router.h b/third_party/libwebrtc/modules/pacing/packet_router.h
new file mode 100644
index 0000000000..11d8979052
--- /dev/null
+++ b/third_party/libwebrtc/modules/pacing/packet_router.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_PACING_PACKET_ROUTER_H_
+#define MODULES_PACING_PACKET_ROUTER_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <list>
+#include <memory>
+#include <unordered_map>
+#include <utility>
+#include <vector>
+
+#include "api/transport/network_types.h"
+#include "modules/pacing/pacing_controller.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "modules/rtp_rtcp/source/rtcp_packet.h"
+#include "modules/rtp_rtcp/source/rtp_packet_to_send.h"
+#include "rtc_base/synchronization/mutex.h"
+#include "rtc_base/thread_annotations.h"
+
+namespace webrtc {
+
+class RtpRtcpInterface;
+
+// PacketRouter keeps track of rtp send modules to support the pacer.
+// In addition, it handles feedback messages, which are sent on a send
+// module if possible (sender report), otherwise on a receive module
+// (receiver report). For the latter case, we also keep track of the
+// receive modules.
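+//
+// A minimal usage sketch (hypothetical; `video_rtp` stands in for a real
+// RtpRtcpInterface instance owned by the caller):
+//
+//   PacketRouter router;
+//   router.AddSendRtpModule(video_rtp, /*remb_candidate=*/true);
+//   // ... the pacer delivers packets via router.SendPacket() ...
+//   router.RemoveSendRtpModule(video_rtp);  // Before the router is destroyed.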
+class PacketRouter : public PacingController::PacketSender {
+ public:
+ PacketRouter();
+ explicit PacketRouter(uint16_t start_transport_seq);
+ ~PacketRouter() override;
+
+ PacketRouter(const PacketRouter&) = delete;
+ PacketRouter& operator=(const PacketRouter&) = delete;
+
+ void AddSendRtpModule(RtpRtcpInterface* rtp_module, bool remb_candidate);
+ void RemoveSendRtpModule(RtpRtcpInterface* rtp_module);
+
+ void AddReceiveRtpModule(RtcpFeedbackSenderInterface* rtcp_sender,
+ bool remb_candidate);
+ void RemoveReceiveRtpModule(RtcpFeedbackSenderInterface* rtcp_sender);
+
+ void SendPacket(std::unique_ptr<RtpPacketToSend> packet,
+ const PacedPacketInfo& cluster_info) override;
+ std::vector<std::unique_ptr<RtpPacketToSend>> FetchFec() override;
+ std::vector<std::unique_ptr<RtpPacketToSend>> GeneratePadding(
+ DataSize size) override;
+
+ uint16_t CurrentTransportSequenceNumber() const;
+
+ // Send REMB feedback.
+ void SendRemb(int64_t bitrate_bps, std::vector<uint32_t> ssrcs);
+
+ // Sends `packets` in one or more IP packets.
+ void SendCombinedRtcpPacket(
+ std::vector<std::unique_ptr<rtcp::RtcpPacket>> packets);
+
+ private:
+ void AddRembModuleCandidate(RtcpFeedbackSenderInterface* candidate_module,
+ bool media_sender)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(modules_mutex_);
+ void MaybeRemoveRembModuleCandidate(
+ RtcpFeedbackSenderInterface* candidate_module,
+ bool media_sender) RTC_EXCLUSIVE_LOCKS_REQUIRED(modules_mutex_);
+ void UnsetActiveRembModule() RTC_EXCLUSIVE_LOCKS_REQUIRED(modules_mutex_);
+ void DetermineActiveRembModule() RTC_EXCLUSIVE_LOCKS_REQUIRED(modules_mutex_);
+ void AddSendRtpModuleToMap(RtpRtcpInterface* rtp_module, uint32_t ssrc)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(modules_mutex_);
+ void RemoveSendRtpModuleFromMap(uint32_t ssrc)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(modules_mutex_);
+
+ mutable Mutex modules_mutex_;
+  // Map from SSRC to the RtpRtcpInterface module that sends on it.
+ std::unordered_map<uint32_t, RtpRtcpInterface*> send_modules_map_
+ RTC_GUARDED_BY(modules_mutex_);
+ std::list<RtpRtcpInterface*> send_modules_list_
+ RTC_GUARDED_BY(modules_mutex_);
+ // The last module used to send media.
+ RtpRtcpInterface* last_send_module_ RTC_GUARDED_BY(modules_mutex_);
+ // Rtcp modules of the rtp receivers.
+ std::vector<RtcpFeedbackSenderInterface*> rtcp_feedback_senders_
+ RTC_GUARDED_BY(modules_mutex_);
+
+ // Candidates for the REMB module can be RTP sender/receiver modules, with
+ // the sender modules taking precedence.
+ std::vector<RtcpFeedbackSenderInterface*> sender_remb_candidates_
+ RTC_GUARDED_BY(modules_mutex_);
+ std::vector<RtcpFeedbackSenderInterface*> receiver_remb_candidates_
+ RTC_GUARDED_BY(modules_mutex_);
+ RtcpFeedbackSenderInterface* active_remb_module_
+ RTC_GUARDED_BY(modules_mutex_);
+
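+  // Transport-wide sequence number counter. Kept unwrapped internally and
+  // masked to 16 bits when written to the packet extension.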
+ uint64_t transport_seq_ RTC_GUARDED_BY(modules_mutex_);
+
+ // TODO(bugs.webrtc.org/10809): Replace lock with a sequence checker once the
+ // process thread is gone.
+ std::vector<std::unique_ptr<RtpPacketToSend>> pending_fec_packets_
+ RTC_GUARDED_BY(modules_mutex_);
+};
+} // namespace webrtc
+#endif // MODULES_PACING_PACKET_ROUTER_H_
diff --git a/third_party/libwebrtc/modules/pacing/packet_router_unittest.cc b/third_party/libwebrtc/modules/pacing/packet_router_unittest.cc
new file mode 100644
index 0000000000..fc26922850
--- /dev/null
+++ b/third_party/libwebrtc/modules/pacing/packet_router_unittest.cc
@@ -0,0 +1,673 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/pacing/packet_router.h"
+
+#include <cstddef>
+#include <cstdint>
+#include <memory>
+#include <utility>
+
+#include "api/units/time_delta.h"
+#include "modules/rtp_rtcp/include/rtp_header_extension_map.h"
+#include "modules/rtp_rtcp/mocks/mock_rtp_rtcp.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/transport_feedback.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/fake_clock.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+// TODO(eladalon): Restructure and/or replace the existing monolithic tests
+// (only some of the tests are monolithic) according to the new
+// guidelines - small tests for one thing at a time.
+// (I'm not removing any tests during this CL, so as to demonstrate no
+// regressions.)
+
+namespace {
+
+using ::testing::_;
+using ::testing::AnyNumber;
+using ::testing::AtLeast;
+using ::testing::Field;
+using ::testing::Gt;
+using ::testing::Le;
+using ::testing::NiceMock;
+using ::testing::Property;
+using ::testing::Return;
+using ::testing::SaveArg;
+
+constexpr int kProbeMinProbes = 5;
+constexpr int kProbeMinBytes = 1000;
+
+} // namespace
+
+class PacketRouterTest : public ::testing::Test {
+ public:
+ PacketRouterTest() {
+ extension_manager.Register<TransportSequenceNumber>(/*id=*/1);
+ }
+
+ protected:
+ std::unique_ptr<RtpPacketToSend> BuildRtpPacket(uint32_t ssrc) {
+ std::unique_ptr<RtpPacketToSend> packet =
+ std::make_unique<RtpPacketToSend>(&extension_manager);
+ packet->SetSsrc(ssrc);
+ return packet;
+ }
+
+ PacketRouter packet_router_;
+ RtpHeaderExtensionMap extension_manager;
+};
+
+TEST_F(PacketRouterTest, Sanity_NoModuleRegistered_GeneratePadding) {
+ constexpr DataSize bytes = DataSize::Bytes(300);
+ const PacedPacketInfo paced_info(1, kProbeMinProbes, kProbeMinBytes);
+
+ EXPECT_TRUE(packet_router_.GeneratePadding(bytes).empty());
+}
+
+TEST_F(PacketRouterTest, Sanity_NoModuleRegistered_SendRemb) {
+ const std::vector<uint32_t> ssrcs = {1, 2, 3};
+ constexpr uint32_t bitrate_bps = 10000;
+ // Expect not to crash
+ packet_router_.SendRemb(bitrate_bps, ssrcs);
+}
+
+TEST_F(PacketRouterTest, Sanity_NoModuleRegistered_SendTransportFeedback) {
+ std::vector<std::unique_ptr<rtcp::RtcpPacket>> feedback;
+ feedback.push_back(std::make_unique<rtcp::TransportFeedback>());
+ // Expect not to crash
+ packet_router_.SendCombinedRtcpPacket(std::move(feedback));
+}
+
+TEST_F(PacketRouterTest, GeneratePaddingPrioritizesRtx) {
+ // Two RTP modules. The first (prioritized due to rtx) isn't sending media so
+ // should not be called.
+ const uint16_t kSsrc1 = 1234;
+ const uint16_t kSsrc2 = 4567;
+
+ NiceMock<MockRtpRtcpInterface> rtp_1;
+ ON_CALL(rtp_1, RtxSendStatus()).WillByDefault(Return(kRtxRedundantPayloads));
+ ON_CALL(rtp_1, SSRC()).WillByDefault(Return(kSsrc1));
+ ON_CALL(rtp_1, SupportsPadding).WillByDefault(Return(false));
+
+ NiceMock<MockRtpRtcpInterface> rtp_2;
+ ON_CALL(rtp_2, RtxSendStatus()).WillByDefault(Return(kRtxOff));
+ ON_CALL(rtp_2, SSRC()).WillByDefault(Return(kSsrc2));
+ ON_CALL(rtp_2, SupportsPadding).WillByDefault(Return(true));
+
+ packet_router_.AddSendRtpModule(&rtp_1, false);
+ packet_router_.AddSendRtpModule(&rtp_2, false);
+
+ const size_t kPaddingSize = 123;
+ const size_t kExpectedPaddingPackets = 1;
+ EXPECT_CALL(rtp_1, GeneratePadding(_)).Times(0);
+ EXPECT_CALL(rtp_2, GeneratePadding(kPaddingSize))
+ .WillOnce([&](size_t padding_size) {
+ return std::vector<std::unique_ptr<RtpPacketToSend>>(
+ kExpectedPaddingPackets);
+ });
+ auto generated_padding =
+ packet_router_.GeneratePadding(DataSize::Bytes(kPaddingSize));
+ EXPECT_EQ(generated_padding.size(), kExpectedPaddingPackets);
+
+ packet_router_.RemoveSendRtpModule(&rtp_1);
+ packet_router_.RemoveSendRtpModule(&rtp_2);
+}
+
+TEST_F(PacketRouterTest, GeneratePaddingPrioritizesVideo) {
+ // Two RTP modules. Neither support RTX, both support padding,
+ // but the first one is for audio and second for video.
+ const uint16_t kSsrc1 = 1234;
+ const uint16_t kSsrc2 = 4567;
+ const size_t kPaddingSize = 123;
+ const size_t kExpectedPaddingPackets = 1;
+
+ auto generate_padding = [&](size_t padding_size) {
+ return std::vector<std::unique_ptr<RtpPacketToSend>>(
+ kExpectedPaddingPackets);
+ };
+
+ NiceMock<MockRtpRtcpInterface> audio_module;
+ ON_CALL(audio_module, RtxSendStatus()).WillByDefault(Return(kRtxOff));
+ ON_CALL(audio_module, SSRC()).WillByDefault(Return(kSsrc1));
+ ON_CALL(audio_module, SupportsPadding).WillByDefault(Return(true));
+ ON_CALL(audio_module, IsAudioConfigured).WillByDefault(Return(true));
+
+ NiceMock<MockRtpRtcpInterface> video_module;
+ ON_CALL(video_module, RtxSendStatus()).WillByDefault(Return(kRtxOff));
+ ON_CALL(video_module, SSRC()).WillByDefault(Return(kSsrc2));
+ ON_CALL(video_module, SupportsPadding).WillByDefault(Return(true));
+ ON_CALL(video_module, IsAudioConfigured).WillByDefault(Return(false));
+
+ // First add only the audio module. Since this is the only choice we have,
+ // padding should be sent on the audio ssrc.
+ packet_router_.AddSendRtpModule(&audio_module, false);
+ EXPECT_CALL(audio_module, GeneratePadding(kPaddingSize))
+ .WillOnce(generate_padding);
+ packet_router_.GeneratePadding(DataSize::Bytes(kPaddingSize));
+
+ // Add the video module, this should now be prioritized since we cannot
+ // guarantee that audio packets will be included in the BWE.
+ packet_router_.AddSendRtpModule(&video_module, false);
+ EXPECT_CALL(audio_module, GeneratePadding).Times(0);
+ EXPECT_CALL(video_module, GeneratePadding(kPaddingSize))
+ .WillOnce(generate_padding);
+ packet_router_.GeneratePadding(DataSize::Bytes(kPaddingSize));
+
+  // Remove and then re-add the audio module. Module order shouldn't matter;
+ // video should still be prioritized.
+ packet_router_.RemoveSendRtpModule(&audio_module);
+ packet_router_.AddSendRtpModule(&audio_module, false);
+ EXPECT_CALL(audio_module, GeneratePadding).Times(0);
+ EXPECT_CALL(video_module, GeneratePadding(kPaddingSize))
+ .WillOnce(generate_padding);
+ packet_router_.GeneratePadding(DataSize::Bytes(kPaddingSize));
+
+  // Remove the video module; we should fall back to padding on the
+ // audio module again.
+ packet_router_.RemoveSendRtpModule(&video_module);
+ EXPECT_CALL(audio_module, GeneratePadding(kPaddingSize))
+ .WillOnce(generate_padding);
+ packet_router_.GeneratePadding(DataSize::Bytes(kPaddingSize));
+
+ packet_router_.RemoveSendRtpModule(&audio_module);
+}
+
+TEST_F(PacketRouterTest, PadsOnLastActiveMediaStream) {
+ const uint16_t kSsrc1 = 1234;
+ const uint16_t kSsrc2 = 4567;
+ const uint16_t kSsrc3 = 8901;
+
+ // First two rtp modules send media and have rtx.
+ NiceMock<MockRtpRtcpInterface> rtp_1;
+ EXPECT_CALL(rtp_1, SSRC()).WillRepeatedly(Return(kSsrc1));
+ EXPECT_CALL(rtp_1, SupportsPadding).WillRepeatedly(Return(true));
+ EXPECT_CALL(rtp_1, SupportsRtxPayloadPadding).WillRepeatedly(Return(true));
+ EXPECT_CALL(rtp_1, TrySendPacket).WillRepeatedly(Return(false));
+ EXPECT_CALL(
+ rtp_1,
+ TrySendPacket(
+ ::testing::Pointee(Property(&RtpPacketToSend::Ssrc, kSsrc1)), _))
+ .WillRepeatedly(Return(true));
+
+ NiceMock<MockRtpRtcpInterface> rtp_2;
+ EXPECT_CALL(rtp_2, SSRC()).WillRepeatedly(Return(kSsrc2));
+ EXPECT_CALL(rtp_2, SupportsPadding).WillRepeatedly(Return(true));
+ EXPECT_CALL(rtp_2, SupportsRtxPayloadPadding).WillRepeatedly(Return(true));
+ EXPECT_CALL(rtp_2, TrySendPacket).WillRepeatedly(Return(false));
+ EXPECT_CALL(
+ rtp_2,
+ TrySendPacket(
+ ::testing::Pointee(Property(&RtpPacketToSend::Ssrc, kSsrc2)), _))
+ .WillRepeatedly(Return(true));
+
+ // Third module is sending media, but does not support rtx.
+ NiceMock<MockRtpRtcpInterface> rtp_3;
+ EXPECT_CALL(rtp_3, SSRC()).WillRepeatedly(Return(kSsrc3));
+ EXPECT_CALL(rtp_3, SupportsPadding).WillRepeatedly(Return(true));
+ EXPECT_CALL(rtp_3, SupportsRtxPayloadPadding).WillRepeatedly(Return(false));
+ EXPECT_CALL(rtp_3, TrySendPacket).WillRepeatedly(Return(false));
+ EXPECT_CALL(
+ rtp_3,
+ TrySendPacket(
+ ::testing::Pointee(Property(&RtpPacketToSend::Ssrc, kSsrc3)), _))
+ .WillRepeatedly(Return(true));
+
+ packet_router_.AddSendRtpModule(&rtp_1, false);
+ packet_router_.AddSendRtpModule(&rtp_2, false);
+ packet_router_.AddSendRtpModule(&rtp_3, false);
+
+ const size_t kPaddingBytes = 100;
+
+ // Initially, padding will be sent on last added rtp module that sends media
+ // and supports rtx.
+ EXPECT_CALL(rtp_2, GeneratePadding(kPaddingBytes))
+ .Times(1)
+ .WillOnce([&](size_t target_size_bytes) {
+ std::vector<std::unique_ptr<RtpPacketToSend>> packets;
+ packets.push_back(BuildRtpPacket(kSsrc2));
+ return packets;
+ });
+ packet_router_.GeneratePadding(DataSize::Bytes(kPaddingBytes));
+
+ // Send media on first module. Padding should be sent on that module.
+ packet_router_.SendPacket(BuildRtpPacket(kSsrc1), PacedPacketInfo());
+
+ EXPECT_CALL(rtp_1, GeneratePadding(kPaddingBytes))
+ .Times(1)
+ .WillOnce([&](size_t target_size_bytes) {
+ std::vector<std::unique_ptr<RtpPacketToSend>> packets;
+ packets.push_back(BuildRtpPacket(kSsrc1));
+ return packets;
+ });
+ packet_router_.GeneratePadding(DataSize::Bytes(kPaddingBytes));
+
+ // Send media on second module. Padding should be sent there.
+ packet_router_.SendPacket(BuildRtpPacket(kSsrc2), PacedPacketInfo());
+
+ // If the last active module is removed, and no module sends media before
+  // the next padding request, an arbitrary module will be selected.
+ packet_router_.RemoveSendRtpModule(&rtp_2);
+
+  // Generate padding on, and then remove, each of the remaining modules.
+ RtpRtcpInterface* last_send_module;
+ EXPECT_CALL(rtp_1, GeneratePadding(kPaddingBytes))
+ .Times(1)
+ .WillOnce([&](size_t target_size_bytes) {
+ last_send_module = &rtp_1;
+ std::vector<std::unique_ptr<RtpPacketToSend>> packets;
+ packets.push_back(BuildRtpPacket(kSsrc1));
+ return packets;
+ });
+ EXPECT_CALL(rtp_3, GeneratePadding(kPaddingBytes))
+ .Times(1)
+ .WillOnce([&](size_t target_size_bytes) {
+ last_send_module = &rtp_3;
+ std::vector<std::unique_ptr<RtpPacketToSend>> packets;
+ packets.push_back(BuildRtpPacket(kSsrc3));
+ return packets;
+ });
+
+ for (int i = 0; i < 2; ++i) {
+ last_send_module = nullptr;
+ packet_router_.GeneratePadding(DataSize::Bytes(kPaddingBytes));
+ EXPECT_NE(last_send_module, nullptr);
+ packet_router_.RemoveSendRtpModule(last_send_module);
+ }
+}
+
+TEST_F(PacketRouterTest, AllocatesTransportSequenceNumbers) {
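+  // Start just below the 16-bit wrap point so that the packets sent below
+  // exercise the 0xFFFF -> 0x0000 rollover of the transport sequence number.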
+ const uint16_t kStartSeq = 0xFFF0;
+ const size_t kNumPackets = 32;
+ const uint16_t kSsrc1 = 1234;
+
+ PacketRouter packet_router(kStartSeq - 1);
+ NiceMock<MockRtpRtcpInterface> rtp_1;
+ EXPECT_CALL(rtp_1, SSRC()).WillRepeatedly(Return(kSsrc1));
+ EXPECT_CALL(rtp_1, TrySendPacket).WillRepeatedly(Return(true));
+ packet_router.AddSendRtpModule(&rtp_1, false);
+
+ for (size_t i = 0; i < kNumPackets; ++i) {
+ auto packet = BuildRtpPacket(kSsrc1);
+ EXPECT_TRUE(packet->ReserveExtension<TransportSequenceNumber>());
+ packet_router.SendPacket(std::move(packet), PacedPacketInfo());
+ uint32_t expected_unwrapped_seq = static_cast<uint32_t>(kStartSeq) + i;
+ EXPECT_EQ(static_cast<uint16_t>(expected_unwrapped_seq & 0xFFFF),
+ packet_router.CurrentTransportSequenceNumber());
+ }
+
+ packet_router.RemoveSendRtpModule(&rtp_1);
+}
+
+TEST_F(PacketRouterTest, SendTransportFeedback) {
+ NiceMock<MockRtpRtcpInterface> rtp_1;
+ NiceMock<MockRtpRtcpInterface> rtp_2;
+
+ ON_CALL(rtp_1, RTCP()).WillByDefault(Return(RtcpMode::kCompound));
+ ON_CALL(rtp_2, RTCP()).WillByDefault(Return(RtcpMode::kCompound));
+
+ packet_router_.AddSendRtpModule(&rtp_1, false);
+ packet_router_.AddReceiveRtpModule(&rtp_2, false);
+
+ std::vector<std::unique_ptr<rtcp::RtcpPacket>> feedback;
+ feedback.push_back(std::make_unique<rtcp::TransportFeedback>());
+ EXPECT_CALL(rtp_1, SendCombinedRtcpPacket);
+ packet_router_.SendCombinedRtcpPacket(std::move(feedback));
+ packet_router_.RemoveSendRtpModule(&rtp_1);
+ EXPECT_CALL(rtp_2, SendCombinedRtcpPacket);
+ std::vector<std::unique_ptr<rtcp::RtcpPacket>> new_feedback;
+ new_feedback.push_back(std::make_unique<rtcp::TransportFeedback>());
+ packet_router_.SendCombinedRtcpPacket(std::move(new_feedback));
+ packet_router_.RemoveReceiveRtpModule(&rtp_2);
+}
+
+TEST_F(PacketRouterTest, SendPacketWithoutTransportSequenceNumbers) {
+ const uint16_t kSsrc1 = 1234;
+ NiceMock<MockRtpRtcpInterface> rtp_1;
+ ON_CALL(rtp_1, SendingMedia).WillByDefault(Return(true));
+ ON_CALL(rtp_1, SSRC).WillByDefault(Return(kSsrc1));
+ packet_router_.AddSendRtpModule(&rtp_1, false);
+
+  // Send a packet without the TransportSequenceNumber extension registered;
+  // packets sent should not have the extension set.
+ RtpHeaderExtensionMap extension_manager;
+ auto packet = std::make_unique<RtpPacketToSend>(&extension_manager);
+ packet->SetSsrc(kSsrc1);
+ EXPECT_CALL(
+ rtp_1,
+ TrySendPacket(
+ Property(&RtpPacketToSend::HasExtension<TransportSequenceNumber>,
+ false),
+ _))
+ .WillOnce(Return(true));
+ packet_router_.SendPacket(std::move(packet), PacedPacketInfo());
+
+ packet_router_.RemoveSendRtpModule(&rtp_1);
+}
+
+TEST_F(PacketRouterTest, SendPacketAssignsTransportSequenceNumbers) {
+ NiceMock<MockRtpRtcpInterface> rtp_1;
+ NiceMock<MockRtpRtcpInterface> rtp_2;
+
+ const uint16_t kSsrc1 = 1234;
+ const uint16_t kSsrc2 = 2345;
+
+ ON_CALL(rtp_1, SSRC).WillByDefault(Return(kSsrc1));
+ ON_CALL(rtp_2, SSRC).WillByDefault(Return(kSsrc2));
+
+ packet_router_.AddSendRtpModule(&rtp_1, false);
+ packet_router_.AddSendRtpModule(&rtp_2, false);
+
+ // Transport sequence numbers start at 1, for historical reasons.
+ uint16_t transport_sequence_number = 1;
+
+ auto packet = BuildRtpPacket(kSsrc1);
+ EXPECT_TRUE(packet->ReserveExtension<TransportSequenceNumber>());
+ EXPECT_CALL(
+ rtp_1,
+ TrySendPacket(
+ Property(&RtpPacketToSend::GetExtension<TransportSequenceNumber>,
+ transport_sequence_number),
+ _))
+ .WillOnce(Return(true));
+ packet_router_.SendPacket(std::move(packet), PacedPacketInfo());
+
+ ++transport_sequence_number;
+ packet = BuildRtpPacket(kSsrc2);
+ EXPECT_TRUE(packet->ReserveExtension<TransportSequenceNumber>());
+
+ EXPECT_CALL(
+ rtp_2,
+ TrySendPacket(
+ Property(&RtpPacketToSend::GetExtension<TransportSequenceNumber>,
+ transport_sequence_number),
+ _))
+ .WillOnce(Return(true));
+ packet_router_.SendPacket(std::move(packet), PacedPacketInfo());
+
+ packet_router_.RemoveSendRtpModule(&rtp_1);
+ packet_router_.RemoveSendRtpModule(&rtp_2);
+}
+
+TEST_F(PacketRouterTest, DoesNotIncrementTransportSequenceNumberOnSendFailure) {
+ NiceMock<MockRtpRtcpInterface> rtp;
+ constexpr uint32_t kSsrc = 1234;
+ ON_CALL(rtp, SSRC).WillByDefault(Return(kSsrc));
+ packet_router_.AddSendRtpModule(&rtp, false);
+
+ // Transport sequence numbers start at 1, for historical reasons.
+ const uint16_t kStartTransportSequenceNumber = 1;
+
+ // Build and send a packet - it should be assigned the start sequence number.
+  // Return a failure status code to make sure the sequence number is not
+  // incremented.
+ auto packet = BuildRtpPacket(kSsrc);
+ EXPECT_TRUE(packet->ReserveExtension<TransportSequenceNumber>());
+ EXPECT_CALL(
+ rtp, TrySendPacket(
+ Property(&RtpPacketToSend::GetExtension<TransportSequenceNumber>,
+ kStartTransportSequenceNumber),
+ _))
+ .WillOnce(Return(false));
+ packet_router_.SendPacket(std::move(packet), PacedPacketInfo());
+
+  // Send another packet and verify that the transport sequence number is
+  // still at the starting value.
+ packet = BuildRtpPacket(kSsrc);
+ EXPECT_TRUE(packet->ReserveExtension<TransportSequenceNumber>());
+
+ EXPECT_CALL(
+ rtp, TrySendPacket(
+ Property(&RtpPacketToSend::GetExtension<TransportSequenceNumber>,
+ kStartTransportSequenceNumber),
+ _))
+ .WillOnce(Return(true));
+ packet_router_.SendPacket(std::move(packet), PacedPacketInfo());
+
+ packet_router_.RemoveSendRtpModule(&rtp);
+}
+
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+using PacketRouterDeathTest = PacketRouterTest;
+TEST_F(PacketRouterDeathTest, DoubleRegistrationOfSendModuleDisallowed) {
+ NiceMock<MockRtpRtcpInterface> module;
+
+ constexpr bool remb_candidate = false; // Value irrelevant.
+ packet_router_.AddSendRtpModule(&module, remb_candidate);
+ EXPECT_DEATH(packet_router_.AddSendRtpModule(&module, remb_candidate), "");
+
+ // Test tear-down
+ packet_router_.RemoveSendRtpModule(&module);
+}
+
+TEST_F(PacketRouterDeathTest, DoubleRegistrationOfReceiveModuleDisallowed) {
+ NiceMock<MockRtpRtcpInterface> module;
+
+ constexpr bool remb_candidate = false; // Value irrelevant.
+ packet_router_.AddReceiveRtpModule(&module, remb_candidate);
+ EXPECT_DEATH(packet_router_.AddReceiveRtpModule(&module, remb_candidate), "");
+
+ // Test tear-down
+ packet_router_.RemoveReceiveRtpModule(&module);
+}
+
+TEST_F(PacketRouterDeathTest, RemovalOfNeverAddedSendModuleDisallowed) {
+ NiceMock<MockRtpRtcpInterface> module;
+
+ EXPECT_DEATH(packet_router_.RemoveSendRtpModule(&module), "");
+}
+
+TEST_F(PacketRouterDeathTest, RemovalOfNeverAddedReceiveModuleDisallowed) {
+ NiceMock<MockRtpRtcpInterface> module;
+
+ EXPECT_DEATH(packet_router_.RemoveReceiveRtpModule(&module), "");
+}
+#endif // RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+
+TEST(PacketRouterRembTest, ChangeSendRtpModuleChangeRembSender) {
+ rtc::ScopedFakeClock clock;
+ NiceMock<MockRtpRtcpInterface> rtp_send;
+ NiceMock<MockRtpRtcpInterface> rtp_recv;
+ PacketRouter packet_router;
+ packet_router.AddSendRtpModule(&rtp_send, true);
+ packet_router.AddReceiveRtpModule(&rtp_recv, true);
+
+ uint32_t bitrate_estimate = 456;
+ std::vector<uint32_t> ssrcs = {1234, 5678};
+
+ EXPECT_CALL(rtp_send, SetRemb(bitrate_estimate, ssrcs));
+ packet_router.SendRemb(bitrate_estimate, ssrcs);
+
+ // Remove the sending module -> should get remb on the second module.
+ packet_router.RemoveSendRtpModule(&rtp_send);
+
+ EXPECT_CALL(rtp_recv, SetRemb(bitrate_estimate, ssrcs));
+ packet_router.SendRemb(bitrate_estimate, ssrcs);
+
+ packet_router.RemoveReceiveRtpModule(&rtp_recv);
+}
+
+// Only register receiving modules and make sure we fall back to triggering a
+// REMB packet on this one.
+TEST(PacketRouterRembTest, NoSendingRtpModule) {
+ rtc::ScopedFakeClock clock;
+ NiceMock<MockRtpRtcpInterface> rtp;
+ PacketRouter packet_router;
+
+ packet_router.AddReceiveRtpModule(&rtp, true);
+
+ uint32_t bitrate_estimate = 456;
+ const std::vector<uint32_t> ssrcs = {1234};
+
+ EXPECT_CALL(rtp, SetRemb(bitrate_estimate, ssrcs));
+ packet_router.SendRemb(bitrate_estimate, ssrcs);
+
+  // Send the same estimate again; a new REMB packet should still be triggered.
+ EXPECT_CALL(rtp, SetRemb(bitrate_estimate, ssrcs));
+ packet_router.SendRemb(bitrate_estimate, ssrcs);
+
+ EXPECT_CALL(rtp, UnsetRemb());
+ packet_router.RemoveReceiveRtpModule(&rtp);
+}
+
+TEST(PacketRouterRembTest, NonCandidateSendRtpModuleNotUsedForRemb) {
+ rtc::ScopedFakeClock clock;
+ PacketRouter packet_router;
+ NiceMock<MockRtpRtcpInterface> module;
+
+ constexpr bool remb_candidate = false;
+
+ packet_router.AddSendRtpModule(&module, remb_candidate);
+
+ constexpr uint32_t bitrate_estimate = 456;
+ const std::vector<uint32_t> ssrcs = {1234};
+ EXPECT_CALL(module, SetRemb(_, _)).Times(0);
+ packet_router.SendRemb(bitrate_estimate, ssrcs);
+
+ // Test tear-down
+ packet_router.RemoveSendRtpModule(&module);
+}
+
+TEST(PacketRouterRembTest, CandidateSendRtpModuleUsedForRemb) {
+ rtc::ScopedFakeClock clock;
+ PacketRouter packet_router;
+ NiceMock<MockRtpRtcpInterface> module;
+
+ constexpr bool remb_candidate = true;
+
+ packet_router.AddSendRtpModule(&module, remb_candidate);
+
+ constexpr uint32_t bitrate_estimate = 456;
+ const std::vector<uint32_t> ssrcs = {1234};
+ EXPECT_CALL(module, SetRemb(bitrate_estimate, ssrcs));
+ packet_router.SendRemb(bitrate_estimate, ssrcs);
+
+ // Test tear-down
+ packet_router.RemoveSendRtpModule(&module);
+}
+
+TEST(PacketRouterRembTest, NonCandidateReceiveRtpModuleNotUsedForRemb) {
+ rtc::ScopedFakeClock clock;
+ PacketRouter packet_router;
+ NiceMock<MockRtpRtcpInterface> module;
+
+ constexpr bool remb_candidate = false;
+
+ packet_router.AddReceiveRtpModule(&module, remb_candidate);
+
+ constexpr uint32_t bitrate_estimate = 456;
+ const std::vector<uint32_t> ssrcs = {1234};
+ EXPECT_CALL(module, SetRemb(_, _)).Times(0);
+ packet_router.SendRemb(bitrate_estimate, ssrcs);
+
+ // Test tear-down
+ packet_router.RemoveReceiveRtpModule(&module);
+}
+
+TEST(PacketRouterRembTest, CandidateReceiveRtpModuleUsedForRemb) {
+ rtc::ScopedFakeClock clock;
+ PacketRouter packet_router;
+ NiceMock<MockRtpRtcpInterface> module;
+
+ constexpr bool remb_candidate = true;
+
+ packet_router.AddReceiveRtpModule(&module, remb_candidate);
+
+ constexpr uint32_t bitrate_estimate = 456;
+ const std::vector<uint32_t> ssrcs = {1234};
+ EXPECT_CALL(module, SetRemb(bitrate_estimate, ssrcs));
+ packet_router.SendRemb(bitrate_estimate, ssrcs);
+
+ // Test tear-down
+ packet_router.RemoveReceiveRtpModule(&module);
+}
+
+TEST(PacketRouterRembTest,
+ SendCandidatePreferredOverReceiveCandidate_SendModuleAddedFirst) {
+ rtc::ScopedFakeClock clock;
+ PacketRouter packet_router;
+ NiceMock<MockRtpRtcpInterface> send_module;
+ NiceMock<MockRtpRtcpInterface> receive_module;
+
+ constexpr bool remb_candidate = true;
+
+ // Send module added - activated.
+ packet_router.AddSendRtpModule(&send_module, remb_candidate);
+
+ // Receive module added - the send module remains the active one.
+ packet_router.AddReceiveRtpModule(&receive_module, remb_candidate);
+
+ constexpr uint32_t bitrate_estimate = 456;
+ const std::vector<uint32_t> ssrcs = {1234};
+ EXPECT_CALL(send_module, SetRemb(bitrate_estimate, ssrcs));
+ EXPECT_CALL(receive_module, SetRemb(_, _)).Times(0);
+
+ packet_router.SendRemb(bitrate_estimate, ssrcs);
+
+ // Test tear-down
+ packet_router.RemoveReceiveRtpModule(&receive_module);
+ packet_router.RemoveSendRtpModule(&send_module);
+}
+
+TEST(PacketRouterRembTest,
+ SendCandidatePreferredOverReceiveCandidate_ReceiveModuleAddedFirst) {
+ rtc::ScopedFakeClock clock;
+ PacketRouter packet_router;
+ NiceMock<MockRtpRtcpInterface> send_module;
+ NiceMock<MockRtpRtcpInterface> receive_module;
+
+ constexpr bool remb_candidate = true;
+
+ // Receive module added - activated.
+ packet_router.AddReceiveRtpModule(&receive_module, remb_candidate);
+
+ // Send module added - replaces receive module as active.
+ packet_router.AddSendRtpModule(&send_module, remb_candidate);
+
+ constexpr uint32_t bitrate_estimate = 456;
+ const std::vector<uint32_t> ssrcs = {1234};
+ EXPECT_CALL(send_module, SetRemb(bitrate_estimate, ssrcs));
+ EXPECT_CALL(receive_module, SetRemb(_, _)).Times(0);
+
+ clock.AdvanceTime(TimeDelta::Millis(1000));
+ packet_router.SendRemb(bitrate_estimate, ssrcs);
+
+ // Test tear-down
+ packet_router.RemoveReceiveRtpModule(&receive_module);
+ packet_router.RemoveSendRtpModule(&send_module);
+}
+
+TEST(PacketRouterRembTest, ReceiveModuleTakesOverWhenLastSendModuleRemoved) {
+ rtc::ScopedFakeClock clock;
+ PacketRouter packet_router;
+ NiceMock<MockRtpRtcpInterface> send_module;
+ NiceMock<MockRtpRtcpInterface> receive_module;
+
+ constexpr bool remb_candidate = true;
+
+ // Send module active, receive module inactive.
+ packet_router.AddSendRtpModule(&send_module, remb_candidate);
+ packet_router.AddReceiveRtpModule(&receive_module, remb_candidate);
+
+ // Send module removed - receive module becomes active.
+ packet_router.RemoveSendRtpModule(&send_module);
+ constexpr uint32_t bitrate_estimate = 456;
+ const std::vector<uint32_t> ssrcs = {1234};
+ EXPECT_CALL(send_module, SetRemb(_, _)).Times(0);
+ EXPECT_CALL(receive_module, SetRemb(bitrate_estimate, ssrcs));
+ packet_router.SendRemb(bitrate_estimate, ssrcs);
+
+ // Test tear-down
+ packet_router.RemoveReceiveRtpModule(&receive_module);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/pacing/prioritized_packet_queue.cc b/third_party/libwebrtc/modules/pacing/prioritized_packet_queue.cc
new file mode 100644
index 0000000000..83ec77da28
--- /dev/null
+++ b/third_party/libwebrtc/modules/pacing/prioritized_packet_queue.cc
@@ -0,0 +1,267 @@
+/*
+ * Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/pacing/prioritized_packet_queue.h"
+
+#include <utility>
+
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace {
+
+constexpr int kAudioPrioLevel = 0;
+
+int GetPriorityForType(RtpPacketMediaType type) {
+ // Lower number takes priority over higher.
+ switch (type) {
+ case RtpPacketMediaType::kAudio:
+ // Audio is always prioritized over other packet types.
+ return kAudioPrioLevel;
+ case RtpPacketMediaType::kRetransmission:
+ // Send retransmissions before new media.
+ return kAudioPrioLevel + 1;
+ case RtpPacketMediaType::kVideo:
+ case RtpPacketMediaType::kForwardErrorCorrection:
+ // Video has "normal" priority, in the old speak.
+ // Send redundancy concurrently to video. If it is delayed it might have a
+ // lower chance of being useful.
+ return kAudioPrioLevel + 2;
+ case RtpPacketMediaType::kPadding:
+ // Packets that are in themselves likely useless, only sent to keep the
+ // BWE high.
+ return kAudioPrioLevel + 3;
+ }
+ RTC_CHECK_NOTREACHED();
+}
+
+} // namespace
+
+DataSize PrioritizedPacketQueue::QueuedPacket::PacketSize() const {
+ return DataSize::Bytes(packet->payload_size() + packet->padding_size());
+}
+
+PrioritizedPacketQueue::StreamQueue::StreamQueue(Timestamp creation_time)
+ : last_enqueue_time_(creation_time) {}
+
+bool PrioritizedPacketQueue::StreamQueue::EnqueuePacket(QueuedPacket packet,
+ int priority_level) {
+ bool first_packet_at_level = packets_[priority_level].empty();
+ packets_[priority_level].push_back(std::move(packet));
+ return first_packet_at_level;
+}
+
+PrioritizedPacketQueue::QueuedPacket
+PrioritizedPacketQueue::StreamQueue::DequePacket(int priority_level) {
+ RTC_DCHECK(!packets_[priority_level].empty());
+ QueuedPacket packet = std::move(packets_[priority_level].front());
+ packets_[priority_level].pop_front();
+ return packet;
+}
+
+bool PrioritizedPacketQueue::StreamQueue::HasPacketsAtPrio(
+ int priority_level) const {
+ return !packets_[priority_level].empty();
+}
+
+bool PrioritizedPacketQueue::StreamQueue::IsEmpty() const {
+ for (const std::deque<QueuedPacket>& queue : packets_) {
+ if (!queue.empty()) {
+ return false;
+ }
+ }
+ return true;
+}
+
+Timestamp PrioritizedPacketQueue::StreamQueue::LeadingAudioPacketEnqueueTime()
+ const {
+ RTC_DCHECK(!packets_[kAudioPrioLevel].empty());
+ return packets_[kAudioPrioLevel].begin()->enqueue_time;
+}
+
+Timestamp PrioritizedPacketQueue::StreamQueue::LastEnqueueTime() const {
+ return last_enqueue_time_;
+}
+
+PrioritizedPacketQueue::PrioritizedPacketQueue(Timestamp creation_time)
+ : queue_time_sum_(TimeDelta::Zero()),
+ pause_time_sum_(TimeDelta::Zero()),
+ size_packets_(0),
+ size_packets_per_media_type_({}),
+ size_payload_(DataSize::Zero()),
+ last_update_time_(creation_time),
+ paused_(false),
+ last_culling_time_(creation_time),
+ top_active_prio_level_(-1) {}
+
+void PrioritizedPacketQueue::Push(Timestamp enqueue_time,
+ std::unique_ptr<RtpPacketToSend> packet) {
+ StreamQueue* stream_queue;
+ auto [it, inserted] = streams_.emplace(packet->Ssrc(), nullptr);
+ if (inserted) {
+ it->second = std::make_unique<StreamQueue>(enqueue_time);
+ }
+ stream_queue = it->second.get();
+
+ auto enqueue_time_iterator =
+ enqueue_times_.insert(enqueue_times_.end(), enqueue_time);
+ RTC_DCHECK(packet->packet_type().has_value());
+ RtpPacketMediaType packet_type = packet->packet_type().value();
+ int prio_level = GetPriorityForType(packet_type);
+ RTC_DCHECK_GE(prio_level, 0);
+ RTC_DCHECK_LT(prio_level, kNumPriorityLevels);
+ QueuedPacket queued_packed = {.packet = std::move(packet),
+ .enqueue_time = enqueue_time,
+ .enqueue_time_iterator = enqueue_time_iterator};
+ // In order to figure out how much time a packet has spent in the queue
+ // while not in a paused state, we subtract the total amount of time the
+ // queue has been paused so far, and when the packet is popped we subtract
+ // the total amount of time the queue has been paused at that moment. This
+ // way we subtract the total amount of time the packet has spent in the
+ // queue while in a paused state.
+ UpdateAverageQueueTime(enqueue_time);
+ queued_packed.enqueue_time -= pause_time_sum_;
+ ++size_packets_;
+ ++size_packets_per_media_type_[static_cast<size_t>(packet_type)];
+ size_payload_ += queued_packed.PacketSize();
+
+ if (stream_queue->EnqueuePacket(std::move(queued_packed), prio_level)) {
+    // The number of packets at `prio_level` for this stream is now non-zero.
+ streams_by_prio_[prio_level].push_back(stream_queue);
+ }
+ if (top_active_prio_level_ < 0 || prio_level < top_active_prio_level_) {
+ top_active_prio_level_ = prio_level;
+ }
+
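+  // Periodically cull streams that have been empty for a while, so that the
+  // map does not grow unbounded as streams (SSRCs) come and go.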
+ static constexpr TimeDelta kTimeout = TimeDelta::Millis(500);
+ if (enqueue_time - last_culling_time_ > kTimeout) {
+ for (auto it = streams_.begin(); it != streams_.end();) {
+ if (it->second->IsEmpty() &&
+ it->second->LastEnqueueTime() + kTimeout < enqueue_time) {
+ streams_.erase(it++);
+ } else {
+ ++it;
+ }
+ }
+ last_culling_time_ = enqueue_time;
+ }
+}
+
+std::unique_ptr<RtpPacketToSend> PrioritizedPacketQueue::Pop() {
+ if (size_packets_ == 0) {
+ return nullptr;
+ }
+
+ RTC_DCHECK_GE(top_active_prio_level_, 0);
+ StreamQueue& stream_queue = *streams_by_prio_[top_active_prio_level_].front();
+ QueuedPacket packet = stream_queue.DequePacket(top_active_prio_level_);
+ --size_packets_;
+ RTC_DCHECK(packet.packet->packet_type().has_value());
+ RtpPacketMediaType packet_type = packet.packet->packet_type().value();
+ --size_packets_per_media_type_[static_cast<size_t>(packet_type)];
+ RTC_DCHECK_GE(size_packets_per_media_type_[static_cast<size_t>(packet_type)],
+ 0);
+ size_payload_ -= packet.PacketSize();
+
+ // Calculate the total amount of time spent by this packet in the queue
+  // while in a non-paused state. Note that `pause_time_sum_` was
+  // subtracted from `packet.enqueue_time` when the packet was pushed, and
+  // by subtracting it now we effectively remove the time spent in the
+  // queue while in a paused state.
+ TimeDelta time_in_non_paused_state =
+ last_update_time_ - packet.enqueue_time - pause_time_sum_;
+ queue_time_sum_ -= time_in_non_paused_state;
+
+ RTC_DCHECK(size_packets_ > 0 || queue_time_sum_ == TimeDelta::Zero());
+
+ RTC_CHECK(packet.enqueue_time_iterator != enqueue_times_.end());
+ enqueue_times_.erase(packet.enqueue_time_iterator);
+
+  // Remove the StreamQueue from the head of the fifo-queue for this prio
+  // level, and add it to the end if it still has packets.
+ streams_by_prio_[top_active_prio_level_].pop_front();
+ if (stream_queue.HasPacketsAtPrio(top_active_prio_level_)) {
+ streams_by_prio_[top_active_prio_level_].push_back(&stream_queue);
+ } else if (streams_by_prio_[top_active_prio_level_].empty()) {
+ // No stream queues have packets at this prio level, find top priority
+ // that is not empty.
+ if (size_packets_ == 0) {
+ top_active_prio_level_ = -1;
+ } else {
+ for (int i = 0; i < kNumPriorityLevels; ++i) {
+ if (!streams_by_prio_[i].empty()) {
+ top_active_prio_level_ = i;
+ break;
+ }
+ }
+ }
+ }
+
+ return std::move(packet.packet);
+}
+
+int PrioritizedPacketQueue::SizeInPackets() const {
+ return size_packets_;
+}
+
+DataSize PrioritizedPacketQueue::SizeInPayloadBytes() const {
+ return size_payload_;
+}
+
+const std::array<int, kNumMediaTypes>&
+PrioritizedPacketQueue::SizeInPacketsPerRtpPacketMediaType() const {
+ return size_packets_per_media_type_;
+}
+
+Timestamp PrioritizedPacketQueue::LeadingAudioPacketEnqueueTime() const {
+ if (streams_by_prio_[kAudioPrioLevel].empty()) {
+ return Timestamp::MinusInfinity();
+ }
+ return streams_by_prio_[kAudioPrioLevel]
+ .front()
+ ->LeadingAudioPacketEnqueueTime();
+}
+
+Timestamp PrioritizedPacketQueue::OldestEnqueueTime() const {
+ return enqueue_times_.empty() ? Timestamp::MinusInfinity()
+ : enqueue_times_.front();
+}
+
+TimeDelta PrioritizedPacketQueue::AverageQueueTime() const {
+ if (size_packets_ == 0) {
+ return TimeDelta::Zero();
+ }
+ return queue_time_sum_ / size_packets_;
+}
+
+void PrioritizedPacketQueue::UpdateAverageQueueTime(Timestamp now) {
+ RTC_CHECK_GE(now, last_update_time_);
+ if (now == last_update_time_) {
+ return;
+ }
+
+ TimeDelta delta = now - last_update_time_;
+
+ if (paused_) {
+ pause_time_sum_ += delta;
+ } else {
+ queue_time_sum_ += delta * size_packets_;
+ }
+
+ last_update_time_ = now;
+}
+
+void PrioritizedPacketQueue::SetPauseState(bool paused, Timestamp now) {
+ UpdateAverageQueueTime(now);
+ paused_ = paused;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/pacing/prioritized_packet_queue.h b/third_party/libwebrtc/modules/pacing/prioritized_packet_queue.h
new file mode 100644
index 0000000000..c770435aa1
--- /dev/null
+++ b/third_party/libwebrtc/modules/pacing/prioritized_packet_queue.h
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_PACING_PRIORITIZED_PACKET_QUEUE_H_
+#define MODULES_PACING_PRIORITIZED_PACKET_QUEUE_H_
+
+#include <stddef.h>
+
+#include <deque>
+#include <list>
+#include <memory>
+#include <unordered_map>
+
+#include "api/units/data_size.h"
+#include "api/units/time_delta.h"
+#include "api/units/timestamp.h"
+#include "modules/pacing/pacing_controller.h"
+#include "modules/rtp_rtcp/source/rtp_packet_to_send.h"
+
+namespace webrtc {
+
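+// Packet queue used by the pacing controller. Packets are grouped per SSRC
+// into StreamQueues. Within a priority level, streams are served round-robin;
+// lower-numbered levels (audio first) are always drained before higher ones.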
+class PrioritizedPacketQueue : public PacingController::PacketQueue {
+ public:
+ explicit PrioritizedPacketQueue(Timestamp creation_time);
+ PrioritizedPacketQueue(const PrioritizedPacketQueue&) = delete;
+ PrioritizedPacketQueue& operator=(const PrioritizedPacketQueue&) = delete;
+
+ void Push(Timestamp enqueue_time,
+ std::unique_ptr<RtpPacketToSend> packet) override;
+ std::unique_ptr<RtpPacketToSend> Pop() override;
+ int SizeInPackets() const override;
+ DataSize SizeInPayloadBytes() const override;
+ const std::array<int, kNumMediaTypes>& SizeInPacketsPerRtpPacketMediaType()
+ const override;
+ Timestamp LeadingAudioPacketEnqueueTime() const override;
+ Timestamp OldestEnqueueTime() const override;
+ TimeDelta AverageQueueTime() const override;
+ void UpdateAverageQueueTime(Timestamp now) override;
+ void SetPauseState(bool paused, Timestamp now) override;
+
+ private:
+ static constexpr int kNumPriorityLevels = 4;
+
+ class QueuedPacket {
+ public:
+ DataSize PacketSize() const;
+
+ std::unique_ptr<RtpPacketToSend> packet;
+ Timestamp enqueue_time;
+ std::list<Timestamp>::iterator enqueue_time_iterator;
+ };
+
+ // Class containing packets for an RTP stream.
+ // For each priority level, packets are simply stored in a fifo queue.
+ class StreamQueue {
+ public:
+ explicit StreamQueue(Timestamp creation_time);
+ StreamQueue(StreamQueue&&) = default;
+ StreamQueue& operator=(StreamQueue&&) = default;
+
+ StreamQueue(const StreamQueue&) = delete;
+ StreamQueue& operator=(const StreamQueue&) = delete;
+
+ // Enqueue packet at the given priority level. Returns true if the packet
+ // count for that priority level went from zero to non-zero.
+ bool EnqueuePacket(QueuedPacket packet, int priority_level);
+
+ QueuedPacket DequePacket(int priority_level);
+
+ bool HasPacketsAtPrio(int priority_level) const;
+ bool IsEmpty() const;
+ Timestamp LeadingAudioPacketEnqueueTime() const;
+ Timestamp LastEnqueueTime() const;
+
+ private:
+ std::deque<QueuedPacket> packets_[kNumPriorityLevels];
+ Timestamp last_enqueue_time_;
+ };
+
+ // Cumulative sum, over all packets, of time spent in the queue.
+ TimeDelta queue_time_sum_;
+ // Cumulative sum of time the queue has spent in a paused state.
+ TimeDelta pause_time_sum_;
+ // Total number of packets stored in this queue.
+ int size_packets_;
+ // Total number of packets stored in this queue per RtpPacketMediaType.
+ std::array<int, kNumMediaTypes> size_packets_per_media_type_;
+  // Sum of payload sizes for all packets stored in this queue.
+ DataSize size_payload_;
+ // The last time queue/pause time sums were updated.
+ Timestamp last_update_time_;
+ bool paused_;
+
+ // Last time `streams_` was culled for inactive streams.
+ Timestamp last_culling_time_;
+
+ // Map from SSRC to packet queues for the associated RTP stream.
+ std::unordered_map<uint32_t, std::unique_ptr<StreamQueue>> streams_;
+
+ // For each priority level, a queue of StreamQueues which have at least one
+ // packet pending for that prio level.
+ std::deque<StreamQueue*> streams_by_prio_[kNumPriorityLevels];
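+  // A hedged sketch of the round-robin step these members enable; the actual
+  // implementation lives in the .cc file. One plausible dequeue step: take
+  // the front StreamQueue, dequeue one packet, and re-append the stream if it
+  // still has packets at this priority level:
+  //   StreamQueue* stream = streams_by_prio_[level].front();
+  //   streams_by_prio_[level].pop_front();
+  //   QueuedPacket packet = stream->DequePacket(level);
+  //   if (stream->HasPacketsAtPrio(level)) {
+  //     streams_by_prio_[level].push_back(stream);
+  //   }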
+
+  // The first index into `streams_by_prio_` that is non-empty.
+ int top_active_prio_level_;
+
+  // Ordered list of enqueue times. Additions are monotonically increasing and
+  // always made at the end. Each QueuedPacket holds an iterator into this
+  // list, allowing fast removal.
+ std::list<Timestamp> enqueue_times_;
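+  // A hedged sketch of the O(1) erase those iterators enable when a packet
+  // is popped:
+  //   enqueue_times_.erase(packet.enqueue_time_iterator);
+  // A plain std::list<Timestamp> would otherwise require a linear scan.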
+};
+
+} // namespace webrtc
+
+#endif // MODULES_PACING_PRIORITIZED_PACKET_QUEUE_H_
diff --git a/third_party/libwebrtc/modules/pacing/prioritized_packet_queue_unittest.cc b/third_party/libwebrtc/modules/pacing/prioritized_packet_queue_unittest.cc
new file mode 100644
index 0000000000..6e27ff018d
--- /dev/null
+++ b/third_party/libwebrtc/modules/pacing/prioritized_packet_queue_unittest.cc
@@ -0,0 +1,291 @@
+/*
+ * Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/pacing/prioritized_packet_queue.h"
+
+#include <utility>
+
+#include "api/units/time_delta.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "modules/rtp_rtcp/source/rtp_packet_to_send.h"
+#include "rtc_base/checks.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+constexpr uint32_t kDefaultSsrc = 123;
+constexpr int kDefaultPayloadSize = 789;
+
+std::unique_ptr<RtpPacketToSend> CreatePacket(RtpPacketMediaType type,
+ uint16_t sequence_number,
+ uint32_t ssrc = kDefaultSsrc) {
+ auto packet = std::make_unique<RtpPacketToSend>(/*extensions=*/nullptr);
+ packet->set_packet_type(type);
+ packet->SetSsrc(ssrc);
+ packet->SetSequenceNumber(sequence_number);
+ packet->SetPayloadSize(kDefaultPayloadSize);
+ return packet;
+}
+
+} // namespace
+
+TEST(PrioritizedPacketQueue, ReturnsPacketsInPrioritizedOrder) {
+ Timestamp now = Timestamp::Zero();
+ PrioritizedPacketQueue queue(now);
+
+  // Add packets in low-to-high priority order.
+ queue.Push(now, CreatePacket(RtpPacketMediaType::kPadding, /*seq=*/1));
+ queue.Push(now, CreatePacket(RtpPacketMediaType::kVideo, /*seq=*/2));
+ queue.Push(now, CreatePacket(RtpPacketMediaType::kForwardErrorCorrection,
+ /*seq=*/3));
+ queue.Push(now, CreatePacket(RtpPacketMediaType::kRetransmission, /*seq=*/4));
+ queue.Push(now, CreatePacket(RtpPacketMediaType::kAudio, /*seq=*/5));
+
+ // Packets should be returned in high to low order.
+ EXPECT_EQ(queue.Pop()->SequenceNumber(), 5);
+ EXPECT_EQ(queue.Pop()->SequenceNumber(), 4);
+  // Video and FEC are prioritized equally, but video was enqueued first.
+ EXPECT_EQ(queue.Pop()->SequenceNumber(), 2);
+ EXPECT_EQ(queue.Pop()->SequenceNumber(), 3);
+ EXPECT_EQ(queue.Pop()->SequenceNumber(), 1);
+}
+
+TEST(PrioritizedPacketQueue, ReturnsEqualPrioPacketsInRoundRobinOrder) {
+ Timestamp now = Timestamp::Zero();
+ PrioritizedPacketQueue queue(now);
+
+ // Insert video packets (prioritized equally), simulating a simulcast-type use
+ // case.
+ queue.Push(now,
+ CreatePacket(RtpPacketMediaType::kVideo, /*seq=*/1, /*ssrc=*/100));
+
+ queue.Push(now,
+ CreatePacket(RtpPacketMediaType::kVideo, /*seq=*/2, /*ssrc=*/101));
+ queue.Push(now,
+ CreatePacket(RtpPacketMediaType::kVideo, /*seq=*/3, /*ssrc=*/101));
+
+ queue.Push(now,
+ CreatePacket(RtpPacketMediaType::kVideo, /*seq=*/4, /*ssrc=*/102));
+ queue.Push(now,
+ CreatePacket(RtpPacketMediaType::kVideo, /*seq=*/5, /*ssrc=*/102));
+ queue.Push(now,
+ CreatePacket(RtpPacketMediaType::kVideo, /*seq=*/6, /*ssrc=*/102));
+ queue.Push(now,
+ CreatePacket(RtpPacketMediaType::kVideo, /*seq=*/7, /*ssrc=*/102));
+
+ // First packet from each SSRC.
+ EXPECT_EQ(queue.Pop()->SequenceNumber(), 1);
+ EXPECT_EQ(queue.Pop()->SequenceNumber(), 2);
+ EXPECT_EQ(queue.Pop()->SequenceNumber(), 4);
+
+ // Second packets from streams that have packets left.
+ EXPECT_EQ(queue.Pop()->SequenceNumber(), 3);
+ EXPECT_EQ(queue.Pop()->SequenceNumber(), 5);
+
+ // Only packets from last stream remaining.
+ EXPECT_EQ(queue.Pop()->SequenceNumber(), 6);
+ EXPECT_EQ(queue.Pop()->SequenceNumber(), 7);
+}
+
+TEST(PrioritizedPacketQueue, ReportsSizeInPackets) {
+ PrioritizedPacketQueue queue(/*creation_time=*/Timestamp::Zero());
+ EXPECT_EQ(queue.SizeInPackets(), 0);
+
+ queue.Push(/*enqueue_time=*/Timestamp::Zero(),
+ CreatePacket(RtpPacketMediaType::kVideo,
+ /*seq_no=*/1));
+ EXPECT_EQ(queue.SizeInPackets(), 1);
+
+ queue.Pop();
+ EXPECT_EQ(queue.SizeInPackets(), 0);
+}
+
+TEST(PrioritizedPacketQueue, ReportsPayloadSize) {
+ PrioritizedPacketQueue queue(/*creation_time=*/Timestamp::Zero());
+ EXPECT_EQ(queue.SizeInPayloadBytes(), DataSize::Zero());
+
+ queue.Push(/*enqueue_time=*/Timestamp::Zero(),
+ CreatePacket(RtpPacketMediaType::kVideo,
+ /*seq_no=*/1));
+ EXPECT_EQ(queue.SizeInPayloadBytes(), DataSize::Bytes(kDefaultPayloadSize));
+
+ queue.Pop();
+ EXPECT_EQ(queue.SizeInPayloadBytes(), DataSize::Zero());
+}
+
+TEST(PrioritizedPacketQueue, ReportsPaddingSize) {
+ PrioritizedPacketQueue queue(/*creation_time=*/Timestamp::Zero());
+ EXPECT_EQ(queue.SizeInPayloadBytes(), DataSize::Zero());
+ static constexpr DataSize kPaddingSize = DataSize::Bytes(190);
+
+ auto packet = std::make_unique<RtpPacketToSend>(/*extensions=*/nullptr);
+ packet->set_packet_type(RtpPacketMediaType::kPadding);
+ packet->SetSsrc(kDefaultSsrc);
+ packet->SetSequenceNumber(/*seq=*/1);
+ packet->SetPadding(kPaddingSize.bytes());
+ queue.Push(/*enqueue_time=*/Timestamp::Zero(), std::move(packet));
+ EXPECT_EQ(queue.SizeInPayloadBytes(), kPaddingSize);
+
+ queue.Pop();
+ EXPECT_EQ(queue.SizeInPayloadBytes(), DataSize::Zero());
+}
+
+TEST(PrioritizedPacketQueue, ReportsOldestEnqueueTime) {
+ PrioritizedPacketQueue queue(/*creation_time=*/Timestamp::Zero());
+ EXPECT_EQ(queue.OldestEnqueueTime(), Timestamp::MinusInfinity());
+
+ // Add three packets, with the middle packet having higher prio.
+ queue.Push(Timestamp::Millis(10),
+ CreatePacket(RtpPacketMediaType::kPadding, /*seq=*/1));
+ queue.Push(Timestamp::Millis(20),
+ CreatePacket(RtpPacketMediaType::kVideo, /*seq=*/2));
+ queue.Push(Timestamp::Millis(30),
+ CreatePacket(RtpPacketMediaType::kPadding, /*seq=*/3));
+ EXPECT_EQ(queue.OldestEnqueueTime(), Timestamp::Millis(10));
+
+ queue.Pop(); // Pop packet with enqueue time 20.
+ EXPECT_EQ(queue.OldestEnqueueTime(), Timestamp::Millis(10));
+
+ queue.Pop(); // Pop packet with enqueue time 10.
+ EXPECT_EQ(queue.OldestEnqueueTime(), Timestamp::Millis(30));
+
+ queue.Pop(); // Pop packet with enqueue time 30, queue empty again.
+ EXPECT_EQ(queue.OldestEnqueueTime(), Timestamp::MinusInfinity());
+}
+
+TEST(PrioritizedPacketQueue, ReportsAverageQueueTime) {
+ PrioritizedPacketQueue queue(/*creation_time=*/Timestamp::Zero());
+ EXPECT_EQ(queue.AverageQueueTime(), TimeDelta::Zero());
+
+ // Add three packets, with the middle packet having higher prio.
+ queue.Push(Timestamp::Millis(10),
+ CreatePacket(RtpPacketMediaType::kPadding, /*seq=*/1));
+ queue.Push(Timestamp::Millis(20),
+ CreatePacket(RtpPacketMediaType::kVideo, /*seq=*/2));
+ queue.Push(Timestamp::Millis(30),
+ CreatePacket(RtpPacketMediaType::kPadding, /*seq=*/3));
+
+ queue.UpdateAverageQueueTime(Timestamp::Millis(40));
+ // Packets have waited 30, 20, 10 ms -> average = 20ms.
+ EXPECT_EQ(queue.AverageQueueTime(), TimeDelta::Millis(20));
+
+ queue.Pop(); // Pop packet with enqueue time 20.
+ EXPECT_EQ(queue.AverageQueueTime(), TimeDelta::Millis(20));
+
+ queue.Pop(); // Pop packet with enqueue time 10.
+ EXPECT_EQ(queue.AverageQueueTime(), TimeDelta::Millis(10));
+
+ queue.Pop(); // Pop packet with enqueue time 30, queue empty again.
+ EXPECT_EQ(queue.AverageQueueTime(), TimeDelta::Zero());
+}
+
+TEST(PrioritizedPacketQueue, SubtractsPausedTimeFromAverageQueueTime) {
+ PrioritizedPacketQueue queue(/*creation_time=*/Timestamp::Zero());
+ EXPECT_EQ(queue.AverageQueueTime(), TimeDelta::Zero());
+
+ // Add a packet and then enable paused state.
+ queue.Push(Timestamp::Millis(100),
+ CreatePacket(RtpPacketMediaType::kPadding, /*seq=*/1));
+ queue.SetPauseState(true, Timestamp::Millis(600));
+ EXPECT_EQ(queue.AverageQueueTime(), TimeDelta::Millis(500));
+
+  // Enqueue a packet 500 ms into the paused state. The queue time of the
+  // original packet is still seen as 500 ms and the new one has 0 ms, giving
+  // an average of 250 ms.
+ queue.Push(Timestamp::Millis(1100),
+ CreatePacket(RtpPacketMediaType::kVideo, /*seq=*/2));
+ EXPECT_EQ(queue.AverageQueueTime(), TimeDelta::Millis(250));
+
+  // Unpause some time later; the queue time is still unchanged.
+ queue.SetPauseState(false, Timestamp::Millis(1600));
+ EXPECT_EQ(queue.AverageQueueTime(), TimeDelta::Millis(250));
+
+  // Update queue time 500 ms after the pause ended. Packet 1 has now waited
+  // 500 + 500 = 1000 ms unpaused and packet 2 has waited 500 ms, so the
+  // average is 750 ms.
+ queue.UpdateAverageQueueTime(Timestamp::Millis(2100));
+ EXPECT_EQ(queue.AverageQueueTime(), TimeDelta::Millis(750));
+}
+
+TEST(PrioritizedPacketQueue, ReportsLeadingAudioEnqueueTime) {
+ PrioritizedPacketQueue queue(/*creation_time=*/Timestamp::Zero());
+ EXPECT_EQ(queue.LeadingAudioPacketEnqueueTime(), Timestamp::MinusInfinity());
+
+ queue.Push(Timestamp::Millis(10),
+ CreatePacket(RtpPacketMediaType::kVideo, /*seq=*/1));
+ EXPECT_EQ(queue.LeadingAudioPacketEnqueueTime(), Timestamp::MinusInfinity());
+
+ queue.Push(Timestamp::Millis(20),
+ CreatePacket(RtpPacketMediaType::kAudio, /*seq=*/2));
+
+ EXPECT_EQ(queue.LeadingAudioPacketEnqueueTime(), Timestamp::Millis(20));
+
+ queue.Pop(); // Pop audio packet.
+ EXPECT_EQ(queue.LeadingAudioPacketEnqueueTime(), Timestamp::MinusInfinity());
+}
+
+TEST(PrioritizedPacketQueue,
+ PushAndPopUpdatesSizeInPacketsPerRtpPacketMediaType) {
+ Timestamp now = Timestamp::Zero();
+ PrioritizedPacketQueue queue(now);
+
+ // Initially all sizes are zero.
+ for (size_t i = 0; i < kNumMediaTypes; ++i) {
+ EXPECT_EQ(queue.SizeInPacketsPerRtpPacketMediaType()[i], 0);
+ }
+
+ // Push packets.
+ queue.Push(now, CreatePacket(RtpPacketMediaType::kAudio, 1));
+ EXPECT_EQ(queue.SizeInPacketsPerRtpPacketMediaType()[static_cast<size_t>(
+ RtpPacketMediaType::kAudio)],
+ 1);
+
+ queue.Push(now, CreatePacket(RtpPacketMediaType::kVideo, 2));
+ EXPECT_EQ(queue.SizeInPacketsPerRtpPacketMediaType()[static_cast<size_t>(
+ RtpPacketMediaType::kVideo)],
+ 1);
+
+ queue.Push(now, CreatePacket(RtpPacketMediaType::kRetransmission, 3));
+ EXPECT_EQ(queue.SizeInPacketsPerRtpPacketMediaType()[static_cast<size_t>(
+ RtpPacketMediaType::kRetransmission)],
+ 1);
+
+ queue.Push(now, CreatePacket(RtpPacketMediaType::kForwardErrorCorrection, 4));
+ EXPECT_EQ(queue.SizeInPacketsPerRtpPacketMediaType()[static_cast<size_t>(
+ RtpPacketMediaType::kForwardErrorCorrection)],
+ 1);
+
+ queue.Push(now, CreatePacket(RtpPacketMediaType::kPadding, 5));
+ EXPECT_EQ(queue.SizeInPacketsPerRtpPacketMediaType()[static_cast<size_t>(
+ RtpPacketMediaType::kPadding)],
+ 1);
+
+ // Now all sizes are 1.
+ for (size_t i = 0; i < kNumMediaTypes; ++i) {
+ EXPECT_EQ(queue.SizeInPacketsPerRtpPacketMediaType()[i], 1);
+ }
+
+ // Popping happens in a priority order based on media type. This test does not
+ // assert what this order is, only that the counter for the popped packet's
+ // media type is decremented.
+ for (size_t i = 0; i < kNumMediaTypes; ++i) {
+ auto popped_packet = queue.Pop();
+ EXPECT_EQ(queue.SizeInPacketsPerRtpPacketMediaType()[static_cast<size_t>(
+ popped_packet->packet_type().value())],
+ 0);
+ }
+
+ // We've popped all packets, so all sizes are zero.
+ for (size_t i = 0; i < kNumMediaTypes; ++i) {
+ EXPECT_EQ(queue.SizeInPacketsPerRtpPacketMediaType()[i], 0);
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/pacing/round_robin_packet_queue.cc b/third_party/libwebrtc/modules/pacing/round_robin_packet_queue.cc
new file mode 100644
index 0000000000..d7525e9d5a
--- /dev/null
+++ b/third_party/libwebrtc/modules/pacing/round_robin_packet_queue.cc
@@ -0,0 +1,403 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/pacing/round_robin_packet_queue.h"
+
+#include <algorithm>
+#include <cstdint>
+#include <utility>
+
+#include "absl/strings/match.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace {
+static constexpr DataSize kMaxLeadingSize = DataSize::Bytes(1400);
+
+int GetPriorityForType(RtpPacketMediaType type) {
+ // Lower number takes priority over higher.
+ switch (type) {
+ case RtpPacketMediaType::kAudio:
+ // Audio is always prioritized over other packet types.
+ return 0;
+ case RtpPacketMediaType::kRetransmission:
+ // Send retransmissions before new media.
+ return 1;
+ case RtpPacketMediaType::kVideo:
+ case RtpPacketMediaType::kForwardErrorCorrection:
+      // Video has "normal" priority, in legacy terms. Redundancy (FEC) is
+      // sent concurrently with video; if it is delayed, it has a lower chance
+      // of being useful.
+ return 2;
+ case RtpPacketMediaType::kPadding:
+      // Padding packets carry no useful media in themselves; they are sent
+      // only to keep the bandwidth estimate (BWE) high.
+ return 3;
+ }
+ RTC_CHECK_NOTREACHED();
+}
+
+} // namespace
+
+RoundRobinPacketQueue::QueuedPacket::QueuedPacket(const QueuedPacket& rhs) =
+ default;
+RoundRobinPacketQueue::QueuedPacket::~QueuedPacket() = default;
+
+RoundRobinPacketQueue::QueuedPacket::QueuedPacket(
+ int priority,
+ Timestamp enqueue_time,
+ int64_t enqueue_order,
+ std::multiset<Timestamp>::iterator enqueue_time_it,
+ std::unique_ptr<RtpPacketToSend> packet)
+ : priority_(priority),
+ enqueue_time_(enqueue_time),
+ enqueue_order_(enqueue_order),
+ is_retransmission_(packet->packet_type() ==
+ RtpPacketMediaType::kRetransmission),
+ enqueue_time_it_(enqueue_time_it),
+ owned_packet_(packet.release()) {}
+
+bool RoundRobinPacketQueue::QueuedPacket::operator<(
+ const RoundRobinPacketQueue::QueuedPacket& other) const {
+ if (priority_ != other.priority_)
+ return priority_ > other.priority_;
+ if (is_retransmission_ != other.is_retransmission_)
+ return other.is_retransmission_;
+
+ return enqueue_order_ > other.enqueue_order_;
+}
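+
+// std::priority_queue pops its largest element, so the comparison above is
+// deliberately inverted: a numerically lower `priority_` (e.g. audio at 0)
+// compares greater than a higher one (e.g. padding at 3) and is popped first.
+// Ties prefer retransmissions, then the oldest enqueue order.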
+
+int RoundRobinPacketQueue::QueuedPacket::Priority() const {
+ return priority_;
+}
+
+RtpPacketMediaType RoundRobinPacketQueue::QueuedPacket::Type() const {
+ return *owned_packet_->packet_type();
+}
+
+uint32_t RoundRobinPacketQueue::QueuedPacket::Ssrc() const {
+ return owned_packet_->Ssrc();
+}
+
+Timestamp RoundRobinPacketQueue::QueuedPacket::EnqueueTime() const {
+ return enqueue_time_;
+}
+
+bool RoundRobinPacketQueue::QueuedPacket::IsRetransmission() const {
+ return Type() == RtpPacketMediaType::kRetransmission;
+}
+
+int64_t RoundRobinPacketQueue::QueuedPacket::EnqueueOrder() const {
+ return enqueue_order_;
+}
+
+RtpPacketToSend* RoundRobinPacketQueue::QueuedPacket::RtpPacket() const {
+ return owned_packet_;
+}
+
+void RoundRobinPacketQueue::QueuedPacket::UpdateEnqueueTimeIterator(
+ std::multiset<Timestamp>::iterator it) {
+ enqueue_time_it_ = it;
+}
+
+std::multiset<Timestamp>::iterator
+RoundRobinPacketQueue::QueuedPacket::EnqueueTimeIterator() const {
+ return enqueue_time_it_;
+}
+
+void RoundRobinPacketQueue::QueuedPacket::SubtractPauseTime(
+ TimeDelta pause_time_sum) {
+ enqueue_time_ -= pause_time_sum;
+}
+
+RoundRobinPacketQueue::PriorityPacketQueue::const_iterator
+RoundRobinPacketQueue::PriorityPacketQueue::begin() const {
+ return c.begin();
+}
+
+RoundRobinPacketQueue::PriorityPacketQueue::const_iterator
+RoundRobinPacketQueue::PriorityPacketQueue::end() const {
+ return c.end();
+}
+
+RoundRobinPacketQueue::Stream::Stream() : size(DataSize::Zero()), ssrc(0) {}
+RoundRobinPacketQueue::Stream::Stream(const Stream& stream) = default;
+RoundRobinPacketQueue::Stream::~Stream() = default;
+
+RoundRobinPacketQueue::RoundRobinPacketQueue(Timestamp start_time)
+ : transport_overhead_per_packet_(DataSize::Zero()),
+ time_last_updated_(start_time),
+ enqueue_count_(0),
+ paused_(false),
+ size_packets_(0),
+ size_packets_per_media_type_({}),
+ size_(DataSize::Zero()),
+ max_size_(kMaxLeadingSize),
+ queue_time_sum_(TimeDelta::Zero()),
+ pause_time_sum_(TimeDelta::Zero()),
+ include_overhead_(false) {}
+
+RoundRobinPacketQueue::~RoundRobinPacketQueue() {
+ // Make sure to release any packets owned by raw pointer in QueuedPacket.
+ while (size_packets_ > 0) {
+ Pop();
+ }
+}
+
+void RoundRobinPacketQueue::Push(Timestamp enqueue_time,
+ std::unique_ptr<RtpPacketToSend> packet) {
+ RTC_CHECK(packet->packet_type().has_value());
+ RtpPacketMediaType packet_type = packet->packet_type().value();
+ int priority = GetPriorityForType(packet_type);
+ if (size_packets_ == 0) {
+ // Single packet fast-path.
+ single_packet_queue_.emplace(
+ QueuedPacket(priority, enqueue_time, enqueue_count_++,
+ enqueue_times_.end(), std::move(packet)));
+ UpdateAverageQueueTime(enqueue_time);
+ single_packet_queue_->SubtractPauseTime(pause_time_sum_);
+ size_packets_ = 1;
+ ++size_packets_per_media_type_[static_cast<size_t>(packet_type)];
+ size_ += PacketSize(*single_packet_queue_);
+ } else {
+ MaybePromoteSinglePacketToNormalQueue();
+ Push(QueuedPacket(priority, enqueue_time, enqueue_count_++,
+ enqueue_times_.insert(enqueue_time), std::move(packet)));
+ }
+}
+
+std::unique_ptr<RtpPacketToSend> RoundRobinPacketQueue::Pop() {
+ if (single_packet_queue_.has_value()) {
+ RTC_DCHECK(stream_priorities_.empty());
+ std::unique_ptr<RtpPacketToSend> rtp_packet(
+ single_packet_queue_->RtpPacket());
+ single_packet_queue_.reset();
+ queue_time_sum_ = TimeDelta::Zero();
+ size_packets_ = 0;
+ RTC_CHECK(rtp_packet->packet_type().has_value());
+ RtpPacketMediaType packet_type = rtp_packet->packet_type().value();
+ size_packets_per_media_type_[static_cast<size_t>(packet_type)] -= 1;
+ RTC_CHECK_GE(size_packets_per_media_type_[static_cast<size_t>(packet_type)],
+ 0);
+ size_ = DataSize::Zero();
+ return rtp_packet;
+ }
+
+ RTC_DCHECK_GT(size_packets_, 0);
+ Stream* stream = GetHighestPriorityStream();
+ const QueuedPacket& queued_packet = stream->packet_queue.top();
+
+ stream_priorities_.erase(stream->priority_it);
+
+  // Calculate the total amount of time spent by this packet in the queue
+  // while in a non-paused state. Note that `pause_time_sum_` was subtracted
+  // from the packet's enqueue time when the packet was pushed, so subtracting
+  // it again now effectively removes the time the packet spent in the queue
+  // while in a paused state.
+ TimeDelta time_in_non_paused_state =
+ time_last_updated_ - queued_packet.EnqueueTime() - pause_time_sum_;
+ queue_time_sum_ -= time_in_non_paused_state;
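+  // Worked example (illustrative): a packet pushed at t = 100 ms while
+  // pause_time_sum_ was 2 ms stores an enqueue time of 98 ms. Popping it at
+  // t = 130 ms with pause_time_sum_ = 5 ms yields 130 - 98 - 5 = 27 ms, i.e.
+  // the 3 ms of pause during its wait is excluded.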
+
+ RTC_CHECK(queued_packet.EnqueueTimeIterator() != enqueue_times_.end());
+ enqueue_times_.erase(queued_packet.EnqueueTimeIterator());
+
+  // Update the `size` of this stream. The general idea is that the stream
+  // that has sent the least amount of bytes should have the highest priority.
+  // The problem with that is that streams may send at different rates, in
+  // which case a "budget" builds up for the stream sending at the lower rate.
+  // To avoid building up too large a budget, we limit `size` to be within
+  // kMaxLeadingSize bytes of the stream that has sent the most.
+ DataSize packet_size = PacketSize(queued_packet);
+ stream->size =
+ std::max(stream->size + packet_size, max_size_ - kMaxLeadingSize);
+ max_size_ = std::max(max_size_, stream->size);
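+  // Worked example (illustrative): with max_size_ = 10 kB and kMaxLeadingSize
+  // = 1.4 kB, a stream that has only accounted 1 kB so far and pops a 1 kB
+  // packet gets size = max(2 kB, 8.6 kB) = 8.6 kB, capping the "budget" a
+  // slow stream can accumulate.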
+
+ size_ -= packet_size;
+ size_packets_ -= 1;
+ size_packets_per_media_type_[static_cast<size_t>(queued_packet.Type())] -= 1;
+ RTC_CHECK(size_packets_ > 0 || queue_time_sum_ == TimeDelta::Zero());
+ RTC_CHECK_GE(
+ size_packets_per_media_type_[static_cast<size_t>(queued_packet.Type())],
+ 0);
+
+ std::unique_ptr<RtpPacketToSend> rtp_packet(queued_packet.RtpPacket());
+ stream->packet_queue.pop();
+
+ // If there are packets left to be sent, schedule the stream again.
+ RTC_CHECK(!IsSsrcScheduled(stream->ssrc));
+ if (stream->packet_queue.empty()) {
+ stream->priority_it = stream_priorities_.end();
+ } else {
+ int priority = stream->packet_queue.top().Priority();
+ stream->priority_it = stream_priorities_.emplace(
+ StreamPrioKey(priority, stream->size), stream->ssrc);
+ }
+
+ return rtp_packet;
+}
+
+int RoundRobinPacketQueue::SizeInPackets() const {
+ return size_packets_;
+}
+
+DataSize RoundRobinPacketQueue::SizeInPayloadBytes() const {
+ return size_;
+}
+
+const std::array<int, kNumMediaTypes>&
+RoundRobinPacketQueue::SizeInPacketsPerRtpPacketMediaType() const {
+ return size_packets_per_media_type_;
+}
+
+Timestamp RoundRobinPacketQueue::LeadingAudioPacketEnqueueTime() const {
+ if (single_packet_queue_.has_value()) {
+ if (single_packet_queue_->Type() == RtpPacketMediaType::kAudio) {
+ return single_packet_queue_->EnqueueTime();
+ }
+ return Timestamp::MinusInfinity();
+ }
+
+ if (stream_priorities_.empty()) {
+ return Timestamp::MinusInfinity();
+ }
+ uint32_t ssrc = stream_priorities_.begin()->second;
+
+ const auto& top_packet = streams_.find(ssrc)->second.packet_queue.top();
+ if (top_packet.Type() == RtpPacketMediaType::kAudio) {
+ return top_packet.EnqueueTime();
+ }
+ return Timestamp::MinusInfinity();
+}
+
+Timestamp RoundRobinPacketQueue::OldestEnqueueTime() const {
+ if (single_packet_queue_.has_value()) {
+ return single_packet_queue_->EnqueueTime();
+ }
+
+ if (size_packets_ == 0)
+ return Timestamp::MinusInfinity();
+ RTC_CHECK(!enqueue_times_.empty());
+ return *enqueue_times_.begin();
+}
+
+void RoundRobinPacketQueue::UpdateAverageQueueTime(Timestamp now) {
+ RTC_CHECK_GE(now, time_last_updated_);
+ if (now == time_last_updated_)
+ return;
+
+ TimeDelta delta = now - time_last_updated_;
+
+ if (paused_) {
+ pause_time_sum_ += delta;
+ } else {
+ queue_time_sum_ += delta * size_packets_;
+ }
+
+ time_last_updated_ = now;
+}
+
+void RoundRobinPacketQueue::SetPauseState(bool paused, Timestamp now) {
+ if (paused_ == paused)
+ return;
+ UpdateAverageQueueTime(now);
+ paused_ = paused;
+}
+
+TimeDelta RoundRobinPacketQueue::AverageQueueTime() const {
+ if (size_packets_ == 0)
+ return TimeDelta::Zero();
+ return queue_time_sum_ / size_packets_;
+}
+
+void RoundRobinPacketQueue::Push(QueuedPacket packet) {
+ auto stream_info_it = streams_.find(packet.Ssrc());
+ if (stream_info_it == streams_.end()) {
+ stream_info_it = streams_.emplace(packet.Ssrc(), Stream()).first;
+ stream_info_it->second.priority_it = stream_priorities_.end();
+ stream_info_it->second.ssrc = packet.Ssrc();
+ }
+
+ Stream* stream = &stream_info_it->second;
+
+ if (stream->priority_it == stream_priorities_.end()) {
+ // If the SSRC is not currently scheduled, add it to `stream_priorities_`.
+ RTC_CHECK(!IsSsrcScheduled(stream->ssrc));
+ stream->priority_it = stream_priorities_.emplace(
+ StreamPrioKey(packet.Priority(), stream->size), packet.Ssrc());
+ } else if (packet.Priority() < stream->priority_it->first.priority) {
+    // If the priority of this SSRC increased, remove the outdated
+    // StreamPrioKey and insert a new one with the new priority. Note that
+    // lower ordinals denote higher priority.
+ stream_priorities_.erase(stream->priority_it);
+ stream->priority_it = stream_priorities_.emplace(
+ StreamPrioKey(packet.Priority(), stream->size), packet.Ssrc());
+ }
+ RTC_CHECK(stream->priority_it != stream_priorities_.end());
+
+ if (packet.EnqueueTimeIterator() == enqueue_times_.end()) {
+ // Promotion from single-packet queue. Just add to enqueue times.
+ packet.UpdateEnqueueTimeIterator(
+ enqueue_times_.insert(packet.EnqueueTime()));
+ } else {
+    // To figure out how much time a packet has spent in the queue while not
+    // paused, we subtract the queue's total pause time so far when the packet
+    // is pushed, and at pop time we subtract the total pause time accumulated
+    // by then. The difference between the two subtractions is exactly the
+    // pause time that elapsed while the packet was queued, which is thereby
+    // excluded.
+ UpdateAverageQueueTime(packet.EnqueueTime());
+ packet.SubtractPauseTime(pause_time_sum_);
+
+ size_packets_ += 1;
+ size_packets_per_media_type_[static_cast<size_t>(packet.Type())] += 1;
+ size_ += PacketSize(packet);
+ }
+
+ stream->packet_queue.push(packet);
+}
+
+DataSize RoundRobinPacketQueue::PacketSize(const QueuedPacket& packet) const {
+ DataSize packet_size = DataSize::Bytes(packet.RtpPacket()->payload_size() +
+ packet.RtpPacket()->padding_size());
+ if (include_overhead_) {
+ packet_size += DataSize::Bytes(packet.RtpPacket()->headers_size()) +
+ transport_overhead_per_packet_;
+ }
+ return packet_size;
+}
+
+void RoundRobinPacketQueue::MaybePromoteSinglePacketToNormalQueue() {
+ if (single_packet_queue_.has_value()) {
+ Push(*single_packet_queue_);
+ single_packet_queue_.reset();
+ }
+}
+
+RoundRobinPacketQueue::Stream*
+RoundRobinPacketQueue::GetHighestPriorityStream() {
+ RTC_CHECK(!stream_priorities_.empty());
+ uint32_t ssrc = stream_priorities_.begin()->second;
+
+ auto stream_info_it = streams_.find(ssrc);
+ RTC_CHECK(stream_info_it != streams_.end());
+ RTC_CHECK(stream_info_it->second.priority_it == stream_priorities_.begin());
+ RTC_CHECK(!stream_info_it->second.packet_queue.empty());
+ return &stream_info_it->second;
+}
+
+bool RoundRobinPacketQueue::IsSsrcScheduled(uint32_t ssrc) const {
+ for (const auto& scheduled_stream : stream_priorities_) {
+ if (scheduled_stream.second == ssrc)
+ return true;
+ }
+ return false;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/pacing/round_robin_packet_queue.h b/third_party/libwebrtc/modules/pacing/round_robin_packet_queue.h
new file mode 100644
index 0000000000..052b98b16b
--- /dev/null
+++ b/third_party/libwebrtc/modules/pacing/round_robin_packet_queue.h
@@ -0,0 +1,172 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_PACING_ROUND_ROBIN_PACKET_QUEUE_H_
+#define MODULES_PACING_ROUND_ROBIN_PACKET_QUEUE_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <list>
+#include <map>
+#include <memory>
+#include <queue>
+#include <set>
+#include <unordered_map>
+
+#include "absl/types/optional.h"
+#include "api/units/data_size.h"
+#include "api/units/time_delta.h"
+#include "api/units/timestamp.h"
+#include "modules/pacing/pacing_controller.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "modules/rtp_rtcp/source/rtp_packet_to_send.h"
+
+namespace webrtc {
+
+class RoundRobinPacketQueue : public PacingController::PacketQueue {
+ public:
+ explicit RoundRobinPacketQueue(Timestamp start_time);
+ ~RoundRobinPacketQueue();
+
+ void Push(Timestamp enqueue_time,
+ std::unique_ptr<RtpPacketToSend> packet) override;
+ std::unique_ptr<RtpPacketToSend> Pop() override;
+
+ int SizeInPackets() const override;
+ DataSize SizeInPayloadBytes() const override;
+ const std::array<int, kNumMediaTypes>& SizeInPacketsPerRtpPacketMediaType()
+ const override;
+ Timestamp LeadingAudioPacketEnqueueTime() const override;
+ Timestamp OldestEnqueueTime() const override;
+ TimeDelta AverageQueueTime() const override;
+ void UpdateAverageQueueTime(Timestamp now) override;
+ void SetPauseState(bool paused, Timestamp now) override;
+
+ private:
+ struct QueuedPacket {
+ public:
+ QueuedPacket(int priority,
+ Timestamp enqueue_time,
+ int64_t enqueue_order,
+ std::multiset<Timestamp>::iterator enqueue_time_it,
+ std::unique_ptr<RtpPacketToSend> packet);
+ QueuedPacket(const QueuedPacket& rhs);
+ ~QueuedPacket();
+
+ bool operator<(const QueuedPacket& other) const;
+
+ int Priority() const;
+ RtpPacketMediaType Type() const;
+ uint32_t Ssrc() const;
+ Timestamp EnqueueTime() const;
+ bool IsRetransmission() const;
+ int64_t EnqueueOrder() const;
+ RtpPacketToSend* RtpPacket() const;
+
+ std::multiset<Timestamp>::iterator EnqueueTimeIterator() const;
+ void UpdateEnqueueTimeIterator(std::multiset<Timestamp>::iterator it);
+ void SubtractPauseTime(TimeDelta pause_time_sum);
+
+ private:
+ int priority_;
+ Timestamp enqueue_time_; // Absolute time of pacer queue entry.
+ int64_t enqueue_order_;
+ bool is_retransmission_; // Cached for performance.
+ std::multiset<Timestamp>::iterator enqueue_time_it_;
+ // Raw pointer since priority_queue doesn't allow for moving
+ // out of the container.
+ RtpPacketToSend* owned_packet_;
+ };
+
+ class PriorityPacketQueue : public std::priority_queue<QueuedPacket> {
+ public:
+ using const_iterator = container_type::const_iterator;
+ const_iterator begin() const;
+ const_iterator end() const;
+ };
+
+ struct StreamPrioKey {
+ StreamPrioKey(int priority, DataSize size)
+ : priority(priority), size(size) {}
+
+ bool operator<(const StreamPrioKey& other) const {
+ if (priority != other.priority)
+ return priority < other.priority;
+ return size < other.size;
+ }
+
+ const int priority;
+ const DataSize size;
+ };
+
+ struct Stream {
+ Stream();
+ Stream(const Stream&);
+
+ virtual ~Stream();
+
+ DataSize size;
+ uint32_t ssrc;
+
+ PriorityPacketQueue packet_queue;
+
+    // Whenever a packet is inserted for this stream, we check whether
+    // `priority_it` points to an element in `stream_priorities_`. If it does,
+    // this stream has already been scheduled; if the scheduled priority is
+    // lower than the priority of the incoming packet, we reschedule this
+    // stream with the higher priority.
+ std::multimap<StreamPrioKey, uint32_t>::iterator priority_it;
+ };
+
+ void Push(QueuedPacket packet);
+
+ DataSize PacketSize(const QueuedPacket& packet) const;
+ void MaybePromoteSinglePacketToNormalQueue();
+
+ Stream* GetHighestPriorityStream();
+
+ // Just used to verify correctness.
+ bool IsSsrcScheduled(uint32_t ssrc) const;
+
+ DataSize transport_overhead_per_packet_;
+
+ Timestamp time_last_updated_;
+
+ int64_t enqueue_count_;
+
+ bool paused_;
+ int size_packets_;
+ std::array<int, kNumMediaTypes> size_packets_per_media_type_;
+ DataSize size_;
+ DataSize max_size_;
+ TimeDelta queue_time_sum_;
+ TimeDelta pause_time_sum_;
+
+ // A map of streams used to prioritize from which stream to send next. We use
+ // a multimap instead of a priority_queue since the priority of a stream can
+ // change as a new packet is inserted, and a multimap allows us to remove and
+ // then reinsert a StreamPrioKey if the priority has increased.
+ std::multimap<StreamPrioKey, uint32_t> stream_priorities_;
+
+ // A map of SSRCs to Streams.
+ std::unordered_map<uint32_t, Stream> streams_;
+
+ // The enqueue time of every packet currently in the queue. Used to figure out
+ // the age of the oldest packet in the queue.
+ std::multiset<Timestamp> enqueue_times_;
+
+ absl::optional<QueuedPacket> single_packet_queue_;
+
+ bool include_overhead_;
+};
+} // namespace webrtc
+
+#endif // MODULES_PACING_ROUND_ROBIN_PACKET_QUEUE_H_
diff --git a/third_party/libwebrtc/modules/pacing/round_robin_packet_queue_unittest.cc b/third_party/libwebrtc/modules/pacing/round_robin_packet_queue_unittest.cc
new file mode 100644
index 0000000000..86f07be429
--- /dev/null
+++ b/third_party/libwebrtc/modules/pacing/round_robin_packet_queue_unittest.cc
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/pacing/round_robin_packet_queue.h"
+
+#include <utility>
+
+#include "api/units/timestamp.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "modules/rtp_rtcp/source/rtp_packet_to_send.h"
+#include "rtc_base/checks.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+namespace {
+
+constexpr uint32_t kDefaultSsrc = 123;
+constexpr int kDefaultPayloadSize = 321;
+
+std::unique_ptr<RtpPacketToSend> CreatePacket(RtpPacketMediaType type,
+ uint16_t sequence_number) {
+ auto packet = std::make_unique<RtpPacketToSend>(/*extensions=*/nullptr);
+ packet->set_packet_type(type);
+ packet->SetSsrc(kDefaultSsrc);
+ packet->SetSequenceNumber(sequence_number);
+ packet->SetPayloadSize(kDefaultPayloadSize);
+ return packet;
+}
+
+} // namespace
+
+TEST(RoundRobinPacketQueueTest,
+ PushAndPopUpdatesSizeInPacketsPerRtpPacketMediaType) {
+ Timestamp now = Timestamp::Zero();
+ RoundRobinPacketQueue queue(now);
+
+ // Initially all sizes are zero.
+ for (size_t i = 0; i < kNumMediaTypes; ++i) {
+ EXPECT_EQ(queue.SizeInPacketsPerRtpPacketMediaType()[i], 0);
+ }
+
+ // Push packets.
+ queue.Push(now, CreatePacket(RtpPacketMediaType::kAudio, 1));
+ EXPECT_EQ(queue.SizeInPacketsPerRtpPacketMediaType()[static_cast<size_t>(
+ RtpPacketMediaType::kAudio)],
+ 1);
+
+ queue.Push(now, CreatePacket(RtpPacketMediaType::kVideo, 2));
+ EXPECT_EQ(queue.SizeInPacketsPerRtpPacketMediaType()[static_cast<size_t>(
+ RtpPacketMediaType::kVideo)],
+ 1);
+
+ queue.Push(now, CreatePacket(RtpPacketMediaType::kRetransmission, 3));
+ EXPECT_EQ(queue.SizeInPacketsPerRtpPacketMediaType()[static_cast<size_t>(
+ RtpPacketMediaType::kRetransmission)],
+ 1);
+
+ queue.Push(now, CreatePacket(RtpPacketMediaType::kForwardErrorCorrection, 4));
+ EXPECT_EQ(queue.SizeInPacketsPerRtpPacketMediaType()[static_cast<size_t>(
+ RtpPacketMediaType::kForwardErrorCorrection)],
+ 1);
+
+ queue.Push(now, CreatePacket(RtpPacketMediaType::kPadding, 5));
+ EXPECT_EQ(queue.SizeInPacketsPerRtpPacketMediaType()[static_cast<size_t>(
+ RtpPacketMediaType::kPadding)],
+ 1);
+
+ // Now all sizes are 1.
+ for (size_t i = 0; i < kNumMediaTypes; ++i) {
+ EXPECT_EQ(queue.SizeInPacketsPerRtpPacketMediaType()[i], 1);
+ }
+
+ // Popping happens in a priority order based on media type. This test does not
+ // assert what this order is, only that the counter for the popped packet's
+ // media type is decremented.
+ for (size_t i = 0; i < kNumMediaTypes; ++i) {
+ auto popped_packet = queue.Pop();
+ EXPECT_EQ(queue.SizeInPacketsPerRtpPacketMediaType()[static_cast<size_t>(
+ popped_packet->packet_type().value())],
+ 0);
+ }
+
+ // We've popped all packets, so all sizes are zero.
+ for (size_t i = 0; i < kNumMediaTypes; ++i) {
+ EXPECT_EQ(queue.SizeInPacketsPerRtpPacketMediaType()[i], 0);
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/pacing/rtp_packet_pacer.h b/third_party/libwebrtc/modules/pacing/rtp_packet_pacer.h
new file mode 100644
index 0000000000..e2cf806385
--- /dev/null
+++ b/third_party/libwebrtc/modules/pacing/rtp_packet_pacer.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_PACING_RTP_PACKET_PACER_H_
+#define MODULES_PACING_RTP_PACKET_PACER_H_
+
+#include <stdint.h>
+
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/units/data_rate.h"
+#include "api/units/data_size.h"
+#include "api/units/time_delta.h"
+#include "api/units/timestamp.h"
+#include "modules/rtp_rtcp/include/rtp_packet_sender.h"
+
+namespace webrtc {
+
+class RtpPacketPacer {
+ public:
+ virtual ~RtpPacketPacer() = default;
+
+ virtual void CreateProbeClusters(
+ std::vector<ProbeClusterConfig> probe_cluster_configs) = 0;
+
+ // Temporarily pause all sending.
+ virtual void Pause() = 0;
+
+ // Resume sending packets.
+ virtual void Resume() = 0;
+
+ virtual void SetCongested(bool congested) = 0;
+
+ // Sets the pacing rates. Must be called once before packets can be sent.
+ virtual void SetPacingRates(DataRate pacing_rate, DataRate padding_rate) = 0;
+
+ // Time since the oldest packet currently in the queue was added.
+ virtual TimeDelta OldestPacketWaitTime() const = 0;
+
+ // Sum of payload + padding bytes of all packets currently in the pacer queue.
+ virtual DataSize QueueSizeData() const = 0;
+
+ // Returns the time when the first packet was sent.
+ virtual absl::optional<Timestamp> FirstSentPacketTime() const = 0;
+
+  // Returns the expected time it will take to send the packets currently in
+  // the queue, given the queue size and pacing bitrate, ignoring priority.
+ virtual TimeDelta ExpectedQueueTime() const = 0;
+
+  // Sets the average upper bound on pacer queuing delay. The pacer may send
+  // at a higher rate than what was configured via SetPacingRates() in order
+  // to keep ExpectedQueueTime() below `limit` on average.
+ virtual void SetQueueTimeLimit(TimeDelta limit) = 0;
+
+  // Currently audio traffic is not accounted for by the pacer and is passed
+  // through. With the introduction of audio BWE, audio traffic will be
+  // accounted for in the pacer budget calculation. Audio traffic will still
+  // be injected at high priority.
+ virtual void SetAccountForAudioPackets(bool account_for_audio) = 0;
+ virtual void SetIncludeOverhead() = 0;
+ virtual void SetTransportOverhead(DataSize overhead_per_packet) = 0;
+};
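+
+// A minimal, hedged usage sketch of this interface; `pacer` stands for any
+// concrete implementation:
+//   pacer.SetPacingRates(DataRate::KilobitsPerSec(300),
+//                        DataRate::KilobitsPerSec(30));  // Must come first.
+//   pacer.SetQueueTimeLimit(TimeDelta::Millis(200));
+//   pacer.Pause();   // Temporarily halt sending.
+//   pacer.Resume();  // Resume paced sending.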
+
+} // namespace webrtc
+#endif // MODULES_PACING_RTP_PACKET_PACER_H_
diff --git a/third_party/libwebrtc/modules/pacing/task_queue_paced_sender.cc b/third_party/libwebrtc/modules/pacing/task_queue_paced_sender.cc
new file mode 100644
index 0000000000..b13bc11546
--- /dev/null
+++ b/third_party/libwebrtc/modules/pacing/task_queue_paced_sender.cc
@@ -0,0 +1,356 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/pacing/task_queue_paced_sender.h"
+
+#include <algorithm>
+#include <utility>
+
+#include "absl/memory/memory.h"
+#include "api/transport/network_types.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/experiments/field_trial_parser.h"
+#include "rtc_base/experiments/field_trial_units.h"
+#include "rtc_base/system/unused.h"
+#include "rtc_base/trace_event.h"
+
+namespace webrtc {
+
+namespace {
+
+constexpr const char* kBurstyPacerFieldTrial = "WebRTC-BurstyPacer";
+
+constexpr const char* kSlackedTaskQueuePacedSenderFieldTrial =
+ "WebRTC-SlackedTaskQueuePacedSender";
+
+} // namespace
+
+const int TaskQueuePacedSender::kNoPacketHoldback = -1;
+
+TaskQueuePacedSender::BurstyPacerFlags::BurstyPacerFlags(
+ const FieldTrialsView& field_trials)
+ : burst("burst") {
+ ParseFieldTrial({&burst}, field_trials.Lookup(kBurstyPacerFieldTrial));
+}
+
+TaskQueuePacedSender::SlackedPacerFlags::SlackedPacerFlags(
+ const FieldTrialsView& field_trials)
+ : allow_low_precision("Enabled"),
+ max_low_precision_expected_queue_time("max_queue_time"),
+ send_burst_interval("send_burst_interval") {
+ ParseFieldTrial({&allow_low_precision, &max_low_precision_expected_queue_time,
+ &send_burst_interval},
+ field_trials.Lookup(kSlackedTaskQueuePacedSenderFieldTrial));
+}
+
+TaskQueuePacedSender::TaskQueuePacedSender(
+ Clock* clock,
+ PacingController::PacketSender* packet_sender,
+ const FieldTrialsView& field_trials,
+ TaskQueueFactory* task_queue_factory,
+ TimeDelta max_hold_back_window,
+ int max_hold_back_window_in_packets)
+ : clock_(clock),
+ bursty_pacer_flags_(field_trials),
+ slacked_pacer_flags_(field_trials),
+ max_hold_back_window_(slacked_pacer_flags_.allow_low_precision
+ ? PacingController::kMinSleepTime
+ : max_hold_back_window),
+ max_hold_back_window_in_packets_(slacked_pacer_flags_.allow_low_precision
+ ? 0
+ : max_hold_back_window_in_packets),
+ pacing_controller_(clock, packet_sender, field_trials),
+ next_process_time_(Timestamp::MinusInfinity()),
+ is_started_(false),
+ is_shutdown_(false),
+ packet_size_(/*alpha=*/0.95),
+ include_overhead_(false),
+ task_queue_(task_queue_factory->CreateTaskQueue(
+ "TaskQueuePacedSender",
+ TaskQueueFactory::Priority::NORMAL)) {
+ RTC_DCHECK_GE(max_hold_back_window_, PacingController::kMinSleepTime);
+  // There are multiple field trials that can affect the burst interval. If
+  // multiple burst values are specified, we pick the largest.
+ absl::optional<TimeDelta> burst = bursty_pacer_flags_.burst.GetOptional();
+ if (slacked_pacer_flags_.allow_low_precision &&
+ slacked_pacer_flags_.send_burst_interval) {
+ TimeDelta slacked_burst = slacked_pacer_flags_.send_burst_interval.Value();
+ if (!burst.has_value() || burst.value() < slacked_burst) {
+ burst = slacked_burst;
+ }
+ }
+ if (burst.has_value()) {
+ pacing_controller_.SetSendBurstInterval(burst.value());
+ }
+}
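+
+// Illustrative (hedged): with both WebRTC-BurstyPacer/burst:20ms/ and
+// WebRTC-SlackedTaskQueuePacedSender/Enabled,send_burst_interval:40ms/ in
+// effect, the constructor above picks the larger 40 ms burst interval.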
+
+TaskQueuePacedSender::~TaskQueuePacedSender() {
+ // Post an immediate task to mark the queue as shutting down.
+ // The rtc::TaskQueue destructor will wait for pending tasks to
+ // complete before continuing.
+ task_queue_.PostTask([&]() {
+ RTC_DCHECK_RUN_ON(&task_queue_);
+ is_shutdown_ = true;
+ });
+}
+
+void TaskQueuePacedSender::EnsureStarted() {
+ task_queue_.PostTask([this]() {
+ RTC_DCHECK_RUN_ON(&task_queue_);
+ is_started_ = true;
+ MaybeProcessPackets(Timestamp::MinusInfinity());
+ });
+}
+
+void TaskQueuePacedSender::CreateProbeClusters(
+ std::vector<ProbeClusterConfig> probe_cluster_configs) {
+ task_queue_.PostTask(
+ [this, probe_cluster_configs = std::move(probe_cluster_configs)]() {
+ RTC_DCHECK_RUN_ON(&task_queue_);
+ pacing_controller_.CreateProbeClusters(probe_cluster_configs);
+ MaybeProcessPackets(Timestamp::MinusInfinity());
+ });
+}
+
+void TaskQueuePacedSender::Pause() {
+ task_queue_.PostTask([this]() {
+ RTC_DCHECK_RUN_ON(&task_queue_);
+ pacing_controller_.Pause();
+ });
+}
+
+void TaskQueuePacedSender::Resume() {
+ task_queue_.PostTask([this]() {
+ RTC_DCHECK_RUN_ON(&task_queue_);
+ pacing_controller_.Resume();
+ MaybeProcessPackets(Timestamp::MinusInfinity());
+ });
+}
+
+void TaskQueuePacedSender::SetCongested(bool congested) {
+ task_queue_.PostTask([this, congested]() {
+ RTC_DCHECK_RUN_ON(&task_queue_);
+ pacing_controller_.SetCongested(congested);
+ MaybeProcessPackets(Timestamp::MinusInfinity());
+ });
+}
+
+void TaskQueuePacedSender::SetPacingRates(DataRate pacing_rate,
+ DataRate padding_rate) {
+ task_queue_.PostTask([this, pacing_rate, padding_rate]() {
+ RTC_DCHECK_RUN_ON(&task_queue_);
+ pacing_controller_.SetPacingRates(pacing_rate, padding_rate);
+ MaybeProcessPackets(Timestamp::MinusInfinity());
+ });
+}
+
+void TaskQueuePacedSender::EnqueuePackets(
+ std::vector<std::unique_ptr<RtpPacketToSend>> packets) {
+ task_queue_.PostTask([this, packets = std::move(packets)]() mutable {
+ RTC_DCHECK_RUN_ON(&task_queue_);
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("webrtc"),
+ "TaskQueuePacedSender::EnqueuePackets");
+ for (auto& packet : packets) {
+ TRACE_EVENT2(TRACE_DISABLED_BY_DEFAULT("webrtc"),
+ "TaskQueuePacedSender::EnqueuePackets::Loop",
+ "sequence_number", packet->SequenceNumber(), "rtp_timestamp",
+ packet->Timestamp());
+
+ size_t packet_size = packet->payload_size() + packet->padding_size();
+ if (include_overhead_) {
+ packet_size += packet->headers_size();
+ }
+ packet_size_.Apply(1, packet_size);
+ RTC_DCHECK_GE(packet->capture_time(), Timestamp::Zero());
+ pacing_controller_.EnqueuePacket(std::move(packet));
+ }
+ MaybeProcessPackets(Timestamp::MinusInfinity());
+ });
+}
+
+void TaskQueuePacedSender::SetAccountForAudioPackets(bool account_for_audio) {
+ task_queue_.PostTask([this, account_for_audio]() {
+ RTC_DCHECK_RUN_ON(&task_queue_);
+ pacing_controller_.SetAccountForAudioPackets(account_for_audio);
+ MaybeProcessPackets(Timestamp::MinusInfinity());
+ });
+}
+
+void TaskQueuePacedSender::SetIncludeOverhead() {
+ task_queue_.PostTask([this]() {
+ RTC_DCHECK_RUN_ON(&task_queue_);
+ include_overhead_ = true;
+ pacing_controller_.SetIncludeOverhead();
+ MaybeProcessPackets(Timestamp::MinusInfinity());
+ });
+}
+
+void TaskQueuePacedSender::SetTransportOverhead(DataSize overhead_per_packet) {
+ task_queue_.PostTask([this, overhead_per_packet]() {
+ RTC_DCHECK_RUN_ON(&task_queue_);
+ pacing_controller_.SetTransportOverhead(overhead_per_packet);
+ MaybeProcessPackets(Timestamp::MinusInfinity());
+ });
+}
+
+void TaskQueuePacedSender::SetQueueTimeLimit(TimeDelta limit) {
+ task_queue_.PostTask([this, limit]() {
+ RTC_DCHECK_RUN_ON(&task_queue_);
+ pacing_controller_.SetQueueTimeLimit(limit);
+ MaybeProcessPackets(Timestamp::MinusInfinity());
+ });
+}
+
+TimeDelta TaskQueuePacedSender::ExpectedQueueTime() const {
+ return GetStats().expected_queue_time;
+}
+
+DataSize TaskQueuePacedSender::QueueSizeData() const {
+ return GetStats().queue_size;
+}
+
+absl::optional<Timestamp> TaskQueuePacedSender::FirstSentPacketTime() const {
+ return GetStats().first_sent_packet_time;
+}
+
+TimeDelta TaskQueuePacedSender::OldestPacketWaitTime() const {
+ Timestamp oldest_packet = GetStats().oldest_packet_enqueue_time;
+ if (oldest_packet.IsInfinite()) {
+ return TimeDelta::Zero();
+ }
+
+  // TODO(webrtc:9716): The clock is not always monotonic.
+ Timestamp current = clock_->CurrentTime();
+ if (current < oldest_packet) {
+ return TimeDelta::Zero();
+ }
+
+ return current - oldest_packet;
+}
+
+void TaskQueuePacedSender::OnStatsUpdated(const Stats& stats) {
+ MutexLock lock(&stats_mutex_);
+ current_stats_ = stats;
+}
+
+void TaskQueuePacedSender::MaybeProcessPackets(
+ Timestamp scheduled_process_time) {
+ RTC_DCHECK_RUN_ON(&task_queue_);
+
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("webrtc"),
+ "TaskQueuePacedSender::MaybeProcessPackets");
+
+ if (is_shutdown_ || !is_started_) {
+ return;
+ }
+
+ Timestamp next_send_time = pacing_controller_.NextSendTime();
+ RTC_DCHECK(next_send_time.IsFinite());
+ const Timestamp now = clock_->CurrentTime();
+ TimeDelta early_execute_margin =
+ pacing_controller_.IsProbing()
+ ? PacingController::kMaxEarlyProbeProcessing
+ : TimeDelta::Zero();
+
+ // Process packets and update stats.
+ while (next_send_time <= now + early_execute_margin) {
+ pacing_controller_.ProcessPackets();
+ next_send_time = pacing_controller_.NextSendTime();
+ RTC_DCHECK(next_send_time.IsFinite());
+
+ // Probing state could change. Get margin after process packets.
+ early_execute_margin = pacing_controller_.IsProbing()
+ ? PacingController::kMaxEarlyProbeProcessing
+ : TimeDelta::Zero();
+ }
+ UpdateStats();
+
+  // Ignore a retired scheduled task; otherwise reset `next_process_time_`.
+ if (scheduled_process_time.IsFinite()) {
+ if (scheduled_process_time != next_process_time_) {
+ return;
+ }
+ next_process_time_ = Timestamp::MinusInfinity();
+ }
+
+  // Do not hold back while probing.
+ TimeDelta hold_back_window = TimeDelta::Zero();
+ if (!pacing_controller_.IsProbing()) {
+ hold_back_window = max_hold_back_window_;
+ DataRate pacing_rate = pacing_controller_.pacing_rate();
+ if (max_hold_back_window_in_packets_ != kNoPacketHoldback &&
+ !pacing_rate.IsZero() &&
+ packet_size_.filtered() != rtc::ExpFilter::kValueUndefined) {
+ TimeDelta avg_packet_send_time =
+ DataSize::Bytes(packet_size_.filtered()) / pacing_rate;
+ hold_back_window =
+ std::min(hold_back_window,
+ avg_packet_send_time * max_hold_back_window_in_packets_);
+ }
+ }
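+  // Illustrative numbers (hedged): at pacing_rate = 1 Mbps with a filtered
+  // packet size of 1250 bytes, avg_packet_send_time is 10 ms; with
+  // max_hold_back_window_in_packets_ = 3, the hold-back becomes
+  // min(max_hold_back_window_, 30 ms).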
+
+ // Calculate next process time.
+ TimeDelta time_to_next_process =
+ std::max(hold_back_window, next_send_time - now - early_execute_margin);
+ next_send_time = now + time_to_next_process;
+
+  // If there is no in-flight task, or the in-flight task is scheduled later
+  // than `next_send_time`, schedule a new one. The previous in-flight task
+  // will be retired.
+ if (next_process_time_.IsMinusInfinity() ||
+ next_process_time_ > next_send_time) {
+ // Prefer low precision if allowed and not probing.
+ TaskQueueBase::DelayPrecision precision =
+ slacked_pacer_flags_.allow_low_precision &&
+ !pacing_controller_.IsProbing()
+ ? TaskQueueBase::DelayPrecision::kLow
+ : TaskQueueBase::DelayPrecision::kHigh;
+ // Check for cases where we need high precision.
+ if (precision == TaskQueueBase::DelayPrecision::kLow) {
+ auto& packets_per_type =
+ pacing_controller_.SizeInPacketsPerRtpPacketMediaType();
+ bool audio_or_retransmission_packets_in_queue =
+ packets_per_type[static_cast<size_t>(RtpPacketMediaType::kAudio)] >
+ 0 ||
+ packets_per_type[static_cast<size_t>(
+ RtpPacketMediaType::kRetransmission)] > 0;
+ bool queue_time_too_large =
+ slacked_pacer_flags_.max_low_precision_expected_queue_time &&
+ pacing_controller_.ExpectedQueueTime() >=
+ slacked_pacer_flags_.max_low_precision_expected_queue_time
+ .Value();
+ if (audio_or_retransmission_packets_in_queue || queue_time_too_large) {
+ precision = TaskQueueBase::DelayPrecision::kHigh;
+ }
+ }
+
+ task_queue_.Get()->PostDelayedTaskWithPrecision(
+ precision,
+ [this, next_send_time]() { MaybeProcessPackets(next_send_time); },
+ time_to_next_process.RoundUpTo(TimeDelta::Millis(1)));
+ next_process_time_ = next_send_time;
+ }
+}
+
+void TaskQueuePacedSender::UpdateStats() {
+ Stats new_stats;
+ new_stats.expected_queue_time = pacing_controller_.ExpectedQueueTime();
+ new_stats.first_sent_packet_time = pacing_controller_.FirstSentPacketTime();
+ new_stats.oldest_packet_enqueue_time =
+ pacing_controller_.OldestPacketEnqueueTime();
+ new_stats.queue_size = pacing_controller_.QueueSizeData();
+ OnStatsUpdated(new_stats);
+}
+
+TaskQueuePacedSender::Stats TaskQueuePacedSender::GetStats() const {
+ MutexLock lock(&stats_mutex_);
+ return current_stats_;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/pacing/task_queue_paced_sender.h b/third_party/libwebrtc/modules/pacing/task_queue_paced_sender.h
new file mode 100644
index 0000000000..97b0453e0b
--- /dev/null
+++ b/third_party/libwebrtc/modules/pacing/task_queue_paced_sender.h
@@ -0,0 +1,196 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_PACING_TASK_QUEUE_PACED_SENDER_H_
+#define MODULES_PACING_TASK_QUEUE_PACED_SENDER_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <memory>
+#include <vector>
+
+#include "absl/base/attributes.h"
+#include "absl/types/optional.h"
+#include "api/field_trials_view.h"
+#include "api/sequence_checker.h"
+#include "api/task_queue/task_queue_factory.h"
+#include "api/units/data_size.h"
+#include "api/units/time_delta.h"
+#include "api/units/timestamp.h"
+#include "modules/pacing/pacing_controller.h"
+#include "modules/pacing/rtp_packet_pacer.h"
+#include "modules/rtp_rtcp/source/rtp_packet_to_send.h"
+#include "rtc_base/experiments/field_trial_parser.h"
+#include "rtc_base/numerics/exp_filter.h"
+#include "rtc_base/task_queue.h"
+#include "rtc_base/thread_annotations.h"
+
+namespace webrtc {
+class Clock;
+
+class TaskQueuePacedSender : public RtpPacketPacer, public RtpPacketSender {
+ public:
+ static const int kNoPacketHoldback;
+
+  // The `max_hold_back_window` parameter sets a lower bound on the time to
+  // sleep if the pacer queue is non-empty and packets can't immediately be
+  // processed. Increasing this reduces thread wakeups at the expense of
+  // higher latency.
+ TaskQueuePacedSender(Clock* clock,
+ PacingController::PacketSender* packet_sender,
+ const FieldTrialsView& field_trials,
+ TaskQueueFactory* task_queue_factory,
+ TimeDelta max_hold_back_window,
+ int max_hold_back_window_in_packets);
+
+ ~TaskQueuePacedSender() override;
+
+ // Ensure that necessary delayed tasks are scheduled.
+ void EnsureStarted();
+
+ // Methods implementing RtpPacketSender.
+
+ // Adds the packet to the queue and calls
+ // PacingController::PacketSender::SendPacket() when it's time to send.
+ void EnqueuePackets(
+ std::vector<std::unique_ptr<RtpPacketToSend>> packets) override;
+
+ // Methods implementing RtpPacketPacer.
+
+ void CreateProbeClusters(
+ std::vector<ProbeClusterConfig> probe_cluster_configs) override;
+
+ // Temporarily pause all sending.
+ void Pause() override;
+
+ // Resume sending packets.
+ void Resume() override;
+
+ void SetCongested(bool congested) override;
+
+ // Sets the pacing rates. Must be called once before packets can be sent.
+ void SetPacingRates(DataRate pacing_rate, DataRate padding_rate) override;
+
+  // Currently audio traffic is not accounted for by the pacer and is passed
+  // through. With the introduction of audio BWE, audio traffic will be
+  // accounted for in the pacer budget calculation. Audio traffic will still
+  // be injected at high priority.
+ void SetAccountForAudioPackets(bool account_for_audio) override;
+
+ void SetIncludeOverhead() override;
+ void SetTransportOverhead(DataSize overhead_per_packet) override;
+
+ // Returns the time since the oldest queued packet was enqueued.
+ TimeDelta OldestPacketWaitTime() const override;
+
+ // Returns total size of all packets in the pacer queue.
+ DataSize QueueSizeData() const override;
+
+  // Returns the time when the first packet was sent.
+ absl::optional<Timestamp> FirstSentPacketTime() const override;
+
+  // Returns the expected time it will take to send the packets currently in
+  // the queue, given the queue size and pacing bitrate, ignoring priority.
+ TimeDelta ExpectedQueueTime() const override;
+
+  // Sets the max desired queuing delay; the pacer will override the pacing
+  // rate specified by SetPacingRates() if needed to achieve this goal.
+ void SetQueueTimeLimit(TimeDelta limit) override;
+
+ protected:
+ // Exposed as protected for test.
+ struct Stats {
+ Stats()
+ : oldest_packet_enqueue_time(Timestamp::MinusInfinity()),
+ queue_size(DataSize::Zero()),
+ expected_queue_time(TimeDelta::Zero()) {}
+ Timestamp oldest_packet_enqueue_time;
+ DataSize queue_size;
+ TimeDelta expected_queue_time;
+ absl::optional<Timestamp> first_sent_packet_time;
+ };
+ void OnStatsUpdated(const Stats& stats);
+
+ private:
+  // Checks if it is time to send packets, or schedules a delayed task if not.
+  // Timestamp::MinusInfinity() indicates that this call was _not_ scheduled
+  // by the pacing controller. In that case, packets may be processed
+  // immediately; otherwise a delayed task is scheduled that calls this method
+  // again with the desired (finite) scheduled process time.
+ void MaybeProcessPackets(Timestamp scheduled_process_time);
+
+ void UpdateStats() RTC_RUN_ON(task_queue_);
+ Stats GetStats() const;
+
+ Clock* const clock_;
+ struct BurstyPacerFlags {
+ // Parses `kBurstyPacerFieldTrial`. Example:
+ // --force-fieldtrials=WebRTC-BurstyPacer/burst:20ms/
+ explicit BurstyPacerFlags(const FieldTrialsView& field_trials);
+    // If set, the pacer is allowed to build up a packet "debt" that
+    // corresponds to approximately the send rate during the specified
+    // interval.
+ FieldTrialOptional<TimeDelta> burst;
+ };
+ const BurstyPacerFlags bursty_pacer_flags_;
+ struct SlackedPacerFlags {
+ // Parses `kSlackedTaskQueuePacedSenderFieldTrial`. Example:
+ // --force-fieldtrials=WebRTC-SlackedTaskQueuePacedSender/Enabled,max_queue_time:75ms/
+ explicit SlackedPacerFlags(const FieldTrialsView& field_trials);
+ // When "Enabled", delayed tasks invoking MaybeProcessPackets() are
+ // scheduled using low precision instead of high precision, resulting in
+ // less idle wake ups and packets being sent in bursts if the `task_queue_`
+ // implementation supports slack. When probing, high precision is used
+ // regardless to ensure good bandwidth estimation.
+ FieldTrialFlag allow_low_precision;
+ // Controlled via the "max_queue_time" experiment argument. If set, uses
+ // high precision scheduling of MaybeProcessPackets() whenever the expected
+ // queue time is greater than or equal to this value.
+ FieldTrialOptional<TimeDelta> max_low_precision_expected_queue_time;
+ // Controlled via "send_burst_interval" experiment argument. If set, the
+ // pacer is allowed to build up a packet "debt" that correspond to
+ // approximately the send rate during the specified interval.
+ FieldTrialOptional<TimeDelta> send_burst_interval;
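+    // A combined trial string using the arguments above might look like
+    // (illustrative):
+    // --force-fieldtrials=WebRTC-SlackedTaskQueuePacedSender/
+    //     Enabled,max_queue_time:75ms,send_burst_interval:20ms/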
+ };
+ const SlackedPacerFlags slacked_pacer_flags_;
+  // The holdback window prevents too frequent delayed MaybeProcessPackets()
+  // calls. These limits are only applicable if `allow_low_precision` is
+  // false.
+ const TimeDelta max_hold_back_window_;
+ const int max_hold_back_window_in_packets_;
+
+ PacingController pacing_controller_ RTC_GUARDED_BY(task_queue_);
+
+ // We want only one (valid) delayed process task in flight at a time.
+ // If the value of `next_process_time_` is finite, it is an id for a
+ // delayed task that will call MaybeProcessPackets() with that time
+ // as parameter.
+ // Timestamp::MinusInfinity() indicates no valid pending task.
+ Timestamp next_process_time_ RTC_GUARDED_BY(task_queue_);
+
+ // Indicates if this task queue is started. If not, don't allow
+ // posting delayed tasks yet.
+ bool is_started_ RTC_GUARDED_BY(task_queue_);
+
+ // Indicates if this task queue is shutting down. If so, don't allow
+ // posting any more delayed tasks as that can cause the task queue to
+ // never drain.
+ bool is_shutdown_ RTC_GUARDED_BY(task_queue_);
+
+ // Filtered size of enqueued packets, in bytes.
+ rtc::ExpFilter packet_size_ RTC_GUARDED_BY(task_queue_);
+ bool include_overhead_ RTC_GUARDED_BY(task_queue_);
+
+ mutable Mutex stats_mutex_;
+ Stats current_stats_ RTC_GUARDED_BY(stats_mutex_);
+
+ rtc::TaskQueue task_queue_;
+};
+} // namespace webrtc
+#endif // MODULES_PACING_TASK_QUEUE_PACED_SENDER_H_
diff --git a/third_party/libwebrtc/modules/pacing/task_queue_paced_sender_unittest.cc b/third_party/libwebrtc/modules/pacing/task_queue_paced_sender_unittest.cc
new file mode 100644
index 0000000000..e6dfe26834
--- /dev/null
+++ b/third_party/libwebrtc/modules/pacing/task_queue_paced_sender_unittest.cc
@@ -0,0 +1,839 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/pacing/task_queue_paced_sender.h"
+
+#include <algorithm>
+#include <atomic>
+#include <list>
+#include <memory>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "absl/functional/any_invocable.h"
+#include "api/task_queue/task_queue_base.h"
+#include "api/transport/network_types.h"
+#include "api/units/data_rate.h"
+#include "modules/pacing/packet_router.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "test/scoped_key_value_config.h"
+#include "test/time_controller/simulated_time_controller.h"
+
+using ::testing::_;
+using ::testing::AtLeast;
+using ::testing::Return;
+using ::testing::SaveArg;
+
+namespace webrtc {
+namespace {
+constexpr uint32_t kAudioSsrc = 12345;
+constexpr uint32_t kVideoSsrc = 234565;
+constexpr uint32_t kVideoRtxSsrc = 34567;
+constexpr uint32_t kFlexFecSsrc = 45678;
+constexpr size_t kDefaultPacketSize = 1234;
+
+class MockPacketRouter : public PacketRouter {
+ public:
+ MOCK_METHOD(void,
+ SendPacket,
+ (std::unique_ptr<RtpPacketToSend> packet,
+ const PacedPacketInfo& cluster_info),
+ (override));
+ MOCK_METHOD(std::vector<std::unique_ptr<RtpPacketToSend>>,
+ FetchFec,
+ (),
+ (override));
+ MOCK_METHOD(std::vector<std::unique_ptr<RtpPacketToSend>>,
+ GeneratePadding,
+ (DataSize target_size),
+ (override));
+};
+
+std::vector<std::unique_ptr<RtpPacketToSend>> GeneratePadding(
+ DataSize target_size) {
+ // 224 bytes is the max padding size for plain padding packets generated by
+ // RTPSender::GeneratePadding().
+ const DataSize kMaxPaddingPacketSize = DataSize::Bytes(224);
+ DataSize padding_generated = DataSize::Zero();
+ std::vector<std::unique_ptr<RtpPacketToSend>> padding_packets;
+ while (padding_generated < target_size) {
+ DataSize packet_size =
+ std::min(target_size - padding_generated, kMaxPaddingPacketSize);
+ padding_generated += packet_size;
+ auto padding_packet =
+ std::make_unique<RtpPacketToSend>(/*extensions=*/nullptr);
+ padding_packet->set_packet_type(RtpPacketMediaType::kPadding);
+ padding_packet->SetPadding(packet_size.bytes());
+ padding_packets.push_back(std::move(padding_packet));
+ }
+ return padding_packets;
+}
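+// Worked example (illustrative): a 1000 byte target yields five packets,
+// four of 224 bytes plus one 104 byte remainder, since each plain padding
+// packet is capped at kMaxPaddingPacketSize.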
+
+class TaskQueueWithFakePrecisionFactory : public TaskQueueFactory {
+ public:
+ explicit TaskQueueWithFakePrecisionFactory(
+ TaskQueueFactory* task_queue_factory)
+ : task_queue_factory_(task_queue_factory) {}
+
+ std::unique_ptr<TaskQueueBase, TaskQueueDeleter> CreateTaskQueue(
+ absl::string_view name,
+ Priority priority) const override {
+ return std::unique_ptr<TaskQueueBase, TaskQueueDeleter>(
+ new TaskQueueWithFakePrecision(
+ const_cast<TaskQueueWithFakePrecisionFactory*>(this),
+ task_queue_factory_));
+ }
+
+ int delayed_low_precision_count() const {
+ return delayed_low_precision_count_;
+ }
+ int delayed_high_precision_count() const {
+ return delayed_high_precision_count_;
+ }
+
+ private:
+ friend class TaskQueueWithFakePrecision;
+
+ class TaskQueueWithFakePrecision : public TaskQueueBase {
+ public:
+ TaskQueueWithFakePrecision(
+ TaskQueueWithFakePrecisionFactory* parent_factory,
+ TaskQueueFactory* task_queue_factory)
+ : parent_factory_(parent_factory),
+ task_queue_(task_queue_factory->CreateTaskQueue(
+ "TaskQueueWithFakePrecision",
+ TaskQueueFactory::Priority::NORMAL)) {}
+ ~TaskQueueWithFakePrecision() override {}
+
+ void Delete() override {
+ // `task_queue_->Delete()` is implicitly called in the destructor due to
+ // TaskQueueDeleter.
+ delete this;
+ }
+ void PostTask(absl::AnyInvocable<void() &&> task) override {
+ task_queue_->PostTask(WrapTask(std::move(task)));
+ }
+ void PostDelayedTask(absl::AnyInvocable<void() &&> task,
+ TimeDelta delay) override {
+ ++parent_factory_->delayed_low_precision_count_;
+ task_queue_->PostDelayedTask(WrapTask(std::move(task)), delay);
+ }
+ void PostDelayedHighPrecisionTask(absl::AnyInvocable<void() &&> task,
+ TimeDelta delay) override {
+ ++parent_factory_->delayed_high_precision_count_;
+ task_queue_->PostDelayedHighPrecisionTask(WrapTask(std::move(task)),
+ delay);
+ }
+
+ private:
+ absl::AnyInvocable<void() &&> WrapTask(absl::AnyInvocable<void() &&> task) {
+ return [this, task = std::move(task)]() mutable {
+ CurrentTaskQueueSetter set_current(this);
+ std::move(task)();
+ };
+ }
+
+ TaskQueueWithFakePrecisionFactory* parent_factory_;
+ std::unique_ptr<TaskQueueBase, TaskQueueDeleter> task_queue_;
+ };
+
+ TaskQueueFactory* task_queue_factory_;
+  std::atomic<int> delayed_low_precision_count_ = 0;
+  std::atomic<int> delayed_high_precision_count_ = 0;
+};
+
+} // namespace
+
+namespace test {
+
+std::unique_ptr<RtpPacketToSend> BuildRtpPacket(RtpPacketMediaType type) {
+ auto packet = std::make_unique<RtpPacketToSend>(nullptr);
+ packet->set_packet_type(type);
+ switch (type) {
+ case RtpPacketMediaType::kAudio:
+ packet->SetSsrc(kAudioSsrc);
+ break;
+ case RtpPacketMediaType::kVideo:
+ packet->SetSsrc(kVideoSsrc);
+ break;
+ case RtpPacketMediaType::kRetransmission:
+ case RtpPacketMediaType::kPadding:
+ packet->SetSsrc(kVideoRtxSsrc);
+ break;
+ case RtpPacketMediaType::kForwardErrorCorrection:
+ packet->SetSsrc(kFlexFecSsrc);
+ break;
+ }
+
+ packet->SetPayloadSize(kDefaultPacketSize);
+ return packet;
+}
+
+std::vector<std::unique_ptr<RtpPacketToSend>> GeneratePackets(
+ RtpPacketMediaType type,
+ size_t num_packets) {
+ std::vector<std::unique_ptr<RtpPacketToSend>> packets;
+ for (size_t i = 0; i < num_packets; ++i) {
+ packets.push_back(BuildRtpPacket(type));
+ }
+ return packets;
+}
+
+TEST(TaskQueuePacedSenderTest, PacesPackets) {
+ GlobalSimulatedTimeController time_controller(Timestamp::Millis(1234));
+ MockPacketRouter packet_router;
+ ScopedKeyValueConfig trials;
+ TaskQueuePacedSender pacer(time_controller.GetClock(), &packet_router, trials,
+ time_controller.GetTaskQueueFactory(),
+ PacingController::kMinSleepTime,
+ TaskQueuePacedSender::kNoPacketHoldback);
+
+ // Insert a number of packets, covering one second.
+ static constexpr size_t kPacketsToSend = 42;
+ pacer.SetPacingRates(
+ DataRate::BitsPerSec(kDefaultPacketSize * 8 * kPacketsToSend),
+ DataRate::Zero());
+ pacer.EnsureStarted();
+ pacer.EnqueuePackets(
+ GeneratePackets(RtpPacketMediaType::kVideo, kPacketsToSend));
+
+ // Expect all of them to be sent.
+ size_t packets_sent = 0;
+ Timestamp end_time = Timestamp::PlusInfinity();
+ EXPECT_CALL(packet_router, SendPacket)
+ .WillRepeatedly([&](std::unique_ptr<RtpPacketToSend> packet,
+ const PacedPacketInfo& cluster_info) {
+ ++packets_sent;
+ if (packets_sent == kPacketsToSend) {
+ end_time = time_controller.GetClock()->CurrentTime();
+ }
+ });
+
+ const Timestamp start_time = time_controller.GetClock()->CurrentTime();
+
+  // Packets should be sent over a period of close to 1s. Expect a little
+  // less than this since initial probing is a bit quicker.
+ time_controller.AdvanceTime(TimeDelta::Seconds(1));
+ EXPECT_EQ(packets_sent, kPacketsToSend);
+ ASSERT_TRUE(end_time.IsFinite());
+ EXPECT_NEAR((end_time - start_time).ms<double>(), 1000.0, 50.0);
+}
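+// A note on the arithmetic above (illustrative): the pacing rate is
+// kDefaultPacketSize * 8 * kPacketsToSend bits per second, so draining
+// kPacketsToSend packets of kDefaultPacketSize bytes takes almost exactly one
+// second; the 50 ms tolerance absorbs the slightly quicker initial sends.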
+
+TEST(TaskQueuePacedSenderTest, ReschedulesProcessOnRateChange) {
+ GlobalSimulatedTimeController time_controller(Timestamp::Millis(1234));
+ MockPacketRouter packet_router;
+ ScopedKeyValueConfig trials;
+ TaskQueuePacedSender pacer(time_controller.GetClock(), &packet_router, trials,
+ time_controller.GetTaskQueueFactory(),
+ PacingController::kMinSleepTime,
+ TaskQueuePacedSender::kNoPacketHoldback);
+
+ // Insert a number of packets to be sent 200ms apart.
+ const size_t kPacketsPerSecond = 5;
+ const DataRate kPacingRate =
+ DataRate::BitsPerSec(kDefaultPacketSize * 8 * kPacketsPerSecond);
+ pacer.SetPacingRates(kPacingRate, DataRate::Zero());
+ pacer.EnsureStarted();
+
+ // Send some initial packets to be rid of any probes.
+ EXPECT_CALL(packet_router, SendPacket).Times(kPacketsPerSecond);
+ pacer.EnqueuePackets(
+ GeneratePackets(RtpPacketMediaType::kVideo, kPacketsPerSecond));
+ time_controller.AdvanceTime(TimeDelta::Seconds(1));
+
+  // Insert three packets, and record the send time of each of them.
+  // After the second packet is sent, double the send rate so we can
+  // check that the third packet is sent after half the wait time.
+ Timestamp first_packet_time = Timestamp::MinusInfinity();
+ Timestamp second_packet_time = Timestamp::MinusInfinity();
+ Timestamp third_packet_time = Timestamp::MinusInfinity();
+
+ EXPECT_CALL(packet_router, SendPacket)
+ .Times(3)
+ .WillRepeatedly([&](std::unique_ptr<RtpPacketToSend> packet,
+ const PacedPacketInfo& cluster_info) {
+ if (first_packet_time.IsInfinite()) {
+ first_packet_time = time_controller.GetClock()->CurrentTime();
+ } else if (second_packet_time.IsInfinite()) {
+ second_packet_time = time_controller.GetClock()->CurrentTime();
+ pacer.SetPacingRates(2 * kPacingRate, DataRate::Zero());
+ } else {
+ third_packet_time = time_controller.GetClock()->CurrentTime();
+ }
+ });
+
+ pacer.EnqueuePackets(GeneratePackets(RtpPacketMediaType::kVideo, 3));
+ time_controller.AdvanceTime(TimeDelta::Millis(500));
+ ASSERT_TRUE(third_packet_time.IsFinite());
+ EXPECT_NEAR((second_packet_time - first_packet_time).ms<double>(), 200.0,
+ 1.0);
+ EXPECT_NEAR((third_packet_time - second_packet_time).ms<double>(), 100.0,
+ 1.0);
+}
+
+TEST(TaskQueuePacedSenderTest, SendsAudioImmediately) {
+ GlobalSimulatedTimeController time_controller(Timestamp::Millis(1234));
+ MockPacketRouter packet_router;
+ ScopedKeyValueConfig trials;
+ TaskQueuePacedSender pacer(time_controller.GetClock(), &packet_router, trials,
+ time_controller.GetTaskQueueFactory(),
+ PacingController::kMinSleepTime,
+ TaskQueuePacedSender::kNoPacketHoldback);
+
+ const DataRate kPacingDataRate = DataRate::KilobitsPerSec(125);
+ const DataSize kPacketSize = DataSize::Bytes(kDefaultPacketSize);
+ const TimeDelta kPacketPacingTime = kPacketSize / kPacingDataRate;
+
+ pacer.SetPacingRates(kPacingDataRate, DataRate::Zero());
+ pacer.EnsureStarted();
+
+ // Add some initial video packets, only one should be sent.
+ EXPECT_CALL(packet_router, SendPacket);
+ pacer.EnqueuePackets(GeneratePackets(RtpPacketMediaType::kVideo, 10));
+ time_controller.AdvanceTime(TimeDelta::Zero());
+ ::testing::Mock::VerifyAndClearExpectations(&packet_router);
+
+ // Advance time, but still before next packet should be sent.
+ time_controller.AdvanceTime(kPacketPacingTime / 2);
+
+ // Insert an audio packet, it should be sent immediately.
+ EXPECT_CALL(packet_router, SendPacket);
+ pacer.EnqueuePackets(GeneratePackets(RtpPacketMediaType::kAudio, 1));
+ time_controller.AdvanceTime(TimeDelta::Zero());
+ ::testing::Mock::VerifyAndClearExpectations(&packet_router);
+}
+
+TEST(TaskQueuePacedSenderTest, SleepsDuringCoalescingWindow) {
+ const TimeDelta kCoalescingWindow = TimeDelta::Millis(5);
+ GlobalSimulatedTimeController time_controller(Timestamp::Millis(1234));
+ MockPacketRouter packet_router;
+ ScopedKeyValueConfig trials;
+ TaskQueuePacedSender pacer(time_controller.GetClock(), &packet_router, trials,
+ time_controller.GetTaskQueueFactory(),
+ kCoalescingWindow,
+ TaskQueuePacedSender::kNoPacketHoldback);
+
+ // Set rates so one packet adds one ms of buffer level.
+ const DataSize kPacketSize = DataSize::Bytes(kDefaultPacketSize);
+ const TimeDelta kPacketPacingTime = TimeDelta::Millis(1);
+ const DataRate kPacingDataRate = kPacketSize / kPacketPacingTime;
+
+ pacer.SetPacingRates(kPacingDataRate, DataRate::Zero());
+ pacer.EnsureStarted();
+
+ // Add 10 packets. The first should be sent immediately since the buffers
+ // are clear.
+ EXPECT_CALL(packet_router, SendPacket);
+ pacer.EnqueuePackets(GeneratePackets(RtpPacketMediaType::kVideo, 10));
+ time_controller.AdvanceTime(TimeDelta::Zero());
+ ::testing::Mock::VerifyAndClearExpectations(&packet_router);
+
+ // Advance time to 1ms before the coalescing window ends. No packets should
+ // be sent.
+ EXPECT_CALL(packet_router, SendPacket).Times(0);
+ time_controller.AdvanceTime(kCoalescingWindow - TimeDelta::Millis(1));
+
+  // Advance time to where the coalescing window ends. All packets that should
+  // have been sent up until now will be sent.
+ EXPECT_CALL(packet_router, SendPacket).Times(5);
+ time_controller.AdvanceTime(TimeDelta::Millis(1));
+ ::testing::Mock::VerifyAndClearExpectations(&packet_router);
+}
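+// A note on the numbers above (illustrative): at one packet per millisecond
+// and a 5 ms coalescing window, the packets due at 1-5 ms are held back and
+// released together when the window elapses, hence exactly five sends.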
+
+TEST(TaskQueuePacedSenderTest, ProbingOverridesCoalescingWindow) {
+ const TimeDelta kCoalescingWindow = TimeDelta::Millis(5);
+ GlobalSimulatedTimeController time_controller(Timestamp::Millis(1234));
+ MockPacketRouter packet_router;
+ ScopedKeyValueConfig trials;
+ TaskQueuePacedSender pacer(time_controller.GetClock(), &packet_router, trials,
+ time_controller.GetTaskQueueFactory(),
+ kCoalescingWindow,
+ TaskQueuePacedSender::kNoPacketHoldback);
+
+ // Set rates so one packet adds one ms of buffer level.
+ const DataSize kPacketSize = DataSize::Bytes(kDefaultPacketSize);
+ const TimeDelta kPacketPacingTime = TimeDelta::Millis(1);
+ const DataRate kPacingDataRate = kPacketSize / kPacketPacingTime;
+
+ pacer.SetPacingRates(kPacingDataRate, DataRate::Zero());
+ pacer.EnsureStarted();
+
+ // Add 10 packets. The first should be sent immediately since the buffers
+ // are clear. This will also trigger the probe to start.
+ EXPECT_CALL(packet_router, SendPacket).Times(AtLeast(1));
+ pacer.CreateProbeClusters(
+ {{.at_time = time_controller.GetClock()->CurrentTime(),
+ .target_data_rate = kPacingDataRate * 2,
+ .target_duration = TimeDelta::Millis(15),
+ .target_probe_count = 5,
+ .id = 17}});
+ pacer.EnqueuePackets(GeneratePackets(RtpPacketMediaType::kVideo, 10));
+ time_controller.AdvanceTime(TimeDelta::Zero());
+ ::testing::Mock::VerifyAndClearExpectations(&packet_router);
+
+ // Advance time to 1ms before the coalescing window ends. Packets should be
+ // flying.
+ EXPECT_CALL(packet_router, SendPacket).Times(AtLeast(1));
+ time_controller.AdvanceTime(kCoalescingWindow - TimeDelta::Millis(1));
+}
+
+TEST(TaskQueuePacedSenderTest, SchedulesProbeAtSentTime) {
+ ScopedKeyValueConfig trials(
+ "WebRTC-Bwe-ProbingBehavior/min_probe_delta:1ms/");
+ GlobalSimulatedTimeController time_controller(Timestamp::Millis(1234));
+ MockPacketRouter packet_router;
+ TaskQueuePacedSender pacer(time_controller.GetClock(), &packet_router, trials,
+ time_controller.GetTaskQueueFactory(),
+ PacingController::kMinSleepTime,
+ TaskQueuePacedSender::kNoPacketHoldback);
+
+ // Set rates so one packet adds 4ms of buffer level.
+ const DataSize kPacketSize = DataSize::Bytes(kDefaultPacketSize);
+ const TimeDelta kPacketPacingTime = TimeDelta::Millis(4);
+ const DataRate kPacingDataRate = kPacketSize / kPacketPacingTime;
+ pacer.SetPacingRates(kPacingDataRate, /*padding_rate=*/DataRate::Zero());
+ pacer.EnsureStarted();
+ EXPECT_CALL(packet_router, FetchFec).WillRepeatedly([]() {
+ return std::vector<std::unique_ptr<RtpPacketToSend>>();
+ });
+ EXPECT_CALL(packet_router, GeneratePadding(_))
+ .WillRepeatedly(
+ [](DataSize target_size) { return GeneratePadding(target_size); });
+
+ // Enqueue two packets, only the first is sent immediately and the next
+ // will be scheduled for sending in 4ms.
+ pacer.EnqueuePackets(GeneratePackets(RtpPacketMediaType::kVideo, 2));
+ const int kNotAProbe = PacedPacketInfo::kNotAProbe;
+ EXPECT_CALL(packet_router,
+ SendPacket(_, ::testing::Field(&PacedPacketInfo::probe_cluster_id,
+ kNotAProbe)));
+ // Advance to less than 3ms before next packet send time.
+ time_controller.AdvanceTime(TimeDelta::Micros(1001));
+
+ // Trigger a probe at 2x the current pacing rate and insert the number of
+ // packets the probe needs.
+ const DataRate kProbeRate = 2 * kPacingDataRate;
+ const int kProbeClusterId = 1;
+ pacer.CreateProbeClusters(
+ {{.at_time = time_controller.GetClock()->CurrentTime(),
+ .target_data_rate = kProbeRate,
+ .target_duration = TimeDelta::Millis(15),
+ .target_probe_count = 4,
+ .id = kProbeClusterId}});
+
+  // The expected size for each probe in a cluster is twice the expected bits
+  // sent during min_probe_delta.
+  // Expect one additional call since the probe always starts with a small
+  // (1 byte) padding packet that's not counted into the probe rate here.
+ const TimeDelta kProbeTimeDelta = TimeDelta::Millis(2);
+ const DataSize kProbeSize = kProbeRate * kProbeTimeDelta;
+ const size_t kNumPacketsInProbe =
+ (kProbeSize + kPacketSize - DataSize::Bytes(1)) / kPacketSize;
+ EXPECT_CALL(packet_router,
+ SendPacket(_, ::testing::Field(&PacedPacketInfo::probe_cluster_id,
+ kProbeClusterId)))
+ .Times(kNumPacketsInProbe + 1);
+
+ pacer.EnqueuePackets(
+ GeneratePackets(RtpPacketMediaType::kVideo, kNumPacketsInProbe));
+ time_controller.AdvanceTime(TimeDelta::Zero());
+
+  // The pacer should have scheduled the next probe to be sent in
+  // kProbeTimeDelta. The fact that there was an existing scheduled call less
+  // than PacingController::kMinSleepTime before this should not matter.
+ EXPECT_CALL(packet_router,
+ SendPacket(_, ::testing::Field(&PacedPacketInfo::probe_cluster_id,
+ kProbeClusterId)))
+ .Times(AtLeast(1));
+ time_controller.AdvanceTime(TimeDelta::Millis(2));
+}
+
+TEST(TaskQueuePacedSenderTest, NoMinSleepTimeWhenProbing) {
+ // Set min_probe_delta to be less than kMinSleepTime (1ms).
+ const TimeDelta kMinProbeDelta = TimeDelta::Micros(100);
+ ScopedKeyValueConfig trials(
+ "WebRTC-Bwe-ProbingBehavior/min_probe_delta:100us/");
+ GlobalSimulatedTimeController time_controller(Timestamp::Millis(1234));
+ MockPacketRouter packet_router;
+ TaskQueuePacedSender pacer(time_controller.GetClock(), &packet_router, trials,
+ time_controller.GetTaskQueueFactory(),
+ PacingController::kMinSleepTime,
+ TaskQueuePacedSender::kNoPacketHoldback);
+
+ // Set rates so one packet adds 4ms of buffer level.
+ const DataSize kPacketSize = DataSize::Bytes(kDefaultPacketSize);
+ const TimeDelta kPacketPacingTime = TimeDelta::Millis(4);
+ const DataRate kPacingDataRate = kPacketSize / kPacketPacingTime;
+ pacer.SetPacingRates(kPacingDataRate, /*padding_rate=*/DataRate::Zero());
+ pacer.EnsureStarted();
+ EXPECT_CALL(packet_router, FetchFec).WillRepeatedly([]() {
+ return std::vector<std::unique_ptr<RtpPacketToSend>>();
+ });
+ EXPECT_CALL(packet_router, GeneratePadding)
+ .WillRepeatedly(
+ [](DataSize target_size) { return GeneratePadding(target_size); });
+
+ // Set a high probe rate.
+ const int kProbeClusterId = 1;
+ DataRate kProbingRate = kPacingDataRate * 10;
+
+ pacer.CreateProbeClusters(
+ {{.at_time = time_controller.GetClock()->CurrentTime(),
+ .target_data_rate = kProbingRate,
+ .target_duration = TimeDelta::Millis(15),
+ .target_probe_count = 5,
+ .id = kProbeClusterId}});
+
+  // Advance time by less than PacingController::kMinSleepTime; probing
+  // packets for the first millisecond should be sent immediately. The minimum
+  // delta between probes is 2x 100us, so 4 times per ms we will get at least
+  // one call to SendPacket().
+ DataSize data_sent = DataSize::Zero();
+ EXPECT_CALL(packet_router,
+ SendPacket(_, ::testing::Field(&PacedPacketInfo::probe_cluster_id,
+ kProbeClusterId)))
+ .Times(AtLeast(4))
+ .WillRepeatedly([&](std::unique_ptr<RtpPacketToSend> packet,
+ const PacedPacketInfo&) {
+ data_sent +=
+ DataSize::Bytes(packet->payload_size() + packet->padding_size());
+ });
+
+  // Add one packet to kickstart probing; the rest will be padding packets.
+ pacer.EnqueuePackets(GeneratePackets(RtpPacketMediaType::kVideo, 1));
+ time_controller.AdvanceTime(kMinProbeDelta);
+
+  // Verify the amount of probing data sent.
+  // The probe always starts with a small (1 byte) padding packet that's not
+  // counted into the probe rate here.
+ const DataSize kMinProbeSize = 2 * kMinProbeDelta * kProbingRate;
+ EXPECT_EQ(data_sent, DataSize::Bytes(1) + kPacketSize + 4 * kMinProbeSize);
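+  // Worked numbers (illustrative): kPacingDataRate is 1234 B / 4 ms =
+  // 308.5 kB/s, so kProbingRate is 3.085 MB/s and kMinProbeSize is
+  // 2 * 100 us * 3.085 MB/s = 617 bytes, giving an expected total of
+  // 1 + 1234 + 4 * 617 = 3703 bytes.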
+}
+
+TEST(TaskQueuePacedSenderTest, PacketBasedCoalescing) {
+ const TimeDelta kFixedCoalescingWindow = TimeDelta::Millis(10);
+ const int kPacketBasedHoldback = 5;
+
+ GlobalSimulatedTimeController time_controller(Timestamp::Millis(1234));
+ MockPacketRouter packet_router;
+ ScopedKeyValueConfig trials;
+ TaskQueuePacedSender pacer(time_controller.GetClock(), &packet_router, trials,
+ time_controller.GetTaskQueueFactory(),
+ kFixedCoalescingWindow, kPacketBasedHoldback);
+
+ // Set rates so one packet adds one ms of buffer level.
+ const DataSize kPacketSize = DataSize::Bytes(kDefaultPacketSize);
+ const TimeDelta kPacketPacingTime = TimeDelta::Millis(1);
+ const DataRate kPacingDataRate = kPacketSize / kPacketPacingTime;
+ const TimeDelta kExpectedHoldbackWindow =
+ kPacketPacingTime * kPacketBasedHoldback;
+ // `kFixedCoalescingWindow` sets the upper bound for the window.
+ ASSERT_GE(kFixedCoalescingWindow, kExpectedHoldbackWindow);
+
+ pacer.SetPacingRates(kPacingDataRate, DataRate::Zero());
+ EXPECT_CALL(packet_router, FetchFec).WillRepeatedly([]() {
+ return std::vector<std::unique_ptr<RtpPacketToSend>>();
+ });
+ pacer.EnsureStarted();
+
+ // Add some packets and wait till all have been sent, so that the pacer
+ // has a valid estimate of packet size.
+ const int kNumWarmupPackets = 40;
+ EXPECT_CALL(packet_router, SendPacket).Times(kNumWarmupPackets);
+ pacer.EnqueuePackets(
+ GeneratePackets(RtpPacketMediaType::kVideo, kNumWarmupPackets));
+  // Wait until all packets have been sent, with a 2x margin.
+ time_controller.AdvanceTime(kPacketPacingTime * (kNumWarmupPackets * 2));
+
+ // Enqueue packets. Expect only the first one to be sent immediately.
+ EXPECT_CALL(packet_router, SendPacket).Times(1);
+ pacer.EnqueuePackets(
+ GeneratePackets(RtpPacketMediaType::kVideo, kPacketBasedHoldback));
+ time_controller.AdvanceTime(TimeDelta::Zero());
+
+ // Advance time to 1ms before the coalescing window ends.
+ EXPECT_CALL(packet_router, SendPacket).Times(0);
+ time_controller.AdvanceTime(kExpectedHoldbackWindow - TimeDelta::Millis(1));
+
+ // Advance past where the coalescing window should end.
+ EXPECT_CALL(packet_router, SendPacket).Times(kPacketBasedHoldback - 1);
+ time_controller.AdvanceTime(TimeDelta::Millis(1));
+}
+
+TEST(TaskQueuePacedSenderTest, FixedHoldBackHasPriorityOverPackets) {
+ const TimeDelta kFixedCoalescingWindow = TimeDelta::Millis(2);
+ const int kPacketBasedHoldback = 5;
+
+ GlobalSimulatedTimeController time_controller(Timestamp::Millis(1234));
+ MockPacketRouter packet_router;
+ ScopedKeyValueConfig trials;
+ TaskQueuePacedSender pacer(time_controller.GetClock(), &packet_router, trials,
+ time_controller.GetTaskQueueFactory(),
+ kFixedCoalescingWindow, kPacketBasedHoldback);
+
+ // Set rates so one packet adds one ms of buffer level.
+ const DataSize kPacketSize = DataSize::Bytes(kDefaultPacketSize);
+ const TimeDelta kPacketPacingTime = TimeDelta::Millis(1);
+ const DataRate kPacingDataRate = kPacketSize / kPacketPacingTime;
+ const TimeDelta kExpectedPacketHoldbackWindow =
+ kPacketPacingTime * kPacketBasedHoldback;
+  // `kFixedCoalescingWindow` sets the upper bound for the window.
+ ASSERT_LT(kFixedCoalescingWindow, kExpectedPacketHoldbackWindow);
+
+ pacer.SetPacingRates(kPacingDataRate, DataRate::Zero());
+ EXPECT_CALL(packet_router, FetchFec).WillRepeatedly([]() {
+ return std::vector<std::unique_ptr<RtpPacketToSend>>();
+ });
+ pacer.EnsureStarted();
+
+ // Add some packets and wait till all have been sent, so that the pacer
+ // has a valid estimate of packet size.
+ const int kNumWarmupPackets = 40;
+ EXPECT_CALL(packet_router, SendPacket).Times(kNumWarmupPackets);
+ pacer.EnqueuePackets(
+ GeneratePackets(RtpPacketMediaType::kVideo, kNumWarmupPackets));
+  // Wait until all packets have been sent, with a 2x margin.
+ time_controller.AdvanceTime(kPacketPacingTime * (kNumWarmupPackets * 2));
+
+  // Enqueue packets. Expect only the first one to be sent immediately.
+ EXPECT_CALL(packet_router, SendPacket).Times(1);
+ pacer.EnqueuePackets(
+ GeneratePackets(RtpPacketMediaType::kVideo, kPacketBasedHoldback));
+ time_controller.AdvanceTime(TimeDelta::Zero());
+
+  // Advance time to the fixed coalescing window, which should take precedence
+  // so at least some of the packets should be sent.
+ EXPECT_CALL(packet_router, SendPacket).Times(AtLeast(1));
+ time_controller.AdvanceTime(kFixedCoalescingWindow);
+}
+
+TEST(TaskQueuePacedSenderTest, ProbingStopDuringSendLoop) {
+  // Set a low `min_probe_delta` to let probing finish during the send loop.
+ ScopedKeyValueConfig trials(
+ "WebRTC-Bwe-ProbingBehavior/min_probe_delta:100us/");
+
+ GlobalSimulatedTimeController time_controller(Timestamp::Millis(1234));
+ MockPacketRouter packet_router;
+ TaskQueuePacedSender pacer(time_controller.GetClock(), &packet_router, trials,
+ time_controller.GetTaskQueueFactory(),
+ PacingController::kMinSleepTime,
+ TaskQueuePacedSender::kNoPacketHoldback);
+
+  // Set rates so 2 packets add 1ms of buffer level.
+ const DataSize kPacketSize = DataSize::Bytes(kDefaultPacketSize);
+ const TimeDelta kPacketPacingTime = TimeDelta::Millis(1);
+ const DataRate kPacingDataRate = 2 * kPacketSize / kPacketPacingTime;
+
+ pacer.SetPacingRates(kPacingDataRate, DataRate::Zero());
+ pacer.EnsureStarted();
+
+ EXPECT_CALL(packet_router, FetchFec).WillRepeatedly([]() {
+ return std::vector<std::unique_ptr<RtpPacketToSend>>();
+ });
+ EXPECT_CALL(packet_router, GeneratePadding(_))
+ .WillRepeatedly(
+ [](DataSize target_size) { return GeneratePadding(target_size); });
+
+ // Set probe rate.
+ const int kProbeClusterId = 1;
+ const DataRate kProbingRate = kPacingDataRate;
+
+ pacer.CreateProbeClusters(
+ {{.at_time = time_controller.GetClock()->CurrentTime(),
+ .target_data_rate = kProbingRate,
+ .target_duration = TimeDelta::Millis(15),
+ .target_probe_count = 4,
+ .id = kProbeClusterId}});
+
+ const int kPacketsToSend = 100;
+ const TimeDelta kPacketsPacedTime =
+ std::max(kPacketsToSend * kPacketSize / kPacingDataRate,
+ kPacketsToSend * kPacketSize / kProbingRate);
+
+ // Expect all packets and one padding packet sent.
+ EXPECT_CALL(packet_router, SendPacket).Times(kPacketsToSend + 1);
+ pacer.EnqueuePackets(
+ GeneratePackets(RtpPacketMediaType::kVideo, kPacketsToSend));
+ time_controller.AdvanceTime(kPacketsPacedTime + TimeDelta::Millis(1));
+}
+
+TEST(TaskQueuePacedSenderTest, Stats) {
+ static constexpr Timestamp kStartTime = Timestamp::Millis(1234);
+ GlobalSimulatedTimeController time_controller(kStartTime);
+ MockPacketRouter packet_router;
+ ScopedKeyValueConfig trials;
+ TaskQueuePacedSender pacer(time_controller.GetClock(), &packet_router, trials,
+ time_controller.GetTaskQueueFactory(),
+ PacingController::kMinSleepTime,
+ TaskQueuePacedSender::kNoPacketHoldback);
+
+  // Simulate a ~2 Mbps video stream, covering one second.
+ static constexpr size_t kPacketsToSend = 200;
+ static constexpr DataRate kPacingRate =
+ DataRate::BytesPerSec(kDefaultPacketSize * kPacketsToSend);
+ pacer.SetPacingRates(kPacingRate, DataRate::Zero());
+ pacer.EnsureStarted();
+
+ // Allowed `QueueSizeData` and `ExpectedQueueTime` deviation.
+ static constexpr size_t kAllowedPacketsDeviation = 1;
+ static constexpr DataSize kAllowedQueueSizeDeviation =
+ DataSize::Bytes(kDefaultPacketSize * kAllowedPacketsDeviation);
+ static constexpr TimeDelta kAllowedQueueTimeDeviation =
+ kAllowedQueueSizeDeviation / kPacingRate;
+
+ DataSize expected_queue_size = DataSize::MinusInfinity();
+ TimeDelta expected_queue_time = TimeDelta::MinusInfinity();
+
+ EXPECT_CALL(packet_router, SendPacket).Times(kPacketsToSend);
+
+  // Stats before inserting any packets.
+ EXPECT_TRUE(pacer.OldestPacketWaitTime().IsZero());
+ EXPECT_FALSE(pacer.FirstSentPacketTime().has_value());
+ EXPECT_TRUE(pacer.QueueSizeData().IsZero());
+ EXPECT_TRUE(pacer.ExpectedQueueTime().IsZero());
+
+ pacer.EnqueuePackets(
+ GeneratePackets(RtpPacketMediaType::kVideo, kPacketsToSend));
+
+ // Advance to 200ms.
+ time_controller.AdvanceTime(TimeDelta::Millis(200));
+ EXPECT_EQ(pacer.OldestPacketWaitTime(), TimeDelta::Millis(200));
+ EXPECT_EQ(pacer.FirstSentPacketTime(), kStartTime);
+
+ expected_queue_size = kPacingRate * TimeDelta::Millis(800);
+ expected_queue_time = expected_queue_size / kPacingRate;
+ EXPECT_NEAR(pacer.QueueSizeData().bytes(), expected_queue_size.bytes(),
+ kAllowedQueueSizeDeviation.bytes());
+ EXPECT_NEAR(pacer.ExpectedQueueTime().ms(), expected_queue_time.ms(),
+ kAllowedQueueTimeDeviation.ms());
+
+ // Advance to 500ms.
+ time_controller.AdvanceTime(TimeDelta::Millis(300));
+ EXPECT_EQ(pacer.OldestPacketWaitTime(), TimeDelta::Millis(500));
+ EXPECT_EQ(pacer.FirstSentPacketTime(), kStartTime);
+
+ expected_queue_size = kPacingRate * TimeDelta::Millis(500);
+ expected_queue_time = expected_queue_size / kPacingRate;
+ EXPECT_NEAR(pacer.QueueSizeData().bytes(), expected_queue_size.bytes(),
+ kAllowedQueueSizeDeviation.bytes());
+ EXPECT_NEAR(pacer.ExpectedQueueTime().ms(), expected_queue_time.ms(),
+ kAllowedQueueTimeDeviation.ms());
+
+ // Advance to 1000ms+, expect all packets to be sent.
+ time_controller.AdvanceTime(TimeDelta::Millis(500) +
+ kAllowedQueueTimeDeviation);
+ EXPECT_TRUE(pacer.OldestPacketWaitTime().IsZero());
+ EXPECT_EQ(pacer.FirstSentPacketTime(), kStartTime);
+ EXPECT_TRUE(pacer.QueueSizeData().IsZero());
+ EXPECT_TRUE(pacer.ExpectedQueueTime().IsZero());
+}
+
+TEST(TaskQueuePacedSenderTest, HighPrecisionPacingWhenSlackIsDisabled) {
+ test::ScopedKeyValueConfig experiments(
+ "WebRTC-SlackedTaskQueuePacedSender/Disabled/");
+
+ GlobalSimulatedTimeController time_controller(Timestamp::Millis(1234));
+ TaskQueueWithFakePrecisionFactory task_queue_factory(
+ time_controller.GetTaskQueueFactory());
+
+ MockPacketRouter packet_router;
+ TaskQueuePacedSender pacer(time_controller.GetClock(), &packet_router,
+ experiments, &task_queue_factory,
+ PacingController::kMinSleepTime,
+ TaskQueuePacedSender::kNoPacketHoldback);
+
+  // Send enough packets (covering one second) that pacing is triggered, i.e.
+  // delayed tasks get scheduled.
+ static constexpr size_t kPacketsToSend = 42;
+ static constexpr DataRate kPacingRate =
+ DataRate::BitsPerSec(kDefaultPacketSize * 8 * kPacketsToSend);
+ pacer.SetPacingRates(kPacingRate, DataRate::Zero());
+ pacer.EnsureStarted();
+ pacer.EnqueuePackets(
+ GeneratePackets(RtpPacketMediaType::kVideo, kPacketsToSend));
+ // Expect all of them to be sent.
+ size_t packets_sent = 0;
+ EXPECT_CALL(packet_router, SendPacket)
+ .WillRepeatedly(
+ [&](std::unique_ptr<RtpPacketToSend> packet,
+ const PacedPacketInfo& cluster_info) { ++packets_sent; });
+ time_controller.AdvanceTime(TimeDelta::Seconds(1));
+ EXPECT_EQ(packets_sent, kPacketsToSend);
+
+ // Expect pacing to make use of high precision.
+ EXPECT_EQ(task_queue_factory.delayed_low_precision_count(), 0);
+ EXPECT_GT(task_queue_factory.delayed_high_precision_count(), 0);
+
+ // Create probe cluster which is also high precision.
+ pacer.CreateProbeClusters(
+ {{.at_time = time_controller.GetClock()->CurrentTime(),
+ .target_data_rate = kPacingRate,
+ .target_duration = TimeDelta::Millis(15),
+ .target_probe_count = 4,
+ .id = 123}});
+ pacer.EnqueuePackets(GeneratePackets(RtpPacketMediaType::kVideo, 1));
+ time_controller.AdvanceTime(TimeDelta::Seconds(1));
+ EXPECT_EQ(task_queue_factory.delayed_low_precision_count(), 0);
+ EXPECT_GT(task_queue_factory.delayed_high_precision_count(), 0);
+}
+
+TEST(TaskQueuePacedSenderTest, LowPrecisionPacingWhenSlackIsEnabled) {
+ test::ScopedKeyValueConfig experiments(
+ "WebRTC-SlackedTaskQueuePacedSender/Enabled/");
+
+ GlobalSimulatedTimeController time_controller(Timestamp::Millis(1234));
+ TaskQueueWithFakePrecisionFactory task_queue_factory(
+ time_controller.GetTaskQueueFactory());
+
+ MockPacketRouter packet_router;
+ TaskQueuePacedSender pacer(time_controller.GetClock(), &packet_router,
+ experiments, &task_queue_factory,
+ PacingController::kMinSleepTime,
+ TaskQueuePacedSender::kNoPacketHoldback);
+
+  // Send enough packets (covering one second) that pacing is triggered, i.e.
+  // delayed tasks get scheduled.
+ static constexpr size_t kPacketsToSend = 42;
+ static constexpr DataRate kPacingRate =
+ DataRate::BitsPerSec(kDefaultPacketSize * 8 * kPacketsToSend);
+ pacer.SetPacingRates(kPacingRate, DataRate::Zero());
+ pacer.EnsureStarted();
+ pacer.EnqueuePackets(
+ GeneratePackets(RtpPacketMediaType::kVideo, kPacketsToSend));
+ // Expect all of them to be sent.
+ size_t packets_sent = 0;
+ EXPECT_CALL(packet_router, SendPacket)
+ .WillRepeatedly(
+ [&](std::unique_ptr<RtpPacketToSend> packet,
+ const PacedPacketInfo& cluster_info) { ++packets_sent; });
+ time_controller.AdvanceTime(TimeDelta::Seconds(1));
+ EXPECT_EQ(packets_sent, kPacketsToSend);
+
+ // Expect pacing to make use of low precision.
+ EXPECT_GT(task_queue_factory.delayed_low_precision_count(), 0);
+ EXPECT_EQ(task_queue_factory.delayed_high_precision_count(), 0);
+
+ // Create probe cluster, which uses high precision despite regular pacing
+ // being low precision.
+ pacer.CreateProbeClusters(
+ {{.at_time = time_controller.GetClock()->CurrentTime(),
+ .target_data_rate = kPacingRate,
+ .target_duration = TimeDelta::Millis(15),
+ .target_probe_count = 4,
+ .id = 123}});
+ pacer.EnqueuePackets(GeneratePackets(RtpPacketMediaType::kVideo, 1));
+ time_controller.AdvanceTime(TimeDelta::Seconds(1));
+ EXPECT_GT(task_queue_factory.delayed_high_precision_count(), 0);
+}
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/remote_bitrate_estimator/BUILD.gn b/third_party/libwebrtc/modules/remote_bitrate_estimator/BUILD.gn
new file mode 100644
index 0000000000..9e2352fd61
--- /dev/null
+++ b/third_party/libwebrtc/modules/remote_bitrate_estimator/BUILD.gn
@@ -0,0 +1,144 @@
+# Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+import("../../webrtc.gni")
+
+rtc_library("remote_bitrate_estimator") {
+ sources = [
+ "aimd_rate_control.cc",
+ "aimd_rate_control.h",
+ "bwe_defines.cc",
+ "include/bwe_defines.h",
+ "include/remote_bitrate_estimator.h",
+ "inter_arrival.cc",
+ "inter_arrival.h",
+ "overuse_detector.cc",
+ "overuse_detector.h",
+ "overuse_estimator.cc",
+ "overuse_estimator.h",
+ "packet_arrival_map.cc",
+ "packet_arrival_map.h",
+ "remote_bitrate_estimator_abs_send_time.cc",
+ "remote_bitrate_estimator_abs_send_time.h",
+ "remote_bitrate_estimator_single_stream.cc",
+ "remote_bitrate_estimator_single_stream.h",
+ "remote_estimator_proxy.cc",
+ "remote_estimator_proxy.h",
+ "test/bwe_test_logging.h",
+ ]
+
+ if (rtc_enable_bwe_test_logging) {
+ defines = [ "BWE_TEST_LOGGING_COMPILE_TIME_ENABLE=1" ]
+
+ sources += [ "test/bwe_test_logging.cc" ]
+ } else {
+ defines = [ "BWE_TEST_LOGGING_COMPILE_TIME_ENABLE=0" ]
+ }
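+
+  # A minimal sketch of flipping the logging path above, assuming the
+  # standard GN workflow and that rtc_enable_bwe_test_logging is a declared
+  # build arg:
+  #   gn gen out/Default --args="rtc_enable_bwe_test_logging=true"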
+
+ deps = [
+ "../../api:field_trials_view",
+ "../../api:network_state_predictor_api",
+ "../../api:rtp_headers",
+ "../../api/transport:field_trial_based_config",
+ "../../api/transport:network_control",
+ "../../api/units:data_rate",
+ "../../api/units:data_size",
+ "../../api/units:time_delta",
+ "../../api/units:timestamp",
+ "../../modules:module_api",
+ "../../modules:module_api_public",
+ "../../modules/congestion_controller/goog_cc:link_capacity_estimator",
+ "../../modules/rtp_rtcp:rtp_rtcp_format",
+ "../../rtc_base:checks",
+ "../../rtc_base:logging",
+ "../../rtc_base:macromagic",
+ "../../rtc_base:platform_thread",
+ "../../rtc_base:race_checker",
+ "../../rtc_base:rate_statistics",
+ "../../rtc_base:rtc_numerics",
+ "../../rtc_base:safe_minmax",
+ "../../rtc_base:stringutils",
+ "../../rtc_base/experiments:field_trial_parser",
+ "../../rtc_base/synchronization:mutex",
+ "../../system_wrappers",
+ "../../system_wrappers:field_trial",
+ "../../system_wrappers:metrics",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/strings",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+}
+
+if (!build_with_chromium) {
+ rtc_library("bwe_rtp") {
+ testonly = true
+ sources = [
+ "tools/bwe_rtp.cc",
+ "tools/bwe_rtp.h",
+ ]
+ deps = [
+ "../../test:rtp_test_utils",
+ "../rtp_rtcp:rtp_rtcp_format",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/flags:flag",
+ "//third_party/abseil-cpp/absl/flags:parse",
+ ]
+ }
+
+ rtc_executable("rtp_to_text") {
+ testonly = true
+ sources = [ "tools/rtp_to_text.cc" ]
+ deps = [
+ ":bwe_rtp",
+ "../../rtc_base:macromagic",
+ "../../rtc_base:stringutils",
+ "../../test:rtp_test_utils",
+ "../rtp_rtcp:rtp_rtcp_format",
+ ]
+ }
+}
+
+if (rtc_include_tests) {
+ rtc_library("remote_bitrate_estimator_unittests") {
+ testonly = true
+
+ sources = [
+ "aimd_rate_control_unittest.cc",
+ "inter_arrival_unittest.cc",
+ "overuse_detector_unittest.cc",
+ "packet_arrival_map_test.cc",
+ "remote_bitrate_estimator_abs_send_time_unittest.cc",
+ "remote_bitrate_estimator_single_stream_unittest.cc",
+ "remote_bitrate_estimator_unittest_helper.cc",
+ "remote_bitrate_estimator_unittest_helper.h",
+ "remote_estimator_proxy_unittest.cc",
+ ]
+ deps = [
+ ":remote_bitrate_estimator",
+ "..:module_api_public",
+ "../../api/transport:field_trial_based_config",
+ "../../api/transport:mock_network_control",
+ "../../api/transport:network_control",
+ "../../api/units:data_rate",
+ "../../api/units:data_size",
+ "../../api/units:time_delta",
+ "../../api/units:timestamp",
+ "../../rtc_base",
+ "../../rtc_base:checks",
+ "../../rtc_base:random",
+ "../../system_wrappers",
+ "../../test:field_trial",
+ "../../test:fileutils",
+ "../../test:test_support",
+ "../pacing",
+ "../rtp_rtcp:rtp_rtcp_format",
+ ]
+ }
+}
diff --git a/third_party/libwebrtc/modules/remote_bitrate_estimator/DEPS b/third_party/libwebrtc/modules/remote_bitrate_estimator/DEPS
new file mode 100644
index 0000000000..35a62119e5
--- /dev/null
+++ b/third_party/libwebrtc/modules/remote_bitrate_estimator/DEPS
@@ -0,0 +1,6 @@
+include_rules = [
+ "+logging/rtc_event_log",
+ "+system_wrappers",
+ # Avoid directly using field_trial. Instead use FieldTrialsView.
+ "-system_wrappers/include/field_trial.h",
+]
diff --git a/third_party/libwebrtc/modules/remote_bitrate_estimator/OWNERS b/third_party/libwebrtc/modules/remote_bitrate_estimator/OWNERS
new file mode 100644
index 0000000000..97e5d51dac
--- /dev/null
+++ b/third_party/libwebrtc/modules/remote_bitrate_estimator/OWNERS
@@ -0,0 +1,6 @@
+stefan@webrtc.org
+terelius@webrtc.org
+asapersson@webrtc.org
+mflodman@webrtc.org
+philipel@webrtc.org
+perkj@webrtc.org
diff --git a/third_party/libwebrtc/modules/remote_bitrate_estimator/aimd_rate_control.cc b/third_party/libwebrtc/modules/remote_bitrate_estimator/aimd_rate_control.cc
new file mode 100644
index 0000000000..b625a745df
--- /dev/null
+++ b/third_party/libwebrtc/modules/remote_bitrate_estimator/aimd_rate_control.cc
@@ -0,0 +1,440 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/remote_bitrate_estimator/aimd_rate_control.h"
+
+#include <inttypes.h>
+
+#include <algorithm>
+#include <cmath>
+#include <cstdio>
+#include <string>
+
+#include "absl/strings/match.h"
+#include "api/transport/network_types.h"
+#include "api/units/data_rate.h"
+#include "modules/remote_bitrate_estimator/include/bwe_defines.h"
+#include "modules/remote_bitrate_estimator/overuse_detector.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/experiments/field_trial_parser.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/numerics/safe_minmax.h"
+
+namespace webrtc {
+namespace {
+
+constexpr TimeDelta kDefaultRtt = TimeDelta::Millis(200);
+constexpr double kDefaultBackoffFactor = 0.85;
+
+constexpr char kBweBackOffFactorExperiment[] = "WebRTC-BweBackOffFactor";
+
+bool IsEnabled(const FieldTrialsView& field_trials, absl::string_view key) {
+ return absl::StartsWith(field_trials.Lookup(key), "Enabled");
+}
+
+bool IsNotDisabled(const FieldTrialsView& field_trials, absl::string_view key) {
+ return !absl::StartsWith(field_trials.Lookup(key), "Disabled");
+}
+
+double ReadBackoffFactor(const FieldTrialsView& key_value_config) {
+ std::string experiment_string =
+ key_value_config.Lookup(kBweBackOffFactorExperiment);
+ double backoff_factor;
+ int parsed_values =
+ sscanf(experiment_string.c_str(), "Enabled-%lf", &backoff_factor);
+ if (parsed_values == 1) {
+ if (backoff_factor >= 1.0) {
+ RTC_LOG(LS_WARNING) << "Back-off factor must be less than 1.";
+ } else if (backoff_factor <= 0.0) {
+ RTC_LOG(LS_WARNING) << "Back-off factor must be greater than 0.";
+ } else {
+ return backoff_factor;
+ }
+ }
+ RTC_LOG(LS_WARNING) << "Failed to parse parameters for AimdRateControl "
+ "experiment from field trial string. Using default.";
+ return kDefaultBackoffFactor;
+}
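+// Illustrative trial string, following the "Enabled-%lf" format parsed above:
+// "WebRTC-BweBackOffFactor/Enabled-0.9/" yields a back-off factor of 0.9;
+// values outside (0, 1) fall back to kDefaultBackoffFactor.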
+
+} // namespace
+
+AimdRateControl::AimdRateControl(const FieldTrialsView* key_value_config)
+ : AimdRateControl(key_value_config, /* send_side =*/false) {}
+
+AimdRateControl::AimdRateControl(const FieldTrialsView* key_value_config,
+ bool send_side)
+ : min_configured_bitrate_(kCongestionControllerMinBitrate),
+ max_configured_bitrate_(DataRate::KilobitsPerSec(30000)),
+ current_bitrate_(max_configured_bitrate_),
+ latest_estimated_throughput_(current_bitrate_),
+ link_capacity_(),
+ rate_control_state_(RateControlState::kRcHold),
+ time_last_bitrate_change_(Timestamp::MinusInfinity()),
+ time_last_bitrate_decrease_(Timestamp::MinusInfinity()),
+ time_first_throughput_estimate_(Timestamp::MinusInfinity()),
+ bitrate_is_initialized_(false),
+ beta_(IsEnabled(*key_value_config, kBweBackOffFactorExperiment)
+ ? ReadBackoffFactor(*key_value_config)
+ : kDefaultBackoffFactor),
+ in_alr_(false),
+ rtt_(kDefaultRtt),
+ send_side_(send_side),
+ in_experiment_(!AdaptiveThresholdExperimentIsDisabled(*key_value_config)),
+ no_bitrate_increase_in_alr_(
+ IsEnabled(*key_value_config,
+ "WebRTC-DontIncreaseDelayBasedBweInAlr")),
+ estimate_bounded_backoff_(
+ IsNotDisabled(*key_value_config,
+ "WebRTC-Bwe-EstimateBoundedBackoff")),
+ initial_backoff_interval_("initial_backoff_interval"),
+ link_capacity_fix_("link_capacity_fix") {
+ ParseFieldTrial(
+ {&disable_estimate_bounded_increase_, &estimate_bounded_increase_ratio_,
+ &ignore_throughput_limit_if_network_estimate_,
+ &ignore_network_estimate_decrease_, &increase_to_network_estimate_},
+ key_value_config->Lookup("WebRTC-Bwe-EstimateBoundedIncrease"));
+  // E.g.:
+ // WebRTC-BweAimdRateControlConfig/initial_backoff_interval:100ms/
+ ParseFieldTrial({&initial_backoff_interval_, &link_capacity_fix_},
+ key_value_config->Lookup("WebRTC-BweAimdRateControlConfig"));
+ if (initial_backoff_interval_) {
+ RTC_LOG(LS_INFO) << "Using aimd rate control with initial back-off interval"
+ " "
+ << ToString(*initial_backoff_interval_) << ".";
+ }
+ RTC_LOG(LS_INFO) << "Using aimd rate control with back off factor " << beta_;
+}
+
+AimdRateControl::~AimdRateControl() {}
+
+void AimdRateControl::SetStartBitrate(DataRate start_bitrate) {
+ current_bitrate_ = start_bitrate;
+ latest_estimated_throughput_ = current_bitrate_;
+ bitrate_is_initialized_ = true;
+}
+
+void AimdRateControl::SetMinBitrate(DataRate min_bitrate) {
+ min_configured_bitrate_ = min_bitrate;
+ current_bitrate_ = std::max(min_bitrate, current_bitrate_);
+}
+
+bool AimdRateControl::ValidEstimate() const {
+ return bitrate_is_initialized_;
+}
+
+TimeDelta AimdRateControl::GetFeedbackInterval() const {
+ // Estimate how often we can send RTCP if we allocate up to 5% of bandwidth
+ // to feedback.
+ const DataSize kRtcpSize = DataSize::Bytes(80);
+ const DataRate rtcp_bitrate = current_bitrate_ * 0.05;
+ const TimeDelta interval = kRtcpSize / rtcp_bitrate;
+ const TimeDelta kMinFeedbackInterval = TimeDelta::Millis(200);
+ const TimeDelta kMaxFeedbackInterval = TimeDelta::Millis(1000);
+ return interval.Clamped(kMinFeedbackInterval, kMaxFeedbackInterval);
+}
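+// Worked example (illustrative): at a 1 Mbps target, 5% is 50 kbps, and an
+// 80 byte (640 bit) RTCP packet then takes 12.8 ms, which the clamp raises to
+// the 200 ms minimum.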
+
+bool AimdRateControl::TimeToReduceFurther(Timestamp at_time,
+ DataRate estimated_throughput) const {
+ const TimeDelta bitrate_reduction_interval =
+ rtt_.Clamped(TimeDelta::Millis(10), TimeDelta::Millis(200));
+ if (at_time - time_last_bitrate_change_ >= bitrate_reduction_interval) {
+ return true;
+ }
+ if (ValidEstimate()) {
+ // TODO(terelius/holmer): Investigate consequences of increasing
+ // the threshold to 0.95 * LatestEstimate().
+ const DataRate threshold = 0.5 * LatestEstimate();
+ return estimated_throughput < threshold;
+ }
+ return false;
+}
+
+bool AimdRateControl::InitialTimeToReduceFurther(Timestamp at_time) const {
+ if (!initial_backoff_interval_) {
+ return ValidEstimate() &&
+ TimeToReduceFurther(at_time,
+ LatestEstimate() / 2 - DataRate::BitsPerSec(1));
+ }
+ // TODO(terelius): We could use the RTT (clamped to suitable limits) instead
+ // of a fixed bitrate_reduction_interval.
+ if (time_last_bitrate_decrease_.IsInfinite() ||
+ at_time - time_last_bitrate_decrease_ >= *initial_backoff_interval_) {
+ return true;
+ }
+ return false;
+}
+
+DataRate AimdRateControl::LatestEstimate() const {
+ return current_bitrate_;
+}
+
+void AimdRateControl::SetRtt(TimeDelta rtt) {
+ rtt_ = rtt;
+}
+
+DataRate AimdRateControl::Update(const RateControlInput* input,
+ Timestamp at_time) {
+ RTC_CHECK(input);
+
+ // Set the initial bit rate value to what we're receiving the first half
+ // second.
+ // TODO(bugs.webrtc.org/9379): The comment above doesn't match to the code.
+ if (!bitrate_is_initialized_) {
+ const TimeDelta kInitializationTime = TimeDelta::Seconds(5);
+ RTC_DCHECK_LE(kBitrateWindowMs, kInitializationTime.ms());
+ if (time_first_throughput_estimate_.IsInfinite()) {
+ if (input->estimated_throughput)
+ time_first_throughput_estimate_ = at_time;
+ } else if (at_time - time_first_throughput_estimate_ >
+ kInitializationTime &&
+ input->estimated_throughput) {
+ current_bitrate_ = *input->estimated_throughput;
+ bitrate_is_initialized_ = true;
+ }
+ }
+
+ ChangeBitrate(*input, at_time);
+ return current_bitrate_;
+}
+
+void AimdRateControl::SetInApplicationLimitedRegion(bool in_alr) {
+ in_alr_ = in_alr;
+}
+
+void AimdRateControl::SetEstimate(DataRate bitrate, Timestamp at_time) {
+ bitrate_is_initialized_ = true;
+ DataRate prev_bitrate = current_bitrate_;
+ current_bitrate_ = ClampBitrate(bitrate);
+ time_last_bitrate_change_ = at_time;
+ if (current_bitrate_ < prev_bitrate) {
+ time_last_bitrate_decrease_ = at_time;
+ }
+}
+
+void AimdRateControl::SetNetworkStateEstimate(
+ const absl::optional<NetworkStateEstimate>& estimate) {
+ network_estimate_ = estimate;
+}
+
+double AimdRateControl::GetNearMaxIncreaseRateBpsPerSecond() const {
+ RTC_DCHECK(!current_bitrate_.IsZero());
+ const TimeDelta kFrameInterval = TimeDelta::Seconds(1) / 30;
+ DataSize frame_size = current_bitrate_ * kFrameInterval;
+ const DataSize kPacketSize = DataSize::Bytes(1200);
+ double packets_per_frame = std::ceil(frame_size / kPacketSize);
+ DataSize avg_packet_size = frame_size / packets_per_frame;
+
+ // Approximate the over-use estimator delay to 100 ms.
+ TimeDelta response_time = rtt_ + TimeDelta::Millis(100);
+ if (in_experiment_)
+ response_time = response_time * 2;
+ double increase_rate_bps_per_second =
+ (avg_packet_size / response_time).bps<double>();
+ double kMinIncreaseRateBpsPerSecond = 4000;
+ return std::max(kMinIncreaseRateBpsPerSecond, increase_rate_bps_per_second);
+}
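+// Worked example (illustrative): at 300 kbps and 30 fps a frame is 1250
+// bytes, i.e. ceil(1250 / 1200) = 2 packets averaging 625 bytes. With the
+// default 200 ms RTT the response time is 300 ms (doubled to 600 ms in the
+// experiment), giving 625 B / 0.6 s, roughly 8.3 kbps of increase per second,
+// above the 4000 bps floor.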
+
+TimeDelta AimdRateControl::GetExpectedBandwidthPeriod() const {
+ const TimeDelta kMinPeriod = TimeDelta::Seconds(2);
+ const TimeDelta kDefaultPeriod = TimeDelta::Seconds(3);
+ const TimeDelta kMaxPeriod = TimeDelta::Seconds(50);
+
+ double increase_rate_bps_per_second = GetNearMaxIncreaseRateBpsPerSecond();
+ if (!last_decrease_)
+ return kDefaultPeriod;
+ double time_to_recover_decrease_seconds =
+ last_decrease_->bps() / increase_rate_bps_per_second;
+ TimeDelta period = TimeDelta::Seconds(time_to_recover_decrease_seconds);
+ return period.Clamped(kMinPeriod, kMaxPeriod);
+}
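+// Worked example (illustrative): recovering a 100 kbps decrease at roughly
+// 8.3 kbps of increase per second takes about 12 s, inside the [2 s, 50 s]
+// clamp.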
+
+void AimdRateControl::ChangeBitrate(const RateControlInput& input,
+ Timestamp at_time) {
+ absl::optional<DataRate> new_bitrate;
+ DataRate estimated_throughput =
+ input.estimated_throughput.value_or(latest_estimated_throughput_);
+ if (input.estimated_throughput)
+ latest_estimated_throughput_ = *input.estimated_throughput;
+
+  // An over-use should always trigger us to reduce the bitrate, even if we
+  // have not yet established our first estimate. By acting on the over-use,
+  // we will end up with a valid estimate.
+ if (!bitrate_is_initialized_ &&
+ input.bw_state != BandwidthUsage::kBwOverusing)
+ return;
+
+ ChangeState(input, at_time);
+
+ switch (rate_control_state_) {
+ case RateControlState::kRcHold:
+ break;
+
+ case RateControlState::kRcIncrease: {
+ if (estimated_throughput > link_capacity_.UpperBound())
+ link_capacity_.Reset();
+
+      // We limit the new bitrate based on the throughput to avoid unlimited
+      // bitrate increases. We allow a bit more lag at very low rates so we do
+      // not get stuck too easily if the encoder produces uneven output.
+ DataRate increase_limit =
+ 1.5 * estimated_throughput + DataRate::KilobitsPerSec(10);
+ if (ignore_throughput_limit_if_network_estimate_ && network_estimate_ &&
+ network_estimate_->link_capacity_upper.IsFinite()) {
+        // If we have a network estimate, we do allow the estimate to increase.
+ increase_limit = network_estimate_->link_capacity_upper *
+ estimate_bounded_increase_ratio_.Get();
+ } else if (send_side_ && in_alr_ && no_bitrate_increase_in_alr_) {
+        // Do not increase the delay-based estimate in ALR since the estimator
+        // will not be able to get the transport feedback necessary to detect
+        // whether the new estimate is correct.
+ // If we have previously increased above the limit (for instance due to
+ // probing), we don't allow further changes.
+ increase_limit = current_bitrate_;
+ }
+
+ if (current_bitrate_ < increase_limit) {
+ DataRate increased_bitrate = DataRate::MinusInfinity();
+ if (increase_to_network_estimate_ && network_estimate_ &&
+ network_estimate_->link_capacity_upper.IsFinite()) {
+ increased_bitrate = increase_limit;
+ } else if (link_capacity_.has_estimate()) {
+ // The link_capacity estimate is reset if the measured throughput
+ // is too far from the estimate. We can therefore assume that our
+ // target rate is reasonably close to link capacity and use additive
+ // increase.
+ DataRate additive_increase =
+ AdditiveRateIncrease(at_time, time_last_bitrate_change_);
+ increased_bitrate = current_bitrate_ + additive_increase;
+ } else {
+ // If we don't have an estimate of the link capacity, use faster ramp
+ // up to discover the capacity.
+ DataRate multiplicative_increase = MultiplicativeRateIncrease(
+ at_time, time_last_bitrate_change_, current_bitrate_);
+ increased_bitrate = current_bitrate_ + multiplicative_increase;
+ }
+ new_bitrate = std::min(increased_bitrate, increase_limit);
+ }
+ time_last_bitrate_change_ = at_time;
+ break;
+ }
+
+ case RateControlState::kRcDecrease: {
+ DataRate decreased_bitrate = DataRate::PlusInfinity();
+
+ // Set bit rate to something slightly lower than the measured throughput
+ // to get rid of any self-induced delay.
+ decreased_bitrate = estimated_throughput * beta_;
+ if (decreased_bitrate > current_bitrate_ && !link_capacity_fix_) {
+ // TODO(terelius): The link_capacity estimate may be based on old
+ // throughput measurements. Relying on them may lead to unnecessary
+ // BWE drops.
+ if (link_capacity_.has_estimate()) {
+ decreased_bitrate = beta_ * link_capacity_.estimate();
+ }
+ }
+ // Avoid increasing the rate when over-using.
+ if (decreased_bitrate < current_bitrate_) {
+ new_bitrate = decreased_bitrate;
+ }
+
+ if (bitrate_is_initialized_ && estimated_throughput < current_bitrate_) {
+ if (!new_bitrate.has_value()) {
+ last_decrease_ = DataRate::Zero();
+ } else {
+ last_decrease_ = current_bitrate_ - *new_bitrate;
+ }
+ }
+ if (estimated_throughput < link_capacity_.LowerBound()) {
+ // The current throughput is far from the estimated link capacity. Clear
+ // the estimate to allow an immediate update in OnOveruseDetected.
+ link_capacity_.Reset();
+ }
+
+ bitrate_is_initialized_ = true;
+ link_capacity_.OnOveruseDetected(estimated_throughput);
+ // Stay on hold until the pipes are cleared.
+ rate_control_state_ = RateControlState::kRcHold;
+ time_last_bitrate_change_ = at_time;
+ time_last_bitrate_decrease_ = at_time;
+ break;
+ }
+ default:
+ RTC_DCHECK_NOTREACHED();
+ }
+
+ current_bitrate_ = ClampBitrate(new_bitrate.value_or(current_bitrate_));
+}
+
+DataRate AimdRateControl::ClampBitrate(DataRate new_bitrate) const {
+ if (!disable_estimate_bounded_increase_ && network_estimate_ &&
+ network_estimate_->link_capacity_upper.IsFinite()) {
+ DataRate upper_bound = network_estimate_->link_capacity_upper *
+ estimate_bounded_increase_ratio_.Get();
+ if (ignore_network_estimate_decrease_) {
+ upper_bound = std::max(upper_bound, current_bitrate_);
+ }
+ new_bitrate = std::min(upper_bound, new_bitrate);
+ }
+ if (estimate_bounded_backoff_ && network_estimate_ &&
+ network_estimate_->link_capacity_lower.IsFinite() &&
+ new_bitrate < current_bitrate_) {
+ new_bitrate = std::min(
+ current_bitrate_,
+ std::max(new_bitrate, network_estimate_->link_capacity_lower * beta_));
+ }
+ new_bitrate = std::max(new_bitrate, min_configured_bitrate_);
+ return new_bitrate;
+}
+
+DataRate AimdRateControl::MultiplicativeRateIncrease(
+ Timestamp at_time,
+ Timestamp last_time,
+ DataRate current_bitrate) const {
+ double alpha = 1.08;
+ if (last_time.IsFinite()) {
+ auto time_since_last_update = at_time - last_time;
+ alpha = pow(alpha, std::min(time_since_last_update.seconds<double>(), 1.0));
+ }
+ DataRate multiplicative_increase =
+ std::max(current_bitrate * (alpha - 1.0), DataRate::BitsPerSec(1000));
+ return multiplicative_increase;
+}
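+// Worked example (illustrative): with alpha = 1.08 and a full second since
+// the last update, a 500 kbps rate grows by 8%, i.e. 40 kbps; the 1000 bps
+// floor only matters below 12.5 kbps.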
+
+DataRate AimdRateControl::AdditiveRateIncrease(Timestamp at_time,
+ Timestamp last_time) const {
+ double time_period_seconds = (at_time - last_time).seconds<double>();
+ double data_rate_increase_bps =
+ GetNearMaxIncreaseRateBpsPerSecond() * time_period_seconds;
+ return DataRate::BitsPerSec(data_rate_increase_bps);
+}
+
+void AimdRateControl::ChangeState(const RateControlInput& input,
+ Timestamp at_time) {
+ switch (input.bw_state) {
+ case BandwidthUsage::kBwNormal:
+ if (rate_control_state_ == RateControlState::kRcHold) {
+ time_last_bitrate_change_ = at_time;
+ rate_control_state_ = RateControlState::kRcIncrease;
+ }
+ break;
+ case BandwidthUsage::kBwOverusing:
+ if (rate_control_state_ != RateControlState::kRcDecrease) {
+ rate_control_state_ = RateControlState::kRcDecrease;
+ }
+ break;
+ case BandwidthUsage::kBwUnderusing:
+ rate_control_state_ = RateControlState::kRcHold;
+ break;
+ default:
+ RTC_DCHECK_NOTREACHED();
+ }
+}
+
+} // namespace webrtc
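
The two increase rules above are the core of the AIMD scheme: a multiplicative ramp of up to 8% per second (at most one second of growth is credited per update, with a 1000 bps floor) while the link capacity is unknown, and a linear additive ramp driven by GetNearMaxIncreaseRateBpsPerSecond() once the target is near the capacity estimate. Below is a minimal standalone sketch of that arithmetic, using plain doubles in bps and seconds instead of webrtc::DataRate and webrtc::Timestamp; the 4000 bps/s near-max rate is only an illustrative input, not a value taken from this file.

#include <algorithm>
#include <cmath>
#include <cstdio>

// Mirrors MultiplicativeRateIncrease() above: grow by up to 8% per second,
// and never by less than 1000 bps.
double MultiplicativeIncreaseBps(double current_bps, double secs_since_update) {
  double alpha = std::pow(1.08, std::min(secs_since_update, 1.0));
  return std::max(current_bps * (alpha - 1.0), 1000.0);
}

// Mirrors AdditiveRateIncrease() above: a linear ramp, increase rate times
// elapsed time.
double AdditiveIncreaseBps(double near_max_rate_bps_per_s,
                           double secs_since_update) {
  return near_max_rate_bps_per_s * secs_since_update;
}

int main() {
  std::printf("multiplicative: +%.0f bps\n",
              MultiplicativeIncreaseBps(300000.0, 0.5));  // about +11769 bps
  std::printf("additive:       +%.0f bps\n",
              AdditiveIncreaseBps(4000.0, 0.5));  // +2000 bps
}
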
diff --git a/third_party/libwebrtc/modules/remote_bitrate_estimator/aimd_rate_control.h b/third_party/libwebrtc/modules/remote_bitrate_estimator/aimd_rate_control.h
new file mode 100644
index 0000000000..6c770cdc45
--- /dev/null
+++ b/third_party/libwebrtc/modules/remote_bitrate_estimator/aimd_rate_control.h
@@ -0,0 +1,126 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_REMOTE_BITRATE_ESTIMATOR_AIMD_RATE_CONTROL_H_
+#define MODULES_REMOTE_BITRATE_ESTIMATOR_AIMD_RATE_CONTROL_H_
+
+#include <stdint.h>
+
+#include "absl/types/optional.h"
+#include "api/field_trials_view.h"
+#include "api/transport/network_types.h"
+#include "api/units/data_rate.h"
+#include "api/units/timestamp.h"
+#include "modules/congestion_controller/goog_cc/link_capacity_estimator.h"
+#include "modules/remote_bitrate_estimator/include/bwe_defines.h"
+#include "rtc_base/experiments/field_trial_parser.h"
+
+namespace webrtc {
+// A rate control implementation based on additive increases of
+// bitrate when no over-use is detected and multiplicative decreases when
+// over-uses are detected. When we think the available bandwidth has changed or
+// is unknown, we will switch to a "slow-start mode" where we increase
+// multiplicatively.
+class AimdRateControl {
+ public:
+ explicit AimdRateControl(const FieldTrialsView* key_value_config);
+ AimdRateControl(const FieldTrialsView* key_value_config, bool send_side);
+ ~AimdRateControl();
+
+ // Returns true if the target bitrate has been initialized. This happens
+ // either if it has been explicitly set via SetStartBitrate/SetEstimate, or if
+ // we have measured a throughput.
+ bool ValidEstimate() const;
+ void SetStartBitrate(DataRate start_bitrate);
+ void SetMinBitrate(DataRate min_bitrate);
+ TimeDelta GetFeedbackInterval() const;
+
+ // Returns true if the bitrate estimate hasn't been changed for more than
+ // an RTT, or if the estimated_throughput is less than half of the current
+ // estimate. Should be used to decide if we should reduce the rate further
+ // when over-using.
+ bool TimeToReduceFurther(Timestamp at_time,
+ DataRate estimated_throughput) const;
+ // As above. To be used if overusing before we have measured a throughput.
+ bool InitialTimeToReduceFurther(Timestamp at_time) const;
+
+ DataRate LatestEstimate() const;
+ void SetRtt(TimeDelta rtt);
+ DataRate Update(const RateControlInput* input, Timestamp at_time);
+ void SetInApplicationLimitedRegion(bool in_alr);
+ void SetEstimate(DataRate bitrate, Timestamp at_time);
+ void SetNetworkStateEstimate(
+ const absl::optional<NetworkStateEstimate>& estimate);
+
+ // Returns the increase rate when used bandwidth is near the link capacity.
+ double GetNearMaxIncreaseRateBpsPerSecond() const;
+ // Returns the expected time between overuse signals (assuming steady state).
+ TimeDelta GetExpectedBandwidthPeriod() const;
+
+ private:
+ enum class RateControlState { kRcHold, kRcIncrease, kRcDecrease };
+
+ friend class GoogCcStatePrinter;
+ // Update the target bitrate based on, among other things, the current rate
+ // control state, the current target bitrate and the estimated throughput.
+ // When in the "increase" state the bitrate will be increased either
+ // additively or multiplicatively depending on the rate control region. When
+ // in the "decrease" state the bitrate will be decreased to slightly below the
+ // current throughput. When in the "hold" state the bitrate will be kept
+ // constant to allow built-up queues to drain.
+ void ChangeBitrate(const RateControlInput& input, Timestamp at_time);
+
+ DataRate ClampBitrate(DataRate new_bitrate) const;
+ DataRate MultiplicativeRateIncrease(Timestamp at_time,
+ Timestamp last_time,
+ DataRate current_bitrate) const;
+ DataRate AdditiveRateIncrease(Timestamp at_time, Timestamp last_time) const;
+ void UpdateChangePeriod(Timestamp at_time);
+ void ChangeState(const RateControlInput& input, Timestamp at_time);
+
+ DataRate min_configured_bitrate_;
+ DataRate max_configured_bitrate_;
+ DataRate current_bitrate_;
+ DataRate latest_estimated_throughput_;
+ LinkCapacityEstimator link_capacity_;
+ absl::optional<NetworkStateEstimate> network_estimate_;
+ RateControlState rate_control_state_;
+ Timestamp time_last_bitrate_change_;
+ Timestamp time_last_bitrate_decrease_;
+ Timestamp time_first_throughput_estimate_;
+ bool bitrate_is_initialized_;
+ double beta_;
+ bool in_alr_;
+ TimeDelta rtt_;
+ const bool send_side_;
+ const bool in_experiment_;
+ // Allow the delay-based estimate to increase only as long as no
+ // application-limited region (ALR) is detected.
+ const bool no_bitrate_increase_in_alr_;
+ // Use estimated link capacity lower bound if it is higher than the
+ // acknowledged rate when backing off due to overuse.
+ const bool estimate_bounded_backoff_;
+ // Unless disabled, the estimated link capacity upper bound *
+ // `estimate_bounded_increase_ratio_` is used as an upper limit for the
+ // estimate.
+ FieldTrialFlag disable_estimate_bounded_increase_{"Disabled"};
+ FieldTrialParameter<double> estimate_bounded_increase_ratio_{"ratio", 1.0};
+ FieldTrialParameter<bool> ignore_throughput_limit_if_network_estimate_{
+ "ignore_acked", false};
+ FieldTrialParameter<bool> increase_to_network_estimate_{"immediate_incr",
+ false};
+ FieldTrialParameter<bool> ignore_network_estimate_decrease_{"ignore_decr",
+ false};
+ absl::optional<DataRate> last_decrease_;
+ FieldTrialOptional<TimeDelta> initial_backoff_interval_;
+ FieldTrialFlag link_capacity_fix_;
+};
+} // namespace webrtc
+
+#endif // MODULES_REMOTE_BITRATE_ESTIMATOR_AIMD_RATE_CONTROL_H_
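
A rough sketch of how a caller might drive the interface declared above, modeled on the helper functions in the unit tests that follow: construct with a field-trial config, seed a start bitrate, then feed periodic detector inputs and read back the target. This is illustrative only, not a real integration.

#include "api/transport/field_trial_based_config.h"
#include "modules/remote_bitrate_estimator/aimd_rate_control.h"
#include "system_wrappers/include/clock.h"

webrtc::DataRate DriveAimdForOneSecond() {
  webrtc::FieldTrialBasedConfig field_trials;
  webrtc::AimdRateControl aimd(&field_trials, /*send_side=*/true);
  webrtc::SimulatedClock clock(/*initial_time_us=*/123456);
  aimd.SetStartBitrate(webrtc::DataRate::KilobitsPerSec(300));
  for (int i = 0; i < 10; ++i) {
    // A normal-bandwidth-usage sample with 250 kbps of acked throughput.
    webrtc::RateControlInput input(webrtc::BandwidthUsage::kBwNormal,
                                   webrtc::DataRate::KilobitsPerSec(250));
    aimd.Update(&input, clock.CurrentTime());
    clock.AdvanceTimeMilliseconds(100);
  }
  return aimd.LatestEstimate();
}
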
diff --git a/third_party/libwebrtc/modules/remote_bitrate_estimator/aimd_rate_control_unittest.cc b/third_party/libwebrtc/modules/remote_bitrate_estimator/aimd_rate_control_unittest.cc
new file mode 100644
index 0000000000..aa80dae55b
--- /dev/null
+++ b/third_party/libwebrtc/modules/remote_bitrate_estimator/aimd_rate_control_unittest.cc
@@ -0,0 +1,484 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/remote_bitrate_estimator/aimd_rate_control.h"
+
+#include <memory>
+
+#include "api/transport/field_trial_based_config.h"
+#include "api/units/data_rate.h"
+#include "system_wrappers/include/clock.h"
+#include "test/field_trial.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+constexpr int64_t kClockInitialTime = 123456;
+
+constexpr int kMinBwePeriodMs = 2000;
+constexpr int kDefaultPeriodMs = 3000;
+constexpr int kMaxBwePeriodMs = 50000;
+
+// After an overuse, we back off to 85% of the received bitrate.
+constexpr double kFractionAfterOveruse = 0.85;
+
+struct AimdRateControlStates {
+ std::unique_ptr<AimdRateControl> aimd_rate_control;
+ std::unique_ptr<SimulatedClock> simulated_clock;
+ FieldTrialBasedConfig field_trials;
+};
+
+AimdRateControlStates CreateAimdRateControlStates(bool send_side = false) {
+ AimdRateControlStates states;
+ states.aimd_rate_control.reset(
+ new AimdRateControl(&states.field_trials, send_side));
+ states.simulated_clock.reset(new SimulatedClock(kClockInitialTime));
+ return states;
+}
+absl::optional<DataRate> OptionalRateFromOptionalBps(
+ absl::optional<int> bitrate_bps) {
+ if (bitrate_bps) {
+ return DataRate::BitsPerSec(*bitrate_bps);
+ } else {
+ return absl::nullopt;
+ }
+}
+void UpdateRateControl(const AimdRateControlStates& states,
+ const BandwidthUsage& bandwidth_usage,
+ absl::optional<uint32_t> throughput_estimate,
+ int64_t now_ms) {
+ RateControlInput input(bandwidth_usage,
+ OptionalRateFromOptionalBps(throughput_estimate));
+ states.aimd_rate_control->Update(&input, Timestamp::Millis(now_ms));
+}
+void SetEstimate(const AimdRateControlStates& states, int bitrate_bps) {
+ states.aimd_rate_control->SetEstimate(DataRate::BitsPerSec(bitrate_bps),
+ states.simulated_clock->CurrentTime());
+}
+
+} // namespace
+
+TEST(AimdRateControlTest, MinNearMaxIncreaseRateOnLowBandwith) {
+ auto states = CreateAimdRateControlStates();
+ constexpr int kBitrate = 30000;
+ SetEstimate(states, kBitrate);
+ EXPECT_EQ(4000,
+ states.aimd_rate_control->GetNearMaxIncreaseRateBpsPerSecond());
+}
+
+TEST(AimdRateControlTest, NearMaxIncreaseRateIs5kbpsOn90kbpsAnd200msRtt) {
+ auto states = CreateAimdRateControlStates();
+ constexpr int kBitrate = 90000;
+ SetEstimate(states, kBitrate);
+ EXPECT_EQ(5000,
+ states.aimd_rate_control->GetNearMaxIncreaseRateBpsPerSecond());
+}
+
+TEST(AimdRateControlTest, NearMaxIncreaseRateIs5kbpsOn60kbpsAnd100msRtt) {
+ auto states = CreateAimdRateControlStates();
+ constexpr int kBitrate = 60000;
+ SetEstimate(states, kBitrate);
+ states.aimd_rate_control->SetRtt(TimeDelta::Millis(100));
+ EXPECT_EQ(5000,
+ states.aimd_rate_control->GetNearMaxIncreaseRateBpsPerSecond());
+}
+
+TEST(AimdRateControlTest, GetIncreaseRateAndBandwidthPeriod) {
+ // Smoothing experiment disabled
+ auto states = CreateAimdRateControlStates();
+ constexpr int kBitrate = 300000;
+ SetEstimate(states, kBitrate);
+ UpdateRateControl(states, BandwidthUsage::kBwOverusing, kBitrate,
+ states.simulated_clock->TimeInMilliseconds());
+ EXPECT_NEAR(14000,
+ states.aimd_rate_control->GetNearMaxIncreaseRateBpsPerSecond(),
+ 1000);
+ EXPECT_EQ(kDefaultPeriodMs,
+ states.aimd_rate_control->GetExpectedBandwidthPeriod().ms());
+}
+
+TEST(AimdRateControlTest, BweLimitedByAckedBitrate) {
+ auto states = CreateAimdRateControlStates();
+ constexpr int kAckedBitrate = 10000;
+ SetEstimate(states, kAckedBitrate);
+ while (states.simulated_clock->TimeInMilliseconds() - kClockInitialTime <
+ 20000) {
+ UpdateRateControl(states, BandwidthUsage::kBwNormal, kAckedBitrate,
+ states.simulated_clock->TimeInMilliseconds());
+ states.simulated_clock->AdvanceTimeMilliseconds(100);
+ }
+ ASSERT_TRUE(states.aimd_rate_control->ValidEstimate());
+ EXPECT_EQ(static_cast<uint32_t>(1.5 * kAckedBitrate + 10000),
+ states.aimd_rate_control->LatestEstimate().bps());
+}
+
+TEST(AimdRateControlTest, BweNotLimitedByDecreasingAckedBitrate) {
+ auto states = CreateAimdRateControlStates();
+ constexpr int kAckedBitrate = 100000;
+ SetEstimate(states, kAckedBitrate);
+ while (states.simulated_clock->TimeInMilliseconds() - kClockInitialTime <
+ 20000) {
+ UpdateRateControl(states, BandwidthUsage::kBwNormal, kAckedBitrate,
+ states.simulated_clock->TimeInMilliseconds());
+ states.simulated_clock->AdvanceTimeMilliseconds(100);
+ }
+ ASSERT_TRUE(states.aimd_rate_control->ValidEstimate());
+ // If the acked bitrate decreases, the BWE shouldn't be reduced to 1.5x
+ // what's being acked, but it also shouldn't be allowed to increase further.
+ uint32_t prev_estimate = states.aimd_rate_control->LatestEstimate().bps();
+ UpdateRateControl(states, BandwidthUsage::kBwNormal, kAckedBitrate / 2,
+ states.simulated_clock->TimeInMilliseconds());
+ uint32_t new_estimate = states.aimd_rate_control->LatestEstimate().bps();
+ EXPECT_NEAR(new_estimate, static_cast<uint32_t>(1.5 * kAckedBitrate + 10000),
+ 2000);
+ EXPECT_EQ(new_estimate, prev_estimate);
+}
+
+TEST(AimdRateControlTest, DefaultPeriodUntilFirstOveruse) {
+ // Smoothing experiment disabled
+ auto states = CreateAimdRateControlStates();
+ states.aimd_rate_control->SetStartBitrate(DataRate::KilobitsPerSec(300));
+ EXPECT_EQ(kDefaultPeriodMs,
+ states.aimd_rate_control->GetExpectedBandwidthPeriod().ms());
+ states.simulated_clock->AdvanceTimeMilliseconds(100);
+ UpdateRateControl(states, BandwidthUsage::kBwOverusing, 280000,
+ states.simulated_clock->TimeInMilliseconds());
+ EXPECT_NE(kDefaultPeriodMs,
+ states.aimd_rate_control->GetExpectedBandwidthPeriod().ms());
+}
+
+TEST(AimdRateControlTest, ExpectedPeriodAfter20kbpsDropAnd5kbpsIncrease) {
+ auto states = CreateAimdRateControlStates();
+ constexpr int kInitialBitrate = 110000;
+ SetEstimate(states, kInitialBitrate);
+ states.simulated_clock->AdvanceTimeMilliseconds(100);
+ // Make the bitrate drop by 20 kbps to get to 90 kbps.
+ // The rate increase at 90 kbps should be 5 kbps, so the period should be 4 s.
+ constexpr int kAckedBitrate =
+ (kInitialBitrate - 20000) / kFractionAfterOveruse;
+ UpdateRateControl(states, BandwidthUsage::kBwOverusing, kAckedBitrate,
+ states.simulated_clock->TimeInMilliseconds());
+ EXPECT_EQ(5000,
+ states.aimd_rate_control->GetNearMaxIncreaseRateBpsPerSecond());
+ EXPECT_EQ(4000, states.aimd_rate_control->GetExpectedBandwidthPeriod().ms());
+}
+
+TEST(AimdRateControlTest, BandwidthPeriodIsNotBelowMin) {
+ auto states = CreateAimdRateControlStates();
+ constexpr int kInitialBitrate = 10000;
+ SetEstimate(states, kInitialBitrate);
+ states.simulated_clock->AdvanceTimeMilliseconds(100);
+ // Make a small (1.5 kbps) bitrate drop to 8.5 kbps.
+ UpdateRateControl(states, BandwidthUsage::kBwOverusing, kInitialBitrate - 1,
+ states.simulated_clock->TimeInMilliseconds());
+ EXPECT_EQ(kMinBwePeriodMs,
+ states.aimd_rate_control->GetExpectedBandwidthPeriod().ms());
+}
+
+TEST(AimdRateControlTest, BandwidthPeriodIsNotAboveMaxNoSmoothingExp) {
+ auto states = CreateAimdRateControlStates();
+ constexpr int kInitialBitrate = 10010000;
+ SetEstimate(states, kInitialBitrate);
+ states.simulated_clock->AdvanceTimeMilliseconds(100);
+ // Make a large (10 Mbps) bitrate drop to 10 kbps.
+ constexpr int kAckedBitrate = 10000 / kFractionAfterOveruse;
+ UpdateRateControl(states, BandwidthUsage::kBwOverusing, kAckedBitrate,
+ states.simulated_clock->TimeInMilliseconds());
+ EXPECT_EQ(kMaxBwePeriodMs,
+ states.aimd_rate_control->GetExpectedBandwidthPeriod().ms());
+}
+
+TEST(AimdRateControlTest, SendingRateBoundedWhenThroughputNotEstimated) {
+ auto states = CreateAimdRateControlStates();
+ constexpr int kInitialBitrateBps = 123000;
+ UpdateRateControl(states, BandwidthUsage::kBwNormal, kInitialBitrateBps,
+ states.simulated_clock->TimeInMilliseconds());
+ // AimdRateControl sets the initial bit rate to what it receives after
+ // five seconds have passed.
+ // TODO(bugs.webrtc.org/9379): The comment in the AimdRateControl does not
+ // match the constant.
+ constexpr int kInitializationTimeMs = 5000;
+ states.simulated_clock->AdvanceTimeMilliseconds(kInitializationTimeMs + 1);
+ UpdateRateControl(states, BandwidthUsage::kBwNormal, kInitialBitrateBps,
+ states.simulated_clock->TimeInMilliseconds());
+ for (int i = 0; i < 100; ++i) {
+ UpdateRateControl(states, BandwidthUsage::kBwNormal, absl::nullopt,
+ states.simulated_clock->TimeInMilliseconds());
+ states.simulated_clock->AdvanceTimeMilliseconds(100);
+ }
+ EXPECT_LE(states.aimd_rate_control->LatestEstimate().bps(),
+ kInitialBitrateBps * 1.5 + 10000);
+}
+
+TEST(AimdRateControlTest, EstimateDoesNotIncreaseInAlr) {
+ // When ALR is detected, the delay-based estimator is not allowed to increase
+ // the BWE, since there will be no feedback from the network telling us
+ // whether the new estimate is correct.
+ test::ScopedFieldTrials override_field_trials(
+ "WebRTC-DontIncreaseDelayBasedBweInAlr/Enabled/");
+ auto states = CreateAimdRateControlStates(/*send_side=*/true);
+ constexpr int kInitialBitrateBps = 123000;
+ SetEstimate(states, kInitialBitrateBps);
+ states.aimd_rate_control->SetInApplicationLimitedRegion(true);
+ UpdateRateControl(states, BandwidthUsage::kBwNormal, kInitialBitrateBps,
+ states.simulated_clock->TimeInMilliseconds());
+ ASSERT_EQ(states.aimd_rate_control->LatestEstimate().bps(),
+ kInitialBitrateBps);
+
+ for (int i = 0; i < 100; ++i) {
+ UpdateRateControl(states, BandwidthUsage::kBwNormal, absl::nullopt,
+ states.simulated_clock->TimeInMilliseconds());
+ states.simulated_clock->AdvanceTimeMilliseconds(100);
+ }
+ EXPECT_EQ(states.aimd_rate_control->LatestEstimate().bps(),
+ kInitialBitrateBps);
+}
+
+TEST(AimdRateControlTest, SetEstimateIncreaseBweInAlr) {
+ test::ScopedFieldTrials override_field_trials(
+ "WebRTC-DontIncreaseDelayBasedBweInAlr/Enabled/");
+ auto states = CreateAimdRateControlStates(/*send_side=*/true);
+ constexpr int kInitialBitrateBps = 123000;
+ SetEstimate(states, kInitialBitrateBps);
+ states.aimd_rate_control->SetInApplicationLimitedRegion(true);
+ ASSERT_EQ(states.aimd_rate_control->LatestEstimate().bps(),
+ kInitialBitrateBps);
+ SetEstimate(states, 2 * kInitialBitrateBps);
+ EXPECT_EQ(states.aimd_rate_control->LatestEstimate().bps(),
+ 2 * kInitialBitrateBps);
+}
+
+TEST(AimdRateControlTest, SetEstimateUpperLimitedByNetworkEstimate) {
+ auto states = CreateAimdRateControlStates(/*send_side=*/true);
+ NetworkStateEstimate network_estimate;
+ network_estimate.link_capacity_upper = DataRate::KilobitsPerSec(400);
+ states.aimd_rate_control->SetNetworkStateEstimate(network_estimate);
+ SetEstimate(states, 500'000);
+ EXPECT_EQ(states.aimd_rate_control->LatestEstimate(),
+ network_estimate.link_capacity_upper);
+}
+
+TEST(AimdRateControlTest, SetEstimateLowerLimitedByNetworkEstimate) {
+ auto states = CreateAimdRateControlStates(/*send_side=*/true);
+ NetworkStateEstimate network_estimate;
+ network_estimate.link_capacity_lower = DataRate::KilobitsPerSec(400);
+ states.aimd_rate_control->SetNetworkStateEstimate(network_estimate);
+ SetEstimate(states, 100'000);
+ // 0.85 is the default backoff factor (`beta_`).
+ EXPECT_EQ(states.aimd_rate_control->LatestEstimate(),
+ network_estimate.link_capacity_lower * 0.85);
+}
+
+TEST(AimdRateControlTest,
+ SetEstimateIgnoredIfLowerThanNetworkEstimateAndCurrent) {
+ auto states = CreateAimdRateControlStates(/*send_side=*/true);
+ SetEstimate(states, 200'000);
+ ASSERT_EQ(states.aimd_rate_control->LatestEstimate().kbps(), 200);
+ NetworkStateEstimate network_estimate;
+ network_estimate.link_capacity_lower = DataRate::KilobitsPerSec(400);
+ states.aimd_rate_control->SetNetworkStateEstimate(network_estimate);
+ // Ignore the next SetEstimate, since the estimate is lower than 85% of
+ // the network estimate.
+ SetEstimate(states, 100'000);
+ EXPECT_EQ(states.aimd_rate_control->LatestEstimate().kbps(), 200);
+}
+
+TEST(AimdRateControlTest, SetEstimateIgnoresNetworkEstimatesLowerThanCurrent) {
+ test::ScopedFieldTrials override_field_trials(
+ "WebRTC-Bwe-EstimateBoundedIncrease/"
+ "ratio:0.85,ignore_acked:true,ignore_decr:true/");
+ auto states = CreateAimdRateControlStates(/*send_side=*/true);
+ states.aimd_rate_control->SetStartBitrate(DataRate::KilobitsPerSec(30));
+ NetworkStateEstimate network_estimate;
+ network_estimate.link_capacity_upper = DataRate::KilobitsPerSec(400);
+ states.aimd_rate_control->SetNetworkStateEstimate(network_estimate);
+ SetEstimate(states, 500'000);
+ ASSERT_EQ(states.aimd_rate_control->LatestEstimate(),
+ network_estimate.link_capacity_upper * 0.85);
+
+ NetworkStateEstimate lower_network_estimate;
+ lower_network_estimate.link_capacity_upper = DataRate::KilobitsPerSec(300);
+ states.aimd_rate_control->SetNetworkStateEstimate(lower_network_estimate);
+ SetEstimate(states, 500'000);
+ EXPECT_EQ(states.aimd_rate_control->LatestEstimate(),
+ network_estimate.link_capacity_upper * 0.85);
+}
+
+TEST(AimdRateControlTest, EstimateIncreaseWhileNotInAlr) {
+ // Allow the estimate to increase as long as ALR is not detected, to ensure
+ // that BWE cannot get stuck at a certain bitrate.
+ test::ScopedFieldTrials override_field_trials(
+ "WebRTC-DontIncreaseDelayBasedBweInAlr/Enabled/");
+ auto states = CreateAimdRateControlStates(/*send_side=*/true);
+ constexpr int kInitialBitrateBps = 123000;
+ SetEstimate(states, kInitialBitrateBps);
+ states.aimd_rate_control->SetInApplicationLimitedRegion(false);
+ UpdateRateControl(states, BandwidthUsage::kBwNormal, kInitialBitrateBps,
+ states.simulated_clock->TimeInMilliseconds());
+ for (int i = 0; i < 100; ++i) {
+ UpdateRateControl(states, BandwidthUsage::kBwNormal, absl::nullopt,
+ states.simulated_clock->TimeInMilliseconds());
+ states.simulated_clock->AdvanceTimeMilliseconds(100);
+ }
+ EXPECT_GT(states.aimd_rate_control->LatestEstimate().bps(),
+ kInitialBitrateBps);
+}
+
+TEST(AimdRateControlTest, EstimateNotLimitedByNetworkEstimateIfDisabled) {
+ test::ScopedFieldTrials override_field_trials(
+ "WebRTC-Bwe-EstimateBoundedIncrease/Disabled/");
+ auto states = CreateAimdRateControlStates(/*send_side=*/true);
+ constexpr int kInitialBitrateBps = 123000;
+ SetEstimate(states, kInitialBitrateBps);
+ states.aimd_rate_control->SetInApplicationLimitedRegion(false);
+ NetworkStateEstimate network_estimate;
+ network_estimate.link_capacity_upper = DataRate::KilobitsPerSec(150);
+ states.aimd_rate_control->SetNetworkStateEstimate(network_estimate);
+
+ for (int i = 0; i < 100; ++i) {
+ UpdateRateControl(states, BandwidthUsage::kBwNormal, absl::nullopt,
+ states.simulated_clock->TimeInMilliseconds());
+ states.simulated_clock->AdvanceTimeMilliseconds(100);
+ }
+ EXPECT_GT(states.aimd_rate_control->LatestEstimate(),
+ network_estimate.link_capacity_upper);
+}
+
+TEST(AimdRateControlTest,
+ EstimateSlowlyIncreaseToUpperLinkCapacityEstimateIfConfigured) {
+ // Even if ALR is detected, the delay-based estimator is allowed to increase
+ // up to a percentage of the upper link capacity.
+ test::ScopedFieldTrials override_field_trials(
+ "WebRTC-Bwe-EstimateBoundedIncrease/"
+ "ratio:0.85,ignore_acked:true,immediate_incr:false/"
+ "WebRTC-DontIncreaseDelayBasedBweInAlr/Enabled/");
+ auto states = CreateAimdRateControlStates(/*send_side=*/true);
+ constexpr int kInitialBitrateBps = 123000;
+ SetEstimate(states, kInitialBitrateBps);
+ states.aimd_rate_control->SetInApplicationLimitedRegion(true);
+
+ NetworkStateEstimate network_estimate;
+ network_estimate.link_capacity_upper = DataRate::KilobitsPerSec(200);
+ states.aimd_rate_control->SetNetworkStateEstimate(network_estimate);
+ for (int i = 0; i < 10; ++i) {
+ UpdateRateControl(states, BandwidthUsage::kBwNormal, absl::nullopt,
+ states.simulated_clock->TimeInMilliseconds());
+ states.simulated_clock->AdvanceTimeMilliseconds(100);
+ EXPECT_LT(states.aimd_rate_control->LatestEstimate(),
+ network_estimate.link_capacity_upper * 0.85);
+ }
+ for (int i = 0; i < 50; ++i) {
+ UpdateRateControl(states, BandwidthUsage::kBwNormal, absl::nullopt,
+ states.simulated_clock->TimeInMilliseconds());
+ states.simulated_clock->AdvanceTimeMilliseconds(100);
+ }
+ EXPECT_EQ(states.aimd_rate_control->LatestEstimate(),
+ network_estimate.link_capacity_upper * 0.85);
+}
+
+TEST(AimdRateControlTest,
+ EstimateImmediatelyIncreaseToUpperLinkCapacityEstimateIfConfigured) {
+ // Even if ALR is detected, the delay-based estimator is allowed to increase
+ // up to a percentage of the upper link capacity.
+ test::ScopedFieldTrials override_field_trials(
+ "WebRTC-Bwe-EstimateBoundedIncrease/"
+ "ratio:0.85,ignore_acked:true,immediate_incr:true/"
+ "WebRTC-DontIncreaseDelayBasedBweInAlr/Enabled/");
+ auto states = CreateAimdRateControlStates(/*send_side=*/true);
+ constexpr int kInitialBitrateBps = 123000;
+ SetEstimate(states, kInitialBitrateBps);
+ states.aimd_rate_control->SetInApplicationLimitedRegion(true);
+
+ NetworkStateEstimate network_estimate;
+ network_estimate.link_capacity_upper = DataRate::KilobitsPerSec(200);
+ states.aimd_rate_control->SetNetworkStateEstimate(network_estimate);
+ UpdateRateControl(states, BandwidthUsage::kBwNormal, absl::nullopt,
+ states.simulated_clock->TimeInMilliseconds());
+ EXPECT_EQ(states.aimd_rate_control->LatestEstimate(),
+ network_estimate.link_capacity_upper * 0.85);
+}
+
+TEST(AimdRateControlTest, EstimateNotLoweredByNetworkEstimate) {
+ // The delay-based estimator is allowed to increase up to a percentage of
+ // the upper link capacity but does not decrease unless the delay detector
+ // discovers an overuse.
+ test::ScopedFieldTrials override_field_trials(
+ "WebRTC-Bwe-EstimateBoundedIncrease/"
+ "ratio:0.85,ignore_acked:true,ignore_decr:true/"
+ "WebRTC-DontIncreaseDelayBasedBweInAlr/Enabled/");
+ auto states = CreateAimdRateControlStates(/*send_side=*/true);
+ constexpr int kInitialBitrateBps = 123000;
+ constexpr int kEstimatedThroughputBps = 30'000;
+ SetEstimate(states, kInitialBitrateBps);
+
+ NetworkStateEstimate network_estimate;
+ network_estimate.link_capacity_upper = DataRate::KilobitsPerSec(200);
+ states.aimd_rate_control->SetNetworkStateEstimate(network_estimate);
+ for (int i = 0; i < 100; ++i) {
+ UpdateRateControl(states, BandwidthUsage::kBwNormal,
+ kEstimatedThroughputBps,
+ states.simulated_clock->TimeInMilliseconds());
+ states.simulated_clock->AdvanceTimeMilliseconds(100);
+ }
+ DataRate estimate_after_increase = states.aimd_rate_control->LatestEstimate();
+ ASSERT_EQ(estimate_after_increase,
+ network_estimate.link_capacity_upper * 0.85);
+
+ // A lower network estimate does not decrease the estimate immediately,
+ // but the estimate is not allowed to increase.
+ network_estimate.link_capacity_upper = DataRate::KilobitsPerSec(100);
+ network_estimate.link_capacity_lower = DataRate::KilobitsPerSec(80);
+ states.aimd_rate_control->SetNetworkStateEstimate(network_estimate);
+ for (int i = 0; i < 10; ++i) {
+ UpdateRateControl(states, BandwidthUsage::kBwNormal,
+ kEstimatedThroughputBps,
+ states.simulated_clock->TimeInMilliseconds());
+ states.simulated_clock->AdvanceTimeMilliseconds(100);
+ EXPECT_EQ(states.aimd_rate_control->LatestEstimate(),
+ estimate_after_increase);
+ }
+
+ // If the detector detects an overuse, BWE drops to a value relative to the
+ // network estimate.
+ UpdateRateControl(states, BandwidthUsage::kBwOverusing,
+ kEstimatedThroughputBps,
+ states.simulated_clock->TimeInMilliseconds());
+ EXPECT_LT(states.aimd_rate_control->LatestEstimate(),
+ network_estimate.link_capacity_lower);
+ EXPECT_GT(states.aimd_rate_control->LatestEstimate().bps(),
+ kEstimatedThroughputBps);
+}
+
+TEST(AimdRateControlTest, EstimateDoesNotIncreaseInAlrIfNetworkEstimateNotSet) {
+ // When ALR is detected, the delay-based estimator is not allowed to increase
+ // the BWE, since there will be no feedback from the network telling us
+ // whether the new estimate is correct.
+ test::ScopedFieldTrials override_field_trials(
+ "WebRTC-Bwe-EstimateBoundedIncrease/ratio:0.85,ignore_acked:true/"
+ "WebRTC-DontIncreaseDelayBasedBweInAlr/Enabled/");
+ auto states = CreateAimdRateControlStates(/*send_side=*/true);
+ constexpr int kInitialBitrateBps = 123000;
+ SetEstimate(states, kInitialBitrateBps);
+ states.aimd_rate_control->SetInApplicationLimitedRegion(true);
+ UpdateRateControl(states, BandwidthUsage::kBwNormal, kInitialBitrateBps,
+ states.simulated_clock->TimeInMilliseconds());
+ ASSERT_EQ(states.aimd_rate_control->LatestEstimate().bps(),
+ kInitialBitrateBps);
+
+ for (int i = 0; i < 100; ++i) {
+ UpdateRateControl(states, BandwidthUsage::kBwNormal, absl::nullopt,
+ states.simulated_clock->TimeInMilliseconds());
+ states.simulated_clock->AdvanceTimeMilliseconds(100);
+ }
+ EXPECT_EQ(states.aimd_rate_control->LatestEstimate().bps(),
+ kInitialBitrateBps);
+}
+
+} // namespace webrtc
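
The bandwidth-period tests above pin down a simple relationship: the expected period is roughly the last bitrate drop divided by the near-max increase rate, clamped to the kMinBwePeriodMs/kMaxBwePeriodMs bounds (a 20 kbps drop at 5 kbps/s gives 4 s). A back-of-the-envelope sketch of that arithmetic, inferred from the test constants rather than copied from the implementation:

#include <algorithm>
#include <cstdio>

// Time to climb back to the pre-drop rate, clamped to [2 s, 50 s].
double ExpectedPeriodSeconds(double last_drop_bps,
                             double increase_rate_bps_per_s) {
  return std::clamp(last_drop_bps / increase_rate_bps_per_s, 2.0, 50.0);
}

int main() {
  std::printf("%.1f s\n", ExpectedPeriodSeconds(20000.0, 5000.0));  // 4.0 s
  std::printf("%.1f s\n", ExpectedPeriodSeconds(1500.0, 4000.0));   // 2.0 s
}
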
diff --git a/third_party/libwebrtc/modules/remote_bitrate_estimator/bwe_defines.cc b/third_party/libwebrtc/modules/remote_bitrate_estimator/bwe_defines.cc
new file mode 100644
index 0000000000..db92f46717
--- /dev/null
+++ b/third_party/libwebrtc/modules/remote_bitrate_estimator/bwe_defines.cc
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/remote_bitrate_estimator/include/bwe_defines.h"
+
+namespace webrtc {
+
+const char kBweTypeHistogram[] = "WebRTC.BWE.Types";
+
+RateControlInput::RateControlInput(
+ BandwidthUsage bw_state,
+ const absl::optional<DataRate>& estimated_throughput)
+ : bw_state(bw_state), estimated_throughput(estimated_throughput) {}
+
+RateControlInput::~RateControlInput() = default;
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/remote_bitrate_estimator/include/bwe_defines.h b/third_party/libwebrtc/modules/remote_bitrate_estimator/include/bwe_defines.h
new file mode 100644
index 0000000000..d3dd96be75
--- /dev/null
+++ b/third_party/libwebrtc/modules/remote_bitrate_estimator/include/bwe_defines.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_REMOTE_BITRATE_ESTIMATOR_INCLUDE_BWE_DEFINES_H_
+#define MODULES_REMOTE_BITRATE_ESTIMATOR_INCLUDE_BWE_DEFINES_H_
+
+#include <stdint.h>
+
+#include "absl/types/optional.h"
+#include "api/network_state_predictor.h"
+#include "api/units/data_rate.h"
+
+namespace webrtc {
+
+constexpr DataRate kCongestionControllerMinBitrate = DataRate::BitsPerSec(5000);
+
+static const int64_t kBitrateWindowMs = 1000;
+
+extern const char kBweTypeHistogram[];
+
+enum BweNames {
+ kReceiverNoExtension = 0,
+ kReceiverTOffset = 1,
+ kReceiverAbsSendTime = 2,
+ kSendSideTransportSeqNum = 3,
+ kBweNamesMax = 4
+};
+
+struct RateControlInput {
+ RateControlInput(BandwidthUsage bw_state,
+ const absl::optional<DataRate>& estimated_throughput);
+ ~RateControlInput();
+
+ BandwidthUsage bw_state;
+ absl::optional<DataRate> estimated_throughput;
+};
+} // namespace webrtc
+
+#endif // MODULES_REMOTE_BITRATE_ESTIMATOR_INCLUDE_BWE_DEFINES_H_
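
The throughput field in RateControlInput is optional by design: detector state can be reported even when no acked-bitrate sample is available, as the tests earlier do by passing absl::nullopt. A minimal construction sketch:

#include "absl/types/optional.h"
#include "modules/remote_bitrate_estimator/include/bwe_defines.h"

webrtc::RateControlInput MakeInput(bool have_throughput_sample) {
  // The detector state is always present; the throughput sample may be absent.
  absl::optional<webrtc::DataRate> throughput;
  if (have_throughput_sample) {
    throughput = webrtc::DataRate::BitsPerSec(250000);
  }
  return webrtc::RateControlInput(webrtc::BandwidthUsage::kBwNormal,
                                  throughput);
}
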
diff --git a/third_party/libwebrtc/modules/remote_bitrate_estimator/include/remote_bitrate_estimator.h b/third_party/libwebrtc/modules/remote_bitrate_estimator/include/remote_bitrate_estimator.h
new file mode 100644
index 0000000000..0d4e15e9e1
--- /dev/null
+++ b/third_party/libwebrtc/modules/remote_bitrate_estimator/include/remote_bitrate_estimator.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+// This class estimates the incoming available bandwidth.
+
+#ifndef MODULES_REMOTE_BITRATE_ESTIMATOR_INCLUDE_REMOTE_BITRATE_ESTIMATOR_H_
+#define MODULES_REMOTE_BITRATE_ESTIMATOR_INCLUDE_REMOTE_BITRATE_ESTIMATOR_H_
+
+#include <map>
+#include <memory>
+#include <vector>
+
+#include "api/units/data_rate.h"
+#include "api/units/time_delta.h"
+#include "modules/include/module_common_types.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "modules/rtp_rtcp/source/rtcp_packet.h"
+
+namespace webrtc {
+
+class Clock;
+
+// RemoteBitrateObserver is used to signal changes in bitrate estimates for
+// the incoming streams.
+class RemoteBitrateObserver {
+ public:
+ // Called when a receive channel group has a new bitrate estimate for the
+ // incoming streams.
+ virtual void OnReceiveBitrateChanged(const std::vector<uint32_t>& ssrcs,
+ uint32_t bitrate) = 0;
+
+ virtual ~RemoteBitrateObserver() {}
+};
+
+class RemoteBitrateEstimator : public CallStatsObserver {
+ public:
+ ~RemoteBitrateEstimator() override {}
+
+ // Called for each incoming packet. Updates the incoming payload bitrate
+ // estimate and the over-use detector. If an over-use is detected the
+ // remote bitrate estimate will be updated. Note that `payload_size` is the
+ // packet size excluding headers.
+ // Note that `arrival_time_ms` can use an arbitrary time base.
+ virtual void IncomingPacket(int64_t arrival_time_ms,
+ size_t payload_size,
+ const RTPHeader& header) = 0;
+
+ // Removes all data for `ssrc`.
+ virtual void RemoveStream(uint32_t ssrc) = 0;
+
+ // Returns latest estimate or DataRate::Zero() if estimation is unavailable.
+ virtual DataRate LatestEstimate() const = 0;
+
+ virtual TimeDelta Process() = 0;
+
+ protected:
+ static const int64_t kProcessIntervalMs = 500;
+ static const int64_t kStreamTimeOutMs = 2000;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_REMOTE_BITRATE_ESTIMATOR_INCLUDE_REMOTE_BITRATE_ESTIMATOR_H_
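
A minimal sketch of implementing the observer interface above. The logging body is an assumption made for illustration; a real receiver would typically feed the new estimate back to the sender, for example via a REMB message.

#include <cstdio>
#include <vector>

#include "modules/remote_bitrate_estimator/include/remote_bitrate_estimator.h"

class LoggingBitrateObserver : public webrtc::RemoteBitrateObserver {
 public:
  void OnReceiveBitrateChanged(const std::vector<uint32_t>& ssrcs,
                               uint32_t bitrate) override {
    // Report the new estimate and how many streams it covers.
    std::printf("new receive estimate: %u bps across %zu streams\n", bitrate,
                ssrcs.size());
  }
};
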
diff --git a/third_party/libwebrtc/modules/remote_bitrate_estimator/inter_arrival.cc b/third_party/libwebrtc/modules/remote_bitrate_estimator/inter_arrival.cc
new file mode 100644
index 0000000000..41c092bf91
--- /dev/null
+++ b/third_party/libwebrtc/modules/remote_bitrate_estimator/inter_arrival.cc
@@ -0,0 +1,163 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/remote_bitrate_estimator/inter_arrival.h"
+
+#include "modules/include/module_common_types_public.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+static const int kBurstDeltaThresholdMs = 5;
+static const int kMaxBurstDurationMs = 100;
+
+InterArrival::InterArrival(uint32_t timestamp_group_length_ticks,
+ double timestamp_to_ms_coeff,
+ bool enable_burst_grouping)
+ : kTimestampGroupLengthTicks(timestamp_group_length_ticks),
+ current_timestamp_group_(),
+ prev_timestamp_group_(),
+ timestamp_to_ms_coeff_(timestamp_to_ms_coeff),
+ burst_grouping_(enable_burst_grouping),
+ num_consecutive_reordered_packets_(0) {}
+
+bool InterArrival::ComputeDeltas(uint32_t timestamp,
+ int64_t arrival_time_ms,
+ int64_t system_time_ms,
+ size_t packet_size,
+ uint32_t* timestamp_delta,
+ int64_t* arrival_time_delta_ms,
+ int* packet_size_delta) {
+ RTC_DCHECK(timestamp_delta);
+ RTC_DCHECK(arrival_time_delta_ms);
+ RTC_DCHECK(packet_size_delta);
+ bool calculated_deltas = false;
+ if (current_timestamp_group_.IsFirstPacket()) {
+ // We don't have enough data to update the filter, so we store it until we
+ // have two frames of data to process.
+ current_timestamp_group_.timestamp = timestamp;
+ current_timestamp_group_.first_timestamp = timestamp;
+ current_timestamp_group_.first_arrival_ms = arrival_time_ms;
+ } else if (!PacketInOrder(timestamp)) {
+ return false;
+ } else if (NewTimestampGroup(arrival_time_ms, timestamp)) {
+ // First packet of a later frame, the previous frame sample is ready.
+ if (prev_timestamp_group_.complete_time_ms >= 0) {
+ *timestamp_delta =
+ current_timestamp_group_.timestamp - prev_timestamp_group_.timestamp;
+ *arrival_time_delta_ms = current_timestamp_group_.complete_time_ms -
+ prev_timestamp_group_.complete_time_ms;
+ // Check system time differences to see if we have a disproportionate jump
+ // in arrival time. In that case, reset the inter-arrival computations.
+ int64_t system_time_delta_ms =
+ current_timestamp_group_.last_system_time_ms -
+ prev_timestamp_group_.last_system_time_ms;
+ if (*arrival_time_delta_ms - system_time_delta_ms >=
+ kArrivalTimeOffsetThresholdMs) {
+ RTC_LOG(LS_WARNING)
+ << "The arrival time clock offset has changed (diff = "
+ << *arrival_time_delta_ms - system_time_delta_ms
+ << " ms), resetting.";
+ Reset();
+ return false;
+ }
+ if (*arrival_time_delta_ms < 0) {
+ // The group of packets has been reordered since receiving its local
+ // arrival timestamp.
+ ++num_consecutive_reordered_packets_;
+ if (num_consecutive_reordered_packets_ >= kReorderedResetThreshold) {
+ RTC_LOG(LS_WARNING)
+ << "Packets are being reordered on the path from the "
+ "socket to the bandwidth estimator. Ignoring this "
+ "packet for bandwidth estimation, resetting.";
+ Reset();
+ }
+ return false;
+ } else {
+ num_consecutive_reordered_packets_ = 0;
+ }
+ RTC_DCHECK_GE(*arrival_time_delta_ms, 0);
+ *packet_size_delta = static_cast<int>(current_timestamp_group_.size) -
+ static_cast<int>(prev_timestamp_group_.size);
+ calculated_deltas = true;
+ }
+ prev_timestamp_group_ = current_timestamp_group_;
+ // The new timestamp is now the current frame.
+ current_timestamp_group_.first_timestamp = timestamp;
+ current_timestamp_group_.timestamp = timestamp;
+ current_timestamp_group_.first_arrival_ms = arrival_time_ms;
+ current_timestamp_group_.size = 0;
+ } else {
+ current_timestamp_group_.timestamp =
+ LatestTimestamp(current_timestamp_group_.timestamp, timestamp);
+ }
+ // Accumulate the frame size.
+ current_timestamp_group_.size += packet_size;
+ current_timestamp_group_.complete_time_ms = arrival_time_ms;
+ current_timestamp_group_.last_system_time_ms = system_time_ms;
+
+ return calculated_deltas;
+}
+
+bool InterArrival::PacketInOrder(uint32_t timestamp) {
+ if (current_timestamp_group_.IsFirstPacket()) {
+ return true;
+ } else {
+ // Assume that a diff which is bigger than half the timestamp interval
+ // (32 bits) must be due to reordering. This code is almost identical to
+ // that in IsNewerTimestamp() in module_common_types.h.
+ uint32_t timestamp_diff =
+ timestamp - current_timestamp_group_.first_timestamp;
+ return timestamp_diff < 0x80000000;
+ }
+}
+
+// Assumes that `timestamp` is not reordered compared to
+// `current_timestamp_group_`.
+bool InterArrival::NewTimestampGroup(int64_t arrival_time_ms,
+ uint32_t timestamp) const {
+ if (current_timestamp_group_.IsFirstPacket()) {
+ return false;
+ } else if (BelongsToBurst(arrival_time_ms, timestamp)) {
+ return false;
+ } else {
+ uint32_t timestamp_diff =
+ timestamp - current_timestamp_group_.first_timestamp;
+ return timestamp_diff > kTimestampGroupLengthTicks;
+ }
+}
+
+bool InterArrival::BelongsToBurst(int64_t arrival_time_ms,
+ uint32_t timestamp) const {
+ if (!burst_grouping_) {
+ return false;
+ }
+ RTC_DCHECK_GE(current_timestamp_group_.complete_time_ms, 0);
+ int64_t arrival_time_delta_ms =
+ arrival_time_ms - current_timestamp_group_.complete_time_ms;
+ uint32_t timestamp_diff = timestamp - current_timestamp_group_.timestamp;
+ int64_t ts_delta_ms = timestamp_to_ms_coeff_ * timestamp_diff + 0.5;
+ if (ts_delta_ms == 0)
+ return true;
+ int propagation_delta_ms = arrival_time_delta_ms - ts_delta_ms;
+ if (propagation_delta_ms < 0 &&
+ arrival_time_delta_ms <= kBurstDeltaThresholdMs &&
+ arrival_time_ms - current_timestamp_group_.first_arrival_ms <
+ kMaxBurstDurationMs)
+ return true;
+ return false;
+}
+
+void InterArrival::Reset() {
+ num_consecutive_reordered_packets_ = 0;
+ current_timestamp_group_ = TimestampGroup();
+ prev_timestamp_group_ = TimestampGroup();
+}
+} // namespace webrtc
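
PacketInOrder() above relies on unsigned wraparound arithmetic: a forward step across the 32-bit wrap point still yields a small difference, while genuine reordering yields a difference above half the range. A standalone sketch of that test:

#include <cassert>
#include <cstdint>

// Treat diffs below half of the 32-bit range as forward motion, as in
// InterArrival::PacketInOrder().
bool InOrder(uint32_t first_timestamp, uint32_t timestamp) {
  return static_cast<uint32_t>(timestamp - first_timestamp) < 0x80000000u;
}

int main() {
  assert(InOrder(0xFFFFFFF0u, 0x00000010u));   // crosses the wrap, in order
  assert(!InOrder(0x00000010u, 0xFFFFFFF0u));  // reordered
}
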
diff --git a/third_party/libwebrtc/modules/remote_bitrate_estimator/inter_arrival.h b/third_party/libwebrtc/modules/remote_bitrate_estimator/inter_arrival.h
new file mode 100644
index 0000000000..d31a8b63c3
--- /dev/null
+++ b/third_party/libwebrtc/modules/remote_bitrate_estimator/inter_arrival.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_REMOTE_BITRATE_ESTIMATOR_INTER_ARRIVAL_H_
+#define MODULES_REMOTE_BITRATE_ESTIMATOR_INTER_ARRIVAL_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+namespace webrtc {
+
+// Helper class to compute the inter-arrival time delta and the size delta
+// between two timestamp groups. A timestamp is a 32-bit unsigned number with
+// a client-defined rate.
+class InterArrival {
+ public:
+ // After this many packet groups have been received out of order, InterArrival
+ // will reset, assuming that the clock has made a jump.
+ static constexpr int kReorderedResetThreshold = 3;
+ static constexpr int64_t kArrivalTimeOffsetThresholdMs = 3000;
+
+ // A timestamp group is defined as all packets with a timestamp which are at
+ // most timestamp_group_length_ticks older than the first timestamp in that
+ // group.
+ InterArrival(uint32_t timestamp_group_length_ticks,
+ double timestamp_to_ms_coeff,
+ bool enable_burst_grouping);
+
+ InterArrival() = delete;
+ InterArrival(const InterArrival&) = delete;
+ InterArrival& operator=(const InterArrival&) = delete;
+
+ // This function returns true if a delta was computed, or false if the current
+ // group is still incomplete or if only one group has been completed.
+ // `timestamp` is the timestamp.
+ // `arrival_time_ms` is the local time at which the packet arrived.
+ // `packet_size` is the size of the packet.
+ // `timestamp_delta` (output) is the computed timestamp delta.
+ // `arrival_time_delta_ms` (output) is the computed arrival-time delta.
+ // `packet_size_delta` (output) is the computed size delta.
+ bool ComputeDeltas(uint32_t timestamp,
+ int64_t arrival_time_ms,
+ int64_t system_time_ms,
+ size_t packet_size,
+ uint32_t* timestamp_delta,
+ int64_t* arrival_time_delta_ms,
+ int* packet_size_delta);
+
+ private:
+ struct TimestampGroup {
+ TimestampGroup()
+ : size(0),
+ first_timestamp(0),
+ timestamp(0),
+ first_arrival_ms(-1),
+ complete_time_ms(-1) {}
+
+ bool IsFirstPacket() const { return complete_time_ms == -1; }
+
+ size_t size;
+ uint32_t first_timestamp;
+ uint32_t timestamp;
+ int64_t first_arrival_ms;
+ int64_t complete_time_ms;
+ int64_t last_system_time_ms;
+ };
+
+ // Returns true if the packet with timestamp `timestamp` arrived in order.
+ bool PacketInOrder(uint32_t timestamp);
+
+ // Returns true if the last packet was the end of the current batch and the
+ // packet with `timestamp` is the first of a new batch.
+ bool NewTimestampGroup(int64_t arrival_time_ms, uint32_t timestamp) const;
+
+ bool BelongsToBurst(int64_t arrival_time_ms, uint32_t timestamp) const;
+
+ void Reset();
+
+ const uint32_t kTimestampGroupLengthTicks;
+ TimestampGroup current_timestamp_group_;
+ TimestampGroup prev_timestamp_group_;
+ double timestamp_to_ms_coeff_;
+ bool burst_grouping_;
+ int num_consecutive_reordered_packets_;
+};
+} // namespace webrtc
+
+#endif // MODULES_REMOTE_BITRATE_ESTIMATOR_INTER_ARRIVAL_H_
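
The timestamp_to_ms_coeff constructor argument is what makes the class format-agnostic. The unit tests that follow exercise the two common conversions: 90 kHz RTP timestamps, and the 24-bit abs-send-time extension shifted up 8 bits, i.e. 2^26 ticks per second. A small sketch of those coefficients, matching the constants in the tests:

#include <cstdio>

int main() {
  const double kRtpTimestampToMs = 1.0 / 90.0;  // 90 kHz video: 90 ticks/ms
  const double kAstToMs = 1000.0 / (1 << 26);   // abs-send-time << 8
  std::printf("90 RTP ticks    -> %.3f ms\n", 90 * kRtpTimestampToMs);
  std::printf("67109 AST ticks -> %.3f ms\n", 67109 * kAstToMs);
}
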
diff --git a/third_party/libwebrtc/modules/remote_bitrate_estimator/inter_arrival_unittest.cc b/third_party/libwebrtc/modules/remote_bitrate_estimator/inter_arrival_unittest.cc
new file mode 100644
index 0000000000..72a772ed21
--- /dev/null
+++ b/third_party/libwebrtc/modules/remote_bitrate_estimator/inter_arrival_unittest.cc
@@ -0,0 +1,531 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/remote_bitrate_estimator/inter_arrival.h"
+
+#include <memory>
+
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace testing {
+
+enum {
+ kTimestampGroupLengthUs = 5000,
+ kMinStep = 20,
+ kTriggerNewGroupUs = kTimestampGroupLengthUs + kMinStep,
+ kBurstThresholdMs = 5,
+ kAbsSendTimeFraction = 18,
+ kAbsSendTimeInterArrivalUpshift = 8,
+ kInterArrivalShift = kAbsSendTimeFraction + kAbsSendTimeInterArrivalUpshift,
+};
+
+const double kRtpTimestampToMs = 1.0 / 90.0;
+const double kAstToMs = 1000.0 / static_cast<double>(1 << kInterArrivalShift);
+
+class InterArrivalTest : public ::testing::Test {
+ protected:
+ virtual void SetUp() {
+ inter_arrival_.reset(
+ new InterArrival(kTimestampGroupLengthUs / 1000, 1.0, true));
+ inter_arrival_rtp_.reset(new InterArrival(
+ MakeRtpTimestamp(kTimestampGroupLengthUs), kRtpTimestampToMs, true));
+ inter_arrival_ast_.reset(new InterArrival(
+ MakeAbsSendTime(kTimestampGroupLengthUs), kAstToMs, true));
+ }
+
+ // Test that neither inter_arrival instance completes the timestamp group from
+ // the given data.
+ void ExpectFalse(int64_t timestamp_us,
+ int64_t arrival_time_ms,
+ size_t packet_size) {
+ InternalExpectFalse(inter_arrival_rtp_.get(),
+ MakeRtpTimestamp(timestamp_us), arrival_time_ms,
+ packet_size);
+ InternalExpectFalse(inter_arrival_ast_.get(), MakeAbsSendTime(timestamp_us),
+ arrival_time_ms, packet_size);
+ }
+
+ // Test that both inter_arrival instances complete the timestamp group from
+ // the given data and that all returned deltas are as expected (except the
+ // timestamp delta, which is rounded from microseconds to different
+ // resolutions and must match within an interval given in |timestamp_near|).
+ void ExpectTrue(int64_t timestamp_us,
+ int64_t arrival_time_ms,
+ size_t packet_size,
+ int64_t expected_timestamp_delta_us,
+ int64_t expected_arrival_time_delta_ms,
+ int expected_packet_size_delta,
+ uint32_t timestamp_near) {
+ InternalExpectTrue(inter_arrival_rtp_.get(), MakeRtpTimestamp(timestamp_us),
+ arrival_time_ms, packet_size,
+ MakeRtpTimestamp(expected_timestamp_delta_us),
+ expected_arrival_time_delta_ms,
+ expected_packet_size_delta, timestamp_near);
+ InternalExpectTrue(inter_arrival_ast_.get(), MakeAbsSendTime(timestamp_us),
+ arrival_time_ms, packet_size,
+ MakeAbsSendTime(expected_timestamp_delta_us),
+ expected_arrival_time_delta_ms,
+ expected_packet_size_delta, timestamp_near << 8);
+ }
+
+ void WrapTestHelper(int64_t wrap_start_us,
+ uint32_t timestamp_near,
+ bool unorderly_within_group) {
+ // Step through the range of a 32-bit int, 1/4 at a time, so that packets
+ // close to the wraparound are not judged as out of order.
+
+ // G1
+ int64_t arrival_time = 17;
+ ExpectFalse(0, arrival_time, 1);
+
+ // G2
+ arrival_time += kBurstThresholdMs + 1;
+ ExpectFalse(wrap_start_us / 4, arrival_time, 1);
+
+ // G3
+ arrival_time += kBurstThresholdMs + 1;
+ ExpectTrue(wrap_start_us / 2, arrival_time, 1, wrap_start_us / 4, 6,
+ 0, // Delta G2-G1
+ 0);
+
+ // G4
+ arrival_time += kBurstThresholdMs + 1;
+ int64_t g4_arrival_time = arrival_time;
+ ExpectTrue(wrap_start_us / 2 + wrap_start_us / 4, arrival_time, 1,
+ wrap_start_us / 4, 6, 0, // Delta G3-G2
+ timestamp_near);
+
+ // G5
+ arrival_time += kBurstThresholdMs + 1;
+ ExpectTrue(wrap_start_us, arrival_time, 2, wrap_start_us / 4, 6,
+ 0, // Delta G4-G3
+ timestamp_near);
+ for (int i = 0; i < 10; ++i) {
+ // Slowly step across the wrap point.
+ arrival_time += kBurstThresholdMs + 1;
+ if (unorderly_within_group) {
+ // These packets arrive with timestamps in decreasing order but are
+ // nevertheless accumulated to the group because their timestamps are higher
+ // than the initial timestamp of the group.
+ ExpectFalse(wrap_start_us + kMinStep * (9 - i), arrival_time, 1);
+ } else {
+ ExpectFalse(wrap_start_us + kMinStep * i, arrival_time, 1);
+ }
+ }
+ int64_t g5_arrival_time = arrival_time;
+
+ // This packet is out of order and should be dropped.
+ arrival_time += kBurstThresholdMs + 1;
+ ExpectFalse(wrap_start_us - 100, arrival_time, 100);
+
+ // G6
+ arrival_time += kBurstThresholdMs + 1;
+ int64_t g6_arrival_time = arrival_time;
+ ExpectTrue(wrap_start_us + kTriggerNewGroupUs, arrival_time, 10,
+ wrap_start_us / 4 + 9 * kMinStep,
+ g5_arrival_time - g4_arrival_time,
+ (2 + 10) - 1, // Delta G5-G4
+ timestamp_near);
+
+ // This packet is out of order and should be dropped.
+ arrival_time += kBurstThresholdMs + 1;
+ ExpectFalse(wrap_start_us + kTimestampGroupLengthUs, arrival_time, 100);
+
+ // G7
+ arrival_time += kBurstThresholdMs + 1;
+ ExpectTrue(wrap_start_us + 2 * kTriggerNewGroupUs, arrival_time, 100,
+ // Delta G6-G5
+ kTriggerNewGroupUs - 9 * kMinStep,
+ g6_arrival_time - g5_arrival_time, 10 - (2 + 10),
+ timestamp_near);
+ }
+
+ std::unique_ptr<InterArrival> inter_arrival_;
+
+ private:
+ static uint32_t MakeRtpTimestamp(int64_t us) {
+ return static_cast<uint32_t>(static_cast<uint64_t>(us * 90 + 500) / 1000);
+ }
+
+ static uint32_t MakeAbsSendTime(int64_t us) {
+ uint32_t absolute_send_time =
+ static_cast<uint32_t>(((static_cast<uint64_t>(us) << 18) + 500000) /
+ 1000000) &
+ 0x00FFFFFFul;
+ return absolute_send_time << 8;
+ }
+
+ static void InternalExpectFalse(InterArrival* inter_arrival,
+ uint32_t timestamp,
+ int64_t arrival_time_ms,
+ size_t packet_size) {
+ uint32_t dummy_timestamp = 101;
+ int64_t dummy_arrival_time_ms = 303;
+ int dummy_packet_size = 909;
+ bool computed = inter_arrival->ComputeDeltas(
+ timestamp, arrival_time_ms, arrival_time_ms, packet_size,
+ &dummy_timestamp, &dummy_arrival_time_ms, &dummy_packet_size);
+ EXPECT_EQ(computed, false);
+ EXPECT_EQ(101ul, dummy_timestamp);
+ EXPECT_EQ(303, dummy_arrival_time_ms);
+ EXPECT_EQ(909, dummy_packet_size);
+ }
+
+ static void InternalExpectTrue(InterArrival* inter_arrival,
+ uint32_t timestamp,
+ int64_t arrival_time_ms,
+ size_t packet_size,
+ uint32_t expected_timestamp_delta,
+ int64_t expected_arrival_time_delta_ms,
+ int expected_packet_size_delta,
+ uint32_t timestamp_near) {
+ uint32_t delta_timestamp = 101;
+ int64_t delta_arrival_time_ms = 303;
+ int delta_packet_size = 909;
+ bool computed = inter_arrival->ComputeDeltas(
+ timestamp, arrival_time_ms, arrival_time_ms, packet_size,
+ &delta_timestamp, &delta_arrival_time_ms, &delta_packet_size);
+ EXPECT_EQ(true, computed);
+ EXPECT_NEAR(expected_timestamp_delta, delta_timestamp, timestamp_near);
+ EXPECT_EQ(expected_arrival_time_delta_ms, delta_arrival_time_ms);
+ EXPECT_EQ(expected_packet_size_delta, delta_packet_size);
+ }
+
+ std::unique_ptr<InterArrival> inter_arrival_rtp_;
+ std::unique_ptr<InterArrival> inter_arrival_ast_;
+};
+
+TEST_F(InterArrivalTest, FirstPacket) {
+ ExpectFalse(0, 17, 1);
+}
+
+TEST_F(InterArrivalTest, FirstGroup) {
+ // G1
+ int64_t arrival_time = 17;
+ int64_t g1_arrival_time = arrival_time;
+ ExpectFalse(0, arrival_time, 1);
+
+ // G2
+ arrival_time += kBurstThresholdMs + 1;
+ int64_t g2_arrival_time = arrival_time;
+ ExpectFalse(kTriggerNewGroupUs, arrival_time, 2);
+
+ // G3
+ // Only once the first packet of the third group arrives do we see the deltas
+ // between the first two.
+ arrival_time += kBurstThresholdMs + 1;
+ ExpectTrue(2 * kTriggerNewGroupUs, arrival_time, 1,
+ // Delta G2-G1
+ kTriggerNewGroupUs, g2_arrival_time - g1_arrival_time, 1, 0);
+}
+
+TEST_F(InterArrivalTest, SecondGroup) {
+ // G1
+ int64_t arrival_time = 17;
+ int64_t g1_arrival_time = arrival_time;
+ ExpectFalse(0, arrival_time, 1);
+
+ // G2
+ arrival_time += kBurstThresholdMs + 1;
+ int64_t g2_arrival_time = arrival_time;
+ ExpectFalse(kTriggerNewGroupUs, arrival_time, 2);
+
+ // G3
+ arrival_time += kBurstThresholdMs + 1;
+ int64_t g3_arrival_time = arrival_time;
+ ExpectTrue(2 * kTriggerNewGroupUs, arrival_time, 1,
+ // Delta G2-G1
+ kTriggerNewGroupUs, g2_arrival_time - g1_arrival_time, 1, 0);
+
+ // G4
+ // First packet of 4th group yields deltas between group 2 and 3.
+ arrival_time += kBurstThresholdMs + 1;
+ ExpectTrue(3 * kTriggerNewGroupUs, arrival_time, 2,
+ // Delta G3-G2
+ kTriggerNewGroupUs, g3_arrival_time - g2_arrival_time, -1, 0);
+}
+
+TEST_F(InterArrivalTest, AccumulatedGroup) {
+ // G1
+ int64_t arrival_time = 17;
+ int64_t g1_arrival_time = arrival_time;
+ ExpectFalse(0, arrival_time, 1);
+
+ // G2
+ arrival_time += kBurstThresholdMs + 1;
+ ExpectFalse(kTriggerNewGroupUs, 28, 2);
+ int64_t timestamp = kTriggerNewGroupUs;
+ for (int i = 0; i < 10; ++i) {
+ // A bunch of packets arriving within the same group.
+ arrival_time += kBurstThresholdMs + 1;
+ timestamp += kMinStep;
+ ExpectFalse(timestamp, arrival_time, 1);
+ }
+ int64_t g2_arrival_time = arrival_time;
+ int64_t g2_timestamp = timestamp;
+
+ // G3
+ arrival_time = 500;
+ ExpectTrue(2 * kTriggerNewGroupUs, arrival_time, 100, g2_timestamp,
+ g2_arrival_time - g1_arrival_time,
+ (2 + 10) - 1, // Delta G2-G1
+ 0);
+}
+
+TEST_F(InterArrivalTest, OutOfOrderPacket) {
+ // G1
+ int64_t arrival_time = 17;
+ int64_t timestamp = 0;
+ ExpectFalse(timestamp, arrival_time, 1);
+ int64_t g1_timestamp = timestamp;
+ int64_t g1_arrival_time = arrival_time;
+
+ // G2
+ arrival_time += 11;
+ timestamp += kTriggerNewGroupUs;
+ ExpectFalse(timestamp, 28, 2);
+ for (int i = 0; i < 10; ++i) {
+ arrival_time += kBurstThresholdMs + 1;
+ timestamp += kMinStep;
+ ExpectFalse(timestamp, arrival_time, 1);
+ }
+ int64_t g2_timestamp = timestamp;
+ int64_t g2_arrival_time = arrival_time;
+
+ // This packet is out of order and should be dropped.
+ arrival_time = 281;
+ ExpectFalse(g1_timestamp, arrival_time, 100);
+
+ // G3
+ arrival_time = 500;
+ timestamp = 2 * kTriggerNewGroupUs;
+ ExpectTrue(timestamp, arrival_time, 100,
+ // Delta G2-G1
+ g2_timestamp - g1_timestamp, g2_arrival_time - g1_arrival_time,
+ (2 + 10) - 1, 0);
+}
+
+TEST_F(InterArrivalTest, OutOfOrderWithinGroup) {
+ // G1
+ int64_t arrival_time = 17;
+ int64_t timestamp = 0;
+ ExpectFalse(timestamp, arrival_time, 1);
+ int64_t g1_timestamp = timestamp;
+ int64_t g1_arrival_time = arrival_time;
+
+ // G2
+ timestamp += kTriggerNewGroupUs;
+ arrival_time += 11;
+ ExpectFalse(kTriggerNewGroupUs, 28, 2);
+ timestamp += 10 * kMinStep;
+ int64_t g2_timestamp = timestamp;
+ for (int i = 0; i < 10; ++i) {
+ // These packets arrive with timestamps in decreasing order but are
+ // nevertheless accumulated to the group because their timestamps are higher
+ // than the initial timestamp of the group.
+ arrival_time += kBurstThresholdMs + 1;
+ ExpectFalse(timestamp, arrival_time, 1);
+ timestamp -= kMinStep;
+ }
+ int64_t g2_arrival_time = arrival_time;
+
+ // However, this packet is deemed out of order and should be dropped.
+ arrival_time = 281;
+ timestamp = g1_timestamp;
+ ExpectFalse(timestamp, arrival_time, 100);
+
+ // G3
+ timestamp = 2 * kTriggerNewGroupUs;
+ arrival_time = 500;
+ ExpectTrue(timestamp, arrival_time, 100, g2_timestamp - g1_timestamp,
+ g2_arrival_time - g1_arrival_time, (2 + 10) - 1, 0);
+}
+
+TEST_F(InterArrivalTest, TwoBursts) {
+ // G1
+ int64_t g1_arrival_time = 17;
+ ExpectFalse(0, g1_arrival_time, 1);
+
+ // G2
+ int64_t timestamp = kTriggerNewGroupUs;
+ int64_t arrival_time = 100; // Simulate no packets arriving for 100 ms.
+ for (int i = 0; i < 10; ++i) {
+ // A bunch of packets arriving in one burst (within 5 ms apart).
+ timestamp += 30000;
+ arrival_time += kBurstThresholdMs;
+ ExpectFalse(timestamp, arrival_time, 1);
+ }
+ int64_t g2_arrival_time = arrival_time;
+ int64_t g2_timestamp = timestamp;
+
+ // G3
+ timestamp += 30000;
+ arrival_time += kBurstThresholdMs + 1;
+ ExpectTrue(timestamp, arrival_time, 100, g2_timestamp,
+ g2_arrival_time - g1_arrival_time,
+ 10 - 1, // Delta G2-G1
+ 0);
+}
+
+TEST_F(InterArrivalTest, NoBursts) {
+ // G1
+ ExpectFalse(0, 17, 1);
+
+ // G2
+ int64_t timestamp = kTriggerNewGroupUs;
+ int64_t arrival_time = 28;
+ ExpectFalse(timestamp, arrival_time, 2);
+
+ // G3
+ ExpectTrue(kTriggerNewGroupUs + 30000, arrival_time + kBurstThresholdMs + 1,
+ 100, timestamp - 0, arrival_time - 17,
+ 2 - 1, // Delta G2-G1
+ 0);
+}
+
+// Yields 0xfffffffe when converted to internal representation in
+// inter_arrival_rtp_ and inter_arrival_ast_ respectively.
+static const int64_t kStartRtpTimestampWrapUs = 47721858827;
+static const int64_t kStartAbsSendTimeWrapUs = 63999995;
+
+TEST_F(InterArrivalTest, RtpTimestampWrap) {
+ WrapTestHelper(kStartRtpTimestampWrapUs, 1, false);
+}
+
+TEST_F(InterArrivalTest, AbsSendTimeWrap) {
+ WrapTestHelper(kStartAbsSendTimeWrapUs, 1, false);
+}
+
+TEST_F(InterArrivalTest, RtpTimestampWrapOutOfOrderWithinGroup) {
+ WrapTestHelper(kStartRtpTimestampWrapUs, 1, true);
+}
+
+TEST_F(InterArrivalTest, AbsSendTimeWrapOutOfOrderWithinGroup) {
+ WrapTestHelper(kStartAbsSendTimeWrapUs, 1, true);
+}
+
+TEST_F(InterArrivalTest, PositiveArrivalTimeJump) {
+ const size_t kPacketSize = 1000;
+ uint32_t send_time_ms = 10000;
+ int64_t arrival_time_ms = 20000;
+ int64_t system_time_ms = 30000;
+
+ uint32_t send_delta;
+ int64_t arrival_delta;
+ int size_delta;
+ EXPECT_FALSE(inter_arrival_->ComputeDeltas(
+ send_time_ms, arrival_time_ms, system_time_ms, kPacketSize, &send_delta,
+ &arrival_delta, &size_delta));
+
+ const int kTimeDeltaMs = 30;
+ send_time_ms += kTimeDeltaMs;
+ arrival_time_ms += kTimeDeltaMs;
+ system_time_ms += kTimeDeltaMs;
+ EXPECT_FALSE(inter_arrival_->ComputeDeltas(
+ send_time_ms, arrival_time_ms, system_time_ms, kPacketSize, &send_delta,
+ &arrival_delta, &size_delta));
+
+ send_time_ms += kTimeDeltaMs;
+ arrival_time_ms += kTimeDeltaMs + InterArrival::kArrivalTimeOffsetThresholdMs;
+ system_time_ms += kTimeDeltaMs;
+ EXPECT_TRUE(inter_arrival_->ComputeDeltas(
+ send_time_ms, arrival_time_ms, system_time_ms, kPacketSize, &send_delta,
+ &arrival_delta, &size_delta));
+ EXPECT_EQ(kTimeDeltaMs, static_cast<int>(send_delta));
+ EXPECT_EQ(kTimeDeltaMs, arrival_delta);
+ EXPECT_EQ(size_delta, 0);
+
+ send_time_ms += kTimeDeltaMs;
+ arrival_time_ms += kTimeDeltaMs;
+ system_time_ms += kTimeDeltaMs;
+ // The previous arrival time jump should now be detected and cause a reset.
+ EXPECT_FALSE(inter_arrival_->ComputeDeltas(
+ send_time_ms, arrival_time_ms, system_time_ms, kPacketSize, &send_delta,
+ &arrival_delta, &size_delta));
+
+  // The next two packets will not give a valid delta since we are back in
+  // the initial state.
+ for (int i = 0; i < 2; ++i) {
+ send_time_ms += kTimeDeltaMs;
+ arrival_time_ms += kTimeDeltaMs;
+ system_time_ms += kTimeDeltaMs;
+ EXPECT_FALSE(inter_arrival_->ComputeDeltas(
+ send_time_ms, arrival_time_ms, system_time_ms, kPacketSize, &send_delta,
+ &arrival_delta, &size_delta));
+ }
+
+ send_time_ms += kTimeDeltaMs;
+ arrival_time_ms += kTimeDeltaMs;
+ system_time_ms += kTimeDeltaMs;
+ EXPECT_TRUE(inter_arrival_->ComputeDeltas(
+ send_time_ms, arrival_time_ms, system_time_ms, kPacketSize, &send_delta,
+ &arrival_delta, &size_delta));
+ EXPECT_EQ(kTimeDeltaMs, static_cast<int>(send_delta));
+ EXPECT_EQ(kTimeDeltaMs, arrival_delta);
+ EXPECT_EQ(size_delta, 0);
+}
+
+TEST_F(InterArrivalTest, NegativeArrivalTimeJump) {
+ const size_t kPacketSize = 1000;
+ uint32_t send_time_ms = 10000;
+ int64_t arrival_time_ms = 20000;
+ int64_t system_time_ms = 30000;
+
+ uint32_t send_delta;
+ int64_t arrival_delta;
+ int size_delta;
+ EXPECT_FALSE(inter_arrival_->ComputeDeltas(
+ send_time_ms, arrival_time_ms, system_time_ms, kPacketSize, &send_delta,
+ &arrival_delta, &size_delta));
+
+ const int kTimeDeltaMs = 30;
+ send_time_ms += kTimeDeltaMs;
+ arrival_time_ms += kTimeDeltaMs;
+ system_time_ms += kTimeDeltaMs;
+ EXPECT_FALSE(inter_arrival_->ComputeDeltas(
+ send_time_ms, arrival_time_ms, system_time_ms, kPacketSize, &send_delta,
+ &arrival_delta, &size_delta));
+
+ send_time_ms += kTimeDeltaMs;
+ arrival_time_ms += kTimeDeltaMs;
+ system_time_ms += kTimeDeltaMs;
+ EXPECT_TRUE(inter_arrival_->ComputeDeltas(
+ send_time_ms, arrival_time_ms, system_time_ms, kPacketSize, &send_delta,
+ &arrival_delta, &size_delta));
+ EXPECT_EQ(kTimeDeltaMs, static_cast<int>(send_delta));
+ EXPECT_EQ(kTimeDeltaMs, arrival_delta);
+ EXPECT_EQ(size_delta, 0);
+
+  // Three out-of-order packets will fail; after that we are reset, and two
+  // more will fail before we get our first valid delta after the reset.
+ arrival_time_ms -= 1000;
+ for (int i = 0; i < InterArrival::kReorderedResetThreshold + 3; ++i) {
+ send_time_ms += kTimeDeltaMs;
+ arrival_time_ms += kTimeDeltaMs;
+ system_time_ms += kTimeDeltaMs;
+ // The previous arrival time jump should now be detected and cause a reset.
+ EXPECT_FALSE(inter_arrival_->ComputeDeltas(
+ send_time_ms, arrival_time_ms, system_time_ms, kPacketSize, &send_delta,
+ &arrival_delta, &size_delta));
+ }
+
+ send_time_ms += kTimeDeltaMs;
+ arrival_time_ms += kTimeDeltaMs;
+ system_time_ms += kTimeDeltaMs;
+ EXPECT_TRUE(inter_arrival_->ComputeDeltas(
+ send_time_ms, arrival_time_ms, system_time_ms, kPacketSize, &send_delta,
+ &arrival_delta, &size_delta));
+ EXPECT_EQ(kTimeDeltaMs, static_cast<int>(send_delta));
+ EXPECT_EQ(kTimeDeltaMs, arrival_delta);
+ EXPECT_EQ(size_delta, 0);
+}
+} // namespace testing
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/remote_bitrate_estimator/overuse_detector.cc b/third_party/libwebrtc/modules/remote_bitrate_estimator/overuse_detector.cc
new file mode 100644
index 0000000000..672822bbcd
--- /dev/null
+++ b/third_party/libwebrtc/modules/remote_bitrate_estimator/overuse_detector.cc
@@ -0,0 +1,160 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/remote_bitrate_estimator/overuse_detector.h"
+
+#include <math.h>
+#include <stdio.h>
+
+#include <algorithm>
+#include <string>
+
+#include "modules/remote_bitrate_estimator/test/bwe_test_logging.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/numerics/safe_minmax.h"
+
+namespace webrtc {
+
+const char kAdaptiveThresholdExperiment[] = "WebRTC-AdaptiveBweThreshold";
+const char kEnabledPrefix[] = "Enabled";
+const size_t kEnabledPrefixLength = sizeof(kEnabledPrefix) - 1;
+const char kDisabledPrefix[] = "Disabled";
+const size_t kDisabledPrefixLength = sizeof(kDisabledPrefix) - 1;
+
+const double kMaxAdaptOffsetMs = 15.0;
+const double kOverUsingTimeThreshold = 10;
+const int kMaxNumDeltas = 60;
+
+bool AdaptiveThresholdExperimentIsDisabled(
+ const FieldTrialsView& key_value_config) {
+ std::string experiment_string =
+ key_value_config.Lookup(kAdaptiveThresholdExperiment);
+ const size_t kMinExperimentLength = kDisabledPrefixLength;
+ if (experiment_string.length() < kMinExperimentLength)
+ return false;
+ return experiment_string.substr(0, kDisabledPrefixLength) == kDisabledPrefix;
+}
+
+// Gets thresholds from the experiment name following the format
+// "WebRTC-AdaptiveBweThreshold/Enabled-0.5,0.002/".
+bool ReadExperimentConstants(const FieldTrialsView& key_value_config,
+ double* k_up,
+ double* k_down) {
+ std::string experiment_string =
+ key_value_config.Lookup(kAdaptiveThresholdExperiment);
+ const size_t kMinExperimentLength = kEnabledPrefixLength + 3;
+ if (experiment_string.length() < kMinExperimentLength ||
+ experiment_string.substr(0, kEnabledPrefixLength) != kEnabledPrefix)
+ return false;
+ return sscanf(experiment_string.substr(kEnabledPrefixLength + 1).c_str(),
+ "%lf,%lf", k_up, k_down) == 2;
+}
+
+OveruseDetector::OveruseDetector(const FieldTrialsView* key_value_config)
+    // The experiment is on by default, but can be disabled through Finch by
+    // setting the field trial string to "WebRTC-AdaptiveBweThreshold/Disabled/".
+ : in_experiment_(!AdaptiveThresholdExperimentIsDisabled(*key_value_config)),
+ k_up_(0.0087),
+ k_down_(0.039),
+ overusing_time_threshold_(100),
+ threshold_(12.5),
+ last_update_ms_(-1),
+ prev_offset_(0.0),
+ time_over_using_(-1),
+ overuse_counter_(0),
+ hypothesis_(BandwidthUsage::kBwNormal) {
+ if (!AdaptiveThresholdExperimentIsDisabled(*key_value_config))
+ InitializeExperiment(*key_value_config);
+}
+
+OveruseDetector::~OveruseDetector() {}
+
+BandwidthUsage OveruseDetector::State() const {
+ return hypothesis_;
+}
+
+BandwidthUsage OveruseDetector::Detect(double offset,
+ double ts_delta,
+ int num_of_deltas,
+ int64_t now_ms) {
+ if (num_of_deltas < 2) {
+ return BandwidthUsage::kBwNormal;
+ }
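+  // Scale the estimated per-group offset by the number of deltas it is based
+  // on (capped at kMaxNumDeltas), so the detector reacts more strongly once
+  // more samples back the estimate.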
+ const double T = std::min(num_of_deltas, kMaxNumDeltas) * offset;
+ BWE_TEST_LOGGING_PLOT(1, "T", now_ms, T);
+ BWE_TEST_LOGGING_PLOT(1, "threshold", now_ms, threshold_);
+ if (T > threshold_) {
+ if (time_over_using_ == -1) {
+ // Initialize the timer. Assume that we've been
+ // over-using half of the time since the previous
+ // sample.
+ time_over_using_ = ts_delta / 2;
+ } else {
+ // Increment timer
+ time_over_using_ += ts_delta;
+ }
+ overuse_counter_++;
+ if (time_over_using_ > overusing_time_threshold_ && overuse_counter_ > 1) {
+ if (offset >= prev_offset_) {
+ time_over_using_ = 0;
+ overuse_counter_ = 0;
+ hypothesis_ = BandwidthUsage::kBwOverusing;
+ }
+ }
+ } else if (T < -threshold_) {
+ time_over_using_ = -1;
+ overuse_counter_ = 0;
+ hypothesis_ = BandwidthUsage::kBwUnderusing;
+ } else {
+ time_over_using_ = -1;
+ overuse_counter_ = 0;
+ hypothesis_ = BandwidthUsage::kBwNormal;
+ }
+ prev_offset_ = offset;
+
+ UpdateThreshold(T, now_ms);
+
+ return hypothesis_;
+}
+
+void OveruseDetector::UpdateThreshold(double modified_offset, int64_t now_ms) {
+ if (!in_experiment_)
+ return;
+
+ if (last_update_ms_ == -1)
+ last_update_ms_ = now_ms;
+
+ if (fabs(modified_offset) > threshold_ + kMaxAdaptOffsetMs) {
+    // Avoid adapting the threshold to large latency spikes caused, e.g., by a
+    // sudden capacity drop.
+ last_update_ms_ = now_ms;
+ return;
+ }
+
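+  // Adapt the threshold towards |modified_offset| with gain `k_down_` while
+  // the offset is below the threshold and `k_up_` while it is above, scaled
+  // by the elapsed time (capped at kMaxTimeDeltaMs).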
+ const double k = fabs(modified_offset) < threshold_ ? k_down_ : k_up_;
+ const int64_t kMaxTimeDeltaMs = 100;
+ int64_t time_delta_ms = std::min(now_ms - last_update_ms_, kMaxTimeDeltaMs);
+ threshold_ += k * (fabs(modified_offset) - threshold_) * time_delta_ms;
+ threshold_ = rtc::SafeClamp(threshold_, 6.f, 600.f);
+ last_update_ms_ = now_ms;
+}
+
+void OveruseDetector::InitializeExperiment(
+ const FieldTrialsView& key_value_config) {
+ RTC_DCHECK(in_experiment_);
+ double k_up = 0.0;
+ double k_down = 0.0;
+ overusing_time_threshold_ = kOverUsingTimeThreshold;
+ if (ReadExperimentConstants(key_value_config, &k_up, &k_down)) {
+ k_up_ = k_up;
+ k_down_ = k_down;
+ }
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/remote_bitrate_estimator/overuse_detector.h b/third_party/libwebrtc/modules/remote_bitrate_estimator/overuse_detector.h
new file mode 100644
index 0000000000..dfaea9187a
--- /dev/null
+++ b/third_party/libwebrtc/modules/remote_bitrate_estimator/overuse_detector.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef MODULES_REMOTE_BITRATE_ESTIMATOR_OVERUSE_DETECTOR_H_
+#define MODULES_REMOTE_BITRATE_ESTIMATOR_OVERUSE_DETECTOR_H_
+
+#include <stdint.h>
+
+#include "api/field_trials_view.h"
+#include "api/network_state_predictor.h"
+
+namespace webrtc {
+
+bool AdaptiveThresholdExperimentIsDisabled(
+ const FieldTrialsView& key_value_config);
+
+class OveruseDetector {
+ public:
+ explicit OveruseDetector(const FieldTrialsView* key_value_config);
+ virtual ~OveruseDetector();
+
+ OveruseDetector(const OveruseDetector&) = delete;
+ OveruseDetector& operator=(const OveruseDetector&) = delete;
+
+ // Update the detection state based on the estimated inter-arrival time delta
+  // offset. `timestamp_delta` is the difference between the last timestamp
+  // the estimated offset is based on and the last timestamp the previous
+  // offset was based on, i.e. the time between detector updates.
+ // `num_of_deltas` is the number of deltas the offset estimate is based on.
+ // Returns the state after the detection update.
+ BandwidthUsage Detect(double offset,
+ double timestamp_delta,
+ int num_of_deltas,
+ int64_t now_ms);
+
+ // Returns the current detector state.
+ BandwidthUsage State() const;
+
+ private:
+ void UpdateThreshold(double modified_offset, int64_t now_ms);
+ void InitializeExperiment(const FieldTrialsView& key_value_config);
+
+ bool in_experiment_;
+ double k_up_;
+ double k_down_;
+ double overusing_time_threshold_;
+ double threshold_;
+ int64_t last_update_ms_;
+ double prev_offset_;
+ double time_over_using_;
+ int overuse_counter_;
+ BandwidthUsage hypothesis_;
+};
+} // namespace webrtc
+
+#endif // MODULES_REMOTE_BITRATE_ESTIMATOR_OVERUSE_DETECTOR_H_
diff --git a/third_party/libwebrtc/modules/remote_bitrate_estimator/overuse_detector_unittest.cc b/third_party/libwebrtc/modules/remote_bitrate_estimator/overuse_detector_unittest.cc
new file mode 100644
index 0000000000..8420af96a1
--- /dev/null
+++ b/third_party/libwebrtc/modules/remote_bitrate_estimator/overuse_detector_unittest.cc
@@ -0,0 +1,809 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/remote_bitrate_estimator/overuse_detector.h"
+
+#include <stdio.h>
+#include <string.h>
+
+#include <algorithm>
+#include <cstdlib>
+#include <memory>
+
+#include "api/transport/field_trial_based_config.h"
+#include "modules/remote_bitrate_estimator/inter_arrival.h"
+#include "modules/remote_bitrate_estimator/overuse_estimator.h"
+#include "rtc_base/random.h"
+#include "test/field_trial.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace testing {
+
+const double kRtpTimestampToMs = 1.0 / 90.0;
+
+class OveruseDetectorTest : public ::testing::Test {
+ public:
+ OveruseDetectorTest()
+ : now_ms_(0),
+ receive_time_ms_(0),
+ rtp_timestamp_(10 * 90),
+ overuse_detector_(),
+ overuse_estimator_(new OveruseEstimator(options_)),
+ inter_arrival_(new InterArrival(5 * 90, kRtpTimestampToMs, true)),
+ random_(123456789) {}
+
+ protected:
+ void SetUp() override {
+ overuse_detector_.reset(new OveruseDetector(&field_trials_));
+ }
+
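+  // Feeds 100000 frames whose receive times have Gaussian jitter while the
+  // send pace matches the frame rate, and returns the number of distinct
+  // (non-consecutive) overuse detections, i.e. false positives.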
+ int Run100000Samples(int packets_per_frame,
+ size_t packet_size,
+ int mean_ms,
+ int standard_deviation_ms) {
+ int unique_overuse = 0;
+ int last_overuse = -1;
+ for (int i = 0; i < 100000; ++i) {
+ for (int j = 0; j < packets_per_frame; ++j) {
+ UpdateDetector(rtp_timestamp_, receive_time_ms_, packet_size);
+ }
+ rtp_timestamp_ += mean_ms * 90;
+ now_ms_ += mean_ms;
+ receive_time_ms_ = std::max<int64_t>(
+ receive_time_ms_,
+ now_ms_ + static_cast<int64_t>(
+ random_.Gaussian(0, standard_deviation_ms) + 0.5));
+ if (BandwidthUsage::kBwOverusing == overuse_detector_->State()) {
+ if (last_overuse + 1 != i) {
+ unique_overuse++;
+ }
+ last_overuse = i;
+ }
+ }
+ return unique_overuse;
+ }
+
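+  // Adds `drift_per_frame_ms` of receive-time drift per frame and returns the
+  // number of frames until the detector signals overuse, or -1 if it never
+  // does within 1000 frames.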
+ int RunUntilOveruse(int packets_per_frame,
+ size_t packet_size,
+ int mean_ms,
+ int standard_deviation_ms,
+ int drift_per_frame_ms) {
+    // Simulate a send pace that is too high.
+ for (int i = 0; i < 1000; ++i) {
+ for (int j = 0; j < packets_per_frame; ++j) {
+ UpdateDetector(rtp_timestamp_, receive_time_ms_, packet_size);
+ }
+ rtp_timestamp_ += mean_ms * 90;
+ now_ms_ += mean_ms + drift_per_frame_ms;
+ receive_time_ms_ = std::max<int64_t>(
+ receive_time_ms_,
+ now_ms_ + static_cast<int64_t>(
+ random_.Gaussian(0, standard_deviation_ms) + 0.5));
+ if (BandwidthUsage::kBwOverusing == overuse_detector_->State()) {
+ return i + 1;
+ }
+ }
+ return -1;
+ }
+
+ void UpdateDetector(uint32_t rtp_timestamp,
+ int64_t receive_time_ms,
+ size_t packet_size) {
+ uint32_t timestamp_delta;
+ int64_t time_delta;
+ int size_delta;
+ if (inter_arrival_->ComputeDeltas(
+ rtp_timestamp, receive_time_ms, receive_time_ms, packet_size,
+ &timestamp_delta, &time_delta, &size_delta)) {
+ double timestamp_delta_ms = timestamp_delta / 90.0;
+ overuse_estimator_->Update(time_delta, timestamp_delta_ms, size_delta,
+ overuse_detector_->State(), receive_time_ms);
+ overuse_detector_->Detect(
+ overuse_estimator_->offset(), timestamp_delta_ms,
+ overuse_estimator_->num_of_deltas(), receive_time_ms);
+ }
+ }
+
+ const FieldTrialBasedConfig field_trials_;
+ int64_t now_ms_;
+ int64_t receive_time_ms_;
+ uint32_t rtp_timestamp_;
+ OverUseDetectorOptions options_;
+ std::unique_ptr<OveruseDetector> overuse_detector_;
+ std::unique_ptr<OveruseEstimator> overuse_estimator_;
+ std::unique_ptr<InterArrival> inter_arrival_;
+ Random random_;
+};
+
+TEST_F(OveruseDetectorTest, GaussianRandom) {
+ int buckets[100];
+ memset(buckets, 0, sizeof(buckets));
+ for (int i = 0; i < 100000; ++i) {
+ int index = random_.Gaussian(49, 10);
+ if (index >= 0 && index < 100)
+ buckets[index]++;
+ }
+ for (int n = 0; n < 100; ++n) {
+ printf("Bucket n:%d, %d\n", n, buckets[n]);
+ }
+}
+
+TEST_F(OveruseDetectorTest, SimpleNonOveruse30fps) {
+ size_t packet_size = 1200;
+ uint32_t frame_duration_ms = 33;
+ uint32_t rtp_timestamp = 10 * 90;
+
+ // No variance.
+ for (int i = 0; i < 1000; ++i) {
+ UpdateDetector(rtp_timestamp, now_ms_, packet_size);
+ now_ms_ += frame_duration_ms;
+ rtp_timestamp += frame_duration_ms * 90;
+ EXPECT_EQ(BandwidthUsage::kBwNormal, overuse_detector_->State());
+ }
+}
+
+// Roughly 1 Mbit/s
+TEST_F(OveruseDetectorTest, SimpleNonOveruseWithReceiveVariance) {
+ uint32_t frame_duration_ms = 10;
+ uint32_t rtp_timestamp = 10 * 90;
+ size_t packet_size = 1200;
+
+ for (int i = 0; i < 1000; ++i) {
+ UpdateDetector(rtp_timestamp, now_ms_, packet_size);
+ rtp_timestamp += frame_duration_ms * 90;
+ if (i % 2) {
+ now_ms_ += frame_duration_ms - 5;
+ } else {
+ now_ms_ += frame_duration_ms + 5;
+ }
+ EXPECT_EQ(BandwidthUsage::kBwNormal, overuse_detector_->State());
+ }
+}
+
+TEST_F(OveruseDetectorTest, SimpleNonOveruseWithRtpTimestampVariance) {
+ // Roughly 1 Mbit/s.
+ uint32_t frame_duration_ms = 10;
+ uint32_t rtp_timestamp = 10 * 90;
+ size_t packet_size = 1200;
+
+ for (int i = 0; i < 1000; ++i) {
+ UpdateDetector(rtp_timestamp, now_ms_, packet_size);
+ now_ms_ += frame_duration_ms;
+ if (i % 2) {
+ rtp_timestamp += (frame_duration_ms - 5) * 90;
+ } else {
+ rtp_timestamp += (frame_duration_ms + 5) * 90;
+ }
+ EXPECT_EQ(BandwidthUsage::kBwNormal, overuse_detector_->State());
+ }
+}
+
+TEST_F(OveruseDetectorTest, SimpleOveruse2000Kbit30fps) {
+ size_t packet_size = 1200;
+ int packets_per_frame = 6;
+ int frame_duration_ms = 33;
+ int drift_per_frame_ms = 1;
+ int sigma_ms = 0; // No variance.
+ int unique_overuse = Run100000Samples(packets_per_frame, packet_size,
+ frame_duration_ms, sigma_ms);
+
+ EXPECT_EQ(0, unique_overuse);
+ int frames_until_overuse =
+ RunUntilOveruse(packets_per_frame, packet_size, frame_duration_ms,
+ sigma_ms, drift_per_frame_ms);
+ EXPECT_EQ(7, frames_until_overuse);
+}
+
+TEST_F(OveruseDetectorTest, SimpleOveruse100kbit10fps) {
+ size_t packet_size = 1200;
+ int packets_per_frame = 1;
+ int frame_duration_ms = 100;
+ int drift_per_frame_ms = 1;
+ int sigma_ms = 0; // No variance.
+ int unique_overuse = Run100000Samples(packets_per_frame, packet_size,
+ frame_duration_ms, sigma_ms);
+
+ EXPECT_EQ(0, unique_overuse);
+ int frames_until_overuse =
+ RunUntilOveruse(packets_per_frame, packet_size, frame_duration_ms,
+ sigma_ms, drift_per_frame_ms);
+ EXPECT_EQ(7, frames_until_overuse);
+}
+
+TEST_F(OveruseDetectorTest, DISABLED_OveruseWithHighVariance100Kbit10fps) {
+ uint32_t frame_duration_ms = 100;
+ uint32_t drift_per_frame_ms = 10;
+ uint32_t rtp_timestamp = frame_duration_ms * 90;
+ size_t packet_size = 1200;
+ int offset = 10;
+
+ // Run 1000 samples to reach steady state.
+ for (int i = 0; i < 1000; ++i) {
+ UpdateDetector(rtp_timestamp, now_ms_, packet_size);
+ rtp_timestamp += frame_duration_ms * 90;
+ if (i % 2) {
+ offset = random_.Rand(0, 49);
+ now_ms_ += frame_duration_ms - offset;
+ } else {
+ now_ms_ += frame_duration_ms + offset;
+ }
+ EXPECT_EQ(BandwidthUsage::kBwNormal, overuse_detector_->State());
+ }
+  // Simulate a send pace that is too high.
+  // The noise above generates a standard deviation of approximately 28 ms.
+  // Total build-up of 150 ms.
+ for (int j = 0; j < 15; ++j) {
+ UpdateDetector(rtp_timestamp, now_ms_, packet_size);
+ now_ms_ += frame_duration_ms + drift_per_frame_ms;
+ rtp_timestamp += frame_duration_ms * 90;
+ EXPECT_EQ(BandwidthUsage::kBwNormal, overuse_detector_->State());
+ }
+ UpdateDetector(rtp_timestamp, now_ms_, packet_size);
+ EXPECT_EQ(BandwidthUsage::kBwOverusing, overuse_detector_->State());
+}
+
+TEST_F(OveruseDetectorTest, DISABLED_OveruseWithLowVariance100Kbit10fps) {
+ uint32_t frame_duration_ms = 100;
+ uint32_t drift_per_frame_ms = 1;
+ uint32_t rtp_timestamp = frame_duration_ms * 90;
+ size_t packet_size = 1200;
+ int offset = 10;
+
+ // Run 1000 samples to reach steady state.
+ for (int i = 0; i < 1000; ++i) {
+ UpdateDetector(rtp_timestamp, now_ms_, packet_size);
+ rtp_timestamp += frame_duration_ms * 90;
+ if (i % 2) {
+ offset = random_.Rand(0, 1);
+ now_ms_ += frame_duration_ms - offset;
+ } else {
+ now_ms_ += frame_duration_ms + offset;
+ }
+ EXPECT_EQ(BandwidthUsage::kBwNormal, overuse_detector_->State());
+ }
+  // Simulate a send pace that is too high.
+  // Total build-up of 6 ms.
+ for (int j = 0; j < 6; ++j) {
+ UpdateDetector(rtp_timestamp, now_ms_, packet_size);
+ now_ms_ += frame_duration_ms + drift_per_frame_ms;
+ rtp_timestamp += frame_duration_ms * 90;
+ EXPECT_EQ(BandwidthUsage::kBwNormal, overuse_detector_->State());
+ }
+ UpdateDetector(rtp_timestamp, now_ms_, packet_size);
+ EXPECT_EQ(BandwidthUsage::kBwOverusing, overuse_detector_->State());
+}
+
+TEST_F(OveruseDetectorTest, OveruseWithLowVariance2000Kbit30fps) {
+ uint32_t frame_duration_ms = 33;
+ uint32_t drift_per_frame_ms = 1;
+ uint32_t rtp_timestamp = frame_duration_ms * 90;
+ size_t packet_size = 1200;
+ int offset = 0;
+
+ // Run 1000 samples to reach steady state.
+ for (int i = 0; i < 1000; ++i) {
+ UpdateDetector(rtp_timestamp, now_ms_, packet_size);
+ UpdateDetector(rtp_timestamp, now_ms_, packet_size);
+ UpdateDetector(rtp_timestamp, now_ms_, packet_size);
+ UpdateDetector(rtp_timestamp, now_ms_, packet_size);
+ UpdateDetector(rtp_timestamp, now_ms_, packet_size);
+ UpdateDetector(rtp_timestamp, now_ms_, packet_size);
+ rtp_timestamp += frame_duration_ms * 90;
+ if (i % 2) {
+ offset = random_.Rand(0, 1);
+ now_ms_ += frame_duration_ms - offset;
+ } else {
+ now_ms_ += frame_duration_ms + offset;
+ }
+ EXPECT_EQ(BandwidthUsage::kBwNormal, overuse_detector_->State());
+ }
+  // Simulate a send pace that is too high.
+  // Total build-up of 30 ms.
+ for (int j = 0; j < 3; ++j) {
+ UpdateDetector(rtp_timestamp, now_ms_, packet_size);
+ UpdateDetector(rtp_timestamp, now_ms_, packet_size);
+ UpdateDetector(rtp_timestamp, now_ms_, packet_size);
+ UpdateDetector(rtp_timestamp, now_ms_, packet_size);
+ UpdateDetector(rtp_timestamp, now_ms_, packet_size);
+ UpdateDetector(rtp_timestamp, now_ms_, packet_size);
+ now_ms_ += frame_duration_ms + drift_per_frame_ms * 6;
+ rtp_timestamp += frame_duration_ms * 90;
+ EXPECT_EQ(BandwidthUsage::kBwNormal, overuse_detector_->State());
+ }
+ UpdateDetector(rtp_timestamp, now_ms_, packet_size);
+ EXPECT_EQ(BandwidthUsage::kBwOverusing, overuse_detector_->State());
+}
+
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_LowGaussianVariance30Kbit3fps \
+ DISABLED_LowGaussianVariance30Kbit3fps
+#else
+#define MAYBE_LowGaussianVariance30Kbit3fps LowGaussianVariance30Kbit3fps
+#endif
+TEST_F(OveruseDetectorTest, MAYBE_LowGaussianVariance30Kbit3fps) {
+ size_t packet_size = 1200;
+ int packets_per_frame = 1;
+ int frame_duration_ms = 333;
+ int drift_per_frame_ms = 1;
+ int sigma_ms = 3;
+ int unique_overuse = Run100000Samples(packets_per_frame, packet_size,
+ frame_duration_ms, sigma_ms);
+ EXPECT_EQ(0, unique_overuse);
+ int frames_until_overuse =
+ RunUntilOveruse(packets_per_frame, packet_size, frame_duration_ms,
+ sigma_ms, drift_per_frame_ms);
+ EXPECT_EQ(20, frames_until_overuse);
+}
+
+TEST_F(OveruseDetectorTest, LowGaussianVarianceFastDrift30Kbit3fps) {
+ size_t packet_size = 1200;
+ int packets_per_frame = 1;
+ int frame_duration_ms = 333;
+ int drift_per_frame_ms = 100;
+ int sigma_ms = 3;
+ int unique_overuse = Run100000Samples(packets_per_frame, packet_size,
+ frame_duration_ms, sigma_ms);
+ EXPECT_EQ(0, unique_overuse);
+ int frames_until_overuse =
+ RunUntilOveruse(packets_per_frame, packet_size, frame_duration_ms,
+ sigma_ms, drift_per_frame_ms);
+ EXPECT_EQ(4, frames_until_overuse);
+}
+
+TEST_F(OveruseDetectorTest, HighGaussianVariance30Kbit3fps) {
+ size_t packet_size = 1200;
+ int packets_per_frame = 1;
+ int frame_duration_ms = 333;
+ int drift_per_frame_ms = 1;
+ int sigma_ms = 10;
+ int unique_overuse = Run100000Samples(packets_per_frame, packet_size,
+ frame_duration_ms, sigma_ms);
+ EXPECT_EQ(0, unique_overuse);
+ int frames_until_overuse =
+ RunUntilOveruse(packets_per_frame, packet_size, frame_duration_ms,
+ sigma_ms, drift_per_frame_ms);
+ EXPECT_EQ(44, frames_until_overuse);
+}
+
+TEST_F(OveruseDetectorTest, HighGaussianVarianceFastDrift30Kbit3fps) {
+ size_t packet_size = 1200;
+ int packets_per_frame = 1;
+ int frame_duration_ms = 333;
+ int drift_per_frame_ms = 100;
+ int sigma_ms = 10;
+ int unique_overuse = Run100000Samples(packets_per_frame, packet_size,
+ frame_duration_ms, sigma_ms);
+ EXPECT_EQ(0, unique_overuse);
+ int frames_until_overuse =
+ RunUntilOveruse(packets_per_frame, packet_size, frame_duration_ms,
+ sigma_ms, drift_per_frame_ms);
+ EXPECT_EQ(4, frames_until_overuse);
+}
+
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_LowGaussianVariance100Kbit5fps \
+ DISABLED_LowGaussianVariance100Kbit5fps
+#else
+#define MAYBE_LowGaussianVariance100Kbit5fps LowGaussianVariance100Kbit5fps
+#endif
+TEST_F(OveruseDetectorTest, MAYBE_LowGaussianVariance100Kbit5fps) {
+ size_t packet_size = 1200;
+ int packets_per_frame = 2;
+ int frame_duration_ms = 200;
+ int drift_per_frame_ms = 1;
+ int sigma_ms = 3;
+ int unique_overuse = Run100000Samples(packets_per_frame, packet_size,
+ frame_duration_ms, sigma_ms);
+ EXPECT_EQ(0, unique_overuse);
+ int frames_until_overuse =
+ RunUntilOveruse(packets_per_frame, packet_size, frame_duration_ms,
+ sigma_ms, drift_per_frame_ms);
+ EXPECT_EQ(20, frames_until_overuse);
+}
+
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_HighGaussianVariance100Kbit5fps \
+ DISABLED_HighGaussianVariance100Kbit5fps
+#else
+#define MAYBE_HighGaussianVariance100Kbit5fps HighGaussianVariance100Kbit5fps
+#endif
+TEST_F(OveruseDetectorTest, MAYBE_HighGaussianVariance100Kbit5fps) {
+ size_t packet_size = 1200;
+ int packets_per_frame = 2;
+ int frame_duration_ms = 200;
+ int drift_per_frame_ms = 1;
+ int sigma_ms = 10;
+ int unique_overuse = Run100000Samples(packets_per_frame, packet_size,
+ frame_duration_ms, sigma_ms);
+ EXPECT_EQ(0, unique_overuse);
+ int frames_until_overuse =
+ RunUntilOveruse(packets_per_frame, packet_size, frame_duration_ms,
+ sigma_ms, drift_per_frame_ms);
+ EXPECT_EQ(44, frames_until_overuse);
+}
+
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_LowGaussianVariance100Kbit10fps \
+ DISABLED_LowGaussianVariance100Kbit10fps
+#else
+#define MAYBE_LowGaussianVariance100Kbit10fps LowGaussianVariance100Kbit10fps
+#endif
+TEST_F(OveruseDetectorTest, MAYBE_LowGaussianVariance100Kbit10fps) {
+ size_t packet_size = 1200;
+ int packets_per_frame = 1;
+ int frame_duration_ms = 100;
+ int drift_per_frame_ms = 1;
+ int sigma_ms = 3;
+ int unique_overuse = Run100000Samples(packets_per_frame, packet_size,
+ frame_duration_ms, sigma_ms);
+ EXPECT_EQ(0, unique_overuse);
+ int frames_until_overuse =
+ RunUntilOveruse(packets_per_frame, packet_size, frame_duration_ms,
+ sigma_ms, drift_per_frame_ms);
+ EXPECT_EQ(20, frames_until_overuse);
+}
+
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_HighGaussianVariance100Kbit10fps \
+ DISABLED_HighGaussianVariance100Kbit10fps
+#else
+#define MAYBE_HighGaussianVariance100Kbit10fps HighGaussianVariance100Kbit10fps
+#endif
+TEST_F(OveruseDetectorTest, MAYBE_HighGaussianVariance100Kbit10fps) {
+ size_t packet_size = 1200;
+ int packets_per_frame = 1;
+ int frame_duration_ms = 100;
+ int drift_per_frame_ms = 1;
+ int sigma_ms = 10;
+ int unique_overuse = Run100000Samples(packets_per_frame, packet_size,
+ frame_duration_ms, sigma_ms);
+ EXPECT_EQ(0, unique_overuse);
+ int frames_until_overuse =
+ RunUntilOveruse(packets_per_frame, packet_size, frame_duration_ms,
+ sigma_ms, drift_per_frame_ms);
+ EXPECT_EQ(44, frames_until_overuse);
+}
+
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_LowGaussianVariance300Kbit30fps \
+ DISABLED_LowGaussianVariance300Kbit30fps
+#else
+#define MAYBE_LowGaussianVariance300Kbit30fps LowGaussianVariance300Kbit30fps
+#endif
+TEST_F(OveruseDetectorTest, MAYBE_LowGaussianVariance300Kbit30fps) {
+ size_t packet_size = 1200;
+ int packets_per_frame = 1;
+ int frame_duration_ms = 33;
+ int drift_per_frame_ms = 1;
+ int sigma_ms = 3;
+ int unique_overuse = Run100000Samples(packets_per_frame, packet_size,
+ frame_duration_ms, sigma_ms);
+ EXPECT_EQ(0, unique_overuse);
+ int frames_until_overuse =
+ RunUntilOveruse(packets_per_frame, packet_size, frame_duration_ms,
+ sigma_ms, drift_per_frame_ms);
+ EXPECT_EQ(19, frames_until_overuse);
+}
+
+TEST_F(OveruseDetectorTest, LowGaussianVarianceFastDrift300Kbit30fps) {
+ size_t packet_size = 1200;
+ int packets_per_frame = 1;
+ int frame_duration_ms = 33;
+ int drift_per_frame_ms = 10;
+ int sigma_ms = 3;
+ int unique_overuse = Run100000Samples(packets_per_frame, packet_size,
+ frame_duration_ms, sigma_ms);
+ EXPECT_EQ(0, unique_overuse);
+ int frames_until_overuse =
+ RunUntilOveruse(packets_per_frame, packet_size, frame_duration_ms,
+ sigma_ms, drift_per_frame_ms);
+ EXPECT_EQ(5, frames_until_overuse);
+}
+
+TEST_F(OveruseDetectorTest, HighGaussianVariance300Kbit30fps) {
+ size_t packet_size = 1200;
+ int packets_per_frame = 1;
+ int frame_duration_ms = 33;
+ int drift_per_frame_ms = 1;
+ int sigma_ms = 10;
+ int unique_overuse = Run100000Samples(packets_per_frame, packet_size,
+ frame_duration_ms, sigma_ms);
+ EXPECT_EQ(0, unique_overuse);
+ int frames_until_overuse =
+ RunUntilOveruse(packets_per_frame, packet_size, frame_duration_ms,
+ sigma_ms, drift_per_frame_ms);
+ EXPECT_EQ(44, frames_until_overuse);
+}
+
+TEST_F(OveruseDetectorTest, HighGaussianVarianceFastDrift300Kbit30fps) {
+ size_t packet_size = 1200;
+ int packets_per_frame = 1;
+ int frame_duration_ms = 33;
+ int drift_per_frame_ms = 10;
+ int sigma_ms = 10;
+ int unique_overuse = Run100000Samples(packets_per_frame, packet_size,
+ frame_duration_ms, sigma_ms);
+ EXPECT_EQ(0, unique_overuse);
+ int frames_until_overuse =
+ RunUntilOveruse(packets_per_frame, packet_size, frame_duration_ms,
+ sigma_ms, drift_per_frame_ms);
+ EXPECT_EQ(10, frames_until_overuse);
+}
+
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_LowGaussianVariance1000Kbit30fps \
+ DISABLED_LowGaussianVariance1000Kbit30fps
+#else
+#define MAYBE_LowGaussianVariance1000Kbit30fps LowGaussianVariance1000Kbit30fps
+#endif
+TEST_F(OveruseDetectorTest, MAYBE_LowGaussianVariance1000Kbit30fps) {
+ size_t packet_size = 1200;
+ int packets_per_frame = 3;
+ int frame_duration_ms = 33;
+ int drift_per_frame_ms = 1;
+ int sigma_ms = 3;
+ int unique_overuse = Run100000Samples(packets_per_frame, packet_size,
+ frame_duration_ms, sigma_ms);
+ EXPECT_EQ(0, unique_overuse);
+ int frames_until_overuse =
+ RunUntilOveruse(packets_per_frame, packet_size, frame_duration_ms,
+ sigma_ms, drift_per_frame_ms);
+ EXPECT_EQ(19, frames_until_overuse);
+}
+
+TEST_F(OveruseDetectorTest, LowGaussianVarianceFastDrift1000Kbit30fps) {
+ size_t packet_size = 1200;
+ int packets_per_frame = 3;
+ int frame_duration_ms = 33;
+ int drift_per_frame_ms = 10;
+ int sigma_ms = 3;
+ int unique_overuse = Run100000Samples(packets_per_frame, packet_size,
+ frame_duration_ms, sigma_ms);
+ EXPECT_EQ(0, unique_overuse);
+ int frames_until_overuse =
+ RunUntilOveruse(packets_per_frame, packet_size, frame_duration_ms,
+ sigma_ms, drift_per_frame_ms);
+ EXPECT_EQ(5, frames_until_overuse);
+}
+
+TEST_F(OveruseDetectorTest, HighGaussianVariance1000Kbit30fps) {
+ size_t packet_size = 1200;
+ int packets_per_frame = 3;
+ int frame_duration_ms = 33;
+ int drift_per_frame_ms = 1;
+ int sigma_ms = 10;
+ int unique_overuse = Run100000Samples(packets_per_frame, packet_size,
+ frame_duration_ms, sigma_ms);
+ EXPECT_EQ(0, unique_overuse);
+ int frames_until_overuse =
+ RunUntilOveruse(packets_per_frame, packet_size, frame_duration_ms,
+ sigma_ms, drift_per_frame_ms);
+ EXPECT_EQ(44, frames_until_overuse);
+}
+
+TEST_F(OveruseDetectorTest, HighGaussianVarianceFastDrift1000Kbit30fps) {
+ size_t packet_size = 1200;
+ int packets_per_frame = 3;
+ int frame_duration_ms = 33;
+ int drift_per_frame_ms = 10;
+ int sigma_ms = 10;
+ int unique_overuse = Run100000Samples(packets_per_frame, packet_size,
+ frame_duration_ms, sigma_ms);
+ EXPECT_EQ(0, unique_overuse);
+ int frames_until_overuse =
+ RunUntilOveruse(packets_per_frame, packet_size, frame_duration_ms,
+ sigma_ms, drift_per_frame_ms);
+ EXPECT_EQ(10, frames_until_overuse);
+}
+
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_LowGaussianVariance2000Kbit30fps \
+ DISABLED_LowGaussianVariance2000Kbit30fps
+#else
+#define MAYBE_LowGaussianVariance2000Kbit30fps LowGaussianVariance2000Kbit30fps
+#endif
+TEST_F(OveruseDetectorTest, MAYBE_LowGaussianVariance2000Kbit30fps) {
+ size_t packet_size = 1200;
+ int packets_per_frame = 6;
+ int frame_duration_ms = 33;
+ int drift_per_frame_ms = 1;
+ int sigma_ms = 3;
+ int unique_overuse = Run100000Samples(packets_per_frame, packet_size,
+ frame_duration_ms, sigma_ms);
+ EXPECT_EQ(0, unique_overuse);
+ int frames_until_overuse =
+ RunUntilOveruse(packets_per_frame, packet_size, frame_duration_ms,
+ sigma_ms, drift_per_frame_ms);
+ EXPECT_EQ(19, frames_until_overuse);
+}
+
+TEST_F(OveruseDetectorTest, LowGaussianVarianceFastDrift2000Kbit30fps) {
+ size_t packet_size = 1200;
+ int packets_per_frame = 6;
+ int frame_duration_ms = 33;
+ int drift_per_frame_ms = 10;
+ int sigma_ms = 3;
+ int unique_overuse = Run100000Samples(packets_per_frame, packet_size,
+ frame_duration_ms, sigma_ms);
+ EXPECT_EQ(0, unique_overuse);
+ int frames_until_overuse =
+ RunUntilOveruse(packets_per_frame, packet_size, frame_duration_ms,
+ sigma_ms, drift_per_frame_ms);
+ EXPECT_EQ(5, frames_until_overuse);
+}
+
+TEST_F(OveruseDetectorTest, HighGaussianVariance2000Kbit30fps) {
+ size_t packet_size = 1200;
+ int packets_per_frame = 6;
+ int frame_duration_ms = 33;
+ int drift_per_frame_ms = 1;
+ int sigma_ms = 10;
+ int unique_overuse = Run100000Samples(packets_per_frame, packet_size,
+ frame_duration_ms, sigma_ms);
+ EXPECT_EQ(0, unique_overuse);
+ int frames_until_overuse =
+ RunUntilOveruse(packets_per_frame, packet_size, frame_duration_ms,
+ sigma_ms, drift_per_frame_ms);
+ EXPECT_EQ(44, frames_until_overuse);
+}
+
+TEST_F(OveruseDetectorTest, HighGaussianVarianceFastDrift2000Kbit30fps) {
+ size_t packet_size = 1200;
+ int packets_per_frame = 6;
+ int frame_duration_ms = 33;
+ int drift_per_frame_ms = 10;
+ int sigma_ms = 10;
+ int unique_overuse = Run100000Samples(packets_per_frame, packet_size,
+ frame_duration_ms, sigma_ms);
+ EXPECT_EQ(0, unique_overuse);
+ int frames_until_overuse =
+ RunUntilOveruse(packets_per_frame, packet_size, frame_duration_ms,
+ sigma_ms, drift_per_frame_ms);
+ EXPECT_EQ(10, frames_until_overuse);
+}
+
+class OveruseDetectorExperimentTest : public OveruseDetectorTest {
+ public:
+ OveruseDetectorExperimentTest()
+ : override_field_trials_(
+ "WebRTC-AdaptiveBweThreshold/Enabled-0.01,0.00018/") {}
+
+ protected:
+ void SetUp() override {
+ overuse_detector_.reset(new OveruseDetector(&field_trials_));
+ }
+
+ test::ScopedFieldTrials override_field_trials_;
+ const FieldTrialBasedConfig field_trials_;
+};
+
+TEST_F(OveruseDetectorExperimentTest, ThresholdAdapts) {
+ const double kOffset = 0.21;
+  const double kTsDelta = 3000.0;
+ int64_t now_ms = 0;
+ int num_deltas = 60;
+ const int kBatchLength = 10;
+
+ // Pass in a positive offset and verify it triggers overuse.
+ bool overuse_detected = false;
+ for (int i = 0; i < kBatchLength; ++i) {
+ BandwidthUsage overuse_state =
+ overuse_detector_->Detect(kOffset, kTsDelta, num_deltas, now_ms);
+ if (overuse_state == BandwidthUsage::kBwOverusing) {
+ overuse_detected = true;
+ }
+ ++num_deltas;
+ now_ms += 5;
+ }
+ EXPECT_TRUE(overuse_detected);
+
+ // Force the threshold to increase by passing in a higher offset.
+ overuse_detected = false;
+ for (int i = 0; i < kBatchLength; ++i) {
+ BandwidthUsage overuse_state =
+ overuse_detector_->Detect(1.1 * kOffset, kTsDelta, num_deltas, now_ms);
+ if (overuse_state == BandwidthUsage::kBwOverusing) {
+ overuse_detected = true;
+ }
+ ++num_deltas;
+ now_ms += 5;
+ }
+ EXPECT_TRUE(overuse_detected);
+
+ // Verify that the same offset as before no longer triggers overuse.
+ overuse_detected = false;
+ for (int i = 0; i < kBatchLength; ++i) {
+ BandwidthUsage overuse_state =
+ overuse_detector_->Detect(kOffset, kTsDelta, num_deltas, now_ms);
+ if (overuse_state == BandwidthUsage::kBwOverusing) {
+ overuse_detected = true;
+ }
+ ++num_deltas;
+ now_ms += 5;
+ }
+ EXPECT_FALSE(overuse_detected);
+
+ // Pass in a low offset to make the threshold adapt down.
+ for (int i = 0; i < 15 * kBatchLength; ++i) {
+ BandwidthUsage overuse_state =
+ overuse_detector_->Detect(0.7 * kOffset, kTsDelta, num_deltas, now_ms);
+ if (overuse_state == BandwidthUsage::kBwOverusing) {
+ overuse_detected = true;
+ }
+ ++num_deltas;
+ now_ms += 5;
+ }
+ EXPECT_FALSE(overuse_detected);
+
+ // Make sure the original offset now again triggers overuse.
+ for (int i = 0; i < kBatchLength; ++i) {
+ BandwidthUsage overuse_state =
+ overuse_detector_->Detect(kOffset, kTsDelta, num_deltas, now_ms);
+ if (overuse_state == BandwidthUsage::kBwOverusing) {
+ overuse_detected = true;
+ }
+ ++num_deltas;
+ now_ms += 5;
+ }
+ EXPECT_TRUE(overuse_detected);
+}
+
+TEST_F(OveruseDetectorExperimentTest, DoesntAdaptToSpikes) {
+ const double kOffset = 1.0;
+ const double kLargeOffset = 20.0;
+  const double kTsDelta = 3000.0;
+ int64_t now_ms = 0;
+ int num_deltas = 60;
+ const int kBatchLength = 10;
+ const int kShortBatchLength = 3;
+
+ // Pass in a positive offset and verify it triggers overuse.
+ bool overuse_detected = false;
+ for (int i = 0; i < kBatchLength; ++i) {
+ BandwidthUsage overuse_state =
+ overuse_detector_->Detect(kOffset, kTsDelta, num_deltas, now_ms);
+ if (overuse_state == BandwidthUsage::kBwOverusing) {
+ overuse_detected = true;
+ }
+ ++num_deltas;
+ now_ms += 5;
+ }
+
+  // Pass in a large offset. This should not have too big an impact on the
+  // threshold, but should still trigger an overuse.
+ now_ms += 100;
+ overuse_detected = false;
+ for (int i = 0; i < kShortBatchLength; ++i) {
+ BandwidthUsage overuse_state =
+ overuse_detector_->Detect(kLargeOffset, kTsDelta, num_deltas, now_ms);
+ if (overuse_state == BandwidthUsage::kBwOverusing) {
+ overuse_detected = true;
+ }
+ ++num_deltas;
+ now_ms += 5;
+ }
+ EXPECT_TRUE(overuse_detected);
+
+ // Pass in a positive normal offset and verify it still triggers.
+ overuse_detected = false;
+ for (int i = 0; i < kBatchLength; ++i) {
+ BandwidthUsage overuse_state =
+ overuse_detector_->Detect(kOffset, kTsDelta, num_deltas, now_ms);
+ if (overuse_state == BandwidthUsage::kBwOverusing) {
+ overuse_detected = true;
+ }
+ ++num_deltas;
+ now_ms += 5;
+ }
+ EXPECT_TRUE(overuse_detected);
+}
+} // namespace testing
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/remote_bitrate_estimator/overuse_estimator.cc b/third_party/libwebrtc/modules/remote_bitrate_estimator/overuse_estimator.cc
new file mode 100644
index 0000000000..684143fcce
--- /dev/null
+++ b/third_party/libwebrtc/modules/remote_bitrate_estimator/overuse_estimator.cc
@@ -0,0 +1,164 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/remote_bitrate_estimator/overuse_estimator.h"
+
+#include <math.h>
+#include <string.h>
+
+#include <algorithm>
+
+#include "api/network_state_predictor.h"
+#include "modules/remote_bitrate_estimator/test/bwe_test_logging.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+constexpr int kMinFramePeriodHistoryLength = 60;
+constexpr int kDeltaCounterMax = 1000;
+
+OveruseEstimator::OveruseEstimator(const OverUseDetectorOptions& options)
+ : options_(options),
+ num_of_deltas_(0),
+ slope_(options_.initial_slope),
+ offset_(options_.initial_offset),
+ prev_offset_(options_.initial_offset),
+ E_(),
+ process_noise_(),
+ avg_noise_(options_.initial_avg_noise),
+ var_noise_(options_.initial_var_noise),
+ ts_delta_hist_() {
+ memcpy(E_, options_.initial_e, sizeof(E_));
+ memcpy(process_noise_, options_.initial_process_noise,
+ sizeof(process_noise_));
+}
+
+OveruseEstimator::~OveruseEstimator() {
+ ts_delta_hist_.clear();
+}
+
+void OveruseEstimator::Update(int64_t t_delta,
+ double ts_delta,
+ int size_delta,
+ BandwidthUsage current_hypothesis,
+ int64_t now_ms) {
+ const double min_frame_period = UpdateMinFramePeriod(ts_delta);
+ const double t_ts_delta = t_delta - ts_delta;
+ BWE_TEST_LOGGING_PLOT(1, "dm_ms", now_ms, t_ts_delta);
+ double fs_delta = size_delta;
+
+ ++num_of_deltas_;
+ if (num_of_deltas_ > kDeltaCounterMax) {
+ num_of_deltas_ = kDeltaCounterMax;
+ }
+
+ // Update the Kalman filter.
+ E_[0][0] += process_noise_[0];
+ E_[1][1] += process_noise_[1];
+
+ if ((current_hypothesis == BandwidthUsage::kBwOverusing &&
+ offset_ < prev_offset_) ||
+ (current_hypothesis == BandwidthUsage::kBwUnderusing &&
+ offset_ > prev_offset_)) {
+ E_[1][1] += 10 * process_noise_[1];
+ }
+
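+  // Measurement model: the delay variation t_delta - ts_delta is modeled as
+  // slope_ * fs_delta + offset_ + noise, with state [slope_, offset_] and
+  // measurement vector h = [fs_delta, 1].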
+ const double h[2] = {fs_delta, 1.0};
+ const double Eh[2] = {E_[0][0] * h[0] + E_[0][1] * h[1],
+ E_[1][0] * h[0] + E_[1][1] * h[1]};
+
+ BWE_TEST_LOGGING_PLOT(1, "d_ms", now_ms, slope_ * h[0] - offset_);
+
+ const double residual = t_ts_delta - slope_ * h[0] - offset_;
+
+ const bool in_stable_state =
+ (current_hypothesis == BandwidthUsage::kBwNormal);
+ const double max_residual = 3.0 * sqrt(var_noise_);
+  // We try to filter out very late frames. For instance, periodic key
+  // frames don't fit the Gaussian model well.
+ if (fabs(residual) < max_residual) {
+ UpdateNoiseEstimate(residual, min_frame_period, in_stable_state);
+ } else {
+ UpdateNoiseEstimate(residual < 0 ? -max_residual : max_residual,
+ min_frame_period, in_stable_state);
+ }
+
+ const double denom = var_noise_ + h[0] * Eh[0] + h[1] * Eh[1];
+
+ const double K[2] = {Eh[0] / denom, Eh[1] / denom};
+
+ const double IKh[2][2] = {{1.0 - K[0] * h[0], -K[0] * h[1]},
+ {-K[1] * h[0], 1.0 - K[1] * h[1]}};
+ const double e00 = E_[0][0];
+ const double e01 = E_[0][1];
+
+ // Update state.
+ E_[0][0] = e00 * IKh[0][0] + E_[1][0] * IKh[0][1];
+ E_[0][1] = e01 * IKh[0][0] + E_[1][1] * IKh[0][1];
+ E_[1][0] = e00 * IKh[1][0] + E_[1][0] * IKh[1][1];
+ E_[1][1] = e01 * IKh[1][0] + E_[1][1] * IKh[1][1];
+
+ // The covariance matrix must be positive semi-definite.
+ bool positive_semi_definite =
+ E_[0][0] + E_[1][1] >= 0 &&
+ E_[0][0] * E_[1][1] - E_[0][1] * E_[1][0] >= 0 && E_[0][0] >= 0;
+ RTC_DCHECK(positive_semi_definite);
+ if (!positive_semi_definite) {
+ RTC_LOG(LS_ERROR)
+ << "The over-use estimator's covariance matrix is no longer "
+ "semi-definite.";
+ }
+
+ slope_ = slope_ + K[0] * residual;
+ prev_offset_ = offset_;
+ offset_ = offset_ + K[1] * residual;
+
+ BWE_TEST_LOGGING_PLOT(1, "kc", now_ms, K[0]);
+ BWE_TEST_LOGGING_PLOT(1, "km", now_ms, K[1]);
+ BWE_TEST_LOGGING_PLOT(1, "slope_1/bps", now_ms, slope_);
+ BWE_TEST_LOGGING_PLOT(1, "var_noise", now_ms, var_noise_);
+}
+
+double OveruseEstimator::UpdateMinFramePeriod(double ts_delta) {
+ double min_frame_period = ts_delta;
+ if (ts_delta_hist_.size() >= kMinFramePeriodHistoryLength) {
+ ts_delta_hist_.pop_front();
+ }
+ for (const double old_ts_delta : ts_delta_hist_) {
+ min_frame_period = std::min(old_ts_delta, min_frame_period);
+ }
+ ts_delta_hist_.push_back(ts_delta);
+ return min_frame_period;
+}
+
+void OveruseEstimator::UpdateNoiseEstimate(double residual,
+ double ts_delta,
+ bool stable_state) {
+ if (!stable_state) {
+ return;
+ }
+  // Use a faster filter during startup to adapt more quickly to the jitter
+  // level of the network. `alpha` is tuned for 30 frames per second, but is
+  // scaled according to `ts_delta`.
+ double alpha = 0.01;
+ if (num_of_deltas_ > 10 * 30) {
+ alpha = 0.002;
+ }
+  // The noise estimate is only updated in the stable (normal) state. `beta`
+  // is a function of `alpha` and the time delta since the previous update.
+ const double beta = pow(1 - alpha, ts_delta * 30.0 / 1000.0);
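+  // E.g. at 30 fps (`ts_delta` ~ 33.3 ms) the exponent is 1 and `beta` equals
+  // 1 - alpha; larger deltas shrink `beta`, weighting the new sample more.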
+ avg_noise_ = beta * avg_noise_ + (1 - beta) * residual;
+ var_noise_ = beta * var_noise_ +
+ (1 - beta) * (avg_noise_ - residual) * (avg_noise_ - residual);
+ if (var_noise_ < 1) {
+ var_noise_ = 1;
+ }
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/remote_bitrate_estimator/overuse_estimator.h b/third_party/libwebrtc/modules/remote_bitrate_estimator/overuse_estimator.h
new file mode 100644
index 0000000000..c021f00da7
--- /dev/null
+++ b/third_party/libwebrtc/modules/remote_bitrate_estimator/overuse_estimator.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef MODULES_REMOTE_BITRATE_ESTIMATOR_OVERUSE_ESTIMATOR_H_
+#define MODULES_REMOTE_BITRATE_ESTIMATOR_OVERUSE_ESTIMATOR_H_
+
+#include <stdint.h>
+
+#include <deque>
+
+#include "api/network_state_predictor.h"
+
+namespace webrtc {
+
+// Bandwidth over-use detector options. These are used to drive
+// experimentation with bandwidth estimation parameters.
+// TODO(terelius): This is only used in overuse_estimator.cc, and only in the
+// default constructed state. Can we move the relevant variables into that
+// class and delete this?
+struct OverUseDetectorOptions {
+ OverUseDetectorOptions() = default;
+ double initial_slope = 8.0 / 512.0;
+ double initial_offset = 0;
+ double initial_e[2][2] = {{100.0, 0.0}, {0.0, 1e-1}};
+ double initial_process_noise[2] = {1e-13, 1e-3};
+ double initial_avg_noise = 0.0;
+ double initial_var_noise = 50.0;
+};
+
+class OveruseEstimator {
+ public:
+ explicit OveruseEstimator(const OverUseDetectorOptions& options);
+ ~OveruseEstimator();
+
+ OveruseEstimator(const OveruseEstimator&) = delete;
+ OveruseEstimator& operator=(const OveruseEstimator&) = delete;
+
+ // Update the estimator with a new sample. The deltas should represent deltas
+ // between timestamp groups as defined by the InterArrival class.
+ // `current_hypothesis` should be the hypothesis of the over-use detector at
+ // this time.
+ void Update(int64_t t_delta,
+ double ts_delta,
+ int size_delta,
+ BandwidthUsage current_hypothesis,
+ int64_t now_ms);
+
+ // Returns the estimated noise/jitter variance in ms^2.
+ double var_noise() const { return var_noise_; }
+
+ // Returns the estimated inter-arrival time delta offset in ms.
+ double offset() const { return offset_; }
+
+ // Returns the number of deltas which the current over-use estimator state is
+ // based on.
+ unsigned int num_of_deltas() const { return num_of_deltas_; }
+
+ private:
+ double UpdateMinFramePeriod(double ts_delta);
+ void UpdateNoiseEstimate(double residual, double ts_delta, bool stable_state);
+
+ // Must be first member variable. Cannot be const because we need to be
+ // copyable.
+ OverUseDetectorOptions options_;
+ uint16_t num_of_deltas_;
+ double slope_;
+ double offset_;
+ double prev_offset_;
+ double E_[2][2];
+ double process_noise_[2];
+ double avg_noise_;
+ double var_noise_;
+ std::deque<double> ts_delta_hist_;
+};
+} // namespace webrtc
+
+#endif // MODULES_REMOTE_BITRATE_ESTIMATOR_OVERUSE_ESTIMATOR_H_
diff --git a/third_party/libwebrtc/modules/remote_bitrate_estimator/packet_arrival_map.cc b/third_party/libwebrtc/modules/remote_bitrate_estimator/packet_arrival_map.cc
new file mode 100644
index 0000000000..16d400e227
--- /dev/null
+++ b/third_party/libwebrtc/modules/remote_bitrate_estimator/packet_arrival_map.cc
@@ -0,0 +1,193 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/remote_bitrate_estimator/packet_arrival_map.h"
+
+#include <algorithm>
+#include <cstdint>
+
+#include "api/units/timestamp.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+void PacketArrivalTimeMap::AddPacket(int64_t sequence_number,
+ Timestamp arrival_time) {
+ RTC_DCHECK_GE(arrival_time, Timestamp::Zero());
+ if (!has_seen_packet()) {
+ // First packet.
+ Reallocate(kMinCapacity);
+ begin_sequence_number_ = sequence_number;
+ end_sequence_number_ = sequence_number + 1;
+ arrival_times_[Index(sequence_number)] = arrival_time;
+ return;
+ }
+
+ if (sequence_number >= begin_sequence_number() &&
+ sequence_number < end_sequence_number()) {
+ // The packet is within the buffer - no need to expand it.
+ arrival_times_[Index(sequence_number)] = arrival_time;
+ return;
+ }
+
+ if (sequence_number < begin_sequence_number()) {
+    // The packet goes before the current buffer. Expand to add the packet,
+    // but only if it fits within kMaxNumberOfPackets.
+ int64_t new_size = end_sequence_number() - sequence_number;
+ if (new_size > kMaxNumberOfPackets) {
+ // Don't expand the buffer further, as that would remove newly received
+ // packets.
+ return;
+ }
+ AdjustToSize(new_size);
+
+ arrival_times_[Index(sequence_number)] = arrival_time;
+ SetNotReceived(sequence_number + 1, begin_sequence_number_);
+ begin_sequence_number_ = sequence_number;
+ return;
+ }
+
+ // The packet goes after the buffer.
+ RTC_DCHECK_GE(sequence_number, end_sequence_number_);
+ int64_t new_end_sequence_number = sequence_number + 1;
+
+ if (new_end_sequence_number >= end_sequence_number_ + kMaxNumberOfPackets) {
+ // All old packets have to be removed.
+ begin_sequence_number_ = sequence_number;
+ end_sequence_number_ = new_end_sequence_number;
+ arrival_times_[Index(sequence_number)] = arrival_time;
+ return;
+ }
+
+ if (begin_sequence_number_ < new_end_sequence_number - kMaxNumberOfPackets) {
+ // Remove oldest entries
+ begin_sequence_number_ = new_end_sequence_number - kMaxNumberOfPackets;
+ RTC_DCHECK_GT(end_sequence_number_, begin_sequence_number_);
+ // Also trim the buffer to remove leading non-received packets, to
+ // ensure that the buffer only spans received packets.
+ TrimLeadingNotReceivedEntries();
+ }
+
+ AdjustToSize(new_end_sequence_number - begin_sequence_number_);
+
+ // Packets can be received out-of-order. If this isn't the next expected
+ // packet, add enough placeholders to fill the gap.
+ SetNotReceived(end_sequence_number_, sequence_number);
+ end_sequence_number_ = new_end_sequence_number;
+ arrival_times_[Index(sequence_number)] = arrival_time;
+}
+
+void PacketArrivalTimeMap::TrimLeadingNotReceivedEntries() {
+ const int begin_index = Index(begin_sequence_number_);
+ const Timestamp* const begin_it = arrival_times_.get() + begin_index;
+ const Timestamp* const end_it = arrival_times_.get() + capacity();
+
+ for (const Timestamp* it = begin_it; it != end_it; ++it) {
+ if (*it >= Timestamp::Zero()) {
+ begin_sequence_number_ += (it - begin_it);
+ return;
+ }
+ }
+ // Reached end of the arrival_times_ and all entries represent not received
+ // packets. Remove them.
+ begin_sequence_number_ += (capacity() - begin_index);
+ // Continue removing entries at the beginning of the circular buffer.
+ for (const Timestamp* it = arrival_times_.get(); it != begin_it; ++it) {
+ if (*it >= Timestamp::Zero()) {
+ begin_sequence_number_ += (it - arrival_times_.get());
+ return;
+ }
+ }
+
+ RTC_DCHECK_NOTREACHED() << "There should be at least one non-empty entry";
+}
+
+void PacketArrivalTimeMap::SetNotReceived(
+ int64_t begin_sequence_number_inclusive,
+ int64_t end_sequence_number_exclusive) {
+ static constexpr Timestamp value = Timestamp::MinusInfinity();
+
+ int begin_index = Index(begin_sequence_number_inclusive);
+ int end_index = Index(end_sequence_number_exclusive);
+
+ if (begin_index <= end_index) {
+ // Entries to clear are in single block:
+ // [......{-----}....]
+ std::fill(arrival_times_.get() + begin_index,
+ arrival_times_.get() + end_index, value);
+ } else {
+ // Entries to clear span across arrival_times_ border:
+ // [--}..........{---]
+ std::fill(arrival_times_.get() + begin_index,
+ arrival_times_.get() + capacity(), value);
+ std::fill(arrival_times_.get(), arrival_times_.get() + end_index, value);
+ }
+}
+
+void PacketArrivalTimeMap::RemoveOldPackets(int64_t sequence_number,
+ Timestamp arrival_time_limit) {
+ int64_t check_to = std::min(sequence_number, end_sequence_number_);
+ while (begin_sequence_number_ < check_to &&
+ arrival_times_[Index(begin_sequence_number_)] <= arrival_time_limit) {
+ ++begin_sequence_number_;
+ }
+ AdjustToSize(end_sequence_number_ - begin_sequence_number_);
+}
+
+void PacketArrivalTimeMap::EraseTo(int64_t sequence_number) {
+ if (sequence_number < begin_sequence_number_) {
+ return;
+ }
+ if (sequence_number >= end_sequence_number_) {
+ // Erase all.
+ begin_sequence_number_ = end_sequence_number_;
+ return;
+ }
+ // Remove some.
+ begin_sequence_number_ = sequence_number;
+ RTC_DCHECK(has_received(begin_sequence_number_));
+ AdjustToSize(end_sequence_number_ - begin_sequence_number_);
+}
+
+void PacketArrivalTimeMap::AdjustToSize(int new_size) {
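+  // Grow by doubling while the buffer is too small; shrink when it is more
+  // than 4x larger than needed, keeping at least 2x headroom (and never going
+  // below kMinCapacity) so small size changes don't trigger reallocation.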
+ if (new_size > capacity()) {
+ int new_capacity = capacity();
+ while (new_capacity < new_size)
+ new_capacity *= 2;
+ Reallocate(new_capacity);
+ }
+ if (capacity() > std::max(kMinCapacity, 4 * new_size)) {
+ int new_capacity = capacity();
+ while (new_capacity > 2 * std::max(new_size, kMinCapacity)) {
+ new_capacity /= 2;
+ }
+ Reallocate(new_capacity);
+ }
+ RTC_DCHECK_LE(new_size, capacity());
+}
+
+void PacketArrivalTimeMap::Reallocate(int new_capacity) {
+ int new_capacity_minus_1 = new_capacity - 1;
+ // Check capacity is a power of 2.
+ RTC_DCHECK_EQ(new_capacity & new_capacity_minus_1, 0);
+ // Create uninitialized memory.
+ // All valid entries should be set by `AddPacket` before use.
+ void* raw = operator new[](new_capacity * sizeof(Timestamp));
+ Timestamp* new_buffer = static_cast<Timestamp*>(raw);
+
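+  // Re-map every live entry: a sequence number's slot depends on the capacity
+  // mask, so each entry must be copied to its new position.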
+ for (int64_t sequence_number = begin_sequence_number_;
+ sequence_number < end_sequence_number_; ++sequence_number) {
+ new_buffer[sequence_number & new_capacity_minus_1] =
+ arrival_times_[sequence_number & capacity_minus_1_];
+ }
+ arrival_times_.reset(new_buffer);
+ capacity_minus_1_ = new_capacity_minus_1;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/remote_bitrate_estimator/packet_arrival_map.h b/third_party/libwebrtc/modules/remote_bitrate_estimator/packet_arrival_map.h
new file mode 100644
index 0000000000..d489a0c53d
--- /dev/null
+++ b/third_party/libwebrtc/modules/remote_bitrate_estimator/packet_arrival_map.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef MODULES_REMOTE_BITRATE_ESTIMATOR_PACKET_ARRIVAL_MAP_H_
+#define MODULES_REMOTE_BITRATE_ESTIMATOR_PACKET_ARRIVAL_MAP_H_
+
+#include <algorithm>
+#include <cstddef>
+#include <cstdint>
+#include <memory>
+
+#include "api/units/timestamp.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+// PacketArrivalTimeMap is an optimized map of packet sequence number to
+// arrival time, limited in size to never exceed `kMaxNumberOfPackets`. It
+// grows as needed, removes old packets, and expands to allow earlier packets
+// to be added out-of-order.
+//
+// Packets that have not (yet) been received are stored with a negative
+// arrival time (see `has_received`). The map never spans a larger range than
+// necessary, and the last packet is always a received one; the first packet
+// doesn't have to be received when packets arrive out-of-order.
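+//
+// Usage sketch (illustrative; mirrors the unit tests):
+//   PacketArrivalTimeMap map;
+//   map.AddPacket(42, Timestamp::Millis(10));  // begin == 42, end == 43
+//   map.AddPacket(45, Timestamp::Millis(11));  // end == 46; 43, 44 missing
+//   map.has_received(43);                      // false
+//   map.get(45);                               // Timestamp::Millis(11)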
+class PacketArrivalTimeMap {
+ public:
+ // Impossible to request feedback older than what can be represented by 15
+ // bits.
+ static constexpr int kMaxNumberOfPackets = (1 << 15);
+
+ PacketArrivalTimeMap() = default;
+ PacketArrivalTimeMap(const PacketArrivalTimeMap&) = delete;
+ PacketArrivalTimeMap& operator=(const PacketArrivalTimeMap&) = delete;
+ ~PacketArrivalTimeMap() = default;
+
+ // Indicates if the packet with `sequence_number` has already been received.
+ bool has_received(int64_t sequence_number) const {
+ return sequence_number >= begin_sequence_number() &&
+ sequence_number < end_sequence_number() &&
+ arrival_times_[Index(sequence_number)] >= Timestamp::Zero();
+ }
+
+ // Returns the sequence number of the first entry in the map, i.e. the
+ // sequence number that a `begin()` iterator would represent.
+ int64_t begin_sequence_number() const { return begin_sequence_number_; }
+
+ // Returns the sequence number of the element just after the map, i.e. the
+ // sequence number that an `end()` iterator would represent.
+ int64_t end_sequence_number() const { return end_sequence_number_; }
+
+ // Returns an element by `sequence_number`, which must be valid, i.e.
+ // between [begin_sequence_number, end_sequence_number).
+ Timestamp get(int64_t sequence_number) {
+ RTC_DCHECK_GE(sequence_number, begin_sequence_number());
+ RTC_DCHECK_LT(sequence_number, end_sequence_number());
+ return arrival_times_[Index(sequence_number)];
+ }
+
+ // Clamps `sequence_number` between [begin_sequence_number,
+ // end_sequence_number].
+ int64_t clamp(int64_t sequence_number) const {
+ return std::clamp(sequence_number, begin_sequence_number(),
+ end_sequence_number());
+ }
+
+ // Erases all elements from the beginning of the map until `sequence_number`.
+ void EraseTo(int64_t sequence_number);
+
+  // Records the fact that a packet with `sequence_number` arrived at
+  // `arrival_time`.
+ void AddPacket(int64_t sequence_number, Timestamp arrival_time);
+
+  // Removes packets from the beginning of the map as long as they were
+  // received before `sequence_number` and arrived no later than
+  // `arrival_time_limit`.
+ void RemoveOldPackets(int64_t sequence_number, Timestamp arrival_time_limit);
+
+ private:
+ static constexpr int kMinCapacity = 128;
+
+  // Returns the index into `arrival_times_` for the value belonging to
+  // `sequence_number`.
+ int Index(int64_t sequence_number) const {
+ // Note that sequence_number might be negative, thus taking '%' requires
+ // extra handling and can be slow. Because capacity is a power of two, it
+    // is much faster to use the '&' operator.
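+    // For instance (illustrative): with capacity 128, Index(-3) is
+    // (-3 & 127) == 125, whereas -3 % 128 evaluates to -3 in C++ and would
+    // index out of bounds.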
+ return sequence_number & capacity_minus_1_;
+ }
+
+ void SetNotReceived(int64_t begin_sequence_number_inclusive,
+ int64_t end_sequence_number_exclusive);
+
+ void TrimLeadingNotReceivedEntries();
+
+  // Adjusts the capacity to fit `new_size`; this may shrink the buffer.
+  // On return, guarantees capacity() >= new_size.
+ void AdjustToSize(int new_size);
+ void Reallocate(int new_capacity);
+
+ int capacity() const { return capacity_minus_1_ + 1; }
+ bool has_seen_packet() const { return arrival_times_ != nullptr; }
+
+  // Circular buffer. The packet with sequence number `sequence_number` is
+  // stored in the slot `sequence_number % capacity()`.
+ std::unique_ptr<Timestamp[]> arrival_times_ = nullptr;
+
+  // Allocated size of `arrival_times_`. The capacity is a power of 2 in the
+  // range [kMinCapacity, kMaxNumberOfPackets].
+ // `capacity - 1` is used much more often than `capacity`, thus that value is
+ // stored.
+ int capacity_minus_1_ = -1;
+
+  // The unwrapped sequence numbers delimiting the valid range:
+  // `arrival_times_` entries are only valid for sequence numbers in the range
+  // `begin_sequence_number_ <= sequence_number < end_sequence_number_`.
+ int64_t begin_sequence_number_ = 0;
+ int64_t end_sequence_number_ = 0;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_REMOTE_BITRATE_ESTIMATOR_PACKET_ARRIVAL_MAP_H_
diff --git a/third_party/libwebrtc/modules/remote_bitrate_estimator/packet_arrival_map_test.cc b/third_party/libwebrtc/modules/remote_bitrate_estimator/packet_arrival_map_test.cc
new file mode 100644
index 0000000000..00c927ffd7
--- /dev/null
+++ b/third_party/libwebrtc/modules/remote_bitrate_estimator/packet_arrival_map_test.cc
@@ -0,0 +1,251 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/remote_bitrate_estimator/packet_arrival_map.h"
+
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+TEST(PacketArrivalMapTest, IsConsistentWhenEmpty) {
+ PacketArrivalTimeMap map;
+
+ EXPECT_EQ(map.begin_sequence_number(), map.end_sequence_number());
+ EXPECT_FALSE(map.has_received(0));
+ EXPECT_EQ(map.clamp(-5), 0);
+ EXPECT_EQ(map.clamp(5), 0);
+}
+
+TEST(PacketArrivalMapTest, InsertsFirstItemIntoMap) {
+ PacketArrivalTimeMap map;
+
+ map.AddPacket(42, Timestamp::Millis(10));
+ EXPECT_EQ(map.begin_sequence_number(), 42);
+ EXPECT_EQ(map.end_sequence_number(), 43);
+
+ EXPECT_FALSE(map.has_received(41));
+ EXPECT_TRUE(map.has_received(42));
+ EXPECT_FALSE(map.has_received(44));
+
+ EXPECT_EQ(map.clamp(-100), 42);
+ EXPECT_EQ(map.clamp(42), 42);
+ EXPECT_EQ(map.clamp(100), 43);
+}
+
+TEST(PacketArrivalMapTest, InsertsWithGaps) {
+ PacketArrivalTimeMap map;
+
+ map.AddPacket(42, Timestamp::Zero());
+ map.AddPacket(45, Timestamp::Millis(11));
+ EXPECT_EQ(map.begin_sequence_number(), 42);
+ EXPECT_EQ(map.end_sequence_number(), 46);
+
+ EXPECT_FALSE(map.has_received(41));
+ EXPECT_TRUE(map.has_received(42));
+ EXPECT_FALSE(map.has_received(43));
+ EXPECT_FALSE(map.has_received(44));
+ EXPECT_TRUE(map.has_received(45));
+ EXPECT_FALSE(map.has_received(46));
+
+ EXPECT_EQ(map.get(42), Timestamp::Zero());
+ EXPECT_LT(map.get(43), Timestamp::Zero());
+ EXPECT_LT(map.get(44), Timestamp::Zero());
+ EXPECT_EQ(map.get(45), Timestamp::Millis(11));
+
+ EXPECT_EQ(map.clamp(-100), 42);
+ EXPECT_EQ(map.clamp(44), 44);
+ EXPECT_EQ(map.clamp(100), 46);
+}
+
+TEST(PacketArrivalMapTest, InsertsWithinBuffer) {
+ PacketArrivalTimeMap map;
+
+ map.AddPacket(42, Timestamp::Millis(10));
+ map.AddPacket(45, Timestamp::Millis(11));
+
+ map.AddPacket(43, Timestamp::Millis(12));
+ map.AddPacket(44, Timestamp::Millis(13));
+
+ EXPECT_EQ(map.begin_sequence_number(), 42);
+ EXPECT_EQ(map.end_sequence_number(), 46);
+
+ EXPECT_FALSE(map.has_received(41));
+ EXPECT_TRUE(map.has_received(42));
+ EXPECT_TRUE(map.has_received(43));
+ EXPECT_TRUE(map.has_received(44));
+ EXPECT_TRUE(map.has_received(45));
+ EXPECT_FALSE(map.has_received(46));
+
+ EXPECT_EQ(map.get(42), Timestamp::Millis(10));
+ EXPECT_EQ(map.get(43), Timestamp::Millis(12));
+ EXPECT_EQ(map.get(44), Timestamp::Millis(13));
+ EXPECT_EQ(map.get(45), Timestamp::Millis(11));
+}
+
+TEST(PacketArrivalMapTest, GrowsBufferAndRemoveOld) {
+ PacketArrivalTimeMap map;
+
+ constexpr int64_t kLargeSeq = 42 + PacketArrivalTimeMap::kMaxNumberOfPackets;
+ map.AddPacket(42, Timestamp::Millis(10));
+ map.AddPacket(43, Timestamp::Millis(11));
+ map.AddPacket(44, Timestamp::Millis(12));
+ map.AddPacket(45, Timestamp::Millis(13));
+ map.AddPacket(kLargeSeq, Timestamp::Millis(12));
+
+ EXPECT_EQ(map.begin_sequence_number(), 43);
+ EXPECT_EQ(map.end_sequence_number(), kLargeSeq + 1);
+ EXPECT_EQ(map.end_sequence_number() - map.begin_sequence_number(),
+ PacketArrivalTimeMap::kMaxNumberOfPackets);
+
+ EXPECT_FALSE(map.has_received(41));
+ EXPECT_FALSE(map.has_received(42));
+ EXPECT_TRUE(map.has_received(43));
+ EXPECT_TRUE(map.has_received(44));
+ EXPECT_TRUE(map.has_received(45));
+ EXPECT_FALSE(map.has_received(46));
+ EXPECT_TRUE(map.has_received(kLargeSeq));
+ EXPECT_FALSE(map.has_received(kLargeSeq + 1));
+}
+
+TEST(PacketArrivalMapTest, GrowsBufferAndRemoveOldTrimsBeginning) {
+ PacketArrivalTimeMap map;
+
+ constexpr int64_t kLargeSeq = 42 + PacketArrivalTimeMap::kMaxNumberOfPackets;
+ map.AddPacket(42, Timestamp::Millis(10));
+ // Missing: 43, 44
+ map.AddPacket(45, Timestamp::Millis(13));
+ map.AddPacket(kLargeSeq, Timestamp::Millis(12));
+
+ EXPECT_EQ(map.begin_sequence_number(), 45);
+ EXPECT_EQ(map.end_sequence_number(), kLargeSeq + 1);
+
+ EXPECT_FALSE(map.has_received(44));
+ EXPECT_TRUE(map.has_received(45));
+ EXPECT_FALSE(map.has_received(46));
+ EXPECT_TRUE(map.has_received(kLargeSeq));
+ EXPECT_FALSE(map.has_received(kLargeSeq + 1));
+}
+
+TEST(PacketArrivalMapTest, SequenceNumberJumpsDeletesAll) {
+ PacketArrivalTimeMap map;
+
+ constexpr int64_t kLargeSeq =
+ 42 + 2 * PacketArrivalTimeMap::kMaxNumberOfPackets;
+ map.AddPacket(42, Timestamp::Millis(10));
+ map.AddPacket(kLargeSeq, Timestamp::Millis(12));
+
+ EXPECT_EQ(map.begin_sequence_number(), kLargeSeq);
+ EXPECT_EQ(map.end_sequence_number(), kLargeSeq + 1);
+
+ EXPECT_FALSE(map.has_received(42));
+ EXPECT_TRUE(map.has_received(kLargeSeq));
+ EXPECT_FALSE(map.has_received(kLargeSeq + 1));
+}
+
+TEST(PacketArrivalMapTest, ExpandsBeforeBeginning) {
+ PacketArrivalTimeMap map;
+
+ map.AddPacket(42, Timestamp::Millis(10));
+ map.AddPacket(-1000, Timestamp::Millis(13));
+
+ EXPECT_EQ(map.begin_sequence_number(), -1000);
+ EXPECT_EQ(map.end_sequence_number(), 43);
+
+ EXPECT_FALSE(map.has_received(-1001));
+ EXPECT_TRUE(map.has_received(-1000));
+ EXPECT_FALSE(map.has_received(-999));
+ EXPECT_TRUE(map.has_received(42));
+ EXPECT_FALSE(map.has_received(43));
+}
+
+TEST(PacketArrivalMapTest, ExpandingBeforeBeginningKeepsReceived) {
+ PacketArrivalTimeMap map;
+
+ map.AddPacket(42, Timestamp::Millis(10));
+ constexpr int64_t kSmallSeq =
+ static_cast<int64_t>(42) - 2 * PacketArrivalTimeMap::kMaxNumberOfPackets;
+ map.AddPacket(kSmallSeq, Timestamp::Millis(13));
+
+ EXPECT_EQ(map.begin_sequence_number(), 42);
+ EXPECT_EQ(map.end_sequence_number(), 43);
+}
+
+TEST(PacketArrivalMapTest, ErasesToRemoveElements) {
+ PacketArrivalTimeMap map;
+
+ map.AddPacket(42, Timestamp::Millis(10));
+ map.AddPacket(43, Timestamp::Millis(11));
+ map.AddPacket(44, Timestamp::Millis(12));
+ map.AddPacket(45, Timestamp::Millis(13));
+
+ map.EraseTo(44);
+
+ EXPECT_EQ(map.begin_sequence_number(), 44);
+ EXPECT_EQ(map.end_sequence_number(), 46);
+
+ EXPECT_FALSE(map.has_received(43));
+ EXPECT_TRUE(map.has_received(44));
+ EXPECT_TRUE(map.has_received(45));
+ EXPECT_FALSE(map.has_received(46));
+}
+
+TEST(PacketArrivalMapTest, ErasesInEmptyMap) {
+ PacketArrivalTimeMap map;
+
+ EXPECT_EQ(map.begin_sequence_number(), map.end_sequence_number());
+
+ map.EraseTo(map.end_sequence_number());
+ EXPECT_EQ(map.begin_sequence_number(), map.end_sequence_number());
+}
+
+TEST(PacketArrivalMapTest, IsTolerantToWrongArgumentsForErase) {
+ PacketArrivalTimeMap map;
+
+ map.AddPacket(42, Timestamp::Millis(10));
+ map.AddPacket(43, Timestamp::Millis(11));
+
+ map.EraseTo(1);
+
+ EXPECT_EQ(map.begin_sequence_number(), 42);
+ EXPECT_EQ(map.end_sequence_number(), 44);
+
+ map.EraseTo(100);
+
+ EXPECT_EQ(map.begin_sequence_number(), 44);
+ EXPECT_EQ(map.end_sequence_number(), 44);
+}
+
+TEST(PacketArrivalMapTest, EraseAllRemembersBeginningSeqNbr) {
+ PacketArrivalTimeMap map;
+
+ map.AddPacket(42, Timestamp::Millis(10));
+ map.AddPacket(43, Timestamp::Millis(11));
+ map.AddPacket(44, Timestamp::Millis(12));
+ map.AddPacket(45, Timestamp::Millis(13));
+
+ map.EraseTo(46);
+
+ map.AddPacket(50, Timestamp::Millis(10));
+
+ EXPECT_EQ(map.begin_sequence_number(), 46);
+ EXPECT_EQ(map.end_sequence_number(), 51);
+
+ EXPECT_FALSE(map.has_received(45));
+ EXPECT_FALSE(map.has_received(46));
+ EXPECT_FALSE(map.has_received(47));
+ EXPECT_FALSE(map.has_received(48));
+ EXPECT_FALSE(map.has_received(49));
+ EXPECT_TRUE(map.has_received(50));
+ EXPECT_FALSE(map.has_received(51));
+}
+
+} // namespace
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_abs_send_time.cc b/third_party/libwebrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_abs_send_time.cc
new file mode 100644
index 0000000000..e9fb1b99f6
--- /dev/null
+++ b/third_party/libwebrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_abs_send_time.cc
@@ -0,0 +1,399 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/remote_bitrate_estimator/remote_bitrate_estimator_abs_send_time.h"
+
+#include <math.h>
+
+#include <algorithm>
+#include <memory>
+#include <utility>
+
+#include "api/transport/field_trial_based_config.h"
+#include "api/units/data_rate.h"
+#include "api/units/data_size.h"
+#include "api/units/time_delta.h"
+#include "api/units/timestamp.h"
+#include "modules/remote_bitrate_estimator/include/bwe_defines.h"
+#include "modules/remote_bitrate_estimator/include/remote_bitrate_estimator.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/thread_annotations.h"
+#include "system_wrappers/include/metrics.h"
+
+namespace webrtc {
+namespace {
+
+constexpr TimeDelta kMinClusterDelta = TimeDelta::Millis(1);
+constexpr TimeDelta kInitialProbingInterval = TimeDelta::Seconds(2);
+constexpr int kTimestampGroupLengthMs = 5;
+constexpr int kAbsSendTimeInterArrivalUpshift = 8;
+constexpr int kInterArrivalShift =
+ RTPHeaderExtension::kAbsSendTimeFraction + kAbsSendTimeInterArrivalUpshift;
+constexpr int kMinClusterSize = 4;
+constexpr int kMaxProbePackets = 15;
+constexpr int kExpectedNumberOfProbes = 3;
+constexpr double kTimestampToMs =
+ 1000.0 / static_cast<double>(1 << kInterArrivalShift);
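+// For reference: with the 6.18 fixed-point abs-send-time format
+// (kAbsSendTimeFraction == 18) and the upshift of 8, kInterArrivalShift is 26,
+// so one shifted timestamp unit corresponds to 1000 / 2^26 ms (~15 ns).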
+
+absl::optional<DataRate> OptionalRateFromOptionalBps(
+ absl::optional<int> bitrate_bps) {
+ if (bitrate_bps) {
+ return DataRate::BitsPerSec(*bitrate_bps);
+ } else {
+ return absl::nullopt;
+ }
+}
+
+template <typename K, typename V>
+std::vector<K> Keys(const std::map<K, V>& map) {
+ std::vector<K> keys;
+ keys.reserve(map.size());
+ for (const auto& kv_pair : map) {
+ keys.push_back(kv_pair.first);
+ }
+ return keys;
+}
+
+} // namespace
+
+RemoteBitrateEstimatorAbsSendTime::~RemoteBitrateEstimatorAbsSendTime() =
+ default;
+
+bool RemoteBitrateEstimatorAbsSendTime::IsWithinClusterBounds(
+ TimeDelta send_delta,
+ const Cluster& cluster_aggregate) {
+ if (cluster_aggregate.count == 0)
+ return true;
+ TimeDelta cluster_mean =
+ cluster_aggregate.send_mean / cluster_aggregate.count;
+ return (send_delta - cluster_mean).Abs() < TimeDelta::Micros(2'500);
+}
+
+void RemoteBitrateEstimatorAbsSendTime::MaybeAddCluster(
+ const Cluster& cluster_aggregate,
+ std::list<Cluster>& clusters) {
+ if (cluster_aggregate.count < kMinClusterSize ||
+ cluster_aggregate.send_mean <= TimeDelta::Zero() ||
+ cluster_aggregate.recv_mean <= TimeDelta::Zero()) {
+ return;
+ }
+
+ Cluster cluster;
+ cluster.send_mean = cluster_aggregate.send_mean / cluster_aggregate.count;
+ cluster.recv_mean = cluster_aggregate.recv_mean / cluster_aggregate.count;
+ cluster.mean_size = cluster_aggregate.mean_size / cluster_aggregate.count;
+ cluster.count = cluster_aggregate.count;
+ cluster.num_above_min_delta = cluster_aggregate.num_above_min_delta;
+ clusters.push_back(cluster);
+}
+
+RemoteBitrateEstimatorAbsSendTime::RemoteBitrateEstimatorAbsSendTime(
+ RemoteBitrateObserver* observer,
+ Clock* clock)
+ : clock_(clock),
+ observer_(observer),
+ detector_(&field_trials_),
+ remote_rate_(&field_trials_) {
+ RTC_DCHECK(clock_);
+ RTC_DCHECK(observer_);
+ RTC_LOG(LS_INFO) << "RemoteBitrateEstimatorAbsSendTime: Instantiating.";
+}
+
+std::list<RemoteBitrateEstimatorAbsSendTime::Cluster>
+RemoteBitrateEstimatorAbsSendTime::ComputeClusters() const {
+ std::list<Cluster> clusters;
+ Cluster cluster_aggregate;
+ Timestamp prev_send_time = Timestamp::MinusInfinity();
+ Timestamp prev_recv_time = Timestamp::MinusInfinity();
+ for (const Probe& probe : probes_) {
+ if (prev_send_time.IsFinite()) {
+ TimeDelta send_delta = probe.send_time - prev_send_time;
+ TimeDelta recv_delta = probe.recv_time - prev_recv_time;
+ if (send_delta >= kMinClusterDelta && recv_delta >= kMinClusterDelta) {
+ ++cluster_aggregate.num_above_min_delta;
+ }
+ if (!IsWithinClusterBounds(send_delta, cluster_aggregate)) {
+ MaybeAddCluster(cluster_aggregate, clusters);
+ cluster_aggregate = Cluster();
+ }
+ cluster_aggregate.send_mean += send_delta;
+ cluster_aggregate.recv_mean += recv_delta;
+ cluster_aggregate.mean_size += probe.payload_size;
+ ++cluster_aggregate.count;
+ }
+ prev_send_time = probe.send_time;
+ prev_recv_time = probe.recv_time;
+ }
+ MaybeAddCluster(cluster_aggregate, clusters);
+ return clusters;
+}
+
+const RemoteBitrateEstimatorAbsSendTime::Cluster*
+RemoteBitrateEstimatorAbsSendTime::FindBestProbe(
+ const std::list<Cluster>& clusters) const {
+ DataRate highest_probe_bitrate = DataRate::Zero();
+ const Cluster* best = nullptr;
+ for (const auto& cluster : clusters) {
+ if (cluster.send_mean == TimeDelta::Zero() ||
+ cluster.recv_mean == TimeDelta::Zero()) {
+ continue;
+ }
+ if (cluster.num_above_min_delta > cluster.count / 2 &&
+ (cluster.recv_mean - cluster.send_mean <= TimeDelta::Millis(2) &&
+ cluster.send_mean - cluster.recv_mean <= TimeDelta::Millis(5))) {
+ DataRate probe_bitrate =
+ std::min(cluster.SendBitrate(), cluster.RecvBitrate());
+ if (probe_bitrate > highest_probe_bitrate) {
+ highest_probe_bitrate = probe_bitrate;
+ best = &cluster;
+ }
+ } else {
+ RTC_LOG(LS_INFO) << "Probe failed, sent at "
+ << cluster.SendBitrate().bps() << " bps, received at "
+ << cluster.RecvBitrate().bps()
+ << " bps. Mean send delta: " << cluster.send_mean.ms()
+ << " ms, mean recv delta: " << cluster.recv_mean.ms()
+ << " ms, num probes: " << cluster.count;
+ break;
+ }
+ }
+ return best;
+}
+
+RemoteBitrateEstimatorAbsSendTime::ProbeResult
+RemoteBitrateEstimatorAbsSendTime::ProcessClusters(Timestamp now) {
+ std::list<Cluster> clusters = ComputeClusters();
+ if (clusters.empty()) {
+ // If we reach the max number of probe packets and still have no clusters,
+ // we will remove the oldest one.
+ if (probes_.size() >= kMaxProbePackets)
+ probes_.pop_front();
+ return ProbeResult::kNoUpdate;
+ }
+
+ if (const Cluster* best = FindBestProbe(clusters)) {
+ DataRate probe_bitrate = std::min(best->SendBitrate(), best->RecvBitrate());
+    // Make sure that a probe sent at a lower bitrate than our estimate can't
+ // reduce the estimate.
+ if (IsBitrateImproving(probe_bitrate)) {
+ RTC_LOG(LS_INFO) << "Probe successful, sent at "
+ << best->SendBitrate().bps() << " bps, received at "
+ << best->RecvBitrate().bps()
+ << " bps. Mean send delta: " << best->send_mean.ms()
+ << " ms, mean recv delta: " << best->recv_mean.ms()
+ << " ms, num probes: " << best->count;
+ remote_rate_.SetEstimate(probe_bitrate, now);
+ return ProbeResult::kBitrateUpdated;
+ }
+ }
+
+ // Not probing and received non-probe packet, or finished with current set
+ // of probes.
+ if (clusters.size() >= kExpectedNumberOfProbes)
+ probes_.clear();
+ return ProbeResult::kNoUpdate;
+}
+
+bool RemoteBitrateEstimatorAbsSendTime::IsBitrateImproving(
+ DataRate probe_bitrate) const {
+ bool initial_probe =
+ !remote_rate_.ValidEstimate() && probe_bitrate > DataRate::Zero();
+ bool bitrate_above_estimate = remote_rate_.ValidEstimate() &&
+ probe_bitrate > remote_rate_.LatestEstimate();
+ return initial_probe || bitrate_above_estimate;
+}
+
+void RemoteBitrateEstimatorAbsSendTime::IncomingPacket(
+ int64_t arrival_time_ms,
+ size_t payload_size,
+ const RTPHeader& header) {
+ RTC_DCHECK_RUNS_SERIALIZED(&network_race_);
+ if (!header.extension.hasAbsoluteSendTime) {
+ RTC_LOG(LS_WARNING)
+ << "RemoteBitrateEstimatorAbsSendTimeImpl: Incoming packet "
+ "is missing absolute send time extension!";
+ return;
+ }
+ IncomingPacketInfo(Timestamp::Millis(arrival_time_ms),
+ header.extension.absoluteSendTime,
+ DataSize::Bytes(payload_size), header.ssrc);
+}
+
+void RemoteBitrateEstimatorAbsSendTime::IncomingPacketInfo(
+ Timestamp arrival_time,
+ uint32_t send_time_24bits,
+ DataSize payload_size,
+ uint32_t ssrc) {
+ RTC_CHECK(send_time_24bits < (1ul << 24));
+ if (!uma_recorded_) {
+ RTC_HISTOGRAM_ENUMERATION(kBweTypeHistogram, BweNames::kReceiverAbsSendTime,
+ BweNames::kBweNamesMax);
+ uma_recorded_ = true;
+ }
+ // Shift up send time to use the full 32 bits that inter_arrival works with,
+ // so wrapping works properly.
+ uint32_t timestamp = send_time_24bits << kAbsSendTimeInterArrivalUpshift;
+ Timestamp send_time =
+ Timestamp::Millis(static_cast<int64_t>(timestamp) * kTimestampToMs);
+
+ Timestamp now = clock_->CurrentTime();
+ // TODO(holmer): SSRCs are only needed for REMB, should be broken out from
+ // here.
+
+ // Check if incoming bitrate estimate is valid, and if it needs to be reset.
+ absl::optional<uint32_t> incoming_bitrate =
+ incoming_bitrate_.Rate(arrival_time.ms());
+ if (incoming_bitrate) {
+ incoming_bitrate_initialized_ = true;
+ } else if (incoming_bitrate_initialized_) {
+    // Incoming bitrate had a previous valid value, but now not enough data
+    // points are left within the current window. Reset the incoming bitrate
+    // estimator so that the window only contains new data points.
+ incoming_bitrate_.Reset();
+ incoming_bitrate_initialized_ = false;
+ }
+ incoming_bitrate_.Update(payload_size.bytes(), arrival_time.ms());
+
+ if (first_packet_time_.IsInfinite()) {
+ first_packet_time_ = now;
+ }
+
+ uint32_t ts_delta = 0;
+ int64_t t_delta = 0;
+ int size_delta = 0;
+ bool update_estimate = false;
+ DataRate target_bitrate = DataRate::Zero();
+ std::vector<uint32_t> ssrcs;
+ {
+ MutexLock lock(&mutex_);
+
+ TimeoutStreams(now);
+ RTC_DCHECK(inter_arrival_);
+ RTC_DCHECK(estimator_);
+ ssrcs_.insert_or_assign(ssrc, now);
+
+ // For now only try to detect probes while we don't have a valid estimate.
+ // We currently assume that only packets larger than 200 bytes are paced by
+ // the sender.
+ static constexpr DataSize kMinProbePacketSize = DataSize::Bytes(200);
+ if (payload_size > kMinProbePacketSize &&
+ (!remote_rate_.ValidEstimate() ||
+ now - first_packet_time_ < kInitialProbingInterval)) {
+ // TODO(holmer): Use a map instead to get correct order?
+ if (total_probes_received_ < kMaxProbePackets) {
+ TimeDelta send_delta = TimeDelta::Millis(-1);
+ TimeDelta recv_delta = TimeDelta::Millis(-1);
+ if (!probes_.empty()) {
+ send_delta = send_time - probes_.back().send_time;
+ recv_delta = arrival_time - probes_.back().recv_time;
+ }
+ RTC_LOG(LS_INFO) << "Probe packet received: send time="
+ << send_time.ms()
+ << " ms, recv time=" << arrival_time.ms()
+ << " ms, send delta=" << send_delta.ms()
+ << " ms, recv delta=" << recv_delta.ms() << " ms.";
+ }
+ probes_.emplace_back(send_time, arrival_time, payload_size);
+ ++total_probes_received_;
+ // Make sure that a probe which updated the bitrate immediately has an
+ // effect by calling the OnReceiveBitrateChanged callback.
+ if (ProcessClusters(now) == ProbeResult::kBitrateUpdated)
+ update_estimate = true;
+ }
+ if (inter_arrival_->ComputeDeltas(timestamp, arrival_time.ms(), now.ms(),
+ payload_size.bytes(), &ts_delta, &t_delta,
+ &size_delta)) {
+ double ts_delta_ms = (1000.0 * ts_delta) / (1 << kInterArrivalShift);
+ estimator_->Update(t_delta, ts_delta_ms, size_delta, detector_.State(),
+ arrival_time.ms());
+ detector_.Detect(estimator_->offset(), ts_delta_ms,
+ estimator_->num_of_deltas(), arrival_time.ms());
+ }
+
+ if (!update_estimate) {
+ // Check if it's time for a periodic update or if we should update because
+ // of an over-use.
+ if (last_update_.IsInfinite() ||
+ now.ms() - last_update_.ms() >
+ remote_rate_.GetFeedbackInterval().ms()) {
+ update_estimate = true;
+ } else if (detector_.State() == BandwidthUsage::kBwOverusing) {
+ absl::optional<uint32_t> incoming_rate =
+ incoming_bitrate_.Rate(arrival_time.ms());
+ if (incoming_rate && remote_rate_.TimeToReduceFurther(
+ now, DataRate::BitsPerSec(*incoming_rate))) {
+ update_estimate = true;
+ }
+ }
+ }
+
+ if (update_estimate) {
+ // The first overuse should immediately trigger a new estimate.
+ // We also have to update the estimate immediately if we are overusing
+ // and the target bitrate is too high compared to what we are receiving.
+ const RateControlInput input(
+ detector_.State(), OptionalRateFromOptionalBps(
+ incoming_bitrate_.Rate(arrival_time.ms())));
+ target_bitrate = remote_rate_.Update(&input, now);
+ update_estimate = remote_rate_.ValidEstimate();
+ ssrcs = Keys(ssrcs_);
+ }
+ }
+ if (update_estimate) {
+ last_update_ = now;
+ observer_->OnReceiveBitrateChanged(ssrcs, target_bitrate.bps<uint32_t>());
+ }
+}
+
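+// Processing is driven entirely by incoming packets (see IncomingPacketInfo),
+// so Process() has no periodic work to do and reports an infinite interval.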
+TimeDelta RemoteBitrateEstimatorAbsSendTime::Process() {
+ return TimeDelta::PlusInfinity();
+}
+
+void RemoteBitrateEstimatorAbsSendTime::TimeoutStreams(Timestamp now) {
+ for (auto it = ssrcs_.begin(); it != ssrcs_.end();) {
+ if (now - it->second > TimeDelta::Millis(kStreamTimeOutMs)) {
+ ssrcs_.erase(it++);
+ } else {
+ ++it;
+ }
+ }
+ if (ssrcs_.empty()) {
+ // We can't update the estimate if we don't have any active streams.
+ inter_arrival_ = std::make_unique<InterArrival>(
+ (kTimestampGroupLengthMs << kInterArrivalShift) / 1000, kTimestampToMs,
+ true);
+ estimator_ = std::make_unique<OveruseEstimator>(OverUseDetectorOptions());
+    // We deliberately don't reset `first_packet_time_` here for now, since we
+    // only probe for bandwidth at the beginning of a call.
+ }
+}
+
+void RemoteBitrateEstimatorAbsSendTime::OnRttUpdate(int64_t avg_rtt_ms,
+ int64_t /*max_rtt_ms*/) {
+ MutexLock lock(&mutex_);
+ remote_rate_.SetRtt(TimeDelta::Millis(avg_rtt_ms));
+}
+
+void RemoteBitrateEstimatorAbsSendTime::RemoveStream(uint32_t ssrc) {
+ MutexLock lock(&mutex_);
+ ssrcs_.erase(ssrc);
+}
+
+DataRate RemoteBitrateEstimatorAbsSendTime::LatestEstimate() const {
+ // Currently accessed only from the worker thread (see Call::GetStats()).
+ MutexLock lock(&mutex_);
+ if (!remote_rate_.ValidEstimate() || ssrcs_.empty()) {
+ return DataRate::Zero();
+ }
+ return remote_rate_.LatestEstimate();
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_abs_send_time.h b/third_party/libwebrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_abs_send_time.h
new file mode 100644
index 0000000000..fd33c84b04
--- /dev/null
+++ b/third_party/libwebrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_abs_send_time.h
@@ -0,0 +1,135 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_REMOTE_BITRATE_ESTIMATOR_REMOTE_BITRATE_ESTIMATOR_ABS_SEND_TIME_H_
+#define MODULES_REMOTE_BITRATE_ESTIMATOR_REMOTE_BITRATE_ESTIMATOR_ABS_SEND_TIME_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <list>
+#include <map>
+#include <memory>
+#include <vector>
+
+#include "api/rtp_headers.h"
+#include "api/transport/field_trial_based_config.h"
+#include "api/units/data_rate.h"
+#include "api/units/data_size.h"
+#include "api/units/time_delta.h"
+#include "api/units/timestamp.h"
+#include "modules/remote_bitrate_estimator/aimd_rate_control.h"
+#include "modules/remote_bitrate_estimator/include/remote_bitrate_estimator.h"
+#include "modules/remote_bitrate_estimator/inter_arrival.h"
+#include "modules/remote_bitrate_estimator/overuse_detector.h"
+#include "modules/remote_bitrate_estimator/overuse_estimator.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/race_checker.h"
+#include "rtc_base/rate_statistics.h"
+#include "rtc_base/synchronization/mutex.h"
+#include "rtc_base/thread_annotations.h"
+#include "system_wrappers/include/clock.h"
+
+namespace webrtc {
+
+class RemoteBitrateEstimatorAbsSendTime : public RemoteBitrateEstimator {
+ public:
+ RemoteBitrateEstimatorAbsSendTime(RemoteBitrateObserver* observer,
+ Clock* clock);
+
+ RemoteBitrateEstimatorAbsSendTime() = delete;
+ RemoteBitrateEstimatorAbsSendTime(const RemoteBitrateEstimatorAbsSendTime&) =
+ delete;
+ RemoteBitrateEstimatorAbsSendTime& operator=(
+ const RemoteBitrateEstimatorAbsSendTime&) = delete;
+
+ ~RemoteBitrateEstimatorAbsSendTime() override;
+
+ void IncomingPacket(int64_t arrival_time_ms,
+ size_t payload_size,
+ const RTPHeader& header) override;
+ TimeDelta Process() override;
+ void OnRttUpdate(int64_t avg_rtt_ms, int64_t max_rtt_ms) override;
+ void RemoveStream(uint32_t ssrc) override;
+ DataRate LatestEstimate() const override;
+
+ private:
+ struct Probe {
+ Probe(Timestamp send_time, Timestamp recv_time, DataSize payload_size)
+ : send_time(send_time),
+ recv_time(recv_time),
+ payload_size(payload_size) {}
+
+ Timestamp send_time;
+ Timestamp recv_time;
+ DataSize payload_size;
+ };
+
+ struct Cluster {
+ DataRate SendBitrate() const { return mean_size / send_mean; }
+ DataRate RecvBitrate() const { return mean_size / recv_mean; }
+
+ TimeDelta send_mean = TimeDelta::Zero();
+ TimeDelta recv_mean = TimeDelta::Zero();
+ // TODO(holmer): Add some variance metric as well?
+ DataSize mean_size = DataSize::Zero();
+ int count = 0;
+ int num_above_min_delta = 0;
+ };
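+  // For example (illustrative): mean_size == 1000 bytes and send_mean == 5 ms
+  // give SendBitrate() == 8000 bits / 5 ms == 1.6 Mbps, matching the
+  // 1600 kbps probe bursts in the unit tests.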
+
+ enum class ProbeResult { kBitrateUpdated, kNoUpdate };
+
+ static bool IsWithinClusterBounds(TimeDelta send_delta,
+ const Cluster& cluster_aggregate);
+
+ static void MaybeAddCluster(const Cluster& cluster_aggregate,
+ std::list<Cluster>& clusters);
+
+ void IncomingPacketInfo(Timestamp arrival_time,
+ uint32_t send_time_24bits,
+ DataSize payload_size,
+ uint32_t ssrc);
+
+ std::list<Cluster> ComputeClusters() const;
+
+ const Cluster* FindBestProbe(const std::list<Cluster>& clusters) const;
+
+  // Returns ProbeResult::kBitrateUpdated if a probe which changed the
+  // estimate was detected.
+ ProbeResult ProcessClusters(Timestamp now)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(&mutex_);
+
+ bool IsBitrateImproving(DataRate probe_bitrate) const
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(&mutex_);
+
+ void TimeoutStreams(Timestamp now) RTC_EXCLUSIVE_LOCKS_REQUIRED(&mutex_);
+
+ rtc::RaceChecker network_race_;
+ Clock* const clock_;
+ const FieldTrialBasedConfig field_trials_;
+ RemoteBitrateObserver* const observer_;
+ std::unique_ptr<InterArrival> inter_arrival_;
+ std::unique_ptr<OveruseEstimator> estimator_;
+ OveruseDetector detector_;
+ RateStatistics incoming_bitrate_{kBitrateWindowMs, 8000};
+ bool incoming_bitrate_initialized_ = false;
+ std::list<Probe> probes_;
+ size_t total_probes_received_ = 0;
+ Timestamp first_packet_time_ = Timestamp::MinusInfinity();
+ Timestamp last_update_ = Timestamp::MinusInfinity();
+ bool uma_recorded_ = false;
+
+ mutable Mutex mutex_;
+ std::map<uint32_t, Timestamp> ssrcs_ RTC_GUARDED_BY(&mutex_);
+ AimdRateControl remote_rate_ RTC_GUARDED_BY(&mutex_);
+};
+
+} // namespace webrtc
+
+#endif // MODULES_REMOTE_BITRATE_ESTIMATOR_REMOTE_BITRATE_ESTIMATOR_ABS_SEND_TIME_H_
diff --git a/third_party/libwebrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_abs_send_time_unittest.cc b/third_party/libwebrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_abs_send_time_unittest.cc
new file mode 100644
index 0000000000..d8ef23cc92
--- /dev/null
+++ b/third_party/libwebrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_abs_send_time_unittest.cc
@@ -0,0 +1,296 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/remote_bitrate_estimator/remote_bitrate_estimator_abs_send_time.h"
+
+#include "modules/remote_bitrate_estimator/remote_bitrate_estimator_unittest_helper.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+class RemoteBitrateEstimatorAbsSendTimeTest
+ : public RemoteBitrateEstimatorTest {
+ public:
+ RemoteBitrateEstimatorAbsSendTimeTest() {}
+
+ RemoteBitrateEstimatorAbsSendTimeTest(
+ const RemoteBitrateEstimatorAbsSendTimeTest&) = delete;
+ RemoteBitrateEstimatorAbsSendTimeTest& operator=(
+ const RemoteBitrateEstimatorAbsSendTimeTest&) = delete;
+
+  void SetUp() override {
+ bitrate_estimator_.reset(new RemoteBitrateEstimatorAbsSendTime(
+ bitrate_observer_.get(), &clock_));
+ }
+
+ protected:
+};
+
+TEST_F(RemoteBitrateEstimatorAbsSendTimeTest, InitialBehavior) {
+ InitialBehaviorTestHelper(674840);
+}
+
+TEST_F(RemoteBitrateEstimatorAbsSendTimeTest, RateIncreaseReordering) {
+ RateIncreaseReorderingTestHelper(674840);
+}
+
+TEST_F(RemoteBitrateEstimatorAbsSendTimeTest, RateIncreaseRtpTimestamps) {
+ RateIncreaseRtpTimestampsTestHelper(1237);
+}
+
+TEST_F(RemoteBitrateEstimatorAbsSendTimeTest, CapacityDropOneStream) {
+ CapacityDropTestHelper(1, false, 633, 0);
+}
+
+TEST_F(RemoteBitrateEstimatorAbsSendTimeTest, CapacityDropPosOffsetChange) {
+ CapacityDropTestHelper(1, false, 267, 30000);
+}
+
+TEST_F(RemoteBitrateEstimatorAbsSendTimeTest, CapacityDropNegOffsetChange) {
+ CapacityDropTestHelper(1, false, 267, -30000);
+}
+
+TEST_F(RemoteBitrateEstimatorAbsSendTimeTest, CapacityDropOneStreamWrap) {
+ CapacityDropTestHelper(1, true, 633, 0);
+}
+
+TEST_F(RemoteBitrateEstimatorAbsSendTimeTest, CapacityDropTwoStreamsWrap) {
+ CapacityDropTestHelper(2, true, 700, 0);
+}
+
+TEST_F(RemoteBitrateEstimatorAbsSendTimeTest, CapacityDropThreeStreamsWrap) {
+ CapacityDropTestHelper(3, true, 633, 0);
+}
+
+TEST_F(RemoteBitrateEstimatorAbsSendTimeTest, CapacityDropThirteenStreamsWrap) {
+ CapacityDropTestHelper(13, true, 667, 0);
+}
+
+TEST_F(RemoteBitrateEstimatorAbsSendTimeTest, CapacityDropNineteenStreamsWrap) {
+ CapacityDropTestHelper(19, true, 667, 0);
+}
+
+TEST_F(RemoteBitrateEstimatorAbsSendTimeTest, CapacityDropThirtyStreamsWrap) {
+ CapacityDropTestHelper(30, true, 667, 0);
+}
+
+TEST_F(RemoteBitrateEstimatorAbsSendTimeTest, TestTimestampGrouping) {
+ TestTimestampGroupingTestHelper();
+}
+
+TEST_F(RemoteBitrateEstimatorAbsSendTimeTest, TestShortTimeoutAndWrap) {
+ // Simulate a client leaving and rejoining the call after 35 seconds. This
+ // will make abs send time wrap, so if streams aren't timed out properly
+ // the next 30 seconds of packets will be out of order.
+ TestWrappingHelper(35);
+}
+
+TEST_F(RemoteBitrateEstimatorAbsSendTimeTest, TestLongTimeoutAndWrap) {
+  // Simulate a client leaving and rejoining the call a multiple of
+ // 64 seconds later. This will cause a zero difference in abs send times due
+ // to the wrap, but a big difference in arrival time, if streams aren't
+ // properly timed out.
+ TestWrappingHelper(10 * 64);
+}
+
+TEST_F(RemoteBitrateEstimatorAbsSendTimeTest, TestProcessAfterTimeout) {
+ // This time constant must be equal to the ones defined for the
+ // RemoteBitrateEstimator.
+ const int64_t kStreamTimeOutMs = 2000;
+ const int64_t kProcessIntervalMs = 1000;
+ IncomingPacket(0, 1000, clock_.TimeInMilliseconds(), 0, 0);
+ clock_.AdvanceTimeMilliseconds(kStreamTimeOutMs + 1);
+ // Trigger timeout.
+ bitrate_estimator_->Process();
+ clock_.AdvanceTimeMilliseconds(kProcessIntervalMs);
+ // This shouldn't crash.
+ bitrate_estimator_->Process();
+}
+
+TEST_F(RemoteBitrateEstimatorAbsSendTimeTest, TestProbeDetection) {
+ const int kProbeLength = 5;
+ int64_t now_ms = clock_.TimeInMilliseconds();
+ // First burst sent at 8 * 1000 / 10 = 800 kbps.
+ for (int i = 0; i < kProbeLength; ++i) {
+ clock_.AdvanceTimeMilliseconds(10);
+ now_ms = clock_.TimeInMilliseconds();
+ IncomingPacket(0, 1000, now_ms, 90 * now_ms, AbsSendTime(now_ms, 1000));
+ }
+
+ // Second burst sent at 8 * 1000 / 5 = 1600 kbps.
+ for (int i = 0; i < kProbeLength; ++i) {
+ clock_.AdvanceTimeMilliseconds(5);
+ now_ms = clock_.TimeInMilliseconds();
+ IncomingPacket(0, 1000, now_ms, 90 * now_ms, AbsSendTime(now_ms, 1000));
+ }
+
+ bitrate_estimator_->Process();
+ EXPECT_TRUE(bitrate_observer_->updated());
+ EXPECT_GT(bitrate_observer_->latest_bitrate(), 1500000u);
+}
+
+TEST_F(RemoteBitrateEstimatorAbsSendTimeTest,
+ TestProbeDetectionNonPacedPackets) {
+ const int kProbeLength = 5;
+ int64_t now_ms = clock_.TimeInMilliseconds();
+ // First burst sent at 8 * 1000 / 10 = 800 kbps, but with every other packet
+  // not being paced, which could mess things up.
+ for (int i = 0; i < kProbeLength; ++i) {
+ clock_.AdvanceTimeMilliseconds(5);
+ now_ms = clock_.TimeInMilliseconds();
+ IncomingPacket(0, 1000, now_ms, 90 * now_ms, AbsSendTime(now_ms, 1000));
+ // Non-paced packet, arriving 5 ms after.
+ clock_.AdvanceTimeMilliseconds(5);
+ IncomingPacket(0, 100, now_ms, 90 * now_ms, AbsSendTime(now_ms, 1000));
+ }
+
+ bitrate_estimator_->Process();
+ EXPECT_TRUE(bitrate_observer_->updated());
+ EXPECT_GT(bitrate_observer_->latest_bitrate(), 800000u);
+}
+
+// Packets will require 5 ms to be transmitted to the receiver, causing packets
+// of the second probe to be dispersed.
+TEST_F(RemoteBitrateEstimatorAbsSendTimeTest,
+ TestProbeDetectionTooHighBitrate) {
+ const int kProbeLength = 5;
+ int64_t now_ms = clock_.TimeInMilliseconds();
+ int64_t send_time_ms = 0;
+ // First burst sent at 8 * 1000 / 10 = 800 kbps.
+ for (int i = 0; i < kProbeLength; ++i) {
+ clock_.AdvanceTimeMilliseconds(10);
+ now_ms = clock_.TimeInMilliseconds();
+ send_time_ms += 10;
+ IncomingPacket(0, 1000, now_ms, 90 * send_time_ms,
+ AbsSendTime(send_time_ms, 1000));
+ }
+
+ // Second burst sent at 8 * 1000 / 5 = 1600 kbps, arriving at 8 * 1000 / 8 =
+ // 1000 kbps.
+ for (int i = 0; i < kProbeLength; ++i) {
+ clock_.AdvanceTimeMilliseconds(8);
+ now_ms = clock_.TimeInMilliseconds();
+ send_time_ms += 5;
+ IncomingPacket(0, 1000, now_ms, send_time_ms,
+ AbsSendTime(send_time_ms, 1000));
+ }
+
+ bitrate_estimator_->Process();
+ EXPECT_TRUE(bitrate_observer_->updated());
+ EXPECT_NEAR(bitrate_observer_->latest_bitrate(), 800000u, 10000);
+}
+
+TEST_F(RemoteBitrateEstimatorAbsSendTimeTest,
+ TestProbeDetectionSlightlyFasterArrival) {
+ const int kProbeLength = 5;
+ int64_t now_ms = clock_.TimeInMilliseconds();
+ // First burst sent at 8 * 1000 / 10 = 800 kbps.
+ // Arriving at 8 * 1000 / 5 = 1600 kbps.
+ int64_t send_time_ms = 0;
+ for (int i = 0; i < kProbeLength; ++i) {
+ clock_.AdvanceTimeMilliseconds(5);
+ send_time_ms += 10;
+ now_ms = clock_.TimeInMilliseconds();
+ IncomingPacket(0, 1000, now_ms, 90 * send_time_ms,
+ AbsSendTime(send_time_ms, 1000));
+ }
+
+ bitrate_estimator_->Process();
+ EXPECT_TRUE(bitrate_observer_->updated());
+ EXPECT_GT(bitrate_observer_->latest_bitrate(), 800000u);
+}
+
+TEST_F(RemoteBitrateEstimatorAbsSendTimeTest, TestProbeDetectionFasterArrival) {
+ const int kProbeLength = 5;
+ int64_t now_ms = clock_.TimeInMilliseconds();
+ // First burst sent at 8 * 1000 / 10 = 800 kbps.
+ // Arriving at 8 * 1000 / 5 = 1600 kbps.
+ int64_t send_time_ms = 0;
+ for (int i = 0; i < kProbeLength; ++i) {
+ clock_.AdvanceTimeMilliseconds(1);
+ send_time_ms += 10;
+ now_ms = clock_.TimeInMilliseconds();
+ IncomingPacket(0, 1000, now_ms, 90 * send_time_ms,
+ AbsSendTime(send_time_ms, 1000));
+ }
+
+ bitrate_estimator_->Process();
+ EXPECT_FALSE(bitrate_observer_->updated());
+}
+
+TEST_F(RemoteBitrateEstimatorAbsSendTimeTest, TestProbeDetectionSlowerArrival) {
+ const int kProbeLength = 5;
+ int64_t now_ms = clock_.TimeInMilliseconds();
+ // First burst sent at 8 * 1000 / 5 = 1600 kbps.
+ // Arriving at 8 * 1000 / 7 = 1142 kbps.
+ int64_t send_time_ms = 0;
+ for (int i = 0; i < kProbeLength; ++i) {
+ clock_.AdvanceTimeMilliseconds(7);
+ send_time_ms += 5;
+ now_ms = clock_.TimeInMilliseconds();
+ IncomingPacket(0, 1000, now_ms, 90 * send_time_ms,
+ AbsSendTime(send_time_ms, 1000));
+ }
+
+ bitrate_estimator_->Process();
+ EXPECT_TRUE(bitrate_observer_->updated());
+ EXPECT_NEAR(bitrate_observer_->latest_bitrate(), 1140000, 10000);
+}
+
+TEST_F(RemoteBitrateEstimatorAbsSendTimeTest,
+ TestProbeDetectionSlowerArrivalHighBitrate) {
+ const int kProbeLength = 5;
+ int64_t now_ms = clock_.TimeInMilliseconds();
+ // Burst sent at 8 * 1000 / 1 = 8000 kbps.
+ // Arriving at 8 * 1000 / 2 = 4000 kbps.
+ int64_t send_time_ms = 0;
+ for (int i = 0; i < kProbeLength; ++i) {
+ clock_.AdvanceTimeMilliseconds(2);
+ send_time_ms += 1;
+ now_ms = clock_.TimeInMilliseconds();
+ IncomingPacket(0, 1000, now_ms, 90 * send_time_ms,
+ AbsSendTime(send_time_ms, 1000));
+ }
+
+ bitrate_estimator_->Process();
+ EXPECT_TRUE(bitrate_observer_->updated());
+ EXPECT_NEAR(bitrate_observer_->latest_bitrate(), 4000000u, 10000);
+}
+
+TEST_F(RemoteBitrateEstimatorAbsSendTimeTest, ProbingIgnoresSmallPackets) {
+ const int kProbeLength = 5;
+ int64_t now_ms = clock_.TimeInMilliseconds();
+  // Probing with 200-byte packets every 10 ms should be ignored by the probe
+  // detection.
+ for (int i = 0; i < kProbeLength; ++i) {
+ clock_.AdvanceTimeMilliseconds(10);
+ now_ms = clock_.TimeInMilliseconds();
+ IncomingPacket(0, 200, now_ms, 90 * now_ms, AbsSendTime(now_ms, 1000));
+ }
+
+ bitrate_estimator_->Process();
+ EXPECT_FALSE(bitrate_observer_->updated());
+
+ // Followed by a probe with 1000 bytes packets, should be detected as a
+ // probe.
+ for (int i = 0; i < kProbeLength; ++i) {
+ clock_.AdvanceTimeMilliseconds(10);
+ now_ms = clock_.TimeInMilliseconds();
+ IncomingPacket(0, 1000, now_ms, 90 * now_ms, AbsSendTime(now_ms, 1000));
+ }
+
+ // Wait long enough so that we can call Process again.
+ clock_.AdvanceTimeMilliseconds(1000);
+
+ bitrate_estimator_->Process();
+ EXPECT_TRUE(bitrate_observer_->updated());
+ EXPECT_NEAR(bitrate_observer_->latest_bitrate(), 800000u, 10000);
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_gn/moz.build b/third_party/libwebrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_gn/moz.build
new file mode 100644
index 0000000000..7af9c5fc00
--- /dev/null
+++ b/third_party/libwebrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_gn/moz.build
@@ -0,0 +1,226 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["BWE_TEST_LOGGING_COMPILE_TIME_ENABLE"] = "0"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+SOURCES += [
+ "/third_party/libwebrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_single_stream.cc"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/remote_bitrate_estimator/aimd_rate_control.cc",
+ "/third_party/libwebrtc/modules/remote_bitrate_estimator/bwe_defines.cc",
+ "/third_party/libwebrtc/modules/remote_bitrate_estimator/inter_arrival.cc",
+ "/third_party/libwebrtc/modules/remote_bitrate_estimator/overuse_detector.cc",
+ "/third_party/libwebrtc/modules/remote_bitrate_estimator/overuse_estimator.cc",
+ "/third_party/libwebrtc/modules/remote_bitrate_estimator/packet_arrival_map.cc",
+ "/third_party/libwebrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_abs_send_time.cc",
+ "/third_party/libwebrtc/modules/remote_bitrate_estimator/remote_estimator_proxy.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "GLESv2",
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "dl",
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("remote_bitrate_estimator_gn")
diff --git a/third_party/libwebrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_single_stream.cc b/third_party/libwebrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_single_stream.cc
new file mode 100644
index 0000000000..6f442e5e2c
--- /dev/null
+++ b/third_party/libwebrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_single_stream.cc
@@ -0,0 +1,252 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/remote_bitrate_estimator/remote_bitrate_estimator_single_stream.h"
+
+#include <cstdint>
+#include <utility>
+
+#include "absl/types/optional.h"
+#include "modules/remote_bitrate_estimator/aimd_rate_control.h"
+#include "modules/remote_bitrate_estimator/include/bwe_defines.h"
+#include "modules/remote_bitrate_estimator/inter_arrival.h"
+#include "modules/remote_bitrate_estimator/overuse_detector.h"
+#include "modules/remote_bitrate_estimator/overuse_estimator.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "system_wrappers/include/clock.h"
+#include "system_wrappers/include/metrics.h"
+
+namespace webrtc {
+namespace {
+absl::optional<DataRate> OptionalRateFromOptionalBps(
+ absl::optional<int> bitrate_bps) {
+ if (bitrate_bps) {
+ return DataRate::BitsPerSec(*bitrate_bps);
+ } else {
+ return absl::nullopt;
+ }
+}
+} // namespace
+
+enum { kTimestampGroupLengthMs = 5 };
+static const double kTimestampToMs = 1.0 / 90.0;
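+// kTimestampToMs above converts 90 kHz RTP video timestamp units to
+// milliseconds: one timestamp unit is 1/90 ms.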
+
+struct RemoteBitrateEstimatorSingleStream::Detector {
+ explicit Detector(int64_t last_packet_time_ms,
+ const OverUseDetectorOptions& options,
+ bool enable_burst_grouping,
+ const FieldTrialsView* key_value_config)
+ : last_packet_time_ms(last_packet_time_ms),
+ inter_arrival(90 * kTimestampGroupLengthMs,
+ kTimestampToMs,
+ enable_burst_grouping),
+ estimator(options),
+ detector(key_value_config) {}
+ int64_t last_packet_time_ms;
+ InterArrival inter_arrival;
+ OveruseEstimator estimator;
+ OveruseDetector detector;
+};
+
+RemoteBitrateEstimatorSingleStream::RemoteBitrateEstimatorSingleStream(
+ RemoteBitrateObserver* observer,
+ Clock* clock)
+ : clock_(clock),
+ incoming_bitrate_(kBitrateWindowMs, 8000),
+ last_valid_incoming_bitrate_(0),
+ remote_rate_(new AimdRateControl(&field_trials_)),
+ observer_(observer),
+ last_process_time_(-1),
+ process_interval_ms_(kProcessIntervalMs),
+ uma_recorded_(false) {
+ RTC_LOG(LS_INFO) << "RemoteBitrateEstimatorSingleStream: Instantiating.";
+}
+
+RemoteBitrateEstimatorSingleStream::~RemoteBitrateEstimatorSingleStream() {
+ while (!overuse_detectors_.empty()) {
+ SsrcOveruseEstimatorMap::iterator it = overuse_detectors_.begin();
+ delete it->second;
+ overuse_detectors_.erase(it);
+ }
+}
+
+void RemoteBitrateEstimatorSingleStream::IncomingPacket(
+ int64_t arrival_time_ms,
+ size_t payload_size,
+ const RTPHeader& header) {
+ if (!uma_recorded_) {
+ BweNames type = BweNames::kReceiverTOffset;
+ if (!header.extension.hasTransmissionTimeOffset)
+ type = BweNames::kReceiverNoExtension;
+ RTC_HISTOGRAM_ENUMERATION(kBweTypeHistogram, type, BweNames::kBweNamesMax);
+ uma_recorded_ = true;
+ }
+ uint32_t ssrc = header.ssrc;
+ uint32_t rtp_timestamp =
+ header.timestamp + header.extension.transmissionTimeOffset;
+ int64_t now_ms = clock_->TimeInMilliseconds();
+ MutexLock lock(&mutex_);
+ SsrcOveruseEstimatorMap::iterator it = overuse_detectors_.find(ssrc);
+ if (it == overuse_detectors_.end()) {
+ // This is a new SSRC. Adding to map.
+ // TODO(holmer): If the channel changes SSRC the old SSRC will still be
+ // around in this map until the channel is deleted. This is OK since the
+ // callback will no longer be called for the old SSRC. This will be
+ // automatically cleaned up when we have one RemoteBitrateEstimator per REMB
+ // group.
+ std::pair<SsrcOveruseEstimatorMap::iterator, bool> insert_result =
+ overuse_detectors_.insert(
+ std::make_pair(ssrc, new Detector(now_ms, OverUseDetectorOptions(),
+ true, &field_trials_)));
+ it = insert_result.first;
+ }
+ Detector* estimator = it->second;
+ estimator->last_packet_time_ms = now_ms;
+
+ // Check if incoming bitrate estimate is valid, and if it needs to be reset.
+ absl::optional<uint32_t> incoming_bitrate = incoming_bitrate_.Rate(now_ms);
+ if (incoming_bitrate) {
+ last_valid_incoming_bitrate_ = *incoming_bitrate;
+ } else if (last_valid_incoming_bitrate_ > 0) {
+    // Incoming bitrate had a previous valid value, but now not enough data
+    // points are left within the current window. Reset the incoming bitrate
+    // estimator so that the window only contains new data points.
+ incoming_bitrate_.Reset();
+ last_valid_incoming_bitrate_ = 0;
+ }
+ incoming_bitrate_.Update(payload_size, now_ms);
+
+ const BandwidthUsage prior_state = estimator->detector.State();
+ uint32_t timestamp_delta = 0;
+ int64_t time_delta = 0;
+ int size_delta = 0;
+ if (estimator->inter_arrival.ComputeDeltas(
+ rtp_timestamp, arrival_time_ms, now_ms, payload_size,
+ &timestamp_delta, &time_delta, &size_delta)) {
+ double timestamp_delta_ms = timestamp_delta * kTimestampToMs;
+ estimator->estimator.Update(time_delta, timestamp_delta_ms, size_delta,
+ estimator->detector.State(), now_ms);
+ estimator->detector.Detect(estimator->estimator.offset(),
+ timestamp_delta_ms,
+ estimator->estimator.num_of_deltas(), now_ms);
+ }
+ if (estimator->detector.State() == BandwidthUsage::kBwOverusing) {
+ absl::optional<uint32_t> incoming_bitrate_bps =
+ incoming_bitrate_.Rate(now_ms);
+ if (incoming_bitrate_bps &&
+ (prior_state != BandwidthUsage::kBwOverusing ||
+ GetRemoteRate()->TimeToReduceFurther(
+ Timestamp::Millis(now_ms),
+ DataRate::BitsPerSec(*incoming_bitrate_bps)))) {
+ // The first overuse should immediately trigger a new estimate.
+ // We also have to update the estimate immediately if we are overusing
+ // and the target bitrate is too high compared to what we are receiving.
+ UpdateEstimate(now_ms);
+ }
+ }
+}
+
+TimeDelta RemoteBitrateEstimatorSingleStream::Process() {
+ MutexLock lock(&mutex_);
+ int64_t now_ms = clock_->TimeInMilliseconds();
+ int64_t next_process_time_ms = last_process_time_ + process_interval_ms_;
+ if (last_process_time_ == -1 || now_ms >= next_process_time_ms) {
+ UpdateEstimate(now_ms);
+ last_process_time_ = now_ms;
+ return TimeDelta::Millis(process_interval_ms_);
+ }
+
+ return TimeDelta::Millis(next_process_time_ms - now_ms);
+}
+
+void RemoteBitrateEstimatorSingleStream::UpdateEstimate(int64_t now_ms) {
+ BandwidthUsage bw_state = BandwidthUsage::kBwNormal;
+ SsrcOveruseEstimatorMap::iterator it = overuse_detectors_.begin();
+ while (it != overuse_detectors_.end()) {
+ const int64_t time_of_last_received_packet =
+ it->second->last_packet_time_ms;
+ if (time_of_last_received_packet >= 0 &&
+ now_ms - time_of_last_received_packet > kStreamTimeOutMs) {
+ // This over-use detector hasn't received packets for `kStreamTimeOutMs`
+ // milliseconds and is considered stale.
+ delete it->second;
+ overuse_detectors_.erase(it++);
+ } else {
+ // Make sure that we trigger an over-use if any of the over-use detectors
+ // is detecting over-use.
+ if (it->second->detector.State() > bw_state) {
+ bw_state = it->second->detector.State();
+ }
+ ++it;
+ }
+ }
+ // We can't update the estimate if we don't have any active streams.
+ if (overuse_detectors_.empty()) {
+ return;
+ }
+ AimdRateControl* remote_rate = GetRemoteRate();
+
+ const RateControlInput input(
+ bw_state, OptionalRateFromOptionalBps(incoming_bitrate_.Rate(now_ms)));
+ uint32_t target_bitrate =
+ remote_rate->Update(&input, Timestamp::Millis(now_ms)).bps<uint32_t>();
+ if (remote_rate->ValidEstimate()) {
+ process_interval_ms_ = remote_rate->GetFeedbackInterval().ms();
+ RTC_DCHECK_GT(process_interval_ms_, 0);
+ std::vector<uint32_t> ssrcs;
+ GetSsrcs(&ssrcs);
+ if (observer_)
+ observer_->OnReceiveBitrateChanged(ssrcs, target_bitrate);
+ }
+}
+
+void RemoteBitrateEstimatorSingleStream::OnRttUpdate(int64_t avg_rtt_ms,
+ int64_t max_rtt_ms) {
+ MutexLock lock(&mutex_);
+ GetRemoteRate()->SetRtt(TimeDelta::Millis(avg_rtt_ms));
+}
+
+void RemoteBitrateEstimatorSingleStream::RemoveStream(unsigned int ssrc) {
+ MutexLock lock(&mutex_);
+ SsrcOveruseEstimatorMap::iterator it = overuse_detectors_.find(ssrc);
+ if (it != overuse_detectors_.end()) {
+ delete it->second;
+ overuse_detectors_.erase(it);
+ }
+}
+
+DataRate RemoteBitrateEstimatorSingleStream::LatestEstimate() const {
+ MutexLock lock(&mutex_);
+ if (!remote_rate_->ValidEstimate() || overuse_detectors_.empty()) {
+ return DataRate::Zero();
+ }
+ return remote_rate_->LatestEstimate();
+}
+
+void RemoteBitrateEstimatorSingleStream::GetSsrcs(
+ std::vector<uint32_t>* ssrcs) const {
+ RTC_DCHECK(ssrcs);
+ ssrcs->resize(overuse_detectors_.size());
+ int i = 0;
+ for (SsrcOveruseEstimatorMap::const_iterator it = overuse_detectors_.begin();
+ it != overuse_detectors_.end(); ++it, ++i) {
+ (*ssrcs)[i] = it->first;
+ }
+}
+
+AimdRateControl* RemoteBitrateEstimatorSingleStream::GetRemoteRate() {
+ if (!remote_rate_)
+ remote_rate_.reset(new AimdRateControl(&field_trials_));
+ return remote_rate_.get();
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_single_stream.h b/third_party/libwebrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_single_stream.h
new file mode 100644
index 0000000000..d62f922e02
--- /dev/null
+++ b/third_party/libwebrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_single_stream.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_REMOTE_BITRATE_ESTIMATOR_REMOTE_BITRATE_ESTIMATOR_SINGLE_STREAM_H_
+#define MODULES_REMOTE_BITRATE_ESTIMATOR_REMOTE_BITRATE_ESTIMATOR_SINGLE_STREAM_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <map>
+#include <memory>
+#include <vector>
+
+#include "api/transport/field_trial_based_config.h"
+#include "api/units/data_rate.h"
+#include "api/units/time_delta.h"
+#include "api/units/timestamp.h"
+#include "modules/remote_bitrate_estimator/aimd_rate_control.h"
+#include "modules/remote_bitrate_estimator/include/remote_bitrate_estimator.h"
+#include "rtc_base/rate_statistics.h"
+#include "rtc_base/synchronization/mutex.h"
+#include "rtc_base/thread_annotations.h"
+
+namespace webrtc {
+
+class Clock;
+struct RTPHeader;
+
+class RemoteBitrateEstimatorSingleStream : public RemoteBitrateEstimator {
+ public:
+ RemoteBitrateEstimatorSingleStream(RemoteBitrateObserver* observer,
+ Clock* clock);
+
+ RemoteBitrateEstimatorSingleStream() = delete;
+ RemoteBitrateEstimatorSingleStream(
+ const RemoteBitrateEstimatorSingleStream&) = delete;
+ RemoteBitrateEstimatorSingleStream& operator=(
+ const RemoteBitrateEstimatorSingleStream&) = delete;
+
+ ~RemoteBitrateEstimatorSingleStream() override;
+
+ void IncomingPacket(int64_t arrival_time_ms,
+ size_t payload_size,
+ const RTPHeader& header) override;
+ TimeDelta Process() override;
+ void OnRttUpdate(int64_t avg_rtt_ms, int64_t max_rtt_ms) override;
+ void RemoveStream(uint32_t ssrc) override;
+ DataRate LatestEstimate() const override;
+
+ private:
+ struct Detector;
+
+ typedef std::map<uint32_t, Detector*> SsrcOveruseEstimatorMap;
+
+ // Triggers a new estimate calculation.
+ void UpdateEstimate(int64_t time_now) RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+
+ void GetSsrcs(std::vector<uint32_t>* ssrcs) const
+ RTC_SHARED_LOCKS_REQUIRED(mutex_);
+
+ // Returns `remote_rate_` if the pointed to object exists,
+ // otherwise creates it.
+ AimdRateControl* GetRemoteRate() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+
+ Clock* const clock_;
+ const FieldTrialBasedConfig field_trials_;
+ SsrcOveruseEstimatorMap overuse_detectors_ RTC_GUARDED_BY(mutex_);
+ RateStatistics incoming_bitrate_ RTC_GUARDED_BY(mutex_);
+ uint32_t last_valid_incoming_bitrate_ RTC_GUARDED_BY(mutex_);
+ std::unique_ptr<AimdRateControl> remote_rate_ RTC_GUARDED_BY(mutex_);
+ RemoteBitrateObserver* const observer_ RTC_GUARDED_BY(mutex_);
+ mutable Mutex mutex_;
+ int64_t last_process_time_;
+ int64_t process_interval_ms_ RTC_GUARDED_BY(mutex_);
+ bool uma_recorded_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_REMOTE_BITRATE_ESTIMATOR_REMOTE_BITRATE_ESTIMATOR_SINGLE_STREAM_H_
diff --git a/third_party/libwebrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_single_stream_unittest.cc b/third_party/libwebrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_single_stream_unittest.cc
new file mode 100644
index 0000000000..64ef39d935
--- /dev/null
+++ b/third_party/libwebrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_single_stream_unittest.cc
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/remote_bitrate_estimator/remote_bitrate_estimator_single_stream.h"
+
+#include "modules/remote_bitrate_estimator/remote_bitrate_estimator_unittest_helper.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+class RemoteBitrateEstimatorSingleTest : public RemoteBitrateEstimatorTest {
+ public:
+ RemoteBitrateEstimatorSingleTest() {}
+
+ RemoteBitrateEstimatorSingleTest(const RemoteBitrateEstimatorSingleTest&) =
+ delete;
+ RemoteBitrateEstimatorSingleTest& operator=(
+ const RemoteBitrateEstimatorSingleTest&) = delete;
+
+ virtual void SetUp() {
+ bitrate_estimator_.reset(new RemoteBitrateEstimatorSingleStream(
+ bitrate_observer_.get(), &clock_));
+ }
+
+ protected:
+};
+
+TEST_F(RemoteBitrateEstimatorSingleTest, InitialBehavior) {
+ InitialBehaviorTestHelper(508017);
+}
+
+TEST_F(RemoteBitrateEstimatorSingleTest, RateIncreaseReordering) {
+ RateIncreaseReorderingTestHelper(506422);
+}
+
+TEST_F(RemoteBitrateEstimatorSingleTest, RateIncreaseRtpTimestamps) {
+ RateIncreaseRtpTimestampsTestHelper(1267);
+}
+
+TEST_F(RemoteBitrateEstimatorSingleTest, CapacityDropOneStream) {
+ CapacityDropTestHelper(1, false, 633, 0);
+}
+
+TEST_F(RemoteBitrateEstimatorSingleTest, CapacityDropOneStreamWrap) {
+ CapacityDropTestHelper(1, true, 633, 0);
+}
+
+TEST_F(RemoteBitrateEstimatorSingleTest, CapacityDropTwoStreamsWrap) {
+ CapacityDropTestHelper(2, true, 767, 0);
+}
+
+TEST_F(RemoteBitrateEstimatorSingleTest, CapacityDropThreeStreamsWrap) {
+ CapacityDropTestHelper(3, true, 567, 0);
+}
+
+TEST_F(RemoteBitrateEstimatorSingleTest, CapacityDropThirteenStreamsWrap) {
+ CapacityDropTestHelper(13, true, 567, 0);
+}
+
+TEST_F(RemoteBitrateEstimatorSingleTest, CapacityDropNineteenStreamsWrap) {
+ CapacityDropTestHelper(19, true, 700, 0);
+}
+
+TEST_F(RemoteBitrateEstimatorSingleTest, CapacityDropThirtyStreamsWrap) {
+ CapacityDropTestHelper(30, true, 733, 0);
+}
+
+TEST_F(RemoteBitrateEstimatorSingleTest, TestTimestampGrouping) {
+ TestTimestampGroupingTestHelper();
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_unittest_helper.cc b/third_party/libwebrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_unittest_helper.cc
new file mode 100644
index 0000000000..899037f5a7
--- /dev/null
+++ b/third_party/libwebrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_unittest_helper.cc
@@ -0,0 +1,594 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/remote_bitrate_estimator/remote_bitrate_estimator_unittest_helper.h"
+
+#include <algorithm>
+#include <limits>
+#include <utility>
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+const size_t kMtu = 1200;
+const uint32_t kAcceptedBitrateErrorBps = 50000;
+
+// Number of packets needed before we have a valid estimate.
+const int kNumInitialPackets = 2;
+
+namespace testing {
+
+void TestBitrateObserver::OnReceiveBitrateChanged(
+ const std::vector<uint32_t>& ssrcs,
+ uint32_t bitrate) {
+ latest_bitrate_ = bitrate;
+ updated_ = true;
+}
+
+RtpStream::RtpStream(int fps,
+ int bitrate_bps,
+ uint32_t ssrc,
+ uint32_t frequency,
+ uint32_t timestamp_offset,
+ int64_t rtcp_receive_time)
+ : fps_(fps),
+ bitrate_bps_(bitrate_bps),
+ ssrc_(ssrc),
+ frequency_(frequency),
+ next_rtp_time_(0),
+ next_rtcp_time_(rtcp_receive_time),
+ rtp_timestamp_offset_(timestamp_offset),
+ kNtpFracPerMs(4.294967296E6) {
+ RTC_DCHECK_GT(fps_, 0);
+}
+
+void RtpStream::set_rtp_timestamp_offset(uint32_t offset) {
+ rtp_timestamp_offset_ = offset;
+}
+
+// Generates a new frame for this stream. If called too soon after the
+// previous frame, no frame will be generated. The frame is split into
+// packets.
+int64_t RtpStream::GenerateFrame(int64_t time_now_us, PacketList* packets) {
+ if (time_now_us < next_rtp_time_) {
+ return next_rtp_time_;
+ }
+ RTC_DCHECK(packets);
+ size_t bits_per_frame = (bitrate_bps_ + fps_ / 2) / fps_;
+ size_t n_packets =
+ std::max<size_t>((bits_per_frame + 4 * kMtu) / (8 * kMtu), 1u);
+ size_t packet_size = (bits_per_frame + 4 * n_packets) / (8 * n_packets);
+ for (size_t i = 0; i < n_packets; ++i) {
+ RtpPacket* packet = new RtpPacket;
+ packet->send_time = time_now_us + kSendSideOffsetUs;
+ packet->size = packet_size;
+ packet->rtp_timestamp =
+ rtp_timestamp_offset_ +
+ static_cast<uint32_t>(((frequency_ / 1000) * packet->send_time + 500) /
+ 1000);
+ packet->ssrc = ssrc_;
+ packets->push_back(packet);
+ }
+ next_rtp_time_ = time_now_us + (1000000 + fps_ / 2) / fps_;
+ return next_rtp_time_;
+}
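+
+// Worked example, not part of the original file: at 30 fps and 300 kbps,
+// bits_per_frame = (300000 + 15) / 30 = 10000. With kMtu = 1200 bytes,
+// n_packets = max((10000 + 4800) / 9600, 1) = 1 and
+// packet_size = (10000 + 4) / 8 = 1250 bytes, so the frame goes out as a
+// single packet. The `+ fps_ / 2`, `+ 4 * kMtu` and `+ 4 * n_packets` terms
+// each add half the divisor, implementing round-to-nearest in integer
+// arithmetic.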
+
+// The send-side time when the next frame can be generated.
+int64_t RtpStream::next_rtp_time() const {
+ return next_rtp_time_;
+}
+
+// Generates an RTCP packet.
+RtpStream::RtcpPacket* RtpStream::Rtcp(int64_t time_now_us) {
+ if (time_now_us < next_rtcp_time_) {
+ return NULL;
+ }
+ RtcpPacket* rtcp = new RtcpPacket;
+ int64_t send_time_us = time_now_us + kSendSideOffsetUs;
+ rtcp->timestamp =
+ rtp_timestamp_offset_ +
+ static_cast<uint32_t>(((frequency_ / 1000) * send_time_us + 500) / 1000);
+ rtcp->ntp_secs = send_time_us / 1000000;
+ // Convert the sub-second remainder from microseconds to milliseconds
+ // before scaling, since kNtpFracPerMs is NTP fraction units per ms.
+ rtcp->ntp_frac = static_cast<int64_t>((send_time_us % 1000000) / 1000.0 *
+ kNtpFracPerMs);
+ rtcp->ssrc = ssrc_;
+ next_rtcp_time_ = time_now_us + kRtcpIntervalUs;
+ return rtcp;
+}
+
+void RtpStream::set_bitrate_bps(int bitrate_bps) {
+ ASSERT_GE(bitrate_bps, 0);
+ bitrate_bps_ = bitrate_bps;
+}
+
+int RtpStream::bitrate_bps() const {
+ return bitrate_bps_;
+}
+
+uint32_t RtpStream::ssrc() const {
+ return ssrc_;
+}
+
+bool RtpStream::Compare(const std::pair<uint32_t, RtpStream*>& left,
+ const std::pair<uint32_t, RtpStream*>& right) {
+ return left.second->next_rtp_time_ < right.second->next_rtp_time_;
+}
+
+StreamGenerator::StreamGenerator(int capacity, int64_t time_now)
+ : capacity_(capacity), prev_arrival_time_us_(time_now) {}
+
+StreamGenerator::~StreamGenerator() {
+ for (StreamMap::iterator it = streams_.begin(); it != streams_.end(); ++it) {
+ delete it->second;
+ }
+ streams_.clear();
+}
+
+// Add a new stream.
+void StreamGenerator::AddStream(RtpStream* stream) {
+ streams_[stream->ssrc()] = stream;
+}
+
+// Set the link capacity.
+void StreamGenerator::set_capacity_bps(int capacity_bps) {
+ ASSERT_GT(capacity_bps, 0);
+ capacity_ = capacity_bps;
+}
+
+// Divides `bitrate_bps` among all streams. The allocated bitrate per stream
+// is decided by the current allocation ratios.
+void StreamGenerator::SetBitrateBps(int bitrate_bps) {
+ ASSERT_GE(streams_.size(), 0u);
+ int total_bitrate_before = 0;
+ for (StreamMap::iterator it = streams_.begin(); it != streams_.end(); ++it) {
+ total_bitrate_before += it->second->bitrate_bps();
+ }
+ int64_t bitrate_before = 0;
+ int total_bitrate_after = 0;
+ for (StreamMap::iterator it = streams_.begin(); it != streams_.end(); ++it) {
+ bitrate_before += it->second->bitrate_bps();
+ int64_t bitrate_after =
+ (bitrate_before * bitrate_bps + total_bitrate_before / 2) /
+ total_bitrate_before;
+ it->second->set_bitrate_bps(bitrate_after - total_bitrate_after);
+ total_bitrate_after += it->second->bitrate_bps();
+ }
+ ASSERT_EQ(bitrate_before, total_bitrate_before);
+ EXPECT_EQ(total_bitrate_after, bitrate_bps);
+}
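+
+// Worked example, not part of the original file: the cumulative-sum rounding
+// above makes the per-stream bitrates add up exactly to `bitrate_bps`.
+// Splitting 100000 bps across three streams currently at 10000 bps each
+// assigns 33333, 33334 and 33333 bps, which sums to 100000 with no rounding
+// residue.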
+
+// Set the RTP timestamp offset for the stream identified by `ssrc`.
+void StreamGenerator::set_rtp_timestamp_offset(uint32_t ssrc, uint32_t offset) {
+ streams_[ssrc]->set_rtp_timestamp_offset(offset);
+}
+
+// TODO(holmer): Break out the channel simulation part from this class to make
+// it possible to simulate different types of channels.
+int64_t StreamGenerator::GenerateFrame(RtpStream::PacketList* packets,
+ int64_t time_now_us) {
+ RTC_DCHECK(packets);
+ RTC_DCHECK(packets->empty());
+ RTC_DCHECK_GT(capacity_, 0);
+ StreamMap::iterator it =
+ std::min_element(streams_.begin(), streams_.end(), RtpStream::Compare);
+ (*it).second->GenerateFrame(time_now_us, packets);
+ for (RtpStream::PacketList::iterator packet_it = packets->begin();
+ packet_it != packets->end(); ++packet_it) {
+ int capacity_bpus = capacity_ / 1000;
+ int64_t required_network_time_us =
+ (8 * 1000 * (*packet_it)->size + capacity_bpus / 2) / capacity_bpus;
+ prev_arrival_time_us_ =
+ std::max(time_now_us + required_network_time_us,
+ prev_arrival_time_us_ + required_network_time_us);
+ (*packet_it)->arrival_time = prev_arrival_time_us_;
+ }
+ it = std::min_element(streams_.begin(), streams_.end(), RtpStream::Compare);
+ return std::max((*it).second->next_rtp_time(), time_now_us);
+}
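+
+// Worked example, not part of the original file: the loop above models a
+// single bottleneck link where each packet's serialization delay is
+// size * 8 / capacity, in integer math. For a 1200-byte packet on a 1 Mbps
+// link (capacity_bpus = 1000):
+//
+//   required_network_time_us = (8 * 1000 * 1200 + 500) / 1000;  // 9600 us.
+//
+// Back-to-back packets queue via prev_arrival_time_us_, so arrival times
+// never compress below the link's serialization rate.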
+} // namespace testing
+
+RemoteBitrateEstimatorTest::RemoteBitrateEstimatorTest()
+ : clock_(100000000),
+ bitrate_observer_(new testing::TestBitrateObserver),
+ stream_generator_(
+ new testing::StreamGenerator(1e6, // Capacity.
+ clock_.TimeInMicroseconds())),
+ arrival_time_offset_ms_(0) {}
+
+RemoteBitrateEstimatorTest::~RemoteBitrateEstimatorTest() {}
+
+void RemoteBitrateEstimatorTest::AddDefaultStream() {
+ stream_generator_->AddStream(
+ new testing::RtpStream(30, // Frames per second.
+ 3e5, // Bitrate.
+ 1, // SSRC.
+ 90000, // RTP frequency.
+ 0xFFFFF000, // Timestamp offset.
+ 0)); // RTCP receive time.
+}
+
+uint32_t RemoteBitrateEstimatorTest::AbsSendTime(int64_t t, int64_t denom) {
+ return (((t << 18) + (denom >> 1)) / denom) & 0x00fffffful;
+}
+
+uint32_t RemoteBitrateEstimatorTest::AddAbsSendTime(uint32_t t1, uint32_t t2) {
+ return (t1 + t2) & 0x00fffffful;
+}
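+
+// Worked example, not part of the original file: AbsSendTime() produces the
+// 24-bit 6.18 fixed-point format of the absolute send time extension, where
+// one second equals 1 << 18 = 262144 units:
+//
+//   AbsSendTime(250, 1000);         // 250 ms -> 65536 (a quarter second).
+//   AddAbsSendTime(0x00FFFFFF, 1);  // Wraps around to 0.
+//
+// The & 0x00fffffful mask keeps every result within the 24-bit range.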
+
+const uint32_t RemoteBitrateEstimatorTest::kDefaultSsrc = 1;
+
+void RemoteBitrateEstimatorTest::IncomingPacket(uint32_t ssrc,
+ size_t payload_size,
+ int64_t arrival_time,
+ uint32_t rtp_timestamp,
+ uint32_t absolute_send_time) {
+ RTPHeader header;
+ memset(&header, 0, sizeof(header));
+ header.ssrc = ssrc;
+ header.timestamp = rtp_timestamp;
+ header.extension.hasAbsoluteSendTime = true;
+ header.extension.absoluteSendTime = absolute_send_time;
+ RTC_CHECK_GE(arrival_time + arrival_time_offset_ms_, 0);
+ bitrate_estimator_->IncomingPacket(arrival_time + arrival_time_offset_ms_,
+ payload_size, header);
+}
+
+// Generates a frame of packets belonging to a stream at a given bitrate and
+// with a given ssrc. The stream is pushed through a very simple simulated
+// network, and is then given to the receive-side bandwidth estimator.
+// Returns true if an over-use was seen, false otherwise.
+// bitrate_observer_->updated() should be used to check for any changes in
+// the target bitrate after the call to this function.
+bool RemoteBitrateEstimatorTest::GenerateAndProcessFrame(uint32_t ssrc,
+ uint32_t bitrate_bps) {
+ RTC_DCHECK_GT(bitrate_bps, 0);
+ stream_generator_->SetBitrateBps(bitrate_bps);
+ testing::RtpStream::PacketList packets;
+ int64_t next_time_us =
+ stream_generator_->GenerateFrame(&packets, clock_.TimeInMicroseconds());
+ bool overuse = false;
+ while (!packets.empty()) {
+ testing::RtpStream::RtpPacket* packet = packets.front();
+ bitrate_observer_->Reset();
+ // The simulated clock should match the time of packet->arrival_time
+ // since both are used in IncomingPacket().
+ clock_.AdvanceTimeMicroseconds(packet->arrival_time -
+ clock_.TimeInMicroseconds());
+ IncomingPacket(packet->ssrc, packet->size,
+ (packet->arrival_time + 500) / 1000, packet->rtp_timestamp,
+ AbsSendTime(packet->send_time, 1000000));
+ if (bitrate_observer_->updated()) {
+ if (bitrate_observer_->latest_bitrate() < bitrate_bps)
+ overuse = true;
+ }
+ delete packet;
+ packets.pop_front();
+ }
+ bitrate_estimator_->Process();
+ clock_.AdvanceTimeMicroseconds(next_time_us - clock_.TimeInMicroseconds());
+ return overuse;
+}
+
+// Run the bandwidth estimator with a stream of at most
+// `max_number_of_frames` frames, or until it reaches `target_bitrate`.
+// Can for instance be used to run the estimator for some time to get it
+// into a steady state.
+uint32_t RemoteBitrateEstimatorTest::SteadyStateRun(uint32_t ssrc,
+ int max_number_of_frames,
+ uint32_t start_bitrate,
+ uint32_t min_bitrate,
+ uint32_t max_bitrate,
+ uint32_t target_bitrate) {
+ uint32_t bitrate_bps = start_bitrate;
+ bool bitrate_update_seen = false;
+ // Produce up to `max_number_of_frames` frames and give them to the
+ // estimator.
+ for (int i = 0; i < max_number_of_frames; ++i) {
+ bool overuse = GenerateAndProcessFrame(ssrc, bitrate_bps);
+ if (overuse) {
+ EXPECT_LT(bitrate_observer_->latest_bitrate(), max_bitrate);
+ EXPECT_GT(bitrate_observer_->latest_bitrate(), min_bitrate);
+ bitrate_bps = bitrate_observer_->latest_bitrate();
+ bitrate_update_seen = true;
+ } else if (bitrate_observer_->updated()) {
+ bitrate_bps = bitrate_observer_->latest_bitrate();
+ bitrate_observer_->Reset();
+ }
+ if (bitrate_update_seen && bitrate_bps > target_bitrate) {
+ break;
+ }
+ }
+ EXPECT_TRUE(bitrate_update_seen);
+ return bitrate_bps;
+}
+
+void RemoteBitrateEstimatorTest::InitialBehaviorTestHelper(
+ uint32_t expected_converge_bitrate) {
+ const int kFramerate = 50; // 50 fps to avoid rounding errors.
+ const int kFrameIntervalMs = 1000 / kFramerate;
+ const uint32_t kFrameIntervalAbsSendTime = AbsSendTime(1, kFramerate);
+ uint32_t timestamp = 0;
+ uint32_t absolute_send_time = 0;
+ EXPECT_EQ(bitrate_estimator_->LatestEstimate(), DataRate::Zero());
+ clock_.AdvanceTimeMilliseconds(1000);
+ bitrate_estimator_->Process();
+ EXPECT_EQ(bitrate_estimator_->LatestEstimate(), DataRate::Zero());
+ EXPECT_FALSE(bitrate_observer_->updated());
+ bitrate_observer_->Reset();
+ clock_.AdvanceTimeMilliseconds(1000);
+ // Inserting packets for 5 seconds to get a valid estimate.
+ for (int i = 0; i < 5 * kFramerate + 1 + kNumInitialPackets; ++i) {
+ if (i == kNumInitialPackets) {
+ bitrate_estimator_->Process();
+ EXPECT_EQ(bitrate_estimator_->LatestEstimate(), DataRate::Zero());
+ EXPECT_FALSE(bitrate_observer_->updated());
+ bitrate_observer_->Reset();
+ }
+
+ IncomingPacket(kDefaultSsrc, kMtu, clock_.TimeInMilliseconds(), timestamp,
+ absolute_send_time);
+ clock_.AdvanceTimeMilliseconds(1000 / kFramerate);
+ timestamp += 90 * kFrameIntervalMs;
+ absolute_send_time =
+ AddAbsSendTime(absolute_send_time, kFrameIntervalAbsSendTime);
+ }
+ bitrate_estimator_->Process();
+ uint32_t bitrate_bps = bitrate_estimator_->LatestEstimate().bps<uint32_t>();
+ EXPECT_NEAR(expected_converge_bitrate, bitrate_bps, kAcceptedBitrateErrorBps);
+ EXPECT_TRUE(bitrate_observer_->updated());
+ bitrate_observer_->Reset();
+ EXPECT_EQ(bitrate_observer_->latest_bitrate(), bitrate_bps);
+ bitrate_estimator_->RemoveStream(kDefaultSsrc);
+ EXPECT_EQ(bitrate_estimator_->LatestEstimate(), DataRate::Zero());
+}
+
+void RemoteBitrateEstimatorTest::RateIncreaseReorderingTestHelper(
+ uint32_t expected_bitrate_bps) {
+ const int kFramerate = 50; // 50 fps to avoid rounding errors.
+ const int kFrameIntervalMs = 1000 / kFramerate;
+ const uint32_t kFrameIntervalAbsSendTime = AbsSendTime(1, kFramerate);
+ uint32_t timestamp = 0;
+ uint32_t absolute_send_time = 0;
+ // Inserting packets for five seconds to get a valid estimate.
+ for (int i = 0; i < 5 * kFramerate + 1 + kNumInitialPackets; ++i) {
+ // TODO(sprang): Remove this hack once the single stream estimator is gone,
+ // as it doesn't do anything in Process().
+ if (i == kNumInitialPackets) {
+ // Process after we have enough frames to get a valid input rate estimate.
+ bitrate_estimator_->Process();
+ EXPECT_FALSE(bitrate_observer_->updated()); // No valid estimate.
+ }
+
+ IncomingPacket(kDefaultSsrc, kMtu, clock_.TimeInMilliseconds(), timestamp,
+ absolute_send_time);
+ clock_.AdvanceTimeMilliseconds(kFrameIntervalMs);
+ timestamp += 90 * kFrameIntervalMs;
+ absolute_send_time =
+ AddAbsSendTime(absolute_send_time, kFrameIntervalAbsSendTime);
+ }
+ bitrate_estimator_->Process();
+ EXPECT_TRUE(bitrate_observer_->updated());
+ EXPECT_NEAR(expected_bitrate_bps, bitrate_observer_->latest_bitrate(),
+ kAcceptedBitrateErrorBps);
+ for (int i = 0; i < 10; ++i) {
+ clock_.AdvanceTimeMilliseconds(2 * kFrameIntervalMs);
+ timestamp += 2 * 90 * kFrameIntervalMs;
+ absolute_send_time =
+ AddAbsSendTime(absolute_send_time, 2 * kFrameIntervalAbsSendTime);
+ IncomingPacket(kDefaultSsrc, 1000, clock_.TimeInMilliseconds(), timestamp,
+ absolute_send_time);
+ IncomingPacket(
+ kDefaultSsrc, 1000, clock_.TimeInMilliseconds(),
+ timestamp - 90 * kFrameIntervalMs,
+ AddAbsSendTime(absolute_send_time,
+ -static_cast<int>(kFrameIntervalAbsSendTime)));
+ }
+ bitrate_estimator_->Process();
+ EXPECT_TRUE(bitrate_observer_->updated());
+ EXPECT_NEAR(expected_bitrate_bps, bitrate_observer_->latest_bitrate(),
+ kAcceptedBitrateErrorBps);
+}
+
+// Make sure we initially increase the bitrate as expected.
+void RemoteBitrateEstimatorTest::RateIncreaseRtpTimestampsTestHelper(
+ int expected_iterations) {
+ // This threshold corresponds approximately to increasing linearly with
+ // bitrate(i) = 1.04 * bitrate(i-1) + 1000
+ // until bitrate(i) > 500000, with bitrate(1) ~= 30000.
+ uint32_t bitrate_bps = 30000;
+ int iterations = 0;
+ AddDefaultStream();
+ // Feed the estimator with a stream of packets and verify that it reaches
+ // 500 kbps at the expected time.
+ while (bitrate_bps < 5e5) {
+ bool overuse = GenerateAndProcessFrame(kDefaultSsrc, bitrate_bps);
+ if (overuse) {
+ EXPECT_GT(bitrate_observer_->latest_bitrate(), bitrate_bps);
+ bitrate_bps = bitrate_observer_->latest_bitrate();
+ bitrate_observer_->Reset();
+ } else if (bitrate_observer_->updated()) {
+ bitrate_bps = bitrate_observer_->latest_bitrate();
+ bitrate_observer_->Reset();
+ }
+ ++iterations;
+ ASSERT_LE(iterations, expected_iterations);
+ }
+ ASSERT_EQ(expected_iterations, iterations);
+}
+
+void RemoteBitrateEstimatorTest::CapacityDropTestHelper(
+ int number_of_streams,
+ bool wrap_time_stamp,
+ uint32_t expected_bitrate_drop_delta,
+ int64_t receiver_clock_offset_change_ms) {
+ const int kFramerate = 30;
+ const int kStartBitrate = 900e3;
+ const int kMinExpectedBitrate = 800e3;
+ const int kMaxExpectedBitrate = 1100e3;
+ const uint32_t kInitialCapacityBps = 1000e3;
+ const uint32_t kReducedCapacityBps = 500e3;
+
+ int steady_state_time = 0;
+ if (number_of_streams <= 1) {
+ steady_state_time = 10;
+ AddDefaultStream();
+ } else {
+ steady_state_time = 10 * number_of_streams;
+ int bitrate_sum = 0;
+ int kBitrateDenom = number_of_streams * (number_of_streams - 1);
+ for (int i = 0; i < number_of_streams; i++) {
+ // The first stream gets half of the available bitrate, while the rest
+ // share the remaining half, i.e. 1/2 = Sum[n/(N*(N-1))] for n = 1..N-1
+ // (rounded up).
+ int bitrate = kStartBitrate / 2;
+ if (i > 0) {
+ bitrate = (kStartBitrate * i + kBitrateDenom / 2) / kBitrateDenom;
+ }
+ uint32_t mask = ~0ull << (32 - i);
+ stream_generator_->AddStream(
+ new testing::RtpStream(kFramerate, // Frames per second.
+ bitrate, // Bitrate.
+ kDefaultSsrc + i, // SSRC.
+ 90000, // RTP frequency.
+ 0xFFFFF000u ^ mask, // Timestamp offset.
+ 0)); // RTCP receive time.
+ bitrate_sum += bitrate;
+ }
+ ASSERT_EQ(bitrate_sum, kStartBitrate);
+ }
+ if (wrap_time_stamp) {
+ stream_generator_->set_rtp_timestamp_offset(
+ kDefaultSsrc,
+ std::numeric_limits<uint32_t>::max() - steady_state_time * 90000);
+ }
+
+ // Run in steady state to make the estimator converge.
+ stream_generator_->set_capacity_bps(kInitialCapacityBps);
+ uint32_t bitrate_bps = SteadyStateRun(
+ kDefaultSsrc, steady_state_time * kFramerate, kStartBitrate,
+ kMinExpectedBitrate, kMaxExpectedBitrate, kInitialCapacityBps);
+ EXPECT_NEAR(kInitialCapacityBps, bitrate_bps, 130000u);
+ bitrate_observer_->Reset();
+
+ // Add an offset to make sure the BWE can handle it.
+ arrival_time_offset_ms_ += receiver_clock_offset_change_ms;
+
+ // Reduce the capacity and verify the decrease time.
+ stream_generator_->set_capacity_bps(kReducedCapacityBps);
+ int64_t overuse_start_time = clock_.TimeInMilliseconds();
+ int64_t bitrate_drop_time = -1;
+ for (int i = 0; i < 100 * number_of_streams; ++i) {
+ GenerateAndProcessFrame(kDefaultSsrc, bitrate_bps);
+ if (bitrate_drop_time == -1 &&
+ bitrate_observer_->latest_bitrate() <= kReducedCapacityBps) {
+ bitrate_drop_time = clock_.TimeInMilliseconds();
+ }
+ if (bitrate_observer_->updated())
+ bitrate_bps = bitrate_observer_->latest_bitrate();
+ }
+
+ EXPECT_NEAR(expected_bitrate_drop_delta,
+ bitrate_drop_time - overuse_start_time, 33);
+
+ // Remove stream one by one.
+ for (int i = 0; i < number_of_streams; i++) {
+ EXPECT_EQ(bitrate_estimator_->LatestEstimate().bps(), bitrate_bps);
+ bitrate_estimator_->RemoveStream(kDefaultSsrc + i);
+ }
+ EXPECT_EQ(bitrate_estimator_->LatestEstimate(), DataRate::Zero());
+}
+
+void RemoteBitrateEstimatorTest::TestTimestampGroupingTestHelper() {
+ const int kFramerate = 50; // 50 fps to avoid rounding errors.
+ const int kFrameIntervalMs = 1000 / kFramerate;
+ const uint32_t kFrameIntervalAbsSendTime = AbsSendTime(1, kFramerate);
+ uint32_t timestamp = 0;
+ // Initialize absolute_send_time (24 bits) so that it will definitely wrap
+ // during the test.
+ uint32_t absolute_send_time = AddAbsSendTime(
+ (1 << 24), -static_cast<int>(50 * kFrameIntervalAbsSendTime));
+ // Initial set of frames to increase the bitrate. 6 seconds to have enough
+ // time for the first estimate to be generated and for Process() to be called.
+ for (int i = 0; i <= 6 * kFramerate; ++i) {
+ IncomingPacket(kDefaultSsrc, 1000, clock_.TimeInMilliseconds(), timestamp,
+ absolute_send_time);
+ bitrate_estimator_->Process();
+ clock_.AdvanceTimeMilliseconds(kFrameIntervalMs);
+ timestamp += 90 * kFrameIntervalMs;
+ absolute_send_time =
+ AddAbsSendTime(absolute_send_time, kFrameIntervalAbsSendTime);
+ }
+ EXPECT_TRUE(bitrate_observer_->updated());
+ EXPECT_GE(bitrate_observer_->latest_bitrate(), 400000u);
+
+ // Insert batches of frames which were sent very close in time. Also simulate
+ // capacity over-use to see that we back off correctly.
+ const int kTimestampGroupLength = 15;
+ const uint32_t kTimestampGroupLengthAbsSendTime =
+ AbsSendTime(kTimestampGroupLength, 90000);
+ const uint32_t kSingleRtpTickAbsSendTime = AbsSendTime(1, 90000);
+ for (int i = 0; i < 100; ++i) {
+ for (int j = 0; j < kTimestampGroupLength; ++j) {
+ // Insert `kTimestampGroupLength` frames with just 1 timestamp tick in
+ // between. They should be treated as part of the same group by the
+ // estimator.
+ IncomingPacket(kDefaultSsrc, 100, clock_.TimeInMilliseconds(), timestamp,
+ absolute_send_time);
+ clock_.AdvanceTimeMilliseconds(kFrameIntervalMs / kTimestampGroupLength);
+ timestamp += 1;
+ absolute_send_time =
+ AddAbsSendTime(absolute_send_time, kSingleRtpTickAbsSendTime);
+ }
+ // Increase time until next batch to simulate over-use.
+ clock_.AdvanceTimeMilliseconds(10);
+ timestamp += 90 * kFrameIntervalMs - kTimestampGroupLength;
+ absolute_send_time = AddAbsSendTime(
+ absolute_send_time,
+ AddAbsSendTime(kFrameIntervalAbsSendTime,
+ -static_cast<int>(kTimestampGroupLengthAbsSendTime)));
+ bitrate_estimator_->Process();
+ }
+ EXPECT_TRUE(bitrate_observer_->updated());
+ // Should have reduced the estimate.
+ EXPECT_LT(bitrate_observer_->latest_bitrate(), 400000u);
+}
+
+void RemoteBitrateEstimatorTest::TestWrappingHelper(int silence_time_s) {
+ const int kFramerate = 100;
+ const int kFrameIntervalMs = 1000 / kFramerate;
+ const uint32_t kFrameIntervalAbsSendTime = AbsSendTime(1, kFramerate);
+ uint32_t absolute_send_time = 0;
+ uint32_t timestamp = 0;
+
+ for (size_t i = 0; i < 3000; ++i) {
+ IncomingPacket(kDefaultSsrc, 1000, clock_.TimeInMilliseconds(), timestamp,
+ absolute_send_time);
+ timestamp += kFrameIntervalMs;
+ clock_.AdvanceTimeMilliseconds(kFrameIntervalMs);
+ absolute_send_time =
+ AddAbsSendTime(absolute_send_time, kFrameIntervalAbsSendTime);
+ bitrate_estimator_->Process();
+ }
+ DataRate bitrate_before = bitrate_estimator_->LatestEstimate();
+
+ clock_.AdvanceTimeMilliseconds(silence_time_s * 1000);
+ absolute_send_time =
+ AddAbsSendTime(absolute_send_time, AbsSendTime(silence_time_s, 1));
+ bitrate_estimator_->Process();
+ for (size_t i = 0; i < 21; ++i) {
+ IncomingPacket(kDefaultSsrc, 1000, clock_.TimeInMilliseconds(), timestamp,
+ absolute_send_time);
+ timestamp += kFrameIntervalMs;
+ clock_.AdvanceTimeMilliseconds(2 * kFrameIntervalMs);
+ absolute_send_time =
+ AddAbsSendTime(absolute_send_time, kFrameIntervalAbsSendTime);
+ bitrate_estimator_->Process();
+ }
+ DataRate bitrate_after = bitrate_estimator_->LatestEstimate();
+ EXPECT_LT(bitrate_after, bitrate_before);
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_unittest_helper.h b/third_party/libwebrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_unittest_helper.h
new file mode 100644
index 0000000000..a3b1cfdb34
--- /dev/null
+++ b/third_party/libwebrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_unittest_helper.h
@@ -0,0 +1,225 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_REMOTE_BITRATE_ESTIMATOR_REMOTE_BITRATE_ESTIMATOR_UNITTEST_HELPER_H_
+#define MODULES_REMOTE_BITRATE_ESTIMATOR_REMOTE_BITRATE_ESTIMATOR_UNITTEST_HELPER_H_
+
+#include <list>
+#include <map>
+#include <memory>
+#include <utility>
+#include <vector>
+
+#include "modules/remote_bitrate_estimator/include/remote_bitrate_estimator.h"
+#include "system_wrappers/include/clock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace testing {
+
+class TestBitrateObserver : public RemoteBitrateObserver {
+ public:
+ TestBitrateObserver() : updated_(false), latest_bitrate_(0) {}
+ virtual ~TestBitrateObserver() {}
+
+ void OnReceiveBitrateChanged(const std::vector<uint32_t>& ssrcs,
+ uint32_t bitrate) override;
+
+ void Reset() { updated_ = false; }
+
+ bool updated() const { return updated_; }
+
+ uint32_t latest_bitrate() const { return latest_bitrate_; }
+
+ private:
+ bool updated_;
+ uint32_t latest_bitrate_;
+};
+
+class RtpStream {
+ public:
+ struct RtpPacket {
+ int64_t send_time;
+ int64_t arrival_time;
+ uint32_t rtp_timestamp;
+ size_t size;
+ uint32_t ssrc;
+ };
+
+ struct RtcpPacket {
+ uint32_t ntp_secs;
+ uint32_t ntp_frac;
+ uint32_t timestamp;
+ uint32_t ssrc;
+ };
+
+ typedef std::list<RtpPacket*> PacketList;
+
+ enum { kSendSideOffsetUs = 1000000 };
+
+ RtpStream(int fps,
+ int bitrate_bps,
+ uint32_t ssrc,
+ uint32_t frequency,
+ uint32_t timestamp_offset,
+ int64_t rtcp_receive_time);
+
+ RtpStream(const RtpStream&) = delete;
+ RtpStream& operator=(const RtpStream&) = delete;
+
+ void set_rtp_timestamp_offset(uint32_t offset);
+
+ // Generates a new frame for this stream. If called too soon after the
+ // previous frame, no frame will be generated. The frame is split into
+ // packets.
+ int64_t GenerateFrame(int64_t time_now_us, PacketList* packets);
+
+ // The send-side time when the next frame can be generated.
+ int64_t next_rtp_time() const;
+
+ // Generates an RTCP packet.
+ RtcpPacket* Rtcp(int64_t time_now_us);
+
+ void set_bitrate_bps(int bitrate_bps);
+
+ int bitrate_bps() const;
+
+ uint32_t ssrc() const;
+
+ static bool Compare(const std::pair<uint32_t, RtpStream*>& left,
+ const std::pair<uint32_t, RtpStream*>& right);
+
+ private:
+ enum { kRtcpIntervalUs = 1000000 };
+
+ int fps_;
+ int bitrate_bps_;
+ uint32_t ssrc_;
+ uint32_t frequency_;
+ int64_t next_rtp_time_;
+ int64_t next_rtcp_time_;
+ uint32_t rtp_timestamp_offset_;
+ const double kNtpFracPerMs;
+};
+
+class StreamGenerator {
+ public:
+ typedef std::list<RtpStream::RtcpPacket*> RtcpList;
+
+ StreamGenerator(int capacity, int64_t time_now);
+
+ ~StreamGenerator();
+
+ StreamGenerator(const StreamGenerator&) = delete;
+ StreamGenerator& operator=(const StreamGenerator&) = delete;
+
+ // Add a new stream.
+ void AddStream(RtpStream* stream);
+
+ // Set the link capacity.
+ void set_capacity_bps(int capacity_bps);
+
+ // Divides `bitrate_bps` among all streams. The allocated bitrate per stream
+ // is decided by the current allocation ratios.
+ void SetBitrateBps(int bitrate_bps);
+
+ // Set the RTP timestamp offset for the stream identified by `ssrc`.
+ void set_rtp_timestamp_offset(uint32_t ssrc, uint32_t offset);
+
+ // TODO(holmer): Break out the channel simulation part from this class to make
+ // it possible to simulate different types of channels.
+ int64_t GenerateFrame(RtpStream::PacketList* packets, int64_t time_now_us);
+
+ private:
+ typedef std::map<uint32_t, RtpStream*> StreamMap;
+
+ // Capacity of the simulated channel in bits per second.
+ int capacity_;
+ // The time when the last packet arrived.
+ int64_t prev_arrival_time_us_;
+ // All streams being transmitted on this simulated channel.
+ StreamMap streams_;
+};
+} // namespace testing
+
+class RemoteBitrateEstimatorTest : public ::testing::Test {
+ public:
+ RemoteBitrateEstimatorTest();
+ virtual ~RemoteBitrateEstimatorTest();
+
+ RemoteBitrateEstimatorTest(const RemoteBitrateEstimatorTest&) = delete;
+ RemoteBitrateEstimatorTest& operator=(const RemoteBitrateEstimatorTest&) =
+ delete;
+
+ protected:
+ virtual void SetUp() = 0;
+
+ void AddDefaultStream();
+
+ // Helper to convert a time value to the resolution used in the absolute
+ // send time header extension, rounded to the nearest unit. `t` is the time
+ // to convert, in some resolution. `denom` is the value to divide `t` by to
+ // get whole seconds, e.g. `denom` = 1000 if `t` is in milliseconds.
+ static uint32_t AbsSendTime(int64_t t, int64_t denom);
+
+ // Helper to add two absolute send time values and keep the result less
+ // than 1 << 24.
+ static uint32_t AddAbsSendTime(uint32_t t1, uint32_t t2);
+
+ // Helper to create an RTPHeader containing the relevant data for the
+ // estimator (all other fields are cleared) and call IncomingPacket on the
+ // estimator.
+ void IncomingPacket(uint32_t ssrc,
+ size_t payload_size,
+ int64_t arrival_time,
+ uint32_t rtp_timestamp,
+ uint32_t absolute_send_time);
+
+ // Generates a frame of packets belonging to a stream at a given bitrate and
+ // with a given ssrc. The stream is pushed through a very simple simulated
+ // network, and is then given to the receive-side bandwidth estimator.
+ // Returns true if an over-use was seen, false otherwise.
+ // bitrate_observer_->updated() should be used to check for any changes in
+ // the target bitrate after the call to this function.
+ bool GenerateAndProcessFrame(uint32_t ssrc, uint32_t bitrate_bps);
+
+ // Run the bandwidth estimator with a stream of `number_of_frames` frames, or
+ // until it reaches `target_bitrate`.
+ // Can for instance be used to run the estimator for some time to get it
+ // into a steady state.
+ uint32_t SteadyStateRun(uint32_t ssrc,
+ int number_of_frames,
+ uint32_t start_bitrate,
+ uint32_t min_bitrate,
+ uint32_t max_bitrate,
+ uint32_t target_bitrate);
+
+ void TestTimestampGroupingTestHelper();
+
+ void TestWrappingHelper(int silence_time_s);
+
+ void InitialBehaviorTestHelper(uint32_t expected_converge_bitrate);
+ void RateIncreaseReorderingTestHelper(uint32_t expected_bitrate);
+ void RateIncreaseRtpTimestampsTestHelper(int expected_iterations);
+ void CapacityDropTestHelper(int number_of_streams,
+ bool wrap_time_stamp,
+ uint32_t expected_bitrate_drop_delta,
+ int64_t receiver_clock_offset_change_ms);
+
+ static const uint32_t kDefaultSsrc;
+
+ SimulatedClock clock_; // Time at the receiver.
+ std::unique_ptr<testing::TestBitrateObserver> bitrate_observer_;
+ std::unique_ptr<RemoteBitrateEstimator> bitrate_estimator_;
+ std::unique_ptr<testing::StreamGenerator> stream_generator_;
+ int64_t arrival_time_offset_ms_;
+};
+} // namespace webrtc
+
+#endif // MODULES_REMOTE_BITRATE_ESTIMATOR_REMOTE_BITRATE_ESTIMATOR_UNITTEST_HELPER_H_
diff --git a/third_party/libwebrtc/modules/remote_bitrate_estimator/remote_estimator_proxy.cc b/third_party/libwebrtc/modules/remote_bitrate_estimator/remote_estimator_proxy.cc
new file mode 100644
index 0000000000..b83720d1a8
--- /dev/null
+++ b/third_party/libwebrtc/modules/remote_bitrate_estimator/remote_estimator_proxy.cc
@@ -0,0 +1,328 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/remote_bitrate_estimator/remote_estimator_proxy.h"
+
+#include <algorithm>
+#include <limits>
+#include <memory>
+#include <utility>
+
+#include "api/units/data_size.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/remote_estimate.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/transport_feedback.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/numerics/safe_minmax.h"
+#include "system_wrappers/include/clock.h"
+
+namespace webrtc {
+namespace {
+// The maximum allowed value for a timestamp in milliseconds. This is lower
+// than the numerical limit since we often convert to microseconds.
+static constexpr int64_t kMaxTimeMs =
+ std::numeric_limits<int64_t>::max() / 1000;
+
+TimeDelta GetAbsoluteSendTimeDelta(uint32_t new_sendtime,
+ uint32_t previous_sendtime) {
+ static constexpr uint32_t kWrapAroundPeriod = 0x0100'0000;
+ RTC_DCHECK_LT(new_sendtime, kWrapAroundPeriod);
+ RTC_DCHECK_LT(previous_sendtime, kWrapAroundPeriod);
+ uint32_t delta = (new_sendtime - previous_sendtime) % kWrapAroundPeriod;
+ if (delta >= kWrapAroundPeriod / 2) {
+ // Absolute send time wraps around, so treat deltas larger than half of
+ // the wrap-around period as negative. Ignore reordering of packets and
+ // treat them as if they have approximately the same send time.
+ return TimeDelta::Zero();
+ }
+ return TimeDelta::Micros(int64_t{delta} * 1'000'000 / (1 << 18));
+}
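+
+// Worked example, not part of the original file: the send time clock is
+// 24-bit 6.18 fixed point, so a wrap from 0x00FFFFF0 to 100 gives
+// delta = (100 - 0x00FFFFF0) % 0x01000000 = 116 units, i.e.
+// 116 * 1'000'000 / (1 << 18) = 442 us, while deltas of half the period or
+// more are treated as reordering and return TimeDelta::Zero().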
+} // namespace
+
+RemoteEstimatorProxy::RemoteEstimatorProxy(
+ TransportFeedbackSender feedback_sender,
+ const FieldTrialsView* key_value_config,
+ NetworkStateEstimator* network_state_estimator)
+ : feedback_sender_(std::move(feedback_sender)),
+ send_config_(key_value_config),
+ last_process_time_(Timestamp::MinusInfinity()),
+ network_state_estimator_(network_state_estimator),
+ media_ssrc_(0),
+ feedback_packet_count_(0),
+ packet_overhead_(DataSize::Zero()),
+ send_interval_(send_config_.default_interval.Get()),
+ send_periodic_feedback_(true),
+ previous_abs_send_time_(0),
+ abs_send_timestamp_(Timestamp::Zero()) {
+ RTC_LOG(LS_INFO)
+ << "Maximum interval between transport feedback RTCP messages (ms): "
+ << send_config_.max_interval->ms();
+}
+
+RemoteEstimatorProxy::~RemoteEstimatorProxy() {}
+
+void RemoteEstimatorProxy::MaybeCullOldPackets(int64_t sequence_number,
+ Timestamp arrival_time) {
+ if (periodic_window_start_seq_ >=
+ packet_arrival_times_.end_sequence_number() &&
+ arrival_time - Timestamp::Zero() >= send_config_.back_window.Get()) {
+ // Start new feedback packet, cull old packets.
+ packet_arrival_times_.RemoveOldPackets(
+ sequence_number, arrival_time - send_config_.back_window.Get());
+ }
+}
+
+void RemoteEstimatorProxy::IncomingPacket(int64_t arrival_time_ms,
+ size_t payload_size,
+ const RTPHeader& header) {
+ if (arrival_time_ms < 0 || arrival_time_ms >= kMaxTimeMs) {
+ RTC_LOG(LS_WARNING) << "Arrival time out of bounds: " << arrival_time_ms;
+ return;
+ }
+ Packet packet = {.arrival_time = Timestamp::Millis(arrival_time_ms),
+ .size = DataSize::Bytes(header.headerLength + payload_size),
+ .ssrc = header.ssrc};
+ if (header.extension.hasTransportSequenceNumber) {
+ packet.transport_sequence_number = header.extension.transportSequenceNumber;
+ }
+ if (header.extension.hasAbsoluteSendTime) {
+ packet.absolute_send_time_24bits = header.extension.absoluteSendTime;
+ }
+ packet.feedback_request = header.extension.feedback_request;
+
+ IncomingPacket(packet);
+}
+
+void RemoteEstimatorProxy::IncomingPacket(Packet packet) {
+ MutexLock lock(&lock_);
+ media_ssrc_ = packet.ssrc;
+ int64_t seq = 0;
+
+ if (packet.transport_sequence_number.has_value()) {
+ seq = unwrapper_.Unwrap(*packet.transport_sequence_number);
+
+ if (send_periodic_feedback_) {
+ MaybeCullOldPackets(seq, packet.arrival_time);
+
+ if (!periodic_window_start_seq_ || seq < *periodic_window_start_seq_) {
+ periodic_window_start_seq_ = seq;
+ }
+ }
+
+ // We are only interested in the first time a packet is received.
+ if (packet_arrival_times_.has_received(seq)) {
+ return;
+ }
+
+ packet_arrival_times_.AddPacket(seq, packet.arrival_time);
+
+ // Limit the range of sequence numbers to send feedback for.
+ if (!periodic_window_start_seq_.has_value() ||
+ periodic_window_start_seq_.value() <
+ packet_arrival_times_.begin_sequence_number()) {
+ periodic_window_start_seq_ =
+ packet_arrival_times_.begin_sequence_number();
+ }
+
+ if (packet.feedback_request) {
+ // Send feedback packet immediately.
+ SendFeedbackOnRequest(seq, *packet.feedback_request);
+ }
+ }
+
+ if (network_state_estimator_ && packet.absolute_send_time_24bits) {
+ PacketResult packet_result;
+ packet_result.receive_time = packet.arrival_time;
+ abs_send_timestamp_ += GetAbsoluteSendTimeDelta(
+ *packet.absolute_send_time_24bits, previous_abs_send_time_);
+ previous_abs_send_time_ = *packet.absolute_send_time_24bits;
+ packet_result.sent_packet.send_time = abs_send_timestamp_;
+ packet_result.sent_packet.size = packet.size + packet_overhead_;
+ packet_result.sent_packet.sequence_number = seq;
+ network_state_estimator_->OnReceivedPacket(packet_result);
+ }
+}
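+
+// Illustration, not part of the original file: `unwrapper_` extends the
+// 16-bit transport-wide sequence numbers into a monotonic 64-bit space, so
+// feedback ranges can span a 16-bit wrap:
+//
+//   SeqNumUnwrapper<uint16_t> unwrapper;
+//   int64_t a = unwrapper.Unwrap(65535);  // 65535.
+//   int64_t b = unwrapper.Unwrap(0);      // 65536, not 0.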
+
+TimeDelta RemoteEstimatorProxy::Process(Timestamp now) {
+ MutexLock lock(&lock_);
+ if (!send_periodic_feedback_) {
+ return TimeDelta::PlusInfinity();
+ }
+ Timestamp next_process_time = last_process_time_ + send_interval_;
+ if (now >= next_process_time) {
+ last_process_time_ = now;
+ SendPeriodicFeedbacks();
+ return send_interval_;
+ }
+
+ return next_process_time - now;
+}
+
+void RemoteEstimatorProxy::OnBitrateChanged(int bitrate_bps) {
+ // TwccReportSize = Ipv4(20B) + UDP(8B) + SRTP(10B) +
+ // AverageTwccReport(30B)
+ // TwccReport size at 50ms interval is 24 bytes.
+ // TwccReport size at 250ms interval is 36 bytes.
+ // AverageTwccReport = (TwccReport(50ms) + TwccReport(250ms)) / 2
+ constexpr DataSize kTwccReportSize = DataSize::Bytes(20 + 8 + 10 + 30);
+ const DataRate kMinTwccRate =
+ kTwccReportSize / send_config_.max_interval.Get();
+
+ // Let TWCC reports occupy 5% of total bandwidth.
+ DataRate twcc_bitrate =
+ DataRate::BitsPerSec(send_config_.bandwidth_fraction * bitrate_bps);
+
+ // Check upper send_interval bound by checking bitrate to avoid overflow when
+ // dividing by small bitrate, in particular avoid dividing by zero bitrate.
+ TimeDelta send_interval = twcc_bitrate <= kMinTwccRate
+ ? send_config_.max_interval.Get()
+ : std::max(kTwccReportSize / twcc_bitrate,
+ send_config_.min_interval.Get());
+
+ MutexLock lock(&lock_);
+ send_interval_ = send_interval;
+}
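+
+// Worked example, not part of the original file: kTwccReportSize is
+// 68 bytes = 544 bits. At 1 Mbps, twcc_bitrate = 0.05 * 1e6 = 50 kbps, so
+// the raw interval 544 / 50000 s ~= 10.9 ms is clamped up to the 50 ms
+// minimum. At 100 kbps the raw interval is ~109 ms and is used as is; only
+// when twcc_bitrate falls to kMinTwccRate (~2.2 kbps) does the 250 ms
+// maximum apply.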
+
+void RemoteEstimatorProxy::SetSendPeriodicFeedback(
+ bool send_periodic_feedback) {
+ MutexLock lock(&lock_);
+ send_periodic_feedback_ = send_periodic_feedback;
+}
+
+void RemoteEstimatorProxy::SetTransportOverhead(DataSize overhead_per_packet) {
+ MutexLock lock(&lock_);
+ packet_overhead_ = overhead_per_packet;
+}
+
+void RemoteEstimatorProxy::SendPeriodicFeedbacks() {
+ // `periodic_window_start_seq_` is the first sequence number to include in
+ // the current feedback packet. Some older packets may still be in the map,
+ // in case a reordering happens and we need to retransmit them.
+ if (!periodic_window_start_seq_)
+ return;
+
+ std::unique_ptr<rtcp::RemoteEstimate> remote_estimate;
+ if (network_state_estimator_) {
+ absl::optional<NetworkStateEstimate> state_estimate =
+ network_state_estimator_->GetCurrentEstimate();
+ if (state_estimate) {
+ remote_estimate = std::make_unique<rtcp::RemoteEstimate>();
+ remote_estimate->SetEstimate(state_estimate.value());
+ }
+ }
+
+ int64_t packet_arrival_times_end_seq =
+ packet_arrival_times_.end_sequence_number();
+ while (periodic_window_start_seq_ < packet_arrival_times_end_seq) {
+ auto feedback_packet = MaybeBuildFeedbackPacket(
+ /*include_timestamps=*/true, periodic_window_start_seq_.value(),
+ packet_arrival_times_end_seq,
+ /*is_periodic_update=*/true);
+
+ if (feedback_packet == nullptr) {
+ break;
+ }
+
+ RTC_DCHECK(feedback_sender_ != nullptr);
+
+ std::vector<std::unique_ptr<rtcp::RtcpPacket>> packets;
+ if (remote_estimate) {
+ packets.push_back(std::move(remote_estimate));
+ }
+ packets.push_back(std::move(feedback_packet));
+
+ feedback_sender_(std::move(packets));
+ // Note: Don't erase items from packet_arrival_times_ after sending, in
+ // case they need to be re-sent after a reordering. Removal is handled by
+ // MaybeCullOldPackets once packets become too old.
+ }
+}
+
+void RemoteEstimatorProxy::SendFeedbackOnRequest(
+ int64_t sequence_number,
+ const FeedbackRequest& feedback_request) {
+ if (feedback_request.sequence_count == 0) {
+ return;
+ }
+
+ int64_t first_sequence_number =
+ sequence_number - feedback_request.sequence_count + 1;
+
+ auto feedback_packet = MaybeBuildFeedbackPacket(
+ feedback_request.include_timestamps, first_sequence_number,
+ sequence_number + 1, /*is_periodic_update=*/false);
+
+ // This is called when a packet has just been added.
+ RTC_DCHECK(feedback_packet != nullptr);
+
+ // Clear up to the first packet that is included in this feedback packet.
+ packet_arrival_times_.EraseTo(first_sequence_number);
+
+ RTC_DCHECK(feedback_sender_ != nullptr);
+ std::vector<std::unique_ptr<rtcp::RtcpPacket>> packets;
+ packets.push_back(std::move(feedback_packet));
+ feedback_sender_(std::move(packets));
+}
+
+std::unique_ptr<rtcp::TransportFeedback>
+RemoteEstimatorProxy::MaybeBuildFeedbackPacket(
+ bool include_timestamps,
+ int64_t begin_sequence_number_inclusive,
+ int64_t end_sequence_number_exclusive,
+ bool is_periodic_update) {
+ RTC_DCHECK_LT(begin_sequence_number_inclusive, end_sequence_number_exclusive);
+
+ int64_t start_seq =
+ packet_arrival_times_.clamp(begin_sequence_number_inclusive);
+
+ int64_t end_seq = packet_arrival_times_.clamp(end_sequence_number_exclusive);
+
+ // Create the packet on demand, as it's not certain that there are packets
+ // in the range that have been received.
+ std::unique_ptr<rtcp::TransportFeedback> feedback_packet = nullptr;
+
+ int64_t next_sequence_number = begin_sequence_number_inclusive;
+
+ for (int64_t seq = start_seq; seq < end_seq; ++seq) {
+ Timestamp arrival_time = packet_arrival_times_.get(seq);
+ if (arrival_time < Timestamp::Zero()) {
+ // Packet not received.
+ continue;
+ }
+
+ if (feedback_packet == nullptr) {
+ feedback_packet =
+ std::make_unique<rtcp::TransportFeedback>(include_timestamps);
+ feedback_packet->SetMediaSsrc(media_ssrc_);
+ // Base sequence number is the expected first sequence number. This is
+ // known, but we might not have actually received it, so the base time
+ // shall be the time of the first received packet in the feedback.
+ feedback_packet->SetBase(
+ static_cast<uint16_t>(begin_sequence_number_inclusive & 0xFFFF),
+ arrival_time);
+ feedback_packet->SetFeedbackSequenceNumber(feedback_packet_count_++);
+ }
+
+ if (!feedback_packet->AddReceivedPacket(static_cast<uint16_t>(seq & 0xFFFF),
+ arrival_time)) {
+ // Could not add timestamp, feedback packet might be full. Return and
+ // try again with a fresh packet.
+ break;
+ }
+
+ next_sequence_number = seq + 1;
+ }
+ if (is_periodic_update) {
+ periodic_window_start_seq_ = next_sequence_number;
+ }
+ return feedback_packet;
+}
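+
+// Illustration, not part of the original file: the wire format carries only
+// 16-bit sequence numbers, so the unwrapped 64-bit values are truncated when
+// written, e.g. static_cast<uint16_t>(seq & 0xFFFF) with seq = 65536 yields
+// 0. The send side consuming the feedback is expected to re-expand them with
+// its own unwrapper.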
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/remote_bitrate_estimator/remote_estimator_proxy.h b/third_party/libwebrtc/modules/remote_bitrate_estimator/remote_estimator_proxy.h
new file mode 100644
index 0000000000..509ad0ba02
--- /dev/null
+++ b/third_party/libwebrtc/modules/remote_bitrate_estimator/remote_estimator_proxy.h
@@ -0,0 +1,143 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_REMOTE_BITRATE_ESTIMATOR_REMOTE_ESTIMATOR_PROXY_H_
+#define MODULES_REMOTE_BITRATE_ESTIMATOR_REMOTE_ESTIMATOR_PROXY_H_
+
+#include <deque>
+#include <functional>
+#include <memory>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/field_trials_view.h"
+#include "api/rtp_headers.h"
+#include "api/transport/network_control.h"
+#include "api/units/data_size.h"
+#include "api/units/time_delta.h"
+#include "api/units/timestamp.h"
+#include "modules/remote_bitrate_estimator/packet_arrival_map.h"
+#include "modules/rtp_rtcp/source/rtcp_packet.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/transport_feedback.h"
+#include "rtc_base/experiments/field_trial_parser.h"
+#include "rtc_base/numerics/sequence_number_util.h"
+#include "rtc_base/synchronization/mutex.h"
+
+namespace webrtc {
+
+// Class used when send-side BWE is enabled: This proxy is instantiated on the
+// receive side. It buffers a number of receive timestamps and then sends
+// transport feedback messages back to the send side.
+class RemoteEstimatorProxy {
+ public:
+ // Used for sending transport feedback messages when send side
+ // BWE is used.
+ using TransportFeedbackSender = std::function<void(
+ std::vector<std::unique_ptr<rtcp::RtcpPacket>> packets)>;
+ RemoteEstimatorProxy(TransportFeedbackSender feedback_sender,
+ const FieldTrialsView* key_value_config,
+ NetworkStateEstimator* network_state_estimator);
+ ~RemoteEstimatorProxy();
+
+ struct Packet {
+ Timestamp arrival_time;
+ DataSize size;
+ uint32_t ssrc;
+ absl::optional<uint32_t> absolute_send_time_24bits;
+ absl::optional<uint16_t> transport_sequence_number;
+ absl::optional<FeedbackRequest> feedback_request;
+ };
+ void IncomingPacket(Packet packet);
+
+ void IncomingPacket(int64_t arrival_time_ms,
+ size_t payload_size,
+ const RTPHeader& header);
+
+ // Sends periodic feedback if it is time to send it.
+ // Returns the time until the next call to Process() should be made.
+ TimeDelta Process(Timestamp now);
+
+ void OnBitrateChanged(int bitrate_bps);
+ void SetSendPeriodicFeedback(bool send_periodic_feedback);
+ void SetTransportOverhead(DataSize overhead_per_packet);
+
+ private:
+ struct TransportWideFeedbackConfig {
+ FieldTrialParameter<TimeDelta> back_window{"wind", TimeDelta::Millis(500)};
+ FieldTrialParameter<TimeDelta> min_interval{"min", TimeDelta::Millis(50)};
+ FieldTrialParameter<TimeDelta> max_interval{"max", TimeDelta::Millis(250)};
+ FieldTrialParameter<TimeDelta> default_interval{"def",
+ TimeDelta::Millis(100)};
+ FieldTrialParameter<double> bandwidth_fraction{"frac", 0.05};
+ explicit TransportWideFeedbackConfig(
+ const FieldTrialsView* key_value_config) {
+ ParseFieldTrial({&back_window, &min_interval, &max_interval,
+ &default_interval, &bandwidth_fraction},
+ key_value_config->Lookup(
+ "WebRTC-Bwe-TransportWideFeedbackIntervals"));
+ }
+ };
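+
+ // Illustration, not part of the original file: the parameters above are
+ // read from the "WebRTC-Bwe-TransportWideFeedbackIntervals" field trial. A
+ // trial string such as "wind:1000ms,min:20ms,max:500ms,def:200ms,frac:0.1"
+ // would be expected to override all five values; the exact value syntax is
+ // an assumption here.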
+
+ void MaybeCullOldPackets(int64_t sequence_number, Timestamp arrival_time)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(&lock_);
+ void SendPeriodicFeedbacks() RTC_EXCLUSIVE_LOCKS_REQUIRED(&lock_);
+ void SendFeedbackOnRequest(int64_t sequence_number,
+ const FeedbackRequest& feedback_request)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(&lock_);
+
+ // Returns a Transport Feedback packet with information about as many
+ // packets received in the range [`begin_sequence_number_incl`,
+ // `end_sequence_number_excl`) as can fit in it. If `is_periodic_update`,
+ // this represents sending a periodic feedback message, which will make it
+ // update the `periodic_window_start_seq_` variable with the first packet that
+ // was not included in the feedback packet, so that the next update can
+ // continue from that sequence number.
+ //
+ // If no incoming packets were added, nullptr is returned.
+ //
+ // `include_timestamps` decides whether the returned TransportFeedback
+ // should include timestamps.
+ std::unique_ptr<rtcp::TransportFeedback> MaybeBuildFeedbackPacket(
+ bool include_timestamps,
+ int64_t begin_sequence_number_inclusive,
+ int64_t end_sequence_number_exclusive,
+ bool is_periodic_update) RTC_EXCLUSIVE_LOCKS_REQUIRED(&lock_);
+
+ const TransportFeedbackSender feedback_sender_;
+ const TransportWideFeedbackConfig send_config_;
+ Timestamp last_process_time_;
+
+ Mutex lock_;
+ // `network_state_estimator_` may be null.
+ NetworkStateEstimator* const network_state_estimator_
+ RTC_PT_GUARDED_BY(&lock_);
+ uint32_t media_ssrc_ RTC_GUARDED_BY(&lock_);
+ uint8_t feedback_packet_count_ RTC_GUARDED_BY(&lock_);
+ SeqNumUnwrapper<uint16_t> unwrapper_ RTC_GUARDED_BY(&lock_);
+ DataSize packet_overhead_ RTC_GUARDED_BY(&lock_);
+
+ // The next sequence number that should be the start sequence number during
+ // periodic reporting. Will be absl::nullopt before the first seen packet.
+ absl::optional<int64_t> periodic_window_start_seq_ RTC_GUARDED_BY(&lock_);
+
+ // Packet arrival times, by sequence number.
+ PacketArrivalTimeMap packet_arrival_times_ RTC_GUARDED_BY(&lock_);
+
+ TimeDelta send_interval_ RTC_GUARDED_BY(&lock_);
+ bool send_periodic_feedback_ RTC_GUARDED_BY(&lock_);
+
+ // Unwraps absolute send times.
+ uint32_t previous_abs_send_time_ RTC_GUARDED_BY(&lock_);
+ Timestamp abs_send_timestamp_ RTC_GUARDED_BY(&lock_);
+};
+
+} // namespace webrtc
+
+#endif // MODULES_REMOTE_BITRATE_ESTIMATOR_REMOTE_ESTIMATOR_PROXY_H_
diff --git a/third_party/libwebrtc/modules/remote_bitrate_estimator/remote_estimator_proxy_unittest.cc b/third_party/libwebrtc/modules/remote_bitrate_estimator/remote_estimator_proxy_unittest.cc
new file mode 100644
index 0000000000..10bc1e80a0
--- /dev/null
+++ b/third_party/libwebrtc/modules/remote_bitrate_estimator/remote_estimator_proxy_unittest.cc
@@ -0,0 +1,649 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/remote_bitrate_estimator/remote_estimator_proxy.h"
+
+#include <memory>
+#include <utility>
+
+#include "api/transport/field_trial_based_config.h"
+#include "api/transport/network_types.h"
+#include "api/transport/test/mock_network_control.h"
+#include "api/units/data_size.h"
+#include "api/units/time_delta.h"
+#include "api/units/timestamp.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/transport_feedback.h"
+#include "modules/rtp_rtcp/source/rtp_header_extensions.h"
+#include "system_wrappers/include/clock.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+using ::testing::_;
+using ::testing::ElementsAre;
+using ::testing::Invoke;
+using ::testing::MockFunction;
+using ::testing::Return;
+using ::testing::SizeIs;
+
+constexpr DataSize kDefaultPacketSize = DataSize::Bytes(100);
+constexpr uint32_t kMediaSsrc = 456;
+constexpr uint16_t kBaseSeq = 10;
+constexpr Timestamp kBaseTime = Timestamp::Millis(123);
+constexpr TimeDelta kBaseTimeWrapAround =
+ rtcp::TransportFeedback::kDeltaTick * (int64_t{1} << 32);
+constexpr TimeDelta kMaxSmallDelta = rtcp::TransportFeedback::kDeltaTick * 0xFF;
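+// kMaxSmallDelta is the largest receive delta that fits the one-byte "small
+// delta" encoding of rtcp::TransportFeedback; larger deltas need the two-byte
+// encoding (exercised in SendsFeedbackWithVaryingDeltas below).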
+
+constexpr TimeDelta kBackWindow = TimeDelta::Millis(500);
+constexpr TimeDelta kMinSendInterval = TimeDelta::Millis(50);
+constexpr TimeDelta kMaxSendInterval = TimeDelta::Millis(250);
+constexpr TimeDelta kDefaultSendInterval = TimeDelta::Millis(100);
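+// These four intervals mirror the "wind", "min", "max" and "def" defaults of
+// TransportWideFeedbackConfig in remote_estimator_proxy.h.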
+
+std::vector<uint16_t> SequenceNumbers(
+ const rtcp::TransportFeedback& feedback_packet) {
+ std::vector<uint16_t> sequence_numbers;
+ for (const auto& rtp_packet_received : feedback_packet.GetReceivedPackets()) {
+ sequence_numbers.push_back(rtp_packet_received.sequence_number());
+ }
+ return sequence_numbers;
+}
+
+std::vector<Timestamp> Timestamps(
+ const rtcp::TransportFeedback& feedback_packet) {
+ std::vector<Timestamp> timestamps;
+ Timestamp timestamp = feedback_packet.BaseTime();
+  // rtcp::TransportFeedback makes no promises about the epoch of the base
+  // time; it may add a number of kBaseTimeWrapAround periods to keep it large
+  // enough to support negative deltas. Align it close to kBaseTime to keep the
+  // test expectations simple: e.g. a base time of kBaseTime +
+  // 2 * kBaseTimeWrapAround + delta is mapped back to kBaseTime + delta.
+ if (timestamp > kBaseTime) {
+ timestamp -= (timestamp - kBaseTime).RoundTo(kBaseTimeWrapAround);
+ }
+ for (const auto& rtp_packet_received : feedback_packet.GetReceivedPackets()) {
+ timestamp += rtp_packet_received.delta();
+ timestamps.push_back(timestamp);
+ }
+ return timestamps;
+}
+
+class RemoteEstimatorProxyTest : public ::testing::Test {
+ public:
+ RemoteEstimatorProxyTest()
+ : clock_(0),
+ proxy_(feedback_sender_.AsStdFunction(),
+ &field_trial_config_,
+ &network_state_estimator_) {}
+
+ protected:
+ void IncomingPacket(
+ uint16_t seq,
+ Timestamp arrival_time,
+ absl::optional<FeedbackRequest> feedback_request = absl::nullopt) {
+ proxy_.IncomingPacket({.arrival_time = arrival_time,
+                           .size = kDefaultPacketSize,
+ .ssrc = kMediaSsrc,
+ .transport_sequence_number = seq,
+ .feedback_request = feedback_request});
+ }
+
+ void Process() {
+ clock_.AdvanceTime(kDefaultSendInterval);
+ proxy_.Process(clock_.CurrentTime());
+ }
+
+ FieldTrialBasedConfig field_trial_config_;
+ SimulatedClock clock_;
+ MockFunction<void(std::vector<std::unique_ptr<rtcp::RtcpPacket>>)>
+ feedback_sender_;
+ ::testing::NiceMock<MockNetworkStateEstimator> network_state_estimator_;
+ RemoteEstimatorProxy proxy_;
+};
+
+TEST_F(RemoteEstimatorProxyTest, SendsSinglePacketFeedback) {
+ IncomingPacket(kBaseSeq, kBaseTime);
+
+ EXPECT_CALL(feedback_sender_, Call)
+ .WillOnce(Invoke(
+ [](std::vector<std::unique_ptr<rtcp::RtcpPacket>> feedback_packets) {
+ rtcp::TransportFeedback* feedback_packet =
+ static_cast<rtcp::TransportFeedback*>(
+ feedback_packets[0].get());
+ EXPECT_EQ(kBaseSeq, feedback_packet->GetBaseSequence());
+ EXPECT_EQ(kMediaSsrc, feedback_packet->media_ssrc());
+
+ EXPECT_THAT(SequenceNumbers(*feedback_packet),
+ ElementsAre(kBaseSeq));
+ EXPECT_THAT(Timestamps(*feedback_packet), ElementsAre(kBaseTime));
+ }));
+
+ Process();
+}
+
+TEST_F(RemoteEstimatorProxyTest, DuplicatedPackets) {
+ IncomingPacket(kBaseSeq, kBaseTime);
+ IncomingPacket(kBaseSeq, kBaseTime + TimeDelta::Seconds(1));
+
+ EXPECT_CALL(feedback_sender_, Call)
+ .WillOnce(Invoke(
+ [](std::vector<std::unique_ptr<rtcp::RtcpPacket>> feedback_packets) {
+ rtcp::TransportFeedback* feedback_packet =
+ static_cast<rtcp::TransportFeedback*>(
+ feedback_packets[0].get());
+ EXPECT_EQ(kBaseSeq, feedback_packet->GetBaseSequence());
+ EXPECT_EQ(kMediaSsrc, feedback_packet->media_ssrc());
+
+ EXPECT_THAT(SequenceNumbers(*feedback_packet),
+ ElementsAre(kBaseSeq));
+ EXPECT_THAT(Timestamps(*feedback_packet), ElementsAre(kBaseTime));
+ }));
+
+ Process();
+}
+
+TEST_F(RemoteEstimatorProxyTest, FeedbackWithMissingStart) {
+ // First feedback.
+ IncomingPacket(kBaseSeq, kBaseTime);
+ IncomingPacket(kBaseSeq + 1, kBaseTime + TimeDelta::Seconds(1));
+ EXPECT_CALL(feedback_sender_, Call);
+ Process();
+
+ // Second feedback starts with a missing packet (DROP kBaseSeq + 2).
+ IncomingPacket(kBaseSeq + 3, kBaseTime + TimeDelta::Seconds(3));
+
+ EXPECT_CALL(feedback_sender_, Call)
+ .WillOnce(Invoke(
+ [](std::vector<std::unique_ptr<rtcp::RtcpPacket>> feedback_packets) {
+ rtcp::TransportFeedback* feedback_packet =
+ static_cast<rtcp::TransportFeedback*>(
+ feedback_packets[0].get());
+ EXPECT_EQ(kBaseSeq + 2, feedback_packet->GetBaseSequence());
+ EXPECT_EQ(kMediaSsrc, feedback_packet->media_ssrc());
+
+ EXPECT_THAT(SequenceNumbers(*feedback_packet),
+ ElementsAre(kBaseSeq + 3));
+ EXPECT_THAT(Timestamps(*feedback_packet),
+ ElementsAre(kBaseTime + TimeDelta::Seconds(3)));
+ }));
+
+ Process();
+}
+
+TEST_F(RemoteEstimatorProxyTest, SendsFeedbackWithVaryingDeltas) {
+ IncomingPacket(kBaseSeq, kBaseTime);
+ IncomingPacket(kBaseSeq + 1, kBaseTime + kMaxSmallDelta);
+ IncomingPacket(kBaseSeq + 2,
+ kBaseTime + (2 * kMaxSmallDelta) + TimeDelta::Millis(1));
+
+ EXPECT_CALL(feedback_sender_, Call)
+ .WillOnce(Invoke(
+ [](std::vector<std::unique_ptr<rtcp::RtcpPacket>> feedback_packets) {
+ rtcp::TransportFeedback* feedback_packet =
+ static_cast<rtcp::TransportFeedback*>(
+ feedback_packets[0].get());
+ EXPECT_EQ(kBaseSeq, feedback_packet->GetBaseSequence());
+ EXPECT_EQ(kMediaSsrc, feedback_packet->media_ssrc());
+
+ EXPECT_THAT(SequenceNumbers(*feedback_packet),
+ ElementsAre(kBaseSeq, kBaseSeq + 1, kBaseSeq + 2));
+ EXPECT_THAT(Timestamps(*feedback_packet),
+ ElementsAre(kBaseTime, kBaseTime + kMaxSmallDelta,
+ kBaseTime + (2 * kMaxSmallDelta) +
+ TimeDelta::Millis(1)));
+ }));
+
+ Process();
+}
+
+TEST_F(RemoteEstimatorProxyTest, SendsFragmentedFeedback) {
+ static constexpr TimeDelta kTooLargeDelta =
+ rtcp::TransportFeedback::kDeltaTick * (1 << 16);
+
+ IncomingPacket(kBaseSeq, kBaseTime);
+ IncomingPacket(kBaseSeq + 1, kBaseTime + kTooLargeDelta);
+
+ EXPECT_CALL(feedback_sender_, Call)
+ .WillOnce(Invoke(
+ [](std::vector<std::unique_ptr<rtcp::RtcpPacket>> feedback_packets) {
+ rtcp::TransportFeedback* feedback_packet =
+ static_cast<rtcp::TransportFeedback*>(
+ feedback_packets[0].get());
+ EXPECT_EQ(kBaseSeq, feedback_packet->GetBaseSequence());
+ EXPECT_EQ(kMediaSsrc, feedback_packet->media_ssrc());
+
+ EXPECT_THAT(SequenceNumbers(*feedback_packet),
+ ElementsAre(kBaseSeq));
+ EXPECT_THAT(Timestamps(*feedback_packet), ElementsAre(kBaseTime));
+ }))
+ .WillOnce(Invoke(
+ [](std::vector<std::unique_ptr<rtcp::RtcpPacket>> feedback_packets) {
+ rtcp::TransportFeedback* feedback_packet =
+ static_cast<rtcp::TransportFeedback*>(
+ feedback_packets[0].get());
+ EXPECT_EQ(kBaseSeq + 1, feedback_packet->GetBaseSequence());
+ EXPECT_EQ(kMediaSsrc, feedback_packet->media_ssrc());
+
+ EXPECT_THAT(SequenceNumbers(*feedback_packet),
+ ElementsAre(kBaseSeq + 1));
+ EXPECT_THAT(Timestamps(*feedback_packet),
+ ElementsAre(kBaseTime + kTooLargeDelta));
+ }));
+
+ Process();
+}
+
+TEST_F(RemoteEstimatorProxyTest, HandlesReorderingAndWrap) {
+ const TimeDelta kDelta = TimeDelta::Seconds(1);
+ const uint16_t kLargeSeq = 62762;
+ IncomingPacket(kBaseSeq, kBaseTime);
+ IncomingPacket(kLargeSeq, kBaseTime + kDelta);
+
+ EXPECT_CALL(feedback_sender_, Call)
+ .WillOnce(Invoke(
+ [&](std::vector<std::unique_ptr<rtcp::RtcpPacket>> feedback_packets) {
+ rtcp::TransportFeedback* feedback_packet =
+ static_cast<rtcp::TransportFeedback*>(
+ feedback_packets[0].get());
+ EXPECT_EQ(kLargeSeq, feedback_packet->GetBaseSequence());
+ EXPECT_EQ(kMediaSsrc, feedback_packet->media_ssrc());
+
+ EXPECT_THAT(Timestamps(*feedback_packet),
+ ElementsAre(kBaseTime + kDelta, kBaseTime));
+ }));
+
+ Process();
+}
+
+TEST_F(RemoteEstimatorProxyTest, HandlesMalformedSequenceNumbers) {
+ // This test generates incoming packets with large jumps in sequence numbers.
+ // When unwrapped, the sequeunce numbers of these 30 incoming packets, will
+ // span a range of roughly 650k packets. Test that we only send feedback for
+ // the last packets. Test for regression found in chromium:949020.
+ const TimeDelta kDelta = TimeDelta::Seconds(1);
+ for (int i = 0; i < 10; ++i) {
+ IncomingPacket(kBaseSeq + i, kBaseTime + 3 * i * kDelta);
+ IncomingPacket(kBaseSeq + 20000 + i, kBaseTime + (3 * i + 1) * kDelta);
+ IncomingPacket(kBaseSeq + 40000 + i, kBaseTime + (3 * i + 2) * kDelta);
+ }
+
+ // Only expect feedback for the last two packets.
+ EXPECT_CALL(feedback_sender_, Call)
+ .WillOnce(Invoke(
+ [&](std::vector<std::unique_ptr<rtcp::RtcpPacket>> feedback_packets) {
+ rtcp::TransportFeedback* feedback_packet =
+ static_cast<rtcp::TransportFeedback*>(
+ feedback_packets[0].get());
+ EXPECT_EQ(kBaseSeq + 20000 + 9, feedback_packet->GetBaseSequence());
+ EXPECT_EQ(kMediaSsrc, feedback_packet->media_ssrc());
+ EXPECT_THAT(SequenceNumbers(*feedback_packet),
+ ElementsAre(kBaseSeq + 20009, kBaseSeq + 40009));
+ EXPECT_THAT(
+ Timestamps(*feedback_packet),
+ ElementsAre(kBaseTime + 28 * kDelta, kBaseTime + 29 * kDelta));
+ }));
+
+ Process();
+}
+
+TEST_F(RemoteEstimatorProxyTest, HandlesBackwardsWrappingSequenceNumbers) {
+  // This test is like HandlesMalformedSequenceNumbers but for negative
+  // wrap-arounds. Test that we only send feedback for the packets with the
+  // highest sequence numbers. Test for regression found in chromium:949020.
+ const TimeDelta kDelta = TimeDelta::Seconds(1);
+ for (int i = 0; i < 10; ++i) {
+ IncomingPacket(kBaseSeq + i, kBaseTime + 3 * i * kDelta);
+ IncomingPacket(kBaseSeq + 40000 + i, kBaseTime + (3 * i + 1) * kDelta);
+ IncomingPacket(kBaseSeq + 20000 + i, kBaseTime + (3 * i + 2) * kDelta);
+ }
+
+ // Only expect feedback for the first two packets.
+ EXPECT_CALL(feedback_sender_, Call)
+ .WillOnce(Invoke(
+ [&](std::vector<std::unique_ptr<rtcp::RtcpPacket>> feedback_packets) {
+ rtcp::TransportFeedback* feedback_packet =
+ static_cast<rtcp::TransportFeedback*>(
+ feedback_packets[0].get());
+ EXPECT_EQ(kBaseSeq + 40000, feedback_packet->GetBaseSequence());
+ EXPECT_EQ(kMediaSsrc, feedback_packet->media_ssrc());
+ EXPECT_THAT(SequenceNumbers(*feedback_packet),
+ ElementsAre(kBaseSeq + 40000, kBaseSeq));
+ EXPECT_THAT(Timestamps(*feedback_packet),
+ ElementsAre(kBaseTime + kDelta, kBaseTime));
+ }));
+
+ Process();
+}
+
+TEST_F(RemoteEstimatorProxyTest, ResendsTimestampsOnReordering) {
+ IncomingPacket(kBaseSeq, kBaseTime);
+ IncomingPacket(kBaseSeq + 2, kBaseTime + TimeDelta::Millis(2));
+
+ EXPECT_CALL(feedback_sender_, Call)
+ .WillOnce(Invoke(
+ [](std::vector<std::unique_ptr<rtcp::RtcpPacket>> feedback_packets) {
+ rtcp::TransportFeedback* feedback_packet =
+ static_cast<rtcp::TransportFeedback*>(
+ feedback_packets[0].get());
+ EXPECT_EQ(kBaseSeq, feedback_packet->GetBaseSequence());
+ EXPECT_EQ(kMediaSsrc, feedback_packet->media_ssrc());
+
+ EXPECT_THAT(SequenceNumbers(*feedback_packet),
+ ElementsAre(kBaseSeq, kBaseSeq + 2));
+ EXPECT_THAT(
+ Timestamps(*feedback_packet),
+ ElementsAre(kBaseTime, kBaseTime + TimeDelta::Millis(2)));
+ }));
+
+ Process();
+
+ IncomingPacket(kBaseSeq + 1, kBaseTime + TimeDelta::Millis(1));
+
+ EXPECT_CALL(feedback_sender_, Call)
+ .WillOnce(Invoke(
+ [](std::vector<std::unique_ptr<rtcp::RtcpPacket>> feedback_packets) {
+ rtcp::TransportFeedback* feedback_packet =
+ static_cast<rtcp::TransportFeedback*>(
+ feedback_packets[0].get());
+ EXPECT_EQ(kBaseSeq + 1, feedback_packet->GetBaseSequence());
+ EXPECT_EQ(kMediaSsrc, feedback_packet->media_ssrc());
+
+ EXPECT_THAT(SequenceNumbers(*feedback_packet),
+ ElementsAre(kBaseSeq + 1, kBaseSeq + 2));
+ EXPECT_THAT(Timestamps(*feedback_packet),
+ ElementsAre(kBaseTime + TimeDelta::Millis(1),
+ kBaseTime + TimeDelta::Millis(2)));
+ }));
+
+ Process();
+}
+
+TEST_F(RemoteEstimatorProxyTest, RemovesTimestampsOutOfScope) {
+ const Timestamp kTimeoutTime = kBaseTime + kBackWindow;
+
+ IncomingPacket(kBaseSeq + 2, kBaseTime);
+
+ EXPECT_CALL(feedback_sender_, Call)
+ .WillOnce(Invoke(
+ [](std::vector<std::unique_ptr<rtcp::RtcpPacket>> feedback_packets) {
+ rtcp::TransportFeedback* feedback_packet =
+ static_cast<rtcp::TransportFeedback*>(
+ feedback_packets[0].get());
+ EXPECT_EQ(kBaseSeq + 2, feedback_packet->GetBaseSequence());
+
+ EXPECT_THAT(Timestamps(*feedback_packet), ElementsAre(kBaseTime));
+ }));
+
+ Process();
+
+ IncomingPacket(kBaseSeq + 3, kTimeoutTime); // kBaseSeq + 2 times out here.
+
+ EXPECT_CALL(feedback_sender_, Call)
+ .WillOnce(Invoke(
+ [&](std::vector<std::unique_ptr<rtcp::RtcpPacket>> feedback_packets) {
+ rtcp::TransportFeedback* feedback_packet =
+ static_cast<rtcp::TransportFeedback*>(
+ feedback_packets[0].get());
+ EXPECT_EQ(kBaseSeq + 3, feedback_packet->GetBaseSequence());
+
+ EXPECT_THAT(Timestamps(*feedback_packet),
+ ElementsAre(kTimeoutTime));
+ }));
+
+ Process();
+
+  // New group, with sequence numbers starting below the first group's so that
+  // they may be retransmitted.
+ IncomingPacket(kBaseSeq, kBaseTime - TimeDelta::Millis(1));
+ IncomingPacket(kBaseSeq + 1, kTimeoutTime - TimeDelta::Millis(1));
+
+ EXPECT_CALL(feedback_sender_, Call)
+ .WillOnce(Invoke(
+ [&](std::vector<std::unique_ptr<rtcp::RtcpPacket>> feedback_packets) {
+ rtcp::TransportFeedback* feedback_packet =
+ static_cast<rtcp::TransportFeedback*>(
+ feedback_packets[0].get());
+ EXPECT_EQ(kBaseSeq, feedback_packet->GetBaseSequence());
+
+ EXPECT_THAT(SequenceNumbers(*feedback_packet),
+ ElementsAre(kBaseSeq, kBaseSeq + 1, kBaseSeq + 3));
+ EXPECT_THAT(
+ Timestamps(*feedback_packet),
+ ElementsAre(kBaseTime - TimeDelta::Millis(1),
+ kTimeoutTime - TimeDelta::Millis(1), kTimeoutTime));
+ }));
+
+ Process();
+}
+
+TEST_F(RemoteEstimatorProxyTest,
+       TimeUntilNextProcessIsDefaultOnUnknownBitrate) {
+ EXPECT_EQ(proxy_.Process(clock_.CurrentTime()), kDefaultSendInterval);
+}
+
+TEST_F(RemoteEstimatorProxyTest, TimeUntilNextProcessIsMinIntervalOn300kbps) {
+ proxy_.OnBitrateChanged(300'000);
+ EXPECT_EQ(proxy_.Process(clock_.CurrentTime()), kMinSendInterval);
+}
+
+TEST_F(RemoteEstimatorProxyTest, TimeUntilNextProcessIsMaxIntervalOn0kbps) {
+  // TimeUntilNextProcess should be limited by `kMaxSendInterval` when the
+  // bitrate is small. We choose 0 bps as a special case, which also tests
+  // against erroneous behavior like division by zero.
+ proxy_.OnBitrateChanged(0);
+ EXPECT_EQ(proxy_.Process(clock_.CurrentTime()), kMaxSendInterval);
+}
+
+TEST_F(RemoteEstimatorProxyTest, TimeUntilNextProcessIsMaxIntervalOn20kbps) {
+ proxy_.OnBitrateChanged(20'000);
+ EXPECT_EQ(proxy_.Process(clock_.CurrentTime()), kMaxSendInterval);
+}
+
+TEST_F(RemoteEstimatorProxyTest, TwccReportsUse5PercentOfAvailableBandwidth) {
+ proxy_.OnBitrateChanged(80'000);
+  // 80 kbps * 5% = 4 kbps. One TWCC report is ~68 bytes = 544 bits, so the
+  // send interval becomes 544 bits / 4000 bps = 136 ms.
+ EXPECT_EQ(proxy_.Process(clock_.CurrentTime()), TimeDelta::Millis(136));
+}
+
+//////////////////////////////////////////////////////////////////////////////
+// Tests for the extended protocol where the feedback is explicitly requested
+// by the sender.
+//////////////////////////////////////////////////////////////////////////////
+using RemoteEstimatorProxyOnRequestTest = RemoteEstimatorProxyTest;
+TEST_F(RemoteEstimatorProxyOnRequestTest, DisablesPeriodicProcess) {
+ proxy_.SetSendPeriodicFeedback(false);
+ EXPECT_EQ(proxy_.Process(clock_.CurrentTime()), TimeDelta::PlusInfinity());
+}
+
+TEST_F(RemoteEstimatorProxyOnRequestTest, ProcessDoesNotSendFeedback) {
+ proxy_.SetSendPeriodicFeedback(false);
+ IncomingPacket(kBaseSeq, kBaseTime);
+ EXPECT_CALL(feedback_sender_, Call).Times(0);
+ Process();
+}
+
+TEST_F(RemoteEstimatorProxyOnRequestTest, RequestSinglePacketFeedback) {
+ proxy_.SetSendPeriodicFeedback(false);
+ IncomingPacket(kBaseSeq, kBaseTime);
+ IncomingPacket(kBaseSeq + 1, kBaseTime + kMaxSmallDelta);
+ IncomingPacket(kBaseSeq + 2, kBaseTime + 2 * kMaxSmallDelta);
+
+ EXPECT_CALL(feedback_sender_, Call)
+ .WillOnce(Invoke(
+ [](std::vector<std::unique_ptr<rtcp::RtcpPacket>> feedback_packets) {
+ rtcp::TransportFeedback* feedback_packet =
+ static_cast<rtcp::TransportFeedback*>(
+ feedback_packets[0].get());
+ EXPECT_EQ(kBaseSeq + 3, feedback_packet->GetBaseSequence());
+ EXPECT_EQ(kMediaSsrc, feedback_packet->media_ssrc());
+
+ EXPECT_THAT(SequenceNumbers(*feedback_packet),
+ ElementsAre(kBaseSeq + 3));
+ EXPECT_THAT(Timestamps(*feedback_packet),
+ ElementsAre(kBaseTime + 3 * kMaxSmallDelta));
+ }));
+
+ constexpr FeedbackRequest kSinglePacketFeedbackRequest = {
+ /*include_timestamps=*/true, /*sequence_count=*/1};
+ IncomingPacket(kBaseSeq + 3, kBaseTime + 3 * kMaxSmallDelta,
+ kSinglePacketFeedbackRequest);
+}
+
+TEST_F(RemoteEstimatorProxyOnRequestTest, RequestLastFivePacketFeedback) {
+ proxy_.SetSendPeriodicFeedback(false);
+ int i = 0;
+ for (; i < 10; ++i) {
+ IncomingPacket(kBaseSeq + i, kBaseTime + i * kMaxSmallDelta);
+ }
+
+ EXPECT_CALL(feedback_sender_, Call)
+ .WillOnce(Invoke(
+ [](std::vector<std::unique_ptr<rtcp::RtcpPacket>> feedback_packets) {
+ rtcp::TransportFeedback* feedback_packet =
+ static_cast<rtcp::TransportFeedback*>(
+ feedback_packets[0].get());
+ EXPECT_EQ(kBaseSeq + 6, feedback_packet->GetBaseSequence());
+ EXPECT_EQ(kMediaSsrc, feedback_packet->media_ssrc());
+
+ EXPECT_THAT(SequenceNumbers(*feedback_packet),
+ ElementsAre(kBaseSeq + 6, kBaseSeq + 7, kBaseSeq + 8,
+ kBaseSeq + 9, kBaseSeq + 10));
+ EXPECT_THAT(Timestamps(*feedback_packet),
+ ElementsAre(kBaseTime + 6 * kMaxSmallDelta,
+ kBaseTime + 7 * kMaxSmallDelta,
+ kBaseTime + 8 * kMaxSmallDelta,
+ kBaseTime + 9 * kMaxSmallDelta,
+ kBaseTime + 10 * kMaxSmallDelta));
+ }));
+
+ constexpr FeedbackRequest kFivePacketsFeedbackRequest = {
+ /*include_timestamps=*/true, /*sequence_count=*/5};
+ IncomingPacket(kBaseSeq + i, kBaseTime + i * kMaxSmallDelta,
+ kFivePacketsFeedbackRequest);
+}
+
+TEST_F(RemoteEstimatorProxyOnRequestTest,
+ RequestLastFivePacketFeedbackMissingPackets) {
+ proxy_.SetSendPeriodicFeedback(false);
+ int i = 0;
+ for (; i < 10; ++i) {
+ if (i != 7 && i != 9)
+ IncomingPacket(kBaseSeq + i, kBaseTime + i * kMaxSmallDelta);
+ }
+
+ EXPECT_CALL(feedback_sender_, Call)
+ .WillOnce(Invoke(
+ [](std::vector<std::unique_ptr<rtcp::RtcpPacket>> feedback_packets) {
+ rtcp::TransportFeedback* feedback_packet =
+ static_cast<rtcp::TransportFeedback*>(
+ feedback_packets[0].get());
+ EXPECT_EQ(kBaseSeq + 6, feedback_packet->GetBaseSequence());
+ EXPECT_EQ(kMediaSsrc, feedback_packet->media_ssrc());
+
+ EXPECT_THAT(SequenceNumbers(*feedback_packet),
+ ElementsAre(kBaseSeq + 6, kBaseSeq + 8, kBaseSeq + 10));
+ EXPECT_THAT(Timestamps(*feedback_packet),
+ ElementsAre(kBaseTime + 6 * kMaxSmallDelta,
+ kBaseTime + 8 * kMaxSmallDelta,
+ kBaseTime + 10 * kMaxSmallDelta));
+ }));
+
+ constexpr FeedbackRequest kFivePacketsFeedbackRequest = {
+ /*include_timestamps=*/true, /*sequence_count=*/5};
+ IncomingPacket(kBaseSeq + i, kBaseTime + i * kMaxSmallDelta,
+ kFivePacketsFeedbackRequest);
+}
+
+TEST_F(RemoteEstimatorProxyTest, ReportsIncomingPacketToNetworkStateEstimator) {
+ Timestamp first_send_timestamp = Timestamp::Zero();
+ const DataSize kPacketOverhead = DataSize::Bytes(38);
+ proxy_.SetTransportOverhead(kPacketOverhead);
+
+ EXPECT_CALL(network_state_estimator_, OnReceivedPacket(_))
+ .WillOnce(Invoke([&](const PacketResult& packet) {
+ EXPECT_EQ(packet.receive_time, kBaseTime);
+ EXPECT_EQ(packet.sent_packet.size,
+ kDefaultPacketSize + kPacketOverhead);
+ first_send_timestamp = packet.sent_packet.send_time;
+ }));
+  // Incoming packet with an absolute send time, but without a transport
+  // sequence number.
+ proxy_.IncomingPacket(
+ {.arrival_time = kBaseTime,
+ .size = kDefaultPacketSize,
+ .ssrc = kMediaSsrc,
+ .absolute_send_time_24bits = AbsoluteSendTime::To24Bits(kBaseTime)});
+
+  // Expect a packet with an older absolute send time to be treated as sent at
+  // the same time as the previous packet, due to reordering.
+ EXPECT_CALL(network_state_estimator_, OnReceivedPacket(_))
+ .WillOnce(Invoke([&first_send_timestamp](const PacketResult& packet) {
+ EXPECT_EQ(packet.receive_time, kBaseTime);
+ EXPECT_EQ(packet.sent_packet.send_time, first_send_timestamp);
+ }));
+
+ proxy_.IncomingPacket(
+ {.arrival_time = kBaseTime,
+ .size = kDefaultPacketSize,
+ .ssrc = kMediaSsrc,
+ .absolute_send_time_24bits =
+ AbsoluteSendTime::To24Bits(kBaseTime - TimeDelta::Millis(12))});
+}
+
+TEST_F(RemoteEstimatorProxyTest, IncomingPacketHandlesWrapInAbsSendTime) {
+  // The absolute send time extension uses 24-bit precision.
+ const uint32_t kFirstAbsSendTime =
+ AbsoluteSendTime::To24Bits(Timestamp::Millis((1 << 24) - 30));
+ // Second abs send time has wrapped.
+ const uint32_t kSecondAbsSendTime =
+ AbsoluteSendTime::To24Bits(Timestamp::Millis(1 << 24));
+ const TimeDelta kExpectedAbsSendTimeDelta = TimeDelta::Millis(30);
+
+ Timestamp first_send_timestamp = Timestamp::Zero();
+ EXPECT_CALL(network_state_estimator_, OnReceivedPacket(_))
+ .WillOnce(Invoke([&first_send_timestamp](const PacketResult& packet) {
+ EXPECT_EQ(packet.receive_time, kBaseTime);
+ first_send_timestamp = packet.sent_packet.send_time;
+ }));
+ proxy_.IncomingPacket({.arrival_time = kBaseTime,
+ .size = kDefaultPacketSize,
+ .ssrc = kMediaSsrc,
+ .absolute_send_time_24bits = kFirstAbsSendTime,
+ .transport_sequence_number = kBaseSeq});
+
+ EXPECT_CALL(network_state_estimator_, OnReceivedPacket(_))
+ .WillOnce(Invoke([first_send_timestamp,
+ kExpectedAbsSendTimeDelta](const PacketResult& packet) {
+ EXPECT_EQ(packet.receive_time, kBaseTime + TimeDelta::Millis(123));
+ EXPECT_EQ(packet.sent_packet.send_time.ms(),
+ (first_send_timestamp + kExpectedAbsSendTimeDelta).ms());
+ }));
+ proxy_.IncomingPacket({.arrival_time = kBaseTime + TimeDelta::Millis(123),
+ .size = kDefaultPacketSize,
+ .ssrc = kMediaSsrc,
+ .absolute_send_time_24bits = kSecondAbsSendTime,
+ .transport_sequence_number = kBaseSeq + 1});
+}
+
+TEST_F(RemoteEstimatorProxyTest, SendTransportFeedbackAndNetworkStateUpdate) {
+ proxy_.IncomingPacket(
+ {.arrival_time = kBaseTime,
+ .size = kDefaultPacketSize,
+ .ssrc = kMediaSsrc,
+ .absolute_send_time_24bits =
+ AbsoluteSendTime::To24Bits(kBaseTime - TimeDelta::Millis(1)),
+ .transport_sequence_number = kBaseSeq});
+ EXPECT_CALL(network_state_estimator_, GetCurrentEstimate())
+ .WillOnce(Return(NetworkStateEstimate()));
+ EXPECT_CALL(feedback_sender_, Call(SizeIs(2)));
+ Process();
+}
+
+} // namespace
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/remote_bitrate_estimator/test/bwe_test_logging.cc b/third_party/libwebrtc/modules/remote_bitrate_estimator/test/bwe_test_logging.cc
new file mode 100644
index 0000000000..c8f6faa127
--- /dev/null
+++ b/third_party/libwebrtc/modules/remote_bitrate_estimator/test/bwe_test_logging.cc
@@ -0,0 +1,262 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/remote_bitrate_estimator/test/bwe_test_logging.h"
+
+#if BWE_TEST_LOGGING_COMPILE_TIME_ENABLE
+
+#include <inttypes.h>
+#include <stdarg.h>
+#include <stdio.h>
+
+#include <algorithm>
+
+#include "rtc_base/checks.h"
+#include "rtc_base/platform_thread.h"
+#include "rtc_base/strings/string_builder.h"
+
+namespace webrtc {
+namespace testing {
+namespace bwe {
+
+static std::string ToString(uint32_t v) {
+ rtc::StringBuilder ss;
+ ss << v;
+ return ss.Release();
+}
+
+Logging::ThreadState::ThreadState() = default;
+Logging::ThreadState::~ThreadState() = default;
+
+Logging::Context::Context(uint32_t name, int64_t timestamp_ms, bool enabled) {
+ Logging::GetInstance()->PushState(ToString(name), timestamp_ms, enabled);
+}
+
+Logging::Context::Context(const std::string& name,
+ int64_t timestamp_ms,
+ bool enabled) {
+ Logging::GetInstance()->PushState(name, timestamp_ms, enabled);
+}
+
+Logging::Context::Context(const char* name,
+ int64_t timestamp_ms,
+ bool enabled) {
+ Logging::GetInstance()->PushState(name, timestamp_ms, enabled);
+}
+
+Logging::Context::~Context() {
+ Logging::GetInstance()->PopState();
+}
+
+Logging* Logging::GetInstance() {
+ static Logging* logging = new Logging();
+ return logging;
+}
+
+void Logging::SetGlobalContext(uint32_t name) {
+ MutexLock lock(&mutex_);
+ thread_map_[rtc::CurrentThreadId()].global_state.tag = ToString(name);
+}
+
+void Logging::SetGlobalContext(const std::string& name) {
+ MutexLock lock(&mutex_);
+ thread_map_[rtc::CurrentThreadId()].global_state.tag = name;
+}
+
+void Logging::SetGlobalContext(const char* name) {
+ MutexLock lock(&mutex_);
+ thread_map_[rtc::CurrentThreadId()].global_state.tag = name;
+}
+
+void Logging::SetGlobalEnable(bool enabled) {
+ MutexLock lock(&mutex_);
+ thread_map_[rtc::CurrentThreadId()].global_state.enabled = enabled;
+}
+
+void Logging::Log(const char format[], ...) {
+ MutexLock lock(&mutex_);
+ ThreadMap::iterator it = thread_map_.find(rtc::CurrentThreadId());
+ RTC_DCHECK(it != thread_map_.end());
+ const State& state = it->second.stack.top();
+ if (state.enabled) {
+ printf("%s\t", state.tag.c_str());
+ va_list args;
+ va_start(args, format);
+ vprintf(format, args);
+ va_end(args);
+ printf("\n");
+ }
+}
+
+void Logging::Plot(int figure, const std::string& name, double value) {
+ Plot(figure, name, value, 0, "-");
+}
+
+void Logging::Plot(int figure,
+ const std::string& name,
+ double value,
+ uint32_t ssrc) {
+ Plot(figure, name, value, ssrc, "-");
+}
+
+void Logging::Plot(int figure,
+ const std::string& name,
+ double value,
+ const std::string& alg_name) {
+ Plot(figure, name, value, 0, alg_name);
+}
+
+void Logging::Plot(int figure,
+ const std::string& name,
+ double value,
+ uint32_t ssrc,
+ const std::string& alg_name) {
+ MutexLock lock(&mutex_);
+ ThreadMap::iterator it = thread_map_.find(rtc::CurrentThreadId());
+ RTC_DCHECK(it != thread_map_.end());
+ const State& state = it->second.stack.top();
+ if (state.enabled) {
+ printf("PLOT\t%d\t%s:%" PRIu32 "@%s\t%f\t%f\n", figure, name.c_str(), ssrc,
+ alg_name.c_str(), state.timestamp_ms * 0.001, value);
+ }
+}
+
+void Logging::PlotBar(int figure,
+ const std::string& name,
+ double value,
+ int flow_id) {
+ MutexLock lock(&mutex_);
+ ThreadMap::iterator it = thread_map_.find(rtc::CurrentThreadId());
+ RTC_DCHECK(it != thread_map_.end());
+ const State& state = it->second.stack.top();
+ if (state.enabled) {
+ printf("BAR\t%d\t%s_%d\t%f\n", figure, name.c_str(), flow_id, value);
+ }
+}
+
+void Logging::PlotBaselineBar(int figure,
+ const std::string& name,
+ double value,
+ int flow_id) {
+ MutexLock lock(&mutex_);
+ ThreadMap::iterator it = thread_map_.find(rtc::CurrentThreadId());
+ RTC_DCHECK(it != thread_map_.end());
+ const State& state = it->second.stack.top();
+ if (state.enabled) {
+ printf("BASELINE\t%d\t%s_%d\t%f\n", figure, name.c_str(), flow_id, value);
+ }
+}
+
+void Logging::PlotErrorBar(int figure,
+ const std::string& name,
+ double value,
+ double ylow,
+ double yhigh,
+ const std::string& error_title,
+ int flow_id) {
+ MutexLock lock(&mutex_);
+ ThreadMap::iterator it = thread_map_.find(rtc::CurrentThreadId());
+ RTC_DCHECK(it != thread_map_.end());
+ const State& state = it->second.stack.top();
+ if (state.enabled) {
+ printf("ERRORBAR\t%d\t%s_%d\t%f\t%f\t%f\t%s\n", figure, name.c_str(),
+ flow_id, value, ylow, yhigh, error_title.c_str());
+ }
+}
+
+void Logging::PlotLimitErrorBar(int figure,
+ const std::string& name,
+ double value,
+ double ylow,
+ double yhigh,
+ const std::string& error_title,
+ double ymax,
+ const std::string& limit_title,
+ int flow_id) {
+ MutexLock lock(&mutex_);
+ ThreadMap::iterator it = thread_map_.find(rtc::CurrentThreadId());
+ RTC_DCHECK(it != thread_map_.end());
+ const State& state = it->second.stack.top();
+ if (state.enabled) {
+ printf("LIMITERRORBAR\t%d\t%s_%d\t%f\t%f\t%f\t%s\t%f\t%s\n", figure,
+ name.c_str(), flow_id, value, ylow, yhigh, error_title.c_str(), ymax,
+ limit_title.c_str());
+ }
+}
+
+void Logging::PlotLabel(int figure,
+ const std::string& title,
+ const std::string& y_label,
+ int num_flows) {
+ MutexLock lock(&mutex_);
+ ThreadMap::iterator it = thread_map_.find(rtc::CurrentThreadId());
+ RTC_DCHECK(it != thread_map_.end());
+ const State& state = it->second.stack.top();
+ if (state.enabled) {
+ printf("LABEL\t%d\t%s\t%s\t%d\n", figure, title.c_str(), y_label.c_str(),
+ num_flows);
+ }
+}
+
+Logging::Logging() : thread_map_() {}
+
+Logging::~Logging() = default;
+
+Logging::State::State() : tag(""), timestamp_ms(0), enabled(true) {}
+
+Logging::State::State(const std::string& tag,
+ int64_t timestamp_ms,
+ bool enabled)
+ : tag(tag), timestamp_ms(timestamp_ms), enabled(enabled) {}
+
+void Logging::State::MergePrevious(const State& previous) {
+ if (tag.empty()) {
+ tag = previous.tag;
+ } else if (!previous.tag.empty()) {
+ tag = previous.tag + "_" + tag;
+ }
+ timestamp_ms = std::max(previous.timestamp_ms, timestamp_ms);
+ enabled = previous.enabled && enabled;
+}
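+
+// For illustration (hypothetical values): merging a previous state
+// {tag: "stream", timestamp_ms: 100} into {tag: "0001", timestamp_ms: -1}
+// yields {tag: "stream_0001", timestamp_ms: 100}; a timestamp of -1 (inherit)
+// always loses to the previous timestamp via std::max().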
+
+void Logging::PushState(const std::string& append_to_tag,
+ int64_t timestamp_ms,
+ bool enabled) {
+ MutexLock lock(&mutex_);
+ State new_state(append_to_tag, timestamp_ms, enabled);
+ ThreadState* thread_state = &thread_map_[rtc::CurrentThreadId()];
+ std::stack<State>* stack = &thread_state->stack;
+ if (stack->empty()) {
+ new_state.MergePrevious(thread_state->global_state);
+ } else {
+ new_state.MergePrevious(stack->top());
+ }
+ stack->push(new_state);
+}
+
+void Logging::PopState() {
+ MutexLock lock(&mutex_);
+ ThreadMap::iterator it = thread_map_.find(rtc::CurrentThreadId());
+ RTC_DCHECK(it != thread_map_.end());
+ std::stack<State>* stack = &it->second.stack;
+ int64_t newest_timestamp_ms = stack->top().timestamp_ms;
+ stack->pop();
+ if (!stack->empty()) {
+ State* state = &stack->top();
+ // Update time so that next log/plot will use the latest time seen so far
+ // in this call tree.
+ state->timestamp_ms = std::max(state->timestamp_ms, newest_timestamp_ms);
+ }
+}
+} // namespace bwe
+} // namespace testing
+} // namespace webrtc
+
+#endif // BWE_TEST_LOGGING_COMPILE_TIME_ENABLE
diff --git a/third_party/libwebrtc/modules/remote_bitrate_estimator/test/bwe_test_logging.h b/third_party/libwebrtc/modules/remote_bitrate_estimator/test/bwe_test_logging.h
new file mode 100644
index 0000000000..49e1e716b2
--- /dev/null
+++ b/third_party/libwebrtc/modules/remote_bitrate_estimator/test/bwe_test_logging.h
@@ -0,0 +1,360 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_REMOTE_BITRATE_ESTIMATOR_TEST_BWE_TEST_LOGGING_H_
+#define MODULES_REMOTE_BITRATE_ESTIMATOR_TEST_BWE_TEST_LOGGING_H_
+
+// To enable BWE logging, run this command from trunk/ :
+// build/gyp_chromium --depth=. webrtc/modules/modules.gyp
+// -Denable_bwe_test_logging=1
+#ifndef BWE_TEST_LOGGING_COMPILE_TIME_ENABLE
+#define BWE_TEST_LOGGING_COMPILE_TIME_ENABLE 0
+#endif // BWE_TEST_LOGGING_COMPILE_TIME_ENABLE
+
+// BWE logging allows you to insert dynamically named log/plot points in the
+// call tree. E.g. the function:
+// void f1() {
+// BWE_TEST_LOGGING_TIME(clock_->TimeInMilliseconds());
+// BWE_TEST_LOGGING_CONTEXT("stream");
+// for (uint32_t i=0; i<4; ++i) {
+// BWE_TEST_LOGGING_ENABLE(i & 1);
+// BWE_TEST_LOGGING_CONTEXT(i);
+// BWE_TEST_LOGGING_LOG1("weight", "%f tonnes", weights_[i]);
+// for (float j=0.0f; j<1.0; j+=0.4f) {
+// BWE_TEST_LOGGING_PLOT(0, "bps", -1, j);
+// }
+// }
+// }
+//
+// Might produce the output:
+// stream_00000001_weight 13.000000 tonnes
+// PLOT stream_00000001_bps 1.000000 0.000000
+// PLOT stream_00000001_bps 1.000000 0.400000
+// PLOT stream_00000001_bps 1.000000 0.800000
+// stream_00000003_weight 39.000000 tonnes
+// PLOT stream_00000003_bps 1.000000 0.000000
+// PLOT stream_00000003_bps 1.000000 0.400000
+// PLOT stream_00000003_bps 1.000000 0.800000
+//
+// Log *contexts* are names concatenated with '_' between them, with the name
+// of the logged/plotted string/value last. Plot *time* is inherited down the
+// tree. A branch is enabled by default but can be *disabled* to reduce output.
+// The difference between the RTC_LOG and PLOT macros is that PLOT prefixes the
+// line so it can be easily filtered, plus it outputs the current time.
+
+#if !(BWE_TEST_LOGGING_COMPILE_TIME_ENABLE)
+
+// Set a thread-global base logging context. This name will be prepended to all
+// hierarchical contexts.
+// `name` is a char*, std::string or uint32_t to name the context.
+#define BWE_TEST_LOGGING_GLOBAL_CONTEXT(name)
+
+// Thread-globally allow/disallow logging.
+// `enabled` is expected to be a bool.
+#define BWE_TEST_LOGGING_GLOBAL_ENABLE(enabled)
+
+// Insert a (hierarchical) logging context.
+// `name` is a char*, std::string or uint32_t to name the context.
+#define BWE_TEST_LOGGING_CONTEXT(name)
+
+// Allow/disallow logging down the call tree from this point. Logging must be
+// enabled all the way up to the root of the call tree for output to be
+// produced.
+// `enabled` is expected to be a bool.
+#define BWE_TEST_LOGGING_ENABLE(enabled)
+
+// Set current time (only affects PLOT output). Down the call tree, the latest
+// time set always takes precedence.
+// `time` is an int64_t time in ms, or -1 to inherit time from previous context.
+#define BWE_TEST_LOGGING_TIME(time)
+
+// Print to stdout, e.g.:
+// Context1_Context2_Name printf-formatted-string
+// `name` is a char*, std::string or uint32_t to name the log line.
+// `format` is a printf format string.
+// `_1`...`_5` are arguments for printf.
+#define BWE_TEST_LOGGING_LOG1(name, format, _1)
+#define BWE_TEST_LOGGING_LOG2(name, format, _1, _2)
+#define BWE_TEST_LOGGING_LOG3(name, format, _1, _2, _3)
+#define BWE_TEST_LOGGING_LOG4(name, format, _1, _2, _3, _4)
+#define BWE_TEST_LOGGING_LOG5(name, format, _1, _2, _3, _4, _5)
+
+// Print to stdout in tab-separated format suitable for plotting, e.g.:
+// PLOT figure Context1_Context2_Name time value
+// `figure` is a figure id. Different figures are plotted in different windows.
+// `name` is a char*, std::string or uint32_t to name the plotted value.
+// `time` is an int64_t time in ms, or -1 to inherit time from previous context.
+// `value` is a double precision float to be plotted.
+// `ssrc` identifies the source of a stream.
+// `alg_name` is an optional string naming the algorithm.
+#define BWE_TEST_LOGGING_PLOT(figure, name, time, value)
+#define BWE_TEST_LOGGING_PLOT_WITH_NAME(figure, name, time, value, alg_name)
+#define BWE_TEST_LOGGING_PLOT_WITH_SSRC(figure, name, time, value, ssrc)
+#define BWE_TEST_LOGGING_PLOT_WITH_NAME_AND_SSRC(figure, name, time, value, \
+ ssrc, alg_name)
+
+// Print to stdout in tab-separated format suitable for plotting, e.g.:
+// BAR figure Context1_Context2_Name_flowid value
+// `figure` is a figure id. Different figures are plotted in different windows.
+// `name` is a char*, std::string or uint32_t to name the plotted value.
+// `value` is a double precision float to be plotted.
+// `ylow` and `yhigh` are double precision floats for the error line.
+// `error_title` is a string used as the error label.
+// `ymax` is a double precision float for the horizontal limit line.
+// `limit_title` is a string used as the limit label.
+#define BWE_TEST_LOGGING_BAR(figure, name, value, flow_id)
+#define BWE_TEST_LOGGING_ERRORBAR(figure, name, value, ylow, yhigh, \
+ error_title, flow_id)
+#define BWE_TEST_LOGGING_LIMITERRORBAR( \
+ figure, name, value, ylow, yhigh, error_title, ymax, limit_title, flow_id)
+
+#define BWE_TEST_LOGGING_BASELINEBAR(figure, name, value, flow_id)
+
+// `num_flows` is an integer referring to the number of RMCAT flows in the
+// scenario.
+// Define `x_label` and `y_label` for plots.
+#define BWE_TEST_LOGGING_LABEL(figure, x_label, y_label, num_flows)
+
+#else // BWE_TEST_LOGGING_COMPILE_TIME_ENABLE
+
+#include <map>
+#include <memory>
+#include <stack>
+#include <string>
+
+#include "rtc_base/synchronization/mutex.h"
+
+#define BWE_TEST_LOGGING_GLOBAL_CONTEXT(name) \
+ do { \
+ webrtc::testing::bwe::Logging::GetInstance()->SetGlobalContext(name); \
+ } while (0)
+
+#define BWE_TEST_LOGGING_GLOBAL_ENABLE(enabled) \
+ do { \
+ webrtc::testing::bwe::Logging::GetInstance()->SetGlobalEnable(enabled); \
+ } while (0)
+
+#define __BWE_TEST_LOGGING_CONTEXT_NAME(ctx, line) ctx##line
+#define __BWE_TEST_LOGGING_CONTEXT_DECLARE(ctx, line, name, time, enabled) \
+ webrtc::testing::bwe::Logging::Context __BWE_TEST_LOGGING_CONTEXT_NAME( \
+ ctx, line)(name, time, enabled)
+
+#define BWE_TEST_LOGGING_CONTEXT(name) \
+ __BWE_TEST_LOGGING_CONTEXT_DECLARE(__bwe_log_, __LINE__, name, -1, true)
+#define BWE_TEST_LOGGING_ENABLE(enabled) \
+ __BWE_TEST_LOGGING_CONTEXT_DECLARE(__bwe_log_, __LINE__, "", -1, \
+ static_cast<bool>(enabled))
+#define BWE_TEST_LOGGING_TIME(time) \
+ __BWE_TEST_LOGGING_CONTEXT_DECLARE(__bwe_log_, __LINE__, "", \
+ static_cast<int64_t>(time), true)
+
+#define BWE_TEST_LOGGING_LOG1(name, format, _1) \
+ do { \
+ BWE_TEST_LOGGING_CONTEXT(name); \
+ webrtc::testing::bwe::Logging::GetInstance()->Log(format, _1); \
+ } while (0)
+#define BWE_TEST_LOGGING_LOG2(name, format, _1, _2) \
+ do { \
+ BWE_TEST_LOGGING_CONTEXT(name); \
+ webrtc::testing::bwe::Logging::GetInstance()->Log(format, _1, _2); \
+ } while (0)
+#define BWE_TEST_LOGGING_LOG3(name, format, _1, _2, _3) \
+ do { \
+ BWE_TEST_LOGGING_CONTEXT(name); \
+ webrtc::testing::bwe::Logging::GetInstance()->Log(format, _1, _2, _3); \
+ } while (0)
+#define BWE_TEST_LOGGING_LOG4(name, format, _1, _2, _3, _4) \
+ do { \
+ BWE_TEST_LOGGING_CONTEXT(name); \
+ webrtc::testing::bwe::Logging::GetInstance()->Log(format, _1, _2, _3, _4); \
+ } while (0)
+#define BWE_TEST_LOGGING_LOG5(name, format, _1, _2, _3, _4, _5) \
+ do { \
+ BWE_TEST_LOGGING_CONTEXT(name); \
+ webrtc::testing::bwe::Logging::GetInstance()->Log(format, _1, _2, _3, _4, \
+ _5); \
+ } while (0)
+
+#define BWE_TEST_LOGGING_PLOT(figure, name, time, value) \
+ do { \
+ __BWE_TEST_LOGGING_CONTEXT_DECLARE(__bwe_log_, __PLOT__, name, \
+ static_cast<int64_t>(time), true); \
+ webrtc::testing::bwe::Logging::GetInstance()->Plot(figure, name, value); \
+ } while (0)
+
+#define BWE_TEST_LOGGING_PLOT_WITH_NAME(figure, name, time, value, alg_name) \
+ do { \
+ __BWE_TEST_LOGGING_CONTEXT_DECLARE(__bwe_log_, __PLOT__, name, \
+ static_cast<int64_t>(time), true); \
+ webrtc::testing::bwe::Logging::GetInstance()->Plot(figure, name, value, \
+ alg_name); \
+ } while (0)
+
+#define BWE_TEST_LOGGING_PLOT_WITH_SSRC(figure, name, time, value, ssrc) \
+ do { \
+ __BWE_TEST_LOGGING_CONTEXT_DECLARE(__bwe_log_, __PLOT__, name, \
+ static_cast<int64_t>(time), true); \
+ webrtc::testing::bwe::Logging::GetInstance()->Plot(figure, name, value, \
+ ssrc); \
+ } while (0)
+
+#define BWE_TEST_LOGGING_PLOT_WITH_NAME_AND_SSRC(figure, name, time, value, \
+ ssrc, alg_name) \
+ do { \
+ __BWE_TEST_LOGGING_CONTEXT_DECLARE(__bwe_log_, __PLOT__, name, \
+ static_cast<int64_t>(time), true); \
+ webrtc::testing::bwe::Logging::GetInstance()->Plot(figure, name, value, \
+ ssrc, alg_name); \
+ } while (0)
+
+#define BWE_TEST_LOGGING_BAR(figure, name, value, flow_id) \
+ do { \
+ BWE_TEST_LOGGING_CONTEXT(name); \
+ webrtc::testing::bwe::Logging::GetInstance()->PlotBar(figure, name, value, \
+ flow_id); \
+ } while (0)
+
+#define BWE_TEST_LOGGING_BASELINEBAR(figure, name, value, flow_id) \
+ do { \
+ BWE_TEST_LOGGING_CONTEXT(name); \
+ webrtc::testing::bwe::Logging::GetInstance()->PlotBaselineBar( \
+ figure, name, value, flow_id); \
+ } while (0)
+
+#define BWE_TEST_LOGGING_ERRORBAR(figure, name, value, ylow, yhigh, title, \
+ flow_id) \
+ do { \
+ BWE_TEST_LOGGING_CONTEXT(name); \
+ webrtc::testing::bwe::Logging::GetInstance()->PlotErrorBar( \
+ figure, name, value, ylow, yhigh, title, flow_id); \
+ } while (0)
+
+#define BWE_TEST_LOGGING_LIMITERRORBAR( \
+ figure, name, value, ylow, yhigh, error_title, ymax, limit_title, flow_id) \
+ do { \
+ BWE_TEST_LOGGING_CONTEXT(name); \
+ webrtc::testing::bwe::Logging::GetInstance()->PlotLimitErrorBar( \
+ figure, name, value, ylow, yhigh, error_title, ymax, limit_title, \
+ flow_id); \
+ } while (0)
+
+#define BWE_TEST_LOGGING_LABEL(figure, title, y_label, num_flows) \
+ do { \
+ BWE_TEST_LOGGING_CONTEXT(title); \
+ webrtc::testing::bwe::Logging::GetInstance()->PlotLabel( \
+ figure, title, y_label, num_flows); \
+ } while (0)
+
+namespace webrtc {
+namespace testing {
+namespace bwe {
+
+class Logging {
+ public:
+ class Context {
+ public:
+ Context(uint32_t name, int64_t timestamp_ms, bool enabled);
+ Context(const std::string& name, int64_t timestamp_ms, bool enabled);
+ Context(const char* name, int64_t timestamp_ms, bool enabled);
+
+ Context() = delete;
+ Context(const Context&) = delete;
+ Context& operator=(const Context&) = delete;
+ ~Context();
+ };
+
+ static Logging* GetInstance();
+
+ void SetGlobalContext(uint32_t name);
+ void SetGlobalContext(const std::string& name);
+ void SetGlobalContext(const char* name);
+ void SetGlobalEnable(bool enabled);
+
+#if defined(__GNUC__)
+ // Note: Implicit `this` argument counts as the first argument.
+ __attribute__((__format__(__printf__, 2, 3)))
+#endif
+ void
+ Log(const char format[], ...);
+ void Plot(int figure, const std::string& name, double value);
+ void Plot(int figure,
+ const std::string& name,
+ double value,
+ const std::string& alg_name);
+ void Plot(int figure, const std::string& name, double value, uint32_t ssrc);
+ void Plot(int figure,
+ const std::string& name,
+ double value,
+ uint32_t ssrc,
+ const std::string& alg_name);
+ void PlotBar(int figure, const std::string& name, double value, int flow_id);
+ void PlotBaselineBar(int figure,
+ const std::string& name,
+ double value,
+ int flow_id);
+ void PlotErrorBar(int figure,
+ const std::string& name,
+ double value,
+ double ylow,
+ double yhigh,
+ const std::string& error_title,
+ int flow_id);
+
+ void PlotLimitErrorBar(int figure,
+ const std::string& name,
+ double value,
+ double ylow,
+ double yhigh,
+ const std::string& error_title,
+ double ymax,
+ const std::string& limit_title,
+ int flow_id);
+ void PlotLabel(int figure,
+ const std::string& title,
+ const std::string& y_label,
+ int num_flows);
+
+ private:
+ struct State {
+ State();
+ State(const std::string& new_tag, int64_t timestamp_ms, bool enabled);
+ void MergePrevious(const State& previous);
+
+ std::string tag;
+ int64_t timestamp_ms;
+ bool enabled;
+ };
+ struct ThreadState {
+ ThreadState();
+ ~ThreadState();
+ State global_state;
+ std::stack<State> stack;
+ };
+ typedef std::map<uint32_t, ThreadState> ThreadMap;
+
+ Logging();
+ ~Logging();
+
+ Logging(const Logging&) = delete;
+ Logging& operator=(const Logging&) = delete;
+
+ void PushState(const std::string& append_to_tag,
+ int64_t timestamp_ms,
+ bool enabled);
+ void PopState();
+
+ Mutex mutex_;
+ ThreadMap thread_map_;
+};
+} // namespace bwe
+} // namespace testing
+} // namespace webrtc
+
+#endif // BWE_TEST_LOGGING_COMPILE_TIME_ENABLE
+#endif // MODULES_REMOTE_BITRATE_ESTIMATOR_TEST_BWE_TEST_LOGGING_H_
diff --git a/third_party/libwebrtc/modules/remote_bitrate_estimator/tools/bwe_rtp.cc b/third_party/libwebrtc/modules/remote_bitrate_estimator/tools/bwe_rtp.cc
new file mode 100644
index 0000000000..403f81fd03
--- /dev/null
+++ b/third_party/libwebrtc/modules/remote_bitrate_estimator/tools/bwe_rtp.cc
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/remote_bitrate_estimator/tools/bwe_rtp.h"
+
+#include <stdio.h>
+
+#include <set>
+#include <sstream>
+#include <string>
+
+#include "absl/flags/flag.h"
+#include "absl/flags/parse.h"
+#include "modules/rtp_rtcp/include/rtp_header_extension_map.h"
+#include "test/rtp_file_reader.h"
+
+ABSL_FLAG(std::string,
+ extension_type,
+ "abs",
+ "Extension type, either abs for absolute send time or tsoffset "
+ "for timestamp offset.");
+std::string ExtensionType() {
+ return absl::GetFlag(FLAGS_extension_type);
+}
+
+ABSL_FLAG(int, extension_id, 3, "Extension id.");
+int ExtensionId() {
+ return absl::GetFlag(FLAGS_extension_id);
+}
+
+ABSL_FLAG(std::string, input_file, "", "Input file.");
+std::string InputFile() {
+ return absl::GetFlag(FLAGS_input_file);
+}
+
+ABSL_FLAG(std::string,
+ ssrc_filter,
+ "",
+ "Comma-separated list of SSRCs in hexadecimal which are to be "
+ "used as input to the BWE (only applicable to pcap files).");
+std::set<uint32_t> SsrcFilter() {
+ std::string ssrc_filter_string = absl::GetFlag(FLAGS_ssrc_filter);
+ if (ssrc_filter_string.empty())
+ return std::set<uint32_t>();
+ std::stringstream ss;
+ std::string ssrc_filter = ssrc_filter_string;
+ std::set<uint32_t> ssrcs;
+
+  // Parse the SSRCs in hexadecimal format.
+ ss << std::hex << ssrc_filter;
+ uint32_t ssrc;
+ while (ss >> ssrc) {
+ ssrcs.insert(ssrc);
+ ss.ignore(1, ',');
+ }
+ return ssrcs;
+}
+
+bool ParseArgsAndSetupRtpReader(
+ int argc,
+ char** argv,
+ std::unique_ptr<webrtc::test::RtpFileReader>& rtp_reader,
+ webrtc::RtpHeaderExtensionMap& rtp_header_extensions) {
+ absl::ParseCommandLine(argc, argv);
+ std::string filename = InputFile();
+
+ std::set<uint32_t> ssrc_filter = SsrcFilter();
+ fprintf(stderr, "Filter on SSRC: ");
+ for (auto& s : ssrc_filter) {
+ fprintf(stderr, "0x%08x, ", s);
+ }
+ fprintf(stderr, "\n");
+  const size_t dot_pos = filename.find_last_of('.');
+  if (dot_pos != std::string::npos && filename.substr(dot_pos) == ".pcap") {
+ fprintf(stderr, "Opening as pcap\n");
+ rtp_reader.reset(webrtc::test::RtpFileReader::Create(
+ webrtc::test::RtpFileReader::kPcap, filename.c_str(), SsrcFilter()));
+ } else {
+ fprintf(stderr, "Opening as rtp\n");
+ rtp_reader.reset(webrtc::test::RtpFileReader::Create(
+ webrtc::test::RtpFileReader::kRtpDump, filename.c_str()));
+ }
+ if (!rtp_reader) {
+ fprintf(stderr, "Cannot open input file %s\n", filename.c_str());
+ return false;
+ }
+ fprintf(stderr, "Input file: %s\n\n", filename.c_str());
+
+ webrtc::RTPExtensionType extension = webrtc::kRtpExtensionAbsoluteSendTime;
+ if (ExtensionType() == "tsoffset") {
+ extension = webrtc::kRtpExtensionTransmissionTimeOffset;
+ fprintf(stderr, "Extension: toffset\n");
+ } else if (ExtensionType() == "abs") {
+ fprintf(stderr, "Extension: abs\n");
+ } else {
+ fprintf(stderr, "Unknown extension type\n");
+ return false;
+ }
+
+ rtp_header_extensions.RegisterByType(ExtensionId(), extension);
+
+ return true;
+}
diff --git a/third_party/libwebrtc/modules/remote_bitrate_estimator/tools/bwe_rtp.h b/third_party/libwebrtc/modules/remote_bitrate_estimator/tools/bwe_rtp.h
new file mode 100644
index 0000000000..3b161db37b
--- /dev/null
+++ b/third_party/libwebrtc/modules/remote_bitrate_estimator/tools/bwe_rtp.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_REMOTE_BITRATE_ESTIMATOR_TOOLS_BWE_RTP_H_
+#define MODULES_REMOTE_BITRATE_ESTIMATOR_TOOLS_BWE_RTP_H_
+
+#include <memory>
+
+#include "modules/rtp_rtcp/include/rtp_header_extension_map.h"
+#include "test/rtp_file_reader.h"
+
+bool ParseArgsAndSetupRtpReader(
+ int argc,
+ char** argv,
+ std::unique_ptr<webrtc::test::RtpFileReader>& rtp_reader,
+ webrtc::RtpHeaderExtensionMap& rtp_header_extensions);
+
+#endif // MODULES_REMOTE_BITRATE_ESTIMATOR_TOOLS_BWE_RTP_H_
diff --git a/third_party/libwebrtc/modules/remote_bitrate_estimator/tools/rtp_to_text.cc b/third_party/libwebrtc/modules/remote_bitrate_estimator/tools/rtp_to_text.cc
new file mode 100644
index 0000000000..e8dc59f740
--- /dev/null
+++ b/third_party/libwebrtc/modules/remote_bitrate_estimator/tools/rtp_to_text.cc
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stdio.h>
+#include <string.h>
+
+#include <memory>
+
+#include "modules/remote_bitrate_estimator/tools/bwe_rtp.h"
+#include "modules/rtp_rtcp/include/rtp_header_extension_map.h"
+#include "modules/rtp_rtcp/source/rtp_header_extensions.h"
+#include "modules/rtp_rtcp/source/rtp_packet.h"
+#include "rtc_base/strings/string_builder.h"
+#include "test/rtp_file_reader.h"
+
+int main(int argc, char* argv[]) {
+ std::unique_ptr<webrtc::test::RtpFileReader> reader;
+ webrtc::RtpHeaderExtensionMap rtp_header_extensions;
+ if (!ParseArgsAndSetupRtpReader(argc, argv, reader, rtp_header_extensions)) {
+ return -1;
+ }
+
+ bool arrival_time_only = (argc >= 5 && strncmp(argv[4], "-t", 2) == 0);
+
+ fprintf(stdout,
+ "seqnum timestamp ts_offset abs_sendtime recvtime "
+ "markerbit ssrc size original_size\n");
+ int packet_counter = 0;
+ int non_zero_abs_send_time = 0;
+ int non_zero_ts_offsets = 0;
+ webrtc::test::RtpPacket packet;
+ while (reader->NextPacket(&packet)) {
+ webrtc::RtpPacket header(&rtp_header_extensions);
+ header.Parse(packet.data, packet.length);
+ uint32_t abs_send_time = 0;
+ if (header.GetExtension<webrtc::AbsoluteSendTime>(&abs_send_time) &&
+ abs_send_time != 0)
+ ++non_zero_abs_send_time;
+ int32_t toffset = 0;
+ if (header.GetExtension<webrtc::TransmissionOffset>(&toffset) &&
+ toffset != 0)
+ ++non_zero_ts_offsets;
+ if (arrival_time_only) {
+ rtc::StringBuilder ss;
+ ss << static_cast<int64_t>(packet.time_ms) * 1000000;
+ fprintf(stdout, "%s\n", ss.str().c_str());
+ } else {
+ fprintf(stdout, "%u %u %d %u %u %d %u %zu %zu\n", header.SequenceNumber(),
+ header.Timestamp(), toffset, abs_send_time, packet.time_ms,
+ header.Marker(), header.Ssrc(), packet.length,
+ packet.original_length);
+ }
+ ++packet_counter;
+ }
+ fprintf(stderr, "Parsed %d packets\n", packet_counter);
+ fprintf(stderr, "Packets with non-zero absolute send time: %d\n",
+ non_zero_abs_send_time);
+ fprintf(stderr, "Packets with non-zero timestamp offset: %d\n",
+ non_zero_ts_offsets);
+ return 0;
+}
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/BUILD.gn b/third_party/libwebrtc/modules/rtp_rtcp/BUILD.gn
new file mode 100644
index 0000000000..b49e0633f1
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/BUILD.gn
@@ -0,0 +1,673 @@
+# Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+import("../../webrtc.gni")
+
+rtc_library("rtp_rtcp_format") {
+ visibility = [ "*" ]
+ public = [
+ "include/report_block_data.h",
+ "include/rtcp_statistics.h",
+ "include/rtp_cvo.h",
+ "include/rtp_header_extension_map.h",
+ "include/rtp_packet_sender.h",
+ "include/rtp_rtcp_defines.h",
+ "source/byte_io.h",
+ "source/rtcp_packet.h",
+ "source/rtcp_packet/app.h",
+ "source/rtcp_packet/bye.h",
+ "source/rtcp_packet/common_header.h",
+ "source/rtcp_packet/compound_packet.h",
+ "source/rtcp_packet/dlrr.h",
+ "source/rtcp_packet/extended_reports.h",
+ "source/rtcp_packet/fir.h",
+ "source/rtcp_packet/loss_notification.h",
+ "source/rtcp_packet/nack.h",
+ "source/rtcp_packet/pli.h",
+ "source/rtcp_packet/psfb.h",
+ "source/rtcp_packet/rapid_resync_request.h",
+ "source/rtcp_packet/receiver_report.h",
+ "source/rtcp_packet/remb.h",
+ "source/rtcp_packet/remote_estimate.h",
+ "source/rtcp_packet/report_block.h",
+ "source/rtcp_packet/rrtr.h",
+ "source/rtcp_packet/rtpfb.h",
+ "source/rtcp_packet/sdes.h",
+ "source/rtcp_packet/sender_report.h",
+ "source/rtcp_packet/target_bitrate.h",
+ "source/rtcp_packet/tmmb_item.h",
+ "source/rtcp_packet/tmmbn.h",
+ "source/rtcp_packet/tmmbr.h",
+ "source/rtcp_packet/transport_feedback.h",
+ "source/rtp_dependency_descriptor_extension.h",
+ "source/rtp_generic_frame_descriptor.h",
+ "source/rtp_generic_frame_descriptor_extension.h",
+ "source/rtp_header_extensions.h",
+ "source/rtp_packet.h",
+ "source/rtp_packet_received.h",
+ "source/rtp_packet_to_send.h",
+ "source/rtp_util.h",
+ "source/rtp_video_layers_allocation_extension.h",
+ ]
+ sources = [
+ "include/report_block_data.cc",
+ "include/rtp_rtcp_defines.cc",
+ "source/rtcp_packet.cc",
+ "source/rtcp_packet/app.cc",
+ "source/rtcp_packet/bye.cc",
+ "source/rtcp_packet/common_header.cc",
+ "source/rtcp_packet/compound_packet.cc",
+ "source/rtcp_packet/dlrr.cc",
+ "source/rtcp_packet/extended_reports.cc",
+ "source/rtcp_packet/fir.cc",
+ "source/rtcp_packet/loss_notification.cc",
+ "source/rtcp_packet/nack.cc",
+ "source/rtcp_packet/pli.cc",
+ "source/rtcp_packet/psfb.cc",
+ "source/rtcp_packet/rapid_resync_request.cc",
+ "source/rtcp_packet/receiver_report.cc",
+ "source/rtcp_packet/remb.cc",
+ "source/rtcp_packet/remote_estimate.cc",
+ "source/rtcp_packet/report_block.cc",
+ "source/rtcp_packet/rrtr.cc",
+ "source/rtcp_packet/rtpfb.cc",
+ "source/rtcp_packet/sdes.cc",
+ "source/rtcp_packet/sender_report.cc",
+ "source/rtcp_packet/target_bitrate.cc",
+ "source/rtcp_packet/tmmb_item.cc",
+ "source/rtcp_packet/tmmbn.cc",
+ "source/rtcp_packet/tmmbr.cc",
+ "source/rtcp_packet/transport_feedback.cc",
+ "source/rtp_dependency_descriptor_extension.cc",
+ "source/rtp_dependency_descriptor_reader.cc",
+ "source/rtp_dependency_descriptor_reader.h",
+ "source/rtp_dependency_descriptor_writer.cc",
+ "source/rtp_dependency_descriptor_writer.h",
+ "source/rtp_generic_frame_descriptor.cc",
+ "source/rtp_generic_frame_descriptor_extension.cc",
+ "source/rtp_header_extension_map.cc",
+ "source/rtp_header_extensions.cc",
+ "source/rtp_packet.cc",
+ "source/rtp_packet_received.cc",
+ "source/rtp_packet_to_send.cc",
+ "source/rtp_util.cc",
+ "source/rtp_video_layers_allocation_extension.cc",
+ ]
+
+ deps = [
+ "..:module_api_public",
+ "../../api:array_view",
+ "../../api:function_view",
+ "../../api:refcountedbase",
+ "../../api:rtp_headers",
+ "../../api:rtp_parameters",
+ "../../api:scoped_refptr",
+ "../../api/audio_codecs:audio_codecs_api",
+ "../../api/transport:network_control",
+ "../../api/transport/rtp:dependency_descriptor",
+ "../../api/units:time_delta",
+ "../../api/units:timestamp",
+ "../../api/video:video_frame",
+ "../../api/video:video_layers_allocation",
+ "../../api/video:video_rtp_headers",
+ "../../common_video",
+ "../../rtc_base:bit_buffer",
+ "../../rtc_base:bitstream_reader",
+ "../../rtc_base:buffer",
+ "../../rtc_base:checks",
+ "../../rtc_base:copy_on_write_buffer",
+ "../../rtc_base:divide_round",
+ "../../rtc_base:event_tracer",
+ "../../rtc_base:logging",
+ "../../rtc_base:macromagic",
+ "../../rtc_base:safe_conversions",
+ "../../rtc_base:stringutils",
+ "../../system_wrappers",
+ "../video_coding:codec_globals_headers",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/algorithm:container",
+ "//third_party/abseil-cpp/absl/strings",
+ "//third_party/abseil-cpp/absl/types:optional",
+ "//third_party/abseil-cpp/absl/types:variant",
+ ]
+}
+
+rtc_library("rtp_rtcp") {
+ visibility = [ "*" ]
+ sources = [
+ "include/flexfec_receiver.h",
+ "include/flexfec_sender.h",
+ "include/receive_statistics.h",
+ "include/remote_ntp_time_estimator.h",
+ "source/absolute_capture_time_interpolator.cc",
+ "source/absolute_capture_time_interpolator.h",
+ "source/absolute_capture_time_sender.cc",
+ "source/absolute_capture_time_sender.h",
+ "source/active_decode_targets_helper.cc",
+ "source/active_decode_targets_helper.h",
+ "source/capture_clock_offset_updater.cc",
+ "source/capture_clock_offset_updater.h",
+ "source/create_video_rtp_depacketizer.cc",
+ "source/create_video_rtp_depacketizer.h",
+ "source/dtmf_queue.cc",
+ "source/dtmf_queue.h",
+ "source/fec_private_tables_bursty.cc",
+ "source/fec_private_tables_bursty.h",
+ "source/fec_private_tables_random.cc",
+ "source/fec_private_tables_random.h",
+ "source/flexfec_header_reader_writer.cc",
+ "source/flexfec_header_reader_writer.h",
+ "source/flexfec_receiver.cc",
+ "source/flexfec_sender.cc",
+ "source/forward_error_correction.cc",
+ "source/forward_error_correction.h",
+ "source/forward_error_correction_internal.cc",
+ "source/forward_error_correction_internal.h",
+ "source/packet_loss_stats.cc",
+ "source/packet_loss_stats.h",
+ "source/packet_sequencer.cc",
+ "source/packet_sequencer.h",
+ "source/receive_statistics_impl.cc",
+ "source/receive_statistics_impl.h",
+ "source/remote_ntp_time_estimator.cc",
+ "source/rtcp_nack_stats.cc",
+ "source/rtcp_nack_stats.h",
+ "source/rtcp_receiver.cc",
+ "source/rtcp_receiver.h",
+ "source/rtcp_sender.cc",
+ "source/rtcp_sender.h",
+ "source/rtp_descriptor_authentication.cc",
+ "source/rtp_descriptor_authentication.h",
+ "source/rtp_format.cc",
+ "source/rtp_format.h",
+ "source/rtp_format_h264.cc",
+ "source/rtp_format_h264.h",
+ "source/rtp_format_video_generic.cc",
+ "source/rtp_format_video_generic.h",
+ "source/rtp_format_vp8.cc",
+ "source/rtp_format_vp8.h",
+ "source/rtp_format_vp9.cc",
+ "source/rtp_format_vp9.h",
+ "source/rtp_header_extension_size.cc",
+ "source/rtp_header_extension_size.h",
+ "source/rtp_packet_history.cc",
+ "source/rtp_packet_history.h",
+ "source/rtp_packetizer_av1.cc",
+ "source/rtp_packetizer_av1.h",
+ "source/rtp_rtcp_config.h",
+ "source/rtp_rtcp_impl2.cc",
+ "source/rtp_rtcp_impl2.h",
+ "source/rtp_rtcp_interface.h",
+ "source/rtp_sender.cc",
+ "source/rtp_sender.h",
+ "source/rtp_sender_audio.cc",
+ "source/rtp_sender_audio.h",
+ "source/rtp_sender_egress.cc",
+ "source/rtp_sender_egress.h",
+ "source/rtp_sender_video.cc",
+ "source/rtp_sender_video.h",
+ "source/rtp_sender_video_frame_transformer_delegate.cc",
+ "source/rtp_sender_video_frame_transformer_delegate.h",
+ "source/rtp_sequence_number_map.cc",
+ "source/rtp_sequence_number_map.h",
+ "source/source_tracker.cc",
+ "source/source_tracker.h",
+ "source/time_util.cc",
+ "source/time_util.h",
+ "source/tmmbr_help.cc",
+ "source/tmmbr_help.h",
+ "source/ulpfec_generator.cc",
+ "source/ulpfec_generator.h",
+ "source/ulpfec_header_reader_writer.cc",
+ "source/ulpfec_header_reader_writer.h",
+ "source/ulpfec_receiver.cc",
+ "source/ulpfec_receiver.h",
+ "source/video_fec_generator.h",
+ "source/video_rtp_depacketizer.cc",
+ "source/video_rtp_depacketizer.h",
+ "source/video_rtp_depacketizer_av1.cc",
+ "source/video_rtp_depacketizer_av1.h",
+ "source/video_rtp_depacketizer_generic.cc",
+ "source/video_rtp_depacketizer_generic.h",
+ "source/video_rtp_depacketizer_h264.cc",
+ "source/video_rtp_depacketizer_h264.h",
+ "source/video_rtp_depacketizer_raw.cc",
+ "source/video_rtp_depacketizer_raw.h",
+ "source/video_rtp_depacketizer_vp8.cc",
+ "source/video_rtp_depacketizer_vp8.h",
+ "source/video_rtp_depacketizer_vp9.cc",
+ "source/video_rtp_depacketizer_vp9.h",
+ ]
+
+ if (rtc_enable_bwe_test_logging) {
+ defines = [ "BWE_TEST_LOGGING_COMPILE_TIME_ENABLE=1" ]
+ } else {
+ defines = [ "BWE_TEST_LOGGING_COMPILE_TIME_ENABLE=0" ]
+ }
+
+ deps = [
+ ":rtp_rtcp_format",
+ ":rtp_video_header",
+ "..:module_api_public",
+ "..:module_fec_api",
+ "../../api:array_view",
+ "../../api:field_trials_view",
+ "../../api:frame_transformer_interface",
+ "../../api:function_view",
+ "../../api:libjingle_peerconnection_api",
+ "../../api:rtp_headers",
+ "../../api:rtp_packet_info",
+ "../../api:rtp_parameters",
+ "../../api:scoped_refptr",
+ "../../api:sequence_checker",
+ "../../api:transport_api",
+ "../../api/audio_codecs:audio_codecs_api",
+ "../../api/crypto:frame_encryptor_interface",
+ "../../api/rtc_event_log",
+ "../../api/task_queue:pending_task_safety_flag",
+ "../../api/task_queue:task_queue",
+ "../../api/transport:field_trial_based_config",
+ "../../api/transport/rtp:dependency_descriptor",
+ "../../api/transport/rtp:rtp_source",
+ "../../api/units:data_rate",
+ "../../api/units:time_delta",
+ "../../api/units:timestamp",
+ "../../api/video:encoded_frame",
+ "../../api/video:encoded_image",
+ "../../api/video:video_bitrate_allocation",
+ "../../api/video:video_bitrate_allocator",
+ "../../api/video:video_codec_constants",
+ "../../api/video:video_frame",
+ "../../api/video:video_frame_type",
+ "../../api/video:video_layers_allocation",
+ "../../api/video:video_rtp_headers",
+ "../../api/video_codecs:video_codecs_api",
+ "../../call:rtp_interfaces",
+ "../../common_video",
+ "../../logging:rtc_event_audio",
+ "../../logging:rtc_event_rtp_rtcp",
+ "../../modules/audio_coding:audio_coding_module_typedefs",
+ "../../rtc_base:bit_buffer",
+ "../../rtc_base:bitstream_reader",
+ "../../rtc_base:buffer",
+ "../../rtc_base:byte_buffer",
+ "../../rtc_base:checks",
+ "../../rtc_base:copy_on_write_buffer",
+ "../../rtc_base:divide_round",
+ "../../rtc_base:event_tracer",
+ "../../rtc_base:gtest_prod",
+ "../../rtc_base:logging",
+ "../../rtc_base:macromagic",
+ "../../rtc_base:mod_ops",
+ "../../rtc_base:one_time_event",
+ "../../rtc_base:race_checker",
+ "../../rtc_base:random",
+ "../../rtc_base:rate_limiter",
+ "../../rtc_base:rate_statistics",
+ "../../rtc_base:rtc_numerics",
+ "../../rtc_base:safe_conversions",
+ "../../rtc_base:safe_minmax",
+ "../../rtc_base:timeutils",
+ "../../rtc_base/containers:flat_map",
+ "../../rtc_base/experiments:field_trial_parser",
+ "../../rtc_base/synchronization:mutex",
+ "../../rtc_base/system:no_unique_address",
+ "../../rtc_base/task_utils:repeating_task",
+ "../../rtc_base/time:timestamp_extrapolator",
+ "../../system_wrappers",
+ "../../system_wrappers:metrics",
+ "../remote_bitrate_estimator",
+ "../video_coding:codec_globals_headers",
+ ]
+ if (build_with_mozilla) {
+ deps -= [ "../../api:libjingle_peerconnection_api" ]
+ }
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/algorithm:container",
+ "//third_party/abseil-cpp/absl/base:core_headers",
+ "//third_party/abseil-cpp/absl/container:inlined_vector",
+ "//third_party/abseil-cpp/absl/memory",
+ "//third_party/abseil-cpp/absl/strings",
+ "//third_party/abseil-cpp/absl/types:optional",
+ "//third_party/abseil-cpp/absl/types:variant",
+ ]
+}
+
+rtc_source_set("rtp_rtcp_legacy") {
+ sources = [
+ "include/rtp_rtcp.h",
+ "source/deprecated/deprecated_rtp_sender_egress.cc",
+ "source/deprecated/deprecated_rtp_sender_egress.h",
+ "source/rtp_rtcp_impl.cc",
+ "source/rtp_rtcp_impl.h",
+ ]
+ deps = [
+ ":rtp_rtcp",
+ ":rtp_rtcp_format",
+ "..:module_fec_api",
+ "../../api:rtp_headers",
+ "../../api:transport_api",
+ "../../api/rtc_event_log",
+ "../../api/transport:field_trial_based_config",
+ "../../api/units:data_rate",
+ "../../api/units:timestamp",
+ "../../api/video:video_bitrate_allocation",
+ "../../logging:rtc_event_rtp_rtcp",
+ "../../rtc_base:checks",
+ "../../rtc_base:gtest_prod",
+ "../../rtc_base:logging",
+ "../../rtc_base:macromagic",
+ "../../rtc_base:rate_statistics",
+ "../../rtc_base/synchronization:mutex",
+ "../../system_wrappers",
+ "../remote_bitrate_estimator",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/strings",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+}
+
+rtc_library("rtcp_transceiver") {
+ visibility = [ "*" ]
+ public = [
+ "source/rtcp_transceiver.h",
+ "source/rtcp_transceiver_config.h",
+ "source/rtcp_transceiver_impl.h",
+ ]
+ sources = [
+ "source/rtcp_transceiver.cc",
+ "source/rtcp_transceiver_config.cc",
+ "source/rtcp_transceiver_impl.cc",
+ ]
+ deps = [
+ ":rtp_rtcp",
+ ":rtp_rtcp_format",
+ "../../api:array_view",
+ "../../api:rtp_headers",
+ "../../api:transport_api",
+ "../../api/task_queue",
+ "../../api/units:data_rate",
+ "../../api/units:time_delta",
+ "../../api/units:timestamp",
+ "../../api/video:video_bitrate_allocation",
+ "../../rtc_base:checks",
+ "../../rtc_base:copy_on_write_buffer",
+ "../../rtc_base:divide_round",
+ "../../rtc_base:logging",
+ "../../rtc_base:rtc_event",
+ "../../rtc_base:timeutils",
+ "../../rtc_base/containers:flat_map",
+ "../../rtc_base/task_utils:repeating_task",
+ "../../system_wrappers",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/algorithm:container",
+ "//third_party/abseil-cpp/absl/base:core_headers",
+ "//third_party/abseil-cpp/absl/cleanup",
+ "//third_party/abseil-cpp/absl/functional:any_invocable",
+ "//third_party/abseil-cpp/absl/memory",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+}
+
+rtc_library("rtp_video_header") {
+ visibility = [ "*" ]
+ sources = [
+ "source/rtp_video_header.cc",
+ "source/rtp_video_header.h",
+ ]
+ deps = [
+ "../../api:rtp_headers",
+ "../../api/transport/rtp:dependency_descriptor",
+ "../../api/video:video_frame",
+ "../../api/video:video_frame_type",
+ "../../api/video:video_rtp_headers",
+ "../../modules/video_coding:codec_globals_headers",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/container:inlined_vector",
+ "//third_party/abseil-cpp/absl/types:optional",
+ "//third_party/abseil-cpp/absl/types:variant",
+ ]
+}
+
+rtc_library("fec_test_helper") {
+ testonly = true
+ sources = [
+ "source/fec_test_helper.cc",
+ "source/fec_test_helper.h",
+ ]
+ deps = [
+ ":rtp_rtcp",
+ ":rtp_rtcp_format",
+ "../../rtc_base:checks",
+ "../../rtc_base:random",
+ ]
+}
+
+if (rtc_include_tests) {
+rtc_library("mock_rtp_rtcp") {
+ testonly = true
+ public = [
+ "mocks/mock_recovered_packet_receiver.h",
+ "mocks/mock_rtcp_bandwidth_observer.h",
+ "mocks/mock_rtcp_rtt_stats.h",
+ "mocks/mock_rtp_rtcp.h",
+ ]
+ deps = [
+ ":rtp_rtcp",
+ ":rtp_rtcp_format",
+ "../../api/video:video_bitrate_allocation",
+ "../../rtc_base:checks",
+ "../../test:test_support",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/strings",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+}
+}
+
+rtc_library("rtp_packetizer_av1_test_helper") {
+ testonly = true
+ sources = [
+ "source/rtp_packetizer_av1_test_helper.cc",
+ "source/rtp_packetizer_av1_test_helper.h",
+ ]
+}
+
+if (rtc_include_tests) {
+ if (!build_with_chromium) {
+ rtc_executable("test_packet_masks_metrics") {
+ testonly = true
+
+ sources = [
+ "test/testFec/average_residual_loss_xor_codes.h",
+ "test/testFec/test_packet_masks_metrics.cc",
+ ]
+
+ deps = [
+ ":rtp_rtcp",
+ "../../test:fileutils",
+ "../../test:test_main",
+ "../../test:test_support",
+ "//testing/gtest",
+ ]
+ } # test_packet_masks_metrics
+ }
+
+ rtc_library("rtp_rtcp_modules_tests") {
+ testonly = true
+
+ sources = [ "test/testFec/test_fec.cc" ]
+ deps = [
+ ":rtp_rtcp",
+ ":rtp_rtcp_format",
+ "../../rtc_base:random",
+ "../../test:fileutils",
+ "../../test:test_support",
+ ]
+ }
+
+ rtc_library("rtp_rtcp_unittests") {
+ testonly = true
+
+ sources = [
+ "source/absolute_capture_time_interpolator_unittest.cc",
+ "source/absolute_capture_time_sender_unittest.cc",
+ "source/active_decode_targets_helper_unittest.cc",
+ "source/byte_io_unittest.cc",
+ "source/capture_clock_offset_updater_unittest.cc",
+ "source/fec_private_tables_bursty_unittest.cc",
+ "source/flexfec_header_reader_writer_unittest.cc",
+ "source/flexfec_receiver_unittest.cc",
+ "source/flexfec_sender_unittest.cc",
+ "source/nack_rtx_unittest.cc",
+ "source/packet_loss_stats_unittest.cc",
+ "source/packet_sequencer_unittest.cc",
+ "source/receive_statistics_unittest.cc",
+ "source/remote_ntp_time_estimator_unittest.cc",
+ "source/rtcp_nack_stats_unittest.cc",
+ "source/rtcp_packet/app_unittest.cc",
+ "source/rtcp_packet/bye_unittest.cc",
+ "source/rtcp_packet/common_header_unittest.cc",
+ "source/rtcp_packet/compound_packet_unittest.cc",
+ "source/rtcp_packet/dlrr_unittest.cc",
+ "source/rtcp_packet/extended_reports_unittest.cc",
+ "source/rtcp_packet/fir_unittest.cc",
+ "source/rtcp_packet/loss_notification_unittest.cc",
+ "source/rtcp_packet/nack_unittest.cc",
+ "source/rtcp_packet/pli_unittest.cc",
+ "source/rtcp_packet/rapid_resync_request_unittest.cc",
+ "source/rtcp_packet/receiver_report_unittest.cc",
+ "source/rtcp_packet/remb_unittest.cc",
+ "source/rtcp_packet/remote_estimate_unittest.cc",
+ "source/rtcp_packet/report_block_unittest.cc",
+ "source/rtcp_packet/rrtr_unittest.cc",
+ "source/rtcp_packet/sdes_unittest.cc",
+ "source/rtcp_packet/sender_report_unittest.cc",
+ "source/rtcp_packet/target_bitrate_unittest.cc",
+ "source/rtcp_packet/tmmbn_unittest.cc",
+ "source/rtcp_packet/tmmbr_unittest.cc",
+ "source/rtcp_packet/transport_feedback_unittest.cc",
+ "source/rtcp_packet_unittest.cc",
+ "source/rtcp_receiver_unittest.cc",
+ "source/rtcp_sender_unittest.cc",
+ "source/rtcp_transceiver_impl_unittest.cc",
+ "source/rtcp_transceiver_unittest.cc",
+ "source/rtp_dependency_descriptor_extension_unittest.cc",
+ "source/rtp_fec_unittest.cc",
+ "source/rtp_format_h264_unittest.cc",
+ "source/rtp_format_unittest.cc",
+ "source/rtp_format_video_generic_unittest.cc",
+ "source/rtp_format_vp8_test_helper.cc",
+ "source/rtp_format_vp8_test_helper.h",
+ "source/rtp_format_vp8_unittest.cc",
+ "source/rtp_format_vp9_unittest.cc",
+ "source/rtp_generic_frame_descriptor_extension_unittest.cc",
+ "source/rtp_header_extension_map_unittest.cc",
+ "source/rtp_header_extension_size_unittest.cc",
+ "source/rtp_packet_history_unittest.cc",
+ "source/rtp_packet_unittest.cc",
+ "source/rtp_packetizer_av1_unittest.cc",
+ "source/rtp_rtcp_impl2_unittest.cc",
+ "source/rtp_rtcp_impl_unittest.cc",
+ "source/rtp_sender_audio_unittest.cc",
+ "source/rtp_sender_egress_unittest.cc",
+ "source/rtp_sender_unittest.cc",
+ "source/rtp_sender_video_unittest.cc",
+ "source/rtp_sequence_number_map_unittest.cc",
+ "source/rtp_util_unittest.cc",
+ "source/rtp_video_layers_allocation_extension_unittest.cc",
+ "source/source_tracker_unittest.cc",
+ "source/time_util_unittest.cc",
+ "source/ulpfec_generator_unittest.cc",
+ "source/ulpfec_header_reader_writer_unittest.cc",
+ "source/ulpfec_receiver_unittest.cc",
+ "source/video_rtp_depacketizer_av1_unittest.cc",
+ "source/video_rtp_depacketizer_generic_unittest.cc",
+ "source/video_rtp_depacketizer_h264_unittest.cc",
+ "source/video_rtp_depacketizer_raw_unittest.cc",
+ "source/video_rtp_depacketizer_vp8_unittest.cc",
+ "source/video_rtp_depacketizer_vp9_unittest.cc",
+ ]
+ deps = [
+ ":fec_test_helper",
+ ":mock_rtp_rtcp",
+ ":rtcp_transceiver",
+ ":rtp_packetizer_av1_test_helper",
+ ":rtp_rtcp",
+ ":rtp_rtcp_format",
+ ":rtp_rtcp_legacy",
+ "../../api:array_view",
+ "../../api:create_time_controller",
+ "../../api:libjingle_peerconnection_api",
+ "../../api:mock_frame_encryptor",
+ "../../api:rtp_headers",
+ "../../api:rtp_packet_info",
+ "../../api:rtp_parameters",
+ "../../api:scoped_refptr",
+ "../../api:time_controller",
+ "../../api:transport_api",
+ "../../api/rtc_event_log",
+ "../../api/transport:field_trial_based_config",
+ "../../api/transport/rtp:dependency_descriptor",
+ "../../api/units:data_rate",
+ "../../api/units:data_size",
+ "../../api/units:time_delta",
+ "../../api/units:timestamp",
+ "../../api/video:encoded_image",
+ "../../api/video:video_bitrate_allocation",
+ "../../api/video:video_bitrate_allocator",
+ "../../api/video:video_codec_constants",
+ "../../api/video:video_frame",
+ "../../api/video:video_layers_allocation",
+ "../../api/video:video_rtp_headers",
+ "../../api/video_codecs:video_codecs_api",
+ "../../call:rtp_receiver",
+ "../../common_video",
+ "../../common_video/generic_frame_descriptor",
+ "../../common_video/test:utilities",
+ "../../logging:mocks",
+ "../../rtc_base:bit_buffer",
+ "../../rtc_base:buffer",
+ "../../rtc_base:checks",
+ "../../rtc_base:copy_on_write_buffer",
+ "../../rtc_base:logging",
+ "../../rtc_base:macromagic",
+ "../../rtc_base:random",
+ "../../rtc_base:rate_limiter",
+ "../../rtc_base:rtc_base_tests_utils",
+ "../../rtc_base:rtc_event",
+ "../../rtc_base:rtc_numerics",
+ "../../rtc_base:stringutils",
+ "../../rtc_base:task_queue_for_test",
+ "../../rtc_base:threading",
+ "../../rtc_base:timeutils",
+ "../../system_wrappers",
+ "../../test:field_trial",
+ "../../test:mock_frame_transformer",
+ "../../test:mock_transport",
+ "../../test:rtp_test_utils",
+ "../../test:run_loop",
+ "../../test:scoped_key_value_config",
+ "../../test:test_support",
+ "../../test/time_controller:time_controller",
+ "../video_coding:codec_globals_headers",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/algorithm:container",
+ "//third_party/abseil-cpp/absl/base:core_headers",
+ "//third_party/abseil-cpp/absl/memory",
+ "//third_party/abseil-cpp/absl/strings",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+ }
+}
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/DEPS b/third_party/libwebrtc/modules/rtp_rtcp/DEPS
new file mode 100644
index 0000000000..3eec1ca90d
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/DEPS
@@ -0,0 +1,8 @@
+include_rules = [
+ "+call",
+ "+common_video",
+ "+logging/rtc_event_log",
+ "+system_wrappers",
+ # Avoid directly using field_trial. Instead use FieldTrialsView.
+ "-system_wrappers/include/field_trial.h",
+]
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/OWNERS b/third_party/libwebrtc/modules/rtp_rtcp/OWNERS
new file mode 100644
index 0000000000..47d12c401f
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/OWNERS
@@ -0,0 +1,6 @@
+stefan@webrtc.org
+henrik.lundin@webrtc.org
+mflodman@webrtc.org
+asapersson@webrtc.org
+danilchap@webrtc.org
+sprang@webrtc.org
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/include/flexfec_receiver.h b/third_party/libwebrtc/modules/rtp_rtcp/include/flexfec_receiver.h
new file mode 100644
index 0000000000..fe872b3b1e
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/include/flexfec_receiver.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_RTP_RTCP_INCLUDE_FLEXFEC_RECEIVER_H_
+#define MODULES_RTP_RTCP_INCLUDE_FLEXFEC_RECEIVER_H_
+
+#include <stdint.h>
+
+#include <memory>
+
+#include "api/sequence_checker.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "modules/rtp_rtcp/source/forward_error_correction.h"
+#include "modules/rtp_rtcp/source/rtp_packet_received.h"
+#include "modules/rtp_rtcp/source/ulpfec_receiver.h"
+#include "rtc_base/system/no_unique_address.h"
+#include "rtc_base/thread_annotations.h"
+
+namespace webrtc {
+
+class Clock;
+
+class FlexfecReceiver {
+ public:
+ /* Mozilla: Avoid this since it could use GetRealTimeClock().
+ FlexfecReceiver(uint32_t ssrc,
+ uint32_t protected_media_ssrc,
+ RecoveredPacketReceiver* recovered_packet_receiver);
+ */
+ FlexfecReceiver(Clock* clock,
+ uint32_t ssrc,
+ uint32_t protected_media_ssrc,
+ RecoveredPacketReceiver* recovered_packet_receiver);
+ ~FlexfecReceiver();
+
+ // Inserts a received packet (can be either media or FlexFEC) into the
+ // internal buffer, and sends the received packets to the erasure code.
+ // All newly recovered packets are sent back through the callback.
+ void OnRtpPacket(const RtpPacketReceived& packet);
+
+ // Returns a counter describing the added and recovered packets.
+ FecPacketCounter GetPacketCounter() const;
+
+ // Protected to aid testing.
+ protected:
+ std::unique_ptr<ForwardErrorCorrection::ReceivedPacket> AddReceivedPacket(
+ const RtpPacketReceived& packet);
+ void ProcessReceivedPacket(
+ const ForwardErrorCorrection::ReceivedPacket& received_packet);
+
+ private:
+ // Config.
+ const uint32_t ssrc_;
+ const uint32_t protected_media_ssrc_;
+
+ // Erasure code interfacing and callback.
+ std::unique_ptr<ForwardErrorCorrection> erasure_code_
+ RTC_GUARDED_BY(sequence_checker_);
+ ForwardErrorCorrection::RecoveredPacketList recovered_packets_
+ RTC_GUARDED_BY(sequence_checker_);
+ RecoveredPacketReceiver* const recovered_packet_receiver_;
+
+ // Logging and stats.
+ Clock* const clock_;
+ int64_t last_recovered_packet_ms_ RTC_GUARDED_BY(sequence_checker_);
+ FecPacketCounter packet_counter_ RTC_GUARDED_BY(sequence_checker_);
+
+ RTC_NO_UNIQUE_ADDRESS SequenceChecker sequence_checker_;
+};
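+
+// A minimal usage sketch (not part of the API; the demuxer class, SSRC
+// values, and packet variable below are hypothetical):
+//
+//   class Demuxer : public RecoveredPacketReceiver {
+//    public:
+//     void OnRecoveredPacket(const uint8_t* packet, size_t length) override {
+//       // Route the recovered RTP packet by its SSRC.
+//     }
+//   };
+//   Demuxer demuxer;
+//   FlexfecReceiver receiver(clock, /*ssrc=*/1234,
+//                            /*protected_media_ssrc=*/5678, &demuxer);
+//   receiver.OnRtpPacket(received_packet);  // Media and FlexFEC alike.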
+
+} // namespace webrtc
+
+#endif // MODULES_RTP_RTCP_INCLUDE_FLEXFEC_RECEIVER_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/include/flexfec_sender.h b/third_party/libwebrtc/modules/rtp_rtcp/include/flexfec_sender.h
new file mode 100644
index 0000000000..f0acfe6c3d
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/include/flexfec_sender.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_RTP_RTCP_INCLUDE_FLEXFEC_SENDER_H_
+#define MODULES_RTP_RTCP_INCLUDE_FLEXFEC_SENDER_H_
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "absl/strings/string_view.h"
+#include "api/array_view.h"
+#include "api/rtp_parameters.h"
+#include "modules/rtp_rtcp/include/rtp_header_extension_map.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "modules/rtp_rtcp/source/rtp_header_extension_size.h"
+#include "modules/rtp_rtcp/source/ulpfec_generator.h"
+#include "modules/rtp_rtcp/source/video_fec_generator.h"
+#include "rtc_base/random.h"
+#include "rtc_base/rate_statistics.h"
+#include "rtc_base/synchronization/mutex.h"
+
+namespace webrtc {
+
+class Clock;
+class RtpPacketToSend;
+
+// Note that this class is not thread-safe, and thus requires external
+// synchronization. Currently, this is done using the lock in PayloadRouter.
+
+class FlexfecSender : public VideoFecGenerator {
+ public:
+ FlexfecSender(int payload_type,
+ uint32_t ssrc,
+ uint32_t protected_media_ssrc,
+ absl::string_view mid,
+ const std::vector<RtpExtension>& rtp_header_extensions,
+ rtc::ArrayView<const RtpExtensionSize> extension_sizes,
+ const RtpState* rtp_state,
+ Clock* clock);
+ ~FlexfecSender();
+
+ FecType GetFecType() const override {
+ return VideoFecGenerator::FecType::kFlexFec;
+ }
+ absl::optional<uint32_t> FecSsrc() override { return ssrc_; }
+
+ // Sets the FEC rate, max frames sent before FEC packets are sent,
+ // and what type of generator matrices are used.
+ void SetProtectionParameters(const FecProtectionParams& delta_params,
+ const FecProtectionParams& key_params) override;
+
+ // Adds a media packet to the internal buffer. When enough media packets
+ // have been added, the FEC packets are generated and stored internally.
+ // These FEC packets are then obtained by calling GetFecPackets().
+ void AddPacketAndGenerateFec(const RtpPacketToSend& packet) override;
+
+ // Returns generated FlexFEC packets.
+ std::vector<std::unique_ptr<RtpPacketToSend>> GetFecPackets() override;
+
+ // Returns the overhead, per packet, for FlexFEC.
+ size_t MaxPacketOverhead() const override;
+
+ DataRate CurrentFecRate() const override;
+
+ // Only called on the VideoSendStream queue, after operation has shut down.
+ absl::optional<RtpState> GetRtpState() override;
+
+ private:
+ // Utility.
+ Clock* const clock_;
+ Random random_;
+ int64_t last_generated_packet_ms_;
+
+ // Config.
+ const int payload_type_;
+ const uint32_t timestamp_offset_;
+ const uint32_t ssrc_;
+ const uint32_t protected_media_ssrc_;
+ // MID value to send in the MID header extension.
+ const std::string mid_;
+ // Sequence number of next packet to generate.
+ uint16_t seq_num_;
+
+ // Implementation.
+ UlpfecGenerator ulpfec_generator_;
+ const RtpHeaderExtensionMap rtp_header_extension_map_;
+ const size_t header_extensions_size_;
+
+ mutable Mutex mutex_;
+ RateStatistics fec_bitrate_ RTC_GUARDED_BY(mutex_);
+};
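+
+// A minimal send-side sketch (illustrative only: the parameter values are
+// hypothetical, media packet construction is elided, and the
+// FecProtectionParams field order is assumed from module_fec_types.h):
+//
+//   FecProtectionParams params = {/*fec_rate=*/15, /*max_fec_frames=*/3,
+//                                 kFecMaskRandom};
+//   sender.SetProtectionParameters(/*delta_params=*/params,
+//                                  /*key_params=*/params);
+//   sender.AddPacketAndGenerateFec(media_packet);
+//   for (auto& fec_packet : sender.GetFecPackets()) {
+//     // Hand each generated FlexFEC packet to the transport/pacer.
+//   }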
+
+} // namespace webrtc
+
+#endif // MODULES_RTP_RTCP_INCLUDE_FLEXFEC_SENDER_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/include/receive_statistics.h b/third_party/libwebrtc/modules/rtp_rtcp/include/receive_statistics.h
new file mode 100644
index 0000000000..827fd3a7a8
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/include/receive_statistics.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_RTP_RTCP_INCLUDE_RECEIVE_STATISTICS_H_
+#define MODULES_RTP_RTCP_INCLUDE_RECEIVE_STATISTICS_H_
+
+#include <map>
+#include <memory>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "call/rtp_packet_sink_interface.h"
+#include "modules/rtp_rtcp/include/rtcp_statistics.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/report_block.h"
+
+namespace webrtc {
+
+class Clock;
+
+class ReceiveStatisticsProvider {
+ public:
+ virtual ~ReceiveStatisticsProvider() = default;
+ // Collects receive statistics in the form of RTCP report blocks.
+ // Returns at most `max_blocks` report blocks.
+ virtual std::vector<rtcp::ReportBlock> RtcpReportBlocks(
+ size_t max_blocks) = 0;
+};
+
+class StreamStatistician {
+ public:
+ virtual ~StreamStatistician();
+
+ virtual RtpReceiveStats GetStats() const = 0;
+
+ // Returns the average over the stream lifetime.
+ virtual absl::optional<int> GetFractionLostInPercent() const = 0;
+
+ // TODO(bugs.webrtc.org/10679): Delete, migrate users to the above GetStats
+ // method (and extend RtpReceiveStats if needed).
+ // Gets receive stream data counters.
+ virtual StreamDataCounters GetReceiveStreamDataCounters() const = 0;
+
+ virtual uint32_t BitrateReceived() const = 0;
+};
+
+class ReceiveStatistics : public ReceiveStatisticsProvider,
+ public RtpPacketSinkInterface {
+ public:
+ ~ReceiveStatistics() override = default;
+
+ // Returns a thread-safe instance of ReceiveStatistics.
+ // https://chromium.googlesource.com/chromium/src/+/lkgr/docs/threading_and_tasks.md#threading-lexicon
+ static std::unique_ptr<ReceiveStatistics> Create(Clock* clock);
+ // Returns a thread-compatible instance of ReceiveStatistics.
+ static std::unique_ptr<ReceiveStatistics> CreateThreadCompatible(
+ Clock* clock);
+
+ // Returns a pointer to the statistician of an SSRC.
+ virtual StreamStatistician* GetStatistician(uint32_t ssrc) const = 0;
+
+ // TODO(bugs.webrtc.org/10669): Deprecated, delete as soon as downstream
+ // projects are updated. This method sets the max reordering threshold of all
+ // current and future streams.
+ virtual void SetMaxReorderingThreshold(int max_reordering_threshold) = 0;
+
+ // Sets the max reordering threshold in number of packets.
+ virtual void SetMaxReorderingThreshold(uint32_t ssrc,
+ int max_reordering_threshold) = 0;
+ // Detect retransmissions, enabling updates of the retransmitted counters. The
+ // default is false.
+ virtual void EnableRetransmitDetection(uint32_t ssrc, bool enable) = 0;
+};
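+
+// A minimal receive-side sketch (the SSRC and packet variable are
+// illustrative):
+//
+//   std::unique_ptr<ReceiveStatistics> stats =
+//       ReceiveStatistics::Create(clock);
+//   stats->OnRtpPacket(received_packet);
+//   if (StreamStatistician* statistician =
+//           stats->GetStatistician(/*ssrc=*/1234)) {
+//     RtpReceiveStats current = statistician->GetStats();
+//   }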
+
+} // namespace webrtc
+#endif // MODULES_RTP_RTCP_INCLUDE_RECEIVE_STATISTICS_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/include/remote_ntp_time_estimator.h b/third_party/libwebrtc/modules/rtp_rtcp/include/remote_ntp_time_estimator.h
new file mode 100644
index 0000000000..e45ee39530
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/include/remote_ntp_time_estimator.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_RTP_RTCP_INCLUDE_REMOTE_NTP_TIME_ESTIMATOR_H_
+#define MODULES_RTP_RTCP_INCLUDE_REMOTE_NTP_TIME_ESTIMATOR_H_
+
+#include <stdint.h>
+
+#include "absl/types/optional.h"
+#include "api/units/time_delta.h"
+#include "api/units/timestamp.h"
+#include "rtc_base/numerics/moving_median_filter.h"
+#include "system_wrappers/include/rtp_to_ntp_estimator.h"
+
+namespace webrtc {
+
+class Clock;
+
+// RemoteNtpTimeEstimator can be used to estimate a given RTP timestamp's NTP
+// time in local timebase.
+// Note that it needs to be trained with at least 2 RTCP SR (by calling
+// `UpdateRtcpTimestamp`) before it can be used.
+class RemoteNtpTimeEstimator {
+ public:
+ explicit RemoteNtpTimeEstimator(Clock* clock);
+ RemoteNtpTimeEstimator(const RemoteNtpTimeEstimator&) = delete;
+ RemoteNtpTimeEstimator& operator=(const RemoteNtpTimeEstimator&) = delete;
+ ~RemoteNtpTimeEstimator() = default;
+
+ // Updates the estimator with round trip time `rtt` and
+ // new NTP time <-> RTP timestamp mapping from an RTCP sender report.
+ bool UpdateRtcpTimestamp(TimeDelta rtt,
+ NtpTime sender_send_time,
+ uint32_t rtp_timestamp);
+
+ // Estimates the NTP timestamp in local timebase from `rtp_timestamp`.
+ // Returns the NTP timestamp in ms on success, or -1 on failure.
+ int64_t Estimate(uint32_t rtp_timestamp) {
+ NtpTime ntp_time = EstimateNtp(rtp_timestamp);
+ if (!ntp_time.Valid()) {
+ return -1;
+ }
+ return ntp_time.ToMs();
+ }
+
+ // Estimates the NTP timestamp in local timebase from `rtp_timestamp`.
+ // Returns invalid NtpTime (i.e. NtpTime(0)) on failure.
+ NtpTime EstimateNtp(uint32_t rtp_timestamp);
+
+ // Estimates the offset between the remote clock and the
+ // local one. This is equal to local NTP clock - remote NTP clock.
+ // The offset is returned in ntp time resolution, i.e. 1/2^32 sec ~= 0.2 ns.
+ // Returns nullopt on failure.
+ absl::optional<int64_t> EstimateRemoteToLocalClockOffset();
+
+ private:
+ Clock* clock_;
+ // Offset is measured with the same precision as NtpTime: in 1/2^32 seconds ~=
+ // 0.2 ns.
+ MovingMedianFilter<int64_t> ntp_clocks_offset_estimator_;
+ RtpToNtpEstimator rtp_to_ntp_;
+ Timestamp last_timing_log_ = Timestamp::MinusInfinity();
+};
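+
+// A minimal usage sketch (the sender-report values and `rtp_timestamp`
+// variable are illustrative): train with at least two RTCP sender reports
+// before asking for estimates.
+//
+//   RemoteNtpTimeEstimator estimator(clock);
+//   estimator.UpdateRtcpTimestamp(rtt, sender_report_ntp_time,
+//                                 sender_report_rtp_timestamp);
+//   ...  // At least one more sender report.
+//   NtpTime ntp = estimator.EstimateNtp(rtp_timestamp);
+//   if (ntp.Valid()) {
+//     int64_t ntp_ms = ntp.ToMs();
+//   }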
+
+} // namespace webrtc
+
+#endif // MODULES_RTP_RTCP_INCLUDE_REMOTE_NTP_TIME_ESTIMATOR_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/include/report_block_data.cc b/third_party/libwebrtc/modules/rtp_rtcp/include/report_block_data.cc
new file mode 100644
index 0000000000..ec4d9d82e0
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/include/report_block_data.cc
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2019 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/include/report_block_data.h"
+
+namespace webrtc {
+
+ReportBlockData::ReportBlockData()
+ : report_block_(),
+ report_block_timestamp_utc_us_(0),
+ last_rtt_ms_(0),
+ min_rtt_ms_(0),
+ max_rtt_ms_(0),
+ sum_rtt_ms_(0),
+ num_rtts_(0) {}
+
+double ReportBlockData::AvgRttMs() const {
+ return num_rtts_ ? static_cast<double>(sum_rtt_ms_) / num_rtts_ : 0.0;
+}
+
+void ReportBlockData::SetReportBlock(RTCPReportBlock report_block,
+ int64_t report_block_timestamp_utc_us) {
+ report_block_ = report_block;
+ report_block_timestamp_utc_us_ = report_block_timestamp_utc_us;
+}
+
+void ReportBlockData::AddRoundTripTimeSample(int64_t rtt_ms) {
+ if (rtt_ms > max_rtt_ms_)
+ max_rtt_ms_ = rtt_ms;
+ if (num_rtts_ == 0 || rtt_ms < min_rtt_ms_)
+ min_rtt_ms_ = rtt_ms;
+ last_rtt_ms_ = rtt_ms;
+ sum_rtt_ms_ += rtt_ms;
+ ++num_rtts_;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/include/report_block_data.h b/third_party/libwebrtc/modules/rtp_rtcp/include/report_block_data.h
new file mode 100644
index 0000000000..2c4533ada8
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/include/report_block_data.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2019 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_RTP_RTCP_INCLUDE_REPORT_BLOCK_DATA_H_
+#define MODULES_RTP_RTCP_INCLUDE_REPORT_BLOCK_DATA_H_
+
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+
+namespace webrtc {
+
+class ReportBlockData {
+ public:
+ ReportBlockData();
+
+ const RTCPReportBlock& report_block() const { return report_block_; }
+ int64_t report_block_timestamp_utc_us() const {
+ return report_block_timestamp_utc_us_;
+ }
+ int64_t last_rtt_ms() const { return last_rtt_ms_; }
+ int64_t min_rtt_ms() const { return min_rtt_ms_; }
+ int64_t max_rtt_ms() const { return max_rtt_ms_; }
+ int64_t sum_rtt_ms() const { return sum_rtt_ms_; }
+ size_t num_rtts() const { return num_rtts_; }
+ bool has_rtt() const { return num_rtts_ != 0; }
+
+ double AvgRttMs() const;
+
+ void SetReportBlock(RTCPReportBlock report_block,
+ int64_t report_block_timestamp_utc_us);
+ void AddRoundTripTimeSample(int64_t rtt_ms);
+
+ private:
+ RTCPReportBlock report_block_;
+ int64_t report_block_timestamp_utc_us_;
+
+ int64_t last_rtt_ms_;
+ int64_t min_rtt_ms_;
+ int64_t max_rtt_ms_;
+ int64_t sum_rtt_ms_;
+ size_t num_rtts_;
+};
+
+class ReportBlockDataObserver {
+ public:
+ virtual ~ReportBlockDataObserver() = default;
+
+ virtual void OnReportBlockDataUpdated(ReportBlockData report_block_data) = 0;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_RTP_RTCP_INCLUDE_REPORT_BLOCK_DATA_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/include/rtcp_statistics.h b/third_party/libwebrtc/modules/rtp_rtcp/include/rtcp_statistics.h
new file mode 100644
index 0000000000..6d6246d8a8
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/include/rtcp_statistics.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_RTP_RTCP_INCLUDE_RTCP_STATISTICS_H_
+#define MODULES_RTP_RTCP_INCLUDE_RTCP_STATISTICS_H_
+
+#include <stdint.h>
+
+#include "absl/strings/string_view.h"
+
+namespace webrtc {
+
+// Statistics for RTCP packet types.
+struct RtcpPacketTypeCounter {
+ RtcpPacketTypeCounter()
+ : nack_packets(0),
+ fir_packets(0),
+ pli_packets(0),
+ nack_requests(0),
+ unique_nack_requests(0) {}
+
+ void Add(const RtcpPacketTypeCounter& other) {
+ nack_packets += other.nack_packets;
+ fir_packets += other.fir_packets;
+ pli_packets += other.pli_packets;
+ nack_requests += other.nack_requests;
+ unique_nack_requests += other.unique_nack_requests;
+ }
+
+ void Subtract(const RtcpPacketTypeCounter& other) {
+ nack_packets -= other.nack_packets;
+ fir_packets -= other.fir_packets;
+ pli_packets -= other.pli_packets;
+ nack_requests -= other.nack_requests;
+ unique_nack_requests -= other.unique_nack_requests;
+ }
+
+ int UniqueNackRequestsInPercent() const {
+ if (nack_requests == 0) {
+ return 0;
+ }
+ return static_cast<int>((unique_nack_requests * 100.0f / nack_requests) +
+ 0.5f);
+ }
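+ // Example: 150 NACK requests of which 50 were unique yields
+ // 50 * 100.0 / 150 + 0.5 = 33.83, truncated to 33 percent.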
+
+ uint32_t nack_packets; // Number of RTCP NACK packets.
+ uint32_t fir_packets; // Number of RTCP FIR packets.
+ uint32_t pli_packets; // Number of RTCP PLI packets.
+ uint32_t nack_requests; // Number of NACKed RTP packets.
+ uint32_t unique_nack_requests; // Number of unique NACKed RTP packets.
+};
+
+class RtcpPacketTypeCounterObserver {
+ public:
+ virtual ~RtcpPacketTypeCounterObserver() {}
+ virtual void RtcpPacketTypesCounterUpdated(
+ uint32_t ssrc,
+ const RtcpPacketTypeCounter& packet_counter) = 0;
+};
+
+// Invoked for each cname passed in RTCP SDES blocks.
+class RtcpCnameCallback {
+ public:
+ virtual ~RtcpCnameCallback() = default;
+
+ virtual void OnCname(uint32_t ssrc, absl::string_view cname) = 0;
+};
+
+} // namespace webrtc
+#endif // MODULES_RTP_RTCP_INCLUDE_RTCP_STATISTICS_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/include/rtp_cvo.h b/third_party/libwebrtc/modules/rtp_rtcp/include/rtp_cvo.h
new file mode 100644
index 0000000000..497946d6a7
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/include/rtp_cvo.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef MODULES_RTP_RTCP_INCLUDE_RTP_CVO_H_
+#define MODULES_RTP_RTCP_INCLUDE_RTP_CVO_H_
+
+#include "api/video/video_rotation.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+// Please refer to http://www.etsi.org/deliver/etsi_ts/126100_126199/126114/
+// 12.07.00_60/ts_126114v120700p.pdf Section 7.4.5. The rotation of a frame is
+// the clockwise angle the frames must be rotated in order to display the frames
+// correctly if the display is rotated in its natural orientation.
+inline uint8_t ConvertVideoRotationToCVOByte(VideoRotation rotation) {
+ switch (rotation) {
+ case kVideoRotation_0:
+ return 0;
+ case kVideoRotation_90:
+ return 1;
+ case kVideoRotation_180:
+ return 2;
+ case kVideoRotation_270:
+ return 3;
+ }
+ RTC_DCHECK_NOTREACHED();
+ return 0;
+}
+
+inline VideoRotation ConvertCVOByteToVideoRotation(uint8_t cvo_byte) {
+ // CVO byte: |0 0 0 0 C F R R|.
+ const uint8_t rotation_bits = cvo_byte & 0x3;
+ switch (rotation_bits) {
+ case 0:
+ return kVideoRotation_0;
+ case 1:
+ return kVideoRotation_90;
+ case 2:
+ return kVideoRotation_180;
+ case 3:
+ return kVideoRotation_270;
+ default:
+ RTC_DCHECK_NOTREACHED();
+ return kVideoRotation_0;
+ }
+}
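+
+// Round-trip example: ConvertVideoRotationToCVOByte(kVideoRotation_90)
+// returns 1, and ConvertCVOByteToVideoRotation(1) maps back to
+// kVideoRotation_90; only the two lowest bits of the CVO byte are used.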
+
+} // namespace webrtc
+#endif // MODULES_RTP_RTCP_INCLUDE_RTP_CVO_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/include/rtp_header_extension_map.h b/third_party/libwebrtc/modules/rtp_rtcp/include/rtp_header_extension_map.h
new file mode 100644
index 0000000000..ff1ea61f52
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/include/rtp_header_extension_map.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_RTP_RTCP_INCLUDE_RTP_HEADER_EXTENSION_MAP_H_
+#define MODULES_RTP_RTCP_INCLUDE_RTP_HEADER_EXTENSION_MAP_H_
+
+#include <stdint.h>
+
+#include <string>
+
+#include "absl/strings/string_view.h"
+#include "api/array_view.h"
+#include "api/rtp_parameters.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+class RtpHeaderExtensionMap {
+ public:
+ static constexpr RTPExtensionType kInvalidType = kRtpExtensionNone;
+ static constexpr int kInvalidId = 0;
+
+ RtpHeaderExtensionMap();
+ explicit RtpHeaderExtensionMap(bool extmap_allow_mixed);
+ explicit RtpHeaderExtensionMap(rtc::ArrayView<const RtpExtension> extensions);
+
+ void Reset(rtc::ArrayView<const RtpExtension> extensions);
+
+ template <typename Extension>
+ bool Register(int id) {
+ return Register(id, Extension::kId, Extension::Uri());
+ }
+ bool RegisterByType(int id, RTPExtensionType type);
+ bool RegisterByUri(int id, absl::string_view uri);
+
+ bool IsRegistered(RTPExtensionType type) const {
+ return GetId(type) != kInvalidId;
+ }
+ // Return kInvalidType if not found.
+ RTPExtensionType GetType(int id) const;
+ // Return kInvalidId if not found.
+ uint8_t GetId(RTPExtensionType type) const {
+ RTC_DCHECK_GT(type, kRtpExtensionNone);
+ RTC_DCHECK_LT(type, kRtpExtensionNumberOfExtensions);
+ return ids_[type];
+ }
+
+ void Deregister(absl::string_view uri);
+
+ // Corresponds to the SDP attribute extmap-allow-mixed, see RFC8285.
+ // Set to true if it's allowed to mix one- and two-byte RTP header extensions
+ // in the same stream.
+ bool ExtmapAllowMixed() const { return extmap_allow_mixed_; }
+ void SetExtmapAllowMixed(bool extmap_allow_mixed) {
+ extmap_allow_mixed_ = extmap_allow_mixed;
+ }
+
+ private:
+ bool Register(int id, RTPExtensionType type, absl::string_view uri);
+
+ uint8_t ids_[kRtpExtensionNumberOfExtensions];
+ bool extmap_allow_mixed_;
+};
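+
+// A minimal registration sketch (the ids are illustrative;
+// TransmissionOffset is assumed to be one of the extension traits from
+// rtp_header_extensions.h, and kAudioLevelUri one of the URI constants on
+// RtpExtension):
+//
+//   RtpHeaderExtensionMap map;
+//   map.Register<TransmissionOffset>(/*id=*/1);
+//   map.RegisterByUri(/*id=*/2, RtpExtension::kAudioLevelUri);
+//   if (map.IsRegistered(kRtpExtensionTransmissionTimeOffset)) {
+//     uint8_t id = map.GetId(kRtpExtensionTransmissionTimeOffset);
+//   }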
+
+} // namespace webrtc
+
+#endif // MODULES_RTP_RTCP_INCLUDE_RTP_HEADER_EXTENSION_MAP_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/include/rtp_packet_sender.h b/third_party/libwebrtc/modules/rtp_rtcp/include/rtp_packet_sender.h
new file mode 100644
index 0000000000..ae221b09d3
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/include/rtp_packet_sender.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_RTP_RTCP_INCLUDE_RTP_PACKET_SENDER_H_
+#define MODULES_RTP_RTCP_INCLUDE_RTP_PACKET_SENDER_H_
+
+#include <memory>
+#include <vector>
+
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "modules/rtp_rtcp/source/rtp_packet_to_send.h"
+
+namespace webrtc {
+
+class RtpPacketSender {
+ public:
+ virtual ~RtpPacketSender() = default;
+
+ // Inserts a set of packets into the queue for eventual transmission. Based
+ // on the type of the packets, they will be prioritized and scheduled
+ // relative to other packets and the current target send rate.
+ virtual void EnqueuePackets(
+ std::vector<std::unique_ptr<RtpPacketToSend>> packets) = 0;
+};
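+
+// An illustrative call (packet construction elided):
+//
+//   std::vector<std::unique_ptr<RtpPacketToSend>> batch;
+//   batch.push_back(std::move(rtp_packet));
+//   packet_sender->EnqueuePackets(std::move(batch));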
+
+} // namespace webrtc
+
+#endif // MODULES_RTP_RTCP_INCLUDE_RTP_PACKET_SENDER_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/include/rtp_rtcp.h b/third_party/libwebrtc/modules/rtp_rtcp/include/rtp_rtcp.h
new file mode 100644
index 0000000000..c71d7f0c3d
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/include/rtp_rtcp.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_RTP_RTCP_INCLUDE_RTP_RTCP_H_
+#define MODULES_RTP_RTCP_INCLUDE_RTP_RTCP_H_
+
+#include <memory>
+
+#include "absl/base/attributes.h"
+#include "modules/rtp_rtcp/source/rtp_rtcp_interface.h"
+
+namespace webrtc {
+
+// DEPRECATED. Do not use.
+class RtpRtcp : public RtpRtcpInterface {
+ public:
+ // Instantiates a deprecated version of the RtpRtcp module.
+ static std::unique_ptr<RtpRtcp> ABSL_DEPRECATED("")
+ Create(const Configuration& configuration) {
+ return DEPRECATED_Create(configuration);
+ }
+
+ static std::unique_ptr<RtpRtcp> DEPRECATED_Create(
+ const Configuration& configuration);
+
+ // Requests a new key frame
+ // using PLI, https://tools.ietf.org/html/rfc4585#section-6.3.1.1
+ void SendPictureLossIndication() { SendRTCP(kRtcpPli); }
+ // using FIR, https://tools.ietf.org/html/rfc5104#section-4.3.1.2
+ void SendFullIntraRequest() { SendRTCP(kRtcpFir); }
+
+ // Process any pending tasks such as timeouts.
+ virtual void Process() = 0;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_RTP_RTCP_INCLUDE_RTP_RTCP_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/include/rtp_rtcp_defines.cc b/third_party/libwebrtc/modules/rtp_rtcp/include/rtp_rtcp_defines.cc
new file mode 100644
index 0000000000..0f527919ce
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/include/rtp_rtcp_defines.cc
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+
+#include <string.h>
+
+#include <type_traits>
+
+#include "absl/algorithm/container.h"
+#include "api/array_view.h"
+#include "modules/rtp_rtcp/source/rtp_packet.h"
+
+namespace webrtc {
+
+namespace {
+constexpr size_t kMidRsidMaxSize = 16;
+
+// Checks whether the passed character is a "token-char" from RFC 4566.
+// https://datatracker.ietf.org/doc/html/rfc4566#section-9
+// token-char = %x21 / %x23-27 / %x2A-2B / %x2D-2E / %x30-39
+// / %x41-5A / %x5E-7E
+bool IsTokenChar(char ch) {
+ return ch == 0x21 || (ch >= 0x23 && ch <= 0x27) || ch == 0x2a || ch == 0x2b ||
+ ch == 0x2d || ch == 0x2e || (ch >= 0x30 && ch <= 0x39) ||
+ (ch >= 0x41 && ch <= 0x5a) || (ch >= 0x5e && ch <= 0x7e);
+}
+} // namespace
+
+bool IsLegalMidName(absl::string_view name) {
+ return (name.size() <= kMidRsidMaxSize && !name.empty() &&
+ absl::c_all_of(name, IsTokenChar));
+}
+
+bool IsLegalRsidName(absl::string_view name) {
+ return (name.size() <= kMidRsidMaxSize && !name.empty() &&
+ absl::c_all_of(name, isalnum));
+}
+
+StreamDataCounters::StreamDataCounters() : first_packet_time_ms(-1) {}
+
+RtpPacketCounter::RtpPacketCounter(const RtpPacket& packet)
+ : header_bytes(packet.headers_size()),
+ payload_bytes(packet.payload_size()),
+ padding_bytes(packet.padding_size()),
+ packets(1) {}
+
+void RtpPacketCounter::AddPacket(const RtpPacket& packet) {
+ ++packets;
+ header_bytes += packet.headers_size();
+ padding_bytes += packet.padding_size();
+ payload_bytes += packet.payload_size();
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/include/rtp_rtcp_defines.h b/third_party/libwebrtc/modules/rtp_rtcp/include/rtp_rtcp_defines.h
new file mode 100644
index 0000000000..0150f3f66b
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/include/rtp_rtcp_defines.h
@@ -0,0 +1,494 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_RTP_RTCP_INCLUDE_RTP_RTCP_DEFINES_H_
+#define MODULES_RTP_RTCP_INCLUDE_RTP_RTCP_DEFINES_H_
+
+#include <stddef.h>
+
+#include <list>
+#include <memory>
+#include <vector>
+
+#include "absl/algorithm/container.h"
+#include "absl/strings/string_view.h"
+#include "absl/types/optional.h"
+#include "absl/types/variant.h"
+#include "api/array_view.h"
+#include "api/audio_codecs/audio_format.h"
+#include "api/rtp_headers.h"
+#include "api/transport/network_types.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/remote_estimate.h"
+#include "system_wrappers/include/clock.h"
+
+#define RTCP_CNAME_SIZE 256 // RFC 3550 page 44, including null termination
+#define IP_PACKET_SIZE 1500 // we assume ethernet
+
+namespace webrtc {
+class RtpPacket;
+class RtpPacketToSend;
+namespace rtcp {
+class TransportFeedback;
+}
+
+const int kVideoPayloadTypeFrequency = 90000;
+
+// TODO(bugs.webrtc.org/6458): Remove this when all the dependent projects are
+// updated to correctly set the RTP rate for RtcpSender.
+const int kBogusRtpRateForAudioRtcp = 8000;
+
+// Minimum RTP header size in bytes.
+const uint8_t kRtpHeaderSize = 12;
+
+bool IsLegalMidName(absl::string_view name);
+bool IsLegalRsidName(absl::string_view name);
+
+// This enum must not have any gaps, i.e., all integers between
+// kRtpExtensionNone and kRtpExtensionNumberOfExtensions must be valid enum
+// entries.
+enum RTPExtensionType : int {
+ kRtpExtensionNone,
+ kRtpExtensionTransmissionTimeOffset,
+ kRtpExtensionAudioLevel,
+ kRtpExtensionCsrcAudioLevel,
+ kRtpExtensionInbandComfortNoise,
+ kRtpExtensionAbsoluteSendTime,
+ kRtpExtensionAbsoluteCaptureTime,
+ kRtpExtensionVideoRotation,
+ kRtpExtensionTransportSequenceNumber,
+ kRtpExtensionTransportSequenceNumber02,
+ kRtpExtensionPlayoutDelay,
+ kRtpExtensionVideoContentType,
+ kRtpExtensionVideoLayersAllocation,
+ kRtpExtensionVideoTiming,
+ kRtpExtensionRtpStreamId,
+ kRtpExtensionRepairedRtpStreamId,
+ kRtpExtensionMid,
+ kRtpExtensionGenericFrameDescriptor00,
+ kRtpExtensionGenericFrameDescriptor = kRtpExtensionGenericFrameDescriptor00,
+ kRtpExtensionGenericFrameDescriptor02,
+ kRtpExtensionColorSpace,
+ kRtpExtensionVideoFrameTrackingId,
+ kRtpExtensionNumberOfExtensions // Must be the last entity in the enum.
+};
+
+enum RTCPAppSubTypes { kAppSubtypeBwe = 0x00 };
+
+// TODO(sprang): Make this an enum class once rtcp_receiver has been cleaned up.
+enum RTCPPacketType : uint32_t {
+ kRtcpReport = 0x0001,
+ kRtcpSr = 0x0002,
+ kRtcpRr = 0x0004,
+ kRtcpSdes = 0x0008,
+ kRtcpBye = 0x0010,
+ kRtcpPli = 0x0020,
+ kRtcpNack = 0x0040,
+ kRtcpFir = 0x0080,
+ kRtcpTmmbr = 0x0100,
+ kRtcpTmmbn = 0x0200,
+ kRtcpSrReq = 0x0400,
+ kRtcpLossNotification = 0x2000,
+ kRtcpRemb = 0x10000,
+ kRtcpTransmissionTimeOffset = 0x20000,
+ kRtcpXrReceiverReferenceTime = 0x40000,
+ kRtcpXrDlrrReportBlock = 0x80000,
+ kRtcpTransportFeedback = 0x100000,
+ kRtcpXrTargetBitrate = 0x200000
+};
+
+enum class KeyFrameReqMethod : uint8_t {
+ kNone, // Don't request keyframes.
+ kPliRtcp, // Request keyframes through Picture Loss Indication.
+ kFirRtcp // Request keyframes through Full Intra-frame Request.
+};
+
+enum RtxMode {
+ kRtxOff = 0x0,
+ kRtxRetransmitted = 0x1, // Only send retransmissions over RTX.
+ kRtxRedundantPayloads = 0x2 // Proactively send redundant payloads
+ // instead of padding.
+};
+
+const size_t kRtxHeaderSize = 2;
+
+struct RTCPReportBlock {
+ RTCPReportBlock()
+ : sender_ssrc(0),
+ source_ssrc(0),
+ fraction_lost(0),
+ packets_lost(0),
+ extended_highest_sequence_number(0),
+ jitter(0),
+ last_sender_report_timestamp(0),
+ delay_since_last_sender_report(0) {}
+
+ RTCPReportBlock(uint32_t sender_ssrc,
+ uint32_t source_ssrc,
+ uint8_t fraction_lost,
+ int32_t packets_lost,
+ uint32_t extended_highest_sequence_number,
+ uint32_t jitter,
+ uint32_t last_sender_report_timestamp,
+ uint32_t delay_since_last_sender_report)
+ : sender_ssrc(sender_ssrc),
+ source_ssrc(source_ssrc),
+ fraction_lost(fraction_lost),
+ packets_lost(packets_lost),
+ extended_highest_sequence_number(extended_highest_sequence_number),
+ jitter(jitter),
+ last_sender_report_timestamp(last_sender_report_timestamp),
+ delay_since_last_sender_report(delay_since_last_sender_report) {}
+
+ // Fields as described by RFC 3550 6.4.2.
+ uint32_t sender_ssrc; // SSRC of sender of this report.
+ uint32_t source_ssrc; // SSRC of the RTP packet sender.
+ uint8_t fraction_lost;
+ int32_t packets_lost; // 24 bits valid.
+ uint32_t extended_highest_sequence_number;
+ uint32_t jitter;
+ uint32_t last_sender_report_timestamp;
+ uint32_t delay_since_last_sender_report;
+};
+
+typedef std::list<RTCPReportBlock> ReportBlockList;
+
+struct RtpState {
+ RtpState()
+ : sequence_number(0),
+ start_timestamp(0),
+ timestamp(0),
+ capture_time_ms(-1),
+ last_timestamp_time_ms(-1),
+ ssrc_has_acked(false) {}
+ uint16_t sequence_number;
+ uint32_t start_timestamp;
+ uint32_t timestamp;
+ int64_t capture_time_ms;
+ int64_t last_timestamp_time_ms;
+ bool ssrc_has_acked;
+};
+
+// Callback interface for packets recovered by FlexFEC or ULPFEC. In
+// the FlexFEC case, the implementation should be able to demultiplex
+// the recovered RTP packets based on SSRC.
+class RecoveredPacketReceiver {
+ public:
+ virtual void OnRecoveredPacket(const uint8_t* packet, size_t length) = 0;
+
+ protected:
+ virtual ~RecoveredPacketReceiver() = default;
+};
+
+class RtcpIntraFrameObserver {
+ public:
+ virtual ~RtcpIntraFrameObserver() {}
+
+ virtual void OnReceivedIntraFrameRequest(uint32_t ssrc) = 0;
+};
+
+// Observer for incoming LossNotification RTCP messages.
+// See the documentation of LossNotification for details.
+class RtcpLossNotificationObserver {
+ public:
+ virtual ~RtcpLossNotificationObserver() = default;
+
+ virtual void OnReceivedLossNotification(uint32_t ssrc,
+ uint16_t seq_num_of_last_decodable,
+ uint16_t seq_num_of_last_received,
+ bool decodability_flag) = 0;
+};
+
+class RtcpBandwidthObserver {
+ public:
+ // REMB or TMMBR
+ virtual void OnReceivedEstimatedBitrate(uint32_t bitrate) = 0;
+
+ virtual void OnReceivedRtcpReceiverReport(
+ const ReportBlockList& report_blocks,
+ int64_t rtt,
+ int64_t now_ms) = 0;
+
+ virtual ~RtcpBandwidthObserver() {}
+};
+
+class RtcpEventObserver {
+ public:
+ virtual void OnRtcpBye() = 0;
+ virtual void OnRtcpTimeout() = 0;
+
+ virtual ~RtcpEventObserver() {}
+};
+
+// NOTE! `kNumMediaTypes` must be kept in sync with RtpPacketMediaType!
+static constexpr size_t kNumMediaTypes = 5;
+enum class RtpPacketMediaType : size_t {
+ kAudio, // Audio media packets.
+ kVideo, // Video media packets.
+ kRetransmission, // Retransmissions, sent in response to NACK.
+ kForwardErrorCorrection, // FEC packets.
+ kPadding = kNumMediaTypes - 1, // RTX or plain padding sent to maintain BWE.
+ // Again, don't forget to update `kNumMediaTypes` if you add another value!
+};
+
+struct RtpPacketSendInfo {
+ public:
+ RtpPacketSendInfo() = default;
+
+ uint16_t transport_sequence_number = 0;
+ absl::optional<uint32_t> media_ssrc;
+ uint16_t rtp_sequence_number = 0; // Only valid if `media_ssrc` is set.
+ uint32_t rtp_timestamp = 0;
+ size_t length = 0;
+ absl::optional<RtpPacketMediaType> packet_type;
+ PacedPacketInfo pacing_info;
+};
+
+class NetworkStateEstimateObserver {
+ public:
+ virtual void OnRemoteNetworkEstimate(NetworkStateEstimate estimate) = 0;
+ virtual ~NetworkStateEstimateObserver() = default;
+};
+
+class TransportFeedbackObserver {
+ public:
+ TransportFeedbackObserver() {}
+ virtual ~TransportFeedbackObserver() {}
+
+ virtual void OnAddPacket(const RtpPacketSendInfo& packet_info) = 0;
+ virtual void OnTransportFeedback(const rtcp::TransportFeedback& feedback) = 0;
+};
+
+// Interface for PacketRouter to send rtcp feedback on behalf of
+// congestion controller.
+// TODO(bugs.webrtc.org/8239): Remove and use RtcpTransceiver directly
+// once RtcpTransceiver is always present in the RTP transport.
+class RtcpFeedbackSenderInterface {
+ public:
+ virtual ~RtcpFeedbackSenderInterface() = default;
+ virtual void SendCombinedRtcpPacket(
+ std::vector<std::unique_ptr<rtcp::RtcpPacket>> rtcp_packets) = 0;
+ virtual void SetRemb(int64_t bitrate_bps, std::vector<uint32_t> ssrcs) = 0;
+ virtual void UnsetRemb() = 0;
+};
+
+class StreamFeedbackObserver {
+ public:
+ struct StreamPacketInfo {
+ bool received;
+
+ // `rtp_sequence_number` and `is_retransmission` are only valid if `ssrc`
+ // is populated.
+ absl::optional<uint32_t> ssrc;
+ uint16_t rtp_sequence_number;
+ bool is_retransmission;
+ };
+ virtual ~StreamFeedbackObserver() = default;
+
+ virtual void OnPacketFeedbackVector(
+ std::vector<StreamPacketInfo> packet_feedback_vector) = 0;
+};
+
+class StreamFeedbackProvider {
+ public:
+ virtual void RegisterStreamFeedbackObserver(
+ std::vector<uint32_t> ssrcs,
+ StreamFeedbackObserver* observer) = 0;
+ virtual void DeRegisterStreamFeedbackObserver(
+ StreamFeedbackObserver* observer) = 0;
+ virtual ~StreamFeedbackProvider() = default;
+};
+
+class RtcpRttStats {
+ public:
+ virtual void OnRttUpdate(int64_t rtt) = 0;
+
+ virtual int64_t LastProcessedRtt() const = 0;
+
+ virtual ~RtcpRttStats() {}
+};
+
+struct RtpPacketCounter {
+ RtpPacketCounter()
+ : header_bytes(0), payload_bytes(0), padding_bytes(0), packets(0) {}
+
+ explicit RtpPacketCounter(const RtpPacket& packet);
+
+ void Add(const RtpPacketCounter& other) {
+ header_bytes += other.header_bytes;
+ payload_bytes += other.payload_bytes;
+ padding_bytes += other.padding_bytes;
+ packets += other.packets;
+ }
+
+ void Subtract(const RtpPacketCounter& other) {
+ RTC_DCHECK_GE(header_bytes, other.header_bytes);
+ header_bytes -= other.header_bytes;
+ RTC_DCHECK_GE(payload_bytes, other.payload_bytes);
+ payload_bytes -= other.payload_bytes;
+ RTC_DCHECK_GE(padding_bytes, other.padding_bytes);
+ padding_bytes -= other.padding_bytes;
+ RTC_DCHECK_GE(packets, other.packets);
+ packets -= other.packets;
+ }
+
+ bool operator==(const RtpPacketCounter& other) const {
+ return header_bytes == other.header_bytes &&
+ payload_bytes == other.payload_bytes &&
+ padding_bytes == other.padding_bytes && packets == other.packets;
+ }
+
+ // Not inlined, since use of RtpPacket would result in circular includes.
+ void AddPacket(const RtpPacket& packet);
+
+ size_t TotalBytes() const {
+ return header_bytes + payload_bytes + padding_bytes;
+ }
+
+ size_t header_bytes; // Number of bytes used by RTP headers.
+ size_t payload_bytes; // Payload bytes, excluding RTP headers and padding.
+ size_t padding_bytes; // Number of padding bytes.
+ uint32_t packets; // Number of packets.
+};
+
+// Data usage statistics for an (RTP) stream.
+struct StreamDataCounters {
+ StreamDataCounters();
+
+ void Add(const StreamDataCounters& other) {
+ transmitted.Add(other.transmitted);
+ retransmitted.Add(other.retransmitted);
+ fec.Add(other.fec);
+ if (other.first_packet_time_ms != -1 &&
+ (other.first_packet_time_ms < first_packet_time_ms ||
+ first_packet_time_ms == -1)) {
+ // Use oldest time.
+ first_packet_time_ms = other.first_packet_time_ms;
+ }
+ }
+
+ void Subtract(const StreamDataCounters& other) {
+ transmitted.Subtract(other.transmitted);
+ retransmitted.Subtract(other.retransmitted);
+ fec.Subtract(other.fec);
+ if (other.first_packet_time_ms != -1 &&
+ (other.first_packet_time_ms > first_packet_time_ms ||
+ first_packet_time_ms == -1)) {
+ // Use youngest time.
+ first_packet_time_ms = other.first_packet_time_ms;
+ }
+ }
+
+ int64_t TimeSinceFirstPacketInMs(int64_t now_ms) const {
+ return (first_packet_time_ms == -1) ? -1 : (now_ms - first_packet_time_ms);
+ }
+
+  // Returns the number of bytes corresponding to the actual media payload
+  // (i.e. excluding RTP headers, padding, retransmissions and FEC packets).
+  // Note that this function is not meaningful for an RTX stream.
+ size_t MediaPayloadBytes() const {
+ return transmitted.payload_bytes - retransmitted.payload_bytes -
+ fec.payload_bytes;
+ }
+
+ int64_t first_packet_time_ms; // Time when first packet is sent/received.
+ // The timestamp at which the last packet was received, i.e. the time of the
+ // local clock when it was received - not the RTP timestamp of that packet.
+ // https://w3c.github.io/webrtc-stats/#dom-rtcinboundrtpstreamstats-lastpacketreceivedtimestamp
+ absl::optional<int64_t> last_packet_received_timestamp_ms;
+ RtpPacketCounter transmitted; // Number of transmitted packets/bytes.
+ RtpPacketCounter retransmitted; // Number of retransmitted packets/bytes.
+ RtpPacketCounter fec; // Number of redundancy packets/bytes.
+};
+
+class RtpSendRates {
+ template <std::size_t... Is>
+ constexpr std::array<DataRate, sizeof...(Is)> make_zero_array(
+ std::index_sequence<Is...>) {
+ return {{(static_cast<void>(Is), DataRate::Zero())...}};
+ }
+
+ public:
+ RtpSendRates()
+ : send_rates_(
+ make_zero_array(std::make_index_sequence<kNumMediaTypes>())) {}
+ RtpSendRates(const RtpSendRates& rhs) = default;
+ RtpSendRates& operator=(const RtpSendRates&) = default;
+
+ DataRate& operator[](RtpPacketMediaType type) {
+ return send_rates_[static_cast<size_t>(type)];
+ }
+ const DataRate& operator[](RtpPacketMediaType type) const {
+ return send_rates_[static_cast<size_t>(type)];
+ }
+ DataRate Sum() const {
+ return absl::c_accumulate(send_rates_, DataRate::Zero());
+ }
+
+ private:
+ std::array<DataRate, kNumMediaTypes> send_rates_;
+};
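+
+// Usage sketch (illustrative only): rates are indexed per media type and
+// aggregated with Sum().
+//
+//   RtpSendRates rates;
+//   rates[RtpPacketMediaType::kAudio] = DataRate::BitsPerSec(32000);
+//   rates[RtpPacketMediaType::kVideo] = DataRate::BitsPerSec(500000);
+//   DataRate total = rates.Sum();  // 532 kbps.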
+
+// Callback, called whenever byte/packet counts have been updated.
+class StreamDataCountersCallback {
+ public:
+ virtual ~StreamDataCountersCallback() {}
+
+ virtual void DataCountersUpdated(const StreamDataCounters& counters,
+ uint32_t ssrc) = 0;
+};
+
+// Information exposed through the GetStats API.
+struct RtpReceiveStats {
+ // `packets_lost` and `jitter` are defined by RFC 3550, and exposed in the
+ // RTCReceivedRtpStreamStats dictionary, see
+ // https://w3c.github.io/webrtc-stats/#receivedrtpstats-dict*
+ int32_t packets_lost = 0;
+ uint32_t jitter = 0;
+
+ // Timestamp and counters exposed in RTCInboundRtpStreamStats, see
+ // https://w3c.github.io/webrtc-stats/#inboundrtpstats-dict*
+ absl::optional<int64_t> last_packet_received_timestamp_ms;
+ RtpPacketCounter packet_counter;
+};
+
+// Callback, used to notify an observer whenever new rates have been estimated.
+class BitrateStatisticsObserver {
+ public:
+ virtual ~BitrateStatisticsObserver() {}
+
+ virtual void Notify(uint32_t total_bitrate_bps,
+ uint32_t retransmit_bitrate_bps,
+ uint32_t ssrc) = 0;
+};
+
+// Callback, used to notify an observer whenever the send-side delay is updated.
+class SendSideDelayObserver {
+ public:
+ virtual ~SendSideDelayObserver() {}
+ virtual void SendSideDelayUpdated(int avg_delay_ms,
+ int max_delay_ms,
+ uint64_t total_delay_ms,
+ uint32_t ssrc) = 0;
+};
+
+// Callback, used to notify an observer whenever a packet is sent to the
+// transport.
+// TODO(asapersson): This class will remove the need for SendSideDelayObserver.
+// Remove SendSideDelayObserver once possible.
+class SendPacketObserver {
+ public:
+ virtual ~SendPacketObserver() {}
+ virtual void OnSendPacket(uint16_t packet_id,
+ int64_t capture_time_ms,
+ uint32_t ssrc) = 0;
+};
+
+} // namespace webrtc
+#endif // MODULES_RTP_RTCP_INCLUDE_RTP_RTCP_DEFINES_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/mocks/mock_recovered_packet_receiver.h b/third_party/libwebrtc/modules/rtp_rtcp/mocks/mock_recovered_packet_receiver.h
new file mode 100644
index 0000000000..404ded01d8
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/mocks/mock_recovered_packet_receiver.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_RTP_RTCP_MOCKS_MOCK_RECOVERED_PACKET_RECEIVER_H_
+#define MODULES_RTP_RTCP_MOCKS_MOCK_RECOVERED_PACKET_RECEIVER_H_
+
+#include "modules/rtp_rtcp/include/flexfec_receiver.h"
+#include "test/gmock.h"
+
+namespace webrtc {
+
+class MockRecoveredPacketReceiver : public RecoveredPacketReceiver {
+ public:
+ MOCK_METHOD(void,
+ OnRecoveredPacket,
+ (const uint8_t* packet, size_t length),
+ (override));
+};
+
+} // namespace webrtc
+
+#endif // MODULES_RTP_RTCP_MOCKS_MOCK_RECOVERED_PACKET_RECEIVER_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/mocks/mock_rtcp_bandwidth_observer.h b/third_party/libwebrtc/modules/rtp_rtcp/mocks/mock_rtcp_bandwidth_observer.h
new file mode 100644
index 0000000000..12f143ae8b
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/mocks/mock_rtcp_bandwidth_observer.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_RTP_RTCP_MOCKS_MOCK_RTCP_BANDWIDTH_OBSERVER_H_
+#define MODULES_RTP_RTCP_MOCKS_MOCK_RTCP_BANDWIDTH_OBSERVER_H_
+
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "test/gmock.h"
+
+namespace webrtc {
+
+class MockRtcpBandwidthObserver : public RtcpBandwidthObserver {
+ public:
+ MOCK_METHOD(void, OnReceivedEstimatedBitrate, (uint32_t), (override));
+ MOCK_METHOD(void,
+ OnReceivedRtcpReceiverReport,
+ (const ReportBlockList&, int64_t, int64_t),
+ (override));
+};
+} // namespace webrtc
+#endif // MODULES_RTP_RTCP_MOCKS_MOCK_RTCP_BANDWIDTH_OBSERVER_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/mocks/mock_rtcp_rtt_stats.h b/third_party/libwebrtc/modules/rtp_rtcp/mocks/mock_rtcp_rtt_stats.h
new file mode 100644
index 0000000000..e9a7d52691
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/mocks/mock_rtcp_rtt_stats.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_RTP_RTCP_MOCKS_MOCK_RTCP_RTT_STATS_H_
+#define MODULES_RTP_RTCP_MOCKS_MOCK_RTCP_RTT_STATS_H_
+
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "test/gmock.h"
+
+namespace webrtc {
+
+class MockRtcpRttStats : public RtcpRttStats {
+ public:
+ MOCK_METHOD(void, OnRttUpdate, (int64_t rtt), (override));
+ MOCK_METHOD(int64_t, LastProcessedRtt, (), (const, override));
+};
+} // namespace webrtc
+#endif // MODULES_RTP_RTCP_MOCKS_MOCK_RTCP_RTT_STATS_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/mocks/mock_rtp_rtcp.h b/third_party/libwebrtc/modules/rtp_rtcp/mocks/mock_rtp_rtcp.h
new file mode 100644
index 0000000000..d10dab860b
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/mocks/mock_rtp_rtcp.h
@@ -0,0 +1,189 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_RTP_RTCP_MOCKS_MOCK_RTP_RTCP_H_
+#define MODULES_RTP_RTCP_MOCKS_MOCK_RTP_RTCP_H_
+
+#include <memory>
+#include <set>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "absl/strings/string_view.h"
+#include "absl/types/optional.h"
+#include "api/video/video_bitrate_allocation.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "modules/rtp_rtcp/source/rtp_packet_to_send.h"
+#include "modules/rtp_rtcp/source/rtp_rtcp_interface.h"
+#include "test/gmock.h"
+
+namespace webrtc {
+
+class MockRtpRtcpInterface : public RtpRtcpInterface {
+ public:
+ MOCK_METHOD(void,
+ IncomingRtcpPacket,
+ (const uint8_t* incoming_packet, size_t packet_length),
+ (override));
+ MOCK_METHOD(void, SetRemoteSSRC, (uint32_t ssrc), (override));
+ MOCK_METHOD(void, SetLocalSsrc, (uint32_t ssrc), (override));
+ MOCK_METHOD(void, SetMaxRtpPacketSize, (size_t size), (override));
+ MOCK_METHOD(size_t, MaxRtpPacketSize, (), (const, override));
+ MOCK_METHOD(void,
+ RegisterSendPayloadFrequency,
+ (int payload_type, int frequency),
+ (override));
+ MOCK_METHOD(int32_t,
+ DeRegisterSendPayload,
+ (int8_t payload_type),
+ (override));
+ MOCK_METHOD(void, SetExtmapAllowMixed, (bool extmap_allow_mixed), (override));
+ MOCK_METHOD(void,
+ RegisterRtpHeaderExtension,
+ (absl::string_view uri, int id),
+ (override));
+ MOCK_METHOD(void,
+ DeregisterSendRtpHeaderExtension,
+ (absl::string_view uri),
+ (override));
+ MOCK_METHOD(bool, SupportsPadding, (), (const, override));
+ MOCK_METHOD(bool, SupportsRtxPayloadPadding, (), (const, override));
+ MOCK_METHOD(uint32_t, StartTimestamp, (), (const, override));
+ MOCK_METHOD(void, SetStartTimestamp, (uint32_t timestamp), (override));
+ MOCK_METHOD(uint16_t, SequenceNumber, (), (const, override));
+ MOCK_METHOD(void, SetSequenceNumber, (uint16_t seq), (override));
+ MOCK_METHOD(void, SetRtpState, (const RtpState& rtp_state), (override));
+ MOCK_METHOD(void, SetRtxState, (const RtpState& rtp_state), (override));
+ MOCK_METHOD(void, SetNonSenderRttMeasurement, (bool enabled), (override));
+ MOCK_METHOD(RtpState, GetRtpState, (), (const, override));
+ MOCK_METHOD(RtpState, GetRtxState, (), (const, override));
+ MOCK_METHOD(uint32_t, SSRC, (), (const, override));
+ MOCK_METHOD(void, SetMid, (absl::string_view mid), (override));
+ MOCK_METHOD(void, SetCsrcs, (const std::vector<uint32_t>& csrcs), (override));
+ MOCK_METHOD(void, SetRtxSendStatus, (int modes), (override));
+ MOCK_METHOD(int, RtxSendStatus, (), (const, override));
+ MOCK_METHOD(absl::optional<uint32_t>, RtxSsrc, (), (const, override));
+ MOCK_METHOD(void, SetRtxSendPayloadType, (int, int), (override));
+ MOCK_METHOD(absl::optional<uint32_t>, FlexfecSsrc, (), (const, override));
+ MOCK_METHOD(int32_t, SetSendingStatus, (bool sending), (override));
+ MOCK_METHOD(bool, Sending, (), (const, override));
+ MOCK_METHOD(void, SetSendingMediaStatus, (bool sending), (override));
+ MOCK_METHOD(bool, SendingMedia, (), (const, override));
+ MOCK_METHOD(bool, IsAudioConfigured, (), (const, override));
+ MOCK_METHOD(void, SetAsPartOfAllocation, (bool), (override));
+ MOCK_METHOD(RtpSendRates, GetSendRates, (), (const, override));
+ MOCK_METHOD(bool,
+ OnSendingRtpFrame,
+ (uint32_t, int64_t, int, bool),
+ (override));
+ MOCK_METHOD(bool,
+ TrySendPacket,
+ (RtpPacketToSend * packet, const PacedPacketInfo& pacing_info),
+ (override));
+ MOCK_METHOD(void,
+ SetFecProtectionParams,
+ (const FecProtectionParams& delta_params,
+ const FecProtectionParams& key_params),
+ (override));
+ MOCK_METHOD(std::vector<std::unique_ptr<RtpPacketToSend>>,
+ FetchFecPackets,
+ (),
+ (override));
+ MOCK_METHOD(void,
+ OnPacketsAcknowledged,
+ (rtc::ArrayView<const uint16_t>),
+ (override));
+ MOCK_METHOD(std::vector<std::unique_ptr<RtpPacketToSend>>,
+ GeneratePadding,
+ (size_t target_size_bytes),
+ (override));
+ MOCK_METHOD(std::vector<RtpSequenceNumberMap::Info>,
+ GetSentRtpPacketInfos,
+ (rtc::ArrayView<const uint16_t> sequence_numbers),
+ (const, override));
+ MOCK_METHOD(size_t, ExpectedPerPacketOverhead, (), (const, override));
+ MOCK_METHOD(void, OnPacketSendingThreadSwitched, (), (override));
+ MOCK_METHOD(RtcpMode, RTCP, (), (const, override));
+ MOCK_METHOD(void, SetRTCPStatus, (RtcpMode method), (override));
+ MOCK_METHOD(int32_t, SetCNAME, (absl::string_view cname), (override));
+ MOCK_METHOD(int32_t,
+ RemoteNTP,
+ (uint32_t * received_ntp_secs,
+ uint32_t* received_ntp_frac,
+ uint32_t* rtcp_arrival_time_secs,
+ uint32_t* rtcp_arrival_time_frac,
+ uint32_t* rtcp_timestamp),
+ (const, override));
+ MOCK_METHOD(int32_t,
+ RTT,
+ (uint32_t remote_ssrc,
+ int64_t* rtt,
+ int64_t* avg_rtt,
+ int64_t* min_rtt,
+ int64_t* max_rtt),
+ (const, override));
+ MOCK_METHOD(int64_t, ExpectedRetransmissionTimeMs, (), (const, override));
+ MOCK_METHOD(int32_t, SendRTCP, (RTCPPacketType packet_type), (override));
+ MOCK_METHOD(void,
+ GetSendStreamDataCounters,
+ (StreamDataCounters*, StreamDataCounters*),
+ (const, override));
+ MOCK_METHOD(std::vector<ReportBlockData>,
+ GetLatestReportBlockData,
+ (),
+ (const, override));
+ MOCK_METHOD(absl::optional<SenderReportStats>,
+ GetSenderReportStats,
+ (),
+ (const, override));
+ MOCK_METHOD(absl::optional<NonSenderRttStats>,
+ GetNonSenderRttStats,
+ (),
+ (const, override));
+ MOCK_METHOD(void,
+ SetRemb,
+ (int64_t bitrate, std::vector<uint32_t> ssrcs),
+ (override));
+ MOCK_METHOD(void, UnsetRemb, (), (override));
+ MOCK_METHOD(int32_t,
+ SendNACK,
+ (const uint16_t* nack_list, uint16_t size),
+ (override));
+ MOCK_METHOD(void,
+ SendNack,
+ (const std::vector<uint16_t>& sequence_numbers),
+ (override));
+ MOCK_METHOD(void,
+ SetStorePacketsStatus,
+ (bool enable, uint16_t number_to_store),
+ (override));
+ MOCK_METHOD(void,
+ SendCombinedRtcpPacket,
+ (std::vector<std::unique_ptr<rtcp::RtcpPacket>> rtcp_packets),
+ (override));
+ MOCK_METHOD(int32_t,
+ SendLossNotification,
+ (uint16_t last_decoded_seq_num,
+ uint16_t last_received_seq_num,
+ bool decodability_flag,
+ bool buffering_allowed),
+ (override));
+ MOCK_METHOD(void,
+ SetVideoBitrateAllocation,
+ (const VideoBitrateAllocation&),
+ (override));
+ MOCK_METHOD(RTPSender*, RtpSender, (), (override));
+ MOCK_METHOD(const RTPSender*, RtpSender, (), (const, override));
+};
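+
+// Usage sketch (illustrative only): the mock composes with ordinary gMock
+// expectations in tests, e.g.
+//
+//   MockRtpRtcpInterface mock;
+//   EXPECT_CALL(mock, Sending()).WillOnce(::testing::Return(true));
+//   EXPECT_TRUE(mock.Sending());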
+
+} // namespace webrtc
+
+#endif // MODULES_RTP_RTCP_MOCKS_MOCK_RTP_RTCP_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/rtp_rtcp_format_gn/moz.build b/third_party/libwebrtc/modules/rtp_rtcp/rtp_rtcp_format_gn/moz.build
new file mode 100644
index 0000000000..0a05e48201
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/rtp_rtcp_format_gn/moz.build
@@ -0,0 +1,256 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+SOURCES += [
+ "/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/tmmbn.cc",
+ "/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/tmmbr.cc",
+ "/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_util.cc"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/rtp_rtcp/include/report_block_data.cc",
+ "/third_party/libwebrtc/modules/rtp_rtcp/include/rtp_rtcp_defines.cc",
+ "/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet.cc",
+ "/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/app.cc",
+ "/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/bye.cc",
+ "/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/common_header.cc",
+ "/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/compound_packet.cc",
+ "/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/dlrr.cc",
+ "/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/extended_reports.cc",
+ "/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/fir.cc",
+ "/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/loss_notification.cc",
+ "/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/nack.cc",
+ "/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/pli.cc",
+ "/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/psfb.cc",
+ "/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/rapid_resync_request.cc",
+ "/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/receiver_report.cc",
+ "/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/remb.cc",
+ "/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/remote_estimate.cc",
+ "/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/report_block.cc",
+ "/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/rrtr.cc",
+ "/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/rtpfb.cc",
+ "/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/sdes.cc",
+ "/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/sender_report.cc",
+ "/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/target_bitrate.cc",
+ "/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/tmmb_item.cc",
+ "/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/transport_feedback.cc",
+ "/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_dependency_descriptor_extension.cc",
+ "/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_dependency_descriptor_reader.cc",
+ "/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_dependency_descriptor_writer.cc",
+ "/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_generic_frame_descriptor.cc",
+ "/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_generic_frame_descriptor_extension.cc",
+ "/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_header_extension_map.cc",
+ "/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_header_extensions.cc",
+ "/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_packet.cc",
+ "/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_packet_received.cc",
+ "/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_packet_to_send.cc",
+ "/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_video_layers_allocation_extension.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "GLESv2",
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "dl",
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("rtp_rtcp_format_gn")
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/rtp_rtcp_gn/moz.build b/third_party/libwebrtc/modules/rtp_rtcp/rtp_rtcp_gn/moz.build
new file mode 100644
index 0000000000..ff314f8d6d
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/rtp_rtcp_gn/moz.build
@@ -0,0 +1,266 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["BWE_TEST_LOGGING_COMPILE_TIME_ENABLE"] = "0"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+SOURCES += [
+ "/third_party/libwebrtc/modules/rtp_rtcp/source/flexfec_receiver.cc",
+ "/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_sender.cc",
+ "/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_format_vp8.cc",
+ "/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_rtcp_impl2.cc",
+ "/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_sender_egress.cc",
+ "/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_sender_video.cc",
+ "/third_party/libwebrtc/modules/rtp_rtcp/source/ulpfec_generator.cc",
+ "/third_party/libwebrtc/modules/rtp_rtcp/source/video_rtp_depacketizer_vp9.cc"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/rtp_rtcp/source/absolute_capture_time_interpolator.cc",
+ "/third_party/libwebrtc/modules/rtp_rtcp/source/absolute_capture_time_sender.cc",
+ "/third_party/libwebrtc/modules/rtp_rtcp/source/active_decode_targets_helper.cc",
+ "/third_party/libwebrtc/modules/rtp_rtcp/source/capture_clock_offset_updater.cc",
+ "/third_party/libwebrtc/modules/rtp_rtcp/source/create_video_rtp_depacketizer.cc",
+ "/third_party/libwebrtc/modules/rtp_rtcp/source/dtmf_queue.cc",
+ "/third_party/libwebrtc/modules/rtp_rtcp/source/fec_private_tables_bursty.cc",
+ "/third_party/libwebrtc/modules/rtp_rtcp/source/fec_private_tables_random.cc",
+ "/third_party/libwebrtc/modules/rtp_rtcp/source/flexfec_header_reader_writer.cc",
+ "/third_party/libwebrtc/modules/rtp_rtcp/source/flexfec_sender.cc",
+ "/third_party/libwebrtc/modules/rtp_rtcp/source/forward_error_correction.cc",
+ "/third_party/libwebrtc/modules/rtp_rtcp/source/forward_error_correction_internal.cc",
+ "/third_party/libwebrtc/modules/rtp_rtcp/source/packet_loss_stats.cc",
+ "/third_party/libwebrtc/modules/rtp_rtcp/source/packet_sequencer.cc",
+ "/third_party/libwebrtc/modules/rtp_rtcp/source/receive_statistics_impl.cc",
+ "/third_party/libwebrtc/modules/rtp_rtcp/source/remote_ntp_time_estimator.cc",
+ "/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_nack_stats.cc",
+ "/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_receiver.cc",
+ "/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_descriptor_authentication.cc",
+ "/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_format.cc",
+ "/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_format_h264.cc",
+ "/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_format_video_generic.cc",
+ "/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_format_vp9.cc",
+ "/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_header_extension_size.cc",
+ "/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_packet_history.cc",
+ "/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_packetizer_av1.cc",
+ "/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_sender.cc",
+ "/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_sender_audio.cc",
+ "/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_sender_video_frame_transformer_delegate.cc",
+ "/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_sequence_number_map.cc",
+ "/third_party/libwebrtc/modules/rtp_rtcp/source/source_tracker.cc",
+ "/third_party/libwebrtc/modules/rtp_rtcp/source/time_util.cc",
+ "/third_party/libwebrtc/modules/rtp_rtcp/source/tmmbr_help.cc",
+ "/third_party/libwebrtc/modules/rtp_rtcp/source/ulpfec_header_reader_writer.cc",
+ "/third_party/libwebrtc/modules/rtp_rtcp/source/ulpfec_receiver.cc",
+ "/third_party/libwebrtc/modules/rtp_rtcp/source/video_rtp_depacketizer.cc",
+ "/third_party/libwebrtc/modules/rtp_rtcp/source/video_rtp_depacketizer_av1.cc",
+ "/third_party/libwebrtc/modules/rtp_rtcp/source/video_rtp_depacketizer_generic.cc",
+ "/third_party/libwebrtc/modules/rtp_rtcp/source/video_rtp_depacketizer_h264.cc",
+ "/third_party/libwebrtc/modules/rtp_rtcp/source/video_rtp_depacketizer_raw.cc",
+ "/third_party/libwebrtc/modules/rtp_rtcp/source/video_rtp_depacketizer_vp8.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "GLESv2",
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "dl",
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("rtp_rtcp_gn")
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/rtp_video_header_gn/moz.build b/third_party/libwebrtc/modules/rtp_rtcp/rtp_video_header_gn/moz.build
new file mode 100644
index 0000000000..7bb5cb10ce
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/rtp_video_header_gn/moz.build
@@ -0,0 +1,205 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_video_header.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("rtp_video_header_gn")
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/absolute_capture_time_interpolator.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/absolute_capture_time_interpolator.cc
new file mode 100644
index 0000000000..99fc030aca
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/absolute_capture_time_interpolator.cc
@@ -0,0 +1,125 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/absolute_capture_time_interpolator.h"
+
+#include <limits>
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace {
+
+constexpr Timestamp kInvalidLastReceiveTime = Timestamp::MinusInfinity();
+} // namespace
+
+constexpr TimeDelta AbsoluteCaptureTimeInterpolator::kInterpolationMaxInterval;
+
+AbsoluteCaptureTimeInterpolator::AbsoluteCaptureTimeInterpolator(Clock* clock)
+ : clock_(clock), last_receive_time_(kInvalidLastReceiveTime) {}
+
+uint32_t AbsoluteCaptureTimeInterpolator::GetSource(
+ uint32_t ssrc,
+ rtc::ArrayView<const uint32_t> csrcs) {
+ if (csrcs.empty()) {
+ return ssrc;
+ }
+
+ return csrcs[0];
+}
+
+absl::optional<AbsoluteCaptureTime>
+AbsoluteCaptureTimeInterpolator::OnReceivePacket(
+ uint32_t source,
+ uint32_t rtp_timestamp,
+ uint32_t rtp_clock_frequency,
+ const absl::optional<AbsoluteCaptureTime>& received_extension) {
+ const Timestamp receive_time = clock_->CurrentTime();
+
+ MutexLock lock(&mutex_);
+
+ AbsoluteCaptureTime extension;
+ if (received_extension == absl::nullopt) {
+ if (!ShouldInterpolateExtension(receive_time, source, rtp_timestamp,
+ rtp_clock_frequency)) {
+ last_receive_time_ = kInvalidLastReceiveTime;
+ return absl::nullopt;
+ }
+
+ extension.absolute_capture_timestamp = InterpolateAbsoluteCaptureTimestamp(
+ rtp_timestamp, rtp_clock_frequency, last_rtp_timestamp_,
+ last_absolute_capture_timestamp_);
+ extension.estimated_capture_clock_offset =
+ last_estimated_capture_clock_offset_;
+ } else {
+ last_source_ = source;
+ last_rtp_timestamp_ = rtp_timestamp;
+ last_rtp_clock_frequency_ = rtp_clock_frequency;
+ last_absolute_capture_timestamp_ =
+ received_extension->absolute_capture_timestamp;
+ last_estimated_capture_clock_offset_ =
+ received_extension->estimated_capture_clock_offset;
+
+ last_receive_time_ = receive_time;
+
+ extension = *received_extension;
+ }
+
+ return extension;
+}
+
+uint64_t AbsoluteCaptureTimeInterpolator::InterpolateAbsoluteCaptureTimestamp(
+ uint32_t rtp_timestamp,
+ uint32_t rtp_clock_frequency,
+ uint32_t last_rtp_timestamp,
+ uint64_t last_absolute_capture_timestamp) {
+ RTC_DCHECK_GT(rtp_clock_frequency, 0);
+
+ return last_absolute_capture_timestamp +
+ static_cast<int64_t>(
+ rtc::dchecked_cast<uint64_t>(rtp_timestamp - last_rtp_timestamp)
+ << 32) /
+ rtp_clock_frequency;
+}
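+
+// Worked example (for illustration; mirrors the unit tests): with a 64 kHz
+// RTP clock, an advance of 1280 RTP ticks yields (1280 << 32) / 64000 in
+// UQ32.32 format, i.e. 1280 / 64000 s = 20 ms added to the last received
+// absolute capture timestamp. Wrap-around is handled because
+// `rtp_timestamp - last_rtp_timestamp` is computed in uint32_t modular
+// arithmetic, so a small backwards step still becomes a small negative
+// delta after the cast to int64_t.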
+
+bool AbsoluteCaptureTimeInterpolator::ShouldInterpolateExtension(
+ Timestamp receive_time,
+ uint32_t source,
+ uint32_t rtp_timestamp,
+ uint32_t rtp_clock_frequency) const {
+ // Shouldn't if we don't have a previously received extension stored.
+ if (last_receive_time_ == kInvalidLastReceiveTime) {
+ return false;
+ }
+
+ // Shouldn't if the last received extension is too old.
+ if ((receive_time - last_receive_time_) > kInterpolationMaxInterval) {
+ return false;
+ }
+
+ // Shouldn't if the source has changed.
+ if (last_source_ != source) {
+ return false;
+ }
+
+ // Shouldn't if the RTP clock frequency has changed.
+ if (last_rtp_clock_frequency_ != rtp_clock_frequency) {
+ return false;
+ }
+
+ // Shouldn't if the RTP clock frequency is invalid.
+ if (rtp_clock_frequency <= 0) {
+ return false;
+ }
+
+ return true;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/absolute_capture_time_interpolator.h b/third_party/libwebrtc/modules/rtp_rtcp/source/absolute_capture_time_interpolator.h
new file mode 100644
index 0000000000..f5ec820dd5
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/absolute_capture_time_interpolator.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_RTP_RTCP_SOURCE_ABSOLUTE_CAPTURE_TIME_INTERPOLATOR_H_
+#define MODULES_RTP_RTCP_SOURCE_ABSOLUTE_CAPTURE_TIME_INTERPOLATOR_H_
+
+#include "api/array_view.h"
+#include "api/rtp_headers.h"
+#include "api/units/time_delta.h"
+#include "api/units/timestamp.h"
+#include "rtc_base/synchronization/mutex.h"
+#include "rtc_base/thread_annotations.h"
+#include "system_wrappers/include/clock.h"
+
+namespace webrtc {
+
+//
+// Helper class for interpolating the `AbsoluteCaptureTime` header extension.
+//
+// Supports the "timestamp interpolation" optimization:
+// A receiver SHOULD memorize the capture system (i.e. CSRC/SSRC), capture
+// timestamp, and RTP timestamp of the most recently received abs-capture-time
+// packet on each received stream. It can then use that information, in
+// combination with RTP timestamps of packets without abs-capture-time, to
+// extrapolate missing capture timestamps.
+//
+// See: https://webrtc.org/experiments/rtp-hdrext/abs-capture-time/
+//
+class AbsoluteCaptureTimeInterpolator {
+ public:
+ static constexpr TimeDelta kInterpolationMaxInterval =
+ TimeDelta::Millis(5000);
+
+ explicit AbsoluteCaptureTimeInterpolator(Clock* clock);
+
+ // Returns the source (i.e. SSRC or CSRC) of the capture system.
+ static uint32_t GetSource(uint32_t ssrc,
+ rtc::ArrayView<const uint32_t> csrcs);
+
+ // Returns a received header extension, an interpolated header extension, or
+ // `absl::nullopt` if it's not possible to interpolate a header extension.
+ absl::optional<AbsoluteCaptureTime> OnReceivePacket(
+ uint32_t source,
+ uint32_t rtp_timestamp,
+ uint32_t rtp_clock_frequency,
+ const absl::optional<AbsoluteCaptureTime>& received_extension);
+
+ private:
+ friend class AbsoluteCaptureTimeSender;
+
+ static uint64_t InterpolateAbsoluteCaptureTimestamp(
+ uint32_t rtp_timestamp,
+ uint32_t rtp_clock_frequency,
+ uint32_t last_rtp_timestamp,
+ uint64_t last_absolute_capture_timestamp);
+
+ bool ShouldInterpolateExtension(Timestamp receive_time,
+ uint32_t source,
+ uint32_t rtp_timestamp,
+ uint32_t rtp_clock_frequency) const
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+
+ Clock* const clock_;
+
+ Mutex mutex_;
+
+ Timestamp last_receive_time_ RTC_GUARDED_BY(mutex_);
+
+ uint32_t last_source_ RTC_GUARDED_BY(mutex_);
+ uint32_t last_rtp_timestamp_ RTC_GUARDED_BY(mutex_);
+ uint32_t last_rtp_clock_frequency_ RTC_GUARDED_BY(mutex_);
+ uint64_t last_absolute_capture_timestamp_ RTC_GUARDED_BY(mutex_);
+ absl::optional<int64_t> last_estimated_capture_clock_offset_
+ RTC_GUARDED_BY(mutex_);
+}; // AbsoluteCaptureTimeInterpolator
+
+} // namespace webrtc
+
+#endif // MODULES_RTP_RTCP_SOURCE_ABSOLUTE_CAPTURE_TIME_INTERPOLATOR_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/absolute_capture_time_interpolator_unittest.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/absolute_capture_time_interpolator_unittest.cc
new file mode 100644
index 0000000000..6a312f9b43
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/absolute_capture_time_interpolator_unittest.cc
@@ -0,0 +1,353 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/absolute_capture_time_interpolator.h"
+
+#include "system_wrappers/include/ntp_time.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+TEST(AbsoluteCaptureTimeInterpolatorTest, GetSourceWithoutCsrcs) {
+ constexpr uint32_t kSsrc = 12;
+
+ EXPECT_EQ(AbsoluteCaptureTimeInterpolator::GetSource(kSsrc, nullptr), kSsrc);
+}
+
+TEST(AbsoluteCaptureTimeInterpolatorTest, GetSourceWithCsrcs) {
+ constexpr uint32_t kSsrc = 12;
+ constexpr uint32_t kCsrcs[] = {34, 56, 78, 90};
+
+ EXPECT_EQ(AbsoluteCaptureTimeInterpolator::GetSource(kSsrc, kCsrcs),
+ kCsrcs[0]);
+}
+
+TEST(AbsoluteCaptureTimeInterpolatorTest, ReceiveExtensionReturnsExtension) {
+ constexpr uint32_t kSource = 1337;
+ constexpr uint32_t kRtpClockFrequency = 64000;
+ constexpr uint32_t kRtpTimestamp0 = 1020300000;
+ constexpr uint32_t kRtpTimestamp1 = kRtpTimestamp0 + 1280;
+ static const absl::optional<AbsoluteCaptureTime> kExtension0 =
+ AbsoluteCaptureTime{Int64MsToUQ32x32(9000), Int64MsToQ32x32(-350)};
+ static const absl::optional<AbsoluteCaptureTime> kExtension1 =
+ AbsoluteCaptureTime{Int64MsToUQ32x32(9020), absl::nullopt};
+
+ SimulatedClock clock(0);
+ AbsoluteCaptureTimeInterpolator interpolator(&clock);
+
+ EXPECT_EQ(interpolator.OnReceivePacket(kSource, kRtpTimestamp0,
+ kRtpClockFrequency, kExtension0),
+ kExtension0);
+
+ EXPECT_EQ(interpolator.OnReceivePacket(kSource, kRtpTimestamp1,
+ kRtpClockFrequency, kExtension1),
+ kExtension1);
+}
+
+TEST(AbsoluteCaptureTimeInterpolatorTest,
+ ReceiveNoExtensionReturnsNoExtension) {
+ constexpr uint32_t kSource = 1337;
+ constexpr uint32_t kRtpClockFrequency = 64000;
+ constexpr uint32_t kRtpTimestamp0 = 1020300000;
+ constexpr uint32_t kRtpTimestamp1 = kRtpTimestamp0 + 1280;
+ static const absl::optional<AbsoluteCaptureTime> kExtension0 = absl::nullopt;
+ static const absl::optional<AbsoluteCaptureTime> kExtension1 = absl::nullopt;
+
+ SimulatedClock clock(0);
+ AbsoluteCaptureTimeInterpolator interpolator(&clock);
+
+ EXPECT_EQ(interpolator.OnReceivePacket(kSource, kRtpTimestamp0,
+ kRtpClockFrequency, kExtension0),
+ absl::nullopt);
+
+ EXPECT_EQ(interpolator.OnReceivePacket(kSource, kRtpTimestamp1,
+ kRtpClockFrequency, kExtension1),
+ absl::nullopt);
+}
+
+TEST(AbsoluteCaptureTimeInterpolatorTest, InterpolateLaterPacketArrivingLater) {
+ constexpr uint32_t kSource = 1337;
+ constexpr uint32_t kRtpClockFrequency = 64000;
+ constexpr uint32_t kRtpTimestamp0 = 1020300000;
+ constexpr uint32_t kRtpTimestamp1 = kRtpTimestamp0 + 1280;
+ constexpr uint32_t kRtpTimestamp2 = kRtpTimestamp0 + 2560;
+ static const absl::optional<AbsoluteCaptureTime> kExtension0 =
+ AbsoluteCaptureTime{Int64MsToUQ32x32(9000), Int64MsToQ32x32(-350)};
+ static const absl::optional<AbsoluteCaptureTime> kExtension1 = absl::nullopt;
+ static const absl::optional<AbsoluteCaptureTime> kExtension2 = absl::nullopt;
+
+ SimulatedClock clock(0);
+ AbsoluteCaptureTimeInterpolator interpolator(&clock);
+
+ EXPECT_EQ(interpolator.OnReceivePacket(kSource, kRtpTimestamp0,
+ kRtpClockFrequency, kExtension0),
+ kExtension0);
+
+ absl::optional<AbsoluteCaptureTime> extension = interpolator.OnReceivePacket(
+ kSource, kRtpTimestamp1, kRtpClockFrequency, kExtension1);
+ EXPECT_TRUE(extension.has_value());
+ EXPECT_EQ(UQ32x32ToInt64Ms(extension->absolute_capture_timestamp),
+ UQ32x32ToInt64Ms(kExtension0->absolute_capture_timestamp) + 20);
+ EXPECT_EQ(extension->estimated_capture_clock_offset,
+ kExtension0->estimated_capture_clock_offset);
+
+ extension = interpolator.OnReceivePacket(kSource, kRtpTimestamp2,
+ kRtpClockFrequency, kExtension2);
+ EXPECT_TRUE(extension.has_value());
+ EXPECT_EQ(UQ32x32ToInt64Ms(extension->absolute_capture_timestamp),
+ UQ32x32ToInt64Ms(kExtension0->absolute_capture_timestamp) + 40);
+ EXPECT_EQ(extension->estimated_capture_clock_offset,
+ kExtension0->estimated_capture_clock_offset);
+}
+
+TEST(AbsoluteCaptureTimeInterpolatorTest,
+ InterpolateEarlierPacketArrivingLater) {
+ constexpr uint32_t kSource = 1337;
+ constexpr uint32_t kRtpClockFrequency = 64000;
+ constexpr uint32_t kRtpTimestamp0 = 1020300000;
+ constexpr uint32_t kRtpTimestamp1 = kRtpTimestamp0 - 1280;
+ constexpr uint32_t kRtpTimestamp2 = kRtpTimestamp0 - 2560;
+ static const absl::optional<AbsoluteCaptureTime> kExtension0 =
+ AbsoluteCaptureTime{Int64MsToUQ32x32(9000), Int64MsToQ32x32(-350)};
+ static const absl::optional<AbsoluteCaptureTime> kExtension1 = absl::nullopt;
+ static const absl::optional<AbsoluteCaptureTime> kExtension2 = absl::nullopt;
+
+ SimulatedClock clock(0);
+ AbsoluteCaptureTimeInterpolator interpolator(&clock);
+
+ EXPECT_EQ(interpolator.OnReceivePacket(kSource, kRtpTimestamp0,
+ kRtpClockFrequency, kExtension0),
+ kExtension0);
+
+ absl::optional<AbsoluteCaptureTime> extension = interpolator.OnReceivePacket(
+ kSource, kRtpTimestamp1, kRtpClockFrequency, kExtension1);
+ EXPECT_TRUE(extension.has_value());
+ EXPECT_EQ(UQ32x32ToInt64Ms(extension->absolute_capture_timestamp),
+ UQ32x32ToInt64Ms(kExtension0->absolute_capture_timestamp) - 20);
+ EXPECT_EQ(extension->estimated_capture_clock_offset,
+ kExtension0->estimated_capture_clock_offset);
+
+ extension = interpolator.OnReceivePacket(kSource, kRtpTimestamp2,
+ kRtpClockFrequency, kExtension2);
+ EXPECT_TRUE(extension.has_value());
+ EXPECT_EQ(UQ32x32ToInt64Ms(extension->absolute_capture_timestamp),
+ UQ32x32ToInt64Ms(kExtension0->absolute_capture_timestamp) - 40);
+ EXPECT_EQ(extension->estimated_capture_clock_offset,
+ kExtension0->estimated_capture_clock_offset);
+}
+
+TEST(AbsoluteCaptureTimeInterpolatorTest,
+ InterpolateLaterPacketArrivingLaterWithRtpTimestampWrapAround) {
+ constexpr uint32_t kSource = 1337;
+ constexpr uint32_t kRtpClockFrequency = 64000;
+ constexpr uint32_t kRtpTimestamp0 = ~uint32_t{0} - 79;
+ constexpr uint32_t kRtpTimestamp1 = 1280 - 80;
+ constexpr uint32_t kRtpTimestamp2 = 2560 - 80;
+ static const absl::optional<AbsoluteCaptureTime> kExtension0 =
+ AbsoluteCaptureTime{Int64MsToUQ32x32(9000), Int64MsToQ32x32(-350)};
+ static const absl::optional<AbsoluteCaptureTime> kExtension1 = absl::nullopt;
+ static const absl::optional<AbsoluteCaptureTime> kExtension2 = absl::nullopt;
+
+ SimulatedClock clock(0);
+ AbsoluteCaptureTimeInterpolator interpolator(&clock);
+
+ EXPECT_EQ(interpolator.OnReceivePacket(kSource, kRtpTimestamp0,
+ kRtpClockFrequency, kExtension0),
+ kExtension0);
+
+ absl::optional<AbsoluteCaptureTime> extension = interpolator.OnReceivePacket(
+ kSource, kRtpTimestamp1, kRtpClockFrequency, kExtension1);
+ EXPECT_TRUE(extension.has_value());
+ EXPECT_EQ(UQ32x32ToInt64Ms(extension->absolute_capture_timestamp),
+ UQ32x32ToInt64Ms(kExtension0->absolute_capture_timestamp) + 20);
+ EXPECT_EQ(extension->estimated_capture_clock_offset,
+ kExtension0->estimated_capture_clock_offset);
+
+ extension = interpolator.OnReceivePacket(kSource, kRtpTimestamp2,
+ kRtpClockFrequency, kExtension2);
+ EXPECT_TRUE(extension.has_value());
+ EXPECT_EQ(UQ32x32ToInt64Ms(extension->absolute_capture_timestamp),
+ UQ32x32ToInt64Ms(kExtension0->absolute_capture_timestamp) + 40);
+ EXPECT_EQ(extension->estimated_capture_clock_offset,
+ kExtension0->estimated_capture_clock_offset);
+}
+
+TEST(AbsoluteCaptureTimeInterpolatorTest,
+ InterpolateEarlierPacketArrivingLaterWithRtpTimestampWrapAround) {
+ constexpr uint32_t kSource = 1337;
+ constexpr uint32_t kRtpClockFrequency = 64000;
+ constexpr uint32_t kRtpTimestamp0 = 799;
+ constexpr uint32_t kRtpTimestamp1 = kRtpTimestamp0 - 1280;
+ constexpr uint32_t kRtpTimestamp2 = kRtpTimestamp0 - 2560;
+ static const absl::optional<AbsoluteCaptureTime> kExtension0 =
+ AbsoluteCaptureTime{Int64MsToUQ32x32(9000), Int64MsToQ32x32(-350)};
+ static const absl::optional<AbsoluteCaptureTime> kExtension1 = absl::nullopt;
+ static const absl::optional<AbsoluteCaptureTime> kExtension2 = absl::nullopt;
+
+ SimulatedClock clock(0);
+ AbsoluteCaptureTimeInterpolator interpolator(&clock);
+
+ EXPECT_EQ(interpolator.OnReceivePacket(kSource, kRtpTimestamp0,
+ kRtpClockFrequency, kExtension0),
+ kExtension0);
+
+ absl::optional<AbsoluteCaptureTime> extension = interpolator.OnReceivePacket(
+ kSource, kRtpTimestamp1, kRtpClockFrequency, kExtension1);
+ EXPECT_TRUE(extension.has_value());
+ EXPECT_EQ(UQ32x32ToInt64Ms(extension->absolute_capture_timestamp),
+ UQ32x32ToInt64Ms(kExtension0->absolute_capture_timestamp) - 20);
+ EXPECT_EQ(extension->estimated_capture_clock_offset,
+ kExtension0->estimated_capture_clock_offset);
+
+ extension = interpolator.OnReceivePacket(kSource, kRtpTimestamp2,
+ kRtpClockFrequency, kExtension2);
+ EXPECT_TRUE(extension.has_value());
+ EXPECT_EQ(UQ32x32ToInt64Ms(extension->absolute_capture_timestamp),
+ UQ32x32ToInt64Ms(kExtension0->absolute_capture_timestamp) - 40);
+ EXPECT_EQ(extension->estimated_capture_clock_offset,
+ kExtension0->estimated_capture_clock_offset);
+}
+
+TEST(AbsoluteCaptureTimeInterpolatorTest, SkipInterpolateIfTooLate) {
+ constexpr uint32_t kSource = 1337;
+ constexpr uint32_t kRtpClockFrequency = 64000;
+ constexpr uint32_t kRtpTimestamp0 = 1020300000;
+ constexpr uint32_t kRtpTimestamp1 = kRtpTimestamp0 + 1280;
+ constexpr uint32_t kRtpTimestamp2 = kRtpTimestamp1 + 1280;
+ static const absl::optional<AbsoluteCaptureTime> kExtension0 =
+ AbsoluteCaptureTime{Int64MsToUQ32x32(9000), Int64MsToQ32x32(-350)};
+ static const absl::optional<AbsoluteCaptureTime> kExtension1 = absl::nullopt;
+ static const absl::optional<AbsoluteCaptureTime> kExtension2 = absl::nullopt;
+
+ SimulatedClock clock(0);
+ AbsoluteCaptureTimeInterpolator interpolator(&clock);
+
+ EXPECT_EQ(interpolator.OnReceivePacket(kSource, kRtpTimestamp0,
+ kRtpClockFrequency, kExtension0),
+ kExtension0);
+
+ clock.AdvanceTime(AbsoluteCaptureTimeInterpolator::kInterpolationMaxInterval);
+
+ EXPECT_TRUE(interpolator
+ .OnReceivePacket(kSource, kRtpTimestamp1, kRtpClockFrequency,
+ kExtension1)
+ .has_value());
+
+ clock.AdvanceTimeMilliseconds(1);
+
+ EXPECT_FALSE(interpolator
+ .OnReceivePacket(kSource, kRtpTimestamp2, kRtpClockFrequency,
+ kExtension2)
+ .has_value());
+}
+
+TEST(AbsoluteCaptureTimeInterpolatorTest, SkipInterpolateIfSourceChanged) {
+ constexpr uint32_t kSource0 = 1337;
+ constexpr uint32_t kSource1 = 1338;
+ constexpr uint32_t kRtpClockFrequency = 64000;
+ constexpr uint32_t kRtpTimestamp0 = 1020300000;
+ constexpr uint32_t kRtpTimestamp1 = kRtpTimestamp0 + 1280;
+ static const absl::optional<AbsoluteCaptureTime> kExtension0 =
+ AbsoluteCaptureTime{Int64MsToUQ32x32(9000), Int64MsToQ32x32(-350)};
+ static const absl::optional<AbsoluteCaptureTime> kExtension1 = absl::nullopt;
+
+ SimulatedClock clock(0);
+ AbsoluteCaptureTimeInterpolator interpolator(&clock);
+
+ EXPECT_EQ(interpolator.OnReceivePacket(kSource0, kRtpTimestamp0,
+ kRtpClockFrequency, kExtension0),
+ kExtension0);
+
+ EXPECT_FALSE(interpolator
+ .OnReceivePacket(kSource1, kRtpTimestamp1,
+ kRtpClockFrequency, kExtension1)
+ .has_value());
+}
+
+TEST(AbsoluteCaptureTimeInterpolatorTest,
+ SkipInterpolateIfRtpClockFrequencyChanged) {
+ constexpr uint32_t kSource = 1337;
+ constexpr uint32_t kRtpClockFrequency0 = 64000;
+ constexpr uint32_t kRtpClockFrequency1 = 32000;
+ constexpr uint32_t kRtpTimestamp0 = 1020300000;
+ constexpr uint32_t kRtpTimestamp1 = kRtpTimestamp0 + 640;
+ static const absl::optional<AbsoluteCaptureTime> kExtension0 =
+ AbsoluteCaptureTime{Int64MsToUQ32x32(9000), Int64MsToQ32x32(-350)};
+ static const absl::optional<AbsoluteCaptureTime> kExtension1 = absl::nullopt;
+
+ SimulatedClock clock(0);
+ AbsoluteCaptureTimeInterpolator interpolator(&clock);
+
+ EXPECT_EQ(interpolator.OnReceivePacket(kSource, kRtpTimestamp0,
+ kRtpClockFrequency0, kExtension0),
+ kExtension0);
+
+ EXPECT_FALSE(interpolator
+ .OnReceivePacket(kSource, kRtpTimestamp1,
+ kRtpClockFrequency1, kExtension1)
+ .has_value());
+}
+
+TEST(AbsoluteCaptureTimeInterpolatorTest,
+ SkipInterpolateIfRtpClockFrequencyIsInvalid) {
+ constexpr uint32_t kSource = 1337;
+ constexpr uint32_t kRtpClockFrequency = 0;
+ constexpr uint32_t kRtpTimestamp0 = 1020300000;
+ constexpr uint32_t kRtpTimestamp1 = kRtpTimestamp0 + 640;
+ static const absl::optional<AbsoluteCaptureTime> kExtension0 =
+ AbsoluteCaptureTime{Int64MsToUQ32x32(9000), Int64MsToQ32x32(-350)};
+ static const absl::optional<AbsoluteCaptureTime> kExtension1 = absl::nullopt;
+
+ SimulatedClock clock(0);
+ AbsoluteCaptureTimeInterpolator interpolator(&clock);
+
+ EXPECT_EQ(interpolator.OnReceivePacket(kSource, kRtpTimestamp0,
+ kRtpClockFrequency, kExtension0),
+ kExtension0);
+
+ EXPECT_FALSE(interpolator
+ .OnReceivePacket(kSource, kRtpTimestamp1, kRtpClockFrequency,
+ kExtension1)
+ .has_value());
+}
+
+TEST(AbsoluteCaptureTimeInterpolatorTest, SkipInterpolateIsSticky) {
+ constexpr uint32_t kSource0 = 1337;
+ constexpr uint32_t kSource1 = 1338;
+ constexpr uint32_t kSource2 = 1337;
+ constexpr uint32_t kRtpClockFrequency = 64000;
+ constexpr uint32_t kRtpTimestamp0 = 1020300000;
+ constexpr uint32_t kRtpTimestamp1 = kRtpTimestamp0 + 1280;
+ constexpr uint32_t kRtpTimestamp2 = kRtpTimestamp1 + 1280;
+ static const absl::optional<AbsoluteCaptureTime> kExtension0 =
+ AbsoluteCaptureTime{Int64MsToUQ32x32(9000), Int64MsToQ32x32(-350)};
+ static const absl::optional<AbsoluteCaptureTime> kExtension1 = absl::nullopt;
+ static const absl::optional<AbsoluteCaptureTime> kExtension2 = absl::nullopt;
+
+ SimulatedClock clock(0);
+ AbsoluteCaptureTimeInterpolator interpolator(&clock);
+
+ EXPECT_EQ(interpolator.OnReceivePacket(kSource0, kRtpTimestamp0,
+ kRtpClockFrequency, kExtension0),
+ kExtension0);
+
+ EXPECT_FALSE(interpolator
+ .OnReceivePacket(kSource1, kRtpTimestamp1,
+ kRtpClockFrequency, kExtension1)
+ .has_value());
+
+ EXPECT_FALSE(interpolator
+ .OnReceivePacket(kSource2, kRtpTimestamp2,
+ kRtpClockFrequency, kExtension2)
+ .has_value());
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/absolute_capture_time_sender.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/absolute_capture_time_sender.cc
new file mode 100644
index 0000000000..28266769ff
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/absolute_capture_time_sender.cc
@@ -0,0 +1,124 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/absolute_capture_time_sender.h"
+
+#include <limits>
+
+#include "modules/rtp_rtcp/source/absolute_capture_time_interpolator.h"
+#include "system_wrappers/include/ntp_time.h"
+
+namespace webrtc {
+namespace {
+
+constexpr Timestamp kInvalidLastSendTime = Timestamp::MinusInfinity();
+
+} // namespace
+
+constexpr TimeDelta AbsoluteCaptureTimeSender::kInterpolationMaxInterval;
+constexpr TimeDelta AbsoluteCaptureTimeSender::kInterpolationMaxError;
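+// Note: these out-of-line definitions keep ODR-uses of the class constants
+// valid before C++17; with C++17 `static constexpr` data members are
+// implicitly inline and these lines are redundant but harmless.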
+
+static_assert(
+ AbsoluteCaptureTimeInterpolator::kInterpolationMaxInterval >=
+ AbsoluteCaptureTimeSender::kInterpolationMaxInterval,
+ "Receivers should be as willing to interpolate timestamps as senders.");
+
+AbsoluteCaptureTimeSender::AbsoluteCaptureTimeSender(Clock* clock)
+ : clock_(clock), last_send_time_(kInvalidLastSendTime) {}
+
+uint32_t AbsoluteCaptureTimeSender::GetSource(
+ uint32_t ssrc,
+ rtc::ArrayView<const uint32_t> csrcs) {
+ return AbsoluteCaptureTimeInterpolator::GetSource(ssrc, csrcs);
+}
+
+absl::optional<AbsoluteCaptureTime> AbsoluteCaptureTimeSender::OnSendPacket(
+ uint32_t source,
+ uint32_t rtp_timestamp,
+ uint32_t rtp_clock_frequency,
+ uint64_t absolute_capture_timestamp,
+ absl::optional<int64_t> estimated_capture_clock_offset) {
+ const Timestamp send_time = clock_->CurrentTime();
+
+ MutexLock lock(&mutex_);
+
+ if (!ShouldSendExtension(send_time, source, rtp_timestamp,
+ rtp_clock_frequency, absolute_capture_timestamp,
+ estimated_capture_clock_offset)) {
+ return absl::nullopt;
+ }
+
+ last_source_ = source;
+ last_rtp_timestamp_ = rtp_timestamp;
+ last_rtp_clock_frequency_ = rtp_clock_frequency;
+ last_absolute_capture_timestamp_ = absolute_capture_timestamp;
+ last_estimated_capture_clock_offset_ = estimated_capture_clock_offset;
+
+ last_send_time_ = send_time;
+
+ AbsoluteCaptureTime extension;
+ extension.absolute_capture_timestamp = absolute_capture_timestamp;
+ extension.estimated_capture_clock_offset = estimated_capture_clock_offset;
+ return extension;
+}
+
+bool AbsoluteCaptureTimeSender::ShouldSendExtension(
+ Timestamp send_time,
+ uint32_t source,
+ uint32_t rtp_timestamp,
+ uint32_t rtp_clock_frequency,
+ uint64_t absolute_capture_timestamp,
+ absl::optional<int64_t> estimated_capture_clock_offset) const {
+  // Send if we've never sent anything before.
+ if (last_send_time_ == kInvalidLastSendTime) {
+ return true;
+ }
+
+  // Send if the last sent extension is too old.
+ if ((send_time - last_send_time_) > kInterpolationMaxInterval) {
+ return true;
+ }
+
+  // Send if the source has changed.
+ if (last_source_ != source) {
+ return true;
+ }
+
+  // Send if the RTP clock frequency has changed.
+ if (last_rtp_clock_frequency_ != rtp_clock_frequency) {
+ return true;
+ }
+
+  // Send if the RTP clock frequency is invalid.
+ if (rtp_clock_frequency <= 0) {
+ return true;
+ }
+
+  // Send if the estimated capture clock offset has changed.
+ if (last_estimated_capture_clock_offset_ != estimated_capture_clock_offset) {
+ return true;
+ }
+
+  // Send if interpolation would introduce too much error.
+ const uint64_t interpolated_absolute_capture_timestamp =
+ AbsoluteCaptureTimeInterpolator::InterpolateAbsoluteCaptureTimestamp(
+ rtp_timestamp, rtp_clock_frequency, last_rtp_timestamp_,
+ last_absolute_capture_timestamp_);
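+  // Note that both subtractions below are unsigned: when the two timestamps
+  // differ, exactly one of them wraps around, so std::min() picks the true
+  // absolute difference.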
+ const int64_t interpolation_error_ms = UQ32x32ToInt64Ms(std::min(
+ interpolated_absolute_capture_timestamp - absolute_capture_timestamp,
+ absolute_capture_timestamp - interpolated_absolute_capture_timestamp));
+ if (interpolation_error_ms > kInterpolationMaxError.ms()) {
+ return true;
+ }
+
+ return false;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/absolute_capture_time_sender.h b/third_party/libwebrtc/modules/rtp_rtcp/source/absolute_capture_time_sender.h
new file mode 100644
index 0000000000..be5a77d5e1
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/absolute_capture_time_sender.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_RTP_RTCP_SOURCE_ABSOLUTE_CAPTURE_TIME_SENDER_H_
+#define MODULES_RTP_RTCP_SOURCE_ABSOLUTE_CAPTURE_TIME_SENDER_H_
+
+#include "api/array_view.h"
+#include "api/rtp_headers.h"
+#include "api/units/time_delta.h"
+#include "api/units/timestamp.h"
+#include "rtc_base/synchronization/mutex.h"
+#include "rtc_base/thread_annotations.h"
+#include "system_wrappers/include/clock.h"
+
+namespace webrtc {
+
+//
+// Helper class for sending the `AbsoluteCaptureTime` header extension.
+//
+// Supports the "timestamp interpolation" optimization:
+// A sender SHOULD save bandwidth by not sending abs-capture-time with every
+// RTP packet. It SHOULD still send them at regular intervals (e.g. every
+// second) to help mitigate the impact of clock drift and packet loss. Mixers
+// SHOULD always send abs-capture-time with the first RTP packet after
+// changing capture system.
+//
+// Timestamp interpolation works fine as long as there's reasonably low
+// NTP/RTP clock drift. This is not always true. Senders that detect "jumps"
+// between their NTP and RTP clock mappings SHOULD send abs-capture-time with
+// the first RTP packet after such a jump.
+//
+// See: https://webrtc.org/experiments/rtp-hdrext/abs-capture-time/
+//
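+// A minimal usage sketch (illustrative only; `clock`, `ssrc`, `csrcs` and the
+// per-packet values are assumed to come from the surrounding RTP sender):
+//
+//   AbsoluteCaptureTimeSender sender(clock);
+//   uint32_t source = AbsoluteCaptureTimeSender::GetSource(ssrc, csrcs);
+//   absl::optional<AbsoluteCaptureTime> extension =
+//       sender.OnSendPacket(source, rtp_timestamp, rtp_clock_frequency,
+//                           Int64MsToUQ32x32(capture_time_ms),
+//                           /*estimated_capture_clock_offset=*/absl::nullopt);
+//   if (extension.has_value()) {
+//     // Attach `*extension` to the outgoing RTP packet.
+//   }
+//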
+class AbsoluteCaptureTimeSender {
+ public:
+ static constexpr TimeDelta kInterpolationMaxInterval =
+ TimeDelta::Millis(1000);
+ static constexpr TimeDelta kInterpolationMaxError = TimeDelta::Millis(1);
+
+ explicit AbsoluteCaptureTimeSender(Clock* clock);
+
+ // Returns the source (i.e. SSRC or CSRC) of the capture system.
+ static uint32_t GetSource(uint32_t ssrc,
+ rtc::ArrayView<const uint32_t> csrcs);
+
+ // Returns a header extension to be sent, or `absl::nullopt` if the header
+ // extension shouldn't be sent.
+ absl::optional<AbsoluteCaptureTime> OnSendPacket(
+ uint32_t source,
+ uint32_t rtp_timestamp,
+ uint32_t rtp_clock_frequency,
+ uint64_t absolute_capture_timestamp,
+ absl::optional<int64_t> estimated_capture_clock_offset);
+
+ private:
+ bool ShouldSendExtension(
+ Timestamp send_time,
+ uint32_t source,
+ uint32_t rtp_timestamp,
+ uint32_t rtp_clock_frequency,
+ uint64_t absolute_capture_timestamp,
+ absl::optional<int64_t> estimated_capture_clock_offset) const
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+
+ Clock* const clock_;
+
+ Mutex mutex_;
+
+ Timestamp last_send_time_ RTC_GUARDED_BY(mutex_);
+
+ uint32_t last_source_ RTC_GUARDED_BY(mutex_);
+ uint32_t last_rtp_timestamp_ RTC_GUARDED_BY(mutex_);
+ uint32_t last_rtp_clock_frequency_ RTC_GUARDED_BY(mutex_);
+ uint64_t last_absolute_capture_timestamp_ RTC_GUARDED_BY(mutex_);
+ absl::optional<int64_t> last_estimated_capture_clock_offset_
+ RTC_GUARDED_BY(mutex_);
+}; // AbsoluteCaptureTimeSender
+
+} // namespace webrtc
+
+#endif // MODULES_RTP_RTCP_SOURCE_ABSOLUTE_CAPTURE_TIME_SENDER_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/absolute_capture_time_sender_unittest.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/absolute_capture_time_sender_unittest.cc
new file mode 100644
index 0000000000..db3fc75100
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/absolute_capture_time_sender_unittest.cc
@@ -0,0 +1,374 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/absolute_capture_time_sender.h"
+
+#include "system_wrappers/include/ntp_time.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+TEST(AbsoluteCaptureTimeSenderTest, GetSourceWithoutCsrcs) {
+ constexpr uint32_t kSsrc = 12;
+
+ EXPECT_EQ(AbsoluteCaptureTimeSender::GetSource(kSsrc, nullptr), kSsrc);
+}
+
+TEST(AbsoluteCaptureTimeSenderTest, GetSourceWithCsrcs) {
+ constexpr uint32_t kSsrc = 12;
+ constexpr uint32_t kCsrcs[] = {34, 56, 78, 90};
+
+ EXPECT_EQ(AbsoluteCaptureTimeSender::GetSource(kSsrc, kCsrcs), kCsrcs[0]);
+}
+
+TEST(AbsoluteCaptureTimeSenderTest, InterpolateLaterPacketSentLater) {
+ constexpr uint32_t kSource = 1337;
+ constexpr uint32_t kRtpClockFrequency = 64000;
+ constexpr uint32_t kRtpTimestamp0 = 1020300000;
+ constexpr uint32_t kRtpTimestamp1 = kRtpTimestamp0 + 1280;
+ constexpr uint32_t kRtpTimestamp2 = kRtpTimestamp0 + 2560;
+ static const absl::optional<AbsoluteCaptureTime> kExtension0 =
+ AbsoluteCaptureTime{Int64MsToUQ32x32(9000), Int64MsToQ32x32(-350)};
+ static const absl::optional<AbsoluteCaptureTime> kExtension1 =
+ AbsoluteCaptureTime{Int64MsToUQ32x32(9000 + 20), Int64MsToQ32x32(-350)};
+ static const absl::optional<AbsoluteCaptureTime> kExtension2 =
+ AbsoluteCaptureTime{Int64MsToUQ32x32(9000 + 40), Int64MsToQ32x32(-350)};
+ SimulatedClock clock(0);
+ AbsoluteCaptureTimeSender sender(&clock);
+
+ EXPECT_EQ(sender.OnSendPacket(kSource, kRtpTimestamp0, kRtpClockFrequency,
+ kExtension0->absolute_capture_timestamp,
+ kExtension0->estimated_capture_clock_offset),
+ kExtension0);
+
+ EXPECT_EQ(sender.OnSendPacket(kSource, kRtpTimestamp1, kRtpClockFrequency,
+ kExtension1->absolute_capture_timestamp,
+ kExtension1->estimated_capture_clock_offset),
+ absl::nullopt);
+
+ EXPECT_EQ(sender.OnSendPacket(kSource, kRtpTimestamp2, kRtpClockFrequency,
+ kExtension2->absolute_capture_timestamp,
+ kExtension2->estimated_capture_clock_offset),
+ absl::nullopt);
+}
+
+TEST(AbsoluteCaptureTimeSenderTest, InterpolateEarlierPacketSentLater) {
+ constexpr uint32_t kSource = 1337;
+ constexpr uint32_t kRtpClockFrequency = 64000;
+ constexpr uint32_t kRtpTimestamp0 = 1020300000;
+ constexpr uint32_t kRtpTimestamp1 = kRtpTimestamp0 - 1280;
+ constexpr uint32_t kRtpTimestamp2 = kRtpTimestamp0 - 2560;
+ static const absl::optional<AbsoluteCaptureTime> kExtension0 =
+ AbsoluteCaptureTime{Int64MsToUQ32x32(9000), Int64MsToQ32x32(-350)};
+ static const absl::optional<AbsoluteCaptureTime> kExtension1 =
+ AbsoluteCaptureTime{Int64MsToUQ32x32(9000 - 20), Int64MsToQ32x32(-350)};
+ static const absl::optional<AbsoluteCaptureTime> kExtension2 =
+ AbsoluteCaptureTime{Int64MsToUQ32x32(9000 - 40), Int64MsToQ32x32(-350)};
+
+ SimulatedClock clock(0);
+ AbsoluteCaptureTimeSender sender(&clock);
+
+ EXPECT_EQ(sender.OnSendPacket(kSource, kRtpTimestamp0, kRtpClockFrequency,
+ kExtension0->absolute_capture_timestamp,
+ kExtension0->estimated_capture_clock_offset),
+ kExtension0);
+
+ EXPECT_EQ(sender.OnSendPacket(kSource, kRtpTimestamp1, kRtpClockFrequency,
+ kExtension1->absolute_capture_timestamp,
+ kExtension1->estimated_capture_clock_offset),
+ absl::nullopt);
+
+ EXPECT_EQ(sender.OnSendPacket(kSource, kRtpTimestamp2, kRtpClockFrequency,
+ kExtension2->absolute_capture_timestamp,
+ kExtension2->estimated_capture_clock_offset),
+ absl::nullopt);
+}
+
+TEST(AbsoluteCaptureTimeSenderTest,
+ InterpolateLaterPacketSentLaterWithRtpTimestampWrapAround) {
+ constexpr uint32_t kSource = 1337;
+ constexpr uint32_t kRtpClockFrequency = 64000;
+ constexpr uint32_t kRtpTimestamp0 = ~uint32_t{0} - 79;
+ constexpr uint32_t kRtpTimestamp1 = 1280 - 80;
+ constexpr uint32_t kRtpTimestamp2 = 2560 - 80;
+ static const absl::optional<AbsoluteCaptureTime> kExtension0 =
+ AbsoluteCaptureTime{Int64MsToUQ32x32(9000), Int64MsToQ32x32(-350)};
+ static const absl::optional<AbsoluteCaptureTime> kExtension1 =
+ AbsoluteCaptureTime{Int64MsToUQ32x32(9000 + 20), Int64MsToQ32x32(-350)};
+ static const absl::optional<AbsoluteCaptureTime> kExtension2 =
+ AbsoluteCaptureTime{Int64MsToUQ32x32(9000 + 40), Int64MsToQ32x32(-350)};
+
+ SimulatedClock clock(0);
+ AbsoluteCaptureTimeSender sender(&clock);
+
+ EXPECT_EQ(sender.OnSendPacket(kSource, kRtpTimestamp0, kRtpClockFrequency,
+ kExtension0->absolute_capture_timestamp,
+ kExtension0->estimated_capture_clock_offset),
+ kExtension0);
+
+ EXPECT_EQ(sender.OnSendPacket(kSource, kRtpTimestamp1, kRtpClockFrequency,
+ kExtension1->absolute_capture_timestamp,
+ kExtension1->estimated_capture_clock_offset),
+ absl::nullopt);
+
+ EXPECT_EQ(sender.OnSendPacket(kSource, kRtpTimestamp2, kRtpClockFrequency,
+ kExtension2->absolute_capture_timestamp,
+ kExtension2->estimated_capture_clock_offset),
+ absl::nullopt);
+}
+
+TEST(AbsoluteCaptureTimeSenderTest,
+ InterpolateEarlierPacketSentLaterWithRtpTimestampWrapAround) {
+ constexpr uint32_t kSource = 1337;
+ constexpr uint32_t kRtpClockFrequency = 64000;
+ constexpr uint32_t kRtpTimestamp0 = 799;
+ constexpr uint32_t kRtpTimestamp1 = kRtpTimestamp0 - 1280;
+ constexpr uint32_t kRtpTimestamp2 = kRtpTimestamp0 - 2560;
+ static const absl::optional<AbsoluteCaptureTime> kExtension0 =
+ AbsoluteCaptureTime{Int64MsToUQ32x32(9000), Int64MsToQ32x32(-350)};
+ static const absl::optional<AbsoluteCaptureTime> kExtension1 =
+ AbsoluteCaptureTime{Int64MsToUQ32x32(9000 - 20), Int64MsToQ32x32(-350)};
+ static const absl::optional<AbsoluteCaptureTime> kExtension2 =
+ AbsoluteCaptureTime{Int64MsToUQ32x32(9000 - 40), Int64MsToQ32x32(-350)};
+
+ SimulatedClock clock(0);
+ AbsoluteCaptureTimeSender sender(&clock);
+
+ EXPECT_EQ(sender.OnSendPacket(kSource, kRtpTimestamp0, kRtpClockFrequency,
+ kExtension0->absolute_capture_timestamp,
+ kExtension0->estimated_capture_clock_offset),
+ kExtension0);
+
+ EXPECT_EQ(sender.OnSendPacket(kSource, kRtpTimestamp1, kRtpClockFrequency,
+ kExtension1->absolute_capture_timestamp,
+ kExtension1->estimated_capture_clock_offset),
+ absl::nullopt);
+
+ EXPECT_EQ(sender.OnSendPacket(kSource, kRtpTimestamp2, kRtpClockFrequency,
+ kExtension2->absolute_capture_timestamp,
+ kExtension2->estimated_capture_clock_offset),
+ absl::nullopt);
+}
+
+TEST(AbsoluteCaptureTimeSenderTest, SkipInterpolateIfTooLate) {
+ constexpr uint32_t kSource = 1337;
+ constexpr uint32_t kRtpClockFrequency = 64000;
+ constexpr uint32_t kRtpTimestamp0 = 1020300000;
+ constexpr uint32_t kRtpTimestamp1 = kRtpTimestamp0 + 1280;
+ constexpr uint32_t kRtpTimestamp2 = kRtpTimestamp0 + 2560;
+ static const absl::optional<AbsoluteCaptureTime> kExtension0 =
+ AbsoluteCaptureTime{Int64MsToUQ32x32(9000), Int64MsToQ32x32(-350)};
+ static const absl::optional<AbsoluteCaptureTime> kExtension1 =
+ AbsoluteCaptureTime{Int64MsToUQ32x32(9000 + 20), Int64MsToQ32x32(-350)};
+ static const absl::optional<AbsoluteCaptureTime> kExtension2 =
+ AbsoluteCaptureTime{Int64MsToUQ32x32(9000 + 40), Int64MsToQ32x32(-350)};
+
+ SimulatedClock clock(0);
+ AbsoluteCaptureTimeSender sender(&clock);
+
+ EXPECT_EQ(sender.OnSendPacket(kSource, kRtpTimestamp0, kRtpClockFrequency,
+ kExtension0->absolute_capture_timestamp,
+ kExtension0->estimated_capture_clock_offset),
+ kExtension0);
+
+ clock.AdvanceTime(AbsoluteCaptureTimeSender::kInterpolationMaxInterval);
+
+ EXPECT_EQ(sender.OnSendPacket(kSource, kRtpTimestamp1, kRtpClockFrequency,
+ kExtension1->absolute_capture_timestamp,
+ kExtension1->estimated_capture_clock_offset),
+ absl::nullopt);
+
+ clock.AdvanceTimeMicroseconds(1);
+
+ EXPECT_EQ(sender.OnSendPacket(kSource, kRtpTimestamp2, kRtpClockFrequency,
+ kExtension2->absolute_capture_timestamp,
+ kExtension2->estimated_capture_clock_offset),
+ kExtension2);
+}
+
+TEST(AbsoluteCaptureTimeSenderTest, SkipInterpolateIfSourceChanged) {
+ constexpr uint32_t kSource0 = 1337;
+ constexpr uint32_t kSource1 = 1338;
+ constexpr uint32_t kSource2 = 1338;
+ constexpr uint32_t kRtpClockFrequency = 64000;
+ constexpr uint32_t kRtpTimestamp0 = 1020300000;
+ constexpr uint32_t kRtpTimestamp1 = kRtpTimestamp0 + 1280;
+ constexpr uint32_t kRtpTimestamp2 = kRtpTimestamp0 + 2560;
+ static const absl::optional<AbsoluteCaptureTime> kExtension0 =
+ AbsoluteCaptureTime{Int64MsToUQ32x32(9000), Int64MsToQ32x32(-350)};
+ static const absl::optional<AbsoluteCaptureTime> kExtension1 =
+ AbsoluteCaptureTime{Int64MsToUQ32x32(9000 + 20), Int64MsToQ32x32(-350)};
+ static const absl::optional<AbsoluteCaptureTime> kExtension2 =
+ AbsoluteCaptureTime{Int64MsToUQ32x32(9000 + 40), Int64MsToQ32x32(-350)};
+
+ SimulatedClock clock(0);
+ AbsoluteCaptureTimeSender sender(&clock);
+
+ EXPECT_EQ(sender.OnSendPacket(kSource0, kRtpTimestamp0, kRtpClockFrequency,
+ kExtension0->absolute_capture_timestamp,
+ kExtension0->estimated_capture_clock_offset),
+ kExtension0);
+
+ EXPECT_EQ(sender.OnSendPacket(kSource1, kRtpTimestamp1, kRtpClockFrequency,
+ kExtension1->absolute_capture_timestamp,
+ kExtension1->estimated_capture_clock_offset),
+ kExtension1);
+
+ EXPECT_EQ(sender.OnSendPacket(kSource2, kRtpTimestamp2, kRtpClockFrequency,
+ kExtension2->absolute_capture_timestamp,
+ kExtension2->estimated_capture_clock_offset),
+ absl::nullopt);
+}
+
+TEST(AbsoluteCaptureTimeSenderTest, SkipInterpolateIfRtpClockFrequencyChanged) {
+ constexpr uint32_t kSource = 1337;
+ constexpr uint32_t kRtpClockFrequency0 = 64000;
+ constexpr uint32_t kRtpClockFrequency1 = 32000;
+ constexpr uint32_t kRtpClockFrequency2 = 32000;
+ constexpr uint32_t kRtpTimestamp0 = 1020300000;
+ constexpr uint32_t kRtpTimestamp1 = kRtpTimestamp0 + 640;
+ constexpr uint32_t kRtpTimestamp2 = kRtpTimestamp0 + 1280;
+ static const absl::optional<AbsoluteCaptureTime> kExtension0 =
+ AbsoluteCaptureTime{Int64MsToUQ32x32(9000), Int64MsToQ32x32(-350)};
+ static const absl::optional<AbsoluteCaptureTime> kExtension1 =
+ AbsoluteCaptureTime{Int64MsToUQ32x32(9000 + 20), Int64MsToQ32x32(-350)};
+ static const absl::optional<AbsoluteCaptureTime> kExtension2 =
+ AbsoluteCaptureTime{Int64MsToUQ32x32(9000 + 40), Int64MsToQ32x32(-350)};
+
+ SimulatedClock clock(0);
+ AbsoluteCaptureTimeSender sender(&clock);
+
+ EXPECT_EQ(sender.OnSendPacket(kSource, kRtpTimestamp0, kRtpClockFrequency0,
+ kExtension0->absolute_capture_timestamp,
+ kExtension0->estimated_capture_clock_offset),
+ kExtension0);
+
+ EXPECT_EQ(sender.OnSendPacket(kSource, kRtpTimestamp1, kRtpClockFrequency1,
+ kExtension1->absolute_capture_timestamp,
+ kExtension1->estimated_capture_clock_offset),
+ kExtension1);
+
+ EXPECT_EQ(sender.OnSendPacket(kSource, kRtpTimestamp2, kRtpClockFrequency2,
+ kExtension2->absolute_capture_timestamp,
+ kExtension2->estimated_capture_clock_offset),
+ absl::nullopt);
+}
+
+TEST(AbsoluteCaptureTimeSenderTest,
+ SkipInterpolateIfRtpClockFrequencyIsInvalid) {
+ constexpr uint32_t kSource = 1337;
+ constexpr uint32_t kRtpClockFrequency0 = 0;
+ constexpr uint32_t kRtpClockFrequency1 = 0;
+ constexpr uint32_t kRtpClockFrequency2 = 0;
+ constexpr uint32_t kRtpTimestamp0 = 1020300000;
+ constexpr uint32_t kRtpTimestamp1 = kRtpTimestamp0;
+ constexpr uint32_t kRtpTimestamp2 = kRtpTimestamp0;
+ static const absl::optional<AbsoluteCaptureTime> kExtension0 =
+ AbsoluteCaptureTime{Int64MsToUQ32x32(9000), Int64MsToQ32x32(-350)};
+ static const absl::optional<AbsoluteCaptureTime> kExtension1 =
+ AbsoluteCaptureTime{Int64MsToUQ32x32(9000 + 20), Int64MsToQ32x32(-350)};
+ static const absl::optional<AbsoluteCaptureTime> kExtension2 =
+ AbsoluteCaptureTime{Int64MsToUQ32x32(9000 + 40), Int64MsToQ32x32(-350)};
+
+ SimulatedClock clock(0);
+ AbsoluteCaptureTimeSender sender(&clock);
+
+ EXPECT_EQ(sender.OnSendPacket(kSource, kRtpTimestamp0, kRtpClockFrequency0,
+ kExtension0->absolute_capture_timestamp,
+ kExtension0->estimated_capture_clock_offset),
+ kExtension0);
+
+ EXPECT_EQ(sender.OnSendPacket(kSource, kRtpTimestamp1, kRtpClockFrequency1,
+ kExtension1->absolute_capture_timestamp,
+ kExtension1->estimated_capture_clock_offset),
+ kExtension1);
+
+ EXPECT_EQ(sender.OnSendPacket(kSource, kRtpTimestamp2, kRtpClockFrequency2,
+ kExtension2->absolute_capture_timestamp,
+ kExtension2->estimated_capture_clock_offset),
+ kExtension2);
+}
+
+TEST(AbsoluteCaptureTimeSenderTest,
+ SkipInterpolateIfEstimatedCaptureClockOffsetChanged) {
+ constexpr uint32_t kSource = 1337;
+ constexpr uint32_t kRtpClockFrequency = 64000;
+ constexpr uint32_t kRtpTimestamp0 = 1020300000;
+ constexpr uint32_t kRtpTimestamp1 = kRtpTimestamp0 + 1280;
+ constexpr uint32_t kRtpTimestamp2 = kRtpTimestamp0 + 2560;
+ static const absl::optional<AbsoluteCaptureTime> kExtension0 =
+ AbsoluteCaptureTime{Int64MsToUQ32x32(9000), Int64MsToQ32x32(-350)};
+ static const absl::optional<AbsoluteCaptureTime> kExtension1 =
+ AbsoluteCaptureTime{Int64MsToUQ32x32(9000 + 20), Int64MsToQ32x32(370)};
+ static const absl::optional<AbsoluteCaptureTime> kExtension2 =
+ AbsoluteCaptureTime{Int64MsToUQ32x32(9000 + 40), absl::nullopt};
+
+ SimulatedClock clock(0);
+ AbsoluteCaptureTimeSender sender(&clock);
+
+ EXPECT_EQ(sender.OnSendPacket(kSource, kRtpTimestamp0, kRtpClockFrequency,
+ kExtension0->absolute_capture_timestamp,
+ kExtension0->estimated_capture_clock_offset),
+ kExtension0);
+
+ EXPECT_EQ(sender.OnSendPacket(kSource, kRtpTimestamp1, kRtpClockFrequency,
+ kExtension1->absolute_capture_timestamp,
+ kExtension1->estimated_capture_clock_offset),
+ kExtension1);
+
+ EXPECT_EQ(sender.OnSendPacket(kSource, kRtpTimestamp2, kRtpClockFrequency,
+ kExtension2->absolute_capture_timestamp,
+ kExtension2->estimated_capture_clock_offset),
+ kExtension2);
+}
+
+TEST(AbsoluteCaptureTimeSenderTest,
+ SkipInterpolateIfTooMuchInterpolationError) {
+ constexpr uint32_t kSource = 1337;
+ constexpr uint32_t kRtpClockFrequency = 64000;
+ constexpr uint32_t kRtpTimestamp0 = 1020300000;
+ constexpr uint32_t kRtpTimestamp1 = kRtpTimestamp0 + 1280;
+ constexpr uint32_t kRtpTimestamp2 = kRtpTimestamp0 + 2560;
+ static const absl::optional<AbsoluteCaptureTime> kExtension0 =
+ AbsoluteCaptureTime{Int64MsToUQ32x32(9000), Int64MsToQ32x32(-350)};
+ static const absl::optional<AbsoluteCaptureTime> kExtension1 =
+ AbsoluteCaptureTime{
+ Int64MsToUQ32x32(
+ 9000 + 20 +
+ AbsoluteCaptureTimeSender::kInterpolationMaxError.ms()),
+ Int64MsToQ32x32(-350)};
+ static const absl::optional<AbsoluteCaptureTime> kExtension2 =
+ AbsoluteCaptureTime{
+ Int64MsToUQ32x32(
+ 9000 + 40 +
+ AbsoluteCaptureTimeSender::kInterpolationMaxError.ms() + 1),
+ Int64MsToQ32x32(-350)};
+
+ SimulatedClock clock(0);
+ AbsoluteCaptureTimeSender sender(&clock);
+
+ EXPECT_EQ(sender.OnSendPacket(kSource, kRtpTimestamp0, kRtpClockFrequency,
+ kExtension0->absolute_capture_timestamp,
+ kExtension0->estimated_capture_clock_offset),
+ kExtension0);
+
+ EXPECT_EQ(sender.OnSendPacket(kSource, kRtpTimestamp1, kRtpClockFrequency,
+ kExtension1->absolute_capture_timestamp,
+ kExtension1->estimated_capture_clock_offset),
+ absl::nullopt);
+
+ EXPECT_EQ(sender.OnSendPacket(kSource, kRtpTimestamp2, kRtpClockFrequency,
+ kExtension2->absolute_capture_timestamp,
+ kExtension2->estimated_capture_clock_offset),
+ kExtension2);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/active_decode_targets_helper.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/active_decode_targets_helper.cc
new file mode 100644
index 0000000000..71e7e8cf78
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/active_decode_targets_helper.cc
@@ -0,0 +1,124 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/active_decode_targets_helper.h"
+
+#include <stdint.h>
+
+#include "api/array_view.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+namespace {
+
+// Returns a mask of the ids of the chains the previous frame is part of.
+// Assumes that for each chain frames are seen in order and that no frame on
+// any chain is missing. That assumption allows simple detection of whether
+// the previous frame is part of a chain.
+std::bitset<32> LastSendOnChain(int frame_diff,
+ rtc::ArrayView<const int> chain_diffs) {
+ std::bitset<32> bitmask = 0;
+ for (size_t i = 0; i < chain_diffs.size(); ++i) {
+ if (frame_diff == chain_diffs[i]) {
+ bitmask.set(i);
+ }
+ }
+ return bitmask;
+}
+
+// Returns bitmask with first `num` bits set to 1.
+std::bitset<32> AllActive(size_t num) {
+ RTC_DCHECK_LE(num, 32);
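+  // e.g. AllActive(3) == 0b111. Callers pass num >= 1, so the shift below
+  // stays within the width of uint32_t.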
+ return (~uint32_t{0}) >> (32 - num);
+}
+
+// Returns bitmask of chains that protect at least one active decode target.
+std::bitset<32> ActiveChains(
+ rtc::ArrayView<const int> decode_target_protected_by_chain,
+ int num_chains,
+ std::bitset<32> active_decode_targets) {
+ std::bitset<32> active_chains = 0;
+ for (size_t dt = 0; dt < decode_target_protected_by_chain.size(); ++dt) {
+ if (dt < active_decode_targets.size() && !active_decode_targets[dt]) {
+ continue;
+ }
+ int chain_idx = decode_target_protected_by_chain[dt];
+ RTC_DCHECK_LT(chain_idx, num_chains);
+ active_chains.set(chain_idx);
+ }
+ return active_chains;
+}
+
+} // namespace
+
+void ActiveDecodeTargetsHelper::OnFrame(
+ rtc::ArrayView<const int> decode_target_protected_by_chain,
+ std::bitset<32> active_decode_targets,
+ bool is_keyframe,
+ int64_t frame_id,
+ rtc::ArrayView<const int> chain_diffs) {
+ const int num_chains = chain_diffs.size();
+ if (num_chains == 0) {
+    // Avoid printing the warning when it was already printed for the same
+    // active decode targets, or when active_decode_targets is unchanged from
+    // its default value of all active, including non-existent decode targets.
+ if (last_active_decode_targets_ != active_decode_targets &&
+ !active_decode_targets.all()) {
+ RTC_LOG(LS_WARNING) << "No chains are configured, but some decode "
+ "targets might be inactive. Unsupported.";
+ }
+ last_active_decode_targets_ = active_decode_targets;
+ return;
+ }
+ const size_t num_decode_targets = decode_target_protected_by_chain.size();
+ RTC_DCHECK_GT(num_decode_targets, 0);
+ std::bitset<32> all_decode_targets = AllActive(num_decode_targets);
+  // The default value for active_decode_targets is 'all are active', i.e. all
+  // bits are set; it is set before the number of decode targets is known. It
+  // is up to this helper to clean up the value and unset the unused bits.
+ active_decode_targets &= all_decode_targets;
+
+ if (is_keyframe) {
+ // Key frame resets the state.
+ last_active_decode_targets_ = all_decode_targets;
+ last_active_chains_ = AllActive(num_chains);
+ unsent_on_chain_.reset();
+ } else {
+ // Update state assuming previous frame was sent.
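+    // Chains whose chain_diff equals the distance to the previously handled
+    // frame had that frame as their most recent member; pending frames carry
+    // the bitmask, so those chains are no longer pending.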
+ unsent_on_chain_ &=
+ ~LastSendOnChain(frame_id - last_frame_id_, chain_diffs);
+ }
+ // Save for the next call to OnFrame.
+  // Though usually `frame_id == last_frame_id_ + 1`, it might not be when the
+  // frame id space is shared by several simulcast RTP streams.
+ last_frame_id_ = frame_id;
+
+ if (active_decode_targets == last_active_decode_targets_) {
+ return;
+ }
+ last_active_decode_targets_ = active_decode_targets;
+
+ if (active_decode_targets.none()) {
+ RTC_LOG(LS_ERROR) << "It is invalid to produce a frame (" << frame_id
+ << ") while there are no active decode targets";
+ return;
+ }
+ last_active_chains_ = ActiveChains(decode_target_protected_by_chain,
+ num_chains, active_decode_targets);
+  // Frames that are part of inactive chains might not be produced by the
+  // encoder. Thus stop sending the `active_decode_targets` bitmask once it
+  // has been sent on all active chains rather than on all chains.
+ unsent_on_chain_ = last_active_chains_;
+ RTC_DCHECK(!unsent_on_chain_.none());
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/active_decode_targets_helper.h b/third_party/libwebrtc/modules/rtp_rtcp/source/active_decode_targets_helper.h
new file mode 100644
index 0000000000..13755e8d80
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/active_decode_targets_helper.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_RTP_RTCP_SOURCE_ACTIVE_DECODE_TARGETS_HELPER_H_
+#define MODULES_RTP_RTCP_SOURCE_ACTIVE_DECODE_TARGETS_HELPER_H_
+
+#include <stdint.h>
+
+#include <bitset>
+
+#include "absl/types/optional.h"
+#include "api/array_view.h"
+
+namespace webrtc {
+
+// Helper class that decides when active_decode_target_bitmask should be written
+// into the dependency descriptor rtp header extension.
+// See: https://aomediacodec.github.io/av1-rtp-spec/#a44-switching
+// This class is thread-compatible.
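+//
+// A minimal usage sketch (illustrative only; `structure` stands for the
+// stream's frame dependency structure and `frame` for the frame being
+// packetized):
+//
+//   ActiveDecodeTargetsHelper helper;
+//   helper.OnFrame(structure.decode_target_protected_by_chain,
+//                  active_decode_targets, frame.is_keyframe, frame.frame_id,
+//                  frame.chain_diffs);
+//   if (absl::optional<uint32_t> bitmask =
+//           helper.ActiveDecodeTargetsBitmask()) {
+//     // Write `*bitmask` into the dependency descriptor extension.
+//   }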
+class ActiveDecodeTargetsHelper {
+ public:
+ ActiveDecodeTargetsHelper() = default;
+ ActiveDecodeTargetsHelper(const ActiveDecodeTargetsHelper&) = delete;
+ ActiveDecodeTargetsHelper& operator=(const ActiveDecodeTargetsHelper&) =
+ delete;
+ ~ActiveDecodeTargetsHelper() = default;
+
+ // Decides if active decode target bitmask should be attached to the frame
+ // that is about to be sent.
+ void OnFrame(rtc::ArrayView<const int> decode_target_protected_by_chain,
+ std::bitset<32> active_decode_targets,
+ bool is_keyframe,
+ int64_t frame_id,
+ rtc::ArrayView<const int> chain_diffs);
+
+  // Returns the active decode targets bitmask to attach to the dependency
+  // descriptor, or nullopt when it no longer needs to be sent.
+ absl::optional<uint32_t> ActiveDecodeTargetsBitmask() const {
+ if (unsent_on_chain_.none())
+ return absl::nullopt;
+ return last_active_decode_targets_.to_ulong();
+ }
+
+ std::bitset<32> ActiveChainsBitmask() const { return last_active_chains_; }
+
+ private:
+  // `unsent_on_chain_[i]` indicates that the latest active decode targets
+  // bitmask hasn't been attached to a packet on the chain with id `i` yet.
+ std::bitset<32> unsent_on_chain_ = 0;
+ std::bitset<32> last_active_decode_targets_ = 0;
+ std::bitset<32> last_active_chains_ = 0;
+ int64_t last_frame_id_ = 0;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_RTP_RTCP_SOURCE_ACTIVE_DECODE_TARGETS_HELPER_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/active_decode_targets_helper_unittest.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/active_decode_targets_helper_unittest.cc
new file mode 100644
index 0000000000..6f64fd1418
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/active_decode_targets_helper_unittest.cc
@@ -0,0 +1,272 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/active_decode_targets_helper.h"
+
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+constexpr std::bitset<32> kAll = ~uint32_t{0};
+} // namespace
+
+TEST(ActiveDecodeTargetsHelperTest,
+ ReturnsNulloptOnKeyFrameWhenAllDecodeTargetsAreActive) {
+ constexpr int kDecodeTargetProtectedByChain[] = {0, 0};
+ ActiveDecodeTargetsHelper helper;
+ int chain_diffs[] = {0};
+ helper.OnFrame(kDecodeTargetProtectedByChain,
+ /*active_decode_targets=*/0b11,
+ /*is_keyframe=*/true, /*frame_id=*/1, chain_diffs);
+
+ EXPECT_EQ(helper.ActiveDecodeTargetsBitmask(), absl::nullopt);
+}
+
+TEST(ActiveDecodeTargetsHelperTest,
+ ReturnsNulloptOnKeyFrameWhenAllDecodeTargetsAreActiveAfterDeltaFrame) {
+ constexpr int kDecodeTargetProtectedByChain[] = {0, 0};
+ ActiveDecodeTargetsHelper helper;
+ int chain_diffs_key[] = {0};
+ helper.OnFrame(kDecodeTargetProtectedByChain,
+ /*active_decode_targets=*/0b11,
+ /*is_keyframe=*/true, /*frame_id=*/1, chain_diffs_key);
+ int chain_diffs_delta[] = {1};
+ helper.OnFrame(kDecodeTargetProtectedByChain,
+ /*active_decode_targets=*/0b01,
+ /*is_keyframe=*/false, /*frame_id=*/2, chain_diffs_delta);
+
+ ASSERT_EQ(helper.ActiveDecodeTargetsBitmask(), 0b01u);
+ helper.OnFrame(kDecodeTargetProtectedByChain,
+ /*active_decode_targets=*/0b11,
+ /*is_keyframe=*/true, /*frame_id=*/3, chain_diffs_key);
+
+ EXPECT_EQ(helper.ActiveDecodeTargetsBitmask(), absl::nullopt);
+}
+
+TEST(ActiveDecodeTargetsHelperTest,
+ ReturnsBitmaskOnKeyFrameWhenSomeDecodeTargetsAreInactive) {
+ constexpr int kDecodeTargetProtectedByChain[] = {0, 0};
+ ActiveDecodeTargetsHelper helper;
+ int chain_diffs[] = {0};
+ helper.OnFrame(kDecodeTargetProtectedByChain,
+ /*active_decode_targets=*/0b01,
+ /*is_keyframe=*/true, /*frame_id=*/1, chain_diffs);
+
+ EXPECT_EQ(helper.ActiveDecodeTargetsBitmask(), 0b01u);
+}
+
+TEST(ActiveDecodeTargetsHelperTest,
+ ReturnsBitmaskOnKeyFrameWhenSomeDecodeTargetsAreInactiveAfterDeltaFrame) {
+ constexpr int kDecodeTargetProtectedByChain[] = {0, 0};
+ ActiveDecodeTargetsHelper helper;
+ int chain_diffs_key[] = {0};
+ helper.OnFrame(kDecodeTargetProtectedByChain,
+ /*active_decode_targets=*/0b01,
+ /*is_keyframe=*/true, /*frame_id=*/1, chain_diffs_key);
+ int chain_diffs_delta[] = {1};
+ helper.OnFrame(kDecodeTargetProtectedByChain,
+ /*active_decode_targets=*/0b01,
+ /*is_keyframe=*/false, /*frame_id=*/2, chain_diffs_delta);
+
+ ASSERT_EQ(helper.ActiveDecodeTargetsBitmask(), absl::nullopt);
+ helper.OnFrame(kDecodeTargetProtectedByChain,
+ /*active_decode_targets=*/0b01,
+ /*is_keyframe=*/true, /*frame_id=*/3, chain_diffs_key);
+
+ EXPECT_EQ(helper.ActiveDecodeTargetsBitmask(), 0b01u);
+}
+
+TEST(ActiveDecodeTargetsHelperTest,
+ ReturnsNulloptWhenActiveDecodeTargetsAreUnused) {
+ constexpr int kDecodeTargetProtectedByChain[] = {0, 0};
+ ActiveDecodeTargetsHelper helper;
+ int chain_diffs[] = {0};
+ helper.OnFrame(kDecodeTargetProtectedByChain,
+ /*active_decode_targets=*/kAll,
+ /*is_keyframe=*/true, /*frame_id=*/1, chain_diffs);
+ EXPECT_EQ(helper.ActiveDecodeTargetsBitmask(), absl::nullopt);
+
+ helper.OnFrame(kDecodeTargetProtectedByChain,
+ /*active_decode_targets=*/kAll,
+ /*is_keyframe=*/false, /*frame_id=*/2, chain_diffs);
+ EXPECT_EQ(helper.ActiveDecodeTargetsBitmask(), absl::nullopt);
+}
+
+TEST(ActiveDecodeTargetsHelperTest,
+ ReturnsNulloptOnDeltaFrameAfterSentOnKeyFrame) {
+ constexpr int kDecodeTargetProtectedByChain[] = {0, 0};
+ ActiveDecodeTargetsHelper helper;
+ int chain_diffs_key[] = {0};
+ helper.OnFrame(kDecodeTargetProtectedByChain,
+ /*active_decode_targets=*/0b01,
+ /*is_keyframe=*/true, /*frame_id=*/1, chain_diffs_key);
+ int chain_diffs_delta[] = {1};
+ helper.OnFrame(kDecodeTargetProtectedByChain,
+ /*active_decode_targets=*/0b01,
+ /*is_keyframe=*/false, /*frame_id=*/2, chain_diffs_delta);
+
+ EXPECT_EQ(helper.ActiveDecodeTargetsBitmask(), absl::nullopt);
+}
+
+TEST(ActiveDecodeTargetsHelperTest, ReturnsNewBitmaskOnDeltaFrame) {
+ constexpr int kDecodeTargetProtectedByChain[] = {0, 0};
+ ActiveDecodeTargetsHelper helper;
+ int chain_diffs_key[] = {0};
+ helper.OnFrame(kDecodeTargetProtectedByChain,
+ /*active_decode_targets=*/0b11,
+ /*is_keyframe=*/true, /*frame_id=*/1, chain_diffs_key);
+ ASSERT_EQ(helper.ActiveDecodeTargetsBitmask(), absl::nullopt);
+ int chain_diffs_delta[] = {1};
+ helper.OnFrame(kDecodeTargetProtectedByChain,
+ /*active_decode_targets=*/0b01,
+ /*is_keyframe=*/false, /*frame_id=*/2, chain_diffs_delta);
+
+ EXPECT_EQ(helper.ActiveDecodeTargetsBitmask(), 0b01u);
+}
+
+TEST(ActiveDecodeTargetsHelperTest,
+ ReturnsBitmaskWhenAllDecodeTargetsReactivatedOnDeltaFrame) {
+ constexpr int kDecodeTargetProtectedByChain[] = {0, 0};
+ ActiveDecodeTargetsHelper helper;
+ int chain_diffs_key[] = {0};
+ helper.OnFrame(kDecodeTargetProtectedByChain,
+ /*active_decode_targets=*/0b01,
+ /*is_keyframe=*/true, /*frame_id=*/1, chain_diffs_key);
+ ASSERT_NE(helper.ActiveDecodeTargetsBitmask(), absl::nullopt);
+ int chain_diffs_delta[] = {1};
+ helper.OnFrame(kDecodeTargetProtectedByChain,
+ /*active_decode_targets=*/0b01,
+ /*is_keyframe=*/false, /*frame_id=*/2, chain_diffs_delta);
+ ASSERT_EQ(helper.ActiveDecodeTargetsBitmask(), absl::nullopt);
+
+  // Reactivate all the decode targets.
+ helper.OnFrame(kDecodeTargetProtectedByChain,
+ /*active_decode_targets=*/kAll,
+ /*is_keyframe=*/false, /*frame_id=*/3, chain_diffs_delta);
+ EXPECT_EQ(helper.ActiveDecodeTargetsBitmask(), 0b11u);
+}
+
+TEST(ActiveDecodeTargetsHelperTest, ReturnsNulloptAfterSentOnAllActiveChains) {
+ // Active decode targets (0 and 1) are protected by chains 1 and 2.
+ const std::bitset<32> kSome = 0b011;
+ constexpr int kDecodeTargetProtectedByChain[] = {2, 1, 0};
+
+ ActiveDecodeTargetsHelper helper;
+ int chain_diffs_key[] = {0, 0, 0};
+ helper.OnFrame(kDecodeTargetProtectedByChain,
+ /*active_decode_targets=*/0b111,
+ /*is_keyframe=*/true,
+ /*frame_id=*/0, chain_diffs_key);
+ ASSERT_EQ(helper.ActiveDecodeTargetsBitmask(), absl::nullopt);
+
+ int chain_diffs_delta1[] = {1, 1, 1};
+ helper.OnFrame(kDecodeTargetProtectedByChain,
+ /*active_decode_targets=*/kSome,
+ /*is_keyframe=*/false,
+ /*frame_id=*/1, chain_diffs_delta1);
+ EXPECT_EQ(helper.ActiveDecodeTargetsBitmask(), 0b011u);
+
+ int chain_diffs_delta2[] = {2, 2, 1}; // Previous frame was part of chain#2
+ helper.OnFrame(kDecodeTargetProtectedByChain,
+ /*active_decode_targets=*/kSome,
+ /*is_keyframe=*/false,
+ /*frame_id=*/2, chain_diffs_delta2);
+ EXPECT_EQ(helper.ActiveDecodeTargetsBitmask(), 0b011u);
+
+  // The active_decode_targets_bitmask was sent on chains 1 and 2. It was
+  // never sent on chain 0, but chain 0 only protects the inactive decode
+  // target#2.
+ int chain_diffs_delta3[] = {3, 1, 2}; // Previous frame was part of chain#1
+ helper.OnFrame(kDecodeTargetProtectedByChain,
+ /*active_decode_targets=*/kSome,
+ /*is_keyframe=*/false,
+ /*frame_id=*/3, chain_diffs_delta3);
+ EXPECT_EQ(helper.ActiveDecodeTargetsBitmask(), absl::nullopt);
+}
+
+TEST(ActiveDecodeTargetsHelperTest, ReturnsBitmaskWhenChanged) {
+ constexpr int kDecodeTargetProtectedByChain[] = {0, 1, 1};
+
+ ActiveDecodeTargetsHelper helper;
+ int chain_diffs_key[] = {0, 0};
+ helper.OnFrame(kDecodeTargetProtectedByChain, /*active_decode_targets=*/0b111,
+ /*is_keyframe=*/true,
+ /*frame_id=*/0, chain_diffs_key);
+ int chain_diffs_delta1[] = {1, 1};
+ helper.OnFrame(kDecodeTargetProtectedByChain,
+ /*active_decode_targets=*/0b011,
+ /*is_keyframe=*/false,
+ /*frame_id=*/1, chain_diffs_delta1);
+ EXPECT_EQ(helper.ActiveDecodeTargetsBitmask(), 0b011u);
+
+ int chain_diffs_delta2[] = {1, 2};
+ helper.OnFrame(kDecodeTargetProtectedByChain,
+ /*active_decode_targets=*/0b101,
+ /*is_keyframe=*/false,
+ /*frame_id=*/2, chain_diffs_delta2);
+ EXPECT_EQ(helper.ActiveDecodeTargetsBitmask(), 0b101u);
+
+  // The active_decode_target_bitmask was sent on chain 0, but it was an old
+  // one.
+ int chain_diffs_delta3[] = {2, 1};
+ helper.OnFrame(kDecodeTargetProtectedByChain,
+ /*active_decode_targets=*/0b101,
+ /*is_keyframe=*/false,
+ /*frame_id=*/3, chain_diffs_delta3);
+ EXPECT_EQ(helper.ActiveDecodeTargetsBitmask(), 0b101u);
+}
+
+TEST(ActiveDecodeTargetsHelperTest, ReturnsNulloptWhenChainsAreNotUsed) {
+ const rtc::ArrayView<const int> kDecodeTargetProtectedByChain;
+ const rtc::ArrayView<const int> kNoChainDiffs;
+
+ ActiveDecodeTargetsHelper helper;
+ helper.OnFrame(kDecodeTargetProtectedByChain, /*active_decode_targets=*/kAll,
+ /*is_keyframe=*/true,
+ /*frame_id=*/0, kNoChainDiffs);
+ EXPECT_EQ(helper.ActiveDecodeTargetsBitmask(), absl::nullopt);
+
+ helper.OnFrame(kDecodeTargetProtectedByChain,
+ /*active_decode_targets=*/0b101,
+ /*is_keyframe=*/false,
+ /*frame_id=*/1, kNoChainDiffs);
+ EXPECT_EQ(helper.ActiveDecodeTargetsBitmask(), absl::nullopt);
+}
+
+TEST(ActiveDecodeTargetsHelperTest, Supports32DecodeTargets) {
+ std::bitset<32> some;
+ std::vector<int> decode_target_protected_by_chain(32);
+ for (int i = 0; i < 32; ++i) {
+ decode_target_protected_by_chain[i] = i;
+ some[i] = i % 2 == 0;
+ }
+
+ ActiveDecodeTargetsHelper helper;
+ std::vector<int> chain_diffs_key(32, 0);
+ helper.OnFrame(decode_target_protected_by_chain,
+ /*active_decode_targets=*/some,
+ /*is_keyframe=*/true,
+ /*frame_id=*/1, chain_diffs_key);
+ EXPECT_EQ(helper.ActiveDecodeTargetsBitmask(), some.to_ulong());
+ std::vector<int> chain_diffs_delta(32, 1);
+ helper.OnFrame(decode_target_protected_by_chain,
+ /*active_decode_targets=*/some,
+ /*is_keyframe=*/false,
+ /*frame_id=*/2, chain_diffs_delta);
+ EXPECT_EQ(helper.ActiveDecodeTargetsBitmask(), absl::nullopt);
+ helper.OnFrame(decode_target_protected_by_chain,
+ /*active_decode_targets=*/kAll,
+ /*is_keyframe=*/false,
+ /*frame_id=*/2, chain_diffs_delta);
+ EXPECT_EQ(helper.ActiveDecodeTargetsBitmask(), kAll.to_ulong());
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/byte_io.h b/third_party/libwebrtc/modules/rtp_rtcp/source/byte_io.h
new file mode 100644
index 0000000000..a98eb3073b
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/byte_io.h
@@ -0,0 +1,402 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_RTP_RTCP_SOURCE_BYTE_IO_H_
+#define MODULES_RTP_RTCP_SOURCE_BYTE_IO_H_
+
+// This file contains classes for reading and writing integer types from/to
+// byte array representations. Signed/unsigned, partial (whole byte) sizes,
+// and big/little endian byte order are all supported.
+//
+// Usage examples:
+//
+// uint8_t* buffer = ...;
+//
+// // Read an unsigned 4 byte integer in big endian format
+// uint32_t val = ByteReader<uint32_t>::ReadBigEndian(buffer);
+//
+// // Read a signed 24-bit (3 byte) integer in little endian format
+// int32_t val = ByteReader<int32_t, 3>::ReadLittleEndian(buffer);
+//
+// // Write an unsigned 8 byte integer in little endian format
+// ByteWriter<uint64_t>::WriteLittleEndian(buffer, val);
+//
+// // Write an unsigned 40-bit (5 byte) integer in big endian format
+// ByteWriter<uint64_t, 5>::WriteBigEndian(buffer, val);
+//
+// These classes are implemented as recursive templatizations, intended to
+// make it easy for the compiler to completely inline the reading/writing.
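+//
+// For instance (an illustrative round-trip; the byte values are arbitrary):
+//
+//   uint8_t buf[3] = {0x81, 0x0A, 0x0B};
+//   int32_t val = ByteReader<int32_t, 3>::ReadBigEndian(buf);
+//   // val now has the bit pattern 0xFF810A0B, i.e. the 24-bit value was
+//   // sign-extended to a negative 32-bit integer.
+//   ByteWriter<int32_t, 3>::WriteBigEndian(buf, val);  // Restores the bytes.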
+
+#include <stdint.h>
+
+#include <limits>
+
+namespace webrtc {
+
+// According to ISO C standard ISO/IEC 9899, section 6.2.6.2 (2), the three
+// representations of signed integers allowed are two's complement, one's
+// complement and sign/magnitude. We can detect which is used by looking at
+// the last two bits of -1, which will be 11 in two's complement, 10 in one's
+// complement and 01 in sign/magnitude.
+// TODO(sprang): In the unlikely event that we actually need to support a
+// platform that doesn't use two's complement, implement conversion to/from
+// wire format.
+
+// Assume that if any one signed integer type is two's complement, then all
+// others will be too.
+static_assert(
+ (-1 & 0x03) == 0x03,
+ "Only two's complement representation of signed integers supported.");
+
+// Plain const char* won't work for static_assert, use #define instead.
+#define kSizeErrorMsg "Byte size must be less than or equal to data type size."
+
+// Utility class for getting the unsigned equivalent of a signed type.
+template <typename T>
+struct UnsignedOf;
+
+// Class for reading integers from a sequence of bytes.
+// T = type of integer, B = bytes to read, is_signed = true if signed integer.
+// If is_signed is true and B < sizeof(T), sign extension might be needed.
+template <typename T,
+ unsigned int B = sizeof(T),
+ bool is_signed = std::numeric_limits<T>::is_signed>
+class ByteReader;
+
+// Specialization of ByteReader for unsigned types.
+template <typename T, unsigned int B>
+class ByteReader<T, B, false> {
+ public:
+ static T ReadBigEndian(const uint8_t* data) {
+ static_assert(B <= sizeof(T), kSizeErrorMsg);
+ return InternalReadBigEndian(data);
+ }
+
+ static T ReadLittleEndian(const uint8_t* data) {
+ static_assert(B <= sizeof(T), kSizeErrorMsg);
+ return InternalReadLittleEndian(data);
+ }
+
+ private:
+ static T InternalReadBigEndian(const uint8_t* data) {
+ T val(0);
+ for (unsigned int i = 0; i < B; ++i)
+ val |= static_cast<T>(data[i]) << ((B - 1 - i) * 8);
+ return val;
+ }
+
+ static T InternalReadLittleEndian(const uint8_t* data) {
+ T val(0);
+ for (unsigned int i = 0; i < B; ++i)
+ val |= static_cast<T>(data[i]) << (i * 8);
+ return val;
+ }
+};
+
+// Specialization of ByteReader for signed types.
+template <typename T, unsigned int B>
+class ByteReader<T, B, true> {
+ public:
+ typedef typename UnsignedOf<T>::Type U;
+
+ static T ReadBigEndian(const uint8_t* data) {
+ U unsigned_val = ByteReader<T, B, false>::ReadBigEndian(data);
+ if (B < sizeof(T))
+ unsigned_val = SignExtend(unsigned_val);
+ return ReinterpretAsSigned(unsigned_val);
+ }
+
+ static T ReadLittleEndian(const uint8_t* data) {
+ U unsigned_val = ByteReader<T, B, false>::ReadLittleEndian(data);
+ if (B < sizeof(T))
+ unsigned_val = SignExtend(unsigned_val);
+ return ReinterpretAsSigned(unsigned_val);
+ }
+
+ private:
+  // As a hack to avoid implementation-specific or undefined behavior when
+  // bit-shifting or casting signed integers, read as an unsigned equivalent
+  // instead and convert to signed. This is safe since we have asserted that
+  // two's complement form is used.
+ static T ReinterpretAsSigned(U unsigned_val) {
+ // An unsigned value with only the highest order bit set (ex 0x80).
+ const U kUnsignedHighestBitMask = static_cast<U>(1)
+ << ((sizeof(U) * 8) - 1);
+ // A signed value with only the highest bit set. Since this is two's
+ // complement form, we can use the min value from std::numeric_limits.
+ const T kSignedHighestBitMask = std::numeric_limits<T>::min();
+
+ T val;
+ if ((unsigned_val & kUnsignedHighestBitMask) != 0) {
+      // Casting is only safe when the unsigned value can be represented in
+      // the signed target type, so mask out the highest bit and OR it back
+      // in manually.
+ val = static_cast<T>(unsigned_val & ~kUnsignedHighestBitMask);
+ val |= kSignedHighestBitMask;
+ } else {
+ val = static_cast<T>(unsigned_val);
+ }
+ return val;
+ }
+
+ // If number of bytes is less than native data type (eg 24 bit, in int32_t),
+ // and the most significant bit of the actual data is set, we must sign
+ // extend the remaining byte(s) with ones so that the correct negative
+ // number is retained.
+ // Ex: 0x810A0B -> 0xFF810A0B, but 0x710A0B -> 0x00710A0B
+ static U SignExtend(const U val) {
+ const uint8_t kMsb = static_cast<uint8_t>(val >> ((B - 1) * 8));
+ if ((kMsb & 0x80) != 0) {
+      // Create a mask where all bits used by the B bytes are set to one,
+      // for instance 0x00FFFFFF for B = 3. Bit-wise invert that mask (to
+      // 0xFF000000 in the example above) and OR it into the input value.
+      // The "B % sizeof(T)" is a workaround for shift-count warnings when
+      // B == sizeof(T), in which case this code won't be called anyway.
+      // Widen the 1 to U before shifting so the shift stays within U's width.
+      const U kUsedBitsMask =
+          (static_cast<U>(1) << ((B % sizeof(T)) * 8)) - 1;
+ return ~kUsedBitsMask | val;
+ }
+ return val;
+ }
+};
+
+// Class for writing integers to a sequence of bytes
+// T = type of integer, B = bytes to write
+template <typename T,
+ unsigned int B = sizeof(T),
+ bool is_signed = std::numeric_limits<T>::is_signed>
+class ByteWriter;
+
+// Specialization of ByteWriter for unsigned types.
+template <typename T, unsigned int B>
+class ByteWriter<T, B, false> {
+ public:
+ static void WriteBigEndian(uint8_t* data, T val) {
+ static_assert(B <= sizeof(T), kSizeErrorMsg);
+ for (unsigned int i = 0; i < B; ++i) {
+ data[i] = val >> ((B - 1 - i) * 8);
+ }
+ }
+
+ static void WriteLittleEndian(uint8_t* data, T val) {
+ static_assert(B <= sizeof(T), kSizeErrorMsg);
+ for (unsigned int i = 0; i < B; ++i) {
+ data[i] = val >> (i * 8);
+ }
+ }
+};
+
+// Specialization of ByteWriter for signed types.
+template <typename T, unsigned int B>
+class ByteWriter<T, B, true> {
+ public:
+ typedef typename UnsignedOf<T>::Type U;
+
+ static void WriteBigEndian(uint8_t* data, T val) {
+ ByteWriter<U, B, false>::WriteBigEndian(data, ReinterpretAsUnsigned(val));
+ }
+
+ static void WriteLittleEndian(uint8_t* data, T val) {
+ ByteWriter<U, B, false>::WriteLittleEndian(data,
+ ReinterpretAsUnsigned(val));
+ }
+
+ private:
+ static U ReinterpretAsUnsigned(T val) {
+ // According to ISO C standard ISO/IEC 9899, section 6.3.1.3 (1, 2) a
+ // conversion from signed to unsigned keeps the value if the new type can
+ // represent it, and otherwise adds one more than the max value of T until
+ // the value is in range. For two's complement, this fortunately means
+ // that the bit-wise value will be intact. Thus, since we have asserted that
+ // two's complement form is actually used, a simple cast is sufficient.
+ return static_cast<U>(val);
+ }
+};
+
+// ----- Below follows specializations of UnsignedOf utility class -----
+
+template <>
+struct UnsignedOf<int8_t> {
+ typedef uint8_t Type;
+};
+template <>
+struct UnsignedOf<int16_t> {
+ typedef uint16_t Type;
+};
+template <>
+struct UnsignedOf<int32_t> {
+ typedef uint32_t Type;
+};
+template <>
+struct UnsignedOf<int64_t> {
+ typedef uint64_t Type;
+};
+
+// ----- Below follows specializations for unsigned, B in { 1, 2, 4, 8 } -----
+
+// TODO(sprang): Check if these actually help or if the generic cases will be
+// unrolled and optimized to similar performance.
+
+// Specializations for single bytes
+template <typename T>
+class ByteReader<T, 1, false> {
+ public:
+ static T ReadBigEndian(const uint8_t* data) {
+ static_assert(sizeof(T) == 1, kSizeErrorMsg);
+ return data[0];
+ }
+
+ static T ReadLittleEndian(const uint8_t* data) {
+ static_assert(sizeof(T) == 1, kSizeErrorMsg);
+ return data[0];
+ }
+};
+
+template <typename T>
+class ByteWriter<T, 1, false> {
+ public:
+ static void WriteBigEndian(uint8_t* data, T val) {
+ static_assert(sizeof(T) == 1, kSizeErrorMsg);
+ data[0] = val;
+ }
+
+ static void WriteLittleEndian(uint8_t* data, T val) {
+ static_assert(sizeof(T) == 1, kSizeErrorMsg);
+ data[0] = val;
+ }
+};
+
+// Specializations for two byte words
+template <typename T>
+class ByteReader<T, 2, false> {
+ public:
+ static T ReadBigEndian(const uint8_t* data) {
+ static_assert(sizeof(T) >= 2, kSizeErrorMsg);
+ return (data[0] << 8) | data[1];
+ }
+
+ static T ReadLittleEndian(const uint8_t* data) {
+ static_assert(sizeof(T) >= 2, kSizeErrorMsg);
+ return data[0] | (data[1] << 8);
+ }
+};
+
+template <typename T>
+class ByteWriter<T, 2, false> {
+ public:
+ static void WriteBigEndian(uint8_t* data, T val) {
+ static_assert(sizeof(T) >= 2, kSizeErrorMsg);
+ data[0] = val >> 8;
+ data[1] = val;
+ }
+
+ static void WriteLittleEndian(uint8_t* data, T val) {
+ static_assert(sizeof(T) >= 2, kSizeErrorMsg);
+ data[0] = val;
+ data[1] = val >> 8;
+ }
+};
+
+// Specializations for four byte words.
+template <typename T>
+class ByteReader<T, 4, false> {
+ public:
+ static T ReadBigEndian(const uint8_t* data) {
+ static_assert(sizeof(T) >= 4, kSizeErrorMsg);
+ return (Get(data, 0) << 24) | (Get(data, 1) << 16) | (Get(data, 2) << 8) |
+ Get(data, 3);
+ }
+
+ static T ReadLittleEndian(const uint8_t* data) {
+ static_assert(sizeof(T) >= 4, kSizeErrorMsg);
+ return Get(data, 0) | (Get(data, 1) << 8) | (Get(data, 2) << 16) |
+ (Get(data, 3) << 24);
+ }
+
+ private:
+ inline static T Get(const uint8_t* data, unsigned int index) {
+ return static_cast<T>(data[index]);
+ }
+};
+
+// Specializations for four byte words.
+template <typename T>
+class ByteWriter<T, 4, false> {
+ public:
+ static void WriteBigEndian(uint8_t* data, T val) {
+ static_assert(sizeof(T) >= 4, kSizeErrorMsg);
+ data[0] = val >> 24;
+ data[1] = val >> 16;
+ data[2] = val >> 8;
+ data[3] = val;
+ }
+
+ static void WriteLittleEndian(uint8_t* data, T val) {
+ static_assert(sizeof(T) >= 4, kSizeErrorMsg);
+ data[0] = val;
+ data[1] = val >> 8;
+ data[2] = val >> 16;
+ data[3] = val >> 24;
+ }
+};
+
+// Specializations for eight byte words.
+template <typename T>
+class ByteReader<T, 8, false> {
+ public:
+ static T ReadBigEndian(const uint8_t* data) {
+ static_assert(sizeof(T) >= 8, kSizeErrorMsg);
+ return (Get(data, 0) << 56) | (Get(data, 1) << 48) | (Get(data, 2) << 40) |
+ (Get(data, 3) << 32) | (Get(data, 4) << 24) | (Get(data, 5) << 16) |
+ (Get(data, 6) << 8) | Get(data, 7);
+ }
+
+ static T ReadLittleEndian(const uint8_t* data) {
+ static_assert(sizeof(T) >= 8, kSizeErrorMsg);
+ return Get(data, 0) | (Get(data, 1) << 8) | (Get(data, 2) << 16) |
+ (Get(data, 3) << 24) | (Get(data, 4) << 32) | (Get(data, 5) << 40) |
+ (Get(data, 6) << 48) | (Get(data, 7) << 56);
+ }
+
+ private:
+ inline static T Get(const uint8_t* data, unsigned int index) {
+ return static_cast<T>(data[index]);
+ }
+};
+
+template <typename T>
+class ByteWriter<T, 8, false> {
+ public:
+ static void WriteBigEndian(uint8_t* data, T val) {
+ static_assert(sizeof(T) >= 8, kSizeErrorMsg);
+ data[0] = val >> 56;
+ data[1] = val >> 48;
+ data[2] = val >> 40;
+ data[3] = val >> 32;
+ data[4] = val >> 24;
+ data[5] = val >> 16;
+ data[6] = val >> 8;
+ data[7] = val;
+ }
+
+ static void WriteLittleEndian(uint8_t* data, T val) {
+ static_assert(sizeof(T) >= 8, kSizeErrorMsg);
+ data[0] = val;
+ data[1] = val >> 8;
+ data[2] = val >> 16;
+ data[3] = val >> 24;
+ data[4] = val >> 32;
+ data[5] = val >> 40;
+ data[6] = val >> 48;
+ data[7] = val >> 56;
+ }
+};
+
+} // namespace webrtc
+
+#endif // MODULES_RTP_RTCP_SOURCE_BYTE_IO_H_
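As an illustration of the templates above, here is a minimal standalone
sketch (the byte values are illustrative; only byte_io.h is assumed):

#include <cassert>
#include <cstdint>

#include "modules/rtp_rtcp/source/byte_io.h"

void ByteIoSketch() {
  // 24-bit big-endian value whose most significant bit is set.
  const uint8_t buf[3] = {0x81, 0x0A, 0x0B};

  // Unsigned read: no sign extension, yields 0x00810A0B.
  uint32_t u = webrtc::ByteReader<uint32_t, 3>::ReadBigEndian(buf);
  assert(u == 0x810A0Bu);

  // Signed read: bit 23 is set, so SignExtend() produces 0xFF810A0B,
  // i.e. -8320501 as an int32_t (matching the 0x810A0B example above).
  int32_t s = webrtc::ByteReader<int32_t, 3>::ReadBigEndian(buf);
  assert(s == static_cast<int32_t>(0xFF810A0B));

  // Writing emits exactly B bytes in the requested byte order.
  uint8_t out[2];
  webrtc::ByteWriter<uint16_t>::WriteBigEndian(out, 0x1234);
  assert(out[0] == 0x12 && out[1] == 0x34);
}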
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/byte_io_unittest.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/byte_io_unittest.cc
new file mode 100644
index 0000000000..e4dea813b8
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/byte_io_unittest.cc
@@ -0,0 +1,270 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/byte_io.h"
+
+#include <limits>
+
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+class ByteIoTest : public ::testing::Test {
+ protected:
+ ByteIoTest() = default;
+ ~ByteIoTest() override = default;
+
+ enum { kAlignments = sizeof(uint64_t) - 1 };
+
+ // Method to create a test value that is not the same when byte reversed.
+ template <typename T>
+ T CreateTestValue(bool negative, uint8_t num_bytes) {
+ // Examples of output:
+ // T = int32_t, negative = false, num_bytes = 4: 0x00010203
+ // T = int32_t, negative = true, num_bytes = 4: 0xFFFEFDFC
+ // T = int32_t, negative = false, num_bytes = 3: 0x000102
+ // * T = int32_t, negative = true, num_bytes = 3: 0xFFFEFD
+
+ T val = 0;
+ for (uint8_t i = 0; i != num_bytes; ++i) {
+ val = (val << 8) + (negative ? (0xFF - i) : (i + 1));
+ }
+
+ // This loop creates a sign extension mask, if necessary.
+ // For the last example (marked * above), the number needs to be sign
+ // extended to be a valid int32_t. The sign extension mask is 0xFF000000.
+ // Each step below is commented using this example.
+ if (std::numeric_limits<T>::is_signed && negative &&
+ num_bytes < sizeof(T)) {
+ // Start with mask = 0xFFFFFFFF.
+ T mask = static_cast<T>(-1);
+ // Create a temporary for the lowest byte (0x000000FF).
+ const T neg_byte = static_cast<T>(0xFF);
+ for (int i = 0; i < num_bytes; ++i) {
+ // And the inverse of the temporary and the mask:
+ // 0xFFFFFFFF & 0xFFFFFF00 = 0xFFFFFF00.
+ // 0xFFFFFF00 & 0xFFFF00FF = 0xFFFF0000.
+ // 0xFFFF0000 & 0xFF00FFFF = 0xFF000000.
+ mask &= ~(neg_byte << (i * 8));
+ }
+ // Add the sign extension mask to the actual value.
+ val |= mask;
+ }
+ return val;
+ }
+
+ // Populate byte buffer with value, in the requested byte order.
+ template <typename T>
+ void PopulateTestData(uint8_t* data, T value, int num_bytes, bool bigendian) {
+ if (bigendian) {
+ for (int i = 0; i < num_bytes; ++i) {
+ data[i] = (value >> ((num_bytes - i - 1) * 8)) & 0xFF;
+ }
+ } else {
+ for (int i = 0; i < num_bytes; ++i) {
+ data[i] = (value >> (i * 8)) & 0xFF;
+ }
+ }
+ }
+
+ // Test reading numbers in the given byte order.
+ // Template arguments: Type T, read method RM(buffer), B bytes of data.
+ template <typename T, T (*RM)(const uint8_t*), int B>
+ void TestRead(bool big_endian) {
+ // Test both for values that are positive and negative (if signed).
+ for (int neg = 0; neg < 2; ++neg) {
+ bool negative = neg > 0;
+
+ // Create the test value to encode into the byte buffer.
+ T test_value = CreateTestValue<T>(negative, B);
+ uint8_t bytes[B + kAlignments];
+
+ // Make one test for each alignment.
+ for (int i = 0; i < kAlignments; ++i) {
+ PopulateTestData(bytes + i, test_value, B, big_endian);
+
+ // Check that the test value is retrieved from the buffer by the read method.
+ EXPECT_EQ(test_value, RM(bytes + i));
+ }
+ }
+ }
+
+ // Test writing numbers in the given byte order.
+ // Template arguments: Type T, write method WM(buffer, value), B bytes of data.
+ template <typename T, void (*WM)(uint8_t*, T), int B>
+ void TestWrite(bool big_endian) {
+ // Test both for values that are positive and negative (if signed).
+ for (int neg = 0; neg < 2; ++neg) {
+ bool negative = neg > 0;
+
+ // Create the test value and buffers for the expected and actual bytes.
+ T test_value = CreateTestValue<T>(negative, B);
+ uint8_t expected_bytes[B + kAlignments];
+ uint8_t bytes[B + kAlignments];
+
+ // Make one test for each alignment.
+ for (int i = 0; i < kAlignments; ++i) {
+ PopulateTestData(expected_bytes + i, test_value, B, big_endian);
+
+ // Zero initialize buffer and let WM populate it.
+ memset(bytes, 0, B + kAlignments);
+ WM(bytes + i, test_value);
+
+ // Check that the data produced by WM matches the expected byte order.
+ for (int j = 0; j < B; ++j) {
+ EXPECT_EQ(expected_bytes[i + j], bytes[i + j]);
+ }
+ }
+ }
+ }
+};
+
+TEST_F(ByteIoTest, Test16UBitBigEndian) {
+ TestRead<uint16_t, ByteReader<uint16_t>::ReadBigEndian, sizeof(uint16_t)>(
+ true);
+ TestWrite<uint16_t, ByteWriter<uint16_t>::WriteBigEndian, sizeof(uint16_t)>(
+ true);
+}
+
+TEST_F(ByteIoTest, Test24UBitBigEndian) {
+ TestRead<uint32_t, ByteReader<uint32_t, 3>::ReadBigEndian, 3>(true);
+ TestWrite<uint32_t, ByteWriter<uint32_t, 3>::WriteBigEndian, 3>(true);
+}
+
+TEST_F(ByteIoTest, Test32UBitBigEndian) {
+ TestRead<uint32_t, ByteReader<uint32_t>::ReadBigEndian, sizeof(uint32_t)>(
+ true);
+ TestWrite<uint32_t, ByteWriter<uint32_t>::WriteBigEndian, sizeof(uint32_t)>(
+ true);
+}
+
+TEST_F(ByteIoTest, Test64UBitBigEndian) {
+ TestRead<uint64_t, ByteReader<uint64_t>::ReadBigEndian, sizeof(uint64_t)>(
+ true);
+ TestWrite<uint64_t, ByteWriter<uint64_t>::WriteBigEndian, sizeof(uint64_t)>(
+ true);
+}
+
+TEST_F(ByteIoTest, Test16SBitBigEndian) {
+ TestRead<int16_t, ByteReader<int16_t>::ReadBigEndian, sizeof(int16_t)>(true);
+ TestWrite<int16_t, ByteWriter<int16_t>::WriteBigEndian, sizeof(int16_t)>(
+ true);
+}
+
+TEST_F(ByteIoTest, Test24SBitBigEndian) {
+ TestRead<int32_t, ByteReader<int32_t, 3>::ReadBigEndian, 3>(true);
+ TestWrite<int32_t, ByteWriter<int32_t, 3>::WriteBigEndian, 3>(true);
+}
+
+TEST_F(ByteIoTest, Test32SBitBigEndian) {
+ TestRead<int32_t, ByteReader<int32_t>::ReadBigEndian, sizeof(int32_t)>(true);
+ TestWrite<int32_t, ByteWriter<int32_t>::WriteBigEndian, sizeof(int32_t)>(
+ true);
+}
+
+TEST_F(ByteIoTest, Test64SBitBigEndian) {
+ TestRead<int64_t, ByteReader<int64_t>::ReadBigEndian, sizeof(int64_t)>(true);
+ TestWrite<int64_t, ByteWriter<int64_t>::WriteBigEndian, sizeof(int64_t)>(
+ true);
+}
+
+TEST_F(ByteIoTest, Test16UBitLittleEndian) {
+ TestRead<uint16_t, ByteReader<uint16_t>::ReadLittleEndian, sizeof(uint16_t)>(
+ false);
+ TestWrite<uint16_t, ByteWriter<uint16_t>::WriteLittleEndian,
+ sizeof(uint16_t)>(false);
+}
+
+TEST_F(ByteIoTest, Test24UBitLittleEndian) {
+ TestRead<uint32_t, ByteReader<uint32_t, 3>::ReadLittleEndian, 3>(false);
+ TestWrite<uint32_t, ByteWriter<uint32_t, 3>::WriteLittleEndian, 3>(false);
+}
+
+TEST_F(ByteIoTest, Test32UBitLittleEndian) {
+ TestRead<uint32_t, ByteReader<uint32_t>::ReadLittleEndian, sizeof(uint32_t)>(
+ false);
+ TestWrite<uint32_t, ByteWriter<uint32_t>::WriteLittleEndian,
+ sizeof(uint32_t)>(false);
+}
+
+TEST_F(ByteIoTest, Test64UBitLittleEndian) {
+ TestRead<uint64_t, ByteReader<uint64_t>::ReadLittleEndian, sizeof(uint64_t)>(
+ false);
+ TestWrite<uint64_t, ByteWriter<uint64_t>::WriteLittleEndian,
+ sizeof(uint64_t)>(false);
+}
+
+TEST_F(ByteIoTest, Test16SBitLittleEndian) {
+ TestRead<int16_t, ByteReader<int16_t>::ReadLittleEndian, sizeof(int16_t)>(
+ false);
+ TestWrite<int16_t, ByteWriter<int16_t>::WriteLittleEndian, sizeof(int16_t)>(
+ false);
+}
+
+TEST_F(ByteIoTest, Test24SBitLittleEndian) {
+ TestRead<int32_t, ByteReader<int32_t, 3>::ReadLittleEndian, 3>(false);
+ TestWrite<int32_t, ByteWriter<int32_t, 3>::WriteLittleEndian, 3>(false);
+}
+
+TEST_F(ByteIoTest, Test32SBitLittleEndian) {
+ TestRead<int32_t, ByteReader<int32_t>::ReadLittleEndian, sizeof(int32_t)>(
+ false);
+ TestWrite<int32_t, ByteWriter<int32_t>::WriteLittleEndian, sizeof(int32_t)>(
+ false);
+}
+
+TEST_F(ByteIoTest, Test64SBitLittleEndian) {
+ TestRead<int64_t, ByteReader<int64_t>::ReadLittleEndian, sizeof(int64_t)>(
+ false);
+ TestWrite<int64_t, ByteWriter<int64_t>::WriteLittleEndian, sizeof(int64_t)>(
+ false);
+}
+
+// Sets up a fixed byte array and converts N bytes from the array into a
+// uint64_t. Verifies the value against a hard-coded reference.
+TEST(ByteIo, SanityCheckFixedByteArrayUnsignedReadBigEndian) {
+ uint8_t data[8] = {0xFF, 0xEE, 0xDD, 0xCC, 0xBB, 0xAA, 0x99, 0x88};
+ uint64_t value = ByteReader<uint64_t, 2>::ReadBigEndian(data);
+ EXPECT_EQ(static_cast<uint64_t>(0xFFEE), value);
+ value = ByteReader<uint64_t, 3>::ReadBigEndian(data);
+ EXPECT_EQ(static_cast<uint64_t>(0xFFEEDD), value);
+ value = ByteReader<uint64_t, 4>::ReadBigEndian(data);
+ EXPECT_EQ(static_cast<uint64_t>(0xFFEEDDCC), value);
+ value = ByteReader<uint64_t, 5>::ReadBigEndian(data);
+ EXPECT_EQ(static_cast<uint64_t>(0xFFEEDDCCBB), value);
+ value = ByteReader<uint64_t, 6>::ReadBigEndian(data);
+ EXPECT_EQ(static_cast<uint64_t>(0xFFEEDDCCBBAA), value);
+ value = ByteReader<uint64_t, 7>::ReadBigEndian(data);
+ EXPECT_EQ(static_cast<uint64_t>(0xFFEEDDCCBBAA99), value);
+ value = ByteReader<uint64_t, 8>::ReadBigEndian(data);
+ EXPECT_EQ(static_cast<uint64_t>(0xFFEEDDCCBBAA9988), value);
+}
+
+// Same as above, but for little-endian reading.
+TEST(ByteIo, SanityCheckFixedByteArrayUnsignedReadLittleEndian) {
+ uint8_t data[8] = {0xFF, 0xEE, 0xDD, 0xCC, 0xBB, 0xAA, 0x99, 0x88};
+ uint64_t value = ByteReader<uint64_t, 2>::ReadLittleEndian(data);
+ EXPECT_EQ(static_cast<uint64_t>(0xEEFF), value);
+ value = ByteReader<uint64_t, 3>::ReadLittleEndian(data);
+ EXPECT_EQ(static_cast<uint64_t>(0xDDEEFF), value);
+ value = ByteReader<uint64_t, 4>::ReadLittleEndian(data);
+ EXPECT_EQ(static_cast<uint64_t>(0xCCDDEEFF), value);
+ value = ByteReader<uint64_t, 5>::ReadLittleEndian(data);
+ EXPECT_EQ(static_cast<uint64_t>(0xBBCCDDEEFF), value);
+ value = ByteReader<uint64_t, 6>::ReadLittleEndian(data);
+ EXPECT_EQ(static_cast<uint64_t>(0xAABBCCDDEEFF), value);
+ value = ByteReader<uint64_t, 7>::ReadLittleEndian(data);
+ EXPECT_EQ(static_cast<uint64_t>(0x99AABBCCDDEEFF), value);
+ value = ByteReader<uint64_t, 8>::ReadLittleEndian(data);
+ EXPECT_EQ(static_cast<uint64_t>(0x8899AABBCCDDEEFF), value);
+}
+} // namespace
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/capture_clock_offset_updater.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/capture_clock_offset_updater.cc
new file mode 100644
index 0000000000..a5b12cb422
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/capture_clock_offset_updater.cc
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/capture_clock_offset_updater.h"
+
+namespace webrtc {
+
+absl::optional<int64_t>
+CaptureClockOffsetUpdater::AdjustEstimatedCaptureClockOffset(
+ absl::optional<int64_t> remote_capture_clock_offset) const {
+ if (remote_capture_clock_offset == absl::nullopt ||
+ remote_to_local_clock_offset_ == absl::nullopt) {
+ return absl::nullopt;
+ }
+
+ // Do calculations as "unsigned" to make overflows deterministic.
+ return static_cast<uint64_t>(*remote_capture_clock_offset) +
+ static_cast<uint64_t>(*remote_to_local_clock_offset_);
+}
+
+void CaptureClockOffsetUpdater::SetRemoteToLocalClockOffset(
+ absl::optional<int64_t> offset_q32x32) {
+ remote_to_local_clock_offset_ = offset_q32x32;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/capture_clock_offset_updater.h b/third_party/libwebrtc/modules/rtp_rtcp/source/capture_clock_offset_updater.h
new file mode 100644
index 0000000000..71d3eb4831
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/capture_clock_offset_updater.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_RTP_RTCP_SOURCE_CAPTURE_CLOCK_OFFSET_UPDATER_H_
+#define MODULES_RTP_RTCP_SOURCE_CAPTURE_CLOCK_OFFSET_UPDATER_H_
+
+#include <stdint.h>
+
+#include "absl/types/optional.h"
+
+namespace webrtc {
+
+//
+// Helper class for calculating the clock offset against the capturer's clock.
+//
+// This is achieved by adjusting the estimated capture clock offset in the
+// received Absolute Capture Time RTP header extension (see
+// https://webrtc.org/experiments/rtp-hdrext/abs-capture-time/), which
+// represents the clock offset between a remote sender and the capturer, by
+// adding the remote-to-local clock offset.
+
+class CaptureClockOffsetUpdater {
+ public:
+ // Adjusts remote_capture_clock_offset, which originates from the Absolute
+ // Capture Time RTP header extension, to get the local clock offset against
+ // the capturer's clock.
+ absl::optional<int64_t> AdjustEstimatedCaptureClockOffset(
+ absl::optional<int64_t> remote_capture_clock_offset) const;
+
+ // Sets the NTP clock offset between the sender system (which may be different
+ // from the capture system) and the local system. This information is normally
+ // provided by passing half the value of the Round-Trip Time estimation given
+ // by RTCP sender reports (see DLSR/DLRR).
+ //
+ // Note that the value must be in Q32.32-formatted fixed-point seconds.
+ void SetRemoteToLocalClockOffset(absl::optional<int64_t> offset_q32x32);
+
+ private:
+ absl::optional<int64_t> remote_to_local_clock_offset_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_RTP_RTCP_SOURCE_CAPTURE_CLOCK_OFFSET_UPDATER_H_
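For reference, a rough sketch of the Q32.32 arithmetic involved;
MsToQ32x32() below is a hypothetical stand-in for Int64MsToQ32x32() from
system_wrappers/include/ntp_time.h, which the unit test that follows uses:

#include <cstdint>

// Q32.32 keeps whole seconds in the high 32 bits and fractional seconds in
// the low 32 bits, so x milliseconds ~= x * 2^32 / 1000 fixed-point units.
int64_t MsToQ32x32(int64_t ms) {
  return static_cast<int64_t>(ms * (4294967296.0 / 1000.0));
}

int64_t AdjustedOffsetQ32x32(int64_t remote_capture_offset_ms,
                             int64_t remote_to_local_offset_ms) {
  // Adding as unsigned makes any overflow wrap deterministically, mirroring
  // the casts in AdjustEstimatedCaptureClockOffset().
  return static_cast<int64_t>(
      static_cast<uint64_t>(MsToQ32x32(remote_capture_offset_ms)) +
      static_cast<uint64_t>(MsToQ32x32(remote_to_local_offset_ms)));
}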
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/capture_clock_offset_updater_unittest.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/capture_clock_offset_updater_unittest.cc
new file mode 100644
index 0000000000..43e1dd1379
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/capture_clock_offset_updater_unittest.cc
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/capture_clock_offset_updater.h"
+
+#include "system_wrappers/include/ntp_time.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+TEST(AbsoluteCaptureTimeReceiverTest,
+ SkipEstimatedCaptureClockOffsetIfRemoteToLocalClockOffsetIsUnknown) {
+ static const absl::optional<int64_t> kRemoteCaptureClockOffset =
+ Int64MsToQ32x32(-350);
+ CaptureClockOffsetUpdater updater;
+ updater.SetRemoteToLocalClockOffset(absl::nullopt);
+ EXPECT_EQ(
+ updater.AdjustEstimatedCaptureClockOffset(kRemoteCaptureClockOffset),
+ absl::nullopt);
+}
+
+TEST(AbsoluteCaptureTimeReceiverTest,
+ SkipEstimatedCaptureClockOffsetIfRemoteCaptureClockOffsetIsUnknown) {
+ static const absl::optional<int64_t> kCaptureClockOffsetNull = absl::nullopt;
+ CaptureClockOffsetUpdater updater;
+ updater.SetRemoteToLocalClockOffset(0);
+ EXPECT_EQ(updater.AdjustEstimatedCaptureClockOffset(kCaptureClockOffsetNull),
+ kCaptureClockOffsetNull);
+
+ static const absl::optional<int64_t> kRemoteCaptureClockOffset =
+ Int64MsToQ32x32(-350);
+ EXPECT_EQ(
+ updater.AdjustEstimatedCaptureClockOffset(kRemoteCaptureClockOffset),
+ kRemoteCaptureClockOffset);
+}
+
+TEST(AbsoluteCaptureTimeReceiverTest, EstimatedCaptureClockOffsetArithmetic) {
+ static const absl::optional<int64_t> kRemoteCaptureClockOffset =
+ Int64MsToQ32x32(-350);
+ static const absl::optional<int64_t> kRemoteToLocalClockOffset =
+ Int64MsToQ32x32(-7000007);
+ CaptureClockOffsetUpdater updater;
+ updater.SetRemoteToLocalClockOffset(kRemoteToLocalClockOffset);
+ EXPECT_THAT(
+ updater.AdjustEstimatedCaptureClockOffset(kRemoteCaptureClockOffset),
+ ::testing::Optional(::testing::Eq(*kRemoteCaptureClockOffset +
+ *kRemoteToLocalClockOffset)));
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/create_video_rtp_depacketizer.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/create_video_rtp_depacketizer.cc
new file mode 100644
index 0000000000..f1e4eddb4b
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/create_video_rtp_depacketizer.cc
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/create_video_rtp_depacketizer.h"
+
+#include <memory>
+
+#include "api/video/video_codec_type.h"
+#include "modules/rtp_rtcp/source/video_rtp_depacketizer.h"
+#include "modules/rtp_rtcp/source/video_rtp_depacketizer_av1.h"
+#include "modules/rtp_rtcp/source/video_rtp_depacketizer_generic.h"
+#include "modules/rtp_rtcp/source/video_rtp_depacketizer_h264.h"
+#include "modules/rtp_rtcp/source/video_rtp_depacketizer_vp8.h"
+#include "modules/rtp_rtcp/source/video_rtp_depacketizer_vp9.h"
+
+namespace webrtc {
+
+std::unique_ptr<VideoRtpDepacketizer> CreateVideoRtpDepacketizer(
+ VideoCodecType codec) {
+ switch (codec) {
+ case kVideoCodecH264:
+ return std::make_unique<VideoRtpDepacketizerH264>();
+ case kVideoCodecVP8:
+ return std::make_unique<VideoRtpDepacketizerVp8>();
+ case kVideoCodecVP9:
+ return std::make_unique<VideoRtpDepacketizerVp9>();
+ case kVideoCodecAV1:
+ return std::make_unique<VideoRtpDepacketizerAv1>();
+ case kVideoCodecGeneric:
+ case kVideoCodecMultiplex:
+ return std::make_unique<VideoRtpDepacketizerGeneric>();
+ }
+ RTC_CHECK_NOTREACHED();
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/create_video_rtp_depacketizer.h b/third_party/libwebrtc/modules/rtp_rtcp/source/create_video_rtp_depacketizer.h
new file mode 100644
index 0000000000..102cacf598
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/create_video_rtp_depacketizer.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_RTP_RTCP_SOURCE_CREATE_VIDEO_RTP_DEPACKETIZER_H_
+#define MODULES_RTP_RTCP_SOURCE_CREATE_VIDEO_RTP_DEPACKETIZER_H_
+
+#include <memory>
+
+#include "api/video/video_codec_type.h"
+#include "modules/rtp_rtcp/source/video_rtp_depacketizer.h"
+
+namespace webrtc {
+
+std::unique_ptr<VideoRtpDepacketizer> CreateVideoRtpDepacketizer(
+ VideoCodecType codec);
+
+} // namespace webrtc
+
+#endif // MODULES_RTP_RTCP_SOURCE_CREATE_VIDEO_RTP_DEPACKETIZER_H_
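A hypothetical call site for the factory, assuming the Parse() interface
declared in video_rtp_depacketizer.h:

#include <memory>
#include <utility>

#include "modules/rtp_rtcp/source/create_video_rtp_depacketizer.h"
#include "rtc_base/copy_on_write_buffer.h"

void DepacketizeSketch(rtc::CopyOnWriteBuffer rtp_payload) {
  std::unique_ptr<webrtc::VideoRtpDepacketizer> depacketizer =
      webrtc::CreateVideoRtpDepacketizer(webrtc::kVideoCodecVP8);
  absl::optional<webrtc::VideoRtpDepacketizer::ParsedRtpPayload> parsed =
      depacketizer->Parse(std::move(rtp_payload));
  if (!parsed) {
    // Malformed payload; drop the packet.
  }
}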
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/deprecated/deprecated_rtp_sender_egress.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/deprecated/deprecated_rtp_sender_egress.cc
new file mode 100644
index 0000000000..c6a9cfa85c
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/deprecated/deprecated_rtp_sender_egress.cc
@@ -0,0 +1,481 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/deprecated/deprecated_rtp_sender_egress.h"
+
+#include <limits>
+#include <memory>
+#include <utility>
+
+#include "absl/strings/match.h"
+#include "api/transport/field_trial_based_config.h"
+#include "api/units/timestamp.h"
+#include "logging/rtc_event_log/events/rtc_event_rtp_packet_outgoing.h"
+#include "modules/remote_bitrate_estimator/test/bwe_test_logging.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+namespace {
+constexpr uint32_t kTimestampTicksPerMs = 90;
+constexpr int kSendSideDelayWindowMs = 1000;
+constexpr int kBitrateStatisticsWindowMs = 1000;
+constexpr size_t kRtpSequenceNumberMapMaxEntries = 1 << 13;
+
+bool IsDisabled(absl::string_view name, const FieldTrialsView* field_trials) {
+ FieldTrialBasedConfig default_trials;
+ auto& trials = field_trials ? *field_trials : default_trials;
+ return absl::StartsWith(trials.Lookup(name), "Disabled");
+}
+} // namespace
+
+DEPRECATED_RtpSenderEgress::NonPacedPacketSender::NonPacedPacketSender(
+ DEPRECATED_RtpSenderEgress* sender,
+ PacketSequencer* sequence_number_assigner)
+ : transport_sequence_number_(0),
+ sender_(sender),
+ sequence_number_assigner_(sequence_number_assigner) {
+ RTC_DCHECK(sequence_number_assigner_);
+}
+DEPRECATED_RtpSenderEgress::NonPacedPacketSender::~NonPacedPacketSender() =
+ default;
+
+void DEPRECATED_RtpSenderEgress::NonPacedPacketSender::EnqueuePackets(
+ std::vector<std::unique_ptr<RtpPacketToSend>> packets) {
+ for (auto& packet : packets) {
+ // Assign sequence numbers, but not for flexfec, which is already running
+ // on an internally maintained sequence number series.
+ if (packet->Ssrc() != sender_->FlexFecSsrc()) {
+ sequence_number_assigner_->Sequence(*packet);
+ }
+ if (!packet->SetExtension<TransportSequenceNumber>(
+ ++transport_sequence_number_)) {
+ --transport_sequence_number_;
+ }
+ packet->ReserveExtension<TransmissionOffset>();
+ packet->ReserveExtension<AbsoluteSendTime>();
+ sender_->SendPacket(packet.get(), PacedPacketInfo());
+ }
+}
+
+DEPRECATED_RtpSenderEgress::DEPRECATED_RtpSenderEgress(
+ const RtpRtcpInterface::Configuration& config,
+ RtpPacketHistory* packet_history)
+ : ssrc_(config.local_media_ssrc),
+ rtx_ssrc_(config.rtx_send_ssrc),
+ flexfec_ssrc_(config.fec_generator ? config.fec_generator->FecSsrc()
+ : absl::nullopt),
+ populate_network2_timestamp_(config.populate_network2_timestamp),
+ send_side_bwe_with_overhead_(
+ !IsDisabled("WebRTC-SendSideBwe-WithOverhead", config.field_trials)),
+ clock_(config.clock),
+ packet_history_(packet_history),
+ transport_(config.outgoing_transport),
+ event_log_(config.event_log),
+ is_audio_(config.audio),
+ need_rtp_packet_infos_(config.need_rtp_packet_infos),
+ transport_feedback_observer_(config.transport_feedback_callback),
+ send_side_delay_observer_(config.send_side_delay_observer),
+ send_packet_observer_(config.send_packet_observer),
+ rtp_stats_callback_(config.rtp_stats_callback),
+ bitrate_callback_(config.send_bitrate_observer),
+ media_has_been_sent_(false),
+ force_part_of_allocation_(false),
+ timestamp_offset_(0),
+ max_delay_it_(send_delays_.end()),
+ sum_delays_ms_(0),
+ total_packet_send_delay_ms_(0),
+ send_rates_(kNumMediaTypes,
+ {kBitrateStatisticsWindowMs, RateStatistics::kBpsScale}),
+ rtp_sequence_number_map_(need_rtp_packet_infos_
+ ? std::make_unique<RtpSequenceNumberMap>(
+ kRtpSequenceNumberMapMaxEntries)
+ : nullptr) {}
+
+void DEPRECATED_RtpSenderEgress::SendPacket(
+ RtpPacketToSend* packet,
+ const PacedPacketInfo& pacing_info) {
+ RTC_DCHECK(packet);
+
+ const uint32_t packet_ssrc = packet->Ssrc();
+ RTC_DCHECK(packet->packet_type().has_value());
+ RTC_DCHECK(HasCorrectSsrc(*packet));
+ Timestamp now = clock_->CurrentTime();
+ int64_t now_ms = now.ms();
+
+ if (is_audio_) {
+#if BWE_TEST_LOGGING_COMPILE_TIME_ENABLE
+ BWE_TEST_LOGGING_PLOT_WITH_SSRC(1, "AudioTotBitrate_kbps", now_ms,
+ GetSendRates().Sum().kbps(), packet_ssrc);
+ BWE_TEST_LOGGING_PLOT_WITH_SSRC(
+ 1, "AudioNackBitrate_kbps", now_ms,
+ GetSendRates()[RtpPacketMediaType::kRetransmission].kbps(),
+ packet_ssrc);
+#endif
+ } else {
+#if BWE_TEST_LOGGING_COMPILE_TIME_ENABLE
+ BWE_TEST_LOGGING_PLOT_WITH_SSRC(1, "VideoTotBitrate_kbps", now_ms,
+ GetSendRates().Sum().kbps(), packet_ssrc);
+ BWE_TEST_LOGGING_PLOT_WITH_SSRC(
+ 1, "VideoNackBitrate_kbps", now_ms,
+ GetSendRates()[RtpPacketMediaType::kRetransmission].kbps(),
+ packet_ssrc);
+#endif
+ }
+
+ PacketOptions options;
+ {
+ MutexLock lock(&lock_);
+ options.included_in_allocation = force_part_of_allocation_;
+
+ if (need_rtp_packet_infos_ &&
+ packet->packet_type() == RtpPacketToSend::Type::kVideo) {
+ RTC_DCHECK(rtp_sequence_number_map_);
+ // Record this video packet in the sequence number info map.
+ const uint32_t timestamp = packet->Timestamp() - timestamp_offset_;
+ bool is_first_packet_of_frame = packet->is_first_packet_of_frame();
+ bool is_last_packet_of_frame = packet->Marker();
+
+ rtp_sequence_number_map_->InsertPacket(
+ packet->SequenceNumber(),
+ RtpSequenceNumberMap::Info(timestamp, is_first_packet_of_frame,
+ is_last_packet_of_frame));
+ }
+ }
+
+ // Bug webrtc:7859. While FEC is invoked from rtp_sender_video, and not after
+ // the pacer, the header modifications below happen after the FEC protection
+ // packets have been calculated, so the corresponding bytes in recovered
+ // packets are corrupted in the same places. This is not an issue for
+ // extensions that are present in every packet (only their content may be
+ // incorrect on recovered packets), but since VideoTimingExtension is not
+ // present in every packet, data after the RTP header may be corrupted if
+ // such packets are FEC-protected.
+ int64_t diff_ms = now_ms - packet->capture_time().ms();
+ if (packet->HasExtension<TransmissionOffset>()) {
+ packet->SetExtension<TransmissionOffset>(kTimestampTicksPerMs * diff_ms);
+ }
+ if (packet->HasExtension<AbsoluteSendTime>()) {
+ packet->SetExtension<AbsoluteSendTime>(AbsoluteSendTime::To24Bits(now));
+ }
+
+ if (packet->HasExtension<VideoTimingExtension>()) {
+ if (populate_network2_timestamp_) {
+ packet->set_network2_time(now);
+ } else {
+ packet->set_pacer_exit_time(now);
+ }
+ }
+
+ const bool is_media = packet->packet_type() == RtpPacketMediaType::kAudio ||
+ packet->packet_type() == RtpPacketMediaType::kVideo;
+
+ // Downstream code actually uses this flag to distinguish between media and
+ // everything else.
+ options.is_retransmit = !is_media;
+ if (auto packet_id = packet->GetExtension<TransportSequenceNumber>()) {
+ options.packet_id = *packet_id;
+ options.included_in_feedback = true;
+ options.included_in_allocation = true;
+ AddPacketToTransportFeedback(*packet_id, *packet, pacing_info);
+ }
+
+ options.additional_data = packet->additional_data();
+
+ if (packet->packet_type() != RtpPacketMediaType::kPadding &&
+ packet->packet_type() != RtpPacketMediaType::kRetransmission) {
+ UpdateDelayStatistics(packet->capture_time().ms(), now_ms, packet_ssrc);
+ UpdateOnSendPacket(options.packet_id, packet->capture_time().ms(),
+ packet_ssrc);
+ }
+
+ const bool send_success = SendPacketToNetwork(*packet, options, pacing_info);
+
+ // Put packet in retransmission history or update pending status even if
+ // actual sending fails.
+ if (is_media && packet->allow_retransmission()) {
+ packet_history_->PutRtpPacket(std::make_unique<RtpPacketToSend>(*packet),
+ now);
+ } else if (packet->retransmitted_sequence_number()) {
+ packet_history_->MarkPacketAsSent(*packet->retransmitted_sequence_number());
+ }
+
+ if (send_success) {
+ MutexLock lock(&lock_);
+ UpdateRtpStats(*packet);
+ media_has_been_sent_ = true;
+ }
+}
+
+void DEPRECATED_RtpSenderEgress::ProcessBitrateAndNotifyObservers() {
+ if (!bitrate_callback_)
+ return;
+
+ MutexLock lock(&lock_);
+ RtpSendRates send_rates = GetSendRatesLocked();
+ bitrate_callback_->Notify(
+ send_rates.Sum().bps(),
+ send_rates[RtpPacketMediaType::kRetransmission].bps(), ssrc_);
+}
+
+RtpSendRates DEPRECATED_RtpSenderEgress::GetSendRates() const {
+ MutexLock lock(&lock_);
+ return GetSendRatesLocked();
+}
+
+RtpSendRates DEPRECATED_RtpSenderEgress::GetSendRatesLocked() const {
+ const int64_t now_ms = clock_->TimeInMilliseconds();
+ RtpSendRates current_rates;
+ for (size_t i = 0; i < kNumMediaTypes; ++i) {
+ RtpPacketMediaType type = static_cast<RtpPacketMediaType>(i);
+ current_rates[type] =
+ DataRate::BitsPerSec(send_rates_[i].Rate(now_ms).value_or(0));
+ }
+ return current_rates;
+}
+
+void DEPRECATED_RtpSenderEgress::GetDataCounters(
+ StreamDataCounters* rtp_stats,
+ StreamDataCounters* rtx_stats) const {
+ MutexLock lock(&lock_);
+ *rtp_stats = rtp_stats_;
+ *rtx_stats = rtx_rtp_stats_;
+}
+
+void DEPRECATED_RtpSenderEgress::ForceIncludeSendPacketsInAllocation(
+ bool part_of_allocation) {
+ MutexLock lock(&lock_);
+ force_part_of_allocation_ = part_of_allocation;
+}
+
+bool DEPRECATED_RtpSenderEgress::MediaHasBeenSent() const {
+ MutexLock lock(&lock_);
+ return media_has_been_sent_;
+}
+
+void DEPRECATED_RtpSenderEgress::SetMediaHasBeenSent(bool media_sent) {
+ MutexLock lock(&lock_);
+ media_has_been_sent_ = media_sent;
+}
+
+void DEPRECATED_RtpSenderEgress::SetTimestampOffset(uint32_t timestamp) {
+ MutexLock lock(&lock_);
+ timestamp_offset_ = timestamp;
+}
+
+std::vector<RtpSequenceNumberMap::Info>
+DEPRECATED_RtpSenderEgress::GetSentRtpPacketInfos(
+ rtc::ArrayView<const uint16_t> sequence_numbers) const {
+ RTC_DCHECK(!sequence_numbers.empty());
+ if (!need_rtp_packet_infos_) {
+ return std::vector<RtpSequenceNumberMap::Info>();
+ }
+
+ std::vector<RtpSequenceNumberMap::Info> results;
+ results.reserve(sequence_numbers.size());
+
+ MutexLock lock(&lock_);
+ for (uint16_t sequence_number : sequence_numbers) {
+ const auto& info = rtp_sequence_number_map_->Get(sequence_number);
+ if (!info) {
+ // At least one sequence number could not be recalled; per the API
+ // contract, return an empty vector.
+ return std::vector<RtpSequenceNumberMap::Info>();
+ }
+ results.push_back(*info);
+ }
+
+ return results;
+}
+
+bool DEPRECATED_RtpSenderEgress::HasCorrectSsrc(
+ const RtpPacketToSend& packet) const {
+ switch (*packet.packet_type()) {
+ case RtpPacketMediaType::kAudio:
+ case RtpPacketMediaType::kVideo:
+ return packet.Ssrc() == ssrc_;
+ case RtpPacketMediaType::kRetransmission:
+ case RtpPacketMediaType::kPadding:
+ // Both padding and retransmission must be on either the media or the
+ // RTX stream.
+ return packet.Ssrc() == rtx_ssrc_ || packet.Ssrc() == ssrc_;
+ case RtpPacketMediaType::kForwardErrorCorrection:
+ // FlexFEC is on separate SSRC, ULPFEC uses media SSRC.
+ return packet.Ssrc() == ssrc_ || packet.Ssrc() == flexfec_ssrc_;
+ }
+ return false;
+}
+
+void DEPRECATED_RtpSenderEgress::AddPacketToTransportFeedback(
+ uint16_t packet_id,
+ const RtpPacketToSend& packet,
+ const PacedPacketInfo& pacing_info) {
+ if (transport_feedback_observer_) {
+ size_t packet_size = packet.payload_size() + packet.padding_size();
+ if (send_side_bwe_with_overhead_) {
+ packet_size = packet.size();
+ }
+
+ RtpPacketSendInfo packet_info;
+ packet_info.media_ssrc = ssrc_;
+ packet_info.transport_sequence_number = packet_id;
+ packet_info.rtp_sequence_number = packet.SequenceNumber();
+ packet_info.length = packet_size;
+ packet_info.pacing_info = pacing_info;
+ packet_info.packet_type = packet.packet_type();
+ transport_feedback_observer_->OnAddPacket(packet_info);
+ }
+}
+
+void DEPRECATED_RtpSenderEgress::UpdateDelayStatistics(int64_t capture_time_ms,
+ int64_t now_ms,
+ uint32_t ssrc) {
+ if (!send_side_delay_observer_ || capture_time_ms <= 0)
+ return;
+
+ int avg_delay_ms = 0;
+ int max_delay_ms = 0;
+ uint64_t total_packet_send_delay_ms = 0;
+ {
+ MutexLock lock(&lock_);
+ // Compute the max and average of the recent capture-to-send delays.
+ // The time complexity of the current approach depends on the distribution
+ // of the delay values. This could be done more efficiently.
+
+ // Remove elements older than kSendSideDelayWindowMs.
+ auto lower_bound =
+ send_delays_.lower_bound(now_ms - kSendSideDelayWindowMs);
+ for (auto it = send_delays_.begin(); it != lower_bound; ++it) {
+ if (max_delay_it_ == it) {
+ max_delay_it_ = send_delays_.end();
+ }
+ sum_delays_ms_ -= it->second;
+ }
+ send_delays_.erase(send_delays_.begin(), lower_bound);
+ if (max_delay_it_ == send_delays_.end()) {
+ // Removed the previous max. Need to recompute.
+ RecomputeMaxSendDelay();
+ }
+
+ // Add the new element.
+ RTC_DCHECK_GE(now_ms, 0);
+ RTC_DCHECK_LE(now_ms, std::numeric_limits<int64_t>::max() / 2);
+ RTC_DCHECK_GE(capture_time_ms, 0);
+ RTC_DCHECK_LE(capture_time_ms, std::numeric_limits<int64_t>::max() / 2);
+ int64_t diff_ms = now_ms - capture_time_ms;
+ RTC_DCHECK_GE(diff_ms, static_cast<int64_t>(0));
+ RTC_DCHECK_LE(diff_ms, std::numeric_limits<int>::max());
+ int new_send_delay = rtc::dchecked_cast<int>(now_ms - capture_time_ms);
+ SendDelayMap::iterator it;
+ bool inserted;
+ std::tie(it, inserted) =
+ send_delays_.insert(std::make_pair(now_ms, new_send_delay));
+ if (!inserted) {
+ // TODO(terelius): If we have multiple delay measurements during the same
+ // millisecond then we keep the most recent one. It is not clear that this
+ // is the right decision, but it preserves an earlier behavior.
+ int previous_send_delay = it->second;
+ sum_delays_ms_ -= previous_send_delay;
+ it->second = new_send_delay;
+ if (max_delay_it_ == it && new_send_delay < previous_send_delay) {
+ RecomputeMaxSendDelay();
+ }
+ }
+ if (max_delay_it_ == send_delays_.end() ||
+ it->second >= max_delay_it_->second) {
+ max_delay_it_ = it;
+ }
+ sum_delays_ms_ += new_send_delay;
+ total_packet_send_delay_ms_ += new_send_delay;
+ total_packet_send_delay_ms = total_packet_send_delay_ms_;
+
+ size_t num_delays = send_delays_.size();
+ RTC_DCHECK(max_delay_it_ != send_delays_.end());
+ max_delay_ms = rtc::dchecked_cast<int>(max_delay_it_->second);
+ int64_t avg_ms = (sum_delays_ms_ + num_delays / 2) / num_delays;
+ RTC_DCHECK_GE(avg_ms, static_cast<int64_t>(0));
+ RTC_DCHECK_LE(avg_ms,
+ static_cast<int64_t>(std::numeric_limits<int>::max()));
+ avg_delay_ms =
+ rtc::dchecked_cast<int>((sum_delays_ms_ + num_delays / 2) / num_delays);
+ }
+ send_side_delay_observer_->SendSideDelayUpdated(
+ avg_delay_ms, max_delay_ms, total_packet_send_delay_ms, ssrc);
+}
+
+void DEPRECATED_RtpSenderEgress::RecomputeMaxSendDelay() {
+ max_delay_it_ = send_delays_.begin();
+ for (auto it = send_delays_.begin(); it != send_delays_.end(); ++it) {
+ if (it->second >= max_delay_it_->second) {
+ max_delay_it_ = it;
+ }
+ }
+}
+
+void DEPRECATED_RtpSenderEgress::UpdateOnSendPacket(int packet_id,
+ int64_t capture_time_ms,
+ uint32_t ssrc) {
+ if (!send_packet_observer_ || capture_time_ms <= 0 || packet_id == -1) {
+ return;
+ }
+
+ send_packet_observer_->OnSendPacket(packet_id, capture_time_ms, ssrc);
+}
+
+bool DEPRECATED_RtpSenderEgress::SendPacketToNetwork(
+ const RtpPacketToSend& packet,
+ const PacketOptions& options,
+ const PacedPacketInfo& pacing_info) {
+ int bytes_sent = -1;
+ if (transport_) {
+ bytes_sent = transport_->SendRtp(packet.data(), packet.size(), options)
+ ? static_cast<int>(packet.size())
+ : -1;
+ if (event_log_ && bytes_sent > 0) {
+ event_log_->Log(std::make_unique<RtcEventRtpPacketOutgoing>(
+ packet, pacing_info.probe_cluster_id));
+ }
+ }
+
+ if (bytes_sent <= 0) {
+ RTC_LOG(LS_WARNING) << "Transport failed to send packet.";
+ return false;
+ }
+ return true;
+}
+
+void DEPRECATED_RtpSenderEgress::UpdateRtpStats(const RtpPacketToSend& packet) {
+ int64_t now_ms = clock_->TimeInMilliseconds();
+
+ StreamDataCounters* counters =
+ packet.Ssrc() == rtx_ssrc_ ? &rtx_rtp_stats_ : &rtp_stats_;
+
+ if (counters->first_packet_time_ms == -1) {
+ counters->first_packet_time_ms = now_ms;
+ }
+
+ if (packet.packet_type() == RtpPacketMediaType::kForwardErrorCorrection) {
+ counters->fec.AddPacket(packet);
+ }
+
+ if (packet.packet_type() == RtpPacketMediaType::kRetransmission) {
+ counters->retransmitted.AddPacket(packet);
+ }
+ counters->transmitted.AddPacket(packet);
+
+ RTC_DCHECK(packet.packet_type().has_value());
+ send_rates_[static_cast<size_t>(*packet.packet_type())].Update(packet.size(),
+ now_ms);
+
+ if (rtp_stats_callback_) {
+ rtp_stats_callback_->DataCountersUpdated(*counters, packet.Ssrc());
+ }
+}
+
+} // namespace webrtc
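The sliding-window bookkeeping in UpdateDelayStatistics() above is easiest
to see in isolation. A simplified standalone sketch of the same windowing,
recomputing the max on demand instead of caching an iterator:

#include <algorithm>
#include <cstdint>
#include <map>

class SendDelayWindow {
 public:
  void AddDelay(int64_t now_ms, int delay_ms) {
    // Drop measurements older than the 1000 ms window.
    auto lower = delays_.lower_bound(now_ms - 1000);
    for (auto it = delays_.begin(); it != lower; ++it)
      sum_ms_ -= it->second;
    delays_.erase(delays_.begin(), lower);
    // On a same-millisecond collision the newest measurement wins, matching
    // the TODO(terelius) note in UpdateDelayStatistics().
    auto [it, inserted] = delays_.insert({now_ms, delay_ms});
    if (!inserted) {
      sum_ms_ -= it->second;
      it->second = delay_ms;
    }
    sum_ms_ += delay_ms;
  }

  int AvgDelayMs() const {
    if (delays_.empty()) return 0;
    return static_cast<int>((sum_ms_ + delays_.size() / 2) / delays_.size());
  }

  int MaxDelayMs() const {
    int max_ms = 0;
    for (const auto& [send_time_ms, delay] : delays_)
      max_ms = std::max(max_ms, delay);
    return max_ms;
  }

 private:
  std::map<int64_t, int> delays_;  // Send time (ms) -> delay (ms).
  int64_t sum_ms_ = 0;
};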
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/deprecated/deprecated_rtp_sender_egress.h b/third_party/libwebrtc/modules/rtp_rtcp/source/deprecated/deprecated_rtp_sender_egress.h
new file mode 100644
index 0000000000..fc440d14f2
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/deprecated/deprecated_rtp_sender_egress.h
@@ -0,0 +1,152 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_RTP_RTCP_SOURCE_DEPRECATED_DEPRECATED_RTP_SENDER_EGRESS_H_
+#define MODULES_RTP_RTCP_SOURCE_DEPRECATED_DEPRECATED_RTP_SENDER_EGRESS_H_
+
+#include <map>
+#include <memory>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/call/transport.h"
+#include "api/rtc_event_log/rtc_event_log.h"
+#include "api/units/data_rate.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "modules/rtp_rtcp/source/packet_sequencer.h"
+#include "modules/rtp_rtcp/source/rtp_packet_history.h"
+#include "modules/rtp_rtcp/source/rtp_packet_to_send.h"
+#include "modules/rtp_rtcp/source/rtp_rtcp_interface.h"
+#include "modules/rtp_rtcp/source/rtp_sequence_number_map.h"
+#include "rtc_base/rate_statistics.h"
+#include "rtc_base/synchronization/mutex.h"
+#include "rtc_base/thread_annotations.h"
+
+namespace webrtc {
+
+class DEPRECATED_RtpSenderEgress {
+ public:
+ // Helper class that redirects packets directly to the send part of this class
+ // without passing through an actual paced sender.
+ class NonPacedPacketSender : public RtpPacketSender {
+ public:
+ NonPacedPacketSender(DEPRECATED_RtpSenderEgress* sender,
+ PacketSequencer* sequence_number_assigner);
+ virtual ~NonPacedPacketSender();
+
+ void EnqueuePackets(
+ std::vector<std::unique_ptr<RtpPacketToSend>> packets) override;
+
+ private:
+ uint16_t transport_sequence_number_;
+ DEPRECATED_RtpSenderEgress* const sender_;
+ PacketSequencer* sequence_number_assigner_;
+ };
+
+ DEPRECATED_RtpSenderEgress(const RtpRtcpInterface::Configuration& config,
+ RtpPacketHistory* packet_history);
+ ~DEPRECATED_RtpSenderEgress() = default;
+
+ void SendPacket(RtpPacketToSend* packet, const PacedPacketInfo& pacing_info)
+ RTC_LOCKS_EXCLUDED(lock_);
+ uint32_t Ssrc() const { return ssrc_; }
+ absl::optional<uint32_t> RtxSsrc() const { return rtx_ssrc_; }
+ absl::optional<uint32_t> FlexFecSsrc() const { return flexfec_ssrc_; }
+
+ void ProcessBitrateAndNotifyObservers() RTC_LOCKS_EXCLUDED(lock_);
+ RtpSendRates GetSendRates() const RTC_LOCKS_EXCLUDED(lock_);
+ void GetDataCounters(StreamDataCounters* rtp_stats,
+ StreamDataCounters* rtx_stats) const
+ RTC_LOCKS_EXCLUDED(lock_);
+
+ void ForceIncludeSendPacketsInAllocation(bool part_of_allocation)
+ RTC_LOCKS_EXCLUDED(lock_);
+ bool MediaHasBeenSent() const RTC_LOCKS_EXCLUDED(lock_);
+ void SetMediaHasBeenSent(bool media_sent) RTC_LOCKS_EXCLUDED(lock_);
+ void SetTimestampOffset(uint32_t timestamp) RTC_LOCKS_EXCLUDED(lock_);
+
+ // For each sequence number in `sequence_numbers`, recall the last RTP packet
+ // which bore it - its timestamp and whether it was the first and/or last
+ // packet in that frame. If all of the given sequence numbers could be
+ // recalled, return a vector with all of them (in corresponding order).
+ // If any could not be recalled, return an empty vector.
+ std::vector<RtpSequenceNumberMap::Info> GetSentRtpPacketInfos(
+ rtc::ArrayView<const uint16_t> sequence_numbers) const
+ RTC_LOCKS_EXCLUDED(lock_);
+
+ private:
+ // Maps send time in milliseconds to send-side delay in milliseconds.
+ // Send-side delay is the difference between transmission time and capture
+ // time.
+ typedef std::map<int64_t, int> SendDelayMap;
+
+ RtpSendRates GetSendRatesLocked() const RTC_EXCLUSIVE_LOCKS_REQUIRED(lock_);
+ bool HasCorrectSsrc(const RtpPacketToSend& packet) const;
+ void AddPacketToTransportFeedback(uint16_t packet_id,
+ const RtpPacketToSend& packet,
+ const PacedPacketInfo& pacing_info);
+ void UpdateDelayStatistics(int64_t capture_time_ms,
+ int64_t now_ms,
+ uint32_t ssrc);
+ void RecomputeMaxSendDelay() RTC_EXCLUSIVE_LOCKS_REQUIRED(lock_);
+ void UpdateOnSendPacket(int packet_id,
+ int64_t capture_time_ms,
+ uint32_t ssrc);
+ // Sends packet on to `transport_`, leaving the RTP module.
+ bool SendPacketToNetwork(const RtpPacketToSend& packet,
+ const PacketOptions& options,
+ const PacedPacketInfo& pacing_info);
+ void UpdateRtpStats(const RtpPacketToSend& packet)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(lock_);
+
+ const uint32_t ssrc_;
+ const absl::optional<uint32_t> rtx_ssrc_;
+ const absl::optional<uint32_t> flexfec_ssrc_;
+ const bool populate_network2_timestamp_;
+ const bool send_side_bwe_with_overhead_;
+ Clock* const clock_;
+ RtpPacketHistory* const packet_history_;
+ Transport* const transport_;
+ RtcEventLog* const event_log_;
+ const bool is_audio_;
+ const bool need_rtp_packet_infos_;
+
+ TransportFeedbackObserver* const transport_feedback_observer_;
+ SendSideDelayObserver* const send_side_delay_observer_;
+ SendPacketObserver* const send_packet_observer_;
+ StreamDataCountersCallback* const rtp_stats_callback_;
+ BitrateStatisticsObserver* const bitrate_callback_;
+
+ mutable Mutex lock_;
+ bool media_has_been_sent_ RTC_GUARDED_BY(lock_);
+ bool force_part_of_allocation_ RTC_GUARDED_BY(lock_);
+ uint32_t timestamp_offset_ RTC_GUARDED_BY(lock_);
+
+ SendDelayMap send_delays_ RTC_GUARDED_BY(lock_);
+ SendDelayMap::const_iterator max_delay_it_ RTC_GUARDED_BY(lock_);
+ // The sum of delays over a kSendSideDelayWindowMs sliding window.
+ int64_t sum_delays_ms_ RTC_GUARDED_BY(lock_);
+ uint64_t total_packet_send_delay_ms_ RTC_GUARDED_BY(lock_);
+ StreamDataCounters rtp_stats_ RTC_GUARDED_BY(lock_);
+ StreamDataCounters rtx_rtp_stats_ RTC_GUARDED_BY(lock_);
+ // One element per value in RtpPacketMediaType, with index matching value.
+ std::vector<RateStatistics> send_rates_ RTC_GUARDED_BY(lock_);
+
+ // Maps sent packets' sequence numbers to a tuple consisting of:
+ // 1. The timestamp, without the randomizing offset mandated by the RFC.
+ // 2. Whether the packet was the first in its frame.
+ // 3. Whether the packet was the last in its frame.
+ const std::unique_ptr<RtpSequenceNumberMap> rtp_sequence_number_map_
+ RTC_GUARDED_BY(lock_);
+};
+
+} // namespace webrtc
+
+#endif // MODULES_RTP_RTCP_SOURCE_DEPRECATED_DEPRECATED_RTP_SENDER_EGRESS_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/dtmf_queue.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/dtmf_queue.cc
new file mode 100644
index 0000000000..df06d2a2f3
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/dtmf_queue.cc
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/dtmf_queue.h"
+
+#include <stddef.h>
+
+#include "rtc_base/checks.h"
+
+namespace {
+constexpr size_t kDtmfOutbandMax = 20;
+}
+
+namespace webrtc {
+DtmfQueue::DtmfQueue() {}
+
+DtmfQueue::~DtmfQueue() {}
+
+bool DtmfQueue::AddDtmf(const Event& event) {
+ MutexLock lock(&dtmf_mutex_);
+ if (queue_.size() >= kDtmfOutbandMax) {
+ return false;
+ }
+ queue_.push_back(event);
+ return true;
+}
+
+bool DtmfQueue::NextDtmf(Event* event) {
+ RTC_DCHECK(event);
+ MutexLock lock(&dtmf_mutex_);
+ if (queue_.empty()) {
+ return false;
+ }
+
+ *event = queue_.front();
+ queue_.pop_front();
+ return true;
+}
+
+bool DtmfQueue::PendingDtmf() const {
+ MutexLock lock(&dtmf_mutex_);
+ return !queue_.empty();
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/dtmf_queue.h b/third_party/libwebrtc/modules/rtp_rtcp/source/dtmf_queue.h
new file mode 100644
index 0000000000..1d1867fd27
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/dtmf_queue.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_RTP_RTCP_SOURCE_DTMF_QUEUE_H_
+#define MODULES_RTP_RTCP_SOURCE_DTMF_QUEUE_H_
+
+#include <stdint.h>
+
+#include <list>
+
+#include "rtc_base/synchronization/mutex.h"
+
+namespace webrtc {
+class DtmfQueue {
+ public:
+ struct Event {
+ uint16_t duration_ms = 0;
+ uint8_t payload_type = 0;
+ uint8_t key = 0;
+ uint8_t level = 0;
+ };
+
+ DtmfQueue();
+ ~DtmfQueue();
+
+ bool AddDtmf(const Event& event);
+ bool NextDtmf(Event* event);
+ bool PendingDtmf() const;
+
+ private:
+ mutable Mutex dtmf_mutex_;
+ std::list<Event> queue_;
+};
+} // namespace webrtc
+
+#endif // MODULES_RTP_RTCP_SOURCE_DTMF_QUEUE_H_
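A minimal usage sketch for the queue; the event field values below are
hypothetical:

#include "modules/rtp_rtcp/source/dtmf_queue.h"

void DtmfSketch(webrtc::DtmfQueue& queue) {
  webrtc::DtmfQueue::Event event;
  event.key = 5;             // DTMF digit "5".
  event.duration_ms = 160;   // Tone duration.
  event.payload_type = 101;  // telephone-event payload type.
  event.level = 10;          // Attenuation level.
  if (!queue.AddDtmf(event)) {
    // Queue is full (at most 20 pending events, per kDtmfOutbandMax).
  }

  webrtc::DtmfQueue::Event next;
  while (queue.NextDtmf(&next)) {
    // Packetize `next` as an RFC 4733 telephone-event.
  }
}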
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/fec_private_tables_bursty.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/fec_private_tables_bursty.cc
new file mode 100644
index 0000000000..9dbc012368
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/fec_private_tables_bursty.cc
@@ -0,0 +1,660 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/fec_private_tables_bursty.h"
+
+namespace {
+// clang-format off
+#define kMaskBursty1_1 \
+ 0x80, 0x00
+
+#define kMaskBursty2_1 \
+ 0xc0, 0x00
+
+#define kMaskBursty2_2 \
+ 0x80, 0x00, \
+ 0xc0, 0x00
+
+#define kMaskBursty3_1 \
+ 0xe0, 0x00
+
+#define kMaskBursty3_2 \
+ 0xc0, 0x00, \
+ 0xa0, 0x00
+
+#define kMaskBursty3_3 \
+ 0x80, 0x00, \
+ 0xc0, 0x00, \
+ 0x60, 0x00
+
+#define kMaskBursty4_1 \
+ 0xf0, 0x00
+
+#define kMaskBursty4_2 \
+ 0xa0, 0x00, \
+ 0xd0, 0x00
+
+#define kMaskBursty4_3 \
+ 0xc0, 0x00, \
+ 0x60, 0x00, \
+ 0x90, 0x00
+
+#define kMaskBursty4_4 \
+ 0x80, 0x00, \
+ 0xc0, 0x00, \
+ 0x60, 0x00, \
+ 0x30, 0x00
+
+#define kMaskBursty5_1 \
+ 0xf8, 0x00
+
+#define kMaskBursty5_2 \
+ 0xd0, 0x00, \
+ 0xa8, 0x00
+
+#define kMaskBursty5_3 \
+ 0x70, 0x00, \
+ 0x90, 0x00, \
+ 0xc8, 0x00
+
+#define kMaskBursty5_4 \
+ 0xc0, 0x00, \
+ 0x60, 0x00, \
+ 0x30, 0x00, \
+ 0x88, 0x00
+
+#define kMaskBursty5_5 \
+ 0x80, 0x00, \
+ 0xc0, 0x00, \
+ 0x60, 0x00, \
+ 0x30, 0x00, \
+ 0x18, 0x00
+
+#define kMaskBursty6_1 \
+ 0xfc, 0x00
+
+#define kMaskBursty6_2 \
+ 0xa8, 0x00, \
+ 0xd4, 0x00
+
+#define kMaskBursty6_3 \
+ 0x94, 0x00, \
+ 0xc8, 0x00, \
+ 0x64, 0x00
+
+#define kMaskBursty6_4 \
+ 0x60, 0x00, \
+ 0x38, 0x00, \
+ 0x88, 0x00, \
+ 0xc4, 0x00
+
+#define kMaskBursty6_5 \
+ 0xc0, 0x00, \
+ 0x60, 0x00, \
+ 0x30, 0x00, \
+ 0x18, 0x00, \
+ 0x84, 0x00
+
+#define kMaskBursty6_6 \
+ 0x80, 0x00, \
+ 0xc0, 0x00, \
+ 0x60, 0x00, \
+ 0x30, 0x00, \
+ 0x18, 0x00, \
+ 0x0c, 0x00
+
+#define kMaskBursty7_1 \
+ 0xfe, 0x00
+
+#define kMaskBursty7_2 \
+ 0xd4, 0x00, \
+ 0xaa, 0x00
+
+#define kMaskBursty7_3 \
+ 0xc8, 0x00, \
+ 0x74, 0x00, \
+ 0x92, 0x00
+
+#define kMaskBursty7_4 \
+ 0x38, 0x00, \
+ 0x8a, 0x00, \
+ 0xc4, 0x00, \
+ 0x62, 0x00
+
+#define kMaskBursty7_5 \
+ 0x60, 0x00, \
+ 0x30, 0x00, \
+ 0x1c, 0x00, \
+ 0x84, 0x00, \
+ 0xc2, 0x00
+
+#define kMaskBursty7_6 \
+ 0xc0, 0x00, \
+ 0x60, 0x00, \
+ 0x30, 0x00, \
+ 0x18, 0x00, \
+ 0x0c, 0x00, \
+ 0x82, 0x00
+
+#define kMaskBursty7_7 \
+ 0x80, 0x00, \
+ 0xc0, 0x00, \
+ 0x60, 0x00, \
+ 0x30, 0x00, \
+ 0x18, 0x00, \
+ 0x0c, 0x00, \
+ 0x06, 0x00
+
+#define kMaskBursty8_1 \
+ 0xff, 0x00
+
+#define kMaskBursty8_2 \
+ 0xaa, 0x00, \
+ 0xd5, 0x00
+
+#define kMaskBursty8_3 \
+ 0x74, 0x00, \
+ 0x92, 0x00, \
+ 0xc9, 0x00
+
+#define kMaskBursty8_4 \
+ 0x8a, 0x00, \
+ 0xc5, 0x00, \
+ 0x62, 0x00, \
+ 0x31, 0x00
+
+#define kMaskBursty8_5 \
+ 0x30, 0x00, \
+ 0x1c, 0x00, \
+ 0x85, 0x00, \
+ 0xc2, 0x00, \
+ 0x61, 0x00
+
+#define kMaskBursty8_6 \
+ 0x60, 0x00, \
+ 0x30, 0x00, \
+ 0x18, 0x00, \
+ 0x0e, 0x00, \
+ 0x82, 0x00, \
+ 0xc1, 0x00
+
+#define kMaskBursty8_7 \
+ 0xc0, 0x00, \
+ 0x60, 0x00, \
+ 0x30, 0x00, \
+ 0x18, 0x00, \
+ 0x0c, 0x00, \
+ 0x06, 0x00, \
+ 0x81, 0x00
+
+#define kMaskBursty8_8 \
+ 0x80, 0x00, \
+ 0xc0, 0x00, \
+ 0x60, 0x00, \
+ 0x30, 0x00, \
+ 0x18, 0x00, \
+ 0x0c, 0x00, \
+ 0x06, 0x00, \
+ 0x03, 0x00
+
+#define kMaskBursty9_1 \
+ 0xff, 0x80
+
+#define kMaskBursty9_2 \
+ 0xd5, 0x00, \
+ 0xaa, 0x80
+
+#define kMaskBursty9_3 \
+ 0x92, 0x00, \
+ 0xc9, 0x00, \
+ 0x74, 0x80
+
+#define kMaskBursty9_4 \
+ 0xc5, 0x00, \
+ 0x62, 0x00, \
+ 0x39, 0x00, \
+ 0x8a, 0x80
+
+#define kMaskBursty9_5 \
+ 0x1c, 0x00, \
+ 0x85, 0x00, \
+ 0xc2, 0x80, \
+ 0x61, 0x00, \
+ 0x30, 0x80
+
+#define kMaskBursty9_6 \
+ 0x30, 0x00, \
+ 0x18, 0x00, \
+ 0x0e, 0x00, \
+ 0x82, 0x80, \
+ 0xc1, 0x00, \
+ 0x60, 0x80
+
+#define kMaskBursty9_7 \
+ 0x60, 0x00, \
+ 0x30, 0x00, \
+ 0x18, 0x00, \
+ 0x0c, 0x00, \
+ 0x07, 0x00, \
+ 0x81, 0x00, \
+ 0xc0, 0x80
+
+#define kMaskBursty9_8 \
+ 0xc0, 0x00, \
+ 0x60, 0x00, \
+ 0x30, 0x00, \
+ 0x18, 0x00, \
+ 0x0c, 0x00, \
+ 0x06, 0x00, \
+ 0x03, 0x00, \
+ 0x80, 0x80
+
+#define kMaskBursty9_9 \
+ 0x80, 0x00, \
+ 0xc0, 0x00, \
+ 0x60, 0x00, \
+ 0x30, 0x00, \
+ 0x18, 0x00, \
+ 0x0c, 0x00, \
+ 0x06, 0x00, \
+ 0x03, 0x00, \
+ 0x01, 0x80
+
+#define kMaskBursty10_1 \
+ 0xff, 0xc0
+
+#define kMaskBursty10_2 \
+ 0xaa, 0x80, \
+ 0xd5, 0x40
+
+#define kMaskBursty10_3 \
+ 0xc9, 0x00, \
+ 0x74, 0x80, \
+ 0x92, 0x40
+
+#define kMaskBursty10_4 \
+ 0x62, 0x00, \
+ 0x39, 0x00, \
+ 0x8a, 0x80, \
+ 0xc5, 0x40
+
+#define kMaskBursty10_5 \
+ 0x85, 0x00, \
+ 0xc2, 0x80, \
+ 0x61, 0x40, \
+ 0x30, 0x80, \
+ 0x18, 0x40
+
+#define kMaskBursty10_6 \
+ 0x18, 0x00, \
+ 0x0e, 0x00, \
+ 0x82, 0x80, \
+ 0xc1, 0x40, \
+ 0x60, 0x80, \
+ 0x30, 0x40
+
+#define kMaskBursty10_7 \
+ 0x30, 0x00, \
+ 0x18, 0x00, \
+ 0x0c, 0x00, \
+ 0x07, 0x00, \
+ 0x81, 0x40, \
+ 0xc0, 0x80, \
+ 0x60, 0x40
+
+#define kMaskBursty10_8 \
+ 0x60, 0x00, \
+ 0x30, 0x00, \
+ 0x18, 0x00, \
+ 0x0c, 0x00, \
+ 0x06, 0x00, \
+ 0x03, 0x00, \
+ 0x80, 0x80, \
+ 0xc0, 0x40
+
+#define kMaskBursty10_9 \
+ 0xc0, 0x00, \
+ 0x60, 0x00, \
+ 0x30, 0x00, \
+ 0x18, 0x00, \
+ 0x0c, 0x00, \
+ 0x06, 0x00, \
+ 0x03, 0x00, \
+ 0x01, 0x80, \
+ 0x80, 0x40
+
+#define kMaskBursty10_10 \
+ 0x80, 0x00, \
+ 0xc0, 0x00, \
+ 0x60, 0x00, \
+ 0x30, 0x00, \
+ 0x18, 0x00, \
+ 0x0c, 0x00, \
+ 0x06, 0x00, \
+ 0x03, 0x00, \
+ 0x01, 0x80, \
+ 0x00, 0xc0
+
+#define kMaskBursty11_1 \
+ 0xff, 0xe0
+
+#define kMaskBursty11_2 \
+ 0xd5, 0x40, \
+ 0xaa, 0xa0
+
+#define kMaskBursty11_3 \
+ 0x74, 0x80, \
+ 0x92, 0x40, \
+ 0xc9, 0x20
+
+#define kMaskBursty11_4 \
+ 0x39, 0x00, \
+ 0x8a, 0x80, \
+ 0xc5, 0x40, \
+ 0x62, 0x20
+
+#define kMaskBursty11_5 \
+ 0xc2, 0xc0, \
+ 0x61, 0x00, \
+ 0x30, 0xa0, \
+ 0x1c, 0x40, \
+ 0x85, 0x20
+
+#define kMaskBursty11_6 \
+ 0x0e, 0x00, \
+ 0x82, 0x80, \
+ 0xc1, 0x40, \
+ 0x60, 0xa0, \
+ 0x30, 0x40, \
+ 0x18, 0x20
+
+#define kMaskBursty11_7 \
+ 0x18, 0x00, \
+ 0x0c, 0x00, \
+ 0x07, 0x00, \
+ 0x81, 0x40, \
+ 0xc0, 0xa0, \
+ 0x60, 0x40, \
+ 0x30, 0x20
+
+#define kMaskBursty11_8 \
+ 0x30, 0x00, \
+ 0x18, 0x00, \
+ 0x0c, 0x00, \
+ 0x06, 0x00, \
+ 0x03, 0x40, \
+ 0x80, 0xa0, \
+ 0xc0, 0x40, \
+ 0x60, 0x20
+
+#define kMaskBursty11_9 \
+ 0x60, 0x00, \
+ 0x30, 0x00, \
+ 0x18, 0x00, \
+ 0x0c, 0x00, \
+ 0x06, 0x00, \
+ 0x03, 0x00, \
+ 0x01, 0x80, \
+ 0x80, 0x40, \
+ 0xc0, 0x20
+
+#define kMaskBursty11_10 \
+ 0xc0, 0x00, \
+ 0x60, 0x00, \
+ 0x30, 0x00, \
+ 0x18, 0x00, \
+ 0x0c, 0x00, \
+ 0x06, 0x00, \
+ 0x03, 0x00, \
+ 0x01, 0x80, \
+ 0x00, 0xc0, \
+ 0x80, 0x20
+
+#define kMaskBursty11_11 \
+ 0x80, 0x00, \
+ 0xc0, 0x00, \
+ 0x60, 0x00, \
+ 0x30, 0x00, \
+ 0x18, 0x00, \
+ 0x0c, 0x00, \
+ 0x06, 0x00, \
+ 0x03, 0x00, \
+ 0x01, 0x80, \
+ 0x00, 0xc0, \
+ 0x00, 0x60
+
+#define kMaskBursty12_1 \
+ 0xff, 0xf0
+
+#define kMaskBursty12_2 \
+ 0xaa, 0xa0, \
+ 0xd5, 0x50
+
+#define kMaskBursty12_3 \
+ 0x92, 0x40, \
+ 0xc9, 0x20, \
+ 0x74, 0x90
+
+#define kMaskBursty12_4 \
+ 0x8a, 0x80, \
+ 0xc5, 0x40, \
+ 0x62, 0x20, \
+ 0x39, 0x10
+
+#define kMaskBursty12_5 \
+ 0x61, 0x00, \
+ 0x30, 0xa0, \
+ 0x1c, 0x50, \
+ 0x85, 0x20, \
+ 0xc2, 0x90
+
+#define kMaskBursty12_6 \
+ 0x82, 0x90, \
+ 0xc1, 0x40, \
+ 0x60, 0xa0, \
+ 0x30, 0x50, \
+ 0x18, 0x20, \
+ 0x0c, 0x10
+
+#define kMaskBursty12_7 \
+ 0x0c, 0x00, \
+ 0x07, 0x00, \
+ 0x81, 0x40, \
+ 0xc0, 0xa0, \
+ 0x60, 0x50, \
+ 0x30, 0x20, \
+ 0x18, 0x10
+
+#define kMaskBursty12_8 \
+ 0x18, 0x00, \
+ 0x0c, 0x00, \
+ 0x06, 0x00, \
+ 0x03, 0x00, \
+ 0x80, 0xa0, \
+ 0xc0, 0x50, \
+ 0x60, 0x20, \
+ 0x30, 0x10
+
+#define kMaskBursty12_9 \
+ 0x30, 0x00, \
+ 0x18, 0x00, \
+ 0x0c, 0x00, \
+ 0x06, 0x00, \
+ 0x03, 0x00, \
+ 0x01, 0x80, \
+ 0x80, 0x50, \
+ 0xc0, 0x20, \
+ 0x60, 0x10
+
+#define kMaskBursty12_10 \
+ 0x60, 0x00, \
+ 0x30, 0x00, \
+ 0x18, 0x00, \
+ 0x0c, 0x00, \
+ 0x06, 0x00, \
+ 0x03, 0x00, \
+ 0x01, 0x80, \
+ 0x00, 0xc0, \
+ 0x80, 0x20, \
+ 0xc0, 0x10
+
+#define kMaskBursty12_11 \
+ 0xc0, 0x00, \
+ 0x60, 0x00, \
+ 0x30, 0x00, \
+ 0x18, 0x00, \
+ 0x0c, 0x00, \
+ 0x06, 0x00, \
+ 0x03, 0x00, \
+ 0x01, 0x80, \
+ 0x00, 0xc0, \
+ 0x00, 0x60, \
+ 0x80, 0x10
+
+#define kMaskBursty12_12 \
+ 0x80, 0x00, \
+ 0xc0, 0x00, \
+ 0x60, 0x00, \
+ 0x30, 0x00, \
+ 0x18, 0x00, \
+ 0x0c, 0x00, \
+ 0x06, 0x00, \
+ 0x03, 0x00, \
+ 0x01, 0x80, \
+ 0x00, 0xc0, \
+ 0x00, 0x60, \
+ 0x00, 0x30
+
+#define kPacketMaskBursty1 1, \
+ kMaskBursty1_1
+
+#define kPacketMaskBursty2 2, \
+ kMaskBursty2_1, \
+ kMaskBursty2_2
+
+#define kPacketMaskBursty3 3, \
+ kMaskBursty3_1, \
+ kMaskBursty3_2, \
+ kMaskBursty3_3
+
+#define kPacketMaskBursty4 4, \
+ kMaskBursty4_1, \
+ kMaskBursty4_2, \
+ kMaskBursty4_3, \
+ kMaskBursty4_4
+
+#define kPacketMaskBursty5 5, \
+ kMaskBursty5_1, \
+ kMaskBursty5_2, \
+ kMaskBursty5_3, \
+ kMaskBursty5_4, \
+ kMaskBursty5_5
+
+#define kPacketMaskBursty6 6, \
+ kMaskBursty6_1, \
+ kMaskBursty6_2, \
+ kMaskBursty6_3, \
+ kMaskBursty6_4, \
+ kMaskBursty6_5, \
+ kMaskBursty6_6
+
+#define kPacketMaskBursty7 7, \
+ kMaskBursty7_1, \
+ kMaskBursty7_2, \
+ kMaskBursty7_3, \
+ kMaskBursty7_4, \
+ kMaskBursty7_5, \
+ kMaskBursty7_6, \
+ kMaskBursty7_7
+
+#define kPacketMaskBursty8 8, \
+ kMaskBursty8_1, \
+ kMaskBursty8_2, \
+ kMaskBursty8_3, \
+ kMaskBursty8_4, \
+ kMaskBursty8_5, \
+ kMaskBursty8_6, \
+ kMaskBursty8_7, \
+ kMaskBursty8_8
+
+#define kPacketMaskBursty9 9, \
+ kMaskBursty9_1, \
+ kMaskBursty9_2, \
+ kMaskBursty9_3, \
+ kMaskBursty9_4, \
+ kMaskBursty9_5, \
+ kMaskBursty9_6, \
+ kMaskBursty9_7, \
+ kMaskBursty9_8, \
+ kMaskBursty9_9
+
+#define kPacketMaskBursty10 10, \
+ kMaskBursty10_1, \
+ kMaskBursty10_2, \
+ kMaskBursty10_3, \
+ kMaskBursty10_4, \
+ kMaskBursty10_5, \
+ kMaskBursty10_6, \
+ kMaskBursty10_7, \
+ kMaskBursty10_8, \
+ kMaskBursty10_9, \
+ kMaskBursty10_10
+
+#define kPacketMaskBursty11 11, \
+ kMaskBursty11_1, \
+ kMaskBursty11_2, \
+ kMaskBursty11_3, \
+ kMaskBursty11_4, \
+ kMaskBursty11_5, \
+ kMaskBursty11_6, \
+ kMaskBursty11_7, \
+ kMaskBursty11_8, \
+ kMaskBursty11_9, \
+ kMaskBursty11_10, \
+ kMaskBursty11_11
+
+#define kPacketMaskBursty12 12, \
+ kMaskBursty12_1, \
+ kMaskBursty12_2, \
+ kMaskBursty12_3, \
+ kMaskBursty12_4, \
+ kMaskBursty12_5, \
+ kMaskBursty12_6, \
+ kMaskBursty12_7, \
+ kMaskBursty12_8, \
+ kMaskBursty12_9, \
+ kMaskBursty12_10, \
+ kMaskBursty12_11, \
+ kMaskBursty12_12
+
+// clang-format on
+} // namespace
+
+namespace webrtc {
+namespace fec_private_tables {
+
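+// Table layout (inferred from the kPacketMaskBursty* macros above): the
+// first byte is the number of mask collections (12). For each k = 1..12, a
+// byte holding k is followed by the k packet masks for that k; the mask for
+// m FEC packets consists of m rows of 2 bytes each. Lookups via
+// internal::LookUpInFecTable() use zero-based indices (k - 1, m - 1), as
+// exercised in fec_private_tables_bursty_unittest.cc.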
+const uint8_t kPacketMaskBurstyTbl[] = {
+ 12,
+ kPacketMaskBursty1,
+ kPacketMaskBursty2,
+ kPacketMaskBursty3,
+ kPacketMaskBursty4,
+ kPacketMaskBursty5,
+ kPacketMaskBursty6,
+ kPacketMaskBursty7,
+ kPacketMaskBursty8,
+ kPacketMaskBursty9,
+ kPacketMaskBursty10,
+ kPacketMaskBursty11,
+ kPacketMaskBursty12,
+};
+
+} // namespace fec_private_tables
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/fec_private_tables_bursty.h b/third_party/libwebrtc/modules/rtp_rtcp/source/fec_private_tables_bursty.h
new file mode 100644
index 0000000000..217d9505e1
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/fec_private_tables_bursty.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_RTP_RTCP_SOURCE_FEC_PRIVATE_TABLES_BURSTY_H_
+#define MODULES_RTP_RTCP_SOURCE_FEC_PRIVATE_TABLES_BURSTY_H_
+
+// This file contains a set of packet masks for the FEC code. The masks in
+// this table are specifically designed to favor recovery under bursty/
+// consecutive-loss network conditions, at the cost of worse recovery under
+// random losses. These packet masks are currently defined to protect up to
+// 12 media packets. They have the following property: for any packet mask
+// defined by the parameters (k,m), where k = number of media packets and
+// m = number of FEC packets, all "consecutive" losses of size <= m are
+// completely recoverable. By consecutive losses we mean consecutive with
+// respect to the sequence-number ordering of the list of (media and FEC)
+// packets. The difference between these masks (`kFecMaskBursty`) and the
+// `kFecMaskRandom` type, defined in fec_private_tables_random.h, is more
+// significant for longer codes (i.e., more packets/symbols in the code, so
+// larger (k,m), i.e., k > 4, m > 3).
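+//
+// Worked example (illustrative, reading each mask row MSB-first with bit i
+// covering media packet i): for (k,m) = (4,2), kMaskBursty4_2 in
+// fec_private_tables_bursty.cc has the rows
+//   FEC packet 0: 0xa0 = 1010 0000b, protecting media packets {0, 2}
+//   FEC packet 1: 0xd0 = 1101 0000b, protecting media packets {0, 1, 3}
+// A consecutive loss of media packets {0, 1} (size 2 <= m) is recoverable:
+// row 0 has a single unknown (packet 0) and recovers it; row 1 is then left
+// with a single unknown (packet 1) and recovers that one as well.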
+
+#include <stdint.h>
+
+namespace webrtc {
+namespace fec_private_tables {
+
+extern const uint8_t kPacketMaskBurstyTbl[];
+
+} // namespace fec_private_tables
+} // namespace webrtc
+#endif // MODULES_RTP_RTCP_SOURCE_FEC_PRIVATE_TABLES_BURSTY_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/fec_private_tables_bursty_unittest.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/fec_private_tables_bursty_unittest.cc
new file mode 100644
index 0000000000..c62f7d5606
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/fec_private_tables_bursty_unittest.cc
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/fec_private_tables_bursty.h"
+
+#include "modules/rtp_rtcp/source/fec_private_tables_random.h"
+#include "modules/rtp_rtcp/source/forward_error_correction_internal.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+constexpr uint8_t kMaskRandom15_6[] = {0x82, 0x08, 0x41, 0x04, 0x20, 0x82,
+ 0x10, 0x40, 0x08, 0x20, 0x04, 0x10};
+} // namespace
+
+namespace fec_private_tables {
+
+using internal::LookUpInFecTable;
+
+TEST(FecTable, TestBurstyLookup) {
+ rtc::ArrayView<const uint8_t> result;
+ result = LookUpInFecTable(&kPacketMaskBurstyTbl[0], 0, 0);
+ // Should match kMaskBursty1_1.
+ EXPECT_EQ(2u, result.size());
+ EXPECT_EQ(0x80u, result[0]);
+
+ result = LookUpInFecTable(&kPacketMaskBurstyTbl[0], 3, 0);
+ // Should match kMaskBursty4_1.
+ EXPECT_EQ(2u, result.size());
+ EXPECT_EQ(0xf0u, result[0]);
+ EXPECT_EQ(0x00u, result[1]);
+
+ result = LookUpInFecTable(&kPacketMaskBurstyTbl[0], 1, 1);
+ // Should match kMaskBursty2_2.
+ EXPECT_EQ(4u, result.size());
+ EXPECT_EQ(0x80u, result[0]);
+ EXPECT_EQ(0xc0u, result[2]);
+
+ result = LookUpInFecTable(&kPacketMaskBurstyTbl[0], 11, 11);
+ // Should match kMaskBursty12_12.
+ EXPECT_EQ(24u, result.size());
+ EXPECT_EQ(0x80u, result[0]);
+ EXPECT_EQ(0x30u, result[23]);
+}
+
+TEST(FecTable, TestRandomLookup) {
+ rtc::ArrayView<const uint8_t> result;
+ result = LookUpInFecTable(&kPacketMaskRandomTbl[0], 0, 0);
+ EXPECT_EQ(2u, result.size());
+ EXPECT_EQ(0x80u, result[0]);
+ EXPECT_EQ(0x00u, result[1]);
+
+ result = LookUpInFecTable(&kPacketMaskRandomTbl[0], 4, 1);
+ // kMaskRandom5_2.
+ EXPECT_EQ(4u, result.size());
+ EXPECT_EQ(0xa8u, result[0]);
+ EXPECT_EQ(0xd0u, result[2]);
+}
+
+TEST(FecTable, TestRandomGenerated) {
+ FecMaskType fec_mask_type = kFecMaskRandom;
+ int num_media_packets = 15;
+ int num_fec_packets = 6;
+ size_t mask_size = sizeof(kMaskRandom15_6) / sizeof(uint8_t);
+ internal::PacketMaskTable mask_table(fec_mask_type, num_media_packets);
+ rtc::ArrayView<const uint8_t> mask =
+ mask_table.LookUp(num_media_packets, num_fec_packets);
+ EXPECT_EQ(mask.size(), mask_size);
+ for (size_t i = 0; i < mask_size; ++i) {
+ EXPECT_EQ(mask[i], kMaskRandom15_6[i]);
+ }
+}
+
+} // namespace fec_private_tables
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/fec_private_tables_random.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/fec_private_tables_random.cc
new file mode 100644
index 0000000000..3cac5db17b
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/fec_private_tables_random.cc
@@ -0,0 +1,660 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/fec_private_tables_random.h"
+
+namespace {
+// clang-format off
+#define kMaskRandom1_1 \
+ 0x80, 0x00
+
+#define kMaskRandom2_1 \
+ 0xc0, 0x00
+
+#define kMaskRandom2_2 \
+ 0xc0, 0x00, \
+ 0x80, 0x00
+
+#define kMaskRandom3_1 \
+ 0xe0, 0x00
+
+#define kMaskRandom3_2 \
+ 0xc0, 0x00, \
+ 0xa0, 0x00
+
+#define kMaskRandom3_3 \
+ 0xc0, 0x00, \
+ 0xa0, 0x00, \
+ 0x60, 0x00
+
+#define kMaskRandom4_1 \
+ 0xf0, 0x00
+
+#define kMaskRandom4_2 \
+ 0xc0, 0x00, \
+ 0xb0, 0x00
+
+#define kMaskRandom4_3 \
+ 0xc0, 0x00, \
+ 0xb0, 0x00, \
+ 0x60, 0x00
+
+#define kMaskRandom4_4 \
+ 0xc0, 0x00, \
+ 0xa0, 0x00, \
+ 0x30, 0x00, \
+ 0x50, 0x00
+
+#define kMaskRandom5_1 \
+ 0xf8, 0x00
+
+#define kMaskRandom5_2 \
+ 0xa8, 0x00, \
+ 0xd0, 0x00
+
+#define kMaskRandom5_3 \
+ 0xb0, 0x00, \
+ 0xc8, 0x00, \
+ 0x50, 0x00
+
+#define kMaskRandom5_4 \
+ 0xc8, 0x00, \
+ 0xb0, 0x00, \
+ 0x50, 0x00, \
+ 0x28, 0x00
+
+#define kMaskRandom5_5 \
+ 0xc0, 0x00, \
+ 0x30, 0x00, \
+ 0x18, 0x00, \
+ 0xa0, 0x00, \
+ 0x48, 0x00
+
+#define kMaskRandom6_1 \
+ 0xfc, 0x00
+
+#define kMaskRandom6_2 \
+ 0xa8, 0x00, \
+ 0xd4, 0x00
+
+#define kMaskRandom6_3 \
+ 0xd0, 0x00, \
+ 0x68, 0x00, \
+ 0xa4, 0x00
+
+#define kMaskRandom6_4 \
+ 0xa8, 0x00, \
+ 0x58, 0x00, \
+ 0x64, 0x00, \
+ 0x94, 0x00
+
+#define kMaskRandom6_5 \
+ 0xa8, 0x00, \
+ 0x84, 0x00, \
+ 0x64, 0x00, \
+ 0x90, 0x00, \
+ 0x58, 0x00
+
+#define kMaskRandom6_6 \
+ 0x98, 0x00, \
+ 0x64, 0x00, \
+ 0x50, 0x00, \
+ 0x14, 0x00, \
+ 0xa8, 0x00, \
+ 0xe0, 0x00
+
+#define kMaskRandom7_1 \
+ 0xfe, 0x00
+
+#define kMaskRandom7_2 \
+ 0xd4, 0x00, \
+ 0xaa, 0x00
+
+#define kMaskRandom7_3 \
+ 0xd0, 0x00, \
+ 0xaa, 0x00, \
+ 0x64, 0x00
+
+#define kMaskRandom7_4 \
+ 0xd0, 0x00, \
+ 0xaa, 0x00, \
+ 0x64, 0x00, \
+ 0x1c, 0x00
+
+#define kMaskRandom7_5 \
+ 0x0c, 0x00, \
+ 0xb0, 0x00, \
+ 0x1a, 0x00, \
+ 0xc4, 0x00, \
+ 0x62, 0x00
+
+#define kMaskRandom7_6 \
+ 0x8c, 0x00, \
+ 0x4a, 0x00, \
+ 0x64, 0x00, \
+ 0xd0, 0x00, \
+ 0xa0, 0x00, \
+ 0x32, 0x00
+
+#define kMaskRandom7_7 \
+ 0x4a, 0x00, \
+ 0x94, 0x00, \
+ 0x1a, 0x00, \
+ 0xc4, 0x00, \
+ 0x28, 0x00, \
+ 0xc2, 0x00, \
+ 0x34, 0x00
+
+#define kMaskRandom8_1 \
+ 0xff, 0x00
+
+#define kMaskRandom8_2 \
+ 0xaa, 0x00, \
+ 0xd5, 0x00
+
+#define kMaskRandom8_3 \
+ 0xc5, 0x00, \
+ 0x92, 0x00, \
+ 0x6a, 0x00
+
+#define kMaskRandom8_4 \
+ 0x45, 0x00, \
+ 0xb4, 0x00, \
+ 0x6a, 0x00, \
+ 0x89, 0x00
+
+#define kMaskRandom8_5 \
+ 0x8c, 0x00, \
+ 0x92, 0x00, \
+ 0x2b, 0x00, \
+ 0x51, 0x00, \
+ 0x64, 0x00
+
+#define kMaskRandom8_6 \
+ 0xa1, 0x00, \
+ 0x52, 0x00, \
+ 0x91, 0x00, \
+ 0x2a, 0x00, \
+ 0xc4, 0x00, \
+ 0x4c, 0x00
+
+#define kMaskRandom8_7 \
+ 0x15, 0x00, \
+ 0xc2, 0x00, \
+ 0x25, 0x00, \
+ 0x62, 0x00, \
+ 0x58, 0x00, \
+ 0x8c, 0x00, \
+ 0xa3, 0x00
+
+#define kMaskRandom8_8 \
+ 0x25, 0x00, \
+ 0x8a, 0x00, \
+ 0x91, 0x00, \
+ 0x68, 0x00, \
+ 0x32, 0x00, \
+ 0x43, 0x00, \
+ 0xc4, 0x00, \
+ 0x1c, 0x00
+
+#define kMaskRandom9_1 \
+ 0xff, 0x80
+
+#define kMaskRandom9_2 \
+ 0xaa, 0x80, \
+ 0xd5, 0x00
+
+#define kMaskRandom9_3 \
+ 0xa5, 0x00, \
+ 0xc8, 0x00, \
+ 0x52, 0x80
+
+#define kMaskRandom9_4 \
+ 0xa2, 0x00, \
+ 0xc9, 0x00, \
+ 0x52, 0x80, \
+ 0x24, 0x80
+
+#define kMaskRandom9_5 \
+ 0x8c, 0x00, \
+ 0x25, 0x00, \
+ 0x92, 0x80, \
+ 0x41, 0x80, \
+ 0x58, 0x00
+
+#define kMaskRandom9_6 \
+ 0x84, 0x80, \
+ 0x27, 0x00, \
+ 0x51, 0x80, \
+ 0x1a, 0x00, \
+ 0x68, 0x00, \
+ 0x89, 0x00
+
+#define kMaskRandom9_7 \
+ 0x8c, 0x00, \
+ 0x47, 0x00, \
+ 0x81, 0x80, \
+ 0x12, 0x80, \
+ 0x58, 0x00, \
+ 0x28, 0x80, \
+ 0xb4, 0x00
+
+#define kMaskRandom9_8 \
+ 0x2c, 0x00, \
+ 0x91, 0x00, \
+ 0x40, 0x80, \
+ 0x06, 0x80, \
+ 0xc8, 0x00, \
+ 0x45, 0x00, \
+ 0x30, 0x80, \
+ 0xa2, 0x00
+
+#define kMaskRandom9_9 \
+ 0x4c, 0x00, \
+ 0x62, 0x00, \
+ 0x91, 0x00, \
+ 0x42, 0x80, \
+ 0xa4, 0x00, \
+ 0x13, 0x00, \
+ 0x30, 0x80, \
+ 0x88, 0x80, \
+ 0x09, 0x00
+
+#define kMaskRandom10_1 \
+ 0xff, 0xc0
+
+#define kMaskRandom10_10 \
+ 0x4c, 0x00, \
+ 0x51, 0x00, \
+ 0xa0, 0x40, \
+ 0x04, 0xc0, \
+ 0x03, 0x80, \
+ 0x86, 0x00, \
+ 0x29, 0x00, \
+ 0x42, 0x40, \
+ 0x98, 0x00, \
+ 0x30, 0x80
+
+#define kMaskRandom10_2 \
+ 0xaa, 0x80, \
+ 0xd5, 0x40
+
+#define kMaskRandom10_3 \
+ 0xa4, 0x40, \
+ 0xc9, 0x00, \
+ 0x52, 0x80
+
+#define kMaskRandom10_4 \
+ 0xca, 0x00, \
+ 0x32, 0x80, \
+ 0xa1, 0x40, \
+ 0x55, 0x00
+
+#define kMaskRandom10_5 \
+ 0xca, 0x00, \
+ 0x32, 0x80, \
+ 0xa1, 0x40, \
+ 0x55, 0x00, \
+ 0x08, 0xc0
+
+#define kMaskRandom10_6 \
+ 0x0e, 0x00, \
+ 0x33, 0x00, \
+ 0x10, 0xc0, \
+ 0x45, 0x40, \
+ 0x88, 0x80, \
+ 0xe0, 0x00
+
+#define kMaskRandom10_7 \
+ 0x46, 0x00, \
+ 0x33, 0x00, \
+ 0x80, 0xc0, \
+ 0x0c, 0x40, \
+ 0x28, 0x80, \
+ 0x94, 0x00, \
+ 0xc1, 0x00
+
+#define kMaskRandom10_8 \
+ 0x2c, 0x00, \
+ 0x81, 0x80, \
+ 0xa0, 0x40, \
+ 0x05, 0x40, \
+ 0x18, 0x80, \
+ 0xc2, 0x00, \
+ 0x22, 0x80, \
+ 0x50, 0x40
+
+#define kMaskRandom10_9 \
+ 0x4c, 0x00, \
+ 0x23, 0x00, \
+ 0x88, 0xc0, \
+ 0x21, 0x40, \
+ 0x52, 0x80, \
+ 0x94, 0x00, \
+ 0x26, 0x00, \
+ 0x48, 0x40, \
+ 0x91, 0x80
+
+#define kMaskRandom11_1 \
+ 0xff, 0xe0
+
+#define kMaskRandom11_10 \
+ 0x64, 0x40, \
+ 0x51, 0x40, \
+ 0xa9, 0x00, \
+ 0x04, 0xc0, \
+ 0xd0, 0x00, \
+ 0x82, 0x40, \
+ 0x21, 0x20, \
+ 0x0c, 0x20, \
+ 0x4a, 0x00, \
+ 0x12, 0xa0
+
+#define kMaskRandom11_11 \
+ 0x46, 0x40, \
+ 0x33, 0x20, \
+ 0x99, 0x00, \
+ 0x05, 0x80, \
+ 0x80, 0xa0, \
+ 0x84, 0x40, \
+ 0x40, 0x60, \
+ 0x0a, 0x80, \
+ 0x68, 0x00, \
+ 0x10, 0x20, \
+ 0x30, 0x40
+
+#define kMaskRandom11_2 \
+ 0xec, 0xc0, \
+ 0x9b, 0xa0
+
+#define kMaskRandom11_3 \
+ 0xca, 0xc0, \
+ 0xf1, 0x40, \
+ 0xb6, 0x20
+
+#define kMaskRandom11_4 \
+ 0xc4, 0xc0, \
+ 0x31, 0x60, \
+ 0x4b, 0x20, \
+ 0x2c, 0xa0
+
+#define kMaskRandom11_5 \
+ 0x86, 0x80, \
+ 0x23, 0x20, \
+ 0x16, 0x20, \
+ 0x4c, 0x20, \
+ 0x41, 0xc0
+
+#define kMaskRandom11_6 \
+ 0x64, 0x40, \
+ 0x51, 0x40, \
+ 0x0c, 0xa0, \
+ 0xa1, 0x20, \
+ 0x12, 0xa0, \
+ 0x8a, 0x40
+
+#define kMaskRandom11_7 \
+ 0x46, 0x40, \
+ 0x33, 0x20, \
+ 0x91, 0x80, \
+ 0xa4, 0x20, \
+ 0x50, 0xa0, \
+ 0x84, 0xc0, \
+ 0x09, 0x60
+
+#define kMaskRandom11_8 \
+ 0x0c, 0x80, \
+ 0x80, 0x60, \
+ 0xa0, 0x80, \
+ 0x05, 0x40, \
+ 0x43, 0x00, \
+ 0x1a, 0x00, \
+ 0x60, 0x20, \
+ 0x14, 0x20
+
+#define kMaskRandom11_9 \
+ 0x46, 0x40, \
+ 0x62, 0x60, \
+ 0x8c, 0x00, \
+ 0x01, 0x60, \
+ 0x07, 0x80, \
+ 0xa0, 0x80, \
+ 0x18, 0xa0, \
+ 0x91, 0x00, \
+ 0x78, 0x00
+
+#define kMaskRandom12_1 \
+ 0xff, 0xf0
+
+#define kMaskRandom12_10 \
+ 0x51, 0x40, \
+ 0x45, 0x10, \
+ 0x80, 0xd0, \
+ 0x24, 0x20, \
+ 0x0a, 0x20, \
+ 0x00, 0xe0, \
+ 0xb8, 0x00, \
+ 0x09, 0x10, \
+ 0x56, 0x00, \
+ 0xa2, 0x80
+
+#define kMaskRandom12_11 \
+ 0x53, 0x60, \
+ 0x21, 0x30, \
+ 0x10, 0x90, \
+ 0x00, 0x70, \
+ 0x0c, 0x10, \
+ 0x40, 0xc0, \
+ 0x6a, 0x00, \
+ 0x86, 0x00, \
+ 0x24, 0x80, \
+ 0x89, 0x00, \
+ 0xc0, 0x20
+
+#define kMaskRandom12_12 \
+ 0x10, 0x60, \
+ 0x02, 0x30, \
+ 0x40, 0x50, \
+ 0x21, 0x80, \
+ 0x81, 0x10, \
+ 0x14, 0x80, \
+ 0x98, 0x00, \
+ 0x08, 0x90, \
+ 0x62, 0x00, \
+ 0x24, 0x20, \
+ 0x8a, 0x00, \
+ 0x84, 0x40
+
+#define kMaskRandom12_2 \
+ 0xec, 0xc0, \
+ 0x93, 0xb0
+
+#define kMaskRandom12_3 \
+ 0x9b, 0x80, \
+ 0x4f, 0x10, \
+ 0x3c, 0x60
+
+#define kMaskRandom12_4 \
+ 0x8b, 0x20, \
+ 0x14, 0xb0, \
+ 0x22, 0xd0, \
+ 0x45, 0x50
+
+#define kMaskRandom12_5 \
+ 0x53, 0x60, \
+ 0x64, 0x20, \
+ 0x0c, 0xc0, \
+ 0x82, 0xa0, \
+ 0x09, 0x30
+
+#define kMaskRandom12_6 \
+ 0x51, 0x40, \
+ 0xc5, 0x10, \
+ 0x21, 0x80, \
+ 0x12, 0x30, \
+ 0x08, 0xe0, \
+ 0x2e, 0x00
+
+#define kMaskRandom12_7 \
+ 0x53, 0x60, \
+ 0x21, 0x30, \
+ 0x90, 0x90, \
+ 0x02, 0x50, \
+ 0x06, 0xa0, \
+ 0x2c, 0x00, \
+ 0x88, 0x60
+
+#define kMaskRandom12_8 \
+ 0x20, 0x60, \
+ 0x80, 0x30, \
+ 0x42, 0x40, \
+ 0x01, 0x90, \
+ 0x14, 0x10, \
+ 0x0a, 0x80, \
+ 0x38, 0x00, \
+ 0xc5, 0x00
+
+#define kMaskRandom12_9 \
+ 0x53, 0x60, \
+ 0xe4, 0x20, \
+ 0x24, 0x40, \
+ 0xa1, 0x10, \
+ 0x18, 0x30, \
+ 0x03, 0x90, \
+ 0x8a, 0x10, \
+ 0x04, 0x90, \
+ 0x00, 0xe0
+
+#define kPacketMaskRandom1 1, \
+ kMaskRandom1_1
+
+#define kPacketMaskRandom2 2, \
+ kMaskRandom2_1, \
+ kMaskRandom2_2
+
+#define kPacketMaskRandom3 3, \
+ kMaskRandom3_1, \
+ kMaskRandom3_2, \
+ kMaskRandom3_3
+
+#define kPacketMaskRandom4 4, \
+ kMaskRandom4_1, \
+ kMaskRandom4_2, \
+ kMaskRandom4_3, \
+ kMaskRandom4_4
+
+#define kPacketMaskRandom5 5, \
+ kMaskRandom5_1, \
+ kMaskRandom5_2, \
+ kMaskRandom5_3, \
+ kMaskRandom5_4, \
+ kMaskRandom5_5
+
+#define kPacketMaskRandom6 6, \
+ kMaskRandom6_1, \
+ kMaskRandom6_2, \
+ kMaskRandom6_3, \
+ kMaskRandom6_4, \
+ kMaskRandom6_5, \
+ kMaskRandom6_6
+
+#define kPacketMaskRandom7 7, \
+ kMaskRandom7_1, \
+ kMaskRandom7_2, \
+ kMaskRandom7_3, \
+ kMaskRandom7_4, \
+ kMaskRandom7_5, \
+ kMaskRandom7_6, \
+ kMaskRandom7_7
+
+#define kPacketMaskRandom8 8, \
+ kMaskRandom8_1, \
+ kMaskRandom8_2, \
+ kMaskRandom8_3, \
+ kMaskRandom8_4, \
+ kMaskRandom8_5, \
+ kMaskRandom8_6, \
+ kMaskRandom8_7, \
+ kMaskRandom8_8
+
+#define kPacketMaskRandom9 9, \
+ kMaskRandom9_1, \
+ kMaskRandom9_2, \
+ kMaskRandom9_3, \
+ kMaskRandom9_4, \
+ kMaskRandom9_5, \
+ kMaskRandom9_6, \
+ kMaskRandom9_7, \
+ kMaskRandom9_8, \
+ kMaskRandom9_9
+
+#define kPacketMaskRandom10 10, \
+ kMaskRandom10_1, \
+ kMaskRandom10_2, \
+ kMaskRandom10_3, \
+ kMaskRandom10_4, \
+ kMaskRandom10_5, \
+ kMaskRandom10_6, \
+ kMaskRandom10_7, \
+ kMaskRandom10_8, \
+ kMaskRandom10_9, \
+ kMaskRandom10_10
+
+#define kPacketMaskRandom11 11, \
+ kMaskRandom11_1, \
+ kMaskRandom11_2, \
+ kMaskRandom11_3, \
+ kMaskRandom11_4, \
+ kMaskRandom11_5, \
+ kMaskRandom11_6, \
+ kMaskRandom11_7, \
+ kMaskRandom11_8, \
+ kMaskRandom11_9, \
+ kMaskRandom11_10, \
+ kMaskRandom11_11
+
+#define kPacketMaskRandom12 12, \
+ kMaskRandom12_1, \
+ kMaskRandom12_2, \
+ kMaskRandom12_3, \
+ kMaskRandom12_4, \
+ kMaskRandom12_5, \
+ kMaskRandom12_6, \
+ kMaskRandom12_7, \
+ kMaskRandom12_8, \
+ kMaskRandom12_9, \
+ kMaskRandom12_10, \
+ kMaskRandom12_11, \
+ kMaskRandom12_12
+
+// clang-format on
+} // namespace
+
+namespace webrtc {
+namespace fec_private_tables {
+
+const uint8_t kPacketMaskRandomTbl[] = {
+ 12,
+ kPacketMaskRandom1, // 2 byte entries.
+ kPacketMaskRandom2,
+ kPacketMaskRandom3,
+ kPacketMaskRandom4,
+ kPacketMaskRandom5,
+ kPacketMaskRandom6,
+ kPacketMaskRandom7,
+ kPacketMaskRandom8,
+ kPacketMaskRandom9,
+ kPacketMaskRandom10,
+ kPacketMaskRandom11,
+ kPacketMaskRandom12,
+};
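+
+// Usage sketch (mirroring fec_private_tables_bursty_unittest.cc; the lookup
+// indices are zero-based (num_media_packets - 1, num_fec_packets - 1)):
+//
+//   rtc::ArrayView<const uint8_t> mask =
+//       internal::LookUpInFecTable(&kPacketMaskRandomTbl[0], 4, 1);
+//   // `mask` now spans kMaskRandom5_2: {0xa8, 0x00, 0xd0, 0x00}.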
+
+} // namespace fec_private_tables
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/fec_private_tables_random.h b/third_party/libwebrtc/modules/rtp_rtcp/source/fec_private_tables_random.h
new file mode 100644
index 0000000000..cc7b92984c
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/fec_private_tables_random.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_RTP_RTCP_SOURCE_FEC_PRIVATE_TABLES_RANDOM_H_
+#define MODULES_RTP_RTCP_SOURCE_FEC_PRIVATE_TABLES_RANDOM_H_
+
+// This file contains a set of packet masks for the FEC code. The masks in
+// this table are specifically designed to favor recovery under random loss.
+// These packet masks are defined to protect up to a maximum of 48 media
+// packets.
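+// The precomputed table in fec_private_tables_random.cc covers codes with up
+// to 12 media packets; masks for larger codes are generated at run time (see
+// internal::PacketMaskTable in forward_error_correction_internal.h, which is
+// exercised for 15 media packets in fec_private_tables_bursty_unittest.cc).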
+
+#include <stdint.h>
+
+namespace webrtc {
+namespace fec_private_tables {
+
+extern const uint8_t kPacketMaskRandomTbl[];
+
+} // namespace fec_private_tables
+} // namespace webrtc
+#endif // MODULES_RTP_RTCP_SOURCE_FEC_PRIVATE_TABLES_RANDOM_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/fec_test_helper.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/fec_test_helper.cc
new file mode 100644
index 0000000000..23e66c23bf
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/fec_test_helper.cc
@@ -0,0 +1,230 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/fec_test_helper.h"
+
+#include <memory>
+#include <utility>
+
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "modules/rtp_rtcp/source/byte_io.h"
+#include "modules/rtp_rtcp/source/rtp_packet.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace test {
+namespace fec {
+
+namespace {
+
+constexpr uint8_t kRtpMarkerBitMask = 0x80;
+
+constexpr uint8_t kFecPayloadType = 96;
+constexpr uint8_t kRedPayloadType = 97;
+constexpr uint8_t kVp8PayloadType = 120;
+
+constexpr int kPacketTimestampIncrement = 3000;
+} // namespace
+
+MediaPacketGenerator::MediaPacketGenerator(uint32_t min_packet_size,
+ uint32_t max_packet_size,
+ uint32_t ssrc,
+ Random* random)
+ : min_packet_size_(min_packet_size),
+ max_packet_size_(max_packet_size),
+ ssrc_(ssrc),
+ random_(random) {}
+
+MediaPacketGenerator::~MediaPacketGenerator() = default;
+
+ForwardErrorCorrection::PacketList MediaPacketGenerator::ConstructMediaPackets(
+ int num_media_packets,
+ uint16_t start_seq_num) {
+ RTC_DCHECK_GT(num_media_packets, 0);
+ uint16_t seq_num = start_seq_num;
+ int time_stamp = random_->Rand<int>();
+
+ ForwardErrorCorrection::PacketList media_packets;
+
+ for (int i = 0; i < num_media_packets; ++i) {
+ std::unique_ptr<ForwardErrorCorrection::Packet> media_packet(
+ new ForwardErrorCorrection::Packet());
+ media_packet->data.SetSize(
+ random_->Rand(min_packet_size_, max_packet_size_));
+
+ uint8_t* data = media_packet->data.MutableData();
+ // Generate random values for the first 2 bytes.
+ data[0] = random_->Rand<uint8_t>();
+ data[1] = random_->Rand<uint8_t>();
+
+ // The first two bits are assumed to be 10 by the FEC encoder.
+ // In fact the FEC decoder will set the first two bits to 10 regardless of
+ // what they actually were. Set the first two bits to 10 so that a memcmp
+ // can be performed for the whole restored packet.
+ data[0] |= 0x80;
+ data[0] &= 0xbf;
+
+ // FEC is applied to a whole frame.
+ // A frame is signaled by multiple packets without the marker bit set,
+ // followed by the last packet of the frame, which has the marker bit set.
+ // Only push one (fake) frame to the FEC.
+ data[1] &= 0x7f;
+
+ webrtc::ByteWriter<uint16_t>::WriteBigEndian(&data[2], seq_num);
+ webrtc::ByteWriter<uint32_t>::WriteBigEndian(&data[4], time_stamp);
+ webrtc::ByteWriter<uint32_t>::WriteBigEndian(&data[8], ssrc_);
+
+ // Generate random values for payload.
+ for (size_t j = 12; j < media_packet->data.size(); ++j)
+ data[j] = random_->Rand<uint8_t>();
+ seq_num++;
+ media_packets.push_back(std::move(media_packet));
+ }
+ // Last packet, set marker bit.
+ ForwardErrorCorrection::Packet* media_packet = media_packets.back().get();
+ RTC_DCHECK(media_packet);
+ media_packet->data.MutableData()[1] |= 0x80;
+
+ next_seq_num_ = seq_num;
+
+ return media_packets;
+}
+
+ForwardErrorCorrection::PacketList MediaPacketGenerator::ConstructMediaPackets(
+ int num_media_packets) {
+ return ConstructMediaPackets(num_media_packets, random_->Rand<uint16_t>());
+}
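+
+// Usage sketch (hypothetical values): generate one fake frame of five media
+// packets, each between 100 and 1000 bytes, for SSRC 0x12345678:
+//
+//   Random random(7777u);
+//   MediaPacketGenerator generator(100, 1000, 0x12345678, &random);
+//   ForwardErrorCorrection::PacketList packets =
+//       generator.ConstructMediaPackets(5);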
+
+uint16_t MediaPacketGenerator::GetNextSeqNum() {
+ return next_seq_num_;
+}
+
+AugmentedPacketGenerator::AugmentedPacketGenerator(uint32_t ssrc)
+ : num_packets_(0), ssrc_(ssrc), seq_num_(0), timestamp_(0) {}
+
+void AugmentedPacketGenerator::NewFrame(size_t num_packets) {
+ num_packets_ = num_packets;
+ timestamp_ += kPacketTimestampIncrement;
+}
+
+uint16_t AugmentedPacketGenerator::NextPacketSeqNum() {
+ return ++seq_num_;
+}
+
+std::unique_ptr<AugmentedPacket> AugmentedPacketGenerator::NextPacket(
+ size_t offset,
+ size_t length) {
+ std::unique_ptr<AugmentedPacket> packet(new AugmentedPacket());
+
+ packet->data.SetSize(length + kRtpHeaderSize);
+ uint8_t* data = packet->data.MutableData();
+ for (size_t i = 0; i < length; ++i)
+ data[i + kRtpHeaderSize] = offset + i;
+ packet->header.headerLength = kRtpHeaderSize;
+ packet->header.markerBit = (num_packets_ == 1);
+ packet->header.payloadType = kVp8PayloadType;
+ packet->header.sequenceNumber = seq_num_;
+ packet->header.timestamp = timestamp_;
+ packet->header.ssrc = ssrc_;
+ WriteRtpHeader(packet->header, data);
+ ++seq_num_;
+ --num_packets_;
+
+ return packet;
+}
+
+void AugmentedPacketGenerator::WriteRtpHeader(const RTPHeader& header,
+ uint8_t* data) {
+ data[0] = 0x80; // Version 2.
+ data[1] = header.payloadType;
+ data[1] |= (header.markerBit ? kRtpMarkerBitMask : 0);
+ ByteWriter<uint16_t>::WriteBigEndian(data + 2, header.sequenceNumber);
+ ByteWriter<uint32_t>::WriteBigEndian(data + 4, header.timestamp);
+ ByteWriter<uint32_t>::WriteBigEndian(data + 8, header.ssrc);
+}
+
+FlexfecPacketGenerator::FlexfecPacketGenerator(uint32_t media_ssrc,
+ uint32_t flexfec_ssrc)
+ : AugmentedPacketGenerator(media_ssrc),
+ flexfec_ssrc_(flexfec_ssrc),
+ flexfec_seq_num_(0),
+ flexfec_timestamp_(0) {}
+
+std::unique_ptr<AugmentedPacket> FlexfecPacketGenerator::BuildFlexfecPacket(
+ const ForwardErrorCorrection::Packet& packet) {
+ RTC_DCHECK_LE(packet.data.size(),
+ static_cast<size_t>(IP_PACKET_SIZE - kRtpHeaderSize));
+
+ RTPHeader header;
+ header.sequenceNumber = flexfec_seq_num_;
+ ++flexfec_seq_num_;
+ header.timestamp = flexfec_timestamp_;
+ flexfec_timestamp_ += kPacketTimestampIncrement;
+ header.ssrc = flexfec_ssrc_;
+
+ std::unique_ptr<AugmentedPacket> packet_with_rtp_header(
+ new AugmentedPacket());
+ packet_with_rtp_header->data.SetSize(kRtpHeaderSize + packet.data.size());
+ WriteRtpHeader(header, packet_with_rtp_header->data.MutableData());
+ memcpy(packet_with_rtp_header->data.MutableData() + kRtpHeaderSize,
+ packet.data.cdata(), packet.data.size());
+
+ return packet_with_rtp_header;
+}
+
+UlpfecPacketGenerator::UlpfecPacketGenerator(uint32_t ssrc)
+ : AugmentedPacketGenerator(ssrc) {}
+
+RtpPacketReceived UlpfecPacketGenerator::BuildMediaRedPacket(
+ const AugmentedPacket& packet,
+ bool is_recovered) {
+ // Create a temporary buffer used to wrap the media packet in RED.
+ rtc::CopyOnWriteBuffer red_buffer;
+ const size_t kHeaderLength = packet.header.headerLength;
+ // Append header.
+ red_buffer.SetData(packet.data.data(), kHeaderLength);
+ // Find payload type and add it as RED header.
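+ // (Per RFC 2198, the final, and here only, RED block header is a single
+ // byte: an F bit of 0 followed by the 7-bit block payload type.)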
+ uint8_t media_payload_type = red_buffer[1] & 0x7F;
+ red_buffer.AppendData({media_payload_type});
+ // Append rest of payload/padding.
+ red_buffer.AppendData(
+ packet.data.Slice(kHeaderLength, packet.data.size() - kHeaderLength));
+
+ RtpPacketReceived red_packet;
+ RTC_CHECK(red_packet.Parse(std::move(red_buffer)));
+ red_packet.SetPayloadType(kRedPayloadType);
+ red_packet.set_recovered(is_recovered);
+
+ return red_packet;
+}
+
+RtpPacketReceived UlpfecPacketGenerator::BuildUlpfecRedPacket(
+ const ForwardErrorCorrection::Packet& packet) {
+ // Create a fake media packet to get a correct RTP header; the payload is
+ // made 1 byte larger to leave room for the RED header.
+ ++num_packets_;
+ std::unique_ptr<AugmentedPacket> fake_packet =
+ NextPacket(0, packet.data.size() + 1);
+
+ RtpPacketReceived red_packet;
+ red_packet.Parse(fake_packet->data);
+ red_packet.SetMarker(false);
+ uint8_t* rtp_payload = red_packet.AllocatePayload(packet.data.size() + 1);
+ rtp_payload[0] = kFecPayloadType;
+ red_packet.SetPayloadType(kRedPayloadType);
+ memcpy(rtp_payload + 1, packet.data.cdata(), packet.data.size());
+ red_packet.set_recovered(false);
+
+ return red_packet;
+}
+
+} // namespace fec
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/fec_test_helper.h b/third_party/libwebrtc/modules/rtp_rtcp/source/fec_test_helper.h
new file mode 100644
index 0000000000..92e09fd44f
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/fec_test_helper.h
@@ -0,0 +1,125 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_RTP_RTCP_SOURCE_FEC_TEST_HELPER_H_
+#define MODULES_RTP_RTCP_SOURCE_FEC_TEST_HELPER_H_
+
+#include <memory>
+
+#include "modules/rtp_rtcp/source/forward_error_correction.h"
+#include "modules/rtp_rtcp/source/rtp_packet_received.h"
+#include "rtc_base/random.h"
+
+namespace webrtc {
+namespace test {
+namespace fec {
+
+struct AugmentedPacket : public ForwardErrorCorrection::Packet {
+ RTPHeader header;
+};
+
+// TODO(brandtr): Consider merging MediaPacketGenerator and
+// AugmentedPacketGenerator into a single class, since their functionality is
+// similar.
+
+// This class generates media packets corresponding to a single frame.
+class MediaPacketGenerator {
+ public:
+ MediaPacketGenerator(uint32_t min_packet_size,
+ uint32_t max_packet_size,
+ uint32_t ssrc,
+ Random* random);
+ ~MediaPacketGenerator();
+
+ // Constructs `num_media_packets` media packets.
+ ForwardErrorCorrection::PacketList ConstructMediaPackets(
+ int num_media_packets,
+ uint16_t start_seq_num);
+ ForwardErrorCorrection::PacketList ConstructMediaPackets(
+ int num_media_packets);
+
+ uint16_t GetNextSeqNum();
+
+ private:
+ uint32_t min_packet_size_;
+ uint32_t max_packet_size_;
+ uint32_t ssrc_;
+ Random* random_;
+
+ ForwardErrorCorrection::PacketList media_packets_;
+ uint16_t next_seq_num_;
+};
+
+// This class generates media packets with a certain payload structure.
+class AugmentedPacketGenerator {
+ public:
+ explicit AugmentedPacketGenerator(uint32_t ssrc);
+
+ // Prepare for generating a new set of packets, corresponding to a frame.
+ void NewFrame(size_t num_packets);
+
+ // Increment and return the newly incremented sequence number.
+ uint16_t NextPacketSeqNum();
+
+ // Return the next packet in the current frame.
+ std::unique_ptr<AugmentedPacket> NextPacket(size_t offset, size_t length);
+
+ protected:
+ // Given `header`, writes the appropriate RTP header fields in `data`.
+ static void WriteRtpHeader(const RTPHeader& header, uint8_t* data);
+
+ // Number of packets left to generate, in the current frame.
+ size_t num_packets_;
+
+ private:
+ uint32_t ssrc_;
+ uint16_t seq_num_;
+ uint32_t timestamp_;
+};
+
+// This class generates media and FlexFEC packets for a single frame.
+class FlexfecPacketGenerator : public AugmentedPacketGenerator {
+ public:
+ FlexfecPacketGenerator(uint32_t media_ssrc, uint32_t flexfec_ssrc);
+
+ // Creates a new AugmentedPacket (with RTP headers) from a
+ // FlexFEC packet (without RTP headers).
+ std::unique_ptr<AugmentedPacket> BuildFlexfecPacket(
+ const ForwardErrorCorrection::Packet& packet);
+
+ private:
+ uint32_t flexfec_ssrc_;
+ uint16_t flexfec_seq_num_;
+ uint32_t flexfec_timestamp_;
+};
+
+// This class generates media and ULPFEC packets (both encapsulated in RED)
+// for a single frame.
+class UlpfecPacketGenerator : public AugmentedPacketGenerator {
+ public:
+ explicit UlpfecPacketGenerator(uint32_t ssrc);
+
+ // Creates a new RtpPacket with the RED header added to the packet.
+ static RtpPacketReceived BuildMediaRedPacket(const AugmentedPacket& packet,
+ bool is_recovered);
+
+ // Creates a new RtpPacket with FEC payload and RED header. Does this by
+ // creating a new fake media AugmentedPacket, clearing the marker bit and
+ // adding a RED header. Finally, it replaces the payload with the contents
+ // of `packet.data`.
+ RtpPacketReceived BuildUlpfecRedPacket(
+ const ForwardErrorCorrection::Packet& packet);
+};
+
+} // namespace fec
+} // namespace test
+} // namespace webrtc
+
+#endif // MODULES_RTP_RTCP_SOURCE_FEC_TEST_HELPER_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/flexfec_header_reader_writer.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/flexfec_header_reader_writer.cc
new file mode 100644
index 0000000000..2fbb3caae8
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/flexfec_header_reader_writer.cc
@@ -0,0 +1,320 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/flexfec_header_reader_writer.h"
+
+#include <string.h>
+
+#include "api/scoped_refptr.h"
+#include "modules/rtp_rtcp/source/byte_io.h"
+#include "modules/rtp_rtcp/source/forward_error_correction_internal.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+namespace {
+
+// Maximum number of media packets that can be protected in one batch.
+constexpr size_t kMaxMediaPackets = 48; // Since we are reusing ULPFEC masks.
+
+// Maximum number of media packets tracked by FEC decoder.
+// Maintain a sufficiently large tracking window (larger than
+// `kMaxMediaPackets`) to account for packet reordering in the pacer/network.
+constexpr size_t kMaxTrackedMediaPackets = 4 * kMaxMediaPackets;
+
+// Maximum number of FEC packets stored inside ForwardErrorCorrection.
+constexpr size_t kMaxFecPackets = kMaxMediaPackets;
+
+// Size (in bytes) of packet masks, given number of K bits set.
+constexpr size_t kFlexfecPacketMaskSizes[] = {2, 6, 14};
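+// (2 bytes hold K-bit 0 plus mask bits 0-14; 6 bytes add K-bit 1 plus bits
+// 15-45; 14 bytes add K-bit 2 plus bits 46-108. See the header schematic in
+// flexfec_header_reader_writer.h.)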
+
+// Size (in bytes) of part of header which is not packet mask specific.
+constexpr size_t kBaseHeaderSize = 12;
+
+// Size (in bytes) of part of header which is stream specific.
+constexpr size_t kStreamSpecificHeaderSize = 6;
+
+// Size (in bytes) of header, given the single stream packet mask size, i.e.
+// the number of K-bits set.
+constexpr size_t kHeaderSizes[] = {
+ kBaseHeaderSize + kStreamSpecificHeaderSize + kFlexfecPacketMaskSizes[0],
+ kBaseHeaderSize + kStreamSpecificHeaderSize + kFlexfecPacketMaskSizes[1],
+ kBaseHeaderSize + kStreamSpecificHeaderSize + kFlexfecPacketMaskSizes[2]};
+
+// We currently only support single-stream protection.
+// TODO(brandtr): Update this when we support multistream protection.
+constexpr uint8_t kSsrcCount = 1;
+
+// There are three reserved bytes that MUST be set to zero in the header.
+constexpr uint32_t kReservedBits = 0;
+
+// TODO(brandtr): Update this when we support multistream protection.
+constexpr size_t kPacketMaskOffset =
+ kBaseHeaderSize + kStreamSpecificHeaderSize;
+
+// Here we count the K-bits as belonging to the packet mask.
+// This can be used in conjunction with FlexfecHeaderWriter::MinPacketMaskSize,
+// which calculates a bound on the needed packet mask size including K-bits,
+// given a packet mask without K-bits.
+size_t FlexfecHeaderSize(size_t packet_mask_size) {
+ RTC_DCHECK_LE(packet_mask_size, kFlexfecPacketMaskSizes[2]);
+ if (packet_mask_size <= kFlexfecPacketMaskSizes[0]) {
+ return kHeaderSizes[0];
+ } else if (packet_mask_size <= kFlexfecPacketMaskSizes[1]) {
+ return kHeaderSizes[1];
+ }
+ return kHeaderSizes[2];
+}
+
+} // namespace
+
+FlexfecHeaderReader::FlexfecHeaderReader()
+ : FecHeaderReader(kMaxTrackedMediaPackets, kMaxFecPackets) {}
+
+FlexfecHeaderReader::~FlexfecHeaderReader() = default;
+
+// TODO(brandtr): Update this function when we support flexible masks,
+// retransmissions, and/or several protected SSRCs.
+bool FlexfecHeaderReader::ReadFecHeader(
+ ForwardErrorCorrection::ReceivedFecPacket* fec_packet) const {
+ if (fec_packet->pkt->data.size() <=
+ kBaseHeaderSize + kStreamSpecificHeaderSize) {
+ RTC_LOG(LS_WARNING) << "Discarding truncated FlexFEC packet.";
+ return false;
+ }
+ uint8_t* const data = fec_packet->pkt->data.MutableData();
+ bool r_bit = (data[0] & 0x80) != 0;
+ if (r_bit) {
+ RTC_LOG(LS_INFO)
+ << "FlexFEC packet with retransmission bit set. We do not yet "
+ "support this, thus discarding the packet.";
+ return false;
+ }
+ bool f_bit = (data[0] & 0x40) != 0;
+ if (f_bit) {
+ RTC_LOG(LS_INFO)
+ << "FlexFEC packet with inflexible generator matrix. We do "
+ "not yet support this, thus discarding packet.";
+ return false;
+ }
+ uint8_t ssrc_count = ByteReader<uint8_t>::ReadBigEndian(&data[8]);
+ if (ssrc_count != 1) {
+ RTC_LOG(LS_INFO)
+ << "FlexFEC packet protecting multiple media SSRCs. We do not "
+ "yet support this, thus discarding packet.";
+ return false;
+ }
+ uint32_t protected_ssrc = ByteReader<uint32_t>::ReadBigEndian(&data[12]);
+ uint16_t seq_num_base = ByteReader<uint16_t>::ReadBigEndian(&data[16]);
+
+ // Parse the FlexFEC packet mask and remove the interleaved K-bits.
+ // (See FEC header schematic in flexfec_header_reader_writer.h.)
+ // We store the packed packet mask in-band, which "destroys" the standards
+ // compliance of the header. That is fine though, since the code that
+ // reads from the header (from this point and onwards) is aware of this.
+ // TODO(brandtr): When the FEC packet classes have been refactored, store
+ // the packed packet masks out-of-band, thus leaving the FlexFEC header as is.
+ //
+ // We treat the mask parts as unsigned integers with host order endianness
+ // in order to simplify the bit shifting between bytes.
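+ //
+ // Example (illustrative values): if the first two mask bytes on the wire
+ // are 0xC5 0x0A, K-bit 0 is set (0xC5 & 0x80), and shifting the 16-bit
+ // value 0xC50A one step left packs the remaining 15 mask bits as
+ // 0x8A 0x14.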
+ if (fec_packet->pkt->data.size() < kHeaderSizes[0]) {
+ RTC_LOG(LS_WARNING) << "Discarding truncated FlexFEC packet.";
+ return false;
+ }
+ uint8_t* const packet_mask = data + kPacketMaskOffset;
+ bool k_bit0 = (packet_mask[0] & 0x80) != 0;
+ uint16_t mask_part0 = ByteReader<uint16_t>::ReadBigEndian(&packet_mask[0]);
+ // Shift away K-bit 0, implicitly clearing the last bit.
+ mask_part0 <<= 1;
+ ByteWriter<uint16_t>::WriteBigEndian(&packet_mask[0], mask_part0);
+ size_t packet_mask_size;
+ if (k_bit0) {
+ // The first K-bit is set, and the packet mask is thus only 2 bytes long.
+ // We have now read the entire FEC header, and the rest of the packet
+ // is payload.
+ packet_mask_size = kFlexfecPacketMaskSizes[0];
+ } else {
+ if (fec_packet->pkt->data.size() < kHeaderSizes[1]) {
+ RTC_LOG(LS_WARNING) << "Discarding truncated FlexFEC packet.";
+ return false;
+ }
+ bool k_bit1 = (packet_mask[2] & 0x80) != 0;
+ // We have already shifted the first two bytes of the packet mask one step
+ // to the left, thus removing K-bit 0. We will now shift the next four bytes
+ // of the packet mask two steps to the left. (One step for the removed
+ // K-bit 0, and one step for the to-be-removed K-bit 1).
+ uint8_t bit15 = (packet_mask[2] >> 6) & 0x01;
+ packet_mask[1] |= bit15;
+ uint32_t mask_part1 = ByteReader<uint32_t>::ReadBigEndian(&packet_mask[2]);
+ // Shift away K-bit 1 and bit 15, implicitly clearing the last two bits.
+ mask_part1 <<= 2;
+ ByteWriter<uint32_t>::WriteBigEndian(&packet_mask[2], mask_part1);
+ if (k_bit1) {
+ // The first K-bit is clear, but the second K-bit is set. The packet
+ // mask is thus 6 bytes long. We have now read the entire FEC header,
+ // and the rest of the packet is payload.
+ packet_mask_size = kFlexfecPacketMaskSizes[1];
+ } else {
+ if (fec_packet->pkt->data.size() < kHeaderSizes[2]) {
+ RTC_LOG(LS_WARNING) << "Discarding truncated FlexFEC packet.";
+ return false;
+ }
+ bool k_bit2 = (packet_mask[6] & 0x80) != 0;
+ if (k_bit2) {
+ // The first and second K-bits are clear, but the third K-bit is set.
+ // The packet mask is thus 14 bytes long. We have now read the entire
+ // FEC header, and the rest of the packet is payload.
+ packet_mask_size = kFlexfecPacketMaskSizes[2];
+ } else {
+ RTC_LOG(LS_WARNING)
+ << "Discarding FlexFEC packet with malformed header.";
+ return false;
+ }
+ // At this point, K-bits 0 and 1 have been removed, and the front-most
+ // part of the FlexFEC packet mask has been packed accordingly. We will
+ // now shift the remaining part of the packet mask three steps to the left.
+ // This corresponds to the (in total) three K-bits, which have been
+ // removed.
+ uint8_t tail_bits = (packet_mask[6] >> 5) & 0x03;
+ packet_mask[5] |= tail_bits;
+ uint64_t mask_part2 =
+ ByteReader<uint64_t>::ReadBigEndian(&packet_mask[6]);
+ // Shift away K-bit 2, bit 46, and bit 47, implicitly clearing the last
+ // three bits.
+ mask_part2 <<= 3;
+ ByteWriter<uint64_t>::WriteBigEndian(&packet_mask[6], mask_part2);
+ }
+ }
+
+ // Store "ULPFECized" packet mask info.
+ fec_packet->fec_header_size = FlexfecHeaderSize(packet_mask_size);
+ fec_packet->protected_ssrc = protected_ssrc;
+ fec_packet->seq_num_base = seq_num_base;
+ fec_packet->packet_mask_offset = kPacketMaskOffset;
+ fec_packet->packet_mask_size = packet_mask_size;
+
+ // In FlexFEC, all media packets are protected in their entirety.
+ fec_packet->protection_length =
+ fec_packet->pkt->data.size() - fec_packet->fec_header_size;
+
+ return true;
+}
+
+FlexfecHeaderWriter::FlexfecHeaderWriter()
+ : FecHeaderWriter(kMaxMediaPackets, kMaxFecPackets, kHeaderSizes[2]) {}
+
+FlexfecHeaderWriter::~FlexfecHeaderWriter() = default;
+
+size_t FlexfecHeaderWriter::MinPacketMaskSize(const uint8_t* packet_mask,
+ size_t packet_mask_size) const {
+ if (packet_mask_size == kUlpfecPacketMaskSizeLBitClear &&
+ (packet_mask[1] & 0x01) == 0) {
+ // Packet mask is 16 bits long, with bit 15 clear.
+ // It can be used as is.
+ return kFlexfecPacketMaskSizes[0];
+ } else if (packet_mask_size == kUlpfecPacketMaskSizeLBitClear) {
+ // Packet mask is 16 bits long, with bit 15 set.
+ // We must expand the packet mask with zeros in the FlexFEC header.
+ return kFlexfecPacketMaskSizes[1];
+ } else if (packet_mask_size == kUlpfecPacketMaskSizeLBitSet &&
+ (packet_mask[5] & 0x03) == 0) {
+ // Packet mask is 48 bits long, with bits 46 and 47 clear.
+ // It can be used as is.
+ return kFlexfecPacketMaskSizes[1];
+ } else if (packet_mask_size == kUlpfecPacketMaskSizeLBitSet) {
+ // Packet mask is 48 bits long, with at least one of bits 46 and 47 set.
+ // We must expand it with zeros.
+ return kFlexfecPacketMaskSizes[2];
+ }
+ RTC_DCHECK_NOTREACHED() << "Incorrect packet mask size: " << packet_mask_size
+ << ".";
+ return kFlexfecPacketMaskSizes[2];
+}
+
+size_t FlexfecHeaderWriter::FecHeaderSize(size_t packet_mask_size) const {
+ return FlexfecHeaderSize(packet_mask_size);
+}
+
+// This function adapts the precomputed ULPFEC packet masks to the
+// FlexFEC header standard. Note that the header size is computed by
+// FecHeaderSize(), so in this function we can be sure that we are
+// writing in space that is intended for the header.
+//
+// TODO(brandtr): Update this function when we support offset-based masks,
+// retransmissions, and protecting multiple SSRCs.
+void FlexfecHeaderWriter::FinalizeFecHeader(
+ uint32_t media_ssrc,
+ uint16_t seq_num_base,
+ const uint8_t* packet_mask,
+ size_t packet_mask_size,
+ ForwardErrorCorrection::Packet* fec_packet) const {
+ uint8_t* data = fec_packet->data.MutableData();
+ data[0] &= 0x7f; // Clear R bit.
+ data[0] &= 0xbf; // Clear F bit.
+ ByteWriter<uint8_t>::WriteBigEndian(&data[8], kSsrcCount);
+ ByteWriter<uint32_t, 3>::WriteBigEndian(&data[9], kReservedBits);
+ ByteWriter<uint32_t>::WriteBigEndian(&data[12], media_ssrc);
+ ByteWriter<uint16_t>::WriteBigEndian(&data[16], seq_num_base);
+ // Adapt ULPFEC packet mask to FlexFEC header.
+ //
+ // We treat the mask parts as unsigned integers with host order endianness
+ // in order to simplify the bit shifting between bytes.
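+ //
+ // Example (illustrative values): a 2-byte ULPFEC mask 0xAB 0xCD has bit 15
+ // set (0xCD & 0x01), so it does not fit the 2-byte FlexFEC mask as is.
+ // 0xABCD is shifted right once to 0x55E6 to make room for K-bit 0 (left
+ // clear), and the mask continues as 0xC0 0x00 0x00 0x00: K-bit 1 set (the
+ // mask ends there) with bit 15 re-inserted directly after it.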
+ uint8_t* const written_packet_mask = data + kPacketMaskOffset;
+ if (packet_mask_size == kUlpfecPacketMaskSizeLBitSet) {
+ // The packet mask is 48 bits long.
+ uint16_t tmp_mask_part0 =
+ ByteReader<uint16_t>::ReadBigEndian(&packet_mask[0]);
+ uint32_t tmp_mask_part1 =
+ ByteReader<uint32_t>::ReadBigEndian(&packet_mask[2]);
+
+ tmp_mask_part0 >>= 1; // Shift, thus clearing K-bit 0.
+ ByteWriter<uint16_t>::WriteBigEndian(&written_packet_mask[0],
+ tmp_mask_part0);
+ tmp_mask_part1 >>= 2; // Shift, thus clearing K-bit 1 and bit 15.
+ ByteWriter<uint32_t>::WriteBigEndian(&written_packet_mask[2],
+ tmp_mask_part1);
+ bool bit15 = (packet_mask[1] & 0x01) != 0;
+ if (bit15)
+ written_packet_mask[2] |= 0x40; // Set bit 15.
+ bool bit46 = (packet_mask[5] & 0x02) != 0;
+ bool bit47 = (packet_mask[5] & 0x01) != 0;
+ if (!bit46 && !bit47) {
+ written_packet_mask[2] |= 0x80; // Set K-bit 1.
+ } else {
+ memset(&written_packet_mask[6], 0, 8); // Clear all trailing bits.
+ written_packet_mask[6] |= 0x80; // Set K-bit 2.
+ if (bit46)
+ written_packet_mask[6] |= 0x40; // Set bit 46.
+ if (bit47)
+ written_packet_mask[6] |= 0x20; // Set bit 47.
+ }
+ } else if (packet_mask_size == kUlpfecPacketMaskSizeLBitClear) {
+ // The packet mask is 16 bits long.
+ uint16_t tmp_mask_part0 =
+ ByteReader<uint16_t>::ReadBigEndian(&packet_mask[0]);
+
+ tmp_mask_part0 >>= 1; // Shift, thus clearing K-bit 0.
+ ByteWriter<uint16_t>::WriteBigEndian(&written_packet_mask[0],
+ tmp_mask_part0);
+ bool bit15 = (packet_mask[1] & 0x01) != 0;
+ if (!bit15) {
+ written_packet_mask[0] |= 0x80; // Set K-bit 0.
+ } else {
+ memset(&written_packet_mask[2], 0U, 4); // Clear all trailing bits.
+ written_packet_mask[2] |= 0x80; // Set K-bit 1.
+ written_packet_mask[2] |= 0x40; // Set bit 15.
+ }
+ } else {
+ RTC_DCHECK_NOTREACHED()
+ << "Incorrect packet mask size: " << packet_mask_size << ".";
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/flexfec_header_reader_writer.h b/third_party/libwebrtc/modules/rtp_rtcp/source/flexfec_header_reader_writer.h
new file mode 100644
index 0000000000..d305c4c288
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/flexfec_header_reader_writer.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_RTP_RTCP_SOURCE_FLEXFEC_HEADER_READER_WRITER_H_
+#define MODULES_RTP_RTCP_SOURCE_FLEXFEC_HEADER_READER_WRITER_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "modules/rtp_rtcp/source/forward_error_correction.h"
+
+namespace webrtc {
+
+// FlexFEC header, minimum 20 bytes.
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// 0 |R|F|P|X| CC |M| PT recovery | length recovery |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// 4 | TS recovery |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// 8 | SSRCCount | reserved |
+// +=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
+// 12 | SSRC_i |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// 16 | SN base_i |k| Mask [0-14] |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// 20 |k| Mask [15-45] (optional) |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// 24 |k| |
+// +-+ Mask [46-108] (optional) |
+// 28 | |
+// +=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
+// : ... next in SSRC_i ... :
+//
+//
+// FlexFEC header in 'inflexible' mode (F = 1), 20 bytes.
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// 0 |0|1|P|X| CC |M| PT recovery | length recovery |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// 4 | TS recovery |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// 8 | SSRCCount | reserved |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// 12 | SSRC_i |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// 16 | SN base_i | M (columns) | N (rows) |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+class FlexfecHeaderReader : public FecHeaderReader {
+ public:
+ FlexfecHeaderReader();
+ ~FlexfecHeaderReader() override;
+
+ bool ReadFecHeader(
+ ForwardErrorCorrection::ReceivedFecPacket* fec_packet) const override;
+};
+
+class FlexfecHeaderWriter : public FecHeaderWriter {
+ public:
+ FlexfecHeaderWriter();
+ ~FlexfecHeaderWriter() override;
+
+ size_t MinPacketMaskSize(const uint8_t* packet_mask,
+ size_t packet_mask_size) const override;
+
+ size_t FecHeaderSize(size_t packet_mask_row_size) const override;
+
+ void FinalizeFecHeader(
+ uint32_t media_ssrc,
+ uint16_t seq_num_base,
+ const uint8_t* packet_mask,
+ size_t packet_mask_size,
+ ForwardErrorCorrection::Packet* fec_packet) const override;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_RTP_RTCP_SOURCE_FLEXFEC_HEADER_READER_WRITER_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/flexfec_header_reader_writer_unittest.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/flexfec_header_reader_writer_unittest.cc
new file mode 100644
index 0000000000..4a24e90ec3
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/flexfec_header_reader_writer_unittest.cc
@@ -0,0 +1,560 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/flexfec_header_reader_writer.h"
+
+#include <string.h>
+
+#include <memory>
+#include <utility>
+
+#include "api/scoped_refptr.h"
+#include "modules/rtp_rtcp/source/byte_io.h"
+#include "modules/rtp_rtcp/source/forward_error_correction.h"
+#include "modules/rtp_rtcp/source/forward_error_correction_internal.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/random.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+namespace {
+
+using Packet = ForwardErrorCorrection::Packet;
+using ReceivedFecPacket = ForwardErrorCorrection::ReceivedFecPacket;
+
+// General. Assume single-stream protection.
+constexpr uint32_t kMediaSsrc = 1254983;
+constexpr uint16_t kMediaStartSeqNum = 825;
+constexpr size_t kMediaPacketLength = 1234;
+constexpr uint32_t kFlexfecSsrc = 52142;
+
+constexpr size_t kFlexfecHeaderSizes[] = {20, 24, 32};
+constexpr size_t kFlexfecPacketMaskOffset = 18;
+constexpr size_t kFlexfecPacketMaskSizes[] = {2, 6, 14};
+constexpr size_t kFlexfecMaxPacketSize = kFlexfecPacketMaskSizes[2];
+
+// Reader tests.
+constexpr uint8_t kNoRBit = 0 << 7;
+constexpr uint8_t kNoFBit = 0 << 6;
+constexpr uint8_t kPtRecovery = 123;
+constexpr uint8_t kLengthRecov[] = {0xab, 0xcd};
+constexpr uint8_t kTsRecovery[] = {0x01, 0x23, 0x45, 0x67};
+constexpr uint8_t kSsrcCount = 1;
+constexpr uint8_t kReservedBits = 0x00;
+constexpr uint8_t kProtSsrc[] = {0x11, 0x22, 0x33, 0x44};
+constexpr uint8_t kSnBase[] = {0xaa, 0xbb};
+constexpr uint8_t kPayloadBits = 0x00;
+
+std::unique_ptr<uint8_t[]> GeneratePacketMask(size_t packet_mask_size,
+ uint64_t seed) {
+ Random random(seed);
+ std::unique_ptr<uint8_t[]> packet_mask(new uint8_t[kFlexfecMaxPacketSize]);
+ memset(packet_mask.get(), 0, kFlexfecMaxPacketSize);
+ for (size_t i = 0; i < packet_mask_size; ++i) {
+ packet_mask[i] = random.Rand<uint8_t>();
+ }
+ return packet_mask;
+}
+
+void ClearBit(size_t index, uint8_t* packet_mask) {
+ packet_mask[index / 8] &= ~(1 << (7 - index % 8));
+}
+
+void SetBit(size_t index, uint8_t* packet_mask) {
+ packet_mask[index / 8] |= (1 << (7 - index % 8));
+}
+
+rtc::scoped_refptr<Packet> WriteHeader(const uint8_t* packet_mask,
+ size_t packet_mask_size) {
+ FlexfecHeaderWriter writer;
+ rtc::scoped_refptr<Packet> written_packet(new Packet());
+ written_packet->data.SetSize(kMediaPacketLength);
+ uint8_t* data = written_packet->data.MutableData();
+ for (size_t i = 0; i < written_packet->data.size(); ++i) {
+ data[i] = i; // Actual content doesn't matter.
+ }
+ writer.FinalizeFecHeader(kMediaSsrc, kMediaStartSeqNum, packet_mask,
+ packet_mask_size, written_packet.get());
+ return written_packet;
+}
+
+std::unique_ptr<ReceivedFecPacket> ReadHeader(const Packet& written_packet) {
+ FlexfecHeaderReader reader;
+ std::unique_ptr<ReceivedFecPacket> read_packet(new ReceivedFecPacket());
+ read_packet->ssrc = kFlexfecSsrc;
+ read_packet->pkt = rtc::scoped_refptr<Packet>(new Packet());
+ read_packet->pkt->data = written_packet.data;
+ EXPECT_TRUE(reader.ReadFecHeader(read_packet.get()));
+ return read_packet;
+}
+
+void VerifyReadHeaders(size_t expected_fec_header_size,
+ const uint8_t* expected_packet_mask,
+ size_t expected_packet_mask_size,
+ const ReceivedFecPacket& read_packet) {
+ EXPECT_EQ(expected_fec_header_size, read_packet.fec_header_size);
+ EXPECT_EQ(ByteReader<uint32_t>::ReadBigEndian(kProtSsrc),
+ read_packet.protected_ssrc);
+ EXPECT_EQ(ByteReader<uint16_t>::ReadBigEndian(kSnBase),
+ read_packet.seq_num_base);
+ const size_t packet_mask_offset = read_packet.packet_mask_offset;
+ EXPECT_EQ(kFlexfecPacketMaskOffset, packet_mask_offset);
+ EXPECT_EQ(expected_packet_mask_size, read_packet.packet_mask_size);
+ EXPECT_EQ(read_packet.pkt->data.size() - expected_fec_header_size,
+ read_packet.protection_length);
+ // Ensure that the K-bits are removed and the packet mask has been packed.
+ EXPECT_THAT(
+ ::testing::make_tuple(read_packet.pkt->data.cdata() + packet_mask_offset,
+ read_packet.packet_mask_size),
+ ::testing::ElementsAreArray(expected_packet_mask,
+ expected_packet_mask_size));
+}
+
+void VerifyFinalizedHeaders(const uint8_t* expected_packet_mask,
+ size_t expected_packet_mask_size,
+ const Packet& written_packet) {
+ const uint8_t* packet = written_packet.data.cdata();
+  EXPECT_EQ(0x00, packet[0] & 0x80);  // R bit clear.
+  EXPECT_EQ(0x00, packet[0] & 0x40);  // F bit clear.
+ EXPECT_EQ(0x01, packet[8]); // SSRCCount = 1.
+ EXPECT_EQ(kMediaSsrc, ByteReader<uint32_t>::ReadBigEndian(packet + 12));
+ EXPECT_EQ(kMediaStartSeqNum,
+ ByteReader<uint16_t>::ReadBigEndian(packet + 16));
+ EXPECT_THAT(::testing::make_tuple(packet + kFlexfecPacketMaskOffset,
+ expected_packet_mask_size),
+ ::testing::ElementsAreArray(expected_packet_mask,
+ expected_packet_mask_size));
+}
+
+void VerifyWrittenAndReadHeaders(size_t expected_fec_header_size,
+ const uint8_t* expected_packet_mask,
+ size_t expected_packet_mask_size,
+ const Packet& written_packet,
+ const ReceivedFecPacket& read_packet) {
+ EXPECT_EQ(kFlexfecSsrc, read_packet.ssrc);
+ EXPECT_EQ(expected_fec_header_size, read_packet.fec_header_size);
+ EXPECT_EQ(kMediaSsrc, read_packet.protected_ssrc);
+ EXPECT_EQ(kMediaStartSeqNum, read_packet.seq_num_base);
+ EXPECT_EQ(kFlexfecPacketMaskOffset, read_packet.packet_mask_offset);
+ ASSERT_EQ(expected_packet_mask_size, read_packet.packet_mask_size);
+ EXPECT_EQ(written_packet.data.size() - expected_fec_header_size,
+ read_packet.protection_length);
+ // Verify that the call to ReadFecHeader did normalize the packet masks.
+ EXPECT_THAT(::testing::make_tuple(
+ read_packet.pkt->data.cdata() + kFlexfecPacketMaskOffset,
+ read_packet.packet_mask_size),
+ ::testing::ElementsAreArray(expected_packet_mask,
+ expected_packet_mask_size));
+ // Verify that the call to ReadFecHeader did not tamper with the payload.
+ EXPECT_THAT(::testing::make_tuple(
+ read_packet.pkt->data.cdata() + read_packet.fec_header_size,
+ read_packet.pkt->data.size() - read_packet.fec_header_size),
+ ::testing::ElementsAreArray(
+ written_packet.data.cdata() + expected_fec_header_size,
+ written_packet.data.size() - expected_fec_header_size));
+}
+
+} // namespace
+
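+// Expository test (an addition for illustration): pins down the MSB-first
+// bit indexing of ClearBit/SetBit and the K-bit removal sketched above.
+TEST(FlexfecHeaderReaderWriterTest, ExpositionHelpersBehaveAsDocumented) {
+  uint8_t mask[2] = {0x00, 0x00};
+  SetBit(0, mask);   // MSB of byte 0.
+  SetBit(15, mask);  // LSB of byte 1.
+  EXPECT_EQ(0x80, mask[0]);
+  EXPECT_EQ(0x01, mask[1]);
+  ClearBit(0, mask);
+  EXPECT_EQ(0x00, mask[0]);
+
+  const auto packed = UnpackTwoByteMaskForExposition(0x88, 0x81);
+  EXPECT_EQ(0x11, packed.first);
+  EXPECT_EQ(0x02, packed.second);
+}
+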
+TEST(FlexfecHeaderReaderTest, ReadsHeaderWithKBit0Set) {
+ constexpr uint8_t kKBit0 = 1 << 7;
+ constexpr size_t kExpectedPacketMaskSize = 2;
+ constexpr size_t kExpectedFecHeaderSize = 20;
+ // clang-format off
+ constexpr uint8_t kFlexfecPktMask[] = {kKBit0 | 0x08, 0x81};
+ constexpr uint8_t kUlpfecPacketMask[] = {0x11, 0x02};
+ // clang-format on
+ constexpr uint8_t kPacketData[] = {
+ kNoRBit | kNoFBit, kPtRecovery, kLengthRecov[0], kLengthRecov[1],
+ kTsRecovery[0], kTsRecovery[1], kTsRecovery[2], kTsRecovery[3],
+ kSsrcCount, kReservedBits, kReservedBits, kReservedBits,
+ kProtSsrc[0], kProtSsrc[1], kProtSsrc[2], kProtSsrc[3],
+ kSnBase[0], kSnBase[1], kFlexfecPktMask[0], kFlexfecPktMask[1],
+ kPayloadBits, kPayloadBits, kPayloadBits, kPayloadBits};
+ const size_t packet_length = sizeof(kPacketData);
+ ReceivedFecPacket read_packet;
+ read_packet.pkt = rtc::scoped_refptr<Packet>(new Packet());
+ read_packet.pkt->data.SetData(kPacketData, packet_length);
+
+ FlexfecHeaderReader reader;
+ EXPECT_TRUE(reader.ReadFecHeader(&read_packet));
+
+ VerifyReadHeaders(kExpectedFecHeaderSize, kUlpfecPacketMask,
+ kExpectedPacketMaskSize, read_packet);
+}
+
+TEST(FlexfecHeaderReaderTest, ReadsHeaderWithKBit1Set) {
+ constexpr uint8_t kKBit0 = 0 << 7;
+ constexpr uint8_t kKBit1 = 1 << 7;
+ constexpr size_t kExpectedPacketMaskSize = 6;
+ constexpr size_t kExpectedFecHeaderSize = 24;
+ // clang-format off
+  constexpr uint8_t kFlexfecPktMask[] = {kKBit0 | 0x48, 0x81,
+                                         kKBit1 | 0x02, 0x11, 0x00, 0x21};
+ constexpr uint8_t kUlpfecPacketMask[] = {0x91, 0x02,
+ 0x08, 0x44, 0x00, 0x84};
+ // clang-format on
+ constexpr uint8_t kPacketData[] = {
+ kNoRBit | kNoFBit, kPtRecovery, kLengthRecov[0], kLengthRecov[1],
+ kTsRecovery[0], kTsRecovery[1], kTsRecovery[2], kTsRecovery[3],
+ kSsrcCount, kReservedBits, kReservedBits, kReservedBits,
+ kProtSsrc[0], kProtSsrc[1], kProtSsrc[2], kProtSsrc[3],
+      kSnBase[0], kSnBase[1], kFlexfecPktMask[0], kFlexfecPktMask[1],
+      kFlexfecPktMask[2], kFlexfecPktMask[3], kFlexfecPktMask[4],
+      kFlexfecPktMask[5], kPayloadBits, kPayloadBits, kPayloadBits,
+      kPayloadBits};
+ const size_t packet_length = sizeof(kPacketData);
+ ReceivedFecPacket read_packet;
+ read_packet.pkt = rtc::scoped_refptr<Packet>(new Packet());
+ read_packet.pkt->data.SetData(kPacketData, packet_length);
+
+ FlexfecHeaderReader reader;
+ EXPECT_TRUE(reader.ReadFecHeader(&read_packet));
+
+ VerifyReadHeaders(kExpectedFecHeaderSize, kUlpfecPacketMask,
+ kExpectedPacketMaskSize, read_packet);
+}
+
+TEST(FlexfecHeaderReaderTest, ReadsHeaderWithKBit2Set) {
+ constexpr uint8_t kKBit0 = 0 << 7;
+ constexpr uint8_t kKBit1 = 0 << 7;
+ constexpr uint8_t kKBit2 = 1 << 7;
+ constexpr size_t kExpectedPacketMaskSize = 14;
+ constexpr size_t kExpectedFecHeaderSize = 32;
+ // clang-format off
+  constexpr uint8_t kFlexfecPktMask[] = {kKBit0 | 0x48, 0x81,
+                                         kKBit1 | 0x02, 0x11, 0x00, 0x21,
+                                         kKBit2 | 0x01, 0x11, 0x11, 0x11,
+                                         0x11, 0x11, 0x11, 0x11};
+ constexpr uint8_t kUlpfecPacketMask[] = {0x91, 0x02,
+ 0x08, 0x44, 0x00, 0x84,
+ 0x08, 0x88, 0x88, 0x88,
+ 0x88, 0x88, 0x88, 0x88};
+ // clang-format on
+ constexpr uint8_t kPacketData[] = {
+ kNoRBit | kNoFBit, kPtRecovery, kLengthRecov[0], kLengthRecov[1],
+ kTsRecovery[0], kTsRecovery[1], kTsRecovery[2], kTsRecovery[3],
+ kSsrcCount, kReservedBits, kReservedBits, kReservedBits,
+ kProtSsrc[0], kProtSsrc[1], kProtSsrc[2], kProtSsrc[3],
+      kSnBase[0], kSnBase[1], kFlexfecPktMask[0], kFlexfecPktMask[1],
+      kFlexfecPktMask[2], kFlexfecPktMask[3], kFlexfecPktMask[4],
+      kFlexfecPktMask[5], kFlexfecPktMask[6], kFlexfecPktMask[7],
+      kFlexfecPktMask[8], kFlexfecPktMask[9], kFlexfecPktMask[10],
+      kFlexfecPktMask[11], kFlexfecPktMask[12], kFlexfecPktMask[13],
+      kPayloadBits, kPayloadBits, kPayloadBits, kPayloadBits};
+ const size_t packet_length = sizeof(kPacketData);
+ ReceivedFecPacket read_packet;
+ read_packet.pkt = rtc::scoped_refptr<Packet>(new Packet());
+ read_packet.pkt->data.SetData(kPacketData, packet_length);
+
+ FlexfecHeaderReader reader;
+ EXPECT_TRUE(reader.ReadFecHeader(&read_packet));
+
+ VerifyReadHeaders(kExpectedFecHeaderSize, kUlpfecPacketMask,
+ kExpectedPacketMaskSize, read_packet);
+}
+
+TEST(FlexfecHeaderReaderTest, ReadPacketWithoutStreamSpecificHeaderShouldFail) {
+ const size_t packet_mask_size = kUlpfecPacketMaskSizeLBitClear;
+ auto packet_mask = GeneratePacketMask(packet_mask_size, 0xabcd);
+ auto written_packet = WriteHeader(packet_mask.get(), packet_mask_size);
+
+ // Simulate short received packet.
+ ReceivedFecPacket read_packet;
+ read_packet.ssrc = kFlexfecSsrc;
+ read_packet.pkt = std::move(written_packet);
+ read_packet.pkt->data.SetSize(12);
+
+ FlexfecHeaderReader reader;
+ EXPECT_FALSE(reader.ReadFecHeader(&read_packet));
+}
+
+TEST(FlexfecHeaderReaderTest, ReadShortPacketWithKBit0SetShouldFail) {
+ const size_t packet_mask_size = kUlpfecPacketMaskSizeLBitClear;
+ auto packet_mask = GeneratePacketMask(packet_mask_size, 0xabcd);
+ auto written_packet = WriteHeader(packet_mask.get(), packet_mask_size);
+
+ // Simulate short received packet.
+ ReceivedFecPacket read_packet;
+ read_packet.ssrc = kFlexfecSsrc;
+ read_packet.pkt = std::move(written_packet);
+ read_packet.pkt->data.SetSize(18);
+
+ FlexfecHeaderReader reader;
+ EXPECT_FALSE(reader.ReadFecHeader(&read_packet));
+}
+
+TEST(FlexfecHeaderReaderTest, ReadShortPacketWithKBit1SetShouldFail) {
+ const size_t packet_mask_size = kUlpfecPacketMaskSizeLBitClear;
+ auto packet_mask = GeneratePacketMask(packet_mask_size, 0xabcd);
+ SetBit(15, packet_mask.get()); // This expands the packet mask "once".
+ auto written_packet = WriteHeader(packet_mask.get(), packet_mask_size);
+
+ // Simulate short received packet.
+ ReceivedFecPacket read_packet;
+ read_packet.ssrc = kFlexfecSsrc;
+ read_packet.pkt = std::move(written_packet);
+ read_packet.pkt->data.SetSize(20);
+
+ FlexfecHeaderReader reader;
+ EXPECT_FALSE(reader.ReadFecHeader(&read_packet));
+}
+
+TEST(FlexfecHeaderReaderTest, ReadShortPacketWithKBit2SetShouldFail) {
+ const size_t packet_mask_size = kUlpfecPacketMaskSizeLBitSet;
+ auto packet_mask = GeneratePacketMask(packet_mask_size, 0xabcd);
+ SetBit(47, packet_mask.get()); // This expands the packet mask "twice".
+ auto written_packet = WriteHeader(packet_mask.get(), packet_mask_size);
+
+ // Simulate short received packet.
+ ReceivedFecPacket read_packet;
+ read_packet.ssrc = kFlexfecSsrc;
+ read_packet.pkt = std::move(written_packet);
+ read_packet.pkt->data.SetSize(24);
+
+ FlexfecHeaderReader reader;
+ EXPECT_FALSE(reader.ReadFecHeader(&read_packet));
+}
+
+TEST(FlexfecHeaderWriterTest, FinalizesHeaderWithKBit0Set) {
+ constexpr size_t kExpectedPacketMaskSize = 2;
+ constexpr uint8_t kFlexfecPacketMask[] = {0x88, 0x81};
+ constexpr uint8_t kUlpfecPacketMask[] = {0x11, 0x02};
+ Packet written_packet;
+ written_packet.data.SetSize(kMediaPacketLength);
+ uint8_t* data = written_packet.data.MutableData();
+ for (size_t i = 0; i < written_packet.data.size(); ++i) {
+ data[i] = i;
+ }
+
+ FlexfecHeaderWriter writer;
+ writer.FinalizeFecHeader(kMediaSsrc, kMediaStartSeqNum, kUlpfecPacketMask,
+ sizeof(kUlpfecPacketMask), &written_packet);
+
+ VerifyFinalizedHeaders(kFlexfecPacketMask, kExpectedPacketMaskSize,
+ written_packet);
+}
+
+TEST(FlexfecHeaderWriterTest, FinalizesHeaderWithKBit1Set) {
+ constexpr size_t kExpectedPacketMaskSize = 6;
+ constexpr uint8_t kFlexfecPacketMask[] = {0x48, 0x81, 0x82, 0x11, 0x00, 0x21};
+ constexpr uint8_t kUlpfecPacketMask[] = {0x91, 0x02, 0x08, 0x44, 0x00, 0x84};
+ Packet written_packet;
+ written_packet.data.SetSize(kMediaPacketLength);
+ uint8_t* data = written_packet.data.MutableData();
+ for (size_t i = 0; i < written_packet.data.size(); ++i) {
+ data[i] = i;
+ }
+
+ FlexfecHeaderWriter writer;
+ writer.FinalizeFecHeader(kMediaSsrc, kMediaStartSeqNum, kUlpfecPacketMask,
+ sizeof(kUlpfecPacketMask), &written_packet);
+
+ VerifyFinalizedHeaders(kFlexfecPacketMask, kExpectedPacketMaskSize,
+ written_packet);
+}
+
+TEST(FlexfecHeaderWriterTest, FinalizesHeaderWithKBit2Set) {
+ constexpr size_t kExpectedPacketMaskSize = 14;
+ constexpr uint8_t kFlexfecPacketMask[] = {
+ 0x11, 0x11, // K-bit 0 clear.
+ 0x11, 0x11, 0x11, 0x10, // K-bit 1 clear.
+ 0xa0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 // K-bit 2 set.
+ };
+ constexpr uint8_t kUlpfecPacketMask[] = {0x22, 0x22, 0x44, 0x44, 0x44, 0x41};
+ Packet written_packet;
+ written_packet.data.SetSize(kMediaPacketLength);
+ uint8_t* data = written_packet.data.MutableData();
+ for (size_t i = 0; i < written_packet.data.size(); ++i) {
+ data[i] = i;
+ }
+
+ FlexfecHeaderWriter writer;
+ writer.FinalizeFecHeader(kMediaSsrc, kMediaStartSeqNum, kUlpfecPacketMask,
+ sizeof(kUlpfecPacketMask), &written_packet);
+
+ VerifyFinalizedHeaders(kFlexfecPacketMask, kExpectedPacketMaskSize,
+ written_packet);
+}
+
+TEST(FlexfecHeaderWriterTest, ContractsShortUlpfecPacketMaskWithBit15Clear) {
+ const size_t packet_mask_size = kUlpfecPacketMaskSizeLBitClear;
+ auto packet_mask = GeneratePacketMask(packet_mask_size, 0xabcd);
+ ClearBit(15, packet_mask.get());
+
+ FlexfecHeaderWriter writer;
+ size_t min_packet_mask_size =
+ writer.MinPacketMaskSize(packet_mask.get(), packet_mask_size);
+
+ EXPECT_EQ(kFlexfecPacketMaskSizes[0], min_packet_mask_size);
+ EXPECT_EQ(kFlexfecHeaderSizes[0], writer.FecHeaderSize(min_packet_mask_size));
+}
+
+TEST(FlexfecHeaderWriterTest, ExpandsShortUlpfecPacketMaskWithBit15Set) {
+ const size_t packet_mask_size = kUlpfecPacketMaskSizeLBitClear;
+ auto packet_mask = GeneratePacketMask(packet_mask_size, 0xabcd);
+ SetBit(15, packet_mask.get());
+
+ FlexfecHeaderWriter writer;
+ size_t min_packet_mask_size =
+ writer.MinPacketMaskSize(packet_mask.get(), packet_mask_size);
+
+ EXPECT_EQ(kFlexfecPacketMaskSizes[1], min_packet_mask_size);
+ EXPECT_EQ(kFlexfecHeaderSizes[1], writer.FecHeaderSize(min_packet_mask_size));
+}
+
+TEST(FlexfecHeaderWriterTest,
+ ContractsLongUlpfecPacketMaskWithBit46ClearBit47Clear) {
+ const size_t packet_mask_size = kUlpfecPacketMaskSizeLBitSet;
+ auto packet_mask = GeneratePacketMask(packet_mask_size, 0xabcd);
+ ClearBit(46, packet_mask.get());
+ ClearBit(47, packet_mask.get());
+
+ FlexfecHeaderWriter writer;
+ size_t min_packet_mask_size =
+ writer.MinPacketMaskSize(packet_mask.get(), packet_mask_size);
+
+ EXPECT_EQ(kFlexfecPacketMaskSizes[1], min_packet_mask_size);
+ EXPECT_EQ(kFlexfecHeaderSizes[1], writer.FecHeaderSize(min_packet_mask_size));
+}
+
+TEST(FlexfecHeaderWriterTest,
+ ExpandsLongUlpfecPacketMaskWithBit46SetBit47Clear) {
+ const size_t packet_mask_size = kUlpfecPacketMaskSizeLBitSet;
+ auto packet_mask = GeneratePacketMask(packet_mask_size, 0xabcd);
+ SetBit(46, packet_mask.get());
+ ClearBit(47, packet_mask.get());
+
+ FlexfecHeaderWriter writer;
+ size_t min_packet_mask_size =
+ writer.MinPacketMaskSize(packet_mask.get(), packet_mask_size);
+
+ EXPECT_EQ(kFlexfecPacketMaskSizes[2], min_packet_mask_size);
+ EXPECT_EQ(kFlexfecHeaderSizes[2], writer.FecHeaderSize(min_packet_mask_size));
+}
+
+TEST(FlexfecHeaderWriterTest,
+ ExpandsLongUlpfecPacketMaskWithBit46ClearBit47Set) {
+ const size_t packet_mask_size = kUlpfecPacketMaskSizeLBitSet;
+ auto packet_mask = GeneratePacketMask(packet_mask_size, 0xabcd);
+ ClearBit(46, packet_mask.get());
+ SetBit(47, packet_mask.get());
+
+ FlexfecHeaderWriter writer;
+ size_t min_packet_mask_size =
+ writer.MinPacketMaskSize(packet_mask.get(), packet_mask_size);
+
+ EXPECT_EQ(kFlexfecPacketMaskSizes[2], min_packet_mask_size);
+ EXPECT_EQ(kFlexfecHeaderSizes[2], writer.FecHeaderSize(min_packet_mask_size));
+}
+
+TEST(FlexfecHeaderWriterTest, ExpandsLongUlpfecPacketMaskWithBit46SetBit47Set) {
+ const size_t packet_mask_size = kUlpfecPacketMaskSizeLBitSet;
+ auto packet_mask = GeneratePacketMask(packet_mask_size, 0xabcd);
+ SetBit(46, packet_mask.get());
+ SetBit(47, packet_mask.get());
+
+ FlexfecHeaderWriter writer;
+ size_t min_packet_mask_size =
+ writer.MinPacketMaskSize(packet_mask.get(), packet_mask_size);
+
+ EXPECT_EQ(kFlexfecPacketMaskSizes[2], min_packet_mask_size);
+ EXPECT_EQ(kFlexfecHeaderSizes[2], writer.FecHeaderSize(min_packet_mask_size));
+}
+
+TEST(FlexfecHeaderReaderWriterTest,
+ WriteAndReadSmallUlpfecPacketHeaderWithMaskBit15Clear) {
+ const size_t packet_mask_size = kUlpfecPacketMaskSizeLBitClear;
+ auto packet_mask = GeneratePacketMask(packet_mask_size, 0xabcd);
+ ClearBit(15, packet_mask.get());
+
+ auto written_packet = WriteHeader(packet_mask.get(), packet_mask_size);
+ auto read_packet = ReadHeader(*written_packet);
+
+ VerifyWrittenAndReadHeaders(kFlexfecHeaderSizes[0], packet_mask.get(),
+ kFlexfecPacketMaskSizes[0], *written_packet,
+ *read_packet);
+}
+
+TEST(FlexfecHeaderReaderWriterTest,
+ WriteAndReadSmallUlpfecPacketHeaderWithMaskBit15Set) {
+ const size_t packet_mask_size = kUlpfecPacketMaskSizeLBitClear;
+ auto packet_mask = GeneratePacketMask(packet_mask_size, 0xabcd);
+ SetBit(15, packet_mask.get());
+
+ auto written_packet = WriteHeader(packet_mask.get(), packet_mask_size);
+ auto read_packet = ReadHeader(*written_packet);
+
+ VerifyWrittenAndReadHeaders(kFlexfecHeaderSizes[1], packet_mask.get(),
+ kFlexfecPacketMaskSizes[1], *written_packet,
+ *read_packet);
+}
+
+TEST(FlexfecHeaderReaderWriterTest,
+ WriteAndReadLargeUlpfecPacketHeaderWithMaskBits46And47Clear) {
+ const size_t packet_mask_size = kUlpfecPacketMaskSizeLBitSet;
+ auto packet_mask = GeneratePacketMask(packet_mask_size, 0xabcd);
+ ClearBit(46, packet_mask.get());
+ ClearBit(47, packet_mask.get());
+
+ auto written_packet = WriteHeader(packet_mask.get(), packet_mask_size);
+ auto read_packet = ReadHeader(*written_packet);
+
+ VerifyWrittenAndReadHeaders(kFlexfecHeaderSizes[1], packet_mask.get(),
+ kFlexfecPacketMaskSizes[1], *written_packet,
+ *read_packet);
+}
+
+TEST(FlexfecHeaderReaderWriterTest,
+ WriteAndReadLargeUlpfecPacketHeaderWithMaskBit46SetBit47Clear) {
+ const size_t packet_mask_size = kUlpfecPacketMaskSizeLBitSet;
+ auto packet_mask = GeneratePacketMask(packet_mask_size, 0xabcd);
+ SetBit(46, packet_mask.get());
+ ClearBit(47, packet_mask.get());
+
+ auto written_packet = WriteHeader(packet_mask.get(), packet_mask_size);
+ auto read_packet = ReadHeader(*written_packet);
+
+ VerifyWrittenAndReadHeaders(kFlexfecHeaderSizes[2], packet_mask.get(),
+ kFlexfecPacketMaskSizes[2], *written_packet,
+ *read_packet);
+}
+
+TEST(FlexfecHeaderReaderWriterTest,
+ WriteAndReadLargeUlpfecPacketHeaderMaskWithBit46ClearBit47Set) {
+ const size_t packet_mask_size = kUlpfecPacketMaskSizeLBitSet;
+ auto packet_mask = GeneratePacketMask(packet_mask_size, 0xabcd);
+ ClearBit(46, packet_mask.get());
+ SetBit(47, packet_mask.get());
+
+ auto written_packet = WriteHeader(packet_mask.get(), packet_mask_size);
+ auto read_packet = ReadHeader(*written_packet);
+
+ VerifyWrittenAndReadHeaders(kFlexfecHeaderSizes[2], packet_mask.get(),
+ kFlexfecPacketMaskSizes[2], *written_packet,
+ *read_packet);
+}
+
+TEST(FlexfecHeaderReaderWriterTest,
+ WriteAndReadLargeUlpfecPacketHeaderWithMaskBits46And47Set) {
+ const size_t packet_mask_size = kUlpfecPacketMaskSizeLBitSet;
+ auto packet_mask = GeneratePacketMask(packet_mask_size, 0xabcd);
+ SetBit(46, packet_mask.get());
+ SetBit(47, packet_mask.get());
+
+ auto written_packet = WriteHeader(packet_mask.get(), packet_mask_size);
+ auto read_packet = ReadHeader(*written_packet);
+
+ VerifyWrittenAndReadHeaders(kFlexfecHeaderSizes[2], packet_mask.get(),
+ kFlexfecPacketMaskSizes[2], *written_packet,
+ *read_packet);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/flexfec_receiver.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/flexfec_receiver.cc
new file mode 100644
index 0000000000..ef1fa53da8
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/flexfec_receiver.cc
@@ -0,0 +1,190 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/include/flexfec_receiver.h"
+
+#include <string.h>
+
+#include "api/array_view.h"
+#include "api/scoped_refptr.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+namespace {
+
+// Minimum header size (in bytes) of a well-formed non-singular FlexFEC packet.
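+// (This corresponds to the smallest FlexFEC header: the fixed 18-byte
+// prefix plus the minimum 2-byte packet mask; see
+// flexfec_header_reader_writer.cc.)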
+constexpr size_t kMinFlexfecHeaderSize = 20;
+
+// How often to log the recovered packets to the text log.
+constexpr int kPacketLogIntervalMs = 10000;
+
+} // namespace
+
+/* Mozilla: Avoid this since it could use GetRealTimeClock().
+FlexfecReceiver::FlexfecReceiver(
+ uint32_t ssrc,
+ uint32_t protected_media_ssrc,
+ RecoveredPacketReceiver* recovered_packet_receiver)
+ : FlexfecReceiver(Clock::GetRealTimeClock(),
+ ssrc,
+ protected_media_ssrc,
+ recovered_packet_receiver) {}
+ */
+
+FlexfecReceiver::FlexfecReceiver(
+ Clock* clock,
+ uint32_t ssrc,
+ uint32_t protected_media_ssrc,
+ RecoveredPacketReceiver* recovered_packet_receiver)
+ : ssrc_(ssrc),
+ protected_media_ssrc_(protected_media_ssrc),
+ erasure_code_(
+ ForwardErrorCorrection::CreateFlexfec(ssrc, protected_media_ssrc)),
+ recovered_packet_receiver_(recovered_packet_receiver),
+ clock_(clock),
+ last_recovered_packet_ms_(-1) {
+ // It's OK to create this object on a different thread/task queue than
+ // the one used during main operation.
+ sequence_checker_.Detach();
+}
+
+FlexfecReceiver::~FlexfecReceiver() = default;
+
+void FlexfecReceiver::OnRtpPacket(const RtpPacketReceived& packet) {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+
+  // If this packet was recovered, it might originate from
+  // ProcessReceivedPacket in this object. To avoid lifetime issues with
+  // `recovered_packets_`, we therefore break the cycle here.
+  // This might reduce decoding efficiency a bit, since we can't distinguish
+  // packets recovered by RTX from packets recovered by FlexFEC.
+ if (packet.recovered())
+ return;
+
+ std::unique_ptr<ForwardErrorCorrection::ReceivedPacket> received_packet =
+ AddReceivedPacket(packet);
+ if (!received_packet)
+ return;
+
+ ProcessReceivedPacket(*received_packet);
+}
+
+FecPacketCounter FlexfecReceiver::GetPacketCounter() const {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+ return packet_counter_;
+}
+
+// TODO(eladalon): Consider using packet.recovered() to avoid processing
+// recovered packets here.
+std::unique_ptr<ForwardErrorCorrection::ReceivedPacket>
+FlexfecReceiver::AddReceivedPacket(const RtpPacketReceived& packet) {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+
+ // RTP packets with a full base header (12 bytes), but without payload,
+ // could conceivably be useful in the decoding. Therefore we check
+ // with a non-strict inequality here.
+ RTC_DCHECK_GE(packet.size(), kRtpHeaderSize);
+
+ // Demultiplex based on SSRC, and insert into erasure code decoder.
+ std::unique_ptr<ForwardErrorCorrection::ReceivedPacket> received_packet(
+ new ForwardErrorCorrection::ReceivedPacket());
+ received_packet->seq_num = packet.SequenceNumber();
+ received_packet->ssrc = packet.Ssrc();
+ if (received_packet->ssrc == ssrc_) {
+ // This is a FlexFEC packet.
+ if (packet.payload_size() < kMinFlexfecHeaderSize) {
+ RTC_LOG(LS_WARNING) << "Truncated FlexFEC packet, discarding.";
+ return nullptr;
+ }
+ received_packet->is_fec = true;
+ ++packet_counter_.num_fec_packets;
+
+ // Insert packet payload into erasure code.
+ received_packet->pkt = rtc::scoped_refptr<ForwardErrorCorrection::Packet>(
+ new ForwardErrorCorrection::Packet());
+ received_packet->pkt->data =
+ packet.Buffer().Slice(packet.headers_size(), packet.payload_size());
+ } else {
+ // This is a media packet, or a FlexFEC packet belonging to some
+ // other FlexFEC stream.
+ if (received_packet->ssrc != protected_media_ssrc_) {
+ return nullptr;
+ }
+ received_packet->is_fec = false;
+
+ // Insert entire packet into erasure code.
+ // Create a copy and fill with zeros all mutable extensions.
+ received_packet->pkt = rtc::scoped_refptr<ForwardErrorCorrection::Packet>(
+ new ForwardErrorCorrection::Packet());
+ RtpPacketReceived packet_copy(packet);
+ packet_copy.ZeroMutableExtensions();
+ received_packet->pkt->data = packet_copy.Buffer();
+ }
+
+ ++packet_counter_.num_packets;
+
+ return received_packet;
+}
+
+// Note that the implementation of this member function and the implementation
+// in UlpfecReceiver::ProcessReceivedFec() are slightly different.
+// This implementation only returns _recovered_ media packets through the
+// callback, whereas the implementation in UlpfecReceiver returns _all inserted_
+// media packets through the callback. The latter behaviour makes sense
+// for ULPFEC, since the ULPFEC receiver is owned by the RtpVideoStreamReceiver.
+// Here, however, the received media pipeline is more decoupled from the
+// FlexFEC decoder, and we therefore do not interfere with the reception
+// of non-recovered media packets.
+void FlexfecReceiver::ProcessReceivedPacket(
+ const ForwardErrorCorrection::ReceivedPacket& received_packet) {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+
+ // Decode.
+ erasure_code_->DecodeFec(received_packet, &recovered_packets_);
+
+ // Return recovered packets through callback.
+ for (const auto& recovered_packet : recovered_packets_) {
+ RTC_CHECK(recovered_packet);
+ if (recovered_packet->returned) {
+ continue;
+ }
+ ++packet_counter_.num_recovered_packets;
+ // Set this flag first, since OnRecoveredPacket may end up here
+ // again, with the same packet.
+ recovered_packet->returned = true;
+ RTC_CHECK_GE(recovered_packet->pkt->data.size(), kRtpHeaderSize);
+ recovered_packet_receiver_->OnRecoveredPacket(
+ recovered_packet->pkt->data.cdata(),
+ recovered_packet->pkt->data.size());
+ uint32_t media_ssrc =
+ ForwardErrorCorrection::ParseSsrc(recovered_packet->pkt->data.data());
+ uint16_t media_seq_num = ForwardErrorCorrection::ParseSequenceNumber(
+ recovered_packet->pkt->data.data());
+ // Periodically log the incoming packets at LS_INFO.
+ int64_t now_ms = clock_->TimeInMilliseconds();
+ bool should_log_periodically =
+ now_ms - last_recovered_packet_ms_ > kPacketLogIntervalMs;
+ if (RTC_LOG_CHECK_LEVEL(LS_VERBOSE) || should_log_periodically) {
+ rtc::LoggingSeverity level =
+ should_log_periodically ? rtc::LS_INFO : rtc::LS_VERBOSE;
+ RTC_LOG_V(level) << "Recovered media packet with SSRC: " << media_ssrc
+ << " seq " << media_seq_num << " recovered length "
+ << recovered_packet->pkt->data.size()
+ << " from FlexFEC stream with SSRC: " << ssrc_;
+ if (should_log_periodically) {
+ last_recovered_packet_ms_ = now_ms;
+ }
+ }
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/flexfec_receiver_unittest.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/flexfec_receiver_unittest.cc
new file mode 100644
index 0000000000..54ed11d64c
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/flexfec_receiver_unittest.cc
@@ -0,0 +1,706 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/include/flexfec_receiver.h"
+
+#include <algorithm>
+#include <memory>
+
+#include "modules/rtp_rtcp/mocks/mock_recovered_packet_receiver.h"
+#include "modules/rtp_rtcp/source/fec_test_helper.h"
+#include "modules/rtp_rtcp/source/forward_error_correction.h"
+#include "modules/rtp_rtcp/source/rtp_packet_received.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+namespace {
+
+using ::testing::_;
+using ::testing::Args;
+using ::testing::ElementsAreArray;
+
+using test::fec::FlexfecPacketGenerator;
+using Packet = ForwardErrorCorrection::Packet;
+using PacketList = ForwardErrorCorrection::PacketList;
+
+constexpr size_t kPayloadLength = 500;
+constexpr uint32_t kFlexfecSsrc = 42984;
+constexpr uint32_t kMediaSsrc = 8353;
+
+RtpPacketReceived ParsePacket(const Packet& packet) {
+ RtpPacketReceived parsed_packet;
+ EXPECT_TRUE(parsed_packet.Parse(packet.data));
+ return parsed_packet;
+}
+
+} // namespace
+
+class FlexfecReceiverForTest : public FlexfecReceiver {
+ public:
+ FlexfecReceiverForTest(uint32_t ssrc,
+ uint32_t protected_media_ssrc,
+ RecoveredPacketReceiver* recovered_packet_receiver)
+ : FlexfecReceiver(Clock::GetRealTimeClock(),
+ ssrc,
+ protected_media_ssrc,
+ recovered_packet_receiver) {}
+ // Expose methods for tests.
+ using FlexfecReceiver::AddReceivedPacket;
+ using FlexfecReceiver::ProcessReceivedPacket;
+};
+
+class FlexfecReceiverTest : public ::testing::Test {
+ protected:
+ FlexfecReceiverTest()
+ : receiver_(kFlexfecSsrc, kMediaSsrc, &recovered_packet_receiver_),
+ erasure_code_(
+ ForwardErrorCorrection::CreateFlexfec(kFlexfecSsrc, kMediaSsrc)),
+ packet_generator_(kMediaSsrc, kFlexfecSsrc) {}
+
+ // Generates `num_media_packets` corresponding to a single frame.
+ void PacketizeFrame(size_t num_media_packets,
+ size_t frame_offset,
+ PacketList* media_packets);
+
+ // Generates `num_fec_packets` FEC packets, given `media_packets`.
+ std::list<Packet*> EncodeFec(const PacketList& media_packets,
+ size_t num_fec_packets);
+
+ FlexfecReceiverForTest receiver_;
+ std::unique_ptr<ForwardErrorCorrection> erasure_code_;
+
+ FlexfecPacketGenerator packet_generator_;
+ ::testing::StrictMock<MockRecoveredPacketReceiver> recovered_packet_receiver_;
+};
+
+void FlexfecReceiverTest::PacketizeFrame(size_t num_media_packets,
+ size_t frame_offset,
+ PacketList* media_packets) {
+ packet_generator_.NewFrame(num_media_packets);
+ for (size_t i = 0; i < num_media_packets; ++i) {
+ std::unique_ptr<Packet> next_packet(
+ packet_generator_.NextPacket(frame_offset + i, kPayloadLength));
+ media_packets->push_back(std::move(next_packet));
+ }
+}
+
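+// Note: `protection_factor` below is the FEC rate in Q8, i.e. on a 0-255
+// scale: 255 * num_fec_packets / num_media_packets. For example, one FEC
+// packet protecting two media packets gives 255 * 1 / 2 = 127.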
+std::list<Packet*> FlexfecReceiverTest::EncodeFec(
+ const PacketList& media_packets,
+ size_t num_fec_packets) {
+ const uint8_t protection_factor =
+ num_fec_packets * 255 / media_packets.size();
+ constexpr int kNumImportantPackets = 0;
+ constexpr bool kUseUnequalProtection = false;
+ constexpr FecMaskType kFecMaskType = kFecMaskRandom;
+ std::list<Packet*> fec_packets;
+ EXPECT_EQ(0, erasure_code_->EncodeFec(
+ media_packets, protection_factor, kNumImportantPackets,
+ kUseUnequalProtection, kFecMaskType, &fec_packets));
+ EXPECT_EQ(num_fec_packets, fec_packets.size());
+ return fec_packets;
+}
+
+TEST_F(FlexfecReceiverTest, ReceivesMediaPacket) {
+ packet_generator_.NewFrame(1);
+ std::unique_ptr<Packet> media_packet(
+ packet_generator_.NextPacket(0, kPayloadLength));
+
+ std::unique_ptr<ForwardErrorCorrection::ReceivedPacket> received_packet =
+ receiver_.AddReceivedPacket(ParsePacket(*media_packet));
+ ASSERT_TRUE(received_packet);
+ receiver_.ProcessReceivedPacket(*received_packet);
+}
+
+TEST_F(FlexfecReceiverTest, ReceivesMediaAndFecPackets) {
+ const size_t kNumMediaPackets = 1;
+ const size_t kNumFecPackets = 1;
+
+ PacketList media_packets;
+ PacketizeFrame(kNumMediaPackets, 0, &media_packets);
+ std::list<Packet*> fec_packets = EncodeFec(media_packets, kNumFecPackets);
+ const auto& media_packet = media_packets.front();
+ auto fec_packet = packet_generator_.BuildFlexfecPacket(*fec_packets.front());
+
+ std::unique_ptr<ForwardErrorCorrection::ReceivedPacket> received_packet =
+ receiver_.AddReceivedPacket(ParsePacket(*media_packet));
+ ASSERT_TRUE(received_packet);
+ receiver_.ProcessReceivedPacket(*received_packet);
+ received_packet = receiver_.AddReceivedPacket(ParsePacket(*fec_packet));
+ ASSERT_TRUE(received_packet);
+ receiver_.ProcessReceivedPacket(*received_packet);
+}
+
+TEST_F(FlexfecReceiverTest, FailsOnTruncatedFecPacket) {
+ const size_t kNumMediaPackets = 1;
+ const size_t kNumFecPackets = 1;
+
+ PacketList media_packets;
+ PacketizeFrame(kNumMediaPackets, 0, &media_packets);
+ std::list<Packet*> fec_packets = EncodeFec(media_packets, kNumFecPackets);
+ const auto& media_packet = media_packets.front();
+ // Simulate truncated FlexFEC payload.
+ fec_packets.front()->data.SetSize(1);
+ auto fec_packet = packet_generator_.BuildFlexfecPacket(*fec_packets.front());
+
+ std::unique_ptr<ForwardErrorCorrection::ReceivedPacket> received_packet =
+ receiver_.AddReceivedPacket(ParsePacket(*media_packet));
+ ASSERT_TRUE(received_packet);
+ receiver_.ProcessReceivedPacket(*received_packet);
+ EXPECT_FALSE(receiver_.AddReceivedPacket(ParsePacket(*fec_packet)));
+}
+
+TEST_F(FlexfecReceiverTest, FailsOnUnknownMediaSsrc) {
+ const size_t kNumMediaPackets = 1;
+
+ PacketList media_packets;
+ PacketizeFrame(kNumMediaPackets, 0, &media_packets);
+ auto& media_packet = media_packets.front();
+ // Corrupt the SSRC.
+ media_packet->data.MutableData()[8] = 0;
+ media_packet->data.MutableData()[9] = 1;
+ media_packet->data.MutableData()[10] = 2;
+ media_packet->data.MutableData()[11] = 3;
+
+ EXPECT_FALSE(receiver_.AddReceivedPacket(ParsePacket(*media_packet)));
+}
+
+TEST_F(FlexfecReceiverTest, FailsOnUnknownFecSsrc) {
+ const size_t kNumMediaPackets = 1;
+ const size_t kNumFecPackets = 1;
+
+ PacketList media_packets;
+ PacketizeFrame(kNumMediaPackets, 0, &media_packets);
+ std::list<Packet*> fec_packets = EncodeFec(media_packets, kNumFecPackets);
+ const auto& media_packet = media_packets.front();
+ auto fec_packet = packet_generator_.BuildFlexfecPacket(*fec_packets.front());
+ // Corrupt the SSRC.
+ fec_packet->data.MutableData()[8] = 4;
+ fec_packet->data.MutableData()[9] = 5;
+ fec_packet->data.MutableData()[10] = 6;
+ fec_packet->data.MutableData()[11] = 7;
+
+ std::unique_ptr<ForwardErrorCorrection::ReceivedPacket> received_packet =
+ receiver_.AddReceivedPacket(ParsePacket(*media_packet));
+ ASSERT_TRUE(received_packet);
+ receiver_.ProcessReceivedPacket(*received_packet);
+ EXPECT_FALSE(receiver_.AddReceivedPacket(ParsePacket(*fec_packet)));
+}
+
+TEST_F(FlexfecReceiverTest, ReceivesMultiplePackets) {
+ const size_t kNumMediaPackets = 2;
+ const size_t kNumFecPackets = 1;
+
+ PacketList media_packets;
+ PacketizeFrame(kNumMediaPackets, 0, &media_packets);
+ std::list<Packet*> fec_packets = EncodeFec(media_packets, kNumFecPackets);
+
+ // Receive all media packets.
+ for (const auto& media_packet : media_packets) {
+ std::unique_ptr<ForwardErrorCorrection::ReceivedPacket> received_packet =
+ receiver_.AddReceivedPacket(ParsePacket(*media_packet));
+ ASSERT_TRUE(received_packet);
+ receiver_.ProcessReceivedPacket(*received_packet);
+ }
+
+ // Receive FEC packet.
+ auto* fec_packet = fec_packets.front();
+ std::unique_ptr<Packet> packet_with_rtp_header =
+ packet_generator_.BuildFlexfecPacket(*fec_packet);
+ std::unique_ptr<ForwardErrorCorrection::ReceivedPacket> received_packet =
+ receiver_.AddReceivedPacket(ParsePacket(*packet_with_rtp_header));
+ ASSERT_TRUE(received_packet);
+ receiver_.ProcessReceivedPacket(*received_packet);
+}
+
+TEST_F(FlexfecReceiverTest, RecoversFromSingleMediaLoss) {
+ const size_t kNumMediaPackets = 2;
+ const size_t kNumFecPackets = 1;
+
+ PacketList media_packets;
+ PacketizeFrame(kNumMediaPackets, 0, &media_packets);
+ std::list<Packet*> fec_packets = EncodeFec(media_packets, kNumFecPackets);
+
+ // Receive first media packet but drop second.
+ auto media_it = media_packets.begin();
+ receiver_.OnRtpPacket(ParsePacket(**media_it));
+
+ // Receive FEC packet and ensure recovery of lost media packet.
+ auto fec_it = fec_packets.begin();
+ std::unique_ptr<Packet> packet_with_rtp_header =
+ packet_generator_.BuildFlexfecPacket(**fec_it);
+ media_it++;
+ EXPECT_CALL(recovered_packet_receiver_,
+ OnRecoveredPacket(_, (*media_it)->data.size()))
+ .With(Args<0, 1>(ElementsAreArray((*media_it)->data.cdata(),
+ (*media_it)->data.size())));
+ receiver_.OnRtpPacket(ParsePacket(*packet_with_rtp_header));
+}
+
+TEST_F(FlexfecReceiverTest, RecoversFromDoubleMediaLoss) {
+ const size_t kNumMediaPackets = 2;
+ const size_t kNumFecPackets = 2;
+
+ PacketList media_packets;
+ PacketizeFrame(kNumMediaPackets, 0, &media_packets);
+ std::list<Packet*> fec_packets = EncodeFec(media_packets, kNumFecPackets);
+
+ // Drop both media packets.
+
+ // Receive first FEC packet and recover first lost media packet.
+ auto fec_it = fec_packets.begin();
+ std::unique_ptr<Packet> packet_with_rtp_header =
+ packet_generator_.BuildFlexfecPacket(**fec_it);
+ auto media_it = media_packets.begin();
+ EXPECT_CALL(recovered_packet_receiver_,
+ OnRecoveredPacket(_, (*media_it)->data.size()))
+ .With(Args<0, 1>(ElementsAreArray((*media_it)->data.cdata(),
+ (*media_it)->data.size())));
+ receiver_.OnRtpPacket(ParsePacket(*packet_with_rtp_header));
+
+ // Receive second FEC packet and recover second lost media packet.
+ fec_it++;
+ packet_with_rtp_header = packet_generator_.BuildFlexfecPacket(**fec_it);
+ media_it++;
+ EXPECT_CALL(recovered_packet_receiver_,
+ OnRecoveredPacket(_, (*media_it)->data.size()))
+ .With(Args<0, 1>(ElementsAreArray((*media_it)->data.cdata(),
+ (*media_it)->data.size())));
+ receiver_.OnRtpPacket(ParsePacket(*packet_with_rtp_header));
+}
+
+TEST_F(FlexfecReceiverTest, DoesNotRecoverFromMediaAndFecLoss) {
+ const size_t kNumMediaPackets = 2;
+ const size_t kNumFecPackets = 1;
+
+ PacketList media_packets;
+ PacketizeFrame(kNumMediaPackets, 0, &media_packets);
+ std::list<Packet*> fec_packets = EncodeFec(media_packets, kNumFecPackets);
+
+ // Receive first media packet.
+ auto media_it = media_packets.begin();
+ receiver_.OnRtpPacket(ParsePacket(**media_it));
+
+  // Drop second media packet and FEC packet. Do not expect a callback.
+}
+
+TEST_F(FlexfecReceiverTest, DoesNotCallbackTwice) {
+ const size_t kNumMediaPackets = 2;
+ const size_t kNumFecPackets = 1;
+
+ PacketList media_packets;
+ PacketizeFrame(kNumMediaPackets, 0, &media_packets);
+ std::list<Packet*> fec_packets = EncodeFec(media_packets, kNumFecPackets);
+
+ // Receive first media packet but drop second.
+ auto media_it = media_packets.begin();
+ receiver_.OnRtpPacket(ParsePacket(**media_it));
+
+ // Receive FEC packet and ensure recovery of lost media packet.
+ auto fec_it = fec_packets.begin();
+ std::unique_ptr<Packet> packet_with_rtp_header =
+ packet_generator_.BuildFlexfecPacket(**fec_it);
+ media_it++;
+ EXPECT_CALL(recovered_packet_receiver_,
+ OnRecoveredPacket(_, (*media_it)->data.size()))
+ .With(Args<0, 1>(ElementsAreArray((*media_it)->data.cdata(),
+ (*media_it)->data.size())));
+ receiver_.OnRtpPacket(ParsePacket(*packet_with_rtp_header));
+
+ // Receive the FEC packet again, but do not call back.
+ receiver_.OnRtpPacket(ParsePacket(*packet_with_rtp_header));
+
+ // Receive the first media packet again, but do not call back.
+ media_it = media_packets.begin();
+ receiver_.OnRtpPacket(ParsePacket(**media_it));
+
+ // Receive the second media packet again (the one recovered above),
+ // but do not call back again.
+ media_it++;
+ receiver_.OnRtpPacket(ParsePacket(**media_it));
+}
+
+// Here we implicitly assume packet masks that are suitable for this type of
+// 50% correlated loss. If the precomputed packet masks change, this test may
+// need to be updated.
+TEST_F(FlexfecReceiverTest, RecoversFrom50PercentLoss) {
+ const size_t kNumFecPackets = 5;
+ const size_t kNumFrames = 2 * kNumFecPackets;
+ const size_t kNumMediaPacketsPerFrame = 1;
+
+ PacketList media_packets;
+ for (size_t i = 0; i < kNumFrames; ++i) {
+ PacketizeFrame(kNumMediaPacketsPerFrame, i, &media_packets);
+ }
+ std::list<Packet*> fec_packets = EncodeFec(media_packets, kNumFecPackets);
+
+ // Drop every second media packet.
+ auto media_it = media_packets.begin();
+ while (media_it != media_packets.end()) {
+ receiver_.OnRtpPacket(ParsePacket(**media_it));
+ ++media_it;
+ if (media_it == media_packets.end()) {
+ break;
+ }
+ ++media_it;
+ }
+
+ // Receive all FEC packets.
+ media_it = media_packets.begin();
+ for (const auto* fec_packet : fec_packets) {
+ std::unique_ptr<Packet> fec_packet_with_rtp_header =
+ packet_generator_.BuildFlexfecPacket(*fec_packet);
+ ++media_it;
+ if (media_it == media_packets.end()) {
+ break;
+ }
+ EXPECT_CALL(recovered_packet_receiver_,
+ OnRecoveredPacket(_, (*media_it)->data.size()))
+ .With(Args<0, 1>(ElementsAreArray((*media_it)->data.cdata(),
+ (*media_it)->data.size())));
+ receiver_.OnRtpPacket(ParsePacket(*fec_packet_with_rtp_header));
+ ++media_it;
+ }
+}
+
+TEST_F(FlexfecReceiverTest, DelayedFecPacketDoesHelp) {
+ // These values need to be updated if the underlying erasure code
+ // implementation changes.
+ // Delay FEC packet by maximum number of media packets tracked by receiver.
+ const size_t kNumFrames = 192;
+ const size_t kNumMediaPacketsPerFrame = 1;
+ const size_t kNumFecPackets = 1;
+
+ PacketList media_packets;
+ PacketizeFrame(kNumMediaPacketsPerFrame, 0, &media_packets);
+ PacketizeFrame(kNumMediaPacketsPerFrame, 1, &media_packets);
+  // Protect the first two frames.
+ std::list<Packet*> fec_packets = EncodeFec(media_packets, kNumFecPackets);
+ for (size_t i = 2; i < kNumFrames; ++i) {
+ PacketizeFrame(kNumMediaPacketsPerFrame, i, &media_packets);
+ }
+
+ // Drop first media packet and delay FEC packet.
+ auto media_it = media_packets.begin();
+ ++media_it;
+
+ // Receive all other media packets.
+ while (media_it != media_packets.end()) {
+ receiver_.OnRtpPacket(ParsePacket(**media_it));
+ ++media_it;
+ }
+
+ // Receive FEC packet and recover first media packet.
+ auto fec_it = fec_packets.begin();
+ std::unique_ptr<Packet> packet_with_rtp_header =
+ packet_generator_.BuildFlexfecPacket(**fec_it);
+ media_it = media_packets.begin();
+ EXPECT_CALL(recovered_packet_receiver_,
+ OnRecoveredPacket(_, (*media_it)->data.size()))
+ .With(Args<0, 1>(ElementsAreArray((*media_it)->data.cdata(),
+ (*media_it)->data.size())));
+ receiver_.OnRtpPacket(ParsePacket(*packet_with_rtp_header));
+}
+
+TEST_F(FlexfecReceiverTest, TooDelayedFecPacketDoesNotHelp) {
+ // These values need to be updated if the underlying erasure code
+ // implementation changes.
+ // Delay FEC packet by one more than maximum number of media packets
+ // tracked by receiver.
+ const size_t kNumFrames = 193;
+ const size_t kNumMediaPacketsPerFrame = 1;
+ const size_t kNumFecPackets = 1;
+
+ PacketList media_packets;
+ PacketizeFrame(kNumMediaPacketsPerFrame, 0, &media_packets);
+ PacketizeFrame(kNumMediaPacketsPerFrame, 1, &media_packets);
+ // Protect first two frames.
+ std::list<Packet*> fec_packets = EncodeFec(media_packets, kNumFecPackets);
+ for (size_t i = 2; i < kNumFrames; ++i) {
+ PacketizeFrame(kNumMediaPacketsPerFrame, i, &media_packets);
+ }
+
+ // Drop first media packet and delay FEC packet.
+ auto media_it = media_packets.begin();
+ ++media_it;
+
+ // Receive all other media packets.
+ while (media_it != media_packets.end()) {
+ receiver_.OnRtpPacket(ParsePacket(**media_it));
+ ++media_it;
+ }
+
+ // Receive FEC packet.
+ auto fec_it = fec_packets.begin();
+ std::unique_ptr<Packet> packet_with_rtp_header =
+ packet_generator_.BuildFlexfecPacket(**fec_it);
+ receiver_.OnRtpPacket(ParsePacket(*packet_with_rtp_header));
+
+  // Do not expect a callback.
+}
+
+TEST_F(FlexfecReceiverTest, SurvivesOldRecoveredPacketBeingReinserted) {
+ // Simulates the behaviour of the
+ // Call->FlexfecReceiveStream->FlexfecReceiver->Call loop in production code.
+ class LoopbackRecoveredPacketReceiver : public RecoveredPacketReceiver {
+ public:
+ LoopbackRecoveredPacketReceiver() : receiver_(nullptr) {}
+
+ void SetReceiver(FlexfecReceiver* receiver) { receiver_ = receiver; }
+
+ // Implements RecoveredPacketReceiver.
+ void OnRecoveredPacket(const uint8_t* packet, size_t length) override {
+ RtpPacketReceived parsed_packet;
+ EXPECT_TRUE(parsed_packet.Parse(packet, length));
+ parsed_packet.set_recovered(true);
+
+ RTC_DCHECK(receiver_);
+ receiver_->OnRtpPacket(parsed_packet);
+ }
+
+ private:
+ FlexfecReceiver* receiver_;
+ } loopback_recovered_packet_receiver;
+
+ // Feed recovered packets back into `receiver`.
+ FlexfecReceiver receiver(Clock::GetRealTimeClock(), kFlexfecSsrc, kMediaSsrc,
+ &loopback_recovered_packet_receiver);
+ loopback_recovered_packet_receiver.SetReceiver(&receiver);
+
+ // Receive first set of packets.
+ PacketList first_media_packets;
+ for (int i = 0; i < 46; ++i) {
+ PacketizeFrame(1, 0, &first_media_packets);
+ }
+ for (const auto& media_packet : first_media_packets) {
+ receiver.OnRtpPacket(ParsePacket(*media_packet));
+ }
+
+  // Protect one media packet. Lose the media packet,
+  // but do not receive the FEC packet yet.
+ PacketList protected_media_packet;
+ PacketizeFrame(1, 0, &protected_media_packet);
+ const std::list<Packet*> fec_packets = EncodeFec(protected_media_packet, 1);
+ EXPECT_EQ(1u, fec_packets.size());
+ std::unique_ptr<Packet> fec_packet_with_rtp_header =
+ packet_generator_.BuildFlexfecPacket(*fec_packets.front());
+
+ // Lose some packets, thus introducing a sequence number gap.
+ PacketList lost_packets;
+ for (int i = 0; i < 100; ++i) {
+ PacketizeFrame(1, 0, &lost_packets);
+ }
+
+ // Receive one more packet.
+ PacketList second_media_packets;
+ PacketizeFrame(1, 0, &second_media_packets);
+ for (const auto& media_packet : second_media_packets) {
+ receiver.OnRtpPacket(ParsePacket(*media_packet));
+ }
+
+ // Receive delayed FEC packet.
+ receiver.OnRtpPacket(ParsePacket(*fec_packet_with_rtp_header));
+
+ // Expect no crash.
+}
+
+TEST_F(FlexfecReceiverTest, RecoversWithMediaPacketsOutOfOrder) {
+ const size_t kNumMediaPackets = 6;
+ const size_t kNumFecPackets = 2;
+
+ PacketList media_packets;
+ PacketizeFrame(kNumMediaPackets, 0, &media_packets);
+ std::list<Packet*> fec_packets = EncodeFec(media_packets, kNumFecPackets);
+
+ // Lose two media packets, and receive the others out of order.
+ auto media_it = media_packets.begin();
+ auto media_packet0 = media_it++;
+ auto media_packet1 = media_it++;
+ auto media_packet2 = media_it++;
+ auto media_packet3 = media_it++;
+ auto media_packet4 = media_it++;
+ auto media_packet5 = media_it++;
+ receiver_.OnRtpPacket(ParsePacket(**media_packet5));
+ receiver_.OnRtpPacket(ParsePacket(**media_packet2));
+ receiver_.OnRtpPacket(ParsePacket(**media_packet3));
+ receiver_.OnRtpPacket(ParsePacket(**media_packet0));
+
+ // Expect to recover lost media packets.
+ EXPECT_CALL(recovered_packet_receiver_,
+ OnRecoveredPacket(_, (*media_packet1)->data.size()))
+ .With(Args<0, 1>(ElementsAreArray((*media_packet1)->data.cdata(),
+ (*media_packet1)->data.size())));
+ EXPECT_CALL(recovered_packet_receiver_,
+ OnRecoveredPacket(_, (*media_packet4)->data.size()))
+ .With(Args<0, 1>(ElementsAreArray((*media_packet4)->data.cdata(),
+ (*media_packet4)->data.size())));
+
+ // Add FEC packets.
+ auto fec_it = fec_packets.begin();
+ std::unique_ptr<Packet> packet_with_rtp_header;
+ while (fec_it != fec_packets.end()) {
+ packet_with_rtp_header = packet_generator_.BuildFlexfecPacket(**fec_it);
+ receiver_.OnRtpPacket(ParsePacket(*packet_with_rtp_header));
+ ++fec_it;
+ }
+}
+
+// Recovered media packets may be fed back into the FlexfecReceiver by the
+// callback. This test verifies that such feedback does not cause unbounded
+// recursion.
+TEST_F(FlexfecReceiverTest, RecoveryCallbackDoesNotLoopInfinitely) {
+ class LoopbackRecoveredPacketReceiver : public RecoveredPacketReceiver {
+ public:
+ const int kMaxRecursionDepth = 10;
+
+ LoopbackRecoveredPacketReceiver()
+ : receiver_(nullptr),
+ did_receive_call_back_(false),
+ recursion_depth_(0),
+ deep_recursion_(false) {}
+
+ void SetReceiver(FlexfecReceiver* receiver) { receiver_ = receiver; }
+ bool DidReceiveCallback() const { return did_receive_call_back_; }
+ bool DeepRecursion() const { return deep_recursion_; }
+
+ // Implements RecoveredPacketReceiver.
+ void OnRecoveredPacket(const uint8_t* packet, size_t length) override {
+ RtpPacketReceived parsed_packet;
+ EXPECT_TRUE(parsed_packet.Parse(packet, length));
+
+ did_receive_call_back_ = true;
+
+ if (recursion_depth_ > kMaxRecursionDepth) {
+ deep_recursion_ = true;
+ return;
+ }
+ ++recursion_depth_;
+ RTC_DCHECK(receiver_);
+ receiver_->OnRtpPacket(parsed_packet);
+ --recursion_depth_;
+ }
+
+ private:
+ FlexfecReceiver* receiver_;
+ bool did_receive_call_back_;
+ int recursion_depth_;
+ bool deep_recursion_;
+ } loopback_recovered_packet_receiver;
+
+ // Feed recovered packets back into `receiver`.
+ FlexfecReceiver receiver(Clock::GetRealTimeClock(), kFlexfecSsrc, kMediaSsrc,
+ &loopback_recovered_packet_receiver);
+ loopback_recovered_packet_receiver.SetReceiver(&receiver);
+
+ const size_t kNumMediaPackets = 2;
+ const size_t kNumFecPackets = 1;
+
+ PacketList media_packets;
+ PacketizeFrame(kNumMediaPackets, 0, &media_packets);
+ std::list<Packet*> fec_packets = EncodeFec(media_packets, kNumFecPackets);
+
+ // Receive first media packet but drop second.
+ auto media_it = media_packets.begin();
+ receiver.OnRtpPacket(ParsePacket(**media_it));
+
+ // Receive FEC packet and verify that a packet was recovered.
+ auto fec_it = fec_packets.begin();
+ std::unique_ptr<Packet> packet_with_rtp_header =
+ packet_generator_.BuildFlexfecPacket(**fec_it);
+ receiver.OnRtpPacket(ParsePacket(*packet_with_rtp_header));
+ EXPECT_TRUE(loopback_recovered_packet_receiver.DidReceiveCallback());
+ EXPECT_FALSE(loopback_recovered_packet_receiver.DeepRecursion());
+}
+
+TEST_F(FlexfecReceiverTest, CalculatesNumberOfPackets) {
+ const size_t kNumMediaPackets = 2;
+ const size_t kNumFecPackets = 1;
+
+ PacketList media_packets;
+ PacketizeFrame(kNumMediaPackets, 0, &media_packets);
+ std::list<Packet*> fec_packets = EncodeFec(media_packets, kNumFecPackets);
+
+ // Receive first media packet but drop second.
+ auto media_it = media_packets.begin();
+ receiver_.OnRtpPacket(ParsePacket(**media_it));
+
+ // Receive FEC packet and ensure recovery of lost media packet.
+ auto fec_it = fec_packets.begin();
+ std::unique_ptr<Packet> packet_with_rtp_header =
+ packet_generator_.BuildFlexfecPacket(**fec_it);
+ media_it++;
+ EXPECT_CALL(recovered_packet_receiver_,
+ OnRecoveredPacket(_, (*media_it)->data.size()))
+ .With(Args<0, 1>(ElementsAreArray((*media_it)->data.cdata(),
+ (*media_it)->data.size())));
+ receiver_.OnRtpPacket(ParsePacket(*packet_with_rtp_header));
+
+ // Check stats calculations.
+ FecPacketCounter packet_counter = receiver_.GetPacketCounter();
+ EXPECT_EQ(2U, packet_counter.num_packets);
+ EXPECT_EQ(1U, packet_counter.num_fec_packets);
+ EXPECT_EQ(1U, packet_counter.num_recovered_packets);
+}
+
+TEST_F(FlexfecReceiverTest, DoesNotDecodeWrappedMediaSequenceUsingOldFec) {
+ const size_t kFirstFrameNumMediaPackets = 2;
+ const size_t kFirstFrameNumFecPackets = 1;
+
+ PacketList media_packets;
+ PacketizeFrame(kFirstFrameNumMediaPackets, 0, &media_packets);
+
+ // Protect first frame (sequences 0 and 1) with 1 FEC packet.
+ std::list<Packet*> fec_packets =
+ EncodeFec(media_packets, kFirstFrameNumFecPackets);
+
+ // Generate enough media packets to simulate media sequence number wraparound.
+ // Use no FEC for these frames to make sure old FEC is not purged due to age.
+ const size_t kNumFramesSequenceWrapAround =
+ std::numeric_limits<uint16_t>::max();
+ const size_t kNumMediaPacketsPerFrame = 1;
+
+ for (size_t i = 1; i <= kNumFramesSequenceWrapAround; ++i) {
+ PacketizeFrame(kNumMediaPacketsPerFrame, i, &media_packets);
+ }
+
+  // Receive the first (`kFirstFrameNumMediaPackets` + 192) media packets.
+ // Simulate an old FEC packet by separating it from its encoded media
+ // packets by at least 192 packets.
+ auto media_it = media_packets.begin();
+ for (size_t i = 0; i < (kFirstFrameNumMediaPackets + 192); i++) {
+ if (i == 1) {
+ // Drop the second packet of the first frame.
+ media_it++;
+ } else {
+ receiver_.OnRtpPacket(ParsePacket(**media_it++));
+ }
+ }
+
+ // Receive FEC packet. Although a protected packet was dropped,
+ // expect no recovery callback since it is delayed from first frame
+ // by more than 192 packets.
+ auto fec_it = fec_packets.begin();
+ std::unique_ptr<Packet> fec_packet_with_rtp_header =
+ packet_generator_.BuildFlexfecPacket(**fec_it);
+ receiver_.OnRtpPacket(ParsePacket(*fec_packet_with_rtp_header));
+
+ // Receive remaining media packets.
+  // NOTE: Because we sent enough packets to simulate wraparound, sequence
+  // number 0 is received again, but it is a different packet from the
+  // original first packet of the first frame.
+ while (media_it != media_packets.end()) {
+ receiver_.OnRtpPacket(ParsePacket(**media_it++));
+ }
+
+  // Do not expect a recovery callback: the FEC packet is old and should not
+  // decode wrapped-around media sequence numbers.
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/flexfec_sender.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/flexfec_sender.cc
new file mode 100644
index 0000000000..292fe4a8dd
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/flexfec_sender.cc
@@ -0,0 +1,204 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/include/flexfec_sender.h"
+
+#include <string.h>
+
+#include <list>
+#include <utility>
+
+#include "absl/strings/string_view.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "modules/rtp_rtcp/source/forward_error_correction.h"
+#include "modules/rtp_rtcp/source/rtp_header_extensions.h"
+#include "modules/rtp_rtcp/source/rtp_packet_to_send.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+namespace {
+
+// Let the first sequence number lie in the first half of the 16-bit interval.
+constexpr uint16_t kMaxInitRtpSeqNumber = 0x7fff;
+
+// See breakdown in flexfec_header_reader_writer.cc.
+constexpr size_t kFlexfecMaxHeaderSize = 32;
+
+// Since we will mainly use FlexFEC to protect video streams, we use a 90 kHz
+// clock for the RTP timestamps. (This is according to the RFC, which states
+// that it is RECOMMENDED to use the same clock frequency for FlexFEC as for
+// the protected media stream.)
+// The constant converts from clock millisecond timestamps to the 90 kHz
+// RTP timestamp.
+const int kMsToRtpTimestamp = kVideoPayloadTypeFrequency / 1000;
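+
+// Sanity sketch (an expository addition, assuming kVideoPayloadTypeFrequency
+// from rtp_rtcp_defines.h is the standard 90 kHz video RTP clock, i.e. 90
+// ticks per millisecond):
+static_assert(kVideoPayloadTypeFrequency == 90000,
+              "FlexFEC RTP timestamp conversion assumes a 90 kHz clock");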
+
+// How often to log the generated FEC packets to the text log.
+constexpr int64_t kPacketLogIntervalMs = 10000;
+
+RtpHeaderExtensionMap RegisterSupportedExtensions(
+ const std::vector<RtpExtension>& rtp_header_extensions) {
+ RtpHeaderExtensionMap map;
+ for (const auto& extension : rtp_header_extensions) {
+ if (extension.uri == TransportSequenceNumber::Uri()) {
+ map.Register<TransportSequenceNumber>(extension.id);
+ } else if (extension.uri == AbsoluteSendTime::Uri()) {
+ map.Register<AbsoluteSendTime>(extension.id);
+ } else if (extension.uri == TransmissionOffset::Uri()) {
+ map.Register<TransmissionOffset>(extension.id);
+ } else if (extension.uri == RtpMid::Uri()) {
+ map.Register<RtpMid>(extension.id);
+ } else {
+ RTC_LOG(LS_INFO)
+ << "FlexfecSender only supports RTP header extensions for "
+ "BWE and MID, so the extension "
+ << extension.ToString() << " will not be used.";
+ }
+ }
+ return map;
+}
+
+} // namespace
+
+FlexfecSender::FlexfecSender(
+ int payload_type,
+ uint32_t ssrc,
+ uint32_t protected_media_ssrc,
+ absl::string_view mid,
+ const std::vector<RtpExtension>& rtp_header_extensions,
+ rtc::ArrayView<const RtpExtensionSize> extension_sizes,
+ const RtpState* rtp_state,
+ Clock* clock)
+ : clock_(clock),
+ random_(clock_->TimeInMicroseconds()),
+ last_generated_packet_ms_(-1),
+ payload_type_(payload_type),
+      // Restore the RTP state if one was provided, i.e., if this is not
+      // the first time we are operating. Otherwise, randomize the initial
+      // timestamp offset and RTP sequence numbers. (This is not intended
+      // to be cryptographically strong.)
+ timestamp_offset_(rtp_state ? rtp_state->start_timestamp
+ : random_.Rand<uint32_t>()),
+ ssrc_(ssrc),
+ protected_media_ssrc_(protected_media_ssrc),
+ mid_(mid),
+ seq_num_(rtp_state ? rtp_state->sequence_number
+ : random_.Rand(1, kMaxInitRtpSeqNumber)),
+ ulpfec_generator_(
+ ForwardErrorCorrection::CreateFlexfec(ssrc, protected_media_ssrc),
+ clock_),
+ rtp_header_extension_map_(
+ RegisterSupportedExtensions(rtp_header_extensions)),
+ header_extensions_size_(
+ RtpHeaderExtensionSize(extension_sizes, rtp_header_extension_map_)),
+ fec_bitrate_(/*max_window_size_ms=*/1000, RateStatistics::kBpsScale) {
+ // This object should not have been instantiated if FlexFEC is disabled.
+ RTC_DCHECK_GE(payload_type, 0);
+ RTC_DCHECK_LE(payload_type, 127);
+}
+
+FlexfecSender::~FlexfecSender() = default;
+
+// We reuse the UlpfecGenerator implementation for setting the protection
+// parameters and for generating FEC, via SetProtectionParameters() and
+// AddPacketAndGenerateFec() below.
+void FlexfecSender::SetProtectionParameters(
+ const FecProtectionParams& delta_params,
+ const FecProtectionParams& key_params) {
+ ulpfec_generator_.SetProtectionParameters(delta_params, key_params);
+}
+
+void FlexfecSender::AddPacketAndGenerateFec(const RtpPacketToSend& packet) {
+ // TODO(brandtr): Generalize this SSRC check when we support multistream
+ // protection.
+ RTC_DCHECK_EQ(packet.Ssrc(), protected_media_ssrc_);
+ ulpfec_generator_.AddPacketAndGenerateFec(packet);
+}
+
+std::vector<std::unique_ptr<RtpPacketToSend>> FlexfecSender::GetFecPackets() {
+ RTC_CHECK_RUNS_SERIALIZED(&ulpfec_generator_.race_checker_);
+ std::vector<std::unique_ptr<RtpPacketToSend>> fec_packets_to_send;
+ fec_packets_to_send.reserve(ulpfec_generator_.generated_fec_packets_.size());
+ size_t total_fec_data_bytes = 0;
+ for (const auto* fec_packet : ulpfec_generator_.generated_fec_packets_) {
+ std::unique_ptr<RtpPacketToSend> fec_packet_to_send(
+ new RtpPacketToSend(&rtp_header_extension_map_));
+ fec_packet_to_send->set_packet_type(
+ RtpPacketMediaType::kForwardErrorCorrection);
+ fec_packet_to_send->set_allow_retransmission(false);
+
+ // RTP header.
+ fec_packet_to_send->SetMarker(false);
+ fec_packet_to_send->SetPayloadType(payload_type_);
+ fec_packet_to_send->SetSequenceNumber(seq_num_++);
+ fec_packet_to_send->SetTimestamp(
+ timestamp_offset_ +
+ static_cast<uint32_t>(kMsToRtpTimestamp *
+ clock_->TimeInMilliseconds()));
+ // Set "capture time" so that the TransmissionOffset header extension
+ // can be set by the RTPSender.
+ fec_packet_to_send->set_capture_time(clock_->CurrentTime());
+ fec_packet_to_send->SetSsrc(ssrc_);
+ // Reserve extensions, if registered. These will be set by the RTPSender.
+ fec_packet_to_send->ReserveExtension<AbsoluteSendTime>();
+ fec_packet_to_send->ReserveExtension<TransmissionOffset>();
+ fec_packet_to_send->ReserveExtension<TransportSequenceNumber>();
+ // Possibly include the MID header extension.
+ if (!mid_.empty()) {
+ // This is a no-op if the MID header extension is not registered.
+ fec_packet_to_send->SetExtension<RtpMid>(mid_);
+ }
+
+ // RTP payload.
+ uint8_t* payload =
+ fec_packet_to_send->AllocatePayload(fec_packet->data.size());
+ memcpy(payload, fec_packet->data.cdata(), fec_packet->data.size());
+
+ total_fec_data_bytes += fec_packet_to_send->size();
+ fec_packets_to_send.push_back(std::move(fec_packet_to_send));
+ }
+
+ if (!fec_packets_to_send.empty()) {
+ ulpfec_generator_.ResetState();
+ }
+
+ int64_t now_ms = clock_->TimeInMilliseconds();
+ if (!fec_packets_to_send.empty() &&
+ now_ms - last_generated_packet_ms_ > kPacketLogIntervalMs) {
+ RTC_LOG(LS_VERBOSE) << "Generated " << fec_packets_to_send.size()
+ << " FlexFEC packets with payload type: "
+ << payload_type_ << " and SSRC: " << ssrc_ << ".";
+ last_generated_packet_ms_ = now_ms;
+ }
+
+ MutexLock lock(&mutex_);
+ fec_bitrate_.Update(total_fec_data_bytes, now_ms);
+
+ return fec_packets_to_send;
+}
+
+// The overhead is the size of the registered RTP header extensions plus the
+// FlexFEC header.
+size_t FlexfecSender::MaxPacketOverhead() const {
+ return header_extensions_size_ + kFlexfecMaxHeaderSize;
+}
+
+DataRate FlexfecSender::CurrentFecRate() const {
+ MutexLock lock(&mutex_);
+ return DataRate::BitsPerSec(
+ fec_bitrate_.Rate(clock_->TimeInMilliseconds()).value_or(0));
+}
+
+absl::optional<RtpState> FlexfecSender::GetRtpState() {
+ RtpState rtp_state;
+ rtp_state.sequence_number = seq_num_;
+ rtp_state.start_timestamp = timestamp_offset_;
+ return rtp_state;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/flexfec_sender_unittest.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/flexfec_sender_unittest.cc
new file mode 100644
index 0000000000..19614d2bbd
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/flexfec_sender_unittest.cc
@@ -0,0 +1,342 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/include/flexfec_sender.h"
+
+#include <vector>
+
+#include "api/rtp_parameters.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "modules/rtp_rtcp/source/fec_test_helper.h"
+#include "modules/rtp_rtcp/source/rtp_header_extensions.h"
+#include "modules/rtp_rtcp/source/rtp_packet_to_send.h"
+#include "modules/rtp_rtcp/source/rtp_sender.h"
+#include "system_wrappers/include/clock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+namespace {
+
+using test::fec::AugmentedPacket;
+using test::fec::AugmentedPacketGenerator;
+
+constexpr int kFlexfecPayloadType = 123;
+constexpr uint32_t kMediaSsrc = 1234;
+constexpr uint32_t kFlexfecSsrc = 5678;
+const char kNoMid[] = "";
+const std::vector<RtpExtension> kNoRtpHeaderExtensions;
+const std::vector<RtpExtensionSize> kNoRtpHeaderExtensionSizes;
+// Assume a single protected media SSRC.
+constexpr size_t kFlexfecMaxHeaderSize = 32;
+constexpr size_t kPayloadLength = 50;
+
+constexpr int64_t kInitialSimulatedClockTime = 1;
+// These values are deterministically given by the PRNG, due to our fixed seed.
+// They should be updated if the PRNG implementation changes.
+constexpr uint16_t kDeterministicSequenceNumber = 28732;
+constexpr uint32_t kDeterministicTimestamp = 2305613085;
+
+// Round up to the nearest size that is a multiple of 4.
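+// For example, Word32Align(13) == 16, while Word32Align(16) == 16.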
+size_t Word32Align(size_t size) {
+ uint32_t remainder = size % 4;
+ if (remainder != 0)
+ return size + 4 - remainder;
+ return size;
+}
+
+std::unique_ptr<RtpPacketToSend> GenerateSingleFlexfecPacket(
+ FlexfecSender* sender) {
+ // Parameters selected to generate a single FEC packet.
+ FecProtectionParams params;
+ params.fec_rate = 15;
+ params.max_fec_frames = 1;
+ params.fec_mask_type = kFecMaskRandom;
+ constexpr size_t kNumPackets = 4;
+
+ sender->SetProtectionParameters(params, params);
+ AugmentedPacketGenerator packet_generator(kMediaSsrc);
+ packet_generator.NewFrame(kNumPackets);
+ for (size_t i = 0; i < kNumPackets; ++i) {
+ std::unique_ptr<AugmentedPacket> packet =
+ packet_generator.NextPacket(i, kPayloadLength);
+ RtpPacketToSend rtp_packet(nullptr); // No header extensions.
+ rtp_packet.Parse(packet->data);
+ sender->AddPacketAndGenerateFec(rtp_packet);
+ }
+ std::vector<std::unique_ptr<RtpPacketToSend>> fec_packets =
+ sender->GetFecPackets();
+ EXPECT_EQ(1U, fec_packets.size());
+ EXPECT_TRUE(sender->GetFecPackets().empty());
+
+ return std::move(fec_packets.front());
+}
+
+} // namespace
+
+TEST(FlexfecSenderTest, Ssrc) {
+ SimulatedClock clock(kInitialSimulatedClockTime);
+ FlexfecSender sender(kFlexfecPayloadType, kFlexfecSsrc, kMediaSsrc, kNoMid,
+ kNoRtpHeaderExtensions, kNoRtpHeaderExtensionSizes,
+ nullptr /* rtp_state */, &clock);
+
+ EXPECT_EQ(kFlexfecSsrc, sender.FecSsrc());
+}
+
+TEST(FlexfecSenderTest, NoFecAvailableBeforeMediaAdded) {
+ SimulatedClock clock(kInitialSimulatedClockTime);
+ FlexfecSender sender(kFlexfecPayloadType, kFlexfecSsrc, kMediaSsrc, kNoMid,
+ kNoRtpHeaderExtensions, kNoRtpHeaderExtensionSizes,
+ nullptr /* rtp_state */, &clock);
+
+ EXPECT_TRUE(sender.GetFecPackets().empty());
+}
+
+TEST(FlexfecSenderTest, ProtectOneFrameWithOneFecPacket) {
+ SimulatedClock clock(kInitialSimulatedClockTime);
+ FlexfecSender sender(kFlexfecPayloadType, kFlexfecSsrc, kMediaSsrc, kNoMid,
+ kNoRtpHeaderExtensions, kNoRtpHeaderExtensionSizes,
+ nullptr /* rtp_state */, &clock);
+ auto fec_packet = GenerateSingleFlexfecPacket(&sender);
+
+ EXPECT_EQ(kRtpHeaderSize, fec_packet->headers_size());
+ EXPECT_FALSE(fec_packet->Marker());
+ EXPECT_EQ(kFlexfecPayloadType, fec_packet->PayloadType());
+ EXPECT_EQ(kDeterministicSequenceNumber, fec_packet->SequenceNumber());
+ EXPECT_EQ(kDeterministicTimestamp, fec_packet->Timestamp());
+ EXPECT_EQ(kFlexfecSsrc, fec_packet->Ssrc());
+ EXPECT_LE(kPayloadLength, fec_packet->payload_size());
+}
+
+TEST(FlexfecSenderTest, ProtectTwoFramesWithOneFecPacket) {
+  // FEC parameters selected to generate a single FEC packet covering both
+  // frames.
+ FecProtectionParams params;
+ params.fec_rate = 15;
+ params.max_fec_frames = 2;
+ params.fec_mask_type = kFecMaskRandom;
+ constexpr size_t kNumFrames = 2;
+ constexpr size_t kNumPacketsPerFrame = 2;
+ SimulatedClock clock(kInitialSimulatedClockTime);
+ FlexfecSender sender(kFlexfecPayloadType, kFlexfecSsrc, kMediaSsrc, kNoMid,
+ kNoRtpHeaderExtensions, kNoRtpHeaderExtensionSizes,
+ nullptr /* rtp_state */, &clock);
+ sender.SetProtectionParameters(params, params);
+
+ AugmentedPacketGenerator packet_generator(kMediaSsrc);
+ for (size_t i = 0; i < kNumFrames; ++i) {
+ packet_generator.NewFrame(kNumPacketsPerFrame);
+ for (size_t j = 0; j < kNumPacketsPerFrame; ++j) {
+ std::unique_ptr<AugmentedPacket> packet =
+ packet_generator.NextPacket(i, kPayloadLength);
+ RtpPacketToSend rtp_packet(nullptr);
+ rtp_packet.Parse(packet->data);
+ sender.AddPacketAndGenerateFec(rtp_packet);
+ }
+ }
+ std::vector<std::unique_ptr<RtpPacketToSend>> fec_packets =
+ sender.GetFecPackets();
+ ASSERT_EQ(1U, fec_packets.size());
+ EXPECT_TRUE(sender.GetFecPackets().empty());
+
+ RtpPacketToSend* fec_packet = fec_packets.front().get();
+ EXPECT_EQ(kRtpHeaderSize, fec_packet->headers_size());
+ EXPECT_FALSE(fec_packet->Marker());
+ EXPECT_EQ(kFlexfecPayloadType, fec_packet->PayloadType());
+ EXPECT_EQ(kDeterministicSequenceNumber, fec_packet->SequenceNumber());
+ EXPECT_EQ(kDeterministicTimestamp, fec_packet->Timestamp());
+ EXPECT_EQ(kFlexfecSsrc, fec_packet->Ssrc());
+}
+
+TEST(FlexfecSenderTest, ProtectTwoFramesWithTwoFecPackets) {
+ // FEC parameters selected to generate a single FEC packet per frame.
+ FecProtectionParams params;
+ params.fec_rate = 30;
+ params.max_fec_frames = 1;
+ params.fec_mask_type = kFecMaskRandom;
+ constexpr size_t kNumFrames = 2;
+ constexpr size_t kNumPacketsPerFrame = 2;
+ SimulatedClock clock(kInitialSimulatedClockTime);
+ FlexfecSender sender(kFlexfecPayloadType, kFlexfecSsrc, kMediaSsrc, kNoMid,
+ kNoRtpHeaderExtensions, kNoRtpHeaderExtensionSizes,
+ nullptr /* rtp_state */, &clock);
+ sender.SetProtectionParameters(params, params);
+
+ AugmentedPacketGenerator packet_generator(kMediaSsrc);
+ for (size_t i = 0; i < kNumFrames; ++i) {
+ packet_generator.NewFrame(kNumPacketsPerFrame);
+ for (size_t j = 0; j < kNumPacketsPerFrame; ++j) {
+ std::unique_ptr<AugmentedPacket> packet =
+ packet_generator.NextPacket(i, kPayloadLength);
+ RtpPacketToSend rtp_packet(nullptr);
+ rtp_packet.Parse(packet->data);
+ sender.AddPacketAndGenerateFec(rtp_packet);
+ }
+ std::vector<std::unique_ptr<RtpPacketToSend>> fec_packets =
+ sender.GetFecPackets();
+ ASSERT_EQ(1U, fec_packets.size());
+ EXPECT_TRUE(sender.GetFecPackets().empty());
+
+ RtpPacketToSend* fec_packet = fec_packets.front().get();
+ EXPECT_EQ(kRtpHeaderSize, fec_packet->headers_size());
+ EXPECT_FALSE(fec_packet->Marker());
+ EXPECT_EQ(kFlexfecPayloadType, fec_packet->PayloadType());
+ EXPECT_EQ(static_cast<uint16_t>(kDeterministicSequenceNumber + i),
+ fec_packet->SequenceNumber());
+ EXPECT_EQ(kDeterministicTimestamp, fec_packet->Timestamp());
+ EXPECT_EQ(kFlexfecSsrc, fec_packet->Ssrc());
+ }
+}
+
+// In these tests, we consider only RTP header extensions that are useful for
+// BWE.
+TEST(FlexfecSenderTest, NoRtpHeaderExtensionsForBweByDefault) {
+ const std::vector<RtpExtension> kRtpHeaderExtensions{};
+ SimulatedClock clock(kInitialSimulatedClockTime);
+ FlexfecSender sender(kFlexfecPayloadType, kFlexfecSsrc, kMediaSsrc, kNoMid,
+ kRtpHeaderExtensions, kNoRtpHeaderExtensionSizes,
+ nullptr /* rtp_state */, &clock);
+ auto fec_packet = GenerateSingleFlexfecPacket(&sender);
+
+ EXPECT_FALSE(fec_packet->HasExtension<AbsoluteSendTime>());
+ EXPECT_FALSE(fec_packet->HasExtension<TransmissionOffset>());
+ EXPECT_FALSE(fec_packet->HasExtension<TransportSequenceNumber>());
+}
+
+TEST(FlexfecSenderTest, RegisterAbsoluteSendTimeRtpHeaderExtension) {
+ const std::vector<RtpExtension> kRtpHeaderExtensions{
+ {RtpExtension::kAbsSendTimeUri, 1}};
+ SimulatedClock clock(kInitialSimulatedClockTime);
+ FlexfecSender sender(kFlexfecPayloadType, kFlexfecSsrc, kMediaSsrc, kNoMid,
+ kRtpHeaderExtensions, kNoRtpHeaderExtensionSizes,
+ nullptr /* rtp_state */, &clock);
+ auto fec_packet = GenerateSingleFlexfecPacket(&sender);
+
+ EXPECT_TRUE(fec_packet->HasExtension<AbsoluteSendTime>());
+ EXPECT_FALSE(fec_packet->HasExtension<TransmissionOffset>());
+ EXPECT_FALSE(fec_packet->HasExtension<TransportSequenceNumber>());
+}
+
+TEST(FlexfecSenderTest, RegisterTransmissionOffsetRtpHeaderExtension) {
+ const std::vector<RtpExtension> kRtpHeaderExtensions{
+ {RtpExtension::kTimestampOffsetUri, 1}};
+ SimulatedClock clock(kInitialSimulatedClockTime);
+ FlexfecSender sender(kFlexfecPayloadType, kFlexfecSsrc, kMediaSsrc, kNoMid,
+ kRtpHeaderExtensions, kNoRtpHeaderExtensionSizes,
+ nullptr /* rtp_state */, &clock);
+ auto fec_packet = GenerateSingleFlexfecPacket(&sender);
+
+ EXPECT_FALSE(fec_packet->HasExtension<AbsoluteSendTime>());
+ EXPECT_TRUE(fec_packet->HasExtension<TransmissionOffset>());
+ EXPECT_FALSE(fec_packet->HasExtension<TransportSequenceNumber>());
+}
+
+TEST(FlexfecSenderTest, RegisterTransportSequenceNumberRtpHeaderExtension) {
+ const std::vector<RtpExtension> kRtpHeaderExtensions{
+ {RtpExtension::kTransportSequenceNumberUri, 1}};
+ SimulatedClock clock(kInitialSimulatedClockTime);
+ FlexfecSender sender(kFlexfecPayloadType, kFlexfecSsrc, kMediaSsrc, kNoMid,
+ kRtpHeaderExtensions, kNoRtpHeaderExtensionSizes,
+ nullptr /* rtp_state */, &clock);
+ auto fec_packet = GenerateSingleFlexfecPacket(&sender);
+
+ EXPECT_FALSE(fec_packet->HasExtension<AbsoluteSendTime>());
+ EXPECT_FALSE(fec_packet->HasExtension<TransmissionOffset>());
+ EXPECT_TRUE(fec_packet->HasExtension<TransportSequenceNumber>());
+}
+
+TEST(FlexfecSenderTest, RegisterAllRtpHeaderExtensionsForBwe) {
+ const std::vector<RtpExtension> kRtpHeaderExtensions{
+ {RtpExtension::kAbsSendTimeUri, 1},
+ {RtpExtension::kTimestampOffsetUri, 2},
+ {RtpExtension::kTransportSequenceNumberUri, 3}};
+ SimulatedClock clock(kInitialSimulatedClockTime);
+ FlexfecSender sender(kFlexfecPayloadType, kFlexfecSsrc, kMediaSsrc, kNoMid,
+ kRtpHeaderExtensions, kNoRtpHeaderExtensionSizes,
+ nullptr /* rtp_state */, &clock);
+ auto fec_packet = GenerateSingleFlexfecPacket(&sender);
+
+ EXPECT_TRUE(fec_packet->HasExtension<AbsoluteSendTime>());
+ EXPECT_TRUE(fec_packet->HasExtension<TransmissionOffset>());
+ EXPECT_TRUE(fec_packet->HasExtension<TransportSequenceNumber>());
+}
+
+TEST(FlexfecSenderTest, MaxPacketOverhead) {
+ SimulatedClock clock(kInitialSimulatedClockTime);
+ FlexfecSender sender(kFlexfecPayloadType, kFlexfecSsrc, kMediaSsrc, kNoMid,
+ kNoRtpHeaderExtensions, kNoRtpHeaderExtensionSizes,
+ nullptr /* rtp_state */, &clock);
+
+ EXPECT_EQ(kFlexfecMaxHeaderSize, sender.MaxPacketOverhead());
+}
+
+TEST(FlexfecSenderTest, MaxPacketOverheadWithExtensions) {
+ const std::vector<RtpExtension> kRtpHeaderExtensions{
+ {RtpExtension::kAbsSendTimeUri, 1},
+ {RtpExtension::kTimestampOffsetUri, 2},
+ {RtpExtension::kTransportSequenceNumberUri, 3}};
+ SimulatedClock clock(kInitialSimulatedClockTime);
+ const size_t kExtensionHeaderLength = 1;
+ const size_t kRtpOneByteHeaderLength = 4;
+ const size_t kExtensionsTotalSize =
+ Word32Align(kRtpOneByteHeaderLength + kExtensionHeaderLength +
+ AbsoluteSendTime::kValueSizeBytes + kExtensionHeaderLength +
+ TransmissionOffset::kValueSizeBytes + kExtensionHeaderLength +
+ TransportSequenceNumber::kValueSizeBytes);
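+  // With the usual one-byte-header extension value sizes (3, 3, and 2
+  // bytes), this is Word32Align(4 + (1 + 3) + (1 + 3) + (1 + 2)) =
+  // Word32Align(15) = 16 bytes.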
+ FlexfecSender sender(kFlexfecPayloadType, kFlexfecSsrc, kMediaSsrc, kNoMid,
+ kRtpHeaderExtensions, RTPSender::FecExtensionSizes(),
+ nullptr /* rtp_state */, &clock);
+
+ EXPECT_EQ(kExtensionsTotalSize + kFlexfecMaxHeaderSize,
+ sender.MaxPacketOverhead());
+}
+
+TEST(FlexfecSenderTest, MidIncludedInPacketsWhenSet) {
+ const std::vector<RtpExtension> kRtpHeaderExtensions{
+ {RtpExtension::kMidUri, 1}};
+ const char kMid[] = "mid";
+ SimulatedClock clock(kInitialSimulatedClockTime);
+ FlexfecSender sender(kFlexfecPayloadType, kFlexfecSsrc, kMediaSsrc, kMid,
+ kRtpHeaderExtensions, RTPSender::FecExtensionSizes(),
+ nullptr /* rtp_state */, &clock);
+
+ auto fec_packet = GenerateSingleFlexfecPacket(&sender);
+
+ std::string mid;
+ ASSERT_TRUE(fec_packet->GetExtension<RtpMid>(&mid));
+ EXPECT_EQ(kMid, mid);
+}
+
+TEST(FlexfecSenderTest, SetsAndGetsRtpState) {
+ RtpState initial_rtp_state;
+ initial_rtp_state.sequence_number = 100;
+ initial_rtp_state.start_timestamp = 200;
+ SimulatedClock clock(kInitialSimulatedClockTime);
+ FlexfecSender sender(kFlexfecPayloadType, kFlexfecSsrc, kMediaSsrc, kNoMid,
+ kNoRtpHeaderExtensions, kNoRtpHeaderExtensionSizes,
+ &initial_rtp_state, &clock);
+
+ auto fec_packet = GenerateSingleFlexfecPacket(&sender);
+ EXPECT_EQ(initial_rtp_state.sequence_number, fec_packet->SequenceNumber());
+ EXPECT_EQ(initial_rtp_state.start_timestamp, fec_packet->Timestamp());
+
+ clock.AdvanceTimeMilliseconds(1000);
+ fec_packet = GenerateSingleFlexfecPacket(&sender);
+ EXPECT_EQ(initial_rtp_state.sequence_number + 1,
+ fec_packet->SequenceNumber());
+ EXPECT_EQ(initial_rtp_state.start_timestamp + 1 * kVideoPayloadTypeFrequency,
+ fec_packet->Timestamp());
+
+ RtpState updated_rtp_state = sender.GetRtpState().value();
+ EXPECT_EQ(initial_rtp_state.sequence_number + 2,
+ updated_rtp_state.sequence_number);
+ EXPECT_EQ(initial_rtp_state.start_timestamp,
+ updated_rtp_state.start_timestamp);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/forward_error_correction.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/forward_error_correction.cc
new file mode 100644
index 0000000000..903d3e7d45
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/forward_error_correction.cc
@@ -0,0 +1,807 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/forward_error_correction.h"
+
+#include <string.h>
+
+#include <algorithm>
+#include <utility>
+
+#include "absl/algorithm/container.h"
+#include "modules/include/module_common_types_public.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "modules/rtp_rtcp/source/byte_io.h"
+#include "modules/rtp_rtcp/source/flexfec_header_reader_writer.h"
+#include "modules/rtp_rtcp/source/forward_error_correction_internal.h"
+#include "modules/rtp_rtcp/source/ulpfec_header_reader_writer.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/numerics/mod_ops.h"
+
+namespace webrtc {
+
+namespace {
+// Transport header size in bytes. Assume UDP/IPv4 as a reasonable minimum.
+constexpr size_t kTransportOverhead = 28;
+
+constexpr uint16_t kOldSequenceThreshold = 0x3fff;
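+// FEC packets whose sequence numbers are more than kOldSequenceThreshold
+// (16383, about a quarter of the sequence number space) away from the
+// newest packet are considered old and are discarded.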
+} // namespace
+
+ForwardErrorCorrection::Packet::Packet() : data(0), ref_count_(0) {}
+ForwardErrorCorrection::Packet::~Packet() = default;
+
+int32_t ForwardErrorCorrection::Packet::AddRef() {
+ return ++ref_count_;
+}
+
+int32_t ForwardErrorCorrection::Packet::Release() {
+ int32_t ref_count;
+ ref_count = --ref_count_;
+ if (ref_count == 0)
+ delete this;
+ return ref_count;
+}
+
+// This comparator is used to compare std::unique_ptrs pointing to
+// subclasses of SortablePacket. It needs to be parametric, since the
+// std::unique_ptrs are not covariant w.r.t. the types that they point to.
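+// Note that IsNewerSequenceNumber() handles wrap-around; e.g., sequence
+// number 0x0001 compares as newer than 0xffff.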
+template <typename S, typename T>
+bool ForwardErrorCorrection::SortablePacket::LessThan::operator()(
+ const S& first,
+ const T& second) {
+ RTC_DCHECK_EQ(first->ssrc, second->ssrc);
+ return IsNewerSequenceNumber(second->seq_num, first->seq_num);
+}
+
+ForwardErrorCorrection::ReceivedPacket::ReceivedPacket() = default;
+ForwardErrorCorrection::ReceivedPacket::~ReceivedPacket() = default;
+
+ForwardErrorCorrection::RecoveredPacket::RecoveredPacket() = default;
+ForwardErrorCorrection::RecoveredPacket::~RecoveredPacket() = default;
+
+ForwardErrorCorrection::ProtectedPacket::ProtectedPacket() = default;
+ForwardErrorCorrection::ProtectedPacket::~ProtectedPacket() = default;
+
+ForwardErrorCorrection::ReceivedFecPacket::ReceivedFecPacket() = default;
+ForwardErrorCorrection::ReceivedFecPacket::~ReceivedFecPacket() = default;
+
+ForwardErrorCorrection::ForwardErrorCorrection(
+ std::unique_ptr<FecHeaderReader> fec_header_reader,
+ std::unique_ptr<FecHeaderWriter> fec_header_writer,
+ uint32_t ssrc,
+ uint32_t protected_media_ssrc)
+ : ssrc_(ssrc),
+ protected_media_ssrc_(protected_media_ssrc),
+ fec_header_reader_(std::move(fec_header_reader)),
+ fec_header_writer_(std::move(fec_header_writer)),
+ generated_fec_packets_(fec_header_writer_->MaxFecPackets()),
+ packet_mask_size_(0) {}
+
+ForwardErrorCorrection::~ForwardErrorCorrection() = default;
+
+std::unique_ptr<ForwardErrorCorrection> ForwardErrorCorrection::CreateUlpfec(
+ uint32_t ssrc) {
+ std::unique_ptr<FecHeaderReader> fec_header_reader(new UlpfecHeaderReader());
+ std::unique_ptr<FecHeaderWriter> fec_header_writer(new UlpfecHeaderWriter());
+ return std::unique_ptr<ForwardErrorCorrection>(new ForwardErrorCorrection(
+ std::move(fec_header_reader), std::move(fec_header_writer), ssrc, ssrc));
+}
+
+std::unique_ptr<ForwardErrorCorrection> ForwardErrorCorrection::CreateFlexfec(
+ uint32_t ssrc,
+ uint32_t protected_media_ssrc) {
+ std::unique_ptr<FecHeaderReader> fec_header_reader(new FlexfecHeaderReader());
+ std::unique_ptr<FecHeaderWriter> fec_header_writer(new FlexfecHeaderWriter());
+ return std::unique_ptr<ForwardErrorCorrection>(new ForwardErrorCorrection(
+ std::move(fec_header_reader), std::move(fec_header_writer), ssrc,
+ protected_media_ssrc));
+}
+
+int ForwardErrorCorrection::EncodeFec(const PacketList& media_packets,
+ uint8_t protection_factor,
+ int num_important_packets,
+ bool use_unequal_protection,
+ FecMaskType fec_mask_type,
+ std::list<Packet*>* fec_packets) {
+ const size_t num_media_packets = media_packets.size();
+
+ // Sanity check arguments.
+ RTC_DCHECK_GT(num_media_packets, 0);
+ RTC_DCHECK_GE(num_important_packets, 0);
+ RTC_DCHECK_LE(num_important_packets, num_media_packets);
+ RTC_DCHECK(fec_packets->empty());
+ const size_t max_media_packets = fec_header_writer_->MaxMediaPackets();
+ if (num_media_packets > max_media_packets) {
+ RTC_LOG(LS_WARNING) << "Can't protect " << num_media_packets
+ << " media packets per frame. Max is "
+ << max_media_packets << ".";
+ return -1;
+ }
+
+ // Error check the media packets.
+ for (const auto& media_packet : media_packets) {
+ RTC_DCHECK(media_packet);
+ if (media_packet->data.size() < kRtpHeaderSize) {
+ RTC_LOG(LS_WARNING) << "Media packet " << media_packet->data.size()
+ << " bytes "
+ "is smaller than RTP header.";
+ return -1;
+ }
+ // Ensure the FEC packets will fit in a typical MTU.
+ if (media_packet->data.size() + MaxPacketOverhead() + kTransportOverhead >
+ IP_PACKET_SIZE) {
+ RTC_LOG(LS_WARNING) << "Media packet " << media_packet->data.size()
+ << " bytes "
+ "with overhead is larger than "
+ << IP_PACKET_SIZE << " bytes.";
+ }
+ }
+
+ // Prepare generated FEC packets.
+ int num_fec_packets = NumFecPackets(num_media_packets, protection_factor);
+ if (num_fec_packets == 0) {
+ return 0;
+ }
+ for (int i = 0; i < num_fec_packets; ++i) {
+ generated_fec_packets_[i].data.EnsureCapacity(IP_PACKET_SIZE);
+ memset(generated_fec_packets_[i].data.MutableData(), 0, IP_PACKET_SIZE);
+    // Use a zero size as a marker for untouched packets.
+ generated_fec_packets_[i].data.SetSize(0);
+ fec_packets->push_back(&generated_fec_packets_[i]);
+ }
+
+ internal::PacketMaskTable mask_table(fec_mask_type, num_media_packets);
+ packet_mask_size_ = internal::PacketMaskSize(num_media_packets);
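+  // (PacketMaskSize() returns 2 bytes for up to 16 packets and 6 bytes
+  // otherwise.)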
+ memset(packet_masks_, 0, num_fec_packets * packet_mask_size_);
+ internal::GeneratePacketMasks(num_media_packets, num_fec_packets,
+ num_important_packets, use_unequal_protection,
+ &mask_table, packet_masks_);
+
+ // Adapt packet masks to missing media packets.
+ int num_mask_bits = InsertZerosInPacketMasks(media_packets, num_fec_packets);
+ if (num_mask_bits < 0) {
+ RTC_LOG(LS_INFO) << "Due to sequence number gaps, cannot protect media "
+ "packets with a single block of FEC packets.";
+ fec_packets->clear();
+ return -1;
+ }
+ packet_mask_size_ = internal::PacketMaskSize(num_mask_bits);
+
+ // Write FEC packets to `generated_fec_packets_`.
+ GenerateFecPayloads(media_packets, num_fec_packets);
+ // TODO(brandtr): Generalize this when multistream protection support is
+ // added.
+ const uint32_t media_ssrc = ParseSsrc(media_packets.front()->data.data());
+ const uint16_t seq_num_base =
+ ParseSequenceNumber(media_packets.front()->data.data());
+ FinalizeFecHeaders(num_fec_packets, media_ssrc, seq_num_base);
+
+ return 0;
+}
+
+int ForwardErrorCorrection::NumFecPackets(int num_media_packets,
+ int protection_factor) {
+  // `protection_factor` is in Q8; round the product to the nearest integer.
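+  // For example, 10 media packets with protection_factor 127 (~50%) give
+  // (10 * 127 + 128) >> 8 = 5 FEC packets.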
+ int num_fec_packets = (num_media_packets * protection_factor + (1 << 7)) >> 8;
+ // Generate at least one FEC packet if we need protection.
+ if (protection_factor > 0 && num_fec_packets == 0) {
+ num_fec_packets = 1;
+ }
+ RTC_DCHECK_LE(num_fec_packets, num_media_packets);
+ return num_fec_packets;
+}
+
+void ForwardErrorCorrection::GenerateFecPayloads(
+ const PacketList& media_packets,
+ size_t num_fec_packets) {
+ RTC_DCHECK(!media_packets.empty());
+ for (size_t i = 0; i < num_fec_packets; ++i) {
+ Packet* const fec_packet = &generated_fec_packets_[i];
+ size_t pkt_mask_idx = i * packet_mask_size_;
+ const size_t min_packet_mask_size = fec_header_writer_->MinPacketMaskSize(
+ &packet_masks_[pkt_mask_idx], packet_mask_size_);
+ const size_t fec_header_size =
+ fec_header_writer_->FecHeaderSize(min_packet_mask_size);
+
+ size_t media_pkt_idx = 0;
+ auto media_packets_it = media_packets.cbegin();
+ uint16_t prev_seq_num =
+ ParseSequenceNumber((*media_packets_it)->data.data());
+ while (media_packets_it != media_packets.end()) {
+ Packet* const media_packet = media_packets_it->get();
+ // Should `media_packet` be protected by `fec_packet`?
+ if (packet_masks_[pkt_mask_idx] & (1 << (7 - media_pkt_idx))) {
+ size_t media_payload_length =
+ media_packet->data.size() - kRtpHeaderSize;
+
+ size_t fec_packet_length = fec_header_size + media_payload_length;
+ if (fec_packet_length > fec_packet->data.size()) {
+ // Recall that XORing with zero (which the FEC packets are prefilled
+ // with) is the identity operator, thus all prior XORs are
+ // still correct even though we expand the packet length here.
+ fec_packet->data.SetSize(fec_packet_length);
+ }
+ XorHeaders(*media_packet, fec_packet);
+ XorPayloads(*media_packet, media_payload_length, fec_header_size,
+ fec_packet);
+ }
+ media_packets_it++;
+ if (media_packets_it != media_packets.end()) {
+ uint16_t seq_num =
+ ParseSequenceNumber((*media_packets_it)->data.data());
+ media_pkt_idx += static_cast<uint16_t>(seq_num - prev_seq_num);
+ prev_seq_num = seq_num;
+ }
+ pkt_mask_idx += media_pkt_idx / 8;
+ media_pkt_idx %= 8;
+ }
+ RTC_DCHECK_GT(fec_packet->data.size(), 0)
+ << "Packet mask is wrong or poorly designed.";
+ }
+}
+
+int ForwardErrorCorrection::InsertZerosInPacketMasks(
+ const PacketList& media_packets,
+ size_t num_fec_packets) {
+ size_t num_media_packets = media_packets.size();
+ if (num_media_packets <= 1) {
+ return num_media_packets;
+ }
+ uint16_t last_seq_num =
+ ParseSequenceNumber(media_packets.back()->data.data());
+ uint16_t first_seq_num =
+ ParseSequenceNumber(media_packets.front()->data.data());
+ size_t total_missing_seq_nums =
+ static_cast<uint16_t>(last_seq_num - first_seq_num) - num_media_packets +
+ 1;
+ if (total_missing_seq_nums == 0) {
+ // All sequence numbers are covered by the packet mask.
+ // No zero insertion required.
+ return num_media_packets;
+ }
+ const size_t max_media_packets = fec_header_writer_->MaxMediaPackets();
+ if (total_missing_seq_nums + num_media_packets > max_media_packets) {
+ return -1;
+ }
+ // Allocate the new mask.
+ size_t tmp_packet_mask_size =
+ internal::PacketMaskSize(total_missing_seq_nums + num_media_packets);
+ memset(tmp_packet_masks_, 0, num_fec_packets * tmp_packet_mask_size);
+
+ auto media_packets_it = media_packets.cbegin();
+ uint16_t prev_seq_num = first_seq_num;
+ ++media_packets_it;
+
+ // Insert the first column.
+ internal::CopyColumn(tmp_packet_masks_, tmp_packet_mask_size, packet_masks_,
+ packet_mask_size_, num_fec_packets, 0, 0);
+ size_t new_bit_index = 1;
+ size_t old_bit_index = 1;
+ // Insert zeros in the bit mask for every hole in the sequence.
+ while (media_packets_it != media_packets.end()) {
+ if (new_bit_index == max_media_packets) {
+      // We can only cover up to `max_media_packets` packets.
+ break;
+ }
+ uint16_t seq_num = ParseSequenceNumber((*media_packets_it)->data.data());
+ const int num_zeros_to_insert =
+ static_cast<uint16_t>(seq_num - prev_seq_num - 1);
+ if (num_zeros_to_insert > 0) {
+ internal::InsertZeroColumns(num_zeros_to_insert, tmp_packet_masks_,
+ tmp_packet_mask_size, num_fec_packets,
+ new_bit_index);
+ }
+ new_bit_index += num_zeros_to_insert;
+ internal::CopyColumn(tmp_packet_masks_, tmp_packet_mask_size, packet_masks_,
+ packet_mask_size_, num_fec_packets, new_bit_index,
+ old_bit_index);
+ ++new_bit_index;
+ ++old_bit_index;
+ prev_seq_num = seq_num;
+ ++media_packets_it;
+ }
+ if (new_bit_index % 8 != 0) {
+    // We didn't fill the last byte. Shift bits to the correct position.
+ for (uint16_t row = 0; row < num_fec_packets; ++row) {
+ int new_byte_index = row * tmp_packet_mask_size + new_bit_index / 8;
+ tmp_packet_masks_[new_byte_index] <<= (7 - (new_bit_index % 8));
+ }
+ }
+ // Replace the old mask with the new.
+ memcpy(packet_masks_, tmp_packet_masks_,
+ num_fec_packets * tmp_packet_mask_size);
+ return new_bit_index;
+}
+
+void ForwardErrorCorrection::FinalizeFecHeaders(size_t num_fec_packets,
+ uint32_t media_ssrc,
+ uint16_t seq_num_base) {
+ for (size_t i = 0; i < num_fec_packets; ++i) {
+ fec_header_writer_->FinalizeFecHeader(
+ media_ssrc, seq_num_base, &packet_masks_[i * packet_mask_size_],
+ packet_mask_size_, &generated_fec_packets_[i]);
+ }
+}
+
+void ForwardErrorCorrection::ResetState(
+ RecoveredPacketList* recovered_packets) {
+ // Free the memory for any existing recovered packets, if the caller hasn't.
+ recovered_packets->clear();
+ received_fec_packets_.clear();
+}
+
+void ForwardErrorCorrection::InsertMediaPacket(
+ RecoveredPacketList* recovered_packets,
+ const ReceivedPacket& received_packet) {
+ RTC_DCHECK_EQ(received_packet.ssrc, protected_media_ssrc_);
+
+ // Search for duplicate packets.
+ for (const auto& recovered_packet : *recovered_packets) {
+ RTC_DCHECK_EQ(recovered_packet->ssrc, received_packet.ssrc);
+ if (recovered_packet->seq_num == received_packet.seq_num) {
+ // Duplicate packet, no need to add to list.
+ return;
+ }
+ }
+
+ std::unique_ptr<RecoveredPacket> recovered_packet(new RecoveredPacket());
+ // This "recovered packet" was not recovered using parity packets.
+ recovered_packet->was_recovered = false;
+ // This media packet has already been passed on.
+ recovered_packet->returned = true;
+ recovered_packet->ssrc = received_packet.ssrc;
+ recovered_packet->seq_num = received_packet.seq_num;
+ recovered_packet->pkt = received_packet.pkt;
+ // TODO(holmer): Consider replacing this with a binary search for the right
+ // position, and then just insert the new packet. Would get rid of the sort.
+ RecoveredPacket* recovered_packet_ptr = recovered_packet.get();
+ recovered_packets->push_back(std::move(recovered_packet));
+ recovered_packets->sort(SortablePacket::LessThan());
+ UpdateCoveringFecPackets(*recovered_packet_ptr);
+}
+
+void ForwardErrorCorrection::UpdateCoveringFecPackets(
+ const RecoveredPacket& packet) {
+ for (auto& fec_packet : received_fec_packets_) {
+ // Is this FEC packet protecting the media packet `packet`?
+ auto protected_it = absl::c_lower_bound(
+ fec_packet->protected_packets, &packet, SortablePacket::LessThan());
+ if (protected_it != fec_packet->protected_packets.end() &&
+ (*protected_it)->seq_num == packet.seq_num) {
+ // Found an FEC packet which is protecting `packet`.
+ (*protected_it)->pkt = packet.pkt;
+ }
+ }
+}
+
+void ForwardErrorCorrection::InsertFecPacket(
+ const RecoveredPacketList& recovered_packets,
+ const ReceivedPacket& received_packet) {
+ RTC_DCHECK_EQ(received_packet.ssrc, ssrc_);
+
+ // Check for duplicate.
+ for (const auto& existing_fec_packet : received_fec_packets_) {
+ RTC_DCHECK_EQ(existing_fec_packet->ssrc, received_packet.ssrc);
+ if (existing_fec_packet->seq_num == received_packet.seq_num) {
+ // Drop duplicate FEC packet data.
+ return;
+ }
+ }
+
+ std::unique_ptr<ReceivedFecPacket> fec_packet(new ReceivedFecPacket());
+ fec_packet->pkt = received_packet.pkt;
+ fec_packet->ssrc = received_packet.ssrc;
+ fec_packet->seq_num = received_packet.seq_num;
+ // Parse ULPFEC/FlexFEC header specific info.
+ bool ret = fec_header_reader_->ReadFecHeader(fec_packet.get());
+ if (!ret) {
+ return;
+ }
+
+ // TODO(brandtr): Update here when we support multistream protection.
+ if (fec_packet->protected_ssrc != protected_media_ssrc_) {
+ RTC_LOG(LS_INFO)
+ << "Received FEC packet is protecting an unknown media SSRC; dropping.";
+ return;
+ }
+
+ if (fec_packet->packet_mask_offset + fec_packet->packet_mask_size >
+ fec_packet->pkt->data.size()) {
+ RTC_LOG(LS_INFO) << "Received corrupted FEC packet; dropping.";
+ return;
+ }
+
+ // Parse packet mask from header and represent as protected packets.
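+  // For example, a mask byte 0b10100000 with seq_num_base 100 yields
+  // protected sequence numbers 100 and 102.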
+ for (uint16_t byte_idx = 0; byte_idx < fec_packet->packet_mask_size;
+ ++byte_idx) {
+ uint8_t packet_mask =
+ fec_packet->pkt->data[fec_packet->packet_mask_offset + byte_idx];
+ for (uint16_t bit_idx = 0; bit_idx < 8; ++bit_idx) {
+ if (packet_mask & (1 << (7 - bit_idx))) {
+ std::unique_ptr<ProtectedPacket> protected_packet(
+ new ProtectedPacket());
+        protected_packet->ssrc = protected_media_ssrc_;
+        // The cast wraps the sequence number naturally in uint16_t
+        // arithmetic.
+        protected_packet->seq_num = static_cast<uint16_t>(
+            fec_packet->seq_num_base + (byte_idx << 3) + bit_idx);
+ protected_packet->pkt = nullptr;
+ fec_packet->protected_packets.push_back(std::move(protected_packet));
+ }
+ }
+ }
+
+ if (fec_packet->protected_packets.empty()) {
+ // All-zero packet mask; we can discard this FEC packet.
+ RTC_LOG(LS_WARNING) << "Received FEC packet has an all-zero packet mask.";
+ } else {
+ AssignRecoveredPackets(recovered_packets, fec_packet.get());
+ // TODO(holmer): Consider replacing this with a binary search for the right
+ // position, and then just insert the new packet. Would get rid of the sort.
+ received_fec_packets_.push_back(std::move(fec_packet));
+ received_fec_packets_.sort(SortablePacket::LessThan());
+ const size_t max_fec_packets = fec_header_reader_->MaxFecPackets();
+ if (received_fec_packets_.size() > max_fec_packets) {
+ received_fec_packets_.pop_front();
+ }
+ RTC_DCHECK_LE(received_fec_packets_.size(), max_fec_packets);
+ }
+}
+
+void ForwardErrorCorrection::AssignRecoveredPackets(
+ const RecoveredPacketList& recovered_packets,
+ ReceivedFecPacket* fec_packet) {
+ ProtectedPacketList* protected_packets = &fec_packet->protected_packets;
+ std::vector<RecoveredPacket*> recovered_protected_packets;
+
+ // Find intersection between the (sorted) containers `protected_packets`
+ // and `recovered_packets`, i.e. all protected packets that have already
+ // been recovered. Update the corresponding protected packets to point to
+ // the recovered packets.
+ auto it_p = protected_packets->cbegin();
+ auto it_r = recovered_packets.cbegin();
+ SortablePacket::LessThan less_than;
+ while (it_p != protected_packets->end() && it_r != recovered_packets.end()) {
+ if (less_than(*it_p, *it_r)) {
+ ++it_p;
+ } else if (less_than(*it_r, *it_p)) {
+ ++it_r;
+ } else { // *it_p == *it_r.
+ // This protected packet has already been recovered.
+ (*it_p)->pkt = (*it_r)->pkt;
+ ++it_p;
+ ++it_r;
+ }
+ }
+}
+
+void ForwardErrorCorrection::InsertPacket(
+ const ReceivedPacket& received_packet,
+ RecoveredPacketList* recovered_packets) {
+ // Discard old FEC packets such that the sequence numbers in
+ // `received_fec_packets_` span at most 1/2 of the sequence number space.
+ // This is important for keeping `received_fec_packets_` sorted, and may
+ // also reduce the possibility of incorrect decoding due to sequence number
+ // wrap-around.
+ if (!received_fec_packets_.empty() &&
+ received_packet.ssrc == received_fec_packets_.front()->ssrc) {
+ // It only makes sense to detect wrap-around when `received_packet`
+ // and `front_received_fec_packet` belong to the same sequence number
+ // space, i.e., the same SSRC. This happens when `received_packet`
+ // is a FEC packet, or if `received_packet` is a media packet and
+ // RED+ULPFEC is used.
+ auto it = received_fec_packets_.begin();
+ while (it != received_fec_packets_.end()) {
+ uint16_t seq_num_diff = MinDiff(received_packet.seq_num, (*it)->seq_num);
+ if (seq_num_diff > kOldSequenceThreshold) {
+ it = received_fec_packets_.erase(it);
+ } else {
+ // No need to keep iterating, since `received_fec_packets_` is sorted.
+ break;
+ }
+ }
+ }
+
+ if (received_packet.is_fec) {
+ InsertFecPacket(*recovered_packets, received_packet);
+ } else {
+ InsertMediaPacket(recovered_packets, received_packet);
+ }
+
+ DiscardOldRecoveredPackets(recovered_packets);
+}
+
+bool ForwardErrorCorrection::StartPacketRecovery(
+ const ReceivedFecPacket& fec_packet,
+ RecoveredPacket* recovered_packet) {
+ // Ensure pkt is initialized.
+ recovered_packet->pkt = new Packet();
+ // Sanity check packet length.
+ if (fec_packet.pkt->data.size() <
+ fec_packet.fec_header_size + fec_packet.protection_length) {
+ RTC_LOG(LS_WARNING)
+ << "The FEC packet is truncated: it does not contain enough room "
+ "for its own header.";
+ return false;
+ }
+ if (fec_packet.protection_length >
+ std::min(size_t{IP_PACKET_SIZE - kRtpHeaderSize},
+ IP_PACKET_SIZE - fec_packet.fec_header_size)) {
+ RTC_LOG(LS_WARNING) << "Incorrect protection length, dropping FEC packet.";
+ return false;
+ }
+ // Initialize recovered packet data.
+ recovered_packet->pkt->data.EnsureCapacity(IP_PACKET_SIZE);
+ recovered_packet->pkt->data.SetSize(fec_packet.protection_length +
+ kRtpHeaderSize);
+ recovered_packet->returned = false;
+ recovered_packet->was_recovered = true;
+ // Copy bytes corresponding to minimum RTP header size.
+ // Note that the sequence number and SSRC fields will be overwritten
+ // at the end of packet recovery.
+ memcpy(recovered_packet->pkt->data.MutableData(),
+ fec_packet.pkt->data.cdata(), kRtpHeaderSize);
+ // Copy remaining FEC payload.
+ if (fec_packet.protection_length > 0) {
+ memcpy(recovered_packet->pkt->data.MutableData() + kRtpHeaderSize,
+ fec_packet.pkt->data.cdata() + fec_packet.fec_header_size,
+ fec_packet.protection_length);
+ }
+ return true;
+}
+
+bool ForwardErrorCorrection::FinishPacketRecovery(
+ const ReceivedFecPacket& fec_packet,
+ RecoveredPacket* recovered_packet) {
+ uint8_t* data = recovered_packet->pkt->data.MutableData();
+  // Set the RTP version to 2 by forcing the top two bits of the first byte
+  // to binary 10.
+  data[0] |= 0x80;  // Set the most significant bit.
+  data[0] &= 0xbf;  // Clear the second most significant bit.
+  // Recover the packet length from the length recovery field, which was
+  // temporarily stored in bytes 2-3.
+ const size_t new_size =
+ ByteReader<uint16_t>::ReadBigEndian(&data[2]) + kRtpHeaderSize;
+ if (new_size > size_t{IP_PACKET_SIZE - kRtpHeaderSize}) {
+ RTC_LOG(LS_WARNING) << "The recovered packet had a length larger than a "
+ "typical IP packet, and is thus dropped.";
+ return false;
+ }
+ recovered_packet->pkt->data.SetSize(new_size);
+ // Set the SN field.
+ ByteWriter<uint16_t>::WriteBigEndian(&data[2], recovered_packet->seq_num);
+ // Set the SSRC field.
+ ByteWriter<uint32_t>::WriteBigEndian(&data[8], fec_packet.protected_ssrc);
+ recovered_packet->ssrc = fec_packet.protected_ssrc;
+ return true;
+}
+
+void ForwardErrorCorrection::XorHeaders(const Packet& src, Packet* dst) {
+ uint8_t* dst_data = dst->data.MutableData();
+ const uint8_t* src_data = src.data.cdata();
+ // XOR the first 2 bytes of the header: V, P, X, CC, M, PT fields.
+ dst_data[0] ^= src_data[0];
+ dst_data[1] ^= src_data[1];
+
+ // XOR the length recovery field.
+ uint8_t src_payload_length_network_order[2];
+ ByteWriter<uint16_t>::WriteBigEndian(src_payload_length_network_order,
+ src.data.size() - kRtpHeaderSize);
+ dst_data[2] ^= src_payload_length_network_order[0];
+ dst_data[3] ^= src_payload_length_network_order[1];
+
+ // XOR the 5th to 8th bytes of the header: the timestamp field.
+ dst_data[4] ^= src_data[4];
+ dst_data[5] ^= src_data[5];
+ dst_data[6] ^= src_data[6];
+ dst_data[7] ^= src_data[7];
+
+  // Skip the 9th to 12th bytes of the header (the SSRC field).
+}
+
+void ForwardErrorCorrection::XorPayloads(const Packet& src,
+ size_t payload_length,
+ size_t dst_offset,
+ Packet* dst) {
+ // XOR the payload.
+ RTC_DCHECK_LE(kRtpHeaderSize + payload_length, src.data.size());
+ RTC_DCHECK_LE(dst_offset + payload_length, dst->data.capacity());
+ if (dst_offset + payload_length > dst->data.size()) {
+ dst->data.SetSize(dst_offset + payload_length);
+ }
+ uint8_t* dst_data = dst->data.MutableData();
+ const uint8_t* src_data = src.data.cdata();
+ for (size_t i = 0; i < payload_length; ++i) {
+ dst_data[dst_offset + i] ^= src_data[kRtpHeaderSize + i];
+ }
+}
+
+bool ForwardErrorCorrection::RecoverPacket(const ReceivedFecPacket& fec_packet,
+ RecoveredPacket* recovered_packet) {
+ if (!StartPacketRecovery(fec_packet, recovered_packet)) {
+ return false;
+ }
+ for (const auto& protected_packet : fec_packet.protected_packets) {
+ if (protected_packet->pkt == nullptr) {
+ // This is the packet we're recovering.
+ recovered_packet->seq_num = protected_packet->seq_num;
+ } else {
+ XorHeaders(*protected_packet->pkt, recovered_packet->pkt.get());
+ XorPayloads(*protected_packet->pkt,
+ protected_packet->pkt->data.size() - kRtpHeaderSize,
+ kRtpHeaderSize, recovered_packet->pkt.get());
+ }
+ }
+ if (!FinishPacketRecovery(fec_packet, recovered_packet)) {
+ return false;
+ }
+ return true;
+}
+
+void ForwardErrorCorrection::AttemptRecovery(
+ RecoveredPacketList* recovered_packets) {
+ auto fec_packet_it = received_fec_packets_.begin();
+ while (fec_packet_it != received_fec_packets_.end()) {
+ // Search for each FEC packet's protected media packets.
+ int packets_missing = NumCoveredPacketsMissing(**fec_packet_it);
+
+ // We can only recover one packet with an FEC packet.
+ if (packets_missing == 1) {
+ // Recovery possible.
+ std::unique_ptr<RecoveredPacket> recovered_packet(new RecoveredPacket());
+ recovered_packet->pkt = nullptr;
+ if (!RecoverPacket(**fec_packet_it, recovered_packet.get())) {
+ // Can't recover using this packet, drop it.
+ fec_packet_it = received_fec_packets_.erase(fec_packet_it);
+ continue;
+ }
+
+ auto* recovered_packet_ptr = recovered_packet.get();
+ // Add recovered packet to the list of recovered packets and update any
+ // FEC packets covering this packet with a pointer to the data.
+ // TODO(holmer): Consider replacing this with a binary search for the
+ // right position, and then just insert the new packet. Would get rid of
+ // the sort.
+ recovered_packets->push_back(std::move(recovered_packet));
+ recovered_packets->sort(SortablePacket::LessThan());
+ UpdateCoveringFecPackets(*recovered_packet_ptr);
+ DiscardOldRecoveredPackets(recovered_packets);
+ fec_packet_it = received_fec_packets_.erase(fec_packet_it);
+
+ // A packet has been recovered. We need to check the FEC list again, as
+ // this may allow additional packets to be recovered.
+ // Restart for first FEC packet.
+ fec_packet_it = received_fec_packets_.begin();
+ } else if (packets_missing == 0 ||
+ IsOldFecPacket(**fec_packet_it, recovered_packets)) {
+ // Either all protected packets arrived or have been recovered, or the FEC
+ // packet is old. We can discard this FEC packet.
+ fec_packet_it = received_fec_packets_.erase(fec_packet_it);
+ } else {
+ fec_packet_it++;
+ }
+ }
+}
+
+int ForwardErrorCorrection::NumCoveredPacketsMissing(
+ const ReceivedFecPacket& fec_packet) {
+ int packets_missing = 0;
+ for (const auto& protected_packet : fec_packet.protected_packets) {
+ if (protected_packet->pkt == nullptr) {
+ ++packets_missing;
+ if (packets_missing > 1) {
+ break; // We can't recover more than one packet.
+ }
+ }
+ }
+ return packets_missing;
+}
+
+void ForwardErrorCorrection::DiscardOldRecoveredPackets(
+ RecoveredPacketList* recovered_packets) {
+ const size_t max_media_packets = fec_header_reader_->MaxMediaPackets();
+ while (recovered_packets->size() > max_media_packets) {
+ recovered_packets->pop_front();
+ }
+ RTC_DCHECK_LE(recovered_packets->size(), max_media_packets);
+}
+
+bool ForwardErrorCorrection::IsOldFecPacket(
+ const ReceivedFecPacket& fec_packet,
+ const RecoveredPacketList* recovered_packets) {
+ if (recovered_packets->empty()) {
+ return false;
+ }
+
+ const uint16_t back_recovered_seq_num = recovered_packets->back()->seq_num;
+ const uint16_t last_protected_seq_num =
+ fec_packet.protected_packets.back()->seq_num;
+
+ // FEC packet is old if its last protected sequence number is much
+ // older than the latest protected sequence number received.
+ return (MinDiff(back_recovered_seq_num, last_protected_seq_num) >
+ kOldSequenceThreshold);
+}
+
+uint16_t ForwardErrorCorrection::ParseSequenceNumber(const uint8_t* packet) {
+ return (packet[2] << 8) + packet[3];
+}
+
+uint32_t ForwardErrorCorrection::ParseSsrc(const uint8_t* packet) {
+ return (packet[8] << 24) + (packet[9] << 16) + (packet[10] << 8) + packet[11];
+}
+
+void ForwardErrorCorrection::DecodeFec(const ReceivedPacket& received_packet,
+ RecoveredPacketList* recovered_packets) {
+ RTC_DCHECK(recovered_packets);
+
+ const size_t max_media_packets = fec_header_reader_->MaxMediaPackets();
+ if (recovered_packets->size() == max_media_packets) {
+ const RecoveredPacket* back_recovered_packet =
+ recovered_packets->back().get();
+
+ if (received_packet.ssrc == back_recovered_packet->ssrc) {
+ const unsigned int seq_num_diff =
+ MinDiff(received_packet.seq_num, back_recovered_packet->seq_num);
+ if (seq_num_diff > max_media_packets) {
+ // A big gap in sequence numbers. The old recovered packets
+ // are now useless, so it's safe to do a reset.
+ RTC_LOG(LS_INFO) << "Big gap in media/ULPFEC sequence numbers. No need "
+ "to keep the old packets in the FEC buffers, thus "
+ "resetting them.";
+ ResetState(recovered_packets);
+ }
+ }
+ }
+
+ InsertPacket(received_packet, recovered_packets);
+ AttemptRecovery(recovered_packets);
+}
+
+size_t ForwardErrorCorrection::MaxPacketOverhead() const {
+ return fec_header_writer_->MaxPacketOverhead();
+}
+
+FecHeaderReader::FecHeaderReader(size_t max_media_packets,
+ size_t max_fec_packets)
+ : max_media_packets_(max_media_packets),
+ max_fec_packets_(max_fec_packets) {}
+
+FecHeaderReader::~FecHeaderReader() = default;
+
+size_t FecHeaderReader::MaxMediaPackets() const {
+ return max_media_packets_;
+}
+
+size_t FecHeaderReader::MaxFecPackets() const {
+ return max_fec_packets_;
+}
+
+FecHeaderWriter::FecHeaderWriter(size_t max_media_packets,
+ size_t max_fec_packets,
+ size_t max_packet_overhead)
+ : max_media_packets_(max_media_packets),
+ max_fec_packets_(max_fec_packets),
+ max_packet_overhead_(max_packet_overhead) {}
+
+FecHeaderWriter::~FecHeaderWriter() = default;
+
+size_t FecHeaderWriter::MaxMediaPackets() const {
+ return max_media_packets_;
+}
+
+size_t FecHeaderWriter::MaxFecPackets() const {
+ return max_fec_packets_;
+}
+
+size_t FecHeaderWriter::MaxPacketOverhead() const {
+ return max_packet_overhead_;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/forward_error_correction.h b/third_party/libwebrtc/modules/rtp_rtcp/source/forward_error_correction.h
new file mode 100644
index 0000000000..0ebe1c5091
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/forward_error_correction.h
@@ -0,0 +1,421 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_RTP_RTCP_SOURCE_FORWARD_ERROR_CORRECTION_H_
+#define MODULES_RTP_RTCP_SOURCE_FORWARD_ERROR_CORRECTION_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <list>
+#include <memory>
+#include <vector>
+
+#include "api/scoped_refptr.h"
+#include "modules/include/module_fec_types.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "modules/rtp_rtcp/source/forward_error_correction_internal.h"
+#include "rtc_base/copy_on_write_buffer.h"
+
+namespace webrtc {
+
+class FecHeaderReader;
+class FecHeaderWriter;
+
+// Performs codec-independent forward error correction (FEC), based on RFC 5109.
+// An option exists to enable unequal protection (UEP) across packets.
+// This is not to be confused with protection within packets
+// (referred to as uneven level protection (ULP) in RFC 5109).
+// TODO(brandtr): Split this class into a separate encoder
+// and a separate decoder.
+class ForwardErrorCorrection {
+ public:
+ // TODO(holmer): As a next step all these struct-like packet classes should be
+ // refactored into proper classes, and their members should be made private.
+ // This will require parts of the functionality in forward_error_correction.cc
+ // and receiver_fec.cc to be refactored into the packet classes.
+ class Packet {
+ public:
+ Packet();
+ virtual ~Packet();
+
+ // Add a reference.
+ virtual int32_t AddRef();
+
+ // Release a reference. Will delete the object if the reference count
+ // reaches zero.
+ virtual int32_t Release();
+
+ rtc::CopyOnWriteBuffer data; // Packet data.
+
+ private:
+ int32_t ref_count_; // Counts the number of references to a packet.
+ };
+
+ // TODO(holmer): Refactor into a proper class.
+ class SortablePacket {
+ public:
+ // Functor which returns true if the sequence number of `first`
+ // is < the sequence number of `second`. Should only ever be called for
+ // packets belonging to the same SSRC.
+ struct LessThan {
+ template <typename S, typename T>
+ bool operator()(const S& first, const T& second);
+ };
+
+ uint32_t ssrc;
+ uint16_t seq_num;
+ };
+
+ // Used for the input to DecodeFec().
+ class ReceivedPacket : public SortablePacket {
+ public:
+ ReceivedPacket();
+ ~ReceivedPacket();
+
+ bool is_fec; // Set to true if this is an FEC packet and false
+ // otherwise.
+ bool is_recovered;
+ rtc::scoped_refptr<Packet> pkt; // Pointer to the packet storage.
+ };
+
+ // The recovered list parameter of DecodeFec() references structs of
+ // this type.
+ // TODO(holmer): Refactor into a proper class.
+ class RecoveredPacket : public SortablePacket {
+ public:
+ RecoveredPacket();
+ ~RecoveredPacket();
+
+ bool was_recovered; // Will be true if this packet was recovered by
+ // the FEC. Otherwise it was a media packet passed in
+ // through the received packet list.
+ bool returned; // True when the packet already has been returned to the
+ // caller through the callback.
+ rtc::scoped_refptr<Packet> pkt; // Pointer to the packet storage.
+ };
+
+ // Used to link media packets to their protecting FEC packets.
+ //
+ // TODO(holmer): Refactor into a proper class.
+ class ProtectedPacket : public SortablePacket {
+ public:
+ ProtectedPacket();
+ ~ProtectedPacket();
+
+ rtc::scoped_refptr<ForwardErrorCorrection::Packet> pkt;
+ };
+
+ using ProtectedPacketList = std::list<std::unique_ptr<ProtectedPacket>>;
+
+ // Used for internal storage of received FEC packets in a list.
+ //
+ // TODO(holmer): Refactor into a proper class.
+ class ReceivedFecPacket : public SortablePacket {
+ public:
+ ReceivedFecPacket();
+ ~ReceivedFecPacket();
+
+ // List of media packets that this FEC packet protects.
+ ProtectedPacketList protected_packets;
+ // RTP header fields.
+ uint32_t ssrc;
+ // FEC header fields.
+ size_t fec_header_size;
+ uint32_t protected_ssrc;
+ uint16_t seq_num_base;
+ size_t packet_mask_offset; // Relative start of FEC header.
+ size_t packet_mask_size;
+ size_t protection_length;
+ // Raw data.
+ rtc::scoped_refptr<ForwardErrorCorrection::Packet> pkt;
+ };
+
+ using PacketList = std::list<std::unique_ptr<Packet>>;
+ using RecoveredPacketList = std::list<std::unique_ptr<RecoveredPacket>>;
+ using ReceivedFecPacketList = std::list<std::unique_ptr<ReceivedFecPacket>>;
+
+ ~ForwardErrorCorrection();
+
+ // Creates a ForwardErrorCorrection tailored for a specific FEC scheme.
+ static std::unique_ptr<ForwardErrorCorrection> CreateUlpfec(uint32_t ssrc);
+ static std::unique_ptr<ForwardErrorCorrection> CreateFlexfec(
+ uint32_t ssrc,
+ uint32_t protected_media_ssrc);
+
+ // Generates a list of FEC packets from supplied media packets.
+ //
+ // Input: media_packets List of media packets to protect, of type
+ // Packet. All packets must belong to the
+ // same frame and the list must not be empty.
+ // Input: protection_factor FEC protection overhead in the [0, 255]
+ // domain. To obtain 100% overhead, or an
+ // equal number of FEC packets as
+ // media packets, use 255.
+ // Input: num_important_packets The number of "important" packets in the
+ // frame. These packets may receive greater
+ // protection than the remaining packets.
+ // The important packets must be located at the
+ // start of the media packet list. For codecs
+ // with data partitioning, the important
+ // packets may correspond to first partition
+ // packets.
+ // Input: use_unequal_protection Parameter to enable/disable unequal
+ // protection (UEP) across packets. Enabling
+ // UEP will allocate more protection to the
+ // num_important_packets from the start of the
+ // media_packets.
+ // Input: fec_mask_type The type of packet mask used in the FEC.
+ // Random or bursty type may be selected. The
+ // bursty type is only defined up to 12 media
+ // packets. If the number of media packets is
+ // above 12, the packet masks from the random
+ // table will be selected.
+ // Output: fec_packets List of pointers to generated FEC packets,
+ // of type Packet. Must be empty on entry.
+ // The memory available through the list will
+ // be valid until the next call to
+ // EncodeFec().
+ //
+ // Returns 0 on success, -1 on failure.
+ //
+ int EncodeFec(const PacketList& media_packets,
+ uint8_t protection_factor,
+ int num_important_packets,
+ bool use_unequal_protection,
+ FecMaskType fec_mask_type,
+ std::list<Packet*>* fec_packets);
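+  //
+  // A minimal encode-side sketch (assuming `media_packets` already holds
+  // one frame's RTP packets):
+  //
+  //   auto fec = ForwardErrorCorrection::CreateUlpfec(ssrc);
+  //   std::list<ForwardErrorCorrection::Packet*> fec_packets;
+  //   fec->EncodeFec(media_packets, /*protection_factor=*/127,
+  //                  /*num_important_packets=*/0,
+  //                  /*use_unequal_protection=*/false,
+  //                  kFecMaskRandom, &fec_packets);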
+
+  // Decodes media and FEC packets as they arrive. An FEC packet is stored
+  // internally, while a media packet is moved to `recovered_packets`.
+  // The recovered list is sorted by ascending sequence number and has
+  // duplicates removed. The function should be called as new packets
+  // arrive, and `recovered_packets` will be progressively assembled with
+  // each call.
+  //
+  // The caller retains ownership of the data in `received_packet`.
+  // The function handles allocation of recovered packets.
+  //
+  // Input:  received_packet   The new received packet, of type
+  //                           ReceivedPacket, which is either a media
+  //                           packet or an FEC packet.
+  // Output: recovered_packets List of recovered media packets, of type
+  //                           RecoveredPacket. The memory available
+  //                           through the list will be valid until the
+  //                           next call to DecodeFec().
+  //
+  void DecodeFec(const ReceivedPacket& received_packet,
+                 RecoveredPacketList* recovered_packets);
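+  //
+  // A minimal decode-side sketch (assuming `fec` was created via
+  // CreateUlpfec() and `received` is a populated ReceivedPacket):
+  //
+  //   ForwardErrorCorrection::RecoveredPacketList recovered;
+  //   fec->DecodeFec(received, &recovered);
+  //   // `recovered` now holds media packets, sorted by sequence number.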
+
+ // Get the number of generated FEC packets, given the number of media packets
+ // and the protection factor.
+ static int NumFecPackets(int num_media_packets, int protection_factor);
+
+ // Gets the maximum size of the FEC headers in bytes, which must be
+ // accounted for as packet overhead.
+ size_t MaxPacketOverhead() const;
+
+  // Resets internal state from the last frame and clears
+  // `recovered_packets`. Frees all memory allocated by this class.
+ void ResetState(RecoveredPacketList* recovered_packets);
+
+ // TODO(brandtr): Remove these functions when the Packet classes
+ // have been refactored.
+ static uint16_t ParseSequenceNumber(const uint8_t* packet);
+ static uint32_t ParseSsrc(const uint8_t* packet);
+
+ protected:
+ ForwardErrorCorrection(std::unique_ptr<FecHeaderReader> fec_header_reader,
+ std::unique_ptr<FecHeaderWriter> fec_header_writer,
+ uint32_t ssrc,
+ uint32_t protected_media_ssrc);
+
+ private:
+  // Analyzes `media_packets` for holes in the sequence and inserts zero
+  // columns into the `packet_mask` where those holes are found. A zero
+  // column means that the corresponding packet will have no protection.
+ // Returns the number of bits used for one row of the new packet mask.
+ // Requires that `packet_mask` has at least 6 * `num_fec_packets` bytes
+ // allocated.
+ int InsertZerosInPacketMasks(const PacketList& media_packets,
+ size_t num_fec_packets);
+
+ // Writes FEC payloads and some recovery fields in the FEC headers.
+ void GenerateFecPayloads(const PacketList& media_packets,
+ size_t num_fec_packets);
+
+ // Writes the FEC header fields that are not written by GenerateFecPayloads.
+ // This includes writing the packet masks.
+ void FinalizeFecHeaders(size_t num_fec_packets,
+ uint32_t media_ssrc,
+ uint16_t seq_num_base);
+
+ // Inserts the `received_packet` into the internal received FEC packet list
+ // or into `recovered_packets`.
+ void InsertPacket(const ReceivedPacket& received_packet,
+ RecoveredPacketList* recovered_packets);
+
+ // Inserts the `received_packet` into `recovered_packets`. Deletes duplicates.
+ void InsertMediaPacket(RecoveredPacketList* recovered_packets,
+ const ReceivedPacket& received_packet);
+
+ // Assigns pointers to the recovered packet from all FEC packets which cover
+ // it.
+ // Note: This reduces the complexity when we want to try to recover a packet
+ // since we don't have to find the intersection between recovered packets and
+ // packets covered by the FEC packet.
+ void UpdateCoveringFecPackets(const RecoveredPacket& packet);
+
+  // Inserts `received_packet` into the internal FEC list. Deletes duplicates.
+ void InsertFecPacket(const RecoveredPacketList& recovered_packets,
+ const ReceivedPacket& received_packet);
+
+ // Assigns pointers to already recovered packets covered by `fec_packet`.
+ static void AssignRecoveredPackets(
+ const RecoveredPacketList& recovered_packets,
+ ReceivedFecPacket* fec_packet);
+
+ // Attempt to recover missing packets, using the internally stored
+ // received FEC packets.
+ void AttemptRecovery(RecoveredPacketList* recovered_packets);
+
+ // Initializes headers and payload before the XOR operation
+ // that recovers a packet.
+ static bool StartPacketRecovery(const ReceivedFecPacket& fec_packet,
+ RecoveredPacket* recovered_packet);
+
+ // Performs XOR between the first 8 bytes of `src` and `dst` and stores
+ // the result in `dst`. The 3rd and 4th bytes are used for storing
+ // the length recovery field.
+ static void XorHeaders(const Packet& src, Packet* dst);
+
+ // Performs XOR between the payloads of `src` and `dst` and stores the result
+ // in `dst`. The parameter `dst_offset` determines at what byte the
+ // XOR operation starts in `dst`. In total, `payload_length` bytes are XORed.
+ static void XorPayloads(const Packet& src,
+ size_t payload_length,
+ size_t dst_offset,
+ Packet* dst);
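+
+  // Recovery relies on XOR being its own inverse: if an FEC packet F was
+  // built as F = A ^ B and A is lost, then B ^ F == B ^ (A ^ B) == A. The
+  // two helpers above apply this over the protected header fields and the
+  // payload bytes, respectively.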
+
+ // Finalizes recovery of packet by setting RTP header fields.
+ // This is not specific to the FEC scheme used.
+ static bool FinishPacketRecovery(const ReceivedFecPacket& fec_packet,
+ RecoveredPacket* recovered_packet);
+
+ // Recover a missing packet.
+ static bool RecoverPacket(const ReceivedFecPacket& fec_packet,
+ RecoveredPacket* recovered_packet);
+
+ // Get the number of missing media packets which are covered by `fec_packet`.
+ // An FEC packet can recover at most one packet, and if zero packets are
+ // missing the FEC packet can be discarded. This function returns 2 when two
+ // or more packets are missing.
+ static int NumCoveredPacketsMissing(const ReceivedFecPacket& fec_packet);
+
+ // Discards old packets in `recovered_packets`, which are no longer relevant
+ // for recovering lost packets.
+ void DiscardOldRecoveredPackets(RecoveredPacketList* recovered_packets);
+
+ // Checks if the FEC packet is old enough and no longer relevant for
+ // recovering lost media packets.
+ bool IsOldFecPacket(const ReceivedFecPacket& fec_packet,
+ const RecoveredPacketList* recovered_packets);
+
+ // These SSRCs are only used by the decoder.
+ const uint32_t ssrc_;
+ const uint32_t protected_media_ssrc_;
+
+ std::unique_ptr<FecHeaderReader> fec_header_reader_;
+ std::unique_ptr<FecHeaderWriter> fec_header_writer_;
+
+ std::vector<Packet> generated_fec_packets_;
+ ReceivedFecPacketList received_fec_packets_;
+
+ // Arrays used to avoid dynamically allocating memory when generating
+ // the packet masks.
+ // (There are never more than `kUlpfecMaxMediaPackets` FEC packets generated.)
+ uint8_t packet_masks_[kUlpfecMaxMediaPackets * kUlpfecMaxPacketMaskSize];
+ uint8_t tmp_packet_masks_[kUlpfecMaxMediaPackets * kUlpfecMaxPacketMaskSize];
+ size_t packet_mask_size_;
+};
+
+// Classes derived from FecHeader{Reader,Writer} encapsulate the
+// specifics of reading and writing FEC headers for, e.g., ULPFEC
+// and FlexFEC.
+class FecHeaderReader {
+ public:
+ virtual ~FecHeaderReader();
+
+ // The maximum number of media packets that can be covered by one FEC packet.
+ size_t MaxMediaPackets() const;
+
+ // The maximum number of FEC packets that is supported, per call
+ // to ForwardErrorCorrection::EncodeFec().
+ size_t MaxFecPackets() const;
+
+ // Parses FEC header and stores information in ReceivedFecPacket members.
+ virtual bool ReadFecHeader(
+ ForwardErrorCorrection::ReceivedFecPacket* fec_packet) const = 0;
+
+ protected:
+ FecHeaderReader(size_t max_media_packets, size_t max_fec_packets);
+
+ const size_t max_media_packets_;
+ const size_t max_fec_packets_;
+};
+
+class FecHeaderWriter {
+ public:
+ virtual ~FecHeaderWriter();
+
+ // The maximum number of media packets that can be covered by one FEC packet.
+ size_t MaxMediaPackets() const;
+
+ // The maximum number of FEC packets that is supported, per call
+ // to ForwardErrorCorrection::EncodeFec().
+ size_t MaxFecPackets() const;
+
+ // The maximum overhead (in bytes) per packet, due to FEC headers.
+ size_t MaxPacketOverhead() const;
+
+ // Calculates the minimum packet mask size needed (in bytes),
+ // given the discrete options of the ULPFEC masks and the bits
+ // set in the current packet mask.
+ virtual size_t MinPacketMaskSize(const uint8_t* packet_mask,
+ size_t packet_mask_size) const = 0;
+
+ // The header size (in bytes), given the packet mask size.
+ virtual size_t FecHeaderSize(size_t packet_mask_size) const = 0;
+
+ // Writes FEC header.
+ virtual void FinalizeFecHeader(
+ uint32_t media_ssrc,
+ uint16_t seq_num_base,
+ const uint8_t* packet_mask,
+ size_t packet_mask_size,
+ ForwardErrorCorrection::Packet* fec_packet) const = 0;
+
+ protected:
+ FecHeaderWriter(size_t max_media_packets,
+ size_t max_fec_packets,
+ size_t max_packet_overhead);
+
+ const size_t max_media_packets_;
+ const size_t max_fec_packets_;
+ const size_t max_packet_overhead_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_RTP_RTCP_SOURCE_FORWARD_ERROR_CORRECTION_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/forward_error_correction_internal.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/forward_error_correction_internal.cc
new file mode 100644
index 0000000000..ac68162d26
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/forward_error_correction_internal.cc
@@ -0,0 +1,519 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/forward_error_correction_internal.h"
+
+#include <string.h>
+
+#include <algorithm>
+
+#include "modules/rtp_rtcp/source/fec_private_tables_bursty.h"
+#include "modules/rtp_rtcp/source/fec_private_tables_random.h"
+#include "rtc_base/checks.h"
+
+namespace {
+// Allow for different modes of protection for packets in UEP case.
+enum ProtectionMode {
+ kModeNoOverlap,
+ kModeOverlap,
+ kModeBiasFirstPacket,
+};
+
+// Fits an input mask (sub_mask) to an output mask.
+// The mask is a matrix where the rows are the FEC packets,
+// and the columns are the source packets the FEC is applied to.
+// Each row of the mask is represented by a number of mask bytes.
+//
+// \param[in]  num_mask_bytes     The number of mask bytes of the output mask.
+// \param[in]  num_sub_mask_bytes The number of mask bytes of the input mask.
+// \param[in]  num_rows           The number of rows of the input mask.
+// \param[in]  sub_mask           A pointer to the input mask, of size
+//                                [0, num_rows * num_sub_mask_bytes].
+// \param[out] packet_mask        A pointer to hold the output mask, of size
+//                                [0, x * num_mask_bytes], where x >= num_rows.
+void FitSubMask(int num_mask_bytes,
+ int num_sub_mask_bytes,
+ int num_rows,
+ const uint8_t* sub_mask,
+ uint8_t* packet_mask) {
+ if (num_mask_bytes == num_sub_mask_bytes) {
+ memcpy(packet_mask, sub_mask, num_rows * num_sub_mask_bytes);
+ } else {
+ for (int i = 0; i < num_rows; ++i) {
+ int pkt_mask_idx = i * num_mask_bytes;
+ int pkt_mask_idx2 = i * num_sub_mask_bytes;
+ for (int j = 0; j < num_sub_mask_bytes; ++j) {
+ packet_mask[pkt_mask_idx] = sub_mask[pkt_mask_idx2];
+ pkt_mask_idx++;
+ pkt_mask_idx2++;
+ }
+ }
+ }
+}
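+
+// For example, with num_sub_mask_bytes = 2 and num_mask_bytes = 6, each
+// 2-byte row of `sub_mask` lands in the first two bytes of the corresponding
+// 6-byte row of `packet_mask`, while the remaining four bytes per row are
+// left untouched (callers are expected to have zeroed them beforehand).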
+
+// Shifts a mask by number of columns (bits), and fits it to an output mask.
+// The mask is a matrix where the rows are the FEC packets,
+// and the columns are the source packets the FEC is applied to.
+// Each row of the mask is represented by a number of mask bytes.
+//
+// \param[in]  num_mask_bytes     The number of mask bytes of the output mask.
+// \param[in]  res_mask_bytes     The number of mask bytes of the input mask.
+// \param[in]  num_column_shift   The number of columns to be shifted, and
+//                                the starting row for the output mask.
+// \param[in]  end_row            The ending row for the output mask.
+// \param[in]  sub_mask           A pointer to the input mask, of size
+//                                [0, (end_row - num_column_shift) *
+//                                res_mask_bytes].
+// \param[out] packet_mask        A pointer to hold the output mask, of size
+//                                [0, x * num_mask_bytes],
+//                                where x >= end_row.
+// TODO(marpan): This function is doing three things at the same time:
+// shift within a byte, byte shift and resizing.
+// Split up into subroutines.
+void ShiftFitSubMask(int num_mask_bytes,
+ int res_mask_bytes,
+ int num_column_shift,
+ int end_row,
+ const uint8_t* sub_mask,
+ uint8_t* packet_mask) {
+ // Number of bit shifts within a byte
+ const int num_bit_shifts = (num_column_shift % 8);
+ const int num_byte_shifts = num_column_shift >> 3;
+
+  // Modify the new mask with sub_mask21.
+
+ // Loop over the remaining FEC packets.
+ for (int i = num_column_shift; i < end_row; ++i) {
+    // Byte index of the new mask, for row i and column res_mask_bytes,
+    // offset by the number of byte shifts.
+ int pkt_mask_idx =
+ i * num_mask_bytes + res_mask_bytes - 1 + num_byte_shifts;
+ // Byte index of sub_mask, for row i and column res_mask_bytes
+ int pkt_mask_idx2 =
+ (i - num_column_shift) * res_mask_bytes + res_mask_bytes - 1;
+
+ uint8_t shift_right_curr_byte = 0;
+ uint8_t shift_left_prev_byte = 0;
+ uint8_t comb_new_byte = 0;
+
+ // Handle case of num_mask_bytes > res_mask_bytes:
+    // For a given row, copy the rightmost `num_bit_shifts` bits
+    // of the last byte of sub_mask into the output mask.
+ if (num_mask_bytes > res_mask_bytes) {
+ shift_left_prev_byte = (sub_mask[pkt_mask_idx2] << (8 - num_bit_shifts));
+ packet_mask[pkt_mask_idx + 1] = shift_left_prev_byte;
+ }
+
+    // For each row i (FEC packet), shift the bit-mask of the sub_mask.
+    // Each row of the mask contains `res_mask_bytes` bytes.
+    // We start from the last byte of the sub_mask and move to the first one.
+ for (int j = res_mask_bytes - 1; j > 0; j--) {
+      // Shift the current byte of sub_mask to the right by `num_bit_shifts`.
+ shift_right_curr_byte = sub_mask[pkt_mask_idx2] >> num_bit_shifts;
+
+      // Fill in the shifted bits with bits from the previous (left) byte:
+      // first shift the previous byte to the left by `8 - num_bit_shifts`.
+ shift_left_prev_byte =
+ (sub_mask[pkt_mask_idx2 - 1] << (8 - num_bit_shifts));
+
+ // Then combine both shifted bytes into new mask byte.
+ comb_new_byte = shift_right_curr_byte | shift_left_prev_byte;
+
+ // Assign to new mask.
+ packet_mask[pkt_mask_idx] = comb_new_byte;
+ pkt_mask_idx--;
+ pkt_mask_idx2--;
+ }
+ // For the first byte in the row (j=0 case).
+ shift_right_curr_byte = sub_mask[pkt_mask_idx2] >> num_bit_shifts;
+ packet_mask[pkt_mask_idx] = shift_right_curr_byte;
+ }
+}
+
+} // namespace
+
+namespace webrtc {
+namespace internal {
+
+PacketMaskTable::PacketMaskTable(FecMaskType fec_mask_type,
+ int num_media_packets)
+ : table_(PickTable(fec_mask_type, num_media_packets)) {}
+
+PacketMaskTable::~PacketMaskTable() = default;
+
+rtc::ArrayView<const uint8_t> PacketMaskTable::LookUp(int num_media_packets,
+ int num_fec_packets) {
+ RTC_DCHECK_GT(num_media_packets, 0);
+ RTC_DCHECK_GT(num_fec_packets, 0);
+ RTC_DCHECK_LE(num_media_packets, kUlpfecMaxMediaPackets);
+ RTC_DCHECK_LE(num_fec_packets, num_media_packets);
+
+ if (num_media_packets <= 12) {
+ return LookUpInFecTable(table_, num_media_packets - 1, num_fec_packets - 1);
+ }
+ int mask_length =
+ static_cast<int>(PacketMaskSize(static_cast<size_t>(num_media_packets)));
+
+  // Generate the FEC code mask for {num_media_packets (M), num_fec_packets
+  // (N)}, i.e. use N FEC packets to protect M media packets. In the mask,
+  // each FEC packet occupies one row and each bit/column represents one
+  // media packet. E.g., if row A, column/bit B is set to 1, FEC packet A
+  // protects media packet B.
+
+ // Loop through each fec packet.
+ for (int row = 0; row < num_fec_packets; row++) {
+    // Loop through each mask byte in the row; one byte holds 8 bits.
+    // Bit X is set to 1 if media packet X shall be protected by the current
+    // FEC packet. In this implementation the protection is interleaved, so
+    // media packet X is protected by FEC packet (X % N).
+ for (int col = 0; col < mask_length; col++) {
+ fec_packet_mask_[row * mask_length + col] =
+ ((col * 8) % num_fec_packets == row && (col * 8) < num_media_packets
+ ? 0x80
+ : 0x00) |
+ ((col * 8 + 1) % num_fec_packets == row &&
+ (col * 8 + 1) < num_media_packets
+ ? 0x40
+ : 0x00) |
+ ((col * 8 + 2) % num_fec_packets == row &&
+ (col * 8 + 2) < num_media_packets
+ ? 0x20
+ : 0x00) |
+ ((col * 8 + 3) % num_fec_packets == row &&
+ (col * 8 + 3) < num_media_packets
+ ? 0x10
+ : 0x00) |
+ ((col * 8 + 4) % num_fec_packets == row &&
+ (col * 8 + 4) < num_media_packets
+ ? 0x08
+ : 0x00) |
+ ((col * 8 + 5) % num_fec_packets == row &&
+ (col * 8 + 5) < num_media_packets
+ ? 0x04
+ : 0x00) |
+ ((col * 8 + 6) % num_fec_packets == row &&
+ (col * 8 + 6) < num_media_packets
+ ? 0x02
+ : 0x00) |
+ ((col * 8 + 7) % num_fec_packets == row &&
+ (col * 8 + 7) < num_media_packets
+ ? 0x01
+ : 0x00);
+ }
+ }
+ return {&fec_packet_mask_[0],
+ static_cast<size_t>(num_fec_packets * mask_length)};
+}
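+
+// As a worked example of the generated mask: for num_media_packets = 13 and
+// num_fec_packets = 3, the mask is 2 bytes per row and media packet X is
+// protected by FEC packet (X % 3). Row 0 thus covers media packets 0, 3, 6,
+// 9 and 12, so its first mask byte is 0b10010010.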
+
+// If `num_media_packets` is larger than the maximum allowed for the bursty
+// type, or the random table is explicitly asked for, then the random table
+// is selected. Otherwise a pointer to the bursty table is returned.
+const uint8_t* PacketMaskTable::PickTable(FecMaskType fec_mask_type,
+ int num_media_packets) {
+ RTC_DCHECK_GE(num_media_packets, 0);
+ RTC_DCHECK_LE(static_cast<size_t>(num_media_packets), kUlpfecMaxMediaPackets);
+
+ if (fec_mask_type != kFecMaskRandom &&
+ num_media_packets <=
+ static_cast<int>(fec_private_tables::kPacketMaskBurstyTbl[0])) {
+ return &fec_private_tables::kPacketMaskBurstyTbl[0];
+ }
+
+ return &fec_private_tables::kPacketMaskRandomTbl[0];
+}
+
+// Remaining protection after important (first partition) packet protection
+void RemainingPacketProtection(int num_media_packets,
+ int num_fec_remaining,
+ int num_fec_for_imp_packets,
+ int num_mask_bytes,
+ ProtectionMode mode,
+ uint8_t* packet_mask,
+ PacketMaskTable* mask_table) {
+ if (mode == kModeNoOverlap) {
+ // sub_mask21
+
+ const int res_mask_bytes =
+ PacketMaskSize(num_media_packets - num_fec_for_imp_packets);
+
+ auto end_row = (num_fec_for_imp_packets + num_fec_remaining);
+ rtc::ArrayView<const uint8_t> packet_mask_sub_21 = mask_table->LookUp(
+ num_media_packets - num_fec_for_imp_packets, num_fec_remaining);
+
+ ShiftFitSubMask(num_mask_bytes, res_mask_bytes, num_fec_for_imp_packets,
+ end_row, &packet_mask_sub_21[0], packet_mask);
+
+ } else if (mode == kModeOverlap || mode == kModeBiasFirstPacket) {
+ // sub_mask22
+ rtc::ArrayView<const uint8_t> packet_mask_sub_22 =
+ mask_table->LookUp(num_media_packets, num_fec_remaining);
+
+ FitSubMask(num_mask_bytes, num_mask_bytes, num_fec_remaining,
+ &packet_mask_sub_22[0],
+ &packet_mask[num_fec_for_imp_packets * num_mask_bytes]);
+
+ if (mode == kModeBiasFirstPacket) {
+ for (int i = 0; i < num_fec_remaining; ++i) {
+ int pkt_mask_idx = i * num_mask_bytes;
+ packet_mask[pkt_mask_idx] = packet_mask[pkt_mask_idx] | (1 << 7);
+ }
+ }
+ } else {
+ RTC_DCHECK_NOTREACHED();
+ }
+}
+
+// Protection for important (first partition) packets
+void ImportantPacketProtection(int num_fec_for_imp_packets,
+ int num_imp_packets,
+ int num_mask_bytes,
+ uint8_t* packet_mask,
+ PacketMaskTable* mask_table) {
+ const int num_imp_mask_bytes = PacketMaskSize(num_imp_packets);
+
+ // Get sub_mask1 from table
+ rtc::ArrayView<const uint8_t> packet_mask_sub_1 =
+ mask_table->LookUp(num_imp_packets, num_fec_for_imp_packets);
+
+ FitSubMask(num_mask_bytes, num_imp_mask_bytes, num_fec_for_imp_packets,
+ &packet_mask_sub_1[0], packet_mask);
+}
+
+// This function sets the protection allocation: i.e., how many FEC packets
+// to use for num_imp (1st partition) packets, given the: number of media
+// packets, number of FEC packets, and number of 1st partition packets.
+int SetProtectionAllocation(int num_media_packets,
+ int num_fec_packets,
+ int num_imp_packets) {
+ // TODO(marpan): test different cases for protection allocation:
+
+ // Use at most (alloc_par * num_fec_packets) for important packets.
+ float alloc_par = 0.5;
+ int max_num_fec_for_imp = alloc_par * num_fec_packets;
+
+ int num_fec_for_imp_packets = (num_imp_packets < max_num_fec_for_imp)
+ ? num_imp_packets
+ : max_num_fec_for_imp;
+
+ // Fall back to equal protection in this case
+ if (num_fec_packets == 1 && (num_media_packets > 2 * num_imp_packets)) {
+ num_fec_for_imp_packets = 0;
+ }
+
+ return num_fec_for_imp_packets;
+}
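+
+// For example, with num_media_packets = 10, num_fec_packets = 4 and
+// num_imp_packets = 3: max_num_fec_for_imp = 0.5 * 4 = 2, so the allocation
+// is capped at 2 FEC packets for the important packets, leaving 2 for the
+// remaining protection.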
+
+// Modification for UEP: reuse the off-line tables for the packet masks.
+// Note: these masks were designed for equal packet protection case,
+// assuming random packet loss.
+
+// Current version has 3 modes (options) to build UEP mask from existing ones.
+// Various other combinations may be added in future versions.
+// Longer-term, we may add another set of tables specifically for UEP cases.
+// TODO(marpan): also consider modification of masks for bursty loss cases.
+
+// Mask is characterized as (#packets_to_protect, #fec_for_protection).
+// Protection factor defined as: (#fec_for_protection / #packets_to_protect).
+
+// Let k=num_media_packets, n=total#packets, (n-k)=num_fec_packets,
+// m=num_imp_packets.
+
+// For ProtectionMode 0 and 1:
+// one mask (sub_mask1) is used for 1st partition packets,
+// the other mask (sub_mask21/22, for 0/1) is for the remaining FEC packets.
+
+// In both mode 0 and 1, the packets of 1st partition (num_imp_packets) are
+// treated equally important, and are afforded more protection than the
+// residual partition packets.
+
+// For num_imp_packets:
+// sub_mask1 = (m, t): protection = t/(m), where t=F(k,n-k,m).
+// t=F(k,n-k,m) is the number of FEC packets used to protect the first
+// partition in sub_mask1, as determined by SetProtectionAllocation().
+
+// For the left-over protection:
+// Mode 0: sub_mask21 = (k-m,n-k-t): protection = (n-k-t)/(k-m)
+// mode 0 has no protection overlap between the two partitions.
+// For mode 0, we would typically set t = min(m, n-k).
+
+// Mode 1: sub_mask22 = (k, n-k-t), with protection (n-k-t)/(k)
+// mode 1 has protection overlap between the two partitions (preferred).
+
+// For ProtectionMode 2:
+// This gives 1st packet of list (which is 1st packet of 1st partition) more
+// protection. In mode 2, the equal protection mask (which is obtained from
+// mode 1 for t=0) is modified (more "1s" added in 1st column of packet mask)
+// to bias higher protection for the 1st source packet.
+
+// Protection Mode 2 may be extended for a sort of sliding protection
+// (i.e., vary the number/density of "1s" across columns) across packets.
+
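+// Worked example for mode 1 (kModeOverlap): with k = 10, n - k = 4, m = 3
+// and t = F(k, n - k, m) = 2, sub_mask1 = (3, 2) protects the three
+// important packets with two FEC packets, and sub_mask22 = (10, 2) spreads
+// the remaining two FEC packets over all ten media packets, overlapping the
+// first partition.
+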
+void UnequalProtectionMask(int num_media_packets,
+ int num_fec_packets,
+ int num_imp_packets,
+ int num_mask_bytes,
+ uint8_t* packet_mask,
+ PacketMaskTable* mask_table) {
+ // Set Protection type and allocation
+ // TODO(marpan): test/update for best mode and some combinations thereof.
+
+ ProtectionMode mode = kModeOverlap;
+ int num_fec_for_imp_packets = 0;
+
+ if (mode != kModeBiasFirstPacket) {
+ num_fec_for_imp_packets = SetProtectionAllocation(
+ num_media_packets, num_fec_packets, num_imp_packets);
+ }
+
+ int num_fec_remaining = num_fec_packets - num_fec_for_imp_packets;
+ // Done with setting protection type and allocation
+
+ //
+ // Generate sub_mask1
+ //
+ if (num_fec_for_imp_packets > 0) {
+ ImportantPacketProtection(num_fec_for_imp_packets, num_imp_packets,
+ num_mask_bytes, packet_mask, mask_table);
+ }
+
+ //
+ // Generate sub_mask2
+ //
+ if (num_fec_remaining > 0) {
+ RemainingPacketProtection(num_media_packets, num_fec_remaining,
+ num_fec_for_imp_packets, num_mask_bytes, mode,
+ packet_mask, mask_table);
+ }
+}
+
+// This algorithm is tailored to look up data in the `kPacketMaskRandomTbl` and
+// `kPacketMaskBurstyTbl` tables. These tables only cover fec code for up to 12
+// media packets. Starting from 13 media packets, the fec code will be generated
+// at runtime. The format of those arrays is that they're essentially a 3
+// dimensional array with the following dimensions:
+// * media packet
+//   * Size for kPacketMaskRandomTbl: 12
+//   * Size for kPacketMaskBurstyTbl: 12
+// * fec index
+//   * Size for both the random and bursty tables increases from 1 to the
+//     number of rows (i.e. 1-48, or 1-12 respectively).
+// * Fec data (what actually gets returned)
+//   * Size for kPacketMaskRandomTbl: 2 bytes.
+//     * For all entries: 2 * fec index (1 based)
+//   * Size for kPacketMaskBurstyTbl: 2 bytes.
+//     * For all entries: 2 * fec index (1 based)
+rtc::ArrayView<const uint8_t> LookUpInFecTable(const uint8_t* table,
+ int media_packet_index,
+ int fec_index) {
+ RTC_DCHECK_LT(media_packet_index, table[0]);
+
+ // Skip over the table size.
+ const uint8_t* entry = &table[1];
+
+  uint8_t entry_size_increment = 2;  // Entries 0-15 are 2 bytes wide; from
+                                     // entry 16 on, the width changes to 6.
+
+ // Hop over un-interesting array entries.
+ for (int i = 0; i < media_packet_index; ++i) {
+ if (i == 16)
+ entry_size_increment = 6;
+ uint8_t count = entry[0];
+ ++entry; // skip over the count.
+ for (int j = 0; j < count; ++j) {
+ entry += entry_size_increment * (j + 1); // skip over the data.
+ }
+ }
+
+ if (media_packet_index == 16)
+ entry_size_increment = 6;
+
+ RTC_DCHECK_LT(fec_index, entry[0]);
+ ++entry; // Skip over the size.
+
+ // Find the appropriate data in the second dimension.
+
+ // Find the specific data we're looking for.
+ for (int i = 0; i < fec_index; ++i)
+ entry += entry_size_increment * (i + 1); // skip over the data.
+
+ size_t size = entry_size_increment * (fec_index + 1);
+ return {&entry[0], size};
+}
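+
+// Walk-through (illustrative): a lookup with media_packet_index = 1 and
+// fec_index = 1 (two media packets, two FEC packets) first hops over the
+// single 2-byte entry for one media packet, then skips the 2-byte mask for
+// fec_index 0, and finally returns 2 * (1 + 1) = 4 bytes: two 2-byte mask
+// rows, one per FEC packet.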
+
+void GeneratePacketMasks(int num_media_packets,
+ int num_fec_packets,
+ int num_imp_packets,
+ bool use_unequal_protection,
+ PacketMaskTable* mask_table,
+ uint8_t* packet_mask) {
+ RTC_DCHECK_GT(num_media_packets, 0);
+ RTC_DCHECK_GT(num_fec_packets, 0);
+ RTC_DCHECK_LE(num_fec_packets, num_media_packets);
+ RTC_DCHECK_LE(num_imp_packets, num_media_packets);
+ RTC_DCHECK_GE(num_imp_packets, 0);
+
+ const int num_mask_bytes = PacketMaskSize(num_media_packets);
+
+ // Equal-protection for these cases.
+ if (!use_unequal_protection || num_imp_packets == 0) {
+    // Retrieve the corresponding mask table directly for the
+    // equal-protection case.
+ // Mask = (k,n-k), with protection factor = (n-k)/k,
+ // where k = num_media_packets, n=total#packets, (n-k)=num_fec_packets.
+ rtc::ArrayView<const uint8_t> mask =
+ mask_table->LookUp(num_media_packets, num_fec_packets);
+ memcpy(packet_mask, &mask[0], mask.size());
+ } else { // UEP case
+ UnequalProtectionMask(num_media_packets, num_fec_packets, num_imp_packets,
+ num_mask_bytes, packet_mask, mask_table);
+ } // End of UEP modification
+} // End of GeneratePacketMasks
+
+size_t PacketMaskSize(size_t num_sequence_numbers) {
+ RTC_DCHECK_LE(num_sequence_numbers, 8 * kUlpfecPacketMaskSizeLBitSet);
+ if (num_sequence_numbers > 8 * kUlpfecPacketMaskSizeLBitClear) {
+ return kUlpfecPacketMaskSizeLBitSet;
+ }
+ return kUlpfecPacketMaskSizeLBitClear;
+}
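+
+// E.g. PacketMaskSize(16) returns kUlpfecPacketMaskSizeLBitClear (2 bytes;
+// 16 bits cover up to 16 packets), while PacketMaskSize(17) returns
+// kUlpfecPacketMaskSizeLBitSet (6 bytes, covering up to 48 packets).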
+
+void InsertZeroColumns(int num_zeros,
+ uint8_t* new_mask,
+ int new_mask_bytes,
+ int num_fec_packets,
+ int new_bit_index) {
+ for (uint16_t row = 0; row < num_fec_packets; ++row) {
+ const int new_byte_index = row * new_mask_bytes + new_bit_index / 8;
+ const int max_shifts = (7 - (new_bit_index % 8));
+ new_mask[new_byte_index] <<= std::min(num_zeros, max_shifts);
+ }
+}
+
+void CopyColumn(uint8_t* new_mask,
+ int new_mask_bytes,
+ uint8_t* old_mask,
+ int old_mask_bytes,
+ int num_fec_packets,
+ int new_bit_index,
+ int old_bit_index) {
+ RTC_CHECK_LT(new_bit_index, 8 * new_mask_bytes);
+
+ // Copy column from the old mask to the beginning of the new mask and shift it
+ // out from the old mask.
+ for (uint16_t row = 0; row < num_fec_packets; ++row) {
+ int new_byte_index = row * new_mask_bytes + new_bit_index / 8;
+ int old_byte_index = row * old_mask_bytes + old_bit_index / 8;
+ new_mask[new_byte_index] |= ((old_mask[old_byte_index] & 0x80) >> 7);
+ if (new_bit_index % 8 != 7) {
+ new_mask[new_byte_index] <<= 1;
+ }
+ old_mask[old_byte_index] <<= 1;
+ }
+}
+
+} // namespace internal
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/forward_error_correction_internal.h b/third_party/libwebrtc/modules/rtp_rtcp/source/forward_error_correction_internal.h
new file mode 100644
index 0000000000..31acf73e3e
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/forward_error_correction_internal.h
@@ -0,0 +1,121 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_RTP_RTCP_SOURCE_FORWARD_ERROR_CORRECTION_INTERNAL_H_
+#define MODULES_RTP_RTCP_SOURCE_FORWARD_ERROR_CORRECTION_INTERNAL_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "api/array_view.h"
+#include "modules/include/module_fec_types.h"
+
+namespace webrtc {
+
+// Maximum number of media packets that can be protected
+// by these packet masks.
+constexpr size_t kUlpfecMaxMediaPackets = 48;
+
+// Packet mask size in bytes (given L bit).
+constexpr size_t kUlpfecPacketMaskSizeLBitClear = 2;
+constexpr size_t kUlpfecPacketMaskSizeLBitSet = 6;
+
+// Packet code mask maximum length. kFECPacketMaskMaxSize = MaxNumFECPackets *
+// (kUlpfecMaxMediaPackets / 8), where MaxNumFECPackets equals the maximum
+// number of media packets (kUlpfecMaxMediaPackets), i.e. 48 * 6 = 288.
+constexpr size_t kFECPacketMaskMaxSize = 288;
+
+// Convenience constants.
+constexpr size_t kUlpfecMinPacketMaskSize = kUlpfecPacketMaskSizeLBitClear;
+constexpr size_t kUlpfecMaxPacketMaskSize = kUlpfecPacketMaskSizeLBitSet;
+
+namespace internal {
+
+class PacketMaskTable {
+ public:
+ PacketMaskTable(FecMaskType fec_mask_type, int num_media_packets);
+ ~PacketMaskTable();
+
+ rtc::ArrayView<const uint8_t> LookUp(int num_media_packets,
+ int num_fec_packets);
+
+ private:
+ static const uint8_t* PickTable(FecMaskType fec_mask_type,
+ int num_media_packets);
+ const uint8_t* table_;
+ uint8_t fec_packet_mask_[kFECPacketMaskMaxSize];
+};
+
+rtc::ArrayView<const uint8_t> LookUpInFecTable(const uint8_t* table,
+ int media_packet_index,
+ int fec_index);
+
+// Fills `packet_mask` with an array of packet masks, one per FEC packet.
+// The mask of a single FEC packet corresponds to a number of mask bytes,
+// and indicates which media packets should be protected by that FEC packet.
+
+// \param[in] num_media_packets The number of media packets to protect.
+// [1, max_media_packets].
+// \param[in] num_fec_packets The number of FEC packets which will
+// be generated. [1, num_media_packets].
+// \param[in] num_imp_packets The number of important packets.
+// [0, num_media_packets].
+// num_imp_packets = 0 is the equal
+// protection scenario.
+// \param[in] use_unequal_protection Enables unequal protection: allocates
+// more protection to the num_imp_packets.
+// \param[in] mask_table An instance of the `PacketMaskTable`
+// class, which contains the type of FEC
+// packet mask used, and a pointer to the
+// corresponding packet masks.
+// \param[out] packet_mask A pointer to hold the packet mask array,
+// of size: num_fec_packets *
+// "number of mask bytes".
+void GeneratePacketMasks(int num_media_packets,
+ int num_fec_packets,
+ int num_imp_packets,
+ bool use_unequal_protection,
+ PacketMaskTable* mask_table,
+ uint8_t* packet_mask);
+
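+// A minimal usage sketch (illustrative only):
+//
+//   uint8_t mask[kFECPacketMaskMaxSize] = {0};
+//   PacketMaskTable table(kFecMaskRandom, /*num_media_packets=*/10);
+//   GeneratePacketMasks(/*num_media_packets=*/10, /*num_fec_packets=*/5,
+//                       /*num_imp_packets=*/0,
+//                       /*use_unequal_protection=*/false, &table, mask);
+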
+// Returns the required packet mask size, given the number of sequence numbers
+// that will be covered.
+size_t PacketMaskSize(size_t num_sequence_numbers);
+
+// Inserts `num_zeros` zero columns into `new_mask` at position
+// `new_bit_index`. If the current byte of `new_mask` can't fit all zeros, the
+// byte will be filled with zeros from `new_bit_index`, but the next byte will
+// be untouched.
+void InsertZeroColumns(int num_zeros,
+ uint8_t* new_mask,
+ int new_mask_bytes,
+ int num_fec_packets,
+ int new_bit_index);
+
+// Copies the leftmost bit column from the byte pointed to by
+// `old_bit_index` in `old_mask` to the rightmost column of the byte pointed
+// to by `new_bit_index` in `new_mask`. `old_mask_bytes` and `new_mask_bytes`
+// represent the number of bytes used per row for each mask. `num_fec_packets`
+// represents the number of rows of the masks.
+// The copied bit is shifted out from `old_mask` and is shifted one step to
+// the left in `new_mask`. `new_mask` will contain "xxxx xxn0" after this
+// operation, where x are previously inserted bits and n is the new bit.
+void CopyColumn(uint8_t* new_mask,
+ int new_mask_bytes,
+ uint8_t* old_mask,
+ int old_mask_bytes,
+ int num_fec_packets,
+ int new_bit_index,
+ int old_bit_index);
+
+} // namespace internal
+} // namespace webrtc
+
+#endif // MODULES_RTP_RTCP_SOURCE_FORWARD_ERROR_CORRECTION_INTERNAL_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/nack_rtx_unittest.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/nack_rtx_unittest.cc
new file mode 100644
index 0000000000..d0617f3804
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/nack_rtx_unittest.cc
@@ -0,0 +1,293 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <iterator>
+#include <list>
+#include <memory>
+#include <set>
+
+#include "absl/algorithm/container.h"
+#include "api/call/transport.h"
+#include "api/transport/field_trial_based_config.h"
+#include "call/rtp_stream_receiver_controller.h"
+#include "call/rtx_receive_stream.h"
+#include "modules/rtp_rtcp/include/receive_statistics.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "modules/rtp_rtcp/source/rtp_packet_received.h"
+#include "modules/rtp_rtcp/source/rtp_rtcp_impl2.h"
+#include "modules/rtp_rtcp/source/rtp_sender_video.h"
+#include "rtc_base/rate_limiter.h"
+#include "rtc_base/thread.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+const int kVideoNackListSize = 30;
+const uint32_t kTestSsrc = 3456;
+const uint32_t kTestRtxSsrc = kTestSsrc + 1;
+const uint16_t kTestSequenceNumber = 2345;
+const uint32_t kTestNumberOfPackets = 1350;
+const int kTestNumberOfRtxPackets = 149;
+const int kNumFrames = 30;
+const int kPayloadType = 123;
+const int kRtxPayloadType = 98;
+const int64_t kMaxRttMs = 1000;
+
+class VerifyingMediaStream : public RtpPacketSinkInterface {
+ public:
+ VerifyingMediaStream() {}
+
+ void OnRtpPacket(const RtpPacketReceived& packet) override {
+ if (!sequence_numbers_.empty())
+ EXPECT_EQ(kTestSsrc, packet.Ssrc());
+
+ sequence_numbers_.push_back(packet.SequenceNumber());
+ }
+ std::list<uint16_t> sequence_numbers_;
+};
+
+class RtxLoopBackTransport : public webrtc::Transport {
+ public:
+ explicit RtxLoopBackTransport(uint32_t rtx_ssrc)
+ : count_(0),
+ packet_loss_(0),
+ consecutive_drop_start_(0),
+ consecutive_drop_end_(0),
+ rtx_ssrc_(rtx_ssrc),
+ count_rtx_ssrc_(0),
+ module_(NULL) {}
+
+ void SetSendModule(RtpRtcpInterface* rtpRtcpModule) {
+ module_ = rtpRtcpModule;
+ }
+
+ void DropEveryNthPacket(int n) { packet_loss_ = n; }
+
+ void DropConsecutivePackets(int start, int total) {
+ consecutive_drop_start_ = start;
+ consecutive_drop_end_ = start + total;
+ packet_loss_ = 0;
+ }
+
+ bool SendRtp(const uint8_t* data,
+ size_t len,
+ const PacketOptions& options) override {
+ count_++;
+ RtpPacketReceived packet;
+ if (!packet.Parse(data, len))
+ return false;
+ if (packet.Ssrc() == rtx_ssrc_) {
+ count_rtx_ssrc_++;
+ } else {
+ // For non-RTX packets only.
+ expected_sequence_numbers_.insert(expected_sequence_numbers_.end(),
+ packet.SequenceNumber());
+ }
+ if (packet_loss_ > 0) {
+ if ((count_ % packet_loss_) == 0) {
+ return true;
+ }
+ } else if (count_ >= consecutive_drop_start_ &&
+ count_ < consecutive_drop_end_) {
+ return true;
+ }
+ EXPECT_TRUE(stream_receiver_controller_.OnRtpPacket(packet));
+ return true;
+ }
+
+ bool SendRtcp(const uint8_t* data, size_t len) override {
+    module_->IncomingRtcpPacket(data, len);
+ return true;
+ }
+ int count_;
+ int packet_loss_;
+ int consecutive_drop_start_;
+ int consecutive_drop_end_;
+ uint32_t rtx_ssrc_;
+ int count_rtx_ssrc_;
+ RtpRtcpInterface* module_;
+ RtpStreamReceiverController stream_receiver_controller_;
+ std::set<uint16_t> expected_sequence_numbers_;
+};
+
+class RtpRtcpRtxNackTest : public ::testing::Test {
+ protected:
+ RtpRtcpRtxNackTest()
+ : rtp_rtcp_module_(nullptr),
+ transport_(kTestRtxSsrc),
+ rtx_stream_(&media_stream_, rtx_associated_payload_types_, kTestSsrc),
+ fake_clock(123456),
+ retransmission_rate_limiter_(&fake_clock, kMaxRttMs) {}
+ ~RtpRtcpRtxNackTest() override {}
+
+ void SetUp() override {
+ RtpRtcpInterface::Configuration configuration;
+ configuration.audio = false;
+ configuration.clock = &fake_clock;
+ receive_statistics_ = ReceiveStatistics::Create(&fake_clock);
+ configuration.receive_statistics = receive_statistics_.get();
+ configuration.outgoing_transport = &transport_;
+ configuration.retransmission_rate_limiter = &retransmission_rate_limiter_;
+ configuration.local_media_ssrc = kTestSsrc;
+ configuration.rtx_send_ssrc = kTestRtxSsrc;
+ rtp_rtcp_module_ = ModuleRtpRtcpImpl2::Create(configuration);
+ FieldTrialBasedConfig field_trials;
+ RTPSenderVideo::Config video_config;
+ video_config.clock = &fake_clock;
+ video_config.rtp_sender = rtp_rtcp_module_->RtpSender();
+ video_config.field_trials = &field_trials;
+ rtp_sender_video_ = std::make_unique<RTPSenderVideo>(video_config);
+ rtp_rtcp_module_->SetRTCPStatus(RtcpMode::kCompound);
+ rtp_rtcp_module_->SetStorePacketsStatus(true, 600);
+ EXPECT_EQ(0, rtp_rtcp_module_->SetSendingStatus(true));
+ rtp_rtcp_module_->SetSequenceNumber(kTestSequenceNumber);
+ rtp_rtcp_module_->SetStartTimestamp(111111);
+
+ // Used for NACK processing.
+ rtp_rtcp_module_->SetRemoteSSRC(kTestSsrc);
+
+ rtp_rtcp_module_->SetRtxSendPayloadType(kRtxPayloadType, kPayloadType);
+ transport_.SetSendModule(rtp_rtcp_module_.get());
+ media_receiver_ = transport_.stream_receiver_controller_.CreateReceiver(
+ kTestSsrc, &media_stream_);
+
+ for (size_t n = 0; n < sizeof(payload_data); n++) {
+ payload_data[n] = n % 10;
+ }
+ }
+
+ int BuildNackList(uint16_t* nack_list) {
+ media_stream_.sequence_numbers_.sort();
+ std::list<uint16_t> missing_sequence_numbers;
+ std::list<uint16_t>::iterator it = media_stream_.sequence_numbers_.begin();
+
+ while (it != media_stream_.sequence_numbers_.end()) {
+ uint16_t sequence_number_1 = *it;
+ ++it;
+ if (it != media_stream_.sequence_numbers_.end()) {
+ uint16_t sequence_number_2 = *it;
+ // Add all missing sequence numbers to list
+ for (uint16_t i = sequence_number_1 + 1; i < sequence_number_2; ++i) {
+ missing_sequence_numbers.push_back(i);
+ }
+ }
+ }
+ int n = 0;
+ for (it = missing_sequence_numbers.begin();
+ it != missing_sequence_numbers.end(); ++it) {
+ nack_list[n++] = (*it);
+ }
+ return n;
+ }
+
+ bool ExpectedPacketsReceived() {
+ std::list<uint16_t> received_sorted;
+ absl::c_copy(media_stream_.sequence_numbers_,
+ std::back_inserter(received_sorted));
+ received_sorted.sort();
+ return absl::c_equal(received_sorted,
+ transport_.expected_sequence_numbers_);
+ }
+
+ void RunRtxTest(RtxMode rtx_method, int loss) {
+ rtx_receiver_ = transport_.stream_receiver_controller_.CreateReceiver(
+ kTestRtxSsrc, &rtx_stream_);
+ rtp_rtcp_module_->SetRtxSendStatus(rtx_method);
+ transport_.DropEveryNthPacket(loss);
+ uint32_t timestamp = 3000;
+ uint16_t nack_list[kVideoNackListSize];
+ for (int frame = 0; frame < kNumFrames; ++frame) {
+ RTPVideoHeader video_header;
+ EXPECT_TRUE(rtp_rtcp_module_->OnSendingRtpFrame(timestamp, timestamp / 90,
+ kPayloadType, false));
+ video_header.frame_type = VideoFrameType::kVideoFrameDelta;
+ EXPECT_TRUE(rtp_sender_video_->SendVideo(
+ kPayloadType, VideoCodecType::kVideoCodecGeneric, timestamp,
+ timestamp / 90, payload_data, video_header, 0));
+ // Min required delay until retransmit = 5 + RTT ms (RTT = 0).
+ fake_clock.AdvanceTimeMilliseconds(5);
+ int length = BuildNackList(nack_list);
+ if (length > 0)
+ rtp_rtcp_module_->SendNACK(nack_list, length);
+ fake_clock.AdvanceTimeMilliseconds(28); // 33ms - 5ms delay.
+ // Prepare next frame.
+ timestamp += 3000;
+ }
+ media_stream_.sequence_numbers_.sort();
+ }
+
+ rtc::AutoThread main_thread_;
+ std::unique_ptr<ReceiveStatistics> receive_statistics_;
+ std::unique_ptr<ModuleRtpRtcpImpl2> rtp_rtcp_module_;
+ std::unique_ptr<RTPSenderVideo> rtp_sender_video_;
+ RtxLoopBackTransport transport_;
+ const std::map<int, int> rtx_associated_payload_types_ = {
+ {kRtxPayloadType, kPayloadType}};
+ VerifyingMediaStream media_stream_;
+ RtxReceiveStream rtx_stream_;
+ uint8_t payload_data[65000];
+ SimulatedClock fake_clock;
+ RateLimiter retransmission_rate_limiter_;
+ std::unique_ptr<RtpStreamReceiverInterface> media_receiver_;
+ std::unique_ptr<RtpStreamReceiverInterface> rtx_receiver_;
+};
+
+TEST_F(RtpRtcpRtxNackTest, LongNackList) {
+ const int kNumPacketsToDrop = 900;
+ const int kNumRequiredRtcp = 4;
+ uint32_t timestamp = 3000;
+ uint16_t nack_list[kNumPacketsToDrop];
+ // Disable StorePackets to be able to set a larger packet history.
+ rtp_rtcp_module_->SetStorePacketsStatus(false, 0);
+ // Enable StorePackets with a packet history of 2000 packets.
+ rtp_rtcp_module_->SetStorePacketsStatus(true, 2000);
+ // Drop 900 packets from the second one so that we get a NACK list which is
+ // big enough to require 4 RTCP packets to be fully transmitted to the sender.
+ transport_.DropConsecutivePackets(2, kNumPacketsToDrop);
+ // Send 30 frames which at the default size is roughly what we need to get
+ // enough packets.
+ for (int frame = 0; frame < kNumFrames; ++frame) {
+ RTPVideoHeader video_header;
+ EXPECT_TRUE(rtp_rtcp_module_->OnSendingRtpFrame(timestamp, timestamp / 90,
+ kPayloadType, false));
+ video_header.frame_type = VideoFrameType::kVideoFrameDelta;
+ EXPECT_TRUE(rtp_sender_video_->SendVideo(
+ kPayloadType, VideoCodecType::kVideoCodecGeneric, timestamp,
+ timestamp / 90, payload_data, video_header, 0));
+ // Prepare next frame.
+ timestamp += 3000;
+ fake_clock.AdvanceTimeMilliseconds(33);
+ }
+ EXPECT_FALSE(transport_.expected_sequence_numbers_.empty());
+ EXPECT_FALSE(media_stream_.sequence_numbers_.empty());
+ size_t last_receive_count = media_stream_.sequence_numbers_.size();
+ int length = BuildNackList(nack_list);
+ for (int i = 0; i < kNumRequiredRtcp - 1; ++i) {
+ rtp_rtcp_module_->SendNACK(nack_list, length);
+ EXPECT_GT(media_stream_.sequence_numbers_.size(), last_receive_count);
+ last_receive_count = media_stream_.sequence_numbers_.size();
+ EXPECT_FALSE(ExpectedPacketsReceived());
+ }
+ rtp_rtcp_module_->SendNACK(nack_list, length);
+ EXPECT_GT(media_stream_.sequence_numbers_.size(), last_receive_count);
+ EXPECT_TRUE(ExpectedPacketsReceived());
+}
+
+TEST_F(RtpRtcpRtxNackTest, RtxNack) {
+ RunRtxTest(kRtxRetransmitted, 10);
+ EXPECT_EQ(kTestSequenceNumber, *(media_stream_.sequence_numbers_.begin()));
+ EXPECT_EQ(kTestSequenceNumber + kTestNumberOfPackets - 1,
+ *(media_stream_.sequence_numbers_.rbegin()));
+ EXPECT_EQ(kTestNumberOfPackets, media_stream_.sequence_numbers_.size());
+ EXPECT_EQ(kTestNumberOfRtxPackets, transport_.count_rtx_ssrc_);
+ EXPECT_TRUE(ExpectedPacketsReceived());
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/packet_loss_stats.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/packet_loss_stats.cc
new file mode 100644
index 0000000000..36f0a63d59
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/packet_loss_stats.cc
@@ -0,0 +1,141 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/packet_loss_stats.h"
+
+#include <cstdint>
+#include <iterator>
+#include <vector>
+
+#include "rtc_base/checks.h"
+
+// After this many packets are added, adding additional packets will cause the
+// oldest packets to be pruned from the buffer.
+static const int kBufferSize = 100;
+
+namespace webrtc {
+
+PacketLossStats::PacketLossStats()
+ : single_loss_historic_count_(0),
+ multiple_loss_historic_event_count_(0),
+ multiple_loss_historic_packet_count_(0) {}
+
+PacketLossStats::~PacketLossStats() = default;
+
+void PacketLossStats::AddLostPacket(uint16_t sequence_number) {
+ // Detect sequence number wrap around.
+ if (!lost_packets_buffer_.empty() &&
+ static_cast<int>(*(lost_packets_buffer_.rbegin())) - sequence_number >
+ 0x8000) {
+ // The buffer contains large numbers and this is a small number.
+ lost_packets_wrapped_buffer_.insert(sequence_number);
+ } else {
+ lost_packets_buffer_.insert(sequence_number);
+ }
+ if (lost_packets_wrapped_buffer_.size() + lost_packets_buffer_.size() >
+ kBufferSize ||
+ (!lost_packets_wrapped_buffer_.empty() &&
+ *(lost_packets_wrapped_buffer_.rbegin()) > 0x4000)) {
+ PruneBuffer();
+ }
+}
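+
+// For example, if the buffer already holds sequence number 65530 and 5 is
+// added, 65530 - 5 > 0x8000, so 5 is classified as post-wrap and stored in
+// `lost_packets_wrapped_buffer_`; loss counting then visits the pre-wrap
+// numbers first and the wrapped numbers after them, preserving order across
+// the wrap.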
+
+int PacketLossStats::GetSingleLossCount() const {
+ int single_loss_count, unused1, unused2;
+ ComputeLossCounts(&single_loss_count, &unused1, &unused2);
+ return single_loss_count;
+}
+
+int PacketLossStats::GetMultipleLossEventCount() const {
+ int event_count, unused1, unused2;
+ ComputeLossCounts(&unused1, &event_count, &unused2);
+ return event_count;
+}
+
+int PacketLossStats::GetMultipleLossPacketCount() const {
+ int packet_count, unused1, unused2;
+ ComputeLossCounts(&unused1, &unused2, &packet_count);
+ return packet_count;
+}
+
+void PacketLossStats::ComputeLossCounts(
+ int* out_single_loss_count,
+ int* out_multiple_loss_event_count,
+ int* out_multiple_loss_packet_count) const {
+ *out_single_loss_count = single_loss_historic_count_;
+ *out_multiple_loss_event_count = multiple_loss_historic_event_count_;
+ *out_multiple_loss_packet_count = multiple_loss_historic_packet_count_;
+ if (lost_packets_buffer_.empty()) {
+ RTC_DCHECK(lost_packets_wrapped_buffer_.empty());
+ return;
+ }
+ uint16_t last_num = 0;
+ int sequential_count = 0;
+ std::vector<const std::set<uint16_t>*> buffers;
+ buffers.push_back(&lost_packets_buffer_);
+ buffers.push_back(&lost_packets_wrapped_buffer_);
+ for (const auto* buffer : buffers) {
+ for (auto it = buffer->begin(); it != buffer->end(); ++it) {
+ uint16_t current_num = *it;
+ if (sequential_count > 0 && current_num != ((last_num + 1) & 0xFFFF)) {
+ if (sequential_count == 1) {
+ (*out_single_loss_count)++;
+ } else {
+ (*out_multiple_loss_event_count)++;
+ *out_multiple_loss_packet_count += sequential_count;
+ }
+ sequential_count = 0;
+ }
+ sequential_count++;
+ last_num = current_num;
+ }
+ }
+ if (sequential_count == 1) {
+ (*out_single_loss_count)++;
+ } else if (sequential_count > 1) {
+ (*out_multiple_loss_event_count)++;
+ *out_multiple_loss_packet_count += sequential_count;
+ }
+}
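+
+// For example, with lost sequence numbers {3, 4, 5, 9}: 3-5 form one
+// multiple-loss event of three packets and 9 is a single loss, so on top of
+// the historic counts this yields single = 1, events = 1 and multiple-loss
+// packets = 3.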
+
+void PacketLossStats::PruneBuffer() {
+ // Remove the oldest lost packet and any contiguous packets and move them
+ // into the historic counts.
+ auto it = lost_packets_buffer_.begin();
+ uint16_t last_removed = 0;
+ int remove_count = 0;
+  // Count adjacent packets, and keep counting across a sequence-number wrap
+  // by swapping in the wrapped buffer and letting our value wrap as well.
+ while (remove_count == 0 || (!lost_packets_buffer_.empty() &&
+ *it == ((last_removed + 1) & 0xFFFF))) {
+ last_removed = *it;
+ remove_count++;
+ auto to_erase = it++;
+ lost_packets_buffer_.erase(to_erase);
+ if (lost_packets_buffer_.empty()) {
+ lost_packets_buffer_.swap(lost_packets_wrapped_buffer_);
+ it = lost_packets_buffer_.begin();
+ }
+ }
+ if (remove_count > 1) {
+ multiple_loss_historic_event_count_++;
+ multiple_loss_historic_packet_count_ += remove_count;
+ } else {
+ single_loss_historic_count_++;
+ }
+ // Continue pruning if the wrapped buffer is beyond a threshold and there are
+ // things left in the pre-wrapped buffer.
+ if (!lost_packets_wrapped_buffer_.empty() &&
+ *(lost_packets_wrapped_buffer_.rbegin()) > 0x4000) {
+ PruneBuffer();
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/packet_loss_stats.h b/third_party/libwebrtc/modules/rtp_rtcp/source/packet_loss_stats.h
new file mode 100644
index 0000000000..60d20294a8
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/packet_loss_stats.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_RTP_RTCP_SOURCE_PACKET_LOSS_STATS_H_
+#define MODULES_RTP_RTCP_SOURCE_PACKET_LOSS_STATS_H_
+
+#include <stdint.h>
+
+#include <set>
+
+namespace webrtc {
+
+// Keeps track of statistics of packet loss including whether losses are a
+// single packet or multiple packets in a row.
+class PacketLossStats {
+ public:
+ PacketLossStats();
+ ~PacketLossStats();
+
+ // Adds a lost packet to the stats by sequence number.
+ void AddLostPacket(uint16_t sequence_number);
+
+ // Queries the number of packets that were lost by themselves, no neighboring
+ // packets were lost.
+ int GetSingleLossCount() const;
+
+ // Queries the number of times that multiple packets with sequential numbers
+ // were lost. This is the number of events with more than one packet lost,
+  // regardless of the size of the event.
+ int GetMultipleLossEventCount() const;
+
+ // Queries the number of packets lost in multiple packet loss events. Combined
+ // with the event count, this can be used to determine the average event size.
+ int GetMultipleLossPacketCount() const;
+
+ private:
+ std::set<uint16_t> lost_packets_buffer_;
+ std::set<uint16_t> lost_packets_wrapped_buffer_;
+ int single_loss_historic_count_;
+ int multiple_loss_historic_event_count_;
+ int multiple_loss_historic_packet_count_;
+
+ void ComputeLossCounts(int* out_single_loss_count,
+ int* out_multiple_loss_event_count,
+ int* out_multiple_loss_packet_count) const;
+ void PruneBuffer();
+};
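+
+// A minimal usage sketch (illustrative only):
+//
+//   PacketLossStats stats;
+//   stats.AddLostPacket(100);
+//   stats.AddLostPacket(101);  // 100-101: one multiple-loss event.
+//   stats.AddLostPacket(205);  // A single loss.
+//   // stats.GetSingleLossCount() == 1,
+//   // stats.GetMultipleLossEventCount() == 1,
+//   // stats.GetMultipleLossPacketCount() == 2.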
+
+} // namespace webrtc
+
+#endif // MODULES_RTP_RTCP_SOURCE_PACKET_LOSS_STATS_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/packet_loss_stats_unittest.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/packet_loss_stats_unittest.cc
new file mode 100644
index 0000000000..673b223867
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/packet_loss_stats_unittest.cc
@@ -0,0 +1,198 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/packet_loss_stats.h"
+
+#include "test/gtest.h"
+
+namespace webrtc {
+
+class PacketLossStatsTest : public ::testing::Test {
+ protected:
+ PacketLossStats stats_;
+};
+
+// Add a lost packet as every other packet, they should all count as single
+// losses.
+TEST_F(PacketLossStatsTest, EveryOtherPacket) {
+ for (int i = 0; i < 1000; i += 2) {
+ stats_.AddLostPacket(i);
+ }
+ EXPECT_EQ(500, stats_.GetSingleLossCount());
+ EXPECT_EQ(0, stats_.GetMultipleLossEventCount());
+ EXPECT_EQ(0, stats_.GetMultipleLossPacketCount());
+}
+
+// Add a lost packet as every other packet, but such that the sequence numbers
+// will wrap around while they are being added.
+TEST_F(PacketLossStatsTest, EveryOtherPacketWrapped) {
+ for (int i = 65500; i < 66500; i += 2) {
+ stats_.AddLostPacket(i & 0xFFFF);
+ }
+ EXPECT_EQ(500, stats_.GetSingleLossCount());
+ EXPECT_EQ(0, stats_.GetMultipleLossEventCount());
+ EXPECT_EQ(0, stats_.GetMultipleLossPacketCount());
+}
+
+// Add a lost packet as every other packet, but such that the sequence numbers
+// will wrap around close to the very end, such that the buffer contains packets
+// on either side of the wrapping.
+TEST_F(PacketLossStatsTest, EveryOtherPacketWrappedAtEnd) {
+ for (int i = 64600; i < 65600; i += 2) {
+ stats_.AddLostPacket(i & 0xFFFF);
+ }
+ EXPECT_EQ(500, stats_.GetSingleLossCount());
+ EXPECT_EQ(0, stats_.GetMultipleLossEventCount());
+ EXPECT_EQ(0, stats_.GetMultipleLossPacketCount());
+}
+
+// Add a lost packet as the first three of every eight packets. Each set of
+// three should count as a multiple loss event and three multiple loss packets.
+TEST_F(PacketLossStatsTest, FirstThreeOfEight) {
+ for (int i = 0; i < 1000; ++i) {
+ if ((i & 7) < 3) {
+ stats_.AddLostPacket(i);
+ }
+ }
+ EXPECT_EQ(0, stats_.GetSingleLossCount());
+ EXPECT_EQ(125, stats_.GetMultipleLossEventCount());
+ EXPECT_EQ(375, stats_.GetMultipleLossPacketCount());
+}
+
+// Add a lost packet as the first three of every eight packets such that the
+// sequence numbers wrap in the middle of adding them.
+TEST_F(PacketLossStatsTest, FirstThreeOfEightWrapped) {
+ for (int i = 65500; i < 66500; ++i) {
+ if ((i & 7) < 3) {
+ stats_.AddLostPacket(i & 0xFFFF);
+ }
+ }
+ EXPECT_EQ(0, stats_.GetSingleLossCount());
+ EXPECT_EQ(125, stats_.GetMultipleLossEventCount());
+ EXPECT_EQ(375, stats_.GetMultipleLossPacketCount());
+}
+
+// Add a lost packet as the first three of every eight packets such that the
+// sequence numbers wrap near the end of adding them and there are still numbers
+// in the buffer from before the wrapping.
+TEST_F(PacketLossStatsTest, FirstThreeOfEightWrappedAtEnd) {
+ for (int i = 64600; i < 65600; ++i) {
+ if ((i & 7) < 3) {
+ stats_.AddLostPacket(i & 0xFFFF);
+ }
+ }
+ EXPECT_EQ(0, stats_.GetSingleLossCount());
+ EXPECT_EQ(125, stats_.GetMultipleLossEventCount());
+ EXPECT_EQ(375, stats_.GetMultipleLossPacketCount());
+}
+
+// Add loss packets as the first three and the fifth of every eight packets. The
+// set of three should be multiple loss and the fifth should be single loss.
+TEST_F(PacketLossStatsTest, FirstThreeAndFifthOfEight) {
+ for (int i = 0; i < 1000; ++i) {
+ if ((i & 7) < 3 || (i & 7) == 4) {
+ stats_.AddLostPacket(i);
+ }
+ }
+ EXPECT_EQ(125, stats_.GetSingleLossCount());
+ EXPECT_EQ(125, stats_.GetMultipleLossEventCount());
+ EXPECT_EQ(375, stats_.GetMultipleLossPacketCount());
+}
+
+// Add loss packets as the first three and the fifth of every eight packets such
+// that the sequence numbers wrap in the middle of adding them.
+TEST_F(PacketLossStatsTest, FirstThreeAndFifthOfEightWrapped) {
+ for (int i = 65500; i < 66500; ++i) {
+ if ((i & 7) < 3 || (i & 7) == 4) {
+ stats_.AddLostPacket(i & 0xFFFF);
+ }
+ }
+ EXPECT_EQ(125, stats_.GetSingleLossCount());
+ EXPECT_EQ(125, stats_.GetMultipleLossEventCount());
+ EXPECT_EQ(375, stats_.GetMultipleLossPacketCount());
+}
+
+// Add loss packets as the first three and the fifth of every eight packets such
+// that the sequence numbers wrap near the end of adding them and there are
+// packets from before the wrapping still in the buffer.
+TEST_F(PacketLossStatsTest, FirstThreeAndFifthOfEightWrappedAtEnd) {
+ for (int i = 64600; i < 65600; ++i) {
+ if ((i & 7) < 3 || (i & 7) == 4) {
+ stats_.AddLostPacket(i & 0xFFFF);
+ }
+ }
+ EXPECT_EQ(125, stats_.GetSingleLossCount());
+ EXPECT_EQ(125, stats_.GetMultipleLossEventCount());
+ EXPECT_EQ(375, stats_.GetMultipleLossPacketCount());
+}
+
+// Add loss packets such that there is a multiple loss event that continues
+// around the wrapping of sequence numbers.
+TEST_F(PacketLossStatsTest, MultipleLossEventWrapped) {
+ for (int i = 60000; i < 60500; i += 2) {
+ stats_.AddLostPacket(i);
+ }
+ for (int i = 65530; i < 65540; ++i) {
+ stats_.AddLostPacket(i & 0xFFFF);
+ }
+ EXPECT_EQ(250, stats_.GetSingleLossCount());
+ EXPECT_EQ(1, stats_.GetMultipleLossEventCount());
+ EXPECT_EQ(10, stats_.GetMultipleLossPacketCount());
+}
+
+// Add loss packets such that there is a multiple loss event that continues
+// around the wrapping of sequence numbers and then is pushed out of the buffer.
+TEST_F(PacketLossStatsTest, MultipleLossEventWrappedPushedOut) {
+ for (int i = 60000; i < 60500; i += 2) {
+ stats_.AddLostPacket(i);
+ }
+ for (int i = 65530; i < 65540; ++i) {
+ stats_.AddLostPacket(i & 0xFFFF);
+ }
+ for (int i = 1000; i < 1500; i += 2) {
+ stats_.AddLostPacket(i);
+ }
+ EXPECT_EQ(500, stats_.GetSingleLossCount());
+ EXPECT_EQ(1, stats_.GetMultipleLossEventCount());
+ EXPECT_EQ(10, stats_.GetMultipleLossPacketCount());
+}
+
+// Add loss packets out of order and ensure that they still get counted
+// correctly as single or multiple loss events.
+TEST_F(PacketLossStatsTest, OutOfOrder) {
+ for (int i = 0; i < 1000; i += 10) {
+ stats_.AddLostPacket(i + 5);
+ stats_.AddLostPacket(i + 7);
+ stats_.AddLostPacket(i + 4);
+ stats_.AddLostPacket(i + 1);
+ stats_.AddLostPacket(i + 2);
+ }
+ EXPECT_EQ(100, stats_.GetSingleLossCount());
+ EXPECT_EQ(200, stats_.GetMultipleLossEventCount());
+ EXPECT_EQ(400, stats_.GetMultipleLossPacketCount());
+}
+
+// Add loss packets out of order and ensure that they still get counted
+// correctly as single or multiple loss events, and wrap in the middle of
+// adding.
+TEST_F(PacketLossStatsTest, OutOfOrderWrapped) {
+ for (int i = 65000; i < 66000; i += 10) {
+ stats_.AddLostPacket((i + 5) & 0xFFFF);
+ stats_.AddLostPacket((i + 7) & 0xFFFF);
+ stats_.AddLostPacket((i + 4) & 0xFFFF);
+ stats_.AddLostPacket((i + 1) & 0xFFFF);
+ stats_.AddLostPacket((i + 2) & 0xFFFF);
+ }
+ EXPECT_EQ(100, stats_.GetSingleLossCount());
+ EXPECT_EQ(200, stats_.GetMultipleLossEventCount());
+ EXPECT_EQ(400, stats_.GetMultipleLossPacketCount());
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/packet_sequencer.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/packet_sequencer.cc
new file mode 100644
index 0000000000..55edd768a8
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/packet_sequencer.cc
@@ -0,0 +1,156 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/packet_sequencer.h"
+
+#include "rtc_base/checks.h"
+#include "rtc_base/random.h"
+
+namespace webrtc {
+
+namespace {
+// The RED header is the first byte of the payload, if present.
+constexpr size_t kRedForFecHeaderLength = 1;
+
+// Timestamps use a 90 kHz clock, i.e. 90 ticks per millisecond.
+constexpr uint32_t kTimestampTicksPerMs = 90;
+} // namespace
+
+PacketSequencer::PacketSequencer(uint32_t media_ssrc,
+ absl::optional<uint32_t> rtx_ssrc,
+ bool require_marker_before_media_padding,
+ Clock* clock)
+ : media_ssrc_(media_ssrc),
+ rtx_ssrc_(rtx_ssrc),
+ require_marker_before_media_padding_(require_marker_before_media_padding),
+ clock_(clock),
+ media_sequence_number_(0),
+ rtx_sequence_number_(0),
+ last_payload_type_(-1),
+ last_rtp_timestamp_(0),
+ last_capture_time_ms_(0),
+ last_timestamp_time_ms_(0),
+ last_packet_marker_bit_(false) {
+ Random random(clock_->TimeInMicroseconds());
+  // Random start, 16 bits. The upper half of the range is avoided in order to
+  // prevent wraparound issues during startup. Sequence number 0 is avoided for
+  // historical reasons, presumably to avoid debuggability or test usage
+  // conflicts.
+ constexpr uint16_t kMaxInitRtpSeqNumber = 0x7fff; // 2^15 - 1.
+ media_sequence_number_ = random.Rand(1, kMaxInitRtpSeqNumber);
+ rtx_sequence_number_ = random.Rand(1, kMaxInitRtpSeqNumber);
+}
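+
+// A note on the ranges above: with kMaxInitRtpSeqNumber = 0x7fff both
+// counters start somewhere in [1, 32767], leaving at least half of the
+// 16-bit sequence number space before the first wraparound.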
+
+void PacketSequencer::Sequence(RtpPacketToSend& packet) {
+ if (packet.Ssrc() == media_ssrc_) {
+ if (packet.packet_type() == RtpPacketMediaType::kRetransmission) {
+ // Retransmission of an already sequenced packet, ignore.
+ return;
+ } else if (packet.packet_type() == RtpPacketMediaType::kPadding) {
+ PopulatePaddingFields(packet);
+ }
+ packet.SetSequenceNumber(media_sequence_number_++);
+ if (packet.packet_type() != RtpPacketMediaType::kPadding) {
+ UpdateLastPacketState(packet);
+ }
+ } else if (packet.Ssrc() == rtx_ssrc_) {
+ if (packet.packet_type() == RtpPacketMediaType::kPadding) {
+ PopulatePaddingFields(packet);
+ }
+ packet.SetSequenceNumber(rtx_sequence_number_++);
+ } else {
+ RTC_DCHECK_NOTREACHED() << "Unexpected ssrc " << packet.Ssrc();
+ }
+}
+
+void PacketSequencer::SetRtpState(const RtpState& state) {
+ media_sequence_number_ = state.sequence_number;
+ last_rtp_timestamp_ = state.timestamp;
+ last_capture_time_ms_ = state.capture_time_ms;
+ last_timestamp_time_ms_ = state.last_timestamp_time_ms;
+}
+
+void PacketSequencer::PopulateRtpState(RtpState& state) const {
+ state.sequence_number = media_sequence_number_;
+ state.timestamp = last_rtp_timestamp_;
+ state.capture_time_ms = last_capture_time_ms_;
+ state.last_timestamp_time_ms = last_timestamp_time_ms_;
+}
+
+void PacketSequencer::UpdateLastPacketState(const RtpPacketToSend& packet) {
+  // Remember the marker bit, to determine whether padding can be inserted
+  // with the sequence number following `packet`.
+ last_packet_marker_bit_ = packet.Marker();
+  // Remember the media payload type, to be used in padding packets if RTX is
+  // disabled.
+ if (packet.is_red()) {
+ RTC_DCHECK_GE(packet.payload_size(), kRedForFecHeaderLength);
+ last_payload_type_ = packet.PayloadBuffer()[0];
+ } else {
+ last_payload_type_ = packet.PayloadType();
+ }
+  // Save timestamps, to generate the timestamp field and extensions for padding.
+ last_rtp_timestamp_ = packet.Timestamp();
+ last_timestamp_time_ms_ = clock_->TimeInMilliseconds();
+ last_capture_time_ms_ = packet.capture_time().ms();
+}
+
+void PacketSequencer::PopulatePaddingFields(RtpPacketToSend& packet) {
+ if (packet.Ssrc() == media_ssrc_) {
+ RTC_DCHECK(CanSendPaddingOnMediaSsrc());
+
+ packet.SetTimestamp(last_rtp_timestamp_);
+ packet.set_capture_time(Timestamp::Millis(last_capture_time_ms_));
+ packet.SetPayloadType(last_payload_type_);
+ return;
+ }
+
+ RTC_DCHECK(packet.Ssrc() == rtx_ssrc_);
+ if (packet.payload_size() > 0) {
+    // This is a payload padding packet; don't update the timestamp fields.
+ return;
+ }
+
+ packet.SetTimestamp(last_rtp_timestamp_);
+ packet.set_capture_time(Timestamp::Millis(last_capture_time_ms_));
+
+  // Only change the timestamp of padding packets sent over RTX.
+  // Padding-only packets over RTP have to be sent as part of a media
+  // frame (and therefore use the same timestamp).
+ int64_t now_ms = clock_->TimeInMilliseconds();
+ if (last_timestamp_time_ms_ > 0) {
+ packet.SetTimestamp(packet.Timestamp() +
+ (now_ms - last_timestamp_time_ms_) *
+ kTimestampTicksPerMs);
+ if (packet.capture_time() > Timestamp::Zero()) {
+ packet.set_capture_time(
+ packet.capture_time() +
+ TimeDelta::Millis(now_ms - last_timestamp_time_ms_));
+ }
+ }
+}
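+
+// Worked example of the interpolation above: with kTimestampTicksPerMs = 90,
+// an RTX padding packet sequenced 10 ms after the last media packet gets its
+// RTP timestamp advanced by 10 * 90 = 900 ticks and its capture time advanced
+// by 10 ms (the case exercised by the UpdatesRtxPaddingBasedOnLastMediaPacket
+// unit test).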
+
+bool PacketSequencer::CanSendPaddingOnMediaSsrc() const {
+ if (last_payload_type_ == -1) {
+ return false;
+ }
+
+  // Without RTX we can't send padding in the middle of frames.
+  // For audio, the marker bit doesn't mark the end of a frame and frames
+  // are usually a single packet, so for now we don't apply this rule
+  // to audio.
+ if (require_marker_before_media_padding_ && !last_packet_marker_bit_) {
+ return false;
+ }
+
+ return true;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/packet_sequencer.h b/third_party/libwebrtc/modules/rtp_rtcp/source/packet_sequencer.h
new file mode 100644
index 0000000000..7c0dee7a5b
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/packet_sequencer.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_RTP_RTCP_SOURCE_PACKET_SEQUENCER_H_
+#define MODULES_RTP_RTCP_SOURCE_PACKET_SEQUENCER_H_
+
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "modules/rtp_rtcp/source/rtp_packet_to_send.h"
+#include "system_wrappers/include/clock.h"
+
+namespace webrtc {
+
+// Helper class used to assign RTP sequence numbers and populate some fields for
+// padding packets based on the last sequenced packets.
+// This class is not thread safe, the caller must provide that.
+class PacketSequencer {
+ public:
+  // If `require_marker_before_media_padding` is true, padding packets on the
+  // media SSRC are not allowed unless the last sequenced media packet had the
+  // marker bit set (i.e. don't insert padding packets between the first and
+  // last packets of a video frame).
+ // Packets with unknown SSRCs will be ignored.
+ PacketSequencer(uint32_t media_ssrc,
+ absl::optional<uint32_t> rtx_ssrc,
+ bool require_marker_before_media_padding,
+ Clock* clock);
+
+  // Assigns a sequence number and, for padding packets, also populates the
+  // timestamp and payload type fields as needed.
+ void Sequence(RtpPacketToSend& packet);
+
+ void set_media_sequence_number(uint16_t sequence_number) {
+ media_sequence_number_ = sequence_number;
+ }
+ void set_rtx_sequence_number(uint16_t sequence_number) {
+ rtx_sequence_number_ = sequence_number;
+ }
+
+ void SetRtpState(const RtpState& state);
+ void PopulateRtpState(RtpState& state) const;
+
+ uint16_t media_sequence_number() const { return media_sequence_number_; }
+ uint16_t rtx_sequence_number() const { return rtx_sequence_number_; }
+
+ // Checks whether it is allowed to send padding on the media SSRC at this
+ // time, e.g. that we don't send padding in the middle of a video frame.
+ bool CanSendPaddingOnMediaSsrc() const;
+
+ private:
+ void UpdateLastPacketState(const RtpPacketToSend& packet);
+ void PopulatePaddingFields(RtpPacketToSend& packet);
+
+ const uint32_t media_ssrc_;
+ const absl::optional<uint32_t> rtx_ssrc_;
+ const bool require_marker_before_media_padding_;
+ Clock* const clock_;
+
+ uint16_t media_sequence_number_;
+ uint16_t rtx_sequence_number_;
+
+ int8_t last_payload_type_;
+ uint32_t last_rtp_timestamp_;
+ int64_t last_capture_time_ms_;
+ int64_t last_timestamp_time_ms_;
+ bool last_packet_marker_bit_;
+};
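+
+// Example usage (an illustrative sketch; the SSRC value and packet setup
+// below are assumptions, not part of this API):
+//
+//   PacketSequencer sequencer(/*media_ssrc=*/123, /*rtx_ssrc=*/absl::nullopt,
+//                             /*require_marker_before_media_padding=*/true,
+//                             clock);
+//   RtpPacketToSend packet(/*extension_manager=*/nullptr);
+//   packet.SetSsrc(123);
+//   packet.set_packet_type(RtpPacketMediaType::kVideo);
+//   sequencer.Sequence(packet);  // Assigns the next media sequence number.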
+
+} // namespace webrtc
+
+#endif // MODULES_RTP_RTCP_SOURCE_PACKET_SEQUENCER_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/packet_sequencer_unittest.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/packet_sequencer_unittest.cc
new file mode 100644
index 0000000000..d892863768
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/packet_sequencer_unittest.cc
@@ -0,0 +1,250 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/packet_sequencer.h"
+
+#include "api/units/timestamp.h"
+#include "modules/rtp_rtcp/source/rtp_packet_to_send.h"
+#include "system_wrappers/include/clock.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+constexpr Timestamp kStartTime = Timestamp::Millis(98765);
+constexpr uint32_t kMediaSsrc = 123456;
+constexpr uint32_t kRtxSsrc = 123457;
+constexpr uint8_t kMediaPayloadType = 42;
+constexpr uint16_t kMediaStartSequenceNumber = 123;
+constexpr uint16_t kRtxStartSequenceNumber = 234;
+constexpr uint16_t kDefaultSequenceNumber = 0x1234;
+constexpr uint32_t kStartRtpTimestamp = 798;
+
+class PacketSequencerTest : public ::testing::Test {
+ public:
+ PacketSequencerTest()
+ : clock_(kStartTime),
+ sequencer_(kMediaSsrc,
+ kRtxSsrc,
+ /*require_marker_before_media_padding=*/true,
+ &clock_) {}
+
+ RtpPacketToSend CreatePacket(RtpPacketMediaType type, uint32_t ssrc) {
+ RtpPacketToSend packet(/*extension_manager=*/nullptr);
+ packet.set_packet_type(type);
+ packet.SetSsrc(ssrc);
+ packet.SetSequenceNumber(kDefaultSequenceNumber);
+ packet.set_capture_time(clock_.CurrentTime());
+ packet.SetTimestamp(
+ kStartRtpTimestamp +
+ static_cast<uint32_t>(packet.capture_time().ms() - kStartTime.ms()));
+ return packet;
+ }
+
+ protected:
+ SimulatedClock clock_;
+ PacketSequencer sequencer_;
+};
+
+TEST_F(PacketSequencerTest, IgnoresMediaSsrcRetransmissions) {
+ RtpPacketToSend packet =
+ CreatePacket(RtpPacketMediaType::kRetransmission, kMediaSsrc);
+ sequencer_.set_media_sequence_number(kMediaStartSequenceNumber);
+ sequencer_.Sequence(packet);
+ EXPECT_EQ(packet.SequenceNumber(), kDefaultSequenceNumber);
+ EXPECT_EQ(sequencer_.media_sequence_number(), kMediaStartSequenceNumber);
+}
+
+TEST_F(PacketSequencerTest, SequencesAudio) {
+ RtpPacketToSend packet = CreatePacket(RtpPacketMediaType::kAudio, kMediaSsrc);
+ sequencer_.set_media_sequence_number(kMediaStartSequenceNumber);
+ sequencer_.Sequence(packet);
+ EXPECT_EQ(packet.SequenceNumber(), kMediaStartSequenceNumber);
+ EXPECT_EQ(sequencer_.media_sequence_number(), kMediaStartSequenceNumber + 1);
+}
+
+TEST_F(PacketSequencerTest, SequencesVideo) {
+ RtpPacketToSend packet = CreatePacket(RtpPacketMediaType::kVideo, kMediaSsrc);
+ sequencer_.set_media_sequence_number(kMediaStartSequenceNumber);
+ sequencer_.Sequence(packet);
+ EXPECT_EQ(packet.SequenceNumber(), kMediaStartSequenceNumber);
+ EXPECT_EQ(sequencer_.media_sequence_number(), kMediaStartSequenceNumber + 1);
+}
+
+TEST_F(PacketSequencerTest, SequencesUlpFec) {
+ RtpPacketToSend packet =
+ CreatePacket(RtpPacketMediaType::kForwardErrorCorrection, kMediaSsrc);
+ sequencer_.set_media_sequence_number(kMediaStartSequenceNumber);
+ sequencer_.Sequence(packet);
+ EXPECT_EQ(packet.SequenceNumber(), kMediaStartSequenceNumber);
+ EXPECT_EQ(sequencer_.media_sequence_number(), kMediaStartSequenceNumber + 1);
+}
+
+TEST_F(PacketSequencerTest, SequencesRtxRetransmissions) {
+ RtpPacketToSend packet =
+ CreatePacket(RtpPacketMediaType::kRetransmission, kRtxSsrc);
+ sequencer_.set_rtx_sequence_number(kRtxStartSequenceNumber);
+ sequencer_.Sequence(packet);
+ EXPECT_EQ(packet.SequenceNumber(), kRtxStartSequenceNumber);
+ EXPECT_EQ(sequencer_.rtx_sequence_number(), kRtxStartSequenceNumber + 1);
+}
+
+TEST_F(PacketSequencerTest, ProhibitsPaddingWithinVideoFrame) {
+ // Send a video packet with the marker bit set to false (indicating it is not
+ // the last packet of a frame).
+ RtpPacketToSend media_packet =
+ CreatePacket(RtpPacketMediaType::kVideo, kMediaSsrc);
+ media_packet.SetPayloadType(kMediaPayloadType);
+ media_packet.SetMarker(false);
+ sequencer_.Sequence(media_packet);
+
+ // Sending padding on the media SSRC should not be allowed at this point.
+ EXPECT_FALSE(sequencer_.CanSendPaddingOnMediaSsrc());
+
+ // Send a video packet with marker set to true, padding should be allowed
+ // again.
+ media_packet.SetMarker(true);
+ sequencer_.Sequence(media_packet);
+ EXPECT_TRUE(sequencer_.CanSendPaddingOnMediaSsrc());
+}
+
+TEST_F(PacketSequencerTest, AllowsPaddingAtAnyTimeIfSoConfigured) {
+ PacketSequencer packet_sequencer(
+ kMediaSsrc, kRtxSsrc,
+ /*require_marker_before_media_padding=*/false, &clock_);
+
+ // Send an audio packet with the marker bit set to false.
+ RtpPacketToSend media_packet =
+ CreatePacket(RtpPacketMediaType::kAudio, kMediaSsrc);
+ media_packet.SetPayloadType(kMediaPayloadType);
+ media_packet.SetMarker(false);
+ packet_sequencer.Sequence(media_packet);
+
+ // Sending padding on the media SSRC should be allowed despite no marker bit.
+ EXPECT_TRUE(packet_sequencer.CanSendPaddingOnMediaSsrc());
+}
+
+TEST_F(PacketSequencerTest, UpdatesPaddingBasedOnLastMediaPacket) {
+ // First send a media packet.
+ RtpPacketToSend media_packet =
+ CreatePacket(RtpPacketMediaType::kVideo, kMediaSsrc);
+ media_packet.SetPayloadType(kMediaPayloadType);
+ media_packet.SetMarker(true);
+ // Advance time so current time doesn't exactly match timestamp.
+ clock_.AdvanceTime(TimeDelta::Millis(5));
+ sequencer_.set_media_sequence_number(kMediaStartSequenceNumber);
+ sequencer_.Sequence(media_packet);
+
+  // Next send a padding packet and verify that the media packet's timestamps
+  // and payload type are transferred to the padding packet.
+ RtpPacketToSend padding_packet =
+ CreatePacket(RtpPacketMediaType::kPadding, kMediaSsrc);
+ padding_packet.SetPadding(/*padding_size=*/100);
+ sequencer_.Sequence(padding_packet);
+
+ EXPECT_EQ(padding_packet.SequenceNumber(), kMediaStartSequenceNumber + 1);
+ EXPECT_EQ(padding_packet.PayloadType(), kMediaPayloadType);
+ EXPECT_EQ(padding_packet.Timestamp(), media_packet.Timestamp());
+ EXPECT_EQ(padding_packet.capture_time(), media_packet.capture_time());
+}
+
+TEST_F(PacketSequencerTest, UpdatesPaddingBasedOnLastRedPacket) {
+ // First send a media packet.
+ RtpPacketToSend media_packet =
+ CreatePacket(RtpPacketMediaType::kVideo, kMediaSsrc);
+ media_packet.SetPayloadType(kMediaPayloadType);
+  // Simulate a packet with RED encapsulation.
+ media_packet.set_is_red(true);
+ uint8_t* payload_buffer = media_packet.SetPayloadSize(1);
+ payload_buffer[0] = kMediaPayloadType + 1;
+
+ media_packet.SetMarker(true);
+ // Advance time so current time doesn't exactly match timestamp.
+ clock_.AdvanceTime(TimeDelta::Millis(5));
+ sequencer_.set_media_sequence_number(kMediaStartSequenceNumber);
+ sequencer_.Sequence(media_packet);
+
+  // Next send a padding packet and verify that the media packet's timestamps
+  // and payload type are transferred to the padding packet.
+ RtpPacketToSend padding_packet =
+ CreatePacket(RtpPacketMediaType::kPadding, kMediaSsrc);
+ padding_packet.SetPadding(100);
+ sequencer_.Sequence(padding_packet);
+
+ EXPECT_EQ(padding_packet.SequenceNumber(), kMediaStartSequenceNumber + 1);
+ EXPECT_EQ(padding_packet.PayloadType(), kMediaPayloadType + 1);
+ EXPECT_EQ(padding_packet.Timestamp(), media_packet.Timestamp());
+ EXPECT_EQ(padding_packet.capture_time(), media_packet.capture_time());
+}
+
+TEST_F(PacketSequencerTest, DoesNotUpdateFieldsOnPayloadPadding) {
+ // First send a media packet.
+ RtpPacketToSend media_packet =
+ CreatePacket(RtpPacketMediaType::kVideo, kMediaSsrc);
+ media_packet.SetPayloadType(kMediaPayloadType);
+ media_packet.SetMarker(true);
+ // Advance time so current time doesn't exactly match timestamp.
+ clock_.AdvanceTime(TimeDelta::Millis(5));
+ sequencer_.set_media_sequence_number(kMediaStartSequenceNumber);
+ sequencer_.Sequence(media_packet);
+
+ // Simulate a payload padding packet on the RTX SSRC.
+ RtpPacketToSend padding_packet =
+ CreatePacket(RtpPacketMediaType::kPadding, kRtxSsrc);
+ padding_packet.SetPayloadSize(100);
+ padding_packet.SetPayloadType(kMediaPayloadType + 1);
+ padding_packet.SetTimestamp(kStartRtpTimestamp + 1);
+ padding_packet.set_capture_time(kStartTime + TimeDelta::Millis(1));
+ sequencer_.set_rtx_sequence_number(kRtxStartSequenceNumber);
+ sequencer_.Sequence(padding_packet);
+
+ // The sequence number should be updated, but timestamps kept.
+ EXPECT_EQ(padding_packet.SequenceNumber(), kRtxStartSequenceNumber);
+ EXPECT_EQ(padding_packet.PayloadType(), kMediaPayloadType + 1);
+ EXPECT_EQ(padding_packet.Timestamp(), kStartRtpTimestamp + 1);
+ EXPECT_EQ(padding_packet.capture_time(), kStartTime + TimeDelta::Millis(1));
+}
+
+TEST_F(PacketSequencerTest, UpdatesRtxPaddingBasedOnLastMediaPacket) {
+ constexpr uint32_t kTimestampTicksPerMs = 90;
+
+ // First send a media packet.
+ RtpPacketToSend media_packet =
+ CreatePacket(RtpPacketMediaType::kVideo, kMediaSsrc);
+ media_packet.SetPayloadType(kMediaPayloadType);
+ media_packet.SetMarker(true);
+ sequencer_.set_media_sequence_number(kMediaStartSequenceNumber);
+ sequencer_.Sequence(media_packet);
+
+  // Advance time; this time delta will be used to interpolate the padding
+  // timestamps.
+ constexpr TimeDelta kTimeDelta = TimeDelta::Millis(10);
+ clock_.AdvanceTime(kTimeDelta);
+
+ RtpPacketToSend padding_packet =
+ CreatePacket(RtpPacketMediaType::kPadding, kRtxSsrc);
+ padding_packet.SetPadding(100);
+ padding_packet.SetPayloadType(kMediaPayloadType + 1);
+ sequencer_.set_rtx_sequence_number(kRtxStartSequenceNumber);
+ sequencer_.Sequence(padding_packet);
+
+  // The RTX sequence number is assigned; the payload type is unchanged here.
+ EXPECT_EQ(padding_packet.SequenceNumber(), kRtxStartSequenceNumber);
+ EXPECT_EQ(padding_packet.PayloadType(), kMediaPayloadType + 1);
+  // Timestamps are offset relative to the last media packet.
+ EXPECT_EQ(
+ padding_packet.Timestamp(),
+ media_packet.Timestamp() + (kTimeDelta.ms() * kTimestampTicksPerMs));
+ EXPECT_EQ(padding_packet.capture_time(),
+ media_packet.capture_time() + kTimeDelta);
+}
+
+} // namespace
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/receive_statistics_impl.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/receive_statistics_impl.cc
new file mode 100644
index 0000000000..1e8e399f4d
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/receive_statistics_impl.cc
@@ -0,0 +1,389 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/receive_statistics_impl.h"
+
+#include <cmath>
+#include <cstdlib>
+#include <memory>
+#include <utility>
+#include <vector>
+
+#include "modules/remote_bitrate_estimator/test/bwe_test_logging.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/report_block.h"
+#include "modules/rtp_rtcp/source/rtp_packet_received.h"
+#include "modules/rtp_rtcp/source/rtp_rtcp_config.h"
+#include "modules/rtp_rtcp/source/time_util.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/time_utils.h"
+#include "system_wrappers/include/clock.h"
+
+namespace webrtc {
+namespace {
+constexpr int64_t kStatisticsTimeoutMs = 8000;
+constexpr int64_t kStatisticsProcessIntervalMs = 1000;
+} // namespace
+
+StreamStatistician::~StreamStatistician() {}
+
+StreamStatisticianImpl::StreamStatisticianImpl(uint32_t ssrc,
+ Clock* clock,
+ int max_reordering_threshold)
+ : ssrc_(ssrc),
+ clock_(clock),
+ delta_internal_unix_epoch_ms_(clock_->CurrentNtpInMilliseconds() -
+ clock_->TimeInMilliseconds() -
+ rtc::kNtpJan1970Millisecs),
+ incoming_bitrate_(kStatisticsProcessIntervalMs,
+ RateStatistics::kBpsScale),
+ max_reordering_threshold_(max_reordering_threshold),
+ enable_retransmit_detection_(false),
+ cumulative_loss_is_capped_(false),
+ jitter_q4_(0),
+ cumulative_loss_(0),
+ cumulative_loss_rtcp_offset_(0),
+ last_receive_time_ms_(0),
+ last_received_timestamp_(0),
+ received_seq_first_(-1),
+ received_seq_max_(-1),
+ last_report_cumulative_loss_(0),
+ last_report_seq_max_(-1) {}
+
+StreamStatisticianImpl::~StreamStatisticianImpl() = default;
+
+bool StreamStatisticianImpl::UpdateOutOfOrder(const RtpPacketReceived& packet,
+ int64_t sequence_number,
+ int64_t now_ms) {
+  // Check if `packet` is the second packet of a stream restart.
+ if (received_seq_out_of_order_) {
+    // Count the previous packet as received; counting it was postponed below.
+ --cumulative_loss_;
+
+ uint16_t expected_sequence_number = *received_seq_out_of_order_ + 1;
+ received_seq_out_of_order_ = absl::nullopt;
+ if (packet.SequenceNumber() == expected_sequence_number) {
+ // Ignore sequence number gap caused by stream restart for packet loss
+ // calculation, by setting received_seq_max_ to the sequence number just
+ // before the out-of-order seqno. This gives a net zero change of
+ // `cumulative_loss_`, for the two packets interpreted as a stream reset.
+ //
+ // Fraction loss for the next report may get a bit off, since we don't
+ // update last_report_seq_max_ and last_report_cumulative_loss_ in a
+ // consistent way.
+ last_report_seq_max_ = sequence_number - 2;
+ received_seq_max_ = sequence_number - 2;
+ return false;
+ }
+ }
+
+ if (std::abs(sequence_number - received_seq_max_) >
+ max_reordering_threshold_) {
+    // The sequence number gap looks too large; wait until the next packet to
+    // check for a stream restart.
+ received_seq_out_of_order_ = packet.SequenceNumber();
+ // Postpone counting this as a received packet until we know how to update
+ // `received_seq_max_`, otherwise we temporarily decrement
+ // `cumulative_loss_`. The
+ // ReceiveStatisticsTest.StreamRestartDoesntCountAsLoss test expects
+ // `cumulative_loss_` to be unchanged by the reception of the first packet
+ // after stream reset.
+ ++cumulative_loss_;
+ return true;
+ }
+
+ if (sequence_number > received_seq_max_)
+ return false;
+
+  // Old out-of-order packet; may be a retransmission.
+ if (enable_retransmit_detection_ && IsRetransmitOfOldPacket(packet, now_ms))
+ receive_counters_.retransmitted.AddPacket(packet);
+ return true;
+}
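+
+// Example of the restart handling above: after in-order sequence numbers
+// {0, 1}, a jump to 400 with a reordering threshold of 200 is held back as a
+// potential restart; if 401 follows, the gap is written off and cumulative
+// loss stays at zero (see the StreamRestartDoesntCountAsLoss unit test).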
+
+void StreamStatisticianImpl::UpdateCounters(const RtpPacketReceived& packet) {
+ RTC_DCHECK_EQ(ssrc_, packet.Ssrc());
+ int64_t now_ms = clock_->TimeInMilliseconds();
+
+ incoming_bitrate_.Update(packet.size(), now_ms);
+ receive_counters_.last_packet_received_timestamp_ms = now_ms;
+ receive_counters_.transmitted.AddPacket(packet);
+ --cumulative_loss_;
+
+ int64_t sequence_number =
+ seq_unwrapper_.UnwrapWithoutUpdate(packet.SequenceNumber());
+
+ if (!ReceivedRtpPacket()) {
+ received_seq_first_ = sequence_number;
+ last_report_seq_max_ = sequence_number - 1;
+ received_seq_max_ = sequence_number - 1;
+ receive_counters_.first_packet_time_ms = now_ms;
+ } else if (UpdateOutOfOrder(packet, sequence_number, now_ms)) {
+ return;
+ }
+  // In-order packet.
+ cumulative_loss_ += sequence_number - received_seq_max_;
+ received_seq_max_ = sequence_number;
+ seq_unwrapper_.UpdateLast(sequence_number);
+
+  // If the timestamp changed and more than one in-order packet has been
+  // received, calculate new jitter statistics.
+ if (packet.Timestamp() != last_received_timestamp_ &&
+ (receive_counters_.transmitted.packets -
+ receive_counters_.retransmitted.packets) > 1) {
+ UpdateJitter(packet, now_ms);
+ }
+ last_received_timestamp_ = packet.Timestamp();
+ last_receive_time_ms_ = now_ms;
+}
+
+void StreamStatisticianImpl::UpdateJitter(const RtpPacketReceived& packet,
+ int64_t receive_time_ms) {
+ int64_t receive_diff_ms = receive_time_ms - last_receive_time_ms_;
+ RTC_DCHECK_GE(receive_diff_ms, 0);
+ uint32_t receive_diff_rtp = static_cast<uint32_t>(
+ (receive_diff_ms * packet.payload_type_frequency()) / 1000);
+ int32_t time_diff_samples =
+ receive_diff_rtp - (packet.Timestamp() - last_received_timestamp_);
+
+ time_diff_samples = std::abs(time_diff_samples);
+
+  // lib_jingle sometimes delivers crazy jumps in the timestamp for the same
+  // stream. If this happens, don't update the jitter value. Use 5 seconds at
+  // video frequency (90 kHz), i.e. 450000 samples, as the threshold.
+ if (time_diff_samples < 450000) {
+ // Note we calculate in Q4 to avoid using float.
+ int32_t jitter_diff_q4 = (time_diff_samples << 4) - jitter_q4_;
+ jitter_q4_ += ((jitter_diff_q4 + 8) >> 4);
+ }
+}
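+
+// The function above implements the interarrival jitter estimator from
+// RFC 3550 section 6.4.1: J(i) = J(i-1) + (|D(i-1, i)| - J(i-1)) / 16, where
+// D is the difference in relative transit time between two packets. Keeping
+// the estimate in Q4 (scaled by 16) makes the division by 16 an exact rounded
+// shift, avoiding floating point.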
+
+void StreamStatisticianImpl::SetMaxReorderingThreshold(
+ int max_reordering_threshold) {
+ max_reordering_threshold_ = max_reordering_threshold;
+}
+
+void StreamStatisticianImpl::EnableRetransmitDetection(bool enable) {
+ enable_retransmit_detection_ = enable;
+}
+
+RtpReceiveStats StreamStatisticianImpl::GetStats() const {
+ RtpReceiveStats stats;
+ stats.packets_lost = cumulative_loss_;
+ // Note: internal jitter value is in Q4 and needs to be scaled by 1/16.
+ stats.jitter = jitter_q4_ >> 4;
+ if (receive_counters_.last_packet_received_timestamp_ms.has_value()) {
+ stats.last_packet_received_timestamp_ms =
+ *receive_counters_.last_packet_received_timestamp_ms +
+ delta_internal_unix_epoch_ms_;
+ }
+ stats.packet_counter = receive_counters_.transmitted;
+ return stats;
+}
+
+void StreamStatisticianImpl::MaybeAppendReportBlockAndReset(
+ std::vector<rtcp::ReportBlock>& report_blocks) {
+ int64_t now_ms = clock_->TimeInMilliseconds();
+ if (now_ms - last_receive_time_ms_ >= kStatisticsTimeoutMs) {
+ // Not active.
+ return;
+ }
+ if (!ReceivedRtpPacket()) {
+ return;
+ }
+
+ report_blocks.emplace_back();
+ rtcp::ReportBlock& stats = report_blocks.back();
+ stats.SetMediaSsrc(ssrc_);
+ // Calculate fraction lost.
+ int64_t exp_since_last = received_seq_max_ - last_report_seq_max_;
+ RTC_DCHECK_GE(exp_since_last, 0);
+
+ int32_t lost_since_last = cumulative_loss_ - last_report_cumulative_loss_;
+ if (exp_since_last > 0 && lost_since_last > 0) {
+ // Scale 0 to 255, where 255 is 100% loss.
+ stats.SetFractionLost(255 * lost_since_last / exp_since_last);
+ }
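+  // Example: 1 packet lost out of 5 expected since the last report gives
+  // 255 * 1 / 5 = 51, i.e. 20% loss (the value checked by the unit tests).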
+
+ int packets_lost = cumulative_loss_ + cumulative_loss_rtcp_offset_;
+ if (packets_lost < 0) {
+    // Clamp to zero. Workaround to accommodate senders that misbehave with
+    // negative cumulative loss.
+ packets_lost = 0;
+ cumulative_loss_rtcp_offset_ = -cumulative_loss_;
+ }
+ if (packets_lost > 0x7fffff) {
+    // Packets lost is a 24-bit signed field and thus should be clamped, as
+    // described in https://datatracker.ietf.org/doc/html/rfc3550#appendix-A.3
+ if (!cumulative_loss_is_capped_) {
+ cumulative_loss_is_capped_ = true;
+ RTC_LOG(LS_WARNING) << "Cumulative loss reached maximum value for ssrc "
+ << ssrc_;
+ }
+ packets_lost = 0x7fffff;
+ }
+ stats.SetCumulativeLost(packets_lost);
+ stats.SetExtHighestSeqNum(received_seq_max_);
+ // Note: internal jitter value is in Q4 and needs to be scaled by 1/16.
+ stats.SetJitter(jitter_q4_ >> 4);
+
+ // Only for report blocks in RTCP SR and RR.
+ last_report_cumulative_loss_ = cumulative_loss_;
+ last_report_seq_max_ = received_seq_max_;
+ BWE_TEST_LOGGING_PLOT_WITH_SSRC(1, "cumulative_loss_pkts", now_ms,
+ cumulative_loss_, ssrc_);
+ BWE_TEST_LOGGING_PLOT_WITH_SSRC(1, "received_seq_max_pkts", now_ms,
+ (received_seq_max_ - received_seq_first_),
+ ssrc_);
+}
+
+absl::optional<int> StreamStatisticianImpl::GetFractionLostInPercent() const {
+ if (!ReceivedRtpPacket()) {
+ return absl::nullopt;
+ }
+ int64_t expected_packets = 1 + received_seq_max_ - received_seq_first_;
+ if (expected_packets <= 0) {
+ return absl::nullopt;
+ }
+ if (cumulative_loss_ <= 0) {
+ return 0;
+ }
+ return 100 * static_cast<int64_t>(cumulative_loss_) / expected_packets;
+}
+
+StreamDataCounters StreamStatisticianImpl::GetReceiveStreamDataCounters()
+ const {
+ return receive_counters_;
+}
+
+uint32_t StreamStatisticianImpl::BitrateReceived() const {
+ return incoming_bitrate_.Rate(clock_->TimeInMilliseconds()).value_or(0);
+}
+
+bool StreamStatisticianImpl::IsRetransmitOfOldPacket(
+ const RtpPacketReceived& packet,
+ int64_t now_ms) const {
+ uint32_t frequency_khz = packet.payload_type_frequency() / 1000;
+ RTC_DCHECK_GT(frequency_khz, 0);
+
+ int64_t time_diff_ms = now_ms - last_receive_time_ms_;
+
+  // Difference in timestamp since the last packet received in order.
+ uint32_t timestamp_diff = packet.Timestamp() - last_received_timestamp_;
+ uint32_t rtp_time_stamp_diff_ms = timestamp_diff / frequency_khz;
+
+ int64_t max_delay_ms = 0;
+
+ // Jitter standard deviation in samples.
+ float jitter_std = std::sqrt(static_cast<float>(jitter_q4_ >> 4));
+
+ // 2 times the standard deviation => 95% confidence.
+ // And transform to milliseconds by dividing by the frequency in kHz.
+ max_delay_ms = static_cast<int64_t>((2 * jitter_std) / frequency_khz);
+
+ // Min max_delay_ms is 1.
+ if (max_delay_ms == 0) {
+ max_delay_ms = 1;
+ }
+ return time_diff_ms > rtp_time_stamp_diff_ms + max_delay_ms;
+}
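+
+// Illustration of the heuristic above: for a 90 kHz payload, a packet that
+// arrives 100 ms after the previous in-order packet but carries a timestamp
+// only 30 ms (2700 ticks) newer is roughly 70 ms "too old", and is counted
+// as a retransmission once that excess exceeds the jitter-based allowance.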
+
+std::unique_ptr<ReceiveStatistics> ReceiveStatistics::Create(Clock* clock) {
+ return std::make_unique<ReceiveStatisticsLocked>(
+ clock, [](uint32_t ssrc, Clock* clock, int max_reordering_threshold) {
+ return std::make_unique<StreamStatisticianLocked>(
+ ssrc, clock, max_reordering_threshold);
+ });
+}
+
+std::unique_ptr<ReceiveStatistics> ReceiveStatistics::CreateThreadCompatible(
+ Clock* clock) {
+ return std::make_unique<ReceiveStatisticsImpl>(
+ clock, [](uint32_t ssrc, Clock* clock, int max_reordering_threshold) {
+ return std::make_unique<StreamStatisticianImpl>(
+ ssrc, clock, max_reordering_threshold);
+ });
+}
+
+ReceiveStatisticsImpl::ReceiveStatisticsImpl(
+ Clock* clock,
+ std::function<std::unique_ptr<StreamStatisticianImplInterface>(
+ uint32_t ssrc,
+ Clock* clock,
+ int max_reordering_threshold)> stream_statistician_factory)
+ : clock_(clock),
+ stream_statistician_factory_(std::move(stream_statistician_factory)),
+ last_returned_ssrc_idx_(0),
+ max_reordering_threshold_(kDefaultMaxReorderingThreshold) {}
+
+void ReceiveStatisticsImpl::OnRtpPacket(const RtpPacketReceived& packet) {
+  // A StreamStatisticianImpl instance is created once and only destroyed when
+  // this whole ReceiveStatisticsImpl is destroyed. StreamStatisticianImpl has
+  // its own locking, so don't hold receive_statistics_lock_ (potential
+  // deadlock).
+ GetOrCreateStatistician(packet.Ssrc())->UpdateCounters(packet);
+}
+
+StreamStatistician* ReceiveStatisticsImpl::GetStatistician(
+ uint32_t ssrc) const {
+ const auto& it = statisticians_.find(ssrc);
+ if (it == statisticians_.end())
+ return nullptr;
+ return it->second.get();
+}
+
+StreamStatisticianImplInterface* ReceiveStatisticsImpl::GetOrCreateStatistician(
+ uint32_t ssrc) {
+ std::unique_ptr<StreamStatisticianImplInterface>& impl = statisticians_[ssrc];
+ if (impl == nullptr) { // new element
+ impl =
+ stream_statistician_factory_(ssrc, clock_, max_reordering_threshold_);
+ all_ssrcs_.push_back(ssrc);
+ }
+ return impl.get();
+}
+
+void ReceiveStatisticsImpl::SetMaxReorderingThreshold(
+ int max_reordering_threshold) {
+ max_reordering_threshold_ = max_reordering_threshold;
+ for (auto& statistician : statisticians_) {
+ statistician.second->SetMaxReorderingThreshold(max_reordering_threshold);
+ }
+}
+
+void ReceiveStatisticsImpl::SetMaxReorderingThreshold(
+ uint32_t ssrc,
+ int max_reordering_threshold) {
+ GetOrCreateStatistician(ssrc)->SetMaxReorderingThreshold(
+ max_reordering_threshold);
+}
+
+void ReceiveStatisticsImpl::EnableRetransmitDetection(uint32_t ssrc,
+ bool enable) {
+ GetOrCreateStatistician(ssrc)->EnableRetransmitDetection(enable);
+}
+
+std::vector<rtcp::ReportBlock> ReceiveStatisticsImpl::RtcpReportBlocks(
+ size_t max_blocks) {
+ std::vector<rtcp::ReportBlock> result;
+ result.reserve(std::min(max_blocks, all_ssrcs_.size()));
+
+ size_t ssrc_idx = 0;
+ for (size_t i = 0; i < all_ssrcs_.size() && result.size() < max_blocks; ++i) {
+ ssrc_idx = (last_returned_ssrc_idx_ + i + 1) % all_ssrcs_.size();
+ const uint32_t media_ssrc = all_ssrcs_[ssrc_idx];
+ auto statistician_it = statisticians_.find(media_ssrc);
+ RTC_DCHECK(statistician_it != statisticians_.end());
+ statistician_it->second->MaybeAppendReportBlockAndReset(result);
+ }
+ last_returned_ssrc_idx_ = ssrc_idx;
+ return result;
+}
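+
+// The loop above starts one past `last_returned_ssrc_idx_` and wraps around
+// `all_ssrcs_`, so repeated calls with a small `max_blocks` round-robin over
+// all streams; e.g. with four SSRCs and max_blocks = 2, two consecutive calls
+// cover all four (see the RtcpReportBlocks unit tests).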
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/receive_statistics_impl.h b/third_party/libwebrtc/modules/rtp_rtcp/source/receive_statistics_impl.h
new file mode 100644
index 0000000000..1a70fe4ad7
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/receive_statistics_impl.h
@@ -0,0 +1,246 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_RTP_RTCP_SOURCE_RECEIVE_STATISTICS_IMPL_H_
+#define MODULES_RTP_RTCP_SOURCE_RECEIVE_STATISTICS_IMPL_H_
+
+#include <algorithm>
+#include <functional>
+#include <memory>
+#include <utility>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "modules/include/module_common_types_public.h"
+#include "modules/rtp_rtcp/include/receive_statistics.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/report_block.h"
+#include "rtc_base/containers/flat_map.h"
+#include "rtc_base/rate_statistics.h"
+#include "rtc_base/synchronization/mutex.h"
+#include "rtc_base/thread_annotations.h"
+
+namespace webrtc {
+
+// Extends StreamStatistician with methods needed by the implementation.
+class StreamStatisticianImplInterface : public StreamStatistician {
+ public:
+ virtual ~StreamStatisticianImplInterface() = default;
+ virtual void MaybeAppendReportBlockAndReset(
+ std::vector<rtcp::ReportBlock>& report_blocks) = 0;
+ virtual void SetMaxReorderingThreshold(int max_reordering_threshold) = 0;
+ virtual void EnableRetransmitDetection(bool enable) = 0;
+ virtual void UpdateCounters(const RtpPacketReceived& packet) = 0;
+};
+
+// Thread-compatible implementation of StreamStatisticianImplInterface.
+class StreamStatisticianImpl : public StreamStatisticianImplInterface {
+ public:
+ StreamStatisticianImpl(uint32_t ssrc,
+ Clock* clock,
+ int max_reordering_threshold);
+ ~StreamStatisticianImpl() override;
+
+ // Implements StreamStatistician
+ RtpReceiveStats GetStats() const override;
+ absl::optional<int> GetFractionLostInPercent() const override;
+ StreamDataCounters GetReceiveStreamDataCounters() const override;
+ uint32_t BitrateReceived() const override;
+
+ // Implements StreamStatisticianImplInterface
+ void MaybeAppendReportBlockAndReset(
+ std::vector<rtcp::ReportBlock>& report_blocks) override;
+ void SetMaxReorderingThreshold(int max_reordering_threshold) override;
+ void EnableRetransmitDetection(bool enable) override;
+ // Updates StreamStatistician for incoming packets.
+ void UpdateCounters(const RtpPacketReceived& packet) override;
+
+ private:
+ bool IsRetransmitOfOldPacket(const RtpPacketReceived& packet,
+ int64_t now_ms) const;
+ void UpdateJitter(const RtpPacketReceived& packet, int64_t receive_time_ms);
+ // Updates StreamStatistician for out of order packets.
+ // Returns true if packet considered to be out of order.
+ bool UpdateOutOfOrder(const RtpPacketReceived& packet,
+ int64_t sequence_number,
+ int64_t now_ms);
+  // Checks if this StreamStatistician has received any RTP packets.
+ bool ReceivedRtpPacket() const { return received_seq_first_ >= 0; }
+
+ const uint32_t ssrc_;
+ Clock* const clock_;
+ // Delta used to map internal timestamps to Unix epoch ones.
+ const int64_t delta_internal_unix_epoch_ms_;
+ RateStatistics incoming_bitrate_;
+ // In number of packets or sequence numbers.
+ int max_reordering_threshold_;
+ bool enable_retransmit_detection_;
+ bool cumulative_loss_is_capped_;
+
+ // Stats on received RTP packets.
+ uint32_t jitter_q4_;
+ // Cumulative loss according to RFC 3550, which may be negative (and often is,
+ // if packets are reordered and there are non-RTX retransmissions).
+ int32_t cumulative_loss_;
+  // Offset added to outgoing RTCP reports to ensure that the reported
+  // cumulative loss is non-negative. Reports with negative values confuse some
+  // senders, in particular our own loss-based bandwidth estimator.
+ int32_t cumulative_loss_rtcp_offset_;
+
+ int64_t last_receive_time_ms_;
+ uint32_t last_received_timestamp_;
+ SequenceNumberUnwrapper seq_unwrapper_;
+ int64_t received_seq_first_;
+ int64_t received_seq_max_;
+  // Assume that the other side restarted when there are two sequential packets
+  // with a large jump from received_seq_max_.
+ absl::optional<uint16_t> received_seq_out_of_order_;
+
+ // Current counter values.
+ StreamDataCounters receive_counters_;
+
+ // Counter values when we sent the last report.
+ int32_t last_report_cumulative_loss_;
+ int64_t last_report_seq_max_;
+};
+
+// Thread-safe implementation of StreamStatisticianImplInterface.
+class StreamStatisticianLocked : public StreamStatisticianImplInterface {
+ public:
+ StreamStatisticianLocked(uint32_t ssrc,
+ Clock* clock,
+ int max_reordering_threshold)
+ : impl_(ssrc, clock, max_reordering_threshold) {}
+ ~StreamStatisticianLocked() override = default;
+
+ RtpReceiveStats GetStats() const override {
+ MutexLock lock(&stream_lock_);
+ return impl_.GetStats();
+ }
+ absl::optional<int> GetFractionLostInPercent() const override {
+ MutexLock lock(&stream_lock_);
+ return impl_.GetFractionLostInPercent();
+ }
+ StreamDataCounters GetReceiveStreamDataCounters() const override {
+ MutexLock lock(&stream_lock_);
+ return impl_.GetReceiveStreamDataCounters();
+ }
+ uint32_t BitrateReceived() const override {
+ MutexLock lock(&stream_lock_);
+ return impl_.BitrateReceived();
+ }
+ void MaybeAppendReportBlockAndReset(
+ std::vector<rtcp::ReportBlock>& report_blocks) override {
+ MutexLock lock(&stream_lock_);
+ impl_.MaybeAppendReportBlockAndReset(report_blocks);
+ }
+ void SetMaxReorderingThreshold(int max_reordering_threshold) override {
+ MutexLock lock(&stream_lock_);
+ return impl_.SetMaxReorderingThreshold(max_reordering_threshold);
+ }
+ void EnableRetransmitDetection(bool enable) override {
+ MutexLock lock(&stream_lock_);
+ return impl_.EnableRetransmitDetection(enable);
+ }
+ void UpdateCounters(const RtpPacketReceived& packet) override {
+ MutexLock lock(&stream_lock_);
+ return impl_.UpdateCounters(packet);
+ }
+
+ private:
+ mutable Mutex stream_lock_;
+ StreamStatisticianImpl impl_ RTC_GUARDED_BY(&stream_lock_);
+};
+
+// Thread-compatible implementation.
+class ReceiveStatisticsImpl : public ReceiveStatistics {
+ public:
+ ReceiveStatisticsImpl(
+ Clock* clock,
+ std::function<std::unique_ptr<StreamStatisticianImplInterface>(
+ uint32_t ssrc,
+ Clock* clock,
+ int max_reordering_threshold)> stream_statistician_factory);
+ ~ReceiveStatisticsImpl() override = default;
+
+ // Implements ReceiveStatisticsProvider.
+ std::vector<rtcp::ReportBlock> RtcpReportBlocks(size_t max_blocks) override;
+
+ // Implements RtpPacketSinkInterface
+ void OnRtpPacket(const RtpPacketReceived& packet) override;
+
+ // Implements ReceiveStatistics.
+ StreamStatistician* GetStatistician(uint32_t ssrc) const override;
+ void SetMaxReorderingThreshold(int max_reordering_threshold) override;
+ void SetMaxReorderingThreshold(uint32_t ssrc,
+ int max_reordering_threshold) override;
+ void EnableRetransmitDetection(uint32_t ssrc, bool enable) override;
+
+ private:
+ StreamStatisticianImplInterface* GetOrCreateStatistician(uint32_t ssrc);
+
+ Clock* const clock_;
+ std::function<std::unique_ptr<StreamStatisticianImplInterface>(
+ uint32_t ssrc,
+ Clock* clock,
+ int max_reordering_threshold)>
+ stream_statistician_factory_;
+ // The index within `all_ssrcs_` that was last returned.
+ size_t last_returned_ssrc_idx_;
+ std::vector<uint32_t> all_ssrcs_;
+ int max_reordering_threshold_;
+ flat_map<uint32_t /*ssrc*/, std::unique_ptr<StreamStatisticianImplInterface>>
+ statisticians_;
+};
+
+// Thread-safe implementation wrapping access to ReceiveStatisticsImpl with a
+// mutex.
+class ReceiveStatisticsLocked : public ReceiveStatistics {
+ public:
+ explicit ReceiveStatisticsLocked(
+ Clock* clock,
+ std::function<std::unique_ptr<StreamStatisticianImplInterface>(
+ uint32_t ssrc,
+ Clock* clock,
+          int max_reordering_threshold)> stream_statistician_factory)
+      : impl_(clock, std::move(stream_statistician_factory)) {}
+ ~ReceiveStatisticsLocked() override = default;
+ std::vector<rtcp::ReportBlock> RtcpReportBlocks(size_t max_blocks) override {
+ MutexLock lock(&receive_statistics_lock_);
+ return impl_.RtcpReportBlocks(max_blocks);
+ }
+ void OnRtpPacket(const RtpPacketReceived& packet) override {
+ MutexLock lock(&receive_statistics_lock_);
+ return impl_.OnRtpPacket(packet);
+ }
+ StreamStatistician* GetStatistician(uint32_t ssrc) const override {
+ MutexLock lock(&receive_statistics_lock_);
+ return impl_.GetStatistician(ssrc);
+ }
+ void SetMaxReorderingThreshold(int max_reordering_threshold) override {
+ MutexLock lock(&receive_statistics_lock_);
+ return impl_.SetMaxReorderingThreshold(max_reordering_threshold);
+ }
+ void SetMaxReorderingThreshold(uint32_t ssrc,
+ int max_reordering_threshold) override {
+ MutexLock lock(&receive_statistics_lock_);
+ return impl_.SetMaxReorderingThreshold(ssrc, max_reordering_threshold);
+ }
+ void EnableRetransmitDetection(uint32_t ssrc, bool enable) override {
+ MutexLock lock(&receive_statistics_lock_);
+ return impl_.EnableRetransmitDetection(ssrc, enable);
+ }
+
+ private:
+ mutable Mutex receive_statistics_lock_;
+ ReceiveStatisticsImpl impl_ RTC_GUARDED_BY(&receive_statistics_lock_);
+};
+
+} // namespace webrtc
+#endif // MODULES_RTP_RTCP_SOURCE_RECEIVE_STATISTICS_IMPL_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/receive_statistics_unittest.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/receive_statistics_unittest.cc
new file mode 100644
index 0000000000..d40a743469
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/receive_statistics_unittest.cc
@@ -0,0 +1,582 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/include/receive_statistics.h"
+
+#include <memory>
+#include <vector>
+
+#include "modules/rtp_rtcp/source/rtp_packet_received.h"
+#include "rtc_base/random.h"
+#include "system_wrappers/include/clock.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+using ::testing::SizeIs;
+using ::testing::UnorderedElementsAre;
+
+const size_t kPacketSize1 = 100;
+const size_t kPacketSize2 = 300;
+const uint32_t kSsrc1 = 101;
+const uint32_t kSsrc2 = 202;
+const uint32_t kSsrc3 = 203;
+const uint32_t kSsrc4 = 304;
+
+RtpPacketReceived CreateRtpPacket(uint32_t ssrc,
+ size_t header_size,
+ size_t payload_size,
+ size_t padding_size) {
+ RtpPacketReceived packet;
+ packet.SetSsrc(ssrc);
+ packet.SetSequenceNumber(100);
+ packet.set_payload_type_frequency(90000);
+ RTC_CHECK_GE(header_size, 12);
+ RTC_CHECK_EQ(header_size % 4, 0);
+ if (header_size > 12) {
+ // Insert csrcs to increase header size.
+ const int num_csrcs = (header_size - 12) / 4;
+ std::vector<uint32_t> csrcs(num_csrcs);
+ packet.SetCsrcs(csrcs);
+ }
+ packet.SetPayloadSize(payload_size);
+ packet.SetPadding(padding_size);
+ return packet;
+}
+
+RtpPacketReceived CreateRtpPacket(uint32_t ssrc, size_t packet_size) {
+ return CreateRtpPacket(ssrc, 12, packet_size - 12, 0);
+}
+
+void IncrementSequenceNumber(RtpPacketReceived* packet, uint16_t incr) {
+ packet->SetSequenceNumber(packet->SequenceNumber() + incr);
+}
+
+void IncrementSequenceNumber(RtpPacketReceived* packet) {
+ IncrementSequenceNumber(packet, 1);
+}
+
+class ReceiveStatisticsTest : public ::testing::TestWithParam<bool> {
+ public:
+ ReceiveStatisticsTest()
+ : clock_(0),
+ receive_statistics_(
+ GetParam() ? ReceiveStatistics::Create(&clock_)
+ : ReceiveStatistics::CreateThreadCompatible(&clock_)) {
+ packet1_ = CreateRtpPacket(kSsrc1, kPacketSize1);
+ packet2_ = CreateRtpPacket(kSsrc2, kPacketSize2);
+ }
+
+ protected:
+ SimulatedClock clock_;
+ std::unique_ptr<ReceiveStatistics> receive_statistics_;
+ RtpPacketReceived packet1_;
+ RtpPacketReceived packet2_;
+};
+
+INSTANTIATE_TEST_SUITE_P(All,
+ ReceiveStatisticsTest,
+ ::testing::Bool(),
+ [](::testing::TestParamInfo<bool> info) {
+ return info.param ? "WithMutex" : "WithoutMutex";
+ });
+
+TEST_P(ReceiveStatisticsTest, TwoIncomingSsrcs) {
+ receive_statistics_->OnRtpPacket(packet1_);
+ IncrementSequenceNumber(&packet1_);
+ receive_statistics_->OnRtpPacket(packet2_);
+ IncrementSequenceNumber(&packet2_);
+ clock_.AdvanceTimeMilliseconds(100);
+ receive_statistics_->OnRtpPacket(packet1_);
+ IncrementSequenceNumber(&packet1_);
+ receive_statistics_->OnRtpPacket(packet2_);
+ IncrementSequenceNumber(&packet2_);
+
+ StreamStatistician* statistician =
+ receive_statistics_->GetStatistician(kSsrc1);
+  ASSERT_TRUE(statistician != nullptr);
+ EXPECT_GT(statistician->BitrateReceived(), 0u);
+ StreamDataCounters counters = statistician->GetReceiveStreamDataCounters();
+ EXPECT_EQ(176u, counters.transmitted.payload_bytes);
+ EXPECT_EQ(24u, counters.transmitted.header_bytes);
+ EXPECT_EQ(0u, counters.transmitted.padding_bytes);
+ EXPECT_EQ(2u, counters.transmitted.packets);
+
+ statistician = receive_statistics_->GetStatistician(kSsrc2);
+  ASSERT_TRUE(statistician != nullptr);
+ EXPECT_GT(statistician->BitrateReceived(), 0u);
+ counters = statistician->GetReceiveStreamDataCounters();
+ EXPECT_EQ(576u, counters.transmitted.payload_bytes);
+ EXPECT_EQ(24u, counters.transmitted.header_bytes);
+ EXPECT_EQ(0u, counters.transmitted.padding_bytes);
+ EXPECT_EQ(2u, counters.transmitted.packets);
+
+ EXPECT_EQ(2u, receive_statistics_->RtcpReportBlocks(3).size());
+ // Add more incoming packets and verify that they are registered in both
+ // access methods.
+ receive_statistics_->OnRtpPacket(packet1_);
+ IncrementSequenceNumber(&packet1_);
+ receive_statistics_->OnRtpPacket(packet2_);
+ IncrementSequenceNumber(&packet2_);
+
+ counters = receive_statistics_->GetStatistician(kSsrc1)
+ ->GetReceiveStreamDataCounters();
+ EXPECT_EQ(264u, counters.transmitted.payload_bytes);
+ EXPECT_EQ(36u, counters.transmitted.header_bytes);
+ EXPECT_EQ(0u, counters.transmitted.padding_bytes);
+ EXPECT_EQ(3u, counters.transmitted.packets);
+
+ counters = receive_statistics_->GetStatistician(kSsrc2)
+ ->GetReceiveStreamDataCounters();
+ EXPECT_EQ(864u, counters.transmitted.payload_bytes);
+ EXPECT_EQ(36u, counters.transmitted.header_bytes);
+ EXPECT_EQ(0u, counters.transmitted.padding_bytes);
+ EXPECT_EQ(3u, counters.transmitted.packets);
+}
+
+TEST_P(ReceiveStatisticsTest,
+ RtcpReportBlocksReturnsMaxBlocksWhenThereAreMoreStatisticians) {
+ RtpPacketReceived packet1 = CreateRtpPacket(kSsrc1, kPacketSize1);
+ RtpPacketReceived packet2 = CreateRtpPacket(kSsrc2, kPacketSize1);
+ RtpPacketReceived packet3 = CreateRtpPacket(kSsrc3, kPacketSize1);
+ receive_statistics_->OnRtpPacket(packet1);
+ receive_statistics_->OnRtpPacket(packet2);
+ receive_statistics_->OnRtpPacket(packet3);
+
+ EXPECT_THAT(receive_statistics_->RtcpReportBlocks(2), SizeIs(2));
+ EXPECT_THAT(receive_statistics_->RtcpReportBlocks(2), SizeIs(2));
+ EXPECT_THAT(receive_statistics_->RtcpReportBlocks(2), SizeIs(2));
+}
+
+TEST_P(ReceiveStatisticsTest,
+ RtcpReportBlocksReturnsAllObservedSsrcsWithMultipleCalls) {
+ RtpPacketReceived packet1 = CreateRtpPacket(kSsrc1, kPacketSize1);
+ RtpPacketReceived packet2 = CreateRtpPacket(kSsrc2, kPacketSize1);
+ RtpPacketReceived packet3 = CreateRtpPacket(kSsrc3, kPacketSize1);
+ RtpPacketReceived packet4 = CreateRtpPacket(kSsrc4, kPacketSize1);
+ receive_statistics_->OnRtpPacket(packet1);
+ receive_statistics_->OnRtpPacket(packet2);
+ receive_statistics_->OnRtpPacket(packet3);
+ receive_statistics_->OnRtpPacket(packet4);
+
+ std::vector<uint32_t> observed_ssrcs;
+ std::vector<rtcp::ReportBlock> report_blocks =
+ receive_statistics_->RtcpReportBlocks(2);
+ ASSERT_THAT(report_blocks, SizeIs(2));
+ observed_ssrcs.push_back(report_blocks[0].source_ssrc());
+ observed_ssrcs.push_back(report_blocks[1].source_ssrc());
+
+ report_blocks = receive_statistics_->RtcpReportBlocks(2);
+ ASSERT_THAT(report_blocks, SizeIs(2));
+ observed_ssrcs.push_back(report_blocks[0].source_ssrc());
+ observed_ssrcs.push_back(report_blocks[1].source_ssrc());
+
+ EXPECT_THAT(observed_ssrcs,
+ UnorderedElementsAre(kSsrc1, kSsrc2, kSsrc3, kSsrc4));
+}
+
+TEST_P(ReceiveStatisticsTest, ActiveStatisticians) {
+ receive_statistics_->OnRtpPacket(packet1_);
+ IncrementSequenceNumber(&packet1_);
+ clock_.AdvanceTimeMilliseconds(1000);
+ receive_statistics_->OnRtpPacket(packet2_);
+ IncrementSequenceNumber(&packet2_);
+ // Nothing should time out since only 1000 ms has passed since the first
+ // packet came in.
+ EXPECT_EQ(2u, receive_statistics_->RtcpReportBlocks(3).size());
+
+ clock_.AdvanceTimeMilliseconds(7000);
+ // kSsrc1 should have timed out.
+ EXPECT_EQ(1u, receive_statistics_->RtcpReportBlocks(3).size());
+
+ clock_.AdvanceTimeMilliseconds(1000);
+ // kSsrc2 should have timed out.
+ EXPECT_EQ(0u, receive_statistics_->RtcpReportBlocks(3).size());
+
+ receive_statistics_->OnRtpPacket(packet1_);
+ IncrementSequenceNumber(&packet1_);
+ // kSsrc1 should be active again and the data counters should have survived.
+ EXPECT_EQ(1u, receive_statistics_->RtcpReportBlocks(3).size());
+ StreamStatistician* statistician =
+ receive_statistics_->GetStatistician(kSsrc1);
+  ASSERT_TRUE(statistician != nullptr);
+ StreamDataCounters counters = statistician->GetReceiveStreamDataCounters();
+ EXPECT_EQ(176u, counters.transmitted.payload_bytes);
+ EXPECT_EQ(24u, counters.transmitted.header_bytes);
+ EXPECT_EQ(0u, counters.transmitted.padding_bytes);
+ EXPECT_EQ(2u, counters.transmitted.packets);
+}
+
+TEST_P(ReceiveStatisticsTest,
+ DoesntCreateRtcpReportBlockUntilFirstReceivedPacketForSsrc) {
+ // Creates a statistician object for the ssrc.
+ receive_statistics_->EnableRetransmitDetection(kSsrc1, true);
+ EXPECT_TRUE(receive_statistics_->GetStatistician(kSsrc1) != nullptr);
+ EXPECT_EQ(0u, receive_statistics_->RtcpReportBlocks(3).size());
+  // Receive the first packet.
+ receive_statistics_->OnRtpPacket(packet1_);
+ EXPECT_EQ(1u, receive_statistics_->RtcpReportBlocks(3).size());
+}
+
+TEST_P(ReceiveStatisticsTest, GetReceiveStreamDataCounters) {
+ receive_statistics_->OnRtpPacket(packet1_);
+ StreamStatistician* statistician =
+ receive_statistics_->GetStatistician(kSsrc1);
+  ASSERT_TRUE(statistician != nullptr);
+
+ StreamDataCounters counters = statistician->GetReceiveStreamDataCounters();
+ EXPECT_GT(counters.first_packet_time_ms, -1);
+ EXPECT_EQ(1u, counters.transmitted.packets);
+
+ receive_statistics_->OnRtpPacket(packet1_);
+ counters = statistician->GetReceiveStreamDataCounters();
+ EXPECT_GT(counters.first_packet_time_ms, -1);
+ EXPECT_EQ(2u, counters.transmitted.packets);
+}
+
+TEST_P(ReceiveStatisticsTest, SimpleLossComputation) {
+ packet1_.SetSequenceNumber(1);
+ receive_statistics_->OnRtpPacket(packet1_);
+ packet1_.SetSequenceNumber(3);
+ receive_statistics_->OnRtpPacket(packet1_);
+ packet1_.SetSequenceNumber(4);
+ receive_statistics_->OnRtpPacket(packet1_);
+ packet1_.SetSequenceNumber(5);
+ receive_statistics_->OnRtpPacket(packet1_);
+
+ std::vector<rtcp::ReportBlock> report_blocks =
+ receive_statistics_->RtcpReportBlocks(1);
+ ASSERT_THAT(report_blocks, SizeIs(1));
+ EXPECT_EQ(kSsrc1, report_blocks[0].source_ssrc());
+
+ // 20% = 51/255.
+ EXPECT_EQ(51u, report_blocks[0].fraction_lost());
+ EXPECT_EQ(1, report_blocks[0].cumulative_lost_signed());
+ StreamStatistician* statistician =
+ receive_statistics_->GetStatistician(kSsrc1);
+ EXPECT_EQ(20, statistician->GetFractionLostInPercent());
+}
+
+TEST_P(ReceiveStatisticsTest, LossComputationWithReordering) {
+ packet1_.SetSequenceNumber(1);
+ receive_statistics_->OnRtpPacket(packet1_);
+ packet1_.SetSequenceNumber(3);
+ receive_statistics_->OnRtpPacket(packet1_);
+ packet1_.SetSequenceNumber(2);
+ receive_statistics_->OnRtpPacket(packet1_);
+ packet1_.SetSequenceNumber(5);
+ receive_statistics_->OnRtpPacket(packet1_);
+
+ std::vector<rtcp::ReportBlock> report_blocks =
+ receive_statistics_->RtcpReportBlocks(1);
+ ASSERT_THAT(report_blocks, SizeIs(1));
+ EXPECT_EQ(kSsrc1, report_blocks[0].source_ssrc());
+
+ // 20% = 51/255.
+ EXPECT_EQ(51u, report_blocks[0].fraction_lost());
+ EXPECT_EQ(1, report_blocks[0].cumulative_lost_signed());
+ StreamStatistician* statistician =
+ receive_statistics_->GetStatistician(kSsrc1);
+ EXPECT_EQ(20, statistician->GetFractionLostInPercent());
+}
+
+TEST_P(ReceiveStatisticsTest, LossComputationWithDuplicates) {
+ // Lose 2 packets, but also receive 1 duplicate. Should actually count as
+ // only 1 packet being lost.
+ packet1_.SetSequenceNumber(1);
+ receive_statistics_->OnRtpPacket(packet1_);
+ packet1_.SetSequenceNumber(4);
+ receive_statistics_->OnRtpPacket(packet1_);
+ packet1_.SetSequenceNumber(4);
+ receive_statistics_->OnRtpPacket(packet1_);
+ packet1_.SetSequenceNumber(5);
+ receive_statistics_->OnRtpPacket(packet1_);
+
+ std::vector<rtcp::ReportBlock> report_blocks =
+ receive_statistics_->RtcpReportBlocks(1);
+ ASSERT_THAT(report_blocks, SizeIs(1));
+ EXPECT_EQ(kSsrc1, report_blocks[0].source_ssrc());
+
+ // 20% = 51/255.
+ EXPECT_EQ(51u, report_blocks[0].fraction_lost());
+ EXPECT_EQ(1, report_blocks[0].cumulative_lost_signed());
+ StreamStatistician* statistician =
+ receive_statistics_->GetStatistician(kSsrc1);
+ EXPECT_EQ(20, statistician->GetFractionLostInPercent());
+}
+
+TEST_P(ReceiveStatisticsTest, LossComputationWithSequenceNumberWrapping) {
+ // First, test loss computation over a period that included a sequence number
+ // rollover.
+ packet1_.SetSequenceNumber(0xfffd);
+ receive_statistics_->OnRtpPacket(packet1_);
+ packet1_.SetSequenceNumber(0);
+ receive_statistics_->OnRtpPacket(packet1_);
+ packet1_.SetSequenceNumber(0xfffe);
+ receive_statistics_->OnRtpPacket(packet1_);
+ packet1_.SetSequenceNumber(1);
+ receive_statistics_->OnRtpPacket(packet1_);
+
+ // Only one packet was actually lost, 0xffff.
+ std::vector<rtcp::ReportBlock> report_blocks =
+ receive_statistics_->RtcpReportBlocks(1);
+ ASSERT_THAT(report_blocks, SizeIs(1));
+ EXPECT_EQ(kSsrc1, report_blocks[0].source_ssrc());
+
+ // 20% = 51/255.
+ EXPECT_EQ(51u, report_blocks[0].fraction_lost());
+ EXPECT_EQ(1, report_blocks[0].cumulative_lost_signed());
+ StreamStatistician* statistician =
+ receive_statistics_->GetStatistician(kSsrc1);
+ EXPECT_EQ(20, statistician->GetFractionLostInPercent());
+
+ // Now test losing one packet *after* the rollover.
+ packet1_.SetSequenceNumber(3);
+ receive_statistics_->OnRtpPacket(packet1_);
+
+ report_blocks = receive_statistics_->RtcpReportBlocks(1);
+ ASSERT_THAT(report_blocks, SizeIs(1));
+ EXPECT_EQ(kSsrc1, report_blocks[0].source_ssrc());
+
+ // 50% = 127/255.
+ EXPECT_EQ(127u, report_blocks[0].fraction_lost());
+ EXPECT_EQ(2, report_blocks[0].cumulative_lost_signed());
+ // 2 packets lost, 7 expected
+ EXPECT_EQ(28, statistician->GetFractionLostInPercent());
+}
+
+TEST_P(ReceiveStatisticsTest, StreamRestartDoesntCountAsLoss) {
+ receive_statistics_->SetMaxReorderingThreshold(kSsrc1, 200);
+
+ packet1_.SetSequenceNumber(0);
+ receive_statistics_->OnRtpPacket(packet1_);
+ packet1_.SetSequenceNumber(1);
+ receive_statistics_->OnRtpPacket(packet1_);
+
+ packet1_.SetSequenceNumber(400);
+ receive_statistics_->OnRtpPacket(packet1_);
+
+ std::vector<rtcp::ReportBlock> report_blocks =
+ receive_statistics_->RtcpReportBlocks(1);
+ ASSERT_THAT(report_blocks, SizeIs(1));
+ EXPECT_EQ(kSsrc1, report_blocks[0].source_ssrc());
+
+ EXPECT_EQ(0, report_blocks[0].fraction_lost());
+ EXPECT_EQ(0, report_blocks[0].cumulative_lost_signed());
+ StreamStatistician* statistician =
+ receive_statistics_->GetStatistician(kSsrc1);
+ EXPECT_EQ(0, statistician->GetFractionLostInPercent());
+
+ packet1_.SetSequenceNumber(401);
+ receive_statistics_->OnRtpPacket(packet1_);
+ report_blocks = receive_statistics_->RtcpReportBlocks(1);
+ ASSERT_THAT(report_blocks, SizeIs(1));
+ EXPECT_EQ(kSsrc1, report_blocks[0].source_ssrc());
+
+ EXPECT_EQ(0, report_blocks[0].fraction_lost());
+ EXPECT_EQ(0, report_blocks[0].cumulative_lost_signed());
+ EXPECT_EQ(0, statistician->GetFractionLostInPercent());
+}
+
+TEST_P(ReceiveStatisticsTest, CountsLossAfterStreamRestart) {
+ receive_statistics_->SetMaxReorderingThreshold(kSsrc1, 200);
+
+ packet1_.SetSequenceNumber(0);
+ receive_statistics_->OnRtpPacket(packet1_);
+ packet1_.SetSequenceNumber(1);
+ receive_statistics_->OnRtpPacket(packet1_);
+
+ packet1_.SetSequenceNumber(400);
+ receive_statistics_->OnRtpPacket(packet1_);
+ packet1_.SetSequenceNumber(401);
+ receive_statistics_->OnRtpPacket(packet1_);
+ packet1_.SetSequenceNumber(403);
+ receive_statistics_->OnRtpPacket(packet1_);
+
+ std::vector<rtcp::ReportBlock> report_blocks =
+ receive_statistics_->RtcpReportBlocks(1);
+ ASSERT_THAT(report_blocks, SizeIs(1));
+ EXPECT_EQ(kSsrc1, report_blocks[0].source_ssrc());
+
+ EXPECT_EQ(1, report_blocks[0].cumulative_lost_signed());
+
+ StreamStatistician* statistician =
+ receive_statistics_->GetStatistician(kSsrc1);
+ // Is this reasonable?
+ EXPECT_EQ(0, statistician->GetFractionLostInPercent());
+}
+
+TEST_P(ReceiveStatisticsTest, StreamCanRestartAtSequenceNumberWrapAround) {
+ receive_statistics_->SetMaxReorderingThreshold(kSsrc1, 200);
+
+ packet1_.SetSequenceNumber(0xffff - 401);
+ receive_statistics_->OnRtpPacket(packet1_);
+ packet1_.SetSequenceNumber(0xffff - 400);
+ receive_statistics_->OnRtpPacket(packet1_);
+
+ packet1_.SetSequenceNumber(0xffff);
+ receive_statistics_->OnRtpPacket(packet1_);
+ packet1_.SetSequenceNumber(0);
+ receive_statistics_->OnRtpPacket(packet1_);
+ packet1_.SetSequenceNumber(2);
+ receive_statistics_->OnRtpPacket(packet1_);
+
+ std::vector<rtcp::ReportBlock> report_blocks =
+ receive_statistics_->RtcpReportBlocks(1);
+ ASSERT_THAT(report_blocks, SizeIs(1));
+ EXPECT_EQ(kSsrc1, report_blocks[0].source_ssrc());
+
+ EXPECT_EQ(1, report_blocks[0].cumulative_lost_signed());
+}
+
+TEST_P(ReceiveStatisticsTest, StreamRestartNeedsTwoConsecutivePackets) {
+ receive_statistics_->SetMaxReorderingThreshold(kSsrc1, 200);
+
+ packet1_.SetSequenceNumber(400);
+ receive_statistics_->OnRtpPacket(packet1_);
+ packet1_.SetSequenceNumber(401);
+ receive_statistics_->OnRtpPacket(packet1_);
+
+ packet1_.SetSequenceNumber(1);
+ receive_statistics_->OnRtpPacket(packet1_);
+ packet1_.SetSequenceNumber(3);
+ receive_statistics_->OnRtpPacket(packet1_);
+
+ std::vector<rtcp::ReportBlock> report_blocks =
+ receive_statistics_->RtcpReportBlocks(1);
+ ASSERT_THAT(report_blocks, SizeIs(1));
+ EXPECT_EQ(kSsrc1, report_blocks[0].source_ssrc());
+
+ EXPECT_EQ(401u, report_blocks[0].extended_high_seq_num());
+
+ packet1_.SetSequenceNumber(4);
+ receive_statistics_->OnRtpPacket(packet1_);
+
+ report_blocks = receive_statistics_->RtcpReportBlocks(1);
+ ASSERT_THAT(report_blocks, SizeIs(1));
+ EXPECT_EQ(kSsrc1, report_blocks[0].source_ssrc());
+
+ EXPECT_EQ(4u, report_blocks[0].extended_high_seq_num());
+}
+
+TEST_P(ReceiveStatisticsTest, WrapsAroundExtendedHighestSequenceNumber) {
+ packet1_.SetSequenceNumber(0xffff);
+ receive_statistics_->OnRtpPacket(packet1_);
+
+ std::vector<rtcp::ReportBlock> report_blocks =
+ receive_statistics_->RtcpReportBlocks(1);
+ ASSERT_THAT(report_blocks, SizeIs(1));
+ EXPECT_EQ(kSsrc1, report_blocks[0].source_ssrc());
+
+ EXPECT_EQ(0xffffu, report_blocks[0].extended_high_seq_num());
+
+ // Wrap around.
+ packet1_.SetSequenceNumber(1);
+ receive_statistics_->OnRtpPacket(packet1_);
+
+ report_blocks = receive_statistics_->RtcpReportBlocks(1);
+ ASSERT_THAT(report_blocks, SizeIs(1));
+ EXPECT_EQ(kSsrc1, report_blocks[0].source_ssrc());
+
+ EXPECT_EQ(0x10001u, report_blocks[0].extended_high_seq_num());
+
+ // Should be treated as out of order; shouldn't increment highest extended
+ // sequence number.
+ packet1_.SetSequenceNumber(0x10000 - 6);
+ receive_statistics_->OnRtpPacket(packet1_);
+ report_blocks = receive_statistics_->RtcpReportBlocks(1);
+ ASSERT_THAT(report_blocks, SizeIs(1));
+ EXPECT_EQ(kSsrc1, report_blocks[0].source_ssrc());
+
+ EXPECT_EQ(0x10001u, report_blocks[0].extended_high_seq_num());
+
+ // Receive a couple packets then wrap around again.
+ receive_statistics_->SetMaxReorderingThreshold(kSsrc1, 200);
+ for (int i = 10; i < 0xffff; i += 150) {
+ packet1_.SetSequenceNumber(i);
+ receive_statistics_->OnRtpPacket(packet1_);
+ }
+ packet1_.SetSequenceNumber(1);
+ receive_statistics_->OnRtpPacket(packet1_);
+ report_blocks = receive_statistics_->RtcpReportBlocks(1);
+ ASSERT_THAT(report_blocks, SizeIs(1));
+ EXPECT_EQ(kSsrc1, report_blocks[0].source_ssrc());
+
+ EXPECT_EQ(0x20001u, report_blocks[0].extended_high_seq_num());
+}
+
+TEST_P(ReceiveStatisticsTest, StreamDataCounters) {
+ receive_statistics_->EnableRetransmitDetection(kSsrc1, true);
+
+ const size_t kHeaderLength = 20;
+ const size_t kPaddingLength = 9;
+
+ // One packet with payload size kPacketSize1.
+ RtpPacketReceived packet1 =
+ CreateRtpPacket(kSsrc1, kHeaderLength, kPacketSize1, 0);
+ receive_statistics_->OnRtpPacket(packet1);
+ StreamDataCounters counters = receive_statistics_->GetStatistician(kSsrc1)
+ ->GetReceiveStreamDataCounters();
+ EXPECT_EQ(counters.transmitted.payload_bytes, kPacketSize1);
+ EXPECT_EQ(counters.transmitted.header_bytes, kHeaderLength);
+ EXPECT_EQ(counters.transmitted.padding_bytes, 0u);
+ EXPECT_EQ(counters.transmitted.packets, 1u);
+ EXPECT_EQ(counters.retransmitted.payload_bytes, 0u);
+ EXPECT_EQ(counters.retransmitted.header_bytes, 0u);
+ EXPECT_EQ(counters.retransmitted.padding_bytes, 0u);
+ EXPECT_EQ(counters.retransmitted.packets, 0u);
+ EXPECT_EQ(counters.fec.packets, 0u);
+
+ // Another packet of size kPacketSize1 with 9 bytes padding.
+ RtpPacketReceived packet2 =
+ CreateRtpPacket(kSsrc1, kHeaderLength, kPacketSize1, 9);
+ packet2.SetSequenceNumber(packet1.SequenceNumber() + 1);
+ clock_.AdvanceTimeMilliseconds(5);
+ receive_statistics_->OnRtpPacket(packet2);
+ counters = receive_statistics_->GetStatistician(kSsrc1)
+ ->GetReceiveStreamDataCounters();
+ EXPECT_EQ(counters.transmitted.payload_bytes, kPacketSize1 * 2);
+ EXPECT_EQ(counters.transmitted.header_bytes, kHeaderLength * 2);
+ EXPECT_EQ(counters.transmitted.padding_bytes, kPaddingLength);
+ EXPECT_EQ(counters.transmitted.packets, 2u);
+
+ clock_.AdvanceTimeMilliseconds(5);
+ // Retransmit last packet.
+ receive_statistics_->OnRtpPacket(packet2);
+ counters = receive_statistics_->GetStatistician(kSsrc1)
+ ->GetReceiveStreamDataCounters();
+ EXPECT_EQ(counters.transmitted.payload_bytes, kPacketSize1 * 3);
+ EXPECT_EQ(counters.transmitted.header_bytes, kHeaderLength * 3);
+ EXPECT_EQ(counters.transmitted.padding_bytes, kPaddingLength * 2);
+ EXPECT_EQ(counters.transmitted.packets, 3u);
+ EXPECT_EQ(counters.retransmitted.payload_bytes, kPacketSize1);
+ EXPECT_EQ(counters.retransmitted.header_bytes, kHeaderLength);
+ EXPECT_EQ(counters.retransmitted.padding_bytes, kPaddingLength);
+ EXPECT_EQ(counters.retransmitted.packets, 1u);
+}
+
+TEST_P(ReceiveStatisticsTest, LastPacketReceivedTimestamp) {
+ clock_.AdvanceTimeMilliseconds(42);
+ receive_statistics_->OnRtpPacket(packet1_);
+ StreamDataCounters counters = receive_statistics_->GetStatistician(kSsrc1)
+ ->GetReceiveStreamDataCounters();
+
+ EXPECT_EQ(42, counters.last_packet_received_timestamp_ms);
+
+ clock_.AdvanceTimeMilliseconds(3);
+ receive_statistics_->OnRtpPacket(packet1_);
+ counters = receive_statistics_->GetStatistician(kSsrc1)
+ ->GetReceiveStreamDataCounters();
+ EXPECT_EQ(45, counters.last_packet_received_timestamp_ms);
+}
+
+} // namespace
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/remote_ntp_time_estimator.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/remote_ntp_time_estimator.cc
new file mode 100644
index 0000000000..6f90cd175c
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/remote_ntp_time_estimator.cc
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/include/remote_ntp_time_estimator.h"
+
+#include <cstdint>
+
+#include "modules/rtp_rtcp/source/time_util.h"
+#include "rtc_base/logging.h"
+#include "system_wrappers/include/clock.h"
+#include "system_wrappers/include/ntp_time.h"
+
+namespace webrtc {
+
+namespace {
+
+constexpr int kMinimumNumberOfSamples = 2;
+constexpr TimeDelta kTimingLogInterval = TimeDelta::Seconds(10);
+constexpr int kClocksOffsetSmoothingWindow = 100;
+
+// Subtracts two NtpTime values keeping maximum precision.
+int64_t Subtract(NtpTime minuend, NtpTime subtrahend) {
+ uint64_t a = static_cast<uint64_t>(minuend);
+ uint64_t b = static_cast<uint64_t>(subtrahend);
+ return a >= b ? static_cast<int64_t>(a - b) : -static_cast<int64_t>(b - a);
+}
+
+NtpTime Add(NtpTime lhs, int64_t rhs) {
+ uint64_t result = static_cast<uint64_t>(lhs);
+ if (rhs >= 0) {
+ result += static_cast<uint64_t>(rhs);
+ } else {
+ result -= static_cast<uint64_t>(-rhs);
+ }
+ return NtpTime(result);
+}
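+
+// Note: NtpTime wraps a 64-bit Q32.32 value (whole seconds in the high 32
+// bits, fractional seconds in the low 32 bits), so one second is 2^32 units
+// and the helpers above can add and subtract timestamps as plain integers
+// without losing precision.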
+
+} // namespace
+
+// TODO(wu): Refactor this class so that it can be shared with
+// vie_sync_module.cc.
+RemoteNtpTimeEstimator::RemoteNtpTimeEstimator(Clock* clock)
+ : clock_(clock),
+ ntp_clocks_offset_estimator_(kClocksOffsetSmoothingWindow) {}
+
+bool RemoteNtpTimeEstimator::UpdateRtcpTimestamp(TimeDelta rtt,
+ NtpTime sender_send_time,
+ uint32_t rtp_timestamp) {
+ switch (rtp_to_ntp_.UpdateMeasurements(sender_send_time, rtp_timestamp)) {
+ case RtpToNtpEstimator::kInvalidMeasurement:
+ return false;
+ case RtpToNtpEstimator::kSameMeasurement:
+ // No new RTCP SR since last time this function was called.
+ return true;
+ case RtpToNtpEstimator::kNewMeasurement:
+ break;
+ }
+
+ // Assume the connection is symmetric, so the time to deliver the packet is
+ // half the round-trip time.
+ int64_t deliver_time_ntp = ToNtpUnits(rtt) / 2;
+
+ // Update extrapolator with the new arrival time.
+ NtpTime receiver_arrival_time = clock_->CurrentNtpTime();
+ int64_t remote_to_local_clocks_offset =
+ Subtract(receiver_arrival_time, sender_send_time) - deliver_time_ntp;
+ ntp_clocks_offset_estimator_.Insert(remote_to_local_clocks_offset);
+ return true;
+}
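+
+// Rough worked example of the computation above: with rtt = 10 ms the assumed
+// one-way delivery time is 5 ms, i.e. ToNtpUnits(rtt) / 2, about 21474836 NTP
+// units, which is subtracted from the raw arrival-minus-send difference
+// before the offset is inserted into the smoothing filter.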
+
+NtpTime RemoteNtpTimeEstimator::EstimateNtp(uint32_t rtp_timestamp) {
+ NtpTime sender_capture = rtp_to_ntp_.Estimate(rtp_timestamp);
+ if (!sender_capture.Valid()) {
+ return sender_capture;
+ }
+
+ int64_t remote_to_local_clocks_offset =
+ ntp_clocks_offset_estimator_.GetFilteredValue();
+ NtpTime receiver_capture = Add(sender_capture, remote_to_local_clocks_offset);
+
+ Timestamp now = clock_->CurrentTime();
+ if (now - last_timing_log_ > kTimingLogInterval) {
+ RTC_LOG(LS_INFO) << "RTP timestamp: " << rtp_timestamp
+ << " in NTP clock: " << sender_capture.ToMs()
+ << " estimated time in receiver NTP clock: "
+ << receiver_capture.ToMs();
+ last_timing_log_ = now;
+ }
+
+ return receiver_capture;
+}
+
+absl::optional<int64_t>
+RemoteNtpTimeEstimator::EstimateRemoteToLocalClockOffset() {
+ if (ntp_clocks_offset_estimator_.GetNumberOfSamplesStored() <
+ kMinimumNumberOfSamples) {
+ return absl::nullopt;
+ }
+ return ntp_clocks_offset_estimator_.GetFilteredValue();
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/remote_ntp_time_estimator_unittest.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/remote_ntp_time_estimator_unittest.cc
new file mode 100644
index 0000000000..8dbfaec940
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/remote_ntp_time_estimator_unittest.cc
@@ -0,0 +1,128 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/include/remote_ntp_time_estimator.h"
+
+#include "absl/types/optional.h"
+#include "modules/rtp_rtcp/source/time_util.h"
+#include "system_wrappers/include/clock.h"
+#include "system_wrappers/include/ntp_time.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+constexpr TimeDelta kTestRtt = TimeDelta::Millis(10);
+constexpr Timestamp kLocalClockInitialTime = Timestamp::Millis(123);
+constexpr Timestamp kRemoteClockInitialTime = Timestamp::Millis(373);
+constexpr uint32_t kTimestampOffset = 567;
+constexpr int64_t kRemoteToLocalClockOffsetNtp =
+ ToNtpUnits(kLocalClockInitialTime - kRemoteClockInitialTime);
+
+class RemoteNtpTimeEstimatorTest : public ::testing::Test {
+ protected:
+ void AdvanceTime(TimeDelta delta) {
+ local_clock_.AdvanceTime(delta);
+ remote_clock_.AdvanceTime(delta);
+ }
+
+ uint32_t GetRemoteTimestamp() {
+ return static_cast<uint32_t>(remote_clock_.TimeInMilliseconds()) * 90 +
+ kTimestampOffset;
+ }
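+ // The factor 90 above models a 90 kHz RTP clock (90 ticks per millisecond),
+ // the standard video timestamp rate, so the generated RTP timestamps track
+ // the remote clock.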
+
+ void SendRtcpSr() {
+ uint32_t rtcp_timestamp = GetRemoteTimestamp();
+ NtpTime ntp = remote_clock_.CurrentNtpTime();
+
+ AdvanceTime(kTestRtt / 2);
+ RTC_DCHECK(estimator_.UpdateRtcpTimestamp(kTestRtt, ntp, rtcp_timestamp));
+ }
+
+ void SendRtcpSrInaccurately(TimeDelta ntp_error, TimeDelta networking_delay) {
+ uint32_t rtcp_timestamp = GetRemoteTimestamp();
+ int64_t ntp_error_fractions = ToNtpUnits(ntp_error);
+ NtpTime ntp(static_cast<uint64_t>(remote_clock_.CurrentNtpTime()) +
+ ntp_error_fractions);
+ AdvanceTime(kTestRtt / 2 + networking_delay);
+ RTC_DCHECK(estimator_.UpdateRtcpTimestamp(kTestRtt, ntp, rtcp_timestamp));
+ }
+
+ SimulatedClock local_clock_{kLocalClockInitialTime};
+ SimulatedClock remote_clock_{kRemoteClockInitialTime};
+ RemoteNtpTimeEstimator estimator_{&local_clock_};
+};
+
+TEST_F(RemoteNtpTimeEstimatorTest, FailsWithoutValidNtpTime) {
+ EXPECT_FALSE(
+ estimator_.UpdateRtcpTimestamp(kTestRtt, NtpTime(), /*rtp_timestamp=*/0));
+}
+
+TEST_F(RemoteNtpTimeEstimatorTest, Estimate) {
+ // Remote peer sends first RTCP SR.
+ SendRtcpSr();
+
+ // Remote sends a RTP packet.
+ AdvanceTime(TimeDelta::Millis(15));
+ uint32_t rtp_timestamp = GetRemoteTimestamp();
+ int64_t capture_ntp_time_ms = local_clock_.CurrentNtpInMilliseconds();
+
+ // Local peer needs at least 2 RTCP SR to calculate the capture time.
+ const int64_t kNotEnoughRtcpSr = -1;
+ EXPECT_EQ(kNotEnoughRtcpSr, estimator_.Estimate(rtp_timestamp));
+ EXPECT_EQ(estimator_.EstimateRemoteToLocalClockOffset(), absl::nullopt);
+
+ AdvanceTime(TimeDelta::Millis(800));
+ // Remote sends second RTCP SR.
+ SendRtcpSr();
+
+ // Local peer gets enough RTCP SR to calculate the capture time.
+ EXPECT_EQ(capture_ntp_time_ms, estimator_.Estimate(rtp_timestamp));
+ EXPECT_EQ(estimator_.EstimateRemoteToLocalClockOffset(),
+ kRemoteToLocalClockOffsetNtp);
+}
+
+TEST_F(RemoteNtpTimeEstimatorTest, AveragesErrorsOut) {
+ // Remote peer sends first 10 RTCP SR without errors.
+ for (int i = 0; i < 10; ++i) {
+ AdvanceTime(TimeDelta::Seconds(1));
+ SendRtcpSr();
+ }
+
+ AdvanceTime(TimeDelta::Millis(150));
+ uint32_t rtp_timestamp = GetRemoteTimestamp();
+ int64_t capture_ntp_time_ms = local_clock_.CurrentNtpInMilliseconds();
+ // Local peer gets enough RTCP SR to calculate the capture time.
+ EXPECT_EQ(capture_ntp_time_ms, estimator_.Estimate(rtp_timestamp));
+ EXPECT_EQ(kRemoteToLocalClockOffsetNtp,
+ estimator_.EstimateRemoteToLocalClockOffset());
+
+ // Remote sends corrupted RTCP SRs
+ AdvanceTime(TimeDelta::Seconds(1));
+ SendRtcpSrInaccurately(/*ntp_error=*/TimeDelta::Millis(2),
+ /*networking_delay=*/TimeDelta::Millis(-1));
+ AdvanceTime(TimeDelta::Seconds(1));
+ SendRtcpSrInaccurately(/*ntp_error=*/TimeDelta::Millis(-2),
+ /*networking_delay=*/TimeDelta::Millis(1));
+
+ // New RTP packet to estimate timestamp.
+ AdvanceTime(TimeDelta::Millis(150));
+ rtp_timestamp = GetRemoteTimestamp();
+ capture_ntp_time_ms = local_clock_.CurrentNtpInMilliseconds();
+
+ // Errors should be averaged out.
+ EXPECT_EQ(capture_ntp_time_ms, estimator_.Estimate(rtp_timestamp));
+ EXPECT_EQ(kRemoteToLocalClockOffsetNtp,
+ estimator_.EstimateRemoteToLocalClockOffset());
+}
+
+} // namespace
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_nack_stats.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_nack_stats.cc
new file mode 100644
index 0000000000..1d652d0b5b
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_nack_stats.cc
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtcp_nack_stats.h"
+
+#include "modules/include/module_common_types_public.h"
+
+namespace webrtc {
+
+RtcpNackStats::RtcpNackStats()
+ : max_sequence_number_(0), requests_(0), unique_requests_(0) {}
+
+void RtcpNackStats::ReportRequest(uint16_t sequence_number) {
+ if (requests_ == 0 ||
+ IsNewerSequenceNumber(sequence_number, max_sequence_number_)) {
+ max_sequence_number_ = sequence_number;
+ ++unique_requests_;
+ }
+ ++requests_;
+}
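+
+// Example of the intended accounting: ReportRequest(10), ReportRequest(10),
+// ReportRequest(11) leaves requests() == 3 and unique_requests() == 2, since
+// the repeated sequence number 10 is not newer than the stored maximum.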
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_nack_stats.h b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_nack_stats.h
new file mode 100644
index 0000000000..9da4351a59
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_nack_stats.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_RTP_RTCP_SOURCE_RTCP_NACK_STATS_H_
+#define MODULES_RTP_RTCP_SOURCE_RTCP_NACK_STATS_H_
+
+#include <stdint.h>
+
+namespace webrtc {
+
+class RtcpNackStats {
+ public:
+ RtcpNackStats();
+
+ // Updates stats with requested sequence number.
+ // This function should be called for each NACK request to calculate the
+ // number of unique NACKed RTP packets.
+ void ReportRequest(uint16_t sequence_number);
+
+ // Gets the number of NACKed RTP packets.
+ uint32_t requests() const { return requests_; }
+
+ // Gets the number of unique NACKed RTP packets.
+ uint32_t unique_requests() const { return unique_requests_; }
+
+ private:
+ uint16_t max_sequence_number_;
+ uint32_t requests_;
+ uint32_t unique_requests_;
+};
+
+} // namespace webrtc
+#endif // MODULES_RTP_RTCP_SOURCE_RTCP_NACK_STATS_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_nack_stats_unittest.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_nack_stats_unittest.cc
new file mode 100644
index 0000000000..60858e197e
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_nack_stats_unittest.cc
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtcp_nack_stats.h"
+
+#include "test/gtest.h"
+
+namespace webrtc {
+
+TEST(RtcpNackStatsTest, Requests) {
+ RtcpNackStats stats;
+ EXPECT_EQ(0U, stats.unique_requests());
+ EXPECT_EQ(0U, stats.requests());
+ stats.ReportRequest(10);
+ EXPECT_EQ(1U, stats.unique_requests());
+ EXPECT_EQ(1U, stats.requests());
+
+ stats.ReportRequest(10);
+ EXPECT_EQ(1U, stats.unique_requests());
+ stats.ReportRequest(11);
+ EXPECT_EQ(2U, stats.unique_requests());
+
+ stats.ReportRequest(11);
+ EXPECT_EQ(2U, stats.unique_requests());
+ stats.ReportRequest(13);
+ EXPECT_EQ(3U, stats.unique_requests());
+
+ stats.ReportRequest(11);
+ EXPECT_EQ(3U, stats.unique_requests());
+ EXPECT_EQ(6U, stats.requests());
+}
+
+TEST(RtcpNackStatsTest, RequestsWithWrap) {
+ RtcpNackStats stats;
+ stats.ReportRequest(65534);
+ EXPECT_EQ(1U, stats.unique_requests());
+
+ stats.ReportRequest(65534);
+ EXPECT_EQ(1U, stats.unique_requests());
+ stats.ReportRequest(65535);
+ EXPECT_EQ(2U, stats.unique_requests());
+
+ stats.ReportRequest(65535);
+ EXPECT_EQ(2U, stats.unique_requests());
+ stats.ReportRequest(0);
+ EXPECT_EQ(3U, stats.unique_requests());
+
+ stats.ReportRequest(65535);
+ EXPECT_EQ(3U, stats.unique_requests());
+ stats.ReportRequest(0);
+ EXPECT_EQ(3U, stats.unique_requests());
+ stats.ReportRequest(1);
+ EXPECT_EQ(4U, stats.unique_requests());
+ EXPECT_EQ(8U, stats.requests());
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet.cc
new file mode 100644
index 0000000000..bac03e73d2
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet.cc
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtcp_packet.h"
+
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace rtcp {
+constexpr size_t RtcpPacket::kHeaderLength;
+
+rtc::Buffer RtcpPacket::Build() const {
+ rtc::Buffer packet(BlockLength());
+
+ size_t length = 0;
+ bool created = Create(packet.data(), &length, packet.capacity(), nullptr);
+ RTC_DCHECK(created) << "Invalid packets are not supported.";
+ RTC_DCHECK_EQ(length, packet.size())
+ << "BlockLength() mispredicted the size used by Create().";
+
+ return packet;
+}
+
+bool RtcpPacket::Build(size_t max_length, PacketReadyCallback callback) const {
+ RTC_CHECK_LE(max_length, IP_PACKET_SIZE);
+ uint8_t buffer[IP_PACKET_SIZE];
+ size_t index = 0;
+ if (!Create(buffer, &index, max_length, callback))
+ return false;
+ return OnBufferFull(buffer, &index, callback);
+}
+
+bool RtcpPacket::OnBufferFull(uint8_t* packet,
+ size_t* index,
+ PacketReadyCallback callback) const {
+ if (*index == 0)
+ return false;
+ RTC_DCHECK(callback) << "Fragmentation not supported.";
+ callback(rtc::ArrayView<const uint8_t>(packet, *index));
+ *index = 0;
+ return true;
+}
+
+size_t RtcpPacket::HeaderLength() const {
+ size_t length_in_bytes = BlockLength();
+ RTC_DCHECK_GT(length_in_bytes, 0);
+ RTC_DCHECK_EQ(length_in_bytes % 4, 0)
+ << "Padding must be handled by each subclass.";
+ // Length in 32-bit words without common header.
+ return (length_in_bytes - kHeaderLength) / 4;
+}
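+
+// Example: a packet whose BlockLength() is 32 bytes gets a header length
+// field of (32 - 4) / 4 = 7, matching RFC 3550's definition of the length
+// field as the packet size in 32-bit words minus one (32 / 4 - 1 = 7).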
+
+// From RFC 3550, RTP: A Transport Protocol for Real-Time Applications.
+//
+// RTCP common header format.
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |V=2|P| RC/FMT | PT | length |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+void RtcpPacket::CreateHeader(
+ size_t count_or_format, // Depends on packet type.
+ uint8_t packet_type,
+ size_t length,
+ uint8_t* buffer,
+ size_t* pos) {
+ CreateHeader(count_or_format, packet_type, length, /*padding=*/false, buffer,
+ pos);
+}
+
+void RtcpPacket::CreateHeader(
+ size_t count_or_format, // Depends on packet type.
+ uint8_t packet_type,
+ size_t length,
+ bool padding,
+ uint8_t* buffer,
+ size_t* pos) {
+ RTC_DCHECK_LE(length, 0xffffU);
+ RTC_DCHECK_LE(count_or_format, 0x1f);
+ constexpr uint8_t kVersionBits = 2 << 6;
+ uint8_t padding_bit = padding ? 1 << 5 : 0;
+ buffer[*pos + 0] =
+ kVersionBits | padding_bit | static_cast<uint8_t>(count_or_format);
+ buffer[*pos + 1] = packet_type;
+ buffer[*pos + 2] = (length >> 8) & 0xff;
+ buffer[*pos + 3] = length & 0xff;
+ *pos += kHeaderLength;
+}
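+
+// For instance, CreateHeader(/*count_or_format=*/1, /*packet_type=*/201,
+// /*length=*/7, buffer, &pos) emits the four header bytes 0x81 0xc9 0x00 0x07:
+// version 2 in the top bits, no padding, a count of one, packet type 201
+// (receiver report), and a length of seven 32-bit words.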
+
+} // namespace rtcp
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet.h b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet.h
new file mode 100644
index 0000000000..07deb0f9bd
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet.h
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ *
+ */
+#ifndef MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_H_
+#define MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "api/array_view.h"
+#include "api/function_view.h"
+#include "rtc_base/buffer.h"
+
+namespace webrtc {
+namespace rtcp {
+// Class for building RTCP packets.
+//
+// Example:
+// ReportBlock report_block;
+// report_block.SetMediaSsrc(234);
+// report_block.SetFractionLost(10);
+//
+// ReceiverReport rr;
+// rr.SetSenderSsrc(123);
+// rr.AddReportBlock(report_block);
+//
+// Fir fir;
+// fir.SetSenderSsrc(123);
+// fir.AddRequestTo(234, 56);
+//
+// rtc::Buffer packet = fir.Build(); // Returns a buffer holding the built
+// // rtcp packet, here an intra frame
+// // request with sequence number 56.
+//
+// CompoundPacket compound; // Builds a compound RTCP packet with
+// compound.Append(&rr); // a receiver report, report block
+// compound.Append(&fir); // and fir message.
+// rtc::Buffer packet = compound.Build();
+
+class RtcpPacket {
+ public:
+ // Callback used to signal that an RTCP packet is ready. Note that this may
+ // not contain all data in this RtcpPacket; if a packet cannot fit in
+ // max_length bytes, it will be fragmented and multiple calls to this
+ // callback will be made.
+ using PacketReadyCallback =
+ rtc::FunctionView<void(rtc::ArrayView<const uint8_t> packet)>;
+
+ virtual ~RtcpPacket() = default;
+
+ void SetSenderSsrc(uint32_t ssrc) { sender_ssrc_ = ssrc; }
+ uint32_t sender_ssrc() const { return sender_ssrc_; }
+
+ // Convenience method mostly used for tests. Creates a packet without
+ // fragmentation, using BlockLength() to allocate a big enough buffer.
+ rtc::Buffer Build() const;
+
+ // Returns true if call to Create succeeded.
+ bool Build(size_t max_length, PacketReadyCallback callback) const;
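+ //
+ // Usage sketch (here `transport` stands in for whatever actually sends the
+ // bytes; it is not part of this API):
+ // packet.Build(/*max_length=*/1200,
+ // [&](rtc::ArrayView<const uint8_t> chunk) {
+ // transport->Send(chunk.data(), chunk.size());
+ // });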
+
+ // Size of this packet in bytes (including headers).
+ virtual size_t BlockLength() const = 0;
+
+ // Creates the packet in the given buffer at the given position.
+ // Invokes the PacketReadyCallback if the remaining buffer is too small,
+ // and assumes the buffer can be reused after the callback returns.
+ virtual bool Create(uint8_t* packet,
+ size_t* index,
+ size_t max_length,
+ PacketReadyCallback callback) const = 0;
+
+ protected:
+ // Size of the rtcp common header.
+ static constexpr size_t kHeaderLength = 4;
+ RtcpPacket() {}
+
+ static void CreateHeader(size_t count_or_format,
+ uint8_t packet_type,
+ size_t block_length, // Payload size in 32bit words.
+ uint8_t* buffer,
+ size_t* pos);
+
+ static void CreateHeader(size_t count_or_format,
+ uint8_t packet_type,
+ size_t block_length, // Payload size in 32bit words.
+ bool padding, // True if there are padding bytes.
+ uint8_t* buffer,
+ size_t* pos);
+
+ bool OnBufferFull(uint8_t* packet,
+ size_t* index,
+ PacketReadyCallback callback) const;
+ // Size of the rtcp packet as written in header.
+ size_t HeaderLength() const;
+
+ private:
+ uint32_t sender_ssrc_ = 0;
+};
+} // namespace rtcp
+} // namespace webrtc
+#endif // MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/app.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/app.cc
new file mode 100644
index 0000000000..c839b72a87
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/app.cc
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtcp_packet/app.h"
+
+#include <string.h>
+
+#include <cstdint>
+
+#include "modules/rtp_rtcp/source/byte_io.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/common_header.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+namespace rtcp {
+constexpr uint8_t App::kPacketType;
+constexpr size_t App::kMaxDataSize;
+// Application-Defined packet (APP) (RFC 3550).
+//
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |V=2|P| subtype | PT=APP=204 | length |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// 0 | SSRC/CSRC |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// 4 | name (ASCII) |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// 8 | application-dependent data ...
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+App::App() : sub_type_(0), name_(0) {}
+
+App::~App() = default;
+
+bool App::Parse(const CommonHeader& packet) {
+ RTC_DCHECK_EQ(packet.type(), kPacketType);
+ if (packet.payload_size_bytes() < kAppBaseLength) {
+ RTC_LOG(LS_WARNING) << "Packet is too small to be a valid APP packet";
+ return false;
+ }
+ if (packet.payload_size_bytes() % 4 != 0) {
+ RTC_LOG(LS_WARNING)
+ << "Packet payload must be 32 bits aligned to make a valid APP packet";
+ return false;
+ }
+ sub_type_ = packet.fmt();
+ SetSenderSsrc(ByteReader<uint32_t>::ReadBigEndian(&packet.payload()[0]));
+ name_ = ByteReader<uint32_t>::ReadBigEndian(&packet.payload()[4]);
+ data_.SetData(packet.payload() + kAppBaseLength,
+ packet.payload_size_bytes() - kAppBaseLength);
+ return true;
+}
+
+void App::SetSubType(uint8_t subtype) {
+ RTC_DCHECK_LE(subtype, 0x1f);
+ sub_type_ = subtype;
+}
+
+void App::SetData(const uint8_t* data, size_t data_length) {
+ RTC_DCHECK(data);
+ RTC_DCHECK_EQ(data_length % 4, 0) << "Data must be 32-bit aligned.";
+ RTC_DCHECK_LE(data_length, kMaxDataSize)
+ << "App data size " << data_length << " exceeds the maximum of "
+ << kMaxDataSize << " bytes.";
+ data_.SetData(data, data_length);
+}
+
+size_t App::BlockLength() const {
+ return kHeaderLength + kAppBaseLength + data_.size();
+}
+
+bool App::Create(uint8_t* packet,
+ size_t* index,
+ size_t max_length,
+ PacketReadyCallback callback) const {
+ while (*index + BlockLength() > max_length) {
+ if (!OnBufferFull(packet, index, callback))
+ return false;
+ }
+ const size_t index_end = *index + BlockLength();
+ CreateHeader(sub_type_, kPacketType, HeaderLength(), packet, index);
+
+ ByteWriter<uint32_t>::WriteBigEndian(&packet[*index + 0], sender_ssrc());
+ ByteWriter<uint32_t>::WriteBigEndian(&packet[*index + 4], name_);
+ memcpy(&packet[*index + 8], data_.data(), data_.size());
+ *index += (8 + data_.size());
+ RTC_DCHECK_EQ(index_end, *index);
+ return true;
+}
+
+} // namespace rtcp
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/app.h b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/app.h
new file mode 100644
index 0000000000..4518792e5a
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/app.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_APP_H_
+#define MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_APP_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "modules/rtp_rtcp/source/rtcp_packet.h"
+#include "rtc_base/buffer.h"
+
+namespace webrtc {
+namespace rtcp {
+class CommonHeader;
+
+class App : public RtcpPacket {
+ public:
+ static constexpr uint8_t kPacketType = 204;
+ App();
+ App(App&&) = default;
+ ~App() override;
+
+ // Parse assumes header is already parsed and validated.
+ bool Parse(const CommonHeader& packet);
+
+ void SetSubType(uint8_t subtype);
+ void SetName(uint32_t name) { name_ = name; }
+ void SetData(const uint8_t* data, size_t data_length);
+
+ uint8_t sub_type() const { return sub_type_; }
+ uint32_t name() const { return name_; }
+ size_t data_size() const { return data_.size(); }
+ const uint8_t* data() const { return data_.data(); }
+
+ size_t BlockLength() const override;
+
+ bool Create(uint8_t* packet,
+ size_t* index,
+ size_t max_length,
+ PacketReadyCallback callback) const override;
+
+ static inline constexpr uint32_t NameToInt(const char name[5]) {
+ return static_cast<uint32_t>(name[0]) << 24 |
+ static_cast<uint32_t>(name[1]) << 16 |
+ static_cast<uint32_t>(name[2]) << 8 | static_cast<uint32_t>(name[3]);
+ }
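+ // For example, NameToInt("name") == 0x6e616d65: the four ASCII bytes packed
+ // big-endian, matching how the name field is laid out on the wire.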
+
+ private:
+ static constexpr size_t kAppBaseLength = 8; // Ssrc and Name.
+ static constexpr size_t kMaxDataSize = 0xffff * 4 - kAppBaseLength;
+
+ uint8_t sub_type_;
+ uint32_t name_;
+ rtc::Buffer data_;
+};
+
+} // namespace rtcp
+} // namespace webrtc
+#endif // MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_APP_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/app_unittest.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/app_unittest.cc
new file mode 100644
index 0000000000..8690e8e5a0
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/app_unittest.cc
@@ -0,0 +1,110 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtcp_packet/app.h"
+
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "test/rtcp_packet_parser.h"
+
+namespace webrtc {
+namespace {
+
+using ::testing::ElementsAreArray;
+using ::testing::make_tuple;
+using ::webrtc::rtcp::App;
+
+constexpr uint32_t kName = ((uint32_t)'n' << 24) | ((uint32_t)'a' << 16) |
+ ((uint32_t)'m' << 8) | (uint32_t)'e';
+constexpr uint8_t kSubtype = 0x1e;
+constexpr uint32_t kSenderSsrc = 0x12345678;
+constexpr uint8_t kData[] = {'t', 'e', 's', 't', 'd', 'a', 't', 'a'};
+constexpr uint8_t kVersionBits = 2 << 6;
+constexpr uint8_t kPaddingBit = 1 << 5;
+// clang-format off
+constexpr uint8_t kPacketWithoutData[] = {
+ kVersionBits | kSubtype, App::kPacketType, 0x00, 0x02,
+ 0x12, 0x34, 0x56, 0x78,
+ 'n', 'a', 'm', 'e'};
+constexpr uint8_t kPacketWithData[] = {
+ kVersionBits | kSubtype, App::kPacketType, 0x00, 0x04,
+ 0x12, 0x34, 0x56, 0x78,
+ 'n', 'a', 'm', 'e',
+ 't', 'e', 's', 't',
+ 'd', 'a', 't', 'a'};
+constexpr uint8_t kTooSmallPacket[] = {
+ kVersionBits | kSubtype, App::kPacketType, 0x00, 0x01,
+ 0x12, 0x34, 0x56, 0x78};
+constexpr uint8_t kPaddingSize = 1;
+constexpr uint8_t kPacketWithUnalignedPayload[] = {
+ kVersionBits | kPaddingBit | kSubtype, App::kPacketType, 0x00, 0x03,
+ 0x12, 0x34, 0x56, 0x78,
+ 'n', 'a', 'm', 'e',
+ 'd', 'a', 't', kPaddingSize};
+// clang-format on
+} // namespace
+
+TEST(RtcpPacketAppTest, CreateWithoutData) {
+ App app;
+ app.SetSenderSsrc(kSenderSsrc);
+ app.SetSubType(kSubtype);
+ app.SetName(kName);
+
+ rtc::Buffer raw = app.Build();
+
+ EXPECT_THAT(make_tuple(raw.data(), raw.size()),
+ ElementsAreArray(kPacketWithoutData));
+}
+
+TEST(RtcpPacketAppTest, ParseWithoutData) {
+ App parsed;
+ EXPECT_TRUE(test::ParseSinglePacket(kPacketWithoutData, &parsed));
+
+ EXPECT_EQ(kSenderSsrc, parsed.sender_ssrc());
+ EXPECT_EQ(kSubtype, parsed.sub_type());
+ EXPECT_EQ(kName, parsed.name());
+ EXPECT_EQ(0u, parsed.data_size());
+}
+
+TEST(RtcpPacketAppTest, CreateWithData) {
+ App app;
+ app.SetSenderSsrc(kSenderSsrc);
+ app.SetSubType(kSubtype);
+ app.SetName(kName);
+ app.SetData(kData, sizeof(kData));
+
+ rtc::Buffer raw = app.Build();
+
+ EXPECT_THAT(make_tuple(raw.data(), raw.size()),
+ ElementsAreArray(kPacketWithData));
+}
+
+TEST(RtcpPacketAppTest, ParseWithData) {
+ App parsed;
+ EXPECT_TRUE(test::ParseSinglePacket(kPacketWithData, &parsed));
+
+ EXPECT_EQ(kSenderSsrc, parsed.sender_ssrc());
+ EXPECT_EQ(kSubtype, parsed.sub_type());
+ EXPECT_EQ(kName, parsed.name());
+ EXPECT_THAT(make_tuple(parsed.data(), parsed.data_size()),
+ ElementsAreArray(kData));
+}
+
+TEST(RtcpPacketAppTest, ParseFailsOnTooSmallPacket) {
+ App parsed;
+ EXPECT_FALSE(test::ParseSinglePacket(kTooSmallPacket, &parsed));
+}
+
+TEST(RtcpPacketAppTest, ParseFailsOnUnalignedPayload) {
+ App parsed;
+ EXPECT_FALSE(test::ParseSinglePacket(kPacketWithUnalignedPayload, &parsed));
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/bye.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/bye.cc
new file mode 100644
index 0000000000..a6471772b1
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/bye.cc
@@ -0,0 +1,141 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtcp_packet/bye.h"
+
+#include <string.h>
+
+#include <cstdint>
+#include <utility>
+
+#include "absl/strings/string_view.h"
+#include "modules/rtp_rtcp/source/byte_io.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/common_header.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+namespace rtcp {
+constexpr uint8_t Bye::kPacketType;
+// Bye packet (BYE) (RFC 3550).
+//
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |V=2|P| SC | PT=BYE=203 | length |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | SSRC/CSRC |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// : ... :
+// +=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
+// (opt) | length | reason for leaving ...
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+Bye::Bye() = default;
+
+Bye::~Bye() = default;
+
+bool Bye::Parse(const CommonHeader& packet) {
+ RTC_DCHECK_EQ(packet.type(), kPacketType);
+
+ const uint8_t src_count = packet.count();
+ // Validate packet.
+ if (packet.payload_size_bytes() < 4u * src_count) {
+ RTC_LOG(LS_WARNING)
+ << "Packet is too small to contain CSRCs it promise to have.";
+ return false;
+ }
+ const uint8_t* const payload = packet.payload();
+ bool has_reason = packet.payload_size_bytes() > 4u * src_count;
+ uint8_t reason_length = 0;
+ if (has_reason) {
+ reason_length = payload[4u * src_count];
+ if (packet.payload_size_bytes() - 4u * src_count < 1u + reason_length) {
+ RTC_LOG(LS_WARNING) << "Invalid reason length: " << reason_length;
+ return false;
+ }
+ }
+ // Once sure packet is valid, copy values.
+ if (src_count == 0) { // A count value of zero is valid, but useless.
+ SetSenderSsrc(0);
+ csrcs_.clear();
+ } else {
+ SetSenderSsrc(ByteReader<uint32_t>::ReadBigEndian(payload));
+ csrcs_.resize(src_count - 1);
+ for (size_t i = 1; i < src_count; ++i)
+ csrcs_[i - 1] = ByteReader<uint32_t>::ReadBigEndian(&payload[4 * i]);
+ }
+
+ if (has_reason) {
+ reason_.assign(reinterpret_cast<const char*>(&payload[4u * src_count + 1]),
+ reason_length);
+ } else {
+ reason_.clear();
+ }
+
+ return true;
+}
+
+bool Bye::Create(uint8_t* packet,
+ size_t* index,
+ size_t max_length,
+ PacketReadyCallback callback) const {
+ while (*index + BlockLength() > max_length) {
+ if (!OnBufferFull(packet, index, callback))
+ return false;
+ }
+ const size_t index_end = *index + BlockLength();
+
+ CreateHeader(1 + csrcs_.size(), kPacketType, HeaderLength(), packet, index);
+ // Store srcs of the leaving clients.
+ ByteWriter<uint32_t>::WriteBigEndian(&packet[*index], sender_ssrc());
+ *index += sizeof(uint32_t);
+ for (uint32_t csrc : csrcs_) {
+ ByteWriter<uint32_t>::WriteBigEndian(&packet[*index], csrc);
+ *index += sizeof(uint32_t);
+ }
+ // Store the reason to leave.
+ if (!reason_.empty()) {
+ uint8_t reason_length = static_cast<uint8_t>(reason_.size());
+ packet[(*index)++] = reason_length;
+ memcpy(&packet[*index], reason_.data(), reason_length);
+ *index += reason_length;
+ // Add padding bytes if needed.
+ size_t bytes_to_pad = index_end - *index;
+ RTC_DCHECK_LE(bytes_to_pad, 3);
+ if (bytes_to_pad > 0) {
+ memset(&packet[*index], 0, bytes_to_pad);
+ *index += bytes_to_pad;
+ }
+ }
+ RTC_DCHECK_EQ(index_end, *index);
+ return true;
+}
+
+bool Bye::SetCsrcs(std::vector<uint32_t> csrcs) {
+ if (csrcs.size() > kMaxNumberOfCsrcs) {
+ RTC_LOG(LS_WARNING) << "Too many CSRCs for Bye packet.";
+ return false;
+ }
+ csrcs_ = std::move(csrcs);
+ return true;
+}
+
+void Bye::SetReason(absl::string_view reason) {
+ RTC_DCHECK_LE(reason.size(), 0xffu);
+ reason_ = std::string(reason);
+}
+
+size_t Bye::BlockLength() const {
+ size_t src_count = (1 + csrcs_.size());
+ size_t reason_size_in_32bits = reason_.empty() ? 0 : (reason_.size() / 4 + 1);
+ return kHeaderLength + 4 * (src_count + reason_size_in_32bits);
+}
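+
+// Example: an 18-character reason needs 1 length byte plus 18 text bytes,
+// which BlockLength() rounds up to 18 / 4 + 1 = 5 extra 32-bit words; the
+// single unused tail byte is zero-filled as padding in Create().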
+
+} // namespace rtcp
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/bye.h b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/bye.h
new file mode 100644
index 0000000000..d31205793a
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/bye.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ *
+ */
+
+#ifndef MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_BYE_H_
+#define MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_BYE_H_
+
+#include <string>
+#include <vector>
+
+#include "absl/strings/string_view.h"
+#include "modules/rtp_rtcp/source/rtcp_packet.h"
+
+namespace webrtc {
+namespace rtcp {
+class CommonHeader;
+
+class Bye : public RtcpPacket {
+ public:
+ static constexpr uint8_t kPacketType = 203;
+
+ Bye();
+ ~Bye() override;
+
+ // Parse assumes header is already parsed and validated.
+ bool Parse(const CommonHeader& packet);
+
+ bool SetCsrcs(std::vector<uint32_t> csrcs);
+ void SetReason(absl::string_view reason);
+
+ const std::vector<uint32_t>& csrcs() const { return csrcs_; }
+ const std::string& reason() const { return reason_; }
+
+ size_t BlockLength() const override;
+
+ bool Create(uint8_t* packet,
+ size_t* index,
+ size_t max_length,
+ PacketReadyCallback callback) const override;
+
+ private:
+ static const int kMaxNumberOfCsrcs = 0x1f - 1; // First item is sender SSRC.
+
+ std::vector<uint32_t> csrcs_;
+ std::string reason_;
+};
+
+} // namespace rtcp
+} // namespace webrtc
+#endif // MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_BYE_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/bye_unittest.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/bye_unittest.cc
new file mode 100644
index 0000000000..448c2d4194
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/bye_unittest.cc
@@ -0,0 +1,147 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtcp_packet/bye.h"
+
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "test/rtcp_packet_parser.h"
+
+using ::testing::ElementsAre;
+using webrtc::rtcp::Bye;
+
+namespace webrtc {
+namespace {
+const uint32_t kSenderSsrc = 0x12345678;
+const uint32_t kCsrc1 = 0x22232425;
+const uint32_t kCsrc2 = 0x33343536;
+} // namespace
+
+TEST(RtcpPacketByeTest, CreateAndParseWithoutReason) {
+ Bye bye;
+ bye.SetSenderSsrc(kSenderSsrc);
+
+ rtc::Buffer raw = bye.Build();
+ Bye parsed_bye;
+ EXPECT_TRUE(test::ParseSinglePacket(raw, &parsed_bye));
+
+ EXPECT_EQ(kSenderSsrc, parsed_bye.sender_ssrc());
+ EXPECT_TRUE(parsed_bye.csrcs().empty());
+ EXPECT_TRUE(parsed_bye.reason().empty());
+}
+
+TEST(RtcpPacketByeTest, CreateAndParseWithCsrcs) {
+ Bye bye;
+ bye.SetSenderSsrc(kSenderSsrc);
+ EXPECT_TRUE(bye.SetCsrcs({kCsrc1, kCsrc2}));
+ EXPECT_TRUE(bye.reason().empty());
+
+ rtc::Buffer raw = bye.Build();
+ Bye parsed_bye;
+ EXPECT_TRUE(test::ParseSinglePacket(raw, &parsed_bye));
+
+ EXPECT_EQ(kSenderSsrc, parsed_bye.sender_ssrc());
+ EXPECT_THAT(parsed_bye.csrcs(), ElementsAre(kCsrc1, kCsrc2));
+ EXPECT_TRUE(parsed_bye.reason().empty());
+}
+
+TEST(RtcpPacketByeTest, CreateAndParseWithCsrcsAndAReason) {
+ Bye bye;
+ const std::string kReason = "Some Reason";
+
+ bye.SetSenderSsrc(kSenderSsrc);
+ EXPECT_TRUE(bye.SetCsrcs({kCsrc1, kCsrc2}));
+ bye.SetReason(kReason);
+
+ rtc::Buffer raw = bye.Build();
+ Bye parsed_bye;
+ EXPECT_TRUE(test::ParseSinglePacket(raw, &parsed_bye));
+
+ EXPECT_EQ(kSenderSsrc, parsed_bye.sender_ssrc());
+ EXPECT_THAT(parsed_bye.csrcs(), ElementsAre(kCsrc1, kCsrc2));
+ EXPECT_EQ(kReason, parsed_bye.reason());
+}
+
+TEST(RtcpPacketByeTest, CreateWithTooManyCsrcs) {
+ Bye bye;
+ bye.SetSenderSsrc(kSenderSsrc);
+ const int kMaxCsrcs = (1 << 5) - 2; // 5-bit count; first item is sender SSRC.
+ EXPECT_TRUE(bye.SetCsrcs(std::vector<uint32_t>(kMaxCsrcs, kCsrc1)));
+ EXPECT_FALSE(bye.SetCsrcs(std::vector<uint32_t>(kMaxCsrcs + 1, kCsrc1)));
+}
+
+TEST(RtcpPacketByeTest, CreateAndParseWithAReason) {
+ Bye bye;
+ const std::string kReason = "Some Random Reason";
+
+ bye.SetSenderSsrc(kSenderSsrc);
+ bye.SetReason(kReason);
+
+ rtc::Buffer raw = bye.Build();
+ Bye parsed_bye;
+ EXPECT_TRUE(test::ParseSinglePacket(raw, &parsed_bye));
+
+ EXPECT_EQ(kSenderSsrc, parsed_bye.sender_ssrc());
+ EXPECT_TRUE(parsed_bye.csrcs().empty());
+ EXPECT_EQ(kReason, parsed_bye.reason());
+}
+
+TEST(RtcpPacketByeTest, CreateAndParseWithReasons) {
+ // Test that packet creation/parsing behaves with reasons of different
+ // lengths, both when padding is required and when it is not.
+ for (size_t remainder = 0; remainder < 4; ++remainder) {
+ const std::string kReason(4 + remainder, 'a' + remainder);
+ Bye bye;
+ bye.SetSenderSsrc(kSenderSsrc);
+ bye.SetReason(kReason);
+
+ rtc::Buffer raw = bye.Build();
+ Bye parsed_bye;
+ EXPECT_TRUE(test::ParseSinglePacket(raw, &parsed_bye));
+
+ EXPECT_EQ(kReason, parsed_bye.reason());
+ }
+}
+
+TEST(RtcpPacketByeTest, ParseEmptyPacket) {
+ uint8_t kEmptyPacket[] = {0x80, Bye::kPacketType, 0, 0};
+ Bye parsed_bye;
+ EXPECT_TRUE(test::ParseSinglePacket(kEmptyPacket, &parsed_bye));
+ EXPECT_EQ(0u, parsed_bye.sender_ssrc());
+ EXPECT_TRUE(parsed_bye.csrcs().empty());
+ EXPECT_TRUE(parsed_bye.reason().empty());
+}
+
+TEST(RtcpPacketByeTest, ParseFailOnInvalidSrcCount) {
+ Bye bye;
+ bye.SetSenderSsrc(kSenderSsrc);
+
+ rtc::Buffer raw = bye.Build();
+ raw[0]++; // Damage the packet: increase ssrc count by one.
+
+ Bye parsed_bye;
+ EXPECT_FALSE(test::ParseSinglePacket(raw, &parsed_bye));
+}
+
+TEST(RtcpPacketByeTest, ParseFailOnInvalidReasonLength) {
+ Bye bye;
+ bye.SetSenderSsrc(kSenderSsrc);
+ bye.SetReason("18 characters long");
+
+ rtc::Buffer raw = bye.Build();
+ // Damage the packet: decrease payload size by 4 bytes
+ raw[3]--;
+ raw.SetSize(raw.size() - 4);
+
+ Bye parsed_bye;
+ EXPECT_FALSE(test::ParseSinglePacket(raw, &parsed_bye));
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/common_header.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/common_header.cc
new file mode 100644
index 0000000000..5b54982220
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/common_header.cc
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtcp_packet/common_header.h"
+
+#include "modules/rtp_rtcp/source/byte_io.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+namespace rtcp {
+constexpr size_t CommonHeader::kHeaderSizeBytes;
+//  0                   1                   2                   3
+//  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |V=2|P|   C/F   |  Packet Type  |             length            |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//
+// Common header for all RTCP packets, 4 octets.
+bool CommonHeader::Parse(const uint8_t* buffer, size_t size_bytes) {
+ const uint8_t kVersion = 2;
+
+ if (size_bytes < kHeaderSizeBytes) {
+ RTC_LOG(LS_WARNING)
+ << "Too little data (" << size_bytes << " byte"
+ << (size_bytes != 1 ? "s" : "")
+ << ") remaining in buffer to parse RTCP header (4 bytes).";
+ return false;
+ }
+
+ uint8_t version = buffer[0] >> 6;
+ if (version != kVersion) {
+ RTC_LOG(LS_WARNING) << "Invalid RTCP header: Version must be "
+ << static_cast<int>(kVersion) << " but was "
+ << static_cast<int>(version);
+ return false;
+ }
+
+ bool has_padding = (buffer[0] & 0x20) != 0;
+ count_or_format_ = buffer[0] & 0x1F;
+ packet_type_ = buffer[1];
+ payload_size_ = ByteReader<uint16_t>::ReadBigEndian(&buffer[2]) * 4;
+ payload_ = buffer + kHeaderSizeBytes;
+ padding_size_ = 0;
+
+ if (size_bytes < kHeaderSizeBytes + payload_size_) {
+ RTC_LOG(LS_WARNING) << "Buffer too small (" << size_bytes
+ << " bytes) to fit an RtcpPacket with a header and "
+ << payload_size_ << " bytes.";
+ return false;
+ }
+
+ if (has_padding) {
+ if (payload_size_ == 0) {
+ RTC_LOG(LS_WARNING)
+ << "Invalid RTCP header: Padding bit set but 0 payload "
+ "size specified.";
+ return false;
+ }
+
+ padding_size_ = payload_[payload_size_ - 1];
+ if (padding_size_ == 0) {
+ RTC_LOG(LS_WARNING)
+ << "Invalid RTCP header: Padding bit set but 0 padding "
+ "size specified.";
+ return false;
+ }
+ if (padding_size_ > payload_size_) {
+ RTC_LOG(LS_WARNING) << "Invalid RTCP header: Too many padding bytes ("
+ << padding_size_ << ") for a packet payload size of "
+ << payload_size_ << " bytes.";
+ return false;
+ }
+ payload_size_ -= padding_size_;
+ }
+ return true;
+}
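+
+// Example: a packet whose length field is 2 (8 payload bytes) with the
+// padding bit set and a final byte of 8 is a pure-padding packet: Parse()
+// reports payload_size_bytes() == 0, while NextPacket() still advances past
+// all 12 bytes.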
+} // namespace rtcp
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/common_header.h b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/common_header.h
new file mode 100644
index 0000000000..5416406091
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/common_header.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_COMMON_HEADER_H_
+#define MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_COMMON_HEADER_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+namespace webrtc {
+namespace rtcp {
+class CommonHeader {
+ public:
+ static constexpr size_t kHeaderSizeBytes = 4;
+
+ CommonHeader() {}
+ CommonHeader(const CommonHeader&) = default;
+ CommonHeader& operator=(const CommonHeader&) = default;
+
+ bool Parse(const uint8_t* buffer, size_t size_bytes);
+
+ uint8_t type() const { return packet_type_; }
+ // Depending on the packet type, the same header field is used either as a
+ // count or as a feedback message type (fmt). The caller is expected to know
+ // which interpretation applies.
+ uint8_t fmt() const { return count_or_format_; }
+ uint8_t count() const { return count_or_format_; }
+ size_t payload_size_bytes() const { return payload_size_; }
+ const uint8_t* payload() const { return payload_; }
+ size_t packet_size() const {
+ return kHeaderSizeBytes + payload_size_ + padding_size_;
+ }
+ // Returns pointer to the next RTCP packet in compound packet.
+ const uint8_t* NextPacket() const {
+ return payload_ + payload_size_ + padding_size_;
+ }
+
+ private:
+ uint8_t packet_type_ = 0;
+ uint8_t count_or_format_ = 0;
+ uint8_t padding_size_ = 0;
+ uint32_t payload_size_ = 0;
+ const uint8_t* payload_ = nullptr;
+};
+} // namespace rtcp
+} // namespace webrtc
+#endif // MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_COMMON_HEADER_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/common_header_unittest.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/common_header_unittest.cc
new file mode 100644
index 0000000000..e8b4c52c68
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/common_header_unittest.cc
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtcp_packet/common_header.h"
+
+#include "test/gtest.h"
+
+using webrtc::rtcp::CommonHeader;
+
+namespace webrtc {
+
+TEST(RtcpCommonHeaderTest, TooSmallBuffer) {
+ uint8_t buffer[] = {0x80, 0x00, 0x00, 0x00};
+ CommonHeader header;
+ // Buffer needs to be able to hold the header.
+ EXPECT_FALSE(header.Parse(buffer, 0));
+ EXPECT_FALSE(header.Parse(buffer, 1));
+ EXPECT_FALSE(header.Parse(buffer, 2));
+ EXPECT_FALSE(header.Parse(buffer, 3));
+ EXPECT_TRUE(header.Parse(buffer, 4));
+}
+
+TEST(RtcpCommonHeaderTest, Version) {
+ uint8_t buffer[] = {0x00, 0x00, 0x00, 0x00};
+ CommonHeader header;
+  // Version 2 is the only allowed value.
+ buffer[0] = 0 << 6;
+ EXPECT_FALSE(header.Parse(buffer, sizeof(buffer)));
+ buffer[0] = 1 << 6;
+ EXPECT_FALSE(header.Parse(buffer, sizeof(buffer)));
+ buffer[0] = 2 << 6;
+ EXPECT_TRUE(header.Parse(buffer, sizeof(buffer)));
+ buffer[0] = 3 << 6;
+ EXPECT_FALSE(header.Parse(buffer, sizeof(buffer)));
+}
+
+TEST(RtcpCommonHeaderTest, PacketSize) {
+ uint8_t buffer[] = {0x80, 0x00, 0x00, 0x02, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+ CommonHeader header;
+ EXPECT_FALSE(header.Parse(buffer, sizeof(buffer) - 1));
+ EXPECT_TRUE(header.Parse(buffer, sizeof(buffer)));
+ EXPECT_EQ(8u, header.payload_size_bytes());
+ EXPECT_EQ(buffer + sizeof(buffer), header.NextPacket());
+ EXPECT_EQ(sizeof(buffer), header.packet_size());
+}
+
+TEST(RtcpCommonHeaderTest, PaddingAndPayloadSize) {
+ // Set v = 2, p = 1, but leave fmt, pt as 0.
+ uint8_t buffer[] = {0xa0, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+ CommonHeader header;
+ // Padding bit set, but no byte for padding (can't specify padding length).
+ EXPECT_FALSE(header.Parse(buffer, 4));
+
+ buffer[3] = 2; // Set payload size to 2x32bit.
+ const size_t kPayloadSizeBytes = buffer[3] * 4;
+ const size_t kPaddingAddress =
+ CommonHeader::kHeaderSizeBytes + kPayloadSizeBytes - 1;
+
+ // Padding one byte larger than possible.
+ buffer[kPaddingAddress] = kPayloadSizeBytes + 1;
+ EXPECT_FALSE(header.Parse(buffer, sizeof(buffer)));
+
+ // Invalid zero padding size.
+ buffer[kPaddingAddress] = 0;
+ EXPECT_FALSE(header.Parse(buffer, sizeof(buffer)));
+
+ // Pure padding packet.
+ buffer[kPaddingAddress] = kPayloadSizeBytes;
+ EXPECT_TRUE(header.Parse(buffer, sizeof(buffer)));
+ EXPECT_EQ(0u, header.payload_size_bytes());
+ EXPECT_EQ(buffer + sizeof(buffer), header.NextPacket());
+ EXPECT_EQ(header.payload(), buffer + CommonHeader::kHeaderSizeBytes);
+ EXPECT_EQ(header.packet_size(), sizeof(buffer));
+
+ // Single byte of actual data.
+ buffer[kPaddingAddress] = kPayloadSizeBytes - 1;
+ EXPECT_TRUE(header.Parse(buffer, sizeof(buffer)));
+ EXPECT_EQ(1u, header.payload_size_bytes());
+ EXPECT_EQ(buffer + sizeof(buffer), header.NextPacket());
+ EXPECT_EQ(header.packet_size(), sizeof(buffer));
+}
+
+TEST(RtcpCommonHeaderTest, FormatAndPayloadType) {
+ uint8_t buffer[] = {0x9e, 0xab, 0x00, 0x00};
+ CommonHeader header;
+ EXPECT_TRUE(header.Parse(buffer, sizeof(buffer)));
+
+ EXPECT_EQ(header.count(), 0x1e);
+ EXPECT_EQ(header.fmt(), 0x1e);
+ EXPECT_EQ(header.type(), 0xab);
+ EXPECT_EQ(header.payload_size_bytes(), 0u);
+ EXPECT_EQ(header.payload(), buffer + CommonHeader::kHeaderSizeBytes);
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/compound_packet.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/compound_packet.cc
new file mode 100644
index 0000000000..54f3555fc6
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/compound_packet.cc
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtcp_packet/compound_packet.h"
+
+#include <memory>
+#include <utility>
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace rtcp {
+
+CompoundPacket::CompoundPacket() = default;
+
+CompoundPacket::~CompoundPacket() = default;
+
+void CompoundPacket::Append(std::unique_ptr<RtcpPacket> packet) {
+ RTC_CHECK(packet);
+ appended_packets_.push_back(std::move(packet));
+}
+
+bool CompoundPacket::Create(uint8_t* packet,
+ size_t* index,
+ size_t max_length,
+ PacketReadyCallback callback) const {
+ for (const auto& appended : appended_packets_) {
+ if (!appended->Create(packet, index, max_length, callback))
+ return false;
+ }
+ return true;
+}
+
+size_t CompoundPacket::BlockLength() const {
+ size_t block_length = 0;
+ for (const auto& appended : appended_packets_) {
+ block_length += appended->BlockLength();
+ }
+ return block_length;
+}
+
+} // namespace rtcp
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/compound_packet.h b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/compound_packet.h
new file mode 100644
index 0000000000..d98dbd088d
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/compound_packet.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ *
+ */
+
+#ifndef MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_COMPOUND_PACKET_H_
+#define MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_COMPOUND_PACKET_H_
+
+#include <memory>
+#include <vector>
+
+#include "modules/rtp_rtcp/source/rtcp_packet.h"
+
+namespace webrtc {
+namespace rtcp {
+
+class CompoundPacket : public RtcpPacket {
+ public:
+ CompoundPacket();
+ ~CompoundPacket() override;
+
+ CompoundPacket(const CompoundPacket&) = delete;
+ CompoundPacket& operator=(const CompoundPacket&) = delete;
+
+ void Append(std::unique_ptr<RtcpPacket> packet);
+
+ // Size of this packet in bytes (i.e. total size of nested packets).
+ size_t BlockLength() const override;
+ // Returns true if all calls to Create succeeded.
+ bool Create(uint8_t* packet,
+ size_t* index,
+ size_t max_length,
+ PacketReadyCallback callback) const override;
+
+ protected:
+ std::vector<std::unique_ptr<RtcpPacket>> appended_packets_;
+};
+
+} // namespace rtcp
+} // namespace webrtc
+#endif // MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_COMPOUND_PACKET_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/compound_packet_unittest.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/compound_packet_unittest.cc
new file mode 100644
index 0000000000..9348aee7e4
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/compound_packet_unittest.cc
@@ -0,0 +1,155 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtcp_packet/compound_packet.h"
+
+#include <memory>
+#include <utility>
+
+#include "modules/rtp_rtcp/source/rtcp_packet.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/bye.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/fir.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/receiver_report.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/sender_report.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "test/rtcp_packet_parser.h"
+
+using ::testing::_;
+using ::testing::Invoke;
+using ::testing::MockFunction;
+using webrtc::rtcp::Bye;
+using webrtc::rtcp::CompoundPacket;
+using webrtc::rtcp::Fir;
+using webrtc::rtcp::ReceiverReport;
+using webrtc::rtcp::ReportBlock;
+using webrtc::rtcp::SenderReport;
+using webrtc::test::RtcpPacketParser;
+
+namespace webrtc {
+
+const uint32_t kSenderSsrc = 0x12345678;
+const uint32_t kRemoteSsrc = 0x23456789;
+const uint8_t kSeqNo = 13;
+
+TEST(RtcpCompoundPacketTest, AppendPacket) {
+ CompoundPacket compound;
+ auto fir = std::make_unique<Fir>();
+ fir->AddRequestTo(kRemoteSsrc, kSeqNo);
+ ReportBlock rb;
+ auto rr = std::make_unique<ReceiverReport>();
+ rr->SetSenderSsrc(kSenderSsrc);
+ EXPECT_TRUE(rr->AddReportBlock(rb));
+ compound.Append(std::move(rr));
+ compound.Append(std::move(fir));
+
+ rtc::Buffer packet = compound.Build();
+ RtcpPacketParser parser;
+ parser.Parse(packet.data(), packet.size());
+ EXPECT_EQ(1, parser.receiver_report()->num_packets());
+ EXPECT_EQ(kSenderSsrc, parser.receiver_report()->sender_ssrc());
+ EXPECT_EQ(1u, parser.receiver_report()->report_blocks().size());
+ EXPECT_EQ(1, parser.fir()->num_packets());
+}
+
+TEST(RtcpCompoundPacketTest, AppendPacketWithOwnAppendedPacket) {
+ CompoundPacket root;
+ auto leaf = std::make_unique<CompoundPacket>();
+
+ auto fir = std::make_unique<Fir>();
+ fir->AddRequestTo(kRemoteSsrc, kSeqNo);
+ auto bye = std::make_unique<Bye>();
+ ReportBlock rb;
+
+ auto rr = std::make_unique<ReceiverReport>();
+ EXPECT_TRUE(rr->AddReportBlock(rb));
+ leaf->Append(std::move(rr));
+ leaf->Append(std::move(fir));
+
+ auto sr = std::make_unique<SenderReport>();
+ root.Append(std::move(sr));
+ root.Append(std::move(bye));
+ root.Append(std::move(leaf));
+
+ rtc::Buffer packet = root.Build();
+ RtcpPacketParser parser;
+ parser.Parse(packet.data(), packet.size());
+ EXPECT_EQ(1, parser.sender_report()->num_packets());
+ EXPECT_EQ(1, parser.receiver_report()->num_packets());
+ EXPECT_EQ(1u, parser.receiver_report()->report_blocks().size());
+ EXPECT_EQ(1, parser.bye()->num_packets());
+ EXPECT_EQ(1, parser.fir()->num_packets());
+}
+
+TEST(RtcpCompoundPacketTest, BuildWithInputBuffer) {
+ CompoundPacket compound;
+ auto fir = std::make_unique<Fir>();
+ fir->AddRequestTo(kRemoteSsrc, kSeqNo);
+ ReportBlock rb;
+ auto rr = std::make_unique<ReceiverReport>();
+ rr->SetSenderSsrc(kSenderSsrc);
+ EXPECT_TRUE(rr->AddReportBlock(rb));
+ compound.Append(std::move(rr));
+ compound.Append(std::move(fir));
+
+ const size_t kRrLength = 8;
+ const size_t kReportBlockLength = 24;
+ const size_t kFirLength = 20;
+
+ const size_t kBufferSize = kRrLength + kReportBlockLength + kFirLength;
+ MockFunction<void(rtc::ArrayView<const uint8_t>)> callback;
+ EXPECT_CALL(callback, Call(_))
+ .WillOnce(Invoke([&](rtc::ArrayView<const uint8_t> packet) {
+ RtcpPacketParser parser;
+ parser.Parse(packet.data(), packet.size());
+ EXPECT_EQ(1, parser.receiver_report()->num_packets());
+ EXPECT_EQ(1u, parser.receiver_report()->report_blocks().size());
+ EXPECT_EQ(1, parser.fir()->num_packets());
+ }));
+
+ EXPECT_TRUE(compound.Build(kBufferSize, callback.AsStdFunction()));
+}
+
+TEST(RtcpCompoundPacketTest, BuildWithTooSmallBuffer_FragmentedSend) {
+ CompoundPacket compound;
+ auto fir = std::make_unique<Fir>();
+ fir->AddRequestTo(kRemoteSsrc, kSeqNo);
+ ReportBlock rb;
+ auto rr = std::make_unique<ReceiverReport>();
+ rr->SetSenderSsrc(kSenderSsrc);
+ EXPECT_TRUE(rr->AddReportBlock(rb));
+ compound.Append(std::move(rr));
+ compound.Append(std::move(fir));
+
+ const size_t kRrLength = 8;
+ const size_t kReportBlockLength = 24;
+
+ const size_t kBufferSize = kRrLength + kReportBlockLength;
+ MockFunction<void(rtc::ArrayView<const uint8_t>)> callback;
+ EXPECT_CALL(callback, Call(_))
+ .WillOnce(Invoke([&](rtc::ArrayView<const uint8_t> packet) {
+ RtcpPacketParser parser;
+ parser.Parse(packet.data(), packet.size());
+ EXPECT_EQ(1, parser.receiver_report()->num_packets());
+ EXPECT_EQ(1U, parser.receiver_report()->report_blocks().size());
+ EXPECT_EQ(0, parser.fir()->num_packets());
+ }))
+ .WillOnce(Invoke([&](rtc::ArrayView<const uint8_t> packet) {
+ RtcpPacketParser parser;
+ parser.Parse(packet.data(), packet.size());
+ EXPECT_EQ(0, parser.receiver_report()->num_packets());
+ EXPECT_EQ(0U, parser.receiver_report()->report_blocks().size());
+ EXPECT_EQ(1, parser.fir()->num_packets());
+ }));
+
+ EXPECT_TRUE(compound.Build(kBufferSize, callback.AsStdFunction()));
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/dlrr.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/dlrr.cc
new file mode 100644
index 0000000000..6863def2fe
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/dlrr.cc
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtcp_packet/dlrr.h"
+
+#include "modules/rtp_rtcp/source/byte_io.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/numerics/safe_conversions.h"
+
+namespace webrtc {
+namespace rtcp {
+// DLRR Report Block (RFC 3611).
+//
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | BT=5 | reserved | block length |
+// +=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
+// | SSRC_1 (SSRC of first receiver) | sub-
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ block
+// | last RR (LRR) | 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | delay since last RR (DLRR) |
+// +=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
+// | SSRC_2 (SSRC of second receiver) | sub-
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ block
+// : ... : 2
+
+Dlrr::Dlrr() = default;
+
+Dlrr::Dlrr(const Dlrr& other) = default;
+
+Dlrr::~Dlrr() = default;
+
+bool Dlrr::Parse(const uint8_t* buffer, uint16_t block_length_32bits) {
+ RTC_DCHECK(buffer[0] == kBlockType);
+ // kReserved = buffer[1];
+ RTC_DCHECK_EQ(block_length_32bits,
+ ByteReader<uint16_t>::ReadBigEndian(&buffer[2]));
+ if (block_length_32bits % 3 != 0) {
+ RTC_LOG(LS_WARNING) << "Invalid size for dlrr block.";
+ return false;
+ }
+
+ size_t blocks_count = block_length_32bits / 3;
+ const uint8_t* read_at = buffer + kBlockHeaderLength;
+ sub_blocks_.resize(blocks_count);
+ for (ReceiveTimeInfo& sub_block : sub_blocks_) {
+ sub_block.ssrc = ByteReader<uint32_t>::ReadBigEndian(&read_at[0]);
+ sub_block.last_rr = ByteReader<uint32_t>::ReadBigEndian(&read_at[4]);
+ sub_block.delay_since_last_rr =
+ ByteReader<uint32_t>::ReadBigEndian(&read_at[8]);
+ read_at += kSubBlockLength;
+ }
+ return true;
+}
+
+size_t Dlrr::BlockLength() const {
+ if (sub_blocks_.empty())
+ return 0;
+ return kBlockHeaderLength + kSubBlockLength * sub_blocks_.size();
+}
+
+void Dlrr::Create(uint8_t* buffer) const {
+ if (sub_blocks_.empty()) // No subblocks, no need to write header either.
+ return;
+ // Create block header.
+ const uint8_t kReserved = 0;
+ buffer[0] = kBlockType;
+ buffer[1] = kReserved;
+ ByteWriter<uint16_t>::WriteBigEndian(
+ &buffer[2], rtc::dchecked_cast<uint16_t>(3 * sub_blocks_.size()));
+ // Create sub blocks.
+ uint8_t* write_at = buffer + kBlockHeaderLength;
+ for (const ReceiveTimeInfo& sub_block : sub_blocks_) {
+ ByteWriter<uint32_t>::WriteBigEndian(&write_at[0], sub_block.ssrc);
+ ByteWriter<uint32_t>::WriteBigEndian(&write_at[4], sub_block.last_rr);
+ ByteWriter<uint32_t>::WriteBigEndian(&write_at[8],
+ sub_block.delay_since_last_rr);
+ write_at += kSubBlockLength;
+ }
+ RTC_DCHECK_EQ(buffer + BlockLength(), write_at);
+}
+
+} // namespace rtcp
+} // namespace webrtc
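// Sketch, not part of the patch above: the size bookkeeping behind Create()
// and BlockLength(). Each sub-block is three 32-bit words, so the block
// length field is 3 * N and the wire footprint is the 4-byte header plus
// 12 * N bytes (or nothing at all when there are no sub-blocks).
constexpr size_t DlrrWireSizeBytes(size_t num_sub_blocks) {
  return num_sub_blocks == 0 ? 0 : 4 + 12 * num_sub_blocks;
}
static_assert(DlrrWireSizeBytes(50) == 604,
              "Matches the 50-item case in dlrr_unittest.cc");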
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/dlrr.h b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/dlrr.h
new file mode 100644
index 0000000000..ad91dfdcc6
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/dlrr.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ *
+ */
+
+#ifndef MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_DLRR_H_
+#define MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_DLRR_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <vector>
+
+namespace webrtc {
+namespace rtcp {
+struct ReceiveTimeInfo {
+ // RFC 3611 4.5
+ ReceiveTimeInfo() : ssrc(0), last_rr(0), delay_since_last_rr(0) {}
+ ReceiveTimeInfo(uint32_t ssrc, uint32_t last_rr, uint32_t delay)
+ : ssrc(ssrc), last_rr(last_rr), delay_since_last_rr(delay) {}
+
+ uint32_t ssrc;
+ uint32_t last_rr;
+ uint32_t delay_since_last_rr;
+};
+
+inline bool operator==(const ReceiveTimeInfo& lhs, const ReceiveTimeInfo& rhs) {
+ return lhs.ssrc == rhs.ssrc && lhs.last_rr == rhs.last_rr &&
+ lhs.delay_since_last_rr == rhs.delay_since_last_rr;
+}
+
+inline bool operator!=(const ReceiveTimeInfo& lhs, const ReceiveTimeInfo& rhs) {
+ return !(lhs == rhs);
+}
+
+// DLRR Report Block: Delay since the Last Receiver Report (RFC 3611).
+class Dlrr {
+ public:
+ static const uint8_t kBlockType = 5;
+
+ Dlrr();
+ Dlrr(const Dlrr& other);
+ ~Dlrr();
+
+ Dlrr& operator=(const Dlrr& other) = default;
+
+  // A Dlrr without items is treated the same as no Dlrr block.
+ explicit operator bool() const { return !sub_blocks_.empty(); }
+
+  // The second parameter is the value read from the block header, i.e. the
+  // size of the block in 32-bit words, excluding the block header itself.
+ bool Parse(const uint8_t* buffer, uint16_t block_length_32bits);
+
+ size_t BlockLength() const;
+ // Fills buffer with the Dlrr.
+ // Consumes BlockLength() bytes.
+ void Create(uint8_t* buffer) const;
+
+ void ClearItems() { sub_blocks_.clear(); }
+ void AddDlrrItem(const ReceiveTimeInfo& time_info) {
+ sub_blocks_.push_back(time_info);
+ }
+
+ const std::vector<ReceiveTimeInfo>& sub_blocks() const { return sub_blocks_; }
+
+ private:
+ static const size_t kBlockHeaderLength = 4;
+ static const size_t kSubBlockLength = 12;
+
+ std::vector<ReceiveTimeInfo> sub_blocks_;
+};
+} // namespace rtcp
+} // namespace webrtc
+#endif // MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_DLRR_H_
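// Sketch, not part of the patch above: the standard RFC 3611 round-trip-time
// computation that a receiver of this block performs. All three quantities
// are in "compact NTP" units of 1/65536 seconds; arrival_compact_ntp is
// assumed to be the middle 32 bits of the NTP time at which the XR arrived.
uint32_t RttCompactNtp(const webrtc::rtcp::ReceiveTimeInfo& info,
                       uint32_t arrival_compact_ntp) {
  // Unsigned wrap-around arithmetic is intentional here.
  return arrival_compact_ntp - info.last_rr - info.delay_since_last_rr;
}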
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/dlrr_unittest.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/dlrr_unittest.cc
new file mode 100644
index 0000000000..408d0011b8
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/dlrr_unittest.cc
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtcp_packet/dlrr.h"
+
+#include "modules/rtp_rtcp/source/byte_io.h"
+#include "test/gtest.h"
+
+using webrtc::rtcp::Dlrr;
+using webrtc::rtcp::ReceiveTimeInfo;
+
+namespace webrtc {
+namespace {
+const uint32_t kSsrc = 0x12345678;
+const uint32_t kLastRR = 0x23344556;
+const uint32_t kDelay = 0x33343536;
+const uint8_t kBlock[] = {0x05, 0x00, 0x00, 0x03, 0x12, 0x34, 0x56, 0x78,
+ 0x23, 0x34, 0x45, 0x56, 0x33, 0x34, 0x35, 0x36};
+const size_t kBlockSizeBytes = sizeof(kBlock);
+} // namespace
+
+TEST(RtcpPacketDlrrTest, Empty) {
+ Dlrr dlrr;
+
+ EXPECT_EQ(0u, dlrr.BlockLength());
+}
+
+TEST(RtcpPacketDlrrTest, Create) {
+ Dlrr dlrr;
+ dlrr.AddDlrrItem(ReceiveTimeInfo(kSsrc, kLastRR, kDelay));
+
+ ASSERT_EQ(kBlockSizeBytes, dlrr.BlockLength());
+ uint8_t buffer[kBlockSizeBytes];
+
+ dlrr.Create(buffer);
+ EXPECT_EQ(0, memcmp(buffer, kBlock, kBlockSizeBytes));
+}
+
+TEST(RtcpPacketDlrrTest, Parse) {
+ Dlrr dlrr;
+ uint16_t block_length = ByteReader<uint16_t>::ReadBigEndian(&kBlock[2]);
+ EXPECT_TRUE(dlrr.Parse(kBlock, block_length));
+
+ EXPECT_EQ(1u, dlrr.sub_blocks().size());
+ const ReceiveTimeInfo& block = dlrr.sub_blocks().front();
+ EXPECT_EQ(kSsrc, block.ssrc);
+ EXPECT_EQ(kLastRR, block.last_rr);
+ EXPECT_EQ(kDelay, block.delay_since_last_rr);
+}
+
+TEST(RtcpPacketDlrrTest, ParseFailsOnBadSize) {
+ const size_t kBigBufferSize = 0x100; // More than enough.
+ uint8_t buffer[kBigBufferSize];
+ buffer[0] = Dlrr::kBlockType;
+ buffer[1] = 0; // Reserved.
+ buffer[2] = 0; // Most significant size byte.
+ for (uint8_t size = 3; size < 6; ++size) {
+ buffer[3] = size;
+ Dlrr dlrr;
+    // Parse should succeed only when the size is a multiple of 3.
+ EXPECT_EQ(size % 3 == 0, dlrr.Parse(buffer, static_cast<uint16_t>(size)));
+ }
+}
+
+TEST(RtcpPacketDlrrTest, CreateAndParseManySubBlocks) {
+ const size_t kBufferSize = 0x1000; // More than enough.
+ const size_t kManyDlrrItems = 50;
+ uint8_t buffer[kBufferSize];
+
+ // Create.
+ Dlrr dlrr;
+ for (size_t i = 1; i <= kManyDlrrItems; ++i)
+ dlrr.AddDlrrItem(ReceiveTimeInfo(kSsrc + i, kLastRR + i, kDelay + i));
+ size_t used_buffer_size = dlrr.BlockLength();
+ ASSERT_LE(used_buffer_size, kBufferSize);
+ dlrr.Create(buffer);
+
+ // Parse.
+ Dlrr parsed;
+ uint16_t block_length = ByteReader<uint16_t>::ReadBigEndian(&buffer[2]);
+ EXPECT_EQ(used_buffer_size, (block_length + 1) * 4u);
+ EXPECT_TRUE(parsed.Parse(buffer, block_length));
+ EXPECT_EQ(kManyDlrrItems, parsed.sub_blocks().size());
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/extended_reports.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/extended_reports.cc
new file mode 100644
index 0000000000..ce57bd5a88
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/extended_reports.cc
@@ -0,0 +1,195 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtcp_packet/extended_reports.h"
+
+#include <vector>
+
+#include "modules/rtp_rtcp/source/byte_io.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/common_header.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+namespace rtcp {
+constexpr uint8_t ExtendedReports::kPacketType;
+constexpr size_t ExtendedReports::kMaxNumberOfDlrrItems;
+// From RFC 3611: RTP Control Protocol Extended Reports (RTCP XR).
+//
+// Format for XR packets:
+//
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |V=2|P|reserved | PT=XR=207 | length |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | SSRC |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// : report blocks :
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//
+// Extended report block:
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | Block Type | reserved | block length |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// : type-specific block contents :
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ExtendedReports::ExtendedReports() = default;
+ExtendedReports::ExtendedReports(const ExtendedReports& xr) = default;
+ExtendedReports::~ExtendedReports() = default;
+
+bool ExtendedReports::Parse(const CommonHeader& packet) {
+ RTC_DCHECK_EQ(packet.type(), kPacketType);
+
+ if (packet.payload_size_bytes() < kXrBaseLength) {
+ RTC_LOG(LS_WARNING)
+ << "Packet is too small to be an ExtendedReports packet.";
+ return false;
+ }
+
+ SetSenderSsrc(ByteReader<uint32_t>::ReadBigEndian(packet.payload()));
+ rrtr_block_.reset();
+ dlrr_block_.ClearItems();
+ target_bitrate_ = absl::nullopt;
+
+ const uint8_t* current_block = packet.payload() + kXrBaseLength;
+ const uint8_t* const packet_end =
+ packet.payload() + packet.payload_size_bytes();
+ constexpr size_t kBlockHeaderSizeBytes = 4;
+ while (current_block + kBlockHeaderSizeBytes <= packet_end) {
+ uint8_t block_type = ByteReader<uint8_t>::ReadBigEndian(current_block);
+ uint16_t block_length =
+ ByteReader<uint16_t>::ReadBigEndian(current_block + 2);
+ const uint8_t* next_block =
+ current_block + kBlockHeaderSizeBytes + block_length * 4;
+ if (next_block > packet_end) {
+ RTC_LOG(LS_WARNING)
+ << "Report block in extended report packet is too big.";
+ return false;
+ }
+ switch (block_type) {
+ case Rrtr::kBlockType:
+ ParseRrtrBlock(current_block, block_length);
+ break;
+ case Dlrr::kBlockType:
+ ParseDlrrBlock(current_block, block_length);
+ break;
+ case TargetBitrate::kBlockType:
+ ParseTargetBitrateBlock(current_block, block_length);
+ break;
+ default:
+ // Unknown block, ignore.
+        RTC_LOG(LS_WARNING) << "Unknown extended report block type "
+                            << static_cast<int>(block_type);
+ break;
+ }
+ current_block = next_block;
+ }
+
+ return true;
+}
+
+void ExtendedReports::SetRrtr(const Rrtr& rrtr) {
+ if (rrtr_block_)
+ RTC_LOG(LS_WARNING) << "Rrtr already set, overwriting.";
+ rrtr_block_.emplace(rrtr);
+}
+
+bool ExtendedReports::AddDlrrItem(const ReceiveTimeInfo& time_info) {
+ if (dlrr_block_.sub_blocks().size() >= kMaxNumberOfDlrrItems) {
+ RTC_LOG(LS_WARNING) << "Reached maximum number of DLRR items.";
+ return false;
+ }
+ dlrr_block_.AddDlrrItem(time_info);
+ return true;
+}
+
+void ExtendedReports::SetTargetBitrate(const TargetBitrate& bitrate) {
+ if (target_bitrate_)
+ RTC_LOG(LS_WARNING) << "TargetBitrate already set, overwriting.";
+
+ target_bitrate_ = bitrate;
+}
+
+size_t ExtendedReports::BlockLength() const {
+ return kHeaderLength + kXrBaseLength + RrtrLength() + DlrrLength() +
+ TargetBitrateLength();
+}
+
+bool ExtendedReports::Create(uint8_t* packet,
+ size_t* index,
+ size_t max_length,
+ PacketReadyCallback callback) const {
+ while (*index + BlockLength() > max_length) {
+ if (!OnBufferFull(packet, index, callback))
+ return false;
+ }
+ size_t index_end = *index + BlockLength();
+ const uint8_t kReserved = 0;
+ CreateHeader(kReserved, kPacketType, HeaderLength(), packet, index);
+ ByteWriter<uint32_t>::WriteBigEndian(packet + *index, sender_ssrc());
+ *index += sizeof(uint32_t);
+ if (rrtr_block_) {
+ rrtr_block_->Create(packet + *index);
+ *index += Rrtr::kLength;
+ }
+ if (dlrr_block_) {
+ dlrr_block_.Create(packet + *index);
+ *index += dlrr_block_.BlockLength();
+ }
+ if (target_bitrate_) {
+ target_bitrate_->Create(packet + *index);
+ *index += target_bitrate_->BlockLength();
+ }
+ RTC_CHECK_EQ(*index, index_end);
+ return true;
+}
+
+size_t ExtendedReports::TargetBitrateLength() const {
+ if (target_bitrate_)
+ return target_bitrate_->BlockLength();
+ return 0;
+}
+
+void ExtendedReports::ParseRrtrBlock(const uint8_t* block,
+ uint16_t block_length) {
+ if (block_length != Rrtr::kBlockLength) {
+    RTC_LOG(LS_WARNING) << "Incorrect rrtr block size " << block_length
+                        << "; should be " << Rrtr::kBlockLength;
+ return;
+ }
+ if (rrtr_block_) {
+ RTC_LOG(LS_WARNING)
+ << "Two rrtr blocks found in same Extended Report packet";
+ return;
+ }
+ rrtr_block_.emplace();
+ rrtr_block_->Parse(block);
+}
+
+void ExtendedReports::ParseDlrrBlock(const uint8_t* block,
+ uint16_t block_length) {
+ if (dlrr_block_) {
+ RTC_LOG(LS_WARNING)
+ << "Two Dlrr blocks found in same Extended Report packet";
+ return;
+ }
+ dlrr_block_.Parse(block, block_length);
+}
+
+void ExtendedReports::ParseTargetBitrateBlock(const uint8_t* block,
+ uint16_t block_length) {
+ target_bitrate_.emplace();
+ target_bitrate_->Parse(block, block_length);
+}
+} // namespace rtcp
+} // namespace webrtc
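// Sketch, not part of the patch above: the arithmetic behind the parse loop.
// Every XR report block starts with a 4-byte header whose 16-bit block-length
// field counts the 32-bit words that follow the header, so the next block
// begins 4 + 4 * block_length bytes further on.
constexpr size_t XrBlockTotalSizeBytes(uint16_t block_length_words) {
  return 4 + 4 * static_cast<size_t>(block_length_words);
}
// E.g. an RRTR block carries 8 payload bytes, so its length field is 2 and
// the whole block occupies XrBlockTotalSizeBytes(2) == 12 bytes.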
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/extended_reports.h b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/extended_reports.h
new file mode 100644
index 0000000000..6c804bbc7b
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/extended_reports.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_EXTENDED_REPORTS_H_
+#define MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_EXTENDED_REPORTS_H_
+
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "modules/rtp_rtcp/source/rtcp_packet.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/dlrr.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/rrtr.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/target_bitrate.h"
+
+namespace webrtc {
+namespace rtcp {
+class CommonHeader;
+
+// From RFC 3611: RTP Control Protocol Extended Reports (RTCP XR).
+class ExtendedReports : public RtcpPacket {
+ public:
+ static constexpr uint8_t kPacketType = 207;
+ static constexpr size_t kMaxNumberOfDlrrItems = 50;
+
+ ExtendedReports();
+ ExtendedReports(const ExtendedReports& xr);
+ ~ExtendedReports() override;
+
+ // Parse assumes header is already parsed and validated.
+ bool Parse(const CommonHeader& packet);
+
+ void SetRrtr(const Rrtr& rrtr);
+ bool AddDlrrItem(const ReceiveTimeInfo& time_info);
+ void SetTargetBitrate(const TargetBitrate& target_bitrate);
+
+ const absl::optional<Rrtr>& rrtr() const { return rrtr_block_; }
+ const Dlrr& dlrr() const { return dlrr_block_; }
+ const absl::optional<TargetBitrate>& target_bitrate() const {
+ return target_bitrate_;
+ }
+
+ size_t BlockLength() const override;
+
+ bool Create(uint8_t* packet,
+ size_t* index,
+ size_t max_length,
+ PacketReadyCallback callback) const override;
+
+ private:
+ static constexpr size_t kXrBaseLength = 4;
+
+ size_t RrtrLength() const { return rrtr_block_ ? Rrtr::kLength : 0; }
+ size_t DlrrLength() const { return dlrr_block_.BlockLength(); }
+ size_t TargetBitrateLength() const;
+
+ void ParseRrtrBlock(const uint8_t* block, uint16_t block_length);
+ void ParseDlrrBlock(const uint8_t* block, uint16_t block_length);
+ void ParseTargetBitrateBlock(const uint8_t* block, uint16_t block_length);
+
+ absl::optional<Rrtr> rrtr_block_;
+  Dlrr dlrr_block_;  // A Dlrr without items counts as no Dlrr block.
+ absl::optional<TargetBitrate> target_bitrate_;
+};
+} // namespace rtcp
+} // namespace webrtc
+#endif // MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_EXTENDED_REPORTS_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/extended_reports_unittest.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/extended_reports_unittest.cc
new file mode 100644
index 0000000000..3d9a2a3408
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/extended_reports_unittest.cc
@@ -0,0 +1,169 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtcp_packet/extended_reports.h"
+
+#include "rtc_base/random.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "test/rtcp_packet_parser.h"
+
+using ::testing::ElementsAre;
+using ::testing::ElementsAreArray;
+using ::testing::make_tuple;
+using ::testing::SizeIs;
+using webrtc::rtcp::ExtendedReports;
+using webrtc::rtcp::ReceiveTimeInfo;
+using webrtc::rtcp::Rrtr;
+
+namespace webrtc {
+namespace {
+constexpr uint32_t kSenderSsrc = 0x12345678;
+constexpr uint8_t kEmptyPacket[] = {0x80, 207, 0x00, 0x01,
+ 0x12, 0x34, 0x56, 0x78};
+} // namespace
+
+class RtcpPacketExtendedReportsTest : public ::testing::Test {
+ public:
+ RtcpPacketExtendedReportsTest() : random_(0x123456789) {}
+
+ protected:
+ template <typename T>
+ T Rand() {
+ return random_.Rand<T>();
+ }
+
+ private:
+ Random random_;
+};
+
+template <>
+ReceiveTimeInfo RtcpPacketExtendedReportsTest::Rand<ReceiveTimeInfo>() {
+ uint32_t ssrc = Rand<uint32_t>();
+ uint32_t last_rr = Rand<uint32_t>();
+ uint32_t delay_since_last_rr = Rand<uint32_t>();
+ return ReceiveTimeInfo(ssrc, last_rr, delay_since_last_rr);
+}
+
+template <>
+NtpTime RtcpPacketExtendedReportsTest::Rand<NtpTime>() {
+ uint32_t secs = Rand<uint32_t>();
+ uint32_t frac = Rand<uint32_t>();
+ return NtpTime(secs, frac);
+}
+
+template <>
+Rrtr RtcpPacketExtendedReportsTest::Rand<Rrtr>() {
+ Rrtr rrtr;
+ rrtr.SetNtp(Rand<NtpTime>());
+ return rrtr;
+}
+
+TEST_F(RtcpPacketExtendedReportsTest, CreateWithoutReportBlocks) {
+ ExtendedReports xr;
+ xr.SetSenderSsrc(kSenderSsrc);
+
+ rtc::Buffer packet = xr.Build();
+
+ EXPECT_THAT(make_tuple(packet.data(), packet.size()),
+ ElementsAreArray(kEmptyPacket));
+}
+
+TEST_F(RtcpPacketExtendedReportsTest, ParseWithoutReportBlocks) {
+ ExtendedReports parsed;
+ EXPECT_TRUE(test::ParseSinglePacket(kEmptyPacket, &parsed));
+ EXPECT_EQ(kSenderSsrc, parsed.sender_ssrc());
+ EXPECT_FALSE(parsed.rrtr());
+ EXPECT_FALSE(parsed.dlrr());
+}
+
+TEST_F(RtcpPacketExtendedReportsTest, CreateAndParseWithRrtrBlock) {
+ const Rrtr kRrtr = Rand<Rrtr>();
+ ExtendedReports xr;
+ xr.SetSenderSsrc(kSenderSsrc);
+ xr.SetRrtr(kRrtr);
+ rtc::Buffer packet = xr.Build();
+
+ ExtendedReports mparsed;
+ EXPECT_TRUE(test::ParseSinglePacket(packet, &mparsed));
+ const ExtendedReports& parsed = mparsed;
+
+ EXPECT_EQ(kSenderSsrc, parsed.sender_ssrc());
+ EXPECT_EQ(kRrtr, parsed.rrtr());
+}
+
+TEST_F(RtcpPacketExtendedReportsTest, CreateAndParseWithDlrrWithOneSubBlock) {
+ const ReceiveTimeInfo kTimeInfo = Rand<ReceiveTimeInfo>();
+ ExtendedReports xr;
+ xr.SetSenderSsrc(kSenderSsrc);
+ xr.AddDlrrItem(kTimeInfo);
+
+ rtc::Buffer packet = xr.Build();
+
+ ExtendedReports mparsed;
+ EXPECT_TRUE(test::ParseSinglePacket(packet, &mparsed));
+ const ExtendedReports& parsed = mparsed;
+
+ EXPECT_EQ(kSenderSsrc, parsed.sender_ssrc());
+ EXPECT_THAT(parsed.dlrr().sub_blocks(), ElementsAre(kTimeInfo));
+}
+
+TEST_F(RtcpPacketExtendedReportsTest, CreateAndParseWithDlrrWithTwoSubBlocks) {
+ const ReceiveTimeInfo kTimeInfo1 = Rand<ReceiveTimeInfo>();
+ const ReceiveTimeInfo kTimeInfo2 = Rand<ReceiveTimeInfo>();
+ ExtendedReports xr;
+ xr.SetSenderSsrc(kSenderSsrc);
+ xr.AddDlrrItem(kTimeInfo1);
+ xr.AddDlrrItem(kTimeInfo2);
+
+ rtc::Buffer packet = xr.Build();
+
+ ExtendedReports mparsed;
+ EXPECT_TRUE(test::ParseSinglePacket(packet, &mparsed));
+ const ExtendedReports& parsed = mparsed;
+
+ EXPECT_EQ(kSenderSsrc, parsed.sender_ssrc());
+ EXPECT_THAT(parsed.dlrr().sub_blocks(), ElementsAre(kTimeInfo1, kTimeInfo2));
+}
+
+TEST_F(RtcpPacketExtendedReportsTest, CreateLimitsTheNumberOfDlrrSubBlocks) {
+ const ReceiveTimeInfo kTimeInfo = Rand<ReceiveTimeInfo>();
+ ExtendedReports xr;
+
+ for (size_t i = 0; i < ExtendedReports::kMaxNumberOfDlrrItems; ++i)
+ EXPECT_TRUE(xr.AddDlrrItem(kTimeInfo));
+ EXPECT_FALSE(xr.AddDlrrItem(kTimeInfo));
+
+ EXPECT_THAT(xr.dlrr().sub_blocks(),
+ SizeIs(ExtendedReports::kMaxNumberOfDlrrItems));
+}
+
+TEST_F(RtcpPacketExtendedReportsTest, CreateAndParseWithMaximumReportBlocks) {
+ const Rrtr kRrtr = Rand<Rrtr>();
+
+ ExtendedReports xr;
+ xr.SetSenderSsrc(kSenderSsrc);
+ xr.SetRrtr(kRrtr);
+ for (size_t i = 0; i < ExtendedReports::kMaxNumberOfDlrrItems; ++i)
+ xr.AddDlrrItem(Rand<ReceiveTimeInfo>());
+
+ rtc::Buffer packet = xr.Build();
+
+ ExtendedReports mparsed;
+ EXPECT_TRUE(test::ParseSinglePacket(packet, &mparsed));
+ const ExtendedReports& parsed = mparsed;
+
+ EXPECT_EQ(kSenderSsrc, parsed.sender_ssrc());
+ EXPECT_EQ(kRrtr, parsed.rrtr());
+ EXPECT_THAT(parsed.dlrr().sub_blocks(),
+ ElementsAreArray(xr.dlrr().sub_blocks()));
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/fir.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/fir.cc
new file mode 100644
index 0000000000..fd4a4c947a
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/fir.cc
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtcp_packet/fir.h"
+
+#include "modules/rtp_rtcp/source/byte_io.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/common_header.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+namespace rtcp {
+constexpr uint8_t Fir::kFeedbackMessageType;
+// RFC 4585: Feedback format.
+// Common packet format:
+//
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |V=2|P| FMT | PT | length |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | SSRC of packet sender |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | SSRC of media source (unused) = 0 |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// : Feedback Control Information (FCI) :
+// : :
+// Full intra request (FIR) (RFC 5104).
+// The Feedback Control Information (FCI) for the Full Intra Request
+// consists of one or more FCI entries.
+// FCI:
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | SSRC |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | Seq nr. | Reserved = 0 |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+Fir::Fir() = default;
+
+Fir::Fir(const Fir& fir) = default;
+
+Fir::~Fir() = default;
+
+bool Fir::Parse(const CommonHeader& packet) {
+ RTC_DCHECK_EQ(packet.type(), kPacketType);
+ RTC_DCHECK_EQ(packet.fmt(), kFeedbackMessageType);
+
+ // The FCI field MUST contain one or more FIR entries.
+ if (packet.payload_size_bytes() < kCommonFeedbackLength + kFciLength) {
+ RTC_LOG(LS_WARNING) << "Packet is too small to be a valid FIR packet.";
+ return false;
+ }
+
+ if ((packet.payload_size_bytes() - kCommonFeedbackLength) % kFciLength != 0) {
+ RTC_LOG(LS_WARNING) << "Invalid size for a valid FIR packet.";
+ return false;
+ }
+
+ ParseCommonFeedback(packet.payload());
+
+ size_t number_of_fci_items =
+ (packet.payload_size_bytes() - kCommonFeedbackLength) / kFciLength;
+ const uint8_t* next_fci = packet.payload() + kCommonFeedbackLength;
+ items_.resize(number_of_fci_items);
+ for (Request& request : items_) {
+ request.ssrc = ByteReader<uint32_t>::ReadBigEndian(next_fci);
+ request.seq_nr = ByteReader<uint8_t>::ReadBigEndian(next_fci + 4);
+ next_fci += kFciLength;
+ }
+ return true;
+}
+
+size_t Fir::BlockLength() const {
+ return kHeaderLength + kCommonFeedbackLength + kFciLength * items_.size();
+}
+
+bool Fir::Create(uint8_t* packet,
+ size_t* index,
+ size_t max_length,
+ PacketReadyCallback callback) const {
+ RTC_DCHECK(!items_.empty());
+ while (*index + BlockLength() > max_length) {
+ if (!OnBufferFull(packet, index, callback))
+ return false;
+ }
+ size_t index_end = *index + BlockLength();
+ CreateHeader(kFeedbackMessageType, kPacketType, HeaderLength(), packet,
+ index);
+ RTC_DCHECK_EQ(Psfb::media_ssrc(), 0);
+ CreateCommonFeedback(packet + *index);
+ *index += kCommonFeedbackLength;
+
+ constexpr uint32_t kReserved = 0;
+ for (const Request& request : items_) {
+ ByteWriter<uint32_t>::WriteBigEndian(packet + *index, request.ssrc);
+ ByteWriter<uint8_t>::WriteBigEndian(packet + *index + 4, request.seq_nr);
+ ByteWriter<uint32_t, 3>::WriteBigEndian(packet + *index + 5, kReserved);
+ *index += kFciLength;
+ }
+ RTC_CHECK_EQ(*index, index_end);
+ return true;
+}
+} // namespace rtcp
+} // namespace webrtc
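// Sketch, not part of the patch above: the sizes that Parse() checks, worked
// for a single FCI entry. The payload is the 8-byte common feedback part plus
// 8 bytes per FCI entry, and the RTCP length field is the packet size in
// 32-bit words minus one.
constexpr size_t kExampleFirPayload = 8 + 8 * 1;              // One FCI entry.
constexpr size_t kExampleFirPacket = 4 + kExampleFirPayload;  // 20 bytes.
static_assert(kExampleFirPacket / 4 - 1 == 4,
              "Matches the length field 0x0004 in fir_unittest.cc");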
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/fir.h b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/fir.h
new file mode 100644
index 0000000000..383dc96114
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/fir.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_FIR_H_
+#define MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_FIR_H_
+
+#include <vector>
+
+#include "modules/rtp_rtcp/source/rtcp_packet/psfb.h"
+
+namespace webrtc {
+namespace rtcp {
+class CommonHeader;
+// Full intra request (FIR) (RFC 5104).
+class Fir : public Psfb {
+ public:
+ static constexpr uint8_t kFeedbackMessageType = 4;
+ struct Request {
+ Request() : ssrc(0), seq_nr(0) {}
+ Request(uint32_t ssrc, uint8_t seq_nr) : ssrc(ssrc), seq_nr(seq_nr) {}
+ uint32_t ssrc;
+ uint8_t seq_nr;
+ };
+
+ Fir();
+ Fir(const Fir& fir);
+ ~Fir() override;
+
+ // Parse assumes header is already parsed and validated.
+ bool Parse(const CommonHeader& packet);
+
+ void AddRequestTo(uint32_t ssrc, uint8_t seq_num) {
+ items_.emplace_back(ssrc, seq_num);
+ }
+ const std::vector<Request>& requests() const { return items_; }
+
+ size_t BlockLength() const override;
+
+ bool Create(uint8_t* packet,
+ size_t* index,
+ size_t max_length,
+ PacketReadyCallback callback) const override;
+
+ private:
+ static constexpr size_t kFciLength = 8;
+
+  // The media source SSRC is unused in FIR packets; shadow base functions.
+ void SetMediaSsrc(uint32_t ssrc);
+ uint32_t media_ssrc() const;
+
+ std::vector<Request> items_;
+};
+} // namespace rtcp
+} // namespace webrtc
+#endif // MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_FIR_H_
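// Sketch, not part of the patch above: minimal sender-side use of the class.
// Per RFC 5104 the sequence number is a per-media-SSRC command counter that
// the requester increments for each new FIR request; retransmissions of the
// same request keep the old number.
rtc::Buffer BuildFirRequest(uint32_t sender_ssrc,
                            uint32_t media_ssrc,
                            uint8_t& command_seq_nr) {
  webrtc::rtcp::Fir fir;
  fir.SetSenderSsrc(sender_ssrc);
  fir.AddRequestTo(media_ssrc, ++command_seq_nr);
  return fir.Build();
}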
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/fir_unittest.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/fir_unittest.cc
new file mode 100644
index 0000000000..01593e12ba
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/fir_unittest.cc
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtcp_packet/fir.h"
+
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "test/rtcp_packet_parser.h"
+
+using ::testing::AllOf;
+using ::testing::ElementsAre;
+using ::testing::ElementsAreArray;
+using ::testing::Eq;
+using ::testing::Field;
+using ::testing::make_tuple;
+using webrtc::rtcp::Fir;
+
+namespace webrtc {
+namespace {
+
+constexpr uint32_t kSenderSsrc = 0x12345678;
+constexpr uint32_t kRemoteSsrc = 0x23456789;
+constexpr uint8_t kSeqNr = 13;
+// Manually created Fir packet matching constants above.
+constexpr uint8_t kPacket[] = {0x84, 206, 0x00, 0x04, 0x12, 0x34, 0x56,
+ 0x78, 0x00, 0x00, 0x00, 0x00, 0x23, 0x45,
+ 0x67, 0x89, 0x0d, 0x00, 0x00, 0x00};
+} // namespace
+
+TEST(RtcpPacketFirTest, Parse) {
+ Fir mutable_parsed;
+ EXPECT_TRUE(test::ParseSinglePacket(kPacket, &mutable_parsed));
+ const Fir& parsed = mutable_parsed; // Read values from constant object.
+
+ EXPECT_EQ(kSenderSsrc, parsed.sender_ssrc());
+ EXPECT_THAT(parsed.requests(),
+ ElementsAre(AllOf(Field(&Fir::Request::ssrc, Eq(kRemoteSsrc)),
+ Field(&Fir::Request::seq_nr, Eq(kSeqNr)))));
+}
+
+TEST(RtcpPacketFirTest, Create) {
+ Fir fir;
+ fir.SetSenderSsrc(kSenderSsrc);
+ fir.AddRequestTo(kRemoteSsrc, kSeqNr);
+
+ rtc::Buffer packet = fir.Build();
+
+ EXPECT_THAT(make_tuple(packet.data(), packet.size()),
+ ElementsAreArray(kPacket));
+}
+
+TEST(RtcpPacketFirTest, TwoFciEntries) {
+ Fir fir;
+ fir.SetSenderSsrc(kSenderSsrc);
+ fir.AddRequestTo(kRemoteSsrc, kSeqNr);
+ fir.AddRequestTo(kRemoteSsrc + 1, kSeqNr + 1);
+
+ rtc::Buffer packet = fir.Build();
+ Fir parsed;
+ EXPECT_TRUE(test::ParseSinglePacket(packet, &parsed));
+
+ EXPECT_EQ(kSenderSsrc, parsed.sender_ssrc());
+ EXPECT_THAT(parsed.requests(),
+ ElementsAre(AllOf(Field(&Fir::Request::ssrc, Eq(kRemoteSsrc)),
+ Field(&Fir::Request::seq_nr, Eq(kSeqNr))),
+ AllOf(Field(&Fir::Request::ssrc, Eq(kRemoteSsrc + 1)),
+ Field(&Fir::Request::seq_nr, Eq(kSeqNr + 1)))));
+}
+
+TEST(RtcpPacketFirTest, ParseFailsOnZeroFciEntries) {
+ constexpr uint8_t kPacketWithoutFci[] = {0x84, 206, 0x00, 0x02, 0x12, 0x34,
+ 0x56, 0x78, 0x00, 0x00, 0x00, 0x00};
+ Fir parsed;
+ EXPECT_FALSE(test::ParseSinglePacket(kPacketWithoutFci, &parsed));
+}
+
+TEST(RtcpPacketFirTest, ParseFailsOnFractionalFciEntries) {
+ constexpr uint8_t kPacketWithOneAndHalfFci[] = {
+ 0x84, 206, 0x00, 0x05, 0x12, 0x34, 0x56, 0x78, 0x00, 0x00, 0x00, 0x00,
+ 0x23, 0x45, 0x67, 0x89, 0x0d, 0x00, 0x00, 0x00, 'h', 'a', 'l', 'f'};
+
+ Fir parsed;
+ EXPECT_FALSE(test::ParseSinglePacket(kPacketWithOneAndHalfFci, &parsed));
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/loss_notification.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/loss_notification.cc
new file mode 100644
index 0000000000..0817846f95
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/loss_notification.cc
@@ -0,0 +1,133 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtcp_packet/loss_notification.h"
+
+#include "modules/rtp_rtcp/source/byte_io.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/common_header.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+namespace rtcp {
+
+// Loss Notification
+// -----------------
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |V=2|P| FMT=15 | PT=206 | length |
+// +=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
+// 0 | SSRC of packet sender |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// 4 | SSRC of media source |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// 8 | Unique identifier 'L' 'N' 'T' 'F' |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// 12 | Last Decoded Sequence Number | Last Received SeqNum Delta |D|
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+LossNotification::LossNotification()
+ : last_decoded_(0), last_received_(0), decodability_flag_(false) {}
+
+LossNotification::LossNotification(uint16_t last_decoded,
+ uint16_t last_received,
+ bool decodability_flag)
+ : last_decoded_(last_decoded),
+ last_received_(last_received),
+ decodability_flag_(decodability_flag) {}
+
+LossNotification::LossNotification(const LossNotification& rhs) = default;
+
+LossNotification::~LossNotification() = default;
+
+size_t LossNotification::BlockLength() const {
+ return kHeaderLength + kCommonFeedbackLength + kLossNotificationPayloadLength;
+}
+
+bool LossNotification::Create(uint8_t* packet,
+ size_t* index,
+ size_t max_length,
+ PacketReadyCallback callback) const {
+ while (*index + BlockLength() > max_length) {
+ if (!OnBufferFull(packet, index, callback))
+ return false;
+ }
+
+ const size_t index_end = *index + BlockLength();
+
+ // Note: `index` updated by the function below.
+ CreateHeader(Psfb::kAfbMessageType, kPacketType, HeaderLength(), packet,
+ index);
+
+ CreateCommonFeedback(packet + *index);
+ *index += kCommonFeedbackLength;
+
+ ByteWriter<uint32_t>::WriteBigEndian(packet + *index, kUniqueIdentifier);
+ *index += sizeof(uint32_t);
+
+ ByteWriter<uint16_t>::WriteBigEndian(packet + *index, last_decoded_);
+ *index += sizeof(uint16_t);
+
+ const uint16_t last_received_delta = last_received_ - last_decoded_;
+ RTC_DCHECK_LE(last_received_delta, 0x7fff);
+ const uint16_t last_received_delta_and_decodability =
+ (last_received_delta << 1) | (decodability_flag_ ? 0x0001 : 0x0000);
+
+ ByteWriter<uint16_t>::WriteBigEndian(packet + *index,
+ last_received_delta_and_decodability);
+ *index += sizeof(uint16_t);
+
+ RTC_DCHECK_EQ(index_end, *index);
+ return true;
+}
+
+bool LossNotification::Parse(const CommonHeader& packet) {
+ RTC_DCHECK_EQ(packet.type(), kPacketType);
+ RTC_DCHECK_EQ(packet.fmt(), Psfb::kAfbMessageType);
+
+ if (packet.payload_size_bytes() <
+ kCommonFeedbackLength + kLossNotificationPayloadLength) {
+ return false;
+ }
+
+ const uint8_t* const payload = packet.payload();
+
+ if (ByteReader<uint32_t>::ReadBigEndian(&payload[8]) != kUniqueIdentifier) {
+ return false;
+ }
+
+ ParseCommonFeedback(payload);
+
+ last_decoded_ = ByteReader<uint16_t>::ReadBigEndian(&payload[12]);
+
+ const uint16_t last_received_delta_and_decodability =
+ ByteReader<uint16_t>::ReadBigEndian(&payload[14]);
+ last_received_ = last_decoded_ + (last_received_delta_and_decodability >> 1);
+ decodability_flag_ = (last_received_delta_and_decodability & 0x0001);
+
+ return true;
+}
+
+bool LossNotification::Set(uint16_t last_decoded,
+ uint16_t last_received,
+ bool decodability_flag) {
+ const uint16_t delta = last_received - last_decoded;
+ if (delta > 0x7fff) {
+ return false;
+ }
+ last_received_ = last_received;
+ last_decoded_ = last_decoded;
+ decodability_flag_ = decodability_flag;
+ return true;
+}
+
+} // namespace rtcp
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/loss_notification.h b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/loss_notification.h
new file mode 100644
index 0000000000..0f70cf75c3
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/loss_notification.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_LOSS_NOTIFICATION_H_
+#define MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_LOSS_NOTIFICATION_H_
+
+#include "absl/base/attributes.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/common_header.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/psfb.h"
+
+namespace webrtc {
+namespace rtcp {
+
+class LossNotification : public Psfb {
+ public:
+ LossNotification();
+ LossNotification(uint16_t last_decoded,
+ uint16_t last_received,
+ bool decodability_flag);
+ LossNotification(const LossNotification& other);
+ ~LossNotification() override;
+
+ size_t BlockLength() const override;
+
+ ABSL_MUST_USE_RESULT
+ bool Create(uint8_t* packet,
+ size_t* index,
+ size_t max_length,
+ PacketReadyCallback callback) const override;
+
+ // Parse assumes header is already parsed and validated.
+ ABSL_MUST_USE_RESULT
+ bool Parse(const CommonHeader& packet);
+
+ // Set all of the values transmitted by the loss notification message.
+  // If the values cannot be represented by a loss notification message,
+ // false is returned, and no change is made to the object; this happens
+ // when `last_received` is ahead of `last_decoded` by more than 0x7fff.
+ // This is because `last_received` is represented on the wire as a delta,
+ // and only 15 bits are available for that delta.
+ ABSL_MUST_USE_RESULT
+ bool Set(uint16_t last_decoded,
+ uint16_t last_received,
+ bool decodability_flag);
+
+  // RTP sequence number of the first packet belonging to the last decoded
+  // non-discardable frame.
+ uint16_t last_decoded() const { return last_decoded_; }
+
+ // RTP sequence number of the last received packet.
+ uint16_t last_received() const { return last_received_; }
+
+ // A decodability flag, whose specific meaning depends on the last-received
+ // RTP sequence number. The decodability flag is true if and only if all of
+ // the frame's dependencies are known to be decodable, and the frame itself
+ // is not yet known to be unassemblable.
+ // * Clarification #1: In a multi-packet frame, the first packet's
+ // dependencies are known, but it is not yet known whether all parts
+ // of the current frame will be received.
+ // * Clarification #2: In a multi-packet frame, the dependencies would be
+ // unknown if the first packet was not received. Then, the packet will
+ // be known-unassemblable.
+ bool decodability_flag() const { return decodability_flag_; }
+
+ private:
+ static constexpr uint32_t kUniqueIdentifier = 0x4C4E5446; // 'L' 'N' 'T' 'F'.
+ static constexpr size_t kLossNotificationPayloadLength = 8;
+
+ uint16_t last_decoded_;
+ uint16_t last_received_;
+ bool decodability_flag_;
+};
+} // namespace rtcp
+} // namespace webrtc
+#endif // MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_LOSS_NOTIFICATION_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/loss_notification_unittest.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/loss_notification_unittest.cc
new file mode 100644
index 0000000000..c38e7f4438
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/loss_notification_unittest.cc
@@ -0,0 +1,136 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtcp_packet/loss_notification.h"
+
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "test/rtcp_packet_parser.h"
+
+namespace webrtc {
+
+using ::testing::ElementsAreArray;
+using ::testing::make_tuple;
+using ::webrtc::rtcp::LossNotification;
+
+TEST(RtcpPacketLossNotificationTest, SetWithIllegalValuesFails) {
+ constexpr uint16_t kLastDecoded = 0x3c7b;
+ constexpr uint16_t kLastReceived = kLastDecoded + 0x7fff + 1;
+ constexpr bool kDecodabilityFlag = true;
+ LossNotification loss_notification;
+ EXPECT_FALSE(
+ loss_notification.Set(kLastDecoded, kLastReceived, kDecodabilityFlag));
+}
+
+TEST(RtcpPacketLossNotificationTest, SetWithLegalValuesSucceeds) {
+ constexpr uint16_t kLastDecoded = 0x3c7b;
+ constexpr uint16_t kLastReceived = kLastDecoded + 0x7fff;
+ constexpr bool kDecodabilityFlag = true;
+ LossNotification loss_notification;
+ EXPECT_TRUE(
+ loss_notification.Set(kLastDecoded, kLastReceived, kDecodabilityFlag));
+}
+
+TEST(RtcpPacketLossNotificationTest, CreateProducesExpectedWireFormat) {
+ // Note that (0x6542 >> 1) is used just to make the pattern in kPacket
+ // more apparent; there's nothing truly special about the value,
+ // it's only an implementation detail that last-received is represented
+ // as a delta from last-decoded, and that this delta is shifted before
+ // it's put on the wire.
+ constexpr uint16_t kLastDecoded = 0x3c7b;
+ constexpr uint16_t kLastReceived = kLastDecoded + (0x6542 >> 1);
+ constexpr bool kDecodabilityFlag = true;
+
+ const uint8_t kPacket[] = {0x8f, 206, 0x00, 0x04, 0x12, 0x34, 0x56, 0x78, //
+ 0xab, 0xcd, 0xef, 0x01, 'L', 'N', 'T', 'F', //
+ 0x3c, 0x7b, 0x65, 0x43};
+
+ LossNotification loss_notification;
+ loss_notification.SetSenderSsrc(0x12345678);
+ loss_notification.SetMediaSsrc(0xabcdef01);
+ ASSERT_TRUE(
+ loss_notification.Set(kLastDecoded, kLastReceived, kDecodabilityFlag));
+
+ rtc::Buffer packet = loss_notification.Build();
+
+ EXPECT_THAT(make_tuple(packet.data(), packet.size()),
+ ElementsAreArray(kPacket));
+}
+
+TEST(RtcpPacketLossNotificationTest,
+ ParseFailsOnTooSmallPacketToBeLossNotification) {
+ uint8_t packet[] = {0x8f, 206, 0x00, 0x04, 0x12, 0x34, 0x56, 0x78, //
+ 0xab, 0xcd, 0xef, 0x01, 'L', 'N', 'T', 'F', //
+ 0x3c, 0x7b, 0x65, 0x43};
+ size_t packet_length_bytes = sizeof(packet);
+
+ LossNotification loss_notification;
+
+ // First, prove that the failure we're expecting to see happens because of
+ // the length, by showing that before the modification to the length,
+ // the packet was correctly parsed.
+ ASSERT_TRUE(
+ test::ParseSinglePacket(packet, packet_length_bytes, &loss_notification));
+
+ // Show that after shaving off a word, the packet is no longer parsable.
+ packet[3] -= 1; // Change the `length` field of the RTCP packet.
+ packet_length_bytes -= 4; // Effectively forget the last 32-bit word.
+ EXPECT_FALSE(
+ test::ParseSinglePacket(packet, packet_length_bytes, &loss_notification));
+}
+
+TEST(RtcpPacketLossNotificationTest,
+ ParseFailsWhenUniqueIdentifierIsNotLossNotification) {
+ uint8_t packet[] = {0x8f, 206, 0x00, 0x04, 0x12, 0x34, 0x56, 0x78, //
+ 0xab, 0xcd, 0xef, 0x01, 'L', 'N', 'T', 'F', //
+ 0x3c, 0x7b, 0x65, 0x43};
+
+ LossNotification loss_notification;
+
+ // First, prove that the failure we're expecting to see happens because of
+ // the identifier, by showing that before the modification to the identifier,
+ // the packet was correctly parsed.
+ ASSERT_TRUE(test::ParseSinglePacket(packet, &loss_notification));
+
+ // Show that after changing the identifier, the packet is no longer parsable.
+ RTC_DCHECK_EQ(packet[12], 'L');
+ RTC_DCHECK_EQ(packet[13], 'N');
+ RTC_DCHECK_EQ(packet[14], 'T');
+ RTC_DCHECK_EQ(packet[15], 'F');
+ packet[14] = 'x';
+ EXPECT_FALSE(test::ParseSinglePacket(packet, &loss_notification));
+}
+
+TEST(RtcpPacketLossNotificationTest,
+ ParseLegalLossNotificationMessagesCorrectly) {
+ // Note that (0x6542 >> 1) is used just to make the pattern in kPacket
+  // more apparent; there's nothing truly special about the value. It's
+  // only an implementation detail that last-received is represented
+ // as a delta from last-decoded, and that this delta is shifted before
+ // it's put on the wire.
+ constexpr uint16_t kLastDecoded = 0x3c7b;
+ constexpr uint16_t kLastReceived = kLastDecoded + (0x6542 >> 1);
+ constexpr bool kDecodabilityFlag = true;
+
+ const uint8_t kPacket[] = {0x8f, 206, 0x00, 0x04, 0x12, 0x34, 0x56, 0x78, //
+ 0xab, 0xcd, 0xef, 0x01, 'L', 'N', 'T', 'F', //
+ 0x3c, 0x7b, 0x65, 0x43};
+
+ LossNotification loss_notification;
+ EXPECT_TRUE(test::ParseSinglePacket(kPacket, &loss_notification));
+
+ EXPECT_EQ(loss_notification.sender_ssrc(), 0x12345678u);
+ EXPECT_EQ(loss_notification.media_ssrc(), 0xabcdef01u);
+ EXPECT_EQ(loss_notification.last_decoded(), kLastDecoded);
+ EXPECT_EQ(loss_notification.last_received(), kLastReceived);
+ EXPECT_EQ(loss_notification.decodability_flag(), kDecodabilityFlag);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/nack.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/nack.cc
new file mode 100644
index 0000000000..6fe7eade62
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/nack.cc
@@ -0,0 +1,176 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtcp_packet/nack.h"
+
+#include <algorithm>
+#include <cstdint>
+#include <utility>
+
+#include "modules/rtp_rtcp/source/byte_io.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/common_header.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+namespace rtcp {
+constexpr uint8_t Nack::kFeedbackMessageType;
+constexpr size_t Nack::kNackItemLength;
+// RFC 4585: Feedback format.
+//
+// Common packet format:
+//
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |V=2|P| FMT | PT | length |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// 0 | SSRC of packet sender |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// 4 | SSRC of media source |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// : Feedback Control Information (FCI) :
+// : :
+//
+// Generic NACK (RFC 4585).
+//
+// FCI:
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | PID | BLP |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
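+//
+// For illustration (mirrored by nack_unittest.cc): packet ids {0, 1, 3, 8, 16}
+// fit a single FCI item with PID = 0 and BLP = 0x8085, because ids 1, 3, 8
+// and 16 set bits 0, 2, 7 and 15 of the bitmask, respectively.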
+Nack::Nack() = default;
+Nack::Nack(const Nack& rhs) = default;
+Nack::~Nack() = default;
+
+bool Nack::Parse(const CommonHeader& packet) {
+ RTC_DCHECK_EQ(packet.type(), kPacketType);
+ RTC_DCHECK_EQ(packet.fmt(), kFeedbackMessageType);
+
+ if (packet.payload_size_bytes() < kCommonFeedbackLength + kNackItemLength) {
+ RTC_LOG(LS_WARNING) << "Payload length " << packet.payload_size_bytes()
+ << " is too small for a Nack.";
+ return false;
+ }
+ size_t nack_items =
+ (packet.payload_size_bytes() - kCommonFeedbackLength) / kNackItemLength;
+
+ ParseCommonFeedback(packet.payload());
+ const uint8_t* next_nack = packet.payload() + kCommonFeedbackLength;
+
+ packet_ids_.clear();
+ packed_.resize(nack_items);
+ for (size_t index = 0; index < nack_items; ++index) {
+ packed_[index].first_pid = ByteReader<uint16_t>::ReadBigEndian(next_nack);
+ packed_[index].bitmask = ByteReader<uint16_t>::ReadBigEndian(next_nack + 2);
+ next_nack += kNackItemLength;
+ }
+ Unpack();
+
+ return true;
+}
+
+size_t Nack::BlockLength() const {
+ return kHeaderLength + kCommonFeedbackLength +
+ packed_.size() * kNackItemLength;
+}
+
+bool Nack::Create(uint8_t* packet,
+ size_t* index,
+ size_t max_length,
+ PacketReadyCallback callback) const {
+ RTC_DCHECK(!packed_.empty());
+ // If nack list can't fit in packet, try to fragment.
+ constexpr size_t kNackHeaderLength = kHeaderLength + kCommonFeedbackLength;
+ for (size_t nack_index = 0; nack_index < packed_.size();) {
+ size_t bytes_left_in_buffer = max_length - *index;
+ if (bytes_left_in_buffer < kNackHeaderLength + kNackItemLength) {
+ if (!OnBufferFull(packet, index, callback))
+ return false;
+ continue;
+ }
+ size_t num_nack_fields =
+ std::min((bytes_left_in_buffer - kNackHeaderLength) / kNackItemLength,
+ packed_.size() - nack_index);
+
+ size_t payload_size_bytes =
+ kCommonFeedbackLength + (num_nack_fields * kNackItemLength);
+ size_t payload_size_32bits =
+ rtc::CheckedDivExact<size_t>(payload_size_bytes, 4);
+ CreateHeader(kFeedbackMessageType, kPacketType, payload_size_32bits, packet,
+ index);
+
+ CreateCommonFeedback(packet + *index);
+ *index += kCommonFeedbackLength;
+
+ size_t nack_end_index = nack_index + num_nack_fields;
+ for (; nack_index < nack_end_index; ++nack_index) {
+ const PackedNack& item = packed_[nack_index];
+ ByteWriter<uint16_t>::WriteBigEndian(packet + *index + 0, item.first_pid);
+ ByteWriter<uint16_t>::WriteBigEndian(packet + *index + 2, item.bitmask);
+ *index += kNackItemLength;
+ }
+ RTC_DCHECK_LE(*index, max_length);
+ }
+
+ return true;
+}
+
+void Nack::SetPacketIds(const uint16_t* nack_list, size_t length) {
+ RTC_DCHECK(nack_list);
+ SetPacketIds(std::vector<uint16_t>(nack_list, nack_list + length));
+}
+
+void Nack::SetPacketIds(std::vector<uint16_t> nack_list) {
+ RTC_DCHECK(packet_ids_.empty());
+ RTC_DCHECK(packed_.empty());
+ packet_ids_ = std::move(nack_list);
+ Pack();
+}
+
+void Nack::Pack() {
+ RTC_DCHECK(!packet_ids_.empty());
+ RTC_DCHECK(packed_.empty());
+ auto it = packet_ids_.begin();
+ const auto end = packet_ids_.end();
+ while (it != end) {
+ PackedNack item;
+ item.first_pid = *it++;
+ // Bitmask specifies losses in any of the 16 packets following the pid.
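+    // E.g. ids {5, 6, 21} pack as {first_pid: 5, bitmask: 0x8001}, since 6
+    // sets bit 0 and 21 sets bit 15; any id further ahead starts a new item.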
+ item.bitmask = 0;
+ while (it != end) {
+ uint16_t shift = static_cast<uint16_t>(*it - item.first_pid - 1);
+ if (shift <= 15) {
+ item.bitmask |= (1 << shift);
+ ++it;
+ } else {
+ break;
+ }
+ }
+ packed_.push_back(item);
+ }
+}
+
+void Nack::Unpack() {
+ RTC_DCHECK(packet_ids_.empty());
+ RTC_DCHECK(!packed_.empty());
+ for (const PackedNack& item : packed_) {
+ packet_ids_.push_back(item.first_pid);
+ uint16_t pid = item.first_pid + 1;
+ for (uint16_t bitmask = item.bitmask; bitmask != 0; bitmask >>= 1, ++pid) {
+ if (bitmask & 1)
+ packet_ids_.push_back(pid);
+ }
+ }
+}
+
+} // namespace rtcp
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/nack.h b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/nack.h
new file mode 100644
index 0000000000..9153733fb9
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/nack.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_NACK_H_
+#define MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_NACK_H_
+
+#include <vector>
+
+#include "modules/rtp_rtcp/source/rtcp_packet/rtpfb.h"
+
+namespace webrtc {
+namespace rtcp {
+class CommonHeader;
+
+class Nack : public Rtpfb {
+ public:
+ static constexpr uint8_t kFeedbackMessageType = 1;
+ Nack();
+ Nack(const Nack&);
+ ~Nack() override;
+
+ // Parse assumes header is already parsed and validated.
+ bool Parse(const CommonHeader& packet);
+
+ void SetPacketIds(const uint16_t* nack_list, size_t length);
+ void SetPacketIds(std::vector<uint16_t> nack_list);
+ const std::vector<uint16_t>& packet_ids() const { return packet_ids_; }
+
+ size_t BlockLength() const override;
+
+ bool Create(uint8_t* packet,
+ size_t* index,
+ size_t max_length,
+ PacketReadyCallback callback) const override;
+
+ private:
+ static constexpr size_t kNackItemLength = 4;
+ struct PackedNack {
+ uint16_t first_pid;
+ uint16_t bitmask;
+ };
+
+  void Pack();    // Fills packed_ using packet_ids_ (used in SetPacketIds).
+  void Unpack();  // Fills packet_ids_ using packed_ (used in Parse).
+
+ std::vector<PackedNack> packed_;
+ std::vector<uint16_t> packet_ids_;
+};
+
+} // namespace rtcp
+} // namespace webrtc
+#endif // MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_NACK_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/nack_unittest.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/nack_unittest.cc
new file mode 100644
index 0000000000..aabae0dc48
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/nack_unittest.cc
@@ -0,0 +1,171 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtcp_packet/nack.h"
+
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "test/rtcp_packet_parser.h"
+
+namespace webrtc {
+namespace {
+
+using ::testing::_;
+using ::testing::ElementsAre;
+using ::testing::ElementsAreArray;
+using ::testing::Invoke;
+using ::testing::make_tuple;
+using ::testing::MockFunction;
+using ::testing::UnorderedElementsAreArray;
+using ::webrtc::rtcp::Nack;
+
+constexpr uint32_t kSenderSsrc = 0x12345678;
+constexpr uint32_t kRemoteSsrc = 0x23456789;
+
+constexpr uint16_t kList[] = {0, 1, 3, 8, 16};
+constexpr size_t kListLength = sizeof(kList) / sizeof(kList[0]);
+constexpr uint8_t kVersionBits = 2 << 6;
+// clang-format off
+constexpr uint8_t kPacket[] = {
+ kVersionBits | Nack::kFeedbackMessageType, Nack::kPacketType, 0, 3,
+ 0x12, 0x34, 0x56, 0x78,
+ 0x23, 0x45, 0x67, 0x89,
+ 0x00, 0x00, 0x80, 0x85};
+
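+// kWrapList packs into four nack items; sequence number arithmetic wraps
+// around 0xffff, e.g. id 0x0000 is covered by bit 1 of the item with
+// PID = 0xfffe.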
+constexpr uint16_t kWrapList[] = {0xffdc, 0xffec, 0xfffe, 0xffff, 0x0000,
+ 0x0001, 0x0003, 0x0014, 0x0064};
+constexpr size_t kWrapListLength = sizeof(kWrapList) / sizeof(kWrapList[0]);
+constexpr uint8_t kWrapPacket[] = {
+ kVersionBits | Nack::kFeedbackMessageType, Nack::kPacketType, 0, 6,
+ 0x12, 0x34, 0x56, 0x78,
+ 0x23, 0x45, 0x67, 0x89,
+ 0xff, 0xdc, 0x80, 0x00,
+ 0xff, 0xfe, 0x00, 0x17,
+ 0x00, 0x14, 0x00, 0x00,
+ 0x00, 0x64, 0x00, 0x00};
+constexpr uint8_t kTooSmallPacket[] = {
+ kVersionBits | Nack::kFeedbackMessageType, Nack::kPacketType, 0, 2,
+ 0x12, 0x34, 0x56, 0x78,
+ 0x23, 0x45, 0x67, 0x89};
+// clang-format on
+} // namespace
+
+TEST(RtcpPacketNackTest, Create) {
+ Nack nack;
+ nack.SetSenderSsrc(kSenderSsrc);
+ nack.SetMediaSsrc(kRemoteSsrc);
+ nack.SetPacketIds(kList, kListLength);
+
+ rtc::Buffer packet = nack.Build();
+
+ EXPECT_THAT(make_tuple(packet.data(), packet.size()),
+ ElementsAreArray(kPacket));
+}
+
+TEST(RtcpPacketNackTest, Parse) {
+ Nack parsed;
+ EXPECT_TRUE(test::ParseSinglePacket(kPacket, &parsed));
+ const Nack& const_parsed = parsed;
+
+ EXPECT_EQ(kSenderSsrc, const_parsed.sender_ssrc());
+ EXPECT_EQ(kRemoteSsrc, const_parsed.media_ssrc());
+ EXPECT_THAT(const_parsed.packet_ids(), ElementsAreArray(kList));
+}
+
+TEST(RtcpPacketNackTest, CreateWrap) {
+ Nack nack;
+ nack.SetSenderSsrc(kSenderSsrc);
+ nack.SetMediaSsrc(kRemoteSsrc);
+ nack.SetPacketIds(kWrapList, kWrapListLength);
+
+ rtc::Buffer packet = nack.Build();
+
+ EXPECT_THAT(make_tuple(packet.data(), packet.size()),
+ ElementsAreArray(kWrapPacket));
+}
+
+TEST(RtcpPacketNackTest, ParseWrap) {
+ Nack parsed;
+ EXPECT_TRUE(test::ParseSinglePacket(kWrapPacket, &parsed));
+
+ EXPECT_EQ(kSenderSsrc, parsed.sender_ssrc());
+ EXPECT_EQ(kRemoteSsrc, parsed.media_ssrc());
+ EXPECT_THAT(parsed.packet_ids(), ElementsAreArray(kWrapList));
+}
+
+TEST(RtcpPacketNackTest, BadOrder) {
+ // Does not guarantee optimal packing, but should guarantee correctness.
+ const uint16_t kUnorderedList[] = {1, 25, 13, 12, 9, 27, 29};
+ const size_t kUnorderedListLength =
+ sizeof(kUnorderedList) / sizeof(kUnorderedList[0]);
+ Nack nack;
+ nack.SetSenderSsrc(kSenderSsrc);
+ nack.SetMediaSsrc(kRemoteSsrc);
+ nack.SetPacketIds(kUnorderedList, kUnorderedListLength);
+
+ rtc::Buffer packet = nack.Build();
+
+ Nack parsed;
+ EXPECT_TRUE(test::ParseSinglePacket(packet, &parsed));
+
+ EXPECT_EQ(kSenderSsrc, parsed.sender_ssrc());
+ EXPECT_EQ(kRemoteSsrc, parsed.media_ssrc());
+ EXPECT_THAT(parsed.packet_ids(), UnorderedElementsAreArray(kUnorderedList));
+}
+
+TEST(RtcpPacketNackTest, CreateFragmented) {
+ Nack nack;
+ const uint16_t kList[] = {1, 100, 200, 300, 400};
+  const size_t kListLength = sizeof(kList) / sizeof(kList[0]);
+ nack.SetSenderSsrc(kSenderSsrc);
+ nack.SetMediaSsrc(kRemoteSsrc);
+ nack.SetPacketIds(kList, kListLength);
+
+  // Fits the 4-byte header, both SSRCs, and 3 nack items of 4 bytes each.
+  const size_t kBufferSize = 12 + (3 * 4);
+
+ MockFunction<void(rtc::ArrayView<const uint8_t>)> callback;
+ EXPECT_CALL(callback, Call(_))
+ .WillOnce(Invoke([&](rtc::ArrayView<const uint8_t> packet) {
+ Nack nack;
+ EXPECT_TRUE(test::ParseSinglePacket(packet, &nack));
+ EXPECT_EQ(kSenderSsrc, nack.sender_ssrc());
+ EXPECT_EQ(kRemoteSsrc, nack.media_ssrc());
+ EXPECT_THAT(nack.packet_ids(), ElementsAre(1, 100, 200));
+ }))
+ .WillOnce(Invoke([&](rtc::ArrayView<const uint8_t> packet) {
+ Nack nack;
+ EXPECT_TRUE(test::ParseSinglePacket(packet, &nack));
+ EXPECT_EQ(kSenderSsrc, nack.sender_ssrc());
+ EXPECT_EQ(kRemoteSsrc, nack.media_ssrc());
+ EXPECT_THAT(nack.packet_ids(), ElementsAre(300, 400));
+ }));
+
+ EXPECT_TRUE(nack.Build(kBufferSize, callback.AsStdFunction()));
+}
+
+TEST(RtcpPacketNackTest, CreateFailsWithTooSmallBuffer) {
+ const uint16_t kList[] = {1};
+ const size_t kMinNackBlockSize = 16;
+ Nack nack;
+ nack.SetSenderSsrc(kSenderSsrc);
+ nack.SetMediaSsrc(kRemoteSsrc);
+ nack.SetPacketIds(kList, 1);
+
+ MockFunction<void(rtc::ArrayView<const uint8_t>)> callback;
+ EXPECT_CALL(callback, Call(_)).Times(0);
+ EXPECT_FALSE(nack.Build(kMinNackBlockSize - 1, callback.AsStdFunction()));
+}
+
+TEST(RtcpPacketNackTest, ParseFailsWithTooSmallBuffer) {
+ Nack parsed;
+ EXPECT_FALSE(test::ParseSinglePacket(kTooSmallPacket, &parsed));
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/pli.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/pli.cc
new file mode 100644
index 0000000000..5b41aa5c2c
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/pli.cc
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtcp_packet/pli.h"
+
+#include "modules/rtp_rtcp/source/rtcp_packet/common_header.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+namespace rtcp {
+constexpr uint8_t Pli::kFeedbackMessageType;
+// RFC 4585: Feedback format.
+//
+// Common packet format:
+//
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |V=2|P| FMT | PT | length |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | SSRC of packet sender |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | SSRC of media source |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// : Feedback Control Information (FCI) :
+// : :
+
+Pli::Pli() = default;
+
+Pli::Pli(const Pli& pli) = default;
+
+Pli::~Pli() = default;
+
+//
+// Picture loss indication (PLI) (RFC 4585).
+// FCI: no feedback control information.
+bool Pli::Parse(const CommonHeader& packet) {
+ RTC_DCHECK_EQ(packet.type(), kPacketType);
+ RTC_DCHECK_EQ(packet.fmt(), kFeedbackMessageType);
+
+ if (packet.payload_size_bytes() < kCommonFeedbackLength) {
+ RTC_LOG(LS_WARNING) << "Packet is too small to be a valid PLI packet";
+ return false;
+ }
+
+ ParseCommonFeedback(packet.payload());
+ return true;
+}
+
+size_t Pli::BlockLength() const {
+ return kHeaderLength + kCommonFeedbackLength;
+}
+
+bool Pli::Create(uint8_t* packet,
+ size_t* index,
+ size_t max_length,
+ PacketReadyCallback callback) const {
+ while (*index + BlockLength() > max_length) {
+ if (!OnBufferFull(packet, index, callback))
+ return false;
+ }
+
+ CreateHeader(kFeedbackMessageType, kPacketType, HeaderLength(), packet,
+ index);
+ CreateCommonFeedback(packet + *index);
+ *index += kCommonFeedbackLength;
+ return true;
+}
+
+} // namespace rtcp
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/pli.h b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/pli.h
new file mode 100644
index 0000000000..b9b9c45a9c
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/pli.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_PLI_H_
+#define MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_PLI_H_
+
+#include "modules/rtp_rtcp/source/rtcp_packet/psfb.h"
+
+namespace webrtc {
+namespace rtcp {
+class CommonHeader;
+// Picture loss indication (PLI) (RFC 4585).
+class Pli : public Psfb {
+ public:
+ static constexpr uint8_t kFeedbackMessageType = 1;
+
+ Pli();
+ Pli(const Pli& pli);
+ ~Pli() override;
+
+ bool Parse(const CommonHeader& packet);
+
+ size_t BlockLength() const override;
+
+ bool Create(uint8_t* packet,
+ size_t* index,
+ size_t max_length,
+ PacketReadyCallback callback) const override;
+};
+
+} // namespace rtcp
+} // namespace webrtc
+#endif // MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_PLI_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/pli_unittest.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/pli_unittest.cc
new file mode 100644
index 0000000000..c971e22bc1
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/pli_unittest.cc
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtcp_packet/pli.h"
+
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "test/rtcp_packet_parser.h"
+
+using ::testing::ElementsAreArray;
+using ::testing::make_tuple;
+using webrtc::rtcp::Pli;
+
+namespace webrtc {
+namespace {
+const uint32_t kSenderSsrc = 0x12345678;
+const uint32_t kRemoteSsrc = 0x23456789;
+// Manually created Pli packet matching constants above.
+const uint8_t kPacket[] = {0x81, 206, 0x00, 0x02, 0x12, 0x34,
+ 0x56, 0x78, 0x23, 0x45, 0x67, 0x89};
+} // namespace
+
+TEST(RtcpPacketPliTest, Parse) {
+ Pli mutable_parsed;
+ EXPECT_TRUE(test::ParseSinglePacket(kPacket, &mutable_parsed));
+ const Pli& parsed = mutable_parsed; // Read values from constant object.
+
+ EXPECT_EQ(kSenderSsrc, parsed.sender_ssrc());
+ EXPECT_EQ(kRemoteSsrc, parsed.media_ssrc());
+}
+
+TEST(RtcpPacketPliTest, Create) {
+ Pli pli;
+ pli.SetSenderSsrc(kSenderSsrc);
+ pli.SetMediaSsrc(kRemoteSsrc);
+
+ rtc::Buffer packet = pli.Build();
+
+ EXPECT_THAT(make_tuple(packet.data(), packet.size()),
+ ElementsAreArray(kPacket));
+}
+
+TEST(RtcpPacketPliTest, ParseFailsOnTooSmallPacket) {
+ const uint8_t kTooSmallPacket[] = {0x81, 206, 0x00, 0x01,
+ 0x12, 0x34, 0x56, 0x78};
+
+ Pli parsed;
+ EXPECT_FALSE(test::ParseSinglePacket(kTooSmallPacket, &parsed));
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/psfb.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/psfb.cc
new file mode 100644
index 0000000000..384d8ba811
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/psfb.cc
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtcp_packet/psfb.h"
+
+#include "modules/rtp_rtcp/source/byte_io.h"
+
+namespace webrtc {
+namespace rtcp {
+constexpr uint8_t Psfb::kPacketType;
+constexpr uint8_t Psfb::kAfbMessageType;
+constexpr size_t Psfb::kCommonFeedbackLength;
+// RFC 4585: Feedback format.
+//
+// Common packet format:
+//
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |V=2|P| FMT | PT | length |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// 0 | SSRC of packet sender |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// 4 | SSRC of media source |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// : Feedback Control Information (FCI) :
+// : :
+
+void Psfb::ParseCommonFeedback(const uint8_t* payload) {
+ SetSenderSsrc(ByteReader<uint32_t>::ReadBigEndian(&payload[0]));
+ SetMediaSsrc(ByteReader<uint32_t>::ReadBigEndian(&payload[4]));
+}
+
+void Psfb::CreateCommonFeedback(uint8_t* payload) const {
+ ByteWriter<uint32_t>::WriteBigEndian(&payload[0], sender_ssrc());
+ ByteWriter<uint32_t>::WriteBigEndian(&payload[4], media_ssrc());
+}
+
+} // namespace rtcp
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/psfb.h b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/psfb.h
new file mode 100644
index 0000000000..d6b8bca7c4
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/psfb.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ *
+ */
+
+#ifndef MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_PSFB_H_
+#define MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_PSFB_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "modules/rtp_rtcp/source/rtcp_packet.h"
+
+namespace webrtc {
+namespace rtcp {
+
+// PSFB: Payload-specific feedback message.
+// RFC 4585, Section 6.3.
+class Psfb : public RtcpPacket {
+ public:
+ static constexpr uint8_t kPacketType = 206;
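+  // FMT value 15 denotes Application Layer Feedback (AFB) in RFC 4585;
+  // REMB messages are carried in this format.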
+ static constexpr uint8_t kAfbMessageType = 15;
+
+ Psfb() = default;
+ ~Psfb() override = default;
+
+ void SetMediaSsrc(uint32_t ssrc) { media_ssrc_ = ssrc; }
+
+ uint32_t media_ssrc() const { return media_ssrc_; }
+
+ protected:
+ static constexpr size_t kCommonFeedbackLength = 8;
+ void ParseCommonFeedback(const uint8_t* payload);
+ void CreateCommonFeedback(uint8_t* payload) const;
+
+ private:
+ uint32_t media_ssrc_ = 0;
+};
+
+} // namespace rtcp
+} // namespace webrtc
+#endif // MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_PSFB_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/rapid_resync_request.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/rapid_resync_request.cc
new file mode 100644
index 0000000000..8563c28373
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/rapid_resync_request.cc
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtcp_packet/rapid_resync_request.h"
+
+#include "modules/rtp_rtcp/source/rtcp_packet/common_header.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+namespace rtcp {
+constexpr uint8_t RapidResyncRequest::kFeedbackMessageType;
+// RFC 4585: Feedback format.
+// Rapid Resynchronisation Request (draft-perkins-avt-rapid-rtp-sync-03).
+//
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |V=2|P| FMT=5 | PT=205 | length=2 |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | SSRC of packet sender |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | SSRC of media source |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+bool RapidResyncRequest::Parse(const CommonHeader& packet) {
+ RTC_DCHECK_EQ(packet.type(), kPacketType);
+ RTC_DCHECK_EQ(packet.fmt(), kFeedbackMessageType);
+
+ if (packet.payload_size_bytes() != kCommonFeedbackLength) {
+ RTC_LOG(LS_WARNING) << "Packet payload size should be "
+ << kCommonFeedbackLength << " instead of "
+ << packet.payload_size_bytes()
+ << " to be a valid Rapid Resynchronisation Request";
+ return false;
+ }
+
+ ParseCommonFeedback(packet.payload());
+ return true;
+}
+
+size_t RapidResyncRequest::BlockLength() const {
+ return kHeaderLength + kCommonFeedbackLength;
+}
+
+bool RapidResyncRequest::Create(uint8_t* packet,
+ size_t* index,
+ size_t max_length,
+ PacketReadyCallback callback) const {
+ while (*index + BlockLength() > max_length) {
+ if (!OnBufferFull(packet, index, callback))
+ return false;
+ }
+
+ CreateHeader(kFeedbackMessageType, kPacketType, HeaderLength(), packet,
+ index);
+ CreateCommonFeedback(packet + *index);
+ *index += kCommonFeedbackLength;
+ return true;
+}
+} // namespace rtcp
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/rapid_resync_request.h b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/rapid_resync_request.h
new file mode 100644
index 0000000000..1955b98f5c
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/rapid_resync_request.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_RAPID_RESYNC_REQUEST_H_
+#define MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_RAPID_RESYNC_REQUEST_H_
+
+#include "modules/rtp_rtcp/source/rtcp_packet/rtpfb.h"
+
+namespace webrtc {
+namespace rtcp {
+class CommonHeader;
+
+// draft-perkins-avt-rapid-rtp-sync-03
+class RapidResyncRequest : public Rtpfb {
+ public:
+ static constexpr uint8_t kFeedbackMessageType = 5;
+
+ RapidResyncRequest() {}
+ ~RapidResyncRequest() override {}
+
+ // Parse assumes header is already parsed and validated.
+ bool Parse(const CommonHeader& header);
+
+ size_t BlockLength() const override;
+
+ bool Create(uint8_t* packet,
+ size_t* index,
+ size_t max_length,
+ PacketReadyCallback callback) const override;
+};
+} // namespace rtcp
+} // namespace webrtc
+#endif // MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_RAPID_RESYNC_REQUEST_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/rapid_resync_request_unittest.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/rapid_resync_request_unittest.cc
new file mode 100644
index 0000000000..d0e40fd83d
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/rapid_resync_request_unittest.cc
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtcp_packet/rapid_resync_request.h"
+
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "test/rtcp_packet_parser.h"
+
+using ::testing::ElementsAreArray;
+using ::testing::make_tuple;
+using webrtc::rtcp::RapidResyncRequest;
+
+namespace webrtc {
+namespace {
+const uint32_t kSenderSsrc = 0x12345678;
+const uint32_t kRemoteSsrc = 0x23456789;
+// Manually created packet matching constants above.
+const uint8_t kPacket[] = {0x85, 205, 0x00, 0x02, 0x12, 0x34,
+ 0x56, 0x78, 0x23, 0x45, 0x67, 0x89};
+} // namespace
+
+TEST(RtcpPacketRapidResyncRequestTest, Parse) {
+ RapidResyncRequest mutable_parsed;
+ EXPECT_TRUE(test::ParseSinglePacket(kPacket, &mutable_parsed));
+ const RapidResyncRequest& parsed = mutable_parsed;
+
+ EXPECT_EQ(kSenderSsrc, parsed.sender_ssrc());
+ EXPECT_EQ(kRemoteSsrc, parsed.media_ssrc());
+}
+
+TEST(RtcpPacketRapidResyncRequestTest, Create) {
+ RapidResyncRequest rrr;
+ rrr.SetSenderSsrc(kSenderSsrc);
+ rrr.SetMediaSsrc(kRemoteSsrc);
+
+ rtc::Buffer packet = rrr.Build();
+
+ EXPECT_THAT(make_tuple(packet.data(), packet.size()),
+ ElementsAreArray(kPacket));
+}
+
+TEST(RtcpPacketRapidResyncRequestTest, ParseFailsOnTooSmallPacket) {
+ const uint8_t kTooSmallPacket[] = {0x85, 205, 0x00, 0x01,
+ 0x12, 0x34, 0x56, 0x78};
+ RapidResyncRequest parsed;
+ EXPECT_FALSE(test::ParseSinglePacket(kTooSmallPacket, &parsed));
+}
+
+TEST(RtcpPacketRapidResyncRequestTest, ParseFailsOnTooLargePacket) {
+ const uint8_t kTooLargePacket[] = {0x85, 205, 0x00, 0x03, 0x12, 0x34,
+ 0x56, 0x78, 0x32, 0x21, 0x65, 0x87,
+ 0x23, 0x45, 0x67, 0x89};
+ RapidResyncRequest parsed;
+ EXPECT_FALSE(test::ParseSinglePacket(kTooLargePacket, &parsed));
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/receiver_report.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/receiver_report.cc
new file mode 100644
index 0000000000..185011dff1
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/receiver_report.cc
@@ -0,0 +1,112 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtcp_packet/receiver_report.h"
+
+#include <utility>
+
+#include "modules/rtp_rtcp/source/byte_io.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/common_header.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+namespace rtcp {
+constexpr uint8_t ReceiverReport::kPacketType;
+constexpr size_t ReceiverReport::kMaxNumberOfReportBlocks;
+// RTCP receiver report (RFC 3550).
+//
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |V=2|P| RC | PT=RR=201 | length |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | SSRC of packet sender |
+// +=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
+// | report block(s) |
+// | .... |
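+//
+// For illustration: a report carrying one report block begins
+// 0x81 0xc9 0x00 0x07, i.e. V=2, P=0, RC=1, PT=201, and a length of
+// 7 32-bit words after the first header word.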
+
+ReceiverReport::ReceiverReport() = default;
+
+ReceiverReport::ReceiverReport(const ReceiverReport& rhs) = default;
+
+ReceiverReport::~ReceiverReport() = default;
+
+bool ReceiverReport::Parse(const CommonHeader& packet) {
+ RTC_DCHECK_EQ(packet.type(), kPacketType);
+
+ const uint8_t report_blocks_count = packet.count();
+
+ if (packet.payload_size_bytes() <
+ kRrBaseLength + report_blocks_count * ReportBlock::kLength) {
+ RTC_LOG(LS_WARNING) << "Packet is too small to contain all the data.";
+ return false;
+ }
+
+ SetSenderSsrc(ByteReader<uint32_t>::ReadBigEndian(packet.payload()));
+
+ const uint8_t* next_report_block = packet.payload() + kRrBaseLength;
+
+ report_blocks_.resize(report_blocks_count);
+ for (ReportBlock& block : report_blocks_) {
+ block.Parse(next_report_block, ReportBlock::kLength);
+ next_report_block += ReportBlock::kLength;
+ }
+
+ RTC_DCHECK_LE(next_report_block - packet.payload(),
+ static_cast<ptrdiff_t>(packet.payload_size_bytes()));
+ return true;
+}
+
+size_t ReceiverReport::BlockLength() const {
+ return kHeaderLength + kRrBaseLength +
+ report_blocks_.size() * ReportBlock::kLength;
+}
+
+bool ReceiverReport::Create(uint8_t* packet,
+ size_t* index,
+ size_t max_length,
+ PacketReadyCallback callback) const {
+ while (*index + BlockLength() > max_length) {
+ if (!OnBufferFull(packet, index, callback))
+ return false;
+ }
+ CreateHeader(report_blocks_.size(), kPacketType, HeaderLength(), packet,
+ index);
+ ByteWriter<uint32_t>::WriteBigEndian(packet + *index, sender_ssrc());
+ *index += kRrBaseLength;
+ for (const ReportBlock& block : report_blocks_) {
+ block.Create(packet + *index);
+ *index += ReportBlock::kLength;
+ }
+ return true;
+}
+
+bool ReceiverReport::AddReportBlock(const ReportBlock& block) {
+ if (report_blocks_.size() >= kMaxNumberOfReportBlocks) {
+ RTC_LOG(LS_WARNING) << "Max report blocks reached.";
+ return false;
+ }
+ report_blocks_.push_back(block);
+ return true;
+}
+
+bool ReceiverReport::SetReportBlocks(std::vector<ReportBlock> blocks) {
+ if (blocks.size() > kMaxNumberOfReportBlocks) {
+ RTC_LOG(LS_WARNING) << "Too many report blocks (" << blocks.size()
+ << ") for receiver report.";
+ return false;
+ }
+ report_blocks_ = std::move(blocks);
+ return true;
+}
+
+} // namespace rtcp
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/receiver_report.h b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/receiver_report.h
new file mode 100644
index 0000000000..b9c1c466c7
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/receiver_report.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_RECEIVER_REPORT_H_
+#define MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_RECEIVER_REPORT_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <vector>
+
+#include "modules/rtp_rtcp/source/rtcp_packet.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/report_block.h"
+
+namespace webrtc {
+namespace rtcp {
+class CommonHeader;
+
+class ReceiverReport : public RtcpPacket {
+ public:
+ static constexpr uint8_t kPacketType = 201;
+ static constexpr size_t kMaxNumberOfReportBlocks = 0x1f;
+
+ ReceiverReport();
+ ReceiverReport(const ReceiverReport&);
+ ~ReceiverReport() override;
+
+ // Parse assumes header is already parsed and validated.
+ bool Parse(const CommonHeader& packet);
+
+ bool AddReportBlock(const ReportBlock& block);
+ bool SetReportBlocks(std::vector<ReportBlock> blocks);
+
+ const std::vector<ReportBlock>& report_blocks() const {
+ return report_blocks_;
+ }
+
+ size_t BlockLength() const override;
+
+ bool Create(uint8_t* packet,
+ size_t* index,
+ size_t max_length,
+ PacketReadyCallback callback) const override;
+
+ private:
+ static const size_t kRrBaseLength = 4;
+
+ std::vector<ReportBlock> report_blocks_;
+};
+
+} // namespace rtcp
+} // namespace webrtc
+#endif // MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_RECEIVER_REPORT_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/receiver_report_unittest.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/receiver_report_unittest.cc
new file mode 100644
index 0000000000..23ea49622b
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/receiver_report_unittest.cc
@@ -0,0 +1,161 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtcp_packet/receiver_report.h"
+
+#include <utility>
+
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "test/rtcp_packet_parser.h"
+
+using ::testing::ElementsAreArray;
+using ::testing::IsEmpty;
+using ::testing::make_tuple;
+using webrtc::rtcp::ReceiverReport;
+using webrtc::rtcp::ReportBlock;
+
+namespace webrtc {
+namespace {
+const uint32_t kSenderSsrc = 0x12345678;
+const uint32_t kRemoteSsrc = 0x23456789;
+const uint8_t kFractionLost = 55;
+const int32_t kCumulativeLost = 0x111213;
+const uint32_t kExtHighestSeqNum = 0x22232425;
+const uint32_t kJitter = 0x33343536;
+const uint32_t kLastSr = 0x44454647;
+const uint32_t kDelayLastSr = 0x55565758;
+// Manually created ReceiverReport with one ReportBlock matching constants
+// above.
+// Having this block allows testing Create and Parse separately.
+const uint8_t kPacket[] = {0x81, 201, 0x00, 0x07, 0x12, 0x34, 0x56, 0x78,
+ 0x23, 0x45, 0x67, 0x89, 55, 0x11, 0x12, 0x13,
+ 0x22, 0x23, 0x24, 0x25, 0x33, 0x34, 0x35, 0x36,
+ 0x44, 0x45, 0x46, 0x47, 0x55, 0x56, 0x57, 0x58};
+} // namespace
+
+TEST(RtcpPacketReceiverReportTest, ParseWithOneReportBlock) {
+ ReceiverReport rr;
+ EXPECT_TRUE(test::ParseSinglePacket(kPacket, &rr));
+ const ReceiverReport& parsed = rr;
+
+ EXPECT_EQ(kSenderSsrc, parsed.sender_ssrc());
+ EXPECT_EQ(1u, parsed.report_blocks().size());
+ const ReportBlock& rb = parsed.report_blocks().front();
+ EXPECT_EQ(kRemoteSsrc, rb.source_ssrc());
+ EXPECT_EQ(kFractionLost, rb.fraction_lost());
+ EXPECT_EQ(kCumulativeLost, rb.cumulative_lost_signed());
+ EXPECT_EQ(kExtHighestSeqNum, rb.extended_high_seq_num());
+ EXPECT_EQ(kJitter, rb.jitter());
+ EXPECT_EQ(kLastSr, rb.last_sr());
+ EXPECT_EQ(kDelayLastSr, rb.delay_since_last_sr());
+}
+
+TEST(RtcpPacketReceiverReportTest, ParseFailsOnIncorrectSize) {
+ rtc::Buffer damaged_packet(kPacket);
+ damaged_packet[0]++; // Damage the packet: increase count field.
+ ReceiverReport rr;
+ EXPECT_FALSE(test::ParseSinglePacket(damaged_packet, &rr));
+}
+
+TEST(RtcpPacketReceiverReportTest, CreateWithOneReportBlock) {
+ ReceiverReport rr;
+ rr.SetSenderSsrc(kSenderSsrc);
+ ReportBlock rb;
+ rb.SetMediaSsrc(kRemoteSsrc);
+ rb.SetFractionLost(kFractionLost);
+ rb.SetCumulativeLost(kCumulativeLost);
+ rb.SetExtHighestSeqNum(kExtHighestSeqNum);
+ rb.SetJitter(kJitter);
+ rb.SetLastSr(kLastSr);
+ rb.SetDelayLastSr(kDelayLastSr);
+ rr.AddReportBlock(rb);
+
+ rtc::Buffer raw = rr.Build();
+
+ EXPECT_THAT(make_tuple(raw.data(), raw.size()), ElementsAreArray(kPacket));
+}
+
+TEST(RtcpPacketReceiverReportTest, CreateAndParseWithoutReportBlocks) {
+ ReceiverReport rr;
+ rr.SetSenderSsrc(kSenderSsrc);
+
+ rtc::Buffer raw = rr.Build();
+ ReceiverReport parsed;
+ EXPECT_TRUE(test::ParseSinglePacket(raw, &parsed));
+
+ EXPECT_EQ(kSenderSsrc, parsed.sender_ssrc());
+ EXPECT_THAT(parsed.report_blocks(), IsEmpty());
+}
+
+TEST(RtcpPacketReceiverReportTest, CreateAndParseWithTwoReportBlocks) {
+ ReceiverReport rr;
+ ReportBlock rb1;
+ rb1.SetMediaSsrc(kRemoteSsrc);
+ ReportBlock rb2;
+ rb2.SetMediaSsrc(kRemoteSsrc + 1);
+
+ rr.SetSenderSsrc(kSenderSsrc);
+ EXPECT_TRUE(rr.AddReportBlock(rb1));
+ EXPECT_TRUE(rr.AddReportBlock(rb2));
+
+ rtc::Buffer raw = rr.Build();
+ ReceiverReport parsed;
+ EXPECT_TRUE(test::ParseSinglePacket(raw, &parsed));
+
+ EXPECT_EQ(kSenderSsrc, parsed.sender_ssrc());
+ EXPECT_EQ(2u, parsed.report_blocks().size());
+ EXPECT_EQ(kRemoteSsrc, parsed.report_blocks()[0].source_ssrc());
+ EXPECT_EQ(kRemoteSsrc + 1, parsed.report_blocks()[1].source_ssrc());
+}
+
+TEST(RtcpPacketReceiverReportTest, CreateWithTooManyReportBlocks) {
+ ReceiverReport rr;
+ rr.SetSenderSsrc(kSenderSsrc);
+ ReportBlock rb;
+ for (size_t i = 0; i < ReceiverReport::kMaxNumberOfReportBlocks; ++i) {
+ rb.SetMediaSsrc(kRemoteSsrc + i);
+ EXPECT_TRUE(rr.AddReportBlock(rb));
+ }
+ rb.SetMediaSsrc(kRemoteSsrc + ReceiverReport::kMaxNumberOfReportBlocks);
+ EXPECT_FALSE(rr.AddReportBlock(rb));
+}
+
+TEST(RtcpPacketReceiverReportTest, SetReportBlocksOverwritesOldBlocks) {
+ ReceiverReport rr;
+ ReportBlock report_block;
+ // Use jitter field of the report blocks to distinguish them.
+ report_block.SetJitter(1001u);
+ rr.AddReportBlock(report_block);
+ ASSERT_EQ(rr.report_blocks().size(), 1u);
+ ASSERT_EQ(rr.report_blocks()[0].jitter(), 1001u);
+
+ std::vector<ReportBlock> blocks(3u);
+ blocks[0].SetJitter(2001u);
+ blocks[1].SetJitter(3001u);
+ blocks[2].SetJitter(4001u);
+ EXPECT_TRUE(rr.SetReportBlocks(blocks));
+ ASSERT_EQ(rr.report_blocks().size(), 3u);
+ EXPECT_EQ(rr.report_blocks()[0].jitter(), 2001u);
+ EXPECT_EQ(rr.report_blocks()[1].jitter(), 3001u);
+ EXPECT_EQ(rr.report_blocks()[2].jitter(), 4001u);
+}
+
+TEST(RtcpPacketReceiverReportTest, SetReportBlocksMaxLimit) {
+ ReceiverReport rr;
+ std::vector<ReportBlock> max_blocks(ReceiverReport::kMaxNumberOfReportBlocks);
+ EXPECT_TRUE(rr.SetReportBlocks(std::move(max_blocks)));
+
+ std::vector<ReportBlock> one_too_many_blocks(
+ ReceiverReport::kMaxNumberOfReportBlocks + 1);
+ EXPECT_FALSE(rr.SetReportBlocks(std::move(one_too_many_blocks)));
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/remb.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/remb.cc
new file mode 100644
index 0000000000..39795fb79c
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/remb.cc
@@ -0,0 +1,143 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtcp_packet/remb.h"
+
+#include <cstdint>
+#include <utility>
+
+#include "modules/rtp_rtcp/source/byte_io.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/common_header.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+namespace rtcp {
+// Receiver Estimated Max Bitrate (REMB) (draft-alvestrand-rmcat-remb).
+//
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |V=2|P| FMT=15 | PT=206 | length |
+// +=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
+// 0 | SSRC of packet sender |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// 4 | Unused = 0 |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// 8 | Unique identifier 'R' 'E' 'M' 'B' |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// 12 | Num SSRC | BR Exp | BR Mantissa |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// 16 | SSRC feedback |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// : ... :
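+//
+// The bitrate is coded as an 18-bit mantissa scaled by 2^exp. For
+// illustration: 522022 bps does not fit in 18 bits, so it goes on the wire
+// as mantissa 0x3fb93 with exponent 1 (522022 = 0x3fb93 * 2).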
+
+Remb::Remb() : bitrate_bps_(0) {}
+
+Remb::Remb(const Remb& rhs) = default;
+
+Remb::~Remb() = default;
+
+bool Remb::Parse(const CommonHeader& packet) {
+ RTC_DCHECK(packet.type() == kPacketType);
+ RTC_DCHECK_EQ(packet.fmt(), Psfb::kAfbMessageType);
+
+ if (packet.payload_size_bytes() < 16) {
+ RTC_LOG(LS_WARNING) << "Payload length " << packet.payload_size_bytes()
+ << " is too small for Remb packet.";
+ return false;
+ }
+ const uint8_t* const payload = packet.payload();
+ if (kUniqueIdentifier != ByteReader<uint32_t>::ReadBigEndian(&payload[8])) {
+ return false;
+ }
+ uint8_t number_of_ssrcs = payload[12];
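+  // Expected payload: common feedback (8 bytes) plus the 'REMB' word, the
+  // Num SSRC / BR word, and one 32-bit word per listed SSRC.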
+ if (packet.payload_size_bytes() !=
+ kCommonFeedbackLength + (2 + number_of_ssrcs) * 4) {
+ RTC_LOG(LS_WARNING) << "Payload size " << packet.payload_size_bytes()
+ << " does not match " << number_of_ssrcs << " ssrcs.";
+ return false;
+ }
+
+ ParseCommonFeedback(payload);
+ uint8_t exponenta = payload[13] >> 2;
+ uint64_t mantissa = (static_cast<uint32_t>(payload[13] & 0x03) << 16) |
+ ByteReader<uint16_t>::ReadBigEndian(&payload[14]);
+ bitrate_bps_ = (mantissa << exponenta);
+ bool shift_overflow =
+ (static_cast<uint64_t>(bitrate_bps_) >> exponenta) != mantissa;
+ if (shift_overflow) {
+ RTC_LOG(LS_ERROR) << "Invalid remb bitrate value : " << mantissa << "*2^"
+ << static_cast<int>(exponenta);
+ return false;
+ }
+
+ const uint8_t* next_ssrc = payload + 16;
+ ssrcs_.clear();
+ ssrcs_.reserve(number_of_ssrcs);
+ for (uint8_t i = 0; i < number_of_ssrcs; ++i) {
+ ssrcs_.push_back(ByteReader<uint32_t>::ReadBigEndian(next_ssrc));
+ next_ssrc += sizeof(uint32_t);
+ }
+
+ return true;
+}
+
+bool Remb::SetSsrcs(std::vector<uint32_t> ssrcs) {
+ if (ssrcs.size() > kMaxNumberOfSsrcs) {
+ RTC_LOG(LS_WARNING) << "Not enough space for all given SSRCs.";
+ return false;
+ }
+ ssrcs_ = std::move(ssrcs);
+ return true;
+}
+
+size_t Remb::BlockLength() const {
+ return kHeaderLength + kCommonFeedbackLength + (2 + ssrcs_.size()) * 4;
+}
+
+bool Remb::Create(uint8_t* packet,
+ size_t* index,
+ size_t max_length,
+ PacketReadyCallback callback) const {
+ while (*index + BlockLength() > max_length) {
+ if (!OnBufferFull(packet, index, callback))
+ return false;
+ }
+ size_t index_end = *index + BlockLength();
+ CreateHeader(Psfb::kAfbMessageType, kPacketType, HeaderLength(), packet,
+ index);
+ RTC_DCHECK_EQ(0, Psfb::media_ssrc());
+ CreateCommonFeedback(packet + *index);
+ *index += kCommonFeedbackLength;
+
+ ByteWriter<uint32_t>::WriteBigEndian(packet + *index, kUniqueIdentifier);
+ *index += sizeof(uint32_t);
+ const uint32_t kMaxMantissa = 0x3ffff; // 18 bits.
+ uint64_t mantissa = bitrate_bps_;
+ uint8_t exponenta = 0;
+ while (mantissa > kMaxMantissa) {
+ mantissa >>= 1;
+ ++exponenta;
+ }
+ packet[(*index)++] = static_cast<uint8_t>(ssrcs_.size());
+ packet[(*index)++] = (exponenta << 2) | (mantissa >> 16);
+ ByteWriter<uint16_t>::WriteBigEndian(packet + *index, mantissa & 0xffff);
+ *index += sizeof(uint16_t);
+
+ for (uint32_t ssrc : ssrcs_) {
+ ByteWriter<uint32_t>::WriteBigEndian(packet + *index, ssrc);
+ *index += sizeof(uint32_t);
+ }
+ RTC_DCHECK_EQ(index_end, *index);
+ return true;
+}
+} // namespace rtcp
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/remb.h b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/remb.h
new file mode 100644
index 0000000000..b7075c0f23
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/remb.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_REMB_H_
+#define MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_REMB_H_
+
+#include <vector>
+
+#include "modules/rtp_rtcp/source/rtcp_packet/psfb.h"
+
+namespace webrtc {
+namespace rtcp {
+class CommonHeader;
+
+// Receiver Estimated Max Bitrate (REMB) (draft-alvestrand-rmcat-remb).
+class Remb : public Psfb {
+ public:
+ static constexpr size_t kMaxNumberOfSsrcs = 0xff;
+
+ Remb();
+ Remb(const Remb&);
+ ~Remb() override;
+
+ // Parse assumes header is already parsed and validated.
+ bool Parse(const CommonHeader& packet);
+
+ bool SetSsrcs(std::vector<uint32_t> ssrcs);
+ void SetBitrateBps(int64_t bitrate_bps) { bitrate_bps_ = bitrate_bps; }
+
+ int64_t bitrate_bps() const { return bitrate_bps_; }
+ const std::vector<uint32_t>& ssrcs() const { return ssrcs_; }
+
+ size_t BlockLength() const override;
+
+ bool Create(uint8_t* packet,
+ size_t* index,
+ size_t max_length,
+ PacketReadyCallback callback) const override;
+
+ private:
+ static constexpr uint32_t kUniqueIdentifier = 0x52454D42; // 'R' 'E' 'M' 'B'.
+
+  // The media ssrc is unused; shadow the base class setter and getter.
+ void SetMediaSsrc(uint32_t);
+ uint32_t media_ssrc() const;
+
+ int64_t bitrate_bps_;
+ std::vector<uint32_t> ssrcs_;
+};
+} // namespace rtcp
+} // namespace webrtc
+#endif // MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_REMB_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/remb_unittest.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/remb_unittest.cc
new file mode 100644
index 0000000000..391a61de89
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/remb_unittest.cc
@@ -0,0 +1,128 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtcp_packet/remb.h"
+
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "test/rtcp_packet_parser.h"
+
+using ::testing::ElementsAreArray;
+using ::testing::IsEmpty;
+using ::testing::make_tuple;
+using webrtc::rtcp::Remb;
+
+namespace webrtc {
+namespace {
+const uint32_t kSenderSsrc = 0x12345678;
+const uint32_t kRemoteSsrcs[] = {0x23456789, 0x2345678a, 0x2345678b};
+const uint32_t kBitrateBps = 0x3fb93 * 2; // 522022 bps.
+const int64_t kBitrateBps64bit = int64_t{0x3fb93} << 30;
+const uint8_t kPacket[] = {0x8f, 206, 0x00, 0x07, 0x12, 0x34, 0x56, 0x78,
+ 0x00, 0x00, 0x00, 0x00, 'R', 'E', 'M', 'B',
+ 0x03, 0x07, 0xfb, 0x93, 0x23, 0x45, 0x67, 0x89,
+ 0x23, 0x45, 0x67, 0x8a, 0x23, 0x45, 0x67, 0x8b};
+const size_t kPacketLength = sizeof(kPacket);
+} // namespace
+
+TEST(RtcpPacketRembTest, Create) {
+ Remb remb;
+ remb.SetSenderSsrc(kSenderSsrc);
+ remb.SetSsrcs(
+ std::vector<uint32_t>(std::begin(kRemoteSsrcs), std::end(kRemoteSsrcs)));
+ remb.SetBitrateBps(kBitrateBps);
+
+ rtc::Buffer packet = remb.Build();
+
+ EXPECT_THAT(make_tuple(packet.data(), packet.size()),
+ ElementsAreArray(kPacket));
+}
+
+TEST(RtcpPacketRembTest, Parse) {
+ Remb remb;
+ EXPECT_TRUE(test::ParseSinglePacket(kPacket, &remb));
+ const Remb& parsed = remb;
+
+ EXPECT_EQ(kSenderSsrc, parsed.sender_ssrc());
+ EXPECT_EQ(kBitrateBps, parsed.bitrate_bps());
+ EXPECT_THAT(parsed.ssrcs(), ElementsAreArray(kRemoteSsrcs));
+}
+
+TEST(RtcpPacketRembTest, CreateAndParseWithoutSsrcs) {
+ Remb remb;
+ remb.SetSenderSsrc(kSenderSsrc);
+ remb.SetBitrateBps(kBitrateBps);
+ rtc::Buffer packet = remb.Build();
+
+ Remb parsed;
+ EXPECT_TRUE(test::ParseSinglePacket(packet, &parsed));
+ EXPECT_EQ(kSenderSsrc, parsed.sender_ssrc());
+ EXPECT_EQ(kBitrateBps, parsed.bitrate_bps());
+ EXPECT_THAT(parsed.ssrcs(), IsEmpty());
+}
+
+TEST(RtcpPacketRembTest, CreateAndParse64bitBitrate) {
+ Remb remb;
+ remb.SetBitrateBps(kBitrateBps64bit);
+ rtc::Buffer packet = remb.Build();
+
+ Remb parsed;
+ EXPECT_TRUE(test::ParseSinglePacket(packet, &parsed));
+ EXPECT_EQ(kBitrateBps64bit, parsed.bitrate_bps());
+}
+
+TEST(RtcpPacketRembTest, ParseFailsOnTooSmallPacketToBeRemb) {
+ // Make it too small.
+ constexpr size_t kTooSmallSize = (1 + 3) * 4;
+ uint8_t packet[kTooSmallSize];
+ memcpy(packet, kPacket, kTooSmallSize);
+  packet[3] = 3; // Adjust the length field to match (16 bytes in total).
+
+ Remb remb;
+ EXPECT_FALSE(test::ParseSinglePacket(packet, &remb));
+}
+
+TEST(RtcpPacketRembTest, ParseFailsWhenUniqueIdentifierIsNotRemb) {
+ uint8_t packet[kPacketLength];
+ memcpy(packet, kPacket, kPacketLength);
+ packet[12] = 'N'; // Swap 'R' -> 'N' in the 'REMB' unique identifier.
+
+ Remb remb;
+ EXPECT_FALSE(test::ParseSinglePacket(packet, &remb));
+}
+
+TEST(RtcpPacketRembTest, ParseFailsWhenBitrateDoNotFitIn64bits) {
+ uint8_t packet[kPacketLength];
+ memcpy(packet, kPacket, kPacketLength);
+ packet[17] |= 0xfc; // Set exponenta component to maximum of 63.
+ packet[19] |= 0x02; // Ensure mantissa is at least 2.
+
+ Remb remb;
+ EXPECT_FALSE(test::ParseSinglePacket(packet, &remb));
+}
+
+TEST(RtcpPacketRembTest, ParseFailsWhenSsrcCountMismatchLength) {
+ uint8_t packet[kPacketLength];
+ memcpy(packet, kPacket, kPacketLength);
+ packet[16]++; // Swap 3 -> 4 in the ssrcs count.
+
+ Remb remb;
+ EXPECT_FALSE(test::ParseSinglePacket(packet, &remb));
+}
+
+TEST(RtcpPacketRembTest, TooManySsrcs) {
+ Remb remb;
+ EXPECT_FALSE(remb.SetSsrcs(
+ std::vector<uint32_t>(Remb::kMaxNumberOfSsrcs + 1, kRemoteSsrcs[0])));
+ EXPECT_TRUE(remb.SetSsrcs(
+ std::vector<uint32_t>(Remb::kMaxNumberOfSsrcs, kRemoteSsrcs[0])));
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/remote_estimate.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/remote_estimate.cc
new file mode 100644
index 0000000000..ca59791248
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/remote_estimate.cc
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/rtp_rtcp/source/rtcp_packet/remote_estimate.h"
+
+#include <algorithm>
+#include <cmath>
+#include <type_traits>
+#include <utility>
+#include <vector>
+
+#include "modules/rtp_rtcp/source/byte_io.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/common_header.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+namespace rtcp {
+namespace {
+
+static constexpr int kFieldValueSize = 3;
+static constexpr int kFieldSize = 1 + kFieldValueSize;
+static constexpr DataRate kDataRateResolution = DataRate::KilobitsPerSec(1);
+constexpr int64_t kMaxEncoded = (1 << (kFieldValueSize * 8)) - 1;
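+// Wire format of one serialized field: a 1-byte field id followed by a
+// 3-byte big-endian value in units of kDataRateResolution (1 kbps). The
+// all-ones value kMaxEncoded is reserved to encode PlusInfinity.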
+
+class DataRateSerializer {
+ public:
+ DataRateSerializer(
+ uint8_t id,
+ std::function<DataRate*(NetworkStateEstimate*)> field_getter)
+ : id_(id), field_getter_(field_getter) {}
+
+ uint8_t id() const { return id_; }
+
+ void Read(const uint8_t* src, NetworkStateEstimate* target) const {
+ int64_t scaled = ByteReader<uint32_t, kFieldValueSize>::ReadBigEndian(src);
+ if (scaled == kMaxEncoded) {
+ *field_getter_(target) = DataRate::PlusInfinity();
+ } else {
+ *field_getter_(target) = kDataRateResolution * scaled;
+ }
+ }
+
+ bool Write(const NetworkStateEstimate& src, uint8_t* target) const {
+ auto value = *field_getter_(const_cast<NetworkStateEstimate*>(&src));
+ if (value.IsMinusInfinity()) {
+ RTC_LOG(LS_WARNING) << "Trying to serialize MinusInfinity";
+ return false;
+ }
+ ByteWriter<uint8_t>::WriteBigEndian(target++, id_);
+ int64_t scaled;
+ if (value.IsPlusInfinity()) {
+ scaled = kMaxEncoded;
+ } else {
+ scaled = value / kDataRateResolution;
+ if (scaled >= kMaxEncoded) {
+ scaled = kMaxEncoded;
+ RTC_LOG(LS_WARNING) << ToString(value) << " is larger than max ("
+ << ToString(kMaxEncoded * kDataRateResolution)
+ << "), encoded as PlusInfinity.";
+ }
+ }
+ ByteWriter<uint32_t, kFieldValueSize>::WriteBigEndian(target, scaled);
+ return true;
+ }
+
+ private:
+ const uint8_t id_;
+ const std::function<DataRate*(NetworkStateEstimate*)> field_getter_;
+};
+
+class RemoteEstimateSerializerImpl : public RemoteEstimateSerializer {
+ public:
+ explicit RemoteEstimateSerializerImpl(std::vector<DataRateSerializer> fields)
+ : fields_(fields) {}
+
+ rtc::Buffer Serialize(const NetworkStateEstimate& src) const override {
+ size_t max_size = fields_.size() * kFieldSize;
+ size_t size = 0;
+ rtc::Buffer buf(max_size);
+ for (const auto& field : fields_) {
+ if (field.Write(src, buf.data() + size)) {
+ size += kFieldSize;
+ }
+ }
+ buf.SetSize(size);
+ return buf;
+ }
+
+ bool Parse(rtc::ArrayView<const uint8_t> src,
+ NetworkStateEstimate* target) const override {
+ if (src.size() % kFieldSize != 0)
+ return false;
+ RTC_DCHECK_EQ(src.size() % kFieldSize, 0);
+ for (const uint8_t* data_ptr = src.data(); data_ptr < src.end();
+ data_ptr += kFieldSize) {
+ uint8_t field_id = ByteReader<uint8_t>::ReadBigEndian(data_ptr);
+ for (const auto& field : fields_) {
+ if (field.id() == field_id) {
+ field.Read(data_ptr + 1, target);
+ break;
+ }
+ }
+ }
+ return true;
+ }
+
+ private:
+ const std::vector<DataRateSerializer> fields_;
+};
+
+} // namespace
+
+const RemoteEstimateSerializer* GetRemoteEstimateSerializer() {
+ using E = NetworkStateEstimate;
+ static auto* serializer = new RemoteEstimateSerializerImpl({
+ {1, [](E* e) { return &e->link_capacity_lower; }},
+ {2, [](E* e) { return &e->link_capacity_upper; }},
+ });
+ return serializer;
+}
+
+RemoteEstimate::RemoteEstimate() : serializer_(GetRemoteEstimateSerializer()) {
+ SetSubType(kSubType);
+ SetName(kName);
+ SetSenderSsrc(0);
+}
+
+RemoteEstimate::RemoteEstimate(App&& app)
+ : App(std::move(app)), serializer_(GetRemoteEstimateSerializer()) {}
+
+bool RemoteEstimate::ParseData() {
+ return serializer_->Parse({data(), data_size()}, &estimate_);
+}
+
+void RemoteEstimate::SetEstimate(NetworkStateEstimate estimate) {
+ estimate_ = estimate;
+ auto buf = serializer_->Serialize(estimate);
+ SetData(buf.data(), buf.size());
+}
+
+} // namespace rtcp
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/remote_estimate.h b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/remote_estimate.h
new file mode 100644
index 0000000000..3400274568
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/remote_estimate.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_REMOTE_ESTIMATE_H_
+#define MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_REMOTE_ESTIMATE_H_
+
+#include <memory>
+#include <vector>
+
+#include "api/transport/network_types.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/app.h"
+
+namespace webrtc {
+namespace rtcp {
+
+class CommonHeader;
+class RemoteEstimateSerializer {
+ public:
+ virtual bool Parse(rtc::ArrayView<const uint8_t> src,
+ NetworkStateEstimate* target) const = 0;
+ virtual rtc::Buffer Serialize(const NetworkStateEstimate& src) const = 0;
+ virtual ~RemoteEstimateSerializer() = default;
+};
+
+// A static global implementation is used to avoid incurring the
+// initialization overhead of the serializer every time a RemoteEstimate is
+// created.
+const RemoteEstimateSerializer* GetRemoteEstimateSerializer();
+
+// The RemoteEstimate packet provides network estimation results from the
+// receive side. This functionality is experimental and subject to change
+// without notice.
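+//
+// An illustrative usage sketch (the rate values are hypothetical):
+//   RemoteEstimate msg;
+//   NetworkStateEstimate estimate;
+//   estimate.link_capacity_lower = DataRate::KilobitsPerSec(500);
+//   estimate.link_capacity_upper = DataRate::KilobitsPerSec(1000);
+//   msg.SetEstimate(estimate);
+//   rtc::Buffer packet = msg.Build();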
+class RemoteEstimate : public App {
+ public:
+ RemoteEstimate();
+ explicit RemoteEstimate(App&& app);
+  // Note: the sub type must be unique among all APP messages named "goog".
+ static constexpr uint8_t kSubType = 13;
+ static constexpr uint32_t kName = NameToInt("goog");
+ static TimeDelta GetTimestampPeriod();
+
+ bool ParseData();
+ void SetEstimate(NetworkStateEstimate estimate);
+ NetworkStateEstimate estimate() const { return estimate_; }
+
+ private:
+ NetworkStateEstimate estimate_;
+ const RemoteEstimateSerializer* const serializer_;
+};
+
+} // namespace rtcp
+} // namespace webrtc
+
+#endif // MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_REMOTE_ESTIMATE_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/remote_estimate_unittest.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/remote_estimate_unittest.cc
new file mode 100644
index 0000000000..bf0e0e2610
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/remote_estimate_unittest.cc
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/rtp_rtcp/source/rtcp_packet/remote_estimate.h"
+
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace rtcp {
+TEST(RemoteEstimateTest, EncodesCapacityBounds) {
+ NetworkStateEstimate src;
+ src.link_capacity_lower = DataRate::KilobitsPerSec(10);
+ src.link_capacity_upper = DataRate::KilobitsPerSec(1000000);
+ rtc::Buffer data = GetRemoteEstimateSerializer()->Serialize(src);
+ NetworkStateEstimate dst;
+ EXPECT_TRUE(GetRemoteEstimateSerializer()->Parse(data, &dst));
+ EXPECT_EQ(src.link_capacity_lower, dst.link_capacity_lower);
+ EXPECT_EQ(src.link_capacity_upper, dst.link_capacity_upper);
+}
+
+TEST(RemoteEstimateTest, ExpandsToPlusInfinity) {
+ NetworkStateEstimate src;
+  // White box testing: we know that the value is stored in an unsigned
+  // 24-bit integer with kbps resolution. We expect it to be represented as
+  // plus infinity.
+ src.link_capacity_lower = DataRate::KilobitsPerSec(2 << 24);
+ src.link_capacity_upper = DataRate::PlusInfinity();
+ rtc::Buffer data = GetRemoteEstimateSerializer()->Serialize(src);
+
+ NetworkStateEstimate dst;
+ EXPECT_TRUE(GetRemoteEstimateSerializer()->Parse(data, &dst));
+ EXPECT_TRUE(dst.link_capacity_lower.IsPlusInfinity());
+ EXPECT_TRUE(dst.link_capacity_upper.IsPlusInfinity());
+}
+
+TEST(RemoteEstimateTest, DoesNotEncodeNegative) {
+ NetworkStateEstimate src;
+ src.link_capacity_lower = DataRate::MinusInfinity();
+ src.link_capacity_upper = DataRate::MinusInfinity();
+ rtc::Buffer data = GetRemoteEstimateSerializer()->Serialize(src);
+ // Since MinusInfinity can't be represented, the buffer should be empty.
+ EXPECT_EQ(data.size(), 0u);
+ NetworkStateEstimate dst;
+ dst.link_capacity_lower = DataRate::KilobitsPerSec(300);
+ EXPECT_TRUE(GetRemoteEstimateSerializer()->Parse(data, &dst));
+ // The fields will be left unchanged by the parser as they were not encoded.
+ EXPECT_EQ(dst.link_capacity_lower, DataRate::KilobitsPerSec(300));
+ EXPECT_TRUE(dst.link_capacity_upper.IsMinusInfinity());
+}
+} // namespace rtcp
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/report_block.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/report_block.cc
new file mode 100644
index 0000000000..d4579fc8d6
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/report_block.cc
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtcp_packet/report_block.h"
+
+#include "modules/rtp_rtcp/source/byte_io.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+namespace rtcp {
+
+// From RFC 3550, RTP: A Transport Protocol for Real-Time Applications.
+//
+// RTCP report block (RFC 3550).
+//
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
+// 0 | SSRC_1 (SSRC of first source) |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// 4 | fraction lost | cumulative number of packets lost |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// 8 | extended highest sequence number received |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// 12 | interarrival jitter |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// 16 | last SR (LSR) |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// 20 | delay since last SR (DLSR) |
+// 24 +=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
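+//
+// Per RFC 3550, "fraction lost" is the fraction of packets lost since the
+// previous report, expressed as a fixed-point number with the binary point
+// at the left edge of the field (i.e. lost / expected * 256); for example,
+// 25% loss is encoded as 64.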
+ReportBlock::ReportBlock()
+ : source_ssrc_(0),
+ fraction_lost_(0),
+ cumulative_lost_(0),
+ extended_high_seq_num_(0),
+ jitter_(0),
+ last_sr_(0),
+ delay_since_last_sr_(0) {}
+
+bool ReportBlock::Parse(const uint8_t* buffer, size_t length) {
+ RTC_DCHECK(buffer != nullptr);
+ if (length < ReportBlock::kLength) {
+ RTC_LOG(LS_ERROR) << "Report Block should be 24 bytes long";
+ return false;
+ }
+
+ source_ssrc_ = ByteReader<uint32_t>::ReadBigEndian(&buffer[0]);
+ fraction_lost_ = buffer[4];
+ cumulative_lost_ = ByteReader<int32_t, 3>::ReadBigEndian(&buffer[5]);
+ extended_high_seq_num_ = ByteReader<uint32_t>::ReadBigEndian(&buffer[8]);
+ jitter_ = ByteReader<uint32_t>::ReadBigEndian(&buffer[12]);
+ last_sr_ = ByteReader<uint32_t>::ReadBigEndian(&buffer[16]);
+ delay_since_last_sr_ = ByteReader<uint32_t>::ReadBigEndian(&buffer[20]);
+
+ return true;
+}
+
+void ReportBlock::Create(uint8_t* buffer) const {
+  // The runtime check should be done when setting cumulative_lost.
+  RTC_DCHECK_LT(cumulative_lost_signed(),
+                (1 << 23));  // Only 3 bytes are available for it.
+
+ ByteWriter<uint32_t>::WriteBigEndian(&buffer[0], source_ssrc());
+ ByteWriter<uint8_t>::WriteBigEndian(&buffer[4], fraction_lost());
+ ByteWriter<int32_t, 3>::WriteBigEndian(&buffer[5], cumulative_lost_signed());
+ ByteWriter<uint32_t>::WriteBigEndian(&buffer[8], extended_high_seq_num());
+ ByteWriter<uint32_t>::WriteBigEndian(&buffer[12], jitter());
+ ByteWriter<uint32_t>::WriteBigEndian(&buffer[16], last_sr());
+ ByteWriter<uint32_t>::WriteBigEndian(&buffer[20], delay_since_last_sr());
+}
+
+bool ReportBlock::SetCumulativeLost(int32_t cumulative_lost) {
+ // We have only 3 bytes to store it, and it's a signed value.
+ if (cumulative_lost >= (1 << 23) || cumulative_lost < -(1 << 23)) {
+ RTC_LOG(LS_WARNING)
+ << "Cumulative lost is too big to fit into Report Block";
+ return false;
+ }
+ cumulative_lost_ = cumulative_lost;
+ return true;
+}
+
+uint32_t ReportBlock::cumulative_lost() const {
+ if (cumulative_lost_ < 0) {
+ RTC_LOG(LS_VERBOSE) << "Ignoring negative value of cumulative_lost";
+ return 0;
+ }
+ return cumulative_lost_;
+}
+
+} // namespace rtcp
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/report_block.h b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/report_block.h
new file mode 100644
index 0000000000..eb16640ae2
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/report_block.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ *
+ */
+
+#ifndef MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_REPORT_BLOCK_H_
+#define MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_REPORT_BLOCK_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+namespace webrtc {
+namespace rtcp {
+
+// A ReportBlock represents the report block carried in Sender Report (SR)
+// and Receiver Report (RR) packets, as defined in RFC 3550 section 6.4.1.
+class ReportBlock {
+ public:
+ static const size_t kLength = 24;
+
+ ReportBlock();
+ ~ReportBlock() {}
+
+ bool Parse(const uint8_t* buffer, size_t length);
+
+ // Fills buffer with the ReportBlock.
+ // Consumes ReportBlock::kLength bytes.
+ void Create(uint8_t* buffer) const;
+
+ void SetMediaSsrc(uint32_t ssrc) { source_ssrc_ = ssrc; }
+ void SetFractionLost(uint8_t fraction_lost) {
+ fraction_lost_ = fraction_lost;
+ }
+ bool SetCumulativeLost(int32_t cumulative_lost);
+ void SetExtHighestSeqNum(uint32_t ext_highest_seq_num) {
+ extended_high_seq_num_ = ext_highest_seq_num;
+ }
+ void SetJitter(uint32_t jitter) { jitter_ = jitter; }
+ void SetLastSr(uint32_t last_sr) { last_sr_ = last_sr; }
+ void SetDelayLastSr(uint32_t delay_last_sr) {
+ delay_since_last_sr_ = delay_last_sr;
+ }
+
+ uint32_t source_ssrc() const { return source_ssrc_; }
+ uint8_t fraction_lost() const { return fraction_lost_; }
+ int32_t cumulative_lost_signed() const { return cumulative_lost_; }
+  // Deprecated: returns max(0, cumulative_lost_); negative values clamp to 0.
+ uint32_t cumulative_lost() const;
+ uint32_t extended_high_seq_num() const { return extended_high_seq_num_; }
+ uint32_t jitter() const { return jitter_; }
+ uint32_t last_sr() const { return last_sr_; }
+ uint32_t delay_since_last_sr() const { return delay_since_last_sr_; }
+
+ private:
+ uint32_t source_ssrc_; // 32 bits
+ uint8_t fraction_lost_; // 8 bits representing a fixed point value 0..1
+ int32_t cumulative_lost_; // Signed 24-bit value
+ uint32_t extended_high_seq_num_; // 32 bits
+ uint32_t jitter_; // 32 bits
+ uint32_t last_sr_; // 32 bits
+ uint32_t delay_since_last_sr_; // 32 bits, units of 1/65536 seconds
+};
+
+} // namespace rtcp
+} // namespace webrtc
+#endif // MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_REPORT_BLOCK_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/report_block_unittest.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/report_block_unittest.cc
new file mode 100644
index 0000000000..5cc102fed0
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/report_block_unittest.cc
@@ -0,0 +1,110 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtcp_packet/report_block.h"
+
+#include <limits>
+
+#include "rtc_base/random.h"
+#include "test/gtest.h"
+
+using webrtc::rtcp::ReportBlock;
+
+namespace webrtc {
+namespace {
+
+const uint32_t kRemoteSsrc = 0x23456789;
+const uint8_t kFractionLost = 55;
+// Use values that serialize differently in little- and big-endian order.
+const int32_t kCumulativeLost = 0x111213;
+const uint32_t kExtHighestSeqNum = 0x22232425;
+const uint32_t kJitter = 0x33343536;
+const uint32_t kLastSr = 0x44454647;
+const uint32_t kDelayLastSr = 0x55565758;
+const size_t kBufferLength = ReportBlock::kLength;
+
+TEST(RtcpPacketReportBlockTest, ParseChecksLength) {
+ uint8_t buffer[kBufferLength];
+ memset(buffer, 0, sizeof(buffer));
+
+ ReportBlock rb;
+ EXPECT_FALSE(rb.Parse(buffer, kBufferLength - 1));
+ EXPECT_TRUE(rb.Parse(buffer, kBufferLength));
+}
+
+TEST(RtcpPacketReportBlockTest, ParseAnyData) {
+ uint8_t buffer[kBufferLength];
+ // Fill buffer with semi-random data.
+ Random generator(0x256F8A285EC829ull);
+ for (size_t i = 0; i < kBufferLength; ++i)
+ buffer[i] = static_cast<uint8_t>(generator.Rand(0, 0xff));
+
+ ReportBlock rb;
+ EXPECT_TRUE(rb.Parse(buffer, kBufferLength));
+}
+
+TEST(RtcpPacketReportBlockTest, ParseMatchCreate) {
+ ReportBlock rb;
+ rb.SetMediaSsrc(kRemoteSsrc);
+ rb.SetFractionLost(kFractionLost);
+ rb.SetCumulativeLost(kCumulativeLost);
+ rb.SetExtHighestSeqNum(kExtHighestSeqNum);
+ rb.SetJitter(kJitter);
+ rb.SetLastSr(kLastSr);
+ rb.SetDelayLastSr(kDelayLastSr);
+
+ uint8_t buffer[kBufferLength];
+ rb.Create(buffer);
+
+ ReportBlock parsed;
+ EXPECT_TRUE(parsed.Parse(buffer, kBufferLength));
+
+ EXPECT_EQ(kRemoteSsrc, parsed.source_ssrc());
+ EXPECT_EQ(kFractionLost, parsed.fraction_lost());
+ EXPECT_EQ(kCumulativeLost, parsed.cumulative_lost_signed());
+ EXPECT_EQ(kExtHighestSeqNum, parsed.extended_high_seq_num());
+ EXPECT_EQ(kJitter, parsed.jitter());
+ EXPECT_EQ(kLastSr, parsed.last_sr());
+ EXPECT_EQ(kDelayLastSr, parsed.delay_since_last_sr());
+}
+
+TEST(RtcpPacketReportBlockTest, ValidateCumulativeLost) {
+ // CumulativeLost is a signed 24-bit integer.
+ // However, existing code expects it to be an unsigned integer.
+  // The case of negative values should be unusual; we return 0 when the
+  // caller asks for an unsigned integer.
+ const int32_t kMaxCumulativeLost = 0x7fffff;
+ const int32_t kMinCumulativeLost = -0x800000;
+ ReportBlock rb;
+ EXPECT_FALSE(rb.SetCumulativeLost(kMaxCumulativeLost + 1));
+ EXPECT_TRUE(rb.SetCumulativeLost(kMaxCumulativeLost));
+ EXPECT_FALSE(rb.SetCumulativeLost(kMinCumulativeLost - 1));
+ EXPECT_TRUE(rb.SetCumulativeLost(kMinCumulativeLost));
+ EXPECT_EQ(kMinCumulativeLost, rb.cumulative_lost_signed());
+ EXPECT_EQ(0u, rb.cumulative_lost());
+}
+
+TEST(RtcpPacketReportBlockTest, ParseNegativeCumulativeLost) {
+ // CumulativeLost is a signed 24-bit integer.
+ const int32_t kNegativeCumulativeLost = -123;
+ ReportBlock rb;
+ EXPECT_TRUE(rb.SetCumulativeLost(kNegativeCumulativeLost));
+
+ uint8_t buffer[kBufferLength];
+ rb.Create(buffer);
+
+ ReportBlock parsed;
+ EXPECT_TRUE(parsed.Parse(buffer, kBufferLength));
+
+ EXPECT_EQ(kNegativeCumulativeLost, parsed.cumulative_lost_signed());
+}
+
+} // namespace
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/rrtr.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/rrtr.cc
new file mode 100644
index 0000000000..95fc890b19
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/rrtr.cc
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtcp_packet/rrtr.h"
+
+#include "modules/rtp_rtcp/source/byte_io.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace rtcp {
+// Receiver Reference Time Report Block (RFC 3611).
+//
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | BT=4 | reserved | block length = 2 |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | NTP timestamp, most significant word |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | NTP timestamp, least significant word |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
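+//
+// The NTP timestamp uses the standard 64-bit NTP format: 32 bits of seconds
+// since 1 January 1900 plus 32 bits holding the fraction of a second in
+// units of 2^-32 seconds.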
+
+void Rrtr::Parse(const uint8_t* buffer) {
+ RTC_DCHECK(buffer[0] == kBlockType);
+ // reserved = buffer[1];
+ RTC_DCHECK(ByteReader<uint16_t>::ReadBigEndian(&buffer[2]) == kBlockLength);
+ uint32_t seconds = ByteReader<uint32_t>::ReadBigEndian(&buffer[4]);
+ uint32_t fraction = ByteReader<uint32_t>::ReadBigEndian(&buffer[8]);
+ ntp_.Set(seconds, fraction);
+}
+
+void Rrtr::Create(uint8_t* buffer) const {
+ const uint8_t kReserved = 0;
+ buffer[0] = kBlockType;
+ buffer[1] = kReserved;
+ ByteWriter<uint16_t>::WriteBigEndian(&buffer[2], kBlockLength);
+ ByteWriter<uint32_t>::WriteBigEndian(&buffer[4], ntp_.seconds());
+ ByteWriter<uint32_t>::WriteBigEndian(&buffer[8], ntp_.fractions());
+}
+
+} // namespace rtcp
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/rrtr.h b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/rrtr.h
new file mode 100644
index 0000000000..827bd74399
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/rrtr.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ *
+ */
+
+#ifndef MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_RRTR_H_
+#define MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_RRTR_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "system_wrappers/include/ntp_time.h"
+
+namespace webrtc {
+namespace rtcp {
+
+class Rrtr {
+ public:
+ static const uint8_t kBlockType = 4;
+ static const uint16_t kBlockLength = 2;
+ static const size_t kLength = 4 * (kBlockLength + 1); // 12
+
+ Rrtr() {}
+ Rrtr(const Rrtr&) = default;
+ ~Rrtr() {}
+
+ Rrtr& operator=(const Rrtr&) = default;
+
+ void Parse(const uint8_t* buffer);
+
+ // Fills buffer with the Rrtr.
+ // Consumes Rrtr::kLength bytes.
+ void Create(uint8_t* buffer) const;
+
+ void SetNtp(NtpTime ntp) { ntp_ = ntp; }
+
+ NtpTime ntp() const { return ntp_; }
+
+ private:
+ NtpTime ntp_;
+};
+
+inline bool operator==(const Rrtr& rrtr1, const Rrtr& rrtr2) {
+ return rrtr1.ntp() == rrtr2.ntp();
+}
+
+inline bool operator!=(const Rrtr& rrtr1, const Rrtr& rrtr2) {
+ return !(rrtr1 == rrtr2);
+}
+
+} // namespace rtcp
+} // namespace webrtc
+#endif // MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_RRTR_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/rrtr_unittest.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/rrtr_unittest.cc
new file mode 100644
index 0000000000..56622ea81a
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/rrtr_unittest.cc
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtcp_packet/rrtr.h"
+
+#include "test/gtest.h"
+
+using webrtc::rtcp::Rrtr;
+
+namespace webrtc {
+namespace {
+
+const uint32_t kNtpSec = 0x12345678;
+const uint32_t kNtpFrac = 0x23456789;
+const uint8_t kBlock[] = {0x04, 0x00, 0x00, 0x02, 0x12, 0x34,
+ 0x56, 0x78, 0x23, 0x45, 0x67, 0x89};
+const size_t kBlockSizeBytes = sizeof(kBlock);
+static_assert(
+ kBlockSizeBytes == Rrtr::kLength,
+ "Size of manually created Rrtr block should match class constant");
+
+TEST(RtcpPacketRrtrTest, Create) {
+ uint8_t buffer[Rrtr::kLength];
+ Rrtr rrtr;
+ rrtr.SetNtp(NtpTime(kNtpSec, kNtpFrac));
+
+ rrtr.Create(buffer);
+ EXPECT_EQ(0, memcmp(buffer, kBlock, kBlockSizeBytes));
+}
+
+TEST(RtcpPacketRrtrTest, Parse) {
+ Rrtr read_rrtr;
+ read_rrtr.Parse(kBlock);
+
+  // Run checks on a const object to ensure all accessors are const-qualified.
+ const Rrtr& parsed = read_rrtr;
+
+ EXPECT_EQ(kNtpSec, parsed.ntp().seconds());
+ EXPECT_EQ(kNtpFrac, parsed.ntp().fractions());
+}
+
+} // namespace
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/rtpfb.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/rtpfb.cc
new file mode 100644
index 0000000000..18097de330
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/rtpfb.cc
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtcp_packet/rtpfb.h"
+
+#include "modules/rtp_rtcp/source/byte_io.h"
+
+namespace webrtc {
+namespace rtcp {
+constexpr uint8_t Rtpfb::kPacketType;
+// RFC 4585, Section 6.1: Feedback format.
+//
+// Common packet format:
+//
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |V=2|P| FMT | PT | length |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// 0 | SSRC of packet sender |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// 4 | SSRC of media source |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// : Feedback Control Information (FCI) :
+// : :
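+//
+// The FMT field selects the concrete feedback message type (for example,
+// FMT=1 is the generic NACK of RFC 4585 and FMT=15 is used for
+// transport-wide congestion control feedback); subclasses fill in the FCI.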
+
+void Rtpfb::ParseCommonFeedback(const uint8_t* payload) {
+ SetSenderSsrc(ByteReader<uint32_t>::ReadBigEndian(&payload[0]));
+ SetMediaSsrc(ByteReader<uint32_t>::ReadBigEndian(&payload[4]));
+}
+
+void Rtpfb::CreateCommonFeedback(uint8_t* payload) const {
+ ByteWriter<uint32_t>::WriteBigEndian(&payload[0], sender_ssrc());
+ ByteWriter<uint32_t>::WriteBigEndian(&payload[4], media_ssrc());
+}
+
+} // namespace rtcp
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/rtpfb.h b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/rtpfb.h
new file mode 100644
index 0000000000..973b429a2d
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/rtpfb.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ *
+ */
+
+#ifndef MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_RTPFB_H_
+#define MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_RTPFB_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "modules/rtp_rtcp/source/rtcp_packet.h"
+
+namespace webrtc {
+namespace rtcp {
+
+// RTPFB: Transport layer feedback message.
+// RFC4585, Section 6.2
+class Rtpfb : public RtcpPacket {
+ public:
+ static constexpr uint8_t kPacketType = 205;
+
+ Rtpfb() = default;
+ ~Rtpfb() override = default;
+
+ void SetMediaSsrc(uint32_t ssrc) { media_ssrc_ = ssrc; }
+
+ uint32_t media_ssrc() const { return media_ssrc_; }
+
+ protected:
+ static constexpr size_t kCommonFeedbackLength = 8;
+ void ParseCommonFeedback(const uint8_t* payload);
+ void CreateCommonFeedback(uint8_t* payload) const;
+
+ private:
+ uint32_t media_ssrc_ = 0;
+};
+
+} // namespace rtcp
+} // namespace webrtc
+#endif // MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_RTPFB_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/sdes.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/sdes.cc
new file mode 100644
index 0000000000..f244ec5f37
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/sdes.cc
@@ -0,0 +1,199 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtcp_packet/sdes.h"
+
+#include <string.h>
+
+#include <utility>
+
+#include "absl/strings/string_view.h"
+#include "modules/rtp_rtcp/source/byte_io.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/common_header.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+namespace rtcp {
+constexpr uint8_t Sdes::kPacketType;
+constexpr size_t Sdes::kMaxNumberOfChunks;
+// Source Description (SDES) (RFC 3550).
+//
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// header |V=2|P| SC | PT=SDES=202 | length |
+// +=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
+// chunk | SSRC/CSRC_1 |
+// 1 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | SDES items |
+// | ... |
+// +=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
+// chunk | SSRC/CSRC_2 |
+// 2 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | SDES items |
+// | ... |
+// +=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
+//
+// Canonical End-Point Identifier SDES Item (CNAME)
+//
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | CNAME=1 | length | user and domain name ...
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+namespace {
+const uint8_t kTerminatorTag = 0;
+const uint8_t kCnameTag = 1;
+
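+// Example: a 2-byte cname gives chunk_payload_size = 4 + 1 + 1 + 2 = 8, so
+// padding_size is 4 and the chunk occupies 12 bytes in total. The padding
+// bytes are zeros; the first one doubles as the mandatory item terminator,
+// hence the minimum padding of 1.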
+size_t ChunkSize(const Sdes::Chunk& chunk) {
+ // Chunk:
+ // SSRC/CSRC (4 bytes) | CNAME=1 (1 byte) | length (1 byte) | cname | padding.
+ size_t chunk_payload_size = 4 + 1 + 1 + chunk.cname.size();
+ size_t padding_size = 4 - (chunk_payload_size % 4); // Minimum 1.
+ return chunk_payload_size + padding_size;
+}
+} // namespace
+
+Sdes::Sdes() : block_length_(RtcpPacket::kHeaderLength) {}
+
+Sdes::~Sdes() {}
+
+bool Sdes::Parse(const CommonHeader& packet) {
+ RTC_DCHECK_EQ(packet.type(), kPacketType);
+
+ uint8_t number_of_chunks = packet.count();
+  std::vector<Chunk> chunks;  // Read chunks into a temporary array so that,
+                              // in case of an error, the original array
+                              // stays unchanged.
+ size_t block_length = kHeaderLength;
+
+ if (packet.payload_size_bytes() % 4 != 0) {
+ RTC_LOG(LS_WARNING) << "Invalid payload size "
+ << packet.payload_size_bytes()
+                        << " bytes for a valid Sdes packet. Size should be"
+                           " a multiple of 4 bytes";
+ }
+ const uint8_t* const payload_end =
+ packet.payload() + packet.payload_size_bytes();
+ const uint8_t* looking_at = packet.payload();
+ chunks.resize(number_of_chunks);
+ for (size_t i = 0; i < number_of_chunks;) {
+ // Each chunk consumes at least 8 bytes.
+ if (payload_end - looking_at < 8) {
+ RTC_LOG(LS_WARNING) << "Not enough space left for chunk #" << (i + 1);
+ return false;
+ }
+ chunks[i].ssrc = ByteReader<uint32_t>::ReadBigEndian(looking_at);
+ looking_at += sizeof(uint32_t);
+ bool cname_found = false;
+
+ uint8_t item_type;
+ while ((item_type = *(looking_at++)) != kTerminatorTag) {
+ if (looking_at >= payload_end) {
+ RTC_LOG(LS_WARNING)
+ << "Unexpected end of packet while reading chunk #" << (i + 1)
+ << ". Expected to find size of the text.";
+ return false;
+ }
+ uint8_t item_length = *(looking_at++);
+ const size_t kTerminatorSize = 1;
+ if (looking_at + item_length + kTerminatorSize > payload_end) {
+ RTC_LOG(LS_WARNING)
+ << "Unexpected end of packet while reading chunk #" << (i + 1)
+ << ". Expected to find text of size " << item_length;
+ return false;
+ }
+ if (item_type == kCnameTag) {
+ if (cname_found) {
+ RTC_LOG(LS_WARNING)
+ << "Found extra CNAME for same ssrc in chunk #" << (i + 1);
+ return false;
+ }
+ cname_found = true;
+ chunks[i].cname.assign(reinterpret_cast<const char*>(looking_at),
+ item_length);
+ }
+ looking_at += item_length;
+ }
+ if (cname_found) {
+      // block_length tracks the length of the packet that would be
+      // generated by the Build/Create functions. Adjust it the same way
+      // AddCName does.
+ block_length += ChunkSize(chunks[i]);
+ ++i;
+ } else {
+      // The RFC states that the CNAME item is mandatory, but at the same
+      // time it allows chunks without any items. So while parsing, ignore
+      // chunks without a CNAME, but do not fail the parse.
+ RTC_LOG(LS_WARNING) << "CNAME not found for ssrc " << chunks[i].ssrc;
+ --number_of_chunks;
+ chunks.resize(number_of_chunks);
+ }
+    // Adjust to a 32-bit boundary.
+ looking_at += (payload_end - looking_at) % 4;
+ }
+
+ chunks_ = std::move(chunks);
+ block_length_ = block_length;
+ return true;
+}
+
+bool Sdes::AddCName(uint32_t ssrc, absl::string_view cname) {
+ RTC_DCHECK_LE(cname.length(), 0xffu);
+ if (chunks_.size() >= kMaxNumberOfChunks) {
+ RTC_LOG(LS_WARNING) << "Max SDES chunks reached.";
+ return false;
+ }
+ Chunk chunk;
+ chunk.ssrc = ssrc;
+ chunk.cname = std::string(cname);
+ chunks_.push_back(chunk);
+ block_length_ += ChunkSize(chunk);
+ return true;
+}
+
+size_t Sdes::BlockLength() const {
+ return block_length_;
+}
+
+bool Sdes::Create(uint8_t* packet,
+ size_t* index,
+ size_t max_length,
+ PacketReadyCallback callback) const {
+ while (*index + BlockLength() > max_length) {
+ if (!OnBufferFull(packet, index, callback))
+ return false;
+ }
+ const size_t index_end = *index + BlockLength();
+ CreateHeader(chunks_.size(), kPacketType, HeaderLength(), packet, index);
+
+ for (const Sdes::Chunk& chunk : chunks_) {
+ ByteWriter<uint32_t>::WriteBigEndian(&packet[*index + 0], chunk.ssrc);
+ ByteWriter<uint8_t>::WriteBigEndian(&packet[*index + 4], kCnameTag);
+ ByteWriter<uint8_t>::WriteBigEndian(
+ &packet[*index + 5], static_cast<uint8_t>(chunk.cname.size()));
+ memcpy(&packet[*index + 6], chunk.cname.data(), chunk.cname.size());
+ *index += (6 + chunk.cname.size());
+
+ // In each chunk, the list of items must be terminated by one or more null
+ // octets. The next chunk must start on a 32-bit boundary.
+ // CNAME (1 byte) | length (1 byte) | name | padding.
+ size_t padding_size = 4 - ((6 + chunk.cname.size()) % 4);
+ const int kPadding = 0;
+ memset(packet + *index, kPadding, padding_size);
+ *index += padding_size;
+ }
+
+ RTC_CHECK_EQ(*index, index_end);
+ return true;
+}
+} // namespace rtcp
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/sdes.h b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/sdes.h
new file mode 100644
index 0000000000..36b63ba29f
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/sdes.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_SDES_H_
+#define MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_SDES_H_
+
+#include <string>
+#include <vector>
+
+#include "absl/strings/string_view.h"
+#include "modules/rtp_rtcp/source/rtcp_packet.h"
+
+namespace webrtc {
+namespace rtcp {
+class CommonHeader;
+// Source Description (SDES) (RFC 3550).
+class Sdes : public RtcpPacket {
+ public:
+ struct Chunk {
+ uint32_t ssrc;
+ std::string cname;
+ };
+ static constexpr uint8_t kPacketType = 202;
+ static constexpr size_t kMaxNumberOfChunks = 0x1f;
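+  // The SC (source count) field in the SDES header is 5 bits wide, so a
+  // single packet can hold at most 31 chunks.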
+
+ Sdes();
+ ~Sdes() override;
+
+ // Parse assumes header is already parsed and validated.
+ bool Parse(const CommonHeader& packet);
+
+ bool AddCName(uint32_t ssrc, absl::string_view cname);
+
+ const std::vector<Chunk>& chunks() const { return chunks_; }
+
+ size_t BlockLength() const override;
+
+ bool Create(uint8_t* packet,
+ size_t* index,
+ size_t max_length,
+ PacketReadyCallback callback) const override;
+
+ private:
+ std::vector<Chunk> chunks_;
+ size_t block_length_;
+};
+} // namespace rtcp
+} // namespace webrtc
+#endif // MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_SDES_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/sdes_unittest.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/sdes_unittest.cc
new file mode 100644
index 0000000000..15a39efe87
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/sdes_unittest.cc
@@ -0,0 +1,244 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtcp_packet/sdes.h"
+
+#include "rtc_base/strings/string_builder.h"
+#include "test/gtest.h"
+#include "test/rtcp_packet_parser.h"
+
+using webrtc::rtcp::Sdes;
+
+namespace webrtc {
+namespace {
+const uint32_t kSenderSsrc = 0x12345678;
+const uint8_t kPadding = 0;
+const uint8_t kTerminatorTag = 0;
+const uint8_t kCnameTag = 1;
+const uint8_t kNameTag = 2;
+const uint8_t kEmailTag = 3;
+} // namespace
+
+TEST(RtcpPacketSdesTest, CreateAndParseWithoutChunks) {
+ Sdes sdes;
+
+ rtc::Buffer packet = sdes.Build();
+ Sdes parsed;
+ EXPECT_TRUE(test::ParseSinglePacket(packet, &parsed));
+
+ EXPECT_EQ(0u, parsed.chunks().size());
+}
+
+TEST(RtcpPacketSdesTest, CreateAndParseWithOneChunk) {
+ const std::string kCname = "alice@host";
+
+ Sdes sdes;
+ EXPECT_TRUE(sdes.AddCName(kSenderSsrc, kCname));
+
+ rtc::Buffer packet = sdes.Build();
+ Sdes sdes_parsed;
+ EXPECT_TRUE(test::ParseSinglePacket(packet, &sdes_parsed));
+ const Sdes& parsed = sdes_parsed; // Ensure accessors are const.
+
+ EXPECT_EQ(1u, parsed.chunks().size());
+ EXPECT_EQ(kSenderSsrc, parsed.chunks()[0].ssrc);
+ EXPECT_EQ(kCname, parsed.chunks()[0].cname);
+}
+
+TEST(RtcpPacketSdesTest, CreateAndParseWithMultipleChunks) {
+ Sdes sdes;
+ EXPECT_TRUE(sdes.AddCName(kSenderSsrc + 0, "a"));
+ EXPECT_TRUE(sdes.AddCName(kSenderSsrc + 1, "ab"));
+ EXPECT_TRUE(sdes.AddCName(kSenderSsrc + 2, "abc"));
+ EXPECT_TRUE(sdes.AddCName(kSenderSsrc + 3, "abcd"));
+ EXPECT_TRUE(sdes.AddCName(kSenderSsrc + 4, "abcde"));
+ EXPECT_TRUE(sdes.AddCName(kSenderSsrc + 5, "abcdef"));
+
+ rtc::Buffer packet = sdes.Build();
+ Sdes parsed;
+ EXPECT_TRUE(test::ParseSinglePacket(packet, &parsed));
+
+ EXPECT_EQ(6u, parsed.chunks().size());
+ EXPECT_EQ(kSenderSsrc + 5, parsed.chunks()[5].ssrc);
+ EXPECT_EQ("abcdef", parsed.chunks()[5].cname);
+}
+
+TEST(RtcpPacketSdesTest, CreateWithTooManyChunks) {
+ const size_t kMaxChunks = (1 << 5) - 1;
+ Sdes sdes;
+ for (size_t i = 0; i < kMaxChunks; ++i) {
+ uint32_t ssrc = kSenderSsrc + i;
+ rtc::StringBuilder oss;
+ oss << "cname" << i;
+ EXPECT_TRUE(sdes.AddCName(ssrc, oss.str()));
+ }
+ EXPECT_FALSE(sdes.AddCName(kSenderSsrc + kMaxChunks, "foo"));
+}
+
+TEST(RtcpPacketSdesTest, CreateAndParseCnameItemWithEmptyString) {
+ Sdes sdes;
+ EXPECT_TRUE(sdes.AddCName(kSenderSsrc, ""));
+
+ rtc::Buffer packet = sdes.Build();
+ Sdes parsed;
+ EXPECT_TRUE(test::ParseSinglePacket(packet, &parsed));
+
+ EXPECT_EQ(1u, parsed.chunks().size());
+ EXPECT_EQ(kSenderSsrc, parsed.chunks()[0].ssrc);
+ EXPECT_EQ("", parsed.chunks()[0].cname);
+}
+
+TEST(RtcpPacketSdesTest, ParseSkipsNonCNameField) {
+ const uint8_t kName[] = "abc";
+ const uint8_t kCname[] = "de";
+ const uint8_t kValidPacket[] = {
+ 0x81, 202, 0x00, 0x04, 0x12, 0x34, 0x56,
+ 0x78, kNameTag, 3, kName[0], kName[1], kName[2], kCnameTag,
+ 2, kCname[0], kCname[1], kTerminatorTag, kPadding, kPadding};
+  // Sanity-check that the packet was assembled correctly.
+ ASSERT_EQ(0u, sizeof(kValidPacket) % 4);
+ ASSERT_EQ(kValidPacket[3] + 1u, sizeof(kValidPacket) / 4);
+
+ Sdes parsed;
+ EXPECT_TRUE(test::ParseSinglePacket(kValidPacket, &parsed));
+
+ EXPECT_EQ(1u, parsed.chunks().size());
+ EXPECT_EQ(kSenderSsrc, parsed.chunks()[0].ssrc);
+ EXPECT_EQ("de", parsed.chunks()[0].cname);
+}
+
+TEST(RtcpPacketSdesTest, ParseSkipsChunksWithoutCName) {
+ const uint8_t kName[] = "ab";
+ const uint8_t kEmail[] = "de";
+ const uint8_t kCname[] = "def";
+ const uint8_t kPacket[] = {
+ 0x82, 202, 0x00, 0x07, 0x12,
+ 0x34, 0x56, 0x78, // 1st chunk.
+ kNameTag, 3, kName[0], kName[1], kName[2],
+ kEmailTag, 2, kEmail[0], kEmail[1], kTerminatorTag,
+ kPadding, kPadding, 0x23, 0x45, 0x67,
+ 0x89, // 2nd chunk.
+ kCnameTag, 3, kCname[0], kCname[1], kCname[2],
+ kTerminatorTag, kPadding, kPadding};
+  // Sanity-check that the packet was assembled correctly.
+ ASSERT_EQ(0u, sizeof(kPacket) % 4);
+ ASSERT_EQ(kPacket[3] + 1u, sizeof(kPacket) / 4);
+
+ Sdes parsed;
+ EXPECT_TRUE(test::ParseSinglePacket(kPacket, &parsed));
+ ASSERT_EQ(1u, parsed.chunks().size());
+ EXPECT_EQ(0x23456789u, parsed.chunks()[0].ssrc);
+ EXPECT_EQ("def", parsed.chunks()[0].cname);
+}
+
+TEST(RtcpPacketSdesTest, ParseFailsWithoutChunkItemTerminator) {
+ const uint8_t kName[] = "abc";
+ const uint8_t kCname[] = "d";
+  // No room left for the next chunk item.
+ const uint8_t kInvalidPacket[] = {
+ 0x81, 202, 0x00, 0x03, 0x12, 0x34, 0x56, 0x78,
+ kNameTag, 3, kName[0], kName[1], kName[2], kCnameTag, 1, kCname[0]};
+  // Sanity-check that the packet was assembled correctly.
+ ASSERT_EQ(0u, sizeof(kInvalidPacket) % 4);
+ ASSERT_EQ(kInvalidPacket[3] + 1u, sizeof(kInvalidPacket) / 4);
+
+ Sdes parsed;
+ EXPECT_FALSE(test::ParseSinglePacket(kInvalidPacket, &parsed));
+}
+
+TEST(RtcpPacketSdesTest, ParseFailsWithDamagedChunkItem) {
+ const uint8_t kName[] = "ab";
+ const uint8_t kCname[] = "d";
+  // The next chunk item has a non-terminator type but no size byte.
+ const uint8_t kInvalidPacket[] = {
+ 0x81, 202, 0x00, 0x03, 0x12, 0x34, 0x56, 0x78,
+ kNameTag, 2, kName[0], kName[1], kCnameTag, 1, kCname[0], kEmailTag};
+  // Sanity-check that the packet was assembled correctly.
+ ASSERT_EQ(0u, sizeof(kInvalidPacket) % 4);
+ ASSERT_EQ(kInvalidPacket[3] + 1u, sizeof(kInvalidPacket) / 4);
+
+ Sdes parsed;
+ EXPECT_FALSE(test::ParseSinglePacket(kInvalidPacket, &parsed));
+}
+
+TEST(RtcpPacketSdesTest, ParseFailsWithTooLongChunkItem) {
+ const uint8_t kName[] = "abc";
+ const uint8_t kCname[] = "d";
+  // The last chunk item's length extends beyond the end of the buffer.
+ const uint8_t kInvalidPacket[] = {
+ 0x81, 202, 0x00, 0x03, 0x12, 0x34, 0x56, 0x78,
+ kNameTag, 3, kName[0], kName[1], kName[2], kCnameTag, 2, kCname[0]};
+  // Sanity-check that the packet was assembled correctly.
+ ASSERT_EQ(0u, sizeof(kInvalidPacket) % 4);
+ ASSERT_EQ(kInvalidPacket[3] + 1u, sizeof(kInvalidPacket) / 4);
+
+ Sdes parsed;
+ EXPECT_FALSE(test::ParseSinglePacket(kInvalidPacket, &parsed));
+}
+
+TEST(RtcpPacketSdesTest, ParseFailsWithTwoCNames) {
+ const uint8_t kCname1[] = "a";
+ const uint8_t kCname2[] = "de";
+ const uint8_t kInvalidPacket[] = {
+ 0x81, 202, 0x00, 0x03, 0x12, 0x34, 0x56,
+ 0x78, kCnameTag, 1, kCname1[0], kCnameTag, 2, kCname2[0],
+ kCname2[1], kTerminatorTag};
+  // Sanity-check that the packet was assembled correctly.
+ ASSERT_EQ(0u, sizeof(kInvalidPacket) % 4);
+ ASSERT_EQ(kInvalidPacket[3] + 1u, sizeof(kInvalidPacket) / 4);
+
+ Sdes parsed;
+ EXPECT_FALSE(test::ParseSinglePacket(kInvalidPacket, &parsed));
+}
+
+TEST(RtcpPacketSdesTest, ParseFailsWithTooLittleSpaceForNextChunk) {
+ const uint8_t kCname[] = "a";
+ const uint8_t kEmail[] = "de";
+  // Two chunks are promised in the header, but there is no room for the
+  // second chunk.
+ const uint8_t kInvalidPacket[] = {
+ 0x82, 202, 0x00, 0x04, 0x12, 0x34, 0x56,
+ 0x78, // 1st chunk.
+ kCnameTag, 1, kCname[0], kEmailTag, 2, kEmail[0], kEmail[1],
+ kTerminatorTag, 0x23, 0x45, 0x67, 0x89}; // 2nd chunk.
+  // Sanity-check that the packet was assembled correctly.
+ ASSERT_EQ(0u, sizeof(kInvalidPacket) % 4);
+ ASSERT_EQ(kInvalidPacket[3] + 1u, sizeof(kInvalidPacket) / 4);
+
+ Sdes parsed;
+ EXPECT_FALSE(test::ParseSinglePacket(kInvalidPacket, &parsed));
+}
+
+TEST(RtcpPacketSdesTest, ParsedSdesCanBeReusedForBuilding) {
+ Sdes source;
+ const std::string kAlice = "alice@host";
+ const std::string kBob = "bob@host";
+ source.AddCName(kSenderSsrc, kAlice);
+
+ rtc::Buffer packet1 = source.Build();
+ Sdes middle;
+ test::ParseSinglePacket(packet1, &middle);
+
+ EXPECT_EQ(source.BlockLength(), middle.BlockLength());
+
+ middle.AddCName(kSenderSsrc + 1, kBob);
+
+ rtc::Buffer packet2 = middle.Build();
+ Sdes destination;
+ test::ParseSinglePacket(packet2, &destination);
+
+ EXPECT_EQ(middle.BlockLength(), destination.BlockLength());
+
+ EXPECT_EQ(2u, destination.chunks().size());
+ EXPECT_EQ(kSenderSsrc, destination.chunks()[0].ssrc);
+ EXPECT_EQ(kAlice, destination.chunks()[0].cname);
+ EXPECT_EQ(kSenderSsrc + 1, destination.chunks()[1].ssrc);
+ EXPECT_EQ(kBob, destination.chunks()[1].cname);
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/sender_report.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/sender_report.cc
new file mode 100644
index 0000000000..73738376c3
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/sender_report.cc
@@ -0,0 +1,141 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtcp_packet/sender_report.h"
+
+#include <utility>
+
+#include "modules/rtp_rtcp/source/byte_io.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/common_header.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+namespace rtcp {
+constexpr uint8_t SenderReport::kPacketType;
+constexpr size_t SenderReport::kMaxNumberOfReportBlocks;
+constexpr size_t SenderReport::kSenderBaseLength;
+// Sender report (SR) (RFC 3550).
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |V=2|P| RC | PT=SR=200 | length |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// 0 | SSRC of sender |
+// +=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
+// 4 | NTP timestamp, most significant word |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// 8 | NTP timestamp, least significant word |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// 12 | RTP timestamp |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// 16 | sender's packet count |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// 20 | sender's octet count |
+// 24 +=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
+
+SenderReport::SenderReport()
+ : rtp_timestamp_(0), sender_packet_count_(0), sender_octet_count_(0) {}
+
+SenderReport::SenderReport(const SenderReport&) = default;
+SenderReport::SenderReport(SenderReport&&) = default;
+SenderReport& SenderReport::operator=(const SenderReport&) = default;
+SenderReport& SenderReport::operator=(SenderReport&&) = default;
+SenderReport::~SenderReport() = default;
+
+bool SenderReport::Parse(const CommonHeader& packet) {
+ RTC_DCHECK_EQ(packet.type(), kPacketType);
+
+ const uint8_t report_block_count = packet.count();
+ if (packet.payload_size_bytes() <
+ kSenderBaseLength + report_block_count * ReportBlock::kLength) {
+ RTC_LOG(LS_WARNING) << "Packet is too small to contain all the data.";
+ return false;
+ }
+ // Read SenderReport header.
+ const uint8_t* const payload = packet.payload();
+ SetSenderSsrc(ByteReader<uint32_t>::ReadBigEndian(&payload[0]));
+ uint32_t secs = ByteReader<uint32_t>::ReadBigEndian(&payload[4]);
+ uint32_t frac = ByteReader<uint32_t>::ReadBigEndian(&payload[8]);
+ ntp_.Set(secs, frac);
+ rtp_timestamp_ = ByteReader<uint32_t>::ReadBigEndian(&payload[12]);
+ sender_packet_count_ = ByteReader<uint32_t>::ReadBigEndian(&payload[16]);
+ sender_octet_count_ = ByteReader<uint32_t>::ReadBigEndian(&payload[20]);
+ report_blocks_.resize(report_block_count);
+ const uint8_t* next_block = payload + kSenderBaseLength;
+ for (ReportBlock& block : report_blocks_) {
+ bool block_parsed = block.Parse(next_block, ReportBlock::kLength);
+ RTC_DCHECK(block_parsed);
+ next_block += ReportBlock::kLength;
+ }
+  // Double-check we didn't read beyond the provided buffer.
+ RTC_DCHECK_LE(next_block - payload,
+ static_cast<ptrdiff_t>(packet.payload_size_bytes()));
+ return true;
+}
+
+size_t SenderReport::BlockLength() const {
+ return kHeaderLength + kSenderBaseLength +
+ report_blocks_.size() * ReportBlock::kLength;
+}
+
+bool SenderReport::Create(uint8_t* packet,
+ size_t* index,
+ size_t max_length,
+ PacketReadyCallback callback) const {
+ while (*index + BlockLength() > max_length) {
+ if (!OnBufferFull(packet, index, callback))
+ return false;
+ }
+ const size_t index_end = *index + BlockLength();
+
+ CreateHeader(report_blocks_.size(), kPacketType, HeaderLength(), packet,
+ index);
+ // Write SenderReport header.
+ ByteWriter<uint32_t>::WriteBigEndian(&packet[*index + 0], sender_ssrc());
+ ByteWriter<uint32_t>::WriteBigEndian(&packet[*index + 4], ntp_.seconds());
+ ByteWriter<uint32_t>::WriteBigEndian(&packet[*index + 8], ntp_.fractions());
+ ByteWriter<uint32_t>::WriteBigEndian(&packet[*index + 12], rtp_timestamp_);
+ ByteWriter<uint32_t>::WriteBigEndian(&packet[*index + 16],
+ sender_packet_count_);
+ ByteWriter<uint32_t>::WriteBigEndian(&packet[*index + 20],
+ sender_octet_count_);
+ *index += kSenderBaseLength;
+ // Write report blocks.
+ for (const ReportBlock& block : report_blocks_) {
+ block.Create(packet + *index);
+ *index += ReportBlock::kLength;
+ }
+ // Ensure bytes written match expected.
+ RTC_DCHECK_EQ(*index, index_end);
+ return true;
+}
+
+bool SenderReport::AddReportBlock(const ReportBlock& block) {
+ if (report_blocks_.size() >= kMaxNumberOfReportBlocks) {
+ RTC_LOG(LS_WARNING) << "Max report blocks reached.";
+ return false;
+ }
+ report_blocks_.push_back(block);
+ return true;
+}
+
+bool SenderReport::SetReportBlocks(std::vector<ReportBlock> blocks) {
+ if (blocks.size() > kMaxNumberOfReportBlocks) {
+ RTC_LOG(LS_WARNING) << "Too many report blocks (" << blocks.size()
+ << ") for sender report.";
+ return false;
+ }
+ report_blocks_ = std::move(blocks);
+ return true;
+}
+
+} // namespace rtcp
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/sender_report.h b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/sender_report.h
new file mode 100644
index 0000000000..66ced31721
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/sender_report.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_SENDER_REPORT_H_
+#define MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_SENDER_REPORT_H_
+
+#include <vector>
+
+#include "modules/rtp_rtcp/source/rtcp_packet.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/report_block.h"
+#include "system_wrappers/include/ntp_time.h"
+
+namespace webrtc {
+namespace rtcp {
+class CommonHeader;
+
+class SenderReport : public RtcpPacket {
+ public:
+ static constexpr uint8_t kPacketType = 200;
+ static constexpr size_t kMaxNumberOfReportBlocks = 0x1f;
+
+ SenderReport();
+ SenderReport(const SenderReport&);
+ SenderReport(SenderReport&&);
+ SenderReport& operator=(const SenderReport&);
+ SenderReport& operator=(SenderReport&&);
+ ~SenderReport() override;
+
+ // Parse assumes header is already parsed and validated.
+ bool Parse(const CommonHeader& packet);
+
+ void SetNtp(NtpTime ntp) { ntp_ = ntp; }
+ void SetRtpTimestamp(uint32_t rtp_timestamp) {
+ rtp_timestamp_ = rtp_timestamp;
+ }
+ void SetPacketCount(uint32_t packet_count) {
+ sender_packet_count_ = packet_count;
+ }
+ void SetOctetCount(uint32_t octet_count) {
+ sender_octet_count_ = octet_count;
+ }
+ bool AddReportBlock(const ReportBlock& block);
+ bool SetReportBlocks(std::vector<ReportBlock> blocks);
+ void ClearReportBlocks() { report_blocks_.clear(); }
+
+ NtpTime ntp() const { return ntp_; }
+ uint32_t rtp_timestamp() const { return rtp_timestamp_; }
+ uint32_t sender_packet_count() const { return sender_packet_count_; }
+ uint32_t sender_octet_count() const { return sender_octet_count_; }
+
+ const std::vector<ReportBlock>& report_blocks() const {
+ return report_blocks_;
+ }
+
+ size_t BlockLength() const override;
+
+ bool Create(uint8_t* packet,
+ size_t* index,
+ size_t max_length,
+ PacketReadyCallback callback) const override;
+
+ private:
+ static constexpr size_t kSenderBaseLength = 24;
+
+ NtpTime ntp_;
+ uint32_t rtp_timestamp_;
+ uint32_t sender_packet_count_;
+ uint32_t sender_octet_count_;
+ std::vector<ReportBlock> report_blocks_;
+};
+
+} // namespace rtcp
+} // namespace webrtc
+#endif // MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_SENDER_REPORT_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/sender_report_unittest.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/sender_report_unittest.cc
new file mode 100644
index 0000000000..37f268e6b4
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/sender_report_unittest.cc
@@ -0,0 +1,142 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtcp_packet/sender_report.h"
+
+#include <utility>
+
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "test/rtcp_packet_parser.h"
+
+using ::testing::ElementsAreArray;
+using ::testing::make_tuple;
+using webrtc::rtcp::ReportBlock;
+using webrtc::rtcp::SenderReport;
+
+namespace webrtc {
+namespace {
+const uint32_t kSenderSsrc = 0x12345678;
+const uint32_t kRemoteSsrc = 0x23456789;
+const NtpTime kNtp(0x11121418, 0x22242628);
+const uint32_t kRtpTimestamp = 0x33343536;
+const uint32_t kPacketCount = 0x44454647;
+const uint32_t kOctetCount = 0x55565758;
+const uint8_t kPacket[] = {0x80, 200, 0x00, 0x06, 0x12, 0x34, 0x56,
+ 0x78, 0x11, 0x12, 0x14, 0x18, 0x22, 0x24,
+ 0x26, 0x28, 0x33, 0x34, 0x35, 0x36, 0x44,
+ 0x45, 0x46, 0x47, 0x55, 0x56, 0x57, 0x58};
+} // namespace
+
+TEST(RtcpPacketSenderReportTest, CreateWithoutReportBlocks) {
+ SenderReport sr;
+ sr.SetSenderSsrc(kSenderSsrc);
+ sr.SetNtp(kNtp);
+ sr.SetRtpTimestamp(kRtpTimestamp);
+ sr.SetPacketCount(kPacketCount);
+ sr.SetOctetCount(kOctetCount);
+
+ rtc::Buffer raw = sr.Build();
+ EXPECT_THAT(make_tuple(raw.data(), raw.size()), ElementsAreArray(kPacket));
+}
+
+TEST(RtcpPacketSenderReportTest, ParseWithoutReportBlocks) {
+ SenderReport parsed;
+ EXPECT_TRUE(test::ParseSinglePacket(kPacket, &parsed));
+
+ EXPECT_EQ(kSenderSsrc, parsed.sender_ssrc());
+ EXPECT_EQ(kNtp, parsed.ntp());
+ EXPECT_EQ(kRtpTimestamp, parsed.rtp_timestamp());
+ EXPECT_EQ(kPacketCount, parsed.sender_packet_count());
+ EXPECT_EQ(kOctetCount, parsed.sender_octet_count());
+ EXPECT_TRUE(parsed.report_blocks().empty());
+}
+
+TEST(RtcpPacketSenderReportTest, CreateAndParseWithOneReportBlock) {
+ ReportBlock rb;
+ rb.SetMediaSsrc(kRemoteSsrc);
+
+ SenderReport sr;
+ sr.SetSenderSsrc(kSenderSsrc);
+ EXPECT_TRUE(sr.AddReportBlock(rb));
+
+ rtc::Buffer raw = sr.Build();
+ SenderReport parsed;
+ EXPECT_TRUE(test::ParseSinglePacket(raw, &parsed));
+
+ EXPECT_EQ(kSenderSsrc, parsed.sender_ssrc());
+ EXPECT_EQ(1u, parsed.report_blocks().size());
+ EXPECT_EQ(kRemoteSsrc, parsed.report_blocks()[0].source_ssrc());
+}
+
+TEST(RtcpPacketSenderReportTest, CreateAndParseWithTwoReportBlocks) {
+ ReportBlock rb1;
+ rb1.SetMediaSsrc(kRemoteSsrc);
+ ReportBlock rb2;
+ rb2.SetMediaSsrc(kRemoteSsrc + 1);
+
+ SenderReport sr;
+ sr.SetSenderSsrc(kSenderSsrc);
+ EXPECT_TRUE(sr.AddReportBlock(rb1));
+ EXPECT_TRUE(sr.AddReportBlock(rb2));
+
+ rtc::Buffer raw = sr.Build();
+ SenderReport parsed;
+ EXPECT_TRUE(test::ParseSinglePacket(raw, &parsed));
+
+ EXPECT_EQ(kSenderSsrc, parsed.sender_ssrc());
+ EXPECT_EQ(2u, parsed.report_blocks().size());
+ EXPECT_EQ(kRemoteSsrc, parsed.report_blocks()[0].source_ssrc());
+ EXPECT_EQ(kRemoteSsrc + 1, parsed.report_blocks()[1].source_ssrc());
+}
+
+TEST(RtcpPacketSenderReportTest, CreateWithTooManyReportBlocks) {
+ SenderReport sr;
+ sr.SetSenderSsrc(kSenderSsrc);
+ ReportBlock rb;
+ for (size_t i = 0; i < SenderReport::kMaxNumberOfReportBlocks; ++i) {
+ rb.SetMediaSsrc(kRemoteSsrc + i);
+ EXPECT_TRUE(sr.AddReportBlock(rb));
+ }
+ rb.SetMediaSsrc(kRemoteSsrc + SenderReport::kMaxNumberOfReportBlocks);
+ EXPECT_FALSE(sr.AddReportBlock(rb));
+}
+
+TEST(RtcpPacketSenderReportTest, SetReportBlocksOverwritesOldBlocks) {
+ SenderReport sr;
+ ReportBlock report_block;
+ // Use jitter field of the report blocks to distinguish them.
+ report_block.SetJitter(1001u);
+ sr.AddReportBlock(report_block);
+ ASSERT_EQ(sr.report_blocks().size(), 1u);
+ ASSERT_EQ(sr.report_blocks()[0].jitter(), 1001u);
+
+ std::vector<ReportBlock> blocks(3u);
+ blocks[0].SetJitter(2001u);
+ blocks[1].SetJitter(3001u);
+ blocks[2].SetJitter(4001u);
+ EXPECT_TRUE(sr.SetReportBlocks(blocks));
+ ASSERT_EQ(sr.report_blocks().size(), 3u);
+ EXPECT_EQ(sr.report_blocks()[0].jitter(), 2001u);
+ EXPECT_EQ(sr.report_blocks()[1].jitter(), 3001u);
+ EXPECT_EQ(sr.report_blocks()[2].jitter(), 4001u);
+}
+
+TEST(RtcpPacketSenderReportTest, SetReportBlocksMaxLimit) {
+ SenderReport sr;
+ std::vector<ReportBlock> max_blocks(SenderReport::kMaxNumberOfReportBlocks);
+ EXPECT_TRUE(sr.SetReportBlocks(std::move(max_blocks)));
+
+ std::vector<ReportBlock> one_too_many_blocks(
+ SenderReport::kMaxNumberOfReportBlocks + 1);
+ EXPECT_FALSE(sr.SetReportBlocks(std::move(one_too_many_blocks)));
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/target_bitrate.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/target_bitrate.cc
new file mode 100644
index 0000000000..601b24fe94
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/target_bitrate.cc
@@ -0,0 +1,127 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtcp_packet/target_bitrate.h"
+
+#include "modules/rtp_rtcp/source/byte_io.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/numerics/safe_conversions.h"
+
+namespace webrtc {
+namespace rtcp {
+constexpr size_t kTargetBitrateHeaderSizeBytes = 4;
+constexpr uint8_t TargetBitrate::kBlockType;
+const size_t TargetBitrate::kBitrateItemSizeBytes = 4;
+
+TargetBitrate::BitrateItem::BitrateItem()
+ : spatial_layer(0), temporal_layer(0), target_bitrate_kbps(0) {}
+
+TargetBitrate::BitrateItem::BitrateItem(uint8_t spatial_layer,
+ uint8_t temporal_layer,
+ uint32_t target_bitrate_kbps)
+ : spatial_layer(spatial_layer),
+ temporal_layer(temporal_layer),
+ target_bitrate_kbps(target_bitrate_kbps) {}
+
+// RFC 4585: Feedback format.
+//
+// Common packet format:
+//
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | BT=42 | reserved | block length |
+// +=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
+//
+// Target bitrate item (repeat as many times as necessary).
+//
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | S | T | Target Bitrate |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// : ... :
+//
+// Spatial Layer (S): 4 bits
+//   Indicates which spatial layer this bitrate concerns.
+//
+// Temporal Layer (T): 4 bits
+// Indicates which temporal layer this bitrate concerns.
+//
+// Target Bitrate: 24 bits
+// The encoder target bitrate for this layer, in kbps.
+//
+// As an example of how S and T are intended to be used, VP8 simulcast will
+// use a separate TargetBitrate message per stream, since they are transmitted
+// on separate SSRCs, with temporal layers grouped by stream.
+// If VP9 SVC is used, there will be only one SSRC, so each spatial and
+// temporal layer combo used shall be specified in the TargetBitrate packet.
+
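+// Worked example (editor's note, not part of the original comment): an item
+// with S=1, T=0 and a target bitrate of 0x030405 kbps is laid out as the
+// four bytes 0x10 0x03 0x04 0x05; the layer nibbles share the first byte and
+// the bitrate fills the remaining 24 bits, matching the vectors in the
+// accompanying unit test.
+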
+TargetBitrate::TargetBitrate() = default;
+TargetBitrate::TargetBitrate(const TargetBitrate&) = default;
+TargetBitrate& TargetBitrate::operator=(const TargetBitrate&) = default;
+TargetBitrate::~TargetBitrate() = default;
+
+void TargetBitrate::Parse(const uint8_t* block, uint16_t block_length) {
+ // Validate block header (should already have been parsed and checked).
+ RTC_DCHECK_EQ(block[0], kBlockType);
+ RTC_DCHECK_EQ(block_length, ByteReader<uint16_t>::ReadBigEndian(&block[2]));
+
+  // The header specifies block length - 1, but since we ignore the header,
+  // which occupies exactly one block, we can just treat this as the payload
+  // length.
+ const size_t payload_bytes = block_length * 4;
+ const size_t num_items = payload_bytes / kBitrateItemSizeBytes;
+ size_t index = kTargetBitrateHeaderSizeBytes;
+ bitrates_.clear();
+ for (size_t i = 0; i < num_items; ++i) {
+ uint8_t layers = block[index];
+ uint32_t bitrate_kbps =
+ ByteReader<uint32_t, 3>::ReadBigEndian(&block[index + 1]);
+ index += kBitrateItemSizeBytes;
+ AddTargetBitrate((layers >> 4) & 0x0F, layers & 0x0F, bitrate_kbps);
+ }
+}
+
+void TargetBitrate::AddTargetBitrate(uint8_t spatial_layer,
+ uint8_t temporal_layer,
+ uint32_t target_bitrate_kbps) {
+ RTC_DCHECK_LE(spatial_layer, 0x0F);
+ RTC_DCHECK_LE(temporal_layer, 0x0F);
+ RTC_DCHECK_LE(target_bitrate_kbps, 0x00FFFFFFU);
+ bitrates_.push_back(
+ BitrateItem(spatial_layer, temporal_layer, target_bitrate_kbps));
+}
+
+const std::vector<TargetBitrate::BitrateItem>&
+TargetBitrate::GetTargetBitrates() const {
+ return bitrates_;
+}
+
+size_t TargetBitrate::BlockLength() const {
+ return kTargetBitrateHeaderSizeBytes +
+ bitrates_.size() * kBitrateItemSizeBytes;
+}
+
+void TargetBitrate::Create(uint8_t* buffer) const {
+ buffer[0] = kBlockType;
+ buffer[1] = 0; // Reserved.
+ uint16_t block_length_words =
+ rtc::dchecked_cast<uint16_t>((BlockLength() / 4) - 1);
+ ByteWriter<uint16_t>::WriteBigEndian(&buffer[2], block_length_words);
+
+ size_t index = kTargetBitrateHeaderSizeBytes;
+ for (const BitrateItem& item : bitrates_) {
+ buffer[index] = (item.spatial_layer << 4) | item.temporal_layer;
+ ByteWriter<uint32_t, 3>::WriteBigEndian(&buffer[index + 1],
+ item.target_bitrate_kbps);
+ index += kBitrateItemSizeBytes;
+ }
+}
+
+} // namespace rtcp
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/target_bitrate.h b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/target_bitrate.h
new file mode 100644
index 0000000000..07e5da1a49
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/target_bitrate.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_TARGET_BITRATE_H_
+#define MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_TARGET_BITRATE_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <vector>
+
+namespace webrtc {
+namespace rtcp {
+
+class TargetBitrate {
+ public:
+ // TODO(sprang): This block type is just a place holder. We need to get an
+ // id assigned by IANA.
+ static constexpr uint8_t kBlockType = 42;
+ static const size_t kBitrateItemSizeBytes;
+
+ struct BitrateItem {
+ BitrateItem();
+ BitrateItem(uint8_t spatial_layer,
+ uint8_t temporal_layer,
+ uint32_t target_bitrate_kbps);
+
+ uint8_t spatial_layer;
+ uint8_t temporal_layer;
+ uint32_t target_bitrate_kbps;
+ };
+
+ TargetBitrate();
+ TargetBitrate(const TargetBitrate&);
+ TargetBitrate& operator=(const TargetBitrate&);
+ ~TargetBitrate();
+
+ void AddTargetBitrate(uint8_t spatial_layer,
+ uint8_t temporal_layer,
+ uint32_t target_bitrate_kbps);
+
+ const std::vector<BitrateItem>& GetTargetBitrates() const;
+
+ void Parse(const uint8_t* block, uint16_t block_length);
+
+ size_t BlockLength() const;
+
+ void Create(uint8_t* buffer) const;
+
+ private:
+ std::vector<BitrateItem> bitrates_;
+};
+
+} // namespace rtcp
+} // namespace webrtc
+#endif // MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_TARGET_BITRATE_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/target_bitrate_unittest.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/target_bitrate_unittest.cc
new file mode 100644
index 0000000000..b16bb5beaa
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/target_bitrate_unittest.cc
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtcp_packet/target_bitrate.h"
+
+#include "modules/rtp_rtcp/source/byte_io.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/extended_reports.h"
+#include "rtc_base/buffer.h"
+#include "test/gtest.h"
+#include "test/rtcp_packet_parser.h"
+
+namespace webrtc {
+namespace {
+using BitrateItem = rtcp::TargetBitrate::BitrateItem;
+using rtcp::TargetBitrate;
+using test::ParseSinglePacket;
+
+constexpr uint32_t kSsrc = 0x12345678;
+
+// clang-format off
+const uint8_t kPacket[] = { TargetBitrate::kBlockType, // Block ID.
+ 0x00, // Reserved.
+ 0x00, 0x04, // Length = 4 words.
+ 0x00, 0x01, 0x02, 0x03, // S0T0 0x010203 kbps.
+ 0x01, 0x02, 0x03, 0x04, // S0T1 0x020304 kbps.
+ 0x10, 0x03, 0x04, 0x05, // S1T0 0x030405 kbps.
+ 0x11, 0x04, 0x05, 0x06 }; // S1T1 0x040506 kbps.
+constexpr size_t kPacketLengthBlocks = ((sizeof(kPacket) + 3) / 4) - 1;
+// clang-format on
+
+void ExpectBitrateItemEquals(const BitrateItem& expected,
+                             const BitrateItem& actual) {
+  EXPECT_EQ(expected.spatial_layer, actual.spatial_layer);
+  EXPECT_EQ(expected.temporal_layer, actual.temporal_layer);
+  EXPECT_EQ(expected.target_bitrate_kbps, actual.target_bitrate_kbps);
+}
+
+void CheckBitrateItems(const std::vector<BitrateItem>& bitrates) {
+  EXPECT_EQ(4U, bitrates.size());
+  ExpectBitrateItemEquals(BitrateItem(0, 0, 0x010203), bitrates[0]);
+  ExpectBitrateItemEquals(BitrateItem(0, 1, 0x020304), bitrates[1]);
+  ExpectBitrateItemEquals(BitrateItem(1, 0, 0x030405), bitrates[2]);
+  ExpectBitrateItemEquals(BitrateItem(1, 1, 0x040506), bitrates[3]);
+}
+
+} // namespace
+
+TEST(TargetBitrateTest, Parse) {
+ TargetBitrate target_bitrate;
+ target_bitrate.Parse(kPacket, kPacketLengthBlocks);
+ CheckBitrateItems(target_bitrate.GetTargetBitrates());
+}
+
+TEST(TargetBitrateTest, FullPacket) {
+ const size_t kXRHeaderSize = 8; // RTCP header (4) + SSRC (4).
+ const size_t kTotalSize = kXRHeaderSize + sizeof(kPacket);
+ uint8_t kRtcpPacket[kTotalSize] = {2 << 6, 207, 0x00, (kTotalSize / 4) - 1,
+ 0x12, 0x34, 0x56, 0x78}; // SSRC.
+ memcpy(&kRtcpPacket[kXRHeaderSize], kPacket, sizeof(kPacket));
+ rtcp::ExtendedReports xr;
+ EXPECT_TRUE(ParseSinglePacket(kRtcpPacket, &xr));
+ EXPECT_EQ(kSsrc, xr.sender_ssrc());
+ const absl::optional<TargetBitrate>& target_bitrate = xr.target_bitrate();
+ ASSERT_TRUE(static_cast<bool>(target_bitrate));
+ CheckBitrateItems(target_bitrate->GetTargetBitrates());
+}
+
+TEST(TargetBitrateTest, Create) {
+ TargetBitrate target_bitrate;
+ target_bitrate.AddTargetBitrate(0, 0, 0x010203);
+ target_bitrate.AddTargetBitrate(0, 1, 0x020304);
+ target_bitrate.AddTargetBitrate(1, 0, 0x030405);
+ target_bitrate.AddTargetBitrate(1, 1, 0x040506);
+
+ uint8_t buffer[sizeof(kPacket)] = {};
+ ASSERT_EQ(sizeof(kPacket), target_bitrate.BlockLength());
+ target_bitrate.Create(buffer);
+
+ EXPECT_EQ(0, memcmp(kPacket, buffer, sizeof(kPacket)));
+}
+
+TEST(TargetBitrateTest, ParseNullBitratePacket) {
+ const uint8_t kNullPacket[] = {TargetBitrate::kBlockType, 0x00, 0x00, 0x00};
+ TargetBitrate target_bitrate;
+ target_bitrate.Parse(kNullPacket, 0);
+ EXPECT_TRUE(target_bitrate.GetTargetBitrates().empty());
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/tmmb_item.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/tmmb_item.cc
new file mode 100644
index 0000000000..810e1e267a
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/tmmb_item.cc
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtcp_packet/tmmb_item.h"
+
+#include "modules/rtp_rtcp/source/byte_io.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+namespace rtcp {
+TmmbItem::TmmbItem(uint32_t ssrc, uint64_t bitrate_bps, uint16_t overhead)
+ : ssrc_(ssrc), bitrate_bps_(bitrate_bps), packet_overhead_(overhead) {
+ RTC_DCHECK_LE(overhead, 0x1ffu);
+}
+
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// 0 | SSRC |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// 4 | MxTBR Exp | MxTBR Mantissa |Measured Overhead|
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+bool TmmbItem::Parse(const uint8_t* buffer) {
+ ssrc_ = ByteReader<uint32_t>::ReadBigEndian(&buffer[0]);
+ // Read 4 bytes into 1 block.
+ uint32_t compact = ByteReader<uint32_t>::ReadBigEndian(&buffer[4]);
+ // Split 1 block into 3 components.
+ uint8_t exponent = compact >> 26; // 6 bits.
+ uint64_t mantissa = (compact >> 9) & 0x1ffff; // 17 bits.
+ uint16_t overhead = compact & 0x1ff; // 9 bits.
+ // Combine 3 components into 2 values.
+ bitrate_bps_ = (mantissa << exponent);
+
+ bool shift_overflow = (bitrate_bps_ >> exponent) != mantissa;
+ if (shift_overflow) {
+ RTC_LOG(LS_ERROR) << "Invalid tmmb bitrate value : " << mantissa << "*2^"
+ << static_cast<int>(exponent);
+ return false;
+ }
+ packet_overhead_ = overhead;
+ return true;
+}
+
+void TmmbItem::Create(uint8_t* buffer) const {
+ constexpr uint64_t kMaxMantissa = 0x1ffff; // 17 bits.
+ uint64_t mantissa = bitrate_bps_;
+ uint32_t exponent = 0;
+ while (mantissa > kMaxMantissa) {
+ mantissa >>= 1;
+ ++exponent;
+ }
+
+ ByteWriter<uint32_t>::WriteBigEndian(&buffer[0], ssrc_);
+ uint32_t compact = (exponent << 26) | (mantissa << 9) | packet_overhead_;
+ ByteWriter<uint32_t>::WriteBigEndian(&buffer[4], compact);
+}
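+
+// Worked example (editor's note): a bitrate of 312000 bps does not fit in
+// the 17-bit mantissa (max 0x1ffff = 131071), so Create() shifts it right
+// twice, giving mantissa = 78000 and exponent = 2 (78000 << 2 == 312000
+// exactly). With a measured overhead of 0x1fe this packs to
+// (2 << 26) | (78000 << 9) | 0x1fe = 0x0a6161fe, i.e. the bytes
+// 0x0a 0x61 0x61 0xfe used in the TMMBR/TMMBN unit tests.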
+
+void TmmbItem::set_packet_overhead(uint16_t overhead) {
+ RTC_DCHECK_LE(overhead, 0x1ffu);
+ packet_overhead_ = overhead;
+}
+} // namespace rtcp
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/tmmb_item.h b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/tmmb_item.h
new file mode 100644
index 0000000000..dc5d1b2c2d
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/tmmb_item.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_TMMB_ITEM_H_
+#define MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_TMMB_ITEM_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+namespace webrtc {
+namespace rtcp {
+// RFC5104, Section 3.5.4
+// Temporary Maximum Media Stream Bitrate Request/Notification.
+// Used both by TMMBR and TMMBN rtcp packets.
+class TmmbItem {
+ public:
+ static const size_t kLength = 8;
+
+ TmmbItem() : ssrc_(0), bitrate_bps_(0), packet_overhead_(0) {}
+ TmmbItem(uint32_t ssrc, uint64_t bitrate_bps, uint16_t overhead);
+
+ bool Parse(const uint8_t* buffer);
+ void Create(uint8_t* buffer) const;
+
+ void set_ssrc(uint32_t ssrc) { ssrc_ = ssrc; }
+ void set_bitrate_bps(uint64_t bitrate_bps) { bitrate_bps_ = bitrate_bps; }
+ void set_packet_overhead(uint16_t overhead);
+
+ uint32_t ssrc() const { return ssrc_; }
+ uint64_t bitrate_bps() const { return bitrate_bps_; }
+ uint16_t packet_overhead() const { return packet_overhead_; }
+
+ private:
+ // Media stream id.
+ uint32_t ssrc_;
+ // Maximum total media bit rate that the media receiver is
+ // currently prepared to accept for this media stream.
+ uint64_t bitrate_bps_;
+ // Per-packet overhead that the media receiver has observed
+ // for this media stream at its chosen reference protocol layer.
+ uint16_t packet_overhead_;
+};
+} // namespace rtcp
+} // namespace webrtc
+#endif // MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_TMMB_ITEM_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/tmmbn.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/tmmbn.cc
new file mode 100644
index 0000000000..f57e5749c2
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/tmmbn.cc
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtcp_packet/tmmbn.h"
+
+#include "modules/rtp_rtcp/source/rtcp_packet/common_header.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+namespace rtcp {
+constexpr uint8_t Tmmbn::kFeedbackMessageType;
+// RFC 4585: Feedback format.
+// Common packet format:
+//
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |V=2|P| FMT | PT | length |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | SSRC of packet sender |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | SSRC of media source (unused) = 0 |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// : Feedback Control Information (FCI) :
+// : :
+// Temporary Maximum Media Stream Bit Rate Notification (TMMBN) (RFC 5104).
+// The Feedback Control Information (FCI) consists of zero, one, or more
+// TMMBN FCI entries.
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | SSRC |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | MxTBR Exp | MxTBR Mantissa |Measured Overhead|
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+Tmmbn::Tmmbn() = default;
+
+Tmmbn::~Tmmbn() = default;
+
+bool Tmmbn::Parse(const CommonHeader& packet) {
+ RTC_DCHECK_EQ(packet.type(), kPacketType);
+ RTC_DCHECK_EQ(packet.fmt(), kFeedbackMessageType);
+
+ if (packet.payload_size_bytes() < kCommonFeedbackLength) {
+ RTC_LOG(LS_WARNING) << "Payload length " << packet.payload_size_bytes()
+ << " is too small for TMMBN.";
+ return false;
+ }
+ size_t items_size_bytes = packet.payload_size_bytes() - kCommonFeedbackLength;
+ if (items_size_bytes % TmmbItem::kLength != 0) {
+ RTC_LOG(LS_WARNING) << "Payload length " << packet.payload_size_bytes()
+ << " is not valid for TMMBN.";
+ return false;
+ }
+ ParseCommonFeedback(packet.payload());
+ const uint8_t* next_item = packet.payload() + kCommonFeedbackLength;
+
+ size_t number_of_items = items_size_bytes / TmmbItem::kLength;
+ items_.resize(number_of_items);
+ for (TmmbItem& item : items_) {
+ if (!item.Parse(next_item))
+ return false;
+ next_item += TmmbItem::kLength;
+ }
+ return true;
+}
+
+void Tmmbn::AddTmmbr(const TmmbItem& item) {
+ items_.push_back(item);
+}
+
+size_t Tmmbn::BlockLength() const {
+ return kHeaderLength + kCommonFeedbackLength +
+ TmmbItem::kLength * items_.size();
+}
+
+bool Tmmbn::Create(uint8_t* packet,
+ size_t* index,
+ size_t max_length,
+ PacketReadyCallback callback) const {
+ while (*index + BlockLength() > max_length) {
+ if (!OnBufferFull(packet, index, callback))
+ return false;
+ }
+ const size_t index_end = *index + BlockLength();
+
+ CreateHeader(kFeedbackMessageType, kPacketType, HeaderLength(), packet,
+ index);
+ RTC_DCHECK_EQ(0, Rtpfb::media_ssrc());
+ CreateCommonFeedback(packet + *index);
+ *index += kCommonFeedbackLength;
+ for (const TmmbItem& item : items_) {
+ item.Create(packet + *index);
+ *index += TmmbItem::kLength;
+ }
+ RTC_CHECK_EQ(index_end, *index);
+ return true;
+}
+} // namespace rtcp
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/tmmbn.h b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/tmmbn.h
new file mode 100644
index 0000000000..ff7779d8ac
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/tmmbn.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_TMMBN_H_
+#define MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_TMMBN_H_
+
+#include <vector>
+
+#include "modules/rtp_rtcp/source/rtcp_packet/rtpfb.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/tmmb_item.h"
+
+namespace webrtc {
+namespace rtcp {
+class CommonHeader;
+
+// Temporary Maximum Media Stream Bit Rate Notification (TMMBN).
+// RFC 5104, Section 4.2.2.
+class Tmmbn : public Rtpfb {
+ public:
+ static constexpr uint8_t kFeedbackMessageType = 4;
+
+ Tmmbn();
+ ~Tmmbn() override;
+
+ // Parse assumes header is already parsed and validated.
+ bool Parse(const CommonHeader& packet);
+
+ void AddTmmbr(const TmmbItem& item);
+
+ const std::vector<TmmbItem>& items() const { return items_; }
+
+ size_t BlockLength() const override;
+
+ bool Create(uint8_t* packet,
+ size_t* index,
+ size_t max_length,
+ PacketReadyCallback callback) const override;
+
+ private:
+ // Media ssrc is unused, shadow base class setter and getter.
+ void SetMediaSsrc(uint32_t ssrc);
+ uint32_t media_ssrc() const;
+
+ std::vector<TmmbItem> items_;
+};
+} // namespace rtcp
+} // namespace webrtc
+#endif // MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_TMMBN_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/tmmbn_unittest.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/tmmbn_unittest.cc
new file mode 100644
index 0000000000..3a37bb1c0e
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/tmmbn_unittest.cc
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtcp_packet/tmmbn.h"
+
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "test/rtcp_packet_parser.h"
+
+using ::testing::ElementsAreArray;
+using ::testing::IsEmpty;
+using ::testing::make_tuple;
+using webrtc::rtcp::TmmbItem;
+using webrtc::rtcp::Tmmbn;
+
+namespace webrtc {
+namespace {
+const uint32_t kSenderSsrc = 0x12345678;
+const uint32_t kRemoteSsrc = 0x23456789;
+const uint32_t kBitrateBps = 312000;
+const uint16_t kOverhead = 0x1fe;
+const uint8_t kPacket[] = {0x84, 205, 0x00, 0x04, 0x12, 0x34, 0x56,
+ 0x78, 0x00, 0x00, 0x00, 0x00, 0x23, 0x45,
+ 0x67, 0x89, 0x0a, 0x61, 0x61, 0xfe};
+} // namespace
+
+TEST(RtcpPacketTmmbnTest, Create) {
+ Tmmbn tmmbn;
+ tmmbn.SetSenderSsrc(kSenderSsrc);
+ tmmbn.AddTmmbr(TmmbItem(kRemoteSsrc, kBitrateBps, kOverhead));
+
+ rtc::Buffer packet = tmmbn.Build();
+
+ EXPECT_THAT(make_tuple(packet.data(), packet.size()),
+ ElementsAreArray(kPacket));
+}
+
+TEST(RtcpPacketTmmbnTest, Parse) {
+ Tmmbn tmmbn;
+ EXPECT_TRUE(test::ParseSinglePacket(kPacket, &tmmbn));
+
+ const Tmmbn& parsed = tmmbn;
+
+ EXPECT_EQ(kSenderSsrc, parsed.sender_ssrc());
+ ASSERT_EQ(1u, parsed.items().size());
+ EXPECT_EQ(kRemoteSsrc, parsed.items().front().ssrc());
+ EXPECT_EQ(kBitrateBps, parsed.items().front().bitrate_bps());
+ EXPECT_EQ(kOverhead, parsed.items().front().packet_overhead());
+}
+
+TEST(RtcpPacketTmmbnTest, CreateAndParseWithoutItems) {
+ Tmmbn tmmbn;
+ tmmbn.SetSenderSsrc(kSenderSsrc);
+
+ rtc::Buffer packet = tmmbn.Build();
+ Tmmbn parsed;
+ EXPECT_TRUE(test::ParseSinglePacket(packet, &parsed));
+
+ EXPECT_EQ(kSenderSsrc, parsed.sender_ssrc());
+ EXPECT_THAT(parsed.items(), IsEmpty());
+}
+
+TEST(RtcpPacketTmmbnTest, CreateAndParseWithTwoItems) {
+ Tmmbn tmmbn;
+ tmmbn.SetSenderSsrc(kSenderSsrc);
+ tmmbn.AddTmmbr(TmmbItem(kRemoteSsrc, kBitrateBps, kOverhead));
+ tmmbn.AddTmmbr(TmmbItem(kRemoteSsrc + 1, 4 * kBitrateBps, 40));
+
+ rtc::Buffer packet = tmmbn.Build();
+ Tmmbn parsed;
+ EXPECT_TRUE(test::ParseSinglePacket(packet, &parsed));
+
+ EXPECT_EQ(kSenderSsrc, parsed.sender_ssrc());
+ EXPECT_EQ(2u, parsed.items().size());
+ EXPECT_EQ(kRemoteSsrc, parsed.items()[0].ssrc());
+ EXPECT_EQ(kBitrateBps, parsed.items()[0].bitrate_bps());
+ EXPECT_EQ(kOverhead, parsed.items()[0].packet_overhead());
+ EXPECT_EQ(kRemoteSsrc + 1, parsed.items()[1].ssrc());
+ EXPECT_EQ(4 * kBitrateBps, parsed.items()[1].bitrate_bps());
+ EXPECT_EQ(40U, parsed.items()[1].packet_overhead());
+}
+
+TEST(RtcpPacketTmmbnTest, ParseFailsOnTooSmallPacket) {
+ const uint8_t kSmallPacket[] = {0x84, 205, 0x00, 0x01,
+ 0x12, 0x34, 0x56, 0x78};
+ Tmmbn tmmbn;
+ EXPECT_FALSE(test::ParseSinglePacket(kSmallPacket, &tmmbn));
+}
+
+TEST(RtcpPacketTmmbnTest, ParseFailsOnUnAlignedPacket) {
+ const uint8_t kUnalignedPacket[] = {0x84, 205, 0x00, 0x03, 0x12, 0x34,
+ 0x56, 0x78, 0x00, 0x00, 0x00, 0x00,
+ 0x23, 0x45, 0x67, 0x89};
+
+ Tmmbn tmmbn;
+ EXPECT_FALSE(test::ParseSinglePacket(kUnalignedPacket, &tmmbn));
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/tmmbr.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/tmmbr.cc
new file mode 100644
index 0000000000..9dc745e509
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/tmmbr.cc
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtcp_packet/tmmbr.h"
+
+#include "modules/rtp_rtcp/source/rtcp_packet/common_header.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+namespace rtcp {
+constexpr uint8_t Tmmbr::kFeedbackMessageType;
+// RFC 4585: Feedback format.
+// Common packet format:
+//
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |V=2|P| FMT | PT | length |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | SSRC of packet sender |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | SSRC of media source (unused) = 0 |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// : Feedback Control Information (FCI) :
+// : :
+// Temporary Maximum Media Stream Bit Rate Request (TMMBR) (RFC 5104).
+// The Feedback Control Information (FCI) for the TMMBR
+// consists of one or more FCI entries.
+// FCI:
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | SSRC |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | MxTBR Exp | MxTBR Mantissa |Measured Overhead|
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+Tmmbr::Tmmbr() = default;
+
+Tmmbr::~Tmmbr() = default;
+
+bool Tmmbr::Parse(const CommonHeader& packet) {
+ RTC_DCHECK_EQ(packet.type(), kPacketType);
+ RTC_DCHECK_EQ(packet.fmt(), kFeedbackMessageType);
+
+ if (packet.payload_size_bytes() < kCommonFeedbackLength + TmmbItem::kLength) {
+ RTC_LOG(LS_WARNING) << "Payload length " << packet.payload_size_bytes()
+ << " is too small for a TMMBR.";
+ return false;
+ }
+ size_t items_size_bytes = packet.payload_size_bytes() - kCommonFeedbackLength;
+ if (items_size_bytes % TmmbItem::kLength != 0) {
+ RTC_LOG(LS_WARNING) << "Payload length " << packet.payload_size_bytes()
+ << " is not valid for a TMMBR.";
+ return false;
+ }
+ ParseCommonFeedback(packet.payload());
+
+ const uint8_t* next_item = packet.payload() + kCommonFeedbackLength;
+ size_t number_of_items = items_size_bytes / TmmbItem::kLength;
+ items_.resize(number_of_items);
+ for (TmmbItem& item : items_) {
+ if (!item.Parse(next_item))
+ return false;
+ next_item += TmmbItem::kLength;
+ }
+ return true;
+}
+
+void Tmmbr::AddTmmbr(const TmmbItem& item) {
+ items_.push_back(item);
+}
+
+size_t Tmmbr::BlockLength() const {
+ return kHeaderLength + kCommonFeedbackLength +
+ TmmbItem::kLength * items_.size();
+}
+
+bool Tmmbr::Create(uint8_t* packet,
+ size_t* index,
+ size_t max_length,
+ PacketReadyCallback callback) const {
+ RTC_DCHECK(!items_.empty());
+ while (*index + BlockLength() > max_length) {
+ if (!OnBufferFull(packet, index, callback))
+ return false;
+ }
+ const size_t index_end = *index + BlockLength();
+
+ CreateHeader(kFeedbackMessageType, kPacketType, HeaderLength(), packet,
+ index);
+ RTC_DCHECK_EQ(0, Rtpfb::media_ssrc());
+ CreateCommonFeedback(packet + *index);
+ *index += kCommonFeedbackLength;
+ for (const TmmbItem& item : items_) {
+ item.Create(packet + *index);
+ *index += TmmbItem::kLength;
+ }
+ RTC_CHECK_EQ(index_end, *index);
+ return true;
+}
+} // namespace rtcp
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/tmmbr.h b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/tmmbr.h
new file mode 100644
index 0000000000..7482cb75cc
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/tmmbr.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_TMMBR_H_
+#define MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_TMMBR_H_
+
+#include <vector>
+
+#include "modules/rtp_rtcp/source/rtcp_packet/rtpfb.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/tmmb_item.h"
+
+namespace webrtc {
+namespace rtcp {
+class CommonHeader;
+
+// Temporary Maximum Media Stream Bit Rate Request (TMMBR).
+// RFC 5104, Section 4.2.1.
+class Tmmbr : public Rtpfb {
+ public:
+ static constexpr uint8_t kFeedbackMessageType = 3;
+
+ Tmmbr();
+ ~Tmmbr() override;
+
+ // Parse assumes header is already parsed and validated.
+ bool Parse(const CommonHeader& packet);
+
+ void AddTmmbr(const TmmbItem& item);
+
+ const std::vector<TmmbItem>& requests() const { return items_; }
+
+ size_t BlockLength() const override;
+
+ bool Create(uint8_t* packet,
+ size_t* index,
+ size_t max_length,
+ PacketReadyCallback callback) const override;
+
+ private:
+ // Media ssrc is unused, shadow base class setter.
+ void SetMediaSsrc(uint32_t ssrc);
+
+ std::vector<TmmbItem> items_;
+};
+} // namespace rtcp
+} // namespace webrtc
+#endif // MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_TMMBR_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/tmmbr_unittest.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/tmmbr_unittest.cc
new file mode 100644
index 0000000000..1bac808aa9
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/tmmbr_unittest.cc
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtcp_packet/tmmbr.h"
+
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "test/rtcp_packet_parser.h"
+
+using ::testing::ElementsAreArray;
+using ::testing::make_tuple;
+using webrtc::rtcp::TmmbItem;
+using webrtc::rtcp::Tmmbr;
+
+namespace webrtc {
+namespace {
+const uint32_t kSenderSsrc = 0x12345678;
+const uint32_t kRemoteSsrc = 0x23456789;
+const uint32_t kBitrateBps = 312000;
+const uint16_t kOverhead = 0x1fe;
+const uint8_t kPacket[] = {0x83, 205, 0x00, 0x04, 0x12, 0x34, 0x56,
+ 0x78, 0x00, 0x00, 0x00, 0x00, 0x23, 0x45,
+ 0x67, 0x89, 0x0a, 0x61, 0x61, 0xfe};
+} // namespace
+
+TEST(RtcpPacketTmmbrTest, Create) {
+ Tmmbr tmmbr;
+ tmmbr.SetSenderSsrc(kSenderSsrc);
+ tmmbr.AddTmmbr(TmmbItem(kRemoteSsrc, kBitrateBps, kOverhead));
+
+ rtc::Buffer packet = tmmbr.Build();
+
+ EXPECT_THAT(make_tuple(packet.data(), packet.size()),
+ ElementsAreArray(kPacket));
+}
+
+TEST(RtcpPacketTmmbrTest, Parse) {
+ Tmmbr tmmbr;
+ EXPECT_TRUE(test::ParseSinglePacket(kPacket, &tmmbr));
+ const Tmmbr& parsed = tmmbr;
+
+ EXPECT_EQ(kSenderSsrc, parsed.sender_ssrc());
+ ASSERT_EQ(1u, parsed.requests().size());
+ EXPECT_EQ(kRemoteSsrc, parsed.requests().front().ssrc());
+ EXPECT_EQ(kBitrateBps, parsed.requests().front().bitrate_bps());
+ EXPECT_EQ(kOverhead, parsed.requests().front().packet_overhead());
+}
+
+TEST(RtcpPacketTmmbrTest, CreateAndParseWithTwoEntries) {
+ Tmmbr tmmbr;
+ tmmbr.SetSenderSsrc(kSenderSsrc);
+ tmmbr.AddTmmbr(TmmbItem(kRemoteSsrc, kBitrateBps, kOverhead));
+ tmmbr.AddTmmbr(TmmbItem(kRemoteSsrc + 1, 4 * kBitrateBps, kOverhead + 1));
+
+ rtc::Buffer packet = tmmbr.Build();
+
+ Tmmbr parsed;
+ EXPECT_TRUE(test::ParseSinglePacket(packet, &parsed));
+
+ EXPECT_EQ(kSenderSsrc, parsed.sender_ssrc());
+ EXPECT_EQ(2u, parsed.requests().size());
+ EXPECT_EQ(kRemoteSsrc, parsed.requests()[0].ssrc());
+ EXPECT_EQ(kRemoteSsrc + 1, parsed.requests()[1].ssrc());
+}
+
+TEST(RtcpPacketTmmbrTest, ParseFailsWithoutItems) {
+ const uint8_t kZeroItemsPacket[] = {0x83, 205, 0x00, 0x02, 0x12, 0x34,
+ 0x56, 0x78, 0x00, 0x00, 0x00, 0x00};
+
+ Tmmbr tmmbr;
+ EXPECT_FALSE(test::ParseSinglePacket(kZeroItemsPacket, &tmmbr));
+}
+
+TEST(RtcpPacketTmmbrTest, ParseFailsOnUnAlignedPacket) {
+ const uint8_t kUnalignedPacket[] = {
+ 0x83, 205, 0x00, 0x05, 0x12, 0x34, 0x56, 0x78, 0x00, 0x00, 0x00, 0x00,
+ 0x23, 0x45, 0x67, 0x89, 0x0a, 0x61, 0x61, 0xfe, 0x34, 0x56, 0x78, 0x9a};
+
+ Tmmbr tmmbr;
+ EXPECT_FALSE(test::ParseSinglePacket(kUnalignedPacket, &tmmbr));
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/transport_feedback.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/transport_feedback.cc
new file mode 100644
index 0000000000..bb1578fd8c
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/transport_feedback.cc
@@ -0,0 +1,747 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtcp_packet/transport_feedback.h"
+
+#include <algorithm>
+#include <cstdint>
+#include <numeric>
+#include <utility>
+
+#include "absl/algorithm/container.h"
+#include "modules/include/module_common_types_public.h"
+#include "modules/rtp_rtcp/source/byte_io.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/common_header.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/trace_event.h"
+
+namespace webrtc {
+namespace rtcp {
+namespace {
+// Header size:
+// * 4 bytes Common RTCP Packet Header
+// * 8 bytes Common Packet Format for RTCP Feedback Messages
+// * 8 bytes FeedbackPacket header
+constexpr size_t kTransportFeedbackHeaderSizeBytes = 4 + 8 + 8;
+constexpr size_t kChunkSizeBytes = 2;
+// TODO(sprang): Add support for a dynamic max size for easier fragmentation,
+// e.g. set it to what's left in the buffer or IP_PACKET_SIZE.
+// Size constraint imposed by RTCP common header: 16bit size field interpreted
+// as number of four byte words minus the first header word.
+constexpr size_t kMaxSizeBytes = (1 << 16) * 4;
+// Payload size:
+// * 8 bytes Common Packet Format for RTCP Feedback Messages
+// * 8 bytes FeedbackPacket header.
+// * 2 bytes for one chunk.
+constexpr size_t kMinPayloadSizeBytes = 8 + 8 + 2;
+constexpr TimeDelta kBaseTimeTick = TransportFeedback::kDeltaTick * (1 << 8);
+constexpr TimeDelta kTimeWrapPeriod = kBaseTimeTick * (1 << 24);
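+// Editor's note: assuming kDeltaTick is the 250 us tick declared in the
+// accompanying header, one base time tick is 250 us * 256 = 64 ms and the
+// wrap period is 64 ms * 2^24, roughly 12.4 days.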
+
+// Message format
+//
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |V=2|P| FMT=15 | PT=205 | length |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// 0 | SSRC of packet sender |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// 4 | SSRC of media source |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// 8 | base sequence number | packet status count |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// 12 | reference time | fb pkt. count |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// 16 | packet chunk | packet chunk |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// . .
+// . .
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | packet chunk | recv delta | recv delta |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// . .
+// . .
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | recv delta | recv delta | zero padding |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+} // namespace
+constexpr uint8_t TransportFeedback::kFeedbackMessageType;
+constexpr size_t TransportFeedback::kMaxReportedPackets;
+
+constexpr size_t TransportFeedback::LastChunk::kMaxRunLengthCapacity;
+constexpr size_t TransportFeedback::LastChunk::kMaxOneBitCapacity;
+constexpr size_t TransportFeedback::LastChunk::kMaxTwoBitCapacity;
+constexpr size_t TransportFeedback::LastChunk::kMaxVectorCapacity;
+
+TransportFeedback::LastChunk::LastChunk() {
+ Clear();
+}
+
+bool TransportFeedback::LastChunk::Empty() const {
+ return size_ == 0;
+}
+
+void TransportFeedback::LastChunk::Clear() {
+ size_ = 0;
+ all_same_ = true;
+ has_large_delta_ = false;
+}
+
+bool TransportFeedback::LastChunk::CanAdd(DeltaSize delta_size) const {
+ RTC_DCHECK_LE(delta_size, 2);
+ if (size_ < kMaxTwoBitCapacity)
+ return true;
+ if (size_ < kMaxOneBitCapacity && !has_large_delta_ && delta_size != kLarge)
+ return true;
+ if (size_ < kMaxRunLengthCapacity && all_same_ &&
+ delta_sizes_[0] == delta_size)
+ return true;
+ return false;
+}
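+
+// Editor's note: these capacities follow from the 16-bit chunk formats
+// documented below: a two-bit vector holds 7 symbols, a one-bit vector 14,
+// and a run length chunk counts up to 2^13 - 1 = 8191 equal symbols.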
+
+void TransportFeedback::LastChunk::Add(DeltaSize delta_size) {
+ RTC_DCHECK(CanAdd(delta_size));
+ if (size_ < kMaxVectorCapacity)
+ delta_sizes_[size_] = delta_size;
+ size_++;
+ all_same_ = all_same_ && delta_size == delta_sizes_[0];
+ has_large_delta_ = has_large_delta_ || delta_size == kLarge;
+}
+
+void TransportFeedback::LastChunk::AddMissingPackets(size_t num_missing) {
+ RTC_DCHECK_EQ(size_, 0);
+ RTC_DCHECK(all_same_);
+ RTC_DCHECK(!has_large_delta_);
+ RTC_DCHECK_LT(num_missing, kMaxRunLengthCapacity);
+ absl::c_fill(delta_sizes_, DeltaSize(0));
+ size_ = num_missing;
+}
+
+uint16_t TransportFeedback::LastChunk::Emit() {
+ RTC_DCHECK(!CanAdd(0) || !CanAdd(1) || !CanAdd(2));
+ if (all_same_) {
+ uint16_t chunk = EncodeRunLength();
+ Clear();
+ return chunk;
+ }
+ if (size_ == kMaxOneBitCapacity) {
+ uint16_t chunk = EncodeOneBit();
+ Clear();
+ return chunk;
+ }
+ RTC_DCHECK_GE(size_, kMaxTwoBitCapacity);
+ uint16_t chunk = EncodeTwoBit(kMaxTwoBitCapacity);
+  // Remove the `kMaxTwoBitCapacity` encoded delta sizes:
+  // shift the remaining delta sizes down and recalculate all_same_ and
+  // has_large_delta_.
+ size_ -= kMaxTwoBitCapacity;
+ all_same_ = true;
+ has_large_delta_ = false;
+ for (size_t i = 0; i < size_; ++i) {
+ DeltaSize delta_size = delta_sizes_[kMaxTwoBitCapacity + i];
+ delta_sizes_[i] = delta_size;
+ all_same_ = all_same_ && delta_size == delta_sizes_[0];
+ has_large_delta_ = has_large_delta_ || delta_size == kLarge;
+ }
+
+ return chunk;
+}
+
+uint16_t TransportFeedback::LastChunk::EncodeLast() const {
+ RTC_DCHECK_GT(size_, 0);
+ if (all_same_)
+ return EncodeRunLength();
+ if (size_ <= kMaxTwoBitCapacity)
+ return EncodeTwoBit(size_);
+ return EncodeOneBit();
+}
+
+// Appends the content of the LastChunk to `deltas`.
+void TransportFeedback::LastChunk::AppendTo(
+ std::vector<DeltaSize>* deltas) const {
+ if (all_same_) {
+ deltas->insert(deltas->end(), size_, delta_sizes_[0]);
+ } else {
+ deltas->insert(deltas->end(), delta_sizes_.begin(),
+ delta_sizes_.begin() + size_);
+ }
+}
+
+void TransportFeedback::LastChunk::Decode(uint16_t chunk, size_t max_size) {
+ if ((chunk & 0x8000) == 0) {
+ DecodeRunLength(chunk, max_size);
+ } else if ((chunk & 0x4000) == 0) {
+ DecodeOneBit(chunk, max_size);
+ } else {
+ DecodeTwoBit(chunk, max_size);
+ }
+}
+
+// One Bit Status Vector Chunk
+//
+// 0 1
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |T|S| symbol list |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//
+// T = 1
+// S = 0
+// Symbol list = 14 entries where 0 = not received, 1 = received 1-byte delta.
+uint16_t TransportFeedback::LastChunk::EncodeOneBit() const {
+ RTC_DCHECK(!has_large_delta_);
+ RTC_DCHECK_LE(size_, kMaxOneBitCapacity);
+ uint16_t chunk = 0x8000;
+ for (size_t i = 0; i < size_; ++i)
+ chunk |= delta_sizes_[i] << (kMaxOneBitCapacity - 1 - i);
+ return chunk;
+}
+
+void TransportFeedback::LastChunk::DecodeOneBit(uint16_t chunk,
+ size_t max_size) {
+ RTC_DCHECK_EQ(chunk & 0xc000, 0x8000);
+ size_ = std::min(kMaxOneBitCapacity, max_size);
+ has_large_delta_ = false;
+ all_same_ = false;
+ for (size_t i = 0; i < size_; ++i)
+ delta_sizes_[i] = (chunk >> (kMaxOneBitCapacity - 1 - i)) & 0x01;
+}
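+
+// Worked example (editor's note): a one-bit chunk for the statuses
+// {received, received, lost, received} followed by ten missing packets
+// encodes as 0x8000 | (1 << 13) | (1 << 12) | (1 << 10) = 0xb400.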
+
+// Two Bit Status Vector Chunk
+//
+// 0 1
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |T|S| symbol list |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//
+// T = 1
+// S = 1
+// symbol list = 7 entries of two bits each.
+uint16_t TransportFeedback::LastChunk::EncodeTwoBit(size_t size) const {
+ RTC_DCHECK_LE(size, size_);
+ uint16_t chunk = 0xc000;
+ for (size_t i = 0; i < size; ++i)
+ chunk |= delta_sizes_[i] << 2 * (kMaxTwoBitCapacity - 1 - i);
+ return chunk;
+}
+
+void TransportFeedback::LastChunk::DecodeTwoBit(uint16_t chunk,
+ size_t max_size) {
+ RTC_DCHECK_EQ(chunk & 0xc000, 0xc000);
+ size_ = std::min(kMaxTwoBitCapacity, max_size);
+ has_large_delta_ = true;
+ all_same_ = false;
+ for (size_t i = 0; i < size_; ++i)
+ delta_sizes_[i] = (chunk >> 2 * (kMaxTwoBitCapacity - 1 - i)) & 0x03;
+}
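+
+// Worked example (editor's note): a two-bit chunk for the delta sizes
+// {0, 1, 2} (lost, one-byte delta, two-byte delta) padded with four empty
+// symbols encodes as 0xc000 | (1 << 10) | (2 << 8) = 0xc600.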
+
+// Run Length Status Vector Chunk
+//
+// 0 1
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |T| S | Run Length |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//
+// T = 0
+// S = symbol
+// Run Length = Unsigned integer denoting the run length of the symbol
+uint16_t TransportFeedback::LastChunk::EncodeRunLength() const {
+ RTC_DCHECK(all_same_);
+ RTC_DCHECK_LE(size_, kMaxRunLengthCapacity);
+ return (delta_sizes_[0] << 13) | static_cast<uint16_t>(size_);
+}
+
+void TransportFeedback::LastChunk::DecodeRunLength(uint16_t chunk,
+ size_t max_count) {
+ RTC_DCHECK_EQ(chunk & 0x8000, 0);
+ size_ = std::min<size_t>(chunk & 0x1fff, max_count);
+ DeltaSize delta_size = (chunk >> 13) & 0x03;
+ has_large_delta_ = delta_size >= kLarge;
+ all_same_ = true;
+  // For consistency with Add(), populate delta_sizes_ beyond the first entry.
+ for (size_t i = 0; i < std::min<size_t>(size_, kMaxVectorCapacity); ++i)
+ delta_sizes_[i] = delta_size;
+}
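+
+// Worked example (editor's note): 200 consecutive packets, all received with
+// one-byte deltas, fit in a single run length chunk:
+// (1 << 13) | 200 = 0x20c8.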
+
+TransportFeedback::TransportFeedback()
+ : TransportFeedback(/*include_timestamps=*/true, /*include_lost=*/true) {}
+
+TransportFeedback::TransportFeedback(bool include_timestamps, bool include_lost)
+ : include_lost_(include_lost),
+ base_seq_no_(0),
+ num_seq_no_(0),
+ base_time_ticks_(0),
+ feedback_seq_(0),
+ include_timestamps_(include_timestamps),
+ last_timestamp_(Timestamp::Zero()),
+ size_bytes_(kTransportFeedbackHeaderSizeBytes) {}
+
+TransportFeedback::TransportFeedback(const TransportFeedback&) = default;
+
+TransportFeedback::TransportFeedback(TransportFeedback&& other)
+ : include_lost_(other.include_lost_),
+ base_seq_no_(other.base_seq_no_),
+ num_seq_no_(other.num_seq_no_),
+ base_time_ticks_(other.base_time_ticks_),
+ feedback_seq_(other.feedback_seq_),
+ include_timestamps_(other.include_timestamps_),
+ last_timestamp_(other.last_timestamp_),
+ received_packets_(std::move(other.received_packets_)),
+ all_packets_(std::move(other.all_packets_)),
+ encoded_chunks_(std::move(other.encoded_chunks_)),
+ last_chunk_(other.last_chunk_),
+ size_bytes_(other.size_bytes_) {
+ other.Clear();
+}
+
+TransportFeedback::~TransportFeedback() {}
+
+void TransportFeedback::SetBase(uint16_t base_sequence,
+ Timestamp ref_timestamp) {
+ RTC_DCHECK_EQ(num_seq_no_, 0);
+ base_seq_no_ = base_sequence;
+ base_time_ticks_ =
+ (ref_timestamp.us() % kTimeWrapPeriod.us()) / kBaseTimeTick.us();
+ last_timestamp_ = BaseTime();
+}
+
+void TransportFeedback::SetFeedbackSequenceNumber(uint8_t feedback_sequence) {
+ feedback_seq_ = feedback_sequence;
+}
+
+bool TransportFeedback::AddReceivedPacket(uint16_t sequence_number,
+ Timestamp timestamp) {
+ // Set delta to zero if timestamps are not included, this will simplify the
+ // encoding process.
+ int16_t delta = 0;
+ if (include_timestamps_) {
+ // Convert to ticks and round.
+ if (last_timestamp_ > timestamp) {
+ timestamp += (last_timestamp_ - timestamp).RoundUpTo(kTimeWrapPeriod);
+ }
+ RTC_DCHECK_GE(timestamp, last_timestamp_);
+ int64_t delta_full =
+ (timestamp - last_timestamp_).us() % kTimeWrapPeriod.us();
+ if (delta_full > kTimeWrapPeriod.us() / 2) {
+ delta_full -= kTimeWrapPeriod.us();
+ delta_full -= kDeltaTick.us() / 2;
+ } else {
+ delta_full += kDeltaTick.us() / 2;
+ }
+ delta_full /= kDeltaTick.us();
+
+ delta = static_cast<int16_t>(delta_full);
+ // If larger than 16bit signed, we can't represent it - need new fb packet.
+ if (delta != delta_full) {
+ RTC_LOG(LS_WARNING) << "Delta value too large ( >= 2^16 ticks )";
+ return false;
+ }
+ }
+
+ uint16_t next_seq_no = base_seq_no_ + num_seq_no_;
+ if (sequence_number != next_seq_no) {
+ uint16_t last_seq_no = next_seq_no - 1;
+ if (!IsNewerSequenceNumber(sequence_number, last_seq_no))
+ return false;
+ uint16_t num_missing_packets = sequence_number - next_seq_no;
+ if (!AddMissingPackets(num_missing_packets))
+ return false;
+ if (include_lost_) {
+ for (; next_seq_no != sequence_number; ++next_seq_no) {
+ all_packets_.emplace_back(next_seq_no);
+ }
+ }
+ }
+
+ DeltaSize delta_size = (delta >= 0 && delta <= 0xff) ? 1 : 2;
+ if (!AddDeltaSize(delta_size))
+ return false;
+
+ received_packets_.emplace_back(sequence_number, delta);
+ if (include_lost_)
+ all_packets_.emplace_back(sequence_number, delta);
+ last_timestamp_ += delta * kDeltaTick;
+ if (include_timestamps_) {
+ size_bytes_ += delta_size;
+ }
+ return true;
+}
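+
+// Usage sketch (editor's illustration): SetBase() must be called before the
+// first AddReceivedPacket(), and sequence numbers must be added in
+// increasing order, e.g.:
+//
+//   TransportFeedback fb;
+//   fb.SetBase(/*base_sequence=*/100, Timestamp::Millis(10'000));
+//   fb.AddReceivedPacket(100, Timestamp::Millis(10'000));
+//   fb.AddReceivedPacket(102, Timestamp::Millis(10'010));  // 101 is lost.
+//   rtc::Buffer raw = fb.Build();  // Build() comes from RtcpPacket.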
+
+const std::vector<TransportFeedback::ReceivedPacket>&
+TransportFeedback::GetReceivedPackets() const {
+ return received_packets_;
+}
+
+const std::vector<TransportFeedback::ReceivedPacket>&
+TransportFeedback::GetAllPackets() const {
+ RTC_DCHECK(include_lost_);
+ return all_packets_;
+}
+
+uint16_t TransportFeedback::GetBaseSequence() const {
+ return base_seq_no_;
+}
+
+Timestamp TransportFeedback::BaseTime() const {
+  // Add an extra kTimeWrapPeriod to allow adding received packets that
+  // arrived earlier than the first added packet (and thus allow recording
+  // negative deltas) even when base_time_ticks_ == 0.
+ return Timestamp::Zero() + kTimeWrapPeriod +
+ int64_t{base_time_ticks_} * kBaseTimeTick;
+}
+
+TimeDelta TransportFeedback::GetBaseDelta(Timestamp prev_timestamp) const {
+ TimeDelta delta = BaseTime() - prev_timestamp;
+ // Compensate for wrap around.
+ if ((delta - kTimeWrapPeriod).Abs() < delta.Abs()) {
+ delta -= kTimeWrapPeriod; // Wrap backwards.
+ } else if ((delta + kTimeWrapPeriod).Abs() < delta.Abs()) {
+ delta += kTimeWrapPeriod; // Wrap forwards.
+ }
+ return delta;
+}
+
+// De-serialize packet.
+bool TransportFeedback::Parse(const CommonHeader& packet) {
+ RTC_DCHECK_EQ(packet.type(), kPacketType);
+ RTC_DCHECK_EQ(packet.fmt(), kFeedbackMessageType);
+ TRACE_EVENT0("webrtc", "TransportFeedback::Parse");
+
+ if (packet.payload_size_bytes() < kMinPayloadSizeBytes) {
+ RTC_LOG(LS_WARNING) << "Buffer too small (" << packet.payload_size_bytes()
+ << " bytes) to fit a "
+ "FeedbackPacket. Minimum size = "
+ << kMinPayloadSizeBytes;
+ return false;
+ }
+
+ const uint8_t* const payload = packet.payload();
+ ParseCommonFeedback(payload);
+
+ base_seq_no_ = ByteReader<uint16_t>::ReadBigEndian(&payload[8]);
+ uint16_t status_count = ByteReader<uint16_t>::ReadBigEndian(&payload[10]);
+ base_time_ticks_ = ByteReader<uint32_t, 3>::ReadBigEndian(&payload[12]);
+ feedback_seq_ = payload[15];
+ Clear();
+ size_t index = 16;
+ const size_t end_index = packet.payload_size_bytes();
+
+ if (status_count == 0) {
+ RTC_LOG(LS_WARNING) << "Empty feedback messages not allowed.";
+ return false;
+ }
+
+ std::vector<uint8_t> delta_sizes;
+ delta_sizes.reserve(status_count);
+ while (delta_sizes.size() < status_count) {
+ if (index + kChunkSizeBytes > end_index) {
+ RTC_LOG(LS_WARNING) << "Buffer overflow while parsing packet.";
+ Clear();
+ return false;
+ }
+
+ uint16_t chunk = ByteReader<uint16_t>::ReadBigEndian(&payload[index]);
+ index += kChunkSizeBytes;
+ encoded_chunks_.push_back(chunk);
+ last_chunk_.Decode(chunk, status_count - delta_sizes.size());
+ last_chunk_.AppendTo(&delta_sizes);
+ }
+ // Last chunk is stored in the `last_chunk_`.
+ encoded_chunks_.pop_back();
+ RTC_DCHECK_EQ(delta_sizes.size(), status_count);
+ num_seq_no_ = status_count;
+
+ uint16_t seq_no = base_seq_no_;
+ size_t recv_delta_size = absl::c_accumulate(delta_sizes, 0);
+
+  // Determine whether timestamps (recv deltas) are included in the packet.
+ if (end_index >= index + recv_delta_size) {
+ for (size_t delta_size : delta_sizes) {
+ RTC_DCHECK_LE(index + delta_size, end_index);
+ switch (delta_size) {
+ case 0:
+ if (include_lost_)
+ all_packets_.emplace_back(seq_no);
+ break;
+ case 1: {
+ int16_t delta = payload[index];
+ received_packets_.emplace_back(seq_no, delta);
+ if (include_lost_)
+ all_packets_.emplace_back(seq_no, delta);
+ last_timestamp_ += delta * kDeltaTick;
+ index += delta_size;
+ break;
+ }
+ case 2: {
+ int16_t delta = ByteReader<int16_t>::ReadBigEndian(&payload[index]);
+ received_packets_.emplace_back(seq_no, delta);
+ if (include_lost_)
+ all_packets_.emplace_back(seq_no, delta);
+ last_timestamp_ += delta * kDeltaTick;
+ index += delta_size;
+ break;
+ }
+ case 3:
+ Clear();
+ RTC_LOG(LS_WARNING) << "Invalid delta_size for seq_no " << seq_no;
+
+ return false;
+ default:
+ RTC_DCHECK_NOTREACHED();
+ break;
+ }
+ ++seq_no;
+ }
+ } else {
+ // The packet does not contain receive deltas.
+ include_timestamps_ = false;
+ for (size_t delta_size : delta_sizes) {
+ // Use delta sizes to detect if packet was received.
+ if (delta_size > 0) {
+ received_packets_.emplace_back(seq_no, 0);
+ }
+ if (include_lost_) {
+ if (delta_size > 0) {
+ all_packets_.emplace_back(seq_no, 0);
+ } else {
+ all_packets_.emplace_back(seq_no);
+ }
+ }
+ ++seq_no;
+ }
+ }
+ size_bytes_ = RtcpPacket::kHeaderLength + index;
+ RTC_DCHECK_LE(index, end_index);
+ return true;
+}
+
+std::unique_ptr<TransportFeedback> TransportFeedback::ParseFrom(
+ const uint8_t* buffer,
+ size_t length) {
+ CommonHeader header;
+ if (!header.Parse(buffer, length))
+ return nullptr;
+ if (header.type() != kPacketType || header.fmt() != kFeedbackMessageType)
+ return nullptr;
+ std::unique_ptr<TransportFeedback> parsed(new TransportFeedback);
+ if (!parsed->Parse(header))
+ return nullptr;
+ return parsed;
+}
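+// Usage sketch (buffer names are illustrative): parse a raw serialized RTCP
+// payload and fall back gracefully on malformed input.
+//   if (auto fb = TransportFeedback::ParseFrom(data, size)) {
+//     for (const auto& pkt : fb->GetReceivedPackets()) { /* ... */ }
+//   }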
+
+bool TransportFeedback::IsConsistent() const {
+ size_t packet_size = kTransportFeedbackHeaderSizeBytes;
+ std::vector<DeltaSize> delta_sizes;
+ LastChunk chunk_decoder;
+ for (uint16_t chunk : encoded_chunks_) {
+ chunk_decoder.Decode(chunk, kMaxReportedPackets);
+ chunk_decoder.AppendTo(&delta_sizes);
+ packet_size += kChunkSizeBytes;
+ }
+ if (!last_chunk_.Empty()) {
+ last_chunk_.AppendTo(&delta_sizes);
+ packet_size += kChunkSizeBytes;
+ }
+ if (num_seq_no_ != delta_sizes.size()) {
+ RTC_LOG(LS_ERROR) << delta_sizes.size() << " packets encoded. Expected "
+ << num_seq_no_;
+ return false;
+ }
+ Timestamp timestamp = BaseTime();
+ auto packet_it = received_packets_.begin();
+ uint16_t seq_no = base_seq_no_;
+ for (DeltaSize delta_size : delta_sizes) {
+ if (delta_size > 0) {
+ if (packet_it == received_packets_.end()) {
+ RTC_LOG(LS_ERROR) << "Failed to find delta for seq_no " << seq_no;
+ return false;
+ }
+ if (packet_it->sequence_number() != seq_no) {
+ RTC_LOG(LS_ERROR) << "Expected to find delta for seq_no " << seq_no
+ << ". Next delta is for "
+ << packet_it->sequence_number();
+ return false;
+ }
+ if (delta_size == 1 &&
+ (packet_it->delta_ticks() < 0 || packet_it->delta_ticks() > 0xff)) {
+ RTC_LOG(LS_ERROR) << "Delta " << packet_it->delta_ticks()
+ << " for seq_no " << seq_no
+ << " doesn't fit into one byte";
+ return false;
+ }
+ timestamp += packet_it->delta();
+ ++packet_it;
+ }
+ if (include_timestamps_) {
+ packet_size += delta_size;
+ }
+ ++seq_no;
+ }
+ if (packet_it != received_packets_.end()) {
+ RTC_LOG(LS_ERROR) << "Unencoded delta for seq_no "
+ << packet_it->sequence_number();
+ return false;
+ }
+ if (timestamp != last_timestamp_) {
+ RTC_LOG(LS_ERROR) << "Last timestamp mismatch. Calculated: "
+ << ToLogString(timestamp)
+ << ". Saved: " << ToLogString(last_timestamp_);
+ return false;
+ }
+ if (size_bytes_ != packet_size) {
+ RTC_LOG(LS_ERROR) << "Rtcp packet size mismatch. Calculated: "
+ << packet_size << ". Saved: " << size_bytes_;
+ return false;
+ }
+ return true;
+}
+
+size_t TransportFeedback::BlockLength() const {
+ // Round size_bytes_ up to multiple of 32bits.
+ return (size_bytes_ + 3) & (~static_cast<size_t>(3));
+}
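+// E.g. a size_bytes_ of 21 is reported as 24, while an already aligned size
+// such as 24 is returned unchanged.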
+
+size_t TransportFeedback::PaddingLength() const {
+ return BlockLength() - size_bytes_;
+}
+
+// Serialize packet.
+bool TransportFeedback::Create(uint8_t* packet,
+ size_t* position,
+ size_t max_length,
+ PacketReadyCallback callback) const {
+ if (num_seq_no_ == 0)
+ return false;
+
+ while (*position + BlockLength() > max_length) {
+ if (!OnBufferFull(packet, position, callback))
+ return false;
+ }
+ const size_t position_end = *position + BlockLength();
+ const size_t padding_length = PaddingLength();
+ bool has_padding = padding_length > 0;
+ CreateHeader(kFeedbackMessageType, kPacketType, HeaderLength(), has_padding,
+ packet, position);
+ CreateCommonFeedback(packet + *position);
+ *position += kCommonFeedbackLength;
+
+ ByteWriter<uint16_t>::WriteBigEndian(&packet[*position], base_seq_no_);
+ *position += 2;
+
+ ByteWriter<uint16_t>::WriteBigEndian(&packet[*position], num_seq_no_);
+ *position += 2;
+
+ ByteWriter<uint32_t, 3>::WriteBigEndian(&packet[*position], base_time_ticks_);
+ *position += 3;
+
+ packet[(*position)++] = feedback_seq_;
+
+ for (uint16_t chunk : encoded_chunks_) {
+ ByteWriter<uint16_t>::WriteBigEndian(&packet[*position], chunk);
+ *position += 2;
+ }
+ if (!last_chunk_.Empty()) {
+ uint16_t chunk = last_chunk_.EncodeLast();
+ ByteWriter<uint16_t>::WriteBigEndian(&packet[*position], chunk);
+ *position += 2;
+ }
+
+ if (include_timestamps_) {
+ for (const auto& received_packet : received_packets_) {
+ int16_t delta = received_packet.delta_ticks();
+ if (delta >= 0 && delta <= 0xFF) {
+ packet[(*position)++] = delta;
+ } else {
+ ByteWriter<int16_t>::WriteBigEndian(&packet[*position], delta);
+ *position += 2;
+ }
+ }
+ }
+
+ if (padding_length > 0) {
+ for (size_t i = 0; i < padding_length - 1; ++i) {
+ packet[(*position)++] = 0;
+ }
+ packet[(*position)++] = padding_length;
+ }
+ RTC_DCHECK_EQ(*position, position_end);
+ return true;
+}
+
+void TransportFeedback::Clear() {
+ num_seq_no_ = 0;
+ last_timestamp_ = BaseTime();
+ received_packets_.clear();
+ all_packets_.clear();
+ encoded_chunks_.clear();
+ last_chunk_.Clear();
+ size_bytes_ = kTransportFeedbackHeaderSizeBytes;
+}
+
+bool TransportFeedback::AddDeltaSize(DeltaSize delta_size) {
+ if (num_seq_no_ == kMaxReportedPackets)
+ return false;
+ size_t add_chunk_size = last_chunk_.Empty() ? kChunkSizeBytes : 0;
+ if (size_bytes_ + delta_size + add_chunk_size > kMaxSizeBytes)
+ return false;
+
+ if (last_chunk_.CanAdd(delta_size)) {
+ size_bytes_ += add_chunk_size;
+ last_chunk_.Add(delta_size);
+ ++num_seq_no_;
+ return true;
+ }
+ if (size_bytes_ + delta_size + kChunkSizeBytes > kMaxSizeBytes)
+ return false;
+
+ encoded_chunks_.push_back(last_chunk_.Emit());
+ size_bytes_ += kChunkSizeBytes;
+ last_chunk_.Add(delta_size);
+ ++num_seq_no_;
+ return true;
+}
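+// Note on size accounting: AddDeltaSize() charges only the status-chunk bytes
+// (kChunkSizeBytes whenever a new chunk is started); the receive-delta bytes
+// are accounted for separately, as IsConsistent() illustrates by adding
+// `delta_size` per reported packet when timestamps are included.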
+
+bool TransportFeedback::AddMissingPackets(size_t num_missing_packets) {
+ size_t new_num_seq_no = num_seq_no_ + num_missing_packets;
+ if (new_num_seq_no > kMaxReportedPackets) {
+ return false;
+ }
+
+ if (!last_chunk_.Empty()) {
+ while (num_missing_packets > 0 && last_chunk_.CanAdd(0)) {
+ last_chunk_.Add(0);
+ --num_missing_packets;
+ }
+ if (num_missing_packets == 0) {
+ num_seq_no_ = new_num_seq_no;
+ return true;
+ }
+ encoded_chunks_.push_back(last_chunk_.Emit());
+ }
+ RTC_DCHECK(last_chunk_.Empty());
+ size_t full_chunks = num_missing_packets / LastChunk::kMaxRunLengthCapacity;
+ size_t partial_chunk = num_missing_packets % LastChunk::kMaxRunLengthCapacity;
+ size_t num_chunks = full_chunks + (partial_chunk > 0 ? 1 : 0);
+ if (size_bytes_ + kChunkSizeBytes * num_chunks > kMaxSizeBytes) {
+ num_seq_no_ = (new_num_seq_no - num_missing_packets);
+ return false;
+ }
+ size_bytes_ += kChunkSizeBytes * num_chunks;
+ // T = 0, S = 0, run length = kMaxRunLengthCapacity, see EncodeRunLength().
+ encoded_chunks_.insert(encoded_chunks_.end(), full_chunks,
+ LastChunk::kMaxRunLengthCapacity);
+ last_chunk_.AddMissingPackets(partial_chunk);
+ num_seq_no_ = new_num_seq_no;
+ return true;
+}
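+// Chunk-count illustration: 20000 missing packets split into
+// 20000 / 0x1fff = 2 full run-length chunks of 8191 statuses each, plus one
+// partial chunk carrying the remaining 3618.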
+} // namespace rtcp
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/transport_feedback.h b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/transport_feedback.h
new file mode 100644
index 0000000000..c580632337
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/transport_feedback.h
@@ -0,0 +1,185 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_TRANSPORT_FEEDBACK_H_
+#define MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_TRANSPORT_FEEDBACK_H_
+
+#include <array>
+#include <cstdint>
+#include <memory>
+#include <vector>
+
+#include "absl/base/attributes.h"
+#include "api/units/time_delta.h"
+#include "api/units/timestamp.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/rtpfb.h"
+
+namespace webrtc {
+namespace rtcp {
+class CommonHeader;
+
+class TransportFeedback : public Rtpfb {
+ public:
+ class ReceivedPacket {
+ public:
+ ReceivedPacket(uint16_t sequence_number, int16_t delta_ticks)
+ : sequence_number_(sequence_number),
+ delta_ticks_(delta_ticks),
+ received_(true) {}
+ explicit ReceivedPacket(uint16_t sequence_number)
+ : sequence_number_(sequence_number), received_(false) {}
+ ReceivedPacket(const ReceivedPacket&) = default;
+ ReceivedPacket& operator=(const ReceivedPacket&) = default;
+
+ uint16_t sequence_number() const { return sequence_number_; }
+ int16_t delta_ticks() const { return delta_ticks_; }
+ TimeDelta delta() const { return delta_ticks_ * kDeltaTick; }
+ bool received() const { return received_; }
+
+ private:
+ uint16_t sequence_number_;
+ int16_t delta_ticks_;
+ bool received_;
+ };
+ // TODO(sprang): IANA reg?
+ static constexpr uint8_t kFeedbackMessageType = 15;
+  // Receive deltas are encoded as multiples of 0.25 ms.
+ static constexpr TimeDelta kDeltaTick = TimeDelta::Micros(250);
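+  // E.g. a one-byte receive delta of 0xFF encodes 255 * kDeltaTick = 63.75 ms;
+  // a two-byte (signed) delta covers roughly -8192 ms to +8192 ms.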
+ // Maximum number of packets (including missing) TransportFeedback can report.
+ static constexpr size_t kMaxReportedPackets = 0xffff;
+
+ TransportFeedback();
+
+ // If `include_timestamps` is set to false, the created packet will not
+ // contain the receive delta block.
+ explicit TransportFeedback(bool include_timestamps,
+ bool include_lost = false);
+ TransportFeedback(const TransportFeedback&);
+ TransportFeedback(TransportFeedback&&);
+
+ ~TransportFeedback() override;
+
+ void SetBase(uint16_t base_sequence, // Seq# of first packet in this msg.
+ Timestamp ref_timestamp); // Reference timestamp for this msg.
+
+ void SetFeedbackSequenceNumber(uint8_t feedback_sequence);
+ // NOTE: This method requires increasing sequence numbers (excepting wraps).
+ bool AddReceivedPacket(uint16_t sequence_number, Timestamp timestamp);
+ const std::vector<ReceivedPacket>& GetReceivedPackets() const;
+ const std::vector<ReceivedPacket>& GetAllPackets() const;
+
+ uint16_t GetBaseSequence() const;
+
+ // Returns number of packets (including missing) this feedback describes.
+ size_t GetPacketStatusCount() const { return num_seq_no_; }
+
+ // Get the reference time including any precision loss.
+ Timestamp BaseTime() const;
+
+ // Get the unwrapped delta between current base time and `prev_timestamp`.
+ TimeDelta GetBaseDelta(Timestamp prev_timestamp) const;
+
+ // Does the feedback packet contain timestamp information?
+ bool IncludeTimestamps() const { return include_timestamps_; }
+
+ bool Parse(const CommonHeader& packet);
+ static std::unique_ptr<TransportFeedback> ParseFrom(const uint8_t* buffer,
+ size_t length);
+  // Pre- and postcondition for all public methods. Should always return true.
+  // This function is only used in tests.
+ bool IsConsistent() const;
+
+ size_t BlockLength() const override;
+ size_t PaddingLength() const;
+
+ bool Create(uint8_t* packet,
+ size_t* position,
+ size_t max_length,
+ PacketReadyCallback callback) const override;
+
+ private:
+  // Size in bytes of a delta time in the RTCP packet.
+  // Valid values are 0 (packet wasn't received), 1, or 2.
+ using DeltaSize = uint8_t;
+  // Keeps DeltaSizes that can still be encoded into a single chunk if it is
+  // the last chunk.
+ class LastChunk {
+ public:
+ using DeltaSize = TransportFeedback::DeltaSize;
+ static constexpr size_t kMaxRunLengthCapacity = 0x1fff;
+
+ LastChunk();
+
+ bool Empty() const;
+ void Clear();
+    // Returns true if the delta sizes can still be encoded into a single
+    // chunk after `delta_size` is added.
+ bool CanAdd(DeltaSize delta_size) const;
+    // Adds `delta_size`; assumes `CanAdd(delta_size)` holds.
+ void Add(DeltaSize delta_size);
+ // Equivalent to calling Add(0) `num_missing` times. Assumes `Empty()`.
+ void AddMissingPackets(size_t num_missing);
+
+    // Encodes as large a chunk as possible, removing the encoded delta sizes.
+    // Assumes CanAdd() == false for some valid delta_size.
+ uint16_t Emit();
+    // Encodes all stored delta_sizes into a single chunk, padded with 0s if
+    // needed.
+ uint16_t EncodeLast() const;
+
+ // Decode up to `max_size` delta sizes from `chunk`.
+ void Decode(uint16_t chunk, size_t max_size);
+    // Appends the content of the LastChunk to `deltas`.
+ void AppendTo(std::vector<DeltaSize>* deltas) const;
+
+ private:
+ static constexpr size_t kMaxOneBitCapacity = 14;
+ static constexpr size_t kMaxTwoBitCapacity = 7;
+ static constexpr size_t kMaxVectorCapacity = kMaxOneBitCapacity;
+ static constexpr DeltaSize kLarge = 2;
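+    // These capacities mirror the three chunk encodings: a two-bit status
+    // vector holds 7 symbols, a one-bit vector holds 14, and a run-length
+    // chunk can describe up to 0x1fff consecutive equal statuses.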
+
+ uint16_t EncodeOneBit() const;
+ void DecodeOneBit(uint16_t chunk, size_t max_size);
+
+ uint16_t EncodeTwoBit(size_t size) const;
+ void DecodeTwoBit(uint16_t chunk, size_t max_size);
+
+ uint16_t EncodeRunLength() const;
+ void DecodeRunLength(uint16_t chunk, size_t max_size);
+
+ std::array<DeltaSize, kMaxVectorCapacity> delta_sizes_;
+ size_t size_;
+ bool all_same_;
+ bool has_large_delta_;
+ };
+
+ // Reset packet to consistent empty state.
+ void Clear();
+
+ bool AddDeltaSize(DeltaSize delta_size);
+ // Adds `num_missing_packets` deltas of size 0.
+ bool AddMissingPackets(size_t num_missing_packets);
+
+ const bool include_lost_;
+ uint16_t base_seq_no_;
+ uint16_t num_seq_no_;
+ uint32_t base_time_ticks_;
+ uint8_t feedback_seq_;
+ bool include_timestamps_;
+
+ Timestamp last_timestamp_;
+ std::vector<ReceivedPacket> received_packets_;
+ std::vector<ReceivedPacket> all_packets_;
+ // All but last encoded packet chunks.
+ std::vector<uint16_t> encoded_chunks_;
+ LastChunk last_chunk_;
+ size_t size_bytes_;
+};
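+// Builder-side usage sketch (mirrors the unit tests): set the base, add
+// packets in increasing sequence order, then serialize.
+//   TransportFeedback fb;
+//   fb.SetBase(/*base_sequence=*/0, Timestamp::Zero());
+//   fb.AddReceivedPacket(/*sequence_number=*/0, Timestamp::Zero());
+//   rtc::Buffer raw = fb.Build();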
+
+} // namespace rtcp
+} // namespace webrtc
+#endif // MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_TRANSPORT_FEEDBACK_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/transport_feedback_unittest.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/transport_feedback_unittest.cc
new file mode 100644
index 0000000000..b3b1175305
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet/transport_feedback_unittest.cc
@@ -0,0 +1,664 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtcp_packet/transport_feedback.h"
+
+#include <iterator>
+#include <limits>
+#include <memory>
+#include <utility>
+
+#include "api/array_view.h"
+#include "api/units/time_delta.h"
+#include "api/units/timestamp.h"
+#include "modules/rtp_rtcp/source/byte_io.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/common_header.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+using rtcp::TransportFeedback;
+using ::testing::AllOf;
+using ::testing::Each;
+using ::testing::ElementsAre;
+using ::testing::ElementsAreArray;
+using ::testing::Eq;
+using ::testing::Property;
+using ::testing::SizeIs;
+
+constexpr int kHeaderSize = 20;
+constexpr int kStatusChunkSize = 2;
+constexpr int kSmallDeltaSize = 1;
+constexpr int kLargeDeltaSize = 2;
+
+constexpr TimeDelta kDeltaLimit = 0xFF * TransportFeedback::kDeltaTick;
+constexpr TimeDelta kBaseTimeTick = TransportFeedback::kDeltaTick * (1 << 8);
+constexpr TimeDelta kBaseTimeWrapPeriod = kBaseTimeTick * (1 << 24);
+
+MATCHER_P2(Near, value, max_abs_error, "") {
+ return value - max_abs_error <= arg && arg <= value + max_abs_error;
+}
+
+MATCHER(IsValidFeedback, "") {
+ rtcp::CommonHeader rtcp_header;
+ TransportFeedback feedback;
+ return rtcp_header.Parse(std::data(arg), std::size(arg)) &&
+ rtcp_header.type() == TransportFeedback::kPacketType &&
+ rtcp_header.fmt() == TransportFeedback::kFeedbackMessageType &&
+ feedback.Parse(rtcp_header);
+}
+
+TransportFeedback Parse(rtc::ArrayView<const uint8_t> buffer) {
+ rtcp::CommonHeader header;
+ RTC_DCHECK(header.Parse(buffer.data(), buffer.size()));
+ RTC_DCHECK_EQ(header.type(), TransportFeedback::kPacketType);
+ RTC_DCHECK_EQ(header.fmt(), TransportFeedback::kFeedbackMessageType);
+ TransportFeedback feedback;
+ RTC_DCHECK(feedback.Parse(header));
+ return feedback;
+}
+
+class FeedbackTester {
+ public:
+ FeedbackTester() : FeedbackTester(true) {}
+ explicit FeedbackTester(bool include_timestamps)
+ : expected_size_(kAnySize),
+ default_delta_(TransportFeedback::kDeltaTick * 4),
+ include_timestamps_(include_timestamps) {}
+
+ void WithExpectedSize(size_t expected_size) {
+ expected_size_ = expected_size;
+ }
+
+ void WithDefaultDelta(TimeDelta delta) { default_delta_ = delta; }
+
+ void WithInput(rtc::ArrayView<const uint16_t> received_seq,
+ rtc::ArrayView<const Timestamp> received_ts = {}) {
+ std::vector<Timestamp> temp_timestamps;
+ if (received_ts.empty()) {
+ temp_timestamps = GenerateReceiveTimestamps(received_seq);
+ received_ts = temp_timestamps;
+ }
+ RTC_DCHECK_EQ(received_seq.size(), received_ts.size());
+
+ expected_deltas_.clear();
+ feedback_.emplace(include_timestamps_);
+ feedback_->SetBase(received_seq[0], received_ts[0]);
+ ASSERT_TRUE(feedback_->IsConsistent());
+    // The first delta is special: it doesn't represent the delta between two
+    // receive times, but compensates for the reduced precision of the base
+    // time.
+    EXPECT_TRUE(feedback_->AddReceivedPacket(received_seq[0], received_ts[0]));
+    // GetBaseDelta is supposed to return the delta between the base time of
+    // the new feedback message (stored internally) and the base time of the
+    // old feedback message (passed as a parameter). The first delta, however,
+    // is the difference between the 1st timestamp (passed as a parameter) and
+    // the base time (stored internally), so the first delta is the negation
+    // of whatever GetBaseDelta returns.
+ expected_deltas_.push_back(-feedback_->GetBaseDelta(received_ts[0]));
+
+ for (size_t i = 1; i < received_ts.size(); ++i) {
+ EXPECT_TRUE(
+ feedback_->AddReceivedPacket(received_seq[i], received_ts[i]));
+ expected_deltas_.push_back(received_ts[i] - received_ts[i - 1]);
+ }
+ ASSERT_TRUE(feedback_->IsConsistent());
+ expected_seq_.assign(received_seq.begin(), received_seq.end());
+ }
+
+ void VerifyPacket() {
+ ASSERT_TRUE(feedback_->IsConsistent());
+ serialized_ = feedback_->Build();
+ VerifyInternal();
+
+ feedback_.emplace(Parse(serialized_));
+ ASSERT_TRUE(feedback_->IsConsistent());
+ EXPECT_EQ(include_timestamps_, feedback_->IncludeTimestamps());
+ VerifyInternal();
+ }
+
+ static constexpr size_t kAnySize = static_cast<size_t>(0) - 1;
+
+ private:
+ void VerifyInternal() {
+ if (expected_size_ != kAnySize) {
+ // Round up to whole 32-bit words.
+ size_t expected_size_words = (expected_size_ + 3) / 4;
+ size_t expected_size_bytes = expected_size_words * 4;
+ EXPECT_EQ(expected_size_bytes, serialized_.size());
+ }
+
+ std::vector<uint16_t> actual_seq_nos;
+ std::vector<TimeDelta> actual_deltas;
+ for (const auto& packet : feedback_->GetReceivedPackets()) {
+ actual_seq_nos.push_back(packet.sequence_number());
+ actual_deltas.push_back(packet.delta());
+ }
+ EXPECT_THAT(actual_seq_nos, ElementsAreArray(expected_seq_));
+ if (include_timestamps_) {
+ EXPECT_THAT(actual_deltas, ElementsAreArray(expected_deltas_));
+ }
+ }
+
+ std::vector<Timestamp> GenerateReceiveTimestamps(
+ rtc::ArrayView<const uint16_t> seq_nums) {
+ RTC_DCHECK(!seq_nums.empty());
+ uint16_t last_seq = seq_nums[0];
+ Timestamp time = Timestamp::Zero();
+ std::vector<Timestamp> result;
+
+ for (uint16_t seq : seq_nums) {
+ if (seq < last_seq)
+ time += 0x10000 * default_delta_;
+ last_seq = seq;
+
+ result.push_back(time + last_seq * default_delta_);
+ }
+ return result;
+ }
+
+ std::vector<uint16_t> expected_seq_;
+ std::vector<TimeDelta> expected_deltas_;
+ size_t expected_size_;
+ TimeDelta default_delta_;
+ absl::optional<TransportFeedback> feedback_;
+ rtc::Buffer serialized_;
+ bool include_timestamps_;
+};
+
+// The following tests use FeedbackTester that simulates received packets as
+// specified by the parameters `received_seq[]` and `received_ts[]` (optional).
+// The following is verified in these tests:
+// - Expected size of serialized packet.
+// - Expected sequence numbers and receive deltas.
+// - Sequence numbers and receive deltas are persistent after serialization
+// followed by parsing.
+// - The internal state of a feedback packet is consistent.
+TEST(RtcpPacketTest, TransportFeedbackOneBitVector) {
+ const uint16_t kReceived[] = {1, 2, 7, 8, 9, 10, 13};
+ const size_t kLength = sizeof(kReceived) / sizeof(uint16_t);
+ const size_t kExpectedSizeBytes =
+ kHeaderSize + kStatusChunkSize + (kLength * kSmallDeltaSize);
+
+ FeedbackTester test;
+ test.WithExpectedSize(kExpectedSizeBytes);
+ test.WithInput(kReceived);
+ test.VerifyPacket();
+}
+
+TEST(RtcpPacketTest, TransportFeedbackOneBitVectorNoRecvDelta) {
+ const uint16_t kReceived[] = {1, 2, 7, 8, 9, 10, 13};
+ const size_t kExpectedSizeBytes = kHeaderSize + kStatusChunkSize;
+
+ FeedbackTester test(/*include_timestamps=*/false);
+ test.WithExpectedSize(kExpectedSizeBytes);
+ test.WithInput(kReceived);
+ test.VerifyPacket();
+}
+
+TEST(RtcpPacketTest, TransportFeedbackFullOneBitVector) {
+ const uint16_t kReceived[] = {1, 2, 7, 8, 9, 10, 13, 14};
+ const size_t kLength = sizeof(kReceived) / sizeof(uint16_t);
+ const size_t kExpectedSizeBytes =
+ kHeaderSize + kStatusChunkSize + (kLength * kSmallDeltaSize);
+
+ FeedbackTester test;
+ test.WithExpectedSize(kExpectedSizeBytes);
+ test.WithInput(kReceived);
+ test.VerifyPacket();
+}
+
+TEST(RtcpPacketTest, TransportFeedbackOneBitVectorWrapReceived) {
+ const uint16_t kMax = 0xFFFF;
+ const uint16_t kReceived[] = {kMax - 2, kMax - 1, kMax, 0, 1, 2};
+ const size_t kLength = sizeof(kReceived) / sizeof(uint16_t);
+ const size_t kExpectedSizeBytes =
+ kHeaderSize + kStatusChunkSize + (kLength * kSmallDeltaSize);
+
+ FeedbackTester test;
+ test.WithExpectedSize(kExpectedSizeBytes);
+ test.WithInput(kReceived);
+ test.VerifyPacket();
+}
+
+TEST(RtcpPacketTest, TransportFeedbackOneBitVectorWrapMissing) {
+ const uint16_t kMax = 0xFFFF;
+ const uint16_t kReceived[] = {kMax - 2, kMax - 1, 1, 2};
+ const size_t kLength = sizeof(kReceived) / sizeof(uint16_t);
+ const size_t kExpectedSizeBytes =
+ kHeaderSize + kStatusChunkSize + (kLength * kSmallDeltaSize);
+
+ FeedbackTester test;
+ test.WithExpectedSize(kExpectedSizeBytes);
+ test.WithInput(kReceived);
+ test.VerifyPacket();
+}
+
+TEST(RtcpPacketTest, TransportFeedbackTwoBitVector) {
+ const uint16_t kReceived[] = {1, 2, 6, 7};
+ const size_t kLength = sizeof(kReceived) / sizeof(uint16_t);
+ const size_t kExpectedSizeBytes =
+ kHeaderSize + kStatusChunkSize + (kLength * kLargeDeltaSize);
+
+ FeedbackTester test;
+ test.WithExpectedSize(kExpectedSizeBytes);
+ test.WithDefaultDelta(kDeltaLimit + TransportFeedback::kDeltaTick);
+ test.WithInput(kReceived);
+ test.VerifyPacket();
+}
+
+TEST(RtcpPacketTest, TransportFeedbackTwoBitVectorFull) {
+ const uint16_t kReceived[] = {1, 2, 6, 7, 8};
+ const size_t kLength = sizeof(kReceived) / sizeof(uint16_t);
+ const size_t kExpectedSizeBytes =
+ kHeaderSize + (2 * kStatusChunkSize) + (kLength * kLargeDeltaSize);
+
+ FeedbackTester test;
+ test.WithExpectedSize(kExpectedSizeBytes);
+ test.WithDefaultDelta(kDeltaLimit + TransportFeedback::kDeltaTick);
+ test.WithInput(kReceived);
+ test.VerifyPacket();
+}
+
+TEST(RtcpPacketTest, TransportFeedbackWithLargeBaseTimeIsConsistent) {
+ TransportFeedback tb;
+ constexpr Timestamp kTimestamp =
+ Timestamp::Zero() + int64_t{0x7fff'ffff} * TransportFeedback::kDeltaTick;
+ tb.SetBase(/*base_sequence=*/0, /*ref_timestamp=*/kTimestamp);
+  tb.AddReceivedPacket(/*sequence_number=*/0, /*timestamp=*/kTimestamp);
+ EXPECT_TRUE(tb.IsConsistent());
+}
+
+TEST(RtcpPacketTest, TransportFeedbackLargeAndNegativeDeltas) {
+ const uint16_t kReceived[] = {1, 2, 6, 7, 8};
+ const Timestamp kReceiveTimes[] = {
+ Timestamp::Millis(2), Timestamp::Millis(1), Timestamp::Millis(4),
+ Timestamp::Millis(3),
+ Timestamp::Millis(3) + TransportFeedback::kDeltaTick * (1 << 8)};
+ const size_t kExpectedSizeBytes =
+ kHeaderSize + kStatusChunkSize + (3 * kLargeDeltaSize) + kSmallDeltaSize;
+
+ FeedbackTester test;
+ test.WithExpectedSize(kExpectedSizeBytes);
+ test.WithInput(kReceived, kReceiveTimes);
+ test.VerifyPacket();
+}
+
+TEST(RtcpPacketTest, TransportFeedbackMaxRle) {
+ // Expected chunks created:
+ // * 1-bit vector chunk (1xreceived + 13xdropped)
+ // * RLE chunk of max length for dropped symbol
+ // * 1-bit vector chunk (1xreceived + 13xdropped)
+
+ const size_t kPacketCount = (1 << 13) - 1 + 14;
+ const uint16_t kReceived[] = {0, kPacketCount};
+ const Timestamp kReceiveTimes[] = {Timestamp::Millis(1),
+ Timestamp::Millis(2)};
+ const size_t kLength = sizeof(kReceived) / sizeof(uint16_t);
+ const size_t kExpectedSizeBytes =
+ kHeaderSize + (3 * kStatusChunkSize) + (kLength * kSmallDeltaSize);
+
+ FeedbackTester test;
+ test.WithExpectedSize(kExpectedSizeBytes);
+ test.WithInput(kReceived, kReceiveTimes);
+ test.VerifyPacket();
+}
+
+TEST(RtcpPacketTest, TransportFeedbackMinRle) {
+ // Expected chunks created:
+ // * 1-bit vector chunk (1xreceived + 13xdropped)
+ // * RLE chunk of length 15 for dropped symbol
+ // * 1-bit vector chunk (1xreceived + 13xdropped)
+
+ const uint16_t kReceived[] = {0, (14 * 2) + 1};
+ const Timestamp kReceiveTimes[] = {Timestamp::Millis(1),
+ Timestamp::Millis(2)};
+ const size_t kLength = sizeof(kReceived) / sizeof(uint16_t);
+ const size_t kExpectedSizeBytes =
+ kHeaderSize + (3 * kStatusChunkSize) + (kLength * kSmallDeltaSize);
+
+ FeedbackTester test;
+ test.WithExpectedSize(kExpectedSizeBytes);
+ test.WithInput(kReceived, kReceiveTimes);
+ test.VerifyPacket();
+}
+
+TEST(RtcpPacketTest, TransportFeedbackOneToTwoBitVector) {
+ const size_t kTwoBitVectorCapacity = 7;
+ const uint16_t kReceived[] = {0, kTwoBitVectorCapacity - 1};
+ const Timestamp kReceiveTimes[] = {
+ Timestamp::Zero(),
+ Timestamp::Zero() + kDeltaLimit + TransportFeedback::kDeltaTick};
+ const size_t kExpectedSizeBytes =
+ kHeaderSize + kStatusChunkSize + kSmallDeltaSize + kLargeDeltaSize;
+
+ FeedbackTester test;
+ test.WithExpectedSize(kExpectedSizeBytes);
+ test.WithInput(kReceived, kReceiveTimes);
+ test.VerifyPacket();
+}
+
+TEST(RtcpPacketTest, TransportFeedbackOneToTwoBitVectorSimpleSplit) {
+ const size_t kTwoBitVectorCapacity = 7;
+ const uint16_t kReceived[] = {0, kTwoBitVectorCapacity};
+ const Timestamp kReceiveTimes[] = {
+ Timestamp::Zero(),
+ Timestamp::Zero() + kDeltaLimit + TransportFeedback::kDeltaTick};
+ const size_t kExpectedSizeBytes =
+ kHeaderSize + (kStatusChunkSize * 2) + kSmallDeltaSize + kLargeDeltaSize;
+
+ FeedbackTester test;
+ test.WithExpectedSize(kExpectedSizeBytes);
+ test.WithInput(kReceived, kReceiveTimes);
+ test.VerifyPacket();
+}
+
+TEST(RtcpPacketTest, TransportFeedbackOneToTwoBitVectorSplit) {
+  // With a received small delta = S and a received large delta = L, use the
+  // input SSSSSSSSLSSSSSSSSSSSS. This will cause a 1:2 split at the L.
+  // After the split there will be two symbols in symbol_vec: SL.
+
+ const TimeDelta kLargeDelta = TransportFeedback::kDeltaTick * (1 << 8);
+ const size_t kNumPackets = (3 * 7) + 1;
+ const size_t kExpectedSizeBytes = kHeaderSize + (kStatusChunkSize * 3) +
+ (kSmallDeltaSize * (kNumPackets - 1)) +
+ (kLargeDeltaSize * 1);
+
+ uint16_t kReceived[kNumPackets];
+ for (size_t i = 0; i < kNumPackets; ++i)
+ kReceived[i] = i;
+
+ std::vector<Timestamp> receive_times;
+ receive_times.reserve(kNumPackets);
+ receive_times.push_back(Timestamp::Millis(1));
+ for (size_t i = 1; i < kNumPackets; ++i) {
+ TimeDelta delta = (i == 8) ? kLargeDelta : TimeDelta::Millis(1);
+ receive_times.push_back(receive_times.back() + delta);
+ }
+
+ FeedbackTester test;
+ test.WithExpectedSize(kExpectedSizeBytes);
+ test.WithInput(kReceived, receive_times);
+ test.VerifyPacket();
+}
+
+TEST(RtcpPacketTest, TransportFeedbackAliasing) {
+ TransportFeedback feedback;
+ feedback.SetBase(0, Timestamp::Zero());
+
+ const int kSamples = 100;
+ const TimeDelta kTooSmallDelta = TransportFeedback::kDeltaTick / 3;
+
+ for (int i = 0; i < kSamples; ++i)
+ feedback.AddReceivedPacket(i, Timestamp::Zero() + i * kTooSmallDelta);
+
+ feedback.Build();
+
+ TimeDelta accumulated_delta = TimeDelta::Zero();
+ int num_samples = 0;
+ for (const auto& packet : feedback.GetReceivedPackets()) {
+ accumulated_delta += packet.delta();
+ TimeDelta expected_time = num_samples * kTooSmallDelta;
+ ++num_samples;
+
+ EXPECT_THAT(accumulated_delta,
+ Near(expected_time, TransportFeedback::kDeltaTick / 2));
+ }
+}
+
+TEST(RtcpPacketTest, TransportFeedbackLimits) {
+ // Sequence number wrap above 0x8000.
+ std::unique_ptr<TransportFeedback> packet(new TransportFeedback());
+ packet->SetBase(0, Timestamp::Zero());
+ EXPECT_TRUE(packet->AddReceivedPacket(0x0, Timestamp::Zero()));
+ EXPECT_TRUE(packet->AddReceivedPacket(0x8000, Timestamp::Millis(1)));
+
+ packet.reset(new TransportFeedback());
+ packet->SetBase(0, Timestamp::Zero());
+ EXPECT_TRUE(packet->AddReceivedPacket(0x0, Timestamp::Zero()));
+ EXPECT_FALSE(packet->AddReceivedPacket(0x8000 + 1, Timestamp::Millis(1)));
+
+ // Packet status count max 0xFFFF.
+ packet.reset(new TransportFeedback());
+ packet->SetBase(0, Timestamp::Zero());
+ EXPECT_TRUE(packet->AddReceivedPacket(0x0, Timestamp::Zero()));
+ EXPECT_TRUE(packet->AddReceivedPacket(0x8000, Timestamp::Millis(1)));
+ EXPECT_TRUE(packet->AddReceivedPacket(0xFFFE, Timestamp::Millis(2)));
+ EXPECT_FALSE(packet->AddReceivedPacket(0xFFFF, Timestamp::Millis(3)));
+
+ // Too large delta.
+ packet.reset(new TransportFeedback());
+ packet->SetBase(0, Timestamp::Zero());
+ TimeDelta kMaxPositiveTimeDelta =
+ std::numeric_limits<int16_t>::max() * TransportFeedback::kDeltaTick;
+ EXPECT_FALSE(packet->AddReceivedPacket(1, Timestamp::Zero() +
+ kMaxPositiveTimeDelta +
+ TransportFeedback::kDeltaTick));
+ EXPECT_TRUE(
+ packet->AddReceivedPacket(1, Timestamp::Zero() + kMaxPositiveTimeDelta));
+
+ // Too large negative delta.
+ packet.reset(new TransportFeedback());
+ TimeDelta kMaxNegativeTimeDelta =
+ std::numeric_limits<int16_t>::min() * TransportFeedback::kDeltaTick;
+  // Use a larger base time so that kBaseTime + kMaxNegativeTimeDelta cannot
+  // be negative.
+ Timestamp kBaseTime = Timestamp::Seconds(1'000'000);
+ packet->SetBase(0, kBaseTime);
+ EXPECT_FALSE(packet->AddReceivedPacket(
+ 1, kBaseTime + kMaxNegativeTimeDelta - TransportFeedback::kDeltaTick));
+ EXPECT_TRUE(packet->AddReceivedPacket(1, kBaseTime + kMaxNegativeTimeDelta));
+
+ // TODO(sprang): Once we support max length lower than RTCP length limit,
+ // add back test for max size in bytes.
+}
+
+TEST(RtcpPacketTest, BaseTimeIsConsistentAcrossMultiplePackets) {
+ constexpr Timestamp kMaxBaseTime =
+ Timestamp::Zero() + kBaseTimeWrapPeriod - kBaseTimeTick;
+
+ TransportFeedback packet1;
+ packet1.SetBase(0, kMaxBaseTime);
+ packet1.AddReceivedPacket(0, kMaxBaseTime);
+ // Build and parse packet to simulate sending it over the wire.
+ TransportFeedback parsed_packet1 = Parse(packet1.Build());
+
+ TransportFeedback packet2;
+ packet2.SetBase(1, kMaxBaseTime + kBaseTimeTick);
+ packet2.AddReceivedPacket(1, kMaxBaseTime + kBaseTimeTick);
+ TransportFeedback parsed_packet2 = Parse(packet2.Build());
+
+ EXPECT_EQ(parsed_packet2.GetBaseDelta(parsed_packet1.BaseTime()),
+ kBaseTimeTick);
+}
+
+TEST(RtcpPacketTest, SupportsMaximumNumberOfNegativeDeltas) {
+ TransportFeedback feedback;
+  // Use a large base time to avoid hitting the zero limit while filling the
+  // feedback, but make it a multiple of kBaseTimeWrapPeriod to hit the edge
+  // case where the base time is recorded as zero in the raw RTCP packet.
+ Timestamp time = Timestamp::Zero() + 1'000 * kBaseTimeWrapPeriod;
+ feedback.SetBase(0, time);
+ static constexpr TimeDelta kMinDelta =
+ TransportFeedback::kDeltaTick * std::numeric_limits<int16_t>::min();
+ uint16_t num_received_rtp_packets = 0;
+ time += kMinDelta;
+ while (feedback.AddReceivedPacket(++num_received_rtp_packets, time)) {
+ ASSERT_GE(time, Timestamp::Zero() - kMinDelta);
+ time += kMinDelta;
+ }
+  // Discount the last packet, which failed to be added.
+ --num_received_rtp_packets;
+ EXPECT_TRUE(feedback.IsConsistent());
+
+ TransportFeedback parsed = Parse(feedback.Build());
+ EXPECT_EQ(parsed.GetReceivedPackets().size(), num_received_rtp_packets);
+ EXPECT_THAT(parsed.GetReceivedPackets(),
+ AllOf(SizeIs(num_received_rtp_packets),
+ Each(Property(&TransportFeedback::ReceivedPacket::delta,
+ Eq(kMinDelta)))));
+ EXPECT_GE(parsed.BaseTime(),
+ Timestamp::Zero() - kMinDelta * num_received_rtp_packets);
+}
+
+TEST(RtcpPacketTest, TransportFeedbackPadding) {
+ const size_t kExpectedSizeBytes =
+ kHeaderSize + kStatusChunkSize + kSmallDeltaSize;
+ const size_t kExpectedSizeWords = (kExpectedSizeBytes + 3) / 4;
+ const size_t kExpectedPaddingSizeBytes =
+ 4 * kExpectedSizeWords - kExpectedSizeBytes;
+
+ TransportFeedback feedback;
+ feedback.SetBase(0, Timestamp::Zero());
+ EXPECT_TRUE(feedback.AddReceivedPacket(0, Timestamp::Zero()));
+
+ rtc::Buffer packet = feedback.Build();
+ EXPECT_EQ(kExpectedSizeWords * 4, packet.size());
+ ASSERT_GT(kExpectedSizeWords * 4, kExpectedSizeBytes);
+ for (size_t i = kExpectedSizeBytes; i < (kExpectedSizeWords * 4 - 1); ++i)
+ EXPECT_EQ(0u, packet[i]);
+
+ EXPECT_EQ(kExpectedPaddingSizeBytes, packet[kExpectedSizeWords * 4 - 1]);
+
+  // Modify the packet by adding 4 bytes of padding at the end. This is not
+  // currently done when sending, but we need to be able to handle it when
+  // receiving.
+
+ const int kPaddingBytes = 4;
+ const size_t kExpectedSizeWithPadding =
+ (kExpectedSizeWords * 4) + kPaddingBytes;
+ uint8_t mod_buffer[kExpectedSizeWithPadding];
+ memcpy(mod_buffer, packet.data(), kExpectedSizeWords * 4);
+ memset(&mod_buffer[kExpectedSizeWords * 4], 0, kPaddingBytes - 1);
+ mod_buffer[kExpectedSizeWithPadding - 1] =
+ kPaddingBytes + kExpectedPaddingSizeBytes;
+ const uint8_t padding_flag = 1 << 5;
+ mod_buffer[0] |= padding_flag;
+ ByteWriter<uint16_t>::WriteBigEndian(
+ &mod_buffer[2], ByteReader<uint16_t>::ReadBigEndian(&mod_buffer[2]) +
+ ((kPaddingBytes + 3) / 4));
+
+ EXPECT_THAT(mod_buffer, IsValidFeedback());
+}
+
+TEST(RtcpPacketTest, TransportFeedbackPaddingBackwardsCompatibility) {
+ const size_t kExpectedSizeBytes =
+ kHeaderSize + kStatusChunkSize + kSmallDeltaSize;
+ const size_t kExpectedSizeWords = (kExpectedSizeBytes + 3) / 4;
+ const size_t kExpectedPaddingSizeBytes =
+ 4 * kExpectedSizeWords - kExpectedSizeBytes;
+
+ TransportFeedback feedback;
+ feedback.SetBase(0, Timestamp::Zero());
+ EXPECT_TRUE(feedback.AddReceivedPacket(0, Timestamp::Zero()));
+
+ rtc::Buffer packet = feedback.Build();
+ EXPECT_EQ(kExpectedSizeWords * 4, packet.size());
+ ASSERT_GT(kExpectedSizeWords * 4, kExpectedSizeBytes);
+ for (size_t i = kExpectedSizeBytes; i < (kExpectedSizeWords * 4 - 1); ++i)
+ EXPECT_EQ(0u, packet[i]);
+
+ EXPECT_GT(kExpectedPaddingSizeBytes, 0u);
+ EXPECT_EQ(kExpectedPaddingSizeBytes, packet[kExpectedSizeWords * 4 - 1]);
+
+  // Modify the packet by removing the padding bit and writing zero at the
+  // last padding byte to verify that we can parse packets from old clients,
+  // where zero padding of up to three bytes was used without the padding bit
+  // being set.
+ uint8_t mod_buffer[kExpectedSizeWords * 4];
+ memcpy(mod_buffer, packet.data(), kExpectedSizeWords * 4);
+ mod_buffer[kExpectedSizeWords * 4 - 1] = 0;
+ const uint8_t padding_flag = 1 << 5;
+ mod_buffer[0] &= ~padding_flag; // Unset padding flag.
+
+ EXPECT_THAT(mod_buffer, IsValidFeedback());
+}
+
+TEST(RtcpPacketTest, TransportFeedbackCorrectlySplitsVectorChunks) {
+ const int kOneBitVectorCapacity = 14;
+ const TimeDelta kLargeTimeDelta = TransportFeedback::kDeltaTick * (1 << 8);
+
+ // Test that a number of small deltas followed by a large delta results in a
+ // correct split into multiple chunks, as needed.
+
+ for (int deltas = 0; deltas <= kOneBitVectorCapacity + 1; ++deltas) {
+ TransportFeedback feedback;
+ feedback.SetBase(0, Timestamp::Zero());
+ for (int i = 0; i < deltas; ++i)
+ feedback.AddReceivedPacket(i, Timestamp::Millis(i));
+ feedback.AddReceivedPacket(deltas,
+ Timestamp::Millis(deltas) + kLargeTimeDelta);
+
+ EXPECT_THAT(feedback.Build(), IsValidFeedback());
+ }
+}
+
+TEST(RtcpPacketTest, TransportFeedbackMoveConstructor) {
+ const int kSamples = 100;
+ const uint16_t kBaseSeqNo = 7531;
+ const Timestamp kBaseTimestamp = Timestamp::Micros(123'456'789);
+ const uint8_t kFeedbackSeqNo = 90;
+
+ TransportFeedback feedback;
+ feedback.SetBase(kBaseSeqNo, kBaseTimestamp);
+ feedback.SetFeedbackSequenceNumber(kFeedbackSeqNo);
+ for (int i = 0; i < kSamples; ++i) {
+ feedback.AddReceivedPacket(
+ kBaseSeqNo + i, kBaseTimestamp + i * TransportFeedback::kDeltaTick);
+ }
+ EXPECT_TRUE(feedback.IsConsistent());
+
+ TransportFeedback feedback_copy(feedback);
+ EXPECT_TRUE(feedback_copy.IsConsistent());
+ EXPECT_TRUE(feedback.IsConsistent());
+ EXPECT_EQ(feedback_copy.Build(), feedback.Build());
+
+ TransportFeedback moved(std::move(feedback));
+ EXPECT_TRUE(moved.IsConsistent());
+ EXPECT_TRUE(feedback.IsConsistent());
+ EXPECT_EQ(moved.Build(), feedback_copy.Build());
+}
+
+TEST(TransportFeedbackTest, ReportsMissingPackets) {
+ const uint16_t kBaseSeqNo = 1000;
+ const Timestamp kBaseTimestamp = Timestamp::Millis(10);
+ const uint8_t kFeedbackSeqNo = 90;
+ TransportFeedback feedback_builder(/*include_timestamps*/ true);
+ feedback_builder.SetBase(kBaseSeqNo, kBaseTimestamp);
+ feedback_builder.SetFeedbackSequenceNumber(kFeedbackSeqNo);
+ feedback_builder.AddReceivedPacket(kBaseSeqNo + 0, kBaseTimestamp);
+  // Packet losses are indicated by a jump in sequence number.
+ feedback_builder.AddReceivedPacket(kBaseSeqNo + 3,
+ kBaseTimestamp + TimeDelta::Millis(2));
+
+ EXPECT_THAT(
+ Parse(feedback_builder.Build()).GetAllPackets(),
+ ElementsAre(
+ Property(&TransportFeedback::ReceivedPacket::received, true),
+ Property(&TransportFeedback::ReceivedPacket::received, false),
+ Property(&TransportFeedback::ReceivedPacket::received, false),
+ Property(&TransportFeedback::ReceivedPacket::received, true)));
+}
+
+TEST(TransportFeedbackTest, ReportsMissingPacketsWithoutTimestamps) {
+ const uint16_t kBaseSeqNo = 1000;
+ const uint8_t kFeedbackSeqNo = 90;
+ TransportFeedback feedback_builder(/*include_timestamps*/ false);
+ feedback_builder.SetBase(kBaseSeqNo, Timestamp::Millis(10));
+ feedback_builder.SetFeedbackSequenceNumber(kFeedbackSeqNo);
+ feedback_builder.AddReceivedPacket(kBaseSeqNo + 0, Timestamp::Zero());
+  // Packet losses are indicated by a jump in sequence number.
+ feedback_builder.AddReceivedPacket(kBaseSeqNo + 3, Timestamp::Zero());
+
+ EXPECT_THAT(
+ Parse(feedback_builder.Build()).GetAllPackets(),
+ ElementsAre(
+ Property(&TransportFeedback::ReceivedPacket::received, true),
+ Property(&TransportFeedback::ReceivedPacket::received, false),
+ Property(&TransportFeedback::ReceivedPacket::received, false),
+ Property(&TransportFeedback::ReceivedPacket::received, true)));
+}
+} // namespace
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet_unittest.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet_unittest.cc
new file mode 100644
index 0000000000..dccd1354a9
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_packet_unittest.cc
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtcp_packet.h"
+
+#include "modules/rtp_rtcp/source/rtcp_packet/receiver_report.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace {
+
+using ::testing::_;
+using ::testing::MockFunction;
+using ::webrtc::rtcp::ReceiverReport;
+using ::webrtc::rtcp::ReportBlock;
+
+const uint32_t kSenderSsrc = 0x12345678;
+
+TEST(RtcpPacketTest, BuildWithTooSmallBuffer) {
+ ReportBlock rb;
+ ReceiverReport rr;
+ rr.SetSenderSsrc(kSenderSsrc);
+ EXPECT_TRUE(rr.AddReportBlock(rb));
+
+ const size_t kRrLength = 8;
+ const size_t kReportBlockLength = 24;
+
+ // No packet.
+ MockFunction<void(rtc::ArrayView<const uint8_t>)> callback;
+ EXPECT_CALL(callback, Call(_)).Times(0);
+ const size_t kBufferSize = kRrLength + kReportBlockLength - 1;
+ EXPECT_FALSE(rr.Build(kBufferSize, callback.AsStdFunction()));
+}
+
+} // namespace
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_receiver.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_receiver.cc
new file mode 100644
index 0000000000..936750c263
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_receiver.cc
@@ -0,0 +1,1292 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtcp_receiver.h"
+
+#include <string.h>
+
+#include <algorithm>
+#include <limits>
+#include <map>
+#include <memory>
+#include <utility>
+#include <vector>
+
+#include "api/video/video_bitrate_allocation.h"
+#include "api/video/video_bitrate_allocator.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/bye.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/common_header.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/compound_packet.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/extended_reports.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/fir.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/loss_notification.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/nack.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/pli.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/rapid_resync_request.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/receiver_report.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/remb.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/remote_estimate.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/sdes.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/sender_report.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/tmmbn.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/tmmbr.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/transport_feedback.h"
+#include "modules/rtp_rtcp/source/rtp_rtcp_config.h"
+#include "modules/rtp_rtcp/source/rtp_rtcp_impl2.h"
+#include "modules/rtp_rtcp/source/time_util.h"
+#include "modules/rtp_rtcp/source/tmmbr_help.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/time_utils.h"
+#include "rtc_base/trace_event.h"
+#include "system_wrappers/include/ntp_time.h"
+
+namespace webrtc {
+namespace {
+
+using rtcp::CommonHeader;
+using rtcp::ReportBlock;
+
+// The number of RTCP time intervals needed to trigger a timeout.
+const int kRrTimeoutIntervals = 3;
+
+const int64_t kTmmbrTimeoutIntervalMs = 5 * 5000;
+
+const int64_t kMaxWarningLogIntervalMs = 10000;
+const int64_t kRtcpMinFrameLengthMs = 17;
+
+// Maximum number of received RRTRs that will be stored.
+const size_t kMaxNumberOfStoredRrtrs = 300;
+
+constexpr TimeDelta kDefaultVideoReportInterval = TimeDelta::Seconds(1);
+constexpr TimeDelta kDefaultAudioReportInterval = TimeDelta::Seconds(5);
+
+// Returns true if `timestamp` has exceeded the `interval *
+// kRrTimeoutIntervals` period and was reset (set to PlusInfinity()). Returns
+// false if the timer was either already reset or if it has not expired.
+bool ResetTimestampIfExpired(const Timestamp now,
+ Timestamp& timestamp,
+ TimeDelta interval) {
+ if (timestamp.IsInfinite() ||
+ now <= timestamp + interval * kRrTimeoutIntervals) {
+ return false;
+ }
+
+ timestamp = Timestamp::PlusInfinity();
+ return true;
+}
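+// E.g. with the default 1 s video report interval, a report-block timestamp
+// older than 3 s (kRrTimeoutIntervals * interval) counts as expired and is
+// reset.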
+
+} // namespace
+
+constexpr size_t RTCPReceiver::RegisteredSsrcs::kMediaSsrcIndex;
+constexpr size_t RTCPReceiver::RegisteredSsrcs::kMaxSsrcs;
+
+RTCPReceiver::RegisteredSsrcs::RegisteredSsrcs(
+ bool disable_sequence_checker,
+ const RtpRtcpInterface::Configuration& config)
+ : packet_sequence_checker_(disable_sequence_checker) {
+ packet_sequence_checker_.Detach();
+ ssrcs_.push_back(config.local_media_ssrc);
+ if (config.rtx_send_ssrc) {
+ ssrcs_.push_back(*config.rtx_send_ssrc);
+ }
+ if (config.fec_generator) {
+ absl::optional<uint32_t> flexfec_ssrc = config.fec_generator->FecSsrc();
+ if (flexfec_ssrc) {
+ ssrcs_.push_back(*flexfec_ssrc);
+ }
+ }
+ // Ensure that the RegisteredSsrcs can inline the SSRCs.
+ RTC_DCHECK_LE(ssrcs_.size(), RTCPReceiver::RegisteredSsrcs::kMaxSsrcs);
+}
+
+bool RTCPReceiver::RegisteredSsrcs::contains(uint32_t ssrc) const {
+ RTC_DCHECK_RUN_ON(&packet_sequence_checker_);
+ return absl::c_linear_search(ssrcs_, ssrc);
+}
+
+uint32_t RTCPReceiver::RegisteredSsrcs::media_ssrc() const {
+ RTC_DCHECK_RUN_ON(&packet_sequence_checker_);
+ return ssrcs_[kMediaSsrcIndex];
+}
+
+void RTCPReceiver::RegisteredSsrcs::set_media_ssrc(uint32_t ssrc) {
+ RTC_DCHECK_RUN_ON(&packet_sequence_checker_);
+ ssrcs_[kMediaSsrcIndex] = ssrc;
+}
+
+struct RTCPReceiver::PacketInformation {
+ uint32_t packet_type_flags = 0; // RTCPPacketTypeFlags bit field.
+
+ uint32_t remote_ssrc = 0;
+ std::vector<uint16_t> nack_sequence_numbers;
+ // TODO(hbos): Remove `report_blocks` in favor of `report_block_datas`.
+ ReportBlockList report_blocks;
+ std::vector<ReportBlockData> report_block_datas;
+ int64_t rtt_ms = 0;
+ uint32_t receiver_estimated_max_bitrate_bps = 0;
+ std::unique_ptr<rtcp::TransportFeedback> transport_feedback;
+ absl::optional<VideoBitrateAllocation> target_bitrate_allocation;
+ absl::optional<NetworkStateEstimate> network_state_estimate;
+ std::unique_ptr<rtcp::LossNotification> loss_notification;
+};
+
+RTCPReceiver::RTCPReceiver(const RtpRtcpInterface::Configuration& config,
+ ModuleRtpRtcpImpl2* owner)
+ : clock_(config.clock),
+ receiver_only_(config.receiver_only),
+ rtp_rtcp_(owner),
+ registered_ssrcs_(false, config),
+ rtcp_bandwidth_observer_(config.bandwidth_callback),
+ rtcp_event_observer_(config.rtcp_event_observer),
+ rtcp_intra_frame_observer_(config.intra_frame_callback),
+ rtcp_loss_notification_observer_(config.rtcp_loss_notification_observer),
+ network_state_estimate_observer_(config.network_state_estimate_observer),
+ transport_feedback_observer_(config.transport_feedback_callback),
+ bitrate_allocation_observer_(config.bitrate_allocation_observer),
+ report_interval_(config.rtcp_report_interval_ms > 0
+ ? TimeDelta::Millis(config.rtcp_report_interval_ms)
+ : (config.audio ? kDefaultAudioReportInterval
+ : kDefaultVideoReportInterval)),
+ // TODO(bugs.webrtc.org/10774): Remove fallback.
+ remote_ssrc_(0),
+ remote_sender_rtp_time_(0),
+ remote_sender_packet_count_(0),
+ remote_sender_octet_count_(0),
+ remote_sender_reports_count_(0),
+ xr_rrtr_status_(config.non_sender_rtt_measurement),
+ xr_rr_rtt_ms_(0),
+ oldest_tmmbr_info_ms_(0),
+ cname_callback_(config.rtcp_cname_callback),
+ report_block_data_observer_(config.report_block_data_observer),
+ packet_type_counter_observer_(config.rtcp_packet_type_counter_observer),
+ num_skipped_packets_(0),
+ last_skipped_packets_warning_ms_(clock_->TimeInMilliseconds()) {
+ RTC_DCHECK(owner);
+}
+
+RTCPReceiver::RTCPReceiver(const RtpRtcpInterface::Configuration& config,
+ ModuleRtpRtcp* owner)
+ : clock_(config.clock),
+ receiver_only_(config.receiver_only),
+ rtp_rtcp_(owner),
+ registered_ssrcs_(true, config),
+ rtcp_bandwidth_observer_(config.bandwidth_callback),
+ rtcp_event_observer_(config.rtcp_event_observer),
+ rtcp_intra_frame_observer_(config.intra_frame_callback),
+ rtcp_loss_notification_observer_(config.rtcp_loss_notification_observer),
+ network_state_estimate_observer_(config.network_state_estimate_observer),
+ transport_feedback_observer_(config.transport_feedback_callback),
+ bitrate_allocation_observer_(config.bitrate_allocation_observer),
+ report_interval_(config.rtcp_report_interval_ms > 0
+ ? TimeDelta::Millis(config.rtcp_report_interval_ms)
+ : (config.audio ? kDefaultAudioReportInterval
+ : kDefaultVideoReportInterval)),
+ // TODO(bugs.webrtc.org/10774): Remove fallback.
+ remote_ssrc_(0),
+ remote_sender_rtp_time_(0),
+ remote_sender_packet_count_(0),
+ remote_sender_octet_count_(0),
+ remote_sender_reports_count_(0),
+ xr_rrtr_status_(config.non_sender_rtt_measurement),
+ xr_rr_rtt_ms_(0),
+ oldest_tmmbr_info_ms_(0),
+ cname_callback_(config.rtcp_cname_callback),
+ report_block_data_observer_(config.report_block_data_observer),
+ packet_type_counter_observer_(config.rtcp_packet_type_counter_observer),
+ num_skipped_packets_(0),
+ last_skipped_packets_warning_ms_(clock_->TimeInMilliseconds()) {
+ RTC_DCHECK(owner);
+ // Dear reader - if you're here because of this log statement and are
+ // wondering what this is about, chances are that you are using an instance
+ // of RTCPReceiver without using the webrtc APIs. This creates a bit of a
+ // problem for WebRTC because this class is a part of an internal
+ // implementation that is constantly changing and being improved.
+ // The intention of this log statement is to give a heads up that changes
+ // are coming and encourage you to use the public APIs or be prepared that
+ // things might break down the line as more changes land. A thing you could
+ // try out for now is to replace the `CustomSequenceChecker` in the header
+ // with a regular `SequenceChecker` and see if that triggers an
+ // error in your code. If it does, chances are you have your own threading
+ // model that is not the same as WebRTC internally has.
+ RTC_LOG(LS_INFO) << "************** !!!DEPRECATION WARNING!! **************";
+}
+
+RTCPReceiver::~RTCPReceiver() {}
+
+void RTCPReceiver::IncomingPacket(rtc::ArrayView<const uint8_t> packet) {
+ if (packet.empty()) {
+ RTC_LOG(LS_WARNING) << "Incoming empty RTCP packet";
+ return;
+ }
+
+ PacketInformation packet_information;
+ if (!ParseCompoundPacket(packet, &packet_information))
+ return;
+ TriggerCallbacksFromRtcpPacket(packet_information);
+}
+
+// This method is only used by tests and legacy code, so we should be able to
+// remove it soon.
+int64_t RTCPReceiver::LastReceivedReportBlockMs() const {
+ MutexLock lock(&rtcp_receiver_lock_);
+ return last_received_rb_.IsFinite() ? last_received_rb_.ms() : 0;
+}
+
+void RTCPReceiver::SetRemoteSSRC(uint32_t ssrc) {
+ MutexLock lock(&rtcp_receiver_lock_);
+  // A new SSRC resets the old reports.
+ last_received_sr_ntp_.Reset();
+ remote_ssrc_ = ssrc;
+}
+
+void RTCPReceiver::set_local_media_ssrc(uint32_t ssrc) {
+ registered_ssrcs_.set_media_ssrc(ssrc);
+}
+
+uint32_t RTCPReceiver::local_media_ssrc() const {
+ return registered_ssrcs_.media_ssrc();
+}
+
+uint32_t RTCPReceiver::RemoteSSRC() const {
+ MutexLock lock(&rtcp_receiver_lock_);
+ return remote_ssrc_;
+}
+
+void RTCPReceiver::RttStats::AddRtt(TimeDelta rtt) {
+ last_rtt_ = rtt;
+ if (rtt < min_rtt_) {
+ min_rtt_ = rtt;
+ }
+ if (rtt > max_rtt_) {
+ max_rtt_ = rtt;
+ }
+ sum_rtt_ += rtt;
+ ++num_rtts_;
+}
+
+int32_t RTCPReceiver::RTT(uint32_t remote_ssrc,
+ int64_t* last_rtt_ms,
+ int64_t* avg_rtt_ms,
+ int64_t* min_rtt_ms,
+ int64_t* max_rtt_ms) const {
+ MutexLock lock(&rtcp_receiver_lock_);
+
+ auto it = rtts_.find(remote_ssrc);
+ if (it == rtts_.end()) {
+ return -1;
+ }
+
+ if (last_rtt_ms) {
+ *last_rtt_ms = it->second.last_rtt().ms();
+ }
+
+ if (avg_rtt_ms) {
+ *avg_rtt_ms = it->second.average_rtt().ms();
+ }
+
+ if (min_rtt_ms) {
+ *min_rtt_ms = it->second.min_rtt().ms();
+ }
+
+ if (max_rtt_ms) {
+ *max_rtt_ms = it->second.max_rtt().ms();
+ }
+
+ return 0;
+}
+
+RTCPReceiver::NonSenderRttStats RTCPReceiver::GetNonSenderRTT() const {
+ MutexLock lock(&rtcp_receiver_lock_);
+ auto it = non_sender_rtts_.find(remote_ssrc_);
+ if (it == non_sender_rtts_.end()) {
+ return {};
+ }
+ return it->second;
+}
+
+void RTCPReceiver::SetNonSenderRttMeasurement(bool enabled) {
+ MutexLock lock(&rtcp_receiver_lock_);
+ xr_rrtr_status_ = enabled;
+}
+
+bool RTCPReceiver::GetAndResetXrRrRtt(int64_t* rtt_ms) {
+ RTC_DCHECK(rtt_ms);
+ MutexLock lock(&rtcp_receiver_lock_);
+ if (xr_rr_rtt_ms_ == 0) {
+ return false;
+ }
+ *rtt_ms = xr_rr_rtt_ms_;
+ xr_rr_rtt_ms_ = 0;
+ return true;
+}
+
+// Called regularly (1/sec) on the worker thread to do RTT calculations.
+absl::optional<TimeDelta> RTCPReceiver::OnPeriodicRttUpdate(
+ Timestamp newer_than,
+ bool sending) {
+ // Running on the worker thread (same as construction thread).
+ absl::optional<TimeDelta> rtt;
+
+ if (sending) {
+ // Check if we've received a report block within the last kRttUpdateInterval
+ // amount of time.
+ MutexLock lock(&rtcp_receiver_lock_);
+ if (last_received_rb_.IsInfinite() || last_received_rb_ > newer_than) {
+ TimeDelta max_rtt = TimeDelta::MinusInfinity();
+ for (const auto& rtt_stats : rtts_) {
+ if (rtt_stats.second.last_rtt() > max_rtt) {
+ max_rtt = rtt_stats.second.last_rtt();
+ }
+ }
+ if (max_rtt.IsFinite()) {
+ rtt = max_rtt;
+ }
+ }
+
+ // Check for expired timers and if so, log and reset.
+ auto now = clock_->CurrentTime();
+ if (RtcpRrTimeoutLocked(now)) {
+ RTC_LOG_F(LS_WARNING) << "Timeout: No RTCP RR received.";
+ } else if (RtcpRrSequenceNumberTimeoutLocked(now)) {
+ RTC_LOG_F(LS_WARNING) << "Timeout: No increase in RTCP RR extended "
+ "highest sequence number.";
+ }
+ } else {
+ // Report rtt from receiver.
+ int64_t rtt_ms;
+ if (GetAndResetXrRrRtt(&rtt_ms)) {
+ rtt.emplace(TimeDelta::Millis(rtt_ms));
+ }
+ }
+
+ return rtt;
+}
+
+bool RTCPReceiver::NTP(uint32_t* received_ntp_secs,
+ uint32_t* received_ntp_frac,
+ uint32_t* rtcp_arrival_time_secs,
+ uint32_t* rtcp_arrival_time_frac,
+ uint32_t* rtcp_timestamp,
+ uint32_t* remote_sender_packet_count,
+ uint64_t* remote_sender_octet_count,
+ uint64_t* remote_sender_reports_count) const {
+ MutexLock lock(&rtcp_receiver_lock_);
+ if (!last_received_sr_ntp_.Valid())
+ return false;
+
+ // NTP from incoming SenderReport.
+ if (received_ntp_secs)
+ *received_ntp_secs = remote_sender_ntp_time_.seconds();
+ if (received_ntp_frac)
+ *received_ntp_frac = remote_sender_ntp_time_.fractions();
+  // RTP time from the incoming SenderReport.
+ if (rtcp_timestamp)
+ *rtcp_timestamp = remote_sender_rtp_time_;
+
+  // Local NTP time when we received an RTCP packet with a send block.
+ if (rtcp_arrival_time_secs)
+ *rtcp_arrival_time_secs = last_received_sr_ntp_.seconds();
+ if (rtcp_arrival_time_frac)
+ *rtcp_arrival_time_frac = last_received_sr_ntp_.fractions();
+
+ // Counters.
+ if (remote_sender_packet_count)
+ *remote_sender_packet_count = remote_sender_packet_count_;
+ if (remote_sender_octet_count)
+ *remote_sender_octet_count = remote_sender_octet_count_;
+ if (remote_sender_reports_count)
+ *remote_sender_reports_count = remote_sender_reports_count_;
+
+ return true;
+}
+
+std::vector<rtcp::ReceiveTimeInfo>
+RTCPReceiver::ConsumeReceivedXrReferenceTimeInfo() {
+ MutexLock lock(&rtcp_receiver_lock_);
+
+ const size_t last_xr_rtis_size = std::min(
+ received_rrtrs_.size(), rtcp::ExtendedReports::kMaxNumberOfDlrrItems);
+ std::vector<rtcp::ReceiveTimeInfo> last_xr_rtis;
+ last_xr_rtis.reserve(last_xr_rtis_size);
+
+ const uint32_t now_ntp = CompactNtp(clock_->CurrentNtpTime());
+
+ for (size_t i = 0; i < last_xr_rtis_size; ++i) {
+ RrtrInformation& rrtr = received_rrtrs_.front();
+ last_xr_rtis.emplace_back(rrtr.ssrc, rrtr.received_remote_mid_ntp_time,
+ now_ntp - rrtr.local_receive_mid_ntp_time);
+ received_rrtrs_ssrc_it_.erase(rrtr.ssrc);
+ received_rrtrs_.pop_front();
+ }
+
+ return last_xr_rtis;
+}
+
+void RTCPReceiver::RemoteRTCPSenderInfo(uint32_t* packet_count,
+ uint32_t* octet_count,
+ int64_t* ntp_timestamp_ms,
+ int64_t* remote_ntp_timestamp_ms) const {
+ MutexLock lock(&rtcp_receiver_lock_);
+ *packet_count = remote_sender_packet_count_;
+ *octet_count = remote_sender_octet_count_;
+ *ntp_timestamp_ms = last_received_sr_ntp_.ToMs();
+ *remote_ntp_timestamp_ms = remote_sender_ntp_time_.ToMs();
+}
+
+std::vector<ReportBlockData> RTCPReceiver::GetLatestReportBlockData() const {
+ std::vector<ReportBlockData> result;
+ MutexLock lock(&rtcp_receiver_lock_);
+ for (const auto& report : received_report_blocks_) {
+ result.push_back(report.second);
+ }
+ return result;
+}
+
+bool RTCPReceiver::ParseCompoundPacket(rtc::ArrayView<const uint8_t> packet,
+ PacketInformation* packet_information) {
+ MutexLock lock(&rtcp_receiver_lock_);
+
+ CommonHeader rtcp_block;
+ // If a sender report is received but no DLRR, we need to reset the
+ // roundTripTime stat according to the standard, see
+ // https://www.w3.org/TR/webrtc-stats/#dom-rtcremoteoutboundrtpstreamstats-roundtriptime
+ struct RtcpReceivedBlock {
+ bool sender_report = false;
+ bool dlrr = false;
+ };
+ // For each remote SSRC we store whether we've received a sender report or a
+ // DLRR block.
+ flat_map<uint32_t, RtcpReceivedBlock> received_blocks;
+ for (const uint8_t* next_block = packet.begin(); next_block != packet.end();
+ next_block = rtcp_block.NextPacket()) {
+ ptrdiff_t remaining_blocks_size = packet.end() - next_block;
+ RTC_DCHECK_GT(remaining_blocks_size, 0);
+ if (!rtcp_block.Parse(next_block, remaining_blocks_size)) {
+ if (next_block == packet.begin()) {
+ // Failed to parse 1st header, nothing was extracted from this packet.
+ RTC_LOG(LS_WARNING) << "Incoming invalid RTCP packet";
+ return false;
+ }
+ ++num_skipped_packets_;
+ break;
+ }
+
+ switch (rtcp_block.type()) {
+ case rtcp::SenderReport::kPacketType:
+ HandleSenderReport(rtcp_block, packet_information);
+ received_blocks[packet_information->remote_ssrc].sender_report = true;
+ break;
+ case rtcp::ReceiverReport::kPacketType:
+ HandleReceiverReport(rtcp_block, packet_information);
+ break;
+ case rtcp::Sdes::kPacketType:
+ HandleSdes(rtcp_block, packet_information);
+ break;
+ case rtcp::ExtendedReports::kPacketType: {
+ bool contains_dlrr = false;
+ uint32_t ssrc = 0;
+ HandleXr(rtcp_block, packet_information, contains_dlrr, ssrc);
+ if (contains_dlrr) {
+ received_blocks[ssrc].dlrr = true;
+ }
+ break;
+ }
+ case rtcp::Bye::kPacketType:
+ HandleBye(rtcp_block);
+ break;
+ case rtcp::App::kPacketType:
+ HandleApp(rtcp_block, packet_information);
+ break;
+ case rtcp::Rtpfb::kPacketType:
+ switch (rtcp_block.fmt()) {
+ case rtcp::Nack::kFeedbackMessageType:
+ HandleNack(rtcp_block, packet_information);
+ break;
+ case rtcp::Tmmbr::kFeedbackMessageType:
+ HandleTmmbr(rtcp_block, packet_information);
+ break;
+ case rtcp::Tmmbn::kFeedbackMessageType:
+ HandleTmmbn(rtcp_block, packet_information);
+ break;
+ case rtcp::RapidResyncRequest::kFeedbackMessageType:
+ HandleSrReq(rtcp_block, packet_information);
+ break;
+ case rtcp::TransportFeedback::kFeedbackMessageType:
+ HandleTransportFeedback(rtcp_block, packet_information);
+ break;
+ default:
+ ++num_skipped_packets_;
+ break;
+ }
+ break;
+ case rtcp::Psfb::kPacketType:
+ switch (rtcp_block.fmt()) {
+ case rtcp::Pli::kFeedbackMessageType:
+ HandlePli(rtcp_block, packet_information);
+ break;
+ case rtcp::Fir::kFeedbackMessageType:
+ HandleFir(rtcp_block, packet_information);
+ break;
+ case rtcp::Psfb::kAfbMessageType:
+ HandlePsfbApp(rtcp_block, packet_information);
+ break;
+ default:
+ ++num_skipped_packets_;
+ break;
+ }
+ break;
+ default:
+ ++num_skipped_packets_;
+ break;
+ }
+ }
+
+ for (const auto& rb : received_blocks) {
+ if (rb.second.sender_report && !rb.second.dlrr) {
+ auto rtt_stats = non_sender_rtts_.find(rb.first);
+ if (rtt_stats != non_sender_rtts_.end()) {
+ rtt_stats->second.Invalidate();
+ }
+ }
+ }
+
+ if (packet_type_counter_observer_) {
+ packet_type_counter_observer_->RtcpPacketTypesCounterUpdated(
+ local_media_ssrc(), packet_type_counter_);
+ }
+
+ if (num_skipped_packets_ > 0) {
+ const int64_t now_ms = clock_->TimeInMilliseconds();
+ if (now_ms - last_skipped_packets_warning_ms_ >= kMaxWarningLogIntervalMs) {
+ last_skipped_packets_warning_ms_ = now_ms;
+ RTC_LOG(LS_WARNING)
+ << num_skipped_packets_
+ << " RTCP blocks were skipped due to being malformed or of "
+ "unrecognized/unsupported type, during the past "
+ << (kMaxWarningLogIntervalMs / 1000) << " second period.";
+ }
+ }
+
+ return true;
+}
+
+void RTCPReceiver::HandleSenderReport(const CommonHeader& rtcp_block,
+ PacketInformation* packet_information) {
+ rtcp::SenderReport sender_report;
+ if (!sender_report.Parse(rtcp_block)) {
+ ++num_skipped_packets_;
+ return;
+ }
+
+ const uint32_t remote_ssrc = sender_report.sender_ssrc();
+
+ packet_information->remote_ssrc = remote_ssrc;
+
+ UpdateTmmbrRemoteIsAlive(remote_ssrc);
+
+ // Have I received RTP packets from this party?
+ if (remote_ssrc_ == remote_ssrc) {
+ // Only signal that we have received an SR when we accept one.
+ packet_information->packet_type_flags |= kRtcpSr;
+
+ remote_sender_ntp_time_ = sender_report.ntp();
+ remote_sender_rtp_time_ = sender_report.rtp_timestamp();
+ last_received_sr_ntp_ = clock_->CurrentNtpTime();
+ remote_sender_packet_count_ = sender_report.sender_packet_count();
+ remote_sender_octet_count_ = sender_report.sender_octet_count();
+ remote_sender_reports_count_++;
+ } else {
+ // We will only store the sender report from one source, but we will store
+ // all the received report blocks.
+ packet_information->packet_type_flags |= kRtcpRr;
+ }
+
+ for (const rtcp::ReportBlock& report_block : sender_report.report_blocks())
+ HandleReportBlock(report_block, packet_information, remote_ssrc);
+}
+
+void RTCPReceiver::HandleReceiverReport(const CommonHeader& rtcp_block,
+ PacketInformation* packet_information) {
+ rtcp::ReceiverReport receiver_report;
+ if (!receiver_report.Parse(rtcp_block)) {
+ ++num_skipped_packets_;
+ return;
+ }
+
+ const uint32_t remote_ssrc = receiver_report.sender_ssrc();
+
+ packet_information->remote_ssrc = remote_ssrc;
+
+ UpdateTmmbrRemoteIsAlive(remote_ssrc);
+
+ packet_information->packet_type_flags |= kRtcpRr;
+
+ for (const ReportBlock& report_block : receiver_report.report_blocks())
+ HandleReportBlock(report_block, packet_information, remote_ssrc);
+}
+
+void RTCPReceiver::HandleReportBlock(const ReportBlock& report_block,
+ PacketInformation* packet_information,
+ uint32_t remote_ssrc) {
+ // This will be called once per report block in the RTCP packet.
+ // Each packet can contain at most 31 report blocks.
+ //
+ // We can calculate the RTT if we send a sender report and get a report
+ // block back.
+
+ // `report_block.source_ssrc()` is the SSRC identifier of the source to
+ // which the information in this reception report block pertains.
+
+ // Filter out all report blocks that are not for us.
+ if (!registered_ssrcs_.contains(report_block.source_ssrc()))
+ return;
+
+ last_received_rb_ = clock_->CurrentTime();
+
+ ReportBlockData* report_block_data =
+ &received_report_blocks_[report_block.source_ssrc()];
+ RTCPReportBlock rtcp_report_block;
+ rtcp_report_block.sender_ssrc = remote_ssrc;
+ rtcp_report_block.source_ssrc = report_block.source_ssrc();
+ rtcp_report_block.fraction_lost = report_block.fraction_lost();
+ rtcp_report_block.packets_lost = report_block.cumulative_lost_signed();
+ if (report_block.extended_high_seq_num() >
+ report_block_data->report_block().extended_highest_sequence_number) {
+ // We have successfully delivered new RTP packets to the remote side after
+ // the last RR was sent from the remote side.
+ last_increased_sequence_number_ = last_received_rb_;
+ }
+ rtcp_report_block.extended_highest_sequence_number =
+ report_block.extended_high_seq_num();
+ rtcp_report_block.jitter = report_block.jitter();
+ rtcp_report_block.delay_since_last_sender_report =
+ report_block.delay_since_last_sr();
+ rtcp_report_block.last_sender_report_timestamp = report_block.last_sr();
+ // Number of seconds since 1900 January 1 00:00 GMT (see
+ // https://tools.ietf.org/html/rfc868).
+ report_block_data->SetReportBlock(
+ rtcp_report_block,
+ (clock_->CurrentNtpInMilliseconds() - rtc::kNtpJan1970Millisecs) *
+ rtc::kNumMicrosecsPerMillisec);
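+ // kNtpJan1970Millisecs is the 2'208'988'800 second offset between the NTP
+ // epoch (1900) and the Unix epoch (1970), so the value stored above is in
+ // microseconds since the Unix epoch.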
+
+ uint32_t send_time_ntp = report_block.last_sr();
+ // RFC3550, section 6.4.1: the LSR field description states that if no SR
+ // has been received yet, the field is set to zero. A receiver-only rtp_rtcp
+ // module is not expected to calculate RTT using Sender Reports, even if it
+ // accidentally can.
+ if (send_time_ntp != 0) {
+ uint32_t delay_ntp = report_block.delay_since_last_sr();
+ // Local NTP time.
+ uint32_t receive_time_ntp =
+ CompactNtp(clock_->ConvertTimestampToNtpTime(last_received_rb_));
+
+ // RTT in 1/(2^16) seconds.
+ uint32_t rtt_ntp = receive_time_ntp - delay_ntp - send_time_ntp;
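+ // E.g. rtt_ntp == 8192 corresponds to 8192 / 2^16 s = 125 ms. The unsigned
+ // subtraction stays correct across compact NTP wraparound (every 2^16
+ // seconds, roughly 18.2 hours).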
+ // Convert to 1/1000 seconds (milliseconds).
+ TimeDelta rtt = CompactNtpRttToTimeDelta(rtt_ntp);
+ report_block_data->AddRoundTripTimeSample(rtt.ms());
+ if (report_block.source_ssrc() == local_media_ssrc()) {
+ rtts_[remote_ssrc].AddRtt(rtt);
+ }
+
+ packet_information->rtt_ms = rtt.ms();
+ }
+
+ packet_information->report_blocks.push_back(
+ report_block_data->report_block());
+ packet_information->report_block_datas.push_back(*report_block_data);
+}
+
+RTCPReceiver::TmmbrInformation* RTCPReceiver::FindOrCreateTmmbrInfo(
+ uint32_t remote_ssrc) {
+ // Create or find receive information.
+ TmmbrInformation* tmmbr_info = &tmmbr_infos_[remote_ssrc];
+ // Update that this remote is alive.
+ tmmbr_info->last_time_received_ms = clock_->TimeInMilliseconds();
+ return tmmbr_info;
+}
+
+void RTCPReceiver::UpdateTmmbrRemoteIsAlive(uint32_t remote_ssrc) {
+ auto tmmbr_it = tmmbr_infos_.find(remote_ssrc);
+ if (tmmbr_it != tmmbr_infos_.end())
+ tmmbr_it->second.last_time_received_ms = clock_->TimeInMilliseconds();
+}
+
+RTCPReceiver::TmmbrInformation* RTCPReceiver::GetTmmbrInformation(
+ uint32_t remote_ssrc) {
+ auto it = tmmbr_infos_.find(remote_ssrc);
+ if (it == tmmbr_infos_.end())
+ return nullptr;
+ return &it->second;
+}
+
+// These two methods (RtcpRrTimeout and RtcpRrSequenceNumberTimeout) only exist
+// for tests and legacy code (rtp_rtcp_impl.cc). We should be able to delete
+// the methods and require that access to the locked variables only happens on
+// the worker thread, and thus no locking is needed.
+bool RTCPReceiver::RtcpRrTimeout() {
+ MutexLock lock(&rtcp_receiver_lock_);
+ return RtcpRrTimeoutLocked(clock_->CurrentTime());
+}
+
+bool RTCPReceiver::RtcpRrSequenceNumberTimeout() {
+ MutexLock lock(&rtcp_receiver_lock_);
+ return RtcpRrSequenceNumberTimeoutLocked(clock_->CurrentTime());
+}
+
+bool RTCPReceiver::UpdateTmmbrTimers() {
+ MutexLock lock(&rtcp_receiver_lock_);
+
+ int64_t now_ms = clock_->TimeInMilliseconds();
+ int64_t timeout_ms = now_ms - kTmmbrTimeoutIntervalMs;
+
+ if (oldest_tmmbr_info_ms_ >= timeout_ms)
+ return false;
+
+ bool update_bounding_set = false;
+ oldest_tmmbr_info_ms_ = -1;
+ for (auto tmmbr_it = tmmbr_infos_.begin(); tmmbr_it != tmmbr_infos_.end();) {
+ TmmbrInformation* tmmbr_info = &tmmbr_it->second;
+ if (tmmbr_info->last_time_received_ms > 0) {
+ if (tmmbr_info->last_time_received_ms < timeout_ms) {
+ // No RTCP packet received for the last 5 regular intervals; reset the
+ // limitations.
+ tmmbr_info->tmmbr.clear();
+ // Prevent calling this over and over again.
+ tmmbr_info->last_time_received_ms = 0;
+ // Send new TMMBN to all channels using the default codec.
+ update_bounding_set = true;
+ } else if (oldest_tmmbr_info_ms_ == -1 ||
+ tmmbr_info->last_time_received_ms < oldest_tmmbr_info_ms_) {
+ oldest_tmmbr_info_ms_ = tmmbr_info->last_time_received_ms;
+ }
+ ++tmmbr_it;
+ } else if (tmmbr_info->ready_for_delete) {
+ // When we don't have a last_time_received_ms and the object is marked
+ // ready_for_delete, it's removed from the map.
+ tmmbr_it = tmmbr_infos_.erase(tmmbr_it);
+ } else {
+ ++tmmbr_it;
+ }
+ }
+ return update_bounding_set;
+}
+
+std::vector<rtcp::TmmbItem> RTCPReceiver::BoundingSet(bool* tmmbr_owner) {
+ MutexLock lock(&rtcp_receiver_lock_);
+ TmmbrInformation* tmmbr_info = GetTmmbrInformation(remote_ssrc_);
+ if (!tmmbr_info)
+ return std::vector<rtcp::TmmbItem>();
+
+ *tmmbr_owner = TMMBRHelp::IsOwner(tmmbr_info->tmmbn, local_media_ssrc());
+ return tmmbr_info->tmmbn;
+}
+
+void RTCPReceiver::HandleSdes(const CommonHeader& rtcp_block,
+ PacketInformation* packet_information) {
+ rtcp::Sdes sdes;
+ if (!sdes.Parse(rtcp_block)) {
+ ++num_skipped_packets_;
+ return;
+ }
+
+ for (const rtcp::Sdes::Chunk& chunk : sdes.chunks()) {
+ if (cname_callback_)
+ cname_callback_->OnCname(chunk.ssrc, chunk.cname);
+ }
+ packet_information->packet_type_flags |= kRtcpSdes;
+}
+
+void RTCPReceiver::HandleNack(const CommonHeader& rtcp_block,
+ PacketInformation* packet_information) {
+ rtcp::Nack nack;
+ if (!nack.Parse(rtcp_block)) {
+ ++num_skipped_packets_;
+ return;
+ }
+
+ if (receiver_only_ || local_media_ssrc() != nack.media_ssrc()) // Not to us.
+ return;
+
+ packet_information->nack_sequence_numbers.insert(
+ packet_information->nack_sequence_numbers.end(),
+ nack.packet_ids().begin(), nack.packet_ids().end());
+ for (uint16_t packet_id : nack.packet_ids())
+ nack_stats_.ReportRequest(packet_id);
+
+ if (!nack.packet_ids().empty()) {
+ packet_information->packet_type_flags |= kRtcpNack;
+ ++packet_type_counter_.nack_packets;
+ packet_type_counter_.nack_requests = nack_stats_.requests();
+ packet_type_counter_.unique_nack_requests = nack_stats_.unique_requests();
+ }
+}
+
+void RTCPReceiver::HandleApp(const rtcp::CommonHeader& rtcp_block,
+ PacketInformation* packet_information) {
+ rtcp::App app;
+ if (app.Parse(rtcp_block)) {
+ if (app.name() == rtcp::RemoteEstimate::kName &&
+ app.sub_type() == rtcp::RemoteEstimate::kSubType) {
+ rtcp::RemoteEstimate estimate(std::move(app));
+ if (estimate.ParseData()) {
+ packet_information->network_state_estimate = estimate.estimate();
+ return;
+ }
+ }
+ }
+ ++num_skipped_packets_;
+}
+
+void RTCPReceiver::HandleBye(const CommonHeader& rtcp_block) {
+ rtcp::Bye bye;
+ if (!bye.Parse(rtcp_block)) {
+ ++num_skipped_packets_;
+ return;
+ }
+
+ if (rtcp_event_observer_) {
+ rtcp_event_observer_->OnRtcpBye();
+ }
+
+ // Clear our lists.
+ rtts_.erase(bye.sender_ssrc());
+ EraseIf(received_report_blocks_, [&](const auto& elem) {
+ return elem.second.report_block().sender_ssrc == bye.sender_ssrc();
+ });
+
+ TmmbrInformation* tmmbr_info = GetTmmbrInformation(bye.sender_ssrc());
+ if (tmmbr_info)
+ tmmbr_info->ready_for_delete = true;
+
+ last_fir_.erase(bye.sender_ssrc());
+ auto it = received_rrtrs_ssrc_it_.find(bye.sender_ssrc());
+ if (it != received_rrtrs_ssrc_it_.end()) {
+ received_rrtrs_.erase(it->second);
+ received_rrtrs_ssrc_it_.erase(it);
+ }
+ xr_rr_rtt_ms_ = 0;
+}
+
+void RTCPReceiver::HandleXr(const CommonHeader& rtcp_block,
+ PacketInformation* packet_information,
+ bool& contains_dlrr,
+ uint32_t& ssrc) {
+ rtcp::ExtendedReports xr;
+ if (!xr.Parse(rtcp_block)) {
+ ++num_skipped_packets_;
+ return;
+ }
+ ssrc = xr.sender_ssrc();
+ contains_dlrr = !xr.dlrr().sub_blocks().empty();
+
+ if (xr.rrtr())
+ HandleXrReceiveReferenceTime(xr.sender_ssrc(), *xr.rrtr());
+
+ for (const rtcp::ReceiveTimeInfo& time_info : xr.dlrr().sub_blocks())
+ HandleXrDlrrReportBlock(xr.sender_ssrc(), time_info);
+
+ if (xr.target_bitrate()) {
+ HandleXrTargetBitrate(xr.sender_ssrc(), *xr.target_bitrate(),
+ packet_information);
+ }
+}
+
+void RTCPReceiver::HandleXrReceiveReferenceTime(uint32_t sender_ssrc,
+ const rtcp::Rrtr& rrtr) {
+ uint32_t received_remote_mid_ntp_time = CompactNtp(rrtr.ntp());
+ uint32_t local_receive_mid_ntp_time = CompactNtp(clock_->CurrentNtpTime());
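+ // CompactNtp() keeps the middle 32 bits of the 64-bit NTP timestamp: the low
+ // 16 bits of the seconds part and the high 16 bits of the fraction part.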
+
+ auto it = received_rrtrs_ssrc_it_.find(sender_ssrc);
+ if (it != received_rrtrs_ssrc_it_.end()) {
+ it->second->received_remote_mid_ntp_time = received_remote_mid_ntp_time;
+ it->second->local_receive_mid_ntp_time = local_receive_mid_ntp_time;
+ } else {
+ if (received_rrtrs_.size() < kMaxNumberOfStoredRrtrs) {
+ received_rrtrs_.emplace_back(sender_ssrc, received_remote_mid_ntp_time,
+ local_receive_mid_ntp_time);
+ received_rrtrs_ssrc_it_[sender_ssrc] = std::prev(received_rrtrs_.end());
+ } else {
+ RTC_LOG(LS_WARNING) << "Discarding received RRTR for ssrc " << sender_ssrc
+ << ", reached maximum number of stored RRTRs.";
+ }
+ }
+}
+
+void RTCPReceiver::HandleXrDlrrReportBlock(uint32_t sender_ssrc,
+ const rtcp::ReceiveTimeInfo& rti) {
+ if (!registered_ssrcs_.contains(rti.ssrc)) // Not to us.
+ return;
+
+ // Caller should explicitly enable rtt calculation using extended reports.
+ if (!xr_rrtr_status_)
+ return;
+
+ // The send_time and delay_rr fields are in units of 1/2^16 sec.
+ uint32_t send_time_ntp = rti.last_rr;
+ // RFC3611, section 4.5: the LRR field description states that if no such
+ // block has been received, the field is set to zero.
+ if (send_time_ntp == 0) {
+ auto rtt_stats = non_sender_rtts_.find(sender_ssrc);
+ if (rtt_stats != non_sender_rtts_.end()) {
+ rtt_stats->second.Invalidate();
+ }
+ return;
+ }
+
+ uint32_t delay_ntp = rti.delay_since_last_rr;
+ uint32_t now_ntp = CompactNtp(clock_->CurrentNtpTime());
+
+ uint32_t rtt_ntp = now_ntp - delay_ntp - send_time_ntp;
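+ // rtt_ntp is in 1/2^16 second units (wraparound-safe unsigned arithmetic,
+ // same as in HandleReportBlock); the result feeds the non-sender RTT stats
+ // returned by GetNonSenderRTT().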
+ TimeDelta rtt = CompactNtpRttToTimeDelta(rtt_ntp);
+ xr_rr_rtt_ms_ = rtt.ms();
+
+ non_sender_rtts_[sender_ssrc].Update(rtt);
+}
+
+void RTCPReceiver::HandleXrTargetBitrate(
+ uint32_t ssrc,
+ const rtcp::TargetBitrate& target_bitrate,
+ PacketInformation* packet_information) {
+ if (ssrc != remote_ssrc_) {
+ return; // Not for us.
+ }
+
+ VideoBitrateAllocation bitrate_allocation;
+ for (const auto& item : target_bitrate.GetTargetBitrates()) {
+ if (item.spatial_layer >= kMaxSpatialLayers ||
+ item.temporal_layer >= kMaxTemporalStreams) {
+ RTC_LOG(LS_WARNING)
+ << "Invalid layer in XR target bitrate pack: spatial index "
+ << item.spatial_layer << ", temporal index " << item.temporal_layer
+ << ", dropping.";
+ } else {
+ bitrate_allocation.SetBitrate(item.spatial_layer, item.temporal_layer,
+ item.target_bitrate_kbps * 1000);
+ }
+ }
+ packet_information->target_bitrate_allocation.emplace(bitrate_allocation);
+}
+
+void RTCPReceiver::HandlePli(const CommonHeader& rtcp_block,
+ PacketInformation* packet_information) {
+ rtcp::Pli pli;
+ if (!pli.Parse(rtcp_block)) {
+ ++num_skipped_packets_;
+ return;
+ }
+
+ if (local_media_ssrc() == pli.media_ssrc()) {
+ ++packet_type_counter_.pli_packets;
+ // Received a signal that we need to send a new key frame.
+ packet_information->packet_type_flags |= kRtcpPli;
+ }
+}
+
+void RTCPReceiver::HandleTmmbr(const CommonHeader& rtcp_block,
+ PacketInformation* packet_information) {
+ rtcp::Tmmbr tmmbr;
+ if (!tmmbr.Parse(rtcp_block)) {
+ ++num_skipped_packets_;
+ return;
+ }
+
+ uint32_t sender_ssrc = tmmbr.sender_ssrc();
+ if (tmmbr.media_ssrc()) {
+ // media_ssrc() SHOULD be 0 if same as SenderSSRC.
+ // In relay mode this is a valid number.
+ sender_ssrc = tmmbr.media_ssrc();
+ }
+
+ for (const rtcp::TmmbItem& request : tmmbr.requests()) {
+ if (local_media_ssrc() != request.ssrc() || request.bitrate_bps() == 0)
+ continue;
+
+ TmmbrInformation* tmmbr_info = FindOrCreateTmmbrInfo(tmmbr.sender_ssrc());
+ auto* entry = &tmmbr_info->tmmbr[sender_ssrc];
+ entry->tmmbr_item = rtcp::TmmbItem(sender_ssrc, request.bitrate_bps(),
+ request.packet_overhead());
+ // FindOrCreateTmmbrInfo always sets `last_time_received_ms` to
+ // `clock_->TimeInMilliseconds()`.
+ entry->last_updated_ms = tmmbr_info->last_time_received_ms;
+
+ packet_information->packet_type_flags |= kRtcpTmmbr;
+ break;
+ }
+}
+
+void RTCPReceiver::HandleTmmbn(const CommonHeader& rtcp_block,
+ PacketInformation* packet_information) {
+ rtcp::Tmmbn tmmbn;
+ if (!tmmbn.Parse(rtcp_block)) {
+ ++num_skipped_packets_;
+ return;
+ }
+
+ TmmbrInformation* tmmbr_info = FindOrCreateTmmbrInfo(tmmbn.sender_ssrc());
+
+ packet_information->packet_type_flags |= kRtcpTmmbn;
+
+ tmmbr_info->tmmbn = tmmbn.items();
+}
+
+void RTCPReceiver::HandleSrReq(const CommonHeader& rtcp_block,
+ PacketInformation* packet_information) {
+ rtcp::RapidResyncRequest sr_req;
+ if (!sr_req.Parse(rtcp_block)) {
+ ++num_skipped_packets_;
+ return;
+ }
+
+ packet_information->packet_type_flags |= kRtcpSrReq;
+}
+
+void RTCPReceiver::HandlePsfbApp(const CommonHeader& rtcp_block,
+ PacketInformation* packet_information) {
+ {
+ rtcp::Remb remb;
+ if (remb.Parse(rtcp_block)) {
+ packet_information->packet_type_flags |= kRtcpRemb;
+ packet_information->receiver_estimated_max_bitrate_bps =
+ remb.bitrate_bps();
+ return;
+ }
+ }
+
+ {
+ auto loss_notification = std::make_unique<rtcp::LossNotification>();
+ if (loss_notification->Parse(rtcp_block)) {
+ packet_information->packet_type_flags |= kRtcpLossNotification;
+ packet_information->loss_notification = std::move(loss_notification);
+ return;
+ }
+ }
+
+ RTC_LOG(LS_WARNING) << "Unknown PSFB-APP packet.";
+
+ ++num_skipped_packets_;
+}
+
+void RTCPReceiver::HandleFir(const CommonHeader& rtcp_block,
+ PacketInformation* packet_information) {
+ rtcp::Fir fir;
+ if (!fir.Parse(rtcp_block)) {
+ ++num_skipped_packets_;
+ return;
+ }
+
+ if (fir.requests().empty())
+ return;
+
+ const int64_t now_ms = clock_->TimeInMilliseconds();
+ for (const rtcp::Fir::Request& fir_request : fir.requests()) {
+ // Is it our sender that is being asked to generate a new keyframe?
+ if (local_media_ssrc() != fir_request.ssrc)
+ continue;
+
+ ++packet_type_counter_.fir_packets;
+
+ auto inserted = last_fir_.insert(std::make_pair(
+ fir.sender_ssrc(), LastFirStatus(now_ms, fir_request.seq_nr)));
+ if (!inserted.second) { // There was already an entry.
+ LastFirStatus* last_fir = &inserted.first->second;
+
+ // Check if we have reported this FIRSequenceNumber before.
+ if (fir_request.seq_nr == last_fir->sequence_number)
+ continue;
+
+ // Sanity: don't go crazy with the callbacks.
+ if (now_ms - last_fir->request_ms < kRtcpMinFrameLengthMs)
+ continue;
+
+ last_fir->request_ms = now_ms;
+ last_fir->sequence_number = fir_request.seq_nr;
+ }
+ // Received signal that we need to send a new key frame.
+ packet_information->packet_type_flags |= kRtcpFir;
+ }
+}
+
+void RTCPReceiver::HandleTransportFeedback(
+ const CommonHeader& rtcp_block,
+ PacketInformation* packet_information) {
+ auto transport_feedback = std::make_unique<rtcp::TransportFeedback>();
+ if (!transport_feedback->Parse(rtcp_block)) {
+ ++num_skipped_packets_;
+ return;
+ }
+
+ packet_information->packet_type_flags |= kRtcpTransportFeedback;
+ packet_information->transport_feedback = std::move(transport_feedback);
+}
+
+void RTCPReceiver::NotifyTmmbrUpdated() {
+ // Find bounding set.
+ std::vector<rtcp::TmmbItem> bounding =
+ TMMBRHelp::FindBoundingSet(TmmbrReceived());
+
+ if (!bounding.empty() && rtcp_bandwidth_observer_) {
+ // We have a new bandwidth estimate on this channel.
+ uint64_t bitrate_bps = TMMBRHelp::CalcMinBitrateBps(bounding);
+ if (bitrate_bps <= std::numeric_limits<uint32_t>::max())
+ rtcp_bandwidth_observer_->OnReceivedEstimatedBitrate(bitrate_bps);
+ }
+
+ // Send tmmbn to inform remote clients about the new bandwidth.
+ rtp_rtcp_->SetTmmbn(std::move(bounding));
+}
+
+// Called while holding no critical section.
+void RTCPReceiver::TriggerCallbacksFromRtcpPacket(
+ const PacketInformation& packet_information) {
+ // Process TMMBR and REMB first to avoid multiple callbacks
+ // to OnNetworkChanged.
+ if (packet_information.packet_type_flags & kRtcpTmmbr) {
+ // Might trigger a OnReceivedBandwidthEstimateUpdate.
+ NotifyTmmbrUpdated();
+ }
+
+ if (!receiver_only_ && (packet_information.packet_type_flags & kRtcpSrReq)) {
+ rtp_rtcp_->OnRequestSendReport();
+ }
+ if (!receiver_only_ && (packet_information.packet_type_flags & kRtcpNack)) {
+ if (!packet_information.nack_sequence_numbers.empty()) {
+ RTC_LOG(LS_VERBOSE) << "Incoming NACK length: "
+ << packet_information.nack_sequence_numbers.size();
+ rtp_rtcp_->OnReceivedNack(packet_information.nack_sequence_numbers);
+ }
+ }
+
+ // We need feedback that we have received report block(s) so that we can
+ // generate a new packet in a conference relay scenario. One received report
+ // can generate several RTCP packets, based on the number of streams
+ // relayed/mixed; a send report block should go out to all receivers.
+ if (rtcp_intra_frame_observer_) {
+ RTC_DCHECK(!receiver_only_);
+ if ((packet_information.packet_type_flags & kRtcpPli) ||
+ (packet_information.packet_type_flags & kRtcpFir)) {
+ if (packet_information.packet_type_flags & kRtcpPli) {
+ RTC_LOG(LS_VERBOSE)
+ << "Incoming PLI from SSRC " << packet_information.remote_ssrc;
+ } else {
+ RTC_LOG(LS_VERBOSE)
+ << "Incoming FIR from SSRC " << packet_information.remote_ssrc;
+ }
+ rtcp_intra_frame_observer_->OnReceivedIntraFrameRequest(
+ local_media_ssrc());
+ }
+ }
+ if (rtcp_loss_notification_observer_ &&
+ (packet_information.packet_type_flags & kRtcpLossNotification)) {
+ rtcp::LossNotification* loss_notification =
+ packet_information.loss_notification.get();
+ RTC_DCHECK(loss_notification);
+ if (loss_notification->media_ssrc() == local_media_ssrc()) {
+ rtcp_loss_notification_observer_->OnReceivedLossNotification(
+ loss_notification->media_ssrc(), loss_notification->last_decoded(),
+ loss_notification->last_received(),
+ loss_notification->decodability_flag());
+ }
+ }
+ if (rtcp_bandwidth_observer_) {
+ RTC_DCHECK(!receiver_only_);
+ if (packet_information.packet_type_flags & kRtcpRemb) {
+ RTC_LOG(LS_VERBOSE)
+ << "Incoming REMB: "
+ << packet_information.receiver_estimated_max_bitrate_bps;
+ rtcp_bandwidth_observer_->OnReceivedEstimatedBitrate(
+ packet_information.receiver_estimated_max_bitrate_bps);
+ }
+ if ((packet_information.packet_type_flags & kRtcpSr) ||
+ (packet_information.packet_type_flags & kRtcpRr)) {
+ int64_t now_ms = clock_->TimeInMilliseconds();
+ rtcp_bandwidth_observer_->OnReceivedRtcpReceiverReport(
+ packet_information.report_blocks, packet_information.rtt_ms, now_ms);
+ }
+ }
+ if ((packet_information.packet_type_flags & kRtcpSr) ||
+ (packet_information.packet_type_flags & kRtcpRr)) {
+ rtp_rtcp_->OnReceivedRtcpReportBlocks(packet_information.report_blocks);
+ }
+
+ if (transport_feedback_observer_ &&
+ (packet_information.packet_type_flags & kRtcpTransportFeedback)) {
+ uint32_t media_source_ssrc =
+ packet_information.transport_feedback->media_ssrc();
+ if (media_source_ssrc == local_media_ssrc() ||
+ registered_ssrcs_.contains(media_source_ssrc)) {
+ transport_feedback_observer_->OnTransportFeedback(
+ *packet_information.transport_feedback);
+ }
+ }
+
+ if (network_state_estimate_observer_ &&
+ packet_information.network_state_estimate) {
+ network_state_estimate_observer_->OnRemoteNetworkEstimate(
+ *packet_information.network_state_estimate);
+ }
+
+ if (bitrate_allocation_observer_ &&
+ packet_information.target_bitrate_allocation) {
+ bitrate_allocation_observer_->OnBitrateAllocationUpdated(
+ *packet_information.target_bitrate_allocation);
+ }
+
+ if (!receiver_only_) {
+ if (report_block_data_observer_) {
+ for (const auto& report_block_data :
+ packet_information.report_block_datas) {
+ report_block_data_observer_->OnReportBlockDataUpdated(
+ report_block_data);
+ }
+ }
+ }
+}
+
+std::vector<rtcp::TmmbItem> RTCPReceiver::TmmbrReceived() {
+ MutexLock lock(&rtcp_receiver_lock_);
+ std::vector<rtcp::TmmbItem> candidates;
+
+ int64_t now_ms = clock_->TimeInMilliseconds();
+ int64_t timeout_ms = now_ms - kTmmbrTimeoutIntervalMs;
+
+ for (auto& kv : tmmbr_infos_) {
+ for (auto it = kv.second.tmmbr.begin(); it != kv.second.tmmbr.end();) {
+ if (it->second.last_updated_ms < timeout_ms) {
+ // Erase timeout entries.
+ it = kv.second.tmmbr.erase(it);
+ } else {
+ candidates.push_back(it->second.tmmbr_item);
+ ++it;
+ }
+ }
+ }
+ return candidates;
+}
+
+bool RTCPReceiver::RtcpRrTimeoutLocked(Timestamp now) {
+ bool result =
+ ResetTimestampIfExpired(now, last_received_rb_, report_interval_);
+ if (result && rtcp_event_observer_) {
+ rtcp_event_observer_->OnRtcpTimeout();
+ }
+ return result;
+}
+
+bool RTCPReceiver::RtcpRrSequenceNumberTimeoutLocked(Timestamp now) {
+ bool result = ResetTimestampIfExpired(now, last_increased_sequence_number_,
+ report_interval_);
+ if (result && rtcp_event_observer_) {
+ rtcp_event_observer_->OnRtcpTimeout();
+ }
+ return result;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_receiver.h b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_receiver.h
new file mode 100644
index 0000000000..e3f5bc765c
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_receiver.h
@@ -0,0 +1,457 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_RTP_RTCP_SOURCE_RTCP_RECEIVER_H_
+#define MODULES_RTP_RTCP_SOURCE_RTCP_RECEIVER_H_
+
+#include <list>
+#include <map>
+#include <string>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/array_view.h"
+#include "api/sequence_checker.h"
+#include "api/units/time_delta.h"
+#include "modules/rtp_rtcp/include/report_block_data.h"
+#include "modules/rtp_rtcp/include/rtcp_statistics.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "modules/rtp_rtcp/source/rtcp_nack_stats.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/dlrr.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/tmmb_item.h"
+#include "modules/rtp_rtcp/source/rtp_rtcp_interface.h"
+#include "rtc_base/containers/flat_map.h"
+#include "rtc_base/synchronization/mutex.h"
+#include "rtc_base/system/no_unique_address.h"
+#include "rtc_base/thread_annotations.h"
+#include "system_wrappers/include/ntp_time.h"
+
+namespace webrtc {
+
+class ModuleRtpRtcpImpl2;
+class VideoBitrateAllocationObserver;
+
+namespace rtcp {
+class CommonHeader;
+class ReportBlock;
+class Rrtr;
+class TargetBitrate;
+class TmmbItem;
+} // namespace rtcp
+
+class RTCPReceiver final {
+ public:
+ class ModuleRtpRtcp {
+ public:
+ virtual void SetTmmbn(std::vector<rtcp::TmmbItem> bounding_set) = 0;
+ virtual void OnRequestSendReport() = 0;
+ virtual void OnReceivedNack(
+ const std::vector<uint16_t>& nack_sequence_numbers) = 0;
+ virtual void OnReceivedRtcpReportBlocks(
+ const ReportBlockList& report_blocks) = 0;
+
+ protected:
+ virtual ~ModuleRtpRtcp() = default;
+ };
+ // Standardized stats derived from the non-sender RTT.
+ class NonSenderRttStats {
+ public:
+ NonSenderRttStats() = default;
+ NonSenderRttStats(const NonSenderRttStats&) = default;
+ NonSenderRttStats& operator=(const NonSenderRttStats&) = default;
+ ~NonSenderRttStats() = default;
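+ // Note: despite the `_seconds` suffix on the parameter name, Update() takes
+ // a TimeDelta and accumulates it unchanged into the total round trip time.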
+ void Update(TimeDelta non_sender_rtt_seconds) {
+ round_trip_time_ = non_sender_rtt_seconds;
+ total_round_trip_time_ += non_sender_rtt_seconds;
+ round_trip_time_measurements_++;
+ }
+ void Invalidate() { round_trip_time_.reset(); }
+ // https://www.w3.org/TR/webrtc-stats/#dom-rtcremoteoutboundrtpstreamstats-roundtriptime
+ absl::optional<TimeDelta> round_trip_time() const {
+ return round_trip_time_;
+ }
+ // https://www.w3.org/TR/webrtc-stats/#dom-rtcremoteoutboundrtpstreamstats-totalroundtriptime
+ TimeDelta total_round_trip_time() const { return total_round_trip_time_; }
+ // https://www.w3.org/TR/webrtc-stats/#dom-rtcremoteoutboundrtpstreamstats-roundtriptimemeasurements
+ int round_trip_time_measurements() const {
+ return round_trip_time_measurements_;
+ }
+
+ private:
+ absl::optional<TimeDelta> round_trip_time_;
+ TimeDelta total_round_trip_time_ = TimeDelta::Zero();
+ int round_trip_time_measurements_ = 0;
+ };
+
+ RTCPReceiver(const RtpRtcpInterface::Configuration& config,
+ ModuleRtpRtcp* owner);
+
+ RTCPReceiver(const RtpRtcpInterface::Configuration& config,
+ ModuleRtpRtcpImpl2* owner);
+
+ ~RTCPReceiver();
+
+ void IncomingPacket(const uint8_t* packet, size_t packet_size) {
+ IncomingPacket(rtc::MakeArrayView(packet, packet_size));
+ }
+ void IncomingPacket(rtc::ArrayView<const uint8_t> packet);
+
+ int64_t LastReceivedReportBlockMs() const;
+
+ void set_local_media_ssrc(uint32_t ssrc);
+ uint32_t local_media_ssrc() const;
+
+ void SetRemoteSSRC(uint32_t ssrc);
+ uint32_t RemoteSSRC() const;
+
+ bool receiver_only() const { return receiver_only_; }
+
+ // Get received NTP.
+ // The types for the arguments below derive from the specification:
+ // - `remote_sender_packet_count`: `RTCSentRtpStreamStats.packetsSent` [1]
+ // - `remote_sender_octet_count`: `RTCSentRtpStreamStats.bytesSent` [1]
+ // - `remote_sender_reports_count`:
+ // `RTCRemoteOutboundRtpStreamStats.reportsSent` [2]
+ // [1] https://www.w3.org/TR/webrtc-stats/#remoteoutboundrtpstats-dict*
+ // [2] https://www.w3.org/TR/webrtc-stats/#dom-rtcremoteoutboundrtpstreamstats-reportssent
+ bool NTP(uint32_t* received_ntp_secs,
+ uint32_t* received_ntp_frac,
+ uint32_t* rtcp_arrival_time_secs,
+ uint32_t* rtcp_arrival_time_frac,
+ uint32_t* rtcp_timestamp,
+ uint32_t* remote_sender_packet_count,
+ uint64_t* remote_sender_octet_count,
+ uint64_t* remote_sender_reports_count) const;
+
+ std::vector<rtcp::ReceiveTimeInfo> ConsumeReceivedXrReferenceTimeInfo();
+
+ // Get received sender packet and octet counts
+ void RemoteRTCPSenderInfo(uint32_t* packet_count,
+ uint32_t* octet_count,
+ int64_t* ntp_timestamp_ms,
+ int64_t* remote_ntp_timestamp_ms) const;
+
+ // Get rtt.
+ int32_t RTT(uint32_t remote_ssrc,
+ int64_t* last_rtt_ms,
+ int64_t* avg_rtt_ms,
+ int64_t* min_rtt_ms,
+ int64_t* max_rtt_ms) const;
+
+ // Returns non-sender RTT metrics for the remote SSRC.
+ NonSenderRttStats GetNonSenderRTT() const;
+
+ void SetNonSenderRttMeasurement(bool enabled);
+ bool GetAndResetXrRrRtt(int64_t* rtt_ms);
+
+ // Called once per second on the worker thread to do rtt calculations.
+ // Returns an optional rtt value if one is available.
+ absl::optional<TimeDelta> OnPeriodicRttUpdate(Timestamp newer_than,
+ bool sending);
+
+ // A snapshot of Report Blocks with additional data of interest to statistics.
+ // Within this list, the source SSRC is unique and ReportBlockData represents
+ // the latest Report Block that was received for that SSRC.
+ std::vector<ReportBlockData> GetLatestReportBlockData() const;
+
+ // Returns true if we haven't received an RTCP RR for several RTCP
+ // intervals; only returns true once until a new RR is received.
+ bool RtcpRrTimeout();
+
+ // Returns true if the extended highest sequence number in the received RTCP
+ // RRs hasn't increased for too long, i.e. the receive side has not reported
+ // receiving new RTP packets for several RTCP intervals. The function only
+ // returns true once, until a new RR is received.
+
+ std::vector<rtcp::TmmbItem> TmmbrReceived();
+ // Return true if new bandwidth should be set.
+ bool UpdateTmmbrTimers();
+ std::vector<rtcp::TmmbItem> BoundingSet(bool* tmmbr_owner);
+ // Set new bandwidth and notify remote clients about it.
+ void NotifyTmmbrUpdated();
+
+ private:
+#if RTC_DCHECK_IS_ON
+ class CustomSequenceChecker : public SequenceChecker {
+ public:
+ explicit CustomSequenceChecker(bool disable_checks)
+ : disable_checks_(disable_checks) {}
+ bool IsCurrent() const {
+ if (disable_checks_)
+ return true;
+ return SequenceChecker::IsCurrent();
+ }
+
+ private:
+ const bool disable_checks_;
+ };
+#else
+ class CustomSequenceChecker : public SequenceChecker {
+ public:
+ explicit CustomSequenceChecker(bool) {}
+ };
+#endif
+
+ // A lightweight inlined set of local SSRCs.
+ class RegisteredSsrcs {
+ public:
+ static constexpr size_t kMediaSsrcIndex = 0;
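+ // Enough room for the media SSRC plus, typically, the RTX and FEC SSRCs
+ // (an assumption based on the streams a channel can register).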
+ static constexpr size_t kMaxSsrcs = 3;
+ // Initializes the set of registered local SSRCs by extracting them from the
+ // provided `config`. The `disable_sequence_checker` flag is a workaround
+ // to be able to use a sequence checker without breaking downstream
+ // code that currently doesn't follow the same threading rules as WebRTC.
+ RegisteredSsrcs(bool disable_sequence_checker,
+ const RtpRtcpInterface::Configuration& config);
+
+ // Indicates if `ssrc` is in the set of registered local SSRCs.
+ bool contains(uint32_t ssrc) const;
+ uint32_t media_ssrc() const;
+ void set_media_ssrc(uint32_t ssrc);
+
+ private:
+ RTC_NO_UNIQUE_ADDRESS CustomSequenceChecker packet_sequence_checker_;
+ absl::InlinedVector<uint32_t, kMaxSsrcs> ssrcs_
+ RTC_GUARDED_BY(packet_sequence_checker_);
+ };
+
+ struct PacketInformation;
+
+ // Structure for handling TMMBR and TMMBN RTCP messages (RFC5104,
+ // section 3.5.4).
+ struct TmmbrInformation {
+ struct TimedTmmbrItem {
+ rtcp::TmmbItem tmmbr_item;
+ int64_t last_updated_ms;
+ };
+
+ int64_t last_time_received_ms = 0;
+
+ bool ready_for_delete = false;
+
+ std::vector<rtcp::TmmbItem> tmmbn;
+ std::map<uint32_t, TimedTmmbrItem> tmmbr;
+ };
+
+ // Structure for storing received RRTR RTCP messages (RFC3611, section 4.4).
+ struct RrtrInformation {
+ RrtrInformation(uint32_t ssrc,
+ uint32_t received_remote_mid_ntp_time,
+ uint32_t local_receive_mid_ntp_time)
+ : ssrc(ssrc),
+ received_remote_mid_ntp_time(received_remote_mid_ntp_time),
+ local_receive_mid_ntp_time(local_receive_mid_ntp_time) {}
+
+ uint32_t ssrc;
+ // Received NTP timestamp in compact representation.
+ uint32_t received_remote_mid_ntp_time;
+ // NTP time when the report was received in compact representation.
+ uint32_t local_receive_mid_ntp_time;
+ };
+
+ struct LastFirStatus {
+ LastFirStatus(int64_t now_ms, uint8_t sequence_number)
+ : request_ms(now_ms), sequence_number(sequence_number) {}
+ int64_t request_ms;
+ uint8_t sequence_number;
+ };
+
+ class RttStats {
+ public:
+ RttStats() = default;
+ RttStats(const RttStats&) = default;
+ RttStats& operator=(const RttStats&) = default;
+
+ void AddRtt(TimeDelta rtt);
+
+ TimeDelta last_rtt() const { return last_rtt_; }
+ TimeDelta min_rtt() const { return min_rtt_; }
+ TimeDelta max_rtt() const { return max_rtt_; }
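+ // Only meaningful after at least one AddRtt() call; with num_rtts_ == 0 the
+ // division below would be invalid.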
+ TimeDelta average_rtt() const { return sum_rtt_ / num_rtts_; }
+
+ private:
+ TimeDelta last_rtt_ = TimeDelta::Zero();
+ TimeDelta min_rtt_ = TimeDelta::PlusInfinity();
+ TimeDelta max_rtt_ = TimeDelta::MinusInfinity();
+ TimeDelta sum_rtt_ = TimeDelta::Zero();
+ size_t num_rtts_ = 0;
+ };
+
+ bool ParseCompoundPacket(rtc::ArrayView<const uint8_t> packet,
+ PacketInformation* packet_information);
+
+ void TriggerCallbacksFromRtcpPacket(
+ const PacketInformation& packet_information);
+
+ TmmbrInformation* FindOrCreateTmmbrInfo(uint32_t remote_ssrc)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(rtcp_receiver_lock_);
+ // Updates the TmmbrInformation (if present) to mark the remote as alive.
+ void UpdateTmmbrRemoteIsAlive(uint32_t remote_ssrc)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(rtcp_receiver_lock_);
+ TmmbrInformation* GetTmmbrInformation(uint32_t remote_ssrc)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(rtcp_receiver_lock_);
+
+ void HandleSenderReport(const rtcp::CommonHeader& rtcp_block,
+ PacketInformation* packet_information)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(rtcp_receiver_lock_);
+
+ void HandleReceiverReport(const rtcp::CommonHeader& rtcp_block,
+ PacketInformation* packet_information)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(rtcp_receiver_lock_);
+
+ void HandleReportBlock(const rtcp::ReportBlock& report_block,
+ PacketInformation* packet_information,
+ uint32_t remote_ssrc)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(rtcp_receiver_lock_);
+
+ void HandleSdes(const rtcp::CommonHeader& rtcp_block,
+ PacketInformation* packet_information)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(rtcp_receiver_lock_);
+
+ void HandleXr(const rtcp::CommonHeader& rtcp_block,
+ PacketInformation* packet_information,
+ bool& contains_dlrr,
+ uint32_t& ssrc)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(rtcp_receiver_lock_);
+
+ void HandleXrReceiveReferenceTime(uint32_t sender_ssrc,
+ const rtcp::Rrtr& rrtr)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(rtcp_receiver_lock_);
+
+ void HandleXrDlrrReportBlock(uint32_t ssrc, const rtcp::ReceiveTimeInfo& rti)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(rtcp_receiver_lock_);
+
+ void HandleXrTargetBitrate(uint32_t ssrc,
+ const rtcp::TargetBitrate& target_bitrate,
+ PacketInformation* packet_information)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(rtcp_receiver_lock_);
+
+ void HandleNack(const rtcp::CommonHeader& rtcp_block,
+ PacketInformation* packet_information)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(rtcp_receiver_lock_);
+
+ void HandleApp(const rtcp::CommonHeader& rtcp_block,
+ PacketInformation* packet_information)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(rtcp_receiver_lock_);
+
+ void HandleBye(const rtcp::CommonHeader& rtcp_block)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(rtcp_receiver_lock_);
+
+ void HandlePli(const rtcp::CommonHeader& rtcp_block,
+ PacketInformation* packet_information)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(rtcp_receiver_lock_);
+
+ void HandlePsfbApp(const rtcp::CommonHeader& rtcp_block,
+ PacketInformation* packet_information)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(rtcp_receiver_lock_);
+
+ void HandleTmmbr(const rtcp::CommonHeader& rtcp_block,
+ PacketInformation* packet_information)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(rtcp_receiver_lock_);
+
+ void HandleTmmbn(const rtcp::CommonHeader& rtcp_block,
+ PacketInformation* packet_information)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(rtcp_receiver_lock_);
+
+ void HandleSrReq(const rtcp::CommonHeader& rtcp_block,
+ PacketInformation* packet_information)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(rtcp_receiver_lock_);
+
+ void HandleFir(const rtcp::CommonHeader& rtcp_block,
+ PacketInformation* packet_information)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(rtcp_receiver_lock_);
+
+ void HandleTransportFeedback(const rtcp::CommonHeader& rtcp_block,
+ PacketInformation* packet_information)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(rtcp_receiver_lock_);
+
+ bool RtcpRrTimeoutLocked(Timestamp now)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(rtcp_receiver_lock_);
+
+ bool RtcpRrSequenceNumberTimeoutLocked(Timestamp now)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(rtcp_receiver_lock_);
+
+ Clock* const clock_;
+ const bool receiver_only_;
+ ModuleRtpRtcp* const rtp_rtcp_;
+ // The set of registered local SSRCs.
+ RegisteredSsrcs registered_ssrcs_;
+
+ RtcpBandwidthObserver* const rtcp_bandwidth_observer_;
+ RtcpEventObserver* const rtcp_event_observer_;
+ RtcpIntraFrameObserver* const rtcp_intra_frame_observer_;
+ RtcpLossNotificationObserver* const rtcp_loss_notification_observer_;
+ NetworkStateEstimateObserver* const network_state_estimate_observer_;
+ TransportFeedbackObserver* const transport_feedback_observer_;
+ VideoBitrateAllocationObserver* const bitrate_allocation_observer_;
+ const TimeDelta report_interval_;
+
+ mutable Mutex rtcp_receiver_lock_;
+ uint32_t remote_ssrc_ RTC_GUARDED_BY(rtcp_receiver_lock_);
+
+ // Received sender report.
+ NtpTime remote_sender_ntp_time_ RTC_GUARDED_BY(rtcp_receiver_lock_);
+ uint32_t remote_sender_rtp_time_ RTC_GUARDED_BY(rtcp_receiver_lock_);
+ // When we received the last sender report.
+ NtpTime last_received_sr_ntp_ RTC_GUARDED_BY(rtcp_receiver_lock_);
+ uint32_t remote_sender_packet_count_ RTC_GUARDED_BY(rtcp_receiver_lock_);
+ uint64_t remote_sender_octet_count_ RTC_GUARDED_BY(rtcp_receiver_lock_);
+ uint64_t remote_sender_reports_count_ RTC_GUARDED_BY(rtcp_receiver_lock_);
+
+ // Received RRTR information in ascending receive time order.
+ std::list<RrtrInformation> received_rrtrs_
+ RTC_GUARDED_BY(rtcp_receiver_lock_);
+ // Received RRTR information mapped by remote ssrc.
+ flat_map<uint32_t, std::list<RrtrInformation>::iterator>
+ received_rrtrs_ssrc_it_ RTC_GUARDED_BY(rtcp_receiver_lock_);
+
+ // True when RTT calculation from XR RRTR/DLRR blocks is enabled.
+ bool xr_rrtr_status_ RTC_GUARDED_BY(rtcp_receiver_lock_);
+ // Estimated RTT in ms; zero when there is no valid estimate.
+ int64_t xr_rr_rtt_ms_;
+
+ int64_t oldest_tmmbr_info_ms_ RTC_GUARDED_BY(rtcp_receiver_lock_);
+ // Mapped by remote ssrc.
+ flat_map<uint32_t, TmmbrInformation> tmmbr_infos_
+ RTC_GUARDED_BY(rtcp_receiver_lock_);
+
+ // Round-Trip Time per remote sender ssrc.
+ flat_map<uint32_t, RttStats> rtts_ RTC_GUARDED_BY(rtcp_receiver_lock_);
+ // Non-sender Round-trip time per remote ssrc.
+ flat_map<uint32_t, NonSenderRttStats> non_sender_rtts_
+ RTC_GUARDED_BY(rtcp_receiver_lock_);
+
+ // Report blocks per local source ssrc.
+ flat_map<uint32_t, ReportBlockData> received_report_blocks_
+ RTC_GUARDED_BY(rtcp_receiver_lock_);
+ flat_map<uint32_t, LastFirStatus> last_fir_
+ RTC_GUARDED_BY(rtcp_receiver_lock_);
+
+ // The last time we received an RTCP Report block for this module.
+ Timestamp last_received_rb_ RTC_GUARDED_BY(rtcp_receiver_lock_) =
+ Timestamp::PlusInfinity();
+
+ // The time we last received an RTCP RR telling us that we have successfully
+ // delivered RTP packets to the remote side.
+ Timestamp last_increased_sequence_number_ = Timestamp::PlusInfinity();
+
+ RtcpCnameCallback* const cname_callback_;
+ ReportBlockDataObserver* const report_block_data_observer_;
+
+ RtcpPacketTypeCounterObserver* const packet_type_counter_observer_;
+ RtcpPacketTypeCounter packet_type_counter_;
+
+ RtcpNackStats nack_stats_;
+
+ size_t num_skipped_packets_;
+ int64_t last_skipped_packets_warning_ms_;
+};
+} // namespace webrtc
+#endif // MODULES_RTP_RTCP_SOURCE_RTCP_RECEIVER_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_receiver_unittest.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_receiver_unittest.cc
new file mode 100644
index 0000000000..ff39ccca9c
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_receiver_unittest.cc
@@ -0,0 +1,2012 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtcp_receiver.h"
+
+#include <memory>
+#include <set>
+#include <utility>
+
+#include "api/array_view.h"
+#include "api/units/time_delta.h"
+#include "api/units/timestamp.h"
+#include "api/video/video_bitrate_allocation.h"
+#include "api/video/video_bitrate_allocator.h"
+#include "modules/rtp_rtcp/include/report_block_data.h"
+#include "modules/rtp_rtcp/mocks/mock_rtcp_bandwidth_observer.h"
+#include "modules/rtp_rtcp/source/byte_io.h"
+#include "modules/rtp_rtcp/source/rtcp_packet.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/app.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/bye.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/compound_packet.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/extended_reports.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/fir.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/nack.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/pli.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/rapid_resync_request.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/receiver_report.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/remb.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/sdes.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/sender_report.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/tmmbr.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/transport_feedback.h"
+#include "modules/rtp_rtcp/source/time_util.h"
+#include "rtc_base/arraysize.h"
+#include "rtc_base/fake_clock.h"
+#include "rtc_base/random.h"
+#include "system_wrappers/include/ntp_time.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+using rtcp::ReceiveTimeInfo;
+using ::testing::_;
+using ::testing::AllOf;
+using ::testing::ElementsAreArray;
+using ::testing::Eq;
+using ::testing::Field;
+using ::testing::InSequence;
+using ::testing::IsEmpty;
+using ::testing::NiceMock;
+using ::testing::Property;
+using ::testing::SizeIs;
+using ::testing::StrEq;
+using ::testing::StrictMock;
+using ::testing::UnorderedElementsAre;
+
+class MockRtcpPacketTypeCounterObserver : public RtcpPacketTypeCounterObserver {
+ public:
+ MOCK_METHOD(void,
+ RtcpPacketTypesCounterUpdated,
+ (uint32_t, const RtcpPacketTypeCounter&),
+ (override));
+};
+
+class MockRtcpIntraFrameObserver : public RtcpIntraFrameObserver {
+ public:
+ MOCK_METHOD(void, OnReceivedIntraFrameRequest, (uint32_t), (override));
+};
+
+class MockRtcpLossNotificationObserver : public RtcpLossNotificationObserver {
+ public:
+ ~MockRtcpLossNotificationObserver() override = default;
+ MOCK_METHOD(void,
+ OnReceivedLossNotification,
+ (uint32_t ssrc,
+ uint16_t seq_num_of_last_decodable,
+ uint16_t seq_num_of_last_received,
+ bool decodability_flag),
+ (override));
+};
+
+class MockCnameCallbackImpl : public RtcpCnameCallback {
+ public:
+ MOCK_METHOD(void, OnCname, (uint32_t, absl::string_view), (override));
+};
+
+class MockReportBlockDataObserverImpl : public ReportBlockDataObserver {
+ public:
+ MOCK_METHOD(void, OnReportBlockDataUpdated, (ReportBlockData), (override));
+};
+
+class MockTransportFeedbackObserver : public TransportFeedbackObserver {
+ public:
+ MOCK_METHOD(void, OnAddPacket, (const RtpPacketSendInfo&), (override));
+ MOCK_METHOD(void,
+ OnTransportFeedback,
+ (const rtcp::TransportFeedback&),
+ (override));
+};
+
+class MockModuleRtpRtcp : public RTCPReceiver::ModuleRtpRtcp {
+ public:
+ MOCK_METHOD(void, SetTmmbn, (std::vector<rtcp::TmmbItem>), (override));
+ MOCK_METHOD(void, OnRequestSendReport, (), (override));
+ MOCK_METHOD(void, OnReceivedNack, (const std::vector<uint16_t>&), (override));
+ MOCK_METHOD(void,
+ OnReceivedRtcpReportBlocks,
+ (const ReportBlockList&),
+ (override));
+};
+
+class MockVideoBitrateAllocationObserver
+ : public VideoBitrateAllocationObserver {
+ public:
+ MOCK_METHOD(void,
+ OnBitrateAllocationUpdated,
+ (const VideoBitrateAllocation& allocation),
+ (override));
+};
+
+// SSRC of the remote peer that sends RTCP packets to the receiver under test.
+constexpr uint32_t kSenderSsrc = 0x10203;
+// SSRCs of the local peer that the RTCP packets are addressed to.
+constexpr uint32_t kReceiverMainSsrc = 0x123456;
+// RtcpReceiver can accept several SSRCs, e.g. regular and RTX streams.
+constexpr uint32_t kReceiverExtraSsrc = 0x1234567;
+// SSRCs to ignore (i.e. not configured in RtcpReceiver).
+constexpr uint32_t kNotToUsSsrc = 0x654321;
+constexpr uint32_t kUnknownSenderSsrc = 0x54321;
+
+constexpr int64_t kRtcpIntervalMs = 1000;
+
+} // namespace
+
+struct ReceiverMocks {
+ ReceiverMocks() : clock(1335900000) {}
+
+ SimulatedClock clock;
+ // Callbacks to packet_type_counter_observer are frequent but usually not
+ // interesting.
+ NiceMock<MockRtcpPacketTypeCounterObserver> packet_type_counter_observer;
+ StrictMock<MockRtcpBandwidthObserver> bandwidth_observer;
+ StrictMock<MockRtcpIntraFrameObserver> intra_frame_observer;
+ StrictMock<MockRtcpLossNotificationObserver> rtcp_loss_notification_observer;
+ StrictMock<MockTransportFeedbackObserver> transport_feedback_observer;
+ StrictMock<MockVideoBitrateAllocationObserver> bitrate_allocation_observer;
+ StrictMock<MockModuleRtpRtcp> rtp_rtcp_impl;
+};
+
+RtpRtcpInterface::Configuration DefaultConfiguration(ReceiverMocks* mocks) {
+ RtpRtcpInterface::Configuration config;
+ config.clock = &mocks->clock;
+ config.receiver_only = false;
+ config.rtcp_packet_type_counter_observer =
+ &mocks->packet_type_counter_observer;
+ config.bandwidth_callback = &mocks->bandwidth_observer;
+ config.intra_frame_callback = &mocks->intra_frame_observer;
+ config.rtcp_loss_notification_observer =
+ &mocks->rtcp_loss_notification_observer;
+ config.transport_feedback_callback = &mocks->transport_feedback_observer;
+ config.bitrate_allocation_observer = &mocks->bitrate_allocation_observer;
+ config.rtcp_report_interval_ms = kRtcpIntervalMs;
+ config.local_media_ssrc = kReceiverMainSsrc;
+ config.rtx_send_ssrc = kReceiverExtraSsrc;
+ return config;
+}
+
+TEST(RtcpReceiverTest, BrokenPacketIsIgnored) {
+ ReceiverMocks mocks;
+ RTCPReceiver receiver(DefaultConfiguration(&mocks), &mocks.rtp_rtcp_impl);
+
+ const uint8_t bad_packet[] = {0, 0, 0, 0};
+ EXPECT_CALL(mocks.packet_type_counter_observer, RtcpPacketTypesCounterUpdated)
+ .Times(0);
+ receiver.IncomingPacket(bad_packet);
+}
+
+TEST(RtcpReceiverTest, InvalidFeedbackPacketIsIgnored) {
+ ReceiverMocks mocks;
+ RTCPReceiver receiver(DefaultConfiguration(&mocks), &mocks.rtp_rtcp_impl);
+
+ // Too short feedback packet.
+ const uint8_t bad_packet[] = {0x81, rtcp::Rtpfb::kPacketType, 0, 0};
+
+ // TODO(danilchap): Add an expectation that RtcpPacketTypesCounterUpdated is
+ // not called, once the parser is adjusted to avoid that callback on
+ // semi-valid packets.
+ receiver.IncomingPacket(bad_packet);
+}
+
+TEST(RtcpReceiverTest, InjectSrPacket) {
+ ReceiverMocks mocks;
+ RTCPReceiver receiver(DefaultConfiguration(&mocks), &mocks.rtp_rtcp_impl);
+ receiver.SetRemoteSSRC(kSenderSsrc);
+
+ EXPECT_FALSE(receiver.NTP(nullptr, nullptr, nullptr, nullptr, nullptr,
+ nullptr, nullptr, nullptr));
+
+ int64_t now = mocks.clock.TimeInMilliseconds();
+ rtcp::SenderReport sr;
+ sr.SetSenderSsrc(kSenderSsrc);
+
+ EXPECT_CALL(mocks.rtp_rtcp_impl, OnReceivedRtcpReportBlocks(IsEmpty()));
+ EXPECT_CALL(mocks.bandwidth_observer,
+ OnReceivedRtcpReceiverReport(IsEmpty(), _, now));
+ receiver.IncomingPacket(sr.Build());
+
+ EXPECT_TRUE(receiver.NTP(nullptr, nullptr, nullptr, nullptr, nullptr, nullptr,
+ nullptr, nullptr));
+}
+
+TEST(RtcpReceiverTest, InjectSrPacketFromUnknownSender) {
+ ReceiverMocks mocks;
+ RTCPReceiver receiver(DefaultConfiguration(&mocks), &mocks.rtp_rtcp_impl);
+ receiver.SetRemoteSSRC(kSenderSsrc);
+
+ int64_t now = mocks.clock.TimeInMilliseconds();
+ rtcp::SenderReport sr;
+ sr.SetSenderSsrc(kUnknownSenderSsrc);
+
+ // The parser will handle report blocks in a Sender Report from a peer other
+ // than the expected one.
+ EXPECT_CALL(mocks.rtp_rtcp_impl, OnReceivedRtcpReportBlocks);
+ EXPECT_CALL(mocks.bandwidth_observer,
+ OnReceivedRtcpReceiverReport(_, _, now));
+ receiver.IncomingPacket(sr.Build());
+
+ // But it will not flag that it has received sender information.
+ EXPECT_FALSE(receiver.NTP(nullptr, nullptr, nullptr, nullptr, nullptr,
+ nullptr, nullptr, nullptr));
+}
+
+TEST(RtcpReceiverTest, InjectSrPacketCalculatesRTT) {
+ ReceiverMocks mocks;
+ RTCPReceiver receiver(DefaultConfiguration(&mocks), &mocks.rtp_rtcp_impl);
+ receiver.SetRemoteSSRC(kSenderSsrc);
+
+ const TimeDelta kRtt = TimeDelta::Millis(123);
+ const uint32_t kDelayNtp = 0x4321;
+ const TimeDelta kDelay = CompactNtpRttToTimeDelta(kDelayNtp);
+
+ int64_t rtt_ms = 0;
+ EXPECT_EQ(-1, receiver.RTT(kSenderSsrc, &rtt_ms, nullptr, nullptr, nullptr));
+
+ uint32_t sent_ntp = CompactNtp(mocks.clock.CurrentNtpTime());
+ mocks.clock.AdvanceTime(kRtt + kDelay);
+
+ rtcp::SenderReport sr;
+ sr.SetSenderSsrc(kSenderSsrc);
+ rtcp::ReportBlock block;
+ block.SetMediaSsrc(kReceiverMainSsrc);
+ block.SetLastSr(sent_ntp);
+ block.SetDelayLastSr(kDelayNtp);
+ sr.AddReportBlock(block);
+
+ EXPECT_CALL(mocks.rtp_rtcp_impl, OnReceivedRtcpReportBlocks);
+ EXPECT_CALL(mocks.bandwidth_observer, OnReceivedRtcpReceiverReport);
+ receiver.IncomingPacket(sr.Build());
+
+ EXPECT_EQ(0, receiver.RTT(kSenderSsrc, &rtt_ms, nullptr, nullptr, nullptr));
+ EXPECT_NEAR(rtt_ms, kRtt.ms(), 1);
+}
+
+TEST(RtcpReceiverTest, InjectSrPacketCalculatesNegativeRTTAsOne) {
+ ReceiverMocks mocks;
+ RTCPReceiver receiver(DefaultConfiguration(&mocks), &mocks.rtp_rtcp_impl);
+ receiver.SetRemoteSSRC(kSenderSsrc);
+
+ const TimeDelta kRtt = TimeDelta::Millis(-13);
+ const uint32_t kDelayNtp = 0x4321;
+ const TimeDelta kDelay = CompactNtpRttToTimeDelta(kDelayNtp);
+
+ int64_t rtt_ms = 0;
+ EXPECT_EQ(-1, receiver.RTT(kSenderSsrc, &rtt_ms, nullptr, nullptr, nullptr));
+
+ uint32_t sent_ntp = CompactNtp(mocks.clock.CurrentNtpTime());
+ mocks.clock.AdvanceTime(kRtt + kDelay);
+
+ rtcp::SenderReport sr;
+ sr.SetSenderSsrc(kSenderSsrc);
+ rtcp::ReportBlock block;
+ block.SetMediaSsrc(kReceiverMainSsrc);
+ block.SetLastSr(sent_ntp);
+ block.SetDelayLastSr(kDelayNtp);
+ sr.AddReportBlock(block);
+
+ EXPECT_CALL(mocks.rtp_rtcp_impl, OnReceivedRtcpReportBlocks(SizeIs(1)));
+ EXPECT_CALL(mocks.bandwidth_observer,
+ OnReceivedRtcpReceiverReport(SizeIs(1), _, _));
+ receiver.IncomingPacket(sr.Build());
+
+ EXPECT_EQ(0, receiver.RTT(kSenderSsrc, &rtt_ms, nullptr, nullptr, nullptr));
+ EXPECT_EQ(1, rtt_ms);
+}
+
+TEST(RtcpReceiverTest,
+ TwoReportBlocksWithLastOneWithoutLastSrCalculatesRttForBandwidthObserver) {
+ ReceiverMocks mocks;
+ RTCPReceiver receiver(DefaultConfiguration(&mocks), &mocks.rtp_rtcp_impl);
+ receiver.SetRemoteSSRC(kSenderSsrc);
+
+ const TimeDelta kRtt = TimeDelta::Millis(120);
+ const uint32_t kDelayNtp = 123000;
+ const TimeDelta kDelay = CompactNtpRttToTimeDelta(kDelayNtp);
+
+ uint32_t sent_ntp = CompactNtp(mocks.clock.CurrentNtpTime());
+ mocks.clock.AdvanceTime(kRtt + kDelay);
+
+ rtcp::SenderReport sr;
+ sr.SetSenderSsrc(kSenderSsrc);
+ rtcp::ReportBlock block;
+ block.SetMediaSsrc(kReceiverMainSsrc);
+ block.SetLastSr(sent_ntp);
+ block.SetDelayLastSr(kDelayNtp);
+ sr.AddReportBlock(block);
+ block.SetMediaSsrc(kReceiverExtraSsrc);
+ block.SetLastSr(0);
+ sr.AddReportBlock(block);
+
+ EXPECT_CALL(mocks.rtp_rtcp_impl, OnReceivedRtcpReportBlocks(SizeIs(2)));
+ EXPECT_CALL(mocks.bandwidth_observer,
+ OnReceivedRtcpReceiverReport(SizeIs(2), kRtt.ms(), _));
+ receiver.IncomingPacket(sr.Build());
+}
+
+TEST(RtcpReceiverTest, InjectRrPacket) {
+ ReceiverMocks mocks;
+ RTCPReceiver receiver(DefaultConfiguration(&mocks), &mocks.rtp_rtcp_impl);
+ receiver.SetRemoteSSRC(kSenderSsrc);
+
+ int64_t now = mocks.clock.TimeInMilliseconds();
+ rtcp::ReceiverReport rr;
+ rr.SetSenderSsrc(kSenderSsrc);
+
+ EXPECT_CALL(mocks.rtp_rtcp_impl, OnReceivedRtcpReportBlocks(IsEmpty()));
+ EXPECT_CALL(mocks.bandwidth_observer,
+ OnReceivedRtcpReceiverReport(IsEmpty(), _, now));
+ receiver.IncomingPacket(rr.Build());
+
+ EXPECT_THAT(receiver.GetLatestReportBlockData(), IsEmpty());
+}
+
+TEST(RtcpReceiverTest, InjectRrPacketWithReportBlockNotToUsIgnored) {
+ ReceiverMocks mocks;
+ RTCPReceiver receiver(DefaultConfiguration(&mocks), &mocks.rtp_rtcp_impl);
+ receiver.SetRemoteSSRC(kSenderSsrc);
+
+ int64_t now = mocks.clock.TimeInMilliseconds();
+ rtcp::ReportBlock rb;
+ rb.SetMediaSsrc(kNotToUsSsrc);
+ rtcp::ReceiverReport rr;
+ rr.SetSenderSsrc(kSenderSsrc);
+ rr.AddReportBlock(rb);
+
+ EXPECT_CALL(mocks.rtp_rtcp_impl, OnReceivedRtcpReportBlocks(IsEmpty()));
+ EXPECT_CALL(mocks.bandwidth_observer,
+ OnReceivedRtcpReceiverReport(IsEmpty(), _, now));
+ receiver.IncomingPacket(rr.Build());
+
+ EXPECT_EQ(0, receiver.LastReceivedReportBlockMs());
+ EXPECT_THAT(receiver.GetLatestReportBlockData(), IsEmpty());
+}
+
+TEST(RtcpReceiverTest, InjectRrPacketWithOneReportBlock) {
+ ReceiverMocks mocks;
+ RTCPReceiver receiver(DefaultConfiguration(&mocks), &mocks.rtp_rtcp_impl);
+ receiver.SetRemoteSSRC(kSenderSsrc);
+
+ int64_t now = mocks.clock.TimeInMilliseconds();
+
+ rtcp::ReportBlock rb;
+ rb.SetMediaSsrc(kReceiverMainSsrc);
+ rtcp::ReceiverReport rr;
+ rr.SetSenderSsrc(kSenderSsrc);
+ rr.AddReportBlock(rb);
+
+ EXPECT_CALL(mocks.rtp_rtcp_impl, OnReceivedRtcpReportBlocks(SizeIs(1)));
+ EXPECT_CALL(mocks.bandwidth_observer,
+ OnReceivedRtcpReceiverReport(SizeIs(1), _, now));
+ receiver.IncomingPacket(rr.Build());
+
+ EXPECT_EQ(now, receiver.LastReceivedReportBlockMs());
+ EXPECT_THAT(receiver.GetLatestReportBlockData(), SizeIs(1));
+}
+
+TEST(RtcpReceiverTest, InjectSrPacketWithOneReportBlock) {
+ ReceiverMocks mocks;
+ RTCPReceiver receiver(DefaultConfiguration(&mocks), &mocks.rtp_rtcp_impl);
+ receiver.SetRemoteSSRC(kSenderSsrc);
+
+ int64_t now = mocks.clock.TimeInMilliseconds();
+
+ rtcp::ReportBlock rb;
+ rb.SetMediaSsrc(kReceiverMainSsrc);
+ rtcp::SenderReport sr;
+ sr.SetSenderSsrc(kSenderSsrc);
+ sr.AddReportBlock(rb);
+
+ EXPECT_CALL(mocks.rtp_rtcp_impl, OnReceivedRtcpReportBlocks(SizeIs(1)));
+ EXPECT_CALL(mocks.bandwidth_observer,
+ OnReceivedRtcpReceiverReport(SizeIs(1), _, now));
+ receiver.IncomingPacket(sr.Build());
+
+ EXPECT_EQ(now, receiver.LastReceivedReportBlockMs());
+ EXPECT_THAT(receiver.GetLatestReportBlockData(), SizeIs(1));
+}
+
+TEST(RtcpReceiverTest, InjectRrPacketWithTwoReportBlocks) {
+ const uint16_t kSequenceNumbers[] = {10, 12423};
+ const uint32_t kCumLost[] = {13, 555};
+ const uint8_t kFracLost[] = {20, 11};
+ ReceiverMocks mocks;
+ RTCPReceiver receiver(DefaultConfiguration(&mocks), &mocks.rtp_rtcp_impl);
+ receiver.SetRemoteSSRC(kSenderSsrc);
+
+ int64_t now = mocks.clock.TimeInMilliseconds();
+
+ rtcp::ReportBlock rb1;
+ rb1.SetMediaSsrc(kReceiverMainSsrc);
+ rb1.SetExtHighestSeqNum(kSequenceNumbers[0]);
+ rb1.SetFractionLost(10);
+
+ rtcp::ReportBlock rb2;
+ rb2.SetMediaSsrc(kReceiverExtraSsrc);
+ rb2.SetExtHighestSeqNum(kSequenceNumbers[1]);
+ rb2.SetFractionLost(0);
+
+ rtcp::ReceiverReport rr1;
+ rr1.SetSenderSsrc(kSenderSsrc);
+ rr1.AddReportBlock(rb1);
+ rr1.AddReportBlock(rb2);
+
+ EXPECT_CALL(mocks.rtp_rtcp_impl, OnReceivedRtcpReportBlocks(SizeIs(2)));
+ EXPECT_CALL(mocks.bandwidth_observer,
+ OnReceivedRtcpReceiverReport(SizeIs(2), _, now));
+ receiver.IncomingPacket(rr1.Build());
+
+ EXPECT_EQ(now, receiver.LastReceivedReportBlockMs());
+ EXPECT_THAT(receiver.GetLatestReportBlockData(),
+ UnorderedElementsAre(
+ Property(&ReportBlockData::report_block,
+ Field(&RTCPReportBlock::fraction_lost, 0)),
+ Property(&ReportBlockData::report_block,
+ Field(&RTCPReportBlock::fraction_lost, 10))));
+
+  // Insert the next receiver report with the same SSRC but new values.
+ rtcp::ReportBlock rb3;
+ rb3.SetMediaSsrc(kReceiverMainSsrc);
+ rb3.SetExtHighestSeqNum(kSequenceNumbers[0]);
+ rb3.SetFractionLost(kFracLost[0]);
+ rb3.SetCumulativeLost(kCumLost[0]);
+
+ rtcp::ReportBlock rb4;
+ rb4.SetMediaSsrc(kReceiverExtraSsrc);
+ rb4.SetExtHighestSeqNum(kSequenceNumbers[1]);
+ rb4.SetFractionLost(kFracLost[1]);
+ rb4.SetCumulativeLost(kCumLost[1]);
+
+ rtcp::ReceiverReport rr2;
+ rr2.SetSenderSsrc(kSenderSsrc);
+ rr2.AddReportBlock(rb3);
+ rr2.AddReportBlock(rb4);
+
+  // Advance time so that the 1st and 2nd sent times differ.
+ mocks.clock.AdvanceTimeMilliseconds(500);
+ now = mocks.clock.TimeInMilliseconds();
+
+ EXPECT_CALL(mocks.rtp_rtcp_impl, OnReceivedRtcpReportBlocks(SizeIs(2)));
+ EXPECT_CALL(mocks.bandwidth_observer,
+ OnReceivedRtcpReceiverReport(SizeIs(2), _, now));
+ receiver.IncomingPacket(rr2.Build());
+
+ EXPECT_THAT(
+ receiver.GetLatestReportBlockData(),
+ UnorderedElementsAre(
+ Property(
+ &ReportBlockData::report_block,
+ AllOf(Field(&RTCPReportBlock::source_ssrc, kReceiverMainSsrc),
+ Field(&RTCPReportBlock::fraction_lost, kFracLost[0]),
+ Field(&RTCPReportBlock::packets_lost, kCumLost[0]),
+ Field(&RTCPReportBlock::extended_highest_sequence_number,
+ kSequenceNumbers[0]))),
+ Property(
+ &ReportBlockData::report_block,
+ AllOf(Field(&RTCPReportBlock::source_ssrc, kReceiverExtraSsrc),
+ Field(&RTCPReportBlock::fraction_lost, kFracLost[1]),
+ Field(&RTCPReportBlock::packets_lost, kCumLost[1]),
+ Field(&RTCPReportBlock::extended_highest_sequence_number,
+ kSequenceNumbers[1])))));
+}
+
+TEST(RtcpReceiverTest,
+ InjectRrPacketsFromTwoRemoteSsrcsReturnsLatestReportBlock) {
+ const uint32_t kSenderSsrc2 = 0x20304;
+ const uint16_t kSequenceNumbers[] = {10, 12423};
+ const int32_t kCumLost[] = {13, 555};
+ const uint8_t kFracLost[] = {20, 11};
+ ReceiverMocks mocks;
+ RTCPReceiver receiver(DefaultConfiguration(&mocks), &mocks.rtp_rtcp_impl);
+ receiver.SetRemoteSSRC(kSenderSsrc);
+
+ rtcp::ReportBlock rb1;
+ rb1.SetMediaSsrc(kReceiverMainSsrc);
+ rb1.SetExtHighestSeqNum(kSequenceNumbers[0]);
+ rb1.SetFractionLost(kFracLost[0]);
+ rb1.SetCumulativeLost(kCumLost[0]);
+ rtcp::ReceiverReport rr1;
+ rr1.SetSenderSsrc(kSenderSsrc);
+ rr1.AddReportBlock(rb1);
+
+ int64_t now = mocks.clock.TimeInMilliseconds();
+
+ EXPECT_CALL(mocks.rtp_rtcp_impl, OnReceivedRtcpReportBlocks(SizeIs(1)));
+ EXPECT_CALL(mocks.bandwidth_observer,
+ OnReceivedRtcpReceiverReport(SizeIs(1), _, now));
+ receiver.IncomingPacket(rr1.Build());
+
+ EXPECT_EQ(now, receiver.LastReceivedReportBlockMs());
+
+ EXPECT_THAT(
+ receiver.GetLatestReportBlockData(),
+ ElementsAre(Property(
+ &ReportBlockData::report_block,
+ AllOf(Field(&RTCPReportBlock::source_ssrc, kReceiverMainSsrc),
+ Field(&RTCPReportBlock::sender_ssrc, kSenderSsrc),
+ Field(&RTCPReportBlock::fraction_lost, kFracLost[0]),
+ Field(&RTCPReportBlock::packets_lost, kCumLost[0]),
+ Field(&RTCPReportBlock::extended_highest_sequence_number,
+ kSequenceNumbers[0])))));
+
+ rtcp::ReportBlock rb2;
+ rb2.SetMediaSsrc(kReceiverMainSsrc);
+ rb2.SetExtHighestSeqNum(kSequenceNumbers[1]);
+ rb2.SetFractionLost(kFracLost[1]);
+ rb2.SetCumulativeLost(kCumLost[1]);
+ rtcp::ReceiverReport rr2;
+ rr2.SetSenderSsrc(kSenderSsrc2);
+ rr2.AddReportBlock(rb2);
+
+ EXPECT_CALL(mocks.rtp_rtcp_impl, OnReceivedRtcpReportBlocks(SizeIs(1)));
+ EXPECT_CALL(mocks.bandwidth_observer,
+ OnReceivedRtcpReceiverReport(SizeIs(1), _, now));
+ receiver.IncomingPacket(rr2.Build());
+
+ EXPECT_THAT(
+ receiver.GetLatestReportBlockData(),
+ UnorderedElementsAre(
+ Property(
+ &ReportBlockData::report_block,
+ AllOf(Field(&RTCPReportBlock::source_ssrc, kReceiverMainSsrc),
+ Field(&RTCPReportBlock::sender_ssrc, kSenderSsrc2),
+ Field(&RTCPReportBlock::fraction_lost, kFracLost[1]),
+ Field(&RTCPReportBlock::packets_lost, kCumLost[1]),
+ Field(&RTCPReportBlock::extended_highest_sequence_number,
+ kSequenceNumbers[1])))));
+}
+
+TEST(RtcpReceiverTest, GetRtt) {
+ const uint32_t kSentCompactNtp = 0x1234;
+ const uint32_t kDelayCompactNtp = 0x222;
+ ReceiverMocks mocks;
+ RTCPReceiver receiver(DefaultConfiguration(&mocks), &mocks.rtp_rtcp_impl);
+ receiver.SetRemoteSSRC(kSenderSsrc);
+
+ // No report block received.
+ EXPECT_EQ(-1, receiver.RTT(kSenderSsrc, nullptr, nullptr, nullptr, nullptr));
+
+ rtcp::ReportBlock rb;
+ rb.SetMediaSsrc(kReceiverMainSsrc);
+ rb.SetLastSr(kSentCompactNtp);
+ rb.SetDelayLastSr(kDelayCompactNtp);
+
+ rtcp::ReceiverReport rr;
+ rr.SetSenderSsrc(kSenderSsrc);
+ rr.AddReportBlock(rb);
+ int64_t now = mocks.clock.TimeInMilliseconds();
+
+ EXPECT_CALL(mocks.rtp_rtcp_impl, OnReceivedRtcpReportBlocks);
+ EXPECT_CALL(mocks.bandwidth_observer, OnReceivedRtcpReceiverReport);
+ receiver.IncomingPacket(rr.Build());
+
+ EXPECT_EQ(now, receiver.LastReceivedReportBlockMs());
+ EXPECT_EQ(0, receiver.RTT(kSenderSsrc, nullptr, nullptr, nullptr, nullptr));
+}
+
+// App packets are ignored.
+TEST(RtcpReceiverTest, InjectApp) {
+ ReceiverMocks mocks;
+ RTCPReceiver receiver(DefaultConfiguration(&mocks), &mocks.rtp_rtcp_impl);
+ receiver.SetRemoteSSRC(kSenderSsrc);
+
+ rtcp::App app;
+ app.SetSubType(30);
+ app.SetName(0x17a177e);
+ const uint8_t kData[] = {'t', 'e', 's', 't', 'd', 'a', 't', 'a'};
+ app.SetData(kData, sizeof(kData));
+
+ receiver.IncomingPacket(app.Build());
+}
+
+TEST(RtcpReceiverTest, InjectSdesWithOneChunk) {
+ ReceiverMocks mocks;
+ MockCnameCallbackImpl callback;
+ RtpRtcpInterface::Configuration config = DefaultConfiguration(&mocks);
+ config.rtcp_cname_callback = &callback;
+ RTCPReceiver receiver(config, &mocks.rtp_rtcp_impl);
+ receiver.SetRemoteSSRC(kSenderSsrc);
+
+ const char kCname[] = "alice@host";
+ rtcp::Sdes sdes;
+ sdes.AddCName(kSenderSsrc, kCname);
+
+ EXPECT_CALL(callback, OnCname(kSenderSsrc, StrEq(kCname)));
+ receiver.IncomingPacket(sdes.Build());
+}
+
+TEST(RtcpReceiverTest, InjectByePacket_RemovesReportBlocks) {
+ ReceiverMocks mocks;
+ RTCPReceiver receiver(DefaultConfiguration(&mocks), &mocks.rtp_rtcp_impl);
+ receiver.SetRemoteSSRC(kSenderSsrc);
+
+ rtcp::ReportBlock rb1;
+ rb1.SetMediaSsrc(kReceiverMainSsrc);
+ rtcp::ReportBlock rb2;
+ rb2.SetMediaSsrc(kReceiverExtraSsrc);
+ rtcp::ReceiverReport rr;
+ rr.SetSenderSsrc(kSenderSsrc);
+ rr.AddReportBlock(rb1);
+ rr.AddReportBlock(rb2);
+
+ EXPECT_CALL(mocks.rtp_rtcp_impl, OnReceivedRtcpReportBlocks);
+ EXPECT_CALL(mocks.bandwidth_observer, OnReceivedRtcpReceiverReport);
+ receiver.IncomingPacket(rr.Build());
+
+ EXPECT_THAT(receiver.GetLatestReportBlockData(), SizeIs(2));
+
+ // Verify that BYE removes the report blocks.
+ rtcp::Bye bye;
+ bye.SetSenderSsrc(kSenderSsrc);
+
+ receiver.IncomingPacket(bye.Build());
+
+ EXPECT_THAT(receiver.GetLatestReportBlockData(), IsEmpty());
+
+ // Inject packet again.
+ EXPECT_CALL(mocks.rtp_rtcp_impl, OnReceivedRtcpReportBlocks);
+ EXPECT_CALL(mocks.bandwidth_observer, OnReceivedRtcpReceiverReport);
+ receiver.IncomingPacket(rr.Build());
+
+ EXPECT_THAT(receiver.GetLatestReportBlockData(), SizeIs(2));
+}
+
+TEST(RtcpReceiverTest, InjectByePacketRemovesReferenceTimeInfo) {
+ ReceiverMocks mocks;
+ RTCPReceiver receiver(DefaultConfiguration(&mocks), &mocks.rtp_rtcp_impl);
+ receiver.SetRemoteSSRC(kSenderSsrc);
+
+ rtcp::ExtendedReports xr;
+ xr.SetSenderSsrc(kSenderSsrc);
+ rtcp::Rrtr rrtr;
+ rrtr.SetNtp(NtpTime(0x10203, 0x40506));
+ xr.SetRrtr(rrtr);
+ receiver.IncomingPacket(xr.Build());
+
+ rtcp::Bye bye;
+ bye.SetSenderSsrc(kSenderSsrc);
+ receiver.IncomingPacket(bye.Build());
+
+ EXPECT_THAT(receiver.ConsumeReceivedXrReferenceTimeInfo(), IsEmpty());
+}
+
+TEST(RtcpReceiverTest, InjectPliPacket) {
+ ReceiverMocks mocks;
+ RTCPReceiver receiver(DefaultConfiguration(&mocks), &mocks.rtp_rtcp_impl);
+ receiver.SetRemoteSSRC(kSenderSsrc);
+
+ rtcp::Pli pli;
+ pli.SetMediaSsrc(kReceiverMainSsrc);
+
+ EXPECT_CALL(
+ mocks.packet_type_counter_observer,
+ RtcpPacketTypesCounterUpdated(
+ kReceiverMainSsrc, Field(&RtcpPacketTypeCounter::pli_packets, 1)));
+ EXPECT_CALL(mocks.intra_frame_observer,
+ OnReceivedIntraFrameRequest(kReceiverMainSsrc));
+ receiver.IncomingPacket(pli.Build());
+}
+
+TEST(RtcpReceiverTest, PliPacketNotToUsIgnored) {
+ ReceiverMocks mocks;
+ RTCPReceiver receiver(DefaultConfiguration(&mocks), &mocks.rtp_rtcp_impl);
+ receiver.SetRemoteSSRC(kSenderSsrc);
+
+ rtcp::Pli pli;
+ pli.SetMediaSsrc(kNotToUsSsrc);
+
+ EXPECT_CALL(
+ mocks.packet_type_counter_observer,
+ RtcpPacketTypesCounterUpdated(
+ kReceiverMainSsrc, Field(&RtcpPacketTypeCounter::pli_packets, 0)));
+ EXPECT_CALL(mocks.intra_frame_observer, OnReceivedIntraFrameRequest).Times(0);
+ receiver.IncomingPacket(pli.Build());
+}
+
+TEST(RtcpReceiverTest, InjectFirPacket) {
+ ReceiverMocks mocks;
+ RTCPReceiver receiver(DefaultConfiguration(&mocks), &mocks.rtp_rtcp_impl);
+ receiver.SetRemoteSSRC(kSenderSsrc);
+
+ rtcp::Fir fir;
+ fir.AddRequestTo(kReceiverMainSsrc, 13);
+
+ EXPECT_CALL(
+ mocks.packet_type_counter_observer,
+ RtcpPacketTypesCounterUpdated(
+ kReceiverMainSsrc, Field(&RtcpPacketTypeCounter::fir_packets, 1)));
+ EXPECT_CALL(mocks.intra_frame_observer,
+ OnReceivedIntraFrameRequest(kReceiverMainSsrc));
+ receiver.IncomingPacket(fir.Build());
+}
+
+TEST(RtcpReceiverTest, FirPacketNotToUsIgnored) {
+ ReceiverMocks mocks;
+ RTCPReceiver receiver(DefaultConfiguration(&mocks), &mocks.rtp_rtcp_impl);
+ receiver.SetRemoteSSRC(kSenderSsrc);
+
+ rtcp::Fir fir;
+ fir.AddRequestTo(kNotToUsSsrc, 13);
+
+ EXPECT_CALL(mocks.intra_frame_observer, OnReceivedIntraFrameRequest).Times(0);
+ receiver.IncomingPacket(fir.Build());
+}
+
+TEST(RtcpReceiverTest, ExtendedReportsPacketWithZeroReportBlocksIgnored) {
+ ReceiverMocks mocks;
+ RTCPReceiver receiver(DefaultConfiguration(&mocks), &mocks.rtp_rtcp_impl);
+ receiver.SetRemoteSSRC(kSenderSsrc);
+
+ rtcp::ExtendedReports xr;
+ xr.SetSenderSsrc(kSenderSsrc);
+
+ receiver.IncomingPacket(xr.Build());
+}
+
+TEST(RtcpReceiverTest, InjectExtendedReportsReceiverReferenceTimePacket) {
+ ReceiverMocks mocks;
+ RTCPReceiver receiver(DefaultConfiguration(&mocks), &mocks.rtp_rtcp_impl);
+ receiver.SetRemoteSSRC(kSenderSsrc);
+
+ const NtpTime kNtp(0x10203, 0x40506);
+ rtcp::Rrtr rrtr;
+ rrtr.SetNtp(kNtp);
+ rtcp::ExtendedReports xr;
+ xr.SetSenderSsrc(kSenderSsrc);
+ xr.SetRrtr(rrtr);
+
+ std::vector<rtcp::ReceiveTimeInfo> last_xr_rtis =
+ receiver.ConsumeReceivedXrReferenceTimeInfo();
+ EXPECT_THAT(last_xr_rtis, IsEmpty());
+
+ receiver.IncomingPacket(xr.Build());
+
+ last_xr_rtis = receiver.ConsumeReceivedXrReferenceTimeInfo();
+ ASSERT_THAT(last_xr_rtis, SizeIs(1));
+ EXPECT_EQ(kSenderSsrc, last_xr_rtis[0].ssrc);
+ EXPECT_EQ(CompactNtp(kNtp), last_xr_rtis[0].last_rr);
+ EXPECT_EQ(0U, last_xr_rtis[0].delay_since_last_rr);
+}
+
+TEST(RtcpReceiverTest, ExtendedReportsDlrrPacketNotToUsIgnored) {
+ ReceiverMocks mocks;
+ auto config = DefaultConfiguration(&mocks);
+  // Allow calculating RTT using DLRR/RRTR, simulating the media receiver
+  // side.
+ config.non_sender_rtt_measurement = true;
+ RTCPReceiver receiver(config, &mocks.rtp_rtcp_impl);
+ receiver.SetRemoteSSRC(kSenderSsrc);
+
+ rtcp::ExtendedReports xr;
+ xr.SetSenderSsrc(kSenderSsrc);
+ xr.AddDlrrItem(ReceiveTimeInfo(kNotToUsSsrc, 0x12345, 0x67890));
+
+ receiver.IncomingPacket(xr.Build());
+
+ int64_t rtt_ms = 0;
+ EXPECT_FALSE(receiver.GetAndResetXrRrRtt(&rtt_ms));
+ RTCPReceiver::NonSenderRttStats non_sender_rtt_stats =
+ receiver.GetNonSenderRTT();
+ EXPECT_FALSE(non_sender_rtt_stats.round_trip_time().has_value());
+ EXPECT_TRUE(non_sender_rtt_stats.total_round_trip_time().IsZero());
+ EXPECT_EQ(non_sender_rtt_stats.round_trip_time_measurements(), 0);
+}
+
+TEST(RtcpReceiverTest, InjectExtendedReportsDlrrPacketWithSubBlock) {
+ ReceiverMocks mocks;
+ auto config = DefaultConfiguration(&mocks);
+ config.non_sender_rtt_measurement = true;
+ RTCPReceiver receiver(config, &mocks.rtp_rtcp_impl);
+ receiver.SetRemoteSSRC(kSenderSsrc);
+
+ const uint32_t kLastRR = 0x12345;
+ const uint32_t kDelay = 0x23456;
+ int64_t rtt_ms = 0;
+ EXPECT_FALSE(receiver.GetAndResetXrRrRtt(&rtt_ms));
+
+ rtcp::ExtendedReports xr;
+ xr.SetSenderSsrc(kSenderSsrc);
+ xr.AddDlrrItem(ReceiveTimeInfo(kReceiverMainSsrc, kLastRR, kDelay));
+
+ receiver.IncomingPacket(xr.Build());
+
+ uint32_t compact_ntp_now = CompactNtp(mocks.clock.CurrentNtpTime());
+ EXPECT_TRUE(receiver.GetAndResetXrRrRtt(&rtt_ms));
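+  // The DLRR-based RTT mirrors the report block formula: arrival time minus
+  // the reported delay minus the echoed last-RR timestamp.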
+ uint32_t rtt_ntp = compact_ntp_now - kDelay - kLastRR;
+ EXPECT_NEAR(CompactNtpRttToTimeDelta(rtt_ntp).ms(), rtt_ms, 1);
+ RTCPReceiver::NonSenderRttStats non_sender_rtt_stats =
+ receiver.GetNonSenderRTT();
+ EXPECT_GT(non_sender_rtt_stats.round_trip_time(), TimeDelta::Zero());
+ EXPECT_FALSE(non_sender_rtt_stats.total_round_trip_time().IsZero());
+ EXPECT_GT(non_sender_rtt_stats.round_trip_time_measurements(), 0);
+}
+
+TEST(RtcpReceiverTest, InjectExtendedReportsDlrrPacketWithMultipleSubBlocks) {
+ ReceiverMocks mocks;
+ auto config = DefaultConfiguration(&mocks);
+ config.non_sender_rtt_measurement = true;
+ RTCPReceiver receiver(config, &mocks.rtp_rtcp_impl);
+ receiver.SetRemoteSSRC(kSenderSsrc);
+
+ const uint32_t kLastRR = 0x12345;
+ const uint32_t kDelay = 0x56789;
+
+ rtcp::ExtendedReports xr;
+ xr.SetSenderSsrc(kSenderSsrc);
+ xr.AddDlrrItem(ReceiveTimeInfo(kReceiverMainSsrc, kLastRR, kDelay));
+ xr.AddDlrrItem(ReceiveTimeInfo(kReceiverMainSsrc + 1, 0x12345, 0x67890));
+ xr.AddDlrrItem(ReceiveTimeInfo(kReceiverMainSsrc + 2, 0x12345, 0x67890));
+
+ receiver.IncomingPacket(xr.Build());
+
+ uint32_t compact_ntp_now = CompactNtp(mocks.clock.CurrentNtpTime());
+ int64_t rtt_ms = 0;
+ EXPECT_TRUE(receiver.GetAndResetXrRrRtt(&rtt_ms));
+ uint32_t rtt_ntp = compact_ntp_now - kDelay - kLastRR;
+ EXPECT_NEAR(CompactNtpRttToTimeDelta(rtt_ntp).ms(), rtt_ms, 1);
+ RTCPReceiver::NonSenderRttStats non_sender_rtt_stats =
+ receiver.GetNonSenderRTT();
+ EXPECT_GT(non_sender_rtt_stats.round_trip_time(), TimeDelta::Zero());
+ EXPECT_FALSE(non_sender_rtt_stats.total_round_trip_time().IsZero());
+ EXPECT_GT(non_sender_rtt_stats.round_trip_time_measurements(), 0);
+}
+
+TEST(RtcpReceiverTest, InjectExtendedReportsPacketWithMultipleReportBlocks) {
+ ReceiverMocks mocks;
+ auto config = DefaultConfiguration(&mocks);
+ config.non_sender_rtt_measurement = true;
+ RTCPReceiver receiver(config, &mocks.rtp_rtcp_impl);
+ receiver.SetRemoteSSRC(kSenderSsrc);
+
+ rtcp::Rrtr rrtr;
+ rtcp::ExtendedReports xr;
+ xr.SetSenderSsrc(kSenderSsrc);
+ xr.SetRrtr(rrtr);
+ xr.AddDlrrItem(ReceiveTimeInfo(kReceiverMainSsrc, 0x12345, 0x67890));
+
+ receiver.IncomingPacket(xr.Build());
+
+ std::vector<rtcp::ReceiveTimeInfo> last_xr_rtis =
+ receiver.ConsumeReceivedXrReferenceTimeInfo();
+ EXPECT_THAT(last_xr_rtis, SizeIs(1));
+ int64_t rtt_ms = 0;
+ EXPECT_TRUE(receiver.GetAndResetXrRrRtt(&rtt_ms));
+}
+
+TEST(RtcpReceiverTest, InjectExtendedReportsPacketWithUnknownReportBlock) {
+ ReceiverMocks mocks;
+ auto config = DefaultConfiguration(&mocks);
+ config.non_sender_rtt_measurement = true;
+ RTCPReceiver receiver(config, &mocks.rtp_rtcp_impl);
+ receiver.SetRemoteSSRC(kSenderSsrc);
+
+ rtcp::Rrtr rrtr;
+ rtcp::ExtendedReports xr;
+ xr.SetSenderSsrc(kSenderSsrc);
+ xr.SetRrtr(rrtr);
+ xr.AddDlrrItem(ReceiveTimeInfo(kReceiverMainSsrc, 0x12345, 0x67890));
+
+ rtc::Buffer packet = xr.Build();
+ // Modify the DLRR block to have an unsupported block type, from 5 to 6.
+ ASSERT_EQ(5, packet.data()[20]);
+ packet.data()[20] = 6;
+ receiver.IncomingPacket(packet);
+
+ // Validate Rrtr was received and processed.
+ std::vector<rtcp::ReceiveTimeInfo> last_xr_rtis =
+ receiver.ConsumeReceivedXrReferenceTimeInfo();
+ EXPECT_THAT(last_xr_rtis, SizeIs(1));
+ // Validate Dlrr report wasn't processed.
+ int64_t rtt_ms = 0;
+ EXPECT_FALSE(receiver.GetAndResetXrRrRtt(&rtt_ms));
+ RTCPReceiver::NonSenderRttStats non_sender_rtt_stats =
+ receiver.GetNonSenderRTT();
+ EXPECT_FALSE(non_sender_rtt_stats.round_trip_time().has_value());
+ EXPECT_TRUE(non_sender_rtt_stats.total_round_trip_time().IsZero());
+ EXPECT_EQ(non_sender_rtt_stats.round_trip_time_measurements(), 0);
+}
+
+TEST(RtcpReceiverTest, TestExtendedReportsRrRttInitiallyFalse) {
+ ReceiverMocks mocks;
+ auto config = DefaultConfiguration(&mocks);
+ config.non_sender_rtt_measurement = true;
+ RTCPReceiver receiver(config, &mocks.rtp_rtcp_impl);
+ receiver.SetRemoteSSRC(kSenderSsrc);
+
+ int64_t rtt_ms;
+ EXPECT_FALSE(receiver.GetAndResetXrRrRtt(&rtt_ms));
+ RTCPReceiver::NonSenderRttStats non_sender_rtt_stats =
+ receiver.GetNonSenderRTT();
+ EXPECT_FALSE(non_sender_rtt_stats.round_trip_time().has_value());
+ EXPECT_TRUE(non_sender_rtt_stats.total_round_trip_time().IsZero());
+ EXPECT_EQ(non_sender_rtt_stats.round_trip_time_measurements(), 0);
+}
+
+TEST(RtcpReceiverTest, RttCalculatedAfterExtendedReportsDlrr) {
+ ReceiverMocks mocks;
+ auto config = DefaultConfiguration(&mocks);
+ config.non_sender_rtt_measurement = true;
+ RTCPReceiver receiver(config, &mocks.rtp_rtcp_impl);
+ receiver.SetRemoteSSRC(kSenderSsrc);
+
+ Random rand(0x0123456789abcdef);
+ const TimeDelta kRtt = TimeDelta::Millis(rand.Rand(1, 9 * 3600 * 1000));
+ const uint32_t kDelayNtp = rand.Rand(0, 0x7fffffff);
+ const TimeDelta kDelay = CompactNtpRttToTimeDelta(kDelayNtp);
+ NtpTime now = mocks.clock.CurrentNtpTime();
+ uint32_t sent_ntp = CompactNtp(now);
+ mocks.clock.AdvanceTime(kRtt + kDelay);
+
+ rtcp::ExtendedReports xr;
+ xr.SetSenderSsrc(kSenderSsrc);
+ xr.AddDlrrItem(ReceiveTimeInfo(kReceiverMainSsrc, sent_ntp, kDelayNtp));
+
+ receiver.IncomingPacket(xr.Build());
+
+ int64_t rtt_ms = 0;
+ EXPECT_TRUE(receiver.GetAndResetXrRrRtt(&rtt_ms));
+ EXPECT_NEAR(kRtt.ms(), rtt_ms, 1);
+ RTCPReceiver::NonSenderRttStats non_sender_rtt_stats =
+ receiver.GetNonSenderRTT();
+ EXPECT_TRUE(non_sender_rtt_stats.round_trip_time().has_value());
+ EXPECT_FALSE(non_sender_rtt_stats.round_trip_time().value().IsZero());
+ EXPECT_FALSE(non_sender_rtt_stats.total_round_trip_time().IsZero());
+ EXPECT_GT(non_sender_rtt_stats.round_trip_time_measurements(), 0);
+}
+
+// Same test as above but enables receive-side RTT using the setter instead of
+// the config struct.
+TEST(RtcpReceiverTest, SetterEnablesReceiverRtt) {
+ ReceiverMocks mocks;
+ auto config = DefaultConfiguration(&mocks);
+ config.non_sender_rtt_measurement = false;
+ RTCPReceiver receiver(config, &mocks.rtp_rtcp_impl);
+ receiver.SetRemoteSSRC(kSenderSsrc);
+ receiver.SetNonSenderRttMeasurement(true);
+
+ Random rand(0x0123456789abcdef);
+ const TimeDelta kRtt = TimeDelta::Millis(rand.Rand(1, 9 * 3600 * 1000));
+ const uint32_t kDelayNtp = rand.Rand(0, 0x7fffffff);
+ const TimeDelta kDelay = CompactNtpRttToTimeDelta(kDelayNtp);
+ NtpTime now = mocks.clock.CurrentNtpTime();
+ uint32_t sent_ntp = CompactNtp(now);
+ mocks.clock.AdvanceTime(kRtt + kDelay);
+
+ rtcp::ExtendedReports xr;
+ xr.SetSenderSsrc(kSenderSsrc);
+ xr.AddDlrrItem(ReceiveTimeInfo(kReceiverMainSsrc, sent_ntp, kDelayNtp));
+
+ receiver.IncomingPacket(xr.Build());
+
+ int64_t rtt_ms = 0;
+ EXPECT_TRUE(receiver.GetAndResetXrRrRtt(&rtt_ms));
+ EXPECT_NEAR(rtt_ms, kRtt.ms(), 1);
+ RTCPReceiver::NonSenderRttStats non_sender_rtt_stats =
+ receiver.GetNonSenderRTT();
+ EXPECT_TRUE(non_sender_rtt_stats.round_trip_time().has_value());
+ EXPECT_FALSE(non_sender_rtt_stats.round_trip_time().value().IsZero());
+ EXPECT_FALSE(non_sender_rtt_stats.total_round_trip_time().IsZero());
+ EXPECT_GT(non_sender_rtt_stats.round_trip_time_measurements(), 0);
+}
+
+// Same test as above but disables receive-side RTT using the setter instead of
+// the config struct.
+TEST(RtcpReceiverTest, DoesntCalculateRttOnReceivedDlrr) {
+ ReceiverMocks mocks;
+ auto config = DefaultConfiguration(&mocks);
+ config.non_sender_rtt_measurement = true;
+ RTCPReceiver receiver(config, &mocks.rtp_rtcp_impl);
+ receiver.SetRemoteSSRC(kSenderSsrc);
+ receiver.SetNonSenderRttMeasurement(false);
+
+ Random rand(0x0123456789abcdef);
+ const TimeDelta kRtt = TimeDelta::Millis(rand.Rand(1, 9 * 3600 * 1000));
+ const uint32_t kDelayNtp = rand.Rand(0, 0x7fffffff);
+ const TimeDelta kDelay = CompactNtpRttToTimeDelta(kDelayNtp);
+ NtpTime now = mocks.clock.CurrentNtpTime();
+ uint32_t sent_ntp = CompactNtp(now);
+ mocks.clock.AdvanceTime(kRtt + kDelay);
+
+ rtcp::ExtendedReports xr;
+ xr.SetSenderSsrc(kSenderSsrc);
+ xr.AddDlrrItem(ReceiveTimeInfo(kReceiverMainSsrc, sent_ntp, kDelayNtp));
+
+ receiver.IncomingPacket(xr.Build());
+
+ // We expect that no RTT is available (because receive-side RTT was disabled).
+ int64_t rtt_ms = 0;
+ EXPECT_FALSE(receiver.GetAndResetXrRrRtt(&rtt_ms));
+ RTCPReceiver::NonSenderRttStats non_sender_rtt_stats =
+ receiver.GetNonSenderRTT();
+ EXPECT_FALSE(non_sender_rtt_stats.round_trip_time().has_value());
+ EXPECT_TRUE(non_sender_rtt_stats.total_round_trip_time().IsZero());
+ EXPECT_EQ(non_sender_rtt_stats.round_trip_time_measurements(), 0);
+}
+
+TEST(RtcpReceiverTest, XrDlrrCalculatesNegativeRttAsOne) {
+ ReceiverMocks mocks;
+ auto config = DefaultConfiguration(&mocks);
+ config.non_sender_rtt_measurement = true;
+ RTCPReceiver receiver(config, &mocks.rtp_rtcp_impl);
+ receiver.SetRemoteSSRC(kSenderSsrc);
+
+ Random rand(0x0123456789abcdef);
+ const TimeDelta kRtt = TimeDelta::Millis(rand.Rand(-3600 * 1000, -1));
+ const uint32_t kDelayNtp = rand.Rand(0, 0x7fffffff);
+ const TimeDelta kDelay = CompactNtpRttToTimeDelta(kDelayNtp);
+ NtpTime now = mocks.clock.CurrentNtpTime();
+ uint32_t sent_ntp = CompactNtp(now);
+ mocks.clock.AdvanceTime(kRtt + kDelay);
+
+ rtcp::ExtendedReports xr;
+ xr.SetSenderSsrc(kSenderSsrc);
+ xr.AddDlrrItem(ReceiveTimeInfo(kReceiverMainSsrc, sent_ntp, kDelayNtp));
+
+ receiver.IncomingPacket(xr.Build());
+
+ int64_t rtt_ms = 0;
+ EXPECT_TRUE(receiver.GetAndResetXrRrRtt(&rtt_ms));
+ EXPECT_EQ(1, rtt_ms);
+ RTCPReceiver::NonSenderRttStats non_sender_rtt_stats =
+ receiver.GetNonSenderRTT();
+ EXPECT_TRUE(non_sender_rtt_stats.round_trip_time().has_value());
+ EXPECT_FALSE(non_sender_rtt_stats.round_trip_time().value().IsZero());
+ EXPECT_FALSE(non_sender_rtt_stats.total_round_trip_time().IsZero());
+ EXPECT_GT(non_sender_rtt_stats.round_trip_time_measurements(), 0);
+}
+
+// Test receiver RTT stats with multiple measurements.
+TEST(RtcpReceiverTest, ReceiverRttWithMultipleMeasurements) {
+ ReceiverMocks mocks;
+ auto config = DefaultConfiguration(&mocks);
+ config.non_sender_rtt_measurement = true;
+ RTCPReceiver receiver(config, &mocks.rtp_rtcp_impl);
+ receiver.SetRemoteSSRC(kSenderSsrc);
+
+ Random rand(0x0123456789abcdef);
+ const TimeDelta kRtt = TimeDelta::Millis(rand.Rand(1, 9 * 3600 * 1000));
+ const uint32_t kDelayNtp = rand.Rand(0, 0x7fffffff);
+ const TimeDelta kDelay = CompactNtpRttToTimeDelta(kDelayNtp);
+ NtpTime now = mocks.clock.CurrentNtpTime();
+ uint32_t sent_ntp = CompactNtp(now);
+ mocks.clock.AdvanceTime(kRtt + kDelay);
+
+ rtcp::ExtendedReports xr;
+ xr.SetSenderSsrc(kSenderSsrc);
+ xr.AddDlrrItem(ReceiveTimeInfo(kReceiverMainSsrc, sent_ntp, kDelayNtp));
+
+ receiver.IncomingPacket(xr.Build());
+
+ // Check that the non-sender RTT stats are valid and based on a single
+ // measurement.
+ RTCPReceiver::NonSenderRttStats non_sender_rtt_stats =
+ receiver.GetNonSenderRTT();
+ EXPECT_TRUE(non_sender_rtt_stats.round_trip_time().has_value());
+ EXPECT_NEAR(non_sender_rtt_stats.round_trip_time()->ms(), kRtt.ms(), 1);
+ EXPECT_EQ(non_sender_rtt_stats.round_trip_time_measurements(), 1);
+ EXPECT_EQ(non_sender_rtt_stats.total_round_trip_time().ms(),
+ non_sender_rtt_stats.round_trip_time()->ms());
+
+ // Generate another XR report with the same RTT and delay.
+ NtpTime now2 = mocks.clock.CurrentNtpTime();
+ uint32_t sent_ntp2 = CompactNtp(now2);
+ mocks.clock.AdvanceTime(kRtt + kDelay);
+
+ rtcp::ExtendedReports xr2;
+ xr2.SetSenderSsrc(kSenderSsrc);
+ xr2.AddDlrrItem(ReceiveTimeInfo(kReceiverMainSsrc, sent_ntp2, kDelayNtp));
+
+ receiver.IncomingPacket(xr2.Build());
+
+ // Check that the non-sender RTT stats are based on 2 measurements, and that
+ // the values are as expected.
+ non_sender_rtt_stats = receiver.GetNonSenderRTT();
+ EXPECT_TRUE(non_sender_rtt_stats.round_trip_time().has_value());
+ EXPECT_NEAR(non_sender_rtt_stats.round_trip_time()->ms(), kRtt.ms(), 1);
+ EXPECT_EQ(non_sender_rtt_stats.round_trip_time_measurements(), 2);
+ EXPECT_NEAR(non_sender_rtt_stats.total_round_trip_time().ms(), 2 * kRtt.ms(),
+ 2);
+}
+
+// Test that the receiver RTT stat resets when receiving an SR without an XR.
+// This behavior is described in the standard, see
+// https://www.w3.org/TR/webrtc-stats/#dom-rtcremoteoutboundrtpstreamstats-roundtriptime.
+TEST(RtcpReceiverTest, ReceiverRttResetOnSrWithoutXr) {
+ ReceiverMocks mocks;
+ auto config = DefaultConfiguration(&mocks);
+ config.non_sender_rtt_measurement = true;
+ RTCPReceiver receiver(config, &mocks.rtp_rtcp_impl);
+ receiver.SetRemoteSSRC(kSenderSsrc);
+
+ Random rand(0x0123456789abcdef);
+ const TimeDelta kRtt = TimeDelta::Millis(rand.Rand(1, 9 * 3600 * 1000));
+ const uint32_t kDelayNtp = rand.Rand(0, 0x7fffffff);
+ const TimeDelta kDelay = CompactNtpRttToTimeDelta(kDelayNtp);
+ NtpTime now = mocks.clock.CurrentNtpTime();
+ uint32_t sent_ntp = CompactNtp(now);
+ mocks.clock.AdvanceTime(kRtt + kDelay);
+
+ rtcp::ExtendedReports xr;
+ xr.SetSenderSsrc(kSenderSsrc);
+ xr.AddDlrrItem(ReceiveTimeInfo(kReceiverMainSsrc, sent_ntp, kDelayNtp));
+
+ receiver.IncomingPacket(xr.Build());
+
+ RTCPReceiver::NonSenderRttStats non_sender_rtt_stats =
+ receiver.GetNonSenderRTT();
+ EXPECT_TRUE(non_sender_rtt_stats.round_trip_time().has_value());
+ EXPECT_NEAR(non_sender_rtt_stats.round_trip_time()->ms(), kRtt.ms(), 1);
+
+  // Generate an SR without an XR.
+ rtcp::ReportBlock rb;
+ rb.SetMediaSsrc(kReceiverMainSsrc);
+ rtcp::SenderReport sr;
+ sr.SetSenderSsrc(kSenderSsrc);
+ sr.AddReportBlock(rb);
+ EXPECT_CALL(mocks.rtp_rtcp_impl, OnReceivedRtcpReportBlocks);
+ EXPECT_CALL(mocks.bandwidth_observer, OnReceivedRtcpReceiverReport);
+
+ receiver.IncomingPacket(sr.Build());
+
+ // Check that the non-sender RTT stat is not set.
+ non_sender_rtt_stats = receiver.GetNonSenderRTT();
+ EXPECT_FALSE(non_sender_rtt_stats.round_trip_time().has_value());
+}
+
+// Test that the receiver RTT stat resets when receiving a DLRR with a timestamp
+// of zero. This behavior is described in the standard, see
+// https://www.w3.org/TR/webrtc-stats/#dom-rtcremoteoutboundrtpstreamstats-roundtriptime.
+TEST(RtcpReceiverTest, ReceiverRttResetOnDlrrWithZeroTimestamp) {
+ ReceiverMocks mocks;
+ auto config = DefaultConfiguration(&mocks);
+ config.non_sender_rtt_measurement = true;
+ RTCPReceiver receiver(config, &mocks.rtp_rtcp_impl);
+ receiver.SetRemoteSSRC(kSenderSsrc);
+
+ Random rand(0x0123456789abcdef);
+ const TimeDelta kRtt = TimeDelta::Millis(rand.Rand(1, 9 * 3600 * 1000));
+ const uint32_t kDelayNtp = rand.Rand(0, 0x7fffffff);
+ const TimeDelta kDelay = CompactNtpRttToTimeDelta(kDelayNtp);
+ NtpTime now = mocks.clock.CurrentNtpTime();
+ uint32_t sent_ntp = CompactNtp(now);
+ mocks.clock.AdvanceTime(kRtt + kDelay);
+
+ rtcp::ExtendedReports xr;
+ xr.SetSenderSsrc(kSenderSsrc);
+ xr.AddDlrrItem(ReceiveTimeInfo(kReceiverMainSsrc, sent_ntp, kDelayNtp));
+
+ receiver.IncomingPacket(xr.Build());
+
+ RTCPReceiver::NonSenderRttStats non_sender_rtt_stats =
+ receiver.GetNonSenderRTT();
+ EXPECT_TRUE(non_sender_rtt_stats.round_trip_time().has_value());
+ EXPECT_NEAR(non_sender_rtt_stats.round_trip_time()->ms(), kRtt.ms(), 1);
+
+ // Generate an XR+DLRR with zero timestamp.
+ rtcp::ExtendedReports xr2;
+ xr2.SetSenderSsrc(kSenderSsrc);
+ xr2.AddDlrrItem(ReceiveTimeInfo(kReceiverMainSsrc, 0, kDelayNtp));
+
+ receiver.IncomingPacket(xr2.Build());
+
+ // Check that the non-sender RTT stat is not set.
+ non_sender_rtt_stats = receiver.GetNonSenderRTT();
+ EXPECT_FALSE(non_sender_rtt_stats.round_trip_time().has_value());
+}
+
+// Check that the receiver RTT works correctly when the remote SSRC changes.
+TEST(RtcpReceiverTest, ReceiverRttWithMultipleRemoteSsrcs) {
+ ReceiverMocks mocks;
+ auto config = DefaultConfiguration(&mocks);
+ config.non_sender_rtt_measurement = false;
+ RTCPReceiver receiver(config, &mocks.rtp_rtcp_impl);
+ receiver.SetRemoteSSRC(kSenderSsrc);
+ receiver.SetNonSenderRttMeasurement(true);
+
+ Random rand(0x0123456789abcdef);
+ const TimeDelta kRtt = TimeDelta::Millis(rand.Rand(1, 9 * 3600 * 1000));
+ const uint32_t kDelayNtp = rand.Rand(0, 0x7fffffff);
+ const TimeDelta kDelay = CompactNtpRttToTimeDelta(kDelayNtp);
+ NtpTime now = mocks.clock.CurrentNtpTime();
+ uint32_t sent_ntp = CompactNtp(now);
+ mocks.clock.AdvanceTime(kRtt + kDelay);
+
+ rtcp::ExtendedReports xr;
+ xr.SetSenderSsrc(kSenderSsrc);
+ xr.AddDlrrItem(ReceiveTimeInfo(kReceiverMainSsrc, sent_ntp, kDelayNtp));
+
+ receiver.IncomingPacket(xr.Build());
+
+ // Generate an XR report for another SSRC.
+ const TimeDelta kRtt2 = TimeDelta::Millis(rand.Rand(1, 9 * 3600 * 1000));
+ const uint32_t kDelayNtp2 = rand.Rand(0, 0x7fffffff);
+ const TimeDelta kDelay2 = CompactNtpRttToTimeDelta(kDelayNtp2);
+ NtpTime now2 = mocks.clock.CurrentNtpTime();
+ uint32_t sent_ntp2 = CompactNtp(now2);
+ mocks.clock.AdvanceTime(kRtt2 + kDelay2);
+
+ rtcp::ExtendedReports xr2;
+ xr2.SetSenderSsrc(kSenderSsrc + 1);
+ xr2.AddDlrrItem(ReceiveTimeInfo(kReceiverMainSsrc, sent_ntp2, kDelayNtp2));
+
+ receiver.IncomingPacket(xr2.Build());
+
+ // Check that the non-sender RTT stats match the first XR.
+ RTCPReceiver::NonSenderRttStats non_sender_rtt_stats =
+ receiver.GetNonSenderRTT();
+ EXPECT_TRUE(non_sender_rtt_stats.round_trip_time().has_value());
+ EXPECT_NEAR(non_sender_rtt_stats.round_trip_time()->ms(), kRtt.ms(), 1);
+ EXPECT_FALSE(non_sender_rtt_stats.total_round_trip_time().IsZero());
+ EXPECT_GT(non_sender_rtt_stats.round_trip_time_measurements(), 0);
+
+ // Change the remote SSRC and check that the stats match the second XR.
+ receiver.SetRemoteSSRC(kSenderSsrc + 1);
+ RTCPReceiver::NonSenderRttStats non_sender_rtt_stats2 =
+ receiver.GetNonSenderRTT();
+ EXPECT_TRUE(non_sender_rtt_stats2.round_trip_time().has_value());
+ EXPECT_NEAR(non_sender_rtt_stats2.round_trip_time()->ms(), kRtt2.ms(), 1);
+ EXPECT_FALSE(non_sender_rtt_stats2.total_round_trip_time().IsZero());
+ EXPECT_GT(non_sender_rtt_stats2.round_trip_time_measurements(), 0);
+}
+
+TEST(RtcpReceiverTest, ConsumeReceivedXrReferenceTimeInfoInitiallyEmpty) {
+ ReceiverMocks mocks;
+ RTCPReceiver receiver(DefaultConfiguration(&mocks), &mocks.rtp_rtcp_impl);
+ receiver.SetRemoteSSRC(kSenderSsrc);
+
+ EXPECT_THAT(receiver.ConsumeReceivedXrReferenceTimeInfo(), IsEmpty());
+}
+
+TEST(RtcpReceiverTest, ConsumeReceivedXrReferenceTimeInfo) {
+ ReceiverMocks mocks;
+ RTCPReceiver receiver(DefaultConfiguration(&mocks), &mocks.rtp_rtcp_impl);
+ receiver.SetRemoteSSRC(kSenderSsrc);
+
+ const NtpTime kNtp(0x10203, 0x40506);
+ const uint32_t kNtpMid = CompactNtp(kNtp);
+
+ rtcp::Rrtr rrtr;
+ rrtr.SetNtp(kNtp);
+ rtcp::ExtendedReports xr;
+ xr.SetSenderSsrc(kSenderSsrc);
+ xr.SetRrtr(rrtr);
+
+ receiver.IncomingPacket(xr.Build());
+
+ mocks.clock.AdvanceTimeMilliseconds(1000);
+
+ std::vector<rtcp::ReceiveTimeInfo> last_xr_rtis =
+ receiver.ConsumeReceivedXrReferenceTimeInfo();
+ ASSERT_THAT(last_xr_rtis, SizeIs(1));
+ EXPECT_EQ(kSenderSsrc, last_xr_rtis[0].ssrc);
+ EXPECT_EQ(kNtpMid, last_xr_rtis[0].last_rr);
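+  // delay_since_last_rr is in compact NTP units (1/65536 s), so the 1 s
+  // clock advance above is reported as 65536.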
+ EXPECT_EQ(65536U, last_xr_rtis[0].delay_since_last_rr);
+}
+
+TEST(RtcpReceiverTest,
+ ReceivedRrtrFromSameSsrcUpdatesReceivedReferenceTimeInfo) {
+ ReceiverMocks mocks;
+ RTCPReceiver receiver(DefaultConfiguration(&mocks), &mocks.rtp_rtcp_impl);
+ receiver.SetRemoteSSRC(kSenderSsrc);
+
+ const NtpTime kNtp1(0x10203, 0x40506);
+ const NtpTime kNtp2(0x11223, 0x44556);
+ const int64_t kDelayMs = 2000;
+
+ rtcp::ExtendedReports xr;
+ xr.SetSenderSsrc(kSenderSsrc);
+ rtcp::Rrtr rrtr1;
+ rrtr1.SetNtp(kNtp1);
+ xr.SetRrtr(rrtr1);
+ receiver.IncomingPacket(xr.Build());
+ mocks.clock.AdvanceTimeMilliseconds(kDelayMs);
+ rtcp::Rrtr rrtr2;
+ rrtr2.SetNtp(kNtp2);
+ xr.SetRrtr(rrtr2);
+ receiver.IncomingPacket(xr.Build());
+ mocks.clock.AdvanceTimeMilliseconds(kDelayMs);
+
+ std::vector<rtcp::ReceiveTimeInfo> last_xr_rtis =
+ receiver.ConsumeReceivedXrReferenceTimeInfo();
+ ASSERT_THAT(last_xr_rtis, SizeIs(1));
+ EXPECT_EQ(kSenderSsrc, last_xr_rtis[0].ssrc);
+ EXPECT_EQ(CompactNtp(kNtp2), last_xr_rtis[0].last_rr);
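+  // kDelayMs converted to compact NTP units of 1/65536 s.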
+ EXPECT_EQ(kDelayMs * 65536 / 1000, last_xr_rtis[0].delay_since_last_rr);
+}
+
+TEST(RtcpReceiverTest, StoresLastReceivedRrtrPerSsrc) {
+ ReceiverMocks mocks;
+ RTCPReceiver receiver(DefaultConfiguration(&mocks), &mocks.rtp_rtcp_impl);
+ receiver.SetRemoteSSRC(kSenderSsrc);
+
+ const size_t kNumBufferedReports = 1;
+ const size_t kNumReports =
+ rtcp::ExtendedReports::kMaxNumberOfDlrrItems + kNumBufferedReports;
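+  // One more RRTR than a single consume call returns; the leftover shows up
+  // in the second call below.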
+ for (size_t i = 0; i < kNumReports; ++i) {
+ rtcp::ExtendedReports xr;
+ xr.SetSenderSsrc(i * 100);
+ rtcp::Rrtr rrtr;
+ rrtr.SetNtp(NtpTime(i * 200, i * 300));
+ xr.SetRrtr(rrtr);
+ receiver.IncomingPacket(xr.Build());
+ mocks.clock.AdvanceTimeMilliseconds(1000);
+ }
+
+ std::vector<rtcp::ReceiveTimeInfo> last_xr_rtis =
+ receiver.ConsumeReceivedXrReferenceTimeInfo();
+ ASSERT_THAT(last_xr_rtis,
+ SizeIs(rtcp::ExtendedReports::kMaxNumberOfDlrrItems));
+ for (size_t i = 0; i < rtcp::ExtendedReports::kMaxNumberOfDlrrItems; ++i) {
+ EXPECT_EQ(i * 100, last_xr_rtis[i].ssrc);
+ EXPECT_EQ(CompactNtp(NtpTime(i * 200, i * 300)), last_xr_rtis[i].last_rr);
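+    // Reports arrived 1 s apart, so delay_since_last_rr grows by 65536
+    // (compact NTP units) per step back in the list.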
+ EXPECT_EQ(65536U * (kNumReports - i), last_xr_rtis[i].delay_since_last_rr);
+ }
+
+ last_xr_rtis = receiver.ConsumeReceivedXrReferenceTimeInfo();
+ ASSERT_THAT(last_xr_rtis, SizeIs(kNumBufferedReports));
+}
+
+TEST(RtcpReceiverTest, ReceiveReportTimeout) {
+ ReceiverMocks mocks;
+ RTCPReceiver receiver(DefaultConfiguration(&mocks), &mocks.rtp_rtcp_impl);
+ receiver.SetRemoteSSRC(kSenderSsrc);
+
+ const uint16_t kSequenceNumber = 1234;
+ mocks.clock.AdvanceTimeMilliseconds(3 * kRtcpIntervalMs);
+
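+  // The RR timeout threshold is three RTCP intervals without a fresh report.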
+ // No RR received, shouldn't trigger a timeout.
+ EXPECT_FALSE(receiver.RtcpRrTimeout());
+ EXPECT_FALSE(receiver.RtcpRrSequenceNumberTimeout());
+
+  // Add an RR and advance the clock just enough to not trigger a timeout.
+ rtcp::ReportBlock rb1;
+ rb1.SetMediaSsrc(kReceiverMainSsrc);
+ rb1.SetExtHighestSeqNum(kSequenceNumber);
+ rtcp::ReceiverReport rr1;
+ rr1.SetSenderSsrc(kSenderSsrc);
+ rr1.AddReportBlock(rb1);
+
+ EXPECT_CALL(mocks.rtp_rtcp_impl, OnReceivedRtcpReportBlocks);
+ EXPECT_CALL(mocks.bandwidth_observer, OnReceivedRtcpReceiverReport);
+ receiver.IncomingPacket(rr1.Build());
+
+ mocks.clock.AdvanceTimeMilliseconds(3 * kRtcpIntervalMs - 1);
+ EXPECT_FALSE(receiver.RtcpRrTimeout());
+ EXPECT_FALSE(receiver.RtcpRrSequenceNumberTimeout());
+
+  // Add an RR with the same extended max as the previous RR to trigger a
+  // sequence number timeout, but not an RR timeout.
+ EXPECT_CALL(mocks.rtp_rtcp_impl, OnReceivedRtcpReportBlocks);
+ EXPECT_CALL(mocks.bandwidth_observer, OnReceivedRtcpReceiverReport);
+ receiver.IncomingPacket(rr1.Build());
+
+ mocks.clock.AdvanceTimeMilliseconds(2);
+ EXPECT_FALSE(receiver.RtcpRrTimeout());
+ EXPECT_TRUE(receiver.RtcpRrSequenceNumberTimeout());
+
+ // Advance clock enough to trigger an RR timeout too.
+ mocks.clock.AdvanceTimeMilliseconds(3 * kRtcpIntervalMs);
+ EXPECT_TRUE(receiver.RtcpRrTimeout());
+
+ // We should only get one timeout even though we still haven't received a new
+ // RR.
+ EXPECT_FALSE(receiver.RtcpRrTimeout());
+ EXPECT_FALSE(receiver.RtcpRrSequenceNumberTimeout());
+
+  // Add a new RR with an increased sequence number to reset the timers.
+ rtcp::ReportBlock rb2;
+ rb2.SetMediaSsrc(kReceiverMainSsrc);
+ rb2.SetExtHighestSeqNum(kSequenceNumber + 1);
+ rtcp::ReceiverReport rr2;
+ rr2.SetSenderSsrc(kSenderSsrc);
+ rr2.AddReportBlock(rb2);
+
+ EXPECT_CALL(mocks.rtp_rtcp_impl, OnReceivedRtcpReportBlocks);
+ EXPECT_CALL(mocks.bandwidth_observer, OnReceivedRtcpReceiverReport);
+ receiver.IncomingPacket(rr2.Build());
+
+ EXPECT_FALSE(receiver.RtcpRrTimeout());
+ EXPECT_FALSE(receiver.RtcpRrSequenceNumberTimeout());
+
+  // Verify we can get a timeout again once we've received a new RR.
+ mocks.clock.AdvanceTimeMilliseconds(2 * kRtcpIntervalMs);
+ EXPECT_CALL(mocks.rtp_rtcp_impl, OnReceivedRtcpReportBlocks);
+ EXPECT_CALL(mocks.bandwidth_observer, OnReceivedRtcpReceiverReport);
+ receiver.IncomingPacket(rr2.Build());
+
+ mocks.clock.AdvanceTimeMilliseconds(kRtcpIntervalMs + 1);
+ EXPECT_FALSE(receiver.RtcpRrTimeout());
+ EXPECT_TRUE(receiver.RtcpRrSequenceNumberTimeout());
+
+ mocks.clock.AdvanceTimeMilliseconds(2 * kRtcpIntervalMs);
+ EXPECT_TRUE(receiver.RtcpRrTimeout());
+}
+
+TEST(RtcpReceiverTest, TmmbrReceivedWithNoIncomingPacket) {
+ ReceiverMocks mocks;
+ RTCPReceiver receiver(DefaultConfiguration(&mocks), &mocks.rtp_rtcp_impl);
+ receiver.SetRemoteSSRC(kSenderSsrc);
+
+ EXPECT_THAT(receiver.TmmbrReceived(), IsEmpty());
+}
+
+TEST(RtcpReceiverTest, TmmbrPacketAccepted) {
+ ReceiverMocks mocks;
+ RTCPReceiver receiver(DefaultConfiguration(&mocks), &mocks.rtp_rtcp_impl);
+ receiver.SetRemoteSSRC(kSenderSsrc);
+
+ const uint32_t kBitrateBps = 30000;
+ auto tmmbr = std::make_unique<rtcp::Tmmbr>();
+ tmmbr->SetSenderSsrc(kSenderSsrc);
+ tmmbr->AddTmmbr(rtcp::TmmbItem(kReceiverMainSsrc, kBitrateBps, 0));
+ auto sr = std::make_unique<rtcp::SenderReport>();
+ sr->SetSenderSsrc(kSenderSsrc);
+ rtcp::CompoundPacket compound;
+ compound.Append(std::move(sr));
+ compound.Append(std::move(tmmbr));
+
+ EXPECT_CALL(mocks.rtp_rtcp_impl, OnReceivedRtcpReportBlocks);
+ EXPECT_CALL(mocks.rtp_rtcp_impl, SetTmmbn(SizeIs(1)));
+ EXPECT_CALL(mocks.bandwidth_observer, OnReceivedRtcpReceiverReport);
+ EXPECT_CALL(mocks.bandwidth_observer,
+ OnReceivedEstimatedBitrate(kBitrateBps));
+ receiver.IncomingPacket(compound.Build());
+
+ std::vector<rtcp::TmmbItem> tmmbr_received = receiver.TmmbrReceived();
+ ASSERT_EQ(1u, tmmbr_received.size());
+ EXPECT_EQ(kBitrateBps, tmmbr_received[0].bitrate_bps());
+ EXPECT_EQ(kSenderSsrc, tmmbr_received[0].ssrc());
+}
+
+TEST(RtcpReceiverTest, TmmbrPacketNotForUsIgnored) {
+ ReceiverMocks mocks;
+ RTCPReceiver receiver(DefaultConfiguration(&mocks), &mocks.rtp_rtcp_impl);
+ receiver.SetRemoteSSRC(kSenderSsrc);
+
+ const uint32_t kBitrateBps = 30000;
+ auto tmmbr = std::make_unique<rtcp::Tmmbr>();
+ tmmbr->SetSenderSsrc(kSenderSsrc);
+ tmmbr->AddTmmbr(rtcp::TmmbItem(kNotToUsSsrc, kBitrateBps, 0));
+
+ auto sr = std::make_unique<rtcp::SenderReport>();
+ sr->SetSenderSsrc(kSenderSsrc);
+ rtcp::CompoundPacket compound;
+ compound.Append(std::move(sr));
+ compound.Append(std::move(tmmbr));
+
+ EXPECT_CALL(mocks.rtp_rtcp_impl, OnReceivedRtcpReportBlocks);
+ EXPECT_CALL(mocks.bandwidth_observer, OnReceivedRtcpReceiverReport);
+ EXPECT_CALL(mocks.bandwidth_observer, OnReceivedEstimatedBitrate).Times(0);
+ receiver.IncomingPacket(compound.Build());
+
+ EXPECT_EQ(0u, receiver.TmmbrReceived().size());
+}
+
+TEST(RtcpReceiverTest, TmmbrPacketZeroRateIgnored) {
+ ReceiverMocks mocks;
+ RTCPReceiver receiver(DefaultConfiguration(&mocks), &mocks.rtp_rtcp_impl);
+ receiver.SetRemoteSSRC(kSenderSsrc);
+
+ auto tmmbr = std::make_unique<rtcp::Tmmbr>();
+ tmmbr->SetSenderSsrc(kSenderSsrc);
+ tmmbr->AddTmmbr(rtcp::TmmbItem(kReceiverMainSsrc, 0, 0));
+ auto sr = std::make_unique<rtcp::SenderReport>();
+ sr->SetSenderSsrc(kSenderSsrc);
+ rtcp::CompoundPacket compound;
+ compound.Append(std::move(sr));
+ compound.Append(std::move(tmmbr));
+
+ EXPECT_CALL(mocks.rtp_rtcp_impl, OnReceivedRtcpReportBlocks);
+ EXPECT_CALL(mocks.bandwidth_observer, OnReceivedRtcpReceiverReport);
+ EXPECT_CALL(mocks.bandwidth_observer, OnReceivedEstimatedBitrate).Times(0);
+ receiver.IncomingPacket(compound.Build());
+
+ EXPECT_EQ(0u, receiver.TmmbrReceived().size());
+}
+
+TEST(RtcpReceiverTest, TmmbrThreeConstraintsTimeOut) {
+ ReceiverMocks mocks;
+ RTCPReceiver receiver(DefaultConfiguration(&mocks), &mocks.rtp_rtcp_impl);
+ receiver.SetRemoteSSRC(kSenderSsrc);
+
+ // Inject 3 packets "from" kSenderSsrc, kSenderSsrc+1, kSenderSsrc+2.
+ // The times of arrival are starttime + 0, starttime + 5 and starttime + 10.
+ for (uint32_t ssrc = kSenderSsrc; ssrc < kSenderSsrc + 3; ++ssrc) {
+ auto tmmbr = std::make_unique<rtcp::Tmmbr>();
+ tmmbr->SetSenderSsrc(ssrc);
+ tmmbr->AddTmmbr(rtcp::TmmbItem(kReceiverMainSsrc, 30000, 0));
+ auto sr = std::make_unique<rtcp::SenderReport>();
+ sr->SetSenderSsrc(ssrc);
+ rtcp::CompoundPacket compound;
+ compound.Append(std::move(sr));
+ compound.Append(std::move(tmmbr));
+
+ EXPECT_CALL(mocks.rtp_rtcp_impl, OnReceivedRtcpReportBlocks);
+ EXPECT_CALL(mocks.rtp_rtcp_impl, SetTmmbn);
+ EXPECT_CALL(mocks.bandwidth_observer, OnReceivedRtcpReceiverReport);
+ EXPECT_CALL(mocks.bandwidth_observer, OnReceivedEstimatedBitrate);
+ receiver.IncomingPacket(compound.Build());
+
+ // 5 seconds between each packet.
+ mocks.clock.AdvanceTimeMilliseconds(5000);
+ }
+ // It is now starttime + 15.
+ EXPECT_THAT(receiver.TmmbrReceived(),
+ AllOf(SizeIs(3),
+ Each(Property(&rtcp::TmmbItem::bitrate_bps, Eq(30'000U)))));
+
+ // We expect the timeout to be 25 seconds. Advance the clock by 12
+ // seconds, timing out the first packet.
+ mocks.clock.AdvanceTimeMilliseconds(12000);
+ EXPECT_THAT(receiver.TmmbrReceived(),
+ UnorderedElementsAre(
+ Property(&rtcp::TmmbItem::ssrc, Eq(kSenderSsrc + 1)),
+ Property(&rtcp::TmmbItem::ssrc, Eq(kSenderSsrc + 2))));
+}
+
+TEST(RtcpReceiverTest,
+ VerifyBlockAndTimestampObtainedFromReportBlockDataObserver) {
+ ReceiverMocks mocks;
+ MockReportBlockDataObserverImpl observer;
+ RtpRtcpInterface::Configuration config = DefaultConfiguration(&mocks);
+ config.report_block_data_observer = &observer;
+ RTCPReceiver receiver(config, &mocks.rtp_rtcp_impl);
+ receiver.SetRemoteSSRC(kSenderSsrc);
+
+ const uint8_t kFractionLoss = 3;
+ const uint32_t kCumulativeLoss = 7;
+ const uint32_t kJitter = 9;
+ const uint16_t kSequenceNumber = 1234;
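+  // NTP uses a 1900 epoch; subtracting kNtpJan1970Millisecs converts the
+  // current NTP time to the UTC (Unix-epoch) timestamp the observer reports.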
+ const int64_t kNtpNowMs =
+ mocks.clock.CurrentNtpInMilliseconds() - rtc::kNtpJan1970Millisecs;
+
+ rtcp::ReportBlock rtcp_block;
+ rtcp_block.SetMediaSsrc(kReceiverMainSsrc);
+ rtcp_block.SetExtHighestSeqNum(kSequenceNumber);
+ rtcp_block.SetFractionLost(kFractionLoss);
+ rtcp_block.SetCumulativeLost(kCumulativeLoss);
+ rtcp_block.SetJitter(kJitter);
+
+ rtcp::ReceiverReport rtcp_report;
+ rtcp_report.SetSenderSsrc(kSenderSsrc);
+ rtcp_report.AddReportBlock(rtcp_block);
+ EXPECT_CALL(observer, OnReportBlockDataUpdated)
+ .WillOnce([&](ReportBlockData report_block_data) {
+ const auto& report_block = report_block_data.report_block();
+ EXPECT_EQ(rtcp_block.source_ssrc(), report_block.source_ssrc);
+ EXPECT_EQ(kSenderSsrc, report_block.sender_ssrc);
+ EXPECT_EQ(rtcp_block.fraction_lost(), report_block.fraction_lost);
+ EXPECT_EQ(rtcp_block.cumulative_lost_signed(),
+ report_block.packets_lost);
+ EXPECT_EQ(rtcp_block.extended_high_seq_num(),
+ report_block.extended_highest_sequence_number);
+ EXPECT_EQ(rtcp_block.jitter(), report_block.jitter);
+ EXPECT_EQ(kNtpNowMs * rtc::kNumMicrosecsPerMillisec,
+ report_block_data.report_block_timestamp_utc_us());
+ // No RTT is calculated in this test.
+ EXPECT_EQ(0u, report_block_data.num_rtts());
+ });
+ EXPECT_CALL(mocks.rtp_rtcp_impl, OnReceivedRtcpReportBlocks);
+ EXPECT_CALL(mocks.bandwidth_observer, OnReceivedRtcpReceiverReport);
+ receiver.IncomingPacket(rtcp_report.Build());
+}
+
+TEST(RtcpReceiverTest, VerifyRttObtainedFromReportBlockDataObserver) {
+ ReceiverMocks mocks;
+ MockReportBlockDataObserverImpl observer;
+ RtpRtcpInterface::Configuration config = DefaultConfiguration(&mocks);
+ config.report_block_data_observer = &observer;
+ RTCPReceiver receiver(config, &mocks.rtp_rtcp_impl);
+ receiver.SetRemoteSSRC(kSenderSsrc);
+
+ const TimeDelta kRtt = TimeDelta::Millis(120);
+ const uint32_t kDelayNtp = 123000;
+ const TimeDelta kDelay = CompactNtpRttToTimeDelta(kDelayNtp);
+
+ uint32_t sent_ntp = CompactNtp(mocks.clock.CurrentNtpTime());
+ mocks.clock.AdvanceTime(kRtt + kDelay);
+
+ rtcp::SenderReport sr;
+ sr.SetSenderSsrc(kSenderSsrc);
+ rtcp::ReportBlock block;
+ block.SetMediaSsrc(kReceiverMainSsrc);
+ block.SetLastSr(sent_ntp);
+ block.SetDelayLastSr(kDelayNtp);
+ sr.AddReportBlock(block);
+ block.SetMediaSsrc(kReceiverExtraSsrc);
+ block.SetLastSr(0);
+ sr.AddReportBlock(block);
+
+ EXPECT_CALL(mocks.rtp_rtcp_impl, OnReceivedRtcpReportBlocks);
+ EXPECT_CALL(mocks.bandwidth_observer, OnReceivedRtcpReceiverReport);
+ InSequence sequence;
+ EXPECT_CALL(observer, OnReportBlockDataUpdated)
+ .WillOnce([&](ReportBlockData report_block_data) {
+ EXPECT_EQ(kReceiverMainSsrc,
+ report_block_data.report_block().source_ssrc);
+ EXPECT_EQ(1u, report_block_data.num_rtts());
+ EXPECT_EQ(kRtt.ms(), report_block_data.min_rtt_ms());
+ EXPECT_EQ(kRtt.ms(), report_block_data.max_rtt_ms());
+ EXPECT_EQ(kRtt.ms(), report_block_data.sum_rtt_ms());
+ EXPECT_EQ(kRtt.ms(), report_block_data.last_rtt_ms());
+ });
+ EXPECT_CALL(observer, OnReportBlockDataUpdated)
+ .WillOnce([](ReportBlockData report_block_data) {
+ EXPECT_EQ(kReceiverExtraSsrc,
+ report_block_data.report_block().source_ssrc);
+ EXPECT_EQ(0u, report_block_data.num_rtts());
+ });
+ receiver.IncomingPacket(sr.Build());
+}
+
+TEST(RtcpReceiverTest, GetReportBlockDataAfterOneReportBlock) {
+ ReceiverMocks mocks;
+ RTCPReceiver receiver(DefaultConfiguration(&mocks), &mocks.rtp_rtcp_impl);
+ receiver.SetRemoteSSRC(kSenderSsrc);
+
+ const uint16_t kSequenceNumber = 1234;
+
+ rtcp::ReportBlock rtcp_block;
+ rtcp_block.SetMediaSsrc(kReceiverMainSsrc);
+ rtcp_block.SetExtHighestSeqNum(kSequenceNumber);
+
+ rtcp::ReceiverReport rtcp_report;
+ rtcp_report.SetSenderSsrc(kSenderSsrc);
+ rtcp_report.AddReportBlock(rtcp_block);
+ EXPECT_CALL(mocks.rtp_rtcp_impl, OnReceivedRtcpReportBlocks);
+ EXPECT_CALL(mocks.bandwidth_observer, OnReceivedRtcpReceiverReport);
+ receiver.IncomingPacket(rtcp_report.Build());
+
+ auto report_block_datas = receiver.GetLatestReportBlockData();
+ ASSERT_THAT(report_block_datas, SizeIs(1));
+ EXPECT_EQ(kReceiverMainSsrc,
+ report_block_datas[0].report_block().source_ssrc);
+ EXPECT_EQ(
+ kSequenceNumber,
+ report_block_datas[0].report_block().extended_highest_sequence_number);
+}
+
+TEST(RtcpReceiverTest, GetReportBlockDataAfterTwoReportBlocksOfSameSsrc) {
+ ReceiverMocks mocks;
+ RTCPReceiver receiver(DefaultConfiguration(&mocks), &mocks.rtp_rtcp_impl);
+ receiver.SetRemoteSSRC(kSenderSsrc);
+
+ const uint16_t kSequenceNumber1 = 1234;
+ const uint16_t kSequenceNumber2 = 1235;
+
+ rtcp::ReportBlock rtcp_block1;
+ rtcp_block1.SetMediaSsrc(kReceiverMainSsrc);
+ rtcp_block1.SetExtHighestSeqNum(kSequenceNumber1);
+
+ rtcp::ReceiverReport rtcp_report1;
+ rtcp_report1.SetSenderSsrc(kSenderSsrc);
+ rtcp_report1.AddReportBlock(rtcp_block1);
+ EXPECT_CALL(mocks.rtp_rtcp_impl, OnReceivedRtcpReportBlocks);
+ EXPECT_CALL(mocks.bandwidth_observer, OnReceivedRtcpReceiverReport);
+ receiver.IncomingPacket(rtcp_report1.Build());
+
+  // Inject a report block with an increased sequence number for the same
+  // source SSRC.
+ rtcp::ReportBlock rtcp_block2;
+ rtcp_block2.SetMediaSsrc(kReceiverMainSsrc);
+ rtcp_block2.SetExtHighestSeqNum(kSequenceNumber2);
+
+ rtcp::ReceiverReport rtcp_report2;
+ rtcp_report2.SetSenderSsrc(kSenderSsrc);
+ rtcp_report2.AddReportBlock(rtcp_block2);
+ EXPECT_CALL(mocks.rtp_rtcp_impl, OnReceivedRtcpReportBlocks);
+ EXPECT_CALL(mocks.bandwidth_observer, OnReceivedRtcpReceiverReport);
+ receiver.IncomingPacket(rtcp_report2.Build());
+
+ // Only the latest block should be returned.
+ auto report_block_datas = receiver.GetLatestReportBlockData();
+ ASSERT_THAT(report_block_datas, SizeIs(1));
+ EXPECT_EQ(kReceiverMainSsrc,
+ report_block_datas[0].report_block().source_ssrc);
+ EXPECT_EQ(
+ kSequenceNumber2,
+ report_block_datas[0].report_block().extended_highest_sequence_number);
+}
+
+TEST(RtcpReceiverTest, GetReportBlockDataAfterTwoReportBlocksOfDifferentSsrcs) {
+ ReceiverMocks mocks;
+ RTCPReceiver receiver(DefaultConfiguration(&mocks), &mocks.rtp_rtcp_impl);
+ receiver.SetRemoteSSRC(kSenderSsrc);
+
+ const uint16_t kSequenceNumber1 = 1234;
+ const uint16_t kSequenceNumber2 = 42;
+
+ rtcp::ReportBlock rtcp_block1;
+ rtcp_block1.SetMediaSsrc(kReceiverMainSsrc);
+ rtcp_block1.SetExtHighestSeqNum(kSequenceNumber1);
+
+ rtcp::ReceiverReport rtcp_report1;
+ rtcp_report1.SetSenderSsrc(kSenderSsrc);
+ rtcp_report1.AddReportBlock(rtcp_block1);
+ EXPECT_CALL(mocks.rtp_rtcp_impl, OnReceivedRtcpReportBlocks);
+ EXPECT_CALL(mocks.bandwidth_observer, OnReceivedRtcpReceiverReport);
+ receiver.IncomingPacket(rtcp_report1.Build());
+
+ // Inject a report block for a different source SSRC.
+ rtcp::ReportBlock rtcp_block2;
+ rtcp_block2.SetMediaSsrc(kReceiverExtraSsrc);
+ rtcp_block2.SetExtHighestSeqNum(kSequenceNumber2);
+
+ rtcp::ReceiverReport rtcp_report2;
+ rtcp_report2.SetSenderSsrc(kSenderSsrc);
+ rtcp_report2.AddReportBlock(rtcp_block2);
+ EXPECT_CALL(mocks.rtp_rtcp_impl, OnReceivedRtcpReportBlocks);
+ EXPECT_CALL(mocks.bandwidth_observer, OnReceivedRtcpReceiverReport);
+ receiver.IncomingPacket(rtcp_report2.Build());
+
+ // Both report blocks should be returned.
+ auto report_block_datas = receiver.GetLatestReportBlockData();
+ ASSERT_THAT(report_block_datas, SizeIs(2));
+ EXPECT_EQ(kReceiverMainSsrc,
+ report_block_datas[0].report_block().source_ssrc);
+ EXPECT_EQ(
+ kSequenceNumber1,
+ report_block_datas[0].report_block().extended_highest_sequence_number);
+ EXPECT_EQ(kReceiverExtraSsrc,
+ report_block_datas[1].report_block().source_ssrc);
+ EXPECT_EQ(
+ kSequenceNumber2,
+ report_block_datas[1].report_block().extended_highest_sequence_number);
+}
+
+TEST(RtcpReceiverTest, ReceivesTransportFeedback) {
+ ReceiverMocks mocks;
+ RTCPReceiver receiver(DefaultConfiguration(&mocks), &mocks.rtp_rtcp_impl);
+ receiver.SetRemoteSSRC(kSenderSsrc);
+
+ rtcp::TransportFeedback packet;
+ packet.SetMediaSsrc(kReceiverMainSsrc);
+ packet.SetSenderSsrc(kSenderSsrc);
+ packet.SetBase(1, Timestamp::Millis(1));
+ packet.AddReceivedPacket(1, Timestamp::Millis(1));
+
+ EXPECT_CALL(
+ mocks.transport_feedback_observer,
+ OnTransportFeedback(AllOf(
+ Property(&rtcp::TransportFeedback::media_ssrc, kReceiverMainSsrc),
+ Property(&rtcp::TransportFeedback::sender_ssrc, kSenderSsrc))));
+ receiver.IncomingPacket(packet.Build());
+}
+
+TEST(RtcpReceiverTest, ReceivesRemb) {
+ ReceiverMocks mocks;
+ RTCPReceiver receiver(DefaultConfiguration(&mocks), &mocks.rtp_rtcp_impl);
+ receiver.SetRemoteSSRC(kSenderSsrc);
+
+ const uint32_t kBitrateBps = 500000;
+ rtcp::Remb remb;
+ remb.SetSenderSsrc(kSenderSsrc);
+ remb.SetBitrateBps(kBitrateBps);
+
+ EXPECT_CALL(mocks.bandwidth_observer,
+ OnReceivedEstimatedBitrate(kBitrateBps));
+ receiver.IncomingPacket(remb.Build());
+}
+
+TEST(RtcpReceiverTest, HandlesInvalidTransportFeedback) {
+ ReceiverMocks mocks;
+ RTCPReceiver receiver(DefaultConfiguration(&mocks), &mocks.rtp_rtcp_impl);
+ receiver.SetRemoteSSRC(kSenderSsrc);
+
+ // Send a compound packet with a TransportFeedback followed by something else.
+ auto packet = std::make_unique<rtcp::TransportFeedback>();
+ packet->SetMediaSsrc(kReceiverMainSsrc);
+ packet->SetSenderSsrc(kSenderSsrc);
+ packet->SetBase(1, Timestamp::Millis(1));
+ packet->AddReceivedPacket(1, Timestamp::Millis(1));
+
+ static uint32_t kBitrateBps = 50000;
+ auto remb = std::make_unique<rtcp::Remb>();
+ remb->SetSenderSsrc(kSenderSsrc);
+ remb->SetBitrateBps(kBitrateBps);
+ rtcp::CompoundPacket compound;
+ compound.Append(std::move(packet));
+ compound.Append(std::move(remb));
+ rtc::Buffer built_packet = compound.Build();
+
+ // Modify the TransportFeedback packet so that it is invalid.
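+  // kStatusCountOffset points at the packet status count field of the
+  // transport feedback header; claiming 42 statuses makes the packet shorter
+  // than advertised, so parsing fails.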
+ const size_t kStatusCountOffset = 14;
+ ByteWriter<uint16_t>::WriteBigEndian(&built_packet.data()[kStatusCountOffset],
+ 42);
+
+  // Verify that no transport feedback callback is triggered.
+ EXPECT_CALL(mocks.transport_feedback_observer, OnTransportFeedback).Times(0);
+  // But the REMB should still be processed and trigger a callback.
+ EXPECT_CALL(mocks.bandwidth_observer,
+ OnReceivedEstimatedBitrate(kBitrateBps));
+ receiver.IncomingPacket(built_packet);
+}
+
+TEST(RtcpReceiverTest, Nack) {
+ ReceiverMocks mocks;
+ RTCPReceiver receiver(DefaultConfiguration(&mocks), &mocks.rtp_rtcp_impl);
+ receiver.SetRemoteSSRC(kSenderSsrc);
+
+ const uint16_t kNackList1[] = {1, 2, 3, 5};
+ const uint16_t kNackList23[] = {5, 7, 30, 40, 41, 58, 59, 61, 63};
+ const size_t kNackListLength2 = 4;
+ const size_t kNackListLength3 = arraysize(kNackList23) - kNackListLength2;
+ std::set<uint16_t> nack_set;
+ nack_set.insert(std::begin(kNackList1), std::end(kNackList1));
+ nack_set.insert(std::begin(kNackList23), std::end(kNackList23));
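+  // kNackList1 and kNackList23 both contain sequence number 5, so the set
+  // union is used for the expected unique_nack_requests count.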
+
+ auto nack1 = std::make_unique<rtcp::Nack>();
+ nack1->SetSenderSsrc(kSenderSsrc);
+ nack1->SetMediaSsrc(kReceiverMainSsrc);
+ nack1->SetPacketIds(kNackList1, arraysize(kNackList1));
+
+ EXPECT_CALL(mocks.rtp_rtcp_impl,
+ OnReceivedNack(ElementsAreArray(kNackList1)));
+ EXPECT_CALL(mocks.packet_type_counter_observer,
+ RtcpPacketTypesCounterUpdated(
+ kReceiverMainSsrc,
+ AllOf(Field(&RtcpPacketTypeCounter::nack_requests,
+ arraysize(kNackList1)),
+ Field(&RtcpPacketTypeCounter::unique_nack_requests,
+ arraysize(kNackList1)))));
+ receiver.IncomingPacket(nack1->Build());
+
+ auto nack2 = std::make_unique<rtcp::Nack>();
+ nack2->SetSenderSsrc(kSenderSsrc);
+ nack2->SetMediaSsrc(kReceiverMainSsrc);
+ nack2->SetPacketIds(kNackList23, kNackListLength2);
+
+ auto nack3 = std::make_unique<rtcp::Nack>();
+ nack3->SetSenderSsrc(kSenderSsrc);
+ nack3->SetMediaSsrc(kReceiverMainSsrc);
+ nack3->SetPacketIds(kNackList23 + kNackListLength2, kNackListLength3);
+
+ rtcp::CompoundPacket two_nacks;
+ two_nacks.Append(std::move(nack2));
+ two_nacks.Append(std::move(nack3));
+
+ EXPECT_CALL(mocks.rtp_rtcp_impl,
+ OnReceivedNack(ElementsAreArray(kNackList23)));
+ EXPECT_CALL(mocks.packet_type_counter_observer,
+ RtcpPacketTypesCounterUpdated(
+ kReceiverMainSsrc,
+ AllOf(Field(&RtcpPacketTypeCounter::nack_requests,
+ arraysize(kNackList1) + arraysize(kNackList23)),
+ Field(&RtcpPacketTypeCounter::unique_nack_requests,
+ nack_set.size()))));
+ receiver.IncomingPacket(two_nacks.Build());
+}
+
+TEST(RtcpReceiverTest, NackNotForUsIgnored) {
+ ReceiverMocks mocks;
+ RTCPReceiver receiver(DefaultConfiguration(&mocks), &mocks.rtp_rtcp_impl);
+ receiver.SetRemoteSSRC(kSenderSsrc);
+
+ const uint16_t kNackList1[] = {1, 2, 3, 5};
+ const size_t kNackListLength1 = std::end(kNackList1) - std::begin(kNackList1);
+
+ rtcp::Nack nack;
+ nack.SetSenderSsrc(kSenderSsrc);
+ nack.SetMediaSsrc(kNotToUsSsrc);
+ nack.SetPacketIds(kNackList1, kNackListLength1);
+
+ EXPECT_CALL(mocks.packet_type_counter_observer,
+ RtcpPacketTypesCounterUpdated(
+ _, Field(&RtcpPacketTypeCounter::nack_requests, 0)));
+ receiver.IncomingPacket(nack.Build());
+}
+
+TEST(RtcpReceiverTest, ForceSenderReport) {
+ ReceiverMocks mocks;
+ RTCPReceiver receiver(DefaultConfiguration(&mocks), &mocks.rtp_rtcp_impl);
+ receiver.SetRemoteSSRC(kSenderSsrc);
+
+ rtcp::RapidResyncRequest rr;
+ rr.SetSenderSsrc(kSenderSsrc);
+ rr.SetMediaSsrc(kReceiverMainSsrc);
+
+ EXPECT_CALL(mocks.rtp_rtcp_impl, OnRequestSendReport());
+ receiver.IncomingPacket(rr.Build());
+}
+
+TEST(RtcpReceiverTest, ReceivesTargetBitrate) {
+ ReceiverMocks mocks;
+ RTCPReceiver receiver(DefaultConfiguration(&mocks), &mocks.rtp_rtcp_impl);
+ receiver.SetRemoteSSRC(kSenderSsrc);
+
+ VideoBitrateAllocation expected_allocation;
+ expected_allocation.SetBitrate(0, 0, 10000);
+ expected_allocation.SetBitrate(0, 1, 20000);
+ expected_allocation.SetBitrate(1, 0, 40000);
+ expected_allocation.SetBitrate(1, 1, 80000);
+
+ rtcp::TargetBitrate bitrate;
+ bitrate.AddTargetBitrate(0, 0, expected_allocation.GetBitrate(0, 0) / 1000);
+ bitrate.AddTargetBitrate(0, 1, expected_allocation.GetBitrate(0, 1) / 1000);
+ bitrate.AddTargetBitrate(1, 0, expected_allocation.GetBitrate(1, 0) / 1000);
+ bitrate.AddTargetBitrate(1, 1, expected_allocation.GetBitrate(1, 1) / 1000);
+
+ rtcp::ExtendedReports xr;
+ xr.SetTargetBitrate(bitrate);
+
+ // Wrong sender ssrc, target bitrate should be discarded.
+ xr.SetSenderSsrc(kSenderSsrc + 1);
+ EXPECT_CALL(mocks.bitrate_allocation_observer,
+ OnBitrateAllocationUpdated(expected_allocation))
+ .Times(0);
+ receiver.IncomingPacket(xr.Build());
+
+ // Set correct ssrc, callback should be called once.
+ xr.SetSenderSsrc(kSenderSsrc);
+ EXPECT_CALL(mocks.bitrate_allocation_observer,
+ OnBitrateAllocationUpdated(expected_allocation));
+ receiver.IncomingPacket(xr.Build());
+}
+
+TEST(RtcpReceiverTest, HandlesIncorrectTargetBitrate) {
+ ReceiverMocks mocks;
+ RTCPReceiver receiver(DefaultConfiguration(&mocks), &mocks.rtp_rtcp_impl);
+ receiver.SetRemoteSSRC(kSenderSsrc);
+
+ VideoBitrateAllocation expected_allocation;
+ expected_allocation.SetBitrate(0, 0, 10000);
+
+ rtcp::TargetBitrate bitrate;
+ bitrate.AddTargetBitrate(0, 0, expected_allocation.GetBitrate(0, 0) / 1000);
+ bitrate.AddTargetBitrate(0, kMaxTemporalStreams, 20000);
+ bitrate.AddTargetBitrate(kMaxSpatialLayers, 0, 40000);
+
+ rtcp::ExtendedReports xr;
+ xr.SetTargetBitrate(bitrate);
+ xr.SetSenderSsrc(kSenderSsrc);
+
+ EXPECT_CALL(mocks.bitrate_allocation_observer,
+ OnBitrateAllocationUpdated(expected_allocation));
+ receiver.IncomingPacket(xr.Build());
+}
+
+TEST(RtcpReceiverTest, ChangeLocalMediaSsrc) {
+ ReceiverMocks mocks;
+ // Construct a receiver with `kReceiverMainSsrc` (default) local media ssrc.
+ RTCPReceiver receiver(DefaultConfiguration(&mocks), &mocks.rtp_rtcp_impl);
+ receiver.SetRemoteSSRC(kSenderSsrc);
+
+ constexpr uint32_t kSecondarySsrc = kReceiverMainSsrc + 1;
+
+ // Expect to only get the `OnReceivedNack()` callback once since we'll
+ // configure it for the `kReceiverMainSsrc` media ssrc.
+ EXPECT_CALL(mocks.rtp_rtcp_impl, OnReceivedNack);
+
+ // We'll get two callbacks to RtcpPacketTypesCounterUpdated, one for each
+ // call to `IncomingPacket`, differentiated by the local media ssrc.
+ EXPECT_CALL(mocks.packet_type_counter_observer,
+ RtcpPacketTypesCounterUpdated(kReceiverMainSsrc, _));
+ EXPECT_CALL(mocks.packet_type_counter_observer,
+ RtcpPacketTypesCounterUpdated(kSecondarySsrc, _));
+
+ // Construct a test nack packet with media ssrc set to `kReceiverMainSsrc`.
+ rtcp::Nack nack;
+ nack.SetSenderSsrc(kSenderSsrc);
+ nack.SetMediaSsrc(kReceiverMainSsrc);
+ const uint16_t kNackList[] = {1, 2, 3, 5};
+ nack.SetPacketIds(kNackList, std::size(kNackList));
+
+ // Deliver the first callback.
+ receiver.IncomingPacket(nack.Build());
+
+ // Change the set local media ssrc.
+ receiver.set_local_media_ssrc(kSecondarySsrc);
+
+ // Deliver another packet - this time there will be no callback to
+ // OnReceivedNack due to the ssrc not matching.
+ receiver.IncomingPacket(nack.Build());
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_sender.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_sender.cc
new file mode 100644
index 0000000000..983851a55b
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_sender.cc
@@ -0,0 +1,974 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtcp_sender.h"
+
+#include <string.h> // memcpy
+
+#include <algorithm> // std::min
+#include <memory>
+#include <utility>
+
+#include "absl/strings/string_view.h"
+#include "absl/types/optional.h"
+#include "api/rtc_event_log/rtc_event_log.h"
+#include "api/rtp_headers.h"
+#include "api/units/time_delta.h"
+#include "api/units/timestamp.h"
+#include "logging/rtc_event_log/events/rtc_event_rtcp_packet_outgoing.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/app.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/bye.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/compound_packet.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/extended_reports.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/fir.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/loss_notification.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/nack.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/pli.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/receiver_report.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/remb.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/sdes.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/sender_report.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/tmmbn.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/tmmbr.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/transport_feedback.h"
+#include "modules/rtp_rtcp/source/rtp_rtcp_impl2.h"
+#include "modules/rtp_rtcp/source/rtp_rtcp_interface.h"
+#include "modules/rtp_rtcp/source/time_util.h"
+#include "modules/rtp_rtcp/source/tmmbr_help.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "rtc_base/trace_event.h"
+
+namespace webrtc {
+
+namespace {
+const uint32_t kRtcpAnyExtendedReports = kRtcpXrReceiverReferenceTime |
+ kRtcpXrDlrrReportBlock |
+ kRtcpXrTargetBitrate;
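+// These XR sub-block flags are individual bits, so OR-ing them lets the
+// single kRtcpAnyExtendedReports flag (handled by BuildExtendedReports)
+// stand in for any combination of extended-report blocks.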
+constexpr int32_t kDefaultVideoReportInterval = 1000;
+constexpr int32_t kDefaultAudioReportInterval = 5000;
+} // namespace
+
+// Helper to put several RTCP packets into lower layer datagram RTCP packet.
+class RTCPSender::PacketSender {
+ public:
+ PacketSender(rtcp::RtcpPacket::PacketReadyCallback callback,
+ size_t max_packet_size)
+ : callback_(callback), max_packet_size_(max_packet_size) {
+ RTC_CHECK_LE(max_packet_size, IP_PACKET_SIZE);
+ }
+ ~PacketSender() { RTC_DCHECK_EQ(index_, 0) << "Unsent rtcp packet."; }
+
+ // Appends a packet to pending compound packet.
+ // Sends rtcp packet if buffer is full and resets the buffer.
+ void AppendPacket(const rtcp::RtcpPacket& packet) {
+ packet.Create(buffer_, &index_, max_packet_size_, callback_);
+ }
+
+ // Sends pending rtcp packet.
+ void Send() {
+ if (index_ > 0) {
+ callback_(rtc::ArrayView<const uint8_t>(buffer_, index_));
+ index_ = 0;
+ }
+ }
+
+ private:
+ const rtcp::RtcpPacket::PacketReadyCallback callback_;
+ const size_t max_packet_size_;
+ size_t index_ = 0;
+ uint8_t buffer_[IP_PACKET_SIZE];
+};
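+// A hypothetical usage sketch (the builder methods below follow this
+// pattern):
+//   PacketSender sender(callback, max_packet_size);
+//   sender.AppendPacket(report);  // May invoke the callback if the buffer
+//                                 // fills up.
+//   sender.AppendPacket(sdes);
+//   sender.Send();  // Flushes whatever is still buffered.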
+
+RTCPSender::FeedbackState::FeedbackState()
+ : packets_sent(0),
+ media_bytes_sent(0),
+ send_bitrate(0),
+ last_rr_ntp_secs(0),
+ last_rr_ntp_frac(0),
+ remote_sr(0),
+ receiver(nullptr) {}
+
+RTCPSender::FeedbackState::FeedbackState(const FeedbackState&) = default;
+
+RTCPSender::FeedbackState::FeedbackState(FeedbackState&&) = default;
+
+RTCPSender::FeedbackState::~FeedbackState() = default;
+
+class RTCPSender::RtcpContext {
+ public:
+ RtcpContext(const FeedbackState& feedback_state,
+ int32_t nack_size,
+ const uint16_t* nack_list,
+ Timestamp now)
+ : feedback_state_(feedback_state),
+ nack_size_(nack_size),
+ nack_list_(nack_list),
+ now_(now) {}
+
+ const FeedbackState& feedback_state_;
+ const int32_t nack_size_;
+ const uint16_t* nack_list_;
+ const Timestamp now_;
+};
+
+RTCPSender::Configuration RTCPSender::Configuration::FromRtpRtcpConfiguration(
+ const RtpRtcpInterface::Configuration& configuration) {
+ RTCPSender::Configuration result;
+ result.audio = configuration.audio;
+ result.local_media_ssrc = configuration.local_media_ssrc;
+ result.clock = configuration.clock;
+ result.outgoing_transport = configuration.outgoing_transport;
+ result.non_sender_rtt_measurement = configuration.non_sender_rtt_measurement;
+ result.event_log = configuration.event_log;
+ if (configuration.rtcp_report_interval_ms) {
+ result.rtcp_report_interval =
+ TimeDelta::Millis(configuration.rtcp_report_interval_ms);
+ }
+ result.receive_statistics = configuration.receive_statistics;
+ result.rtcp_packet_type_counter_observer =
+ configuration.rtcp_packet_type_counter_observer;
+ return result;
+}
+
+RTCPSender::RTCPSender(Configuration config)
+ : audio_(config.audio),
+ ssrc_(config.local_media_ssrc),
+ clock_(config.clock),
+ random_(clock_->TimeInMicroseconds()),
+ method_(RtcpMode::kOff),
+ event_log_(config.event_log),
+ transport_(config.outgoing_transport),
+ report_interval_(config.rtcp_report_interval.value_or(
+ TimeDelta::Millis(config.audio ? kDefaultAudioReportInterval
+ : kDefaultVideoReportInterval))),
+ schedule_next_rtcp_send_evaluation_function_(
+ std::move(config.schedule_next_rtcp_send_evaluation_function)),
+ sending_(false),
+ timestamp_offset_(0),
+ last_rtp_timestamp_(0),
+ remote_ssrc_(0),
+ receive_statistics_(config.receive_statistics),
+
+ sequence_number_fir_(0),
+
+ remb_bitrate_(0),
+
+ tmmbr_send_bps_(0),
+ packet_oh_send_(0),
+ max_packet_size_(IP_PACKET_SIZE - 28), // IPv4 + UDP by default.
+
+ xr_send_receiver_reference_time_enabled_(
+ config.non_sender_rtt_measurement),
+ packet_type_counter_observer_(config.rtcp_packet_type_counter_observer),
+ send_video_bitrate_allocation_(false),
+ last_payload_type_(-1) {
+ RTC_DCHECK(transport_ != nullptr);
+
+ builders_[kRtcpSr] = &RTCPSender::BuildSR;
+ builders_[kRtcpRr] = &RTCPSender::BuildRR;
+ builders_[kRtcpSdes] = &RTCPSender::BuildSDES;
+ builders_[kRtcpPli] = &RTCPSender::BuildPLI;
+ builders_[kRtcpFir] = &RTCPSender::BuildFIR;
+ builders_[kRtcpRemb] = &RTCPSender::BuildREMB;
+ builders_[kRtcpBye] = &RTCPSender::BuildBYE;
+ builders_[kRtcpLossNotification] = &RTCPSender::BuildLossNotification;
+ builders_[kRtcpTmmbr] = &RTCPSender::BuildTMMBR;
+ builders_[kRtcpTmmbn] = &RTCPSender::BuildTMMBN;
+ builders_[kRtcpNack] = &RTCPSender::BuildNACK;
+ builders_[kRtcpAnyExtendedReports] = &RTCPSender::BuildExtendedReports;
+}
+
+RTCPSender::~RTCPSender() {}
+
+RtcpMode RTCPSender::Status() const {
+ MutexLock lock(&mutex_rtcp_sender_);
+ return method_;
+}
+
+void RTCPSender::SetRTCPStatus(RtcpMode new_method) {
+ MutexLock lock(&mutex_rtcp_sender_);
+
+ if (new_method == RtcpMode::kOff) {
+ next_time_to_send_rtcp_ = absl::nullopt;
+ } else if (method_ == RtcpMode::kOff) {
+ // When switching on, reschedule the next packet.
+ SetNextRtcpSendEvaluationDuration(
+ TimeDelta::Millis(RTCP_INTERVAL_RAPID_SYNC_MS / 2));
+ }
+ method_ = new_method;
+}
+
+bool RTCPSender::Sending() const {
+ MutexLock lock(&mutex_rtcp_sender_);
+ return sending_;
+}
+
+void RTCPSender::SetSendingStatus(const FeedbackState& feedback_state,
+ bool sending) {
+ bool sendRTCPBye = false;
+ {
+ MutexLock lock(&mutex_rtcp_sender_);
+
+ if (method_ != RtcpMode::kOff && !sending && sending_) {
+ // Stopping sending: trigger an RTCP BYE.
+ sendRTCPBye = true;
+ }
+ sending_ = sending;
+ }
+ if (sendRTCPBye) {
+ if (SendRTCP(feedback_state, kRtcpBye) != 0) {
+ RTC_LOG(LS_WARNING) << "Failed to send RTCP BYE";
+ }
+ }
+}
+
+void RTCPSender::SetNonSenderRttMeasurement(bool enabled) {
+ MutexLock lock(&mutex_rtcp_sender_);
+ xr_send_receiver_reference_time_enabled_ = enabled;
+}
+
+int32_t RTCPSender::SendLossNotification(const FeedbackState& feedback_state,
+ uint16_t last_decoded_seq_num,
+ uint16_t last_received_seq_num,
+ bool decodability_flag,
+ bool buffering_allowed) {
+ int32_t error_code = -1;
+ auto callback = [&](rtc::ArrayView<const uint8_t> packet) {
+ transport_->SendRtcp(packet.data(), packet.size());
+ error_code = 0;
+ if (event_log_) {
+ event_log_->Log(std::make_unique<RtcEventRtcpPacketOutgoing>(packet));
+ }
+ };
+ absl::optional<PacketSender> sender;
+ {
+ MutexLock lock(&mutex_rtcp_sender_);
+
+ if (!loss_notification_.Set(last_decoded_seq_num, last_received_seq_num,
+ decodability_flag)) {
+ return -1;
+ }
+
+ SetFlag(kRtcpLossNotification, /*is_volatile=*/true);
+
+ if (buffering_allowed) {
+ // The loss notification will be batched with additional feedback
+ // messages.
+ return 0;
+ }
+
+ sender.emplace(callback, max_packet_size_);
+ auto result = ComputeCompoundRTCPPacket(
+ feedback_state, RTCPPacketType::kRtcpLossNotification, 0, nullptr,
+ *sender);
+ if (result) {
+ return *result;
+ }
+ }
+ sender->Send();
+
+ return error_code;
+}
+
+void RTCPSender::SetRemb(int64_t bitrate_bps, std::vector<uint32_t> ssrcs) {
+ RTC_CHECK_GE(bitrate_bps, 0);
+ MutexLock lock(&mutex_rtcp_sender_);
+ if (method_ == RtcpMode::kOff) {
+ RTC_LOG(LS_WARNING) << "Can't send rtcp if it is disabled.";
+ return;
+ }
+ remb_bitrate_ = bitrate_bps;
+ remb_ssrcs_ = std::move(ssrcs);
+
+ SetFlag(kRtcpRemb, /*is_volatile=*/false);
+ // Send a REMB immediately if we have a new REMB. The frequency of REMBs is
+ // throttled by the caller.
+ SetNextRtcpSendEvaluationDuration(TimeDelta::Zero());
+}
+
+void RTCPSender::UnsetRemb() {
+ MutexLock lock(&mutex_rtcp_sender_);
+ // Stop sending REMB with each report until it is re-enabled and new REMB
+ // data is set.
+ ConsumeFlag(kRtcpRemb, /*forced=*/true);
+}
+
+bool RTCPSender::TMMBR() const {
+ MutexLock lock(&mutex_rtcp_sender_);
+ return IsFlagPresent(RTCPPacketType::kRtcpTmmbr);
+}
+
+void RTCPSender::SetMaxRtpPacketSize(size_t max_packet_size) {
+ MutexLock lock(&mutex_rtcp_sender_);
+ max_packet_size_ = max_packet_size;
+}
+
+void RTCPSender::SetTimestampOffset(uint32_t timestamp_offset) {
+ MutexLock lock(&mutex_rtcp_sender_);
+ timestamp_offset_ = timestamp_offset;
+}
+
+void RTCPSender::SetLastRtpTime(uint32_t rtp_timestamp,
+ absl::optional<Timestamp> capture_time,
+ absl::optional<int8_t> payload_type) {
+ MutexLock lock(&mutex_rtcp_sender_);
+ // For compatibility with clients who don't set payload type correctly on all
+ // calls.
+ if (payload_type.has_value()) {
+ last_payload_type_ = *payload_type;
+ }
+ last_rtp_timestamp_ = rtp_timestamp;
+ if (!capture_time.has_value()) {
+ // We don't currently get a capture time from VoiceEngine.
+ last_frame_capture_time_ = clock_->CurrentTime();
+ } else {
+ last_frame_capture_time_ = *capture_time;
+ }
+}
+
+void RTCPSender::SetRtpClockRate(int8_t payload_type, int rtp_clock_rate_hz) {
+ MutexLock lock(&mutex_rtcp_sender_);
+ rtp_clock_rates_khz_[payload_type] = rtp_clock_rate_hz / 1000;
+}
+
+uint32_t RTCPSender::SSRC() const {
+ MutexLock lock(&mutex_rtcp_sender_);
+ return ssrc_;
+}
+
+void RTCPSender::SetSsrc(uint32_t ssrc) {
+ MutexLock lock(&mutex_rtcp_sender_);
+ ssrc_ = ssrc;
+}
+
+void RTCPSender::SetRemoteSSRC(uint32_t ssrc) {
+ MutexLock lock(&mutex_rtcp_sender_);
+ remote_ssrc_ = ssrc;
+}
+
+int32_t RTCPSender::SetCNAME(absl::string_view c_name) {
+ RTC_DCHECK_LT(c_name.size(), RTCP_CNAME_SIZE);
+ MutexLock lock(&mutex_rtcp_sender_);
+ cname_ = std::string(c_name);
+ return 0;
+}
+
+bool RTCPSender::TimeToSendRTCPReport(bool sendKeyframeBeforeRTP) const {
+ /*
+ For audio we use a configurable interval (default: 5 seconds).
+
+ For video we use a configurable interval (default: 1 second) for a BW
+ smaller than 360 kbit/s; technically we break the max 5% RTCP BW for
+ video below 10 kbit/s, but that should be extremely rare.
+
+ From RFC 3550:
+
+ Max RTCP BW is 5% of the session BW.
+ A sender report is approximately 65 bytes including CNAME.
+ A receiver report is approximately 28 bytes.
+
+ The RECOMMENDED value for the reduced minimum in seconds is 360
+ divided by the session bandwidth in kilobits/second. This minimum
+ is smaller than 5 seconds for bandwidths greater than 72 kb/s.
+
+ If the participant has not yet sent an RTCP packet (the variable
+ initial is true), the constant Tmin is set to half of the configured
+ interval.
+
+ The interval between RTCP packets is varied randomly over the
+ range [0.5, 1.5] times the calculated interval to avoid unintended
+ synchronization of all participants.
+
+ If we send:
+ If the participant is a sender (we_sent true), the constant C is
+ set to the average RTCP packet size (avg_rtcp_size) divided by 25%
+ of the RTCP bandwidth (rtcp_bw), and the constant n is set to the
+ number of senders.
+
+ If we receive only:
+ If we_sent is not true, the constant C is set
+ to the average RTCP packet size divided by 75% of the RTCP
+ bandwidth. The constant n is set to the number of receivers
+ (members - senders). If the number of senders is greater than
+ 25%, senders and receivers are treated together.
+
+ Reconsideration is NOT required for peer-to-peer; per the RFC, "timer
+ reconsideration" is employed. This algorithm implements a simple
+ back-off mechanism which causes users to hold back RTCP packet
+ transmission if the group sizes are increasing.
+
+ n = number of members
+ C = avg_size / (rtcpBW / 4)
+
+ 3. The deterministic calculated interval Td is set to max(Tmin, n*C).
+
+ 4. The calculated interval T is set to a number uniformly distributed
+ between 0.5 and 1.5 times the deterministic calculated interval.
+
+ 5. The resulting value of T is divided by e - 3/2 = 1.21828 to compensate
+ for the fact that the timer reconsideration algorithm converges to
+ a value of the RTCP bandwidth below the intended average.
+ */
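+ // Illustration of the reduced-minimum rule above: a 360 kbit/s session
+ // gives 360 / 360 = 1 second; a 72 kb/s session gives 360 / 72 = 5 seconds,
+ // the classic RTCP minimum.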
+
+ Timestamp now = clock_->CurrentTime();
+
+ MutexLock lock(&mutex_rtcp_sender_);
+ RTC_DCHECK(
+ (method_ == RtcpMode::kOff && !next_time_to_send_rtcp_.has_value()) ||
+ (method_ != RtcpMode::kOff && next_time_to_send_rtcp_.has_value()));
+ if (method_ == RtcpMode::kOff)
+ return false;
+
+ if (!audio_ && sendKeyframeBeforeRTP) {
+ // For video key-frames we want to send the RTCP packet before the large
+ // key-frame if we have a 100 ms margin.
+ now += RTCP_SEND_BEFORE_KEY_FRAME;
+ }
+
+ return now >= *next_time_to_send_rtcp_;
+}
+
+void RTCPSender::BuildSR(const RtcpContext& ctx, PacketSender& sender) {
+ // Timestamp shouldn't be estimated before first media frame.
+ RTC_DCHECK(last_frame_capture_time_.has_value());
+ // The timestamp of this RTCP packet should be estimated as the timestamp of
+ // the frame being captured at this moment. We are calculating that
+ // timestamp as the last frame's timestamp + the time since the last frame
+ // was captured.
+ int rtp_rate = rtp_clock_rates_khz_[last_payload_type_];
+ if (rtp_rate <= 0) {
+ rtp_rate =
+ (audio_ ? kBogusRtpRateForAudioRtcp : kVideoPayloadTypeFrequency) /
+ 1000;
+ }
+ // Round the current time to the closest millisecond, because NTP time is
+ // rounded when converted to milliseconds.
+ uint32_t rtp_timestamp =
+ timestamp_offset_ + last_rtp_timestamp_ +
+ ((ctx.now_.us() + 500) / 1000 - last_frame_capture_time_->ms()) *
+ rtp_rate;
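+ // For example, with a 90 kHz video clock (rtp_rate == 90 ticks per ms) and
+ // 20 ms elapsed since the last captured frame, the last RTP timestamp is
+ // advanced by 90 * 20 == 1800 ticks.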
+
+ rtcp::SenderReport report;
+ report.SetSenderSsrc(ssrc_);
+ report.SetNtp(clock_->ConvertTimestampToNtpTime(ctx.now_));
+ report.SetRtpTimestamp(rtp_timestamp);
+ report.SetPacketCount(ctx.feedback_state_.packets_sent);
+ report.SetOctetCount(ctx.feedback_state_.media_bytes_sent);
+ report.SetReportBlocks(CreateReportBlocks(ctx.feedback_state_));
+ sender.AppendPacket(report);
+}
+
+void RTCPSender::BuildSDES(const RtcpContext& ctx, PacketSender& sender) {
+ size_t length_cname = cname_.length();
+ RTC_CHECK_LT(length_cname, RTCP_CNAME_SIZE);
+
+ rtcp::Sdes sdes;
+ sdes.AddCName(ssrc_, cname_);
+ sender.AppendPacket(sdes);
+}
+
+void RTCPSender::BuildRR(const RtcpContext& ctx, PacketSender& sender) {
+ rtcp::ReceiverReport report;
+ report.SetSenderSsrc(ssrc_);
+ report.SetReportBlocks(CreateReportBlocks(ctx.feedback_state_));
+ if (method_ == RtcpMode::kCompound || !report.report_blocks().empty()) {
+ sender.AppendPacket(report);
+ }
+}
+
+void RTCPSender::BuildPLI(const RtcpContext& ctx, PacketSender& sender) {
+ rtcp::Pli pli;
+ pli.SetSenderSsrc(ssrc_);
+ pli.SetMediaSsrc(remote_ssrc_);
+
+ ++packet_type_counter_.pli_packets;
+ sender.AppendPacket(pli);
+}
+
+void RTCPSender::BuildFIR(const RtcpContext& ctx, PacketSender& sender) {
+ ++sequence_number_fir_;
+
+ rtcp::Fir fir;
+ fir.SetSenderSsrc(ssrc_);
+ fir.AddRequestTo(remote_ssrc_, sequence_number_fir_);
+
+ ++packet_type_counter_.fir_packets;
+ sender.AppendPacket(fir);
+}
+
+void RTCPSender::BuildREMB(const RtcpContext& ctx, PacketSender& sender) {
+ rtcp::Remb remb;
+ remb.SetSenderSsrc(ssrc_);
+ remb.SetBitrateBps(remb_bitrate_);
+ remb.SetSsrcs(remb_ssrcs_);
+ sender.AppendPacket(remb);
+}
+
+void RTCPSender::SetTargetBitrate(unsigned int target_bitrate) {
+ MutexLock lock(&mutex_rtcp_sender_);
+ tmmbr_send_bps_ = target_bitrate;
+}
+
+void RTCPSender::BuildTMMBR(const RtcpContext& ctx, PacketSender& sender) {
+ if (ctx.feedback_state_.receiver == nullptr)
+ return;
+ // Before sending the TMMBR, check the received TMMBN; only an owner is
+ // allowed to raise the bitrate:
+ // * If the sender is an owner of the TMMBN -> send TMMBR
+ // * If not an owner but the TMMBR would enter the TMMBN -> send TMMBR
+
+ // Get the current bounding set from the RTCP receiver.
+ bool tmmbr_owner = false;
+
+ // Holding mutex_rtcp_sender_ while calling RTCPReceiver, which will
+ // acquire its own receiver lock, is a potential deadlock, but since
+ // RTCPReceiver does not take the locks in the reverse order we should be
+ // fine.
+ std::vector<rtcp::TmmbItem> candidates =
+ ctx.feedback_state_.receiver->BoundingSet(&tmmbr_owner);
+
+ if (!candidates.empty()) {
+ for (const auto& candidate : candidates) {
+ if (candidate.bitrate_bps() == tmmbr_send_bps_ &&
+ candidate.packet_overhead() == packet_oh_send_) {
+ // Do not send the same tuple.
+ return;
+ }
+ }
+ if (!tmmbr_owner) {
+ // Use received bounding set as candidate set.
+ // Add current tuple.
+ candidates.emplace_back(ssrc_, tmmbr_send_bps_, packet_oh_send_);
+
+ // Find bounding set.
+ std::vector<rtcp::TmmbItem> bounding =
+ TMMBRHelp::FindBoundingSet(std::move(candidates));
+ tmmbr_owner = TMMBRHelp::IsOwner(bounding, ssrc_);
+ if (!tmmbr_owner) {
+ // Did not enter the bounding set; no point in sending this request.
+ return;
+ }
+ }
+ }
+
+ if (!tmmbr_send_bps_)
+ return;
+
+ rtcp::Tmmbr tmmbr;
+ tmmbr.SetSenderSsrc(ssrc_);
+ rtcp::TmmbItem request;
+ request.set_ssrc(remote_ssrc_);
+ request.set_bitrate_bps(tmmbr_send_bps_);
+ request.set_packet_overhead(packet_oh_send_);
+ tmmbr.AddTmmbr(request);
+ sender.AppendPacket(tmmbr);
+}
+
+void RTCPSender::BuildTMMBN(const RtcpContext& ctx, PacketSender& sender) {
+ rtcp::Tmmbn tmmbn;
+ tmmbn.SetSenderSsrc(ssrc_);
+ for (const rtcp::TmmbItem& tmmbr : tmmbn_to_send_) {
+ if (tmmbr.bitrate_bps() > 0) {
+ tmmbn.AddTmmbr(tmmbr);
+ }
+ }
+ sender.AppendPacket(tmmbn);
+}
+
+void RTCPSender::BuildAPP(const RtcpContext& ctx, PacketSender& sender) {
+ rtcp::App app;
+ app.SetSenderSsrc(ssrc_);
+ sender.AppendPacket(app);
+}
+
+void RTCPSender::BuildLossNotification(const RtcpContext& ctx,
+ PacketSender& sender) {
+ loss_notification_.SetSenderSsrc(ssrc_);
+ loss_notification_.SetMediaSsrc(remote_ssrc_);
+ sender.AppendPacket(loss_notification_);
+}
+
+void RTCPSender::BuildNACK(const RtcpContext& ctx, PacketSender& sender) {
+ rtcp::Nack nack;
+ nack.SetSenderSsrc(ssrc_);
+ nack.SetMediaSsrc(remote_ssrc_);
+ nack.SetPacketIds(ctx.nack_list_, ctx.nack_size_);
+
+ // Report stats.
+ for (int idx = 0; idx < ctx.nack_size_; ++idx) {
+ nack_stats_.ReportRequest(ctx.nack_list_[idx]);
+ }
+ packet_type_counter_.nack_requests = nack_stats_.requests();
+ packet_type_counter_.unique_nack_requests = nack_stats_.unique_requests();
+
+ ++packet_type_counter_.nack_packets;
+ sender.AppendPacket(nack);
+}
+
+void RTCPSender::BuildBYE(const RtcpContext& ctx, PacketSender& sender) {
+ rtcp::Bye bye;
+ bye.SetSenderSsrc(ssrc_);
+ bye.SetCsrcs(csrcs_);
+ sender.AppendPacket(bye);
+}
+
+void RTCPSender::BuildExtendedReports(const RtcpContext& ctx,
+ PacketSender& sender) {
+ rtcp::ExtendedReports xr;
+ xr.SetSenderSsrc(ssrc_);
+
+ if (!sending_ && xr_send_receiver_reference_time_enabled_) {
+ rtcp::Rrtr rrtr;
+ rrtr.SetNtp(clock_->ConvertTimestampToNtpTime(ctx.now_));
+ xr.SetRrtr(rrtr);
+ }
+
+ for (const rtcp::ReceiveTimeInfo& rti : ctx.feedback_state_.last_xr_rtis) {
+ xr.AddDlrrItem(rti);
+ }
+
+ if (send_video_bitrate_allocation_) {
+ rtcp::TargetBitrate target_bitrate;
+
+ for (int sl = 0; sl < kMaxSpatialLayers; ++sl) {
+ for (int tl = 0; tl < kMaxTemporalStreams; ++tl) {
+ if (video_bitrate_allocation_.HasBitrate(sl, tl)) {
+ target_bitrate.AddTargetBitrate(
+ sl, tl, video_bitrate_allocation_.GetBitrate(sl, tl) / 1000);
+ }
+ }
+ }
+
+ xr.SetTargetBitrate(target_bitrate);
+ send_video_bitrate_allocation_ = false;
+ }
+ sender.AppendPacket(xr);
+}
+
+int32_t RTCPSender::SendRTCP(const FeedbackState& feedback_state,
+ RTCPPacketType packet_type,
+ int32_t nack_size,
+ const uint16_t* nack_list) {
+ int32_t error_code = -1;
+ auto callback = [&](rtc::ArrayView<const uint8_t> packet) {
+ if (transport_->SendRtcp(packet.data(), packet.size())) {
+ error_code = 0;
+ if (event_log_) {
+ event_log_->Log(std::make_unique<RtcEventRtcpPacketOutgoing>(packet));
+ }
+ }
+ };
+ absl::optional<PacketSender> sender;
+ {
+ MutexLock lock(&mutex_rtcp_sender_);
+ sender.emplace(callback, max_packet_size_);
+ auto result = ComputeCompoundRTCPPacket(feedback_state, packet_type,
+ nack_size, nack_list, *sender);
+ if (result) {
+ return *result;
+ }
+ }
+ sender->Send();
+
+ return error_code;
+}
+
+absl::optional<int32_t> RTCPSender::ComputeCompoundRTCPPacket(
+ const FeedbackState& feedback_state,
+ RTCPPacketType packet_type,
+ int32_t nack_size,
+ const uint16_t* nack_list,
+ PacketSender& sender) {
+ if (method_ == RtcpMode::kOff) {
+ RTC_LOG(LS_WARNING) << "Can't send rtcp if it is disabled.";
+ return -1;
+ }
+ // Add the flag as volatile. Non-volatile entries will not be overwritten.
+ // The new volatile flag will be consumed by the end of this call.
+ SetFlag(packet_type, true);
+
+ // Prevent sending streams from sending an SR before any media has been sent.
+ const bool can_calculate_rtp_timestamp = last_frame_capture_time_.has_value();
+ if (!can_calculate_rtp_timestamp) {
+ bool consumed_sr_flag = ConsumeFlag(kRtcpSr);
+ bool consumed_report_flag = sending_ && ConsumeFlag(kRtcpReport);
+ bool sender_report = consumed_report_flag || consumed_sr_flag;
+ if (sender_report && AllVolatileFlagsConsumed()) {
+ // This call was for Sender Report and nothing else.
+ return 0;
+ }
+ if (sending_ && method_ == RtcpMode::kCompound) {
+ // Not allowed to send any RTCP packet without sender report.
+ return -1;
+ }
+ }
+
+ // We need to send our NTP even if we haven't received any reports.
+ RtcpContext context(feedback_state, nack_size, nack_list,
+ clock_->CurrentTime());
+
+ PrepareReport(feedback_state);
+
+ bool create_bye = false;
+
+ auto it = report_flags_.begin();
+ while (it != report_flags_.end()) {
+ uint32_t rtcp_packet_type = it->type;
+
+ if (it->is_volatile) {
+ report_flags_.erase(it++);
+ } else {
+ ++it;
+ }
+
+ // If there is a BYE, don't append now - save it and append it
+ // at the end later.
+ if (rtcp_packet_type == kRtcpBye) {
+ create_bye = true;
+ continue;
+ }
+ auto builder_it = builders_.find(rtcp_packet_type);
+ if (builder_it == builders_.end()) {
+ RTC_DCHECK_NOTREACHED()
+ << "Could not find builder for packet type " << rtcp_packet_type;
+ } else {
+ BuilderFunc func = builder_it->second;
+ (this->*func)(context, sender);
+ }
+ }
+
+ // Append the BYE now at the end
+ if (create_bye) {
+ BuildBYE(context, sender);
+ }
+
+ if (packet_type_counter_observer_ != nullptr) {
+ packet_type_counter_observer_->RtcpPacketTypesCounterUpdated(
+ remote_ssrc_, packet_type_counter_);
+ }
+
+ RTC_DCHECK(AllVolatileFlagsConsumed());
+ return absl::nullopt;
+}
+
+void RTCPSender::PrepareReport(const FeedbackState& feedback_state) {
+ bool generate_report;
+ if (IsFlagPresent(kRtcpSr) || IsFlagPresent(kRtcpRr)) {
+ // Report type already explicitly set, don't automatically populate.
+ generate_report = true;
+ RTC_DCHECK(ConsumeFlag(kRtcpReport) == false);
+ } else {
+ generate_report =
+ (ConsumeFlag(kRtcpReport) && method_ == RtcpMode::kReducedSize) ||
+ method_ == RtcpMode::kCompound;
+ if (generate_report)
+ SetFlag(sending_ ? kRtcpSr : kRtcpRr, true);
+ }
+
+ if (IsFlagPresent(kRtcpSr) || (IsFlagPresent(kRtcpRr) && !cname_.empty()))
+ SetFlag(kRtcpSdes, true);
+
+ if (generate_report) {
+ if ((!sending_ && xr_send_receiver_reference_time_enabled_) ||
+ !feedback_state.last_xr_rtis.empty() ||
+ send_video_bitrate_allocation_) {
+ SetFlag(kRtcpAnyExtendedReports, true);
+ }
+
+ // generate next time to send an RTCP report
+ TimeDelta min_interval = report_interval_;
+
+ if (!audio_ && sending_) {
+ // Calculate bandwidth for video; 360 / send bandwidth in kbit/s.
+ int send_bitrate_kbit = feedback_state.send_bitrate / 1000;
+ if (send_bitrate_kbit != 0) {
+ min_interval = std::min(TimeDelta::Millis(360000 / send_bitrate_kbit),
+ report_interval_);
+ }
+ }
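+ // For example, at 720 kbit/s this yields 360000 / 720 = 500 ms, below the
+ // default 1 s video report interval; at 180 kbit/s the 2000 ms result is
+ // capped at report_interval_.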
+
+ // The interval between RTCP packets is varied randomly over the
+ // range [1/2,3/2] times the calculated interval.
+ int min_interval_int = rtc::dchecked_cast<int>(min_interval.ms());
+ TimeDelta time_to_next = TimeDelta::Millis(
+ random_.Rand(min_interval_int * 1 / 2, min_interval_int * 3 / 2));
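+ // E.g. a 1000 ms interval schedules the next report uniformly within
+ // [500, 1500] ms.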
+
+ RTC_DCHECK(!time_to_next.IsZero());
+ SetNextRtcpSendEvaluationDuration(time_to_next);
+
+ // RTCPSender is expected to send either just sender reports or just
+ // receiver reports, never both in the same pass.
+ RTC_DCHECK(!(IsFlagPresent(kRtcpSr) && IsFlagPresent(kRtcpRr)));
+ }
+}
+
+std::vector<rtcp::ReportBlock> RTCPSender::CreateReportBlocks(
+ const FeedbackState& feedback_state) {
+ std::vector<rtcp::ReportBlock> result;
+ if (!receive_statistics_)
+ return result;
+
+ // TODO(danilchap): Support sending more than `RTCP_MAX_REPORT_BLOCKS` per
+ // compound rtcp packet when single rtcp module is used for multiple media
+ // streams.
+ result = receive_statistics_->RtcpReportBlocks(RTCP_MAX_REPORT_BLOCKS);
+
+ if (!result.empty() && ((feedback_state.last_rr_ntp_secs != 0) ||
+ (feedback_state.last_rr_ntp_frac != 0))) {
+ // Get our NTP as late as possible to avoid a race.
+ uint32_t now = CompactNtp(clock_->CurrentNtpTime());
+
+ uint32_t receive_time = feedback_state.last_rr_ntp_secs & 0x0000FFFF;
+ receive_time <<= 16;
+ receive_time += (feedback_state.last_rr_ntp_frac & 0xffff0000) >> 16;
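+ // `receive_time` is now in compact NTP format (RFC 3550 LSR): the low 16
+ // bits of the NTP seconds and the high 16 bits of the fraction, i.e. units
+ // of 1/65536 of a second, matching `now` above.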
+
+ uint32_t delay_since_last_sr = now - receive_time;
+ // TODO(danilchap): Instead of setting same value on all report blocks,
+ // set only when media_ssrc match sender ssrc of the sender report
+ // remote times were taken from.
+ for (auto& report_block : result) {
+ report_block.SetLastSr(feedback_state.remote_sr);
+ report_block.SetDelayLastSr(delay_since_last_sr);
+ }
+ }
+ return result;
+}
+
+void RTCPSender::SetCsrcs(const std::vector<uint32_t>& csrcs) {
+ RTC_DCHECK_LE(csrcs.size(), kRtpCsrcSize);
+ MutexLock lock(&mutex_rtcp_sender_);
+ csrcs_ = csrcs;
+}
+
+void RTCPSender::SetTmmbn(std::vector<rtcp::TmmbItem> bounding_set) {
+ MutexLock lock(&mutex_rtcp_sender_);
+ tmmbn_to_send_ = std::move(bounding_set);
+ SetFlag(kRtcpTmmbn, true);
+}
+
+void RTCPSender::SetFlag(uint32_t type, bool is_volatile) {
+ if (type & kRtcpAnyExtendedReports) {
+ report_flags_.insert(ReportFlag(kRtcpAnyExtendedReports, is_volatile));
+ } else {
+ report_flags_.insert(ReportFlag(type, is_volatile));
+ }
+}
+
+bool RTCPSender::IsFlagPresent(uint32_t type) const {
+ return report_flags_.find(ReportFlag(type, false)) != report_flags_.end();
+}
+
+bool RTCPSender::ConsumeFlag(uint32_t type, bool forced) {
+ auto it = report_flags_.find(ReportFlag(type, false));
+ if (it == report_flags_.end())
+ return false;
+ if (it->is_volatile || forced)
+ report_flags_.erase((it));
+ return true;
+}
+
+bool RTCPSender::AllVolatileFlagsConsumed() const {
+ for (const ReportFlag& flag : report_flags_) {
+ if (flag.is_volatile)
+ return false;
+ }
+ return true;
+}
+
+void RTCPSender::SetVideoBitrateAllocation(
+ const VideoBitrateAllocation& bitrate) {
+ MutexLock lock(&mutex_rtcp_sender_);
+ if (method_ == RtcpMode::kOff) {
+ RTC_LOG(LS_WARNING) << "Can't send rtcp if it is disabled.";
+ return;
+ }
+ // Check if this allocation is first ever, or has a different set of
+ // spatial/temporal layers signaled and enabled, if so trigger an rtcp report
+ // as soon as possible.
+ absl::optional<VideoBitrateAllocation> new_bitrate =
+ CheckAndUpdateLayerStructure(bitrate);
+ if (new_bitrate) {
+ video_bitrate_allocation_ = *new_bitrate;
+ RTC_LOG(LS_INFO) << "Emitting TargetBitrate XR for SSRC " << ssrc_
+ << " with new layers enabled/disabled: "
+ << video_bitrate_allocation_.ToString();
+ SetNextRtcpSendEvaluationDuration(TimeDelta::Zero());
+ } else {
+ video_bitrate_allocation_ = bitrate;
+ }
+
+ send_video_bitrate_allocation_ = true;
+ SetFlag(kRtcpAnyExtendedReports, true);
+}
+
+absl::optional<VideoBitrateAllocation> RTCPSender::CheckAndUpdateLayerStructure(
+ const VideoBitrateAllocation& bitrate) const {
+ absl::optional<VideoBitrateAllocation> updated_bitrate;
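+ // The layer structure changes when any layer flips between having and not
+ // having a bitrate set, or between a zero and a nonzero rate; e.g. an
+ // allocation that newly zeroes a previously active spatial layer must be
+ // signaled explicitly, which the loop below ensures.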
+ for (size_t si = 0; si < kMaxSpatialLayers; ++si) {
+ for (size_t ti = 0; ti < kMaxTemporalStreams; ++ti) {
+ if (!updated_bitrate &&
+ (bitrate.HasBitrate(si, ti) !=
+ video_bitrate_allocation_.HasBitrate(si, ti) ||
+ (bitrate.GetBitrate(si, ti) == 0) !=
+ (video_bitrate_allocation_.GetBitrate(si, ti) == 0))) {
+ updated_bitrate = bitrate;
+ }
+ if (video_bitrate_allocation_.GetBitrate(si, ti) > 0 &&
+ bitrate.GetBitrate(si, ti) == 0) {
+ // Make sure this stream disabling is explicitly signaled.
+ updated_bitrate->SetBitrate(si, ti, 0);
+ }
+ }
+ }
+
+ return updated_bitrate;
+}
+
+void RTCPSender::SendCombinedRtcpPacket(
+ std::vector<std::unique_ptr<rtcp::RtcpPacket>> rtcp_packets) {
+ size_t max_packet_size;
+ uint32_t ssrc;
+ {
+ MutexLock lock(&mutex_rtcp_sender_);
+ if (method_ == RtcpMode::kOff) {
+ RTC_LOG(LS_WARNING) << "Can't send rtcp if it is disabled.";
+ return;
+ }
+
+ max_packet_size = max_packet_size_;
+ ssrc = ssrc_;
+ }
+ RTC_DCHECK_LE(max_packet_size, IP_PACKET_SIZE);
+ auto callback = [&](rtc::ArrayView<const uint8_t> packet) {
+ if (transport_->SendRtcp(packet.data(), packet.size())) {
+ if (event_log_)
+ event_log_->Log(std::make_unique<RtcEventRtcpPacketOutgoing>(packet));
+ }
+ };
+ PacketSender sender(callback, max_packet_size);
+ for (auto& rtcp_packet : rtcp_packets) {
+ rtcp_packet->SetSenderSsrc(ssrc);
+ sender.AppendPacket(*rtcp_packet);
+ }
+ sender.Send();
+}
+
+void RTCPSender::SetNextRtcpSendEvaluationDuration(TimeDelta duration) {
+ next_time_to_send_rtcp_ = clock_->CurrentTime() + duration;
+ // TODO(bugs.webrtc.org/11581): make unconditional once downstream consumers
+ // are using the callback method.
+ if (schedule_next_rtcp_send_evaluation_function_)
+ schedule_next_rtcp_send_evaluation_function_(duration);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_sender.h b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_sender.h
new file mode 100644
index 0000000000..8f51e7947d
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_sender.h
@@ -0,0 +1,332 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_RTP_RTCP_SOURCE_RTCP_SENDER_H_
+#define MODULES_RTP_RTCP_SOURCE_RTCP_SENDER_H_
+
+#include <map>
+#include <memory>
+#include <set>
+#include <string>
+#include <vector>
+
+#include "absl/strings/string_view.h"
+#include "absl/types/optional.h"
+#include "api/call/transport.h"
+#include "api/units/time_delta.h"
+#include "api/units/timestamp.h"
+#include "api/video/video_bitrate_allocation.h"
+#include "modules/remote_bitrate_estimator/include/remote_bitrate_estimator.h"
+#include "modules/rtp_rtcp/include/receive_statistics.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "modules/rtp_rtcp/source/rtcp_nack_stats.h"
+#include "modules/rtp_rtcp/source/rtcp_packet.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/compound_packet.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/dlrr.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/loss_notification.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/report_block.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/tmmb_item.h"
+#include "modules/rtp_rtcp/source/rtp_rtcp_interface.h"
+#include "rtc_base/random.h"
+#include "rtc_base/synchronization/mutex.h"
+#include "rtc_base/thread_annotations.h"
+
+namespace webrtc {
+
+class RTCPReceiver;
+class RtcEventLog;
+
+class RTCPSender final {
+ public:
+ struct Configuration {
+ // TODO(bugs.webrtc.org/11581): Remove this temporary conversion utility
+ // once rtc_rtcp_impl.cc/h are gone.
+ static Configuration FromRtpRtcpConfiguration(
+ const RtpRtcpInterface::Configuration& config);
+
+ // True for an audio version of the RTP/RTCP module; false will create a
+ // video version.
+ bool audio = false;
+ // SSRC of the local media stream.
+ uint32_t local_media_ssrc = 0;
+ // The clock to use to read time. If nullptr then system clock will be used.
+ Clock* clock = nullptr;
+ // Transport object that will be called when packets are ready to be sent
+ // out on the network.
+ Transport* outgoing_transport = nullptr;
+ // Estimate RTT as non-sender as described in
+ // https://tools.ietf.org/html/rfc3611#section-4.4 and #section-4.5
+ bool non_sender_rtt_measurement = false;
+ // Optional callback which, if specified, is used by RTCPSender to schedule
+ // the next time to evaluate if RTCP should be sent by means of
+ // TimeToSendRTCPReport/SendRTCP.
+ // The RTCPSender client still needs to call TimeToSendRTCPReport/SendRTCP
+ // to actually get RTCP sent.
+ //
+ // Note: Using the callback is recommended, as it enables a design that
+ // does not rely on polling.
+ // TODO(bugs.webrtc.org/11581): Make mandatory once downstream consumers
+ // have migrated to the callback solution.
+ std::function<void(TimeDelta)> schedule_next_rtcp_send_evaluation_function;
+
+ RtcEventLog* event_log = nullptr;
+ absl::optional<TimeDelta> rtcp_report_interval;
+ ReceiveStatisticsProvider* receive_statistics = nullptr;
+ RtcpPacketTypeCounterObserver* rtcp_packet_type_counter_observer = nullptr;
+ };
+ struct FeedbackState {
+ FeedbackState();
+ FeedbackState(const FeedbackState&);
+ FeedbackState(FeedbackState&&);
+
+ ~FeedbackState();
+
+ uint32_t packets_sent;
+ size_t media_bytes_sent;
+ uint32_t send_bitrate;
+
+ uint32_t last_rr_ntp_secs;
+ uint32_t last_rr_ntp_frac;
+ uint32_t remote_sr;
+
+ std::vector<rtcp::ReceiveTimeInfo> last_xr_rtis;
+
+ // Used when generating TMMBR.
+ RTCPReceiver* receiver;
+ };
+
+ explicit RTCPSender(Configuration config);
+
+ RTCPSender() = delete;
+ RTCPSender(const RTCPSender&) = delete;
+ RTCPSender& operator=(const RTCPSender&) = delete;
+
+ virtual ~RTCPSender();
+
+ RtcpMode Status() const RTC_LOCKS_EXCLUDED(mutex_rtcp_sender_);
+ void SetRTCPStatus(RtcpMode method) RTC_LOCKS_EXCLUDED(mutex_rtcp_sender_);
+
+ bool Sending() const RTC_LOCKS_EXCLUDED(mutex_rtcp_sender_);
+ void SetSendingStatus(const FeedbackState& feedback_state,
+ bool enabled)
+ RTC_LOCKS_EXCLUDED(mutex_rtcp_sender_); // combine the functions
+
+ void SetNonSenderRttMeasurement(bool enabled)
+ RTC_LOCKS_EXCLUDED(mutex_rtcp_sender_);
+
+ void SetTimestampOffset(uint32_t timestamp_offset)
+ RTC_LOCKS_EXCLUDED(mutex_rtcp_sender_);
+
+ void SetLastRtpTime(uint32_t rtp_timestamp,
+ absl::optional<Timestamp> capture_time,
+ absl::optional<int8_t> payload_type)
+ RTC_LOCKS_EXCLUDED(mutex_rtcp_sender_);
+
+ void SetRtpClockRate(int8_t payload_type, int rtp_clock_rate_hz)
+ RTC_LOCKS_EXCLUDED(mutex_rtcp_sender_);
+
+ uint32_t SSRC() const;
+ void SetSsrc(uint32_t ssrc);
+
+ void SetRemoteSSRC(uint32_t ssrc) RTC_LOCKS_EXCLUDED(mutex_rtcp_sender_);
+
+ int32_t SetCNAME(absl::string_view cName)
+ RTC_LOCKS_EXCLUDED(mutex_rtcp_sender_);
+
+ bool TimeToSendRTCPReport(bool sendKeyframeBeforeRTP = false) const
+ RTC_LOCKS_EXCLUDED(mutex_rtcp_sender_);
+
+ int32_t SendRTCP(const FeedbackState& feedback_state,
+ RTCPPacketType packetType,
+ int32_t nackSize = 0,
+ const uint16_t* nackList = 0)
+ RTC_LOCKS_EXCLUDED(mutex_rtcp_sender_);
+
+ int32_t SendLossNotification(const FeedbackState& feedback_state,
+ uint16_t last_decoded_seq_num,
+ uint16_t last_received_seq_num,
+ bool decodability_flag,
+ bool buffering_allowed)
+ RTC_LOCKS_EXCLUDED(mutex_rtcp_sender_);
+
+ void SetRemb(int64_t bitrate_bps, std::vector<uint32_t> ssrcs)
+ RTC_LOCKS_EXCLUDED(mutex_rtcp_sender_);
+
+ void UnsetRemb() RTC_LOCKS_EXCLUDED(mutex_rtcp_sender_);
+
+ bool TMMBR() const RTC_LOCKS_EXCLUDED(mutex_rtcp_sender_);
+
+ void SetMaxRtpPacketSize(size_t max_packet_size)
+ RTC_LOCKS_EXCLUDED(mutex_rtcp_sender_);
+
+ void SetTmmbn(std::vector<rtcp::TmmbItem> bounding_set)
+ RTC_LOCKS_EXCLUDED(mutex_rtcp_sender_);
+
+ void SetCsrcs(const std::vector<uint32_t>& csrcs)
+ RTC_LOCKS_EXCLUDED(mutex_rtcp_sender_);
+
+ void SetTargetBitrate(unsigned int target_bitrate)
+ RTC_LOCKS_EXCLUDED(mutex_rtcp_sender_);
+ void SetVideoBitrateAllocation(const VideoBitrateAllocation& bitrate)
+ RTC_LOCKS_EXCLUDED(mutex_rtcp_sender_);
+ void SendCombinedRtcpPacket(
+ std::vector<std::unique_ptr<rtcp::RtcpPacket>> rtcp_packets)
+ RTC_LOCKS_EXCLUDED(mutex_rtcp_sender_);
+
+ private:
+ class RtcpContext;
+ class PacketSender;
+
+ absl::optional<int32_t> ComputeCompoundRTCPPacket(
+ const FeedbackState& feedback_state,
+ RTCPPacketType packet_type,
+ int32_t nack_size,
+ const uint16_t* nack_list,
+ PacketSender& sender) RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_rtcp_sender_);
+
+ // Determine which RTCP messages should be sent and setup flags.
+ void PrepareReport(const FeedbackState& feedback_state)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_rtcp_sender_);
+
+ std::vector<rtcp::ReportBlock> CreateReportBlocks(
+ const FeedbackState& feedback_state)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_rtcp_sender_);
+
+ void BuildSR(const RtcpContext& context, PacketSender& sender)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_rtcp_sender_);
+ void BuildRR(const RtcpContext& context, PacketSender& sender)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_rtcp_sender_);
+ void BuildSDES(const RtcpContext& context, PacketSender& sender)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_rtcp_sender_);
+ void BuildPLI(const RtcpContext& context, PacketSender& sender)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_rtcp_sender_);
+ void BuildREMB(const RtcpContext& context, PacketSender& sender)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_rtcp_sender_);
+ void BuildTMMBR(const RtcpContext& context, PacketSender& sender)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_rtcp_sender_);
+ void BuildTMMBN(const RtcpContext& context, PacketSender& sender)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_rtcp_sender_);
+ void BuildAPP(const RtcpContext& context, PacketSender& sender)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_rtcp_sender_);
+ void BuildLossNotification(const RtcpContext& context, PacketSender& sender)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_rtcp_sender_);
+ void BuildExtendedReports(const RtcpContext& context, PacketSender& sender)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_rtcp_sender_);
+ void BuildBYE(const RtcpContext& context, PacketSender& sender)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_rtcp_sender_);
+ void BuildFIR(const RtcpContext& context, PacketSender& sender)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_rtcp_sender_);
+ void BuildNACK(const RtcpContext& context, PacketSender& sender)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_rtcp_sender_);
+
+ // `duration` being TimeDelta::Zero() means schedule immediately.
+ void SetNextRtcpSendEvaluationDuration(TimeDelta duration)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_rtcp_sender_);
+
+ const bool audio_;
+ // TODO(bugs.webrtc.org/11581): `mutex_rtcp_sender_` shouldn't be required if
+ // we consistently run network related operations on the network thread.
+ // This is currently not possible due to callbacks from the process thread in
+ // ModuleRtpRtcpImpl2.
+ uint32_t ssrc_ RTC_GUARDED_BY(mutex_rtcp_sender_);
+ Clock* const clock_;
+ Random random_ RTC_GUARDED_BY(mutex_rtcp_sender_);
+ RtcpMode method_ RTC_GUARDED_BY(mutex_rtcp_sender_);
+
+ RtcEventLog* const event_log_;
+ Transport* const transport_;
+
+ const TimeDelta report_interval_;
+ // Set from
+ // RTCPSender::Configuration::schedule_next_rtcp_send_evaluation_function.
+ const std::function<void(TimeDelta)>
+ schedule_next_rtcp_send_evaluation_function_;
+
+ mutable Mutex mutex_rtcp_sender_;
+ bool sending_ RTC_GUARDED_BY(mutex_rtcp_sender_);
+
+ absl::optional<Timestamp> next_time_to_send_rtcp_
+ RTC_GUARDED_BY(mutex_rtcp_sender_);
+
+ uint32_t timestamp_offset_ RTC_GUARDED_BY(mutex_rtcp_sender_);
+ uint32_t last_rtp_timestamp_ RTC_GUARDED_BY(mutex_rtcp_sender_);
+ absl::optional<Timestamp> last_frame_capture_time_
+ RTC_GUARDED_BY(mutex_rtcp_sender_);
+ // SSRC that we receive on our RTP channel
+ uint32_t remote_ssrc_ RTC_GUARDED_BY(mutex_rtcp_sender_);
+ std::string cname_ RTC_GUARDED_BY(mutex_rtcp_sender_);
+
+ ReceiveStatisticsProvider* receive_statistics_
+ RTC_GUARDED_BY(mutex_rtcp_sender_);
+
+ // send CSRCs
+ std::vector<uint32_t> csrcs_ RTC_GUARDED_BY(mutex_rtcp_sender_);
+
+ // Full intra request
+ uint8_t sequence_number_fir_ RTC_GUARDED_BY(mutex_rtcp_sender_);
+
+ rtcp::LossNotification loss_notification_ RTC_GUARDED_BY(mutex_rtcp_sender_);
+
+ // REMB
+ int64_t remb_bitrate_ RTC_GUARDED_BY(mutex_rtcp_sender_);
+ std::vector<uint32_t> remb_ssrcs_ RTC_GUARDED_BY(mutex_rtcp_sender_);
+
+ std::vector<rtcp::TmmbItem> tmmbn_to_send_ RTC_GUARDED_BY(mutex_rtcp_sender_);
+ uint32_t tmmbr_send_bps_ RTC_GUARDED_BY(mutex_rtcp_sender_);
+ uint32_t packet_oh_send_ RTC_GUARDED_BY(mutex_rtcp_sender_);
+ size_t max_packet_size_ RTC_GUARDED_BY(mutex_rtcp_sender_);
+
+ // True if sending of XR Receiver reference time report is enabled.
+ bool xr_send_receiver_reference_time_enabled_
+ RTC_GUARDED_BY(mutex_rtcp_sender_);
+
+ RtcpPacketTypeCounterObserver* const packet_type_counter_observer_;
+ RtcpPacketTypeCounter packet_type_counter_ RTC_GUARDED_BY(mutex_rtcp_sender_);
+
+ RtcpNackStats nack_stats_ RTC_GUARDED_BY(mutex_rtcp_sender_);
+
+ VideoBitrateAllocation video_bitrate_allocation_
+ RTC_GUARDED_BY(mutex_rtcp_sender_);
+ bool send_video_bitrate_allocation_ RTC_GUARDED_BY(mutex_rtcp_sender_);
+
+ std::map<int8_t, int> rtp_clock_rates_khz_ RTC_GUARDED_BY(mutex_rtcp_sender_);
+ int8_t last_payload_type_ RTC_GUARDED_BY(mutex_rtcp_sender_);
+
+ absl::optional<VideoBitrateAllocation> CheckAndUpdateLayerStructure(
+ const VideoBitrateAllocation& bitrate) const
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_rtcp_sender_);
+
+ void SetFlag(uint32_t type, bool is_volatile)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_rtcp_sender_);
+ bool IsFlagPresent(uint32_t type) const
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_rtcp_sender_);
+ bool ConsumeFlag(uint32_t type, bool forced = false)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_rtcp_sender_);
+ bool AllVolatileFlagsConsumed() const
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_rtcp_sender_);
+ struct ReportFlag {
+ ReportFlag(uint32_t type, bool is_volatile)
+ : type(type), is_volatile(is_volatile) {}
+ bool operator<(const ReportFlag& flag) const { return type < flag.type; }
+ bool operator==(const ReportFlag& flag) const { return type == flag.type; }
+ const uint32_t type;
+ const bool is_volatile;
+ };
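+ // Note: ReportFlag ordering and equality ignore `is_volatile`, so
+ // std::set::insert is a no-op for a type already present; this is why
+ // non-volatile flags are never overwritten by SetFlag().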
+
+ std::set<ReportFlag> report_flags_ RTC_GUARDED_BY(mutex_rtcp_sender_);
+
+ typedef void (RTCPSender::*BuilderFunc)(const RtcpContext&, PacketSender&);
+ // Map from RTCPPacketType to builder.
+ std::map<uint32_t, BuilderFunc> builders_;
+};
+} // namespace webrtc
+
+#endif // MODULES_RTP_RTCP_SOURCE_RTCP_SENDER_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_sender_unittest.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_sender_unittest.cc
new file mode 100644
index 0000000000..ae59dc5b0c
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_sender_unittest.cc
@@ -0,0 +1,844 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtcp_sender.h"
+
+#include <memory>
+#include <utility>
+
+#include "absl/base/macros.h"
+#include "api/units/time_delta.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/bye.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/common_header.h"
+#include "modules/rtp_rtcp/source/rtp_packet_received.h"
+#include "modules/rtp_rtcp/source/rtp_rtcp_impl2.h"
+#include "rtc_base/rate_limiter.h"
+#include "rtc_base/thread.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "test/mock_transport.h"
+#include "test/rtcp_packet_parser.h"
+
+using ::testing::_;
+using ::testing::ElementsAre;
+using ::testing::Eq;
+using ::testing::Invoke;
+using ::testing::Property;
+using ::testing::SizeIs;
+
+namespace webrtc {
+
+class RtcpPacketTypeCounterObserverImpl : public RtcpPacketTypeCounterObserver {
+ public:
+ RtcpPacketTypeCounterObserverImpl() : ssrc_(0) {}
+ ~RtcpPacketTypeCounterObserverImpl() override = default;
+ void RtcpPacketTypesCounterUpdated(
+ uint32_t ssrc,
+ const RtcpPacketTypeCounter& packet_counter) override {
+ ssrc_ = ssrc;
+ counter_ = packet_counter;
+ }
+ uint32_t ssrc_;
+ RtcpPacketTypeCounter counter_;
+};
+
+class TestTransport : public Transport {
+ public:
+ TestTransport() {}
+
+ bool SendRtp(const uint8_t* /*data*/,
+ size_t /*len*/,
+ const PacketOptions& options) override {
+ return false;
+ }
+ bool SendRtcp(const uint8_t* data, size_t len) override {
+ parser_.Parse(data, len);
+ return true;
+ }
+ test::RtcpPacketParser parser_;
+};
+
+namespace {
+static const uint32_t kSenderSsrc = 0x11111111;
+static const uint32_t kRemoteSsrc = 0x22222222;
+static const uint32_t kStartRtpTimestamp = 0x34567;
+static const uint32_t kRtpTimestamp = 0x45678;
+
+std::unique_ptr<RTCPSender> CreateRtcpSender(
+ const RTCPSender::Configuration& config,
+ bool init_timestamps = true) {
+ auto rtcp_sender = std::make_unique<RTCPSender>(config);
+ rtcp_sender->SetRemoteSSRC(kRemoteSsrc);
+ if (init_timestamps) {
+ rtcp_sender->SetTimestampOffset(kStartRtpTimestamp);
+ rtcp_sender->SetLastRtpTime(kRtpTimestamp, config.clock->CurrentTime(),
+ /*payload_type=*/0);
+ }
+ return rtcp_sender;
+}
+} // namespace
+
+class RtcpSenderTest : public ::testing::Test {
+ protected:
+ RtcpSenderTest()
+ : clock_(1335900000),
+ receive_statistics_(ReceiveStatistics::Create(&clock_)) {
+ rtp_rtcp_impl_.reset(new ModuleRtpRtcpImpl2(GetDefaultRtpRtcpConfig()));
+ }
+
+ RTCPSender::Configuration GetDefaultConfig() {
+ RTCPSender::Configuration configuration;
+ configuration.audio = false;
+ configuration.clock = &clock_;
+ configuration.outgoing_transport = &test_transport_;
+ configuration.rtcp_report_interval = TimeDelta::Millis(1000);
+ configuration.receive_statistics = receive_statistics_.get();
+ configuration.local_media_ssrc = kSenderSsrc;
+ return configuration;
+ }
+
+ RtpRtcpInterface::Configuration GetDefaultRtpRtcpConfig() {
+ RTCPSender::Configuration config = GetDefaultConfig();
+ RtpRtcpInterface::Configuration result;
+ result.audio = config.audio;
+ result.clock = config.clock;
+ result.outgoing_transport = config.outgoing_transport;
+ result.rtcp_report_interval_ms = config.rtcp_report_interval->ms();
+ result.receive_statistics = config.receive_statistics;
+ result.local_media_ssrc = config.local_media_ssrc;
+ return result;
+ }
+
+ void InsertIncomingPacket(uint32_t remote_ssrc, uint16_t seq_num) {
+ RtpPacketReceived packet;
+ packet.SetSsrc(remote_ssrc);
+ packet.SetSequenceNumber(seq_num);
+ packet.SetTimestamp(12345);
+ packet.SetPayloadSize(100 - 12);
+ receive_statistics_->OnRtpPacket(packet);
+ }
+
+ test::RtcpPacketParser* parser() { return &test_transport_.parser_; }
+
+ RTCPSender::FeedbackState feedback_state() {
+ return rtp_rtcp_impl_->GetFeedbackState();
+ }
+
+ rtc::AutoThread main_thread_;
+ SimulatedClock clock_;
+ TestTransport test_transport_;
+ std::unique_ptr<ReceiveStatistics> receive_statistics_;
+ std::unique_ptr<ModuleRtpRtcpImpl2> rtp_rtcp_impl_;
+};
+
+TEST_F(RtcpSenderTest, SetRtcpStatus) {
+ auto rtcp_sender = CreateRtcpSender(GetDefaultConfig());
+ EXPECT_EQ(RtcpMode::kOff, rtcp_sender->Status());
+ rtcp_sender->SetRTCPStatus(RtcpMode::kReducedSize);
+ EXPECT_EQ(RtcpMode::kReducedSize, rtcp_sender->Status());
+}
+
+TEST_F(RtcpSenderTest, SetSendingStatus) {
+ auto rtcp_sender = CreateRtcpSender(GetDefaultConfig());
+ EXPECT_FALSE(rtcp_sender->Sending());
+ rtcp_sender->SetSendingStatus(feedback_state(), true);
+ EXPECT_TRUE(rtcp_sender->Sending());
+}
+
+TEST_F(RtcpSenderTest, NoPacketSentIfOff) {
+ auto rtcp_sender = CreateRtcpSender(GetDefaultConfig());
+ rtcp_sender->SetRTCPStatus(RtcpMode::kOff);
+ EXPECT_EQ(-1, rtcp_sender->SendRTCP(feedback_state(), kRtcpSr));
+}
+
+TEST_F(RtcpSenderTest, SendSr) {
+ const uint32_t kPacketCount = 0x12345;
+ const uint32_t kOctetCount = 0x23456;
+ auto rtcp_sender = CreateRtcpSender(GetDefaultConfig());
+ rtcp_sender->SetRTCPStatus(RtcpMode::kReducedSize);
+ RTCPSender::FeedbackState feedback_state = rtp_rtcp_impl_->GetFeedbackState();
+ rtcp_sender->SetSendingStatus(feedback_state, true);
+ feedback_state.packets_sent = kPacketCount;
+ feedback_state.media_bytes_sent = kOctetCount;
+ NtpTime ntp = clock_.CurrentNtpTime();
+ EXPECT_EQ(0, rtcp_sender->SendRTCP(feedback_state, kRtcpSr));
+ EXPECT_EQ(1, parser()->sender_report()->num_packets());
+ EXPECT_EQ(kSenderSsrc, parser()->sender_report()->sender_ssrc());
+ EXPECT_EQ(ntp, parser()->sender_report()->ntp());
+ EXPECT_EQ(kPacketCount, parser()->sender_report()->sender_packet_count());
+ EXPECT_EQ(kOctetCount, parser()->sender_report()->sender_octet_count());
+ EXPECT_EQ(kStartRtpTimestamp + kRtpTimestamp,
+ parser()->sender_report()->rtp_timestamp());
+ EXPECT_EQ(0U, parser()->sender_report()->report_blocks().size());
+}
+
+TEST_F(RtcpSenderTest, SendConsecutiveSrWithExactSlope) {
+ const uint32_t kPacketCount = 0x12345;
+ const uint32_t kOctetCount = 0x23456;
+ const int kTimeBetweenSRsUs = 10043; // Deliberately not a whole number of ms.
+ const int kExtraPackets = 30;
+ auto rtcp_sender = CreateRtcpSender(GetDefaultConfig());
+ // Make sure the clock is not exactly on a millisecond boundary.
+ clock_.AdvanceTimeMicroseconds(kTimeBetweenSRsUs);
+ rtcp_sender->SetRTCPStatus(RtcpMode::kReducedSize);
+ RTCPSender::FeedbackState feedback_state = rtp_rtcp_impl_->GetFeedbackState();
+ rtcp_sender->SetSendingStatus(feedback_state, true);
+ feedback_state.packets_sent = kPacketCount;
+ feedback_state.media_bytes_sent = kOctetCount;
+
+ EXPECT_EQ(0, rtcp_sender->SendRTCP(feedback_state, kRtcpSr));
+ EXPECT_EQ(1, parser()->sender_report()->num_packets());
+ NtpTime ntp1 = parser()->sender_report()->ntp();
+ uint32_t rtp1 = parser()->sender_report()->rtp_timestamp();
+
+ // Send more SRs to ensure the slope stays exact for different offsets.
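+ // With the 90 kHz video payload clock (kVideoPayloadTypeFrequency), each
+ // elapsed millisecond must advance the RTP timestamp by exactly 90 units.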
+ for (int packets = 1; packets <= kExtraPackets; ++packets) {
+ clock_.AdvanceTimeMicroseconds(kTimeBetweenSRsUs);
+ EXPECT_EQ(0, rtcp_sender->SendRTCP(feedback_state, kRtcpSr));
+ EXPECT_EQ(packets + 1, parser()->sender_report()->num_packets());
+
+ NtpTime ntp2 = parser()->sender_report()->ntp();
+ uint32_t rtp2 = parser()->sender_report()->rtp_timestamp();
+
+ uint32_t ntp_diff_in_rtp_units =
+ (ntp2.ToMs() - ntp1.ToMs()) * (kVideoPayloadTypeFrequency / 1000);
+ EXPECT_EQ(rtp2 - rtp1, ntp_diff_in_rtp_units);
+ }
+}
+
+TEST_F(RtcpSenderTest, DoNotSendSrBeforeRtp) {
+ RTCPSender::Configuration config;
+ config.clock = &clock_;
+ config.receive_statistics = receive_statistics_.get();
+ config.outgoing_transport = &test_transport_;
+ config.rtcp_report_interval = TimeDelta::Millis(1000);
+ config.local_media_ssrc = kSenderSsrc;
+ auto rtcp_sender = CreateRtcpSender(config, /*init_timestamps=*/false);
+ rtcp_sender->SetRTCPStatus(RtcpMode::kReducedSize);
+ rtcp_sender->SetSendingStatus(feedback_state(), true);
+
+ // A Sender Report shouldn't be sent, neither as an SR nor as a Report.
+ rtcp_sender->SendRTCP(feedback_state(), kRtcpSr);
+ EXPECT_EQ(0, parser()->sender_report()->num_packets());
+ rtcp_sender->SendRTCP(feedback_state(), kRtcpReport);
+ EXPECT_EQ(0, parser()->sender_report()->num_packets());
+ // Other packets (e.g. Pli) are allowed, even if useless.
+ EXPECT_EQ(0, rtcp_sender->SendRTCP(feedback_state(), kRtcpPli));
+ EXPECT_EQ(1, parser()->pli()->num_packets());
+}
+
+TEST_F(RtcpSenderTest, DoNotSendCompoundBeforeRtp) {
+ RTCPSender::Configuration config;
+ config.clock = &clock_;
+ config.receive_statistics = receive_statistics_.get();
+ config.outgoing_transport = &test_transport_;
+ config.rtcp_report_interval = TimeDelta::Millis(1000);
+ config.local_media_ssrc = kSenderSsrc;
+ auto rtcp_sender = CreateRtcpSender(config, /*init_timestamps=*/false);
+ rtcp_sender->SetRTCPStatus(RtcpMode::kCompound);
+ rtcp_sender->SetSendingStatus(feedback_state(), true);
+
+ // In compound mode no packets (e.g. PLI) are allowed on their own because
+ // a compound packet must start with a Sender Report.
+ EXPECT_EQ(-1, rtcp_sender->SendRTCP(feedback_state(), kRtcpPli));
+ EXPECT_EQ(0, parser()->pli()->num_packets());
+}
+
+TEST_F(RtcpSenderTest, SendRr) {
+ auto rtcp_sender = CreateRtcpSender(GetDefaultConfig());
+ rtcp_sender->SetRTCPStatus(RtcpMode::kCompound);
+ EXPECT_EQ(0, rtcp_sender->SendRTCP(feedback_state(), kRtcpRr));
+ EXPECT_EQ(1, parser()->receiver_report()->num_packets());
+ EXPECT_EQ(kSenderSsrc, parser()->receiver_report()->sender_ssrc());
+ EXPECT_EQ(0U, parser()->receiver_report()->report_blocks().size());
+}
+
+TEST_F(RtcpSenderTest, DoesntSendEmptyRrInReducedSizeMode) {
+ auto rtcp_sender = CreateRtcpSender(GetDefaultConfig());
+ rtcp_sender->SetRTCPStatus(RtcpMode::kReducedSize);
+ rtcp_sender->SendRTCP(feedback_state(), kRtcpRr);
+ EXPECT_EQ(parser()->receiver_report()->num_packets(), 0);
+}
+
+TEST_F(RtcpSenderTest, SendRrWithOneReportBlock) {
+ const uint16_t kSeqNum = 11111;
+ auto rtcp_sender = CreateRtcpSender(GetDefaultConfig());
+ InsertIncomingPacket(kRemoteSsrc, kSeqNum);
+ rtcp_sender->SetRTCPStatus(RtcpMode::kCompound);
+ EXPECT_EQ(0, rtcp_sender->SendRTCP(feedback_state(), kRtcpRr));
+ EXPECT_EQ(1, parser()->receiver_report()->num_packets());
+ EXPECT_EQ(kSenderSsrc, parser()->receiver_report()->sender_ssrc());
+ ASSERT_EQ(1U, parser()->receiver_report()->report_blocks().size());
+ const rtcp::ReportBlock& rb = parser()->receiver_report()->report_blocks()[0];
+ EXPECT_EQ(kRemoteSsrc, rb.source_ssrc());
+ EXPECT_EQ(0U, rb.fraction_lost());
+ EXPECT_EQ(0, rb.cumulative_lost_signed());
+ EXPECT_EQ(kSeqNum, rb.extended_high_seq_num());
+}
+
+TEST_F(RtcpSenderTest, SendRrWithTwoReportBlocks) {
+ const uint16_t kSeqNum = 11111;
+ auto rtcp_sender = CreateRtcpSender(GetDefaultConfig());
+ InsertIncomingPacket(kRemoteSsrc, kSeqNum);
+ InsertIncomingPacket(kRemoteSsrc + 1, kSeqNum + 1);
+ rtcp_sender->SetRTCPStatus(RtcpMode::kCompound);
+ EXPECT_EQ(0, rtcp_sender->SendRTCP(feedback_state(), kRtcpRr));
+ EXPECT_EQ(1, parser()->receiver_report()->num_packets());
+ EXPECT_EQ(kSenderSsrc, parser()->receiver_report()->sender_ssrc());
+ EXPECT_THAT(
+ parser()->receiver_report()->report_blocks(),
+ UnorderedElementsAre(
+ Property(&rtcp::ReportBlock::source_ssrc, Eq(kRemoteSsrc)),
+ Property(&rtcp::ReportBlock::source_ssrc, Eq(kRemoteSsrc + 1))));
+}
+
+TEST_F(RtcpSenderTest, SendSdes) {
+ auto rtcp_sender = CreateRtcpSender(GetDefaultConfig());
+ rtcp_sender->SetRTCPStatus(RtcpMode::kReducedSize);
+ EXPECT_EQ(0, rtcp_sender->SetCNAME("alice@host"));
+ EXPECT_EQ(0, rtcp_sender->SendRTCP(feedback_state(), kRtcpSdes));
+ EXPECT_EQ(1, parser()->sdes()->num_packets());
+ EXPECT_EQ(1U, parser()->sdes()->chunks().size());
+ EXPECT_EQ(kSenderSsrc, parser()->sdes()->chunks()[0].ssrc);
+ EXPECT_EQ("alice@host", parser()->sdes()->chunks()[0].cname);
+}
+
+TEST_F(RtcpSenderTest, SdesIncludedInCompoundPacket) {
+ auto rtcp_sender = CreateRtcpSender(GetDefaultConfig());
+ rtcp_sender->SetRTCPStatus(RtcpMode::kCompound);
+ EXPECT_EQ(0, rtcp_sender->SetCNAME("alice@host"));
+ EXPECT_EQ(0, rtcp_sender->SendRTCP(feedback_state(), kRtcpReport));
+ EXPECT_EQ(1, parser()->receiver_report()->num_packets());
+ EXPECT_EQ(1, parser()->sdes()->num_packets());
+ EXPECT_EQ(1U, parser()->sdes()->chunks().size());
+}
+
+TEST_F(RtcpSenderTest, SendBye) {
+ auto rtcp_sender = CreateRtcpSender(GetDefaultConfig());
+ rtcp_sender->SetRTCPStatus(RtcpMode::kReducedSize);
+ EXPECT_EQ(0, rtcp_sender->SendRTCP(feedback_state(), kRtcpBye));
+ EXPECT_EQ(1, parser()->bye()->num_packets());
+ EXPECT_EQ(kSenderSsrc, parser()->bye()->sender_ssrc());
+}
+
+TEST_F(RtcpSenderTest, StopSendingTriggersBye) {
+ auto rtcp_sender = CreateRtcpSender(GetDefaultConfig());
+ rtcp_sender->SetRTCPStatus(RtcpMode::kReducedSize);
+ rtcp_sender->SetSendingStatus(feedback_state(), true);
+ rtcp_sender->SetSendingStatus(feedback_state(), false);
+ EXPECT_EQ(1, parser()->bye()->num_packets());
+ EXPECT_EQ(kSenderSsrc, parser()->bye()->sender_ssrc());
+}
+
+TEST_F(RtcpSenderTest, SendFir) {
+ auto rtcp_sender = CreateRtcpSender(GetDefaultConfig());
+ rtcp_sender->SetRTCPStatus(RtcpMode::kReducedSize);
+ EXPECT_EQ(0, rtcp_sender->SendRTCP(feedback_state(), kRtcpFir));
+ EXPECT_EQ(1, parser()->fir()->num_packets());
+ EXPECT_EQ(kSenderSsrc, parser()->fir()->sender_ssrc());
+ EXPECT_EQ(1U, parser()->fir()->requests().size());
+ EXPECT_EQ(kRemoteSsrc, parser()->fir()->requests()[0].ssrc);
+ uint8_t seq = parser()->fir()->requests()[0].seq_nr;
+ EXPECT_EQ(0, rtcp_sender->SendRTCP(feedback_state(), kRtcpFir));
+ EXPECT_EQ(2, parser()->fir()->num_packets());
+ EXPECT_EQ(seq + 1, parser()->fir()->requests()[0].seq_nr);
+}
+
+TEST_F(RtcpSenderTest, SendPli) {
+ auto rtcp_sender = CreateRtcpSender(GetDefaultConfig());
+ rtcp_sender->SetRTCPStatus(RtcpMode::kReducedSize);
+ EXPECT_EQ(0, rtcp_sender->SendRTCP(feedback_state(), kRtcpPli));
+ EXPECT_EQ(1, parser()->pli()->num_packets());
+ EXPECT_EQ(kSenderSsrc, parser()->pli()->sender_ssrc());
+ EXPECT_EQ(kRemoteSsrc, parser()->pli()->media_ssrc());
+}
+
+TEST_F(RtcpSenderTest, SendNack) {
+ auto rtcp_sender = CreateRtcpSender(GetDefaultConfig());
+ rtcp_sender->SetRTCPStatus(RtcpMode::kReducedSize);
+ const uint16_t kList[] = {0, 1, 16};
+ EXPECT_EQ(0, rtcp_sender->SendRTCP(feedback_state(), kRtcpNack,
+ ABSL_ARRAYSIZE(kList), kList));
+ EXPECT_EQ(1, parser()->nack()->num_packets());
+ EXPECT_EQ(kSenderSsrc, parser()->nack()->sender_ssrc());
+ EXPECT_EQ(kRemoteSsrc, parser()->nack()->media_ssrc());
+ EXPECT_THAT(parser()->nack()->packet_ids(), ElementsAre(0, 1, 16));
+}
+
+TEST_F(RtcpSenderTest, SendLossNotificationBufferingNotAllowed) {
+ auto rtcp_sender = CreateRtcpSender(GetDefaultConfig());
+ rtcp_sender->SetRTCPStatus(RtcpMode::kReducedSize);
+ constexpr uint16_t kLastDecoded = 0x1234;
+ constexpr uint16_t kLastReceived = 0x4321;
+ constexpr bool kDecodabilityFlag = true;
+ constexpr bool kBufferingAllowed = false;
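+ // With buffering disallowed, the loss notification must be sent right away.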
+ EXPECT_EQ(rtcp_sender->SendLossNotification(feedback_state(), kLastDecoded,
+ kLastReceived, kDecodabilityFlag,
+ kBufferingAllowed),
+ 0);
+ EXPECT_EQ(parser()->processed_rtcp_packets(), 1u);
+ EXPECT_EQ(parser()->loss_notification()->num_packets(), 1);
+ EXPECT_EQ(kSenderSsrc, parser()->loss_notification()->sender_ssrc());
+ EXPECT_EQ(kRemoteSsrc, parser()->loss_notification()->media_ssrc());
+}
+
+TEST_F(RtcpSenderTest, SendLossNotificationBufferingAllowed) {
+ auto rtcp_sender = CreateRtcpSender(GetDefaultConfig());
+ rtcp_sender->SetRTCPStatus(RtcpMode::kReducedSize);
+ constexpr uint16_t kLastDecoded = 0x1234;
+ constexpr uint16_t kLastReceived = 0x4321;
+ constexpr bool kDecodabilityFlag = true;
+ constexpr bool kBufferingAllowed = true;
+ EXPECT_EQ(rtcp_sender->SendLossNotification(feedback_state(), kLastDecoded,
+ kLastReceived, kDecodabilityFlag,
+ kBufferingAllowed),
+ 0);
+
+ // No RTCP messages sent yet.
+ ASSERT_EQ(parser()->processed_rtcp_packets(), 0u);
+
+ // Sending another message also triggers sending the buffered LNTF message.
+ const uint16_t kList[] = {0, 1, 16};
+ EXPECT_EQ(rtcp_sender->SendRTCP(feedback_state(), kRtcpNack,
+ ABSL_ARRAYSIZE(kList), kList),
+ 0);
+
+ // Exactly one packet was produced, and it contained both the buffered LNTF
+ // and the message that triggered the send.
+ EXPECT_EQ(parser()->processed_rtcp_packets(), 1u);
+ EXPECT_EQ(parser()->loss_notification()->num_packets(), 1);
+ EXPECT_EQ(parser()->loss_notification()->sender_ssrc(), kSenderSsrc);
+ EXPECT_EQ(parser()->loss_notification()->media_ssrc(), kRemoteSsrc);
+ EXPECT_EQ(parser()->nack()->num_packets(), 1);
+ EXPECT_EQ(parser()->nack()->sender_ssrc(), kSenderSsrc);
+ EXPECT_EQ(parser()->nack()->media_ssrc(), kRemoteSsrc);
+}
+
+TEST_F(RtcpSenderTest, RembNotIncludedBeforeSet) {
+ auto rtcp_sender = CreateRtcpSender(GetDefaultConfig());
+ rtcp_sender->SetRTCPStatus(RtcpMode::kCompound);
+
+ rtcp_sender->SendRTCP(feedback_state(), kRtcpRr);
+
+ ASSERT_EQ(1, parser()->receiver_report()->num_packets());
+ EXPECT_EQ(0, parser()->remb()->num_packets());
+}
+
+TEST_F(RtcpSenderTest, RembNotIncludedAfterUnset) {
+ const int64_t kBitrate = 261011;
+ const std::vector<uint32_t> kSsrcs = {kRemoteSsrc, kRemoteSsrc + 1};
+ auto rtcp_sender = CreateRtcpSender(GetDefaultConfig());
+ rtcp_sender->SetRTCPStatus(RtcpMode::kCompound);
+ rtcp_sender->SetRemb(kBitrate, kSsrcs);
+ rtcp_sender->SendRTCP(feedback_state(), kRtcpRr);
+ ASSERT_EQ(1, parser()->receiver_report()->num_packets());
+ EXPECT_EQ(1, parser()->remb()->num_packets());
+
+ // Turn off REMB. rtcp_sender should no longer send it.
+ rtcp_sender->UnsetRemb();
+ rtcp_sender->SendRTCP(feedback_state(), kRtcpRr);
+ ASSERT_EQ(2, parser()->receiver_report()->num_packets());
+ EXPECT_EQ(1, parser()->remb()->num_packets());
+}
+
+TEST_F(RtcpSenderTest, SendRemb) {
+ const int64_t kBitrate = 261011;
+ const std::vector<uint32_t> kSsrcs = {kRemoteSsrc, kRemoteSsrc + 1};
+ auto rtcp_sender = CreateRtcpSender(GetDefaultConfig());
+ rtcp_sender->SetRTCPStatus(RtcpMode::kReducedSize);
+ rtcp_sender->SetRemb(kBitrate, kSsrcs);
+
+ rtcp_sender->SendRTCP(feedback_state(), kRtcpRemb);
+
+ EXPECT_EQ(1, parser()->remb()->num_packets());
+ EXPECT_EQ(kSenderSsrc, parser()->remb()->sender_ssrc());
+ EXPECT_EQ(kBitrate, parser()->remb()->bitrate_bps());
+ EXPECT_THAT(parser()->remb()->ssrcs(),
+ ElementsAre(kRemoteSsrc, kRemoteSsrc + 1));
+}
+
+TEST_F(RtcpSenderTest, RembIncludedInEachCompoundPacketAfterSet) {
+ const int kBitrate = 261011;
+ const std::vector<uint32_t> kSsrcs = {kRemoteSsrc, kRemoteSsrc + 1};
+ auto rtcp_sender = CreateRtcpSender(GetDefaultConfig());
+ rtcp_sender->SetRTCPStatus(RtcpMode::kCompound);
+ rtcp_sender->SetRemb(kBitrate, kSsrcs);
+
+ rtcp_sender->SendRTCP(feedback_state(), kRtcpReport);
+ EXPECT_EQ(1, parser()->remb()->num_packets());
+ // REMB should be included in each compound packet.
+ rtcp_sender->SendRTCP(feedback_state(), kRtcpReport);
+ EXPECT_EQ(2, parser()->remb()->num_packets());
+}
+
+TEST_F(RtcpSenderTest, SendXrWithDlrr) {
+ auto rtcp_sender = CreateRtcpSender(GetDefaultConfig());
+ rtcp_sender->SetRTCPStatus(RtcpMode::kCompound);
+ RTCPSender::FeedbackState feedback_state = rtp_rtcp_impl_->GetFeedbackState();
+ rtcp::ReceiveTimeInfo last_xr_rr;
+ last_xr_rr.ssrc = 0x11111111;
+ last_xr_rr.last_rr = 0x22222222;
+ last_xr_rr.delay_since_last_rr = 0x33333333;
+ feedback_state.last_xr_rtis.push_back(last_xr_rr);
+ EXPECT_EQ(0, rtcp_sender->SendRTCP(feedback_state, kRtcpReport));
+ EXPECT_EQ(1, parser()->xr()->num_packets());
+ EXPECT_EQ(kSenderSsrc, parser()->xr()->sender_ssrc());
+ ASSERT_THAT(parser()->xr()->dlrr().sub_blocks(), SizeIs(1));
+ EXPECT_EQ(last_xr_rr.ssrc, parser()->xr()->dlrr().sub_blocks()[0].ssrc);
+ EXPECT_EQ(last_xr_rr.last_rr, parser()->xr()->dlrr().sub_blocks()[0].last_rr);
+ EXPECT_EQ(last_xr_rr.delay_since_last_rr,
+ parser()->xr()->dlrr().sub_blocks()[0].delay_since_last_rr);
+}
+
+TEST_F(RtcpSenderTest, SendXrWithMultipleDlrrSubBlocks) {
+ const size_t kNumReceivers = 2;
+ auto rtcp_sender = CreateRtcpSender(GetDefaultConfig());
+ rtcp_sender->SetRTCPStatus(RtcpMode::kCompound);
+ RTCPSender::FeedbackState feedback_state = rtp_rtcp_impl_->GetFeedbackState();
+ for (size_t i = 0; i < kNumReceivers; ++i) {
+ rtcp::ReceiveTimeInfo last_xr_rr;
+ last_xr_rr.ssrc = i;
+ last_xr_rr.last_rr = (i + 1) * 100;
+ last_xr_rr.delay_since_last_rr = (i + 2) * 200;
+ feedback_state.last_xr_rtis.push_back(last_xr_rr);
+ }
+
+ EXPECT_EQ(0, rtcp_sender->SendRTCP(feedback_state, kRtcpReport));
+ EXPECT_EQ(1, parser()->xr()->num_packets());
+ EXPECT_EQ(kSenderSsrc, parser()->xr()->sender_ssrc());
+ ASSERT_THAT(parser()->xr()->dlrr().sub_blocks(), SizeIs(kNumReceivers));
+ for (size_t i = 0; i < kNumReceivers; ++i) {
+ EXPECT_EQ(feedback_state.last_xr_rtis[i].ssrc,
+ parser()->xr()->dlrr().sub_blocks()[i].ssrc);
+ EXPECT_EQ(feedback_state.last_xr_rtis[i].last_rr,
+ parser()->xr()->dlrr().sub_blocks()[i].last_rr);
+ EXPECT_EQ(feedback_state.last_xr_rtis[i].delay_since_last_rr,
+ parser()->xr()->dlrr().sub_blocks()[i].delay_since_last_rr);
+ }
+}
+
+TEST_F(RtcpSenderTest, SendXrWithRrtr) {
+ RTCPSender::Configuration config = GetDefaultConfig();
+ config.non_sender_rtt_measurement = true;
+ auto rtcp_sender = CreateRtcpSender(config);
+ rtcp_sender->SetRTCPStatus(RtcpMode::kCompound);
+ rtcp_sender->SetSendingStatus(feedback_state(), false);
+ NtpTime ntp = clock_.CurrentNtpTime();
+ EXPECT_EQ(0, rtcp_sender->SendRTCP(feedback_state(), kRtcpReport));
+ EXPECT_EQ(1, parser()->xr()->num_packets());
+ EXPECT_EQ(kSenderSsrc, parser()->xr()->sender_ssrc());
+ EXPECT_FALSE(parser()->xr()->dlrr());
+ ASSERT_TRUE(parser()->xr()->rrtr());
+ EXPECT_EQ(ntp, parser()->xr()->rrtr()->ntp());
+}
+
+// Same test as above, but enable Rrtr with the setter.
+TEST_F(RtcpSenderTest, SendXrWithRrtrUsingSetter) {
+ RTCPSender::Configuration config = GetDefaultConfig();
+ config.non_sender_rtt_measurement = false;
+ auto rtcp_sender = CreateRtcpSender(config);
+ rtcp_sender->SetNonSenderRttMeasurement(true);
+ rtcp_sender->SetRTCPStatus(RtcpMode::kCompound);
+ rtcp_sender->SetSendingStatus(feedback_state(), false);
+ NtpTime ntp = clock_.CurrentNtpTime();
+ EXPECT_EQ(0, rtcp_sender->SendRTCP(feedback_state(), kRtcpReport));
+ EXPECT_EQ(1, parser()->xr()->num_packets());
+ EXPECT_EQ(kSenderSsrc, parser()->xr()->sender_ssrc());
+ EXPECT_FALSE(parser()->xr()->dlrr());
+ ASSERT_TRUE(parser()->xr()->rrtr());
+ EXPECT_EQ(ntp, parser()->xr()->rrtr()->ntp());
+}
+
+// Same test as above, but disable Rrtr with the setter.
+TEST_F(RtcpSenderTest, SendsNoRrtrUsingSetter) {
+ RTCPSender::Configuration config = GetDefaultConfig();
+ config.non_sender_rtt_measurement = true;
+ auto rtcp_sender = CreateRtcpSender(config);
+ rtcp_sender->SetNonSenderRttMeasurement(false);
+ rtcp_sender->SetRTCPStatus(RtcpMode::kCompound);
+ rtcp_sender->SetSendingStatus(feedback_state(), false);
+ EXPECT_EQ(0, rtcp_sender->SendRTCP(feedback_state(), kRtcpReport));
+ EXPECT_EQ(0, parser()->xr()->num_packets());
+}
+
+TEST_F(RtcpSenderTest, TestNoXrRrtrSentIfSending) {
+ RTCPSender::Configuration config = GetDefaultConfig();
+ config.non_sender_rtt_measurement = true;
+ auto rtcp_sender = CreateRtcpSender(config);
+ rtcp_sender->SetRTCPStatus(RtcpMode::kCompound);
+ rtcp_sender->SetSendingStatus(feedback_state(), true);
+ EXPECT_EQ(0, rtcp_sender->SendRTCP(feedback_state(), kRtcpReport));
+ EXPECT_EQ(0, parser()->xr()->num_packets());
+}
+
+TEST_F(RtcpSenderTest, TestNoXrRrtrSentIfNotEnabled) {
+ RTCPSender::Configuration config = GetDefaultConfig();
+ config.non_sender_rtt_measurement = false;
+ auto rtcp_sender = CreateRtcpSender(config);
+ rtcp_sender->SetRTCPStatus(RtcpMode::kCompound);
+ rtcp_sender->SetSendingStatus(feedback_state(), false);
+ EXPECT_EQ(0, rtcp_sender->SendRTCP(feedback_state(), kRtcpReport));
+ EXPECT_EQ(0, parser()->xr()->num_packets());
+}
+
+TEST_F(RtcpSenderTest, TestRegisterRtcpPacketTypeObserver) {
+ RtcpPacketTypeCounterObserverImpl observer;
+ RTCPSender::Configuration config;
+ config.clock = &clock_;
+ config.receive_statistics = receive_statistics_.get();
+ config.outgoing_transport = &test_transport_;
+ config.rtcp_packet_type_counter_observer = &observer;
+ config.rtcp_report_interval = TimeDelta::Millis(1000);
+ auto rtcp_sender = CreateRtcpSender(config);
+ rtcp_sender->SetRTCPStatus(RtcpMode::kReducedSize);
+ EXPECT_EQ(0, rtcp_sender->SendRTCP(feedback_state(), kRtcpPli));
+ EXPECT_EQ(1, parser()->pli()->num_packets());
+ EXPECT_EQ(kRemoteSsrc, observer.ssrc_);
+ EXPECT_EQ(1U, observer.counter_.pli_packets);
+}
+
+TEST_F(RtcpSenderTest, SendTmmbr) {
+ const unsigned int kBitrateBps = 312000;
+ auto rtcp_sender = CreateRtcpSender(GetDefaultConfig());
+ rtcp_sender->SetRTCPStatus(RtcpMode::kReducedSize);
+ rtcp_sender->SetTargetBitrate(kBitrateBps);
+ EXPECT_EQ(0, rtcp_sender->SendRTCP(feedback_state(), kRtcpTmmbr));
+ EXPECT_EQ(1, parser()->tmmbr()->num_packets());
+ EXPECT_EQ(kSenderSsrc, parser()->tmmbr()->sender_ssrc());
+ EXPECT_EQ(1U, parser()->tmmbr()->requests().size());
+ EXPECT_EQ(kBitrateBps, parser()->tmmbr()->requests()[0].bitrate_bps());
+ // TODO(asapersson): tmmbr_item()->Overhead() looks broken, always zero.
+}
+
+TEST_F(RtcpSenderTest, SendTmmbn) {
+ auto rtcp_sender = CreateRtcpSender(GetDefaultConfig());
+ rtcp_sender->SetRTCPStatus(RtcpMode::kCompound);
+ rtcp_sender->SetSendingStatus(feedback_state(), true);
+ std::vector<rtcp::TmmbItem> bounding_set;
+ const uint32_t kBitrateBps = 32768000;
+ const uint32_t kPacketOh = 40;
+ const uint32_t kSourceSsrc = 12345;
+ const rtcp::TmmbItem tmmbn(kSourceSsrc, kBitrateBps, kPacketOh);
+ bounding_set.push_back(tmmbn);
+ rtcp_sender->SetTmmbn(bounding_set);
+
+ EXPECT_EQ(0, rtcp_sender->SendRTCP(feedback_state(), kRtcpSr));
+ EXPECT_EQ(1, parser()->sender_report()->num_packets());
+ EXPECT_EQ(1, parser()->tmmbn()->num_packets());
+ EXPECT_EQ(kSenderSsrc, parser()->tmmbn()->sender_ssrc());
+ EXPECT_EQ(1U, parser()->tmmbn()->items().size());
+ EXPECT_EQ(kBitrateBps, parser()->tmmbn()->items()[0].bitrate_bps());
+ EXPECT_EQ(kPacketOh, parser()->tmmbn()->items()[0].packet_overhead());
+ EXPECT_EQ(kSourceSsrc, parser()->tmmbn()->items()[0].ssrc());
+}
+
+// This test is written to verify actual behaviour. It does not seem
+// to make much sense to send an empty TMMBN, since there is no place
+// to put an actual limit here. It merely conveys that no limit is set,
+// which is the starting assumption anyway.
+// See http://code.google.com/p/webrtc/issues/detail?id=468 for one
+// situation where this caused confusion.
+TEST_F(RtcpSenderTest, SendsTmmbnIfSetAndEmpty) {
+ auto rtcp_sender = CreateRtcpSender(GetDefaultConfig());
+ rtcp_sender->SetRTCPStatus(RtcpMode::kCompound);
+ rtcp_sender->SetSendingStatus(feedback_state(), true);
+ std::vector<rtcp::TmmbItem> bounding_set;
+ rtcp_sender->SetTmmbn(bounding_set);
+ EXPECT_EQ(0, rtcp_sender->SendRTCP(feedback_state(), kRtcpSr));
+ EXPECT_EQ(1, parser()->sender_report()->num_packets());
+ EXPECT_EQ(1, parser()->tmmbn()->num_packets());
+ EXPECT_EQ(kSenderSsrc, parser()->tmmbn()->sender_ssrc());
+ EXPECT_EQ(0U, parser()->tmmbn()->items().size());
+}
+
+// This test is written to verify that BYE is always the last packet
+// type in an RTCP compound packet. The rtcp_sender is recreated with
+// mock_transport, which checks that BYE is at the end of each RTCP
+// compound packet.
+TEST_F(RtcpSenderTest, ByeMustBeLast) {
+ MockTransport mock_transport;
+ EXPECT_CALL(mock_transport, SendRtcp(_, _))
+ .WillOnce(Invoke([](const uint8_t* data, size_t len) {
+ const uint8_t* next_packet = data;
+ const uint8_t* const packet_end = data + len;
+ rtcp::CommonHeader packet;
+ while (next_packet < packet_end) {
+ EXPECT_TRUE(packet.Parse(next_packet, packet_end - next_packet));
+ next_packet = packet.NextPacket();
+ if (packet.type() ==
+ rtcp::Bye::kPacketType) // Main test expectation.
+ EXPECT_EQ(0, packet_end - next_packet)
+ << "Bye packet should be last in a compound RTCP packet.";
+ if (next_packet == packet_end) // Validate test was set correctly.
+ EXPECT_EQ(packet.type(), rtcp::Bye::kPacketType)
+ << "Last packet in this test expected to be Bye.";
+ }
+
+ return true;
+ }));
+
+ // Reconfigure rtcp_sender with mock_transport.
+ RTCPSender::Configuration config;
+ config.clock = &clock_;
+ config.receive_statistics = receive_statistics_.get();
+ config.outgoing_transport = &mock_transport;
+ config.rtcp_report_interval = TimeDelta::Millis(1000);
+ config.local_media_ssrc = kSenderSsrc;
+ auto rtcp_sender = CreateRtcpSender(config);
+
+ rtcp_sender->SetTimestampOffset(kStartRtpTimestamp);
+ rtcp_sender->SetLastRtpTime(kRtpTimestamp, clock_.CurrentTime(),
+ /*payload_type=*/0);
+
+ // Set up REMB info to be included with BYE.
+ rtcp_sender->SetRTCPStatus(RtcpMode::kCompound);
+ rtcp_sender->SetRemb(1234, {});
+ EXPECT_EQ(0, rtcp_sender->SendRTCP(feedback_state(), kRtcpBye));
+}
+
+TEST_F(RtcpSenderTest, SendXrWithTargetBitrate) {
+ auto rtcp_sender = CreateRtcpSender(GetDefaultConfig());
+ rtcp_sender->SetRTCPStatus(RtcpMode::kCompound);
+ const size_t kNumSpatialLayers = 2;
+ const size_t kNumTemporalLayers = 2;
+ VideoBitrateAllocation allocation;
+ for (size_t sl = 0; sl < kNumSpatialLayers; ++sl) {
+ uint32_t start_bitrate_bps = (sl + 1) * 100000;
+ for (size_t tl = 0; tl < kNumTemporalLayers; ++tl)
+ allocation.SetBitrate(sl, tl, start_bitrate_bps + (tl * 20000));
+ }
+ rtcp_sender->SetVideoBitrateAllocation(allocation);
+
+ EXPECT_EQ(0, rtcp_sender->SendRTCP(feedback_state(), kRtcpReport));
+ EXPECT_EQ(1, parser()->xr()->num_packets());
+ EXPECT_EQ(kSenderSsrc, parser()->xr()->sender_ssrc());
+ const absl::optional<rtcp::TargetBitrate>& target_bitrate =
+ parser()->xr()->target_bitrate();
+ ASSERT_TRUE(target_bitrate);
+ const std::vector<rtcp::TargetBitrate::BitrateItem>& bitrates =
+ target_bitrate->GetTargetBitrates();
+ EXPECT_EQ(kNumSpatialLayers * kNumTemporalLayers, bitrates.size());
+
+ for (size_t sl = 0; sl < kNumSpatialLayers; ++sl) {
+ uint32_t start_bitrate_bps = (sl + 1) * 100000;
+ for (size_t tl = 0; tl < kNumTemporalLayers; ++tl) {
+ size_t index = (sl * kNumTemporalLayers) + tl;
+ const rtcp::TargetBitrate::BitrateItem& item = bitrates[index];
+ EXPECT_EQ(sl, item.spatial_layer);
+ EXPECT_EQ(tl, item.temporal_layer);
+ EXPECT_EQ(start_bitrate_bps + (tl * 20000),
+ item.target_bitrate_kbps * 1000);
+ }
+ }
+}
+
+TEST_F(RtcpSenderTest, SendImmediateXrWithTargetBitrate) {
+ // Initialize. Send a first report right away.
+ auto rtcp_sender = CreateRtcpSender(GetDefaultConfig());
+ rtcp_sender->SetRTCPStatus(RtcpMode::kCompound);
+ EXPECT_EQ(0, rtcp_sender->SendRTCP(feedback_state(), kRtcpReport));
+ clock_.AdvanceTimeMilliseconds(5);
+
+ // A video bitrate allocation was generated; save it until the next report.
+ VideoBitrateAllocation allocation;
+ allocation.SetBitrate(0, 0, 100000);
+ rtcp_sender->SetVideoBitrateAllocation(allocation);
+ // First seen instance will be sent immediately.
+ EXPECT_TRUE(rtcp_sender->TimeToSendRTCPReport(false));
+ EXPECT_EQ(0, rtcp_sender->SendRTCP(feedback_state(), kRtcpReport));
+ clock_.AdvanceTimeMilliseconds(5);
+
+ // Updating an existing layer's bitrate does not qualify for immediate sending.
+ allocation.SetBitrate(0, 0, 150000);
+ rtcp_sender->SetVideoBitrateAllocation(allocation);
+ EXPECT_FALSE(rtcp_sender->TimeToSendRTCPReport(false));
+
+ // A new spatial layer is enabled; signal this as soon as possible.
+ allocation.SetBitrate(1, 0, 200000);
+ rtcp_sender->SetVideoBitrateAllocation(allocation);
+ EXPECT_TRUE(rtcp_sender->TimeToSendRTCPReport(false));
+ EXPECT_EQ(0, rtcp_sender->SendRTCP(feedback_state(), kRtcpReport));
+ clock_.AdvanceTimeMilliseconds(5);
+
+ // Explicitly disable top layer. The same set of layers now has a bitrate
+ // defined, but the explicit 0 indicates shutdown. Signal immediately.
+ allocation.SetBitrate(1, 0, 0);
+ EXPECT_FALSE(rtcp_sender->TimeToSendRTCPReport(false));
+ rtcp_sender->SetVideoBitrateAllocation(allocation);
+ EXPECT_TRUE(rtcp_sender->TimeToSendRTCPReport(false));
+}
+
+TEST_F(RtcpSenderTest, SendTargetBitrateExplicitZeroOnStreamRemoval) {
+ // Set up and send a bitrate allocation with two layers.
+
+ auto rtcp_sender = CreateRtcpSender(GetDefaultConfig());
+ rtcp_sender->SetRTCPStatus(RtcpMode::kCompound);
+ VideoBitrateAllocation allocation;
+ allocation.SetBitrate(0, 0, 100000);
+ allocation.SetBitrate(1, 0, 200000);
+ rtcp_sender->SetVideoBitrateAllocation(allocation);
+ EXPECT_EQ(0, rtcp_sender->SendRTCP(feedback_state(), kRtcpReport));
+ absl::optional<rtcp::TargetBitrate> target_bitrate =
+ parser()->xr()->target_bitrate();
+ ASSERT_TRUE(target_bitrate);
+ std::vector<rtcp::TargetBitrate::BitrateItem> bitrates =
+ target_bitrate->GetTargetBitrates();
+ ASSERT_EQ(2u, bitrates.size());
+ EXPECT_EQ(bitrates[0].target_bitrate_kbps,
+ allocation.GetBitrate(0, 0) / 1000);
+ EXPECT_EQ(bitrates[1].target_bitrate_kbps,
+ allocation.GetBitrate(1, 0) / 1000);
+
+ // Create a new allocation, where the second stream is no longer available.
+ VideoBitrateAllocation new_allocation;
+ new_allocation.SetBitrate(0, 0, 150000);
+ rtcp_sender->SetVideoBitrateAllocation(new_allocation);
+ EXPECT_EQ(0, rtcp_sender->SendRTCP(feedback_state(), kRtcpReport));
+ target_bitrate = parser()->xr()->target_bitrate();
+ ASSERT_TRUE(target_bitrate);
+ bitrates = target_bitrate->GetTargetBitrates();
+
+ // Two bitrates should still be set, with an explicit entry indicating the
+ // removed stream is gone.
+ ASSERT_EQ(2u, bitrates.size());
+ EXPECT_EQ(bitrates[0].target_bitrate_kbps,
+ new_allocation.GetBitrate(0, 0) / 1000);
+ EXPECT_EQ(bitrates[1].target_bitrate_kbps, 0u);
+}
+
+TEST_F(RtcpSenderTest, DoesntScheduleInitialReportWhenSsrcSetOnConstruction) {
+ auto rtcp_sender = CreateRtcpSender(GetDefaultConfig());
+ rtcp_sender->SetRTCPStatus(RtcpMode::kReducedSize);
+ rtcp_sender->SetRemoteSSRC(kRemoteSsrc);
+ // New report should not have been scheduled yet.
+ clock_.AdvanceTimeMilliseconds(100);
+ EXPECT_FALSE(rtcp_sender->TimeToSendRTCPReport(false));
+}
+
+TEST_F(RtcpSenderTest, SendsCombinedRtcpPacket) {
+ auto rtcp_sender = CreateRtcpSender(GetDefaultConfig());
+ rtcp_sender->SetRTCPStatus(RtcpMode::kReducedSize);
+
+ std::vector<std::unique_ptr<rtcp::RtcpPacket>> packets;
+ auto transport_feedback = std::make_unique<rtcp::TransportFeedback>();
+ transport_feedback->AddReceivedPacket(321, Timestamp::Millis(10));
+ packets.push_back(std::move(transport_feedback));
+ auto remote_estimate = std::make_unique<rtcp::RemoteEstimate>();
+ packets.push_back(std::move(remote_estimate));
+ rtcp_sender->SendCombinedRtcpPacket(std::move(packets));
+
+ EXPECT_EQ(parser()->transport_feedback()->num_packets(), 1);
+ EXPECT_EQ(parser()->transport_feedback()->sender_ssrc(), kSenderSsrc);
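+ // rtcp::RemoteEstimate is serialized as an RTCP APP packet.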
+ EXPECT_EQ(parser()->app()->num_packets(), 1);
+ EXPECT_EQ(parser()->app()->sender_ssrc(), kSenderSsrc);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_transceiver.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_transceiver.cc
new file mode 100644
index 0000000000..f265bd5825
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_transceiver.cc
@@ -0,0 +1,150 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtcp_transceiver.h"
+
+#include <memory>
+#include <utility>
+#include <vector>
+
+#include "absl/cleanup/cleanup.h"
+#include "api/units/timestamp.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/transport_feedback.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/event.h"
+#include "system_wrappers/include/clock.h"
+
+namespace webrtc {
+
+RtcpTransceiver::RtcpTransceiver(const RtcpTransceiverConfig& config)
+ : clock_(config.clock),
+ task_queue_(config.task_queue),
+ rtcp_transceiver_(std::make_unique<RtcpTransceiverImpl>(config)) {
+ RTC_DCHECK(task_queue_);
+}
+
+RtcpTransceiver::~RtcpTransceiver() {
+ if (!rtcp_transceiver_)
+ return;
+ auto rtcp_transceiver = std::move(rtcp_transceiver_);
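+ // Ownership of the impl moves into the task, so it is destroyed on the task
+ // queue once the periodic task has been stopped.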
+ task_queue_->PostTask([rtcp_transceiver = std::move(rtcp_transceiver)] {
+ rtcp_transceiver->StopPeriodicTask();
+ });
+ RTC_DCHECK(!rtcp_transceiver_);
+}
+
+void RtcpTransceiver::Stop(absl::AnyInvocable<void() &&> on_destroyed) {
+ RTC_DCHECK(rtcp_transceiver_);
+ auto rtcp_transceiver = std::move(rtcp_transceiver_);
+ absl::Cleanup cleanup = std::move(on_destroyed);
+ task_queue_->PostTask(
+ [rtcp_transceiver = std::move(rtcp_transceiver),
+ cleanup = std::move(cleanup)] { rtcp_transceiver->StopPeriodicTask(); });
+ RTC_DCHECK(!rtcp_transceiver_);
+}
+
+void RtcpTransceiver::AddMediaReceiverRtcpObserver(
+ uint32_t remote_ssrc,
+ MediaReceiverRtcpObserver* observer) {
+ RTC_CHECK(rtcp_transceiver_);
+ RtcpTransceiverImpl* ptr = rtcp_transceiver_.get();
+ task_queue_->PostTask([ptr, remote_ssrc, observer] {
+ ptr->AddMediaReceiverRtcpObserver(remote_ssrc, observer);
+ });
+}
+
+void RtcpTransceiver::RemoveMediaReceiverRtcpObserver(
+ uint32_t remote_ssrc,
+ MediaReceiverRtcpObserver* observer,
+ absl::AnyInvocable<void() &&> on_removed) {
+ RTC_CHECK(rtcp_transceiver_);
+ RtcpTransceiverImpl* ptr = rtcp_transceiver_.get();
+ absl::Cleanup cleanup = std::move(on_removed);
+ task_queue_->PostTask(
+ [ptr, remote_ssrc, observer, cleanup = std::move(cleanup)] {
+ ptr->RemoveMediaReceiverRtcpObserver(remote_ssrc, observer);
+ });
+}
+
+void RtcpTransceiver::SetReadyToSend(bool ready) {
+ RTC_CHECK(rtcp_transceiver_);
+ RtcpTransceiverImpl* ptr = rtcp_transceiver_.get();
+ task_queue_->PostTask([ptr, ready] { ptr->SetReadyToSend(ready); });
+}
+
+void RtcpTransceiver::ReceivePacket(rtc::CopyOnWriteBuffer packet) {
+ RTC_CHECK(rtcp_transceiver_);
+ RtcpTransceiverImpl* ptr = rtcp_transceiver_.get();
+ Timestamp now = clock_->CurrentTime();
+ task_queue_->PostTask(
+ [ptr, packet, now] { ptr->ReceivePacket(packet, now); });
+}
+
+void RtcpTransceiver::SendCompoundPacket() {
+ RTC_CHECK(rtcp_transceiver_);
+ RtcpTransceiverImpl* ptr = rtcp_transceiver_.get();
+ task_queue_->PostTask([ptr] { ptr->SendCompoundPacket(); });
+}
+
+void RtcpTransceiver::SetRemb(int64_t bitrate_bps,
+ std::vector<uint32_t> ssrcs) {
+ RTC_CHECK(rtcp_transceiver_);
+ RtcpTransceiverImpl* ptr = rtcp_transceiver_.get();
+ task_queue_->PostTask([ptr, bitrate_bps, ssrcs = std::move(ssrcs)]() mutable {
+ ptr->SetRemb(bitrate_bps, std::move(ssrcs));
+ });
+}
+
+void RtcpTransceiver::UnsetRemb() {
+ RTC_CHECK(rtcp_transceiver_);
+ RtcpTransceiverImpl* ptr = rtcp_transceiver_.get();
+ task_queue_->PostTask([ptr] { ptr->UnsetRemb(); });
+}
+
+void RtcpTransceiver::SendCombinedRtcpPacket(
+ std::vector<std::unique_ptr<rtcp::RtcpPacket>> rtcp_packets) {
+ RTC_CHECK(rtcp_transceiver_);
+ RtcpTransceiverImpl* ptr = rtcp_transceiver_.get();
+ task_queue_->PostTask(
+ [ptr, rtcp_packets = std::move(rtcp_packets)]() mutable {
+ ptr->SendCombinedRtcpPacket(std::move(rtcp_packets));
+ });
+}
+
+void RtcpTransceiver::SendNack(uint32_t ssrc,
+ std::vector<uint16_t> sequence_numbers) {
+ RTC_CHECK(rtcp_transceiver_);
+ RtcpTransceiverImpl* ptr = rtcp_transceiver_.get();
+ task_queue_->PostTask(
+ [ptr, ssrc, sequence_numbers = std::move(sequence_numbers)]() mutable {
+ ptr->SendNack(ssrc, std::move(sequence_numbers));
+ });
+}
+
+void RtcpTransceiver::SendPictureLossIndication(uint32_t ssrc) {
+ RTC_CHECK(rtcp_transceiver_);
+ RtcpTransceiverImpl* ptr = rtcp_transceiver_.get();
+ task_queue_->PostTask([ptr, ssrc] { ptr->SendPictureLossIndication(ssrc); });
+}
+
+void RtcpTransceiver::SendFullIntraRequest(std::vector<uint32_t> ssrcs) {
+ return SendFullIntraRequest(std::move(ssrcs), true);
+}
+
+void RtcpTransceiver::SendFullIntraRequest(std::vector<uint32_t> ssrcs,
+ bool new_request) {
+ RTC_CHECK(rtcp_transceiver_);
+ RtcpTransceiverImpl* ptr = rtcp_transceiver_.get();
+ task_queue_->PostTask([ptr, ssrcs = std::move(ssrcs), new_request] {
+ ptr->SendFullIntraRequest(ssrcs, new_request);
+ });
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_transceiver.h b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_transceiver.h
new file mode 100644
index 0000000000..22fcc73337
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_transceiver.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_RTP_RTCP_SOURCE_RTCP_TRANSCEIVER_H_
+#define MODULES_RTP_RTCP_SOURCE_RTCP_TRANSCEIVER_H_
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "absl/functional/any_invocable.h"
+#include "api/task_queue/task_queue_base.h"
+#include "modules/rtp_rtcp/source/rtcp_transceiver_config.h"
+#include "modules/rtp_rtcp/source/rtcp_transceiver_impl.h"
+#include "rtc_base/copy_on_write_buffer.h"
+#include "system_wrappers/include/clock.h"
+
+namespace webrtc {
+//
+// Manages incoming and outgoing rtcp messages for multiple BUNDLED streams.
+//
+// This class is a thread-safe wrapper of RtcpTransceiverImpl.
+class RtcpTransceiver : public RtcpFeedbackSenderInterface {
+ public:
+ explicit RtcpTransceiver(const RtcpTransceiverConfig& config);
+ RtcpTransceiver(const RtcpTransceiver&) = delete;
+ RtcpTransceiver& operator=(const RtcpTransceiver&) = delete;
+ // Note that interfaces provided in the constructor might still be used after
+ // the destructor runs. However, they can only be used on the
+ // config.task_queue. Use the Stop function to get notified when they are no
+ // longer used, or ensure those objects outlive the task queue.
+ ~RtcpTransceiver() override;
+
+ // Starts asynchronous destruction of the RtcpTransceiver.
+ // It is safe to call the destructor right after Stop exits; no other
+ // methods may be called afterwards.
+ // Note that interfaces provided in the constructor or registered with
+ // AddObserver might still be used by the transceiver on the task queue
+ // until `on_destroyed` runs.
+ void Stop(absl::AnyInvocable<void() &&> on_destroyed);
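+ //
+ // A minimal usage sketch (assuming the caller may block on an rtc::Event):
+ //   rtc::Event done;
+ //   transceiver.Stop([&done] { done.Set(); });
+ //   done.Wait(rtc::Event::kForever);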
+
+ // Registers observer to be notified about incoming rtcp packets.
+ // Calls to observer will be done on the `config.task_queue`.
+ void AddMediaReceiverRtcpObserver(uint32_t remote_ssrc,
+ MediaReceiverRtcpObserver* observer);
+ // Deregisters the observer. Might return before the observer is
+ // deregistered. Runs `on_removed` once the observer is deregistered.
+ void RemoveMediaReceiverRtcpObserver(
+ uint32_t remote_ssrc,
+ MediaReceiverRtcpObserver* observer,
+ absl::AnyInvocable<void() &&> on_removed);
+
+ // Enables/disables sending rtcp packets.
+ // Packets may still be sent after SetReadyToSend(false) returns, but no new
+ // packets will be scheduled.
+ void SetReadyToSend(bool ready);
+
+ // Handles incoming rtcp packets.
+ void ReceivePacket(rtc::CopyOnWriteBuffer packet);
+
+ // Sends RTCP packets starting with a sender or receiver report.
+ void SendCompoundPacket();
+
+ // (REMB) Receiver Estimated Max Bitrate.
+ // Includes REMB in following compound packets and sends a REMB message
+ // immediately if 'RtcpTransceiverConfig::send_remb_on_change' is set.
+ void SetRemb(int64_t bitrate_bps, std::vector<uint32_t> ssrcs) override;
+ // Stops sending REMB in following compound packets.
+ void UnsetRemb() override;
+
+ // TODO(bugs.webrtc.org/8239): Remove SendCombinedRtcpPacket
+ // and move generating of the TransportFeedback message inside
+ // RtcpTransceiverImpl when there is one RtcpTransceiver per rtp transport.
+ void SendCombinedRtcpPacket(
+ std::vector<std::unique_ptr<rtcp::RtcpPacket>> rtcp_packets) override;
+
+ // Reports missing packets, https://tools.ietf.org/html/rfc4585#section-6.2.1
+ void SendNack(uint32_t ssrc, std::vector<uint16_t> sequence_numbers);
+
+ // Requests a new key frame
+ // using PLI, https://tools.ietf.org/html/rfc4585#section-6.3.1.1
+ void SendPictureLossIndication(uint32_t ssrc);
+ // or using FIR, https://tools.ietf.org/html/rfc5104#section-4.3.1.2
+ // Prefer SendFullIntraRequest(ssrcs, true) over this overload.
+ void SendFullIntraRequest(std::vector<uint32_t> ssrcs);
+ // If new_request is true, the requested sequence number is incremented for
+ // each requested ssrc.
+ void SendFullIntraRequest(std::vector<uint32_t> ssrcs, bool new_request);
+
+ private:
+ Clock* const clock_;
+ TaskQueueBase* const task_queue_;
+ std::unique_ptr<RtcpTransceiverImpl> rtcp_transceiver_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_RTP_RTCP_SOURCE_RTCP_TRANSCEIVER_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_transceiver_config.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_transceiver_config.cc
new file mode 100644
index 0000000000..7acaa0f600
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_transceiver_config.cc
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtcp_transceiver_config.h"
+
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+RtcpTransceiverConfig::RtcpTransceiverConfig() = default;
+RtcpTransceiverConfig::RtcpTransceiverConfig(const RtcpTransceiverConfig&) =
+ default;
+RtcpTransceiverConfig& RtcpTransceiverConfig::operator=(
+ const RtcpTransceiverConfig&) = default;
+RtcpTransceiverConfig::~RtcpTransceiverConfig() = default;
+
+bool RtcpTransceiverConfig::Validate() const {
+ if (feedback_ssrc == 0) {
+ RTC_LOG(LS_WARNING)
+ << debug_id
+ << "Ssrc 0 may be treated by some implementation as invalid.";
+ }
+ if (cname.size() > 255) {
+ RTC_LOG(LS_ERROR) << debug_id << "cname can be maximum 255 characters.";
+ return false;
+ }
+ if (max_packet_size < 100) {
+ RTC_LOG(LS_ERROR) << debug_id << "max packet size " << max_packet_size
+ << " is too small.";
+ return false;
+ }
+ if (max_packet_size > IP_PACKET_SIZE) {
+ RTC_LOG(LS_ERROR) << debug_id << "max packet size " << max_packet_size
+ << " more than " << IP_PACKET_SIZE << " is unsupported.";
+ return false;
+ }
+ if (clock == nullptr) {
+ RTC_LOG(LS_ERROR) << debug_id << "clock must be set";
+ return false;
+ }
+ if (!outgoing_transport) {
+ RTC_LOG(LS_ERROR) << debug_id << "outgoing transport must be set";
+ return false;
+ }
+ if (initial_report_delay < TimeDelta::Zero()) {
+ RTC_LOG(LS_ERROR) << debug_id << "delay " << initial_report_delay.ms()
+ << "ms before first report shouldn't be negative.";
+ return false;
+ }
+ if (report_period <= TimeDelta::Zero()) {
+ RTC_LOG(LS_ERROR) << debug_id << "period " << report_period.ms()
+ << "ms between reports should be positive.";
+ return false;
+ }
+ if (schedule_periodic_compound_packets && task_queue == nullptr) {
+ RTC_LOG(LS_ERROR) << debug_id
+ << "missing task queue for periodic compound packets";
+ return false;
+ }
+ if (rtcp_mode != RtcpMode::kCompound && rtcp_mode != RtcpMode::kReducedSize) {
+ RTC_LOG(LS_ERROR) << debug_id << "unsupported rtcp mode";
+ return false;
+ }
+ if (non_sender_rtt_measurement && !network_link_observer) {
+ RTC_LOG(LS_WARNING) << debug_id
+ << "Enabled special feature to calculate rtt, but no "
+ "rtt observer is provided.";
+ }
+ return true;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_transceiver_config.h b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_transceiver_config.h
new file mode 100644
index 0000000000..3122ad5c36
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_transceiver_config.h
@@ -0,0 +1,188 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_RTP_RTCP_SOURCE_RTCP_TRANSCEIVER_CONFIG_H_
+#define MODULES_RTP_RTCP_SOURCE_RTCP_TRANSCEIVER_CONFIG_H_
+
+#include <string>
+
+#include "api/array_view.h"
+#include "api/rtp_headers.h"
+#include "api/task_queue/task_queue_base.h"
+#include "api/units/data_rate.h"
+#include "api/units/time_delta.h"
+#include "api/units/timestamp.h"
+#include "api/video/video_bitrate_allocation.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/report_block.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/transport_feedback.h"
+#include "system_wrappers/include/clock.h"
+#include "system_wrappers/include/ntp_time.h"
+
+namespace webrtc {
+class ReceiveStatisticsProvider;
+class Transport;
+
+// Interface to watch incoming rtcp packets related to the link in general.
+// All message handlers have a default empty implementation, so users only
+// need to implement the ones they are interested in.
+// All message handlers are passed a `receive_time` parameter, the receive
+// time of the rtcp packet that triggered the update.
+class NetworkLinkRtcpObserver {
+ public:
+ virtual ~NetworkLinkRtcpObserver() = default;
+
+ virtual void OnTransportFeedback(Timestamp receive_time,
+ const rtcp::TransportFeedback& feedback) {}
+ virtual void OnReceiverEstimatedMaxBitrate(Timestamp receive_time,
+ DataRate bitrate) {}
+ virtual void OnReportBlocks(
+ Timestamp receive_time,
+ rtc::ArrayView<const rtcp::ReportBlock> report_blocks) {}
+ virtual void OnRttUpdate(Timestamp receive_time, TimeDelta rtt) {}
+};
+
+// Interface to watch incoming rtcp packets by media (rtp) receiver.
+// All message handlers have a default empty implementation, so users only
+// need to implement the ones they are interested in.
+class MediaReceiverRtcpObserver {
+ public:
+ virtual ~MediaReceiverRtcpObserver() = default;
+
+ virtual void OnSenderReport(uint32_t sender_ssrc,
+ NtpTime ntp_time,
+ uint32_t rtp_time) {}
+ virtual void OnBye(uint32_t sender_ssrc) {}
+ virtual void OnBitrateAllocation(uint32_t sender_ssrc,
+ const VideoBitrateAllocation& allocation) {}
+};
+
+// Handles RTCP related messages for a single RTP stream (i.e. single SSRC)
+class RtpStreamRtcpHandler {
+ public:
+ virtual ~RtpStreamRtcpHandler() = default;
+
+ // Statistics about sent RTP packets, propagated to the RTCP sender report.
+ class RtpStats {
+ public:
+ RtpStats() = default;
+ RtpStats(const RtpStats&) = default;
+ RtpStats& operator=(const RtpStats&) = default;
+ ~RtpStats() = default;
+
+ size_t num_sent_packets() const { return num_sent_packets_; }
+ size_t num_sent_bytes() const { return num_sent_bytes_; }
+ Timestamp last_capture_time() const { return last_capture_time_; }
+ uint32_t last_rtp_timestamp() const { return last_rtp_timestamp_; }
+ int last_clock_rate() const { return last_clock_rate_; }
+
+ void set_num_sent_packets(size_t v) { num_sent_packets_ = v; }
+ void set_num_sent_bytes(size_t v) { num_sent_bytes_ = v; }
+ void set_last_capture_time(Timestamp v) { last_capture_time_ = v; }
+ void set_last_rtp_timestamp(uint32_t v) { last_rtp_timestamp_ = v; }
+ void set_last_clock_rate(int v) { last_clock_rate_ = v; }
+
+ private:
+ size_t num_sent_packets_ = 0;
+ size_t num_sent_bytes_ = 0;
+ Timestamp last_capture_time_ = Timestamp::Zero();
+ uint32_t last_rtp_timestamp_ = 0;
+ int last_clock_rate_ = 90'000;
+ };
+ virtual RtpStats SentStats() = 0;
+
+ virtual void OnNack(uint32_t sender_ssrc,
+ rtc::ArrayView<const uint16_t> sequence_numbers) {}
+ virtual void OnFir(uint32_t sender_ssrc) {}
+ virtual void OnPli(uint32_t sender_ssrc) {}
+ virtual void OnReportBlock(uint32_t sender_ssrc,
+ const rtcp::ReportBlock& report_block) {}
+};
+
+struct RtcpTransceiverConfig {
+ RtcpTransceiverConfig();
+ RtcpTransceiverConfig(const RtcpTransceiverConfig&);
+ RtcpTransceiverConfig& operator=(const RtcpTransceiverConfig&);
+ ~RtcpTransceiverConfig();
+
+ // Logs the error and returns false if the configuration misses key objects
+ // or is inconsistent. May log warnings.
+ bool Validate() const;
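+ //
+ // A minimal valid configuration, as a sketch; `clock`, `transport` and
+ // `queue` are assumed to be supplied by the caller and to outlive the
+ // transceiver:
+ //   RtcpTransceiverConfig config;
+ //   config.clock = clock;
+ //   config.outgoing_transport = &transport;
+ //   config.task_queue = queue;
+ //   RTC_DCHECK(config.Validate());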
+
+ // Used to prepend all log messages. Can be empty.
+ std::string debug_id;
+
+ // Ssrc to use as the default sender ssrc, e.g. for transport-wide feedback.
+ uint32_t feedback_ssrc = 1;
+
+ // Canonical End-Point Identifier of the local participant.
+ // Defined in rfc3550 section 6 note 2 and section 6.5.1.
+ std::string cname;
+
+ // Maximum packet size the outgoing transport accepts.
+ size_t max_packet_size = 1200;
+
+ // The clock to use when querying for the NTP time. Should be set.
+ Clock* clock = nullptr;
+
+ // Transport to send rtcp packets to. Should be set.
+ Transport* outgoing_transport = nullptr;
+
+ // Queue for scheduling delayed tasks, e.g. sending periodic compound packets.
+ TaskQueueBase* task_queue = nullptr;
+
+ // Rtcp report block generator for outgoing receiver reports.
+ ReceiveStatisticsProvider* receive_statistics = nullptr;
+
+ // Should outlive RtcpTransceiver.
+ // Callbacks will be invoked on the `task_queue`.
+ NetworkLinkRtcpObserver* network_link_observer = nullptr;
+
+ // Configures whether sending should
+ // enforce compound packets: https://tools.ietf.org/html/rfc4585#section-3.1
+ // or allow reduced-size packets: https://tools.ietf.org/html/rfc5506
+ // Receiving accepts both compound and reduced-size packets.
+ RtcpMode rtcp_mode = RtcpMode::kCompound;
+ //
+ // Tuning parameters.
+ //
+ // Initial state of whether `outgoing_transport` is ready to accept packets.
+ bool initial_ready_to_send = true;
+ // Delay before the first periodic compound packet.
+ TimeDelta initial_report_delay = TimeDelta::Millis(500);
+
+ // Period between periodic compound packets.
+ TimeDelta report_period = TimeDelta::Seconds(1);
+
+ //
+ // Flags for features and experiments.
+ //
+ bool schedule_periodic_compound_packets = true;
+ // Estimate RTT as non-sender as described in
+ // https://tools.ietf.org/html/rfc3611#section-4.4 and #section-4.5
+ bool non_sender_rtt_measurement = false;
+
+ // Reply to incoming RRTR messages so that the remote endpoint may estimate
+ // RTT as a non-sender, as described in
+ // https://tools.ietf.org/html/rfc3611#section-4.4 and #section-4.5
+ bool reply_to_non_sender_rtt_measurement = true;
+
+ // Reply to incoming RRTR messages multiple times, one per sender SSRC, to
+ // support clients that calculate and process RTT per sender SSRC.
+ bool reply_to_non_sender_rtt_mesaurments_on_all_ssrcs = true;
+
+ // Allows a REMB message to be sent immediately when SetRemb is called without
+ // having to wait for the next compound packet to be sent.
+ bool send_remb_on_change = false;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_RTP_RTCP_SOURCE_RTCP_TRANSCEIVER_CONFIG_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_transceiver_impl.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_transceiver_impl.cc
new file mode 100644
index 0000000000..bb4f96b970
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_transceiver_impl.cc
@@ -0,0 +1,857 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtcp_transceiver_impl.h"
+
+#include <algorithm>
+#include <utility>
+
+#include "absl/algorithm/container.h"
+#include "absl/memory/memory.h"
+#include "api/call/transport.h"
+#include "api/video/video_bitrate_allocation.h"
+#include "modules/rtp_rtcp/include/receive_statistics.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "modules/rtp_rtcp/source/rtcp_packet.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/bye.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/common_header.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/extended_reports.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/fir.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/nack.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/pli.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/receiver_report.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/report_block.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/sdes.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/sender_report.h"
+#include "modules/rtp_rtcp/source/time_util.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/containers/flat_map.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/numerics/divide_round.h"
+#include "rtc_base/task_utils/repeating_task.h"
+#include "system_wrappers/include/clock.h"
+
+namespace webrtc {
+namespace {
+
+struct SenderReportTimes {
+ Timestamp local_received_time;
+ NtpTime remote_sent_time;
+};
+
+} // namespace
+
+struct RtcpTransceiverImpl::RemoteSenderState {
+ uint8_t fir_sequence_number = 0;
+ absl::optional<SenderReportTimes> last_received_sender_report;
+ std::vector<MediaReceiverRtcpObserver*> observers;
+};
+
+struct RtcpTransceiverImpl::LocalSenderState {
+ uint32_t ssrc;
+ size_t last_num_sent_bytes = 0;
+ // Sequence number of the last FIR message per sender SSRC.
+ flat_map<uint32_t, uint8_t> last_fir;
+ RtpStreamRtcpHandler* handler = nullptr;
+};
+
+// Helper to put several RTCP packets into a lower-layer datagram, composing a
+// Compound or Reduced-Size RTCP packet, as defined by RFC 5506 section 2.
+// TODO(danilchap): When in compound mode and packets are so many that several
+// compound RTCP packets need to be generated, ensure each packet is compound.
+class RtcpTransceiverImpl::PacketSender {
+ public:
+ PacketSender(rtcp::RtcpPacket::PacketReadyCallback callback,
+ size_t max_packet_size)
+ : callback_(callback), max_packet_size_(max_packet_size) {
+ RTC_CHECK_LE(max_packet_size, IP_PACKET_SIZE);
+ }
+ ~PacketSender() { RTC_DCHECK_EQ(index_, 0) << "Unsent rtcp packet."; }
+
+ // Appends a packet to the pending compound packet. If the packet does not
+ // fit, the pending data is sent through the callback first and the buffer
+ // is reset.
+ void AppendPacket(const rtcp::RtcpPacket& packet) {
+ packet.Create(buffer_, &index_, max_packet_size_, callback_);
+ }
+
+ // Sends pending rtcp compound packet.
+ void Send() {
+ if (index_ > 0) {
+ callback_(rtc::ArrayView<const uint8_t>(buffer_, index_));
+ index_ = 0;
+ }
+ }
+
+ bool IsEmpty() const { return index_ == 0; }
+
+ private:
+ const rtcp::RtcpPacket::PacketReadyCallback callback_;
+ const size_t max_packet_size_;
+ size_t index_ = 0;
+ uint8_t buffer_[IP_PACKET_SIZE];
+};
+
+RtcpTransceiverImpl::RtcpTransceiverImpl(const RtcpTransceiverConfig& config)
+ : config_(config), ready_to_send_(config.initial_ready_to_send) {
+ RTC_CHECK(config_.Validate());
+ if (ready_to_send_ && config_.schedule_periodic_compound_packets) {
+ SchedulePeriodicCompoundPackets(config_.initial_report_delay);
+ }
+}
+
+RtcpTransceiverImpl::~RtcpTransceiverImpl() = default;
+
+void RtcpTransceiverImpl::AddMediaReceiverRtcpObserver(
+ uint32_t remote_ssrc,
+ MediaReceiverRtcpObserver* observer) {
+ if (config_.receive_statistics == nullptr && remote_senders_.empty()) {
+ RTC_LOG(LS_WARNING) << config_.debug_id
+ << "receive statistic is not set. RTCP report blocks "
+ "will not be generated.";
+ }
+ auto& stored = remote_senders_[remote_ssrc].observers;
+ RTC_DCHECK(!absl::c_linear_search(stored, observer));
+ stored.push_back(observer);
+}
+
+void RtcpTransceiverImpl::RemoveMediaReceiverRtcpObserver(
+ uint32_t remote_ssrc,
+ MediaReceiverRtcpObserver* observer) {
+ auto remote_sender_it = remote_senders_.find(remote_ssrc);
+ if (remote_sender_it == remote_senders_.end())
+ return;
+ auto& stored = remote_sender_it->second.observers;
+ auto it = absl::c_find(stored, observer);
+ if (it == stored.end())
+ return;
+ stored.erase(it);
+}
+
+bool RtcpTransceiverImpl::AddMediaSender(uint32_t local_ssrc,
+ RtpStreamRtcpHandler* handler) {
+ RTC_DCHECK(handler != nullptr);
+ LocalSenderState state;
+ state.ssrc = local_ssrc;
+ state.handler = handler;
+ local_senders_.push_back(state);
+ auto it = std::prev(local_senders_.end());
+ auto [unused, inserted] = local_senders_by_ssrc_.emplace(local_ssrc, it);
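+ // Roll back the list insertion if a sender with this ssrc was already
+ // registered.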
+ if (!inserted) {
+ local_senders_.pop_back();
+ return false;
+ }
+ return true;
+}
+
+bool RtcpTransceiverImpl::RemoveMediaSender(uint32_t local_ssrc) {
+ auto index_it = local_senders_by_ssrc_.find(local_ssrc);
+ if (index_it == local_senders_by_ssrc_.end()) {
+ return false;
+ }
+ local_senders_.erase(index_it->second);
+ local_senders_by_ssrc_.erase(index_it);
+ return true;
+}
+
+void RtcpTransceiverImpl::SetReadyToSend(bool ready) {
+ if (config_.schedule_periodic_compound_packets) {
+ if (ready_to_send_ && !ready)
+ periodic_task_handle_.Stop();
+
+ if (!ready_to_send_ && ready) // Restart periodic sending.
+ SchedulePeriodicCompoundPackets(config_.report_period / 2);
+ }
+ ready_to_send_ = ready;
+}
+
+void RtcpTransceiverImpl::ReceivePacket(rtc::ArrayView<const uint8_t> packet,
+ Timestamp now) {
+ // Report blocks may be spread across multiple sender and receiver reports.
+ std::vector<rtcp::ReportBlock> report_blocks;
+
+ while (!packet.empty()) {
+ rtcp::CommonHeader rtcp_block;
+ if (!rtcp_block.Parse(packet.data(), packet.size()))
+ break;
+
+ HandleReceivedPacket(rtcp_block, now, report_blocks);
+
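+ // Advance to the next rtcp packet within the compound packet.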
+ packet = packet.subview(rtcp_block.packet_size());
+ }
+
+ if (!report_blocks.empty()) {
+ ProcessReportBlocks(now, report_blocks);
+ }
+}
+
+void RtcpTransceiverImpl::SendCompoundPacket() {
+ if (!ready_to_send_)
+ return;
+ SendPeriodicCompoundPacket();
+ ReschedulePeriodicCompoundPackets();
+}
+
+void RtcpTransceiverImpl::SetRemb(int64_t bitrate_bps,
+ std::vector<uint32_t> ssrcs) {
+ RTC_DCHECK_GE(bitrate_bps, 0);
+
+ bool send_now = config_.send_remb_on_change &&
+ (!remb_.has_value() || bitrate_bps != remb_->bitrate_bps());
+ remb_.emplace();
+ remb_->SetSsrcs(std::move(ssrcs));
+ remb_->SetBitrateBps(bitrate_bps);
+ remb_->SetSenderSsrc(config_.feedback_ssrc);
+ // TODO(bugs.webrtc.org/8239): Move logic from PacketRouter for sending remb
+ // immediately on large bitrate change when there is one RtcpTransceiver per
+ // rtp transport.
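+  // `remb_` is swapped out before SendImmediateFeedback so that the compound
+  // prefix it may build (see CreateCompoundPacket) does not append the REMB a
+  // second time, and swapped back so future periodic packets still carry it.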
+ if (send_now) {
+ absl::optional<rtcp::Remb> remb;
+ remb.swap(remb_);
+ SendImmediateFeedback(*remb);
+ remb.swap(remb_);
+ }
+}
+
+void RtcpTransceiverImpl::UnsetRemb() {
+ remb_.reset();
+}
+
+void RtcpTransceiverImpl::SendRawPacket(rtc::ArrayView<const uint8_t> packet) {
+ if (!ready_to_send_)
+ return;
+  // Unlike the other send functions, this one just sends the packet as-is,
+  // disregarding rtcp_mode, max_packet_size and anything else.
+ // TODO(bugs.webrtc.org/8239): respect config_ by creating the
+ // TransportFeedback inside this class when there is one per rtp transport.
+ config_.outgoing_transport->SendRtcp(packet.data(), packet.size());
+}
+
+void RtcpTransceiverImpl::SendNack(uint32_t ssrc,
+ std::vector<uint16_t> sequence_numbers) {
+ RTC_DCHECK(!sequence_numbers.empty());
+ if (!ready_to_send_)
+ return;
+ rtcp::Nack nack;
+ nack.SetSenderSsrc(config_.feedback_ssrc);
+ nack.SetMediaSsrc(ssrc);
+ nack.SetPacketIds(std::move(sequence_numbers));
+ SendImmediateFeedback(nack);
+}
+
+void RtcpTransceiverImpl::SendPictureLossIndication(uint32_t ssrc) {
+ if (!ready_to_send_)
+ return;
+ rtcp::Pli pli;
+ pli.SetSenderSsrc(config_.feedback_ssrc);
+ pli.SetMediaSsrc(ssrc);
+ SendImmediateFeedback(pli);
+}
+
+void RtcpTransceiverImpl::SendFullIntraRequest(
+ rtc::ArrayView<const uint32_t> ssrcs,
+ bool new_request) {
+ RTC_DCHECK(!ssrcs.empty());
+ if (!ready_to_send_)
+ return;
+ rtcp::Fir fir;
+ fir.SetSenderSsrc(config_.feedback_ssrc);
+ for (uint32_t media_ssrc : ssrcs) {
+ uint8_t& command_seq_num = remote_senders_[media_ssrc].fir_sequence_number;
+ if (new_request)
+ command_seq_num += 1;
+ fir.AddRequestTo(media_ssrc, command_seq_num);
+ }
+ SendImmediateFeedback(fir);
+}
+
+void RtcpTransceiverImpl::HandleReceivedPacket(
+ const rtcp::CommonHeader& rtcp_packet_header,
+ Timestamp now,
+ std::vector<rtcp::ReportBlock>& report_blocks) {
+ switch (rtcp_packet_header.type()) {
+ case rtcp::Bye::kPacketType:
+ HandleBye(rtcp_packet_header);
+ break;
+ case rtcp::SenderReport::kPacketType:
+ HandleSenderReport(rtcp_packet_header, now, report_blocks);
+ break;
+ case rtcp::ReceiverReport::kPacketType:
+ HandleReceiverReport(rtcp_packet_header, report_blocks);
+ break;
+ case rtcp::ExtendedReports::kPacketType:
+ HandleExtendedReports(rtcp_packet_header, now);
+ break;
+ case rtcp::Psfb::kPacketType:
+ HandlePayloadSpecificFeedback(rtcp_packet_header, now);
+ break;
+ case rtcp::Rtpfb::kPacketType:
+ HandleRtpFeedback(rtcp_packet_header, now);
+ break;
+ }
+}
+
+void RtcpTransceiverImpl::HandleBye(
+ const rtcp::CommonHeader& rtcp_packet_header) {
+ rtcp::Bye bye;
+ if (!bye.Parse(rtcp_packet_header))
+ return;
+ auto remote_sender_it = remote_senders_.find(bye.sender_ssrc());
+ if (remote_sender_it == remote_senders_.end())
+ return;
+ for (MediaReceiverRtcpObserver* observer : remote_sender_it->second.observers)
+ observer->OnBye(bye.sender_ssrc());
+}
+
+void RtcpTransceiverImpl::HandleSenderReport(
+ const rtcp::CommonHeader& rtcp_packet_header,
+ Timestamp now,
+ std::vector<rtcp::ReportBlock>& report_blocks) {
+ rtcp::SenderReport sender_report;
+ if (!sender_report.Parse(rtcp_packet_header))
+ return;
+ RemoteSenderState& remote_sender =
+ remote_senders_[sender_report.sender_ssrc()];
+ remote_sender.last_received_sender_report = {{now, sender_report.ntp()}};
+ const auto& received_report_blocks = sender_report.report_blocks();
+ CallbackOnReportBlocks(sender_report.sender_ssrc(), received_report_blocks);
+ report_blocks.insert(report_blocks.end(), received_report_blocks.begin(),
+ received_report_blocks.end());
+
+ for (MediaReceiverRtcpObserver* observer : remote_sender.observers)
+ observer->OnSenderReport(sender_report.sender_ssrc(), sender_report.ntp(),
+ sender_report.rtp_timestamp());
+}
+
+void RtcpTransceiverImpl::HandleReceiverReport(
+ const rtcp::CommonHeader& rtcp_packet_header,
+ std::vector<rtcp::ReportBlock>& report_blocks) {
+ rtcp::ReceiverReport receiver_report;
+ if (!receiver_report.Parse(rtcp_packet_header)) {
+ return;
+ }
+ const auto& received_report_blocks = receiver_report.report_blocks();
+ CallbackOnReportBlocks(receiver_report.sender_ssrc(), received_report_blocks);
+ report_blocks.insert(report_blocks.end(), received_report_blocks.begin(),
+ received_report_blocks.end());
+}
+
+void RtcpTransceiverImpl::CallbackOnReportBlocks(
+ uint32_t sender_ssrc,
+ rtc::ArrayView<const rtcp::ReportBlock> report_blocks) {
+ if (local_senders_.empty()) {
+ return;
+ }
+ for (const rtcp::ReportBlock& block : report_blocks) {
+ auto sender_it = local_senders_by_ssrc_.find(block.source_ssrc());
+ if (sender_it != local_senders_by_ssrc_.end()) {
+ sender_it->second->handler->OnReportBlock(sender_ssrc, block);
+ }
+ }
+}
+
+void RtcpTransceiverImpl::HandlePayloadSpecificFeedback(
+ const rtcp::CommonHeader& rtcp_packet_header,
+ Timestamp now) {
+ switch (rtcp_packet_header.fmt()) {
+ case rtcp::Fir::kFeedbackMessageType:
+ HandleFir(rtcp_packet_header);
+ break;
+ case rtcp::Pli::kFeedbackMessageType:
+ HandlePli(rtcp_packet_header);
+ break;
+ case rtcp::Psfb::kAfbMessageType:
+ HandleRemb(rtcp_packet_header, now);
+ break;
+ }
+}
+
+void RtcpTransceiverImpl::HandleFir(
+ const rtcp::CommonHeader& rtcp_packet_header) {
+ rtcp::Fir fir;
+ if (local_senders_.empty() || !fir.Parse(rtcp_packet_header)) {
+ return;
+ }
+ for (const rtcp::Fir::Request& r : fir.requests()) {
+ auto it = local_senders_by_ssrc_.find(r.ssrc);
+ if (it == local_senders_by_ssrc_.end()) {
+ continue;
+ }
+ auto [fir_it, is_new] =
+ it->second->last_fir.emplace(fir.sender_ssrc(), r.seq_nr);
+ if (is_new || fir_it->second != r.seq_nr) {
+ it->second->handler->OnFir(fir.sender_ssrc());
+ fir_it->second = r.seq_nr;
+ }
+ }
+}
+
+void RtcpTransceiverImpl::HandlePli(
+ const rtcp::CommonHeader& rtcp_packet_header) {
+ rtcp::Pli pli;
+ if (local_senders_.empty() || !pli.Parse(rtcp_packet_header)) {
+ return;
+ }
+ auto it = local_senders_by_ssrc_.find(pli.media_ssrc());
+ if (it != local_senders_by_ssrc_.end()) {
+ it->second->handler->OnPli(pli.sender_ssrc());
+ }
+}
+
+void RtcpTransceiverImpl::HandleRemb(
+ const rtcp::CommonHeader& rtcp_packet_header,
+ Timestamp now) {
+ rtcp::Remb remb;
+ if (config_.network_link_observer == nullptr ||
+ !remb.Parse(rtcp_packet_header)) {
+ return;
+ }
+ config_.network_link_observer->OnReceiverEstimatedMaxBitrate(
+ now, DataRate::BitsPerSec(remb.bitrate_bps()));
+}
+
+void RtcpTransceiverImpl::HandleRtpFeedback(
+ const rtcp::CommonHeader& rtcp_packet_header,
+ Timestamp now) {
+ switch (rtcp_packet_header.fmt()) {
+ case rtcp::Nack::kFeedbackMessageType:
+ HandleNack(rtcp_packet_header);
+ break;
+ case rtcp::TransportFeedback::kFeedbackMessageType:
+ HandleTransportFeedback(rtcp_packet_header, now);
+ break;
+ }
+}
+
+void RtcpTransceiverImpl::HandleNack(
+ const rtcp::CommonHeader& rtcp_packet_header) {
+ rtcp::Nack nack;
+ if (local_senders_.empty() || !nack.Parse(rtcp_packet_header)) {
+ return;
+ }
+ auto it = local_senders_by_ssrc_.find(nack.media_ssrc());
+ if (it != local_senders_by_ssrc_.end()) {
+ it->second->handler->OnNack(nack.sender_ssrc(), nack.packet_ids());
+ }
+}
+
+void RtcpTransceiverImpl::HandleTransportFeedback(
+ const rtcp::CommonHeader& rtcp_packet_header,
+ Timestamp now) {
+ RTC_DCHECK_EQ(rtcp_packet_header.fmt(),
+ rtcp::TransportFeedback::kFeedbackMessageType);
+ if (config_.network_link_observer == nullptr) {
+ return;
+ }
+ rtcp::TransportFeedback feedback;
+ if (feedback.Parse(rtcp_packet_header)) {
+ config_.network_link_observer->OnTransportFeedback(now, feedback);
+ }
+}
+
+void RtcpTransceiverImpl::HandleExtendedReports(
+ const rtcp::CommonHeader& rtcp_packet_header,
+ Timestamp now) {
+ rtcp::ExtendedReports extended_reports;
+ if (!extended_reports.Parse(rtcp_packet_header))
+ return;
+
+ if (config_.reply_to_non_sender_rtt_measurement && extended_reports.rrtr()) {
+ RrtrTimes& rrtr = received_rrtrs_[extended_reports.sender_ssrc()];
+ rrtr.received_remote_mid_ntp_time =
+ CompactNtp(extended_reports.rrtr()->ntp());
+ rrtr.local_receive_mid_ntp_time =
+ CompactNtp(config_.clock->ConvertTimestampToNtpTime(now));
+ }
+
+ if (extended_reports.dlrr())
+ HandleDlrr(extended_reports.dlrr(), now);
+
+ if (extended_reports.target_bitrate())
+ HandleTargetBitrate(*extended_reports.target_bitrate(),
+ extended_reports.sender_ssrc());
+}
+
+void RtcpTransceiverImpl::HandleDlrr(const rtcp::Dlrr& dlrr, Timestamp now) {
+ if (!config_.non_sender_rtt_measurement ||
+ config_.network_link_observer == nullptr) {
+ return;
+ }
+
+  // Delay and last_rr are transferred using 32-bit compact ntp resolution.
+  // Convert the packet arrival time to the same format via the 64-bit ntp
+  // format.
+ uint32_t receive_time_ntp =
+ CompactNtp(config_.clock->ConvertTimestampToNtpTime(now));
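+  // Compact ntp is the middle 32 bits of the 64-bit ntp timestamp: 16 bits of
+  // seconds and 16 bits of fraction, so one unit is 1/65536 of a second. For
+  // example, an rtt_ntp of 0x00018000 corresponds to a 1.5 second round trip.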
+ for (const rtcp::ReceiveTimeInfo& rti : dlrr.sub_blocks()) {
+ if (rti.ssrc != config_.feedback_ssrc)
+ continue;
+ uint32_t rtt_ntp = receive_time_ntp - rti.delay_since_last_rr - rti.last_rr;
+ TimeDelta rtt = CompactNtpRttToTimeDelta(rtt_ntp);
+ config_.network_link_observer->OnRttUpdate(now, rtt);
+ }
+}
+
+void RtcpTransceiverImpl::ProcessReportBlocks(
+ Timestamp now,
+ rtc::ArrayView<const rtcp::ReportBlock> report_blocks) {
+ RTC_DCHECK(!report_blocks.empty());
+ if (config_.network_link_observer == nullptr) {
+ return;
+ }
+  // Round trip times calculated from different report blocks are supposed to
+  // be about the same, as those blocks should be generated by the same remote
+  // sender. To avoid too many callbacks, this code averages multiple rtts
+  // into one.
+ TimeDelta rtt_sum = TimeDelta::Zero();
+ size_t num_rtts = 0;
+ uint32_t receive_time_ntp =
+ CompactNtp(config_.clock->ConvertTimestampToNtpTime(now));
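+  // Per RFC 3550 section 6.4.1, rtt = arrival time - delay since last SR
+  // (DLSR) - last SR timestamp (LSR), all in compact ntp; wraparound falls
+  // out of the unsigned 32-bit arithmetic.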
+ for (const rtcp::ReportBlock& report_block : report_blocks) {
+ if (report_block.last_sr() == 0) {
+ continue;
+ }
+
+ uint32_t rtt_ntp = receive_time_ntp - report_block.delay_since_last_sr() -
+ report_block.last_sr();
+ rtt_sum += CompactNtpRttToTimeDelta(rtt_ntp);
+ ++num_rtts;
+ }
+  // For backward compatibility, rtt based on report blocks is not reported to
+  // the `config_.rtt_observer`.
+ if (num_rtts > 0) {
+ config_.network_link_observer->OnRttUpdate(now, rtt_sum / num_rtts);
+ }
+ config_.network_link_observer->OnReportBlocks(now, report_blocks);
+}
+
+void RtcpTransceiverImpl::HandleTargetBitrate(
+ const rtcp::TargetBitrate& target_bitrate,
+ uint32_t remote_ssrc) {
+ auto remote_sender_it = remote_senders_.find(remote_ssrc);
+ if (remote_sender_it == remote_senders_.end() ||
+ remote_sender_it->second.observers.empty())
+ return;
+
+ // Convert rtcp::TargetBitrate to VideoBitrateAllocation.
+ VideoBitrateAllocation bitrate_allocation;
+ for (const rtcp::TargetBitrate::BitrateItem& item :
+ target_bitrate.GetTargetBitrates()) {
+ if (item.spatial_layer >= kMaxSpatialLayers ||
+ item.temporal_layer >= kMaxTemporalStreams) {
+ RTC_DLOG(LS_WARNING)
+ << config_.debug_id
+ << "Invalid incoming TargetBitrate with spatial layer "
+ << item.spatial_layer << ", temporal layer " << item.temporal_layer;
+ continue;
+ }
+ bitrate_allocation.SetBitrate(item.spatial_layer, item.temporal_layer,
+ item.target_bitrate_kbps * 1000);
+ }
+
+ for (MediaReceiverRtcpObserver* observer : remote_sender_it->second.observers)
+ observer->OnBitrateAllocation(remote_ssrc, bitrate_allocation);
+}
+
+void RtcpTransceiverImpl::ReschedulePeriodicCompoundPackets() {
+ if (!config_.schedule_periodic_compound_packets)
+ return;
+ periodic_task_handle_.Stop();
+ RTC_DCHECK(ready_to_send_);
+ SchedulePeriodicCompoundPackets(config_.report_period);
+}
+
+void RtcpTransceiverImpl::SchedulePeriodicCompoundPackets(TimeDelta delay) {
+ periodic_task_handle_ = RepeatingTaskHandle::DelayedStart(
+ config_.task_queue, delay,
+ [this] {
+ RTC_DCHECK(config_.schedule_periodic_compound_packets);
+ RTC_DCHECK(ready_to_send_);
+ SendPeriodicCompoundPacket();
+ return config_.report_period;
+ },
+ TaskQueueBase::DelayPrecision::kLow, config_.clock);
+}
+
+std::vector<uint32_t> RtcpTransceiverImpl::FillReports(
+ Timestamp now,
+ ReservedBytes reserved,
+ PacketSender& rtcp_sender) {
+ // Sender/receiver reports should be first in the RTCP packet.
+ RTC_DCHECK(rtcp_sender.IsEmpty());
+
+ size_t available_bytes = config_.max_packet_size;
+ if (reserved.per_packet > available_bytes) {
+    // Because reserved.per_packet is unsigned, subtracting would underflow
+    // and would not produce the desired result.
+ available_bytes = 0;
+ } else {
+ available_bytes -= reserved.per_packet;
+ }
+
+ const size_t sender_report_size_bytes = 28 + reserved.per_sender;
+ const size_t full_sender_report_size_bytes =
+ sender_report_size_bytes +
+ rtcp::SenderReport::kMaxNumberOfReportBlocks * rtcp::ReportBlock::kLength;
+ size_t max_full_sender_reports =
+ available_bytes / full_sender_report_size_bytes;
+ size_t max_report_blocks =
+ max_full_sender_reports * rtcp::SenderReport::kMaxNumberOfReportBlocks;
+ size_t available_bytes_for_last_sender_report =
+ available_bytes - max_full_sender_reports * full_sender_report_size_bytes;
+ if (available_bytes_for_last_sender_report >= sender_report_size_bytes) {
+ max_report_blocks +=
+ (available_bytes_for_last_sender_report - sender_report_size_bytes) /
+ rtcp::ReportBlock::kLength;
+ }
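+  // A worked example, assuming a 1200-byte max_packet_size and no reserved
+  // bytes: a sender report is 28 bytes, a report block 24 bytes, and at most
+  // 31 blocks fit in one report, so a full sender report is 772 bytes. Then
+  // 1200 / 772 = 1 full report (31 blocks), and the remaining 428 bytes fit
+  // one more 28-byte report plus (428 - 28) / 24 = 16 blocks, 47 in total.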
+
+ std::vector<rtcp::ReportBlock> report_blocks =
+ CreateReportBlocks(now, max_report_blocks);
+  // The calculation above of the max number of sender reports made space for
+  // the max number of report blocks per sender report; if the actual number
+  // of report blocks is low, more sender reports may fit.
+ size_t max_sender_reports =
+ (available_bytes - report_blocks.size() * rtcp::ReportBlock::kLength) /
+ sender_report_size_bytes;
+
+ auto last_handled_sender_it = local_senders_.end();
+ auto report_block_it = report_blocks.begin();
+ std::vector<uint32_t> sender_ssrcs;
+ for (auto it = local_senders_.begin();
+ it != local_senders_.end() && sender_ssrcs.size() < max_sender_reports;
+ ++it) {
+ LocalSenderState& rtp_sender = *it;
+ RtpStreamRtcpHandler::RtpStats stats = rtp_sender.handler->SentStats();
+
+ if (stats.num_sent_bytes() < rtp_sender.last_num_sent_bytes) {
+ RTC_LOG(LS_ERROR) << "Inconsistent SR for SSRC " << rtp_sender.ssrc
+ << ". Number of total sent bytes decreased.";
+ rtp_sender.last_num_sent_bytes = 0;
+ }
+ if (stats.num_sent_bytes() == rtp_sender.last_num_sent_bytes) {
+      // Skip because no RTP packet was sent for this SSRC since the last
+      // report.
+ continue;
+ }
+ rtp_sender.last_num_sent_bytes = stats.num_sent_bytes();
+
+ last_handled_sender_it = it;
+ rtcp::SenderReport sender_report;
+ sender_report.SetSenderSsrc(rtp_sender.ssrc);
+ sender_report.SetPacketCount(stats.num_sent_packets());
+ sender_report.SetOctetCount(stats.num_sent_bytes());
+ sender_report.SetNtp(config_.clock->ConvertTimestampToNtpTime(now));
+ RTC_DCHECK_GE(now, stats.last_capture_time());
+ sender_report.SetRtpTimestamp(
+ stats.last_rtp_timestamp() +
+ ((now - stats.last_capture_time()) * stats.last_clock_rate())
+ .seconds());
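+    // Multiplying the TimeDelta by the rtp clock rate turns seconds into
+    // ticks, so `.seconds()` of the product is the tick count. E.g. with a
+    // 90 kHz clock and 20 ms since the last capture, the reported rtp
+    // timestamp advances by 0.020 * 90000 = 1800 ticks.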
+ if (report_block_it != report_blocks.end()) {
+ size_t num_blocks =
+ std::min<size_t>(rtcp::SenderReport::kMaxNumberOfReportBlocks,
+ report_blocks.end() - report_block_it);
+ std::vector<rtcp::ReportBlock> sub_blocks(report_block_it,
+ report_block_it + num_blocks);
+ sender_report.SetReportBlocks(std::move(sub_blocks));
+ report_block_it += num_blocks;
+ }
+ rtcp_sender.AppendPacket(sender_report);
+ sender_ssrcs.push_back(rtp_sender.ssrc);
+ }
+ if (last_handled_sender_it != local_senders_.end()) {
+    // Rotate `local_senders_` so that the first unhandled sender becomes
+    // first in the list and thus will be the first to get an rtcp sender
+    // report on the next call to `FillReports`.
+ local_senders_.splice(local_senders_.end(), local_senders_,
+ local_senders_.begin(),
+ std::next(last_handled_sender_it));
+ }
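+  // E.g. with senders [A, B, C, D] where A and B produced reports, the splice
+  // rotates the list to [C, D, A, B], so C is served first on the next call.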
+
+  // Calculate the number of receiver reports to attach the remaining report
+  // blocks to.
+ size_t num_receiver_reports =
+ DivideRoundUp(report_blocks.end() - report_block_it,
+ rtcp::ReceiverReport::kMaxNumberOfReportBlocks);
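+  // E.g. 40 leftover report blocks with at most 31 per receiver report yield
+  // DivideRoundUp(40, 31) = 2 receiver reports (31 blocks + 9 blocks).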
+
+ // In compound mode each RTCP packet has to start with a sender or receiver
+ // report.
+ if (config_.rtcp_mode == RtcpMode::kCompound && sender_ssrcs.empty() &&
+ num_receiver_reports == 0) {
+ num_receiver_reports = 1;
+ }
+
+ uint32_t sender_ssrc =
+ sender_ssrcs.empty() ? config_.feedback_ssrc : sender_ssrcs.front();
+ for (size_t i = 0; i < num_receiver_reports; ++i) {
+ rtcp::ReceiverReport receiver_report;
+ receiver_report.SetSenderSsrc(sender_ssrc);
+ size_t num_blocks =
+ std::min<size_t>(rtcp::ReceiverReport::kMaxNumberOfReportBlocks,
+ report_blocks.end() - report_block_it);
+ std::vector<rtcp::ReportBlock> sub_blocks(report_block_it,
+ report_block_it + num_blocks);
+ receiver_report.SetReportBlocks(std::move(sub_blocks));
+ report_block_it += num_blocks;
+ rtcp_sender.AppendPacket(receiver_report);
+ }
+ // All report blocks should be attached at this point.
+ RTC_DCHECK_EQ(report_blocks.end() - report_block_it, 0);
+ return sender_ssrcs;
+}
+
+void RtcpTransceiverImpl::CreateCompoundPacket(Timestamp now,
+ size_t reserved_bytes,
+ PacketSender& sender) {
+ RTC_DCHECK(sender.IsEmpty());
+ ReservedBytes reserved = {.per_packet = reserved_bytes};
+ absl::optional<rtcp::Sdes> sdes;
+ if (!config_.cname.empty()) {
+ sdes.emplace();
+ bool added = sdes->AddCName(config_.feedback_ssrc, config_.cname);
+ RTC_DCHECK(added) << "Failed to add CNAME " << config_.cname
+ << " to RTCP SDES packet.";
+ reserved.per_packet += sdes->BlockLength();
+ }
+ if (remb_.has_value()) {
+ reserved.per_packet += remb_->BlockLength();
+ }
+ absl::optional<rtcp::ExtendedReports> xr_with_dlrr;
+ if (!received_rrtrs_.empty()) {
+ RTC_DCHECK(config_.reply_to_non_sender_rtt_measurement);
+ xr_with_dlrr.emplace();
+ uint32_t now_ntp =
+ CompactNtp(config_.clock->ConvertTimestampToNtpTime(now));
+ for (const auto& [ssrc, rrtr_info] : received_rrtrs_) {
+ rtcp::ReceiveTimeInfo reply;
+ reply.ssrc = ssrc;
+ reply.last_rr = rrtr_info.received_remote_mid_ntp_time;
+ reply.delay_since_last_rr =
+ now_ntp - rrtr_info.local_receive_mid_ntp_time;
+ xr_with_dlrr->AddDlrrItem(reply);
+ }
+ if (config_.reply_to_non_sender_rtt_mesaurments_on_all_ssrcs) {
+ reserved.per_sender += xr_with_dlrr->BlockLength();
+ } else {
+ reserved.per_packet += xr_with_dlrr->BlockLength();
+ }
+ }
+ if (config_.non_sender_rtt_measurement) {
+    // It may look like bytes for the ExtendedReports header are reserved
+    // twice, but in practice the same RtcpTransceiver won't both produce an
+    // RRTR (i.e. it is receive-only) and reply to an RRTR (i.e. the remote
+    // participant is receive-only). If that happens, `reserved_bytes` would
+    // be slightly larger than it needs to be, which is not an issue.
+
+    // 4 bytes for common RTCP header + 4 bytes for the ExtendedReports header.
+ reserved.per_packet += (4 + 4 + rtcp::Rrtr::kLength);
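+    // (Assuming rtcp::Rrtr::kLength is the 12-byte RRTR block defined in
+    // RFC 3611, i.e. a 4-byte XR block header plus an 8-byte ntp timestamp,
+    // this reserves 20 bytes in total.)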
+ }
+
+ std::vector<uint32_t> sender_ssrcs = FillReports(now, reserved, sender);
+ bool has_sender_report = !sender_ssrcs.empty();
+ uint32_t sender_ssrc =
+ has_sender_report ? sender_ssrcs.front() : config_.feedback_ssrc;
+
+ if (sdes.has_value() && !sender.IsEmpty()) {
+ sender.AppendPacket(*sdes);
+ }
+ if (remb_.has_value()) {
+ remb_->SetSenderSsrc(sender_ssrc);
+ sender.AppendPacket(*remb_);
+ }
+ if (!has_sender_report && config_.non_sender_rtt_measurement) {
+ rtcp::ExtendedReports xr_with_rrtr;
+ xr_with_rrtr.SetSenderSsrc(config_.feedback_ssrc);
+ rtcp::Rrtr rrtr;
+ rrtr.SetNtp(config_.clock->ConvertTimestampToNtpTime(now));
+ xr_with_rrtr.SetRrtr(rrtr);
+ sender.AppendPacket(xr_with_rrtr);
+ }
+ if (xr_with_dlrr.has_value()) {
+ rtc::ArrayView<const uint32_t> ssrcs(&sender_ssrc, 1);
+ if (config_.reply_to_non_sender_rtt_mesaurments_on_all_ssrcs &&
+ !sender_ssrcs.empty()) {
+ ssrcs = sender_ssrcs;
+ }
+ RTC_DCHECK(!ssrcs.empty());
+ for (uint32_t ssrc : ssrcs) {
+ xr_with_dlrr->SetSenderSsrc(ssrc);
+ sender.AppendPacket(*xr_with_dlrr);
+ }
+ }
+}
+
+void RtcpTransceiverImpl::SendPeriodicCompoundPacket() {
+ auto send_packet = [this](rtc::ArrayView<const uint8_t> packet) {
+ config_.outgoing_transport->SendRtcp(packet.data(), packet.size());
+ };
+ Timestamp now = config_.clock->CurrentTime();
+ PacketSender sender(send_packet, config_.max_packet_size);
+ CreateCompoundPacket(now, /*reserved_bytes=*/0, sender);
+ sender.Send();
+}
+
+void RtcpTransceiverImpl::SendCombinedRtcpPacket(
+ std::vector<std::unique_ptr<rtcp::RtcpPacket>> rtcp_packets) {
+ auto send_packet = [this](rtc::ArrayView<const uint8_t> packet) {
+ config_.outgoing_transport->SendRtcp(packet.data(), packet.size());
+ };
+ PacketSender sender(send_packet, config_.max_packet_size);
+
+ for (auto& rtcp_packet : rtcp_packets) {
+ rtcp_packet->SetSenderSsrc(config_.feedback_ssrc);
+ sender.AppendPacket(*rtcp_packet);
+ }
+ sender.Send();
+}
+
+void RtcpTransceiverImpl::SendImmediateFeedback(
+ const rtcp::RtcpPacket& rtcp_packet) {
+ auto send_packet = [this](rtc::ArrayView<const uint8_t> packet) {
+ config_.outgoing_transport->SendRtcp(packet.data(), packet.size());
+ };
+ PacketSender sender(send_packet, config_.max_packet_size);
+ // Compound mode requires every sent rtcp packet to be compound, i.e. start
+ // with a sender or receiver report.
+ if (config_.rtcp_mode == RtcpMode::kCompound) {
+ Timestamp now = config_.clock->CurrentTime();
+ CreateCompoundPacket(now, /*reserved_bytes=*/rtcp_packet.BlockLength(),
+ sender);
+ }
+
+ sender.AppendPacket(rtcp_packet);
+ sender.Send();
+
+  // If a compound packet was sent, delay (reschedule) the periodic one.
+ if (config_.rtcp_mode == RtcpMode::kCompound)
+ ReschedulePeriodicCompoundPackets();
+}
+
+std::vector<rtcp::ReportBlock> RtcpTransceiverImpl::CreateReportBlocks(
+ Timestamp now,
+ size_t num_max_blocks) {
+ if (!config_.receive_statistics)
+ return {};
+ std::vector<rtcp::ReportBlock> report_blocks =
+ config_.receive_statistics->RtcpReportBlocks(num_max_blocks);
+ uint32_t last_sr = 0;
+ uint32_t last_delay = 0;
+ for (rtcp::ReportBlock& report_block : report_blocks) {
+ auto it = remote_senders_.find(report_block.source_ssrc());
+ if (it == remote_senders_.end() ||
+ !it->second.last_received_sender_report) {
+ continue;
+ }
+ const SenderReportTimes& last_sender_report =
+ *it->second.last_received_sender_report;
+ last_sr = CompactNtp(last_sender_report.remote_sent_time);
+ last_delay =
+ SaturatedToCompactNtp(now - last_sender_report.local_received_time);
+ report_block.SetLastSr(last_sr);
+ report_block.SetDelayLastSr(last_delay);
+ }
+ return report_blocks;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_transceiver_impl.h b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_transceiver_impl.h
new file mode 100644
index 0000000000..8a3333d45c
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_transceiver_impl.h
@@ -0,0 +1,170 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_RTP_RTCP_SOURCE_RTCP_TRANSCEIVER_IMPL_H_
+#define MODULES_RTP_RTCP_SOURCE_RTCP_TRANSCEIVER_IMPL_H_
+
+#include <list>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/array_view.h"
+#include "api/units/timestamp.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/common_header.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/dlrr.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/remb.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/report_block.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/target_bitrate.h"
+#include "modules/rtp_rtcp/source/rtcp_transceiver_config.h"
+#include "rtc_base/containers/flat_map.h"
+#include "rtc_base/task_utils/repeating_task.h"
+#include "system_wrappers/include/ntp_time.h"
+
+namespace webrtc {
+//
+// Manages incoming and outgoing rtcp messages for multiple BUNDLED streams.
+//
+// This class is not thread-safe.
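+//
+// A minimal usage sketch (illustrative only; `transport` and `clock` stand
+// for caller-supplied objects matching RtcpTransceiverConfig):
+//   RtcpTransceiverConfig config;
+//   config.clock = &clock;
+//   config.outgoing_transport = &transport;
+//   RtcpTransceiverImpl rtcp_transceiver(config);
+//   rtcp_transceiver.ReceivePacket(incoming_rtcp, clock.CurrentTime());
+//   rtcp_transceiver.SendCompoundPacket();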
+class RtcpTransceiverImpl {
+ public:
+ explicit RtcpTransceiverImpl(const RtcpTransceiverConfig& config);
+ RtcpTransceiverImpl(const RtcpTransceiverImpl&) = delete;
+ RtcpTransceiverImpl& operator=(const RtcpTransceiverImpl&) = delete;
+ ~RtcpTransceiverImpl();
+
+ void StopPeriodicTask() { periodic_task_handle_.Stop(); }
+
+ void AddMediaReceiverRtcpObserver(uint32_t remote_ssrc,
+ MediaReceiverRtcpObserver* observer);
+ void RemoveMediaReceiverRtcpObserver(uint32_t remote_ssrc,
+ MediaReceiverRtcpObserver* observer);
+
+  // Returns false on failure, e.g. when there is already a handler registered
+  // for the `local_ssrc`.
+ bool AddMediaSender(uint32_t local_ssrc, RtpStreamRtcpHandler* handler);
+ bool RemoveMediaSender(uint32_t local_ssrc);
+
+ void SetReadyToSend(bool ready);
+
+ void ReceivePacket(rtc::ArrayView<const uint8_t> packet, Timestamp now);
+
+ void SendCompoundPacket();
+
+ void SetRemb(int64_t bitrate_bps, std::vector<uint32_t> ssrcs);
+ void UnsetRemb();
+ // Temporary helpers to send pre-built TransportFeedback rtcp packet.
+ uint32_t sender_ssrc() const { return config_.feedback_ssrc; }
+ void SendRawPacket(rtc::ArrayView<const uint8_t> packet);
+
+ void SendNack(uint32_t ssrc, std::vector<uint16_t> sequence_numbers);
+
+ void SendPictureLossIndication(uint32_t ssrc);
+  // If new_request is true, the requested sequence number is increased for
+  // each requested ssrc.
+ void SendFullIntraRequest(rtc::ArrayView<const uint32_t> ssrcs,
+ bool new_request);
+
+ // SendCombinedRtcpPacket ignores rtcp mode and does not send a compound
+ // message. https://tools.ietf.org/html/rfc4585#section-3.1
+ void SendCombinedRtcpPacket(
+ std::vector<std::unique_ptr<rtcp::RtcpPacket>> rtcp_packets);
+
+ private:
+ class PacketSender;
+ struct RemoteSenderState;
+ struct LocalSenderState;
+ struct RrtrTimes {
+ // Received remote NTP timestamp in compact representation.
+ uint32_t received_remote_mid_ntp_time;
+
+ // Local NTP time when the report was received in compact representation.
+ uint32_t local_receive_mid_ntp_time;
+ };
+
+ void HandleReceivedPacket(const rtcp::CommonHeader& rtcp_packet_header,
+ Timestamp now,
+ std::vector<rtcp::ReportBlock>& report_blocks);
+ // Individual rtcp packet handlers.
+ void HandleBye(const rtcp::CommonHeader& rtcp_packet_header);
+ void HandleSenderReport(const rtcp::CommonHeader& rtcp_packet_header,
+ Timestamp now,
+ std::vector<rtcp::ReportBlock>& report_blocks);
+ void HandleReceiverReport(const rtcp::CommonHeader& rtcp_packet_header,
+ std::vector<rtcp::ReportBlock>& report_blocks);
+ void CallbackOnReportBlocks(
+ uint32_t sender_ssrc,
+ rtc::ArrayView<const rtcp::ReportBlock> report_blocks);
+ void HandlePayloadSpecificFeedback(
+ const rtcp::CommonHeader& rtcp_packet_header,
+ Timestamp now);
+ void HandleRtpFeedback(const rtcp::CommonHeader& rtcp_packet_header,
+ Timestamp now);
+ void HandleFir(const rtcp::CommonHeader& rtcp_packet_header);
+ void HandlePli(const rtcp::CommonHeader& rtcp_packet_header);
+ void HandleRemb(const rtcp::CommonHeader& rtcp_packet_header, Timestamp now);
+ void HandleNack(const rtcp::CommonHeader& rtcp_packet_header);
+ void HandleTransportFeedback(const rtcp::CommonHeader& rtcp_packet_header,
+ Timestamp now);
+ void HandleExtendedReports(const rtcp::CommonHeader& rtcp_packet_header,
+ Timestamp now);
+ // Extended Reports blocks handlers.
+ void HandleDlrr(const rtcp::Dlrr& dlrr, Timestamp now);
+ void HandleTargetBitrate(const rtcp::TargetBitrate& target_bitrate,
+ uint32_t remote_ssrc);
+ void ProcessReportBlocks(
+ Timestamp now,
+ rtc::ArrayView<const rtcp::ReportBlock> report_blocks);
+
+ void ReschedulePeriodicCompoundPackets();
+ void SchedulePeriodicCompoundPackets(TimeDelta delay);
+ // Appends RTCP sender and receiver reports to the `sender`.
+ // Both sender and receiver reports may have attached report blocks.
+  // Uses up to `config_.max_packet_size - reserved_bytes.per_packet` bytes
+  // per RTCP packet.
+  // Returns the list of sender ssrcs used in the sender reports.
+ struct ReservedBytes {
+ size_t per_packet = 0;
+ size_t per_sender = 0;
+ };
+ std::vector<uint32_t> FillReports(Timestamp now,
+ ReservedBytes reserved_bytes,
+ PacketSender& rtcp_sender);
+
+ // Creates compound RTCP packet, as defined in
+ // https://tools.ietf.org/html/rfc5506#section-2
+ void CreateCompoundPacket(Timestamp now,
+ size_t reserved_bytes,
+ PacketSender& rtcp_sender);
+
+ // Sends RTCP packets.
+ void SendPeriodicCompoundPacket();
+ void SendImmediateFeedback(const rtcp::RtcpPacket& rtcp_packet);
+  // Generates report blocks to be sent in sender or receiver reports.
+ std::vector<rtcp::ReportBlock> CreateReportBlocks(Timestamp now,
+ size_t num_max_blocks);
+
+ const RtcpTransceiverConfig config_;
+
+ bool ready_to_send_;
+ absl::optional<rtcp::Remb> remb_;
+ // TODO(danilchap): Remove entries from remote_senders_ that are no longer
+ // needed.
+ flat_map<uint32_t, RemoteSenderState> remote_senders_;
+ std::list<LocalSenderState> local_senders_;
+ flat_map<uint32_t, std::list<LocalSenderState>::iterator>
+ local_senders_by_ssrc_;
+ flat_map<uint32_t, RrtrTimes> received_rrtrs_;
+ RepeatingTaskHandle periodic_task_handle_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_RTP_RTCP_SOURCE_RTCP_TRANSCEIVER_IMPL_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_transceiver_impl_unittest.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_transceiver_impl_unittest.cc
new file mode 100644
index 0000000000..659df688f6
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_transceiver_impl_unittest.cc
@@ -0,0 +1,1744 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtcp_transceiver_impl.h"
+
+#include <memory>
+#include <utility>
+#include <vector>
+
+#include "absl/memory/memory.h"
+#include "api/rtp_headers.h"
+#include "api/test/create_time_controller.h"
+#include "api/test/time_controller.h"
+#include "api/units/data_rate.h"
+#include "api/units/time_delta.h"
+#include "api/units/timestamp.h"
+#include "api/video/video_bitrate_allocation.h"
+#include "modules/rtp_rtcp/include/receive_statistics.h"
+#include "modules/rtp_rtcp/mocks/mock_rtcp_rtt_stats.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/app.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/bye.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/compound_packet.h"
+#include "modules/rtp_rtcp/source/time_util.h"
+#include "system_wrappers/include/clock.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "test/mock_transport.h"
+#include "test/rtcp_packet_parser.h"
+
+namespace webrtc {
+namespace {
+
+using ::testing::_;
+using ::testing::ElementsAre;
+using ::testing::ElementsAreArray;
+using ::testing::Ge;
+using ::testing::NiceMock;
+using ::testing::Return;
+using ::testing::SizeIs;
+using ::testing::StrictMock;
+using ::testing::UnorderedElementsAre;
+using ::testing::WithArg;
+using ::webrtc::rtcp::Bye;
+using ::webrtc::rtcp::CompoundPacket;
+using ::webrtc::rtcp::ReportBlock;
+using ::webrtc::rtcp::SenderReport;
+using ::webrtc::test::RtcpPacketParser;
+
+class MockReceiveStatisticsProvider : public ReceiveStatisticsProvider {
+ public:
+ MOCK_METHOD(std::vector<ReportBlock>, RtcpReportBlocks, (size_t), (override));
+};
+
+class MockMediaReceiverRtcpObserver : public MediaReceiverRtcpObserver {
+ public:
+ MOCK_METHOD(void, OnSenderReport, (uint32_t, NtpTime, uint32_t), (override));
+ MOCK_METHOD(void, OnBye, (uint32_t), (override));
+ MOCK_METHOD(void,
+ OnBitrateAllocation,
+ (uint32_t, const VideoBitrateAllocation&),
+ (override));
+};
+
+class MockRtpStreamRtcpHandler : public RtpStreamRtcpHandler {
+ public:
+ MockRtpStreamRtcpHandler() {
+    // With each call, increase the number of sent packets and bytes to
+    // simulate an active RTP sender.
+ ON_CALL(*this, SentStats).WillByDefault([this] {
+ RtpStats stats;
+ stats.set_num_sent_packets(++num_calls_);
+ stats.set_num_sent_bytes(1'000 * num_calls_);
+ return stats;
+ });
+ }
+
+ MOCK_METHOD(RtpStats, SentStats, (), (override));
+ MOCK_METHOD(void,
+ OnNack,
+ (uint32_t, rtc::ArrayView<const uint16_t>),
+ (override));
+ MOCK_METHOD(void, OnFir, (uint32_t), (override));
+ MOCK_METHOD(void, OnPli, (uint32_t), (override));
+ MOCK_METHOD(void,
+ OnReportBlock,
+ (uint32_t, const rtcp::ReportBlock&),
+ (override));
+
+ private:
+ int num_calls_ = 0;
+};
+
+class MockNetworkLinkRtcpObserver : public NetworkLinkRtcpObserver {
+ public:
+ MOCK_METHOD(void,
+ OnRttUpdate,
+ (Timestamp receive_time, TimeDelta rtt),
+ (override));
+ MOCK_METHOD(void,
+ OnTransportFeedback,
+ (Timestamp receive_time, const rtcp::TransportFeedback& feedback),
+ (override));
+ MOCK_METHOD(void,
+ OnReceiverEstimatedMaxBitrate,
+ (Timestamp receive_time, DataRate bitrate),
+ (override));
+ MOCK_METHOD(void,
+ OnReportBlocks,
+ (Timestamp receive_time,
+ rtc::ArrayView<const rtcp::ReportBlock> report_blocks),
+ (override));
+};
+
+constexpr TimeDelta kReportPeriod = TimeDelta::Seconds(1);
+constexpr TimeDelta kAlmostForever = TimeDelta::Seconds(2);
+constexpr TimeDelta kTimePrecision = TimeDelta::Millis(1);
+
+MATCHER_P(Near, value, "") {
+ return arg > value - kTimePrecision && arg < value + kTimePrecision;
+}
+
+// Helper to wait for an rtcp packet produced on a different thread/task queue.
+class FakeRtcpTransport : public webrtc::Transport {
+ public:
+ explicit FakeRtcpTransport(TimeController& time) : time_(time) {}
+
+ bool SendRtcp(const uint8_t* data, size_t size) override {
+ sent_rtcp_ = true;
+ return true;
+ }
+ bool SendRtp(const uint8_t*, size_t, const webrtc::PacketOptions&) override {
+    ADD_FAILURE() << "RtcpTransceiver shouldn't send rtp packets.";
+ return true;
+ }
+
+  // Returns true when a packet was received by the transport.
+ bool WaitPacket() {
+ bool got_packet = time_.Wait([this] { return sent_rtcp_; }, kAlmostForever);
+ // Clear the 'event' to allow waiting for multiple packets.
+ sent_rtcp_ = false;
+ return got_packet;
+ }
+
+ private:
+ TimeController& time_;
+ bool sent_rtcp_ = false;
+};
+
+class RtcpParserTransport : public webrtc::Transport {
+ public:
+ explicit RtcpParserTransport(RtcpPacketParser* parser) : parser_(parser) {}
+  // Returns the total number of rtcp packets received.
+ int num_packets() const { return num_packets_; }
+
+ private:
+ bool SendRtcp(const uint8_t* data, size_t size) override {
+ ++num_packets_;
+ parser_->Parse(data, size);
+ return true;
+ }
+
+ bool SendRtp(const uint8_t*, size_t, const webrtc::PacketOptions&) override {
+    ADD_FAILURE() << "RtcpTransceiver shouldn't send rtp packets.";
+ return true;
+ }
+
+ RtcpPacketParser* const parser_;
+ int num_packets_ = 0;
+};
+
+class RtcpTransceiverImplTest : public ::testing::Test {
+ public:
+ RtcpTransceiverConfig DefaultTestConfig() {
+    // The RtcpTransceiverConfig default constructor sets default values for
+    // production. Tests don't need all key features: the default test config
+    // returns a valid config with all features turned off.
+ RtcpTransceiverConfig config;
+ config.clock = time_->GetClock();
+ config.outgoing_transport = &null_transport_;
+ config.schedule_periodic_compound_packets = false;
+ config.initial_report_delay = kReportPeriod / 2;
+ config.report_period = kReportPeriod;
+ return config;
+ }
+
+ TimeController& time_controller() { return *time_; }
+ Timestamp CurrentTime() { return time_->GetClock()->CurrentTime(); }
+ void AdvanceTime(TimeDelta time) { time_->AdvanceTime(time); }
+ std::unique_ptr<TaskQueueBase, TaskQueueDeleter> CreateTaskQueue() {
+ return time_->GetTaskQueueFactory()->CreateTaskQueue(
+ "rtcp", TaskQueueFactory::Priority::NORMAL);
+ }
+
+ private:
+ MockTransport null_transport_;
+ std::unique_ptr<TimeController> time_ = CreateSimulatedTimeController();
+};
+
+TEST_F(RtcpTransceiverImplTest, NeedToStopPeriodicTaskToDestroyOnTaskQueue) {
+ FakeRtcpTransport transport(time_controller());
+ auto queue = CreateTaskQueue();
+ RtcpTransceiverConfig config = DefaultTestConfig();
+ config.task_queue = queue.get();
+ config.schedule_periodic_compound_packets = true;
+ config.outgoing_transport = &transport;
+ auto* rtcp_transceiver = new RtcpTransceiverImpl(config);
+ // Wait for a periodic packet.
+ EXPECT_TRUE(transport.WaitPacket());
+
+ bool done = false;
+ queue->PostTask([rtcp_transceiver, &done] {
+ rtcp_transceiver->StopPeriodicTask();
+ delete rtcp_transceiver;
+ done = true;
+ });
+ ASSERT_TRUE(time_controller().Wait([&] { return done; }, kAlmostForever));
+}
+
+TEST_F(RtcpTransceiverImplTest, CanBeDestroyedRightAfterCreation) {
+ FakeRtcpTransport transport(time_controller());
+ auto queue = CreateTaskQueue();
+ RtcpTransceiverConfig config = DefaultTestConfig();
+ config.task_queue = queue.get();
+ config.schedule_periodic_compound_packets = true;
+ config.outgoing_transport = &transport;
+
+ bool done = false;
+ queue->PostTask([&] {
+ RtcpTransceiverImpl rtcp_transceiver(config);
+ rtcp_transceiver.StopPeriodicTask();
+ done = true;
+ });
+ ASSERT_TRUE(time_controller().Wait([&] { return done; }, kAlmostForever));
+}
+
+TEST_F(RtcpTransceiverImplTest, CanDestroyAfterTaskQueue) {
+ FakeRtcpTransport transport(time_controller());
+ auto queue = CreateTaskQueue();
+
+ RtcpTransceiverConfig config = DefaultTestConfig();
+ config.task_queue = queue.get();
+ config.schedule_periodic_compound_packets = true;
+ config.outgoing_transport = &transport;
+ auto* rtcp_transceiver = new RtcpTransceiverImpl(config);
+ // Wait for a periodic packet.
+ EXPECT_TRUE(transport.WaitPacket());
+
+ queue = nullptr;
+ delete rtcp_transceiver;
+}
+
+TEST_F(RtcpTransceiverImplTest, DelaysSendingFirstCompoundPacket) {
+ auto queue = CreateTaskQueue();
+ FakeRtcpTransport transport(time_controller());
+ RtcpTransceiverConfig config = DefaultTestConfig();
+ config.schedule_periodic_compound_packets = true;
+ config.outgoing_transport = &transport;
+ config.initial_report_delay = TimeDelta::Millis(10);
+ config.task_queue = queue.get();
+ absl::optional<RtcpTransceiverImpl> rtcp_transceiver;
+
+ Timestamp started = CurrentTime();
+ queue->PostTask([&] { rtcp_transceiver.emplace(config); });
+ EXPECT_TRUE(transport.WaitPacket());
+
+ EXPECT_GE(CurrentTime() - started, config.initial_report_delay);
+
+ // Cleanup.
+ bool done = false;
+ queue->PostTask([&] {
+ rtcp_transceiver->StopPeriodicTask();
+ rtcp_transceiver.reset();
+ done = true;
+ });
+ ASSERT_TRUE(time_controller().Wait([&] { return done; }, kAlmostForever));
+}
+
+TEST_F(RtcpTransceiverImplTest, PeriodicallySendsPackets) {
+ auto queue = CreateTaskQueue();
+ FakeRtcpTransport transport(time_controller());
+ RtcpTransceiverConfig config = DefaultTestConfig();
+ config.schedule_periodic_compound_packets = true;
+ config.outgoing_transport = &transport;
+ config.initial_report_delay = TimeDelta::Zero();
+ config.report_period = kReportPeriod;
+ config.task_queue = queue.get();
+ absl::optional<RtcpTransceiverImpl> rtcp_transceiver;
+ Timestamp time_just_before_1st_packet = Timestamp::MinusInfinity();
+ queue->PostTask([&] {
+    // Because initial_report_delay is set to zero, time_just_before_1st_packet
+    // should be very close to the time the packet is sent.
+ time_just_before_1st_packet = CurrentTime();
+ rtcp_transceiver.emplace(config);
+ });
+
+ EXPECT_TRUE(transport.WaitPacket());
+ EXPECT_TRUE(transport.WaitPacket());
+ Timestamp time_just_after_2nd_packet = CurrentTime();
+
+ EXPECT_GE(time_just_after_2nd_packet - time_just_before_1st_packet,
+ config.report_period);
+
+ // Cleanup.
+ bool done = false;
+ queue->PostTask([&] {
+ rtcp_transceiver->StopPeriodicTask();
+ rtcp_transceiver.reset();
+ done = true;
+ });
+ ASSERT_TRUE(time_controller().Wait([&] { return done; }, kAlmostForever));
+}
+
+TEST_F(RtcpTransceiverImplTest, SendCompoundPacketDelaysPeriodicSendPackets) {
+ auto queue = CreateTaskQueue();
+ FakeRtcpTransport transport(time_controller());
+ RtcpTransceiverConfig config = DefaultTestConfig();
+ config.schedule_periodic_compound_packets = true;
+ config.outgoing_transport = &transport;
+ config.initial_report_delay = TimeDelta::Zero();
+ config.report_period = kReportPeriod;
+ config.task_queue = queue.get();
+ absl::optional<RtcpTransceiverImpl> rtcp_transceiver;
+ queue->PostTask([&] { rtcp_transceiver.emplace(config); });
+
+ // Wait for the first packet.
+ EXPECT_TRUE(transport.WaitPacket());
+  // Send a non-periodic packet after half a period.
+ bool non_periodic = false;
+ Timestamp time_of_non_periodic_packet = Timestamp::MinusInfinity();
+ queue->PostDelayedTask(
+ [&] {
+ time_of_non_periodic_packet = CurrentTime();
+ rtcp_transceiver->SendCompoundPacket();
+ non_periodic = true;
+ },
+ config.report_period / 2);
+  // Though the non-periodic packet is scheduled halfway between periodic
+  // ones, due to the small period and task queue flakiness it might end up
+  // 1ms after the next periodic packet. To be sure the duration after the
+  // non-periodic packet is what gets tested, wait for the transport only
+  // after ensuring the non-periodic packet was sent.
+ EXPECT_TRUE(
+ time_controller().Wait([&] { return non_periodic; }, kAlmostForever));
+ EXPECT_TRUE(transport.WaitPacket());
+ // Wait for next periodic packet.
+ EXPECT_TRUE(transport.WaitPacket());
+ Timestamp time_of_last_periodic_packet = CurrentTime();
+ EXPECT_GE(time_of_last_periodic_packet - time_of_non_periodic_packet,
+ config.report_period);
+
+ // Cleanup.
+ bool done = false;
+ queue->PostTask([&] {
+ rtcp_transceiver->StopPeriodicTask();
+ rtcp_transceiver.reset();
+ done = true;
+ });
+ ASSERT_TRUE(time_controller().Wait([&] { return done; }, kAlmostForever));
+}
+
+TEST_F(RtcpTransceiverImplTest, SendsNoRtcpWhenNetworkStateIsDown) {
+ MockTransport mock_transport;
+ RtcpTransceiverConfig config = DefaultTestConfig();
+ config.initial_ready_to_send = false;
+ config.outgoing_transport = &mock_transport;
+ RtcpTransceiverImpl rtcp_transceiver(config);
+
+ EXPECT_CALL(mock_transport, SendRtcp(_, _)).Times(0);
+
+ const uint8_t raw[] = {1, 2, 3, 4};
+ const std::vector<uint16_t> sequence_numbers = {45, 57};
+ const uint32_t ssrcs[] = {123};
+ rtcp_transceiver.SendCompoundPacket();
+ rtcp_transceiver.SendRawPacket(raw);
+ rtcp_transceiver.SendNack(ssrcs[0], sequence_numbers);
+ rtcp_transceiver.SendPictureLossIndication(ssrcs[0]);
+ rtcp_transceiver.SendFullIntraRequest(ssrcs, true);
+}
+
+TEST_F(RtcpTransceiverImplTest, SendsRtcpWhenNetworkStateIsUp) {
+ MockTransport mock_transport;
+ RtcpTransceiverConfig config = DefaultTestConfig();
+ config.initial_ready_to_send = false;
+ config.outgoing_transport = &mock_transport;
+ RtcpTransceiverImpl rtcp_transceiver(config);
+
+ rtcp_transceiver.SetReadyToSend(true);
+
+ EXPECT_CALL(mock_transport, SendRtcp(_, _)).Times(5);
+
+ const uint8_t raw[] = {1, 2, 3, 4};
+ const std::vector<uint16_t> sequence_numbers = {45, 57};
+ const uint32_t ssrcs[] = {123};
+ rtcp_transceiver.SendCompoundPacket();
+ rtcp_transceiver.SendRawPacket(raw);
+ rtcp_transceiver.SendNack(ssrcs[0], sequence_numbers);
+ rtcp_transceiver.SendPictureLossIndication(ssrcs[0]);
+ rtcp_transceiver.SendFullIntraRequest(ssrcs, true);
+}
+
+TEST_F(RtcpTransceiverImplTest, SendsPeriodicRtcpWhenNetworkStateIsUp) {
+ auto queue = CreateTaskQueue();
+ FakeRtcpTransport transport(time_controller());
+ RtcpTransceiverConfig config = DefaultTestConfig();
+ config.schedule_periodic_compound_packets = true;
+ config.initial_ready_to_send = false;
+ config.outgoing_transport = &transport;
+ config.task_queue = queue.get();
+ absl::optional<RtcpTransceiverImpl> rtcp_transceiver;
+ rtcp_transceiver.emplace(config);
+
+ queue->PostTask([&] { rtcp_transceiver->SetReadyToSend(true); });
+
+ EXPECT_TRUE(transport.WaitPacket());
+
+ // Cleanup.
+ bool done = false;
+ queue->PostTask([&] {
+ rtcp_transceiver->StopPeriodicTask();
+ rtcp_transceiver.reset();
+ done = true;
+ });
+ ASSERT_TRUE(time_controller().Wait([&] { return done; }, kAlmostForever));
+}
+
+TEST_F(RtcpTransceiverImplTest, SendsMinimalCompoundPacket) {
+ const uint32_t kSenderSsrc = 12345;
+ RtcpTransceiverConfig config = DefaultTestConfig();
+ config.feedback_ssrc = kSenderSsrc;
+ config.cname = "cname";
+ RtcpPacketParser rtcp_parser;
+ RtcpParserTransport transport(&rtcp_parser);
+ config.outgoing_transport = &transport;
+ config.schedule_periodic_compound_packets = false;
+ RtcpTransceiverImpl rtcp_transceiver(config);
+
+ rtcp_transceiver.SendCompoundPacket();
+
+  // A minimal compound RTCP packet contains a sender or receiver report and
+  // an sdes with a cname.
+ ASSERT_GT(rtcp_parser.receiver_report()->num_packets(), 0);
+ EXPECT_EQ(rtcp_parser.receiver_report()->sender_ssrc(), kSenderSsrc);
+ ASSERT_GT(rtcp_parser.sdes()->num_packets(), 0);
+ ASSERT_EQ(rtcp_parser.sdes()->chunks().size(), 1u);
+ EXPECT_EQ(rtcp_parser.sdes()->chunks()[0].ssrc, kSenderSsrc);
+ EXPECT_EQ(rtcp_parser.sdes()->chunks()[0].cname, config.cname);
+}
+
+TEST_F(RtcpTransceiverImplTest, AvoidsEmptyPacketsInReducedMode) {
+ MockTransport transport;
+ EXPECT_CALL(transport, SendRtcp).Times(0);
+ NiceMock<MockReceiveStatisticsProvider> receive_statistics;
+
+ RtcpTransceiverConfig config = DefaultTestConfig();
+ config.outgoing_transport = &transport;
+ config.rtcp_mode = webrtc::RtcpMode::kReducedSize;
+ config.receive_statistics = &receive_statistics;
+ RtcpTransceiverImpl rtcp_transceiver(config);
+
+ rtcp_transceiver.SendCompoundPacket();
+}
+
+TEST_F(RtcpTransceiverImplTest, AvoidsEmptyReceiverReportsInReducedMode) {
+ RtcpPacketParser rtcp_parser;
+ RtcpParserTransport transport(&rtcp_parser);
+ NiceMock<MockReceiveStatisticsProvider> receive_statistics;
+
+ RtcpTransceiverConfig config = DefaultTestConfig();
+ config.outgoing_transport = &transport;
+ config.rtcp_mode = webrtc::RtcpMode::kReducedSize;
+ config.receive_statistics = &receive_statistics;
+ // Set it to produce something (RRTR) in the "periodic" rtcp packets.
+ config.non_sender_rtt_measurement = true;
+ RtcpTransceiverImpl rtcp_transceiver(config);
+
+ // Rather than waiting for the right time to produce the periodic packet,
+ // trigger it manually.
+ rtcp_transceiver.SendCompoundPacket();
+
+ EXPECT_EQ(rtcp_parser.receiver_report()->num_packets(), 0);
+ EXPECT_GT(rtcp_parser.xr()->num_packets(), 0);
+}
+
+TEST_F(RtcpTransceiverImplTest, SendsNoRembInitially) {
+ const uint32_t kSenderSsrc = 12345;
+ RtcpTransceiverConfig config = DefaultTestConfig();
+ config.feedback_ssrc = kSenderSsrc;
+ RtcpPacketParser rtcp_parser;
+ RtcpParserTransport transport(&rtcp_parser);
+ config.outgoing_transport = &transport;
+ config.schedule_periodic_compound_packets = false;
+ RtcpTransceiverImpl rtcp_transceiver(config);
+
+ rtcp_transceiver.SendCompoundPacket();
+
+ EXPECT_EQ(transport.num_packets(), 1);
+ EXPECT_EQ(rtcp_parser.remb()->num_packets(), 0);
+}
+
+TEST_F(RtcpTransceiverImplTest, SetRembIncludesRembInNextCompoundPacket) {
+ const uint32_t kSenderSsrc = 12345;
+ RtcpTransceiverConfig config = DefaultTestConfig();
+ config.feedback_ssrc = kSenderSsrc;
+ RtcpPacketParser rtcp_parser;
+ RtcpParserTransport transport(&rtcp_parser);
+ config.outgoing_transport = &transport;
+ config.schedule_periodic_compound_packets = false;
+ RtcpTransceiverImpl rtcp_transceiver(config);
+
+ rtcp_transceiver.SetRemb(/*bitrate_bps=*/10000, /*ssrcs=*/{54321, 64321});
+ rtcp_transceiver.SendCompoundPacket();
+
+ EXPECT_EQ(rtcp_parser.remb()->num_packets(), 1);
+ EXPECT_EQ(rtcp_parser.remb()->sender_ssrc(), kSenderSsrc);
+ EXPECT_EQ(rtcp_parser.remb()->bitrate_bps(), 10000);
+ EXPECT_THAT(rtcp_parser.remb()->ssrcs(), ElementsAre(54321, 64321));
+}
+
+TEST_F(RtcpTransceiverImplTest, SetRembUpdatesValuesToSend) {
+ const uint32_t kSenderSsrc = 12345;
+ RtcpTransceiverConfig config = DefaultTestConfig();
+ config.feedback_ssrc = kSenderSsrc;
+ RtcpPacketParser rtcp_parser;
+ RtcpParserTransport transport(&rtcp_parser);
+ config.outgoing_transport = &transport;
+ config.schedule_periodic_compound_packets = false;
+ RtcpTransceiverImpl rtcp_transceiver(config);
+
+ rtcp_transceiver.SetRemb(/*bitrate_bps=*/10000, /*ssrcs=*/{54321, 64321});
+ rtcp_transceiver.SendCompoundPacket();
+
+ EXPECT_EQ(rtcp_parser.remb()->num_packets(), 1);
+ EXPECT_EQ(rtcp_parser.remb()->bitrate_bps(), 10000);
+ EXPECT_THAT(rtcp_parser.remb()->ssrcs(), ElementsAre(54321, 64321));
+
+ rtcp_transceiver.SetRemb(/*bitrate_bps=*/70000, /*ssrcs=*/{67321});
+ rtcp_transceiver.SendCompoundPacket();
+
+ EXPECT_EQ(rtcp_parser.remb()->num_packets(), 2);
+ EXPECT_EQ(rtcp_parser.remb()->bitrate_bps(), 70000);
+ EXPECT_THAT(rtcp_parser.remb()->ssrcs(), ElementsAre(67321));
+}
+
+TEST_F(RtcpTransceiverImplTest, SetRembSendsImmediatelyIfSendRembOnChange) {
+ const uint32_t kSenderSsrc = 12345;
+ RtcpTransceiverConfig config = DefaultTestConfig();
+ config.send_remb_on_change = true;
+ config.feedback_ssrc = kSenderSsrc;
+ RtcpPacketParser rtcp_parser;
+ RtcpParserTransport transport(&rtcp_parser);
+ config.outgoing_transport = &transport;
+ config.schedule_periodic_compound_packets = false;
+ RtcpTransceiverImpl rtcp_transceiver(config);
+
+ rtcp_transceiver.SetRemb(/*bitrate_bps=*/10000, /*ssrcs=*/{});
+ EXPECT_EQ(rtcp_parser.remb()->num_packets(), 1);
+ EXPECT_EQ(rtcp_parser.remb()->sender_ssrc(), kSenderSsrc);
+ EXPECT_EQ(rtcp_parser.remb()->bitrate_bps(), 10000);
+
+ // If there is no change, the packet is not sent immediately.
+ rtcp_transceiver.SetRemb(/*bitrate_bps=*/10000, /*ssrcs=*/{});
+ EXPECT_EQ(rtcp_parser.remb()->num_packets(), 1);
+
+ rtcp_transceiver.SetRemb(/*bitrate_bps=*/20000, /*ssrcs=*/{});
+ EXPECT_EQ(rtcp_parser.remb()->num_packets(), 2);
+ EXPECT_EQ(rtcp_parser.remb()->bitrate_bps(), 20000);
+}
+
+TEST_F(RtcpTransceiverImplTest,
+ SetRembSendsImmediatelyIfSendRembOnChangeReducedSize) {
+ const uint32_t kSenderSsrc = 12345;
+ RtcpTransceiverConfig config = DefaultTestConfig();
+ config.send_remb_on_change = true;
+ config.rtcp_mode = webrtc::RtcpMode::kReducedSize;
+ config.feedback_ssrc = kSenderSsrc;
+ RtcpPacketParser rtcp_parser;
+ RtcpParserTransport transport(&rtcp_parser);
+ config.outgoing_transport = &transport;
+ config.schedule_periodic_compound_packets = false;
+ RtcpTransceiverImpl rtcp_transceiver(config);
+
+ rtcp_transceiver.SetRemb(/*bitrate_bps=*/10000, /*ssrcs=*/{});
+ EXPECT_EQ(rtcp_parser.remb()->num_packets(), 1);
+ EXPECT_EQ(rtcp_parser.remb()->sender_ssrc(), kSenderSsrc);
+ EXPECT_EQ(rtcp_parser.remb()->bitrate_bps(), 10000);
+}
+
+TEST_F(RtcpTransceiverImplTest, SetRembIncludesRembInAllCompoundPackets) {
+ const uint32_t kSenderSsrc = 12345;
+ RtcpTransceiverConfig config = DefaultTestConfig();
+ config.feedback_ssrc = kSenderSsrc;
+ RtcpPacketParser rtcp_parser;
+ RtcpParserTransport transport(&rtcp_parser);
+ config.outgoing_transport = &transport;
+ config.schedule_periodic_compound_packets = false;
+ RtcpTransceiverImpl rtcp_transceiver(config);
+
+ rtcp_transceiver.SetRemb(/*bitrate_bps=*/10000, /*ssrcs=*/{54321, 64321});
+ rtcp_transceiver.SendCompoundPacket();
+ rtcp_transceiver.SendCompoundPacket();
+
+ EXPECT_EQ(transport.num_packets(), 2);
+ EXPECT_EQ(rtcp_parser.remb()->num_packets(), 2);
+}
+
+TEST_F(RtcpTransceiverImplTest, SendsNoRembAfterUnset) {
+ const uint32_t kSenderSsrc = 12345;
+ RtcpTransceiverConfig config = DefaultTestConfig();
+ config.feedback_ssrc = kSenderSsrc;
+ RtcpPacketParser rtcp_parser;
+ RtcpParserTransport transport(&rtcp_parser);
+ config.outgoing_transport = &transport;
+ config.schedule_periodic_compound_packets = false;
+ RtcpTransceiverImpl rtcp_transceiver(config);
+
+ rtcp_transceiver.SetRemb(/*bitrate_bps=*/10000, /*ssrcs=*/{54321, 64321});
+ rtcp_transceiver.SendCompoundPacket();
+ EXPECT_EQ(transport.num_packets(), 1);
+ ASSERT_EQ(rtcp_parser.remb()->num_packets(), 1);
+
+ rtcp_transceiver.UnsetRemb();
+ rtcp_transceiver.SendCompoundPacket();
+
+ EXPECT_EQ(transport.num_packets(), 2);
+ EXPECT_EQ(rtcp_parser.remb()->num_packets(), 1);
+}
+
+TEST_F(RtcpTransceiverImplTest, ReceiverReportUsesReceiveStatistics) {
+ const uint32_t kSenderSsrc = 12345;
+ const uint32_t kMediaSsrc = 54321;
+ MockReceiveStatisticsProvider receive_statistics;
+ std::vector<ReportBlock> report_blocks(1);
+ report_blocks[0].SetMediaSsrc(kMediaSsrc);
+ EXPECT_CALL(receive_statistics, RtcpReportBlocks(_))
+ .WillRepeatedly(Return(report_blocks));
+ SimulatedClock clock(0);
+
+ RtcpTransceiverConfig config = DefaultTestConfig();
+ config.feedback_ssrc = kSenderSsrc;
+ RtcpPacketParser rtcp_parser;
+ RtcpParserTransport transport(&rtcp_parser);
+ config.outgoing_transport = &transport;
+ config.receive_statistics = &receive_statistics;
+ config.schedule_periodic_compound_packets = false;
+ RtcpTransceiverImpl rtcp_transceiver(config);
+
+ rtcp_transceiver.SendCompoundPacket();
+
+ ASSERT_GT(rtcp_parser.receiver_report()->num_packets(), 0);
+ EXPECT_EQ(rtcp_parser.receiver_report()->sender_ssrc(), kSenderSsrc);
+ ASSERT_THAT(rtcp_parser.receiver_report()->report_blocks(),
+ SizeIs(report_blocks.size()));
+ EXPECT_EQ(rtcp_parser.receiver_report()->report_blocks()[0].source_ssrc(),
+ kMediaSsrc);
+}
+
+TEST_F(RtcpTransceiverImplTest, MultipleObserversOnSameSsrc) {
+ const uint32_t kRemoteSsrc = 12345;
+ StrictMock<MockMediaReceiverRtcpObserver> observer1;
+ StrictMock<MockMediaReceiverRtcpObserver> observer2;
+ RtcpTransceiverConfig config = DefaultTestConfig();
+ RtcpTransceiverImpl rtcp_transceiver(config);
+ rtcp_transceiver.AddMediaReceiverRtcpObserver(kRemoteSsrc, &observer1);
+ rtcp_transceiver.AddMediaReceiverRtcpObserver(kRemoteSsrc, &observer2);
+
+ const NtpTime kRemoteNtp(0x9876543211);
+ const uint32_t kRemoteRtp = 0x444555;
+ SenderReport sr;
+ sr.SetSenderSsrc(kRemoteSsrc);
+ sr.SetNtp(kRemoteNtp);
+ sr.SetRtpTimestamp(kRemoteRtp);
+ auto raw_packet = sr.Build();
+
+ EXPECT_CALL(observer1, OnSenderReport(kRemoteSsrc, kRemoteNtp, kRemoteRtp));
+ EXPECT_CALL(observer2, OnSenderReport(kRemoteSsrc, kRemoteNtp, kRemoteRtp));
+ rtcp_transceiver.ReceivePacket(raw_packet, Timestamp::Micros(0));
+}
+
+TEST_F(RtcpTransceiverImplTest, DoesntCallObserverAfterRemoved) {
+ const uint32_t kRemoteSsrc = 12345;
+ StrictMock<MockMediaReceiverRtcpObserver> observer1;
+ StrictMock<MockMediaReceiverRtcpObserver> observer2;
+ RtcpTransceiverConfig config = DefaultTestConfig();
+ RtcpTransceiverImpl rtcp_transceiver(config);
+ rtcp_transceiver.AddMediaReceiverRtcpObserver(kRemoteSsrc, &observer1);
+ rtcp_transceiver.AddMediaReceiverRtcpObserver(kRemoteSsrc, &observer2);
+
+ SenderReport sr;
+ sr.SetSenderSsrc(kRemoteSsrc);
+ auto raw_packet = sr.Build();
+
+ rtcp_transceiver.RemoveMediaReceiverRtcpObserver(kRemoteSsrc, &observer1);
+
+ EXPECT_CALL(observer1, OnSenderReport(_, _, _)).Times(0);
+ EXPECT_CALL(observer2, OnSenderReport(_, _, _));
+ rtcp_transceiver.ReceivePacket(raw_packet, Timestamp::Micros(0));
+}
+
+TEST_F(RtcpTransceiverImplTest, CallsObserverOnSenderReportBySenderSsrc) {
+ const uint32_t kRemoteSsrc1 = 12345;
+ const uint32_t kRemoteSsrc2 = 22345;
+ StrictMock<MockMediaReceiverRtcpObserver> observer1;
+ StrictMock<MockMediaReceiverRtcpObserver> observer2;
+ RtcpTransceiverConfig config = DefaultTestConfig();
+ RtcpTransceiverImpl rtcp_transceiver(config);
+ rtcp_transceiver.AddMediaReceiverRtcpObserver(kRemoteSsrc1, &observer1);
+ rtcp_transceiver.AddMediaReceiverRtcpObserver(kRemoteSsrc2, &observer2);
+
+ const NtpTime kRemoteNtp(0x9876543211);
+ const uint32_t kRemoteRtp = 0x444555;
+ SenderReport sr;
+ sr.SetSenderSsrc(kRemoteSsrc1);
+ sr.SetNtp(kRemoteNtp);
+ sr.SetRtpTimestamp(kRemoteRtp);
+ auto raw_packet = sr.Build();
+
+ EXPECT_CALL(observer1, OnSenderReport(kRemoteSsrc1, kRemoteNtp, kRemoteRtp));
+ EXPECT_CALL(observer2, OnSenderReport).Times(0);
+ rtcp_transceiver.ReceivePacket(raw_packet, Timestamp::Micros(0));
+}
+
+TEST_F(RtcpTransceiverImplTest, CallsObserverOnByeBySenderSsrc) {
+ const uint32_t kRemoteSsrc1 = 12345;
+ const uint32_t kRemoteSsrc2 = 22345;
+ StrictMock<MockMediaReceiverRtcpObserver> observer1;
+ StrictMock<MockMediaReceiverRtcpObserver> observer2;
+ RtcpTransceiverConfig config = DefaultTestConfig();
+ RtcpTransceiverImpl rtcp_transceiver(config);
+ rtcp_transceiver.AddMediaReceiverRtcpObserver(kRemoteSsrc1, &observer1);
+ rtcp_transceiver.AddMediaReceiverRtcpObserver(kRemoteSsrc2, &observer2);
+
+ Bye bye;
+ bye.SetSenderSsrc(kRemoteSsrc1);
+ auto raw_packet = bye.Build();
+
+ EXPECT_CALL(observer1, OnBye(kRemoteSsrc1));
+ EXPECT_CALL(observer2, OnBye(_)).Times(0);
+ rtcp_transceiver.ReceivePacket(raw_packet, Timestamp::Micros(0));
+}
+
+TEST_F(RtcpTransceiverImplTest, CallsObserverOnTargetBitrateBySenderSsrc) {
+ const uint32_t kRemoteSsrc1 = 12345;
+ const uint32_t kRemoteSsrc2 = 22345;
+ StrictMock<MockMediaReceiverRtcpObserver> observer1;
+ StrictMock<MockMediaReceiverRtcpObserver> observer2;
+ RtcpTransceiverConfig config = DefaultTestConfig();
+ RtcpTransceiverImpl rtcp_transceiver(config);
+ rtcp_transceiver.AddMediaReceiverRtcpObserver(kRemoteSsrc1, &observer1);
+ rtcp_transceiver.AddMediaReceiverRtcpObserver(kRemoteSsrc2, &observer2);
+
+ webrtc::rtcp::TargetBitrate target_bitrate;
+ target_bitrate.AddTargetBitrate(0, 0, /*target_bitrate_kbps=*/10);
+ target_bitrate.AddTargetBitrate(0, 1, /*target_bitrate_kbps=*/20);
+ target_bitrate.AddTargetBitrate(1, 0, /*target_bitrate_kbps=*/40);
+ target_bitrate.AddTargetBitrate(1, 1, /*target_bitrate_kbps=*/80);
+ webrtc::rtcp::ExtendedReports xr;
+ xr.SetSenderSsrc(kRemoteSsrc1);
+ xr.SetTargetBitrate(target_bitrate);
+ auto raw_packet = xr.Build();
+
+ VideoBitrateAllocation bitrate_allocation;
+ bitrate_allocation.SetBitrate(0, 0, /*bitrate_bps=*/10000);
+ bitrate_allocation.SetBitrate(0, 1, /*bitrate_bps=*/20000);
+ bitrate_allocation.SetBitrate(1, 0, /*bitrate_bps=*/40000);
+ bitrate_allocation.SetBitrate(1, 1, /*bitrate_bps=*/80000);
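+  // TargetBitrate entries are expressed in kbps while VideoBitrateAllocation
+  // stores bps, hence the factor of 1000 above.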
+ EXPECT_CALL(observer1, OnBitrateAllocation(kRemoteSsrc1, bitrate_allocation));
+ EXPECT_CALL(observer2, OnBitrateAllocation(_, _)).Times(0);
+ rtcp_transceiver.ReceivePacket(raw_packet, Timestamp::Micros(0));
+}
+
+TEST_F(RtcpTransceiverImplTest, SkipsIncorrectTargetBitrateEntries) {
+ const uint32_t kRemoteSsrc = 12345;
+ MockMediaReceiverRtcpObserver observer;
+ RtcpTransceiverConfig config = DefaultTestConfig();
+ RtcpTransceiverImpl rtcp_transceiver(config);
+ rtcp_transceiver.AddMediaReceiverRtcpObserver(kRemoteSsrc, &observer);
+
+ webrtc::rtcp::TargetBitrate target_bitrate;
+ target_bitrate.AddTargetBitrate(0, 0, /*target_bitrate_kbps=*/10);
+ target_bitrate.AddTargetBitrate(0, webrtc::kMaxTemporalStreams, 20);
+ target_bitrate.AddTargetBitrate(webrtc::kMaxSpatialLayers, 0, 40);
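+  // Entries at or beyond kMaxSpatialLayers / kMaxTemporalStreams don't fit in
+  // a VideoBitrateAllocation and are expected to be dropped.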
+
+ webrtc::rtcp::ExtendedReports xr;
+ xr.SetTargetBitrate(target_bitrate);
+ xr.SetSenderSsrc(kRemoteSsrc);
+ auto raw_packet = xr.Build();
+
+ VideoBitrateAllocation expected_allocation;
+ expected_allocation.SetBitrate(0, 0, /*bitrate_bps=*/10000);
+ EXPECT_CALL(observer, OnBitrateAllocation(kRemoteSsrc, expected_allocation));
+ rtcp_transceiver.ReceivePacket(raw_packet, Timestamp::Micros(0));
+}
+
+TEST_F(RtcpTransceiverImplTest, CallsObserverOnByeBehindSenderReport) {
+ const uint32_t kRemoteSsrc = 12345;
+ MockMediaReceiverRtcpObserver observer;
+ RtcpTransceiverConfig config = DefaultTestConfig();
+ RtcpTransceiverImpl rtcp_transceiver(config);
+ rtcp_transceiver.AddMediaReceiverRtcpObserver(kRemoteSsrc, &observer);
+
+ CompoundPacket compound;
+ auto sr = std::make_unique<SenderReport>();
+ sr->SetSenderSsrc(kRemoteSsrc);
+ compound.Append(std::move(sr));
+ auto bye = std::make_unique<Bye>();
+ bye->SetSenderSsrc(kRemoteSsrc);
+ compound.Append(std::move(bye));
+ auto raw_packet = compound.Build();
+
+ EXPECT_CALL(observer, OnBye(kRemoteSsrc));
+ EXPECT_CALL(observer, OnSenderReport(kRemoteSsrc, _, _));
+ rtcp_transceiver.ReceivePacket(raw_packet, Timestamp::Micros(0));
+}
+
+TEST_F(RtcpTransceiverImplTest, CallsObserverOnByeBehindUnknownRtcpPacket) {
+ const uint32_t kRemoteSsrc = 12345;
+ MockMediaReceiverRtcpObserver observer;
+ RtcpTransceiverConfig config = DefaultTestConfig();
+ RtcpTransceiverImpl rtcp_transceiver(config);
+ rtcp_transceiver.AddMediaReceiverRtcpObserver(kRemoteSsrc, &observer);
+
+ CompoundPacket compound;
+  // Use an Application-Defined RTCP packet as the unknown one.
+ auto app = std::make_unique<webrtc::rtcp::App>();
+ compound.Append(std::move(app));
+ auto bye = std::make_unique<Bye>();
+ bye->SetSenderSsrc(kRemoteSsrc);
+ compound.Append(std::move(bye));
+ auto raw_packet = compound.Build();
+
+ EXPECT_CALL(observer, OnBye(kRemoteSsrc));
+ rtcp_transceiver.ReceivePacket(raw_packet, Timestamp::Micros(0));
+}
+
+TEST_F(RtcpTransceiverImplTest,
+ WhenSendsReceiverReportSetsLastSenderReportTimestampPerRemoteSsrc) {
+ const uint32_t kRemoteSsrc1 = 4321;
+ const uint32_t kRemoteSsrc2 = 5321;
+ std::vector<ReportBlock> statistics_report_blocks(2);
+ statistics_report_blocks[0].SetMediaSsrc(kRemoteSsrc1);
+ statistics_report_blocks[1].SetMediaSsrc(kRemoteSsrc2);
+ MockReceiveStatisticsProvider receive_statistics;
+ EXPECT_CALL(receive_statistics, RtcpReportBlocks(_))
+ .WillOnce(Return(statistics_report_blocks));
+ SimulatedClock clock(0);
+
+ RtcpTransceiverConfig config = DefaultTestConfig();
+ config.schedule_periodic_compound_packets = false;
+ RtcpPacketParser rtcp_parser;
+ RtcpParserTransport transport(&rtcp_parser);
+ config.outgoing_transport = &transport;
+ config.receive_statistics = &receive_statistics;
+ RtcpTransceiverImpl rtcp_transceiver(config);
+
+ const NtpTime kRemoteNtp(0x9876543211);
+ // Receive SenderReport for RemoteSsrc1, but no report for RemoteSsrc2.
+ SenderReport sr;
+ sr.SetSenderSsrc(kRemoteSsrc1);
+ sr.SetNtp(kRemoteNtp);
+ auto raw_packet = sr.Build();
+ rtcp_transceiver.ReceivePacket(raw_packet, Timestamp::Micros(0));
+
+ // Trigger sending ReceiverReport.
+ rtcp_transceiver.SendCompoundPacket();
+
+ EXPECT_GT(rtcp_parser.receiver_report()->num_packets(), 0);
+ const auto& report_blocks = rtcp_parser.receiver_report()->report_blocks();
+ ASSERT_EQ(report_blocks.size(), 2u);
+  // RtcpTransceiverImpl doesn't guarantee that the order of the report blocks
+  // matches the result of the ReceiveStatisticsProvider::RtcpReportBlocks
+  // callback, but for simplicity the test assumes it does.
+ ASSERT_EQ(report_blocks[0].source_ssrc(), kRemoteSsrc1);
+ EXPECT_EQ(report_blocks[0].last_sr(), CompactNtp(kRemoteNtp));
+
+ ASSERT_EQ(report_blocks[1].source_ssrc(), kRemoteSsrc2);
+  // No matching Sender Report for kRemoteSsrc2, so the LastSR field has to
+  // be 0.
+ EXPECT_EQ(report_blocks[1].last_sr(), 0u);
+}
+
+TEST_F(RtcpTransceiverImplTest,
+ WhenSendsReceiverReportCalculatesDelaySinceLastSenderReport) {
+ const uint32_t kRemoteSsrc1 = 4321;
+ const uint32_t kRemoteSsrc2 = 5321;
+
+ std::vector<ReportBlock> statistics_report_blocks(2);
+ statistics_report_blocks[0].SetMediaSsrc(kRemoteSsrc1);
+ statistics_report_blocks[1].SetMediaSsrc(kRemoteSsrc2);
+ MockReceiveStatisticsProvider receive_statistics;
+ EXPECT_CALL(receive_statistics, RtcpReportBlocks(_))
+ .WillOnce(Return(statistics_report_blocks));
+ SimulatedClock clock(0);
+
+ RtcpTransceiverConfig config = DefaultTestConfig();
+ config.schedule_periodic_compound_packets = false;
+ RtcpPacketParser rtcp_parser;
+ RtcpParserTransport transport(&rtcp_parser);
+ config.outgoing_transport = &transport;
+ config.receive_statistics = &receive_statistics;
+ RtcpTransceiverImpl rtcp_transceiver(config);
+
+ auto receive_sender_report = [&](uint32_t remote_ssrc) {
+ SenderReport sr;
+ sr.SetSenderSsrc(remote_ssrc);
+ rtcp_transceiver.ReceivePacket(sr.Build(), CurrentTime());
+ };
+
+ receive_sender_report(kRemoteSsrc1);
+ time_controller().AdvanceTime(TimeDelta::Millis(100));
+
+ receive_sender_report(kRemoteSsrc2);
+ time_controller().AdvanceTime(TimeDelta::Millis(100));
+
+ // Trigger ReceiverReport back.
+ rtcp_transceiver.SendCompoundPacket();
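+  // By now 200 ms have passed since kRemoteSsrc1's sender report and 100 ms
+  // since kRemoteSsrc2's; the DLSR fields below should reflect that.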
+
+ EXPECT_GT(rtcp_parser.receiver_report()->num_packets(), 0);
+ const auto& report_blocks = rtcp_parser.receiver_report()->report_blocks();
+ ASSERT_EQ(report_blocks.size(), 2u);
+  // RtcpTransceiverImpl doesn't guarantee that the order of the report blocks
+  // matches the result of the ReceiveStatisticsProvider::RtcpReportBlocks
+  // callback, but for simplicity the test assumes it does.
+ ASSERT_EQ(report_blocks[0].source_ssrc(), kRemoteSsrc1);
+ EXPECT_THAT(CompactNtpRttToTimeDelta(report_blocks[0].delay_since_last_sr()),
+ Near(TimeDelta::Millis(200)));
+
+ ASSERT_EQ(report_blocks[1].source_ssrc(), kRemoteSsrc2);
+ EXPECT_THAT(CompactNtpRttToTimeDelta(report_blocks[1].delay_since_last_sr()),
+ Near(TimeDelta::Millis(100)));
+}
+
+TEST_F(RtcpTransceiverImplTest, MaySendMultipleReceiverReportInSinglePacket) {
+ std::vector<ReportBlock> statistics_report_blocks(40);
+ MockReceiveStatisticsProvider receive_statistics;
+ EXPECT_CALL(receive_statistics, RtcpReportBlocks(/*max_blocks=*/Ge(40u)))
+ .WillOnce(Return(statistics_report_blocks));
+
+ RtcpTransceiverConfig config = DefaultTestConfig();
+ RtcpPacketParser rtcp_parser;
+ RtcpParserTransport transport(&rtcp_parser);
+ config.outgoing_transport = &transport;
+ config.receive_statistics = &receive_statistics;
+ RtcpTransceiverImpl rtcp_transceiver(config);
+
+ // Trigger ReceiverReports.
+ rtcp_transceiver.SendCompoundPacket();
+
+ // Expect a single RTCP packet with multiple receiver reports in it.
+ EXPECT_EQ(transport.num_packets(), 1);
+  // A receiver report may contain up to 31 report blocks, so 2 reports are
+  // needed to carry 40 blocks: 31 in the first, 9 in the second.
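+  // (The 31-block limit comes from the 5-bit reception report count field in
+  // the RTCP header, RFC 3550 section 6.4.2: 2^5 - 1 = 31.)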
+ EXPECT_EQ(rtcp_parser.receiver_report()->num_packets(), 2);
+  // RtcpParser remembers just the last receiver report, so the number of
+  // blocks in the first receiver report can't be checked.
+ EXPECT_THAT(rtcp_parser.receiver_report()->report_blocks(), SizeIs(9));
+}
+
+TEST_F(RtcpTransceiverImplTest, AttachMaxNumberOfReportBlocksToCompoundPacket) {
+ MockReceiveStatisticsProvider receive_statistics;
+ EXPECT_CALL(receive_statistics, RtcpReportBlocks)
+ .WillOnce([](size_t max_blocks) {
+ return std::vector<ReportBlock>(max_blocks);
+ });
+ RtcpTransceiverConfig config = DefaultTestConfig();
+ config.rtcp_mode = RtcpMode::kCompound;
+ RtcpPacketParser rtcp_parser;
+ RtcpParserTransport transport(&rtcp_parser);
+ config.outgoing_transport = &transport;
+ config.receive_statistics = &receive_statistics;
+ RtcpTransceiverImpl rtcp_transceiver(config);
+
+ EXPECT_EQ(transport.num_packets(), 0);
+  // Send a fast feedback message. Because of compound mode, report blocks
+  // should be attached.
+ rtcp_transceiver.SendPictureLossIndication(/*ssrc=*/123);
+
+  // Expect a single RTCP packet with multiple receiver reports and a PLI.
+ EXPECT_EQ(transport.num_packets(), 1);
+ EXPECT_GT(rtcp_parser.receiver_report()->num_packets(), 1);
+ EXPECT_EQ(rtcp_parser.pli()->num_packets(), 1);
+}
+
+TEST_F(RtcpTransceiverImplTest, SendsNack) {
+ const uint32_t kSenderSsrc = 1234;
+ const uint32_t kRemoteSsrc = 4321;
+ std::vector<uint16_t> kMissingSequenceNumbers = {34, 37, 38};
+ RtcpTransceiverConfig config = DefaultTestConfig();
+ config.feedback_ssrc = kSenderSsrc;
+ config.schedule_periodic_compound_packets = false;
+ RtcpPacketParser rtcp_parser;
+ RtcpParserTransport transport(&rtcp_parser);
+ config.outgoing_transport = &transport;
+ RtcpTransceiverImpl rtcp_transceiver(config);
+
+ rtcp_transceiver.SendNack(kRemoteSsrc, kMissingSequenceNumbers);
+
+ EXPECT_EQ(rtcp_parser.nack()->num_packets(), 1);
+ EXPECT_EQ(rtcp_parser.nack()->sender_ssrc(), kSenderSsrc);
+ EXPECT_EQ(rtcp_parser.nack()->media_ssrc(), kRemoteSsrc);
+ EXPECT_EQ(rtcp_parser.nack()->packet_ids(), kMissingSequenceNumbers);
+}
+
+TEST_F(RtcpTransceiverImplTest, ReceivesNack) {
+ static constexpr uint32_t kRemoteSsrc = 4321;
+ static constexpr uint32_t kMediaSsrc1 = 1234;
+ static constexpr uint32_t kMediaSsrc2 = 1235;
+ std::vector<uint16_t> kMissingSequenceNumbers = {34, 37, 38};
+ RtcpTransceiverConfig config = DefaultTestConfig();
+ RtcpTransceiverImpl rtcp_transceiver(config);
+
+ MockRtpStreamRtcpHandler local_stream1;
+ MockRtpStreamRtcpHandler local_stream2;
+ EXPECT_CALL(local_stream1,
+ OnNack(kRemoteSsrc, ElementsAreArray(kMissingSequenceNumbers)));
+ EXPECT_CALL(local_stream2, OnNack).Times(0);
+
+ EXPECT_TRUE(rtcp_transceiver.AddMediaSender(kMediaSsrc1, &local_stream1));
+ EXPECT_TRUE(rtcp_transceiver.AddMediaSender(kMediaSsrc2, &local_stream2));
+
+ rtcp::Nack nack;
+ nack.SetSenderSsrc(kRemoteSsrc);
+ nack.SetMediaSsrc(kMediaSsrc1);
+ nack.SetPacketIds(kMissingSequenceNumbers);
+ rtcp_transceiver.ReceivePacket(nack.Build(), config.clock->CurrentTime());
+}
+
+TEST_F(RtcpTransceiverImplTest, RequestKeyFrameWithPictureLossIndication) {
+ const uint32_t kSenderSsrc = 1234;
+ const uint32_t kRemoteSsrc = 4321;
+ RtcpTransceiverConfig config = DefaultTestConfig();
+ config.feedback_ssrc = kSenderSsrc;
+ config.schedule_periodic_compound_packets = false;
+ RtcpPacketParser rtcp_parser;
+ RtcpParserTransport transport(&rtcp_parser);
+ config.outgoing_transport = &transport;
+ RtcpTransceiverImpl rtcp_transceiver(config);
+
+ rtcp_transceiver.SendPictureLossIndication(kRemoteSsrc);
+
+ EXPECT_EQ(transport.num_packets(), 1);
+ EXPECT_EQ(rtcp_parser.pli()->num_packets(), 1);
+ EXPECT_EQ(rtcp_parser.pli()->sender_ssrc(), kSenderSsrc);
+ EXPECT_EQ(rtcp_parser.pli()->media_ssrc(), kRemoteSsrc);
+}
+
+TEST_F(RtcpTransceiverImplTest, ReceivesPictureLossIndication) {
+ static constexpr uint32_t kRemoteSsrc = 4321;
+ static constexpr uint32_t kMediaSsrc1 = 1234;
+ static constexpr uint32_t kMediaSsrc2 = 1235;
+ RtcpTransceiverConfig config = DefaultTestConfig();
+ RtcpTransceiverImpl rtcp_transceiver(config);
+
+ MockRtpStreamRtcpHandler local_stream1;
+ MockRtpStreamRtcpHandler local_stream2;
+ EXPECT_CALL(local_stream1, OnPli(kRemoteSsrc));
+ EXPECT_CALL(local_stream2, OnPli).Times(0);
+
+ EXPECT_TRUE(rtcp_transceiver.AddMediaSender(kMediaSsrc1, &local_stream1));
+ EXPECT_TRUE(rtcp_transceiver.AddMediaSender(kMediaSsrc2, &local_stream2));
+
+ rtcp::Pli pli;
+ pli.SetSenderSsrc(kRemoteSsrc);
+ pli.SetMediaSsrc(kMediaSsrc1);
+ rtcp_transceiver.ReceivePacket(pli.Build(), config.clock->CurrentTime());
+}
+
+TEST_F(RtcpTransceiverImplTest, RequestKeyFrameWithFullIntraRequest) {
+ const uint32_t kSenderSsrc = 1234;
+ const uint32_t kRemoteSsrcs[] = {4321, 5321};
+ RtcpTransceiverConfig config = DefaultTestConfig();
+ config.feedback_ssrc = kSenderSsrc;
+ config.schedule_periodic_compound_packets = false;
+ RtcpPacketParser rtcp_parser;
+ RtcpParserTransport transport(&rtcp_parser);
+ config.outgoing_transport = &transport;
+ RtcpTransceiverImpl rtcp_transceiver(config);
+
+ rtcp_transceiver.SendFullIntraRequest(kRemoteSsrcs, true);
+
+ EXPECT_EQ(rtcp_parser.fir()->num_packets(), 1);
+ EXPECT_EQ(rtcp_parser.fir()->sender_ssrc(), kSenderSsrc);
+ EXPECT_EQ(rtcp_parser.fir()->requests()[0].ssrc, kRemoteSsrcs[0]);
+ EXPECT_EQ(rtcp_parser.fir()->requests()[1].ssrc, kRemoteSsrcs[1]);
+}
+
+TEST_F(RtcpTransceiverImplTest, RequestKeyFrameWithFirIncreaseSeqNoPerSsrc) {
+ RtcpTransceiverConfig config = DefaultTestConfig();
+ config.schedule_periodic_compound_packets = false;
+ RtcpPacketParser rtcp_parser;
+ RtcpParserTransport transport(&rtcp_parser);
+ config.outgoing_transport = &transport;
+ RtcpTransceiverImpl rtcp_transceiver(config);
+
+ const uint32_t kBothRemoteSsrcs[] = {4321, 5321};
+ const uint32_t kOneRemoteSsrc[] = {4321};
+
+ rtcp_transceiver.SendFullIntraRequest(kBothRemoteSsrcs, true);
+ ASSERT_EQ(rtcp_parser.fir()->requests()[0].ssrc, kBothRemoteSsrcs[0]);
+ uint8_t fir_sequence_number0 = rtcp_parser.fir()->requests()[0].seq_nr;
+ ASSERT_EQ(rtcp_parser.fir()->requests()[1].ssrc, kBothRemoteSsrcs[1]);
+ uint8_t fir_sequence_number1 = rtcp_parser.fir()->requests()[1].seq_nr;
+
+ rtcp_transceiver.SendFullIntraRequest(kOneRemoteSsrc, true);
+ ASSERT_EQ(rtcp_parser.fir()->requests().size(), 1u);
+ ASSERT_EQ(rtcp_parser.fir()->requests()[0].ssrc, kBothRemoteSsrcs[0]);
+ EXPECT_EQ(rtcp_parser.fir()->requests()[0].seq_nr, fir_sequence_number0 + 1);
+
+ rtcp_transceiver.SendFullIntraRequest(kBothRemoteSsrcs, true);
+ ASSERT_EQ(rtcp_parser.fir()->requests().size(), 2u);
+ ASSERT_EQ(rtcp_parser.fir()->requests()[0].ssrc, kBothRemoteSsrcs[0]);
+ EXPECT_EQ(rtcp_parser.fir()->requests()[0].seq_nr, fir_sequence_number0 + 2);
+ ASSERT_EQ(rtcp_parser.fir()->requests()[1].ssrc, kBothRemoteSsrcs[1]);
+ EXPECT_EQ(rtcp_parser.fir()->requests()[1].seq_nr, fir_sequence_number1 + 1);
+}
+
+TEST_F(RtcpTransceiverImplTest, SendFirDoesNotIncreaseSeqNoIfOldRequest) {
+ RtcpTransceiverConfig config = DefaultTestConfig();
+ config.schedule_periodic_compound_packets = false;
+ RtcpPacketParser rtcp_parser;
+ RtcpParserTransport transport(&rtcp_parser);
+ config.outgoing_transport = &transport;
+ RtcpTransceiverImpl rtcp_transceiver(config);
+
+ const uint32_t kBothRemoteSsrcs[] = {4321, 5321};
+
+ rtcp_transceiver.SendFullIntraRequest(kBothRemoteSsrcs, true);
+ ASSERT_EQ(rtcp_parser.fir()->requests().size(), 2u);
+ ASSERT_EQ(rtcp_parser.fir()->requests()[0].ssrc, kBothRemoteSsrcs[0]);
+ uint8_t fir_sequence_number0 = rtcp_parser.fir()->requests()[0].seq_nr;
+ ASSERT_EQ(rtcp_parser.fir()->requests()[1].ssrc, kBothRemoteSsrcs[1]);
+ uint8_t fir_sequence_number1 = rtcp_parser.fir()->requests()[1].seq_nr;
+
+ rtcp_transceiver.SendFullIntraRequest(kBothRemoteSsrcs, false);
+ ASSERT_EQ(rtcp_parser.fir()->requests().size(), 2u);
+ ASSERT_EQ(rtcp_parser.fir()->requests()[0].ssrc, kBothRemoteSsrcs[0]);
+ EXPECT_EQ(rtcp_parser.fir()->requests()[0].seq_nr, fir_sequence_number0);
+ ASSERT_EQ(rtcp_parser.fir()->requests()[1].ssrc, kBothRemoteSsrcs[1]);
+ EXPECT_EQ(rtcp_parser.fir()->requests()[1].seq_nr, fir_sequence_number1);
+}
+
+TEST_F(RtcpTransceiverImplTest, ReceivesFir) {
+ static constexpr uint32_t kRemoteSsrc = 4321;
+ static constexpr uint32_t kMediaSsrc1 = 1234;
+ static constexpr uint32_t kMediaSsrc2 = 1235;
+ RtcpTransceiverConfig config = DefaultTestConfig();
+ RtcpTransceiverImpl rtcp_transceiver(config);
+
+ MockRtpStreamRtcpHandler local_stream1;
+ MockRtpStreamRtcpHandler local_stream2;
+ EXPECT_CALL(local_stream1, OnFir(kRemoteSsrc));
+ EXPECT_CALL(local_stream2, OnFir).Times(0);
+
+ EXPECT_TRUE(rtcp_transceiver.AddMediaSender(kMediaSsrc1, &local_stream1));
+ EXPECT_TRUE(rtcp_transceiver.AddMediaSender(kMediaSsrc2, &local_stream2));
+
+ rtcp::Fir fir;
+ fir.SetSenderSsrc(kRemoteSsrc);
+ fir.AddRequestTo(kMediaSsrc1, /*seq_num=*/13);
+
+ rtcp_transceiver.ReceivePacket(fir.Build(), config.clock->CurrentTime());
+}
+
+TEST_F(RtcpTransceiverImplTest, IgnoresReceivedFirWithRepeatedSequenceNumber) {
+ static constexpr uint32_t kRemoteSsrc = 4321;
+ static constexpr uint32_t kMediaSsrc1 = 1234;
+ static constexpr uint32_t kMediaSsrc2 = 1235;
+ RtcpTransceiverConfig config = DefaultTestConfig();
+ RtcpTransceiverImpl rtcp_transceiver(config);
+
+ MockRtpStreamRtcpHandler local_stream1;
+ MockRtpStreamRtcpHandler local_stream2;
+ EXPECT_CALL(local_stream1, OnFir(kRemoteSsrc)).Times(1);
+ EXPECT_CALL(local_stream2, OnFir(kRemoteSsrc)).Times(2);
+
+ EXPECT_TRUE(rtcp_transceiver.AddMediaSender(kMediaSsrc1, &local_stream1));
+ EXPECT_TRUE(rtcp_transceiver.AddMediaSender(kMediaSsrc2, &local_stream2));
+
+ rtcp::Fir fir1;
+ fir1.SetSenderSsrc(kRemoteSsrc);
+ fir1.AddRequestTo(kMediaSsrc1, /*seq_num=*/132);
+ fir1.AddRequestTo(kMediaSsrc2, /*seq_num=*/10);
+ rtcp_transceiver.ReceivePacket(fir1.Build(), config.clock->CurrentTime());
+
+  // Repeat the request for MediaSsrc1 and expect it to be ignored; change the
+  // FIR sequence number for MediaSsrc2 and expect a 2nd callback.
+ rtcp::Fir fir2;
+ fir2.SetSenderSsrc(kRemoteSsrc);
+ fir2.AddRequestTo(kMediaSsrc1, /*seq_num=*/132);
+ fir2.AddRequestTo(kMediaSsrc2, /*seq_num=*/13);
+ rtcp_transceiver.ReceivePacket(fir2.Build(), config.clock->CurrentTime());
+}
+
+TEST_F(RtcpTransceiverImplTest, ReceivedFirTracksSequenceNumberPerRemoteSsrc) {
+ static constexpr uint32_t kRemoteSsrc1 = 4321;
+ static constexpr uint32_t kRemoteSsrc2 = 4323;
+ static constexpr uint32_t kMediaSsrc = 1234;
+ RtcpTransceiverConfig config = DefaultTestConfig();
+ RtcpTransceiverImpl rtcp_transceiver(config);
+
+ MockRtpStreamRtcpHandler local_stream;
+ EXPECT_CALL(local_stream, OnFir(kRemoteSsrc1));
+ EXPECT_CALL(local_stream, OnFir(kRemoteSsrc2));
+
+ EXPECT_TRUE(rtcp_transceiver.AddMediaSender(kMediaSsrc, &local_stream));
+
+ rtcp::Fir fir1;
+ fir1.SetSenderSsrc(kRemoteSsrc1);
+ fir1.AddRequestTo(kMediaSsrc, /*seq_num=*/13);
+ rtcp_transceiver.ReceivePacket(fir1.Build(), config.clock->CurrentTime());
+
+  // Use the same FIR sequence number, but a different sender SSRC.
+ rtcp::Fir fir2;
+ fir2.SetSenderSsrc(kRemoteSsrc2);
+ fir2.AddRequestTo(kMediaSsrc, /*seq_num=*/13);
+ rtcp_transceiver.ReceivePacket(fir2.Build(), config.clock->CurrentTime());
+}
+
+TEST_F(RtcpTransceiverImplTest, KeyFrameRequestCreatesCompoundPacket) {
+ const uint32_t kRemoteSsrcs[] = {4321};
+ RtcpTransceiverConfig config = DefaultTestConfig();
+  // Turn periodic sending off to ensure the sent RTCP packet is explicitly
+  // requested.
+ config.schedule_periodic_compound_packets = false;
+ RtcpPacketParser rtcp_parser;
+ RtcpParserTransport transport(&rtcp_parser);
+ config.outgoing_transport = &transport;
+
+ config.rtcp_mode = webrtc::RtcpMode::kCompound;
+
+ RtcpTransceiverImpl rtcp_transceiver(config);
+ rtcp_transceiver.SendFullIntraRequest(kRemoteSsrcs, true);
+
+  // Verify the sent packet is compound by expecting the presence of a
+  // receiver report.
+ EXPECT_EQ(transport.num_packets(), 1);
+ EXPECT_EQ(rtcp_parser.receiver_report()->num_packets(), 1);
+}
+
+TEST_F(RtcpTransceiverImplTest, KeyFrameRequestCreatesReducedSizePacket) {
+ const uint32_t kRemoteSsrcs[] = {4321};
+ RtcpTransceiverConfig config = DefaultTestConfig();
+  // Turn periodic sending off to ensure the sent RTCP packet is explicitly
+  // requested.
+ config.schedule_periodic_compound_packets = false;
+ RtcpPacketParser rtcp_parser;
+ RtcpParserTransport transport(&rtcp_parser);
+ config.outgoing_transport = &transport;
+
+ config.rtcp_mode = webrtc::RtcpMode::kReducedSize;
+
+ RtcpTransceiverImpl rtcp_transceiver(config);
+ rtcp_transceiver.SendFullIntraRequest(kRemoteSsrcs, true);
+
+  // Verify the sent packet is reduced-size by expecting the absence of a
+  // receiver report.
+ EXPECT_EQ(transport.num_packets(), 1);
+ EXPECT_EQ(rtcp_parser.receiver_report()->num_packets(), 0);
+}
+
+TEST_F(RtcpTransceiverImplTest, SendsXrRrtrWhenEnabled) {
+ const uint32_t kSenderSsrc = 4321;
+ RtcpTransceiverConfig config = DefaultTestConfig();
+ config.feedback_ssrc = kSenderSsrc;
+ config.schedule_periodic_compound_packets = false;
+ RtcpPacketParser rtcp_parser;
+ RtcpParserTransport transport(&rtcp_parser);
+ config.outgoing_transport = &transport;
+ config.non_sender_rtt_measurement = true;
+ RtcpTransceiverImpl rtcp_transceiver(config);
+
+ rtcp_transceiver.SendCompoundPacket();
+ NtpTime ntp_time_now = config.clock->CurrentNtpTime();
+
+ EXPECT_EQ(rtcp_parser.xr()->num_packets(), 1);
+ EXPECT_EQ(rtcp_parser.xr()->sender_ssrc(), kSenderSsrc);
+ ASSERT_TRUE(rtcp_parser.xr()->rrtr());
+ EXPECT_EQ(rtcp_parser.xr()->rrtr()->ntp(), ntp_time_now);
+}
+
+TEST_F(RtcpTransceiverImplTest, RepliesToRrtrWhenEnabled) {
+ static constexpr uint32_t kSenderSsrc[] = {4321, 9876};
+ RtcpTransceiverConfig config = DefaultTestConfig();
+ config.reply_to_non_sender_rtt_measurement = true;
+ RtcpPacketParser rtcp_parser;
+ RtcpParserTransport transport(&rtcp_parser);
+ config.outgoing_transport = &transport;
+ RtcpTransceiverImpl rtcp_transceiver(config);
+
+ rtcp::ExtendedReports xr;
+ rtcp::Rrtr rrtr;
+ rrtr.SetNtp(NtpTime(uint64_t{0x1111'2222'3333'4444}));
+ xr.SetRrtr(rrtr);
+ xr.SetSenderSsrc(kSenderSsrc[0]);
+ rtcp_transceiver.ReceivePacket(xr.Build(), CurrentTime());
+ AdvanceTime(TimeDelta::Millis(1'500));
+
+ rrtr.SetNtp(NtpTime(uint64_t{0x4444'5555'6666'7777}));
+ xr.SetRrtr(rrtr);
+ xr.SetSenderSsrc(kSenderSsrc[1]);
+ rtcp_transceiver.ReceivePacket(xr.Build(), CurrentTime());
+ AdvanceTime(TimeDelta::Millis(500));
+
+ rtcp_transceiver.SendCompoundPacket();
+
+ EXPECT_EQ(rtcp_parser.xr()->num_packets(), 1);
+  static constexpr uint32_t kCompactNtpOneSecond = 0x0001'0000;
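+  // Compact NTP is the middle 32 bits of the 64-bit NTP timestamp, i.e. a
+  // 16.16 fixed-point value, so one second is 0x0001'0000 and the received
+  // NTP time 0x1111'2222'3333'4444 is echoed back as 0x2222'3333.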
+  EXPECT_THAT(rtcp_parser.xr()->dlrr().sub_blocks(),
+              UnorderedElementsAre(
+                  rtcp::ReceiveTimeInfo(kSenderSsrc[0], 0x2222'3333,
+                                        /*delay=*/2 * kCompactNtpOneSecond),
+                  rtcp::ReceiveTimeInfo(kSenderSsrc[1], 0x5555'6666,
+                                        /*delay=*/kCompactNtpOneSecond / 2)));
+}
+
+TEST_F(RtcpTransceiverImplTest, CanReplyToRrtrOnceForAllLocalSsrcs) {
+ static constexpr uint32_t kRemoteSsrc = 4321;
+ static constexpr uint32_t kLocalSsrcs[] = {1234, 5678};
+ RtcpTransceiverConfig config = DefaultTestConfig();
+ config.reply_to_non_sender_rtt_measurement = true;
+ config.reply_to_non_sender_rtt_mesaurments_on_all_ssrcs = false;
+ RtcpPacketParser rtcp_parser;
+ RtcpParserTransport transport(&rtcp_parser);
+ config.outgoing_transport = &transport;
+ RtcpTransceiverImpl rtcp_transceiver(config);
+
+ MockRtpStreamRtcpHandler local_sender0;
+ MockRtpStreamRtcpHandler local_sender1;
+ rtcp_transceiver.AddMediaSender(kLocalSsrcs[0], &local_sender0);
+ rtcp_transceiver.AddMediaSender(kLocalSsrcs[1], &local_sender1);
+
+ rtcp::ExtendedReports xr;
+ rtcp::Rrtr rrtr;
+ rrtr.SetNtp(NtpTime(uint64_t{0x1111'2222'3333'4444}));
+ xr.SetRrtr(rrtr);
+ xr.SetSenderSsrc(kRemoteSsrc);
+ rtcp_transceiver.ReceivePacket(xr.Build(), CurrentTime());
+ AdvanceTime(TimeDelta::Millis(1'500));
+
+ rtcp_transceiver.SendCompoundPacket();
+
+ EXPECT_EQ(rtcp_parser.xr()->num_packets(), 1);
+}
+
+TEST_F(RtcpTransceiverImplTest, CanReplyToRrtrForEachLocalSsrc) {
+ static constexpr uint32_t kRemoteSsrc = 4321;
+ static constexpr uint32_t kLocalSsrc[] = {1234, 5678};
+ RtcpTransceiverConfig config = DefaultTestConfig();
+ config.reply_to_non_sender_rtt_measurement = true;
+ config.reply_to_non_sender_rtt_mesaurments_on_all_ssrcs = true;
+ RtcpPacketParser rtcp_parser;
+ RtcpParserTransport transport(&rtcp_parser);
+ config.outgoing_transport = &transport;
+ RtcpTransceiverImpl rtcp_transceiver(config);
+
+ MockRtpStreamRtcpHandler local_sender0;
+ MockRtpStreamRtcpHandler local_sender1;
+ rtcp_transceiver.AddMediaSender(kLocalSsrc[0], &local_sender0);
+ rtcp_transceiver.AddMediaSender(kLocalSsrc[1], &local_sender1);
+
+ rtcp::ExtendedReports xr;
+ rtcp::Rrtr rrtr;
+ rrtr.SetNtp(NtpTime(uint64_t{0x1111'2222'3333'4444}));
+ xr.SetRrtr(rrtr);
+ xr.SetSenderSsrc(kRemoteSsrc);
+ rtcp_transceiver.ReceivePacket(xr.Build(), CurrentTime());
+ AdvanceTime(TimeDelta::Millis(1'500));
+
+ rtcp_transceiver.SendCompoundPacket();
+
+ EXPECT_EQ(rtcp_parser.xr()->num_packets(), 2);
+}
+
+TEST_F(RtcpTransceiverImplTest, SendsNoXrRrtrWhenDisabled) {
+ RtcpTransceiverConfig config = DefaultTestConfig();
+ config.schedule_periodic_compound_packets = false;
+ RtcpPacketParser rtcp_parser;
+ RtcpParserTransport transport(&rtcp_parser);
+ config.outgoing_transport = &transport;
+ config.non_sender_rtt_measurement = false;
+ RtcpTransceiverImpl rtcp_transceiver(config);
+
+ rtcp_transceiver.SendCompoundPacket();
+
+ EXPECT_EQ(transport.num_packets(), 1);
+  // An extended reports RTCP packet might be included for another reason,
+  // but it shouldn't contain an RRTR block.
+ EXPECT_FALSE(rtcp_parser.xr()->rrtr());
+}
+
+TEST_F(RtcpTransceiverImplTest, PassRttFromDlrrToLinkObserver) {
+ const uint32_t kSenderSsrc = 4321;
+ MockNetworkLinkRtcpObserver link_observer;
+ RtcpTransceiverConfig config = DefaultTestConfig();
+ config.feedback_ssrc = kSenderSsrc;
+ config.network_link_observer = &link_observer;
+ config.non_sender_rtt_measurement = true;
+ RtcpTransceiverImpl rtcp_transceiver(config);
+
+ Timestamp send_time = Timestamp::Seconds(5678);
+ Timestamp receive_time = send_time + TimeDelta::Millis(110);
+ rtcp::ReceiveTimeInfo rti;
+ rti.ssrc = kSenderSsrc;
+ rti.last_rr = CompactNtp(config.clock->ConvertTimestampToNtpTime(send_time));
+ rti.delay_since_last_rr = SaturatedToCompactNtp(TimeDelta::Millis(10));
+ rtcp::ExtendedReports xr;
+ xr.AddDlrrItem(rti);
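+  // Expected RTT = (receive_time - send_time) - delay_since_last_rr
+  //              = 110 ms - 10 ms = 100 ms.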
+
+ EXPECT_CALL(link_observer,
+ OnRttUpdate(receive_time, Near(TimeDelta::Millis(100))));
+ rtcp_transceiver.ReceivePacket(xr.Build(), receive_time);
+}
+
+TEST_F(RtcpTransceiverImplTest, CalculatesRoundTripTimeFromReportBlocks) {
+ MockNetworkLinkRtcpObserver link_observer;
+ RtcpTransceiverConfig config = DefaultTestConfig();
+ config.network_link_observer = &link_observer;
+ RtcpTransceiverImpl rtcp_transceiver(config);
+
+ TimeDelta rtt = TimeDelta::Millis(100);
+ Timestamp send_time = Timestamp::Seconds(5678);
+ Timestamp receive_time = send_time + TimeDelta::Millis(110);
+ rtcp::ReceiverReport rr;
+ rtcp::ReportBlock rb1;
+ rb1.SetLastSr(CompactNtp(config.clock->ConvertTimestampToNtpTime(
+ receive_time - rtt - TimeDelta::Millis(10))));
+ rb1.SetDelayLastSr(SaturatedToCompactNtp(TimeDelta::Millis(10)));
+ rr.AddReportBlock(rb1);
+ rtcp::ReportBlock rb2;
+ rb2.SetLastSr(CompactNtp(config.clock->ConvertTimestampToNtpTime(
+ receive_time - rtt - TimeDelta::Millis(20))));
+ rb2.SetDelayLastSr(SaturatedToCompactNtp(TimeDelta::Millis(20)));
+ rr.AddReportBlock(rb2);
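+  // For each block: RTT = receive_time - last_sr - delay_since_last_sr,
+  // i.e. (rtt + 10 ms) - 10 ms and (rtt + 20 ms) - 20 ms, both 100 ms.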
+
+ EXPECT_CALL(link_observer, OnRttUpdate(receive_time, Near(rtt)));
+ rtcp_transceiver.ReceivePacket(rr.Build(), receive_time);
+}
+
+TEST_F(RtcpTransceiverImplTest, IgnoresUnknownSsrcInDlrr) {
+ const uint32_t kSenderSsrc = 4321;
+ const uint32_t kUnknownSsrc = 4322;
+ MockNetworkLinkRtcpObserver link_observer;
+ MockTransport null_transport;
+ RtcpTransceiverConfig config = DefaultTestConfig();
+ config.feedback_ssrc = kSenderSsrc;
+ config.schedule_periodic_compound_packets = false;
+ config.outgoing_transport = &null_transport;
+ config.non_sender_rtt_measurement = true;
+ config.network_link_observer = &link_observer;
+ RtcpTransceiverImpl rtcp_transceiver(config);
+
+ Timestamp time = Timestamp::Micros(12345678);
+ webrtc::rtcp::ReceiveTimeInfo rti;
+ rti.ssrc = kUnknownSsrc;
+ rti.last_rr = CompactNtp(config.clock->ConvertTimestampToNtpTime(time));
+ webrtc::rtcp::ExtendedReports xr;
+ xr.AddDlrrItem(rti);
+ auto raw_packet = xr.Build();
+
+ EXPECT_CALL(link_observer, OnRttUpdate).Times(0);
+ rtcp_transceiver.ReceivePacket(raw_packet, time + TimeDelta::Millis(100));
+}
+
+TEST_F(RtcpTransceiverImplTest, ParsesTransportFeedback) {
+ MockNetworkLinkRtcpObserver link_observer;
+ RtcpTransceiverConfig config = DefaultTestConfig();
+ config.network_link_observer = &link_observer;
+ Timestamp receive_time = Timestamp::Seconds(5678);
+ RtcpTransceiverImpl rtcp_transceiver(config);
+
+ EXPECT_CALL(link_observer, OnTransportFeedback(receive_time, _))
+ .WillOnce(WithArg<1>([](const rtcp::TransportFeedback& message) {
+ EXPECT_EQ(message.GetBaseSequence(), 321);
+ EXPECT_THAT(message.GetReceivedPackets(), SizeIs(2));
+ }));
+
+ rtcp::TransportFeedback tb;
+ tb.SetBase(/*base_sequence=*/321, Timestamp::Micros(15));
+  tb.AddReceivedPacket(/*sequence_number=*/321, Timestamp::Micros(15));
+  tb.AddReceivedPacket(/*sequence_number=*/322, Timestamp::Micros(17));
+ rtcp_transceiver.ReceivePacket(tb.Build(), receive_time);
+}
+
+TEST_F(RtcpTransceiverImplTest, ParsesRemb) {
+ MockNetworkLinkRtcpObserver link_observer;
+ RtcpTransceiverConfig config = DefaultTestConfig();
+ config.network_link_observer = &link_observer;
+ Timestamp receive_time = Timestamp::Seconds(5678);
+ RtcpTransceiverImpl rtcp_transceiver(config);
+
+ EXPECT_CALL(link_observer,
+ OnReceiverEstimatedMaxBitrate(receive_time,
+ DataRate::BitsPerSec(1'234'000)));
+
+ rtcp::Remb remb;
+ remb.SetBitrateBps(1'234'000);
+ rtcp_transceiver.ReceivePacket(remb.Build(), receive_time);
+}
+
+TEST_F(RtcpTransceiverImplTest,
+       CombinesReportBlocksFromSenderAndReceiverReports) {
+ MockNetworkLinkRtcpObserver link_observer;
+ RtcpTransceiverConfig config = DefaultTestConfig();
+ config.network_link_observer = &link_observer;
+ Timestamp receive_time = Timestamp::Seconds(5678);
+ RtcpTransceiverImpl rtcp_transceiver(config);
+
+  // Assemble a compound packet with multiple RTCP packets in it.
+ rtcp::CompoundPacket packet;
+ auto sr = std::make_unique<rtcp::SenderReport>();
+ sr->SetSenderSsrc(1234);
+ sr->SetReportBlocks(std::vector<ReportBlock>(31));
+ packet.Append(std::move(sr));
+ auto rr1 = std::make_unique<rtcp::ReceiverReport>();
+ rr1->SetReportBlocks(std::vector<ReportBlock>(31));
+ packet.Append(std::move(rr1));
+ auto rr2 = std::make_unique<rtcp::ReceiverReport>();
+ rr2->SetReportBlocks(std::vector<ReportBlock>(2));
+ packet.Append(std::move(rr2));
+
+ EXPECT_CALL(link_observer, OnReportBlocks(receive_time, SizeIs(64)));
+
+ rtcp_transceiver.ReceivePacket(packet.Build(), receive_time);
+}
+
+TEST_F(RtcpTransceiverImplTest,
+ CallbackOnReportBlocksFromSenderAndReceiverReports) {
+ static constexpr uint32_t kRemoteSsrc = 5678;
+ // Has registered sender, report block attached to sender report.
+ static constexpr uint32_t kMediaSsrc1 = 1234;
+ // No registered sender, report block attached to receiver report.
+  // Such a report block shouldn't prevent handling the following blocks.
+ static constexpr uint32_t kMediaSsrc2 = 1235;
+ // Has registered sender, no report block attached.
+ static constexpr uint32_t kMediaSsrc3 = 1236;
+ // Has registered sender, report block attached to receiver report.
+ static constexpr uint32_t kMediaSsrc4 = 1237;
+
+ MockNetworkLinkRtcpObserver link_observer;
+ RtcpTransceiverConfig config = DefaultTestConfig();
+ Timestamp receive_time = Timestamp::Seconds(5678);
+ RtcpTransceiverImpl rtcp_transceiver(config);
+
+ MockRtpStreamRtcpHandler local_stream1;
+ MockRtpStreamRtcpHandler local_stream3;
+ MockRtpStreamRtcpHandler local_stream4;
+ EXPECT_CALL(local_stream1, OnReportBlock(kRemoteSsrc, _));
+ EXPECT_CALL(local_stream3, OnReportBlock).Times(0);
+ EXPECT_CALL(local_stream4, OnReportBlock(kRemoteSsrc, _));
+
+ ASSERT_TRUE(rtcp_transceiver.AddMediaSender(kMediaSsrc1, &local_stream1));
+ ASSERT_TRUE(rtcp_transceiver.AddMediaSender(kMediaSsrc3, &local_stream3));
+ ASSERT_TRUE(rtcp_transceiver.AddMediaSender(kMediaSsrc4, &local_stream4));
+
+  // Assemble a compound packet with multiple RTCP packets in it.
+ rtcp::CompoundPacket packet;
+ auto sr = std::make_unique<rtcp::SenderReport>();
+ sr->SetSenderSsrc(kRemoteSsrc);
+ std::vector<ReportBlock> rb(1);
+ rb[0].SetMediaSsrc(kMediaSsrc1);
+ sr->SetReportBlocks(std::move(rb));
+ packet.Append(std::move(sr));
+ auto rr = std::make_unique<rtcp::ReceiverReport>();
+ rr->SetSenderSsrc(kRemoteSsrc);
+ rb = std::vector<ReportBlock>(2);
+ rb[0].SetMediaSsrc(kMediaSsrc2);
+ rb[1].SetMediaSsrc(kMediaSsrc4);
+ rr->SetReportBlocks(std::move(rb));
+ packet.Append(std::move(rr));
+
+ rtcp_transceiver.ReceivePacket(packet.Build(), receive_time);
+}
+
+TEST_F(RtcpTransceiverImplTest, FailsToRegisterTwoSendersWithTheSameSsrc) {
+ RtcpTransceiverImpl rtcp_transceiver(DefaultTestConfig());
+ MockRtpStreamRtcpHandler sender1;
+ MockRtpStreamRtcpHandler sender2;
+
+ EXPECT_TRUE(rtcp_transceiver.AddMediaSender(/*local_ssrc=*/10001, &sender1));
+ EXPECT_FALSE(rtcp_transceiver.AddMediaSender(/*local_ssrc=*/10001, &sender2));
+ EXPECT_TRUE(rtcp_transceiver.AddMediaSender(/*local_ssrc=*/10002, &sender2));
+
+ EXPECT_TRUE(rtcp_transceiver.RemoveMediaSender(/*local_ssrc=*/10001));
+ EXPECT_FALSE(rtcp_transceiver.RemoveMediaSender(/*local_ssrc=*/10001));
+}
+
+TEST_F(RtcpTransceiverImplTest, SendsSenderReport) {
+ static constexpr uint32_t kFeedbackSsrc = 123;
+ static constexpr uint32_t kSenderSsrc = 12345;
+ RtcpTransceiverConfig config = DefaultTestConfig();
+ config.feedback_ssrc = kFeedbackSsrc;
+ RtcpPacketParser rtcp_parser;
+ RtcpParserTransport transport(&rtcp_parser);
+ config.outgoing_transport = &transport;
+ config.schedule_periodic_compound_packets = false;
+ RtcpTransceiverImpl rtcp_transceiver(config);
+
+ RtpStreamRtcpHandler::RtpStats sender_stats;
+ sender_stats.set_num_sent_packets(10);
+ sender_stats.set_num_sent_bytes(1000);
+ sender_stats.set_last_rtp_timestamp(0x3333);
+ sender_stats.set_last_capture_time(CurrentTime() - TimeDelta::Seconds(2));
+ sender_stats.set_last_clock_rate(0x1000);
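+  // With a clock rate of 0x1000 ticks/second and a capture time 2 seconds in
+  // the past, the SR's RTP timestamp should advance by 2 * 0x1000 = 0x2000.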
+ MockRtpStreamRtcpHandler sender;
+ ON_CALL(sender, SentStats).WillByDefault(Return(sender_stats));
+ rtcp_transceiver.AddMediaSender(kSenderSsrc, &sender);
+
+ rtcp_transceiver.SendCompoundPacket();
+
+ ASSERT_GT(rtcp_parser.sender_report()->num_packets(), 0);
+ EXPECT_EQ(rtcp_parser.sender_report()->sender_ssrc(), kSenderSsrc);
+ EXPECT_EQ(rtcp_parser.sender_report()->ntp(),
+ time_controller().GetClock()->CurrentNtpTime());
+ EXPECT_EQ(rtcp_parser.sender_report()->rtp_timestamp(), 0x3333u + 0x2000u);
+ EXPECT_EQ(rtcp_parser.sender_report()->sender_packet_count(), 10u);
+ EXPECT_EQ(rtcp_parser.sender_report()->sender_octet_count(), 1000u);
+}
+
+TEST_F(RtcpTransceiverImplTest,
+ MaySendBothSenderReportAndReceiverReportInTheSamePacket) {
+ RtcpPacketParser rtcp_parser;
+ RtcpParserTransport transport(&rtcp_parser);
+ std::vector<ReportBlock> statistics_report_blocks(40);
+ MockReceiveStatisticsProvider receive_statistics;
+ EXPECT_CALL(receive_statistics, RtcpReportBlocks(/*max_blocks=*/Ge(40u)))
+ .WillOnce(Return(statistics_report_blocks));
+ RtcpTransceiverConfig config = DefaultTestConfig();
+ config.outgoing_transport = &transport;
+ config.receive_statistics = &receive_statistics;
+ RtcpTransceiverImpl rtcp_transceiver(config);
+
+ MockRtpStreamRtcpHandler sender;
+ rtcp_transceiver.AddMediaSender(/*ssrc=*/12345, &sender);
+
+ rtcp_transceiver.SendCompoundPacket();
+
+  // Expect a single RTCP packet with a sender and a receiver report in it.
+ EXPECT_EQ(transport.num_packets(), 1);
+ ASSERT_EQ(rtcp_parser.sender_report()->num_packets(), 1);
+ ASSERT_EQ(rtcp_parser.receiver_report()->num_packets(), 1);
+  // A sender report may contain up to 31 report blocks, so the remaining 9
+  // report blocks should be attached to the receiver report.
+ EXPECT_THAT(rtcp_parser.sender_report()->report_blocks(), SizeIs(31));
+ EXPECT_THAT(rtcp_parser.receiver_report()->report_blocks(), SizeIs(9));
+}
+
+TEST_F(RtcpTransceiverImplTest, RotatesSendersWhenAllSenderReportsDoNotFit) {
+  // Send 6 compound packets; each should contain 5 sender reports, so each of
+  // the 6 senders is mentioned 5 times.
+ static constexpr int kNumSenders = 6;
+ static constexpr uint32_t kSenderSsrc[kNumSenders] = {10, 20, 30, 40, 50, 60};
+ static constexpr int kSendersPerPacket = 5;
+  // RtcpPacketParser remembers only the latest block of each type, but this
+  // test is about sending multiple sender reports in the same packet, so it
+  // needs a more advanced parser: another RtcpTransceiver.
+ RtcpTransceiverConfig receiver_config = DefaultTestConfig();
+ RtcpTransceiverImpl rtcp_receiver(receiver_config);
+  // Main expectation: all senders are spread equally across the packets.
+ NiceMock<MockMediaReceiverRtcpObserver> receiver[kNumSenders];
+ for (int i = 0; i < kNumSenders; ++i) {
+ SCOPED_TRACE(i);
+ EXPECT_CALL(receiver[i], OnSenderReport(kSenderSsrc[i], _, _))
+ .Times(kSendersPerPacket);
+ rtcp_receiver.AddMediaReceiverRtcpObserver(kSenderSsrc[i], &receiver[i]);
+ }
+
+ MockTransport transport;
+ EXPECT_CALL(transport, SendRtcp)
+ .Times(kNumSenders)
+ .WillRepeatedly([&](const uint8_t* data, size_t size) {
+ rtcp_receiver.ReceivePacket(rtc::MakeArrayView(data, size),
+ CurrentTime());
+ return true;
+ });
+ RtcpTransceiverConfig config = DefaultTestConfig();
+ config.outgoing_transport = &transport;
+  // Limit the packet to have space for exactly kSendersPerPacket sender
+  // reports. A sender report without report blocks requires 28 bytes.
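+  // (28 bytes = 4-byte header + 4-byte sender SSRC + 8-byte NTP timestamp +
+  // 4-byte RTP timestamp + 4-byte packet count + 4-byte octet count, per
+  // RFC 3550 section 6.4.1.)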
+ config.max_packet_size = kSendersPerPacket * 28;
+ RtcpTransceiverImpl rtcp_transceiver(config);
+ NiceMock<MockRtpStreamRtcpHandler> sender[kNumSenders];
+ for (int i = 0; i < kNumSenders; ++i) {
+ rtcp_transceiver.AddMediaSender(kSenderSsrc[i], &sender[i]);
+ }
+
+ for (int i = 1; i <= kNumSenders; ++i) {
+ SCOPED_TRACE(i);
+ rtcp_transceiver.SendCompoundPacket();
+ }
+}
+
+TEST_F(RtcpTransceiverImplTest, SkipsSenderReportForInactiveSender) {
+ static constexpr uint32_t kSenderSsrc[] = {12345, 23456};
+ RtcpTransceiverConfig config = DefaultTestConfig();
+ RtcpPacketParser rtcp_parser;
+ RtcpParserTransport transport(&rtcp_parser);
+ config.outgoing_transport = &transport;
+ RtcpTransceiverImpl rtcp_transceiver(config);
+
+ RtpStreamRtcpHandler::RtpStats sender_stats[2];
+ NiceMock<MockRtpStreamRtcpHandler> sender[2];
+ ON_CALL(sender[0], SentStats).WillByDefault([&] { return sender_stats[0]; });
+ ON_CALL(sender[1], SentStats).WillByDefault([&] { return sender_stats[1]; });
+ rtcp_transceiver.AddMediaSender(kSenderSsrc[0], &sender[0]);
+ rtcp_transceiver.AddMediaSender(kSenderSsrc[1], &sender[1]);
+
+  // Start with both senders being active.
+ sender_stats[0].set_num_sent_packets(10);
+ sender_stats[0].set_num_sent_bytes(1'000);
+ sender_stats[1].set_num_sent_packets(5);
+ sender_stats[1].set_num_sent_bytes(2'000);
+ rtcp_transceiver.SendCompoundPacket();
+ EXPECT_EQ(transport.num_packets(), 1);
+ EXPECT_EQ(rtcp_parser.sender_report()->num_packets(), 2);
+
+  // Keep the 1st sender active, but make the 2nd one look inactive by
+  // returning the same RtpStats.
+ sender_stats[0].set_num_sent_packets(15);
+ sender_stats[0].set_num_sent_bytes(2'000);
+ rtcp_transceiver.SendCompoundPacket();
+ EXPECT_EQ(transport.num_packets(), 2);
+ EXPECT_EQ(rtcp_parser.sender_report()->num_packets(), 3);
+ EXPECT_EQ(rtcp_parser.sender_report()->sender_ssrc(), kSenderSsrc[0]);
+
+ // Swap active sender.
+ sender_stats[1].set_num_sent_packets(20);
+ sender_stats[1].set_num_sent_bytes(3'000);
+ rtcp_transceiver.SendCompoundPacket();
+ EXPECT_EQ(transport.num_packets(), 3);
+ EXPECT_EQ(rtcp_parser.sender_report()->num_packets(), 4);
+ EXPECT_EQ(rtcp_parser.sender_report()->sender_ssrc(), kSenderSsrc[1]);
+
+ // Activate both senders again.
+ sender_stats[0].set_num_sent_packets(20);
+ sender_stats[0].set_num_sent_bytes(3'000);
+ sender_stats[1].set_num_sent_packets(25);
+ sender_stats[1].set_num_sent_bytes(3'500);
+ rtcp_transceiver.SendCompoundPacket();
+ EXPECT_EQ(transport.num_packets(), 4);
+ EXPECT_EQ(rtcp_parser.sender_report()->num_packets(), 6);
+}
+
+} // namespace
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_transceiver_unittest.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_transceiver_unittest.cc
new file mode 100644
index 0000000000..57652f2305
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtcp_transceiver_unittest.cc
@@ -0,0 +1,365 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtcp_transceiver.h"
+
+#include <memory>
+#include <utility>
+
+#include "api/units/timestamp.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/remote_estimate.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/sender_report.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/transport_feedback.h"
+#include "rtc_base/event.h"
+#include "rtc_base/task_queue_for_test.h"
+#include "system_wrappers/include/clock.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "test/mock_transport.h"
+#include "test/rtcp_packet_parser.h"
+
+namespace {
+
+using ::testing::_;
+using ::testing::AtLeast;
+using ::testing::Invoke;
+using ::testing::InvokeWithoutArgs;
+using ::testing::IsNull;
+using ::testing::NiceMock;
+using ::webrtc::MockTransport;
+using ::webrtc::RtcpTransceiver;
+using ::webrtc::RtcpTransceiverConfig;
+using ::webrtc::SimulatedClock;
+using ::webrtc::TaskQueueForTest;
+using ::webrtc::Timestamp;
+using ::webrtc::rtcp::RemoteEstimate;
+using ::webrtc::rtcp::RtcpPacket;
+using ::webrtc::rtcp::TransportFeedback;
+using ::webrtc::test::RtcpPacketParser;
+
+class MockMediaReceiverRtcpObserver : public webrtc::MediaReceiverRtcpObserver {
+ public:
+ MOCK_METHOD(void,
+ OnSenderReport,
+ (uint32_t, webrtc::NtpTime, uint32_t),
+ (override));
+};
+
+constexpr int kTimeoutMs = 1000;
+
+void WaitPostedTasks(TaskQueueForTest* queue) {
+ rtc::Event done;
+ queue->PostTask([&done] { done.Set(); });
+ ASSERT_TRUE(done.Wait(kTimeoutMs));
+}
+
+TEST(RtcpTransceiverTest, SendsRtcpOnTaskQueueWhenCreatedOffTaskQueue) {
+ SimulatedClock clock(0);
+ MockTransport outgoing_transport;
+ TaskQueueForTest queue("rtcp");
+ RtcpTransceiverConfig config;
+ config.clock = &clock;
+ config.outgoing_transport = &outgoing_transport;
+ config.task_queue = queue.Get();
+ EXPECT_CALL(outgoing_transport, SendRtcp(_, _))
+ .WillRepeatedly(InvokeWithoutArgs([&] {
+ EXPECT_TRUE(queue.IsCurrent());
+ return true;
+ }));
+
+ RtcpTransceiver rtcp_transceiver(config);
+ rtcp_transceiver.SendCompoundPacket();
+ WaitPostedTasks(&queue);
+}
+
+TEST(RtcpTransceiverTest, SendsRtcpOnTaskQueueWhenCreatedOnTaskQueue) {
+ SimulatedClock clock(0);
+ MockTransport outgoing_transport;
+ TaskQueueForTest queue("rtcp");
+ RtcpTransceiverConfig config;
+ config.clock = &clock;
+ config.outgoing_transport = &outgoing_transport;
+ config.task_queue = queue.Get();
+ EXPECT_CALL(outgoing_transport, SendRtcp(_, _))
+ .WillRepeatedly(InvokeWithoutArgs([&] {
+ EXPECT_TRUE(queue.IsCurrent());
+ return true;
+ }));
+
+ std::unique_ptr<RtcpTransceiver> rtcp_transceiver;
+ queue.PostTask([&] {
+ rtcp_transceiver = std::make_unique<RtcpTransceiver>(config);
+ rtcp_transceiver->SendCompoundPacket();
+ });
+ WaitPostedTasks(&queue);
+}
+
+TEST(RtcpTransceiverTest, CanBeDestroyedOnTaskQueue) {
+ SimulatedClock clock(0);
+ NiceMock<MockTransport> outgoing_transport;
+ TaskQueueForTest queue("rtcp");
+ RtcpTransceiverConfig config;
+ config.clock = &clock;
+ config.outgoing_transport = &outgoing_transport;
+ config.task_queue = queue.Get();
+ auto rtcp_transceiver = std::make_unique<RtcpTransceiver>(config);
+
+ queue.PostTask([&] {
+ // Insert a packet just before destruction to test for races.
+ rtcp_transceiver->SendCompoundPacket();
+ rtcp_transceiver.reset();
+ });
+ WaitPostedTasks(&queue);
+}
+
+TEST(RtcpTransceiverTest, CanBeDestroyedWithoutBlocking) {
+ SimulatedClock clock(0);
+ TaskQueueForTest queue("rtcp");
+ NiceMock<MockTransport> outgoing_transport;
+ RtcpTransceiverConfig config;
+ config.clock = &clock;
+ config.outgoing_transport = &outgoing_transport;
+ config.task_queue = queue.Get();
+ auto* rtcp_transceiver = new RtcpTransceiver(config);
+ rtcp_transceiver->SendCompoundPacket();
+
+ rtc::Event done;
+ rtc::Event heavy_task;
+ queue.PostTask([&] {
+ EXPECT_TRUE(heavy_task.Wait(kTimeoutMs));
+ done.Set();
+ });
+ delete rtcp_transceiver;
+
+ heavy_task.Set();
+ EXPECT_TRUE(done.Wait(kTimeoutMs));
+}
+
+TEST(RtcpTransceiverTest, MaySendPacketsAfterDestructor) { // i.e. Be careful!
+ SimulatedClock clock(0);
+ NiceMock<MockTransport> outgoing_transport; // Must outlive queue below.
+ TaskQueueForTest queue("rtcp");
+ RtcpTransceiverConfig config;
+ config.clock = &clock;
+ config.outgoing_transport = &outgoing_transport;
+ config.task_queue = queue.Get();
+ auto* rtcp_transceiver = new RtcpTransceiver(config);
+
+ rtc::Event heavy_task;
+ queue.PostTask([&] { EXPECT_TRUE(heavy_task.Wait(kTimeoutMs)); });
+ rtcp_transceiver->SendCompoundPacket();
+ delete rtcp_transceiver;
+
+ EXPECT_CALL(outgoing_transport, SendRtcp);
+ heavy_task.Set();
+
+ WaitPostedTasks(&queue);
+}
+
+// Use the RTP timestamp to distinguish different incoming sender reports.
+rtc::CopyOnWriteBuffer CreateSenderReport(uint32_t ssrc, uint32_t rtp_time) {
+ webrtc::rtcp::SenderReport sr;
+ sr.SetSenderSsrc(ssrc);
+ sr.SetRtpTimestamp(rtp_time);
+ rtc::Buffer buffer = sr.Build();
+  // Switch to a more efficient way of creating a CopyOnWriteBuffer from an
+  // RtcpPacket when one becomes available. Until then, don't worry about the
+  // extra memcpy in a test.
+ return rtc::CopyOnWriteBuffer(buffer.data(), buffer.size());
+}
+
+TEST(RtcpTransceiverTest, DoesntPostToRtcpObserverAfterCallToRemove) {
+ const uint32_t kRemoteSsrc = 1234;
+ SimulatedClock clock(0);
+ MockTransport null_transport;
+ TaskQueueForTest queue("rtcp");
+ RtcpTransceiverConfig config;
+ config.clock = &clock;
+ config.outgoing_transport = &null_transport;
+ config.task_queue = queue.Get();
+ RtcpTransceiver rtcp_transceiver(config);
+ rtc::Event observer_deleted;
+
+ auto observer = std::make_unique<MockMediaReceiverRtcpObserver>();
+ EXPECT_CALL(*observer, OnSenderReport(kRemoteSsrc, _, 1));
+ EXPECT_CALL(*observer, OnSenderReport(kRemoteSsrc, _, 2)).Times(0);
+
+ rtcp_transceiver.AddMediaReceiverRtcpObserver(kRemoteSsrc, observer.get());
+ rtcp_transceiver.ReceivePacket(CreateSenderReport(kRemoteSsrc, 1));
+ rtcp_transceiver.RemoveMediaReceiverRtcpObserver(kRemoteSsrc, observer.get(),
+ /*on_removed=*/[&] {
+ observer.reset();
+ observer_deleted.Set();
+ });
+ rtcp_transceiver.ReceivePacket(CreateSenderReport(kRemoteSsrc, 2));
+
+ EXPECT_TRUE(observer_deleted.Wait(kTimeoutMs));
+ WaitPostedTasks(&queue);
+}
+
+TEST(RtcpTransceiverTest, RemoveMediaReceiverRtcpObserverIsNonBlocking) {
+ const uint32_t kRemoteSsrc = 1234;
+ SimulatedClock clock(0);
+ MockTransport null_transport;
+ TaskQueueForTest queue("rtcp");
+ RtcpTransceiverConfig config;
+ config.clock = &clock;
+ config.outgoing_transport = &null_transport;
+ config.task_queue = queue.Get();
+ RtcpTransceiver rtcp_transceiver(config);
+ auto observer = std::make_unique<MockMediaReceiverRtcpObserver>();
+ rtcp_transceiver.AddMediaReceiverRtcpObserver(kRemoteSsrc, observer.get());
+
+ rtc::Event queue_blocker;
+ rtc::Event observer_deleted;
+ queue.PostTask([&] { EXPECT_TRUE(queue_blocker.Wait(kTimeoutMs)); });
+ rtcp_transceiver.RemoveMediaReceiverRtcpObserver(kRemoteSsrc, observer.get(),
+ /*on_removed=*/[&] {
+ observer.reset();
+ observer_deleted.Set();
+ });
+
+ EXPECT_THAT(observer, Not(IsNull()));
+ queue_blocker.Set();
+ EXPECT_TRUE(observer_deleted.Wait(kTimeoutMs));
+}
+
+TEST(RtcpTransceiverTest, CanCallSendCompoundPacketFromAnyThread) {
+ SimulatedClock clock(0);
+ MockTransport outgoing_transport;
+ TaskQueueForTest queue("rtcp");
+ RtcpTransceiverConfig config;
+ config.clock = &clock;
+ config.outgoing_transport = &outgoing_transport;
+ config.task_queue = queue.Get();
+
+ EXPECT_CALL(outgoing_transport, SendRtcp(_, _))
+    // If the test is slow, a periodic task may send an extra packet.
+ .Times(AtLeast(3))
+ .WillRepeatedly(InvokeWithoutArgs([&] {
+ EXPECT_TRUE(queue.IsCurrent());
+ return true;
+ }));
+
+ RtcpTransceiver rtcp_transceiver(config);
+
+ // Call from the construction thread.
+ rtcp_transceiver.SendCompoundPacket();
+  // Call from the same queue the transceiver uses for processing.
+ queue.PostTask([&] { rtcp_transceiver.SendCompoundPacket(); });
+  // Call from an unrelated task queue.
+ TaskQueueForTest queue_send("send_packet");
+ queue_send.PostTask([&] { rtcp_transceiver.SendCompoundPacket(); });
+
+ WaitPostedTasks(&queue_send);
+ WaitPostedTasks(&queue);
+}
+
+TEST(RtcpTransceiverTest, DoesntSendPacketsAfterStopCallback) {
+ SimulatedClock clock(0);
+ NiceMock<MockTransport> outgoing_transport;
+ TaskQueueForTest queue("rtcp");
+ RtcpTransceiverConfig config;
+ config.clock = &clock;
+ config.outgoing_transport = &outgoing_transport;
+ config.task_queue = queue.Get();
+ config.schedule_periodic_compound_packets = true;
+
+ auto rtcp_transceiver = std::make_unique<RtcpTransceiver>(config);
+ rtc::Event done;
+ rtcp_transceiver->SendCompoundPacket();
+ rtcp_transceiver->Stop([&] {
+ EXPECT_CALL(outgoing_transport, SendRtcp).Times(0);
+ done.Set();
+ });
+ rtcp_transceiver = nullptr;
+ EXPECT_TRUE(done.Wait(kTimeoutMs));
+}
+
+TEST(RtcpTransceiverTest, SendsCombinedRtcpPacketOnTaskQueue) {
+ static constexpr uint32_t kSenderSsrc = 12345;
+
+ SimulatedClock clock(0);
+ MockTransport outgoing_transport;
+ TaskQueueForTest queue("rtcp");
+ RtcpTransceiverConfig config;
+ config.clock = &clock;
+ config.feedback_ssrc = kSenderSsrc;
+ config.outgoing_transport = &outgoing_transport;
+ config.task_queue = queue.Get();
+ config.schedule_periodic_compound_packets = false;
+ RtcpTransceiver rtcp_transceiver(config);
+
+ EXPECT_CALL(outgoing_transport, SendRtcp)
+ .WillOnce([&](const uint8_t* buffer, size_t size) {
+ EXPECT_TRUE(queue.IsCurrent());
+ RtcpPacketParser rtcp_parser;
+ rtcp_parser.Parse(buffer, size);
+ EXPECT_EQ(rtcp_parser.transport_feedback()->num_packets(), 1);
+ EXPECT_EQ(rtcp_parser.transport_feedback()->sender_ssrc(), kSenderSsrc);
+ EXPECT_EQ(rtcp_parser.app()->num_packets(), 1);
+ EXPECT_EQ(rtcp_parser.app()->sender_ssrc(), kSenderSsrc);
+ return true;
+ });
+
+  // Create a minimalistic transport feedback packet.
+ std::vector<std::unique_ptr<RtcpPacket>> packets;
+ auto transport_feedback = std::make_unique<TransportFeedback>();
+ transport_feedback->AddReceivedPacket(321, Timestamp::Millis(10));
+ packets.push_back(std::move(transport_feedback));
+
+ auto remote_estimate = std::make_unique<RemoteEstimate>();
+ packets.push_back(std::move(remote_estimate));
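+  // RemoteEstimate is carried in an RTCP APP packet, which is why the parser
+  // above checks rtcp_parser.app().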
+
+ rtcp_transceiver.SendCombinedRtcpPacket(std::move(packets));
+ WaitPostedTasks(&queue);
+}
+
+TEST(RtcpTransceiverTest, SendFullIntraRequestDefaultsToNewRequest) {
+ static constexpr uint32_t kSenderSsrc = 12345;
+
+ SimulatedClock clock(0);
+ MockTransport outgoing_transport;
+ TaskQueueForTest queue("rtcp");
+ RtcpTransceiverConfig config;
+ config.clock = &clock;
+ config.feedback_ssrc = kSenderSsrc;
+ config.outgoing_transport = &outgoing_transport;
+ config.task_queue = queue.Get();
+ config.schedule_periodic_compound_packets = false;
+ RtcpTransceiver rtcp_transceiver(config);
+
+ uint8_t first_seq_nr;
+ EXPECT_CALL(outgoing_transport, SendRtcp)
+ .WillOnce([&](const uint8_t* buffer, size_t size) {
+ EXPECT_TRUE(queue.IsCurrent());
+ RtcpPacketParser rtcp_parser;
+ rtcp_parser.Parse(buffer, size);
+ EXPECT_EQ(rtcp_parser.fir()->requests()[0].ssrc, kSenderSsrc);
+ first_seq_nr = rtcp_parser.fir()->requests()[0].seq_nr;
+ return true;
+ })
+ .WillOnce([&](const uint8_t* buffer, size_t size) {
+ EXPECT_TRUE(queue.IsCurrent());
+ RtcpPacketParser rtcp_parser;
+ rtcp_parser.Parse(buffer, size);
+ EXPECT_EQ(rtcp_parser.fir()->requests()[0].ssrc, kSenderSsrc);
+ EXPECT_EQ(rtcp_parser.fir()->requests()[0].seq_nr, first_seq_nr + 1);
+ return true;
+ });
+
+  // Send 2 FIR packets because the sequence number is incremented after
+  // sending; with a single packet the default new_request behavior couldn't
+  // be observed.
+ rtcp_transceiver.SendFullIntraRequest({kSenderSsrc});
+ rtcp_transceiver.SendFullIntraRequest({kSenderSsrc});
+
+ WaitPostedTasks(&queue);
+}
+
+} // namespace
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_dependency_descriptor_extension.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_dependency_descriptor_extension.cc
new file mode 100644
index 0000000000..fd42b798d4
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_dependency_descriptor_extension.cc
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtp_dependency_descriptor_extension.h"
+
+#include <bitset>
+#include <cstdint>
+
+#include "api/array_view.h"
+#include "api/transport/rtp/dependency_descriptor.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "modules/rtp_rtcp/source/rtp_dependency_descriptor_reader.h"
+#include "modules/rtp_rtcp/source/rtp_dependency_descriptor_writer.h"
+#include "rtc_base/numerics/divide_round.h"
+
+namespace webrtc {
+
+constexpr RTPExtensionType RtpDependencyDescriptorExtension::kId;
+constexpr std::bitset<32> RtpDependencyDescriptorExtension::kAllChainsAreActive;
+
+bool RtpDependencyDescriptorExtension::Parse(
+ rtc::ArrayView<const uint8_t> data,
+ const FrameDependencyStructure* structure,
+ DependencyDescriptor* descriptor) {
+ RtpDependencyDescriptorReader reader(data, structure, descriptor);
+ return reader.ParseSuccessful();
+}
+
+size_t RtpDependencyDescriptorExtension::ValueSize(
+ const FrameDependencyStructure& structure,
+ std::bitset<32> active_chains,
+ const DependencyDescriptor& descriptor) {
+ RtpDependencyDescriptorWriter writer(/*data=*/{}, structure, active_chains,
+ descriptor);
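+  // ValueSizeBits() counts bits; the written extension occupies whole bytes.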
+ return DivideRoundUp(writer.ValueSizeBits(), 8);
+}
+
+bool RtpDependencyDescriptorExtension::Write(
+ rtc::ArrayView<uint8_t> data,
+ const FrameDependencyStructure& structure,
+ std::bitset<32> active_chains,
+ const DependencyDescriptor& descriptor) {
+ RtpDependencyDescriptorWriter writer(data, structure, active_chains,
+ descriptor);
+ return writer.Write();
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_dependency_descriptor_extension.h b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_dependency_descriptor_extension.h
new file mode 100644
index 0000000000..a2aedb8d4f
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_dependency_descriptor_extension.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef MODULES_RTP_RTCP_SOURCE_RTP_DEPENDENCY_DESCRIPTOR_EXTENSION_H_
+#define MODULES_RTP_RTCP_SOURCE_RTP_DEPENDENCY_DESCRIPTOR_EXTENSION_H_
+
+#include <bitset>
+#include <cstdint>
+
+#include "absl/strings/string_view.h"
+#include "api/array_view.h"
+#include "api/rtp_parameters.h"
+#include "api/transport/rtp/dependency_descriptor.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+
+namespace webrtc {
+// Trait to read/write the dependency descriptor extension as described in
+// https://aomediacodec.github.io/av1-rtp-spec/#dependency-descriptor-rtp-header-extension
+// While the format is still under design, the code might change without
+// backward compatibility.
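+//
+// A minimal serialization sketch (the 256-byte buffer is an arbitrary
+// assumption; any buffer of at least ValueSize() bytes works):
+//   uint8_t buffer[256];
+//   size_t size =
+//       RtpDependencyDescriptorExtension::ValueSize(structure, descriptor);
+//   bool ok = size > 0 && size <= sizeof(buffer) &&
+//             RtpDependencyDescriptorExtension::Write(
+//                 rtc::MakeArrayView(buffer, size), structure, descriptor);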
+class RtpDependencyDescriptorExtension {
+ public:
+ static constexpr RTPExtensionType kId = kRtpExtensionGenericFrameDescriptor02;
+ static constexpr absl::string_view Uri() {
+ return RtpExtension::kDependencyDescriptorUri;
+ }
+
+ static bool Parse(rtc::ArrayView<const uint8_t> data,
+ const FrameDependencyStructure* structure,
+ DependencyDescriptor* descriptor);
+
+ static size_t ValueSize(const FrameDependencyStructure& structure,
+ const DependencyDescriptor& descriptor) {
+ return ValueSize(structure, kAllChainsAreActive, descriptor);
+ }
+ static size_t ValueSize(const FrameDependencyStructure& structure,
+ std::bitset<32> active_chains,
+ const DependencyDescriptor& descriptor);
+ static bool Write(rtc::ArrayView<uint8_t> data,
+ const FrameDependencyStructure& structure,
+ const DependencyDescriptor& descriptor) {
+ return Write(data, structure, kAllChainsAreActive, descriptor);
+ }
+ static bool Write(rtc::ArrayView<uint8_t> data,
+ const FrameDependencyStructure& structure,
+ std::bitset<32> active_chains,
+ const DependencyDescriptor& descriptor);
+
+ private:
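+  // Default for the ValueSize/Write overloads above: all 32 possible chains
+  // are marked active.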
+ static constexpr std::bitset<32> kAllChainsAreActive = ~uint32_t{0};
+};
+
+} // namespace webrtc
+
+#endif // MODULES_RTP_RTCP_SOURCE_RTP_DEPENDENCY_DESCRIPTOR_EXTENSION_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_dependency_descriptor_extension_unittest.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_dependency_descriptor_extension_unittest.cc
new file mode 100644
index 0000000000..974557ce6e
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_dependency_descriptor_extension_unittest.cc
@@ -0,0 +1,137 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtp_dependency_descriptor_extension.h"
+
+#include <bitset>
+#include <cstdint>
+#include <cstring>
+
+#include "api/array_view.h"
+#include "api/transport/rtp/dependency_descriptor.h"
+#include "common_video/generic_frame_descriptor/generic_frame_info.h"
+
+#include "test/gmock.h"
+
+namespace webrtc {
+namespace {
+
+using ::testing::Each;
+
+TEST(RtpDependencyDescriptorExtensionTest, Writer3BytesForPerfectTemplate) {
+ uint8_t buffer[3];
+ FrameDependencyStructure structure;
+ structure.num_decode_targets = 2;
+ structure.num_chains = 2;
+ structure.templates = {
+ FrameDependencyTemplate().Dtis("SR").FrameDiffs({1}).ChainDiffs({2, 2})};
+ DependencyDescriptor descriptor;
+ descriptor.frame_dependencies = structure.templates[0];
+
+ EXPECT_EQ(RtpDependencyDescriptorExtension::ValueSize(structure, descriptor),
+ 3u);
+ EXPECT_TRUE(
+ RtpDependencyDescriptorExtension::Write(buffer, structure, descriptor));
+}
+
+TEST(RtpDependencyDescriptorExtensionTest, WriteZeroInUnusedBits) {
+ uint8_t buffer[32];
+ std::memset(buffer, 0xff, sizeof(buffer));
+ FrameDependencyStructure structure;
+ structure.num_decode_targets = 2;
+ structure.num_chains = 2;
+ structure.templates = {
+ FrameDependencyTemplate().Dtis("SR").FrameDiffs({1}).ChainDiffs({1, 1})};
+ DependencyDescriptor descriptor;
+ descriptor.frame_dependencies = structure.templates[0];
+ descriptor.frame_dependencies.frame_diffs = {2};
+
+  // To test that unused bytes are zeroed, the buffer must be larger than the
+  // written value.
+ size_t value_size =
+ RtpDependencyDescriptorExtension::ValueSize(structure, descriptor);
+ ASSERT_LT(value_size, sizeof(buffer));
+
+ ASSERT_TRUE(
+ RtpDependencyDescriptorExtension::Write(buffer, structure, descriptor));
+
+ const uint8_t* unused_bytes = buffer + value_size;
+ size_t num_unused_bytes = buffer + sizeof(buffer) - unused_bytes;
+ // Check remaining bytes are zeroed.
+ EXPECT_THAT(rtc::MakeArrayView(unused_bytes, num_unused_bytes), Each(0));
+}
+
+// In practice the chain diff for an inactive chain will grow unboundedly
+// because no frames are produced for it; that shouldn't block writing the
+// extension.
+TEST(RtpDependencyDescriptorExtensionTest,
+ TemplateMatchingSkipsInactiveChains) {
+ uint8_t buffer[3];
+ FrameDependencyStructure structure;
+ structure.num_decode_targets = 2;
+ structure.num_chains = 2;
+ structure.templates = {
+ FrameDependencyTemplate().Dtis("SR").ChainDiffs({2, 2})};
+ DependencyDescriptor descriptor;
+ descriptor.frame_dependencies = structure.templates[0];
+
+ // Set only 1st chain as active.
+ std::bitset<32> active_chains = 0b01;
+ descriptor.frame_dependencies.chain_diffs[1] = 1000;
+
+  // Expect a perfect template match since the only difference is for an
+  // inactive chain. A perfect template match consumes 3 bytes.
+ EXPECT_EQ(RtpDependencyDescriptorExtension::ValueSize(
+ structure, active_chains, descriptor),
+ 3u);
+ EXPECT_TRUE(RtpDependencyDescriptorExtension::Write(
+ buffer, structure, active_chains, descriptor));
+}
+
+TEST(RtpDependencyDescriptorExtensionTest,
+ AcceptsInvalidChainDiffForInactiveChainWhenChainsAreCustom) {
+ uint8_t buffer[256];
+ FrameDependencyStructure structure;
+ structure.num_decode_targets = 2;
+ structure.num_chains = 2;
+ structure.templates = {
+ FrameDependencyTemplate().Dtis("SR").ChainDiffs({2, 2})};
+ DependencyDescriptor descriptor;
+ descriptor.frame_dependencies = structure.templates[0];
+
+ // Set only 1st chain as active.
+ std::bitset<32> active_chains = 0b01;
+  // Set a chain_diff different from the template's to make the chains custom.
+  descriptor.frame_dependencies.chain_diffs[0] = 1;
+  // Set the chain diff for the inactive chain beyond the 255 maximum.
+ descriptor.frame_dependencies.chain_diffs[1] = 1000;
+
+  // Because the chains are custom, more than the base 3 bytes should be used.
+ EXPECT_GT(RtpDependencyDescriptorExtension::ValueSize(
+ structure, active_chains, descriptor),
+ 3u);
+ EXPECT_TRUE(RtpDependencyDescriptorExtension::Write(
+ buffer, structure, active_chains, descriptor));
+}
+
+TEST(RtpDependencyDescriptorExtensionTest, FailsToWriteInvalidDescriptor) {
+ uint8_t buffer[256];
+ FrameDependencyStructure structure;
+ structure.num_decode_targets = 2;
+ structure.num_chains = 2;
+ structure.templates = {
+ FrameDependencyTemplate().T(0).Dtis("SR").ChainDiffs({2, 2})};
+ DependencyDescriptor descriptor;
+ descriptor.frame_dependencies = structure.templates[0];
+ descriptor.frame_dependencies.temporal_id = 1;
+
+ EXPECT_EQ(
+ RtpDependencyDescriptorExtension::ValueSize(structure, 0b11, descriptor),
+ 0u);
+ EXPECT_FALSE(RtpDependencyDescriptorExtension::Write(buffer, structure, 0b11,
+ descriptor));
+}
+
+} // namespace
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_dependency_descriptor_reader.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_dependency_descriptor_reader.cc
new file mode 100644
index 0000000000..1a56efd9b3
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_dependency_descriptor_reader.cc
@@ -0,0 +1,239 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/rtp_rtcp/source/rtp_dependency_descriptor_reader.h"
+
+#include <memory>
+#include <utility>
+#include <vector>
+
+#include "api/transport/rtp/dependency_descriptor.h"
+#include "rtc_base/bitstream_reader.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+RtpDependencyDescriptorReader::RtpDependencyDescriptorReader(
+ rtc::ArrayView<const uint8_t> raw_data,
+ const FrameDependencyStructure* structure,
+ DependencyDescriptor* descriptor)
+ : descriptor_(descriptor), buffer_(raw_data) {
+ RTC_DCHECK(descriptor);
+
+ ReadMandatoryFields();
+ if (raw_data.size() > 3)
+ ReadExtendedFields();
+
+ structure_ = descriptor->attached_structure
+ ? descriptor->attached_structure.get()
+ : structure;
+ if (structure_ == nullptr) {
+ buffer_.Invalidate();
+ return;
+ }
+ if (active_decode_targets_present_flag_) {
+ descriptor->active_decode_targets_bitmask =
+ buffer_.ReadBits(structure_->num_decode_targets);
+ }
+
+ ReadFrameDependencyDefinition();
+}
+
+void RtpDependencyDescriptorReader::ReadTemplateDependencyStructure() {
+ descriptor_->attached_structure =
+ std::make_unique<FrameDependencyStructure>();
+ descriptor_->attached_structure->structure_id = buffer_.ReadBits(6);
+ descriptor_->attached_structure->num_decode_targets = buffer_.ReadBits(5) + 1;
+
+ ReadTemplateLayers();
+ ReadTemplateDtis();
+ ReadTemplateFdiffs();
+ ReadTemplateChains();
+
+ if (buffer_.Read<bool>())
+ ReadResolutions();
+}
+
+void RtpDependencyDescriptorReader::ReadTemplateLayers() {
+ enum NextLayerIdc {
+ kSameLayer = 0,
+ kNextTemporalLayer = 1,
+ kNextSpatialLayer = 2,
+ kNoMoreTemplates = 3,
+ };
+ std::vector<FrameDependencyTemplate> templates;
+
+ int temporal_id = 0;
+ int spatial_id = 0;
+ NextLayerIdc next_layer_idc;
+ do {
+ if (templates.size() == DependencyDescriptor::kMaxTemplates) {
+ buffer_.Invalidate();
+ break;
+ }
+ templates.emplace_back();
+ FrameDependencyTemplate& last_template = templates.back();
+ last_template.temporal_id = temporal_id;
+ last_template.spatial_id = spatial_id;
+
+ next_layer_idc = static_cast<NextLayerIdc>(buffer_.ReadBits(2));
+ if (next_layer_idc == kNextTemporalLayer) {
+ temporal_id++;
+ if (temporal_id >= DependencyDescriptor::kMaxTemporalIds) {
+ buffer_.Invalidate();
+ break;
+ }
+ } else if (next_layer_idc == kNextSpatialLayer) {
+ temporal_id = 0;
+ spatial_id++;
+ if (spatial_id >= DependencyDescriptor::kMaxSpatialIds) {
+ buffer_.Invalidate();
+ break;
+ }
+ }
+ } while (next_layer_idc != kNoMoreTemplates && buffer_.Ok());
+
+ descriptor_->attached_structure->templates = std::move(templates);
+}
+
+void RtpDependencyDescriptorReader::ReadTemplateDtis() {
+ FrameDependencyStructure* structure = descriptor_->attached_structure.get();
+ for (FrameDependencyTemplate& current_template : structure->templates) {
+ current_template.decode_target_indications.resize(
+ structure->num_decode_targets);
+ for (int i = 0; i < structure->num_decode_targets; ++i) {
+ current_template.decode_target_indications[i] =
+ static_cast<DecodeTargetIndication>(buffer_.ReadBits(2));
+ }
+ }
+}
+
+void RtpDependencyDescriptorReader::ReadTemplateFdiffs() {
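+  // Template fdiffs are encoded as a 1-bit fdiff_follows flag followed by
+  // 4 bits holding fdiff - 1; a zero flag terminates the list (mirroring
+  // RtpDependencyDescriptorWriter::WriteTemplateFdiffs).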
+ for (FrameDependencyTemplate& current_template :
+ descriptor_->attached_structure->templates) {
+ for (bool fdiff_follows = buffer_.Read<bool>(); fdiff_follows;
+ fdiff_follows = buffer_.Read<bool>()) {
+ uint64_t fdiff_minus_one = buffer_.ReadBits(4);
+ current_template.frame_diffs.push_back(fdiff_minus_one + 1);
+ }
+ }
+}
+
+void RtpDependencyDescriptorReader::ReadTemplateChains() {
+ FrameDependencyStructure* structure = descriptor_->attached_structure.get();
+ structure->num_chains =
+ buffer_.ReadNonSymmetric(structure->num_decode_targets + 1);
+ if (structure->num_chains == 0)
+ return;
+ for (int i = 0; i < structure->num_decode_targets; ++i) {
+ uint32_t protected_by_chain =
+ buffer_.ReadNonSymmetric(structure->num_chains);
+ structure->decode_target_protected_by_chain.push_back(protected_by_chain);
+ }
+ for (FrameDependencyTemplate& frame_template : structure->templates) {
+ for (int chain_id = 0; chain_id < structure->num_chains; ++chain_id) {
+ frame_template.chain_diffs.push_back(buffer_.ReadBits(4));
+ }
+ }
+}
+
+void RtpDependencyDescriptorReader::ReadResolutions() {
+ FrameDependencyStructure* structure = descriptor_->attached_structure.get();
+ // The way templates are bitpacked, they are always ordered by spatial_id.
+ int spatial_layers = structure->templates.back().spatial_id + 1;
+ structure->resolutions.reserve(spatial_layers);
+ for (int sid = 0; sid < spatial_layers; ++sid) {
+ uint16_t width_minus_1 = buffer_.Read<uint16_t>();
+ uint16_t height_minus_1 = buffer_.Read<uint16_t>();
+ structure->resolutions.emplace_back(width_minus_1 + 1, height_minus_1 + 1);
+ }
+}
+
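+// The three mandatory bytes hold, in order: a 1-bit first-packet-in-frame
+// flag, a 1-bit last-packet-in-frame flag, a 6-bit template id, and a 16-bit
+// frame number.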
+void RtpDependencyDescriptorReader::ReadMandatoryFields() {
+ descriptor_->first_packet_in_frame = buffer_.Read<bool>();
+ descriptor_->last_packet_in_frame = buffer_.Read<bool>();
+ frame_dependency_template_id_ = buffer_.ReadBits(6);
+ descriptor_->frame_number = buffer_.Read<uint16_t>();
+}
+
+void RtpDependencyDescriptorReader::ReadExtendedFields() {
+ bool template_dependency_structure_present_flag = buffer_.Read<bool>();
+ active_decode_targets_present_flag_ = buffer_.Read<bool>();
+ custom_dtis_flag_ = buffer_.Read<bool>();
+ custom_fdiffs_flag_ = buffer_.Read<bool>();
+ custom_chains_flag_ = buffer_.Read<bool>();
+ if (template_dependency_structure_present_flag) {
+ ReadTemplateDependencyStructure();
+ RTC_DCHECK(descriptor_->attached_structure);
+ descriptor_->active_decode_targets_bitmask =
+ (uint64_t{1} << descriptor_->attached_structure->num_decode_targets) -
+ 1;
+ }
+}
+
+void RtpDependencyDescriptorReader::ReadFrameDependencyDefinition() {
+ size_t template_index =
+ (frame_dependency_template_id_ + DependencyDescriptor::kMaxTemplates -
+ structure_->structure_id) %
+ DependencyDescriptor::kMaxTemplates;
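+  // E.g. with structure_id == 60 and frame_dependency_template_id_ == 2 the
+  // 6-bit id has wrapped, and template_index == (2 + 64 - 60) % 64 == 6.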
+
+ if (template_index >= structure_->templates.size()) {
+ buffer_.Invalidate();
+ return;
+ }
+
+ // Copy all the fields from the matching template
+ descriptor_->frame_dependencies = structure_->templates[template_index];
+
+ if (custom_dtis_flag_)
+ ReadFrameDtis();
+ if (custom_fdiffs_flag_)
+ ReadFrameFdiffs();
+ if (custom_chains_flag_)
+ ReadFrameChains();
+
+ if (structure_->resolutions.empty()) {
+ descriptor_->resolution = absl::nullopt;
+ } else {
+ // Format guarantees that if there were resolutions in the last structure,
+ // then each spatial layer got one.
+    RTC_DCHECK_LT(descriptor_->frame_dependencies.spatial_id,
+                  structure_->resolutions.size());
+ descriptor_->resolution =
+ structure_->resolutions[descriptor_->frame_dependencies.spatial_id];
+ }
+}
+
+void RtpDependencyDescriptorReader::ReadFrameDtis() {
+ RTC_DCHECK_EQ(
+ descriptor_->frame_dependencies.decode_target_indications.size(),
+ structure_->num_decode_targets);
+ for (auto& dti : descriptor_->frame_dependencies.decode_target_indications) {
+ dti = static_cast<DecodeTargetIndication>(buffer_.ReadBits(2));
+ }
+}
+
+void RtpDependencyDescriptorReader::ReadFrameFdiffs() {
+ descriptor_->frame_dependencies.frame_diffs.clear();
+ for (uint64_t next_fdiff_size = buffer_.ReadBits(2); next_fdiff_size > 0;
+ next_fdiff_size = buffer_.ReadBits(2)) {
+ uint64_t fdiff_minus_one = buffer_.ReadBits(4 * next_fdiff_size);
+ descriptor_->frame_dependencies.frame_diffs.push_back(fdiff_minus_one + 1);
+ }
+}
+
+void RtpDependencyDescriptorReader::ReadFrameChains() {
+ RTC_DCHECK_EQ(descriptor_->frame_dependencies.chain_diffs.size(),
+ structure_->num_chains);
+ for (auto& chain_diff : descriptor_->frame_dependencies.chain_diffs) {
+ chain_diff = buffer_.Read<uint8_t>();
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_dependency_descriptor_reader.h b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_dependency_descriptor_reader.h
new file mode 100644
index 0000000000..f79d3d1d07
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_dependency_descriptor_reader.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef MODULES_RTP_RTCP_SOURCE_RTP_DEPENDENCY_DESCRIPTOR_READER_H_
+#define MODULES_RTP_RTCP_SOURCE_RTP_DEPENDENCY_DESCRIPTOR_READER_H_
+
+#include <cstdint>
+#include <memory>
+#include <vector>
+
+#include "api/array_view.h"
+#include "api/transport/rtp/dependency_descriptor.h"
+#include "rtc_base/bitstream_reader.h"
+
+namespace webrtc {
+// Deserializes the DependencyDescriptor RTP header extension.
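+//
+// Usage sketch, mirroring RtpDependencyDescriptorExtension::Parse:
+//   DependencyDescriptor descriptor;
+//   RtpDependencyDescriptorReader reader(raw_data, structure, &descriptor);
+//   bool ok = reader.ParseSuccessful();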
+class RtpDependencyDescriptorReader {
+ public:
+ // Parses the dependency descriptor.
+ RtpDependencyDescriptorReader(rtc::ArrayView<const uint8_t> raw_data,
+ const FrameDependencyStructure* structure,
+ DependencyDescriptor* descriptor);
+ RtpDependencyDescriptorReader(const RtpDependencyDescriptorReader&) = delete;
+ RtpDependencyDescriptorReader& operator=(
+ const RtpDependencyDescriptorReader&) = delete;
+
+ // Returns true if parse was successful.
+ bool ParseSuccessful() { return buffer_.Ok(); }
+
+ private:
+ // Functions to read template dependency structure.
+ void ReadTemplateDependencyStructure();
+ void ReadTemplateLayers();
+ void ReadTemplateDtis();
+ void ReadTemplateFdiffs();
+ void ReadTemplateChains();
+ void ReadResolutions();
+
+  // Functions to read details for the current frame.
+ void ReadMandatoryFields();
+ void ReadExtendedFields();
+ void ReadFrameDependencyDefinition();
+
+ void ReadFrameDtis();
+ void ReadFrameFdiffs();
+ void ReadFrameChains();
+
+ // Output.
+ DependencyDescriptor* const descriptor_;
+ // Values that are needed while reading the descriptor, but can be discarded
+ // when reading is complete.
+ BitstreamReader buffer_;
+ int frame_dependency_template_id_ = 0;
+ bool active_decode_targets_present_flag_ = false;
+ bool custom_dtis_flag_ = false;
+ bool custom_fdiffs_flag_ = false;
+ bool custom_chains_flag_ = false;
+ const FrameDependencyStructure* structure_ = nullptr;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_RTP_RTCP_SOURCE_RTP_DEPENDENCY_DESCRIPTOR_READER_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_dependency_descriptor_writer.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_dependency_descriptor_writer.cc
new file mode 100644
index 0000000000..31df783064
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_dependency_descriptor_writer.cc
@@ -0,0 +1,396 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/rtp_rtcp/source/rtp_dependency_descriptor_writer.h"
+
+#include <algorithm>
+#include <bitset>
+#include <cstddef>
+#include <cstdint>
+#include <iterator>
+#include <vector>
+
+#include "absl/algorithm/container.h"
+#include "api/array_view.h"
+#include "api/transport/rtp/dependency_descriptor.h"
+#include "rtc_base/bit_buffer.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace {
+
+enum class NextLayerIdc : uint64_t {
+ kSameLayer = 0,
+ kNextTemporal = 1,
+ kNewSpatial = 2,
+ kNoMoreLayers = 3,
+ kInvalid = 4
+};
+
+NextLayerIdc GetNextLayerIdc(const FrameDependencyTemplate& previous,
+ const FrameDependencyTemplate& next) {
+ RTC_DCHECK_LT(next.spatial_id, DependencyDescriptor::kMaxSpatialIds);
+ RTC_DCHECK_LT(next.temporal_id, DependencyDescriptor::kMaxTemporalIds);
+
+ if (next.spatial_id == previous.spatial_id &&
+ next.temporal_id == previous.temporal_id) {
+ return NextLayerIdc::kSameLayer;
+ } else if (next.spatial_id == previous.spatial_id &&
+ next.temporal_id == previous.temporal_id + 1) {
+ return NextLayerIdc::kNextTemporal;
+ } else if (next.spatial_id == previous.spatial_id + 1 &&
+ next.temporal_id == 0) {
+ return NextLayerIdc::kNewSpatial;
+ }
+ // Everything else is unsupported.
+ return NextLayerIdc::kInvalid;
+}
+
+} // namespace
+
+RtpDependencyDescriptorWriter::RtpDependencyDescriptorWriter(
+ rtc::ArrayView<uint8_t> data,
+ const FrameDependencyStructure& structure,
+ std::bitset<32> active_chains,
+ const DependencyDescriptor& descriptor)
+ : descriptor_(descriptor),
+ structure_(structure),
+ active_chains_(active_chains),
+ bit_writer_(data.data(), data.size()) {
+ FindBestTemplate();
+}
+
+bool RtpDependencyDescriptorWriter::Write() {
+ if (build_failed_) {
+ return false;
+ }
+ WriteMandatoryFields();
+ if (HasExtendedFields()) {
+ WriteExtendedFields();
+ WriteFrameDependencyDefinition();
+ }
+ size_t remaining_bits = bit_writer_.RemainingBitCount();
+ // Zero remaining memory to avoid leaving it uninitialized.
+ if (remaining_bits % 64 != 0) {
+ WriteBits(/*val=*/0, remaining_bits % 64);
+ }
+ for (size_t i = 0; i < remaining_bits / 64; ++i) {
+ WriteBits(/*val=*/0, 64);
+ }
+ return !build_failed_;
+}
+
+int RtpDependencyDescriptorWriter::ValueSizeBits() const {
+ if (build_failed_) {
+ return 0;
+ }
+ static constexpr int kMandatoryFields = 1 + 1 + 6 + 16;
+ int value_size_bits = kMandatoryFields + best_template_.extra_size_bits;
+ if (HasExtendedFields()) {
+ value_size_bits += 5;
+ if (descriptor_.attached_structure)
+ value_size_bits += StructureSizeBits();
+ if (ShouldWriteActiveDecodeTargetsBitmask())
+ value_size_bits += structure_.num_decode_targets;
+ }
+ return value_size_bits;
+}
+
+int RtpDependencyDescriptorWriter::StructureSizeBits() const {
+ // template_id offset (6 bits) and number of decode targets (5 bits)
+ int bits = 11;
+ // template layers.
+ bits += 2 * structure_.templates.size();
+ // dtis.
+ bits += 2 * structure_.templates.size() * structure_.num_decode_targets;
+  // fdiffs. Each template uses 1 bit plus 5 bits per fdiff.
+ bits += structure_.templates.size();
+ for (const FrameDependencyTemplate& frame_template : structure_.templates) {
+ bits += 5 * frame_template.frame_diffs.size();
+ }
+ bits += rtc::BitBufferWriter::SizeNonSymmetricBits(
+ structure_.num_chains, structure_.num_decode_targets + 1);
+ if (structure_.num_chains > 0) {
+ for (int protected_by : structure_.decode_target_protected_by_chain) {
+ bits += rtc::BitBufferWriter::SizeNonSymmetricBits(protected_by,
+ structure_.num_chains);
+ }
+ bits += 4 * structure_.templates.size() * structure_.num_chains;
+ }
+ // Resolutions.
+ bits += 1 + 32 * structure_.resolutions.size();
+ return bits;
+}
+
+RtpDependencyDescriptorWriter::TemplateMatch
+RtpDependencyDescriptorWriter::CalculateMatch(
+ TemplateIterator frame_template) const {
+ TemplateMatch result;
+ result.template_position = frame_template;
+ result.need_custom_fdiffs =
+ descriptor_.frame_dependencies.frame_diffs != frame_template->frame_diffs;
+ result.need_custom_dtis =
+ descriptor_.frame_dependencies.decode_target_indications !=
+ frame_template->decode_target_indications;
+ result.need_custom_chains = false;
+ for (int i = 0; i < structure_.num_chains; ++i) {
+ if (active_chains_[i] && descriptor_.frame_dependencies.chain_diffs[i] !=
+ frame_template->chain_diffs[i]) {
+ result.need_custom_chains = true;
+ break;
+ }
+ }
+
+ result.extra_size_bits = 0;
+ if (result.need_custom_fdiffs) {
+ result.extra_size_bits +=
+ 2 * (1 + descriptor_.frame_dependencies.frame_diffs.size());
+ for (int fdiff : descriptor_.frame_dependencies.frame_diffs) {
+ if (fdiff <= (1 << 4))
+ result.extra_size_bits += 4;
+ else if (fdiff <= (1 << 8))
+ result.extra_size_bits += 8;
+ else
+ result.extra_size_bits += 12;
+ }
+ }
+ if (result.need_custom_dtis) {
+ result.extra_size_bits +=
+ 2 * descriptor_.frame_dependencies.decode_target_indications.size();
+ }
+ if (result.need_custom_chains)
+ result.extra_size_bits += 8 * structure_.num_chains;
+ return result;
+}
+
+void RtpDependencyDescriptorWriter::FindBestTemplate() {
+ const std::vector<FrameDependencyTemplate>& templates = structure_.templates;
+ // Find range of templates with matching spatial/temporal id.
+ auto same_layer = [&](const FrameDependencyTemplate& frame_template) {
+ return descriptor_.frame_dependencies.spatial_id ==
+ frame_template.spatial_id &&
+ descriptor_.frame_dependencies.temporal_id ==
+ frame_template.temporal_id;
+ };
+ auto first = absl::c_find_if(templates, same_layer);
+ if (first == templates.end()) {
+ build_failed_ = true;
+ return;
+ }
+ auto last = std::find_if_not(first, templates.end(), same_layer);
+
+ best_template_ = CalculateMatch(first);
+  // Search whether there is a better template than the first one.
+ for (auto next = std::next(first); next != last; ++next) {
+ TemplateMatch match = CalculateMatch(next);
+ if (match.extra_size_bits < best_template_.extra_size_bits)
+ best_template_ = match;
+ }
+}
+
+bool RtpDependencyDescriptorWriter::ShouldWriteActiveDecodeTargetsBitmask()
+ const {
+ if (!descriptor_.active_decode_targets_bitmask)
+ return false;
+ const uint64_t all_decode_targets_bitmask =
+ (uint64_t{1} << structure_.num_decode_targets) - 1;
+ if (descriptor_.attached_structure &&
+ descriptor_.active_decode_targets_bitmask == all_decode_targets_bitmask)
+ return false;
+ return true;
+}
+
+bool RtpDependencyDescriptorWriter::HasExtendedFields() const {
+ return best_template_.extra_size_bits > 0 || descriptor_.attached_structure ||
+ descriptor_.active_decode_targets_bitmask;
+}
+
+uint64_t RtpDependencyDescriptorWriter::TemplateId() const {
+ return (best_template_.template_position - structure_.templates.begin() +
+ structure_.structure_id) %
+ DependencyDescriptor::kMaxTemplates;
+}
+
+void RtpDependencyDescriptorWriter::WriteBits(uint64_t val, size_t bit_count) {
+ if (!bit_writer_.WriteBits(val, bit_count))
+ build_failed_ = true;
+}
+
+void RtpDependencyDescriptorWriter::WriteNonSymmetric(uint32_t value,
+ uint32_t num_values) {
+ if (!bit_writer_.WriteNonSymmetric(value, num_values))
+ build_failed_ = true;
+}
+
+void RtpDependencyDescriptorWriter::WriteTemplateDependencyStructure() {
+ RTC_DCHECK_GE(structure_.structure_id, 0);
+ RTC_DCHECK_LT(structure_.structure_id, DependencyDescriptor::kMaxTemplates);
+ RTC_DCHECK_GT(structure_.num_decode_targets, 0);
+ RTC_DCHECK_LE(structure_.num_decode_targets,
+ DependencyDescriptor::kMaxDecodeTargets);
+
+ WriteBits(structure_.structure_id, 6);
+ WriteBits(structure_.num_decode_targets - 1, 5);
+ WriteTemplateLayers();
+ WriteTemplateDtis();
+ WriteTemplateFdiffs();
+ WriteTemplateChains();
+ uint64_t has_resolutions = structure_.resolutions.empty() ? 0 : 1;
+ WriteBits(has_resolutions, 1);
+ if (has_resolutions)
+ WriteResolutions();
+}
+
+void RtpDependencyDescriptorWriter::WriteTemplateLayers() {
+ const auto& templates = structure_.templates;
+ RTC_DCHECK(!templates.empty());
+ RTC_DCHECK_LE(templates.size(), DependencyDescriptor::kMaxTemplates);
+ RTC_DCHECK_EQ(templates[0].spatial_id, 0);
+ RTC_DCHECK_EQ(templates[0].temporal_id, 0);
+
+ for (size_t i = 1; i < templates.size(); ++i) {
+ uint64_t next_layer_idc =
+ static_cast<uint64_t>(GetNextLayerIdc(templates[i - 1], templates[i]));
+ RTC_DCHECK_LE(next_layer_idc, 3);
+ WriteBits(next_layer_idc, 2);
+ }
+ WriteBits(static_cast<uint64_t>(NextLayerIdc::kNoMoreLayers), 2);
+}
+
+void RtpDependencyDescriptorWriter::WriteTemplateDtis() {
+ for (const FrameDependencyTemplate& current_template : structure_.templates) {
+ RTC_DCHECK_EQ(current_template.decode_target_indications.size(),
+ structure_.num_decode_targets);
+ for (DecodeTargetIndication dti :
+ current_template.decode_target_indications) {
+ WriteBits(static_cast<uint32_t>(dti), 2);
+ }
+ }
+}
+
+void RtpDependencyDescriptorWriter::WriteTemplateFdiffs() {
+ for (const FrameDependencyTemplate& current_template : structure_.templates) {
+ for (int fdiff : current_template.frame_diffs) {
+ RTC_DCHECK_GE(fdiff - 1, 0);
+ RTC_DCHECK_LT(fdiff - 1, 1 << 4);
+ WriteBits((1u << 4) | (fdiff - 1), 1 + 4);
+ }
+ // No more diffs for current template.
+ WriteBits(/*val=*/0, /*bit_count=*/1);
+ }
+}
+
+void RtpDependencyDescriptorWriter::WriteTemplateChains() {
+ RTC_DCHECK_GE(structure_.num_chains, 0);
+ RTC_DCHECK_LE(structure_.num_chains, structure_.num_decode_targets);
+
+ WriteNonSymmetric(structure_.num_chains, structure_.num_decode_targets + 1);
+ if (structure_.num_chains == 0)
+ return;
+
+ RTC_DCHECK_EQ(structure_.decode_target_protected_by_chain.size(),
+ structure_.num_decode_targets);
+ for (int protected_by : structure_.decode_target_protected_by_chain) {
+ RTC_DCHECK_GE(protected_by, 0);
+ RTC_DCHECK_LT(protected_by, structure_.num_chains);
+ WriteNonSymmetric(protected_by, structure_.num_chains);
+ }
+ for (const auto& frame_template : structure_.templates) {
+ RTC_DCHECK_EQ(frame_template.chain_diffs.size(), structure_.num_chains);
+ for (int chain_diff : frame_template.chain_diffs) {
+ RTC_DCHECK_GE(chain_diff, 0);
+ RTC_DCHECK_LT(chain_diff, 1 << 4);
+ WriteBits(chain_diff, 4);
+ }
+ }
+}
+
+void RtpDependencyDescriptorWriter::WriteResolutions() {
+ int max_spatial_id = structure_.templates.back().spatial_id;
+ RTC_DCHECK_EQ(structure_.resolutions.size(), max_spatial_id + 1);
+ for (const RenderResolution& resolution : structure_.resolutions) {
+ RTC_DCHECK_GT(resolution.Width(), 0);
+ RTC_DCHECK_LE(resolution.Width(), 1 << 16);
+ RTC_DCHECK_GT(resolution.Height(), 0);
+ RTC_DCHECK_LE(resolution.Height(), 1 << 16);
+
+ WriteBits(resolution.Width() - 1, 16);
+ WriteBits(resolution.Height() - 1, 16);
+ }
+}
+
+void RtpDependencyDescriptorWriter::WriteMandatoryFields() {
+ WriteBits(descriptor_.first_packet_in_frame, 1);
+ WriteBits(descriptor_.last_packet_in_frame, 1);
+ WriteBits(TemplateId(), 6);
+ WriteBits(descriptor_.frame_number, 16);
+}
+
+void RtpDependencyDescriptorWriter::WriteExtendedFields() {
+ uint64_t template_dependency_structure_present_flag =
+ descriptor_.attached_structure ? 1u : 0u;
+ WriteBits(template_dependency_structure_present_flag, 1);
+ uint64_t active_decode_targets_present_flag =
+ ShouldWriteActiveDecodeTargetsBitmask() ? 1u : 0u;
+ WriteBits(active_decode_targets_present_flag, 1);
+ WriteBits(best_template_.need_custom_dtis, 1);
+ WriteBits(best_template_.need_custom_fdiffs, 1);
+ WriteBits(best_template_.need_custom_chains, 1);
+ if (template_dependency_structure_present_flag)
+ WriteTemplateDependencyStructure();
+ if (active_decode_targets_present_flag)
+ WriteBits(*descriptor_.active_decode_targets_bitmask,
+ structure_.num_decode_targets);
+}
+
+void RtpDependencyDescriptorWriter::WriteFrameDependencyDefinition() {
+ if (best_template_.need_custom_dtis)
+ WriteFrameDtis();
+ if (best_template_.need_custom_fdiffs)
+ WriteFrameFdiffs();
+ if (best_template_.need_custom_chains)
+ WriteFrameChains();
+}
+
+void RtpDependencyDescriptorWriter::WriteFrameDtis() {
+ RTC_DCHECK_EQ(descriptor_.frame_dependencies.decode_target_indications.size(),
+ structure_.num_decode_targets);
+ for (DecodeTargetIndication dti :
+ descriptor_.frame_dependencies.decode_target_indications) {
+ WriteBits(static_cast<uint32_t>(dti), 2);
+ }
+}
+
+void RtpDependencyDescriptorWriter::WriteFrameFdiffs() {
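+  // Each fdiff is written as a 2-bit size prefix (number of nibbles) followed
+  // by fdiff - 1; e.g. fdiff == 20 becomes prefix 2 followed by 19 in 8 bits.
+  // A zero prefix terminates the list.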
+ for (int fdiff : descriptor_.frame_dependencies.frame_diffs) {
+ RTC_DCHECK_GT(fdiff, 0);
+ RTC_DCHECK_LE(fdiff, 1 << 12);
+ if (fdiff <= (1 << 4))
+ WriteBits((1u << 4) | (fdiff - 1), 2 + 4);
+ else if (fdiff <= (1 << 8))
+ WriteBits((2u << 8) | (fdiff - 1), 2 + 8);
+ else // fdiff <= (1 << 12)
+ WriteBits((3u << 12) | (fdiff - 1), 2 + 12);
+ }
+ // No more diffs.
+ WriteBits(/*val=*/0, /*bit_count=*/2);
+}
+
+void RtpDependencyDescriptorWriter::WriteFrameChains() {
+ RTC_DCHECK_EQ(descriptor_.frame_dependencies.chain_diffs.size(),
+ structure_.num_chains);
+ for (int i = 0; i < structure_.num_chains; ++i) {
+ int chain_diff =
+ active_chains_[i] ? descriptor_.frame_dependencies.chain_diffs[i] : 0;
+ RTC_DCHECK_GE(chain_diff, 0);
+ RTC_DCHECK_LT(chain_diff, 1 << 8);
+ WriteBits(chain_diff, 8);
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_dependency_descriptor_writer.h b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_dependency_descriptor_writer.h
new file mode 100644
index 0000000000..568e0a8aab
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_dependency_descriptor_writer.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef MODULES_RTP_RTCP_SOURCE_RTP_DEPENDENCY_DESCRIPTOR_WRITER_H_
+#define MODULES_RTP_RTCP_SOURCE_RTP_DEPENDENCY_DESCRIPTOR_WRITER_H_
+
+#include <bitset>
+#include <cstddef>
+#include <cstdint>
+#include <vector>
+
+#include "api/array_view.h"
+#include "api/transport/rtp/dependency_descriptor.h"
+#include "rtc_base/bit_buffer.h"
+
+namespace webrtc {
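+// Writes the DependencyDescriptor RTP header extension. A minimal usage
+// sketch, mirroring RtpDependencyDescriptorExtension::Write:
+//   RtpDependencyDescriptorWriter writer(data, structure, active_chains,
+//                                        descriptor);
+//   bool ok = writer.Write();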
+class RtpDependencyDescriptorWriter {
+ public:
+ // Assumes `structure` and `descriptor` are valid and
+ // `descriptor` matches the `structure`.
+ RtpDependencyDescriptorWriter(rtc::ArrayView<uint8_t> data,
+ const FrameDependencyStructure& structure,
+ std::bitset<32> active_chains,
+ const DependencyDescriptor& descriptor);
+
+ // Serializes DependencyDescriptor rtp header extension.
+ // Returns false if `data` is too small to serialize the `descriptor`.
+ bool Write();
+
+ // Returns minimum number of bits needed to serialize descriptor with respect
+ // to the `structure`. Returns 0 if `descriptor` can't be serialized.
+ int ValueSizeBits() const;
+
+ private:
+ // Used both as pointer to the template and as index in the templates vector.
+ using TemplateIterator = std::vector<FrameDependencyTemplate>::const_iterator;
+ struct TemplateMatch {
+ TemplateIterator template_position;
+ bool need_custom_dtis;
+ bool need_custom_fdiffs;
+ bool need_custom_chains;
+ // Size in bits to store frame-specific details, i.e.
+ // excluding mandatory fields and template dependency structure.
+ int extra_size_bits;
+ };
+ int StructureSizeBits() const;
+ TemplateMatch CalculateMatch(TemplateIterator frame_template) const;
+ void FindBestTemplate();
+ bool ShouldWriteActiveDecodeTargetsBitmask() const;
+ bool HasExtendedFields() const;
+ uint64_t TemplateId() const;
+
+ void WriteBits(uint64_t val, size_t bit_count);
+ void WriteNonSymmetric(uint32_t value, uint32_t num_values);
+
+  // Functions to write the template dependency structure.
+ void WriteTemplateDependencyStructure();
+ void WriteTemplateLayers();
+ void WriteTemplateDtis();
+ void WriteTemplateFdiffs();
+ void WriteTemplateChains();
+ void WriteResolutions();
+
+  // Functions to write details for the current frame.
+ void WriteMandatoryFields();
+ void WriteExtendedFields();
+ void WriteFrameDependencyDefinition();
+
+ void WriteFrameDtis();
+ void WriteFrameFdiffs();
+ void WriteFrameChains();
+
+ bool build_failed_ = false;
+ const DependencyDescriptor& descriptor_;
+ const FrameDependencyStructure& structure_;
+ std::bitset<32> active_chains_;
+ rtc::BitBufferWriter bit_writer_;
+ TemplateMatch best_template_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_RTP_RTCP_SOURCE_RTP_DEPENDENCY_DESCRIPTOR_WRITER_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_descriptor_authentication.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_descriptor_authentication.cc
new file mode 100644
index 0000000000..f4525f0db1
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_descriptor_authentication.cc
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtp_descriptor_authentication.h"
+
+#include <cstdint>
+#include <vector>
+
+#include "modules/rtp_rtcp/source/rtp_generic_frame_descriptor.h"
+#include "modules/rtp_rtcp/source/rtp_generic_frame_descriptor_extension.h"
+#include "modules/rtp_rtcp/source/rtp_video_header.h"
+
+namespace webrtc {
+
+std::vector<uint8_t> RtpDescriptorAuthentication(
+ const RTPVideoHeader& rtp_video_header) {
+ if (!rtp_video_header.generic) {
+ return {};
+ }
+ const RTPVideoHeader::GenericDescriptorInfo& descriptor =
+ *rtp_video_header.generic;
+ // Default way of creating additional data for an encrypted frame.
+ if (descriptor.spatial_index < 0 || descriptor.temporal_index < 0 ||
+ descriptor.spatial_index >=
+ RtpGenericFrameDescriptor::kMaxSpatialLayers ||
+ descriptor.temporal_index >=
+ RtpGenericFrameDescriptor::kMaxTemporalLayers ||
+ descriptor.dependencies.size() >
+ RtpGenericFrameDescriptor::kMaxNumFrameDependencies) {
+ return {};
+ }
+ RtpGenericFrameDescriptor frame_descriptor;
+ frame_descriptor.SetFirstPacketInSubFrame(true);
+ frame_descriptor.SetLastPacketInSubFrame(false);
+ frame_descriptor.SetTemporalLayer(descriptor.temporal_index);
+ frame_descriptor.SetSpatialLayersBitmask(1 << descriptor.spatial_index);
+ frame_descriptor.SetFrameId(descriptor.frame_id & 0xFFFF);
+ for (int64_t dependency : descriptor.dependencies) {
+ frame_descriptor.AddFrameDependencyDiff(descriptor.frame_id - dependency);
+ }
+ if (descriptor.dependencies.empty()) {
+ frame_descriptor.SetResolution(rtp_video_header.width,
+ rtp_video_header.height);
+ }
+ std::vector<uint8_t> result(
+ RtpGenericFrameDescriptorExtension00::ValueSize(frame_descriptor));
+ RtpGenericFrameDescriptorExtension00::Write(result, frame_descriptor);
+ return result;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_descriptor_authentication.h b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_descriptor_authentication.h
new file mode 100644
index 0000000000..1791abecd8
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_descriptor_authentication.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_RTP_RTCP_SOURCE_RTP_DESCRIPTOR_AUTHENTICATION_H_
+#define MODULES_RTP_RTCP_SOURCE_RTP_DESCRIPTOR_AUTHENTICATION_H_
+
+#include <cstdint>
+#include <vector>
+
+#include "modules/rtp_rtcp/source/rtp_video_header.h"
+
+namespace webrtc {
+
+// Converts frame dependencies into array of bytes for authentication.
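+// Returns an empty vector if there is no generic descriptor or its fields
+// are out of range. A minimal usage sketch (`video_header` is a hypothetical
+// RTPVideoHeader with its `generic` field populated):
+//   std::vector<uint8_t> additional_data =
+//       RtpDescriptorAuthentication(video_header);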
+std::vector<uint8_t> RtpDescriptorAuthentication(
+ const RTPVideoHeader& rtp_video_header);
+
+} // namespace webrtc
+
+#endif // MODULES_RTP_RTCP_SOURCE_RTP_DESCRIPTOR_AUTHENTICATION_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_fec_unittest.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_fec_unittest.cc
new file mode 100644
index 0000000000..2c01a0d40a
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_fec_unittest.cc
@@ -0,0 +1,1129 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <cstring>
+#include <list>
+#include <memory>
+#include <vector>
+
+#include "absl/algorithm/container.h"
+#include "modules/rtp_rtcp/source/byte_io.h"
+#include "modules/rtp_rtcp/source/fec_test_helper.h"
+#include "modules/rtp_rtcp/source/flexfec_header_reader_writer.h"
+#include "modules/rtp_rtcp/source/forward_error_correction.h"
+#include "modules/rtp_rtcp/source/ulpfec_header_reader_writer.h"
+#include "rtc_base/random.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+namespace {
+
+// Transport header size in bytes. Assume UDP/IPv4 as a reasonable minimum.
+constexpr size_t kTransportOverhead = 28;
+
+constexpr uint32_t kMediaSsrc = 83542;
+constexpr uint32_t kFlexfecSsrc = 43245;
+
+constexpr size_t kMaxMediaPackets = 48;
+
+// Deep copies `src` to `dst`, but only keeps every Nth packet.
+void DeepCopyEveryNthPacket(const ForwardErrorCorrection::PacketList& src,
+ int n,
+ ForwardErrorCorrection::PacketList* dst) {
+ RTC_DCHECK_GT(n, 0);
+ int i = 0;
+ for (const auto& packet : src) {
+ if (i % n == 0) {
+ dst->emplace_back(new ForwardErrorCorrection::Packet(*packet));
+ }
+ ++i;
+ }
+}
+
+} // namespace
+
+using ::testing::Types;
+
+template <typename ForwardErrorCorrectionType>
+class RtpFecTest : public ::testing::Test {
+ protected:
+ RtpFecTest()
+ : random_(0xabcdef123456),
+ media_packet_generator_(
+ kRtpHeaderSize, // Minimum packet size.
+ IP_PACKET_SIZE - kRtpHeaderSize - kTransportOverhead -
+ fec_.MaxPacketOverhead(), // Maximum packet size.
+ kMediaSsrc,
+ &random_) {}
+
+ // Construct `received_packets_`: a subset of the media and FEC packets.
+ //
+ // Media packet "i" is lost if media_loss_mask_[i] = 1, received if
+ // media_loss_mask_[i] = 0.
+ // FEC packet "i" is lost if fec_loss_mask_[i] = 1, received if
+ // fec_loss_mask_[i] = 0.
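+  // E.g. with 4 media packets, media_loss_mask = {0, 0, 0, 1} drops only the
+  // last media packet, as in the FecRecoveryWithLoss test below.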
+ void NetworkReceivedPackets(int* media_loss_mask, int* fec_loss_mask);
+
+ // Add packet from `packet_list` to list of received packets, using the
+ // `loss_mask`.
+ // The `packet_list` may be a media packet list (is_fec = false), or a
+ // FEC packet list (is_fec = true).
+ template <typename T>
+ void ReceivedPackets(const T& packet_list, int* loss_mask, bool is_fec);
+
+ // Check for complete recovery after FEC decoding.
+ bool IsRecoveryComplete();
+
+ ForwardErrorCorrectionType fec_;
+
+ Random random_;
+ test::fec::MediaPacketGenerator media_packet_generator_;
+
+ ForwardErrorCorrection::PacketList media_packets_;
+ std::list<ForwardErrorCorrection::Packet*> generated_fec_packets_;
+ std::vector<std::unique_ptr<ForwardErrorCorrection::ReceivedPacket>>
+ received_packets_;
+ ForwardErrorCorrection::RecoveredPacketList recovered_packets_;
+
+ int media_loss_mask_[kUlpfecMaxMediaPackets];
+ int fec_loss_mask_[kUlpfecMaxMediaPackets];
+};
+
+template <typename ForwardErrorCorrectionType>
+void RtpFecTest<ForwardErrorCorrectionType>::NetworkReceivedPackets(
+ int* media_loss_mask,
+ int* fec_loss_mask) {
+ constexpr bool kFecPacket = true;
+ this->received_packets_.clear();
+ ReceivedPackets(media_packets_, media_loss_mask, !kFecPacket);
+ ReceivedPackets(generated_fec_packets_, fec_loss_mask, kFecPacket);
+}
+
+template <typename ForwardErrorCorrectionType>
+template <typename PacketListType>
+void RtpFecTest<ForwardErrorCorrectionType>::ReceivedPackets(
+ const PacketListType& packet_list,
+ int* loss_mask,
+ bool is_fec) {
+ uint16_t fec_seq_num = ForwardErrorCorrectionType::GetFirstFecSeqNum(
+ media_packet_generator_.GetNextSeqNum());
+ int packet_idx = 0;
+
+ for (const auto& packet : packet_list) {
+ if (loss_mask[packet_idx] == 0) {
+ std::unique_ptr<ForwardErrorCorrection::ReceivedPacket> received_packet(
+ new ForwardErrorCorrection::ReceivedPacket());
+ received_packet->pkt = new ForwardErrorCorrection::Packet();
+ received_packet->pkt->data = packet->data;
+ received_packet->is_fec = is_fec;
+ if (!is_fec) {
+ received_packet->ssrc = kMediaSsrc;
+ // For media packets, the sequence number is obtained from the
+ // RTP header as written by MediaPacketGenerator::ConstructMediaPackets.
+ received_packet->seq_num =
+ ByteReader<uint16_t>::ReadBigEndian(packet->data.data() + 2);
+ } else {
+ received_packet->ssrc = ForwardErrorCorrectionType::kFecSsrc;
+ // For FEC packets, we simulate the sequence numbers differently
+ // depending on if ULPFEC or FlexFEC is used. See the definition of
+ // ForwardErrorCorrectionType::GetFirstFecSeqNum.
+ received_packet->seq_num = fec_seq_num;
+ }
+ received_packets_.push_back(std::move(received_packet));
+ }
+ packet_idx++;
+    // Sequence numbers of FEC packets are defined as incrementing by 1 from
+    // the last media packet in the frame.
+ if (is_fec)
+ fec_seq_num++;
+ }
+}
+
+template <typename ForwardErrorCorrectionType>
+bool RtpFecTest<ForwardErrorCorrectionType>::IsRecoveryComplete() {
+ // We must have equally many recovered packets as original packets and all
+ // recovered packets must be identical to the corresponding original packets.
+ return absl::c_equal(
+ media_packets_, recovered_packets_,
+ [](const std::unique_ptr<ForwardErrorCorrection::Packet>& media_packet,
+ const std::unique_ptr<ForwardErrorCorrection::RecoveredPacket>&
+ recovered_packet) {
+ if (media_packet->data.size() != recovered_packet->pkt->data.size()) {
+ return false;
+ }
+ if (memcmp(media_packet->data.cdata(),
+ recovered_packet->pkt->data.cdata(),
+ media_packet->data.size()) != 0) {
+ return false;
+ }
+ return true;
+ });
+}
+
+// Define gTest typed test to loop over both ULPFEC and FlexFEC.
+// Since the tests are parameterized, member variables of the templated
+// fixture must be accessed through `this`, making them dependent names that
+// are resolved at template instantiation.
+
+class FlexfecForwardErrorCorrection : public ForwardErrorCorrection {
+ public:
+ static const uint32_t kFecSsrc = kFlexfecSsrc;
+
+ FlexfecForwardErrorCorrection()
+ : ForwardErrorCorrection(
+ std::unique_ptr<FecHeaderReader>(new FlexfecHeaderReader()),
+ std::unique_ptr<FecHeaderWriter>(new FlexfecHeaderWriter()),
+ kFecSsrc,
+ kMediaSsrc) {}
+
+ // For FlexFEC we let the FEC packet sequence numbers be independent of
+ // the media packet sequence numbers.
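+  // Note that the fixed seed makes this deterministic: every call returns
+  // the same pseudo-random starting sequence number.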
+ static uint16_t GetFirstFecSeqNum(uint16_t next_media_seq_num) {
+ Random random(0xbe110);
+ return random.Rand<uint16_t>();
+ }
+};
+
+class UlpfecForwardErrorCorrection : public ForwardErrorCorrection {
+ public:
+ static const uint32_t kFecSsrc = kMediaSsrc;
+
+ UlpfecForwardErrorCorrection()
+ : ForwardErrorCorrection(
+ std::unique_ptr<FecHeaderReader>(new UlpfecHeaderReader()),
+ std::unique_ptr<FecHeaderWriter>(new UlpfecHeaderWriter()),
+ kFecSsrc,
+ kMediaSsrc) {}
+
+ // For ULPFEC we assume that the FEC packets are subsequent to the media
+ // packets in terms of sequence number.
+ static uint16_t GetFirstFecSeqNum(uint16_t next_media_seq_num) {
+ return next_media_seq_num;
+ }
+};
+
+using FecTypes =
+ Types<FlexfecForwardErrorCorrection, UlpfecForwardErrorCorrection>;
+TYPED_TEST_SUITE(RtpFecTest, FecTypes);
+
+TYPED_TEST(RtpFecTest, WillProtectMediaPacketsWithLargeSequenceNumberGap) {
+ constexpr int kNumImportantPackets = 0;
+ constexpr bool kUseUnequalProtection = false;
+ constexpr int kNumMediaPackets = 2;
+ constexpr uint8_t kProtectionFactor = 127;
+
+ this->media_packets_ =
+ this->media_packet_generator_.ConstructMediaPackets(kNumMediaPackets);
+
+  // Create a `kMaxMediaPackets - 1` sequence number difference.
+ ByteWriter<uint16_t>::WriteBigEndian(
+ this->media_packets_.front()->data.MutableData() + 2, 1);
+ ByteWriter<uint16_t>::WriteBigEndian(
+ this->media_packets_.back()->data.MutableData() + 2, kMaxMediaPackets);
+
+ EXPECT_EQ(
+ 0, this->fec_.EncodeFec(this->media_packets_, kProtectionFactor,
+ kNumImportantPackets, kUseUnequalProtection,
+ kFecMaskBursty, &this->generated_fec_packets_));
+ EXPECT_EQ(1u, this->generated_fec_packets_.size());
+}
+
+TYPED_TEST(RtpFecTest,
+ WillNotProtectMediaPacketsWithTooLargeSequenceNumberGap) {
+ constexpr int kNumImportantPackets = 0;
+ constexpr bool kUseUnequalProtection = false;
+ constexpr int kNumMediaPackets = 2;
+ constexpr uint8_t kProtectionFactor = 127;
+
+ this->media_packets_ =
+ this->media_packet_generator_.ConstructMediaPackets(kNumMediaPackets);
+
+ // Create `kMaxMediaPackets` sequence number difference.
+ ByteWriter<uint16_t>::WriteBigEndian(
+ this->media_packets_.front()->data.MutableData() + 2, 1);
+ ByteWriter<uint16_t>::WriteBigEndian(
+ this->media_packets_.back()->data.MutableData() + 2,
+ kMaxMediaPackets + 1);
+
+ EXPECT_EQ(
+ -1, this->fec_.EncodeFec(this->media_packets_, kProtectionFactor,
+ kNumImportantPackets, kUseUnequalProtection,
+ kFecMaskBursty, &this->generated_fec_packets_));
+ EXPECT_TRUE(this->generated_fec_packets_.empty());
+}
+
+TYPED_TEST(RtpFecTest, FecRecoveryNoLoss) {
+ constexpr int kNumImportantPackets = 0;
+ constexpr bool kUseUnequalProtection = false;
+ constexpr int kNumMediaPackets = 4;
+ constexpr uint8_t kProtectionFactor = 60;
+
+ this->media_packets_ =
+ this->media_packet_generator_.ConstructMediaPackets(kNumMediaPackets);
+
+ EXPECT_EQ(
+ 0, this->fec_.EncodeFec(this->media_packets_, kProtectionFactor,
+ kNumImportantPackets, kUseUnequalProtection,
+ kFecMaskBursty, &this->generated_fec_packets_));
+
+ // Expect 1 FEC packet.
+ EXPECT_EQ(1u, this->generated_fec_packets_.size());
+
+ // No packets lost.
+ memset(this->media_loss_mask_, 0, sizeof(this->media_loss_mask_));
+ memset(this->fec_loss_mask_, 0, sizeof(this->fec_loss_mask_));
+ this->NetworkReceivedPackets(this->media_loss_mask_, this->fec_loss_mask_);
+
+ for (const auto& received_packet : this->received_packets_) {
+ this->fec_.DecodeFec(*received_packet, &this->recovered_packets_);
+ }
+
+ // No packets lost, expect complete recovery.
+ EXPECT_TRUE(this->IsRecoveryComplete());
+}
+
+TYPED_TEST(RtpFecTest, FecRecoveryWithLoss) {
+ constexpr int kNumImportantPackets = 0;
+ constexpr bool kUseUnequalProtection = false;
+ constexpr int kNumMediaPackets = 4;
+ constexpr uint8_t kProtectionFactor = 60;
+
+ this->media_packets_ =
+ this->media_packet_generator_.ConstructMediaPackets(kNumMediaPackets);
+
+ EXPECT_EQ(
+ 0, this->fec_.EncodeFec(this->media_packets_, kProtectionFactor,
+ kNumImportantPackets, kUseUnequalProtection,
+ kFecMaskBursty, &this->generated_fec_packets_));
+
+ // Expect 1 FEC packet.
+ EXPECT_EQ(1u, this->generated_fec_packets_.size());
+
+  // 1 media packet lost.
+ memset(this->media_loss_mask_, 0, sizeof(this->media_loss_mask_));
+ memset(this->fec_loss_mask_, 0, sizeof(this->fec_loss_mask_));
+ this->media_loss_mask_[3] = 1;
+ this->NetworkReceivedPackets(this->media_loss_mask_, this->fec_loss_mask_);
+
+ for (const auto& received_packet : this->received_packets_) {
+ this->fec_.DecodeFec(*received_packet, &this->recovered_packets_);
+ }
+
+ // One packet lost, one FEC packet, expect complete recovery.
+ EXPECT_TRUE(this->IsRecoveryComplete());
+ this->recovered_packets_.clear();
+
+ // 2 media packets lost.
+ memset(this->media_loss_mask_, 0, sizeof(this->media_loss_mask_));
+ memset(this->fec_loss_mask_, 0, sizeof(this->fec_loss_mask_));
+ this->media_loss_mask_[1] = 1;
+ this->media_loss_mask_[3] = 1;
+ this->NetworkReceivedPackets(this->media_loss_mask_, this->fec_loss_mask_);
+
+ for (const auto& received_packet : this->received_packets_) {
+ this->fec_.DecodeFec(*received_packet, &this->recovered_packets_);
+ }
+
+ // 2 packets lost, one FEC packet, cannot get complete recovery.
+ EXPECT_FALSE(this->IsRecoveryComplete());
+}
+
+// Verify that we don't use an old FEC packet for FEC decoding.
+TYPED_TEST(RtpFecTest, NoFecRecoveryWithOldFecPacket) {
+ constexpr int kNumImportantPackets = 0;
+ constexpr bool kUseUnequalProtection = false;
+ constexpr uint8_t kProtectionFactor = 20;
+
+  // Three frames: the first (old) frame has 2 media packets and 1 FEC
+  // packet, the second has 3 media packets, and the third (new) frame has
+  // 3 media packets and no FEC packets.
+  //
+  // #0(media) #1(media) #2(FEC)                ----Frame 1-----
+  // #32767(media) #32768(media) #32769(media)  ----Frame 2-----
+  // #65535(media) #0(media) #1(media)          ----Frame 3-----
+  // If we lose either packet 0 or 1 of the third frame, FEC decoding should
+  // not try to decode using the "old" FEC packet #2.
+
+ // Construct media packets for first frame, starting at sequence number 0.
+ this->media_packets_ =
+ this->media_packet_generator_.ConstructMediaPackets(2, 0);
+
+ EXPECT_EQ(
+ 0, this->fec_.EncodeFec(this->media_packets_, kProtectionFactor,
+ kNumImportantPackets, kUseUnequalProtection,
+ kFecMaskBursty, &this->generated_fec_packets_));
+ // Expect 1 FEC packet.
+ EXPECT_EQ(1u, this->generated_fec_packets_.size());
+  // Add the FEC packet (seq#2) of this first frame to the received list
+  // (i.e., assume the two media packets were lost).
+ memset(this->fec_loss_mask_, 0, sizeof(this->fec_loss_mask_));
+ this->ReceivedPackets(this->generated_fec_packets_, this->fec_loss_mask_,
+ true);
+
+ // Construct media packets for second frame, with sequence number wrap.
+ this->media_packets_ =
+ this->media_packet_generator_.ConstructMediaPackets(3, 32767);
+
+ // Expect 3 media packets for this frame.
+ EXPECT_EQ(3u, this->media_packets_.size());
+
+  // No packets lost.
+ memset(this->media_loss_mask_, 0, sizeof(this->media_loss_mask_));
+ this->ReceivedPackets(this->media_packets_, this->media_loss_mask_, false);
+
+ // Construct media packets for third frame, with sequence number wrap.
+ this->media_packets_ =
+ this->media_packet_generator_.ConstructMediaPackets(3, 65535);
+
+ // Expect 3 media packets for this frame.
+ EXPECT_EQ(3u, this->media_packets_.size());
+
+ // Second media packet lost (seq#0).
+ memset(this->media_loss_mask_, 0, sizeof(this->media_loss_mask_));
+ this->media_loss_mask_[1] = 1;
+ // Add packets #65535, and #1 to received list.
+ this->ReceivedPackets(this->media_packets_, this->media_loss_mask_, false);
+
+ for (const auto& received_packet : this->received_packets_) {
+ this->fec_.DecodeFec(*received_packet, &this->recovered_packets_);
+ }
+
+ // Expect that no decoding is done to get missing packet (seq#0) of third
+ // frame, using old FEC packet (seq#2) from first (old) frame. So number of
+ // recovered packets is 5 (0 from first frame, three from second frame, and 2
+ // for the third frame, with no packets recovered via FEC).
+ EXPECT_EQ(5u, this->recovered_packets_.size());
+ EXPECT_TRUE(this->recovered_packets_.size() != this->media_packets_.size());
+}
+
+// Verify we can still recover frame if sequence number wrap occurs within
+// the frame and FEC packet following wrap is received after media packets.
+TYPED_TEST(RtpFecTest, FecRecoveryWithSeqNumGapOneFrameRecovery) {
+ constexpr int kNumImportantPackets = 0;
+ constexpr bool kUseUnequalProtection = false;
+ constexpr uint8_t kProtectionFactor = 20;
+
+ // One frame, with sequence number wrap in media packets.
+ // -----Frame 1----
+ // #65534(media) #65535(media) #0(media) #1(FEC).
+ this->media_packets_ =
+ this->media_packet_generator_.ConstructMediaPackets(3, 65534);
+
+ EXPECT_EQ(
+ 0, this->fec_.EncodeFec(this->media_packets_, kProtectionFactor,
+ kNumImportantPackets, kUseUnequalProtection,
+ kFecMaskBursty, &this->generated_fec_packets_));
+
+ // Expect 1 FEC packet.
+ EXPECT_EQ(1u, this->generated_fec_packets_.size());
+
+ // Lose one media packet (seq# 65535).
+ memset(this->media_loss_mask_, 0, sizeof(this->media_loss_mask_));
+ memset(this->fec_loss_mask_, 0, sizeof(this->fec_loss_mask_));
+ this->media_loss_mask_[1] = 1;
+ this->ReceivedPackets(this->media_packets_, this->media_loss_mask_, false);
+ // Add FEC packet to received list following the media packets.
+ this->ReceivedPackets(this->generated_fec_packets_, this->fec_loss_mask_,
+ true);
+
+ for (const auto& received_packet : this->received_packets_) {
+ this->fec_.DecodeFec(*received_packet, &this->recovered_packets_);
+ }
+
+ // Expect 3 media packets in recovered list, and complete recovery.
+ // Wrap-around won't remove FEC packet, as it follows the wrap.
+ EXPECT_EQ(3u, this->recovered_packets_.size());
+ EXPECT_TRUE(this->IsRecoveryComplete());
+}
+
+// Sequence number wrap occurs within the ULPFEC packets for the frame.
+// The same problem will occur if the wrap is within the media packets but the
+// ULPFEC packet is received before the media packets. This may be improved if
+// timing information is used to detect old ULPFEC packets.
+
+// TODO(nisse): There's some logic to discard ULPFEC packets at wrap-around,
+// however, that is not actually exercised by this test: When the first FEC
+// packet is processed, it results in full recovery of one media packet and the
+// FEC packet is forgotten. And then the wraparound isn't noticed when the next
+// FEC packet is received. We should fix wraparound handling, which currently
+// appears broken, and then figure out how to test it properly.
+using RtpFecTestUlpfecOnly = RtpFecTest<UlpfecForwardErrorCorrection>;
+TEST_F(RtpFecTestUlpfecOnly, FecRecoveryWithSeqNumGapOneFrameRecovery) {
+ constexpr int kNumImportantPackets = 0;
+ constexpr bool kUseUnequalProtection = false;
+ constexpr uint8_t kProtectionFactor = 200;
+
+ // 1 frame: 3 media packets and 2 FEC packets.
+ // Sequence number wrap in FEC packets.
+ // -----Frame 1----
+ // #65532(media) #65533(media) #65534(media) #65535(FEC) #0(FEC).
+ this->media_packets_ =
+ this->media_packet_generator_.ConstructMediaPackets(3, 65532);
+
+ EXPECT_EQ(
+ 0, this->fec_.EncodeFec(this->media_packets_, kProtectionFactor,
+ kNumImportantPackets, kUseUnequalProtection,
+ kFecMaskBursty, &this->generated_fec_packets_));
+
+ // Expect 2 FEC packets.
+ EXPECT_EQ(2u, this->generated_fec_packets_.size());
+
+ // Lose the last two media packets (seq# 65533, 65534).
+ memset(this->media_loss_mask_, 0, sizeof(this->media_loss_mask_));
+ memset(this->fec_loss_mask_, 0, sizeof(this->fec_loss_mask_));
+ this->media_loss_mask_[1] = 1;
+ this->media_loss_mask_[2] = 1;
+ this->ReceivedPackets(this->media_packets_, this->media_loss_mask_, false);
+ this->ReceivedPackets(this->generated_fec_packets_, this->fec_loss_mask_,
+ true);
+
+ for (const auto& received_packet : this->received_packets_) {
+ this->fec_.DecodeFec(*received_packet, &this->recovered_packets_);
+ }
+
+ // The two FEC packets are received and allow for complete recovery. As
+ // noted in the TODO above, the wrap-discarding logic is not triggered here:
+ // the first FEC packet already recovers one media packet and is forgotten
+ // before the wrap could be noticed. So expect 3 media packets on the
+ // recovered list and complete recovery.
+ EXPECT_EQ(3u, this->recovered_packets_.size());
+ EXPECT_EQ(this->recovered_packets_.size(), this->media_packets_.size());
+ EXPECT_TRUE(this->IsRecoveryComplete());
+}
+
+// TODO(brandtr): This test mimics the one above, ensuring that the recovery
+// strategy of FlexFEC matches the recovery strategy of ULPFEC. Since FlexFEC
+// does not share the sequence number space with the media, however, having a
+// matching recovery strategy may be suboptimal. Study this further.
+// TODO(nisse): In this test, recovery based on the first FEC packet fails with
+// the log message "The recovered packet had a length larger than a typical IP
+// packet, and is thus dropped." This is probably not intended, and needs
+// investigation.
+using RtpFecTestFlexfecOnly = RtpFecTest<FlexfecForwardErrorCorrection>;
+TEST_F(RtpFecTestFlexfecOnly, FecRecoveryWithSeqNumGapOneFrameNoRecovery) {
+ constexpr int kNumImportantPackets = 0;
+ constexpr bool kUseUnequalProtection = false;
+ constexpr uint8_t kProtectionFactor = 200;
+
+ // 1 frame: 3 media packets and 2 FEC packets.
+ // Sequence number wrap in FEC packets.
+ // -----Frame 1----
+ // #65532(media) #65533(media) #65534(media) #65535(FEC) #0(FEC).
+ this->media_packets_ =
+ this->media_packet_generator_.ConstructMediaPackets(3, 65532);
+
+ EXPECT_EQ(
+ 0, this->fec_.EncodeFec(this->media_packets_, kProtectionFactor,
+ kNumImportantPackets, kUseUnequalProtection,
+ kFecMaskBursty, &this->generated_fec_packets_));
+
+ // Expect 2 FEC packets.
+ EXPECT_EQ(2u, this->generated_fec_packets_.size());
+
+ // Overwrite the sequence numbers of the generated FEC packets (bytes 2-3
+ // of the RTP header) to make sure that we do have a wrap.
+ auto it = this->generated_fec_packets_.begin();
+ ByteWriter<uint16_t>::WriteBigEndian(&(*it)->data.MutableData()[2], 65535);
+ ++it;
+ ByteWriter<uint16_t>::WriteBigEndian(&(*it)->data.MutableData()[2], 0);
+
+ // Lose the last two media packets (seq# 65533, 65534).
+ memset(this->media_loss_mask_, 0, sizeof(this->media_loss_mask_));
+ memset(this->fec_loss_mask_, 0, sizeof(this->fec_loss_mask_));
+ this->media_loss_mask_[1] = 1;
+ this->media_loss_mask_[2] = 1;
+ this->ReceivedPackets(this->media_packets_, this->media_loss_mask_, false);
+ this->ReceivedPackets(this->generated_fec_packets_, this->fec_loss_mask_,
+ true);
+
+ for (const auto& received_packet : this->received_packets_) {
+ this->fec_.DecodeFec(*received_packet, &this->recovered_packets_);
+ }
+
+ // The two FEC packets are received and should allow for complete recovery,
+ // but because of the wrap the first FEC packet will be discarded, and only
+ // one media packet is recoverable. So expect 2 media packets on recovered
+ // list and no complete recovery.
+ EXPECT_EQ(2u, this->recovered_packets_.size());
+ EXPECT_TRUE(this->recovered_packets_.size() != this->media_packets_.size());
+ EXPECT_FALSE(this->IsRecoveryComplete());
+}
+
+// Verify we can still recover the frame if media packets are reordered.
+TYPED_TEST(RtpFecTest, FecRecoveryWithMediaOutOfOrder) {
+ constexpr int kNumImportantPackets = 0;
+ constexpr bool kUseUnequalProtection = false;
+ constexpr uint8_t kProtectionFactor = 20;
+
+ // One frame: 3 media packets, 1 FEC packet.
+ // -----Frame 1----
+ // #0(media) #1(media) #2(media) #3(FEC).
+ this->media_packets_ =
+ this->media_packet_generator_.ConstructMediaPackets(3, 0);
+
+ EXPECT_EQ(
+ 0, this->fec_.EncodeFec(this->media_packets_, kProtectionFactor,
+ kNumImportantPackets, kUseUnequalProtection,
+ kFecMaskBursty, &this->generated_fec_packets_));
+
+ // Expect 1 FEC packet.
+ EXPECT_EQ(1u, this->generated_fec_packets_.size());
+
+ // Lose one media packet (seq# 1).
+ memset(this->media_loss_mask_, 0, sizeof(this->media_loss_mask_));
+ memset(this->fec_loss_mask_, 0, sizeof(this->fec_loss_mask_));
+ this->media_loss_mask_[1] = 1;
+ this->NetworkReceivedPackets(this->media_loss_mask_, this->fec_loss_mask_);
+
+ // Reorder received media packets.
+ auto it0 = this->received_packets_.begin();
+ auto it1 = this->received_packets_.begin();
+ it1++;
+ std::swap(*it0, *it1);
+
+ for (const auto& received_packet : this->received_packets_) {
+ this->fec_.DecodeFec(*received_packet, &this->recovered_packets_);
+ }
+
+ // Expect 3 media packets in recovered list, and complete recovery.
+ EXPECT_EQ(3u, this->recovered_packets_.size());
+ EXPECT_TRUE(this->IsRecoveryComplete());
+}
+
+// Verify we can still recover the frame if the FEC packet is received before
+// the media packets.
+TYPED_TEST(RtpFecTest, FecRecoveryWithFecOutOfOrder) {
+ constexpr int kNumImportantPackets = 0;
+ constexpr bool kUseUnequalProtection = false;
+ constexpr uint8_t kProtectionFactor = 20;
+
+ // One frame: 3 media packets, 1 FEC packet.
+ // -----Frame 1----
+ // #0(media) #1(media) #2(media) #3(FEC).
+ this->media_packets_ =
+ this->media_packet_generator_.ConstructMediaPackets(3, 0);
+
+ EXPECT_EQ(
+ 0, this->fec_.EncodeFec(this->media_packets_, kProtectionFactor,
+ kNumImportantPackets, kUseUnequalProtection,
+ kFecMaskBursty, &this->generated_fec_packets_));
+
+ // Expect 1 FEC packet.
+ EXPECT_EQ(1u, this->generated_fec_packets_.size());
+
+ // Lose one media packet (seq# 1).
+ memset(this->media_loss_mask_, 0, sizeof(this->media_loss_mask_));
+ memset(this->fec_loss_mask_, 0, sizeof(this->fec_loss_mask_));
+ this->media_loss_mask_[1] = 1;
+ // Add FEC packet to received list before the media packets.
+ this->ReceivedPackets(this->generated_fec_packets_, this->fec_loss_mask_,
+ true);
+ // Add media packets to received list.
+ this->ReceivedPackets(this->media_packets_, this->media_loss_mask_, false);
+
+ for (const auto& received_packet : this->received_packets_) {
+ this->fec_.DecodeFec(*received_packet, &this->recovered_packets_);
+ }
+
+ // Expect 3 media packets in recovered list, and complete recovery.
+ EXPECT_EQ(3u, this->recovered_packets_.size());
+ EXPECT_TRUE(this->IsRecoveryComplete());
+}
+
+// Test 50% protection with random mask type: Two cases are considered:
+// a 50% non-consecutive loss which can be fully recovered, and a 50%
+// consecutive loss which cannot be fully recovered.
+TYPED_TEST(RtpFecTest, FecRecoveryWithLoss50percRandomMask) {
+ constexpr int kNumImportantPackets = 0;
+ constexpr bool kUseUnequalProtection = false;
+ constexpr int kNumMediaPackets = 4;
+ constexpr uint8_t kProtectionFactor = 255;
+
+ // Packet Mask for (4,4,0) code, from random mask table.
+ // (kNumMediaPackets = 4; num_fec_packets = 4, kNumImportantPackets = 0)
+
+ // media#0 media#1 media#2 media#3
+ // fec#0: 1 1 0 0
+ // fec#1: 1 0 1 0
+ // fec#2: 0 0 1 1
+ // fec#3: 0 1 0 1
+ //
+
+ this->media_packets_ =
+ this->media_packet_generator_.ConstructMediaPackets(kNumMediaPackets);
+
+ EXPECT_EQ(
+ 0, this->fec_.EncodeFec(this->media_packets_, kProtectionFactor,
+ kNumImportantPackets, kUseUnequalProtection,
+ kFecMaskRandom, &this->generated_fec_packets_));
+
+ // Expect 4 FEC packets.
+ EXPECT_EQ(4u, this->generated_fec_packets_.size());
+
+ // 4 packets lost: 3 media packets (0, 2, 3), and one FEC packet (0) lost.
+ memset(this->media_loss_mask_, 0, sizeof(this->media_loss_mask_));
+ memset(this->fec_loss_mask_, 0, sizeof(this->fec_loss_mask_));
+ this->fec_loss_mask_[0] = 1;
+ this->media_loss_mask_[0] = 1;
+ this->media_loss_mask_[2] = 1;
+ this->media_loss_mask_[3] = 1;
+ this->NetworkReceivedPackets(this->media_loss_mask_, this->fec_loss_mask_);
+
+ for (const auto& received_packet : this->received_packets_) {
+ this->fec_.DecodeFec(*received_packet, &this->recovered_packets_);
+ }
+
+ // With media packet#1 and FEC packets #1, #2, #3, expect complete recovery.
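+ // (Illustrative decode order, read off the mask table above: fec#3 = m1^m3
+ // plus the received m1 recovers m3, fec#2 = m2^m3 then recovers m2, and
+ // fec#1 = m0^m2 finally recovers m0.)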
+ EXPECT_TRUE(this->IsRecoveryComplete());
+ this->recovered_packets_.clear();
+
+ // 4 consecutive packets lost: media packets 0, 1, 2, 3.
+ memset(this->media_loss_mask_, 0, sizeof(this->media_loss_mask_));
+ memset(this->fec_loss_mask_, 0, sizeof(this->fec_loss_mask_));
+ this->media_loss_mask_[0] = 1;
+ this->media_loss_mask_[1] = 1;
+ this->media_loss_mask_[2] = 1;
+ this->media_loss_mask_[3] = 1;
+ this->NetworkReceivedPackets(this->media_loss_mask_, this->fec_loss_mask_);
+
+ for (const auto& received_packet : this->received_packets_) {
+ this->fec_.DecodeFec(*received_packet, &this->recovered_packets_);
+ }
+
+ // Cannot get complete recovery for this loss configuration with random mask.
+ EXPECT_FALSE(this->IsRecoveryComplete());
+}
+
+// Test 50% protection with bursty mask type: Three cases are considered:
+// two 50% consecutive losses which can be fully recovered, and one 50%
+// non-consecutive loss which cannot be fully recovered.
+TYPED_TEST(RtpFecTest, FecRecoveryWithLoss50percBurstyMask) {
+ constexpr int kNumImportantPackets = 0;
+ constexpr bool kUseUnequalProtection = false;
+ constexpr int kNumMediaPackets = 4;
+ constexpr uint8_t kProtectionFactor = 255;
+
+ // Packet Mask for (4,4,0) code, from bursty mask table.
+ // (kNumMediaPackets = 4; num_fec_packets = 4, kNumImportantPackets = 0)
+
+ // media#0 media#1 media#2 media#3
+ // fec#0: 1 0 0 0
+ // fec#1: 1 1 0 0
+ // fec#2: 0 1 1 0
+ // fec#3: 0 0 1 1
+ //
+
+ this->media_packets_ =
+ this->media_packet_generator_.ConstructMediaPackets(kNumMediaPackets);
+
+ EXPECT_EQ(
+ 0, this->fec_.EncodeFec(this->media_packets_, kProtectionFactor,
+ kNumImportantPackets, kUseUnequalProtection,
+ kFecMaskBursty, &this->generated_fec_packets_));
+
+ // Expect 4 FEC packets.
+ EXPECT_EQ(4u, this->generated_fec_packets_.size());
+
+ // 4 consecutive packets lost: media packets 0,1,2,3.
+ memset(this->media_loss_mask_, 0, sizeof(this->media_loss_mask_));
+ memset(this->fec_loss_mask_, 0, sizeof(this->fec_loss_mask_));
+ this->media_loss_mask_[0] = 1;
+ this->media_loss_mask_[1] = 1;
+ this->media_loss_mask_[2] = 1;
+ this->media_loss_mask_[3] = 1;
+ this->NetworkReceivedPackets(this->media_loss_mask_, this->fec_loss_mask_);
+
+ for (const auto& received_packet : this->received_packets_) {
+ this->fec_.DecodeFec(*received_packet, &this->recovered_packets_);
+ }
+
+ // Expect complete recovery for consecutive packet loss <= 50%.
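+ // (With the bursty mask above, fec#0 = m0, fec#1 = m0^m1, fec#2 = m1^m2
+ // and fec#3 = m2^m3, so each received FEC packet in turn recovers the next
+ // lost media packet.)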
+ EXPECT_TRUE(this->IsRecoveryComplete());
+ this->recovered_packets_.clear();
+
+ // 4 consecutive packets lost: media packets 1,2, 3, and FEC packet 0.
+ memset(this->media_loss_mask_, 0, sizeof(this->media_loss_mask_));
+ memset(this->fec_loss_mask_, 0, sizeof(this->fec_loss_mask_));
+ this->fec_loss_mask_[0] = 1;
+ this->media_loss_mask_[1] = 1;
+ this->media_loss_mask_[2] = 1;
+ this->media_loss_mask_[3] = 1;
+ this->NetworkReceivedPackets(this->media_loss_mask_, this->fec_loss_mask_);
+
+ for (const auto& received_packet : this->received_packets_) {
+ this->fec_.DecodeFec(*received_packet, &this->recovered_packets_);
+ }
+
+ // Expect complete recovery for consecutive packet loss <= 50%.
+ EXPECT_TRUE(this->IsRecoveryComplete());
+ this->recovered_packets_.clear();
+
+ // 4 packets lost (non-consecutive loss): media packets 0, 3, and FEC# 0, 3.
+ memset(this->media_loss_mask_, 0, sizeof(this->media_loss_mask_));
+ memset(this->fec_loss_mask_, 0, sizeof(this->fec_loss_mask_));
+ this->fec_loss_mask_[0] = 1;
+ this->fec_loss_mask_[3] = 1;
+ this->media_loss_mask_[0] = 1;
+ this->media_loss_mask_[3] = 1;
+ this->NetworkReceivedPackets(this->media_loss_mask_, this->fec_loss_mask_);
+
+ for (const auto& received_packet : this->received_packets_) {
+ this->fec_.DecodeFec(*received_packet, &this->recovered_packets_);
+ }
+
+ // Cannot get complete recovery for this loss configuration.
+ EXPECT_FALSE(this->IsRecoveryComplete());
+}
+
+TYPED_TEST(RtpFecTest, FecRecoveryNoLossUep) {
+ constexpr int kNumImportantPackets = 2;
+ constexpr bool kUseUnequalProtection = true;
+ constexpr int kNumMediaPackets = 4;
+ constexpr uint8_t kProtectionFactor = 60;
+
+ this->media_packets_ =
+ this->media_packet_generator_.ConstructMediaPackets(kNumMediaPackets);
+
+ EXPECT_EQ(
+ 0, this->fec_.EncodeFec(this->media_packets_, kProtectionFactor,
+ kNumImportantPackets, kUseUnequalProtection,
+ kFecMaskBursty, &this->generated_fec_packets_));
+
+ // Expect 1 FEC packet.
+ EXPECT_EQ(1u, this->generated_fec_packets_.size());
+
+ // No packets lost.
+ memset(this->media_loss_mask_, 0, sizeof(this->media_loss_mask_));
+ memset(this->fec_loss_mask_, 0, sizeof(this->fec_loss_mask_));
+ this->NetworkReceivedPackets(this->media_loss_mask_, this->fec_loss_mask_);
+
+ for (const auto& received_packet : this->received_packets_) {
+ this->fec_.DecodeFec(*received_packet, &this->recovered_packets_);
+ }
+
+ // No packets lost, expect complete recovery.
+ EXPECT_TRUE(this->IsRecoveryComplete());
+}
+
+TYPED_TEST(RtpFecTest, FecRecoveryWithLossUep) {
+ constexpr int kNumImportantPackets = 2;
+ constexpr bool kUseUnequalProtection = true;
+ constexpr int kNumMediaPackets = 4;
+ constexpr uint8_t kProtectionFactor = 60;
+
+ this->media_packets_ =
+ this->media_packet_generator_.ConstructMediaPackets(kNumMediaPackets);
+
+ EXPECT_EQ(
+ 0, this->fec_.EncodeFec(this->media_packets_, kProtectionFactor,
+ kNumImportantPackets, kUseUnequalProtection,
+ kFecMaskBursty, &this->generated_fec_packets_));
+
+ // Expect 1 FEC packet.
+ EXPECT_EQ(1u, this->generated_fec_packets_.size());
+
+ // 1 media packet lost.
+ memset(this->media_loss_mask_, 0, sizeof(this->media_loss_mask_));
+ memset(this->fec_loss_mask_, 0, sizeof(this->fec_loss_mask_));
+ this->media_loss_mask_[3] = 1;
+ this->NetworkReceivedPackets(this->media_loss_mask_, this->fec_loss_mask_);
+
+ for (const auto& received_packet : this->received_packets_) {
+ this->fec_.DecodeFec(*received_packet, &this->recovered_packets_);
+ }
+
+ // One packet lost, one FEC packet, expect complete recovery.
+ EXPECT_TRUE(this->IsRecoveryComplete());
+ this->recovered_packets_.clear();
+
+ // 2 media packets lost.
+ memset(this->media_loss_mask_, 0, sizeof(this->media_loss_mask_));
+ memset(this->fec_loss_mask_, 0, sizeof(this->fec_loss_mask_));
+ this->media_loss_mask_[1] = 1;
+ this->media_loss_mask_[3] = 1;
+ this->NetworkReceivedPackets(this->media_loss_mask_, this->fec_loss_mask_);
+
+ for (const auto& received_packet : this->received_packets_) {
+ this->fec_.DecodeFec(*received_packet, &this->recovered_packets_);
+ }
+
+ // 2 packets lost, one FEC packet, cannot get complete recovery.
+ EXPECT_FALSE(this->IsRecoveryComplete());
+}
+
+// Test 50% protection with random mask type for UEP on.
+TYPED_TEST(RtpFecTest, FecRecoveryWithLoss50percUepRandomMask) {
+ constexpr int kNumImportantPackets = 1;
+ constexpr bool kUseUnequalProtection = true;
+ constexpr int kNumMediaPackets = 4;
+ constexpr uint8_t kProtectionFactor = 255;
+
+ // Packet Mask for (4,4,1) code, from random mask table.
+ // (kNumMediaPackets = 4; num_fec_packets = 4, kNumImportantPackets = 1)
+
+ // media#0 media#1 media#2 media#3
+ // fec#0: 1 0 0 0
+ // fec#1: 1 1 0 0
+ // fec#2: 1 0 1 1
+ // fec#3: 0 1 1 0
+ //
+
+ this->media_packets_ =
+ this->media_packet_generator_.ConstructMediaPackets(kNumMediaPackets);
+
+ EXPECT_EQ(
+ 0, this->fec_.EncodeFec(this->media_packets_, kProtectionFactor,
+ kNumImportantPackets, kUseUnequalProtection,
+ kFecMaskRandom, &this->generated_fec_packets_));
+
+ // Expect 4 FEC packets.
+ EXPECT_EQ(4u, this->generated_fec_packets_.size());
+
+ // 4 packets lost: 3 media packets and FEC packet#1 lost.
+ memset(this->media_loss_mask_, 0, sizeof(this->media_loss_mask_));
+ memset(this->fec_loss_mask_, 0, sizeof(this->fec_loss_mask_));
+ this->fec_loss_mask_[1] = 1;
+ this->media_loss_mask_[0] = 1;
+ this->media_loss_mask_[2] = 1;
+ this->media_loss_mask_[3] = 1;
+ this->NetworkReceivedPackets(this->media_loss_mask_, this->fec_loss_mask_);
+
+ for (const auto& received_packet : this->received_packets_) {
+ this->fec_.DecodeFec(*received_packet, &this->recovered_packets_);
+ }
+
+ // With media packet#1 and FEC packets #0, #2, #3, expect complete recovery.
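+ // (Illustrative decode order from the mask table above: fec#0 = m0 recovers
+ // m0 directly, fec#3 = m1^m2 plus the received m1 recovers m2, and
+ // fec#2 = m0^m2^m3 then recovers m3.)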
+ EXPECT_TRUE(this->IsRecoveryComplete());
+ this->recovered_packets_.clear();
+
+ // 5 packets lost: all 4 media packets and FEC packet#2.
+ memset(this->media_loss_mask_, 0, sizeof(this->media_loss_mask_));
+ memset(this->fec_loss_mask_, 0, sizeof(this->fec_loss_mask_));
+ this->fec_loss_mask_[2] = 1;
+ this->media_loss_mask_[0] = 1;
+ this->media_loss_mask_[1] = 1;
+ this->media_loss_mask_[2] = 1;
+ this->media_loss_mask_[3] = 1;
+ this->NetworkReceivedPackets(this->media_loss_mask_, this->fec_loss_mask_);
+
+ for (const auto& received_packet : this->received_packets_) {
+ this->fec_.DecodeFec(*received_packet, &this->recovered_packets_);
+ }
+
+ // Cannot get complete recovery for this loss configuration.
+ EXPECT_FALSE(this->IsRecoveryComplete());
+}
+
+TYPED_TEST(RtpFecTest, FecRecoveryNonConsecutivePackets) {
+ constexpr int kNumImportantPackets = 0;
+ constexpr bool kUseUnequalProtection = false;
+ constexpr int kNumMediaPackets = 5;
+ constexpr uint8_t kProtectionFactor = 60;
+
+ this->media_packets_ =
+ this->media_packet_generator_.ConstructMediaPackets(kNumMediaPackets);
+
+ // Create a new temporary packet list for generating FEC packets.
+ // This list should have every other packet removed.
+ ForwardErrorCorrection::PacketList protected_media_packets;
+ DeepCopyEveryNthPacket(this->media_packets_, 2, &protected_media_packets);
+
+ EXPECT_EQ(
+ 0, this->fec_.EncodeFec(protected_media_packets, kProtectionFactor,
+ kNumImportantPackets, kUseUnequalProtection,
+ kFecMaskBursty, &this->generated_fec_packets_));
+
+ // Expect 1 FEC packet.
+ EXPECT_EQ(1u, this->generated_fec_packets_.size());
+
+ // 1 protected media packet lost.
+ memset(this->media_loss_mask_, 0, sizeof(this->media_loss_mask_));
+ memset(this->fec_loss_mask_, 0, sizeof(this->fec_loss_mask_));
+ this->media_loss_mask_[2] = 1;
+ this->NetworkReceivedPackets(this->media_loss_mask_, this->fec_loss_mask_);
+
+ for (const auto& received_packet : this->received_packets_) {
+ this->fec_.DecodeFec(*received_packet, &this->recovered_packets_);
+ }
+
+ // One packet lost, one FEC packet, expect complete recovery.
+ EXPECT_TRUE(this->IsRecoveryComplete());
+ this->recovered_packets_.clear();
+
+ // Unprotected packet lost.
+ memset(this->media_loss_mask_, 0, sizeof(this->media_loss_mask_));
+ memset(this->fec_loss_mask_, 0, sizeof(this->fec_loss_mask_));
+ this->media_loss_mask_[1] = 1;
+ this->NetworkReceivedPackets(this->media_loss_mask_, this->fec_loss_mask_);
+
+ for (const auto& received_packet : this->received_packets_) {
+ this->fec_.DecodeFec(*received_packet, &this->recovered_packets_);
+ }
+
+ // Unprotected packet lost. Recovery not possible.
+ EXPECT_FALSE(this->IsRecoveryComplete());
+ this->recovered_packets_.clear();
+
+ // 2 media packets lost.
+ memset(this->media_loss_mask_, 0, sizeof(this->media_loss_mask_));
+ memset(this->fec_loss_mask_, 0, sizeof(this->fec_loss_mask_));
+ this->media_loss_mask_[0] = 1;
+ this->media_loss_mask_[2] = 1;
+ this->NetworkReceivedPackets(this->media_loss_mask_, this->fec_loss_mask_);
+
+ for (const auto& received_packet : this->received_packets_) {
+ this->fec_.DecodeFec(*received_packet, &this->recovered_packets_);
+ }
+
+ // 2 protected packets lost, one FEC packet, cannot get complete recovery.
+ EXPECT_FALSE(this->IsRecoveryComplete());
+}
+
+TYPED_TEST(RtpFecTest, FecRecoveryNonConsecutivePacketsExtension) {
+ constexpr int kNumImportantPackets = 0;
+ constexpr bool kUseUnequalProtection = false;
+ constexpr int kNumMediaPackets = 21;
+ constexpr uint8_t kProtectionFactor = 127;
+
+ this->media_packets_ =
+ this->media_packet_generator_.ConstructMediaPackets(kNumMediaPackets);
+
+ // Create a new temporary packet list for generating FEC packets.
+ // This list should have every other packet removed.
+ ForwardErrorCorrection::PacketList protected_media_packets;
+ DeepCopyEveryNthPacket(this->media_packets_, 2, &protected_media_packets);
+
+ // Zero column insertion will have to extend the size of the packet
+ // mask, since the number of actual packets is 21 while the number
+ // of protected packets is 11.
+ EXPECT_EQ(
+ 0, this->fec_.EncodeFec(protected_media_packets, kProtectionFactor,
+ kNumImportantPackets, kUseUnequalProtection,
+ kFecMaskBursty, &this->generated_fec_packets_));
+
+ // Expect 5 FEC packets.
+ EXPECT_EQ(5u, this->generated_fec_packets_.size());
+
+ // Last protected media packet lost.
+ memset(this->media_loss_mask_, 0, sizeof(this->media_loss_mask_));
+ memset(this->fec_loss_mask_, 0, sizeof(this->fec_loss_mask_));
+ this->media_loss_mask_[kNumMediaPackets - 1] = 1;
+ this->NetworkReceivedPackets(this->media_loss_mask_, this->fec_loss_mask_);
+
+ for (const auto& received_packet : this->received_packets_) {
+ this->fec_.DecodeFec(*received_packet, &this->recovered_packets_);
+ }
+
+ // One packet lost, one FEC packet, expect complete recovery.
+ EXPECT_TRUE(this->IsRecoveryComplete());
+ this->recovered_packets_.clear();
+
+ // Last unprotected packet lost.
+ memset(this->media_loss_mask_, 0, sizeof(this->media_loss_mask_));
+ memset(this->fec_loss_mask_, 0, sizeof(this->fec_loss_mask_));
+ this->media_loss_mask_[kNumMediaPackets - 2] = 1;
+ this->NetworkReceivedPackets(this->media_loss_mask_, this->fec_loss_mask_);
+
+ for (const auto& received_packet : this->received_packets_) {
+ this->fec_.DecodeFec(*received_packet, &this->recovered_packets_);
+ }
+
+ // Unprotected packet lost. Recovery not possible.
+ EXPECT_FALSE(this->IsRecoveryComplete());
+ this->recovered_packets_.clear();
+
+ // 6 media packets lost.
+ memset(this->media_loss_mask_, 0, sizeof(this->media_loss_mask_));
+ memset(this->fec_loss_mask_, 0, sizeof(this->fec_loss_mask_));
+ this->media_loss_mask_[kNumMediaPackets - 11] = 1;
+ this->media_loss_mask_[kNumMediaPackets - 9] = 1;
+ this->media_loss_mask_[kNumMediaPackets - 7] = 1;
+ this->media_loss_mask_[kNumMediaPackets - 5] = 1;
+ this->media_loss_mask_[kNumMediaPackets - 3] = 1;
+ this->media_loss_mask_[kNumMediaPackets - 1] = 1;
+ this->NetworkReceivedPackets(this->media_loss_mask_, this->fec_loss_mask_);
+
+ for (const auto& received_packet : this->received_packets_) {
+ this->fec_.DecodeFec(*received_packet, &this->recovered_packets_);
+ }
+
+ // 6 protected packets lost, only 5 FEC packets, cannot get complete recovery.
+ EXPECT_FALSE(this->IsRecoveryComplete());
+}
+
+TYPED_TEST(RtpFecTest, FecRecoveryNonConsecutivePacketsWrap) {
+ constexpr int kNumImportantPackets = 0;
+ constexpr bool kUseUnequalProtection = false;
+ constexpr int kNumMediaPackets = 21;
+ constexpr uint8_t kProtectionFactor = 127;
+
+ this->media_packets_ = this->media_packet_generator_.ConstructMediaPackets(
+ kNumMediaPackets, 0xFFFF - 5);
+
+ // Create a new temporary packet list for generating FEC packets.
+ // This list should have every other packet removed.
+ ForwardErrorCorrection::PacketList protected_media_packets;
+ DeepCopyEveryNthPacket(this->media_packets_, 2, &protected_media_packets);
+
+ // Zero column insertion will have to extend the size of the packet
+ // mask, since the number of actual packets is 21 while the number
+ // of protected packets is 11.
+ EXPECT_EQ(
+ 0, this->fec_.EncodeFec(protected_media_packets, kProtectionFactor,
+ kNumImportantPackets, kUseUnequalProtection,
+ kFecMaskBursty, &this->generated_fec_packets_));
+
+ // Expect 5 FEC packets.
+ EXPECT_EQ(5u, this->generated_fec_packets_.size());
+
+ // Last protected media packet lost.
+ memset(this->media_loss_mask_, 0, sizeof(this->media_loss_mask_));
+ memset(this->fec_loss_mask_, 0, sizeof(this->fec_loss_mask_));
+ this->media_loss_mask_[kNumMediaPackets - 1] = 1;
+ this->NetworkReceivedPackets(this->media_loss_mask_, this->fec_loss_mask_);
+
+ for (const auto& received_packet : this->received_packets_) {
+ this->fec_.DecodeFec(*received_packet, &this->recovered_packets_);
+ }
+
+ // One packet lost, one FEC packet, expect complete recovery.
+ EXPECT_TRUE(this->IsRecoveryComplete());
+ this->recovered_packets_.clear();
+
+ // Last unprotected packet lost.
+ memset(this->media_loss_mask_, 0, sizeof(this->media_loss_mask_));
+ memset(this->fec_loss_mask_, 0, sizeof(this->fec_loss_mask_));
+ this->media_loss_mask_[kNumMediaPackets - 2] = 1;
+ this->NetworkReceivedPackets(this->media_loss_mask_, this->fec_loss_mask_);
+
+ for (const auto& received_packet : this->received_packets_) {
+ this->fec_.DecodeFec(*received_packet, &this->recovered_packets_);
+ }
+
+ // Unprotected packet lost. Recovery not possible.
+ EXPECT_FALSE(this->IsRecoveryComplete());
+ this->recovered_packets_.clear();
+
+ // 6 media packets lost.
+ memset(this->media_loss_mask_, 0, sizeof(this->media_loss_mask_));
+ memset(this->fec_loss_mask_, 0, sizeof(this->fec_loss_mask_));
+ this->media_loss_mask_[kNumMediaPackets - 11] = 1;
+ this->media_loss_mask_[kNumMediaPackets - 9] = 1;
+ this->media_loss_mask_[kNumMediaPackets - 7] = 1;
+ this->media_loss_mask_[kNumMediaPackets - 5] = 1;
+ this->media_loss_mask_[kNumMediaPackets - 3] = 1;
+ this->media_loss_mask_[kNumMediaPackets - 1] = 1;
+ this->NetworkReceivedPackets(this->media_loss_mask_, this->fec_loss_mask_);
+
+ for (const auto& received_packet : this->received_packets_) {
+ this->fec_.DecodeFec(*received_packet, &this->recovered_packets_);
+ }
+
+ // 6 protected packets lost, only 5 FEC packets, cannot get complete recovery.
+ EXPECT_FALSE(this->IsRecoveryComplete());
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_format.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_format.cc
new file mode 100644
index 0000000000..7550b70f69
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_format.cc
@@ -0,0 +1,169 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtp_format.h"
+
+#include <memory>
+
+#include "absl/types/variant.h"
+#include "modules/rtp_rtcp/source/rtp_format_h264.h"
+#include "modules/rtp_rtcp/source/rtp_format_video_generic.h"
+#include "modules/rtp_rtcp/source/rtp_format_vp8.h"
+#include "modules/rtp_rtcp/source/rtp_format_vp9.h"
+#include "modules/rtp_rtcp/source/rtp_packetizer_av1.h"
+#include "modules/video_coding/codecs/h264/include/h264_globals.h"
+#include "modules/video_coding/codecs/vp8/include/vp8_globals.h"
+#include "modules/video_coding/codecs/vp9/include/vp9_globals.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+std::unique_ptr<RtpPacketizer> RtpPacketizer::Create(
+ absl::optional<VideoCodecType> type,
+ rtc::ArrayView<const uint8_t> payload,
+ PayloadSizeLimits limits,
+ // Codec-specific details.
+ const RTPVideoHeader& rtp_video_header) {
+ if (!type) {
+ // Use raw packetizer.
+ return std::make_unique<RtpPacketizerGeneric>(payload, limits);
+ }
+
+ switch (*type) {
+ case kVideoCodecH264: {
+ const auto& h264 =
+ absl::get<RTPVideoHeaderH264>(rtp_video_header.video_type_header);
+ return std::make_unique<RtpPacketizerH264>(payload, limits,
+ h264.packetization_mode);
+ }
+ case kVideoCodecVP8: {
+ const auto& vp8 =
+ absl::get<RTPVideoHeaderVP8>(rtp_video_header.video_type_header);
+ return std::make_unique<RtpPacketizerVp8>(payload, limits, vp8);
+ }
+ case kVideoCodecVP9: {
+ const auto& vp9 =
+ absl::get<RTPVideoHeaderVP9>(rtp_video_header.video_type_header);
+ return std::make_unique<RtpPacketizerVp9>(payload, limits, vp9);
+ }
+ case kVideoCodecAV1:
+ return std::make_unique<RtpPacketizerAv1>(
+ payload, limits, rtp_video_header.frame_type,
+ rtp_video_header.is_last_frame_in_picture);
+ default: {
+ return std::make_unique<RtpPacketizerGeneric>(payload, limits,
+ rtp_video_header);
+ }
+ }
+}
+
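+// Usage sketch (illustrative only; it mirrors the pattern used by the unit
+// tests rather than an API mandated here): a sender creates one packetizer
+// per encoded frame and drains it into RTP packets. `encoded_frame` and
+// `video_header` are assumed to be provided by the caller, with the
+// codec-specific details filled in.
+//
+//   RtpPacketizer::PayloadSizeLimits limits;
+//   limits.max_payload_len = 1200;
+//   std::unique_ptr<RtpPacketizer> packetizer = RtpPacketizer::Create(
+//       kVideoCodecVP8, encoded_frame, limits, video_header);
+//   RtpPacketToSend packet(/*extensions=*/nullptr);
+//   while (packetizer->NextPacket(&packet)) {
+//     // Assign sequence number and timestamp, then hand off for sending.
+//   }
+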
+std::vector<int> RtpPacketizer::SplitAboutEqually(
+ int payload_len,
+ const PayloadSizeLimits& limits) {
+ RTC_DCHECK_GT(payload_len, 0);
+ // A first or last packet larger than normal is unsupported.
+ RTC_DCHECK_GE(limits.first_packet_reduction_len, 0);
+ RTC_DCHECK_GE(limits.last_packet_reduction_len, 0);
+
+ std::vector<int> result;
+ if (limits.max_payload_len >=
+ limits.single_packet_reduction_len + payload_len) {
+ result.push_back(payload_len);
+ return result;
+ }
+ if (limits.max_payload_len - limits.first_packet_reduction_len < 1 ||
+ limits.max_payload_len - limits.last_packet_reduction_len < 1) {
+ // Capacity is not enough to put a single byte into one of the packets.
+ return result;
+ }
+ // The first and last packets of the frame are allowed to be smaller than
+ // the rest. Pretend that all packets are full-size by counting the first
+ // and last packet reductions as extra payload that must be written. Under
+ // that accounting a frame only "fits" in one packet if the packet also has
+ // room for the sum of both reductions; the true single-packet case was
+ // handled above. (See the worked example after this function.)
+ int total_bytes = payload_len + limits.first_packet_reduction_len +
+ limits.last_packet_reduction_len;
+ // Integer division with rounding up.
+ int num_packets_left =
+ (total_bytes + limits.max_payload_len - 1) / limits.max_payload_len;
+ if (num_packets_left == 1) {
+ // Single packet is a special case handled above.
+ num_packets_left = 2;
+ }
+
+ if (payload_len < num_packets_left) {
+ // Edge case where the limits force more packets than there are payload
+ // bytes. This may happen when a single byte of payload can't be put into
+ // a single packet, i.e. when
+ // first_packet_reduction + last_packet_reduction >= max_payload_len.
+ return result;
+ }
+
+ int bytes_per_packet = total_bytes / num_packets_left;
+ int num_larger_packets = total_bytes % num_packets_left;
+ int remaining_data = payload_len;
+
+ result.reserve(num_packets_left);
+ bool first_packet = true;
+ while (remaining_data > 0) {
+ // The last num_larger_packets packets are 1 byte larger than the rest.
+ // Increase the per-packet payload size when we reach them.
+ if (num_packets_left == num_larger_packets)
+ ++bytes_per_packet;
+ int current_packet_bytes = bytes_per_packet;
+ if (first_packet) {
+ if (current_packet_bytes > limits.first_packet_reduction_len + 1)
+ current_packet_bytes -= limits.first_packet_reduction_len;
+ else
+ current_packet_bytes = 1;
+ }
+ if (current_packet_bytes > remaining_data) {
+ current_packet_bytes = remaining_data;
+ }
+ // If this is not the last packet of the payload but it would consume all
+ // the remaining data, leave at least one byte for the last packet.
+ if (num_packets_left == 2 && current_packet_bytes == remaining_data) {
+ --current_packet_bytes;
+ }
+ result.push_back(current_packet_bytes);
+
+ remaining_data -= current_packet_bytes;
+ --num_packets_left;
+ first_packet = false;
+ }
+
+ return result;
+}
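+
+// Worked example (illustrative numbers, not taken from the tests): with
+// payload_len = 10, max_payload_len = 4, first_packet_reduction_len = 1 and
+// last_packet_reduction_len = 1, total_bytes is 12, so three packets are
+// needed. bytes_per_packet = 4 with no larger packets; the first and last
+// packets then shrink by their reductions, and SplitAboutEqually() returns
+// {3, 4, 3}, which sums to 10 and respects the per-packet capacities of
+// 3, 4 and 3.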
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_format.h b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_format.h
new file mode 100644
index 0000000000..19abd3feb2
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_format.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_RTP_RTCP_SOURCE_RTP_FORMAT_H_
+#define MODULES_RTP_RTCP_SOURCE_RTP_FORMAT_H_
+
+#include <stdint.h>
+
+#include <memory>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/array_view.h"
+#include "modules/rtp_rtcp/source/rtp_video_header.h"
+
+namespace webrtc {
+
+class RtpPacketToSend;
+
+class RtpPacketizer {
+ public:
+ struct PayloadSizeLimits {
+ int max_payload_len = 1200;
+ int first_packet_reduction_len = 0;
+ int last_packet_reduction_len = 0;
+ // Reduction len for packet that is first & last at the same time.
+ int single_packet_reduction_len = 0;
+ };
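+
+ // For example (illustrative numbers): with max_payload_len = 1200 and
+ // first_packet_reduction_len = 100, the first packet of a frame may carry
+ // at most 1100 bytes of payload, while middle packets may carry 1200.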
+
+ // If type is not set, returns a raw packetizer.
+ static std::unique_ptr<RtpPacketizer> Create(
+ absl::optional<VideoCodecType> type,
+ rtc::ArrayView<const uint8_t> payload,
+ PayloadSizeLimits limits,
+ // Codec-specific details.
+ const RTPVideoHeader& rtp_video_header);
+
+ virtual ~RtpPacketizer() = default;
+
+ // Returns the number of packets remaining to be produced by the packetizer.
+ virtual size_t NumPackets() const = 0;
+
+ // Get the next payload with payload header.
+ // Write payload and set marker bit of the `packet`.
+ // Returns true on success, false otherwise.
+ virtual bool NextPacket(RtpPacketToSend* packet) = 0;
+
+ // Split payload_len into sum of integers with respect to `limits`.
+ // Returns empty vector on failure.
+ static std::vector<int> SplitAboutEqually(int payload_len,
+ const PayloadSizeLimits& limits);
+};
+} // namespace webrtc
+#endif // MODULES_RTP_RTCP_SOURCE_RTP_FORMAT_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_format_h264.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_format_h264.cc
new file mode 100644
index 0000000000..86f48582a7
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_format_h264.cc
@@ -0,0 +1,344 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtp_format_h264.h"
+
+#include <string.h>
+
+#include <cstddef>
+#include <cstdint>
+#include <iterator>
+#include <memory>
+#include <utility>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "absl/types/variant.h"
+#include "common_video/h264/h264_common.h"
+#include "common_video/h264/pps_parser.h"
+#include "common_video/h264/sps_parser.h"
+#include "common_video/h264/sps_vui_rewriter.h"
+#include "modules/rtp_rtcp/source/byte_io.h"
+#include "modules/rtp_rtcp/source/rtp_packet_to_send.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+namespace {
+
+constexpr size_t kNalHeaderSize = 1;
+constexpr size_t kFuAHeaderSize = 2;
+constexpr size_t kLengthFieldSize = 2;
+
+// Bit masks for FU (A and B) indicators.
+enum NalDefs : uint8_t { kFBit = 0x80, kNriMask = 0x60, kTypeMask = 0x1F };
+
+// Bit masks for FU (A and B) headers.
+enum FuDefs : uint8_t { kSBit = 0x80, kEBit = 0x40, kRBit = 0x20 };
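+
+// For reference (RFC 6184), the two FU-A payload header octets that these
+// masks address (the R bit is reserved and must be zero):
+//
+//   FU indicator: |F|NRI| Type=28 |
+//   FU header:    |S|E|R|  Type   |   (Type = original NAL unit type)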
+
+} // namespace
+
+RtpPacketizerH264::RtpPacketizerH264(rtc::ArrayView<const uint8_t> payload,
+ PayloadSizeLimits limits,
+ H264PacketizationMode packetization_mode)
+ : limits_(limits), num_packets_left_(0) {
+ // Guard against uninitialized memory in packetization_mode.
+ RTC_CHECK(packetization_mode == H264PacketizationMode::NonInterleaved ||
+ packetization_mode == H264PacketizationMode::SingleNalUnit);
+
+ for (const auto& nalu :
+ H264::FindNaluIndices(payload.data(), payload.size())) {
+ input_fragments_.push_back(
+ payload.subview(nalu.payload_start_offset, nalu.payload_size));
+ }
+
+ if (!GeneratePackets(packetization_mode)) {
+ // If failed to generate all the packets, discard already generated
+ // packets in case the caller would ignore return value and still try to
+ // call NextPacket().
+ num_packets_left_ = 0;
+ while (!packets_.empty()) {
+ packets_.pop();
+ }
+ }
+}
+
+RtpPacketizerH264::~RtpPacketizerH264() = default;
+
+size_t RtpPacketizerH264::NumPackets() const {
+ return num_packets_left_;
+}
+
+bool RtpPacketizerH264::GeneratePackets(
+ H264PacketizationMode packetization_mode) {
+ for (size_t i = 0; i < input_fragments_.size();) {
+ switch (packetization_mode) {
+ case H264PacketizationMode::SingleNalUnit:
+ if (!PacketizeSingleNalu(i))
+ return false;
+ ++i;
+ break;
+ case H264PacketizationMode::NonInterleaved:
+ int fragment_len = input_fragments_[i].size();
+ int single_packet_capacity = limits_.max_payload_len;
+ if (input_fragments_.size() == 1)
+ single_packet_capacity -= limits_.single_packet_reduction_len;
+ else if (i == 0)
+ single_packet_capacity -= limits_.first_packet_reduction_len;
+ else if (i + 1 == input_fragments_.size())
+ single_packet_capacity -= limits_.last_packet_reduction_len;
+
+ if (fragment_len > single_packet_capacity) {
+ if (!PacketizeFuA(i))
+ return false;
+ ++i;
+ } else {
+ i = PacketizeStapA(i);
+ }
+ break;
+ }
+ }
+ return true;
+}
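+
+// To illustrate the split above (sizes are hypothetical): in NonInterleaved
+// mode with max_payload_len = 1200, a 5000-byte NALU exceeds the single
+// packet capacity and is fragmented via PacketizeFuA(), while three
+// consecutive 300-byte NALUs fit together and are aggregated via
+// PacketizeStapA().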
+
+bool RtpPacketizerH264::PacketizeFuA(size_t fragment_index) {
+ // Fragment payload into packets (FU-A).
+ rtc::ArrayView<const uint8_t> fragment = input_fragments_[fragment_index];
+
+ PayloadSizeLimits limits = limits_;
+ // Leave room for the FU-A header.
+ limits.max_payload_len -= kFuAHeaderSize;
+ // Update single/first/last packet reductions unless it is single/first/last
+ // fragment.
+ if (input_fragments_.size() != 1) {
+ // If this fragment is put into a single packet, that packet might still
+ // be the first or the last packet in the whole sequence of packets.
+ if (fragment_index == input_fragments_.size() - 1) {
+ limits.single_packet_reduction_len = limits_.last_packet_reduction_len;
+ } else if (fragment_index == 0) {
+ limits.single_packet_reduction_len = limits_.first_packet_reduction_len;
+ } else {
+ limits.single_packet_reduction_len = 0;
+ }
+ }
+ if (fragment_index != 0)
+ limits.first_packet_reduction_len = 0;
+ if (fragment_index != input_fragments_.size() - 1)
+ limits.last_packet_reduction_len = 0;
+
+ // Strip out the original header.
+ size_t payload_left = fragment.size() - kNalHeaderSize;
+ int offset = kNalHeaderSize;
+
+ std::vector<int> payload_sizes = SplitAboutEqually(payload_left, limits);
+ if (payload_sizes.empty())
+ return false;
+
+ for (size_t i = 0; i < payload_sizes.size(); ++i) {
+ int packet_length = payload_sizes[i];
+ RTC_CHECK_GT(packet_length, 0);
+ packets_.push(PacketUnit(fragment.subview(offset, packet_length),
+ /*first_fragment=*/i == 0,
+ /*last_fragment=*/i == payload_sizes.size() - 1,
+ false, fragment[0]));
+ offset += packet_length;
+ payload_left -= packet_length;
+ }
+ num_packets_left_ += payload_sizes.size();
+ RTC_CHECK_EQ(0, payload_left);
+ return true;
+}
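+
+// Worked FU-A example (hypothetical sizes): a 2000-byte NALU with
+// max_payload_len = 1200 and no reductions leaves 1999 bytes once the
+// original NAL header is stripped, split against a per-packet budget of
+// 1200 - kFuAHeaderSize = 1198 bytes. SplitAboutEqually() yields {999, 1000},
+// so the NALU is sent as two FU-A packets carrying 1001 and 1002 payload
+// bytes (the fragment plus the 2-byte FU-A header).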
+
+size_t RtpPacketizerH264::PacketizeStapA(size_t fragment_index) {
+ // Aggregate fragments into one packet (STAP-A).
+ size_t payload_size_left = limits_.max_payload_len;
+ if (input_fragments_.size() == 1)
+ payload_size_left -= limits_.single_packet_reduction_len;
+ else if (fragment_index == 0)
+ payload_size_left -= limits_.first_packet_reduction_len;
+ int aggregated_fragments = 0;
+ size_t fragment_headers_length = 0;
+ rtc::ArrayView<const uint8_t> fragment = input_fragments_[fragment_index];
+ RTC_CHECK_GE(payload_size_left, fragment.size());
+ ++num_packets_left_;
+
+ auto payload_size_needed = [&] {
+ size_t fragment_size = fragment.size() + fragment_headers_length;
+ if (input_fragments_.size() == 1) {
+ // Single fragment, single packet, payload_size_left already adjusted
+ // with limits_.single_packet_reduction_len.
+ return fragment_size;
+ }
+ if (fragment_index == input_fragments_.size() - 1) {
+ // Last fragment, so STAP-A might be the last packet.
+ return fragment_size + limits_.last_packet_reduction_len;
+ }
+ return fragment_size;
+ };
+
+ while (payload_size_left >= payload_size_needed()) {
+ RTC_CHECK_GT(fragment.size(), 0);
+ packets_.push(PacketUnit(fragment, aggregated_fragments == 0, false, true,
+ fragment[0]));
+ payload_size_left -= fragment.size();
+ payload_size_left -= fragment_headers_length;
+
+ fragment_headers_length = kLengthFieldSize;
+ // If we are going to try to aggregate more fragments into this packet
+ // we need to add the STAP-A NALU header and a length field for the first
+ // NALU of this packet.
+ if (aggregated_fragments == 0)
+ fragment_headers_length += kNalHeaderSize + kLengthFieldSize;
+ ++aggregated_fragments;
+
+ // Next fragment.
+ ++fragment_index;
+ if (fragment_index == input_fragments_.size())
+ break;
+ fragment = input_fragments_[fragment_index];
+ }
+ RTC_CHECK_GT(aggregated_fragments, 0);
+ packets_.back().last_fragment = true;
+ return fragment_index;
+}
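+
+// The resulting STAP-A payload, assembled later by NextAggregatePacket(),
+// is laid out as (RFC 6184):
+//
+//   [STAP-A NAL header][size #1][NALU #1][size #2][NALU #2]...
+//
+// where each size field is a 2-byte big-endian NALU length.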
+
+bool RtpPacketizerH264::PacketizeSingleNalu(size_t fragment_index) {
+ // Add a single NALU to the queue, no aggregation.
+ size_t payload_size_left = limits_.max_payload_len;
+ if (input_fragments_.size() == 1)
+ payload_size_left -= limits_.single_packet_reduction_len;
+ else if (fragment_index == 0)
+ payload_size_left -= limits_.first_packet_reduction_len;
+ else if (fragment_index + 1 == input_fragments_.size())
+ payload_size_left -= limits_.last_packet_reduction_len;
+ rtc::ArrayView<const uint8_t> fragment = input_fragments_[fragment_index];
+ if (payload_size_left < fragment.size()) {
+ RTC_LOG(LS_ERROR) << "Failed to fit a fragment to packet in SingleNalu "
+ "packetization mode. Payload size left "
+ << payload_size_left << ", fragment length "
+ << fragment.size() << ", packet capacity "
+ << limits_.max_payload_len;
+ return false;
+ }
+ RTC_CHECK_GT(fragment.size(), 0u);
+ packets_.push(PacketUnit(fragment, true /* first */, true /* last */,
+ false /* aggregated */, fragment[0]));
+ ++num_packets_left_;
+ return true;
+}
+
+bool RtpPacketizerH264::NextPacket(RtpPacketToSend* rtp_packet) {
+ RTC_DCHECK(rtp_packet);
+ if (packets_.empty()) {
+ return false;
+ }
+
+ PacketUnit packet = packets_.front();
+ if (packet.first_fragment && packet.last_fragment) {
+ // Single NAL unit packet.
+ size_t bytes_to_send = packet.source_fragment.size();
+ uint8_t* buffer = rtp_packet->AllocatePayload(bytes_to_send);
+ memcpy(buffer, packet.source_fragment.data(), bytes_to_send);
+ packets_.pop();
+ input_fragments_.pop_front();
+ } else if (packet.aggregated) {
+ NextAggregatePacket(rtp_packet);
+ } else {
+ NextFragmentPacket(rtp_packet);
+ }
+ rtp_packet->SetMarker(packets_.empty());
+ --num_packets_left_;
+ return true;
+}
+
+void RtpPacketizerH264::NextAggregatePacket(RtpPacketToSend* rtp_packet) {
+ // Reserve maximum available payload, set actual payload size later.
+ size_t payload_capacity = rtp_packet->FreeCapacity();
+ RTC_CHECK_GE(payload_capacity, kNalHeaderSize);
+ uint8_t* buffer = rtp_packet->AllocatePayload(payload_capacity);
+ RTC_DCHECK(buffer);
+ PacketUnit* packet = &packets_.front();
+ RTC_CHECK(packet->first_fragment);
+ // STAP-A NALU header.
+ buffer[0] = (packet->header & (kFBit | kNriMask)) | H264::NaluType::kStapA;
+ size_t index = kNalHeaderSize;
+ bool is_last_fragment = packet->last_fragment;
+ while (packet->aggregated) {
+ rtc::ArrayView<const uint8_t> fragment = packet->source_fragment;
+ RTC_CHECK_LE(index + kLengthFieldSize + fragment.size(), payload_capacity);
+ // Add NAL unit length field.
+ ByteWriter<uint16_t>::WriteBigEndian(&buffer[index], fragment.size());
+ index += kLengthFieldSize;
+ // Add NAL unit.
+ memcpy(&buffer[index], fragment.data(), fragment.size());
+ index += fragment.size();
+ packets_.pop();
+ input_fragments_.pop_front();
+ if (is_last_fragment)
+ break;
+ packet = &packets_.front();
+ is_last_fragment = packet->last_fragment;
+ }
+ RTC_CHECK(is_last_fragment);
+ rtp_packet->SetPayloadSize(index);
+}
+
+void RtpPacketizerH264::NextFragmentPacket(RtpPacketToSend* rtp_packet) {
+ PacketUnit* packet = &packets_.front();
+ // NAL unit fragmented over multiple packets (FU-A).
+ // We do not send original NALU header, so it will be replaced by the
+ // FU indicator header of the first packet.
+ uint8_t fu_indicator =
+ (packet->header & (kFBit | kNriMask)) | H264::NaluType::kFuA;
+ uint8_t fu_header = 0;
+
+ // S | E | R | 5 bit type.
+ fu_header |= (packet->first_fragment ? kSBit : 0);
+ fu_header |= (packet->last_fragment ? kEBit : 0);
+ uint8_t type = packet->header & kTypeMask;
+ fu_header |= type;
+ rtc::ArrayView<const uint8_t> fragment = packet->source_fragment;
+ uint8_t* buffer =
+ rtp_packet->AllocatePayload(kFuAHeaderSize + fragment.size());
+ buffer[0] = fu_indicator;
+ buffer[1] = fu_header;
+ memcpy(buffer + kFuAHeaderSize, fragment.data(), fragment.size());
+ if (packet->last_fragment)
+ input_fragments_.pop_front();
+ packets_.pop();
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_format_h264.h b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_format_h264.h
new file mode 100644
index 0000000000..283beacb19
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_format_h264.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_RTP_RTCP_SOURCE_RTP_FORMAT_H264_H_
+#define MODULES_RTP_RTCP_SOURCE_RTP_FORMAT_H264_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <deque>
+#include <memory>
+#include <queue>
+
+#include "api/array_view.h"
+#include "modules/rtp_rtcp/source/rtp_format.h"
+#include "modules/rtp_rtcp/source/rtp_packet_to_send.h"
+#include "modules/video_coding/codecs/h264/include/h264_globals.h"
+#include "rtc_base/buffer.h"
+
+namespace webrtc {
+
+class RtpPacketizerH264 : public RtpPacketizer {
+ public:
+ // Initialize with payload from encoder.
+ // The payload must be exactly one encoded H264 frame.
+ RtpPacketizerH264(rtc::ArrayView<const uint8_t> payload,
+ PayloadSizeLimits limits,
+ H264PacketizationMode packetization_mode);
+
+ ~RtpPacketizerH264() override;
+
+ RtpPacketizerH264(const RtpPacketizerH264&) = delete;
+ RtpPacketizerH264& operator=(const RtpPacketizerH264&) = delete;
+
+ size_t NumPackets() const override;
+
+ // Get the next payload with H264 payload header.
+ // Write payload and set marker bit of the `packet`.
+ // Returns true on success, false otherwise.
+ bool NextPacket(RtpPacketToSend* rtp_packet) override;
+
+ private:
+ // A packet unit (H264 packet), to be put into an RTP packet:
+ // If a NAL unit is too large for an RTP packet, this packet unit will
+ // represent a FU-A packet of a single fragment of the NAL unit.
+ // If a NAL unit is small enough to fit within a single RTP packet, this
+ // packet unit may represent a single NAL unit or a STAP-A packet, of which
+ // there may be multiple in a single RTP packet (if so, aggregated = true).
+ struct PacketUnit {
+ PacketUnit(rtc::ArrayView<const uint8_t> source_fragment,
+ bool first_fragment,
+ bool last_fragment,
+ bool aggregated,
+ uint8_t header)
+ : source_fragment(source_fragment),
+ first_fragment(first_fragment),
+ last_fragment(last_fragment),
+ aggregated(aggregated),
+ header(header) {}
+
+ rtc::ArrayView<const uint8_t> source_fragment;
+ bool first_fragment;
+ bool last_fragment;
+ bool aggregated;
+ uint8_t header;
+ };
+
+ bool GeneratePackets(H264PacketizationMode packetization_mode);
+ bool PacketizeFuA(size_t fragment_index);
+ size_t PacketizeStapA(size_t fragment_index);
+ bool PacketizeSingleNalu(size_t fragment_index);
+
+ void NextAggregatePacket(RtpPacketToSend* rtp_packet);
+ void NextFragmentPacket(RtpPacketToSend* rtp_packet);
+
+ const PayloadSizeLimits limits_;
+ size_t num_packets_left_;
+ std::deque<rtc::ArrayView<const uint8_t>> input_fragments_;
+ std::queue<PacketUnit> packets_;
+};
+} // namespace webrtc
+#endif // MODULES_RTP_RTCP_SOURCE_RTP_FORMAT_H264_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_format_h264_unittest.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_format_h264_unittest.cc
new file mode 100644
index 0000000000..d2171963f3
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_format_h264_unittest.cc
@@ -0,0 +1,502 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtp_format_h264.h"
+
+#include <memory>
+#include <vector>
+
+#include "absl/algorithm/container.h"
+#include "api/array_view.h"
+#include "common_video/h264/h264_common.h"
+#include "modules/rtp_rtcp/mocks/mock_rtp_rtcp.h"
+#include "modules/rtp_rtcp/source/byte_io.h"
+#include "modules/rtp_rtcp/source/rtp_packet_to_send.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+using ::testing::Each;
+using ::testing::ElementsAre;
+using ::testing::ElementsAreArray;
+using ::testing::Eq;
+using ::testing::IsEmpty;
+using ::testing::SizeIs;
+
+constexpr RtpPacketToSend::ExtensionManager* kNoExtensions = nullptr;
+constexpr size_t kMaxPayloadSize = 1200;
+constexpr size_t kLengthFieldLength = 2;
+constexpr RtpPacketizer::PayloadSizeLimits kNoLimits;
+
+enum Nalu {
+ kSlice = 1,
+ kIdr = 5,
+ kSei = 6,
+ kSps = 7,
+ kPps = 8,
+ kStapA = 24,
+ kFuA = 28
+};
+
+static const size_t kNalHeaderSize = 1;
+static const size_t kFuAHeaderSize = 2;
+
+// Bit masks for FU (A and B) indicators.
+enum NalDefs { kFBit = 0x80, kNriMask = 0x60, kTypeMask = 0x1F };
+
+// Bit masks for FU (A and B) headers.
+enum FuDefs { kSBit = 0x80, kEBit = 0x40, kRBit = 0x20 };
+
+// Creates Buffer that looks like nal unit of given size.
+rtc::Buffer GenerateNalUnit(size_t size) {
+ RTC_CHECK_GT(size, 0);
+ rtc::Buffer buffer(size);
+ // Set some valid header.
+ buffer[0] = kSlice;
+ for (size_t i = 1; i < size; ++i) {
+ buffer[i] = static_cast<uint8_t>(i);
+ }
+ // The last byte shouldn't be 0, or it may be counted as part of the next
+ // 4-byte start sequence.
+ buffer[size - 1] |= 0x10;
+ return buffer;
+}
+
+// Creates a frame consisting of NAL units of the given sizes.
+rtc::Buffer CreateFrame(std::initializer_list<size_t> nalu_sizes) {
+ static constexpr int kStartCodeSize = 3;
+ rtc::Buffer frame(absl::c_accumulate(nalu_sizes, 0) +
+ kStartCodeSize * nalu_sizes.size());
+ size_t offset = 0;
+ for (size_t nalu_size : nalu_sizes) {
+ EXPECT_GE(nalu_size, 1u);
+ // Insert nalu start code
+ frame[offset] = 0;
+ frame[offset + 1] = 0;
+ frame[offset + 2] = 1;
+ // Set some valid header.
+ frame[offset + 3] = 1;
+ // Fill payload avoiding accidental start codes
+ if (nalu_size > 1) {
+ memset(frame.data() + offset + 4, 0x3f, nalu_size - 1);
+ }
+ offset += (kStartCodeSize + nalu_size);
+ }
+ return frame;
+}
+
+// Creates a frame consisting of the given NAL units.
+rtc::Buffer CreateFrame(rtc::ArrayView<const rtc::Buffer> nalus) {
+ static constexpr int kStartCodeSize = 3;
+ int frame_size = 0;
+ for (const rtc::Buffer& nalu : nalus) {
+ frame_size += (kStartCodeSize + nalu.size());
+ }
+ rtc::Buffer frame(frame_size);
+ size_t offset = 0;
+ for (const rtc::Buffer& nalu : nalus) {
+ // Insert nalu start code
+ frame[offset] = 0;
+ frame[offset + 1] = 0;
+ frame[offset + 2] = 1;
+ // Copy the NAL unit.
+ memcpy(frame.data() + offset + 3, nalu.data(), nalu.size());
+ offset += (kStartCodeSize + nalu.size());
+ }
+ return frame;
+}
+
+std::vector<RtpPacketToSend> FetchAllPackets(RtpPacketizerH264* packetizer) {
+ std::vector<RtpPacketToSend> result;
+ size_t num_packets = packetizer->NumPackets();
+ result.reserve(num_packets);
+ RtpPacketToSend packet(kNoExtensions);
+ while (packetizer->NextPacket(&packet)) {
+ result.push_back(packet);
+ }
+ EXPECT_THAT(result, SizeIs(num_packets));
+ return result;
+}
+
+// Tests that should work with both packetization mode 0 and
+// packetization mode 1.
+class RtpPacketizerH264ModeTest
+ : public ::testing::TestWithParam<H264PacketizationMode> {};
+
+TEST_P(RtpPacketizerH264ModeTest, SingleNalu) {
+ const uint8_t frame[] = {0, 0, 1, kIdr, 0xFF};
+
+ RtpPacketizerH264 packetizer(frame, kNoLimits, GetParam());
+ std::vector<RtpPacketToSend> packets = FetchAllPackets(&packetizer);
+
+ ASSERT_THAT(packets, SizeIs(1));
+ EXPECT_THAT(packets[0].payload(), ElementsAre(kIdr, 0xFF));
+}
+
+TEST_P(RtpPacketizerH264ModeTest, SingleNaluTwoPackets) {
+ RtpPacketizer::PayloadSizeLimits limits;
+ limits.max_payload_len = kMaxPayloadSize;
+ rtc::Buffer nalus[] = {GenerateNalUnit(kMaxPayloadSize),
+ GenerateNalUnit(100)};
+ rtc::Buffer frame = CreateFrame(nalus);
+
+ RtpPacketizerH264 packetizer(frame, limits, GetParam());
+ std::vector<RtpPacketToSend> packets = FetchAllPackets(&packetizer);
+
+ ASSERT_THAT(packets, SizeIs(2));
+ EXPECT_THAT(packets[0].payload(), ElementsAreArray(nalus[0]));
+ EXPECT_THAT(packets[1].payload(), ElementsAreArray(nalus[1]));
+}
+
+TEST_P(RtpPacketizerH264ModeTest,
+ SingleNaluFirstPacketReductionAppliesOnlyToFirstFragment) {
+ RtpPacketizer::PayloadSizeLimits limits;
+ limits.max_payload_len = 200;
+ limits.first_packet_reduction_len = 5;
+ rtc::Buffer nalus[] = {GenerateNalUnit(/*size=*/195),
+ GenerateNalUnit(/*size=*/200),
+ GenerateNalUnit(/*size=*/200)};
+ rtc::Buffer frame = CreateFrame(nalus);
+
+ RtpPacketizerH264 packetizer(frame, limits, GetParam());
+ std::vector<RtpPacketToSend> packets = FetchAllPackets(&packetizer);
+
+ ASSERT_THAT(packets, SizeIs(3));
+ EXPECT_THAT(packets[0].payload(), ElementsAreArray(nalus[0]));
+ EXPECT_THAT(packets[1].payload(), ElementsAreArray(nalus[1]));
+ EXPECT_THAT(packets[2].payload(), ElementsAreArray(nalus[2]));
+}
+
+TEST_P(RtpPacketizerH264ModeTest,
+ SingleNaluLastPacketReductionAppliesOnlyToLastFragment) {
+ RtpPacketizer::PayloadSizeLimits limits;
+ limits.max_payload_len = 200;
+ limits.last_packet_reduction_len = 5;
+ rtc::Buffer nalus[] = {GenerateNalUnit(/*size=*/200),
+ GenerateNalUnit(/*size=*/200),
+ GenerateNalUnit(/*size=*/195)};
+ rtc::Buffer frame = CreateFrame(nalus);
+
+ RtpPacketizerH264 packetizer(frame, limits, GetParam());
+ std::vector<RtpPacketToSend> packets = FetchAllPackets(&packetizer);
+
+ ASSERT_THAT(packets, SizeIs(3));
+ EXPECT_THAT(packets[0].payload(), ElementsAreArray(nalus[0]));
+ EXPECT_THAT(packets[1].payload(), ElementsAreArray(nalus[1]));
+ EXPECT_THAT(packets[2].payload(), ElementsAreArray(nalus[2]));
+}
+
+TEST_P(RtpPacketizerH264ModeTest,
+ SingleNaluFirstAndLastPacketReductionSumsForSinglePacket) {
+ RtpPacketizer::PayloadSizeLimits limits;
+ limits.max_payload_len = 200;
+ limits.first_packet_reduction_len = 20;
+ limits.last_packet_reduction_len = 30;
+ rtc::Buffer frame = CreateFrame({150});
+
+ RtpPacketizerH264 packetizer(frame, limits, GetParam());
+ std::vector<RtpPacketToSend> packets = FetchAllPackets(&packetizer);
+
+ EXPECT_THAT(packets, SizeIs(1));
+}
+
+INSTANTIATE_TEST_SUITE_P(
+ PacketMode,
+ RtpPacketizerH264ModeTest,
+ ::testing::Values(H264PacketizationMode::SingleNalUnit,
+ H264PacketizationMode::NonInterleaved));
+
+// Aggregation tests.
+TEST(RtpPacketizerH264Test, StapA) {
+ rtc::Buffer nalus[] = {GenerateNalUnit(/*size=*/2),
+ GenerateNalUnit(/*size=*/2),
+ GenerateNalUnit(/*size=*/0x123)};
+ rtc::Buffer frame = CreateFrame(nalus);
+
+ RtpPacketizerH264 packetizer(frame, kNoLimits,
+ H264PacketizationMode::NonInterleaved);
+ std::vector<RtpPacketToSend> packets = FetchAllPackets(&packetizer);
+
+ ASSERT_THAT(packets, SizeIs(1));
+ auto payload = packets[0].payload();
+ EXPECT_EQ(payload.size(),
+ kNalHeaderSize + 3 * kLengthFieldLength + 2 + 2 + 0x123);
+
+ EXPECT_EQ(payload[0], kStapA);
+ payload = payload.subview(kNalHeaderSize);
+ // 1st fragment.
+ EXPECT_THAT(payload.subview(0, kLengthFieldLength),
+ ElementsAre(0, 2)); // Size.
+ EXPECT_THAT(payload.subview(kLengthFieldLength, 2),
+ ElementsAreArray(nalus[0]));
+ payload = payload.subview(kLengthFieldLength + 2);
+ // 2nd fragment.
+ EXPECT_THAT(payload.subview(0, kLengthFieldLength),
+ ElementsAre(0, 2)); // Size.
+ EXPECT_THAT(payload.subview(kLengthFieldLength, 2),
+ ElementsAreArray(nalus[1]));
+ payload = payload.subview(kLengthFieldLength + 2);
+ // 3rd fragment.
+ EXPECT_THAT(payload.subview(0, kLengthFieldLength),
+ ElementsAre(0x1, 0x23)); // Size.
+ EXPECT_THAT(payload.subview(kLengthFieldLength), ElementsAreArray(nalus[2]));
+}
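+
+// For reference, the STAP-A layout verified above can be produced by a
+// helper like this sketch (an illustration of the expected wire format, not
+// the packetizer's implementation): one NAL header byte of type STAP-A,
+// then, for each aggregated unit, a 16-bit big-endian length followed by
+// the NAL unit itself.
+rtc::Buffer BuildStapAPayload(rtc::ArrayView<const rtc::Buffer> nalus) {
+  rtc::Buffer payload;
+  const uint8_t stap_a_header = kStapA;
+  payload.AppendData(&stap_a_header, 1);  // NAL header, type 24 (STAP-A).
+  for (const rtc::Buffer& nalu : nalus) {
+    uint8_t length[2] = {static_cast<uint8_t>(nalu.size() >> 8),
+                         static_cast<uint8_t>(nalu.size() & 0xFF)};
+    payload.AppendData(length, 2);  // 2-byte NALU size field.
+    payload.AppendData(nalu);       // The NAL unit, including its header.
+  }
+  return payload;
+}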
+
+TEST(RtpPacketizerH264Test, SingleNalUnitModeHasNoStapA) {
+ // This is the same setup as for the StapA test.
+ rtc::Buffer frame = CreateFrame({2, 2, 0x123});
+
+ RtpPacketizerH264 packetizer(frame, kNoLimits,
+ H264PacketizationMode::SingleNalUnit);
+ std::vector<RtpPacketToSend> packets = FetchAllPackets(&packetizer);
+
+ // The three fragments should be returned as three packets.
+ ASSERT_THAT(packets, SizeIs(3));
+ EXPECT_EQ(packets[0].payload_size(), 2u);
+ EXPECT_EQ(packets[1].payload_size(), 2u);
+ EXPECT_EQ(packets[2].payload_size(), 0x123u);
+}
+
+TEST(RtpPacketizerH264Test, StapARespectsFirstPacketReduction) {
+ RtpPacketizer::PayloadSizeLimits limits;
+ limits.max_payload_len = 1000;
+ limits.first_packet_reduction_len = 100;
+ const size_t kFirstFragmentSize =
+ limits.max_payload_len - limits.first_packet_reduction_len;
+ rtc::Buffer nalus[] = {GenerateNalUnit(/*size=*/kFirstFragmentSize),
+ GenerateNalUnit(/*size=*/2),
+ GenerateNalUnit(/*size=*/2)};
+ rtc::Buffer frame = CreateFrame(nalus);
+
+ RtpPacketizerH264 packetizer(frame, limits,
+ H264PacketizationMode::NonInterleaved);
+ std::vector<RtpPacketToSend> packets = FetchAllPackets(&packetizer);
+
+ ASSERT_THAT(packets, SizeIs(2));
+ // Expect 1st packet is single nalu.
+ EXPECT_THAT(packets[0].payload(), ElementsAreArray(nalus[0]));
+ // Expect 2nd packet is aggregate of last two fragments.
+ EXPECT_THAT(packets[1].payload(),
+ ElementsAre(kStapA, //
+ 0, 2, nalus[1][0], nalus[1][1], //
+ 0, 2, nalus[2][0], nalus[2][1]));
+}
+
+TEST(RtpPacketizerH264Test, StapARespectsLastPacketReduction) {
+ RtpPacketizer::PayloadSizeLimits limits;
+ limits.max_payload_len = 1000;
+ limits.last_packet_reduction_len = 100;
+ const size_t kLastFragmentSize =
+ limits.max_payload_len - limits.last_packet_reduction_len;
+ rtc::Buffer nalus[] = {GenerateNalUnit(/*size=*/2),
+ GenerateNalUnit(/*size=*/2),
+ GenerateNalUnit(/*size=*/kLastFragmentSize)};
+ rtc::Buffer frame = CreateFrame(nalus);
+
+ RtpPacketizerH264 packetizer(frame, limits,
+ H264PacketizationMode::NonInterleaved);
+ std::vector<RtpPacketToSend> packets = FetchAllPackets(&packetizer);
+
+ ASSERT_THAT(packets, SizeIs(2));
+ // Expect 1st packet is aggregate of 1st two fragments.
+ EXPECT_THAT(packets[0].payload(),
+ ElementsAre(kStapA, //
+ 0, 2, nalus[0][0], nalus[0][1], //
+ 0, 2, nalus[1][0], nalus[1][1]));
+ // Expect 2nd packet is single nalu.
+ EXPECT_THAT(packets[1].payload(), ElementsAreArray(nalus[2]));
+}
+
+TEST(RtpPacketizerH264Test, TooSmallForStapAHeaders) {
+ RtpPacketizer::PayloadSizeLimits limits;
+ limits.max_payload_len = 1000;
+ const size_t kLastFragmentSize =
+ limits.max_payload_len - 3 * kLengthFieldLength - 4;
+ rtc::Buffer nalus[] = {GenerateNalUnit(/*size=*/2),
+ GenerateNalUnit(/*size=*/2),
+ GenerateNalUnit(/*size=*/kLastFragmentSize)};
+ rtc::Buffer frame = CreateFrame(nalus);
+
+ RtpPacketizerH264 packetizer(frame, limits,
+ H264PacketizationMode::NonInterleaved);
+ std::vector<RtpPacketToSend> packets = FetchAllPackets(&packetizer);
+
+ ASSERT_THAT(packets, SizeIs(2));
+ // Expect 1st packet is aggregate of 1st two fragments.
+ EXPECT_THAT(packets[0].payload(),
+ ElementsAre(kStapA, //
+ 0, 2, nalus[0][0], nalus[0][1], //
+ 0, 2, nalus[1][0], nalus[1][1]));
+ // Expect 2nd packet is single nalu.
+ EXPECT_THAT(packets[1].payload(), ElementsAreArray(nalus[2]));
+}
+
+// Fragmentation + aggregation.
+TEST(RtpPacketizerH264Test, MixedStapAFUA) {
+ RtpPacketizer::PayloadSizeLimits limits;
+ limits.max_payload_len = 100;
+ const size_t kFuaPayloadSize = 70;
+ const size_t kFuaNaluSize = kNalHeaderSize + 2 * kFuaPayloadSize;
+ const size_t kStapANaluSize = 20;
+ rtc::Buffer nalus[] = {GenerateNalUnit(kFuaNaluSize),
+ GenerateNalUnit(kStapANaluSize),
+ GenerateNalUnit(kStapANaluSize)};
+ rtc::Buffer frame = CreateFrame(nalus);
+
+ RtpPacketizerH264 packetizer(frame, limits,
+ H264PacketizationMode::NonInterleaved);
+ std::vector<RtpPacketToSend> packets = FetchAllPackets(&packetizer);
+
+ ASSERT_THAT(packets, SizeIs(3));
+ // First expect two FU-A packets.
+ EXPECT_THAT(packets[0].payload().subview(0, kFuAHeaderSize),
+ ElementsAre(kFuA, FuDefs::kSBit | nalus[0][0]));
+ EXPECT_THAT(
+ packets[0].payload().subview(kFuAHeaderSize),
+ ElementsAreArray(nalus[0].data() + kNalHeaderSize, kFuaPayloadSize));
+
+ EXPECT_THAT(packets[1].payload().subview(0, kFuAHeaderSize),
+ ElementsAre(kFuA, FuDefs::kEBit | nalus[0][0]));
+ EXPECT_THAT(
+ packets[1].payload().subview(kFuAHeaderSize),
+ ElementsAreArray(nalus[0].data() + kNalHeaderSize + kFuaPayloadSize,
+ kFuaPayloadSize));
+
+ // Then expect one STAP-A packet with two nal units.
+ EXPECT_THAT(packets[2].payload()[0], kStapA);
+ auto payload = packets[2].payload().subview(kNalHeaderSize);
+ EXPECT_THAT(payload.subview(0, kLengthFieldLength),
+ ElementsAre(0, kStapANaluSize));
+ EXPECT_THAT(payload.subview(kLengthFieldLength, kStapANaluSize),
+ ElementsAreArray(nalus[1]));
+ payload = payload.subview(kLengthFieldLength + kStapANaluSize);
+ EXPECT_THAT(payload.subview(0, kLengthFieldLength),
+ ElementsAre(0, kStapANaluSize));
+ EXPECT_THAT(payload.subview(kLengthFieldLength), ElementsAreArray(nalus[2]));
+}
+
+TEST(RtpPacketizerH264Test, LastFragmentFitsInSingleButNotLastPacket) {
+ RtpPacketizer::PayloadSizeLimits limits;
+ limits.max_payload_len = 1178;
+ limits.first_packet_reduction_len = 0;
+ limits.last_packet_reduction_len = 20;
+ limits.single_packet_reduction_len = 20;
+  // Actual sizes that triggered this bug.
+ rtc::Buffer frame = CreateFrame({20, 8, 18, 1161});
+
+ RtpPacketizerH264 packetizer(frame, limits,
+ H264PacketizationMode::NonInterleaved);
+ std::vector<RtpPacketToSend> packets = FetchAllPackets(&packetizer);
+
+  // The last packet must respect its size limit.
+  // An incorrect implementation might miss this constraint and fail to split
+  // the last fragment into two packets.
+ EXPECT_LE(static_cast<int>(packets.back().payload_size()),
+ limits.max_payload_len - limits.last_packet_reduction_len);
+}
+
+// Splits a frame consisting of a single NAL unit with `frame_payload_size`
+// payload bytes into FU-A packets.
+// Returns the sizes of the packet payloads excluding the FU-A headers.
+std::vector<int> TestFua(size_t frame_payload_size,
+ const RtpPacketizer::PayloadSizeLimits& limits) {
+ rtc::Buffer nalu[] = {GenerateNalUnit(kNalHeaderSize + frame_payload_size)};
+ rtc::Buffer frame = CreateFrame(nalu);
+
+ RtpPacketizerH264 packetizer(frame, limits,
+ H264PacketizationMode::NonInterleaved);
+ std::vector<RtpPacketToSend> packets = FetchAllPackets(&packetizer);
+
+  EXPECT_GE(packets.size(), 2u);  // A single packet would mean no FU-A was used.
+ std::vector<uint16_t> fua_header;
+ std::vector<int> payload_sizes;
+
+ for (const RtpPacketToSend& packet : packets) {
+ auto payload = packet.payload();
+ EXPECT_GT(payload.size(), kFuAHeaderSize);
+ fua_header.push_back((payload[0] << 8) | payload[1]);
+ payload_sizes.push_back(payload.size() - kFuAHeaderSize);
+ }
+
+ EXPECT_TRUE(fua_header.front() & FuDefs::kSBit);
+ EXPECT_TRUE(fua_header.back() & FuDefs::kEBit);
+  // Clear the S and E bits before checking that every packet duplicates the
+  // same original header.
+ fua_header.front() &= ~FuDefs::kSBit;
+ fua_header.back() &= ~FuDefs::kEBit;
+ EXPECT_THAT(fua_header, Each(Eq((kFuA << 8) | nalu[0][0])));
+
+ return payload_sizes;
+}
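+
+// For reference, the two-byte FU-A header checked here follows RFC 6184,
+// section 5.8: a FU indicator byte of type 28 (FU-A) followed by a FU header
+// byte carrying the S (start) and E (end) bits plus the original NAL unit
+// type. A sketch of the expected value (an illustration, assuming the
+// original header's F/NRI bits are zero, as for the units produced by
+// GenerateNalUnit):
+uint16_t ExpectedFuaHeader(uint8_t original_nal_header, bool start, bool end) {
+  uint8_t fu_header = (original_nal_header & 0x1F) |  // Original NAL type.
+                      (start ? FuDefs::kSBit : 0) |   // S (start) bit.
+                      (end ? FuDefs::kEBit : 0);      // E (end) bit.
+  return (kFuA << 8) | fu_header;  // FU indicator, then FU header.
+}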
+
+// Fragmentation tests.
+TEST(RtpPacketizerH264Test, FUAOddSize) {
+ RtpPacketizer::PayloadSizeLimits limits;
+ limits.max_payload_len = 1200;
+ EXPECT_THAT(TestFua(1200, limits), ElementsAre(600, 600));
+}
+
+TEST(RtpPacketizerH264Test, FUAWithFirstPacketReduction) {
+ RtpPacketizer::PayloadSizeLimits limits;
+ limits.max_payload_len = 1200;
+ limits.first_packet_reduction_len = 4;
+ limits.single_packet_reduction_len = 4;
+ EXPECT_THAT(TestFua(1198, limits), ElementsAre(597, 601));
+}
+
+TEST(RtpPacketizerH264Test, FUAWithLastPacketReduction) {
+ RtpPacketizer::PayloadSizeLimits limits;
+ limits.max_payload_len = 1200;
+ limits.last_packet_reduction_len = 4;
+ limits.single_packet_reduction_len = 4;
+ EXPECT_THAT(TestFua(1198, limits), ElementsAre(601, 597));
+}
+
+TEST(RtpPacketizerH264Test, FUAWithSinglePacketReduction) {
+ RtpPacketizer::PayloadSizeLimits limits;
+ limits.max_payload_len = 1199;
+ limits.single_packet_reduction_len = 200;
+ EXPECT_THAT(TestFua(1000, limits), ElementsAre(500, 500));
+}
+
+TEST(RtpPacketizerH264Test, FUAEvenSize) {
+ RtpPacketizer::PayloadSizeLimits limits;
+ limits.max_payload_len = 1200;
+ EXPECT_THAT(TestFua(1201, limits), ElementsAre(600, 601));
+}
+
+TEST(RtpPacketizerH264Test, FUARounding) {
+ RtpPacketizer::PayloadSizeLimits limits;
+ limits.max_payload_len = 1448;
+ EXPECT_THAT(TestFua(10123, limits),
+ ElementsAre(1265, 1265, 1265, 1265, 1265, 1266, 1266, 1266));
+}
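+
+// The expectation above is plain ceiling arithmetic: each packet carries at
+// most 1448 - 2 = 1446 payload bytes after the FU-A header, so 10123 bytes
+// need ceil(10123 / 1446) = 8 packets, and 10123 = 5 * 1265 + 3 * 1266
+// splits them as evenly as possible.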
+
+TEST(RtpPacketizerH264Test, FUABig) {
+ RtpPacketizer::PayloadSizeLimits limits;
+ limits.max_payload_len = 1200;
+  // Generate 10 full-sized packets, leaving room for the FU-A headers.
+ EXPECT_THAT(
+ TestFua(10 * (1200 - kFuAHeaderSize), limits),
+ ElementsAre(1198, 1198, 1198, 1198, 1198, 1198, 1198, 1198, 1198, 1198));
+}
+
+TEST(RtpPacketizerH264Test, RejectsOverlongDataInPacketizationMode0) {
+ RtpPacketizer::PayloadSizeLimits limits;
+ rtc::Buffer frame = CreateFrame({kMaxPayloadSize + 1});
+
+ RtpPacketizerH264 packetizer(frame, limits,
+ H264PacketizationMode::SingleNalUnit);
+ std::vector<RtpPacketToSend> packets = FetchAllPackets(&packetizer);
+
+ EXPECT_THAT(packets, IsEmpty());
+}
+} // namespace
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_format_unittest.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_format_unittest.cc
new file mode 100644
index 0000000000..53264c6609
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_format_unittest.cc
@@ -0,0 +1,283 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtp_format.h"
+
+#include <memory>
+#include <numeric>
+
+#include "absl/algorithm/container.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+using ::testing::Each;
+using ::testing::ElementsAre;
+using ::testing::Gt;
+using ::testing::IsEmpty;
+using ::testing::Le;
+using ::testing::Not;
+using ::testing::SizeIs;
+
+// Calculates the difference between the largest and smallest packets,
+// respecting the size adjustments provided by the limits, i.e. the last
+// packet is expected to be smaller than the 'average' by reduction_len.
+int EffectivePacketsSizeDifference(
+ std::vector<int> sizes,
+ const RtpPacketizer::PayloadSizeLimits& limits) {
+ // Account for larger last packet header.
+ sizes.back() += limits.last_packet_reduction_len;
+
+ auto minmax = absl::c_minmax_element(sizes);
+ // MAX-MIN
+ return *minmax.second - *minmax.first;
+}
+
+int Sum(const std::vector<int>& sizes) {
+ return absl::c_accumulate(sizes, 0);
+}
+
+TEST(RtpPacketizerSplitAboutEqually, AllPacketsAreEqualSumToPayloadLen) {
+ RtpPacketizer::PayloadSizeLimits limits;
+ limits.max_payload_len = 5;
+ limits.last_packet_reduction_len = 2;
+
+ std::vector<int> payload_sizes = RtpPacketizer::SplitAboutEqually(13, limits);
+
+ EXPECT_THAT(Sum(payload_sizes), 13);
+}
+
+TEST(RtpPacketizerSplitAboutEqually, AllPacketsAreEqualRespectsMaxPayloadSize) {
+ RtpPacketizer::PayloadSizeLimits limits;
+ limits.max_payload_len = 5;
+ limits.last_packet_reduction_len = 2;
+
+ std::vector<int> payload_sizes = RtpPacketizer::SplitAboutEqually(13, limits);
+
+ EXPECT_THAT(payload_sizes, Each(Le(limits.max_payload_len)));
+}
+
+TEST(RtpPacketizerSplitAboutEqually,
+ AllPacketsAreEqualRespectsFirstPacketReduction) {
+ RtpPacketizer::PayloadSizeLimits limits;
+ limits.max_payload_len = 5;
+ limits.first_packet_reduction_len = 2;
+
+ std::vector<int> payload_sizes = RtpPacketizer::SplitAboutEqually(13, limits);
+
+ ASSERT_THAT(payload_sizes, Not(IsEmpty()));
+ EXPECT_EQ(payload_sizes.front() + limits.first_packet_reduction_len,
+ limits.max_payload_len);
+}
+
+TEST(RtpPacketizerSplitAboutEqually,
+ AllPacketsAreEqualRespectsLastPacketReductionLength) {
+ RtpPacketizer::PayloadSizeLimits limits;
+ limits.max_payload_len = 5;
+ limits.last_packet_reduction_len = 2;
+
+ std::vector<int> payload_sizes = RtpPacketizer::SplitAboutEqually(13, limits);
+
+ ASSERT_THAT(payload_sizes, Not(IsEmpty()));
+ EXPECT_LE(payload_sizes.back() + limits.last_packet_reduction_len,
+ limits.max_payload_len);
+}
+
+TEST(RtpPacketizerSplitAboutEqually, AllPacketsAreEqualInSize) {
+ RtpPacketizer::PayloadSizeLimits limits;
+ limits.max_payload_len = 5;
+ limits.last_packet_reduction_len = 2;
+
+ std::vector<int> payload_sizes = RtpPacketizer::SplitAboutEqually(13, limits);
+
+ EXPECT_EQ(EffectivePacketsSizeDifference(payload_sizes, limits), 0);
+}
+
+TEST(RtpPacketizerSplitAboutEqually,
+ AllPacketsAreEqualGeneratesMinimumNumberOfPackets) {
+ RtpPacketizer::PayloadSizeLimits limits;
+ limits.max_payload_len = 5;
+ limits.last_packet_reduction_len = 2;
+
+ std::vector<int> payload_sizes = RtpPacketizer::SplitAboutEqually(13, limits);
+  // Computed by hand. 3 packets have exactly the required capacity,
+  // 3*5-2=13 (max length for each packet, minus the last packet reduction).
+ EXPECT_THAT(payload_sizes, SizeIs(3));
+}
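+
+// The packet counts in the tests above follow from capacity arithmetic:
+// after folding the first- and last-packet reductions into the payload, the
+// minimum number of packets is a ceiling division. A sketch of that lower
+// bound (an illustration; the real splitter additionally balances the
+// per-packet sizes):
+int MinNumPackets(int payload_len,
+                  const RtpPacketizer::PayloadSizeLimits& limits) {
+  int adjusted_len = payload_len + limits.first_packet_reduction_len +
+                     limits.last_packet_reduction_len;
+  return (adjusted_len + limits.max_payload_len - 1) / limits.max_payload_len;
+}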
+
+TEST(RtpPacketizerSplitAboutEqually, SomePacketsAreSmallerSumToPayloadLen) {
+ RtpPacketizer::PayloadSizeLimits limits;
+ limits.max_payload_len = 7;
+ limits.last_packet_reduction_len = 5;
+
+ std::vector<int> payload_sizes = RtpPacketizer::SplitAboutEqually(28, limits);
+
+ EXPECT_THAT(Sum(payload_sizes), 28);
+}
+
+TEST(RtpPacketizerSplitAboutEqually,
+ SomePacketsAreSmallerRespectsMaxPayloadSize) {
+ RtpPacketizer::PayloadSizeLimits limits;
+ limits.max_payload_len = 7;
+ limits.last_packet_reduction_len = 5;
+
+ std::vector<int> payload_sizes = RtpPacketizer::SplitAboutEqually(28, limits);
+
+ EXPECT_THAT(payload_sizes, Each(Le(limits.max_payload_len)));
+}
+
+TEST(RtpPacketizerSplitAboutEqually,
+ SomePacketsAreSmallerRespectsFirstPacketReduction) {
+ RtpPacketizer::PayloadSizeLimits limits;
+ limits.max_payload_len = 7;
+ limits.first_packet_reduction_len = 5;
+
+ std::vector<int> payload_sizes = RtpPacketizer::SplitAboutEqually(28, limits);
+
+ EXPECT_LE(payload_sizes.front() + limits.first_packet_reduction_len,
+ limits.max_payload_len);
+}
+
+TEST(RtpPacketizerSplitAboutEqually,
+ SomePacketsAreSmallerRespectsLastPacketReductionLength) {
+ RtpPacketizer::PayloadSizeLimits limits;
+ limits.max_payload_len = 7;
+ limits.last_packet_reduction_len = 5;
+
+ std::vector<int> payload_sizes = RtpPacketizer::SplitAboutEqually(28, limits);
+
+ EXPECT_LE(payload_sizes.back(),
+ limits.max_payload_len - limits.last_packet_reduction_len);
+}
+
+TEST(RtpPacketizerSplitAboutEqually,
+ SomePacketsAreSmallerPacketsAlmostEqualInSize) {
+ RtpPacketizer::PayloadSizeLimits limits;
+ limits.max_payload_len = 7;
+ limits.last_packet_reduction_len = 5;
+
+ std::vector<int> payload_sizes = RtpPacketizer::SplitAboutEqually(28, limits);
+
+ EXPECT_LE(EffectivePacketsSizeDifference(payload_sizes, limits), 1);
+}
+
+TEST(RtpPacketizerSplitAboutEqually,
+ SomePacketsAreSmallerGeneratesMinimumNumberOfPackets) {
+ RtpPacketizer::PayloadSizeLimits limits;
+ limits.max_payload_len = 7;
+ limits.last_packet_reduction_len = 5;
+
+ std::vector<int> payload_sizes = RtpPacketizer::SplitAboutEqually(24, limits);
+  // Computed by hand. 4 packets would only have capacity 4*7-5=23 (max length
+  // for each packet, minus the last packet reduction), so 5 packets are
+  // needed for the 24-byte payload.
+ EXPECT_THAT(payload_sizes, SizeIs(5));
+}
+
+TEST(RtpPacketizerSplitAboutEqually, GivesNonZeroPayloadLengthEachPacket) {
+ RtpPacketizer::PayloadSizeLimits limits;
+ limits.max_payload_len = 600;
+ limits.first_packet_reduction_len = 500;
+ limits.last_packet_reduction_len = 550;
+
+  // A naive implementation would split the 1450 payload + 1050 reduction bytes
+  // into 5 packets of 500 bytes each, leaving zero payload bytes for the first
+  // packet and even fewer for the last.
+ std::vector<int> payload_sizes =
+ RtpPacketizer::SplitAboutEqually(1450, limits);
+
+ EXPECT_EQ(Sum(payload_sizes), 1450);
+ EXPECT_THAT(payload_sizes, Each(Gt(0)));
+}
+
+TEST(RtpPacketizerSplitAboutEqually,
+ IgnoresFirstAndLastPacketReductionWhenPayloadFitsIntoSinglePacket) {
+ RtpPacketizer::PayloadSizeLimits limits;
+ limits.max_payload_len = 30;
+ limits.first_packet_reduction_len = 29;
+ limits.last_packet_reduction_len = 29;
+ limits.single_packet_reduction_len = 10;
+
+ EXPECT_THAT(RtpPacketizer::SplitAboutEqually(20, limits), ElementsAre(20));
+}
+
+TEST(RtpPacketizerSplitAboutEqually,
+ OnePacketWhenExtraSpaceIsEnoughForSinglePacketReduction) {
+ RtpPacketizer::PayloadSizeLimits limits;
+ limits.max_payload_len = 30;
+ limits.single_packet_reduction_len = 10;
+
+ EXPECT_THAT(RtpPacketizer::SplitAboutEqually(20, limits), ElementsAre(20));
+}
+
+TEST(RtpPacketizerSplitAboutEqually,
+ TwoPacketsWhenExtraSpaceIsTooSmallForSinglePacketReduction) {
+ RtpPacketizer::PayloadSizeLimits limits;
+ limits.max_payload_len = 29;
+ limits.first_packet_reduction_len = 3;
+ limits.last_packet_reduction_len = 1;
+ limits.single_packet_reduction_len = 10;
+
+  // The first packet needs two more extra bytes than the last one,
+  // so it should carry two fewer payload bytes.
+ EXPECT_THAT(RtpPacketizer::SplitAboutEqually(20, limits), ElementsAre(9, 11));
+}
+
+TEST(RtpPacketizerSplitAboutEqually, RejectsZeroMaxPayloadLen) {
+ RtpPacketizer::PayloadSizeLimits limits;
+ limits.max_payload_len = 0;
+
+ EXPECT_THAT(RtpPacketizer::SplitAboutEqually(20, limits), IsEmpty());
+}
+
+TEST(RtpPacketizerSplitAboutEqually, RejectsZeroFirstPacketLen) {
+ RtpPacketizer::PayloadSizeLimits limits;
+ limits.max_payload_len = 5;
+ limits.first_packet_reduction_len = 5;
+
+ EXPECT_THAT(RtpPacketizer::SplitAboutEqually(20, limits), IsEmpty());
+}
+
+TEST(RtpPacketizerSplitAboutEqually, RejectsZeroLastPacketLen) {
+ RtpPacketizer::PayloadSizeLimits limits;
+ limits.max_payload_len = 5;
+ limits.last_packet_reduction_len = 5;
+
+ EXPECT_THAT(RtpPacketizer::SplitAboutEqually(20, limits), IsEmpty());
+}
+
+TEST(RtpPacketizerSplitAboutEqually, CantPutSinglePayloadByteInTwoPackets) {
+ RtpPacketizer::PayloadSizeLimits limits;
+ limits.max_payload_len = 10;
+ limits.single_packet_reduction_len = 10;
+
+ EXPECT_THAT(RtpPacketizer::SplitAboutEqually(1, limits), IsEmpty());
+}
+
+TEST(RtpPacketizerSplitAboutEqually, CanPutTwoPayloadBytesInTwoPackets) {
+ RtpPacketizer::PayloadSizeLimits limits;
+ limits.max_payload_len = 10;
+ limits.single_packet_reduction_len = 10;
+
+ EXPECT_THAT(RtpPacketizer::SplitAboutEqually(2, limits), ElementsAre(1, 1));
+}
+
+TEST(RtpPacketizerSplitAboutEqually, CanPutSinglePayloadByteInOnePacket) {
+ RtpPacketizer::PayloadSizeLimits limits;
+ limits.max_payload_len = 11;
+ limits.single_packet_reduction_len = 10;
+
+ EXPECT_THAT(RtpPacketizer::SplitAboutEqually(1, limits), ElementsAre(1));
+}
+
+} // namespace
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_format_video_generic.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_format_video_generic.cc
new file mode 100644
index 0000000000..f5c7f2ee29
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_format_video_generic.cc
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtp_format_video_generic.h"
+
+#include <string.h>
+
+#include "absl/types/optional.h"
+#include "modules/rtp_rtcp/source/rtp_packet_to_send.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+static const size_t kGenericHeaderLength = 1;
+static const size_t kExtendedHeaderLength = 2;
+
+RtpPacketizerGeneric::RtpPacketizerGeneric(
+ rtc::ArrayView<const uint8_t> payload,
+ PayloadSizeLimits limits,
+ const RTPVideoHeader& rtp_video_header)
+ : remaining_payload_(payload) {
+ BuildHeader(rtp_video_header);
+
+ limits.max_payload_len -= header_size_;
+ payload_sizes_ = SplitAboutEqually(payload.size(), limits);
+ current_packet_ = payload_sizes_.begin();
+}
+
+RtpPacketizerGeneric::RtpPacketizerGeneric(
+ rtc::ArrayView<const uint8_t> payload,
+ PayloadSizeLimits limits)
+ : header_size_(0), remaining_payload_(payload) {
+ payload_sizes_ = SplitAboutEqually(payload.size(), limits);
+ current_packet_ = payload_sizes_.begin();
+}
+
+RtpPacketizerGeneric::~RtpPacketizerGeneric() = default;
+
+size_t RtpPacketizerGeneric::NumPackets() const {
+ return payload_sizes_.end() - current_packet_;
+}
+
+bool RtpPacketizerGeneric::NextPacket(RtpPacketToSend* packet) {
+ RTC_DCHECK(packet);
+ if (current_packet_ == payload_sizes_.end())
+ return false;
+
+ size_t next_packet_payload_len = *current_packet_;
+
+ uint8_t* out_ptr =
+ packet->AllocatePayload(header_size_ + next_packet_payload_len);
+ RTC_CHECK(out_ptr);
+
+ if (header_size_ > 0) {
+ memcpy(out_ptr, header_, header_size_);
+    // Clear the first-packet bit; the following packets are intermediate.
+ header_[0] &= ~RtpFormatVideoGeneric::kFirstPacketBit;
+ }
+
+ memcpy(out_ptr + header_size_, remaining_payload_.data(),
+ next_packet_payload_len);
+
+ remaining_payload_ = remaining_payload_.subview(next_packet_payload_len);
+
+ ++current_packet_;
+
+ // Packets left to produce and data left to split should end at the same time.
+ RTC_DCHECK_EQ(current_packet_ == payload_sizes_.end(),
+ remaining_payload_.empty());
+
+ packet->SetMarker(remaining_payload_.empty());
+ return true;
+}
+
+void RtpPacketizerGeneric::BuildHeader(const RTPVideoHeader& rtp_video_header) {
+ header_size_ = kGenericHeaderLength;
+ header_[0] = RtpFormatVideoGeneric::kFirstPacketBit;
+ if (rtp_video_header.frame_type == VideoFrameType::kVideoFrameKey) {
+ header_[0] |= RtpFormatVideoGeneric::kKeyFrameBit;
+ }
+ if (const auto* generic_header = absl::get_if<RTPVideoHeaderLegacyGeneric>(
+ &rtp_video_header.video_type_header)) {
+    // Store the bottom 15 bits of the picture id. Only 15 bits are used, for
+    // compatibility with other packetizer implementations.
+ uint16_t picture_id = generic_header->picture_id;
+ header_[0] |= RtpFormatVideoGeneric::kExtendedHeaderBit;
+ header_[1] = (picture_id >> 8) & 0x7F;
+ header_[2] = picture_id & 0xFF;
+ header_size_ += kExtendedHeaderLength;
+ }
+}
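+
+// Worked example of the layout above (a sketch, not normative): for a key
+// frame with picture_id = 0x1234, byte 0 becomes
+// kFirstPacketBit | kKeyFrameBit | kExtendedHeaderBit = 0x07, byte 1 becomes
+// (0x1234 >> 8) & 0x7F = 0x12, and byte 2 becomes 0x1234 & 0xFF = 0x34,
+// i.e. a 3-byte header {0x07, 0x12, 0x34} in front of the first packet.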
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_format_video_generic.h b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_format_video_generic.h
new file mode 100644
index 0000000000..fd44bd1980
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_format_video_generic.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef MODULES_RTP_RTCP_SOURCE_RTP_FORMAT_VIDEO_GENERIC_H_
+#define MODULES_RTP_RTCP_SOURCE_RTP_FORMAT_VIDEO_GENERIC_H_
+
+#include <stdint.h>
+
+#include <vector>
+
+#include "api/array_view.h"
+#include "modules/rtp_rtcp/source/rtp_format.h"
+
+namespace webrtc {
+
+class RtpPacketToSend;
+struct RTPVideoHeader;
+
+namespace RtpFormatVideoGeneric {
+static const uint8_t kKeyFrameBit = 0x01;
+static const uint8_t kFirstPacketBit = 0x02;
+// If this bit is set, the packet contains an extended header. The bit was
+// added later, so old clients will not send it.
+static const uint8_t kExtendedHeaderBit = 0x04;
+} // namespace RtpFormatVideoGeneric
+
+class RtpPacketizerGeneric : public RtpPacketizer {
+ public:
+ // Initialize with payload from encoder.
+ // The payload_data must be exactly one encoded generic frame.
+ // Packets returned by `NextPacket` will contain the generic payload header.
+ RtpPacketizerGeneric(rtc::ArrayView<const uint8_t> payload,
+ PayloadSizeLimits limits,
+ const RTPVideoHeader& rtp_video_header);
+ // Initialize with payload from encoder.
+ // The payload_data must be exactly one encoded generic frame.
+ // Packets returned by `NextPacket` will contain raw payload without the
+ // generic payload header.
+ RtpPacketizerGeneric(rtc::ArrayView<const uint8_t> payload,
+ PayloadSizeLimits limits);
+
+ ~RtpPacketizerGeneric() override;
+
+ RtpPacketizerGeneric(const RtpPacketizerGeneric&) = delete;
+ RtpPacketizerGeneric& operator=(const RtpPacketizerGeneric&) = delete;
+
+ size_t NumPackets() const override;
+
+  // Gets the next payload: writes it into `packet` and sets the packet's
+  // marker bit.
+  // Returns true on success, false when no packets remain.
+ bool NextPacket(RtpPacketToSend* packet) override;
+
+ private:
+ // Fills header_ and header_size_ members.
+ void BuildHeader(const RTPVideoHeader& rtp_video_header);
+
+ uint8_t header_[3];
+ size_t header_size_;
+ rtc::ArrayView<const uint8_t> remaining_payload_;
+ std::vector<int> payload_sizes_;
+ std::vector<int>::const_iterator current_packet_;
+};
+} // namespace webrtc
+#endif // MODULES_RTP_RTCP_SOURCE_RTP_FORMAT_VIDEO_GENERIC_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_format_video_generic_unittest.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_format_video_generic_unittest.cc
new file mode 100644
index 0000000000..d83c3b03c9
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_format_video_generic_unittest.cc
@@ -0,0 +1,172 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtp_format_video_generic.h"
+
+#include <algorithm>
+#include <limits>
+#include <memory>
+#include <vector>
+
+#include "api/array_view.h"
+#include "modules/rtp_rtcp/mocks/mock_rtp_rtcp.h"
+#include "modules/rtp_rtcp/source/byte_io.h"
+#include "modules/rtp_rtcp/source/rtp_packet_to_send.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+using ::testing::Contains;
+using ::testing::Each;
+using ::testing::ElementsAreArray;
+using ::testing::Le;
+
+constexpr RtpPacketizer::PayloadSizeLimits kNoSizeLimits;
+
+std::vector<int> NextPacketFillPayloadSizes(RtpPacketizerGeneric* packetizer) {
+ RtpPacketToSend packet(nullptr);
+ std::vector<int> result;
+ while (packetizer->NextPacket(&packet)) {
+ result.push_back(packet.payload_size());
+ }
+ return result;
+}
+
+TEST(RtpPacketizerVideoGeneric, RespectsMaxPayloadSize) {
+ const size_t kPayloadSize = 50;
+ const uint8_t kPayload[kPayloadSize] = {};
+
+ RtpPacketizer::PayloadSizeLimits limits;
+ limits.max_payload_len = 6;
+ RtpPacketizerGeneric packetizer(kPayload, limits, RTPVideoHeader());
+
+ std::vector<int> payload_sizes = NextPacketFillPayloadSizes(&packetizer);
+
+ EXPECT_THAT(payload_sizes, Each(Le(limits.max_payload_len)));
+}
+
+TEST(RtpPacketizerVideoGeneric, UsesMaxPayloadSize) {
+ const size_t kPayloadSize = 50;
+ const uint8_t kPayload[kPayloadSize] = {};
+
+ RtpPacketizer::PayloadSizeLimits limits;
+ limits.max_payload_len = 6;
+ RtpPacketizerGeneric packetizer(kPayload, limits, RTPVideoHeader());
+
+ std::vector<int> payload_sizes = NextPacketFillPayloadSizes(&packetizer);
+
+  // With kPayloadSize > max_payload_len^2, some packets must use the full
+  // allowed payload size; otherwise fewer packets would have sufficed.
+ EXPECT_THAT(payload_sizes, Contains(limits.max_payload_len));
+}
+
+TEST(RtpPacketizerVideoGeneric, WritesExtendedHeaderWhenPictureIdIsSet) {
+ const size_t kPayloadSize = 13;
+ const uint8_t kPayload[kPayloadSize] = {};
+
+ RTPVideoHeader rtp_video_header;
+ rtp_video_header.video_type_header.emplace<RTPVideoHeaderLegacyGeneric>()
+ .picture_id = 37;
+ rtp_video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ RtpPacketizerGeneric packetizer(kPayload, kNoSizeLimits, rtp_video_header);
+
+ RtpPacketToSend packet(nullptr);
+ ASSERT_TRUE(packetizer.NextPacket(&packet));
+
+ rtc::ArrayView<const uint8_t> payload = packet.payload();
+ EXPECT_EQ(payload.size(), 3 + kPayloadSize);
+ EXPECT_TRUE(payload[0] & 0x04); // Extended header bit is set.
+  // Picture id is 37.
+ EXPECT_EQ(0u, payload[1]);
+ EXPECT_EQ(37u, payload[2]);
+}
+
+TEST(RtpPacketizerVideoGeneric, RespectsMaxPayloadSizeWithExtendedHeader) {
+ const int kPayloadSize = 50;
+ const uint8_t kPayload[kPayloadSize] = {};
+
+ RtpPacketizer::PayloadSizeLimits limits;
+ limits.max_payload_len = 6;
+ RTPVideoHeader rtp_video_header;
+ rtp_video_header.video_type_header.emplace<RTPVideoHeaderLegacyGeneric>()
+ .picture_id = 37;
+ RtpPacketizerGeneric packetizer(kPayload, limits, rtp_video_header);
+
+ std::vector<int> payload_sizes = NextPacketFillPayloadSizes(&packetizer);
+
+ EXPECT_THAT(payload_sizes, Each(Le(limits.max_payload_len)));
+}
+
+TEST(RtpPacketizerVideoGeneric, UsesMaxPayloadSizeWithExtendedHeader) {
+ const int kPayloadSize = 50;
+ const uint8_t kPayload[kPayloadSize] = {};
+
+ RtpPacketizer::PayloadSizeLimits limits;
+ limits.max_payload_len = 6;
+ RTPVideoHeader rtp_video_header;
+ rtp_video_header.video_type_header.emplace<RTPVideoHeaderLegacyGeneric>()
+ .picture_id = 37;
+ RtpPacketizerGeneric packetizer(kPayload, limits, rtp_video_header);
+ std::vector<int> payload_sizes = NextPacketFillPayloadSizes(&packetizer);
+
+  // With kPayloadSize > max_payload_len^2, some packets must use the full
+  // allowed payload size; otherwise fewer packets would have sufficed.
+ EXPECT_THAT(payload_sizes, Contains(limits.max_payload_len));
+}
+
+TEST(RtpPacketizerVideoGeneric, FrameIdOver15bitsWrapsAround) {
+ const int kPayloadSize = 13;
+ const uint8_t kPayload[kPayloadSize] = {};
+
+ RTPVideoHeader rtp_video_header;
+ rtp_video_header.video_type_header.emplace<RTPVideoHeaderLegacyGeneric>()
+ .picture_id = 0x8137;
+ rtp_video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ RtpPacketizerGeneric packetizer(kPayload, kNoSizeLimits, rtp_video_header);
+
+ RtpPacketToSend packet(nullptr);
+ ASSERT_TRUE(packetizer.NextPacket(&packet));
+
+ rtc::ArrayView<const uint8_t> payload = packet.payload();
+ EXPECT_TRUE(payload[0] & 0x04); // Extended header bit is set.
+  // Picture id is 0x137.
+ EXPECT_EQ(0x01u, payload[1]);
+ EXPECT_EQ(0x37u, payload[2]);
+}
+
+TEST(RtpPacketizerVideoGeneric, NoFrameIdDoesNotWriteExtendedHeader) {
+ const int kPayloadSize = 13;
+ const uint8_t kPayload[kPayloadSize] = {};
+
+ RtpPacketizerGeneric packetizer(kPayload, kNoSizeLimits, RTPVideoHeader());
+
+ RtpPacketToSend packet(nullptr);
+ ASSERT_TRUE(packetizer.NextPacket(&packet));
+
+ rtc::ArrayView<const uint8_t> payload = packet.payload();
+ EXPECT_FALSE(payload[0] & 0x04);
+}
+
+TEST(RtpPacketizerVideoGeneric, DoesNotWriteHeaderForRawPayload) {
+ const uint8_t kPayload[] = {0x05, 0x25, 0x52};
+
+ RtpPacketizerGeneric packetizer(kPayload, kNoSizeLimits);
+
+ RtpPacketToSend packet(nullptr);
+ ASSERT_TRUE(packetizer.NextPacket(&packet));
+
+ rtc::ArrayView<const uint8_t> payload = packet.payload();
+ EXPECT_THAT(payload, ElementsAreArray(kPayload));
+}
+
+} // namespace
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_format_vp8.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_format_vp8.cc
new file mode 100644
index 0000000000..ae5f4e50a4
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_format_vp8.cc
@@ -0,0 +1,169 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtp_format_vp8.h"
+
+#include <stdint.h>
+#include <string.h> // memcpy
+
+#include <vector>
+
+#include "modules/rtp_rtcp/source/rtp_packet_to_send.h"
+#include "modules/rtp_rtcp/source/video_rtp_depacketizer_vp8.h"
+#include "modules/video_coding/codecs/interface/common_constants.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+namespace {
+
+constexpr int kXBit = 0x80;
+constexpr int kNBit = 0x20;
+constexpr int kSBit = 0x10;
+constexpr int kKeyIdxField = 0x1F;
+constexpr int kIBit = 0x80;
+constexpr int kLBit = 0x40;
+constexpr int kTBit = 0x20;
+constexpr int kKBit = 0x10;
+constexpr int kYBit = 0x20;
+
+bool ValidateHeader(const RTPVideoHeaderVP8& hdr_info) {
+ if (hdr_info.pictureId != kNoPictureId) {
+ RTC_DCHECK_GE(hdr_info.pictureId, 0);
+ RTC_DCHECK_LE(hdr_info.pictureId, 0x7FFF);
+ }
+ if (hdr_info.tl0PicIdx != kNoTl0PicIdx) {
+ RTC_DCHECK_GE(hdr_info.tl0PicIdx, 0);
+ RTC_DCHECK_LE(hdr_info.tl0PicIdx, 0xFF);
+ }
+ if (hdr_info.temporalIdx != kNoTemporalIdx) {
+ RTC_DCHECK_GE(hdr_info.temporalIdx, 0);
+ RTC_DCHECK_LE(hdr_info.temporalIdx, 3);
+ } else {
+ RTC_DCHECK(!hdr_info.layerSync);
+ }
+ if (hdr_info.keyIdx != kNoKeyIdx) {
+ RTC_DCHECK_GE(hdr_info.keyIdx, 0);
+ RTC_DCHECK_LE(hdr_info.keyIdx, 0x1F);
+ }
+ return true;
+}
+
+} // namespace
+
+RtpPacketizerVp8::RtpPacketizerVp8(rtc::ArrayView<const uint8_t> payload,
+ PayloadSizeLimits limits,
+ const RTPVideoHeaderVP8& hdr_info)
+ : hdr_(BuildHeader(hdr_info)), remaining_payload_(payload) {
+ limits.max_payload_len -= hdr_.size();
+ payload_sizes_ = SplitAboutEqually(payload.size(), limits);
+ current_packet_ = payload_sizes_.begin();
+}
+
+RtpPacketizerVp8::~RtpPacketizerVp8() = default;
+
+size_t RtpPacketizerVp8::NumPackets() const {
+ return payload_sizes_.end() - current_packet_;
+}
+
+bool RtpPacketizerVp8::NextPacket(RtpPacketToSend* packet) {
+ RTC_DCHECK(packet);
+ if (current_packet_ == payload_sizes_.end()) {
+ return false;
+ }
+
+ size_t packet_payload_len = *current_packet_;
+ ++current_packet_;
+
+ uint8_t* buffer = packet->AllocatePayload(hdr_.size() + packet_payload_len);
+ RTC_CHECK(buffer);
+
+ memcpy(buffer, hdr_.data(), hdr_.size());
+ memcpy(buffer + hdr_.size(), remaining_payload_.data(), packet_payload_len);
+
+ remaining_payload_ = remaining_payload_.subview(packet_payload_len);
+ hdr_[0] &= (~kSBit); // Clear 'Start of partition' bit.
+ packet->SetMarker(current_packet_ == payload_sizes_.end());
+ return true;
+}
+
+RtpPacketizerVp8::RawHeader RtpPacketizerVp8::BuildHeader(
+ const RTPVideoHeaderVP8& header) {
+ // VP8 payload descriptor
+ // https://datatracker.ietf.org/doc/html/rfc7741#section-4.2
+ //
+ // 0 1 2 3 4 5 6 7
+ // +-+-+-+-+-+-+-+-+
+ // |X|R|N|S|R| PID | (REQUIRED)
+ // +-+-+-+-+-+-+-+-+
+ // X: |I|L|T|K| RSV | (OPTIONAL)
+ // +-+-+-+-+-+-+-+-+
+ // I: |M| PictureID | (OPTIONAL)
+ // +-+-+-+-+-+-+-+-+
+ // | PictureID |
+ // +-+-+-+-+-+-+-+-+
+ // L: | TL0PICIDX | (OPTIONAL)
+ // +-+-+-+-+-+-+-+-+
+ // T/K: |TID|Y| KEYIDX | (OPTIONAL)
+ // +-+-+-+-+-+-+-+-+
+ RTC_DCHECK(ValidateHeader(header));
+
+ RawHeader result;
+ bool tid_present = header.temporalIdx != kNoTemporalIdx;
+ bool keyid_present = header.keyIdx != kNoKeyIdx;
+ bool tl0_pid_present = header.tl0PicIdx != kNoTl0PicIdx;
+ bool pid_present = header.pictureId != kNoPictureId;
+ uint8_t x_field = 0;
+ if (pid_present)
+ x_field |= kIBit;
+ if (tl0_pid_present)
+ x_field |= kLBit;
+ if (tid_present)
+ x_field |= kTBit;
+ if (keyid_present)
+ x_field |= kKBit;
+
+ uint8_t flags = 0;
+ if (x_field != 0)
+ flags |= kXBit;
+ if (header.nonReference)
+ flags |= kNBit;
+  // Build the header as for the first packet in the frame; NextPacket() will
+  // clear the S bit after the first use.
+ flags |= kSBit;
+ result.push_back(flags);
+ if (x_field == 0) {
+ return result;
+ }
+ result.push_back(x_field);
+ if (pid_present) {
+ const uint16_t pic_id = static_cast<uint16_t>(header.pictureId);
+ result.push_back(0x80 | ((pic_id >> 8) & 0x7F));
+ result.push_back(pic_id & 0xFF);
+ }
+ if (tl0_pid_present) {
+ result.push_back(header.tl0PicIdx);
+ }
+ if (tid_present || keyid_present) {
+ uint8_t data_field = 0;
+ if (tid_present) {
+ data_field |= header.temporalIdx << 6;
+ if (header.layerSync)
+ data_field |= kYBit;
+ }
+ if (keyid_present) {
+ data_field |= (header.keyIdx & kKeyIdxField);
+ }
+ result.push_back(data_field);
+ }
+ return result;
+}
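+
+// Worked example (a sketch, not normative): with pictureId = 0x1234,
+// tl0PicIdx = 7, temporalIdx = 2, layerSync = true, keyIdx unset and
+// nonReference = false, BuildHeader() yields the maximal 6-byte descriptor
+// {0x90, 0xE0, 0x92, 0x34, 0x07, 0xA0}: 0x90 = X|S, 0xE0 = I|L|T,
+// 0x92 0x34 = the M bit plus the 15-bit picture id, 0x07 = TL0PICIDX, and
+// 0xA0 = TID 2 with the Y (layer sync) bit set.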
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_format_vp8.h b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_format_vp8.h
new file mode 100644
index 0000000000..d1f569a946
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_format_vp8.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * This file contains the declaration of the VP8 packetizer class.
+ * A packetizer object is created for each encoded video frame. The
+ * constructor is called with the payload data, the payload size limits,
+ * and the VP8 header of the frame.
+ *
+ * After creating the packetizer, the method NextPacket is called
+ * repeatedly to get all packets for the frame. The method returns true
+ * as long as it produced a packet, and false once all packets for the
+ * frame have been fetched.
+ */
+
+#ifndef MODULES_RTP_RTCP_SOURCE_RTP_FORMAT_VP8_H_
+#define MODULES_RTP_RTCP_SOURCE_RTP_FORMAT_VP8_H_
+
+#include <stddef.h>
+
+#include <cstdint>
+#include <vector>
+
+#include "absl/container/inlined_vector.h"
+#include "api/array_view.h"
+#include "modules/rtp_rtcp/source/rtp_format.h"
+#include "modules/rtp_rtcp/source/rtp_packet_to_send.h"
+#include "modules/video_coding/codecs/vp8/include/vp8_globals.h"
+
+namespace webrtc {
+
+// Packetizer for VP8.
+class RtpPacketizerVp8 : public RtpPacketizer {
+ public:
+ // Initialize with payload from encoder.
+ // The payload_data must be exactly one encoded VP8 frame.
+ RtpPacketizerVp8(rtc::ArrayView<const uint8_t> payload,
+ PayloadSizeLimits limits,
+ const RTPVideoHeaderVP8& hdr_info);
+
+ ~RtpPacketizerVp8() override;
+
+ RtpPacketizerVp8(const RtpPacketizerVp8&) = delete;
+ RtpPacketizerVp8& operator=(const RtpPacketizerVp8&) = delete;
+
+ size_t NumPackets() const override;
+
+  // Gets the next payload with the VP8 payload header prepended:
+  // writes it into `packet` and sets the packet's marker bit.
+  // Returns true on success, false when no packets remain.
+ bool NextPacket(RtpPacketToSend* packet) override;
+
+ private:
+ // VP8 header can use up to 6 bytes.
+ using RawHeader = absl::InlinedVector<uint8_t, 6>;
+ static RawHeader BuildHeader(const RTPVideoHeaderVP8& header);
+
+ RawHeader hdr_;
+ rtc::ArrayView<const uint8_t> remaining_payload_;
+ std::vector<int> payload_sizes_;
+ std::vector<int>::const_iterator current_packet_;
+};
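+
+// A minimal usage sketch (an illustration; `payload`, `limits` and
+// `hdr_info` are assumed to be prepared by the caller):
+//
+//   RtpPacketizerVp8 packetizer(payload, limits, hdr_info);
+//   RtpPacketToSend packet(/*extensions=*/nullptr);
+//   while (packetizer.NextPacket(&packet)) {
+//     // Hand `packet` to the RTP sender.
+//   }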
+
+} // namespace webrtc
+#endif // MODULES_RTP_RTCP_SOURCE_RTP_FORMAT_VP8_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_format_vp8_test_helper.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_format_vp8_test_helper.cc
new file mode 100644
index 0000000000..0088ff8f31
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_format_vp8_test_helper.cc
@@ -0,0 +1,174 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtp_format_vp8_test_helper.h"
+
+#include "modules/rtp_rtcp/source/rtp_packet_to_send.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+// VP8 payload descriptor
+// https://datatracker.ietf.org/doc/html/rfc7741#section-4.2
+//
+// 0 1 2 3 4 5 6 7
+// +-+-+-+-+-+-+-+-+
+// |X|R|N|S|R| PID | (REQUIRED)
+// +-+-+-+-+-+-+-+-+
+// X: |I|L|T|K| RSV | (OPTIONAL)
+// +-+-+-+-+-+-+-+-+
+// I: |M| PictureID | (OPTIONAL)
+// +-+-+-+-+-+-+-+-+
+// | PictureID |
+// +-+-+-+-+-+-+-+-+
+// L: | TL0PICIDX | (OPTIONAL)
+// +-+-+-+-+-+-+-+-+
+// T/K: |TID|Y| KEYIDX | (OPTIONAL)
+// +-+-+-+-+-+-+-+-+
+
+namespace webrtc {
+namespace {
+
+using ::testing::ElementsAreArray;
+
+constexpr RtpPacketToSend::ExtensionManager* kNoExtensions = nullptr;
+
+int Bit(uint8_t byte, int position) {
+ return (byte >> position) & 0x01;
+}
+
+} // namespace
+
+RtpFormatVp8TestHelper::RtpFormatVp8TestHelper(const RTPVideoHeaderVP8* hdr,
+ size_t payload_len)
+ : hdr_info_(hdr), payload_(payload_len) {
+ for (size_t i = 0; i < payload_.size(); ++i) {
+ payload_[i] = i;
+ }
+}
+
+RtpFormatVp8TestHelper::~RtpFormatVp8TestHelper() = default;
+
+void RtpFormatVp8TestHelper::GetAllPacketsAndCheck(
+ RtpPacketizerVp8* packetizer,
+ rtc::ArrayView<const size_t> expected_sizes) {
+ EXPECT_EQ(packetizer->NumPackets(), expected_sizes.size());
+ const uint8_t* data_ptr = payload_.begin();
+ RtpPacketToSend packet(kNoExtensions);
+ for (size_t i = 0; i < expected_sizes.size(); ++i) {
+ EXPECT_TRUE(packetizer->NextPacket(&packet));
+ auto rtp_payload = packet.payload();
+ EXPECT_EQ(rtp_payload.size(), expected_sizes[i]);
+
+ int payload_offset = CheckHeader(rtp_payload, /*first=*/i == 0);
+ // Verify that the payload (i.e., after the headers) of the packet is
+ // identical to the expected (as found in data_ptr).
+ auto vp8_payload = rtp_payload.subview(payload_offset);
+ ASSERT_GE(payload_.end() - data_ptr, static_cast<int>(vp8_payload.size()));
+ EXPECT_THAT(vp8_payload, ElementsAreArray(data_ptr, vp8_payload.size()));
+ data_ptr += vp8_payload.size();
+ }
+ EXPECT_EQ(payload_.end() - data_ptr, 0);
+}
+
+int RtpFormatVp8TestHelper::CheckHeader(rtc::ArrayView<const uint8_t> buffer,
+ bool first) {
+ int x_bit = Bit(buffer[0], 7);
+ EXPECT_EQ(Bit(buffer[0], 6), 0); // Reserved.
+ EXPECT_EQ(Bit(buffer[0], 5), hdr_info_->nonReference ? 1 : 0);
+ EXPECT_EQ(Bit(buffer[0], 4), first ? 1 : 0);
+ EXPECT_EQ(buffer[0] & 0x0f, 0); // RtpPacketizerVp8 always uses partition 0.
+
+ int payload_offset = 1;
+ if (hdr_info_->pictureId != kNoPictureId ||
+ hdr_info_->temporalIdx != kNoTemporalIdx ||
+ hdr_info_->tl0PicIdx != kNoTl0PicIdx || hdr_info_->keyIdx != kNoKeyIdx) {
+ EXPECT_EQ(x_bit, 1);
+ ++payload_offset;
+ CheckPictureID(buffer, &payload_offset);
+ CheckTl0PicIdx(buffer, &payload_offset);
+ CheckTIDAndKeyIdx(buffer, &payload_offset);
+ EXPECT_EQ(buffer[1] & 0x07, 0); // Reserved.
+ } else {
+ EXPECT_EQ(x_bit, 0);
+ }
+
+ return payload_offset;
+}
+
+// Verify that the I bit and the PictureID field are both set in accordance
+// with the information in hdr_info_->pictureId.
+void RtpFormatVp8TestHelper::CheckPictureID(
+ rtc::ArrayView<const uint8_t> buffer,
+ int* offset) {
+ int i_bit = Bit(buffer[1], 7);
+ if (hdr_info_->pictureId != kNoPictureId) {
+ EXPECT_EQ(i_bit, 1);
+ int two_byte_picture_id = Bit(buffer[*offset], 7);
+ EXPECT_EQ(two_byte_picture_id, 1);
+ EXPECT_EQ(buffer[*offset] & 0x7F, (hdr_info_->pictureId >> 8) & 0x7F);
+ EXPECT_EQ(buffer[(*offset) + 1], hdr_info_->pictureId & 0xFF);
+ (*offset) += 2;
+ } else {
+ EXPECT_EQ(i_bit, 0);
+ }
+}
+
+// Verify that the L bit and the TL0PICIDX field are both set in accordance
+// with the information in hdr_info_->tl0PicIdx.
+void RtpFormatVp8TestHelper::CheckTl0PicIdx(
+ rtc::ArrayView<const uint8_t> buffer,
+ int* offset) {
+ int l_bit = Bit(buffer[1], 6);
+ if (hdr_info_->tl0PicIdx != kNoTl0PicIdx) {
+ EXPECT_EQ(l_bit, 1);
+ EXPECT_EQ(buffer[*offset], hdr_info_->tl0PicIdx);
+ ++*offset;
+ } else {
+ EXPECT_EQ(l_bit, 0);
+ }
+}
+
+// Verify that the T bit and the TID field, and the K bit and the KEYIDX
+// field, are all set in accordance with the information in
+// hdr_info_->temporalIdx and hdr_info_->keyIdx, respectively.
+void RtpFormatVp8TestHelper::CheckTIDAndKeyIdx(
+ rtc::ArrayView<const uint8_t> buffer,
+ int* offset) {
+ int t_bit = Bit(buffer[1], 5);
+ int k_bit = Bit(buffer[1], 4);
+ if (hdr_info_->temporalIdx == kNoTemporalIdx &&
+ hdr_info_->keyIdx == kNoKeyIdx) {
+ EXPECT_EQ(t_bit, 0);
+ EXPECT_EQ(k_bit, 0);
+ return;
+ }
+ int temporal_id = (buffer[*offset] & 0xC0) >> 6;
+ int y_bit = Bit(buffer[*offset], 5);
+ int key_idx = buffer[*offset] & 0x1f;
+ if (hdr_info_->temporalIdx != kNoTemporalIdx) {
+ EXPECT_EQ(t_bit, 1);
+ EXPECT_EQ(temporal_id, hdr_info_->temporalIdx);
+ EXPECT_EQ(y_bit, hdr_info_->layerSync ? 1 : 0);
+ } else {
+ EXPECT_EQ(t_bit, 0);
+ EXPECT_EQ(temporal_id, 0);
+ EXPECT_EQ(y_bit, 0);
+ }
+ if (hdr_info_->keyIdx != kNoKeyIdx) {
+ EXPECT_EQ(k_bit, 1);
+ EXPECT_EQ(key_idx, hdr_info_->keyIdx);
+ } else {
+ EXPECT_EQ(k_bit, 0);
+ EXPECT_EQ(key_idx, 0);
+ }
+ ++*offset;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_format_vp8_test_helper.h b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_format_vp8_test_helper.h
new file mode 100644
index 0000000000..3ecaa476da
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_format_vp8_test_helper.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+// This file contains the class RtpFormatVp8TestHelper. The class is
+// responsible for setting up a fake VP8 bitstream according to the
+// RTPVideoHeaderVP8 header. The packetizer can then be provided to this helper
+// class, which will then extract all packets and compare to the expected
+// outcome.
+
+#ifndef MODULES_RTP_RTCP_SOURCE_RTP_FORMAT_VP8_TEST_HELPER_H_
+#define MODULES_RTP_RTCP_SOURCE_RTP_FORMAT_VP8_TEST_HELPER_H_
+
+#include "api/array_view.h"
+#include "modules/rtp_rtcp/source/rtp_format_vp8.h"
+#include "modules/video_coding/codecs/vp8/include/vp8_globals.h"
+#include "rtc_base/buffer.h"
+
+namespace webrtc {
+
+class RtpFormatVp8TestHelper {
+ public:
+ RtpFormatVp8TestHelper(const RTPVideoHeaderVP8* hdr, size_t payload_len);
+ ~RtpFormatVp8TestHelper();
+
+ RtpFormatVp8TestHelper(const RtpFormatVp8TestHelper&) = delete;
+ RtpFormatVp8TestHelper& operator=(const RtpFormatVp8TestHelper&) = delete;
+
+ void GetAllPacketsAndCheck(RtpPacketizerVp8* packetizer,
+ rtc::ArrayView<const size_t> expected_sizes);
+
+ rtc::ArrayView<const uint8_t> payload() const { return payload_; }
+ size_t payload_size() const { return payload_.size(); }
+
+ private:
+ // Returns header size, i.e. payload offset.
+ int CheckHeader(rtc::ArrayView<const uint8_t> rtp_payload, bool first);
+ void CheckPictureID(rtc::ArrayView<const uint8_t> rtp_payload, int* offset);
+ void CheckTl0PicIdx(rtc::ArrayView<const uint8_t> rtp_payload, int* offset);
+ void CheckTIDAndKeyIdx(rtc::ArrayView<const uint8_t> rtp_payload,
+ int* offset);
+ void CheckPayload(const uint8_t* data_ptr);
+
+ const RTPVideoHeaderVP8* const hdr_info_;
+ rtc::Buffer payload_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_RTP_RTCP_SOURCE_RTP_FORMAT_VP8_TEST_HELPER_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_format_vp8_unittest.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_format_vp8_unittest.cc
new file mode 100644
index 0000000000..7934ff8ea9
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_format_vp8_unittest.cc
@@ -0,0 +1,115 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtp_format_vp8.h"
+
+#include <memory>
+
+#include "modules/rtp_rtcp/source/rtp_format_vp8_test_helper.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+constexpr RtpPacketizer::PayloadSizeLimits kNoSizeLimits;
+
+TEST(RtpPacketizerVp8Test, ResultPacketsAreAlmostEqualSize) {
+ RTPVideoHeaderVP8 hdr_info;
+ hdr_info.InitRTPVideoHeaderVP8();
+ hdr_info.pictureId = 200;
+ RtpFormatVp8TestHelper helper(&hdr_info, /*payload_len=*/30);
+
+ RtpPacketizer::PayloadSizeLimits limits;
+ limits.max_payload_len = 12; // Small enough to produce 4 packets.
+ RtpPacketizerVp8 packetizer(helper.payload(), limits, hdr_info);
+
+ const size_t kExpectedSizes[] = {11, 11, 12, 12};
+ helper.GetAllPacketsAndCheck(&packetizer, kExpectedSizes);
+}
+
+TEST(RtpPacketizerVp8Test, EqualSizeWithLastPacketReduction) {
+ RTPVideoHeaderVP8 hdr_info;
+ hdr_info.InitRTPVideoHeaderVP8();
+ hdr_info.pictureId = 200;
+ RtpFormatVp8TestHelper helper(&hdr_info, /*payload_len=*/43);
+
+ RtpPacketizer::PayloadSizeLimits limits;
+ limits.max_payload_len = 15; // Small enough to produce 5 packets.
+ limits.last_packet_reduction_len = 5;
+ RtpPacketizerVp8 packetizer(helper.payload(), limits, hdr_info);
+
+  // Calculated by hand. VP8 payload descriptors are 4 bytes each. 5 packets
+  // is the minimum needed to fit 43 payload bytes into packets with a
+  // capacity of 15 - 4 = 11 while leaving 5 bytes free in the last packet.
+  // All packets are almost equal in size; even the last one is, once its
+  // reserved free space (which is filled further up the stack by an extended
+  // RTP header) is counted.
+ const size_t kExpectedSizes[] = {13, 13, 14, 14, 9};
+ helper.GetAllPacketsAndCheck(&packetizer, kExpectedSizes);
+}
+
+// Verify that non-reference bit is set.
+TEST(RtpPacketizerVp8Test, NonReferenceBit) {
+ RTPVideoHeaderVP8 hdr_info;
+ hdr_info.InitRTPVideoHeaderVP8();
+ hdr_info.nonReference = true;
+ RtpFormatVp8TestHelper helper(&hdr_info, /*payload_len=*/30);
+
+ RtpPacketizer::PayloadSizeLimits limits;
+ limits.max_payload_len = 25; // Small enough to produce two packets.
+ RtpPacketizerVp8 packetizer(helper.payload(), limits, hdr_info);
+
+ const size_t kExpectedSizes[] = {16, 16};
+ helper.GetAllPacketsAndCheck(&packetizer, kExpectedSizes);
+}
+
+// Verify Tl0PicIdx and TID fields, and layerSync bit.
+TEST(RtpPacketizerVp8Test, Tl0PicIdxAndTID) {
+ RTPVideoHeaderVP8 hdr_info;
+ hdr_info.InitRTPVideoHeaderVP8();
+ hdr_info.tl0PicIdx = 117;
+ hdr_info.temporalIdx = 2;
+ hdr_info.layerSync = true;
+ RtpFormatVp8TestHelper helper(&hdr_info, /*payload_len=*/30);
+
+ RtpPacketizerVp8 packetizer(helper.payload(), kNoSizeLimits, hdr_info);
+
+ const size_t kExpectedSizes[1] = {helper.payload_size() + 4};
+ helper.GetAllPacketsAndCheck(&packetizer, kExpectedSizes);
+}
+
+TEST(RtpPacketizerVp8Test, KeyIdx) {
+ RTPVideoHeaderVP8 hdr_info;
+ hdr_info.InitRTPVideoHeaderVP8();
+ hdr_info.keyIdx = 17;
+ RtpFormatVp8TestHelper helper(&hdr_info, /*payload_len=*/30);
+
+ RtpPacketizerVp8 packetizer(helper.payload(), kNoSizeLimits, hdr_info);
+
+ const size_t kExpectedSizes[1] = {helper.payload_size() + 3};
+ helper.GetAllPacketsAndCheck(&packetizer, kExpectedSizes);
+}
+
+// Verify TID field and KeyIdx field in combination.
+TEST(RtpPacketizerVp8Test, TIDAndKeyIdx) {
+ RTPVideoHeaderVP8 hdr_info;
+ hdr_info.InitRTPVideoHeaderVP8();
+ hdr_info.temporalIdx = 1;
+ hdr_info.keyIdx = 5;
+ RtpFormatVp8TestHelper helper(&hdr_info, /*payload_len=*/30);
+
+ RtpPacketizerVp8 packetizer(helper.payload(), kNoSizeLimits, hdr_info);
+
+ const size_t kExpectedSizes[1] = {helper.payload_size() + 3};
+ helper.GetAllPacketsAndCheck(&packetizer, kExpectedSizes);
+}
+
+} // namespace
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_format_vp9.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_format_vp9.cc
new file mode 100644
index 0000000000..15e059e85c
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_format_vp9.cc
@@ -0,0 +1,451 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtp_format_vp9.h"
+
+#include <string.h>
+
+#include "api/video/video_codec_constants.h"
+#include "modules/rtp_rtcp/source/rtp_packet_to_send.h"
+#include "modules/rtp_rtcp/source/video_rtp_depacketizer_vp9.h"
+#include "modules/video_coding/codecs/interface/common_constants.h"
+#include "rtc_base/bit_buffer.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+#define RETURN_FALSE_ON_ERROR(x) \
+ if (!(x)) { \
+ return false; \
+ }
+
+namespace webrtc {
+namespace {
+// Length of VP9 payload descriptors' fixed part.
+const size_t kFixedPayloadDescriptorBytes = 1;
+
+const uint32_t kReservedBitValue0 = 0;
+
+uint8_t TemporalIdxField(const RTPVideoHeaderVP9& hdr, uint8_t def) {
+ return (hdr.temporal_idx == kNoTemporalIdx) ? def : hdr.temporal_idx;
+}
+
+uint8_t SpatialIdxField(const RTPVideoHeaderVP9& hdr, uint8_t def) {
+ return (hdr.spatial_idx == kNoSpatialIdx) ? def : hdr.spatial_idx;
+}
+
+int16_t Tl0PicIdxField(const RTPVideoHeaderVP9& hdr, uint8_t def) {
+ return (hdr.tl0_pic_idx == kNoTl0PicIdx) ? def : hdr.tl0_pic_idx;
+}
+
+// Picture ID:
+//
+// +-+-+-+-+-+-+-+-+
+// I: |M| PICTURE ID | M:0 => picture id is 7 bits.
+// +-+-+-+-+-+-+-+-+ M:1 => picture id is 15 bits.
+// M: | EXTENDED PID |
+// +-+-+-+-+-+-+-+-+
+//
+size_t PictureIdLength(const RTPVideoHeaderVP9& hdr) {
+ if (hdr.picture_id == kNoPictureId)
+ return 0;
+ return (hdr.max_picture_id == kMaxOneBytePictureId) ? 1 : 2;
+}
+
+bool PictureIdPresent(const RTPVideoHeaderVP9& hdr) {
+ return PictureIdLength(hdr) > 0;
+}
+
+// Layer indices:
+//
+// Flexible mode (F=1): Non-flexible mode (F=0):
+//
+// +-+-+-+-+-+-+-+-+ +-+-+-+-+-+-+-+-+
+// L: | T |U| S |D| | T |U| S |D|
+// +-+-+-+-+-+-+-+-+ +-+-+-+-+-+-+-+-+
+// | TL0PICIDX |
+// +-+-+-+-+-+-+-+-+
+//
+size_t LayerInfoLength(const RTPVideoHeaderVP9& hdr) {
+ if (hdr.temporal_idx == kNoTemporalIdx && hdr.spatial_idx == kNoSpatialIdx) {
+ return 0;
+ }
+ return hdr.flexible_mode ? 1 : 2;
+}
+
+bool LayerInfoPresent(const RTPVideoHeaderVP9& hdr) {
+ return LayerInfoLength(hdr) > 0;
+}
+
+// Reference indices:
+//
+// +-+-+-+-+-+-+-+-+ P=1,F=1: At least one reference index
+// P,F: | P_DIFF |N| up to 3 times has to be specified.
+// +-+-+-+-+-+-+-+-+ N=1: An additional P_DIFF follows
+// current P_DIFF.
+//
+size_t RefIndicesLength(const RTPVideoHeaderVP9& hdr) {
+ if (!hdr.inter_pic_predicted || !hdr.flexible_mode)
+ return 0;
+
+ RTC_DCHECK_GT(hdr.num_ref_pics, 0U);
+ RTC_DCHECK_LE(hdr.num_ref_pics, kMaxVp9RefPics);
+ return hdr.num_ref_pics;
+}
+
+// Scalability structure (SS).
+//
+// +-+-+-+-+-+-+-+-+
+// V: | N_S |Y|G|-|-|-|
+// +-+-+-+-+-+-+-+-+ -|
+// Y: | WIDTH | (OPTIONAL) .
+// + + .
+// | | (OPTIONAL) .
+// +-+-+-+-+-+-+-+-+ . N_S + 1 times
+// | HEIGHT | (OPTIONAL) .
+// + + .
+// | | (OPTIONAL) .
+// +-+-+-+-+-+-+-+-+ -|
+// G: | N_G | (OPTIONAL)
+// +-+-+-+-+-+-+-+-+ -|
+// N_G: | T |U| R |-|-| (OPTIONAL) .
+// +-+-+-+-+-+-+-+-+ -| . N_G times
+// | P_DIFF | (OPTIONAL) . R times .
+// +-+-+-+-+-+-+-+-+ -| -|
+//
+size_t SsDataLength(const RTPVideoHeaderVP9& hdr) {
+ if (!hdr.ss_data_available)
+ return 0;
+
+ RTC_DCHECK_GT(hdr.num_spatial_layers, 0U);
+ RTC_DCHECK_LE(hdr.num_spatial_layers, kMaxVp9NumberOfSpatialLayers);
+ RTC_DCHECK_LE(hdr.gof.num_frames_in_gof, kMaxVp9FramesInGof);
+ size_t length = 1; // V
+ if (hdr.spatial_layer_resolution_present) {
+ length += 4 * hdr.num_spatial_layers; // Y
+ }
+ if (hdr.gof.num_frames_in_gof > 0) {
+ ++length; // G
+ }
+ // N_G
+ length += hdr.gof.num_frames_in_gof; // T, U, R
+ for (size_t i = 0; i < hdr.gof.num_frames_in_gof; ++i) {
+ RTC_DCHECK_LE(hdr.gof.num_ref_pics[i], kMaxVp9RefPics);
+ length += hdr.gof.num_ref_pics[i]; // R times
+ }
+ return length;
+}
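+
+// Worked example (editor's note, not part of the original source): with two
+// spatial layers, resolutions present, and a three-frame GOF whose frames
+// have 0, 3 and 2 reference pictures, the SS data takes
+// 1 (V) + 2*4 (Y) + 1 (G) + 3 (N_G) + (0 + 3 + 2) (P_DIFFs) = 18 bytes;
+// adding the one fixed descriptor byte gives the 19-byte header expected by
+// the TestSsData unit test.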
+
+size_t PayloadDescriptorLengthMinusSsData(const RTPVideoHeaderVP9& hdr) {
+ return kFixedPayloadDescriptorBytes + PictureIdLength(hdr) +
+ LayerInfoLength(hdr) + RefIndicesLength(hdr);
+}
+
+// Picture ID:
+//
+// +-+-+-+-+-+-+-+-+
+// I: |M| PICTURE ID | M:0 => picture id is 7 bits.
+// +-+-+-+-+-+-+-+-+ M:1 => picture id is 15 bits.
+// M: | EXTENDED PID |
+// +-+-+-+-+-+-+-+-+
+//
+bool WritePictureId(const RTPVideoHeaderVP9& vp9,
+ rtc::BitBufferWriter* writer) {
+ bool m_bit = (PictureIdLength(vp9) == 2);
+ RETURN_FALSE_ON_ERROR(writer->WriteBits(m_bit ? 1 : 0, 1));
+ RETURN_FALSE_ON_ERROR(writer->WriteBits(vp9.picture_id, m_bit ? 15 : 7));
+ return true;
+}
+
+// Layer indices:
+//
+// Flexible mode (F=1):
+//
+// +-+-+-+-+-+-+-+-+
+// L: | T |U| S |D|
+// +-+-+-+-+-+-+-+-+
+//
+bool WriteLayerInfoCommon(const RTPVideoHeaderVP9& vp9,
+ rtc::BitBufferWriter* writer) {
+ RETURN_FALSE_ON_ERROR(writer->WriteBits(TemporalIdxField(vp9, 0), 3));
+ RETURN_FALSE_ON_ERROR(writer->WriteBits(vp9.temporal_up_switch ? 1 : 0, 1));
+ RETURN_FALSE_ON_ERROR(writer->WriteBits(SpatialIdxField(vp9, 0), 3));
+ RETURN_FALSE_ON_ERROR(
+ writer->WriteBits(vp9.inter_layer_predicted ? 1 : 0, 1));
+ return true;
+}
+
+// Non-flexible mode (F=0):
+//
+// +-+-+-+-+-+-+-+-+
+// L: | T |U| S |D|
+// +-+-+-+-+-+-+-+-+
+// | TL0PICIDX |
+// +-+-+-+-+-+-+-+-+
+//
+bool WriteLayerInfoNonFlexibleMode(const RTPVideoHeaderVP9& vp9,
+ rtc::BitBufferWriter* writer) {
+ RETURN_FALSE_ON_ERROR(writer->WriteUInt8(Tl0PicIdxField(vp9, 0)));
+ return true;
+}
+
+bool WriteLayerInfo(const RTPVideoHeaderVP9& vp9,
+ rtc::BitBufferWriter* writer) {
+ if (!WriteLayerInfoCommon(vp9, writer))
+ return false;
+
+ if (vp9.flexible_mode)
+ return true;
+
+ return WriteLayerInfoNonFlexibleMode(vp9, writer);
+}
+
+// Reference indices:
+//
+// +-+-+-+-+-+-+-+-+ P=1,F=1: At least one reference index
+// P,F: | P_DIFF |N| up to 3 times has to be specified.
+// +-+-+-+-+-+-+-+-+ N=1: An additional P_DIFF follows
+// current P_DIFF.
+//
+bool WriteRefIndices(const RTPVideoHeaderVP9& vp9,
+ rtc::BitBufferWriter* writer) {
+ if (!PictureIdPresent(vp9) || vp9.num_ref_pics == 0 ||
+ vp9.num_ref_pics > kMaxVp9RefPics) {
+ return false;
+ }
+ for (uint8_t i = 0; i < vp9.num_ref_pics; ++i) {
+ bool n_bit = !(i == vp9.num_ref_pics - 1);
+ RETURN_FALSE_ON_ERROR(writer->WriteBits(vp9.pid_diff[i], 7));
+ RETURN_FALSE_ON_ERROR(writer->WriteBits(n_bit ? 1 : 0, 1));
+ }
+ return true;
+}
+
+// Scalability structure (SS).
+//
+// +-+-+-+-+-+-+-+-+
+// V: | N_S |Y|G|-|-|-|
+// +-+-+-+-+-+-+-+-+ -|
+// Y: | WIDTH | (OPTIONAL) .
+// + + .
+// | | (OPTIONAL) .
+// +-+-+-+-+-+-+-+-+ . N_S + 1 times
+// | HEIGHT | (OPTIONAL) .
+// + + .
+// | | (OPTIONAL) .
+// +-+-+-+-+-+-+-+-+ -|
+// G: | N_G | (OPTIONAL)
+// +-+-+-+-+-+-+-+-+ -|
+// N_G: | T |U| R |-|-| (OPTIONAL) .
+// +-+-+-+-+-+-+-+-+ -| . N_G times
+// | P_DIFF | (OPTIONAL) . R times .
+// +-+-+-+-+-+-+-+-+ -| -|
+//
+bool WriteSsData(const RTPVideoHeaderVP9& vp9, rtc::BitBufferWriter* writer) {
+ RTC_DCHECK_GT(vp9.num_spatial_layers, 0U);
+ RTC_DCHECK_LE(vp9.num_spatial_layers, kMaxVp9NumberOfSpatialLayers);
+ RTC_DCHECK_LE(vp9.gof.num_frames_in_gof, kMaxVp9FramesInGof);
+ bool g_bit = vp9.gof.num_frames_in_gof > 0;
+
+ RETURN_FALSE_ON_ERROR(writer->WriteBits(vp9.num_spatial_layers - 1, 3));
+ RETURN_FALSE_ON_ERROR(
+ writer->WriteBits(vp9.spatial_layer_resolution_present ? 1 : 0, 1));
+ RETURN_FALSE_ON_ERROR(writer->WriteBits(g_bit ? 1 : 0, 1)); // G
+ RETURN_FALSE_ON_ERROR(writer->WriteBits(kReservedBitValue0, 3));
+
+ if (vp9.spatial_layer_resolution_present) {
+ for (size_t i = 0; i < vp9.num_spatial_layers; ++i) {
+ RETURN_FALSE_ON_ERROR(writer->WriteUInt16(vp9.width[i]));
+ RETURN_FALSE_ON_ERROR(writer->WriteUInt16(vp9.height[i]));
+ }
+ }
+ if (g_bit) {
+ RETURN_FALSE_ON_ERROR(writer->WriteUInt8(vp9.gof.num_frames_in_gof));
+ }
+ for (size_t i = 0; i < vp9.gof.num_frames_in_gof; ++i) {
+ RETURN_FALSE_ON_ERROR(writer->WriteBits(vp9.gof.temporal_idx[i], 3));
+ RETURN_FALSE_ON_ERROR(
+ writer->WriteBits(vp9.gof.temporal_up_switch[i] ? 1 : 0, 1));
+ RETURN_FALSE_ON_ERROR(writer->WriteBits(vp9.gof.num_ref_pics[i], 2));
+ RETURN_FALSE_ON_ERROR(writer->WriteBits(kReservedBitValue0, 2));
+ for (uint8_t r = 0; r < vp9.gof.num_ref_pics[i]; ++r) {
+ RETURN_FALSE_ON_ERROR(writer->WriteUInt8(vp9.gof.pid_diff[i][r]));
+ }
+ }
+ return true;
+}
+
+// TODO(https://bugs.webrtc.org/11319):
+// Workaround for switching off spatial layers on the fly.
+// Sent layers must start from SL0 on the RTP layer, but encoding can start
+// from any spatial layer because the WebRTC-SVC API isn't implemented yet
+// and the current API to invoke SVC is not flexible enough.
+RTPVideoHeaderVP9 RemoveInactiveSpatialLayers(
+ const RTPVideoHeaderVP9& original_header) {
+ RTPVideoHeaderVP9 hdr(original_header);
+ if (original_header.first_active_layer == 0)
+ return hdr;
+ for (size_t i = hdr.first_active_layer; i < hdr.num_spatial_layers; ++i) {
+ hdr.width[i - hdr.first_active_layer] = hdr.width[i];
+ hdr.height[i - hdr.first_active_layer] = hdr.height[i];
+ }
+ for (size_t i = hdr.num_spatial_layers - hdr.first_active_layer;
+ i < hdr.num_spatial_layers; ++i) {
+ hdr.width[i] = 0;
+ hdr.height[i] = 0;
+ }
+ hdr.num_spatial_layers -= hdr.first_active_layer;
+ hdr.spatial_idx -= hdr.first_active_layer;
+ hdr.first_active_layer = 0;
+ return hdr;
+}
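+
+// Example (editor's illustration): with num_spatial_layers = 3,
+// first_active_layer = 1, and resolutions {0x0, 640x360, 1280x720}, the
+// returned header has num_spatial_layers = 2, first_active_layer = 0,
+// resolutions {640x360, 1280x720, 0x0}, and spatial_idx reduced by one, as
+// exercised by the ShiftsSpatialLayersTowardZeroWhenFirstLayersAreDisabled
+// unit test.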
+} // namespace
+
+RtpPacketizerVp9::RtpPacketizerVp9(rtc::ArrayView<const uint8_t> payload,
+ PayloadSizeLimits limits,
+ const RTPVideoHeaderVP9& hdr)
+ : hdr_(RemoveInactiveSpatialLayers(hdr)),
+ header_size_(PayloadDescriptorLengthMinusSsData(hdr_)),
+ first_packet_extra_header_size_(SsDataLength(hdr_)),
+ remaining_payload_(payload) {
+ RTC_DCHECK_EQ(hdr_.first_active_layer, 0);
+
+ limits.max_payload_len -= header_size_;
+ limits.first_packet_reduction_len += first_packet_extra_header_size_;
+ limits.single_packet_reduction_len += first_packet_extra_header_size_;
+
+ payload_sizes_ = SplitAboutEqually(payload.size(), limits);
+ current_packet_ = payload_sizes_.begin();
+}
+
+RtpPacketizerVp9::~RtpPacketizerVp9() = default;
+
+size_t RtpPacketizerVp9::NumPackets() const {
+ return payload_sizes_.end() - current_packet_;
+}
+
+bool RtpPacketizerVp9::NextPacket(RtpPacketToSend* packet) {
+ RTC_DCHECK(packet);
+ if (current_packet_ == payload_sizes_.end()) {
+ return false;
+ }
+
+ bool layer_begin = current_packet_ == payload_sizes_.begin();
+ int packet_payload_len = *current_packet_;
+ ++current_packet_;
+ bool layer_end = current_packet_ == payload_sizes_.end();
+
+ int header_size = header_size_;
+ if (layer_begin)
+ header_size += first_packet_extra_header_size_;
+
+ uint8_t* buffer = packet->AllocatePayload(header_size + packet_payload_len);
+ RTC_CHECK(buffer);
+
+ if (!WriteHeader(layer_begin, layer_end,
+ rtc::MakeArrayView(buffer, header_size)))
+ return false;
+
+ memcpy(buffer + header_size, remaining_payload_.data(), packet_payload_len);
+ remaining_payload_ = remaining_payload_.subview(packet_payload_len);
+
+ // Ensure end_of_picture is always set on top spatial layer when it is not
+ // dropped.
+ RTC_DCHECK(hdr_.spatial_idx < hdr_.num_spatial_layers - 1 ||
+ hdr_.end_of_picture);
+
+ packet->SetMarker(layer_end && hdr_.end_of_picture);
+ return true;
+}
+
+// VP9 format:
+//
+// Payload descriptor for F = 1 (flexible mode)
+// 0 1 2 3 4 5 6 7
+// +-+-+-+-+-+-+-+-+
+// |I|P|L|F|B|E|V|Z| (REQUIRED)
+// +-+-+-+-+-+-+-+-+
+// I: |M| PICTURE ID | (RECOMMENDED)
+// +-+-+-+-+-+-+-+-+
+// M: | EXTENDED PID | (RECOMMENDED)
+// +-+-+-+-+-+-+-+-+
+// L: | T |U| S |D| (CONDITIONALLY RECOMMENDED)
+// +-+-+-+-+-+-+-+-+ -|
+// P,F: | P_DIFF |N| (CONDITIONALLY RECOMMENDED) . up to 3 times
+// +-+-+-+-+-+-+-+-+ -|
+// V: | SS |
+// | .. |
+// +-+-+-+-+-+-+-+-+
+//
+// Payload descriptor for F = 0 (non-flexible mode)
+// 0 1 2 3 4 5 6 7
+// +-+-+-+-+-+-+-+-+
+// |I|P|L|F|B|E|V|Z| (REQUIRED)
+// +-+-+-+-+-+-+-+-+
+// I: |M| PICTURE ID | (RECOMMENDED)
+// +-+-+-+-+-+-+-+-+
+// M: | EXTENDED PID | (RECOMMENDED)
+// +-+-+-+-+-+-+-+-+
+// L: | T |U| S |D| (CONDITIONALLY RECOMMENDED)
+// +-+-+-+-+-+-+-+-+
+// | TL0PICIDX | (CONDITIONALLY REQUIRED)
+// +-+-+-+-+-+-+-+-+
+// V: | SS |
+// | .. |
+// +-+-+-+-+-+-+-+-+
+bool RtpPacketizerVp9::WriteHeader(bool layer_begin,
+ bool layer_end,
+ rtc::ArrayView<uint8_t> buffer) const {
+ // Required payload descriptor byte.
+ bool i_bit = PictureIdPresent(hdr_);
+ bool p_bit = hdr_.inter_pic_predicted;
+ bool l_bit = LayerInfoPresent(hdr_);
+ bool f_bit = hdr_.flexible_mode;
+ bool b_bit = layer_begin;
+ bool e_bit = layer_end;
+ bool v_bit = hdr_.ss_data_available && b_bit;
+ bool z_bit = hdr_.non_ref_for_inter_layer_pred;
+
+ rtc::BitBufferWriter writer(buffer.data(), buffer.size());
+ RETURN_FALSE_ON_ERROR(writer.WriteBits(i_bit ? 1 : 0, 1));
+ RETURN_FALSE_ON_ERROR(writer.WriteBits(p_bit ? 1 : 0, 1));
+ RETURN_FALSE_ON_ERROR(writer.WriteBits(l_bit ? 1 : 0, 1));
+ RETURN_FALSE_ON_ERROR(writer.WriteBits(f_bit ? 1 : 0, 1));
+ RETURN_FALSE_ON_ERROR(writer.WriteBits(b_bit ? 1 : 0, 1));
+ RETURN_FALSE_ON_ERROR(writer.WriteBits(e_bit ? 1 : 0, 1));
+ RETURN_FALSE_ON_ERROR(writer.WriteBits(v_bit ? 1 : 0, 1));
+ RETURN_FALSE_ON_ERROR(writer.WriteBits(z_bit ? 1 : 0, 1));
+
+ // Add fields that are present.
+ if (i_bit && !WritePictureId(hdr_, &writer)) {
+ RTC_LOG(LS_ERROR) << "Failed writing VP9 picture id.";
+ return false;
+ }
+ if (l_bit && !WriteLayerInfo(hdr_, &writer)) {
+ RTC_LOG(LS_ERROR) << "Failed writing VP9 layer info.";
+ return false;
+ }
+ if (p_bit && f_bit && !WriteRefIndices(hdr_, &writer)) {
+ RTC_LOG(LS_ERROR) << "Failed writing VP9 ref indices.";
+ return false;
+ }
+ if (v_bit && !WriteSsData(hdr_, &writer)) {
+ RTC_LOG(LS_ERROR) << "Failed writing VP9 SS data.";
+ return false;
+ }
+
+ size_t offset_bytes = 0;
+ size_t offset_bits = 0;
+ writer.GetCurrentOffset(&offset_bytes, &offset_bits);
+ RTC_DCHECK_EQ(offset_bits, 0);
+ RTC_DCHECK_EQ(offset_bytes, buffer.size());
+ return true;
+}
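+
+// Worked example (editor's note): for a single-packet frame with no optional
+// fields and non_ref_for_inter_layer_pred set, the required byte is
+// I:0 P:0 L:0 F:0 B:1 E:1 V:0 Z:1. Bits are written most-significant first,
+// so the byte is 0b00001101 = 0x0d, as checked by the
+// TestNonRefForInterLayerPred unit test.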
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_format_vp9.h b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_format_vp9.h
new file mode 100644
index 0000000000..3cf4dd56e5
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_format_vp9.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+//
+// This file contains the declaration of the VP9 packetizer class.
+// A packetizer object is created for each encoded video frame. The
+// constructor is called with the payload data and size.
+//
+// After creating the packetizer, the method NextPacket is called
+// repeatedly to get all packets for the frame. The method returns
+// true while packets remain and false once all packets for the frame
+// have been delivered.
+//
+
+#ifndef MODULES_RTP_RTCP_SOURCE_RTP_FORMAT_VP9_H_
+#define MODULES_RTP_RTCP_SOURCE_RTP_FORMAT_VP9_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <vector>
+
+#include "api/array_view.h"
+#include "modules/rtp_rtcp/source/rtp_format.h"
+#include "modules/rtp_rtcp/source/rtp_packet_to_send.h"
+#include "modules/video_coding/codecs/vp9/include/vp9_globals.h"
+
+namespace webrtc {
+
+class RtpPacketizerVp9 : public RtpPacketizer {
+ public:
+ // The `payload` must be one encoded VP9 layer frame.
+ RtpPacketizerVp9(rtc::ArrayView<const uint8_t> payload,
+ PayloadSizeLimits limits,
+ const RTPVideoHeaderVP9& hdr);
+
+ ~RtpPacketizerVp9() override;
+
+ RtpPacketizerVp9(const RtpPacketizerVp9&) = delete;
+ RtpPacketizerVp9& operator=(const RtpPacketizerVp9&) = delete;
+
+ size_t NumPackets() const override;
+
+  // Gets the next payload with a VP9 payload header.
+  // Writes the payload and sets the marker bit of the `packet`.
+ // Returns true on success, false otherwise.
+ bool NextPacket(RtpPacketToSend* packet) override;
+
+ private:
+ // Writes the payload descriptor header.
+  // `layer_begin` and `layer_end` indicate the position of the packet in
+ // the layer frame. Returns false on failure.
+ bool WriteHeader(bool layer_begin,
+ bool layer_end,
+ rtc::ArrayView<uint8_t> rtp_payload) const;
+
+ const RTPVideoHeaderVP9 hdr_;
+ const int header_size_;
+ const int first_packet_extra_header_size_;
+ rtc::ArrayView<const uint8_t> remaining_payload_;
+ std::vector<int> payload_sizes_;
+ std::vector<int>::const_iterator current_packet_;
+};
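+
+// Usage sketch (editor's illustration; `payload` is assumed to hold one
+// encoded VP9 layer frame):
+//
+//   RTPVideoHeaderVP9 hdr;
+//   hdr.InitRTPVideoHeaderVP9();
+//   RtpPacketizer::PayloadSizeLimits limits;
+//   limits.max_payload_len = 1200;
+//   RtpPacketizerVp9 packetizer(payload, limits, hdr);
+//   RtpPacketToSend packet(/*extensions=*/nullptr);
+//   while (packetizer.NextPacket(&packet)) {
+//     // Send `packet`.
+//   }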
+
+} // namespace webrtc
+#endif // MODULES_RTP_RTCP_SOURCE_RTP_FORMAT_VP9_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_format_vp9_unittest.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_format_vp9_unittest.cc
new file mode 100644
index 0000000000..e18b8a803f
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_format_vp9_unittest.cc
@@ -0,0 +1,608 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtp_format_vp9.h"
+
+#include <memory>
+#include <vector>
+
+#include "api/array_view.h"
+#include "modules/rtp_rtcp/source/rtp_packet_to_send.h"
+#include "modules/rtp_rtcp/source/video_rtp_depacketizer_vp9.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+void VerifyHeader(const RTPVideoHeaderVP9& expected,
+ const RTPVideoHeaderVP9& actual) {
+ EXPECT_EQ(expected.inter_layer_predicted, actual.inter_layer_predicted);
+ EXPECT_EQ(expected.inter_pic_predicted, actual.inter_pic_predicted);
+ EXPECT_EQ(expected.flexible_mode, actual.flexible_mode);
+ EXPECT_EQ(expected.beginning_of_frame, actual.beginning_of_frame);
+ EXPECT_EQ(expected.end_of_frame, actual.end_of_frame);
+ EXPECT_EQ(expected.ss_data_available, actual.ss_data_available);
+ EXPECT_EQ(expected.non_ref_for_inter_layer_pred,
+ actual.non_ref_for_inter_layer_pred);
+ EXPECT_EQ(expected.picture_id, actual.picture_id);
+ EXPECT_EQ(expected.max_picture_id, actual.max_picture_id);
+ EXPECT_EQ(expected.temporal_idx, actual.temporal_idx);
+ EXPECT_EQ(expected.spatial_idx, actual.spatial_idx);
+ EXPECT_EQ(expected.gof_idx, actual.gof_idx);
+ EXPECT_EQ(expected.tl0_pic_idx, actual.tl0_pic_idx);
+ EXPECT_EQ(expected.temporal_up_switch, actual.temporal_up_switch);
+
+ EXPECT_EQ(expected.num_ref_pics, actual.num_ref_pics);
+ for (uint8_t i = 0; i < expected.num_ref_pics; ++i) {
+ EXPECT_EQ(expected.pid_diff[i], actual.pid_diff[i]);
+ EXPECT_EQ(expected.ref_picture_id[i], actual.ref_picture_id[i]);
+ }
+ if (expected.ss_data_available) {
+ EXPECT_EQ(expected.spatial_layer_resolution_present,
+ actual.spatial_layer_resolution_present);
+ EXPECT_EQ(expected.num_spatial_layers, actual.num_spatial_layers);
+ if (expected.spatial_layer_resolution_present) {
+ for (size_t i = 0; i < expected.num_spatial_layers; i++) {
+ EXPECT_EQ(expected.width[i], actual.width[i]);
+ EXPECT_EQ(expected.height[i], actual.height[i]);
+ }
+ }
+ EXPECT_EQ(expected.gof.num_frames_in_gof, actual.gof.num_frames_in_gof);
+ for (size_t i = 0; i < expected.gof.num_frames_in_gof; i++) {
+ EXPECT_EQ(expected.gof.temporal_up_switch[i],
+ actual.gof.temporal_up_switch[i]);
+ EXPECT_EQ(expected.gof.temporal_idx[i], actual.gof.temporal_idx[i]);
+ EXPECT_EQ(expected.gof.num_ref_pics[i], actual.gof.num_ref_pics[i]);
+ for (uint8_t j = 0; j < expected.gof.num_ref_pics[i]; j++) {
+ EXPECT_EQ(expected.gof.pid_diff[i][j], actual.gof.pid_diff[i][j]);
+ }
+ }
+ }
+}
+
+void ParseAndCheckPacket(const uint8_t* packet,
+ const RTPVideoHeaderVP9& expected,
+ int expected_hdr_length,
+ size_t expected_length) {
+ RTPVideoHeader video_header;
+ EXPECT_EQ(VideoRtpDepacketizerVp9::ParseRtpPayload(
+ rtc::MakeArrayView(packet, expected_length), &video_header),
+ expected_hdr_length);
+ EXPECT_EQ(kVideoCodecVP9, video_header.codec);
+ auto& vp9_header =
+ absl::get<RTPVideoHeaderVP9>(video_header.video_type_header);
+ VerifyHeader(expected, vp9_header);
+}
+
+// Payload descriptor for flexible mode
+// 0 1 2 3 4 5 6 7
+// +-+-+-+-+-+-+-+-+
+// |I|P|L|F|B|E|V|Z| (REQUIRED)
+// +-+-+-+-+-+-+-+-+
+// I: |M| PICTURE ID | (RECOMMENDED)
+// +-+-+-+-+-+-+-+-+
+// M: | EXTENDED PID | (RECOMMENDED)
+// +-+-+-+-+-+-+-+-+
+// L: | T |U| S |D| (CONDITIONALLY RECOMMENDED)
+// +-+-+-+-+-+-+-+-+ -|
+// P,F: | P_DIFF |N| (CONDITIONALLY RECOMMENDED) . up to 3 times
+// +-+-+-+-+-+-+-+-+ -|
+// V: | SS |
+// | .. |
+// +-+-+-+-+-+-+-+-+
+//
+// Payload descriptor for non-flexible mode
+// 0 1 2 3 4 5 6 7
+// +-+-+-+-+-+-+-+-+
+// |I|P|L|F|B|E|V|Z| (REQUIRED)
+// +-+-+-+-+-+-+-+-+
+// I: |M| PICTURE ID | (RECOMMENDED)
+// +-+-+-+-+-+-+-+-+
+// M: | EXTENDED PID | (RECOMMENDED)
+// +-+-+-+-+-+-+-+-+
+// L: | T |U| S |D| (CONDITIONALLY RECOMMENDED)
+// +-+-+-+-+-+-+-+-+
+// | TL0PICIDX | (CONDITIONALLY REQUIRED)
+// +-+-+-+-+-+-+-+-+
+// V: | SS |
+// | .. |
+// +-+-+-+-+-+-+-+-+
+
+class RtpPacketizerVp9Test : public ::testing::Test {
+ protected:
+ static constexpr RtpPacketToSend::ExtensionManager* kNoExtensions = nullptr;
+ static constexpr size_t kMaxPacketSize = 1200;
+
+ RtpPacketizerVp9Test() : packet_(kNoExtensions, kMaxPacketSize) {}
+ void SetUp() override { expected_.InitRTPVideoHeaderVP9(); }
+
+ RtpPacketToSend packet_;
+ std::vector<uint8_t> payload_;
+ size_t payload_pos_;
+ RTPVideoHeaderVP9 expected_;
+ std::unique_ptr<RtpPacketizerVp9> packetizer_;
+ size_t num_packets_;
+
+ void Init(size_t payload_size, size_t packet_size) {
+ payload_.assign(payload_size, 7);
+ payload_pos_ = 0;
+ RtpPacketizer::PayloadSizeLimits limits;
+ limits.max_payload_len = packet_size;
+ packetizer_.reset(new RtpPacketizerVp9(payload_, limits, expected_));
+ num_packets_ = packetizer_->NumPackets();
+ }
+
+ void CheckPayload(const uint8_t* packet,
+ size_t start_pos,
+ size_t end_pos,
+ bool last) {
+ for (size_t i = start_pos; i < end_pos; ++i) {
+ EXPECT_EQ(packet[i], payload_[payload_pos_++]);
+ }
+ EXPECT_EQ(last, payload_pos_ == payload_.size());
+ }
+
+ void CreateParseAndCheckPackets(
+ rtc::ArrayView<const size_t> expected_hdr_sizes,
+ rtc::ArrayView<const size_t> expected_sizes) {
+ ASSERT_EQ(expected_hdr_sizes.size(), expected_sizes.size());
+ ASSERT_TRUE(packetizer_ != nullptr);
+ EXPECT_EQ(expected_sizes.size(), num_packets_);
+ for (size_t i = 0; i < expected_sizes.size(); ++i) {
+ EXPECT_TRUE(packetizer_->NextPacket(&packet_));
+ auto rtp_payload = packet_.payload();
+ EXPECT_EQ(expected_sizes[i], rtp_payload.size());
+ RTPVideoHeaderVP9 hdr = expected_;
+ hdr.beginning_of_frame = (i == 0);
+ hdr.end_of_frame = (i + 1) == expected_sizes.size();
+ ParseAndCheckPacket(rtp_payload.data(), hdr, expected_hdr_sizes[i],
+ rtp_payload.size());
+ CheckPayload(rtp_payload.data(), expected_hdr_sizes[i],
+ rtp_payload.size(), (i + 1) == expected_sizes.size());
+ expected_.ss_data_available = false;
+ }
+ }
+
+ void CreateParseAndCheckPacketsLayers(size_t num_spatial_layers,
+ size_t expected_layer) {
+ ASSERT_TRUE(packetizer_ != nullptr);
+ for (size_t i = 0; i < num_packets_; ++i) {
+ EXPECT_TRUE(packetizer_->NextPacket(&packet_));
+ RTPVideoHeader video_header;
+ VideoRtpDepacketizerVp9::ParseRtpPayload(packet_.payload(),
+ &video_header);
+ const auto& vp9_header =
+ absl::get<RTPVideoHeaderVP9>(video_header.video_type_header);
+ EXPECT_EQ(vp9_header.spatial_idx, expected_layer);
+ EXPECT_EQ(vp9_header.num_spatial_layers, num_spatial_layers);
+ }
+ }
+};
+
+TEST_F(RtpPacketizerVp9Test, TestEqualSizedMode_OnePacket) {
+ const size_t kFrameSize = 25;
+ const size_t kPacketSize = 26;
+ Init(kFrameSize, kPacketSize);
+
+ // One packet:
+ // I:0, P:0, L:0, F:0, B:1, E:1, V:0, Z:0 (1hdr + 25 payload)
+ const size_t kExpectedHdrSizes[] = {1};
+ const size_t kExpectedSizes[] = {26};
+ CreateParseAndCheckPackets(kExpectedHdrSizes, kExpectedSizes);
+}
+
+TEST_F(RtpPacketizerVp9Test, TestEqualSizedMode_TwoPackets) {
+ const size_t kFrameSize = 27;
+ const size_t kPacketSize = 27;
+ Init(kFrameSize, kPacketSize);
+
+ // Two packets:
+  // I:0, P:0, L:0, F:0, B:1, E:0, V:0, Z:0 (1hdr + 13 payload)
+  // I:0, P:0, L:0, F:0, B:0, E:1, V:0, Z:0 (1hdr + 14 payload)
+ const size_t kExpectedHdrSizes[] = {1, 1};
+ const size_t kExpectedSizes[] = {14, 15};
+ CreateParseAndCheckPackets(kExpectedHdrSizes, kExpectedSizes);
+}
+
+TEST_F(RtpPacketizerVp9Test, TestTooShortBufferToFitPayload) {
+ const size_t kFrameSize = 1;
+ const size_t kPacketSize = 1;
+ Init(kFrameSize, kPacketSize); // 1hdr + 1 payload
+
+ EXPECT_FALSE(packetizer_->NextPacket(&packet_));
+}
+
+TEST_F(RtpPacketizerVp9Test, TestOneBytePictureId) {
+ const size_t kFrameSize = 30;
+ const size_t kPacketSize = 12;
+
+ expected_.picture_id = kMaxOneBytePictureId; // 2 byte payload descriptor
+ expected_.max_picture_id = kMaxOneBytePictureId;
+ Init(kFrameSize, kPacketSize);
+
+ // Three packets:
+ // I:1, P:0, L:0, F:0, B:1, E:0, V:0, Z:0 (2hdr + 10 payload)
+ // I:1, P:0, L:0, F:0, B:0, E:0, V:0, Z:0 (2hdr + 10 payload)
+ // I:1, P:0, L:0, F:0, B:0, E:1, V:0, Z:0 (2hdr + 10 payload)
+ const size_t kExpectedHdrSizes[] = {2, 2, 2};
+ const size_t kExpectedSizes[] = {12, 12, 12};
+ CreateParseAndCheckPackets(kExpectedHdrSizes, kExpectedSizes);
+}
+
+TEST_F(RtpPacketizerVp9Test, TestTwoBytePictureId) {
+ const size_t kFrameSize = 31;
+ const size_t kPacketSize = 13;
+
+ expected_.picture_id = kMaxTwoBytePictureId; // 3 byte payload descriptor
+ Init(kFrameSize, kPacketSize);
+
+ // Four packets:
+  // I:1, P:0, L:0, F:0, B:1, E:0, V:0, Z:0 (3hdr + 7 payload)
+  // I:1, P:0, L:0, F:0, B:0, E:0, V:0, Z:0 (3hdr + 8 payload)
+  // I:1, P:0, L:0, F:0, B:0, E:0, V:0, Z:0 (3hdr + 8 payload)
+  // I:1, P:0, L:0, F:0, B:0, E:1, V:0, Z:0 (3hdr + 8 payload)
+ const size_t kExpectedHdrSizes[] = {3, 3, 3, 3};
+ const size_t kExpectedSizes[] = {10, 11, 11, 11};
+ CreateParseAndCheckPackets(kExpectedHdrSizes, kExpectedSizes);
+}
+
+TEST_F(RtpPacketizerVp9Test, TestLayerInfoWithNonFlexibleMode) {
+ const size_t kFrameSize = 30;
+ const size_t kPacketSize = 25;
+
+ expected_.temporal_idx = 3;
+ expected_.temporal_up_switch = true; // U
+ expected_.num_spatial_layers = 3;
+ expected_.spatial_idx = 2;
+ expected_.inter_layer_predicted = true; // D
+ expected_.tl0_pic_idx = 117;
+ Init(kFrameSize, kPacketSize);
+
+ // Two packets:
+ // | I:0, P:0, L:1, F:0, B:1, E:0, V:0 Z:0 | (3hdr + 15 payload)
+ // L: | T:3, U:1, S:2, D:1 | TL0PICIDX:117 |
+ // | I:0, P:0, L:1, F:0, B:0, E:1, V:0 Z:0 | (3hdr + 15 payload)
+ // L: | T:3, U:1, S:2, D:1 | TL0PICIDX:117 |
+ const size_t kExpectedHdrSizes[] = {3, 3};
+ const size_t kExpectedSizes[] = {18, 18};
+ CreateParseAndCheckPackets(kExpectedHdrSizes, kExpectedSizes);
+}
+
+TEST_F(RtpPacketizerVp9Test, TestLayerInfoWithFlexibleMode) {
+ const size_t kFrameSize = 21;
+ const size_t kPacketSize = 23;
+
+ expected_.flexible_mode = true;
+ expected_.temporal_idx = 3;
+ expected_.temporal_up_switch = true; // U
+ expected_.num_spatial_layers = 3;
+ expected_.spatial_idx = 2;
+ expected_.inter_layer_predicted = false; // D
+ Init(kFrameSize, kPacketSize);
+
+ // One packet:
+ // I:0, P:0, L:1, F:1, B:1, E:1, V:0 (2hdr + 21 payload)
+ // L: T:3, U:1, S:2, D:0
+ const size_t kExpectedHdrSizes[] = {2};
+ const size_t kExpectedSizes[] = {23};
+ CreateParseAndCheckPackets(kExpectedHdrSizes, kExpectedSizes);
+}
+
+TEST_F(RtpPacketizerVp9Test, TestRefIdx) {
+ const size_t kFrameSize = 16;
+ const size_t kPacketSize = 21;
+
+ expected_.inter_pic_predicted = true; // P
+ expected_.flexible_mode = true; // F
+ expected_.picture_id = 2;
+ expected_.max_picture_id = kMaxOneBytePictureId;
+
+ expected_.num_ref_pics = 3;
+ expected_.pid_diff[0] = 1;
+ expected_.pid_diff[1] = 3;
+ expected_.pid_diff[2] = 127;
+ expected_.ref_picture_id[0] = 1; // 2 - 1 = 1
+ expected_.ref_picture_id[1] = 127; // (kMaxPictureId + 1) + 2 - 3 = 127
+ expected_.ref_picture_id[2] = 3; // (kMaxPictureId + 1) + 2 - 127 = 3
+ Init(kFrameSize, kPacketSize);
+
+  // One packet:
+ // I:1, P:1, L:0, F:1, B:1, E:1, V:0, Z:0 (5hdr + 16 payload)
+ // I: 2
+ // P,F: P_DIFF:1, N:1
+ // P_DIFF:3, N:1
+ // P_DIFF:127, N:0
+ const size_t kExpectedHdrSizes[] = {5};
+ const size_t kExpectedSizes[] = {21};
+ CreateParseAndCheckPackets(kExpectedHdrSizes, kExpectedSizes);
+}
+
+TEST_F(RtpPacketizerVp9Test, TestRefIdxFailsWithoutPictureId) {
+ const size_t kFrameSize = 16;
+ const size_t kPacketSize = 21;
+
+ expected_.inter_pic_predicted = true;
+ expected_.flexible_mode = true;
+ expected_.num_ref_pics = 1;
+ expected_.pid_diff[0] = 3;
+ Init(kFrameSize, kPacketSize);
+
+ EXPECT_FALSE(packetizer_->NextPacket(&packet_));
+}
+
+TEST_F(RtpPacketizerVp9Test, TestSsDataWithoutSpatialResolutionPresent) {
+ const size_t kFrameSize = 21;
+ const size_t kPacketSize = 26;
+
+ expected_.ss_data_available = true;
+ expected_.num_spatial_layers = 1;
+ expected_.spatial_layer_resolution_present = false;
+ expected_.gof.num_frames_in_gof = 1;
+ expected_.gof.temporal_idx[0] = 0;
+ expected_.gof.temporal_up_switch[0] = true;
+ expected_.gof.num_ref_pics[0] = 1;
+ expected_.gof.pid_diff[0][0] = 4;
+ Init(kFrameSize, kPacketSize);
+
+ // One packet:
+ // I:0, P:0, L:0, F:0, B:1, E:1, V:1, Z:0 (5hdr + 21 payload)
+ // N_S:0, Y:0, G:1
+ // N_G:1
+ // T:0, U:1, R:1 | P_DIFF[0][0]:4
+ const size_t kExpectedHdrSizes[] = {5};
+ const size_t kExpectedSizes[] = {26};
+ CreateParseAndCheckPackets(kExpectedHdrSizes, kExpectedSizes);
+}
+
+TEST_F(RtpPacketizerVp9Test, TestSsDataWithoutGbitPresent) {
+ const size_t kFrameSize = 21;
+ const size_t kPacketSize = 23;
+
+ expected_.ss_data_available = true;
+ expected_.num_spatial_layers = 1;
+ expected_.spatial_layer_resolution_present = false;
+ expected_.gof.num_frames_in_gof = 0;
+ Init(kFrameSize, kPacketSize);
+
+ // One packet:
+ // I:0, P:0, L:0, F:0, B:1, E:1, V:1, Z:0 (2hdr + 21 payload)
+ // N_S:0, Y:0, G:0
+ const size_t kExpectedHdrSizes[] = {2};
+ const size_t kExpectedSizes[] = {23};
+ CreateParseAndCheckPackets(kExpectedHdrSizes, kExpectedSizes);
+}
+
+TEST_F(RtpPacketizerVp9Test, TestSsData) {
+ const size_t kFrameSize = 21;
+ const size_t kPacketSize = 40;
+
+ expected_.ss_data_available = true;
+ expected_.num_spatial_layers = 2;
+ expected_.spatial_layer_resolution_present = true;
+ expected_.width[0] = 640;
+ expected_.width[1] = 1280;
+ expected_.height[0] = 360;
+ expected_.height[1] = 720;
+ expected_.gof.num_frames_in_gof = 3;
+ expected_.gof.temporal_idx[0] = 0;
+ expected_.gof.temporal_idx[1] = 1;
+ expected_.gof.temporal_idx[2] = 2;
+ expected_.gof.temporal_up_switch[0] = true;
+ expected_.gof.temporal_up_switch[1] = true;
+ expected_.gof.temporal_up_switch[2] = false;
+ expected_.gof.num_ref_pics[0] = 0;
+ expected_.gof.num_ref_pics[1] = 3;
+ expected_.gof.num_ref_pics[2] = 2;
+ expected_.gof.pid_diff[1][0] = 5;
+ expected_.gof.pid_diff[1][1] = 6;
+ expected_.gof.pid_diff[1][2] = 7;
+ expected_.gof.pid_diff[2][0] = 8;
+ expected_.gof.pid_diff[2][1] = 9;
+ Init(kFrameSize, kPacketSize);
+
+ // One packet:
+ // I:0, P:0, L:0, F:0, B:1, E:1, V:1, Z:0 (19hdr + 21 payload)
+ // N_S:1, Y:1, G:1
+ // WIDTH:640 // 2 bytes
+ // HEIGHT:360 // 2 bytes
+ // WIDTH:1280 // 2 bytes
+ // HEIGHT:720 // 2 bytes
+ // N_G:3
+ // T:0, U:1, R:0
+ // T:1, U:1, R:3 | P_DIFF[1][0]:5 | P_DIFF[1][1]:6 | P_DIFF[1][2]:7
+  // T:2, U:0, R:2 | P_DIFF[2][0]:8 | P_DIFF[2][1]:9
+ const size_t kExpectedHdrSizes[] = {19};
+ const size_t kExpectedSizes[] = {40};
+ CreateParseAndCheckPackets(kExpectedHdrSizes, kExpectedSizes);
+}
+
+TEST_F(RtpPacketizerVp9Test, TestSsDataDoesNotFitInAveragePacket) {
+ const size_t kFrameSize = 24;
+ const size_t kPacketSize = 20;
+
+ expected_.ss_data_available = true;
+ expected_.num_spatial_layers = 2;
+ expected_.spatial_layer_resolution_present = true;
+ expected_.width[0] = 640;
+ expected_.width[1] = 1280;
+ expected_.height[0] = 360;
+ expected_.height[1] = 720;
+ expected_.gof.num_frames_in_gof = 3;
+ expected_.gof.temporal_idx[0] = 0;
+ expected_.gof.temporal_idx[1] = 1;
+ expected_.gof.temporal_idx[2] = 2;
+ expected_.gof.temporal_up_switch[0] = true;
+ expected_.gof.temporal_up_switch[1] = true;
+ expected_.gof.temporal_up_switch[2] = false;
+ expected_.gof.num_ref_pics[0] = 0;
+ expected_.gof.num_ref_pics[1] = 3;
+ expected_.gof.num_ref_pics[2] = 2;
+ expected_.gof.pid_diff[1][0] = 5;
+ expected_.gof.pid_diff[1][1] = 6;
+ expected_.gof.pid_diff[1][2] = 7;
+ expected_.gof.pid_diff[2][0] = 8;
+ expected_.gof.pid_diff[2][1] = 9;
+ Init(kFrameSize, kPacketSize);
+
+ // Three packets:
+ // I:0, P:0, L:0, F:0, B:1, E:1, V:1, Z:0 (19hdr + 1 payload)
+ // N_S:1, Y:1, G:1
+ // WIDTH:640 // 2 bytes
+ // HEIGHT:360 // 2 bytes
+ // WIDTH:1280 // 2 bytes
+ // HEIGHT:720 // 2 bytes
+ // N_G:3
+ // T:0, U:1, R:0
+ // T:1, U:1, R:3 | P_DIFF[1][0]:5 | P_DIFF[1][1]:6 | P_DIFF[1][2]:7
+  // T:2, U:0, R:2 | P_DIFF[2][0]:8 | P_DIFF[2][1]:9
+  // The last two packets have 1-byte VP9 headers and carry the remaining
+  // payload: 14 and 9 bytes.
+ const size_t kExpectedHdrSizes[] = {19, 1, 1};
+ const size_t kExpectedSizes[] = {20, 15, 10};
+ CreateParseAndCheckPackets(kExpectedHdrSizes, kExpectedSizes);
+}
+
+TEST_F(RtpPacketizerVp9Test, EndOfPictureSetsSetMarker) {
+ const size_t kFrameSize = 10;
+ RtpPacketizer::PayloadSizeLimits limits;
+ limits.max_payload_len = 8;
+ const uint8_t kFrame[kFrameSize] = {7};
+
+ RTPVideoHeaderVP9 vp9_header;
+ vp9_header.InitRTPVideoHeaderVP9();
+ vp9_header.flexible_mode = true;
+ vp9_header.num_spatial_layers = 3;
+
+ RtpPacketToSend packet(kNoExtensions);
+
+ // Drop top layer and ensure that marker bit is set on last encoded layer.
+ for (size_t spatial_idx = 0; spatial_idx < vp9_header.num_spatial_layers - 1;
+ ++spatial_idx) {
+ const bool end_of_picture =
+ spatial_idx + 1 == vp9_header.num_spatial_layers - 1;
+ vp9_header.spatial_idx = spatial_idx;
+ vp9_header.end_of_picture = end_of_picture;
+ RtpPacketizerVp9 packetizer(kFrame, limits, vp9_header);
+ ASSERT_TRUE(packetizer.NextPacket(&packet));
+ EXPECT_FALSE(packet.Marker());
+ ASSERT_TRUE(packetizer.NextPacket(&packet));
+ EXPECT_EQ(packet.Marker(), end_of_picture);
+ }
+}
+
+TEST_F(RtpPacketizerVp9Test, TestGeneratesMinimumNumberOfPackets) {
+ const size_t kFrameSize = 10;
+ RtpPacketizer::PayloadSizeLimits limits;
+ limits.max_payload_len = 8;
+ // Calculated by hand. One packet can contain
+  // `kPacketSize` - `kVp9MinDescriptorSize` = 6 bytes of the frame payload,
+  // so fitting 10 bytes requires two packets.
+ const size_t kMinNumberOfPackets = 2;
+ const uint8_t kFrame[kFrameSize] = {7};
+
+ RTPVideoHeaderVP9 vp9_header;
+ vp9_header.InitRTPVideoHeaderVP9();
+
+ RtpPacketToSend packet(kNoExtensions);
+
+ RtpPacketizerVp9 packetizer(kFrame, limits, vp9_header);
+ EXPECT_EQ(packetizer.NumPackets(), kMinNumberOfPackets);
+ ASSERT_TRUE(packetizer.NextPacket(&packet));
+ EXPECT_FALSE(packet.Marker());
+ ASSERT_TRUE(packetizer.NextPacket(&packet));
+ EXPECT_TRUE(packet.Marker());
+}
+
+TEST_F(RtpPacketizerVp9Test, TestRespectsLastPacketReductionLen) {
+ const size_t kFrameSize = 10;
+ RtpPacketizer::PayloadSizeLimits limits;
+ limits.max_payload_len = 8;
+ limits.last_packet_reduction_len = 5;
+  // Calculated by hand. The VP9 payload descriptor is 2 bytes. As in the
+  // test above, one packet is not enough. Two packets can contain only
+  // 2*(`kPacketSize` - `kVp9MinDescriptorSize`) - `kLastPacketReductionLen`
+  // = 7 bytes, but three packets are enough, since they have a capacity of
+  // 3*(8-2)-5 = 13 bytes.
+ const size_t kMinNumberOfPackets = 3;
+ const uint8_t kFrame[kFrameSize] = {7};
+
+ RTPVideoHeaderVP9 vp9_header;
+ vp9_header.InitRTPVideoHeaderVP9();
+ vp9_header.flexible_mode = true;
+
+ RtpPacketToSend packet(kNoExtensions);
+
+ RtpPacketizerVp9 packetizer0(kFrame, limits, vp9_header);
+ EXPECT_EQ(packetizer0.NumPackets(), kMinNumberOfPackets);
+ ASSERT_TRUE(packetizer0.NextPacket(&packet));
+ EXPECT_FALSE(packet.Marker());
+ ASSERT_TRUE(packetizer0.NextPacket(&packet));
+ EXPECT_FALSE(packet.Marker());
+ ASSERT_TRUE(packetizer0.NextPacket(&packet));
+ EXPECT_TRUE(packet.Marker());
+}
+
+TEST_F(RtpPacketizerVp9Test, TestNonRefForInterLayerPred) {
+ const size_t kFrameSize = 25;
+ const size_t kPacketSize = 26;
+
+ expected_.non_ref_for_inter_layer_pred = true;
+ Init(kFrameSize, kPacketSize);
+
+ // I:0, P:0, L:0, F:0, B:1, E:1, V:0, Z:1 (1hdr + 25 payload)
+ const size_t kExpectedHdrSizes[] = {1};
+ const size_t kExpectedSizes[] = {26};
+ CreateParseAndCheckPackets(kExpectedHdrSizes, kExpectedSizes);
+}
+
+TEST_F(RtpPacketizerVp9Test,
+ ShiftsSpatialLayersTowardZeroWhenFirstLayersAreDisabled) {
+ const size_t kFrameSize = 25;
+ const size_t kPacketSize = 1024;
+
+ expected_.width[0] = 0;
+ expected_.height[0] = 0;
+ expected_.width[1] = 640;
+ expected_.height[1] = 360;
+ expected_.width[2] = 1280;
+ expected_.height[2] = 720;
+ expected_.num_spatial_layers = 3;
+ expected_.first_active_layer = 1;
+ expected_.ss_data_available = true;
+ expected_.spatial_layer_resolution_present = true;
+ expected_.gof.num_frames_in_gof = 3;
+ expected_.gof.temporal_idx[0] = 0;
+ expected_.gof.temporal_idx[1] = 1;
+ expected_.gof.temporal_idx[2] = 2;
+ expected_.gof.temporal_up_switch[0] = true;
+ expected_.gof.temporal_up_switch[1] = true;
+ expected_.gof.temporal_up_switch[2] = false;
+ expected_.gof.num_ref_pics[0] = 0;
+ expected_.gof.num_ref_pics[1] = 3;
+ expected_.gof.num_ref_pics[2] = 2;
+ expected_.gof.pid_diff[1][0] = 5;
+ expected_.gof.pid_diff[1][1] = 6;
+ expected_.gof.pid_diff[1][2] = 7;
+ expected_.gof.pid_diff[2][0] = 8;
+ expected_.gof.pid_diff[2][1] = 9;
+
+ expected_.spatial_idx = 1;
+ Init(kFrameSize, kPacketSize);
+ CreateParseAndCheckPacketsLayers(/*num_spatial_layers=*/2,
+ /*expected_layer=*/0);
+
+  // Now check for SL 2.
+ expected_.spatial_idx = 2;
+ Init(kFrameSize, kPacketSize);
+ CreateParseAndCheckPacketsLayers(/*num_spatial_layers=*/2,
+ /*expected_layer=*/1);
+}
+
+} // namespace
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_generic_frame_descriptor.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_generic_frame_descriptor.cc
new file mode 100644
index 0000000000..465308ec45
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_generic_frame_descriptor.cc
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtp_generic_frame_descriptor.h"
+
+#include <cstdint>
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+constexpr int RtpGenericFrameDescriptor::kMaxNumFrameDependencies;
+constexpr int RtpGenericFrameDescriptor::kMaxTemporalLayers;
+constexpr int RtpGenericFrameDescriptor::kMaxSpatialLayers;
+
+RtpGenericFrameDescriptor::RtpGenericFrameDescriptor() = default;
+RtpGenericFrameDescriptor::RtpGenericFrameDescriptor(
+ const RtpGenericFrameDescriptor&) = default;
+RtpGenericFrameDescriptor::~RtpGenericFrameDescriptor() = default;
+
+int RtpGenericFrameDescriptor::TemporalLayer() const {
+ RTC_DCHECK(FirstPacketInSubFrame());
+ return temporal_layer_;
+}
+
+void RtpGenericFrameDescriptor::SetTemporalLayer(int temporal_layer) {
+ RTC_DCHECK_GE(temporal_layer, 0);
+ RTC_DCHECK_LT(temporal_layer, kMaxTemporalLayers);
+ temporal_layer_ = temporal_layer;
+}
+
+int RtpGenericFrameDescriptor::SpatialLayer() const {
+ RTC_DCHECK(FirstPacketInSubFrame());
+ int layer = 0;
+ uint8_t spatial_layers = spatial_layers_;
+  while (spatial_layers != 0 && !(spatial_layers & 1)) {
+ spatial_layers >>= 1;
+ layer++;
+ }
+ return layer;
+}
+
+uint8_t RtpGenericFrameDescriptor::SpatialLayersBitmask() const {
+ RTC_DCHECK(FirstPacketInSubFrame());
+ return spatial_layers_;
+}
+
+void RtpGenericFrameDescriptor::SetSpatialLayersBitmask(
+ uint8_t spatial_layers) {
+ RTC_DCHECK(FirstPacketInSubFrame());
+ spatial_layers_ = spatial_layers;
+}
+
+void RtpGenericFrameDescriptor::SetResolution(int width, int height) {
+ RTC_DCHECK(FirstPacketInSubFrame());
+ RTC_DCHECK_GE(width, 0);
+ RTC_DCHECK_LE(width, 0xFFFF);
+ RTC_DCHECK_GE(height, 0);
+ RTC_DCHECK_LE(height, 0xFFFF);
+ width_ = width;
+ height_ = height;
+}
+
+uint16_t RtpGenericFrameDescriptor::FrameId() const {
+ RTC_DCHECK(FirstPacketInSubFrame());
+ return frame_id_;
+}
+
+void RtpGenericFrameDescriptor::SetFrameId(uint16_t frame_id) {
+ RTC_DCHECK(FirstPacketInSubFrame());
+ frame_id_ = frame_id;
+}
+
+rtc::ArrayView<const uint16_t>
+RtpGenericFrameDescriptor::FrameDependenciesDiffs() const {
+ RTC_DCHECK(FirstPacketInSubFrame());
+ return rtc::MakeArrayView(frame_deps_id_diffs_, num_frame_deps_);
+}
+
+bool RtpGenericFrameDescriptor::AddFrameDependencyDiff(uint16_t fdiff) {
+ RTC_DCHECK(FirstPacketInSubFrame());
+ if (num_frame_deps_ == kMaxNumFrameDependencies)
+ return false;
+ if (fdiff == 0)
+ return false;
+ RTC_DCHECK_LT(fdiff, 1 << 14);
+ RTC_DCHECK_GT(fdiff, 0);
+ frame_deps_id_diffs_[num_frame_deps_] = fdiff;
+ num_frame_deps_++;
+ return true;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_generic_frame_descriptor.h b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_generic_frame_descriptor.h
new file mode 100644
index 0000000000..8760acca2a
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_generic_frame_descriptor.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef MODULES_RTP_RTCP_SOURCE_RTP_GENERIC_FRAME_DESCRIPTOR_H_
+#define MODULES_RTP_RTCP_SOURCE_RTP_GENERIC_FRAME_DESCRIPTOR_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/array_view.h"
+
+namespace webrtc {
+
+class RtpGenericFrameDescriptorExtension;
+
+// Data to put on the wire for the FrameDescriptor RTP header extension.
+class RtpGenericFrameDescriptor {
+ public:
+ static constexpr int kMaxNumFrameDependencies = 8;
+ static constexpr int kMaxTemporalLayers = 8;
+ static constexpr int kMaxSpatialLayers = 8;
+
+ RtpGenericFrameDescriptor();
+ RtpGenericFrameDescriptor(const RtpGenericFrameDescriptor&);
+ ~RtpGenericFrameDescriptor();
+
+ bool FirstPacketInSubFrame() const { return beginning_of_subframe_; }
+ void SetFirstPacketInSubFrame(bool first) { beginning_of_subframe_ = first; }
+ bool LastPacketInSubFrame() const { return end_of_subframe_; }
+ void SetLastPacketInSubFrame(bool last) { end_of_subframe_ = last; }
+
+  // Properties below are undefined if !FirstPacketInSubFrame().
+ // Valid range for temporal layer: [0, 7]
+ int TemporalLayer() const;
+ void SetTemporalLayer(int temporal_layer);
+
+  // Frame might be used, possibly indirectly, for spatial layer sid iff
+ // (bitmask & (1 << sid)) != 0
+ int SpatialLayer() const;
+ uint8_t SpatialLayersBitmask() const;
+ void SetSpatialLayersBitmask(uint8_t spatial_layers);
+
+ int Width() const { return width_; }
+ int Height() const { return height_; }
+ void SetResolution(int width, int height);
+
+ uint16_t FrameId() const;
+ void SetFrameId(uint16_t frame_id);
+
+ rtc::ArrayView<const uint16_t> FrameDependenciesDiffs() const;
+ void ClearFrameDependencies() { num_frame_deps_ = 0; }
+ // Returns false on failure, i.e. number of dependencies is too large.
+ bool AddFrameDependencyDiff(uint16_t fdiff);
+
+ private:
+ bool beginning_of_subframe_ = false;
+ bool end_of_subframe_ = false;
+
+ uint16_t frame_id_ = 0;
+ uint8_t spatial_layers_ = 1;
+ uint8_t temporal_layer_ = 0;
+ size_t num_frame_deps_ = 0;
+ uint16_t frame_deps_id_diffs_[kMaxNumFrameDependencies];
+ int width_ = 0;
+ int height_ = 0;
+};
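+
+// Usage sketch (editor's illustration, mirroring the unit tests):
+//
+//   RtpGenericFrameDescriptor descriptor;
+//   descriptor.SetFirstPacketInSubFrame(true);
+//   descriptor.SetTemporalLayer(1);
+//   descriptor.SetSpatialLayersBitmask(0x01);  // SL0 only.
+//   descriptor.SetFrameId(0x3412);
+//   descriptor.AddFrameDependencyDiff(1);  // Depends on the previous frame.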
+
+} // namespace webrtc
+
+#endif // MODULES_RTP_RTCP_SOURCE_RTP_GENERIC_FRAME_DESCRIPTOR_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_generic_frame_descriptor_extension.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_generic_frame_descriptor_extension.cc
new file mode 100644
index 0000000000..8a0810f445
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_generic_frame_descriptor_extension.cc
@@ -0,0 +1,173 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtp_generic_frame_descriptor_extension.h"
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace {
+
+constexpr uint8_t kFlagBeginOfSubframe = 0x80;
+constexpr uint8_t kFlagEndOfSubframe = 0x40;
+
+// In version 00, the flags F and L in the first byte correspond to
+// kFlagFirstSubframeV00 and kFlagLastSubframeV00. In practice, they were
+// always set to `true`.
+constexpr uint8_t kFlagFirstSubframeV00 = 0x20;
+constexpr uint8_t kFlagLastSubframeV00 = 0x10;
+
+constexpr uint8_t kFlagDependencies = 0x08;
+constexpr uint8_t kMaskTemporalLayer = 0x07;
+
+constexpr uint8_t kFlagMoreDependencies = 0x01;
+constexpr uint8_t kFlageXtendedOffset = 0x02;
+} // namespace
+// 0 1 2 3 4 5 6 7
+// +-+-+-+-+-+-+-+-+
+// |B|E|F|L|D| T |
+// +-+-+-+-+-+-+-+-+
+// B: | S |
+// +-+-+-+-+-+-+-+-+
+// | |
+// B: + FID +
+// | |
+// +-+-+-+-+-+-+-+-+
+// | |
+// + Width +
+// B=1 | |
+// and +-+-+-+-+-+-+-+-+
+// D=0 | |
+// + Height +
+// | |
+// +-+-+-+-+-+-+-+-+
+// D: | FDIFF |X|M|
+// +---------------+
+// X: | ... |
+// +-+-+-+-+-+-+-+-+
+// M: | FDIFF |X|M|
+// +---------------+
+// | ... |
+// +-+-+-+-+-+-+-+-+
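+//
+// Worked example (editor's note): a dependency FDIFF = 0x40 does not fit the
+// 6-bit short form, so it is written as two bytes: first
+// ((0x40 & 0x3f) << 2) | X = 0x02 (X set, M clear), then the remaining bits
+// 0x40 >> 6 = 0x01, matching the {0x02, 0x01} tail checked by the
+// ParseMinLongFrameDependencies unit test.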
+constexpr RTPExtensionType RtpGenericFrameDescriptorExtension00::kId;
+
+bool RtpGenericFrameDescriptorExtension00::Parse(
+ rtc::ArrayView<const uint8_t> data,
+ RtpGenericFrameDescriptor* descriptor) {
+ if (data.empty()) {
+ return false;
+ }
+
+ bool begins_subframe = (data[0] & kFlagBeginOfSubframe) != 0;
+ descriptor->SetFirstPacketInSubFrame(begins_subframe);
+ descriptor->SetLastPacketInSubFrame((data[0] & kFlagEndOfSubframe) != 0);
+
+ // Parse Subframe details provided in 1st packet of subframe.
+ if (!begins_subframe) {
+ return data.size() == 1;
+ }
+ if (data.size() < 4) {
+ return false;
+ }
+ descriptor->SetTemporalLayer(data[0] & kMaskTemporalLayer);
+ descriptor->SetSpatialLayersBitmask(data[1]);
+ descriptor->SetFrameId(data[2] | (data[3] << 8));
+
+ // Parse dependencies.
+ descriptor->ClearFrameDependencies();
+ size_t offset = 4;
+ bool has_more_dependencies = (data[0] & kFlagDependencies) != 0;
+ if (!has_more_dependencies && data.size() >= offset + 4) {
+ uint16_t width = (data[offset] << 8) | data[offset + 1];
+ uint16_t height = (data[offset + 2] << 8) | data[offset + 3];
+ descriptor->SetResolution(width, height);
+ offset += 4;
+ }
+ while (has_more_dependencies) {
+ if (data.size() == offset)
+ return false;
+ has_more_dependencies = (data[offset] & kFlagMoreDependencies) != 0;
+ bool extended = (data[offset] & kFlageXtendedOffset) != 0;
+ uint16_t fdiff = data[offset] >> 2;
+ offset++;
+ if (extended) {
+ if (data.size() == offset)
+ return false;
+ fdiff |= (data[offset] << 6);
+ offset++;
+ }
+ if (!descriptor->AddFrameDependencyDiff(fdiff))
+ return false;
+ }
+ return true;
+}
+
+size_t RtpGenericFrameDescriptorExtension00::ValueSize(
+ const RtpGenericFrameDescriptor& descriptor) {
+ if (!descriptor.FirstPacketInSubFrame())
+ return 1;
+
+ size_t size = 4;
+ for (uint16_t fdiff : descriptor.FrameDependenciesDiffs()) {
+ size += (fdiff >= (1 << 6)) ? 2 : 1;
+ }
+ if (descriptor.FirstPacketInSubFrame() &&
+ descriptor.FrameDependenciesDiffs().empty() && descriptor.Width() > 0 &&
+ descriptor.Height() > 0) {
+ size += 4;
+ }
+ return size;
+}
+
+bool RtpGenericFrameDescriptorExtension00::Write(
+ rtc::ArrayView<uint8_t> data,
+ const RtpGenericFrameDescriptor& descriptor) {
+ RTC_CHECK_EQ(data.size(), ValueSize(descriptor));
+ uint8_t base_header =
+ (descriptor.FirstPacketInSubFrame() ? kFlagBeginOfSubframe : 0) |
+ (descriptor.LastPacketInSubFrame() ? kFlagEndOfSubframe : 0);
+ base_header |= kFlagFirstSubframeV00;
+ base_header |= kFlagLastSubframeV00;
+
+ if (!descriptor.FirstPacketInSubFrame()) {
+ data[0] = base_header;
+ return true;
+ }
+ data[0] =
+ base_header |
+ (descriptor.FrameDependenciesDiffs().empty() ? 0 : kFlagDependencies) |
+ descriptor.TemporalLayer();
+ data[1] = descriptor.SpatialLayersBitmask();
+ uint16_t frame_id = descriptor.FrameId();
+ data[2] = frame_id & 0xff;
+ data[3] = frame_id >> 8;
+ rtc::ArrayView<const uint16_t> fdiffs = descriptor.FrameDependenciesDiffs();
+ size_t offset = 4;
+ if (descriptor.FirstPacketInSubFrame() && fdiffs.empty() &&
+ descriptor.Width() > 0 && descriptor.Height() > 0) {
+ data[offset++] = (descriptor.Width() >> 8);
+ data[offset++] = (descriptor.Width() & 0xFF);
+ data[offset++] = (descriptor.Height() >> 8);
+ data[offset++] = (descriptor.Height() & 0xFF);
+ }
+ for (size_t i = 0; i < fdiffs.size(); i++) {
+ bool extended = fdiffs[i] >= (1 << 6);
+ bool more = i < fdiffs.size() - 1;
+ data[offset++] = ((fdiffs[i] & 0x3f) << 2) |
+ (extended ? kFlageXtendedOffset : 0) |
+ (more ? kFlagMoreDependencies : 0);
+ if (extended) {
+ data[offset++] = fdiffs[i] >> 6;
+ }
+ }
+ return true;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_generic_frame_descriptor_extension.h b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_generic_frame_descriptor_extension.h
new file mode 100644
index 0000000000..93ca690b22
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_generic_frame_descriptor_extension.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef MODULES_RTP_RTCP_SOURCE_RTP_GENERIC_FRAME_DESCRIPTOR_EXTENSION_H_
+#define MODULES_RTP_RTCP_SOURCE_RTP_GENERIC_FRAME_DESCRIPTOR_EXTENSION_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "absl/strings/string_view.h"
+#include "api/array_view.h"
+#include "api/rtp_parameters.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "modules/rtp_rtcp/source/rtp_generic_frame_descriptor.h"
+
+namespace webrtc {
+
+class RtpGenericFrameDescriptorExtension00 {
+ public:
+ using value_type = RtpGenericFrameDescriptor;
+ static constexpr RTPExtensionType kId = kRtpExtensionGenericFrameDescriptor00;
+ static constexpr absl::string_view Uri() {
+ return RtpExtension::kGenericFrameDescriptorUri00;
+ }
+ static constexpr int kMaxSizeBytes = 16;
+
+ static bool Parse(rtc::ArrayView<const uint8_t> data,
+ RtpGenericFrameDescriptor* descriptor);
+ static size_t ValueSize(const RtpGenericFrameDescriptor& descriptor);
+ static bool Write(rtc::ArrayView<uint8_t> data,
+ const RtpGenericFrameDescriptor& descriptor);
+};
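+
+// Usage sketch (editor's illustration; `descriptor` is built by the caller):
+//
+//   uint8_t buffer[RtpGenericFrameDescriptorExtension00::kMaxSizeBytes];
+//   size_t size =
+//       RtpGenericFrameDescriptorExtension00::ValueSize(descriptor);
+//   rtc::ArrayView<uint8_t> data(buffer, size);
+//   if (RtpGenericFrameDescriptorExtension00::Write(data, descriptor)) {
+//     // Attach `data` as the header extension payload.
+//   }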
+
+} // namespace webrtc
+
+#endif // MODULES_RTP_RTCP_SOURCE_RTP_GENERIC_FRAME_DESCRIPTOR_EXTENSION_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_generic_frame_descriptor_extension_unittest.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_generic_frame_descriptor_extension_unittest.cc
new file mode 100644
index 0000000000..d7f8e1e906
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_generic_frame_descriptor_extension_unittest.cc
@@ -0,0 +1,266 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/rtp_rtcp/source/rtp_generic_frame_descriptor_extension.h"
+
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+using ::testing::ElementsAre;
+using ::testing::ElementsAreArray;
+using ::testing::IsEmpty;
+
+constexpr uint8_t kDeprecatedFlags = 0x30;
+
+// TODO(danilchap): Add fuzzer to test for various invalid inputs.
+
+TEST(RtpGenericFrameDescriptorExtensionTest,
+     ParseFirstPacketOfIndependentSubFrame) {
+ const int kTemporalLayer = 5;
+ constexpr uint8_t kRaw[] = {0x80 | kTemporalLayer, 0x49, 0x12, 0x34};
+ RtpGenericFrameDescriptor descriptor;
+
+ ASSERT_TRUE(RtpGenericFrameDescriptorExtension00::Parse(kRaw, &descriptor));
+
+ EXPECT_TRUE(descriptor.FirstPacketInSubFrame());
+ EXPECT_FALSE(descriptor.LastPacketInSubFrame());
+
+ EXPECT_THAT(descriptor.FrameDependenciesDiffs(), IsEmpty());
+ EXPECT_EQ(descriptor.TemporalLayer(), kTemporalLayer);
+ EXPECT_EQ(descriptor.SpatialLayersBitmask(), 0x49);
+ EXPECT_EQ(descriptor.FrameId(), 0x3412);
+}
+
+TEST(RtpGenericFrameDescriptorExtensionTest,
+     WriteFirstPacketOfIndependentSubFrame) {
+ const int kTemporalLayer = 5;
+ uint8_t kRaw[] = {0x80 | kTemporalLayer | kDeprecatedFlags, 0x49, 0x12, 0x34};
+ RtpGenericFrameDescriptor descriptor;
+
+ descriptor.SetFirstPacketInSubFrame(true);
+ descriptor.SetTemporalLayer(kTemporalLayer);
+ descriptor.SetSpatialLayersBitmask(0x49);
+ descriptor.SetFrameId(0x3412);
+
+ ASSERT_EQ(RtpGenericFrameDescriptorExtension00::ValueSize(descriptor),
+ sizeof(kRaw));
+ uint8_t buffer[sizeof(kRaw)];
+ EXPECT_TRUE(RtpGenericFrameDescriptorExtension00::Write(buffer, descriptor));
+
+ EXPECT_THAT(buffer, ElementsAreArray(kRaw));
+}
+
+TEST(RtpGenericFrameDescriptorExtensionTest, ParseLastPacketOfSubFrame) {
+ constexpr uint8_t kRaw[] = {0x40};
+ RtpGenericFrameDescriptor descriptor;
+
+ ASSERT_TRUE(RtpGenericFrameDescriptorExtension00::Parse(kRaw, &descriptor));
+
+ EXPECT_FALSE(descriptor.FirstPacketInSubFrame());
+ EXPECT_TRUE(descriptor.LastPacketInSubFrame());
+}
+
+TEST(RtpGenericFrameDescriptorExtensionTest, WriteLastPacketOfSubFrame) {
+ uint8_t kRaw[] = {0x40 | kDeprecatedFlags};
+
+ RtpGenericFrameDescriptor descriptor;
+ descriptor.SetLastPacketInSubFrame(true);
+
+ ASSERT_EQ(RtpGenericFrameDescriptorExtension00::ValueSize(descriptor),
+ sizeof(kRaw));
+ uint8_t buffer[sizeof(kRaw)];
+ EXPECT_TRUE(RtpGenericFrameDescriptorExtension00::Write(buffer, descriptor));
+ EXPECT_THAT(buffer, ElementsAreArray(kRaw));
+}
+
+TEST(RtpGenericFrameDescriptorExtensionTest, ParseMinShortFrameDependencies) {
+ constexpr uint16_t kDiff = 1;
+ constexpr uint8_t kRaw[] = {0x88, 0x01, 0x00, 0x00, 0x04};
+ RtpGenericFrameDescriptor descriptor;
+
+ ASSERT_TRUE(RtpGenericFrameDescriptorExtension00::Parse(kRaw, &descriptor));
+ ASSERT_TRUE(descriptor.FirstPacketInSubFrame());
+ EXPECT_THAT(descriptor.FrameDependenciesDiffs(), ElementsAre(kDiff));
+}
+
+TEST(RtpGenericFrameDescriptorExtensionTest, WriteMinShortFrameDependencies) {
+ constexpr uint16_t kDiff = 1;
+ uint8_t kRaw[] = {0x88 | kDeprecatedFlags, 0x01, 0x00, 0x00, 0x04};
+ RtpGenericFrameDescriptor descriptor;
+ descriptor.SetFirstPacketInSubFrame(true);
+ descriptor.AddFrameDependencyDiff(kDiff);
+
+ ASSERT_EQ(RtpGenericFrameDescriptorExtension00::ValueSize(descriptor),
+ sizeof(kRaw));
+ uint8_t buffer[sizeof(kRaw)];
+ EXPECT_TRUE(RtpGenericFrameDescriptorExtension00::Write(buffer, descriptor));
+ EXPECT_THAT(buffer, ElementsAreArray(kRaw));
+}
+
+TEST(RtpGenericFrameDescriptorExtensionTest, ParseMaxShortFrameDependencies) {
+ constexpr uint16_t kDiff = 0x3f;
+ constexpr uint8_t kRaw[] = {0xb8, 0x01, 0x00, 0x00, 0xfc};
+ RtpGenericFrameDescriptor descriptor;
+
+ ASSERT_TRUE(RtpGenericFrameDescriptorExtension00::Parse(kRaw, &descriptor));
+ ASSERT_TRUE(descriptor.FirstPacketInSubFrame());
+ EXPECT_THAT(descriptor.FrameDependenciesDiffs(), ElementsAre(kDiff));
+}
+
+TEST(RtpGenericFrameDescriptorExtensionTest, WriteMaxShortFrameDependencies) {
+ constexpr uint16_t kDiff = 0x3f;
+ uint8_t kRaw[] = {0x88 | kDeprecatedFlags, 0x01, 0x00, 0x00, 0xfc};
+ RtpGenericFrameDescriptor descriptor;
+ descriptor.SetFirstPacketInSubFrame(true);
+ descriptor.AddFrameDependencyDiff(kDiff);
+
+ ASSERT_EQ(RtpGenericFrameDescriptorExtension00::ValueSize(descriptor),
+ sizeof(kRaw));
+ uint8_t buffer[sizeof(kRaw)];
+ EXPECT_TRUE(RtpGenericFrameDescriptorExtension00::Write(buffer, descriptor));
+ EXPECT_THAT(buffer, ElementsAreArray(kRaw));
+}
+
+TEST(RtpGenericFrameDescriptorExtensionTest, ParseMinLongFrameDependencies) {
+ constexpr uint16_t kDiff = 0x40;
+ constexpr uint8_t kRaw[] = {0xb8, 0x01, 0x00, 0x00, 0x02, 0x01};
+ RtpGenericFrameDescriptor descriptor;
+
+ ASSERT_TRUE(RtpGenericFrameDescriptorExtension00::Parse(kRaw, &descriptor));
+ ASSERT_TRUE(descriptor.FirstPacketInSubFrame());
+ EXPECT_THAT(descriptor.FrameDependenciesDiffs(), ElementsAre(kDiff));
+}
+
+TEST(RtpGenericFrameDescriptorExtensionTest, WriteMinLongFrameDependencies) {
+ constexpr uint16_t kDiff = 0x40;
+ uint8_t kRaw[] = {0x88 | kDeprecatedFlags, 0x01, 0x00, 0x00, 0x02, 0x01};
+ RtpGenericFrameDescriptor descriptor;
+ descriptor.SetFirstPacketInSubFrame(true);
+ descriptor.AddFrameDependencyDiff(kDiff);
+
+ ASSERT_EQ(RtpGenericFrameDescriptorExtension00::ValueSize(descriptor),
+ sizeof(kRaw));
+ uint8_t buffer[sizeof(kRaw)];
+ EXPECT_TRUE(RtpGenericFrameDescriptorExtension00::Write(buffer, descriptor));
+ EXPECT_THAT(buffer, ElementsAreArray(kRaw));
+}
+
+TEST(RtpGenericFrameDescriptorExtensionTest,
+ ParseLongFrameDependenciesAsBigEndian) {
+ constexpr uint16_t kDiff = 0x7654 >> 2;
+ constexpr uint8_t kRaw[] = {0xb8, 0x01, 0x00, 0x00, 0x54 | 0x02, 0x76};
+ RtpGenericFrameDescriptor descriptor;
+
+ ASSERT_TRUE(RtpGenericFrameDescriptorExtension00::Parse(kRaw, &descriptor));
+ ASSERT_TRUE(descriptor.FirstPacketInSubFrame());
+ EXPECT_THAT(descriptor.FrameDependenciesDiffs(), ElementsAre(kDiff));
+}
+
+TEST(RtpGenericFrameDescriptorExtensionTest,
+ WriteLongFrameDependenciesAsBigEndian) {
+ constexpr uint16_t kDiff = 0x7654 >> 2;
+ uint8_t kRaw[] = {
+ 0x88 | kDeprecatedFlags, 0x01, 0x00, 0x00, 0x54 | 0x02, 0x76};
+ RtpGenericFrameDescriptor descriptor;
+ descriptor.SetFirstPacketInSubFrame(true);
+ descriptor.AddFrameDependencyDiff(kDiff);
+
+ ASSERT_EQ(RtpGenericFrameDescriptorExtension00::ValueSize(descriptor),
+ sizeof(kRaw));
+ uint8_t buffer[sizeof(kRaw)];
+ EXPECT_TRUE(RtpGenericFrameDescriptorExtension00::Write(buffer, descriptor));
+ EXPECT_THAT(buffer, ElementsAreArray(kRaw));
+}
+
+TEST(RtpGenericFrameDescriptorExtensionTest, ParseMaxLongFrameDependencies) {
+ constexpr uint16_t kDiff = 0x3fff;
+ constexpr uint8_t kRaw[] = {0xb8, 0x01, 0x00, 0x00, 0xfe, 0xff};
+ RtpGenericFrameDescriptor descriptor;
+
+ ASSERT_TRUE(RtpGenericFrameDescriptorExtension00::Parse(kRaw, &descriptor));
+ ASSERT_TRUE(descriptor.FirstPacketInSubFrame());
+ EXPECT_THAT(descriptor.FrameDependenciesDiffs(), ElementsAre(kDiff));
+}
+
+TEST(RtpGenericFrameDescriptorExtensionTest, WriteMaxLongFrameDependencies) {
+ constexpr uint16_t kDiff = 0x3fff;
+ uint8_t kRaw[] = {0x88 | kDeprecatedFlags, 0x01, 0x00, 0x00, 0xfe, 0xff};
+ RtpGenericFrameDescriptor descriptor;
+ descriptor.SetFirstPacketInSubFrame(true);
+ descriptor.AddFrameDependencyDiff(kDiff);
+
+ ASSERT_EQ(RtpGenericFrameDescriptorExtension00::ValueSize(descriptor),
+ sizeof(kRaw));
+ uint8_t buffer[sizeof(kRaw)];
+ EXPECT_TRUE(RtpGenericFrameDescriptorExtension00::Write(buffer, descriptor));
+ EXPECT_THAT(buffer, ElementsAreArray(kRaw));
+}
+
+TEST(RtpGenericFrameDescriptorExtensionTest, ParseTwoFrameDependencies) {
+ constexpr uint16_t kDiff1 = 9;
+ constexpr uint16_t kDiff2 = 15;
+ constexpr uint8_t kRaw[] = {
+ 0xb8, 0x01, 0x00, 0x00, (kDiff1 << 2) | 0x01, kDiff2 << 2};
+ RtpGenericFrameDescriptor descriptor;
+
+ ASSERT_TRUE(RtpGenericFrameDescriptorExtension00::Parse(kRaw, &descriptor));
+ ASSERT_TRUE(descriptor.FirstPacketInSubFrame());
+ EXPECT_THAT(descriptor.FrameDependenciesDiffs(), ElementsAre(kDiff1, kDiff2));
+}
+
+TEST(RtpGenericFrameDescriptorExtensionTest, WriteTwoFrameDependencies) {
+ constexpr uint16_t kDiff1 = 9;
+ constexpr uint16_t kDiff2 = 15;
+ uint8_t kRaw[] = {0x88 | kDeprecatedFlags, 0x01, 0x00, 0x00,
+ (kDiff1 << 2) | 0x01, kDiff2 << 2};
+ RtpGenericFrameDescriptor descriptor;
+ descriptor.SetFirstPacketInSubFrame(true);
+ descriptor.AddFrameDependencyDiff(kDiff1);
+ descriptor.AddFrameDependencyDiff(kDiff2);
+
+ ASSERT_EQ(RtpGenericFrameDescriptorExtension00::ValueSize(descriptor),
+ sizeof(kRaw));
+ uint8_t buffer[sizeof(kRaw)];
+ EXPECT_TRUE(RtpGenericFrameDescriptorExtension00::Write(buffer, descriptor));
+ EXPECT_THAT(buffer, ElementsAreArray(kRaw));
+}
+
+TEST(RtpGenericFrameDescriptorExtensionTest,
+ ParseResolutionOnIndependentFrame) {
+ constexpr int kWidth = 0x2468;
+ constexpr int kHeight = 0x6543;
+ constexpr uint8_t kRaw[] = {0xb0, 0x01, 0x00, 0x00, 0x24, 0x68, 0x65, 0x43};
+ RtpGenericFrameDescriptor descriptor;
+
+ ASSERT_TRUE(RtpGenericFrameDescriptorExtension00::Parse(kRaw, &descriptor));
+ EXPECT_EQ(descriptor.Width(), kWidth);
+ EXPECT_EQ(descriptor.Height(), kHeight);
+}
+
+TEST(RtpGenericFrameDescriptorExtensionTest,
+ WriteResolutionOnIndependentFrame) {
+ constexpr int kWidth = 0x2468;
+ constexpr int kHeight = 0x6543;
+ uint8_t kRaw[] = {
+ 0x80 | kDeprecatedFlags, 0x01, 0x00, 0x00, 0x24, 0x68, 0x65, 0x43};
+ RtpGenericFrameDescriptor descriptor;
+ descriptor.SetFirstPacketInSubFrame(true);
+ descriptor.SetResolution(kWidth, kHeight);
+
+ ASSERT_EQ(RtpGenericFrameDescriptorExtension00::ValueSize(descriptor),
+ sizeof(kRaw));
+ uint8_t buffer[sizeof(kRaw)];
+ EXPECT_TRUE(RtpGenericFrameDescriptorExtension00::Write(buffer, descriptor));
+ EXPECT_THAT(buffer, ElementsAreArray(kRaw));
+}
+} // namespace
+} // namespace webrtc
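[Editor's note] The Write/Parse tests above are deliberate mirror images: each wire layout is asserted byte for byte in both directions. Below is a minimal round-trip sketch using only the API these tests exercise; the helper name and buffer handling are illustrative, not part of the change itself.

```cpp
#include <algorithm>
#include <cstdint>
#include <vector>

#include "modules/rtp_rtcp/source/rtp_generic_frame_descriptor.h"
#include "modules/rtp_rtcp/source/rtp_generic_frame_descriptor_extension.h"

// Writes a descriptor into a wire buffer sized by ValueSize() and parses it
// back, checking that the frame dependency diffs survive the round trip.
bool DescriptorRoundTrips() {
  webrtc::RtpGenericFrameDescriptor descriptor;
  descriptor.SetFirstPacketInSubFrame(true);
  descriptor.AddFrameDependencyDiff(9);
  descriptor.AddFrameDependencyDiff(15);

  std::vector<uint8_t> buffer(
      webrtc::RtpGenericFrameDescriptorExtension00::ValueSize(descriptor));
  if (!webrtc::RtpGenericFrameDescriptorExtension00::Write(buffer, descriptor))
    return false;

  webrtc::RtpGenericFrameDescriptor parsed;
  if (!webrtc::RtpGenericFrameDescriptorExtension00::Parse(buffer, &parsed))
    return false;

  auto written = descriptor.FrameDependenciesDiffs();
  auto reread = parsed.FrameDependenciesDiffs();
  return reread.size() == written.size() &&
         std::equal(reread.begin(), reread.end(), written.begin());
}
```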
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_header_extension_map.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_header_extension_map.cc
new file mode 100644
index 0000000000..4b8c7b5385
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_header_extension_map.cc
@@ -0,0 +1,169 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/include/rtp_header_extension_map.h"
+
+#include "absl/strings/string_view.h"
+#include "modules/rtp_rtcp/source/rtp_dependency_descriptor_extension.h"
+#include "modules/rtp_rtcp/source/rtp_generic_frame_descriptor_extension.h"
+#include "modules/rtp_rtcp/source/rtp_header_extensions.h"
+#include "modules/rtp_rtcp/source/rtp_video_layers_allocation_extension.h"
+#include "rtc_base/arraysize.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+namespace {
+
+struct ExtensionInfo {
+ RTPExtensionType type;
+ absl::string_view uri;
+};
+
+template <typename Extension>
+constexpr ExtensionInfo CreateExtensionInfo() {
+ return {Extension::kId, Extension::Uri()};
+}
+
+constexpr ExtensionInfo kExtensions[] = {
+ CreateExtensionInfo<TransmissionOffset>(),
+ CreateExtensionInfo<AudioLevel>(),
+ CreateExtensionInfo<CsrcAudioLevel>(),
+ CreateExtensionInfo<AbsoluteSendTime>(),
+ CreateExtensionInfo<AbsoluteCaptureTimeExtension>(),
+ CreateExtensionInfo<VideoOrientation>(),
+ CreateExtensionInfo<TransportSequenceNumber>(),
+ CreateExtensionInfo<TransportSequenceNumberV2>(),
+ CreateExtensionInfo<PlayoutDelayLimits>(),
+ CreateExtensionInfo<VideoContentTypeExtension>(),
+ CreateExtensionInfo<RtpVideoLayersAllocationExtension>(),
+ CreateExtensionInfo<VideoTimingExtension>(),
+ CreateExtensionInfo<RtpStreamId>(),
+ CreateExtensionInfo<RepairedRtpStreamId>(),
+ CreateExtensionInfo<RtpMid>(),
+ CreateExtensionInfo<RtpGenericFrameDescriptorExtension00>(),
+ CreateExtensionInfo<RtpDependencyDescriptorExtension>(),
+ CreateExtensionInfo<ColorSpaceExtension>(),
+ CreateExtensionInfo<InbandComfortNoiseExtension>(),
+ CreateExtensionInfo<VideoFrameTrackingIdExtension>(),
+};
+
+// Because of kRtpExtensionNone, kRtpExtensionNumberOfExtensions is one larger
+// than the actual number of known extensions.
+static_assert(arraysize(kExtensions) ==
+                  static_cast<int>(kRtpExtensionNumberOfExtensions) - 1,
+              "kExtensions is expected to list all known extensions");
+
+} // namespace
+
+constexpr RTPExtensionType RtpHeaderExtensionMap::kInvalidType;
+constexpr int RtpHeaderExtensionMap::kInvalidId;
+
+RtpHeaderExtensionMap::RtpHeaderExtensionMap() : RtpHeaderExtensionMap(false) {}
+
+RtpHeaderExtensionMap::RtpHeaderExtensionMap(bool extmap_allow_mixed)
+ : extmap_allow_mixed_(extmap_allow_mixed) {
+ for (auto& id : ids_)
+ id = kInvalidId;
+}
+
+RtpHeaderExtensionMap::RtpHeaderExtensionMap(
+ rtc::ArrayView<const RtpExtension> extensions)
+ : RtpHeaderExtensionMap(false) {
+ for (const RtpExtension& extension : extensions)
+ RegisterByUri(extension.id, extension.uri);
+}
+
+void RtpHeaderExtensionMap::Reset(
+ rtc::ArrayView<const RtpExtension> extensions) {
+ for (auto& id : ids_)
+ id = kInvalidId;
+ for (const RtpExtension& extension : extensions)
+ RegisterByUri(extension.id, extension.uri);
+}
+
+bool RtpHeaderExtensionMap::RegisterByType(int id, RTPExtensionType type) {
+ for (const ExtensionInfo& extension : kExtensions)
+ if (type == extension.type)
+ return Register(id, extension.type, extension.uri);
+ RTC_DCHECK_NOTREACHED();
+ return false;
+}
+
+bool RtpHeaderExtensionMap::RegisterByUri(int id, absl::string_view uri) {
+ for (const ExtensionInfo& extension : kExtensions)
+ if (uri == extension.uri)
+ return Register(id, extension.type, extension.uri);
+ RTC_LOG(LS_WARNING) << "Unknown extension uri:'" << uri << "', id: " << id
+ << '.';
+ return false;
+}
+
+RTPExtensionType RtpHeaderExtensionMap::GetType(int id) const {
+ RTC_DCHECK_GE(id, RtpExtension::kMinId);
+ RTC_DCHECK_LE(id, RtpExtension::kMaxId);
+ for (int type = kRtpExtensionNone + 1; type < kRtpExtensionNumberOfExtensions;
+ ++type) {
+ if (ids_[type] == id) {
+ return static_cast<RTPExtensionType>(type);
+ }
+ }
+ return kInvalidType;
+}
+
+void RtpHeaderExtensionMap::Deregister(absl::string_view uri) {
+ for (const ExtensionInfo& extension : kExtensions) {
+ if (extension.uri == uri) {
+ ids_[extension.type] = kInvalidId;
+ break;
+ }
+ }
+}
+
+bool RtpHeaderExtensionMap::Register(int id,
+ RTPExtensionType type,
+ absl::string_view uri) {
+ RTC_DCHECK_GT(type, kRtpExtensionNone);
+ RTC_DCHECK_LT(type, kRtpExtensionNumberOfExtensions);
+
+ if (id < RtpExtension::kMinId || id > RtpExtension::kMaxId) {
+ RTC_LOG(LS_WARNING) << "Failed to register extension uri:'" << uri
+ << "' with invalid id:" << id << ".";
+ return false;
+ }
+
+ RTPExtensionType registered_type = GetType(id);
+ if (registered_type == type) { // Same type/id pair already registered.
+ RTC_LOG(LS_VERBOSE) << "Reregistering extension uri:'" << uri
+ << "', id:" << id;
+ return true;
+ }
+
+ if (registered_type !=
+ kInvalidType) { // `id` used by another extension type.
+ RTC_LOG(LS_WARNING) << "Failed to register extension uri:'" << uri
+ << "', id:" << id
+ << ". Id already in use by extension type "
+ << static_cast<int>(registered_type);
+ return false;
+ }
+ if (IsRegistered(type)) {
+ RTC_LOG(LS_WARNING) << "Illegal reregistration for uri: " << uri
+ << " is previously registered with id " << GetId(type)
+ << " and cannot be reregistered with id " << id;
+ return false;
+ }
+
+  // The run-time check above ensures that `id` fits into a uint8_t.
+ ids_[type] = static_cast<uint8_t>(id);
+ return true;
+}
+
+} // namespace webrtc
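[Editor's note] Register() above encodes three distinct outcomes, which the unit tests in the next file pin down. A short sketch of the rules, assuming only the public API shown in this file:

```cpp
#include "modules/rtp_rtcp/include/rtp_header_extension_map.h"
#include "modules/rtp_rtcp/source/rtp_header_extensions.h"

void RegistrationRules() {
  webrtc::RtpHeaderExtensionMap map;
  // A fresh type/id pair registers successfully.
  bool ok = map.Register<webrtc::TransmissionOffset>(3);  // true
  // Re-registering the identical pair is a logged no-op that still succeeds.
  ok = map.Register<webrtc::TransmissionOffset>(3);       // true
  // The id is already claimed by another type: rejected.
  ok = map.Register<webrtc::AudioLevel>(3);               // false
  // The type is already mapped to a different id: rejected.
  ok = map.Register<webrtc::TransmissionOffset>(4);       // false
  (void)ok;
}
```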
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_header_extension_map_unittest.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_header_extension_map_unittest.cc
new file mode 100644
index 0000000000..42842cc876
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_header_extension_map_unittest.cc
@@ -0,0 +1,115 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/rtp_rtcp/include/rtp_header_extension_map.h"
+
+#include <vector>
+
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "modules/rtp_rtcp/source/rtp_header_extensions.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+TEST(RtpHeaderExtensionTest, RegisterByType) {
+ RtpHeaderExtensionMap map;
+ EXPECT_FALSE(map.IsRegistered(TransmissionOffset::kId));
+
+ EXPECT_TRUE(map.RegisterByType(3, TransmissionOffset::kId));
+
+ EXPECT_TRUE(map.IsRegistered(TransmissionOffset::kId));
+ EXPECT_EQ(3, map.GetId(TransmissionOffset::kId));
+ EXPECT_EQ(TransmissionOffset::kId, map.GetType(3));
+}
+
+TEST(RtpHeaderExtensionTest, RegisterByUri) {
+ RtpHeaderExtensionMap map;
+
+ EXPECT_TRUE(map.RegisterByUri(3, TransmissionOffset::Uri()));
+
+ EXPECT_TRUE(map.IsRegistered(TransmissionOffset::kId));
+ EXPECT_EQ(3, map.GetId(TransmissionOffset::kId));
+ EXPECT_EQ(TransmissionOffset::kId, map.GetType(3));
+}
+
+TEST(RtpHeaderExtensionTest, RegisterWithTrait) {
+ RtpHeaderExtensionMap map;
+
+ EXPECT_TRUE(map.Register<TransmissionOffset>(3));
+
+ EXPECT_TRUE(map.IsRegistered(TransmissionOffset::kId));
+ EXPECT_EQ(3, map.GetId(TransmissionOffset::kId));
+ EXPECT_EQ(TransmissionOffset::kId, map.GetType(3));
+}
+
+TEST(RtpHeaderExtensionTest, RegisterDuringConstruction) {
+ const std::vector<RtpExtension> config = {{TransmissionOffset::Uri(), 1},
+ {AbsoluteSendTime::Uri(), 3}};
+ const RtpHeaderExtensionMap map(config);
+
+ EXPECT_EQ(1, map.GetId(TransmissionOffset::kId));
+ EXPECT_EQ(3, map.GetId(AbsoluteSendTime::kId));
+}
+
+TEST(RtpHeaderExtensionTest, RegisterTwoByteHeaderExtensions) {
+ RtpHeaderExtensionMap map;
+  // Two-byte header extensions are needed for ids in [15-255].
+ EXPECT_TRUE(map.Register<TransmissionOffset>(18));
+ EXPECT_TRUE(map.Register<AbsoluteSendTime>(255));
+}
+
+TEST(RtpHeaderExtensionTest, RegisterIllegalArg) {
+ RtpHeaderExtensionMap map;
+ // Valid range for id: [1-255].
+ EXPECT_FALSE(map.Register<TransmissionOffset>(0));
+ EXPECT_FALSE(map.Register<TransmissionOffset>(256));
+}
+
+TEST(RtpHeaderExtensionTest, Idempotent) {
+ RtpHeaderExtensionMap map;
+
+ EXPECT_TRUE(map.Register<TransmissionOffset>(3));
+ EXPECT_TRUE(map.Register<TransmissionOffset>(3));
+
+ map.Deregister(TransmissionOffset::Uri());
+ map.Deregister(TransmissionOffset::Uri());
+}
+
+TEST(RtpHeaderExtensionTest, NonUniqueId) {
+ RtpHeaderExtensionMap map;
+ EXPECT_TRUE(map.Register<TransmissionOffset>(3));
+
+ EXPECT_FALSE(map.Register<AudioLevel>(3));
+ EXPECT_TRUE(map.Register<AudioLevel>(4));
+}
+
+TEST(RtpHeaderExtensionTest, GetType) {
+ RtpHeaderExtensionMap map;
+ EXPECT_EQ(RtpHeaderExtensionMap::kInvalidType, map.GetType(3));
+ EXPECT_TRUE(map.Register<TransmissionOffset>(3));
+
+ EXPECT_EQ(TransmissionOffset::kId, map.GetType(3));
+}
+
+TEST(RtpHeaderExtensionTest, GetId) {
+ RtpHeaderExtensionMap map;
+ EXPECT_EQ(RtpHeaderExtensionMap::kInvalidId,
+ map.GetId(TransmissionOffset::kId));
+ EXPECT_TRUE(map.Register<TransmissionOffset>(3));
+
+ EXPECT_EQ(3, map.GetId(TransmissionOffset::kId));
+}
+
+TEST(RtpHeaderExtensionTest, RemapFails) {
+ RtpHeaderExtensionMap map;
+ EXPECT_TRUE(map.Register<TransmissionOffset>(3));
+ EXPECT_FALSE(map.Register<TransmissionOffset>(4));
+}
+
+} // namespace webrtc
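[Editor's note] The two-byte registrations above rely on the RFC 8285 id split that the size tests further down also assume: one-byte extension headers can address ids 1-14 only. A hypothetical predicate capturing that boundary (the 14/15 cut-off is taken from the test comments, not from a constant visible in this hunk):

```cpp
// One-byte extension headers encode the id in 4 bits, and id 15 is reserved,
// so anything above 14 needs the two-byte header format.
constexpr bool NeedsTwoByteHeader(int id) {
  return id > 14;
}
static_assert(!NeedsTwoByteHeader(14), "14 is the largest one-byte id");
static_assert(NeedsTwoByteHeader(15), "15 already forces the two-byte format");
```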
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_header_extension_size.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_header_extension_size.cc
new file mode 100644
index 0000000000..4acbcf4e6b
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_header_extension_size.cc
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtp_header_extension_size.h"
+
+#include "api/rtp_parameters.h"
+
+namespace webrtc {
+
+int RtpHeaderExtensionSize(rtc::ArrayView<const RtpExtensionSize> extensions,
+ const RtpHeaderExtensionMap& registered_extensions) {
+ // RFC3550 Section 5.3.1
+ static constexpr int kExtensionBlockHeaderSize = 4;
+
+ int values_size = 0;
+ int num_extensions = 0;
+ int each_extension_header_size = 1;
+ for (const RtpExtensionSize& extension : extensions) {
+ int id = registered_extensions.GetId(extension.type);
+ if (id == RtpHeaderExtensionMap::kInvalidId)
+ continue;
+    // All extensions should use the same header size. Check whether this
+    // `extension` forces a switch to the two-byte header, which allows a
+    // larger id and value size.
+ if (id > RtpExtension::kOneByteHeaderExtensionMaxId ||
+ extension.value_size >
+ RtpExtension::kOneByteHeaderExtensionMaxValueSize) {
+ each_extension_header_size = 2;
+ }
+ values_size += extension.value_size;
+ num_extensions++;
+ }
+ if (values_size == 0)
+ return 0;
+ int size = kExtensionBlockHeaderSize +
+ each_extension_header_size * num_extensions + values_size;
+  // The extension size is specified in 32-bit words, so the result must be a
+  // multiple of 4 bytes. Round up.
+ return size + 3 - (size + 3) % 4;
+}
+
+} // namespace webrtc
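[Editor's note] The closing expression of RtpHeaderExtensionSize() is a branch-free round-up to a 32-bit word boundary. A worked sketch of the same arithmetic (the helper name is illustrative):

```cpp
// Equivalent to `size + 3 - (size + 3) % 4` as used above: rounds `size` up
// to the next multiple of 4 bytes, i.e. a whole number of 32-bit words.
constexpr int RoundUpToWords(int size) {
  return size + 3 - (size + 3) % 4;
}
static_assert(RoundUpToWords(8) == 8, "already aligned, no padding");
static_assert(RoundUpToWords(10) == 12, "two padding bytes added");
static_assert(RoundUpToWords(13) == 16, "three padding bytes added");
```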
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_header_extension_size.h b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_header_extension_size.h
new file mode 100644
index 0000000000..1fb2eb2a1e
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_header_extension_size.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef MODULES_RTP_RTCP_SOURCE_RTP_HEADER_EXTENSION_SIZE_H_
+#define MODULES_RTP_RTCP_SOURCE_RTP_HEADER_EXTENSION_SIZE_H_
+
+#include "api/array_view.h"
+#include "modules/rtp_rtcp/include/rtp_header_extension_map.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+
+namespace webrtc {
+
+struct RtpExtensionSize {
+ RTPExtensionType type;
+ int value_size;
+};
+
+// Calculates the RTP header extension size in bytes, assuming the packet
+// contains all `extensions` with the provided `value_size`.
+// Only extensions present among `registered_extensions` are counted.
+int RtpHeaderExtensionSize(rtc::ArrayView<const RtpExtensionSize> extensions,
+ const RtpHeaderExtensionMap& registered_extensions);
+
+} // namespace webrtc
+
+#endif // MODULES_RTP_RTCP_SOURCE_RTP_HEADER_EXTENSION_SIZE_H_
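[Editor's note] Putting the header comment into practice, a minimal caller sketch; the id and value size are illustrative, and the unit tests in the next file walk through the same arithmetic case by case:

```cpp
#include "modules/rtp_rtcp/include/rtp_header_extension_map.h"
#include "modules/rtp_rtcp/source/rtp_header_extension_size.h"
#include "modules/rtp_rtcp/source/rtp_header_extensions.h"

int ReservedExtensionRoom() {
  constexpr webrtc::RtpExtensionSize kSizes[] = {
      {webrtc::TransmissionOffset::kId, 3}};
  webrtc::RtpHeaderExtensionMap registered;
  registered.Register<webrtc::TransmissionOffset>(1);
  // 4 (extension block header) + 1 (one-byte extension header) + 3 (value)
  // = 8 bytes, already 32-bit aligned, so no padding is added.
  return webrtc::RtpHeaderExtensionSize(kSizes, registered);  // Returns 8.
}
```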
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_header_extension_size_unittest.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_header_extension_size_unittest.cc
new file mode 100644
index 0000000000..5cc26bc652
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_header_extension_size_unittest.cc
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/rtp_rtcp/source/rtp_header_extension_size.h"
+
+#include "modules/rtp_rtcp/source/rtp_header_extensions.h"
+#include "test/gtest.h"
+
+namespace {
+
+using ::webrtc::RtpExtensionSize;
+using ::webrtc::RtpHeaderExtensionMap;
+using ::webrtc::RtpHeaderExtensionSize;
+using ::webrtc::RtpMid;
+using ::webrtc::RtpStreamId;
+
+// Id for a 1-byte header extension; its actual value is irrelevant here.
+constexpr int kId = 1;
+// Id that forces use of the 2-byte header extension format.
+constexpr int kIdForceTwoByteHeader = 15;
+
+TEST(RtpHeaderExtensionSizeTest, ReturnsZeroIfNoExtensionsAreRegistered) {
+ constexpr RtpExtensionSize kExtensionSizes[] = {{RtpMid::kId, 3}};
+  // Register a different extension than the one we ask the size for.
+ RtpHeaderExtensionMap registered;
+ registered.Register<RtpStreamId>(kId);
+
+ EXPECT_EQ(RtpHeaderExtensionSize(kExtensionSizes, registered), 0);
+}
+
+TEST(RtpHeaderExtensionSizeTest, IncludesSizeOfExtensionHeaders) {
+ constexpr RtpExtensionSize kExtensionSizes[] = {{RtpMid::kId, 3}};
+ RtpHeaderExtensionMap registered;
+ registered.Register<RtpMid>(kId);
+
+ // 4 bytes for extension block header + 1 byte for individual extension header
+ // + 3 bytes for the value.
+ EXPECT_EQ(RtpHeaderExtensionSize(kExtensionSizes, registered), 8);
+}
+
+TEST(RtpHeaderExtensionSizeTest, RoundsUpTo32bitAlignment) {
+ constexpr RtpExtensionSize kExtensionSizes[] = {{RtpMid::kId, 5}};
+ RtpHeaderExtensionMap registered;
+ registered.Register<RtpMid>(kId);
+
+ // 10 bytes of data including headers + 2 bytes of padding.
+ EXPECT_EQ(RtpHeaderExtensionSize(kExtensionSizes, registered), 12);
+}
+
+TEST(RtpHeaderExtensionSizeTest, SumsSeveralExtensions) {
+ constexpr RtpExtensionSize kExtensionSizes[] = {{RtpMid::kId, 16},
+ {RtpStreamId::kId, 2}};
+ RtpHeaderExtensionMap registered;
+ registered.Register<RtpMid>(kId);
+ registered.Register<RtpStreamId>(14);
+
+ // 4 bytes for extension block header + 18 bytes of value +
+ // 2 bytes for two headers
+ EXPECT_EQ(RtpHeaderExtensionSize(kExtensionSizes, registered), 24);
+}
+
+TEST(RtpHeaderExtensionSizeTest, LargeIdForce2BytesHeader) {
+ constexpr RtpExtensionSize kExtensionSizes[] = {{RtpMid::kId, 3},
+ {RtpStreamId::kId, 2}};
+ RtpHeaderExtensionMap registered;
+ registered.Register<RtpMid>(kId);
+ registered.Register<RtpStreamId>(kIdForceTwoByteHeader);
+
+ // 4 bytes for extension block header + 5 bytes of value +
+ // 2*2 bytes for two headers + 3 bytes of padding.
+ EXPECT_EQ(RtpHeaderExtensionSize(kExtensionSizes, registered), 16);
+}
+
+TEST(RtpHeaderExtensionSizeTest, LargeValueForce2BytesHeader) {
+ constexpr RtpExtensionSize kExtensionSizes[] = {{RtpMid::kId, 17},
+ {RtpStreamId::kId, 4}};
+ RtpHeaderExtensionMap registered;
+ registered.Register<RtpMid>(1);
+ registered.Register<RtpStreamId>(2);
+
+ // 4 bytes for extension block header + 21 bytes of value +
+  // 2*2 bytes for two headers + 3 bytes of padding.
+ EXPECT_EQ(RtpHeaderExtensionSize(kExtensionSizes, registered), 32);
+}
+
+} // namespace
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_header_extensions.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_header_extensions.cc
new file mode 100644
index 0000000000..de29fd2075
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_header_extensions.cc
@@ -0,0 +1,934 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtp_header_extensions.h"
+
+#include <string.h>
+
+#include <cmath>
+#include <cstdint>
+#include <limits>
+
+#include "absl/strings/string_view.h"
+#include "modules/rtp_rtcp/include/rtp_cvo.h"
+#include "modules/rtp_rtcp/source/byte_io.h"
+// TODO(bug:9855) Move kNoSpatialIdx from vp9_globals.h to common_constants
+#include "modules/video_coding/codecs/interface/common_constants.h"
+#include "modules/video_coding/codecs/vp9/include/vp9_globals.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+// Absolute send time in RTP streams.
+//
+// The absolute send time is signaled to the receiver in-band using the
+// general mechanism for RTP header extensions [RFC8285]. The payload
+// of this extension (the transmitted value) is a 24-bit unsigned integer
+// containing the sender's current time in seconds as a fixed point number
+// with 18 bits fractional part.
+//
+// The form of the absolute send time extension block:
+//
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | ID | len=2 | absolute send time |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+constexpr RTPExtensionType AbsoluteSendTime::kId;
+constexpr uint8_t AbsoluteSendTime::kValueSizeBytes;
+
+bool AbsoluteSendTime::Parse(rtc::ArrayView<const uint8_t> data,
+ uint32_t* time_24bits) {
+ if (data.size() != 3)
+ return false;
+ *time_24bits = ByteReader<uint32_t, 3>::ReadBigEndian(data.data());
+ return true;
+}
+
+bool AbsoluteSendTime::Write(rtc::ArrayView<uint8_t> data,
+ uint32_t time_24bits) {
+ RTC_DCHECK_EQ(data.size(), 3);
+ RTC_DCHECK_LE(time_24bits, 0x00FFFFFF);
+ ByteWriter<uint32_t, 3>::WriteBigEndian(data.data(), time_24bits);
+ return true;
+}
+
+// Absolute Capture Time
+//
+// The Absolute Capture Time extension is used to stamp RTP packets with a NTP
+// timestamp showing when the first audio or video frame in a packet was
+// originally captured. The intent of this extension is to provide a way to
+// accomplish audio-to-video synchronization when RTCP-terminating intermediate
+// systems (e.g. mixers) are involved.
+//
+// Data layout of the shortened version of abs-capture-time:
+//
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | ID | len=7 | absolute capture timestamp (bit 0-23) |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | absolute capture timestamp (bit 24-55) |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | ... (56-63) |
+// +-+-+-+-+-+-+-+-+
+//
+// Data layout of the extended version of abs-capture-time:
+//
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | ID | len=15| absolute capture timestamp (bit 0-23) |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | absolute capture timestamp (bit 24-55) |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | ... (56-63) | estimated capture clock offset (bit 0-23) |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | estimated capture clock offset (bit 24-55) |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | ... (56-63) |
+// +-+-+-+-+-+-+-+-+
+constexpr RTPExtensionType AbsoluteCaptureTimeExtension::kId;
+constexpr uint8_t AbsoluteCaptureTimeExtension::kValueSizeBytes;
+constexpr uint8_t AbsoluteCaptureTimeExtension::
+ kValueSizeBytesWithoutEstimatedCaptureClockOffset;
+
+bool AbsoluteCaptureTimeExtension::Parse(rtc::ArrayView<const uint8_t> data,
+ AbsoluteCaptureTime* extension) {
+ if (data.size() != kValueSizeBytes &&
+ data.size() != kValueSizeBytesWithoutEstimatedCaptureClockOffset) {
+ return false;
+ }
+
+ extension->absolute_capture_timestamp =
+ ByteReader<uint64_t>::ReadBigEndian(data.data());
+
+ if (data.size() != kValueSizeBytesWithoutEstimatedCaptureClockOffset) {
+ extension->estimated_capture_clock_offset =
+ ByteReader<int64_t>::ReadBigEndian(data.data() + 8);
+ }
+
+ return true;
+}
+
+size_t AbsoluteCaptureTimeExtension::ValueSize(
+ const AbsoluteCaptureTime& extension) {
+ if (extension.estimated_capture_clock_offset != absl::nullopt) {
+ return kValueSizeBytes;
+ } else {
+ return kValueSizeBytesWithoutEstimatedCaptureClockOffset;
+ }
+}
+
+bool AbsoluteCaptureTimeExtension::Write(rtc::ArrayView<uint8_t> data,
+ const AbsoluteCaptureTime& extension) {
+ RTC_DCHECK_EQ(data.size(), ValueSize(extension));
+
+ ByteWriter<uint64_t>::WriteBigEndian(data.data(),
+ extension.absolute_capture_timestamp);
+
+ if (data.size() != kValueSizeBytesWithoutEstimatedCaptureClockOffset) {
+ ByteWriter<int64_t>::WriteBigEndian(
+ data.data() + 8, extension.estimated_capture_clock_offset.value());
+ }
+
+ return true;
+}
+
+// An RTP Header Extension for Client-to-Mixer Audio Level Indication
+//
+// https://tools.ietf.org/html/rfc6464
+//
+// The form of the audio level extension block:
+//
+// 0 1
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | ID | len=0 |V| level |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// Sample Audio Level Encoding Using the One-Byte Header Format
+//
+// 0 1 2
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | ID | len=1 |V| level |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// Sample Audio Level Encoding Using the Two-Byte Header Format
+
+constexpr RTPExtensionType AudioLevel::kId;
+constexpr uint8_t AudioLevel::kValueSizeBytes;
+
+bool AudioLevel::Parse(rtc::ArrayView<const uint8_t> data,
+ bool* voice_activity,
+ uint8_t* audio_level) {
+  // The one-byte and two-byte formats share the same data definition.
+ if (data.size() != 1)
+ return false;
+ *voice_activity = (data[0] & 0x80) != 0;
+ *audio_level = data[0] & 0x7F;
+ return true;
+}
+
+bool AudioLevel::Write(rtc::ArrayView<uint8_t> data,
+ bool voice_activity,
+ uint8_t audio_level) {
+  // The one-byte and two-byte formats share the same data definition.
+ RTC_DCHECK_EQ(data.size(), 1);
+ RTC_CHECK_LE(audio_level, 0x7f);
+ data[0] = (voice_activity ? 0x80 : 0x00) | audio_level;
+ return true;
+}
+
+#if !defined(WEBRTC_MOZILLA_BUILD)
+// An RTP Header Extension for Mixer-to-Client Audio Level Indication
+//
+// https://tools.ietf.org/html/rfc6465
+//
+// The form of the audio level extension block:
+//
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | ID | len=2 |0| level 1 |0| level 2 |0| level 3 |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// Sample Audio Level Encoding Using the One-Byte Header Format
+//
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | ID | len=3 |0| level 1 |0| level 2 |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |0| level 3 | 0 (pad) | ... |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// Sample Audio Level Encoding Using the Two-Byte Header Format
+constexpr RTPExtensionType CsrcAudioLevel::kId;
+constexpr uint8_t CsrcAudioLevel::kMaxValueSizeBytes;
+
+bool CsrcAudioLevel::Parse(rtc::ArrayView<const uint8_t> data,
+ std::vector<uint8_t>* csrc_audio_levels) {
+ if (data.size() > kRtpCsrcSize) {
+ return false;
+ }
+ csrc_audio_levels->resize(data.size());
+ for (size_t i = 0; i < data.size(); i++) {
+ (*csrc_audio_levels)[i] = data[i] & 0x7F;
+ }
+ return true;
+}
+
+size_t CsrcAudioLevel::ValueSize(
+ rtc::ArrayView<const uint8_t> csrc_audio_levels) {
+ return csrc_audio_levels.size();
+}
+
+bool CsrcAudioLevel::Write(rtc::ArrayView<uint8_t> data,
+ rtc::ArrayView<const uint8_t> csrc_audio_levels) {
+ RTC_CHECK_LE(csrc_audio_levels.size(), kRtpCsrcSize);
+ if (csrc_audio_levels.size() != data.size()) {
+ return false;
+ }
+ for (size_t i = 0; i < csrc_audio_levels.size(); i++) {
+ data[i] = csrc_audio_levels[i] & 0x7F;
+ }
+ return true;
+}
+#endif
+
+// From RFC 5450: Transmission Time Offsets in RTP Streams.
+//
+// The transmission time is signaled to the receiver in-band using the
+// general mechanism for RTP header extensions [RFC8285]. The payload
+// of this extension (the transmitted value) is a 24-bit signed integer.
+// When added to the RTP timestamp of the packet, it represents the
+// "effective" RTP transmission time of the packet, on the RTP
+// timescale.
+//
+// The form of the transmission offset extension block:
+//
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | ID | len=2 | transmission offset |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+constexpr RTPExtensionType TransmissionOffset::kId;
+constexpr uint8_t TransmissionOffset::kValueSizeBytes;
+
+bool TransmissionOffset::Parse(rtc::ArrayView<const uint8_t> data,
+ int32_t* rtp_time) {
+ if (data.size() != 3)
+ return false;
+ *rtp_time = ByteReader<int32_t, 3>::ReadBigEndian(data.data());
+ return true;
+}
+
+bool TransmissionOffset::Write(rtc::ArrayView<uint8_t> data, int32_t rtp_time) {
+ RTC_DCHECK_EQ(data.size(), 3);
+ RTC_DCHECK_LE(rtp_time, 0x00ffffff);
+ ByteWriter<int32_t, 3>::WriteBigEndian(data.data(), rtp_time);
+ return true;
+}
+
+// TransportSequenceNumber
+//
+// 0 1 2
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | ID | L=1 |transport-wide sequence number |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+constexpr RTPExtensionType TransportSequenceNumber::kId;
+constexpr uint8_t TransportSequenceNumber::kValueSizeBytes;
+
+bool TransportSequenceNumber::Parse(rtc::ArrayView<const uint8_t> data,
+ uint16_t* transport_sequence_number) {
+ if (data.size() != kValueSizeBytes)
+ return false;
+ *transport_sequence_number = ByteReader<uint16_t>::ReadBigEndian(data.data());
+ return true;
+}
+
+bool TransportSequenceNumber::Write(rtc::ArrayView<uint8_t> data,
+ uint16_t transport_sequence_number) {
+ RTC_DCHECK_EQ(data.size(), ValueSize(transport_sequence_number));
+ ByteWriter<uint16_t>::WriteBigEndian(data.data(), transport_sequence_number);
+ return true;
+}
+
+// TransportSequenceNumberV2
+//
+// In addition to the format used for TransportSequenceNumber, V2 also
+// supports the following packet format, where two extra bytes specify that
+// the sender requests immediate feedback.
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | ID | L=3 |transport-wide sequence number |T| seq count |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |seq count cont.|
+// +-+-+-+-+-+-+-+-+
+//
+// The `T` bit determines whether the feedback should include timing
+// information, and `seq count` determines how many packets the feedback
+// packet should cover, including the current packet. If `seq count` is zero,
+// no feedback is requested.
+constexpr RTPExtensionType TransportSequenceNumberV2::kId;
+constexpr uint8_t TransportSequenceNumberV2::kValueSizeBytes;
+constexpr uint8_t
+ TransportSequenceNumberV2::kValueSizeBytesWithoutFeedbackRequest;
+constexpr uint16_t TransportSequenceNumberV2::kIncludeTimestampsBit;
+
+bool TransportSequenceNumberV2::Parse(
+ rtc::ArrayView<const uint8_t> data,
+ uint16_t* transport_sequence_number,
+ absl::optional<FeedbackRequest>* feedback_request) {
+ if (data.size() != kValueSizeBytes &&
+ data.size() != kValueSizeBytesWithoutFeedbackRequest)
+ return false;
+
+ *transport_sequence_number = ByteReader<uint16_t>::ReadBigEndian(data.data());
+
+ *feedback_request = absl::nullopt;
+ if (data.size() == kValueSizeBytes) {
+ uint16_t feedback_request_raw =
+ ByteReader<uint16_t>::ReadBigEndian(data.data() + 2);
+ bool include_timestamps =
+ (feedback_request_raw & kIncludeTimestampsBit) != 0;
+ uint16_t sequence_count = feedback_request_raw & ~kIncludeTimestampsBit;
+
+ // If `sequence_count` is zero no feedback is requested.
+ if (sequence_count != 0) {
+ *feedback_request = {include_timestamps, sequence_count};
+ }
+ }
+ return true;
+}
+
+bool TransportSequenceNumberV2::Write(
+ rtc::ArrayView<uint8_t> data,
+ uint16_t transport_sequence_number,
+ const absl::optional<FeedbackRequest>& feedback_request) {
+ RTC_DCHECK_EQ(data.size(),
+ ValueSize(transport_sequence_number, feedback_request));
+
+ ByteWriter<uint16_t>::WriteBigEndian(data.data(), transport_sequence_number);
+
+ if (feedback_request) {
+ RTC_DCHECK_GE(feedback_request->sequence_count, 0);
+ RTC_DCHECK_LT(feedback_request->sequence_count, kIncludeTimestampsBit);
+ uint16_t feedback_request_raw =
+ feedback_request->sequence_count |
+ (feedback_request->include_timestamps ? kIncludeTimestampsBit : 0);
+ ByteWriter<uint16_t>::WriteBigEndian(data.data() + 2, feedback_request_raw);
+ }
+ return true;
+}
+
+// Coordination of Video Orientation in RTP streams.
+//
+// Coordination of Video Orientation consists of signaling the current
+// orientation of the image captured on the sender side to the receiver for
+// appropriate rendering and display.
+//
+// 0 1
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | ID | len=0 |0 0 0 0 C F R R|
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+constexpr RTPExtensionType VideoOrientation::kId;
+constexpr uint8_t VideoOrientation::kValueSizeBytes;
+
+bool VideoOrientation::Parse(rtc::ArrayView<const uint8_t> data,
+ VideoRotation* rotation) {
+ if (data.size() != 1)
+ return false;
+ *rotation = ConvertCVOByteToVideoRotation(data[0]);
+ return true;
+}
+
+bool VideoOrientation::Write(rtc::ArrayView<uint8_t> data,
+ VideoRotation rotation) {
+ RTC_DCHECK_EQ(data.size(), 1);
+ data[0] = ConvertVideoRotationToCVOByte(rotation);
+ return true;
+}
+
+bool VideoOrientation::Parse(rtc::ArrayView<const uint8_t> data,
+ uint8_t* value) {
+ if (data.size() != 1)
+ return false;
+ *value = data[0];
+ return true;
+}
+
+bool VideoOrientation::Write(rtc::ArrayView<uint8_t> data, uint8_t value) {
+ RTC_DCHECK_EQ(data.size(), 1);
+ data[0] = value;
+ return true;
+}
+
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | ID | len=2 | MIN delay | MAX delay |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+constexpr RTPExtensionType PlayoutDelayLimits::kId;
+constexpr uint8_t PlayoutDelayLimits::kValueSizeBytes;
+
+bool PlayoutDelayLimits::Parse(rtc::ArrayView<const uint8_t> data,
+ VideoPlayoutDelay* playout_delay) {
+ RTC_DCHECK(playout_delay);
+ if (data.size() != 3)
+ return false;
+ uint32_t raw = ByteReader<uint32_t, 3>::ReadBigEndian(data.data());
+ uint16_t min_raw = (raw >> 12);
+ uint16_t max_raw = (raw & 0xfff);
+ if (min_raw > max_raw)
+ return false;
+ playout_delay->min_ms = min_raw * kGranularityMs;
+ playout_delay->max_ms = max_raw * kGranularityMs;
+ return true;
+}
+
+bool PlayoutDelayLimits::Write(rtc::ArrayView<uint8_t> data,
+ const VideoPlayoutDelay& playout_delay) {
+ RTC_DCHECK_EQ(data.size(), 3);
+ RTC_DCHECK_LE(0, playout_delay.min_ms);
+ RTC_DCHECK_LE(playout_delay.min_ms, playout_delay.max_ms);
+ RTC_DCHECK_LE(playout_delay.max_ms, kMaxMs);
+  // Convert milliseconds to the value sent in the extension header.
+ uint32_t min_delay = playout_delay.min_ms / kGranularityMs;
+ uint32_t max_delay = playout_delay.max_ms / kGranularityMs;
+ ByteWriter<uint32_t, 3>::WriteBigEndian(data.data(),
+ (min_delay << 12) | max_delay);
+ return true;
+}
+
+#if defined(WEBRTC_MOZILLA_BUILD)
+// CSRCAudioLevel
+// Sample Audio Level Encoding Using the One-Byte Header Format
+// Note that the range of len is 1 to 15 which is encoded as 0 to 14
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | ID | len=2 |0| level 1 |0| level 2 |0| level 3 |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+
+constexpr RTPExtensionType CsrcAudioLevel::kId;
+constexpr const char* CsrcAudioLevel::kUri;
+
+bool CsrcAudioLevel::Parse(rtc::ArrayView<const uint8_t> data,
+ CsrcAudioLevelList* csrcAudioLevels) {
+ if (data.size() < 1 || data.size() > kRtpCsrcSize)
+ return false;
+ csrcAudioLevels->numAudioLevels = data.size();
+ for(uint8_t i = 0; i < csrcAudioLevels->numAudioLevels; i++) {
+ // Ensure range is 0 to 127 inclusive
+ csrcAudioLevels->arrOfAudioLevels[i] = 0x7f & data[i];
+ }
+ return true;
+}
+
+size_t CsrcAudioLevel::ValueSize(const CsrcAudioLevelList& csrcAudioLevels) {
+ return csrcAudioLevels.numAudioLevels;
+}
+
+bool CsrcAudioLevel::Write(rtc::ArrayView<uint8_t> data,
+ const CsrcAudioLevelList& csrcAudioLevels) {
+ RTC_DCHECK_GE(csrcAudioLevels.numAudioLevels, 0);
+ for(uint8_t i = 0; i < csrcAudioLevels.numAudioLevels; i++) {
+ data[i] = csrcAudioLevels.arrOfAudioLevels[i] & 0x7f;
+ }
+  // This extension, if used, must carry at least one audio level.
+ return csrcAudioLevels.numAudioLevels;
+}
+#endif
+
+// Video Content Type.
+//
+// E.g. default video or screenshare.
+//
+// 0 1
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | ID | len=0 | Content type |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+constexpr RTPExtensionType VideoContentTypeExtension::kId;
+constexpr uint8_t VideoContentTypeExtension::kValueSizeBytes;
+
+bool VideoContentTypeExtension::Parse(rtc::ArrayView<const uint8_t> data,
+ VideoContentType* content_type) {
+ if (data.size() == 1 &&
+ videocontenttypehelpers::IsValidContentType(data[0])) {
+ *content_type = static_cast<VideoContentType>(data[0]);
+ return true;
+ }
+ return false;
+}
+
+bool VideoContentTypeExtension::Write(rtc::ArrayView<uint8_t> data,
+ VideoContentType content_type) {
+ RTC_DCHECK_EQ(data.size(), 1);
+ data[0] = static_cast<uint8_t>(content_type);
+ return true;
+}
+
+// Video Timing.
+// Six timestamps in milliseconds, counted from the capture time stored in the
+// RTP header: encode start/finish, packetization complete, pacer exit, and two
+// slots reserved for modification by the network. `flags` is a bitmask with
+// the following allowed values:
+// 0 = Valid data, but no flags available (backwards compatibility)
+// 1 = Frame marked as timing frame due to cyclic timer.
+// 2 = Frame marked as timing frame due to size being outside limit.
+// 255 = Invalid. The whole timing frame extension should be ignored.
+//
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | ID | len=12| flags | encode start ms delta |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | encode finish ms delta | packetizer finish ms delta |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | pacer exit ms delta | network timestamp ms delta |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | network2 timestamp ms delta |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+constexpr RTPExtensionType VideoTimingExtension::kId;
+constexpr uint8_t VideoTimingExtension::kValueSizeBytes;
+constexpr uint8_t VideoTimingExtension::kFlagsOffset;
+constexpr uint8_t VideoTimingExtension::kEncodeStartDeltaOffset;
+constexpr uint8_t VideoTimingExtension::kEncodeFinishDeltaOffset;
+constexpr uint8_t VideoTimingExtension::kPacketizationFinishDeltaOffset;
+constexpr uint8_t VideoTimingExtension::kPacerExitDeltaOffset;
+constexpr uint8_t VideoTimingExtension::kNetworkTimestampDeltaOffset;
+constexpr uint8_t VideoTimingExtension::kNetwork2TimestampDeltaOffset;
+
+bool VideoTimingExtension::Parse(rtc::ArrayView<const uint8_t> data,
+ VideoSendTiming* timing) {
+ RTC_DCHECK(timing);
+ // TODO(sprang): Deprecate support for old wire format.
+ ptrdiff_t off = 0;
+ switch (data.size()) {
+ case kValueSizeBytes - 1:
+ timing->flags = 0;
+ off = 1; // Old wire format without the flags field.
+ break;
+ case kValueSizeBytes:
+ timing->flags = ByteReader<uint8_t>::ReadBigEndian(data.data());
+ break;
+ default:
+ return false;
+ }
+
+ timing->encode_start_delta_ms = ByteReader<uint16_t>::ReadBigEndian(
+ data.data() + kEncodeStartDeltaOffset - off);
+ timing->encode_finish_delta_ms = ByteReader<uint16_t>::ReadBigEndian(
+ data.data() + kEncodeFinishDeltaOffset - off);
+ timing->packetization_finish_delta_ms = ByteReader<uint16_t>::ReadBigEndian(
+ data.data() + kPacketizationFinishDeltaOffset - off);
+ timing->pacer_exit_delta_ms = ByteReader<uint16_t>::ReadBigEndian(
+ data.data() + kPacerExitDeltaOffset - off);
+ timing->network_timestamp_delta_ms = ByteReader<uint16_t>::ReadBigEndian(
+ data.data() + kNetworkTimestampDeltaOffset - off);
+ timing->network2_timestamp_delta_ms = ByteReader<uint16_t>::ReadBigEndian(
+ data.data() + kNetwork2TimestampDeltaOffset - off);
+ return true;
+}
+
+bool VideoTimingExtension::Write(rtc::ArrayView<uint8_t> data,
+ const VideoSendTiming& timing) {
+ RTC_DCHECK_EQ(data.size(), 1 + 2 * 6);
+ ByteWriter<uint8_t>::WriteBigEndian(data.data() + kFlagsOffset, timing.flags);
+ ByteWriter<uint16_t>::WriteBigEndian(data.data() + kEncodeStartDeltaOffset,
+ timing.encode_start_delta_ms);
+ ByteWriter<uint16_t>::WriteBigEndian(data.data() + kEncodeFinishDeltaOffset,
+ timing.encode_finish_delta_ms);
+ ByteWriter<uint16_t>::WriteBigEndian(
+ data.data() + kPacketizationFinishDeltaOffset,
+ timing.packetization_finish_delta_ms);
+ ByteWriter<uint16_t>::WriteBigEndian(data.data() + kPacerExitDeltaOffset,
+ timing.pacer_exit_delta_ms);
+ ByteWriter<uint16_t>::WriteBigEndian(
+ data.data() + kNetworkTimestampDeltaOffset,
+ timing.network_timestamp_delta_ms);
+ ByteWriter<uint16_t>::WriteBigEndian(
+ data.data() + kNetwork2TimestampDeltaOffset,
+ timing.network2_timestamp_delta_ms);
+ return true;
+}
+
+bool VideoTimingExtension::Write(rtc::ArrayView<uint8_t> data,
+ uint16_t time_delta_ms,
+ uint8_t offset) {
+ RTC_DCHECK_GE(data.size(), offset + 2);
+ RTC_DCHECK_LE(offset, kValueSizeBytes - sizeof(uint16_t));
+ ByteWriter<uint16_t>::WriteBigEndian(data.data() + offset, time_delta_ms);
+ return true;
+}
+
+// Color space including HDR metadata as an optional field.
+//
+// RTP header extension to carry color space information and optionally HDR
+// metadata. The float values in the HDR metadata struct are upscaled by a
+// static factor and transmitted as unsigned integers.
+//
+// Data layout of color space with HDR metadata (two-byte RTP header extension)
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | ID | length=28 | primaries | transfer |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | matrix |range+chr.sit. | luminance_max |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | luminance_min | mastering_metadata.|
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |primary_r.x and .y | mastering_metadata.|
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |primary_g.x and .y | mastering_metadata.|
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |primary_b.x and .y | mastering_metadata.|
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |white.x and .y | max_content_light_level |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | max_frame_average_light_level |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//
+// Data layout of color space w/o HDR metadata (one-byte RTP header extension)
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | ID | L = 3 | primaries | transfer | matrix |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |range+chr.sit. |
+// +-+-+-+-+-+-+-+-+
+
+constexpr RTPExtensionType ColorSpaceExtension::kId;
+constexpr uint8_t ColorSpaceExtension::kValueSizeBytes;
+
+bool ColorSpaceExtension::Parse(rtc::ArrayView<const uint8_t> data,
+ ColorSpace* color_space) {
+ RTC_DCHECK(color_space);
+ if (data.size() != kValueSizeBytes &&
+ data.size() != kValueSizeBytesWithoutHdrMetadata)
+ return false;
+
+ size_t offset = 0;
+ // Read color space information.
+ if (!color_space->set_primaries_from_uint8(data[offset++]))
+ return false;
+ if (!color_space->set_transfer_from_uint8(data[offset++]))
+ return false;
+ if (!color_space->set_matrix_from_uint8(data[offset++]))
+ return false;
+
+ uint8_t range_and_chroma_siting = data[offset++];
+ if (!color_space->set_range_from_uint8((range_and_chroma_siting >> 4) & 0x03))
+ return false;
+ if (!color_space->set_chroma_siting_horizontal_from_uint8(
+ (range_and_chroma_siting >> 2) & 0x03))
+ return false;
+ if (!color_space->set_chroma_siting_vertical_from_uint8(
+ range_and_chroma_siting & 0x03))
+ return false;
+
+ // Read HDR metadata if it exists, otherwise clear it.
+ if (data.size() == kValueSizeBytesWithoutHdrMetadata) {
+ color_space->set_hdr_metadata(nullptr);
+ } else {
+ HdrMetadata hdr_metadata;
+ offset += ParseHdrMetadata(data.subview(offset), &hdr_metadata);
+ if (!hdr_metadata.Validate())
+ return false;
+ color_space->set_hdr_metadata(&hdr_metadata);
+ }
+ RTC_DCHECK_EQ(ValueSize(*color_space), offset);
+ return true;
+}
+
+bool ColorSpaceExtension::Write(rtc::ArrayView<uint8_t> data,
+ const ColorSpace& color_space) {
+ RTC_DCHECK_EQ(data.size(), ValueSize(color_space));
+ size_t offset = 0;
+ // Write color space information.
+ data[offset++] = static_cast<uint8_t>(color_space.primaries());
+ data[offset++] = static_cast<uint8_t>(color_space.transfer());
+ data[offset++] = static_cast<uint8_t>(color_space.matrix());
+ data[offset++] = CombineRangeAndChromaSiting(
+ color_space.range(), color_space.chroma_siting_horizontal(),
+ color_space.chroma_siting_vertical());
+
+ // Write HDR metadata if it exists.
+ if (color_space.hdr_metadata()) {
+ offset +=
+ WriteHdrMetadata(data.subview(offset), *color_space.hdr_metadata());
+ }
+ RTC_DCHECK_EQ(ValueSize(color_space), offset);
+ return true;
+}
+
+// Combines range and chroma siting into one byte with the following bit layout:
+// bits 0-1 Chroma siting vertical.
+// 2-3 Chroma siting horizontal.
+// 4-5 Range.
+// 6-7 Unused.
+uint8_t ColorSpaceExtension::CombineRangeAndChromaSiting(
+ ColorSpace::RangeID range,
+ ColorSpace::ChromaSiting chroma_siting_horizontal,
+ ColorSpace::ChromaSiting chroma_siting_vertical) {
+ RTC_DCHECK_LE(static_cast<uint8_t>(range), 3);
+ RTC_DCHECK_LE(static_cast<uint8_t>(chroma_siting_horizontal), 3);
+ RTC_DCHECK_LE(static_cast<uint8_t>(chroma_siting_vertical), 3);
+ return (static_cast<uint8_t>(range) << 4) |
+ (static_cast<uint8_t>(chroma_siting_horizontal) << 2) |
+ static_cast<uint8_t>(chroma_siting_vertical);
+}
+
+size_t ColorSpaceExtension::ParseHdrMetadata(rtc::ArrayView<const uint8_t> data,
+ HdrMetadata* hdr_metadata) {
+ RTC_DCHECK_EQ(data.size(),
+ kValueSizeBytes - kValueSizeBytesWithoutHdrMetadata);
+ size_t offset = 0;
+ offset += ParseLuminance(data.data() + offset,
+ &hdr_metadata->mastering_metadata.luminance_max,
+ kLuminanceMaxDenominator);
+ offset += ParseLuminance(data.data() + offset,
+ &hdr_metadata->mastering_metadata.luminance_min,
+ kLuminanceMinDenominator);
+ offset += ParseChromaticity(data.data() + offset,
+ &hdr_metadata->mastering_metadata.primary_r);
+ offset += ParseChromaticity(data.data() + offset,
+ &hdr_metadata->mastering_metadata.primary_g);
+ offset += ParseChromaticity(data.data() + offset,
+ &hdr_metadata->mastering_metadata.primary_b);
+ offset += ParseChromaticity(data.data() + offset,
+ &hdr_metadata->mastering_metadata.white_point);
+ hdr_metadata->max_content_light_level =
+ ByteReader<uint16_t>::ReadBigEndian(data.data() + offset);
+ offset += 2;
+ hdr_metadata->max_frame_average_light_level =
+ ByteReader<uint16_t>::ReadBigEndian(data.data() + offset);
+ offset += 2;
+ return offset;
+}
+
+size_t ColorSpaceExtension::ParseChromaticity(
+ const uint8_t* data,
+ HdrMasteringMetadata::Chromaticity* p) {
+ uint16_t chromaticity_x_scaled = ByteReader<uint16_t>::ReadBigEndian(data);
+ uint16_t chromaticity_y_scaled =
+ ByteReader<uint16_t>::ReadBigEndian(data + 2);
+ p->x = static_cast<float>(chromaticity_x_scaled) / kChromaticityDenominator;
+ p->y = static_cast<float>(chromaticity_y_scaled) / kChromaticityDenominator;
+ return 4; // Return number of bytes read.
+}
+
+size_t ColorSpaceExtension::ParseLuminance(const uint8_t* data,
+ float* f,
+ int denominator) {
+ uint16_t luminance_scaled = ByteReader<uint16_t>::ReadBigEndian(data);
+ *f = static_cast<float>(luminance_scaled) / denominator;
+ return 2; // Return number of bytes read.
+}
+
+size_t ColorSpaceExtension::WriteHdrMetadata(rtc::ArrayView<uint8_t> data,
+ const HdrMetadata& hdr_metadata) {
+ RTC_DCHECK_EQ(data.size(),
+ kValueSizeBytes - kValueSizeBytesWithoutHdrMetadata);
+ RTC_DCHECK(hdr_metadata.Validate());
+ size_t offset = 0;
+ offset += WriteLuminance(data.data() + offset,
+ hdr_metadata.mastering_metadata.luminance_max,
+ kLuminanceMaxDenominator);
+ offset += WriteLuminance(data.data() + offset,
+ hdr_metadata.mastering_metadata.luminance_min,
+ kLuminanceMinDenominator);
+ offset += WriteChromaticity(data.data() + offset,
+ hdr_metadata.mastering_metadata.primary_r);
+ offset += WriteChromaticity(data.data() + offset,
+ hdr_metadata.mastering_metadata.primary_g);
+ offset += WriteChromaticity(data.data() + offset,
+ hdr_metadata.mastering_metadata.primary_b);
+ offset += WriteChromaticity(data.data() + offset,
+ hdr_metadata.mastering_metadata.white_point);
+
+ ByteWriter<uint16_t>::WriteBigEndian(data.data() + offset,
+ hdr_metadata.max_content_light_level);
+ offset += 2;
+ ByteWriter<uint16_t>::WriteBigEndian(
+ data.data() + offset, hdr_metadata.max_frame_average_light_level);
+ offset += 2;
+ return offset;
+}
+
+size_t ColorSpaceExtension::WriteChromaticity(
+ uint8_t* data,
+ const HdrMasteringMetadata::Chromaticity& p) {
+ RTC_DCHECK_GE(p.x, 0.0f);
+ RTC_DCHECK_LE(p.x, 1.0f);
+ RTC_DCHECK_GE(p.y, 0.0f);
+ RTC_DCHECK_LE(p.y, 1.0f);
+ ByteWriter<uint16_t>::WriteBigEndian(
+ data, std::round(p.x * kChromaticityDenominator));
+ ByteWriter<uint16_t>::WriteBigEndian(
+ data + 2, std::round(p.y * kChromaticityDenominator));
+ return 4; // Return number of bytes written.
+}
+
+size_t ColorSpaceExtension::WriteLuminance(uint8_t* data,
+ float f,
+ int denominator) {
+ RTC_DCHECK_GE(f, 0.0f);
+ float upscaled_value = f * denominator;
+ RTC_DCHECK_LE(upscaled_value, std::numeric_limits<uint16_t>::max());
+ ByteWriter<uint16_t>::WriteBigEndian(data, std::round(upscaled_value));
+ return 2; // Return number of bytes written.
+}
+
+bool BaseRtpStringExtension::Parse(rtc::ArrayView<const uint8_t> data,
+ std::string* str) {
+  if (data.empty() || data[0] == 0)  // A valid string extension can't be empty.
+ return false;
+ const char* cstr = reinterpret_cast<const char*>(data.data());
+  // If there is a \0 character in the middle of `data`, treat it as the end
+ // of the string. Well-formed string extensions shouldn't contain it.
+ str->assign(cstr, strnlen(cstr, data.size()));
+ RTC_DCHECK(!str->empty());
+ return true;
+}
+
+bool BaseRtpStringExtension::Write(rtc::ArrayView<uint8_t> data,
+ absl::string_view str) {
+ if (str.size() > kMaxValueSizeBytes) {
+ return false;
+ }
+ RTC_DCHECK_EQ(data.size(), str.size());
+ RTC_DCHECK_GE(str.size(), 1);
+ memcpy(data.data(), str.data(), str.size());
+ return true;
+}
+
+// Constant declarations for RTP header extension types.
+constexpr RTPExtensionType RtpStreamId::kId;
+constexpr RTPExtensionType RepairedRtpStreamId::kId;
+constexpr RTPExtensionType RtpMid::kId;
+
+// An RTP Header Extension for Inband Comfort Noise
+//
+// The form of the inband comfort noise extension block:
+//
+// 0 1
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | ID | len=0 |N| level |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// Sample Audio Level Encoding Using the One-Byte Header Format
+//
+// 0 1 2
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | ID | len=1 |N| level |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// Sample Audio Level Encoding Using the Two-Byte Header Format
+
+constexpr RTPExtensionType InbandComfortNoiseExtension::kId;
+constexpr uint8_t InbandComfortNoiseExtension::kValueSizeBytes;
+constexpr const char InbandComfortNoiseExtension::kUri[];
+
+bool InbandComfortNoiseExtension::Parse(rtc::ArrayView<const uint8_t> data,
+ absl::optional<uint8_t>* level) {
+ if (data.size() != kValueSizeBytes)
+ return false;
+ *level = (data[0] & 0b1000'0000) != 0
+ ? absl::nullopt
+ : absl::make_optional(data[0] & 0b0111'1111);
+ return true;
+}
+
+bool InbandComfortNoiseExtension::Write(rtc::ArrayView<uint8_t> data,
+ absl::optional<uint8_t> level) {
+ RTC_DCHECK_EQ(data.size(), kValueSizeBytes);
+ data[0] = 0b0000'0000;
+ if (level) {
+ if (*level > 127) {
+ return false;
+ }
+ data[0] = 0b1000'0000 | *level;
+ }
+ return true;
+}
+
+// VideoFrameTrackingIdExtension
+//
+// 0 1 2
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | ID | L=1 | video-frame-tracking-id |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+constexpr RTPExtensionType VideoFrameTrackingIdExtension::kId;
+constexpr uint8_t VideoFrameTrackingIdExtension::kValueSizeBytes;
+
+bool VideoFrameTrackingIdExtension::Parse(rtc::ArrayView<const uint8_t> data,
+ uint16_t* video_frame_tracking_id) {
+ if (data.size() != kValueSizeBytes) {
+ return false;
+ }
+ *video_frame_tracking_id = ByteReader<uint16_t>::ReadBigEndian(data.data());
+ return true;
+}
+
+bool VideoFrameTrackingIdExtension::Write(rtc::ArrayView<uint8_t> data,
+ uint16_t video_frame_tracking_id) {
+ RTC_DCHECK_EQ(data.size(), kValueSizeBytes);
+ ByteWriter<uint16_t>::WriteBigEndian(data.data(), video_frame_tracking_id);
+ return true;
+}
+
+} // namespace webrtc
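[Editor's note] As one concrete example of the bit packing this file implements, the RFC 6464 audio-level byte written by AudioLevel::Write() folds the voice-activity flag and the 7-bit level into a single octet. A hypothetical standalone helper reproducing that layout:

```cpp
#include <cstdint>

// The MSB carries the voice-activity (V) flag and the low 7 bits carry the
// level, matching `data[0] = (voice_activity ? 0x80 : 0x00) | audio_level`
// in AudioLevel::Write() above.
constexpr uint8_t PackAudioLevel(bool voice_activity, uint8_t level) {
  return (voice_activity ? 0x80 : 0x00) | (level & 0x7F);
}
static_assert(PackAudioLevel(true, 127) == 0xFF, "V=1, level=127");
static_assert(PackAudioLevel(false, 10) == 0x0A, "V=0, level=10");
```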
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_header_extensions.h b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_header_extensions.h
new file mode 100644
index 0000000000..4b4984bf6d
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_header_extensions.h
@@ -0,0 +1,386 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef MODULES_RTP_RTCP_SOURCE_RTP_HEADER_EXTENSIONS_H_
+#define MODULES_RTP_RTCP_SOURCE_RTP_HEADER_EXTENSIONS_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <string>
+#include <vector>
+
+#include "absl/strings/string_view.h"
+#include "api/array_view.h"
+#include "api/rtp_headers.h"
+#include "api/rtp_parameters.h"
+#include "api/units/timestamp.h"
+#include "api/video/color_space.h"
+#include "api/video/video_content_type.h"
+#include "api/video/video_rotation.h"
+#include "api/video/video_timing.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+
+namespace webrtc {
+
+class AbsoluteSendTime {
+ public:
+ using value_type = uint32_t;
+ static constexpr RTPExtensionType kId = kRtpExtensionAbsoluteSendTime;
+ static constexpr uint8_t kValueSizeBytes = 3;
+ static constexpr absl::string_view Uri() {
+ return RtpExtension::kAbsSendTimeUri;
+ }
+
+ static bool Parse(rtc::ArrayView<const uint8_t> data, uint32_t* time_24bits);
+ static size_t ValueSize(uint32_t time_24bits) { return kValueSizeBytes; }
+ static bool Write(rtc::ArrayView<uint8_t> data, uint32_t time_24bits);
+
+ static constexpr uint32_t To24Bits(Timestamp time) {
+ int64_t time_us = time.us() % (int64_t{1 << 6} * 1'000'000);
+ int64_t time6x18 = (time_us << 18) / 1'000'000;
+ RTC_DCHECK_GE(time6x18, 0);
+ RTC_DCHECK_LT(time6x18, 1 << 24);
+ return static_cast<uint32_t>(time6x18);
+ }
+};
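+
+// Worked example (illustrative, not part of the upstream API): To24Bits()
+// packs a timestamp into 6.18 fixed-point seconds, wrapping every 64 seconds:
+//   uint32_t q = AbsoluteSendTime::To24Bits(Timestamp::Millis(3250));
+//   // 3.25 s * 2^18 = 851'968, so q == 0x0D0000.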
+
+class AbsoluteCaptureTimeExtension {
+ public:
+ using value_type = AbsoluteCaptureTime;
+ static constexpr RTPExtensionType kId = kRtpExtensionAbsoluteCaptureTime;
+ static constexpr uint8_t kValueSizeBytes = 16;
+ static constexpr uint8_t kValueSizeBytesWithoutEstimatedCaptureClockOffset =
+ 8;
+ static constexpr absl::string_view Uri() {
+ return RtpExtension::kAbsoluteCaptureTimeUri;
+ }
+
+ static bool Parse(rtc::ArrayView<const uint8_t> data,
+ AbsoluteCaptureTime* extension);
+ static size_t ValueSize(const AbsoluteCaptureTime& extension);
+ static bool Write(rtc::ArrayView<uint8_t> data,
+ const AbsoluteCaptureTime& extension);
+};
+
+class AudioLevel {
+ public:
+ static constexpr RTPExtensionType kId = kRtpExtensionAudioLevel;
+ static constexpr uint8_t kValueSizeBytes = 1;
+ static constexpr absl::string_view Uri() {
+ return RtpExtension::kAudioLevelUri;
+ }
+
+ static bool Parse(rtc::ArrayView<const uint8_t> data,
+ bool* voice_activity,
+ uint8_t* audio_level);
+ static size_t ValueSize(bool voice_activity, uint8_t audio_level) {
+ return kValueSizeBytes;
+ }
+ static bool Write(rtc::ArrayView<uint8_t> data,
+ bool voice_activity,
+ uint8_t audio_level);
+};
+
+#if !defined(WEBRTC_MOZILLA_BUILD)
+class CsrcAudioLevel {
+ public:
+ static constexpr RTPExtensionType kId = kRtpExtensionCsrcAudioLevel;
+ static constexpr uint8_t kMaxValueSizeBytes = 15;
+ static constexpr absl::string_view Uri() {
+ return RtpExtension::kCsrcAudioLevelsUri;
+ }
+
+ static bool Parse(rtc::ArrayView<const uint8_t> data,
+ std::vector<uint8_t>* csrc_audio_levels);
+ static size_t ValueSize(rtc::ArrayView<const uint8_t> csrc_audio_levels);
+ static bool Write(rtc::ArrayView<uint8_t> data,
+ rtc::ArrayView<const uint8_t> csrc_audio_levels);
+};
+#endif
+
+class TransmissionOffset {
+ public:
+ using value_type = int32_t;
+ static constexpr RTPExtensionType kId = kRtpExtensionTransmissionTimeOffset;
+ static constexpr uint8_t kValueSizeBytes = 3;
+ static constexpr absl::string_view Uri() {
+ return RtpExtension::kTimestampOffsetUri;
+ }
+
+ static bool Parse(rtc::ArrayView<const uint8_t> data, int32_t* rtp_time);
+ static size_t ValueSize(int32_t rtp_time) { return kValueSizeBytes; }
+ static bool Write(rtc::ArrayView<uint8_t> data, int32_t rtp_time);
+};
+
+class TransportSequenceNumber {
+ public:
+ using value_type = uint16_t;
+ static constexpr RTPExtensionType kId = kRtpExtensionTransportSequenceNumber;
+ static constexpr uint8_t kValueSizeBytes = 2;
+ static constexpr absl::string_view Uri() {
+ return RtpExtension::kTransportSequenceNumberUri;
+ }
+
+ static bool Parse(rtc::ArrayView<const uint8_t> data,
+ uint16_t* transport_sequence_number);
+ static size_t ValueSize(uint16_t /*transport_sequence_number*/) {
+ return kValueSizeBytes;
+ }
+ static bool Write(rtc::ArrayView<uint8_t> data,
+ uint16_t transport_sequence_number);
+};
+
+class TransportSequenceNumberV2 {
+ public:
+ static constexpr RTPExtensionType kId =
+ kRtpExtensionTransportSequenceNumber02;
+ static constexpr uint8_t kValueSizeBytes = 4;
+ static constexpr uint8_t kValueSizeBytesWithoutFeedbackRequest = 2;
+ static constexpr absl::string_view Uri() {
+ return RtpExtension::kTransportSequenceNumberV2Uri;
+ }
+
+ static bool Parse(rtc::ArrayView<const uint8_t> data,
+ uint16_t* transport_sequence_number,
+ absl::optional<FeedbackRequest>* feedback_request);
+ static size_t ValueSize(
+ uint16_t /*transport_sequence_number*/,
+ const absl::optional<FeedbackRequest>& feedback_request) {
+ return feedback_request ? kValueSizeBytes
+ : kValueSizeBytesWithoutFeedbackRequest;
+ }
+ static bool Write(rtc::ArrayView<uint8_t> data,
+ uint16_t transport_sequence_number,
+ const absl::optional<FeedbackRequest>& feedback_request);
+
+ private:
+ static constexpr uint16_t kIncludeTimestampsBit = 1 << 15;
+};
+
+class VideoOrientation {
+ public:
+ using value_type = VideoRotation;
+ static constexpr RTPExtensionType kId = kRtpExtensionVideoRotation;
+ static constexpr uint8_t kValueSizeBytes = 1;
+ static constexpr absl::string_view Uri() {
+ return RtpExtension::kVideoRotationUri;
+ }
+
+ static bool Parse(rtc::ArrayView<const uint8_t> data, VideoRotation* value);
+ static size_t ValueSize(VideoRotation) { return kValueSizeBytes; }
+ static bool Write(rtc::ArrayView<uint8_t> data, VideoRotation value);
+ static bool Parse(rtc::ArrayView<const uint8_t> data, uint8_t* value);
+ static size_t ValueSize(uint8_t value) { return kValueSizeBytes; }
+ static bool Write(rtc::ArrayView<uint8_t> data, uint8_t value);
+};
+
+class PlayoutDelayLimits {
+ public:
+ using value_type = VideoPlayoutDelay;
+ static constexpr RTPExtensionType kId = kRtpExtensionPlayoutDelay;
+ static constexpr uint8_t kValueSizeBytes = 3;
+ static constexpr absl::string_view Uri() {
+ return RtpExtension::kPlayoutDelayUri;
+ }
+
+  // Playout delay in milliseconds. A playout delay limit (min or max)
+  // has 12 bits allocated, allowing values 0-4095, which at 10 ms
+  // granularity translates to a range of 0-40950 milliseconds.
+ static constexpr int kGranularityMs = 10;
+ // Maximum playout delay value in milliseconds.
+ static constexpr int kMaxMs = 0xfff * kGranularityMs; // 40950.
+
+ static bool Parse(rtc::ArrayView<const uint8_t> data,
+ VideoPlayoutDelay* playout_delay);
+ static size_t ValueSize(const VideoPlayoutDelay&) { return kValueSizeBytes; }
+ static bool Write(rtc::ArrayView<uint8_t> data,
+ const VideoPlayoutDelay& playout_delay);
+};
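+
+// Worked example (illustrative; assumes, per the comment above, that the min
+// limit occupies the upper 12 bits and the max limit the lower 12 bits of the
+// 3-byte value):
+//   min = 120 ms, max = 400 ms  ->  12 and 40 in 10 ms units;
+//   value = (12 << 12) | 40 = 0x00C028, wire bytes 0x00 0xC0 0x28.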
+
+class VideoContentTypeExtension {
+ public:
+ using value_type = VideoContentType;
+ static constexpr RTPExtensionType kId = kRtpExtensionVideoContentType;
+ static constexpr uint8_t kValueSizeBytes = 1;
+ static constexpr absl::string_view Uri() {
+ return RtpExtension::kVideoContentTypeUri;
+ }
+
+ static bool Parse(rtc::ArrayView<const uint8_t> data,
+ VideoContentType* content_type);
+ static size_t ValueSize(VideoContentType) { return kValueSizeBytes; }
+ static bool Write(rtc::ArrayView<uint8_t> data,
+ VideoContentType content_type);
+};
+
+class VideoTimingExtension {
+ public:
+ using value_type = VideoSendTiming;
+ static constexpr RTPExtensionType kId = kRtpExtensionVideoTiming;
+ static constexpr uint8_t kValueSizeBytes = 13;
+ static constexpr absl::string_view Uri() {
+ return RtpExtension::kVideoTimingUri;
+ }
+
+ // Offsets of the fields in the RTP header extension, counting from the first
+ // byte after the one-byte header.
+ static constexpr uint8_t kFlagsOffset = 0;
+ static constexpr uint8_t kEncodeStartDeltaOffset = 1;
+ static constexpr uint8_t kEncodeFinishDeltaOffset = 3;
+ static constexpr uint8_t kPacketizationFinishDeltaOffset = 5;
+ static constexpr uint8_t kPacerExitDeltaOffset = 7;
+ static constexpr uint8_t kNetworkTimestampDeltaOffset = 9;
+ static constexpr uint8_t kNetwork2TimestampDeltaOffset = 11;
+
+ static bool Parse(rtc::ArrayView<const uint8_t> data,
+ VideoSendTiming* timing);
+ static size_t ValueSize(const VideoSendTiming&) { return kValueSizeBytes; }
+ static bool Write(rtc::ArrayView<uint8_t> data,
+ const VideoSendTiming& timing);
+
+ static size_t ValueSize(uint16_t time_delta_ms, uint8_t idx) {
+ return kValueSizeBytes;
+ }
+  // Writes a single time delta at the given offset.
+ static bool Write(rtc::ArrayView<uint8_t> data,
+ uint16_t time_delta_ms,
+ uint8_t offset);
+};
+
+class ColorSpaceExtension {
+ public:
+ using value_type = ColorSpace;
+ static constexpr RTPExtensionType kId = kRtpExtensionColorSpace;
+ static constexpr uint8_t kValueSizeBytes = 28;
+ static constexpr uint8_t kValueSizeBytesWithoutHdrMetadata = 4;
+ static constexpr absl::string_view Uri() {
+ return RtpExtension::kColorSpaceUri;
+ }
+
+ static bool Parse(rtc::ArrayView<const uint8_t> data,
+ ColorSpace* color_space);
+ static size_t ValueSize(const ColorSpace& color_space) {
+ return color_space.hdr_metadata() ? kValueSizeBytes
+ : kValueSizeBytesWithoutHdrMetadata;
+ }
+ static bool Write(rtc::ArrayView<uint8_t> data,
+ const ColorSpace& color_space);
+
+ private:
+ static constexpr int kChromaticityDenominator = 50000; // 0.00002 resolution.
+ static constexpr int kLuminanceMaxDenominator = 1; // 1 resolution.
+ static constexpr int kLuminanceMinDenominator = 10000; // 0.0001 resolution.
+
+ static uint8_t CombineRangeAndChromaSiting(
+ ColorSpace::RangeID range,
+ ColorSpace::ChromaSiting chroma_siting_horizontal,
+ ColorSpace::ChromaSiting chroma_siting_vertical);
+ static size_t ParseHdrMetadata(rtc::ArrayView<const uint8_t> data,
+ HdrMetadata* hdr_metadata);
+ static size_t ParseChromaticity(const uint8_t* data,
+ HdrMasteringMetadata::Chromaticity* p);
+ static size_t ParseLuminance(const uint8_t* data, float* f, int denominator);
+ static size_t WriteHdrMetadata(rtc::ArrayView<uint8_t> data,
+ const HdrMetadata& hdr_metadata);
+ static size_t WriteChromaticity(uint8_t* data,
+ const HdrMasteringMetadata::Chromaticity& p);
+ static size_t WriteLuminance(uint8_t* data, float f, int denominator);
+};
+
+#if defined(WEBRTC_MOZILLA_BUILD)
+class CsrcAudioLevel {
+ public:
+ static constexpr RTPExtensionType kId = kRtpExtensionCsrcAudioLevel;
+ static constexpr absl::string_view Uri() {
+ return RtpExtension::kCsrcAudioLevelsUri;
+ }
+ static constexpr const char* kUri =
+ "urn:ietf:params:rtp-hdrext:csrc-audio-level";
+
+ static bool Parse(rtc::ArrayView<const uint8_t> data,
+ CsrcAudioLevelList* csrcAudioLevels);
+ static size_t ValueSize(const CsrcAudioLevelList& csrcAudioLevels);
+ static bool Write(rtc::ArrayView<uint8_t> data, const CsrcAudioLevelList& csrcAudioLevels);
+};
+#endif
+
+// Base extension class for RTP header extensions which are strings.
+// Subclasses must define kId and Uri() static constexpr members.
+class BaseRtpStringExtension {
+ public:
+ using value_type = std::string;
+ // String RTP header extensions are limited to 16 bytes because it is the
+ // maximum length that can be encoded with one-byte header extensions.
+ static constexpr uint8_t kMaxValueSizeBytes = 16;
+
+ static bool Parse(rtc::ArrayView<const uint8_t> data, std::string* str);
+ static size_t ValueSize(absl::string_view str) { return str.size(); }
+ static bool Write(rtc::ArrayView<uint8_t> data, absl::string_view str);
+};
+
+class RtpStreamId : public BaseRtpStringExtension {
+ public:
+ static constexpr RTPExtensionType kId = kRtpExtensionRtpStreamId;
+ static constexpr absl::string_view Uri() { return RtpExtension::kRidUri; }
+};
+
+class RepairedRtpStreamId : public BaseRtpStringExtension {
+ public:
+ static constexpr RTPExtensionType kId = kRtpExtensionRepairedRtpStreamId;
+ static constexpr absl::string_view Uri() {
+ return RtpExtension::kRepairedRidUri;
+ }
+};
+
+class RtpMid : public BaseRtpStringExtension {
+ public:
+ static constexpr RTPExtensionType kId = kRtpExtensionMid;
+ static constexpr absl::string_view Uri() { return RtpExtension::kMidUri; }
+};
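+
+// Minimal usage sketch for the string extensions above (illustrative; assumes
+// the RtpPacket template API declared in rtp_packet.h and the usual
+// ExtensionManager::Register<> helper):
+//   RtpPacket::ExtensionManager extensions;
+//   extensions.Register<RtpMid>(/*id=*/4);
+//   RtpPacket packet(&extensions);
+//   packet.SetExtension<RtpMid>("audio");  // 5 value bytes, one-byte header
+//   absl::optional<std::string> mid = packet.GetExtension<RtpMid>();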
+
+class InbandComfortNoiseExtension {
+ public:
+ using value_type = absl::optional<uint8_t>;
+
+ static constexpr RTPExtensionType kId = kRtpExtensionInbandComfortNoise;
+ static constexpr uint8_t kValueSizeBytes = 1;
+ static constexpr const char kUri[] =
+ "http://www.webrtc.org/experiments/rtp-hdrext/inband-cn";
+ static constexpr absl::string_view Uri() { return kUri; }
+
+ static bool Parse(rtc::ArrayView<const uint8_t> data,
+ absl::optional<uint8_t>* level);
+ static size_t ValueSize(absl::optional<uint8_t> level) {
+ return kValueSizeBytes;
+ }
+ static bool Write(rtc::ArrayView<uint8_t> data,
+ absl::optional<uint8_t> level);
+};
+
+class VideoFrameTrackingIdExtension {
+ public:
+ using value_type = uint16_t;
+ static constexpr RTPExtensionType kId = kRtpExtensionVideoFrameTrackingId;
+ static constexpr uint8_t kValueSizeBytes = 2;
+ static constexpr absl::string_view Uri() {
+ return RtpExtension::kVideoFrameTrackingIdUri;
+ }
+
+ static bool Parse(rtc::ArrayView<const uint8_t> data,
+ uint16_t* video_frame_tracking_id);
+ static size_t ValueSize(uint16_t /*video_frame_tracking_id*/) {
+ return kValueSizeBytes;
+ }
+ static bool Write(rtc::ArrayView<uint8_t> data,
+ uint16_t video_frame_tracking_id);
+};
+
+} // namespace webrtc
+#endif // MODULES_RTP_RTCP_SOURCE_RTP_HEADER_EXTENSIONS_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_packet.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_packet.cc
new file mode 100644
index 0000000000..de275d4f3b
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_packet.cc
@@ -0,0 +1,706 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtp_packet.h"
+
+#include <cstdint>
+#include <cstring>
+#include <utility>
+
+#include "modules/rtp_rtcp/source/byte_io.h"
+#include "modules/rtp_rtcp/source/rtp_header_extensions.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "rtc_base/strings/string_builder.h"
+
+namespace webrtc {
+namespace {
+constexpr size_t kFixedHeaderSize = 12;
+constexpr uint8_t kRtpVersion = 2;
+constexpr uint16_t kOneByteExtensionProfileId = 0xBEDE;
+constexpr uint16_t kTwoByteExtensionProfileId = 0x1000;
+constexpr uint16_t kTwobyteExtensionProfileIdAppBitsFilter = 0xfff0;
+constexpr size_t kOneByteExtensionHeaderLength = 1;
+constexpr size_t kTwoByteExtensionHeaderLength = 2;
+constexpr size_t kDefaultPacketSize = 1500;
+} // namespace
+
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |V=2|P|X| CC |M| PT | sequence number |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | timestamp |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | synchronization source (SSRC) identifier |
+// +=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
+// | Contributing source (CSRC) identifiers |
+// | .... |
+// +=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
+// | header eXtension profile id | length in 32bits |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | Extensions |
+// | .... |
+// +=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
+// | Payload |
+// | .... : padding... |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | padding | Padding size |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+RtpPacket::RtpPacket() : RtpPacket(nullptr, kDefaultPacketSize) {}
+
+RtpPacket::RtpPacket(const ExtensionManager* extensions)
+ : RtpPacket(extensions, kDefaultPacketSize) {}
+
+RtpPacket::RtpPacket(const RtpPacket&) = default;
+
+RtpPacket::RtpPacket(const ExtensionManager* extensions, size_t capacity)
+ : extensions_(extensions ? *extensions : ExtensionManager()),
+ buffer_(capacity) {
+ RTC_DCHECK_GE(capacity, kFixedHeaderSize);
+ Clear();
+}
+
+RtpPacket::~RtpPacket() {}
+
+void RtpPacket::IdentifyExtensions(ExtensionManager extensions) {
+ extensions_ = std::move(extensions);
+}
+
+bool RtpPacket::Parse(const uint8_t* buffer, size_t buffer_size) {
+ if (!ParseBuffer(buffer, buffer_size)) {
+ Clear();
+ return false;
+ }
+ buffer_.SetData(buffer, buffer_size);
+ RTC_DCHECK_EQ(size(), buffer_size);
+ return true;
+}
+
+bool RtpPacket::Parse(rtc::ArrayView<const uint8_t> packet) {
+ return Parse(packet.data(), packet.size());
+}
+
+bool RtpPacket::Parse(rtc::CopyOnWriteBuffer buffer) {
+ if (!ParseBuffer(buffer.cdata(), buffer.size())) {
+ Clear();
+ return false;
+ }
+ size_t buffer_size = buffer.size();
+ buffer_ = std::move(buffer);
+ RTC_DCHECK_EQ(size(), buffer_size);
+ return true;
+}
+
+std::vector<uint32_t> RtpPacket::Csrcs() const {
+ size_t num_csrc = data()[0] & 0x0F;
+ RTC_DCHECK_GE(capacity(), kFixedHeaderSize + num_csrc * 4);
+ std::vector<uint32_t> csrcs(num_csrc);
+ for (size_t i = 0; i < num_csrc; ++i) {
+ csrcs[i] =
+ ByteReader<uint32_t>::ReadBigEndian(&data()[kFixedHeaderSize + i * 4]);
+ }
+ return csrcs;
+}
+
+void RtpPacket::CopyHeaderFrom(const RtpPacket& packet) {
+ marker_ = packet.marker_;
+ payload_type_ = packet.payload_type_;
+ sequence_number_ = packet.sequence_number_;
+ timestamp_ = packet.timestamp_;
+ ssrc_ = packet.ssrc_;
+ payload_offset_ = packet.payload_offset_;
+ extensions_ = packet.extensions_;
+ extension_entries_ = packet.extension_entries_;
+ extensions_size_ = packet.extensions_size_;
+ buffer_ = packet.buffer_.Slice(0, packet.headers_size());
+ // Reset payload and padding.
+ payload_size_ = 0;
+ padding_size_ = 0;
+}
+
+void RtpPacket::SetMarker(bool marker_bit) {
+ marker_ = marker_bit;
+ if (marker_) {
+ WriteAt(1, data()[1] | 0x80);
+ } else {
+ WriteAt(1, data()[1] & 0x7F);
+ }
+}
+
+void RtpPacket::SetPayloadType(uint8_t payload_type) {
+ RTC_DCHECK_LE(payload_type, 0x7Fu);
+ payload_type_ = payload_type;
+ WriteAt(1, (data()[1] & 0x80) | payload_type);
+}
+
+void RtpPacket::SetSequenceNumber(uint16_t seq_no) {
+ sequence_number_ = seq_no;
+ ByteWriter<uint16_t>::WriteBigEndian(WriteAt(2), seq_no);
+}
+
+void RtpPacket::SetTimestamp(uint32_t timestamp) {
+ timestamp_ = timestamp;
+ ByteWriter<uint32_t>::WriteBigEndian(WriteAt(4), timestamp);
+}
+
+void RtpPacket::SetSsrc(uint32_t ssrc) {
+ ssrc_ = ssrc;
+ ByteWriter<uint32_t>::WriteBigEndian(WriteAt(8), ssrc);
+}
+
+void RtpPacket::ZeroMutableExtensions() {
+ for (const ExtensionInfo& extension : extension_entries_) {
+ switch (extensions_.GetType(extension.id)) {
+ case RTPExtensionType::kRtpExtensionNone: {
+ RTC_LOG(LS_WARNING) << "Unidentified extension in the packet.";
+ break;
+ }
+ case RTPExtensionType::kRtpExtensionVideoTiming: {
+        // Nullify the last entries, starting at the pacer exit delta.
+        // These fields are set by the pacer and by SFUs.
+ if (VideoTimingExtension::kPacerExitDeltaOffset < extension.length) {
+ memset(
+ WriteAt(extension.offset +
+ VideoTimingExtension::kPacerExitDeltaOffset),
+ 0,
+ extension.length - VideoTimingExtension::kPacerExitDeltaOffset);
+ }
+ break;
+ }
+ case RTPExtensionType::kRtpExtensionTransportSequenceNumber:
+ case RTPExtensionType::kRtpExtensionTransportSequenceNumber02:
+ case RTPExtensionType::kRtpExtensionTransmissionTimeOffset:
+ case RTPExtensionType::kRtpExtensionAbsoluteSendTime: {
+ // Nullify whole extension, as it's filled in the pacer.
+ memset(WriteAt(extension.offset), 0, extension.length);
+ break;
+ }
+ case RTPExtensionType::kRtpExtensionAudioLevel:
+#if !defined(WEBRTC_MOZILLA_BUILD)
+ case RTPExtensionType::kRtpExtensionCsrcAudioLevel:
+#endif
+ case RTPExtensionType::kRtpExtensionAbsoluteCaptureTime:
+ case RTPExtensionType::kRtpExtensionColorSpace:
+ case RTPExtensionType::kRtpExtensionGenericFrameDescriptor00:
+ case RTPExtensionType::kRtpExtensionGenericFrameDescriptor02:
+ case RTPExtensionType::kRtpExtensionMid:
+ case RTPExtensionType::kRtpExtensionNumberOfExtensions:
+ case RTPExtensionType::kRtpExtensionPlayoutDelay:
+ case RTPExtensionType::kRtpExtensionRepairedRtpStreamId:
+ case RTPExtensionType::kRtpExtensionRtpStreamId:
+ case RTPExtensionType::kRtpExtensionVideoContentType:
+ case RTPExtensionType::kRtpExtensionVideoLayersAllocation:
+ case RTPExtensionType::kRtpExtensionVideoRotation:
+ case RTPExtensionType::kRtpExtensionInbandComfortNoise:
+ case RTPExtensionType::kRtpExtensionVideoFrameTrackingId: {
+ // Non-mutable extension. Don't change it.
+ break;
+ }
+#if defined(WEBRTC_MOZILLA_BUILD)
+ case RTPExtensionType::kRtpExtensionCsrcAudioLevel: {
+      // TODO: This is a Mozilla addition; we need to add a handler for this.
+ RTC_CHECK(false);
+ }
+#endif
+ }
+ }
+}
+
+void RtpPacket::SetCsrcs(rtc::ArrayView<const uint32_t> csrcs) {
+ RTC_DCHECK_EQ(extensions_size_, 0);
+ RTC_DCHECK_EQ(payload_size_, 0);
+ RTC_DCHECK_EQ(padding_size_, 0);
+ RTC_DCHECK_LE(csrcs.size(), 0x0fu);
+ RTC_DCHECK_LE(kFixedHeaderSize + 4 * csrcs.size(), capacity());
+ payload_offset_ = kFixedHeaderSize + 4 * csrcs.size();
+ WriteAt(0, (data()[0] & 0xF0) | rtc::dchecked_cast<uint8_t>(csrcs.size()));
+ size_t offset = kFixedHeaderSize;
+ for (uint32_t csrc : csrcs) {
+ ByteWriter<uint32_t>::WriteBigEndian(WriteAt(offset), csrc);
+ offset += 4;
+ }
+ buffer_.SetSize(payload_offset_);
+}
+
+rtc::ArrayView<uint8_t> RtpPacket::AllocateRawExtension(int id, size_t length) {
+ RTC_DCHECK_GE(id, RtpExtension::kMinId);
+ RTC_DCHECK_LE(id, RtpExtension::kMaxId);
+ RTC_DCHECK_GE(length, 1);
+ RTC_DCHECK_LE(length, RtpExtension::kMaxValueSize);
+ const ExtensionInfo* extension_entry = FindExtensionInfo(id);
+ if (extension_entry != nullptr) {
+ // Extension already reserved. Check if same length is used.
+ if (extension_entry->length == length)
+ return rtc::MakeArrayView(WriteAt(extension_entry->offset), length);
+
+    RTC_LOG(LS_ERROR) << "Length mismatch for extension id " << id
+                      << ": expected "
+                      << static_cast<int>(extension_entry->length)
+                      << ", received " << length;
+ return nullptr;
+ }
+ if (payload_size_ > 0) {
+ RTC_LOG(LS_ERROR) << "Can't add new extension id " << id
+ << " after payload was set.";
+ return nullptr;
+ }
+ if (padding_size_ > 0) {
+ RTC_LOG(LS_ERROR) << "Can't add new extension id " << id
+ << " after padding was set.";
+ return nullptr;
+ }
+
+ const size_t num_csrc = data()[0] & 0x0F;
+ const size_t extensions_offset = kFixedHeaderSize + (num_csrc * 4) + 4;
+  // Determine whether a two-byte header is required for the extension, based
+  // on its id and length. Note that a length of 0 also requires the two-byte
+  // header format. See RFC 8285 Sections 4.2-4.3.
+ const bool two_byte_header_required =
+ id > RtpExtension::kOneByteHeaderExtensionMaxId ||
+ length > RtpExtension::kOneByteHeaderExtensionMaxValueSize || length == 0;
+ RTC_CHECK(!two_byte_header_required || extensions_.ExtmapAllowMixed());
+
+ uint16_t profile_id;
+ if (extensions_size_ > 0) {
+ profile_id =
+ ByteReader<uint16_t>::ReadBigEndian(data() + extensions_offset - 4);
+ if (profile_id == kOneByteExtensionProfileId && two_byte_header_required) {
+ // Is buffer size big enough to fit promotion and new data field?
+ // The header extension will grow with one byte per already allocated
+ // extension + the size of the extension that is about to be allocated.
+ size_t expected_new_extensions_size =
+ extensions_size_ + extension_entries_.size() +
+ kTwoByteExtensionHeaderLength + length;
+ if (extensions_offset + expected_new_extensions_size > capacity()) {
+ RTC_LOG(LS_ERROR)
+ << "Extension cannot be registered: Not enough space left in "
+ "buffer to change to two-byte header extension and add new "
+ "extension.";
+ return nullptr;
+ }
+ // Promote already written data to two-byte header format.
+ PromoteToTwoByteHeaderExtension();
+ profile_id = kTwoByteExtensionProfileId;
+ }
+ } else {
+    // Profile-specific ID: use the one-byte header profile unless the
+    // two-byte header profile is required.
+ profile_id = two_byte_header_required ? kTwoByteExtensionProfileId
+ : kOneByteExtensionProfileId;
+ }
+
+ const size_t extension_header_size = profile_id == kOneByteExtensionProfileId
+ ? kOneByteExtensionHeaderLength
+ : kTwoByteExtensionHeaderLength;
+ size_t new_extensions_size =
+ extensions_size_ + extension_header_size + length;
+ if (extensions_offset + new_extensions_size > capacity()) {
+ RTC_LOG(LS_ERROR)
+ << "Extension cannot be registered: Not enough space left in buffer.";
+ return nullptr;
+ }
+
+ // All checks passed, write down the extension headers.
+ if (extensions_size_ == 0) {
+ RTC_DCHECK_EQ(payload_offset_, kFixedHeaderSize + (num_csrc * 4));
+ WriteAt(0, data()[0] | 0x10); // Set extension bit.
+ ByteWriter<uint16_t>::WriteBigEndian(WriteAt(extensions_offset - 4),
+ profile_id);
+ }
+
+ if (profile_id == kOneByteExtensionProfileId) {
+ uint8_t one_byte_header = rtc::dchecked_cast<uint8_t>(id) << 4;
+ one_byte_header |= rtc::dchecked_cast<uint8_t>(length - 1);
+ WriteAt(extensions_offset + extensions_size_, one_byte_header);
+ } else {
+ // TwoByteHeaderExtension.
+ uint8_t extension_id = rtc::dchecked_cast<uint8_t>(id);
+ WriteAt(extensions_offset + extensions_size_, extension_id);
+ uint8_t extension_length = rtc::dchecked_cast<uint8_t>(length);
+ WriteAt(extensions_offset + extensions_size_ + 1, extension_length);
+ }
+
+ const uint16_t extension_info_offset = rtc::dchecked_cast<uint16_t>(
+ extensions_offset + extensions_size_ + extension_header_size);
+ const uint8_t extension_info_length = rtc::dchecked_cast<uint8_t>(length);
+ extension_entries_.emplace_back(id, extension_info_length,
+ extension_info_offset);
+
+ extensions_size_ = new_extensions_size;
+
+ uint16_t extensions_size_padded =
+ SetExtensionLengthMaybeAddZeroPadding(extensions_offset);
+ payload_offset_ = extensions_offset + extensions_size_padded;
+ buffer_.SetSize(payload_offset_);
+ return rtc::MakeArrayView(WriteAt(extension_info_offset),
+ extension_info_length);
+}
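+
+// Worked example of the header-format choice above (illustrative): ids 1-14
+// with value lengths 1-16 fit the one-byte format (profile 0xBEDE); an id
+// above 14, a value length above 16, or a length of 0 forces the two-byte
+// format (profile 0x1000). E.g. id=5, length=2 encodes as the single header
+// byte 0x51 (5 << 4 | (2 - 1)) followed by the two value bytes.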
+
+void RtpPacket::PromoteToTwoByteHeaderExtension() {
+ size_t num_csrc = data()[0] & 0x0F;
+ size_t extensions_offset = kFixedHeaderSize + (num_csrc * 4) + 4;
+
+ RTC_CHECK_GT(extension_entries_.size(), 0);
+ RTC_CHECK_EQ(payload_size_, 0);
+ RTC_CHECK_EQ(kOneByteExtensionProfileId, ByteReader<uint16_t>::ReadBigEndian(
+ data() + extensions_offset - 4));
+ // Rewrite data.
+ // Each extension adds one to the offset. The write-read delta for the last
+ // extension is therefore the same as the number of extension entries.
+ size_t write_read_delta = extension_entries_.size();
+ for (auto extension_entry = extension_entries_.rbegin();
+ extension_entry != extension_entries_.rend(); ++extension_entry) {
+ size_t read_index = extension_entry->offset;
+ size_t write_index = read_index + write_read_delta;
+ // Update offset.
+ extension_entry->offset = rtc::dchecked_cast<uint16_t>(write_index);
+ // Copy data. Use memmove since read/write regions may overlap.
+ memmove(WriteAt(write_index), data() + read_index, extension_entry->length);
+ // Rewrite id and length.
+ WriteAt(--write_index, extension_entry->length);
+ WriteAt(--write_index, extension_entry->id);
+ --write_read_delta;
+ }
+
+ // Update profile header, extensions length, and zero padding.
+ ByteWriter<uint16_t>::WriteBigEndian(WriteAt(extensions_offset - 4),
+ kTwoByteExtensionProfileId);
+ extensions_size_ += extension_entries_.size();
+ uint16_t extensions_size_padded =
+ SetExtensionLengthMaybeAddZeroPadding(extensions_offset);
+ payload_offset_ = extensions_offset + extensions_size_padded;
+ buffer_.SetSize(payload_offset_);
+}
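+
+// Worked example of the promotion above (illustrative): the one-byte entry
+// `0x51 v1 v2` (id=5, length=2) becomes the two-byte entry `0x05 0x02 v1 v2`.
+// Every entry grows by exactly one header byte, which is why
+// `extensions_size_` grows by `extension_entries_.size()`.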
+
+uint16_t RtpPacket::SetExtensionLengthMaybeAddZeroPadding(
+ size_t extensions_offset) {
+ // Update header length field.
+ uint16_t extensions_words = rtc::dchecked_cast<uint16_t>(
+ (extensions_size_ + 3) / 4); // Wrap up to 32bit.
+ ByteWriter<uint16_t>::WriteBigEndian(WriteAt(extensions_offset - 2),
+ extensions_words);
+ // Fill extension padding place with zeroes.
+ size_t extension_padding_size = 4 * extensions_words - extensions_size_;
+ memset(WriteAt(extensions_offset + extensions_size_), 0,
+ extension_padding_size);
+ return 4 * extensions_words;
+}
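+
+// Worked example (illustrative): with extensions_size_ == 5, extensions_words
+// == (5 + 3) / 4 == 2, so the length field is written as 2, three zero
+// padding bytes are appended, and the function returns 8.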
+
+uint8_t* RtpPacket::AllocatePayload(size_t size_bytes) {
+  // Reset payload size to 0. If the copy-on-write buffer_ was shared, this
+  // will cause a reallocation and memcpy; keeping just the header reduces the
+  // amount copied.
+ SetPayloadSize(0);
+ return SetPayloadSize(size_bytes);
+}
+
+uint8_t* RtpPacket::SetPayloadSize(size_t size_bytes) {
+ RTC_DCHECK_EQ(padding_size_, 0);
+ if (payload_offset_ + size_bytes > capacity()) {
+ RTC_LOG(LS_WARNING) << "Cannot set payload, not enough space in buffer.";
+ return nullptr;
+ }
+ payload_size_ = size_bytes;
+ buffer_.SetSize(payload_offset_ + payload_size_);
+ return WriteAt(payload_offset_);
+}
+
+bool RtpPacket::SetPadding(size_t padding_bytes) {
+ if (payload_offset_ + payload_size_ + padding_bytes > capacity()) {
+ RTC_LOG(LS_WARNING) << "Cannot set padding size " << padding_bytes
+ << ", only "
+ << (capacity() - payload_offset_ - payload_size_)
+ << " bytes left in buffer.";
+ return false;
+ }
+ padding_size_ = rtc::dchecked_cast<uint8_t>(padding_bytes);
+ buffer_.SetSize(payload_offset_ + payload_size_ + padding_size_);
+ if (padding_size_ > 0) {
+ size_t padding_offset = payload_offset_ + payload_size_;
+ size_t padding_end = padding_offset + padding_size_;
+ memset(WriteAt(padding_offset), 0, padding_size_ - 1);
+ WriteAt(padding_end - 1, padding_size_);
+ WriteAt(0, data()[0] | 0x20); // Set padding bit.
+ } else {
+ WriteAt(0, data()[0] & ~0x20); // Clear padding bit.
+ }
+ return true;
+}
+
+void RtpPacket::Clear() {
+ marker_ = false;
+ payload_type_ = 0;
+ sequence_number_ = 0;
+ timestamp_ = 0;
+ ssrc_ = 0;
+ payload_offset_ = kFixedHeaderSize;
+ payload_size_ = 0;
+ padding_size_ = 0;
+ extensions_size_ = 0;
+ extension_entries_.clear();
+
+ memset(WriteAt(0), 0, kFixedHeaderSize);
+ buffer_.SetSize(kFixedHeaderSize);
+ WriteAt(0, kRtpVersion << 6);
+}
+
+bool RtpPacket::ParseBuffer(const uint8_t* buffer, size_t size) {
+ if (size < kFixedHeaderSize) {
+ return false;
+ }
+ const uint8_t version = buffer[0] >> 6;
+ if (version != kRtpVersion) {
+ return false;
+ }
+ const bool has_padding = (buffer[0] & 0x20) != 0;
+ const bool has_extension = (buffer[0] & 0x10) != 0;
+  const uint8_t number_of_csrcs = buffer[0] & 0x0f;
+ marker_ = (buffer[1] & 0x80) != 0;
+ payload_type_ = buffer[1] & 0x7f;
+
+ sequence_number_ = ByteReader<uint16_t>::ReadBigEndian(&buffer[2]);
+ timestamp_ = ByteReader<uint32_t>::ReadBigEndian(&buffer[4]);
+ ssrc_ = ByteReader<uint32_t>::ReadBigEndian(&buffer[8]);
+  if (size < kFixedHeaderSize + number_of_csrcs * 4) {
+ return false;
+ }
+  payload_offset_ = kFixedHeaderSize + number_of_csrcs * 4;
+
+ extensions_size_ = 0;
+ extension_entries_.clear();
+ if (has_extension) {
+ /* RTP header extension, RFC 3550.
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | defined by profile | length |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | header extension |
+ | .... |
+ */
+ size_t extension_offset = payload_offset_ + 4;
+ if (extension_offset > size) {
+ return false;
+ }
+ uint16_t profile =
+ ByteReader<uint16_t>::ReadBigEndian(&buffer[payload_offset_]);
+ size_t extensions_capacity =
+ ByteReader<uint16_t>::ReadBigEndian(&buffer[payload_offset_ + 2]);
+ extensions_capacity *= 4;
+ if (extension_offset + extensions_capacity > size) {
+ return false;
+ }
+ if (profile != kOneByteExtensionProfileId &&
+ (profile & kTwobyteExtensionProfileIdAppBitsFilter) !=
+ kTwoByteExtensionProfileId) {
+ RTC_LOG(LS_WARNING) << "Unsupported rtp extension " << profile;
+ } else {
+ size_t extension_header_length = profile == kOneByteExtensionProfileId
+ ? kOneByteExtensionHeaderLength
+ : kTwoByteExtensionHeaderLength;
+ constexpr uint8_t kPaddingByte = 0;
+ constexpr uint8_t kPaddingId = 0;
+ constexpr uint8_t kOneByteHeaderExtensionReservedId = 15;
+ while (extensions_size_ + extension_header_length < extensions_capacity) {
+ if (buffer[extension_offset + extensions_size_] == kPaddingByte) {
+ extensions_size_++;
+ continue;
+ }
+ int id;
+ uint8_t length;
+ if (profile == kOneByteExtensionProfileId) {
+ id = buffer[extension_offset + extensions_size_] >> 4;
+ length = 1 + (buffer[extension_offset + extensions_size_] & 0xf);
+ if (id == kOneByteHeaderExtensionReservedId ||
+ (id == kPaddingId && length != 1)) {
+ break;
+ }
+ } else {
+ id = buffer[extension_offset + extensions_size_];
+ length = buffer[extension_offset + extensions_size_ + 1];
+ }
+
+ if (extensions_size_ + extension_header_length + length >
+ extensions_capacity) {
+ RTC_LOG(LS_WARNING) << "Oversized rtp header extension.";
+ break;
+ }
+
+ ExtensionInfo& extension_info = FindOrCreateExtensionInfo(id);
+ if (extension_info.length != 0) {
+ RTC_LOG(LS_VERBOSE)
+ << "Duplicate rtp header extension id " << id << ". Overwriting.";
+ }
+
+ size_t offset =
+ extension_offset + extensions_size_ + extension_header_length;
+ if (!rtc::IsValueInRangeForNumericType<uint16_t>(offset)) {
+ RTC_DLOG(LS_WARNING) << "Oversized rtp header extension.";
+ break;
+ }
+ extension_info.offset = static_cast<uint16_t>(offset);
+ extension_info.length = length;
+ extensions_size_ += extension_header_length + length;
+ }
+ }
+ payload_offset_ = extension_offset + extensions_capacity;
+ }
+
+ if (has_padding && payload_offset_ < size) {
+ padding_size_ = buffer[size - 1];
+ if (padding_size_ == 0) {
+ RTC_LOG(LS_WARNING) << "Padding was set, but padding size is zero";
+ return false;
+ }
+ } else {
+ padding_size_ = 0;
+ }
+
+ if (payload_offset_ + padding_size_ > size) {
+ return false;
+ }
+ payload_size_ = size - payload_offset_ - padding_size_;
+ return true;
+}
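+
+// Illustrative walk-through of the extension scan above, assuming a minimal
+// hand-built header block:
+//   0xBE 0xDE 0x00 0x01   // profile 0xBEDE, length = 1 word (4 bytes)
+//   0x21 0xAA 0xBB 0x00   // id=2, len=(0x21 & 0xf)+1=2, value, 1 pad byte
+// The loop records ExtensionInfo{id=2, length=2, offset=extension_offset+1}
+// and then stops, since the single remaining byte is too small to hold
+// another one-byte header plus data.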
+
+const RtpPacket::ExtensionInfo* RtpPacket::FindExtensionInfo(int id) const {
+ for (const ExtensionInfo& extension : extension_entries_) {
+ if (extension.id == id) {
+ return &extension;
+ }
+ }
+ return nullptr;
+}
+
+RtpPacket::ExtensionInfo& RtpPacket::FindOrCreateExtensionInfo(int id) {
+ for (ExtensionInfo& extension : extension_entries_) {
+ if (extension.id == id) {
+ return extension;
+ }
+ }
+ extension_entries_.emplace_back(id);
+ return extension_entries_.back();
+}
+
+rtc::ArrayView<const uint8_t> RtpPacket::FindExtension(
+ ExtensionType type) const {
+ uint8_t id = extensions_.GetId(type);
+ if (id == ExtensionManager::kInvalidId) {
+ // Extension not registered.
+ return nullptr;
+ }
+ ExtensionInfo const* extension_info = FindExtensionInfo(id);
+ if (extension_info == nullptr) {
+ return nullptr;
+ }
+ return rtc::MakeArrayView(data() + extension_info->offset,
+ extension_info->length);
+}
+
+rtc::ArrayView<uint8_t> RtpPacket::AllocateExtension(ExtensionType type,
+ size_t length) {
+ // TODO(webrtc:7990): Add support for empty extensions (length==0).
+ if (length == 0 || length > RtpExtension::kMaxValueSize ||
+ (!extensions_.ExtmapAllowMixed() &&
+ length > RtpExtension::kOneByteHeaderExtensionMaxValueSize)) {
+ return nullptr;
+ }
+
+ uint8_t id = extensions_.GetId(type);
+ if (id == ExtensionManager::kInvalidId) {
+ // Extension not registered.
+ return nullptr;
+ }
+ if (!extensions_.ExtmapAllowMixed() &&
+ id > RtpExtension::kOneByteHeaderExtensionMaxId) {
+ return nullptr;
+ }
+ return AllocateRawExtension(id, length);
+}
+
+bool RtpPacket::HasExtension(ExtensionType type) const {
+ uint8_t id = extensions_.GetId(type);
+ if (id == ExtensionManager::kInvalidId) {
+ // Extension not registered.
+ return false;
+ }
+ return FindExtensionInfo(id) != nullptr;
+}
+
+bool RtpPacket::RemoveExtension(ExtensionType type) {
+ uint8_t id_to_remove = extensions_.GetId(type);
+ if (id_to_remove == ExtensionManager::kInvalidId) {
+ // Extension not registered.
+ RTC_LOG(LS_ERROR) << "Extension not registered, type=" << type
+ << ", packet=" << ToString();
+ return false;
+ }
+
+ // Rebuild new packet from scratch.
+ RtpPacket new_packet;
+
+ new_packet.SetMarker(Marker());
+ new_packet.SetPayloadType(PayloadType());
+ new_packet.SetSequenceNumber(SequenceNumber());
+ new_packet.SetTimestamp(Timestamp());
+ new_packet.SetSsrc(Ssrc());
+ new_packet.IdentifyExtensions(extensions_);
+
+ // Copy all extensions, except the one we are removing.
+ bool found_extension = false;
+ for (const ExtensionInfo& ext : extension_entries_) {
+ if (ext.id == id_to_remove) {
+ found_extension = true;
+ } else {
+ auto extension_data = new_packet.AllocateRawExtension(ext.id, ext.length);
+ if (extension_data.size() != ext.length) {
+ RTC_LOG(LS_ERROR) << "Failed to allocate extension id=" << ext.id
+ << ", length=" << ext.length
+ << ", packet=" << ToString();
+ return false;
+ }
+
+ // Copy extension data to new packet.
+ memcpy(extension_data.data(), ReadAt(ext.offset), ext.length);
+ }
+ }
+
+ if (!found_extension) {
+ RTC_LOG(LS_WARNING) << "Extension not present in RTP packet, type=" << type
+ << ", packet=" << ToString();
+ return false;
+ }
+
+ // Copy payload data to new packet.
+ memcpy(new_packet.AllocatePayload(payload_size()), payload().data(),
+ payload_size());
+
+ // Allocate padding -- must be last!
+ new_packet.SetPadding(padding_size());
+
+ // Success, replace current packet with newly built packet.
+ *this = new_packet;
+ return true;
+}
+
+std::string RtpPacket::ToString() const {
+ rtc::StringBuilder result;
+  result << "{payload_type=" << payload_type_ << ", marker=" << marker_
+ << ", sequence_number=" << sequence_number_
+ << ", padding_size=" << padding_size_ << ", timestamp=" << timestamp_
+ << ", ssrc=" << ssrc_ << ", payload_offset=" << payload_offset_
+ << ", payload_size=" << payload_size_ << ", total_size=" << size()
+ << "}";
+
+ return result.Release();
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_packet.h b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_packet.h
new file mode 100644
index 0000000000..b87d213636
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_packet.h
@@ -0,0 +1,263 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef MODULES_RTP_RTCP_SOURCE_RTP_PACKET_H_
+#define MODULES_RTP_RTCP_SOURCE_RTP_PACKET_H_
+
+#include <string>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/array_view.h"
+#include "modules/rtp_rtcp/include/rtp_header_extension_map.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "rtc_base/copy_on_write_buffer.h"
+
+namespace webrtc {
+
+class RtpPacket {
+ public:
+ using ExtensionType = RTPExtensionType;
+ using ExtensionManager = RtpHeaderExtensionMap;
+
+  // `extensions` is required by the SetExtension/ReserveExtension functions
+  // during packet creation, and is used by Parse if available.
+  // Adding and getting extensions will fail until `extensions` is provided
+  // via the constructor or the IdentifyExtensions function.
+  // `*extensions` is only accessed during construction; the pointer is not
+  // stored.
+ RtpPacket();
+ explicit RtpPacket(const ExtensionManager* extensions);
+ RtpPacket(const RtpPacket&);
+ RtpPacket(const ExtensionManager* extensions, size_t capacity);
+ ~RtpPacket();
+
+ RtpPacket& operator=(const RtpPacket&) = default;
+
+ // Parse and copy given buffer into Packet.
+ // Does not require extension map to be registered (map is only required to
+ // read or allocate extensions in methods GetExtension, AllocateExtension,
+ // etc.)
+ bool Parse(const uint8_t* buffer, size_t size);
+ bool Parse(rtc::ArrayView<const uint8_t> packet);
+
+ // Parse and move given buffer into Packet.
+ bool Parse(rtc::CopyOnWriteBuffer packet);
+
+ // Maps extensions id to their types.
+ void IdentifyExtensions(ExtensionManager extensions);
+
+ // Header.
+ bool Marker() const { return marker_; }
+ uint8_t PayloadType() const { return payload_type_; }
+ uint16_t SequenceNumber() const { return sequence_number_; }
+ uint32_t Timestamp() const { return timestamp_; }
+ uint32_t Ssrc() const { return ssrc_; }
+ std::vector<uint32_t> Csrcs() const;
+
+ size_t headers_size() const { return payload_offset_; }
+
+ // Payload.
+ size_t payload_size() const { return payload_size_; }
+ bool has_padding() const { return buffer_[0] & 0x20; }
+ size_t padding_size() const { return padding_size_; }
+ rtc::ArrayView<const uint8_t> payload() const {
+ return rtc::MakeArrayView(data() + payload_offset_, payload_size_);
+ }
+ rtc::CopyOnWriteBuffer PayloadBuffer() const {
+ return buffer_.Slice(payload_offset_, payload_size_);
+ }
+
+ // Buffer.
+ rtc::CopyOnWriteBuffer Buffer() const { return buffer_; }
+ size_t capacity() const { return buffer_.capacity(); }
+ size_t size() const {
+ return payload_offset_ + payload_size_ + padding_size_;
+ }
+ const uint8_t* data() const { return buffer_.cdata(); }
+ size_t FreeCapacity() const { return capacity() - size(); }
+ size_t MaxPayloadSize() const { return capacity() - headers_size(); }
+
+ // Reset fields and buffer.
+ void Clear();
+
+ // Header setters.
+ void CopyHeaderFrom(const RtpPacket& packet);
+ void SetMarker(bool marker_bit);
+ void SetPayloadType(uint8_t payload_type);
+ void SetSequenceNumber(uint16_t seq_no);
+ void SetTimestamp(uint32_t timestamp);
+ void SetSsrc(uint32_t ssrc);
+
+  // Zero-fills the mutable extensions, i.e. those that are modified after
+  // FEC protection is generated.
+ void ZeroMutableExtensions();
+
+  // Removes the extension of the given `type`; returns false if the extension
+  // was not registered in the packet's extension map or is not present in the
+  // packet. Only the extension to be removed must be registered; other
+  // extensions may be unregistered and will be preserved as-is.
+ bool RemoveExtension(ExtensionType type);
+
+ // Writes csrc list. Assumes:
+ // a) There is enough room left in buffer.
+ // b) Extension headers, payload or padding data has not already been added.
+ void SetCsrcs(rtc::ArrayView<const uint32_t> csrcs);
+
+ // Header extensions.
+ template <typename Extension>
+ bool HasExtension() const;
+ bool HasExtension(ExtensionType type) const;
+
+ // Returns whether there is an associated id for the extension and thus it is
+ // possible to set the extension.
+ template <typename Extension>
+ bool IsRegistered() const;
+
+ template <typename Extension, typename FirstValue, typename... Values>
+ bool GetExtension(FirstValue, Values...) const;
+
+ template <typename Extension>
+ absl::optional<typename Extension::value_type> GetExtension() const;
+
+ // Returns view of the raw extension or empty view on failure.
+ template <typename Extension>
+ rtc::ArrayView<const uint8_t> GetRawExtension() const;
+
+ template <typename Extension, typename... Values>
+ bool SetExtension(const Values&...);
+
+ template <typename Extension>
+ bool ReserveExtension();
+
+ // Find or allocate an extension `type`. Returns view of size `length`
+ // to write raw extension to or an empty view on failure.
+ rtc::ArrayView<uint8_t> AllocateExtension(ExtensionType type, size_t length);
+
+ // Find an extension `type`.
+ // Returns view of the raw extension or empty view on failure.
+ rtc::ArrayView<const uint8_t> FindExtension(ExtensionType type) const;
+
+ // Reserve size_bytes for payload. Returns nullptr on failure.
+ uint8_t* SetPayloadSize(size_t size_bytes);
+  // Same as SetPayloadSize, but doesn't guarantee that the current payload
+  // is kept.
+ uint8_t* AllocatePayload(size_t size_bytes);
+
+ bool SetPadding(size_t padding_size);
+
+ // Returns debug string of RTP packet (without detailed extension info).
+ std::string ToString() const;
+
+ private:
+ struct ExtensionInfo {
+ explicit ExtensionInfo(uint8_t id) : ExtensionInfo(id, 0, 0) {}
+ ExtensionInfo(uint8_t id, uint8_t length, uint16_t offset)
+ : id(id), length(length), offset(offset) {}
+ uint8_t id;
+ uint8_t length;
+ uint16_t offset;
+ };
+
+  // Helper function for Parse. Fills header fields using data in the given
+  // buffer, but does not touch the packet's own buffer, leaving the packet in
+  // an invalid state.
+ bool ParseBuffer(const uint8_t* buffer, size_t size);
+
+ // Returns pointer to extension info for a given id. Returns nullptr if not
+ // found.
+ const ExtensionInfo* FindExtensionInfo(int id) const;
+
+ // Returns reference to extension info for a given id. Creates a new entry
+ // with the specified id if not found.
+ ExtensionInfo& FindOrCreateExtensionInfo(int id);
+
+ // Allocates and returns place to store rtp header extension.
+ // Returns empty arrayview on failure.
+ rtc::ArrayView<uint8_t> AllocateRawExtension(int id, size_t length);
+
+ // Promotes existing one-byte header extensions to two-byte header extensions
+  // by rewriting the data and updating the corresponding extension offsets.
+ void PromoteToTwoByteHeaderExtension();
+
+ uint16_t SetExtensionLengthMaybeAddZeroPadding(size_t extensions_offset);
+
+ uint8_t* WriteAt(size_t offset) { return buffer_.MutableData() + offset; }
+ void WriteAt(size_t offset, uint8_t byte) {
+ buffer_.MutableData()[offset] = byte;
+ }
+ const uint8_t* ReadAt(size_t offset) const { return buffer_.data() + offset; }
+
+ // Header.
+ bool marker_;
+ uint8_t payload_type_;
+ uint8_t padding_size_;
+ uint16_t sequence_number_;
+ uint32_t timestamp_;
+ uint32_t ssrc_;
+ size_t payload_offset_; // Match header size with csrcs and extensions.
+ size_t payload_size_;
+
+ ExtensionManager extensions_;
+ std::vector<ExtensionInfo> extension_entries_;
+ size_t extensions_size_ = 0; // Unaligned.
+ rtc::CopyOnWriteBuffer buffer_;
+};
+
+template <typename Extension>
+bool RtpPacket::HasExtension() const {
+ return HasExtension(Extension::kId);
+}
+
+template <typename Extension>
+bool RtpPacket::IsRegistered() const {
+ return extensions_.IsRegistered(Extension::kId);
+}
+
+template <typename Extension, typename FirstValue, typename... Values>
+bool RtpPacket::GetExtension(FirstValue first, Values... values) const {
+ auto raw = FindExtension(Extension::kId);
+ if (raw.empty())
+ return false;
+ return Extension::Parse(raw, first, values...);
+}
+
+template <typename Extension>
+absl::optional<typename Extension::value_type> RtpPacket::GetExtension() const {
+ absl::optional<typename Extension::value_type> result;
+ auto raw = FindExtension(Extension::kId);
+ if (raw.empty() || !Extension::Parse(raw, &result.emplace()))
+ result = absl::nullopt;
+ return result;
+}
+
+template <typename Extension>
+rtc::ArrayView<const uint8_t> RtpPacket::GetRawExtension() const {
+ return FindExtension(Extension::kId);
+}
+
+template <typename Extension, typename... Values>
+bool RtpPacket::SetExtension(const Values&... values) {
+ const size_t value_size = Extension::ValueSize(values...);
+ auto buffer = AllocateExtension(Extension::kId, value_size);
+ if (buffer.empty())
+ return false;
+ return Extension::Write(buffer, values...);
+}
+
+template <typename Extension>
+bool RtpPacket::ReserveExtension() {
+ auto buffer = AllocateExtension(Extension::kId, Extension::kValueSizeBytes);
+ if (buffer.empty())
+ return false;
+ memset(buffer.data(), 0, Extension::kValueSizeBytes);
+ return true;
+}
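+
+// Minimal usage sketch (illustrative): a sender typically reserves mutable
+// extensions while building the packet and fills them in later:
+//   packet.ReserveExtension<TransportSequenceNumber>();  // zeroed placeholder
+//   // ... later, e.g. in the pacer:
+//   packet.SetExtension<TransportSequenceNumber>(seq);   // rewrites in place
+// SetExtension() reuses the already-allocated slot because
+// AllocateRawExtension() returns the existing entry when the length matches.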
+
+} // namespace webrtc
+
+#endif // MODULES_RTP_RTCP_SOURCE_RTP_PACKET_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_packet_history.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_packet_history.cc
new file mode 100644
index 0000000000..c8d400a985
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_packet_history.cc
@@ -0,0 +1,428 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtp_packet_history.h"
+
+#include <algorithm>
+#include <limits>
+#include <memory>
+#include <utility>
+
+#include "modules/include/module_common_types_public.h"
+#include "modules/rtp_rtcp/source/rtp_packet_to_send.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "system_wrappers/include/clock.h"
+
+namespace webrtc {
+
+RtpPacketHistory::StoredPacket::StoredPacket(
+ std::unique_ptr<RtpPacketToSend> packet,
+ Timestamp send_time,
+ uint64_t insert_order)
+ : packet_(std::move(packet)),
+ pending_transmission_(false),
+ send_time_(send_time),
+ insert_order_(insert_order),
+ times_retransmitted_(0) {}
+
+RtpPacketHistory::StoredPacket::StoredPacket(StoredPacket&&) = default;
+RtpPacketHistory::StoredPacket& RtpPacketHistory::StoredPacket::operator=(
+ RtpPacketHistory::StoredPacket&&) = default;
+RtpPacketHistory::StoredPacket::~StoredPacket() = default;
+
+void RtpPacketHistory::StoredPacket::IncrementTimesRetransmitted(
+ PacketPrioritySet* priority_set) {
+ // Check if this StoredPacket is in the priority set. If so, we need to remove
+ // it before updating `times_retransmitted_` since that is used in sorting,
+ // and then add it back.
+ const bool in_priority_set = priority_set && priority_set->erase(this) > 0;
+ ++times_retransmitted_;
+ if (in_priority_set) {
+ auto it = priority_set->insert(this);
+ RTC_DCHECK(it.second)
+ << "ERROR: Priority set already contains matching packet! In set: "
+ "insert order = "
+ << (*it.first)->insert_order_
+ << ", times retransmitted = " << (*it.first)->times_retransmitted_
+ << ". Trying to add: insert order = " << insert_order_
+ << ", times retransmitted = " << times_retransmitted_;
+ }
+}
+
+bool RtpPacketHistory::MoreUseful::operator()(StoredPacket* lhs,
+ StoredPacket* rhs) const {
+ // Prefer to send packets we haven't already sent as padding.
+ if (lhs->times_retransmitted() != rhs->times_retransmitted()) {
+ return lhs->times_retransmitted() < rhs->times_retransmitted();
+ }
+ // All else being equal, prefer newer packets.
+ return lhs->insert_order() > rhs->insert_order();
+}
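+
+// Illustrative consequence of this ordering: the "best" padding packet is the
+// one retransmitted the fewest times, with newer insertion order breaking
+// ties; e.g. {retx=0, order=7} sorts before {retx=0, order=3}, and both sort
+// before {retx=1, order=9}.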
+
+RtpPacketHistory::RtpPacketHistory(Clock* clock, bool enable_padding_prio)
+ : clock_(clock),
+ enable_padding_prio_(enable_padding_prio),
+ number_to_store_(0),
+ mode_(StorageMode::kDisabled),
+ rtt_(TimeDelta::MinusInfinity()),
+ packets_inserted_(0) {}
+
+RtpPacketHistory::~RtpPacketHistory() {}
+
+void RtpPacketHistory::SetStorePacketsStatus(StorageMode mode,
+ size_t number_to_store) {
+ RTC_DCHECK_LE(number_to_store, kMaxCapacity);
+ MutexLock lock(&lock_);
+ if (mode != StorageMode::kDisabled && mode_ != StorageMode::kDisabled) {
+    RTC_LOG(LS_WARNING) << "Purging packet history in order to reset status.";
+ }
+ Reset();
+ mode_ = mode;
+ number_to_store_ = std::min(kMaxCapacity, number_to_store);
+}
+
+RtpPacketHistory::StorageMode RtpPacketHistory::GetStorageMode() const {
+ MutexLock lock(&lock_);
+ return mode_;
+}
+
+void RtpPacketHistory::SetRtt(TimeDelta rtt) {
+ MutexLock lock(&lock_);
+ RTC_DCHECK_GE(rtt, TimeDelta::Zero());
+ rtt_ = rtt;
+  // If storage is not disabled, packets will be removed after a timeout
+  // that depends on the RTT. Changing the RTT may thus cause some packets
+  // to become "old" and subject to removal.
+ if (mode_ != StorageMode::kDisabled) {
+ CullOldPackets();
+ }
+}
+
+void RtpPacketHistory::PutRtpPacket(std::unique_ptr<RtpPacketToSend> packet,
+ Timestamp send_time) {
+ RTC_DCHECK(packet);
+ MutexLock lock(&lock_);
+ if (mode_ == StorageMode::kDisabled) {
+ return;
+ }
+
+ RTC_DCHECK(packet->allow_retransmission());
+ CullOldPackets();
+
+ // Store packet.
+ const uint16_t rtp_seq_no = packet->SequenceNumber();
+ int packet_index = GetPacketIndex(rtp_seq_no);
+ if (packet_index >= 0 &&
+ static_cast<size_t>(packet_index) < packet_history_.size() &&
+ packet_history_[packet_index].packet_ != nullptr) {
+ RTC_LOG(LS_WARNING) << "Duplicate packet inserted: " << rtp_seq_no;
+ // Remove previous packet to avoid inconsistent state.
+ RemovePacket(packet_index);
+ packet_index = GetPacketIndex(rtp_seq_no);
+ }
+
+ // Packet to be inserted ahead of first packet, expand front.
+ for (; packet_index < 0; ++packet_index) {
+ packet_history_.emplace_front();
+ }
+ // Packet to be inserted behind last packet, expand back.
+ while (static_cast<int>(packet_history_.size()) <= packet_index) {
+ packet_history_.emplace_back();
+ }
+
+ RTC_DCHECK_GE(packet_index, 0);
+ RTC_DCHECK_LT(packet_index, packet_history_.size());
+ RTC_DCHECK(packet_history_[packet_index].packet_ == nullptr);
+
+ packet_history_[packet_index] =
+ StoredPacket(std::move(packet), send_time, packets_inserted_++);
+
+ if (enable_padding_prio_) {
+ if (padding_priority_.size() >= kMaxPaddingHistory - 1) {
+ padding_priority_.erase(std::prev(padding_priority_.end()));
+ }
+ auto prio_it = padding_priority_.insert(&packet_history_[packet_index]);
+ RTC_DCHECK(prio_it.second) << "Failed to insert packet into prio set.";
+ }
+}
+
+std::unique_ptr<RtpPacketToSend> RtpPacketHistory::GetPacketAndMarkAsPending(
+ uint16_t sequence_number) {
+ return GetPacketAndMarkAsPending(
+ sequence_number, [](const RtpPacketToSend& packet) {
+ return std::make_unique<RtpPacketToSend>(packet);
+ });
+}
+
+std::unique_ptr<RtpPacketToSend> RtpPacketHistory::GetPacketAndMarkAsPending(
+ uint16_t sequence_number,
+ rtc::FunctionView<std::unique_ptr<RtpPacketToSend>(const RtpPacketToSend&)>
+ encapsulate) {
+ MutexLock lock(&lock_);
+ if (mode_ == StorageMode::kDisabled) {
+ return nullptr;
+ }
+
+ StoredPacket* packet = GetStoredPacket(sequence_number);
+ if (packet == nullptr) {
+ return nullptr;
+ }
+
+ if (packet->pending_transmission_) {
+ // Packet already in pacer queue, ignore this request.
+ return nullptr;
+ }
+
+ if (!VerifyRtt(*packet)) {
+ // Packet already resent within too short a time window, ignore.
+ return nullptr;
+ }
+
+ // Copy and/or encapsulate packet.
+ std::unique_ptr<RtpPacketToSend> encapsulated_packet =
+ encapsulate(*packet->packet_);
+ if (encapsulated_packet) {
+ packet->pending_transmission_ = true;
+ }
+
+ return encapsulated_packet;
+}
+
+void RtpPacketHistory::MarkPacketAsSent(uint16_t sequence_number) {
+ MutexLock lock(&lock_);
+ if (mode_ == StorageMode::kDisabled) {
+ return;
+ }
+
+ StoredPacket* packet = GetStoredPacket(sequence_number);
+ if (packet == nullptr) {
+ return;
+ }
+
+ // Update send-time, mark as no longer in pacer queue, and increment
+ // transmission count.
+ packet->set_send_time(clock_->CurrentTime());
+ packet->pending_transmission_ = false;
+ packet->IncrementTimesRetransmitted(enable_padding_prio_ ? &padding_priority_
+ : nullptr);
+}
+
+bool RtpPacketHistory::GetPacketState(uint16_t sequence_number) const {
+ MutexLock lock(&lock_);
+ if (mode_ == StorageMode::kDisabled) {
+ return false;
+ }
+
+ int packet_index = GetPacketIndex(sequence_number);
+ if (packet_index < 0 ||
+ static_cast<size_t>(packet_index) >= packet_history_.size()) {
+ return false;
+ }
+ const StoredPacket& packet = packet_history_[packet_index];
+ if (packet.packet_ == nullptr) {
+ return false;
+ }
+
+ if (!VerifyRtt(packet)) {
+ return false;
+ }
+
+ return true;
+}
+
+bool RtpPacketHistory::VerifyRtt(
+ const RtpPacketHistory::StoredPacket& packet) const {
+ if (packet.times_retransmitted() > 0 &&
+ clock_->CurrentTime() - packet.send_time() < rtt_) {
+    // This packet has already been retransmitted once, and the time since
+    // that event is less than one RTT. Ignore the request, as this packet is
+    // likely already in the network pipe.
+ return false;
+ }
+
+ return true;
+}
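+
+// Illustrative numbers for VerifyRtt() above (a sketch, not normative): with
+// rtt_ = 200ms, a packet that was retransmitted 150ms ago is rejected as
+// likely still in flight, while one retransmitted 250ms ago is eligible
+// again.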
+
+std::unique_ptr<RtpPacketToSend> RtpPacketHistory::GetPayloadPaddingPacket() {
+ // Default implementation always just returns a copy of the packet.
+ return GetPayloadPaddingPacket([](const RtpPacketToSend& packet) {
+ return std::make_unique<RtpPacketToSend>(packet);
+ });
+}
+
+std::unique_ptr<RtpPacketToSend> RtpPacketHistory::GetPayloadPaddingPacket(
+ rtc::FunctionView<std::unique_ptr<RtpPacketToSend>(const RtpPacketToSend&)>
+ encapsulate) {
+ MutexLock lock(&lock_);
+ if (mode_ == StorageMode::kDisabled) {
+ return nullptr;
+ }
+
+ StoredPacket* best_packet = nullptr;
+ if (enable_padding_prio_ && !padding_priority_.empty()) {
+ auto best_packet_it = padding_priority_.begin();
+ best_packet = *best_packet_it;
+ } else if (!enable_padding_prio_ && !packet_history_.empty()) {
+ // Prioritization not available, pick the last packet.
+ for (auto it = packet_history_.rbegin(); it != packet_history_.rend();
+ ++it) {
+ if (it->packet_ != nullptr) {
+ best_packet = &(*it);
+ break;
+ }
+ }
+ }
+ if (best_packet == nullptr) {
+ return nullptr;
+ }
+
+ if (best_packet->pending_transmission_) {
+    // Because PacedSender releases its lock when it calls
+    // GeneratePadding(), there is the potential for a race where a new
+    // packet ends up here instead of the regular transmit path. In such a
+    // case, just return empty and it will be picked up on the next
+    // Process() call.
+ return nullptr;
+ }
+
+ auto padding_packet = encapsulate(*best_packet->packet_);
+ if (!padding_packet) {
+ return nullptr;
+ }
+
+ best_packet->set_send_time(clock_->CurrentTime());
+ best_packet->IncrementTimesRetransmitted(
+ enable_padding_prio_ ? &padding_priority_ : nullptr);
+
+ return padding_packet;
+}
+
+void RtpPacketHistory::CullAcknowledgedPackets(
+ rtc::ArrayView<const uint16_t> sequence_numbers) {
+ MutexLock lock(&lock_);
+ for (uint16_t sequence_number : sequence_numbers) {
+ int packet_index = GetPacketIndex(sequence_number);
+ if (packet_index < 0 ||
+ static_cast<size_t>(packet_index) >= packet_history_.size()) {
+ continue;
+ }
+ RemovePacket(packet_index);
+ }
+}
+
+void RtpPacketHistory::Clear() {
+ MutexLock lock(&lock_);
+ Reset();
+}
+
+void RtpPacketHistory::Reset() {
+ packet_history_.clear();
+ padding_priority_.clear();
+}
+
+void RtpPacketHistory::CullOldPackets() {
+ Timestamp now = clock_->CurrentTime();
+ TimeDelta packet_duration =
+ rtt_.IsFinite()
+ ? std::max(kMinPacketDurationRtt * rtt_, kMinPacketDuration)
+ : kMinPacketDuration;
+ while (!packet_history_.empty()) {
+ if (packet_history_.size() >= kMaxCapacity) {
+ // We have reached the absolute max capacity, remove one packet
+ // unconditionally.
+ RemovePacket(0);
+ continue;
+ }
+
+ const StoredPacket& stored_packet = packet_history_.front();
+ if (stored_packet.pending_transmission_) {
+      // Don't remove packets in the pacer queue, pending transmission.
+ return;
+ }
+
+ if (stored_packet.send_time() + packet_duration > now) {
+ // Don't cull packets too early to avoid failed retransmission requests.
+ return;
+ }
+
+ if (packet_history_.size() >= number_to_store_ ||
+ stored_packet.send_time() +
+ (packet_duration * kPacketCullingDelayFactor) <=
+ now) {
+ // Too many packets in history, or this packet has timed out. Remove it
+ // and continue.
+ RemovePacket(0);
+ } else {
+ // No more packets can be removed right now.
+ return;
+ }
+ }
+}
+
+std::unique_ptr<RtpPacketToSend> RtpPacketHistory::RemovePacket(
+ int packet_index) {
+ // Move the packet out from the StoredPacket container.
+ std::unique_ptr<RtpPacketToSend> rtp_packet =
+ std::move(packet_history_[packet_index].packet_);
+
+ // Erase from padding priority set, if eligible.
+ if (enable_padding_prio_) {
+ padding_priority_.erase(&packet_history_[packet_index]);
+ }
+
+ if (packet_index == 0) {
+ while (!packet_history_.empty() &&
+ packet_history_.front().packet_ == nullptr) {
+ packet_history_.pop_front();
+ }
+ }
+
+ return rtp_packet;
+}
+
+int RtpPacketHistory::GetPacketIndex(uint16_t sequence_number) const {
+ if (packet_history_.empty()) {
+ return 0;
+ }
+
+ RTC_DCHECK(packet_history_.front().packet_ != nullptr);
+ int first_seq = packet_history_.front().packet_->SequenceNumber();
+ if (first_seq == sequence_number) {
+ return 0;
+ }
+
+ int packet_index = sequence_number - first_seq;
+ constexpr int kSeqNumSpan = std::numeric_limits<uint16_t>::max() + 1;
+
+ if (IsNewerSequenceNumber(sequence_number, first_seq)) {
+ if (sequence_number < first_seq) {
+ // Forward wrap.
+ packet_index += kSeqNumSpan;
+ }
+ } else if (sequence_number > first_seq) {
+ // Backwards wrap.
+ packet_index -= kSeqNumSpan;
+ }
+
+ return packet_index;
+}
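+
+// Worked example for GetPacketIndex() (illustrative): if the front packet
+// has sequence number 65534 and we look up 2, IsNewerSequenceNumber(2, 65534)
+// is true but 2 < 65534, so the forward-wrap branch applies:
+// packet_index = (2 - 65534) + 65536 = 4.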
+
+RtpPacketHistory::StoredPacket* RtpPacketHistory::GetStoredPacket(
+ uint16_t sequence_number) {
+ int index = GetPacketIndex(sequence_number);
+ if (index < 0 || static_cast<size_t>(index) >= packet_history_.size() ||
+ packet_history_[index].packet_ == nullptr) {
+ return nullptr;
+ }
+ return &packet_history_[index];
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_packet_history.h b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_packet_history.h
new file mode 100644
index 0000000000..7475a35be3
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_packet_history.h
@@ -0,0 +1,196 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_RTP_RTCP_SOURCE_RTP_PACKET_HISTORY_H_
+#define MODULES_RTP_RTCP_SOURCE_RTP_PACKET_HISTORY_H_
+
+#include <deque>
+#include <map>
+#include <memory>
+#include <set>
+#include <utility>
+#include <vector>
+
+#include "api/function_view.h"
+#include "api/units/time_delta.h"
+#include "api/units/timestamp.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "rtc_base/synchronization/mutex.h"
+#include "rtc_base/thread_annotations.h"
+
+namespace webrtc {
+
+class Clock;
+class RtpPacketToSend;
+
+class RtpPacketHistory {
+ public:
+ enum class StorageMode {
+ kDisabled, // Don't store any packets.
+    kStoreAndCull // Store up to `number_to_store` packets, but try to remove
+                  // packets as they time out or are signaled as received.
+ };
+
+ // Maximum number of packets we ever allow in the history.
+ static constexpr size_t kMaxCapacity = 9600;
+ // Maximum number of entries in prioritized queue of padding packets.
+ static constexpr size_t kMaxPaddingHistory = 63;
+ // Don't remove packets within max(1 second, 3x RTT).
+ static constexpr TimeDelta kMinPacketDuration = TimeDelta::Seconds(1);
+ static constexpr int kMinPacketDurationRtt = 3;
+ // With kStoreAndCull, always remove packets after 3x max(1000ms, 3x rtt).
+ static constexpr int kPacketCullingDelayFactor = 3;
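+  //
+  // Illustrative arithmetic for the constants above: with rtt = 200ms the
+  // per-packet lifetime is max(1s, 3 * 200ms) = 1s and unconditional culling
+  // kicks in after 3 * 1s = 3s; with rtt = 500ms the lifetime is 1.5s and
+  // unconditional culling happens after 4.5s.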
+
+ RtpPacketHistory(Clock* clock, bool enable_padding_prio);
+
+ RtpPacketHistory() = delete;
+ RtpPacketHistory(const RtpPacketHistory&) = delete;
+ RtpPacketHistory& operator=(const RtpPacketHistory&) = delete;
+
+ ~RtpPacketHistory();
+
+ // Set/get storage mode. Note that setting the state will clear the history,
+ // even if setting the same state as is currently used.
+ void SetStorePacketsStatus(StorageMode mode, size_t number_to_store);
+ StorageMode GetStorageMode() const;
+
+  // Set RTT, used to avoid premature retransmission and to prevent
+  // overwriting a packet in the history before we are reasonably sure it
+  // has been received.
+ void SetRtt(TimeDelta rtt);
+
+ void PutRtpPacket(std::unique_ptr<RtpPacketToSend> packet,
+ Timestamp send_time);
+
+  // Gets the stored RTP packet corresponding to `sequence_number`.
+  // Returns nullptr if the packet is not found or was (re)sent too recently.
+  // If a packet copy is returned, it will be marked as pending transmission,
+  // but the send time is not updated; that must be done via
+  // MarkPacketAsSent().
+ std::unique_ptr<RtpPacketToSend> GetPacketAndMarkAsPending(
+ uint16_t sequence_number);
+
+  // In addition to getting the packet and marking it as pending, this method
+  // takes an encapsulator function that takes a reference to the packet and
+  // outputs a copy that may be wrapped in a container, e.g. RTX.
+  // If the encapsulator returns nullptr, the retransmit is aborted and the
+  // packet will not be marked as pending.
+ std::unique_ptr<RtpPacketToSend> GetPacketAndMarkAsPending(
+ uint16_t sequence_number,
+ rtc::FunctionView<std::unique_ptr<RtpPacketToSend>(
+ const RtpPacketToSend&)> encapsulate);
+
+ // Updates the send time for the given packet and increments the transmission
+ // counter. Marks the packet as no longer being in the pacer queue.
+ void MarkPacketAsSent(uint16_t sequence_number);
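+
+  // Usage sketch of the retransmission flow (illustrative only; `history`,
+  // `seq` and `pacer` are hypothetical placeholders):
+  //   if (auto packet = history.GetPacketAndMarkAsPending(seq)) {
+  //     pacer.EnqueuePacket(std::move(packet));
+  //     // Later, when the pacer puts the packet on the wire:
+  //     history.MarkPacketAsSent(seq);
+  //   }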
+
+ // Returns true if history contains packet with `sequence_number` and it can
+ // be retransmitted.
+ bool GetPacketState(uint16_t sequence_number) const;
+
+  // Get the packet (if any) from the history that is deemed most likely to
+  // be useful to the remote side. This is calculated from heuristics such
+  // as packet age and times retransmitted. Updates the send time of the
+  // packet, so this is not a const method.
+ std::unique_ptr<RtpPacketToSend> GetPayloadPaddingPacket();
+
+  // Same as GetPayloadPaddingPacket(void), but adds an encapsulation that
+  // can be used, for instance, to wrap the packet in an RTX container, or
+  // to abort getting the packet if the function returns nullptr.
+ std::unique_ptr<RtpPacketToSend> GetPayloadPaddingPacket(
+ rtc::FunctionView<std::unique_ptr<RtpPacketToSend>(
+ const RtpPacketToSend&)> encapsulate);
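+
+  // Minimal RTX-style encapsulation sketch (illustrative; `rtx_ssrc` is a
+  // hypothetical placeholder, and real RTX wrapping also rewrites the
+  // sequence number and payload type):
+  //   auto padding = history.GetPayloadPaddingPacket(
+  //       [&](const RtpPacketToSend& packet) {
+  //         auto rtx = std::make_unique<RtpPacketToSend>(packet);
+  //         rtx->SetSsrc(rtx_ssrc);
+  //         return rtx;
+  //       });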
+
+ // Cull packets that have been acknowledged as received by the remote end.
+ void CullAcknowledgedPackets(rtc::ArrayView<const uint16_t> sequence_numbers);
+
+  // Remove all packets from the history, but keep storage mode and
+  // capacity.
+ void Clear();
+
+ private:
+ struct MoreUseful;
+ class StoredPacket;
+ using PacketPrioritySet = std::set<StoredPacket*, MoreUseful>;
+
+ class StoredPacket {
+ public:
+ StoredPacket() = default;
+ StoredPacket(std::unique_ptr<RtpPacketToSend> packet,
+ Timestamp send_time,
+ uint64_t insert_order);
+ StoredPacket(StoredPacket&&);
+ StoredPacket& operator=(StoredPacket&&);
+ ~StoredPacket();
+
+ uint64_t insert_order() const { return insert_order_; }
+ size_t times_retransmitted() const { return times_retransmitted_; }
+ void IncrementTimesRetransmitted(PacketPrioritySet* priority_set);
+
+ // The time of last transmission, including retransmissions.
+ Timestamp send_time() const { return send_time_; }
+ void set_send_time(Timestamp value) { send_time_ = value; }
+
+ // The actual packet.
+ std::unique_ptr<RtpPacketToSend> packet_;
+
+ // True if the packet is currently in the pacer queue pending transmission.
+ bool pending_transmission_;
+
+ private:
+ Timestamp send_time_ = Timestamp::Zero();
+
+ // Unique number per StoredPacket, incremented by one for each added
+ // packet. Used to sort on insert order.
+ uint64_t insert_order_;
+
+    // Number of times RE-transmitted, i.e. excluding the first transmission.
+ size_t times_retransmitted_;
+ };
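+  // Ordering used by PacketPrioritySet, as exercised by the unit tests:
+  // packets with fewer retransmissions sort first; among equals, the most
+  // recently inserted packet is preferred.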
+ struct MoreUseful {
+ bool operator()(StoredPacket* lhs, StoredPacket* rhs) const;
+ };
+
+ // Helper method to check if packet has too recently been sent.
+ bool VerifyRtt(const StoredPacket& packet) const
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(lock_);
+ void Reset() RTC_EXCLUSIVE_LOCKS_REQUIRED(lock_);
+ void CullOldPackets() RTC_EXCLUSIVE_LOCKS_REQUIRED(lock_);
+ // Removes the packet from the history, and context/mapping that has been
+ // stored. Returns the RTP packet instance contained within the StoredPacket.
+ std::unique_ptr<RtpPacketToSend> RemovePacket(int packet_index)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(lock_);
+ int GetPacketIndex(uint16_t sequence_number) const
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(lock_);
+ StoredPacket* GetStoredPacket(uint16_t sequence_number)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(lock_);
+
+ Clock* const clock_;
+ const bool enable_padding_prio_;
+ mutable Mutex lock_;
+ size_t number_to_store_ RTC_GUARDED_BY(lock_);
+ StorageMode mode_ RTC_GUARDED_BY(lock_);
+ TimeDelta rtt_ RTC_GUARDED_BY(lock_);
+
+ // Queue of stored packets, ordered by sequence number, with older packets in
+ // the front and new packets being added to the back. Note that there may be
+ // wrap-arounds so the back may have a lower sequence number.
+ // Packets may also be removed out-of-order, in which case there will be
+ // instances of StoredPacket with `packet_` set to nullptr. The first and last
+ // entry in the queue will however always be populated.
+ std::deque<StoredPacket> packet_history_ RTC_GUARDED_BY(lock_);
+
+  // Total number of packets inserted.
+ uint64_t packets_inserted_ RTC_GUARDED_BY(lock_);
+ // Objects from `packet_history_` ordered by "most likely to be useful", used
+ // in GetPayloadPaddingPacket().
+ PacketPrioritySet padding_priority_ RTC_GUARDED_BY(lock_);
+};
+} // namespace webrtc
+#endif // MODULES_RTP_RTCP_SOURCE_RTP_PACKET_HISTORY_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_packet_history_unittest.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_packet_history_unittest.cc
new file mode 100644
index 0000000000..f50541849e
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_packet_history_unittest.cc
@@ -0,0 +1,681 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtp_packet_history.h"
+
+#include <memory>
+#include <utility>
+
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "modules/rtp_rtcp/source/rtp_packet_to_send.h"
+#include "system_wrappers/include/clock.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+// Set a high sequence number so we'll suffer a wrap-around.
+constexpr uint16_t kStartSeqNum = 65534u;
+
+// Utility method for truncating sequence numbers to uint16_t.
+uint16_t To16u(size_t sequence_number) {
+ return static_cast<uint16_t>(sequence_number & 0xFFFF);
+}
+} // namespace
+
+using StorageMode = RtpPacketHistory::StorageMode;
+
+class RtpPacketHistoryTest : public ::testing::TestWithParam<bool> {
+ protected:
+ RtpPacketHistoryTest()
+ : fake_clock_(123456),
+ hist_(&fake_clock_, /*enable_padding_prio=*/GetParam()) {}
+
+ SimulatedClock fake_clock_;
+ RtpPacketHistory hist_;
+
+ std::unique_ptr<RtpPacketToSend> CreateRtpPacket(uint16_t seq_num) {
+    // Payload, SSRC, timestamp and extensions are irrelevant for these tests.
+ std::unique_ptr<RtpPacketToSend> packet(new RtpPacketToSend(nullptr));
+ packet->SetSequenceNumber(seq_num);
+ packet->set_capture_time(fake_clock_.CurrentTime());
+ packet->set_allow_retransmission(true);
+ return packet;
+ }
+};
+
+TEST_P(RtpPacketHistoryTest, SetStoreStatus) {
+ EXPECT_EQ(StorageMode::kDisabled, hist_.GetStorageMode());
+ hist_.SetStorePacketsStatus(StorageMode::kStoreAndCull, 10);
+ EXPECT_EQ(StorageMode::kStoreAndCull, hist_.GetStorageMode());
+ hist_.SetStorePacketsStatus(StorageMode::kStoreAndCull, 10);
+ EXPECT_EQ(StorageMode::kStoreAndCull, hist_.GetStorageMode());
+ hist_.SetStorePacketsStatus(StorageMode::kDisabled, 0);
+ EXPECT_EQ(StorageMode::kDisabled, hist_.GetStorageMode());
+}
+
+TEST_P(RtpPacketHistoryTest, ClearsHistoryAfterSetStoreStatus) {
+ hist_.SetStorePacketsStatus(StorageMode::kStoreAndCull, 10);
+ hist_.PutRtpPacket(CreateRtpPacket(kStartSeqNum),
+ /*send_time=*/fake_clock_.CurrentTime());
+ EXPECT_TRUE(hist_.GetPacketState(kStartSeqNum));
+
+ // Changing store status, even to the current one, will clear the history.
+ hist_.SetStorePacketsStatus(StorageMode::kStoreAndCull, 10);
+ EXPECT_FALSE(hist_.GetPacketState(kStartSeqNum));
+}
+
+TEST_P(RtpPacketHistoryTest, StartSeqResetAfterReset) {
+ hist_.SetStorePacketsStatus(StorageMode::kStoreAndCull, 10);
+ hist_.PutRtpPacket(CreateRtpPacket(kStartSeqNum),
+ /*send_time=*/fake_clock_.CurrentTime());
+ // Mark packet as pending so it won't be removed.
+ EXPECT_TRUE(hist_.GetPacketAndMarkAsPending(kStartSeqNum));
+
+  // Change the store status to clear the history.
+ hist_.SetStorePacketsStatus(StorageMode::kStoreAndCull, 10);
+ EXPECT_FALSE(hist_.GetPacketState(kStartSeqNum));
+
+ // Add a new packet.
+ hist_.PutRtpPacket(CreateRtpPacket(To16u(kStartSeqNum + 1)),
+ /*send_time=*/fake_clock_.CurrentTime());
+ EXPECT_TRUE(hist_.GetPacketAndMarkAsPending(To16u(kStartSeqNum + 1)));
+
+ // Advance time past where packet expires.
+ fake_clock_.AdvanceTime(RtpPacketHistory::kPacketCullingDelayFactor *
+ RtpPacketHistory::kMinPacketDuration);
+
+ // Add one more packet and verify no state left from packet before reset.
+ hist_.PutRtpPacket(CreateRtpPacket(To16u(kStartSeqNum + 2)),
+ /*send_time=*/fake_clock_.CurrentTime());
+ EXPECT_FALSE(hist_.GetPacketState(kStartSeqNum));
+ EXPECT_TRUE(hist_.GetPacketState(To16u(kStartSeqNum + 1)));
+ EXPECT_TRUE(hist_.GetPacketState(To16u(kStartSeqNum + 2)));
+}
+
+TEST_P(RtpPacketHistoryTest, NoStoreStatus) {
+ EXPECT_EQ(StorageMode::kDisabled, hist_.GetStorageMode());
+ std::unique_ptr<RtpPacketToSend> packet = CreateRtpPacket(kStartSeqNum);
+ hist_.PutRtpPacket(std::move(packet),
+ /*send_time=*/fake_clock_.CurrentTime());
+ // Packet should not be stored.
+ EXPECT_FALSE(hist_.GetPacketState(kStartSeqNum));
+}
+
+TEST_P(RtpPacketHistoryTest, GetRtpPacket_NotStored) {
+ hist_.SetStorePacketsStatus(StorageMode::kStoreAndCull, 10);
+ EXPECT_FALSE(hist_.GetPacketState(0));
+}
+
+TEST_P(RtpPacketHistoryTest, PutRtpPacket) {
+ hist_.SetStorePacketsStatus(StorageMode::kStoreAndCull, 10);
+ std::unique_ptr<RtpPacketToSend> packet = CreateRtpPacket(kStartSeqNum);
+
+ EXPECT_FALSE(hist_.GetPacketState(kStartSeqNum));
+ hist_.PutRtpPacket(std::move(packet),
+ /*send_time=*/fake_clock_.CurrentTime());
+ EXPECT_TRUE(hist_.GetPacketState(kStartSeqNum));
+}
+
+TEST_P(RtpPacketHistoryTest, GetRtpPacket) {
+ hist_.SetStorePacketsStatus(StorageMode::kStoreAndCull, 10);
+ Timestamp capture_time = Timestamp::Millis(1);
+ std::unique_ptr<RtpPacketToSend> packet = CreateRtpPacket(kStartSeqNum);
+ packet->set_capture_time(capture_time);
+ rtc::CopyOnWriteBuffer buffer = packet->Buffer();
+ hist_.PutRtpPacket(std::move(packet),
+ /*send_time=*/fake_clock_.CurrentTime());
+
+ std::unique_ptr<RtpPacketToSend> packet_out =
+ hist_.GetPacketAndMarkAsPending(kStartSeqNum);
+ ASSERT_TRUE(packet_out);
+ EXPECT_EQ(buffer, packet_out->Buffer());
+ EXPECT_EQ(capture_time, packet_out->capture_time());
+}
+
+TEST_P(RtpPacketHistoryTest, MinResendTime) {
+ static const TimeDelta kMinRetransmitInterval = TimeDelta::Millis(100);
+
+ hist_.SetStorePacketsStatus(StorageMode::kStoreAndCull, 10);
+ hist_.SetRtt(kMinRetransmitInterval);
+ Timestamp capture_time = fake_clock_.CurrentTime();
+ std::unique_ptr<RtpPacketToSend> packet = CreateRtpPacket(kStartSeqNum);
+ size_t len = packet->size();
+ hist_.PutRtpPacket(std::move(packet), fake_clock_.CurrentTime());
+
+ // First retransmission - allow early retransmission.
+ fake_clock_.AdvanceTimeMilliseconds(1);
+ packet = hist_.GetPacketAndMarkAsPending(kStartSeqNum);
+ ASSERT_TRUE(packet);
+ EXPECT_EQ(len, packet->size());
+ EXPECT_EQ(packet->capture_time(), capture_time);
+ hist_.MarkPacketAsSent(kStartSeqNum);
+
+ // Second retransmission - advance time to just before retransmission OK.
+ fake_clock_.AdvanceTime(kMinRetransmitInterval - TimeDelta::Millis(1));
+ EXPECT_FALSE(hist_.GetPacketAndMarkAsPending(kStartSeqNum));
+
+ // Advance time to just after retransmission OK.
+ fake_clock_.AdvanceTimeMilliseconds(1);
+ EXPECT_TRUE(hist_.GetPacketAndMarkAsPending(kStartSeqNum));
+}
+
+TEST_P(RtpPacketHistoryTest, RemovesOldestSentPacketWhenAtMaxSize) {
+ const size_t kMaxNumPackets = 10;
+ hist_.SetStorePacketsStatus(StorageMode::kStoreAndCull, kMaxNumPackets);
+
+ // History does not allow removing packets within kMinPacketDuration,
+ // so in order to test capacity, make sure insertion spans this time.
+ const TimeDelta kPacketInterval =
+ RtpPacketHistory::kMinPacketDuration / kMaxNumPackets;
+
+ // Add packets until the buffer is full.
+ for (size_t i = 0; i < kMaxNumPackets; ++i) {
+ std::unique_ptr<RtpPacketToSend> packet =
+ CreateRtpPacket(To16u(kStartSeqNum + i));
+    // Immediately mark the packet as sent.
+ hist_.PutRtpPacket(std::move(packet), fake_clock_.CurrentTime());
+ fake_clock_.AdvanceTime(kPacketInterval);
+ }
+
+ // First packet should still be there.
+ EXPECT_TRUE(hist_.GetPacketState(kStartSeqNum));
+
+  // History is full; the oldest packet should be culled.
+ std::unique_ptr<RtpPacketToSend> packet =
+ CreateRtpPacket(To16u(kStartSeqNum + kMaxNumPackets));
+ hist_.PutRtpPacket(std::move(packet), fake_clock_.CurrentTime());
+
+  // Oldest packet should be gone, but the packet after it is still present.
+ EXPECT_FALSE(hist_.GetPacketState(kStartSeqNum));
+ EXPECT_TRUE(hist_.GetPacketState(To16u(kStartSeqNum + 1)));
+}
+
+TEST_P(RtpPacketHistoryTest, RemovesOldestPacketWhenAtMaxCapacity) {
+ // Tests the absolute upper bound on number of stored packets. Don't allow
+ // storing more than this, even if packets have not yet been sent.
+ const size_t kMaxNumPackets = RtpPacketHistory::kMaxCapacity;
+ hist_.SetStorePacketsStatus(StorageMode::kStoreAndCull,
+ RtpPacketHistory::kMaxCapacity);
+
+ // Add packets until the buffer is full.
+ for (size_t i = 0; i < kMaxNumPackets; ++i) {
+ std::unique_ptr<RtpPacketToSend> packet =
+ CreateRtpPacket(To16u(kStartSeqNum + i));
+ hist_.PutRtpPacket(std::move(packet),
+ /*send_time=*/fake_clock_.CurrentTime());
+    // Mark packets as pending, preventing them from being removed.
+ hist_.GetPacketAndMarkAsPending(To16u(kStartSeqNum + i));
+ }
+
+ // First packet should still be there.
+ EXPECT_TRUE(hist_.GetPacketState(kStartSeqNum));
+
+  // History is full; the oldest packet should be culled.
+ std::unique_ptr<RtpPacketToSend> packet =
+ CreateRtpPacket(To16u(kStartSeqNum + kMaxNumPackets));
+ hist_.PutRtpPacket(std::move(packet), fake_clock_.CurrentTime());
+
+  // Oldest packet should be gone, but the packet after it is still present.
+ EXPECT_FALSE(hist_.GetPacketState(kStartSeqNum));
+ EXPECT_TRUE(hist_.GetPacketState(To16u(kStartSeqNum + 1)));
+}
+
+TEST_P(RtpPacketHistoryTest, RemovesLowestPrioPaddingWhenAtMaxCapacity) {
+ if (!GetParam()) {
+ // Padding prioritization is off, ignore this test.
+ return;
+ }
+
+ // Tests the absolute upper bound on number of packets in the prioritized
+ // set of potential padding packets.
+ const size_t kMaxNumPackets = RtpPacketHistory::kMaxPaddingHistory;
+ hist_.SetStorePacketsStatus(StorageMode::kStoreAndCull, kMaxNumPackets * 2);
+ hist_.SetRtt(TimeDelta::Millis(1));
+
+ // Add packets until the max is reached, and then yet another one.
+ for (size_t i = 0; i < kMaxNumPackets + 1; ++i) {
+ std::unique_ptr<RtpPacketToSend> packet =
+ CreateRtpPacket(To16u(kStartSeqNum + i));
+ // Don't mark packets as sent, preventing them from being removed.
+ hist_.PutRtpPacket(std::move(packet), fake_clock_.CurrentTime());
+ }
+
+ // Advance time to allow retransmission/padding.
+ fake_clock_.AdvanceTimeMilliseconds(1);
+
+  // The oldest packet is the least prioritized and has fallen out of the
+  // priority set.
+ for (size_t i = kMaxNumPackets - 1; i > 0; --i) {
+ auto packet = hist_.GetPayloadPaddingPacket();
+ ASSERT_TRUE(packet);
+ EXPECT_EQ(packet->SequenceNumber(), To16u(kStartSeqNum + i + 1));
+ }
+
+ // Wrap around to newest padding packet again.
+ auto packet = hist_.GetPayloadPaddingPacket();
+ ASSERT_TRUE(packet);
+ EXPECT_EQ(packet->SequenceNumber(), To16u(kStartSeqNum + kMaxNumPackets));
+}
+
+TEST_P(RtpPacketHistoryTest, DontRemoveTooRecentlyTransmittedPackets) {
+ // Set size to remove old packets as soon as possible.
+ hist_.SetStorePacketsStatus(StorageMode::kStoreAndCull, 1);
+
+  // Add a packet, marked as sent, and advance time to just before the
+  // removal time.
+ hist_.PutRtpPacket(CreateRtpPacket(kStartSeqNum), fake_clock_.CurrentTime());
+ fake_clock_.AdvanceTime(RtpPacketHistory::kMinPacketDuration -
+ TimeDelta::Millis(1));
+
+ // Add a new packet to trigger culling.
+ hist_.PutRtpPacket(CreateRtpPacket(To16u(kStartSeqNum + 1)),
+ fake_clock_.CurrentTime());
+ // First packet should still be there.
+ EXPECT_TRUE(hist_.GetPacketState(kStartSeqNum));
+
+ // Advance time to where packet will be eligible for removal and try again.
+ fake_clock_.AdvanceTimeMilliseconds(1);
+ hist_.PutRtpPacket(CreateRtpPacket(To16u(kStartSeqNum + 2)),
+ fake_clock_.CurrentTime());
+  // First packet should now be gone, but the next one is still there.
+ EXPECT_FALSE(hist_.GetPacketState(kStartSeqNum));
+ EXPECT_TRUE(hist_.GetPacketState(To16u(kStartSeqNum + 1)));
+}
+
+TEST_P(RtpPacketHistoryTest, DontRemoveTooRecentlyTransmittedPacketsHighRtt) {
+ const TimeDelta kRtt = RtpPacketHistory::kMinPacketDuration * 2;
+ const TimeDelta kPacketTimeout =
+ kRtt * RtpPacketHistory::kMinPacketDurationRtt;
+
+ // Set size to remove old packets as soon as possible.
+ hist_.SetStorePacketsStatus(StorageMode::kStoreAndCull, 1);
+ hist_.SetRtt(kRtt);
+
+  // Add a packet, marked as sent, and advance time to just before the
+  // removal time.
+ hist_.PutRtpPacket(CreateRtpPacket(kStartSeqNum), fake_clock_.CurrentTime());
+ fake_clock_.AdvanceTime(kPacketTimeout - TimeDelta::Millis(1));
+
+ // Add a new packet to trigger culling.
+ hist_.PutRtpPacket(CreateRtpPacket(To16u(kStartSeqNum + 1)),
+ fake_clock_.CurrentTime());
+ // First packet should still be there.
+ EXPECT_TRUE(hist_.GetPacketState(kStartSeqNum));
+
+ // Advance time to where packet will be eligible for removal and try again.
+ fake_clock_.AdvanceTimeMilliseconds(1);
+ hist_.PutRtpPacket(CreateRtpPacket(To16u(kStartSeqNum + 2)),
+ fake_clock_.CurrentTime());
+  // First packet should now be gone, but the next one is still there.
+ EXPECT_FALSE(hist_.GetPacketState(kStartSeqNum));
+ EXPECT_TRUE(hist_.GetPacketState(To16u(kStartSeqNum + 1)));
+}
+
+TEST_P(RtpPacketHistoryTest, RemovesOldWithCulling) {
+ const size_t kMaxNumPackets = 10;
+ // Enable culling. Even without feedback, this can trigger early removal.
+ hist_.SetStorePacketsStatus(StorageMode::kStoreAndCull, kMaxNumPackets);
+
+ hist_.PutRtpPacket(CreateRtpPacket(kStartSeqNum), fake_clock_.CurrentTime());
+
+ TimeDelta kMaxPacketDuration = RtpPacketHistory::kMinPacketDuration *
+ RtpPacketHistory::kPacketCullingDelayFactor;
+ fake_clock_.AdvanceTime(kMaxPacketDuration - TimeDelta::Millis(1));
+
+ // First packet should still be there.
+ EXPECT_TRUE(hist_.GetPacketState(kStartSeqNum));
+
+ // Advance to where packet can be culled, even if buffer is not full.
+ fake_clock_.AdvanceTimeMilliseconds(1);
+ hist_.PutRtpPacket(CreateRtpPacket(To16u(kStartSeqNum + 1)),
+ fake_clock_.CurrentTime());
+
+ EXPECT_FALSE(hist_.GetPacketState(kStartSeqNum));
+}
+
+TEST_P(RtpPacketHistoryTest, RemovesOldWithCullingHighRtt) {
+ const size_t kMaxNumPackets = 10;
+ const TimeDelta kRtt = RtpPacketHistory::kMinPacketDuration * 2;
+ // Enable culling. Even without feedback, this can trigger early removal.
+ hist_.SetStorePacketsStatus(StorageMode::kStoreAndCull, kMaxNumPackets);
+ hist_.SetRtt(kRtt);
+
+ hist_.PutRtpPacket(CreateRtpPacket(kStartSeqNum), fake_clock_.CurrentTime());
+
+ TimeDelta kMaxPacketDuration = kRtt *
+ RtpPacketHistory::kMinPacketDurationRtt *
+ RtpPacketHistory::kPacketCullingDelayFactor;
+ fake_clock_.AdvanceTime(kMaxPacketDuration - TimeDelta::Millis(1));
+
+ // First packet should still be there.
+ EXPECT_TRUE(hist_.GetPacketState(kStartSeqNum));
+
+ // Advance to where packet can be culled, even if buffer is not full.
+ fake_clock_.AdvanceTimeMilliseconds(1);
+ hist_.PutRtpPacket(CreateRtpPacket(To16u(kStartSeqNum + 1)),
+ fake_clock_.CurrentTime());
+
+ EXPECT_FALSE(hist_.GetPacketState(kStartSeqNum));
+}
+
+TEST_P(RtpPacketHistoryTest, CullWithAcks) {
+ const TimeDelta kPacketLifetime = RtpPacketHistory::kMinPacketDuration *
+ RtpPacketHistory::kPacketCullingDelayFactor;
+
+ const Timestamp start_time = fake_clock_.CurrentTime();
+ hist_.SetStorePacketsStatus(StorageMode::kStoreAndCull, 10);
+
+ // Insert three packets 33ms apart, immediately mark them as sent.
+ std::unique_ptr<RtpPacketToSend> packet = CreateRtpPacket(kStartSeqNum);
+ packet->SetPayloadSize(50);
+ hist_.PutRtpPacket(std::move(packet),
+ /*send_time=*/fake_clock_.CurrentTime());
+ fake_clock_.AdvanceTimeMilliseconds(33);
+ packet = CreateRtpPacket(To16u(kStartSeqNum + 1));
+ packet->SetPayloadSize(50);
+ hist_.PutRtpPacket(std::move(packet),
+ /*send_time=*/fake_clock_.CurrentTime());
+ fake_clock_.AdvanceTimeMilliseconds(33);
+ packet = CreateRtpPacket(To16u(kStartSeqNum + 2));
+ packet->SetPayloadSize(50);
+ hist_.PutRtpPacket(std::move(packet),
+ /*send_time=*/fake_clock_.CurrentTime());
+
+ EXPECT_TRUE(hist_.GetPacketState(kStartSeqNum));
+ EXPECT_TRUE(hist_.GetPacketState(To16u(kStartSeqNum + 1)));
+ EXPECT_TRUE(hist_.GetPacketState(To16u(kStartSeqNum + 2)));
+
+ // Remove middle one using ack, check that only that one is gone.
+ std::vector<uint16_t> acked_sequence_numbers = {To16u(kStartSeqNum + 1)};
+ hist_.CullAcknowledgedPackets(acked_sequence_numbers);
+
+ EXPECT_TRUE(hist_.GetPacketState(kStartSeqNum));
+ EXPECT_FALSE(hist_.GetPacketState(To16u(kStartSeqNum + 1)));
+ EXPECT_TRUE(hist_.GetPacketState(To16u(kStartSeqNum + 2)));
+
+ // Advance time to where second packet would have expired, verify first packet
+ // is removed.
+ Timestamp second_packet_expiry_time =
+ start_time + kPacketLifetime + TimeDelta::Millis(33 + 1);
+ fake_clock_.AdvanceTime(second_packet_expiry_time -
+ fake_clock_.CurrentTime());
+ hist_.SetRtt(TimeDelta::Millis(1)); // Trigger culling of old packets.
+ EXPECT_FALSE(hist_.GetPacketState(kStartSeqNum));
+ EXPECT_FALSE(hist_.GetPacketState(To16u(kStartSeqNum + 1)));
+ EXPECT_TRUE(hist_.GetPacketState(To16u(kStartSeqNum + 2)));
+
+ // Advance to where last packet expires, verify all gone.
+ fake_clock_.AdvanceTimeMilliseconds(33);
+ hist_.SetRtt(TimeDelta::Millis(1)); // Trigger culling of old packets.
+ EXPECT_FALSE(hist_.GetPacketState(kStartSeqNum));
+ EXPECT_FALSE(hist_.GetPacketState(To16u(kStartSeqNum + 1)));
+ EXPECT_FALSE(hist_.GetPacketState(To16u(kStartSeqNum + 2)));
+}
+
+TEST_P(RtpPacketHistoryTest, GetPacketAndSetSent) {
+ const TimeDelta kRtt = RtpPacketHistory::kMinPacketDuration * 2;
+ hist_.SetRtt(kRtt);
+
+ // Set size to remove old packets as soon as possible.
+ hist_.SetStorePacketsStatus(StorageMode::kStoreAndCull, 1);
+
+ // Add a sent packet to the history.
+ hist_.PutRtpPacket(CreateRtpPacket(kStartSeqNum), fake_clock_.CurrentTime());
+
+ // Retransmission request, first retransmission is allowed immediately.
+ EXPECT_TRUE(hist_.GetPacketAndMarkAsPending(kStartSeqNum));
+
+ // Packet not yet sent, new retransmission not allowed.
+ fake_clock_.AdvanceTime(kRtt);
+ EXPECT_FALSE(hist_.GetPacketAndMarkAsPending(kStartSeqNum));
+
+ // Mark as sent, but too early for retransmission.
+ hist_.MarkPacketAsSent(kStartSeqNum);
+ EXPECT_FALSE(hist_.GetPacketAndMarkAsPending(kStartSeqNum));
+
+ // Enough time has passed, retransmission is allowed again.
+ fake_clock_.AdvanceTime(kRtt);
+ EXPECT_TRUE(hist_.GetPacketAndMarkAsPending(kStartSeqNum));
+}
+
+TEST_P(RtpPacketHistoryTest, GetPacketWithEncapsulation) {
+ const uint32_t kSsrc = 92384762;
+ const TimeDelta kRtt = RtpPacketHistory::kMinPacketDuration * 2;
+ hist_.SetRtt(kRtt);
+
+ // Set size to remove old packets as soon as possible.
+ hist_.SetStorePacketsStatus(StorageMode::kStoreAndCull, 1);
+
+ // Add a sent packet to the history, with a set SSRC.
+ std::unique_ptr<RtpPacketToSend> packet = CreateRtpPacket(kStartSeqNum);
+ packet->SetSsrc(kSsrc);
+ hist_.PutRtpPacket(std::move(packet), fake_clock_.CurrentTime());
+
+  // Retransmission request; simulate an RTX-like encapsulation, where the
+  // packet is sent on a different SSRC.
+ std::unique_ptr<RtpPacketToSend> retransmit_packet =
+ hist_.GetPacketAndMarkAsPending(
+ kStartSeqNum, [](const RtpPacketToSend& packet) {
+ auto encapsulated_packet =
+ std::make_unique<RtpPacketToSend>(packet);
+ encapsulated_packet->SetSsrc(packet.Ssrc() + 1);
+ return encapsulated_packet;
+ });
+ ASSERT_TRUE(retransmit_packet);
+ EXPECT_EQ(retransmit_packet->Ssrc(), kSsrc + 1);
+}
+
+TEST_P(RtpPacketHistoryTest, GetPacketWithEncapsulationAbortOnNullptr) {
+ hist_.SetStorePacketsStatus(StorageMode::kStoreAndCull, 1);
+
+ hist_.PutRtpPacket(CreateRtpPacket(kStartSeqNum), fake_clock_.CurrentTime());
+
+  // Retransmission request, but the encapsulator determines that this
+  // packet is not suitable for retransmission (e.g. bandwidth exhausted),
+  // so the retransmit is aborted and the packet is not marked as pending.
+ EXPECT_FALSE(hist_.GetPacketAndMarkAsPending(
+ kStartSeqNum, [](const RtpPacketToSend&) { return nullptr; }));
+
+ // New try, this time getting the packet should work, and it should not be
+ // blocked due to any pending status.
+ EXPECT_TRUE(hist_.GetPacketAndMarkAsPending(kStartSeqNum));
+}
+
+TEST_P(RtpPacketHistoryTest, DontRemovePendingTransmissions) {
+ const TimeDelta kRtt = RtpPacketHistory::kMinPacketDuration * 2;
+ const TimeDelta kPacketTimeout =
+ kRtt * RtpPacketHistory::kMinPacketDurationRtt;
+
+ // Set size to remove old packets as soon as possible.
+ hist_.SetStorePacketsStatus(StorageMode::kStoreAndCull, 1);
+ hist_.SetRtt(kRtt);
+
+ // Add a sent packet.
+ hist_.PutRtpPacket(CreateRtpPacket(kStartSeqNum), fake_clock_.CurrentTime());
+
+ // Advance clock to just before packet timeout.
+ fake_clock_.AdvanceTime(kPacketTimeout - TimeDelta::Millis(1));
+ // Mark as enqueued in pacer.
+ EXPECT_TRUE(hist_.GetPacketAndMarkAsPending(kStartSeqNum));
+
+ // Advance clock to where packet would have timed out. It should still
+ // be there and pending.
+ fake_clock_.AdvanceTimeMilliseconds(1);
+ EXPECT_TRUE(hist_.GetPacketState(kStartSeqNum));
+
+ // Packet sent. Now it can be removed.
+ hist_.MarkPacketAsSent(kStartSeqNum);
+ hist_.SetRtt(kRtt); // Force culling of old packets.
+ EXPECT_FALSE(hist_.GetPacketState(kStartSeqNum));
+}
+
+TEST_P(RtpPacketHistoryTest, PrioritizedPayloadPadding) {
+ if (!GetParam()) {
+ // Padding prioritization is off, ignore this test.
+ return;
+ }
+
+ hist_.SetStorePacketsStatus(StorageMode::kStoreAndCull, 1);
+
+ // Add two sent packets, one millisecond apart.
+ hist_.PutRtpPacket(CreateRtpPacket(kStartSeqNum), fake_clock_.CurrentTime());
+ fake_clock_.AdvanceTimeMilliseconds(1);
+
+ hist_.PutRtpPacket(CreateRtpPacket(kStartSeqNum + 1),
+ fake_clock_.CurrentTime());
+ fake_clock_.AdvanceTimeMilliseconds(1);
+
+ // Latest packet given equal retransmission count.
+ EXPECT_EQ(hist_.GetPayloadPaddingPacket()->SequenceNumber(),
+ kStartSeqNum + 1);
+
+ // Older packet has lower retransmission count.
+ EXPECT_EQ(hist_.GetPayloadPaddingPacket()->SequenceNumber(), kStartSeqNum);
+
+ // Equal retransmission count again, use newest packet.
+ EXPECT_EQ(hist_.GetPayloadPaddingPacket()->SequenceNumber(),
+ kStartSeqNum + 1);
+
+ // Older packet has lower retransmission count.
+ EXPECT_EQ(hist_.GetPayloadPaddingPacket()->SequenceNumber(), kStartSeqNum);
+
+ // Remove newest packet.
+ hist_.CullAcknowledgedPackets(std::vector<uint16_t>{kStartSeqNum + 1});
+
+ // Only older packet left.
+ EXPECT_EQ(hist_.GetPayloadPaddingPacket()->SequenceNumber(), kStartSeqNum);
+
+ hist_.CullAcknowledgedPackets(std::vector<uint16_t>{kStartSeqNum});
+
+ EXPECT_EQ(hist_.GetPayloadPaddingPacket(), nullptr);
+}
+
+TEST_P(RtpPacketHistoryTest, NoPendingPacketAsPadding) {
+ hist_.SetStorePacketsStatus(StorageMode::kStoreAndCull, 1);
+
+ hist_.PutRtpPacket(CreateRtpPacket(kStartSeqNum), fake_clock_.CurrentTime());
+ fake_clock_.AdvanceTimeMilliseconds(1);
+
+ EXPECT_EQ(hist_.GetPayloadPaddingPacket()->SequenceNumber(), kStartSeqNum);
+
+ // If packet is pending retransmission, don't try to use it as padding.
+ hist_.GetPacketAndMarkAsPending(kStartSeqNum);
+ EXPECT_EQ(nullptr, hist_.GetPayloadPaddingPacket());
+
+  // Mark it as no longer pending; it should be usable as padding again.
+ hist_.MarkPacketAsSent(kStartSeqNum);
+ EXPECT_EQ(hist_.GetPayloadPaddingPacket()->SequenceNumber(), kStartSeqNum);
+}
+
+TEST_P(RtpPacketHistoryTest, PayloadPaddingWithEncapsulation) {
+ hist_.SetStorePacketsStatus(StorageMode::kStoreAndCull, 1);
+
+ hist_.PutRtpPacket(CreateRtpPacket(kStartSeqNum), fake_clock_.CurrentTime());
+ fake_clock_.AdvanceTimeMilliseconds(1);
+
+ // Aborted padding.
+ EXPECT_EQ(nullptr, hist_.GetPayloadPaddingPacket(
+ [](const RtpPacketToSend&) { return nullptr; }));
+
+ // Get copy of packet, but with sequence number modified.
+ auto padding_packet =
+ hist_.GetPayloadPaddingPacket([&](const RtpPacketToSend& packet) {
+ auto encapsulated_packet = std::make_unique<RtpPacketToSend>(packet);
+ encapsulated_packet->SetSequenceNumber(kStartSeqNum + 1);
+ return encapsulated_packet;
+ });
+ ASSERT_TRUE(padding_packet);
+ EXPECT_EQ(padding_packet->SequenceNumber(), kStartSeqNum + 1);
+}
+
+TEST_P(RtpPacketHistoryTest, NackAfterAckIsNoop) {
+ hist_.SetStorePacketsStatus(StorageMode::kStoreAndCull, 2);
+ // Add two sent packets.
+ hist_.PutRtpPacket(CreateRtpPacket(kStartSeqNum), fake_clock_.CurrentTime());
+ hist_.PutRtpPacket(CreateRtpPacket(kStartSeqNum + 1),
+ fake_clock_.CurrentTime());
+ // Remove newest one.
+ hist_.CullAcknowledgedPackets(std::vector<uint16_t>{kStartSeqNum + 1});
+ // Retransmission request for already acked packet, should be noop.
+ auto packet = hist_.GetPacketAndMarkAsPending(kStartSeqNum + 1);
+ EXPECT_EQ(packet.get(), nullptr);
+}
+
+TEST_P(RtpPacketHistoryTest, OutOfOrderInsertRemoval) {
+ hist_.SetStorePacketsStatus(StorageMode::kStoreAndCull, 10);
+
+ // Insert packets, out of order, including both forwards and backwards
+ // sequence number wraps.
+ const int seq_offsets[] = {0, 1, -1, 2, -2, 3, -3};
+
+ for (int offset : seq_offsets) {
+ uint16_t seq_no = To16u(kStartSeqNum + offset);
+ std::unique_ptr<RtpPacketToSend> packet = CreateRtpPacket(seq_no);
+ packet->SetPayloadSize(50);
+ hist_.PutRtpPacket(std::move(packet), fake_clock_.CurrentTime());
+ fake_clock_.AdvanceTimeMilliseconds(33);
+ }
+
+  // Check packets are there and remove them in the same out-of-order fashion.
+ for (int offset : seq_offsets) {
+ uint16_t seq_no = To16u(kStartSeqNum + offset);
+ EXPECT_TRUE(hist_.GetPacketState(seq_no));
+ std::vector<uint16_t> acked_sequence_numbers = {seq_no};
+ hist_.CullAcknowledgedPackets(acked_sequence_numbers);
+ EXPECT_FALSE(hist_.GetPacketState(seq_no));
+ }
+}
+
+TEST_P(RtpPacketHistoryTest, UsesLastPacketAsPaddingWithPrioOff) {
+ if (GetParam()) {
+ // Padding prioritization is enabled, ignore this test.
+ return;
+ }
+
+ const size_t kHistorySize = 10;
+ hist_.SetStorePacketsStatus(StorageMode::kStoreAndCull, kHistorySize);
+
+ EXPECT_EQ(hist_.GetPayloadPaddingPacket(), nullptr);
+
+ for (size_t i = 0; i < kHistorySize; ++i) {
+ hist_.PutRtpPacket(CreateRtpPacket(To16u(kStartSeqNum + i)),
+ fake_clock_.CurrentTime());
+ hist_.MarkPacketAsSent(To16u(kStartSeqNum + i));
+ fake_clock_.AdvanceTimeMilliseconds(1);
+
+ // Last packet always returned.
+ EXPECT_EQ(hist_.GetPayloadPaddingPacket()->SequenceNumber(),
+ To16u(kStartSeqNum + i));
+ EXPECT_EQ(hist_.GetPayloadPaddingPacket()->SequenceNumber(),
+ To16u(kStartSeqNum + i));
+ EXPECT_EQ(hist_.GetPayloadPaddingPacket()->SequenceNumber(),
+ To16u(kStartSeqNum + i));
+ }
+
+ // Remove packets from the end, last in the list should be returned.
+ for (size_t i = kHistorySize - 1; i > 0; --i) {
+ hist_.CullAcknowledgedPackets(
+ std::vector<uint16_t>{To16u(kStartSeqNum + i)});
+
+ EXPECT_EQ(hist_.GetPayloadPaddingPacket()->SequenceNumber(),
+ To16u(kStartSeqNum + i - 1));
+ EXPECT_EQ(hist_.GetPayloadPaddingPacket()->SequenceNumber(),
+ To16u(kStartSeqNum + i - 1));
+ EXPECT_EQ(hist_.GetPayloadPaddingPacket()->SequenceNumber(),
+ To16u(kStartSeqNum + i - 1));
+ }
+
+ hist_.CullAcknowledgedPackets(std::vector<uint16_t>{kStartSeqNum});
+ EXPECT_EQ(hist_.GetPayloadPaddingPacket(), nullptr);
+}
+
+INSTANTIATE_TEST_SUITE_P(WithAndWithoutPaddingPrio,
+ RtpPacketHistoryTest,
+ ::testing::Bool());
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_packet_received.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_packet_received.cc
new file mode 100644
index 0000000000..6b2cc76981
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_packet_received.cc
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtp_packet_received.h"
+
+#include <stddef.h>
+
+#include <cstdint>
+#include <vector>
+
+#include "modules/rtp_rtcp/source/rtp_header_extensions.h"
+#include "rtc_base/numerics/safe_conversions.h"
+
+namespace webrtc {
+
+RtpPacketReceived::RtpPacketReceived() = default;
+RtpPacketReceived::RtpPacketReceived(
+ const ExtensionManager* extensions,
+ webrtc::Timestamp arrival_time /*= webrtc::Timestamp::MinusInfinity()*/)
+ : RtpPacket(extensions), arrival_time_(arrival_time) {}
+RtpPacketReceived::RtpPacketReceived(const RtpPacketReceived& packet) = default;
+RtpPacketReceived::RtpPacketReceived(RtpPacketReceived&& packet) = default;
+
+RtpPacketReceived& RtpPacketReceived::operator=(
+ const RtpPacketReceived& packet) = default;
+RtpPacketReceived& RtpPacketReceived::operator=(RtpPacketReceived&& packet) =
+ default;
+
+RtpPacketReceived::~RtpPacketReceived() {}
+
+void RtpPacketReceived::GetHeader(RTPHeader* header) const {
+ header->markerBit = Marker();
+ header->payloadType = PayloadType();
+ header->sequenceNumber = SequenceNumber();
+ header->timestamp = Timestamp();
+ header->ssrc = Ssrc();
+ std::vector<uint32_t> csrcs = Csrcs();
+ header->numCSRCs = rtc::dchecked_cast<uint8_t>(csrcs.size());
+ for (size_t i = 0; i < csrcs.size(); ++i) {
+ header->arrOfCSRCs[i] = csrcs[i];
+ }
+ header->paddingLength = padding_size();
+ header->headerLength = headers_size();
+ header->payload_type_frequency = payload_type_frequency();
+ header->extension.hasTransmissionTimeOffset =
+ GetExtension<TransmissionOffset>(
+ &header->extension.transmissionTimeOffset);
+ header->extension.hasAbsoluteSendTime =
+ GetExtension<AbsoluteSendTime>(&header->extension.absoluteSendTime);
+ header->extension.absolute_capture_time =
+ GetExtension<AbsoluteCaptureTimeExtension>();
+ header->extension.hasTransportSequenceNumber =
+ GetExtension<TransportSequenceNumberV2>(
+ &header->extension.transportSequenceNumber,
+ &header->extension.feedback_request) ||
+ GetExtension<TransportSequenceNumber>(
+ &header->extension.transportSequenceNumber);
+ header->extension.hasAudioLevel = GetExtension<AudioLevel>(
+ &header->extension.voiceActivity, &header->extension.audioLevel);
+ header->extension.hasVideoRotation =
+ GetExtension<VideoOrientation>(&header->extension.videoRotation);
+ header->extension.hasVideoContentType =
+ GetExtension<VideoContentTypeExtension>(
+ &header->extension.videoContentType);
+ header->extension.has_video_timing =
+ GetExtension<VideoTimingExtension>(&header->extension.video_timing);
+ GetExtension<RtpStreamId>(&header->extension.stream_id);
+ GetExtension<RepairedRtpStreamId>(&header->extension.repaired_stream_id);
+ GetExtension<RtpMid>(&header->extension.mid);
+ GetExtension<PlayoutDelayLimits>(&header->extension.playout_delay);
+ header->extension.color_space = GetExtension<ColorSpaceExtension>();
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_packet_received.h b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_packet_received.h
new file mode 100644
index 0000000000..f290a643a4
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_packet_received.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef MODULES_RTP_RTCP_SOURCE_RTP_PACKET_RECEIVED_H_
+#define MODULES_RTP_RTCP_SOURCE_RTP_PACKET_RECEIVED_H_
+
+#include <stdint.h>
+
+#include <utility>
+
+#include "api/array_view.h"
+#include "api/ref_counted_base.h"
+#include "api/rtp_headers.h"
+#include "api/scoped_refptr.h"
+#include "api/units/timestamp.h"
+#include "modules/rtp_rtcp/source/rtp_packet.h"
+
+namespace webrtc {
+// Class to hold an RTP packet with receiver-side metadata.
+// The metadata is not parsed from the RTP packet itself, but may be derived
+// from the data that is parsed from it.
+class RtpPacketReceived : public RtpPacket {
+ public:
+ RtpPacketReceived();
+ explicit RtpPacketReceived(
+ const ExtensionManager* extensions,
+ webrtc::Timestamp arrival_time = webrtc::Timestamp::MinusInfinity());
+ RtpPacketReceived(const RtpPacketReceived& packet);
+ RtpPacketReceived(RtpPacketReceived&& packet);
+
+ RtpPacketReceived& operator=(const RtpPacketReceived& packet);
+ RtpPacketReceived& operator=(RtpPacketReceived&& packet);
+
+ ~RtpPacketReceived();
+
+  // TODO(danilchap): Remove this function when all code is updated to use
+  // RtpPacket directly. This function exists just for easier backward
+  // compatibility.
+ void GetHeader(RTPHeader* header) const;
+
+  // Time in the local time base, as close as possible to when the packet
+  // arrived on the network.
+ webrtc::Timestamp arrival_time() const { return arrival_time_; }
+ void set_arrival_time(webrtc::Timestamp time) { arrival_time_ = time; }
+
+ // Flag if packet was recovered via RTX or FEC.
+ bool recovered() const { return recovered_; }
+ void set_recovered(bool value) { recovered_ = value; }
+
+ int payload_type_frequency() const { return payload_type_frequency_; }
+ void set_payload_type_frequency(int value) {
+ payload_type_frequency_ = value;
+ }
+
+ // An application can attach arbitrary data to an RTP packet using
+ // `additional_data`. The additional data does not affect WebRTC processing.
+ rtc::scoped_refptr<rtc::RefCountedBase> additional_data() const {
+ return additional_data_;
+ }
+ void set_additional_data(rtc::scoped_refptr<rtc::RefCountedBase> data) {
+ additional_data_ = std::move(data);
+ }
+
+ private:
+ webrtc::Timestamp arrival_time_ = Timestamp::MinusInfinity();
+ int payload_type_frequency_ = 0;
+ bool recovered_ = false;
+ rtc::scoped_refptr<rtc::RefCountedBase> additional_data_;
+};
+
+} // namespace webrtc
+#endif // MODULES_RTP_RTCP_SOURCE_RTP_PACKET_RECEIVED_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_packet_to_send.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_packet_to_send.cc
new file mode 100644
index 0000000000..b55e74aaf0
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_packet_to_send.cc
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtp_packet_to_send.h"
+
+#include <cstdint>
+
+namespace webrtc {
+
+RtpPacketToSend::RtpPacketToSend(const ExtensionManager* extensions)
+ : RtpPacket(extensions) {}
+RtpPacketToSend::RtpPacketToSend(const ExtensionManager* extensions,
+ size_t capacity)
+ : RtpPacket(extensions, capacity) {}
+RtpPacketToSend::RtpPacketToSend(const RtpPacketToSend& packet) = default;
+RtpPacketToSend::RtpPacketToSend(RtpPacketToSend&& packet) = default;
+
+RtpPacketToSend& RtpPacketToSend::operator=(const RtpPacketToSend& packet) =
+ default;
+RtpPacketToSend& RtpPacketToSend::operator=(RtpPacketToSend&& packet) = default;
+
+RtpPacketToSend::~RtpPacketToSend() = default;
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_packet_to_send.h b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_packet_to_send.h
new file mode 100644
index 0000000000..8c0fc7bd5c
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_packet_to_send.h
@@ -0,0 +1,136 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef MODULES_RTP_RTCP_SOURCE_RTP_PACKET_TO_SEND_H_
+#define MODULES_RTP_RTCP_SOURCE_RTP_PACKET_TO_SEND_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <utility>
+
+#include "absl/types/optional.h"
+#include "api/array_view.h"
+#include "api/ref_counted_base.h"
+#include "api/scoped_refptr.h"
+#include "api/units/timestamp.h"
+#include "api/video/video_timing.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "modules/rtp_rtcp/source/rtp_header_extensions.h"
+#include "modules/rtp_rtcp/source/rtp_packet.h"
+
+namespace webrtc {
+// Class to hold an RTP packet with sender-side metadata.
+// The metadata is not sent over the wire, but the packet sender may use it
+// to create RTP header extensions or other data that is sent over the wire.
+class RtpPacketToSend : public RtpPacket {
+ public:
+ // RtpPacketToSend::Type is deprecated. Use RtpPacketMediaType directly.
+ using Type = RtpPacketMediaType;
+
+ explicit RtpPacketToSend(const ExtensionManager* extensions);
+ RtpPacketToSend(const ExtensionManager* extensions, size_t capacity);
+ RtpPacketToSend(const RtpPacketToSend& packet);
+ RtpPacketToSend(RtpPacketToSend&& packet);
+
+ RtpPacketToSend& operator=(const RtpPacketToSend& packet);
+ RtpPacketToSend& operator=(RtpPacketToSend&& packet);
+
+ ~RtpPacketToSend();
+
+  // Time in the local time base, as close as possible to frame capture time.
+ webrtc::Timestamp capture_time() const { return capture_time_; }
+ void set_capture_time(webrtc::Timestamp time) { capture_time_ = time; }
+
+ void set_packet_type(RtpPacketMediaType type) { packet_type_ = type; }
+ absl::optional<RtpPacketMediaType> packet_type() const {
+ return packet_type_;
+ }
+
+ // If this is a retransmission, indicates the sequence number of the original
+ // media packet that this packet represents. If RTX is used this will likely
+ // be different from SequenceNumber().
+ void set_retransmitted_sequence_number(uint16_t sequence_number) {
+ retransmitted_sequence_number_ = sequence_number;
+ }
+ absl::optional<uint16_t> retransmitted_sequence_number() const {
+ return retransmitted_sequence_number_;
+ }
+
+ void set_allow_retransmission(bool allow_retransmission) {
+ allow_retransmission_ = allow_retransmission;
+ }
+ bool allow_retransmission() const { return allow_retransmission_; }
+
+ // An application can attach arbitrary data to an RTP packet using
+ // `additional_data`. The additional data does not affect WebRTC processing.
+ rtc::scoped_refptr<rtc::RefCountedBase> additional_data() const {
+ return additional_data_;
+ }
+ void set_additional_data(rtc::scoped_refptr<rtc::RefCountedBase> data) {
+ additional_data_ = std::move(data);
+ }
+
+ void set_packetization_finish_time(webrtc::Timestamp time) {
+ SetExtension<VideoTimingExtension>(
+ VideoSendTiming::GetDeltaCappedMs(time - capture_time_),
+ VideoTimingExtension::kPacketizationFinishDeltaOffset);
+ }
+
+ void set_pacer_exit_time(webrtc::Timestamp time) {
+ SetExtension<VideoTimingExtension>(
+ VideoSendTiming::GetDeltaCappedMs(time - capture_time_),
+ VideoTimingExtension::kPacerExitDeltaOffset);
+ }
+
+ void set_network_time(webrtc::Timestamp time) {
+ SetExtension<VideoTimingExtension>(
+ VideoSendTiming::GetDeltaCappedMs(time - capture_time_),
+ VideoTimingExtension::kNetworkTimestampDeltaOffset);
+ }
+
+ void set_network2_time(webrtc::Timestamp time) {
+ SetExtension<VideoTimingExtension>(
+ VideoSendTiming::GetDeltaCappedMs(time - capture_time_),
+ VideoTimingExtension::kNetwork2TimestampDeltaOffset);
+ }
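+
+  // Illustrative flow for the timing setters above (a sketch; assumes
+  // capture_time_ is set and VideoTimingExtension is registered on the
+  // packet, and `clock` is a hypothetical placeholder):
+  //   packet->set_packetization_finish_time(clock->CurrentTime());
+  //   // ... packet waits in the pacer ...
+  //   packet->set_pacer_exit_time(clock->CurrentTime());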
+
+ // Indicates if packet is the first packet of a video frame.
+ void set_first_packet_of_frame(bool is_first_packet) {
+ is_first_packet_of_frame_ = is_first_packet;
+ }
+ bool is_first_packet_of_frame() const { return is_first_packet_of_frame_; }
+
+ // Indicates if packet contains payload for a video key-frame.
+ void set_is_key_frame(bool is_key_frame) { is_key_frame_ = is_key_frame; }
+ bool is_key_frame() const { return is_key_frame_; }
+
+ // Indicates if packets should be protected by FEC (Forward Error Correction).
+ void set_fec_protect_packet(bool protect) { fec_protect_packet_ = protect; }
+ bool fec_protect_packet() const { return fec_protect_packet_; }
+
+ // Indicates if packet is using RED encapsulation, in accordance with
+ // https://tools.ietf.org/html/rfc2198
+ void set_is_red(bool is_red) { is_red_ = is_red; }
+ bool is_red() const { return is_red_; }
+
+ private:
+ webrtc::Timestamp capture_time_ = webrtc::Timestamp::Zero();
+ absl::optional<RtpPacketMediaType> packet_type_;
+ bool allow_retransmission_ = false;
+ absl::optional<uint16_t> retransmitted_sequence_number_;
+ rtc::scoped_refptr<rtc::RefCountedBase> additional_data_;
+ bool is_first_packet_of_frame_ = false;
+ bool is_key_frame_ = false;
+ bool fec_protect_packet_ = false;
+ bool is_red_ = false;
+};
+
+} // namespace webrtc
+#endif // MODULES_RTP_RTCP_SOURCE_RTP_PACKET_TO_SEND_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_packet_unittest.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_packet_unittest.cc
new file mode 100644
index 0000000000..21bf37c0c3
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_packet_unittest.cc
@@ -0,0 +1,1273 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "common_video/test/utilities.h"
+#include "modules/rtp_rtcp/include/rtp_header_extension_map.h"
+#include "modules/rtp_rtcp/source/rtp_header_extensions.h"
+#include "modules/rtp_rtcp/source/rtp_packet_received.h"
+#include "modules/rtp_rtcp/source/rtp_packet_to_send.h"
+#include "rtc_base/random.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+using ::testing::Each;
+using ::testing::ElementsAre;
+using ::testing::ElementsAreArray;
+using ::testing::IsEmpty;
+
+constexpr int8_t kPayloadType = 100;
+constexpr uint32_t kSsrc = 0x12345678;
+constexpr uint16_t kSeqNum = 0x1234;
+constexpr uint8_t kSeqNumFirstByte = kSeqNum >> 8;
+constexpr uint8_t kSeqNumSecondByte = kSeqNum & 0xff;
+constexpr uint32_t kTimestamp = 0x65431278;
+constexpr uint8_t kTransmissionOffsetExtensionId = 1;
+constexpr uint8_t kAudioLevelExtensionId = 9;
+constexpr uint8_t kRtpStreamIdExtensionId = 0xa;
+constexpr uint8_t kRtpMidExtensionId = 0xb;
+constexpr uint8_t kVideoTimingExtensionId = 0xc;
+constexpr uint8_t kTwoByteExtensionId = 0xf0;
+constexpr int32_t kTimeOffset = 0x56ce;
+constexpr bool kVoiceActive = true;
+constexpr uint8_t kAudioLevel = 0x5a;
+constexpr char kStreamId[] = "streamid";
+constexpr char kMid[] = "mid";
+constexpr char kLongMid[] = "extra-long string to test two-byte header";
+constexpr size_t kMaxPaddingSize = 224u;
+// clang-format off
+constexpr uint8_t kMinimumPacket[] = {
+ 0x80, kPayloadType, kSeqNumFirstByte, kSeqNumSecondByte,
+ 0x65, 0x43, 0x12, 0x78,
+ 0x12, 0x34, 0x56, 0x78};
+
+constexpr uint8_t kPacketWithTO[] = {
+ 0x90, kPayloadType, kSeqNumFirstByte, kSeqNumSecondByte,
+ 0x65, 0x43, 0x12, 0x78,
+ 0x12, 0x34, 0x56, 0x78,
+ 0xbe, 0xde, 0x00, 0x01,
+ 0x12, 0x00, 0x56, 0xce};
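+// kPacketWithTO above uses the RFC 8285 one-byte extension header: 0xbe 0xde
+// is the fixed profile marker, the next two bytes give the extension length
+// in 32-bit words, and each element byte is (id << 4) | (data_length - 1),
+// so 0x12 means id 1 (the transmission offset here) followed by 3 data bytes.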
+
+constexpr uint8_t kPacketWithTOAndAL[] = {
+ 0x90, kPayloadType, kSeqNumFirstByte, kSeqNumSecondByte,
+ 0x65, 0x43, 0x12, 0x78,
+ 0x12, 0x34, 0x56, 0x78,
+ 0xbe, 0xde, 0x00, 0x02,
+ 0x12, 0x00, 0x56, 0xce,
+ 0x90, 0x80|kAudioLevel, 0x00, 0x00};
+
+constexpr uint8_t kPacketWithTwoByteExtensionIdLast[] = {
+ 0x90, kPayloadType, kSeqNumFirstByte, kSeqNumSecondByte,
+ 0x65, 0x43, 0x12, 0x78,
+ 0x12, 0x34, 0x56, 0x78,
+ 0x10, 0x00, 0x00, 0x04,
+ 0x01, 0x03, 0x00, 0x56,
+ 0xce, 0x09, 0x01, 0x80|kAudioLevel,
+ kTwoByteExtensionId, 0x03, 0x00, 0x30, // => 0x00 0x30 0x22
+ 0x22, 0x00, 0x00, 0x00}; // => Playout delay.min_ms = 3*10
+ // => Playout delay.max_ms = 34*10
+
+constexpr uint8_t kPacketWithTwoByteExtensionIdFirst[] = {
+ 0x90, kPayloadType, kSeqNumFirstByte, kSeqNumSecondByte,
+ 0x65, 0x43, 0x12, 0x78,
+ 0x12, 0x34, 0x56, 0x78,
+ 0x10, 0x00, 0x00, 0x04,
+ kTwoByteExtensionId, 0x03, 0x00, 0x30, // => 0x00 0x30 0x22
+ 0x22, 0x01, 0x03, 0x00, // => Playout delay.min_ms = 3*10
+ 0x56, 0xce, 0x09, 0x01, // => Playout delay.max_ms = 34*10
+ 0x80|kAudioLevel, 0x00, 0x00, 0x00};
+
+constexpr uint8_t kPacketWithTOAndALInvalidPadding[] = {
+ 0x90, kPayloadType, kSeqNumFirstByte, kSeqNumSecondByte,
+ 0x65, 0x43, 0x12, 0x78,
+ 0x12, 0x34, 0x56, 0x78,
+ 0xbe, 0xde, 0x00, 0x03,
+ 0x12, 0x00, 0x56, 0xce,
+ 0x00, 0x02, 0x00, 0x00, // 0x02 is invalid padding, parsing should stop.
+ 0x90, 0x80|kAudioLevel, 0x00, 0x00};
+
+constexpr uint8_t kPacketWithTOAndALReservedExtensionId[] = {
+ 0x90, kPayloadType, kSeqNumFirstByte, kSeqNumSecondByte,
+ 0x65, 0x43, 0x12, 0x78,
+ 0x12, 0x34, 0x56, 0x78,
+ 0xbe, 0xde, 0x00, 0x03,
+ 0x12, 0x00, 0x56, 0xce,
+ 0x00, 0xF0, 0x00, 0x00, // F is a reserved id, parsing should stop.
+ 0x90, 0x80|kAudioLevel, 0x00, 0x00};
+
+constexpr uint8_t kPacketWithRsid[] = {
+ 0x90, kPayloadType, kSeqNumFirstByte, kSeqNumSecondByte,
+ 0x65, 0x43, 0x12, 0x78,
+ 0x12, 0x34, 0x56, 0x78,
+ 0xbe, 0xde, 0x00, 0x03,
+ 0xa7, 's', 't', 'r',
+ 'e', 'a', 'm', 'i',
+ 'd' , 0x00, 0x00, 0x00};
+
+constexpr uint8_t kPacketWithMid[] = {
+ 0x90, kPayloadType, kSeqNumFirstByte, kSeqNumSecondByte,
+ 0x65, 0x43, 0x12, 0x78,
+ 0x12, 0x34, 0x56, 0x78,
+ 0xbe, 0xde, 0x00, 0x01,
+ 0xb2, 'm', 'i', 'd'};
+
+constexpr uint8_t kCsrcAudioLevelExtensionId = 0xc;
+constexpr uint8_t kCsrcAudioLevelsSize = 4;
+constexpr uint8_t kCsrcAudioLevels[] = {0x7f, 0x00, 0x10, 0x08};
+constexpr uint8_t kPacketWithCsrcAudioLevels[] = {
+ 0x90, kPayloadType, kSeqNumFirstByte, kSeqNumSecondByte,
+ 0x65, 0x43, 0x12, 0x78,
+ 0x12, 0x34, 0x56, 0x78,
+ 0xbe, 0xde, 0x00, 0x02,
+ (kCsrcAudioLevelExtensionId << 4) | (kCsrcAudioLevelsSize - 1),
+ 0x7f, 0x00, 0x10,
+ 0x08, 0x00, 0x00, 0x00};
+
+constexpr uint32_t kCsrcs[] = {0x34567890, 0x32435465};
+constexpr uint8_t kPayload[] = {'p', 'a', 'y', 'l', 'o', 'a', 'd'};
+constexpr uint8_t kPacketPaddingSize = 8;
+constexpr uint8_t kPacket[] = {
+ 0xb2, kPayloadType, kSeqNumFirstByte, kSeqNumSecondByte,
+ 0x65, 0x43, 0x12, 0x78,
+ 0x12, 0x34, 0x56, 0x78,
+ 0x34, 0x56, 0x78, 0x90,
+ 0x32, 0x43, 0x54, 0x65,
+ 0xbe, 0xde, 0x00, 0x01,
+ 0x12, 0x00, 0x56, 0xce,
+ 'p', 'a', 'y', 'l', 'o', 'a', 'd',
+ 'p', 'a', 'd', 'd', 'i', 'n', 'g', kPacketPaddingSize};
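+// In kPacket above, the first byte 0xb2 decodes as version 2, padding bit
+// set, extension bit set, and CSRC count 2, which is why two CSRCs, an
+// extension block, and trailing padding follow; per RFC 3550 the final byte
+// holds the padding length (kPacketPaddingSize).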
+
+constexpr uint8_t kPacketWithTwoByteHeaderExtension[] = {
+ 0x90, kPayloadType, kSeqNumFirstByte, kSeqNumSecondByte,
+ 0x65, 0x43, 0x12, 0x78,
+ 0x12, 0x34, 0x56, 0x78,
+ 0x10, 0x00, 0x00, 0x02, // Two-byte header extension profile id + length.
+ kTwoByteExtensionId, 0x03, 0x00, 0x56,
+ 0xce, 0x00, 0x00, 0x00};
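+// The packet above uses the RFC 8285 two-byte extension header: 0x10 0x00 is
+// the profile marker (appbits zero), and each element carries a full id byte
+// plus a separate length byte, allowing ids above 14 and values longer than
+// 16 bytes.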
+
+constexpr uint8_t kPacketWithLongTwoByteHeaderExtension[] = {
+ 0x90, kPayloadType, kSeqNumFirstByte, kSeqNumSecondByte,
+ 0x65, 0x43, 0x12, 0x78,
+ 0x12, 0x34, 0x56, 0x78,
+ 0x10, 0x00, 0x00, 0x0B, // Two-byte header extension profile id + length.
+ kTwoByteExtensionId, 0x29, 'e', 'x',
+ 't', 'r', 'a', '-', 'l', 'o', 'n', 'g',
+ ' ', 's', 't', 'r', 'i', 'n', 'g', ' ',
+ 't', 'o', ' ', 't', 'e', 's', 't', ' ',
+ 't', 'w', 'o', '-', 'b', 'y', 't', 'e',
+ ' ', 'h', 'e', 'a', 'd', 'e', 'r', 0x00};
+
+constexpr uint8_t kPacketWithTwoByteHeaderExtensionWithPadding[] = {
+ 0x90, kPayloadType, kSeqNumFirstByte, kSeqNumSecondByte,
+ 0x65, 0x43, 0x12, 0x78,
+ 0x12, 0x34, 0x56, 0x78,
+ 0x10, 0x00, 0x00, 0x03, // Two-byte header extension profile id + length.
+ kTwoByteExtensionId, 0x03, 0x00, 0x56,
+ 0xce, 0x00, 0x00, 0x00, // Three padding bytes.
+ kAudioLevelExtensionId, 0x01, 0x80|kAudioLevel, 0x00};
+
+constexpr uint8_t kPacketWithInvalidExtension[] = {
+ 0x90, kPayloadType, kSeqNumFirstByte, kSeqNumSecondByte,
+ 0x65, 0x43, 0x12, 0x78, // kTimestamp.
+ 0x12, 0x34, 0x56, 0x78, // kSsrc.
+ 0xbe, 0xde, 0x00, 0x02, // Extension block of size 2 x 32bit words.
+ (kTransmissionOffsetExtensionId << 4) | 6, // (6+1)-byte extension, but
+ 'e', 'x', 't', // Transmission Offset
+ 'd', 'a', 't', 'a', // expected to be 3-bytes.
+ 'p', 'a', 'y', 'l', 'o', 'a', 'd'};
+
+constexpr uint8_t kPacketWithLegacyTimingExtension[] = {
+ 0x90, kPayloadType, kSeqNumFirstByte, kSeqNumSecondByte,
+ 0x65, 0x43, 0x12, 0x78, // kTimestamp.
+ 0x12, 0x34, 0x56, 0x78, // kSsrc.
+ 0xbe, 0xde, 0x00, 0x04, // Extension block of size 4 x 32bit words.
+ (kVideoTimingExtensionId << 4)
+ | VideoTimingExtension::kValueSizeBytes - 2, // Old format without flags.
+ 0x00, 0x01, 0x00,
+ 0x02, 0x00, 0x03, 0x00,
+ 0x04, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00};
+// clang-format on
+
+void TestCreateAndParseColorSpaceExtension(bool with_hdr_metadata) {
+ // Create packet with extension.
+ RtpPacket::ExtensionManager extensions(/*extmap_allow_mixed=*/true);
+ extensions.Register<ColorSpaceExtension>(1);
+ RtpPacket packet(&extensions);
+ const ColorSpace kColorSpace = CreateTestColorSpace(with_hdr_metadata);
+ EXPECT_TRUE(packet.SetExtension<ColorSpaceExtension>(kColorSpace));
+ packet.SetPayloadSize(42);
+
+ // Read packet with the extension.
+ RtpPacketReceived parsed(&extensions);
+ EXPECT_TRUE(parsed.Parse(packet.Buffer()));
+ ColorSpace parsed_color_space;
+ EXPECT_TRUE(parsed.GetExtension<ColorSpaceExtension>(&parsed_color_space));
+ EXPECT_EQ(kColorSpace, parsed_color_space);
+}
+
+TEST(RtpPacketTest, CreateMinimum) {
+ RtpPacketToSend packet(nullptr);
+ packet.SetPayloadType(kPayloadType);
+ packet.SetSequenceNumber(kSeqNum);
+ packet.SetTimestamp(kTimestamp);
+ packet.SetSsrc(kSsrc);
+ EXPECT_THAT(kMinimumPacket, ElementsAreArray(packet.data(), packet.size()));
+}
+
+TEST(RtpPacketTest, CreateWithExtension) {
+ RtpPacketToSend::ExtensionManager extensions;
+ extensions.Register<TransmissionOffset>(kTransmissionOffsetExtensionId);
+ RtpPacketToSend packet(&extensions);
+ packet.SetPayloadType(kPayloadType);
+ packet.SetSequenceNumber(kSeqNum);
+ packet.SetTimestamp(kTimestamp);
+ packet.SetSsrc(kSsrc);
+ packet.SetExtension<TransmissionOffset>(kTimeOffset);
+ EXPECT_THAT(kPacketWithTO, ElementsAreArray(packet.data(), packet.size()));
+}
+
+TEST(RtpPacketTest, CreateWith2Extensions) {
+ RtpPacketToSend::ExtensionManager extensions;
+ extensions.Register<TransmissionOffset>(kTransmissionOffsetExtensionId);
+ extensions.Register<AudioLevel>(kAudioLevelExtensionId);
+ RtpPacketToSend packet(&extensions);
+ packet.SetPayloadType(kPayloadType);
+ packet.SetSequenceNumber(kSeqNum);
+ packet.SetTimestamp(kTimestamp);
+ packet.SetSsrc(kSsrc);
+ packet.SetExtension<TransmissionOffset>(kTimeOffset);
+ packet.SetExtension<AudioLevel>(kVoiceActive, kAudioLevel);
+ EXPECT_THAT(kPacketWithTOAndAL,
+ ElementsAreArray(packet.data(), packet.size()));
+}
+
+TEST(RtpPacketTest, CreateWithTwoByteHeaderExtensionFirst) {
+ RtpPacketToSend::ExtensionManager extensions(true);
+ extensions.Register<TransmissionOffset>(kTransmissionOffsetExtensionId);
+ extensions.Register<AudioLevel>(kAudioLevelExtensionId);
+ extensions.Register<PlayoutDelayLimits>(kTwoByteExtensionId);
+ RtpPacketToSend packet(&extensions);
+ packet.SetPayloadType(kPayloadType);
+ packet.SetSequenceNumber(kSeqNum);
+ packet.SetTimestamp(kTimestamp);
+ packet.SetSsrc(kSsrc);
+ // Set extension that requires two-byte header.
+ VideoPlayoutDelay playoutDelay = {30, 340};
+ ASSERT_TRUE(packet.SetExtension<PlayoutDelayLimits>(playoutDelay));
+ packet.SetExtension<TransmissionOffset>(kTimeOffset);
+ packet.SetExtension<AudioLevel>(kVoiceActive, kAudioLevel);
+ EXPECT_THAT(kPacketWithTwoByteExtensionIdFirst,
+ ElementsAreArray(packet.data(), packet.size()));
+}
+
+TEST(RtpPacketTest, CreateWithTwoByteHeaderExtensionLast) {
+ // This test will trigger RtpPacket::PromoteToTwoByteHeaderExtension().
+ RtpPacketToSend::ExtensionManager extensions(true);
+ extensions.Register<TransmissionOffset>(kTransmissionOffsetExtensionId);
+ extensions.Register<AudioLevel>(kAudioLevelExtensionId);
+ extensions.Register<PlayoutDelayLimits>(kTwoByteExtensionId);
+ RtpPacketToSend packet(&extensions);
+ packet.SetPayloadType(kPayloadType);
+ packet.SetSequenceNumber(kSeqNum);
+ packet.SetTimestamp(kTimestamp);
+ packet.SetSsrc(kSsrc);
+ packet.SetExtension<TransmissionOffset>(kTimeOffset);
+ packet.SetExtension<AudioLevel>(kVoiceActive, kAudioLevel);
+ EXPECT_THAT(kPacketWithTOAndAL,
+ ElementsAreArray(packet.data(), packet.size()));
+ // Set extension that requires two-byte header.
+ VideoPlayoutDelay playoutDelay = {30, 340};
+ ASSERT_TRUE(packet.SetExtension<PlayoutDelayLimits>(playoutDelay));
+ EXPECT_THAT(kPacketWithTwoByteExtensionIdLast,
+ ElementsAreArray(packet.data(), packet.size()));
+}
+
+TEST(RtpPacketTest, CreateWithDynamicSizedExtensions) {
+ RtpPacketToSend::ExtensionManager extensions;
+ extensions.Register<RtpStreamId>(kRtpStreamIdExtensionId);
+ RtpPacketToSend packet(&extensions);
+ packet.SetPayloadType(kPayloadType);
+ packet.SetSequenceNumber(kSeqNum);
+ packet.SetTimestamp(kTimestamp);
+ packet.SetSsrc(kSsrc);
+ packet.SetExtension<RtpStreamId>(kStreamId);
+ EXPECT_THAT(kPacketWithRsid, ElementsAreArray(packet.data(), packet.size()));
+}
+
+TEST(RtpPacketTest, TryToCreateWithEmptyRsid) {
+ RtpPacketToSend::ExtensionManager extensions;
+ extensions.Register<RtpStreamId>(kRtpStreamIdExtensionId);
+ RtpPacketToSend packet(&extensions);
+ EXPECT_FALSE(packet.SetExtension<RtpStreamId>(""));
+}
+
+TEST(RtpPacketTest, TryToCreateWithLongRsid) {
+ RtpPacketToSend::ExtensionManager extensions;
+ constexpr char kLongStreamId[] = "LoooooooooongRsid";
+ ASSERT_EQ(strlen(kLongStreamId), 17u);
+ extensions.Register<RtpStreamId>(kRtpStreamIdExtensionId);
+ RtpPacketToSend packet(&extensions);
+ EXPECT_FALSE(packet.SetExtension<RtpStreamId>(kLongStreamId));
+}
+
+TEST(RtpPacketTest, TryToCreateWithEmptyMid) {
+ RtpPacketToSend::ExtensionManager extensions;
+ extensions.Register<RtpMid>(kRtpMidExtensionId);
+ RtpPacketToSend packet(&extensions);
+ EXPECT_FALSE(packet.SetExtension<RtpMid>(""));
+}
+
+TEST(RtpPacketTest, TryToCreateWithLongMid) {
+ RtpPacketToSend::ExtensionManager extensions;
+ constexpr char kLongMid[] = "LoooooooooonogMid";
+ ASSERT_EQ(strlen(kLongMid), 17u);
+ extensions.Register<RtpMid>(kRtpMidExtensionId);
+ RtpPacketToSend packet(&extensions);
+ EXPECT_FALSE(packet.SetExtension<RtpMid>(kLongMid));
+}
+
+TEST(RtpPacketTest, TryToCreateTwoByteHeaderNotSupported) {
+ RtpPacketToSend::ExtensionManager extensions;
+ extensions.Register<AudioLevel>(kTwoByteExtensionId);
+ RtpPacketToSend packet(&extensions);
+ // Set extension that requires two-byte header.
+ EXPECT_FALSE(packet.SetExtension<AudioLevel>(kVoiceActive, kAudioLevel));
+}
+
+TEST(RtpPacketTest, CreateWithMaxSizeHeaderExtension) {
+ const std::string kValue = "123456789abcdef";
+ RtpPacket::ExtensionManager extensions;
+ extensions.Register<RtpMid>(1);
+ extensions.Register<RtpStreamId>(2);
+
+ RtpPacket packet(&extensions);
+ EXPECT_TRUE(packet.SetExtension<RtpMid>(kValue));
+
+ packet.SetPayloadSize(42);
+ // Rewriting allocated extension is allowed.
+ EXPECT_TRUE(packet.SetExtension<RtpMid>(kValue));
+ // Adding another extension after payload is set is not allowed.
+ EXPECT_FALSE(packet.SetExtension<RtpStreamId>(kValue));
+
+ // Read packet with the extension.
+ RtpPacketReceived parsed(&extensions);
+ EXPECT_TRUE(parsed.Parse(packet.Buffer()));
+ std::string read;
+ EXPECT_TRUE(parsed.GetExtension<RtpMid>(&read));
+ EXPECT_EQ(read, kValue);
+}
+
+TEST(RtpPacketTest, SetsRegisteredExtension) {
+ RtpPacketToSend::ExtensionManager extensions;
+ extensions.Register<TransmissionOffset>(kTransmissionOffsetExtensionId);
+ RtpPacketToSend packet(&extensions);
+
+ EXPECT_TRUE(packet.IsRegistered<TransmissionOffset>());
+ EXPECT_FALSE(packet.HasExtension<TransmissionOffset>());
+
+ // Try to set the extensions.
+ EXPECT_TRUE(packet.SetExtension<TransmissionOffset>(kTimeOffset));
+
+ EXPECT_TRUE(packet.HasExtension<TransmissionOffset>());
+ EXPECT_EQ(packet.GetExtension<TransmissionOffset>(), kTimeOffset);
+}
+
+TEST(RtpPacketTest, FailsToSetUnregisteredExtension) {
+ RtpPacketToSend::ExtensionManager extensions;
+ extensions.Register<TransmissionOffset>(kTransmissionOffsetExtensionId);
+ RtpPacketToSend packet(&extensions);
+
+ EXPECT_FALSE(packet.IsRegistered<TransportSequenceNumber>());
+ EXPECT_FALSE(packet.HasExtension<TransportSequenceNumber>());
+
+ EXPECT_FALSE(packet.SetExtension<TransportSequenceNumber>(42));
+
+ EXPECT_FALSE(packet.HasExtension<TransportSequenceNumber>());
+ EXPECT_EQ(packet.GetExtension<TransportSequenceNumber>(), absl::nullopt);
+}
+
+TEST(RtpPacketTest, CreateWithDynamicSizedExtensionCsrcAudioLevel) {
+ RtpPacketToSend::ExtensionManager extensions;
+ extensions.Register<CsrcAudioLevel>(kCsrcAudioLevelExtensionId);
+ RtpPacketToSend packet(&extensions);
+ packet.SetPayloadType(kPayloadType);
+ packet.SetSequenceNumber(kSeqNum);
+ packet.SetTimestamp(kTimestamp);
+ packet.SetSsrc(kSsrc);
+ CsrcAudioLevelList levels;
+ levels.numAudioLevels = kCsrcAudioLevelsSize;
+ for (uint8_t i = 0; i < kCsrcAudioLevelsSize; i++) {
+ levels.arrOfAudioLevels[i] = kCsrcAudioLevels[i];
+ }
+ packet.SetExtension<CsrcAudioLevel>(levels);
+ EXPECT_THAT(kPacketWithCsrcAudioLevels,
+ ElementsAreArray(packet.data(), packet.size()));
+}
+
+TEST(RtpPacketTest, SetReservedExtensionsAfterPayload) {
+ const size_t kPayloadSize = 4;
+ RtpPacketToSend::ExtensionManager extensions;
+ extensions.Register<TransmissionOffset>(kTransmissionOffsetExtensionId);
+ extensions.Register<AudioLevel>(kAudioLevelExtensionId);
+ RtpPacketToSend packet(&extensions);
+
+ EXPECT_TRUE(packet.ReserveExtension<TransmissionOffset>());
+ packet.SetPayloadSize(kPayloadSize);
+ // Can't set extension after payload.
+ EXPECT_FALSE(packet.SetExtension<AudioLevel>(kVoiceActive, kAudioLevel));
+ // Unless reserved.
+ EXPECT_TRUE(packet.SetExtension<TransmissionOffset>(kTimeOffset));
+}
+
+TEST(RtpPacketTest, CreatePurePadding) {
+ const size_t kPaddingSize = kMaxPaddingSize - 1;
+ RtpPacketToSend packet(nullptr, 12 + kPaddingSize);
+ packet.SetPayloadType(kPayloadType);
+ packet.SetSequenceNumber(kSeqNum);
+ packet.SetTimestamp(kTimestamp);
+ packet.SetSsrc(kSsrc);
+
+ EXPECT_LT(packet.size(), packet.capacity());
+ EXPECT_FALSE(packet.SetPadding(kPaddingSize + 1));
+ EXPECT_TRUE(packet.SetPadding(kPaddingSize));
+ EXPECT_EQ(packet.size(), packet.capacity());
+}
+
+TEST(RtpPacketTest, CreateUnalignedPadding) {
+ const size_t kPayloadSize = 3; // Make padding start at unaligned address.
+ RtpPacketToSend packet(nullptr, 12 + kPayloadSize + kMaxPaddingSize);
+ packet.SetPayloadType(kPayloadType);
+ packet.SetSequenceNumber(kSeqNum);
+ packet.SetTimestamp(kTimestamp);
+ packet.SetSsrc(kSsrc);
+ packet.SetPayloadSize(kPayloadSize);
+
+ EXPECT_LT(packet.size(), packet.capacity());
+ EXPECT_TRUE(packet.SetPadding(kMaxPaddingSize));
+ EXPECT_EQ(packet.size(), packet.capacity());
+}
+
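+// Per RFC 3550 (section 5.1), the last octet of RTP padding is a count of
+// how many padding octets should be ignored, including itself; the two tests
+// below exercise exactly that behavior.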
+TEST(RtpPacketTest, WritesPaddingSizeToLastByte) {
+ const size_t kPaddingSize = 5;
+ RtpPacket packet;
+
+ EXPECT_TRUE(packet.SetPadding(kPaddingSize));
+ EXPECT_EQ(packet.data()[packet.size() - 1], kPaddingSize);
+}
+
+TEST(RtpPacketTest, UsesZerosForPadding) {
+ const size_t kPaddingSize = 5;
+ RtpPacket packet;
+
+ EXPECT_TRUE(packet.SetPadding(kPaddingSize));
+ EXPECT_THAT(rtc::MakeArrayView(packet.data() + 12, kPaddingSize - 1),
+ Each(0));
+}
+
+TEST(RtpPacketTest, CreateOneBytePadding) {
+ size_t kPayloadSize = 123;
+ RtpPacket packet(nullptr, 12 + kPayloadSize + 1);
+ packet.SetPayloadSize(kPayloadSize);
+
+ EXPECT_TRUE(packet.SetPadding(1));
+
+ EXPECT_EQ(packet.size(), 12 + kPayloadSize + 1);
+ EXPECT_EQ(packet.padding_size(), 1u);
+}
+
+TEST(RtpPacketTest, FailsToAddPaddingWithoutCapacity) {
+ size_t kPayloadSize = 123;
+ RtpPacket packet(nullptr, 12 + kPayloadSize);
+ packet.SetPayloadSize(kPayloadSize);
+
+ EXPECT_FALSE(packet.SetPadding(1));
+}
+
+TEST(RtpPacketTest, ParseMinimum) {
+ RtpPacketReceived packet;
+ EXPECT_TRUE(packet.Parse(kMinimumPacket, sizeof(kMinimumPacket)));
+ EXPECT_EQ(kPayloadType, packet.PayloadType());
+ EXPECT_EQ(kSeqNum, packet.SequenceNumber());
+ EXPECT_EQ(kTimestamp, packet.Timestamp());
+ EXPECT_EQ(kSsrc, packet.Ssrc());
+ EXPECT_EQ(0u, packet.padding_size());
+ EXPECT_EQ(0u, packet.payload_size());
+}
+
+TEST(RtpPacketTest, ParseBuffer) {
+ rtc::CopyOnWriteBuffer unparsed(kMinimumPacket);
+ const uint8_t* raw = unparsed.data();
+
+ RtpPacketReceived packet;
+ EXPECT_TRUE(packet.Parse(std::move(unparsed)));
+ // Expect the packet to take over the buffer without copying it.
+ EXPECT_EQ(raw, packet.data());
+ EXPECT_EQ(kSeqNum, packet.SequenceNumber());
+ EXPECT_EQ(kTimestamp, packet.Timestamp());
+ EXPECT_EQ(kSsrc, packet.Ssrc());
+ EXPECT_EQ(0u, packet.padding_size());
+ EXPECT_EQ(0u, packet.payload_size());
+}
+
+TEST(RtpPacketTest, ParseWithExtension) {
+ RtpPacketToSend::ExtensionManager extensions;
+ extensions.Register<TransmissionOffset>(kTransmissionOffsetExtensionId);
+
+ RtpPacketReceived packet(&extensions);
+ EXPECT_TRUE(packet.Parse(kPacketWithTO, sizeof(kPacketWithTO)));
+ EXPECT_EQ(kPayloadType, packet.PayloadType());
+ EXPECT_EQ(kSeqNum, packet.SequenceNumber());
+ EXPECT_EQ(kTimestamp, packet.Timestamp());
+ EXPECT_EQ(kSsrc, packet.Ssrc());
+ int32_t time_offset;
+ EXPECT_TRUE(packet.GetExtension<TransmissionOffset>(&time_offset));
+ EXPECT_EQ(kTimeOffset, time_offset);
+ EXPECT_EQ(0u, packet.payload_size());
+ EXPECT_EQ(0u, packet.padding_size());
+}
+
+TEST(RtpPacketTest, ParseHeaderOnly) {
+ // clang-format off
+ constexpr uint8_t kPaddingHeader[] = {
+ 0x80, 0x62, 0x35, 0x79,
+ 0x65, 0x43, 0x12, 0x78,
+ 0x12, 0x34, 0x56, 0x78};
+ // clang-format on
+
+ RtpPacket packet;
+ EXPECT_TRUE(packet.Parse(rtc::CopyOnWriteBuffer(kPaddingHeader)));
+ EXPECT_EQ(packet.PayloadType(), 0x62u);
+ EXPECT_EQ(packet.SequenceNumber(), 0x3579u);
+ EXPECT_EQ(packet.Timestamp(), 0x65431278u);
+ EXPECT_EQ(packet.Ssrc(), 0x12345678u);
+
+ EXPECT_FALSE(packet.has_padding());
+ EXPECT_EQ(packet.padding_size(), 0u);
+ EXPECT_EQ(packet.payload_size(), 0u);
+}
+
+TEST(RtpPacketTest, ParseHeaderOnlyWithPadding) {
+ // clang-format off
+ constexpr uint8_t kPaddingHeader[] = {
+ 0xa0, 0x62, 0x35, 0x79,
+ 0x65, 0x43, 0x12, 0x78,
+ 0x12, 0x34, 0x56, 0x78};
+ // clang-format on
+
+ RtpPacket packet;
+ EXPECT_TRUE(packet.Parse(rtc::CopyOnWriteBuffer(kPaddingHeader)));
+
+ EXPECT_TRUE(packet.has_padding());
+ EXPECT_EQ(packet.padding_size(), 0u);
+ EXPECT_EQ(packet.payload_size(), 0u);
+}
+
+TEST(RtpPacketTest, ParseHeaderOnlyWithExtensionAndPadding) {
+ // clang-format off
+ constexpr uint8_t kPaddingHeader[] = {
+ 0xb0, 0x62, 0x35, 0x79,
+ 0x65, 0x43, 0x12, 0x78,
+ 0x12, 0x34, 0x56, 0x78,
+ 0xbe, 0xde, 0x00, 0x01,
+ 0x11, 0x00, 0x00, 0x00};
+ // clang-format on
+
+ RtpHeaderExtensionMap extensions;
+ extensions.Register<TransmissionOffset>(1);
+ RtpPacket packet(&extensions);
+ EXPECT_TRUE(packet.Parse(rtc::CopyOnWriteBuffer(kPaddingHeader)));
+ EXPECT_TRUE(packet.has_padding());
+ EXPECT_TRUE(packet.HasExtension<TransmissionOffset>());
+ EXPECT_EQ(packet.padding_size(), 0u);
+}
+
+TEST(RtpPacketTest, ParsePaddingOnlyPacket) {
+ // clang-format off
+ constexpr uint8_t kPaddingHeader[] = {
+ 0xa0, 0x62, 0x35, 0x79,
+ 0x65, 0x43, 0x12, 0x78,
+ 0x12, 0x34, 0x56, 0x78,
+ 0, 0, 3};
+ // clang-format on
+
+ RtpPacket packet;
+ EXPECT_TRUE(packet.Parse(rtc::CopyOnWriteBuffer(kPaddingHeader)));
+ EXPECT_TRUE(packet.has_padding());
+ EXPECT_EQ(packet.padding_size(), 3u);
+}
+
+TEST(RtpPacketTest, GetExtensionWithoutParametersReturnsOptionalValue) {
+ RtpPacket::ExtensionManager extensions;
+ extensions.Register<TransmissionOffset>(kTransmissionOffsetExtensionId);
+ extensions.Register<RtpStreamId>(kRtpStreamIdExtensionId);
+
+ RtpPacketReceived packet(&extensions);
+ EXPECT_TRUE(packet.Parse(kPacketWithTO, sizeof(kPacketWithTO)));
+
+ auto time_offset = packet.GetExtension<TransmissionOffset>();
+ static_assert(
+ std::is_same<decltype(time_offset),
+ absl::optional<TransmissionOffset::value_type>>::value,
+ "");
+ EXPECT_EQ(time_offset, kTimeOffset);
+ EXPECT_FALSE(packet.GetExtension<RtpStreamId>().has_value());
+}
+
+TEST(RtpPacketTest, GetRawExtensionWhenPresent) {
+ constexpr uint8_t kRawPacket[] = {
+ // Comment for clang-format to align kRawPacket more nicely.
+ 0x90, 100, 0x5e, 0x04, //
+ 0x65, 0x43, 0x12, 0x78, // Timestamp.
+ 0x12, 0x34, 0x56, 0x78, // Ssrc
+ 0xbe, 0xde, 0x00, 0x01, // Extension header
+ 0x12, 'm', 'i', 'd', // 3-byte extension with id=1.
+ 'p', 'a', 'y', 'l', 'o', 'a', 'd'};
+ RtpPacketToSend::ExtensionManager extensions;
+ extensions.Register<RtpMid>(1);
+ RtpPacket packet(&extensions);
+ ASSERT_TRUE(packet.Parse(kRawPacket, sizeof(kRawPacket)));
+ EXPECT_THAT(packet.GetRawExtension<RtpMid>(), ElementsAre('m', 'i', 'd'));
+}
+
+TEST(RtpPacketTest, GetRawExtensionWhenAbsent) {
+ constexpr uint8_t kRawPacket[] = {
+ // Comment for clang-format to align kRawPacket more nicely.
+ 0x90, 100, 0x5e, 0x04, //
+ 0x65, 0x43, 0x12, 0x78, // Timestamp.
+ 0x12, 0x34, 0x56, 0x78, // Ssrc
+ 0xbe, 0xde, 0x00, 0x01, // Extension header
+ 0x12, 'm', 'i', 'd', // 3-byte extension with id=1.
+ 'p', 'a', 'y', 'l', 'o', 'a', 'd'};
+ RtpPacketToSend::ExtensionManager extensions;
+ extensions.Register<RtpMid>(2);
+ RtpPacket packet(&extensions);
+ ASSERT_TRUE(packet.Parse(kRawPacket, sizeof(kRawPacket)));
+ EXPECT_THAT(packet.GetRawExtension<RtpMid>(), IsEmpty());
+}
+
+TEST(RtpPacketTest, ParseWithInvalidSizedExtension) {
+ RtpPacketToSend::ExtensionManager extensions;
+ extensions.Register<TransmissionOffset>(kTransmissionOffsetExtensionId);
+
+ RtpPacketReceived packet(&extensions);
+ EXPECT_TRUE(packet.Parse(kPacketWithInvalidExtension,
+ sizeof(kPacketWithInvalidExtension)));
+
+ // Extension should be ignored.
+ int32_t time_offset;
+ EXPECT_FALSE(packet.GetExtension<TransmissionOffset>(&time_offset));
+
+ // But shouldn't prevent reading payload.
+ EXPECT_THAT(packet.payload(), ElementsAreArray(kPayload));
+}
+
+TEST(RtpPacketTest, ParseWithOverSizedExtension) {
+ // clang-format off
+ const uint8_t bad_packet[] = {
+ 0x90, kPayloadType, kSeqNumFirstByte, kSeqNumSecondByte,
+ 0x65, 0x43, 0x12, 0x78, // kTimestamp.
+ 0x12, 0x34, 0x56, 0x78, // kSsrc.
+ 0xbe, 0xde, 0x00, 0x01, // Extension of size 1x32bit word.
+ 0x00, // Add a byte of padding.
+ 0x12, // Extension id 1 size (2+1).
+ 0xda, 0x1a // Only 2 bytes of extension payload.
+ };
+ // clang-format on
+ RtpPacketToSend::ExtensionManager extensions;
+ extensions.Register<TransmissionOffset>(1);
+ RtpPacketReceived packet(&extensions);
+
+ // Parse should ignore bad extension and proceed.
+ EXPECT_TRUE(packet.Parse(bad_packet, sizeof(bad_packet)));
+ int32_t time_offset;
+ // But extracting extension should fail.
+ EXPECT_FALSE(packet.GetExtension<TransmissionOffset>(&time_offset));
+}
+
+TEST(RtpPacketTest, ParseWith2Extensions) {
+ RtpPacketToSend::ExtensionManager extensions;
+ extensions.Register<TransmissionOffset>(kTransmissionOffsetExtensionId);
+ extensions.Register<AudioLevel>(kAudioLevelExtensionId);
+ RtpPacketReceived packet(&extensions);
+ EXPECT_TRUE(packet.Parse(kPacketWithTOAndAL, sizeof(kPacketWithTOAndAL)));
+ int32_t time_offset;
+ EXPECT_TRUE(packet.GetExtension<TransmissionOffset>(&time_offset));
+ EXPECT_EQ(kTimeOffset, time_offset);
+ bool voice_active;
+ uint8_t audio_level;
+ EXPECT_TRUE(packet.GetExtension<AudioLevel>(&voice_active, &audio_level));
+ EXPECT_EQ(kVoiceActive, voice_active);
+ EXPECT_EQ(kAudioLevel, audio_level);
+}
+
+TEST(RtpPacketTest, ParseSecondPacketWithFewerExtensions) {
+ RtpPacketToSend::ExtensionManager extensions;
+ extensions.Register<TransmissionOffset>(kTransmissionOffsetExtensionId);
+ extensions.Register<AudioLevel>(kAudioLevelExtensionId);
+ RtpPacketReceived packet(&extensions);
+ EXPECT_TRUE(packet.Parse(kPacketWithTOAndAL, sizeof(kPacketWithTOAndAL)));
+ EXPECT_TRUE(packet.HasExtension<TransmissionOffset>());
+ EXPECT_TRUE(packet.HasExtension<AudioLevel>());
+
+ // Second packet without audio level.
+ EXPECT_TRUE(packet.Parse(kPacketWithTO, sizeof(kPacketWithTO)));
+ EXPECT_TRUE(packet.HasExtension<TransmissionOffset>());
+ EXPECT_FALSE(packet.HasExtension<AudioLevel>());
+}
+
+TEST(RtpPacketTest, ParseWith2ExtensionsInvalidPadding) {
+ RtpPacketToSend::ExtensionManager extensions;
+ extensions.Register<TransmissionOffset>(kTransmissionOffsetExtensionId);
+ extensions.Register<AudioLevel>(kAudioLevelExtensionId);
+ RtpPacketReceived packet(&extensions);
+ EXPECT_TRUE(packet.Parse(kPacketWithTOAndALInvalidPadding,
+ sizeof(kPacketWithTOAndALInvalidPadding)));
+ int32_t time_offset;
+ EXPECT_TRUE(packet.GetExtension<TransmissionOffset>(&time_offset));
+ EXPECT_EQ(kTimeOffset, time_offset);
+ bool voice_active;
+ uint8_t audio_level;
+ EXPECT_FALSE(packet.GetExtension<AudioLevel>(&voice_active, &audio_level));
+}
+
+TEST(RtpPacketTest, ParseWith2ExtensionsReservedExtensionId) {
+ RtpPacketToSend::ExtensionManager extensions;
+ extensions.Register<TransmissionOffset>(kTransmissionOffsetExtensionId);
+ extensions.Register<AudioLevel>(kAudioLevelExtensionId);
+ RtpPacketReceived packet(&extensions);
+ EXPECT_TRUE(packet.Parse(kPacketWithTOAndALReservedExtensionId,
+ sizeof(kPacketWithTOAndALReservedExtensionId)));
+ int32_t time_offset;
+ EXPECT_TRUE(packet.GetExtension<TransmissionOffset>(&time_offset));
+ EXPECT_EQ(kTimeOffset, time_offset);
+ bool voice_active;
+ uint8_t audio_level;
+ EXPECT_FALSE(packet.GetExtension<AudioLevel>(&voice_active, &audio_level));
+}
+
+TEST(RtpPacketTest, ParseWithAllFeatures) {
+ RtpPacketToSend::ExtensionManager extensions;
+ extensions.Register<TransmissionOffset>(kTransmissionOffsetExtensionId);
+ RtpPacketReceived packet(&extensions);
+ EXPECT_TRUE(packet.Parse(kPacket, sizeof(kPacket)));
+ EXPECT_EQ(kPayloadType, packet.PayloadType());
+ EXPECT_EQ(kSeqNum, packet.SequenceNumber());
+ EXPECT_EQ(kTimestamp, packet.Timestamp());
+ EXPECT_EQ(kSsrc, packet.Ssrc());
+ EXPECT_THAT(packet.Csrcs(), ElementsAreArray(kCsrcs));
+ EXPECT_THAT(packet.payload(), ElementsAreArray(kPayload));
+ EXPECT_EQ(kPacketPaddingSize, packet.padding_size());
+ int32_t time_offset;
+ EXPECT_TRUE(packet.GetExtension<TransmissionOffset>(&time_offset));
+}
+
+TEST(RtpPacketTest, ParseTwoByteHeaderExtension) {
+ RtpPacketToSend::ExtensionManager extensions;
+ extensions.Register<TransmissionOffset>(kTwoByteExtensionId);
+ RtpPacketReceived packet(&extensions);
+ EXPECT_TRUE(packet.Parse(kPacketWithTwoByteHeaderExtension,
+ sizeof(kPacketWithTwoByteHeaderExtension)));
+ int32_t time_offset;
+ EXPECT_TRUE(packet.GetExtension<TransmissionOffset>(&time_offset));
+ EXPECT_EQ(kTimeOffset, time_offset);
+}
+
+TEST(RtpPacketTest, ParseLongTwoByteHeaderExtension) {
+ RtpPacketToSend::ExtensionManager extensions;
+ extensions.Register<RtpMid>(kTwoByteExtensionId);
+ RtpPacketReceived packet(&extensions);
+ EXPECT_TRUE(packet.Parse(kPacketWithLongTwoByteHeaderExtension,
+ sizeof(kPacketWithLongTwoByteHeaderExtension)));
+ std::string long_rtp_mid;
+ EXPECT_TRUE(packet.GetExtension<RtpMid>(&long_rtp_mid));
+ EXPECT_EQ(kLongMid, long_rtp_mid);
+}
+
+TEST(RtpPacketTest, ParseTwoByteHeaderExtensionWithPadding) {
+ RtpPacketToSend::ExtensionManager extensions;
+ extensions.Register<TransmissionOffset>(kTwoByteExtensionId);
+ extensions.Register<AudioLevel>(kAudioLevelExtensionId);
+ RtpPacketReceived packet(&extensions);
+ EXPECT_TRUE(
+ packet.Parse(kPacketWithTwoByteHeaderExtensionWithPadding,
+ sizeof(kPacketWithTwoByteHeaderExtensionWithPadding)));
+ int32_t time_offset;
+ EXPECT_TRUE(packet.GetExtension<TransmissionOffset>(&time_offset));
+ EXPECT_EQ(kTimeOffset, time_offset);
+ bool voice_active;
+ uint8_t audio_level;
+ EXPECT_TRUE(packet.GetExtension<AudioLevel>(&voice_active, &audio_level));
+ EXPECT_EQ(kVoiceActive, voice_active);
+ EXPECT_EQ(kAudioLevel, audio_level);
+}
+
+TEST(RtpPacketTest, ParseWithExtensionDelayed) {
+ RtpPacketReceived packet;
+ EXPECT_TRUE(packet.Parse(kPacketWithTO, sizeof(kPacketWithTO)));
+ EXPECT_EQ(kPayloadType, packet.PayloadType());
+ EXPECT_EQ(kSeqNum, packet.SequenceNumber());
+ EXPECT_EQ(kTimestamp, packet.Timestamp());
+ EXPECT_EQ(kSsrc, packet.Ssrc());
+
+ RtpPacketToSend::ExtensionManager extensions;
+ extensions.Register<TransmissionOffset>(kTransmissionOffsetExtensionId);
+
+ int32_t time_offset;
+ EXPECT_FALSE(packet.GetExtension<TransmissionOffset>(&time_offset));
+ packet.IdentifyExtensions(extensions);
+ EXPECT_TRUE(packet.GetExtension<TransmissionOffset>(&time_offset));
+ EXPECT_EQ(kTimeOffset, time_offset);
+ EXPECT_EQ(0u, packet.payload_size());
+ EXPECT_EQ(0u, packet.padding_size());
+}
+
+TEST(RtpPacketTest, ParseDynamicSizeExtension) {
+ // clang-format off
+ const uint8_t kPacket1[] = {
+ 0x90, kPayloadType, kSeqNumFirstByte, kSeqNumSecondByte,
+ 0x65, 0x43, 0x12, 0x78, // Timestamp.
+ 0x12, 0x34, 0x56, 0x78, // Ssrc.
+ 0xbe, 0xde, 0x00, 0x02, // Extensions block of size 2x32bit words.
+ 0x21, 'H', 'D', // Extension with id = 2, size = (1+1).
+ 0x12, 'r', 't', 'x', // Extension with id = 1, size = (2+1).
+ 0x00}; // Extension padding.
+ const uint8_t kPacket2[] = {
+ 0x90, kPayloadType, kSeqNumFirstByte, kSeqNumSecondByte,
+ 0x65, 0x43, 0x12, 0x78, // Timestamp.
+ 0x12, 0x34, 0x56, 0x79, // Ssrc.
+ 0xbe, 0xde, 0x00, 0x01, // Extensions block of size 1x32bit words.
+ 0x11, 'H', 'D', // Extension with id = 1, size = (1+1).
+ 0x00}; // Extension padding.
+ // clang-format on
+ RtpPacketReceived::ExtensionManager extensions;
+ extensions.Register<RtpStreamId>(1);
+ extensions.Register<RepairedRtpStreamId>(2);
+ RtpPacketReceived packet(&extensions);
+ ASSERT_TRUE(packet.Parse(kPacket1, sizeof(kPacket1)));
+
+ std::string rsid;
+ EXPECT_TRUE(packet.GetExtension<RtpStreamId>(&rsid));
+ EXPECT_EQ(rsid, "rtx");
+
+ std::string repaired_rsid;
+ EXPECT_TRUE(packet.GetExtension<RepairedRtpStreamId>(&repaired_rsid));
+ EXPECT_EQ(repaired_rsid, "HD");
+
+ // Parse another packet with RtpStreamId extension of different size.
+ ASSERT_TRUE(packet.Parse(kPacket2, sizeof(kPacket2)));
+ EXPECT_TRUE(packet.GetExtension<RtpStreamId>(&rsid));
+ EXPECT_EQ(rsid, "HD");
+ EXPECT_FALSE(packet.GetExtension<RepairedRtpStreamId>(&repaired_rsid));
+}
+
+TEST(RtpPacketTest, ParseWithMid) {
+ RtpPacketReceived::ExtensionManager extensions;
+ extensions.Register<RtpMid>(kRtpMidExtensionId);
+ RtpPacketReceived packet(&extensions);
+ ASSERT_TRUE(packet.Parse(kPacketWithMid, sizeof(kPacketWithMid)));
+
+ std::string mid;
+ EXPECT_TRUE(packet.GetExtension<RtpMid>(&mid));
+ EXPECT_EQ(mid, kMid);
+}
+
+struct UncopyableValue {
+ UncopyableValue() = default;
+ UncopyableValue(const UncopyableValue&) = delete;
+ UncopyableValue& operator=(const UncopyableValue&) = delete;
+};
+struct UncopyableExtension {
+ static constexpr RTPExtensionType kId = kRtpExtensionGenericFrameDescriptor02;
+ static constexpr absl::string_view Uri() { return "uri"; }
+
+ static size_t ValueSize(const UncopyableValue& value) { return 1; }
+ static bool Write(rtc::ArrayView<uint8_t> data,
+ const UncopyableValue& value) {
+ return true;
+ }
+ static bool Parse(rtc::ArrayView<const uint8_t> data,
+ UncopyableValue* value) {
+ return true;
+ }
+};
+constexpr RTPExtensionType UncopyableExtension::kId;
+
+TEST(RtpPacketTest, SetUncopyableExtension) {
+ RtpPacket::ExtensionManager extensions;
+ extensions.Register<UncopyableExtension>(1);
+ RtpPacket rtp_packet(&extensions);
+
+ UncopyableValue value;
+ EXPECT_TRUE(rtp_packet.SetExtension<UncopyableExtension>(value));
+}
+
+TEST(RtpPacketTest, GetUncopyableExtension) {
+ RtpPacket::ExtensionManager extensions;
+ extensions.Register<UncopyableExtension>(1);
+ RtpPacket rtp_packet(&extensions);
+ UncopyableValue value;
+ rtp_packet.SetExtension<UncopyableExtension>(value);
+
+ UncopyableValue value2;
+ EXPECT_TRUE(rtp_packet.GetExtension<UncopyableExtension>(&value2));
+}
+
+TEST(RtpPacketTest, CreateAndParseTimingFrameExtension) {
+ // Create a packet with video frame timing extension populated.
+ RtpPacketToSend::ExtensionManager send_extensions;
+ send_extensions.Register<VideoTimingExtension>(kVideoTimingExtensionId);
+ RtpPacketToSend send_packet(&send_extensions);
+ send_packet.SetPayloadType(kPayloadType);
+ send_packet.SetSequenceNumber(kSeqNum);
+ send_packet.SetTimestamp(kTimestamp);
+ send_packet.SetSsrc(kSsrc);
+
+ VideoSendTiming timing;
+ timing.encode_start_delta_ms = 1;
+ timing.encode_finish_delta_ms = 2;
+ timing.packetization_finish_delta_ms = 3;
+ timing.pacer_exit_delta_ms = 4;
+ timing.flags =
+ VideoSendTiming::kTriggeredByTimer | VideoSendTiming::kTriggeredBySize;
+
+ send_packet.SetExtension<VideoTimingExtension>(timing);
+
+ // Serialize the packet and then parse it again.
+ RtpPacketReceived::ExtensionManager extensions;
+ extensions.Register<VideoTimingExtension>(kVideoTimingExtensionId);
+ RtpPacketReceived receive_packet(&extensions);
+ EXPECT_TRUE(receive_packet.Parse(send_packet.Buffer()));
+
+ VideoSendTiming received_timing;
+ EXPECT_TRUE(
+ receive_packet.GetExtension<VideoTimingExtension>(&received_timing));
+
+ // Only check first and last timestamp (covered by other tests) plus flags.
+ EXPECT_EQ(received_timing.encode_start_delta_ms,
+ timing.encode_start_delta_ms);
+ EXPECT_EQ(received_timing.pacer_exit_delta_ms, timing.pacer_exit_delta_ms);
+ EXPECT_EQ(received_timing.flags, timing.flags);
+}
+
+TEST(RtpPacketTest, ParseLegacyTimingFrameExtension) {
+ // Parse the modified packet.
+ RtpPacketReceived::ExtensionManager extensions;
+ extensions.Register<VideoTimingExtension>(kVideoTimingExtensionId);
+ RtpPacketReceived packet(&extensions);
+ EXPECT_TRUE(packet.Parse(kPacketWithLegacyTimingExtension,
+ sizeof(kPacketWithLegacyTimingExtension)));
+ VideoSendTiming received_timing;
+ EXPECT_TRUE(packet.GetExtension<VideoTimingExtension>(&received_timing));
+
+ // Check first and last timestamp are still OK. Flags should now be 0.
+ EXPECT_EQ(received_timing.encode_start_delta_ms, 1);
+ EXPECT_EQ(received_timing.pacer_exit_delta_ms, 4);
+ EXPECT_EQ(received_timing.flags, 0);
+}
+
+TEST(RtpPacketTest, CreateAndParseColorSpaceExtension) {
+ TestCreateAndParseColorSpaceExtension(/*with_hdr_metadata=*/true);
+}
+
+TEST(RtpPacketTest, CreateAndParseColorSpaceExtensionWithoutHdrMetadata) {
+ TestCreateAndParseColorSpaceExtension(/*with_hdr_metadata=*/false);
+}
+
+TEST(RtpPacketTest, CreateAndParseAbsoluteCaptureTime) {
+ // Create a packet with absolute capture time extension populated.
+ RtpPacketToSend::ExtensionManager extensions;
+ constexpr int kExtensionId = 1;
+ extensions.Register<AbsoluteCaptureTimeExtension>(kExtensionId);
+ RtpPacketToSend send_packet(&extensions);
+ send_packet.SetPayloadType(kPayloadType);
+ send_packet.SetSequenceNumber(kSeqNum);
+ send_packet.SetTimestamp(kTimestamp);
+ send_packet.SetSsrc(kSsrc);
+
+ constexpr AbsoluteCaptureTime kAbsoluteCaptureTime{
+ /*absolute_capture_timestamp=*/9876543210123456789ULL,
+ /*estimated_capture_clock_offset=*/-1234567890987654321LL};
+ send_packet.SetExtension<AbsoluteCaptureTimeExtension>(kAbsoluteCaptureTime);
+
+ // Serialize the packet and then parse it again.
+ RtpPacketReceived receive_packet(&extensions);
+ EXPECT_TRUE(receive_packet.Parse(send_packet.Buffer()));
+
+ AbsoluteCaptureTime received_absolute_capture_time;
+ EXPECT_TRUE(receive_packet.GetExtension<AbsoluteCaptureTimeExtension>(
+ &received_absolute_capture_time));
+ EXPECT_EQ(kAbsoluteCaptureTime.absolute_capture_timestamp,
+ received_absolute_capture_time.absolute_capture_timestamp);
+ EXPECT_EQ(kAbsoluteCaptureTime.estimated_capture_clock_offset,
+ received_absolute_capture_time.estimated_capture_clock_offset);
+}
+
+TEST(RtpPacketTest,
+ CreateAndParseAbsoluteCaptureTimeWithoutEstimatedCaptureClockOffset) {
+ // Create a packet with absolute capture time extension populated.
+ RtpPacketToSend::ExtensionManager extensions;
+ constexpr int kExtensionId = 1;
+ extensions.Register<AbsoluteCaptureTimeExtension>(kExtensionId);
+ RtpPacketToSend send_packet(&extensions);
+ send_packet.SetPayloadType(kPayloadType);
+ send_packet.SetSequenceNumber(kSeqNum);
+ send_packet.SetTimestamp(kTimestamp);
+ send_packet.SetSsrc(kSsrc);
+
+ constexpr AbsoluteCaptureTime kAbsoluteCaptureTime{
+ /*absolute_capture_timestamp=*/9876543210123456789ULL,
+ /*estimated_capture_clock_offset=*/absl::nullopt};
+ send_packet.SetExtension<AbsoluteCaptureTimeExtension>(kAbsoluteCaptureTime);
+
+ // Serialize the packet and then parse it again.
+ RtpPacketReceived receive_packet(&extensions);
+ EXPECT_TRUE(receive_packet.Parse(send_packet.Buffer()));
+
+ AbsoluteCaptureTime received_absolute_capture_time;
+ EXPECT_TRUE(receive_packet.GetExtension<AbsoluteCaptureTimeExtension>(
+ &received_absolute_capture_time));
+ EXPECT_EQ(kAbsoluteCaptureTime.absolute_capture_timestamp,
+ received_absolute_capture_time.absolute_capture_timestamp);
+ EXPECT_EQ(kAbsoluteCaptureTime.estimated_capture_clock_offset,
+ received_absolute_capture_time.estimated_capture_clock_offset);
+}
+
+TEST(RtpPacketTest, CreateAndParseTransportSequenceNumber) {
+ // Create a packet with transport sequence number extension populated.
+ RtpPacketToSend::ExtensionManager extensions;
+ constexpr int kExtensionId = 1;
+ extensions.Register<TransportSequenceNumber>(kExtensionId);
+ RtpPacketToSend send_packet(&extensions);
+ send_packet.SetPayloadType(kPayloadType);
+ send_packet.SetSequenceNumber(kSeqNum);
+ send_packet.SetTimestamp(kTimestamp);
+ send_packet.SetSsrc(kSsrc);
+
+ constexpr int kTransportSequenceNumber = 12345;
+ send_packet.SetExtension<TransportSequenceNumber>(kTransportSequenceNumber);
+
+ // Serialize the packet and then parse it again.
+ RtpPacketReceived receive_packet(&extensions);
+ EXPECT_TRUE(receive_packet.Parse(send_packet.Buffer()));
+
+ uint16_t received_transport_sequence_number;
+ EXPECT_TRUE(receive_packet.GetExtension<TransportSequenceNumber>(
+ &received_transport_sequence_number));
+ EXPECT_EQ(received_transport_sequence_number, kTransportSequenceNumber);
+}
+
+TEST(RtpPacketTest, CreateAndParseTransportSequenceNumberV2) {
+ // Create a packet with transport sequence number V2 extension populated.
+ // No feedback request means that the extension will be two bytes unless it's
+ // pre-allocated.
+ RtpPacketToSend::ExtensionManager extensions;
+ constexpr int kExtensionId = 1;
+ extensions.Register<TransportSequenceNumberV2>(kExtensionId);
+ RtpPacketToSend send_packet(&extensions);
+ send_packet.SetPayloadType(kPayloadType);
+ send_packet.SetSequenceNumber(kSeqNum);
+ send_packet.SetTimestamp(kTimestamp);
+ send_packet.SetSsrc(kSsrc);
+
+ constexpr int kTransportSequenceNumber = 12345;
+ send_packet.SetExtension<TransportSequenceNumberV2>(kTransportSequenceNumber,
+ absl::nullopt);
+ EXPECT_EQ(send_packet.GetRawExtension<TransportSequenceNumberV2>().size(),
+ 2u);
+
+ // Serialize the packet and then parse it again.
+ RtpPacketReceived receive_packet(&extensions);
+ EXPECT_TRUE(receive_packet.Parse(send_packet.Buffer()));
+
+ uint16_t received_transport_sequence_number;
+ absl::optional<FeedbackRequest> received_feedback_request;
+ EXPECT_TRUE(receive_packet.GetExtension<TransportSequenceNumberV2>(
+ &received_transport_sequence_number, &received_feedback_request));
+ EXPECT_EQ(received_transport_sequence_number, kTransportSequenceNumber);
+ EXPECT_FALSE(received_feedback_request);
+}
+
+TEST(RtpPacketTest, CreateAndParseTransportSequenceNumberV2Preallocated) {
+ // Create a packet with transport sequence number V2 extension populated.
+ // No feedback request means that the extension could be two bytes, but
+ // since it's pre-allocated we don't know whether it is with or without a
+ // feedback request; therefore the size is four bytes.
+ RtpPacketToSend::ExtensionManager extensions;
+ constexpr int kExtensionId = 1;
+ extensions.Register<TransportSequenceNumberV2>(kExtensionId);
+ RtpPacketToSend send_packet(&extensions);
+ send_packet.SetPayloadType(kPayloadType);
+ send_packet.SetSequenceNumber(kSeqNum);
+ send_packet.SetTimestamp(kTimestamp);
+ send_packet.SetSsrc(kSsrc);
+
+ constexpr int kTransportSequenceNumber = 12345;
+ constexpr absl::optional<FeedbackRequest> kNoFeedbackRequest =
+ FeedbackRequest{/*include_timestamps=*/false, /*sequence_count=*/0};
+ send_packet.ReserveExtension<TransportSequenceNumberV2>();
+ send_packet.SetExtension<TransportSequenceNumberV2>(kTransportSequenceNumber,
+ kNoFeedbackRequest);
+ EXPECT_EQ(send_packet.GetRawExtension<TransportSequenceNumberV2>().size(),
+ 4u);
+
+ // Serialize the packet and then parse it again.
+ RtpPacketReceived receive_packet(&extensions);
+ EXPECT_TRUE(receive_packet.Parse(send_packet.Buffer()));
+
+ uint16_t received_transport_sequence_number;
+ absl::optional<FeedbackRequest> received_feedback_request;
+ EXPECT_TRUE(receive_packet.GetExtension<TransportSequenceNumberV2>(
+ &received_transport_sequence_number, &received_feedback_request));
+ EXPECT_EQ(received_transport_sequence_number, kTransportSequenceNumber);
+ EXPECT_FALSE(received_feedback_request);
+}
+
+TEST(RtpPacketTest,
+ CreateAndParseTransportSequenceNumberV2WithFeedbackRequest) {
+ // Create a packet with TransportSequenceNumberV2 extension populated.
+ RtpPacketToSend::ExtensionManager extensions;
+ constexpr int kExtensionId = 1;
+ extensions.Register<TransportSequenceNumberV2>(kExtensionId);
+ RtpPacketToSend send_packet(&extensions);
+ send_packet.SetPayloadType(kPayloadType);
+ send_packet.SetSequenceNumber(kSeqNum);
+ send_packet.SetTimestamp(kTimestamp);
+ send_packet.SetSsrc(kSsrc);
+
+ constexpr int kTransportSequenceNumber = 12345;
+ constexpr absl::optional<FeedbackRequest> kFeedbackRequest =
+ FeedbackRequest{/*include_timestamps=*/true, /*sequence_count=*/3};
+ send_packet.SetExtension<TransportSequenceNumberV2>(kTransportSequenceNumber,
+ kFeedbackRequest);
+
+ // Serialize the packet and then parse it again.
+ RtpPacketReceived receive_packet(&extensions);
+ EXPECT_TRUE(receive_packet.Parse(send_packet.Buffer()));
+
+ // Parse transport sequence number and feedback request.
+ uint16_t received_transport_sequence_number;
+ absl::optional<FeedbackRequest> received_feedback_request;
+ EXPECT_TRUE(receive_packet.GetExtension<TransportSequenceNumberV2>(
+ &received_transport_sequence_number, &received_feedback_request));
+ EXPECT_EQ(received_transport_sequence_number, kTransportSequenceNumber);
+ ASSERT_TRUE(received_feedback_request);
+ EXPECT_EQ(received_feedback_request->include_timestamps,
+ kFeedbackRequest->include_timestamps);
+ EXPECT_EQ(received_feedback_request->sequence_count,
+ kFeedbackRequest->sequence_count);
+}
+
+TEST(RtpPacketTest, ReservedExtensionsCountedAsSetExtension) {
+ // Register two extensions.
+ RtpPacketToSend::ExtensionManager extensions;
+ extensions.Register<TransmissionOffset>(kTransmissionOffsetExtensionId);
+ extensions.Register<AudioLevel>(kAudioLevelExtensionId);
+
+ RtpPacketReceived packet(&extensions);
+
+ // Reserve slot for only one of them.
+ EXPECT_TRUE(packet.ReserveExtension<TransmissionOffset>());
+ // Non-registered extension cannot be reserved.
+ EXPECT_FALSE(packet.ReserveExtension<VideoContentTypeExtension>());
+
+ // Only the extension that is both registered and reserved matches
+ // HasExtension().
+ EXPECT_FALSE(packet.HasExtension<VideoContentTypeExtension>());
+ EXPECT_FALSE(packet.HasExtension<AudioLevel>());
+ EXPECT_TRUE(packet.HasExtension<TransmissionOffset>());
+}
+
+// Tests that RtpPacket::RemoveExtension can successfully remove extensions.
+TEST(RtpPacketTest, RemoveMultipleExtensions) {
+ RtpPacketToSend::ExtensionManager extensions;
+ extensions.Register<TransmissionOffset>(kTransmissionOffsetExtensionId);
+ extensions.Register<AudioLevel>(kAudioLevelExtensionId);
+ RtpPacketToSend packet(&extensions);
+ packet.SetPayloadType(kPayloadType);
+ packet.SetSequenceNumber(kSeqNum);
+ packet.SetTimestamp(kTimestamp);
+ packet.SetSsrc(kSsrc);
+ packet.SetExtension<TransmissionOffset>(kTimeOffset);
+ packet.SetExtension<AudioLevel>(kVoiceActive, kAudioLevel);
+
+ EXPECT_THAT(kPacketWithTOAndAL,
+ ElementsAreArray(packet.data(), packet.size()));
+
+ // Remove one of two extensions.
+ EXPECT_TRUE(packet.RemoveExtension(kRtpExtensionAudioLevel));
+
+ EXPECT_THAT(kPacketWithTO, ElementsAreArray(packet.data(), packet.size()));
+
+ // Remove remaining extension.
+ EXPECT_TRUE(packet.RemoveExtension(kRtpExtensionTransmissionTimeOffset));
+
+ EXPECT_THAT(kMinimumPacket, ElementsAreArray(packet.data(), packet.size()));
+}
+
+// Tests that RtpPacket::RemoveExtension can successfully remove an extension
+// when other extensions are present but not registered.
+TEST(RtpPacketTest, RemoveExtensionPreservesOtherUnregisteredExtensions) {
+ RtpPacketToSend::ExtensionManager extensions;
+ extensions.Register<TransmissionOffset>(kTransmissionOffsetExtensionId);
+ extensions.Register<AudioLevel>(kAudioLevelExtensionId);
+ RtpPacketToSend packet(&extensions);
+ packet.SetPayloadType(kPayloadType);
+ packet.SetSequenceNumber(kSeqNum);
+ packet.SetTimestamp(kTimestamp);
+ packet.SetSsrc(kSsrc);
+ packet.SetExtension<TransmissionOffset>(kTimeOffset);
+ packet.SetExtension<AudioLevel>(kVoiceActive, kAudioLevel);
+
+ EXPECT_THAT(kPacketWithTOAndAL,
+ ElementsAreArray(packet.data(), packet.size()));
+
+ // "Unregister" kRtpExtensionTransmissionTimeOffset.
+ RtpPacketToSend::ExtensionManager extensions1;
+ extensions1.Register<AudioLevel>(kAudioLevelExtensionId);
+ packet.IdentifyExtensions(extensions1);
+
+ // Make sure we cannot delete an extension which is set but not registered.
+ EXPECT_FALSE(packet.RemoveExtension(kRtpExtensionTransmissionTimeOffset));
+
+ // Remove registered extension.
+ EXPECT_TRUE(packet.RemoveExtension(kRtpExtensionAudioLevel));
+
+ EXPECT_THAT(kPacketWithTO, ElementsAreArray(packet.data(), packet.size()));
+}
+
+// Tests that RtpPacket::RemoveExtension fails if the extension is not present
+// or not registered, and does not modify the packet.
+TEST(RtpPacketTest, RemoveExtensionFailure) {
+ RtpPacketToSend::ExtensionManager extensions;
+ extensions.Register<TransmissionOffset>(kTransmissionOffsetExtensionId);
+ extensions.Register<AudioLevel>(kAudioLevelExtensionId);
+ RtpPacketToSend packet(&extensions);
+ packet.SetPayloadType(kPayloadType);
+ packet.SetSequenceNumber(kSeqNum);
+ packet.SetTimestamp(kTimestamp);
+ packet.SetSsrc(kSsrc);
+ packet.SetExtension<TransmissionOffset>(kTimeOffset);
+
+ EXPECT_THAT(kPacketWithTO, ElementsAreArray(packet.data(), packet.size()));
+
+ // Try to remove extension, which was registered, but not set.
+ EXPECT_FALSE(packet.RemoveExtension(kRtpExtensionAudioLevel));
+
+ EXPECT_THAT(kPacketWithTO, ElementsAreArray(packet.data(), packet.size()));
+
+ // Try to remove extension, which was not registered.
+ EXPECT_FALSE(packet.RemoveExtension(kRtpExtensionPlayoutDelay));
+
+ EXPECT_THAT(kPacketWithTO, ElementsAreArray(packet.data(), packet.size()));
+}
+
+} // namespace
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_packetizer_av1.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_packetizer_av1.cc
new file mode 100644
index 0000000000..9cca9837ea
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_packetizer_av1.cc
@@ -0,0 +1,424 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/rtp_rtcp/source/rtp_packetizer_av1.h"
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <algorithm>
+
+#include "api/array_view.h"
+#include "api/video/video_frame_type.h"
+#include "modules/rtp_rtcp/source/rtp_packet_to_send.h"
+#include "rtc_base/byte_buffer.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+namespace {
+// TODO(danilchap): Some of the helpers/constants are the same as in
+// rtp_depacketizer_av1. Move them to a common av1 file.
+constexpr int kAggregationHeaderSize = 1;
+// When there are 3 or fewer OBUs (fragments) in a packet, the size of the
+// last one can be omitted.
+constexpr int kMaxNumObusToOmitSize = 3;
+constexpr uint8_t kObuSizePresentBit = 0b0'0000'010;
+constexpr int kObuTypeSequenceHeader = 1;
+constexpr int kObuTypeTemporalDelimiter = 2;
+constexpr int kObuTypeTileList = 8;
+constexpr int kObuTypePadding = 15;
+
+bool ObuHasExtension(uint8_t obu_header) {
+ return obu_header & 0b0'0000'100;
+}
+
+bool ObuHasSize(uint8_t obu_header) {
+ return obu_header & kObuSizePresentBit;
+}
+
+int ObuType(uint8_t obu_header) {
+ return (obu_header & 0b0'1111'000) >> 3;
+}
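+
+// For reference, these helpers assume the AV1 OBU header bit layout:
+// bit 7 obu_forbidden_bit, bits 6:3 obu_type, bit 2 obu_extension_flag,
+// bit 1 obu_has_size_field, bit 0 obu_reserved_1bit.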
+
+int Leb128Size(int value) {
+ RTC_DCHECK_GE(value, 0);
+ int size = 0;
+ while (value >= 0x80) {
+ ++size;
+ value >>= 7;
+ }
+ return size + 1;
+}
+
+// Returns the number of bytes written.
+int WriteLeb128(uint32_t value, uint8_t* buffer) {
+ int size = 0;
+ while (value >= 0x80) {
+ buffer[size] = 0x80 | (value & 0x7F);
+ ++size;
+ value >>= 7;
+ }
+ buffer[size] = value;
+ ++size;
+ return size;
+}
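+
+// Worked example for the two leb128 helpers above: the value 300 (0x12C) is
+// encoded as {0xAC, 0x02} - the low 7 bits with the continuation bit set,
+// then the remaining bits - so Leb128Size(300) returns 2 and
+// WriteLeb128(300, buffer) writes those two bytes and returns 2.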
+
+// Given `remaining_bytes` bytes left in a packet, returns the max size of an
+// OBU fragment that can fit into the packet, i.e. the largest size such that
+// MaxFragmentSize + Leb128Size(MaxFragmentSize) <= remaining_bytes.
+int MaxFragmentSize(int remaining_bytes) {
+ if (remaining_bytes <= 1) {
+ return 0;
+ }
+ for (int i = 1;; ++i) {
+ if (remaining_bytes < (1 << 7 * i) + i) {
+ return remaining_bytes - i;
+ }
+ }
+}
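+
+// For example, MaxFragmentSize(128) returns 127: a 127-byte fragment plus
+// its 1-byte leb128 size prefix exactly fills the 128 remaining bytes,
+// whereas a 128-byte fragment would already need a 2-byte prefix.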
+
+} // namespace
+
+RtpPacketizerAv1::RtpPacketizerAv1(rtc::ArrayView<const uint8_t> payload,
+ RtpPacketizer::PayloadSizeLimits limits,
+ VideoFrameType frame_type,
+ bool is_last_frame_in_picture)
+ : frame_type_(frame_type),
+ obus_(ParseObus(payload)),
+ packets_(Packetize(obus_, limits)),
+ is_last_frame_in_picture_(is_last_frame_in_picture) {}
+
+std::vector<RtpPacketizerAv1::Obu> RtpPacketizerAv1::ParseObus(
+ rtc::ArrayView<const uint8_t> payload) {
+ std::vector<Obu> result;
+ rtc::ByteBufferReader payload_reader(
+ reinterpret_cast<const char*>(payload.data()), payload.size());
+ while (payload_reader.Length() > 0) {
+ Obu obu;
+ payload_reader.ReadUInt8(&obu.header);
+ obu.size = 1;
+ if (ObuHasExtension(obu.header)) {
+ if (payload_reader.Length() == 0) {
+ RTC_DLOG(LS_ERROR) << "Malformed AV1 input: expected extension_header, "
+ "no more bytes in the buffer. Offset: "
+ << (payload.size() - payload_reader.Length());
+ return {};
+ }
+ payload_reader.ReadUInt8(&obu.extension_header);
+ ++obu.size;
+ }
+ if (!ObuHasSize(obu.header)) {
+ obu.payload = rtc::MakeArrayView(
+ reinterpret_cast<const uint8_t*>(payload_reader.Data()),
+ payload_reader.Length());
+ payload_reader.Consume(payload_reader.Length());
+ } else {
+ uint64_t size = 0;
+ if (!payload_reader.ReadUVarint(&size) ||
+ size > payload_reader.Length()) {
+ RTC_DLOG(LS_ERROR) << "Malformed AV1 input: declared size " << size
+ << " is larger than remaining buffer size "
+ << payload_reader.Length();
+ return {};
+ }
+ obu.payload = rtc::MakeArrayView(
+ reinterpret_cast<const uint8_t*>(payload_reader.Data()), size);
+ payload_reader.Consume(size);
+ }
+ obu.size += obu.payload.size();
+ // Skip OBUs that shouldn't be transferred over RTP.
+ int obu_type = ObuType(obu.header);
+ if (obu_type != kObuTypeTemporalDelimiter && //
+ obu_type != kObuTypeTileList && //
+ obu_type != kObuTypePadding) {
+ result.push_back(obu);
+ }
+ }
+ return result;
+}
+
+int RtpPacketizerAv1::AdditionalBytesForPreviousObuElement(
+ const Packet& packet) {
+ if (packet.packet_size == 0) {
+ // Packet is still empty => no last OBU element, no need to reserve space
+ // for it.
+ return 0;
+ }
+ if (packet.num_obu_elements > kMaxNumObusToOmitSize) {
+ // There are so many OBU elements in the packet that all of them must be
+ // prepended with a length field. That implies space for the length of the
+ // last OBU element is already reserved.
+ return 0;
+ }
+ // No space was reserved for the length field of the last OBU element, but
+ // that element is becoming non-last, so it now requires an explicit length
+ // field. Calculate how many bytes are needed to store the length in leb128
+ // format.
+ return Leb128Size(packet.last_obu_size);
+}
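+
+// For example, if a packet holds at most kMaxNumObusToOmitSize elements and
+// its last OBU element is 300 bytes, appending another element forces a
+// 2-byte leb128 length prefix onto it, so the function above returns 2.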
+
+std::vector<RtpPacketizerAv1::Packet> RtpPacketizerAv1::Packetize(
+ rtc::ArrayView<const Obu> obus,
+ PayloadSizeLimits limits) {
+ std::vector<Packet> packets;
+ if (obus.empty()) {
+ return packets;
+ }
+ // Ignore certain edge cases where packets would have to be very small. They
+ // are impractical but add complexity to handle.
+ if (limits.max_payload_len - limits.last_packet_reduction_len < 3 ||
+ limits.max_payload_len - limits.first_packet_reduction_len < 3) {
+ RTC_DLOG(LS_ERROR) << "Failed to packetize AV1 frame: requested packet "
+ "size is unreasonable small.";
+ return packets;
+ }
+ // Aggregation header is present in all packets.
+ limits.max_payload_len -= kAggregationHeaderSize;
+
+ // Assemble packets. Push as much as the current packet can hold before
+ // considering the next one. That normally causes an uneven distribution
+ // across packets; in particular, the last one is generally smaller.
+ packets.emplace_back(/*first_obu_index=*/0);
+ int packet_remaining_bytes =
+ limits.max_payload_len - limits.first_packet_reduction_len;
+ for (size_t obu_index = 0; obu_index < obus.size(); ++obu_index) {
+ const bool is_last_obu = obu_index == obus.size() - 1;
+ const Obu& obu = obus[obu_index];
+
+ // Putting `obu` into the last packet would make the last OBU element stored
+ // in that packet non-last. All non-last OBU elements must be prepended with
+ // the element length. AdditionalBytesForPreviousObuElement calculates how
+ // many bytes are needed to store that length.
+ int previous_obu_extra_size =
+ AdditionalBytesForPreviousObuElement(packets.back());
+ int min_required_size =
+ packets.back().num_obu_elements >= kMaxNumObusToOmitSize ? 2 : 1;
+ if (packet_remaining_bytes < previous_obu_extra_size + min_required_size) {
+ // Start a new packet.
+ packets.emplace_back(/*first_obu_index=*/obu_index);
+ packet_remaining_bytes = limits.max_payload_len;
+ previous_obu_extra_size = 0;
+ }
+ Packet& packet = packets.back();
+ // Start inserting current obu into the packet.
+ packet.packet_size += previous_obu_extra_size;
+ packet_remaining_bytes -= previous_obu_extra_size;
+ packet.num_obu_elements++;
+
+ bool must_write_obu_element_size =
+ packet.num_obu_elements > kMaxNumObusToOmitSize;
+ // Can fit all of the obu into the packet?
+ int required_bytes = obu.size;
+ if (must_write_obu_element_size) {
+ required_bytes += Leb128Size(obu.size);
+ }
+ int available_bytes = packet_remaining_bytes;
+ if (is_last_obu) {
+ // If this packet would be the last packet, available size is smaller.
+ if (packets.size() == 1) {
+ available_bytes += limits.first_packet_reduction_len;
+ available_bytes -= limits.single_packet_reduction_len;
+ } else {
+ available_bytes -= limits.last_packet_reduction_len;
+ }
+ }
+ if (required_bytes <= available_bytes) {
+ // Insert the obu into the packet unfragmented.
+ packet.last_obu_size = obu.size;
+ packet.packet_size += required_bytes;
+ packet_remaining_bytes -= required_bytes;
+ continue;
+ }
+
+ // Fragment the obu.
+ int max_first_fragment_size = must_write_obu_element_size
+ ? MaxFragmentSize(packet_remaining_bytes)
+ : packet_remaining_bytes;
+ // Because available_bytes might differ from packet_remaining_bytes, it
+ // might happen that max_first_fragment_size >= obu.size. Also, since the
+ // checks above verified `obu` should not be put completely into the
+ // `packet`, leave at least 1 byte for a later packet.
+ int first_fragment_size = std::min(obu.size - 1, max_first_fragment_size);
+ if (first_fragment_size == 0) {
+ // Rather than writing 0-size element at the tail of the packet,
+ // 'uninsert' the `obu` from the `packet`.
+ packet.num_obu_elements--;
+ packet.packet_size -= previous_obu_extra_size;
+ } else {
+ packet.packet_size += first_fragment_size;
+ if (must_write_obu_element_size) {
+ packet.packet_size += Leb128Size(first_fragment_size);
+ }
+ packet.last_obu_size = first_fragment_size;
+ }
+
+    // Add middle fragments, each occupying a full packet.
+    // These are easy because
+    // - one obu per packet implies no need to store the size of the obu.
+    // - these packets are neither the first nor the last packets of the
+    //   frame, so packet capacity is always limits.max_payload_len.
+ int obu_offset;
+ for (obu_offset = first_fragment_size;
+ obu_offset + limits.max_payload_len < obu.size;
+ obu_offset += limits.max_payload_len) {
+ packets.emplace_back(/*first_obu_index=*/obu_index);
+ Packet& packet = packets.back();
+ packet.num_obu_elements = 1;
+ packet.first_obu_offset = obu_offset;
+ int middle_fragment_size = limits.max_payload_len;
+ packet.last_obu_size = middle_fragment_size;
+ packet.packet_size = middle_fragment_size;
+ }
+
+ // Add the last fragment of the obu.
+ int last_fragment_size = obu.size - obu_offset;
+    // Check for the corner case where the last fragment of the last obu is
+    // too large to fit into the last packet, but may fully fit into the
+    // semi-last packet.
+ if (is_last_obu &&
+ last_fragment_size >
+ limits.max_payload_len - limits.last_packet_reduction_len) {
+      // Split the last fragment into two.
+ RTC_DCHECK_GE(last_fragment_size, 2);
+      // Try to even out packet sizes, rather than payload sizes, across the
+      // last two packets.
+ int semi_last_fragment_size =
+ (last_fragment_size + limits.last_packet_reduction_len) / 2;
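+      // E.g. last_fragment_size = 10 with last_packet_reduction_len = 4 is
+      // split into fragments of 7 and 3 bytes, so both packets effectively
+      // occupy 7 bytes.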
+      // But leave at least one payload byte for the last packet to avoid the
+      // weird scenario where the fragment size is zero and the rtp payload
+      // has nothing except an aggregation header.
+ if (semi_last_fragment_size >= last_fragment_size) {
+ semi_last_fragment_size = last_fragment_size - 1;
+ }
+ last_fragment_size -= semi_last_fragment_size;
+
+ packets.emplace_back(/*first_obu_index=*/obu_index);
+ Packet& packet = packets.back();
+ packet.num_obu_elements = 1;
+ packet.first_obu_offset = obu_offset;
+ packet.last_obu_size = semi_last_fragment_size;
+ packet.packet_size = semi_last_fragment_size;
+ obu_offset += semi_last_fragment_size;
+ }
+ packets.emplace_back(/*first_obu_index=*/obu_index);
+ Packet& last_packet = packets.back();
+ last_packet.num_obu_elements = 1;
+ last_packet.first_obu_offset = obu_offset;
+ last_packet.last_obu_size = last_fragment_size;
+ last_packet.packet_size = last_fragment_size;
+ packet_remaining_bytes = limits.max_payload_len - last_fragment_size;
+ }
+ return packets;
+}
+
+uint8_t RtpPacketizerAv1::AggregationHeader() const {
+ const Packet& packet = packets_[packet_index_];
+ uint8_t aggregation_header = 0;
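+  // For reference, the aggregation header layout defined by the AV1 RTP
+  // payload format:
+  //  0 1 2 3 4 5 6 7
+  // +-+-+-+-+-+-+-+-+
+  // |Z|Y| W |N|-|-|-|
+  // +-+-+-+-+-+-+-+-+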
+
+  // Set Z flag: the first obu element is a continuation of the previous OBU.
+ bool first_obu_element_is_fragment = packet.first_obu_offset > 0;
+ if (first_obu_element_is_fragment)
+ aggregation_header |= (1 << 7);
+
+  // Set Y flag: the last obu element will be continued in the next packet.
+ int last_obu_offset =
+ packet.num_obu_elements == 1 ? packet.first_obu_offset : 0;
+ bool last_obu_is_fragment =
+ last_obu_offset + packet.last_obu_size <
+ obus_[packet.first_obu + packet.num_obu_elements - 1].size;
+ if (last_obu_is_fragment)
+ aggregation_header |= (1 << 6);
+
+ // Set W field: number of obu elements in the packet (when not too large).
+ if (packet.num_obu_elements <= kMaxNumObusToOmitSize)
+ aggregation_header |= packet.num_obu_elements << 4;
+
+ // Set N flag: beginning of a new coded video sequence.
+  // The encoder may produce a key frame without a sequence header, so double
+  // check that the incoming frame includes one. Since the temporal delimiter
+  // is already filtered out, the sequence header should be the first obu when
+  // present.
+ if (frame_type_ == VideoFrameType::kVideoFrameKey && packet_index_ == 0 &&
+ ObuType(obus_.front().header) == kObuTypeSequenceHeader) {
+ aggregation_header |= (1 << 3);
+ }
+ return aggregation_header;
+}
+
+bool RtpPacketizerAv1::NextPacket(RtpPacketToSend* packet) {
+ if (packet_index_ >= packets_.size()) {
+ return false;
+ }
+ const Packet& next_packet = packets_[packet_index_];
+
+ RTC_DCHECK_GT(next_packet.num_obu_elements, 0);
+ RTC_DCHECK_LT(next_packet.first_obu_offset,
+ obus_[next_packet.first_obu].size);
+ RTC_DCHECK_LE(
+ next_packet.last_obu_size,
+ obus_[next_packet.first_obu + next_packet.num_obu_elements - 1].size);
+
+ uint8_t* const rtp_payload =
+ packet->AllocatePayload(kAggregationHeaderSize + next_packet.packet_size);
+ uint8_t* write_at = rtp_payload;
+
+ *write_at++ = AggregationHeader();
+
+ int obu_offset = next_packet.first_obu_offset;
+ // Store all OBU elements except the last one.
+ for (int i = 0; i < next_packet.num_obu_elements - 1; ++i) {
+ const Obu& obu = obus_[next_packet.first_obu + i];
+ size_t fragment_size = obu.size - obu_offset;
+ write_at += WriteLeb128(fragment_size, write_at);
+ if (obu_offset == 0) {
+ *write_at++ = obu.header & ~kObuSizePresentBit;
+ }
+ if (obu_offset <= 1 && ObuHasExtension(obu.header)) {
+ *write_at++ = obu.extension_header;
+ }
+ int payload_offset =
+ std::max(0, obu_offset - (ObuHasExtension(obu.header) ? 2 : 1));
+ size_t payload_size = obu.payload.size() - payload_offset;
+ memcpy(write_at, obu.payload.data() + payload_offset, payload_size);
+ write_at += payload_size;
+    // All obus are stored from the beginning, except possibly the first one.
+ obu_offset = 0;
+ }
+ // Store the last OBU element.
+ const Obu& last_obu =
+ obus_[next_packet.first_obu + next_packet.num_obu_elements - 1];
+ int fragment_size = next_packet.last_obu_size;
+ RTC_DCHECK_GT(fragment_size, 0);
+ if (next_packet.num_obu_elements > kMaxNumObusToOmitSize) {
+ write_at += WriteLeb128(fragment_size, write_at);
+ }
+ if (obu_offset == 0 && fragment_size > 0) {
+ *write_at++ = last_obu.header & ~kObuSizePresentBit;
+ --fragment_size;
+ }
+ if (obu_offset <= 1 && ObuHasExtension(last_obu.header) &&
+ fragment_size > 0) {
+ *write_at++ = last_obu.extension_header;
+ --fragment_size;
+ }
+ RTC_DCHECK_EQ(write_at - rtp_payload + fragment_size,
+ kAggregationHeaderSize + next_packet.packet_size);
+ int payload_offset =
+ std::max(0, obu_offset - (ObuHasExtension(last_obu.header) ? 2 : 1));
+ memcpy(write_at, last_obu.payload.data() + payload_offset, fragment_size);
+ write_at += fragment_size;
+
+ RTC_DCHECK_EQ(write_at - rtp_payload,
+ kAggregationHeaderSize + next_packet.packet_size);
+
+ ++packet_index_;
+ bool is_last_packet_in_frame = packet_index_ == packets_.size();
+ packet->SetMarker(is_last_packet_in_frame && is_last_frame_in_picture_);
+ return true;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_packetizer_av1.h b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_packetizer_av1.h
new file mode 100644
index 0000000000..520e746eac
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_packetizer_av1.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_RTP_RTCP_SOURCE_RTP_PACKETIZER_AV1_H_
+#define MODULES_RTP_RTCP_SOURCE_RTP_PACKETIZER_AV1_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <vector>
+
+#include "api/array_view.h"
+#include "api/video/video_frame_type.h"
+#include "modules/rtp_rtcp/source/rtp_format.h"
+
+namespace webrtc {
+
+class RtpPacketizerAv1 : public RtpPacketizer {
+ public:
+ RtpPacketizerAv1(rtc::ArrayView<const uint8_t> payload,
+ PayloadSizeLimits limits,
+ VideoFrameType frame_type,
+ bool is_last_frame_in_picture);
+ ~RtpPacketizerAv1() override = default;
+
+ size_t NumPackets() const override { return packets_.size() - packet_index_; }
+ bool NextPacket(RtpPacketToSend* packet) override;
+
+ private:
+ struct Obu {
+ uint8_t header;
+ uint8_t extension_header; // undefined if (header & kXbit) == 0
+ rtc::ArrayView<const uint8_t> payload;
+ int size; // size of the header and payload combined.
+ };
+ struct Packet {
+ explicit Packet(int first_obu_index) : first_obu(first_obu_index) {}
+    // Index in the obus_ vector of the first obu element that should be put
+    // into this packet.
+ int first_obu;
+ int num_obu_elements = 0;
+ int first_obu_offset = 0;
+ int last_obu_size;
+ // Total size consumed by the packet.
+ int packet_size = 0;
+ };
+
+  // Parses the payload into a series of OBUs.
+ static std::vector<Obu> ParseObus(rtc::ArrayView<const uint8_t> payload);
+ // Returns the number of additional bytes needed to store the previous OBU
+  // element if an additional OBU element is added to the packet.
+ static int AdditionalBytesForPreviousObuElement(const Packet& packet);
+ static std::vector<Packet> Packetize(rtc::ArrayView<const Obu> obus,
+ PayloadSizeLimits limits);
+ uint8_t AggregationHeader() const;
+
+ const VideoFrameType frame_type_;
+ const std::vector<Obu> obus_;
+ const std::vector<Packet> packets_;
+ const bool is_last_frame_in_picture_;
+ size_t packet_index_ = 0;
+};
+
+} // namespace webrtc
+#endif // MODULES_RTP_RTCP_SOURCE_RTP_PACKETIZER_AV1_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_packetizer_av1_test_helper.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_packetizer_av1_test_helper.cc
new file mode 100644
index 0000000000..3d62bcef44
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_packetizer_av1_test_helper.cc
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtp_packetizer_av1_test_helper.h"
+
+#include <stdint.h>
+
+#include <initializer_list>
+#include <vector>
+
+namespace webrtc {
+
+Av1Obu::Av1Obu(uint8_t obu_type) : header_(obu_type | kAv1ObuSizePresentBit) {}
+
+Av1Obu& Av1Obu::WithExtension(uint8_t extension) {
+ extension_ = extension;
+ header_ |= kAv1ObuExtensionPresentBit;
+ return *this;
+}
+Av1Obu& Av1Obu::WithoutSize() {
+ header_ &= ~kAv1ObuSizePresentBit;
+ return *this;
+}
+Av1Obu& Av1Obu::WithPayload(std::vector<uint8_t> payload) {
+ payload_ = std::move(payload);
+ return *this;
+}
+
+std::vector<uint8_t> BuildAv1Frame(std::initializer_list<Av1Obu> obus) {
+ std::vector<uint8_t> raw;
+ for (const Av1Obu& obu : obus) {
+ raw.push_back(obu.header_);
+ if (obu.header_ & kAv1ObuExtensionPresentBit) {
+ raw.push_back(obu.extension_);
+ }
+ if (obu.header_ & kAv1ObuSizePresentBit) {
+      // Write the size in leb128 format.
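+      // E.g. payload_size = 300 (0b1'0010'1100) is written as 0xAC 0x02: the
+      // low 7 bits with the continuation bit set, then the remaining bits.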
+ size_t payload_size = obu.payload_.size();
+ while (payload_size >= 0x80) {
+ raw.push_back(0x80 | (payload_size & 0x7F));
+ payload_size >>= 7;
+ }
+ raw.push_back(payload_size);
+ }
+ raw.insert(raw.end(), obu.payload_.begin(), obu.payload_.end());
+ }
+ return raw;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_packetizer_av1_test_helper.h b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_packetizer_av1_test_helper.h
new file mode 100644
index 0000000000..04a902fe56
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_packetizer_av1_test_helper.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_RTP_RTCP_SOURCE_RTP_PACKETIZER_AV1_TEST_HELPER_H_
+#define MODULES_RTP_RTCP_SOURCE_RTP_PACKETIZER_AV1_TEST_HELPER_H_
+
+#include <stdint.h>
+
+#include <initializer_list>
+#include <utility>
+#include <vector>
+
+namespace webrtc {
+// All obu types are shifted left by 3 bits to occupy the correct position in
+// the obu_header.
+constexpr uint8_t kAv1ObuTypeSequenceHeader = 1 << 3;
+constexpr uint8_t kAv1ObuTypeTemporalDelimiter = 2 << 3;
+constexpr uint8_t kAv1ObuTypeFrameHeader = 3 << 3;
+constexpr uint8_t kAv1ObuTypeTileGroup = 4 << 3;
+constexpr uint8_t kAv1ObuTypeMetadata = 5 << 3;
+constexpr uint8_t kAv1ObuTypeFrame = 6 << 3;
+constexpr uint8_t kAv1ObuTypeTileList = 8 << 3;
+constexpr uint8_t kAv1ObuExtensionPresentBit = 0b0'0000'100;
+constexpr uint8_t kAv1ObuSizePresentBit = 0b0'0000'010;
+constexpr uint8_t kAv1ObuExtensionS1T1 = 0b001'01'000;
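+// The obu extension header packs temporal_id (3 bits), spatial_id (2 bits)
+// and 3 reserved bits, so kAv1ObuExtensionS1T1 encodes temporal_id = 1 and
+// spatial_id = 1.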
+
+class Av1Obu {
+ public:
+ explicit Av1Obu(uint8_t obu_type);
+
+ Av1Obu& WithExtension(uint8_t extension);
+ Av1Obu& WithoutSize();
+ Av1Obu& WithPayload(std::vector<uint8_t> payload);
+
+ private:
+ friend std::vector<uint8_t> BuildAv1Frame(std::initializer_list<Av1Obu> obus);
+ uint8_t header_;
+ uint8_t extension_ = 0;
+ std::vector<uint8_t> payload_;
+};
+
+std::vector<uint8_t> BuildAv1Frame(std::initializer_list<Av1Obu> obus);
+
+} // namespace webrtc
+#endif // MODULES_RTP_RTCP_SOURCE_RTP_PACKETIZER_AV1_TEST_HELPER_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_packetizer_av1_unittest.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_packetizer_av1_unittest.cc
new file mode 100644
index 0000000000..2151a59295
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_packetizer_av1_unittest.cc
@@ -0,0 +1,342 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtp_packetizer_av1.h"
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <initializer_list>
+#include <utility>
+#include <vector>
+
+#include "api/array_view.h"
+#include "api/scoped_refptr.h"
+#include "api/video/encoded_image.h"
+#include "modules/rtp_rtcp/source/rtp_packet_to_send.h"
+#include "modules/rtp_rtcp/source/rtp_packetizer_av1_test_helper.h"
+#include "modules/rtp_rtcp/source/video_rtp_depacketizer_av1.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+using ::testing::Each;
+using ::testing::ElementsAre;
+using ::testing::ElementsAreArray;
+using ::testing::Le;
+using ::testing::SizeIs;
+
+constexpr uint8_t kNewCodedVideoSequenceBit = 0b00'00'1000;
+
+// Wrapper around rtp_packet to make it look like a container of payload
+// bytes.
+struct RtpPayload {
+ using value_type = rtc::ArrayView<const uint8_t>::value_type;
+ using const_iterator = rtc::ArrayView<const uint8_t>::const_iterator;
+
+ RtpPayload() : rtp_packet(/*extensions=*/nullptr) {}
+ RtpPayload& operator=(RtpPayload&&) = default;
+ RtpPayload(RtpPayload&&) = default;
+
+ const_iterator begin() const { return rtp_packet.payload().begin(); }
+ const_iterator end() const { return rtp_packet.payload().end(); }
+ const uint8_t* data() const { return rtp_packet.payload().data(); }
+ size_t size() const { return rtp_packet.payload().size(); }
+
+ uint8_t aggregation_header() const { return rtp_packet.payload()[0]; }
+
+ RtpPacketToSend rtp_packet;
+};
+
+// Wrapper around a frame pointer to make it look like a container of bytes,
+// where a nullptr frame looks like an empty container.
+class Av1Frame {
+ public:
+ using value_type = uint8_t;
+ using const_iterator = const uint8_t*;
+
+ explicit Av1Frame(rtc::scoped_refptr<EncodedImageBuffer> frame)
+ : frame_(std::move(frame)) {}
+
+ const_iterator begin() const { return frame_ ? frame_->data() : nullptr; }
+ const_iterator end() const {
+ return frame_ ? (frame_->data() + frame_->size()) : nullptr;
+ }
+
+ private:
+ rtc::scoped_refptr<EncodedImageBuffer> frame_;
+};
+
+std::vector<RtpPayload> Packetize(
+ rtc::ArrayView<const uint8_t> payload,
+ RtpPacketizer::PayloadSizeLimits limits,
+ VideoFrameType frame_type = VideoFrameType::kVideoFrameDelta,
+ bool is_last_frame_in_picture = true) {
+ // Run code under test.
+ RtpPacketizerAv1 packetizer(payload, limits, frame_type,
+ is_last_frame_in_picture);
+  // Convert the result into a structure that is easier to run expectations
+  // against.
+ std::vector<RtpPayload> result(packetizer.NumPackets());
+ for (RtpPayload& rtp_payload : result) {
+ EXPECT_TRUE(packetizer.NextPacket(&rtp_payload.rtp_packet));
+ }
+ return result;
+}
+
+Av1Frame ReassembleFrame(rtc::ArrayView<const RtpPayload> rtp_payloads) {
+ std::vector<rtc::ArrayView<const uint8_t>> payloads(rtp_payloads.size());
+ for (size_t i = 0; i < rtp_payloads.size(); ++i) {
+ payloads[i] = rtp_payloads[i];
+ }
+ return Av1Frame(VideoRtpDepacketizerAv1().AssembleFrame(payloads));
+}
+
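+// Aggregation header literals in the expected payloads below are grouped as
+// 0bZY'WW'N000: the Z and Y fragment-continuation flags, the 2-bit W element
+// count, the N (new coded video sequence) bit, and three zero bits.
+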
+TEST(RtpPacketizerAv1Test, PacketizeOneObuWithoutSizeAndExtension) {
+ auto kFrame = BuildAv1Frame({Av1Obu(kAv1ObuTypeFrame)
+ .WithoutSize()
+ .WithPayload({1, 2, 3, 4, 5, 6, 7})});
+ EXPECT_THAT(Packetize(kFrame, {}),
+ ElementsAre(ElementsAre(0b00'01'0000, // aggregation header
+ kAv1ObuTypeFrame, 1, 2, 3, 4, 5, 6, 7)));
+}
+
+TEST(RtpPacketizerAv1Test, PacketizeOneObuWithoutSizeWithExtension) {
+ auto kFrame = BuildAv1Frame({Av1Obu(kAv1ObuTypeFrame)
+ .WithoutSize()
+ .WithExtension(kAv1ObuExtensionS1T1)
+ .WithPayload({2, 3, 4, 5, 6, 7})});
+ EXPECT_THAT(
+ Packetize(kFrame, {}),
+ ElementsAre(ElementsAre(0b00'01'0000, // aggregation header
+ kAv1ObuTypeFrame | kAv1ObuExtensionPresentBit,
+ kAv1ObuExtensionS1T1, 2, 3, 4, 5, 6, 7)));
+}
+
+TEST(RtpPacketizerAv1Test, RemovesObuSizeFieldWithoutExtension) {
+ auto kFrame = BuildAv1Frame(
+ {Av1Obu(kAv1ObuTypeFrame).WithPayload({11, 12, 13, 14, 15, 16, 17})});
+ EXPECT_THAT(
+ Packetize(kFrame, {}),
+ ElementsAre(ElementsAre(0b00'01'0000, // aggregation header
+ kAv1ObuTypeFrame, 11, 12, 13, 14, 15, 16, 17)));
+}
+
+TEST(RtpPacketizerAv1Test, RemovesObuSizeFieldWithExtension) {
+ auto kFrame = BuildAv1Frame({Av1Obu(kAv1ObuTypeFrame)
+ .WithExtension(kAv1ObuExtensionS1T1)
+ .WithPayload({1, 2, 3, 4, 5, 6, 7})});
+ EXPECT_THAT(
+ Packetize(kFrame, {}),
+ ElementsAre(ElementsAre(0b00'01'0000, // aggregation header
+ kAv1ObuTypeFrame | kAv1ObuExtensionPresentBit,
+ kAv1ObuExtensionS1T1, 1, 2, 3, 4, 5, 6, 7)));
+}
+
+TEST(RtpPacketizerAv1Test, OmitsSizeForLastObuWhenThreeObusFitsIntoThePacket) {
+ auto kFrame = BuildAv1Frame(
+ {Av1Obu(kAv1ObuTypeSequenceHeader).WithPayload({1, 2, 3, 4, 5, 6}),
+ Av1Obu(kAv1ObuTypeMetadata).WithPayload({11, 12, 13, 14}),
+ Av1Obu(kAv1ObuTypeFrame).WithPayload({21, 22, 23, 24, 25, 26})});
+ EXPECT_THAT(Packetize(kFrame, {}),
+ ElementsAre(ElementsAre(
+ 0b00'11'0000, // aggregation header
+ 7, kAv1ObuTypeSequenceHeader, 1, 2, 3, 4, 5, 6, //
+ 5, kAv1ObuTypeMetadata, 11, 12, 13, 14, //
+ kAv1ObuTypeFrame, 21, 22, 23, 24, 25, 26)));
+}
+
+TEST(RtpPacketizerAv1Test, UseSizeForAllObusWhenFourObusFitsIntoThePacket) {
+ auto kFrame = BuildAv1Frame(
+ {Av1Obu(kAv1ObuTypeSequenceHeader).WithPayload({1, 2, 3, 4, 5, 6}),
+ Av1Obu(kAv1ObuTypeMetadata).WithPayload({11, 12, 13, 14}),
+ Av1Obu(kAv1ObuTypeFrameHeader).WithPayload({21, 22, 23}),
+ Av1Obu(kAv1ObuTypeTileGroup).WithPayload({31, 32, 33, 34, 35, 36})});
+ EXPECT_THAT(Packetize(kFrame, {}),
+ ElementsAre(ElementsAre(
+ 0b00'00'0000, // aggregation header
+ 7, kAv1ObuTypeSequenceHeader, 1, 2, 3, 4, 5, 6, //
+ 5, kAv1ObuTypeMetadata, 11, 12, 13, 14, //
+ 4, kAv1ObuTypeFrameHeader, 21, 22, 23, //
+ 7, kAv1ObuTypeTileGroup, 31, 32, 33, 34, 35, 36)));
+}
+
+TEST(RtpPacketizerAv1Test, DiscardsTemporalDelimiterAndTileListObu) {
+ auto kFrame = BuildAv1Frame(
+ {Av1Obu(kAv1ObuTypeTemporalDelimiter), Av1Obu(kAv1ObuTypeMetadata),
+ Av1Obu(kAv1ObuTypeTileList).WithPayload({1, 2, 3, 4, 5, 6}),
+ Av1Obu(kAv1ObuTypeFrameHeader).WithPayload({21, 22, 23}),
+ Av1Obu(kAv1ObuTypeTileGroup).WithPayload({31, 32, 33, 34, 35, 36})});
+
+ EXPECT_THAT(
+ Packetize(kFrame, {}),
+ ElementsAre(ElementsAre(0b00'11'0000, // aggregation header
+ 1,
+ kAv1ObuTypeMetadata, //
+ 4, kAv1ObuTypeFrameHeader, 21, 22,
+ 23, //
+ kAv1ObuTypeTileGroup, 31, 32, 33, 34, 35, 36)));
+}
+
+TEST(RtpPacketizerAv1Test, SplitTwoObusIntoTwoPacketForceSplitObuHeader) {
+  // Craft expected payloads so that there is only one way to split the
+  // original frame into two packets.
+ const uint8_t kExpectPayload1[6] = {
+ 0b01'10'0000, // aggregation_header
+ 3,
+ kAv1ObuTypeFrameHeader | kAv1ObuExtensionPresentBit,
+ kAv1ObuExtensionS1T1,
+ 21, //
+ kAv1ObuTypeTileGroup | kAv1ObuExtensionPresentBit};
+ const uint8_t kExpectPayload2[6] = {0b10'01'0000, // aggregation_header
+ kAv1ObuExtensionS1T1, 11, 12, 13, 14};
+ auto kFrame = BuildAv1Frame({Av1Obu(kAv1ObuTypeFrameHeader)
+ .WithExtension(kAv1ObuExtensionS1T1)
+ .WithPayload({21}),
+ Av1Obu(kAv1ObuTypeTileGroup)
+ .WithExtension(kAv1ObuExtensionS1T1)
+ .WithPayload({11, 12, 13, 14})});
+
+ RtpPacketizer::PayloadSizeLimits limits;
+ limits.max_payload_len = 6;
+ auto payloads = Packetize(kFrame, limits);
+ EXPECT_THAT(payloads, ElementsAre(ElementsAreArray(kExpectPayload1),
+ ElementsAreArray(kExpectPayload2)));
+}
+
+TEST(RtpPacketizerAv1Test,
+ SetsNbitAtTheFirstPacketOfAKeyFrameWithSequenceHeader) {
+ auto kFrame = BuildAv1Frame(
+ {Av1Obu(kAv1ObuTypeSequenceHeader).WithPayload({1, 2, 3, 4, 5, 6, 7})});
+ RtpPacketizer::PayloadSizeLimits limits;
+ limits.max_payload_len = 6;
+ auto packets = Packetize(kFrame, limits, VideoFrameType::kVideoFrameKey);
+ ASSERT_THAT(packets, SizeIs(2));
+ EXPECT_TRUE(packets[0].aggregation_header() & kNewCodedVideoSequenceBit);
+ EXPECT_FALSE(packets[1].aggregation_header() & kNewCodedVideoSequenceBit);
+}
+
+TEST(RtpPacketizerAv1Test,
+ DoesntSetNbitAtThePacketsOfAKeyFrameWithoutSequenceHeader) {
+ auto kFrame = BuildAv1Frame(
+ {Av1Obu(kAv1ObuTypeFrame).WithPayload({1, 2, 3, 4, 5, 6, 7})});
+ RtpPacketizer::PayloadSizeLimits limits;
+ limits.max_payload_len = 6;
+ auto packets = Packetize(kFrame, limits, VideoFrameType::kVideoFrameKey);
+ ASSERT_THAT(packets, SizeIs(2));
+ EXPECT_FALSE(packets[0].aggregation_header() & kNewCodedVideoSequenceBit);
+ EXPECT_FALSE(packets[1].aggregation_header() & kNewCodedVideoSequenceBit);
+}
+
+TEST(RtpPacketizerAv1Test, DoesntSetNbitAtThePacketsOfADeltaFrame) {
+ // Even when that delta frame starts with a (redundant) sequence header.
+ auto kFrame = BuildAv1Frame(
+ {Av1Obu(kAv1ObuTypeSequenceHeader).WithPayload({1, 2, 3, 4, 5, 6, 7})});
+ RtpPacketizer::PayloadSizeLimits limits;
+ limits.max_payload_len = 6;
+ auto packets = Packetize(kFrame, limits, VideoFrameType::kVideoFrameDelta);
+ ASSERT_THAT(packets, SizeIs(2));
+ EXPECT_FALSE(packets[0].aggregation_header() & kNewCodedVideoSequenceBit);
+ EXPECT_FALSE(packets[1].aggregation_header() & kNewCodedVideoSequenceBit);
+}
+
+// There are multiple valid and reasonable ways to split a payload into
+// multiple packets, so do not validate the current choice; instead, use
+// RtpDepacketizer to validate that the frame is reconstructed to the same
+// one. Note: since RtpDepacketizer always inserts obu_size fields in the
+// output, use a frame where each obu has obu_size fields for more
+// straightforward validation.
+TEST(RtpPacketizerAv1Test, SplitSingleObuIntoTwoPackets) {
+ auto kFrame =
+ BuildAv1Frame({Av1Obu(kAv1ObuTypeFrame)
+ .WithPayload({11, 12, 13, 14, 15, 16, 17, 18, 19})});
+
+ RtpPacketizer::PayloadSizeLimits limits;
+ limits.max_payload_len = 8;
+ auto payloads = Packetize(kFrame, limits);
+ EXPECT_THAT(payloads, ElementsAre(SizeIs(Le(8u)), SizeIs(Le(8u))));
+
+ // Use RtpDepacketizer to validate the split.
+ EXPECT_THAT(ReassembleFrame(payloads), ElementsAreArray(kFrame));
+}
+
+TEST(RtpPacketizerAv1Test, SplitSingleObuIntoManyPackets) {
+ auto kFrame = BuildAv1Frame(
+ {Av1Obu(kAv1ObuTypeFrame).WithPayload(std::vector<uint8_t>(1200, 27))});
+
+ RtpPacketizer::PayloadSizeLimits limits;
+ limits.max_payload_len = 100;
+ auto payloads = Packetize(kFrame, limits);
+ EXPECT_THAT(payloads, SizeIs(13u));
+ EXPECT_THAT(payloads, Each(SizeIs(Le(100u))));
+
+ // Use RtpDepacketizer to validate the split.
+ EXPECT_THAT(ReassembleFrame(payloads), ElementsAreArray(kFrame));
+}
+
+TEST(RtpPacketizerAv1Test, SetMarkerBitForLastPacketInEndOfPictureFrame) {
+ auto kFrame = BuildAv1Frame(
+ {Av1Obu(kAv1ObuTypeFrame).WithPayload(std::vector<uint8_t>(200, 27))});
+
+ RtpPacketizer::PayloadSizeLimits limits;
+ limits.max_payload_len = 100;
+ auto payloads = Packetize(kFrame, limits, VideoFrameType::kVideoFrameDelta,
+ /*is_last_frame_in_picture=*/true);
+ ASSERT_THAT(payloads, SizeIs(3u));
+ EXPECT_FALSE(payloads[0].rtp_packet.Marker());
+ EXPECT_FALSE(payloads[1].rtp_packet.Marker());
+ EXPECT_TRUE(payloads[2].rtp_packet.Marker());
+}
+
+TEST(RtpPacketizerAv1Test, DoesntSetMarkerBitForPacketsNotInEndOfPictureFrame) {
+ auto kFrame = BuildAv1Frame(
+ {Av1Obu(kAv1ObuTypeFrame).WithPayload(std::vector<uint8_t>(200, 27))});
+
+ RtpPacketizer::PayloadSizeLimits limits;
+ limits.max_payload_len = 100;
+ auto payloads = Packetize(kFrame, limits, VideoFrameType::kVideoFrameDelta,
+ /*is_last_frame_in_picture=*/false);
+ ASSERT_THAT(payloads, SizeIs(3u));
+ EXPECT_FALSE(payloads[0].rtp_packet.Marker());
+ EXPECT_FALSE(payloads[1].rtp_packet.Marker());
+ EXPECT_FALSE(payloads[2].rtp_packet.Marker());
+}
+
+TEST(RtpPacketizerAv1Test, SplitTwoObusIntoTwoPackets) {
+ // 2nd OBU is too large to fit into one packet, so its head would be in the
+ // same packet as the 1st OBU.
+ auto kFrame = BuildAv1Frame(
+ {Av1Obu(kAv1ObuTypeSequenceHeader).WithPayload({11, 12}),
+ Av1Obu(kAv1ObuTypeFrame).WithPayload({1, 2, 3, 4, 5, 6, 7, 8, 9})});
+
+ RtpPacketizer::PayloadSizeLimits limits;
+ limits.max_payload_len = 8;
+ auto payloads = Packetize(kFrame, limits);
+ EXPECT_THAT(payloads, ElementsAre(SizeIs(Le(8u)), SizeIs(Le(8u))));
+
+ // Use RtpDepacketizer to validate the split.
+ EXPECT_THAT(ReassembleFrame(payloads), ElementsAreArray(kFrame));
+}
+
+TEST(RtpPacketizerAv1Test,
+ SplitSingleObuIntoTwoPacketsBecauseOfSinglePacketLimit) {
+ auto kFrame =
+ BuildAv1Frame({Av1Obu(kAv1ObuTypeFrame)
+ .WithPayload({11, 12, 13, 14, 15, 16, 17, 18, 19})});
+ RtpPacketizer::PayloadSizeLimits limits;
+ limits.max_payload_len = 10;
+ limits.single_packet_reduction_len = 8;
+ auto payloads = Packetize(kFrame, limits);
+ EXPECT_THAT(payloads, ElementsAre(SizeIs(Le(10u)), SizeIs(Le(10u))));
+
+ EXPECT_THAT(ReassembleFrame(payloads), ElementsAreArray(kFrame));
+}
+
+} // namespace
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_rtcp_config.h b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_rtcp_config.h
new file mode 100644
index 0000000000..83df0b184d
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_rtcp_config.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_RTP_RTCP_SOURCE_RTP_RTCP_CONFIG_H_
+#define MODULES_RTP_RTCP_SOURCE_RTP_RTCP_CONFIG_H_
+
+#include "api/units/time_delta.h"
+
+// Configuration file for RTP utilities (RTPSender, RTPReceiver ...)
+namespace webrtc {
+constexpr int kDefaultMaxReorderingThreshold = 50; // In sequence numbers.
+constexpr int kRtcpMaxNackFields = 253;
+
+constexpr TimeDelta RTCP_INTERVAL_RAPID_SYNC_MS =
+    TimeDelta::Millis(100);  // RFC 6051
+constexpr TimeDelta RTCP_SEND_BEFORE_KEY_FRAME = TimeDelta::Millis(100);
+constexpr int RTCP_MAX_REPORT_BLOCKS = 31; // RFC 3550 page 37
+
+enum { RTCP_NUMBER_OF_SR = 60 };
+
+} // namespace webrtc
+
+#endif // MODULES_RTP_RTCP_SOURCE_RTP_RTCP_CONFIG_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_rtcp_impl.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_rtcp_impl.cc
new file mode 100644
index 0000000000..c9797a693f
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_rtcp_impl.cc
@@ -0,0 +1,769 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtp_rtcp_impl.h"
+
+#include <string.h>
+
+#include <algorithm>
+#include <cstdint>
+#include <memory>
+#include <set>
+#include <string>
+#include <utility>
+
+#include "absl/strings/string_view.h"
+#include "api/transport/field_trial_based_config.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/dlrr.h"
+#include "modules/rtp_rtcp/source/rtcp_sender.h"
+#include "modules/rtp_rtcp/source/rtp_rtcp_config.h"
+#include "modules/rtp_rtcp/source/rtp_rtcp_interface.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "system_wrappers/include/ntp_time.h"
+
+#ifdef _WIN32
+// Disable warning C4355: 'this' : used in base member initializer list.
+#pragma warning(disable : 4355)
+#endif
+
+namespace webrtc {
+namespace {
+const int64_t kRtpRtcpRttProcessTimeMs = 1000;
+const int64_t kRtpRtcpBitrateProcessTimeMs = 10;
+const int64_t kDefaultExpectedRetransmissionTimeMs = 125;
+} // namespace
+
+ModuleRtpRtcpImpl::RtpSenderContext::RtpSenderContext(
+ const RtpRtcpInterface::Configuration& config)
+ : packet_history(config.clock, config.enable_rtx_padding_prioritization),
+ sequencer_(config.local_media_ssrc,
+ config.rtx_send_ssrc,
+ /*require_marker_before_media_padding=*/!config.audio,
+ config.clock),
+ packet_sender(config, &packet_history),
+ non_paced_sender(&packet_sender, &sequencer_),
+ packet_generator(
+ config,
+ &packet_history,
+ config.paced_sender ? config.paced_sender : &non_paced_sender) {}
+
+std::unique_ptr<RtpRtcp> RtpRtcp::DEPRECATED_Create(
+ const Configuration& configuration) {
+ RTC_DCHECK(configuration.clock);
+ RTC_LOG(LS_ERROR)
+ << "*********** USING WebRTC INTERNAL IMPLEMENTATION DETAILS ***********";
+ return std::make_unique<ModuleRtpRtcpImpl>(configuration);
+}
+
+ModuleRtpRtcpImpl::ModuleRtpRtcpImpl(const Configuration& configuration)
+ : rtcp_sender_(
+ RTCPSender::Configuration::FromRtpRtcpConfiguration(configuration)),
+ rtcp_receiver_(configuration, this),
+ clock_(configuration.clock),
+ last_bitrate_process_time_(clock_->TimeInMilliseconds()),
+ last_rtt_process_time_(clock_->TimeInMilliseconds()),
+ packet_overhead_(28), // IPV4 UDP.
+ nack_last_time_sent_full_ms_(0),
+ nack_last_seq_number_sent_(0),
+ rtt_stats_(configuration.rtt_stats),
+ rtt_ms_(0) {
+ if (!configuration.receiver_only) {
+ rtp_sender_ = std::make_unique<RtpSenderContext>(configuration);
+    // Make sure the rtcp sender uses the same timestamp offset as the rtp
+    // sender.
+ rtcp_sender_.SetTimestampOffset(
+ rtp_sender_->packet_generator.TimestampOffset());
+ }
+
+ // Set default packet size limit.
+ // TODO(nisse): Kind-of duplicates
+ // webrtc::VideoSendStream::Config::Rtp::kDefaultMaxPacketSize.
+ const size_t kTcpOverIpv4HeaderSize = 40;
+ SetMaxRtpPacketSize(IP_PACKET_SIZE - kTcpOverIpv4HeaderSize);
+}
+
+ModuleRtpRtcpImpl::~ModuleRtpRtcpImpl() = default;
+
+// Process any pending tasks such as timeouts (non time critical events).
+void ModuleRtpRtcpImpl::Process() {
+ const int64_t now = clock_->TimeInMilliseconds();
+
+ if (rtp_sender_) {
+ if (now >= last_bitrate_process_time_ + kRtpRtcpBitrateProcessTimeMs) {
+ rtp_sender_->packet_sender.ProcessBitrateAndNotifyObservers();
+ last_bitrate_process_time_ = now;
+ }
+ }
+
+ // TODO(bugs.webrtc.org/11581): We update the RTT once a second, whereas other
+ // things that run in this method are updated much more frequently. Move the
+ // RTT checking over to the worker thread, which matches better with where the
+ // stats are maintained.
+ bool process_rtt = now >= last_rtt_process_time_ + kRtpRtcpRttProcessTimeMs;
+ if (rtcp_sender_.Sending()) {
+ // Process RTT if we have received a report block and we haven't
+ // processed RTT for at least `kRtpRtcpRttProcessTimeMs` milliseconds.
+ // Note that LastReceivedReportBlockMs() grabs a lock, so check
+ // `process_rtt` first.
+ if (process_rtt && rtt_stats_ != nullptr &&
+ rtcp_receiver_.LastReceivedReportBlockMs() > last_rtt_process_time_) {
+ int64_t max_rtt_ms = 0;
+ for (const auto& block : rtcp_receiver_.GetLatestReportBlockData()) {
+ if (block.last_rtt_ms() > max_rtt_ms) {
+ max_rtt_ms = block.last_rtt_ms();
+ }
+ }
+ // Report the rtt.
+ if (max_rtt_ms > 0) {
+ rtt_stats_->OnRttUpdate(max_rtt_ms);
+ }
+ }
+
+ // Verify receiver reports are delivered and the reported sequence number
+ // is increasing.
+ // TODO(bugs.webrtc.org/11581): The timeout value needs to be checked every
+ // few seconds (see internals of RtcpRrTimeout). Here, we may be polling it
+ // a couple of hundred times a second, which isn't great since it grabs a
+ // lock. Note also that LastReceivedReportBlockMs() (called above) and
+ // RtcpRrTimeout() both grab the same lock and check the same timer, so
+ // it should be possible to consolidate that work somehow.
+ if (rtcp_receiver_.RtcpRrTimeout()) {
+ RTC_LOG_F(LS_WARNING) << "Timeout: No RTCP RR received.";
+ } else if (rtcp_receiver_.RtcpRrSequenceNumberTimeout()) {
+ RTC_LOG_F(LS_WARNING) << "Timeout: No increase in RTCP RR extended "
+ "highest sequence number.";
+ }
+ } else {
+ // Report rtt from receiver.
+ if (process_rtt) {
+ int64_t rtt_ms;
+ if (rtt_stats_ && rtcp_receiver_.GetAndResetXrRrRtt(&rtt_ms)) {
+ rtt_stats_->OnRttUpdate(rtt_ms);
+ }
+ }
+ }
+
+ // Get processed rtt.
+ if (process_rtt) {
+ last_rtt_process_time_ = now;
+ if (rtt_stats_) {
+ // Make sure we have a valid RTT before setting.
+ int64_t last_rtt = rtt_stats_->LastProcessedRtt();
+ if (last_rtt >= 0)
+ set_rtt_ms(last_rtt);
+ }
+ }
+
+ if (rtcp_sender_.TimeToSendRTCPReport())
+ rtcp_sender_.SendRTCP(GetFeedbackState(), kRtcpReport);
+
+ if (rtcp_sender_.TMMBR() && rtcp_receiver_.UpdateTmmbrTimers()) {
+ rtcp_receiver_.NotifyTmmbrUpdated();
+ }
+}
+
+void ModuleRtpRtcpImpl::SetRtxSendStatus(int mode) {
+ rtp_sender_->packet_generator.SetRtxStatus(mode);
+}
+
+int ModuleRtpRtcpImpl::RtxSendStatus() const {
+ return rtp_sender_ ? rtp_sender_->packet_generator.RtxStatus() : kRtxOff;
+}
+
+void ModuleRtpRtcpImpl::SetRtxSendPayloadType(int payload_type,
+ int associated_payload_type) {
+ rtp_sender_->packet_generator.SetRtxPayloadType(payload_type,
+ associated_payload_type);
+}
+
+absl::optional<uint32_t> ModuleRtpRtcpImpl::RtxSsrc() const {
+ return rtp_sender_ ? rtp_sender_->packet_generator.RtxSsrc() : absl::nullopt;
+}
+
+absl::optional<uint32_t> ModuleRtpRtcpImpl::FlexfecSsrc() const {
+ if (rtp_sender_) {
+ return rtp_sender_->packet_generator.FlexfecSsrc();
+ }
+ return absl::nullopt;
+}
+
+void ModuleRtpRtcpImpl::IncomingRtcpPacket(const uint8_t* rtcp_packet,
+ const size_t length) {
+ rtcp_receiver_.IncomingPacket(rtcp_packet, length);
+}
+
+void ModuleRtpRtcpImpl::RegisterSendPayloadFrequency(int payload_type,
+ int payload_frequency) {
+ rtcp_sender_.SetRtpClockRate(payload_type, payload_frequency);
+}
+
+int32_t ModuleRtpRtcpImpl::DeRegisterSendPayload(const int8_t payload_type) {
+ return 0;
+}
+
+uint32_t ModuleRtpRtcpImpl::StartTimestamp() const {
+ return rtp_sender_->packet_generator.TimestampOffset();
+}
+
+// Configure start timestamp, default is a random number.
+void ModuleRtpRtcpImpl::SetStartTimestamp(const uint32_t timestamp) {
+ rtcp_sender_.SetTimestampOffset(timestamp);
+ rtp_sender_->packet_generator.SetTimestampOffset(timestamp);
+ rtp_sender_->packet_sender.SetTimestampOffset(timestamp);
+}
+
+uint16_t ModuleRtpRtcpImpl::SequenceNumber() const {
+ MutexLock lock(&rtp_sender_->sequencer_mutex);
+ return rtp_sender_->sequencer_.media_sequence_number();
+}
+
+// Set SequenceNumber, default is a random number.
+void ModuleRtpRtcpImpl::SetSequenceNumber(const uint16_t seq_num) {
+ MutexLock lock(&rtp_sender_->sequencer_mutex);
+ rtp_sender_->sequencer_.set_media_sequence_number(seq_num);
+}
+
+void ModuleRtpRtcpImpl::SetRtpState(const RtpState& rtp_state) {
+ MutexLock lock(&rtp_sender_->sequencer_mutex);
+ rtp_sender_->packet_generator.SetRtpState(rtp_state);
+ rtp_sender_->sequencer_.SetRtpState(rtp_state);
+ rtcp_sender_.SetTimestampOffset(rtp_state.start_timestamp);
+}
+
+void ModuleRtpRtcpImpl::SetRtxState(const RtpState& rtp_state) {
+ MutexLock lock(&rtp_sender_->sequencer_mutex);
+ rtp_sender_->packet_generator.SetRtxRtpState(rtp_state);
+ rtp_sender_->sequencer_.set_rtx_sequence_number(rtp_state.sequence_number);
+}
+
+RtpState ModuleRtpRtcpImpl::GetRtpState() const {
+ MutexLock lock(&rtp_sender_->sequencer_mutex);
+ RtpState state = rtp_sender_->packet_generator.GetRtpState();
+ rtp_sender_->sequencer_.PopulateRtpState(state);
+ return state;
+}
+
+RtpState ModuleRtpRtcpImpl::GetRtxState() const {
+ MutexLock lock(&rtp_sender_->sequencer_mutex);
+ RtpState state = rtp_sender_->packet_generator.GetRtxRtpState();
+ state.sequence_number = rtp_sender_->sequencer_.rtx_sequence_number();
+ return state;
+}
+
+void ModuleRtpRtcpImpl::SetMid(absl::string_view mid) {
+ if (rtp_sender_) {
+ rtp_sender_->packet_generator.SetMid(mid);
+ }
+ // TODO(bugs.webrtc.org/4050): If we end up supporting the MID SDES item for
+ // RTCP, this will need to be passed down to the RTCPSender also.
+}
+
+void ModuleRtpRtcpImpl::SetCsrcs(const std::vector<uint32_t>& csrcs) {
+ rtcp_sender_.SetCsrcs(csrcs);
+ rtp_sender_->packet_generator.SetCsrcs(csrcs);
+}
+
+// TODO(pbos): Handle media and RTX streams separately (separate RTCP
+// feedbacks).
+RTCPSender::FeedbackState ModuleRtpRtcpImpl::GetFeedbackState() {
+ RTCPSender::FeedbackState state;
+  // This is also called when receiver_only is true. Hence the check below
+  // that rtp_sender_ exists.
+ if (rtp_sender_) {
+ StreamDataCounters rtp_stats;
+ StreamDataCounters rtx_stats;
+ rtp_sender_->packet_sender.GetDataCounters(&rtp_stats, &rtx_stats);
+ state.packets_sent =
+ rtp_stats.transmitted.packets + rtx_stats.transmitted.packets;
+ state.media_bytes_sent = rtp_stats.transmitted.payload_bytes +
+ rtx_stats.transmitted.payload_bytes;
+ state.send_bitrate =
+ rtp_sender_->packet_sender.GetSendRates().Sum().bps<uint32_t>();
+ }
+ state.receiver = &rtcp_receiver_;
+
+ uint32_t received_ntp_secs = 0;
+ uint32_t received_ntp_frac = 0;
+ state.remote_sr = 0;
+ if (rtcp_receiver_.NTP(&received_ntp_secs, &received_ntp_frac,
+ /*rtcp_arrival_time_secs=*/&state.last_rr_ntp_secs,
+ /*rtcp_arrival_time_frac=*/&state.last_rr_ntp_frac,
+ /*rtcp_timestamp=*/nullptr,
+ /*remote_sender_packet_count=*/nullptr,
+ /*remote_sender_octet_count=*/nullptr,
+ /*remote_sender_reports_count=*/nullptr)) {
+ state.remote_sr = ((received_ntp_secs & 0x0000ffff) << 16) +
+ ((received_ntp_frac & 0xffff0000) >> 16);
+ }
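+  // remote_sr above is the compact "middle 32 bits" NTP representation used
+  // for the RTCP LSR field (RFC 3550); e.g. NTP time 0x00010002.0x00030004
+  // becomes 0x00020003.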
+
+ state.last_xr_rtis = rtcp_receiver_.ConsumeReceivedXrReferenceTimeInfo();
+
+ return state;
+}
+
+int32_t ModuleRtpRtcpImpl::SetSendingStatus(const bool sending) {
+ if (rtcp_sender_.Sending() != sending) {
+    // Sends an RTCP BYE when going from true to false.
+ rtcp_sender_.SetSendingStatus(GetFeedbackState(), sending);
+ }
+ return 0;
+}
+
+bool ModuleRtpRtcpImpl::Sending() const {
+ return rtcp_sender_.Sending();
+}
+
+void ModuleRtpRtcpImpl::SetSendingMediaStatus(const bool sending) {
+ rtp_sender_->packet_generator.SetSendingMediaStatus(sending);
+}
+
+bool ModuleRtpRtcpImpl::SendingMedia() const {
+ return rtp_sender_ ? rtp_sender_->packet_generator.SendingMedia() : false;
+}
+
+bool ModuleRtpRtcpImpl::IsAudioConfigured() const {
+ return rtp_sender_ ? rtp_sender_->packet_generator.IsAudioConfigured()
+ : false;
+}
+
+void ModuleRtpRtcpImpl::SetAsPartOfAllocation(bool part_of_allocation) {
+ RTC_CHECK(rtp_sender_);
+ rtp_sender_->packet_sender.ForceIncludeSendPacketsInAllocation(
+ part_of_allocation);
+}
+
+bool ModuleRtpRtcpImpl::OnSendingRtpFrame(uint32_t timestamp,
+ int64_t capture_time_ms,
+ int payload_type,
+ bool force_sender_report) {
+ if (!Sending())
+ return false;
+
+  // TODO(bugs.webrtc.org/12873): Migrate this method and its users to use
+  // optional Timestamps.
+ absl::optional<Timestamp> capture_time;
+ if (capture_time_ms > 0) {
+ capture_time = Timestamp::Millis(capture_time_ms);
+ }
+ absl::optional<int> payload_type_optional;
+ if (payload_type >= 0)
+ payload_type_optional = payload_type;
+ rtcp_sender_.SetLastRtpTime(timestamp, capture_time, payload_type_optional);
+ // Make sure an RTCP report isn't queued behind a key frame.
+ if (rtcp_sender_.TimeToSendRTCPReport(force_sender_report))
+ rtcp_sender_.SendRTCP(GetFeedbackState(), kRtcpReport);
+
+ return true;
+}
+
+bool ModuleRtpRtcpImpl::TrySendPacket(RtpPacketToSend* packet,
+ const PacedPacketInfo& pacing_info) {
+ RTC_DCHECK(rtp_sender_);
+ // TODO(sprang): Consider if we can remove this check.
+ if (!rtp_sender_->packet_generator.SendingMedia()) {
+ return false;
+ }
+ {
+ MutexLock lock(&rtp_sender_->sequencer_mutex);
+ if (packet->packet_type() == RtpPacketMediaType::kPadding &&
+ packet->Ssrc() == rtp_sender_->packet_generator.SSRC() &&
+ !rtp_sender_->sequencer_.CanSendPaddingOnMediaSsrc()) {
+      // New media packet preempted this generated padding packet; discard it.
+ return false;
+ }
+ bool is_flexfec =
+ packet->packet_type() == RtpPacketMediaType::kForwardErrorCorrection &&
+ packet->Ssrc() == rtp_sender_->packet_generator.FlexfecSsrc();
+ if (!is_flexfec) {
+ rtp_sender_->sequencer_.Sequence(*packet);
+ }
+ }
+ rtp_sender_->packet_sender.SendPacket(packet, pacing_info);
+ return true;
+}
+
+void ModuleRtpRtcpImpl::SetFecProtectionParams(const FecProtectionParams&,
+ const FecProtectionParams&) {
+ // Deferred FEC not supported in deprecated RTP module.
+}
+
+std::vector<std::unique_ptr<RtpPacketToSend>>
+ModuleRtpRtcpImpl::FetchFecPackets() {
+ // Deferred FEC not supported in deprecated RTP module.
+ return {};
+}
+
+void ModuleRtpRtcpImpl::OnPacketsAcknowledged(
+ rtc::ArrayView<const uint16_t> sequence_numbers) {
+ RTC_DCHECK(rtp_sender_);
+ rtp_sender_->packet_history.CullAcknowledgedPackets(sequence_numbers);
+}
+
+bool ModuleRtpRtcpImpl::SupportsPadding() const {
+ RTC_DCHECK(rtp_sender_);
+ return rtp_sender_->packet_generator.SupportsPadding();
+}
+
+bool ModuleRtpRtcpImpl::SupportsRtxPayloadPadding() const {
+ RTC_DCHECK(rtp_sender_);
+ return rtp_sender_->packet_generator.SupportsRtxPayloadPadding();
+}
+
+std::vector<std::unique_ptr<RtpPacketToSend>>
+ModuleRtpRtcpImpl::GeneratePadding(size_t target_size_bytes) {
+ RTC_DCHECK(rtp_sender_);
+ MutexLock lock(&rtp_sender_->sequencer_mutex);
+ return rtp_sender_->packet_generator.GeneratePadding(
+ target_size_bytes, rtp_sender_->packet_sender.MediaHasBeenSent(),
+ rtp_sender_->sequencer_.CanSendPaddingOnMediaSsrc());
+}
+
+std::vector<RtpSequenceNumberMap::Info>
+ModuleRtpRtcpImpl::GetSentRtpPacketInfos(
+ rtc::ArrayView<const uint16_t> sequence_numbers) const {
+ RTC_DCHECK(rtp_sender_);
+ return rtp_sender_->packet_sender.GetSentRtpPacketInfos(sequence_numbers);
+}
+
+size_t ModuleRtpRtcpImpl::ExpectedPerPacketOverhead() const {
+ if (!rtp_sender_) {
+ return 0;
+ }
+ return rtp_sender_->packet_generator.ExpectedPerPacketOverhead();
+}
+
+void ModuleRtpRtcpImpl::OnPacketSendingThreadSwitched() {}
+
+size_t ModuleRtpRtcpImpl::MaxRtpPacketSize() const {
+ RTC_DCHECK(rtp_sender_);
+ return rtp_sender_->packet_generator.MaxRtpPacketSize();
+}
+
+void ModuleRtpRtcpImpl::SetMaxRtpPacketSize(size_t rtp_packet_size) {
+ RTC_DCHECK_LE(rtp_packet_size, IP_PACKET_SIZE)
+ << "rtp packet size too large: " << rtp_packet_size;
+ RTC_DCHECK_GT(rtp_packet_size, packet_overhead_)
+ << "rtp packet size too small: " << rtp_packet_size;
+
+ rtcp_sender_.SetMaxRtpPacketSize(rtp_packet_size);
+ if (rtp_sender_) {
+ rtp_sender_->packet_generator.SetMaxRtpPacketSize(rtp_packet_size);
+ }
+}
+
+RtcpMode ModuleRtpRtcpImpl::RTCP() const {
+ return rtcp_sender_.Status();
+}
+
+// Configure RTCP status i.e on/off.
+void ModuleRtpRtcpImpl::SetRTCPStatus(const RtcpMode method) {
+ rtcp_sender_.SetRTCPStatus(method);
+}
+
+int32_t ModuleRtpRtcpImpl::SetCNAME(absl::string_view c_name) {
+ return rtcp_sender_.SetCNAME(c_name);
+}
+
+int32_t ModuleRtpRtcpImpl::RemoteNTP(uint32_t* received_ntpsecs,
+ uint32_t* received_ntpfrac,
+ uint32_t* rtcp_arrival_time_secs,
+ uint32_t* rtcp_arrival_time_frac,
+ uint32_t* rtcp_timestamp) const {
+ return rtcp_receiver_.NTP(received_ntpsecs, received_ntpfrac,
+ rtcp_arrival_time_secs, rtcp_arrival_time_frac,
+ rtcp_timestamp,
+ /*remote_sender_packet_count=*/nullptr,
+ /*remote_sender_octet_count=*/nullptr,
+ /*remote_sender_reports_count=*/nullptr)
+ ? 0
+ : -1;
+}
+
+// Get RoundTripTime.
+int32_t ModuleRtpRtcpImpl::RTT(const uint32_t remote_ssrc,
+ int64_t* rtt,
+ int64_t* avg_rtt,
+ int64_t* min_rtt,
+ int64_t* max_rtt) const {
+ int32_t ret = rtcp_receiver_.RTT(remote_ssrc, rtt, avg_rtt, min_rtt, max_rtt);
+ if (rtt && *rtt == 0) {
+ // Try to get RTT from RtcpRttStats class.
+ *rtt = rtt_ms();
+ }
+ return ret;
+}
+
+int64_t ModuleRtpRtcpImpl::ExpectedRetransmissionTimeMs() const {
+ int64_t expected_retransmission_time_ms = rtt_ms();
+ if (expected_retransmission_time_ms > 0) {
+ return expected_retransmission_time_ms;
+ }
+ // No rtt available (`kRtpRtcpRttProcessTimeMs` not yet passed?), so try to
+ // poll avg_rtt_ms directly from rtcp receiver.
+ if (rtcp_receiver_.RTT(rtcp_receiver_.RemoteSSRC(), nullptr,
+ &expected_retransmission_time_ms, nullptr,
+ nullptr) == 0) {
+ return expected_retransmission_time_ms;
+ }
+ return kDefaultExpectedRetransmissionTimeMs;
+}
+
+// Force a send of an RTCP packet.
+// Normal SR and RR are triggered via the process function.
+int32_t ModuleRtpRtcpImpl::SendRTCP(RTCPPacketType packet_type) {
+ return rtcp_sender_.SendRTCP(GetFeedbackState(), packet_type);
+}
+
+void ModuleRtpRtcpImpl::GetSendStreamDataCounters(
+ StreamDataCounters* rtp_counters,
+ StreamDataCounters* rtx_counters) const {
+ rtp_sender_->packet_sender.GetDataCounters(rtp_counters, rtx_counters);
+}
+
+// Received RTCP report.
+void ModuleRtpRtcpImpl::RemoteRTCPSenderInfo(
+ uint32_t* packet_count, uint32_t* octet_count, int64_t* ntp_timestamp_ms,
+ int64_t* remote_ntp_timestamp_ms) const {
+ return rtcp_receiver_.RemoteRTCPSenderInfo(
+ packet_count, octet_count, ntp_timestamp_ms, remote_ntp_timestamp_ms);
+}
+
+std::vector<ReportBlockData> ModuleRtpRtcpImpl::GetLatestReportBlockData()
+ const {
+ return rtcp_receiver_.GetLatestReportBlockData();
+}
+
+absl::optional<RtpRtcpInterface::SenderReportStats>
+ModuleRtpRtcpImpl::GetSenderReportStats() const {
+ SenderReportStats stats;
+ uint32_t remote_timestamp_secs;
+ uint32_t remote_timestamp_frac;
+ uint32_t arrival_timestamp_secs;
+ uint32_t arrival_timestamp_frac;
+ if (rtcp_receiver_.NTP(&remote_timestamp_secs, &remote_timestamp_frac,
+ &arrival_timestamp_secs, &arrival_timestamp_frac,
+ /*rtcp_timestamp=*/nullptr, &stats.packets_sent,
+ &stats.bytes_sent, &stats.reports_count)) {
+ stats.last_remote_timestamp.Set(remote_timestamp_secs,
+ remote_timestamp_frac);
+ stats.last_arrival_timestamp.Set(arrival_timestamp_secs,
+ arrival_timestamp_frac);
+ return stats;
+ }
+ return absl::nullopt;
+}
+
+absl::optional<RtpRtcpInterface::NonSenderRttStats>
+ModuleRtpRtcpImpl::GetNonSenderRttStats() const {
+ // This is not implemented for this legacy class.
+ return absl::nullopt;
+}
+
+// (REMB) Receiver Estimated Max Bitrate.
+void ModuleRtpRtcpImpl::SetRemb(int64_t bitrate_bps,
+ std::vector<uint32_t> ssrcs) {
+ rtcp_sender_.SetRemb(bitrate_bps, std::move(ssrcs));
+}
+
+void ModuleRtpRtcpImpl::UnsetRemb() {
+ rtcp_sender_.UnsetRemb();
+}
+
+void ModuleRtpRtcpImpl::SetExtmapAllowMixed(bool extmap_allow_mixed) {
+ rtp_sender_->packet_generator.SetExtmapAllowMixed(extmap_allow_mixed);
+}
+
+void ModuleRtpRtcpImpl::RegisterRtpHeaderExtension(absl::string_view uri,
+ int id) {
+ bool registered =
+ rtp_sender_->packet_generator.RegisterRtpHeaderExtension(uri, id);
+ RTC_CHECK(registered);
+}
+
+void ModuleRtpRtcpImpl::DeregisterSendRtpHeaderExtension(
+ absl::string_view uri) {
+ rtp_sender_->packet_generator.DeregisterRtpHeaderExtension(uri);
+}
+
+void ModuleRtpRtcpImpl::SetTmmbn(std::vector<rtcp::TmmbItem> bounding_set) {
+ rtcp_sender_.SetTmmbn(std::move(bounding_set));
+}
+
+// Send a negative acknowledgment (NACK) packet.
+int32_t ModuleRtpRtcpImpl::SendNACK(const uint16_t* nack_list,
+ const uint16_t size) {
+ uint16_t nack_length = size;
+ uint16_t start_id = 0;
+ int64_t now_ms = clock_->TimeInMilliseconds();
+ if (TimeToSendFullNackList(now_ms)) {
+ nack_last_time_sent_full_ms_ = now_ms;
+ } else {
+ // Only send extended list.
+ if (nack_last_seq_number_sent_ == nack_list[size - 1]) {
+ // Last sequence number is the same, do not send list.
+ return 0;
+ }
+ // Send new sequence numbers.
+ for (int i = 0; i < size; ++i) {
+ if (nack_last_seq_number_sent_ == nack_list[i]) {
+ start_id = i + 1;
+ break;
+ }
+ }
+ nack_length = size - start_id;
+ }
+
+ // Our RTCP NACK implementation is limited to kRtcpMaxNackFields sequence
+ // numbers per RTCP packet.
+ if (nack_length > kRtcpMaxNackFields) {
+ nack_length = kRtcpMaxNackFields;
+ }
+ nack_last_seq_number_sent_ = nack_list[start_id + nack_length - 1];
+
+ return rtcp_sender_.SendRTCP(GetFeedbackState(), kRtcpNack, nack_length,
+ &nack_list[start_id]);
+}
+
+void ModuleRtpRtcpImpl::SendNack(
+ const std::vector<uint16_t>& sequence_numbers) {
+ rtcp_sender_.SendRTCP(GetFeedbackState(), kRtcpNack, sequence_numbers.size(),
+ sequence_numbers.data());
+}
+
+bool ModuleRtpRtcpImpl::TimeToSendFullNackList(int64_t now) const {
+ // Use RTT from RtcpRttStats class if provided.
+ int64_t rtt = rtt_ms();
+ if (rtt == 0) {
+ rtcp_receiver_.RTT(rtcp_receiver_.RemoteSSRC(), NULL, &rtt, NULL, NULL);
+ }
+
+ const int64_t kStartUpRttMs = 100;
+ int64_t wait_time = 5 + ((rtt * 3) >> 1); // 5 + RTT * 1.5.
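+  // E.g. rtt = 100 ms gives wait_time = 5 + 150 = 155 ms between full lists.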
+ if (rtt == 0) {
+ wait_time = kStartUpRttMs;
+ }
+
+ // Send a full NACK list once within every `wait_time`.
+ return now - nack_last_time_sent_full_ms_ > wait_time;
+}
+
+// Store the sent packets, needed to answer negative acknowledgment requests.
+void ModuleRtpRtcpImpl::SetStorePacketsStatus(const bool enable,
+ const uint16_t number_to_store) {
+ rtp_sender_->packet_history.SetStorePacketsStatus(
+ enable ? RtpPacketHistory::StorageMode::kStoreAndCull
+ : RtpPacketHistory::StorageMode::kDisabled,
+ number_to_store);
+}
+
+bool ModuleRtpRtcpImpl::StorePackets() const {
+ return rtp_sender_->packet_history.GetStorageMode() !=
+ RtpPacketHistory::StorageMode::kDisabled;
+}
+
+void ModuleRtpRtcpImpl::SendCombinedRtcpPacket(
+ std::vector<std::unique_ptr<rtcp::RtcpPacket>> rtcp_packets) {
+ rtcp_sender_.SendCombinedRtcpPacket(std::move(rtcp_packets));
+}
+
+int32_t ModuleRtpRtcpImpl::SendLossNotification(uint16_t last_decoded_seq_num,
+ uint16_t last_received_seq_num,
+ bool decodability_flag,
+ bool buffering_allowed) {
+ return rtcp_sender_.SendLossNotification(
+ GetFeedbackState(), last_decoded_seq_num, last_received_seq_num,
+ decodability_flag, buffering_allowed);
+}
+
+void ModuleRtpRtcpImpl::SetRemoteSSRC(const uint32_t ssrc) {
+ // Inform about the incoming SSRC.
+ rtcp_sender_.SetRemoteSSRC(ssrc);
+ rtcp_receiver_.SetRemoteSSRC(ssrc);
+}
+
+void ModuleRtpRtcpImpl::SetLocalSsrc(uint32_t local_ssrc) {
+ rtcp_receiver_.set_local_media_ssrc(local_ssrc);
+ rtcp_sender_.SetSsrc(local_ssrc);
+}
+
+RtpSendRates ModuleRtpRtcpImpl::GetSendRates() const {
+ return rtp_sender_->packet_sender.GetSendRates();
+}
+
+void ModuleRtpRtcpImpl::OnRequestSendReport() {
+ SendRTCP(kRtcpSr);
+}
+
+void ModuleRtpRtcpImpl::OnReceivedNack(
+ const std::vector<uint16_t>& nack_sequence_numbers) {
+ if (!rtp_sender_)
+ return;
+
+ if (!StorePackets() || nack_sequence_numbers.empty()) {
+ return;
+ }
+ // Use RTT from RtcpRttStats class if provided.
+ int64_t rtt = rtt_ms();
+ if (rtt == 0) {
+ rtcp_receiver_.RTT(rtcp_receiver_.RemoteSSRC(), NULL, &rtt, NULL, NULL);
+ }
+ rtp_sender_->packet_generator.OnReceivedNack(nack_sequence_numbers, rtt);
+}
+
+void ModuleRtpRtcpImpl::OnReceivedRtcpReportBlocks(
+ const ReportBlockList& report_blocks) {
+ if (rtp_sender_) {
+ uint32_t ssrc = SSRC();
+ absl::optional<uint32_t> rtx_ssrc;
+ if (rtp_sender_->packet_generator.RtxStatus() != kRtxOff) {
+ rtx_ssrc = rtp_sender_->packet_generator.RtxSsrc();
+ }
+
+ for (const RTCPReportBlock& report_block : report_blocks) {
+ if (ssrc == report_block.source_ssrc) {
+ rtp_sender_->packet_generator.OnReceivedAckOnSsrc(
+ report_block.extended_highest_sequence_number);
+ } else if (rtx_ssrc && *rtx_ssrc == report_block.source_ssrc) {
+ rtp_sender_->packet_generator.OnReceivedAckOnRtxSsrc(
+ report_block.extended_highest_sequence_number);
+ }
+ }
+ }
+}
+
+void ModuleRtpRtcpImpl::set_rtt_ms(int64_t rtt_ms) {
+ {
+ MutexLock lock(&mutex_rtt_);
+ rtt_ms_ = rtt_ms;
+ }
+ if (rtp_sender_) {
+ rtp_sender_->packet_history.SetRtt(TimeDelta::Millis(rtt_ms));
+ }
+}
+
+int64_t ModuleRtpRtcpImpl::rtt_ms() const {
+ MutexLock lock(&mutex_rtt_);
+ return rtt_ms_;
+}
+
+void ModuleRtpRtcpImpl::SetVideoBitrateAllocation(
+ const VideoBitrateAllocation& bitrate) {
+ rtcp_sender_.SetVideoBitrateAllocation(bitrate);
+}
+
+RTPSender* ModuleRtpRtcpImpl::RtpSender() {
+ return rtp_sender_ ? &rtp_sender_->packet_generator : nullptr;
+}
+
+const RTPSender* ModuleRtpRtcpImpl::RtpSender() const {
+ return rtp_sender_ ? &rtp_sender_->packet_generator : nullptr;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_rtcp_impl.h b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_rtcp_impl.h
new file mode 100644
index 0000000000..7ba2df598e
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_rtcp_impl.h
@@ -0,0 +1,327 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_RTP_RTCP_SOURCE_RTP_RTCP_IMPL_H_
+#define MODULES_RTP_RTCP_SOURCE_RTP_RTCP_IMPL_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <memory>
+#include <set>
+#include <string>
+#include <vector>
+
+#include "absl/strings/string_view.h"
+#include "absl/types/optional.h"
+#include "api/rtp_headers.h"
+#include "api/video/video_bitrate_allocation.h"
+#include "modules/include/module_fec_types.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h" // RTCPPacketType
+#include "modules/rtp_rtcp/source/deprecated/deprecated_rtp_sender_egress.h"
+#include "modules/rtp_rtcp/source/packet_sequencer.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/tmmb_item.h"
+#include "modules/rtp_rtcp/source/rtcp_receiver.h"
+#include "modules/rtp_rtcp/source/rtcp_sender.h"
+#include "modules/rtp_rtcp/source/rtp_packet_history.h"
+#include "modules/rtp_rtcp/source/rtp_packet_to_send.h"
+#include "modules/rtp_rtcp/source/rtp_sender.h"
+#include "rtc_base/gtest_prod_util.h"
+#include "rtc_base/synchronization/mutex.h"
+
+namespace webrtc {
+
+class Clock;
+struct PacedPacketInfo;
+struct RTPVideoHeader;
+
+// DEPRECATED.
+class ModuleRtpRtcpImpl : public RtpRtcp, public RTCPReceiver::ModuleRtpRtcp {
+ public:
+ explicit ModuleRtpRtcpImpl(
+ const RtpRtcpInterface::Configuration& configuration);
+ ~ModuleRtpRtcpImpl() override;
+
+ // Process any pending tasks such as timeouts.
+ void Process() override;
+
+ // Receiver part.
+
+ // Called when we receive an RTCP packet.
+ void IncomingRtcpPacket(const uint8_t* incoming_packet,
+ size_t incoming_packet_length) override;
+
+ void SetRemoteSSRC(uint32_t ssrc) override;
+ void SetLocalSsrc(uint32_t ssrc) override;
+
+ // Sender part.
+ void RegisterSendPayloadFrequency(int payload_type,
+ int payload_frequency) override;
+
+ int32_t DeRegisterSendPayload(int8_t payload_type) override;
+
+ void SetExtmapAllowMixed(bool extmap_allow_mixed) override;
+
+ // Register RTP header extension.
+ void RegisterRtpHeaderExtension(absl::string_view uri, int id) override;
+ void DeregisterSendRtpHeaderExtension(absl::string_view uri) override;
+
+ bool SupportsPadding() const override;
+ bool SupportsRtxPayloadPadding() const override;
+
+ // Get start timestamp.
+ uint32_t StartTimestamp() const override;
+
+  // Configure the start timestamp; the default is a random number.
+ void SetStartTimestamp(uint32_t timestamp) override;
+
+ uint16_t SequenceNumber() const override;
+
+  // Set the sequence number; the default is a random number.
+ void SetSequenceNumber(uint16_t seq) override;
+
+ void SetRtpState(const RtpState& rtp_state) override;
+ void SetRtxState(const RtpState& rtp_state) override;
+ RtpState GetRtpState() const override;
+ RtpState GetRtxState() const override;
+
+ void SetNonSenderRttMeasurement(bool enabled) override {}
+
+ uint32_t SSRC() const override { return rtcp_sender_.SSRC(); }
+
+ void SetMid(absl::string_view mid) override;
+
+ void SetCsrcs(const std::vector<uint32_t>& csrcs) override;
+
+ RTCPSender::FeedbackState GetFeedbackState();
+
+ void SetRtxSendStatus(int mode) override;
+ int RtxSendStatus() const override;
+ absl::optional<uint32_t> RtxSsrc() const override;
+
+ void SetRtxSendPayloadType(int payload_type,
+ int associated_payload_type) override;
+
+ absl::optional<uint32_t> FlexfecSsrc() const override;
+
+ // Sends kRtcpByeCode when going from true to false.
+ int32_t SetSendingStatus(bool sending) override;
+
+ bool Sending() const override;
+
+ // Drops or relays media packets.
+ void SetSendingMediaStatus(bool sending) override;
+
+ bool SendingMedia() const override;
+
+ bool IsAudioConfigured() const override;
+
+ void SetAsPartOfAllocation(bool part_of_allocation) override;
+
+ bool OnSendingRtpFrame(uint32_t timestamp,
+ int64_t capture_time_ms,
+ int payload_type,
+ bool force_sender_report) override;
+
+ bool TrySendPacket(RtpPacketToSend* packet,
+ const PacedPacketInfo& pacing_info) override;
+
+ void SetFecProtectionParams(const FecProtectionParams& delta_params,
+ const FecProtectionParams& key_params) override;
+
+ std::vector<std::unique_ptr<RtpPacketToSend>> FetchFecPackets() override;
+
+ void OnPacketsAcknowledged(
+ rtc::ArrayView<const uint16_t> sequence_numbers) override;
+
+ std::vector<std::unique_ptr<RtpPacketToSend>> GeneratePadding(
+ size_t target_size_bytes) override;
+
+ std::vector<RtpSequenceNumberMap::Info> GetSentRtpPacketInfos(
+ rtc::ArrayView<const uint16_t> sequence_numbers) const override;
+
+ size_t ExpectedPerPacketOverhead() const override;
+
+ void OnPacketSendingThreadSwitched() override;
+
+ // RTCP part.
+
+ // Get RTCP status.
+ RtcpMode RTCP() const override;
+
+  // Configure RTCP status, i.e. on/off.
+ void SetRTCPStatus(RtcpMode method) override;
+
+ // Set RTCP CName.
+ int32_t SetCNAME(absl::string_view c_name) override;
+
+ // Get remote NTP.
+ int32_t RemoteNTP(uint32_t* received_ntp_secs,
+ uint32_t* received_ntp_frac,
+ uint32_t* rtcp_arrival_time_secs,
+ uint32_t* rtcp_arrival_time_frac,
+ uint32_t* rtcp_timestamp) const override;
+
+ // Get RoundTripTime.
+ int32_t RTT(uint32_t remote_ssrc,
+ int64_t* rtt,
+ int64_t* avg_rtt,
+ int64_t* min_rtt,
+ int64_t* max_rtt) const override;
+
+ int64_t ExpectedRetransmissionTimeMs() const override;
+
+ // Force a send of an RTCP packet.
+ // Normal SR and RR are triggered via the process function.
+ int32_t SendRTCP(RTCPPacketType rtcpPacketType) override;
+
+ void GetSendStreamDataCounters(
+ StreamDataCounters* rtp_counters,
+ StreamDataCounters* rtx_counters) const override;
+
+ void RemoteRTCPSenderInfo(uint32_t* packet_count,
+ uint32_t* octet_count,
+ int64_t* ntp_timestamp_ms,
+ int64_t* remote_ntp_timestamp_ms) const override;
+
+ // A snapshot of the most recent Report Block with additional data of
+ // interest to statistics. Used to implement RTCRemoteInboundRtpStreamStats.
+ // Within this list, the ReportBlockData::RTCPReportBlock::source_ssrc(),
+ // which is the SSRC of the corresponding outbound RTP stream, is unique.
+ std::vector<ReportBlockData> GetLatestReportBlockData() const override;
+ absl::optional<SenderReportStats> GetSenderReportStats() const override;
+ // Round trip time statistics computed from the XR block contained in the last
+ // report.
+ absl::optional<NonSenderRttStats> GetNonSenderRttStats() const override;
+
+ // (REMB) Receiver Estimated Max Bitrate.
+ void SetRemb(int64_t bitrate_bps, std::vector<uint32_t> ssrcs) override;
+ void UnsetRemb() override;
+
+ void SetTmmbn(std::vector<rtcp::TmmbItem> bounding_set) override;
+
+ size_t MaxRtpPacketSize() const override;
+
+ void SetMaxRtpPacketSize(size_t max_packet_size) override;
+
+ // (NACK) Negative acknowledgment part.
+
+ // Send a Negative acknowledgment packet.
+ // TODO(philipel): Deprecate SendNACK and use SendNack instead.
+ int32_t SendNACK(const uint16_t* nack_list, uint16_t size) override;
+
+ void SendNack(const std::vector<uint16_t>& sequence_numbers) override;
+
+  // Store the sent packets, needed to answer negative acknowledgment
+  // requests.
+ void SetStorePacketsStatus(bool enable, uint16_t number_to_store) override;
+
+ void SendCombinedRtcpPacket(
+ std::vector<std::unique_ptr<rtcp::RtcpPacket>> rtcp_packets) override;
+
+ // Video part.
+ int32_t SendLossNotification(uint16_t last_decoded_seq_num,
+ uint16_t last_received_seq_num,
+ bool decodability_flag,
+ bool buffering_allowed) override;
+
+ RtpSendRates GetSendRates() const override;
+
+ void OnReceivedNack(
+ const std::vector<uint16_t>& nack_sequence_numbers) override;
+ void OnReceivedRtcpReportBlocks(
+ const ReportBlockList& report_blocks) override;
+ void OnRequestSendReport() override;
+
+ void SetVideoBitrateAllocation(
+ const VideoBitrateAllocation& bitrate) override;
+
+ RTPSender* RtpSender() override;
+ const RTPSender* RtpSender() const override;
+
+ protected:
+ bool UpdateRTCPReceiveInformationTimers();
+
+ RTPSender* rtp_sender() {
+ return rtp_sender_ ? &rtp_sender_->packet_generator : nullptr;
+ }
+ const RTPSender* rtp_sender() const {
+ return rtp_sender_ ? &rtp_sender_->packet_generator : nullptr;
+ }
+
+ RTCPSender* rtcp_sender() { return &rtcp_sender_; }
+ const RTCPSender* rtcp_sender() const { return &rtcp_sender_; }
+
+ RTCPReceiver* rtcp_receiver() { return &rtcp_receiver_; }
+ const RTCPReceiver* rtcp_receiver() const { return &rtcp_receiver_; }
+
+ void SetMediaHasBeenSent(bool media_has_been_sent) {
+ rtp_sender_->packet_sender.SetMediaHasBeenSent(media_has_been_sent);
+ }
+
+ Clock* clock() const { return clock_; }
+
+ private:
+ FRIEND_TEST_ALL_PREFIXES(RtpRtcpImplTest, Rtt);
+ FRIEND_TEST_ALL_PREFIXES(RtpRtcpImplTest, RttForReceiverOnly);
+
+ struct RtpSenderContext {
+ explicit RtpSenderContext(const RtpRtcpInterface::Configuration& config);
+ // Storage of packets, for retransmissions and padding, if applicable.
+ RtpPacketHistory packet_history;
+ // Handles sequence number assignment and padding timestamp generation.
+ mutable Mutex sequencer_mutex;
+ PacketSequencer sequencer_ RTC_GUARDED_BY(sequencer_mutex);
+    // Handles final timestamping, stats, etc., and handover to Transport.
+ DEPRECATED_RtpSenderEgress packet_sender;
+    // If no paced sender is configured, this class is used to pass packets
+    // from `packet_generator` to `packet_sender`.
+ DEPRECATED_RtpSenderEgress::NonPacedPacketSender non_paced_sender;
+ // Handles creation of RTP packets to be sent.
+ RTPSender packet_generator;
+ };
+
+ void set_rtt_ms(int64_t rtt_ms);
+ int64_t rtt_ms() const;
+
+ bool TimeToSendFullNackList(int64_t now) const;
+
+ // Returns true if the module is configured to store packets.
+ bool StorePackets() const;
+
+  // Returns the current Receiver Reference Time Report (RRTR) status.
+ bool RtcpXrRrtrStatus() const;
+
+ std::unique_ptr<RtpSenderContext> rtp_sender_;
+
+ RTCPSender rtcp_sender_;
+ RTCPReceiver rtcp_receiver_;
+
+ Clock* const clock_;
+
+ int64_t last_bitrate_process_time_;
+ int64_t last_rtt_process_time_;
+ uint16_t packet_overhead_;
+
+ // Send side
+ int64_t nack_last_time_sent_full_ms_;
+ uint16_t nack_last_seq_number_sent_;
+
+ RtcpRttStats* const rtt_stats_;
+
+ // The processed RTT from RtcpRttStats.
+ mutable Mutex mutex_rtt_;
+ int64_t rtt_ms_ RTC_GUARDED_BY(mutex_rtt_);
+};
+
+} // namespace webrtc
+
+#endif // MODULES_RTP_RTCP_SOURCE_RTP_RTCP_IMPL_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_rtcp_impl2.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_rtcp_impl2.cc
new file mode 100644
index 0000000000..891c9e31a4
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_rtcp_impl2.cc
@@ -0,0 +1,827 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtp_rtcp_impl2.h"
+
+#include <string.h>
+
+#include <algorithm>
+#include <cstdint>
+#include <memory>
+#include <set>
+#include <string>
+#include <utility>
+
+#include "absl/strings/string_view.h"
+#include "absl/types/optional.h"
+#include "api/sequence_checker.h"
+#include "api/transport/field_trial_based_config.h"
+#include "api/units/time_delta.h"
+#include "api/units/timestamp.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/dlrr.h"
+#include "modules/rtp_rtcp/source/rtp_rtcp_config.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/time_utils.h"
+#include "system_wrappers/include/ntp_time.h"
+
+#ifdef _WIN32
+// Disable warning C4355: 'this' : used in base member initializer list.
+#pragma warning(disable : 4355)
+#endif
+
+namespace webrtc {
+namespace {
+const int64_t kDefaultExpectedRetransmissionTimeMs = 125;
+
+constexpr TimeDelta kRttUpdateInterval = TimeDelta::Millis(1000);
+
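+// The RTCPSender invokes the callback installed here with the delay until its
+// next send evaluation; the ModuleRtpRtcpImpl2 constructor below binds it to
+// ScheduleRtcpSendEvaluation() on the owning module.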
+RTCPSender::Configuration AddRtcpSendEvaluationCallback(
+ RTCPSender::Configuration config,
+ std::function<void(TimeDelta)> send_evaluation_callback) {
+ config.schedule_next_rtcp_send_evaluation_function =
+ std::move(send_evaluation_callback);
+ return config;
+}
+
+} // namespace
+
+ModuleRtpRtcpImpl2::RtpSenderContext::RtpSenderContext(
+ const RtpRtcpInterface::Configuration& config)
+ : packet_history(config.clock, config.enable_rtx_padding_prioritization),
+ sequencer(config.local_media_ssrc,
+ config.rtx_send_ssrc,
+ /*require_marker_before_media_padding=*/!config.audio,
+ config.clock),
+ packet_sender(config, &packet_history),
+ non_paced_sender(&packet_sender, &sequencer),
+ packet_generator(
+ config,
+ &packet_history,
+ config.paced_sender ? config.paced_sender : &non_paced_sender) {}
+
+ModuleRtpRtcpImpl2::ModuleRtpRtcpImpl2(const Configuration& configuration)
+ : worker_queue_(TaskQueueBase::Current()),
+ rtcp_sender_(AddRtcpSendEvaluationCallback(
+ RTCPSender::Configuration::FromRtpRtcpConfiguration(configuration),
+ [this](TimeDelta duration) {
+ ScheduleRtcpSendEvaluation(duration);
+ })),
+ rtcp_receiver_(configuration, this),
+ clock_(configuration.clock),
+      packet_overhead_(28),  // IPv4 UDP.
+ nack_last_time_sent_full_ms_(0),
+ nack_last_seq_number_sent_(0),
+ rtt_stats_(configuration.rtt_stats),
+ rtt_ms_(0) {
+ RTC_DCHECK(worker_queue_);
+ rtcp_thread_checker_.Detach();
+ if (!configuration.receiver_only) {
+ rtp_sender_ = std::make_unique<RtpSenderContext>(configuration);
+ rtp_sender_->sequencing_checker.Detach();
+    // Make sure the RTCP sender uses the same timestamp offset as the RTP
+    // sender.
+ rtcp_sender_.SetTimestampOffset(
+ rtp_sender_->packet_generator.TimestampOffset());
+ }
+
+ // Set default packet size limit.
+ // TODO(nisse): Kind-of duplicates
+ // webrtc::VideoSendStream::Config::Rtp::kDefaultMaxPacketSize.
+ const size_t kTcpOverIpv4HeaderSize = 40;
+ SetMaxRtpPacketSize(IP_PACKET_SIZE - kTcpOverIpv4HeaderSize);
+ rtt_update_task_ = RepeatingTaskHandle::DelayedStart(
+ worker_queue_, kRttUpdateInterval, [this]() {
+ PeriodicUpdate();
+ return kRttUpdateInterval;
+ });
+}
+
+ModuleRtpRtcpImpl2::~ModuleRtpRtcpImpl2() {
+ RTC_DCHECK_RUN_ON(worker_queue_);
+ rtt_update_task_.Stop();
+}
+
+// static
+std::unique_ptr<ModuleRtpRtcpImpl2> ModuleRtpRtcpImpl2::Create(
+ const Configuration& configuration) {
+ RTC_DCHECK(configuration.clock);
+ RTC_DCHECK(TaskQueueBase::Current());
+ return std::make_unique<ModuleRtpRtcpImpl2>(configuration);
+}
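+// A minimal construction sketch (illustrative only; `my_transport` is an
+// assumed Transport implementation, and Configuration has many more fields;
+// see the unit tests for a fuller setup). Note the DCHECKs above: a clock
+// must be provided and Create() must run on a task queue.
+//
+//   RtpRtcpInterface::Configuration config;
+//   config.clock = Clock::GetRealTimeClock();
+//   config.outgoing_transport = &my_transport;
+//   config.local_media_ssrc = 0x12345;
+//   auto rtp_rtcp = ModuleRtpRtcpImpl2::Create(config);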
+
+void ModuleRtpRtcpImpl2::SetRtxSendStatus(int mode) {
+ rtp_sender_->packet_generator.SetRtxStatus(mode);
+}
+
+int ModuleRtpRtcpImpl2::RtxSendStatus() const {
+ return rtp_sender_ ? rtp_sender_->packet_generator.RtxStatus() : kRtxOff;
+}
+
+void ModuleRtpRtcpImpl2::SetRtxSendPayloadType(int payload_type,
+ int associated_payload_type) {
+ rtp_sender_->packet_generator.SetRtxPayloadType(payload_type,
+ associated_payload_type);
+}
+
+absl::optional<uint32_t> ModuleRtpRtcpImpl2::RtxSsrc() const {
+ return rtp_sender_ ? rtp_sender_->packet_generator.RtxSsrc() : absl::nullopt;
+}
+
+absl::optional<uint32_t> ModuleRtpRtcpImpl2::FlexfecSsrc() const {
+ if (rtp_sender_) {
+ return rtp_sender_->packet_generator.FlexfecSsrc();
+ }
+ return absl::nullopt;
+}
+
+void ModuleRtpRtcpImpl2::IncomingRtcpPacket(const uint8_t* rtcp_packet,
+ const size_t length) {
+ RTC_DCHECK_RUN_ON(&rtcp_thread_checker_);
+ rtcp_receiver_.IncomingPacket(rtcp_packet, length);
+}
+
+void ModuleRtpRtcpImpl2::RegisterSendPayloadFrequency(int payload_type,
+ int payload_frequency) {
+ rtcp_sender_.SetRtpClockRate(payload_type, payload_frequency);
+}
+
+int32_t ModuleRtpRtcpImpl2::DeRegisterSendPayload(const int8_t payload_type) {
+ return 0;
+}
+
+uint32_t ModuleRtpRtcpImpl2::StartTimestamp() const {
+ return rtp_sender_->packet_generator.TimestampOffset();
+}
+
+// Configure the start timestamp; the default is a random number.
+void ModuleRtpRtcpImpl2::SetStartTimestamp(const uint32_t timestamp) {
+ rtcp_sender_.SetTimestampOffset(timestamp);
+ rtp_sender_->packet_generator.SetTimestampOffset(timestamp);
+ rtp_sender_->packet_sender.SetTimestampOffset(timestamp);
+}
+
+uint16_t ModuleRtpRtcpImpl2::SequenceNumber() const {
+ RTC_DCHECK_RUN_ON(&rtp_sender_->sequencing_checker);
+ return rtp_sender_->sequencer.media_sequence_number();
+}
+
+// Set the sequence number; the default is a random number.
+void ModuleRtpRtcpImpl2::SetSequenceNumber(const uint16_t seq_num) {
+ RTC_DCHECK_RUN_ON(&rtp_sender_->sequencing_checker);
+ if (rtp_sender_->sequencer.media_sequence_number() != seq_num) {
+ rtp_sender_->sequencer.set_media_sequence_number(seq_num);
+ rtp_sender_->packet_history.Clear();
+ }
+}
+
+void ModuleRtpRtcpImpl2::SetRtpState(const RtpState& rtp_state) {
+ RTC_DCHECK_RUN_ON(&rtp_sender_->sequencing_checker);
+ rtp_sender_->packet_generator.SetRtpState(rtp_state);
+ rtp_sender_->sequencer.SetRtpState(rtp_state);
+ rtcp_sender_.SetTimestampOffset(rtp_state.start_timestamp);
+}
+
+void ModuleRtpRtcpImpl2::SetRtxState(const RtpState& rtp_state) {
+ RTC_DCHECK_RUN_ON(&rtp_sender_->sequencing_checker);
+ rtp_sender_->packet_generator.SetRtxRtpState(rtp_state);
+ rtp_sender_->sequencer.set_rtx_sequence_number(rtp_state.sequence_number);
+}
+
+RtpState ModuleRtpRtcpImpl2::GetRtpState() const {
+ RTC_DCHECK_RUN_ON(&rtp_sender_->sequencing_checker);
+ RtpState state = rtp_sender_->packet_generator.GetRtpState();
+ rtp_sender_->sequencer.PopulateRtpState(state);
+ return state;
+}
+
+RtpState ModuleRtpRtcpImpl2::GetRtxState() const {
+ RTC_DCHECK_RUN_ON(&rtp_sender_->sequencing_checker);
+ RtpState state = rtp_sender_->packet_generator.GetRtxRtpState();
+ state.sequence_number = rtp_sender_->sequencer.rtx_sequence_number();
+ return state;
+}
+
+void ModuleRtpRtcpImpl2::SetNonSenderRttMeasurement(bool enabled) {
+ rtcp_sender_.SetNonSenderRttMeasurement(enabled);
+ rtcp_receiver_.SetNonSenderRttMeasurement(enabled);
+}
+
+uint32_t ModuleRtpRtcpImpl2::local_media_ssrc() const {
+ RTC_DCHECK_RUN_ON(&rtcp_thread_checker_);
+ RTC_DCHECK_EQ(rtcp_receiver_.local_media_ssrc(), rtcp_sender_.SSRC());
+ return rtcp_receiver_.local_media_ssrc();
+}
+
+void ModuleRtpRtcpImpl2::SetMid(absl::string_view mid) {
+ if (rtp_sender_) {
+ rtp_sender_->packet_generator.SetMid(mid);
+ }
+ // TODO(bugs.webrtc.org/4050): If we end up supporting the MID SDES item for
+ // RTCP, this will need to be passed down to the RTCPSender also.
+}
+
+void ModuleRtpRtcpImpl2::SetCsrcs(const std::vector<uint32_t>& csrcs) {
+ rtcp_sender_.SetCsrcs(csrcs);
+ rtp_sender_->packet_generator.SetCsrcs(csrcs);
+}
+
+// TODO(pbos): Handle media and RTX streams separately (separate RTCP
+// feedbacks).
+RTCPSender::FeedbackState ModuleRtpRtcpImpl2::GetFeedbackState() {
+ // TODO(bugs.webrtc.org/11581): Called by potentially multiple threads.
+ // Mostly "Send*" methods. Make sure it's only called on the
+ // construction thread.
+
+ RTCPSender::FeedbackState state;
+  // This is also called when receiver_only is true, hence the check below
+  // that rtp_sender_ exists.
+ if (rtp_sender_) {
+ StreamDataCounters rtp_stats;
+ StreamDataCounters rtx_stats;
+ rtp_sender_->packet_sender.GetDataCounters(&rtp_stats, &rtx_stats);
+ state.packets_sent =
+ rtp_stats.transmitted.packets + rtx_stats.transmitted.packets;
+ state.media_bytes_sent = rtp_stats.transmitted.payload_bytes +
+ rtx_stats.transmitted.payload_bytes;
+ state.send_bitrate =
+ rtp_sender_->packet_sender.GetSendRates().Sum().bps<uint32_t>();
+ }
+ state.receiver = &rtcp_receiver_;
+
+ uint32_t received_ntp_secs = 0;
+ uint32_t received_ntp_frac = 0;
+ state.remote_sr = 0;
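+  // Per RFC 3550 section 6.4.1, the "last SR" (LSR) value is the middle 32
+  // bits of the 64-bit NTP timestamp: the low 16 bits of the seconds and the
+  // high 16 bits of the fraction, as extracted below.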
+ if (rtcp_receiver_.NTP(&received_ntp_secs, &received_ntp_frac,
+ /*rtcp_arrival_time_secs=*/&state.last_rr_ntp_secs,
+ /*rtcp_arrival_time_frac=*/&state.last_rr_ntp_frac,
+ /*rtcp_timestamp=*/nullptr,
+ /*remote_sender_packet_count=*/nullptr,
+ /*remote_sender_octet_count=*/nullptr,
+ /*remote_sender_reports_count=*/nullptr)) {
+ state.remote_sr = ((received_ntp_secs & 0x0000ffff) << 16) +
+ ((received_ntp_frac & 0xffff0000) >> 16);
+ }
+
+ state.last_xr_rtis = rtcp_receiver_.ConsumeReceivedXrReferenceTimeInfo();
+
+ return state;
+}
+
+int32_t ModuleRtpRtcpImpl2::SetSendingStatus(const bool sending) {
+ if (rtcp_sender_.Sending() != sending) {
+    // Sends an RTCP BYE when going from true to false.
+ rtcp_sender_.SetSendingStatus(GetFeedbackState(), sending);
+ }
+ return 0;
+}
+
+bool ModuleRtpRtcpImpl2::Sending() const {
+ return rtcp_sender_.Sending();
+}
+
+void ModuleRtpRtcpImpl2::SetSendingMediaStatus(const bool sending) {
+ rtp_sender_->packet_generator.SetSendingMediaStatus(sending);
+}
+
+bool ModuleRtpRtcpImpl2::SendingMedia() const {
+ return rtp_sender_ ? rtp_sender_->packet_generator.SendingMedia() : false;
+}
+
+bool ModuleRtpRtcpImpl2::IsAudioConfigured() const {
+ return rtp_sender_ ? rtp_sender_->packet_generator.IsAudioConfigured()
+ : false;
+}
+
+void ModuleRtpRtcpImpl2::SetAsPartOfAllocation(bool part_of_allocation) {
+ RTC_CHECK(rtp_sender_);
+ rtp_sender_->packet_sender.ForceIncludeSendPacketsInAllocation(
+ part_of_allocation);
+}
+
+bool ModuleRtpRtcpImpl2::OnSendingRtpFrame(uint32_t timestamp,
+ int64_t capture_time_ms,
+ int payload_type,
+ bool force_sender_report) {
+ if (!Sending())
+ return false;
+
+  // TODO(bugs.webrtc.org/12873): Migrate this method and its users to use
+  // optional Timestamps.
+ absl::optional<Timestamp> capture_time;
+ if (capture_time_ms > 0) {
+ capture_time = Timestamp::Millis(capture_time_ms);
+ }
+ absl::optional<int> payload_type_optional;
+ if (payload_type >= 0)
+ payload_type_optional = payload_type;
+ rtcp_sender_.SetLastRtpTime(timestamp, capture_time, payload_type_optional);
+ // Make sure an RTCP report isn't queued behind a key frame.
+ if (rtcp_sender_.TimeToSendRTCPReport(force_sender_report))
+ rtcp_sender_.SendRTCP(GetFeedbackState(), kRtcpReport);
+
+ return true;
+}
+
+bool ModuleRtpRtcpImpl2::TrySendPacket(RtpPacketToSend* packet,
+ const PacedPacketInfo& pacing_info) {
+ RTC_DCHECK(rtp_sender_);
+ RTC_DCHECK_RUN_ON(&rtp_sender_->sequencing_checker);
+ if (!rtp_sender_->packet_generator.SendingMedia()) {
+ return false;
+ }
+ if (packet->packet_type() == RtpPacketMediaType::kPadding &&
+ packet->Ssrc() == rtp_sender_->packet_generator.SSRC() &&
+ !rtp_sender_->sequencer.CanSendPaddingOnMediaSsrc()) {
+    // A new media packet preempted this generated padding packet; discard it.
+ return false;
+ }
+ bool is_flexfec =
+ packet->packet_type() == RtpPacketMediaType::kForwardErrorCorrection &&
+ packet->Ssrc() == rtp_sender_->packet_generator.FlexfecSsrc();
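+  // FlexFEC packets are sent on a separate SSRC with their own sequence
+  // number space (assigned by the FEC generator rather than this sequencer),
+  // so they are not sequenced here.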
+ if (!is_flexfec) {
+ rtp_sender_->sequencer.Sequence(*packet);
+ }
+
+ rtp_sender_->packet_sender.SendPacket(packet, pacing_info);
+ return true;
+}
+
+void ModuleRtpRtcpImpl2::SetFecProtectionParams(
+ const FecProtectionParams& delta_params,
+ const FecProtectionParams& key_params) {
+ RTC_DCHECK(rtp_sender_);
+ rtp_sender_->packet_sender.SetFecProtectionParameters(delta_params,
+ key_params);
+}
+
+std::vector<std::unique_ptr<RtpPacketToSend>>
+ModuleRtpRtcpImpl2::FetchFecPackets() {
+ RTC_DCHECK(rtp_sender_);
+ RTC_DCHECK_RUN_ON(&rtp_sender_->sequencing_checker);
+ return rtp_sender_->packet_sender.FetchFecPackets();
+}
+
+void ModuleRtpRtcpImpl2::OnPacketsAcknowledged(
+ rtc::ArrayView<const uint16_t> sequence_numbers) {
+ RTC_DCHECK(rtp_sender_);
+ rtp_sender_->packet_history.CullAcknowledgedPackets(sequence_numbers);
+}
+
+bool ModuleRtpRtcpImpl2::SupportsPadding() const {
+ RTC_DCHECK(rtp_sender_);
+ return rtp_sender_->packet_generator.SupportsPadding();
+}
+
+bool ModuleRtpRtcpImpl2::SupportsRtxPayloadPadding() const {
+ RTC_DCHECK(rtp_sender_);
+ return rtp_sender_->packet_generator.SupportsRtxPayloadPadding();
+}
+
+std::vector<std::unique_ptr<RtpPacketToSend>>
+ModuleRtpRtcpImpl2::GeneratePadding(size_t target_size_bytes) {
+ RTC_DCHECK(rtp_sender_);
+ RTC_DCHECK_RUN_ON(&rtp_sender_->sequencing_checker);
+
+ return rtp_sender_->packet_generator.GeneratePadding(
+ target_size_bytes, rtp_sender_->packet_sender.MediaHasBeenSent(),
+ rtp_sender_->sequencer.CanSendPaddingOnMediaSsrc());
+}
+
+std::vector<RtpSequenceNumberMap::Info>
+ModuleRtpRtcpImpl2::GetSentRtpPacketInfos(
+ rtc::ArrayView<const uint16_t> sequence_numbers) const {
+ RTC_DCHECK(rtp_sender_);
+ return rtp_sender_->packet_sender.GetSentRtpPacketInfos(sequence_numbers);
+}
+
+size_t ModuleRtpRtcpImpl2::ExpectedPerPacketOverhead() const {
+ if (!rtp_sender_) {
+ return 0;
+ }
+ return rtp_sender_->packet_generator.ExpectedPerPacketOverhead();
+}
+
+void ModuleRtpRtcpImpl2::OnPacketSendingThreadSwitched() {
+ // Ownership of sequencing is being transferred to another thread.
+ rtp_sender_->sequencing_checker.Detach();
+}
+
+size_t ModuleRtpRtcpImpl2::MaxRtpPacketSize() const {
+ RTC_DCHECK(rtp_sender_);
+ return rtp_sender_->packet_generator.MaxRtpPacketSize();
+}
+
+void ModuleRtpRtcpImpl2::SetMaxRtpPacketSize(size_t rtp_packet_size) {
+ RTC_DCHECK_LE(rtp_packet_size, IP_PACKET_SIZE)
+ << "rtp packet size too large: " << rtp_packet_size;
+ RTC_DCHECK_GT(rtp_packet_size, packet_overhead_)
+ << "rtp packet size too small: " << rtp_packet_size;
+
+ rtcp_sender_.SetMaxRtpPacketSize(rtp_packet_size);
+ if (rtp_sender_) {
+ rtp_sender_->packet_generator.SetMaxRtpPacketSize(rtp_packet_size);
+ }
+}
+
+RtcpMode ModuleRtpRtcpImpl2::RTCP() const {
+ return rtcp_sender_.Status();
+}
+
+// Configure RTCP status, i.e. on/off.
+void ModuleRtpRtcpImpl2::SetRTCPStatus(const RtcpMode method) {
+ rtcp_sender_.SetRTCPStatus(method);
+}
+
+int32_t ModuleRtpRtcpImpl2::SetCNAME(absl::string_view c_name) {
+ return rtcp_sender_.SetCNAME(c_name);
+}
+
+int32_t ModuleRtpRtcpImpl2::RemoteNTP(uint32_t* received_ntpsecs,
+ uint32_t* received_ntpfrac,
+ uint32_t* rtcp_arrival_time_secs,
+ uint32_t* rtcp_arrival_time_frac,
+ uint32_t* rtcp_timestamp) const {
+ return rtcp_receiver_.NTP(received_ntpsecs, received_ntpfrac,
+ rtcp_arrival_time_secs, rtcp_arrival_time_frac,
+ rtcp_timestamp,
+ /*remote_sender_packet_count=*/nullptr,
+ /*remote_sender_octet_count=*/nullptr,
+ /*remote_sender_reports_count=*/nullptr)
+ ? 0
+ : -1;
+}
+
+// TODO(tommi): Check if the `avg_rtt_ms`, `min_rtt_ms`, `max_rtt_ms` params
+// are actually used in practice (some callers ask for them but don't use
+// them). It could be that only `rtt` is needed, and if so, the fast path
+// could be to just call rtt_ms() and rely on the calculation being done
+// periodically.
+int32_t ModuleRtpRtcpImpl2::RTT(const uint32_t remote_ssrc,
+ int64_t* rtt,
+ int64_t* avg_rtt,
+ int64_t* min_rtt,
+ int64_t* max_rtt) const {
+ int32_t ret = rtcp_receiver_.RTT(remote_ssrc, rtt, avg_rtt, min_rtt, max_rtt);
+ if (rtt && *rtt == 0) {
+ // Try to get RTT from RtcpRttStats class.
+ *rtt = rtt_ms();
+ }
+ return ret;
+}
+
+int64_t ModuleRtpRtcpImpl2::ExpectedRetransmissionTimeMs() const {
+ int64_t expected_retransmission_time_ms = rtt_ms();
+ if (expected_retransmission_time_ms > 0) {
+ return expected_retransmission_time_ms;
+ }
+ // No rtt available (`kRttUpdateInterval` not yet passed?), so try to
+ // poll avg_rtt_ms directly from rtcp receiver.
+ if (rtcp_receiver_.RTT(rtcp_receiver_.RemoteSSRC(), nullptr,
+ &expected_retransmission_time_ms, nullptr,
+ nullptr) == 0) {
+ return expected_retransmission_time_ms;
+ }
+ return kDefaultExpectedRetransmissionTimeMs;
+}
+
+// Force a send of an RTCP packet.
+// Normal SR and RR are triggered via the process function.
+int32_t ModuleRtpRtcpImpl2::SendRTCP(RTCPPacketType packet_type) {
+ return rtcp_sender_.SendRTCP(GetFeedbackState(), packet_type);
+}
+
+void ModuleRtpRtcpImpl2::GetSendStreamDataCounters(
+ StreamDataCounters* rtp_counters,
+ StreamDataCounters* rtx_counters) const {
+ rtp_sender_->packet_sender.GetDataCounters(rtp_counters, rtx_counters);
+}
+
+// Received RTCP report.
+void ModuleRtpRtcpImpl2::RemoteRTCPSenderInfo(
+ uint32_t* packet_count, uint32_t* octet_count, int64_t* ntp_timestamp_ms,
+ int64_t* remote_ntp_timestamp_ms) const {
+ return rtcp_receiver_.RemoteRTCPSenderInfo(
+ packet_count, octet_count, ntp_timestamp_ms, remote_ntp_timestamp_ms);
+}
+
+std::vector<ReportBlockData> ModuleRtpRtcpImpl2::GetLatestReportBlockData()
+ const {
+ return rtcp_receiver_.GetLatestReportBlockData();
+}
+
+absl::optional<RtpRtcpInterface::SenderReportStats>
+ModuleRtpRtcpImpl2::GetSenderReportStats() const {
+ SenderReportStats stats;
+ uint32_t remote_timestamp_secs;
+ uint32_t remote_timestamp_frac;
+ uint32_t arrival_timestamp_secs;
+ uint32_t arrival_timestamp_frac;
+ if (rtcp_receiver_.NTP(&remote_timestamp_secs, &remote_timestamp_frac,
+ &arrival_timestamp_secs, &arrival_timestamp_frac,
+ /*rtcp_timestamp=*/nullptr, &stats.packets_sent,
+ &stats.bytes_sent, &stats.reports_count)) {
+ stats.last_remote_timestamp.Set(remote_timestamp_secs,
+ remote_timestamp_frac);
+ stats.last_arrival_timestamp.Set(arrival_timestamp_secs,
+ arrival_timestamp_frac);
+ return stats;
+ }
+ return absl::nullopt;
+}
+
+absl::optional<RtpRtcpInterface::NonSenderRttStats>
+ModuleRtpRtcpImpl2::GetNonSenderRttStats() const {
+ RTCPReceiver::NonSenderRttStats non_sender_rtt_stats =
+ rtcp_receiver_.GetNonSenderRTT();
+ return {{
+ non_sender_rtt_stats.round_trip_time(),
+ non_sender_rtt_stats.total_round_trip_time(),
+ non_sender_rtt_stats.round_trip_time_measurements(),
+ }};
+}
+
+// (REMB) Receiver Estimated Max Bitrate.
+void ModuleRtpRtcpImpl2::SetRemb(int64_t bitrate_bps,
+ std::vector<uint32_t> ssrcs) {
+ rtcp_sender_.SetRemb(bitrate_bps, std::move(ssrcs));
+}
+
+void ModuleRtpRtcpImpl2::UnsetRemb() {
+ rtcp_sender_.UnsetRemb();
+}
+
+void ModuleRtpRtcpImpl2::SetExtmapAllowMixed(bool extmap_allow_mixed) {
+ rtp_sender_->packet_generator.SetExtmapAllowMixed(extmap_allow_mixed);
+}
+
+void ModuleRtpRtcpImpl2::RegisterRtpHeaderExtension(absl::string_view uri,
+ int id) {
+ bool registered =
+ rtp_sender_->packet_generator.RegisterRtpHeaderExtension(uri, id);
+ RTC_CHECK(registered);
+}
+
+void ModuleRtpRtcpImpl2::DeregisterSendRtpHeaderExtension(
+ absl::string_view uri) {
+ rtp_sender_->packet_generator.DeregisterRtpHeaderExtension(uri);
+}
+
+void ModuleRtpRtcpImpl2::SetTmmbn(std::vector<rtcp::TmmbItem> bounding_set) {
+ rtcp_sender_.SetTmmbn(std::move(bounding_set));
+}
+
+// Send a Negative acknowledgment packet.
+int32_t ModuleRtpRtcpImpl2::SendNACK(const uint16_t* nack_list,
+ const uint16_t size) {
+ uint16_t nack_length = size;
+ uint16_t start_id = 0;
+ int64_t now_ms = clock_->TimeInMilliseconds();
+ if (TimeToSendFullNackList(now_ms)) {
+ nack_last_time_sent_full_ms_ = now_ms;
+ } else {
+ // Only send extended list.
+ if (nack_last_seq_number_sent_ == nack_list[size - 1]) {
+    // The last sequence number is the same; do not send the list.
+ return 0;
+ }
+ // Send new sequence numbers.
+ for (int i = 0; i < size; ++i) {
+ if (nack_last_seq_number_sent_ == nack_list[i]) {
+ start_id = i + 1;
+ break;
+ }
+ }
+ nack_length = size - start_id;
+ }
+
+ // Our RTCP NACK implementation is limited to kRtcpMaxNackFields sequence
+ // numbers per RTCP packet.
+ if (nack_length > kRtcpMaxNackFields) {
+ nack_length = kRtcpMaxNackFields;
+ }
+ nack_last_seq_number_sent_ = nack_list[start_id + nack_length - 1];
+
+ return rtcp_sender_.SendRTCP(GetFeedbackState(), kRtcpNack, nack_length,
+ &nack_list[start_id]);
+}
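+// Worked example of the incremental-list logic above: if the previous NACK
+// ended at sequence number 110 and the new list is {108, 109, 110, 111, 112},
+// then outside the full-list window only the new tail {111, 112} is sent.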
+
+void ModuleRtpRtcpImpl2::SendNack(
+ const std::vector<uint16_t>& sequence_numbers) {
+ rtcp_sender_.SendRTCP(GetFeedbackState(), kRtcpNack, sequence_numbers.size(),
+ sequence_numbers.data());
+}
+
+bool ModuleRtpRtcpImpl2::TimeToSendFullNackList(int64_t now) const {
+ // Use RTT from RtcpRttStats class if provided.
+ int64_t rtt = rtt_ms();
+ if (rtt == 0) {
+ rtcp_receiver_.RTT(rtcp_receiver_.RemoteSSRC(), NULL, &rtt, NULL, NULL);
+ }
+
+ const int64_t kStartUpRttMs = 100;
+ int64_t wait_time = 5 + ((rtt * 3) >> 1); // 5 + RTT * 1.5.
+ if (rtt == 0) {
+ wait_time = kStartUpRttMs;
+ }
+
+ // Send a full NACK list once within every `wait_time`.
+ return now - nack_last_time_sent_full_ms_ > wait_time;
+}
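+// For the formula above: with rtt = 100 ms, the full NACK list is resent at
+// most once every 5 + 150 = 155 ms; with no RTT estimate yet (rtt == 0), the
+// kStartUpRttMs fallback of 100 ms applies.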
+
+// Store the sent packets, needed to answer Negative acknowledgment requests.
+void ModuleRtpRtcpImpl2::SetStorePacketsStatus(const bool enable,
+ const uint16_t number_to_store) {
+ rtp_sender_->packet_history.SetStorePacketsStatus(
+ enable ? RtpPacketHistory::StorageMode::kStoreAndCull
+ : RtpPacketHistory::StorageMode::kDisabled,
+ number_to_store);
+}
+
+bool ModuleRtpRtcpImpl2::StorePackets() const {
+ return rtp_sender_->packet_history.GetStorageMode() !=
+ RtpPacketHistory::StorageMode::kDisabled;
+}
+
+void ModuleRtpRtcpImpl2::SendCombinedRtcpPacket(
+ std::vector<std::unique_ptr<rtcp::RtcpPacket>> rtcp_packets) {
+ rtcp_sender_.SendCombinedRtcpPacket(std::move(rtcp_packets));
+}
+
+int32_t ModuleRtpRtcpImpl2::SendLossNotification(uint16_t last_decoded_seq_num,
+ uint16_t last_received_seq_num,
+ bool decodability_flag,
+ bool buffering_allowed) {
+ return rtcp_sender_.SendLossNotification(
+ GetFeedbackState(), last_decoded_seq_num, last_received_seq_num,
+ decodability_flag, buffering_allowed);
+}
+
+void ModuleRtpRtcpImpl2::SetRemoteSSRC(const uint32_t ssrc) {
+ // Inform about the incoming SSRC.
+ rtcp_sender_.SetRemoteSSRC(ssrc);
+ rtcp_receiver_.SetRemoteSSRC(ssrc);
+}
+
+void ModuleRtpRtcpImpl2::SetLocalSsrc(uint32_t local_ssrc) {
+ RTC_DCHECK_RUN_ON(&rtcp_thread_checker_);
+ rtcp_receiver_.set_local_media_ssrc(local_ssrc);
+ rtcp_sender_.SetSsrc(local_ssrc);
+}
+
+RtpSendRates ModuleRtpRtcpImpl2::GetSendRates() const {
+ // Typically called on the `rtp_transport_queue_` owned by an
+ // RtpTransportControllerSendInterface instance.
+ return rtp_sender_->packet_sender.GetSendRates();
+}
+
+void ModuleRtpRtcpImpl2::OnRequestSendReport() {
+ SendRTCP(kRtcpSr);
+}
+
+void ModuleRtpRtcpImpl2::OnReceivedNack(
+ const std::vector<uint16_t>& nack_sequence_numbers) {
+ if (!rtp_sender_)
+ return;
+
+ if (!StorePackets() || nack_sequence_numbers.empty()) {
+ return;
+ }
+ // Use RTT from RtcpRttStats class if provided.
+ int64_t rtt = rtt_ms();
+ if (rtt == 0) {
+ rtcp_receiver_.RTT(rtcp_receiver_.RemoteSSRC(), NULL, &rtt, NULL, NULL);
+ }
+ rtp_sender_->packet_generator.OnReceivedNack(nack_sequence_numbers, rtt);
+}
+
+void ModuleRtpRtcpImpl2::OnReceivedRtcpReportBlocks(
+ const ReportBlockList& report_blocks) {
+ if (rtp_sender_) {
+ uint32_t ssrc = SSRC();
+ absl::optional<uint32_t> rtx_ssrc;
+ if (rtp_sender_->packet_generator.RtxStatus() != kRtxOff) {
+ rtx_ssrc = rtp_sender_->packet_generator.RtxSsrc();
+ }
+
+ for (const RTCPReportBlock& report_block : report_blocks) {
+ if (ssrc == report_block.source_ssrc) {
+ rtp_sender_->packet_generator.OnReceivedAckOnSsrc(
+ report_block.extended_highest_sequence_number);
+ } else if (rtx_ssrc && *rtx_ssrc == report_block.source_ssrc) {
+ rtp_sender_->packet_generator.OnReceivedAckOnRtxSsrc(
+ report_block.extended_highest_sequence_number);
+ }
+ }
+ }
+}
+
+void ModuleRtpRtcpImpl2::set_rtt_ms(int64_t rtt_ms) {
+ RTC_DCHECK_RUN_ON(worker_queue_);
+ {
+ MutexLock lock(&mutex_rtt_);
+ rtt_ms_ = rtt_ms;
+ }
+ if (rtp_sender_) {
+ rtp_sender_->packet_history.SetRtt(TimeDelta::Millis(rtt_ms));
+ }
+}
+
+int64_t ModuleRtpRtcpImpl2::rtt_ms() const {
+ MutexLock lock(&mutex_rtt_);
+ return rtt_ms_;
+}
+
+void ModuleRtpRtcpImpl2::SetVideoBitrateAllocation(
+ const VideoBitrateAllocation& bitrate) {
+ rtcp_sender_.SetVideoBitrateAllocation(bitrate);
+}
+
+RTPSender* ModuleRtpRtcpImpl2::RtpSender() {
+ return rtp_sender_ ? &rtp_sender_->packet_generator : nullptr;
+}
+
+const RTPSender* ModuleRtpRtcpImpl2::RtpSender() const {
+ return rtp_sender_ ? &rtp_sender_->packet_generator : nullptr;
+}
+
+void ModuleRtpRtcpImpl2::PeriodicUpdate() {
+ RTC_DCHECK_RUN_ON(worker_queue_);
+
+ Timestamp check_since = clock_->CurrentTime() - kRttUpdateInterval;
+ absl::optional<TimeDelta> rtt =
+ rtcp_receiver_.OnPeriodicRttUpdate(check_since, rtcp_sender_.Sending());
+ if (rtt) {
+ if (rtt_stats_) {
+ rtt_stats_->OnRttUpdate(rtt->ms());
+ }
+ set_rtt_ms(rtt->ms());
+ }
+}
+
+void ModuleRtpRtcpImpl2::MaybeSendRtcp() {
+ RTC_DCHECK_RUN_ON(worker_queue_);
+ if (rtcp_sender_.TimeToSendRTCPReport())
+ rtcp_sender_.SendRTCP(GetFeedbackState(), kRtcpReport);
+}
+
+// TODO(bugs.webrtc.org/12889): Consider removing this function when the issue
+// is resolved.
+void ModuleRtpRtcpImpl2::MaybeSendRtcpAtOrAfterTimestamp(
+ Timestamp execution_time) {
+ RTC_DCHECK_RUN_ON(worker_queue_);
+ Timestamp now = clock_->CurrentTime();
+ if (now >= execution_time) {
+ MaybeSendRtcp();
+ return;
+ }
+
+ TimeDelta delta = execution_time - now;
+  // The TaskQueue may run the task 1 ms early, so don't warn in that case.
+ if (delta > TimeDelta::Millis(1)) {
+ RTC_DLOG(LS_WARNING) << "BUGBUG: Task queue scheduled delayed call "
+ << delta << " too early.";
+ }
+
+ ScheduleMaybeSendRtcpAtOrAfterTimestamp(execution_time, delta);
+}
+
+void ModuleRtpRtcpImpl2::ScheduleRtcpSendEvaluation(TimeDelta duration) {
+  // We end up here under various sequences including the worker queue, and
+  // the RTCPSender lock is held.
+  // We assume that RTCPSender executing on sequences other than the worker
+  // queue on which it was created implies that external synchronization is
+  // present and removes this activity before destruction.
+ if (duration.IsZero()) {
+ worker_queue_->PostTask(SafeTask(task_safety_.flag(), [this] {
+ RTC_DCHECK_RUN_ON(worker_queue_);
+ MaybeSendRtcp();
+ }));
+ } else {
+ Timestamp execution_time = clock_->CurrentTime() + duration;
+ ScheduleMaybeSendRtcpAtOrAfterTimestamp(execution_time, duration);
+ }
+}
+
+void ModuleRtpRtcpImpl2::ScheduleMaybeSendRtcpAtOrAfterTimestamp(
+ Timestamp execution_time,
+ TimeDelta duration) {
+ // We end up here under various sequences including the worker queue, and
+ // the RTCPSender lock is held.
+ // See note in ScheduleRtcpSendEvaluation about why `worker_queue_` can be
+ // accessed.
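+  // The delay is rounded up to whole milliseconds, presumably so that task
+  // queues with millisecond precision do not run the task ahead of
+  // `execution_time` (MaybeSendRtcpAtOrAfterTimestamp() tolerates 1 ms).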
+ worker_queue_->PostDelayedTask(
+ SafeTask(task_safety_.flag(),
+ [this, execution_time] {
+ RTC_DCHECK_RUN_ON(worker_queue_);
+ MaybeSendRtcpAtOrAfterTimestamp(execution_time);
+ }),
+ duration.RoundUpTo(TimeDelta::Millis(1)));
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_rtcp_impl2.h b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_rtcp_impl2.h
new file mode 100644
index 0000000000..b94650a146
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_rtcp_impl2.h
@@ -0,0 +1,340 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_RTP_RTCP_SOURCE_RTP_RTCP_IMPL2_H_
+#define MODULES_RTP_RTCP_SOURCE_RTP_RTCP_IMPL2_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <memory>
+#include <set>
+#include <string>
+#include <vector>
+
+#include "absl/strings/string_view.h"
+#include "absl/types/optional.h"
+#include "api/rtp_headers.h"
+#include "api/sequence_checker.h"
+#include "api/task_queue/pending_task_safety_flag.h"
+#include "api/task_queue/task_queue_base.h"
+#include "api/units/time_delta.h"
+#include "api/video/video_bitrate_allocation.h"
+#include "modules/include/module_fec_types.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h" // RTCPPacketType
+#include "modules/rtp_rtcp/source/packet_sequencer.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/tmmb_item.h"
+#include "modules/rtp_rtcp/source/rtcp_receiver.h"
+#include "modules/rtp_rtcp/source/rtcp_sender.h"
+#include "modules/rtp_rtcp/source/rtp_packet_history.h"
+#include "modules/rtp_rtcp/source/rtp_packet_to_send.h"
+#include "modules/rtp_rtcp/source/rtp_sender.h"
+#include "modules/rtp_rtcp/source/rtp_sender_egress.h"
+#include "rtc_base/gtest_prod_util.h"
+#include "rtc_base/synchronization/mutex.h"
+#include "rtc_base/system/no_unique_address.h"
+#include "rtc_base/task_utils/repeating_task.h"
+#include "rtc_base/thread_annotations.h"
+
+namespace webrtc {
+
+class Clock;
+struct PacedPacketInfo;
+struct RTPVideoHeader;
+
+class ModuleRtpRtcpImpl2 final : public RtpRtcpInterface,
+ public RTCPReceiver::ModuleRtpRtcp {
+ public:
+ explicit ModuleRtpRtcpImpl2(
+ const RtpRtcpInterface::Configuration& configuration);
+ ~ModuleRtpRtcpImpl2() override;
+
+  // This method is provided to ease migration away from the RtpRtcp::Create
+  // factory method. Since this is an internal implementation detail, creating
+  // an instance of ModuleRtpRtcpImpl2 directly should be fine.
+ static std::unique_ptr<ModuleRtpRtcpImpl2> Create(
+ const Configuration& configuration);
+
+ // Receiver part.
+
+ // Called when we receive an RTCP packet.
+ void IncomingRtcpPacket(const uint8_t* incoming_packet,
+ size_t incoming_packet_length) override;
+
+ void SetRemoteSSRC(uint32_t ssrc) override;
+
+ void SetLocalSsrc(uint32_t local_ssrc) override;
+
+ // Sender part.
+ void RegisterSendPayloadFrequency(int payload_type,
+ int payload_frequency) override;
+
+ int32_t DeRegisterSendPayload(int8_t payload_type) override;
+
+ void SetExtmapAllowMixed(bool extmap_allow_mixed) override;
+
+ void RegisterRtpHeaderExtension(absl::string_view uri, int id) override;
+ void DeregisterSendRtpHeaderExtension(absl::string_view uri) override;
+
+ bool SupportsPadding() const override;
+ bool SupportsRtxPayloadPadding() const override;
+
+ // Get start timestamp.
+ uint32_t StartTimestamp() const override;
+
+  // Configure the start timestamp; the default is a random number.
+ void SetStartTimestamp(uint32_t timestamp) override;
+
+ uint16_t SequenceNumber() const override;
+
+  // Set the sequence number; the default is a random number.
+ void SetSequenceNumber(uint16_t seq) override;
+
+ void SetRtpState(const RtpState& rtp_state) override;
+ void SetRtxState(const RtpState& rtp_state) override;
+ RtpState GetRtpState() const override;
+ RtpState GetRtxState() const override;
+
+ void SetNonSenderRttMeasurement(bool enabled) override;
+
+ uint32_t SSRC() const override { return rtcp_sender_.SSRC(); }
+
+  // Semantically identical to `SSRC()`, but must be called on the packet
+  // delivery thread/task queue, and returns the SSRC that maps to
+  // RtpRtcpInterface::Configuration::local_media_ssrc.
+ uint32_t local_media_ssrc() const;
+
+ void SetMid(absl::string_view mid) override;
+
+ void SetCsrcs(const std::vector<uint32_t>& csrcs) override;
+
+ RTCPSender::FeedbackState GetFeedbackState();
+
+ void SetRtxSendStatus(int mode) override;
+ int RtxSendStatus() const override;
+ absl::optional<uint32_t> RtxSsrc() const override;
+
+ void SetRtxSendPayloadType(int payload_type,
+ int associated_payload_type) override;
+
+ absl::optional<uint32_t> FlexfecSsrc() const override;
+
+ // Sends kRtcpByeCode when going from true to false.
+ int32_t SetSendingStatus(bool sending) override;
+
+ bool Sending() const override;
+
+ // Drops or relays media packets.
+ void SetSendingMediaStatus(bool sending) override;
+
+ bool SendingMedia() const override;
+
+ bool IsAudioConfigured() const override;
+
+ void SetAsPartOfAllocation(bool part_of_allocation) override;
+
+ bool OnSendingRtpFrame(uint32_t timestamp,
+ int64_t capture_time_ms,
+ int payload_type,
+ bool force_sender_report) override;
+
+ bool TrySendPacket(RtpPacketToSend* packet,
+ const PacedPacketInfo& pacing_info) override;
+
+ void SetFecProtectionParams(const FecProtectionParams& delta_params,
+ const FecProtectionParams& key_params) override;
+
+ std::vector<std::unique_ptr<RtpPacketToSend>> FetchFecPackets() override;
+
+ void OnPacketsAcknowledged(
+ rtc::ArrayView<const uint16_t> sequence_numbers) override;
+
+ std::vector<std::unique_ptr<RtpPacketToSend>> GeneratePadding(
+ size_t target_size_bytes) override;
+
+ std::vector<RtpSequenceNumberMap::Info> GetSentRtpPacketInfos(
+ rtc::ArrayView<const uint16_t> sequence_numbers) const override;
+
+ size_t ExpectedPerPacketOverhead() const override;
+
+ void OnPacketSendingThreadSwitched() override;
+
+ // RTCP part.
+
+ // Get RTCP status.
+ RtcpMode RTCP() const override;
+
+  // Configure RTCP status, i.e. on/off.
+ void SetRTCPStatus(RtcpMode method) override;
+
+ // Set RTCP CName.
+ int32_t SetCNAME(absl::string_view c_name) override;
+
+ // Get remote NTP.
+ int32_t RemoteNTP(uint32_t* received_ntp_secs,
+ uint32_t* received_ntp_frac,
+ uint32_t* rtcp_arrival_time_secs,
+ uint32_t* rtcp_arrival_time_frac,
+ uint32_t* rtcp_timestamp) const override;
+
+ // Get RoundTripTime.
+ int32_t RTT(uint32_t remote_ssrc,
+ int64_t* rtt,
+ int64_t* avg_rtt,
+ int64_t* min_rtt,
+ int64_t* max_rtt) const override;
+
+ int64_t ExpectedRetransmissionTimeMs() const override;
+
+ // Force a send of an RTCP packet.
+ // Normal SR and RR are triggered via the task queue that's current when this
+ // object is created.
+ int32_t SendRTCP(RTCPPacketType rtcpPacketType) override;
+
+ void GetSendStreamDataCounters(
+ StreamDataCounters* rtp_counters,
+ StreamDataCounters* rtx_counters) const override;
+
+ void RemoteRTCPSenderInfo(uint32_t* packet_count,
+ uint32_t* octet_count,
+ int64_t* ntp_timestamp_ms,
+ int64_t* remote_ntp_timestamp_ms) const override;
+
+ // A snapshot of the most recent Report Block with additional data of
+ // interest to statistics. Used to implement RTCRemoteInboundRtpStreamStats.
+ // Within this list, the ReportBlockData::RTCPReportBlock::source_ssrc(),
+ // which is the SSRC of the corresponding outbound RTP stream, is unique.
+ std::vector<ReportBlockData> GetLatestReportBlockData() const override;
+ absl::optional<SenderReportStats> GetSenderReportStats() const override;
+ absl::optional<NonSenderRttStats> GetNonSenderRttStats() const override;
+
+ // (REMB) Receiver Estimated Max Bitrate.
+ void SetRemb(int64_t bitrate_bps, std::vector<uint32_t> ssrcs) override;
+ void UnsetRemb() override;
+
+ void SetTmmbn(std::vector<rtcp::TmmbItem> bounding_set) override;
+
+ size_t MaxRtpPacketSize() const override;
+
+ void SetMaxRtpPacketSize(size_t max_packet_size) override;
+
+ // (NACK) Negative acknowledgment part.
+
+ // Send a Negative acknowledgment packet.
+ // TODO(philipel): Deprecate SendNACK and use SendNack instead.
+ int32_t SendNACK(const uint16_t* nack_list, uint16_t size) override;
+
+ void SendNack(const std::vector<uint16_t>& sequence_numbers) override;
+
+  // Store the sent packets, needed to answer negative acknowledgment
+  // requests.
+ void SetStorePacketsStatus(bool enable, uint16_t number_to_store) override;
+
+ void SendCombinedRtcpPacket(
+ std::vector<std::unique_ptr<rtcp::RtcpPacket>> rtcp_packets) override;
+
+ // Video part.
+ int32_t SendLossNotification(uint16_t last_decoded_seq_num,
+ uint16_t last_received_seq_num,
+ bool decodability_flag,
+ bool buffering_allowed) override;
+
+ RtpSendRates GetSendRates() const override;
+
+ void OnReceivedNack(
+ const std::vector<uint16_t>& nack_sequence_numbers) override;
+ void OnReceivedRtcpReportBlocks(
+ const ReportBlockList& report_blocks) override;
+ void OnRequestSendReport() override;
+
+ void SetVideoBitrateAllocation(
+ const VideoBitrateAllocation& bitrate) override;
+
+ RTPSender* RtpSender() override;
+ const RTPSender* RtpSender() const override;
+
+ private:
+ FRIEND_TEST_ALL_PREFIXES(RtpRtcpImpl2Test, Rtt);
+ FRIEND_TEST_ALL_PREFIXES(RtpRtcpImpl2Test, RttForReceiverOnly);
+
+ struct RtpSenderContext {
+ explicit RtpSenderContext(const RtpRtcpInterface::Configuration& config);
+ // Storage of packets, for retransmissions and padding, if applicable.
+ RtpPacketHistory packet_history;
+ SequenceChecker sequencing_checker;
+ // Handles sequence number assignment and padding timestamp generation.
+ PacketSequencer sequencer RTC_GUARDED_BY(sequencing_checker);
+    // Handles final timestamping, stats, etc., and handover to Transport.
+ RtpSenderEgress packet_sender;
+    // If no paced sender is configured, this class is used to pass packets
+    // from `packet_generator` to `packet_sender`.
+ RtpSenderEgress::NonPacedPacketSender non_paced_sender;
+ // Handles creation of RTP packets to be sent.
+ RTPSender packet_generator;
+ };
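+  // Send-path sketch, inferred from the constructor wiring in the .cc file:
+  // `packet_generator` hands packets to the configured paced sender, or to
+  // `non_paced_sender` when none is given; either way each packet is
+  // sequenced via `sequencer` and ends up in `packet_sender` for final
+  // timestamping and handover to the Transport.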
+
+ void set_rtt_ms(int64_t rtt_ms);
+ int64_t rtt_ms() const;
+
+ bool TimeToSendFullNackList(int64_t now) const;
+
+  // Called on a timer, once a second, on the worker_queue_, to update the
+  // RTT, check if we need to send an RTCP report, send TMMBR updates, and
+  // fire events.
+ void PeriodicUpdate();
+
+ // Returns true if the module is configured to store packets.
+ bool StorePackets() const;
+
+ // Used from RtcpSenderMediator to maybe send rtcp.
+ void MaybeSendRtcp() RTC_RUN_ON(worker_queue_);
+
+ // Called when `rtcp_sender_` informs of the next RTCP instant. The method may
+  // be called on various sequences, and is called under the RTCPSender lock.
+ void ScheduleRtcpSendEvaluation(TimeDelta duration);
+
+  // Helper method that compensates for task queues firing delayed calls too
+  // early.
+ // TODO(bugs.webrtc.org/12889): Consider removing this function when the issue
+ // is resolved.
+ void MaybeSendRtcpAtOrAfterTimestamp(Timestamp execution_time)
+ RTC_RUN_ON(worker_queue_);
+
+ // Schedules a call to MaybeSendRtcpAtOrAfterTimestamp delayed by `duration`.
+ void ScheduleMaybeSendRtcpAtOrAfterTimestamp(Timestamp execution_time,
+ TimeDelta duration);
+
+ TaskQueueBase* const worker_queue_;
+ RTC_NO_UNIQUE_ADDRESS SequenceChecker rtcp_thread_checker_;
+
+ std::unique_ptr<RtpSenderContext> rtp_sender_;
+ RTCPSender rtcp_sender_;
+ RTCPReceiver rtcp_receiver_;
+
+ Clock* const clock_;
+
+ uint16_t packet_overhead_;
+
+ // Send side
+ int64_t nack_last_time_sent_full_ms_;
+ uint16_t nack_last_seq_number_sent_;
+
+ RtcpRttStats* const rtt_stats_;
+ RepeatingTaskHandle rtt_update_task_ RTC_GUARDED_BY(worker_queue_);
+
+ // The processed RTT from RtcpRttStats.
+ mutable Mutex mutex_rtt_;
+ int64_t rtt_ms_ RTC_GUARDED_BY(mutex_rtt_);
+
+ RTC_NO_UNIQUE_ADDRESS ScopedTaskSafety task_safety_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_RTP_RTCP_SOURCE_RTP_RTCP_IMPL2_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_rtcp_impl2_unittest.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_rtcp_impl2_unittest.cc
new file mode 100644
index 0000000000..4c08ce5c13
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_rtcp_impl2_unittest.cc
@@ -0,0 +1,1159 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtp_rtcp_impl2.h"
+
+#include <deque>
+#include <map>
+#include <memory>
+#include <set>
+#include <utility>
+
+#include "absl/types/optional.h"
+#include "api/transport/field_trial_based_config.h"
+#include "api/units/time_delta.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "modules/rtp_rtcp/source/rtcp_packet.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/nack.h"
+#include "modules/rtp_rtcp/source/rtp_packet_received.h"
+#include "modules/rtp_rtcp/source/rtp_rtcp_interface.h"
+#include "modules/rtp_rtcp/source/rtp_sender_video.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/rate_limiter.h"
+#include "rtc_base/strings/string_builder.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "test/rtcp_packet_parser.h"
+#include "test/run_loop.h"
+#include "test/time_controller/simulated_time_controller.h"
+
+using ::testing::AllOf;
+using ::testing::ElementsAre;
+using ::testing::Eq;
+using ::testing::Field;
+using ::testing::Gt;
+using ::testing::Not;
+using ::testing::Optional;
+using ::testing::SizeIs;
+
+namespace webrtc {
+namespace {
+constexpr uint32_t kSenderSsrc = 0x12345;
+constexpr uint32_t kReceiverSsrc = 0x23456;
+constexpr uint32_t kRtxSenderSsrc = 0x12346;
+constexpr TimeDelta kOneWayNetworkDelay = TimeDelta::Millis(100);
+constexpr uint8_t kBaseLayerTid = 0;
+constexpr uint8_t kHigherLayerTid = 1;
+constexpr uint16_t kSequenceNumber = 100;
+constexpr uint8_t kPayloadType = 100;
+constexpr uint8_t kRtxPayloadType = 98;
+constexpr int kWidth = 320;
+constexpr int kHeight = 100;
+constexpr int kCaptureTimeMsToRtpTimestamp = 90; // 90 kHz clock.
+constexpr TimeDelta kDefaultReportInterval = TimeDelta::Millis(1000);
+
+// RTP header extension ids.
+enum : int {
+ kAbsoluteSendTimeExtensionId = 1,
+ kTransportSequenceNumberExtensionId,
+ kTransmissionOffsetExtensionId,
+};
+
+class RtcpRttStatsTestImpl : public RtcpRttStats {
+ public:
+ RtcpRttStatsTestImpl() : rtt_ms_(0) {}
+ ~RtcpRttStatsTestImpl() override = default;
+
+ void OnRttUpdate(int64_t rtt_ms) override { rtt_ms_ = rtt_ms; }
+ int64_t LastProcessedRtt() const override { return rtt_ms_; }
+ int64_t rtt_ms_;
+};
+
+// TODO(bugs.webrtc.org/11581): remove inheritance once the ModuleRtpRtcpImpl2
+// Module/ProcessThread dependency is gone.
+class SendTransport : public Transport,
+ public sim_time_impl::SimulatedSequenceRunner {
+ public:
+ SendTransport(TimeDelta delay, GlobalSimulatedTimeController* time_controller)
+ : receiver_(nullptr),
+ time_controller_(time_controller),
+ delay_(delay),
+ rtp_packets_sent_(0),
+ rtcp_packets_sent_(0),
+ last_packet_(&header_extensions_) {
+ time_controller_->Register(this);
+ }
+
+ ~SendTransport() { time_controller_->Unregister(this); }
+
+ void SetRtpRtcpModule(ModuleRtpRtcpImpl2* receiver) { receiver_ = receiver; }
+ void SimulateNetworkDelay(TimeDelta delay) { delay_ = delay; }
+ bool SendRtp(const uint8_t* data,
+ size_t len,
+ const PacketOptions& options) override {
+ EXPECT_TRUE(last_packet_.Parse(data, len));
+ ++rtp_packets_sent_;
+ return true;
+ }
+ bool SendRtcp(const uint8_t* data, size_t len) override {
+ test::RtcpPacketParser parser;
+ parser.Parse(data, len);
+ last_nack_list_ = parser.nack()->packet_ids();
+ Timestamp current_time = time_controller_->GetClock()->CurrentTime();
+ Timestamp delivery_time = current_time + delay_;
+ rtcp_packets_.push_back(
+ Packet{delivery_time, std::vector<uint8_t>(data, data + len)});
+ ++rtcp_packets_sent_;
+ RunReady(current_time);
+ return true;
+ }
+
+ // sim_time_impl::SimulatedSequenceRunner
+ Timestamp GetNextRunTime() const override {
+ if (!rtcp_packets_.empty())
+ return rtcp_packets_.front().send_time;
+ return Timestamp::PlusInfinity();
+ }
+ void RunReady(Timestamp at_time) override {
+ while (!rtcp_packets_.empty() &&
+ rtcp_packets_.front().send_time <= at_time) {
+ Packet packet = std::move(rtcp_packets_.front());
+ rtcp_packets_.pop_front();
+ EXPECT_TRUE(receiver_);
+ receiver_->IncomingRtcpPacket(packet.data.data(), packet.data.size());
+ }
+ }
+ TaskQueueBase* GetAsTaskQueue() override {
+ return reinterpret_cast<TaskQueueBase*>(this);
+ }
+
+ size_t NumRtcpSent() { return rtcp_packets_sent_; }
+
+ ModuleRtpRtcpImpl2* receiver_;
+ GlobalSimulatedTimeController* const time_controller_;
+ TimeDelta delay_;
+ int rtp_packets_sent_;
+ size_t rtcp_packets_sent_;
+ std::vector<uint16_t> last_nack_list_;
+ RtpHeaderExtensionMap header_extensions_;
+ RtpPacketReceived last_packet_;
+ struct Packet {
+ Timestamp send_time;
+ std::vector<uint8_t> data;
+ };
+ std::deque<Packet> rtcp_packets_;
+};
+
+struct TestConfig {
+ explicit TestConfig(bool with_overhead) : with_overhead(with_overhead) {}
+
+ bool with_overhead = false;
+};
+
+class FieldTrialConfig : public FieldTrialsView {
+ public:
+ static FieldTrialConfig GetFromTestConfig(const TestConfig& config) {
+ FieldTrialConfig trials;
+ trials.overhead_enabled_ = config.with_overhead;
+ return trials;
+ }
+
+ FieldTrialConfig() : overhead_enabled_(false) {}
+ ~FieldTrialConfig() override {}
+
+ void SetOverHeadEnabled(bool enabled) { overhead_enabled_ = enabled; }
+
+ std::string Lookup(absl::string_view key) const override {
+ if (key == "WebRTC-SendSideBwe-WithOverhead") {
+ return overhead_enabled_ ? "Enabled" : "Disabled";
+ }
+ return "";
+ }
+
+ private:
+ bool overhead_enabled_;
+};
+
+class RtpRtcpModule : public RtcpPacketTypeCounterObserver,
+ public SendPacketObserver {
+ public:
+ struct SentPacket {
+ SentPacket(uint16_t packet_id, int64_t capture_time_ms, uint32_t ssrc)
+ : packet_id(packet_id), capture_time_ms(capture_time_ms), ssrc(ssrc) {}
+ uint16_t packet_id;
+ int64_t capture_time_ms;
+ uint32_t ssrc;
+ };
+
+ RtpRtcpModule(GlobalSimulatedTimeController* time_controller,
+ bool is_sender,
+ const FieldTrialConfig& trials)
+ : time_controller_(time_controller),
+ is_sender_(is_sender),
+ trials_(trials),
+ receive_statistics_(
+ ReceiveStatistics::Create(time_controller->GetClock())),
+ transport_(kOneWayNetworkDelay, time_controller) {
+ CreateModuleImpl();
+ }
+
+ TimeController* const time_controller_;
+ const bool is_sender_;
+ const FieldTrialConfig& trials_;
+ RtcpPacketTypeCounter packets_sent_;
+ RtcpPacketTypeCounter packets_received_;
+ std::unique_ptr<ReceiveStatistics> receive_statistics_;
+ SendTransport transport_;
+ RtcpRttStatsTestImpl rtt_stats_;
+ std::unique_ptr<ModuleRtpRtcpImpl2> impl_;
+
+ void RtcpPacketTypesCounterUpdated(
+ uint32_t ssrc,
+ const RtcpPacketTypeCounter& packet_counter) override {
+ counter_map_[ssrc] = packet_counter;
+ }
+
+ void OnSendPacket(uint16_t packet_id,
+ int64_t capture_time_ms,
+ uint32_t ssrc) override {
+ last_sent_packet_.emplace(packet_id, capture_time_ms, ssrc);
+ }
+
+ absl::optional<SentPacket> last_sent_packet() const {
+ return last_sent_packet_;
+ }
+
+ RtcpPacketTypeCounter RtcpSent() {
+ // RTCP counters for remote SSRC.
+ return counter_map_[is_sender_ ? kReceiverSsrc : kSenderSsrc];
+ }
+
+ RtcpPacketTypeCounter RtcpReceived() {
+ // Received RTCP stats for (own) local SSRC.
+ return counter_map_[impl_->SSRC()];
+ }
+ int RtpSent() { return transport_.rtp_packets_sent_; }
+ uint16_t LastRtpSequenceNumber() { return last_packet().SequenceNumber(); }
+ std::vector<uint16_t> LastNackListSent() {
+ return transport_.last_nack_list_;
+ }
+ void SetRtcpReportIntervalAndReset(TimeDelta rtcp_report_interval) {
+ rtcp_report_interval_ = rtcp_report_interval;
+ CreateModuleImpl();
+ }
+ const RtpPacketReceived& last_packet() { return transport_.last_packet_; }
+ void RegisterHeaderExtension(absl::string_view uri, int id) {
+ impl_->RegisterRtpHeaderExtension(uri, id);
+ transport_.header_extensions_.RegisterByUri(id, uri);
+ transport_.last_packet_.IdentifyExtensions(transport_.header_extensions_);
+ }
+ void ReinintWithFec(VideoFecGenerator* fec_generator) {
+ fec_generator_ = fec_generator;
+ CreateModuleImpl();
+ }
+
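+  // (Re)creates the wrapped module with the current settings. The transport,
+  // receive statistics and counters are reused across calls, which is what
+  // lets SetRtcpReportIntervalAndReset() and ReinitWithFec() swap
+  // configuration without rebuilding the whole test harness.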
+ void CreateModuleImpl() {
+ RtpRtcpInterface::Configuration config;
+ config.audio = false;
+ config.clock = time_controller_->GetClock();
+ config.outgoing_transport = &transport_;
+ config.receive_statistics = receive_statistics_.get();
+ config.rtcp_packet_type_counter_observer = this;
+ config.rtt_stats = &rtt_stats_;
+ config.rtcp_report_interval_ms = rtcp_report_interval_.ms();
+ config.local_media_ssrc = is_sender_ ? kSenderSsrc : kReceiverSsrc;
+ config.rtx_send_ssrc =
+ is_sender_ ? absl::make_optional(kRtxSenderSsrc) : absl::nullopt;
+ config.need_rtp_packet_infos = true;
+ config.non_sender_rtt_measurement = true;
+ config.field_trials = &trials_;
+ config.send_packet_observer = this;
+ config.fec_generator = fec_generator_;
+ impl_.reset(new ModuleRtpRtcpImpl2(config));
+ impl_->SetRemoteSSRC(is_sender_ ? kReceiverSsrc : kSenderSsrc);
+ impl_->SetRTCPStatus(RtcpMode::kCompound);
+ }
+
+ private:
+ std::map<uint32_t, RtcpPacketTypeCounter> counter_map_;
+ absl::optional<SentPacket> last_sent_packet_;
+ VideoFecGenerator* fec_generator_ = nullptr;
+ TimeDelta rtcp_report_interval_ = kDefaultReportInterval;
+};
+} // namespace
+
+class RtpRtcpImpl2Test : public ::testing::TestWithParam<TestConfig> {
+ protected:
+ RtpRtcpImpl2Test()
+ : time_controller_(Timestamp::Micros(133590000000000)),
+ field_trials_(FieldTrialConfig::GetFromTestConfig(GetParam())),
+ sender_(&time_controller_,
+ /*is_sender=*/true,
+ field_trials_),
+ receiver_(&time_controller_,
+ /*is_sender=*/false,
+ field_trials_) {}
+
+ void SetUp() override {
+ // Send module.
+ EXPECT_EQ(0, sender_.impl_->SetSendingStatus(true));
+ sender_.impl_->SetSendingMediaStatus(true);
+ sender_.impl_->SetSequenceNumber(kSequenceNumber);
+ sender_.impl_->SetStorePacketsStatus(true, 100);
+
+ RTPSenderVideo::Config video_config;
+ video_config.clock = time_controller_.GetClock();
+ video_config.rtp_sender = sender_.impl_->RtpSender();
+ video_config.field_trials = &field_trials_;
+ sender_video_ = std::make_unique<RTPSenderVideo>(video_config);
+
+ // Receive module.
+ EXPECT_EQ(0, receiver_.impl_->SetSendingStatus(false));
+ receiver_.impl_->SetSendingMediaStatus(false);
+ // Transport settings.
+ sender_.transport_.SetRtpRtcpModule(receiver_.impl_.get());
+ receiver_.transport_.SetRtpRtcpModule(sender_.impl_.get());
+ }
+
+ void AdvanceTime(TimeDelta duration) {
+ time_controller_.AdvanceTime(duration);
+ }
+
+ void ReinitWithFec(VideoFecGenerator* fec_generator,
+ absl::optional<int> red_payload_type) {
+    sender_.ReinitWithFec(fec_generator);
+ EXPECT_EQ(0, sender_.impl_->SetSendingStatus(true));
+ sender_.impl_->SetSendingMediaStatus(true);
+ sender_.impl_->SetSequenceNumber(kSequenceNumber);
+ sender_.impl_->SetStorePacketsStatus(true, 100);
+ receiver_.transport_.SetRtpRtcpModule(sender_.impl_.get());
+
+ RTPSenderVideo::Config video_config;
+ video_config.clock = time_controller_.GetClock();
+ video_config.rtp_sender = sender_.impl_->RtpSender();
+ video_config.field_trials = &field_trials_;
+ video_config.fec_overhead_bytes = fec_generator->MaxPacketOverhead();
+ video_config.fec_type = fec_generator->GetFecType();
+ video_config.red_payload_type = red_payload_type;
+ sender_video_ = std::make_unique<RTPSenderVideo>(video_config);
+ }
+
+ GlobalSimulatedTimeController time_controller_;
+ FieldTrialConfig field_trials_;
+ RtpRtcpModule sender_;
+ std::unique_ptr<RTPSenderVideo> sender_video_;
+ RtpRtcpModule receiver_;
+
+ bool SendFrame(const RtpRtcpModule* module,
+ RTPSenderVideo* sender,
+ uint8_t tid) {
+ int64_t now_ms = time_controller_.GetClock()->TimeInMilliseconds();
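+    // Convert the capture time to the RTP media clock; for video this is the
+    // 90 kHz clock, so kCaptureTimeMsToRtpTimestamp is the ms-to-ticks factor.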
+ return SendFrame(
+ module, sender, tid,
+ static_cast<uint32_t>(now_ms * kCaptureTimeMsToRtpTimestamp), now_ms);
+ }
+
+ bool SendFrame(const RtpRtcpModule* module,
+ RTPSenderVideo* sender,
+ uint8_t tid,
+ uint32_t rtp_timestamp,
+ int64_t capture_time_ms) {
+ RTPVideoHeaderVP8 vp8_header = {};
+ vp8_header.temporalIdx = tid;
+ RTPVideoHeader rtp_video_header;
+ rtp_video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ rtp_video_header.width = kWidth;
+ rtp_video_header.height = kHeight;
+ rtp_video_header.rotation = kVideoRotation_0;
+ rtp_video_header.content_type = VideoContentType::UNSPECIFIED;
+ rtp_video_header.playout_delay = {-1, -1};
+ rtp_video_header.is_first_packet_in_frame = true;
+ rtp_video_header.simulcastIdx = 0;
+ rtp_video_header.codec = kVideoCodecVP8;
+ rtp_video_header.video_type_header = vp8_header;
+ rtp_video_header.video_timing = {0u, 0u, 0u, 0u, 0u, 0u, false};
+
+ const uint8_t payload[100] = {0};
+ bool success = module->impl_->OnSendingRtpFrame(0, 0, kPayloadType, true);
+
+ success &= sender->SendVideo(kPayloadType, VideoCodecType::kVideoCodecVP8,
+ rtp_timestamp, capture_time_ms, payload,
+ rtp_video_header, 0);
+ return success;
+ }
+
+ void IncomingRtcpNack(const RtpRtcpModule* module, uint16_t sequence_number) {
+ bool sender = module->impl_->SSRC() == kSenderSsrc;
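+    // Build the NACK as if it came from the remote peer: the sender SSRC is
+    // the remote module's, and the media SSRC is the stream being NACKed.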
+ rtcp::Nack nack;
+ uint16_t list[1];
+ list[0] = sequence_number;
+ const uint16_t kListLength = sizeof(list) / sizeof(list[0]);
+ nack.SetSenderSsrc(sender ? kReceiverSsrc : kSenderSsrc);
+ nack.SetMediaSsrc(sender ? kSenderSsrc : kReceiverSsrc);
+ nack.SetPacketIds(list, kListLength);
+ rtc::Buffer packet = nack.Build();
+ module->impl_->IncomingRtcpPacket(packet.data(), packet.size());
+ }
+};
+
+TEST_P(RtpRtcpImpl2Test, RetransmitsAllLayers) {
+ // Send frames.
+ EXPECT_EQ(0, sender_.RtpSent());
+ EXPECT_TRUE(SendFrame(&sender_, sender_video_.get(),
+ kBaseLayerTid)); // kSequenceNumber
+ EXPECT_TRUE(SendFrame(&sender_, sender_video_.get(),
+ kHigherLayerTid)); // kSequenceNumber + 1
+ EXPECT_TRUE(SendFrame(&sender_, sender_video_.get(),
+ kNoTemporalIdx)); // kSequenceNumber + 2
+ EXPECT_EQ(3, sender_.RtpSent());
+ EXPECT_EQ(kSequenceNumber + 2, sender_.LastRtpSequenceNumber());
+
+  // Minimum required delay until retransmit is 5 ms + RTT (RTT = 0 here).
+ AdvanceTime(TimeDelta::Millis(5));
+
+ // Frame with kBaseLayerTid re-sent.
+ IncomingRtcpNack(&sender_, kSequenceNumber);
+ EXPECT_EQ(4, sender_.RtpSent());
+ EXPECT_EQ(kSequenceNumber, sender_.LastRtpSequenceNumber());
+ // Frame with kHigherLayerTid re-sent.
+ IncomingRtcpNack(&sender_, kSequenceNumber + 1);
+ EXPECT_EQ(5, sender_.RtpSent());
+ EXPECT_EQ(kSequenceNumber + 1, sender_.LastRtpSequenceNumber());
+ // Frame with kNoTemporalIdx re-sent.
+ IncomingRtcpNack(&sender_, kSequenceNumber + 2);
+ EXPECT_EQ(6, sender_.RtpSent());
+ EXPECT_EQ(kSequenceNumber + 2, sender_.LastRtpSequenceNumber());
+}
+
+TEST_P(RtpRtcpImpl2Test, Rtt) {
+ RtpPacketReceived packet;
+ packet.SetTimestamp(1);
+ packet.SetSequenceNumber(123);
+ packet.SetSsrc(kSenderSsrc);
+ packet.AllocatePayload(100 - 12);
+ receiver_.receive_statistics_->OnRtpPacket(packet);
+
+ // Send Frame before sending an SR.
+ EXPECT_TRUE(SendFrame(&sender_, sender_video_.get(), kBaseLayerTid));
+ // Sender module should send an SR.
+ EXPECT_EQ(0, sender_.impl_->SendRTCP(kRtcpReport));
+ AdvanceTime(kOneWayNetworkDelay);
+
+  // Receiver module should send an RR with a response to the last received SR.
+ EXPECT_EQ(0, receiver_.impl_->SendRTCP(kRtcpReport));
+ AdvanceTime(kOneWayNetworkDelay);
+
+ // Verify RTT.
+ int64_t rtt;
+ int64_t avg_rtt;
+ int64_t min_rtt;
+ int64_t max_rtt;
+ EXPECT_EQ(
+ 0, sender_.impl_->RTT(kReceiverSsrc, &rtt, &avg_rtt, &min_rtt, &max_rtt));
+ EXPECT_NEAR(2 * kOneWayNetworkDelay.ms(), rtt, 1);
+ EXPECT_NEAR(2 * kOneWayNetworkDelay.ms(), avg_rtt, 1);
+ EXPECT_NEAR(2 * kOneWayNetworkDelay.ms(), min_rtt, 1);
+ EXPECT_NEAR(2 * kOneWayNetworkDelay.ms(), max_rtt, 1);
+
+  // No RTT from other SSRC.
+ EXPECT_EQ(-1, sender_.impl_->RTT(kReceiverSsrc + 1, &rtt, &avg_rtt, &min_rtt,
+ &max_rtt));
+
+ // Verify RTT from rtt_stats config.
+ EXPECT_EQ(0, sender_.rtt_stats_.LastProcessedRtt());
+ EXPECT_EQ(0, sender_.impl_->rtt_ms());
+ AdvanceTime(TimeDelta::Millis(1000));
+
+ EXPECT_NEAR(2 * kOneWayNetworkDelay.ms(),
+ sender_.rtt_stats_.LastProcessedRtt(), 1);
+ EXPECT_NEAR(2 * kOneWayNetworkDelay.ms(), sender_.impl_->rtt_ms(), 1);
+}
+
+TEST_P(RtpRtcpImpl2Test, RttForReceiverOnly) {
+  // Receiver module should send a Receiver Reference Time Report (RRTR).
+ EXPECT_EQ(0, receiver_.impl_->SendRTCP(kRtcpReport));
+
+  // Sender module should send a response to the last received RRTR (DLRR).
+ AdvanceTime(TimeDelta::Millis(1000));
+  // Send Frame before sending an SR.
+ EXPECT_TRUE(SendFrame(&sender_, sender_video_.get(), kBaseLayerTid));
+ EXPECT_EQ(0, sender_.impl_->SendRTCP(kRtcpReport));
+
+ // Verify RTT.
+ EXPECT_EQ(0, receiver_.rtt_stats_.LastProcessedRtt());
+ EXPECT_EQ(0, receiver_.impl_->rtt_ms());
+ AdvanceTime(TimeDelta::Millis(1000));
+ EXPECT_NEAR(2 * kOneWayNetworkDelay.ms(),
+ receiver_.rtt_stats_.LastProcessedRtt(), 1);
+ EXPECT_NEAR(2 * kOneWayNetworkDelay.ms(), receiver_.impl_->rtt_ms(), 1);
+}
+
+TEST_P(RtpRtcpImpl2Test, NoSrBeforeMedia) {
+ // Ignore fake transport delays in this test.
+ sender_.transport_.SimulateNetworkDelay(TimeDelta::Zero());
+ receiver_.transport_.SimulateNetworkDelay(TimeDelta::Zero());
+
+  // Move ahead to the instant an RTCP is expected.
+  // Verify no SR is sent before media has been sent; an RR should still be
+  // sent from the receiving module though.
+ AdvanceTime(kDefaultReportInterval / 2);
+ EXPECT_EQ(sender_.transport_.NumRtcpSent(), 0u);
+ EXPECT_EQ(receiver_.transport_.NumRtcpSent(), 1u);
+
+ // RTCP should be triggered by the RTP send.
+ EXPECT_TRUE(SendFrame(&sender_, sender_video_.get(), kBaseLayerTid));
+ EXPECT_EQ(sender_.transport_.NumRtcpSent(), 1u);
+}
+
+TEST_P(RtpRtcpImpl2Test, RtcpPacketTypeCounter_Nack) {
+ EXPECT_EQ(0U, sender_.RtcpReceived().nack_packets);
+ EXPECT_EQ(0U, receiver_.RtcpSent().nack_packets);
+
+ // Receive module sends a NACK.
+ const uint16_t kNackLength = 1;
+ uint16_t nack_list[kNackLength] = {123};
+ EXPECT_EQ(0, receiver_.impl_->SendNACK(nack_list, kNackLength));
+ AdvanceTime(kOneWayNetworkDelay);
+ EXPECT_EQ(1U, receiver_.RtcpSent().nack_packets);
+
+ // Send module receives the NACK.
+ EXPECT_EQ(1U, sender_.RtcpReceived().nack_packets);
+}
+
+TEST_P(RtpRtcpImpl2Test, AddStreamDataCounters) {
+ StreamDataCounters rtp;
+ const int64_t kStartTimeMs = 1;
+ rtp.first_packet_time_ms = kStartTimeMs;
+ rtp.transmitted.packets = 1;
+ rtp.transmitted.payload_bytes = 1;
+ rtp.transmitted.header_bytes = 2;
+ rtp.transmitted.padding_bytes = 3;
+ EXPECT_EQ(rtp.transmitted.TotalBytes(), rtp.transmitted.payload_bytes +
+ rtp.transmitted.header_bytes +
+ rtp.transmitted.padding_bytes);
+
+ StreamDataCounters rtp2;
+ rtp2.first_packet_time_ms = -1;
+ rtp2.transmitted.packets = 10;
+ rtp2.transmitted.payload_bytes = 10;
+ rtp2.retransmitted.header_bytes = 4;
+ rtp2.retransmitted.payload_bytes = 5;
+ rtp2.retransmitted.padding_bytes = 6;
+ rtp2.retransmitted.packets = 7;
+ rtp2.fec.packets = 8;
+
+ StreamDataCounters sum = rtp;
+ sum.Add(rtp2);
+ EXPECT_EQ(kStartTimeMs, sum.first_packet_time_ms);
+ EXPECT_EQ(11U, sum.transmitted.packets);
+ EXPECT_EQ(11U, sum.transmitted.payload_bytes);
+ EXPECT_EQ(2U, sum.transmitted.header_bytes);
+ EXPECT_EQ(3U, sum.transmitted.padding_bytes);
+ EXPECT_EQ(4U, sum.retransmitted.header_bytes);
+ EXPECT_EQ(5U, sum.retransmitted.payload_bytes);
+ EXPECT_EQ(6U, sum.retransmitted.padding_bytes);
+ EXPECT_EQ(7U, sum.retransmitted.packets);
+ EXPECT_EQ(8U, sum.fec.packets);
+ EXPECT_EQ(sum.transmitted.TotalBytes(),
+ rtp.transmitted.TotalBytes() + rtp2.transmitted.TotalBytes());
+
+ StreamDataCounters rtp3;
+ rtp3.first_packet_time_ms = kStartTimeMs + 10;
+ sum.Add(rtp3);
+ EXPECT_EQ(kStartTimeMs, sum.first_packet_time_ms); // Holds oldest time.
+}
+
+TEST_P(RtpRtcpImpl2Test, SendsInitialNackList) {
+ // Send module sends a NACK.
+ const uint16_t kNackLength = 1;
+ uint16_t nack_list[kNackLength] = {123};
+ EXPECT_EQ(0U, sender_.RtcpSent().nack_packets);
+ // Send Frame before sending a compound RTCP that starts with SR.
+ EXPECT_TRUE(SendFrame(&sender_, sender_video_.get(), kBaseLayerTid));
+ EXPECT_EQ(0, sender_.impl_->SendNACK(nack_list, kNackLength));
+ EXPECT_EQ(1U, sender_.RtcpSent().nack_packets);
+ EXPECT_THAT(sender_.LastNackListSent(), ElementsAre(123));
+}
+
+TEST_P(RtpRtcpImpl2Test, SendsExtendedNackList) {
+ // Send module sends a NACK.
+ const uint16_t kNackLength = 1;
+ uint16_t nack_list[kNackLength] = {123};
+ EXPECT_EQ(0U, sender_.RtcpSent().nack_packets);
+ // Send Frame before sending a compound RTCP that starts with SR.
+ EXPECT_TRUE(SendFrame(&sender_, sender_video_.get(), kBaseLayerTid));
+ EXPECT_EQ(0, sender_.impl_->SendNACK(nack_list, kNackLength));
+ EXPECT_EQ(1U, sender_.RtcpSent().nack_packets);
+ EXPECT_THAT(sender_.LastNackListSent(), ElementsAre(123));
+
+  // Same list is not re-sent.
+ EXPECT_EQ(0, sender_.impl_->SendNACK(nack_list, kNackLength));
+ EXPECT_EQ(1U, sender_.RtcpSent().nack_packets);
+ EXPECT_THAT(sender_.LastNackListSent(), ElementsAre(123));
+
+ // Only extended list sent.
+ const uint16_t kNackExtLength = 2;
+ uint16_t nack_list_ext[kNackExtLength] = {123, 124};
+ EXPECT_EQ(0, sender_.impl_->SendNACK(nack_list_ext, kNackExtLength));
+ EXPECT_EQ(2U, sender_.RtcpSent().nack_packets);
+ EXPECT_THAT(sender_.LastNackListSent(), ElementsAre(124));
+}
+
+TEST_P(RtpRtcpImpl2Test, ReSendsNackListAfterRttMs) {
+ sender_.transport_.SimulateNetworkDelay(TimeDelta::Zero());
+ // Send module sends a NACK.
+ const uint16_t kNackLength = 2;
+ uint16_t nack_list[kNackLength] = {123, 125};
+ EXPECT_EQ(0U, sender_.RtcpSent().nack_packets);
+ // Send Frame before sending a compound RTCP that starts with SR.
+ EXPECT_TRUE(SendFrame(&sender_, sender_video_.get(), kBaseLayerTid));
+ EXPECT_EQ(0, sender_.impl_->SendNACK(nack_list, kNackLength));
+ EXPECT_EQ(1U, sender_.RtcpSent().nack_packets);
+ EXPECT_THAT(sender_.LastNackListSent(), ElementsAre(123, 125));
+
+  // Same list is not re-sent; the RTT interval has not passed.
+ const TimeDelta kStartupRtt = TimeDelta::Millis(100);
+ AdvanceTime(kStartupRtt);
+ EXPECT_EQ(0, sender_.impl_->SendNACK(nack_list, kNackLength));
+ EXPECT_EQ(1U, sender_.RtcpSent().nack_packets);
+
+  // RTT interval passed; full list sent.
+ AdvanceTime(TimeDelta::Millis(1));
+ EXPECT_EQ(0, sender_.impl_->SendNACK(nack_list, kNackLength));
+ EXPECT_EQ(2U, sender_.RtcpSent().nack_packets);
+ EXPECT_THAT(sender_.LastNackListSent(), ElementsAre(123, 125));
+}
+
+TEST_P(RtpRtcpImpl2Test, UniqueNackRequests) {
+ receiver_.transport_.SimulateNetworkDelay(TimeDelta::Zero());
+ EXPECT_EQ(0U, receiver_.RtcpSent().nack_packets);
+ EXPECT_EQ(0U, receiver_.RtcpSent().nack_requests);
+ EXPECT_EQ(0U, receiver_.RtcpSent().unique_nack_requests);
+ EXPECT_EQ(0, receiver_.RtcpSent().UniqueNackRequestsInPercent());
+
+ // Receive module sends NACK request.
+ const uint16_t kNackLength = 4;
+ uint16_t nack_list[kNackLength] = {10, 11, 13, 18};
+ EXPECT_EQ(0, receiver_.impl_->SendNACK(nack_list, kNackLength));
+ EXPECT_EQ(1U, receiver_.RtcpSent().nack_packets);
+ EXPECT_EQ(4U, receiver_.RtcpSent().nack_requests);
+ EXPECT_EQ(4U, receiver_.RtcpSent().unique_nack_requests);
+ EXPECT_THAT(receiver_.LastNackListSent(), ElementsAre(10, 11, 13, 18));
+
+ // Send module receives the request.
+ EXPECT_EQ(1U, sender_.RtcpReceived().nack_packets);
+ EXPECT_EQ(4U, sender_.RtcpReceived().nack_requests);
+ EXPECT_EQ(4U, sender_.RtcpReceived().unique_nack_requests);
+ EXPECT_EQ(100, sender_.RtcpReceived().UniqueNackRequestsInPercent());
+
+ // Receive module sends new request with duplicated packets.
+ const TimeDelta kStartupRtt = TimeDelta::Millis(100);
+ AdvanceTime(kStartupRtt + TimeDelta::Millis(1));
+ const uint16_t kNackLength2 = 4;
+ uint16_t nack_list2[kNackLength2] = {11, 18, 20, 21};
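+  // {11, 18} repeat the first request, so only {20, 21} are new: 8 requests
+  // in total, 6 unique, i.e. 75% unique as asserted below.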
+ EXPECT_EQ(0, receiver_.impl_->SendNACK(nack_list2, kNackLength2));
+ EXPECT_EQ(2U, receiver_.RtcpSent().nack_packets);
+ EXPECT_EQ(8U, receiver_.RtcpSent().nack_requests);
+ EXPECT_EQ(6U, receiver_.RtcpSent().unique_nack_requests);
+ EXPECT_THAT(receiver_.LastNackListSent(), ElementsAre(11, 18, 20, 21));
+
+ // Send module receives the request.
+ EXPECT_EQ(2U, sender_.RtcpReceived().nack_packets);
+ EXPECT_EQ(8U, sender_.RtcpReceived().nack_requests);
+ EXPECT_EQ(6U, sender_.RtcpReceived().unique_nack_requests);
+ EXPECT_EQ(75, sender_.RtcpReceived().UniqueNackRequestsInPercent());
+}
+
+TEST_P(RtpRtcpImpl2Test, ConfigurableRtcpReportInterval) {
+ const TimeDelta kVideoReportInterval = TimeDelta::Millis(3000);
+
+ // Recreate sender impl with new configuration, and redo setup.
+ sender_.SetRtcpReportIntervalAndReset(kVideoReportInterval);
+ SetUp();
+
+ EXPECT_TRUE(SendFrame(&sender_, sender_video_.get(), kBaseLayerTid));
+
+ // Initial state
+ EXPECT_EQ(0u, sender_.transport_.NumRtcpSent());
+
+  // Move ahead to the last ms before an RTCP is expected; no action.
+ AdvanceTime(kVideoReportInterval / 2 - TimeDelta::Millis(1));
+ EXPECT_EQ(sender_.transport_.NumRtcpSent(), 0u);
+
+  // Move ahead to the first RTCP. Send RTCP.
+ AdvanceTime(TimeDelta::Millis(1));
+ EXPECT_EQ(sender_.transport_.NumRtcpSent(), 1u);
+
+ EXPECT_TRUE(SendFrame(&sender_, sender_video_.get(), kBaseLayerTid));
+
+  // Move ahead to the last ms before the second RTCP is expected.
+  AdvanceTime(kVideoReportInterval / 2 - TimeDelta::Millis(1));
+ EXPECT_EQ(sender_.transport_.NumRtcpSent(), 1u);
+
+  // Move ahead into the range of the second RTCP; it may now be sent.
+ AdvanceTime(TimeDelta::Millis(1));
+ EXPECT_GE(sender_.transport_.NumRtcpSent(), 1u);
+
+ AdvanceTime(kVideoReportInterval / 2);
+ EXPECT_GE(sender_.transport_.NumRtcpSent(), 1u);
+
+  // Move out of the range of the second RTCP; it must have been sent by now.
+ AdvanceTime(kVideoReportInterval / 2);
+ EXPECT_EQ(sender_.transport_.NumRtcpSent(), 2u);
+}
+
+TEST_P(RtpRtcpImpl2Test, StoresPacketInfoForSentPackets) {
+ const uint32_t kStartTimestamp = 1u;
+ SetUp();
+ sender_.impl_->SetStartTimestamp(kStartTimestamp);
+
+ sender_.impl_->SetSequenceNumber(1);
+
+ PacedPacketInfo pacing_info;
+ RtpPacketToSend packet(nullptr);
+ packet.set_packet_type(RtpPacketToSend::Type::kVideo);
+ packet.SetSsrc(kSenderSsrc);
+
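+  // first_packet_of_frame and the marker bit delimit a frame; the sequence
+  // number map derives is_first/is_last from these flags.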
+ // Single-packet frame.
+ packet.SetTimestamp(1);
+ packet.set_first_packet_of_frame(true);
+ packet.SetMarker(true);
+ sender_.impl_->TrySendPacket(&packet, pacing_info);
+ AdvanceTime(TimeDelta::Millis(1));
+
+ std::vector<RtpSequenceNumberMap::Info> seqno_info =
+ sender_.impl_->GetSentRtpPacketInfos(std::vector<uint16_t>{1});
+
+ EXPECT_THAT(seqno_info, ElementsAre(RtpSequenceNumberMap::Info(
+ /*timestamp=*/1 - kStartTimestamp,
+ /*is_first=*/1,
+ /*is_last=*/1)));
+
+ // Three-packet frame.
+ packet.SetTimestamp(2);
+ packet.set_first_packet_of_frame(true);
+ packet.SetMarker(false);
+ sender_.impl_->TrySendPacket(&packet, pacing_info);
+
+ packet.set_first_packet_of_frame(false);
+ sender_.impl_->TrySendPacket(&packet, pacing_info);
+
+ packet.SetMarker(true);
+ sender_.impl_->TrySendPacket(&packet, pacing_info);
+
+ AdvanceTime(TimeDelta::Millis(1));
+
+ seqno_info =
+ sender_.impl_->GetSentRtpPacketInfos(std::vector<uint16_t>{2, 3, 4});
+
+ EXPECT_THAT(seqno_info, ElementsAre(RtpSequenceNumberMap::Info(
+ /*timestamp=*/2 - kStartTimestamp,
+ /*is_first=*/1,
+ /*is_last=*/0),
+ RtpSequenceNumberMap::Info(
+ /*timestamp=*/2 - kStartTimestamp,
+ /*is_first=*/0,
+ /*is_last=*/0),
+ RtpSequenceNumberMap::Info(
+ /*timestamp=*/2 - kStartTimestamp,
+ /*is_first=*/0,
+ /*is_last=*/1)));
+}
+
+// Checks that the sender report stats are not available if no RTCP SR was sent.
+TEST_P(RtpRtcpImpl2Test, SenderReportStatsNotAvailable) {
+ EXPECT_THAT(receiver_.impl_->GetSenderReportStats(), Eq(absl::nullopt));
+}
+
+// Checks that the sender report stats are available if an RTCP SR was sent.
+TEST_P(RtpRtcpImpl2Test, SenderReportStatsAvailable) {
+ // Send a frame in order to send an SR.
+ EXPECT_TRUE(SendFrame(&sender_, sender_video_.get(), kBaseLayerTid));
+ // Send an SR.
+ ASSERT_THAT(sender_.impl_->SendRTCP(kRtcpReport), Eq(0));
+ AdvanceTime(kOneWayNetworkDelay);
+ EXPECT_THAT(receiver_.impl_->GetSenderReportStats(), Not(Eq(absl::nullopt)));
+}
+
+// Checks that the sender report stats are not available if an RTCP SR with an
+// unexpected SSRC is received.
+TEST_P(RtpRtcpImpl2Test, SenderReportStatsNotUpdatedWithUnexpectedSsrc) {
+ constexpr uint32_t kUnexpectedSenderSsrc = 0x87654321;
+ static_assert(kUnexpectedSenderSsrc != kSenderSsrc, "");
+ // Forge a sender report and pass it to the receiver as if an RTCP SR were
+ // sent by an unexpected sender.
+ rtcp::SenderReport sr;
+ sr.SetSenderSsrc(kUnexpectedSenderSsrc);
+ sr.SetNtp({/*seconds=*/1u, /*fractions=*/1u << 31});
+ sr.SetPacketCount(123u);
+ sr.SetOctetCount(456u);
+ auto raw_packet = sr.Build();
+ receiver_.impl_->IncomingRtcpPacket(raw_packet.data(), raw_packet.size());
+ EXPECT_THAT(receiver_.impl_->GetSenderReportStats(), Eq(absl::nullopt));
+}
+
+// Checks the stats derived from the last received RTCP SR are set correctly.
+TEST_P(RtpRtcpImpl2Test, SenderReportStatsCheckStatsFromLastReport) {
+ using SenderReportStats = RtpRtcpInterface::SenderReportStats;
+ const NtpTime ntp(/*seconds=*/1u, /*fractions=*/1u << 31);
+ constexpr uint32_t kPacketCount = 123u;
+ constexpr uint32_t kOctetCount = 456u;
+ // Forge a sender report and pass it to the receiver as if an RTCP SR were
+ // sent by the sender.
+ rtcp::SenderReport sr;
+ sr.SetSenderSsrc(kSenderSsrc);
+ sr.SetNtp(ntp);
+ sr.SetPacketCount(kPacketCount);
+ sr.SetOctetCount(kOctetCount);
+ auto raw_packet = sr.Build();
+ receiver_.impl_->IncomingRtcpPacket(raw_packet.data(), raw_packet.size());
+
+ EXPECT_THAT(
+ receiver_.impl_->GetSenderReportStats(),
+ Optional(AllOf(Field(&SenderReportStats::last_remote_timestamp, Eq(ntp)),
+ Field(&SenderReportStats::packets_sent, Eq(kPacketCount)),
+ Field(&SenderReportStats::bytes_sent, Eq(kOctetCount)))));
+}
+
+// Checks that the sender report stats count equals the number of sent RTCP SRs.
+TEST_P(RtpRtcpImpl2Test, SenderReportStatsCount) {
+ using SenderReportStats = RtpRtcpInterface::SenderReportStats;
+ // Send a frame in order to send an SR.
+ EXPECT_TRUE(SendFrame(&sender_, sender_video_.get(), kBaseLayerTid));
+ // Send the first SR.
+ ASSERT_THAT(sender_.impl_->SendRTCP(kRtcpReport), Eq(0));
+ AdvanceTime(kOneWayNetworkDelay);
+ EXPECT_THAT(receiver_.impl_->GetSenderReportStats(),
+ Optional(Field(&SenderReportStats::reports_count, Eq(1u))));
+ // Send the second SR.
+ ASSERT_THAT(sender_.impl_->SendRTCP(kRtcpReport), Eq(0));
+ AdvanceTime(kOneWayNetworkDelay);
+ EXPECT_THAT(receiver_.impl_->GetSenderReportStats(),
+ Optional(Field(&SenderReportStats::reports_count, Eq(2u))));
+}
+
+// Checks that the sender report stats include a valid arrival time if an RTCP
+// SR was sent.
+TEST_P(RtpRtcpImpl2Test, SenderReportStatsArrivalTimestampSet) {
+ // Send a frame in order to send an SR.
+ EXPECT_TRUE(SendFrame(&sender_, sender_video_.get(), kBaseLayerTid));
+ // Send an SR.
+ ASSERT_THAT(sender_.impl_->SendRTCP(kRtcpReport), Eq(0));
+ AdvanceTime(kOneWayNetworkDelay);
+ auto stats = receiver_.impl_->GetSenderReportStats();
+ ASSERT_THAT(stats, Not(Eq(absl::nullopt)));
+ EXPECT_TRUE(stats->last_arrival_timestamp.Valid());
+}
+
+// Checks that the packet and byte counters from an RTCP SR are not zero once
+// a frame is sent.
+TEST_P(RtpRtcpImpl2Test, SenderReportStatsPacketByteCounters) {
+ using SenderReportStats = RtpRtcpInterface::SenderReportStats;
+ // Send a frame in order to send an SR.
+ EXPECT_TRUE(SendFrame(&sender_, sender_video_.get(), kBaseLayerTid));
+ ASSERT_THAT(sender_.transport_.rtp_packets_sent_, Gt(0));
+  // Advance time; otherwise the RTCP SR report will not include any packets
+ // generated by `SendFrame()`.
+ AdvanceTime(TimeDelta::Millis(1));
+ // Send an SR.
+ ASSERT_THAT(sender_.impl_->SendRTCP(kRtcpReport), Eq(0));
+ AdvanceTime(kOneWayNetworkDelay);
+ EXPECT_THAT(receiver_.impl_->GetSenderReportStats(),
+ Optional(AllOf(Field(&SenderReportStats::packets_sent, Gt(0u)),
+ Field(&SenderReportStats::bytes_sent, Gt(0u)))));
+}
+
+TEST_P(RtpRtcpImpl2Test, SendingVideoAdvancesSequenceNumber) {
+ const uint16_t sequence_number = sender_.impl_->SequenceNumber();
+ EXPECT_TRUE(SendFrame(&sender_, sender_video_.get(), kBaseLayerTid));
+ ASSERT_THAT(sender_.transport_.rtp_packets_sent_, Gt(0));
+ EXPECT_EQ(sequence_number + 1, sender_.impl_->SequenceNumber());
+}
+
+TEST_P(RtpRtcpImpl2Test, SequenceNumberNotAdvancedWhenNotSending) {
+ const uint16_t sequence_number = sender_.impl_->SequenceNumber();
+ sender_.impl_->SetSendingMediaStatus(false);
+ EXPECT_FALSE(SendFrame(&sender_, sender_video_.get(), kBaseLayerTid));
+ ASSERT_THAT(sender_.transport_.rtp_packets_sent_, Eq(0));
+ EXPECT_EQ(sequence_number, sender_.impl_->SequenceNumber());
+}
+
+TEST_P(RtpRtcpImpl2Test, PaddingNotAllowedInMiddleOfFrame) {
+ constexpr size_t kPaddingSize = 100;
+
+ // Can't send padding before media.
+ EXPECT_THAT(sender_.impl_->GeneratePadding(kPaddingSize), SizeIs(0u));
+
+ EXPECT_TRUE(SendFrame(&sender_, sender_video_.get(), kBaseLayerTid));
+
+ // Padding is now ok.
+ EXPECT_THAT(sender_.impl_->GeneratePadding(kPaddingSize), SizeIs(Gt(0u)));
+
+ // Send half a video frame.
+ PacedPacketInfo pacing_info;
+ std::unique_ptr<RtpPacketToSend> packet =
+ sender_.impl_->RtpSender()->AllocatePacket();
+ packet->set_packet_type(RtpPacketToSend::Type::kVideo);
+ packet->set_first_packet_of_frame(true);
+ packet->SetMarker(false); // Marker false - not last packet of frame.
+
+ EXPECT_TRUE(sender_.impl_->TrySendPacket(packet.get(), pacing_info));
+
+  // Padding is not allowed in the middle of a frame.
+ EXPECT_THAT(sender_.impl_->GeneratePadding(kPaddingSize), SizeIs(0u));
+
+ packet = sender_.impl_->RtpSender()->AllocatePacket();
+ packet->set_packet_type(RtpPacketToSend::Type::kVideo);
+ packet->set_first_packet_of_frame(true);
+ packet->SetMarker(true);
+
+ EXPECT_TRUE(sender_.impl_->TrySendPacket(packet.get(), pacing_info));
+
+ // Padding is OK again.
+ EXPECT_THAT(sender_.impl_->GeneratePadding(kPaddingSize), SizeIs(Gt(0u)));
+}
+
+TEST_P(RtpRtcpImpl2Test, PaddingTimestampMatchesMedia) {
+ constexpr size_t kPaddingSize = 100;
+ const uint32_t kTimestamp = 123;
+
+ EXPECT_TRUE(SendFrame(&sender_, sender_video_.get(), kBaseLayerTid,
+ kTimestamp, /*capture_time_ms=*/0));
+ EXPECT_EQ(sender_.last_packet().Timestamp(), kTimestamp);
+ uint16_t media_seq = sender_.last_packet().SequenceNumber();
+
+ // Generate and send padding.
+ auto padding = sender_.impl_->GeneratePadding(kPaddingSize);
+ ASSERT_FALSE(padding.empty());
+ for (auto& packet : padding) {
+ sender_.impl_->TrySendPacket(packet.get(), PacedPacketInfo());
+ }
+
+ // Verify we sent a new packet, but with the same timestamp.
+ EXPECT_NE(sender_.last_packet().SequenceNumber(), media_seq);
+ EXPECT_EQ(sender_.last_packet().Timestamp(), kTimestamp);
+}
+
+TEST_P(RtpRtcpImpl2Test, AssignsTransportSequenceNumber) {
+ sender_.RegisterHeaderExtension(TransportSequenceNumber::Uri(),
+ kTransportSequenceNumberExtensionId);
+
+ EXPECT_TRUE(SendFrame(&sender_, sender_video_.get(), kBaseLayerTid));
+ uint16_t first_transport_seq = 0;
+ EXPECT_TRUE(sender_.last_packet().GetExtension<TransportSequenceNumber>(
+ &first_transport_seq));
+
+ EXPECT_TRUE(SendFrame(&sender_, sender_video_.get(), kBaseLayerTid));
+ uint16_t second_transport_seq = 0;
+ EXPECT_TRUE(sender_.last_packet().GetExtension<TransportSequenceNumber>(
+ &second_transport_seq));
+
+ EXPECT_EQ(first_transport_seq + 1, second_transport_seq);
+}
+
+TEST_P(RtpRtcpImpl2Test, AssignsAbsoluteSendTime) {
+ sender_.RegisterHeaderExtension(AbsoluteSendTime::Uri(),
+ kAbsoluteSendTimeExtensionId);
+
+ EXPECT_TRUE(SendFrame(&sender_, sender_video_.get(), kBaseLayerTid));
+ EXPECT_NE(sender_.last_packet().GetExtension<AbsoluteSendTime>(), 0u);
+}
+
+TEST_P(RtpRtcpImpl2Test, AssignsTransmissionTimeOffset) {
+ sender_.RegisterHeaderExtension(TransmissionOffset::Uri(),
+ kTransmissionOffsetExtensionId);
+
+ constexpr TimeDelta kOffset = TimeDelta::Millis(100);
+  // The transmission offset is calculated from the difference between the
+  // capture time and the send time.
+ int64_t capture_time_ms = time_controller_.GetClock()->TimeInMilliseconds();
+ time_controller_.AdvanceTime(kOffset);
+
+ EXPECT_TRUE(SendFrame(&sender_, sender_video_.get(), kBaseLayerTid,
+ /*timestamp=*/0, capture_time_ms));
+ EXPECT_EQ(sender_.last_packet().GetExtension<TransmissionOffset>(),
+ kOffset.ms() * kCaptureTimeMsToRtpTimestamp);
+}
+
+TEST_P(RtpRtcpImpl2Test, PropagatesSentPacketInfo) {
+ sender_.RegisterHeaderExtension(TransportSequenceNumber::Uri(),
+ kTransportSequenceNumberExtensionId);
+ int64_t now_ms = time_controller_.GetClock()->TimeInMilliseconds();
+ EXPECT_TRUE(SendFrame(&sender_, sender_video_.get(), kBaseLayerTid));
+ EXPECT_THAT(
+ sender_.last_sent_packet(),
+ Optional(
+ AllOf(Field(&RtpRtcpModule::SentPacket::packet_id,
+ Eq(sender_.last_packet()
+ .GetExtension<TransportSequenceNumber>())),
+ Field(&RtpRtcpModule::SentPacket::capture_time_ms, Eq(now_ms)),
+ Field(&RtpRtcpModule::SentPacket::ssrc, Eq(kSenderSsrc)))));
+}
+
+TEST_P(RtpRtcpImpl2Test, GeneratesFlexfec) {
+ constexpr int kFlexfecPayloadType = 118;
+ constexpr uint32_t kFlexfecSsrc = 17;
+ const char kNoMid[] = "";
+ const std::vector<RtpExtension> kNoRtpExtensions;
+ const std::vector<RtpExtensionSize> kNoRtpExtensionSizes;
+
+  // Make sure FlexFEC sequence numbers start at a different point than media.
+ const uint16_t fec_start_seq = sender_.impl_->SequenceNumber() + 100;
+ RtpState start_state;
+ start_state.sequence_number = fec_start_seq;
+ FlexfecSender flexfec_sender(kFlexfecPayloadType, kFlexfecSsrc, kSenderSsrc,
+ kNoMid, kNoRtpExtensions, kNoRtpExtensionSizes,
+ &start_state, time_controller_.GetClock());
+ ReinitWithFec(&flexfec_sender, /*red_payload_type=*/absl::nullopt);
+
+ // Parameters selected to generate a single FEC packet per media packet.
+ FecProtectionParams params;
+ params.fec_rate = 15;
+ params.max_fec_frames = 1;
+ params.fec_mask_type = kFecMaskRandom;
+ sender_.impl_->SetFecProtectionParams(params, params);
+
+ // Send a one packet frame, expect one media packet and one FEC packet.
+ EXPECT_TRUE(SendFrame(&sender_, sender_video_.get(), kBaseLayerTid));
+ ASSERT_THAT(sender_.transport_.rtp_packets_sent_, Eq(2));
+
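+  // FlexFEC is sent on its own SSRC with an independent sequence number
+  // space, unlike ULPFEC below, which shares the media stream.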
+ const RtpPacketReceived& fec_packet = sender_.last_packet();
+ EXPECT_EQ(fec_packet.SequenceNumber(), fec_start_seq);
+ EXPECT_EQ(fec_packet.Ssrc(), kFlexfecSsrc);
+ EXPECT_EQ(fec_packet.PayloadType(), kFlexfecPayloadType);
+}
+
+TEST_P(RtpRtcpImpl2Test, GeneratesUlpfec) {
+ constexpr int kUlpfecPayloadType = 118;
+ constexpr int kRedPayloadType = 119;
+ UlpfecGenerator ulpfec_sender(kRedPayloadType, kUlpfecPayloadType,
+ time_controller_.GetClock());
+ ReinitWithFec(&ulpfec_sender, kRedPayloadType);
+
+ // Parameters selected to generate a single FEC packet per media packet.
+ FecProtectionParams params;
+ params.fec_rate = 15;
+ params.max_fec_frames = 1;
+ params.fec_mask_type = kFecMaskRandom;
+ sender_.impl_->SetFecProtectionParams(params, params);
+
+ // Send a one packet frame, expect one media packet and one FEC packet.
+ EXPECT_TRUE(SendFrame(&sender_, sender_video_.get(), kBaseLayerTid));
+ ASSERT_THAT(sender_.transport_.rtp_packets_sent_, Eq(2));
+
+  // ULPFEC is sent on the media SSRC, sharing the sequence number series.
+ const RtpPacketReceived& fec_packet = sender_.last_packet();
+ EXPECT_EQ(fec_packet.SequenceNumber(), kSequenceNumber + 1);
+ EXPECT_EQ(fec_packet.Ssrc(), kSenderSsrc);
+  // The packets are encapsulated in RED packets; check that, and that the
+  // RED header (the first payload byte) indicates the ULPFEC payload type.
+ EXPECT_EQ(fec_packet.PayloadType(), kRedPayloadType);
+ EXPECT_EQ(fec_packet.payload()[0], kUlpfecPayloadType);
+}
+
+TEST_P(RtpRtcpImpl2Test, RtpStateReflectsCurrentState) {
+  // Verify that each field of GetRtpState() actually reflects the current
+  // state.
+
+ // Current time will be used for `timestamp`, `capture_time_ms` and
+ // `last_timestamp_time_ms`.
+ const int64_t time_ms = time_controller_.GetClock()->TimeInMilliseconds();
+
+  // Use a non-default sequence number to test `sequence_number`.
+ const uint16_t kSeq = kSequenceNumber + 123;
+ // Hard-coded value for `start_timestamp`.
+ const uint32_t kStartTimestamp = 3456;
+ const int64_t capture_time_ms = time_ms;
+ const uint32_t timestamp = capture_time_ms * kCaptureTimeMsToRtpTimestamp;
+
+ sender_.impl_->SetSequenceNumber(kSeq - 1);
+ sender_.impl_->SetStartTimestamp(kStartTimestamp);
+ EXPECT_TRUE(SendFrame(&sender_, sender_video_.get(), kBaseLayerTid));
+
+ // Simulate an RTCP receiver report in order to populate `ssrc_has_acked`.
+ RTCPReportBlock ack;
+ ack.source_ssrc = kSenderSsrc;
+ ack.extended_highest_sequence_number = kSeq;
+ sender_.impl_->OnReceivedRtcpReportBlocks({ack});
+
+ RtpState state = sender_.impl_->GetRtpState();
+ EXPECT_EQ(state.sequence_number, kSeq);
+ EXPECT_EQ(state.start_timestamp, kStartTimestamp);
+ EXPECT_EQ(state.timestamp, timestamp);
+ EXPECT_EQ(state.capture_time_ms, capture_time_ms);
+ EXPECT_EQ(state.last_timestamp_time_ms, time_ms);
+ EXPECT_EQ(state.ssrc_has_acked, true);
+
+ // Reset sender, advance time, restore state. Directly observing state
+ // is not feasible, so just verify returned state matches what we set.
+ sender_.CreateModuleImpl();
+ time_controller_.AdvanceTime(TimeDelta::Millis(10));
+ sender_.impl_->SetRtpState(state);
+
+ state = sender_.impl_->GetRtpState();
+ EXPECT_EQ(state.sequence_number, kSeq);
+ EXPECT_EQ(state.start_timestamp, kStartTimestamp);
+ EXPECT_EQ(state.timestamp, timestamp);
+ EXPECT_EQ(state.capture_time_ms, capture_time_ms);
+ EXPECT_EQ(state.last_timestamp_time_ms, time_ms);
+ EXPECT_EQ(state.ssrc_has_acked, true);
+}
+
+TEST_P(RtpRtcpImpl2Test, RtxRtpStateReflectsCurrentState) {
+ // Enable RTX.
+ sender_.impl_->SetStorePacketsStatus(/*enable=*/true, /*number_to_store=*/10);
+ sender_.impl_->SetRtxSendPayloadType(kRtxPayloadType, kPayloadType);
+ sender_.impl_->SetRtxSendStatus(kRtxRetransmitted | kRtxRedundantPayloads);
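+  // RTX retransmissions go out on kRtxSenderSsrc with kRtxPayloadType and
+  // their own sequence number space.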
+
+  // `start_timestamp` is the only timestamp populated in the RTX state.
+ const uint32_t kStartTimestamp = 3456;
+ sender_.impl_->SetStartTimestamp(kStartTimestamp);
+
+ // Send a frame and ask for a retransmit of the last packet. Capture the RTX
+ // packet in order to verify RTX sequence number.
+ EXPECT_TRUE(SendFrame(&sender_, sender_video_.get(), kBaseLayerTid));
+ time_controller_.AdvanceTime(TimeDelta::Millis(5));
+ sender_.impl_->OnReceivedNack(
+ std::vector<uint16_t>{sender_.transport_.last_packet_.SequenceNumber()});
+ RtpPacketReceived& rtx_packet = sender_.transport_.last_packet_;
+ EXPECT_EQ(rtx_packet.Ssrc(), kRtxSenderSsrc);
+
+ // Simulate an RTCP receiver report in order to populate `ssrc_has_acked`.
+ RTCPReportBlock ack;
+ ack.source_ssrc = kRtxSenderSsrc;
+ ack.extended_highest_sequence_number = rtx_packet.SequenceNumber();
+ sender_.impl_->OnReceivedRtcpReportBlocks({ack});
+
+ RtpState rtp_state = sender_.impl_->GetRtpState();
+ RtpState rtx_state = sender_.impl_->GetRtxState();
+ EXPECT_EQ(rtx_state.start_timestamp, kStartTimestamp);
+ EXPECT_EQ(rtx_state.ssrc_has_acked, true);
+ EXPECT_EQ(rtx_state.sequence_number, rtx_packet.SequenceNumber() + 1);
+
+ // Reset sender, advance time, restore state. Directly observing state
+ // is not feasible, so just verify returned state matches what we set.
+ // Needs SetRtpState() too in order to propagate start timestamp.
+ sender_.CreateModuleImpl();
+ time_controller_.AdvanceTime(TimeDelta::Millis(10));
+ sender_.impl_->SetRtpState(rtp_state);
+ sender_.impl_->SetRtxState(rtx_state);
+
+ rtx_state = sender_.impl_->GetRtxState();
+ EXPECT_EQ(rtx_state.start_timestamp, kStartTimestamp);
+ EXPECT_EQ(rtx_state.ssrc_has_acked, true);
+ EXPECT_EQ(rtx_state.sequence_number, rtx_packet.SequenceNumber() + 1);
+}
+
+INSTANTIATE_TEST_SUITE_P(WithAndWithoutOverhead,
+ RtpRtcpImpl2Test,
+ ::testing::Values(TestConfig{false},
+ TestConfig{true}));
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_rtcp_impl_unittest.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_rtcp_impl_unittest.cc
new file mode 100644
index 0000000000..9b9c1d8970
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_rtcp_impl_unittest.cc
@@ -0,0 +1,700 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtp_rtcp_impl.h"
+
+#include <map>
+#include <memory>
+#include <set>
+
+#include "api/transport/field_trial_based_config.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "modules/rtp_rtcp/source/rtcp_packet.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/nack.h"
+#include "modules/rtp_rtcp/source/rtp_packet_received.h"
+#include "modules/rtp_rtcp/source/rtp_sender_video.h"
+#include "rtc_base/rate_limiter.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "test/rtcp_packet_parser.h"
+
+using ::testing::ElementsAre;
+using ::testing::Eq;
+using ::testing::Field;
+using ::testing::Gt;
+using ::testing::Not;
+using ::testing::Optional;
+
+namespace webrtc {
+namespace {
+const uint32_t kSenderSsrc = 0x12345;
+const uint32_t kReceiverSsrc = 0x23456;
+const int64_t kOneWayNetworkDelayMs = 100;
+const uint8_t kBaseLayerTid = 0;
+const uint8_t kHigherLayerTid = 1;
+const uint16_t kSequenceNumber = 100;
+const uint8_t kPayloadType = 100;
+const int kWidth = 320;
+const int kHeight = 100;
+
+class RtcpRttStatsTestImpl : public RtcpRttStats {
+ public:
+ RtcpRttStatsTestImpl() : rtt_ms_(0) {}
+ ~RtcpRttStatsTestImpl() override = default;
+
+ void OnRttUpdate(int64_t rtt_ms) override { rtt_ms_ = rtt_ms; }
+ int64_t LastProcessedRtt() const override { return rtt_ms_; }
+ int64_t rtt_ms_;
+};
+
+class SendTransport : public Transport {
+ public:
+ SendTransport()
+ : receiver_(nullptr),
+ clock_(nullptr),
+ delay_ms_(0),
+ rtp_packets_sent_(0),
+ rtcp_packets_sent_(0) {}
+
+ void SetRtpRtcpModule(ModuleRtpRtcpImpl* receiver) { receiver_ = receiver; }
+ void SimulateNetworkDelay(int64_t delay_ms, SimulatedClock* clock) {
+ clock_ = clock;
+ delay_ms_ = delay_ms;
+ }
+ bool SendRtp(const uint8_t* data,
+ size_t len,
+ const PacketOptions& options) override {
+ RtpPacket packet;
+ EXPECT_TRUE(packet.Parse(data, len));
+ ++rtp_packets_sent_;
+ last_rtp_sequence_number_ = packet.SequenceNumber();
+ return true;
+ }
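+  // Loops RTCP straight back into the registered remote module, advancing
+  // the simulated clock first to model one-way network delay.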
+ bool SendRtcp(const uint8_t* data, size_t len) override {
+ test::RtcpPacketParser parser;
+ parser.Parse(data, len);
+ last_nack_list_ = parser.nack()->packet_ids();
+
+ if (clock_) {
+ clock_->AdvanceTimeMilliseconds(delay_ms_);
+ }
+ EXPECT_TRUE(receiver_);
+ receiver_->IncomingRtcpPacket(data, len);
+ ++rtcp_packets_sent_;
+ return true;
+ }
+ size_t NumRtcpSent() { return rtcp_packets_sent_; }
+ ModuleRtpRtcpImpl* receiver_;
+ SimulatedClock* clock_;
+ int64_t delay_ms_;
+ int rtp_packets_sent_;
+ size_t rtcp_packets_sent_;
+ uint16_t last_rtp_sequence_number_;
+ std::vector<uint16_t> last_nack_list_;
+};
+
+class RtpRtcpModule : public RtcpPacketTypeCounterObserver {
+ public:
+ RtpRtcpModule(SimulatedClock* clock, bool is_sender)
+ : is_sender_(is_sender),
+ receive_statistics_(ReceiveStatistics::Create(clock)),
+ clock_(clock) {
+ CreateModuleImpl();
+ transport_.SimulateNetworkDelay(kOneWayNetworkDelayMs, clock);
+ }
+
+ const bool is_sender_;
+ RtcpPacketTypeCounter packets_sent_;
+ RtcpPacketTypeCounter packets_received_;
+ std::unique_ptr<ReceiveStatistics> receive_statistics_;
+ SendTransport transport_;
+ RtcpRttStatsTestImpl rtt_stats_;
+ std::unique_ptr<ModuleRtpRtcpImpl> impl_;
+ int rtcp_report_interval_ms_ = 0;
+
+ void RtcpPacketTypesCounterUpdated(
+ uint32_t ssrc,
+ const RtcpPacketTypeCounter& packet_counter) override {
+ counter_map_[ssrc] = packet_counter;
+ }
+
+ RtcpPacketTypeCounter RtcpSent() {
+ // RTCP counters for remote SSRC.
+ return counter_map_[is_sender_ ? kReceiverSsrc : kSenderSsrc];
+ }
+
+ RtcpPacketTypeCounter RtcpReceived() {
+ // Received RTCP stats for (own) local SSRC.
+ return counter_map_[impl_->SSRC()];
+ }
+ int RtpSent() { return transport_.rtp_packets_sent_; }
+ uint16_t LastRtpSequenceNumber() {
+ return transport_.last_rtp_sequence_number_;
+ }
+ std::vector<uint16_t> LastNackListSent() {
+ return transport_.last_nack_list_;
+ }
+ void SetRtcpReportIntervalAndReset(int rtcp_report_interval_ms) {
+ rtcp_report_interval_ms_ = rtcp_report_interval_ms;
+ CreateModuleImpl();
+ }
+
+ private:
+ void CreateModuleImpl() {
+ RtpRtcpInterface::Configuration config;
+ config.audio = false;
+ config.clock = clock_;
+ config.outgoing_transport = &transport_;
+ config.receive_statistics = receive_statistics_.get();
+ config.rtcp_packet_type_counter_observer = this;
+ config.rtt_stats = &rtt_stats_;
+ config.rtcp_report_interval_ms = rtcp_report_interval_ms_;
+ config.local_media_ssrc = is_sender_ ? kSenderSsrc : kReceiverSsrc;
+ config.need_rtp_packet_infos = true;
+ config.non_sender_rtt_measurement = true;
+
+ impl_.reset(new ModuleRtpRtcpImpl(config));
+ impl_->SetRemoteSSRC(is_sender_ ? kReceiverSsrc : kSenderSsrc);
+ impl_->SetRTCPStatus(RtcpMode::kCompound);
+ }
+
+ SimulatedClock* const clock_;
+ std::map<uint32_t, RtcpPacketTypeCounter> counter_map_;
+};
+} // namespace
+
+class RtpRtcpImplTest : public ::testing::Test {
+ protected:
+ RtpRtcpImplTest()
+ : clock_(133590000000000),
+ sender_(&clock_, /*is_sender=*/true),
+ receiver_(&clock_, /*is_sender=*/false) {}
+
+ void SetUp() override {
+ // Send module.
+ EXPECT_EQ(0, sender_.impl_->SetSendingStatus(true));
+ sender_.impl_->SetSendingMediaStatus(true);
+ sender_.impl_->SetSequenceNumber(kSequenceNumber);
+ sender_.impl_->SetStorePacketsStatus(true, 100);
+
+ FieldTrialBasedConfig field_trials;
+ RTPSenderVideo::Config video_config;
+ video_config.clock = &clock_;
+ video_config.rtp_sender = sender_.impl_->RtpSender();
+ video_config.field_trials = &field_trials;
+ sender_video_ = std::make_unique<RTPSenderVideo>(video_config);
+
+ // Receive module.
+ EXPECT_EQ(0, receiver_.impl_->SetSendingStatus(false));
+ receiver_.impl_->SetSendingMediaStatus(false);
+ // Transport settings.
+ sender_.transport_.SetRtpRtcpModule(receiver_.impl_.get());
+ receiver_.transport_.SetRtpRtcpModule(sender_.impl_.get());
+ }
+
+ SimulatedClock clock_;
+ RtpRtcpModule sender_;
+ std::unique_ptr<RTPSenderVideo> sender_video_;
+ RtpRtcpModule receiver_;
+
+ void SendFrame(const RtpRtcpModule* module,
+ RTPSenderVideo* sender,
+ uint8_t tid) {
+ RTPVideoHeaderVP8 vp8_header = {};
+ vp8_header.temporalIdx = tid;
+ RTPVideoHeader rtp_video_header;
+ rtp_video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ rtp_video_header.width = kWidth;
+ rtp_video_header.height = kHeight;
+ rtp_video_header.rotation = kVideoRotation_0;
+ rtp_video_header.content_type = VideoContentType::UNSPECIFIED;
+ rtp_video_header.playout_delay = {-1, -1};
+ rtp_video_header.is_first_packet_in_frame = true;
+ rtp_video_header.simulcastIdx = 0;
+ rtp_video_header.codec = kVideoCodecVP8;
+ rtp_video_header.video_type_header = vp8_header;
+ rtp_video_header.video_timing = {0u, 0u, 0u, 0u, 0u, 0u, false};
+
+ const uint8_t payload[100] = {0};
+ EXPECT_TRUE(module->impl_->OnSendingRtpFrame(0, 0, kPayloadType, true));
+ EXPECT_TRUE(sender->SendVideo(kPayloadType, VideoCodecType::kVideoCodecVP8,
+ 0, 0, payload, rtp_video_header, 0));
+ }
+
+ void IncomingRtcpNack(const RtpRtcpModule* module, uint16_t sequence_number) {
+ bool sender = module->impl_->SSRC() == kSenderSsrc;
+ rtcp::Nack nack;
+ uint16_t list[1];
+ list[0] = sequence_number;
+ const uint16_t kListLength = sizeof(list) / sizeof(list[0]);
+ nack.SetSenderSsrc(sender ? kReceiverSsrc : kSenderSsrc);
+ nack.SetMediaSsrc(sender ? kSenderSsrc : kReceiverSsrc);
+ nack.SetPacketIds(list, kListLength);
+ rtc::Buffer packet = nack.Build();
+ module->impl_->IncomingRtcpPacket(packet.data(), packet.size());
+ }
+};
+
+TEST_F(RtpRtcpImplTest, RetransmitsAllLayers) {
+ // Send frames.
+ EXPECT_EQ(0, sender_.RtpSent());
+ SendFrame(&sender_, sender_video_.get(), kBaseLayerTid); // kSequenceNumber
+ SendFrame(&sender_, sender_video_.get(),
+ kHigherLayerTid); // kSequenceNumber + 1
+ SendFrame(&sender_, sender_video_.get(),
+ kNoTemporalIdx); // kSequenceNumber + 2
+ EXPECT_EQ(3, sender_.RtpSent());
+ EXPECT_EQ(kSequenceNumber + 2, sender_.LastRtpSequenceNumber());
+
+  // Minimum required delay until retransmit is 5 ms + RTT (RTT = 0 here).
+ clock_.AdvanceTimeMilliseconds(5);
+
+ // Frame with kBaseLayerTid re-sent.
+ IncomingRtcpNack(&sender_, kSequenceNumber);
+ EXPECT_EQ(4, sender_.RtpSent());
+ EXPECT_EQ(kSequenceNumber, sender_.LastRtpSequenceNumber());
+ // Frame with kHigherLayerTid re-sent.
+ IncomingRtcpNack(&sender_, kSequenceNumber + 1);
+ EXPECT_EQ(5, sender_.RtpSent());
+ EXPECT_EQ(kSequenceNumber + 1, sender_.LastRtpSequenceNumber());
+ // Frame with kNoTemporalIdx re-sent.
+ IncomingRtcpNack(&sender_, kSequenceNumber + 2);
+ EXPECT_EQ(6, sender_.RtpSent());
+ EXPECT_EQ(kSequenceNumber + 2, sender_.LastRtpSequenceNumber());
+}
+
+TEST_F(RtpRtcpImplTest, Rtt) {
+ RtpPacketReceived packet;
+ packet.SetTimestamp(1);
+ packet.SetSequenceNumber(123);
+ packet.SetSsrc(kSenderSsrc);
+ packet.AllocatePayload(100 - 12);
+ receiver_.receive_statistics_->OnRtpPacket(packet);
+
+ // Send Frame before sending an SR.
+ SendFrame(&sender_, sender_video_.get(), kBaseLayerTid);
+ // Sender module should send an SR.
+ EXPECT_EQ(0, sender_.impl_->SendRTCP(kRtcpReport));
+
+  // Receiver module should send an RR with a response to the last received SR.
+ clock_.AdvanceTimeMilliseconds(1000);
+ EXPECT_EQ(0, receiver_.impl_->SendRTCP(kRtcpReport));
+
+ // Verify RTT.
+ int64_t rtt;
+ int64_t avg_rtt;
+ int64_t min_rtt;
+ int64_t max_rtt;
+ EXPECT_EQ(
+ 0, sender_.impl_->RTT(kReceiverSsrc, &rtt, &avg_rtt, &min_rtt, &max_rtt));
+ EXPECT_NEAR(2 * kOneWayNetworkDelayMs, rtt, 1);
+ EXPECT_NEAR(2 * kOneWayNetworkDelayMs, avg_rtt, 1);
+ EXPECT_NEAR(2 * kOneWayNetworkDelayMs, min_rtt, 1);
+ EXPECT_NEAR(2 * kOneWayNetworkDelayMs, max_rtt, 1);
+
+  // No RTT from other SSRC.
+ EXPECT_EQ(-1, sender_.impl_->RTT(kReceiverSsrc + 1, &rtt, &avg_rtt, &min_rtt,
+ &max_rtt));
+
+ // Verify RTT from rtt_stats config.
+ EXPECT_EQ(0, sender_.rtt_stats_.LastProcessedRtt());
+ EXPECT_EQ(0, sender_.impl_->rtt_ms());
+ sender_.impl_->Process();
+ EXPECT_NEAR(2 * kOneWayNetworkDelayMs, sender_.rtt_stats_.LastProcessedRtt(),
+ 1);
+ EXPECT_NEAR(2 * kOneWayNetworkDelayMs, sender_.impl_->rtt_ms(), 1);
+}
+
+TEST_F(RtpRtcpImplTest, RttForReceiverOnly) {
+  // Receiver module should send a Receiver Reference Time Report (RRTR).
+ EXPECT_EQ(0, receiver_.impl_->SendRTCP(kRtcpReport));
+
+  // Sender module should send a response to the last received RRTR (DLRR).
+ clock_.AdvanceTimeMilliseconds(1000);
+  // Send Frame before sending an SR.
+ SendFrame(&sender_, sender_video_.get(), kBaseLayerTid);
+ EXPECT_EQ(0, sender_.impl_->SendRTCP(kRtcpReport));
+
+ // Verify RTT.
+ EXPECT_EQ(0, receiver_.rtt_stats_.LastProcessedRtt());
+ EXPECT_EQ(0, receiver_.impl_->rtt_ms());
+ receiver_.impl_->Process();
+ EXPECT_NEAR(2 * kOneWayNetworkDelayMs,
+ receiver_.rtt_stats_.LastProcessedRtt(), 1);
+ EXPECT_NEAR(2 * kOneWayNetworkDelayMs, receiver_.impl_->rtt_ms(), 1);
+}
+
+TEST_F(RtpRtcpImplTest, NoSrBeforeMedia) {
+ // Ignore fake transport delays in this test.
+ sender_.transport_.SimulateNetworkDelay(0, &clock_);
+ receiver_.transport_.SimulateNetworkDelay(0, &clock_);
+
+ sender_.impl_->Process();
+ EXPECT_EQ(sender_.transport_.NumRtcpSent(), 0u);
+
+  // Verify no SR is sent before media has been sent; an RR should still be
+  // sent from the receiving module though.
+ clock_.AdvanceTimeMilliseconds(2000);
+ sender_.impl_->Process();
+ receiver_.impl_->Process();
+ EXPECT_EQ(sender_.transport_.NumRtcpSent(), 0u);
+ EXPECT_EQ(receiver_.transport_.NumRtcpSent(), 1u);
+
+ SendFrame(&sender_, sender_video_.get(), kBaseLayerTid);
+ EXPECT_EQ(sender_.transport_.NumRtcpSent(), 1u);
+}
+
+TEST_F(RtpRtcpImplTest, RtcpPacketTypeCounter_Nack) {
+ EXPECT_EQ(0U, sender_.RtcpReceived().nack_packets);
+ EXPECT_EQ(0U, receiver_.RtcpSent().nack_packets);
+
+ // Receive module sends a NACK.
+ const uint16_t kNackLength = 1;
+ uint16_t nack_list[kNackLength] = {123};
+ EXPECT_EQ(0, receiver_.impl_->SendNACK(nack_list, kNackLength));
+ EXPECT_EQ(1U, receiver_.RtcpSent().nack_packets);
+
+ // Send module receives the NACK.
+ EXPECT_EQ(1U, sender_.RtcpReceived().nack_packets);
+}
+
+TEST_F(RtpRtcpImplTest, AddStreamDataCounters) {
+ StreamDataCounters rtp;
+ rtp.transmitted.packets = 1;
+ rtp.transmitted.payload_bytes = 1;
+ rtp.transmitted.header_bytes = 2;
+ rtp.transmitted.padding_bytes = 3;
+ EXPECT_EQ(rtp.transmitted.TotalBytes(), rtp.transmitted.payload_bytes +
+ rtp.transmitted.header_bytes +
+ rtp.transmitted.padding_bytes);
+
+ StreamDataCounters rtp2;
+ rtp2.transmitted.packets = 10;
+ rtp2.transmitted.payload_bytes = 10;
+ rtp2.retransmitted.header_bytes = 4;
+ rtp2.retransmitted.payload_bytes = 5;
+ rtp2.retransmitted.padding_bytes = 6;
+ rtp2.retransmitted.packets = 7;
+ rtp2.fec.packets = 8;
+
+ StreamDataCounters sum = rtp;
+ sum.Add(rtp2);
+ EXPECT_EQ(11U, sum.transmitted.packets);
+ EXPECT_EQ(11U, sum.transmitted.payload_bytes);
+ EXPECT_EQ(2U, sum.transmitted.header_bytes);
+ EXPECT_EQ(3U, sum.transmitted.padding_bytes);
+ EXPECT_EQ(4U, sum.retransmitted.header_bytes);
+ EXPECT_EQ(5U, sum.retransmitted.payload_bytes);
+ EXPECT_EQ(6U, sum.retransmitted.padding_bytes);
+ EXPECT_EQ(7U, sum.retransmitted.packets);
+ EXPECT_EQ(8U, sum.fec.packets);
+ EXPECT_EQ(sum.transmitted.TotalBytes(),
+ rtp.transmitted.TotalBytes() + rtp2.transmitted.TotalBytes());
+}
+
+TEST_F(RtpRtcpImplTest, SendsInitialNackList) {
+ // Send module sends a NACK.
+ const uint16_t kNackLength = 1;
+ uint16_t nack_list[kNackLength] = {123};
+ EXPECT_EQ(0U, sender_.RtcpSent().nack_packets);
+ // Send Frame before sending a compound RTCP that starts with SR.
+ SendFrame(&sender_, sender_video_.get(), kBaseLayerTid);
+ EXPECT_EQ(0, sender_.impl_->SendNACK(nack_list, kNackLength));
+ EXPECT_EQ(1U, sender_.RtcpSent().nack_packets);
+ EXPECT_THAT(sender_.LastNackListSent(), ElementsAre(123));
+}
+
+TEST_F(RtpRtcpImplTest, SendsExtendedNackList) {
+ // Send module sends a NACK.
+ const uint16_t kNackLength = 1;
+ uint16_t nack_list[kNackLength] = {123};
+ EXPECT_EQ(0U, sender_.RtcpSent().nack_packets);
+ // Send Frame before sending a compound RTCP that starts with SR.
+ SendFrame(&sender_, sender_video_.get(), kBaseLayerTid);
+ EXPECT_EQ(0, sender_.impl_->SendNACK(nack_list, kNackLength));
+ EXPECT_EQ(1U, sender_.RtcpSent().nack_packets);
+ EXPECT_THAT(sender_.LastNackListSent(), ElementsAre(123));
+
+  // Same list is not re-sent.
+ EXPECT_EQ(0, sender_.impl_->SendNACK(nack_list, kNackLength));
+ EXPECT_EQ(1U, sender_.RtcpSent().nack_packets);
+ EXPECT_THAT(sender_.LastNackListSent(), ElementsAre(123));
+
+ // Only extended list sent.
+ const uint16_t kNackExtLength = 2;
+ uint16_t nack_list_ext[kNackExtLength] = {123, 124};
+ EXPECT_EQ(0, sender_.impl_->SendNACK(nack_list_ext, kNackExtLength));
+ EXPECT_EQ(2U, sender_.RtcpSent().nack_packets);
+ EXPECT_THAT(sender_.LastNackListSent(), ElementsAre(124));
+}
+
+TEST_F(RtpRtcpImplTest, ReSendsNackListAfterRttMs) {
+ sender_.transport_.SimulateNetworkDelay(0, &clock_);
+ // Send module sends a NACK.
+ const uint16_t kNackLength = 2;
+ uint16_t nack_list[kNackLength] = {123, 125};
+ EXPECT_EQ(0U, sender_.RtcpSent().nack_packets);
+ // Send Frame before sending a compound RTCP that starts with SR.
+ SendFrame(&sender_, sender_video_.get(), kBaseLayerTid);
+ EXPECT_EQ(0, sender_.impl_->SendNACK(nack_list, kNackLength));
+ EXPECT_EQ(1U, sender_.RtcpSent().nack_packets);
+ EXPECT_THAT(sender_.LastNackListSent(), ElementsAre(123, 125));
+
+  // Same list is not re-sent; the RTT interval has not passed.
+ const int kStartupRttMs = 100;
+ clock_.AdvanceTimeMilliseconds(kStartupRttMs);
+ EXPECT_EQ(0, sender_.impl_->SendNACK(nack_list, kNackLength));
+ EXPECT_EQ(1U, sender_.RtcpSent().nack_packets);
+
+  // RTT interval passed; full list sent.
+ clock_.AdvanceTimeMilliseconds(1);
+ EXPECT_EQ(0, sender_.impl_->SendNACK(nack_list, kNackLength));
+ EXPECT_EQ(2U, sender_.RtcpSent().nack_packets);
+ EXPECT_THAT(sender_.LastNackListSent(), ElementsAre(123, 125));
+}
+
+TEST_F(RtpRtcpImplTest, UniqueNackRequests) {
+ receiver_.transport_.SimulateNetworkDelay(0, &clock_);
+ EXPECT_EQ(0U, receiver_.RtcpSent().nack_packets);
+ EXPECT_EQ(0U, receiver_.RtcpSent().nack_requests);
+ EXPECT_EQ(0U, receiver_.RtcpSent().unique_nack_requests);
+ EXPECT_EQ(0, receiver_.RtcpSent().UniqueNackRequestsInPercent());
+
+ // Receive module sends NACK request.
+ const uint16_t kNackLength = 4;
+ uint16_t nack_list[kNackLength] = {10, 11, 13, 18};
+ EXPECT_EQ(0, receiver_.impl_->SendNACK(nack_list, kNackLength));
+ EXPECT_EQ(1U, receiver_.RtcpSent().nack_packets);
+ EXPECT_EQ(4U, receiver_.RtcpSent().nack_requests);
+ EXPECT_EQ(4U, receiver_.RtcpSent().unique_nack_requests);
+ EXPECT_THAT(receiver_.LastNackListSent(), ElementsAre(10, 11, 13, 18));
+
+ // Send module receives the request.
+ EXPECT_EQ(1U, sender_.RtcpReceived().nack_packets);
+ EXPECT_EQ(4U, sender_.RtcpReceived().nack_requests);
+ EXPECT_EQ(4U, sender_.RtcpReceived().unique_nack_requests);
+ EXPECT_EQ(100, sender_.RtcpReceived().UniqueNackRequestsInPercent());
+
+ // Receive module sends new request with duplicated packets.
+ const int kStartupRttMs = 100;
+ clock_.AdvanceTimeMilliseconds(kStartupRttMs + 1);
+ const uint16_t kNackLength2 = 4;
+ uint16_t nack_list2[kNackLength2] = {11, 18, 20, 21};
+ EXPECT_EQ(0, receiver_.impl_->SendNACK(nack_list2, kNackLength2));
+ EXPECT_EQ(2U, receiver_.RtcpSent().nack_packets);
+ EXPECT_EQ(8U, receiver_.RtcpSent().nack_requests);
+ EXPECT_EQ(6U, receiver_.RtcpSent().unique_nack_requests);
+ EXPECT_THAT(receiver_.LastNackListSent(), ElementsAre(11, 18, 20, 21));
+
+ // Send module receives the request.
+ EXPECT_EQ(2U, sender_.RtcpReceived().nack_packets);
+ EXPECT_EQ(8U, sender_.RtcpReceived().nack_requests);
+ EXPECT_EQ(6U, sender_.RtcpReceived().unique_nack_requests);
+ EXPECT_EQ(75, sender_.RtcpReceived().UniqueNackRequestsInPercent());
+}
+
+TEST_F(RtpRtcpImplTest, ConfigurableRtcpReportInterval) {
+ const int kVideoReportInterval = 3000;
+
+ // Recreate sender impl with new configuration, and redo setup.
+ sender_.SetRtcpReportIntervalAndReset(kVideoReportInterval);
+ SetUp();
+
+ SendFrame(&sender_, sender_video_.get(), kBaseLayerTid);
+
+ // Initial state
+ sender_.impl_->Process();
+ EXPECT_EQ(0u, sender_.transport_.NumRtcpSent());
+
+  // Move ahead to the last ms before an RTCP is expected; no action.
+ clock_.AdvanceTimeMilliseconds(kVideoReportInterval / 2 - 1);
+ sender_.impl_->Process();
+ EXPECT_EQ(sender_.transport_.NumRtcpSent(), 0u);
+
+  // Move ahead to the first RTCP. Send RTCP.
+ clock_.AdvanceTimeMilliseconds(1);
+ sender_.impl_->Process();
+ EXPECT_EQ(sender_.transport_.NumRtcpSent(), 1u);
+
+ SendFrame(&sender_, sender_video_.get(), kBaseLayerTid);
+
+  // Move ahead to the last ms before the second RTCP is expected.
+  clock_.AdvanceTimeMilliseconds(kVideoReportInterval / 2 - 1);
+ sender_.impl_->Process();
+ EXPECT_EQ(sender_.transport_.NumRtcpSent(), 1u);
+
+  // Move ahead into the range of the second RTCP; it may now be sent.
+ clock_.AdvanceTimeMilliseconds(1);
+ sender_.impl_->Process();
+ EXPECT_GE(sender_.transport_.NumRtcpSent(), 1u);
+
+ clock_.AdvanceTimeMilliseconds(kVideoReportInterval / 2);
+ sender_.impl_->Process();
+ EXPECT_GE(sender_.transport_.NumRtcpSent(), 1u);
+
+  // Move out of the range of the second RTCP; it must have been sent by now.
+ clock_.AdvanceTimeMilliseconds(kVideoReportInterval / 2);
+ sender_.impl_->Process();
+ EXPECT_EQ(sender_.transport_.NumRtcpSent(), 2u);
+}
+
+TEST_F(RtpRtcpImplTest, StoresPacketInfoForSentPackets) {
+ const uint32_t kStartTimestamp = 1u;
+ SetUp();
+ sender_.impl_->SetStartTimestamp(kStartTimestamp);
+ sender_.impl_->SetSequenceNumber(1);
+
+ PacedPacketInfo pacing_info;
+ RtpPacketToSend packet(nullptr);
+ packet.set_packet_type(RtpPacketToSend::Type::kVideo);
+ packet.SetSsrc(kSenderSsrc);
+
+ // Single-packet frame.
+ packet.SetTimestamp(1);
+ packet.set_first_packet_of_frame(true);
+ packet.SetMarker(true);
+ sender_.impl_->TrySendPacket(&packet, pacing_info);
+
+ std::vector<RtpSequenceNumberMap::Info> seqno_info =
+ sender_.impl_->GetSentRtpPacketInfos(std::vector<uint16_t>{1});
+
+ EXPECT_THAT(seqno_info, ElementsAre(RtpSequenceNumberMap::Info(
+ /*timestamp=*/1 - kStartTimestamp,
+ /*is_first=*/1,
+ /*is_last=*/1)));
+
+ // Three-packet frame.
+ packet.SetTimestamp(2);
+ packet.set_first_packet_of_frame(true);
+ packet.SetMarker(false);
+ sender_.impl_->TrySendPacket(&packet, pacing_info);
+
+ packet.set_first_packet_of_frame(false);
+ sender_.impl_->TrySendPacket(&packet, pacing_info);
+
+ packet.SetMarker(true);
+ sender_.impl_->TrySendPacket(&packet, pacing_info);
+
+ seqno_info =
+ sender_.impl_->GetSentRtpPacketInfos(std::vector<uint16_t>{2, 3, 4});
+
+ EXPECT_THAT(seqno_info, ElementsAre(RtpSequenceNumberMap::Info(
+ /*timestamp=*/2 - kStartTimestamp,
+ /*is_first=*/1,
+ /*is_last=*/0),
+ RtpSequenceNumberMap::Info(
+ /*timestamp=*/2 - kStartTimestamp,
+ /*is_first=*/0,
+ /*is_last=*/0),
+ RtpSequenceNumberMap::Info(
+ /*timestamp=*/2 - kStartTimestamp,
+ /*is_first=*/0,
+ /*is_last=*/1)));
+}
+
+// Checks that the remote sender stats are not available if no RTCP SR was sent.
+TEST_F(RtpRtcpImplTest, SenderReportStatsNotAvailable) {
+ EXPECT_THAT(receiver_.impl_->GetSenderReportStats(), Eq(absl::nullopt));
+}
+
+// Checks that the remote sender stats are available if an RTCP SR was sent.
+TEST_F(RtpRtcpImplTest, SenderReportStatsAvailable) {
+ // Send a frame in order to send an SR.
+ SendFrame(&sender_, sender_video_.get(), kBaseLayerTid);
+ // Send an SR.
+ ASSERT_THAT(sender_.impl_->SendRTCP(kRtcpReport), Eq(0));
+ EXPECT_THAT(receiver_.impl_->GetSenderReportStats(), Not(Eq(absl::nullopt)));
+}
+
+// Checks that the remote sender stats are not available if an RTCP SR with an
+// unexpected SSRC is received.
+TEST_F(RtpRtcpImplTest, SenderReportStatsNotUpdatedWithUnexpectedSsrc) {
+ constexpr uint32_t kUnexpectedSenderSsrc = 0x87654321;
+ static_assert(kUnexpectedSenderSsrc != kSenderSsrc, "");
+ // Forge a sender report and pass it to the receiver as if an RTCP SR were
+ // sent by an unexpected sender.
+ rtcp::SenderReport sr;
+ sr.SetSenderSsrc(kUnexpectedSenderSsrc);
+ sr.SetNtp({/*seconds=*/1u, /*fractions=*/1u << 31});
+ sr.SetPacketCount(123u);
+ sr.SetOctetCount(456u);
+ auto raw_packet = sr.Build();
+ receiver_.impl_->IncomingRtcpPacket(raw_packet.data(), raw_packet.size());
+ EXPECT_THAT(receiver_.impl_->GetSenderReportStats(), Eq(absl::nullopt));
+}
+
+// Checks the stats derived from the last received RTCP SR are set correctly.
+TEST_F(RtpRtcpImplTest, SenderReportStatsCheckStatsFromLastReport) {
+ using SenderReportStats = RtpRtcpInterface::SenderReportStats;
+ const NtpTime ntp(/*seconds=*/1u, /*fractions=*/1u << 31);
+ constexpr uint32_t kPacketCount = 123u;
+ constexpr uint32_t kOctetCount = 456u;
+ // Forge a sender report and pass it to the receiver as if an RTCP SR were
+ // sent by the sender.
+ rtcp::SenderReport sr;
+ sr.SetSenderSsrc(kSenderSsrc);
+ sr.SetNtp(ntp);
+ sr.SetPacketCount(kPacketCount);
+ sr.SetOctetCount(kOctetCount);
+ auto raw_packet = sr.Build();
+ receiver_.impl_->IncomingRtcpPacket(raw_packet.data(), raw_packet.size());
+
+ EXPECT_THAT(
+ receiver_.impl_->GetSenderReportStats(),
+ Optional(AllOf(Field(&SenderReportStats::last_remote_timestamp, Eq(ntp)),
+ Field(&SenderReportStats::packets_sent, Eq(kPacketCount)),
+ Field(&SenderReportStats::bytes_sent, Eq(kOctetCount)))));
+}
+
+// Checks that the remote sender stats count equals the number of sent RTCP SRs.
+TEST_F(RtpRtcpImplTest, SenderReportStatsCount) {
+ using SenderReportStats = RtpRtcpInterface::SenderReportStats;
+ // Send a frame in order to send an SR.
+ SendFrame(&sender_, sender_video_.get(), kBaseLayerTid);
+ // Send the first SR.
+ ASSERT_THAT(sender_.impl_->SendRTCP(kRtcpReport), Eq(0));
+ EXPECT_THAT(receiver_.impl_->GetSenderReportStats(),
+ Optional(Field(&SenderReportStats::reports_count, Eq(1u))));
+ // Send the second SR.
+ ASSERT_THAT(sender_.impl_->SendRTCP(kRtcpReport), Eq(0));
+ EXPECT_THAT(receiver_.impl_->GetSenderReportStats(),
+ Optional(Field(&SenderReportStats::reports_count, Eq(2u))));
+}
+
+// Checks that the remote sender stats include a valid arrival time if an RTCP
+// SR was sent.
+TEST_F(RtpRtcpImplTest, SenderReportStatsArrivalTimestampSet) {
+ // Send a frame in order to send an SR.
+ SendFrame(&sender_, sender_video_.get(), kBaseLayerTid);
+ // Send an SR.
+ ASSERT_THAT(sender_.impl_->SendRTCP(kRtcpReport), Eq(0));
+ auto stats = receiver_.impl_->GetSenderReportStats();
+ ASSERT_THAT(stats, Not(Eq(absl::nullopt)));
+ EXPECT_TRUE(stats->last_arrival_timestamp.Valid());
+}
+
+// Checks that the packet and byte counters from an RTCP SR are not zero once
+// a frame is sent.
+TEST_F(RtpRtcpImplTest, SenderReportStatsPacketByteCounters) {
+ using SenderReportStats = RtpRtcpInterface::SenderReportStats;
+ // Send a frame in order to send an SR.
+ SendFrame(&sender_, sender_video_.get(), kBaseLayerTid);
+ ASSERT_THAT(sender_.transport_.rtp_packets_sent_, Gt(0));
+ // Advance time otherwise the RTCP SR report will not include any packets
+ // generated by `SendFrame()`.
+ clock_.AdvanceTimeMilliseconds(1);
+ // Send an SR.
+ ASSERT_THAT(sender_.impl_->SendRTCP(kRtcpReport), Eq(0));
+ EXPECT_THAT(receiver_.impl_->GetSenderReportStats(),
+ Optional(AllOf(Field(&SenderReportStats::packets_sent, Gt(0u)),
+ Field(&SenderReportStats::bytes_sent, Gt(0u)))));
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_rtcp_interface.h b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_rtcp_interface.h
new file mode 100644
index 0000000000..d3bb8c869c
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_rtcp_interface.h
@@ -0,0 +1,469 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_RTP_RTCP_SOURCE_RTP_RTCP_INTERFACE_H_
+#define MODULES_RTP_RTCP_SOURCE_RTP_RTCP_INTERFACE_H_
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "absl/strings/string_view.h"
+#include "absl/types/optional.h"
+#include "api/field_trials_view.h"
+#include "api/frame_transformer_interface.h"
+#include "api/scoped_refptr.h"
+#include "api/video/video_bitrate_allocation.h"
+#include "modules/rtp_rtcp/include/receive_statistics.h"
+#include "modules/rtp_rtcp/include/report_block_data.h"
+#include "modules/rtp_rtcp/include/rtp_packet_sender.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "modules/rtp_rtcp/source/rtp_packet_to_send.h"
+#include "modules/rtp_rtcp/source/rtp_sequence_number_map.h"
+#include "modules/rtp_rtcp/source/video_fec_generator.h"
+#include "system_wrappers/include/ntp_time.h"
+
+namespace webrtc {
+
+// Forward declarations.
+class FrameEncryptorInterface;
+class RateLimiter;
+class RtcEventLog;
+class RTPSender;
+class Transport;
+class VideoBitrateAllocationObserver;
+
+class RtpRtcpInterface : public RtcpFeedbackSenderInterface {
+ public:
+ struct Configuration {
+ Configuration() = default;
+ Configuration(Configuration&& rhs) = default;
+
+ Configuration(const Configuration&) = delete;
+ Configuration& operator=(const Configuration&) = delete;
+
+    // True creates an audio version of the RTP/RTCP module; false creates a
+    // video version.
+ bool audio = false;
+ bool receiver_only = false;
+
+ // The clock to use to read time. If nullptr then system clock will be used.
+ Clock* clock = nullptr;
+
+ ReceiveStatisticsProvider* receive_statistics = nullptr;
+
+ // Transport object that will be called when packets are ready to be sent
+ // out on the network.
+ Transport* outgoing_transport = nullptr;
+
+ // Called when the receiver requests an intra frame.
+ RtcpIntraFrameObserver* intra_frame_callback = nullptr;
+
+ // Called when the receiver sends a loss notification.
+ RtcpLossNotificationObserver* rtcp_loss_notification_observer = nullptr;
+
+    // Called when we receive a changed estimate from the receiver of our
+    // stream.
+ RtcpBandwidthObserver* bandwidth_callback = nullptr;
+
+    // Called when we receive an RTCP BYE or a timeout.
+ RtcpEventObserver* rtcp_event_observer = nullptr;
+
+ NetworkStateEstimateObserver* network_state_estimate_observer = nullptr;
+ TransportFeedbackObserver* transport_feedback_callback = nullptr;
+ VideoBitrateAllocationObserver* bitrate_allocation_observer = nullptr;
+ RtcpRttStats* rtt_stats = nullptr;
+ RtcpPacketTypeCounterObserver* rtcp_packet_type_counter_observer = nullptr;
+ // Called on receipt of RTCP report block from remote side.
+ // TODO(bugs.webrtc.org/10679): Consider whether we want to use
+ // only getters or only callbacks. If we decide on getters, the
+ // ReportBlockDataObserver should also be removed in favor of
+ // GetLatestReportBlockData().
+ RtcpCnameCallback* rtcp_cname_callback = nullptr;
+ ReportBlockDataObserver* report_block_data_observer = nullptr;
+
+ // Spread any bursts of packets into smaller bursts to minimize packet loss.
+ RtpPacketSender* paced_sender = nullptr;
+
+ // Generates FEC packets.
+ // TODO(sprang): Wire up to RtpSenderEgress.
+ VideoFecGenerator* fec_generator = nullptr;
+
+ BitrateStatisticsObserver* send_bitrate_observer = nullptr;
+ SendSideDelayObserver* send_side_delay_observer = nullptr;
+ RtcEventLog* event_log = nullptr;
+ SendPacketObserver* send_packet_observer = nullptr;
+ RateLimiter* retransmission_rate_limiter = nullptr;
+ StreamDataCountersCallback* rtp_stats_callback = nullptr;
+
+ int rtcp_report_interval_ms = 0;
+
+    // Update the network2 field instead of the pacer_exit field of the video
+    // timing extension.
+ bool populate_network2_timestamp = false;
+
+ rtc::scoped_refptr<FrameTransformerInterface> frame_transformer;
+
+ // E2EE Custom Video Frame Encryption
+ FrameEncryptorInterface* frame_encryptor = nullptr;
+ // Require all outgoing frames to be encrypted with a FrameEncryptor.
+ bool require_frame_encryption = false;
+
+ // Corresponds to extmap-allow-mixed in SDP negotiation.
+ bool extmap_allow_mixed = false;
+
+    // If true, the RTP sender will always annotate outgoing packets with
+    // MID and RID header extensions, if provided and negotiated.
+    // If false, the RTP sender will stop sending MID and RID header
+    // extensions once it knows that the receiver is ready to demux based on
+    // SSRC, which is signaled by RTCP RR acking.
+ bool always_send_mid_and_rid = false;
+
+ // If set, field trials are read from `field_trials`, otherwise
+ // defaults to webrtc::FieldTrialBasedConfig.
+ const FieldTrialsView* field_trials = nullptr;
+
+ // SSRCs for media and retransmission, respectively.
+ // FlexFec SSRC is fetched from `flexfec_sender`.
+ uint32_t local_media_ssrc = 0;
+ absl::optional<uint32_t> rtx_send_ssrc;
+
+ bool need_rtp_packet_infos = false;
+
+    // If true, the RTP packet history will select RTX packets based on
+    // heuristics such as send time, retransmission count, etc., in order to
+    // make padding potentially more useful.
+    // If false, the last packet will always be picked. This may reduce CPU
+    // overhead.
+ bool enable_rtx_padding_prioritization = true;
+
+ // Estimate RTT as non-sender as described in
+ // https://tools.ietf.org/html/rfc3611#section-4.4 and #section-4.5
+ bool non_sender_rtt_measurement = false;
+
+ // If non-empty, sets the value for sending in the RID (and Repaired) RTP
+ // header extension. RIDs are used to identify an RTP stream if SSRCs are
+ // not negotiated. If the RID and Repaired RID extensions are not
+ // registered, the RID will not be sent.
+ std::string rid;
+ };
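+
+  // A minimal sketch (illustrative, not normative) of configuring a video
+  // send module; `clock` and `transport` are assumed to be provided by the
+  // caller:
+  //
+  //   RtpRtcpInterface::Configuration config;
+  //   config.audio = false;                  // Video module.
+  //   config.clock = clock;
+  //   config.outgoing_transport = transport;
+  //   config.local_media_ssrc = 0x12345678;
+  //   config.rtcp_report_interval_ms = 1000;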
+
+ // Stats for RTCP sender reports (SR) for a specific SSRC.
+ // Refer to https://tools.ietf.org/html/rfc3550#section-6.4.1.
+ struct SenderReportStats {
+ // Arrival NTP timestamp for the last received RTCP SR.
+ NtpTime last_arrival_timestamp;
+ // Received (a.k.a., remote) NTP timestamp for the last received RTCP SR.
+ NtpTime last_remote_timestamp;
+ // Total number of RTP data packets transmitted by the sender since starting
+ // transmission up until the time this SR packet was generated. The count
+ // should be reset if the sender changes its SSRC identifier.
+ uint32_t packets_sent;
+ // Total number of payload octets (i.e., not including header or padding)
+ // transmitted in RTP data packets by the sender since starting transmission
+ // up until the time this SR packet was generated. The count should be reset
+ // if the sender changes its SSRC identifier.
+ uint64_t bytes_sent;
+ // Total number of RTCP SR blocks received.
+ // https://www.w3.org/TR/webrtc-stats/#dom-rtcremoteoutboundrtpstreamstats-reportssent.
+ uint64_t reports_count;
+ };
+ // Stats about the non-sender SSRC, based on RTCP extended reports (XR).
+ // Refer to https://datatracker.ietf.org/doc/html/rfc3611#section-2.
+ struct NonSenderRttStats {
+ // https://www.w3.org/TR/webrtc-stats/#dom-rtcremoteoutboundrtpstreamstats-roundtriptime
+ absl::optional<TimeDelta> round_trip_time;
+ // https://www.w3.org/TR/webrtc-stats/#dom-rtcremoteoutboundrtpstreamstats-totalroundtriptime
+ TimeDelta total_round_trip_time = TimeDelta::Zero();
+ // https://www.w3.org/TR/webrtc-stats/#dom-rtcremoteoutboundrtpstreamstats-roundtriptimemeasurements
+ int round_trip_time_measurements = 0;
+ };
+
+ // **************************************************************************
+ // Receiver functions
+ // **************************************************************************
+
+ virtual void IncomingRtcpPacket(const uint8_t* incoming_packet,
+ size_t incoming_packet_length) = 0;
+
+ virtual void SetRemoteSSRC(uint32_t ssrc) = 0;
+
+  // Called when the local SSRC changes (post-initialization) so that receive
+  // streams can match the send stream. Called on the packet receive thread/tq.
+ virtual void SetLocalSsrc(uint32_t ssrc) = 0;
+
+ // **************************************************************************
+ // Sender
+ // **************************************************************************
+
+ // Sets the maximum size of an RTP packet, including RTP headers.
+ virtual void SetMaxRtpPacketSize(size_t size) = 0;
+
+ // Returns max RTP packet size. Takes into account RTP headers and
+ // FEC/ULP/RED overhead (when FEC is enabled).
+ virtual size_t MaxRtpPacketSize() const = 0;
+
+ virtual void RegisterSendPayloadFrequency(int payload_type,
+ int payload_frequency) = 0;
+
+ // Unregisters a send payload.
+ // `payload_type` - payload type of codec
+ // Returns -1 on failure else 0.
+ virtual int32_t DeRegisterSendPayload(int8_t payload_type) = 0;
+
+ virtual void SetExtmapAllowMixed(bool extmap_allow_mixed) = 0;
+
+  // Registers extension by URI; triggers a CHECK on failure.
+ virtual void RegisterRtpHeaderExtension(absl::string_view uri, int id) = 0;
+
+ virtual void DeregisterSendRtpHeaderExtension(absl::string_view uri) = 0;
+
+  // Returns true if the RTP module is configured to send media, and any of
+  // the extensions required for bandwidth estimation is registered.
+ virtual bool SupportsPadding() const = 0;
+ // Same as SupportsPadding(), but additionally requires that
+ // SetRtxSendStatus() has been called with the kRtxRedundantPayloads option
+ // enabled.
+ virtual bool SupportsRtxPayloadPadding() const = 0;
+
+ // Returns start timestamp.
+ virtual uint32_t StartTimestamp() const = 0;
+
+ // Sets start timestamp. Start timestamp is set to a random value if this
+ // function is never called.
+ virtual void SetStartTimestamp(uint32_t timestamp) = 0;
+
+ // Returns SequenceNumber.
+ virtual uint16_t SequenceNumber() const = 0;
+
+ // Sets SequenceNumber, default is a random number.
+ virtual void SetSequenceNumber(uint16_t seq) = 0;
+
+ virtual void SetRtpState(const RtpState& rtp_state) = 0;
+ virtual void SetRtxState(const RtpState& rtp_state) = 0;
+ virtual RtpState GetRtpState() const = 0;
+ virtual RtpState GetRtxState() const = 0;
+
+ // This can be used to enable/disable receive-side RTT.
+ virtual void SetNonSenderRttMeasurement(bool enabled) = 0;
+
+ // Returns SSRC.
+ virtual uint32_t SSRC() const = 0;
+
+ // Sets the value for sending in the MID RTP header extension.
+  // The MID RTP header extension should be registered for this to do anything.
+  // Once set, this value cannot be changed or removed.
+ virtual void SetMid(absl::string_view mid) = 0;
+
+ // Sets CSRC.
+ // `csrcs` - vector of CSRCs
+ virtual void SetCsrcs(const std::vector<uint32_t>& csrcs) = 0;
+
+ // Turns on/off sending RTX (RFC 4588). The modes can be set as a combination
+ // of values of the enumerator RtxMode.
+ virtual void SetRtxSendStatus(int modes) = 0;
+
+ // Returns status of sending RTX (RFC 4588). The returned value can be
+ // a combination of values of the enumerator RtxMode.
+ virtual int RtxSendStatus() const = 0;
+
+ // Returns the SSRC used for RTX if set, otherwise a nullopt.
+ virtual absl::optional<uint32_t> RtxSsrc() const = 0;
+
+ // Sets the payload type to use when sending RTX packets. Note that this
+ // doesn't enable RTX, only the payload type is set.
+ virtual void SetRtxSendPayloadType(int payload_type,
+ int associated_payload_type) = 0;
+
+ // Returns the FlexFEC SSRC, if there is one.
+ virtual absl::optional<uint32_t> FlexfecSsrc() const = 0;
+
+ // Sets sending status. Sends kRtcpByeCode when going from true to false.
+ // Returns -1 on failure else 0.
+ virtual int32_t SetSendingStatus(bool sending) = 0;
+
+ // Returns current sending status.
+ virtual bool Sending() const = 0;
+
+  // Starts/stops sending media packets. On by default.
+ virtual void SetSendingMediaStatus(bool sending) = 0;
+
+ // Returns current media sending status.
+ virtual bool SendingMedia() const = 0;
+
+ // Returns whether audio is configured (i.e. Configuration::audio = true).
+ virtual bool IsAudioConfigured() const = 0;
+
+ // Indicate that the packets sent by this module should be counted towards the
+ // bitrate estimate since the stream participates in the bitrate allocation.
+ virtual void SetAsPartOfAllocation(bool part_of_allocation) = 0;
+
+ // Returns bitrate sent (post-pacing) per packet type.
+ virtual RtpSendRates GetSendRates() const = 0;
+
+ virtual RTPSender* RtpSender() = 0;
+ virtual const RTPSender* RtpSender() const = 0;
+
+ // Record that a frame is about to be sent. Returns true on success, and false
+ // if the module isn't ready to send.
+ virtual bool OnSendingRtpFrame(uint32_t timestamp,
+ int64_t capture_time_ms,
+ int payload_type,
+ bool force_sender_report) = 0;
+
+ // Try to send the provided packet. Returns true iff packet matches any of
+ // the SSRCs for this module (media/rtx/fec etc) and was forwarded to the
+ // transport.
+ virtual bool TrySendPacket(RtpPacketToSend* packet,
+ const PacedPacketInfo& pacing_info) = 0;
+
+ // Update the FEC protection parameters to use for delta- and key-frames.
+ // Only used when deferred FEC is active.
+ virtual void SetFecProtectionParams(
+ const FecProtectionParams& delta_params,
+ const FecProtectionParams& key_params) = 0;
+
+ // If deferred FEC generation is enabled, this method should be called after
+ // calling TrySendPacket(). Any generated FEC packets will be removed and
+ // returned from the FEC generator.
+ virtual std::vector<std::unique_ptr<RtpPacketToSend>> FetchFecPackets() = 0;
+
+ virtual void OnPacketsAcknowledged(
+ rtc::ArrayView<const uint16_t> sequence_numbers) = 0;
+
+ virtual std::vector<std::unique_ptr<RtpPacketToSend>> GeneratePadding(
+ size_t target_size_bytes) = 0;
+
+ virtual std::vector<RtpSequenceNumberMap::Info> GetSentRtpPacketInfos(
+ rtc::ArrayView<const uint16_t> sequence_numbers) const = 0;
+
+ // Returns an expected per packet overhead representing the main RTP header,
+ // any CSRCs, and the registered header extensions that are expected on all
+ // packets (i.e. disregarding things like abs capture time which is only
+ // populated on a subset of packets, but counting MID/RID type extensions
+ // when we expect to send them).
+ virtual size_t ExpectedPerPacketOverhead() const = 0;
+
+  // Packet state (e.g. sequence numbering) must only be accessed by one
+  // thread at a time. It may be a single thread, or a construction thread
+  // that calls SetRtpState() - handing over to a pacer thread that calls
+  // TrySendPacket() - with ownership handed at teardown to a destruction
+  // thread that calls GetRtpState().
+ // This method is used to signal that "ownership" of the rtp state is being
+ // transferred to another thread.
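+  //
+  // A sketch of the intended handover sequence (illustrative only):
+  //   construction thread: module->SetRtpState(saved_state);
+  //                        module->OnPacketSendingThreadSwitched();
+  //   pacer thread:        module->TrySendPacket(&packet, pacing_info);
+  //   teardown thread:     module->OnPacketSendingThreadSwitched();
+  //                        saved_state = module->GetRtpState();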
+ virtual void OnPacketSendingThreadSwitched() = 0;
+
+ // **************************************************************************
+ // RTCP
+ // **************************************************************************
+
+ // Returns RTCP status.
+ virtual RtcpMode RTCP() const = 0;
+
+  // Sets RTCP status, i.e. on (compound or non-compound) or off.
+ // `method` - RTCP method to use.
+ virtual void SetRTCPStatus(RtcpMode method) = 0;
+
+  // Sets RTCP CNAME (i.e. the unique identifier).
+ // Returns -1 on failure else 0.
+ virtual int32_t SetCNAME(absl::string_view cname) = 0;
+
+ // Returns remote NTP.
+ // Returns -1 on failure else 0.
+ virtual int32_t RemoteNTP(uint32_t* received_ntp_secs,
+ uint32_t* received_ntp_frac,
+ uint32_t* rtcp_arrival_time_secs,
+ uint32_t* rtcp_arrival_time_frac,
+ uint32_t* rtcp_timestamp) const = 0;
+
+ // Returns current RTT (round-trip time) estimate.
+ // Returns -1 on failure else 0.
+ virtual int32_t RTT(uint32_t remote_ssrc,
+ int64_t* rtt,
+ int64_t* avg_rtt,
+ int64_t* min_rtt,
+ int64_t* max_rtt) const = 0;
+
+ // Returns the estimated RTT, with fallback to a default value.
+ virtual int64_t ExpectedRetransmissionTimeMs() const = 0;
+
+  // Forces a send of an RTCP packet. Periodic SR and RR are triggered via the
+ // process function.
+ // Returns -1 on failure else 0.
+ virtual int32_t SendRTCP(RTCPPacketType rtcp_packet_type) = 0;
+
+ // Returns send statistics for the RTP and RTX stream.
+ virtual void GetSendStreamDataCounters(
+ StreamDataCounters* rtp_counters,
+ StreamDataCounters* rtx_counters) const = 0;
+
+ // Returns packet count, octet count, and timestamps from RTCP sender report.
+ virtual void RemoteRTCPSenderInfo(uint32_t* packet_count,
+ uint32_t* octet_count,
+ int64_t* ntp_timestamp_ms,
+ int64_t* remote_ntp_timestamp_ms) const = 0;
+ // A snapshot of Report Blocks with additional data of interest to statistics.
+ // Within this list, the sender-source SSRC pair is unique and per-pair the
+ // ReportBlockData represents the latest Report Block that was received for
+ // that pair.
+ virtual std::vector<ReportBlockData> GetLatestReportBlockData() const = 0;
+ // Returns stats based on the received RTCP SRs.
+ virtual absl::optional<SenderReportStats> GetSenderReportStats() const = 0;
+ // Returns non-sender RTT stats, based on DLRR.
+ virtual absl::optional<NonSenderRttStats> GetNonSenderRttStats() const = 0;
+
+ // (REMB) Receiver Estimated Max Bitrate.
+ // Schedules sending REMB on next and following sender/receiver reports.
+ void SetRemb(int64_t bitrate_bps, std::vector<uint32_t> ssrcs) override = 0;
+ // Stops sending REMB on next and following sender/receiver reports.
+ void UnsetRemb() override = 0;
+
+ // (NACK)
+
+ // Sends a Negative acknowledgement packet.
+ // Returns -1 on failure else 0.
+  // TODO(philipel): Deprecate this and start using SendNack instead, mostly
+  // because we want a function that actually sends NACK for the specified
+  // packets.
+ virtual int32_t SendNACK(const uint16_t* nack_list, uint16_t size) = 0;
+
+ // Sends NACK for the packets specified.
+ // Note: This assumes the caller keeps track of timing and doesn't rely on
+ // the RTP module to do this.
+ virtual void SendNack(const std::vector<uint16_t>& sequence_numbers) = 0;
+
+  // Stores the sent packets, needed to answer Negative acknowledgement
+  // (NACK) requests.
+ virtual void SetStorePacketsStatus(bool enable, uint16_t numberToStore) = 0;
+
+ virtual void SetVideoBitrateAllocation(
+ const VideoBitrateAllocation& bitrate) = 0;
+
+ // **************************************************************************
+ // Video
+ // **************************************************************************
+
+  // Requests a new key frame:
+ // using PLI, https://tools.ietf.org/html/rfc4585#section-6.3.1.1
+ void SendPictureLossIndication() { SendRTCP(kRtcpPli); }
+ // using FIR, https://tools.ietf.org/html/rfc5104#section-4.3.1.2
+ void SendFullIntraRequest() { SendRTCP(kRtcpFir); }
+
+ // Sends a LossNotification RTCP message.
+ // Returns -1 on failure else 0.
+ virtual int32_t SendLossNotification(uint16_t last_decoded_seq_num,
+ uint16_t last_received_seq_num,
+ bool decodability_flag,
+ bool buffering_allowed) = 0;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_RTP_RTCP_SOURCE_RTP_RTCP_INTERFACE_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_sender.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_sender.cc
new file mode 100644
index 0000000000..1a0291dfc5
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_sender.cc
@@ -0,0 +1,791 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtp_sender.h"
+
+#include <algorithm>
+#include <limits>
+#include <memory>
+#include <string>
+#include <utility>
+
+#include "absl/strings/match.h"
+#include "absl/strings/string_view.h"
+#include "api/array_view.h"
+#include "api/rtc_event_log/rtc_event_log.h"
+#include "logging/rtc_event_log/events/rtc_event_rtp_packet_outgoing.h"
+#include "modules/rtp_rtcp/include/rtp_cvo.h"
+#include "modules/rtp_rtcp/source/byte_io.h"
+#include "modules/rtp_rtcp/source/rtp_generic_frame_descriptor_extension.h"
+#include "modules/rtp_rtcp/source/rtp_header_extensions.h"
+#include "modules/rtp_rtcp/source/rtp_packet_to_send.h"
+#include "modules/rtp_rtcp/source/time_util.h"
+#include "rtc_base/arraysize.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/experiments/field_trial_parser.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/numerics/safe_minmax.h"
+#include "rtc_base/rate_limiter.h"
+#include "rtc_base/time_utils.h"
+
+namespace webrtc {
+
+namespace {
+// The max in RFC 3550 is 255 bytes; we limit it to a multiple of 32 for SRTP.
+constexpr size_t kMaxPaddingLength = 224;
+constexpr size_t kMinAudioPaddingLength = 50;
+constexpr size_t kRtpHeaderLength = 12;
+
+// Min size needed to get payload padding from packet history.
+constexpr int kMinPayloadPaddingBytes = 50;
+
+// Determines how much larger a payload padding packet may be, compared to the
+// requested padding size.
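+// E.g. with the factor at 3.0, a request for 100 bytes of padding may be
+// satisfied by a payload padding packet of up to roughly 300 bytes.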
+constexpr double kMaxPaddingSizeFactor = 3.0;
+
+template <typename Extension>
+constexpr RtpExtensionSize CreateExtensionSize() {
+ return {Extension::kId, Extension::kValueSizeBytes};
+}
+
+template <typename Extension>
+constexpr RtpExtensionSize CreateMaxExtensionSize() {
+ return {Extension::kId, Extension::kMaxValueSizeBytes};
+}
+
+// Size info for header extensions that might be used in padding or FEC packets.
+constexpr RtpExtensionSize kFecOrPaddingExtensionSizes[] = {
+ CreateExtensionSize<AbsoluteSendTime>(),
+ CreateExtensionSize<TransmissionOffset>(),
+ CreateExtensionSize<TransportSequenceNumber>(),
+ CreateExtensionSize<PlayoutDelayLimits>(),
+ CreateMaxExtensionSize<RtpMid>(),
+ CreateExtensionSize<VideoTimingExtension>(),
+};
+
+// Size info for header extensions that might be used in video packets.
+constexpr RtpExtensionSize kVideoExtensionSizes[] = {
+ CreateExtensionSize<AbsoluteSendTime>(),
+ CreateExtensionSize<AbsoluteCaptureTimeExtension>(),
+ CreateExtensionSize<TransmissionOffset>(),
+ CreateExtensionSize<TransportSequenceNumber>(),
+ CreateExtensionSize<PlayoutDelayLimits>(),
+ CreateExtensionSize<VideoOrientation>(),
+ CreateExtensionSize<VideoContentTypeExtension>(),
+ CreateExtensionSize<VideoTimingExtension>(),
+ CreateMaxExtensionSize<RtpStreamId>(),
+ CreateMaxExtensionSize<RepairedRtpStreamId>(),
+ CreateMaxExtensionSize<RtpMid>(),
+ {RtpGenericFrameDescriptorExtension00::kId,
+ RtpGenericFrameDescriptorExtension00::kMaxSizeBytes},
+};
+
+// Size info for header extensions that might be used in audio packets.
+constexpr RtpExtensionSize kAudioExtensionSizes[] = {
+ CreateExtensionSize<AbsoluteSendTime>(),
+ CreateExtensionSize<AbsoluteCaptureTimeExtension>(),
+ CreateExtensionSize<AudioLevel>(),
+ CreateExtensionSize<InbandComfortNoiseExtension>(),
+ CreateExtensionSize<TransmissionOffset>(),
+ CreateExtensionSize<TransportSequenceNumber>(),
+ CreateMaxExtensionSize<RtpStreamId>(),
+ CreateMaxExtensionSize<RepairedRtpStreamId>(),
+ CreateMaxExtensionSize<RtpMid>(),
+};
+
+// Non-volatile extensions can be expected on all packets, if registered.
+// Volatile ones, such as VideoContentTypeExtension which is only set on
+// key-frames, are removed to simplify overhead calculations at the expense of
+// some accuracy.
+bool IsNonVolatile(RTPExtensionType type) {
+ switch (type) {
+ case kRtpExtensionTransmissionTimeOffset:
+ case kRtpExtensionAudioLevel:
+#if !defined(WEBRTC_MOZILLA_BUILD)
+ case kRtpExtensionCsrcAudioLevel:
+#endif
+ case kRtpExtensionAbsoluteSendTime:
+ case kRtpExtensionTransportSequenceNumber:
+ case kRtpExtensionTransportSequenceNumber02:
+ case kRtpExtensionRtpStreamId:
+ case kRtpExtensionMid:
+ case kRtpExtensionGenericFrameDescriptor00:
+ case kRtpExtensionGenericFrameDescriptor02:
+ return true;
+ case kRtpExtensionInbandComfortNoise:
+ case kRtpExtensionAbsoluteCaptureTime:
+ case kRtpExtensionVideoRotation:
+ case kRtpExtensionPlayoutDelay:
+ case kRtpExtensionVideoContentType:
+ case kRtpExtensionVideoLayersAllocation:
+ case kRtpExtensionVideoTiming:
+ case kRtpExtensionRepairedRtpStreamId:
+ case kRtpExtensionColorSpace:
+ case kRtpExtensionVideoFrameTrackingId:
+ return false;
+ case kRtpExtensionNone:
+ case kRtpExtensionNumberOfExtensions:
+ RTC_DCHECK_NOTREACHED();
+ return false;
+#if defined(WEBRTC_MOZILLA_BUILD)
+ case kRtpExtensionCsrcAudioLevel:
+ // TODO: Mozilla implement for CsrcAudioLevel
+ RTC_CHECK(false);
+ return false;
+#endif
+ }
+ RTC_CHECK_NOTREACHED();
+}
+
+bool HasBweExtension(const RtpHeaderExtensionMap& extensions_map) {
+ return extensions_map.IsRegistered(kRtpExtensionTransportSequenceNumber) ||
+ extensions_map.IsRegistered(kRtpExtensionTransportSequenceNumber02) ||
+ extensions_map.IsRegistered(kRtpExtensionAbsoluteSendTime) ||
+ extensions_map.IsRegistered(kRtpExtensionTransmissionTimeOffset);
+}
+
+} // namespace
+
+RTPSender::RTPSender(const RtpRtcpInterface::Configuration& config,
+ RtpPacketHistory* packet_history,
+ RtpPacketSender* packet_sender)
+ : clock_(config.clock),
+ random_(clock_->TimeInMicroseconds()),
+ audio_configured_(config.audio),
+ ssrc_(config.local_media_ssrc),
+ rtx_ssrc_(config.rtx_send_ssrc),
+ flexfec_ssrc_(config.fec_generator ? config.fec_generator->FecSsrc()
+ : absl::nullopt),
+ packet_history_(packet_history),
+ paced_sender_(packet_sender),
+ sending_media_(true), // Default to sending media.
+ max_packet_size_(IP_PACKET_SIZE - 28), // Default is IP-v4/UDP.
+ rtp_header_extension_map_(config.extmap_allow_mixed),
+ // RTP variables
+ rid_(config.rid),
+ always_send_mid_and_rid_(config.always_send_mid_and_rid),
+ ssrc_has_acked_(false),
+ rtx_ssrc_has_acked_(false),
+ csrcs_(),
+ rtx_(kRtxOff),
+ supports_bwe_extension_(false),
+ retransmission_rate_limiter_(config.retransmission_rate_limiter) {
+  // This random initialization is not intended to be cryptographically
+  // strong.
+ timestamp_offset_ = random_.Rand<uint32_t>();
+
+ RTC_DCHECK(paced_sender_);
+ RTC_DCHECK(packet_history_);
+ RTC_DCHECK_LE(rid_.size(), RtpStreamId::kMaxValueSizeBytes);
+
+ UpdateHeaderSizes();
+}
+
+RTPSender::~RTPSender() {
+ // TODO(tommi): Use a thread checker to ensure the object is created and
+ // deleted on the same thread. At the moment this isn't possible due to
+ // voe::ChannelOwner in voice engine. To reproduce, run:
+ // voe_auto_test --automated --gtest_filter=*MixManyChannelsForStressOpus
+
+ // TODO(tommi,holmer): We don't grab locks in the dtor before accessing member
+ // variables but we grab them in all other methods. (what's the design?)
+ // Start documenting what thread we're on in what method so that it's easier
+ // to understand performance attributes and possibly remove locks.
+}
+
+rtc::ArrayView<const RtpExtensionSize> RTPSender::FecExtensionSizes() {
+ return rtc::MakeArrayView(kFecOrPaddingExtensionSizes,
+ arraysize(kFecOrPaddingExtensionSizes));
+}
+
+rtc::ArrayView<const RtpExtensionSize> RTPSender::VideoExtensionSizes() {
+ return rtc::MakeArrayView(kVideoExtensionSizes,
+ arraysize(kVideoExtensionSizes));
+}
+
+rtc::ArrayView<const RtpExtensionSize> RTPSender::AudioExtensionSizes() {
+ return rtc::MakeArrayView(kAudioExtensionSizes,
+ arraysize(kAudioExtensionSizes));
+}
+
+void RTPSender::SetExtmapAllowMixed(bool extmap_allow_mixed) {
+ MutexLock lock(&send_mutex_);
+ rtp_header_extension_map_.SetExtmapAllowMixed(extmap_allow_mixed);
+}
+
+bool RTPSender::RegisterRtpHeaderExtension(absl::string_view uri, int id) {
+ MutexLock lock(&send_mutex_);
+ bool registered = rtp_header_extension_map_.RegisterByUri(id, uri);
+ supports_bwe_extension_ = HasBweExtension(rtp_header_extension_map_);
+ UpdateHeaderSizes();
+ return registered;
+}
+
+bool RTPSender::IsRtpHeaderExtensionRegistered(RTPExtensionType type) const {
+ MutexLock lock(&send_mutex_);
+ return rtp_header_extension_map_.IsRegistered(type);
+}
+
+void RTPSender::DeregisterRtpHeaderExtension(absl::string_view uri) {
+ MutexLock lock(&send_mutex_);
+ rtp_header_extension_map_.Deregister(uri);
+ supports_bwe_extension_ = HasBweExtension(rtp_header_extension_map_);
+ UpdateHeaderSizes();
+}
+
+void RTPSender::SetMaxRtpPacketSize(size_t max_packet_size) {
+ RTC_DCHECK_GE(max_packet_size, 100);
+ RTC_DCHECK_LE(max_packet_size, IP_PACKET_SIZE);
+ MutexLock lock(&send_mutex_);
+ max_packet_size_ = max_packet_size;
+}
+
+size_t RTPSender::MaxRtpPacketSize() const {
+ return max_packet_size_;
+}
+
+void RTPSender::SetRtxStatus(int mode) {
+ MutexLock lock(&send_mutex_);
+ if (mode != kRtxOff &&
+ (!rtx_ssrc_.has_value() || rtx_payload_type_map_.empty())) {
+ RTC_LOG(LS_ERROR)
+ << "Failed to enable RTX without RTX SSRC or payload types.";
+ return;
+ }
+ rtx_ = mode;
+}
+
+int RTPSender::RtxStatus() const {
+ MutexLock lock(&send_mutex_);
+ return rtx_;
+}
+
+void RTPSender::SetRtxPayloadType(int payload_type,
+ int associated_payload_type) {
+ MutexLock lock(&send_mutex_);
+ RTC_DCHECK_LE(payload_type, 127);
+ RTC_DCHECK_LE(associated_payload_type, 127);
+ if (payload_type < 0) {
+ RTC_LOG(LS_ERROR) << "Invalid RTX payload type: " << payload_type << ".";
+ return;
+ }
+
+ rtx_payload_type_map_[associated_payload_type] = payload_type;
+}
+
+int32_t RTPSender::ReSendPacket(uint16_t packet_id) {
+ int32_t packet_size = 0;
+ const bool rtx = (RtxStatus() & kRtxRetransmitted) > 0;
+
+ std::unique_ptr<RtpPacketToSend> packet =
+ packet_history_->GetPacketAndMarkAsPending(
+ packet_id, [&](const RtpPacketToSend& stored_packet) {
+ // Check if we're overusing retransmission bitrate.
+ // TODO(sprang): Add histograms for nack success or failure
+ // reasons.
+ packet_size = stored_packet.size();
+ std::unique_ptr<RtpPacketToSend> retransmit_packet;
+ if (retransmission_rate_limiter_ &&
+ !retransmission_rate_limiter_->TryUseRate(packet_size)) {
+ return retransmit_packet;
+ }
+ if (rtx) {
+ retransmit_packet = BuildRtxPacket(stored_packet);
+ } else {
+ retransmit_packet =
+ std::make_unique<RtpPacketToSend>(stored_packet);
+ }
+ if (retransmit_packet) {
+ retransmit_packet->set_retransmitted_sequence_number(
+ stored_packet.SequenceNumber());
+ }
+ return retransmit_packet;
+ });
+ if (packet_size == 0) {
+ // Packet not found or already queued for retransmission, ignore.
+ RTC_DCHECK(!packet);
+ return 0;
+ }
+ if (!packet) {
+ // Packet was found, but lambda helper above chose not to create
+ // `retransmit_packet` out of it.
+ return -1;
+ }
+ packet->set_packet_type(RtpPacketMediaType::kRetransmission);
+ packet->set_fec_protect_packet(false);
+ std::vector<std::unique_ptr<RtpPacketToSend>> packets;
+ packets.emplace_back(std::move(packet));
+ paced_sender_->EnqueuePackets(std::move(packets));
+
+ return packet_size;
+}
+
+void RTPSender::OnReceivedAckOnSsrc(int64_t extended_highest_sequence_number) {
+ MutexLock lock(&send_mutex_);
+ bool update_required = !ssrc_has_acked_;
+ ssrc_has_acked_ = true;
+ if (update_required) {
+ UpdateHeaderSizes();
+ }
+}
+
+void RTPSender::OnReceivedAckOnRtxSsrc(
+ int64_t extended_highest_sequence_number) {
+ MutexLock lock(&send_mutex_);
+ bool update_required = !rtx_ssrc_has_acked_;
+ rtx_ssrc_has_acked_ = true;
+ if (update_required) {
+ UpdateHeaderSizes();
+ }
+}
+
+void RTPSender::OnReceivedNack(
+ const std::vector<uint16_t>& nack_sequence_numbers,
+ int64_t avg_rtt) {
+ packet_history_->SetRtt(TimeDelta::Millis(5 + avg_rtt));
+ for (uint16_t seq_no : nack_sequence_numbers) {
+ const int32_t bytes_sent = ReSendPacket(seq_no);
+ if (bytes_sent < 0) {
+      // Failed to resend one sequence number; give up on the rest in this
+      // NACK.
+      RTC_LOG(LS_WARNING) << "Failed resending RTP packet " << seq_no
+                          << ", discarding rest of packets.";
+ break;
+ }
+ }
+}
+
+bool RTPSender::SupportsPadding() const {
+ MutexLock lock(&send_mutex_);
+ return sending_media_ && supports_bwe_extension_;
+}
+
+bool RTPSender::SupportsRtxPayloadPadding() const {
+ MutexLock lock(&send_mutex_);
+ return sending_media_ && supports_bwe_extension_ &&
+ (rtx_ & kRtxRedundantPayloads);
+}
+
+std::vector<std::unique_ptr<RtpPacketToSend>> RTPSender::GeneratePadding(
+ size_t target_size_bytes,
+ bool media_has_been_sent,
+ bool can_send_padding_on_media_ssrc) {
+  // This method does not actually send packets, it just generates and
+  // returns them; the caller puts them in the pacer queue. Since this should
+  // incur low overhead, keep the lock for the scope of the method in order
+  // to make the code more readable.
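+  //
+  // In brief: first try to recycle recently sent media as RTX payload
+  // padding (potentially more useful to the receiver), then fall back to
+  // plain padding-only packets until the requested size is covered.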
+
+ std::vector<std::unique_ptr<RtpPacketToSend>> padding_packets;
+ size_t bytes_left = target_size_bytes;
+ if (SupportsRtxPayloadPadding()) {
+ while (bytes_left >= kMinPayloadPaddingBytes) {
+ std::unique_ptr<RtpPacketToSend> packet =
+ packet_history_->GetPayloadPaddingPacket(
+ [&](const RtpPacketToSend& packet)
+ -> std::unique_ptr<RtpPacketToSend> {
+ // Limit overshoot, generate <= `kMaxPaddingSizeFactor` *
+ // `target_size_bytes`.
+ const size_t max_overshoot_bytes = static_cast<size_t>(
+ ((kMaxPaddingSizeFactor - 1.0) * target_size_bytes) + 0.5);
+ if (packet.payload_size() + kRtxHeaderSize >
+ max_overshoot_bytes + bytes_left) {
+ return nullptr;
+ }
+ return BuildRtxPacket(packet);
+ });
+ if (!packet) {
+ break;
+ }
+
+ bytes_left -= std::min(bytes_left, packet->payload_size());
+ packet->set_packet_type(RtpPacketMediaType::kPadding);
+ padding_packets.push_back(std::move(packet));
+ }
+ }
+
+ MutexLock lock(&send_mutex_);
+ if (!sending_media_) {
+ return {};
+ }
+
+ size_t padding_bytes_in_packet;
+ const size_t max_payload_size =
+ max_packet_size_ - max_padding_fec_packet_header_;
+ if (audio_configured_) {
+ // Allow smaller padding packets for audio.
+ padding_bytes_in_packet = rtc::SafeClamp<size_t>(
+ bytes_left, kMinAudioPaddingLength,
+ rtc::SafeMin(max_payload_size, kMaxPaddingLength));
+ } else {
+ // Always send full padding packets. This is accounted for by the
+ // RtpPacketSender, which will make sure we don't send too much padding even
+ // if a single packet is larger than requested.
+ // We do this to avoid frequently sending small packets on higher bitrates.
+ padding_bytes_in_packet = rtc::SafeMin(max_payload_size, kMaxPaddingLength);
+ }
+
+ while (bytes_left > 0) {
+ auto padding_packet =
+ std::make_unique<RtpPacketToSend>(&rtp_header_extension_map_);
+ padding_packet->set_packet_type(RtpPacketMediaType::kPadding);
+ padding_packet->SetMarker(false);
+ if (rtx_ == kRtxOff) {
+ if (!can_send_padding_on_media_ssrc) {
+ break;
+ }
+ padding_packet->SetSsrc(ssrc_);
+ } else {
+ // Without abs-send-time or transport sequence number a media packet
+ // must be sent before padding so that the timestamps used for
+ // estimation are correct.
+ if (!media_has_been_sent &&
+ !(rtp_header_extension_map_.IsRegistered(AbsoluteSendTime::kId) ||
+ rtp_header_extension_map_.IsRegistered(
+ TransportSequenceNumber::kId))) {
+ break;
+ }
+
+ RTC_DCHECK(rtx_ssrc_);
+ RTC_DCHECK(!rtx_payload_type_map_.empty());
+ padding_packet->SetSsrc(*rtx_ssrc_);
+ padding_packet->SetPayloadType(rtx_payload_type_map_.begin()->second);
+ }
+
+ if (rtp_header_extension_map_.IsRegistered(TransportSequenceNumber::kId)) {
+ padding_packet->ReserveExtension<TransportSequenceNumber>();
+ }
+ if (rtp_header_extension_map_.IsRegistered(TransmissionOffset::kId)) {
+ padding_packet->ReserveExtension<TransmissionOffset>();
+ }
+ if (rtp_header_extension_map_.IsRegistered(AbsoluteSendTime::kId)) {
+ padding_packet->ReserveExtension<AbsoluteSendTime>();
+ }
+
+ padding_packet->SetPadding(padding_bytes_in_packet);
+ bytes_left -= std::min(bytes_left, padding_bytes_in_packet);
+ padding_packets.push_back(std::move(padding_packet));
+ }
+
+ return padding_packets;
+}
+
+bool RTPSender::SendToNetwork(std::unique_ptr<RtpPacketToSend> packet) {
+ RTC_DCHECK(packet);
+ auto packet_type = packet->packet_type();
+ RTC_CHECK(packet_type) << "Packet type must be set before sending.";
+
+ if (packet->capture_time() <= Timestamp::Zero()) {
+ packet->set_capture_time(clock_->CurrentTime());
+ }
+
+ std::vector<std::unique_ptr<RtpPacketToSend>> packets;
+ packets.emplace_back(std::move(packet));
+ paced_sender_->EnqueuePackets(std::move(packets));
+
+ return true;
+}
+
+void RTPSender::EnqueuePackets(
+ std::vector<std::unique_ptr<RtpPacketToSend>> packets) {
+ RTC_DCHECK(!packets.empty());
+ Timestamp now = clock_->CurrentTime();
+ for (auto& packet : packets) {
+ RTC_DCHECK(packet);
+ RTC_CHECK(packet->packet_type().has_value())
+ << "Packet type must be set before sending.";
+ if (packet->capture_time() <= Timestamp::Zero()) {
+ packet->set_capture_time(now);
+ }
+ }
+
+ paced_sender_->EnqueuePackets(std::move(packets));
+}
+
+size_t RTPSender::FecOrPaddingPacketMaxRtpHeaderLength() const {
+ MutexLock lock(&send_mutex_);
+ return max_padding_fec_packet_header_;
+}
+
+size_t RTPSender::ExpectedPerPacketOverhead() const {
+ MutexLock lock(&send_mutex_);
+ return max_media_packet_header_;
+}
+
+std::unique_ptr<RtpPacketToSend> RTPSender::AllocatePacket() const {
+ MutexLock lock(&send_mutex_);
+  // TODO(danilchap): Find a better motivation and value for the extra
+  // capacity. RtpPacketizer might slightly miscalculate the needed size, and
+  // SRTP may benefit from extra space in the buffer to do encryption in
+  // place, saving a reallocation. While sending a slightly oversized packet
+  // increases the chance of it being dropped, that is better than crashing
+  // on the dropped packet without trying to send it.
+ static constexpr int kExtraCapacity = 16;
+ auto packet = std::make_unique<RtpPacketToSend>(
+ &rtp_header_extension_map_, max_packet_size_ + kExtraCapacity);
+ packet->SetSsrc(ssrc_);
+ packet->SetCsrcs(csrcs_);
+  // Reserve extensions, if registered; RtpSender sets them in SendToNetwork.
+ packet->ReserveExtension<AbsoluteSendTime>();
+ packet->ReserveExtension<TransmissionOffset>();
+ packet->ReserveExtension<TransportSequenceNumber>();
+
+ // BUNDLE requires that the receiver "bind" the received SSRC to the values
+ // in the MID and/or (R)RID header extensions if present. Therefore, the
+ // sender can reduce overhead by omitting these header extensions once it
+ // knows that the receiver has "bound" the SSRC.
+ // This optimization can be configured by setting
+ // `always_send_mid_and_rid_` appropriately.
+ //
+ // The algorithm here is fairly simple: Always attach a MID and/or RID (if
+ // configured) to the outgoing packets until an RTCP receiver report comes
+ // back for this SSRC. That feedback indicates the receiver must have
+ // received a packet with the SSRC and header extension(s), so the sender
+ // then stops attaching the MID and RID.
+ if (always_send_mid_and_rid_ || !ssrc_has_acked_) {
+ // These are no-ops if the corresponding header extension is not registered.
+ if (!mid_.empty()) {
+ packet->SetExtension<RtpMid>(mid_);
+ }
+ if (!rid_.empty()) {
+ packet->SetExtension<RtpStreamId>(rid_);
+ }
+ }
+ return packet;
+}
+
+void RTPSender::SetSendingMediaStatus(bool enabled) {
+ MutexLock lock(&send_mutex_);
+ sending_media_ = enabled;
+}
+
+bool RTPSender::SendingMedia() const {
+ MutexLock lock(&send_mutex_);
+ return sending_media_;
+}
+
+bool RTPSender::IsAudioConfigured() const {
+ return audio_configured_;
+}
+
+void RTPSender::SetTimestampOffset(uint32_t timestamp) {
+ MutexLock lock(&send_mutex_);
+ timestamp_offset_ = timestamp;
+}
+
+uint32_t RTPSender::TimestampOffset() const {
+ MutexLock lock(&send_mutex_);
+ return timestamp_offset_;
+}
+
+void RTPSender::SetMid(absl::string_view mid) {
+ // This is configured via the API.
+ MutexLock lock(&send_mutex_);
+ RTC_DCHECK_LE(mid.length(), RtpMid::kMaxValueSizeBytes);
+ mid_ = std::string(mid);
+ UpdateHeaderSizes();
+}
+
+void RTPSender::SetCsrcs(const std::vector<uint32_t>& csrcs) {
+ RTC_DCHECK_LE(csrcs.size(), kRtpCsrcSize);
+ MutexLock lock(&send_mutex_);
+ csrcs_ = csrcs;
+ UpdateHeaderSizes();
+}
+
+static void CopyHeaderAndExtensionsToRtxPacket(const RtpPacketToSend& packet,
+ RtpPacketToSend* rtx_packet) {
+ // Set the relevant fixed packet headers. The following are not set:
+ // * Payload type - it is replaced in rtx packets.
+ // * Sequence number - RTX has a separate sequence numbering.
+ // * SSRC - RTX stream has its own SSRC.
+ rtx_packet->SetMarker(packet.Marker());
+ rtx_packet->SetTimestamp(packet.Timestamp());
+
+ // Set the variable fields in the packet header:
+ // * CSRCs - must be set before header extensions.
+ // * Header extensions - replace Rid header with RepairedRid header.
+ const std::vector<uint32_t> csrcs = packet.Csrcs();
+ rtx_packet->SetCsrcs(csrcs);
+ for (int extension_num = kRtpExtensionNone + 1;
+ extension_num < kRtpExtensionNumberOfExtensions; ++extension_num) {
+ auto extension = static_cast<RTPExtensionType>(extension_num);
+
+ // Stream ID header extensions (MID, RSID) are sent per-SSRC. Since RTX
+ // operates on a different SSRC, the presence and values of these header
+ // extensions should be determined separately and not blindly copied.
+ if (extension == kRtpExtensionMid ||
+ extension == kRtpExtensionRtpStreamId) {
+ continue;
+ }
+
+ // Empty extensions should be supported, so not checking `source.empty()`.
+ if (!packet.HasExtension(extension)) {
+ continue;
+ }
+
+ rtc::ArrayView<const uint8_t> source = packet.FindExtension(extension);
+
+ rtc::ArrayView<uint8_t> destination =
+ rtx_packet->AllocateExtension(extension, source.size());
+
+ // Could happen if any:
+ // 1. Extension has 0 length.
+ // 2. Extension is not registered in destination.
+ // 3. Allocating extension in destination failed.
+ if (destination.empty() || source.size() != destination.size()) {
+ continue;
+ }
+
+ std::memcpy(destination.begin(), source.begin(), destination.size());
+ }
+}
+
+std::unique_ptr<RtpPacketToSend> RTPSender::BuildRtxPacket(
+ const RtpPacketToSend& packet) {
+ std::unique_ptr<RtpPacketToSend> rtx_packet;
+
+ // Add original RTP header.
+ {
+ MutexLock lock(&send_mutex_);
+ if (!sending_media_)
+ return nullptr;
+
+ RTC_DCHECK(rtx_ssrc_);
+
+ // Replace payload type.
+ auto kv = rtx_payload_type_map_.find(packet.PayloadType());
+ if (kv == rtx_payload_type_map_.end())
+ return nullptr;
+
+ rtx_packet = std::make_unique<RtpPacketToSend>(&rtp_header_extension_map_,
+ max_packet_size_);
+
+ rtx_packet->SetPayloadType(kv->second);
+
+ // Replace SSRC.
+ rtx_packet->SetSsrc(*rtx_ssrc_);
+
+ CopyHeaderAndExtensionsToRtxPacket(packet, rtx_packet.get());
+
+ // RTX packets are sent on an SSRC different from the main media, so the
+ // decision to attach MID and/or RRID header extensions is completely
+ // separate from that of the main media SSRC.
+ //
+    // Note that RTX packets must use the RepairedRtpStreamId (RRID) header
+    // extension instead of the RtpStreamId (RID) header extension even though
+    // the payload is identical.
+ if (always_send_mid_and_rid_ || !rtx_ssrc_has_acked_) {
+ // These are no-ops if the corresponding header extension is not
+ // registered.
+ if (!mid_.empty()) {
+ rtx_packet->SetExtension<RtpMid>(mid_);
+ }
+ if (!rid_.empty()) {
+ rtx_packet->SetExtension<RepairedRtpStreamId>(rid_);
+ }
+ }
+ }
+ RTC_DCHECK(rtx_packet);
+
+ uint8_t* rtx_payload =
+ rtx_packet->AllocatePayload(packet.payload_size() + kRtxHeaderSize);
+ if (rtx_payload == nullptr)
+ return nullptr;
+
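+  // Per RFC 4588 (section 4), the RTX payload is the two-byte original
+  // sequence number (OSN) followed by the original media payload.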
+ // Add OSN (original sequence number).
+ ByteWriter<uint16_t>::WriteBigEndian(rtx_payload, packet.SequenceNumber());
+
+ // Add original payload data.
+ auto payload = packet.payload();
+ memcpy(rtx_payload + kRtxHeaderSize, payload.data(), payload.size());
+
+ // Add original additional data.
+ rtx_packet->set_additional_data(packet.additional_data());
+
+ // Copy capture time so e.g. TransmissionOffset is correctly set.
+ rtx_packet->set_capture_time(packet.capture_time());
+
+ return rtx_packet;
+}
+
+void RTPSender::SetRtpState(const RtpState& rtp_state) {
+ MutexLock lock(&send_mutex_);
+
+ timestamp_offset_ = rtp_state.start_timestamp;
+ ssrc_has_acked_ = rtp_state.ssrc_has_acked;
+ UpdateHeaderSizes();
+}
+
+RtpState RTPSender::GetRtpState() const {
+ MutexLock lock(&send_mutex_);
+
+ RtpState state;
+ state.start_timestamp = timestamp_offset_;
+ state.ssrc_has_acked = ssrc_has_acked_;
+ return state;
+}
+
+void RTPSender::SetRtxRtpState(const RtpState& rtp_state) {
+ MutexLock lock(&send_mutex_);
+ rtx_ssrc_has_acked_ = rtp_state.ssrc_has_acked;
+}
+
+RtpState RTPSender::GetRtxRtpState() const {
+ MutexLock lock(&send_mutex_);
+
+ RtpState state;
+ state.start_timestamp = timestamp_offset_;
+ state.ssrc_has_acked = rtx_ssrc_has_acked_;
+
+ return state;
+}
+
+void RTPSender::UpdateHeaderSizes() {
+ const size_t rtp_header_length =
+ kRtpHeaderLength + sizeof(uint32_t) * csrcs_.size();
+
+ max_padding_fec_packet_header_ =
+ rtp_header_length + RtpHeaderExtensionSize(kFecOrPaddingExtensionSizes,
+ rtp_header_extension_map_);
+
+  // RtpStreamId and Mid are treated specially in that we check if they are
+  // currently being sent. RepairedRtpStreamId is ignored because it is sent
+  // instead of RtpStreamId on rtx packets and requires the same size.
+ const bool send_mid_rid_on_rtx =
+ rtx_ssrc_.has_value() && !rtx_ssrc_has_acked_;
+ const bool send_mid_rid =
+ always_send_mid_and_rid_ || !ssrc_has_acked_ || send_mid_rid_on_rtx;
+ std::vector<RtpExtensionSize> non_volatile_extensions;
+ for (auto& extension :
+ audio_configured_ ? AudioExtensionSizes() : VideoExtensionSizes()) {
+ if (IsNonVolatile(extension.type)) {
+ switch (extension.type) {
+ case RTPExtensionType::kRtpExtensionMid:
+ if (send_mid_rid && !mid_.empty()) {
+ non_volatile_extensions.push_back(extension);
+ }
+ break;
+ case RTPExtensionType::kRtpExtensionRtpStreamId:
+ if (send_mid_rid && !rid_.empty()) {
+ non_volatile_extensions.push_back(extension);
+ }
+ break;
+ default:
+ non_volatile_extensions.push_back(extension);
+ }
+ }
+ }
+ max_media_packet_header_ =
+ rtp_header_length + RtpHeaderExtensionSize(non_volatile_extensions,
+ rtp_header_extension_map_);
+ // Reserve extra bytes if packet might be resent in an rtx packet.
+ if (rtx_ssrc_.has_value()) {
+ max_media_packet_header_ += kRtxHeaderSize;
+ }
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_sender.h b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_sender.h
new file mode 100644
index 0000000000..07df701548
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_sender.h
@@ -0,0 +1,217 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_RTP_RTCP_SOURCE_RTP_SENDER_H_
+#define MODULES_RTP_RTCP_SOURCE_RTP_SENDER_H_
+
+#include <map>
+#include <memory>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "absl/strings/string_view.h"
+#include "absl/types/optional.h"
+#include "api/array_view.h"
+#include "api/call/transport.h"
+#include "api/field_trials_view.h"
+#include "modules/rtp_rtcp/include/flexfec_sender.h"
+#include "modules/rtp_rtcp/include/rtp_header_extension_map.h"
+#include "modules/rtp_rtcp/include/rtp_packet_sender.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "modules/rtp_rtcp/source/rtp_packet_history.h"
+#include "modules/rtp_rtcp/source/rtp_rtcp_config.h"
+#include "modules/rtp_rtcp/source/rtp_rtcp_interface.h"
+#include "rtc_base/random.h"
+#include "rtc_base/rate_statistics.h"
+#include "rtc_base/synchronization/mutex.h"
+#include "rtc_base/thread_annotations.h"
+
+namespace webrtc {
+
+class FrameEncryptorInterface;
+class RateLimiter;
+class RtcEventLog;
+class RtpPacketToSend;
+
+class RTPSender {
+ public:
+ RTPSender(const RtpRtcpInterface::Configuration& config,
+ RtpPacketHistory* packet_history,
+ RtpPacketSender* packet_sender);
+ RTPSender(const RTPSender&) = delete;
+ RTPSender& operator=(const RTPSender&) = delete;
+
+ ~RTPSender();
+
+ void SetSendingMediaStatus(bool enabled) RTC_LOCKS_EXCLUDED(send_mutex_);
+ bool SendingMedia() const RTC_LOCKS_EXCLUDED(send_mutex_);
+ bool IsAudioConfigured() const RTC_LOCKS_EXCLUDED(send_mutex_);
+
+ uint32_t TimestampOffset() const RTC_LOCKS_EXCLUDED(send_mutex_);
+ void SetTimestampOffset(uint32_t timestamp) RTC_LOCKS_EXCLUDED(send_mutex_);
+
+ void SetMid(absl::string_view mid) RTC_LOCKS_EXCLUDED(send_mutex_);
+
+ uint16_t SequenceNumber() const RTC_LOCKS_EXCLUDED(send_mutex_);
+ void SetSequenceNumber(uint16_t seq) RTC_LOCKS_EXCLUDED(send_mutex_);
+
+ void SetCsrcs(const std::vector<uint32_t>& csrcs)
+ RTC_LOCKS_EXCLUDED(send_mutex_);
+
+ void SetMaxRtpPacketSize(size_t max_packet_size)
+ RTC_LOCKS_EXCLUDED(send_mutex_);
+
+ void SetExtmapAllowMixed(bool extmap_allow_mixed)
+ RTC_LOCKS_EXCLUDED(send_mutex_);
+
+ int32_t SetRID(const char* rid);
+ int32_t SetMId(const char* mid);
+
+ // RTP header extension
+ bool RegisterRtpHeaderExtension(absl::string_view uri, int id)
+ RTC_LOCKS_EXCLUDED(send_mutex_);
+ bool IsRtpHeaderExtensionRegistered(RTPExtensionType type) const
+ RTC_LOCKS_EXCLUDED(send_mutex_);
+ void DeregisterRtpHeaderExtension(absl::string_view uri)
+ RTC_LOCKS_EXCLUDED(send_mutex_);
+
+ bool SupportsPadding() const RTC_LOCKS_EXCLUDED(send_mutex_);
+ bool SupportsRtxPayloadPadding() const RTC_LOCKS_EXCLUDED(send_mutex_);
+
+ std::vector<std::unique_ptr<RtpPacketToSend>> GeneratePadding(
+ size_t target_size_bytes,
+ bool media_has_been_sent,
+ bool can_send_padding_on_media_ssrc) RTC_LOCKS_EXCLUDED(send_mutex_);
+
+ // NACK.
+ void OnReceivedNack(const std::vector<uint16_t>& nack_sequence_numbers,
+ int64_t avg_rtt) RTC_LOCKS_EXCLUDED(send_mutex_);
+
+ int32_t ReSendPacket(uint16_t packet_id) RTC_LOCKS_EXCLUDED(send_mutex_);
+
+ // ACK.
+ void OnReceivedAckOnSsrc(int64_t extended_highest_sequence_number)
+ RTC_LOCKS_EXCLUDED(send_mutex_);
+ void OnReceivedAckOnRtxSsrc(int64_t extended_highest_sequence_number)
+ RTC_LOCKS_EXCLUDED(send_mutex_);
+
+ // RTX.
+ void SetRtxStatus(int mode) RTC_LOCKS_EXCLUDED(send_mutex_);
+ int RtxStatus() const RTC_LOCKS_EXCLUDED(send_mutex_);
+ absl::optional<uint32_t> RtxSsrc() const RTC_LOCKS_EXCLUDED(send_mutex_) {
+ return rtx_ssrc_;
+ }
+
+ void SetRtxPayloadType(int payload_type, int associated_payload_type)
+ RTC_LOCKS_EXCLUDED(send_mutex_);
+
+ // Size info for header extensions used by FEC packets.
+ static rtc::ArrayView<const RtpExtensionSize> FecExtensionSizes()
+ RTC_LOCKS_EXCLUDED(send_mutex_);
+
+ // Size info for header extensions used by video packets.
+ static rtc::ArrayView<const RtpExtensionSize> VideoExtensionSizes()
+ RTC_LOCKS_EXCLUDED(send_mutex_);
+
+ // Size info for header extensions used by audio packets.
+ static rtc::ArrayView<const RtpExtensionSize> AudioExtensionSizes()
+ RTC_LOCKS_EXCLUDED(send_mutex_);
+
+  // Creates an empty packet, fills in SSRC and CSRCs, and reserves space for
+  // the header extensions RtpSender updates before sending.
+ std::unique_ptr<RtpPacketToSend> AllocatePacket() const
+ RTC_LOCKS_EXCLUDED(send_mutex_);
+ // Maximum header overhead per fec/padding packet.
+ size_t FecOrPaddingPacketMaxRtpHeaderLength() const
+ RTC_LOCKS_EXCLUDED(send_mutex_);
+ // Expected header overhead per media packet.
+ size_t ExpectedPerPacketOverhead() const RTC_LOCKS_EXCLUDED(send_mutex_);
+ // Including RTP headers.
+ size_t MaxRtpPacketSize() const RTC_LOCKS_EXCLUDED(send_mutex_);
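+
+ // Sketch of the size bookkeeping above (illustrative only; `sender` is
+ // assumed to be a configured RTPSender):
+ //
+ //   auto packet = sender.AllocatePacket();  // SSRC/CSRCs already set.
+ //   size_t max_payload =
+ //       sender.MaxRtpPacketSize() - sender.ExpectedPerPacketOverhead();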
+
+ uint32_t SSRC() const RTC_LOCKS_EXCLUDED(send_mutex_) { return ssrc_; }
+
+ absl::optional<uint32_t> FlexfecSsrc() const RTC_LOCKS_EXCLUDED(send_mutex_) {
+ return flexfec_ssrc_;
+ }
+
+ // Sends packet to `transport_` or to the pacer, depending on configuration.
+ // TODO(bugs.webrtc.org/XXX): Remove in favor of EnqueuePackets().
+ bool SendToNetwork(std::unique_ptr<RtpPacketToSend> packet)
+ RTC_LOCKS_EXCLUDED(send_mutex_);
+
+ // Pass a set of packets to RtpPacketSender instance, for paced or immediate
+ // sending to the network.
+ void EnqueuePackets(std::vector<std::unique_ptr<RtpPacketToSend>> packets)
+ RTC_LOCKS_EXCLUDED(send_mutex_);
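+
+ // E.g. handing a single media packet to the pacer (illustrative):
+ //
+ //   std::vector<std::unique_ptr<RtpPacketToSend>> batch;
+ //   batch.push_back(std::move(packet));
+ //   sender.EnqueuePackets(std::move(batch));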
+
+ void SetRtpState(const RtpState& rtp_state) RTC_LOCKS_EXCLUDED(send_mutex_);
+ RtpState GetRtpState() const RTC_LOCKS_EXCLUDED(send_mutex_);
+ void SetRtxRtpState(const RtpState& rtp_state)
+ RTC_LOCKS_EXCLUDED(send_mutex_);
+ RtpState GetRtxRtpState() const RTC_LOCKS_EXCLUDED(send_mutex_);
+
+ private:
+ std::unique_ptr<RtpPacketToSend> BuildRtxPacket(
+ const RtpPacketToSend& packet);
+
+ bool IsFecPacket(const RtpPacketToSend& packet) const;
+
+ void UpdateHeaderSizes() RTC_EXCLUSIVE_LOCKS_REQUIRED(send_mutex_);
+
+ void UpdateLastPacketState(const RtpPacketToSend& packet)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(send_mutex_);
+
+ Clock* const clock_;
+ Random random_ RTC_GUARDED_BY(send_mutex_);
+
+ const bool audio_configured_;
+
+ const uint32_t ssrc_;
+ const absl::optional<uint32_t> rtx_ssrc_;
+ const absl::optional<uint32_t> flexfec_ssrc_;
+
+ RtpPacketHistory* const packet_history_;
+ RtpPacketSender* const paced_sender_;
+
+ mutable Mutex send_mutex_;
+
+ bool sending_media_ RTC_GUARDED_BY(send_mutex_);
+ size_t max_packet_size_;
+
+ RtpHeaderExtensionMap rtp_header_extension_map_ RTC_GUARDED_BY(send_mutex_);
+ size_t max_media_packet_header_ RTC_GUARDED_BY(send_mutex_);
+ size_t max_padding_fec_packet_header_ RTC_GUARDED_BY(send_mutex_);
+
+ // RTP variables
+ uint32_t timestamp_offset_ RTC_GUARDED_BY(send_mutex_);
+ // RID value to send in the RID or RepairedRID header extension.
+ const std::string rid_;
+ // MID value to send in the MID header extension.
+ std::string mid_ RTC_GUARDED_BY(send_mutex_);
+ // Should we send MID/RID even when ACKed? (see below).
+ const bool always_send_mid_and_rid_;
+ // Track if any ACK has been received on the SSRC and RTX SSRC to indicate
+ // when to stop sending the MID and RID header extensions.
+ bool ssrc_has_acked_ RTC_GUARDED_BY(send_mutex_);
+ bool rtx_ssrc_has_acked_ RTC_GUARDED_BY(send_mutex_);
+ std::vector<uint32_t> csrcs_ RTC_GUARDED_BY(send_mutex_);
+ int rtx_ RTC_GUARDED_BY(send_mutex_);
+ // Mapping rtx_payload_type_map_[associated] = rtx.
+ std::map<int8_t, int8_t> rtx_payload_type_map_ RTC_GUARDED_BY(send_mutex_);
+ bool supports_bwe_extension_ RTC_GUARDED_BY(send_mutex_);
+
+ RateLimiter* const retransmission_rate_limiter_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_RTP_RTCP_SOURCE_RTP_SENDER_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_sender_audio.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_sender_audio.cc
new file mode 100644
index 0000000000..244f644bd1
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_sender_audio.cc
@@ -0,0 +1,400 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtp_sender_audio.h"
+
+#include <string.h>
+
+#include <memory>
+#include <utility>
+
+#include "absl/strings/match.h"
+#include "absl/types/optional.h"
+#include "api/audio_codecs/audio_format.h"
+#include "api/rtp_headers.h"
+#include "modules/audio_coding/include/audio_coding_module_typedefs.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "modules/rtp_rtcp/source/absolute_capture_time_sender.h"
+#include "modules/rtp_rtcp/source/byte_io.h"
+#include "modules/rtp_rtcp/source/rtp_header_extensions.h"
+#include "modules/rtp_rtcp/source/rtp_packet.h"
+#include "modules/rtp_rtcp/source/rtp_packet_to_send.h"
+#include "modules/rtp_rtcp/source/time_util.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/trace_event.h"
+#include "system_wrappers/include/ntp_time.h"
+
+namespace webrtc {
+
+namespace {
+[[maybe_unused]] const char* FrameTypeToString(AudioFrameType frame_type) {
+ switch (frame_type) {
+ case AudioFrameType::kEmptyFrame:
+ return "empty";
+ case AudioFrameType::kAudioFrameSpeech:
+ return "audio_speech";
+ case AudioFrameType::kAudioFrameCN:
+ return "audio_cn";
+ }
+ RTC_CHECK_NOTREACHED();
+}
+
+constexpr char kIncludeCaptureClockOffset[] =
+ "WebRTC-IncludeCaptureClockOffset";
+
+} // namespace
+
+RTPSenderAudio::RTPSenderAudio(Clock* clock, RTPSender* rtp_sender)
+ : clock_(clock),
+ rtp_sender_(rtp_sender),
+ absolute_capture_time_sender_(clock),
+ include_capture_clock_offset_(
+ !absl::StartsWith(field_trials_.Lookup(kIncludeCaptureClockOffset),
+ "Disabled")) {
+ RTC_DCHECK(clock_);
+}
+
+RTPSenderAudio::~RTPSenderAudio() {}
+
+int32_t RTPSenderAudio::RegisterAudioPayload(absl::string_view payload_name,
+ const int8_t payload_type,
+ const uint32_t frequency,
+ const size_t channels,
+ const uint32_t rate) {
+ if (absl::EqualsIgnoreCase(payload_name, "cn")) {
+ MutexLock lock(&send_audio_mutex_);
+ // We can have multiple CNG payload types, one per sample rate.
+ switch (frequency) {
+ case 8000:
+ cngnb_payload_type_ = payload_type;
+ break;
+ case 16000:
+ cngwb_payload_type_ = payload_type;
+ break;
+ case 32000:
+ cngswb_payload_type_ = payload_type;
+ break;
+ case 48000:
+ cngfb_payload_type_ = payload_type;
+ break;
+ default:
+ return -1;
+ }
+ } else if (absl::EqualsIgnoreCase(payload_name, "telephone-event")) {
+ MutexLock lock(&send_audio_mutex_);
+ // Don't add it to the regular payload list; we don't want to allow
+ // sending media with the DTMF payload type.
+ dtmf_payload_type_ = payload_type;
+ dtmf_payload_freq_ = frequency;
+ return 0;
+ } else if (payload_name == "audio") {
+ MutexLock lock(&send_audio_mutex_);
+ encoder_rtp_timestamp_frequency_ = frequency;
+ return 0;
+ }
+ return 0;
+}
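+
+// E.g. registering CNG at the common rates (payload type values are
+// illustrative; 13 is the conventional static type for 8 kHz CN):
+//
+//   sender_audio.RegisterAudioPayload("CN", 13, 8000, 1, 0);
+//   sender_audio.RegisterAudioPayload("CN", 105, 16000, 1, 0);
+//   sender_audio.RegisterAudioPayload("CN", 106, 32000, 1, 0);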
+
+bool RTPSenderAudio::MarkerBit(AudioFrameType frame_type, int8_t payload_type) {
+ MutexLock lock(&send_audio_mutex_);
+ // For audio, the marker bit is set on the first packet of a talk spurt.
+ bool marker_bit = false;
+ if (last_payload_type_ != payload_type) {
+ if (payload_type != -1 && (cngnb_payload_type_ == payload_type ||
+ cngwb_payload_type_ == payload_type ||
+ cngswb_payload_type_ == payload_type ||
+ cngfb_payload_type_ == payload_type)) {
+ // Only set the marker bit when the payload type changes to a non-CNG type.
+ return false;
+ }
+
+ // Payload types differ.
+ if (last_payload_type_ == -1) {
+ if (frame_type != AudioFrameType::kAudioFrameCN) {
+ // First packet, and not CNG.
+ return true;
+ } else {
+ // First packet, and CNG.
+ inband_vad_active_ = true;
+ return false;
+ }
+ }
+
+ // Not the first packet, not CNG, and the payload type changed:
+ // set the marker bit.
+ marker_bit = true;
+ }
+
+ // For G.723, G.729, AMR, etc. we can have inband VAD.
+ if (frame_type == AudioFrameType::kAudioFrameCN) {
+ inband_vad_active_ = true;
+ } else if (inband_vad_active_) {
+ inband_vad_active_ = false;
+ marker_bit = true;
+ }
+ return marker_bit;
+}
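+
+// Illustrative trace of the rules above for a CNG -> speech transition
+// (payload types 13 for CNG and 111 for speech are assumptions, and
+// SendAudio() is what updates last_payload_type_ between calls):
+//
+//   MarkerBit(kAudioFrameCN, 13);       // false: switching to a CNG type.
+//   MarkerBit(kAudioFrameSpeech, 111);  // true: switched to a non-CNG type.
+//   MarkerBit(kAudioFrameSpeech, 111);  // false: same talk spurt continues.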
+
+bool RTPSenderAudio::SendAudio(AudioFrameType frame_type,
+ int8_t payload_type,
+ uint32_t rtp_timestamp,
+ const uint8_t* payload_data,
+ size_t payload_size) {
+ return SendAudio(frame_type, payload_type, rtp_timestamp, payload_data,
+ payload_size,
+ // TODO(bugs.webrtc.org/10739) replace once plumbed.
+ /*absolute_capture_timestamp_ms=*/-1);
+}
+
+bool RTPSenderAudio::SendAudio(AudioFrameType frame_type,
+ int8_t payload_type,
+ uint32_t rtp_timestamp,
+ const uint8_t* payload_data,
+ size_t payload_size,
+ int64_t absolute_capture_timestamp_ms) {
+ TRACE_EVENT_ASYNC_STEP1("webrtc", "Audio", rtp_timestamp, "Send", "type",
+ FrameTypeToString(frame_type));
+
+ // From RFC 4733:
+ // A source has wide latitude as to how often it sends event updates. A
+ // natural interval is the spacing between non-event audio packets. [...]
+ // Alternatively, a source MAY decide to use a different spacing for event
+ // updates, with a value of 50 ms RECOMMENDED.
+ constexpr int kDtmfIntervalTimeMs = 50;
+ uint8_t audio_level_dbov = 0;
+ uint32_t dtmf_payload_freq = 0;
+ absl::optional<uint32_t> encoder_rtp_timestamp_frequency;
+ {
+ MutexLock lock(&send_audio_mutex_);
+ audio_level_dbov = audio_level_dbov_;
+ dtmf_payload_freq = dtmf_payload_freq_;
+ encoder_rtp_timestamp_frequency = encoder_rtp_timestamp_frequency_;
+ }
+
+ // Check if we have pending DTMFs to send
+ if (!dtmf_event_is_on_ && dtmf_queue_.PendingDtmf()) {
+ if ((clock_->TimeInMilliseconds() - dtmf_time_last_sent_) >
+ kDtmfIntervalTimeMs) {
+ // New tone to play
+ dtmf_timestamp_ = rtp_timestamp;
+ if (dtmf_queue_.NextDtmf(&dtmf_current_event_)) {
+ dtmf_event_first_packet_sent_ = false;
+ dtmf_length_samples_ =
+ dtmf_current_event_.duration_ms * (dtmf_payload_freq / 1000);
+ dtmf_event_is_on_ = true;
+ }
+ }
+ }
+
+ // A source MAY send events and coded audio packets for the same time,
+ // but we don't support that.
+ if (dtmf_event_is_on_) {
+ if (frame_type == AudioFrameType::kEmptyFrame) {
+ // kEmptyFrame is used to drive the DTMF when in CN mode; it can be
+ // triggered more frequently than we want to send DTMF packets.
+ const unsigned int dtmf_interval_time_rtp =
+ dtmf_payload_freq * kDtmfIntervalTimeMs / 1000;
+ if ((rtp_timestamp - dtmf_timestamp_last_sent_) <
+ dtmf_interval_time_rtp) {
+ // not time to send yet
+ return true;
+ }
+ }
+ dtmf_timestamp_last_sent_ = rtp_timestamp;
+ uint32_t dtmf_duration_samples = rtp_timestamp - dtmf_timestamp_;
+ bool ended = false;
+ bool send = true;
+
+ if (dtmf_length_samples_ > dtmf_duration_samples) {
+ if (dtmf_duration_samples <= 0) {
+ // Skip sending a packet at the start, since a duration of 0 should
+ // not be used.
+ send = false;
+ }
+ } else {
+ ended = true;
+ dtmf_event_is_on_ = false;
+ dtmf_time_last_sent_ = clock_->TimeInMilliseconds();
+ }
+ if (send) {
+ if (dtmf_duration_samples > 0xffff) {
+ // RFC 4733 2.5.2.3 Long-Duration Events
+ SendTelephoneEventPacket(ended, dtmf_timestamp_,
+ static_cast<uint16_t>(0xffff), false);
+
+ // Set a new timestamp for this segment.
+ dtmf_timestamp_ = rtp_timestamp;
+ dtmf_duration_samples -= 0xffff;
+ dtmf_length_samples_ -= 0xffff;
+
+ return SendTelephoneEventPacket(
+ ended, dtmf_timestamp_,
+ static_cast<uint16_t>(dtmf_duration_samples), false);
+ } else {
+ if (!SendTelephoneEventPacket(ended, dtmf_timestamp_,
+ dtmf_duration_samples,
+ !dtmf_event_first_packet_sent_)) {
+ return false;
+ }
+ dtmf_event_first_packet_sent_ = true;
+ return true;
+ }
+ }
+ return true;
+ }
+ if (payload_size == 0 || payload_data == nullptr) {
+ if (frame_type == AudioFrameType::kEmptyFrame) {
+ // We don't send empty audio RTP packets. This is not an error, since
+ // empty frames are used either to drive DTMF when VAD is in use or to
+ // enter DTX.
+ return true;
+ }
+ return false;
+ }
+
+ std::unique_ptr<RtpPacketToSend> packet = rtp_sender_->AllocatePacket();
+ packet->SetMarker(MarkerBit(frame_type, payload_type));
+ packet->SetPayloadType(payload_type);
+ packet->SetTimestamp(rtp_timestamp);
+ packet->set_capture_time(clock_->CurrentTime());
+ // Update audio level extension, if included.
+ packet->SetExtension<AudioLevel>(
+ frame_type == AudioFrameType::kAudioFrameSpeech, audio_level_dbov);
+
+ if (absolute_capture_timestamp_ms > 0) {
+ // Send absolute capture time periodically in order to optimize and save
+ // network traffic. Missing absolute capture times can be interpolated on
+ // the receiving end if sending intervals are small enough.
+ auto absolute_capture_time = absolute_capture_time_sender_.OnSendPacket(
+ AbsoluteCaptureTimeSender::GetSource(packet->Ssrc(), packet->Csrcs()),
+ packet->Timestamp(),
+ // Replace a missing value with 0 (an invalid frequency); this will
+ // trigger sending of the absolute capture time.
+ encoder_rtp_timestamp_frequency.value_or(0),
+ Int64MsToUQ32x32(clock_->ConvertTimestampToNtpTimeInMilliseconds(
+ absolute_capture_timestamp_ms)),
+ /*estimated_capture_clock_offset=*/
+ include_capture_clock_offset_ ? absl::make_optional(0) : absl::nullopt);
+ if (absolute_capture_time) {
+ // The setter also checks that the extension was registered during SDP
+ // negotiation; if it wasn't, the setter does nothing.
+ packet->SetExtension<AbsoluteCaptureTimeExtension>(
+ *absolute_capture_time);
+ }
+ }
+
+ uint8_t* payload = packet->AllocatePayload(payload_size);
+ if (!payload)  // Payload too large for the packet buffer.
+ return false;
+ memcpy(payload, payload_data, payload_size);
+
+ {
+ MutexLock lock(&send_audio_mutex_);
+ last_payload_type_ = payload_type;
+ }
+ TRACE_EVENT_ASYNC_END2("webrtc", "Audio", rtp_timestamp, "timestamp",
+ packet->Timestamp(), "seqnum",
+ packet->SequenceNumber());
+ packet->set_packet_type(RtpPacketMediaType::kAudio);
+ packet->set_allow_retransmission(true);
+ bool send_result = rtp_sender_->SendToNetwork(std::move(packet));
+ if (first_packet_sent_()) {
+ RTC_LOG(LS_INFO) << "First audio RTP packet sent to pacer";
+ }
+ return send_result;
+}
+
+// The audio level magnitude and voice activity flag are set for each RTP
+// packet.
+int32_t RTPSenderAudio::SetAudioLevel(uint8_t level_dbov) {
+ if (level_dbov > 127) {
+ return -1;
+ }
+ MutexLock lock(&send_audio_mutex_);
+ audio_level_dbov_ = level_dbov;
+ return 0;
+}
+
+// Send a TelephoneEvent tone using RFC 2833 (4733)
+int32_t RTPSenderAudio::SendTelephoneEvent(uint8_t key,
+ uint16_t time_ms,
+ uint8_t level) {
+ DtmfQueue::Event event;
+ {
+ MutexLock lock(&send_audio_mutex_);
+ if (dtmf_payload_type_ < 0) {
+ // TelephoneEvent payload type not configured.
+ return -1;
+ }
+ event.payload_type = dtmf_payload_type_;
+ }
+ event.key = key;
+ event.duration_ms = time_ms;
+ event.level = level;
+ return dtmf_queue_.AddDtmf(event) ? 0 : -1;
+}
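+
+// E.g. queueing DTMF digit 5 for 160 ms at -10 dBm0 (illustrative; assumes a
+// "telephone-event" payload type was registered first):
+//
+//   sender_audio.SendTelephoneEvent(/*key=*/5, /*time_ms=*/160, /*level=*/10);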
+
+bool RTPSenderAudio::SendTelephoneEventPacket(bool ended,
+ uint32_t dtmf_timestamp,
+ uint16_t duration,
+ bool marker_bit) {
+ uint8_t send_count = 1;
+ bool result = true;
+
+ if (ended) {
+ // Re-send the last packet of an event three times.
+ send_count = 3;
+ }
+ do {
+ // Send DTMF data.
+ constexpr RtpPacketToSend::ExtensionManager* kNoExtensions = nullptr;
+ constexpr size_t kDtmfSize = 4;
+ std::unique_ptr<RtpPacketToSend> packet(
+ new RtpPacketToSend(kNoExtensions, kRtpHeaderSize + kDtmfSize));
+ packet->SetPayloadType(dtmf_current_event_.payload_type);
+ packet->SetMarker(marker_bit);
+ packet->SetSsrc(rtp_sender_->SSRC());
+ packet->SetTimestamp(dtmf_timestamp);
+ packet->set_capture_time(clock_->CurrentTime());
+
+ // Create DTMF data.
+ uint8_t* dtmfbuffer = packet->AllocatePayload(kDtmfSize);
+ RTC_DCHECK(dtmfbuffer);
+ /* From RFC 2833:
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | event |E|R| volume | duration |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+ // R bit always cleared
+ uint8_t R = 0x00;
+ uint8_t volume = dtmf_current_event_.level;
+
+ // The E bit is set only on the final packets of the event.
+ uint8_t E = ended ? 0x80 : 0x00;
+
+ // The first byte is the event number, which equals the key number.
+ dtmfbuffer[0] = dtmf_current_event_.key;
+ dtmfbuffer[1] = E | R | volume;
+ ByteWriter<uint16_t>::WriteBigEndian(dtmfbuffer + 2, duration);
+
+ packet->set_packet_type(RtpPacketMediaType::kAudio);
+ packet->set_allow_retransmission(true);
+ result = rtp_sender_->SendToNetwork(std::move(packet));
+ send_count--;
+ } while (send_count > 0 && result);
+
+ return result;
+}
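+
+// Worked example of the payload built above (illustrative values): key 9,
+// volume 10, duration 800 samples, end bit set, encodes as the four bytes
+// {0x09, 0x8A, 0x03, 0x20}, since E | R | volume = 0x80 | 0x00 | 10 = 0x8A
+// and the 16-bit duration 800 = 0x0320 is written big-endian.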
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_sender_audio.h b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_sender_audio.h
new file mode 100644
index 0000000000..6d61facc9a
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_sender_audio.h
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_RTP_RTCP_SOURCE_RTP_SENDER_AUDIO_H_
+#define MODULES_RTP_RTCP_SOURCE_RTP_SENDER_AUDIO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <memory>
+
+#include "absl/strings/string_view.h"
+#include "api/transport/field_trial_based_config.h"
+#include "modules/audio_coding/include/audio_coding_module_typedefs.h"
+#include "modules/rtp_rtcp/source/absolute_capture_time_sender.h"
+#include "modules/rtp_rtcp/source/dtmf_queue.h"
+#include "modules/rtp_rtcp/source/rtp_sender.h"
+#include "rtc_base/one_time_event.h"
+#include "rtc_base/synchronization/mutex.h"
+#include "rtc_base/thread_annotations.h"
+#include "system_wrappers/include/clock.h"
+
+namespace webrtc {
+
+class RTPSenderAudio {
+ public:
+ RTPSenderAudio(Clock* clock, RTPSender* rtp_sender);
+
+ RTPSenderAudio() = delete;
+ RTPSenderAudio(const RTPSenderAudio&) = delete;
+ RTPSenderAudio& operator=(const RTPSenderAudio&) = delete;
+
+ ~RTPSenderAudio();
+
+ int32_t RegisterAudioPayload(absl::string_view payload_name,
+ int8_t payload_type,
+ uint32_t frequency,
+ size_t channels,
+ uint32_t rate);
+
+ bool SendAudio(AudioFrameType frame_type,
+ int8_t payload_type,
+ uint32_t rtp_timestamp,
+ const uint8_t* payload_data,
+ size_t payload_size);
+
+ // `absolute_capture_timestamp_ms` and `Clock::CurrentTime` should use the
+ // same epoch.
+ bool SendAudio(AudioFrameType frame_type,
+ int8_t payload_type,
+ uint32_t rtp_timestamp,
+ const uint8_t* payload_data,
+ size_t payload_size,
+ int64_t absolute_capture_timestamp_ms);
+
+ // Store the audio level in dBov for the audio level header extension
+ // (header-extension-for-audio-level-indication). Valid range is [0,127];
+ // the actual level in dBov is the negative of the stored value.
+ int32_t SetAudioLevel(uint8_t level_dbov);
+
+ // Send a DTMF tone using RFC 2833 (4733)
+ int32_t SendTelephoneEvent(uint8_t key, uint16_t time_ms, uint8_t level);
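+
+ // Typical call order (illustrative sketch; the payload type values are
+ // assumptions):
+ //
+ //   RTPSenderAudio sa(clock, rtp_sender);
+ //   sa.RegisterAudioPayload("telephone-event", 126, 8000, 1, 0);
+ //   sa.RegisterAudioPayload("audio", 111, 48000, 2, 64000);
+ //   sa.SendTelephoneEvent(/*key=*/1, /*time_ms=*/160, /*level=*/10);
+ //   sa.SendAudio(AudioFrameType::kAudioFrameSpeech, 111, rtp_ts,
+ //                encoded.data(), encoded.size());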
+
+ protected:
+ bool SendTelephoneEventPacket(
+ bool ended,
+ uint32_t dtmf_timestamp,
+ uint16_t duration,
+ bool marker_bit);  // Set on the first packet of a talk spurt.
+
+ bool MarkerBit(AudioFrameType frame_type, int8_t payload_type);
+
+ private:
+ Clock* const clock_ = nullptr;
+ RTPSender* const rtp_sender_ = nullptr;
+
+ Mutex send_audio_mutex_;
+
+ // DTMF.
+ bool dtmf_event_is_on_ = false;
+ bool dtmf_event_first_packet_sent_ = false;
+ int8_t dtmf_payload_type_ RTC_GUARDED_BY(send_audio_mutex_) = -1;
+ uint32_t dtmf_payload_freq_ RTC_GUARDED_BY(send_audio_mutex_) = 8000;
+ uint32_t dtmf_timestamp_ = 0;
+ uint32_t dtmf_length_samples_ = 0;
+ int64_t dtmf_time_last_sent_ = 0;
+ uint32_t dtmf_timestamp_last_sent_ = 0;
+ DtmfQueue::Event dtmf_current_event_;
+ DtmfQueue dtmf_queue_;
+
+ // VAD detection, used for marker bit.
+ bool inband_vad_active_ RTC_GUARDED_BY(send_audio_mutex_) = false;
+ int8_t cngnb_payload_type_ RTC_GUARDED_BY(send_audio_mutex_) = -1;
+ int8_t cngwb_payload_type_ RTC_GUARDED_BY(send_audio_mutex_) = -1;
+ int8_t cngswb_payload_type_ RTC_GUARDED_BY(send_audio_mutex_) = -1;
+ int8_t cngfb_payload_type_ RTC_GUARDED_BY(send_audio_mutex_) = -1;
+ int8_t last_payload_type_ RTC_GUARDED_BY(send_audio_mutex_) = -1;
+
+ // Audio level indication.
+ // (https://datatracker.ietf.org/doc/draft-lennox-avt-rtp-audio-level-exthdr/)
+ uint8_t audio_level_dbov_ RTC_GUARDED_BY(send_audio_mutex_) = 0;
+ OneTimeEvent first_packet_sent_;
+
+ absl::optional<uint32_t> encoder_rtp_timestamp_frequency_
+ RTC_GUARDED_BY(send_audio_mutex_);
+
+ AbsoluteCaptureTimeSender absolute_capture_time_sender_;
+
+ const FieldTrialBasedConfig field_trials_;
+ const bool include_capture_clock_offset_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_RTP_RTCP_SOURCE_RTP_SENDER_AUDIO_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_sender_audio_unittest.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_sender_audio_unittest.cc
new file mode 100644
index 0000000000..7397a3ac4e
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_sender_audio_unittest.cc
@@ -0,0 +1,264 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtp_sender_audio.h"
+
+#include <memory>
+#include <vector>
+
+#include "api/transport/field_trial_based_config.h"
+#include "modules/rtp_rtcp/include/rtp_header_extension_map.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "modules/rtp_rtcp/source/rtp_header_extensions.h"
+#include "modules/rtp_rtcp/source/rtp_packet_received.h"
+#include "modules/rtp_rtcp/source/rtp_rtcp_impl2.h"
+#include "rtc_base/thread.h"
+#include "test/field_trial.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+namespace {
+enum : int { // The first valid value is 1.
+ kAudioLevelExtensionId = 1,
+ kAbsoluteCaptureTimeExtensionId = 2,
+};
+
+const uint16_t kSeqNum = 33;
+const uint32_t kSsrc = 725242;
+const uint8_t kAudioLevel = 0x5a;
+const uint64_t kStartTime = 123456789;
+
+using ::testing::ElementsAreArray;
+
+class LoopbackTransportTest : public webrtc::Transport {
+ public:
+ LoopbackTransportTest() {
+ receivers_extensions_.Register<AudioLevel>(kAudioLevelExtensionId);
+ receivers_extensions_.Register<AbsoluteCaptureTimeExtension>(
+ kAbsoluteCaptureTimeExtensionId);
+ }
+
+ bool SendRtp(const uint8_t* data,
+ size_t len,
+ const PacketOptions& /*options*/) override {
+ sent_packets_.push_back(RtpPacketReceived(&receivers_extensions_));
+ EXPECT_TRUE(sent_packets_.back().Parse(data, len));
+ return true;
+ }
+ bool SendRtcp(const uint8_t* data, size_t len) override { return false; }
+ const RtpPacketReceived& last_sent_packet() { return sent_packets_.back(); }
+ int packets_sent() { return sent_packets_.size(); }
+
+ private:
+ RtpHeaderExtensionMap receivers_extensions_;
+ std::vector<RtpPacketReceived> sent_packets_;
+};
+
+} // namespace
+
+class RtpSenderAudioTest : public ::testing::Test {
+ public:
+ RtpSenderAudioTest()
+ : fake_clock_(kStartTime),
+ rtp_module_(ModuleRtpRtcpImpl2::Create([&] {
+ RtpRtcpInterface::Configuration config;
+ config.audio = true;
+ config.clock = &fake_clock_;
+ config.outgoing_transport = &transport_;
+ config.local_media_ssrc = kSsrc;
+ return config;
+ }())),
+ rtp_sender_audio_(
+ std::make_unique<RTPSenderAudio>(&fake_clock_,
+ rtp_module_->RtpSender())) {
+ rtp_module_->SetSequenceNumber(kSeqNum);
+ }
+
+ rtc::AutoThread main_thread_;
+ SimulatedClock fake_clock_;
+ LoopbackTransportTest transport_;
+ std::unique_ptr<ModuleRtpRtcpImpl2> rtp_module_;
+ std::unique_ptr<RTPSenderAudio> rtp_sender_audio_;
+};
+
+TEST_F(RtpSenderAudioTest, SendAudio) {
+ const char payload_name[] = "PAYLOAD_NAME";
+ const uint8_t payload_type = 127;
+ ASSERT_EQ(0, rtp_sender_audio_->RegisterAudioPayload(
+ payload_name, payload_type, 48000, 0, 1500));
+ uint8_t payload[] = {47, 11, 32, 93, 89};
+
+ ASSERT_TRUE(
+ rtp_sender_audio_->SendAudio(AudioFrameType::kAudioFrameCN, payload_type,
+ 4321, payload, sizeof(payload),
+ /*absolute_capture_timestamp_ms=*/0));
+
+ auto sent_payload = transport_.last_sent_packet().payload();
+ EXPECT_THAT(sent_payload, ElementsAreArray(payload));
+}
+
+TEST_F(RtpSenderAudioTest, SendAudioWithAudioLevelExtension) {
+ EXPECT_EQ(0, rtp_sender_audio_->SetAudioLevel(kAudioLevel));
+ rtp_module_->RegisterRtpHeaderExtension(AudioLevel::Uri(),
+ kAudioLevelExtensionId);
+
+ const char payload_name[] = "PAYLOAD_NAME";
+ const uint8_t payload_type = 127;
+ ASSERT_EQ(0, rtp_sender_audio_->RegisterAudioPayload(
+ payload_name, payload_type, 48000, 0, 1500));
+
+ uint8_t payload[] = {47, 11, 32, 93, 89};
+
+ ASSERT_TRUE(
+ rtp_sender_audio_->SendAudio(AudioFrameType::kAudioFrameCN, payload_type,
+ 4321, payload, sizeof(payload),
+ /*absolute_capture_timestamp_ms=*/0));
+
+ auto sent_payload = transport_.last_sent_packet().payload();
+ EXPECT_THAT(sent_payload, ElementsAreArray(payload));
+ // Verify AudioLevel extension.
+ bool voice_activity;
+ uint8_t audio_level;
+ EXPECT_TRUE(transport_.last_sent_packet().GetExtension<AudioLevel>(
+ &voice_activity, &audio_level));
+ EXPECT_EQ(kAudioLevel, audio_level);
+ EXPECT_FALSE(voice_activity);
+}
+
+TEST_F(RtpSenderAudioTest, SendAudioWithoutAbsoluteCaptureTime) {
+ constexpr uint32_t kAbsoluteCaptureTimestampMs = 521;
+ const char payload_name[] = "audio";
+ const uint8_t payload_type = 127;
+ ASSERT_EQ(0, rtp_sender_audio_->RegisterAudioPayload(
+ payload_name, payload_type, 48000, 0, 1500));
+ uint8_t payload[] = {47, 11, 32, 93, 89};
+
+ ASSERT_TRUE(rtp_sender_audio_->SendAudio(
+ AudioFrameType::kAudioFrameCN, payload_type, 4321, payload,
+ sizeof(payload), kAbsoluteCaptureTimestampMs));
+
+ EXPECT_FALSE(transport_.last_sent_packet()
+ .HasExtension<AbsoluteCaptureTimeExtension>());
+}
+
+// Essentially the same test as
+// SendAudioWithAbsoluteCaptureTimeWithCaptureClockOffset but with a field
+// trial. We will remove this test eventually.
+TEST_F(RtpSenderAudioTest, SendAudioWithAbsoluteCaptureTime) {
+ // Recreate rtp_sender_audio_ with new field trial.
+ test::ScopedFieldTrials field_trial(
+ "WebRTC-IncludeCaptureClockOffset/Disabled/");
+ rtp_sender_audio_ =
+ std::make_unique<RTPSenderAudio>(&fake_clock_, rtp_module_->RtpSender());
+
+ rtp_module_->RegisterRtpHeaderExtension(AbsoluteCaptureTimeExtension::Uri(),
+ kAbsoluteCaptureTimeExtensionId);
+ constexpr uint32_t kAbsoluteCaptureTimestampMs = 521;
+ const char payload_name[] = "audio";
+ const uint8_t payload_type = 127;
+ ASSERT_EQ(0, rtp_sender_audio_->RegisterAudioPayload(
+ payload_name, payload_type, 48000, 0, 1500));
+ uint8_t payload[] = {47, 11, 32, 93, 89};
+
+ ASSERT_TRUE(rtp_sender_audio_->SendAudio(
+ AudioFrameType::kAudioFrameCN, payload_type, 4321, payload,
+ sizeof(payload), kAbsoluteCaptureTimestampMs));
+
+ auto absolute_capture_time =
+ transport_.last_sent_packet()
+ .GetExtension<AbsoluteCaptureTimeExtension>();
+ EXPECT_TRUE(absolute_capture_time);
+ EXPECT_EQ(
+ absolute_capture_time->absolute_capture_timestamp,
+ Int64MsToUQ32x32(fake_clock_.ConvertTimestampToNtpTimeInMilliseconds(
+ kAbsoluteCaptureTimestampMs)));
+ EXPECT_FALSE(
+ absolute_capture_time->estimated_capture_clock_offset.has_value());
+}
+
+TEST_F(RtpSenderAudioTest,
+ SendAudioWithAbsoluteCaptureTimeWithCaptureClockOffset) {
+ rtp_module_->RegisterRtpHeaderExtension(AbsoluteCaptureTimeExtension::Uri(),
+ kAbsoluteCaptureTimeExtensionId);
+ constexpr uint32_t kAbsoluteCaptureTimestampMs = 521;
+ const char payload_name[] = "audio";
+ const uint8_t payload_type = 127;
+ ASSERT_EQ(0, rtp_sender_audio_->RegisterAudioPayload(
+ payload_name, payload_type, 48000, 0, 1500));
+ uint8_t payload[] = {47, 11, 32, 93, 89};
+
+ ASSERT_TRUE(rtp_sender_audio_->SendAudio(
+ AudioFrameType::kAudioFrameCN, payload_type, 4321, payload,
+ sizeof(payload), kAbsoluteCaptureTimestampMs));
+
+ auto absolute_capture_time =
+ transport_.last_sent_packet()
+ .GetExtension<AbsoluteCaptureTimeExtension>();
+ EXPECT_TRUE(absolute_capture_time);
+ EXPECT_EQ(
+ absolute_capture_time->absolute_capture_timestamp,
+ Int64MsToUQ32x32(fake_clock_.ConvertTimestampToNtpTimeInMilliseconds(
+ kAbsoluteCaptureTimestampMs)));
+ EXPECT_TRUE(
+ absolute_capture_time->estimated_capture_clock_offset.has_value());
+ EXPECT_EQ(0, *absolute_capture_time->estimated_capture_clock_offset);
+}
+
+// Per RFC 4733, named telephone events are carried as part of the audio
+// stream and must use the same sequence number and timestamp base as the
+// regular audio channel.
+// This test checks the marker bit for the first packet and the subsequent
+// packets of the same telephone event. Since it is specifically about DTMF
+// events, it ignores audio packets and sends kEmptyFrame instead.
+TEST_F(RtpSenderAudioTest, CheckMarkerBitForTelephoneEvents) {
+ const char* kDtmfPayloadName = "telephone-event";
+ const uint32_t kPayloadFrequency = 8000;
+ const uint8_t kPayloadType = 126;
+ ASSERT_EQ(0, rtp_sender_audio_->RegisterAudioPayload(
+ kDtmfPayloadName, kPayloadType, kPayloadFrequency, 0, 0));
+ // For telephone events, the payload is not added to the registered payload
+ // list; only the payload used for the audio stream is registered. Register
+ // the same payload type again for the audio stream under a different name.
+ const char* kPayloadName = "payload_name";
+ ASSERT_EQ(0, rtp_sender_audio_->RegisterAudioPayload(
+ kPayloadName, kPayloadType, kPayloadFrequency, 1, 0));
+ // Start time is arbitrary.
+ uint32_t capture_timestamp = fake_clock_.TimeInMilliseconds();
+ // DTMF event: key=9, duration=500 ms, attenuation=10 dB.
+ rtp_sender_audio_->SendTelephoneEvent(9, 500, 10);
+ // At the start, the starting timestamp is taken as the last-sent timestamp.
+ // The duration is calculated as the difference between the current and
+ // last-sent timestamps, so the first call skips sending since the duration
+ // is zero.
+ ASSERT_TRUE(rtp_sender_audio_->SendAudio(
+ AudioFrameType::kEmptyFrame, kPayloadType, capture_timestamp, nullptr, 0,
+ /*absolute_capture_timestamp_ms=*/0));
+
+ // DTMF Sample Length is (Frequency/1000) * Duration.
+ // So in this case, it is (8000/1000) * 500 = 4000.
+ // Sending it as two packets.
+ ASSERT_TRUE(rtp_sender_audio_->SendAudio(AudioFrameType::kEmptyFrame,
+ kPayloadType,
+ capture_timestamp + 2000, nullptr, 0,
+ /*absolute_capture_timestamp_ms=*/0));
+
+ // Marker Bit should be set to 1 for first packet.
+ EXPECT_TRUE(transport_.last_sent_packet().Marker());
+
+ ASSERT_TRUE(rtp_sender_audio_->SendAudio(AudioFrameType::kEmptyFrame,
+ kPayloadType,
+ capture_timestamp + 4000, nullptr, 0,
+ /*absolute_capture_timestamp_ms=*/0));
+ // Marker Bit should be set to 0 for rest of the packets.
+ EXPECT_FALSE(transport_.last_sent_packet().Marker());
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_sender_egress.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_sender_egress.cc
new file mode 100644
index 0000000000..5ab910ab58
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_sender_egress.cc
@@ -0,0 +1,645 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtp_sender_egress.h"
+
+#include <algorithm>
+#include <limits>
+#include <memory>
+#include <utility>
+
+#include "absl/strings/match.h"
+#include "api/transport/field_trial_based_config.h"
+#include "logging/rtc_event_log/events/rtc_event_rtp_packet_outgoing.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+namespace {
+constexpr uint32_t kTimestampTicksPerMs = 90;
+constexpr int kSendSideDelayWindowMs = 1000;
+constexpr int kBitrateStatisticsWindowMs = 1000;
+constexpr size_t kRtpSequenceNumberMapMaxEntries = 1 << 13;
+constexpr TimeDelta kUpdateInterval =
+ TimeDelta::Millis(kBitrateStatisticsWindowMs);
+
+bool IsTrialSetTo(const FieldTrialsView* field_trials,
+ absl::string_view name,
+ absl::string_view value) {
+ FieldTrialBasedConfig default_trials;
+ auto& trials = field_trials ? *field_trials : default_trials;
+ return absl::StartsWith(trials.Lookup(name), value);
+}
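+
+// E.g. IsTrialSetTo(config.field_trials, "WebRTC-SendSideBwe-WithOverhead",
+// "Disabled") is true whenever the lookup result starts with "Disabled"
+// ("Disabled", "Disabled-1", ...), since only the prefix is compared.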
+} // namespace
+
+RtpSenderEgress::NonPacedPacketSender::NonPacedPacketSender(
+ RtpSenderEgress* sender,
+ PacketSequencer* sequencer)
+ : transport_sequence_number_(0), sender_(sender), sequencer_(sequencer) {
+ RTC_DCHECK(sequencer);
+}
+RtpSenderEgress::NonPacedPacketSender::~NonPacedPacketSender() = default;
+
+void RtpSenderEgress::NonPacedPacketSender::EnqueuePackets(
+ std::vector<std::unique_ptr<RtpPacketToSend>> packets) {
+ for (auto& packet : packets) {
+ PrepareForSend(packet.get());
+ sender_->SendPacket(packet.get(), PacedPacketInfo());
+ }
+ auto fec_packets = sender_->FetchFecPackets();
+ if (!fec_packets.empty()) {
+ EnqueuePackets(std::move(fec_packets));
+ }
+}
+
+void RtpSenderEgress::NonPacedPacketSender::PrepareForSend(
+ RtpPacketToSend* packet) {
+ // Assign sequence numbers, but not for flexfec which is already running on
+ // an internally maintained sequence number series.
+ if (packet->Ssrc() != sender_->FlexFecSsrc()) {
+ sequencer_->Sequence(*packet);
+ }
+ if (!packet->SetExtension<TransportSequenceNumber>(
+ ++transport_sequence_number_)) {
+ --transport_sequence_number_;
+ }
+ packet->ReserveExtension<TransmissionOffset>();
+ packet->ReserveExtension<AbsoluteSendTime>();
+}
+
+RtpSenderEgress::RtpSenderEgress(const RtpRtcpInterface::Configuration& config,
+ RtpPacketHistory* packet_history)
+ : worker_queue_(TaskQueueBase::Current()),
+ ssrc_(config.local_media_ssrc),
+ rtx_ssrc_(config.rtx_send_ssrc),
+ flexfec_ssrc_(config.fec_generator ? config.fec_generator->FecSsrc()
+ : absl::nullopt),
+ populate_network2_timestamp_(config.populate_network2_timestamp),
+ send_side_bwe_with_overhead_(
+ !IsTrialSetTo(config.field_trials,
+ "WebRTC-SendSideBwe-WithOverhead",
+ "Disabled")),
+ clock_(config.clock),
+ packet_history_(packet_history),
+ transport_(config.outgoing_transport),
+ event_log_(config.event_log),
+#if BWE_TEST_LOGGING_COMPILE_TIME_ENABLE
+ is_audio_(config.audio),
+#endif
+ need_rtp_packet_infos_(config.need_rtp_packet_infos),
+ fec_generator_(config.fec_generator),
+ transport_feedback_observer_(config.transport_feedback_callback),
+ send_side_delay_observer_(config.send_side_delay_observer),
+ send_packet_observer_(config.send_packet_observer),
+ rtp_stats_callback_(config.rtp_stats_callback),
+ bitrate_callback_(config.send_bitrate_observer),
+ media_has_been_sent_(false),
+ force_part_of_allocation_(false),
+ timestamp_offset_(0),
+ max_delay_it_(send_delays_.end()),
+ sum_delays_ms_(0),
+ total_packet_send_delay_ms_(0),
+ send_rates_(kNumMediaTypes,
+ {kBitrateStatisticsWindowMs, RateStatistics::kBpsScale}),
+ rtp_sequence_number_map_(need_rtp_packet_infos_
+ ? std::make_unique<RtpSequenceNumberMap>(
+ kRtpSequenceNumberMapMaxEntries)
+ : nullptr) {
+ RTC_DCHECK(worker_queue_);
+ pacer_checker_.Detach();
+ if (bitrate_callback_) {
+ update_task_ = RepeatingTaskHandle::DelayedStart(worker_queue_,
+ kUpdateInterval, [this]() {
+ PeriodicUpdate();
+ return kUpdateInterval;
+ });
+ }
+}
+
+RtpSenderEgress::~RtpSenderEgress() {
+ RTC_DCHECK_RUN_ON(worker_queue_);
+ update_task_.Stop();
+}
+
+void RtpSenderEgress::SendPacket(RtpPacketToSend* packet,
+ const PacedPacketInfo& pacing_info) {
+ RTC_DCHECK_RUN_ON(&pacer_checker_);
+ RTC_DCHECK(packet);
+
+ if (packet->Ssrc() == ssrc_ &&
+ packet->packet_type() != RtpPacketMediaType::kRetransmission) {
+ if (last_sent_seq_.has_value()) {
+ RTC_DCHECK_EQ(static_cast<uint16_t>(*last_sent_seq_ + 1),
+ packet->SequenceNumber());
+ }
+ last_sent_seq_ = packet->SequenceNumber();
+ } else if (packet->Ssrc() == rtx_ssrc_) {
+ if (last_sent_rtx_seq_.has_value()) {
+ RTC_DCHECK_EQ(static_cast<uint16_t>(*last_sent_rtx_seq_ + 1),
+ packet->SequenceNumber());
+ }
+ last_sent_rtx_seq_ = packet->SequenceNumber();
+ }
+
+ RTC_DCHECK(packet->packet_type().has_value());
+ RTC_DCHECK(HasCorrectSsrc(*packet));
+ if (packet->packet_type() == RtpPacketMediaType::kRetransmission) {
+ RTC_DCHECK(packet->retransmitted_sequence_number().has_value());
+ }
+
+ const uint32_t packet_ssrc = packet->Ssrc();
+ const Timestamp now = clock_->CurrentTime();
+
+#if BWE_TEST_LOGGING_COMPILE_TIME_ENABLE
+ worker_queue_->PostTask(
+ SafeTask(task_safety_.flag(), [this, now, packet_ssrc]() {
+ BweTestLoggingPlot(now.ms(), packet_ssrc);
+ }));
+#endif
+
+ if (need_rtp_packet_infos_ &&
+ packet->packet_type() == RtpPacketToSend::Type::kVideo) {
+ worker_queue_->PostTask(SafeTask(
+ task_safety_.flag(),
+ [this, packet_timestamp = packet->Timestamp(),
+ is_first_packet_of_frame = packet->is_first_packet_of_frame(),
+ is_last_packet_of_frame = packet->Marker(),
+ sequence_number = packet->SequenceNumber()]() {
+ RTC_DCHECK_RUN_ON(worker_queue_);
+ // Last packet of a frame, add it to sequence number info map.
+ const uint32_t timestamp = packet_timestamp - timestamp_offset_;
+ rtp_sequence_number_map_->InsertPacket(
+ sequence_number,
+ RtpSequenceNumberMap::Info(timestamp, is_first_packet_of_frame,
+ is_last_packet_of_frame));
+ }));
+ }
+
+ if (fec_generator_ && packet->fec_protect_packet()) {
+ // This packet should be protected by FEC; add it to the FEC generator.
+ RTC_DCHECK(fec_generator_);
+ RTC_DCHECK(packet->packet_type() == RtpPacketMediaType::kVideo);
+ absl::optional<std::pair<FecProtectionParams, FecProtectionParams>>
+ new_fec_params;
+ {
+ MutexLock lock(&lock_);
+ new_fec_params.swap(pending_fec_params_);
+ }
+ if (new_fec_params) {
+ fec_generator_->SetProtectionParameters(new_fec_params->first,
+ new_fec_params->second);
+ }
+ if (packet->is_red()) {
+ RtpPacketToSend unpacked_packet(*packet);
+
+ const rtc::CopyOnWriteBuffer buffer = packet->Buffer();
+ // Grab media payload type from RED header.
+ const size_t headers_size = packet->headers_size();
+ unpacked_packet.SetPayloadType(buffer[headers_size]);
+
+ // Copy the media payload into the unpacked buffer.
+ uint8_t* payload_buffer =
+ unpacked_packet.SetPayloadSize(packet->payload_size() - 1);
+ std::copy(&packet->payload()[0] + 1,
+ &packet->payload()[0] + packet->payload_size(), payload_buffer);
+
+ fec_generator_->AddPacketAndGenerateFec(unpacked_packet);
+ } else {
+ // If not RED encapsulated - we can just insert packet directly.
+ fec_generator_->AddPacketAndGenerateFec(*packet);
+ }
+ }
+
+ // Bug webrtc:7859. Since FEC is invoked from rtp_sender_video, and not after
+ // the pacer, the header modifications below happen after the FEC protection
+ // packets are calculated, which corrupts the recovered packets at the same
+ // offsets. This is not an issue for extensions that are present in every
+ // packet (only their content may be incorrect on recovered packets). But
+ // because VideoTimingExtension is not present in every packet, data after
+ // the RTP header may be corrupted if such packets are FEC protected.
+ TimeDelta diff = now - packet->capture_time();
+ if (packet->HasExtension<TransmissionOffset>()) {
+ packet->SetExtension<TransmissionOffset>(kTimestampTicksPerMs * diff.ms());
+ }
+ if (packet->HasExtension<AbsoluteSendTime>()) {
+ packet->SetExtension<AbsoluteSendTime>(AbsoluteSendTime::To24Bits(now));
+ }
+
+ if (packet->HasExtension<VideoTimingExtension>()) {
+ if (populate_network2_timestamp_) {
+ packet->set_network2_time(now);
+ } else {
+ packet->set_pacer_exit_time(now);
+ }
+ }
+
+ const bool is_media = packet->packet_type() == RtpPacketMediaType::kAudio ||
+ packet->packet_type() == RtpPacketMediaType::kVideo;
+
+ PacketOptions options;
+ {
+ MutexLock lock(&lock_);
+ options.included_in_allocation = force_part_of_allocation_;
+ }
+
+ // Downstream code actually uses this flag to distinguish between media and
+ // everything else.
+ options.is_retransmit = !is_media;
+ if (auto packet_id = packet->GetExtension<TransportSequenceNumber>()) {
+ options.packet_id = *packet_id;
+ options.included_in_feedback = true;
+ options.included_in_allocation = true;
+ AddPacketToTransportFeedback(*packet_id, *packet, pacing_info);
+ }
+
+ options.additional_data = packet->additional_data();
+
+ if (packet->packet_type() != RtpPacketMediaType::kPadding &&
+ packet->packet_type() != RtpPacketMediaType::kRetransmission) {
+ UpdateDelayStatistics(packet->capture_time().ms(), now.ms(), packet_ssrc);
+ UpdateOnSendPacket(options.packet_id, packet->capture_time().ms(),
+ packet_ssrc);
+ }
+
+ const bool send_success = SendPacketToNetwork(*packet, options, pacing_info);
+
+ // Put packet in retransmission history or update pending status even if
+ // actual sending fails.
+ if (is_media && packet->allow_retransmission()) {
+ packet_history_->PutRtpPacket(std::make_unique<RtpPacketToSend>(*packet),
+ now);
+ } else if (packet->retransmitted_sequence_number()) {
+ packet_history_->MarkPacketAsSent(*packet->retransmitted_sequence_number());
+ }
+
+ if (send_success) {
+ // `media_has_been_sent_` is used by RTPSender to figure out if it can send
+ // padding in the absence of transport-cc or abs-send-time.
+ // In those cases media must be sent first to set a reference timestamp.
+ media_has_been_sent_ = true;
+
+ // TODO(sprang): Add support for FEC protecting all header extensions, add
+ // media packet to generator here instead.
+
+ RTC_DCHECK(packet->packet_type().has_value());
+ RtpPacketMediaType packet_type = *packet->packet_type();
+ RtpPacketCounter counter(*packet);
+ size_t size = packet->size();
+ worker_queue_->PostTask(
+ SafeTask(task_safety_.flag(), [this, now, packet_ssrc, packet_type,
+ counter = std::move(counter), size]() {
+ RTC_DCHECK_RUN_ON(worker_queue_);
+ UpdateRtpStats(now.ms(), packet_ssrc, packet_type, std::move(counter),
+ size);
+ }));
+ }
+}
+
+RtpSendRates RtpSenderEgress::GetSendRates() const {
+ MutexLock lock(&lock_);
+ const int64_t now_ms = clock_->TimeInMilliseconds();
+ return GetSendRatesLocked(now_ms);
+}
+
+RtpSendRates RtpSenderEgress::GetSendRatesLocked(int64_t now_ms) const {
+ RtpSendRates current_rates;
+ for (size_t i = 0; i < kNumMediaTypes; ++i) {
+ RtpPacketMediaType type = static_cast<RtpPacketMediaType>(i);
+ current_rates[type] =
+ DataRate::BitsPerSec(send_rates_[i].Rate(now_ms).value_or(0));
+ }
+ return current_rates;
+}
+
+void RtpSenderEgress::GetDataCounters(StreamDataCounters* rtp_stats,
+ StreamDataCounters* rtx_stats) const {
+ // TODO(bugs.webrtc.org/11581): make sure rtx_rtp_stats_ and rtp_stats_ are
+ // only touched on the worker thread.
+ MutexLock lock(&lock_);
+ *rtp_stats = rtp_stats_;
+ *rtx_stats = rtx_rtp_stats_;
+}
+
+void RtpSenderEgress::ForceIncludeSendPacketsInAllocation(
+ bool part_of_allocation) {
+ MutexLock lock(&lock_);
+ force_part_of_allocation_ = part_of_allocation;
+}
+
+bool RtpSenderEgress::MediaHasBeenSent() const {
+ RTC_DCHECK_RUN_ON(&pacer_checker_);
+ return media_has_been_sent_;
+}
+
+void RtpSenderEgress::SetMediaHasBeenSent(bool media_sent) {
+ RTC_DCHECK_RUN_ON(&pacer_checker_);
+ media_has_been_sent_ = media_sent;
+}
+
+void RtpSenderEgress::SetTimestampOffset(uint32_t timestamp) {
+ RTC_DCHECK_RUN_ON(worker_queue_);
+ timestamp_offset_ = timestamp;
+}
+
+std::vector<RtpSequenceNumberMap::Info> RtpSenderEgress::GetSentRtpPacketInfos(
+ rtc::ArrayView<const uint16_t> sequence_numbers) const {
+ RTC_DCHECK_RUN_ON(worker_queue_);
+ RTC_DCHECK(!sequence_numbers.empty());
+ if (!need_rtp_packet_infos_) {
+ return std::vector<RtpSequenceNumberMap::Info>();
+ }
+
+ std::vector<RtpSequenceNumberMap::Info> results;
+ results.reserve(sequence_numbers.size());
+
+ for (uint16_t sequence_number : sequence_numbers) {
+ const auto& info = rtp_sequence_number_map_->Get(sequence_number);
+ if (!info) {
+ // An empty vector will be returned; the partially filled `results`
+ // is simply discarded.
+ return std::vector<RtpSequenceNumberMap::Info>();
+ }
+ results.push_back(*info);
+ }
+
+ return results;
+}
+
+void RtpSenderEgress::SetFecProtectionParameters(
+ const FecProtectionParams& delta_params,
+ const FecProtectionParams& key_params) {
+ // TODO(sprang): Post task to pacer queue instead, once the pacer is fully
+ // migrated to a task queue.
+ MutexLock lock(&lock_);
+ pending_fec_params_.emplace(delta_params, key_params);
+}
+
+std::vector<std::unique_ptr<RtpPacketToSend>>
+RtpSenderEgress::FetchFecPackets() {
+ RTC_DCHECK_RUN_ON(&pacer_checker_);
+ if (fec_generator_) {
+ return fec_generator_->GetFecPackets();
+ }
+ return {};
+}
+
+bool RtpSenderEgress::HasCorrectSsrc(const RtpPacketToSend& packet) const {
+ switch (*packet.packet_type()) {
+ case RtpPacketMediaType::kAudio:
+ case RtpPacketMediaType::kVideo:
+ return packet.Ssrc() == ssrc_;
+ case RtpPacketMediaType::kRetransmission:
+ case RtpPacketMediaType::kPadding:
+ // Both padding and retransmission must be on either the media or the
+ // RTX stream.
+ return packet.Ssrc() == rtx_ssrc_ || packet.Ssrc() == ssrc_;
+ case RtpPacketMediaType::kForwardErrorCorrection:
+ // FlexFEC is on separate SSRC, ULPFEC uses media SSRC.
+ return packet.Ssrc() == ssrc_ || packet.Ssrc() == flexfec_ssrc_;
+ }
+ return false;
+}
+
+void RtpSenderEgress::AddPacketToTransportFeedback(
+ uint16_t packet_id,
+ const RtpPacketToSend& packet,
+ const PacedPacketInfo& pacing_info) {
+ if (transport_feedback_observer_) {
+ size_t packet_size = packet.payload_size() + packet.padding_size();
+ if (send_side_bwe_with_overhead_) {
+ packet_size = packet.size();
+ }
+
+ RtpPacketSendInfo packet_info;
+ packet_info.transport_sequence_number = packet_id;
+ packet_info.rtp_timestamp = packet.Timestamp();
+ packet_info.length = packet_size;
+ packet_info.pacing_info = pacing_info;
+ packet_info.packet_type = packet.packet_type();
+
+ switch (*packet_info.packet_type) {
+ case RtpPacketMediaType::kAudio:
+ case RtpPacketMediaType::kVideo:
+ packet_info.media_ssrc = ssrc_;
+ packet_info.rtp_sequence_number = packet.SequenceNumber();
+ break;
+ case RtpPacketMediaType::kRetransmission:
+ // For retransmissions, we want to remove the original media packet once
+ // the retransmission arrives, so populate that in the packet info.
+ packet_info.media_ssrc = ssrc_;
+ packet_info.rtp_sequence_number =
+ *packet.retransmitted_sequence_number();
+ break;
+ case RtpPacketMediaType::kPadding:
+ case RtpPacketMediaType::kForwardErrorCorrection:
+ // We're not interested in feedback about these packets being received
+ // or lost.
+ break;
+ }
+
+ transport_feedback_observer_->OnAddPacket(packet_info);
+ }
+}
+
+void RtpSenderEgress::UpdateDelayStatistics(int64_t capture_time_ms,
+ int64_t now_ms,
+ uint32_t ssrc) {
+ if (!send_side_delay_observer_ || capture_time_ms <= 0)
+ return;
+
+ int avg_delay_ms = 0;
+ int max_delay_ms = 0;
+ uint64_t total_packet_send_delay_ms = 0;
+ {
+ MutexLock lock(&lock_);
+ // Compute the max and average of the recent capture-to-send delays.
+ // The time complexity of the current approach depends on the distribution
+ // of the delay values. This could be done more efficiently.
+
+ // Remove elements older than kSendSideDelayWindowMs.
+ auto lower_bound =
+ send_delays_.lower_bound(now_ms - kSendSideDelayWindowMs);
+ for (auto it = send_delays_.begin(); it != lower_bound; ++it) {
+ if (max_delay_it_ == it) {
+ max_delay_it_ = send_delays_.end();
+ }
+ sum_delays_ms_ -= it->second;
+ }
+ send_delays_.erase(send_delays_.begin(), lower_bound);
+ if (max_delay_it_ == send_delays_.end()) {
+ // Removed the previous max. Need to recompute.
+ RecomputeMaxSendDelay();
+ }
+
+ // Add the new element.
+ RTC_DCHECK_GE(now_ms, 0);
+ RTC_DCHECK_LE(now_ms, std::numeric_limits<int64_t>::max() / 2);
+ RTC_DCHECK_GE(capture_time_ms, 0);
+ RTC_DCHECK_LE(capture_time_ms, std::numeric_limits<int64_t>::max() / 2);
+ int64_t diff_ms = now_ms - capture_time_ms;
+ RTC_DCHECK_GE(diff_ms, static_cast<int64_t>(0));
+ RTC_DCHECK_LE(diff_ms, std::numeric_limits<int>::max());
+ int new_send_delay = rtc::dchecked_cast<int>(now_ms - capture_time_ms);
+ SendDelayMap::iterator it;
+ bool inserted;
+ std::tie(it, inserted) =
+ send_delays_.insert(std::make_pair(now_ms, new_send_delay));
+ if (!inserted) {
+ // TODO(terelius): If we have multiple delay measurements during the same
+ // millisecond then we keep the most recent one. It is not clear that this
+ // is the right decision, but it preserves an earlier behavior.
+ int previous_send_delay = it->second;
+ sum_delays_ms_ -= previous_send_delay;
+ it->second = new_send_delay;
+ if (max_delay_it_ == it && new_send_delay < previous_send_delay) {
+ RecomputeMaxSendDelay();
+ }
+ }
+ if (max_delay_it_ == send_delays_.end() ||
+ it->second >= max_delay_it_->second) {
+ max_delay_it_ = it;
+ }
+ sum_delays_ms_ += new_send_delay;
+ total_packet_send_delay_ms_ += new_send_delay;
+ total_packet_send_delay_ms = total_packet_send_delay_ms_;
+
+ size_t num_delays = send_delays_.size();
+ RTC_DCHECK(max_delay_it_ != send_delays_.end());
+ max_delay_ms = rtc::dchecked_cast<int>(max_delay_it_->second);
+ int64_t avg_ms = (sum_delays_ms_ + num_delays / 2) / num_delays;
+ RTC_DCHECK_GE(avg_ms, static_cast<int64_t>(0));
+ RTC_DCHECK_LE(avg_ms,
+ static_cast<int64_t>(std::numeric_limits<int>::max()));
+ avg_delay_ms =
+ rtc::dchecked_cast<int>((sum_delays_ms_ + num_delays / 2) / num_delays);
+ }
+ send_side_delay_observer_->SendSideDelayUpdated(
+ avg_delay_ms, max_delay_ms, total_packet_send_delay_ms, ssrc);
+}
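+
+// Worked example (illustrative numbers): with the 1000 ms window, packets
+// captured at t=0, 400, 900 and sent at t=100, 450, 1200 have send-side
+// delays of 100, 50 and 300 ms. When the packet sent at t=1200 is recorded,
+// the entry keyed at send time 100 falls outside [200, 1200] and is evicted
+// before the max and average are reported.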
+
+void RtpSenderEgress::RecomputeMaxSendDelay() {
+ max_delay_it_ = send_delays_.begin();
+ for (auto it = send_delays_.begin(); it != send_delays_.end(); ++it) {
+ if (it->second >= max_delay_it_->second) {
+ max_delay_it_ = it;
+ }
+ }
+}
+
+void RtpSenderEgress::UpdateOnSendPacket(int packet_id,
+ int64_t capture_time_ms,
+ uint32_t ssrc) {
+ if (!send_packet_observer_ || capture_time_ms <= 0 || packet_id == -1) {
+ return;
+ }
+
+ send_packet_observer_->OnSendPacket(packet_id, capture_time_ms, ssrc);
+}
+
+bool RtpSenderEgress::SendPacketToNetwork(const RtpPacketToSend& packet,
+ const PacketOptions& options,
+ const PacedPacketInfo& pacing_info) {
+ int bytes_sent = -1;
+ if (transport_) {
+ bytes_sent = transport_->SendRtp(packet.data(), packet.size(), options)
+ ? static_cast<int>(packet.size())
+ : -1;
+ if (event_log_ && bytes_sent > 0) {
+ event_log_->Log(std::make_unique<RtcEventRtpPacketOutgoing>(
+ packet, pacing_info.probe_cluster_id));
+ }
+ }
+
+ if (bytes_sent <= 0) {
+ RTC_LOG(LS_WARNING) << "Transport failed to send packet.";
+ return false;
+ }
+ return true;
+}
+
+void RtpSenderEgress::UpdateRtpStats(int64_t now_ms,
+ uint32_t packet_ssrc,
+ RtpPacketMediaType packet_type,
+ RtpPacketCounter counter,
+ size_t packet_size) {
+ RTC_DCHECK_RUN_ON(worker_queue_);
+
+ // TODO(bugs.webrtc.org/11581): send_rates_ should be touched only on the
+ // worker thread.
+ RtpSendRates send_rates;
+ {
+ MutexLock lock(&lock_);
+
+ // TODO(bugs.webrtc.org/11581): make sure rtx_rtp_stats_ and rtp_stats_ are
+ // only touched on the worker thread.
+ StreamDataCounters* counters =
+ packet_ssrc == rtx_ssrc_ ? &rtx_rtp_stats_ : &rtp_stats_;
+
+ if (counters->first_packet_time_ms == -1) {
+ counters->first_packet_time_ms = now_ms;
+ }
+
+ if (packet_type == RtpPacketMediaType::kForwardErrorCorrection) {
+ counters->fec.Add(counter);
+ } else if (packet_type == RtpPacketMediaType::kRetransmission) {
+ counters->retransmitted.Add(counter);
+ }
+ counters->transmitted.Add(counter);
+
+ send_rates_[static_cast<size_t>(packet_type)].Update(packet_size, now_ms);
+ if (bitrate_callback_) {
+ send_rates = GetSendRatesLocked(now_ms);
+ }
+
+ if (rtp_stats_callback_) {
+ rtp_stats_callback_->DataCountersUpdated(*counters, packet_ssrc);
+ }
+ }
+
+ // The bitrate_callback_ and rtp_stats_callback_ pointers in practice point
+ // to the same object, so these callbacks could be consolidated into one.
+ if (bitrate_callback_) {
+ bitrate_callback_->Notify(
+ send_rates.Sum().bps(),
+ send_rates[RtpPacketMediaType::kRetransmission].bps(), ssrc_);
+ }
+}
+
+void RtpSenderEgress::PeriodicUpdate() {
+ RTC_DCHECK_RUN_ON(worker_queue_);
+ RTC_DCHECK(bitrate_callback_);
+ RtpSendRates send_rates = GetSendRates();
+ bitrate_callback_->Notify(
+ send_rates.Sum().bps(),
+ send_rates[RtpPacketMediaType::kRetransmission].bps(), ssrc_);
+}
+
+#if BWE_TEST_LOGGING_COMPILE_TIME_ENABLE
+void RtpSenderEgress::BweTestLoggingPlot(int64_t now_ms, uint32_t packet_ssrc) {
+ RTC_DCHECK_RUN_ON(worker_queue_);
+
+ const auto rates = GetSendRates();
+ if (is_audio_) {
+ BWE_TEST_LOGGING_PLOT_WITH_SSRC(1, "AudioTotBitrate_kbps", now_ms,
+ rates.Sum().kbps(), packet_ssrc);
+ BWE_TEST_LOGGING_PLOT_WITH_SSRC(
+ 1, "AudioNackBitrate_kbps", now_ms,
+ rates[RtpPacketMediaType::kRetransmission].kbps(), packet_ssrc);
+ } else {
+ BWE_TEST_LOGGING_PLOT_WITH_SSRC(1, "VideoTotBitrate_kbps", now_ms,
+ rates.Sum().kbps(), packet_ssrc);
+ BWE_TEST_LOGGING_PLOT_WITH_SSRC(
+ 1, "VideoNackBitrate_kbps", now_ms,
+ rates[RtpPacketMediaType::kRetransmission].kbps(), packet_ssrc);
+ }
+}
+#endif // BWE_TEST_LOGGING_COMPILE_TIME_ENABLE
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_sender_egress.h b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_sender_egress.h
new file mode 100644
index 0000000000..0b440c29d1
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_sender_egress.h
@@ -0,0 +1,184 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_RTP_RTCP_SOURCE_RTP_SENDER_EGRESS_H_
+#define MODULES_RTP_RTCP_SOURCE_RTP_SENDER_EGRESS_H_
+
+#include <map>
+#include <memory>
+#include <utility>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/call/transport.h"
+#include "api/rtc_event_log/rtc_event_log.h"
+#include "api/sequence_checker.h"
+#include "api/task_queue/pending_task_safety_flag.h"
+#include "api/task_queue/task_queue_base.h"
+#include "api/units/data_rate.h"
+#include "modules/remote_bitrate_estimator/test/bwe_test_logging.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "modules/rtp_rtcp/source/packet_sequencer.h"
+#include "modules/rtp_rtcp/source/rtp_packet_history.h"
+#include "modules/rtp_rtcp/source/rtp_packet_to_send.h"
+#include "modules/rtp_rtcp/source/rtp_rtcp_interface.h"
+#include "modules/rtp_rtcp/source/rtp_sequence_number_map.h"
+#include "rtc_base/rate_statistics.h"
+#include "rtc_base/synchronization/mutex.h"
+#include "rtc_base/system/no_unique_address.h"
+#include "rtc_base/task_utils/repeating_task.h"
+#include "rtc_base/thread_annotations.h"
+
+namespace webrtc {
+
+class RtpSenderEgress {
+ public:
+ // Helper class that redirects packets directly to the send part of this class
+ // without passing through an actual paced sender.
+ class NonPacedPacketSender : public RtpPacketSender {
+ public:
+ NonPacedPacketSender(RtpSenderEgress* sender, PacketSequencer* sequencer);
+ virtual ~NonPacedPacketSender();
+
+ void EnqueuePackets(
+ std::vector<std::unique_ptr<RtpPacketToSend>> packets) override;
+
+ private:
+ void PrepareForSend(RtpPacketToSend* packet);
+ uint16_t transport_sequence_number_;
+ RtpSenderEgress* const sender_;
+ PacketSequencer* sequencer_;
+ };
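+
+ // Minimal wiring sketch (illustrative; `egress` and `sequencer` must
+ // outlive the sender):
+ //
+ //   RtpSenderEgress::NonPacedPacketSender non_paced(&egress, &sequencer);
+ //   non_paced.EnqueuePackets(std::move(packets));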
+
+ RtpSenderEgress(const RtpRtcpInterface::Configuration& config,
+ RtpPacketHistory* packet_history);
+ ~RtpSenderEgress();
+
+ void SendPacket(RtpPacketToSend* packet, const PacedPacketInfo& pacing_info)
+ RTC_LOCKS_EXCLUDED(lock_);
+ uint32_t Ssrc() const { return ssrc_; }
+ absl::optional<uint32_t> RtxSsrc() const { return rtx_ssrc_; }
+ absl::optional<uint32_t> FlexFecSsrc() const { return flexfec_ssrc_; }
+
+ RtpSendRates GetSendRates() const RTC_LOCKS_EXCLUDED(lock_);
+ void GetDataCounters(StreamDataCounters* rtp_stats,
+ StreamDataCounters* rtx_stats) const
+ RTC_LOCKS_EXCLUDED(lock_);
+
+ void ForceIncludeSendPacketsInAllocation(bool part_of_allocation)
+ RTC_LOCKS_EXCLUDED(lock_);
+ bool MediaHasBeenSent() const RTC_LOCKS_EXCLUDED(lock_);
+ void SetMediaHasBeenSent(bool media_sent) RTC_LOCKS_EXCLUDED(lock_);
+ void SetTimestampOffset(uint32_t timestamp) RTC_LOCKS_EXCLUDED(lock_);
+
+ // For each sequence number in `sequence_numbers`, recall the last RTP packet
+ // which bore it - its timestamp and whether it was the first and/or last
+ // packet in that frame. If all of the given sequence numbers could be
+ // recalled, return a vector with all of them (in corresponding order).
+ // If any could not be recalled, return an empty vector.
+ std::vector<RtpSequenceNumberMap::Info> GetSentRtpPacketInfos(
+ rtc::ArrayView<const uint16_t> sequence_numbers) const
+ RTC_LOCKS_EXCLUDED(lock_);
+
+ void SetFecProtectionParameters(const FecProtectionParams& delta_params,
+ const FecProtectionParams& key_params);
+ std::vector<std::unique_ptr<RtpPacketToSend>> FetchFecPackets();
+
+ private:
+ // Maps capture time in milliseconds to send-side delay in milliseconds.
+ // Send-side delay is the difference between transmission time and capture
+ // time.
+ typedef std::map<int64_t, int> SendDelayMap;
+
+ RtpSendRates GetSendRatesLocked(int64_t now_ms) const
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(lock_);
+ bool HasCorrectSsrc(const RtpPacketToSend& packet) const;
+ void AddPacketToTransportFeedback(uint16_t packet_id,
+ const RtpPacketToSend& packet,
+ const PacedPacketInfo& pacing_info);
+ void UpdateDelayStatistics(int64_t capture_time_ms,
+ int64_t now_ms,
+ uint32_t ssrc);
+ void RecomputeMaxSendDelay() RTC_EXCLUSIVE_LOCKS_REQUIRED(lock_);
+ void UpdateOnSendPacket(int packet_id,
+ int64_t capture_time_ms,
+ uint32_t ssrc);
+ // Sends packet on to `transport_`, leaving the RTP module.
+ bool SendPacketToNetwork(const RtpPacketToSend& packet,
+ const PacketOptions& options,
+ const PacedPacketInfo& pacing_info);
+
+ void UpdateRtpStats(int64_t now_ms,
+ uint32_t packet_ssrc,
+ RtpPacketMediaType packet_type,
+ RtpPacketCounter counter,
+ size_t packet_size);
+#if BWE_TEST_LOGGING_COMPILE_TIME_ENABLE
+ void BweTestLoggingPlot(int64_t now_ms, uint32_t packet_ssrc);
+#endif
+
+ // Called on a timer, once a second, on the worker_queue_.
+ void PeriodicUpdate();
+
+ TaskQueueBase* const worker_queue_;
+ RTC_NO_UNIQUE_ADDRESS SequenceChecker pacer_checker_;
+ const uint32_t ssrc_;
+ const absl::optional<uint32_t> rtx_ssrc_;
+ const absl::optional<uint32_t> flexfec_ssrc_;
+ const bool populate_network2_timestamp_;
+ const bool send_side_bwe_with_overhead_;
+ Clock* const clock_;
+ RtpPacketHistory* const packet_history_;
+ Transport* const transport_;
+ RtcEventLog* const event_log_;
+#if BWE_TEST_LOGGING_COMPILE_TIME_ENABLE
+ const bool is_audio_;
+#endif
+ const bool need_rtp_packet_infos_;
+ VideoFecGenerator* const fec_generator_ RTC_GUARDED_BY(pacer_checker_);
+ absl::optional<uint16_t> last_sent_seq_ RTC_GUARDED_BY(pacer_checker_);
+ absl::optional<uint16_t> last_sent_rtx_seq_ RTC_GUARDED_BY(pacer_checker_);
+
+ TransportFeedbackObserver* const transport_feedback_observer_;
+ SendSideDelayObserver* const send_side_delay_observer_;
+ SendPacketObserver* const send_packet_observer_;
+ StreamDataCountersCallback* const rtp_stats_callback_;
+ BitrateStatisticsObserver* const bitrate_callback_;
+
+ mutable Mutex lock_;
+ bool media_has_been_sent_ RTC_GUARDED_BY(pacer_checker_);
+ bool force_part_of_allocation_ RTC_GUARDED_BY(lock_);
+ uint32_t timestamp_offset_ RTC_GUARDED_BY(worker_queue_);
+
+ SendDelayMap send_delays_ RTC_GUARDED_BY(lock_);
+ SendDelayMap::const_iterator max_delay_it_ RTC_GUARDED_BY(lock_);
+ // The sum of delays over a kSendSideDelayWindowMs sliding window.
+ int64_t sum_delays_ms_ RTC_GUARDED_BY(lock_);
+ uint64_t total_packet_send_delay_ms_ RTC_GUARDED_BY(lock_);
+ StreamDataCounters rtp_stats_ RTC_GUARDED_BY(lock_);
+ StreamDataCounters rtx_rtp_stats_ RTC_GUARDED_BY(lock_);
+ // One element per value in RtpPacketMediaType, with index matching value.
+ std::vector<RateStatistics> send_rates_ RTC_GUARDED_BY(lock_);
+ absl::optional<std::pair<FecProtectionParams, FecProtectionParams>>
+ pending_fec_params_ RTC_GUARDED_BY(lock_);
+
+ // Maps sent packets' sequence numbers to a tuple consisting of:
+ // 1. The timestamp, without the randomizing offset mandated by the RFC.
+ // 2. Whether the packet was the first in its frame.
+ // 3. Whether the packet was the last in its frame.
+ const std::unique_ptr<RtpSequenceNumberMap> rtp_sequence_number_map_
+ RTC_GUARDED_BY(worker_queue_);
+ RepeatingTaskHandle update_task_ RTC_GUARDED_BY(worker_queue_);
+ ScopedTaskSafety task_safety_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_RTP_RTCP_SOURCE_RTP_SENDER_EGRESS_H_
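Taken together, the header above gives the whole egress API: RtpSenderEgress performs the final send step, and the nested NonPacedPacketSender stands in for a pacer by feeding packets straight back into it. Below is a minimal sketch of that wiring, a hedged illustration rather than part of this patch; the config, history, packet and kRtxSsrc values are placeholders, and the PacketSequencer arguments follow the usage in the unit tests further down.

  // Sketch only: assumes a populated RtpRtcpInterface::Configuration config
  // and an RtpPacketHistory history, both created elsewhere in the module.
  RtpSenderEgress egress(config, &history);
  PacketSequencer sequencer(config.local_media_ssrc, kRtxSsrc,
                            /*require_marker_before_media_padding=*/true,
                            config.clock);
  RtpSenderEgress::NonPacedPacketSender non_paced(&egress, &sequencer);

  // Packets enqueued here skip pacing and go directly to SendPacket().
  std::vector<std::unique_ptr<RtpPacketToSend>> batch;
  batch.push_back(std::move(packet));  // A prepared RtpPacketToSend.
  non_paced.EnqueuePackets(std::move(batch));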
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_sender_egress_unittest.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_sender_egress_unittest.cc
new file mode 100644
index 0000000000..ddeeb8b002
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_sender_egress_unittest.cc
@@ -0,0 +1,971 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtp_sender_egress.h"
+
+#include <string>
+
+#include "absl/types/optional.h"
+#include "api/array_view.h"
+#include "api/call/transport.h"
+#include "api/units/data_size.h"
+#include "api/units/timestamp.h"
+#include "logging/rtc_event_log/mock/mock_rtc_event_log.h"
+#include "modules/rtp_rtcp/include/flexfec_sender.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "modules/rtp_rtcp/source/rtp_header_extensions.h"
+#include "modules/rtp_rtcp/source/rtp_packet_history.h"
+#include "modules/rtp_rtcp/source/rtp_packet_received.h"
+#include "modules/rtp_rtcp/source/rtp_packet_to_send.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "test/time_controller/simulated_time_controller.h"
+
+namespace webrtc {
+namespace {
+
+using ::testing::_;
+using ::testing::Field;
+using ::testing::NiceMock;
+using ::testing::Optional;
+using ::testing::StrictMock;
+
+constexpr Timestamp kStartTime = Timestamp::Millis(123456789);
+constexpr int kDefaultPayloadType = 100;
+constexpr int kFlexfecPayloadType = 110;
+constexpr uint16_t kStartSequenceNumber = 33;
+constexpr uint32_t kSsrc = 725242;
+constexpr uint32_t kRtxSsrc = 12345;
+constexpr uint32_t kFlexFecSsrc = 23456;
+enum : int {
+ kTransportSequenceNumberExtensionId = 1,
+ kAbsoluteSendTimeExtensionId,
+ kTransmissionOffsetExtensionId,
+ kVideoTimingExtensionId,
+};
+
+struct TestConfig {
+ explicit TestConfig(bool with_overhead) : with_overhead(with_overhead) {}
+ bool with_overhead = false;
+};
+
+class MockSendPacketObserver : public SendPacketObserver {
+ public:
+ MOCK_METHOD(void, OnSendPacket, (uint16_t, int64_t, uint32_t), (override));
+};
+
+class MockTransportFeedbackObserver : public TransportFeedbackObserver {
+ public:
+ MOCK_METHOD(void, OnAddPacket, (const RtpPacketSendInfo&), (override));
+ MOCK_METHOD(void,
+ OnTransportFeedback,
+ (const rtcp::TransportFeedback&),
+ (override));
+};
+
+class MockStreamDataCountersCallback : public StreamDataCountersCallback {
+ public:
+ MOCK_METHOD(void,
+ DataCountersUpdated,
+ (const StreamDataCounters& counters, uint32_t ssrc),
+ (override));
+};
+
+class MockSendSideDelayObserver : public SendSideDelayObserver {
+ public:
+ MOCK_METHOD(void,
+ SendSideDelayUpdated,
+ (int, int, uint64_t, uint32_t),
+ (override));
+};
+
+class FieldTrialConfig : public FieldTrialsView {
+ public:
+ FieldTrialConfig() : overhead_enabled_(false) {}
+ ~FieldTrialConfig() override {}
+
+ void SetOverHeadEnabled(bool enabled) { overhead_enabled_ = enabled; }
+
+ std::string Lookup(absl::string_view key) const override {
+ if (key == "WebRTC-SendSideBwe-WithOverhead") {
+ return overhead_enabled_ ? "Enabled" : "Disabled";
+ }
+ return "";
+ }
+
+ private:
+ bool overhead_enabled_;
+};
+
+struct TransmittedPacket {
+ TransmittedPacket(rtc::ArrayView<const uint8_t> data,
+ const PacketOptions& packet_options,
+ RtpHeaderExtensionMap* extensions)
+ : packet(extensions), options(packet_options) {
+ EXPECT_TRUE(packet.Parse(data));
+ }
+ RtpPacketReceived packet;
+ PacketOptions options;
+};
+
+class TestTransport : public Transport {
+ public:
+ explicit TestTransport(RtpHeaderExtensionMap* extensions)
+ : total_data_sent_(DataSize::Zero()), extensions_(extensions) {}
+ bool SendRtp(const uint8_t* packet,
+ size_t length,
+ const PacketOptions& options) override {
+ total_data_sent_ += DataSize::Bytes(length);
+ last_packet_.emplace(rtc::MakeArrayView(packet, length), options,
+ extensions_);
+ return true;
+ }
+
+ bool SendRtcp(const uint8_t*, size_t) override { RTC_CHECK_NOTREACHED(); }
+
+ absl::optional<TransmittedPacket> last_packet() { return last_packet_; }
+
+ private:
+ DataSize total_data_sent_;
+ absl::optional<TransmittedPacket> last_packet_;
+ RtpHeaderExtensionMap* const extensions_;
+};
+
+} // namespace
+
+class RtpSenderEgressTest : public ::testing::TestWithParam<TestConfig> {
+ protected:
+ RtpSenderEgressTest()
+ : time_controller_(kStartTime),
+ clock_(time_controller_.GetClock()),
+ transport_(&header_extensions_),
+ packet_history_(clock_, /*enable_rtx_padding_prioritization=*/true),
+ sequence_number_(kStartSequenceNumber) {
+ trials_.SetOverHeadEnabled(GetParam().with_overhead);
+ }
+
+ std::unique_ptr<RtpSenderEgress> CreateRtpSenderEgress() {
+ return std::make_unique<RtpSenderEgress>(DefaultConfig(), &packet_history_);
+ }
+
+ RtpRtcp::Configuration DefaultConfig() {
+ RtpRtcp::Configuration config;
+ config.clock = clock_;
+ config.outgoing_transport = &transport_;
+ config.local_media_ssrc = kSsrc;
+ config.rtx_send_ssrc = kRtxSsrc;
+ config.fec_generator = nullptr;
+ config.event_log = &mock_rtc_event_log_;
+ config.send_packet_observer = &send_packet_observer_;
+ config.rtp_stats_callback = &mock_rtp_stats_callback_;
+ config.transport_feedback_callback = &feedback_observer_;
+ config.populate_network2_timestamp = false;
+ config.field_trials = &trials_;
+ return config;
+ }
+
+ std::unique_ptr<RtpPacketToSend> BuildRtpPacket(bool marker_bit,
+ int64_t capture_time_ms) {
+ auto packet = std::make_unique<RtpPacketToSend>(&header_extensions_);
+ packet->SetSsrc(kSsrc);
+ packet->ReserveExtension<AbsoluteSendTime>();
+ packet->ReserveExtension<TransmissionOffset>();
+ packet->ReserveExtension<TransportSequenceNumber>();
+
+ packet->SetPayloadType(kDefaultPayloadType);
+ packet->set_packet_type(RtpPacketMediaType::kVideo);
+ packet->SetMarker(marker_bit);
+ packet->SetTimestamp(capture_time_ms * 90);
+ packet->set_capture_time(Timestamp::Millis(capture_time_ms));
+ packet->SetSequenceNumber(sequence_number_++);
+ return packet;
+ }
+
+ std::unique_ptr<RtpPacketToSend> BuildRtpPacket() {
+ return BuildRtpPacket(/*marker_bit=*/true, clock_->CurrentTime().ms());
+ }
+
+ GlobalSimulatedTimeController time_controller_;
+ Clock* const clock_;
+ NiceMock<MockRtcEventLog> mock_rtc_event_log_;
+ NiceMock<MockStreamDataCountersCallback> mock_rtp_stats_callback_;
+ NiceMock<MockSendPacketObserver> send_packet_observer_;
+ NiceMock<MockTransportFeedbackObserver> feedback_observer_;
+ RtpHeaderExtensionMap header_extensions_;
+ TestTransport transport_;
+ RtpPacketHistory packet_history_;
+ FieldTrialConfig trials_;
+ uint16_t sequence_number_;
+};
+
+TEST_P(RtpSenderEgressTest, TransportFeedbackObserverGetsCorrectByteCount) {
+ constexpr size_t kRtpOverheadBytesPerPacket = 12 + 8;
+ constexpr size_t kPayloadSize = 1400;
+ const uint16_t kTransportSequenceNumber = 17;
+
+ header_extensions_.RegisterByUri(kTransportSequenceNumberExtensionId,
+ TransportSequenceNumber::Uri());
+
+ const size_t expected_bytes = GetParam().with_overhead
+ ? kPayloadSize + kRtpOverheadBytesPerPacket
+ : kPayloadSize;
+
+ EXPECT_CALL(
+ feedback_observer_,
+ OnAddPacket(AllOf(
+ Field(&RtpPacketSendInfo::media_ssrc, kSsrc),
+ Field(&RtpPacketSendInfo::transport_sequence_number,
+ kTransportSequenceNumber),
+ Field(&RtpPacketSendInfo::rtp_sequence_number, kStartSequenceNumber),
+ Field(&RtpPacketSendInfo::length, expected_bytes),
+ Field(&RtpPacketSendInfo::pacing_info, PacedPacketInfo()))));
+
+ std::unique_ptr<RtpPacketToSend> packet = BuildRtpPacket();
+ packet->SetExtension<TransportSequenceNumber>(kTransportSequenceNumber);
+ packet->AllocatePayload(kPayloadSize);
+
+ std::unique_ptr<RtpSenderEgress> sender = CreateRtpSenderEgress();
+ sender->SendPacket(packet.get(), PacedPacketInfo());
+}
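+
+// A sanity check of the arithmetic above: kRtpOverheadBytesPerPacket is
+// 12 + 8, i.e. the 12-byte fixed RTP header plus 8 bytes for the extension
+// block carrying the transport sequence number, so with overhead enabled the
+// reported length is 1400 + 20 = 1420 bytes, and without it only the 1400
+// payload bytes are counted.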
+
+TEST_P(RtpSenderEgressTest, PacketOptionsIsRetransmitSetByPacketType) {
+ std::unique_ptr<RtpSenderEgress> sender = CreateRtpSenderEgress();
+
+ std::unique_ptr<RtpPacketToSend> media_packet = BuildRtpPacket();
+ media_packet->set_packet_type(RtpPacketMediaType::kVideo);
+ sender->SendPacket(media_packet.get(), PacedPacketInfo());
+ EXPECT_FALSE(transport_.last_packet()->options.is_retransmit);
+
+ std::unique_ptr<RtpPacketToSend> retransmission = BuildRtpPacket();
+ retransmission->set_packet_type(RtpPacketMediaType::kRetransmission);
+ retransmission->set_retransmitted_sequence_number(
+ media_packet->SequenceNumber());
+ sender->SendPacket(retransmission.get(), PacedPacketInfo());
+ EXPECT_TRUE(transport_.last_packet()->options.is_retransmit);
+}
+
+TEST_P(RtpSenderEgressTest, DoesNotSetIncludedInAllocationByDefault) {
+ std::unique_ptr<RtpSenderEgress> sender = CreateRtpSenderEgress();
+
+ std::unique_ptr<RtpPacketToSend> packet = BuildRtpPacket();
+ sender->SendPacket(packet.get(), PacedPacketInfo());
+ EXPECT_FALSE(transport_.last_packet()->options.included_in_feedback);
+ EXPECT_FALSE(transport_.last_packet()->options.included_in_allocation);
+}
+
+TEST_P(RtpSenderEgressTest,
+ SetsIncludedInFeedbackWhenTransportSequenceNumberExtensionIsRegistered) {
+ std::unique_ptr<RtpSenderEgress> sender = CreateRtpSenderEgress();
+
+ header_extensions_.RegisterByUri(kTransportSequenceNumberExtensionId,
+ TransportSequenceNumber::Uri());
+ std::unique_ptr<RtpPacketToSend> packet = BuildRtpPacket();
+ sender->SendPacket(packet.get(), PacedPacketInfo());
+ EXPECT_TRUE(transport_.last_packet()->options.included_in_feedback);
+}
+
+TEST_P(
+ RtpSenderEgressTest,
+ SetsIncludedInAllocationWhenTransportSequenceNumberExtensionIsRegistered) {
+ std::unique_ptr<RtpSenderEgress> sender = CreateRtpSenderEgress();
+
+ header_extensions_.RegisterByUri(kTransportSequenceNumberExtensionId,
+ TransportSequenceNumber::Uri());
+ std::unique_ptr<RtpPacketToSend> packet = BuildRtpPacket();
+ sender->SendPacket(packet.get(), PacedPacketInfo());
+ EXPECT_TRUE(transport_.last_packet()->options.included_in_allocation);
+}
+
+TEST_P(RtpSenderEgressTest,
+ SetsIncludedInAllocationWhenForcedAsPartOfAllocation) {
+ std::unique_ptr<RtpSenderEgress> sender = CreateRtpSenderEgress();
+ sender->ForceIncludeSendPacketsInAllocation(true);
+
+ std::unique_ptr<RtpPacketToSend> packet = BuildRtpPacket();
+ sender->SendPacket(packet.get(), PacedPacketInfo());
+ EXPECT_FALSE(transport_.last_packet()->options.included_in_feedback);
+ EXPECT_TRUE(transport_.last_packet()->options.included_in_allocation);
+}
+
+TEST_P(RtpSenderEgressTest, OnSendSideDelayUpdated) {
+ StrictMock<MockSendSideDelayObserver> send_side_delay_observer;
+ RtpRtcpInterface::Configuration config = DefaultConfig();
+ config.send_side_delay_observer = &send_side_delay_observer;
+ auto sender = std::make_unique<RtpSenderEgress>(config, &packet_history_);
+
+ // Send packet with 10 ms send-side delay. The average, max and total should
+ // be 10 ms.
+ EXPECT_CALL(send_side_delay_observer,
+ SendSideDelayUpdated(10, 10, 10, kSsrc));
+ int64_t capture_time_ms = clock_->TimeInMilliseconds();
+ time_controller_.AdvanceTime(TimeDelta::Millis(10));
+ sender->SendPacket(BuildRtpPacket(/*marker=*/true, capture_time_ms).get(),
+ PacedPacketInfo());
+
+ // Send another packet with 20 ms delay. The average, max and total should be
+ // 15, 20 and 30 ms respectively.
+ EXPECT_CALL(send_side_delay_observer,
+ SendSideDelayUpdated(15, 20, 30, kSsrc));
+ capture_time_ms = clock_->TimeInMilliseconds();
+ time_controller_.AdvanceTime(TimeDelta::Millis(20));
+ sender->SendPacket(BuildRtpPacket(/*marker=*/true, capture_time_ms).get(),
+ PacedPacketInfo());
+
+ // Send another packet at the same time, which replaces the last packet.
+ // Since this packet has 0 ms delay, the average is now 5 ms and max is 10 ms.
+ // The total counter stays the same though.
+ // TODO(terelius): It is not clear that this is the right behavior.
+ EXPECT_CALL(send_side_delay_observer, SendSideDelayUpdated(5, 10, 30, kSsrc));
+ capture_time_ms = clock_->TimeInMilliseconds();
+ sender->SendPacket(BuildRtpPacket(/*marker=*/true, capture_time_ms).get(),
+ PacedPacketInfo());
+
+ // Send a packet 1 second later. The earlier packets should have timed
+ // out, so both max and average should be the delay of this packet. The total
+ // keeps increasing.
+ time_controller_.AdvanceTime(TimeDelta::Seconds(1));
+ EXPECT_CALL(send_side_delay_observer, SendSideDelayUpdated(1, 1, 31, kSsrc));
+ capture_time_ms = clock_->TimeInMilliseconds();
+ time_controller_.AdvanceTime(TimeDelta::Millis(1));
+ sender->SendPacket(BuildRtpPacket(/*marker=*/true, capture_time_ms).get(),
+ PacedPacketInfo());
+}
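+
+// The expectations above follow the sliding-window arithmetic: after the
+// 10 ms and 20 ms packets the window holds {10, 20}, giving an average of
+// (10 + 20) / 2 = 15 ms and a max of 20 ms, while the cumulative total
+// (10 + 20 = 30 ms) is never windowed, which is why it keeps growing to
+// 31 ms in the final step.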
+
+TEST_P(RtpSenderEgressTest, WritesPacerExitToTimingExtension) {
+ std::unique_ptr<RtpSenderEgress> sender = CreateRtpSenderEgress();
+ header_extensions_.RegisterByUri(kVideoTimingExtensionId,
+ VideoTimingExtension::Uri());
+
+ std::unique_ptr<RtpPacketToSend> packet = BuildRtpPacket();
+ packet->SetExtension<VideoTimingExtension>(VideoSendTiming{});
+
+ const int kStoredTimeInMs = 100;
+ time_controller_.AdvanceTime(TimeDelta::Millis(kStoredTimeInMs));
+ sender->SendPacket(packet.get(), PacedPacketInfo());
+ ASSERT_TRUE(transport_.last_packet().has_value());
+
+ VideoSendTiming video_timing;
+ EXPECT_TRUE(
+ transport_.last_packet()->packet.GetExtension<VideoTimingExtension>(
+ &video_timing));
+ EXPECT_EQ(video_timing.pacer_exit_delta_ms, kStoredTimeInMs);
+}
+
+TEST_P(RtpSenderEgressTest, WritesNetwork2ToTimingExtension) {
+ RtpRtcpInterface::Configuration rtp_config = DefaultConfig();
+ rtp_config.populate_network2_timestamp = true;
+ auto sender = std::make_unique<RtpSenderEgress>(rtp_config, &packet_history_);
+ header_extensions_.RegisterByUri(kVideoTimingExtensionId,
+ VideoTimingExtension::Uri());
+
+ const uint16_t kPacerExitMs = 1234u;
+ std::unique_ptr<RtpPacketToSend> packet = BuildRtpPacket();
+ VideoSendTiming send_timing = {};
+ send_timing.pacer_exit_delta_ms = kPacerExitMs;
+ packet->SetExtension<VideoTimingExtension>(send_timing);
+
+ const int kStoredTimeInMs = 100;
+ time_controller_.AdvanceTime(TimeDelta::Millis(kStoredTimeInMs));
+ sender->SendPacket(packet.get(), PacedPacketInfo());
+ ASSERT_TRUE(transport_.last_packet().has_value());
+
+ VideoSendTiming video_timing;
+ EXPECT_TRUE(
+ transport_.last_packet()->packet.GetExtension<VideoTimingExtension>(
+ &video_timing));
+ EXPECT_EQ(video_timing.network2_timestamp_delta_ms, kStoredTimeInMs);
+ EXPECT_EQ(video_timing.pacer_exit_delta_ms, kPacerExitMs);
+}
+
+TEST_P(RtpSenderEgressTest, OnSendPacketUpdated) {
+ std::unique_ptr<RtpSenderEgress> sender = CreateRtpSenderEgress();
+ header_extensions_.RegisterByUri(kTransportSequenceNumberExtensionId,
+ TransportSequenceNumber::Uri());
+
+ const uint16_t kTransportSequenceNumber = 1;
+ EXPECT_CALL(send_packet_observer_,
+ OnSendPacket(kTransportSequenceNumber,
+ clock_->TimeInMilliseconds(), kSsrc));
+ std::unique_ptr<RtpPacketToSend> packet = BuildRtpPacket();
+ packet->SetExtension<TransportSequenceNumber>(kTransportSequenceNumber);
+ sender->SendPacket(packet.get(), PacedPacketInfo());
+}
+
+TEST_P(RtpSenderEgressTest, OnSendPacketNotUpdatedForRetransmits) {
+ std::unique_ptr<RtpSenderEgress> sender = CreateRtpSenderEgress();
+ header_extensions_.RegisterByUri(kTransportSequenceNumberExtensionId,
+ TransportSequenceNumber::Uri());
+
+ const uint16_t kTransportSequenceNumber = 1;
+ EXPECT_CALL(send_packet_observer_, OnSendPacket).Times(0);
+ std::unique_ptr<RtpPacketToSend> packet = BuildRtpPacket();
+ packet->SetExtension<TransportSequenceNumber>(kTransportSequenceNumber);
+ packet->set_packet_type(RtpPacketMediaType::kRetransmission);
+ packet->set_retransmitted_sequence_number(packet->SequenceNumber());
+ sender->SendPacket(packet.get(), PacedPacketInfo());
+}
+
+TEST_P(RtpSenderEgressTest, ReportsFecRate) {
+ constexpr int kNumPackets = 10;
+ constexpr TimeDelta kTimeBetweenPackets = TimeDelta::Millis(33);
+
+ std::unique_ptr<RtpSenderEgress> sender = CreateRtpSenderEgress();
+ DataSize total_fec_data_sent = DataSize::Zero();
+ // Send some packets, alternating between media and FEC.
+ for (size_t i = 0; i < kNumPackets; ++i) {
+ std::unique_ptr<RtpPacketToSend> media_packet = BuildRtpPacket();
+ media_packet->set_packet_type(RtpPacketMediaType::kVideo);
+ media_packet->SetPayloadSize(500);
+ sender->SendPacket(media_packet.get(), PacedPacketInfo());
+
+ std::unique_ptr<RtpPacketToSend> fec_packet = BuildRtpPacket();
+ fec_packet->set_packet_type(RtpPacketMediaType::kForwardErrorCorrection);
+ fec_packet->SetPayloadSize(123);
+ sender->SendPacket(fec_packet.get(), PacedPacketInfo());
+ total_fec_data_sent += DataSize::Bytes(fec_packet->size());
+
+ time_controller_.AdvanceTime(kTimeBetweenPackets);
+ }
+
+ EXPECT_NEAR(
+ (sender->GetSendRates()[RtpPacketMediaType::kForwardErrorCorrection])
+ .bps(),
+ (total_fec_data_sent / (kTimeBetweenPackets * kNumPackets)).bps(), 500);
+}
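+
+// Rough check of the expectation above: ten FEC packets with 123-byte
+// payloads (plus RTP headers) are sent over 10 * 33 ms = 330 ms, so the
+// expected rate is total_fec_data_sent / 330 ms, with 500 bps of
+// EXPECT_NEAR slack for the rate estimator's window edges.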
+
+TEST_P(RtpSenderEgressTest, BitrateCallbacks) {
+ class MockBitrateStaticsObserver : public BitrateStatisticsObserver {
+ public:
+ MOCK_METHOD(void, Notify, (uint32_t, uint32_t, uint32_t), (override));
+ } observer;
+
+ RtpRtcpInterface::Configuration config = DefaultConfig();
+ config.send_bitrate_observer = &observer;
+ auto sender = std::make_unique<RtpSenderEgress>(config, &packet_history_);
+
+ // Simulate sending kNumPackets packets at kPacketInterval intervals, with
+ // the number of packets chosen so that we fill (but don't overflow) the
+ // one-second averaging window.
+ const TimeDelta kWindowSize = TimeDelta::Seconds(1);
+ const TimeDelta kPacketInterval = TimeDelta::Millis(20);
+ const int kNumPackets = (kWindowSize - kPacketInterval) / kPacketInterval;
+
+ DataSize total_data_sent = DataSize::Zero();
+
+ // Send all but one of the packets; expect a call for each packet but don't
+ // verify the bitrate yet (measurements are noisy in the beginning).
+ for (int i = 0; i < kNumPackets; ++i) {
+ std::unique_ptr<RtpPacketToSend> packet = BuildRtpPacket();
+ packet->SetPayloadSize(500);
+ // Mark all packets as retransmissions - will cause total and retransmission
+ // rates to be equal.
+ packet->set_packet_type(RtpPacketMediaType::kRetransmission);
+ packet->set_retransmitted_sequence_number(packet->SequenceNumber());
+ total_data_sent += DataSize::Bytes(packet->size());
+
+ EXPECT_CALL(observer, Notify(_, _, kSsrc))
+ .WillOnce([&](uint32_t total_bitrate_bps,
+ uint32_t retransmission_bitrate_bps, uint32_t /*ssrc*/) {
+ TimeDelta window_size = i * kPacketInterval + TimeDelta::Millis(1);
+ // If there is just a single data point, there is no well defined
+ // averaging window so a bitrate of zero will be reported.
+ const double expected_bitrate_bps =
+ i == 0 ? 0.0 : (total_data_sent / window_size).bps();
+ EXPECT_NEAR(total_bitrate_bps, expected_bitrate_bps, 500);
+ EXPECT_NEAR(retransmission_bitrate_bps, expected_bitrate_bps, 500);
+ });
+
+ sender->SendPacket(packet.get(), PacedPacketInfo());
+ time_controller_.AdvanceTime(kPacketInterval);
+ }
+}
+
+TEST_P(RtpSenderEgressTest, DoesNotPutNotRetransmittablePacketsInHistory) {
+ std::unique_ptr<RtpSenderEgress> sender = CreateRtpSenderEgress();
+ packet_history_.SetStorePacketsStatus(
+ RtpPacketHistory::StorageMode::kStoreAndCull, 10);
+
+ std::unique_ptr<RtpPacketToSend> packet = BuildRtpPacket();
+ packet->set_allow_retransmission(false);
+ sender->SendPacket(packet.get(), PacedPacketInfo());
+ EXPECT_FALSE(packet_history_.GetPacketState(packet->SequenceNumber()));
+}
+
+TEST_P(RtpSenderEgressTest, PutsRetransmittablePacketsInHistory) {
+ std::unique_ptr<RtpSenderEgress> sender = CreateRtpSenderEgress();
+ packet_history_.SetStorePacketsStatus(
+ RtpPacketHistory::StorageMode::kStoreAndCull, 10);
+
+ std::unique_ptr<RtpPacketToSend> packet = BuildRtpPacket();
+ packet->set_allow_retransmission(true);
+ sender->SendPacket(packet.get(), PacedPacketInfo());
+ EXPECT_TRUE(packet_history_.GetPacketState(packet->SequenceNumber()));
+}
+
+TEST_P(RtpSenderEgressTest, DoesNotPutNonMediaInHistory) {
+ std::unique_ptr<RtpSenderEgress> sender = CreateRtpSenderEgress();
+ packet_history_.SetStorePacketsStatus(
+ RtpPacketHistory::StorageMode::kStoreAndCull, 10);
+
+ // Non-media packets, even when marked as retransmittable, are not put into
+ // the packet history.
+ std::unique_ptr<RtpPacketToSend> retransmission = BuildRtpPacket();
+ retransmission->set_allow_retransmission(true);
+ retransmission->set_packet_type(RtpPacketMediaType::kRetransmission);
+ retransmission->set_retransmitted_sequence_number(
+ retransmission->SequenceNumber());
+ sender->SendPacket(retransmission.get(), PacedPacketInfo());
+ EXPECT_FALSE(
+ packet_history_.GetPacketState(retransmission->SequenceNumber()));
+
+ std::unique_ptr<RtpPacketToSend> fec = BuildRtpPacket();
+ fec->set_allow_retransmission(true);
+ fec->set_packet_type(RtpPacketMediaType::kForwardErrorCorrection);
+ sender->SendPacket(fec.get(), PacedPacketInfo());
+ EXPECT_FALSE(packet_history_.GetPacketState(fec->SequenceNumber()));
+
+ std::unique_ptr<RtpPacketToSend> padding = BuildRtpPacket();
+ padding->set_allow_retransmission(true);
+ padding->set_packet_type(RtpPacketMediaType::kPadding);
+ sender->SendPacket(padding.get(), PacedPacketInfo());
+ EXPECT_FALSE(packet_history_.GetPacketState(padding->SequenceNumber()));
+}
+
+TEST_P(RtpSenderEgressTest, UpdatesSendStatusOfRetransmittedPackets) {
+ std::unique_ptr<RtpSenderEgress> sender = CreateRtpSenderEgress();
+ packet_history_.SetStorePacketsStatus(
+ RtpPacketHistory::StorageMode::kStoreAndCull, 10);
+
+ // Send a packet, putting it in the history.
+ std::unique_ptr<RtpPacketToSend> media_packet = BuildRtpPacket();
+ media_packet->set_allow_retransmission(true);
+ sender->SendPacket(media_packet.get(), PacedPacketInfo());
+ EXPECT_TRUE(packet_history_.GetPacketState(media_packet->SequenceNumber()));
+
+ // Simulate a retransmission, marking the packet as pending.
+ std::unique_ptr<RtpPacketToSend> retransmission =
+ packet_history_.GetPacketAndMarkAsPending(media_packet->SequenceNumber());
+ retransmission->set_retransmitted_sequence_number(
+ media_packet->SequenceNumber());
+ retransmission->set_packet_type(RtpPacketMediaType::kRetransmission);
+ EXPECT_TRUE(packet_history_.GetPacketState(media_packet->SequenceNumber()));
+
+ // Simulate the packet leaving the pacer; it should be marked as non-pending.
+ sender->SendPacket(retransmission.get(), PacedPacketInfo());
+ EXPECT_TRUE(packet_history_.GetPacketState(media_packet->SequenceNumber()));
+}
+
+TEST_P(RtpSenderEgressTest, StreamDataCountersCallbacks) {
+ std::unique_ptr<RtpSenderEgress> sender = CreateRtpSenderEgress();
+
+ const RtpPacketCounter kEmptyCounter;
+ RtpPacketCounter expected_transmitted_counter;
+ RtpPacketCounter expected_retransmission_counter;
+
+ // Send a media packet.
+ std::unique_ptr<RtpPacketToSend> media_packet = BuildRtpPacket();
+ media_packet->SetPayloadSize(6);
+ media_packet->SetSequenceNumber(kStartSequenceNumber);
+ expected_transmitted_counter.packets += 1;
+ expected_transmitted_counter.payload_bytes += media_packet->payload_size();
+ expected_transmitted_counter.header_bytes += media_packet->headers_size();
+
+ EXPECT_CALL(
+ mock_rtp_stats_callback_,
+ DataCountersUpdated(AllOf(Field(&StreamDataCounters::transmitted,
+ expected_transmitted_counter),
+ Field(&StreamDataCounters::retransmitted,
+ expected_retransmission_counter),
+ Field(&StreamDataCounters::fec, kEmptyCounter)),
+ kSsrc));
+ sender->SendPacket(media_packet.get(), PacedPacketInfo());
+ time_controller_.AdvanceTime(TimeDelta::Zero());
+
+ // Send a retransmission. Retransmissions are counted into both transmitted
+ // and retransmitted packet statistics.
+ std::unique_ptr<RtpPacketToSend> retransmission_packet = BuildRtpPacket();
+ retransmission_packet->set_packet_type(RtpPacketMediaType::kRetransmission);
+ retransmission_packet->SetSequenceNumber(kStartSequenceNumber);
+ retransmission_packet->set_retransmitted_sequence_number(
+ kStartSequenceNumber);
+ retransmission_packet->SetPayloadSize(7);
+ expected_transmitted_counter.packets += 1;
+ expected_transmitted_counter.payload_bytes +=
+ retransmission_packet->payload_size();
+ expected_transmitted_counter.header_bytes +=
+ retransmission_packet->headers_size();
+
+ expected_retransmission_counter.packets += 1;
+ expected_retransmission_counter.payload_bytes +=
+ retransmission_packet->payload_size();
+ expected_retransmission_counter.header_bytes +=
+ retransmission_packet->headers_size();
+
+ EXPECT_CALL(
+ mock_rtp_stats_callback_,
+ DataCountersUpdated(AllOf(Field(&StreamDataCounters::transmitted,
+ expected_transmitted_counter),
+ Field(&StreamDataCounters::retransmitted,
+ expected_retransmission_counter),
+ Field(&StreamDataCounters::fec, kEmptyCounter)),
+ kSsrc));
+ sender->SendPacket(retransmission_packet.get(), PacedPacketInfo());
+ time_controller_.AdvanceTime(TimeDelta::Zero());
+
+ // Send a padding packet.
+ std::unique_ptr<RtpPacketToSend> padding_packet = BuildRtpPacket();
+ padding_packet->set_packet_type(RtpPacketMediaType::kPadding);
+ padding_packet->SetPadding(224);
+ padding_packet->SetSequenceNumber(kStartSequenceNumber + 1);
+ expected_transmitted_counter.packets += 1;
+ expected_transmitted_counter.padding_bytes += padding_packet->padding_size();
+ expected_transmitted_counter.header_bytes += padding_packet->headers_size();
+
+ EXPECT_CALL(
+ mock_rtp_stats_callback_,
+ DataCountersUpdated(AllOf(Field(&StreamDataCounters::transmitted,
+ expected_transmitted_counter),
+ Field(&StreamDataCounters::retransmitted,
+ expected_retransmission_counter),
+ Field(&StreamDataCounters::fec, kEmptyCounter)),
+ kSsrc));
+ sender->SendPacket(padding_packet.get(), PacedPacketInfo());
+ time_controller_.AdvanceTime(TimeDelta::Zero());
+}
+
+TEST_P(RtpSenderEgressTest, StreamDataCountersCallbacksFec) {
+ std::unique_ptr<RtpSenderEgress> sender = CreateRtpSenderEgress();
+
+ const RtpPacketCounter kEmptyCounter;
+ RtpPacketCounter expected_transmitted_counter;
+ RtpPacketCounter expected_fec_counter;
+
+ // Send a media packet.
+ std::unique_ptr<RtpPacketToSend> media_packet = BuildRtpPacket();
+ media_packet->SetPayloadSize(6);
+ expected_transmitted_counter.packets += 1;
+ expected_transmitted_counter.payload_bytes += media_packet->payload_size();
+ expected_transmitted_counter.header_bytes += media_packet->headers_size();
+
+ EXPECT_CALL(
+ mock_rtp_stats_callback_,
+ DataCountersUpdated(
+ AllOf(Field(&StreamDataCounters::transmitted,
+ expected_transmitted_counter),
+ Field(&StreamDataCounters::retransmitted, kEmptyCounter),
+ Field(&StreamDataCounters::fec, expected_fec_counter)),
+ kSsrc));
+ sender->SendPacket(media_packet.get(), PacedPacketInfo());
+ time_controller_.AdvanceTime(TimeDelta::Zero());
+
+ // Send an FEC packet. FEC is counted into both transmitted and FEC packet
+ // statistics.
+ std::unique_ptr<RtpPacketToSend> fec_packet = BuildRtpPacket();
+ fec_packet->set_packet_type(RtpPacketMediaType::kForwardErrorCorrection);
+ fec_packet->SetPayloadSize(6);
+ expected_transmitted_counter.packets += 1;
+ expected_transmitted_counter.payload_bytes += fec_packet->payload_size();
+ expected_transmitted_counter.header_bytes += fec_packet->headers_size();
+
+ expected_fec_counter.packets += 1;
+ expected_fec_counter.payload_bytes += fec_packet->payload_size();
+ expected_fec_counter.header_bytes += fec_packet->headers_size();
+
+ EXPECT_CALL(
+ mock_rtp_stats_callback_,
+ DataCountersUpdated(
+ AllOf(Field(&StreamDataCounters::transmitted,
+ expected_transmitted_counter),
+ Field(&StreamDataCounters::retransmitted, kEmptyCounter),
+ Field(&StreamDataCounters::fec, expected_fec_counter)),
+ kSsrc));
+ sender->SendPacket(fec_packet.get(), PacedPacketInfo());
+ time_controller_.AdvanceTime(TimeDelta::Zero());
+}
+
+TEST_P(RtpSenderEgressTest, UpdatesDataCounters) {
+ std::unique_ptr<RtpSenderEgress> sender = CreateRtpSenderEgress();
+
+ const RtpPacketCounter kEmptyCounter;
+
+ // Send a media packet.
+ std::unique_ptr<RtpPacketToSend> media_packet = BuildRtpPacket();
+ media_packet->SetPayloadSize(6);
+ sender->SendPacket(media_packet.get(), PacedPacketInfo());
+ time_controller_.AdvanceTime(TimeDelta::Zero());
+
+ // Send an RTX retransmission packet.
+ std::unique_ptr<RtpPacketToSend> rtx_packet = BuildRtpPacket();
+ rtx_packet->set_packet_type(RtpPacketMediaType::kRetransmission);
+ rtx_packet->SetSsrc(kRtxSsrc);
+ rtx_packet->SetPayloadSize(7);
+ rtx_packet->set_retransmitted_sequence_number(media_packet->SequenceNumber());
+ sender->SendPacket(rtx_packet.get(), PacedPacketInfo());
+ time_controller_.AdvanceTime(TimeDelta::Zero());
+
+ StreamDataCounters rtp_stats;
+ StreamDataCounters rtx_stats;
+ sender->GetDataCounters(&rtp_stats, &rtx_stats);
+
+ EXPECT_EQ(rtp_stats.transmitted.packets, 1u);
+ EXPECT_EQ(rtp_stats.transmitted.payload_bytes, media_packet->payload_size());
+ EXPECT_EQ(rtp_stats.transmitted.padding_bytes, media_packet->padding_size());
+ EXPECT_EQ(rtp_stats.transmitted.header_bytes, media_packet->headers_size());
+ EXPECT_EQ(rtp_stats.retransmitted, kEmptyCounter);
+ EXPECT_EQ(rtp_stats.fec, kEmptyCounter);
+
+ // Retransmissions are counted both into transmitted and retransmitted
+ // packet counts.
+ EXPECT_EQ(rtx_stats.transmitted.packets, 1u);
+ EXPECT_EQ(rtx_stats.transmitted.payload_bytes, rtx_packet->payload_size());
+ EXPECT_EQ(rtx_stats.transmitted.padding_bytes, rtx_packet->padding_size());
+ EXPECT_EQ(rtx_stats.transmitted.header_bytes, rtx_packet->headers_size());
+ EXPECT_EQ(rtx_stats.retransmitted, rtx_stats.transmitted);
+ EXPECT_EQ(rtx_stats.fec, kEmptyCounter);
+}
+
+TEST_P(RtpSenderEgressTest, SendPacketUpdatesExtensions) {
+ header_extensions_.RegisterByUri(kVideoTimingExtensionId,
+ VideoTimingExtension::Uri());
+ header_extensions_.RegisterByUri(kAbsoluteSendTimeExtensionId,
+ AbsoluteSendTime::Uri());
+ header_extensions_.RegisterByUri(kTransmissionOffsetExtensionId,
+ TransmissionOffset::Uri());
+ std::unique_ptr<RtpSenderEgress> sender = CreateRtpSenderEgress();
+
+ std::unique_ptr<RtpPacketToSend> packet = BuildRtpPacket();
+ packet->set_packetization_finish_time(clock_->CurrentTime());
+
+ const int32_t kDiffMs = 10;
+ time_controller_.AdvanceTime(TimeDelta::Millis(kDiffMs));
+
+ sender->SendPacket(packet.get(), PacedPacketInfo());
+
+ RtpPacketReceived received_packet = transport_.last_packet()->packet;
+
+ EXPECT_EQ(received_packet.GetExtension<TransmissionOffset>(), kDiffMs * 90);
+
+ EXPECT_EQ(received_packet.GetExtension<AbsoluteSendTime>(),
+ AbsoluteSendTime::To24Bits(clock_->CurrentTime()));
+
+ VideoSendTiming timing;
+ EXPECT_TRUE(received_packet.GetExtension<VideoTimingExtension>(&timing));
+ EXPECT_EQ(timing.pacer_exit_delta_ms, kDiffMs);
+}
+
+TEST_P(RtpSenderEgressTest, SendPacketSetsPacketOptions) {
+ const uint16_t kPacketId = 42;
+ std::unique_ptr<RtpSenderEgress> sender = CreateRtpSenderEgress();
+ header_extensions_.RegisterByUri(kTransportSequenceNumberExtensionId,
+ TransportSequenceNumber::Uri());
+
+ std::unique_ptr<RtpPacketToSend> packet = BuildRtpPacket();
+ packet->SetExtension<TransportSequenceNumber>(kPacketId);
+ EXPECT_CALL(send_packet_observer_, OnSendPacket);
+ sender->SendPacket(packet.get(), PacedPacketInfo());
+
+ PacketOptions packet_options = transport_.last_packet()->options;
+
+ EXPECT_EQ(packet_options.packet_id, kPacketId);
+ EXPECT_TRUE(packet_options.included_in_allocation);
+ EXPECT_TRUE(packet_options.included_in_feedback);
+ EXPECT_FALSE(packet_options.is_retransmit);
+
+ // Send another packet as retransmission, verify options are populated.
+ std::unique_ptr<RtpPacketToSend> retransmission = BuildRtpPacket();
+ retransmission->SetExtension<TransportSequenceNumber>(kPacketId + 1);
+ retransmission->set_packet_type(RtpPacketMediaType::kRetransmission);
+ retransmission->set_retransmitted_sequence_number(packet->SequenceNumber());
+ sender->SendPacket(retransmission.get(), PacedPacketInfo());
+ EXPECT_TRUE(transport_.last_packet()->options.is_retransmit);
+}
+
+TEST_P(RtpSenderEgressTest, SendPacketUpdatesStats) {
+ const size_t kPayloadSize = 1000;
+ StrictMock<MockSendSideDelayObserver> send_side_delay_observer;
+
+ const rtc::ArrayView<const RtpExtensionSize> kNoRtpHeaderExtensionSizes;
+ FlexfecSender flexfec(kFlexfecPayloadType, kFlexFecSsrc, kSsrc, /*mid=*/"",
+ /*header_extensions=*/{}, kNoRtpHeaderExtensionSizes,
+ /*rtp_state=*/nullptr, time_controller_.GetClock());
+ RtpRtcpInterface::Configuration config = DefaultConfig();
+ config.fec_generator = &flexfec;
+ config.send_side_delay_observer = &send_side_delay_observer;
+ auto sender = std::make_unique<RtpSenderEgress>(config, &packet_history_);
+
+ header_extensions_.RegisterByUri(kTransportSequenceNumberExtensionId,
+ TransportSequenceNumber::Uri());
+
+ const int64_t capture_time_ms = clock_->TimeInMilliseconds();
+
+ std::unique_ptr<RtpPacketToSend> video_packet = BuildRtpPacket();
+ video_packet->set_packet_type(RtpPacketMediaType::kVideo);
+ video_packet->SetPayloadSize(kPayloadSize);
+ video_packet->SetExtension<TransportSequenceNumber>(1);
+
+ std::unique_ptr<RtpPacketToSend> rtx_packet = BuildRtpPacket();
+ rtx_packet->SetSsrc(kRtxSsrc);
+ rtx_packet->set_packet_type(RtpPacketMediaType::kRetransmission);
+ rtx_packet->set_retransmitted_sequence_number(video_packet->SequenceNumber());
+ rtx_packet->SetPayloadSize(kPayloadSize);
+ rtx_packet->SetExtension<TransportSequenceNumber>(2);
+
+ std::unique_ptr<RtpPacketToSend> fec_packet = BuildRtpPacket();
+ fec_packet->SetSsrc(kFlexFecSsrc);
+ fec_packet->set_packet_type(RtpPacketMediaType::kForwardErrorCorrection);
+ fec_packet->SetPayloadSize(kPayloadSize);
+ fec_packet->SetExtension<TransportSequenceNumber>(3);
+
+ const int64_t kDiffMs = 25;
+ time_controller_.AdvanceTime(TimeDelta::Millis(kDiffMs));
+
+ EXPECT_CALL(send_side_delay_observer,
+ SendSideDelayUpdated(kDiffMs, kDiffMs, kDiffMs, kSsrc));
+ EXPECT_CALL(
+ send_side_delay_observer,
+ SendSideDelayUpdated(kDiffMs, kDiffMs, 2 * kDiffMs, kFlexFecSsrc));
+
+ EXPECT_CALL(send_packet_observer_, OnSendPacket(1, capture_time_ms, kSsrc));
+
+ sender->SendPacket(video_packet.get(), PacedPacketInfo());
+
+ // Send packet observer not called for padding/retransmissions.
+ EXPECT_CALL(send_packet_observer_, OnSendPacket(2, _, _)).Times(0);
+ sender->SendPacket(rtx_packet.get(), PacedPacketInfo());
+
+ EXPECT_CALL(send_packet_observer_,
+ OnSendPacket(3, capture_time_ms, kFlexFecSsrc));
+ sender->SendPacket(fec_packet.get(), PacedPacketInfo());
+
+ time_controller_.AdvanceTime(TimeDelta::Zero());
+ StreamDataCounters rtp_stats;
+ StreamDataCounters rtx_stats;
+ sender->GetDataCounters(&rtp_stats, &rtx_stats);
+ EXPECT_EQ(rtp_stats.transmitted.packets, 2u);
+ EXPECT_EQ(rtp_stats.fec.packets, 1u);
+ EXPECT_EQ(rtx_stats.retransmitted.packets, 1u);
+}
+
+TEST_P(RtpSenderEgressTest, TransportFeedbackObserverWithRetransmission) {
+ const uint16_t kTransportSequenceNumber = 17;
+ header_extensions_.RegisterByUri(kTransportSequenceNumberExtensionId,
+ TransportSequenceNumber::Uri());
+ std::unique_ptr<RtpPacketToSend> retransmission = BuildRtpPacket();
+ retransmission->set_packet_type(RtpPacketMediaType::kRetransmission);
+ retransmission->SetExtension<TransportSequenceNumber>(
+ kTransportSequenceNumber);
+ uint16_t retransmitted_seq = retransmission->SequenceNumber() - 2;
+ retransmission->set_retransmitted_sequence_number(retransmitted_seq);
+
+ std::unique_ptr<RtpSenderEgress> sender = CreateRtpSenderEgress();
+ EXPECT_CALL(
+ feedback_observer_,
+ OnAddPacket(AllOf(
+ Field(&RtpPacketSendInfo::media_ssrc, kSsrc),
+ Field(&RtpPacketSendInfo::rtp_sequence_number, retransmitted_seq),
+ Field(&RtpPacketSendInfo::transport_sequence_number,
+ kTransportSequenceNumber))));
+ sender->SendPacket(retransmission.get(), PacedPacketInfo());
+}
+
+TEST_P(RtpSenderEgressTest, TransportFeedbackObserverWithRtxRetransmission) {
+ const uint16_t kTransportSequenceNumber = 17;
+ header_extensions_.RegisterByUri(kTransportSequenceNumberExtensionId,
+ TransportSequenceNumber::Uri());
+
+ std::unique_ptr<RtpPacketToSend> rtx_retransmission = BuildRtpPacket();
+ rtx_retransmission->SetSsrc(kRtxSsrc);
+ rtx_retransmission->SetExtension<TransportSequenceNumber>(
+ kTransportSequenceNumber);
+ rtx_retransmission->set_packet_type(RtpPacketMediaType::kRetransmission);
+ uint16_t rtx_retransmitted_seq = rtx_retransmission->SequenceNumber() - 2;
+ rtx_retransmission->set_retransmitted_sequence_number(rtx_retransmitted_seq);
+
+ std::unique_ptr<RtpSenderEgress> sender = CreateRtpSenderEgress();
+ EXPECT_CALL(
+ feedback_observer_,
+ OnAddPacket(AllOf(
+ Field(&RtpPacketSendInfo::media_ssrc, kSsrc),
+ Field(&RtpPacketSendInfo::rtp_sequence_number, rtx_retransmitted_seq),
+ Field(&RtpPacketSendInfo::transport_sequence_number,
+ kTransportSequenceNumber))));
+ sender->SendPacket(rtx_retransmission.get(), PacedPacketInfo());
+}
+
+TEST_P(RtpSenderEgressTest, TransportFeedbackObserverPadding) {
+ const uint16_t kTransportSequenceNumber = 17;
+ header_extensions_.RegisterByUri(kTransportSequenceNumberExtensionId,
+ TransportSequenceNumber::Uri());
+ std::unique_ptr<RtpPacketToSend> padding = BuildRtpPacket();
+ padding->SetPadding(224);
+ padding->set_packet_type(RtpPacketMediaType::kPadding);
+ padding->SetExtension<TransportSequenceNumber>(kTransportSequenceNumber);
+
+ std::unique_ptr<RtpSenderEgress> sender = CreateRtpSenderEgress();
+ EXPECT_CALL(
+ feedback_observer_,
+ OnAddPacket(AllOf(Field(&RtpPacketSendInfo::media_ssrc, absl::nullopt),
+ Field(&RtpPacketSendInfo::transport_sequence_number,
+ kTransportSequenceNumber))));
+ sender->SendPacket(padding.get(), PacedPacketInfo());
+}
+
+TEST_P(RtpSenderEgressTest, TransportFeedbackObserverRtxPadding) {
+ const uint16_t kTransportSequenceNumber = 17;
+ header_extensions_.RegisterByUri(kTransportSequenceNumberExtensionId,
+ TransportSequenceNumber::Uri());
+
+ std::unique_ptr<RtpPacketToSend> rtx_padding = BuildRtpPacket();
+ rtx_padding->SetPadding(224);
+ rtx_padding->SetSsrc(kRtxSsrc);
+ rtx_padding->set_packet_type(RtpPacketMediaType::kPadding);
+ rtx_padding->SetExtension<TransportSequenceNumber>(kTransportSequenceNumber);
+
+ std::unique_ptr<RtpSenderEgress> sender = CreateRtpSenderEgress();
+ EXPECT_CALL(
+ feedback_observer_,
+ OnAddPacket(AllOf(Field(&RtpPacketSendInfo::media_ssrc, absl::nullopt),
+ Field(&RtpPacketSendInfo::transport_sequence_number,
+ kTransportSequenceNumber))));
+ sender->SendPacket(rtx_padding.get(), PacedPacketInfo());
+}
+
+TEST_P(RtpSenderEgressTest, TransportFeedbackObserverFec) {
+ const uint16_t kTransportSequenceNumber = 17;
+ header_extensions_.RegisterByUri(kTransportSequenceNumberExtensionId,
+ TransportSequenceNumber::Uri());
+
+ std::unique_ptr<RtpPacketToSend> fec_packet = BuildRtpPacket();
+ fec_packet->SetSsrc(kFlexFecSsrc);
+ fec_packet->set_packet_type(RtpPacketMediaType::kForwardErrorCorrection);
+ fec_packet->SetExtension<TransportSequenceNumber>(kTransportSequenceNumber);
+
+ const rtc::ArrayView<const RtpExtensionSize> kNoRtpHeaderExtensionSizes;
+ FlexfecSender flexfec(kFlexfecPayloadType, kFlexFecSsrc, kSsrc, /*mid=*/"",
+ /*header_extensions=*/{}, kNoRtpHeaderExtensionSizes,
+ /*rtp_state=*/nullptr, time_controller_.GetClock());
+ RtpRtcpInterface::Configuration config = DefaultConfig();
+ config.fec_generator = &flexfec;
+ auto sender = std::make_unique<RtpSenderEgress>(config, &packet_history_);
+ EXPECT_CALL(
+ feedback_observer_,
+ OnAddPacket(AllOf(Field(&RtpPacketSendInfo::media_ssrc, absl::nullopt),
+ Field(&RtpPacketSendInfo::transport_sequence_number,
+ kTransportSequenceNumber))));
+ sender->SendPacket(fec_packet.get(), PacedPacketInfo());
+}
+
+INSTANTIATE_TEST_SUITE_P(WithAndWithoutOverhead,
+ RtpSenderEgressTest,
+ ::testing::Values(TestConfig(false),
+ TestConfig(true)));
+
+} // namespace webrtc
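Both this file and rtp_sender_unittest.cc below drive time with the same simulated-clock pattern: a GlobalSimulatedTimeController owns the clock, and time advances only when a test calls AdvanceTime(), which is what makes the millisecond-exact delay and bitrate expectations above deterministic. A condensed sketch of the pattern as used in the fixtures, with an illustrative start time:

  GlobalSimulatedTimeController time_controller(Timestamp::Millis(123456789));
  Clock* clock = time_controller.GetClock();

  const int64_t before_ms = clock->TimeInMilliseconds();
  time_controller.AdvanceTime(TimeDelta::Millis(25));  // No real waiting.
  RTC_DCHECK_EQ(clock->TimeInMilliseconds(), before_ms + 25);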
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_sender_unittest.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_sender_unittest.cc
new file mode 100644
index 0000000000..ea9277f612
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_sender_unittest.cc
@@ -0,0 +1,1339 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtp_sender.h"
+
+#include <memory>
+#include <vector>
+
+#include "absl/strings/string_view.h"
+#include "api/rtc_event_log/rtc_event.h"
+#include "api/transport/field_trial_based_config.h"
+#include "api/video/video_codec_constants.h"
+#include "api/video/video_timing.h"
+#include "logging/rtc_event_log/mock/mock_rtc_event_log.h"
+#include "modules/rtp_rtcp/include/rtp_cvo.h"
+#include "modules/rtp_rtcp/include/rtp_header_extension_map.h"
+#include "modules/rtp_rtcp/include/rtp_packet_sender.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "modules/rtp_rtcp/source/packet_sequencer.h"
+#include "modules/rtp_rtcp/source/rtp_format_video_generic.h"
+#include "modules/rtp_rtcp/source/rtp_generic_frame_descriptor.h"
+#include "modules/rtp_rtcp/source/rtp_generic_frame_descriptor_extension.h"
+#include "modules/rtp_rtcp/source/rtp_header_extensions.h"
+#include "modules/rtp_rtcp/source/rtp_packet_received.h"
+#include "modules/rtp_rtcp/source/rtp_packet_to_send.h"
+#include "modules/rtp_rtcp/source/rtp_sender_video.h"
+#include "modules/rtp_rtcp/source/video_fec_generator.h"
+#include "rtc_base/arraysize.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/rate_limiter.h"
+#include "rtc_base/strings/string_builder.h"
+#include "test/field_trial.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "test/mock_transport.h"
+#include "test/scoped_key_value_config.h"
+#include "test/time_controller/simulated_time_controller.h"
+
+namespace webrtc {
+
+namespace {
+enum : int { // The first valid value is 1.
+ kAbsoluteSendTimeExtensionId = 1,
+ kAudioLevelExtensionId,
+ kGenericDescriptorId,
+ kMidExtensionId,
+ kRepairedRidExtensionId,
+ kRidExtensionId,
+ kTransmissionTimeOffsetExtensionId,
+ kTransportSequenceNumberExtensionId,
+ kVideoRotationExtensionId,
+ kVideoTimingExtensionId,
+};
+
+const int kPayload = 100;
+const int kRtxPayload = 98;
+const uint32_t kTimestamp = 10;
+const uint16_t kSeqNum = 33;
+const uint32_t kSsrc = 725242;
+const uint32_t kRtxSsrc = 12345;
+const uint32_t kFlexFecSsrc = 45678;
+const uint64_t kStartTime = 123456789;
+const size_t kMaxPaddingSize = 224u;
+const uint8_t kPayloadData[] = {47, 11, 32, 93, 89};
+const int64_t kDefaultExpectedRetransmissionTimeMs = 125;
+const size_t kMaxPaddingLength = 224; // Value taken from rtp_sender.cc.
+const uint32_t kTimestampTicksPerMs = 90; // 90kHz clock.
+constexpr absl::string_view kMid = "mid";
+constexpr absl::string_view kRid = "f";
+constexpr bool kMarkerBit = true;
+
+using ::testing::_;
+using ::testing::AllOf;
+using ::testing::AtLeast;
+using ::testing::Contains;
+using ::testing::Each;
+using ::testing::ElementsAre;
+using ::testing::Eq;
+using ::testing::Field;
+using ::testing::Gt;
+using ::testing::IsEmpty;
+using ::testing::NiceMock;
+using ::testing::Not;
+using ::testing::Pointee;
+using ::testing::Property;
+using ::testing::Return;
+using ::testing::SizeIs;
+
+class MockRtpPacketPacer : public RtpPacketSender {
+ public:
+ MockRtpPacketPacer() {}
+ virtual ~MockRtpPacketPacer() {}
+
+ MOCK_METHOD(void,
+ EnqueuePackets,
+ (std::vector<std::unique_ptr<RtpPacketToSend>>),
+ (override));
+};
+
+} // namespace
+
+class RtpSenderTest : public ::testing::Test {
+ protected:
+ RtpSenderTest()
+ : time_controller_(Timestamp::Millis(kStartTime)),
+ clock_(time_controller_.GetClock()),
+ retransmission_rate_limiter_(clock_, 1000),
+ flexfec_sender_(0,
+ kFlexFecSsrc,
+ kSsrc,
+ "",
+ std::vector<RtpExtension>(),
+ std::vector<RtpExtensionSize>(),
+ nullptr,
+ clock_) {}
+
+ void SetUp() override { SetUpRtpSender(true, false, nullptr); }
+
+ void SetUpRtpSender(bool populate_network2,
+ bool always_send_mid_and_rid,
+ VideoFecGenerator* fec_generator) {
+ RtpRtcpInterface::Configuration config = GetDefaultConfig();
+ config.fec_generator = fec_generator;
+ config.populate_network2_timestamp = populate_network2;
+ config.always_send_mid_and_rid = always_send_mid_and_rid;
+ CreateSender(config);
+ }
+
+ RtpRtcpInterface::Configuration GetDefaultConfig() {
+ RtpRtcpInterface::Configuration config;
+ config.clock = clock_;
+ config.local_media_ssrc = kSsrc;
+ config.rtx_send_ssrc = kRtxSsrc;
+ config.event_log = &mock_rtc_event_log_;
+ config.retransmission_rate_limiter = &retransmission_rate_limiter_;
+ config.paced_sender = &mock_paced_sender_;
+ config.field_trials = &field_trials_;
+ // Configure rid unconditionally; it has an effect only if the
+ // corresponding header extension is enabled.
+ config.rid = std::string(kRid);
+ return config;
+ }
+
+ void CreateSender(const RtpRtcpInterface::Configuration& config) {
+ packet_history_ = std::make_unique<RtpPacketHistory>(
+ config.clock, config.enable_rtx_padding_prioritization);
+ sequencer_.emplace(kSsrc, kRtxSsrc,
+ /*require_marker_before_media_padding=*/!config.audio,
+ clock_);
+ rtp_sender_ = std::make_unique<RTPSender>(config, packet_history_.get(),
+ config.paced_sender);
+ sequencer_->set_media_sequence_number(kSeqNum);
+ rtp_sender_->SetTimestampOffset(0);
+ }
+
+ GlobalSimulatedTimeController time_controller_;
+ Clock* const clock_;
+ NiceMock<MockRtcEventLog> mock_rtc_event_log_;
+ MockRtpPacketPacer mock_paced_sender_;
+ RateLimiter retransmission_rate_limiter_;
+ FlexfecSender flexfec_sender_;
+
+ absl::optional<PacketSequencer> sequencer_;
+ std::unique_ptr<RtpPacketHistory> packet_history_;
+ std::unique_ptr<RTPSender> rtp_sender_;
+
+ const test::ScopedKeyValueConfig field_trials_;
+
+ std::unique_ptr<RtpPacketToSend> BuildRtpPacket(int payload_type,
+ bool marker_bit,
+ uint32_t timestamp,
+ int64_t capture_time_ms) {
+ auto packet = rtp_sender_->AllocatePacket();
+ packet->SetPayloadType(payload_type);
+ packet->set_packet_type(RtpPacketMediaType::kVideo);
+ packet->SetMarker(marker_bit);
+ packet->SetTimestamp(timestamp);
+ packet->set_capture_time(Timestamp::Millis(capture_time_ms));
+ return packet;
+ }
+
+ std::unique_ptr<RtpPacketToSend> SendPacket(int64_t capture_time_ms,
+ int payload_length) {
+ uint32_t timestamp = capture_time_ms * 90;
+ auto packet =
+ BuildRtpPacket(kPayload, kMarkerBit, timestamp, capture_time_ms);
+ packet->AllocatePayload(payload_length);
+ packet->set_allow_retransmission(true);
+
+ // Packet should be stored in a send bucket.
+ EXPECT_TRUE(
+ rtp_sender_->SendToNetwork(std::make_unique<RtpPacketToSend>(*packet)));
+ return packet;
+ }
+
+ std::unique_ptr<RtpPacketToSend> SendGenericPacket() {
+ const int64_t kCaptureTimeMs = clock_->TimeInMilliseconds();
+ // Use the maximum allowed size to catch corner cases where a packet is
+ // dropped for lack of capacity, either for the media packet itself or for
+ // an RTX packet containing it.
+ return SendPacket(kCaptureTimeMs,
+ /*payload_length=*/rtp_sender_->MaxRtpPacketSize() -
+ rtp_sender_->ExpectedPerPacketOverhead());
+ }
+
+ std::vector<std::unique_ptr<RtpPacketToSend>> GeneratePadding(
+ size_t target_size_bytes) {
+ return rtp_sender_->GeneratePadding(
+ target_size_bytes, /*media_has_been_sent=*/true,
+ sequencer_->CanSendPaddingOnMediaSsrc());
+ }
+
+ std::vector<std::unique_ptr<RtpPacketToSend>> Sequence(
+ std::vector<std::unique_ptr<RtpPacketToSend>> packets) {
+ for (auto& packet : packets) {
+ sequencer_->Sequence(*packet);
+ }
+ return packets;
+ }
+
+ size_t GenerateAndSendPadding(size_t target_size_bytes) {
+ size_t generated_bytes = 0;
+ for (auto& packet : GeneratePadding(target_size_bytes)) {
+ generated_bytes += packet->payload_size() + packet->padding_size();
+ rtp_sender_->SendToNetwork(std::move(packet));
+ }
+ return generated_bytes;
+ }
+
+ // The following are helpers for configuring the RTPSender. They must be
+ // called before sending any packets.
+
+ // Enable the retransmission stream with sizable packet storage.
+ void EnableRtx() {
+ // RTX needs to be able to read the source packets from the packet store.
+ // Pick a number of packets to store big enough for any unit test.
+ constexpr uint16_t kNumberOfPacketsToStore = 100;
+ packet_history_->SetStorePacketsStatus(
+ RtpPacketHistory::StorageMode::kStoreAndCull, kNumberOfPacketsToStore);
+ rtp_sender_->SetRtxPayloadType(kRtxPayload, kPayload);
+ rtp_sender_->SetRtxStatus(kRtxRetransmitted | kRtxRedundantPayloads);
+ }
+
+ // Enable sending of the MID header extension for both the primary SSRC and
+ // the RTX SSRC.
+ void EnableMidSending(absl::string_view mid) {
+ rtp_sender_->RegisterRtpHeaderExtension(RtpMid::Uri(), kMidExtensionId);
+ rtp_sender_->SetMid(mid);
+ }
+
+ // Enable sending of the RSID header extension for the primary SSRC and the
+ // RRSID header extension for the RTX SSRC.
+ void EnableRidSending() {
+ rtp_sender_->RegisterRtpHeaderExtension(RtpStreamId::Uri(),
+ kRidExtensionId);
+ rtp_sender_->RegisterRtpHeaderExtension(RepairedRtpStreamId::Uri(),
+ kRepairedRidExtensionId);
+ }
+};
+
+TEST_F(RtpSenderTest, AllocatePacketSetCsrc) {
+ // Configure rtp_sender with csrc.
+ std::vector<uint32_t> csrcs;
+ csrcs.push_back(0x23456789);
+ rtp_sender_->SetCsrcs(csrcs);
+
+ auto packet = rtp_sender_->AllocatePacket();
+
+ ASSERT_TRUE(packet);
+ EXPECT_EQ(rtp_sender_->SSRC(), packet->Ssrc());
+ EXPECT_EQ(csrcs, packet->Csrcs());
+}
+
+TEST_F(RtpSenderTest, AllocatePacketReserveExtensions) {
+ // Configure rtp_sender with extensions.
+ ASSERT_TRUE(rtp_sender_->RegisterRtpHeaderExtension(
+ TransmissionOffset::Uri(), kTransmissionTimeOffsetExtensionId));
+ ASSERT_TRUE(rtp_sender_->RegisterRtpHeaderExtension(
+ AbsoluteSendTime::Uri(), kAbsoluteSendTimeExtensionId));
+ ASSERT_TRUE(rtp_sender_->RegisterRtpHeaderExtension(AudioLevel::Uri(),
+ kAudioLevelExtensionId));
+ ASSERT_TRUE(rtp_sender_->RegisterRtpHeaderExtension(
+ TransportSequenceNumber::Uri(), kTransportSequenceNumberExtensionId));
+ ASSERT_TRUE(rtp_sender_->RegisterRtpHeaderExtension(
+ VideoOrientation::Uri(), kVideoRotationExtensionId));
+
+ auto packet = rtp_sender_->AllocatePacket();
+
+ ASSERT_TRUE(packet);
+ // Preallocate the BWE extensions that RtpSender sets itself.
+ EXPECT_TRUE(packet->HasExtension<TransmissionOffset>());
+ EXPECT_TRUE(packet->HasExtension<AbsoluteSendTime>());
+ EXPECT_TRUE(packet->HasExtension<TransportSequenceNumber>());
+ // Do not allocate media-specific extensions.
+ EXPECT_FALSE(packet->HasExtension<AudioLevel>());
+ EXPECT_FALSE(packet->HasExtension<VideoOrientation>());
+}
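+
+// The three extensions reserved above are the ones the send side stamps on
+// every packet for bandwidth estimation; media-specific extensions such as
+// AudioLevel and VideoOrientation are only added where applicable by the
+// media send paths.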
+
+TEST_F(RtpSenderTest, PaddingAlwaysAllowedOnAudio) {
+ RtpRtcpInterface::Configuration config = GetDefaultConfig();
+ config.audio = true;
+ CreateSender(config);
+
+ std::unique_ptr<RtpPacketToSend> audio_packet = rtp_sender_->AllocatePacket();
+ // Padding on audio stream allowed regardless of marker in the last packet.
+ audio_packet->SetMarker(false);
+ audio_packet->SetPayloadType(kPayload);
+ sequencer_->Sequence(*audio_packet);
+
+ const size_t kPaddingSize = 59;
+
+ EXPECT_CALL(
+ mock_paced_sender_,
+ EnqueuePackets(ElementsAre(AllOf(
+ Pointee(Property(&RtpPacketToSend::packet_type,
+ RtpPacketMediaType::kPadding)),
+ Pointee(Property(&RtpPacketToSend::padding_size, kPaddingSize))))));
+ EXPECT_EQ(kPaddingSize, GenerateAndSendPadding(kPaddingSize));
+
+ // The requested padding size is too small; a larger one will be sent.
+ const size_t kMinPaddingSize = 50;
+ EXPECT_CALL(mock_paced_sender_,
+ EnqueuePackets(ElementsAre(
+ AllOf(Pointee(Property(&RtpPacketToSend::packet_type,
+ RtpPacketMediaType::kPadding)),
+ Pointee(Property(&RtpPacketToSend::padding_size,
+ kMinPaddingSize))))));
+ EXPECT_EQ(kMinPaddingSize, GenerateAndSendPadding(kMinPaddingSize - 5));
+}
+
+TEST_F(RtpSenderTest, SendToNetworkForwardsPacketsToPacer) {
+ auto packet = BuildRtpPacket(kPayload, kMarkerBit, kTimestamp, 0);
+ Timestamp now = clock_->CurrentTime();
+
+ EXPECT_CALL(mock_paced_sender_,
+ EnqueuePackets(ElementsAre(AllOf(
+ Pointee(Property(&RtpPacketToSend::Ssrc, kSsrc)),
+ Pointee(Property(&RtpPacketToSend::capture_time, now))))));
+ EXPECT_TRUE(
+ rtp_sender_->SendToNetwork(std::make_unique<RtpPacketToSend>(*packet)));
+}
+
+TEST_F(RtpSenderTest, ReSendPacketForwardsPacketsToPacer) {
+ packet_history_->SetStorePacketsStatus(
+ RtpPacketHistory::StorageMode::kStoreAndCull, 10);
+ int64_t now_ms = clock_->TimeInMilliseconds();
+ auto packet = BuildRtpPacket(kPayload, kMarkerBit, kTimestamp, now_ms);
+ packet->SetSequenceNumber(kSeqNum);
+ packet->set_allow_retransmission(true);
+ packet_history_->PutRtpPacket(std::move(packet), Timestamp::Millis(now_ms));
+
+ EXPECT_CALL(mock_paced_sender_,
+ EnqueuePackets(ElementsAre(AllOf(
+ Pointee(Property(&RtpPacketToSend::Ssrc, kSsrc)),
+ Pointee(Property(&RtpPacketToSend::SequenceNumber, kSeqNum)),
+ Pointee(Property(&RtpPacketToSend::capture_time,
+ Timestamp::Millis(now_ms))),
+ Pointee(Property(&RtpPacketToSend::packet_type,
+ RtpPacketMediaType::kRetransmission))))));
+ EXPECT_TRUE(rtp_sender_->ReSendPacket(kSeqNum));
+}
+
+// This test sends 1 regular video packet, then 4 padding packets, and then
+// 1 more regular packet.
+TEST_F(RtpSenderTest, SendPadding) {
+ constexpr int kNumPaddingPackets = 4;
+ EXPECT_CALL(mock_paced_sender_, EnqueuePackets);
+ std::unique_ptr<RtpPacketToSend> media_packet =
+ SendPacket(/*capture_time_ms=*/clock_->TimeInMilliseconds(),
+ /*payload_size=*/100);
+ sequencer_->Sequence(*media_packet);
+
+ // Wait 50 ms before generating each padding packet.
+ for (int i = 0; i < kNumPaddingPackets; ++i) {
+ time_controller_.AdvanceTime(TimeDelta::Millis(50));
+ const size_t kPaddingTargetBytes = 100; // Request 100 bytes of padding.
+
+ // Padding should be sent on the media SSRC, with a continuous sequence
+ // number range. The size is forced to the maximum padding size and the
+ // timestamp is that of the last media packet.
+ EXPECT_CALL(mock_paced_sender_,
+ EnqueuePackets(ElementsAre(Pointee(AllOf(
+ Property(&RtpPacketToSend::Ssrc, kSsrc),
+ Property(&RtpPacketToSend::padding_size, kMaxPaddingLength),
+ Property(&RtpPacketToSend::SequenceNumber,
+ media_packet->SequenceNumber() + i + 1),
+ Property(&RtpPacketToSend::Timestamp,
+ media_packet->Timestamp()))))));
+ std::vector<std::unique_ptr<RtpPacketToSend>> padding_packets =
+ Sequence(GeneratePadding(kPaddingTargetBytes));
+ ASSERT_THAT(padding_packets, SizeIs(1));
+ rtp_sender_->SendToNetwork(std::move(padding_packets[0]));
+ }
+
+ // Send a regular video packet again.
+ EXPECT_CALL(
+ mock_paced_sender_,
+ EnqueuePackets(ElementsAre(Pointee(Property(
+ &RtpPacketToSend::Timestamp, Gt(media_packet->Timestamp()))))));
+
+ std::unique_ptr<RtpPacketToSend> next_media_packet =
+ SendPacket(/*capture_time_ms=*/clock_->TimeInMilliseconds(),
+ /*payload_size=*/100);
+}
+
+TEST_F(RtpSenderTest, NoPaddingAsFirstPacketWithoutBweExtensions) {
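+ // Padding sent before any media is only useful for bandwidth probing when
+ // it carries a BWE header extension (transport sequence number or abs send
+ // time, see the tests below); without one, no padding is generated.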
+ EXPECT_THAT(rtp_sender_->GeneratePadding(
+ /*target_size_bytes=*/100,
+ /*media_has_been_sent=*/false,
+ /*can_send_padding_on_media_ssrc=*/false),
+ IsEmpty());
+
+ // Don't send padding before media even with RTX.
+ EnableRtx();
+ EXPECT_THAT(rtp_sender_->GeneratePadding(
+ /*target_size_bytes=*/100,
+ /*media_has_been_sent=*/false,
+ /*can_send_padding_on_media_ssrc=*/false),
+ IsEmpty());
+}
+
+TEST_F(RtpSenderTest, RequiresRtxSsrcToEnableRtx) {
+ RtpRtcpInterface::Configuration config = GetDefaultConfig();
+ config.rtx_send_ssrc = absl::nullopt;
+ RTPSender rtp_sender(config, packet_history_.get(), config.paced_sender);
+ rtp_sender.SetRtxPayloadType(kRtxPayload, kPayload);
+
+ rtp_sender.SetRtxStatus(kRtxRetransmitted);
+
+ EXPECT_EQ(rtp_sender.RtxStatus(), kRtxOff);
+}
+
+TEST_F(RtpSenderTest, RequiresRtxPayloadTypesToEnableRtx) {
+ RtpRtcpInterface::Configuration config = GetDefaultConfig();
+ config.rtx_send_ssrc = kRtxSsrc;
+ RTPSender rtp_sender(config, packet_history_.get(), config.paced_sender);
+
+ rtp_sender.SetRtxStatus(kRtxRetransmitted);
+
+ EXPECT_EQ(rtp_sender.RtxStatus(), kRtxOff);
+}
+
+TEST_F(RtpSenderTest, CanEnableRtxWhenRtxSsrcAndPayloadTypeAreConfigured) {
+ RtpRtcpInterface::Configuration config = GetDefaultConfig();
+ config.rtx_send_ssrc = kRtxSsrc;
+ RTPSender rtp_sender(config, packet_history_.get(), config.paced_sender);
+ rtp_sender.SetRtxPayloadType(kRtxPayload, kPayload);
+
+ ASSERT_EQ(rtp_sender.RtxStatus(), kRtxOff);
+ rtp_sender.SetRtxStatus(kRtxRetransmitted);
+
+ EXPECT_EQ(rtp_sender.RtxStatus(), kRtxRetransmitted);
+}
+
+TEST_F(RtpSenderTest, AllowPaddingAsFirstPacketOnRtxWithTransportCc) {
+ ASSERT_TRUE(rtp_sender_->RegisterRtpHeaderExtension(
+ TransportSequenceNumber::Uri(), kTransportSequenceNumberExtensionId));
+
+ // Padding can't be sent as first packet on media SSRC since we don't know
+ // what payload type to assign.
+ EXPECT_THAT(rtp_sender_->GeneratePadding(
+ /*target_size_bytes=*/100,
+ /*media_has_been_sent=*/false,
+ /*can_send_padding_on_media_ssrc=*/false),
+ IsEmpty());
+
+ // With transportcc padding can be sent as first packet on the RTX SSRC.
+ EnableRtx();
+ EXPECT_THAT(rtp_sender_->GeneratePadding(
+ /*target_size_bytes=*/100,
+ /*media_has_been_sent=*/false,
+ /*can_send_padding_on_media_ssrc=*/false),
+ Not(IsEmpty()));
+}
+
+TEST_F(RtpSenderTest, AllowPaddingAsFirstPacketOnRtxWithAbsSendTime) {
+ ASSERT_TRUE(rtp_sender_->RegisterRtpHeaderExtension(
+ AbsoluteSendTime::Uri(), kAbsoluteSendTimeExtensionId));
+
+ // Padding can't be sent as first packet on media SSRC since we don't know
+ // what payload type to assign.
+ EXPECT_THAT(rtp_sender_->GeneratePadding(
+ /*target_size_bytes=*/100,
+ /*media_has_been_sent=*/false,
+ /*can_send_padding_on_media_ssrc=*/false),
+ IsEmpty());
+
+ // With abs send time, padding can be sent as first packet on the RTX SSRC.
+ EnableRtx();
+ EXPECT_THAT(rtp_sender_->GeneratePadding(
+ /*target_size_bytes=*/100,
+ /*media_has_been_sent=*/false,
+ /*can_send_padding_on_media_ssrc=*/false),
+ Not(IsEmpty()));
+}
+
+TEST_F(RtpSenderTest, UpdatesTimestampsOnPlainRtxPadding) {
+ EnableRtx();
+ // Timestamps are set based on capture time in RtpSenderTest.
+ const int64_t start_time = clock_->TimeInMilliseconds();
+ const uint32_t start_timestamp = start_time * kTimestampTicksPerMs;
+
+ // Start by sending one media packet.
+ EXPECT_CALL(
+ mock_paced_sender_,
+ EnqueuePackets(ElementsAre(
+ AllOf(Pointee(Property(&RtpPacketToSend::padding_size, 0u)),
+ Pointee(Property(&RtpPacketToSend::Timestamp, start_timestamp)),
+ Pointee(Property(&RtpPacketToSend::capture_time,
+ Timestamp::Millis(start_time)))))));
+ std::unique_ptr<RtpPacketToSend> media_packet =
+ SendPacket(start_time, /*payload_size=*/600);
+ sequencer_->Sequence(*media_packet);
+
+ // Advance time before sending padding.
+ const TimeDelta kTimeDiff = TimeDelta::Millis(17);
+ time_controller_.AdvanceTime(kTimeDiff);
+
+ // Timestamps on padding should be offset from the sent media.
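+ // With the 90 kHz video clock used in these tests, kTimestampTicksPerMs is
+ // 90, so the 17 ms advance corresponds to 17 * 90 RTP ticks.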
+ EXPECT_THAT(
+ Sequence(GeneratePadding(/*target_size_bytes=*/100)),
+ Each(Pointee(AllOf(
+ Property(&RtpPacketToSend::padding_size, kMaxPaddingLength),
+ Property(&RtpPacketToSend::Timestamp,
+ start_timestamp + (kTimestampTicksPerMs * kTimeDiff.ms())),
+ Property(&RtpPacketToSend::capture_time,
+ Timestamp::Millis(start_time) + kTimeDiff)))));
+}
+
+TEST_F(RtpSenderTest, KeepsTimestampsOnPayloadPadding) {
+ ASSERT_TRUE(rtp_sender_->RegisterRtpHeaderExtension(
+ TransportSequenceNumber::Uri(), kTransportSequenceNumberExtensionId));
+ EnableRtx();
+ // Timestamps are set based on capture time in RtpSenderTest.
+ const int64_t start_time = clock_->TimeInMilliseconds();
+ const uint32_t start_timestamp = start_time * kTimestampTicksPerMs;
+ const size_t kPayloadSize = 200;
+ const size_t kRtxHeaderSize = 2;
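+ // An RTX packet prepends the 2-byte original sequence number (OSN) of the
+ // retransmitted packet to its payload (RFC 4588), hence kRtxHeaderSize.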
+
+ // Start by sending one media packet and putting in the packet history.
+ EXPECT_CALL(
+ mock_paced_sender_,
+ EnqueuePackets(ElementsAre(
+ AllOf(Pointee(Property(&RtpPacketToSend::padding_size, 0u)),
+ Pointee(Property(&RtpPacketToSend::Timestamp, start_timestamp)),
+ Pointee(Property(&RtpPacketToSend::capture_time,
+ Timestamp::Millis(start_time)))))));
+ std::unique_ptr<RtpPacketToSend> media_packet =
+ SendPacket(start_time, kPayloadSize);
+ packet_history_->PutRtpPacket(std::move(media_packet),
+ Timestamp::Millis(start_time));
+
+ // Advance time before sending padding.
+ const TimeDelta kTimeDiff = TimeDelta::Millis(17);
+ time_controller_.AdvanceTime(kTimeDiff);
+
+ // Timestamps on payload padding should be kept from the original packet.
+ EXPECT_THAT(GeneratePadding(/*target_size_bytes=*/100),
+ Each(AllOf(Pointee(Property(&RtpPacketToSend::padding_size, 0u)),
+ Pointee(Property(&RtpPacketToSend::payload_size,
+ kPayloadSize + kRtxHeaderSize)),
+ Pointee(Property(&RtpPacketToSend::Timestamp,
+ start_timestamp)),
+ Pointee(Property(&RtpPacketToSend::capture_time,
+ Timestamp::Millis(start_time))))));
+}
+
+// Test that the MID header extension is included on sent packets when
+// configured.
+TEST_F(RtpSenderTest, MidIncludedOnSentPackets) {
+ EnableMidSending(kMid);
+
+ // Send a couple packets, expect both packets to have the MID set.
+ EXPECT_CALL(mock_paced_sender_,
+ EnqueuePackets(ElementsAre(Pointee(
+ Property(&RtpPacketToSend::GetExtension<RtpMid>, kMid)))))
+ .Times(2);
+ SendGenericPacket();
+ SendGenericPacket();
+}
+
+TEST_F(RtpSenderTest, RidIncludedOnSentPackets) {
+ EnableRidSending();
+
+ EXPECT_CALL(mock_paced_sender_,
+ EnqueuePackets(ElementsAre(Pointee(Property(
+ &RtpPacketToSend::GetExtension<RtpStreamId>, kRid)))));
+ SendGenericPacket();
+}
+
+TEST_F(RtpSenderTest, RidIncludedOnRtxSentPackets) {
+ EnableRtx();
+ EnableRidSending();
+
+ EXPECT_CALL(mock_paced_sender_,
+ EnqueuePackets(ElementsAre(Pointee(AllOf(
+ Property(&RtpPacketToSend::GetExtension<RtpStreamId>, kRid),
+ Property(&RtpPacketToSend::HasExtension<RepairedRtpStreamId>,
+ false))))))
+ .WillOnce([&](std::vector<std::unique_ptr<RtpPacketToSend>> packets) {
+ sequencer_->Sequence(*packets[0]);
+ packet_history_->PutRtpPacket(std::move(packets[0]),
+ clock_->CurrentTime());
+ });
+ SendGenericPacket();
+
+ EXPECT_CALL(
+ mock_paced_sender_,
+ EnqueuePackets(ElementsAre(Pointee(AllOf(
+ Property(&RtpPacketToSend::GetExtension<RepairedRtpStreamId>, kRid),
+ Property(&RtpPacketToSend::HasExtension<RtpStreamId>, false))))));
+ rtp_sender_->ReSendPacket(kSeqNum);
+}
+
+TEST_F(RtpSenderTest, MidAndRidNotIncludedOnSentPacketsAfterAck) {
+ EnableMidSending(kMid);
+ EnableRidSending();
+
+ // This first packet should include both MID and RID.
+ EXPECT_CALL(
+ mock_paced_sender_,
+ EnqueuePackets(ElementsAre(Pointee(AllOf(
+ Property(&RtpPacketToSend::GetExtension<RtpMid>, kMid),
+ Property(&RtpPacketToSend::GetExtension<RtpStreamId>, kRid))))));
+ auto first_built_packet = SendGenericPacket();
+ rtp_sender_->OnReceivedAckOnSsrc(first_built_packet->SequenceNumber());
+
+ // The second packet should include neither since an ack was received.
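+ // (The ack implies the receiver has already resolved the SSRC-to-MID/RID
+ // mapping, so the extensions would only waste header space.)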
+ EXPECT_CALL(
+ mock_paced_sender_,
+ EnqueuePackets(ElementsAre(Pointee(AllOf(
+ Property(&RtpPacketToSend::HasExtension<RtpMid>, false),
+ Property(&RtpPacketToSend::HasExtension<RtpStreamId>, false))))));
+ SendGenericPacket();
+}
+
+TEST_F(RtpSenderTest, MidAndRidAlwaysIncludedOnSentPacketsWhenConfigured) {
+ SetUpRtpSender(false, /*always_send_mid_and_rid=*/true, nullptr);
+ EnableMidSending(kMid);
+ EnableRidSending();
+
+ // Send two media packets: one before and one after the ack.
+ // Due to the configuration, both sent packets should contain MID and RID.
+ EXPECT_CALL(
+ mock_paced_sender_,
+ EnqueuePackets(ElementsAre(Pointee(
+ AllOf(Property(&RtpPacketToSend::GetExtension<RtpMid>, kMid),
+ Property(&RtpPacketToSend::GetExtension<RtpStreamId>, kRid))))))
+ .Times(2);
+ auto first_built_packet = SendGenericPacket();
+ rtp_sender_->OnReceivedAckOnSsrc(first_built_packet->SequenceNumber());
+ SendGenericPacket();
+}
+
+// Test that the first RTX packet includes both MID and RRID even if the packet
+// being retransmitted did not have MID or RID. The MID and RID are needed on
+// the first packets for a given SSRC, and RTX packets are sent on a separate
+// SSRC.
+TEST_F(RtpSenderTest, MidAndRidIncludedOnFirstRtxPacket) {
+ EnableRtx();
+ EnableMidSending(kMid);
+ EnableRidSending();
+
+ // This first packet will include both MID and RID.
+ EXPECT_CALL(mock_paced_sender_, EnqueuePackets);
+ auto first_built_packet = SendGenericPacket();
+ rtp_sender_->OnReceivedAckOnSsrc(first_built_packet->SequenceNumber());
+
+ // The second packet will include neither since an ack was received; put
+ // it in the packet history for retransmission.
+ EXPECT_CALL(mock_paced_sender_, EnqueuePackets(SizeIs(1)))
+ .WillOnce([&](std::vector<std::unique_ptr<RtpPacketToSend>> packets) {
+ packet_history_->PutRtpPacket(std::move(packets[0]),
+ clock_->CurrentTime());
+ });
+ auto second_built_packet = SendGenericPacket();
+
+ // The first RTX packet should include MID and RRID.
+ EXPECT_CALL(mock_paced_sender_,
+ EnqueuePackets(ElementsAre(Pointee(AllOf(
+ Property(&RtpPacketToSend::GetExtension<RtpMid>, kMid),
+ Property(&RtpPacketToSend::GetExtension<RepairedRtpStreamId>,
+ kRid))))));
+ rtp_sender_->ReSendPacket(second_built_packet->SequenceNumber());
+}
+
+// Test that RTX packets sent after receiving an ACK on the RTX SSRC do not
+// include either MID or RRID, even if the packet being retransmitted had a
+// MID or RID.
+TEST_F(RtpSenderTest, MidAndRidNotIncludedOnRtxPacketsAfterAck) {
+ EnableRtx();
+ EnableMidSending(kMid);
+ EnableRidSending();
+
+ // This first packet will include both MID and RID.
+ auto first_built_packet = SendGenericPacket();
+ sequencer_->Sequence(*first_built_packet);
+ packet_history_->PutRtpPacket(
+ std::make_unique<RtpPacketToSend>(*first_built_packet),
+ /*send_time=*/clock_->CurrentTime());
+ rtp_sender_->OnReceivedAckOnSsrc(first_built_packet->SequenceNumber());
+
+ // The second packet will include neither since an ack was received.
+ auto second_built_packet = SendGenericPacket();
+ sequencer_->Sequence(*second_built_packet);
+ packet_history_->PutRtpPacket(
+ std::make_unique<RtpPacketToSend>(*second_built_packet),
+ /*send_time=*/clock_->CurrentTime());
+
+ // The first RTX packet will include MID and RRID.
+ EXPECT_CALL(mock_paced_sender_, EnqueuePackets(SizeIs(1)))
+ .WillOnce([&](std::vector<std::unique_ptr<RtpPacketToSend>> packets) {
+ rtp_sender_->OnReceivedAckOnRtxSsrc(packets[0]->SequenceNumber());
+ packet_history_->MarkPacketAsSent(
+ *packets[0]->retransmitted_sequence_number());
+ });
+ rtp_sender_->ReSendPacket(second_built_packet->SequenceNumber());
+
+ // The second and third RTX packets should include neither MID nor RRID.
+ EXPECT_CALL(mock_paced_sender_,
+ EnqueuePackets(ElementsAre(Pointee(AllOf(
+ Property(&RtpPacketToSend::HasExtension<RtpMid>, false),
+ Property(&RtpPacketToSend::HasExtension<RepairedRtpStreamId>,
+ false))))))
+ .Times(2);
+ rtp_sender_->ReSendPacket(first_built_packet->SequenceNumber());
+ rtp_sender_->ReSendPacket(second_built_packet->SequenceNumber());
+}
+
+TEST_F(RtpSenderTest, MidAndRidAlwaysIncludedOnRtxPacketsWhenConfigured) {
+ SetUpRtpSender(false, /*always_send_mid_and_rid=*/true, nullptr);
+ EnableRtx();
+ EnableMidSending(kMid);
+ EnableRidSending();
+
+ // Send two media packets: one before and one after the ack.
+ EXPECT_CALL(
+ mock_paced_sender_,
+ EnqueuePackets(ElementsAre(Pointee(
+ AllOf(Property(&RtpPacketToSend::GetExtension<RtpMid>, kMid),
+ Property(&RtpPacketToSend::GetExtension<RtpStreamId>, kRid))))))
+ .Times(2)
+ .WillRepeatedly(
+ [&](std::vector<std::unique_ptr<RtpPacketToSend>> packets) {
+ packet_history_->PutRtpPacket(std::move(packets[0]),
+ clock_->CurrentTime());
+ });
+ auto media_packet1 = SendGenericPacket();
+ rtp_sender_->OnReceivedAckOnSsrc(media_packet1->SequenceNumber());
+ auto media_packet2 = SendGenericPacket();
+
+ // Send three RTX packets, covering different orderings with respect to the
+ // media and RTX acks.
+ // Due to the configuration, all sent packets should contain MID
+ // and either RID (media) or RRID (RTX).
+ EXPECT_CALL(mock_paced_sender_,
+ EnqueuePackets(ElementsAre(Pointee(AllOf(
+ Property(&RtpPacketToSend::GetExtension<RtpMid>, kMid),
+ Property(&RtpPacketToSend::GetExtension<RepairedRtpStreamId>,
+ kRid))))))
+ .Times(3)
+ .WillRepeatedly(
+ [&](std::vector<std::unique_ptr<RtpPacketToSend>> packets) {
+ rtp_sender_->OnReceivedAckOnRtxSsrc(packets[0]->SequenceNumber());
+ packet_history_->MarkPacketAsSent(
+ *packets[0]->retransmitted_sequence_number());
+ });
+ rtp_sender_->ReSendPacket(media_packet2->SequenceNumber());
+ rtp_sender_->ReSendPacket(media_packet1->SequenceNumber());
+ rtp_sender_->ReSendPacket(media_packet2->SequenceNumber());
+}
+
+// Test that if the RtpState indicates an ACK has been received on that SSRC
+// then neither the MID nor RID header extensions will be sent.
+TEST_F(RtpSenderTest, MidAndRidNotIncludedOnSentPacketsAfterRtpStateRestored) {
+ EnableMidSending(kMid);
+ EnableRidSending();
+
+ RtpState state = rtp_sender_->GetRtpState();
+ EXPECT_FALSE(state.ssrc_has_acked);
+ state.ssrc_has_acked = true;
+ rtp_sender_->SetRtpState(state);
+
+ EXPECT_CALL(
+ mock_paced_sender_,
+ EnqueuePackets(ElementsAre(Pointee(AllOf(
+ Property(&RtpPacketToSend::HasExtension<RtpMid>, false),
+ Property(&RtpPacketToSend::HasExtension<RtpStreamId>, false))))));
+ SendGenericPacket();
+}
+
+// Test that if the RTX RtpState indicates an ACK has been received on that
+// RTX SSRC then neither the MID nor RRID header extensions will be sent on
+// RTX packets.
+TEST_F(RtpSenderTest, MidAndRridNotIncludedOnRtxPacketsAfterRtpStateRestored) {
+ EnableRtx();
+ EnableMidSending(kMid);
+ EnableRidSending();
+
+ RtpState rtx_state = rtp_sender_->GetRtxRtpState();
+ EXPECT_FALSE(rtx_state.ssrc_has_acked);
+ rtx_state.ssrc_has_acked = true;
+ rtp_sender_->SetRtxRtpState(rtx_state);
+
+ EXPECT_CALL(mock_paced_sender_, EnqueuePackets(SizeIs(1)))
+ .WillOnce([&](std::vector<std::unique_ptr<RtpPacketToSend>> packets) {
+ packet_history_->PutRtpPacket(std::move(packets[0]),
+ clock_->CurrentTime());
+ });
+ auto built_packet = SendGenericPacket();
+
+ EXPECT_CALL(
+ mock_paced_sender_,
+ EnqueuePackets(ElementsAre(Pointee(AllOf(
+ Property(&RtpPacketToSend::HasExtension<RtpMid>, false),
+ Property(&RtpPacketToSend::HasExtension<RtpStreamId>, false))))));
+ ASSERT_LT(0, rtp_sender_->ReSendPacket(built_packet->SequenceNumber()));
+}
+
+TEST_F(RtpSenderTest, RespectsNackBitrateLimit) {
+ const int32_t kPacketSize = 1400;
+ const int32_t kNumPackets = 30;
+ retransmission_rate_limiter_.SetMaxRate(kPacketSize * kNumPackets * 8);
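+ // This budget allows exactly one full round of retransmissions per second:
+ // 30 packets of 1400 bytes = 336000 bits, with the rate given in bps.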
+ EnableRtx();
+
+ std::vector<uint16_t> sequence_numbers;
+ for (int32_t i = 0; i < kNumPackets; ++i) {
+ std::unique_ptr<RtpPacketToSend> packet =
+ BuildRtpPacket(kPayload, /*marker_bit=*/true, /*timestamp=*/0,
+ /*capture_time_ms=*/clock_->TimeInMilliseconds());
+ packet->set_allow_retransmission(true);
+ sequencer_->Sequence(*packet);
+ sequence_numbers.push_back(packet->SequenceNumber());
+ packet_history_->PutRtpPacket(std::move(packet),
+ /*send_time=*/clock_->CurrentTime());
+ time_controller_.AdvanceTime(TimeDelta::Millis(1));
+ }
+
+ time_controller_.AdvanceTime(TimeDelta::Millis(1000 - kNumPackets));
+
+ // Resending should work - brings the bandwidth up to the limit.
+ // NACK bitrate is capped to the same bitrate as the encoder, since the max
+ // protection overhead is 50% (see MediaOptimization::SetTargetRates).
+ EXPECT_CALL(mock_paced_sender_, EnqueuePackets(ElementsAre(Pointee(Property(
+ &RtpPacketToSend::packet_type,
+ RtpPacketMediaType::kRetransmission)))))
+ .Times(kNumPackets)
+ .WillRepeatedly(
+ [&](std::vector<std::unique_ptr<RtpPacketToSend>> packets) {
+ for (const auto& packet : packets) {
+ packet_history_->MarkPacketAsSent(
+ *packet->retransmitted_sequence_number());
+ }
+ });
+ rtp_sender_->OnReceivedNack(sequence_numbers, 0);
+
+ // Must be at least 5ms in between retransmission attempts.
+ time_controller_.AdvanceTime(TimeDelta::Millis(5));
+
+ // Resending should not work, bandwidth exceeded.
+ EXPECT_CALL(mock_paced_sender_, EnqueuePackets).Times(0);
+ rtp_sender_->OnReceivedNack(sequence_numbers, 0);
+}
+
+TEST_F(RtpSenderTest, UpdatingCsrcsUpdatedOverhead) {
+ RtpRtcpInterface::Configuration config = GetDefaultConfig();
+ config.rtx_send_ssrc = {};
+ CreateSender(config);
+
+ // Base RTP overhead is 12B.
+ EXPECT_EQ(rtp_sender_->ExpectedPerPacketOverhead(), 12u);
+
+ // Adding two csrcs adds 2*4 bytes to the header.
+ rtp_sender_->SetCsrcs({1, 2});
+ EXPECT_EQ(rtp_sender_->ExpectedPerPacketOverhead(), 20u);
+}
+
+TEST_F(RtpSenderTest, OnOverheadChanged) {
+ RtpRtcpInterface::Configuration config = GetDefaultConfig();
+ config.rtx_send_ssrc = {};
+ CreateSender(config);
+
+ // Base RTP overhead is 12B.
+ EXPECT_EQ(rtp_sender_->ExpectedPerPacketOverhead(), 12u);
+
+ rtp_sender_->RegisterRtpHeaderExtension(TransmissionOffset::Uri(),
+ kTransmissionTimeOffsetExtensionId);
+
+ // TransmissionTimeOffset extension has a size of 3B, but with the addition
+ // of header index and rounding to 4 byte boundary we end up with 20B total.
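+ // Breakdown: 12B base header + 4B extension block header (0xBEDE profile
+ // plus length) + 1B id/length byte + 3B extension data = 20B, which is
+ // already 32-bit aligned.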
+ EXPECT_EQ(rtp_sender_->ExpectedPerPacketOverhead(), 20u);
+}
+
+TEST_F(RtpSenderTest, CountMidOnlyUntilAcked) {
+ RtpRtcpInterface::Configuration config = GetDefaultConfig();
+ config.rtx_send_ssrc = {};
+ CreateSender(config);
+
+ // Base RTP overhead is 12B.
+ EXPECT_EQ(rtp_sender_->ExpectedPerPacketOverhead(), 12u);
+
+ rtp_sender_->RegisterRtpHeaderExtension(RtpMid::Uri(), kMidExtensionId);
+
+ // Counted only if set.
+ EXPECT_EQ(rtp_sender_->ExpectedPerPacketOverhead(), 12u);
+ rtp_sender_->SetMid("foo");
+ EXPECT_EQ(rtp_sender_->ExpectedPerPacketOverhead(), 36u);
+ rtp_sender_->RegisterRtpHeaderExtension(RtpStreamId::Uri(), kRidExtensionId);
+ EXPECT_EQ(rtp_sender_->ExpectedPerPacketOverhead(), 52u);
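+ // The expected values imply that overhead is reserved for a maximum-size
+ // (16B) string extension rather than the actual 3-byte MID: 12B base + 4B
+ // extension block header + (1B id/len + 16B) = 33, padded to 36; adding RID
+ // reserves another 17B: 50, padded to 52.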
+
+ // Ack received, mid/rid no longer sent.
+ rtp_sender_->OnReceivedAckOnSsrc(0);
+ EXPECT_EQ(rtp_sender_->ExpectedPerPacketOverhead(), 12u);
+}
+
+TEST_F(RtpSenderTest, DontCountVolatileExtensionsIntoOverhead) {
+ RtpRtcpInterface::Configuration config = GetDefaultConfig();
+ config.rtx_send_ssrc = {};
+ CreateSender(config);
+
+ // Base RTP overhead is 12B.
+ EXPECT_EQ(rtp_sender_->ExpectedPerPacketOverhead(), 12u);
+
+ rtp_sender_->RegisterRtpHeaderExtension(InbandComfortNoiseExtension::Uri(),
+ 1);
+ rtp_sender_->RegisterRtpHeaderExtension(AbsoluteCaptureTimeExtension::Uri(),
+ 2);
+ rtp_sender_->RegisterRtpHeaderExtension(VideoOrientation::Uri(), 3);
+ rtp_sender_->RegisterRtpHeaderExtension(PlayoutDelayLimits::Uri(), 4);
+ rtp_sender_->RegisterRtpHeaderExtension(VideoContentTypeExtension::Uri(), 5);
+ rtp_sender_->RegisterRtpHeaderExtension(VideoTimingExtension::Uri(), 6);
+ rtp_sender_->RegisterRtpHeaderExtension(RepairedRtpStreamId::Uri(), 7);
+ rtp_sender_->RegisterRtpHeaderExtension(ColorSpaceExtension::Uri(), 8);
+
+ // Still only 12B counted, since none of the above are guaranteed to be sent.
+ EXPECT_EQ(rtp_sender_->ExpectedPerPacketOverhead(), 12u);
+}
+
+TEST_F(RtpSenderTest, SendPacketHandlesRetransmissionHistory) {
+ packet_history_->SetStorePacketsStatus(
+ RtpPacketHistory::StorageMode::kStoreAndCull, 10);
+
+ // Ignore calls to EnqueuePackets() for this test.
+ EXPECT_CALL(mock_paced_sender_, EnqueuePackets).WillRepeatedly(Return());
+
+ // Build a media packet and put in the packet history.
+ std::unique_ptr<RtpPacketToSend> packet =
+ BuildRtpPacket(kPayload, true, 0, clock_->TimeInMilliseconds());
+ const uint16_t media_sequence_number = packet->SequenceNumber();
+ packet->set_allow_retransmission(true);
+ packet_history_->PutRtpPacket(std::move(packet), clock_->CurrentTime());
+
+ // Simulate successful retransmission request.
+ time_controller_.AdvanceTime(TimeDelta::Millis(30));
+ EXPECT_THAT(rtp_sender_->ReSendPacket(media_sequence_number), Gt(0));
+
+ // Packet already pending, retransmission not allowed.
+ time_controller_.AdvanceTime(TimeDelta::Millis(30));
+ EXPECT_THAT(rtp_sender_->ReSendPacket(media_sequence_number), Eq(0));
+
+ // Simulate the packet exiting the pacer; mark it as no longer pending.
+ packet_history_->MarkPacketAsSent(media_sequence_number);
+
+ // Retransmissions allowed again.
+ time_controller_.AdvanceTime(TimeDelta::Millis(30));
+ EXPECT_THAT(rtp_sender_->ReSendPacket(media_sequence_number), Gt(0));
+}
+
+TEST_F(RtpSenderTest, MarksRetransmittedPackets) {
+ packet_history_->SetStorePacketsStatus(
+ RtpPacketHistory::StorageMode::kStoreAndCull, 10);
+
+ // Build a media packet and put in the packet history.
+ std::unique_ptr<RtpPacketToSend> packet =
+ BuildRtpPacket(kPayload, true, 0, clock_->TimeInMilliseconds());
+ const uint16_t media_sequence_number = packet->SequenceNumber();
+ packet->set_allow_retransmission(true);
+ packet_history_->PutRtpPacket(std::move(packet), clock_->CurrentTime());
+
+ // Expect a retransmission packet marked with the sequence number of the
+ // packet it retransmits.
+ EXPECT_CALL(
+ mock_paced_sender_,
+ EnqueuePackets(ElementsAre(AllOf(
+ Pointee(Property(&RtpPacketToSend::packet_type,
+ RtpPacketMediaType::kRetransmission)),
+ Pointee(Property(&RtpPacketToSend::retransmitted_sequence_number,
+ Eq(media_sequence_number)))))));
+ EXPECT_THAT(rtp_sender_->ReSendPacket(media_sequence_number), Gt(0));
+}
+
+TEST_F(RtpSenderTest, GeneratedPaddingHasBweExtensions) {
+ // Min requested size in order to use RTX payload.
+ const size_t kMinPaddingSize = 50;
+ EnableRtx();
+
+ ASSERT_TRUE(rtp_sender_->RegisterRtpHeaderExtension(
+ TransmissionOffset::Uri(), kTransmissionTimeOffsetExtensionId));
+ ASSERT_TRUE(rtp_sender_->RegisterRtpHeaderExtension(
+ AbsoluteSendTime::Uri(), kAbsoluteSendTimeExtensionId));
+ ASSERT_TRUE(rtp_sender_->RegisterRtpHeaderExtension(
+ TransportSequenceNumber::Uri(), kTransportSequenceNumberExtensionId));
+
+ // Put a packet in the history, in order to facilitate payload padding.
+ std::unique_ptr<RtpPacketToSend> packet =
+ BuildRtpPacket(kPayload, true, 0, clock_->TimeInMilliseconds());
+ packet->set_allow_retransmission(true);
+ packet->SetPayloadSize(kMinPaddingSize);
+ packet->set_packet_type(RtpPacketMediaType::kVideo);
+ packet_history_->PutRtpPacket(std::move(packet), clock_->CurrentTime());
+
+ // Generate a plain padding packet, check that extensions are registered.
+ std::vector<std::unique_ptr<RtpPacketToSend>> generated_packets =
+ GeneratePadding(/*target_size_bytes=*/1);
+ ASSERT_THAT(generated_packets, SizeIs(1));
+ auto& plain_padding = generated_packets.front();
+ EXPECT_GT(plain_padding->padding_size(), 0u);
+ EXPECT_TRUE(plain_padding->HasExtension<TransportSequenceNumber>());
+ EXPECT_TRUE(plain_padding->HasExtension<AbsoluteSendTime>());
+ EXPECT_TRUE(plain_padding->HasExtension<TransmissionOffset>());
+
+ // Generate a payload padding packet, check that extensions are registered.
+ generated_packets = GeneratePadding(kMinPaddingSize);
+ ASSERT_EQ(generated_packets.size(), 1u);
+ auto& payload_padding = generated_packets.front();
+ EXPECT_EQ(payload_padding->padding_size(), 0u);
+ EXPECT_TRUE(payload_padding->HasExtension<TransportSequenceNumber>());
+ EXPECT_TRUE(payload_padding->HasExtension<AbsoluteSendTime>());
+ EXPECT_TRUE(payload_padding->HasExtension<TransmissionOffset>());
+ EXPECT_GT(payload_padding->payload_size(), 0u);
+}
+
+TEST_F(RtpSenderTest, GeneratePaddingResendsOldPacketsWithRtx) {
+ // Min requested size in order to use RTX payload.
+ const size_t kMinPaddingSize = 50;
+
+ rtp_sender_->SetRtxPayloadType(kRtxPayload, kPayload);
+ rtp_sender_->SetRtxStatus(kRtxRetransmitted | kRtxRedundantPayloads);
+ packet_history_->SetStorePacketsStatus(
+ RtpPacketHistory::StorageMode::kStoreAndCull, 1);
+
+ ASSERT_TRUE(rtp_sender_->RegisterRtpHeaderExtension(
+ TransportSequenceNumber::Uri(), kTransportSequenceNumberExtensionId));
+
+ const size_t kPayloadPacketSize = kMinPaddingSize;
+ std::unique_ptr<RtpPacketToSend> packet =
+ BuildRtpPacket(kPayload, true, 0, clock_->TimeInMilliseconds());
+ packet->set_allow_retransmission(true);
+ packet->SetPayloadSize(kPayloadPacketSize);
+ packet->set_packet_type(RtpPacketMediaType::kVideo);
+ packet_history_->PutRtpPacket(std::move(packet), clock_->CurrentTime());
+
+ // Generated padding has large enough budget that the video packet should be
+ // retransmitted as padding.
+ std::vector<std::unique_ptr<RtpPacketToSend>> generated_packets =
+ GeneratePadding(kMinPaddingSize);
+ ASSERT_EQ(generated_packets.size(), 1u);
+ auto& padding_packet = generated_packets.front();
+ EXPECT_EQ(padding_packet->packet_type(), RtpPacketMediaType::kPadding);
+ EXPECT_EQ(padding_packet->Ssrc(), kRtxSsrc);
+ EXPECT_EQ(padding_packet->payload_size(),
+ kPayloadPacketSize + kRtxHeaderSize);
+
+ // Not enough budget for payload padding, use plain padding instead.
+ const size_t kPaddingBytesRequested = kMinPaddingSize - 1;
+
+ size_t padding_bytes_generated = 0;
+ generated_packets = GeneratePadding(kPaddingBytesRequested);
+ EXPECT_EQ(generated_packets.size(), 1u);
+ for (auto& packet : generated_packets) {
+ EXPECT_EQ(packet->packet_type(), RtpPacketMediaType::kPadding);
+ EXPECT_EQ(packet->Ssrc(), kRtxSsrc);
+ EXPECT_EQ(packet->payload_size(), 0u);
+ EXPECT_GT(packet->padding_size(), 0u);
+ padding_bytes_generated += packet->padding_size();
+ }
+
+ EXPECT_EQ(padding_bytes_generated, kMaxPaddingSize);
+}
+
+TEST_F(RtpSenderTest, LimitsPayloadPaddingSize) {
+ // RTX payload padding is limited to 3x target size.
+ const double kFactor = 3.0;
+ SetUpRtpSender(false, false, nullptr);
+ rtp_sender_->SetRtxPayloadType(kRtxPayload, kPayload);
+ rtp_sender_->SetRtxStatus(kRtxRetransmitted | kRtxRedundantPayloads);
+ packet_history_->SetStorePacketsStatus(
+ RtpPacketHistory::StorageMode::kStoreAndCull, 1);
+
+ ASSERT_TRUE(rtp_sender_->RegisterRtpHeaderExtension(
+ TransportSequenceNumber::Uri(), kTransportSequenceNumberExtensionId));
+
+ // Send a dummy video packet so it ends up in the packet history.
+ const size_t kPayloadPacketSize = 1234u;
+ std::unique_ptr<RtpPacketToSend> packet =
+ BuildRtpPacket(kPayload, true, 0, clock_->TimeInMilliseconds());
+ packet->set_allow_retransmission(true);
+ packet->SetPayloadSize(kPayloadPacketSize);
+ packet->set_packet_type(RtpPacketMediaType::kVideo);
+ packet_history_->PutRtpPacket(std::move(packet), clock_->CurrentTime());
+
+ // Smallest target size that will result in the sent packet being returned as
+ // padding.
+ const size_t kMinTargetSizeForPayload =
+ (kPayloadPacketSize + kRtxHeaderSize) / kFactor;
+
+ // Generated padding has large enough budget that the video packet should be
+ // retransmitted as padding.
+ EXPECT_THAT(
+ GeneratePadding(kMinTargetSizeForPayload),
+ AllOf(Not(IsEmpty()),
+ Each(Pointee(Property(&RtpPacketToSend::padding_size, Eq(0u))))));
+
+ // If the payload padding would exceed kFactor (3x) the requested size,
+ // plain padding is returned instead.
+ EXPECT_THAT(
+     GeneratePadding(kMinTargetSizeForPayload - 1),
+ AllOf(Not(IsEmpty()),
+ Each(Pointee(Property(&RtpPacketToSend::padding_size, Gt(0u))))));
+}
+
+TEST_F(RtpSenderTest, GeneratePaddingCreatesPurePaddingWithoutRtx) {
+ packet_history_->SetStorePacketsStatus(
+ RtpPacketHistory::StorageMode::kStoreAndCull, 1);
+ ASSERT_TRUE(rtp_sender_->RegisterRtpHeaderExtension(
+ TransmissionOffset::Uri(), kTransmissionTimeOffsetExtensionId));
+ ASSERT_TRUE(rtp_sender_->RegisterRtpHeaderExtension(
+ AbsoluteSendTime::Uri(), kAbsoluteSendTimeExtensionId));
+ ASSERT_TRUE(rtp_sender_->RegisterRtpHeaderExtension(
+ TransportSequenceNumber::Uri(), kTransportSequenceNumberExtensionId));
+
+ const size_t kPayloadPacketSize = 1234;
+ // Send a dummy video packet so it ends up in the packet history. Since we
+ // are not using RTX, it should never be used as padding.
+ std::unique_ptr<RtpPacketToSend> packet =
+ BuildRtpPacket(kPayload, true, 0, clock_->TimeInMilliseconds());
+ packet->set_allow_retransmission(true);
+ packet->SetPayloadSize(kPayloadPacketSize);
+ packet->set_packet_type(RtpPacketMediaType::kVideo);
+ sequencer_->Sequence(*packet);
+ packet_history_->PutRtpPacket(std::move(packet), clock_->CurrentTime());
+
+ // Payload padding not available without RTX, only generate plain padding on
+ // the media SSRC.
+ // Number of padding packets is the requested padding size divided by max
+ // padding packet size, rounded up. Pure padding packets are always of the
+ // maximum size.
+ const size_t kPaddingBytesRequested = kPayloadPacketSize + kRtxHeaderSize;
+ const size_t kExpectedNumPaddingPackets =
+ (kPaddingBytesRequested + kMaxPaddingSize - 1) / kMaxPaddingSize;
+ size_t padding_bytes_generated = 0;
+ std::vector<std::unique_ptr<RtpPacketToSend>> padding_packets =
+ GeneratePadding(kPaddingBytesRequested);
+ EXPECT_EQ(padding_packets.size(), kExpectedNumPaddingPackets);
+ for (auto& packet : padding_packets) {
+ EXPECT_EQ(packet->packet_type(), RtpPacketMediaType::kPadding);
+ EXPECT_EQ(packet->Ssrc(), kSsrc);
+ EXPECT_EQ(packet->payload_size(), 0u);
+ EXPECT_GT(packet->padding_size(), 0u);
+ padding_bytes_generated += packet->padding_size();
+ EXPECT_TRUE(packet->HasExtension<TransportSequenceNumber>());
+ EXPECT_TRUE(packet->HasExtension<AbsoluteSendTime>());
+ EXPECT_TRUE(packet->HasExtension<TransmissionOffset>());
+ }
+
+ EXPECT_EQ(padding_bytes_generated,
+ kExpectedNumPaddingPackets * kMaxPaddingSize);
+}
+
+TEST_F(RtpSenderTest, SupportsPadding) {
+ bool kSendingMediaStats[] = {true, false};
+ bool kEnableRedundantPayloads[] = {true, false};
+ absl::string_view kBweExtensionUris[] = {
+ TransportSequenceNumber::Uri(), TransportSequenceNumberV2::Uri(),
+ AbsoluteSendTime::Uri(), TransmissionOffset::Uri()};
+ const int kExtensionsId = 7;
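+ // Padding requires that media is being sent and that at least one BWE
+ // extension is registered; RTX payload padding additionally requires the
+ // kRtxRedundantPayloads mode.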
+
+ for (bool sending_media : kSendingMediaStats) {
+ rtp_sender_->SetSendingMediaStatus(sending_media);
+ for (bool redundant_payloads : kEnableRedundantPayloads) {
+ int rtx_mode = kRtxRetransmitted;
+ if (redundant_payloads) {
+ rtx_mode |= kRtxRedundantPayloads;
+ }
+ rtp_sender_->SetRtxPayloadType(kRtxPayload, kPayload);
+ rtp_sender_->SetRtxStatus(rtx_mode);
+
+ for (auto extension_uri : kBweExtensionUris) {
+ EXPECT_FALSE(rtp_sender_->SupportsPadding());
+ rtp_sender_->RegisterRtpHeaderExtension(extension_uri, kExtensionsId);
+ if (!sending_media) {
+ EXPECT_FALSE(rtp_sender_->SupportsPadding());
+ } else {
+ EXPECT_TRUE(rtp_sender_->SupportsPadding());
+ if (redundant_payloads) {
+ EXPECT_TRUE(rtp_sender_->SupportsRtxPayloadPadding());
+ } else {
+ EXPECT_FALSE(rtp_sender_->SupportsRtxPayloadPadding());
+ }
+ }
+ rtp_sender_->DeregisterRtpHeaderExtension(extension_uri);
+ EXPECT_FALSE(rtp_sender_->SupportsPadding());
+ }
+ }
+ }
+}
+
+TEST_F(RtpSenderTest, SetsCaptureTimeOnRtxRetransmissions) {
+ EnableRtx();
+
+ // Put a packet in the packet history, with current time as capture time.
+ const int64_t start_time_ms = clock_->TimeInMilliseconds();
+ std::unique_ptr<RtpPacketToSend> packet =
+ BuildRtpPacket(kPayload, kMarkerBit, start_time_ms,
+ /*capture_time_ms=*/start_time_ms);
+ packet->set_allow_retransmission(true);
+ sequencer_->Sequence(*packet);
+ packet_history_->PutRtpPacket(std::move(packet),
+ Timestamp::Millis(start_time_ms));
+
+ // Advance time, request an RTX retransmission. Capture timestamp should be
+ // preserved.
+ time_controller_.AdvanceTime(TimeDelta::Millis(10));
+
+ EXPECT_CALL(
+ mock_paced_sender_,
+ EnqueuePackets(ElementsAre(Pointee(Property(
+ &RtpPacketToSend::capture_time, Timestamp::Millis(start_time_ms))))));
+ EXPECT_GT(rtp_sender_->ReSendPacket(kSeqNum), 0);
+}
+
+TEST_F(RtpSenderTest, IgnoresNackAfterDisablingMedia) {
+ const TimeDelta kRtt = TimeDelta::Millis(10);
+
+ EnableRtx();
+ packet_history_->SetRtt(kRtt);
+
+ // Put a packet in the history.
+ const int64_t start_time_ms = clock_->TimeInMilliseconds();
+ std::unique_ptr<RtpPacketToSend> packet =
+ BuildRtpPacket(kPayload, kMarkerBit, start_time_ms,
+ /*capture_time_ms=*/start_time_ms);
+ packet->set_allow_retransmission(true);
+ sequencer_->Sequence(*packet);
+ packet_history_->PutRtpPacket(std::move(packet),
+ Timestamp::Millis(start_time_ms));
+
+ // Disable media sending and try to retransmit the packet, it should fail.
+ rtp_sender_->SetSendingMediaStatus(false);
+ time_controller_.AdvanceTime(kRtt);
+ EXPECT_LT(rtp_sender_->ReSendPacket(kSeqNum), 0);
+}
+
+TEST_F(RtpSenderTest, DoesntFecProtectRetransmissions) {
+ // Set up retransmission without RTX, so that a plain copy of the old packet
+ // is re-sent instead.
+ const TimeDelta kRtt = TimeDelta::Millis(10);
+ rtp_sender_->SetSendingMediaStatus(true);
+ rtp_sender_->SetRtxStatus(kRtxOff);
+ packet_history_->SetStorePacketsStatus(
+ RtpPacketHistory::StorageMode::kStoreAndCull, 10);
+ packet_history_->SetRtt(kRtt);
+
+ // Put a fec protected packet in the history.
+ const int64_t start_time_ms = clock_->TimeInMilliseconds();
+ std::unique_ptr<RtpPacketToSend> packet =
+ BuildRtpPacket(kPayload, kMarkerBit, start_time_ms,
+ /*capture_time_ms=*/start_time_ms);
+ packet->set_allow_retransmission(true);
+ packet->set_fec_protect_packet(true);
+ sequencer_->Sequence(*packet);
+ packet_history_->PutRtpPacket(std::move(packet),
+ Timestamp::Millis(start_time_ms));
+
+ // Re-send the packet; the retransmitted packet should not have the FEC
+ // protection flag set.
+ EXPECT_CALL(mock_paced_sender_,
+ EnqueuePackets(ElementsAre(Pointee(
+ Property(&RtpPacketToSend::fec_protect_packet, false)))));
+
+ time_controller_.AdvanceTime(kRtt);
+ EXPECT_GT(rtp_sender_->ReSendPacket(kSeqNum), 0);
+}
+
+TEST_F(RtpSenderTest, MarksPacketsWithKeyframeStatus) {
+ RTPSenderVideo::Config video_config;
+ video_config.clock = clock_;
+ video_config.rtp_sender = rtp_sender_.get();
+ video_config.field_trials = &field_trials_;
+ RTPSenderVideo rtp_sender_video(video_config);
+
+ const uint8_t kPayloadType = 127;
+ const absl::optional<VideoCodecType> kCodecType =
+ VideoCodecType::kVideoCodecGeneric;
+
+ const uint32_t kCaptureTimeMsToRtpTimestamp = 90; // 90 kHz clock
+
+ {
+ EXPECT_CALL(mock_paced_sender_,
+ EnqueuePackets(Each(
+ Pointee(Property(&RtpPacketToSend::is_key_frame, true)))))
+ .Times(AtLeast(1));
+ RTPVideoHeader video_header;
+ video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ int64_t capture_time_ms = clock_->TimeInMilliseconds();
+ EXPECT_TRUE(rtp_sender_video.SendVideo(
+ kPayloadType, kCodecType,
+ capture_time_ms * kCaptureTimeMsToRtpTimestamp, capture_time_ms,
+ kPayloadData, video_header, kDefaultExpectedRetransmissionTimeMs));
+
+ time_controller_.AdvanceTime(TimeDelta::Millis(33));
+ }
+
+ {
+ EXPECT_CALL(mock_paced_sender_,
+ EnqueuePackets(Each(
+ Pointee(Property(&RtpPacketToSend::is_key_frame, false)))))
+ .Times(AtLeast(1));
+ RTPVideoHeader video_header;
+ video_header.frame_type = VideoFrameType::kVideoFrameDelta;
+ int64_t capture_time_ms = clock_->TimeInMilliseconds();
+ EXPECT_TRUE(rtp_sender_video.SendVideo(
+ kPayloadType, kCodecType,
+ capture_time_ms * kCaptureTimeMsToRtpTimestamp, capture_time_ms,
+ kPayloadData, video_header, kDefaultExpectedRetransmissionTimeMs));
+
+ time_controller_.AdvanceTime(TimeDelta::Millis(33));
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_sender_video.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_sender_video.cc
new file mode 100644
index 0000000000..05428ff289
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_sender_video.cc
@@ -0,0 +1,918 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtp_sender_video.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+#include <algorithm>
+#include <limits>
+#include <memory>
+#include <string>
+#include <utility>
+
+#include "absl/algorithm/container.h"
+#include "absl/memory/memory.h"
+#include "absl/strings/match.h"
+#include "api/crypto/frame_encryptor_interface.h"
+#include "api/transport/rtp/dependency_descriptor.h"
+#include "modules/remote_bitrate_estimator/test/bwe_test_logging.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "modules/rtp_rtcp/source/absolute_capture_time_sender.h"
+#include "modules/rtp_rtcp/source/byte_io.h"
+#include "modules/rtp_rtcp/source/rtp_dependency_descriptor_extension.h"
+#include "modules/rtp_rtcp/source/rtp_descriptor_authentication.h"
+#include "modules/rtp_rtcp/source/rtp_format.h"
+#include "modules/rtp_rtcp/source/rtp_generic_frame_descriptor_extension.h"
+#include "modules/rtp_rtcp/source/rtp_header_extensions.h"
+#include "modules/rtp_rtcp/source/rtp_packet_to_send.h"
+#include "modules/rtp_rtcp/source/rtp_video_layers_allocation_extension.h"
+#include "modules/rtp_rtcp/source/time_util.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/experiments/field_trial_parser.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/trace_event.h"
+
+namespace webrtc {
+
+namespace {
+constexpr size_t kRedForFecHeaderLength = 1;
+constexpr int64_t kMaxUnretransmittableFrameIntervalMs = 33 * 4;
+constexpr char kIncludeCaptureClockOffset[] =
+ "WebRTC-IncludeCaptureClockOffset";
+
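+// Encapsulates the media payload in RED (RFC 2198). The RED header for the
+// final (and here only) block is a single byte: an F bit of zero followed by
+// the 7-bit payload type of the encapsulated media.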
+void BuildRedPayload(const RtpPacketToSend& media_packet,
+ RtpPacketToSend* red_packet) {
+ uint8_t* red_payload = red_packet->AllocatePayload(
+ kRedForFecHeaderLength + media_packet.payload_size());
+ RTC_DCHECK(red_payload);
+ red_payload[0] = media_packet.PayloadType();
+
+ auto media_payload = media_packet.payload();
+ memcpy(&red_payload[kRedForFecHeaderLength], media_payload.data(),
+ media_payload.size());
+}
+
+bool MinimizeDescriptor(RTPVideoHeader* video_header) {
+ if (auto* vp8 =
+ absl::get_if<RTPVideoHeaderVP8>(&video_header->video_type_header)) {
+ // Set minimum fields the RtpPacketizer is using to create vp8 packets.
+ // nonReference is the only field that doesn't require extra space.
+ bool non_reference = vp8->nonReference;
+ vp8->InitRTPVideoHeaderVP8();
+ vp8->nonReference = non_reference;
+ return true;
+ }
+ // TODO(danilchap): Reduce vp9 codec specific descriptor too.
+ return false;
+}
+
+bool IsBaseLayer(const RTPVideoHeader& video_header) {
+ switch (video_header.codec) {
+ case kVideoCodecVP8: {
+ const auto& vp8 =
+ absl::get<RTPVideoHeaderVP8>(video_header.video_type_header);
+ return (vp8.temporalIdx == 0 || vp8.temporalIdx == kNoTemporalIdx);
+ }
+ case kVideoCodecVP9: {
+ const auto& vp9 =
+ absl::get<RTPVideoHeaderVP9>(video_header.video_type_header);
+ return (vp9.temporal_idx == 0 || vp9.temporal_idx == kNoTemporalIdx);
+ }
+ case kVideoCodecH264:
+ // TODO(kron): Implement logic for H264 once WebRTC supports temporal
+ // layers for H264.
+ break;
+ default:
+ break;
+ }
+ return true;
+}
+
+[[maybe_unused]] const char* FrameTypeToString(VideoFrameType frame_type) {
+ switch (frame_type) {
+ case VideoFrameType::kEmptyFrame:
+ return "empty";
+ case VideoFrameType::kVideoFrameKey:
+ return "video_key";
+ case VideoFrameType::kVideoFrameDelta:
+ return "video_delta";
+ default:
+ RTC_DCHECK_NOTREACHED();
+ return "";
+ }
+}
+
+bool IsNoopDelay(const VideoPlayoutDelay& delay) {
+ return delay.min_ms == -1 && delay.max_ms == -1;
+}
+
+absl::optional<VideoPlayoutDelay> LoadVideoPlayoutDelayOverride(
+ const FieldTrialsView* key_value_config) {
+ RTC_DCHECK(key_value_config);
+ FieldTrialOptional<int> playout_delay_min_ms("min_ms", absl::nullopt);
+ FieldTrialOptional<int> playout_delay_max_ms("max_ms", absl::nullopt);
+ ParseFieldTrial({&playout_delay_max_ms, &playout_delay_min_ms},
+ key_value_config->Lookup("WebRTC-ForceSendPlayoutDelay"));
+ return playout_delay_max_ms && playout_delay_min_ms
+ ? absl::make_optional<VideoPlayoutDelay>(*playout_delay_min_ms,
+ *playout_delay_max_ms)
+ : absl::nullopt;
+}
+
+// Some packets can be skipped and the stream can still be decoded. Those
+// packets are less likely to be retransmitted if they are lost.
+bool PacketWillLikelyBeRequestedForRetransmissionIfLost(
+ const RTPVideoHeader& video_header) {
+ return IsBaseLayer(video_header) &&
+ !(video_header.generic.has_value()
+ ? absl::c_linear_search(
+ video_header.generic->decode_target_indications,
+ DecodeTargetIndication::kDiscardable)
+ : false);
+}
+
+} // namespace
+
+RTPSenderVideo::RTPSenderVideo(const Config& config)
+ : rtp_sender_(config.rtp_sender),
+ clock_(config.clock),
+ retransmission_settings_(
+ config.enable_retransmit_all_layers
+ ? kRetransmitAllLayers
+ : (kRetransmitBaseLayer | kConditionallyRetransmitHigherLayers)),
+ last_rotation_(kVideoRotation_0),
+ transmit_color_space_next_frame_(false),
+ send_allocation_(SendVideoLayersAllocation::kDontSend),
+ current_playout_delay_{-1, -1},
+ playout_delay_pending_(false),
+ forced_playout_delay_(LoadVideoPlayoutDelayOverride(config.field_trials)),
+ red_payload_type_(config.red_payload_type),
+ fec_type_(config.fec_type),
+ fec_overhead_bytes_(config.fec_overhead_bytes),
+ packetization_overhead_bitrate_(1000, RateStatistics::kBpsScale),
+ frame_encryptor_(config.frame_encryptor),
+ require_frame_encryption_(config.require_frame_encryption),
+ generic_descriptor_auth_experiment_(!absl::StartsWith(
+ config.field_trials->Lookup("WebRTC-GenericDescriptorAuth"),
+ "Disabled")),
+ absolute_capture_time_sender_(config.clock),
+ frame_transformer_delegate_(
+ config.frame_transformer
+ ? rtc::make_ref_counted<RTPSenderVideoFrameTransformerDelegate>(
+ this,
+ config.frame_transformer,
+ rtp_sender_->SSRC(),
+ config.send_transport_queue)
+ : nullptr),
+ include_capture_clock_offset_(!absl::StartsWith(
+ config.field_trials->Lookup(kIncludeCaptureClockOffset),
+ "Disabled")) {
+ if (frame_transformer_delegate_)
+ frame_transformer_delegate_->Init();
+}
+
+RTPSenderVideo::~RTPSenderVideo() {
+ if (frame_transformer_delegate_)
+ frame_transformer_delegate_->Reset();
+}
+
+void RTPSenderVideo::LogAndSendToNetwork(
+ std::vector<std::unique_ptr<RtpPacketToSend>> packets,
+ size_t unpacketized_payload_size) {
+ {
+ MutexLock lock(&stats_mutex_);
+ size_t packetized_payload_size = 0;
+ for (const auto& packet : packets) {
+ if (*packet->packet_type() == RtpPacketMediaType::kVideo) {
+ packetized_payload_size += packet->payload_size();
+ }
+ }
+ // AV1 and H264 packetizers may produce fewer packetized bytes than
+ // unpacketized.
+ if (packetized_payload_size >= unpacketized_payload_size) {
+ packetization_overhead_bitrate_.Update(
+ packetized_payload_size - unpacketized_payload_size,
+ clock_->TimeInMilliseconds());
+ }
+ }
+
+ rtp_sender_->EnqueuePackets(std::move(packets));
+}
+
+size_t RTPSenderVideo::FecPacketOverhead() const {
+ size_t overhead = fec_overhead_bytes_;
+ if (red_enabled()) {
+ // The RED overhead is due to a small header.
+ overhead += kRedForFecHeaderLength;
+
+ if (fec_type_ == VideoFecGenerator::FecType::kUlpFec) {
+ // For ULPFEC, the overhead is the FEC headers plus RED for FEC header
+ // (see above) plus anything in RTP header beyond the 12 bytes base header
+ // (CSRC list, extensions...)
+ // This reason for the header extensions to be included here is that
+ // from an FEC viewpoint, they are part of the payload to be protected.
+ // (The base RTP header is already protected by the FEC header.)
+ overhead +=
+ rtp_sender_->FecOrPaddingPacketMaxRtpHeaderLength() - kRtpHeaderSize;
+ }
+ }
+ return overhead;
+}
+
+void RTPSenderVideo::SetVideoStructure(
+ const FrameDependencyStructure* video_structure) {
+ if (frame_transformer_delegate_) {
+ frame_transformer_delegate_->SetVideoStructureUnderLock(video_structure);
+ return;
+ }
+ SetVideoStructureInternal(video_structure);
+}
+
+void RTPSenderVideo::SetVideoStructureAfterTransformation(
+ const FrameDependencyStructure* video_structure) {
+ SetVideoStructureInternal(video_structure);
+}
+
+void RTPSenderVideo::SetVideoStructureInternal(
+ const FrameDependencyStructure* video_structure) {
+ RTC_DCHECK_RUNS_SERIALIZED(&send_checker_);
+ if (video_structure == nullptr) {
+ video_structure_ = nullptr;
+ return;
+ }
+ // Simple sanity checks that the video structure is properly set up.
+ RTC_DCHECK_GT(video_structure->num_decode_targets, 0);
+ RTC_DCHECK_GT(video_structure->templates.size(), 0);
+
+ int structure_id = 0;
+ if (video_structure_) {
+ if (*video_structure_ == *video_structure) {
+ // Same structure (just a new key frame), no update required.
+ return;
+ }
+ // When setting different video structure make sure structure_id is updated
+ // so that templates from different structures do not collide.
+ static constexpr int kMaxTemplates = 64;
+ structure_id =
+ (video_structure_->structure_id + video_structure_->templates.size()) %
+ kMaxTemplates;
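+ // Dependency descriptor template ids are 6-bit values, so advancing the
+ // start id by the previous structure's template count (modulo 64) keeps
+ // old and new template ids from colliding.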
+ }
+
+ video_structure_ =
+ std::make_unique<FrameDependencyStructure>(*video_structure);
+ video_structure_->structure_id = structure_id;
+}
+
+void RTPSenderVideo::SetVideoLayersAllocation(
+ VideoLayersAllocation allocation) {
+ if (frame_transformer_delegate_) {
+ frame_transformer_delegate_->SetVideoLayersAllocationUnderLock(
+ std::move(allocation));
+ return;
+ }
+ SetVideoLayersAllocationInternal(std::move(allocation));
+}
+
+void RTPSenderVideo::SetVideoLayersAllocationAfterTransformation(
+ VideoLayersAllocation allocation) {
+ SetVideoLayersAllocationInternal(std::move(allocation));
+}
+
+void RTPSenderVideo::SetVideoLayersAllocationInternal(
+ VideoLayersAllocation allocation) {
+ RTC_DCHECK_RUNS_SERIALIZED(&send_checker_);
+ if (!allocation_ || allocation.active_spatial_layers.size() !=
+ allocation_->active_spatial_layers.size()) {
+ send_allocation_ = SendVideoLayersAllocation::kSendWithResolution;
+ } else if (send_allocation_ == SendVideoLayersAllocation::kDontSend) {
+ send_allocation_ = SendVideoLayersAllocation::kSendWithoutResolution;
+ }
+ if (send_allocation_ == SendVideoLayersAllocation::kSendWithoutResolution) {
+ // Check if frame rate changed more than 5fps since the last time the
+ // extension was sent with frame rate and resolution.
+ for (size_t i = 0; i < allocation.active_spatial_layers.size(); ++i) {
+ if (abs(static_cast<int>(
+ allocation.active_spatial_layers[i].frame_rate_fps) -
+ static_cast<int>(
+ last_full_sent_allocation_->active_spatial_layers[i]
+ .frame_rate_fps)) > 5) {
+ send_allocation_ = SendVideoLayersAllocation::kSendWithResolution;
+ break;
+ }
+ }
+ }
+ allocation_ = std::move(allocation);
+}
+
+void RTPSenderVideo::AddRtpHeaderExtensions(const RTPVideoHeader& video_header,
+ bool first_packet,
+ bool last_packet,
+ RtpPacketToSend* packet) const {
+ // Send color space when changed or if the frame is a key frame. Keep
+ // sending color space information until the first base layer frame to
+ // guarantee that the information is retrieved by the receiver.
+ bool set_color_space =
+ video_header.color_space != last_color_space_ ||
+ video_header.frame_type == VideoFrameType::kVideoFrameKey ||
+ transmit_color_space_next_frame_;
+ // Color space requires two-byte header extensions if HDR metadata is
+ // included. Therefore, it's best to add this extension first so that the
+ // other extensions in the same packet are written as two-byte headers at
+ // once.
+ if (last_packet && set_color_space && video_header.color_space)
+ packet->SetExtension<ColorSpaceExtension>(video_header.color_space.value());
+
+ // According to
+ // http://www.etsi.org/deliver/etsi_ts/126100_126199/126114/12.07.00_60/
+ // ts_126114v120700p.pdf Section 7.4.5:
+ // The MTSI client shall add the payload bytes as defined in this clause
+ // onto the last RTP packet in each group of packets which make up a key
+ // frame (I-frame or IDR frame in H.264 (AVC), or an IRAP picture in H.265
+ // (HEVC)). The MTSI client may also add the payload bytes onto the last RTP
+ // packet in each group of packets which make up another type of frame
+ // (e.g. a P-Frame) only if the current value is different from the previous
+ // value sent.
+ // Set rotation when key frame or when changed (to follow standard).
+ // Or when different from 0 (to follow current receiver implementation).
+ bool set_video_rotation =
+ video_header.frame_type == VideoFrameType::kVideoFrameKey ||
+ video_header.rotation != last_rotation_ ||
+ video_header.rotation != kVideoRotation_0;
+ if (last_packet && set_video_rotation)
+ packet->SetExtension<VideoOrientation>(video_header.rotation);
+
+ // Report content type only for key frames.
+ if (last_packet &&
+ video_header.frame_type == VideoFrameType::kVideoFrameKey &&
+ video_header.content_type != VideoContentType::UNSPECIFIED)
+ packet->SetExtension<VideoContentTypeExtension>(video_header.content_type);
+
+ if (last_packet &&
+ video_header.video_timing.flags != VideoSendTiming::kInvalid)
+ packet->SetExtension<VideoTimingExtension>(video_header.video_timing);
+
+ // If transmitted, add to all packets; ack logic depends on this.
+ if (playout_delay_pending_) {
+ packet->SetExtension<PlayoutDelayLimits>(current_playout_delay_);
+ }
+
+ if (first_packet && video_header.absolute_capture_time.has_value()) {
+ packet->SetExtension<AbsoluteCaptureTimeExtension>(
+ *video_header.absolute_capture_time);
+ }
+
+ if (video_header.generic) {
+ bool extension_is_set = false;
+ if (packet->IsRegistered<RtpDependencyDescriptorExtension>() &&
+ video_structure_ != nullptr) {
+ DependencyDescriptor descriptor;
+ descriptor.first_packet_in_frame = first_packet;
+ descriptor.last_packet_in_frame = last_packet;
+ descriptor.frame_number = video_header.generic->frame_id & 0xFFFF;
+ descriptor.frame_dependencies.spatial_id =
+ video_header.generic->spatial_index;
+ descriptor.frame_dependencies.temporal_id =
+ video_header.generic->temporal_index;
+ for (int64_t dep : video_header.generic->dependencies) {
+ descriptor.frame_dependencies.frame_diffs.push_back(
+ video_header.generic->frame_id - dep);
+ }
+ descriptor.frame_dependencies.chain_diffs =
+ video_header.generic->chain_diffs;
+ descriptor.frame_dependencies.decode_target_indications =
+ video_header.generic->decode_target_indications;
+ RTC_DCHECK_EQ(
+ descriptor.frame_dependencies.decode_target_indications.size(),
+ video_structure_->num_decode_targets);
+
+ if (first_packet) {
+ descriptor.active_decode_targets_bitmask =
+ active_decode_targets_tracker_.ActiveDecodeTargetsBitmask();
+ }
+ // VP9 marks all layer frames of the first picture as kVideoFrameKey.
+ // The structure should be attached to the descriptor of the lowest
+ // spatial layer when inter-layer dependency is used, i.e. L structures,
+ // or to all layers when inter-layer dependency is not used, i.e. S
+ // structures. Distinguish these two cases by checking if there are any
+ // dependencies.
+ if (video_header.frame_type == VideoFrameType::kVideoFrameKey &&
+ video_header.generic->dependencies.empty() && first_packet) {
+ // To avoid an extra structure copy, temporarily share ownership of the
+ // video_structure with the dependency descriptor.
+ descriptor.attached_structure =
+ absl::WrapUnique(video_structure_.get());
+ }
+ extension_is_set = packet->SetExtension<RtpDependencyDescriptorExtension>(
+ *video_structure_,
+ active_decode_targets_tracker_.ActiveChainsBitmask(), descriptor);
+
+ // Remove the temporary shared ownership.
+ descriptor.attached_structure.release();
+ }
+
+ // Do not use generic frame descriptor when dependency descriptor is stored.
+ if (packet->IsRegistered<RtpGenericFrameDescriptorExtension00>() &&
+ !extension_is_set) {
+ RtpGenericFrameDescriptor generic_descriptor;
+ generic_descriptor.SetFirstPacketInSubFrame(first_packet);
+ generic_descriptor.SetLastPacketInSubFrame(last_packet);
+
+ if (first_packet) {
+ generic_descriptor.SetFrameId(
+ static_cast<uint16_t>(video_header.generic->frame_id));
+ for (int64_t dep : video_header.generic->dependencies) {
+ generic_descriptor.AddFrameDependencyDiff(
+ video_header.generic->frame_id - dep);
+ }
+
+ uint8_t spatial_bimask = 1 << video_header.generic->spatial_index;
+ generic_descriptor.SetSpatialLayersBitmask(spatial_bimask);
+
+ generic_descriptor.SetTemporalLayer(
+ video_header.generic->temporal_index);
+
+ if (video_header.frame_type == VideoFrameType::kVideoFrameKey) {
+ generic_descriptor.SetResolution(video_header.width,
+ video_header.height);
+ }
+ }
+
+ packet->SetExtension<RtpGenericFrameDescriptorExtension00>(
+ generic_descriptor);
+ }
+ }
+
+ if (packet->IsRegistered<RtpVideoLayersAllocationExtension>() &&
+ first_packet &&
+ send_allocation_ != SendVideoLayersAllocation::kDontSend &&
+ (video_header.frame_type == VideoFrameType::kVideoFrameKey ||
+ PacketWillLikelyBeRequestedForRestransmitionIfLost(video_header))) {
+ VideoLayersAllocation allocation = allocation_.value();
+ allocation.resolution_and_frame_rate_is_valid =
+ send_allocation_ == SendVideoLayersAllocation::kSendWithResolution;
+ packet->SetExtension<RtpVideoLayersAllocationExtension>(allocation);
+ }
+
+ if (first_packet && video_header.video_frame_tracking_id) {
+ packet->SetExtension<VideoFrameTrackingIdExtension>(
+ *video_header.video_frame_tracking_id);
+ }
+}
+
+bool RTPSenderVideo::SendVideo(
+ int payload_type,
+ absl::optional<VideoCodecType> codec_type,
+ uint32_t rtp_timestamp,
+ int64_t capture_time_ms,
+ rtc::ArrayView<const uint8_t> payload,
+ RTPVideoHeader video_header,
+ absl::optional<int64_t> expected_retransmission_time_ms) {
+ TRACE_EVENT_ASYNC_STEP1("webrtc", "Video", capture_time_ms, "Send", "type",
+ FrameTypeToString(video_header.frame_type));
+ RTC_CHECK_RUNS_SERIALIZED(&send_checker_);
+
+ if (video_header.frame_type == VideoFrameType::kEmptyFrame)
+ return true;
+
+ if (payload.empty())
+ return false;
+ if (!rtp_sender_->SendingMedia()) {
+ return false;
+ }
+
+ int32_t retransmission_settings = retransmission_settings_;
+ if (codec_type == VideoCodecType::kVideoCodecH264) {
+ // Backward compatibility for older receivers without temporal layer logic.
+ retransmission_settings = kRetransmitBaseLayer | kRetransmitHigherLayers;
+ }
+
+ MaybeUpdateCurrentPlayoutDelay(video_header);
+ if (video_header.frame_type == VideoFrameType::kVideoFrameKey) {
+ if (!IsNoopDelay(current_playout_delay_)) {
+ // Force playout delay on key-frames, if set.
+ playout_delay_pending_ = true;
+ }
+ if (allocation_) {
+ // Send the bitrate allocation on every key frame.
+ send_allocation_ = SendVideoLayersAllocation::kSendWithResolution;
+ }
+ }
+
+ if (video_structure_ != nullptr && video_header.generic) {
+ active_decode_targets_tracker_.OnFrame(
+ video_structure_->decode_target_protected_by_chain,
+ video_header.generic->active_decode_targets,
+ video_header.frame_type == VideoFrameType::kVideoFrameKey,
+ video_header.generic->frame_id, video_header.generic->chain_diffs);
+ }
+
+ const uint8_t temporal_id = GetTemporalId(video_header);
+ // No FEC protection for upper temporal layers, if used.
+ const bool use_fec = fec_type_.has_value() &&
+ (temporal_id == 0 || temporal_id == kNoTemporalIdx);
+
+ // Maximum size of a packet including RTP headers.
+ // Extra space is left in case the packet will be resent using FEC or RTX.
+ int packet_capacity = rtp_sender_->MaxRtpPacketSize() -
+ (use_fec ? FecPacketOverhead() : 0) -
+ (rtp_sender_->RtxStatus() ? kRtxHeaderSize : 0);
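+ // E.g. with a 1200-byte max packet size, a hypothetical 30-byte FEC
+ // overhead, and RTX enabled (2-byte RTX header), the capacity would be
+ // 1200 - 30 - 2 = 1168 bytes.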
+
+ std::unique_ptr<RtpPacketToSend> single_packet =
+ rtp_sender_->AllocatePacket();
+ RTC_DCHECK_LE(packet_capacity, single_packet->capacity());
+ single_packet->SetPayloadType(payload_type);
+ single_packet->SetTimestamp(rtp_timestamp);
+ single_packet->set_capture_time(Timestamp::Millis(capture_time_ms));
+
+ // Construct the absolute capture time extension if not provided.
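+ // The timestamp is an NTP time in UQ32.32 fixed-point format, as produced
+ // by Int64MsToUQ32x32 below.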
+ if (!video_header.absolute_capture_time.has_value()) {
+ video_header.absolute_capture_time.emplace();
+ video_header.absolute_capture_time->absolute_capture_timestamp =
+ Int64MsToUQ32x32(
+ clock_->ConvertTimestampToNtpTimeInMilliseconds(capture_time_ms));
+ if (include_capture_clock_offset_) {
+ video_header.absolute_capture_time->estimated_capture_clock_offset = 0;
+ }
+ }
+
+ // Let `absolute_capture_time_sender_` decide if the extension should be sent.
+ video_header.absolute_capture_time =
+ absolute_capture_time_sender_.OnSendPacket(
+ AbsoluteCaptureTimeSender::GetSource(single_packet->Ssrc(),
+ single_packet->Csrcs()),
+ single_packet->Timestamp(), kVideoPayloadTypeFrequency,
+ video_header.absolute_capture_time->absolute_capture_timestamp,
+ video_header.absolute_capture_time->estimated_capture_clock_offset);
+
+ auto first_packet = std::make_unique<RtpPacketToSend>(*single_packet);
+ auto middle_packet = std::make_unique<RtpPacketToSend>(*single_packet);
+ auto last_packet = std::make_unique<RtpPacketToSend>(*single_packet);
+ // The simplest way to estimate how much space the extensions will occupy
+ // is to set them.
+ AddRtpHeaderExtensions(video_header,
+ /*first_packet=*/true, /*last_packet=*/true,
+ single_packet.get());
+ if (video_structure_ != nullptr &&
+ single_packet->IsRegistered<RtpDependencyDescriptorExtension>() &&
+ !single_packet->HasExtension<RtpDependencyDescriptorExtension>()) {
+ RTC_DCHECK_EQ(video_header.frame_type, VideoFrameType::kVideoFrameKey);
+ // Disable attaching the dependency descriptor to delta packets (including
+ // non-first packets of a key frame) when it wasn't attached to a key
+ // frame, as the dependency descriptor is unusable in that case.
+ RTC_LOG(LS_WARNING) << "Disabling dependency descriptor because attaching "
+ "it to a key frame failed.";
+ video_structure_ = nullptr;
+ }
+
+ AddRtpHeaderExtensions(video_header,
+ /*first_packet=*/true, /*last_packet=*/false,
+ first_packet.get());
+ AddRtpHeaderExtensions(video_header,
+ /*first_packet=*/false, /*last_packet=*/false,
+ middle_packet.get());
+ AddRtpHeaderExtensions(video_header,
+ /*first_packet=*/false, /*last_packet=*/true,
+ last_packet.get());
+
+ RTC_DCHECK_GT(packet_capacity, single_packet->headers_size());
+ RTC_DCHECK_GT(packet_capacity, first_packet->headers_size());
+ RTC_DCHECK_GT(packet_capacity, middle_packet->headers_size());
+ RTC_DCHECK_GT(packet_capacity, last_packet->headers_size());
+ RtpPacketizer::PayloadSizeLimits limits;
+ limits.max_payload_len = packet_capacity - middle_packet->headers_size();
+
+ RTC_DCHECK_GE(single_packet->headers_size(), middle_packet->headers_size());
+ limits.single_packet_reduction_len =
+ single_packet->headers_size() - middle_packet->headers_size();
+
+ RTC_DCHECK_GE(first_packet->headers_size(), middle_packet->headers_size());
+ limits.first_packet_reduction_len =
+ first_packet->headers_size() - middle_packet->headers_size();
+
+ RTC_DCHECK_GE(last_packet->headers_size(), middle_packet->headers_size());
+ limits.last_packet_reduction_len =
+ last_packet->headers_size() - middle_packet->headers_size();
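+ // The reduction lengths compensate for the extra header-extension bytes
+ // that single, first, and last packets carry relative to a middle packet.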
+
+ bool has_generic_descriptor =
+ first_packet->HasExtension<RtpGenericFrameDescriptorExtension00>() ||
+ first_packet->HasExtension<RtpDependencyDescriptorExtension>();
+
+ // Minimization of the VP8 descriptor may erase temporal_id, so use
+ // `temporal_id` rather than referencing `video_header` beyond this point.
+ if (has_generic_descriptor) {
+ MinimizeDescriptor(&video_header);
+ }
+
+ // TODO(benwright@webrtc.org) - Allocate enough to always encrypt inline.
+ rtc::Buffer encrypted_video_payload;
+ if (frame_encryptor_ != nullptr) {
+ const size_t max_ciphertext_size =
+ frame_encryptor_->GetMaxCiphertextByteSize(cricket::MEDIA_TYPE_VIDEO,
+ payload.size());
+ encrypted_video_payload.SetSize(max_ciphertext_size);
+
+ size_t bytes_written = 0;
+
+ // Enable header authentication if the field trial isn't disabled.
+ std::vector<uint8_t> additional_data;
+ if (generic_descriptor_auth_experiment_) {
+ additional_data = RtpDescriptorAuthentication(video_header);
+ }
+
+ if (frame_encryptor_->Encrypt(
+ cricket::MEDIA_TYPE_VIDEO, first_packet->Ssrc(), additional_data,
+ payload, encrypted_video_payload, &bytes_written) != 0) {
+ return false;
+ }
+
+ encrypted_video_payload.SetSize(bytes_written);
+ payload = encrypted_video_payload;
+ } else if (require_frame_encryption_) {
+ RTC_LOG(LS_WARNING)
+ << "No FrameEncryptor is attached to this video sending stream but "
+ "one is required since require_frame_encryption is set";
+ }
+
+ std::unique_ptr<RtpPacketizer> packetizer =
+ RtpPacketizer::Create(codec_type, payload, limits, video_header);
+
+ // TODO(bugs.webrtc.org/10714): retransmission_settings_ should generally be
+ // replaced by expected_retransmission_time_ms.has_value(). For now, though,
+ // only VP8 with an injected frame buffer controller actually controls it.
+ const bool allow_retransmission =
+ expected_retransmission_time_ms.has_value()
+ ? AllowRetransmission(temporal_id, retransmission_settings,
+ expected_retransmission_time_ms.value())
+ : false;
+ const size_t num_packets = packetizer->NumPackets();
+
+ if (num_packets == 0)
+ return false;
+
+ bool first_frame = first_frame_sent_();
+ std::vector<std::unique_ptr<RtpPacketToSend>> rtp_packets;
+ for (size_t i = 0; i < num_packets; ++i) {
+ std::unique_ptr<RtpPacketToSend> packet;
+ int expected_payload_capacity;
+ // Choose the right packet template:
+ if (num_packets == 1) {
+ packet = std::move(single_packet);
+ expected_payload_capacity =
+ limits.max_payload_len - limits.single_packet_reduction_len;
+ } else if (i == 0) {
+ packet = std::move(first_packet);
+ expected_payload_capacity =
+ limits.max_payload_len - limits.first_packet_reduction_len;
+ } else if (i == num_packets - 1) {
+ packet = std::move(last_packet);
+ expected_payload_capacity =
+ limits.max_payload_len - limits.last_packet_reduction_len;
+ } else {
+ packet = std::make_unique<RtpPacketToSend>(*middle_packet);
+ expected_payload_capacity = limits.max_payload_len;
+ }
+
+ packet->set_first_packet_of_frame(i == 0);
+
+ if (!packetizer->NextPacket(packet.get()))
+ return false;
+ RTC_DCHECK_LE(packet->payload_size(), expected_payload_capacity);
+
+ packet->set_allow_retransmission(allow_retransmission);
+ packet->set_is_key_frame(video_header.frame_type ==
+ VideoFrameType::kVideoFrameKey);
+
+ // Put packetization finish timestamp into extension.
+ if (packet->HasExtension<VideoTimingExtension>()) {
+ packet->set_packetization_finish_time(clock_->CurrentTime());
+ }
+
+ packet->set_fec_protect_packet(use_fec);
+
+ if (red_enabled()) {
+ // TODO(sprang): Consider packetizing directly into packets with the RED
+ // header already in place, to avoid this copy.
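+ // RED (RFC 2198) wraps the media payload and carries the original
+ // payload type in its header, so the outer payload type is set to the
+ // RED one below.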
+ std::unique_ptr<RtpPacketToSend> red_packet(new RtpPacketToSend(*packet));
+ BuildRedPayload(*packet, red_packet.get());
+ red_packet->SetPayloadType(*red_payload_type_);
+ red_packet->set_is_red(true);
+
+ // Append `red_packet` instead of `packet` to output.
+ red_packet->set_packet_type(RtpPacketMediaType::kVideo);
+ red_packet->set_allow_retransmission(packet->allow_retransmission());
+ rtp_packets.emplace_back(std::move(red_packet));
+ } else {
+ packet->set_packet_type(RtpPacketMediaType::kVideo);
+ rtp_packets.emplace_back(std::move(packet));
+ }
+
+ if (first_frame) {
+ if (i == 0) {
+ RTC_LOG(LS_INFO)
+ << "Sent first RTP packet of the first video frame (pre-pacer)";
+ }
+ if (i == num_packets - 1) {
+ RTC_LOG(LS_INFO)
+ << "Sent last RTP packet of the first video frame (pre-pacer)";
+ }
+ }
+ }
+
+ LogAndSendToNetwork(std::move(rtp_packets), payload.size());
+
+ // Update details about the last sent frame.
+ last_rotation_ = video_header.rotation;
+
+ if (video_header.color_space != last_color_space_) {
+ last_color_space_ = video_header.color_space;
+ transmit_color_space_next_frame_ = !IsBaseLayer(video_header);
+ } else {
+ transmit_color_space_next_frame_ =
+ transmit_color_space_next_frame_ ? !IsBaseLayer(video_header) : false;
+ }
+
+ if (video_header.frame_type == VideoFrameType::kVideoFrameKey ||
+ PacketWillLikelyBeRequestedForRestransmitionIfLost(video_header)) {
+ // This frame will likely be delivered; no need to populate the playout
+ // delay extension until the delay changes again.
+ playout_delay_pending_ = false;
+ if (send_allocation_ == SendVideoLayersAllocation::kSendWithResolution) {
+ last_full_sent_allocation_ = allocation_;
+ }
+ send_allocation_ = SendVideoLayersAllocation::kDontSend;
+ }
+
+ TRACE_EVENT_ASYNC_END1("webrtc", "Video", capture_time_ms, "timestamp",
+ rtp_timestamp);
+ return true;
+}
+
+bool RTPSenderVideo::SendEncodedImage(
+ int payload_type,
+ absl::optional<VideoCodecType> codec_type,
+ uint32_t rtp_timestamp,
+ const EncodedImage& encoded_image,
+ RTPVideoHeader video_header,
+ absl::optional<int64_t> expected_retransmission_time_ms) {
+ if (frame_transformer_delegate_) {
+ // The frame will be sent async once transformed.
+ return frame_transformer_delegate_->TransformFrame(
+ payload_type, codec_type, rtp_timestamp, encoded_image, video_header,
+ expected_retransmission_time_ms);
+ }
+ return SendVideo(payload_type, codec_type, rtp_timestamp,
+ encoded_image.capture_time_ms_, encoded_image, video_header,
+ expected_retransmission_time_ms);
+}
+
+uint32_t RTPSenderVideo::PacketizationOverheadBps() const {
+ MutexLock lock(&stats_mutex_);
+ return packetization_overhead_bitrate_.Rate(clock_->TimeInMilliseconds())
+ .value_or(0);
+}
+
+bool RTPSenderVideo::AllowRetransmission(
+ uint8_t temporal_id,
+ int32_t retransmission_settings,
+ int64_t expected_retransmission_time_ms) {
+ if (retransmission_settings == kRetransmitOff)
+ return false;
+
+ MutexLock lock(&stats_mutex_);
+ // Media packet storage.
+ if ((retransmission_settings & kConditionallyRetransmitHigherLayers) &&
+ UpdateConditionalRetransmit(temporal_id,
+ expected_retransmission_time_ms)) {
+ retransmission_settings |= kRetransmitHigherLayers;
+ }
+
+ if (temporal_id == kNoTemporalIdx)
+ return true;
+
+ if ((retransmission_settings & kRetransmitBaseLayer) && temporal_id == 0)
+ return true;
+
+ if ((retransmission_settings & kRetransmitHigherLayers) && temporal_id > 0)
+ return true;
+
+ return false;
+}
+
+uint8_t RTPSenderVideo::GetTemporalId(const RTPVideoHeader& header) {
+ struct TemporalIdGetter {
+ uint8_t operator()(const RTPVideoHeaderVP8& vp8) { return vp8.temporalIdx; }
+ uint8_t operator()(const RTPVideoHeaderVP9& vp9) {
+ return vp9.temporal_idx;
+ }
+ uint8_t operator()(const RTPVideoHeaderH264&) { return kNoTemporalIdx; }
+ uint8_t operator()(const RTPVideoHeaderLegacyGeneric&) {
+ return kNoTemporalIdx;
+ }
+ uint8_t operator()(const absl::monostate&) { return kNoTemporalIdx; }
+ };
+ return absl::visit(TemporalIdGetter(), header.video_type_header);
+}
+
+bool RTPSenderVideo::UpdateConditionalRetransmit(
+ uint8_t temporal_id,
+ int64_t expected_retransmission_time_ms) {
+ int64_t now_ms = clock_->TimeInMilliseconds();
+ // Update stats for any temporal layer.
+ TemporalLayerStats* current_layer_stats =
+ &frame_stats_by_temporal_layer_[temporal_id];
+ current_layer_stats->frame_rate_fp1000s.Update(1, now_ms);
+ int64_t tl_frame_interval = now_ms - current_layer_stats->last_frame_time_ms;
+ current_layer_stats->last_frame_time_ms = now_ms;
+
+ // Conditional retransmit only applies to upper layers.
+ if (temporal_id != kNoTemporalIdx && temporal_id > 0) {
+ if (tl_frame_interval >= kMaxUnretransmittableFrameIntervalMs) {
+ // It has been too long since a retransmittable frame in this layer;
+ // enable NACK protection.
+ return true;
+ } else {
+ // Estimate when the next frame of any lower layer will be sent.
+ const int64_t kUndefined = std::numeric_limits<int64_t>::max();
+ int64_t expected_next_frame_time = kUndefined;
+ for (int i = temporal_id - 1; i >= 0; --i) {
+ TemporalLayerStats* stats = &frame_stats_by_temporal_layer_[i];
+ absl::optional<uint32_t> rate = stats->frame_rate_fp1000s.Rate(now_ms);
+ if (rate) {
+ int64_t tl_next = stats->last_frame_time_ms + 1000000 / *rate;
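+ // `rate` is in frames per 1000 seconds, so 1000000 / rate is the
+ // expected frame interval in ms; e.g. 30 fps gives rate 30000 and a
+ // ~33 ms interval.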
+ if (tl_next - now_ms > -expected_retransmission_time_ms &&
+ tl_next < expected_next_frame_time) {
+ expected_next_frame_time = tl_next;
+ }
+ }
+ }
+
+ if (expected_next_frame_time == kUndefined ||
+ expected_next_frame_time - now_ms > expected_retransmission_time_ms) {
+ // The next frame in a lower layer is expected later than a retransmission
+ // is estimated to arrive (or we cannot tell due to lack of data), so
+ // allow this packet to be NACKed.
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+void RTPSenderVideo::MaybeUpdateCurrentPlayoutDelay(
+ const RTPVideoHeader& header) {
+ VideoPlayoutDelay requested_delay =
+ forced_playout_delay_.value_or(header.playout_delay);
+
+ if (IsNoopDelay(requested_delay)) {
+ return;
+ }
+
+ if (requested_delay.min_ms > PlayoutDelayLimits::kMaxMs ||
+ requested_delay.max_ms > PlayoutDelayLimits::kMaxMs) {
+ RTC_DLOG(LS_ERROR)
+ << "Requested playout delay values out of range, ignored";
+ return;
+ }
+ if (requested_delay.max_ms != -1 &&
+ requested_delay.min_ms > requested_delay.max_ms) {
+ RTC_DLOG(LS_ERROR) << "Requested playout delay values out of order";
+ return;
+ }
+
+ if (!playout_delay_pending_) {
+ current_playout_delay_ = requested_delay;
+ playout_delay_pending_ = true;
+ return;
+ }
+
+ if ((requested_delay.min_ms == -1 ||
+ requested_delay.min_ms == current_playout_delay_.min_ms) &&
+ (requested_delay.max_ms == -1 ||
+ requested_delay.max_ms == current_playout_delay_.max_ms)) {
+ // No change, ignore.
+ return;
+ }
+
+ if (requested_delay.min_ms == -1) {
+ RTC_DCHECK_GE(requested_delay.max_ms, 0);
+ requested_delay.min_ms =
+ std::min(current_playout_delay_.min_ms, requested_delay.max_ms);
+ }
+ if (requested_delay.max_ms == -1) {
+ requested_delay.max_ms =
+ std::max(current_playout_delay_.max_ms, requested_delay.min_ms);
+ }
+
+ current_playout_delay_ = requested_delay;
+ playout_delay_pending_ = true;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_sender_video.h b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_sender_video.h
new file mode 100644
index 0000000000..206fcab14f
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_sender_video.h
@@ -0,0 +1,254 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_RTP_RTCP_SOURCE_RTP_SENDER_VIDEO_H_
+#define MODULES_RTP_RTCP_SOURCE_RTP_SENDER_VIDEO_H_
+
+#include <map>
+#include <memory>
+#include <vector>
+
+#include "absl/strings/string_view.h"
+#include "absl/types/optional.h"
+#include "api/array_view.h"
+#include "api/frame_transformer_interface.h"
+#include "api/scoped_refptr.h"
+#include "api/sequence_checker.h"
+#include "api/task_queue/task_queue_base.h"
+#include "api/transport/rtp/dependency_descriptor.h"
+#include "api/video/video_codec_type.h"
+#include "api/video/video_frame_type.h"
+#include "api/video/video_layers_allocation.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "modules/rtp_rtcp/source/absolute_capture_time_sender.h"
+#include "modules/rtp_rtcp/source/active_decode_targets_helper.h"
+#include "modules/rtp_rtcp/source/rtp_rtcp_config.h"
+#include "modules/rtp_rtcp/source/rtp_sender.h"
+#include "modules/rtp_rtcp/source/rtp_sender_video_frame_transformer_delegate.h"
+#include "modules/rtp_rtcp/source/rtp_video_header.h"
+#include "modules/rtp_rtcp/source/video_fec_generator.h"
+#include "rtc_base/one_time_event.h"
+#include "rtc_base/race_checker.h"
+#include "rtc_base/rate_statistics.h"
+#include "rtc_base/synchronization/mutex.h"
+#include "rtc_base/thread_annotations.h"
+
+namespace webrtc {
+
+class FrameEncryptorInterface;
+class RtpPacketizer;
+class RtpPacketToSend;
+
+// kConditionallyRetransmitHigherLayers allows retransmission of video frames
+// in higher layers if either the last frame in that layer was too far back in
+// time, or if we estimate that a new frame will be available in a lower layer
+// in a shorter time than it would take to request and receive a retransmission.
+enum RetransmissionMode : uint8_t {
+ kRetransmitOff = 0x0,
+ kRetransmitBaseLayer = 0x2,
+ kRetransmitHigherLayers = 0x4,
+ kRetransmitAllLayers = 0x6,
+ kConditionallyRetransmitHigherLayers = 0x8
+};
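+// The modes are bit flags that may be combined; e.g. kRetransmitAllLayers ==
+// kRetransmitBaseLayer | kRetransmitHigherLayers (0x2 | 0x4 == 0x6).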
+
+class RTPSenderVideo {
+ public:
+ static constexpr int64_t kTLRateWindowSizeMs = 2500;
+
+ struct Config {
+ Config() = default;
+ Config(const Config&) = delete;
+ Config(Config&&) = default;
+
+ // All members of this struct, with the exception of `field_trials`, are
+ // expected to outlive the RTPSenderVideo object they are passed to.
+ Clock* clock = nullptr;
+ RTPSender* rtp_sender = nullptr;
+ // Some FEC data is duplicated here in preparation for moving FEC to
+ // the egress stage.
+ absl::optional<VideoFecGenerator::FecType> fec_type;
+ size_t fec_overhead_bytes = 0; // Per packet max FEC overhead.
+ FrameEncryptorInterface* frame_encryptor = nullptr;
+ bool require_frame_encryption = false;
+ bool enable_retransmit_all_layers = false;
+ absl::optional<int> red_payload_type;
+ const FieldTrialsView* field_trials = nullptr;
+ rtc::scoped_refptr<FrameTransformerInterface> frame_transformer;
+ TaskQueueBase* send_transport_queue = nullptr;
+ };
+
+ explicit RTPSenderVideo(const Config& config);
+
+ virtual ~RTPSenderVideo();
+
+ // expected_retransmission_time_ms.has_value() -> retransmission allowed.
+ // `capture_time_ms` and `clock::CurrentTime` should be using the same epoch.
+ // Calls to this method are assumed to be externally serialized.
+ bool SendVideo(int payload_type,
+ absl::optional<VideoCodecType> codec_type,
+ uint32_t rtp_timestamp,
+ int64_t capture_time_ms,
+ rtc::ArrayView<const uint8_t> payload,
+ RTPVideoHeader video_header,
+ absl::optional<int64_t> expected_retransmission_time_ms);
+
+ bool SendEncodedImage(
+ int payload_type,
+ absl::optional<VideoCodecType> codec_type,
+ uint32_t rtp_timestamp,
+ const EncodedImage& encoded_image,
+ RTPVideoHeader video_header,
+ absl::optional<int64_t> expected_retransmission_time_ms);
+
+ // Configures the video structure produced by the encoder to be sent using
+ // the dependency descriptor RTP header extension. The next call to
+ // SendVideo should have video_header.frame_type == kVideoFrameKey.
+ // All calls to SendVideo after this call must use a video_header that is
+ // compatible with the video_structure.
+ void SetVideoStructure(const FrameDependencyStructure* video_structure);
+ // Should only be used by an RTPSenderVideoFrameTransformerDelegate and
+ // exists to ensure correct synchronization.
+ void SetVideoStructureAfterTransformation(
+ const FrameDependencyStructure* video_structure);
+
+ // Sets the current active VideoLayersAllocation. The allocation is sent
+ // using the RTP video layers allocation extension: in full on every key
+ // frame, and once per call to this method on a non-discardable delta
+ // frame, in which case it does not contain resolution and frame rate.
+ void SetVideoLayersAllocation(VideoLayersAllocation allocation);
+ // Should only be used by an RTPSenderVideoFrameTransformerDelegate and
+ // exists to ensure correct synchronization.
+ void SetVideoLayersAllocationAfterTransformation(
+ VideoLayersAllocation allocation);
+
+ // Returns the current packetization overhead rate, in bps. Note that this
+ // is the payload overhead, e.g. the VP8 payload headers, not the RTP
+ // headers or extensions.
+ // TODO(sprang): Consider moving this to RtpSenderEgress so it's in the same
+ // place as the other rate stats.
+ uint32_t PacketizationOverheadBps() const;
+
+ protected:
+ static uint8_t GetTemporalId(const RTPVideoHeader& header);
+ bool AllowRetransmission(uint8_t temporal_id,
+ int32_t retransmission_settings,
+ int64_t expected_retransmission_time_ms);
+
+ private:
+ struct TemporalLayerStats {
+ TemporalLayerStats()
+ : frame_rate_fp1000s(kTLRateWindowSizeMs, 1000 * 1000),
+ last_frame_time_ms(0) {}
+ // Frame rate, in frames per 1000 seconds. This essentially turns the fps
+ // value into a fixed-point value with three decimals, which improves
+ // precision at low frame rates.
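+ // For example, 30 fps is stored as 30000.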
+ RateStatistics frame_rate_fp1000s;
+ int64_t last_frame_time_ms;
+ };
+
+ enum class SendVideoLayersAllocation {
+ kSendWithResolution,
+ kSendWithoutResolution,
+ kDontSend
+ };
+
+ void SetVideoStructureInternal(
+ const FrameDependencyStructure* video_structure);
+ void SetVideoLayersAllocationInternal(VideoLayersAllocation allocation);
+
+ void AddRtpHeaderExtensions(const RTPVideoHeader& video_header,
+ bool first_packet,
+ bool last_packet,
+ RtpPacketToSend* packet) const
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(send_checker_);
+
+ size_t FecPacketOverhead() const RTC_EXCLUSIVE_LOCKS_REQUIRED(send_checker_);
+
+ void LogAndSendToNetwork(
+ std::vector<std::unique_ptr<RtpPacketToSend>> packets,
+ size_t unpacketized_payload_size);
+
+ bool red_enabled() const { return red_payload_type_.has_value(); }
+
+ bool UpdateConditionalRetransmit(uint8_t temporal_id,
+ int64_t expected_retransmission_time_ms)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(stats_mutex_);
+
+ void MaybeUpdateCurrentPlayoutDelay(const RTPVideoHeader& header)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(send_checker_);
+
+ RTPSender* const rtp_sender_;
+ Clock* const clock_;
+
+ const int32_t retransmission_settings_;
+
+ // These members should only be accessed from within SendVideo() to avoid
+ // potential race conditions.
+ rtc::RaceChecker send_checker_;
+ VideoRotation last_rotation_ RTC_GUARDED_BY(send_checker_);
+ absl::optional<ColorSpace> last_color_space_ RTC_GUARDED_BY(send_checker_);
+ bool transmit_color_space_next_frame_ RTC_GUARDED_BY(send_checker_);
+ std::unique_ptr<FrameDependencyStructure> video_structure_
+ RTC_GUARDED_BY(send_checker_);
+ absl::optional<VideoLayersAllocation> allocation_
+ RTC_GUARDED_BY(send_checker_);
+ // Flag indicating if we should send `allocation_`.
+ SendVideoLayersAllocation send_allocation_ RTC_GUARDED_BY(send_checker_);
+ absl::optional<VideoLayersAllocation> last_full_sent_allocation_
+ RTC_GUARDED_BY(send_checker_);
+
+ // Current target playout delay.
+ VideoPlayoutDelay current_playout_delay_ RTC_GUARDED_BY(send_checker_);
+ // Flag indicating if we need to send `current_playout_delay_` in order
+ // to guarantee it gets delivered.
+ bool playout_delay_pending_;
+ // Set by the field trial WebRTC-ForceSendPlayoutDelay to override the playout
+ // delay of outgoing video frames.
+ const absl::optional<VideoPlayoutDelay> forced_playout_delay_;
+
+ // Should never be held when calling out of this class.
+ Mutex mutex_;
+
+ const absl::optional<int> red_payload_type_;
+ absl::optional<VideoFecGenerator::FecType> fec_type_;
+ const size_t fec_overhead_bytes_; // Per packet max FEC overhead.
+
+ mutable Mutex stats_mutex_;
+ RateStatistics packetization_overhead_bitrate_ RTC_GUARDED_BY(stats_mutex_);
+
+ std::map<int, TemporalLayerStats> frame_stats_by_temporal_layer_
+ RTC_GUARDED_BY(stats_mutex_);
+
+ OneTimeEvent first_frame_sent_;
+
+ // E2EE Custom Video Frame Encryptor (optional)
+ FrameEncryptorInterface* const frame_encryptor_ = nullptr;
+ // If set to true, all outgoing frames are required to pass through an
+ // initialized frame_encryptor_ before being sent over the network;
+ // otherwise the payloads will be dropped.
+ const bool require_frame_encryption_;
+ // Set to true if the generic descriptor should be authenticated.
+ const bool generic_descriptor_auth_experiment_;
+
+ AbsoluteCaptureTimeSender absolute_capture_time_sender_;
+ // Tracks updates to the active decode targets and decides when the active
+ // decode targets bitmask should be attached to the dependency descriptor.
+ ActiveDecodeTargetsHelper active_decode_targets_tracker_;
+
+ const rtc::scoped_refptr<RTPSenderVideoFrameTransformerDelegate>
+ frame_transformer_delegate_;
+
+ const bool include_capture_clock_offset_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_RTP_RTCP_SOURCE_RTP_SENDER_VIDEO_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_sender_video_frame_transformer_delegate.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_sender_video_frame_transformer_delegate.cc
new file mode 100644
index 0000000000..3a9f13ef6c
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_sender_video_frame_transformer_delegate.cc
@@ -0,0 +1,190 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtp_sender_video_frame_transformer_delegate.h"
+
+#include <utility>
+#include <vector>
+
+#include "absl/memory/memory.h"
+#include "modules/rtp_rtcp/source/rtp_descriptor_authentication.h"
+#include "modules/rtp_rtcp/source/rtp_sender_video.h"
+
+namespace webrtc {
+namespace {
+
+class TransformableVideoSenderFrame : public TransformableVideoFrameInterface {
+ public:
+ TransformableVideoSenderFrame(
+ const EncodedImage& encoded_image,
+ const RTPVideoHeader& video_header,
+ int payload_type,
+ absl::optional<VideoCodecType> codec_type,
+ uint32_t rtp_timestamp,
+ absl::optional<int64_t> expected_retransmission_time_ms,
+ uint32_t ssrc)
+ : encoded_data_(encoded_image.GetEncodedData()),
+ header_(video_header),
+ metadata_(header_),
+ frame_type_(encoded_image._frameType),
+ payload_type_(payload_type),
+ codec_type_(codec_type),
+ timestamp_(rtp_timestamp),
+ capture_time_ms_(encoded_image.capture_time_ms_),
+ expected_retransmission_time_ms_(expected_retransmission_time_ms),
+ ssrc_(ssrc) {
+ RTC_DCHECK_GE(payload_type_, 0);
+ RTC_DCHECK_LE(payload_type_, 127);
+ }
+
+ ~TransformableVideoSenderFrame() override = default;
+
+ // Implements TransformableVideoFrameInterface.
+ rtc::ArrayView<const uint8_t> GetData() const override {
+ return *encoded_data_;
+ }
+
+ void SetData(rtc::ArrayView<const uint8_t> data) override {
+ encoded_data_ = EncodedImageBuffer::Create(data.data(), data.size());
+ }
+
+ uint32_t GetTimestamp() const override { return timestamp_; }
+ uint32_t GetSsrc() const override { return ssrc_; }
+
+ bool IsKeyFrame() const override {
+ return frame_type_ == VideoFrameType::kVideoFrameKey;
+ }
+
+ std::vector<uint8_t> GetAdditionalData() const override {
+ return RtpDescriptorAuthentication(header_);
+ }
+
+ const VideoFrameMetadata& GetMetadata() const override { return metadata_; }
+
+ const RTPVideoHeader& GetHeader() const { return header_; }
+ uint8_t GetPayloadType() const override { return payload_type_; }
+ absl::optional<VideoCodecType> GetCodecType() const { return codec_type_; }
+ int64_t GetCaptureTimeMs() const { return capture_time_ms_; }
+
+ const absl::optional<int64_t>& GetExpectedRetransmissionTimeMs() const {
+ return expected_retransmission_time_ms_;
+ }
+
+ Direction GetDirection() const override { return Direction::kSender; }
+
+ private:
+ rtc::scoped_refptr<EncodedImageBufferInterface> encoded_data_;
+ const RTPVideoHeader header_;
+ const VideoFrameMetadata metadata_;
+ const VideoFrameType frame_type_;
+ const uint8_t payload_type_;
+ const absl::optional<VideoCodecType> codec_type_ = absl::nullopt;
+ const uint32_t timestamp_;
+ const int64_t capture_time_ms_;
+ const absl::optional<int64_t> expected_retransmission_time_ms_;
+ const uint32_t ssrc_;
+};
+} // namespace
+
+RTPSenderVideoFrameTransformerDelegate::RTPSenderVideoFrameTransformerDelegate(
+ RTPSenderVideo* sender,
+ rtc::scoped_refptr<FrameTransformerInterface> frame_transformer,
+ uint32_t ssrc,
+ TaskQueueBase* send_transport_queue)
+ : sender_(sender),
+ frame_transformer_(std::move(frame_transformer)),
+ ssrc_(ssrc),
+ send_transport_queue_(send_transport_queue) {}
+
+void RTPSenderVideoFrameTransformerDelegate::Init() {
+ frame_transformer_->RegisterTransformedFrameSinkCallback(
+ rtc::scoped_refptr<TransformedFrameCallback>(this), ssrc_);
+}
+
+bool RTPSenderVideoFrameTransformerDelegate::TransformFrame(
+ int payload_type,
+ absl::optional<VideoCodecType> codec_type,
+ uint32_t rtp_timestamp,
+ const EncodedImage& encoded_image,
+ RTPVideoHeader video_header,
+ absl::optional<int64_t> expected_retransmission_time_ms) {
+ if (!encoder_queue_) {
+ // Save the current task queue so the transformed frame can be posted for
+ // sending once it is transformed. When there is no current task queue,
+ // i.e. encoding is done on an external thread (for example in the case of
+ // hardware encoders), use the send transport queue instead.
+ TaskQueueBase* current = TaskQueueBase::Current();
+ encoder_queue_ = current ? current : send_transport_queue_;
+ }
+ frame_transformer_->Transform(std::make_unique<TransformableVideoSenderFrame>(
+ encoded_image, video_header, payload_type, codec_type, rtp_timestamp,
+ expected_retransmission_time_ms, ssrc_));
+ return true;
+}
+
+void RTPSenderVideoFrameTransformerDelegate::OnTransformedFrame(
+ std::unique_ptr<TransformableFrameInterface> frame) {
+ MutexLock lock(&sender_lock_);
+
+ // The encoder queue normally gets destroyed after the sender;
+ // however, it might still be null by the time a previously queued frame
+ // arrives.
+ if (!sender_ || !encoder_queue_)
+ return;
+ rtc::scoped_refptr<RTPSenderVideoFrameTransformerDelegate> delegate(this);
+ encoder_queue_->PostTask(
+ [delegate = std::move(delegate), frame = std::move(frame)]() mutable {
+ delegate->SendVideo(std::move(frame));
+ });
+}
+
+void RTPSenderVideoFrameTransformerDelegate::SendVideo(
+ std::unique_ptr<TransformableFrameInterface> transformed_frame) const {
+ RTC_CHECK(encoder_queue_->IsCurrent());
+ RTC_CHECK_EQ(transformed_frame->GetDirection(),
+ TransformableFrameInterface::Direction::kSender);
+ MutexLock lock(&sender_lock_);
+ if (!sender_)
+ return;
+ auto* transformed_video_frame =
+ static_cast<TransformableVideoSenderFrame*>(transformed_frame.get());
+ sender_->SendVideo(
+ transformed_video_frame->GetPayloadType(),
+ transformed_video_frame->GetCodecType(),
+ transformed_video_frame->GetTimestamp(),
+ transformed_video_frame->GetCaptureTimeMs(),
+ transformed_video_frame->GetData(),
+ transformed_video_frame->GetHeader(),
+ transformed_video_frame->GetExpectedRetransmissionTimeMs());
+}
+
+void RTPSenderVideoFrameTransformerDelegate::SetVideoStructureUnderLock(
+ const FrameDependencyStructure* video_structure) {
+ MutexLock lock(&sender_lock_);
+ RTC_CHECK(sender_);
+ sender_->SetVideoStructureAfterTransformation(video_structure);
+}
+
+void RTPSenderVideoFrameTransformerDelegate::SetVideoLayersAllocationUnderLock(
+ VideoLayersAllocation allocation) {
+ MutexLock lock(&sender_lock_);
+ RTC_CHECK(sender_);
+ sender_->SetVideoLayersAllocationAfterTransformation(std::move(allocation));
+}
+
+void RTPSenderVideoFrameTransformerDelegate::Reset() {
+ frame_transformer_->UnregisterTransformedFrameSinkCallback(ssrc_);
+ frame_transformer_ = nullptr;
+ {
+ MutexLock lock(&sender_lock_);
+ sender_ = nullptr;
+ }
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_sender_video_frame_transformer_delegate.h b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_sender_video_frame_transformer_delegate.h
new file mode 100644
index 0000000000..10d0241455
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_sender_video_frame_transformer_delegate.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_RTP_RTCP_SOURCE_RTP_SENDER_VIDEO_FRAME_TRANSFORMER_DELEGATE_H_
+#define MODULES_RTP_RTCP_SOURCE_RTP_SENDER_VIDEO_FRAME_TRANSFORMER_DELEGATE_H_
+
+#include <memory>
+
+#include "api/frame_transformer_interface.h"
+#include "api/scoped_refptr.h"
+#include "api/task_queue/task_queue_base.h"
+#include "api/video/video_layers_allocation.h"
+#include "rtc_base/synchronization/mutex.h"
+
+namespace webrtc {
+
+class RTPSenderVideo;
+
+// Delegates calls to FrameTransformerInterface to transform frames, and to
+// RTPSenderVideo to send the transformed frames. Ensures thread-safe access to
+// the sender.
+class RTPSenderVideoFrameTransformerDelegate : public TransformedFrameCallback {
+ public:
+ RTPSenderVideoFrameTransformerDelegate(
+ RTPSenderVideo* sender,
+ rtc::scoped_refptr<FrameTransformerInterface> frame_transformer,
+ uint32_t ssrc,
+ TaskQueueBase* send_transport_queue);
+
+ void Init();
+
+ // Delegates the call to FrameTransformerInterface::TransformFrame.
+ bool TransformFrame(int payload_type,
+ absl::optional<VideoCodecType> codec_type,
+ uint32_t rtp_timestamp,
+ const EncodedImage& encoded_image,
+ RTPVideoHeader video_header,
+ absl::optional<int64_t> expected_retransmission_time_ms);
+
+ // Implements TransformedFrameCallback. Can be called on any thread. Posts
+ // the transformed frame to be sent on the `encoder_queue_`.
+ void OnTransformedFrame(
+ std::unique_ptr<TransformableFrameInterface> frame) override;
+
+ // Delegates the call to RTPSenderVideo::SendVideo on the `encoder_queue_`.
+ void SendVideo(std::unique_ptr<TransformableFrameInterface> frame) const;
+
+ // Delegates the call to
+ // RTPSenderVideo::SetVideoStructureAfterTransformation under
+ // `sender_lock_`.
+ void SetVideoStructureUnderLock(
+ const FrameDependencyStructure* video_structure);
+
+ // Delegates the call to
+ // RTPSenderVideo::SetVideoLayersAllocationAfterTransformation under
+ // `sender_lock_`.
+ void SetVideoLayersAllocationUnderLock(VideoLayersAllocation allocation);
+
+ // Unregisters and releases the `frame_transformer_` reference, and resets
+ // `sender_` under lock. Called from the RTPSenderVideo destructor to
+ // prevent `sender_` from dangling.
+ void Reset();
+
+ protected:
+ ~RTPSenderVideoFrameTransformerDelegate() override = default;
+
+ private:
+ mutable Mutex sender_lock_;
+ RTPSenderVideo* sender_ RTC_GUARDED_BY(sender_lock_);
+ rtc::scoped_refptr<FrameTransformerInterface> frame_transformer_;
+ const uint32_t ssrc_;
+ TaskQueueBase* encoder_queue_ = nullptr;
+ TaskQueueBase* send_transport_queue_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_RTP_RTCP_SOURCE_RTP_SENDER_VIDEO_FRAME_TRANSFORMER_DELEGATE_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_sender_video_unittest.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_sender_video_unittest.cc
new file mode 100644
index 0000000000..3b4347408b
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_sender_video_unittest.cc
@@ -0,0 +1,1571 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtp_sender_video.h"
+
+#include <memory>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "absl/memory/memory.h"
+#include "api/rtp_headers.h"
+#include "api/test/mock_frame_encryptor.h"
+#include "api/transport/field_trial_based_config.h"
+#include "api/transport/rtp/dependency_descriptor.h"
+#include "api/video/video_codec_constants.h"
+#include "api/video/video_timing.h"
+#include "common_video/generic_frame_descriptor/generic_frame_info.h"
+#include "modules/rtp_rtcp/include/rtp_cvo.h"
+#include "modules/rtp_rtcp/include/rtp_header_extension_map.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "modules/rtp_rtcp/source/rtp_dependency_descriptor_extension.h"
+#include "modules/rtp_rtcp/source/rtp_descriptor_authentication.h"
+#include "modules/rtp_rtcp/source/rtp_format_video_generic.h"
+#include "modules/rtp_rtcp/source/rtp_generic_frame_descriptor.h"
+#include "modules/rtp_rtcp/source/rtp_generic_frame_descriptor_extension.h"
+#include "modules/rtp_rtcp/source/rtp_header_extensions.h"
+#include "modules/rtp_rtcp/source/rtp_packet_received.h"
+#include "modules/rtp_rtcp/source/rtp_rtcp_impl2.h"
+#include "modules/rtp_rtcp/source/rtp_video_layers_allocation_extension.h"
+#include "rtc_base/arraysize.h"
+#include "rtc_base/rate_limiter.h"
+#include "rtc_base/task_queue_for_test.h"
+#include "rtc_base/thread.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "test/mock_frame_transformer.h"
+
+namespace webrtc {
+
+namespace {
+
+using ::testing::_;
+using ::testing::ContainerEq;
+using ::testing::ElementsAre;
+using ::testing::ElementsAreArray;
+using ::testing::IsEmpty;
+using ::testing::NiceMock;
+using ::testing::Return;
+using ::testing::ReturnArg;
+using ::testing::SaveArg;
+using ::testing::SizeIs;
+using ::testing::WithArgs;
+
+enum : int { // The first valid value is 1.
+ kAbsoluteSendTimeExtensionId = 1,
+ kGenericDescriptorId,
+ kDependencyDescriptorId,
+ kTransmissionTimeOffsetExtensionId,
+ kTransportSequenceNumberExtensionId,
+ kVideoRotationExtensionId,
+ kVideoTimingExtensionId,
+ kAbsoluteCaptureTimeExtensionId,
+ kPlayoutDelayExtensionId,
+ kVideoLayersAllocationExtensionId,
+};
+
+constexpr int kPayload = 100;
+constexpr VideoCodecType kType = VideoCodecType::kVideoCodecGeneric;
+constexpr uint32_t kTimestamp = 10;
+constexpr uint16_t kSeqNum = 33;
+constexpr uint32_t kSsrc = 725242;
+constexpr int kMaxPacketLength = 1500;
+constexpr uint64_t kStartTime = 123456789;
+constexpr int64_t kDefaultExpectedRetransmissionTimeMs = 125;
+
+class LoopbackTransportTest : public webrtc::Transport {
+ public:
+ LoopbackTransportTest() {
+ receivers_extensions_.Register<TransmissionOffset>(
+ kTransmissionTimeOffsetExtensionId);
+ receivers_extensions_.Register<AbsoluteSendTime>(
+ kAbsoluteSendTimeExtensionId);
+ receivers_extensions_.Register<TransportSequenceNumber>(
+ kTransportSequenceNumberExtensionId);
+ receivers_extensions_.Register<VideoOrientation>(kVideoRotationExtensionId);
+ receivers_extensions_.Register<VideoTimingExtension>(
+ kVideoTimingExtensionId);
+ receivers_extensions_.Register<RtpGenericFrameDescriptorExtension00>(
+ kGenericDescriptorId);
+ receivers_extensions_.Register<RtpDependencyDescriptorExtension>(
+ kDependencyDescriptorId);
+ receivers_extensions_.Register<AbsoluteCaptureTimeExtension>(
+ kAbsoluteCaptureTimeExtensionId);
+ receivers_extensions_.Register<PlayoutDelayLimits>(
+ kPlayoutDelayExtensionId);
+ receivers_extensions_.Register<RtpVideoLayersAllocationExtension>(
+ kVideoLayersAllocationExtensionId);
+ }
+
+ bool SendRtp(const uint8_t* data,
+ size_t len,
+ const PacketOptions& options) override {
+ sent_packets_.push_back(RtpPacketReceived(&receivers_extensions_));
+ EXPECT_TRUE(sent_packets_.back().Parse(data, len));
+ return true;
+ }
+ bool SendRtcp(const uint8_t* data, size_t len) override { return false; }
+ const RtpPacketReceived& last_sent_packet() { return sent_packets_.back(); }
+ int packets_sent() { return sent_packets_.size(); }
+ const std::vector<RtpPacketReceived>& sent_packets() const {
+ return sent_packets_;
+ }
+
+ private:
+ RtpHeaderExtensionMap receivers_extensions_;
+ std::vector<RtpPacketReceived> sent_packets_;
+};
+
+class TestRtpSenderVideo : public RTPSenderVideo {
+ public:
+ TestRtpSenderVideo(Clock* clock,
+ RTPSender* rtp_sender,
+ const FieldTrialsView& field_trials)
+ : RTPSenderVideo([&] {
+ Config config;
+ config.clock = clock;
+ config.rtp_sender = rtp_sender;
+ config.field_trials = &field_trials;
+ return config;
+ }()) {}
+ ~TestRtpSenderVideo() override {}
+
+ bool AllowRetransmission(const RTPVideoHeader& header,
+ int32_t retransmission_settings,
+ int64_t expected_retransmission_time_ms) {
+ return RTPSenderVideo::AllowRetransmission(GetTemporalId(header),
+ retransmission_settings,
+ expected_retransmission_time_ms);
+ }
+};
+
+class FieldTrials : public FieldTrialsView {
+ public:
+ explicit FieldTrials(bool use_send_side_bwe_with_overhead)
+ : use_send_side_bwe_with_overhead_(use_send_side_bwe_with_overhead),
+ include_capture_clock_offset_(false) {}
+
+ void set_include_capture_clock_offset(bool include_capture_clock_offset) {
+ include_capture_clock_offset_ = include_capture_clock_offset;
+ }
+
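+ // Note: the capture clock offset trial is modeled as opt-out here; Lookup
+ // below reports "Disabled" unless include_capture_clock_offset_ is set.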
+ std::string Lookup(absl::string_view key) const override {
+ if (key == "WebRTC-SendSideBwe-WithOverhead") {
+ return use_send_side_bwe_with_overhead_ ? "Enabled" : "";
+ } else if (key == "WebRTC-IncludeCaptureClockOffset") {
+ return include_capture_clock_offset_ ? "" : "Disabled";
+ }
+ return "";
+ }
+
+ private:
+ bool use_send_side_bwe_with_overhead_;
+ bool include_capture_clock_offset_;
+};
+
+class RtpSenderVideoTest : public ::testing::TestWithParam<bool> {
+ public:
+ RtpSenderVideoTest()
+ : field_trials_(GetParam()),
+ fake_clock_(kStartTime),
+ retransmission_rate_limiter_(&fake_clock_, 1000),
+ rtp_module_(ModuleRtpRtcpImpl2::Create([&] {
+ RtpRtcpInterface::Configuration config;
+ config.clock = &fake_clock_;
+ config.outgoing_transport = &transport_;
+ config.retransmission_rate_limiter = &retransmission_rate_limiter_;
+ config.field_trials = &field_trials_;
+ config.local_media_ssrc = kSsrc;
+ return config;
+ }())),
+ rtp_sender_video_(
+ std::make_unique<TestRtpSenderVideo>(&fake_clock_,
+ rtp_module_->RtpSender(),
+ field_trials_)) {
+ rtp_module_->SetSequenceNumber(kSeqNum);
+ rtp_module_->SetStartTimestamp(0);
+ }
+
+ void UsesMinimalVp8DescriptorWhenGenericFrameDescriptorExtensionIsUsed(
+ int version);
+
+ protected:
+ rtc::AutoThread main_thread_;
+ const RtpRtcpInterface::Configuration config_;
+ FieldTrials field_trials_;
+ SimulatedClock fake_clock_;
+ LoopbackTransportTest transport_;
+ RateLimiter retransmission_rate_limiter_;
+ std::unique_ptr<ModuleRtpRtcpImpl2> rtp_module_;
+ std::unique_ptr<TestRtpSenderVideo> rtp_sender_video_;
+};
+
+TEST_P(RtpSenderVideoTest, KeyFrameHasCVO) {
+ uint8_t kFrame[kMaxPacketLength];
+ rtp_module_->RegisterRtpHeaderExtension(VideoOrientation::Uri(),
+ kVideoRotationExtensionId);
+
+ RTPVideoHeader hdr;
+ hdr.rotation = kVideoRotation_0;
+ hdr.frame_type = VideoFrameType::kVideoFrameKey;
+ rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
+ kDefaultExpectedRetransmissionTimeMs);
+
+ VideoRotation rotation;
+ EXPECT_TRUE(
+ transport_.last_sent_packet().GetExtension<VideoOrientation>(&rotation));
+ EXPECT_EQ(kVideoRotation_0, rotation);
+}
+
+TEST_P(RtpSenderVideoTest, TimingFrameHasPacketizationTimestampSet) {
+ uint8_t kFrame[kMaxPacketLength];
+ const int64_t kPacketizationTimeMs = 100;
+ const int64_t kEncodeStartDeltaMs = 10;
+ const int64_t kEncodeFinishDeltaMs = 50;
+ rtp_module_->RegisterRtpHeaderExtension(VideoTimingExtension::Uri(),
+ kVideoTimingExtensionId);
+
+ const int64_t kCaptureTimestamp = fake_clock_.TimeInMilliseconds();
+
+ RTPVideoHeader hdr;
+ hdr.video_timing.flags = VideoSendTiming::kTriggeredByTimer;
+ hdr.video_timing.encode_start_delta_ms = kEncodeStartDeltaMs;
+ hdr.video_timing.encode_finish_delta_ms = kEncodeFinishDeltaMs;
+
+ fake_clock_.AdvanceTimeMilliseconds(kPacketizationTimeMs);
+ hdr.frame_type = VideoFrameType::kVideoFrameKey;
+ rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, kCaptureTimestamp,
+ kFrame, hdr,
+ kDefaultExpectedRetransmissionTimeMs);
+ VideoSendTiming timing;
+ EXPECT_TRUE(transport_.last_sent_packet().GetExtension<VideoTimingExtension>(
+ &timing));
+ EXPECT_EQ(kPacketizationTimeMs, timing.packetization_finish_delta_ms);
+ EXPECT_EQ(kEncodeStartDeltaMs, timing.encode_start_delta_ms);
+ EXPECT_EQ(kEncodeFinishDeltaMs, timing.encode_finish_delta_ms);
+}
+
+TEST_P(RtpSenderVideoTest, DeltaFrameHasCVOWhenChanged) {
+ uint8_t kFrame[kMaxPacketLength];
+ rtp_module_->RegisterRtpHeaderExtension(VideoOrientation::Uri(),
+ kVideoRotationExtensionId);
+
+ RTPVideoHeader hdr;
+ hdr.rotation = kVideoRotation_90;
+ hdr.frame_type = VideoFrameType::kVideoFrameKey;
+ EXPECT_TRUE(
+ rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
+ kDefaultExpectedRetransmissionTimeMs));
+
+ hdr.rotation = kVideoRotation_0;
+ hdr.frame_type = VideoFrameType::kVideoFrameDelta;
+ EXPECT_TRUE(
+ rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp + 1, 0, kFrame,
+ hdr, kDefaultExpectedRetransmissionTimeMs));
+
+ VideoRotation rotation;
+ EXPECT_TRUE(
+ transport_.last_sent_packet().GetExtension<VideoOrientation>(&rotation));
+ EXPECT_EQ(kVideoRotation_0, rotation);
+}
+
+TEST_P(RtpSenderVideoTest, DeltaFrameHasCVOWhenNonZero) {
+ uint8_t kFrame[kMaxPacketLength];
+ rtp_module_->RegisterRtpHeaderExtension(VideoOrientation::Uri(),
+ kVideoRotationExtensionId);
+
+ RTPVideoHeader hdr;
+ hdr.rotation = kVideoRotation_90;
+ hdr.frame_type = VideoFrameType::kVideoFrameKey;
+ EXPECT_TRUE(
+ rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
+ kDefaultExpectedRetransmissionTimeMs));
+
+ hdr.frame_type = VideoFrameType::kVideoFrameDelta;
+ EXPECT_TRUE(
+ rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp + 1, 0, kFrame,
+ hdr, kDefaultExpectedRetransmissionTimeMs));
+
+ VideoRotation rotation;
+ EXPECT_TRUE(
+ transport_.last_sent_packet().GetExtension<VideoOrientation>(&rotation));
+ EXPECT_EQ(kVideoRotation_90, rotation);
+}
+
+// Make sure rotation is parsed correctly when the Camera (C) and Flip (F) bits
+// are set in the CVO byte.
+TEST_P(RtpSenderVideoTest, SendVideoWithCameraAndFlipCVO) {
+ // Test extracting rotation when Camera (C) and Flip (F) bits are zero.
+ EXPECT_EQ(kVideoRotation_0, ConvertCVOByteToVideoRotation(0));
+ EXPECT_EQ(kVideoRotation_90, ConvertCVOByteToVideoRotation(1));
+ EXPECT_EQ(kVideoRotation_180, ConvertCVOByteToVideoRotation(2));
+ EXPECT_EQ(kVideoRotation_270, ConvertCVOByteToVideoRotation(3));
+ // Test extracting rotation when Camera (C) and Flip (F) bits are set.
+ const int flip_bit = 1 << 2;
+ const int camera_bit = 1 << 3;
+ EXPECT_EQ(kVideoRotation_0,
+ ConvertCVOByteToVideoRotation(flip_bit | camera_bit | 0));
+ EXPECT_EQ(kVideoRotation_90,
+ ConvertCVOByteToVideoRotation(flip_bit | camera_bit | 1));
+ EXPECT_EQ(kVideoRotation_180,
+ ConvertCVOByteToVideoRotation(flip_bit | camera_bit | 2));
+ EXPECT_EQ(kVideoRotation_270,
+ ConvertCVOByteToVideoRotation(flip_bit | camera_bit | 3));
+}
+
+TEST_P(RtpSenderVideoTest, RetransmissionTypesGeneric) {
+ RTPVideoHeader header;
+ header.codec = kVideoCodecGeneric;
+
+ EXPECT_FALSE(rtp_sender_video_->AllowRetransmission(
+ header, kRetransmitOff, kDefaultExpectedRetransmissionTimeMs));
+ EXPECT_TRUE(rtp_sender_video_->AllowRetransmission(
+ header, kRetransmitBaseLayer, kDefaultExpectedRetransmissionTimeMs));
+ EXPECT_TRUE(rtp_sender_video_->AllowRetransmission(
+ header, kRetransmitHigherLayers, kDefaultExpectedRetransmissionTimeMs));
+ EXPECT_TRUE(rtp_sender_video_->AllowRetransmission(
+ header, kConditionallyRetransmitHigherLayers,
+ kDefaultExpectedRetransmissionTimeMs));
+}
+
+TEST_P(RtpSenderVideoTest, RetransmissionTypesH264) {
+ RTPVideoHeader header;
+ header.video_type_header.emplace<RTPVideoHeaderH264>().packetization_mode =
+ H264PacketizationMode::NonInterleaved;
+ header.codec = kVideoCodecH264;
+
+ EXPECT_FALSE(rtp_sender_video_->AllowRetransmission(
+ header, kRetransmitOff, kDefaultExpectedRetransmissionTimeMs));
+ EXPECT_TRUE(rtp_sender_video_->AllowRetransmission(
+ header, kRetransmitBaseLayer, kDefaultExpectedRetransmissionTimeMs));
+ EXPECT_TRUE(rtp_sender_video_->AllowRetransmission(
+ header, kRetransmitHigherLayers, kDefaultExpectedRetransmissionTimeMs));
+ EXPECT_TRUE(rtp_sender_video_->AllowRetransmission(
+ header, kConditionallyRetransmitHigherLayers,
+ kDefaultExpectedRetransmissionTimeMs));
+}
+
+TEST_P(RtpSenderVideoTest, RetransmissionTypesVP8BaseLayer) {
+ RTPVideoHeader header;
+ header.codec = kVideoCodecVP8;
+ auto& vp8_header = header.video_type_header.emplace<RTPVideoHeaderVP8>();
+ vp8_header.temporalIdx = 0;
+
+ EXPECT_FALSE(rtp_sender_video_->AllowRetransmission(
+ header, kRetransmitOff, kDefaultExpectedRetransmissionTimeMs));
+ EXPECT_TRUE(rtp_sender_video_->AllowRetransmission(
+ header, kRetransmitBaseLayer, kDefaultExpectedRetransmissionTimeMs));
+ EXPECT_FALSE(rtp_sender_video_->AllowRetransmission(
+ header, kRetransmitHigherLayers, kDefaultExpectedRetransmissionTimeMs));
+ EXPECT_TRUE(rtp_sender_video_->AllowRetransmission(
+ header, kRetransmitHigherLayers | kRetransmitBaseLayer,
+ kDefaultExpectedRetransmissionTimeMs));
+ EXPECT_FALSE(rtp_sender_video_->AllowRetransmission(
+ header, kConditionallyRetransmitHigherLayers,
+ kDefaultExpectedRetransmissionTimeMs));
+ EXPECT_TRUE(rtp_sender_video_->AllowRetransmission(
+ header, kRetransmitBaseLayer | kConditionallyRetransmitHigherLayers,
+ kDefaultExpectedRetransmissionTimeMs));
+}
+
+TEST_P(RtpSenderVideoTest, RetransmissionTypesVP8HigherLayers) {
+ RTPVideoHeader header;
+ header.codec = kVideoCodecVP8;
+
+ auto& vp8_header = header.video_type_header.emplace<RTPVideoHeaderVP8>();
+ for (int tid = 1; tid <= kMaxTemporalStreams; ++tid) {
+ vp8_header.temporalIdx = tid;
+
+ EXPECT_FALSE(rtp_sender_video_->AllowRetransmission(
+ header, kRetransmitOff, kDefaultExpectedRetransmissionTimeMs));
+ EXPECT_FALSE(rtp_sender_video_->AllowRetransmission(
+ header, kRetransmitBaseLayer, kDefaultExpectedRetransmissionTimeMs));
+ EXPECT_TRUE(rtp_sender_video_->AllowRetransmission(
+ header, kRetransmitHigherLayers, kDefaultExpectedRetransmissionTimeMs));
+ EXPECT_TRUE(rtp_sender_video_->AllowRetransmission(
+ header, kRetransmitHigherLayers | kRetransmitBaseLayer,
+ kDefaultExpectedRetransmissionTimeMs));
+ }
+}
+
+TEST_P(RtpSenderVideoTest, RetransmissionTypesVP9) {
+ RTPVideoHeader header;
+ header.codec = kVideoCodecVP9;
+
+ auto& vp9_header = header.video_type_header.emplace<RTPVideoHeaderVP9>();
+ for (int tid = 1; tid <= kMaxTemporalStreams; ++tid) {
+ vp9_header.temporal_idx = tid;
+
+ EXPECT_FALSE(rtp_sender_video_->AllowRetransmission(
+ header, kRetransmitOff, kDefaultExpectedRetransmissionTimeMs));
+ EXPECT_FALSE(rtp_sender_video_->AllowRetransmission(
+ header, kRetransmitBaseLayer, kDefaultExpectedRetransmissionTimeMs));
+ EXPECT_TRUE(rtp_sender_video_->AllowRetransmission(
+ header, kRetransmitHigherLayers, kDefaultExpectedRetransmissionTimeMs));
+ EXPECT_TRUE(rtp_sender_video_->AllowRetransmission(
+ header, kRetransmitHigherLayers | kRetransmitBaseLayer,
+ kDefaultExpectedRetransmissionTimeMs));
+ }
+}
+
+TEST_P(RtpSenderVideoTest, ConditionalRetransmit) {
+ const int64_t kFrameIntervalMs = 33;
+ const int64_t kRttMs = (kFrameIntervalMs * 3) / 2;
+ const uint8_t kSettings =
+ kRetransmitBaseLayer | kConditionallyRetransmitHigherLayers;
+
+ // Insert VP8 frames for all temporal layers, but stop before the final index.
+ RTPVideoHeader header;
+ header.codec = kVideoCodecVP8;
+
+ // Fill averaging window to prevent rounding errors.
+ constexpr int kNumRepetitions =
+ (RTPSenderVideo::kTLRateWindowSizeMs + (kFrameIntervalMs / 2)) /
+ kFrameIntervalMs;
+ constexpr int kPattern[] = {0, 2, 1, 2};
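+ // {0, 2, 1, 2} mirrors a typical 3-temporal-layer pattern: TL0 and TL1
+ // each at 1/4 of the frame rate and TL2 at 1/2.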
+ auto& vp8_header = header.video_type_header.emplace<RTPVideoHeaderVP8>();
+ for (size_t i = 0; i < arraysize(kPattern) * kNumRepetitions; ++i) {
+ vp8_header.temporalIdx = kPattern[i % arraysize(kPattern)];
+ rtp_sender_video_->AllowRetransmission(header, kSettings, kRttMs);
+ fake_clock_.AdvanceTimeMilliseconds(kFrameIntervalMs);
+ }
+
+ // Since we're at the start of the pattern, the next expected frame in TL0 is
+ // right now. We will wait at most one expected retransmission time before
+ // acknowledging that it did not arrive, which means this frame and the next
+ // will not be retransmitted.
+ vp8_header.temporalIdx = 1;
+ EXPECT_FALSE(
+ rtp_sender_video_->AllowRetransmission(header, kSettings, kRttMs));
+ fake_clock_.AdvanceTimeMilliseconds(kFrameIntervalMs);
+ EXPECT_FALSE(
+ rtp_sender_video_->AllowRetransmission(header, kSettings, kRttMs));
+ fake_clock_.AdvanceTimeMilliseconds(kFrameIntervalMs);
+
+ // The TL0 frame did not arrive. So allow retransmission.
+ EXPECT_TRUE(
+ rtp_sender_video_->AllowRetransmission(header, kSettings, kRttMs));
+ fake_clock_.AdvanceTimeMilliseconds(kFrameIntervalMs);
+
+ // Insert a frame for TL2. We just had a frame in TL1, so the next one
+ // there is three frames away. TL0 is still too far in the past. So, allow
+ // retransmission.
+ vp8_header.temporalIdx = 2;
+ EXPECT_TRUE(
+ rtp_sender_video_->AllowRetransmission(header, kSettings, kRttMs));
+ fake_clock_.AdvanceTimeMilliseconds(kFrameIntervalMs);
+
+ // Another TL2, next in TL1 is two frames away. Allow again.
+ EXPECT_TRUE(
+ rtp_sender_video_->AllowRetransmission(header, kSettings, kRttMs));
+ fake_clock_.AdvanceTimeMilliseconds(kFrameIntervalMs);
+
+ // Yet another TL2, next in TL1 is now only one frame away, so don't store
+ // for retransmission.
+ EXPECT_FALSE(
+ rtp_sender_video_->AllowRetransmission(header, kSettings, kRttMs));
+}
+
+TEST_P(RtpSenderVideoTest, ConditionalRetransmitLimit) {
+ const int64_t kFrameIntervalMs = 200;
+ const int64_t kRttMs = (kFrameIntervalMs * 3) / 2;
+ const int32_t kSettings =
+ kRetransmitBaseLayer | kConditionallyRetransmitHigherLayers;
+
+ // Insert VP8 frames cycling through the temporal-layer pattern below.
+ RTPVideoHeader header;
+ header.codec = kVideoCodecVP8;
+
+ // Fill averaging window to prevent rounding errors.
+ constexpr int kNumRepetitions =
+ (RTPSenderVideo::kTLRateWindowSizeMs + (kFrameIntervalMs / 2)) /
+ kFrameIntervalMs;
+ constexpr int kPattern[] = {0, 2, 2, 2};
+ auto& vp8_header = header.video_type_header.emplace<RTPVideoHeaderVP8>();
+ for (size_t i = 0; i < arraysize(kPattern) * kNumRepetitions; ++i) {
+ vp8_header.temporalIdx = kPattern[i % arraysize(kPattern)];
+
+ rtp_sender_video_->AllowRetransmission(header, kSettings, kRttMs);
+ fake_clock_.AdvanceTimeMilliseconds(kFrameIntervalMs);
+ }
+
+ // Since we're at the start of the pattern, the next expected frame will be
+ // right now in TL0. Put it in TL1 instead. Regular rules would dictate that
+ // we don't store for retransmission because we expect a frame in a lower
+ // layer, but that last frame in TL1 was a long time ago in absolute terms,
+ // so allow retransmission anyway.
+ vp8_header.temporalIdx = 1;
+ EXPECT_TRUE(
+ rtp_sender_video_->AllowRetransmission(header, kSettings, kRttMs));
+}
+
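+// A hedged summary of the heuristic exercised by the two tests above: with
+// kConditionallyRetransmitHigherLayers, a frame in a higher temporal layer
+// is stored for retransmission only if, judging by the measured per-layer
+// frame rates, no frame from a lower layer is expected soon enough to make
+// the retransmission redundant. For instance, in ConditionalRetransmit,
+// kRttMs = (33 * 3) / 2 = 49 ms, while TL1 frames recur only every
+// 4 * 33 = 132 ms in its {0, 2, 1, 2} pattern.
+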
+TEST_P(RtpSenderVideoTest, SendsDependencyDescriptorWhenVideoStructureIsSet) {
+ const int64_t kFrameId = 100000;
+ uint8_t kFrame[100];
+ rtp_module_->RegisterRtpHeaderExtension(
+ RtpDependencyDescriptorExtension::Uri(), kDependencyDescriptorId);
+ FrameDependencyStructure video_structure;
+ video_structure.num_decode_targets = 2;
+ video_structure.templates = {
+ FrameDependencyTemplate().S(0).T(0).Dtis("SS"),
+ FrameDependencyTemplate().S(1).T(0).Dtis("-S"),
+ FrameDependencyTemplate().S(1).T(1).Dtis("-D"),
+ };
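+ // In the Dtis() shorthand above, each character gives this template's
+ // decode target indication for one decode target: 'S' = kSwitch,
+ // 'D' = kDiscardable, 'R' = kRequired, '-' = kNotPresent.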
+ rtp_sender_video_->SetVideoStructure(&video_structure);
+
+ // Send key frame.
+ RTPVideoHeader hdr;
+ RTPVideoHeader::GenericDescriptorInfo& generic = hdr.generic.emplace();
+ generic.frame_id = kFrameId;
+ generic.temporal_index = 0;
+ generic.spatial_index = 0;
+ generic.decode_target_indications = {DecodeTargetIndication::kSwitch,
+ DecodeTargetIndication::kSwitch};
+ hdr.frame_type = VideoFrameType::kVideoFrameKey;
+ rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
+ kDefaultExpectedRetransmissionTimeMs);
+
+ ASSERT_EQ(transport_.packets_sent(), 1);
+ DependencyDescriptor descriptor_key;
+ ASSERT_TRUE(transport_.last_sent_packet()
+ .GetExtension<RtpDependencyDescriptorExtension>(
+ nullptr, &descriptor_key));
+ ASSERT_TRUE(descriptor_key.attached_structure);
+ EXPECT_EQ(descriptor_key.attached_structure->num_decode_targets, 2);
+ EXPECT_THAT(descriptor_key.attached_structure->templates, SizeIs(3));
+ EXPECT_EQ(descriptor_key.frame_number, kFrameId & 0xFFFF);
+ EXPECT_EQ(descriptor_key.frame_dependencies.spatial_id, 0);
+ EXPECT_EQ(descriptor_key.frame_dependencies.temporal_id, 0);
+ EXPECT_EQ(descriptor_key.frame_dependencies.decode_target_indications,
+ generic.decode_target_indications);
+ EXPECT_THAT(descriptor_key.frame_dependencies.frame_diffs, IsEmpty());
+
+ // Send delta frame.
+ generic.frame_id = kFrameId + 1;
+ generic.temporal_index = 1;
+ generic.spatial_index = 1;
+ generic.dependencies = {kFrameId, kFrameId - 500};
+ generic.decode_target_indications = {DecodeTargetIndication::kNotPresent,
+ DecodeTargetIndication::kRequired};
+ hdr.frame_type = VideoFrameType::kVideoFrameDelta;
+ rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
+ kDefaultExpectedRetransmissionTimeMs);
+
+ EXPECT_EQ(transport_.packets_sent(), 2);
+ DependencyDescriptor descriptor_delta;
+ ASSERT_TRUE(
+ transport_.last_sent_packet()
+ .GetExtension<RtpDependencyDescriptorExtension>(
+ descriptor_key.attached_structure.get(), &descriptor_delta));
+ EXPECT_EQ(descriptor_delta.attached_structure, nullptr);
+ EXPECT_EQ(descriptor_delta.frame_number, (kFrameId + 1) & 0xFFFF);
+ EXPECT_EQ(descriptor_delta.frame_dependencies.spatial_id, 1);
+ EXPECT_EQ(descriptor_delta.frame_dependencies.temporal_id, 1);
+ EXPECT_EQ(descriptor_delta.frame_dependencies.decode_target_indications,
+ generic.decode_target_indications);
+ EXPECT_THAT(descriptor_delta.frame_dependencies.frame_diffs,
+ ElementsAre(1, 501));
+}
+
+TEST_P(RtpSenderVideoTest,
+ SkipsDependencyDescriptorOnDeltaFrameWhenFailedToAttachToKeyFrame) {
+ const int64_t kFrameId = 100000;
+ uint8_t kFrame[100];
+ rtp_module_->RegisterRtpHeaderExtension(
+ RtpDependencyDescriptorExtension::Uri(), kDependencyDescriptorId);
+ rtp_module_->SetExtmapAllowMixed(false);
+ FrameDependencyStructure video_structure;
+ video_structure.num_decode_targets = 2;
+ // Use many templates so that the key frame's dependency descriptor is too
+ // large to fit into 16 bytes (the maximum size of a one-byte-header RTP
+ // header extension).
+ video_structure.templates = {
+ FrameDependencyTemplate().S(0).T(0).Dtis("SS"),
+ FrameDependencyTemplate().S(1).T(0).Dtis("-S"),
+ FrameDependencyTemplate().S(1).T(1).Dtis("-D").FrameDiffs({1, 2, 3, 4}),
+ FrameDependencyTemplate().S(1).T(1).Dtis("-D").FrameDiffs({2, 3, 4, 5}),
+ FrameDependencyTemplate().S(1).T(1).Dtis("-D").FrameDiffs({3, 4, 5, 6}),
+ FrameDependencyTemplate().S(1).T(1).Dtis("-D").FrameDiffs({4, 5, 6, 7}),
+ };
+ rtp_sender_video_->SetVideoStructure(&video_structure);
+
+ // Send key frame.
+ RTPVideoHeader hdr;
+ RTPVideoHeader::GenericDescriptorInfo& generic = hdr.generic.emplace();
+ generic.frame_id = kFrameId;
+ generic.temporal_index = 0;
+ generic.spatial_index = 0;
+ generic.decode_target_indications = {DecodeTargetIndication::kSwitch,
+ DecodeTargetIndication::kSwitch};
+ hdr.frame_type = VideoFrameType::kVideoFrameKey;
+ rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
+ kDefaultExpectedRetransmissionTimeMs);
+
+ ASSERT_EQ(transport_.packets_sent(), 1);
+ DependencyDescriptor descriptor_key;
+ ASSERT_FALSE(transport_.last_sent_packet()
+ .HasExtension<RtpDependencyDescriptorExtension>());
+
+ // Send delta frame.
+ generic.frame_id = kFrameId + 1;
+ generic.temporal_index = 1;
+ generic.spatial_index = 1;
+ generic.dependencies = {kFrameId, kFrameId - 500};
+ generic.decode_target_indications = {DecodeTargetIndication::kNotPresent,
+ DecodeTargetIndication::kRequired};
+ hdr.frame_type = VideoFrameType::kVideoFrameDelta;
+ rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
+ kDefaultExpectedRetransmissionTimeMs);
+
+ EXPECT_EQ(transport_.packets_sent(), 2);
+ EXPECT_FALSE(transport_.last_sent_packet()
+ .HasExtension<RtpDependencyDescriptorExtension>());
+}
+
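+// Why 16 bytes: per RFC 8285, a one-byte-header extension element encodes
+// its length in a 4-bit field as (length - 1), so one element can carry at
+// most 16 bytes of data. With SetExtmapAllowMixed(false) the sender cannot
+// fall back to the two-byte-header form (which allows up to 255 bytes), so
+// the oversized descriptor is dropped, as verified above.
+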
+TEST_P(RtpSenderVideoTest, PropagatesChainDiffsIntoDependencyDescriptor) {
+ const int64_t kFrameId = 100000;
+ uint8_t kFrame[100];
+ rtp_module_->RegisterRtpHeaderExtension(
+ RtpDependencyDescriptorExtension::Uri(), kDependencyDescriptorId);
+ FrameDependencyStructure video_structure;
+ video_structure.num_decode_targets = 2;
+ video_structure.num_chains = 1;
+ video_structure.decode_target_protected_by_chain = {0, 0};
+ video_structure.templates = {
+ FrameDependencyTemplate().S(0).T(0).Dtis("SS").ChainDiffs({1}),
+ };
+ rtp_sender_video_->SetVideoStructure(&video_structure);
+
+ RTPVideoHeader hdr;
+ RTPVideoHeader::GenericDescriptorInfo& generic = hdr.generic.emplace();
+ generic.frame_id = kFrameId;
+ generic.decode_target_indications = {DecodeTargetIndication::kSwitch,
+ DecodeTargetIndication::kSwitch};
+ generic.chain_diffs = {2};
+ hdr.frame_type = VideoFrameType::kVideoFrameKey;
+ rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
+ kDefaultExpectedRetransmissionTimeMs);
+
+ ASSERT_EQ(transport_.packets_sent(), 1);
+ DependencyDescriptor descriptor_key;
+ ASSERT_TRUE(transport_.last_sent_packet()
+ .GetExtension<RtpDependencyDescriptorExtension>(
+ nullptr, &descriptor_key));
+ EXPECT_THAT(descriptor_key.frame_dependencies.chain_diffs,
+ ContainerEq(generic.chain_diffs));
+}
+
+TEST_P(RtpSenderVideoTest,
+ PropagatesActiveDecodeTargetsIntoDependencyDescriptor) {
+ const int64_t kFrameId = 100000;
+ uint8_t kFrame[100];
+ rtp_module_->RegisterRtpHeaderExtension(
+ RtpDependencyDescriptorExtension::Uri(), kDependencyDescriptorId);
+ FrameDependencyStructure video_structure;
+ video_structure.num_decode_targets = 2;
+ video_structure.num_chains = 1;
+ video_structure.decode_target_protected_by_chain = {0, 0};
+ video_structure.templates = {
+ FrameDependencyTemplate().S(0).T(0).Dtis("SS").ChainDiffs({1}),
+ };
+ rtp_sender_video_->SetVideoStructure(&video_structure);
+
+ RTPVideoHeader hdr;
+ RTPVideoHeader::GenericDescriptorInfo& generic = hdr.generic.emplace();
+ generic.frame_id = kFrameId;
+ generic.decode_target_indications = {DecodeTargetIndication::kSwitch,
+ DecodeTargetIndication::kSwitch};
+ generic.active_decode_targets = 0b01;
+ generic.chain_diffs = {1};
+ hdr.frame_type = VideoFrameType::kVideoFrameKey;
+ rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
+ kDefaultExpectedRetransmissionTimeMs);
+
+ ASSERT_EQ(transport_.packets_sent(), 1);
+ DependencyDescriptor descriptor_key;
+ ASSERT_TRUE(transport_.last_sent_packet()
+ .GetExtension<RtpDependencyDescriptorExtension>(
+ nullptr, &descriptor_key));
+ EXPECT_EQ(descriptor_key.active_decode_targets_bitmask, 0b01u);
+}
+
+TEST_P(RtpSenderVideoTest,
+ SetDifferentVideoStructureAvoidsCollisionWithThePreviousStructure) {
+ const int64_t kFrameId = 100000;
+ uint8_t kFrame[100];
+ rtp_module_->RegisterRtpHeaderExtension(
+ RtpDependencyDescriptorExtension::Uri(), kDependencyDescriptorId);
+ FrameDependencyStructure video_structure1;
+ video_structure1.num_decode_targets = 2;
+ video_structure1.templates = {
+ FrameDependencyTemplate().S(0).T(0).Dtis("SS"),
+ FrameDependencyTemplate().S(0).T(1).Dtis("D-"),
+ };
+ FrameDependencyStructure video_structure2;
+ video_structure2.num_decode_targets = 2;
+ video_structure2.templates = {
+ FrameDependencyTemplate().S(0).T(0).Dtis("SS"),
+ FrameDependencyTemplate().S(0).T(1).Dtis("R-"),
+ };
+
+ // Send 1st key frame.
+ RTPVideoHeader hdr;
+ RTPVideoHeader::GenericDescriptorInfo& generic = hdr.generic.emplace();
+ generic.frame_id = kFrameId;
+ generic.decode_target_indications = {DecodeTargetIndication::kSwitch,
+ DecodeTargetIndication::kSwitch};
+ hdr.frame_type = VideoFrameType::kVideoFrameKey;
+ rtp_sender_video_->SetVideoStructure(&video_structure1);
+ rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
+ kDefaultExpectedRetransmissionTimeMs);
+ // Parse 1st extension.
+ ASSERT_EQ(transport_.packets_sent(), 1);
+ DependencyDescriptor descriptor_key1;
+ ASSERT_TRUE(transport_.last_sent_packet()
+ .GetExtension<RtpDependencyDescriptorExtension>(
+ nullptr, &descriptor_key1));
+ ASSERT_TRUE(descriptor_key1.attached_structure);
+
+ // Send the delta frame.
+ generic.frame_id = kFrameId + 1;
+ generic.temporal_index = 1;
+ generic.decode_target_indications = {DecodeTargetIndication::kDiscardable,
+ DecodeTargetIndication::kNotPresent};
+ hdr.frame_type = VideoFrameType::kVideoFrameDelta;
+ rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
+ kDefaultExpectedRetransmissionTimeMs);
+
+ ASSERT_EQ(transport_.packets_sent(), 2);
+ RtpPacket delta_packet = transport_.last_sent_packet();
+
+ // Send 2nd key frame.
+ generic.frame_id = kFrameId + 2;
+ generic.decode_target_indications = {DecodeTargetIndication::kSwitch,
+ DecodeTargetIndication::kSwitch};
+ hdr.frame_type = VideoFrameType::kVideoFrameKey;
+ rtp_sender_video_->SetVideoStructure(&video_structure2);
+ rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
+ kDefaultExpectedRetransmissionTimeMs);
+ // Parse the 2nd key frame.
+ ASSERT_EQ(transport_.packets_sent(), 3);
+ DependencyDescriptor descriptor_key2;
+ ASSERT_TRUE(transport_.last_sent_packet()
+ .GetExtension<RtpDependencyDescriptorExtension>(
+ nullptr, &descriptor_key2));
+ ASSERT_TRUE(descriptor_key2.attached_structure);
+
+ // Try to parse the 1st delta frame. It should be parseable using the
+ // structure from the 1st key frame, but not using the structure from the
+ // 2nd key frame.
+ DependencyDescriptor descriptor_delta;
+ EXPECT_TRUE(delta_packet.GetExtension<RtpDependencyDescriptorExtension>(
+ descriptor_key1.attached_structure.get(), &descriptor_delta));
+ EXPECT_FALSE(delta_packet.GetExtension<RtpDependencyDescriptorExtension>(
+ descriptor_key2.attached_structure.get(), &descriptor_delta));
+}
+
+TEST_P(RtpSenderVideoTest,
+ AuthenticateVideoHeaderWhenDependencyDescriptorExtensionIsUsed) {
+ static constexpr size_t kFrameSize = 100;
+ uint8_t kFrame[kFrameSize] = {1, 2, 3, 4};
+
+ rtp_module_->RegisterRtpHeaderExtension(
+ RtpDependencyDescriptorExtension::Uri(), kDependencyDescriptorId);
+ auto encryptor = rtc::make_ref_counted<NiceMock<MockFrameEncryptor>>();
+ ON_CALL(*encryptor, GetMaxCiphertextByteSize).WillByDefault(ReturnArg<1>());
+ ON_CALL(*encryptor, Encrypt)
+ .WillByDefault(WithArgs<3, 5>(
+ [](rtc::ArrayView<const uint8_t> frame, size_t* bytes_written) {
+ *bytes_written = frame.size();
+ return 0;
+ }));
+ RTPSenderVideo::Config config;
+ config.clock = &fake_clock_;
+ config.rtp_sender = rtp_module_->RtpSender();
+ config.field_trials = &field_trials_;
+ config.frame_encryptor = encryptor.get();
+ RTPSenderVideo rtp_sender_video(config);
+
+ FrameDependencyStructure video_structure;
+ video_structure.num_decode_targets = 1;
+ video_structure.templates = {FrameDependencyTemplate().Dtis("S")};
+ rtp_sender_video.SetVideoStructure(&video_structure);
+
+ // Send key frame.
+ RTPVideoHeader hdr;
+ hdr.frame_type = VideoFrameType::kVideoFrameKey;
+ hdr.generic.emplace().decode_target_indications =
+ video_structure.templates[0].decode_target_indications;
+
+ EXPECT_CALL(*encryptor,
+ Encrypt(_, _, Not(IsEmpty()), ElementsAreArray(kFrame), _, _));
+ rtp_sender_video.SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
+ kDefaultExpectedRetransmissionTimeMs);
+ // Double check packet with the dependency descriptor is sent.
+ ASSERT_EQ(transport_.packets_sent(), 1);
+ EXPECT_TRUE(transport_.last_sent_packet()
+ .HasExtension<RtpDependencyDescriptorExtension>());
+}
+
+TEST_P(RtpSenderVideoTest, PopulateGenericFrameDescriptor) {
+ const int64_t kFrameId = 100000;
+ uint8_t kFrame[100];
+ rtp_module_->RegisterRtpHeaderExtension(
+ RtpGenericFrameDescriptorExtension00::Uri(), kGenericDescriptorId);
+
+ RTPVideoHeader hdr;
+ RTPVideoHeader::GenericDescriptorInfo& generic = hdr.generic.emplace();
+ generic.frame_id = kFrameId;
+ generic.temporal_index = 3;
+ generic.spatial_index = 2;
+ generic.dependencies.push_back(kFrameId - 1);
+ generic.dependencies.push_back(kFrameId - 500);
+ hdr.frame_type = VideoFrameType::kVideoFrameDelta;
+ rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
+ kDefaultExpectedRetransmissionTimeMs);
+
+ RtpGenericFrameDescriptor descriptor_wire;
+ EXPECT_EQ(1, transport_.packets_sent());
+ ASSERT_TRUE(transport_.last_sent_packet()
+ .GetExtension<RtpGenericFrameDescriptorExtension00>(
+ &descriptor_wire));
+ EXPECT_EQ(static_cast<uint16_t>(generic.frame_id), descriptor_wire.FrameId());
+ EXPECT_EQ(generic.temporal_index, descriptor_wire.TemporalLayer());
+ EXPECT_THAT(descriptor_wire.FrameDependenciesDiffs(), ElementsAre(1, 500));
+ EXPECT_EQ(descriptor_wire.SpatialLayersBitmask(), 0b0000'0100);
+}
+
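+// Note: the generic frame descriptor carries spatial layers as a bitmask
+// rather than an index, so spatial_index 2 above is expected on the wire as
+// bit 2 being set, i.e. 0b0000'0100.
+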
+void RtpSenderVideoTest::
+ UsesMinimalVp8DescriptorWhenGenericFrameDescriptorExtensionIsUsed(
+ int version) {
+ const int64_t kFrameId = 100000;
+ const size_t kFrameSize = 100;
+ uint8_t kFrame[kFrameSize];
+
+ rtp_module_->RegisterRtpHeaderExtension(
+ RtpGenericFrameDescriptorExtension00::Uri(), kGenericDescriptorId);
+
+ RTPVideoHeader hdr;
+ hdr.codec = kVideoCodecVP8;
+ RTPVideoHeaderVP8& vp8 = hdr.video_type_header.emplace<RTPVideoHeaderVP8>();
+ vp8.pictureId = kFrameId % 0x7FFF;
+ vp8.tl0PicIdx = 13;
+ vp8.temporalIdx = 1;
+ vp8.keyIdx = 2;
+ RTPVideoHeader::GenericDescriptorInfo& generic = hdr.generic.emplace();
+ generic.frame_id = kFrameId;
+ hdr.frame_type = VideoFrameType::kVideoFrameDelta;
+ rtp_sender_video_->SendVideo(kPayload, VideoCodecType::kVideoCodecVP8,
+ kTimestamp, 0, kFrame, hdr,
+ kDefaultExpectedRetransmissionTimeMs);
+
+ ASSERT_EQ(transport_.packets_sent(), 1);
+ // Expect that only the minimal 1-byte VP8 descriptor was generated.
+ EXPECT_EQ(transport_.last_sent_packet().payload_size(), 1 + kFrameSize);
+}
+
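+// Background for the check above: the VP8 payload descriptor (RFC 7741) has
+// a single mandatory first octet; picture ID, TL0PICIDX, TID and KEYIDX all
+// live in optional extension octets gated by the X bit. When the generic
+// frame descriptor extension carries that information instead, only the one
+// mandatory octet precedes the frame payload, hence 1 + kFrameSize.
+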
+TEST_P(RtpSenderVideoTest,
+ UsesMinimalVp8DescriptorWhenGenericFrameDescriptorExtensionIsUsed00) {
+ UsesMinimalVp8DescriptorWhenGenericFrameDescriptorExtensionIsUsed(0);
+}
+
+TEST_P(RtpSenderVideoTest,
+ UsesMinimalVp8DescriptorWhenGenericFrameDescriptorExtensionIsUsed01) {
+ UsesMinimalVp8DescriptorWhenGenericFrameDescriptorExtensionIsUsed(1);
+}
+
+TEST_P(RtpSenderVideoTest, VideoLayersAllocationWithResolutionSentOnKeyFrames) {
+ const size_t kFrameSize = 100;
+ uint8_t kFrame[kFrameSize];
+ rtp_module_->RegisterRtpHeaderExtension(
+ RtpVideoLayersAllocationExtension::Uri(),
+ kVideoLayersAllocationExtensionId);
+
+ VideoLayersAllocation allocation;
+ VideoLayersAllocation::SpatialLayer layer;
+ layer.width = 360;
+ layer.height = 180;
+ layer.target_bitrate_per_temporal_layer.push_back(
+ DataRate::KilobitsPerSec(50));
+ allocation.resolution_and_frame_rate_is_valid = true;
+ allocation.active_spatial_layers.push_back(layer);
+ rtp_sender_video_->SetVideoLayersAllocation(allocation);
+
+ RTPVideoHeader hdr;
+ hdr.frame_type = VideoFrameType::kVideoFrameKey;
+ rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
+ kDefaultExpectedRetransmissionTimeMs);
+
+ VideoLayersAllocation sent_allocation;
+ EXPECT_TRUE(
+ transport_.last_sent_packet()
+ .GetExtension<RtpVideoLayersAllocationExtension>(&sent_allocation));
+ EXPECT_THAT(sent_allocation.active_spatial_layers, ElementsAre(layer));
+
+ // The next key frame also has the allocation.
+ rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
+ kDefaultExpectedRetransmissionTimeMs);
+ EXPECT_TRUE(
+ transport_.last_sent_packet()
+ .GetExtension<RtpVideoLayersAllocationExtension>(&sent_allocation));
+}
+
+TEST_P(RtpSenderVideoTest,
+ VideoLayersAllocationWithoutResolutionSentOnDeltaWhenUpdated) {
+ const size_t kFrameSize = 100;
+ uint8_t kFrame[kFrameSize];
+ rtp_module_->RegisterRtpHeaderExtension(
+ RtpVideoLayersAllocationExtension::Uri(),
+ kVideoLayersAllocationExtensionId);
+
+ VideoLayersAllocation allocation;
+ VideoLayersAllocation::SpatialLayer layer;
+ layer.width = 360;
+ layer.height = 180;
+ allocation.resolution_and_frame_rate_is_valid = true;
+ layer.target_bitrate_per_temporal_layer.push_back(
+ DataRate::KilobitsPerSec(50));
+ allocation.active_spatial_layers.push_back(layer);
+ rtp_sender_video_->SetVideoLayersAllocation(allocation);
+
+ RTPVideoHeader hdr;
+ hdr.frame_type = VideoFrameType::kVideoFrameKey;
+ rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
+ kDefaultExpectedRetransmissionTimeMs);
+ EXPECT_TRUE(transport_.last_sent_packet()
+ .HasExtension<RtpVideoLayersAllocationExtension>());
+
+ // No allocation sent on delta frame unless it has been updated.
+ hdr.frame_type = VideoFrameType::kVideoFrameDelta;
+ rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
+ kDefaultExpectedRetransmissionTimeMs);
+ EXPECT_FALSE(transport_.last_sent_packet()
+ .HasExtension<RtpVideoLayersAllocationExtension>());
+
+ // Update the allocation.
+ rtp_sender_video_->SetVideoLayersAllocation(allocation);
+ rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
+ kDefaultExpectedRetransmissionTimeMs);
+
+ VideoLayersAllocation sent_allocation;
+ EXPECT_TRUE(
+ transport_.last_sent_packet()
+ .GetExtension<RtpVideoLayersAllocationExtension>(&sent_allocation));
+ ASSERT_THAT(sent_allocation.active_spatial_layers, SizeIs(1));
+ EXPECT_FALSE(sent_allocation.resolution_and_frame_rate_is_valid);
+ EXPECT_THAT(sent_allocation.active_spatial_layers[0]
+ .target_bitrate_per_temporal_layer,
+ SizeIs(1));
+}
+
+TEST_P(RtpSenderVideoTest,
+ VideoLayersAllocationWithResolutionSentOnDeltaWhenSpatialLayerAdded) {
+ const size_t kFrameSize = 100;
+ uint8_t kFrame[kFrameSize];
+ rtp_module_->RegisterRtpHeaderExtension(
+ RtpVideoLayersAllocationExtension::Uri(),
+ kVideoLayersAllocationExtensionId);
+
+ VideoLayersAllocation allocation;
+ allocation.resolution_and_frame_rate_is_valid = true;
+ VideoLayersAllocation::SpatialLayer layer;
+ layer.width = 360;
+ layer.height = 180;
+ layer.spatial_id = 0;
+ layer.target_bitrate_per_temporal_layer.push_back(
+ DataRate::KilobitsPerSec(50));
+ allocation.active_spatial_layers.push_back(layer);
+ rtp_sender_video_->SetVideoLayersAllocation(allocation);
+
+ RTPVideoHeader hdr;
+ hdr.frame_type = VideoFrameType::kVideoFrameKey;
+ rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
+ kDefaultExpectedRetransmissionTimeMs);
+ ASSERT_TRUE(transport_.last_sent_packet()
+ .HasExtension<RtpVideoLayersAllocationExtension>());
+
+ // Update the allocation.
+ layer.width = 640;
+ layer.height = 320;
+ layer.spatial_id = 1;
+ layer.target_bitrate_per_temporal_layer.push_back(
+ DataRate::KilobitsPerSec(100));
+ allocation.active_spatial_layers.push_back(layer);
+ rtp_sender_video_->SetVideoLayersAllocation(allocation);
+ hdr.frame_type = VideoFrameType::kVideoFrameDelta;
+ rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
+ kDefaultExpectedRetransmissionTimeMs);
+
+ VideoLayersAllocation sent_allocation;
+ EXPECT_TRUE(
+ transport_.last_sent_packet()
+ .GetExtension<RtpVideoLayersAllocationExtension>(&sent_allocation));
+ EXPECT_THAT(sent_allocation.active_spatial_layers, SizeIs(2));
+ EXPECT_TRUE(sent_allocation.resolution_and_frame_rate_is_valid);
+}
+
+TEST_P(RtpSenderVideoTest,
+ VideoLayersAllocationWithResolutionSentOnLargeFrameRateChange) {
+ const size_t kFrameSize = 100;
+ uint8_t kFrame[kFrameSize];
+ rtp_module_->RegisterRtpHeaderExtension(
+ RtpVideoLayersAllocationExtension::Uri(),
+ kVideoLayersAllocationExtensionId);
+
+ VideoLayersAllocation allocation;
+ allocation.resolution_and_frame_rate_is_valid = true;
+ VideoLayersAllocation::SpatialLayer layer;
+ layer.width = 360;
+ layer.height = 180;
+ layer.spatial_id = 0;
+ layer.frame_rate_fps = 10;
+ layer.target_bitrate_per_temporal_layer.push_back(
+ DataRate::KilobitsPerSec(50));
+ allocation.active_spatial_layers.push_back(layer);
+ rtp_sender_video_->SetVideoLayersAllocation(allocation);
+
+ RTPVideoHeader hdr;
+ hdr.frame_type = VideoFrameType::kVideoFrameKey;
+ rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
+ kDefaultExpectedRetransmissionTimeMs);
+ ASSERT_TRUE(transport_.last_sent_packet()
+ .HasExtension<RtpVideoLayersAllocationExtension>());
+
+ // Update frame rate only.
+ allocation.active_spatial_layers[0].frame_rate_fps = 20;
+ rtp_sender_video_->SetVideoLayersAllocation(allocation);
+ hdr.frame_type = VideoFrameType::kVideoFrameDelta;
+ rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
+ kDefaultExpectedRetransmissionTimeMs);
+
+ VideoLayersAllocation sent_allocation;
+ EXPECT_TRUE(
+ transport_.last_sent_packet()
+ .GetExtension<RtpVideoLayersAllocationExtension>(&sent_allocation));
+ ASSERT_TRUE(sent_allocation.resolution_and_frame_rate_is_valid);
+ EXPECT_EQ(sent_allocation.active_spatial_layers[0].frame_rate_fps, 20);
+}
+
+TEST_P(RtpSenderVideoTest,
+ VideoLayersAllocationWithoutResolutionSentOnSmallFrameRateChange) {
+ const size_t kFrameSize = 100;
+ uint8_t kFrame[kFrameSize];
+ rtp_module_->RegisterRtpHeaderExtension(
+ RtpVideoLayersAllocationExtension::Uri(),
+ kVideoLayersAllocationExtensionId);
+
+ VideoLayersAllocation allocation;
+ allocation.resolution_and_frame_rate_is_valid = true;
+ VideoLayersAllocation::SpatialLayer layer;
+ layer.width = 360;
+ layer.height = 180;
+ layer.spatial_id = 0;
+ layer.frame_rate_fps = 10;
+ layer.target_bitrate_per_temporal_layer.push_back(
+ DataRate::KilobitsPerSec(50));
+ allocation.active_spatial_layers.push_back(layer);
+ rtp_sender_video_->SetVideoLayersAllocation(allocation);
+
+ RTPVideoHeader hdr;
+ hdr.frame_type = VideoFrameType::kVideoFrameKey;
+ rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
+ kDefaultExpectedRetransmissionTimeMs);
+ ASSERT_TRUE(transport_.last_sent_packet()
+ .HasExtension<RtpVideoLayersAllocationExtension>());
+
+ // Update frame rate slightly.
+ allocation.active_spatial_layers[0].frame_rate_fps = 9;
+ rtp_sender_video_->SetVideoLayersAllocation(allocation);
+ hdr.frame_type = VideoFrameType::kVideoFrameDelta;
+ rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
+ kDefaultExpectedRetransmissionTimeMs);
+
+ VideoLayersAllocation sent_allocation;
+ EXPECT_TRUE(
+ transport_.last_sent_packet()
+ .GetExtension<RtpVideoLayersAllocationExtension>(&sent_allocation));
+ EXPECT_FALSE(sent_allocation.resolution_and_frame_rate_is_valid);
+}
+
+TEST_P(RtpSenderVideoTest, VideoLayersAllocationSentOnDeltaFramesOnlyOnUpdate) {
+ const size_t kFrameSize = 100;
+ uint8_t kFrame[kFrameSize];
+ rtp_module_->RegisterRtpHeaderExtension(
+ RtpVideoLayersAllocationExtension::Uri(),
+ kVideoLayersAllocationExtensionId);
+
+ VideoLayersAllocation allocation;
+ VideoLayersAllocation::SpatialLayer layer;
+ layer.width = 360;
+ layer.height = 180;
+ layer.target_bitrate_per_temporal_layer.push_back(
+ DataRate::KilobitsPerSec(50));
+ allocation.active_spatial_layers.push_back(layer);
+ rtp_sender_video_->SetVideoLayersAllocation(allocation);
+
+ RTPVideoHeader hdr;
+ hdr.frame_type = VideoFrameType::kVideoFrameDelta;
+ rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
+ kDefaultExpectedRetransmissionTimeMs);
+
+ VideoLayersAllocation sent_allocation;
+ EXPECT_TRUE(
+ transport_.last_sent_packet()
+ .GetExtension<RtpVideoLayersAllocationExtension>(&sent_allocation));
+ EXPECT_THAT(sent_allocation.active_spatial_layers, SizeIs(1));
+
+ // VideoLayersAllocation not sent on the next delta frame.
+ rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
+ kDefaultExpectedRetransmissionTimeMs);
+ EXPECT_FALSE(transport_.last_sent_packet()
+ .HasExtension<RtpVideoLayersAllocationExtension>());
+
+ // Update the allocation. VideoLayersAllocation should be sent on the next
+ // frame.
+ rtp_sender_video_->SetVideoLayersAllocation(allocation);
+ rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
+ kDefaultExpectedRetransmissionTimeMs);
+ EXPECT_TRUE(
+ transport_.last_sent_packet()
+ .GetExtension<RtpVideoLayersAllocationExtension>(&sent_allocation));
+}
+
+TEST_P(RtpSenderVideoTest, VideoLayersAllocationNotSentOnHigherTemporalLayers) {
+ const size_t kFrameSize = 100;
+ uint8_t kFrame[kFrameSize];
+ rtp_module_->RegisterRtpHeaderExtension(
+ RtpVideoLayersAllocationExtension::Uri(),
+ kVideoLayersAllocationExtensionId);
+
+ VideoLayersAllocation allocation;
+ allocation.resolution_and_frame_rate_is_valid = true;
+ VideoLayersAllocation::SpatialLayer layer;
+ layer.width = 360;
+ layer.height = 180;
+ layer.target_bitrate_per_temporal_layer.push_back(
+ DataRate::KilobitsPerSec(50));
+ allocation.active_spatial_layers.push_back(layer);
+ rtp_sender_video_->SetVideoLayersAllocation(allocation);
+
+ RTPVideoHeader hdr;
+ hdr.frame_type = VideoFrameType::kVideoFrameDelta;
+ hdr.codec = VideoCodecType::kVideoCodecVP8;
+ auto& vp8_header = hdr.video_type_header.emplace<RTPVideoHeaderVP8>();
+ vp8_header.temporalIdx = 1;
+
+ rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
+ kDefaultExpectedRetransmissionTimeMs);
+ EXPECT_FALSE(transport_.last_sent_packet()
+ .HasExtension<RtpVideoLayersAllocationExtension>());
+
+ // Send a delta frame on tl0.
+ vp8_header.temporalIdx = 0;
+ rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
+ kDefaultExpectedRetransmissionTimeMs);
+ EXPECT_TRUE(transport_.last_sent_packet()
+ .HasExtension<RtpVideoLayersAllocationExtension>());
+}
+
+TEST_P(RtpSenderVideoTest, AbsoluteCaptureTime) {
+ constexpr int64_t kAbsoluteCaptureTimestampMs = 12345678;
+ uint8_t kFrame[kMaxPacketLength];
+ rtp_module_->RegisterRtpHeaderExtension(AbsoluteCaptureTimeExtension::Uri(),
+ kAbsoluteCaptureTimeExtensionId);
+
+ RTPVideoHeader hdr;
+ hdr.frame_type = VideoFrameType::kVideoFrameKey;
+ rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp,
+ kAbsoluteCaptureTimestampMs, kFrame, hdr,
+ kDefaultExpectedRetransmissionTimeMs);
+
+ absl::optional<AbsoluteCaptureTime> absolute_capture_time;
+
+ // It is expected that one and only one of the packets sent for this video
+ // frame carries the absolute capture time header extension.
+ for (const RtpPacketReceived& packet : transport_.sent_packets()) {
+ if (absolute_capture_time.has_value()) {
+ EXPECT_FALSE(packet.HasExtension<AbsoluteCaptureTimeExtension>());
+ } else {
+ absolute_capture_time =
+ packet.GetExtension<AbsoluteCaptureTimeExtension>();
+ }
+ }
+
+ // Verify the capture timestamp and that the clock offset is not set.
+ ASSERT_TRUE(absolute_capture_time.has_value());
+ EXPECT_EQ(
+ absolute_capture_time->absolute_capture_timestamp,
+ Int64MsToUQ32x32(fake_clock_.ConvertTimestampToNtpTimeInMilliseconds(
+ kAbsoluteCaptureTimestampMs)));
+ EXPECT_FALSE(
+ absolute_capture_time->estimated_capture_clock_offset.has_value());
+}
+
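+// For reference, Int64MsToUQ32x32 above converts an NTP timestamp in
+// milliseconds to the UQ32.32 fixed-point format used by the extension: the
+// upper 32 bits hold whole seconds and the lower 32 bits the fractional
+// part, i.e. approximately (ms / 1000) * 2^32.
+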
+// Essentially the same test as AbsoluteCaptureTime, but with a field trial.
+// Once the field trial experiment has concluded, we will remove
+// AbsoluteCaptureTime.
+TEST_P(RtpSenderVideoTest, AbsoluteCaptureTimeWithCaptureClockOffset) {
+ field_trials_.set_include_capture_clock_offset(true);
+ rtp_sender_video_ = std::make_unique<TestRtpSenderVideo>(
+ &fake_clock_, rtp_module_->RtpSender(), field_trials_);
+
+ constexpr int64_t kAbsoluteCaptureTimestampMs = 12345678;
+ uint8_t kFrame[kMaxPacketLength];
+ rtp_module_->RegisterRtpHeaderExtension(AbsoluteCaptureTimeExtension::Uri(),
+ kAbsoluteCaptureTimeExtensionId);
+
+ RTPVideoHeader hdr;
+ hdr.frame_type = VideoFrameType::kVideoFrameKey;
+ rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp,
+ kAbsoluteCaptureTimestampMs, kFrame, hdr,
+ kDefaultExpectedRetransmissionTimeMs);
+
+ absl::optional<AbsoluteCaptureTime> absolute_capture_time;
+
+ // It is expected that one and only one of the packets sent for this video
+ // frame carries the absolute capture time header extension.
+ for (const RtpPacketReceived& packet : transport_.sent_packets()) {
+ if (absolute_capture_time.has_value()) {
+ EXPECT_FALSE(packet.HasExtension<AbsoluteCaptureTimeExtension>());
+ } else {
+ absolute_capture_time =
+ packet.GetExtension<AbsoluteCaptureTimeExtension>();
+ }
+ }
+
+ // Verify the capture timestamp and that the clock offset is set to zero.
+ ASSERT_TRUE(absolute_capture_time.has_value());
+ EXPECT_EQ(
+ absolute_capture_time->absolute_capture_timestamp,
+ Int64MsToUQ32x32(fake_clock_.ConvertTimestampToNtpTimeInMilliseconds(
+ kAbsoluteCaptureTimestampMs)));
+ EXPECT_EQ(absolute_capture_time->estimated_capture_clock_offset, 0);
+}
+
+TEST_P(RtpSenderVideoTest, AbsoluteCaptureTimeWithExtensionProvided) {
+ constexpr AbsoluteCaptureTime kAbsoluteCaptureTime = {
+ 123,
+ absl::optional<int64_t>(456),
+ };
+ uint8_t kFrame[kMaxPacketLength];
+ rtp_module_->RegisterRtpHeaderExtension(AbsoluteCaptureTimeExtension::Uri(),
+ kAbsoluteCaptureTimeExtensionId);
+
+ RTPVideoHeader hdr;
+ hdr.frame_type = VideoFrameType::kVideoFrameKey;
+ hdr.absolute_capture_time = kAbsoluteCaptureTime;
+ rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp,
+ /*capture_time_ms=*/789, kFrame, hdr,
+ kDefaultExpectedRetransmissionTimeMs);
+
+ absl::optional<AbsoluteCaptureTime> absolute_capture_time;
+
+ // It is expected that one and only one of the packets sent for this video
+ // frame carries the absolute capture time header extension.
+ for (const RtpPacketReceived& packet : transport_.sent_packets()) {
+ if (absolute_capture_time.has_value()) {
+ EXPECT_FALSE(packet.HasExtension<AbsoluteCaptureTimeExtension>());
+ } else {
+ absolute_capture_time =
+ packet.GetExtension<AbsoluteCaptureTimeExtension>();
+ }
+ }
+
+ // Verify the extension.
+ EXPECT_EQ(absolute_capture_time, kAbsoluteCaptureTime);
+}
+
+TEST_P(RtpSenderVideoTest, PopulatesPlayoutDelay) {
+ // Single packet frames.
+ constexpr size_t kPacketSize = 123;
+ uint8_t kFrame[kPacketSize];
+ rtp_module_->RegisterRtpHeaderExtension(PlayoutDelayLimits::Uri(),
+ kPlayoutDelayExtensionId);
+ const VideoPlayoutDelay kExpectedDelay = {10, 20};
+
+ // Send initial key-frame without playout delay.
+ RTPVideoHeader hdr;
+ hdr.frame_type = VideoFrameType::kVideoFrameKey;
+ hdr.codec = VideoCodecType::kVideoCodecVP8;
+ auto& vp8_header = hdr.video_type_header.emplace<RTPVideoHeaderVP8>();
+ vp8_header.temporalIdx = 0;
+
+ rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
+ kDefaultExpectedRetransmissionTimeMs);
+ EXPECT_FALSE(
+ transport_.last_sent_packet().HasExtension<PlayoutDelayLimits>());
+
+ // Set playout delay on a discardable frame.
+ hdr.playout_delay = kExpectedDelay;
+ hdr.frame_type = VideoFrameType::kVideoFrameDelta;
+ vp8_header.temporalIdx = 1;
+ rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
+ kDefaultExpectedRetransmissionTimeMs);
+ VideoPlayoutDelay received_delay = VideoPlayoutDelay();
+ ASSERT_TRUE(transport_.last_sent_packet().GetExtension<PlayoutDelayLimits>(
+ &received_delay));
+ EXPECT_EQ(received_delay, kExpectedDelay);
+
+ // Set playout delay on a non-discardable frame; the extension should still
+ // be populated since delivery wasn't guaranteed on the last one.
+ hdr.playout_delay = VideoPlayoutDelay(); // Indicates "no change".
+ vp8_header.temporalIdx = 0;
+ rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
+ kDefaultExpectedRetransmissionTimeMs);
+ ASSERT_TRUE(transport_.last_sent_packet().GetExtension<PlayoutDelayLimits>(
+ &received_delay));
+ EXPECT_EQ(received_delay, kExpectedDelay);
+
+ // The next frame does not need the extension since its delivery has
+ // already been guaranteed.
+ rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
+ kDefaultExpectedRetransmissionTimeMs);
+ EXPECT_FALSE(
+ transport_.last_sent_packet().HasExtension<PlayoutDelayLimits>());
+
+ // Insert key-frame, we need to refresh the state here.
+ hdr.frame_type = VideoFrameType::kVideoFrameKey;
+ rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
+ kDefaultExpectedRetransmissionTimeMs);
+ ASSERT_TRUE(transport_.last_sent_packet().GetExtension<PlayoutDelayLimits>(
+ &received_delay));
+ EXPECT_EQ(received_delay, kExpectedDelay);
+}
+
+TEST_P(RtpSenderVideoTest, SendGenericVideo) {
+ const uint8_t kPayloadType = 127;
+ const VideoCodecType kCodecType = VideoCodecType::kVideoCodecGeneric;
+ const uint8_t kPayload[] = {47, 11, 32, 93, 89};
+
+ // Send keyframe.
+ RTPVideoHeader video_header;
+ video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ ASSERT_TRUE(rtp_sender_video_->SendVideo(kPayloadType, kCodecType, 1234, 4321,
+ kPayload, video_header,
+ absl::nullopt));
+
+ rtc::ArrayView<const uint8_t> sent_payload =
+ transport_.last_sent_packet().payload();
+ uint8_t generic_header = sent_payload[0];
+ EXPECT_TRUE(generic_header & RtpFormatVideoGeneric::kKeyFrameBit);
+ EXPECT_TRUE(generic_header & RtpFormatVideoGeneric::kFirstPacketBit);
+ EXPECT_THAT(sent_payload.subview(1), ElementsAreArray(kPayload));
+
+ // Send delta frame.
+ const uint8_t kDeltaPayload[] = {13, 42, 32, 93, 13};
+ video_header.frame_type = VideoFrameType::kVideoFrameDelta;
+ ASSERT_TRUE(rtp_sender_video_->SendVideo(kPayloadType, kCodecType, 1234, 4321,
+ kDeltaPayload, video_header,
+ absl::nullopt));
+
+ sent_payload = transport_.last_sent_packet().payload();
+ generic_header = sent_payload[0];
+ EXPECT_FALSE(generic_header & RtpFormatVideoGeneric::kKeyFrameBit);
+ EXPECT_TRUE(generic_header & RtpFormatVideoGeneric::kFirstPacketBit);
+ EXPECT_THAT(sent_payload.subview(1), ElementsAreArray(kDeltaPayload));
+}
+
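+// A brief sketch of the generic payload format exercised above: the payload
+// begins with one header octet of flag bits (key frame, first packet of the
+// frame; the exact bit positions are defined by RtpFormatVideoGeneric),
+// followed by the codec bitstream unchanged.
+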
+TEST_P(RtpSenderVideoTest, SendRawVideo) {
+ const uint8_t kPayloadType = 111;
+ const uint8_t kPayload[] = {11, 22, 33, 44, 55};
+
+ // Send a frame.
+ RTPVideoHeader video_header;
+ video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ ASSERT_TRUE(rtp_sender_video_->SendVideo(kPayloadType, absl::nullopt, 1234,
+ 4321, kPayload, video_header,
+ absl::nullopt));
+
+ rtc::ArrayView<const uint8_t> sent_payload =
+ transport_.last_sent_packet().payload();
+ EXPECT_THAT(sent_payload, ElementsAreArray(kPayload));
+}
+
+INSTANTIATE_TEST_SUITE_P(WithAndWithoutOverhead,
+ RtpSenderVideoTest,
+ ::testing::Bool());
+
+class RtpSenderVideoWithFrameTransformerTest : public ::testing::Test {
+ public:
+ RtpSenderVideoWithFrameTransformerTest()
+ : fake_clock_(kStartTime),
+ retransmission_rate_limiter_(&fake_clock_, 1000),
+ rtp_module_(ModuleRtpRtcpImpl2::Create([&] {
+ RtpRtcpInterface::Configuration config;
+ config.clock = &fake_clock_;
+ config.outgoing_transport = &transport_;
+ config.retransmission_rate_limiter = &retransmission_rate_limiter_;
+ config.field_trials = &field_trials_;
+ config.local_media_ssrc = kSsrc;
+ return config;
+ }())) {
+ rtp_module_->SetSequenceNumber(kSeqNum);
+ rtp_module_->SetStartTimestamp(0);
+ }
+
+ std::unique_ptr<RTPSenderVideo> CreateSenderWithFrameTransformer(
+ rtc::scoped_refptr<FrameTransformerInterface> transformer) {
+ RTPSenderVideo::Config config;
+ config.clock = &fake_clock_;
+ config.rtp_sender = rtp_module_->RtpSender();
+ config.field_trials = &field_trials_;
+ config.frame_transformer = transformer;
+ return std::make_unique<RTPSenderVideo>(config);
+ }
+
+ protected:
+ rtc::AutoThread main_thread_;
+ FieldTrialBasedConfig field_trials_;
+ SimulatedClock fake_clock_;
+ LoopbackTransportTest transport_;
+ RateLimiter retransmission_rate_limiter_;
+ std::unique_ptr<ModuleRtpRtcpImpl2> rtp_module_;
+};
+
+std::unique_ptr<EncodedImage> CreateDefaultEncodedImage() {
+ const uint8_t data[] = {1, 2, 3, 4};
+ auto encoded_image = std::make_unique<EncodedImage>();
+ encoded_image->SetEncodedData(
+ webrtc::EncodedImageBuffer::Create(data, sizeof(data)));
+ return encoded_image;
+}
+
+TEST_F(RtpSenderVideoWithFrameTransformerTest,
+ CreateSenderRegistersFrameTransformer) {
+ auto mock_frame_transformer =
+ rtc::make_ref_counted<NiceMock<MockFrameTransformer>>();
+ EXPECT_CALL(*mock_frame_transformer,
+ RegisterTransformedFrameSinkCallback(_, kSsrc));
+ std::unique_ptr<RTPSenderVideo> rtp_sender_video =
+ CreateSenderWithFrameTransformer(mock_frame_transformer);
+}
+
+TEST_F(RtpSenderVideoWithFrameTransformerTest,
+ DestroySenderUnregistersFrameTransformer) {
+ auto mock_frame_transformer =
+ rtc::make_ref_counted<NiceMock<MockFrameTransformer>>();
+ std::unique_ptr<RTPSenderVideo> rtp_sender_video =
+ CreateSenderWithFrameTransformer(mock_frame_transformer);
+ EXPECT_CALL(*mock_frame_transformer,
+ UnregisterTransformedFrameSinkCallback(kSsrc));
+ rtp_sender_video = nullptr;
+}
+
+TEST_F(RtpSenderVideoWithFrameTransformerTest,
+ SendEncodedImageTransformsFrame) {
+ auto mock_frame_transformer =
+ rtc::make_ref_counted<NiceMock<MockFrameTransformer>>();
+ std::unique_ptr<RTPSenderVideo> rtp_sender_video =
+ CreateSenderWithFrameTransformer(mock_frame_transformer);
+ auto encoded_image = CreateDefaultEncodedImage();
+ RTPVideoHeader video_header;
+
+ EXPECT_CALL(*mock_frame_transformer, Transform);
+ rtp_sender_video->SendEncodedImage(kPayload, kType, kTimestamp,
+ *encoded_image, video_header,
+ kDefaultExpectedRetransmissionTimeMs);
+}
+
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+TEST_F(RtpSenderVideoWithFrameTransformerTest, ValidPayloadTypes) {
+ auto mock_frame_transformer =
+ rtc::make_ref_counted<NiceMock<MockFrameTransformer>>();
+ std::unique_ptr<RTPSenderVideo> rtp_sender_video =
+ CreateSenderWithFrameTransformer(mock_frame_transformer);
+ auto encoded_image = CreateDefaultEncodedImage();
+ RTPVideoHeader video_header;
+
+ EXPECT_TRUE(rtp_sender_video->SendEncodedImage(
+ 0, kType, kTimestamp, *encoded_image, video_header,
+ kDefaultExpectedRetransmissionTimeMs));
+ EXPECT_TRUE(rtp_sender_video->SendEncodedImage(
+ 127, kType, kTimestamp, *encoded_image, video_header,
+ kDefaultExpectedRetransmissionTimeMs));
+ EXPECT_DEATH(rtp_sender_video->SendEncodedImage(
+ -1, kType, kTimestamp, *encoded_image, video_header,
+ kDefaultExpectedRetransmissionTimeMs),
+ "");
+ EXPECT_DEATH(rtp_sender_video->SendEncodedImage(
+ 128, kType, kTimestamp, *encoded_image, video_header,
+ kDefaultExpectedRetransmissionTimeMs),
+ "");
+}
+#endif
+
+TEST_F(RtpSenderVideoWithFrameTransformerTest, OnTransformedFrameSendsVideo) {
+ auto mock_frame_transformer =
+ rtc::make_ref_counted<NiceMock<MockFrameTransformer>>();
+ rtc::scoped_refptr<TransformedFrameCallback> callback;
+ EXPECT_CALL(*mock_frame_transformer, RegisterTransformedFrameSinkCallback)
+ .WillOnce(SaveArg<0>(&callback));
+ std::unique_ptr<RTPSenderVideo> rtp_sender_video =
+ CreateSenderWithFrameTransformer(mock_frame_transformer);
+ ASSERT_TRUE(callback);
+
+ auto encoded_image = CreateDefaultEncodedImage();
+ RTPVideoHeader video_header;
+ video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ ON_CALL(*mock_frame_transformer, Transform)
+ .WillByDefault(
+ [&callback](std::unique_ptr<TransformableFrameInterface> frame) {
+ callback->OnTransformedFrame(std::move(frame));
+ });
+ TaskQueueForTest encoder_queue;
+ encoder_queue.SendTask(
+ [&] {
+ rtp_sender_video->SendEncodedImage(
+ kPayload, kType, kTimestamp, *encoded_image, video_header,
+ kDefaultExpectedRetransmissionTimeMs);
+ });
+ encoder_queue.WaitForPreviouslyPostedTasks();
+ EXPECT_EQ(transport_.packets_sent(), 1);
+}
+
+TEST_F(RtpSenderVideoWithFrameTransformerTest,
+ TransformableFrameMetadataHasCorrectValue) {
+ auto mock_frame_transformer =
+ rtc::make_ref_counted<NiceMock<MockFrameTransformer>>();
+ std::unique_ptr<RTPSenderVideo> rtp_sender_video =
+ CreateSenderWithFrameTransformer(mock_frame_transformer);
+ auto encoded_image = CreateDefaultEncodedImage();
+ RTPVideoHeader video_header;
+ video_header.width = 1280u;
+ video_header.height = 720u;
+ RTPVideoHeader::GenericDescriptorInfo& generic =
+ video_header.generic.emplace();
+ generic.frame_id = 10;
+ generic.temporal_index = 3;
+ generic.spatial_index = 2;
+ generic.decode_target_indications = {DecodeTargetIndication::kSwitch};
+ generic.dependencies = {5};
+
+ // Check that the transformable frame passed to the frame transformer has the
+ // correct metadata.
+ EXPECT_CALL(*mock_frame_transformer, Transform)
+ .WillOnce(
+ [](std::unique_ptr<TransformableFrameInterface> transformable_frame) {
+ auto frame =
+ absl::WrapUnique(static_cast<TransformableVideoFrameInterface*>(
+ transformable_frame.release()));
+ ASSERT_TRUE(frame);
+ auto metadata = frame->GetMetadata();
+ EXPECT_EQ(metadata.GetWidth(), 1280u);
+ EXPECT_EQ(metadata.GetHeight(), 720u);
+ EXPECT_EQ(metadata.GetFrameId(), 10);
+ EXPECT_EQ(metadata.GetTemporalIndex(), 3);
+ EXPECT_EQ(metadata.GetSpatialIndex(), 2);
+ EXPECT_THAT(metadata.GetFrameDependencies(), ElementsAre(5));
+ EXPECT_THAT(metadata.GetDecodeTargetIndications(),
+ ElementsAre(DecodeTargetIndication::kSwitch));
+ });
+ rtp_sender_video->SendEncodedImage(kPayload, kType, kTimestamp,
+ *encoded_image, video_header,
+ kDefaultExpectedRetransmissionTimeMs);
+}
+
+} // namespace
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_sequence_number_map.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_sequence_number_map.cc
new file mode 100644
index 0000000000..441429d442
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_sequence_number_map.cc
@@ -0,0 +1,129 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtp_sequence_number_map.h"
+
+#include <algorithm>
+#include <iterator>
+#include <limits>
+
+#include "absl/algorithm/container.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/numerics/sequence_number_util.h"
+
+namespace webrtc {
+
+RtpSequenceNumberMap::RtpSequenceNumberMap(size_t max_entries)
+ : max_entries_(max_entries) {
+ RTC_DCHECK_GT(max_entries_, 4); // See code paring down to `max_entries_`.
+ RTC_DCHECK_LE(max_entries_, 1 << 15);
+}
+
+RtpSequenceNumberMap::~RtpSequenceNumberMap() = default;
+
+void RtpSequenceNumberMap::InsertPacket(uint16_t sequence_number, Info info) {
+ RTC_DCHECK(associations_.size() < 2 ||
+ AheadOf(associations_.back().sequence_number,
+ associations_.front().sequence_number));
+
+ if (associations_.empty()) {
+ associations_.emplace_back(sequence_number, info);
+ return;
+ }
+
+ if (AheadOrAt(sequence_number, associations_.front().sequence_number) &&
+ AheadOrAt(associations_.back().sequence_number, sequence_number)) {
+ // The sequence number has wrapped around and is within the range
+ // currently held by `associations_`; we should invalidate all entries.
+ RTC_LOG(LS_WARNING) << "Sequence number wrapped around unexpectedly.";
+ associations_.clear();
+ associations_.emplace_back(sequence_number, info);
+ return;
+ }
+
+ std::deque<Association>::iterator erase_to = associations_.begin();
+
+ RTC_DCHECK_LE(associations_.size(), max_entries_);
+ if (associations_.size() == max_entries_) {
+ // Pare down the container so that inserting some additional elements
+ // would not exceed the maximum size.
+ const size_t new_size = 3 * max_entries_ / 4;
+ erase_to = std::next(erase_to, max_entries_ - new_size);
+ }
+
+ // It is guaranteed that `associations_` can be split into two partitions,
+ // either partition possibly empty, such that:
+ // * In the first partition, all elements are AheadOf the new element.
+ // This is the partition of the obsolete elements.
+ // * In the second partition, the new element is AheadOf all the elements.
+ // The elements of this partition may stay.
+ auto cmp = [](const Association& a, uint16_t sequence_number) {
+ return AheadOf(a.sequence_number, sequence_number);
+ };
+ RTC_DCHECK(erase_to != associations_.end());
+ erase_to =
+ std::lower_bound(erase_to, associations_.end(), sequence_number, cmp);
+ associations_.erase(associations_.begin(), erase_to);
+
+ associations_.emplace_back(sequence_number, info);
+
+ RTC_DCHECK(associations_.size() == 1 ||
+ AheadOf(associations_.back().sequence_number,
+ associations_.front().sequence_number));
+}
+
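+// A worked example of the partitioning above (illustrative values only):
+// suppose `associations_` holds sequence numbers {65000, 65100, 200} and a
+// packet with sequence number 32900 arrives. AheadOf(65000, 32900) and
+// AheadOf(65100, 32900) both hold, so those two entries form the obsolete
+// prefix and are erased; 200 is kept and 32900 is appended, preserving the
+// invariant that the back is AheadOf the front.
+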
+void RtpSequenceNumberMap::InsertFrame(uint16_t first_sequence_number,
+ size_t packet_count,
+ uint32_t timestamp) {
+ RTC_DCHECK_GT(packet_count, 0);
+ RTC_DCHECK_LE(packet_count, std::numeric_limits<uint16_t>::max());
+
+ for (size_t i = 0; i < packet_count; ++i) {
+ const bool is_first = (i == 0);
+ const bool is_last = (i == packet_count - 1);
+ InsertPacket(static_cast<uint16_t>(first_sequence_number + i),
+ Info(timestamp, is_first, is_last));
+ }
+}
+
+absl::optional<RtpSequenceNumberMap::Info> RtpSequenceNumberMap::Get(
+ uint16_t sequence_number) const {
+ // To make the binary search easier to understand, we use the fact that
+ // adding a constant offset to all elements, as well as to the searched
+ // element, does not change the relative ordering. This way, we can find
+ // an offset that would make all of the elements strictly ascending according
+ // to normal integer comparison.
+ // Finding such an offset is easy - the offset that would map the oldest
+ // element to 0 would serve this purpose.
+
+ if (associations_.empty()) {
+ return absl::nullopt;
+ }
+
+ const uint16_t offset =
+ static_cast<uint16_t>(0) - associations_.front().sequence_number;
+
+ auto cmp = [offset](const Association& a, uint16_t sequence_number) {
+ return static_cast<uint16_t>(a.sequence_number + offset) <
+ static_cast<uint16_t>(sequence_number + offset);
+ };
+ const auto elem = absl::c_lower_bound(associations_, sequence_number, cmp);
+
+ return elem != associations_.end() && elem->sequence_number == sequence_number
+ ? absl::optional<Info>(elem->info)
+ : absl::nullopt;
+}
+
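+// A worked example of the offset trick above (illustrative values only): if
+// `associations_` holds {65530, 65534, 3}, then offset = 0 - 65530 = 6
+// (mod 2^16), which maps the elements to {0, 4, 9}. Those are strictly
+// ascending under plain uint16_t comparison, so standard lower_bound
+// semantics apply directly.
+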
+size_t RtpSequenceNumberMap::AssociationCountForTesting() const {
+ return associations_.size();
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_sequence_number_map.h b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_sequence_number_map.h
new file mode 100644
index 0000000000..8a036c25a4
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_sequence_number_map.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_RTP_RTCP_SOURCE_RTP_SEQUENCE_NUMBER_MAP_H_
+#define MODULES_RTP_RTCP_SOURCE_RTP_SEQUENCE_NUMBER_MAP_H_
+
+#include <cstddef>
+#include <cstdint>
+#include <deque>
+
+#include "absl/types/optional.h"
+
+namespace webrtc {
+
+// Records the association of RTP sequence numbers to timestamps and to whether
+// the packet was first and/or last in the frame.
+//
+// 1. Limits number of entries. Whenever `max_entries` is about to be exceeded,
+// the size is reduced by approximately 25%.
+// 2. RTP sequence numbers wrap around relatively infrequently.
+// This class therefore only remembers at most the last 2^15 RTP packets,
+// so that the newest packet's sequence number is still AheadOf the oldest
+// packet's sequence number.
+// 3. Media frames are sometimes split into several RTP packets.
+// In such a case, InsertPacket() is expected to be called once per packet.
+// The timestamp is not expected to change between those calls.
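+//
+// A minimal usage sketch (illustrative values only):
+//
+//   RtpSequenceNumberMap map(/*max_entries=*/1 << 10);
+//   map.InsertFrame(/*first_sequence_number=*/100, /*packet_count=*/3,
+//                   /*timestamp=*/90000);
+//   absl::optional<RtpSequenceNumberMap::Info> info = map.Get(101);
+//   // info->timestamp == 90000; info->is_first and info->is_last are false.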
+class RtpSequenceNumberMap final {
+ public:
+ struct Info final {
+ Info(uint32_t timestamp, bool is_first, bool is_last)
+ : timestamp(timestamp), is_first(is_first), is_last(is_last) {}
+
+ friend bool operator==(const Info& lhs, const Info& rhs) {
+ return lhs.timestamp == rhs.timestamp && lhs.is_first == rhs.is_first &&
+ lhs.is_last == rhs.is_last;
+ }
+
+ uint32_t timestamp;
+ bool is_first;
+ bool is_last;
+ };
+
+ explicit RtpSequenceNumberMap(size_t max_entries);
+ RtpSequenceNumberMap(const RtpSequenceNumberMap& other) = delete;
+ RtpSequenceNumberMap& operator=(const RtpSequenceNumberMap& other) = delete;
+ ~RtpSequenceNumberMap();
+
+ void InsertPacket(uint16_t sequence_number, Info info);
+ void InsertFrame(uint16_t first_sequence_number,
+ size_t packet_count,
+ uint32_t timestamp);
+
+ absl::optional<Info> Get(uint16_t sequence_number) const;
+
+ size_t AssociationCountForTesting() const;
+
+ private:
+ struct Association {
+ explicit Association(uint16_t sequence_number)
+ : Association(sequence_number, Info(0, false, false)) {}
+
+ Association(uint16_t sequence_number, Info info)
+ : sequence_number(sequence_number), info(info) {}
+
+ uint16_t sequence_number;
+ Info info;
+ };
+
+ const size_t max_entries_;
+
+ // The non-transitivity of AheadOf() would be problematic with a map,
+ // so we use a deque instead.
+ std::deque<Association> associations_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_RTP_RTCP_SOURCE_RTP_SEQUENCE_NUMBER_MAP_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_sequence_number_map_unittest.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_sequence_number_map_unittest.cc
new file mode 100644
index 0000000000..78c9e4a251
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_sequence_number_map_unittest.cc
@@ -0,0 +1,502 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtp_sequence_number_map.h"
+
+#include <algorithm>
+#include <iterator>
+#include <limits>
+#include <memory>
+#include <tuple>
+#include <vector>
+
+#include "absl/memory/memory.h"
+#include "absl/types/optional.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/numerics/sequence_number_util.h"
+#include "rtc_base/random.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+using Info = RtpSequenceNumberMap::Info;
+
+constexpr uint16_t kUint16Max = std::numeric_limits<uint16_t>::max();
+constexpr size_t kMaxPossibleMaxEntries = 1 << 15;
+
+// Just a named pair.
+struct Association final {
+ Association(uint16_t sequence_number, Info info)
+ : sequence_number(sequence_number), info(info) {}
+
+ uint16_t sequence_number;
+ Info info;
+};
+
+class RtpSequenceNumberMapTest : public ::testing::Test {
+ protected:
+ RtpSequenceNumberMapTest() : random_(1983) {}
+ ~RtpSequenceNumberMapTest() override = default;
+
+ Association CreateAssociation(uint16_t sequence_number, uint32_t timestamp) {
+ return Association(sequence_number,
+ {timestamp, random_.Rand<bool>(), random_.Rand<bool>()});
+ }
+
+ void VerifyAssociations(const RtpSequenceNumberMap& uut,
+ const std::vector<Association>& associations) {
+ return VerifyAssociations(uut, associations.begin(), associations.end());
+ }
+
+ void VerifyAssociations(
+ const RtpSequenceNumberMap& uut,
+ std::vector<Association>::const_iterator associations_begin,
+ std::vector<Association>::const_iterator associations_end) {
+ RTC_DCHECK(associations_begin < associations_end);
+ ASSERT_EQ(static_cast<size_t>(associations_end - associations_begin),
+ uut.AssociationCountForTesting());
+ for (auto association = associations_begin; association != associations_end;
+ ++association) {
+ EXPECT_EQ(uut.Get(association->sequence_number), association->info);
+ }
+ }
+
+ // Allows several variations of the same test; definition next to the tests.
+ void GetObsoleteSequenceNumberReturnsNullOptMultipleEntriesObsoleted(
+ bool with_wrap_around,
+ bool last_element_kept);
+
+ // Allows several variations of the same test; definition next to the tests.
+ void RepeatedSequenceNumberInvalidatesAll(size_t index_of_repeated);
+
+ // Allows several variations of the same test; definition next to the tests.
+ void MaxEntriesReachedAtSameTimeAsObsoletionOfItem(size_t max_entries,
+ size_t obsoleted_count);
+
+ Random random_;
+};
+
+class RtpSequenceNumberMapTestWithParams
+ : public RtpSequenceNumberMapTest,
+ public ::testing::WithParamInterface<std::tuple<size_t, uint16_t>> {
+ protected:
+ RtpSequenceNumberMapTestWithParams() = default;
+ ~RtpSequenceNumberMapTestWithParams() override = default;
+
+ std::vector<Association> ProduceRandomAssociationSequence(
+ size_t association_count,
+ uint16_t first_sequence_number,
+ bool allow_obsoletion) {
+ std::vector<Association> associations;
+ associations.reserve(association_count);
+
+ if (association_count == 0) {
+ return associations;
+ }
+
+ associations.emplace_back(
+ first_sequence_number,
+ Info(0, random_.Rand<bool>(), random_.Rand<bool>()));
+
+ for (size_t i = 1; i < association_count; ++i) {
+ const uint16_t sequence_number =
+ associations[i - 1].sequence_number + random_.Rand(1, 100);
+ RTC_DCHECK(allow_obsoletion ||
+ AheadOf(sequence_number, associations[0].sequence_number));
+
+ const uint32_t timestamp =
+ associations[i - 1].info.timestamp + random_.Rand(1, 10000);
+
+ associations.emplace_back(
+ sequence_number,
+ Info(timestamp, random_.Rand<bool>(), random_.Rand<bool>()));
+ }
+
+ return associations;
+ }
+};
+
+INSTANTIATE_TEST_SUITE_P(All,
+ RtpSequenceNumberMapTestWithParams,
+ ::testing::Combine(
+ // Association count.
+ ::testing::Values(1, 2, 100),
+ // First sequence number.
+ ::testing::Values(0,
+ 100,
+ kUint16Max - 100,
+ kUint16Max - 1,
+ kUint16Max)));
+
+TEST_F(RtpSequenceNumberMapTest, GetBeforeAssociationsRecordedReturnsNullOpt) {
+ RtpSequenceNumberMap uut(kMaxPossibleMaxEntries);
+ constexpr uint16_t kArbitrarySequenceNumber = 321;
+ EXPECT_FALSE(uut.Get(kArbitrarySequenceNumber));
+}
+
+// Version #1 - any old unknown sequence number.
+TEST_F(RtpSequenceNumberMapTest, GetUnknownSequenceNumberReturnsNullOpt1) {
+ RtpSequenceNumberMap uut(kMaxPossibleMaxEntries);
+
+ constexpr uint16_t kKnownSequenceNumber = 10;
+ constexpr uint32_t kArbitrary = 987;
+ uut.InsertPacket(kKnownSequenceNumber, {kArbitrary, false, false});
+
+ constexpr uint16_t kUnknownSequenceNumber = kKnownSequenceNumber + 1;
+ EXPECT_FALSE(uut.Get(kUnknownSequenceNumber));
+}
+
+// Version #2 - intentionally pick a value in the range of currently held
+// values, so as to trigger lower_bound / upper_bound.
+TEST_F(RtpSequenceNumberMapTest, GetUnknownSequenceNumberReturnsNullOpt2) {
+ RtpSequenceNumberMap uut(kMaxPossibleMaxEntries);
+
+ const std::vector<Association> setup = {CreateAssociation(1000, 500), //
+ CreateAssociation(1020, 501)};
+ for (const Association& association : setup) {
+ uut.InsertPacket(association.sequence_number, association.info);
+ }
+
+ EXPECT_FALSE(uut.Get(1001));
+}
+
+TEST_P(RtpSequenceNumberMapTestWithParams,
+ GetKnownSequenceNumberReturnsCorrectValue) {
+ RtpSequenceNumberMap uut(kMaxPossibleMaxEntries);
+
+ const size_t association_count = std::get<0>(GetParam());
+ const uint16_t first_sequence_number = std::get<1>(GetParam());
+
+ const std::vector<Association> associations =
+ ProduceRandomAssociationSequence(association_count, first_sequence_number,
+ /*allow_obsoletion=*/false);
+
+ for (const Association& association : associations) {
+ uut.InsertPacket(association.sequence_number, association.info);
+ }
+
+ VerifyAssociations(uut, associations);
+}
+
+TEST_F(RtpSequenceNumberMapTest, InsertFrameOnSinglePacketFrame) {
+ RtpSequenceNumberMap uut(kMaxPossibleMaxEntries);
+
+ constexpr uint16_t kSequenceNumber = 888;
+ constexpr uint32_t kTimestamp = 98765;
+ uut.InsertFrame(kSequenceNumber, 1, kTimestamp);
+
+ EXPECT_EQ(uut.Get(kSequenceNumber), Info(kTimestamp, true, true));
+}
+
+TEST_F(RtpSequenceNumberMapTest, InsertFrameOnMultiPacketFrameNoWrapAround) {
+ RtpSequenceNumberMap uut(kMaxPossibleMaxEntries);
+
+ constexpr uint16_t kFirstSequenceNumber = 0;
+ constexpr uint32_t kTimestamp = 98765;
+ uut.InsertFrame(kFirstSequenceNumber, 3, kTimestamp);
+
+ EXPECT_EQ(uut.Get(kFirstSequenceNumber + 0), Info(kTimestamp, true, false));
+ EXPECT_EQ(uut.Get(kFirstSequenceNumber + 1), Info(kTimestamp, false, false));
+ EXPECT_EQ(uut.Get(kFirstSequenceNumber + 2), Info(kTimestamp, false, true));
+}
+
+TEST_F(RtpSequenceNumberMapTest, InsertFrameOnMultiPacketFrameWithWrapAround) {
+ RtpSequenceNumberMap uut(kMaxPossibleMaxEntries);
+
+ constexpr uint16_t kFirstSequenceNumber = kUint16Max;
+ constexpr uint32_t kTimestamp = 98765;
+ uut.InsertFrame(kFirstSequenceNumber, 3, kTimestamp);
+
+// Suppress "truncation of constant value" warning; wrap-around is intended.
+#ifdef _MSC_VER
+#pragma warning(push)
+#pragma warning(disable : 4309)
+#endif
+ EXPECT_EQ(uut.Get(static_cast<uint16_t>(kFirstSequenceNumber + 0u)),
+ Info(kTimestamp, true, false));
+ EXPECT_EQ(uut.Get(static_cast<uint16_t>(kFirstSequenceNumber + 1u)),
+ Info(kTimestamp, false, false));
+ EXPECT_EQ(uut.Get(static_cast<uint16_t>(kFirstSequenceNumber + 2u)),
+ Info(kTimestamp, false, true));
+#ifdef _MSC_VER
+#pragma warning(pop)
+#endif
+}
+
+TEST_F(RtpSequenceNumberMapTest,
+ GetObsoleteSequenceNumberReturnsNullOptSingleValueObsoleted) {
+ RtpSequenceNumberMap uut(kMaxPossibleMaxEntries);
+
+ const std::vector<Association> associations = {
+ CreateAssociation(0, 10), //
+ CreateAssociation(0x8000, 20), //
+ CreateAssociation(0x8001u, 30)};
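+ // Note: 0x8000 is exactly half the 16-bit sequence-number space ahead of 0,
+ // which AheadOf() still treats as newer; 0x8001 wraps around and compares
+ // as older than 0, which is what obsoletes the first entry below.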
+
+ uut.InsertPacket(associations[0].sequence_number, associations[0].info);
+
+ // First association not yet obsolete, and therefore remembered.
+ RTC_DCHECK(AheadOf(associations[1].sequence_number,
+ associations[0].sequence_number));
+ uut.InsertPacket(associations[1].sequence_number, associations[1].info);
+ VerifyAssociations(uut, {associations[0], associations[1]});
+
+ // Test focus - new entry obsoletes first entry.
+ RTC_DCHECK(!AheadOf(associations[2].sequence_number,
+ associations[0].sequence_number));
+ uut.InsertPacket(associations[2].sequence_number, associations[2].info);
+ VerifyAssociations(uut, {associations[1], associations[2]});
+}
+
+void RtpSequenceNumberMapTest::
+ GetObsoleteSequenceNumberReturnsNullOptMultipleEntriesObsoleted(
+ bool with_wrap_around,
+ bool last_element_kept) {
+ RtpSequenceNumberMap uut(kMaxPossibleMaxEntries);
+
+ std::vector<Association> associations;
+ if (with_wrap_around) {
+ associations = {CreateAssociation(kUint16Max - 1, 10), //
+ CreateAssociation(kUint16Max, 20), //
+ CreateAssociation(0, 30), //
+ CreateAssociation(1, 40), //
+ CreateAssociation(2, 50)};
+ } else {
+ associations = {CreateAssociation(1, 10), //
+ CreateAssociation(2, 20), //
+ CreateAssociation(3, 30), //
+ CreateAssociation(4, 40), //
+ CreateAssociation(5, 50)};
+ }
+
+ for (const Association& association : associations) {
+ uut.InsertPacket(association.sequence_number, association.info);
+ }
+ VerifyAssociations(uut, associations);
+
+ // Define a new association that will obsolete either all previous entries,
+ // or all previous entries except for the last one, depending on the
+ // parameter instantiation of this test.
+ RTC_DCHECK_EQ(
+ static_cast<uint16_t>(
+ associations[associations.size() - 1].sequence_number),
+ static_cast<uint16_t>(
+ associations[associations.size() - 2].sequence_number + 1u));
+ uint16_t new_sequence_number;
+ if (last_element_kept) {
+ new_sequence_number =
+ associations[associations.size() - 1].sequence_number + 0x8000;
+ RTC_DCHECK(AheadOf(new_sequence_number,
+ associations[associations.size() - 1].sequence_number));
+ } else {
+ new_sequence_number =
+ associations[associations.size() - 1].sequence_number + 0x8001;
+ RTC_DCHECK(!AheadOf(new_sequence_number,
+ associations[associations.size() - 1].sequence_number));
+ }
+ RTC_DCHECK(!AheadOf(new_sequence_number,
+ associations[associations.size() - 2].sequence_number));
+
+ // Record the new association.
+ const Association new_association =
+ CreateAssociation(new_sequence_number, 60);
+ uut.InsertPacket(new_association.sequence_number, new_association.info);
+
+ // Make sure all obsoleted elements were removed.
+ const size_t obsoleted_count =
+ associations.size() - (last_element_kept ? 1 : 0);
+ for (size_t i = 0; i < obsoleted_count; ++i) {
+ EXPECT_FALSE(uut.Get(associations[i].sequence_number));
+ }
+
+ // Make sure the expected elements were not removed, and return the
+ // expected value.
+ if (last_element_kept) {
+ EXPECT_EQ(uut.Get(associations.back().sequence_number),
+ associations.back().info);
+ }
+ EXPECT_EQ(uut.Get(new_association.sequence_number), new_association.info);
+}
+
+TEST_F(RtpSequenceNumberMapTest,
+ GetObsoleteSequenceNumberReturnsNullOptMultipleEntriesObsoleted0) {
+ const bool with_wrap_around = false;
+ const bool last_element_kept = false;
+ GetObsoleteSequenceNumberReturnsNullOptMultipleEntriesObsoleted(
+ with_wrap_around, last_element_kept);
+}
+
+TEST_F(RtpSequenceNumberMapTest,
+ GetObsoleteSequenceNumberReturnsNullOptMultipleEntriesObsoleted1) {
+ const bool with_wrap_around = true;
+ const bool last_element_kept = false;
+ GetObsoleteSequenceNumberReturnsNullOptMultipleEntriesObsoleted(
+ with_wrap_around, last_element_kept);
+}
+
+TEST_F(RtpSequenceNumberMapTest,
+ GetObsoleteSequenceNumberReturnsNullOptMultipleEntriesObsoleted2) {
+ const bool with_wrap_around = false;
+ const bool last_element_kept = true;
+ GetObsoleteSequenceNumberReturnsNullOptMultipleEntriesObsoleted(
+ with_wrap_around, last_element_kept);
+}
+
+TEST_F(RtpSequenceNumberMapTest,
+ GetObsoleteSequenceNumberReturnsNullOptMultipleEntriesObsoleted3) {
+ const bool with_wrap_around = true;
+ const bool last_element_kept = true;
+ GetObsoleteSequenceNumberReturnsNullOptMultipleEntriesObsoleted(
+ with_wrap_around, last_element_kept);
+}
+
+void RtpSequenceNumberMapTest::RepeatedSequenceNumberInvalidatesAll(
+ size_t index_of_repeated) {
+ RtpSequenceNumberMap uut(kMaxPossibleMaxEntries);
+
+ const std::vector<Association> setup = {CreateAssociation(100, 500), //
+ CreateAssociation(101, 501), //
+ CreateAssociation(102, 502)};
+ RTC_DCHECK_LT(index_of_repeated, setup.size());
+ for (const Association& association : setup) {
+ uut.InsertPacket(association.sequence_number, association.info);
+ }
+
+ const Association new_association =
+ CreateAssociation(setup[index_of_repeated].sequence_number, 503);
+ uut.InsertPacket(new_association.sequence_number, new_association.info);
+
+ // All entries from setup invalidated.
+ // New entry valid and mapped to new value.
+ for (size_t i = 0; i < setup.size(); ++i) {
+ if (i == index_of_repeated) {
+ EXPECT_EQ(uut.Get(new_association.sequence_number), new_association.info);
+ } else {
+ EXPECT_FALSE(uut.Get(setup[i].sequence_number));
+ }
+ }
+}
+
+TEST_F(RtpSequenceNumberMapTest,
+ RepeatedSequenceNumberInvalidatesAllRepeatFirst) {
+ RepeatedSequenceNumberInvalidatesAll(0);
+}
+
+TEST_F(RtpSequenceNumberMapTest,
+ RepeatedSequenceNumberInvalidatesAllRepeatMiddle) {
+ RepeatedSequenceNumberInvalidatesAll(1);
+}
+
+TEST_F(RtpSequenceNumberMapTest,
+ RepeatedSequenceNumberInvalidatesAllRepeatLast) {
+ RepeatedSequenceNumberInvalidatesAll(2);
+}
+
+TEST_F(RtpSequenceNumberMapTest,
+ SequenceNumberInsideMemorizedRangeInvalidatesAll) {
+ RtpSequenceNumberMap uut(kMaxPossibleMaxEntries);
+
+ const std::vector<Association> setup = {CreateAssociation(1000, 500), //
+ CreateAssociation(1020, 501), //
+ CreateAssociation(1030, 502)};
+ for (const Association& association : setup) {
+ uut.InsertPacket(association.sequence_number, association.info);
+ }
+
+ const Association new_association = CreateAssociation(1010, 503);
+ uut.InsertPacket(new_association.sequence_number, new_association.info);
+
+ // All entries from setup invalidated.
+ // New entry valid and mapped to new value.
+ for (size_t i = 0; i < setup.size(); ++i) {
+ EXPECT_FALSE(uut.Get(setup[i].sequence_number));
+ }
+ EXPECT_EQ(uut.Get(new_association.sequence_number), new_association.info);
+}
+
+TEST_F(RtpSequenceNumberMapTest, MaxEntriesObserved) {
+ constexpr size_t kMaxEntries = 100;
+ RtpSequenceNumberMap uut(kMaxEntries);
+
+ std::vector<Association> associations;
+ associations.reserve(kMaxEntries);
+ uint32_t timestamp = 789;
+ for (size_t i = 0; i < kMaxEntries; ++i) {
+ associations.push_back(CreateAssociation(i, ++timestamp));
+ uut.InsertPacket(associations[i].sequence_number, associations[i].info);
+ }
+ VerifyAssociations(uut, associations); // Sanity.
+
+ const Association new_association =
+ CreateAssociation(kMaxEntries, ++timestamp);
+ uut.InsertPacket(new_association.sequence_number, new_association.info);
+ associations.push_back(new_association);
+
+ // On overflow the map is expected to pare itself down to 3/4 of its
+ // capacity; the +1 is for `new_association`.
+ const size_t kExpectedAssociationCount = 3 * kMaxEntries / 4 + 1;
+ const auto expected_begin =
+ std::prev(associations.end(), kExpectedAssociationCount);
+ VerifyAssociations(uut, expected_begin, associations.end());
+}
+
+void RtpSequenceNumberMapTest::MaxEntriesReachedAtSameTimeAsObsoletionOfItem(
+ size_t max_entries,
+ size_t obsoleted_count) {
+ RtpSequenceNumberMap uut(max_entries);
+
+ std::vector<Association> associations;
+ associations.reserve(max_entries);
+ uint32_t timestamp = 789;
+ for (size_t i = 0; i < max_entries; ++i) {
+ associations.push_back(CreateAssociation(i, ++timestamp));
+ uut.InsertPacket(associations[i].sequence_number, associations[i].info);
+ }
+ VerifyAssociations(uut, associations); // Sanity.
+
+ const uint16_t new_association_sequence_number =
+ static_cast<uint16_t>(obsoleted_count) + (1 << 15);
+ const Association new_association =
+ CreateAssociation(new_association_sequence_number, ++timestamp);
+ uut.InsertPacket(new_association.sequence_number, new_association.info);
+ associations.push_back(new_association);
+
+ // Entries are removed both by paring down to 3/4 of capacity and by
+ // obsoletion; the smaller survivor count wins. The +1 is for
+ // `new_association`.
+ const size_t kExpectedAssociationCount =
+ std::min(3 * max_entries / 4, max_entries - obsoleted_count) + 1;
+ const auto expected_begin =
+ std::prev(associations.end(), kExpectedAssociationCount);
+ VerifyAssociations(uut, expected_begin, associations.end());
+}
+
+// Version #1 - #(obsoleted entries) < #(entries after paring down below max).
+TEST_F(RtpSequenceNumberMapTest,
+ MaxEntriesReachedAtSameTimeAsObsoletionOfItem1) {
+ constexpr size_t kMaxEntries = 100;
+ constexpr size_t kObsoletionTarget = (kMaxEntries / 4) - 1;
+ MaxEntriesReachedAtSameTimeAsObsoletionOfItem(kMaxEntries, kObsoletionTarget);
+}
+
+// Version #2 - #(obsoleted entries) == #(entries after paring down below max).
+TEST_F(RtpSequenceNumberMapTest,
+ MaxEntriesReachedAtSameTimeAsObsoletionOfItem2) {
+ constexpr size_t kMaxEntries = 100;
+ constexpr size_t kObsoletionTarget = kMaxEntries / 4;
+ MaxEntriesReachedAtSameTimeAsObsoletionOfItem(kMaxEntries, kObsoletionTarget);
+}
+
+// Version #3 - #(obsoleted entries) > #(entries after paring down below max).
+TEST_F(RtpSequenceNumberMapTest,
+ MaxEntriesReachedAtSameTimeAsObsoletionOfItem3) {
+ constexpr size_t kMaxEntries = 100;
+ constexpr size_t kObsoletionTarget = (kMaxEntries / 4) + 1;
+ MaxEntriesReachedAtSameTimeAsObsoletionOfItem(kMaxEntries, kObsoletionTarget);
+}
+
+} // namespace
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_util.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_util.cc
new file mode 100644
index 0000000000..cf1e54254a
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_util.cc
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtp_util.h"
+
+#include <cstddef>
+#include <cstdint>
+
+#include "api/array_view.h"
+#include "modules/rtp_rtcp/source/byte_io.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace {
+
+constexpr uint8_t kRtpVersion = 2;
+constexpr size_t kMinRtpPacketLen = 12;
+constexpr size_t kMinRtcpPacketLen = 4;
+
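+// For reference, the RTP fixed header layout (RFC 3550, section 5.1):
+//  0                   1                   2                   3
+//  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |V=2|P|X|  CC   |M|     PT      |       sequence number         |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |                           timestamp                           |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |           synchronization source (SSRC) identifier            |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// The helpers below read the version from the top two bits of byte 0, the
+// payload type from the low seven bits of byte 1, the sequence number from
+// bytes 2-3, and the SSRC from bytes 8-11.
+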
+bool HasCorrectRtpVersion(rtc::ArrayView<const uint8_t> packet) {
+ return packet[0] >> 6 == kRtpVersion;
+}
+
+// For additional details, see http://tools.ietf.org/html/rfc5761#section-4
+bool PayloadTypeIsReservedForRtcp(uint8_t payload_type) {
+ return 64 <= payload_type && payload_type < 96;
+}
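+
+// For example, an RTCP sender report has packet type 200; read as an RTP
+// payload type this is 200 & 0x7F == 72, which falls inside [64, 96), so
+// IsRtpPacket() rejects such a packet while IsRtcpPacket() accepts it.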
+
+} // namespace
+
+bool IsRtpPacket(rtc::ArrayView<const uint8_t> packet) {
+ return packet.size() >= kMinRtpPacketLen && HasCorrectRtpVersion(packet) &&
+ !PayloadTypeIsReservedForRtcp(packet[1] & 0x7F);
+}
+
+bool IsRtcpPacket(rtc::ArrayView<const uint8_t> packet) {
+ return packet.size() >= kMinRtcpPacketLen && HasCorrectRtpVersion(packet) &&
+ PayloadTypeIsReservedForRtcp(packet[1] & 0x7F);
+}
+
+int ParseRtpPayloadType(rtc::ArrayView<const uint8_t> rtp_packet) {
+ RTC_DCHECK(IsRtpPacket(rtp_packet));
+ return rtp_packet[1] & 0x7F;
+}
+
+uint16_t ParseRtpSequenceNumber(rtc::ArrayView<const uint8_t> rtp_packet) {
+ RTC_DCHECK(IsRtpPacket(rtp_packet));
+ return ByteReader<uint16_t>::ReadBigEndian(rtp_packet.data() + 2);
+}
+
+uint32_t ParseRtpSsrc(rtc::ArrayView<const uint8_t> rtp_packet) {
+ RTC_DCHECK(IsRtpPacket(rtp_packet));
+ return ByteReader<uint32_t>::ReadBigEndian(rtp_packet.data() + 8);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_util.h b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_util.h
new file mode 100644
index 0000000000..835cfcd6c8
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_util.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_RTP_RTCP_SOURCE_RTP_UTIL_H_
+#define MODULES_RTP_RTCP_SOURCE_RTP_UTIL_H_
+
+#include <cstdint>
+
+#include "api/array_view.h"
+
+namespace webrtc {
+
+bool IsRtcpPacket(rtc::ArrayView<const uint8_t> packet);
+bool IsRtpPacket(rtc::ArrayView<const uint8_t> packet);
+
+// Returns base rtp header fields of the rtp packet.
+// Behaviour is undefined when `!IsRtpPacket(rtp_packet)`.
+int ParseRtpPayloadType(rtc::ArrayView<const uint8_t> rtp_packet);
+uint16_t ParseRtpSequenceNumber(rtc::ArrayView<const uint8_t> rtp_packet);
+uint32_t ParseRtpSsrc(rtc::ArrayView<const uint8_t> rtp_packet);
+
+} // namespace webrtc
+
+#endif // MODULES_RTP_RTCP_SOURCE_RTP_UTIL_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_util_unittest.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_util_unittest.cc
new file mode 100644
index 0000000000..3e23416ff4
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_util_unittest.cc
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtp_util.h"
+
+#include "test/gmock.h"
+
+namespace webrtc {
+namespace {
+
+TEST(RtpUtilTest, IsRtpPacket) {
+ constexpr uint8_t kMinimalisticRtpPacket[] = {0x80, 97, 0, 0, //
+ 0, 0, 0, 0, //
+ 0, 0, 0, 0};
+ EXPECT_TRUE(IsRtpPacket(kMinimalisticRtpPacket));
+
+ constexpr uint8_t kWrongRtpVersion[] = {0xc0, 97, 0, 0, //
+ 0, 0, 0, 0, //
+ 0, 0, 0, 0};
+ EXPECT_FALSE(IsRtpPacket(kWrongRtpVersion));
+
+ constexpr uint8_t kPacketWithPayloadForRtcp[] = {0x80, 200, 0, 0, //
+ 0, 0, 0, 0, //
+ 0, 0, 0, 0};
+ EXPECT_FALSE(IsRtpPacket(kPacketWithPayloadForRtcp));
+
+ constexpr uint8_t kTooSmallRtpPacket[] = {0x80, 97, 0, 0, //
+ 0, 0, 0, 0, //
+ 0, 0, 0};
+ EXPECT_FALSE(IsRtpPacket(kTooSmallRtpPacket));
+
+ EXPECT_FALSE(IsRtpPacket({}));
+}
+
+TEST(RtpUtilTest, IsRtcpPacket) {
+ constexpr uint8_t kMinimalisticRtcpPacket[] = {0x80, 202, 0, 0};
+ EXPECT_TRUE(IsRtcpPacket(kMinimalisticRtcpPacket));
+
+ constexpr uint8_t kWrongRtpVersion[] = {0xc0, 202, 0, 0};
+ EXPECT_FALSE(IsRtcpPacket(kWrongRtpVersion));
+
+ constexpr uint8_t kPacketWithPayloadForRtp[] = {0x80, 225, 0, 0};
+ EXPECT_FALSE(IsRtcpPacket(kPacketWithPayloadForRtp));
+
+ constexpr uint8_t kTooSmallRtcpPacket[] = {0x80, 202, 0};
+ EXPECT_FALSE(IsRtcpPacket(kTooSmallRtcpPacket));
+
+ EXPECT_FALSE(IsRtcpPacket({}));
+}
+
+TEST(RtpUtilTest, ParseRtpPayloadType) {
+ constexpr uint8_t kMinimalisticRtpPacket[] = {0x80, 97, 0, 0, //
+ 0, 0, 0, 0, //
+ 0x12, 0x34, 0x56, 0x78};
+ EXPECT_EQ(ParseRtpPayloadType(kMinimalisticRtpPacket), 97);
+
+ constexpr uint8_t kMinimalisticRtpPacketWithMarker[] = {
+ 0x80, 0x80 | 97, 0, 0, //
+ 0, 0, 0, 0, //
+ 0x12, 0x34, 0x56, 0x78};
+ EXPECT_EQ(ParseRtpPayloadType(kMinimalisticRtpPacketWithMarker), 97);
+}
+
+TEST(RtpUtilTest, ParseRtpSequenceNumber) {
+ constexpr uint8_t kMinimalisticRtpPacket[] = {0x80, 97, 0x12, 0x34, //
+ 0, 0, 0, 0, //
+ 0, 0, 0, 0};
+ EXPECT_EQ(ParseRtpSequenceNumber(kMinimalisticRtpPacket), 0x1234);
+}
+
+TEST(RtpUtilTest, ParseRtpSsrc) {
+ constexpr uint8_t kMinimalisticRtpPacket[] = {0x80, 97, 0, 0, //
+ 0, 0, 0, 0, //
+ 0x12, 0x34, 0x56, 0x78};
+ EXPECT_EQ(ParseRtpSsrc(kMinimalisticRtpPacket), 0x12345678u);
+}
+
+} // namespace
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_video_header.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_video_header.cc
new file mode 100644
index 0000000000..bb9413ddd5
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_video_header.cc
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtp_video_header.h"
+
+namespace webrtc {
+
+RTPVideoHeader::RTPVideoHeader() : video_timing() {}
+RTPVideoHeader::RTPVideoHeader(const RTPVideoHeader& other) = default;
+RTPVideoHeader::~RTPVideoHeader() = default;
+
+RTPVideoHeader::GenericDescriptorInfo::GenericDescriptorInfo() = default;
+RTPVideoHeader::GenericDescriptorInfo::GenericDescriptorInfo(
+ const GenericDescriptorInfo& other) = default;
+RTPVideoHeader::GenericDescriptorInfo::~GenericDescriptorInfo() = default;
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_video_header.h b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_video_header.h
new file mode 100644
index 0000000000..115b17d36d
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_video_header.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef MODULES_RTP_RTCP_SOURCE_RTP_VIDEO_HEADER_H_
+#define MODULES_RTP_RTCP_SOURCE_RTP_VIDEO_HEADER_H_
+
+#include <bitset>
+#include <cstdint>
+
+#include "absl/container/inlined_vector.h"
+#include "absl/types/optional.h"
+#include "absl/types/variant.h"
+#include "api/rtp_headers.h"
+#include "api/transport/rtp/dependency_descriptor.h"
+#include "api/video/color_space.h"
+#include "api/video/video_codec_type.h"
+#include "api/video/video_content_type.h"
+#include "api/video/video_frame_type.h"
+#include "api/video/video_rotation.h"
+#include "api/video/video_timing.h"
+#include "modules/video_coding/codecs/h264/include/h264_globals.h"
+#include "modules/video_coding/codecs/vp8/include/vp8_globals.h"
+#include "modules/video_coding/codecs/vp9/include/vp9_globals.h"
+
+namespace webrtc {
+// Details passed in the rtp payload for legacy generic rtp packetizer.
+// TODO(bugs.webrtc.org/9772): Deprecate in favor of passing generic video
+// details in an rtp header extension.
+struct RTPVideoHeaderLegacyGeneric {
+ uint16_t picture_id;
+};
+
+using RTPVideoTypeHeader = absl::variant<absl::monostate,
+ RTPVideoHeaderVP8,
+ RTPVideoHeaderVP9,
+ RTPVideoHeaderH264,
+ RTPVideoHeaderLegacyGeneric>;
+
+struct RTPVideoHeader {
+ struct GenericDescriptorInfo {
+ GenericDescriptorInfo();
+ GenericDescriptorInfo(const GenericDescriptorInfo& other);
+ ~GenericDescriptorInfo();
+
+ int64_t frame_id = 0;
+ int spatial_index = 0;
+ int temporal_index = 0;
+ absl::InlinedVector<DecodeTargetIndication, 10> decode_target_indications;
+ absl::InlinedVector<int64_t, 5> dependencies;
+ absl::InlinedVector<int, 4> chain_diffs;
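+ // All 32 possible decode targets are marked active by default.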
+ std::bitset<32> active_decode_targets = ~uint32_t{0};
+ };
+
+ RTPVideoHeader();
+ RTPVideoHeader(const RTPVideoHeader& other);
+
+ ~RTPVideoHeader();
+
+ absl::optional<GenericDescriptorInfo> generic;
+
+ VideoFrameType frame_type = VideoFrameType::kEmptyFrame;
+ uint16_t width = 0;
+ uint16_t height = 0;
+ VideoRotation rotation = VideoRotation::kVideoRotation_0;
+ VideoContentType content_type = VideoContentType::UNSPECIFIED;
+ bool is_first_packet_in_frame = false;
+ bool is_last_packet_in_frame = false;
+ bool is_last_frame_in_picture = true;
+ uint8_t simulcastIdx = 0;
+ VideoCodecType codec = VideoCodecType::kVideoCodecGeneric;
+
+ VideoPlayoutDelay playout_delay;
+ VideoSendTiming video_timing;
+ absl::optional<ColorSpace> color_space;
+ // This field is meant for media quality testing purposes only. When
+ // enabled, it carries the webrtc::VideoFrame id field from the sender to
+ // the receiver.
+ absl::optional<uint16_t> video_frame_tracking_id;
+ RTPVideoTypeHeader video_type_header;
+
+ // When provided, is sent as is as an RTP header extension according to
+ // http://www.webrtc.org/experiments/rtp-hdrext/abs-capture-time.
+ // Otherwise, it is derived from other relevant information.
+ absl::optional<AbsoluteCaptureTime> absolute_capture_time;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_RTP_RTCP_SOURCE_RTP_VIDEO_HEADER_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_video_layers_allocation_extension.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_video_layers_allocation_extension.cc
new file mode 100644
index 0000000000..6816a6277f
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_video_layers_allocation_extension.cc
@@ -0,0 +1,416 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtp_video_layers_allocation_extension.h"
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "absl/algorithm/container.h"
+#include "api/video/video_layers_allocation.h"
+#include "modules/rtp_rtcp/source/byte_io.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+constexpr RTPExtensionType RtpVideoLayersAllocationExtension::kId;
+
+namespace {
+
+constexpr int kMaxNumRtpStreams = 4;
+
+// TODO(bugs.webrtc.org/12000): share Leb128 functions with av1 packetizer.
+// Returns minimum number of bytes required to store `value`.
+int Leb128Size(uint32_t value) {
+ int size = 0;
+ while (value >= 0x80) {
+ ++size;
+ value >>= 7;
+ }
+ return size + 1;
+}
+
+// Returns number of bytes consumed.
+int WriteLeb128(uint32_t value, uint8_t* buffer) {
+ int size = 0;
+ while (value >= 0x80) {
+ buffer[size] = 0x80 | (value & 0x7F);
+ ++size;
+ value >>= 7;
+ }
+ buffer[size] = value;
+ ++size;
+ return size;
+}
+
+// Reads a leb128-encoded value and advances `read_at` by the number of bytes
+// consumed.
+// Sets read_at to nullptr on error.
+uint64_t ReadLeb128(const uint8_t*& read_at, const uint8_t* end) {
+ uint64_t value = 0;
+ int fill_bits = 0;
+ while (read_at != end && fill_bits < 64 - 7) {
+ uint8_t leb128_byte = *read_at;
+ value |= uint64_t{leb128_byte & 0x7Fu} << fill_bits;
+ ++read_at;
+ fill_bits += 7;
+ if ((leb128_byte & 0x80) == 0) {
+ return value;
+ }
+ }
+ // Failed to find terminator leb128 byte.
+ read_at = nullptr;
+ return 0;
+}
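+
+// Worked example (illustrative): 300 = 0b10'0101100 splits into 7-bit groups
+// from the least-significant end, so WriteLeb128(300, buffer) emits
+// {0xAC, 0x02}: 0xAC = 0x80 | (300 & 0x7F) carries the continuation bit, and
+// 0x02 = 300 >> 7 is the terminator byte. ReadLeb128() reverses this and
+// consumes two bytes; Leb128Size(300) == 2.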
+
+bool AllocationIsValid(const VideoLayersAllocation& allocation) {
+ // Since all multivalue fields are stored in (rtp_stream_id, spatial_id)
+ // order, assume `allocation.active_spatial_layers` is already sorted. It is
+ // simpler to assemble it in sorted order than to re-sort during
+ // serialization.
+ if (!absl::c_is_sorted(
+ allocation.active_spatial_layers,
+ [](const VideoLayersAllocation::SpatialLayer& lhs,
+ const VideoLayersAllocation::SpatialLayer& rhs) {
+ return std::make_tuple(lhs.rtp_stream_index, lhs.spatial_id) <
+ std::make_tuple(rhs.rtp_stream_index, rhs.spatial_id);
+ })) {
+ return false;
+ }
+
+ int max_rtp_stream_idx = 0;
+ for (const auto& spatial_layer : allocation.active_spatial_layers) {
+ if (spatial_layer.rtp_stream_index < 0 ||
+ spatial_layer.rtp_stream_index >= 4) {
+ return false;
+ }
+ if (spatial_layer.spatial_id < 0 || spatial_layer.spatial_id >= 4) {
+ return false;
+ }
+ if (spatial_layer.target_bitrate_per_temporal_layer.empty() ||
+ spatial_layer.target_bitrate_per_temporal_layer.size() > 4) {
+ return false;
+ }
+ if (max_rtp_stream_idx < spatial_layer.rtp_stream_index) {
+ max_rtp_stream_idx = spatial_layer.rtp_stream_index;
+ }
+ if (allocation.resolution_and_frame_rate_is_valid) {
+ // TODO(danilchap): Check that width and height are no more than 0x10000
+ // once width and height use a larger type that supports the maximum
+ // resolution.
+ if (spatial_layer.width <= 0) {
+ return false;
+ }
+ if (spatial_layer.height <= 0) {
+ return false;
+ }
+ if (spatial_layer.frame_rate_fps > 255) {
+ return false;
+ }
+ }
+ }
+ if (allocation.rtp_stream_index < 0 ||
+ (!allocation.active_spatial_layers.empty() &&
+ allocation.rtp_stream_index > max_rtp_stream_idx)) {
+ return false;
+ }
+ return true;
+}
+
+struct SpatialLayersBitmasks {
+ int max_rtp_stream_id = 0;
+ uint8_t spatial_layer_bitmask[kMaxNumRtpStreams] = {};
+ bool bitmasks_are_the_same = true;
+};
+
+SpatialLayersBitmasks SpatialLayersBitmasksPerRtpStream(
+ const VideoLayersAllocation& allocation) {
+ RTC_DCHECK(AllocationIsValid(allocation));
+ SpatialLayersBitmasks result;
+ for (const auto& layer : allocation.active_spatial_layers) {
+ result.spatial_layer_bitmask[layer.rtp_stream_index] |=
+ (1u << layer.spatial_id);
+ if (result.max_rtp_stream_id < layer.rtp_stream_index) {
+ result.max_rtp_stream_id = layer.rtp_stream_index;
+ }
+ }
+ for (int i = 1; i <= result.max_rtp_stream_id; ++i) {
+ if (result.spatial_layer_bitmask[i] != result.spatial_layer_bitmask[0]) {
+ result.bitmasks_are_the_same = false;
+ break;
+ }
+ }
+ return result;
+}
+
+} // namespace
+
+// +-+-+-+-+-+-+-+-+
+// |RID| NS| sl_bm |
+// +-+-+-+-+-+-+-+-+
+// Spatial layer bitmask |sl0_bm |sl1_bm |
+// up to 2 bytes |---------------|
+// when sl_bm == 0 |sl2_bm |sl3_bm |
+// +-+-+-+-+-+-+-+-+
+// Number of temporal |#tl|#tl|#tl|#tl|
+// layers per spatial layer :---------------:
+// up to 4 bytes | ... |
+// +-+-+-+-+-+-+-+-+
+// Target bitrate in kpbs | |
+// per temporal layer : ... :
+// leb128 encoded | |
+// +-+-+-+-+-+-+-+-+
+// Resolution and framerate | |
+// 5 bytes per spatial layer + width-1 for +
+// (optional) | rid=0, sid=0 |
+// +---------------+
+// | |
+// + height-1 for +
+// | rid=0, sid=0 |
+// +---------------+
+// | max framerate |
+// +-+-+-+-+-+-+-+-+
+// : ... :
+// +-+-+-+-+-+-+-+-+
+//
+// RID: RTP stream index this allocation is sent on, numbered from 0. 2 bits.
+// NS: Number of RTP streams - 1. 2 bits, thus allowing up to 4 RTP streams.
+// sl_bm: Bitmask of the active spatial layers when it is the same for all RTP
+// streams, or 0 otherwise. 4 bits, thus allowing up to 4 spatial layers
+// per RTP stream.
+// slX_bm: Bitmask of the active spatial layers for the RTP stream with
+// index=X. Byte-aligned. When NS < 2 it takes one byte, otherwise two bytes.
+// #tl: 2-bit value of the number of temporal layers - 1, thus allowing up to
+// 4 temporal layers per spatial layer. One value per spatial layer per RTP
+// stream; values are stored in (RTP stream id, spatial id) ascending order,
+// zero-padded to byte alignment.
+// Target bitrate in kbps: one leb128-encoded value per temporal layer;
+// values are stored in (RTP stream id, spatial id, temporal id) ascending
+// order. Each bitrate is the total bitrate required to receive the
+// corresponding layer: in simulcast mode it includes only the corresponding
+// spatial layer, in full-svc all lower spatial layers are included. All
+// lower temporal layers are always included.
+// Resolution and frame rate: optional. Presence is inferred from the RTP
+// header extension size. Encoded as (width - 1), 16-bit, (height - 1),
+// 16-bit, and max frame rate, 8-bit, per spatial layer per RTP stream.
+// Values are stored in (RTP stream id, spatial id) ascending order.
+//
+// An empty layer allocation (i.e. nothing is sent on the SSRC) is encoded as
+// a special case with a single 0 byte.
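+//
+// Worked example (illustrative, not normative): a single RTP stream (RID=0,
+// NS=0) with one active spatial layer (sl_bm=0b0001) carrying two temporal
+// layers at 25 and 50 kbps, without resolution and frame rate, encodes to
+// four bytes:
+//   0x01  header: RID=0, NS=0, sl_bm=0b0001
+//   0x40  #tl: (2 - 1) << 6, zero-padded to byte alignment
+//   0x19  leb128(25)
+//   0x32  leb128(50)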
+
+bool RtpVideoLayersAllocationExtension::Write(
+ rtc::ArrayView<uint8_t> data,
+ const VideoLayersAllocation& allocation) {
+ RTC_DCHECK(AllocationIsValid(allocation));
+ RTC_DCHECK_GE(data.size(), ValueSize(allocation));
+
+ if (allocation.active_spatial_layers.empty()) {
+ data[0] = 0;
+ return true;
+ }
+
+ SpatialLayersBitmasks slb = SpatialLayersBitmasksPerRtpStream(allocation);
+ uint8_t* write_at = data.data();
+ // First half of the header byte.
+ *write_at = (allocation.rtp_stream_index << 6);
+ // The number of RTP streams - 1 is the same as the maximum rtp_stream_id.
+ *write_at |= slb.max_rtp_stream_id << 4;
+ if (slb.bitmasks_are_the_same) {
+ // Second half of the header byte.
+ *write_at |= slb.spatial_layer_bitmask[0];
+ } else {
+ // Spatial layer bitmasks when they are different for different RTP streams.
+ *++write_at =
+ (slb.spatial_layer_bitmask[0] << 4) | slb.spatial_layer_bitmask[1];
+ if (slb.max_rtp_stream_id >= 2) {
+ *++write_at =
+ (slb.spatial_layer_bitmask[2] << 4) | slb.spatial_layer_bitmask[3];
+ }
+ }
+ ++write_at;
+
+ { // Number of temporal layers.
+ int bit_offset = 8;
+ *write_at = 0;
+ for (const auto& layer : allocation.active_spatial_layers) {
+ if (bit_offset == 0) {
+ bit_offset = 6;
+ *++write_at = 0;
+ } else {
+ bit_offset -= 2;
+ }
+ *write_at |=
+ ((layer.target_bitrate_per_temporal_layer.size() - 1) << bit_offset);
+ }
+ ++write_at;
+ }
+
+ // Target bitrates.
+ for (const auto& spatial_layer : allocation.active_spatial_layers) {
+ for (const DataRate& bitrate :
+ spatial_layer.target_bitrate_per_temporal_layer) {
+ write_at += WriteLeb128(bitrate.kbps(), write_at);
+ }
+ }
+
+ if (allocation.resolution_and_frame_rate_is_valid) {
+ for (const auto& spatial_layer : allocation.active_spatial_layers) {
+ ByteWriter<uint16_t>::WriteBigEndian(write_at, spatial_layer.width - 1);
+ write_at += 2;
+ ByteWriter<uint16_t>::WriteBigEndian(write_at, spatial_layer.height - 1);
+ write_at += 2;
+ *write_at = spatial_layer.frame_rate_fps;
+ ++write_at;
+ }
+ }
+ RTC_DCHECK_EQ(write_at - data.data(), ValueSize(allocation));
+ return true;
+}
+
+bool RtpVideoLayersAllocationExtension::Parse(
+ rtc::ArrayView<const uint8_t> data,
+ VideoLayersAllocation* allocation) {
+ if (data.empty() || allocation == nullptr) {
+ return false;
+ }
+
+ allocation->active_spatial_layers.clear();
+
+ const uint8_t* read_at = data.data();
+ const uint8_t* const end = data.data() + data.size();
+
+ if (data.size() == 1 && *read_at == 0) {
+ allocation->rtp_stream_index = 0;
+ allocation->resolution_and_frame_rate_is_valid = true;
+ return AllocationIsValid(*allocation);
+ }
+
+ // Header byte.
+ allocation->rtp_stream_index = *read_at >> 6;
+ int num_rtp_streams = 1 + ((*read_at >> 4) & 0b11);
+ uint8_t spatial_layers_bitmasks[kMaxNumRtpStreams];
+ spatial_layers_bitmasks[0] = *read_at & 0b1111;
+
+ if (spatial_layers_bitmasks[0] != 0) {
+ for (int i = 1; i < num_rtp_streams; ++i) {
+ spatial_layers_bitmasks[i] = spatial_layers_bitmasks[0];
+ }
+ } else {
+ // Spatial layer bitmasks when they are different for different RTP streams.
+ if (++read_at == end) {
+ return false;
+ }
+ spatial_layers_bitmasks[0] = *read_at >> 4;
+ spatial_layers_bitmasks[1] = *read_at & 0b1111;
+ if (num_rtp_streams > 2) {
+ if (++read_at == end) {
+ return false;
+ }
+ spatial_layers_bitmasks[2] = *read_at >> 4;
+ spatial_layers_bitmasks[3] = *read_at & 0b1111;
+ }
+ }
+ if (++read_at == end) {
+ return false;
+ }
+
+ // Read the number of temporal layers and create
+ // `allocation->active_spatial_layers` while iterating through them.
+ int bit_offset = 8;
+ for (int stream_idx = 0; stream_idx < num_rtp_streams; ++stream_idx) {
+ for (int sid = 0; sid < VideoLayersAllocation::kMaxSpatialIds; ++sid) {
+ if ((spatial_layers_bitmasks[stream_idx] & (1 << sid)) == 0) {
+ continue;
+ }
+
+ if (bit_offset == 0) {
+ bit_offset = 6;
+ if (++read_at == end) {
+ return false;
+ }
+ } else {
+ bit_offset -= 2;
+ }
+ int num_temporal_layers = 1 + ((*read_at >> bit_offset) & 0b11);
+ allocation->active_spatial_layers.emplace_back();
+ auto& layer = allocation->active_spatial_layers.back();
+ layer.rtp_stream_index = stream_idx;
+ layer.spatial_id = sid;
+ layer.target_bitrate_per_temporal_layer.resize(num_temporal_layers,
+ DataRate::Zero());
+ }
+ }
+ if (++read_at == end) {
+ return false;
+ }
+
+ // Target bitrates.
+ for (auto& layer : allocation->active_spatial_layers) {
+ for (DataRate& rate : layer.target_bitrate_per_temporal_layer) {
+ uint64_t bitrate_kbps = ReadLeb128(read_at, end);
+ // `bitrate_kbps` may hold a larger value than the DataRate type supports;
+ // discard unreasonably large values.
+ if (read_at == nullptr || bitrate_kbps > 1'000'000) {
+ return false;
+ }
+ rate = DataRate::KilobitsPerSec(bitrate_kbps);
+ }
+ }
+
+ if (read_at == end) {
+ allocation->resolution_and_frame_rate_is_valid = false;
+ return AllocationIsValid(*allocation);
+ }
+
+ if (read_at + 5 * allocation->active_spatial_layers.size() != end) {
+ // Data is left over, but its size does not match the resolution and frame
+ // rate fields.
+ return false;
+ }
+ allocation->resolution_and_frame_rate_is_valid = true;
+ for (auto& layer : allocation->active_spatial_layers) {
+ layer.width = 1 + ByteReader<uint16_t, 2>::ReadBigEndian(read_at);
+ read_at += 2;
+ layer.height = 1 + ByteReader<uint16_t, 2>::ReadBigEndian(read_at);
+ read_at += 2;
+ layer.frame_rate_fps = *read_at;
+ ++read_at;
+ }
+
+ return AllocationIsValid(*allocation);
+}
+
+size_t RtpVideoLayersAllocationExtension::ValueSize(
+ const VideoLayersAllocation& allocation) {
+ if (allocation.active_spatial_layers.empty()) {
+ return 1;
+ }
+ size_t result = 1; // header
+ SpatialLayersBitmasks slb = SpatialLayersBitmasksPerRtpStream(allocation);
+ if (!slb.bitmasks_are_the_same) {
+ ++result;
+ if (slb.max_rtp_stream_id >= 2) {
+ ++result;
+ }
+ }
+ // 2 bits per active spatial layer, rounded up to full byte, i.e.
+ // 0.25 byte per active spatial layer.
+ result += (allocation.active_spatial_layers.size() + 3) / 4;
+ for (const auto& spatial_layer : allocation.active_spatial_layers) {
+ for (DataRate value : spatial_layer.target_bitrate_per_temporal_layer) {
+ result += Leb128Size(value.kbps());
+ }
+ }
+ if (allocation.resolution_and_frame_rate_is_valid) {
+ result += 5 * allocation.active_spatial_layers.size();
+ }
+ return result;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_video_layers_allocation_extension.h b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_video_layers_allocation_extension.h
new file mode 100644
index 0000000000..3f1603bcd8
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_video_layers_allocation_extension.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_RTP_RTCP_SOURCE_RTP_VIDEO_LAYERS_ALLOCATION_EXTENSION_H_
+#define MODULES_RTP_RTCP_SOURCE_RTP_VIDEO_LAYERS_ALLOCATION_EXTENSION_H_
+
+#include "absl/strings/string_view.h"
+#include "api/rtp_parameters.h"
+#include "api/video/video_layers_allocation.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+
+namespace webrtc {
+
+// TODO(bugs.webrtc.org/12000): Note that this extension is being developed
+// and the wire format will likely change.
+class RtpVideoLayersAllocationExtension {
+ public:
+ using value_type = VideoLayersAllocation;
+ static constexpr RTPExtensionType kId = kRtpExtensionVideoLayersAllocation;
+ static constexpr absl::string_view Uri() {
+ return RtpExtension::kVideoLayersAllocationUri;
+ }
+
+ static bool Parse(rtc::ArrayView<const uint8_t> data,
+ VideoLayersAllocation* allocation);
+ static size_t ValueSize(const VideoLayersAllocation& allocation);
+ static bool Write(rtc::ArrayView<uint8_t> data,
+ const VideoLayersAllocation& allocation);
+};
+
+} // namespace webrtc
+#endif // MODULES_RTP_RTCP_SOURCE_RTP_VIDEO_LAYERS_ALLOCATION_EXTENSION_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_video_layers_allocation_extension_unittest.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_video_layers_allocation_extension_unittest.cc
new file mode 100644
index 0000000000..db077409ee
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_video_layers_allocation_extension_unittest.cc
@@ -0,0 +1,287 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtp_video_layers_allocation_extension.h"
+
+#include "api/video/video_layers_allocation.h"
+#include "rtc_base/bit_buffer.h"
+#include "rtc_base/buffer.h"
+
+#include "test/gmock.h"
+
+namespace webrtc {
+namespace {
+
+TEST(RtpVideoLayersAllocationExtension, WriteEmptyLayersAllocationReturnsTrue) {
+ VideoLayersAllocation written_allocation;
+ rtc::Buffer buffer(
+ RtpVideoLayersAllocationExtension::ValueSize(written_allocation));
+ EXPECT_TRUE(
+ RtpVideoLayersAllocationExtension::Write(buffer, written_allocation));
+}
+
+TEST(RtpVideoLayersAllocationExtension,
+ CanWriteAndParseLayersAllocationWithZeroSpatialLayers) {
+ // We require `resolution_and_frame_rate_is_valid` to be set to true in
+ // order to send an "empty" allocation.
+ VideoLayersAllocation written_allocation;
+ written_allocation.resolution_and_frame_rate_is_valid = true;
+ written_allocation.rtp_stream_index = 0;
+
+ rtc::Buffer buffer(
+ RtpVideoLayersAllocationExtension::ValueSize(written_allocation));
+ EXPECT_TRUE(
+ RtpVideoLayersAllocationExtension::Write(buffer, written_allocation));
+
+ VideoLayersAllocation parsed_allocation;
+ EXPECT_TRUE(
+ RtpVideoLayersAllocationExtension::Parse(buffer, &parsed_allocation));
+ EXPECT_EQ(written_allocation, parsed_allocation);
+}
+
+TEST(RtpVideoLayersAllocationExtension,
+ CanWriteAndParse2SpatialWith2TemporalLayers) {
+ VideoLayersAllocation written_allocation;
+ written_allocation.rtp_stream_index = 1;
+ written_allocation.active_spatial_layers = {
+ {
+ /*rtp_stream_index*/ 0,
+ /*spatial_id*/ 0,
+ /*target_bitrate_per_temporal_layer*/
+ {DataRate::KilobitsPerSec(25), DataRate::KilobitsPerSec(50)},
+ /*width*/ 0,
+ /*height*/ 0,
+ /*frame_rate_fps*/ 0,
+ },
+ {
+ /*rtp_stream_index*/ 1,
+ /*spatial_id*/ 0,
+ /*target_bitrate_per_temporal_layer*/
+ {DataRate::KilobitsPerSec(100), DataRate::KilobitsPerSec(200)},
+ /*width*/ 0,
+ /*height*/ 0,
+ /*frame_rate_fps*/ 0,
+ },
+ };
+ rtc::Buffer buffer(
+ RtpVideoLayersAllocationExtension::ValueSize(written_allocation));
+ EXPECT_TRUE(
+ RtpVideoLayersAllocationExtension::Write(buffer, written_allocation));
+ VideoLayersAllocation parsed_allocation;
+ EXPECT_TRUE(
+ RtpVideoLayersAllocationExtension::Parse(buffer, &parsed_allocation));
+ EXPECT_EQ(written_allocation, parsed_allocation);
+}
+
+TEST(RtpVideoLayersAllocationExtension,
+ CanWriteAndParseAllocationWithDifferentNumberOfSpatialLayers) {
+ VideoLayersAllocation written_allocation;
+ written_allocation.rtp_stream_index = 1;
+ written_allocation.active_spatial_layers = {
+ {/*rtp_stream_index*/ 0,
+ /*spatial_id*/ 0,
+ /*target_bitrate_per_temporal_layer*/ {DataRate::KilobitsPerSec(50)},
+ /*width*/ 0,
+ /*height*/ 0,
+ /*frame_rate_fps*/ 0},
+ {/*rtp_stream_index*/ 1,
+ /*spatial_id*/ 0,
+ /*target_bitrate_per_temporal_layer*/ {DataRate::KilobitsPerSec(100)},
+ /*width*/ 0,
+ /*height*/ 0,
+ /*frame_rate_fps*/ 0},
+ {/*rtp_stream_index*/ 1,
+ /*spatial_id*/ 1,
+ /*target_bitrate_per_temporal_layer*/ {DataRate::KilobitsPerSec(200)},
+ /*width*/ 0,
+ /*height*/ 0,
+ /*frame_rate_fps*/ 0},
+ };
+ rtc::Buffer buffer(
+ RtpVideoLayersAllocationExtension::ValueSize(written_allocation));
+ EXPECT_TRUE(
+ RtpVideoLayersAllocationExtension::Write(buffer, written_allocation));
+ VideoLayersAllocation parsed_allocation;
+ EXPECT_TRUE(
+ RtpVideoLayersAllocationExtension::Parse(buffer, &parsed_allocation));
+ EXPECT_EQ(written_allocation, parsed_allocation);
+}
+
+TEST(RtpVideoLayersAllocationExtension,
+ CanWriteAndParseAllocationWithSkippedLowerSpatialLayer) {
+ VideoLayersAllocation written_allocation;
+ written_allocation.rtp_stream_index = 1;
+ written_allocation.active_spatial_layers = {
+ {/*rtp_stream_index*/ 0,
+ /*spatial_id*/ 0,
+ /*target_bitrate_per_temporal_layer*/ {DataRate::KilobitsPerSec(50)},
+ /*width*/ 0,
+ /*height*/ 0,
+ /*frame_rate_fps*/ 0},
+ {/*rtp_stream_index*/ 1,
+ /*spatial_id*/ 1,
+ /*target_bitrate_per_temporal_layer*/ {DataRate::KilobitsPerSec(200)},
+ /*width*/ 0,
+ /*height*/ 0,
+ /*frame_rate_fps*/ 0},
+ };
+ rtc::Buffer buffer(
+ RtpVideoLayersAllocationExtension::ValueSize(written_allocation));
+ EXPECT_TRUE(
+ RtpVideoLayersAllocationExtension::Write(buffer, written_allocation));
+ VideoLayersAllocation parsed_allocation;
+ EXPECT_TRUE(
+ RtpVideoLayersAllocationExtension::Parse(buffer, &parsed_allocation));
+ EXPECT_EQ(written_allocation, parsed_allocation);
+}
+
+TEST(RtpVideoLayersAllocationExtension,
+ CanWriteAndParseAllocationWithSkippedRtpStreamIds) {
+ VideoLayersAllocation written_allocation;
+ written_allocation.rtp_stream_index = 2;
+ written_allocation.active_spatial_layers = {
+ {/*rtp_stream_index*/ 0,
+ /*spatial_id*/ 0,
+ /*target_bitrate_per_temporal_layer*/ {DataRate::KilobitsPerSec(50)},
+ /*width*/ 0,
+ /*height*/ 0,
+ /*frame_rate_fps*/ 0},
+ {/*rtp_stream_index*/ 2,
+ /*spatial_id*/ 0,
+ /*target_bitrate_per_temporal_layer*/ {DataRate::KilobitsPerSec(200)},
+ /*width*/ 0,
+ /*height*/ 0,
+ /*frame_rate_fps*/ 0},
+ };
+ rtc::Buffer buffer(
+ RtpVideoLayersAllocationExtension::ValueSize(written_allocation));
+ EXPECT_TRUE(
+ RtpVideoLayersAllocationExtension::Write(buffer, written_allocation));
+ VideoLayersAllocation parsed_allocation;
+ EXPECT_TRUE(
+ RtpVideoLayersAllocationExtension::Parse(buffer, &parsed_allocation));
+ EXPECT_EQ(written_allocation, parsed_allocation);
+}
+
+TEST(RtpVideoLayersAllocationExtension,
+ CanWriteAndParseAllocationWithDifferentNumberOfTemporalLayers) {
+ VideoLayersAllocation written_allocation;
+ written_allocation.rtp_stream_index = 1;
+ written_allocation.active_spatial_layers = {
+ {
+ /*rtp_stream_index*/ 0,
+ /*spatial_id*/ 0,
+ /*target_bitrate_per_temporal_layer*/
+ {DataRate::KilobitsPerSec(25), DataRate::KilobitsPerSec(50)},
+ /*width*/ 0,
+ /*height*/ 0,
+ /*frame_rate_fps*/ 0,
+ },
+ {
+ /*rtp_stream_index*/ 1,
+ /*spatial_id*/ 0,
+ /*target_bitrate_per_temporal_layer*/ {DataRate::KilobitsPerSec(100)},
+ /*width*/ 0,
+ /*height*/ 0,
+ /*frame_rate_fps*/ 0,
+ },
+ };
+ rtc::Buffer buffer(
+ RtpVideoLayersAllocationExtension::ValueSize(written_allocation));
+ EXPECT_TRUE(
+ RtpVideoLayersAllocationExtension::Write(buffer, written_allocation));
+ VideoLayersAllocation parsed_allocation;
+ EXPECT_TRUE(
+ RtpVideoLayersAllocationExtension::Parse(buffer, &parsed_allocation));
+ EXPECT_EQ(written_allocation, parsed_allocation);
+}
+
+TEST(RtpVideoLayersAllocationExtension,
+ CanWriteAndParseAllocationWithResolution) {
+ VideoLayersAllocation written_allocation;
+ written_allocation.rtp_stream_index = 1;
+ written_allocation.resolution_and_frame_rate_is_valid = true;
+ written_allocation.active_spatial_layers = {
+ {
+ /*rtp_stream_index*/ 0,
+ /*spatial_id*/ 0,
+ /*target_bitrate_per_temporal_layer*/
+ {DataRate::KilobitsPerSec(25), DataRate::KilobitsPerSec(50)},
+ /*width*/ 320,
+ /*height*/ 240,
+ /*frame_rate_fps*/ 8,
+ },
+ {
+ /*rtp_stream_index*/ 1,
+ /*spatial_id*/ 1,
+ /*target_bitrate_per_temporal_layer*/
+ {DataRate::KilobitsPerSec(100), DataRate::KilobitsPerSec(200)},
+ /*width*/ 640,
+ /*height*/ 320,
+ /*frame_rate_fps*/ 30,
+ },
+ };
+
+ rtc::Buffer buffer(
+ RtpVideoLayersAllocationExtension::ValueSize(written_allocation));
+ EXPECT_TRUE(
+ RtpVideoLayersAllocationExtension::Write(buffer, written_allocation));
+ VideoLayersAllocation parsed_allocation;
+ EXPECT_TRUE(
+ RtpVideoLayersAllocationExtension::Parse(buffer, &parsed_allocation));
+ EXPECT_EQ(written_allocation, parsed_allocation);
+}
+
+TEST(RtpVideoLayersAllocationExtension,
+ WriteEmptyAllocationCanHaveAnyRtpStreamIndex) {
+ VideoLayersAllocation written_allocation;
+ written_allocation.rtp_stream_index = 1;
+ rtc::Buffer buffer(
+ RtpVideoLayersAllocationExtension::ValueSize(written_allocation));
+ EXPECT_TRUE(
+ RtpVideoLayersAllocationExtension::Write(buffer, written_allocation));
+}
+
+TEST(RtpVideoLayersAllocationExtension, DiscardsOverLargeDataRate) {
+ constexpr uint8_t buffer[] = {0x4b, 0xf6, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xcb, 0x78, 0xeb, 0x8d, 0xb5, 0x31};
+ VideoLayersAllocation allocation;
+ EXPECT_FALSE(RtpVideoLayersAllocationExtension::Parse(buffer, &allocation));
+}
+
+TEST(RtpVideoLayersAllocationExtension, DiscardsInvalidHeight) {
+ VideoLayersAllocation written_allocation;
+ written_allocation.rtp_stream_index = 0;
+ written_allocation.resolution_and_frame_rate_is_valid = true;
+ written_allocation.active_spatial_layers = {
+ {
+ /*rtp_stream_index*/ 0,
+ /*spatial_id*/ 0,
+ /*target_bitrate_per_temporal_layer*/
+ {DataRate::KilobitsPerSec(25), DataRate::KilobitsPerSec(50)},
+ /*width*/ 320,
+ /*height*/ 240,
+ /*frame_rate_fps*/ 8,
+ },
+ };
+ rtc::Buffer buffer(
+ RtpVideoLayersAllocationExtension::ValueSize(written_allocation));
+ ASSERT_TRUE(
+ RtpVideoLayersAllocationExtension::Write(buffer, written_allocation));
+
+ // Modify the height to be invalid.
+ buffer[buffer.size() - 3] = 0xff;
+ buffer[buffer.size() - 2] = 0xff;
+ VideoLayersAllocation allocation;
+ EXPECT_FALSE(RtpVideoLayersAllocationExtension::Parse(buffer, &allocation));
+}
+
+} // namespace
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/source_tracker.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/source_tracker.cc
new file mode 100644
index 0000000000..5a15eb98a2
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/source_tracker.cc
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/source_tracker.h"
+
+#include <algorithm>
+#include <utility>
+
+namespace webrtc {
+
+constexpr int64_t SourceTracker::kTimeoutMs;
+
+SourceTracker::SourceTracker(Clock* clock) : clock_(clock) {}
+
+void SourceTracker::OnFrameDelivered(const RtpPacketInfos& packet_infos) {
+ if (packet_infos.empty()) {
+ return;
+ }
+
+ int64_t now_ms = clock_->TimeInMilliseconds();
+ MutexLock lock_scope(&lock_);
+
+ for (const auto& packet_info : packet_infos) {
+ for (uint32_t csrc : packet_info.csrcs()) {
+ SourceKey key(RtpSourceType::CSRC, csrc);
+ SourceEntry& entry = UpdateEntry(key);
+ const auto packet_time = packet_info.receive_time_ms();
+ entry.timestamp_ms = packet_time ? packet_time : now_ms;
+ entry.audio_level = packet_info.audio_level();
+ entry.absolute_capture_time = packet_info.absolute_capture_time();
+ entry.rtp_timestamp = packet_info.rtp_timestamp();
+ }
+
+ SourceKey key(RtpSourceType::SSRC, packet_info.ssrc());
+ SourceEntry& entry = UpdateEntry(key);
+
+ entry.timestamp_ms = now_ms;
+ entry.audio_level = packet_info.audio_level();
+ entry.absolute_capture_time = packet_info.absolute_capture_time();
+ entry.rtp_timestamp = packet_info.rtp_timestamp();
+ }
+
+ PruneEntries(now_ms);
+}
+
+std::vector<RtpSource> SourceTracker::GetSources() const {
+ std::vector<RtpSource> sources;
+
+ int64_t now_ms = clock_->TimeInMilliseconds();
+ MutexLock lock_scope(&lock_);
+
+ PruneEntries(now_ms);
+
+ for (const auto& pair : list_) {
+ const SourceKey& key = pair.first;
+ const SourceEntry& entry = pair.second;
+
+ sources.emplace_back(
+ entry.timestamp_ms, key.source, key.source_type, entry.rtp_timestamp,
+ RtpSource::Extensions{entry.audio_level, entry.absolute_capture_time});
+ }
+
+ std::sort(sources.begin(), sources.end(),
+ [](const auto& a, const auto& b) {
+ return a.timestamp_ms() > b.timestamp_ms();
+ });
+
+ return sources;
+}
+
+SourceTracker::SourceEntry& SourceTracker::UpdateEntry(const SourceKey& key) {
+ // We intentionally do `find() + emplace()`, instead of checking the return
+ // value of `emplace()`, for performance reasons. It's much more likely for
+ // the key to already exist than for it not to.
+ auto map_it = map_.find(key);
+ if (map_it == map_.end()) {
+ // Insert a new entry at the front of the list.
+ list_.emplace_front(key, SourceEntry());
+ map_.emplace(key, list_.begin());
+ } else if (map_it->second != list_.begin()) {
+ // Move the old entry to the front of the list.
+ list_.splice(list_.begin(), list_, map_it->second);
+ }
+
+ return list_.front().second;
+}
+
+void SourceTracker::PruneEntries(int64_t now_ms) const {
+ int64_t prune_ms = now_ms - kTimeoutMs;
+
+ while (!list_.empty() && list_.back().second.timestamp_ms < prune_ms) {
+ map_.erase(list_.back().first);
+ list_.pop_back();
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/source_tracker.h b/third_party/libwebrtc/modules/rtp_rtcp/source/source_tracker.h
new file mode 100644
index 0000000000..3f3ef8cf73
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/source_tracker.h
@@ -0,0 +1,130 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_RTP_RTCP_SOURCE_SOURCE_TRACKER_H_
+#define MODULES_RTP_RTCP_SOURCE_SOURCE_TRACKER_H_
+
+#include <cstdint>
+#include <list>
+#include <unordered_map>
+#include <utility>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/rtp_packet_infos.h"
+#include "api/transport/rtp/rtp_source.h"
+#include "rtc_base/synchronization/mutex.h"
+#include "rtc_base/time_utils.h"
+#include "system_wrappers/include/clock.h"
+
+namespace webrtc {
+
+//
+// Tracker for `RTCRtpContributingSource` and `RTCRtpSynchronizationSource`:
+// - https://w3c.github.io/webrtc-pc/#dom-rtcrtpcontributingsource
+// - https://w3c.github.io/webrtc-pc/#dom-rtcrtpsynchronizationsource
+//
+class SourceTracker {
+ public:
+ // Amount of time before the entry associated with an update is removed. See:
+ // https://w3c.github.io/webrtc-pc/#dom-rtcrtpreceiver-getcontributingsources
+ static constexpr int64_t kTimeoutMs = 10000; // 10 seconds
+
+ explicit SourceTracker(Clock* clock);
+
+ SourceTracker(const SourceTracker& other) = delete;
+ SourceTracker(SourceTracker&& other) = delete;
+ SourceTracker& operator=(const SourceTracker& other) = delete;
+ SourceTracker& operator=(SourceTracker&& other) = delete;
+
+ // Updates the source entries when a frame is delivered to the
+ // RTCRtpReceiver's MediaStreamTrack.
+ void OnFrameDelivered(const RtpPacketInfos& packet_infos);
+
+ // Returns an `RtpSource` for each unique SSRC and CSRC identifier updated in
+ // the last `kTimeoutMs` milliseconds. Entries appear in reverse chronological
+ // order (i.e. with the most recently updated entries appearing first).
+ std::vector<RtpSource> GetSources() const;
+
+ private:
+ struct SourceKey {
+ SourceKey(RtpSourceType source_type, uint32_t source)
+ : source_type(source_type), source(source) {}
+
+ // Type of `source`.
+ RtpSourceType source_type;
+
+ // CSRC or SSRC identifier of the contributing or synchronization source.
+ uint32_t source;
+ };
+
+ struct SourceKeyComparator {
+ bool operator()(const SourceKey& lhs, const SourceKey& rhs) const {
+ return (lhs.source_type == rhs.source_type) && (lhs.source == rhs.source);
+ }
+ };
+
+ struct SourceKeyHasher {
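+ // Multiplicative hashing: mixing with a large odd constant spreads
+ // consecutive SSRC/CSRC values across the hash space.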
+ size_t operator()(const SourceKey& value) const {
+ return static_cast<size_t>(value.source_type) +
+ static_cast<size_t>(value.source) * 11076425802534262905ULL;
+ }
+ };
+
+ struct SourceEntry {
+ // Timestamp indicating the most recent time a frame from an RTP packet,
+ // originating from this source, was delivered to the RTCRtpReceiver's
+ // MediaStreamTrack. Its reference clock is the outer class's `clock_`.
+ int64_t timestamp_ms;
+
+ // Audio level from an RFC 6464 or RFC 6465 header extension received with
+ // the most recent packet used to assemble the frame associated with
+ // `timestamp_ms`. May be absent. Only relevant for audio receivers. See the
+ // specs for `RTCRtpContributingSource` for more info.
+ absl::optional<uint8_t> audio_level;
+
+ // Absolute capture time header extension received or interpolated from the
+ // most recent packet used to assemble the frame. For more info see
+ // https://webrtc.org/experiments/rtp-hdrext/abs-capture-time/
+ absl::optional<AbsoluteCaptureTime> absolute_capture_time;
+
+ // RTP timestamp of the most recent packet used to assemble the frame
+ // associated with `timestamp_ms`.
+ uint32_t rtp_timestamp;
+ };
+
+ using SourceList = std::list<std::pair<const SourceKey, SourceEntry>>;
+ using SourceMap = std::unordered_map<SourceKey,
+ SourceList::iterator,
+ SourceKeyHasher,
+ SourceKeyComparator>;
+
+ // Updates an entry by creating it (if it didn't previously exist) and moving
+ // it to the front of the list. Returns a reference to the entry.
+ SourceEntry& UpdateEntry(const SourceKey& key)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(lock_);
+
+ // Removes entries that have timed out. Marked as "const" so that we can do
+ // pruning in getters.
+ void PruneEntries(int64_t now_ms) const RTC_EXCLUSIVE_LOCKS_REQUIRED(lock_);
+
+ Clock* const clock_;
+ mutable Mutex lock_;
+
+ // Entries are stored in reverse chronological order (i.e. with the most
+ // recently updated entries appearing first). Mutability is needed for timeout
+ // pruning in const functions.
+ mutable SourceList list_ RTC_GUARDED_BY(lock_);
+ mutable SourceMap map_ RTC_GUARDED_BY(lock_);
+};
+
+} // namespace webrtc
+
+#endif // MODULES_RTP_RTCP_SOURCE_SOURCE_TRACKER_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/source_tracker_unittest.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/source_tracker_unittest.cc
new file mode 100644
index 0000000000..b64f03c469
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/source_tracker_unittest.cc
@@ -0,0 +1,469 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/source_tracker.h"
+
+#include <algorithm>
+#include <list>
+#include <random>
+#include <set>
+#include <tuple>
+#include <utility>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/rtp_headers.h"
+#include "api/rtp_packet_info.h"
+#include "api/rtp_packet_infos.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+using ::testing::Combine;
+using ::testing::ElementsAre;
+using ::testing::ElementsAreArray;
+using ::testing::IsEmpty;
+using ::testing::TestWithParam;
+using ::testing::Values;
+
+constexpr size_t kPacketInfosCountMax = 5;
+
+// Simple "guaranteed to be correct" re-implementation of `SourceTracker` for
+// dual-implementation testing purposes.
+class ExpectedSourceTracker {
+ public:
+ explicit ExpectedSourceTracker(Clock* clock) : clock_(clock) {}
+
+ void OnFrameDelivered(const RtpPacketInfos& packet_infos) {
+ const int64_t now_ms = clock_->TimeInMilliseconds();
+
+ for (const auto& packet_info : packet_infos) {
+ RtpSource::Extensions extensions = {packet_info.audio_level(),
+ packet_info.absolute_capture_time()};
+
+ for (const auto& csrc : packet_info.csrcs()) {
+ entries_.emplace_front(now_ms, csrc, RtpSourceType::CSRC,
+ packet_info.rtp_timestamp(), extensions);
+ }
+
+ entries_.emplace_front(now_ms, packet_info.ssrc(), RtpSourceType::SSRC,
+ packet_info.rtp_timestamp(), extensions);
+ }
+
+ PruneEntries(now_ms);
+ }
+
+ std::vector<RtpSource> GetSources() const {
+ PruneEntries(clock_->TimeInMilliseconds());
+
+ return std::vector<RtpSource>(entries_.begin(), entries_.end());
+ }
+
+ private:
+ void PruneEntries(int64_t now_ms) const {
+ const int64_t prune_ms = now_ms - 10000; // 10 seconds
+
+ std::set<std::pair<RtpSourceType, uint32_t>> seen;
+
+ auto it = entries_.begin();
+ auto end = entries_.end();
+ while (it != end) {
+ auto next = it;
+ ++next;
+
+ auto key = std::make_pair(it->source_type(), it->source_id());
+ if (!seen.insert(key).second || it->timestamp_ms() < prune_ms) {
+ entries_.erase(it);
+ }
+
+ it = next;
+ }
+ }
+
+ Clock* const clock_;
+
+ mutable std::list<RtpSource> entries_;
+};
+
+class SourceTrackerRandomTest
+ : public TestWithParam<std::tuple<uint32_t, uint32_t>> {
+ protected:
+ SourceTrackerRandomTest()
+ : ssrcs_count_(std::get<0>(GetParam())),
+ csrcs_count_(std::get<1>(GetParam())),
+ generator_(42) {}
+
+ RtpPacketInfos GeneratePacketInfos() {
+ size_t count = std::uniform_int_distribution<size_t>(
+ 1, kPacketInfosCountMax)(generator_);
+
+ RtpPacketInfos::vector_type packet_infos;
+ for (size_t i = 0; i < count; ++i) {
+ packet_infos.emplace_back(GenerateSsrc(), GenerateCsrcs(),
+ GenerateRtpTimestamp(), GenerateAudioLevel(),
+ GenerateAbsoluteCaptureTime(),
+ GenerateReceiveTime());
+ }
+
+ return RtpPacketInfos(std::move(packet_infos));
+ }
+
+ int64_t GenerateClockAdvanceTimeMilliseconds() {
+ double roll = std::uniform_real_distribution<double>(0.0, 1.0)(generator_);
+
+ if (roll < 0.05) {
+ return 0;
+ }
+
+ if (roll < 0.08) {
+ return SourceTracker::kTimeoutMs - 1;
+ }
+
+ if (roll < 0.11) {
+ return SourceTracker::kTimeoutMs;
+ }
+
+ if (roll < 0.19) {
+ return std::uniform_int_distribution<int64_t>(
+ SourceTracker::kTimeoutMs,
+ SourceTracker::kTimeoutMs * 1000)(generator_);
+ }
+
+ return std::uniform_int_distribution<int64_t>(
+ 1, SourceTracker::kTimeoutMs - 1)(generator_);
+ }
+
+ private:
+ uint32_t GenerateSsrc() {
+ return std::uniform_int_distribution<uint32_t>(1, ssrcs_count_)(generator_);
+ }
+
+ std::vector<uint32_t> GenerateCsrcs() {
+ std::vector<uint32_t> csrcs;
+ for (size_t i = 1; i <= csrcs_count_ && csrcs.size() < kRtpCsrcSize; ++i) {
+ if (std::bernoulli_distribution(0.5)(generator_)) {
+ csrcs.push_back(i);
+ }
+ }
+
+ return csrcs;
+ }
+
+ uint32_t GenerateRtpTimestamp() {
+ return std::uniform_int_distribution<uint32_t>()(generator_);
+ }
+
+ absl::optional<uint8_t> GenerateAudioLevel() {
+ if (std::bernoulli_distribution(0.25)(generator_)) {
+ return absl::nullopt;
+ }
+
+ // Workaround for std::uniform_int_distribution<uint8_t> not being allowed.
+ return static_cast<uint8_t>(
+ std::uniform_int_distribution<uint16_t>()(generator_));
+ }
+
+ absl::optional<AbsoluteCaptureTime> GenerateAbsoluteCaptureTime() {
+ if (std::bernoulli_distribution(0.25)(generator_)) {
+ return absl::nullopt;
+ }
+
+ AbsoluteCaptureTime value;
+
+ value.absolute_capture_timestamp =
+ std::uniform_int_distribution<uint64_t>()(generator_);
+
+ if (std::bernoulli_distribution(0.5)(generator_)) {
+ value.estimated_capture_clock_offset = absl::nullopt;
+ } else {
+ value.estimated_capture_clock_offset =
+ std::uniform_int_distribution<int64_t>()(generator_);
+ }
+
+ return value;
+ }
+
+ Timestamp GenerateReceiveTime() {
+ return Timestamp::Micros(
+ std::uniform_int_distribution<int64_t>()(generator_));
+ }
+
+ const uint32_t ssrcs_count_;
+ const uint32_t csrcs_count_;
+
+ std::mt19937 generator_;
+};
+
+} // namespace
+
+TEST_P(SourceTrackerRandomTest, RandomOperations) {
+ constexpr size_t kIterationsCount = 200;
+
+ SimulatedClock clock(1000000000000ULL);
+ SourceTracker actual_tracker(&clock);
+ ExpectedSourceTracker expected_tracker(&clock);
+
+ ASSERT_THAT(actual_tracker.GetSources(), IsEmpty());
+ ASSERT_THAT(expected_tracker.GetSources(), IsEmpty());
+
+ for (size_t i = 0; i < kIterationsCount; ++i) {
+ RtpPacketInfos packet_infos = GeneratePacketInfos();
+
+ actual_tracker.OnFrameDelivered(packet_infos);
+ expected_tracker.OnFrameDelivered(packet_infos);
+
+ clock.AdvanceTimeMilliseconds(GenerateClockAdvanceTimeMilliseconds());
+
+ ASSERT_THAT(actual_tracker.GetSources(),
+ ElementsAreArray(expected_tracker.GetSources()));
+ }
+}
+
+INSTANTIATE_TEST_SUITE_P(All,
+ SourceTrackerRandomTest,
+ Combine(/*ssrcs_count_=*/Values(1, 2, 4),
+ /*csrcs_count_=*/Values(0, 1, 3, 7)));
+
+TEST(SourceTrackerTest, StartEmpty) {
+ SimulatedClock clock(1000000000000ULL);
+ SourceTracker tracker(&clock);
+
+ EXPECT_THAT(tracker.GetSources(), IsEmpty());
+}
+
+TEST(SourceTrackerTest, OnFrameDeliveredRecordsSourcesDistinctSsrcs) {
+ constexpr uint32_t kSsrc1 = 10;
+ constexpr uint32_t kSsrc2 = 11;
+ constexpr uint32_t kCsrcs0 = 20;
+ constexpr uint32_t kCsrcs1 = 21;
+ constexpr uint32_t kCsrcs2 = 22;
+ constexpr uint32_t kRtpTimestamp0 = 40;
+ constexpr uint32_t kRtpTimestamp1 = 50;
+ constexpr absl::optional<uint8_t> kAudioLevel0 = 50;
+ constexpr absl::optional<uint8_t> kAudioLevel1 = 20;
+ constexpr absl::optional<AbsoluteCaptureTime> kAbsoluteCaptureTime =
+ AbsoluteCaptureTime{/*absolute_capture_timestamp=*/12,
+ /*estimated_capture_clock_offset=*/absl::nullopt};
+ constexpr Timestamp kReceiveTime0 = Timestamp::Millis(60);
+ constexpr Timestamp kReceiveTime1 = Timestamp::Millis(70);
+
+ SimulatedClock clock(1000000000000ULL);
+ SourceTracker tracker(&clock);
+
+ tracker.OnFrameDelivered(RtpPacketInfos(
+ {RtpPacketInfo(kSsrc1, {kCsrcs0, kCsrcs1}, kRtpTimestamp0, kAudioLevel0,
+ kAbsoluteCaptureTime, kReceiveTime0),
+ RtpPacketInfo(kSsrc2, {kCsrcs2}, kRtpTimestamp1, kAudioLevel1,
+ kAbsoluteCaptureTime, kReceiveTime1)}));
+
+ int64_t timestamp_ms = clock.TimeInMilliseconds();
+ constexpr RtpSource::Extensions extensions0 = {kAudioLevel0,
+ kAbsoluteCaptureTime};
+ constexpr RtpSource::Extensions extensions1 = {kAudioLevel1,
+ kAbsoluteCaptureTime};
+
+ EXPECT_THAT(tracker.GetSources(),
+ ElementsAre(RtpSource(timestamp_ms, kSsrc2, RtpSourceType::SSRC,
+ kRtpTimestamp1, extensions1),
+ RtpSource(timestamp_ms, kCsrcs2, RtpSourceType::CSRC,
+ kRtpTimestamp1, extensions1),
+ RtpSource(timestamp_ms, kSsrc1, RtpSourceType::SSRC,
+ kRtpTimestamp0, extensions0),
+ RtpSource(timestamp_ms, kCsrcs1, RtpSourceType::CSRC,
+ kRtpTimestamp0, extensions0),
+ RtpSource(timestamp_ms, kCsrcs0, RtpSourceType::CSRC,
+ kRtpTimestamp0, extensions0)));
+}
+
+TEST(SourceTrackerTest, OnFrameDeliveredRecordsSourcesSameSsrc) {
+ constexpr uint32_t kSsrc = 10;
+ constexpr uint32_t kCsrcs0 = 20;
+ constexpr uint32_t kCsrcs1 = 21;
+ constexpr uint32_t kCsrcs2 = 22;
+ constexpr uint32_t kRtpTimestamp0 = 40;
+ constexpr uint32_t kRtpTimestamp1 = 45;
+ constexpr uint32_t kRtpTimestamp2 = 50;
+ constexpr absl::optional<uint8_t> kAudioLevel0 = 50;
+ constexpr absl::optional<uint8_t> kAudioLevel1 = 20;
+ constexpr absl::optional<uint8_t> kAudioLevel2 = 10;
+ constexpr absl::optional<AbsoluteCaptureTime> kAbsoluteCaptureTime =
+ AbsoluteCaptureTime{/*absolute_capture_timestamp=*/12,
+ /*estimated_capture_clock_offset=*/absl::nullopt};
+ constexpr Timestamp kReceiveTime0 = Timestamp::Millis(60);
+ constexpr Timestamp kReceiveTime1 = Timestamp::Millis(70);
+ constexpr Timestamp kReceiveTime2 = Timestamp::Millis(80);
+
+ SimulatedClock clock(1000000000000ULL);
+ SourceTracker tracker(&clock);
+
+ tracker.OnFrameDelivered(RtpPacketInfos(
+ {RtpPacketInfo(kSsrc, {kCsrcs0, kCsrcs1}, kRtpTimestamp0, kAudioLevel0,
+ kAbsoluteCaptureTime, kReceiveTime0),
+ RtpPacketInfo(kSsrc, {kCsrcs2}, kRtpTimestamp1, kAudioLevel1,
+ kAbsoluteCaptureTime, kReceiveTime1),
+ RtpPacketInfo(kSsrc, {kCsrcs0}, kRtpTimestamp2, kAudioLevel2,
+ kAbsoluteCaptureTime, kReceiveTime2)}));
+
+ int64_t timestamp_ms = clock.TimeInMilliseconds();
+ constexpr RtpSource::Extensions extensions0 = {kAudioLevel0,
+ kAbsoluteCaptureTime};
+ constexpr RtpSource::Extensions extensions1 = {kAudioLevel1,
+ kAbsoluteCaptureTime};
+ constexpr RtpSource::Extensions extensions2 = {kAudioLevel2,
+ kAbsoluteCaptureTime};
+
+ EXPECT_THAT(tracker.GetSources(),
+ ElementsAre(RtpSource(timestamp_ms, kSsrc, RtpSourceType::SSRC,
+ kRtpTimestamp2, extensions2),
+ RtpSource(timestamp_ms, kCsrcs0, RtpSourceType::CSRC,
+ kRtpTimestamp2, extensions2),
+ RtpSource(timestamp_ms, kCsrcs2, RtpSourceType::CSRC,
+ kRtpTimestamp1, extensions1),
+ RtpSource(timestamp_ms, kCsrcs1, RtpSourceType::CSRC,
+ kRtpTimestamp0, extensions0)));
+}
+
+TEST(SourceTrackerTest, OnFrameDeliveredUpdatesSources) {
+ constexpr uint32_t kSsrc1 = 10;
+ constexpr uint32_t kSsrc2 = 11;
+ constexpr uint32_t kCsrcs0 = 20;
+ constexpr uint32_t kCsrcs1 = 21;
+ constexpr uint32_t kCsrcs2 = 22;
+ constexpr uint32_t kRtpTimestamp0 = 40;
+ constexpr uint32_t kRtpTimestamp1 = 41;
+ constexpr uint32_t kRtpTimestamp2 = 42;
+ constexpr absl::optional<uint8_t> kAudioLevel0 = 50;
+ constexpr absl::optional<uint8_t> kAudioLevel1 = absl::nullopt;
+ constexpr absl::optional<uint8_t> kAudioLevel2 = 10;
+ constexpr absl::optional<AbsoluteCaptureTime> kAbsoluteCaptureTime0 =
+ AbsoluteCaptureTime{12, 34};
+ constexpr absl::optional<AbsoluteCaptureTime> kAbsoluteCaptureTime1 =
+ AbsoluteCaptureTime{56, 78};
+ constexpr absl::optional<AbsoluteCaptureTime> kAbsoluteCaptureTime2 =
+ AbsoluteCaptureTime{89, 90};
+ constexpr Timestamp kReceiveTime0 = Timestamp::Millis(60);
+ constexpr Timestamp kReceiveTime1 = Timestamp::Millis(61);
+ constexpr Timestamp kReceiveTime2 = Timestamp::Millis(62);
+
+ constexpr RtpSource::Extensions extensions0 = {kAudioLevel0,
+ kAbsoluteCaptureTime0};
+ constexpr RtpSource::Extensions extensions1 = {kAudioLevel1,
+ kAbsoluteCaptureTime1};
+ constexpr RtpSource::Extensions extensions2 = {kAudioLevel2,
+ kAbsoluteCaptureTime2};
+
+ SimulatedClock clock(1000000000000ULL);
+ SourceTracker tracker(&clock);
+
+ tracker.OnFrameDelivered(RtpPacketInfos(
+ {RtpPacketInfo(kSsrc1, {kCsrcs0, kCsrcs1}, kRtpTimestamp0, kAudioLevel0,
+ kAbsoluteCaptureTime0, kReceiveTime0)}));
+
+ int64_t timestamp_ms_0 = clock.TimeInMilliseconds();
+ EXPECT_THAT(
+ tracker.GetSources(),
+ ElementsAre(RtpSource(timestamp_ms_0, kSsrc1, RtpSourceType::SSRC,
+ kRtpTimestamp0, extensions0),
+ RtpSource(timestamp_ms_0, kCsrcs1, RtpSourceType::CSRC,
+ kRtpTimestamp0, extensions0),
+ RtpSource(timestamp_ms_0, kCsrcs0, RtpSourceType::CSRC,
+ kRtpTimestamp0, extensions0)));
+
+ // Deliver packets with updated sources.
+
+ clock.AdvanceTimeMilliseconds(17);
+ tracker.OnFrameDelivered(RtpPacketInfos(
+ {RtpPacketInfo(kSsrc1, {kCsrcs0, kCsrcs2}, kRtpTimestamp1, kAudioLevel1,
+ kAbsoluteCaptureTime1, kReceiveTime1)}));
+
+ int64_t timestamp_ms_1 = clock.TimeInMilliseconds();
+
+ EXPECT_THAT(
+ tracker.GetSources(),
+ ElementsAre(RtpSource(timestamp_ms_1, kSsrc1, RtpSourceType::SSRC,
+ kRtpTimestamp1, extensions1),
+ RtpSource(timestamp_ms_1, kCsrcs2, RtpSourceType::CSRC,
+ kRtpTimestamp1, extensions1),
+ RtpSource(timestamp_ms_1, kCsrcs0, RtpSourceType::CSRC,
+ kRtpTimestamp1, extensions1),
+ RtpSource(timestamp_ms_0, kCsrcs1, RtpSourceType::CSRC,
+ kRtpTimestamp0, extensions0)));
+
+ // Deliver more packets with update csrcs and a new ssrc.
+ clock.AdvanceTimeMilliseconds(17);
+ tracker.OnFrameDelivered(RtpPacketInfos(
+ {RtpPacketInfo(kSsrc2, {kCsrcs0}, kRtpTimestamp2, kAudioLevel2,
+ kAbsoluteCaptureTime2, kReceiveTime2)}));
+
+ int64_t timestamp_ms_2 = clock.TimeInMilliseconds();
+
+ EXPECT_THAT(
+ tracker.GetSources(),
+ ElementsAre(RtpSource(timestamp_ms_2, kSsrc2, RtpSourceType::SSRC,
+ kRtpTimestamp2, extensions2),
+ RtpSource(timestamp_ms_2, kCsrcs0, RtpSourceType::CSRC,
+ kRtpTimestamp2, extensions2),
+ RtpSource(timestamp_ms_1, kSsrc1, RtpSourceType::SSRC,
+ kRtpTimestamp1, extensions1),
+ RtpSource(timestamp_ms_1, kCsrcs2, RtpSourceType::CSRC,
+ kRtpTimestamp1, extensions1),
+ RtpSource(timestamp_ms_0, kCsrcs1, RtpSourceType::CSRC,
+ kRtpTimestamp0, extensions0)));
+}
+
+TEST(SourceTrackerTest, TimedOutSourcesAreRemoved) {
+ constexpr uint32_t kSsrc = 10;
+ constexpr uint32_t kCsrcs0 = 20;
+ constexpr uint32_t kCsrcs1 = 21;
+ constexpr uint32_t kCsrcs2 = 22;
+ constexpr uint32_t kRtpTimestamp0 = 40;
+ constexpr uint32_t kRtpTimestamp1 = 41;
+ constexpr absl::optional<uint8_t> kAudioLevel0 = 50;
+ constexpr absl::optional<uint8_t> kAudioLevel1 = absl::nullopt;
+ constexpr absl::optional<AbsoluteCaptureTime> kAbsoluteCaptureTime0 =
+ AbsoluteCaptureTime{12, 34};
+ constexpr absl::optional<AbsoluteCaptureTime> kAbsoluteCaptureTime1 =
+ AbsoluteCaptureTime{56, 78};
+ constexpr Timestamp kReceiveTime0 = Timestamp::Millis(60);
+ constexpr Timestamp kReceiveTime1 = Timestamp::Millis(61);
+
+ SimulatedClock clock(1000000000000ULL);
+ SourceTracker tracker(&clock);
+
+ tracker.OnFrameDelivered(RtpPacketInfos(
+ {RtpPacketInfo(kSsrc, {kCsrcs0, kCsrcs1}, kRtpTimestamp0, kAudioLevel0,
+ kAbsoluteCaptureTime0, kReceiveTime0)}));
+
+ clock.AdvanceTimeMilliseconds(17);
+
+ tracker.OnFrameDelivered(RtpPacketInfos(
+ {RtpPacketInfo(kSsrc, {kCsrcs0, kCsrcs2}, kRtpTimestamp1, kAudioLevel1,
+ kAbsoluteCaptureTime1, kReceiveTime1)}));
+
+ int64_t timestamp_ms_1 = clock.TimeInMilliseconds();
+
+ clock.AdvanceTimeMilliseconds(SourceTracker::kTimeoutMs);
+
+ constexpr RtpSource::Extensions extensions1 = {kAudioLevel1,
+ kAbsoluteCaptureTime1};
+
+ EXPECT_THAT(
+ tracker.GetSources(),
+ ElementsAre(RtpSource(timestamp_ms_1, kSsrc, RtpSourceType::SSRC,
+ kRtpTimestamp1, extensions1),
+ RtpSource(timestamp_ms_1, kCsrcs2, RtpSourceType::CSRC,
+ kRtpTimestamp1, extensions1),
+ RtpSource(timestamp_ms_1, kCsrcs0, RtpSourceType::CSRC,
+ kRtpTimestamp1, extensions1)));
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/time_util.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/time_util.cc
new file mode 100644
index 0000000000..44ca07dabe
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/time_util.cc
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/time_util.h"
+
+#include <algorithm>
+
+#include "rtc_base/checks.h"
+#include "rtc_base/numerics/divide_round.h"
+#include "rtc_base/time_utils.h"
+
+namespace webrtc {
+
+uint32_t SaturatedToCompactNtp(TimeDelta delta) {
+ constexpr uint32_t kMaxCompactNtp = 0xFFFFFFFF;
+ constexpr int kCompactNtpInSecond = 0x10000;
+ if (delta <= TimeDelta::Zero())
+ return 0;
+ if (delta.us() >=
+ kMaxCompactNtp * rtc::kNumMicrosecsPerSec / kCompactNtpInSecond)
+ return kMaxCompactNtp;
+ // To convert to compact ntp, divide by 1e6 to get seconds,
+ // then multiply by 0x10000 to get the final result.
+ // To avoid float operations, the multiplication and division are swapped.
+ return DivideRoundToNearest(delta.us() * kCompactNtpInSecond,
+ rtc::kNumMicrosecsPerSec);
+}
+
+TimeDelta CompactNtpRttToTimeDelta(uint32_t compact_ntp_interval) {
+ static constexpr TimeDelta kMinRtt = TimeDelta::Millis(1);
+ // The interval to convert is expected to be positive, e.g. an RTT or delay.
+ // Because the interval can be derived from a non-monotonic ntp clock, it
+ // might become negative, which is indistinguishable from a very large value.
+ // Since a very large RTT/delay is less likely than a non-monotonic ntp
+ // clock, such values are treated as negative and converted to the minimum
+ // value of 1 ms.
+ if (compact_ntp_interval > 0x80000000)
+ return kMinRtt;
+ // Convert to a 64-bit value to avoid multiplication overflow.
+ int64_t value = static_cast<int64_t>(compact_ntp_interval);
+ // To convert to TimeDelta, divide by 2^16 to get seconds, then multiply by
+ // 1'000'000 to get microseconds. To avoid float operations, the
+ // multiplication and division are swapped.
+ int64_t us = DivideRoundToNearest(value * rtc::kNumMicrosecsPerSec, 1 << 16);
+ // Small RTT value is considered too good to be true and increased to 1ms.
+ return std::max(TimeDelta::Micros(us), kMinRtt);
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/time_util.h b/third_party/libwebrtc/modules/rtp_rtcp/source/time_util.h
new file mode 100644
index 0000000000..9ff444b12e
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/time_util.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_RTP_RTCP_SOURCE_TIME_UTIL_H_
+#define MODULES_RTP_RTCP_SOURCE_TIME_UTIL_H_
+
+#include <stdint.h>
+
+#include "api/units/time_delta.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "system_wrappers/include/ntp_time.h"
+
+namespace webrtc {
+
+// Helper function for compact ntp representation:
+// RFC 3550, Section 4. Time Format.
+// Wallclock time is represented using the timestamp format of
+// the Network Time Protocol (NTP).
+// ...
+// In some fields where a more compact representation is
+// appropriate, only the middle 32 bits are used; that is, the low 16
+// bits of the integer part and the high 16 bits of the fractional part.
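+// For example (values from the unit tests): an NtpTime with seconds
+// 0x12345678 and fractions 0x23456789 yields the compact form 0x56782345.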
+inline uint32_t CompactNtp(NtpTime ntp) {
+ return (ntp.seconds() << 16) | (ntp.fractions() >> 16);
+}
+
+// Converts interval to compact ntp (1/2^16 seconds) resolution.
+// Negative values are converted to 0; overlarge values are converted to the
+// maximum uint32_t.
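+// For example (values from the unit tests): TimeDelta::Millis(500) maps to
+// 0x8000 and TimeDelta::Seconds(1) maps to 0x10000.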
+uint32_t SaturatedToCompactNtp(TimeDelta delta);
+
+// Converts an interval to the NTP time resolution (1/2^32 seconds ~= 0.2 ns).
+// For deltas with an absolute value larger than 35 minutes the result is
+// unspecified.
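+// For example, TimeDelta::Millis(500) converts to 1 << 31 NTP units (half of
+// the 2^32 units per second).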
+inline constexpr int64_t ToNtpUnits(TimeDelta delta) {
+ // For better precision `delta` is taken at the best TimeDelta precision
+ // (us); the multiplication and the conversion to seconds are then swapped
+ // to avoid float arithmetic.
+ // 2^31 us ~= 35.8 minutes.
+ return (rtc::saturated_cast<int32_t>(delta.us()) * (int64_t{1} << 32)) /
+ 1'000'000;
+}
+
+// Converts an interval from compact ntp (1/2^16 seconds) resolution to
+// TimeDelta. The interval can be up to ~9.1 hours (2^15 seconds).
+// Values close to 2^16 seconds are considered negative and are converted to
+// the minimum value of 1 ms.
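+// For example, a compact ntp interval of 0x10000 (exactly one second)
+// converts to TimeDelta::Seconds(1).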
+TimeDelta CompactNtpRttToTimeDelta(uint32_t compact_ntp_interval);
+
+} // namespace webrtc
+#endif // MODULES_RTP_RTCP_SOURCE_TIME_UTIL_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/time_util_unittest.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/time_util_unittest.cc
new file mode 100644
index 0000000000..b3d557fd83
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/time_util_unittest.cc
@@ -0,0 +1,128 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/rtp_rtcp/source/time_util.h"
+
+#include <cstdint>
+#include <limits>
+
+#include "api/units/time_delta.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+TEST(TimeUtilTest, CompactNtp) {
+ const uint32_t kNtpSec = 0x12345678;
+ const uint32_t kNtpFrac = 0x23456789;
+ const NtpTime kNtp(kNtpSec, kNtpFrac);
+ const uint32_t kNtpMid = 0x56782345;
+ EXPECT_EQ(kNtpMid, CompactNtp(kNtp));
+}
+
+TEST(TimeUtilTest, CompactNtpRttToTimeDelta) {
+ const NtpTime ntp1(0x12345, 0x23456);
+ const NtpTime ntp2(0x12654, 0x64335);
+ int64_t ms_diff = ntp2.ToMs() - ntp1.ToMs();
+ uint32_t ntp_diff = CompactNtp(ntp2) - CompactNtp(ntp1);
+
+ EXPECT_NEAR(CompactNtpRttToTimeDelta(ntp_diff).ms(), ms_diff, 1);
+}
+
+TEST(TimeUtilTest, CompactNtpRttToTimeDeltaWithWrap) {
+ const NtpTime ntp1(0x1ffff, 0x23456);
+ const NtpTime ntp2(0x20000, 0x64335);
+ int64_t ms_diff = ntp2.ToMs() - ntp1.ToMs();
+
+ // While ntp2 > ntp1, their compact ntp representations happen to be in the
+ // opposite order. That shouldn't be a problem as long as unsigned
+ // arithmetic is used.
+ ASSERT_GT(ntp2.ToMs(), ntp1.ToMs());
+ ASSERT_LT(CompactNtp(ntp2), CompactNtp(ntp1));
+
+ uint32_t ntp_diff = CompactNtp(ntp2) - CompactNtp(ntp1);
+ EXPECT_NEAR(CompactNtpRttToTimeDelta(ntp_diff).ms(), ms_diff, 1);
+}
+
+TEST(TimeUtilTest, CompactNtpRttToTimeDeltaLarge) {
+ const NtpTime ntp1(0x10000, 0x00006);
+ const NtpTime ntp2(0x17fff, 0xffff5);
+ int64_t ms_diff = ntp2.ToMs() - ntp1.ToMs();
+ // Ntp difference close to 2^15 seconds should convert correctly too.
+ ASSERT_NEAR(ms_diff, ((1 << 15) - 1) * 1000, 1);
+ uint32_t ntp_diff = CompactNtp(ntp2) - CompactNtp(ntp1);
+ EXPECT_NEAR(CompactNtpRttToTimeDelta(ntp_diff).ms(), ms_diff, 1);
+}
+
+TEST(TimeUtilTest, CompactNtpRttToTimeDeltaNegative) {
+ const NtpTime ntp1(0x20000, 0x23456);
+ const NtpTime ntp2(0x1ffff, 0x64335);
+ int64_t ms_diff = ntp2.ToMs() - ntp1.ToMs();
+ ASSERT_GT(0, ms_diff);
+ // Ntp difference close to 2^16 seconds should be treated as negative.
+ uint32_t ntp_diff = CompactNtp(ntp2) - CompactNtp(ntp1);
+ EXPECT_EQ(CompactNtpRttToTimeDelta(ntp_diff), TimeDelta::Millis(1));
+}
+
+TEST(TimeUtilTest, SaturatedToCompactNtp) {
+ // Converts negative to zero.
+ EXPECT_EQ(SaturatedToCompactNtp(TimeDelta::Micros(-1)), 0u);
+ EXPECT_EQ(SaturatedToCompactNtp(TimeDelta::Zero()), 0u);
+ // Converts values just above and just below max uint32_t.
+ EXPECT_EQ(SaturatedToCompactNtp(TimeDelta::Micros(65536000000)), 0xffffffff);
+ EXPECT_EQ(SaturatedToCompactNtp(TimeDelta::Micros(65535999985)), 0xffffffff);
+ EXPECT_EQ(SaturatedToCompactNtp(TimeDelta::Micros(65535999970)), 0xfffffffe);
+ // Converts half-seconds.
+ EXPECT_EQ(SaturatedToCompactNtp(TimeDelta::Millis(500)), 0x8000u);
+ EXPECT_EQ(SaturatedToCompactNtp(TimeDelta::Seconds(1)), 0x10000u);
+ EXPECT_EQ(SaturatedToCompactNtp(TimeDelta::Millis(1'500)), 0x18000u);
+ // Convert us -> compact_ntp -> TimeDelta. Compact ntp precision is ~15us.
+ EXPECT_NEAR(
+ CompactNtpRttToTimeDelta(SaturatedToCompactNtp(TimeDelta::Micros(1'516)))
+ .us(),
+ 1'516, 16);
+ EXPECT_NEAR(
+ CompactNtpRttToTimeDelta(SaturatedToCompactNtp(TimeDelta::Millis(15)))
+ .us(),
+ 15'000, 16);
+ EXPECT_NEAR(
+ CompactNtpRttToTimeDelta(SaturatedToCompactNtp(TimeDelta::Micros(5'485)))
+ .us(),
+ 5'485, 16);
+ EXPECT_NEAR(
+ CompactNtpRttToTimeDelta(SaturatedToCompactNtp(TimeDelta::Micros(5'515)))
+ .us(),
+ 5'515, 16);
+}
+
+TEST(TimeUtilTest, ToNtpUnits) {
+ EXPECT_EQ(ToNtpUnits(TimeDelta::Zero()), 0);
+ EXPECT_EQ(ToNtpUnits(TimeDelta::Seconds(1)), int64_t{1} << 32);
+ EXPECT_EQ(ToNtpUnits(TimeDelta::Seconds(-1)), -(int64_t{1} << 32));
+
+ EXPECT_EQ(ToNtpUnits(TimeDelta::Millis(500)), int64_t{1} << 31);
+ EXPECT_EQ(ToNtpUnits(TimeDelta::Millis(-1'500)), -(int64_t{3} << 31));
+
+ // Smallest TimeDelta that can be converted without precision loss.
+ EXPECT_EQ(ToNtpUnits(TimeDelta::Micros(15'625)), int64_t{1} << 26);
+
+ // 1 us ~= 4'294.97 NTP units. ToNtpUnits makes no rounding promises.
+ EXPECT_GE(ToNtpUnits(TimeDelta::Micros(1)), 4'294);
+ EXPECT_LE(ToNtpUnits(TimeDelta::Micros(1)), 4'295);
+
+ // Test near maximum and minimum supported values.
+ static constexpr int64_t k35MinutesInNtpUnits = int64_t{35 * 60} << 32;
+ EXPECT_EQ(ToNtpUnits(TimeDelta::Seconds(35 * 60)), k35MinutesInNtpUnits);
+ EXPECT_EQ(ToNtpUnits(TimeDelta::Seconds(-35 * 60)), -k35MinutesInNtpUnits);
+
+ // The result for too large or too small values is unspecified, but
+ // shouldn't cause integer overflow or other undefined behavior.
+ ToNtpUnits(TimeDelta::Micros(std::numeric_limits<int64_t>::max() - 1));
+ ToNtpUnits(TimeDelta::Micros(std::numeric_limits<int64_t>::min() + 1));
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/tmmbr_help.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/tmmbr_help.cc
new file mode 100644
index 0000000000..569ed4d8e0
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/tmmbr_help.cc
@@ -0,0 +1,184 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/tmmbr_help.h"
+
+#include <stddef.h>
+
+#include <limits>
+
+#include "absl/algorithm/container.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
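+// Geometric sketch of the algorithm below (an informal reading, following
+// the TMMBR bounding-set procedure of RFC 5104): each tuple
+// (bitrate, packet_overhead) defines a line
+//   net_bitrate(packet_rate) = bitrate - packet_overhead * packet_rate,
+// and the bounding set is the subset of tuples whose lines form the lower
+// envelope, i.e. that are minimal for some packet rate.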
+std::vector<rtcp::TmmbItem> TMMBRHelp::FindBoundingSet(
+ std::vector<rtcp::TmmbItem> candidates) {
+ // Filter out candidates with 0 bitrate.
+ for (auto it = candidates.begin(); it != candidates.end();) {
+ if (!it->bitrate_bps())
+ it = candidates.erase(it);
+ else
+ ++it;
+ }
+
+ if (candidates.size() <= 1)
+ return candidates;
+
+ size_t num_candidates = candidates.size();
+
+ // 1. Sort by increasing packet overhead.
+ absl::c_sort(candidates,
+ [](const rtcp::TmmbItem& lhs, const rtcp::TmmbItem& rhs) {
+ return lhs.packet_overhead() < rhs.packet_overhead();
+ });
+
+ // 2. For tuples with the same overhead, keep the one with the lowest
+ // bitrate.
+ for (auto it = candidates.begin(); it != candidates.end();) {
+ RTC_DCHECK(it->bitrate_bps());
+ auto current_min = it;
+ auto next_it = it + 1;
+ // Use the fact that candidates are sorted by overhead, so candidates with
+ // the same overhead are adjacent.
+ while (next_it != candidates.end() &&
+ next_it->packet_overhead() == current_min->packet_overhead()) {
+ if (next_it->bitrate_bps() < current_min->bitrate_bps()) {
+ current_min->set_bitrate_bps(0);
+ current_min = next_it;
+ } else {
+ next_it->set_bitrate_bps(0);
+ }
+ ++next_it;
+ --num_candidates;
+ }
+ it = next_it;
+ }
+
+ // 3. Select and remove the tuple with the lowest bitrate.
+ // (If there is more than one, choose the one with the highest overhead.)
+ auto min_bitrate_it = candidates.end();
+ for (auto it = candidates.begin(); it != candidates.end(); ++it) {
+ if (it->bitrate_bps()) {
+ min_bitrate_it = it;
+ break;
+ }
+ }
+
+ for (auto it = min_bitrate_it; it != candidates.end(); ++it) {
+ if (it->bitrate_bps() &&
+ it->bitrate_bps() <= min_bitrate_it->bitrate_bps()) {
+ // Get min bitrate.
+ min_bitrate_it = it;
+ }
+ }
+
+ std::vector<rtcp::TmmbItem> bounding_set;
+ bounding_set.reserve(num_candidates);
+ std::vector<float> intersection(num_candidates);
+ std::vector<float> max_packet_rate(num_candidates);
+
+ // First member of selected list.
+ bounding_set.push_back(*min_bitrate_it);
+ intersection[0] = 0;
+ // Calculate its maximum packet rate (where its line crosses the x-axis).
+ uint16_t packet_overhead = bounding_set.back().packet_overhead();
+ if (packet_overhead == 0) {
+ // Avoid division by zero.
+ max_packet_rate[0] = std::numeric_limits<float>::max();
+ } else {
+ max_packet_rate[0] =
+ bounding_set.back().bitrate_bps() / static_cast<float>(packet_overhead);
+ }
+ // Remove from candidate list.
+ min_bitrate_it->set_bitrate_bps(0);
+ --num_candidates;
+
+ // 4. Discard from the candidate list all tuples with lower overhead
+ // (the next tuple must be steeper).
+ for (auto it = candidates.begin(); it != candidates.end(); ++it) {
+ if (it->bitrate_bps() &&
+ it->packet_overhead() < bounding_set.front().packet_overhead()) {
+ it->set_bitrate_bps(0);
+ --num_candidates;
+ }
+ }
+
+ bool get_new_candidate = true;
+ rtcp::TmmbItem cur_candidate;
+ while (num_candidates > 0) {
+ if (get_new_candidate) {
+ // 5. Remove first remaining tuple from candidate list.
+ for (auto it = candidates.begin(); it != candidates.end(); ++it) {
+ if (it->bitrate_bps()) {
+ cur_candidate = *it;
+ it->set_bitrate_bps(0);
+ break;
+ }
+ }
+ }
+
+ // 6. Calculate the packet rate and the intersection of the current
+ // line with the line of the last tuple in the selected list.
+ RTC_DCHECK_NE(cur_candidate.packet_overhead(),
+ bounding_set.back().packet_overhead());
+ float packet_rate = static_cast<float>(cur_candidate.bitrate_bps() -
+ bounding_set.back().bitrate_bps()) /
+ (cur_candidate.packet_overhead() -
+ bounding_set.back().packet_overhead());
+
+ // 7. If the packet rate is equal to or lower than the intersection of
+ // the last tuple in the selected list, remove that tuple from the
+ // selected list and go back to step 6.
+ if (packet_rate <= intersection[bounding_set.size() - 1]) {
+ // Remove last tuple and goto step 6.
+ bounding_set.pop_back();
+ get_new_candidate = false;
+ } else {
+ // 8. If the packet rate is lower than the maximum packet rate of the
+ // last tuple in the selected list, add the current tuple to the
+ // selected list.
+ if (packet_rate < max_packet_rate[bounding_set.size() - 1]) {
+ bounding_set.push_back(cur_candidate);
+ intersection[bounding_set.size() - 1] = packet_rate;
+ uint16_t packet_overhead = bounding_set.back().packet_overhead();
+ RTC_DCHECK_NE(packet_overhead, 0);
+ max_packet_rate[bounding_set.size() - 1] =
+ bounding_set.back().bitrate_bps() /
+ static_cast<float>(packet_overhead);
+ }
+ --num_candidates;
+ get_new_candidate = true;
+ }
+
+ // 9. Go back to step 5 if any tuple remains in the candidate list.
+ }
+ RTC_DCHECK(!bounding_set.empty());
+ return bounding_set;
+}
+
+bool TMMBRHelp::IsOwner(const std::vector<rtcp::TmmbItem>& bounding,
+ uint32_t ssrc) {
+ for (const rtcp::TmmbItem& item : bounding) {
+ if (item.ssrc() == ssrc) {
+ return true;
+ }
+ }
+ return false;
+}
+
+uint64_t TMMBRHelp::CalcMinBitrateBps(
+ const std::vector<rtcp::TmmbItem>& candidates) {
+ RTC_DCHECK(!candidates.empty());
+ uint64_t min_bitrate_bps = std::numeric_limits<uint64_t>::max();
+ for (const rtcp::TmmbItem& item : candidates)
+ if (item.bitrate_bps() < min_bitrate_bps)
+ min_bitrate_bps = item.bitrate_bps();
+ return min_bitrate_bps;
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/tmmbr_help.h b/third_party/libwebrtc/modules/rtp_rtcp/source/tmmbr_help.h
new file mode 100644
index 0000000000..8c26b22eb7
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/tmmbr_help.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_RTP_RTCP_SOURCE_TMMBR_HELP_H_
+#define MODULES_RTP_RTCP_SOURCE_TMMBR_HELP_H_
+
+#include <stdint.h>
+
+#include <vector>
+
+#include "modules/rtp_rtcp/source/rtcp_packet/tmmb_item.h"
+
+namespace webrtc {
+
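+// Stateless helpers for Temporary Maximum Media Stream Bit Rate Request
+// (TMMBR, RFC 5104) processing: computing the bounding set of TMMB tuples,
+// checking bounding-set ownership, and finding the minimum candidate bitrate.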
+class TMMBRHelp {
+ public:
+ static std::vector<rtcp::TmmbItem> FindBoundingSet(
+ std::vector<rtcp::TmmbItem> candidates);
+
+ static bool IsOwner(const std::vector<rtcp::TmmbItem>& bounding,
+ uint32_t ssrc);
+
+ static uint64_t CalcMinBitrateBps(
+ const std::vector<rtcp::TmmbItem>& candidates);
+};
+} // namespace webrtc
+
+#endif // MODULES_RTP_RTCP_SOURCE_TMMBR_HELP_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/ulpfec_generator.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/ulpfec_generator.cc
new file mode 100644
index 0000000000..20402fc4d3
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/ulpfec_generator.cc
@@ -0,0 +1,268 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/ulpfec_generator.h"
+
+#include <string.h>
+
+#include <cstdint>
+#include <memory>
+#include <utility>
+
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "modules/rtp_rtcp/source/byte_io.h"
+#include "modules/rtp_rtcp/source/forward_error_correction.h"
+#include "modules/rtp_rtcp/source/forward_error_correction_internal.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/synchronization/mutex.h"
+
+namespace webrtc {
+
+namespace {
+
+constexpr size_t kRedForFecHeaderLength = 1;
+
+// This controls the maximum amount of excess overhead (actual - target)
+// allowed in order to trigger EncodeFec(), before `params_.max_fec_frames`
+// is reached. Overhead here is defined relative to the number of media
+// packets.
+constexpr int kMaxExcessOverhead = 50; // Q8.
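+// (In Q8, 50 corresponds to 50/256, i.e. roughly 20% of the number of media
+// packets.)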
+
+// This is the minimum number of media packets required (above some protection
+// level) in order to trigger EncodeFec(), before `params_.max_fec_frames` is
+// reached.
+constexpr size_t kMinMediaPackets = 4;
+
+// Threshold on the received FEC protection level, above which we enforce at
+// least `kMinMediaPackets` packets for the FEC code. Below this threshold,
+// the minimum number of media packets is left at its default value of 1.
+//
+// The range is between 0 and 255, where 255 corresponds to 100% overhead
+// (relative to the number of protected media packets).
+constexpr uint8_t kHighProtectionThreshold = 80;
+
+// This threshold is used to adapt the `kMinMediaPackets` threshold, based
+// on the average number of packets per frame seen so far. When there are few
+// packets per frame (as given by this threshold), at least
+// `kMinMediaPackets` + 1 packets are sent to the FEC code.
+constexpr float kMinMediaPacketsAdaptationThreshold = 2.0f;
+
+// At construction time, we don't know the SSRC that is used for the generated
+// FEC packets, but we still need to give it to the ForwardErrorCorrection ctor
+// to be used in the decoding.
+// TODO(brandtr): Get rid of this awkwardness by splitting
+// ForwardErrorCorrection in two objects -- one encoder and one decoder.
+constexpr uint32_t kUnknownSsrc = 0;
+
+} // namespace
+
+UlpfecGenerator::Params::Params() = default;
+UlpfecGenerator::Params::Params(FecProtectionParams delta_params,
+ FecProtectionParams keyframe_params)
+ : delta_params(delta_params), keyframe_params(keyframe_params) {}
+
+UlpfecGenerator::UlpfecGenerator(int red_payload_type,
+ int ulpfec_payload_type,
+ Clock* clock)
+ : red_payload_type_(red_payload_type),
+ ulpfec_payload_type_(ulpfec_payload_type),
+ clock_(clock),
+ fec_(ForwardErrorCorrection::CreateUlpfec(kUnknownSsrc)),
+ num_protected_frames_(0),
+ min_num_media_packets_(1),
+ media_contains_keyframe_(false),
+ fec_bitrate_(/*max_window_size_ms=*/1000, RateStatistics::kBpsScale) {}
+
+// Used by FlexFecSender, payload types are unused.
+UlpfecGenerator::UlpfecGenerator(std::unique_ptr<ForwardErrorCorrection> fec,
+ Clock* clock)
+ : red_payload_type_(0),
+ ulpfec_payload_type_(0),
+ clock_(clock),
+ fec_(std::move(fec)),
+ num_protected_frames_(0),
+ min_num_media_packets_(1),
+ media_contains_keyframe_(false),
+ fec_bitrate_(/*max_window_size_ms=*/1000, RateStatistics::kBpsScale) {}
+
+UlpfecGenerator::~UlpfecGenerator() = default;
+
+void UlpfecGenerator::SetProtectionParameters(
+ const FecProtectionParams& delta_params,
+ const FecProtectionParams& key_params) {
+ RTC_DCHECK_GE(delta_params.fec_rate, 0);
+ RTC_DCHECK_LE(delta_params.fec_rate, 255);
+ RTC_DCHECK_GE(key_params.fec_rate, 0);
+ RTC_DCHECK_LE(key_params.fec_rate, 255);
+ // Store the new params and apply them for the next set of FEC packets being
+ // produced.
+ MutexLock lock(&mutex_);
+ pending_params_.emplace(delta_params, key_params);
+}
+
+void UlpfecGenerator::AddPacketAndGenerateFec(const RtpPacketToSend& packet) {
+ RTC_DCHECK_RUNS_SERIALIZED(&race_checker_);
+ RTC_DCHECK(generated_fec_packets_.empty());
+
+ {
+ MutexLock lock(&mutex_);
+ if (pending_params_) {
+ current_params_ = *pending_params_;
+ pending_params_.reset();
+
+ if (CurrentParams().fec_rate > kHighProtectionThreshold) {
+ min_num_media_packets_ = kMinMediaPackets;
+ } else {
+ min_num_media_packets_ = 1;
+ }
+ }
+ }
+
+ if (packet.is_key_frame()) {
+ media_contains_keyframe_ = true;
+ }
+ const bool complete_frame = packet.Marker();
+ if (media_packets_.size() < kUlpfecMaxMediaPackets) {
+ // Our packet masks can only protect up to `kUlpfecMaxMediaPackets` packets.
+ auto fec_packet = std::make_unique<ForwardErrorCorrection::Packet>();
+ fec_packet->data = packet.Buffer();
+ media_packets_.push_back(std::move(fec_packet));
+
+ // Keep a copy of the last RTP packet, so we can copy the RTP header
+ // from it when creating newly generated ULPFEC+RED packets.
+ RTC_DCHECK_GE(packet.headers_size(), kRtpHeaderSize);
+ last_media_packet_ = packet;
+ }
+
+ if (complete_frame) {
+ ++num_protected_frames_;
+ }
+
+ auto params = CurrentParams();
+
+ // Produce FEC over at most `params_.max_fec_frames` frames, or as soon as:
+ // (1) the excess overhead (actual overhead - requested/target overhead) is
+ // less than `kMaxExcessOverhead`, and
+ // (2) at least `min_num_media_packets_` media packets have been added.
+ if (complete_frame &&
+ (num_protected_frames_ >= params.max_fec_frames ||
+ (ExcessOverheadBelowMax() && MinimumMediaPacketsReached()))) {
+ // We are not using Unequal Protection feature of the parity erasure code.
+ constexpr int kNumImportantPackets = 0;
+ constexpr bool kUseUnequalProtection = false;
+ fec_->EncodeFec(media_packets_, params.fec_rate, kNumImportantPackets,
+ kUseUnequalProtection, params.fec_mask_type,
+ &generated_fec_packets_);
+ if (generated_fec_packets_.empty()) {
+ ResetState();
+ }
+ }
+}
+
+bool UlpfecGenerator::ExcessOverheadBelowMax() const {
+ RTC_DCHECK_RUNS_SERIALIZED(&race_checker_);
+
+ return ((Overhead() - CurrentParams().fec_rate) < kMaxExcessOverhead);
+}
+
+bool UlpfecGenerator::MinimumMediaPacketsReached() const {
+ RTC_DCHECK_RUNS_SERIALIZED(&race_checker_);
+ float average_num_packets_per_frame =
+ static_cast<float>(media_packets_.size()) / num_protected_frames_;
+ int num_media_packets = static_cast<int>(media_packets_.size());
+ if (average_num_packets_per_frame < kMinMediaPacketsAdaptationThreshold) {
+ return num_media_packets >= min_num_media_packets_;
+ } else {
+ // For larger rates (more packets/frame), increase the threshold.
+ // TODO(brandtr): Investigate what impact this adaptation has.
+ return num_media_packets >= min_num_media_packets_ + 1;
+ }
+}
+
+const FecProtectionParams& UlpfecGenerator::CurrentParams() const {
+ RTC_DCHECK_RUNS_SERIALIZED(&race_checker_);
+ return media_contains_keyframe_ ? current_params_.keyframe_params
+ : current_params_.delta_params;
+}
+
+size_t UlpfecGenerator::MaxPacketOverhead() const {
+ RTC_DCHECK_RUNS_SERIALIZED(&race_checker_);
+ return fec_->MaxPacketOverhead();
+}
+
+std::vector<std::unique_ptr<RtpPacketToSend>> UlpfecGenerator::GetFecPackets() {
+ RTC_DCHECK_RUNS_SERIALIZED(&race_checker_);
+ if (generated_fec_packets_.empty()) {
+ return std::vector<std::unique_ptr<RtpPacketToSend>>();
+ }
+
+ // Wrap FEC packet (including FEC headers) in a RED packet. Since the
+ // FEC packets in `generated_fec_packets_` don't have RTP headers, we
+ // reuse the header from the last media packet.
+ RTC_CHECK(last_media_packet_.has_value());
+ last_media_packet_->SetPayloadSize(0);
+
+ std::vector<std::unique_ptr<RtpPacketToSend>> fec_packets;
+ fec_packets.reserve(generated_fec_packets_.size());
+
+ size_t total_fec_size_bytes = 0;
+ for (const auto* fec_packet : generated_fec_packets_) {
+ std::unique_ptr<RtpPacketToSend> red_packet =
+ std::make_unique<RtpPacketToSend>(*last_media_packet_);
+ red_packet->SetPayloadType(red_payload_type_);
+ red_packet->SetMarker(false);
+ uint8_t* payload_buffer = red_packet->SetPayloadSize(
+ kRedForFecHeaderLength + fec_packet->data.size());
+ // Primary RED header with F bit unset.
+ // See https://tools.ietf.org/html/rfc2198#section-3
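+ // The header is a single octet: the F bit (MSB) is zero, marking this as
+ // the final block header, and the low 7 bits carry the block payload type
+ // (here the ULPFEC payload type).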
+ payload_buffer[0] = ulpfec_payload_type_; // RED header.
+ memcpy(&payload_buffer[1], fec_packet->data.data(),
+ fec_packet->data.size());
+ total_fec_size_bytes += red_packet->size();
+ red_packet->set_packet_type(RtpPacketMediaType::kForwardErrorCorrection);
+ red_packet->set_allow_retransmission(false);
+ red_packet->set_is_red(true);
+ red_packet->set_fec_protect_packet(false);
+ fec_packets.push_back(std::move(red_packet));
+ }
+
+ ResetState();
+
+ MutexLock lock(&mutex_);
+ fec_bitrate_.Update(total_fec_size_bytes, clock_->TimeInMilliseconds());
+
+ return fec_packets;
+}
+
+DataRate UlpfecGenerator::CurrentFecRate() const {
+ MutexLock lock(&mutex_);
+ return DataRate::BitsPerSec(
+ fec_bitrate_.Rate(clock_->TimeInMilliseconds()).value_or(0));
+}
+
+int UlpfecGenerator::Overhead() const {
+ RTC_DCHECK_RUNS_SERIALIZED(&race_checker_);
+ RTC_DCHECK(!media_packets_.empty());
+ int num_fec_packets =
+ fec_->NumFecPackets(media_packets_.size(), CurrentParams().fec_rate);
+
+ // Return the overhead in Q8.
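+ // For example, 1 FEC packet protecting 4 media packets gives
+ // (1 << 8) / 4 = 64, i.e. 25% in Q8.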
+ return (num_fec_packets << 8) / media_packets_.size();
+}
+
+void UlpfecGenerator::ResetState() {
+ RTC_DCHECK_RUNS_SERIALIZED(&race_checker_);
+ media_packets_.clear();
+ last_media_packet_.reset();
+ generated_fec_packets_.clear();
+ num_protected_frames_ = 0;
+ media_contains_keyframe_ = false;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/ulpfec_generator.h b/third_party/libwebrtc/modules/rtp_rtcp/source/ulpfec_generator.h
new file mode 100644
index 0000000000..88a8b459e6
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/ulpfec_generator.h
@@ -0,0 +1,123 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_RTP_RTCP_SOURCE_ULPFEC_GENERATOR_H_
+#define MODULES_RTP_RTCP_SOURCE_ULPFEC_GENERATOR_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <list>
+#include <memory>
+#include <vector>
+
+#include "modules/include/module_fec_types.h"
+#include "modules/rtp_rtcp/source/forward_error_correction.h"
+#include "modules/rtp_rtcp/source/video_fec_generator.h"
+#include "rtc_base/race_checker.h"
+#include "rtc_base/rate_statistics.h"
+#include "rtc_base/synchronization/mutex.h"
+
+namespace webrtc {
+
+class FlexfecSender;
+
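+// Typical usage sketch (illustrative only; the payload type values below are
+// assumptions, not defaults):
+//   UlpfecGenerator generator(/*red_payload_type=*/112,
+//                             /*ulpfec_payload_type=*/113, clock);
+//   generator.SetProtectionParameters(delta_params, key_params);
+//   generator.AddPacketAndGenerateFec(media_packet);
+//   std::vector<std::unique_ptr<RtpPacketToSend>> red_packets =
+//       generator.GetFecPackets();
+//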
+class UlpfecGenerator : public VideoFecGenerator {
+ friend class FlexfecSender;
+
+ public:
+ UlpfecGenerator(int red_payload_type, int ulpfec_payload_type, Clock* clock);
+ ~UlpfecGenerator();
+
+ FecType GetFecType() const override {
+ return VideoFecGenerator::FecType::kUlpFec;
+ }
+ absl::optional<uint32_t> FecSsrc() override { return absl::nullopt; }
+
+ void SetProtectionParameters(const FecProtectionParams& delta_params,
+ const FecProtectionParams& key_params) override;
+
+ // Adds a media packet to the internal buffer. When enough media packets
+ // have been added, the FEC packets are generated and stored internally.
+ // These FEC packets are then obtained by calling GetFecPackets().
+ void AddPacketAndGenerateFec(const RtpPacketToSend& packet) override;
+
+ // Returns the overhead, per packet, for FEC (and possibly RED).
+ size_t MaxPacketOverhead() const override;
+
+ std::vector<std::unique_ptr<RtpPacketToSend>> GetFecPackets() override;
+
+ // Current rate of FEC packets generated, including all RTP-level headers.
+ DataRate CurrentFecRate() const override;
+
+ absl::optional<RtpState> GetRtpState() override { return absl::nullopt; }
+
+ // Currently used protection params.
+ const FecProtectionParams& CurrentParams() const;
+
+ private:
+ struct Params {
+ Params();
+ Params(FecProtectionParams delta_params,
+ FecProtectionParams keyframe_params);
+
+ FecProtectionParams delta_params;
+ FecProtectionParams keyframe_params;
+ };
+
+ UlpfecGenerator(std::unique_ptr<ForwardErrorCorrection> fec, Clock* clock);
+
+ // Overhead is defined relative to the number of media packets, not
+ // relative to the total number of packets. This definition is inherited
+ // from the protection factor produced by the video_coding module and from
+ // how the FEC generation is implemented.
+ int Overhead() const;
+
+ // Returns true if the excess overhead (actual - target) for the FEC is
+ // below `kMaxExcessOverhead`. This affects the lower protection level cases
+ // and cases with a low number of media packets per frame. The target
+ // overhead is given by `params_.fec_rate`, and is only achievable in the
+ // limit of a large number of media packets.
+ bool ExcessOverheadBelowMax() const;
+
+ // Returns true if the number of added media packets is at least
+ // `min_num_media_packets_`. This condition tries to capture the effect
+ // that, for the same amount of protection/overhead, longer codes
+ // (e.g. (2k,2m) vs (k,m)) are generally more effective at recovering losses.
+ bool MinimumMediaPacketsReached() const;
+
+ void ResetState();
+
+ const int red_payload_type_;
+ const int ulpfec_payload_type_;
+ Clock* const clock_;
+
+ rtc::RaceChecker race_checker_;
+ const std::unique_ptr<ForwardErrorCorrection> fec_
+ RTC_GUARDED_BY(race_checker_);
+ ForwardErrorCorrection::PacketList media_packets_
+ RTC_GUARDED_BY(race_checker_);
+ absl::optional<RtpPacketToSend> last_media_packet_
+ RTC_GUARDED_BY(race_checker_);
+ std::list<ForwardErrorCorrection::Packet*> generated_fec_packets_
+ RTC_GUARDED_BY(race_checker_);
+ int num_protected_frames_ RTC_GUARDED_BY(race_checker_);
+ int min_num_media_packets_ RTC_GUARDED_BY(race_checker_);
+ Params current_params_ RTC_GUARDED_BY(race_checker_);
+ bool media_contains_keyframe_ RTC_GUARDED_BY(race_checker_);
+
+ mutable Mutex mutex_;
+ absl::optional<Params> pending_params_ RTC_GUARDED_BY(mutex_);
+ RateStatistics fec_bitrate_ RTC_GUARDED_BY(mutex_);
+};
+
+} // namespace webrtc
+
+#endif // MODULES_RTP_RTCP_SOURCE_ULPFEC_GENERATOR_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/ulpfec_generator_unittest.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/ulpfec_generator_unittest.cc
new file mode 100644
index 0000000000..18f5685791
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/ulpfec_generator_unittest.cc
@@ -0,0 +1,273 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/ulpfec_generator.h"
+
+#include <list>
+#include <memory>
+#include <utility>
+#include <vector>
+
+#include "modules/rtp_rtcp/source/byte_io.h"
+#include "modules/rtp_rtcp/source/fec_test_helper.h"
+#include "modules/rtp_rtcp/source/forward_error_correction.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+namespace {
+using test::fec::AugmentedPacket;
+using test::fec::AugmentedPacketGenerator;
+
+constexpr int kFecPayloadType = 96;
+constexpr int kRedPayloadType = 97;
+constexpr uint32_t kMediaSsrc = 835424;
+} // namespace
+
+void VerifyHeader(uint16_t seq_num,
+ uint32_t timestamp,
+ int red_payload_type,
+ int fec_payload_type,
+ bool marker_bit,
+ const rtc::CopyOnWriteBuffer& data) {
+ // Marker bit not set.
+ EXPECT_EQ(marker_bit ? 0x80 : 0, data[1] & 0x80);
+ EXPECT_EQ(red_payload_type, data[1] & 0x7F);
+ EXPECT_EQ(seq_num, (data[2] << 8) + data[3]);
+ uint32_t parsed_timestamp =
+ (data[4] << 24) + (data[5] << 16) + (data[6] << 8) + data[7];
+ EXPECT_EQ(timestamp, parsed_timestamp);
+ EXPECT_EQ(static_cast<uint8_t>(fec_payload_type), data[kRtpHeaderSize]);
+}
+
+class UlpfecGeneratorTest : public ::testing::Test {
+ protected:
+ UlpfecGeneratorTest()
+ : fake_clock_(1),
+ ulpfec_generator_(kRedPayloadType, kFecPayloadType, &fake_clock_),
+ packet_generator_(kMediaSsrc) {}
+
+ SimulatedClock fake_clock_;
+ UlpfecGenerator ulpfec_generator_;
+ AugmentedPacketGenerator packet_generator_;
+};
+
+// Verifies a bug found via fuzzing, where a gap in the packet sequence caused us
+// to move past the end of the current FEC packet mask byte without moving to
+// the next byte. That likely caused us to repeatedly read from the same byte,
+// and if that byte didn't protect packets we would generate empty FEC.
+TEST_F(UlpfecGeneratorTest, NoEmptyFecWithSeqNumGaps) {
+ struct Packet {
+ size_t header_size;
+ size_t payload_size;
+ uint16_t seq_num;
+ bool marker_bit;
+ };
+ std::vector<Packet> protected_packets;
+ protected_packets.push_back({15, 3, 41, 0});
+ protected_packets.push_back({14, 1, 43, 0});
+ protected_packets.push_back({19, 0, 48, 0});
+ protected_packets.push_back({19, 0, 50, 0});
+ protected_packets.push_back({14, 3, 51, 0});
+ protected_packets.push_back({13, 8, 52, 0});
+ protected_packets.push_back({19, 2, 53, 0});
+ protected_packets.push_back({12, 3, 54, 0});
+ protected_packets.push_back({21, 0, 55, 0});
+ protected_packets.push_back({13, 3, 57, 1});
+ FecProtectionParams params = {117, 3, kFecMaskBursty};
+ ulpfec_generator_.SetProtectionParameters(params, params);
+ for (Packet p : protected_packets) {
+ RtpPacketToSend packet(nullptr);
+ packet.SetMarker(p.marker_bit);
+ packet.AllocateExtension(RTPExtensionType::kRtpExtensionMid,
+ p.header_size - packet.headers_size());
+ packet.SetSequenceNumber(p.seq_num);
+ packet.AllocatePayload(p.payload_size);
+ ulpfec_generator_.AddPacketAndGenerateFec(packet);
+
+ std::vector<std::unique_ptr<RtpPacketToSend>> fec_packets =
+ ulpfec_generator_.GetFecPackets();
+ if (!p.marker_bit) {
+ EXPECT_TRUE(fec_packets.empty());
+ } else {
+ EXPECT_FALSE(fec_packets.empty());
+ }
+ }
+}
+
+TEST_F(UlpfecGeneratorTest, OneFrameFec) {
+ // The number of media packets (`kNumPackets`), the number of frames (one
+ // for this test), and the protection factor (`params.fec_rate`) are set to
+ // make sure the conditions for generating FEC are satisfied. This means:
+ // (1) the protection factor is high enough so that the actual overhead over
+ // 1 frame of packets is within `kMaxExcessOverhead`, and (2) the total
+ // number of media packets for 1 frame is at least `min_num_media_packets_`.
+ constexpr size_t kNumPackets = 4;
+ FecProtectionParams params = {15, 3, kFecMaskRandom};
+ packet_generator_.NewFrame(kNumPackets);
+ // Expecting one FEC packet.
+ ulpfec_generator_.SetProtectionParameters(params, params);
+ uint32_t last_timestamp = 0;
+ for (size_t i = 0; i < kNumPackets; ++i) {
+ std::unique_ptr<AugmentedPacket> packet =
+ packet_generator_.NextPacket(i, 10);
+ RtpPacketToSend rtp_packet(nullptr);
+ EXPECT_TRUE(rtp_packet.Parse(packet->data.data(), packet->data.size()));
+ ulpfec_generator_.AddPacketAndGenerateFec(rtp_packet);
+ last_timestamp = packet->header.timestamp;
+ }
+ std::vector<std::unique_ptr<RtpPacketToSend>> fec_packets =
+ ulpfec_generator_.GetFecPackets();
+ EXPECT_EQ(fec_packets.size(), 1u);
+ uint16_t seq_num = packet_generator_.NextPacketSeqNum();
+ fec_packets[0]->SetSequenceNumber(seq_num);
+ EXPECT_TRUE(ulpfec_generator_.GetFecPackets().empty());
+
+ EXPECT_EQ(fec_packets[0]->headers_size(), kRtpHeaderSize);
+
+ VerifyHeader(seq_num, last_timestamp, kRedPayloadType, kFecPayloadType, false,
+ fec_packets[0]->Buffer());
+}
+
+TEST_F(UlpfecGeneratorTest, TwoFrameFec) {
+ // The number of media packets per frame (`kNumPackets`), the number of
+ // frames (`kNumFrames`), and the protection factor (`params.fec_rate`) are
+ // set to make sure the conditions for generating FEC are satisfied. This
+ // means: (1) the protection factor is high enough so that the actual
+ // overhead over `kNumFrames` frames is within `kMaxExcessOverhead`, and
+ // (2) the total number of media packets for `kNumFrames` frames is at
+ // least `min_num_media_packets_`.
+ constexpr size_t kNumPackets = 2;
+ constexpr size_t kNumFrames = 2;
+
+ FecProtectionParams params = {15, 3, kFecMaskRandom};
+ // Expecting one FEC packet.
+ ulpfec_generator_.SetProtectionParameters(params, params);
+ uint32_t last_timestamp = 0;
+ for (size_t i = 0; i < kNumFrames; ++i) {
+ packet_generator_.NewFrame(kNumPackets);
+ for (size_t j = 0; j < kNumPackets; ++j) {
+ std::unique_ptr<AugmentedPacket> packet =
+ packet_generator_.NextPacket(i * kNumPackets + j, 10);
+ RtpPacketToSend rtp_packet(nullptr);
+ EXPECT_TRUE(rtp_packet.Parse(packet->data.data(), packet->data.size()));
+ ulpfec_generator_.AddPacketAndGenerateFec(rtp_packet);
+ last_timestamp = packet->header.timestamp;
+ }
+ }
+ std::vector<std::unique_ptr<RtpPacketToSend>> fec_packets =
+ ulpfec_generator_.GetFecPackets();
+ EXPECT_EQ(fec_packets.size(), 1u);
+ const uint16_t seq_num = packet_generator_.NextPacketSeqNum();
+ fec_packets[0]->SetSequenceNumber(seq_num);
+ VerifyHeader(seq_num, last_timestamp, kRedPayloadType, kFecPayloadType, false,
+ fec_packets[0]->Buffer());
+}
+
+TEST_F(UlpfecGeneratorTest, MixedMediaRtpHeaderLengths) {
+ constexpr size_t kShortRtpHeaderLength = 12;
+ constexpr size_t kLongRtpHeaderLength = 16;
+
+ // Only one frame required to generate FEC.
+ FecProtectionParams params = {127, 1, kFecMaskRandom};
+ ulpfec_generator_.SetProtectionParameters(params, params);
+
+ // Fill up the internal buffer with media packets that have a short RTP header.
+ packet_generator_.NewFrame(kUlpfecMaxMediaPackets + 1);
+ for (size_t i = 0; i < kUlpfecMaxMediaPackets; ++i) {
+ std::unique_ptr<AugmentedPacket> packet =
+ packet_generator_.NextPacket(i, 10);
+ RtpPacketToSend rtp_packet(nullptr);
+ EXPECT_TRUE(rtp_packet.Parse(packet->data.data(), packet->data.size()));
+ EXPECT_EQ(rtp_packet.headers_size(), kShortRtpHeaderLength);
+ ulpfec_generator_.AddPacketAndGenerateFec(rtp_packet);
+ EXPECT_TRUE(ulpfec_generator_.GetFecPackets().empty());
+ }
+
+ // Kick off FEC generation with a media packet that has a long RTP header.
+ // Since the internal buffer is full, this packet will not be protected.
+ std::unique_ptr<AugmentedPacket> packet =
+ packet_generator_.NextPacket(kUlpfecMaxMediaPackets, 10);
+ RtpPacketToSend rtp_packet(nullptr);
+ EXPECT_TRUE(rtp_packet.Parse(packet->data.data(), packet->data.size()));
+ EXPECT_TRUE(rtp_packet.SetPayloadSize(0) != nullptr);
+ const uint32_t csrcs[]{1};
+ rtp_packet.SetCsrcs(csrcs);
+
+ EXPECT_EQ(rtp_packet.headers_size(), kLongRtpHeaderLength);
+
+ ulpfec_generator_.AddPacketAndGenerateFec(rtp_packet);
+ std::vector<std::unique_ptr<RtpPacketToSend>> fec_packets =
+ ulpfec_generator_.GetFecPackets();
+ EXPECT_FALSE(fec_packets.empty());
+
+ // Ensure that the RED header is placed correctly, i.e. the correct
+ // RTP header length was used in the RED packet creation.
+ uint16_t seq_num = packet_generator_.NextPacketSeqNum();
+ for (const auto& fec_packet : fec_packets) {
+ fec_packet->SetSequenceNumber(seq_num++);
+ EXPECT_EQ(kFecPayloadType, fec_packet->data()[kShortRtpHeaderLength]);
+ }
+}
+
+TEST_F(UlpfecGeneratorTest, UpdatesProtectionParameters) {
+ const FecProtectionParams kKeyFrameParams = {25, /*max_fec_frames=*/2,
+ kFecMaskRandom};
+ const FecProtectionParams kDeltaFrameParams = {25, /*max_fec_frames=*/5,
+ kFecMaskRandom};
+
+ ulpfec_generator_.SetProtectionParameters(kDeltaFrameParams, kKeyFrameParams);
+
+ // No params applied yet.
+ EXPECT_EQ(ulpfec_generator_.CurrentParams().max_fec_frames, 0);
+
+ // Helper function to add a single-packet frame marked as either a
+ // key-frame or a delta-frame.
+ auto add_frame = [&](bool is_keyframe) {
+ packet_generator_.NewFrame(1);
+ std::unique_ptr<AugmentedPacket> packet =
+ packet_generator_.NextPacket(0, 10);
+ RtpPacketToSend rtp_packet(nullptr);
+ EXPECT_TRUE(rtp_packet.Parse(packet->data.data(), packet->data.size()));
+ rtp_packet.set_is_key_frame(is_keyframe);
+ ulpfec_generator_.AddPacketAndGenerateFec(rtp_packet);
+ };
+
+ // Add key-frame; key-frame params should apply, with no FEC generated yet.
+ add_frame(true);
+ EXPECT_EQ(ulpfec_generator_.CurrentParams().max_fec_frames, 2);
+ EXPECT_TRUE(ulpfec_generator_.GetFecPackets().empty());
+
+ // Add delta-frame, which generates a FEC packet. Params will not be updated
+ // until the next packet is added, though.
+ add_frame(false);
+ EXPECT_EQ(ulpfec_generator_.CurrentParams().max_fec_frames, 2);
+ EXPECT_FALSE(ulpfec_generator_.GetFecPackets().empty());
+
+ // Add delta-frame, now params get updated.
+ add_frame(false);
+ EXPECT_EQ(ulpfec_generator_.CurrentParams().max_fec_frames, 5);
+ EXPECT_TRUE(ulpfec_generator_.GetFecPackets().empty());
+
+ // Add yet another delta-frame.
+ add_frame(false);
+ EXPECT_EQ(ulpfec_generator_.CurrentParams().max_fec_frames, 5);
+ EXPECT_TRUE(ulpfec_generator_.GetFecPackets().empty());
+
+ // Add key-frame; params immediately switch to the key-frame ones. The two
+ // buffered frames plus the key-frame are protected and FEC is emitted, even
+ // though the frame count is technically over the key-frame frame count
+ // threshold.
+ add_frame(true);
+ EXPECT_EQ(ulpfec_generator_.CurrentParams().max_fec_frames, 2);
+ EXPECT_FALSE(ulpfec_generator_.GetFecPackets().empty());
+}
+
+} // namespace webrtc
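
The tests above drive the generator through a three-step protocol: set protection parameters, add media packets, and poll for FEC output, which becomes available once a frame-ending packet (marker bit set) has been added. A minimal sketch of that calling pattern follows; it is illustrative and not part of this patch, and the packet source plus sequence-number bookkeeping are simplified stand-ins for what the real sender does in production:

    #include <cstdint>
    #include <memory>
    #include <vector>

    #include "modules/include/module_fec_types.h"
    #include "modules/rtp_rtcp/source/rtp_packet_to_send.h"
    #include "modules/rtp_rtcp/source/ulpfec_generator.h"

    // Sketch: protect a stream of media packets with ULPFEC.
    void ProtectStream(
        webrtc::UlpfecGenerator& generator,
        const std::vector<webrtc::RtpPacketToSend>& media_packets,
        uint16_t next_seq_num) {
      // fec_rate 15/255, emit FEC after at most 3 frames, random mask type.
      webrtc::FecProtectionParams params = {15, 3, webrtc::kFecMaskRandom};
      generator.SetProtectionParameters(/*delta_params=*/params,
                                        /*key_params=*/params);
      for (const webrtc::RtpPacketToSend& packet : media_packets) {
        generator.AddPacketAndGenerateFec(packet);
        for (std::unique_ptr<webrtc::RtpPacketToSend>& fec :
             generator.GetFecPackets()) {
          // The generator leaves sequence numbers unset; assign them before
          // handing the packets to the pacer.
          fec->SetSequenceNumber(next_seq_num++);
        }
      }
    }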
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/ulpfec_header_reader_writer.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/ulpfec_header_reader_writer.cc
new file mode 100644
index 0000000000..8378a8f19f
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/ulpfec_header_reader_writer.cc
@@ -0,0 +1,141 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/ulpfec_header_reader_writer.h"
+
+#include <string.h>
+
+#include "api/scoped_refptr.h"
+#include "modules/rtp_rtcp/source/byte_io.h"
+#include "modules/rtp_rtcp/source/forward_error_correction_internal.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+namespace {
+
+// Maximum number of media packets that can be protected in one batch.
+constexpr size_t kMaxMediaPackets = 48;
+
+// Maximum number of media packets tracked by the FEC decoder.
+// Maintain a tracking window sufficiently larger than `kMaxMediaPackets`
+// to account for packet reordering in the pacer/network.
+constexpr size_t kMaxTrackedMediaPackets = 4 * kMaxMediaPackets;
+
+// Maximum number of FEC packets stored inside ForwardErrorCorrection.
+constexpr size_t kMaxFecPackets = kMaxMediaPackets;
+
+// FEC Level 0 header size in bytes.
+constexpr size_t kFecLevel0HeaderSize = 10;
+
+// FEC Level 1 (ULP) header size in bytes (L bit is set).
+constexpr size_t kFecLevel1HeaderSizeLBitSet = 2 + kUlpfecPacketMaskSizeLBitSet;
+
+// FEC Level 1 (ULP) header size in bytes (L bit is cleared).
+constexpr size_t kFecLevel1HeaderSizeLBitClear =
+ 2 + kUlpfecPacketMaskSizeLBitClear;
+
+constexpr size_t kPacketMaskOffset = kFecLevel0HeaderSize + 2;
+
+size_t UlpfecHeaderSize(size_t packet_mask_size) {
+ RTC_DCHECK_LE(packet_mask_size, kUlpfecPacketMaskSizeLBitSet);
+ if (packet_mask_size <= kUlpfecPacketMaskSizeLBitClear) {
+ return kFecLevel0HeaderSize + kFecLevel1HeaderSizeLBitClear;
+ } else {
+ return kFecLevel0HeaderSize + kFecLevel1HeaderSizeLBitSet;
+ }
+}
+
+} // namespace
+
+UlpfecHeaderReader::UlpfecHeaderReader()
+ : FecHeaderReader(kMaxTrackedMediaPackets, kMaxFecPackets) {}
+
+UlpfecHeaderReader::~UlpfecHeaderReader() = default;
+
+bool UlpfecHeaderReader::ReadFecHeader(
+ ForwardErrorCorrection::ReceivedFecPacket* fec_packet) const {
+ uint8_t* data = fec_packet->pkt->data.MutableData();
+ if (fec_packet->pkt->data.size() < kPacketMaskOffset) {
+ return false; // Truncated packet.
+ }
+ bool l_bit = (data[0] & 0x40) != 0u;
+ size_t packet_mask_size =
+ l_bit ? kUlpfecPacketMaskSizeLBitSet : kUlpfecPacketMaskSizeLBitClear;
+ fec_packet->fec_header_size = UlpfecHeaderSize(packet_mask_size);
+ uint16_t seq_num_base = ByteReader<uint16_t>::ReadBigEndian(&data[2]);
+ fec_packet->protected_ssrc = fec_packet->ssrc; // Due to RED.
+ fec_packet->seq_num_base = seq_num_base;
+ fec_packet->packet_mask_offset = kPacketMaskOffset;
+ fec_packet->packet_mask_size = packet_mask_size;
+ fec_packet->protection_length =
+ ByteReader<uint16_t>::ReadBigEndian(&data[10]);
+
+ // Store length recovery field in temporary location in header.
+ // This makes the header "compatible" with the corresponding
+ // FlexFEC location of the length recovery field, thus simplifying
+ // the XORing operations.
+ memcpy(&data[2], &data[8], 2);
+
+ return true;
+}
+
+UlpfecHeaderWriter::UlpfecHeaderWriter()
+ : FecHeaderWriter(kMaxMediaPackets,
+ kMaxFecPackets,
+ kFecLevel0HeaderSize + kFecLevel1HeaderSizeLBitSet) {}
+
+UlpfecHeaderWriter::~UlpfecHeaderWriter() = default;
+
+// TODO(brandtr): Consider updating this implementation (which actually
+// returns a bound on the sequence number spread), if logic is added to
+// UlpfecHeaderWriter::FinalizeFecHeader to truncate packet masks which end
+// in a string of zeroes. (Similar to how it is done in the FlexFEC case.)
+size_t UlpfecHeaderWriter::MinPacketMaskSize(const uint8_t* packet_mask,
+ size_t packet_mask_size) const {
+ return packet_mask_size;
+}
+
+size_t UlpfecHeaderWriter::FecHeaderSize(size_t packet_mask_size) const {
+ return UlpfecHeaderSize(packet_mask_size);
+}
+
+void UlpfecHeaderWriter::FinalizeFecHeader(
+ uint32_t /* media_ssrc */,
+ uint16_t seq_num_base,
+ const uint8_t* packet_mask,
+ size_t packet_mask_size,
+ ForwardErrorCorrection::Packet* fec_packet) const {
+ uint8_t* data = fec_packet->data.MutableData();
+ // Set E bit to zero.
+ data[0] &= 0x7f;
+ // Set L bit based on packet mask size. (Note that the packet mask size
+ // can only take on two discrete values.)
+ bool l_bit = (packet_mask_size == kUlpfecPacketMaskSizeLBitSet);
+ if (l_bit) {
+ data[0] |= 0x40; // Set the L bit.
+ } else {
+ RTC_DCHECK_EQ(packet_mask_size, kUlpfecPacketMaskSizeLBitClear);
+ data[0] &= 0xbf; // Clear the L bit.
+ }
+ // Copy length recovery field from temporary location.
+ memcpy(&data[8], &data[2], 2);
+ // Write sequence number base.
+ ByteWriter<uint16_t>::WriteBigEndian(&data[2], seq_num_base);
+ // Protection length is set to cover the entire packet. (This is not
+ // required in general.)
+ const size_t fec_header_size = FecHeaderSize(packet_mask_size);
+ ByteWriter<uint16_t>::WriteBigEndian(
+ &data[10], fec_packet->data.size() - fec_header_size);
+ // Copy the packet mask.
+ memcpy(&data[12], packet_mask, packet_mask_size);
+}
+
+} // namespace webrtc
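
Since the packet mask size can only take two values, `UlpfecHeaderSize` has exactly two possible results. A worked check of that arithmetic, assuming the usual WebRTC mask sizes of 2 bytes (L bit clear) and 6 bytes (L bit set); these figures match the 14- and 18-byte totals asserted in the unit test further down:

    #include <cstddef>

    // Illustrative only: FEC Level 0 header (10 bytes) plus the Level 1
    // header (2 bytes of protection length plus the packet mask).
    constexpr size_t kLevel0HeaderSize = 10;
    constexpr size_t kProtectionLengthField = 2;
    static_assert(kLevel0HeaderSize + kProtectionLengthField + 2 == 14,
                  "Total ULPFEC header size with the L bit cleared");
    static_assert(kLevel0HeaderSize + kProtectionLengthField + 6 == 18,
                  "Total ULPFEC header size with the L bit set");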
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/ulpfec_header_reader_writer.h b/third_party/libwebrtc/modules/rtp_rtcp/source/ulpfec_header_reader_writer.h
new file mode 100644
index 0000000000..a8bb737dbb
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/ulpfec_header_reader_writer.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_RTP_RTCP_SOURCE_ULPFEC_HEADER_READER_WRITER_H_
+#define MODULES_RTP_RTCP_SOURCE_ULPFEC_HEADER_READER_WRITER_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "modules/rtp_rtcp/source/forward_error_correction.h"
+
+namespace webrtc {
+
+// FEC Level 0 Header, 10 bytes.
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |E|L|P|X| CC |M| PT recovery | SN base |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | TS recovery |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | length recovery |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//
+// FEC Level 1 Header, 4 bytes (L = 0) or 8 bytes (L = 1).
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | Protection Length | mask |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | mask cont. (present only when L = 1) |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+class UlpfecHeaderReader : public FecHeaderReader {
+ public:
+ UlpfecHeaderReader();
+ ~UlpfecHeaderReader() override;
+
+ bool ReadFecHeader(
+ ForwardErrorCorrection::ReceivedFecPacket* fec_packet) const override;
+};
+
+class UlpfecHeaderWriter : public FecHeaderWriter {
+ public:
+ UlpfecHeaderWriter();
+ ~UlpfecHeaderWriter() override;
+
+ size_t MinPacketMaskSize(const uint8_t* packet_mask,
+ size_t packet_mask_size) const override;
+
+ size_t FecHeaderSize(size_t packet_mask_row_size) const override;
+
+ void FinalizeFecHeader(
+ uint32_t media_ssrc, // Unused by ULPFEC.
+ uint16_t seq_num_base,
+ const uint8_t* packet_mask,
+ size_t packet_mask_size,
+ ForwardErrorCorrection::Packet* fec_packet) const override;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_RTP_RTCP_SOURCE_ULPFEC_HEADER_READER_WRITER_H_
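
The diagrams above map onto straightforward big-endian reads. A minimal, self-contained sketch of decoding the fixed Level 0 fields (validation omitted; the production path is UlpfecHeaderReader::ReadFecHeader):

    #include <cstdint>

    // Sketch: decode the FEC Level 0 header fields documented above.
    // `data` must point at at least 10 readable bytes.
    struct FecLevel0Fields {
      bool e_bit;
      bool l_bit;
      uint16_t seq_num_base;
      uint32_t ts_recovery;
      uint16_t length_recovery;
    };

    FecLevel0Fields ParseFecLevel0(const uint8_t* data) {
      FecLevel0Fields f;
      f.e_bit = (data[0] & 0x80) != 0;
      f.l_bit = (data[0] & 0x40) != 0;
      f.seq_num_base = (uint16_t{data[2]} << 8) | data[3];
      f.ts_recovery = (uint32_t{data[4]} << 24) | (uint32_t{data[5]} << 16) |
                      (uint32_t{data[6]} << 8) | data[7];
      f.length_recovery = (uint16_t{data[8]} << 8) | data[9];
      return f;
    }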
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/ulpfec_header_reader_writer_unittest.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/ulpfec_header_reader_writer_unittest.cc
new file mode 100644
index 0000000000..a190a548e4
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/ulpfec_header_reader_writer_unittest.cc
@@ -0,0 +1,245 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/ulpfec_header_reader_writer.h"
+
+#include <string.h>
+
+#include <memory>
+#include <utility>
+
+#include "api/scoped_refptr.h"
+#include "modules/rtp_rtcp/source/byte_io.h"
+#include "modules/rtp_rtcp/source/forward_error_correction.h"
+#include "modules/rtp_rtcp/source/forward_error_correction_internal.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/random.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+namespace {
+
+using Packet = ForwardErrorCorrection::Packet;
+using ReceivedFecPacket = ForwardErrorCorrection::ReceivedFecPacket;
+
+constexpr uint32_t kMediaSsrc = 1254983;
+constexpr uint16_t kMediaStartSeqNum = 825;
+constexpr size_t kMediaPacketLength = 1234;
+
+constexpr size_t kUlpfecHeaderSizeLBitClear = 14;
+constexpr size_t kUlpfecHeaderSizeLBitSet = 18;
+constexpr size_t kUlpfecPacketMaskOffset = 12;
+
+std::unique_ptr<uint8_t[]> GeneratePacketMask(size_t packet_mask_size,
+ uint64_t seed) {
+ Random random(seed);
+ std::unique_ptr<uint8_t[]> packet_mask(new uint8_t[packet_mask_size]);
+ for (size_t i = 0; i < packet_mask_size; ++i) {
+ packet_mask[i] = random.Rand<uint8_t>();
+ }
+ return packet_mask;
+}
+
+std::unique_ptr<Packet> WriteHeader(const uint8_t* packet_mask,
+ size_t packet_mask_size) {
+ UlpfecHeaderWriter writer;
+ std::unique_ptr<Packet> written_packet(new Packet());
+ written_packet->data.SetSize(kMediaPacketLength);
+ uint8_t* data = written_packet->data.MutableData();
+ for (size_t i = 0; i < written_packet->data.size(); ++i) {
+ data[i] = i; // Actual content doesn't matter.
+ }
+ writer.FinalizeFecHeader(kMediaSsrc, kMediaStartSeqNum, packet_mask,
+ packet_mask_size, written_packet.get());
+ return written_packet;
+}
+
+std::unique_ptr<ReceivedFecPacket> ReadHeader(const Packet& written_packet) {
+ UlpfecHeaderReader reader;
+ std::unique_ptr<ReceivedFecPacket> read_packet(new ReceivedFecPacket());
+ read_packet->ssrc = kMediaSsrc;
+ read_packet->pkt = rtc::scoped_refptr<Packet>(new Packet());
+ read_packet->pkt->data = written_packet.data;
+ EXPECT_TRUE(reader.ReadFecHeader(read_packet.get()));
+ return read_packet;
+}
+
+void VerifyHeaders(size_t expected_fec_header_size,
+ const uint8_t* expected_packet_mask,
+ size_t expected_packet_mask_size,
+ const Packet& written_packet,
+ const ReceivedFecPacket& read_packet) {
+ EXPECT_EQ(kMediaSsrc, read_packet.ssrc);
+ EXPECT_EQ(expected_fec_header_size, read_packet.fec_header_size);
+ EXPECT_EQ(kMediaSsrc, read_packet.protected_ssrc);
+ EXPECT_EQ(kMediaStartSeqNum, read_packet.seq_num_base);
+ EXPECT_EQ(kUlpfecPacketMaskOffset, read_packet.packet_mask_offset);
+ ASSERT_EQ(expected_packet_mask_size, read_packet.packet_mask_size);
+ EXPECT_EQ(written_packet.data.size() - expected_fec_header_size,
+ read_packet.protection_length);
+ EXPECT_EQ(0, memcmp(expected_packet_mask,
+ read_packet.pkt->data.MutableData() +
+ read_packet.packet_mask_offset,
+ read_packet.packet_mask_size));
+ // Verify that the call to ReadFecHeader did not tamper with the payload.
+ EXPECT_EQ(0, memcmp(written_packet.data.data() + expected_fec_header_size,
+ read_packet.pkt->data.cdata() + expected_fec_header_size,
+ written_packet.data.size() - expected_fec_header_size));
+}
+
+} // namespace
+
+TEST(UlpfecHeaderReaderTest, ReadsSmallHeader) {
+ const uint8_t packet[] = {
+ 0x00, 0x12, 0xab, 0xcd, // L bit clear, "random" payload type and SN base
+ 0x12, 0x34, 0x56, 0x78, // "random" TS recovery
+ 0xab, 0xcd, 0x11, 0x22, // "random" length recovery and protection length
+ 0x33, 0x44, // "random" packet mask
+ 0x00, 0x00, 0x00, 0x00 // payload
+ };
+ const size_t packet_length = sizeof(packet);
+ ReceivedFecPacket read_packet;
+ read_packet.pkt = rtc::scoped_refptr<Packet>(new Packet());
+ read_packet.pkt->data.SetData(packet, packet_length);
+
+ UlpfecHeaderReader reader;
+ EXPECT_TRUE(reader.ReadFecHeader(&read_packet));
+
+ EXPECT_EQ(14U, read_packet.fec_header_size);
+ EXPECT_EQ(0xabcdU, read_packet.seq_num_base);
+ EXPECT_EQ(12U, read_packet.packet_mask_offset);
+ EXPECT_EQ(2U, read_packet.packet_mask_size);
+ EXPECT_EQ(0x1122U, read_packet.protection_length);
+}
+
+TEST(UlpfecHeaderReaderTest, ReadsLargeHeader) {
+ const uint8_t packet[] = {
+ 0x40, 0x12, 0xab, 0xcd, // L bit set, "random" payload type and SN base
+ 0x12, 0x34, 0x56, 0x78, // "random" TS recovery
+ 0xab, 0xcd, 0x11, 0x22, // "random" length recovery and protection length
+ 0x33, 0x44, 0x55, 0x66, // "random" packet mask
+ 0x77, 0x88, //
+ 0x00, 0x00, 0x00, 0x00 // payload
+ };
+ const size_t packet_length = sizeof(packet);
+ ReceivedFecPacket read_packet;
+ read_packet.pkt = rtc::scoped_refptr<Packet>(new Packet());
+ read_packet.pkt->data.SetData(packet, packet_length);
+
+ UlpfecHeaderReader reader;
+ EXPECT_TRUE(reader.ReadFecHeader(&read_packet));
+
+ EXPECT_EQ(18U, read_packet.fec_header_size);
+ EXPECT_EQ(0xabcdU, read_packet.seq_num_base);
+ EXPECT_EQ(12U, read_packet.packet_mask_offset);
+ EXPECT_EQ(6U, read_packet.packet_mask_size);
+ EXPECT_EQ(0x1122U, read_packet.protection_length);
+}
+
+TEST(UlpfecHeaderWriterTest, FinalizesSmallHeader) {
+ const size_t packet_mask_size = kUlpfecPacketMaskSizeLBitClear;
+ auto packet_mask = GeneratePacketMask(packet_mask_size, 0xabcd);
+ Packet written_packet;
+ written_packet.data.SetSize(kMediaPacketLength);
+ uint8_t* data = written_packet.data.MutableData();
+ for (size_t i = 0; i < written_packet.data.size(); ++i) {
+ data[i] = i;
+ }
+
+ UlpfecHeaderWriter writer;
+ writer.FinalizeFecHeader(kMediaSsrc, kMediaStartSeqNum, packet_mask.get(),
+ packet_mask_size, &written_packet);
+
+ const uint8_t* packet = written_packet.data.cdata();
+ EXPECT_EQ(0x00, packet[0] & 0x80); // E bit.
+ EXPECT_EQ(0x00, packet[0] & 0x40); // L bit.
+ EXPECT_EQ(kMediaStartSeqNum, ByteReader<uint16_t>::ReadBigEndian(packet + 2));
+ EXPECT_EQ(
+ static_cast<uint16_t>(kMediaPacketLength - kUlpfecHeaderSizeLBitClear),
+ ByteReader<uint16_t>::ReadBigEndian(packet + 10));
+ EXPECT_EQ(0, memcmp(packet + kUlpfecPacketMaskOffset, packet_mask.get(),
+ packet_mask_size));
+}
+
+TEST(UlpfecHeaderWriterTest, FinalizesLargeHeader) {
+ const size_t packet_mask_size = kUlpfecPacketMaskSizeLBitSet;
+ auto packet_mask = GeneratePacketMask(packet_mask_size, 0xabcd);
+ Packet written_packet;
+ written_packet.data.SetSize(kMediaPacketLength);
+ uint8_t* data = written_packet.data.MutableData();
+ for (size_t i = 0; i < written_packet.data.size(); ++i) {
+ data[i] = i;
+ }
+
+ UlpfecHeaderWriter writer;
+ writer.FinalizeFecHeader(kMediaSsrc, kMediaStartSeqNum, packet_mask.get(),
+ packet_mask_size, &written_packet);
+
+ const uint8_t* packet = written_packet.data.cdata();
+ EXPECT_EQ(0x00, packet[0] & 0x80); // E bit.
+ EXPECT_EQ(0x40, packet[0] & 0x40); // L bit.
+ EXPECT_EQ(kMediaStartSeqNum, ByteReader<uint16_t>::ReadBigEndian(packet + 2));
+ EXPECT_EQ(
+ static_cast<uint16_t>(kMediaPacketLength - kUlpfecHeaderSizeLBitSet),
+ ByteReader<uint16_t>::ReadBigEndian(packet + 10));
+ EXPECT_EQ(0, memcmp(packet + kUlpfecPacketMaskOffset, packet_mask.get(),
+ packet_mask_size));
+}
+
+TEST(UlpfecHeaderWriterTest, CalculateSmallHeaderSize) {
+ const size_t packet_mask_size = kUlpfecPacketMaskSizeLBitClear;
+ auto packet_mask = GeneratePacketMask(packet_mask_size, 0xabcd);
+
+ UlpfecHeaderWriter writer;
+ size_t min_packet_mask_size =
+ writer.MinPacketMaskSize(packet_mask.get(), packet_mask_size);
+
+ EXPECT_EQ(kUlpfecPacketMaskSizeLBitClear, min_packet_mask_size);
+ EXPECT_EQ(kUlpfecHeaderSizeLBitClear,
+ writer.FecHeaderSize(min_packet_mask_size));
+}
+
+TEST(UlpfecHeaderWriterTest, CalculateLargeHeaderSize) {
+ const size_t packet_mask_size = kUlpfecPacketMaskSizeLBitSet;
+ auto packet_mask = GeneratePacketMask(packet_mask_size, 0xabcd);
+
+ UlpfecHeaderWriter writer;
+ size_t min_packet_mask_size =
+ writer.MinPacketMaskSize(packet_mask.get(), packet_mask_size);
+
+ EXPECT_EQ(kUlpfecPacketMaskSizeLBitSet, min_packet_mask_size);
+ EXPECT_EQ(kUlpfecHeaderSizeLBitSet,
+ writer.FecHeaderSize(min_packet_mask_size));
+}
+
+TEST(UlpfecHeaderReaderWriterTest, WriteAndReadSmallHeader) {
+ const size_t packet_mask_size = kUlpfecPacketMaskSizeLBitClear;
+ auto packet_mask = GeneratePacketMask(packet_mask_size, 0xabcd);
+
+ auto written_packet = WriteHeader(packet_mask.get(), packet_mask_size);
+ auto read_packet = ReadHeader(*written_packet);
+
+ VerifyHeaders(kUlpfecHeaderSizeLBitClear, packet_mask.get(), packet_mask_size,
+ *written_packet, *read_packet);
+}
+
+TEST(UlpfecHeaderReaderWriterTest, WriteAndReadLargeHeader) {
+ const size_t packet_mask_size = kUlpfecPacketMaskSizeLBitSet;
+ auto packet_mask = GeneratePacketMask(packet_mask_size, 0xabcd);
+
+ auto written_packet = WriteHeader(packet_mask.get(), packet_mask_size);
+ auto read_packet = ReadHeader(*written_packet);
+
+ VerifyHeaders(kUlpfecHeaderSizeLBitSet, packet_mask.get(), packet_mask_size,
+ *written_packet, *read_packet);
+}
+
+} // namespace webrtc
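
One detail the round-trip tests above depend on: ReadFecHeader stashes the length-recovery field (bytes 8-9) over the SN-base slot (bytes 2-3) so the buffer matches FlexFEC's layout during XOR recovery, and FinalizeFecHeader performs the mirror-image copy before writing the real SN base. A schematic of that two-byte shuffle, extracted purely for illustration:

    #include <cstdint>
    #include <cstring>

    // Reader side: after this, bytes 2-3 hold the length recovery value.
    void StashLengthRecovery(uint8_t* ulpfec_header) {
      memcpy(&ulpfec_header[2], &ulpfec_header[8], 2);
    }

    // Writer side: move the temporarily stored value to its wire position,
    // freeing bytes 2-3 for the sequence number base.
    void UnstashLengthRecovery(uint8_t* ulpfec_header) {
      memcpy(&ulpfec_header[8], &ulpfec_header[2], 2);
    }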
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/ulpfec_receiver.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/ulpfec_receiver.cc
new file mode 100644
index 0000000000..4090d99e8d
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/ulpfec_receiver.cc
@@ -0,0 +1,249 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/ulpfec_receiver.h"
+
+#include <memory>
+#include <utility>
+
+#include "api/scoped_refptr.h"
+#include "modules/rtp_rtcp/source/rtp_packet_received.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/time_utils.h"
+#include "system_wrappers/include/metrics.h"
+
+namespace webrtc {
+
+UlpfecReceiver::UlpfecReceiver(uint32_t ssrc,
+ int ulpfec_payload_type,
+ RecoveredPacketReceiver* callback,
+ rtc::ArrayView<const RtpExtension> extensions,
+ Clock* clock)
+ : ssrc_(ssrc),
+ ulpfec_payload_type_(ulpfec_payload_type),
+ clock_(clock),
+ extensions_(extensions),
+ recovered_packet_callback_(callback),
+ fec_(ForwardErrorCorrection::CreateUlpfec(ssrc_)) {
+ // TODO(tommi, brandtr): Once considerations for RED have been split
+ // away from this implementation, we can require the ulpfec payload type
+ // to always be valid and use uint8 for storage (as is done elsewhere).
+ RTC_DCHECK_GE(ulpfec_payload_type_, -1);
+}
+
+UlpfecReceiver::~UlpfecReceiver() {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+
+ if (packet_counter_.first_packet_time != Timestamp::MinusInfinity()) {
+ const Timestamp now = clock_->CurrentTime();
+ TimeDelta elapsed = (now - packet_counter_.first_packet_time);
+ if (elapsed.seconds() >= metrics::kMinRunTimeInSeconds) {
+ if (packet_counter_.num_packets > 0) {
+ RTC_HISTOGRAM_PERCENTAGE(
+ "WebRTC.Video.ReceivedFecPacketsInPercent",
+ static_cast<int>(packet_counter_.num_fec_packets * 100 /
+ packet_counter_.num_packets));
+ }
+ if (packet_counter_.num_fec_packets > 0) {
+ RTC_HISTOGRAM_PERCENTAGE(
+ "WebRTC.Video.RecoveredMediaPacketsInPercentOfFec",
+ static_cast<int>(packet_counter_.num_recovered_packets * 100 /
+ packet_counter_.num_fec_packets));
+ }
+ if (ulpfec_payload_type_ != -1) {
+ RTC_HISTOGRAM_COUNTS_10000(
+ "WebRTC.Video.FecBitrateReceivedInKbps",
+ static_cast<int>(packet_counter_.num_bytes * 8 / elapsed.seconds() /
+ 1000));
+ }
+ }
+ }
+
+ received_packets_.clear();
+ fec_->ResetState(&recovered_packets_);
+}
+
+FecPacketCounter UlpfecReceiver::GetPacketCounter() const {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+ return packet_counter_;
+}
+
+void UlpfecReceiver::SetRtpExtensions(
+ rtc::ArrayView<const RtpExtension> extensions) {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+ extensions_.Reset(extensions);
+}
+
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |F| block PT | timestamp offset | block length |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//
+//
+// RFC 2198 RTP Payload for Redundant Audio Data September 1997
+//
+// The bits in the header are specified as follows:
+//
+// F: 1 bit First bit in header indicates whether another header block
+// follows. If 1 further header blocks follow, if 0 this is the
+// last header block.
+// If 0, there is only a 1-byte RED header
+//
+// block PT: 7 bits RTP payload type for this block.
+//
+// timestamp offset: 14 bits Unsigned offset of timestamp of this block
+// relative to timestamp given in RTP header. The use of an unsigned
+// offset implies that redundant data must be sent after the primary
+// data, and is hence a time to be subtracted from the current
+// timestamp to determine the timestamp of the data for which this
+// block is the redundancy.
+//
+// block length: 10 bits Length in bytes of the corresponding data
+// block excluding header.
+
+bool UlpfecReceiver::AddReceivedRedPacket(const RtpPacketReceived& rtp_packet) {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+ // TODO(bugs.webrtc.org/11993): We get here via Call::DeliverRtp, so should be
+ // moved to the network thread.
+
+ if (rtp_packet.Ssrc() != ssrc_) {
+ RTC_LOG(LS_WARNING)
+ << "Received RED packet with different SSRC than expected; dropping.";
+ return false;
+ }
+ if (rtp_packet.size() > IP_PACKET_SIZE) {
+ RTC_LOG(LS_WARNING) << "Received RED packet with length exceeds maximum IP "
+ "packet size; dropping.";
+ return false;
+ }
+
+ static constexpr uint8_t kRedHeaderLength = 1;
+
+ if (rtp_packet.payload_size() == 0) {
+ RTC_LOG(LS_WARNING) << "Corrupt/truncated FEC packet.";
+ return false;
+ }
+
+ // Remove RED header of incoming packet and store as a virtual RTP packet.
+ auto received_packet =
+ std::make_unique<ForwardErrorCorrection::ReceivedPacket>();
+ received_packet->pkt = new ForwardErrorCorrection::Packet();
+
+ // Get payload type from RED header and sequence number from RTP header.
+ uint8_t payload_type = rtp_packet.payload()[0] & 0x7f;
+ received_packet->is_fec = payload_type == ulpfec_payload_type_;
+ received_packet->is_recovered = rtp_packet.recovered();
+ received_packet->ssrc = rtp_packet.Ssrc();
+ received_packet->seq_num = rtp_packet.SequenceNumber();
+
+ if (rtp_packet.payload()[0] & 0x80) {
+ // F bit set in RED header, i.e. there is more than one RED header block.
+ // WebRTC never generates multiple blocks in a RED packet for FEC.
+ RTC_LOG(LS_WARNING) << "More than 1 block in RED packet is not supported.";
+ return false;
+ }
+
+ ++packet_counter_.num_packets;
+ packet_counter_.num_bytes += rtp_packet.size();
+ if (packet_counter_.first_packet_time == Timestamp::MinusInfinity()) {
+ packet_counter_.first_packet_time = clock_->CurrentTime();
+ }
+
+ if (received_packet->is_fec) {
+ ++packet_counter_.num_fec_packets;
+ // Everything behind the RED header.
+ received_packet->pkt->data =
+ rtp_packet.Buffer().Slice(rtp_packet.headers_size() + kRedHeaderLength,
+ rtp_packet.payload_size() - kRedHeaderLength);
+ } else {
+ received_packet->pkt->data.EnsureCapacity(rtp_packet.size() -
+ kRedHeaderLength);
+ // Copy RTP header.
+ received_packet->pkt->data.SetData(rtp_packet.data(),
+ rtp_packet.headers_size());
+ // Set payload type.
+ uint8_t& payload_type_byte = received_packet->pkt->data.MutableData()[1];
+ payload_type_byte &= 0x80; // Reset RED payload type.
+ payload_type_byte += payload_type; // Set media payload type.
+ // Copy payload and padding data, after the RED header.
+ received_packet->pkt->data.AppendData(
+ rtp_packet.data() + rtp_packet.headers_size() + kRedHeaderLength,
+ rtp_packet.size() - rtp_packet.headers_size() - kRedHeaderLength);
+ }
+
+ if (received_packet->pkt->data.size() > 0) {
+ received_packets_.push_back(std::move(received_packet));
+ }
+ return true;
+}
+
+void UlpfecReceiver::ProcessReceivedFec() {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+
+ // If we iterate over `received_packets_` and it contains a packet that
+ // causes us to recurse back into this function (for example a RED packet
+ // encapsulating a RED packet), then we will recurse forever. To avoid this
+ // we swap `received_packets_` with an empty vector so that the next
+ // recursive call won't iterate over the same packet again. This also solves
+ // the problem of not modifying the vector we are currently iterating over
+ // (packets are added in AddReceivedRedPacket).
+ std::vector<std::unique_ptr<ForwardErrorCorrection::ReceivedPacket>>
+ received_packets;
+ received_packets.swap(received_packets_);
+
+ for (const auto& received_packet : received_packets) {
+ // Send received media packet to VCM.
+ if (!received_packet->is_fec) {
+ ForwardErrorCorrection::Packet* packet = received_packet->pkt.get();
+ recovered_packet_callback_->OnRecoveredPacket(packet->data.data(),
+ packet->data.size());
+ // Create a packet with the buffer to modify it.
+ RtpPacketReceived rtp_packet;
+ const uint8_t* const original_data = packet->data.cdata();
+ if (!rtp_packet.Parse(packet->data)) {
+ RTC_LOG(LS_WARNING) << "Corrupted media packet";
+ } else {
+ rtp_packet.IdentifyExtensions(extensions_);
+ // Reset buffer reference, so zeroing would work on a buffer with a
+ // single reference.
+ packet->data = rtc::CopyOnWriteBuffer(0);
+ rtp_packet.ZeroMutableExtensions();
+ packet->data = rtp_packet.Buffer();
+ // Ensure that zeroing of extensions was done in place.
+ RTC_DCHECK_EQ(packet->data.cdata(), original_data);
+ }
+ }
+ if (!received_packet->is_recovered) {
+ // Do not pass recovered packets to FEC. A recovered packet might have a
+ // different set of RTP header extensions, and thus a different byte
+ // representation, than the original packet. That would corrupt the
+ // FEC calculation.
+ fec_->DecodeFec(*received_packet, &recovered_packets_);
+ }
+ }
+
+ // Send any recovered media packets to VCM.
+ for (const auto& recovered_packet : recovered_packets_) {
+ if (recovered_packet->returned) {
+ // Already sent to the VCM and the jitter buffer.
+ continue;
+ }
+ ForwardErrorCorrection::Packet* packet = recovered_packet->pkt.get();
+ ++packet_counter_.num_recovered_packets;
+ // Set this flag first; in case the recovered packet carries a RED
+ // header, OnRecoveredPacket will recurse back here.
+ recovered_packet->returned = true;
+ recovered_packet_callback_->OnRecoveredPacket(packet->data.data(),
+ packet->data.size());
+ }
+}
+
+} // namespace webrtc
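
When the F bit is clear, the RFC 2198 framing quoted above collapses to a single header byte, which is the only form this receiver accepts. A small sketch of that classification, using the same masks as AddReceivedRedPacket (0x80 for F, 0x7f for the block payload type):

    #include <cstdint>

    // Sketch: classify the first payload byte of a RED packet. For example,
    // 0x60 has F = 0 and block PT = 96, so with a ULPFEC payload type of 96
    // the enclosed block is treated as FEC rather than media.
    struct RedHeader {
      bool f_bit;        // Set if further header blocks follow.
      uint8_t block_pt;  // RTP payload type of the enclosed block.
    };

    RedHeader ParseRedHeaderByte(uint8_t first_payload_byte) {
      return {(first_payload_byte & 0x80) != 0,
              static_cast<uint8_t>(first_payload_byte & 0x7f)};
    }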
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/ulpfec_receiver.h b/third_party/libwebrtc/modules/rtp_rtcp/source/ulpfec_receiver.h
new file mode 100644
index 0000000000..b8ac8d8c30
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/ulpfec_receiver.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_RTP_RTCP_SOURCE_ULPFEC_RECEIVER_H_
+#define MODULES_RTP_RTCP_SOURCE_ULPFEC_RECEIVER_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <memory>
+#include <vector>
+
+#include "api/sequence_checker.h"
+#include "modules/rtp_rtcp/include/rtp_header_extension_map.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "modules/rtp_rtcp/source/forward_error_correction.h"
+#include "modules/rtp_rtcp/source/rtp_packet_received.h"
+#include "rtc_base/system/no_unique_address.h"
+#include "system_wrappers/include/clock.h"
+
+namespace webrtc {
+
+struct FecPacketCounter {
+ FecPacketCounter() = default;
+ size_t num_packets = 0; // Number of received packets.
+ size_t num_bytes = 0;
+ size_t num_fec_packets = 0; // Number of received FEC packets.
+ // Number of recovered media packets using FEC.
+ size_t num_recovered_packets = 0;
+ // Time when first packet is received.
+ Timestamp first_packet_time = Timestamp::MinusInfinity();
+};
+
+class UlpfecReceiver {
+ public:
+ UlpfecReceiver(uint32_t ssrc,
+ int ulpfec_payload_type,
+ RecoveredPacketReceiver* callback,
+ rtc::ArrayView<const RtpExtension> extensions,
+ Clock* clock);
+ ~UlpfecReceiver();
+
+ int ulpfec_payload_type() const { return ulpfec_payload_type_; }
+
+ bool AddReceivedRedPacket(const RtpPacketReceived& rtp_packet);
+
+ void ProcessReceivedFec();
+
+ FecPacketCounter GetPacketCounter() const;
+
+ void SetRtpExtensions(rtc::ArrayView<const RtpExtension> extensions);
+
+ private:
+ const uint32_t ssrc_;
+ const int ulpfec_payload_type_;
+ Clock* const clock_;
+ RtpHeaderExtensionMap extensions_ RTC_GUARDED_BY(&sequence_checker_);
+
+ RTC_NO_UNIQUE_ADDRESS SequenceChecker sequence_checker_;
+ RecoveredPacketReceiver* const recovered_packet_callback_;
+ const std::unique_ptr<ForwardErrorCorrection> fec_;
+ // TODO(nisse): The AddReceivedRedPacket method adds one or two packets to
+ // this list at a time, after which it is emptied by ProcessReceivedFec. It
+ // will make things simpler to merge AddReceivedRedPacket and
+ // ProcessReceivedFec into a single method, and we can then delete this list.
+ std::vector<std::unique_ptr<ForwardErrorCorrection::ReceivedPacket>>
+ received_packets_ RTC_GUARDED_BY(&sequence_checker_);
+ ForwardErrorCorrection::RecoveredPacketList recovered_packets_
+ RTC_GUARDED_BY(&sequence_checker_);
+ FecPacketCounter packet_counter_ RTC_GUARDED_BY(&sequence_checker_);
+};
+
+} // namespace webrtc
+
+#endif // MODULES_RTP_RTCP_SOURCE_ULPFEC_RECEIVER_H_
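
The interface implies a small per-packet loop on the receive side: every RED packet goes through AddReceivedRedPacket, followed by ProcessReceivedFec, so that decapsulated media and recovered packets reach the RecoveredPacketReceiver. A sketch of that loop; the surrounding plumbing is hypothetical, as the real wiring lives in the video receive stream:

    #include "modules/rtp_rtcp/source/rtp_packet_received.h"
    #include "modules/rtp_rtcp/source/ulpfec_receiver.h"

    // Sketch: drive a UlpfecReceiver with one incoming RED packet.
    void HandleRedPacket(webrtc::UlpfecReceiver& receiver,
                         const webrtc::RtpPacketReceived& red_packet) {
      if (receiver.AddReceivedRedPacket(red_packet)) {
        // Decodes FEC and forwards media plus any recovered packets through
        // the RecoveredPacketReceiver passed at construction.
        receiver.ProcessReceivedFec();
      }
    }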
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/ulpfec_receiver_unittest.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/ulpfec_receiver_unittest.cc
new file mode 100644
index 0000000000..1b0b0daf56
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/ulpfec_receiver_unittest.cc
@@ -0,0 +1,545 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/ulpfec_receiver.h"
+
+#include <string.h>
+
+#include <list>
+#include <memory>
+#include <utility>
+
+#include "modules/rtp_rtcp/mocks/mock_recovered_packet_receiver.h"
+#include "modules/rtp_rtcp/mocks/mock_rtp_rtcp.h"
+#include "modules/rtp_rtcp/source/byte_io.h"
+#include "modules/rtp_rtcp/source/fec_test_helper.h"
+#include "modules/rtp_rtcp/source/forward_error_correction.h"
+#include "modules/rtp_rtcp/source/rtp_packet_received.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+namespace {
+using ::testing::_;
+using ::testing::Args;
+using ::testing::ElementsAreArray;
+
+using test::fec::AugmentedPacket;
+using Packet = ForwardErrorCorrection::Packet;
+using test::fec::UlpfecPacketGenerator;
+
+constexpr int kFecPayloadType = 96;
+constexpr uint32_t kMediaSsrc = 835424;
+
+class NullRecoveredPacketReceiver : public RecoveredPacketReceiver {
+ public:
+ void OnRecoveredPacket(const uint8_t* packet, size_t length) override {}
+};
+
+} // namespace
+
+class UlpfecReceiverTest : public ::testing::Test {
+ protected:
+ UlpfecReceiverTest()
+ : fec_(ForwardErrorCorrection::CreateUlpfec(kMediaSsrc)),
+ receiver_fec_(kMediaSsrc,
+ kFecPayloadType,
+ &recovered_packet_receiver_,
+ {},
+ Clock::GetRealTimeClock()),
+ packet_generator_(kMediaSsrc) {}
+
+ // Generates `num_fec_packets` FEC packets, given `media_packets`.
+ void EncodeFec(const ForwardErrorCorrection::PacketList& media_packets,
+ size_t num_fec_packets,
+ std::list<ForwardErrorCorrection::Packet*>* fec_packets);
+
+ // Generates `num_media_packets` corresponding to a single frame.
+ void PacketizeFrame(size_t num_media_packets,
+ size_t frame_offset,
+ std::list<AugmentedPacket*>* augmented_packets,
+ ForwardErrorCorrection::PacketList* packets);
+
+ // Build a media packet using `packet_generator_` and add it
+ // to the receiver.
+ void BuildAndAddRedMediaPacket(AugmentedPacket* packet,
+ bool is_recovered = false);
+
+ // Build a FEC packet using `packet_generator_` and add it
+ // to the receiver.
+ void BuildAndAddRedFecPacket(Packet* packet);
+
+ // Ensure that `recovered_packet_receiver_` will be called correctly
+ // and that the recovered packet will be identical to the lost packet.
+ void VerifyReconstructedMediaPacket(const AugmentedPacket& packet,
+ size_t times);
+
+ void InjectGarbagePacketLength(size_t fec_garbage_offset);
+
+ static void SurvivesMaliciousPacket(const uint8_t* data,
+ size_t length,
+ uint8_t ulpfec_payload_type);
+
+ MockRecoveredPacketReceiver recovered_packet_receiver_;
+ std::unique_ptr<ForwardErrorCorrection> fec_;
+ UlpfecReceiver receiver_fec_;
+ UlpfecPacketGenerator packet_generator_;
+};
+
+void UlpfecReceiverTest::EncodeFec(
+ const ForwardErrorCorrection::PacketList& media_packets,
+ size_t num_fec_packets,
+ std::list<ForwardErrorCorrection::Packet*>* fec_packets) {
+ const uint8_t protection_factor =
+ num_fec_packets * 255 / media_packets.size();
+ // Unequal protection is turned off, and the number of important
+ // packets is thus irrelevant.
+ constexpr int kNumImportantPackets = 0;
+ constexpr bool kUseUnequalProtection = false;
+ constexpr FecMaskType kFecMaskType = kFecMaskBursty;
+ EXPECT_EQ(
+ 0, fec_->EncodeFec(media_packets, protection_factor, kNumImportantPackets,
+ kUseUnequalProtection, kFecMaskType, fec_packets));
+ ASSERT_EQ(num_fec_packets, fec_packets->size());
+}
+
+void UlpfecReceiverTest::PacketizeFrame(
+ size_t num_media_packets,
+ size_t frame_offset,
+ std::list<AugmentedPacket*>* augmented_packets,
+ ForwardErrorCorrection::PacketList* packets) {
+ packet_generator_.NewFrame(num_media_packets);
+ for (size_t i = 0; i < num_media_packets; ++i) {
+ std::unique_ptr<AugmentedPacket> next_packet(
+ packet_generator_.NextPacket(frame_offset + i, kRtpHeaderSize + 10));
+ augmented_packets->push_back(next_packet.get());
+ packets->push_back(std::move(next_packet));
+ }
+}
+
+void UlpfecReceiverTest::BuildAndAddRedMediaPacket(AugmentedPacket* packet,
+ bool is_recovered) {
+ RtpPacketReceived red_packet =
+ packet_generator_.BuildMediaRedPacket(*packet, is_recovered);
+ EXPECT_TRUE(receiver_fec_.AddReceivedRedPacket(red_packet));
+}
+
+void UlpfecReceiverTest::BuildAndAddRedFecPacket(Packet* packet) {
+ RtpPacketReceived red_packet =
+ packet_generator_.BuildUlpfecRedPacket(*packet);
+ EXPECT_TRUE(receiver_fec_.AddReceivedRedPacket(red_packet));
+}
+
+void UlpfecReceiverTest::VerifyReconstructedMediaPacket(
+ const AugmentedPacket& packet,
+ size_t times) {
+ // Verify that the content of the reconstructed packet is equal to the
+ // content of `packet`, and that the same content is received `times` number
+ // of times in a row.
+ EXPECT_CALL(recovered_packet_receiver_,
+ OnRecoveredPacket(_, packet.data.size()))
+ .With(
+ Args<0, 1>(ElementsAreArray(packet.data.cdata(), packet.data.size())))
+ .Times(times);
+}
+
+void UlpfecReceiverTest::InjectGarbagePacketLength(size_t fec_garbage_offset) {
+ EXPECT_CALL(recovered_packet_receiver_, OnRecoveredPacket(_, _));
+
+ const size_t kNumFecPackets = 1;
+ std::list<AugmentedPacket*> augmented_media_packets;
+ ForwardErrorCorrection::PacketList media_packets;
+ PacketizeFrame(2, 0, &augmented_media_packets, &media_packets);
+ std::list<ForwardErrorCorrection::Packet*> fec_packets;
+ EncodeFec(media_packets, kNumFecPackets, &fec_packets);
+ ByteWriter<uint16_t>::WriteBigEndian(
+ fec_packets.front()->data.MutableData() + fec_garbage_offset, 0x4711);
+
+ // Inject first media packet, then first FEC packet, skipping the second media
+ // packet to cause a recovery from the FEC packet.
+ BuildAndAddRedMediaPacket(augmented_media_packets.front());
+ BuildAndAddRedFecPacket(fec_packets.front());
+ receiver_fec_.ProcessReceivedFec();
+
+ FecPacketCounter counter = receiver_fec_.GetPacketCounter();
+ EXPECT_EQ(2U, counter.num_packets);
+ EXPECT_EQ(1U, counter.num_fec_packets);
+ EXPECT_EQ(0U, counter.num_recovered_packets);
+}
+
+void UlpfecReceiverTest::SurvivesMaliciousPacket(const uint8_t* data,
+ size_t length,
+ uint8_t ulpfec_payload_type) {
+ NullRecoveredPacketReceiver null_callback;
+ UlpfecReceiver receiver_fec(kMediaSsrc, ulpfec_payload_type, &null_callback,
+ {}, Clock::GetRealTimeClock());
+
+ RtpPacketReceived rtp_packet;
+ ASSERT_TRUE(rtp_packet.Parse(data, length));
+ receiver_fec.AddReceivedRedPacket(rtp_packet);
+}
+
+TEST_F(UlpfecReceiverTest, TwoMediaOneFec) {
+ constexpr size_t kNumFecPackets = 1u;
+ std::list<AugmentedPacket*> augmented_media_packets;
+ ForwardErrorCorrection::PacketList media_packets;
+ PacketizeFrame(2, 0, &augmented_media_packets, &media_packets);
+ std::list<ForwardErrorCorrection::Packet*> fec_packets;
+ EncodeFec(media_packets, kNumFecPackets, &fec_packets);
+
+ FecPacketCounter counter = receiver_fec_.GetPacketCounter();
+ EXPECT_EQ(0u, counter.num_packets);
+ EXPECT_EQ(Timestamp::MinusInfinity(), counter.first_packet_time);
+
+ // Recovery
+ auto it = augmented_media_packets.begin();
+ BuildAndAddRedMediaPacket(*it);
+ VerifyReconstructedMediaPacket(**it, 1);
+ receiver_fec_.ProcessReceivedFec();
+ counter = receiver_fec_.GetPacketCounter();
+ EXPECT_EQ(1u, counter.num_packets);
+ EXPECT_EQ(0u, counter.num_fec_packets);
+ EXPECT_EQ(0u, counter.num_recovered_packets);
+ const Timestamp first_packet_time = counter.first_packet_time;
+ EXPECT_NE(Timestamp::MinusInfinity(), first_packet_time);
+
+ // Drop one media packet.
+ auto fec_it = fec_packets.begin();
+ BuildAndAddRedFecPacket(*fec_it);
+ ++it;
+ VerifyReconstructedMediaPacket(**it, 1);
+ receiver_fec_.ProcessReceivedFec();
+
+ counter = receiver_fec_.GetPacketCounter();
+ EXPECT_EQ(2u, counter.num_packets);
+ EXPECT_EQ(1u, counter.num_fec_packets);
+ EXPECT_EQ(1u, counter.num_recovered_packets);
+ EXPECT_EQ(first_packet_time, counter.first_packet_time);
+}
+
+TEST_F(UlpfecReceiverTest, TwoMediaOneFecNotUsesRecoveredPackets) {
+ constexpr size_t kNumFecPackets = 1u;
+ std::list<AugmentedPacket*> augmented_media_packets;
+ ForwardErrorCorrection::PacketList media_packets;
+ PacketizeFrame(2, 0, &augmented_media_packets, &media_packets);
+ std::list<ForwardErrorCorrection::Packet*> fec_packets;
+ EncodeFec(media_packets, kNumFecPackets, &fec_packets);
+
+ FecPacketCounter counter = receiver_fec_.GetPacketCounter();
+ EXPECT_EQ(0u, counter.num_packets);
+ EXPECT_EQ(Timestamp::MinusInfinity(), counter.first_packet_time);
+
+ // Recovery
+ auto it = augmented_media_packets.begin();
+ BuildAndAddRedMediaPacket(*it, /*is_recovered=*/true);
+ VerifyReconstructedMediaPacket(**it, 1);
+ receiver_fec_.ProcessReceivedFec();
+ counter = receiver_fec_.GetPacketCounter();
+ EXPECT_EQ(1u, counter.num_packets);
+ EXPECT_EQ(0u, counter.num_fec_packets);
+ EXPECT_EQ(0u, counter.num_recovered_packets);
+ const Timestamp first_packet_time = counter.first_packet_time;
+ EXPECT_NE(Timestamp::MinusInfinity(), first_packet_time);
+
+ // Drop one media packet.
+ auto fec_it = fec_packets.begin();
+ BuildAndAddRedFecPacket(*fec_it);
+ ++it;
+ receiver_fec_.ProcessReceivedFec();
+
+ counter = receiver_fec_.GetPacketCounter();
+ EXPECT_EQ(2u, counter.num_packets);
+ EXPECT_EQ(1u, counter.num_fec_packets);
+ EXPECT_EQ(0u, counter.num_recovered_packets);
+ EXPECT_EQ(first_packet_time, counter.first_packet_time);
+}
+
+TEST_F(UlpfecReceiverTest, InjectGarbageFecHeaderLengthRecovery) {
+ // Byte offset 8 is the 'length recovery' field of the FEC header.
+ InjectGarbagePacketLength(8);
+}
+
+TEST_F(UlpfecReceiverTest, InjectGarbageFecLevelHeaderProtectionLength) {
+ // Byte offset 10 is the 'protection length' field in the first FEC level
+ // header.
+ InjectGarbagePacketLength(10);
+}
+
+TEST_F(UlpfecReceiverTest, TwoMediaTwoFec) {
+ const size_t kNumFecPackets = 2;
+ std::list<AugmentedPacket*> augmented_media_packets;
+ ForwardErrorCorrection::PacketList media_packets;
+ PacketizeFrame(2, 0, &augmented_media_packets, &media_packets);
+ std::list<ForwardErrorCorrection::Packet*> fec_packets;
+ EncodeFec(media_packets, kNumFecPackets, &fec_packets);
+
+ // Recovery
+ // Drop both media packets.
+ auto it = augmented_media_packets.begin();
+ auto fec_it = fec_packets.begin();
+ BuildAndAddRedFecPacket(*fec_it);
+ VerifyReconstructedMediaPacket(**it, 1);
+ receiver_fec_.ProcessReceivedFec();
+ ++fec_it;
+ BuildAndAddRedFecPacket(*fec_it);
+ ++it;
+ VerifyReconstructedMediaPacket(**it, 1);
+ receiver_fec_.ProcessReceivedFec();
+}
+
+TEST_F(UlpfecReceiverTest, TwoFramesOneFec) {
+ const size_t kNumFecPackets = 1;
+ std::list<AugmentedPacket*> augmented_media_packets;
+ ForwardErrorCorrection::PacketList media_packets;
+ PacketizeFrame(1, 0, &augmented_media_packets, &media_packets);
+ PacketizeFrame(1, 1, &augmented_media_packets, &media_packets);
+ std::list<ForwardErrorCorrection::Packet*> fec_packets;
+ EncodeFec(media_packets, kNumFecPackets, &fec_packets);
+
+ // Recovery
+ auto it = augmented_media_packets.begin();
+ BuildAndAddRedMediaPacket(augmented_media_packets.front());
+ VerifyReconstructedMediaPacket(**it, 1);
+ receiver_fec_.ProcessReceivedFec();
+ // Drop one media packet.
+ BuildAndAddRedFecPacket(fec_packets.front());
+ ++it;
+ VerifyReconstructedMediaPacket(**it, 1);
+ receiver_fec_.ProcessReceivedFec();
+}
+
+TEST_F(UlpfecReceiverTest, OneCompleteOneUnrecoverableFrame) {
+ const size_t kNumFecPackets = 1;
+ std::list<AugmentedPacket*> augmented_media_packets;
+ ForwardErrorCorrection::PacketList media_packets;
+ PacketizeFrame(1, 0, &augmented_media_packets, &media_packets);
+ PacketizeFrame(2, 1, &augmented_media_packets, &media_packets);
+
+ std::list<ForwardErrorCorrection::Packet*> fec_packets;
+ EncodeFec(media_packets, kNumFecPackets, &fec_packets);
+
+ // Recovery
+ auto it = augmented_media_packets.begin();
+ BuildAndAddRedMediaPacket(*it); // First frame: one packet.
+ VerifyReconstructedMediaPacket(**it, 1);
+ receiver_fec_.ProcessReceivedFec();
+ ++it;
+ BuildAndAddRedMediaPacket(*it); // First packet of second frame.
+ VerifyReconstructedMediaPacket(**it, 1);
+ receiver_fec_.ProcessReceivedFec();
+}
+
+TEST_F(UlpfecReceiverTest, MaxFramesOneFec) {
+ const size_t kNumFecPackets = 1;
+ const size_t kNumMediaPackets = 48;
+ std::list<AugmentedPacket*> augmented_media_packets;
+ ForwardErrorCorrection::PacketList media_packets;
+ for (size_t i = 0; i < kNumMediaPackets; ++i) {
+ PacketizeFrame(1, i, &augmented_media_packets, &media_packets);
+ }
+ std::list<ForwardErrorCorrection::Packet*> fec_packets;
+ EncodeFec(media_packets, kNumFecPackets, &fec_packets);
+
+ // Recovery
+ auto it = augmented_media_packets.begin();
+ ++it; // Drop first packet.
+ for (; it != augmented_media_packets.end(); ++it) {
+ BuildAndAddRedMediaPacket(*it);
+ VerifyReconstructedMediaPacket(**it, 1);
+ receiver_fec_.ProcessReceivedFec();
+ }
+ BuildAndAddRedFecPacket(fec_packets.front());
+ it = augmented_media_packets.begin();
+ VerifyReconstructedMediaPacket(**it, 1);
+ receiver_fec_.ProcessReceivedFec();
+}
+
+TEST_F(UlpfecReceiverTest, TooManyFrames) {
+ const size_t kNumFecPackets = 1;
+ const size_t kNumMediaPackets = 49;
+ std::list<AugmentedPacket*> augmented_media_packets;
+ ForwardErrorCorrection::PacketList media_packets;
+ for (size_t i = 0; i < kNumMediaPackets; ++i) {
+ PacketizeFrame(1, i, &augmented_media_packets, &media_packets);
+ }
+ std::list<ForwardErrorCorrection::Packet*> fec_packets;
+ EXPECT_EQ(-1, fec_->EncodeFec(media_packets,
+ kNumFecPackets * 255 / kNumMediaPackets, 0,
+ false, kFecMaskBursty, &fec_packets));
+}
+
+TEST_F(UlpfecReceiverTest, PacketNotDroppedTooEarly) {
+ // 1 frame with 2 media packets and one FEC packet. One media packet missing.
+ // Delay the FEC packet.
+ Packet* delayed_fec = nullptr;
+ const size_t kNumFecPacketsBatch1 = 1;
+ const size_t kNumMediaPacketsBatch1 = 2;
+ std::list<AugmentedPacket*> augmented_media_packets_batch1;
+ ForwardErrorCorrection::PacketList media_packets_batch1;
+ PacketizeFrame(kNumMediaPacketsBatch1, 0, &augmented_media_packets_batch1,
+ &media_packets_batch1);
+ std::list<ForwardErrorCorrection::Packet*> fec_packets;
+ EncodeFec(media_packets_batch1, kNumFecPacketsBatch1, &fec_packets);
+
+ BuildAndAddRedMediaPacket(augmented_media_packets_batch1.front());
+ EXPECT_CALL(recovered_packet_receiver_, OnRecoveredPacket(_, _)).Times(1);
+ receiver_fec_.ProcessReceivedFec();
+ delayed_fec = fec_packets.front();
+
+ // Fill the FEC decoder. No packets should be dropped.
+ const size_t kNumMediaPacketsBatch2 = 191;
+ std::list<AugmentedPacket*> augmented_media_packets_batch2;
+ ForwardErrorCorrection::PacketList media_packets_batch2;
+ for (size_t i = 0; i < kNumMediaPacketsBatch2; ++i) {
+ PacketizeFrame(1, i, &augmented_media_packets_batch2,
+ &media_packets_batch2);
+ }
+ for (auto it = augmented_media_packets_batch2.begin();
+ it != augmented_media_packets_batch2.end(); ++it) {
+ BuildAndAddRedMediaPacket(*it);
+ EXPECT_CALL(recovered_packet_receiver_, OnRecoveredPacket(_, _)).Times(1);
+ receiver_fec_.ProcessReceivedFec();
+ }
+
+ // Add the delayed FEC packet. One packet should be reconstructed.
+ BuildAndAddRedFecPacket(delayed_fec);
+ EXPECT_CALL(recovered_packet_receiver_, OnRecoveredPacket(_, _)).Times(1);
+ receiver_fec_.ProcessReceivedFec();
+}
+
+TEST_F(UlpfecReceiverTest, PacketDroppedWhenTooOld) {
+ // 1 frame with 2 media packets and one FEC packet. One media packet missing.
+ // Delay the FEC packet.
+ Packet* delayed_fec = nullptr;
+ const size_t kNumFecPacketsBatch1 = 1;
+ const size_t kNumMediaPacketsBatch1 = 2;
+ std::list<AugmentedPacket*> augmented_media_packets_batch1;
+ ForwardErrorCorrection::PacketList media_packets_batch1;
+ PacketizeFrame(kNumMediaPacketsBatch1, 0, &augmented_media_packets_batch1,
+ &media_packets_batch1);
+ std::list<ForwardErrorCorrection::Packet*> fec_packets;
+ EncodeFec(media_packets_batch1, kNumFecPacketsBatch1, &fec_packets);
+
+ BuildAndAddRedMediaPacket(augmented_media_packets_batch1.front());
+ EXPECT_CALL(recovered_packet_receiver_, OnRecoveredPacket(_, _)).Times(1);
+ receiver_fec_.ProcessReceivedFec();
+ delayed_fec = fec_packets.front();
+
+ // Fill the FEC decoder and force the last packet to be dropped.
+ const size_t kNumMediaPacketsBatch2 = 192;
+ std::list<AugmentedPacket*> augmented_media_packets_batch2;
+ ForwardErrorCorrection::PacketList media_packets_batch2;
+ for (size_t i = 0; i < kNumMediaPacketsBatch2; ++i) {
+ PacketizeFrame(1, i, &augmented_media_packets_batch2,
+ &media_packets_batch2);
+ }
+ for (auto it = augmented_media_packets_batch2.begin();
+ it != augmented_media_packets_batch2.end(); ++it) {
+ BuildAndAddRedMediaPacket(*it);
+ EXPECT_CALL(recovered_packet_receiver_, OnRecoveredPacket(_, _)).Times(1);
+ receiver_fec_.ProcessReceivedFec();
+ }
+
+ // Add the delayed FEC packet. No packet should be reconstructed since the
+ // first media packet of that frame has been dropped due to being too old.
+ BuildAndAddRedFecPacket(delayed_fec);
+ EXPECT_CALL(recovered_packet_receiver_, OnRecoveredPacket(_, _)).Times(0);
+ receiver_fec_.ProcessReceivedFec();
+}
+
+TEST_F(UlpfecReceiverTest, OldFecPacketDropped) {
+ // 49 frames with 2 media packets and one FEC packet. All media packets
+ // missing.
+ const size_t kNumMediaPackets = 49 * 2;
+ std::list<AugmentedPacket*> augmented_media_packets;
+ ForwardErrorCorrection::PacketList media_packets;
+ for (size_t i = 0; i < kNumMediaPackets / 2; ++i) {
+ std::list<AugmentedPacket*> frame_augmented_media_packets;
+ ForwardErrorCorrection::PacketList frame_media_packets;
+ std::list<ForwardErrorCorrection::Packet*> fec_packets;
+ PacketizeFrame(2, 0, &frame_augmented_media_packets, &frame_media_packets);
+ EncodeFec(frame_media_packets, 1, &fec_packets);
+ for (auto it = fec_packets.begin(); it != fec_packets.end(); ++it) {
+ // Only FEC packets inserted. No packets recoverable at this time.
+ BuildAndAddRedFecPacket(*it);
+ EXPECT_CALL(recovered_packet_receiver_, OnRecoveredPacket(_, _)).Times(0);
+ receiver_fec_.ProcessReceivedFec();
+ }
+ // Move unique_ptr's to media_packets for lifetime management.
+ media_packets.insert(media_packets.end(),
+ std::make_move_iterator(frame_media_packets.begin()),
+ std::make_move_iterator(frame_media_packets.end()));
+ augmented_media_packets.insert(augmented_media_packets.end(),
+ frame_augmented_media_packets.begin(),
+ frame_augmented_media_packets.end());
+ }
+ // Insert the oldest media packet. The corresponding FEC packet is too old
+ // and should have been dropped. Only the media packet we inserted will be
+ // returned.
+ BuildAndAddRedMediaPacket(augmented_media_packets.front());
+ EXPECT_CALL(recovered_packet_receiver_, OnRecoveredPacket(_, _)).Times(1);
+ receiver_fec_.ProcessReceivedFec();
+}
+
+TEST_F(UlpfecReceiverTest, TruncatedPacketWithFBitSet) {
+ const uint8_t kTruncatedPacket[] = {0x80, 0x2a, 0x68, 0x71, 0x29, 0xa1, 0x27,
+ 0x3a, 0x29, 0x12, 0x2a, 0x98, 0xe0, 0x29};
+
+ SurvivesMaliciousPacket(kTruncatedPacket, sizeof(kTruncatedPacket), 100);
+}
+
+TEST_F(UlpfecReceiverTest,
+ TruncatedPacketWithFBitSetEndingAfterFirstRedHeader) {
+ const uint8_t kPacket[] = {
+ 0x89, 0x27, 0x3a, 0x83, 0x27, 0x3a, 0x3a, 0xf3, 0x67, 0xbe, 0x2a,
+ 0xa9, 0x27, 0x54, 0x3a, 0x3a, 0x2a, 0x67, 0x3a, 0xf3, 0x67, 0xbe,
+ 0x2a, 0x27, 0xe6, 0xf6, 0x03, 0x3e, 0x29, 0x27, 0x21, 0x27, 0x2a,
+ 0x29, 0x21, 0x4b, 0x29, 0x3a, 0x28, 0x29, 0xbf, 0x29, 0x2a, 0x26,
+ 0x29, 0xae, 0x27, 0xa6, 0xf6, 0x00, 0x03, 0x3e};
+ SurvivesMaliciousPacket(kPacket, sizeof(kPacket), 100);
+}
+
+TEST_F(UlpfecReceiverTest, TruncatedPacketWithoutDataPastFirstBlock) {
+ const uint8_t kPacket[] = {
+ 0x82, 0x38, 0x92, 0x38, 0x92, 0x38, 0xde, 0x2a, 0x11, 0xc8, 0xa3, 0xc4,
+ 0x82, 0x38, 0x2a, 0x21, 0x2a, 0x28, 0x92, 0x38, 0x92, 0x00, 0x00, 0x0a,
+ 0x3a, 0xc8, 0xa3, 0x3a, 0x27, 0xc4, 0x2a, 0x21, 0x2a, 0x28};
+ SurvivesMaliciousPacket(kPacket, sizeof(kPacket), 100);
+}
+
+TEST_F(UlpfecReceiverTest, MediaWithPadding) {
+ const size_t kNumFecPackets = 1;
+ std::list<AugmentedPacket*> augmented_media_packets;
+ ForwardErrorCorrection::PacketList media_packets;
+ PacketizeFrame(2, 0, &augmented_media_packets, &media_packets);
+
+ // Append four bytes of padding to the first media packet.
+ const uint8_t kPadding[] = {0, 0, 0, 4};
+ augmented_media_packets.front()->data.AppendData(kPadding);
+ augmented_media_packets.front()->data.MutableData()[0] |= 1 << 5; // P bit.
+ augmented_media_packets.front()->header.paddingLength = 4;
+
+ std::list<ForwardErrorCorrection::Packet*> fec_packets;
+ EncodeFec(media_packets, kNumFecPackets, &fec_packets);
+
+ auto it = augmented_media_packets.begin();
+ BuildAndAddRedMediaPacket(augmented_media_packets.front());
+
+ VerifyReconstructedMediaPacket(**it, 1);
+ receiver_fec_.ProcessReceivedFec();
+
+ BuildAndAddRedFecPacket(fec_packets.front());
+ ++it;
+ VerifyReconstructedMediaPacket(**it, 1);
+ receiver_fec_.ProcessReceivedFec();
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/video_fec_generator.h b/third_party/libwebrtc/modules/rtp_rtcp/source/video_fec_generator.h
new file mode 100644
index 0000000000..38e4103cb6
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/video_fec_generator.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_RTP_RTCP_SOURCE_VIDEO_FEC_GENERATOR_H_
+#define MODULES_RTP_RTCP_SOURCE_VIDEO_FEC_GENERATOR_H_
+
+#include <memory>
+#include <vector>
+
+#include "api/units/data_rate.h"
+#include "modules/include/module_fec_types.h"
+#include "modules/rtp_rtcp/source/rtp_packet_to_send.h"
+
+namespace webrtc {
+
+class VideoFecGenerator {
+ public:
+ VideoFecGenerator() = default;
+ virtual ~VideoFecGenerator() = default;
+
+ enum class FecType { kFlexFec, kUlpFec };
+ virtual FecType GetFecType() const = 0;
+  // Returns the SSRC used for FEC packets (e.g. the FlexFec SSRC).
+ virtual absl::optional<uint32_t> FecSsrc() = 0;
+ // Returns the overhead, in bytes per packet, for FEC (and possibly RED).
+ virtual size_t MaxPacketOverhead() const = 0;
+ // Current rate of FEC packets generated, including all RTP-level headers.
+ virtual DataRate CurrentFecRate() const = 0;
+ // Set FEC rates, max frames before FEC is sent, and type of FEC masks.
+ virtual void SetProtectionParameters(
+ const FecProtectionParams& delta_params,
+ const FecProtectionParams& key_params) = 0;
+  // Called on a new media packet to be protected. The generator may choose
+  // to generate FEC packets at this time; if so, they will be stored in an
+  // internal buffer.
+ virtual void AddPacketAndGenerateFec(const RtpPacketToSend& packet) = 0;
+  // Get (and remove) any FEC packets pending in the generator. These packets
+  // will lack sequence numbers, which need to be set externally.
+ // TODO(bugs.webrtc.org/11340): Actually FlexFec sets seq#, fix that!
+ virtual std::vector<std::unique_ptr<RtpPacketToSend>> GetFecPackets() = 0;
+  // Only called on the VideoSendStream queue, after the operation has shut
+  // down, and only populated if there is an RtpState (e.g. FlexFec).
+ virtual absl::optional<RtpState> GetRtpState() = 0;
+};
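+
+// Illustrative call sequence for an implementation of this interface (the
+// variable names below are hypothetical, not part of the WebRTC API):
+//   generator.SetProtectionParameters(delta_params, key_params);
+//   generator.AddPacketAndGenerateFec(media_packet);
+//   auto fec_packets = generator.GetFecPackets();  // Assign seq#, then send.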
+
+} // namespace webrtc
+#endif // MODULES_RTP_RTCP_SOURCE_VIDEO_FEC_GENERATOR_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/video_rtp_depacketizer.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/video_rtp_depacketizer.cc
new file mode 100644
index 0000000000..bb0bf09e90
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/video_rtp_depacketizer.cc
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/video_rtp_depacketizer.h"
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "api/array_view.h"
+#include "api/scoped_refptr.h"
+#include "api/video/encoded_image.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+rtc::scoped_refptr<EncodedImageBuffer> VideoRtpDepacketizer::AssembleFrame(
+ rtc::ArrayView<const rtc::ArrayView<const uint8_t>> rtp_payloads) {
+ size_t frame_size = 0;
+ for (rtc::ArrayView<const uint8_t> payload : rtp_payloads) {
+ frame_size += payload.size();
+ }
+
+ rtc::scoped_refptr<EncodedImageBuffer> bitstream =
+ EncodedImageBuffer::Create(frame_size);
+
+ uint8_t* write_at = bitstream->data();
+ for (rtc::ArrayView<const uint8_t> payload : rtp_payloads) {
+ memcpy(write_at, payload.data(), payload.size());
+ write_at += payload.size();
+ }
+ RTC_DCHECK_EQ(write_at - bitstream->data(), bitstream->size());
+ return bitstream;
+}
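+
+// Note: the default AssembleFrame() above concatenates the payloads verbatim;
+// codec-specific depacketizers (e.g. VideoRtpDepacketizerAv1) may override it
+// to restructure the bitstream.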
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/video_rtp_depacketizer.h b/third_party/libwebrtc/modules/rtp_rtcp/source/video_rtp_depacketizer.h
new file mode 100644
index 0000000000..2266120799
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/video_rtp_depacketizer.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_RTP_RTCP_SOURCE_VIDEO_RTP_DEPACKETIZER_H_
+#define MODULES_RTP_RTCP_SOURCE_VIDEO_RTP_DEPACKETIZER_H_
+
+#include <stdint.h>
+
+#include "absl/types/optional.h"
+#include "api/array_view.h"
+#include "api/scoped_refptr.h"
+#include "api/video/encoded_image.h"
+#include "modules/rtp_rtcp/source/rtp_video_header.h"
+#include "rtc_base/copy_on_write_buffer.h"
+
+namespace webrtc {
+
+class VideoRtpDepacketizer {
+ public:
+ struct ParsedRtpPayload {
+ RTPVideoHeader video_header;
+ rtc::CopyOnWriteBuffer video_payload;
+ };
+
+ virtual ~VideoRtpDepacketizer() = default;
+ virtual absl::optional<ParsedRtpPayload> Parse(
+ rtc::CopyOnWriteBuffer rtp_payload) = 0;
+ virtual rtc::scoped_refptr<EncodedImageBuffer> AssembleFrame(
+ rtc::ArrayView<const rtc::ArrayView<const uint8_t>> rtp_payloads);
+};
+
+} // namespace webrtc
+
+#endif // MODULES_RTP_RTCP_SOURCE_VIDEO_RTP_DEPACKETIZER_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/video_rtp_depacketizer_av1.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/video_rtp_depacketizer_av1.cc
new file mode 100644
index 0000000000..5c41b48cc4
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/video_rtp_depacketizer_av1.cc
@@ -0,0 +1,407 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/video_rtp_depacketizer_av1.h"
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <utility>
+
+#include "modules/rtp_rtcp/source/rtp_video_header.h"
+#include "rtc_base/byte_buffer.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/numerics/safe_conversions.h"
+
+namespace webrtc {
+namespace {
+// AV1 format:
+//
+// RTP payload syntax:
+// 0 1 2 3 4 5 6 7
+// +-+-+-+-+-+-+-+-+
+// |Z|Y| W |N|-|-|-| (REQUIRED)
+// +=+=+=+=+=+=+=+=+ (REPEATED W-1 times, or any times if W = 0)
+// |1| |
+// +-+ OBU fragment|
+// |1| | (REQUIRED, leb128 encoded)
+// +-+ size |
+// |0| |
+// +-+-+-+-+-+-+-+-+
+// | OBU fragment |
+// | ... |
+// +=+=+=+=+=+=+=+=+
+// | ... |
+// +=+=+=+=+=+=+=+=+ if W > 0, last fragment MUST NOT have size field
+// | OBU fragment |
+// | ... |
+// +=+=+=+=+=+=+=+=+
+//
+//
+// OBU syntax:
+// 0 1 2 3 4 5 6 7
+// +-+-+-+-+-+-+-+-+
+// |0| type |X|S|-| (REQUIRED)
+// +-+-+-+-+-+-+-+-+
+// X: | TID |SID|-|-|-| (OPTIONAL)
+// +-+-+-+-+-+-+-+-+
+// |1| |
+// +-+ OBU payload |
+// S: |1| | (OPTIONAL, variable length leb128 encoded)
+// +-+ size |
+// |0| |
+// +-+-+-+-+-+-+-+-+
+// | OBU payload |
+// | ... |
+class ArrayOfArrayViews {
+ public:
+ class const_iterator;
+ ArrayOfArrayViews() = default;
+ ArrayOfArrayViews(const ArrayOfArrayViews&) = default;
+ ArrayOfArrayViews& operator=(const ArrayOfArrayViews&) = default;
+ ~ArrayOfArrayViews() = default;
+
+ const_iterator begin() const;
+ const_iterator end() const;
+ bool empty() const { return data_.empty(); }
+ size_t size() const { return size_; }
+ void CopyTo(uint8_t* destination, const_iterator first) const;
+
+ void Append(const uint8_t* data, size_t size) {
+ data_.emplace_back(data, size);
+ size_ += size;
+ }
+
+ private:
+ using Storage = absl::InlinedVector<rtc::ArrayView<const uint8_t>, 2>;
+
+ size_t size_ = 0;
+ Storage data_;
+};
+
+class ArrayOfArrayViews::const_iterator {
+ public:
+ const_iterator() = default;
+ const_iterator(const const_iterator&) = default;
+ const_iterator& operator=(const const_iterator&) = default;
+
+ const_iterator& operator++() {
+ if (++inner_ == outer_->size()) {
+ ++outer_;
+ inner_ = 0;
+ }
+ return *this;
+ }
+ uint8_t operator*() const { return (*outer_)[inner_]; }
+
+ friend bool operator==(const const_iterator& lhs, const const_iterator& rhs) {
+ return lhs.outer_ == rhs.outer_ && lhs.inner_ == rhs.inner_;
+ }
+
+ private:
+ friend ArrayOfArrayViews;
+ const_iterator(ArrayOfArrayViews::Storage::const_iterator outer, size_t inner)
+ : outer_(outer), inner_(inner) {}
+
+ Storage::const_iterator outer_;
+ size_t inner_;
+};
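+
+// Design note (editorial): operator++ advances to the next view once the
+// current one is exhausted, so end() is represented as (data_.end(), 0).
+// This relies on Append() never being called with an empty fragment.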
+
+ArrayOfArrayViews::const_iterator ArrayOfArrayViews::begin() const {
+ return const_iterator(data_.begin(), 0);
+}
+
+ArrayOfArrayViews::const_iterator ArrayOfArrayViews::end() const {
+ return const_iterator(data_.end(), 0);
+}
+
+void ArrayOfArrayViews::CopyTo(uint8_t* destination,
+ const_iterator first) const {
+ if (first == end()) {
+ // Empty OBU payload. E.g. Temporal Delimiters are always empty.
+ return;
+ }
+ size_t first_chunk_size = first.outer_->size() - first.inner_;
+ memcpy(destination, first.outer_->data() + first.inner_, first_chunk_size);
+ destination += first_chunk_size;
+ for (auto it = std::next(first.outer_); it != data_.end(); ++it) {
+ memcpy(destination, it->data(), it->size());
+ destination += it->size();
+ }
+}
+
+struct ObuInfo {
+  // Size of the obu_header and obu_size fields in the output frame.
+ size_t prefix_size = 0;
+ // obu_header() and obu_size (leb128 encoded payload_size).
+ // obu_header can be up to 2 bytes, obu_size - up to 5.
+ std::array<uint8_t, 7> prefix;
+  // Size of the obu payload in the output frame (excluding header and size).
+  size_t payload_size = 0;
+  // Iterator pointing to the beginning of the obu payload.
+ ArrayOfArrayViews::const_iterator payload_offset;
+ // OBU payloads as written in the rtp packet payloads.
+ ArrayOfArrayViews data;
+};
+// Expect that the majority of frames won't use more than 4 OBUs.
+// In a simple stream a delta frame consists of a single Frame OBU, while a
+// key frame also has a Sequence Header OBU.
+using VectorObuInfo = absl::InlinedVector<ObuInfo, 4>;
+
+constexpr uint8_t kObuSizePresentBit = 0b0'0000'010;
+
+bool ObuHasExtension(uint8_t obu_header) {
+ return obu_header & 0b0'0000'100u;
+}
+
+bool ObuHasSize(uint8_t obu_header) {
+ return obu_header & kObuSizePresentBit;
+}
+
+bool RtpStartsWithFragment(uint8_t aggregation_header) {
+ return aggregation_header & 0b1000'0000u;
+}
+bool RtpEndsWithFragment(uint8_t aggregation_header) {
+ return aggregation_header & 0b0100'0000u;
+}
+int RtpNumObus(uint8_t aggregation_header) { // 0 for any number of obus.
+ return (aggregation_header & 0b0011'0000u) >> 4;
+}
+int RtpStartsNewCodedVideoSequence(uint8_t aggregation_header) {
+ return aggregation_header & 0b0000'1000u;
+}
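+
+// Worked example (illustrative): the aggregation header 0b01'10'0000 decodes
+// as Z=0 (first OBU element is not a continuation), Y=1 (the last element
+// continues in the next packet), W=2 (two OBU elements) and N=0.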
+
+// Reorganizes the array of RTP payloads into an array of OBUs:
+// fills the ObuInfo::data field.
+// Returns empty vector on error.
+VectorObuInfo ParseObus(
+ rtc::ArrayView<const rtc::ArrayView<const uint8_t>> rtp_payloads) {
+ VectorObuInfo obu_infos;
+ bool expect_continues_obu = false;
+ for (rtc::ArrayView<const uint8_t> rtp_payload : rtp_payloads) {
+ rtc::ByteBufferReader payload(
+ reinterpret_cast<const char*>(rtp_payload.data()), rtp_payload.size());
+ uint8_t aggregation_header;
+ if (!payload.ReadUInt8(&aggregation_header)) {
+ RTC_DLOG(LS_WARNING)
+ << "Failed to find aggregation header in the packet.";
+ return {};
+ }
+ // Z-bit: 1 if the first OBU contained in the packet is a continuation of a
+ // previous OBU.
+ bool continues_obu = RtpStartsWithFragment(aggregation_header);
+ if (continues_obu != expect_continues_obu) {
+ RTC_DLOG(LS_WARNING) << "Unexpected Z-bit " << continues_obu;
+ return {};
+ }
+ int num_expected_obus = RtpNumObus(aggregation_header);
+ if (payload.Length() == 0) {
+      // The RTP packet has just the aggregation header. That is valid only
+      // when there is exactly one fragment in the packet, of size 0.
+ if (num_expected_obus != 1) {
+ RTC_DLOG(LS_WARNING)
+ << "Invalid packet with just an aggregation header.";
+ return {};
+ }
+ if (!continues_obu) {
+ // Empty packet just to notify there is a new OBU.
+ obu_infos.emplace_back();
+ }
+ expect_continues_obu = RtpEndsWithFragment(aggregation_header);
+ continue;
+ }
+
+ for (int obu_index = 1; payload.Length() > 0; ++obu_index) {
+ ObuInfo& obu_info = (obu_index == 1 && continues_obu)
+ ? obu_infos.back()
+ : obu_infos.emplace_back();
+ uint64_t fragment_size;
+      // When num_expected_obus > 0, the last OBU (fragment) is not preceded
+      // by the size field. See the W field in
+ // https://aomediacodec.github.io/av1-rtp-spec/#43-av1-aggregation-header
+ bool has_fragment_size = (obu_index != num_expected_obus);
+ if (has_fragment_size) {
+ if (!payload.ReadUVarint(&fragment_size)) {
+ RTC_DLOG(LS_WARNING) << "Failed to read fragment size for obu #"
+ << obu_index << "/" << num_expected_obus;
+ return {};
+ }
+ if (fragment_size > payload.Length()) {
+ // Malformed input: written size is larger than remaining buffer.
+ RTC_DLOG(LS_WARNING) << "Malformed fragment size " << fragment_size
+ << " is larger than remaining size "
+ << payload.Length() << " while reading obu #"
+ << obu_index << "/" << num_expected_obus;
+ return {};
+ }
+ } else {
+ fragment_size = payload.Length();
+ }
+      // While it is impractical to pass empty fragments, it is still possible.
+ if (fragment_size > 0) {
+ obu_info.data.Append(reinterpret_cast<const uint8_t*>(payload.Data()),
+ fragment_size);
+ payload.Consume(fragment_size);
+ }
+ }
+    // The Y flag of this packet should match the Z flag of the next packet.
+ expect_continues_obu = RtpEndsWithFragment(aggregation_header);
+ }
+ if (expect_continues_obu) {
+ RTC_DLOG(LS_WARNING) << "Last packet shouldn't have last obu fragmented.";
+ return {};
+ }
+ return obu_infos;
+}
+
+// Returns number of bytes consumed.
+int WriteLeb128(uint32_t value, uint8_t* buffer) {
+ int size = 0;
+ while (value >= 0x80) {
+ buffer[size] = 0x80 | (value & 0x7F);
+ ++size;
+ value >>= 7;
+ }
+ buffer[size] = value;
+ ++size;
+ return size;
+}
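+
+// For example (illustrative values): WriteLeb128(3, buf) writes {0x03} and
+// returns 1, while WriteLeb128(128, buf) writes {0x80, 0x01} and returns 2.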
+
+// Calculates sizes for the OBU: based on the ObuInfo::data field, computes
+// all other fields in the ObuInfo structure.
+// Returns false if the OBU is found to be malformed.
+bool CalculateObuSizes(ObuInfo* obu_info) {
+ if (obu_info->data.empty()) {
+ RTC_DLOG(LS_WARNING) << "Invalid bitstream: empty obu provided.";
+ return false;
+ }
+ auto it = obu_info->data.begin();
+ uint8_t obu_header = *it;
+ obu_info->prefix[0] = obu_header | kObuSizePresentBit;
+ obu_info->prefix_size = 1;
+ ++it;
+ if (ObuHasExtension(obu_header)) {
+ if (it == obu_info->data.end()) {
+ return false;
+ }
+ obu_info->prefix[1] = *it; // obu_extension_header
+ obu_info->prefix_size = 2;
+ ++it;
+ }
+ // Read, validate, and skip size, if present.
+ if (!ObuHasSize(obu_header)) {
+ obu_info->payload_size = obu_info->data.size() - obu_info->prefix_size;
+ } else {
+ // Read leb128 encoded field obu_size.
+ uint64_t obu_size_bytes = 0;
+    // Number of bytes the obu_size field occupies in the bitstream.
+ int size_of_obu_size_bytes = 0;
+ uint8_t leb128_byte;
+ do {
+ if (it == obu_info->data.end() || size_of_obu_size_bytes >= 8) {
+ RTC_DLOG(LS_WARNING)
+ << "Failed to read obu_size. obu_size field is too long: "
+ << size_of_obu_size_bytes << " bytes processed.";
+ return false;
+ }
+ leb128_byte = *it;
+ obu_size_bytes |= uint64_t{leb128_byte & 0x7Fu}
+ << (size_of_obu_size_bytes * 7);
+ ++size_of_obu_size_bytes;
+ ++it;
+ } while ((leb128_byte & 0x80) != 0);
+
+ obu_info->payload_size =
+ obu_info->data.size() - obu_info->prefix_size - size_of_obu_size_bytes;
+ if (obu_size_bytes != obu_info->payload_size) {
+      // obu_size was present in the bitstream and mismatches the calculated size.
+ RTC_DLOG(LS_WARNING) << "Mismatch in obu_size. signaled: "
+ << obu_size_bytes
+ << ", actual: " << obu_info->payload_size;
+ return false;
+ }
+ }
+ obu_info->payload_offset = it;
+ obu_info->prefix_size +=
+ WriteLeb128(rtc::dchecked_cast<uint32_t>(obu_info->payload_size),
+ obu_info->prefix.data() + obu_info->prefix_size);
+ return true;
+}
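+
+// Worked example (illustrative): obu_size bytes {0x80, 0x01} decode above as
+// (0x00 << 0) | (0x01 << 7) = 128, the inverse of WriteLeb128.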
+
+} // namespace
+
+rtc::scoped_refptr<EncodedImageBuffer> VideoRtpDepacketizerAv1::AssembleFrame(
+ rtc::ArrayView<const rtc::ArrayView<const uint8_t>> rtp_payloads) {
+ VectorObuInfo obu_infos = ParseObus(rtp_payloads);
+ if (obu_infos.empty()) {
+ return nullptr;
+ }
+
+ size_t frame_size = 0;
+ for (ObuInfo& obu_info : obu_infos) {
+ if (!CalculateObuSizes(&obu_info)) {
+ return nullptr;
+ }
+ frame_size += (obu_info.prefix_size + obu_info.payload_size);
+ }
+
+ rtc::scoped_refptr<EncodedImageBuffer> bitstream =
+ EncodedImageBuffer::Create(frame_size);
+ uint8_t* write_at = bitstream->data();
+ for (const ObuInfo& obu_info : obu_infos) {
+ // Copy the obu_header and obu_size fields.
+ memcpy(write_at, obu_info.prefix.data(), obu_info.prefix_size);
+ write_at += obu_info.prefix_size;
+ // Copy the obu payload.
+ obu_info.data.CopyTo(write_at, obu_info.payload_offset);
+ write_at += obu_info.payload_size;
+ }
+ RTC_CHECK_EQ(write_at - bitstream->data(), bitstream->size());
+ return bitstream;
+}
+
+absl::optional<VideoRtpDepacketizer::ParsedRtpPayload>
+VideoRtpDepacketizerAv1::Parse(rtc::CopyOnWriteBuffer rtp_payload) {
+ if (rtp_payload.size() == 0) {
+ RTC_DLOG(LS_ERROR) << "Empty rtp payload.";
+ return absl::nullopt;
+ }
+ uint8_t aggregation_header = rtp_payload.cdata()[0];
+ if (RtpStartsNewCodedVideoSequence(aggregation_header) &&
+ RtpStartsWithFragment(aggregation_header)) {
+    // A new coded video sequence can't start with an OBU fragment.
+ return absl::nullopt;
+ }
+ absl::optional<ParsedRtpPayload> parsed(absl::in_place);
+
+  // To assemble the frame, the entire RTP payload is required, including
+  // the aggregation header.
+ parsed->video_payload = std::move(rtp_payload);
+
+ parsed->video_header.codec = VideoCodecType::kVideoCodecAV1;
+  // These are not accurate since a frame may consist of several
+  // packet-aligned chunks of OBUs, but they should be good enough for most
+  // cases. It might produce a frame that does not map to any real frame, but
+  // an AV1 decoder should be able to handle it since it promises to handle
+  // individual OBUs rather than full frames.
+ parsed->video_header.is_first_packet_in_frame =
+ !RtpStartsWithFragment(aggregation_header);
+ parsed->video_header.is_last_packet_in_frame =
+ !RtpEndsWithFragment(aggregation_header);
+
+ parsed->video_header.frame_type =
+ RtpStartsNewCodedVideoSequence(aggregation_header)
+ ? VideoFrameType::kVideoFrameKey
+ : VideoFrameType::kVideoFrameDelta;
+ return parsed;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/video_rtp_depacketizer_av1.h b/third_party/libwebrtc/modules/rtp_rtcp/source/video_rtp_depacketizer_av1.h
new file mode 100644
index 0000000000..ac8c7e6d11
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/video_rtp_depacketizer_av1.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_RTP_RTCP_SOURCE_VIDEO_RTP_DEPACKETIZER_AV1_H_
+#define MODULES_RTP_RTCP_SOURCE_VIDEO_RTP_DEPACKETIZER_AV1_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "absl/types/optional.h"
+#include "api/array_view.h"
+#include "api/scoped_refptr.h"
+#include "api/video/encoded_image.h"
+#include "modules/rtp_rtcp/source/video_rtp_depacketizer.h"
+#include "rtc_base/copy_on_write_buffer.h"
+
+namespace webrtc {
+
+class VideoRtpDepacketizerAv1 : public VideoRtpDepacketizer {
+ public:
+ VideoRtpDepacketizerAv1() = default;
+ VideoRtpDepacketizerAv1(const VideoRtpDepacketizerAv1&) = delete;
+ VideoRtpDepacketizerAv1& operator=(const VideoRtpDepacketizerAv1&) = delete;
+ ~VideoRtpDepacketizerAv1() override = default;
+
+ rtc::scoped_refptr<EncodedImageBuffer> AssembleFrame(
+ rtc::ArrayView<const rtc::ArrayView<const uint8_t>> rtp_payloads)
+ override;
+
+ absl::optional<ParsedRtpPayload> Parse(
+ rtc::CopyOnWriteBuffer rtp_payload) override;
+};
+
+} // namespace webrtc
+#endif // MODULES_RTP_RTCP_SOURCE_VIDEO_RTP_DEPACKETIZER_AV1_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/video_rtp_depacketizer_av1_unittest.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/video_rtp_depacketizer_av1_unittest.cc
new file mode 100644
index 0000000000..e9ad1a1b8e
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/video_rtp_depacketizer_av1_unittest.cc
@@ -0,0 +1,392 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/video_rtp_depacketizer_av1.h"
+
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+using ::testing::ElementsAre;
+
+// Signals the number of OBU elements (fragments) in the packet.
+constexpr uint8_t kObuCountOne = 0b00'01'0000;
+
+constexpr uint8_t kObuHeaderSequenceHeader = 0b0'0001'000;
+constexpr uint8_t kObuHeaderFrame = 0b0'0110'000;
+
+constexpr uint8_t kObuHeaderHasSize = 0b0'0000'010;
+
+TEST(VideoRtpDepacketizerAv1Test, ParsePassFullRtpPayloadAsCodecPayload) {
+ const uint8_t packet[] = {(uint8_t{1} << 7) | kObuCountOne, 1, 2, 3, 4};
+ rtc::CopyOnWriteBuffer rtp_payload(packet);
+ VideoRtpDepacketizerAv1 depacketizer;
+ absl::optional<VideoRtpDepacketizer::ParsedRtpPayload> parsed =
+ depacketizer.Parse(rtp_payload);
+ ASSERT_TRUE(parsed);
+ EXPECT_EQ(parsed->video_payload.size(), sizeof(packet));
+ EXPECT_TRUE(parsed->video_payload.cdata() == rtp_payload.cdata());
+}
+
+TEST(VideoRtpDepacketizerAv1Test,
+ ParseTreatsContinuationFlagAsNotBeginningOfFrame) {
+ const uint8_t packet[] = {
+ (uint8_t{1} << 7) | kObuCountOne,
+ kObuHeaderFrame}; // Value doesn't matter since it is a
+ // continuation of the OBU from previous packet.
+ VideoRtpDepacketizerAv1 depacketizer;
+ absl::optional<VideoRtpDepacketizer::ParsedRtpPayload> parsed =
+ depacketizer.Parse(rtc::CopyOnWriteBuffer(packet));
+ ASSERT_TRUE(parsed);
+ EXPECT_FALSE(parsed->video_header.is_first_packet_in_frame);
+}
+
+TEST(VideoRtpDepacketizerAv1Test,
+ ParseTreatsNoContinuationFlagAsBeginningOfFrame) {
+ const uint8_t packet[] = {(uint8_t{0} << 7) | kObuCountOne, kObuHeaderFrame};
+ VideoRtpDepacketizerAv1 depacketizer;
+ absl::optional<VideoRtpDepacketizer::ParsedRtpPayload> parsed =
+ depacketizer.Parse(rtc::CopyOnWriteBuffer(packet));
+ ASSERT_TRUE(parsed);
+ EXPECT_TRUE(parsed->video_header.is_first_packet_in_frame);
+}
+
+TEST(VideoRtpDepacketizerAv1Test, ParseTreatsWillContinueFlagAsNotEndOfFrame) {
+ const uint8_t packet[] = {(uint8_t{1} << 6) | kObuCountOne, kObuHeaderFrame};
+ rtc::CopyOnWriteBuffer rtp_payload(packet);
+ VideoRtpDepacketizerAv1 depacketizer;
+ absl::optional<VideoRtpDepacketizer::ParsedRtpPayload> parsed =
+ depacketizer.Parse(rtp_payload);
+ ASSERT_TRUE(parsed);
+ EXPECT_FALSE(parsed->video_header.is_last_packet_in_frame);
+}
+
+TEST(VideoRtpDepacketizerAv1Test, ParseTreatsNoWillContinueFlagAsEndOfFrame) {
+ const uint8_t packet[] = {(uint8_t{0} << 6) | kObuCountOne, kObuHeaderFrame};
+ VideoRtpDepacketizerAv1 depacketizer;
+ absl::optional<VideoRtpDepacketizer::ParsedRtpPayload> parsed =
+ depacketizer.Parse(rtc::CopyOnWriteBuffer(packet));
+ ASSERT_TRUE(parsed);
+ EXPECT_TRUE(parsed->video_header.is_last_packet_in_frame);
+}
+
+TEST(VideoRtpDepacketizerAv1Test,
+     ParseUsesNewCodedVideoSequenceBitAsKeyFrameIndicator) {
+ const uint8_t packet[] = {(uint8_t{1} << 3) | kObuCountOne,
+ kObuHeaderSequenceHeader};
+ VideoRtpDepacketizerAv1 depacketizer;
+ absl::optional<VideoRtpDepacketizer::ParsedRtpPayload> parsed =
+ depacketizer.Parse(rtc::CopyOnWriteBuffer(packet));
+ ASSERT_TRUE(parsed);
+ EXPECT_TRUE(parsed->video_header.is_first_packet_in_frame);
+ EXPECT_TRUE(parsed->video_header.frame_type ==
+ VideoFrameType::kVideoFrameKey);
+}
+
+TEST(VideoRtpDepacketizerAv1Test,
+     ParseUsesUnsetNewCodedVideoSequenceBitAsDeltaFrameIndicator) {
+ const uint8_t packet[] = {(uint8_t{0} << 3) | kObuCountOne,
+ kObuHeaderSequenceHeader};
+ VideoRtpDepacketizerAv1 depacketizer;
+ absl::optional<VideoRtpDepacketizer::ParsedRtpPayload> parsed =
+ depacketizer.Parse(rtc::CopyOnWriteBuffer(packet));
+ ASSERT_TRUE(parsed);
+ EXPECT_TRUE(parsed->video_header.is_first_packet_in_frame);
+ EXPECT_TRUE(parsed->video_header.frame_type ==
+ VideoFrameType::kVideoFrameDelta);
+}
+
+TEST(VideoRtpDepacketizerAv1Test,
+ ParseRejectsPacketWithNewCVSAndContinuationFlagsBothSet) {
+ const uint8_t packet[] = {0b10'00'1000 | kObuCountOne,
+ kObuHeaderSequenceHeader};
+ VideoRtpDepacketizerAv1 depacketizer;
+ ASSERT_FALSE(depacketizer.Parse(rtc::CopyOnWriteBuffer(packet)));
+}
+
+TEST(VideoRtpDepacketizerAv1Test, AssembleFrameSetsOBUPayloadSizeWhenAbsent) {
+ const uint8_t payload1[] = {0b00'01'0000, // aggregation header
+ 0b0'0110'000, // / Frame
+ 20, 30, 40}; // \ OBU
+ rtc::ArrayView<const uint8_t> payloads[] = {payload1};
+ auto frame = VideoRtpDepacketizerAv1().AssembleFrame(payloads);
+ ASSERT_TRUE(frame);
+ rtc::ArrayView<const uint8_t> frame_view(*frame);
+ EXPECT_TRUE(frame_view[0] & kObuHeaderHasSize);
+ EXPECT_EQ(frame_view[1], 3);
+}
+
+TEST(VideoRtpDepacketizerAv1Test, AssembleFrameSetsOBUPayloadSizeWhenPresent) {
+ const uint8_t payload1[] = {0b00'01'0000, // aggregation header
+ 0b0'0110'010, // / Frame OBU header
+ 3, // obu_size
+ 20,
+ 30,
+ 40}; // \ obu_payload
+ rtc::ArrayView<const uint8_t> payloads[] = {payload1};
+ auto frame = VideoRtpDepacketizerAv1().AssembleFrame(payloads);
+ ASSERT_TRUE(frame);
+ rtc::ArrayView<const uint8_t> frame_view(*frame);
+ EXPECT_TRUE(frame_view[0] & kObuHeaderHasSize);
+ EXPECT_EQ(frame_view[1], 3);
+}
+
+TEST(VideoRtpDepacketizerAv1Test,
+ AssembleFrameSetsOBUPayloadSizeAfterExtensionWhenAbsent) {
+ const uint8_t payload1[] = {0b00'01'0000, // aggregation header
+ 0b0'0110'100, // / Frame
+ 0b010'01'000, // | extension_header
+ 20, 30, 40}; // \ OBU
+ rtc::ArrayView<const uint8_t> payloads[] = {payload1};
+ auto frame = VideoRtpDepacketizerAv1().AssembleFrame(payloads);
+ ASSERT_TRUE(frame);
+ rtc::ArrayView<const uint8_t> frame_view(*frame);
+ EXPECT_TRUE(frame_view[0] & kObuHeaderHasSize);
+ EXPECT_EQ(frame_view[2], 3);
+}
+
+TEST(VideoRtpDepacketizerAv1Test,
+ AssembleFrameSetsOBUPayloadSizeAfterExtensionWhenPresent) {
+ const uint8_t payload1[] = {0b00'01'0000, // aggregation header
+ 0b0'0110'110, // / Frame OBU header
+ 0b010'01'000, // | extension_header
+ 3, // | obu_size
+ 20,
+ 30,
+ 40}; // \ obu_payload
+ rtc::ArrayView<const uint8_t> payloads[] = {payload1};
+ auto frame = VideoRtpDepacketizerAv1().AssembleFrame(payloads);
+ ASSERT_TRUE(frame);
+ rtc::ArrayView<const uint8_t> frame_view(*frame);
+ EXPECT_TRUE(frame_view[0] & kObuHeaderHasSize);
+ EXPECT_EQ(frame_view[2], 3);
+}
+
+TEST(VideoRtpDepacketizerAv1Test, AssembleFrameFromOnePacketWithOneObu) {
+ const uint8_t payload1[] = {0b00'01'0000, // aggregation header
+ 0b0'0110'000, // / Frame
+ 20}; // \ OBU
+ rtc::ArrayView<const uint8_t> payloads[] = {payload1};
+ auto frame = VideoRtpDepacketizerAv1().AssembleFrame(payloads);
+ ASSERT_TRUE(frame);
+ EXPECT_THAT(rtc::ArrayView<const uint8_t>(*frame),
+ ElementsAre(0b0'0110'010, 1, 20));
+}
+
+TEST(VideoRtpDepacketizerAv1Test, AssembleFrameFromOnePacketWithTwoObus) {
+ const uint8_t payload1[] = {0b00'10'0000, // aggregation header
+ 2, // / Sequence
+ 0b0'0001'000, // | Header
+ 10, // \ OBU
+ 0b0'0110'000, // / Frame
+ 20}; // \ OBU
+ rtc::ArrayView<const uint8_t> payloads[] = {payload1};
+ auto frame = VideoRtpDepacketizerAv1().AssembleFrame(payloads);
+ ASSERT_TRUE(frame);
+ EXPECT_THAT(rtc::ArrayView<const uint8_t>(*frame),
+ ElementsAre(0b0'0001'010, 1, 10, // Sequence Header OBU
+ 0b0'0110'010, 1, 20)); // Frame OBU
+}
+
+TEST(VideoRtpDepacketizerAv1Test, AssembleFrameFromTwoPacketsWithOneObu) {
+ const uint8_t payload1[] = {0b01'01'0000, // aggregation header
+ 0b0'0110'000, 20, 30};
+ const uint8_t payload2[] = {0b10'01'0000, // aggregation header
+ 40};
+ rtc::ArrayView<const uint8_t> payloads[] = {payload1, payload2};
+ auto frame = VideoRtpDepacketizerAv1().AssembleFrame(payloads);
+ ASSERT_TRUE(frame);
+ EXPECT_THAT(rtc::ArrayView<const uint8_t>(*frame),
+ ElementsAre(0b0'0110'010, 3, 20, 30, 40));
+}
+
+TEST(VideoRtpDepacketizerAv1Test, AssembleFrameFromTwoPacketsWithTwoObu) {
+ const uint8_t payload1[] = {0b01'10'0000, // aggregation header
+ 2, // / Sequence
+ 0b0'0001'000, // | Header
+ 10, // \ OBU
+ 0b0'0110'000, //
+ 20,
+ 30}; //
+ const uint8_t payload2[] = {0b10'01'0000, // aggregation header
+ 40}; //
+ rtc::ArrayView<const uint8_t> payloads[] = {payload1, payload2};
+ auto frame = VideoRtpDepacketizerAv1().AssembleFrame(payloads);
+ ASSERT_TRUE(frame);
+ EXPECT_THAT(rtc::ArrayView<const uint8_t>(*frame),
+ ElementsAre(0b0'0001'010, 1, 10, // SH
+ 0b0'0110'010, 3, 20, 30, 40)); // Frame
+}
+
+TEST(VideoRtpDepacketizerAv1Test,
+ AssembleFrameFromTwoPacketsWithManyObusSomeWithExtensions) {
+ const uint8_t payload1[] = {0b01'00'0000, // aggregation header
+ 2, // /
+ 0b0'0001'000, // | Sequence Header
+ 10, // \ OBU
+ 2, // /
+ 0b0'0101'000, // | Metadata OBU
+ 20, // \ without extension
+ 4, // /
+ 0b0'0101'100, // | Metadata OBU
+ 0b001'10'000, // | with extension
+ 20, // |
+ 30, // \ metadata payload
+ 5, // /
+ 0b0'0110'100, // | Frame OBU
+ 0b001'10'000, // | with extension
+ 40, // |
+ 50, // |
+ 60}; // |
+ const uint8_t payload2[] = {0b10'01'0000, // aggregation header
+ 70, 80, 90}; // \ tail of the frame OBU
+
+ rtc::ArrayView<const uint8_t> payloads[] = {payload1, payload2};
+ auto frame = VideoRtpDepacketizerAv1().AssembleFrame(payloads);
+ ASSERT_TRUE(frame);
+ EXPECT_THAT(rtc::ArrayView<const uint8_t>(*frame),
+ ElementsAre( // Sequence header OBU
+ 0b0'0001'010, 1, 10,
+ // Metadata OBU without extension
+ 0b0'0101'010, 1, 20,
+                  // Metadata OBU with extension
+ 0b0'0101'110, 0b001'10'000, 2, 20, 30,
+ // Frame OBU with extension
+ 0b0'0110'110, 0b001'10'000, 6, 40, 50, 60, 70, 80, 90));
+}
+
+TEST(VideoRtpDepacketizerAv1Test, AssembleFrameWithOneObuFromManyPackets) {
+ const uint8_t payload1[] = {0b01'01'0000, // aggregation header
+ 0b0'0110'000, 11, 12};
+ const uint8_t payload2[] = {0b11'01'0000, // aggregation header
+ 13, 14};
+ const uint8_t payload3[] = {0b11'01'0000, // aggregation header
+ 15, 16, 17};
+ const uint8_t payload4[] = {0b10'01'0000, // aggregation header
+ 18};
+
+ rtc::ArrayView<const uint8_t> payloads[] = {payload1, payload2, payload3,
+ payload4};
+ auto frame = VideoRtpDepacketizerAv1().AssembleFrame(payloads);
+ ASSERT_TRUE(frame);
+ EXPECT_THAT(rtc::ArrayView<const uint8_t>(*frame),
+ ElementsAre(0b0'0110'010, 8, 11, 12, 13, 14, 15, 16, 17, 18));
+}
+
+TEST(VideoRtpDepacketizerAv1Test,
+ AssembleFrameFromManyPacketsWithSomeObuBorderAligned) {
+ const uint8_t payload1[] = {0b01'10'0000, // aggregation header
+ 3, // size of the 1st fragment
+ 0b0'0011'000, // Frame header OBU
+ 11,
+ 12,
+ 0b0'0100'000, // Tile group OBU
+ 21,
+ 22,
+ 23};
+ const uint8_t payload2[] = {0b10'01'0000, // aggregation header
+ 24, 25, 26, 27};
+ // payload2 ends an OBU, payload3 starts a new one.
+ const uint8_t payload3[] = {0b01'10'0000, // aggregation header
+ 3, // size of the 1st fragment
+ 0b0'0111'000, // Redundant frame header OBU
+ 11,
+ 12,
+ 0b0'0100'000, // Tile group OBU
+ 31,
+ 32};
+ const uint8_t payload4[] = {0b10'01'0000, // aggregation header
+ 33, 34, 35, 36};
+ rtc::ArrayView<const uint8_t> payloads[] = {payload1, payload2, payload3,
+ payload4};
+ auto frame = VideoRtpDepacketizerAv1().AssembleFrame(payloads);
+ ASSERT_TRUE(frame);
+ EXPECT_THAT(rtc::ArrayView<const uint8_t>(*frame),
+ ElementsAre(0b0'0011'010, 2, 11, 12, // Frame header
+ 0b0'0100'010, 7, 21, 22, 23, 24, 25, 26, 27, //
+ 0b0'0111'010, 2, 11, 12, //
+ 0b0'0100'010, 6, 31, 32, 33, 34, 35, 36));
+}
+
+TEST(VideoRtpDepacketizerAv1Test,
+     AssembleFrameFromOnePacketOneObuPayloadSize127Bytes) {
+ uint8_t payload1[4 + 127];
+ memset(payload1, 0, sizeof(payload1));
+ payload1[0] = 0b00'00'0000; // aggregation header
+ payload1[1] = 0x80; // leb128 encoded size of 128 bytes
+ payload1[2] = 0x01; // in two bytes
+ payload1[3] = 0b0'0110'000; // obu_header with size and extension bits unset.
+ payload1[4 + 42] = 0x42;
+ rtc::ArrayView<const uint8_t> payloads[] = {payload1};
+ auto frame = VideoRtpDepacketizerAv1().AssembleFrame(payloads);
+ ASSERT_TRUE(frame);
+ EXPECT_EQ(frame->size(), 2 + 127u);
+ rtc::ArrayView<const uint8_t> frame_view(*frame);
+ EXPECT_EQ(frame_view[0], 0b0'0110'010); // obu_header with size bit set.
+ EXPECT_EQ(frame_view[1], 127); // obu payload size, 1 byte enough to encode.
+  // Check a 'random' byte from the payload is at the same 'random' offset.
+ EXPECT_EQ(frame_view[2 + 42], 0x42);
+}
+
+TEST(VideoRtpDepacketizerAv1Test,
+ AssembleFrameFromTwoPacketsOneObuPayloadSize128Bytes) {
+ uint8_t payload1[3 + 32];
+ memset(payload1, 0, sizeof(payload1));
+ payload1[0] = 0b01'00'0000; // aggregation header
+ payload1[1] = 33; // leb128 encoded size of 33 bytes in one byte
+ payload1[2] = 0b0'0110'000; // obu_header with size and extension bits unset.
+ payload1[3 + 10] = 0x10;
+ uint8_t payload2[2 + 96];
+ memset(payload2, 0, sizeof(payload2));
+ payload2[0] = 0b10'00'0000; // aggregation header
+ payload2[1] = 96; // leb128 encoded size of 96 bytes in one byte
+ payload2[2 + 20] = 0x20;
+
+ rtc::ArrayView<const uint8_t> payloads[] = {payload1, payload2};
+ auto frame = VideoRtpDepacketizerAv1().AssembleFrame(payloads);
+ ASSERT_TRUE(frame);
+ EXPECT_EQ(frame->size(), 3 + 128u);
+ rtc::ArrayView<const uint8_t> frame_view(*frame);
+ EXPECT_EQ(frame_view[0], 0b0'0110'010); // obu_header with size bit set.
+ EXPECT_EQ(frame_view[1], 0x80); // obu payload size of 128 bytes.
+  EXPECT_EQ(frame_view[2], 0x01);         // encoded in two bytes
+  // Check two 'random' bytes from the payload are at the same 'random' offsets.
+ EXPECT_EQ(frame_view[3 + 10], 0x10);
+ EXPECT_EQ(frame_view[3 + 32 + 20], 0x20);
+}
+
+TEST(VideoRtpDepacketizerAv1Test,
+ AssembleFrameFromAlmostEmptyPacketStartingAnOBU) {
+ const uint8_t payload1[] = {0b01'01'0000};
+ const uint8_t payload2[] = {0b10'01'0000, 0b0'0110'000, 10, 20, 30};
+ rtc::ArrayView<const uint8_t> payloads[] = {payload1, payload2};
+
+ auto frame = VideoRtpDepacketizerAv1().AssembleFrame(payloads);
+ ASSERT_TRUE(frame);
+ EXPECT_THAT(rtc::ArrayView<const uint8_t>(*frame),
+ ElementsAre(0b0'0110'010, 3, 10, 20, 30));
+}
+
+TEST(VideoRtpDepacketizerAv1Test,
+ AssembleFrameFromAlmostEmptyPacketFinishingAnOBU) {
+ const uint8_t payload1[] = {0b01'01'0000, 0b0'0110'000, 10, 20, 30};
+ const uint8_t payload2[] = {0b10'01'0000};
+ rtc::ArrayView<const uint8_t> payloads[] = {payload1, payload2};
+
+ auto frame = VideoRtpDepacketizerAv1().AssembleFrame(payloads);
+ ASSERT_TRUE(frame);
+ EXPECT_THAT(rtc::ArrayView<const uint8_t>(*frame),
+ ElementsAre(0b0'0110'010, 3, 10, 20, 30));
+}
+
+} // namespace
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/video_rtp_depacketizer_generic.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/video_rtp_depacketizer_generic.cc
new file mode 100644
index 0000000000..6010771318
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/video_rtp_depacketizer_generic.cc
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/video_rtp_depacketizer_generic.h"
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <utility>
+
+#include "absl/types/optional.h"
+#include "modules/rtp_rtcp/source/rtp_video_header.h"
+#include "modules/rtp_rtcp/source/video_rtp_depacketizer.h"
+#include "rtc_base/copy_on_write_buffer.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+namespace {
+constexpr uint8_t kKeyFrameBit = 0b0000'0001;
+constexpr uint8_t kFirstPacketBit = 0b0000'0010;
+// If this bit is set, there will be an extended header contained in this
+// packet. This was added later so old clients will not send this.
+constexpr uint8_t kExtendedHeaderBit = 0b0000'0100;
+
+constexpr size_t kGenericHeaderLength = 1;
+constexpr size_t kExtendedHeaderLength = 2;
+} // namespace
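+
+// Illustrative layout of the one-byte generic header (bit 0 is the LSB):
+//   bit 0: key frame, bit 1: first packet of a frame, bit 2: extended header.
+// E.g. 0x05 marks a key frame carrying the extended (picture id) header.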
+
+absl::optional<VideoRtpDepacketizer::ParsedRtpPayload>
+VideoRtpDepacketizerGeneric::Parse(rtc::CopyOnWriteBuffer rtp_payload) {
+ if (rtp_payload.size() == 0) {
+ RTC_LOG(LS_WARNING) << "Empty payload.";
+ return absl::nullopt;
+ }
+ absl::optional<ParsedRtpPayload> parsed(absl::in_place);
+ const uint8_t* payload_data = rtp_payload.cdata();
+
+ uint8_t generic_header = payload_data[0];
+ size_t offset = kGenericHeaderLength;
+
+ parsed->video_header.frame_type = (generic_header & kKeyFrameBit)
+ ? VideoFrameType::kVideoFrameKey
+ : VideoFrameType::kVideoFrameDelta;
+ parsed->video_header.is_first_packet_in_frame =
+ (generic_header & kFirstPacketBit) != 0;
+ parsed->video_header.codec = kVideoCodecGeneric;
+ parsed->video_header.width = 0;
+ parsed->video_header.height = 0;
+
+ if (generic_header & kExtendedHeaderBit) {
+ if (rtp_payload.size() < offset + kExtendedHeaderLength) {
+ RTC_LOG(LS_WARNING) << "Too short payload for generic header.";
+ return absl::nullopt;
+ }
+ parsed->video_header.video_type_header
+ .emplace<RTPVideoHeaderLegacyGeneric>()
+ .picture_id = ((payload_data[1] & 0x7F) << 8) | payload_data[2];
+ offset += kExtendedHeaderLength;
+ }
+
+ parsed->video_payload =
+ rtp_payload.Slice(offset, rtp_payload.size() - offset);
+ return parsed;
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/video_rtp_depacketizer_generic.h b/third_party/libwebrtc/modules/rtp_rtcp/source/video_rtp_depacketizer_generic.h
new file mode 100644
index 0000000000..27056da481
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/video_rtp_depacketizer_generic.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_RTP_RTCP_SOURCE_VIDEO_RTP_DEPACKETIZER_GENERIC_H_
+#define MODULES_RTP_RTCP_SOURCE_VIDEO_RTP_DEPACKETIZER_GENERIC_H_
+
+#include "absl/types/optional.h"
+#include "modules/rtp_rtcp/source/video_rtp_depacketizer.h"
+#include "rtc_base/copy_on_write_buffer.h"
+
+namespace webrtc {
+
+class VideoRtpDepacketizerGeneric : public VideoRtpDepacketizer {
+ public:
+ ~VideoRtpDepacketizerGeneric() override = default;
+
+ absl::optional<ParsedRtpPayload> Parse(
+ rtc::CopyOnWriteBuffer rtp_payload) override;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_RTP_RTCP_SOURCE_VIDEO_RTP_DEPACKETIZER_GENERIC_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/video_rtp_depacketizer_generic_unittest.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/video_rtp_depacketizer_generic_unittest.cc
new file mode 100644
index 0000000000..860ddab4fd
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/video_rtp_depacketizer_generic_unittest.cc
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/video_rtp_depacketizer_generic.h"
+
+#include <stdint.h>
+
+#include "absl/types/optional.h"
+#include "rtc_base/copy_on_write_buffer.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+using ::testing::SizeIs;
+
+TEST(VideoRtpDepacketizerGeneric, NonExtendedHeaderNoFrameId) {
+ const size_t kRtpPayloadSize = 10;
+ const uint8_t kPayload[kRtpPayloadSize] = {0x01};
+ rtc::CopyOnWriteBuffer rtp_payload(kPayload);
+
+ VideoRtpDepacketizerGeneric depacketizer;
+ absl::optional<VideoRtpDepacketizer::ParsedRtpPayload> parsed =
+ depacketizer.Parse(rtp_payload);
+
+ ASSERT_TRUE(parsed);
+ EXPECT_EQ(parsed->video_header.generic, absl::nullopt);
+ EXPECT_THAT(parsed->video_payload, SizeIs(kRtpPayloadSize - 1));
+}
+
+TEST(VideoRtpDepacketizerGeneric, ExtendedHeaderParsesFrameId) {
+ const size_t kRtpPayloadSize = 10;
+ const uint8_t kPayload[kRtpPayloadSize] = {0x05, 0x13, 0x37};
+ rtc::CopyOnWriteBuffer rtp_payload(kPayload);
+
+ VideoRtpDepacketizerGeneric depacketizer;
+ absl::optional<VideoRtpDepacketizer::ParsedRtpPayload> parsed =
+ depacketizer.Parse(rtp_payload);
+
+ ASSERT_TRUE(parsed);
+ const auto* generic_header = absl::get_if<RTPVideoHeaderLegacyGeneric>(
+ &parsed->video_header.video_type_header);
+ ASSERT_TRUE(generic_header);
+ EXPECT_EQ(generic_header->picture_id, 0x1337);
+ EXPECT_THAT(parsed->video_payload, SizeIs(kRtpPayloadSize - 3));
+}
+
+TEST(VideoRtpDepacketizerGeneric, PassRtpPayloadAsVideoPayload) {
+ const uint8_t kPayload[] = {0x01, 0x25, 0x52};
+ rtc::CopyOnWriteBuffer rtp_payload(kPayload);
+
+ VideoRtpDepacketizerGeneric depacketizer;
+ absl::optional<VideoRtpDepacketizer::ParsedRtpPayload> parsed =
+ depacketizer.Parse(rtp_payload);
+
+ ASSERT_TRUE(parsed);
+  // Check there was no memcpy involved by verifying that the returned and
+  // original buffers point to the same memory.
+ EXPECT_EQ(parsed->video_payload.cdata(), rtp_payload.cdata() + 1);
+}
+
+} // namespace
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/video_rtp_depacketizer_h264.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/video_rtp_depacketizer_h264.cc
new file mode 100644
index 0000000000..ee4d744578
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/video_rtp_depacketizer_h264.cc
@@ -0,0 +1,315 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/video_rtp_depacketizer_h264.h"
+
+#include <cstddef>
+#include <cstdint>
+#include <utility>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "absl/types/variant.h"
+#include "common_video/h264/h264_common.h"
+#include "common_video/h264/pps_parser.h"
+#include "common_video/h264/sps_parser.h"
+#include "common_video/h264/sps_vui_rewriter.h"
+#include "modules/rtp_rtcp/source/byte_io.h"
+#include "modules/rtp_rtcp/source/video_rtp_depacketizer.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/copy_on_write_buffer.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+namespace {
+
+constexpr size_t kNalHeaderSize = 1;
+constexpr size_t kFuAHeaderSize = 2;
+constexpr size_t kLengthFieldSize = 2;
+constexpr size_t kStapAHeaderSize = kNalHeaderSize + kLengthFieldSize;
+
+// Bit masks for FU (A and B) indicators.
+enum NalDefs : uint8_t { kFBit = 0x80, kNriMask = 0x60, kTypeMask = 0x1F };
+
+// Bit masks for FU (A and B) headers.
+enum FuDefs : uint8_t { kSBit = 0x80, kEBit = 0x40, kRBit = 0x20 };
+
+// TODO(pbos): Avoid parsing this here as well as inside the jitter buffer.
+bool ParseStapAStartOffsets(const uint8_t* nalu_ptr,
+ size_t length_remaining,
+ std::vector<size_t>* offsets) {
+ size_t offset = 0;
+ while (length_remaining > 0) {
+ // Buffer doesn't contain room for additional nalu length.
+ if (length_remaining < sizeof(uint16_t))
+ return false;
+ uint16_t nalu_size = ByteReader<uint16_t>::ReadBigEndian(nalu_ptr);
+ nalu_ptr += sizeof(uint16_t);
+ length_remaining -= sizeof(uint16_t);
+ if (nalu_size > length_remaining)
+ return false;
+ nalu_ptr += nalu_size;
+ length_remaining -= nalu_size;
+
+ offsets->push_back(offset + kStapAHeaderSize);
+ offset += kLengthFieldSize + nalu_size;
+ }
+ return true;
+}
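+
+// Worked example (illustrative): for a STAP-A carrying two NALUs of sizes 2
+// and 3, the offsets produced above are {3, 7}; each points just past the
+// 2-byte length field that precedes the NALU.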
+
+absl::optional<VideoRtpDepacketizer::ParsedRtpPayload> ProcessStapAOrSingleNalu(
+ rtc::CopyOnWriteBuffer rtp_payload) {
+ const uint8_t* const payload_data = rtp_payload.cdata();
+ absl::optional<VideoRtpDepacketizer::ParsedRtpPayload> parsed_payload(
+ absl::in_place);
+ bool modified_buffer = false;
+ parsed_payload->video_payload = rtp_payload;
+ parsed_payload->video_header.width = 0;
+ parsed_payload->video_header.height = 0;
+ parsed_payload->video_header.codec = kVideoCodecH264;
+ parsed_payload->video_header.simulcastIdx = 0;
+ parsed_payload->video_header.is_first_packet_in_frame = true;
+ auto& h264_header = parsed_payload->video_header.video_type_header
+ .emplace<RTPVideoHeaderH264>();
+
+ const uint8_t* nalu_start = payload_data + kNalHeaderSize;
+ const size_t nalu_length = rtp_payload.size() - kNalHeaderSize;
+ uint8_t nal_type = payload_data[0] & kTypeMask;
+ std::vector<size_t> nalu_start_offsets;
+ if (nal_type == H264::NaluType::kStapA) {
+ // Skip the StapA header (StapA NAL type + length).
+ if (rtp_payload.size() <= kStapAHeaderSize) {
+ RTC_LOG(LS_ERROR) << "StapA header truncated.";
+ return absl::nullopt;
+ }
+
+ if (!ParseStapAStartOffsets(nalu_start, nalu_length, &nalu_start_offsets)) {
+ RTC_LOG(LS_ERROR) << "StapA packet with incorrect NALU packet lengths.";
+ return absl::nullopt;
+ }
+
+ h264_header.packetization_type = kH264StapA;
+ nal_type = payload_data[kStapAHeaderSize] & kTypeMask;
+ } else {
+ h264_header.packetization_type = kH264SingleNalu;
+ nalu_start_offsets.push_back(0);
+ }
+ h264_header.nalu_type = nal_type;
+ parsed_payload->video_header.frame_type = VideoFrameType::kVideoFrameDelta;
+
+ nalu_start_offsets.push_back(rtp_payload.size() +
+ kLengthFieldSize); // End offset.
+ for (size_t i = 0; i < nalu_start_offsets.size() - 1; ++i) {
+ size_t start_offset = nalu_start_offsets[i];
+    // The end offset is actually the start offset of the next unit, excluding
+    // the length field, so remove that from this unit's length.
+ size_t end_offset = nalu_start_offsets[i + 1] - kLengthFieldSize;
+ if (end_offset - start_offset < H264::kNaluTypeSize) {
+ RTC_LOG(LS_ERROR) << "STAP-A packet too short";
+ return absl::nullopt;
+ }
+
+ NaluInfo nalu;
+ nalu.type = payload_data[start_offset] & kTypeMask;
+ nalu.sps_id = -1;
+ nalu.pps_id = -1;
+ start_offset += H264::kNaluTypeSize;
+
+ switch (nalu.type) {
+ case H264::NaluType::kSps: {
+        // Check if VUI is present in SPS and if it needs to be modified to
+        // avoid excessive decoder latency.
+
+ // Copy any previous data first (likely just the first header).
+ rtc::Buffer output_buffer;
+ if (start_offset)
+ output_buffer.AppendData(payload_data, start_offset);
+
+ absl::optional<SpsParser::SpsState> sps;
+
+ SpsVuiRewriter::ParseResult result = SpsVuiRewriter::ParseAndRewriteSps(
+ &payload_data[start_offset], end_offset - start_offset, &sps,
+ nullptr, &output_buffer, SpsVuiRewriter::Direction::kIncoming);
+
+ if (result == SpsVuiRewriter::ParseResult::kVuiRewritten) {
+ if (modified_buffer) {
+ RTC_LOG(LS_WARNING)
+              << "More than one H264 SPS NAL unit needing "
+ "rewriting found within a single STAP-A packet. "
+ "Keeping the first and rewriting the last.";
+ }
+
+ // Rewrite length field to new SPS size.
+ if (h264_header.packetization_type == kH264StapA) {
+ size_t length_field_offset =
+ start_offset - (H264::kNaluTypeSize + kLengthFieldSize);
+ // Stap-A Length includes payload data and type header.
+ size_t rewritten_size =
+ output_buffer.size() - start_offset + H264::kNaluTypeSize;
+ ByteWriter<uint16_t>::WriteBigEndian(
+ &output_buffer[length_field_offset], rewritten_size);
+ }
+
+ parsed_payload->video_payload.SetData(output_buffer.data(),
+ output_buffer.size());
+ // Append rest of packet.
+ parsed_payload->video_payload.AppendData(
+ &payload_data[end_offset],
+ nalu_length + kNalHeaderSize - end_offset);
+
+ modified_buffer = true;
+ }
+
+ if (sps) {
+ parsed_payload->video_header.width = sps->width;
+ parsed_payload->video_header.height = sps->height;
+ nalu.sps_id = sps->id;
+ } else {
+ RTC_LOG(LS_WARNING) << "Failed to parse SPS id from SPS slice.";
+ }
+ parsed_payload->video_header.frame_type =
+ VideoFrameType::kVideoFrameKey;
+ break;
+ }
+ case H264::NaluType::kPps: {
+ uint32_t pps_id;
+ uint32_t sps_id;
+ if (PpsParser::ParsePpsIds(&payload_data[start_offset],
+ end_offset - start_offset, &pps_id,
+ &sps_id)) {
+ nalu.pps_id = pps_id;
+ nalu.sps_id = sps_id;
+ } else {
+ RTC_LOG(LS_WARNING)
+ << "Failed to parse PPS id and SPS id from PPS slice.";
+ }
+ break;
+ }
+ case H264::NaluType::kIdr:
+ parsed_payload->video_header.frame_type =
+ VideoFrameType::kVideoFrameKey;
+ [[fallthrough]];
+ case H264::NaluType::kSlice: {
+ absl::optional<uint32_t> pps_id = PpsParser::ParsePpsIdFromSlice(
+ &payload_data[start_offset], end_offset - start_offset);
+ if (pps_id) {
+ nalu.pps_id = *pps_id;
+ } else {
+ RTC_LOG(LS_WARNING) << "Failed to parse PPS id from slice of type: "
+ << static_cast<int>(nalu.type);
+ }
+ break;
+ }
+ // Slices below don't contain SPS or PPS ids.
+ case H264::NaluType::kAud:
+ case H264::NaluType::kEndOfSequence:
+ case H264::NaluType::kEndOfStream:
+ case H264::NaluType::kFiller:
+ case H264::NaluType::kSei:
+ break;
+ case H264::NaluType::kStapA:
+ case H264::NaluType::kFuA:
+ RTC_LOG(LS_WARNING) << "Unexpected STAP-A or FU-A received.";
+ return absl::nullopt;
+ }
+
+ if (h264_header.nalus_length == kMaxNalusPerPacket) {
+ RTC_LOG(LS_WARNING)
+ << "Received packet containing more than " << kMaxNalusPerPacket
+        << " NAL units. Will not keep track of SPS and PPS ids for all of them.";
+ } else {
+ h264_header.nalus[h264_header.nalus_length++] = nalu;
+ }
+ }
+
+ return parsed_payload;
+}
+
+absl::optional<VideoRtpDepacketizer::ParsedRtpPayload> ParseFuaNalu(
+ rtc::CopyOnWriteBuffer rtp_payload) {
+ if (rtp_payload.size() < kFuAHeaderSize) {
+ RTC_LOG(LS_ERROR) << "FU-A NAL units truncated.";
+ return absl::nullopt;
+ }
+ absl::optional<VideoRtpDepacketizer::ParsedRtpPayload> parsed_payload(
+ absl::in_place);
+ uint8_t fnri = rtp_payload.cdata()[0] & (kFBit | kNriMask);
+ uint8_t original_nal_type = rtp_payload.cdata()[1] & kTypeMask;
+ bool first_fragment = (rtp_payload.cdata()[1] & kSBit) > 0;
+ NaluInfo nalu;
+ nalu.type = original_nal_type;
+ nalu.sps_id = -1;
+ nalu.pps_id = -1;
+ if (first_fragment) {
+ absl::optional<uint32_t> pps_id =
+ PpsParser::ParsePpsIdFromSlice(rtp_payload.cdata() + 2 * kNalHeaderSize,
+ rtp_payload.size() - 2 * kNalHeaderSize);
+ if (pps_id) {
+ nalu.pps_id = *pps_id;
+ } else {
+ RTC_LOG(LS_WARNING)
+ << "Failed to parse PPS from first fragment of FU-A NAL "
+ "unit with original type: "
+ << static_cast<int>(nalu.type);
+ }
+ uint8_t original_nal_header = fnri | original_nal_type;
+ rtp_payload =
+ rtp_payload.Slice(kNalHeaderSize, rtp_payload.size() - kNalHeaderSize);
+ rtp_payload.MutableData()[0] = original_nal_header;
+ parsed_payload->video_payload = std::move(rtp_payload);
+ } else {
+ parsed_payload->video_payload =
+ rtp_payload.Slice(kFuAHeaderSize, rtp_payload.size() - kFuAHeaderSize);
+ }
+
+ if (original_nal_type == H264::NaluType::kIdr) {
+ parsed_payload->video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ } else {
+ parsed_payload->video_header.frame_type = VideoFrameType::kVideoFrameDelta;
+ }
+ parsed_payload->video_header.width = 0;
+ parsed_payload->video_header.height = 0;
+ parsed_payload->video_header.codec = kVideoCodecH264;
+ parsed_payload->video_header.simulcastIdx = 0;
+ parsed_payload->video_header.is_first_packet_in_frame = first_fragment;
+ auto& h264_header = parsed_payload->video_header.video_type_header
+ .emplace<RTPVideoHeaderH264>();
+ h264_header.packetization_type = kH264FuA;
+ h264_header.nalu_type = original_nal_type;
+ if (first_fragment) {
+ h264_header.nalus[h264_header.nalus_length] = nalu;
+ h264_header.nalus_length = 1;
+ }
+ return parsed_payload;
+}
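+
+// Worked example (illustrative): an FU-A indicator 0x7C (F=0, NRI=3, type 28)
+// followed by the FU header 0x85 (S=1, E=0, type 5/IDR) makes the first
+// fragment carry the reconstructed NAL header 0x65 = fnri | original type.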
+
+} // namespace
+
+absl::optional<VideoRtpDepacketizer::ParsedRtpPayload>
+VideoRtpDepacketizerH264::Parse(rtc::CopyOnWriteBuffer rtp_payload) {
+ if (rtp_payload.size() == 0) {
+ RTC_LOG(LS_ERROR) << "Empty payload.";
+ return absl::nullopt;
+ }
+
+ uint8_t nal_type = rtp_payload.cdata()[0] & kTypeMask;
+
+ if (nal_type == H264::NaluType::kFuA) {
+ // Fragmented NAL units (FU-A).
+ return ParseFuaNalu(std::move(rtp_payload));
+ } else {
+    // We handle STAP-A and single NALUs the same way here. The jitter buffer
+    // will depacketize the STAP-A into NAL units later.
+ return ProcessStapAOrSingleNalu(std::move(rtp_payload));
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/video_rtp_depacketizer_h264.h b/third_party/libwebrtc/modules/rtp_rtcp/source/video_rtp_depacketizer_h264.h
new file mode 100644
index 0000000000..cbea860049
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/video_rtp_depacketizer_h264.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_RTP_RTCP_SOURCE_VIDEO_RTP_DEPACKETIZER_H264_H_
+#define MODULES_RTP_RTCP_SOURCE_VIDEO_RTP_DEPACKETIZER_H264_H_
+
+#include "absl/types/optional.h"
+#include "modules/rtp_rtcp/source/video_rtp_depacketizer.h"
+#include "rtc_base/copy_on_write_buffer.h"
+
+namespace webrtc {
+class VideoRtpDepacketizerH264 : public VideoRtpDepacketizer {
+ public:
+ ~VideoRtpDepacketizerH264() override = default;
+
+ absl::optional<ParsedRtpPayload> Parse(
+ rtc::CopyOnWriteBuffer rtp_payload) override;
+};
+} // namespace webrtc
+
+#endif // MODULES_RTP_RTCP_SOURCE_VIDEO_RTP_DEPACKETIZER_H264_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/video_rtp_depacketizer_h264_unittest.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/video_rtp_depacketizer_h264_unittest.cc
new file mode 100644
index 0000000000..d335af0244
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/video_rtp_depacketizer_h264_unittest.cc
@@ -0,0 +1,430 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/video_rtp_depacketizer_h264.h"
+
+#include <cstdint>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/array_view.h"
+#include "common_video/h264/h264_common.h"
+#include "modules/rtp_rtcp/mocks/mock_rtp_rtcp.h"
+#include "modules/rtp_rtcp/source/byte_io.h"
+#include "rtc_base/copy_on_write_buffer.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+using ::testing::Each;
+using ::testing::ElementsAre;
+using ::testing::ElementsAreArray;
+using ::testing::Eq;
+using ::testing::IsEmpty;
+using ::testing::SizeIs;
+
+enum Nalu {
+ kSlice = 1,
+ kIdr = 5,
+ kSei = 6,
+ kSps = 7,
+ kPps = 8,
+ kStapA = 24,
+ kFuA = 28
+};
+
+// Bit masks for FU (A and B) indicators.
+enum NalDefs { kFBit = 0x80, kNriMask = 0x60, kTypeMask = 0x1F };
+
+// Bit masks for FU (A and B) headers.
+enum FuDefs { kSBit = 0x80, kEBit = 0x40, kRBit = 0x20 };
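+
+// Worked example (illustrative): the first FU-A fragment of an IDR NAL unit
+// carries the FU indicator (Type=kFuA) followed by the FU header
+// kSBit | kIdr = 0x85; the last fragment carries kEBit | kIdr = 0x45.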
+
+constexpr uint8_t kOriginalSps[] = {kSps, 0x00, 0x00, 0x03, 0x03,
+ 0xF4, 0x05, 0x03, 0xC7, 0xC0};
+constexpr uint8_t kRewrittenSps[] = {kSps, 0x00, 0x00, 0x03, 0x03,
+ 0xF4, 0x05, 0x03, 0xC7, 0xE0,
+ 0x1B, 0x41, 0x10, 0x8D, 0x00};
+constexpr uint8_t kIdrOne[] = {kIdr, 0xFF, 0x00, 0x00, 0x04};
+constexpr uint8_t kIdrTwo[] = {kIdr, 0xFF, 0x00, 0x11};
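+
+// kRewrittenSps is, presumably, what the depacketizer's SPS VUI rewriting
+// step emits for kOriginalSps (VUI parameters appended); the IDR payloads
+// are expected to pass through unmodified.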
+
+TEST(VideoRtpDepacketizerH264Test, SingleNalu) {
+ uint8_t packet[2] = {0x05, 0xFF}; // F=0, NRI=0, Type=5 (IDR).
+ rtc::CopyOnWriteBuffer rtp_payload(packet);
+
+ VideoRtpDepacketizerH264 depacketizer;
+ absl::optional<VideoRtpDepacketizer::ParsedRtpPayload> parsed =
+ depacketizer.Parse(rtp_payload);
+ ASSERT_TRUE(parsed);
+
+ EXPECT_EQ(parsed->video_payload, rtp_payload);
+ EXPECT_EQ(parsed->video_header.frame_type, VideoFrameType::kVideoFrameKey);
+ EXPECT_EQ(parsed->video_header.codec, kVideoCodecH264);
+ EXPECT_TRUE(parsed->video_header.is_first_packet_in_frame);
+ const RTPVideoHeaderH264& h264 =
+ absl::get<RTPVideoHeaderH264>(parsed->video_header.video_type_header);
+ EXPECT_EQ(h264.packetization_type, kH264SingleNalu);
+ EXPECT_EQ(h264.nalu_type, kIdr);
+}
+
+TEST(VideoRtpDepacketizerH264Test, SingleNaluSpsWithResolution) {
+ uint8_t packet[] = {kSps, 0x7A, 0x00, 0x1F, 0xBC, 0xD9, 0x40, 0x50,
+ 0x05, 0xBA, 0x10, 0x00, 0x00, 0x03, 0x00, 0xC0,
+ 0x00, 0x00, 0x03, 0x2A, 0xE0, 0xF1, 0x83, 0x25};
+ rtc::CopyOnWriteBuffer rtp_payload(packet);
+
+ VideoRtpDepacketizerH264 depacketizer;
+ absl::optional<VideoRtpDepacketizer::ParsedRtpPayload> parsed =
+ depacketizer.Parse(rtp_payload);
+ ASSERT_TRUE(parsed);
+
+ EXPECT_EQ(parsed->video_payload, rtp_payload);
+ EXPECT_EQ(parsed->video_header.frame_type, VideoFrameType::kVideoFrameKey);
+ EXPECT_EQ(parsed->video_header.codec, kVideoCodecH264);
+ EXPECT_TRUE(parsed->video_header.is_first_packet_in_frame);
+ EXPECT_EQ(parsed->video_header.width, 1280u);
+ EXPECT_EQ(parsed->video_header.height, 720u);
+ const auto& h264 =
+ absl::get<RTPVideoHeaderH264>(parsed->video_header.video_type_header);
+ EXPECT_EQ(h264.packetization_type, kH264SingleNalu);
+}
+
+TEST(VideoRtpDepacketizerH264Test, StapAKey) {
+ // clang-format off
+ const NaluInfo kExpectedNalus[] = { {H264::kSps, 0, -1},
+ {H264::kPps, 1, 2},
+ {H264::kIdr, -1, 0} };
+ uint8_t packet[] = {kStapA, // F=0, NRI=0, Type=24.
+ // Length, nal header, payload.
+ 0, 0x18, kExpectedNalus[0].type,
+ 0x7A, 0x00, 0x1F, 0xBC, 0xD9, 0x40, 0x50, 0x05, 0xBA,
+ 0x10, 0x00, 0x00, 0x03, 0x00, 0xC0, 0x00, 0x00, 0x03,
+ 0x2A, 0xE0, 0xF1, 0x83, 0x25,
+ 0, 0xD, kExpectedNalus[1].type,
+ 0x69, 0xFC, 0x0, 0x0, 0x3, 0x0, 0x7, 0xFF, 0xFF, 0xFF,
+ 0xF6, 0x40,
+ 0, 0xB, kExpectedNalus[2].type,
+ 0x85, 0xB8, 0x0, 0x4, 0x0, 0x0, 0x13, 0x93, 0x12, 0x0};
+ // clang-format on
+ rtc::CopyOnWriteBuffer rtp_payload(packet);
+
+ VideoRtpDepacketizerH264 depacketizer;
+ absl::optional<VideoRtpDepacketizer::ParsedRtpPayload> parsed =
+ depacketizer.Parse(rtp_payload);
+ ASSERT_TRUE(parsed);
+
+ EXPECT_EQ(parsed->video_payload, rtp_payload);
+ EXPECT_EQ(parsed->video_header.frame_type, VideoFrameType::kVideoFrameKey);
+ EXPECT_EQ(parsed->video_header.codec, kVideoCodecH264);
+ EXPECT_TRUE(parsed->video_header.is_first_packet_in_frame);
+ const auto& h264 =
+ absl::get<RTPVideoHeaderH264>(parsed->video_header.video_type_header);
+ EXPECT_EQ(h264.packetization_type, kH264StapA);
+  // NALU type for aggregated packets is the type of the first NAL unit only.
+ EXPECT_EQ(h264.nalu_type, kSps);
+ ASSERT_EQ(h264.nalus_length, 3u);
+ for (size_t i = 0; i < h264.nalus_length; ++i) {
+ EXPECT_EQ(h264.nalus[i].type, kExpectedNalus[i].type)
+ << "Failed parsing nalu " << i;
+ EXPECT_EQ(h264.nalus[i].sps_id, kExpectedNalus[i].sps_id)
+ << "Failed parsing nalu " << i;
+ EXPECT_EQ(h264.nalus[i].pps_id, kExpectedNalus[i].pps_id)
+ << "Failed parsing nalu " << i;
+ }
+}
+
+TEST(VideoRtpDepacketizerH264Test, StapANaluSpsWithResolution) {
+ uint8_t packet[] = {kStapA, // F=0, NRI=0, Type=24.
+ // Length (2 bytes), nal header, payload.
+ 0x00, 0x19, kSps, 0x7A, 0x00, 0x1F, 0xBC, 0xD9, 0x40,
+ 0x50, 0x05, 0xBA, 0x10, 0x00, 0x00, 0x03, 0x00, 0xC0,
+ 0x00, 0x00, 0x03, 0x2A, 0xE0, 0xF1, 0x83, 0x25, 0x80,
+ 0x00, 0x03, kIdr, 0xFF, 0x00, 0x00, 0x04, kIdr, 0xFF,
+ 0x00, 0x11};
+ rtc::CopyOnWriteBuffer rtp_payload(packet);
+
+ VideoRtpDepacketizerH264 depacketizer;
+ absl::optional<VideoRtpDepacketizer::ParsedRtpPayload> parsed =
+ depacketizer.Parse(rtp_payload);
+ ASSERT_TRUE(parsed);
+
+ EXPECT_EQ(parsed->video_payload, rtp_payload);
+ EXPECT_EQ(parsed->video_header.frame_type, VideoFrameType::kVideoFrameKey);
+ EXPECT_EQ(parsed->video_header.codec, kVideoCodecH264);
+ EXPECT_TRUE(parsed->video_header.is_first_packet_in_frame);
+ EXPECT_EQ(parsed->video_header.width, 1280u);
+ EXPECT_EQ(parsed->video_header.height, 720u);
+ const auto& h264 =
+ absl::get<RTPVideoHeaderH264>(parsed->video_header.video_type_header);
+ EXPECT_EQ(h264.packetization_type, kH264StapA);
+}
+
+TEST(VideoRtpDepacketizerH264Test, EmptyStapARejected) {
+ uint8_t lone_empty_packet[] = {kStapA, 0x00, 0x00};
+ uint8_t leading_empty_packet[] = {kStapA, 0x00, 0x00, 0x00, 0x04,
+ kIdr, 0xFF, 0x00, 0x11};
+ uint8_t middle_empty_packet[] = {kStapA, 0x00, 0x03, kIdr, 0xFF, 0x00, 0x00,
+ 0x00, 0x00, 0x04, kIdr, 0xFF, 0x00, 0x11};
+ uint8_t trailing_empty_packet[] = {kStapA, 0x00, 0x03, kIdr,
+ 0xFF, 0x00, 0x00, 0x00};
+
+ VideoRtpDepacketizerH264 depacketizer;
+ EXPECT_FALSE(depacketizer.Parse(rtc::CopyOnWriteBuffer(lone_empty_packet)));
+ EXPECT_FALSE(
+ depacketizer.Parse(rtc::CopyOnWriteBuffer(leading_empty_packet)));
+ EXPECT_FALSE(depacketizer.Parse(rtc::CopyOnWriteBuffer(middle_empty_packet)));
+ EXPECT_FALSE(
+ depacketizer.Parse(rtc::CopyOnWriteBuffer(trailing_empty_packet)));
+}
+
+TEST(VideoRtpDepacketizerH264Test, DepacketizeWithRewriting) {
+ rtc::CopyOnWriteBuffer in_buffer;
+ rtc::Buffer out_buffer;
+
+ uint8_t kHeader[2] = {kStapA};
+ in_buffer.AppendData(kHeader, 1);
+ out_buffer.AppendData(kHeader, 1);
+
+ ByteWriter<uint16_t>::WriteBigEndian(kHeader, sizeof(kOriginalSps));
+ in_buffer.AppendData(kHeader, 2);
+ in_buffer.AppendData(kOriginalSps);
+ ByteWriter<uint16_t>::WriteBigEndian(kHeader, sizeof(kRewrittenSps));
+ out_buffer.AppendData(kHeader, 2);
+ out_buffer.AppendData(kRewrittenSps);
+
+ ByteWriter<uint16_t>::WriteBigEndian(kHeader, sizeof(kIdrOne));
+ in_buffer.AppendData(kHeader, 2);
+ in_buffer.AppendData(kIdrOne);
+ out_buffer.AppendData(kHeader, 2);
+ out_buffer.AppendData(kIdrOne);
+
+ ByteWriter<uint16_t>::WriteBigEndian(kHeader, sizeof(kIdrTwo));
+ in_buffer.AppendData(kHeader, 2);
+ in_buffer.AppendData(kIdrTwo);
+ out_buffer.AppendData(kHeader, 2);
+ out_buffer.AppendData(kIdrTwo);
+
+ VideoRtpDepacketizerH264 depacketizer;
+ auto parsed = depacketizer.Parse(in_buffer);
+ ASSERT_TRUE(parsed);
+ EXPECT_THAT(rtc::MakeArrayView(parsed->video_payload.cdata(),
+ parsed->video_payload.size()),
+ ElementsAreArray(out_buffer));
+}
+
+TEST(VideoRtpDepacketizerH264Test, DepacketizeWithDoubleRewriting) {
+ rtc::CopyOnWriteBuffer in_buffer;
+ rtc::Buffer out_buffer;
+
+ uint8_t kHeader[2] = {kStapA};
+ in_buffer.AppendData(kHeader, 1);
+ out_buffer.AppendData(kHeader, 1);
+
+ // First SPS will be kept...
+ ByteWriter<uint16_t>::WriteBigEndian(kHeader, sizeof(kOriginalSps));
+ in_buffer.AppendData(kHeader, 2);
+ in_buffer.AppendData(kOriginalSps);
+ out_buffer.AppendData(kHeader, 2);
+ out_buffer.AppendData(kOriginalSps);
+
+ // ...only the second one will be rewritten.
+ ByteWriter<uint16_t>::WriteBigEndian(kHeader, sizeof(kOriginalSps));
+ in_buffer.AppendData(kHeader, 2);
+ in_buffer.AppendData(kOriginalSps);
+ ByteWriter<uint16_t>::WriteBigEndian(kHeader, sizeof(kRewrittenSps));
+ out_buffer.AppendData(kHeader, 2);
+ out_buffer.AppendData(kRewrittenSps);
+
+ ByteWriter<uint16_t>::WriteBigEndian(kHeader, sizeof(kIdrOne));
+ in_buffer.AppendData(kHeader, 2);
+ in_buffer.AppendData(kIdrOne);
+ out_buffer.AppendData(kHeader, 2);
+ out_buffer.AppendData(kIdrOne);
+
+ ByteWriter<uint16_t>::WriteBigEndian(kHeader, sizeof(kIdrTwo));
+ in_buffer.AppendData(kHeader, 2);
+ in_buffer.AppendData(kIdrTwo);
+ out_buffer.AppendData(kHeader, 2);
+ out_buffer.AppendData(kIdrTwo);
+
+ VideoRtpDepacketizerH264 depacketizer;
+ auto parsed = depacketizer.Parse(in_buffer);
+ ASSERT_TRUE(parsed);
+ std::vector<uint8_t> expected_packet_payload(
+ out_buffer.data(), &out_buffer.data()[out_buffer.size()]);
+ EXPECT_THAT(rtc::MakeArrayView(parsed->video_payload.cdata(),
+ parsed->video_payload.size()),
+              ElementsAreArray(expected_packet_payload));
+}
+
+TEST(VideoRtpDepacketizerH264Test, StapADelta) {
+ uint8_t packet[16] = {kStapA, // F=0, NRI=0, Type=24.
+ // Length, nal header, payload.
+ 0, 0x02, kSlice, 0xFF, 0, 0x03, kSlice, 0xFF, 0x00, 0,
+ 0x04, kSlice, 0xFF, 0x00, 0x11};
+ rtc::CopyOnWriteBuffer rtp_payload(packet);
+
+ VideoRtpDepacketizerH264 depacketizer;
+ absl::optional<VideoRtpDepacketizer::ParsedRtpPayload> parsed =
+ depacketizer.Parse(rtp_payload);
+ ASSERT_TRUE(parsed);
+
+ EXPECT_EQ(parsed->video_payload.size(), rtp_payload.size());
+ EXPECT_EQ(parsed->video_payload.cdata(), rtp_payload.cdata());
+
+ EXPECT_EQ(parsed->video_header.frame_type, VideoFrameType::kVideoFrameDelta);
+ EXPECT_EQ(parsed->video_header.codec, kVideoCodecH264);
+ EXPECT_TRUE(parsed->video_header.is_first_packet_in_frame);
+ const RTPVideoHeaderH264& h264 =
+ absl::get<RTPVideoHeaderH264>(parsed->video_header.video_type_header);
+ EXPECT_EQ(h264.packetization_type, kH264StapA);
+  // NALU type for aggregated packets is the type of the first NAL unit only.
+ EXPECT_EQ(h264.nalu_type, kSlice);
+}
+
+TEST(VideoRtpDepacketizerH264Test, FuA) {
+ // clang-format off
+ uint8_t packet1[] = {
+ kFuA, // F=0, NRI=0, Type=28.
+ kSBit | kIdr, // FU header.
+ 0x85, 0xB8, 0x0, 0x4, 0x0, 0x0, 0x13, 0x93, 0x12, 0x0 // Payload.
+ };
+ // clang-format on
+ const uint8_t kExpected1[] = {kIdr, 0x85, 0xB8, 0x0, 0x4, 0x0,
+ 0x0, 0x13, 0x93, 0x12, 0x0};
+
+ uint8_t packet2[] = {
+ kFuA, // F=0, NRI=0, Type=28.
+ kIdr, // FU header.
+ 0x02 // Payload.
+ };
+ const uint8_t kExpected2[] = {0x02};
+
+ uint8_t packet3[] = {
+ kFuA, // F=0, NRI=0, Type=28.
+ kEBit | kIdr, // FU header.
+ 0x03 // Payload.
+ };
+ const uint8_t kExpected3[] = {0x03};
+
+ VideoRtpDepacketizerH264 depacketizer;
+ absl::optional<VideoRtpDepacketizer::ParsedRtpPayload> parsed1 =
+ depacketizer.Parse(rtc::CopyOnWriteBuffer(packet1));
+ ASSERT_TRUE(parsed1);
+  // We expect the first packet to be one byte shorter than the input: the
+  // two-byte FU indicator/FU header pair is replaced by the reconstructed
+  // one-byte NAL header.
+ EXPECT_THAT(rtc::MakeArrayView(parsed1->video_payload.cdata(),
+ parsed1->video_payload.size()),
+ ElementsAreArray(kExpected1));
+ EXPECT_EQ(parsed1->video_header.frame_type, VideoFrameType::kVideoFrameKey);
+ EXPECT_EQ(parsed1->video_header.codec, kVideoCodecH264);
+ EXPECT_TRUE(parsed1->video_header.is_first_packet_in_frame);
+ {
+ const RTPVideoHeaderH264& h264 =
+ absl::get<RTPVideoHeaderH264>(parsed1->video_header.video_type_header);
+ EXPECT_EQ(h264.packetization_type, kH264FuA);
+ EXPECT_EQ(h264.nalu_type, kIdr);
+ ASSERT_EQ(h264.nalus_length, 1u);
+ EXPECT_EQ(h264.nalus[0].type, static_cast<H264::NaluType>(kIdr));
+ EXPECT_EQ(h264.nalus[0].sps_id, -1);
+ EXPECT_EQ(h264.nalus[0].pps_id, 0);
+ }
+
+  // Subsequent packets are two bytes shorter: the FU indicator and FU header
+  // are stripped, since their payloads are only appended onto the first
+  // packet.
+ auto parsed2 = depacketizer.Parse(rtc::CopyOnWriteBuffer(packet2));
+ EXPECT_THAT(rtc::MakeArrayView(parsed2->video_payload.cdata(),
+ parsed2->video_payload.size()),
+ ElementsAreArray(kExpected2));
+ EXPECT_FALSE(parsed2->video_header.is_first_packet_in_frame);
+ EXPECT_EQ(parsed2->video_header.codec, kVideoCodecH264);
+ {
+ const RTPVideoHeaderH264& h264 =
+ absl::get<RTPVideoHeaderH264>(parsed2->video_header.video_type_header);
+ EXPECT_EQ(h264.packetization_type, kH264FuA);
+ EXPECT_EQ(h264.nalu_type, kIdr);
+ // NALU info is only expected for the first FU-A packet.
+ EXPECT_EQ(h264.nalus_length, 0u);
+ }
+
+ auto parsed3 = depacketizer.Parse(rtc::CopyOnWriteBuffer(packet3));
+ EXPECT_THAT(rtc::MakeArrayView(parsed3->video_payload.cdata(),
+ parsed3->video_payload.size()),
+ ElementsAreArray(kExpected3));
+ EXPECT_FALSE(parsed3->video_header.is_first_packet_in_frame);
+ EXPECT_EQ(parsed3->video_header.codec, kVideoCodecH264);
+ {
+ const RTPVideoHeaderH264& h264 =
+ absl::get<RTPVideoHeaderH264>(parsed3->video_header.video_type_header);
+ EXPECT_EQ(h264.packetization_type, kH264FuA);
+ EXPECT_EQ(h264.nalu_type, kIdr);
+ // NALU info is only expected for the first FU-A packet.
+ ASSERT_EQ(h264.nalus_length, 0u);
+ }
+}
+
+TEST(VideoRtpDepacketizerH264Test, EmptyPayload) {
+ rtc::CopyOnWriteBuffer empty;
+ VideoRtpDepacketizerH264 depacketizer;
+ EXPECT_FALSE(depacketizer.Parse(empty));
+}
+
+TEST(VideoRtpDepacketizerH264Test, TruncatedFuaNalu) {
+ const uint8_t kPayload[] = {0x9c};
+ VideoRtpDepacketizerH264 depacketizer;
+ EXPECT_FALSE(depacketizer.Parse(rtc::CopyOnWriteBuffer(kPayload)));
+}
+
+TEST(VideoRtpDepacketizerH264Test, TruncatedSingleStapANalu) {
+ const uint8_t kPayload[] = {0xd8, 0x27};
+ VideoRtpDepacketizerH264 depacketizer;
+ EXPECT_FALSE(depacketizer.Parse(rtc::CopyOnWriteBuffer(kPayload)));
+}
+
+TEST(VideoRtpDepacketizerH264Test, StapAPacketWithTruncatedNalUnits) {
+ const uint8_t kPayload[] = {0x58, 0xCB, 0xED, 0xDF};
+ VideoRtpDepacketizerH264 depacketizer;
+ EXPECT_FALSE(depacketizer.Parse(rtc::CopyOnWriteBuffer(kPayload)));
+}
+
+TEST(VideoRtpDepacketizerH264Test, TruncationJustAfterSingleStapANalu) {
+ const uint8_t kPayload[] = {0x38, 0x27, 0x27};
+ VideoRtpDepacketizerH264 depacketizer;
+ EXPECT_FALSE(depacketizer.Parse(rtc::CopyOnWriteBuffer(kPayload)));
+}
+
+TEST(VideoRtpDepacketizerH264Test, ShortSpsPacket) {
+ const uint8_t kPayload[] = {0x27, 0x80, 0x00};
+ VideoRtpDepacketizerH264 depacketizer;
+ EXPECT_TRUE(depacketizer.Parse(rtc::CopyOnWriteBuffer(kPayload)));
+}
+
+TEST(VideoRtpDepacketizerH264Test, SeiPacket) {
+ const uint8_t kPayload[] = {
+ kSei, // F=0, NRI=0, Type=6.
+ 0x03, 0x03, 0x03, 0x03 // Payload.
+ };
+ VideoRtpDepacketizerH264 depacketizer;
+ auto parsed = depacketizer.Parse(rtc::CopyOnWriteBuffer(kPayload));
+ ASSERT_TRUE(parsed);
+ const RTPVideoHeaderH264& h264 =
+ absl::get<RTPVideoHeaderH264>(parsed->video_header.video_type_header);
+ EXPECT_EQ(parsed->video_header.frame_type, VideoFrameType::kVideoFrameDelta);
+ EXPECT_EQ(h264.packetization_type, kH264SingleNalu);
+ EXPECT_EQ(h264.nalu_type, kSei);
+ ASSERT_EQ(h264.nalus_length, 1u);
+ EXPECT_EQ(h264.nalus[0].type, static_cast<H264::NaluType>(kSei));
+ EXPECT_EQ(h264.nalus[0].sps_id, -1);
+ EXPECT_EQ(h264.nalus[0].pps_id, -1);
+}
+
+} // namespace
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/video_rtp_depacketizer_raw.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/video_rtp_depacketizer_raw.cc
new file mode 100644
index 0000000000..81b4e4ab53
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/video_rtp_depacketizer_raw.cc
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/video_rtp_depacketizer_raw.h"
+
+#include <utility>
+
+#include "absl/types/optional.h"
+#include "modules/rtp_rtcp/source/video_rtp_depacketizer.h"
+#include "rtc_base/copy_on_write_buffer.h"
+
+namespace webrtc {
+
+absl::optional<VideoRtpDepacketizer::ParsedRtpPayload>
+VideoRtpDepacketizerRaw::Parse(rtc::CopyOnWriteBuffer rtp_payload) {
+ absl::optional<ParsedRtpPayload> parsed(absl::in_place);
+ parsed->video_payload = std::move(rtp_payload);
+ return parsed;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/video_rtp_depacketizer_raw.h b/third_party/libwebrtc/modules/rtp_rtcp/source/video_rtp_depacketizer_raw.h
new file mode 100644
index 0000000000..59c8695352
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/video_rtp_depacketizer_raw.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_RTP_RTCP_SOURCE_VIDEO_RTP_DEPACKETIZER_RAW_H_
+#define MODULES_RTP_RTCP_SOURCE_VIDEO_RTP_DEPACKETIZER_RAW_H_
+
+#include "absl/types/optional.h"
+#include "modules/rtp_rtcp/source/video_rtp_depacketizer.h"
+#include "rtc_base/copy_on_write_buffer.h"
+
+namespace webrtc {
+
+class VideoRtpDepacketizerRaw : public VideoRtpDepacketizer {
+ public:
+ ~VideoRtpDepacketizerRaw() override = default;
+
+ absl::optional<ParsedRtpPayload> Parse(
+ rtc::CopyOnWriteBuffer rtp_payload) override;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_RTP_RTCP_SOURCE_VIDEO_RTP_DEPACKETIZER_RAW_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/video_rtp_depacketizer_raw_unittest.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/video_rtp_depacketizer_raw_unittest.cc
new file mode 100644
index 0000000000..36c826ab84
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/video_rtp_depacketizer_raw_unittest.cc
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/video_rtp_depacketizer_raw.h"
+
+#include <cstdint>
+
+#include "absl/types/optional.h"
+#include "rtc_base/copy_on_write_buffer.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+TEST(VideoRtpDepacketizerRaw, PassRtpPayloadAsVideoPayload) {
+ const uint8_t kPayload[] = {0x05, 0x25, 0x52};
+ rtc::CopyOnWriteBuffer rtp_payload(kPayload);
+
+ VideoRtpDepacketizerRaw depacketizer;
+ absl::optional<VideoRtpDepacketizer::ParsedRtpPayload> parsed =
+ depacketizer.Parse(rtp_payload);
+
+ ASSERT_TRUE(parsed);
+ EXPECT_EQ(parsed->video_payload.size(), rtp_payload.size());
+  // Check that no copy was made by verifying that the returned and original
+  // buffers point to the same memory.
+ EXPECT_EQ(parsed->video_payload.cdata(), rtp_payload.cdata());
+}
+
+TEST(VideoRtpDepacketizerRaw, UsesDefaultValuesForVideoHeader) {
+ const uint8_t kPayload[] = {0x05, 0x25, 0x52};
+ rtc::CopyOnWriteBuffer rtp_payload(kPayload);
+
+ VideoRtpDepacketizerRaw depacketizer;
+ absl::optional<VideoRtpDepacketizer::ParsedRtpPayload> parsed =
+ depacketizer.Parse(rtp_payload);
+
+ ASSERT_TRUE(parsed);
+ EXPECT_FALSE(parsed->video_header.generic);
+ EXPECT_EQ(parsed->video_header.codec, kVideoCodecGeneric);
+}
+
+} // namespace
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/video_rtp_depacketizer_vp8.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/video_rtp_depacketizer_vp8.cc
new file mode 100644
index 0000000000..d6bd33c24d
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/video_rtp_depacketizer_vp8.cc
@@ -0,0 +1,201 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/video_rtp_depacketizer_vp8.h"
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "absl/types/optional.h"
+#include "api/array_view.h"
+#include "modules/rtp_rtcp/source/rtp_video_header.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+// VP8 payload descriptor
+// https://datatracker.ietf.org/doc/html/rfc7741#section-4.2
+//
+// 0 1 2 3 4 5 6 7
+// +-+-+-+-+-+-+-+-+
+// |X|R|N|S|R| PID | (REQUIRED)
+// +-+-+-+-+-+-+-+-+
+// X: |I|L|T|K| RSV | (OPTIONAL)
+// +-+-+-+-+-+-+-+-+
+// I: |M| PictureID | (OPTIONAL)
+// +-+-+-+-+-+-+-+-+
+// | PictureID |
+// +-+-+-+-+-+-+-+-+
+// L: | TL0PICIDX | (OPTIONAL)
+// +-+-+-+-+-+-+-+-+
+// T/K: |TID|Y| KEYIDX | (OPTIONAL)
+// +-+-+-+-+-+-+-+-+
+//
+// VP8 payload header. Considered part of the actual payload, sent to decoder.
+// https://datatracker.ietf.org/doc/html/rfc7741#section-4.3
+//
+// 0 1 2 3 4 5 6 7
+// +-+-+-+-+-+-+-+-+
+// |Size0|H| VER |P|
+// +-+-+-+-+-+-+-+-+
+// : ... :
+// +-+-+-+-+-+-+-+-+
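+//
+// Worked example (illustrative bytes): a descriptor starting 0b1011'0100 has
+// X=1 (extension byte follows), N=1 (non-reference), S=1 (partition start)
+// and PID=4; a following extension byte 0b1100'0000 announces that a
+// PictureID and a TL0PICIDX come next.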
+
+namespace webrtc {
+namespace {
+
+constexpr int kFailedToParse = 0;
+
+int ParseVP8Descriptor(RTPVideoHeaderVP8* vp8,
+ const uint8_t* data,
+ size_t data_length) {
+ RTC_DCHECK_GT(data_length, 0);
+ int parsed_bytes = 0;
+ // Parse mandatory first byte of payload descriptor.
+ bool extension = (*data & 0x80) ? true : false; // X bit
+ vp8->nonReference = (*data & 0x20) ? true : false; // N bit
+ vp8->beginningOfPartition = (*data & 0x10) ? true : false; // S bit
+ vp8->partitionId = (*data & 0x07); // PID field
+
+ data++;
+ parsed_bytes++;
+ data_length--;
+
+ if (!extension)
+ return parsed_bytes;
+
+ if (data_length == 0)
+ return kFailedToParse;
+ // Optional X field is present.
+ bool has_picture_id = (*data & 0x80) ? true : false; // I bit
+ bool has_tl0_pic_idx = (*data & 0x40) ? true : false; // L bit
+ bool has_tid = (*data & 0x20) ? true : false; // T bit
+ bool has_key_idx = (*data & 0x10) ? true : false; // K bit
+
+ // Advance data and decrease remaining payload size.
+ data++;
+ parsed_bytes++;
+ data_length--;
+
+ if (has_picture_id) {
+ if (data_length == 0)
+ return kFailedToParse;
+
+ vp8->pictureId = (*data & 0x7F);
+ if (*data & 0x80) {
+ data++;
+ parsed_bytes++;
+ if (--data_length == 0)
+ return kFailedToParse;
+ // PictureId is 15 bits
+ vp8->pictureId = (vp8->pictureId << 8) + *data;
+ }
+ data++;
+ parsed_bytes++;
+ data_length--;
+ }
+
+ if (has_tl0_pic_idx) {
+ if (data_length == 0)
+ return kFailedToParse;
+
+ vp8->tl0PicIdx = *data;
+ data++;
+ parsed_bytes++;
+ data_length--;
+ }
+
+ if (has_tid || has_key_idx) {
+ if (data_length == 0)
+ return kFailedToParse;
+
+ if (has_tid) {
+ vp8->temporalIdx = ((*data >> 6) & 0x03);
+ vp8->layerSync = (*data & 0x20) ? true : false; // Y bit
+ }
+ if (has_key_idx) {
+ vp8->keyIdx = *data & 0x1F;
+ }
+ data++;
+ parsed_bytes++;
+ data_length--;
+ }
+ return parsed_bytes;
+}
+
+} // namespace
+
+absl::optional<VideoRtpDepacketizer::ParsedRtpPayload>
+VideoRtpDepacketizerVp8::Parse(rtc::CopyOnWriteBuffer rtp_payload) {
+ rtc::ArrayView<const uint8_t> payload(rtp_payload.cdata(),
+ rtp_payload.size());
+ absl::optional<ParsedRtpPayload> result(absl::in_place);
+ int offset = ParseRtpPayload(payload, &result->video_header);
+ if (offset == kFailedToParse)
+ return absl::nullopt;
+ RTC_DCHECK_LT(offset, rtp_payload.size());
+ result->video_payload =
+ rtp_payload.Slice(offset, rtp_payload.size() - offset);
+ return result;
+}
+
+int VideoRtpDepacketizerVp8::ParseRtpPayload(
+ rtc::ArrayView<const uint8_t> rtp_payload,
+ RTPVideoHeader* video_header) {
+ RTC_DCHECK(video_header);
+ if (rtp_payload.empty()) {
+ RTC_LOG(LS_ERROR) << "Empty rtp payload.";
+ return kFailedToParse;
+ }
+
+ video_header->simulcastIdx = 0;
+ video_header->codec = kVideoCodecVP8;
+ auto& vp8_header =
+ video_header->video_type_header.emplace<RTPVideoHeaderVP8>();
+ vp8_header.InitRTPVideoHeaderVP8();
+
+ const int descriptor_size =
+ ParseVP8Descriptor(&vp8_header, rtp_payload.data(), rtp_payload.size());
+ if (descriptor_size == kFailedToParse)
+ return kFailedToParse;
+
+ RTC_DCHECK_LT(vp8_header.partitionId, 8);
+
+ video_header->is_first_packet_in_frame =
+ vp8_header.beginningOfPartition && vp8_header.partitionId == 0;
+
+ int vp8_payload_size = rtp_payload.size() - descriptor_size;
+ if (vp8_payload_size == 0) {
+ RTC_LOG(LS_WARNING) << "Empty vp8 payload.";
+ return kFailedToParse;
+ }
+ const uint8_t* vp8_payload = rtp_payload.data() + descriptor_size;
+
+ // Read P bit from payload header (only at beginning of first partition).
+ if (video_header->is_first_packet_in_frame && (*vp8_payload & 0x01) == 0) {
+ video_header->frame_type = VideoFrameType::kVideoFrameKey;
+
+ if (vp8_payload_size < 10) {
+ // For an I-frame we should always have the uncompressed VP8 header
+ // in the beginning of the partition.
+ return kFailedToParse;
+ }
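+    // The uncompressed VP8 key frame header stores width and height as
+    // 14-bit little-endian values in bytes 6-9 (RFC 6386, section 9.1); the
+    // top two bits of each 16-bit field hold a scaling factor and are masked
+    // off here.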
+ video_header->width = ((vp8_payload[7] << 8) + vp8_payload[6]) & 0x3FFF;
+ video_header->height = ((vp8_payload[9] << 8) + vp8_payload[8]) & 0x3FFF;
+ } else {
+ video_header->frame_type = VideoFrameType::kVideoFrameDelta;
+
+ video_header->width = 0;
+ video_header->height = 0;
+ }
+
+ return descriptor_size;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/video_rtp_depacketizer_vp8.h b/third_party/libwebrtc/modules/rtp_rtcp/source/video_rtp_depacketizer_vp8.h
new file mode 100644
index 0000000000..3d7cb3291d
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/video_rtp_depacketizer_vp8.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_RTP_RTCP_SOURCE_VIDEO_RTP_DEPACKETIZER_VP8_H_
+#define MODULES_RTP_RTCP_SOURCE_VIDEO_RTP_DEPACKETIZER_VP8_H_
+
+#include <cstdint>
+
+#include "absl/types/optional.h"
+#include "api/array_view.h"
+#include "modules/rtp_rtcp/source/rtp_video_header.h"
+#include "modules/rtp_rtcp/source/video_rtp_depacketizer.h"
+#include "rtc_base/copy_on_write_buffer.h"
+
+namespace webrtc {
+
+class VideoRtpDepacketizerVp8 : public VideoRtpDepacketizer {
+ public:
+ VideoRtpDepacketizerVp8() = default;
+ VideoRtpDepacketizerVp8(const VideoRtpDepacketizerVp8&) = delete;
+ VideoRtpDepacketizerVp8& operator=(const VideoRtpDepacketizerVp8&) = delete;
+ ~VideoRtpDepacketizerVp8() override = default;
+
+  // Parses the VP8 RTP payload descriptor.
+  // Returns zero on error, or the offset of the VP8 payload header on
+  // success.
+ static int ParseRtpPayload(rtc::ArrayView<const uint8_t> rtp_payload,
+ RTPVideoHeader* video_header);
+
+ absl::optional<ParsedRtpPayload> Parse(
+ rtc::CopyOnWriteBuffer rtp_payload) override;
+};
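+
+// Sketch of using the static parser directly (illustrative; `rtp_payload` is
+// an assumed rtc::ArrayView over one packet's payload):
+//
+//   RTPVideoHeader video_header;
+//   int offset =
+//       VideoRtpDepacketizerVp8::ParseRtpPayload(rtp_payload, &video_header);
+//   if (offset != 0) {
+//     // rtp_payload.subview(offset) is the VP8 payload header + payload.
+//   }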
+
+} // namespace webrtc
+
+#endif // MODULES_RTP_RTCP_SOURCE_VIDEO_RTP_DEPACKETIZER_VP8_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/video_rtp_depacketizer_vp8_unittest.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/video_rtp_depacketizer_vp8_unittest.cc
new file mode 100644
index 0000000000..77469cf935
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/video_rtp_depacketizer_vp8_unittest.cc
@@ -0,0 +1,244 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/video_rtp_depacketizer_vp8.h"
+
+#include "api/array_view.h"
+#include "modules/rtp_rtcp/source/rtp_format_vp8.h"
+#include "modules/rtp_rtcp/source/rtp_packet_to_send.h"
+#include "rtc_base/copy_on_write_buffer.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+// VP8 payload descriptor
+// https://datatracker.ietf.org/doc/html/rfc7741#section-4.2
+//
+// 0 1 2 3 4 5 6 7
+// +-+-+-+-+-+-+-+-+
+// |X|R|N|S|R| PID | (REQUIRED)
+// +-+-+-+-+-+-+-+-+
+// X: |I|L|T|K| RSV | (OPTIONAL)
+// +-+-+-+-+-+-+-+-+
+// I: |M| PictureID | (OPTIONAL)
+// +-+-+-+-+-+-+-+-+
+// | PictureID |
+// +-+-+-+-+-+-+-+-+
+// L: | TL0PICIDX | (OPTIONAL)
+// +-+-+-+-+-+-+-+-+
+// T/K: |TID|Y| KEYIDX | (OPTIONAL)
+// +-+-+-+-+-+-+-+-+
+//
+// VP8 payload header. Considered part of the actual payload, sent to decoder.
+// https://datatracker.ietf.org/doc/html/rfc7741#section-4.3
+//
+// 0 1 2 3 4 5 6 7
+// +-+-+-+-+-+-+-+-+
+// |Size0|H| VER |P|
+// +-+-+-+-+-+-+-+-+
+// : ... :
+// +-+-+-+-+-+-+-+-+
+
+namespace webrtc {
+namespace {
+
+TEST(VideoRtpDepacketizerVp8Test, BasicHeader) {
+ uint8_t packet[4] = {0};
+ packet[0] = 0b0001'0100; // S = 1, partition ID = 4.
+ packet[1] = 0x01; // P frame.
+
+ RTPVideoHeader video_header;
+ int offset = VideoRtpDepacketizerVp8::ParseRtpPayload(packet, &video_header);
+
+ EXPECT_EQ(offset, 1);
+ EXPECT_EQ(video_header.frame_type, VideoFrameType::kVideoFrameDelta);
+ EXPECT_EQ(video_header.codec, kVideoCodecVP8);
+ const auto& vp8_header =
+ absl::get<RTPVideoHeaderVP8>(video_header.video_type_header);
+ EXPECT_FALSE(vp8_header.nonReference);
+ EXPECT_TRUE(vp8_header.beginningOfPartition);
+ EXPECT_EQ(vp8_header.partitionId, 4);
+ EXPECT_EQ(vp8_header.pictureId, kNoPictureId);
+ EXPECT_EQ(vp8_header.tl0PicIdx, kNoTl0PicIdx);
+ EXPECT_EQ(vp8_header.temporalIdx, kNoTemporalIdx);
+ EXPECT_EQ(vp8_header.keyIdx, kNoKeyIdx);
+}
+
+TEST(VideoRtpDepacketizerVp8Test, OneBytePictureID) {
+ const uint8_t kPictureId = 17;
+ uint8_t packet[10] = {0};
+ packet[0] = 0b1010'0000;
+ packet[1] = 0b1000'0000;
+ packet[2] = kPictureId;
+
+ RTPVideoHeader video_header;
+ int offset = VideoRtpDepacketizerVp8::ParseRtpPayload(packet, &video_header);
+
+ EXPECT_EQ(offset, 3);
+ const auto& vp8_header =
+ absl::get<RTPVideoHeaderVP8>(video_header.video_type_header);
+ EXPECT_EQ(vp8_header.pictureId, kPictureId);
+}
+
+TEST(VideoRtpDepacketizerVp8Test, TwoBytePictureID) {
+ const uint16_t kPictureId = 0x1234;
+ uint8_t packet[10] = {0};
+ packet[0] = 0b1010'0000;
+ packet[1] = 0b1000'0000;
+ packet[2] = 0x80 | 0x12;
+ packet[3] = 0x34;
+
+ RTPVideoHeader video_header;
+ int offset = VideoRtpDepacketizerVp8::ParseRtpPayload(packet, &video_header);
+
+ EXPECT_EQ(offset, 4);
+ const auto& vp8_header =
+ absl::get<RTPVideoHeaderVP8>(video_header.video_type_header);
+ EXPECT_EQ(vp8_header.pictureId, kPictureId);
+}
+
+TEST(VideoRtpDepacketizerVp8Test, Tl0PicIdx) {
+ const uint8_t kTl0PicIdx = 17;
+ uint8_t packet[13] = {0};
+ packet[0] = 0b1000'0000;
+ packet[1] = 0b0100'0000;
+ packet[2] = kTl0PicIdx;
+
+ RTPVideoHeader video_header;
+ int offset = VideoRtpDepacketizerVp8::ParseRtpPayload(packet, &video_header);
+
+ EXPECT_EQ(offset, 3);
+ const auto& vp8_header =
+ absl::get<RTPVideoHeaderVP8>(video_header.video_type_header);
+ EXPECT_EQ(vp8_header.tl0PicIdx, kTl0PicIdx);
+}
+
+TEST(VideoRtpDepacketizerVp8Test, TIDAndLayerSync) {
+ uint8_t packet[10] = {0};
+ packet[0] = 0b1000'0000;
+ packet[1] = 0b0010'0000;
+ packet[2] = 0b10'0'00000; // TID(2) + LayerSync(false)
+
+ RTPVideoHeader video_header;
+ int offset = VideoRtpDepacketizerVp8::ParseRtpPayload(packet, &video_header);
+
+ EXPECT_EQ(offset, 3);
+ const auto& vp8_header =
+ absl::get<RTPVideoHeaderVP8>(video_header.video_type_header);
+ EXPECT_EQ(vp8_header.temporalIdx, 2);
+ EXPECT_FALSE(vp8_header.layerSync);
+}
+
+TEST(VideoRtpDepacketizerVp8Test, KeyIdx) {
+ const uint8_t kKeyIdx = 17;
+ uint8_t packet[10] = {0};
+ packet[0] = 0b1000'0000;
+ packet[1] = 0b0001'0000;
+ packet[2] = kKeyIdx;
+
+ RTPVideoHeader video_header;
+ int offset = VideoRtpDepacketizerVp8::ParseRtpPayload(packet, &video_header);
+
+ EXPECT_EQ(offset, 3);
+ const auto& vp8_header =
+ absl::get<RTPVideoHeaderVP8>(video_header.video_type_header);
+ EXPECT_EQ(vp8_header.keyIdx, kKeyIdx);
+}
+
+TEST(VideoRtpDepacketizerVp8Test, MultipleExtensions) {
+ uint8_t packet[10] = {0};
+ packet[0] = 0b1010'0110; // X and N bit set, partition ID = 6
+ packet[1] = 0b1111'0000;
+ packet[2] = 0x80 | 0x12; // PictureID, high 7 bits.
+ packet[3] = 0x34; // PictureID, low 8 bits.
+ packet[4] = 42; // Tl0PicIdx.
+ packet[5] = 0b01'1'10001;
+
+ RTPVideoHeader video_header;
+ int offset = VideoRtpDepacketizerVp8::ParseRtpPayload(packet, &video_header);
+
+ EXPECT_EQ(offset, 6);
+ const auto& vp8_header =
+ absl::get<RTPVideoHeaderVP8>(video_header.video_type_header);
+ EXPECT_TRUE(vp8_header.nonReference);
+ EXPECT_EQ(vp8_header.partitionId, 0b0110);
+ EXPECT_EQ(vp8_header.pictureId, 0x1234);
+ EXPECT_EQ(vp8_header.tl0PicIdx, 42);
+ EXPECT_EQ(vp8_header.temporalIdx, 1);
+ EXPECT_TRUE(vp8_header.layerSync);
+ EXPECT_EQ(vp8_header.keyIdx, 0b10001);
+}
+
+TEST(VideoRtpDepacketizerVp8Test, TooShortHeader) {
+ uint8_t packet[4] = {0};
+ packet[0] = 0b1000'0000;
+  packet[1] = 0b1111'0000; // All extensions are enabled...
+  packet[2] = 0x80 | 17;   // ...but only a 2-byte PictureID is provided.
+ packet[3] = 17; // PictureID, low 8 bits.
+
+ RTPVideoHeader unused;
+ EXPECT_EQ(VideoRtpDepacketizerVp8::ParseRtpPayload(packet, &unused), 0);
+}
+
+TEST(VideoRtpDepacketizerVp8Test, WithPacketizer) {
+ uint8_t data[10] = {0};
+  RtpPacketToSend packet(/*extensions=*/nullptr);
+ RTPVideoHeaderVP8 input_header;
+ input_header.nonReference = true;
+ input_header.pictureId = 300;
+ input_header.temporalIdx = 1;
+ input_header.layerSync = false;
+ input_header.tl0PicIdx = kNoTl0PicIdx; // Disable.
+ input_header.keyIdx = 31;
+ RtpPacketizerVp8 packetizer(data, /*limits=*/{}, input_header);
+ EXPECT_EQ(packetizer.NumPackets(), 1u);
+ ASSERT_TRUE(packetizer.NextPacket(&packet));
+
+ VideoRtpDepacketizerVp8 depacketizer;
+ absl::optional<VideoRtpDepacketizer::ParsedRtpPayload> parsed =
+ depacketizer.Parse(packet.PayloadBuffer());
+ ASSERT_TRUE(parsed);
+
+ EXPECT_EQ(parsed->video_header.codec, kVideoCodecVP8);
+ const auto& vp8_header =
+ absl::get<RTPVideoHeaderVP8>(parsed->video_header.video_type_header);
+ EXPECT_EQ(vp8_header.nonReference, input_header.nonReference);
+ EXPECT_EQ(vp8_header.pictureId, input_header.pictureId);
+ EXPECT_EQ(vp8_header.tl0PicIdx, input_header.tl0PicIdx);
+ EXPECT_EQ(vp8_header.temporalIdx, input_header.temporalIdx);
+ EXPECT_EQ(vp8_header.layerSync, input_header.layerSync);
+ EXPECT_EQ(vp8_header.keyIdx, input_header.keyIdx);
+}
+
+TEST(VideoRtpDepacketizerVp8Test, ReferencesInputCopyOnWriteBuffer) {
+ constexpr size_t kHeaderSize = 5;
+ uint8_t packet[16] = {0};
+ packet[0] = 0b1000'0000;
+ packet[1] = 0b1111'0000; // with all extensions,
+ packet[2] = 15; // and one-byte picture id.
+
+ rtc::CopyOnWriteBuffer rtp_payload(packet);
+ VideoRtpDepacketizerVp8 depacketizer;
+ absl::optional<VideoRtpDepacketizer::ParsedRtpPayload> parsed =
+ depacketizer.Parse(rtp_payload);
+ ASSERT_TRUE(parsed);
+
+ EXPECT_EQ(parsed->video_payload.size(), rtp_payload.size() - kHeaderSize);
+  // Compare pointers to check that the copy-on-write buffer was not unshared.
+ EXPECT_EQ(parsed->video_payload.cdata(), rtp_payload.cdata() + kHeaderSize);
+}
+
+TEST(VideoRtpDepacketizerVp8Test, FailsOnEmptyPayload) {
+ rtc::ArrayView<const uint8_t> empty;
+ RTPVideoHeader video_header;
+ EXPECT_EQ(VideoRtpDepacketizerVp8::ParseRtpPayload(empty, &video_header), 0);
+}
+
+} // namespace
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/video_rtp_depacketizer_vp9.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/video_rtp_depacketizer_vp9.cc
new file mode 100644
index 0000000000..41f363d221
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/video_rtp_depacketizer_vp9.cc
@@ -0,0 +1,226 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/video_rtp_depacketizer_vp9.h"
+
+#include <string.h>
+
+#include "api/video/video_codec_constants.h"
+#include "modules/rtp_rtcp/source/rtp_packet_to_send.h"
+#include "modules/video_coding/codecs/interface/common_constants.h"
+#include "rtc_base/bitstream_reader.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+namespace {
+
+// Picture ID:
+//
+// +-+-+-+-+-+-+-+-+
+// I: |M| PICTURE ID | M:0 => picture id is 7 bits.
+// +-+-+-+-+-+-+-+-+ M:1 => picture id is 15 bits.
+// M: | EXTENDED PID |
+// +-+-+-+-+-+-+-+-+
+//
+void ParsePictureId(BitstreamReader& parser, RTPVideoHeaderVP9* vp9) {
+ if (parser.ReadBit()) { // m_bit
+ vp9->picture_id = parser.ReadBits(15);
+ vp9->max_picture_id = kMaxTwoBytePictureId;
+ } else {
+ vp9->picture_id = parser.ReadBits(7);
+ vp9->max_picture_id = kMaxOneBytePictureId;
+ }
+}
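+
+// Example (illustrative): the bytes 0x85 0x2A have M=1, so the remaining 15
+// bits give picture_id 0x052A; a lone byte 0x2A (M=0) would give picture_id
+// 0x2A with the one-byte maximum.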
+
+// Layer indices:
+//
+// +-+-+-+-+-+-+-+-+
+// L: | T |U| S |D|
+// +-+-+-+-+-+-+-+-+
+// | TL0PICIDX | (non-flexible mode only)
+// +-+-+-+-+-+-+-+-+
+//
+void ParseLayerInfo(BitstreamReader& parser, RTPVideoHeaderVP9* vp9) {
+ vp9->temporal_idx = parser.ReadBits(3);
+ vp9->temporal_up_switch = parser.Read<bool>();
+ vp9->spatial_idx = parser.ReadBits(3);
+ vp9->inter_layer_predicted = parser.Read<bool>();
+ if (vp9->spatial_idx >= kMaxSpatialLayers) {
+ parser.Invalidate();
+ return;
+ }
+
+ if (!vp9->flexible_mode) {
+ vp9->tl0_pic_idx = parser.Read<uint8_t>();
+ }
+}
+
+// Reference indices:
+//
+// +-+-+-+-+-+-+-+-+ P=1,F=1: At least one reference index
+// P,F: | P_DIFF |N| up to 3 times has to be specified.
+// +-+-+-+-+-+-+-+-+ N=1: An additional P_DIFF follows
+// current P_DIFF.
+//
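+// Example (illustrative): with picture_id 17 and a 15-bit maximum, P_DIFF 18
+// reaches past the last wrap, so ref_picture_id becomes
+// (0x7FFF + 1) + 17 - 18 = 0x7FFF.
+//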
+void ParseRefIndices(BitstreamReader& parser, RTPVideoHeaderVP9* vp9) {
+ if (vp9->picture_id == kNoPictureId) {
+ parser.Invalidate();
+ return;
+ }
+
+ vp9->num_ref_pics = 0;
+ bool n_bit;
+ do {
+ if (vp9->num_ref_pics == kMaxVp9RefPics) {
+ parser.Invalidate();
+ return;
+ }
+
+ uint8_t p_diff = parser.ReadBits(7);
+ n_bit = parser.Read<bool>();
+
+ vp9->pid_diff[vp9->num_ref_pics] = p_diff;
+ uint32_t scaled_pid = vp9->picture_id;
+ if (p_diff > scaled_pid) {
+ // TODO(asapersson): Max should correspond to the picture id of last wrap.
+ scaled_pid += vp9->max_picture_id + 1;
+ }
+ vp9->ref_picture_id[vp9->num_ref_pics++] = scaled_pid - p_diff;
+ } while (n_bit);
+}
+
+// Scalability structure (SS).
+//
+// +-+-+-+-+-+-+-+-+
+// V: | N_S |Y|G|-|-|-|
+// +-+-+-+-+-+-+-+-+ -|
+// Y: | WIDTH | (OPTIONAL) .
+// + + .
+// | | (OPTIONAL) .
+// +-+-+-+-+-+-+-+-+ . N_S + 1 times
+// | HEIGHT | (OPTIONAL) .
+// + + .
+// | | (OPTIONAL) .
+// +-+-+-+-+-+-+-+-+ -|
+// G: | N_G | (OPTIONAL)
+// +-+-+-+-+-+-+-+-+ -|
+// N_G: | T |U| R |-|-| (OPTIONAL) .
+// +-+-+-+-+-+-+-+-+ -| . N_G times
+// | P_DIFF | (OPTIONAL) . R times .
+// +-+-+-+-+-+-+-+-+ -| -|
+//
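+// Example (illustrative): a first SS byte of ((2 - 1) << 5) | (1 << 3)
+// announces two spatial layers (N_S=1), no per-layer resolutions (Y=0) and a
+// group-of-frames description (G=1).
+//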
+void ParseSsData(BitstreamReader& parser, RTPVideoHeaderVP9* vp9) {
+ vp9->num_spatial_layers = parser.ReadBits(3) + 1;
+ vp9->spatial_layer_resolution_present = parser.Read<bool>();
+ bool g_bit = parser.Read<bool>();
+ parser.ConsumeBits(3);
+ vp9->gof.num_frames_in_gof = 0;
+
+ if (vp9->spatial_layer_resolution_present) {
+ for (size_t i = 0; i < vp9->num_spatial_layers; ++i) {
+ vp9->width[i] = parser.Read<uint16_t>();
+ vp9->height[i] = parser.Read<uint16_t>();
+ }
+ }
+ if (g_bit) {
+ vp9->gof.num_frames_in_gof = parser.Read<uint8_t>();
+ }
+ for (size_t i = 0; i < vp9->gof.num_frames_in_gof; ++i) {
+ vp9->gof.temporal_idx[i] = parser.ReadBits(3);
+ vp9->gof.temporal_up_switch[i] = parser.Read<bool>();
+ vp9->gof.num_ref_pics[i] = parser.ReadBits(2);
+ parser.ConsumeBits(2);
+
+ for (uint8_t p = 0; p < vp9->gof.num_ref_pics[i]; ++p) {
+ vp9->gof.pid_diff[i][p] = parser.Read<uint8_t>();
+ }
+ }
+}
+} // namespace
+
+absl::optional<VideoRtpDepacketizer::ParsedRtpPayload>
+VideoRtpDepacketizerVp9::Parse(rtc::CopyOnWriteBuffer rtp_payload) {
+ absl::optional<ParsedRtpPayload> result(absl::in_place);
+ int offset = ParseRtpPayload(rtp_payload, &result->video_header);
+ if (offset == 0)
+ return absl::nullopt;
+ RTC_DCHECK_LT(offset, rtp_payload.size());
+ result->video_payload =
+ rtp_payload.Slice(offset, rtp_payload.size() - offset);
+ return result;
+}
+
+int VideoRtpDepacketizerVp9::ParseRtpPayload(
+ rtc::ArrayView<const uint8_t> rtp_payload,
+ RTPVideoHeader* video_header) {
+ RTC_DCHECK(video_header);
+ // Parse mandatory first byte of payload descriptor.
+ BitstreamReader parser(rtp_payload);
+ uint8_t first_byte = parser.Read<uint8_t>();
+  bool i_bit = first_byte & 0b1000'0000; // PictureId present.
+ bool p_bit = first_byte & 0b0100'0000; // Inter-picture predicted.
+ bool l_bit = first_byte & 0b0010'0000; // Layer indices present.
+ bool f_bit = first_byte & 0b0001'0000; // Flexible mode.
+ bool b_bit = first_byte & 0b0000'1000; // Begins frame flag.
+ bool e_bit = first_byte & 0b0000'0100; // Ends frame flag.
+ bool v_bit = first_byte & 0b0000'0010; // Scalability structure present.
+  bool z_bit = first_byte & 0b0000'0001; // Not used for inter-layer prediction.
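+  // For example (illustrative), a first byte of 0x0C sets only the B and E
+  // bits: a key frame (P=0) contained in a single packet.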
+
+ // Parsed payload.
+ video_header->width = 0;
+ video_header->height = 0;
+ video_header->simulcastIdx = 0;
+ video_header->codec = kVideoCodecVP9;
+
+ video_header->frame_type =
+ p_bit ? VideoFrameType::kVideoFrameDelta : VideoFrameType::kVideoFrameKey;
+
+ auto& vp9_header =
+ video_header->video_type_header.emplace<RTPVideoHeaderVP9>();
+ vp9_header.InitRTPVideoHeaderVP9();
+ vp9_header.inter_pic_predicted = p_bit;
+ vp9_header.flexible_mode = f_bit;
+ vp9_header.beginning_of_frame = b_bit;
+ vp9_header.end_of_frame = e_bit;
+ vp9_header.ss_data_available = v_bit;
+ vp9_header.non_ref_for_inter_layer_pred = z_bit;
+
+ // Parse fields that are present.
+ if (i_bit) {
+ ParsePictureId(parser, &vp9_header);
+ }
+ if (l_bit) {
+ ParseLayerInfo(parser, &vp9_header);
+ }
+ if (p_bit && f_bit) {
+ ParseRefIndices(parser, &vp9_header);
+ }
+ if (v_bit) {
+ ParseSsData(parser, &vp9_header);
+ if (vp9_header.spatial_layer_resolution_present) {
+ // TODO(asapersson): Add support for spatial layers.
+ video_header->width = vp9_header.width[0];
+ video_header->height = vp9_header.height[0];
+ }
+ }
+ video_header->is_first_packet_in_frame = b_bit;
+ video_header->is_last_packet_in_frame = e_bit;
+
+ int num_remaining_bits = parser.RemainingBitCount();
+ if (num_remaining_bits <= 0) {
+ // Failed to parse or empty vp9 payload data.
+ return 0;
+ }
+ // vp9 descriptor is byte aligned.
+ RTC_DCHECK_EQ(num_remaining_bits % 8, 0);
+ return rtp_payload.size() - num_remaining_bits / 8;
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/video_rtp_depacketizer_vp9.h b/third_party/libwebrtc/modules/rtp_rtcp/source/video_rtp_depacketizer_vp9.h
new file mode 100644
index 0000000000..4bb358a15f
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/video_rtp_depacketizer_vp9.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_RTP_RTCP_SOURCE_VIDEO_RTP_DEPACKETIZER_VP9_H_
+#define MODULES_RTP_RTCP_SOURCE_VIDEO_RTP_DEPACKETIZER_VP9_H_
+
+#include <cstdint>
+
+#include "absl/types/optional.h"
+#include "api/array_view.h"
+#include "modules/rtp_rtcp/source/rtp_video_header.h"
+#include "modules/rtp_rtcp/source/video_rtp_depacketizer.h"
+#include "rtc_base/copy_on_write_buffer.h"
+
+namespace webrtc {
+
+class VideoRtpDepacketizerVp9 : public VideoRtpDepacketizer {
+ public:
+ VideoRtpDepacketizerVp9() = default;
+ VideoRtpDepacketizerVp9(const VideoRtpDepacketizerVp9&) = delete;
+ VideoRtpDepacketizerVp9& operator=(const VideoRtpDepacketizerVp9&) = delete;
+ ~VideoRtpDepacketizerVp9() override = default;
+
+  // Parses the VP9 RTP payload descriptor.
+  // Returns zero on error, or the offset of the VP9 payload header on
+  // success.
+ static int ParseRtpPayload(rtc::ArrayView<const uint8_t> rtp_payload,
+ RTPVideoHeader* video_header);
+
+ absl::optional<ParsedRtpPayload> Parse(
+ rtc::CopyOnWriteBuffer rtp_payload) override;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_RTP_RTCP_SOURCE_VIDEO_RTP_DEPACKETIZER_VP9_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/video_rtp_depacketizer_vp9_unittest.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/video_rtp_depacketizer_vp9_unittest.cc
new file mode 100644
index 0000000000..36af59a779
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/video_rtp_depacketizer_vp9_unittest.cc
@@ -0,0 +1,373 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/video_rtp_depacketizer_vp9.h"
+
+#include <memory>
+#include <vector>
+
+#include "api/array_view.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+void VerifyHeader(const RTPVideoHeaderVP9& expected,
+ const RTPVideoHeaderVP9& actual) {
+ EXPECT_EQ(expected.inter_layer_predicted, actual.inter_layer_predicted);
+ EXPECT_EQ(expected.inter_pic_predicted, actual.inter_pic_predicted);
+ EXPECT_EQ(expected.flexible_mode, actual.flexible_mode);
+ EXPECT_EQ(expected.beginning_of_frame, actual.beginning_of_frame);
+ EXPECT_EQ(expected.end_of_frame, actual.end_of_frame);
+ EXPECT_EQ(expected.ss_data_available, actual.ss_data_available);
+ EXPECT_EQ(expected.non_ref_for_inter_layer_pred,
+ actual.non_ref_for_inter_layer_pred);
+ EXPECT_EQ(expected.picture_id, actual.picture_id);
+ EXPECT_EQ(expected.max_picture_id, actual.max_picture_id);
+ EXPECT_EQ(expected.temporal_idx, actual.temporal_idx);
+ EXPECT_EQ(expected.spatial_idx, actual.spatial_idx);
+ EXPECT_EQ(expected.gof_idx, actual.gof_idx);
+ EXPECT_EQ(expected.tl0_pic_idx, actual.tl0_pic_idx);
+ EXPECT_EQ(expected.temporal_up_switch, actual.temporal_up_switch);
+
+ EXPECT_EQ(expected.num_ref_pics, actual.num_ref_pics);
+ for (uint8_t i = 0; i < expected.num_ref_pics; ++i) {
+ EXPECT_EQ(expected.pid_diff[i], actual.pid_diff[i]);
+ EXPECT_EQ(expected.ref_picture_id[i], actual.ref_picture_id[i]);
+ }
+ if (expected.ss_data_available) {
+ EXPECT_EQ(expected.spatial_layer_resolution_present,
+ actual.spatial_layer_resolution_present);
+ EXPECT_EQ(expected.num_spatial_layers, actual.num_spatial_layers);
+ if (expected.spatial_layer_resolution_present) {
+ for (size_t i = 0; i < expected.num_spatial_layers; i++) {
+ EXPECT_EQ(expected.width[i], actual.width[i]);
+ EXPECT_EQ(expected.height[i], actual.height[i]);
+ }
+ }
+ EXPECT_EQ(expected.gof.num_frames_in_gof, actual.gof.num_frames_in_gof);
+ for (size_t i = 0; i < expected.gof.num_frames_in_gof; i++) {
+ EXPECT_EQ(expected.gof.temporal_up_switch[i],
+ actual.gof.temporal_up_switch[i]);
+ EXPECT_EQ(expected.gof.temporal_idx[i], actual.gof.temporal_idx[i]);
+ EXPECT_EQ(expected.gof.num_ref_pics[i], actual.gof.num_ref_pics[i]);
+ for (uint8_t j = 0; j < expected.gof.num_ref_pics[i]; j++) {
+ EXPECT_EQ(expected.gof.pid_diff[i][j], actual.gof.pid_diff[i][j]);
+ }
+ }
+ }
+}
+
+TEST(VideoRtpDepacketizerVp9Test, ParseBasicHeader) {
+ uint8_t packet[4] = {0};
+ packet[0] = 0x0C; // I:0 P:0 L:0 F:0 B:1 E:1 V:0 Z:0
+
+ RTPVideoHeader video_header;
+ int offset = VideoRtpDepacketizerVp9::ParseRtpPayload(packet, &video_header);
+
+ EXPECT_EQ(offset, 1);
+ RTPVideoHeaderVP9 expected;
+ expected.InitRTPVideoHeaderVP9();
+ expected.beginning_of_frame = true;
+ expected.end_of_frame = true;
+ VerifyHeader(expected,
+ absl::get<RTPVideoHeaderVP9>(video_header.video_type_header));
+}
+
+TEST(VideoRtpDepacketizerVp9Test, ParseOneBytePictureId) {
+ uint8_t packet[10] = {0};
+ packet[0] = 0x80; // I:1 P:0 L:0 F:0 B:0 E:0 V:0 Z:0
+ packet[1] = kMaxOneBytePictureId;
+
+ RTPVideoHeader video_header;
+ int offset = VideoRtpDepacketizerVp9::ParseRtpPayload(packet, &video_header);
+
+ EXPECT_EQ(offset, 2);
+ RTPVideoHeaderVP9 expected;
+ expected.InitRTPVideoHeaderVP9();
+ expected.picture_id = kMaxOneBytePictureId;
+ expected.max_picture_id = kMaxOneBytePictureId;
+ VerifyHeader(expected,
+ absl::get<RTPVideoHeaderVP9>(video_header.video_type_header));
+}
+
+TEST(VideoRtpDepacketizerVp9Test, ParseTwoBytePictureId) {
+ uint8_t packet[10] = {0};
+ packet[0] = 0x80; // I:1 P:0 L:0 F:0 B:0 E:0 V:0 Z:0
+ packet[1] = 0x80 | ((kMaxTwoBytePictureId >> 8) & 0x7F);
+ packet[2] = kMaxTwoBytePictureId & 0xFF;
+
+ RTPVideoHeader video_header;
+ int offset = VideoRtpDepacketizerVp9::ParseRtpPayload(packet, &video_header);
+
+ EXPECT_EQ(offset, 3);
+ RTPVideoHeaderVP9 expected;
+ expected.InitRTPVideoHeaderVP9();
+ expected.picture_id = kMaxTwoBytePictureId;
+ expected.max_picture_id = kMaxTwoBytePictureId;
+ VerifyHeader(expected,
+ absl::get<RTPVideoHeaderVP9>(video_header.video_type_header));
+}
+
+TEST(VideoRtpDepacketizerVp9Test, ParseLayerInfoWithNonFlexibleMode) {
+ const uint8_t kTemporalIdx = 2;
+ const uint8_t kUbit = 1;
+ const uint8_t kSpatialIdx = 1;
+ const uint8_t kDbit = 1;
+ const uint8_t kTl0PicIdx = 17;
+ uint8_t packet[13] = {0};
+ packet[0] = 0x20; // I:0 P:0 L:1 F:0 B:0 E:0 V:0 Z:0
+ packet[1] = (kTemporalIdx << 5) | (kUbit << 4) | (kSpatialIdx << 1) | kDbit;
+ packet[2] = kTl0PicIdx;
+
+ RTPVideoHeader video_header;
+ int offset = VideoRtpDepacketizerVp9::ParseRtpPayload(packet, &video_header);
+
+ EXPECT_EQ(offset, 3);
+ RTPVideoHeaderVP9 expected;
+ expected.InitRTPVideoHeaderVP9();
+ // T:2 U:1 S:1 D:1
+ // TL0PICIDX:17
+ expected.temporal_idx = kTemporalIdx;
+ expected.temporal_up_switch = kUbit ? true : false;
+ expected.spatial_idx = kSpatialIdx;
+ expected.inter_layer_predicted = kDbit ? true : false;
+ expected.tl0_pic_idx = kTl0PicIdx;
+ VerifyHeader(expected,
+ absl::get<RTPVideoHeaderVP9>(video_header.video_type_header));
+}
+
+TEST(VideoRtpDepacketizerVp9Test, ParseLayerInfoWithFlexibleMode) {
+ const uint8_t kTemporalIdx = 2;
+ const uint8_t kUbit = 1;
+ const uint8_t kSpatialIdx = 0;
+ const uint8_t kDbit = 0;
+ uint8_t packet[13] = {0};
+ packet[0] = 0x38; // I:0 P:0 L:1 F:1 B:1 E:0 V:0 Z:0
+ packet[1] = (kTemporalIdx << 5) | (kUbit << 4) | (kSpatialIdx << 1) | kDbit;
+
+ RTPVideoHeader video_header;
+ int offset = VideoRtpDepacketizerVp9::ParseRtpPayload(packet, &video_header);
+
+ EXPECT_EQ(offset, 2);
+ RTPVideoHeaderVP9 expected;
+ expected.InitRTPVideoHeaderVP9();
+ // I:0 P:0 L:1 F:1 B:1 E:0 V:0 Z:0
+ // L: T:2 U:1 S:0 D:0
+ expected.beginning_of_frame = true;
+ expected.flexible_mode = true;
+ expected.temporal_idx = kTemporalIdx;
+ expected.temporal_up_switch = kUbit ? true : false;
+ expected.spatial_idx = kSpatialIdx;
+ expected.inter_layer_predicted = kDbit ? true : false;
+ VerifyHeader(expected,
+ absl::get<RTPVideoHeaderVP9>(video_header.video_type_header));
+}
+
+TEST(VideoRtpDepacketizerVp9Test, ParseRefIdx) {
+ const int16_t kPictureId = 17;
+ const uint8_t kPdiff1 = 17;
+ const uint8_t kPdiff2 = 18;
+ const uint8_t kPdiff3 = 127;
+ uint8_t packet[13] = {0};
+ packet[0] = 0xD8; // I:1 P:1 L:0 F:1 B:1 E:0 V:0 Z:0
+ packet[1] = 0x80 | ((kPictureId >> 8) & 0x7F); // Two byte pictureID.
+ packet[2] = kPictureId;
+ packet[3] = (kPdiff1 << 1) | 1; // P_DIFF N:1
+ packet[4] = (kPdiff2 << 1) | 1; // P_DIFF N:1
+ packet[5] = (kPdiff3 << 1) | 0; // P_DIFF N:0
+
+ RTPVideoHeader video_header;
+ int offset = VideoRtpDepacketizerVp9::ParseRtpPayload(packet, &video_header);
+
+ EXPECT_EQ(offset, 6);
+ RTPVideoHeaderVP9 expected;
+ expected.InitRTPVideoHeaderVP9();
+ // I:1 P:1 L:0 F:1 B:1 E:0 V:0 Z:0
+ // I: PICTURE ID:17
+ // I:
+ // P,F: P_DIFF:17 N:1 => refPicId = 17 - 17 = 0
+ // P,F: P_DIFF:18 N:1 => refPicId = (kMaxPictureId + 1) + 17 - 18 = 0x7FFF
+ // P,F: P_DIFF:127 N:0 => refPicId = (kMaxPictureId + 1) + 17 - 127 = 32658
+ expected.beginning_of_frame = true;
+ expected.inter_pic_predicted = true;
+ expected.flexible_mode = true;
+ expected.picture_id = kPictureId;
+ expected.num_ref_pics = 3;
+ expected.pid_diff[0] = kPdiff1;
+ expected.pid_diff[1] = kPdiff2;
+ expected.pid_diff[2] = kPdiff3;
+ expected.ref_picture_id[0] = 0;
+ expected.ref_picture_id[1] = 0x7FFF;
+ expected.ref_picture_id[2] = 32658;
+ VerifyHeader(expected,
+ absl::get<RTPVideoHeaderVP9>(video_header.video_type_header));
+}
+
+TEST(VideoRtpDepacketizerVp9Test, ParseRefIdxFailsWithNoPictureId) {
+ const uint8_t kPdiff = 3;
+ uint8_t packet[13] = {0};
+ packet[0] = 0x58; // I:0 P:1 L:0 F:1 B:1 E:0 V:0 Z:0
+ packet[1] = (kPdiff << 1); // P,F: P_DIFF:3 N:0
+
+ RTPVideoHeader video_header;
+ EXPECT_EQ(VideoRtpDepacketizerVp9::ParseRtpPayload(packet, &video_header), 0);
+}
+
+TEST(VideoRtpDepacketizerVp9Test, ParseRefIdxFailsWithTooManyRefPics) {
+ const uint8_t kPdiff = 3;
+ uint8_t packet[13] = {0};
+ packet[0] = 0xD8; // I:1 P:1 L:0 F:1 B:1 E:0 V:0 Z:0
+ packet[1] = kMaxOneBytePictureId; // I: PICTURE ID:127
+ packet[2] = (kPdiff << 1) | 1; // P,F: P_DIFF:3 N:1
+ packet[3] = (kPdiff << 1) | 1; // P,F: P_DIFF:3 N:1
+ packet[4] = (kPdiff << 1) | 1; // P,F: P_DIFF:3 N:1
+ packet[5] = (kPdiff << 1) | 0; // P,F: P_DIFF:3 N:0
+
+ RTPVideoHeader video_header;
+ EXPECT_EQ(VideoRtpDepacketizerVp9::ParseRtpPayload(packet, &video_header), 0);
+}
+
+TEST(VideoRtpDepacketizerVp9Test, ParseSsData) {
+ const uint8_t kYbit = 0;
+ const size_t kNs = 2;
+ const size_t kNg = 2;
+ uint8_t packet[23] = {0};
+ packet[0] = 0x0A; // I:0 P:0 L:0 F:0 B:1 E:0 V:1 Z:0
+ packet[1] = ((kNs - 1) << 5) | (kYbit << 4) | (1 << 3); // N_S Y G:1 -
+ packet[2] = kNg; // N_G
+ packet[3] = (0 << 5) | (1 << 4) | (0 << 2) | 0; // T:0 U:1 R:0 -
+ packet[4] = (2 << 5) | (0 << 4) | (1 << 2) | 0; // T:2 U:0 R:1 -
+ packet[5] = 33;
+
+ RTPVideoHeader video_header;
+ int offset = VideoRtpDepacketizerVp9::ParseRtpPayload(packet, &video_header);
+
+ EXPECT_EQ(offset, 6);
+ RTPVideoHeaderVP9 expected;
+ expected.InitRTPVideoHeaderVP9();
+ expected.beginning_of_frame = true;
+ expected.ss_data_available = true;
+ expected.num_spatial_layers = kNs;
+ expected.spatial_layer_resolution_present = kYbit ? true : false;
+ expected.gof.num_frames_in_gof = kNg;
+ expected.gof.temporal_idx[0] = 0;
+ expected.gof.temporal_idx[1] = 2;
+ expected.gof.temporal_up_switch[0] = true;
+ expected.gof.temporal_up_switch[1] = false;
+ expected.gof.num_ref_pics[0] = 0;
+ expected.gof.num_ref_pics[1] = 1;
+ expected.gof.pid_diff[1][0] = 33;
+ VerifyHeader(expected,
+ absl::get<RTPVideoHeaderVP9>(video_header.video_type_header));
+}
+
+TEST(VideoRtpDepacketizerVp9Test, ParseFirstPacketInKeyFrame) {
+ uint8_t packet[2] = {0};
+ packet[0] = 0x08; // I:0 P:0 L:0 F:0 B:1 E:0 V:0 Z:0
+
+ RTPVideoHeader video_header;
+ VideoRtpDepacketizerVp9::ParseRtpPayload(packet, &video_header);
+
+ EXPECT_EQ(video_header.frame_type, VideoFrameType::kVideoFrameKey);
+ EXPECT_TRUE(video_header.is_first_packet_in_frame);
+ EXPECT_FALSE(video_header.is_last_packet_in_frame);
+}
+
+TEST(VideoRtpDepacketizerVp9Test, ParseLastPacketInDeltaFrame) {
+ uint8_t packet[2] = {0};
+ packet[0] = 0x44; // I:0 P:1 L:0 F:0 B:0 E:1 V:0 Z:0
+
+ RTPVideoHeader video_header;
+ VideoRtpDepacketizerVp9::ParseRtpPayload(packet, &video_header);
+
+ EXPECT_EQ(video_header.frame_type, VideoFrameType::kVideoFrameDelta);
+ EXPECT_FALSE(video_header.is_first_packet_in_frame);
+ EXPECT_TRUE(video_header.is_last_packet_in_frame);
+}
+
+TEST(VideoRtpDepacketizerVp9Test, ParseResolution) {
+ const uint16_t kWidth[2] = {640, 1280};
+ const uint16_t kHeight[2] = {360, 720};
+ uint8_t packet[20] = {0};
+ packet[0] = 0x0A; // I:0 P:0 L:0 F:0 B:1 E:0 V:1 Z:0
+ packet[1] = (1 << 5) | (1 << 4) | 0; // N_S:1 Y:1 G:0
+ packet[2] = kWidth[0] >> 8;
+ packet[3] = kWidth[0] & 0xFF;
+ packet[4] = kHeight[0] >> 8;
+ packet[5] = kHeight[0] & 0xFF;
+ packet[6] = kWidth[1] >> 8;
+ packet[7] = kWidth[1] & 0xFF;
+ packet[8] = kHeight[1] >> 8;
+ packet[9] = kHeight[1] & 0xFF;
+
+ RTPVideoHeader video_header;
+ VideoRtpDepacketizerVp9::ParseRtpPayload(packet, &video_header);
+
+ EXPECT_EQ(video_header.width, kWidth[0]);
+ EXPECT_EQ(video_header.height, kHeight[0]);
+}
+
+TEST(VideoRtpDepacketizerVp9Test, ParseFailsForNoPayloadLength) {
+ rtc::ArrayView<const uint8_t> empty;
+
+ RTPVideoHeader video_header;
+ EXPECT_EQ(VideoRtpDepacketizerVp9::ParseRtpPayload(empty, &video_header), 0);
+}
+
+TEST(VideoRtpDepacketizerVp9Test, ParseFailsForTooShortBufferToFitPayload) {
+ uint8_t packet[] = {0};
+
+ RTPVideoHeader video_header;
+ EXPECT_EQ(VideoRtpDepacketizerVp9::ParseRtpPayload(packet, &video_header), 0);
+}
+
+TEST(VideoRtpDepacketizerVp9Test, ParseNonRefForInterLayerPred) {
+ RTPVideoHeader video_header;
+ RTPVideoHeaderVP9 expected;
+ expected.InitRTPVideoHeaderVP9();
+ uint8_t packet[2] = {0};
+
+ packet[0] = 0x08; // I:0 P:0 L:0 F:0 B:1 E:0 V:0 Z:0
+ VideoRtpDepacketizerVp9::ParseRtpPayload(packet, &video_header);
+
+ expected.beginning_of_frame = true;
+ expected.non_ref_for_inter_layer_pred = false;
+ VerifyHeader(expected,
+ absl::get<RTPVideoHeaderVP9>(video_header.video_type_header));
+
+ packet[0] = 0x05; // I:0 P:0 L:0 F:0 B:0 E:1 V:0 Z:1
+ VideoRtpDepacketizerVp9::ParseRtpPayload(packet, &video_header);
+
+ expected.beginning_of_frame = false;
+ expected.end_of_frame = true;
+ expected.non_ref_for_inter_layer_pred = true;
+ VerifyHeader(expected,
+ absl::get<RTPVideoHeaderVP9>(video_header.video_type_header));
+}
+
+TEST(VideoRtpDepacketizerVp9Test, ReferencesInputCopyOnWriteBuffer) {
+ constexpr size_t kHeaderSize = 1;
+ uint8_t packet[4] = {0};
+ packet[0] = 0x0C; // I:0 P:0 L:0 F:0 B:1 E:1 V:0 Z:0
+
+ rtc::CopyOnWriteBuffer rtp_payload(packet);
+ VideoRtpDepacketizerVp9 depacketizer;
+ absl::optional<VideoRtpDepacketizer::ParsedRtpPayload> parsed =
+ depacketizer.Parse(rtp_payload);
+ ASSERT_TRUE(parsed);
+
+ EXPECT_EQ(parsed->video_payload.size(), rtp_payload.size() - kHeaderSize);
+  // Compare pointers to verify that the copy-on-write buffer was not
+  // unshared.
+ EXPECT_EQ(parsed->video_payload.cdata(), rtp_payload.cdata() + kHeaderSize);
+}
+} // namespace
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/test/testFec/average_residual_loss_xor_codes.h b/third_party/libwebrtc/modules/rtp_rtcp/test/testFec/average_residual_loss_xor_codes.h
new file mode 100644
index 0000000000..0bc2f56917
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/test/testFec/average_residual_loss_xor_codes.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef MODULES_RTP_RTCP_TEST_TESTFEC_AVERAGE_RESIDUAL_LOSS_XOR_CODES_H_
+#define MODULES_RTP_RTCP_TEST_TESTFEC_AVERAGE_RESIDUAL_LOSS_XOR_CODES_H_
+
+namespace webrtc {
+
+// Maximum number of media packets allowed in this test. The burst mask types
+// are currently defined up to (kMaxMediaPacketsTest, kMaxMediaPacketsTest).
+const int kMaxMediaPacketsTest = 12;
+
+// Maximum number of FEC codes considered in this test.
+const int kNumberCodes = kMaxMediaPacketsTest * (kMaxMediaPacketsTest + 1) / 2;
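+// This is the number of (num_media_packets, num_fec_packets) pairs with
+// 1 <= num_fec_packets <= num_media_packets <= kMaxMediaPacketsTest,
+// i.e. 12 * 13 / 2 = 78 codes.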
+
+// For the random mask type: reference level for the maximum average residual
+// loss expected for each code size up to:
+// (kMaxMediaPacketsTest, kMaxMediaPacketsTest).
+const float kMaxResidualLossRandomMask[kNumberCodes] = {
+ 0.009463f, 0.022436f, 0.007376f, 0.033895f, 0.012423f, 0.004644f, 0.043438f,
+ 0.019937f, 0.008820f, 0.003438f, 0.051282f, 0.025795f, 0.012872f, 0.006458f,
+ 0.003195f, 0.057728f, 0.032146f, 0.016708f, 0.009242f, 0.005054f, 0.003078f,
+ 0.063050f, 0.037261f, 0.021767f, 0.012447f, 0.007099f, 0.003826f, 0.002504f,
+ 0.067476f, 0.042348f, 0.026169f, 0.015695f, 0.009478f, 0.005887f, 0.003568f,
+ 0.001689f, 0.071187f, 0.046575f, 0.031697f, 0.019797f, 0.012433f, 0.007027f,
+ 0.004845f, 0.002777f, 0.001753f, 0.074326f, 0.050628f, 0.034978f, 0.021955f,
+ 0.014821f, 0.009462f, 0.006393f, 0.004181f, 0.003105f, 0.001231f, 0.077008f,
+ 0.054226f, 0.038407f, 0.026251f, 0.018634f, 0.011568f, 0.008130f, 0.004957f,
+ 0.003334f, 0.002069f, 0.001304f, 0.079318f, 0.057180f, 0.041268f, 0.028842f,
+ 0.020033f, 0.014061f, 0.009636f, 0.006411f, 0.004583f, 0.002817f, 0.001770f,
+ 0.001258f};
+
+// For the bursty mask type: reference level for the maximum average residual
+// loss expected for each code size up to:
+// (kMaxMediaPacketsTest, kMaxMediaPacketsTest).
+const float kMaxResidualLossBurstyMask[kNumberCodes] = {
+ 0.033236f, 0.053344f, 0.026616f, 0.064129f, 0.036589f, 0.021892f, 0.071055f,
+ 0.043890f, 0.028009f, 0.018524f, 0.075968f, 0.049828f, 0.033288f, 0.022791f,
+ 0.016088f, 0.079672f, 0.054586f, 0.037872f, 0.026679f, 0.019326f, 0.014293f,
+ 0.082582f, 0.058719f, 0.042045f, 0.030504f, 0.022391f, 0.016894f, 0.012946f,
+ 0.084935f, 0.062169f, 0.045620f, 0.033713f, 0.025570f, 0.019439f, 0.015121f,
+ 0.011920f, 0.086881f, 0.065267f, 0.048721f, 0.037613f, 0.028278f, 0.022152f,
+ 0.017314f, 0.013791f, 0.011130f, 0.088516f, 0.067911f, 0.051709f, 0.040819f,
+ 0.030777f, 0.024547f, 0.019689f, 0.015877f, 0.012773f, 0.010516f, 0.089909f,
+ 0.070332f, 0.054402f, 0.043210f, 0.034096f, 0.026625f, 0.021823f, 0.017648f,
+ 0.014649f, 0.011982f, 0.010035f, 0.091109f, 0.072428f, 0.056775f, 0.045418f,
+ 0.036679f, 0.028599f, 0.023693f, 0.019966f, 0.016603f, 0.013690f, 0.011359f,
+ 0.009657f};
+
+} // namespace webrtc
+#endif // MODULES_RTP_RTCP_TEST_TESTFEC_AVERAGE_RESIDUAL_LOSS_XOR_CODES_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/test/testFec/test_fec.cc b/third_party/libwebrtc/modules/rtp_rtcp/test/testFec/test_fec.cc
new file mode 100644
index 0000000000..5ac8feca21
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/test/testFec/test_fec.cc
@@ -0,0 +1,474 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * Test application for core FEC algorithm. Calls encoding and decoding
+ * functions in ForwardErrorCorrection directly.
+ */
+
+#include <string.h>
+#include <time.h>
+
+#include <list>
+
+#include "modules/rtp_rtcp/source/byte_io.h"
+#include "modules/rtp_rtcp/source/forward_error_correction.h"
+#include "modules/rtp_rtcp/source/forward_error_correction_internal.h"
+#include "rtc_base/random.h"
+#include "test/gtest.h"
+#include "test/testsupport/file_utils.h"
+
+// #define VERBOSE_OUTPUT
+
+namespace webrtc {
+namespace fec_private_tables {
+extern const uint8_t** kPacketMaskBurstyTbl[12];
+}
+namespace test {
+using fec_private_tables::kPacketMaskBurstyTbl;
+
+void ReceivePackets(
+ std::vector<std::unique_ptr<ForwardErrorCorrection::ReceivedPacket>>*
+ to_decode_list,
+ std::vector<std::unique_ptr<ForwardErrorCorrection::ReceivedPacket>>*
+ received_packet_list,
+ size_t num_packets_to_decode,
+ float reorder_rate,
+ float duplicate_rate,
+ Random* random) {
+ RTC_DCHECK(to_decode_list->empty());
+ RTC_DCHECK_LE(num_packets_to_decode, received_packet_list->size());
+
+ for (size_t i = 0; i < num_packets_to_decode; i++) {
+ auto it = received_packet_list->begin();
+    // Reorder packets: keep advancing the pick position while the random
+    // draw stays below `reorder_rate`, giving a geometric displacement.
+ float random_variable = random->Rand<float>();
+ while (random_variable < reorder_rate) {
+ ++it;
+ if (it == received_packet_list->end()) {
+ --it;
+ break;
+ }
+ random_variable = random->Rand<float>();
+ }
+ to_decode_list->push_back(std::move(*it));
+ received_packet_list->erase(it);
+
+    // Duplicate packets: each extra copy is appended while the random draw
+    // stays below `duplicate_rate`.
+ ForwardErrorCorrection::ReceivedPacket* received_packet =
+ to_decode_list->back().get();
+ random_variable = random->Rand<float>();
+ while (random_variable < duplicate_rate) {
+ std::unique_ptr<ForwardErrorCorrection::ReceivedPacket> duplicate_packet(
+ new ForwardErrorCorrection::ReceivedPacket());
+ *duplicate_packet = *received_packet;
+ duplicate_packet->pkt = new ForwardErrorCorrection::Packet();
+ duplicate_packet->pkt->data = received_packet->pkt->data;
+
+ to_decode_list->push_back(std::move(duplicate_packet));
+ random_variable = random->Rand<float>();
+ }
+ }
+}
+
+void RunTest(bool use_flexfec) {
+ // TODO(marpan): Split this function into subroutines/helper functions.
+ enum { kMaxNumberMediaPackets = 48 };
+ enum { kMaxNumberFecPackets = 48 };
+
+ const uint32_t kNumMaskBytesL0 = 2;
+ const uint32_t kNumMaskBytesL1 = 6;
+
+ // FOR UEP
+ const bool kUseUnequalProtection = true;
+
+ // FEC mask types.
+ const FecMaskType kMaskTypes[] = {kFecMaskRandom, kFecMaskBursty};
+ const int kNumFecMaskTypes = sizeof(kMaskTypes) / sizeof(*kMaskTypes);
+
+  // Maximum number of media packets allowed for each mask type.
+ const uint16_t kMaxMediaPackets[] = {
+ kMaxNumberMediaPackets,
+ sizeof(kPacketMaskBurstyTbl) / sizeof(*kPacketMaskBurstyTbl)};
+
+ ASSERT_EQ(12, kMaxMediaPackets[1]) << "Max media packets for bursty mode not "
+ "equal to 12.";
+
+ ForwardErrorCorrection::PacketList media_packet_list;
+ std::list<ForwardErrorCorrection::Packet*> fec_packet_list;
+ std::vector<std::unique_ptr<ForwardErrorCorrection::ReceivedPacket>>
+ to_decode_list;
+ std::vector<std::unique_ptr<ForwardErrorCorrection::ReceivedPacket>>
+ received_packet_list;
+ ForwardErrorCorrection::RecoveredPacketList recovered_packet_list;
+ std::list<uint8_t*> fec_mask_list;
+
+ // Running over only two loss rates to limit execution time.
+ const float loss_rate[] = {0.05f, 0.01f};
+ const uint32_t loss_rate_size = sizeof(loss_rate) / sizeof(*loss_rate);
+ const float reorder_rate = 0.1f;
+ const float duplicate_rate = 0.1f;
+
+ uint8_t media_loss_mask[kMaxNumberMediaPackets];
+ uint8_t fec_loss_mask[kMaxNumberFecPackets];
+ uint8_t fec_packet_masks[kMaxNumberFecPackets][kMaxNumberMediaPackets];
+
+ // Seed the random number generator, storing the seed to file in order to
+ // reproduce past results.
+ const unsigned int random_seed = static_cast<unsigned int>(time(nullptr));
+ Random random(random_seed);
+ std::string filename = webrtc::test::OutputPath() + "randomSeedLog.txt";
+ FILE* random_seed_file = fopen(filename.c_str(), "a");
+ fprintf(random_seed_file, "%u\n", random_seed);
+ fclose(random_seed_file);
+ random_seed_file = nullptr;
+
+ uint16_t seq_num = 0;
+ uint32_t timestamp = random.Rand<uint32_t>();
+ const uint32_t media_ssrc = random.Rand(1u, 0xfffffffe);
+ uint32_t fec_ssrc;
+ uint16_t fec_seq_num_offset;
+ if (use_flexfec) {
+ fec_ssrc = random.Rand(1u, 0xfffffffe);
+ fec_seq_num_offset = random.Rand(0, 1 << 15);
+ } else {
+ fec_ssrc = media_ssrc;
+ fec_seq_num_offset = 0;
+ }
+
+ std::unique_ptr<ForwardErrorCorrection> fec;
+ if (use_flexfec) {
+ fec = ForwardErrorCorrection::CreateFlexfec(fec_ssrc, media_ssrc);
+ } else {
+ RTC_DCHECK_EQ(media_ssrc, fec_ssrc);
+ fec = ForwardErrorCorrection::CreateUlpfec(fec_ssrc);
+ }
+
+ // Loop over the mask types: random and bursty.
+ for (int mask_type_idx = 0; mask_type_idx < kNumFecMaskTypes;
+ ++mask_type_idx) {
+ for (uint32_t loss_rate_idx = 0; loss_rate_idx < loss_rate_size;
+ ++loss_rate_idx) {
+ printf("Loss rate: %.2f, Mask type %d \n", loss_rate[loss_rate_idx],
+ mask_type_idx);
+
+ const uint32_t packet_mask_max = kMaxMediaPackets[mask_type_idx];
+ std::unique_ptr<uint8_t[]> packet_mask(
+ new uint8_t[packet_mask_max * kNumMaskBytesL1]);
+
+ FecMaskType fec_mask_type = kMaskTypes[mask_type_idx];
+
+ for (uint32_t num_media_packets = 1; num_media_packets <= packet_mask_max;
+ num_media_packets++) {
+ internal::PacketMaskTable mask_table(fec_mask_type, num_media_packets);
+
+ for (uint32_t num_fec_packets = 1;
+ num_fec_packets <= num_media_packets &&
+ num_fec_packets <= packet_mask_max;
+ num_fec_packets++) {
+ // Loop over num_imp_packets: usually <= (0.3*num_media_packets).
+ // For this test we check up to ~ (num_media_packets / 4).
+ uint32_t max_num_imp_packets = num_media_packets / 4 + 1;
+ for (uint32_t num_imp_packets = 0;
+ num_imp_packets <= max_num_imp_packets &&
+ num_imp_packets <= packet_mask_max;
+ num_imp_packets++) {
+ uint8_t protection_factor =
+ static_cast<uint8_t>(num_fec_packets * 255 / num_media_packets);
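+            // `protection_factor` expresses the FEC/media ratio on a 0..255
+            // scale (255 corresponds to 100% protection); e.g. 1 FEC packet
+            // per 2 media packets gives 127.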
+
+ const uint32_t mask_bytes_per_fec_packet =
+ (num_media_packets > 16) ? kNumMaskBytesL1 : kNumMaskBytesL0;
+
+ memset(packet_mask.get(), 0,
+ num_media_packets * mask_bytes_per_fec_packet);
+
+          // Generate the packet masks, in packed byte format (one bit per
+          // media packet for each FEC packet).
+ internal::GeneratePacketMasks(
+ num_media_packets, num_fec_packets, num_imp_packets,
+ kUseUnequalProtection, &mask_table, packet_mask.get());
+
+#ifdef VERBOSE_OUTPUT
+ printf(
+ "%u media packets, %u FEC packets, %u num_imp_packets, "
+ "loss rate = %.2f \n",
+ num_media_packets, num_fec_packets, num_imp_packets,
+ loss_rate[loss_rate_idx]);
+ printf("Packet mask matrix \n");
+#endif
+
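+          // Unpack the packed byte masks into one 0/1 entry per
+          // (FEC packet, media packet) pair for the checks below.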
+ for (uint32_t i = 0; i < num_fec_packets; i++) {
+ for (uint32_t j = 0; j < num_media_packets; j++) {
+ const uint8_t byte_mask =
+ packet_mask[i * mask_bytes_per_fec_packet + j / 8];
+ const uint32_t bit_position = (7 - j % 8);
+ fec_packet_masks[i][j] =
+ (byte_mask & (1 << bit_position)) >> bit_position;
+#ifdef VERBOSE_OUTPUT
+ printf("%u ", fec_packet_masks[i][j]);
+#endif
+ }
+#ifdef VERBOSE_OUTPUT
+ printf("\n");
+#endif
+ }
+#ifdef VERBOSE_OUTPUT
+ printf("\n");
+#endif
+ // Check for all zero rows or columns: indicates incorrect mask.
+ uint32_t row_limit = num_media_packets;
+ for (uint32_t i = 0; i < num_fec_packets; ++i) {
+ uint32_t row_sum = 0;
+ for (uint32_t j = 0; j < row_limit; ++j) {
+ row_sum += fec_packet_masks[i][j];
+ }
+ ASSERT_NE(0u, row_sum) << "Row is all zero " << i;
+ }
+ for (uint32_t j = 0; j < row_limit; ++j) {
+ uint32_t column_sum = 0;
+ for (uint32_t i = 0; i < num_fec_packets; ++i) {
+ column_sum += fec_packet_masks[i][j];
+ }
+ ASSERT_NE(0u, column_sum) << "Column is all zero " << j;
+ }
+
+ // Construct media packets.
+ // Reset the sequence number here for each FEC code/mask tested
+ // below, to avoid sequence number wrap-around. In actual decoding,
+ // old FEC packets in list are dropped if sequence number wrap
+ // around is detected. This case is currently not handled below.
+ seq_num = 0;
+ for (uint32_t i = 0; i < num_media_packets; ++i) {
+ std::unique_ptr<ForwardErrorCorrection::Packet> media_packet(
+ new ForwardErrorCorrection::Packet());
+ const uint32_t kMinPacketSize = 12;
+ const uint32_t kMaxPacketSize = static_cast<uint32_t>(
+ IP_PACKET_SIZE - 12 - 28 - fec->MaxPacketOverhead());
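+            // The 12 and 28 above presumably account for the RTP header and
+            // the IPv4 + UDP headers (20 + 8 bytes), respectively.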
+ size_t packet_length =
+ random.Rand(kMinPacketSize, kMaxPacketSize);
+ media_packet->data.SetSize(packet_length);
+
+ uint8_t* data = media_packet->data.MutableData();
+ // Generate random values for the first 2 bytes.
+ data[0] = random.Rand<uint8_t>();
+ data[1] = random.Rand<uint8_t>();
+
+ // The first two bits are assumed to be 10 by the
+ // FEC encoder. In fact the FEC decoder will set the
+ // two first bits to 10 regardless of what they
+ // actually were. Set the first two bits to 10
+ // so that a memcmp can be performed for the
+ // whole restored packet.
+ data[0] |= 0x80;
+ data[0] &= 0xbf;
+
+ // FEC is applied to a whole frame.
+ // A frame is signaled by multiple packets without
+ // the marker bit set followed by the last packet of
+ // the frame for which the marker bit is set.
+ // Only push one (fake) frame to the FEC.
+ data[1] &= 0x7f;
+
+ ByteWriter<uint16_t>::WriteBigEndian(&data[2], seq_num);
+ ByteWriter<uint32_t>::WriteBigEndian(&data[4], timestamp);
+ ByteWriter<uint32_t>::WriteBigEndian(&data[8], media_ssrc);
+ // Generate random values for payload
+ for (size_t j = 12; j < packet_length; ++j) {
+ data[j] = random.Rand<uint8_t>();
+ }
+ media_packet_list.push_back(std::move(media_packet));
+ seq_num++;
+ }
+ media_packet_list.back()->data.MutableData()[1] |= 0x80;
+
+ ASSERT_EQ(0, fec->EncodeFec(media_packet_list, protection_factor,
+ num_imp_packets, kUseUnequalProtection,
+ fec_mask_type, &fec_packet_list))
+ << "EncodeFec() failed";
+
+ ASSERT_EQ(num_fec_packets, fec_packet_list.size())
+ << "We requested " << num_fec_packets
+ << " FEC packets, but "
+ "EncodeFec() produced "
+ << fec_packet_list.size();
+
+ memset(media_loss_mask, 0, sizeof(media_loss_mask));
+ uint32_t media_packet_idx = 0;
+ for (const auto& media_packet : media_packet_list) {
+ // We want a value between 0 and 1.
+ const float loss_random_variable = random.Rand<float>();
+
+ if (loss_random_variable >= loss_rate[loss_rate_idx]) {
+ media_loss_mask[media_packet_idx] = 1;
+ std::unique_ptr<ForwardErrorCorrection::ReceivedPacket>
+ received_packet(
+ new ForwardErrorCorrection::ReceivedPacket());
+ received_packet->pkt = new ForwardErrorCorrection::Packet();
+ received_packet->pkt->data = media_packet->data;
+ received_packet->ssrc = media_ssrc;
+ received_packet->seq_num = ByteReader<uint16_t>::ReadBigEndian(
+ media_packet->data.data() + 2);
+ received_packet->is_fec = false;
+ received_packet_list.push_back(std::move(received_packet));
+ }
+ media_packet_idx++;
+ }
+
+ memset(fec_loss_mask, 0, sizeof(fec_loss_mask));
+ uint32_t fec_packet_idx = 0;
+ for (auto* fec_packet : fec_packet_list) {
+ const float loss_random_variable = random.Rand<float>();
+ if (loss_random_variable >= loss_rate[loss_rate_idx]) {
+ fec_loss_mask[fec_packet_idx] = 1;
+ std::unique_ptr<ForwardErrorCorrection::ReceivedPacket>
+ received_packet(
+ new ForwardErrorCorrection::ReceivedPacket());
+ received_packet->pkt = new ForwardErrorCorrection::Packet();
+ received_packet->pkt->data = fec_packet->data;
+ received_packet->seq_num = fec_seq_num_offset + seq_num;
+ received_packet->is_fec = true;
+ received_packet->ssrc = fec_ssrc;
+ received_packet_list.push_back(std::move(received_packet));
+
+ fec_mask_list.push_back(fec_packet_masks[fec_packet_idx]);
+ }
+ ++fec_packet_idx;
+ ++seq_num;
+ }
+
+#ifdef VERBOSE_OUTPUT
+ printf("Media loss mask:\n");
+ for (uint32_t i = 0; i < num_media_packets; i++) {
+ printf("%u ", media_loss_mask[i]);
+ }
+ printf("\n\n");
+
+ printf("FEC loss mask:\n");
+ for (uint32_t i = 0; i < num_fec_packets; i++) {
+ printf("%u ", fec_loss_mask[i]);
+ }
+ printf("\n\n");
+#endif
+
+ auto fec_mask_it = fec_mask_list.begin();
+ while (fec_mask_it != fec_mask_list.end()) {
+ uint32_t hamming_dist = 0;
+ uint32_t recovery_position = 0;
+ for (uint32_t i = 0; i < num_media_packets; i++) {
+ if (media_loss_mask[i] == 0 && (*fec_mask_it)[i] == 1) {
+ recovery_position = i;
+ ++hamming_dist;
+ }
+ }
+ auto item_to_delete = fec_mask_it;
+ ++fec_mask_it;
+
+ if (hamming_dist == 1) {
+ // Recovery possible. Restart search.
+ media_loss_mask[recovery_position] = 1;
+ fec_mask_it = fec_mask_list.begin();
+ } else if (hamming_dist == 0) {
+ // FEC packet cannot provide further recovery.
+ fec_mask_list.erase(item_to_delete);
+ }
+ }
+#ifdef VERBOSE_OUTPUT
+ printf("Recovery mask:\n");
+ for (uint32_t i = 0; i < num_media_packets; ++i) {
+ printf("%u ", media_loss_mask[i]);
+ }
+ printf("\n\n");
+#endif
+ // For error-checking frame completion.
+ bool fec_packet_received = false;
+ while (!received_packet_list.empty()) {
+ size_t num_packets_to_decode = random.Rand(
+ 1u, static_cast<uint32_t>(received_packet_list.size()));
+ ReceivePackets(&to_decode_list, &received_packet_list,
+ num_packets_to_decode, reorder_rate,
+ duplicate_rate, &random);
+
+              if (!fec_packet_received) {
+ for (const auto& received_packet : to_decode_list) {
+ if (received_packet->is_fec) {
+ fec_packet_received = true;
+ }
+ }
+ }
+ for (const auto& received_packet : to_decode_list) {
+ fec->DecodeFec(*received_packet, &recovered_packet_list);
+ }
+ to_decode_list.clear();
+ }
+ media_packet_idx = 0;
+ for (const auto& media_packet : media_packet_list) {
+ if (media_loss_mask[media_packet_idx] == 1) {
+ // Should have recovered this packet.
+ auto recovered_packet_list_it = recovered_packet_list.cbegin();
+
+ ASSERT_FALSE(recovered_packet_list_it ==
+ recovered_packet_list.end())
+ << "Insufficient number of recovered packets.";
+ ForwardErrorCorrection::RecoveredPacket* recovered_packet =
+ recovered_packet_list_it->get();
+
+ ASSERT_EQ(recovered_packet->pkt->data.size(),
+ media_packet->data.size())
+ << "Recovered packet length not identical to original "
+ "media packet";
+ ASSERT_EQ(0, memcmp(recovered_packet->pkt->data.cdata(),
+ media_packet->data.cdata(),
+ media_packet->data.size()))
+ << "Recovered packet payload not identical to original "
+ "media packet";
+ recovered_packet_list.pop_front();
+ }
+ ++media_packet_idx;
+ }
+ fec->ResetState(&recovered_packet_list);
+ ASSERT_TRUE(recovered_packet_list.empty())
+ << "Excessive number of recovered packets.\t size is: "
+ << recovered_packet_list.size();
+ // -- Teardown --
+ media_packet_list.clear();
+
+ // Clear FEC packet list, so we don't pass in a non-empty
+ // list in the next call to DecodeFec().
+ fec_packet_list.clear();
+
+ // Delete received packets we didn't pass to DecodeFec(), due to
+ // early frame completion.
+ received_packet_list.clear();
+
+ while (!fec_mask_list.empty()) {
+ fec_mask_list.pop_front();
+ }
+ timestamp += 90000 / 30;
+ } // loop over num_imp_packets
+ } // loop over FecPackets
+ } // loop over num_media_packets
+ } // loop over loss rates
+ } // loop over mask types
+
+ // Have DecodeFec clear the recovered packet list.
+ fec->ResetState(&recovered_packet_list);
+ ASSERT_TRUE(recovered_packet_list.empty())
+ << "Recovered packet list is not empty";
+}
+
+TEST(FecTest, UlpfecTest) {
+ RunTest(false);
+}
+
+TEST(FecTest, FlexfecTest) {
+ RunTest(true);
+}
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/test/testFec/test_packet_masks_metrics.cc b/third_party/libwebrtc/modules/rtp_rtcp/test/testFec/test_packet_masks_metrics.cc
new file mode 100644
index 0000000000..25ceee585a
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/test/testFec/test_packet_masks_metrics.cc
@@ -0,0 +1,1060 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * The purpose of this test is to compute metrics to characterize the properties
+ * and efficiency of the packet masks used in the generic XOR FEC code.
+ *
+ * The metrics measure the efficiency (recovery potential or residual loss) of
+ * the FEC code, under various statistical loss models for the packet/symbol
+ * loss events. Various constraints on the behavior of these metrics are
+ * verified, and compared to the reference RS (Reed-Solomon) code. This serves
+ * in some way as a basic check/benchmark for the packet masks.
+ *
+ * By an FEC code, we mean an erasure packet/symbol code, characterized by:
+ * (1) The code size parameters (k,m), where k = number of source/media packets,
+ * and m = number of FEC packets,
+ * (2) The code type: XOR or RS.
+ * In the case of XOR, the residual loss is determined via the set of packet
+ * masks (generator matrix). In the case of RS, the residual loss is determined
+ * directly from the MDS (maximum distance separable) property of RS.
+ *
+ * Currently two classes of packet masks are available (random type and bursty
+ * type), so three codes are considered below: RS, XOR-random, and XOR-bursty.
+ * The bursty class is defined up to k=12, so (k=12,m=12) is the largest code
+ * size considered in this test.
+ *
+ * The XOR codes are defined in RFC 5109 and correspond to the class of
+ * LDGM (low density generator matrix) codes, which is a subset of the LDPC
+ * (low density parity check) codes. A future implementation may extend our
+ * XOR codes to LDPC codes, which explicitly include protection of FEC
+ * packets.
+ *
+ * The type of packet/symbol loss models considered in this test are:
+ * (1) Random loss: Bernoulli process, characterized by the average loss rate.
+ * (2) Bursty loss: Markov chain (Gilbert-Elliot model), characterized by two
+ * parameters: average loss rate and average burst length.
+ */
+
+#include <cmath>
+#include <memory>
+
+#include "modules/rtp_rtcp/source/forward_error_correction_internal.h"
+#include "modules/rtp_rtcp/test/testFec/average_residual_loss_xor_codes.h"
+#include "test/gtest.h"
+#include "test/testsupport/file_utils.h"
+
+namespace webrtc {
+
+// Maximum number of media packets allowed for the XOR (RFC 5109) code.
+enum { kMaxNumberMediaPackets = 48 };
+
+// Maximum number of media packets allowed for each mask type.
+const uint16_t kMaxMediaPackets[] = {kMaxNumberMediaPackets, 12};
+
+// Maximum gap size for characterizing the consecutiveness of the loss.
+const int kMaxGapSize = 2 * kMaxMediaPacketsTest;
+
+// Number of gap levels written to file/output.
+const int kGapSizeOutput = 5;
+
+// Maximum number of states for characterizing the residual loss distribution.
+const int kNumStatesDistribution = 2 * kMaxMediaPacketsTest * kMaxGapSize + 1;
+
+// The code type.
+enum CodeType {
+ xor_random_code, // XOR with random mask type.
+ xor_bursty_code, // XOR with bursty mask type.
+  rs_code           // Reed-Solomon.
+};
+
+// The code size parameters.
+struct CodeSizeParams {
+ int num_media_packets;
+ int num_fec_packets;
+ // Protection level: num_fec_packets / (num_media_packets + num_fec_packets).
+ float protection_level;
+ // Number of loss configurations, for a given loss number and gap number.
+ // The gap number refers to the maximum gap/hole of a loss configuration
+ // (used to measure the "consecutiveness" of the loss).
+ int configuration_density[kNumStatesDistribution];
+};
+
+// The type of loss models.
+enum LossModelType { kRandomLossModel, kBurstyLossModel };
+
+struct LossModel {
+ LossModelType loss_type;
+ float average_loss_rate;
+ float average_burst_length;
+};
+
+// Average loss rates.
+const float kAverageLossRate[] = {0.025f, 0.05f, 0.1f, 0.25f};
+
+// Average burst lengths. The case of `kAverageBurstLength` = 1.0 refers to
+// the random model. Note that for the random (Bernoulli) model, the average
+// burst length is determined by the average loss rate, i.e.,
+// AverageBurstLength = 1 / (1 - AverageLossRate).
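+// For example, at an average loss rate of 0.25 the random model has an
+// implied average burst length of 1 / (1 - 0.25) ~= 1.33.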
+const float kAverageBurstLength[] = {1.0f, 2.0f, 4.0f};
+
+// Total number of loss models: For each burst length case, there are
+// a number of models corresponding to the loss rates.
+const int kNumLossModels =
+ (sizeof(kAverageBurstLength) / sizeof(*kAverageBurstLength)) *
+ (sizeof(kAverageLossRate) / sizeof(*kAverageLossRate));
+
+// Thresholds on the average loss rate of the packet loss model, below which
+// certain properties of the codes are expected.
+float loss_rate_upper_threshold = 0.20f;
+float loss_rate_lower_threshold = 0.025f;
+
+// Set of thresholds on the expected average recovery rate, for each code type.
+// These are global thresholds for now; in future version we may condition them
+// on the code length/size and protection level.
+const float kRecoveryRateXorRandom[3] = {0.94f, 0.50f, 0.19f};
+const float kRecoveryRateXorBursty[3] = {0.90f, 0.54f, 0.22f};
+
+// Metrics for a given FEC code; each code is defined by the code type
+// (RS, XOR-random/bursty), and the code size parameters (k,m), where
+// k = num_media_packets, m = num_fec_packets.
+struct MetricsFecCode {
+ // The average and variance of the residual loss, as a function of the
+ // packet/symbol loss model. The average/variance is computed by averaging
+ // over all loss configurations wrt the loss probability given by the
+ // underlying loss model.
+ double average_residual_loss[kNumLossModels];
+ double variance_residual_loss[kNumLossModels];
+ // The residual loss, as a function of the loss number and the gap number of
+ // the loss configurations. The gap number refers to the maximum gap/hole of
+ // a loss configuration (used to measure the "consecutiveness" of the loss).
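+  // The array is indexed as
+  // gap_number * (2 * kMaxMediaPacketsTest) + loss_number.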
+ double residual_loss_per_loss_gap[kNumStatesDistribution];
+ // The recovery rate as a function of the loss number.
+ double recovery_rate_per_loss[2 * kMaxMediaPacketsTest + 1];
+};
+
+MetricsFecCode kMetricsXorRandom[kNumberCodes];
+MetricsFecCode kMetricsXorBursty[kNumberCodes];
+MetricsFecCode kMetricsReedSolomon[kNumberCodes];
+
+class FecPacketMaskMetricsTest : public ::testing::Test {
+ protected:
+ FecPacketMaskMetricsTest() {}
+
+ int max_num_codes_;
+ LossModel loss_model_[kNumLossModels];
+ CodeSizeParams code_params_[kNumberCodes];
+
+ uint8_t fec_packet_masks_[kMaxNumberMediaPackets][kMaxNumberMediaPackets];
+ FILE* fp_mask_;
+
+ // Measure of the gap of the loss for configuration given by `state`.
+ // This is to measure degree of consecutiveness for the loss configuration.
+ // Useful if the packets are sent out in order of sequence numbers and there
+ // is little/no re-ordering during transmission.
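+  // For example, the state {1, 0, 0, 1, 1} has gaps of 2 and 0 between
+  // consecutive losses, so the gap measure is 2.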
+ int GapLoss(int tot_num_packets, uint8_t* state) {
+ int max_gap_loss = 0;
+ // Find the first loss.
+ int first_loss = 0;
+ for (int i = 0; i < tot_num_packets; i++) {
+ if (state[i] == 1) {
+ first_loss = i;
+ break;
+ }
+ }
+ int prev_loss = first_loss;
+ for (int i = first_loss + 1; i < tot_num_packets; i++) {
+ if (state[i] == 1) { // Lost state.
+ int gap_loss = (i - prev_loss) - 1;
+ if (gap_loss > max_gap_loss) {
+ max_gap_loss = gap_loss;
+ }
+ prev_loss = i;
+ }
+ }
+ return max_gap_loss;
+ }
+
+ // Returns the number of recovered media packets for the XOR code, given the
+ // packet mask `fec_packet_masks_`, for the loss state/configuration given by
+ // `state`.
+ int RecoveredMediaPackets(int num_media_packets,
+ int num_fec_packets,
+ uint8_t* state) {
+ std::unique_ptr<uint8_t[]> state_tmp(
+ new uint8_t[num_media_packets + num_fec_packets]);
+ memcpy(state_tmp.get(), state, num_media_packets + num_fec_packets);
+ int num_recovered_packets = 0;
+ bool loop_again = true;
+ while (loop_again) {
+ loop_again = false;
+ bool recovered_new_packet = false;
+ // Check if we can recover anything: loop over all possible FEC packets.
+ for (int i = 0; i < num_fec_packets; i++) {
+ if (state_tmp[i + num_media_packets] == 0) {
+ // We have this FEC packet.
+ int num_packets_in_mask = 0;
+ int num_received_packets_in_mask = 0;
+ for (int j = 0; j < num_media_packets; j++) {
+ if (fec_packet_masks_[i][j] == 1) {
+ num_packets_in_mask++;
+ if (state_tmp[j] == 0) {
+ num_received_packets_in_mask++;
+ }
+ }
+ }
+ if ((num_packets_in_mask - 1) == num_received_packets_in_mask) {
+ // We can recover the missing media packet for this FEC packet.
+ num_recovered_packets++;
+ recovered_new_packet = true;
+ int jsel = -1;
+ int check_num_recovered = 0;
+ // Update the state with newly recovered media packet.
+ for (int j = 0; j < num_media_packets; j++) {
+ if (fec_packet_masks_[i][j] == 1 && state_tmp[j] == 1) {
+ // This is the lost media packet we will recover.
+ jsel = j;
+ check_num_recovered++;
+ }
+ }
+ // Check that we can only recover 1 packet.
+ RTC_DCHECK_EQ(check_num_recovered, 1);
+ // Update the state with the newly recovered media packet.
+ state_tmp[jsel] = 0;
+ }
+ }
+ } // Go to the next FEC packet in the loop.
+ // If we have recovered at least one new packet in this FEC loop,
+ // go through loop again, otherwise we leave loop.
+ if (recovered_new_packet) {
+ loop_again = true;
+ }
+ }
+ return num_recovered_packets;
+ }
+
+  // Compute the probability of occurrence of the loss state/configuration,
+ // given by `state`, for all the loss models considered in this test.
+ void ComputeProbabilityWeight(double* prob_weight,
+ uint8_t* state,
+ int tot_num_packets) {
+ // Loop over the loss models.
+ for (int k = 0; k < kNumLossModels; k++) {
+ double loss_rate = static_cast<double>(loss_model_[k].average_loss_rate);
+ double burst_length =
+ static_cast<double>(loss_model_[k].average_burst_length);
+ double result = 1.0;
+ if (loss_model_[k].loss_type == kRandomLossModel) {
+ for (int i = 0; i < tot_num_packets; i++) {
+ if (state[i] == 0) {
+ result *= (1.0 - loss_rate);
+ } else {
+ result *= loss_rate;
+ }
+ }
+ } else { // Gilbert-Elliot model for burst model.
+ RTC_DCHECK_EQ(loss_model_[k].loss_type, kBurstyLossModel);
+ // Transition probabilities: from previous to current state.
+ // Prob. of previous = lost --> current = received.
+ double prob10 = 1.0 / burst_length;
+        // Prob. of previous = lost --> current = lost.
+ double prob11 = 1.0 - prob10;
+ // Prob. of previous = received --> current = lost.
+ double prob01 = prob10 * (loss_rate / (1.0 - loss_rate));
+ // Prob. of previous = received --> current = received.
+ double prob00 = 1.0 - prob01;
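+        // Note: the stationary loss probability of this chain is
+        // prob01 / (prob01 + prob10) = loss_rate, which matches the model's
+        // average loss rate.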
+
+ // Use stationary probability for first state/packet.
+ if (state[0] == 0) { // Received
+ result = (1.0 - loss_rate);
+ } else { // Lost
+ result = loss_rate;
+ }
+
+ // Subsequent states: use transition probabilities.
+ for (int i = 1; i < tot_num_packets; i++) {
+ // Current state is received
+ if (state[i] == 0) {
+ if (state[i - 1] == 0) {
+ result *= prob00; // Previous received, current received.
+ } else {
+ result *= prob10; // Previous lost, current received.
+ }
+ } else { // Current state is lost
+ if (state[i - 1] == 0) {
+ result *= prob01; // Previous received, current lost.
+ } else {
+ result *= prob11; // Previous lost, current lost.
+ }
+ }
+ }
+ }
+ prob_weight[k] = result;
+ }
+ }
+
+ void CopyMetrics(MetricsFecCode* metrics_output,
+ MetricsFecCode metrics_input) {
+ memcpy(metrics_output->average_residual_loss,
+ metrics_input.average_residual_loss,
+ sizeof(double) * kNumLossModels);
+ memcpy(metrics_output->variance_residual_loss,
+ metrics_input.variance_residual_loss,
+ sizeof(double) * kNumLossModels);
+ memcpy(metrics_output->residual_loss_per_loss_gap,
+ metrics_input.residual_loss_per_loss_gap,
+ sizeof(double) * kNumStatesDistribution);
+    memcpy(metrics_output->recovery_rate_per_loss,
+           metrics_input.recovery_rate_per_loss,
+           sizeof(double) * (2 * kMaxMediaPacketsTest + 1));
+ }
+
+ // Compute the residual loss per gap, by summing the
+ // `residual_loss_per_loss_gap` over all loss configurations up to loss number
+ // = `num_fec_packets`.
+ double ComputeResidualLossPerGap(MetricsFecCode metrics,
+ int gap_number,
+ int num_fec_packets,
+ int code_index) {
+ double residual_loss_gap = 0.0;
+ int tot_num_configs = 0;
+ for (int loss = 1; loss <= num_fec_packets; loss++) {
+ int index = gap_number * (2 * kMaxMediaPacketsTest) + loss;
+ residual_loss_gap += metrics.residual_loss_per_loss_gap[index];
+ tot_num_configs += code_params_[code_index].configuration_density[index];
+ }
+ // Normalize, to compare across code sizes.
+ if (tot_num_configs > 0) {
+ residual_loss_gap =
+ residual_loss_gap / static_cast<double>(tot_num_configs);
+ }
+ return residual_loss_gap;
+ }
+
+ // Compute the recovery rate per loss number, by summing the
+ // `residual_loss_per_loss_gap` over all gap configurations.
+ void ComputeRecoveryRatePerLoss(MetricsFecCode* metrics,
+ int num_media_packets,
+ int num_fec_packets,
+ int code_index) {
+ for (int loss = 1; loss <= num_media_packets + num_fec_packets; loss++) {
+ metrics->recovery_rate_per_loss[loss] = 0.0;
+ int tot_num_configs = 0;
+ double arl = 0.0;
+ for (int gap = 0; gap < kMaxGapSize; gap++) {
+ int index = gap * (2 * kMaxMediaPacketsTest) + loss;
+ arl += metrics->residual_loss_per_loss_gap[index];
+ tot_num_configs +=
+ code_params_[code_index].configuration_density[index];
+ }
+ // Normalize, to compare across code sizes.
+ if (tot_num_configs > 0) {
+ arl = arl / static_cast<double>(tot_num_configs);
+ }
+      // Recovery rate for a given loss number `loss` is 1 minus `arl`
+      // normalized by the expected number of lost media packets, i.e. `loss`
+      // scaled by the media fraction num_media_packets / (num_media_packets +
+      // num_fec_packets) of the code length.
+ double scaled_loss =
+ static_cast<double>(loss * num_media_packets) /
+ static_cast<double>(num_media_packets + num_fec_packets);
+ metrics->recovery_rate_per_loss[loss] = 1.0 - arl / scaled_loss;
+ }
+ }
+
+ void SetMetricsZero(MetricsFecCode* metrics) {
+ memset(metrics->average_residual_loss, 0, sizeof(double) * kNumLossModels);
+ memset(metrics->variance_residual_loss, 0, sizeof(double) * kNumLossModels);
+ memset(metrics->residual_loss_per_loss_gap, 0,
+ sizeof(double) * kNumStatesDistribution);
+    memset(metrics->recovery_rate_per_loss, 0,
+           sizeof(double) * (2 * kMaxMediaPacketsTest + 1));
+ }
+
+  // Compute the metrics for an FEC code, given by the code type `code_type`
+  // (XOR-random/bursty or RS), and by the code index `code_index`
+  // (which contains the code size parameters/protection length).
+ void ComputeMetricsForCode(CodeType code_type, int code_index) {
+ std::unique_ptr<double[]> prob_weight(new double[kNumLossModels]);
+ memset(prob_weight.get(), 0, sizeof(double) * kNumLossModels);
+ MetricsFecCode metrics_code;
+ SetMetricsZero(&metrics_code);
+
+ int num_media_packets = code_params_[code_index].num_media_packets;
+ int num_fec_packets = code_params_[code_index].num_fec_packets;
+ int tot_num_packets = num_media_packets + num_fec_packets;
+ std::unique_ptr<uint8_t[]> state(new uint8_t[tot_num_packets]);
+ memset(state.get(), 0, tot_num_packets);
+
+ int num_loss_configurations = 1 << tot_num_packets;
+ // Loop over all loss configurations for the symbol sequence of length
+ // `tot_num_packets`. In this version we process up to (k=12, m=12) codes,
+ // and get exact expressions for the residual loss.
+ // TODO(marpan): For larger codes, loop over some random sample of loss
+ // configurations, sampling driven by the underlying statistical loss model
+ // (importance sampling).
+
+ // The symbols/packets are arranged as a sequence of source/media packets
+ // followed by FEC packets. This is the sequence ordering used in the RTP.
+ // A configuration refers to a sequence of received/lost (0/1 bit) states
+ // for the string of packets/symbols. For example, for a (k=4,m=3) code
+ // (4 media packets, 3 FEC packets), with 2 losses (one media and one FEC),
+ // the loss configurations is:
+ // Media1 Media2 Media3 Media4 FEC1 FEC2 FEC3
+ // 0 0 1 0 0 1 0
+ for (int i = 1; i < num_loss_configurations; i++) {
+ // Counter for number of packets lost.
+ int num_packets_lost = 0;
+ // Counters for the number of media packets lost.
+ int num_media_packets_lost = 0;
+
+ // Map configuration number to a loss state.
+ for (int j = 0; j < tot_num_packets; j++) {
+ state[j] = 0; // Received state.
+ int bit_value = i >> (tot_num_packets - j - 1) & 1;
+ if (bit_value == 1) {
+ state[j] = 1; // Lost state.
+ num_packets_lost++;
+ if (j < num_media_packets) {
+ num_media_packets_lost++;
+ }
+ }
+ } // Done with loop over total number of packets.
+ RTC_DCHECK_LE(num_media_packets_lost, num_media_packets);
+      RTC_DCHECK_LE(num_packets_lost, tot_num_packets);
+      RTC_DCHECK_GT(num_packets_lost, 0);
+ double residual_loss = 0.0;
+ // Only need to compute residual loss (number of recovered packets) for
+ // configurations that have at least one media packet lost.
+ if (num_media_packets_lost >= 1) {
+ // Compute the number of recovered packets.
+ int num_recovered_packets = 0;
+ if (code_type == xor_random_code || code_type == xor_bursty_code) {
+ num_recovered_packets = RecoveredMediaPackets(
+ num_media_packets, num_fec_packets, state.get());
+ } else {
+ // For the RS code, we can either completely recover all the packets
+ // if the loss is less than or equal to the number of FEC packets,
+ // otherwise we can recover none of the missing packets. This is the
+ // all or nothing (MDS) property of the RS code.
+ if (num_packets_lost <= num_fec_packets) {
+ num_recovered_packets = num_media_packets_lost;
+ }
+ }
+ RTC_DCHECK_LE(num_recovered_packets, num_media_packets);
+ // Compute the residual loss. We only care about recovering media/source
+ // packets, so residual loss is based on lost/recovered media packets.
+ residual_loss =
+ static_cast<double>(num_media_packets_lost - num_recovered_packets);
+ // Compute the probability weights for this configuration.
+ ComputeProbabilityWeight(prob_weight.get(), state.get(),
+ tot_num_packets);
+ // Update the average and variance of the residual loss.
+ for (int k = 0; k < kNumLossModels; k++) {
+ metrics_code.average_residual_loss[k] +=
+ residual_loss * prob_weight[k];
+ metrics_code.variance_residual_loss[k] +=
+ residual_loss * residual_loss * prob_weight[k];
+ }
+ } // Done with processing for num_media_packets_lost >= 1.
+ // Update the distribution statistics.
+ // Compute the gap of the loss (the "consecutiveness" of the loss).
+ int gap_loss = GapLoss(tot_num_packets, state.get());
+ RTC_DCHECK_LT(gap_loss, kMaxGapSize);
+ int index = gap_loss * (2 * kMaxMediaPacketsTest) + num_packets_lost;
+ RTC_DCHECK_LT(index, kNumStatesDistribution);
+ metrics_code.residual_loss_per_loss_gap[index] += residual_loss;
+ if (code_type == xor_random_code) {
+          // The configuration density is only a function of the code length
+          // and only needs to be computed for the first `code_type` passed
+          // here.
+ code_params_[code_index].configuration_density[index]++;
+ }
+ } // Done with loop over configurations.
+ // Normalize the average residual loss and compute/normalize the variance.
+ for (int k = 0; k < kNumLossModels; k++) {
+ // Normalize the average residual loss by the total number of packets
+ // `tot_num_packets` (i.e., the code length). For a code with no (zero)
+ // recovery, the average residual loss for that code would be reduced like
+ // ~`average_loss_rate` * `num_media_packets` / `tot_num_packets`. This is
+ // the expected reduction in the average residual loss just from adding
+ // FEC packets to the symbol sequence.
+ metrics_code.average_residual_loss[k] =
+ metrics_code.average_residual_loss[k] /
+ static_cast<double>(tot_num_packets);
+ metrics_code.variance_residual_loss[k] =
+ metrics_code.variance_residual_loss[k] /
+ static_cast<double>(num_media_packets * num_media_packets);
+ metrics_code.variance_residual_loss[k] =
+ metrics_code.variance_residual_loss[k] -
+ (metrics_code.average_residual_loss[k] *
+ metrics_code.average_residual_loss[k]);
+ RTC_DCHECK_GE(metrics_code.variance_residual_loss[k], 0.0);
+ RTC_DCHECK_GT(metrics_code.average_residual_loss[k], 0.0);
+ metrics_code.variance_residual_loss[k] =
+ std::sqrt(metrics_code.variance_residual_loss[k]) /
+ metrics_code.average_residual_loss[k];
+ }
+
+ // Compute marginal distribution as a function of loss parameter.
+ ComputeRecoveryRatePerLoss(&metrics_code, num_media_packets,
+ num_fec_packets, code_index);
+ if (code_type == rs_code) {
+ CopyMetrics(&kMetricsReedSolomon[code_index], metrics_code);
+ } else if (code_type == xor_random_code) {
+ CopyMetrics(&kMetricsXorRandom[code_index], metrics_code);
+ } else if (code_type == xor_bursty_code) {
+ CopyMetrics(&kMetricsXorBursty[code_index], metrics_code);
+ } else {
+ RTC_DCHECK_NOTREACHED();
+ }
+ }
+
+ void WriteOutMetricsAllFecCodes() {
+ std::string filename = test::OutputPath() + "data_metrics_all_codes";
+ FILE* fp = fopen(filename.c_str(), "wb");
+ // Loop through codes up to `kMaxMediaPacketsTest`.
+ int code_index = 0;
+ for (int num_media_packets = 1; num_media_packets <= kMaxMediaPacketsTest;
+ num_media_packets++) {
+ for (int num_fec_packets = 1; num_fec_packets <= num_media_packets;
+ num_fec_packets++) {
+ fprintf(fp, "FOR CODE: (%d, %d) \n", num_media_packets,
+ num_fec_packets);
+ for (int k = 0; k < kNumLossModels; k++) {
+ float loss_rate = loss_model_[k].average_loss_rate;
+ float burst_length = loss_model_[k].average_burst_length;
+ fprintf(
+ fp,
+ "Loss rate = %.2f, Burst length = %.2f: %.4f %.4f %.4f"
+ " **** %.4f %.4f %.4f \n",
+ loss_rate, burst_length,
+ 100 * kMetricsReedSolomon[code_index].average_residual_loss[k],
+ 100 * kMetricsXorRandom[code_index].average_residual_loss[k],
+ 100 * kMetricsXorBursty[code_index].average_residual_loss[k],
+ kMetricsReedSolomon[code_index].variance_residual_loss[k],
+ kMetricsXorRandom[code_index].variance_residual_loss[k],
+ kMetricsXorBursty[code_index].variance_residual_loss[k]);
+ }
+ for (int gap = 0; gap < kGapSizeOutput; gap++) {
+ double rs_residual_loss =
+ ComputeResidualLossPerGap(kMetricsReedSolomon[code_index], gap,
+ num_fec_packets, code_index);
+ double xor_random_residual_loss = ComputeResidualLossPerGap(
+ kMetricsXorRandom[code_index], gap, num_fec_packets, code_index);
+ double xor_bursty_residual_loss = ComputeResidualLossPerGap(
+ kMetricsXorBursty[code_index], gap, num_fec_packets, code_index);
+ fprintf(fp,
+ "Residual loss as a function of gap "
+ "%d: %.4f %.4f %.4f \n",
+ gap, rs_residual_loss, xor_random_residual_loss,
+ xor_bursty_residual_loss);
+ }
+ fprintf(fp, "Recovery rate as a function of loss number \n");
+ for (int loss = 1; loss <= num_media_packets + num_fec_packets;
+ loss++) {
+ fprintf(fp, "For loss number %d: %.4f %.4f %.4f \n", loss,
+ kMetricsReedSolomon[code_index].recovery_rate_per_loss[loss],
+ kMetricsXorRandom[code_index].recovery_rate_per_loss[loss],
+ kMetricsXorBursty[code_index].recovery_rate_per_loss[loss]);
+ }
+ fprintf(fp, "******************\n");
+ fprintf(fp, "\n");
+ code_index++;
+ }
+ }
+ fclose(fp);
+ }
+
+ void SetLossModels() {
+ int num_loss_rates = sizeof(kAverageLossRate) / sizeof(*kAverageLossRate);
+ int num_burst_lengths =
+ sizeof(kAverageBurstLength) / sizeof(*kAverageBurstLength);
+ int num_loss_models = 0;
+ for (int k = 0; k < num_burst_lengths; k++) {
+ for (int k2 = 0; k2 < num_loss_rates; k2++) {
+ loss_model_[num_loss_models].average_loss_rate = kAverageLossRate[k2];
+ loss_model_[num_loss_models].average_burst_length =
+ kAverageBurstLength[k];
+ // First set of loss models are of random type.
+ if (k == 0) {
+ loss_model_[num_loss_models].loss_type = kRandomLossModel;
+ } else {
+ loss_model_[num_loss_models].loss_type = kBurstyLossModel;
+ }
+ num_loss_models++;
+ }
+ }
+ RTC_DCHECK_EQ(num_loss_models, kNumLossModels);
+ }
+
+ void SetCodeParams() {
+ int code_index = 0;
+ for (int num_media_packets = 1; num_media_packets <= kMaxMediaPacketsTest;
+ num_media_packets++) {
+ for (int num_fec_packets = 1; num_fec_packets <= num_media_packets;
+ num_fec_packets++) {
+ code_params_[code_index].num_media_packets = num_media_packets;
+ code_params_[code_index].num_fec_packets = num_fec_packets;
+ code_params_[code_index].protection_level =
+ static_cast<float>(num_fec_packets) /
+ static_cast<float>(num_media_packets + num_fec_packets);
+ for (int k = 0; k < kNumStatesDistribution; k++) {
+ code_params_[code_index].configuration_density[k] = 0;
+ }
+ code_index++;
+ }
+ }
+ max_num_codes_ = code_index;
+ }
+
+ // Make some basic checks on the packet masks. Return -1 if any of these
+ // checks fail.
+ int RejectInvalidMasks(int num_media_packets, int num_fec_packets) {
+ // Make sure every FEC packet protects something.
+ for (int i = 0; i < num_fec_packets; i++) {
+ int row_degree = 0;
+ for (int j = 0; j < num_media_packets; j++) {
+ if (fec_packet_masks_[i][j] == 1) {
+ row_degree++;
+ }
+ }
+ if (row_degree == 0) {
+ printf(
+ "Invalid mask: FEC packet has empty mask (does not protect "
+ "anything) %d %d %d \n",
+ i, num_media_packets, num_fec_packets);
+ return -1;
+ }
+ }
+    // Make sure every media packet has some protection.
+ for (int j = 0; j < num_media_packets; j++) {
+ int column_degree = 0;
+ for (int i = 0; i < num_fec_packets; i++) {
+ if (fec_packet_masks_[i][j] == 1) {
+ column_degree++;
+ }
+ }
+ if (column_degree == 0) {
+ printf(
+ "Invalid mask: Media packet has no protection at all %d %d %d "
+ "\n",
+ j, num_media_packets, num_fec_packets);
+ return -1;
+ }
+ }
+ // Make sure we do not have two identical FEC packets.
+ for (int i = 0; i < num_fec_packets; i++) {
+ for (int i2 = i + 1; i2 < num_fec_packets; i2++) {
+ int overlap = 0;
+ for (int j = 0; j < num_media_packets; j++) {
+ if (fec_packet_masks_[i][j] == fec_packet_masks_[i2][j]) {
+ overlap++;
+ }
+ }
+ if (overlap == num_media_packets) {
+ printf("Invalid mask: Two FEC packets are identical %d %d %d %d \n",
+ i, i2, num_media_packets, num_fec_packets);
+ return -1;
+ }
+ }
+ }
+ // Avoid codes that have two media packets with full protection (all 1s in
+ // their corresponding columns). This would mean that if we lose those
+ // two packets, we can never recover them even if we receive all the other
+ // packets. Exclude the special cases of 1 or 2 FEC packets.
+ if (num_fec_packets > 2) {
+ for (int j = 0; j < num_media_packets; j++) {
+ for (int j2 = j + 1; j2 < num_media_packets; j2++) {
+ int degree = 0;
+ for (int i = 0; i < num_fec_packets; i++) {
+ if (fec_packet_masks_[i][j] == fec_packet_masks_[i][j2] &&
+ fec_packet_masks_[i][j] == 1) {
+ degree++;
+ }
+ }
+ if (degree == num_fec_packets) {
+ printf(
+ "Invalid mask: Two media packets are have full degree "
+ "%d %d %d %d \n",
+ j, j2, num_media_packets, num_fec_packets);
+ return -1;
+ }
+ }
+ }
+ }
+ return 0;
+ }
+
+ void GetPacketMaskConvertToBitMask(uint8_t* packet_mask,
+ int num_media_packets,
+ int num_fec_packets,
+ int mask_bytes_fec_packet,
+ CodeType code_type) {
+ for (int i = 0; i < num_fec_packets; i++) {
+ for (int j = 0; j < num_media_packets; j++) {
+ const uint8_t byte_mask =
+ packet_mask[i * mask_bytes_fec_packet + j / 8];
+ const int bit_position = (7 - j % 8);
+ fec_packet_masks_[i][j] =
+ (byte_mask & (1 << bit_position)) >> bit_position;
+ fprintf(fp_mask_, "%d ", fec_packet_masks_[i][j]);
+ }
+ fprintf(fp_mask_, "\n");
+ }
+ fprintf(fp_mask_, "\n");
+ }
+
+ int ProcessXORPacketMasks(CodeType code_type, FecMaskType fec_mask_type) {
+ int code_index = 0;
+ // Maximum number of media packets allowed for the mask type.
+ const int packet_mask_max = kMaxMediaPackets[fec_mask_type];
+ std::unique_ptr<uint8_t[]> packet_mask(
+ new uint8_t[packet_mask_max * kUlpfecMaxPacketMaskSize]);
+ // Loop through codes up to `kMaxMediaPacketsTest`.
+ for (int num_media_packets = 1; num_media_packets <= kMaxMediaPacketsTest;
+ ++num_media_packets) {
+ const int mask_bytes_fec_packet =
+ static_cast<int>(internal::PacketMaskSize(num_media_packets));
+ internal::PacketMaskTable mask_table(fec_mask_type, num_media_packets);
+ for (int num_fec_packets = 1; num_fec_packets <= num_media_packets;
+ num_fec_packets++) {
+ memset(packet_mask.get(), 0, num_media_packets * mask_bytes_fec_packet);
+ rtc::ArrayView<const uint8_t> mask =
+ mask_table.LookUp(num_media_packets, num_fec_packets);
+ memcpy(packet_mask.get(), &mask[0], mask.size());
+ // Convert to bit mask.
+ GetPacketMaskConvertToBitMask(packet_mask.get(), num_media_packets,
+ num_fec_packets, mask_bytes_fec_packet,
+ code_type);
+ if (RejectInvalidMasks(num_media_packets, num_fec_packets) < 0) {
+ return -1;
+ }
+ // Compute the metrics for this code/mask.
+ ComputeMetricsForCode(code_type, code_index);
+ code_index++;
+ }
+ }
+ RTC_DCHECK_EQ(code_index, kNumberCodes);
+ return 0;
+ }
+
+ void ProcessRS(CodeType code_type) {
+ int code_index = 0;
+ for (int num_media_packets = 1; num_media_packets <= kMaxMediaPacketsTest;
+ num_media_packets++) {
+ for (int num_fec_packets = 1; num_fec_packets <= num_media_packets;
+ num_fec_packets++) {
+ // Compute the metrics for this code type.
+ ComputeMetricsForCode(code_type, code_index);
+ code_index++;
+ }
+ }
+ }
+
+ // Compute metrics for all code types and sizes.
+ void ComputeMetricsAllCodes() {
+ SetLossModels();
+ SetCodeParams();
+ // Get metrics for XOR code with packet masks of random type.
+ std::string filename = test::OutputPath() + "data_packet_masks";
+ fp_mask_ = fopen(filename.c_str(), "wb");
+ fprintf(fp_mask_, "MASK OF TYPE RANDOM: \n");
+ EXPECT_EQ(ProcessXORPacketMasks(xor_random_code, kFecMaskRandom), 0);
+ // Get metrics for XOR code with packet masks of bursty type.
+ fprintf(fp_mask_, "MASK OF TYPE BURSTY: \n");
+ EXPECT_EQ(ProcessXORPacketMasks(xor_bursty_code, kFecMaskBursty), 0);
+ fclose(fp_mask_);
+ // Get metrics for Reed-Solomon code.
+ ProcessRS(rs_code);
+ }
+};
+
+// Verify that the average residual loss, averaged over loss models
+// appropriate to each mask type, is below some maximum acceptable level. The
+// acceptable levels are taken from average_residual_loss_xor_codes.h and
+// correspond to the current set of packet masks. The levels for each code may
+// be updated over time.
+TEST_F(FecPacketMaskMetricsTest, FecXorMaxResidualLoss) {
+ SetLossModels();
+ SetCodeParams();
+ ComputeMetricsAllCodes();
+ WriteOutMetricsAllFecCodes();
+ int num_loss_rates = sizeof(kAverageLossRate) / sizeof(*kAverageLossRate);
+ int num_burst_lengths =
+ sizeof(kAverageBurstLength) / sizeof(*kAverageBurstLength);
+ for (int code_index = 0; code_index < max_num_codes_; code_index++) {
+ double sum_residual_loss_random_mask_random_loss = 0.0;
+ double sum_residual_loss_bursty_mask_bursty_loss = 0.0;
+ // Compute the sum residual loss across the models, for each mask type.
+ for (int k = 0; k < kNumLossModels; k++) {
+ if (loss_model_[k].loss_type == kRandomLossModel) {
+ sum_residual_loss_random_mask_random_loss +=
+ kMetricsXorRandom[code_index].average_residual_loss[k];
+ } else if (loss_model_[k].loss_type == kBurstyLossModel) {
+ sum_residual_loss_bursty_mask_bursty_loss +=
+ kMetricsXorBursty[code_index].average_residual_loss[k];
+ }
+ }
+ float average_residual_loss_random_mask_random_loss =
+ sum_residual_loss_random_mask_random_loss / num_loss_rates;
+ float average_residual_loss_bursty_mask_bursty_loss =
+ sum_residual_loss_bursty_mask_bursty_loss /
+ (num_loss_rates * (num_burst_lengths - 1));
+ const float ref_random_mask = kMaxResidualLossRandomMask[code_index];
+ const float ref_bursty_mask = kMaxResidualLossBurstyMask[code_index];
+ EXPECT_LE(average_residual_loss_random_mask_random_loss, ref_random_mask);
+ EXPECT_LE(average_residual_loss_bursty_mask_bursty_loss, ref_bursty_mask);
+ }
+}
+
+// Verify the behavior of the XOR codes vs the RS codes.
+// For random loss model with average loss rates <= the code protection level,
+// the RS code (optimal MDS code) is more efficient than XOR codes.
+// However, for larger loss rates (above protection level) and/or bursty
+// loss models, the RS is not always more efficient than XOR (though in most
+// cases it still is).
+TEST_F(FecPacketMaskMetricsTest, FecXorVsRS) {
+ SetLossModels();
+ SetCodeParams();
+ for (int code_index = 0; code_index < max_num_codes_; code_index++) {
+ for (int k = 0; k < kNumLossModels; k++) {
+ float loss_rate = loss_model_[k].average_loss_rate;
+ float protection_level = code_params_[code_index].protection_level;
+ // Under these conditions we expect XOR to not be better than RS.
+ if (loss_model_[k].loss_type == kRandomLossModel &&
+ loss_rate <= protection_level) {
+ EXPECT_GE(kMetricsXorRandom[code_index].average_residual_loss[k],
+ kMetricsReedSolomon[code_index].average_residual_loss[k]);
+ EXPECT_GE(kMetricsXorBursty[code_index].average_residual_loss[k],
+ kMetricsReedSolomon[code_index].average_residual_loss[k]);
+ }
+ // TODO(marpan): There are some cases (for high loss rates and/or
+ // burst loss models) where XOR is better than RS. Is there some pattern
+ // we can identify and enforce as a constraint?
+ }
+ }
+}
+
+// Verify the trend (change) in the average residual loss, as a function of
+// loss rate, of the XOR code relative to the RS code.
+// The difference between XOR and RS should not get worse as we increase
+// the average loss rate.
+TEST_F(FecPacketMaskMetricsTest, FecTrendXorVsRsLossRate) {
+ SetLossModels();
+ SetCodeParams();
+ // TODO(marpan): Examine this further to see if the condition can be strictly
+ // satisfied (i.e., scale = 1.0) for all codes with different/better masks.
+ double scale = 0.90;
+ int num_loss_rates = sizeof(kAverageLossRate) / sizeof(*kAverageLossRate);
+ int num_burst_lengths =
+ sizeof(kAverageBurstLength) / sizeof(*kAverageBurstLength);
+ for (int code_index = 0; code_index < max_num_codes_; code_index++) {
+ for (int i = 0; i < num_burst_lengths; i++) {
+ for (int j = 0; j < num_loss_rates - 1; j++) {
+ int k = num_loss_rates * i + j;
+ // For XOR random.
+ if (kMetricsXorRandom[code_index].average_residual_loss[k] >
+ kMetricsReedSolomon[code_index].average_residual_loss[k]) {
+ double diff_rs_xor_random_loss1 =
+ (kMetricsXorRandom[code_index].average_residual_loss[k] -
+ kMetricsReedSolomon[code_index].average_residual_loss[k]) /
+ kMetricsXorRandom[code_index].average_residual_loss[k];
+ double diff_rs_xor_random_loss2 =
+ (kMetricsXorRandom[code_index].average_residual_loss[k + 1] -
+ kMetricsReedSolomon[code_index].average_residual_loss[k + 1]) /
+ kMetricsXorRandom[code_index].average_residual_loss[k + 1];
+ EXPECT_GE(diff_rs_xor_random_loss1, scale * diff_rs_xor_random_loss2);
+ }
+ // TODO(marpan): Investigate the cases for the bursty mask where
+ // this trend is not strictly satisfied.
+ }
+ }
+ }
+}
+
+// Verify the average residual loss behavior as a function of the protection
+// level and the code length. The average residual loss for a given (k1,m1)
+// code should generally be higher than that of another code (k2,m2) that
+// satisfies either of two conditions:
+// 1) higher protection & code length at least as large: (k2+m2) >= (k1+m1),
+// 2) equal protection and larger code length: (k2+m2) > (k1+m1).
+// This currently does not hold for some cases of the XOR code with random mask.
+TEST_F(FecPacketMaskMetricsTest, FecBehaviorViaProtectionLevelAndLength) {
+ SetLossModels();
+ SetCodeParams();
+ for (int code_index1 = 0; code_index1 < max_num_codes_; code_index1++) {
+ float protection_level1 = code_params_[code_index1].protection_level;
+ int length1 = code_params_[code_index1].num_media_packets +
+ code_params_[code_index1].num_fec_packets;
+ for (int code_index2 = 0; code_index2 < max_num_codes_; code_index2++) {
+ float protection_level2 = code_params_[code_index2].protection_level;
+ int length2 = code_params_[code_index2].num_media_packets +
+ code_params_[code_index2].num_fec_packets;
+ // Codes with higher protection are more efficient, conditioned on the
+ // length of the code (higher protection but shorter length codes are
+ // generally not more efficient). For two codes with equal protection,
+ // the longer code is generally more efficient. For high loss rate
+ // models, this condition may be violated for some codes with equal or
+ // very close protection levels. High loss rate case is excluded below.
+ if ((protection_level2 > protection_level1 && length2 >= length1) ||
+ (protection_level2 == protection_level1 && length2 > length1)) {
+ for (int k = 0; k < kNumLossModels; k++) {
+ float loss_rate = loss_model_[k].average_loss_rate;
+ if (loss_rate < loss_rate_upper_threshold) {
+ EXPECT_LT(
+ kMetricsReedSolomon[code_index2].average_residual_loss[k],
+ kMetricsReedSolomon[code_index1].average_residual_loss[k]);
+ // TODO(marpan): There are some corner cases where this is not
+ // satisfied with the current packet masks. Look into updating
+ // these cases to see if this behavior should/can be satisfied,
+ // with overall lower residual loss for those XOR codes.
+ // EXPECT_LT(
+ // kMetricsXorBursty[code_index2].average_residual_loss[k],
+ // kMetricsXorBursty[code_index1].average_residual_loss[k]);
+ // EXPECT_LT(
+ // kMetricsXorRandom[code_index2].average_residual_loss[k],
+ // kMetricsXorRandom[code_index1].average_residual_loss[k]);
+ }
+ }
+ }
+ }
+ }
+}
+
+// Verify the behavior of the variance of the XOR codes.
+// The partial recovery of XOR, versus the all-or-nothing behavior of the RS
+// code, means that the variance of the residual loss for XOR should generally
+// not be worse than that of RS.
+TEST_F(FecPacketMaskMetricsTest, FecVarianceBehaviorXorVsRs) {
+ SetLossModels();
+ SetCodeParams();
+ // The condition is not strictly satisfied with the current masks,
+ // i.e., for some codes, the variance of XOR may be slightly higher than RS.
+ // TODO(marpan): Examine this further to see if the condition can be strictly
+ // satisfied (i.e., scale = 1.0) for all codes with different/better masks.
+ double scale = 0.95;
+ for (int code_index = 0; code_index < max_num_codes_; code_index++) {
+ for (int k = 0; k < kNumLossModels; k++) {
+ EXPECT_LE(scale * kMetricsXorRandom[code_index].variance_residual_loss[k],
+ kMetricsReedSolomon[code_index].variance_residual_loss[k]);
+ EXPECT_LE(scale * kMetricsXorBursty[code_index].variance_residual_loss[k],
+ kMetricsReedSolomon[code_index].variance_residual_loss[k]);
+ }
+ }
+}
+
+// For the bursty mask type, the residual loss must be strictly zero for all
+// consecutive losses (i.e., gap = 0) with number of losses <= num_fec_packets.
+// This is a design property of the bursty mask type.
+TEST_F(FecPacketMaskMetricsTest, FecXorBurstyPerfectRecoveryConsecutiveLoss) {
+ SetLossModels();
+ SetCodeParams();
+ for (int code_index = 0; code_index < max_num_codes_; code_index++) {
+ int num_fec_packets = code_params_[code_index].num_fec_packets;
+ for (int loss = 1; loss <= num_fec_packets; loss++) {
+ int index = loss; // `gap` is zero.
+ EXPECT_EQ(kMetricsXorBursty[code_index].residual_loss_per_loss_gap[index],
+ 0.0);
+ }
+ }
+}
+
+// The XOR codes with random mask type are generally better than the ones with
+// bursty mask type for random loss models at low loss rates.
+// The XOR codes with bursty mask type are generally better than the ones with
+// random mask type for bursty loss models and/or high loss rates.
+// TODO(marpan): Enable this test when some of the packet masks are updated.
+// Some isolated cases of the codes don't pass this currently.
+/*
+TEST_F(FecPacketMaskMetricsTest, FecXorRandomVsBursty) {
+ SetLossModels();
+ SetCodeParams();
+ for (int code_index = 0; code_index < max_num_codes_; code_index++) {
+ double sum_residual_loss_random_mask_random_loss = 0.0;
+ double sum_residual_loss_bursty_mask_random_loss = 0.0;
+ double sum_residual_loss_random_mask_bursty_loss = 0.0;
+ double sum_residual_loss_bursty_mask_bursty_loss = 0.0;
+ // Compute the sum residual loss across the models, for each mask type.
+ for (int k = 0; k < kNumLossModels; k++) {
+ float loss_rate = loss_model_[k].average_loss_rate;
+ if (loss_model_[k].loss_type == kRandomLossModel &&
+ loss_rate < loss_rate_upper_threshold) {
+ sum_residual_loss_random_mask_random_loss +=
+ kMetricsXorRandom[code_index].average_residual_loss[k];
+ sum_residual_loss_bursty_mask_random_loss +=
+ kMetricsXorBursty[code_index].average_residual_loss[k];
+ } else if (loss_model_[k].loss_type == kBurstyLossModel &&
+ loss_rate > loss_rate_lower_threshold) {
+ sum_residual_loss_random_mask_bursty_loss +=
+ kMetricsXorRandom[code_index].average_residual_loss[k];
+ sum_residual_loss_bursty_mask_bursty_loss +=
+ kMetricsXorBursty[code_index].average_residual_loss[k];
+ }
+ }
+ EXPECT_LE(sum_residual_loss_random_mask_random_loss,
+ sum_residual_loss_bursty_mask_random_loss);
+ EXPECT_LE(sum_residual_loss_bursty_mask_bursty_loss,
+ sum_residual_loss_random_mask_bursty_loss);
+ }
+}
+*/
+
+// Verify that the average recovery rate for each code is at or above a given
+// threshold, for certain loss-number conditions.
+TEST_F(FecPacketMaskMetricsTest, FecRecoveryRateUnderLossConditions) {
+ SetLossModels();
+ SetCodeParams();
+ for (int code_index = 0; code_index < max_num_codes_; code_index++) {
+ int num_media_packets = code_params_[code_index].num_media_packets;
+ int num_fec_packets = code_params_[code_index].num_fec_packets;
+ // Perfect recovery (`recovery_rate_per_loss` == 1) is expected for
+ // `loss_number` = 1, for all codes.
+ int loss_number = 1;
+ EXPECT_EQ(
+ kMetricsReedSolomon[code_index].recovery_rate_per_loss[loss_number],
+ 1.0);
+ EXPECT_EQ(kMetricsXorRandom[code_index].recovery_rate_per_loss[loss_number],
+ 1.0);
+ EXPECT_EQ(kMetricsXorBursty[code_index].recovery_rate_per_loss[loss_number],
+ 1.0);
+ // For `loss_number` = `num_fec_packets` / 2, we expect the following:
+ // Perfect recovery for RS, and recovery for XOR above the threshold.
+ loss_number = num_fec_packets / 2 > 0 ? num_fec_packets / 2 : 1;
+ EXPECT_EQ(
+ kMetricsReedSolomon[code_index].recovery_rate_per_loss[loss_number],
+ 1.0);
+ EXPECT_GE(kMetricsXorRandom[code_index].recovery_rate_per_loss[loss_number],
+ kRecoveryRateXorRandom[0]);
+ EXPECT_GE(kMetricsXorBursty[code_index].recovery_rate_per_loss[loss_number],
+ kRecoveryRateXorBursty[0]);
+ // For `loss_number` = `num_fec_packets`, we expect the following:
+ // Perfect recovery for RS, and recovery for XOR above the threshold.
+ loss_number = num_fec_packets;
+ EXPECT_EQ(
+ kMetricsReedSolomon[code_index].recovery_rate_per_loss[loss_number],
+ 1.0);
+ EXPECT_GE(kMetricsXorRandom[code_index].recovery_rate_per_loss[loss_number],
+ kRecoveryRateXorRandom[1]);
+ EXPECT_GE(kMetricsXorBursty[code_index].recovery_rate_per_loss[loss_number],
+ kRecoveryRateXorBursty[1]);
+ // For `loss_number` = `num_fec_packets` + 1, we expect the following:
+ // Zero recovery for RS, but non-zero recovery for XOR.
+ if (num_fec_packets > 1 && num_media_packets > 2) {
+ loss_number = num_fec_packets + 1;
+ EXPECT_EQ(
+ kMetricsReedSolomon[code_index].recovery_rate_per_loss[loss_number],
+ 0.0);
+ EXPECT_GE(
+ kMetricsXorRandom[code_index].recovery_rate_per_loss[loss_number],
+ kRecoveryRateXorRandom[2]);
+ EXPECT_GE(
+ kMetricsXorBursty[code_index].recovery_rate_per_loss[loss_number],
+ kRecoveryRateXorBursty[2]);
+ }
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/third_party/fft/BUILD.gn b/third_party/libwebrtc/modules/third_party/fft/BUILD.gn
new file mode 100644
index 0000000000..49dbd6f9b4
--- /dev/null
+++ b/third_party/libwebrtc/modules/third_party/fft/BUILD.gn
@@ -0,0 +1,16 @@
+# Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the ../../../LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+import("../../../webrtc.gni")
+
+rtc_library("fft") {
+ sources = [
+ "fft.c",
+ "fft.h",
+ ]
+}
diff --git a/third_party/libwebrtc/modules/third_party/fft/LICENSE b/third_party/libwebrtc/modules/third_party/fft/LICENSE
new file mode 100644
index 0000000000..c0a78058cf
--- /dev/null
+++ b/third_party/libwebrtc/modules/third_party/fft/LICENSE
@@ -0,0 +1,25 @@
+/*
+ * Copyright(c)1995,97 Mark Olesen <olesen@me.QueensU.CA>
+ * Queen's Univ at Kingston (Canada)
+ *
+ * Permission to use, copy, modify, and distribute this software for
+ * any purpose without fee is hereby granted, provided that this
+ * entire notice is included in all copies of any software which is
+ * or includes a copy or modification of this software and in all
+ * copies of the supporting documentation for such software.
+ *
+ * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTY. IN PARTICULAR, NEITHER THE AUTHOR NOR QUEEN'S
+ * UNIVERSITY AT KINGSTON MAKES ANY REPRESENTATION OR WARRANTY OF ANY
+ * KIND CONCERNING THE MERCHANTABILITY OF THIS SOFTWARE OR ITS
+ * FITNESS FOR ANY PARTICULAR PURPOSE.
+ *
+ * All of which is to say that you can do what you like with this
+ * source code provided you don't try to sell it as your own and you
+ * include an unaltered copy of this message (including the
+ * copyright).
+ *
+ * It is also implicitly understood that bug fixes and improvements
+ * should make their way back to the general Internet community so
+ * that everyone benefits.
+ */
diff --git a/third_party/libwebrtc/modules/third_party/fft/README.chromium b/third_party/libwebrtc/modules/third_party/fft/README.chromium
new file mode 100644
index 0000000000..94d20d422f
--- /dev/null
+++ b/third_party/libwebrtc/modules/third_party/fft/README.chromium
@@ -0,0 +1,12 @@
+Name: fft
+Short Name: fft
+URL:
+Version: 0
+Date: 2018-07-26
+License: Custom license
+License File: LICENSE
+Security Critical: yes
+
+Description:
+Multivariate complex Fourier transform, computed in place
+using mixed-radix Fast Fourier Transform algorithm.
diff --git a/third_party/libwebrtc/modules/third_party/fft/fft.c b/third_party/libwebrtc/modules/third_party/fft/fft.c
new file mode 100644
index 0000000000..72604626d9
--- /dev/null
+++ b/third_party/libwebrtc/modules/third_party/fft/fft.c
@@ -0,0 +1,942 @@
+/*
+ * Copyright(c)1995,97 Mark Olesen <olesen@me.QueensU.CA>
+ * Queen's Univ at Kingston (Canada)
+ *
+ * Permission to use, copy, modify, and distribute this software for
+ * any purpose without fee is hereby granted, provided that this
+ * entire notice is included in all copies of any software which is
+ * or includes a copy or modification of this software and in all
+ * copies of the supporting documentation for such software.
+ *
+ * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTY. IN PARTICULAR, NEITHER THE AUTHOR NOR QUEEN'S
+ * UNIVERSITY AT KINGSTON MAKES ANY REPRESENTATION OR WARRANTY OF ANY
+ * KIND CONCERNING THE MERCHANTABILITY OF THIS SOFTWARE OR ITS
+ * FITNESS FOR ANY PARTICULAR PURPOSE.
+ *
+ * All of which is to say that you can do what you like with this
+ * source code provided you don't try to sell it as your own and you
+ * include an unaltered copy of this message (including the
+ * copyright).
+ *
+ * It is also implicitly understood that bug fixes and improvements
+ * should make their way back to the general Internet community so
+ * that everyone benefits.
+ *
+ * Changes:
+ * Trivial type modifications by the WebRTC authors.
+ */
+
+
+/*
+ * File:
+ * WebRtcIsac_Fftn.c
+ *
+ * Public:
+ * WebRtcIsac_Fftn / fftnf ();
+ *
+ * Private:
+ * WebRtcIsac_Fftradix / fftradixf ();
+ *
+ * Descript:
+ * multivariate complex Fourier transform, computed in place
+ * using mixed-radix Fast Fourier Transform algorithm.
+ *
+ * Fortran code by:
+ * RC Singleton, Stanford Research Institute, Sept. 1968
+ *
+ * translated by f2c (version 19950721).
+ *
+ * int WebRtcIsac_Fftn (int ndim, const int dims[], REAL Re[], REAL Im[],
+ * int iSign, double scaling);
+ *
+ * NDIM = the total number of dimensions
+ * DIMS = a vector of array sizes
+ * if NDIM is zero then DIMS must be zero-terminated
+ *
+ * RE and IM hold the real and imaginary components of the data, and return
+ * the resulting real and imaginary Fourier coefficients. Multidimensional
+ * data *must* be allocated contiguously. There is no limit on the number
+ * of dimensions.
+ *
+ * ISIGN = the sign of the complex exponential (i.e., forward or inverse FFT)
+ * the magnitude of ISIGN (normally 1) is used to determine the
+ * correct indexing increment (see below).
+ *
+ * SCALING = normalizing constant by which the final result is *divided*
+ * if SCALING == -1, normalize by total dimension of the transform
+ * if SCALING < -1, normalize by the square-root of the total dimension
+ *
+ * example:
+ * tri-variate transform with Re[n1][n2][n3], Im[n1][n2][n3]
+ *
+ * int dims[3] = {n1,n2,n3}
+ * WebRtcIsac_Fftn (3, dims, Re, Im, 1, scaling);
+ *
+ *-----------------------------------------------------------------------*
+ * int WebRtcIsac_Fftradix (REAL Re[], REAL Im[], size_t nTotal, size_t nPass,
+ * size_t nSpan, int iSign, size_t max_factors,
+ * size_t max_perm);
+ *
+ * RE, IM - see above documentation
+ *
+ * There is no limit on the number of dimensions, but WebRtcIsac_Fftradix() must
+ * be called once for each dimension; the calls may be in any order.
+ *
+ * NTOTAL = the total number of complex data values
+ * NPASS = the dimension of the current variable
+ * NSPAN/NPASS = the spacing of consecutive data values while indexing the
+ * current variable
+ * ISIGN - see above documentation
+ *
+ * example:
+ * tri-variate transform with Re[n1][n2][n3], Im[n1][n2][n3]
+ *
+ * WebRtcIsac_Fftradix (Re, Im, n1*n2*n3, n1, n1, 1, maxf, maxp);
+ * WebRtcIsac_Fftradix (Re, Im, n1*n2*n3, n2, n1*n2, 1, maxf, maxp);
+ * WebRtcIsac_Fftradix (Re, Im, n1*n2*n3, n3, n1*n2*n3, 1, maxf, maxp);
+ *
+ * single-variate transform,
+ * NTOTAL = N = NSPAN = (number of complex data values),
+ *
+ * WebRtcIsac_Fftradix (Re, Im, n, n, n, 1, maxf, maxp);
+ *
+ * The data can also be stored in a single array with alternating real and
+ * imaginary parts; the magnitude of ISIGN is then changed to 2 to give the
+ * correct indexing increment, and data [0] and data [1] are used to pass the
+ * initial addresses of the real and imaginary sequences,
+ *
+ * example:
+ * REAL data [2*NTOTAL];
+ * WebRtcIsac_Fftradix ( &data[0], &data[1], NTOTAL, nPass, nSpan, 2, maxf, maxp);
+ *
+ * for temporary allocation:
+ *
+ * MAX_FACTORS >= the maximum prime factor of NPASS
+ * MAX_PERM >= the number of prime factors of NPASS. In addition,
+ * if the square-free portion K of NPASS has two or more prime
+ * factors, then MAX_PERM >= (K-1)
+ *
+ * FACTOR provides storage for a maximum of 15 prime factors of NPASS. If
+ * NPASS has more than one square-free factor, the product of the square-free
+ * factors must be <= 210. Array storage assumes a maximum prime factor of 23;
+ * the following two constants should agree with the array dimensions.
+ *
+ *----------------------------------------------------------------------*/
+
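+/*
+ * Usage sketch (editorial addition, not part of the upstream file): a
+ * minimal single-variate round trip through WebRtcIsac_Fftns. Assumes
+ * n <= FFT_MAXFFTSIZE and caller-owned buffers; the helper name is
+ * illustrative only.
+ */
+#if 0 /* example only; not compiled */
+#include <string.h>
+
+#include "modules/third_party/fft/fft.h"
+
+static int ExampleRoundTrip(double re[], double im[], int n) {
+  FFTstr state;
+  int dims[1];
+  dims[0] = n;
+  memset(&state, 0, sizeof(state)); /* conservative; FFTRADIX resets it too */
+  /* Forward transform; scaling == 1.0 leaves the coefficients unscaled. */
+  if (WebRtcIsac_Fftns(1, dims, re, im, 1, 1.0, &state) != 0)
+    return -1;
+  /* Inverse transform; scaling == -1 divides by the total dimension n,
+   * so re[] and im[] return to (approximately) their original values. */
+  return WebRtcIsac_Fftns(1, dims, re, im, -1, -1.0, &state);
+}
+#endif
+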
+#include <stdlib.h>
+#include <math.h>
+
+#include "modules/third_party/fft/fft.h"
+
+/* double precision routine */
+static int
+WebRtcIsac_Fftradix (double Re[], double Im[],
+ size_t nTotal, size_t nPass, size_t nSpan, int isign,
+ int max_factors, unsigned int max_perm,
+ FFTstr *fftstate);
+
+
+
+#ifndef M_PI
+# define M_PI 3.14159265358979323846264338327950288
+#endif
+
+#ifndef SIN60
+# define SIN60 0.86602540378443865 /* sin(60 deg) */
+# define COS72 0.30901699437494742 /* cos(72 deg) */
+# define SIN72 0.95105651629515357 /* sin(72 deg) */
+#endif
+
+# define REAL double
+# define FFTN WebRtcIsac_Fftn
+# define FFTNS "fftn"
+# define FFTRADIX WebRtcIsac_Fftradix
+# define FFTRADIXS "fftradix"
+
+
+int WebRtcIsac_Fftns(unsigned int ndim, const int dims[],
+ double Re[],
+ double Im[],
+ int iSign,
+ double scaling,
+ FFTstr *fftstate)
+{
+
+ size_t nSpan, nPass, nTotal;
+ unsigned int i;
+ int ret, max_factors, max_perm;
+
+ /*
+ * tally the number of elements in the data array
+ * and determine the number of dimensions
+ */
+ nTotal = 1;
+ if (ndim && dims [0])
+ {
+ for (i = 0; i < ndim; i++)
+ {
+ if (dims [i] <= 0)
+ {
+ return -1;
+ }
+ nTotal *= dims [i];
+ }
+ }
+ else
+ {
+ ndim = 0;
+ for (i = 0; dims [i]; i++)
+ {
+ if (dims [i] <= 0)
+ {
+ return -1;
+ }
+ nTotal *= dims [i];
+ ndim++;
+ }
+ }
+
+ /* determine maximum number of factors and permutations */
+#if 1
+ /*
+ * follow John Beale's example, just use the largest dimension and don't
+ * worry about excess allocation. Maybe someone else will do it?
+ */
+ max_factors = max_perm = 1;
+ for (i = 0; i < ndim; i++)
+ {
+ nSpan = dims [i];
+ if ((int)nSpan > max_factors)
+ {
+ max_factors = (int)nSpan;
+ }
+ if ((int)nSpan > max_perm)
+ {
+ max_perm = (int)nSpan;
+ }
+ }
+#else
+ /* use the constants used in the original Fortran code */
+ max_factors = 23;
+ max_perm = 209;
+#endif
+ /* loop over the dimensions: */
+ nPass = 1;
+ for (i = 0; i < ndim; i++)
+ {
+ nSpan = dims [i];
+ nPass *= nSpan;
+ ret = FFTRADIX (Re, Im, nTotal, nSpan, nPass, iSign,
+ max_factors, max_perm, fftstate);
+ /* exit, clean-up already done */
+ if (ret)
+ return ret;
+ }
+
+ /* Divide through by the normalizing constant: */
+ if (scaling && scaling != 1.0)
+ {
+ if (iSign < 0) iSign = -iSign;
+ if (scaling < 0.0)
+ {
+ scaling = (double)nTotal;
+ if (scaling < -1.0)
+ scaling = sqrt (scaling);
+ }
+ scaling = 1.0 / scaling; /* multiply is often faster */
+ for (i = 0; i < nTotal; i += iSign)
+ {
+ Re [i] *= scaling;
+ Im [i] *= scaling;
+ }
+ }
+ return 0;
+}
+
+/*
+ * singleton's mixed radix routine
+ *
+ * could move allocation out to WebRtcIsac_Fftn(), but leave it here so that it's
+ * possible to make this a standalone function
+ */
+
+static int FFTRADIX (REAL Re[],
+ REAL Im[],
+ size_t nTotal,
+ size_t nPass,
+ size_t nSpan,
+ int iSign,
+ int max_factors,
+ unsigned int max_perm,
+ FFTstr *fftstate)
+{
+ int ii, mfactor, kspan, ispan, inc;
+ int j, jc, jf, jj, k, k1, k2, k3, k4, kk, kt, nn, ns, nt;
+
+
+ REAL radf;
+ REAL c1, c2, c3, cd, aa, aj, ak, ajm, ajp, akm, akp;
+ REAL s1, s2, s3, sd, bb, bj, bk, bjm, bjp, bkm, bkp;
+
+ REAL *Rtmp = NULL; /* temp space for real part*/
+ REAL *Itmp = NULL; /* temp space for imaginary part */
+ REAL *Cos = NULL; /* Cosine values */
+ REAL *Sin = NULL; /* Sine values */
+
+ REAL s60 = SIN60; /* sin(60 deg) */
+ REAL c72 = COS72; /* cos(72 deg) */
+ REAL s72 = SIN72; /* sin(72 deg) */
+ REAL pi2 = M_PI; /* use PI first, 2 PI later */
+
+
+ fftstate->SpaceAlloced = 0;
+ fftstate->MaxPermAlloced = 0;
+
+
+ // initialize to avoid warnings
+ k3 = c2 = c3 = s2 = s3 = 0.0;
+
+ if (nPass < 2)
+ return 0;
+
+ /* allocate storage */
+ if (fftstate->SpaceAlloced < max_factors * sizeof (REAL))
+ {
+#ifdef SUN_BROKEN_REALLOC
+ if (!fftstate->SpaceAlloced) /* first time */
+ {
+ fftstate->SpaceAlloced = max_factors * sizeof (REAL);
+ }
+ else
+ {
+#endif
+ fftstate->SpaceAlloced = max_factors * sizeof (REAL);
+#ifdef SUN_BROKEN_REALLOC
+ }
+#endif
+ }
+ else
+ {
+ /* allow full use of alloc'd space */
+ max_factors = fftstate->SpaceAlloced / sizeof (REAL);
+ }
+ if (fftstate->MaxPermAlloced < max_perm)
+ {
+#ifdef SUN_BROKEN_REALLOC
+ if (!fftstate->MaxPermAlloced) /* first time */
+ else
+#endif
+ fftstate->MaxPermAlloced = max_perm;
+ }
+ else
+ {
+ /* allow full use of alloc'd space */
+ max_perm = fftstate->MaxPermAlloced;
+ }
+
+ /* assign pointers */
+ Rtmp = (REAL *) fftstate->Tmp0;
+ Itmp = (REAL *) fftstate->Tmp1;
+ Cos = (REAL *) fftstate->Tmp2;
+ Sin = (REAL *) fftstate->Tmp3;
+
+ /*
+ * Function Body
+ */
+ inc = iSign;
+ if (iSign < 0) {
+ s72 = -s72;
+ s60 = -s60;
+ pi2 = -pi2;
+ inc = -inc; /* absolute value */
+ }
+
+ /* adjust for strange increments */
+ nt = inc * (int)nTotal;
+ ns = inc * (int)nSpan;
+ kspan = ns;
+
+ nn = nt - inc;
+ jc = ns / (int)nPass;
+ radf = pi2 * (double) jc;
+ pi2 *= 2.0; /* use 2 PI from here on */
+
+ ii = 0;
+ jf = 0;
+ /* determine the factors of n */
+ mfactor = 0;
+ k = (int)nPass;
+ while (k % 16 == 0) {
+ mfactor++;
+ fftstate->factor [mfactor - 1] = 4;
+ k /= 16;
+ }
+ j = 3;
+ jj = 9;
+ do {
+ while (k % jj == 0) {
+ mfactor++;
+ fftstate->factor [mfactor - 1] = j;
+ k /= jj;
+ }
+ j += 2;
+ jj = j * j;
+ } while (jj <= k);
+ if (k <= 4) {
+ kt = mfactor;
+ fftstate->factor [mfactor] = k;
+ if (k != 1)
+ mfactor++;
+ } else {
+ if (k - (k / 4 << 2) == 0) {
+ mfactor++;
+ fftstate->factor [mfactor - 1] = 2;
+ k /= 4;
+ }
+ kt = mfactor;
+ j = 2;
+ do {
+ if (k % j == 0) {
+ mfactor++;
+ fftstate->factor [mfactor - 1] = j;
+ k /= j;
+ }
+ j = ((j + 1) / 2 << 1) + 1;
+ } while (j <= k);
+ }
+ if (kt) {
+ j = kt;
+ do {
+ mfactor++;
+ fftstate->factor [mfactor - 1] = fftstate->factor [j - 1];
+ j--;
+ } while (j);
+ }
+
+ /* test that mfactor is in range */
+ if (mfactor > FFT_NFACTOR)
+ {
+ return -1;
+ }
+
+ /* compute fourier transform */
+ for (;;) {
+ sd = radf / (double) kspan;
+ cd = sin(sd);
+ cd = 2.0 * cd * cd;
+ sd = sin(sd + sd);
+ kk = 0;
+ ii++;
+
+ switch (fftstate->factor [ii - 1]) {
+ case 2:
+ /* transform for factor of 2 (including rotation factor) */
+ kspan /= 2;
+ k1 = kspan + 2;
+ do {
+ do {
+ k2 = kk + kspan;
+ ak = Re [k2];
+ bk = Im [k2];
+ Re [k2] = Re [kk] - ak;
+ Im [k2] = Im [kk] - bk;
+ Re [kk] += ak;
+ Im [kk] += bk;
+ kk = k2 + kspan;
+ } while (kk < nn);
+ kk -= nn;
+ } while (kk < jc);
+ if (kk >= kspan)
+ goto Permute_Results_Label; /* exit infinite loop */
+ do {
+ c1 = 1.0 - cd;
+ s1 = sd;
+ do {
+ do {
+ do {
+ k2 = kk + kspan;
+ ak = Re [kk] - Re [k2];
+ bk = Im [kk] - Im [k2];
+ Re [kk] += Re [k2];
+ Im [kk] += Im [k2];
+ Re [k2] = c1 * ak - s1 * bk;
+ Im [k2] = s1 * ak + c1 * bk;
+ kk = k2 + kspan;
+ } while (kk < (nt-1));
+ k2 = kk - nt;
+ c1 = -c1;
+ kk = k1 - k2;
+ } while (kk > k2);
+ ak = c1 - (cd * c1 + sd * s1);
+ s1 = sd * c1 - cd * s1 + s1;
+ c1 = 2.0 - (ak * ak + s1 * s1);
+ s1 *= c1;
+ c1 *= ak;
+ kk += jc;
+ } while (kk < k2);
+ k1 += inc + inc;
+ kk = (k1 - kspan + 1) / 2 + jc - 1;
+ } while (kk < (jc + jc));
+ break;
+
+ case 4: /* transform for factor of 4 */
+ ispan = kspan;
+ kspan /= 4;
+
+ do {
+ c1 = 1.0;
+ s1 = 0.0;
+ do {
+ do {
+ k1 = kk + kspan;
+ k2 = k1 + kspan;
+ k3 = k2 + kspan;
+ akp = Re [kk] + Re [k2];
+ akm = Re [kk] - Re [k2];
+ ajp = Re [k1] + Re [k3];
+ ajm = Re [k1] - Re [k3];
+ bkp = Im [kk] + Im [k2];
+ bkm = Im [kk] - Im [k2];
+ bjp = Im [k1] + Im [k3];
+ bjm = Im [k1] - Im [k3];
+ Re [kk] = akp + ajp;
+ Im [kk] = bkp + bjp;
+ ajp = akp - ajp;
+ bjp = bkp - bjp;
+ if (iSign < 0) {
+ akp = akm + bjm;
+ bkp = bkm - ajm;
+ akm -= bjm;
+ bkm += ajm;
+ } else {
+ akp = akm - bjm;
+ bkp = bkm + ajm;
+ akm += bjm;
+ bkm -= ajm;
+ }
+ /* avoid useless multiplies */
+ if (s1 == 0.0) {
+ Re [k1] = akp;
+ Re [k2] = ajp;
+ Re [k3] = akm;
+ Im [k1] = bkp;
+ Im [k2] = bjp;
+ Im [k3] = bkm;
+ } else {
+ Re [k1] = akp * c1 - bkp * s1;
+ Re [k2] = ajp * c2 - bjp * s2;
+ Re [k3] = akm * c3 - bkm * s3;
+ Im [k1] = akp * s1 + bkp * c1;
+ Im [k2] = ajp * s2 + bjp * c2;
+ Im [k3] = akm * s3 + bkm * c3;
+ }
+ kk = k3 + kspan;
+ } while (kk < nt);
+
+ c2 = c1 - (cd * c1 + sd * s1);
+ s1 = sd * c1 - cd * s1 + s1;
+ c1 = 2.0 - (c2 * c2 + s1 * s1);
+ s1 *= c1;
+ c1 *= c2;
+ /* values of c2, c3, s2, s3 that will get used next time */
+ c2 = c1 * c1 - s1 * s1;
+ s2 = 2.0 * c1 * s1;
+ c3 = c2 * c1 - s2 * s1;
+ s3 = c2 * s1 + s2 * c1;
+ kk = kk - nt + jc;
+ } while (kk < kspan);
+ kk = kk - kspan + inc;
+ } while (kk < jc);
+ if (kspan == jc)
+ goto Permute_Results_Label; /* exit infinite loop */
+ break;
+
+ default:
+ /* transform for odd factors */
+#ifdef FFT_RADIX4
+ return -1;
+ break;
+#else /* FFT_RADIX4 */
+ k = fftstate->factor [ii - 1];
+ ispan = kspan;
+ kspan /= k;
+
+ switch (k) {
+ case 3: /* transform for factor of 3 (optional code) */
+ do {
+ do {
+ k1 = kk + kspan;
+ k2 = k1 + kspan;
+ ak = Re [kk];
+ bk = Im [kk];
+ aj = Re [k1] + Re [k2];
+ bj = Im [k1] + Im [k2];
+ Re [kk] = ak + aj;
+ Im [kk] = bk + bj;
+ ak -= 0.5 * aj;
+ bk -= 0.5 * bj;
+ aj = (Re [k1] - Re [k2]) * s60;
+ bj = (Im [k1] - Im [k2]) * s60;
+ Re [k1] = ak - bj;
+ Re [k2] = ak + bj;
+ Im [k1] = bk + aj;
+ Im [k2] = bk - aj;
+ kk = k2 + kspan;
+ } while (kk < (nn - 1));
+ kk -= nn;
+ } while (kk < kspan);
+ break;
+
+ case 5: /* transform for factor of 5 (optional code) */
+ c2 = c72 * c72 - s72 * s72;
+ s2 = 2.0 * c72 * s72;
+ do {
+ do {
+ k1 = kk + kspan;
+ k2 = k1 + kspan;
+ k3 = k2 + kspan;
+ k4 = k3 + kspan;
+ akp = Re [k1] + Re [k4];
+ akm = Re [k1] - Re [k4];
+ bkp = Im [k1] + Im [k4];
+ bkm = Im [k1] - Im [k4];
+ ajp = Re [k2] + Re [k3];
+ ajm = Re [k2] - Re [k3];
+ bjp = Im [k2] + Im [k3];
+ bjm = Im [k2] - Im [k3];
+ aa = Re [kk];
+ bb = Im [kk];
+ Re [kk] = aa + akp + ajp;
+ Im [kk] = bb + bkp + bjp;
+ ak = akp * c72 + ajp * c2 + aa;
+ bk = bkp * c72 + bjp * c2 + bb;
+ aj = akm * s72 + ajm * s2;
+ bj = bkm * s72 + bjm * s2;
+ Re [k1] = ak - bj;
+ Re [k4] = ak + bj;
+ Im [k1] = bk + aj;
+ Im [k4] = bk - aj;
+ ak = akp * c2 + ajp * c72 + aa;
+ bk = bkp * c2 + bjp * c72 + bb;
+ aj = akm * s2 - ajm * s72;
+ bj = bkm * s2 - bjm * s72;
+ Re [k2] = ak - bj;
+ Re [k3] = ak + bj;
+ Im [k2] = bk + aj;
+ Im [k3] = bk - aj;
+ kk = k4 + kspan;
+ } while (kk < (nn-1));
+ kk -= nn;
+ } while (kk < kspan);
+ break;
+
+ default:
+ if (k != jf) {
+ jf = k;
+ s1 = pi2 / (double) k;
+ c1 = cos(s1);
+ s1 = sin(s1);
+ if (jf > max_factors){
+ return -1;
+ }
+ Cos [jf - 1] = 1.0;
+ Sin [jf - 1] = 0.0;
+ j = 1;
+ do {
+ Cos [j - 1] = Cos [k - 1] * c1 + Sin [k - 1] * s1;
+ Sin [j - 1] = Cos [k - 1] * s1 - Sin [k - 1] * c1;
+ k--;
+ Cos [k - 1] = Cos [j - 1];
+ Sin [k - 1] = -Sin [j - 1];
+ j++;
+ } while (j < k);
+ }
+ do {
+ do {
+ k1 = kk;
+ k2 = kk + ispan;
+ ak = aa = Re [kk];
+ bk = bb = Im [kk];
+ j = 1;
+ k1 += kspan;
+ do {
+ k2 -= kspan;
+ j++;
+ Rtmp [j - 1] = Re [k1] + Re [k2];
+ ak += Rtmp [j - 1];
+ Itmp [j - 1] = Im [k1] + Im [k2];
+ bk += Itmp [j - 1];
+ j++;
+ Rtmp [j - 1] = Re [k1] - Re [k2];
+ Itmp [j - 1] = Im [k1] - Im [k2];
+ k1 += kspan;
+ } while (k1 < k2);
+ Re [kk] = ak;
+ Im [kk] = bk;
+ k1 = kk;
+ k2 = kk + ispan;
+ j = 1;
+ do {
+ k1 += kspan;
+ k2 -= kspan;
+ jj = j;
+ ak = aa;
+ bk = bb;
+ aj = 0.0;
+ bj = 0.0;
+ k = 1;
+ do {
+ k++;
+ ak += Rtmp [k - 1] * Cos [jj - 1];
+ bk += Itmp [k - 1] * Cos [jj - 1];
+ k++;
+ aj += Rtmp [k - 1] * Sin [jj - 1];
+ bj += Itmp [k - 1] * Sin [jj - 1];
+ jj += j;
+ if (jj > jf) {
+ jj -= jf;
+ }
+ } while (k < jf);
+ k = jf - j;
+ Re [k1] = ak - bj;
+ Im [k1] = bk + aj;
+ Re [k2] = ak + bj;
+ Im [k2] = bk - aj;
+ j++;
+ } while (j < k);
+ kk += ispan;
+ } while (kk < nn);
+ kk -= nn;
+ } while (kk < kspan);
+ break;
+ }
+
+ /* multiply by rotation factor (except for factors of 2 and 4) */
+ if (ii == mfactor)
+ goto Permute_Results_Label; /* exit infinite loop */
+ kk = jc;
+ do {
+ c2 = 1.0 - cd;
+ s1 = sd;
+ do {
+ c1 = c2;
+ s2 = s1;
+ kk += kspan;
+ do {
+ do {
+ ak = Re [kk];
+ Re [kk] = c2 * ak - s2 * Im [kk];
+ Im [kk] = s2 * ak + c2 * Im [kk];
+ kk += ispan;
+ } while (kk < nt);
+ ak = s1 * s2;
+ s2 = s1 * c2 + c1 * s2;
+ c2 = c1 * c2 - ak;
+ kk = kk - nt + kspan;
+ } while (kk < ispan);
+ c2 = c1 - (cd * c1 + sd * s1);
+ s1 += sd * c1 - cd * s1;
+ c1 = 2.0 - (c2 * c2 + s1 * s1);
+ s1 *= c1;
+ c2 *= c1;
+ kk = kk - ispan + jc;
+ } while (kk < kspan);
+ kk = kk - kspan + jc + inc;
+ } while (kk < (jc + jc));
+ break;
+#endif /* FFT_RADIX4 */
+ }
+ }
+
+ /* permute the results to normal order---done in two stages */
+ /* permutation for square factors of n */
+Permute_Results_Label:
+ fftstate->Perm [0] = ns;
+ if (kt) {
+ k = kt + kt + 1;
+ if (mfactor < k)
+ k--;
+ j = 1;
+ fftstate->Perm [k] = jc;
+ do {
+ fftstate->Perm [j] = fftstate->Perm [j - 1] / fftstate->factor [j - 1];
+ fftstate->Perm [k - 1] = fftstate->Perm [k] * fftstate->factor [j - 1];
+ j++;
+ k--;
+ } while (j < k);
+ k3 = fftstate->Perm [k];
+ kspan = fftstate->Perm [1];
+ kk = jc;
+ k2 = kspan;
+ j = 1;
+ if (nPass != nTotal) {
+ /* permutation for multivariate transform */
+ Permute_Multi_Label:
+ do {
+ do {
+ k = kk + jc;
+ do {
+ /* swap Re [kk] <> Re [k2], Im [kk] <> Im [k2] */
+ ak = Re [kk]; Re [kk] = Re [k2]; Re [k2] = ak;
+ bk = Im [kk]; Im [kk] = Im [k2]; Im [k2] = bk;
+ kk += inc;
+ k2 += inc;
+ } while (kk < (k-1));
+ kk += ns - jc;
+ k2 += ns - jc;
+ } while (kk < (nt-1));
+ k2 = k2 - nt + kspan;
+ kk = kk - nt + jc;
+ } while (k2 < (ns-1));
+ do {
+ do {
+ k2 -= fftstate->Perm [j - 1];
+ j++;
+ k2 = fftstate->Perm [j] + k2;
+ } while (k2 > fftstate->Perm [j - 1]);
+ j = 1;
+ do {
+ if (kk < (k2-1))
+ goto Permute_Multi_Label;
+ kk += jc;
+ k2 += kspan;
+ } while (k2 < (ns-1));
+ } while (kk < (ns-1));
+ } else {
+ /* permutation for single-variate transform (optional code) */
+ Permute_Single_Label:
+ do {
+ /* swap Re [kk] <> Re [k2], Im [kk] <> Im [k2] */
+ ak = Re [kk]; Re [kk] = Re [k2]; Re [k2] = ak;
+ bk = Im [kk]; Im [kk] = Im [k2]; Im [k2] = bk;
+ kk += inc;
+ k2 += kspan;
+ } while (k2 < (ns-1));
+ do {
+ do {
+ k2 -= fftstate->Perm [j - 1];
+ j++;
+ k2 = fftstate->Perm [j] + k2;
+ } while (k2 >= fftstate->Perm [j - 1]);
+ j = 1;
+ do {
+ if (kk < k2)
+ goto Permute_Single_Label;
+ kk += inc;
+ k2 += kspan;
+ } while (k2 < (ns-1));
+ } while (kk < (ns-1));
+ }
+ jc = k3;
+ }
+
+ if ((kt << 1) + 1 >= mfactor)
+ return 0;
+ ispan = fftstate->Perm [kt];
+ /* permutation for square-free factors of n */
+ j = mfactor - kt;
+ fftstate->factor [j] = 1;
+ do {
+ fftstate->factor [j - 1] *= fftstate->factor [j];
+ j--;
+ } while (j != kt);
+ kt++;
+ nn = fftstate->factor [kt - 1] - 1;
+ if (nn > (int) max_perm) {
+ return -1;
+ }
+ j = jj = 0;
+ for (;;) {
+ k = kt + 1;
+ k2 = fftstate->factor [kt - 1];
+ kk = fftstate->factor [k - 1];
+ j++;
+ if (j > nn)
+ break; /* exit infinite loop */
+ jj += kk;
+ while (jj >= k2) {
+ jj -= k2;
+ k2 = kk;
+ k++;
+ kk = fftstate->factor [k - 1];
+ jj += kk;
+ }
+ fftstate->Perm [j - 1] = jj;
+ }
+ /* determine the permutation cycles of length greater than 1 */
+ j = 0;
+ for (;;) {
+ do {
+ j++;
+ kk = fftstate->Perm [j - 1];
+ } while (kk < 0);
+ if (kk != j) {
+ do {
+ k = kk;
+ kk = fftstate->Perm [k - 1];
+ fftstate->Perm [k - 1] = -kk;
+ } while (kk != j);
+ k3 = kk;
+ } else {
+ fftstate->Perm [j - 1] = -j;
+ if (j == nn)
+ break; /* exit infinite loop */
+ }
+ }
+ max_factors *= inc;
+ /* reorder a and b, following the permutation cycles */
+ for (;;) {
+ j = k3 + 1;
+ nt -= ispan;
+ ii = nt - inc + 1;
+ if (nt < 0)
+ break; /* exit infinite loop */
+ do {
+ do {
+ j--;
+ } while (fftstate->Perm [j - 1] < 0);
+ jj = jc;
+ do {
+ kspan = jj;
+ if (jj > max_factors) {
+ kspan = max_factors;
+ }
+ jj -= kspan;
+ k = fftstate->Perm [j - 1];
+ kk = jc * k + ii + jj;
+ k1 = kk + kspan - 1;
+ k2 = 0;
+ do {
+ k2++;
+ Rtmp [k2 - 1] = Re [k1];
+ Itmp [k2 - 1] = Im [k1];
+ k1 -= inc;
+ } while (k1 != (kk-1));
+ do {
+ k1 = kk + kspan - 1;
+ k2 = k1 - jc * (k + fftstate->Perm [k - 1]);
+ k = -fftstate->Perm [k - 1];
+ do {
+ Re [k1] = Re [k2];
+ Im [k1] = Im [k2];
+ k1 -= inc;
+ k2 -= inc;
+ } while (k1 != (kk-1));
+ kk = k2 + 1;
+ } while (k != j);
+ k1 = kk + kspan - 1;
+ k2 = 0;
+ do {
+ k2++;
+ Re [k1] = Rtmp [k2 - 1];
+ Im [k1] = Itmp [k2 - 1];
+ k1 -= inc;
+ } while (k1 != (kk-1));
+ } while (jj);
+ } while (j != 1);
+ }
+ return 0; /* exit point here */
+}
+/* ---------------------- end-of-file (c source) ---------------------- */
+
diff --git a/third_party/libwebrtc/modules/third_party/fft/fft.h b/third_party/libwebrtc/modules/third_party/fft/fft.h
new file mode 100644
index 0000000000..f8f8b6f17b
--- /dev/null
+++ b/third_party/libwebrtc/modules/third_party/fft/fft.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the ../../../LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*--------------------------------*-C-*---------------------------------*
+ * File:
+ * fftn.h
+ * ---------------------------------------------------------------------*
+ * Re[]: real value array
+ * Im[]: imaginary value array
+ * nTotal: total number of complex values
+ * nPass: number of elements involved in this pass of transform
+ * nSpan: nspan/nPass = number of bytes to increment pointer
+ * in Re[] and Im[]
+ * isign: exponent: +1 = forward -1 = reverse
+ * scaling: normalizing constant by which the final result is *divided*
+ * scaling == -1, normalize by total dimension of the transform
+ * scaling < -1, normalize by the square-root of the total dimension
+ *
+ * ----------------------------------------------------------------------
+ * See the comments in the code for correct usage!
+ */
+
+#ifndef MODULES_THIRD_PARTY_FFT_FFT_H_
+#define MODULES_THIRD_PARTY_FFT_FFT_H_
+
+#define FFT_MAXFFTSIZE 2048
+#define FFT_NFACTOR 11
+
+typedef struct {
+ unsigned int SpaceAlloced;
+ unsigned int MaxPermAlloced;
+ double Tmp0[FFT_MAXFFTSIZE];
+ double Tmp1[FFT_MAXFFTSIZE];
+ double Tmp2[FFT_MAXFFTSIZE];
+ double Tmp3[FFT_MAXFFTSIZE];
+ int Perm[FFT_MAXFFTSIZE];
+ int factor[FFT_NFACTOR];
+
+} FFTstr;
+
+/* double precision routine */
+
+int WebRtcIsac_Fftns(unsigned int ndim,
+ const int dims[],
+ double Re[],
+ double Im[],
+ int isign,
+ double scaling,
+ FFTstr* fftstate);
+
+#endif /* MODULES_THIRD_PARTY_FFT_FFT_H_ */
diff --git a/third_party/libwebrtc/modules/third_party/fft/fft_gn/moz.build b/third_party/libwebrtc/modules/third_party/fft/fft_gn/moz.build
new file mode 100644
index 0000000000..f035730365
--- /dev/null
+++ b/third_party/libwebrtc/modules/third_party/fft/fft_gn/moz.build
@@ -0,0 +1,197 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/third_party/fft/fft.c"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("fft_gn")
diff --git a/third_party/libwebrtc/modules/third_party/g711/BUILD.gn b/third_party/libwebrtc/modules/third_party/g711/BUILD.gn
new file mode 100644
index 0000000000..f7735a6fe7
--- /dev/null
+++ b/third_party/libwebrtc/modules/third_party/g711/BUILD.gn
@@ -0,0 +1,17 @@
+# Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the ../../../LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+import("../../../webrtc.gni")
+
+rtc_library("g711_3p") {
+ poisonous = [ "audio_codecs" ]
+ sources = [
+ "g711.c",
+ "g711.h",
+ ]
+}
diff --git a/third_party/libwebrtc/modules/third_party/g711/LICENSE b/third_party/libwebrtc/modules/third_party/g711/LICENSE
new file mode 100644
index 0000000000..3cdf910304
--- /dev/null
+++ b/third_party/libwebrtc/modules/third_party/g711/LICENSE
@@ -0,0 +1,14 @@
+/*
+ * SpanDSP - a series of DSP components for telephony
+ *
+ * g711.h - In line A-law and u-law conversion routines
+ *
+ * Written by Steve Underwood <steveu@coppice.org>
+ *
+ * Copyright (C) 2001 Steve Underwood
+ *
+ * Despite my general liking of the GPL, I place this code in the
+ * public domain for the benefit of all mankind - even the slimy
+ * ones who might try to proprietize my work and use it to my
+ * detriment.
+ */
diff --git a/third_party/libwebrtc/modules/third_party/g711/README.chromium b/third_party/libwebrtc/modules/third_party/g711/README.chromium
new file mode 100644
index 0000000000..1baa2637a9
--- /dev/null
+++ b/third_party/libwebrtc/modules/third_party/g711/README.chromium
@@ -0,0 +1,11 @@
+Name: In line A-law and u-law conversion routines
+Short Name: g711
+URL:
+Version: 0
+Date: 2018-06-25
+License: Custom license
+License File: LICENSE
+Security Critical: yes
+
+Description:
+In line A-law and u-law conversion routines
diff --git a/third_party/libwebrtc/modules/third_party/g711/g711.c b/third_party/libwebrtc/modules/third_party/g711/g711.c
new file mode 100644
index 0000000000..b7ae6bb109
--- /dev/null
+++ b/third_party/libwebrtc/modules/third_party/g711/g711.c
@@ -0,0 +1,72 @@
+/*
+ * SpanDSP - a series of DSP components for telephony
+ *
+ * g711.c - A-law and u-law transcoding routines
+ *
+ * Written by Steve Underwood <steveu@coppice.org>
+ *
+ * Copyright (C) 2006 Steve Underwood
+ *
+ * Despite my general liking of the GPL, I place this code in the
+ * public domain for the benefit of all mankind - even the slimy
+ * ones who might try to proprietize my work and use it to my
+ * detriment.
+ *
+ * $Id: g711.c,v 1.1 2006/06/07 15:46:39 steveu Exp $
+ *
+ * Modifications for WebRtc, 2011/04/28, by tlegrand:
+ * -Removed unused include files
+ * -Changed to use WebRtc types
+ * -Added option to run encoder bitexact with ITU-T reference implementation
+ */
+
+#include "modules/third_party/g711/g711.h"
+
+/* Copied from the CCITT G.711 specification */
+static const uint8_t ulaw_to_alaw_table[256] = {
+ 42, 43, 40, 41, 46, 47, 44, 45, 34, 35, 32, 33, 38, 39, 36,
+ 37, 58, 59, 56, 57, 62, 63, 60, 61, 50, 51, 48, 49, 54, 55,
+ 52, 53, 10, 11, 8, 9, 14, 15, 12, 13, 2, 3, 0, 1, 6,
+ 7, 4, 26, 27, 24, 25, 30, 31, 28, 29, 18, 19, 16, 17, 22,
+ 23, 20, 21, 106, 104, 105, 110, 111, 108, 109, 98, 99, 96, 97, 102,
+ 103, 100, 101, 122, 120, 126, 127, 124, 125, 114, 115, 112, 113, 118, 119,
+ 116, 117, 75, 73, 79, 77, 66, 67, 64, 65, 70, 71, 68, 69, 90,
+ 91, 88, 89, 94, 95, 92, 93, 82, 82, 83, 83, 80, 80, 81, 81,
+ 86, 86, 87, 87, 84, 84, 85, 85, 170, 171, 168, 169, 174, 175, 172,
+ 173, 162, 163, 160, 161, 166, 167, 164, 165, 186, 187, 184, 185, 190, 191,
+ 188, 189, 178, 179, 176, 177, 182, 183, 180, 181, 138, 139, 136, 137, 142,
+ 143, 140, 141, 130, 131, 128, 129, 134, 135, 132, 154, 155, 152, 153, 158,
+ 159, 156, 157, 146, 147, 144, 145, 150, 151, 148, 149, 234, 232, 233, 238,
+ 239, 236, 237, 226, 227, 224, 225, 230, 231, 228, 229, 250, 248, 254, 255,
+ 252, 253, 242, 243, 240, 241, 246, 247, 244, 245, 203, 201, 207, 205, 194,
+ 195, 192, 193, 198, 199, 196, 197, 218, 219, 216, 217, 222, 223, 220, 221,
+ 210, 210, 211, 211, 208, 208, 209, 209, 214, 214, 215, 215, 212, 212, 213,
+ 213
+};
+
+/* These transcoding tables are copied from the CCITT G.711 specification. To
+ achieve optimal results, do not change them. */
+static const uint8_t alaw_to_ulaw_table[256] = {
+ 42, 43, 40, 41, 46, 47, 44, 45, 34, 35, 32, 33, 38, 39, 36,
+ 37, 57, 58, 55, 56, 61, 62, 59, 60, 49, 50, 47, 48, 53, 54,
+ 51, 52, 10, 11, 8, 9, 14, 15, 12, 13, 2, 3, 0, 1, 6,
+ 7, 4, 5, 26, 27, 24, 25, 30, 31, 28, 29, 18, 19, 16, 17,
+ 22, 23, 20, 21, 98, 99, 96, 97, 102, 103, 100, 101, 93, 93, 92,
+ 92, 95, 95, 94, 94, 116, 118, 112, 114, 124, 126, 120, 122, 106, 107,
+ 104, 105, 110, 111, 108, 109, 72, 73, 70, 71, 76, 77, 74, 75, 64,
+ 65, 63, 63, 68, 69, 66, 67, 86, 87, 84, 85, 90, 91, 88, 89,
+ 79, 79, 78, 78, 82, 83, 80, 81, 170, 171, 168, 169, 174, 175, 172,
+ 173, 162, 163, 160, 161, 166, 167, 164, 165, 185, 186, 183, 184, 189, 190,
+ 187, 188, 177, 178, 175, 176, 181, 182, 179, 180, 138, 139, 136, 137, 142,
+ 143, 140, 141, 130, 131, 128, 129, 134, 135, 132, 133, 154, 155, 152, 153,
+ 158, 159, 156, 157, 146, 147, 144, 145, 150, 151, 148, 149, 226, 227, 224,
+ 225, 230, 231, 228, 229, 221, 221, 220, 220, 223, 223, 222, 222, 244, 246,
+ 240, 242, 252, 254, 248, 250, 234, 235, 232, 233, 238, 239, 236, 237, 200,
+ 201, 198, 199, 204, 205, 202, 203, 192, 193, 191, 191, 196, 197, 194, 195,
+ 214, 215, 212, 213, 218, 219, 216, 217, 207, 207, 206, 206, 210, 211, 208,
+ 209
+};
+
+uint8_t alaw_to_ulaw(uint8_t alaw) { return alaw_to_ulaw_table[alaw]; }
+
+uint8_t ulaw_to_alaw(uint8_t ulaw) { return ulaw_to_alaw_table[ulaw]; }
diff --git a/third_party/libwebrtc/modules/third_party/g711/g711.h b/third_party/libwebrtc/modules/third_party/g711/g711.h
new file mode 100644
index 0000000000..4eef42c0bf
--- /dev/null
+++ b/third_party/libwebrtc/modules/third_party/g711/g711.h
@@ -0,0 +1,350 @@
+/*
+ * SpanDSP - a series of DSP components for telephony
+ *
+ * g711.h - In line A-law and u-law conversion routines
+ *
+ * Written by Steve Underwood <steveu@coppice.org>
+ *
+ * Copyright (C) 2001 Steve Underwood
+ *
+ * Despite my general liking of the GPL, I place this code in the
+ * public domain for the benefit of all mankind - even the slimy
+ * ones who might try to proprietize my work and use it to my
+ * detriment.
+ *
+ * $Id: g711.h,v 1.1 2006/06/07 15:46:39 steveu Exp $
+ *
+ * Modifications for WebRtc, 2011/04/28, by tlegrand:
+ * -Changed to use WebRtc types
+ * -Changed __inline__ to __inline
+ * -Two changes to make implementation bitexact with ITU-T reference
+ * implementation
+ */
+
+/*! \page g711_page A-law and mu-law handling
+Lookup tables for A-law and u-law look attractive, until you consider the impact
+on the CPU cache. If it causes a substantial area of your processor cache to get
+hit too often, cache sloshing will severely slow things down. The main reason
+these routines are slow in C, is the lack of direct access to the CPU's "find
+the first 1" instruction. A little in-line assembler fixes that, and the
+conversion routines can be faster than lookup tables, in most real world usage.
+A "find the first 1" instruction is available on most modern CPUs, and is a
+much underused feature.
+
+If an assembly language method of bit searching is not available, these routines
+revert to a method that can be a little slow, so the cache thrashing might not
+seem so bad :(
+
+Feel free to submit patches to add fast "find the first 1" support for your own
+favourite processor.
+
+Lookup tables are used for transcoding between A-law and u-law, since it is
+difficult to achieve the precise transcoding procedure laid down in the G.711
+specification by other means.
+*/
+
+#ifndef MODULES_THIRD_PARTY_G711_G711_H_
+#define MODULES_THIRD_PARTY_G711_G711_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+
+#if defined(__i386__)
+/*! \brief Find the bit position of the highest set bit in a word
+ \param bits The word to be searched
+ \return The bit number of the highest set bit, or -1 if the word is zero. */
+static __inline__ int top_bit(unsigned int bits) {
+ int res;
+
+ __asm__ __volatile__(
+ " movl $-1,%%edx;\n"
+ " bsrl %%eax,%%edx;\n"
+ : "=d"(res)
+ : "a"(bits));
+ return res;
+}
+
+/*! \brief Find the bit position of the lowest set bit in a word
+ \param bits The word to be searched
+ \return The bit number of the lowest set bit, or -1 if the word is zero. */
+static __inline__ int bottom_bit(unsigned int bits) {
+ int res;
+
+ __asm__ __volatile__(
+ " movl $-1,%%edx;\n"
+ " bsfl %%eax,%%edx;\n"
+ : "=d"(res)
+ : "a"(bits));
+ return res;
+}
+#elif defined(__x86_64__)
+static __inline__ int top_bit(unsigned int bits) {
+ int res;
+
+ __asm__ __volatile__(
+ " movq $-1,%%rdx;\n"
+ " bsrq %%rax,%%rdx;\n"
+ : "=d"(res)
+ : "a"(bits));
+ return res;
+}
+
+static __inline__ int bottom_bit(unsigned int bits) {
+ int res;
+
+ __asm__ __volatile__(
+ " movq $-1,%%rdx;\n"
+ " bsfq %%rax,%%rdx;\n"
+ : "=d"(res)
+ : "a"(bits));
+ return res;
+}
+#else
+static __inline int top_bit(unsigned int bits) {
+ int i;
+
+ if (bits == 0) {
+ return -1;
+ }
+ i = 0;
+ if (bits & 0xFFFF0000) {
+ bits &= 0xFFFF0000;
+ i += 16;
+ }
+ if (bits & 0xFF00FF00) {
+ bits &= 0xFF00FF00;
+ i += 8;
+ }
+ if (bits & 0xF0F0F0F0) {
+ bits &= 0xF0F0F0F0;
+ i += 4;
+ }
+ if (bits & 0xCCCCCCCC) {
+ bits &= 0xCCCCCCCC;
+ i += 2;
+ }
+ if (bits & 0xAAAAAAAA) {
+ bits &= 0xAAAAAAAA;
+ i += 1;
+ }
+ return i;
+}
+
+static __inline int bottom_bit(unsigned int bits) {
+ int i;
+
+ if (bits == 0) {
+ return -1;
+ }
+ i = 32;
+ if (bits & 0x0000FFFF) {
+ bits &= 0x0000FFFF;
+ i -= 16;
+ }
+ if (bits & 0x00FF00FF) {
+ bits &= 0x00FF00FF;
+ i -= 8;
+ }
+ if (bits & 0x0F0F0F0F) {
+ bits &= 0x0F0F0F0F;
+ i -= 4;
+ }
+ if (bits & 0x33333333) {
+ bits &= 0x33333333;
+ i -= 2;
+ }
+ if (bits & 0x55555555) {
+ bits &= 0x55555555;
+ i -= 1;
+ }
+ return i;
+}
+#endif
+
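+/* Sanity examples for top_bit() (editorial note; the values follow from the
+ * implementations above): top_bit(0) == -1, top_bit(1) == 0,
+ * top_bit(0x100) == 8, top_bit(0x80000000) == 31. */
+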
+/* N.B. It is tempting to use look-up tables for A-law and u-law conversion.
+ * However, you should consider the cache footprint.
+ *
+ * A 64K byte table for linear to x-law and a 512 byte table for x-law to
+ * linear sound like peanuts these days, and shouldn't an array lookup be
+ * real fast? No! When the cache sloshes as badly as this one will, a tight
+ * calculation may be better. The messiest part is normally finding the
+ * segment, but a little inline assembly can fix that on an i386, x86_64
+ * and many other modern processors.
+ */
+
+/*
+ * Mu-law is basically as follows:
+ *
+ * Biased Linear Input Code Compressed Code
+ * ------------------------ ---------------
+ * 00000001wxyza 000wxyz
+ * 0000001wxyzab 001wxyz
+ * 000001wxyzabc 010wxyz
+ * 00001wxyzabcd 011wxyz
+ * 0001wxyzabcde 100wxyz
+ * 001wxyzabcdef 101wxyz
+ * 01wxyzabcdefg 110wxyz
+ * 1wxyzabcdefgh 111wxyz
+ *
+ * Each biased linear code has a leading 1 which identifies the segment
+ * number. The value of the segment number is equal to 7 minus the number
+ * of leading 0's. The quantization interval is directly available as the
+ * four bits wxyz. The trailing bits (a - h) are ignored.
+ *
+ * Ordinarily the complement of the resulting code word is used for
+ * transmission, and so the code word is complemented before it is returned.
+ *
+ * For further information see John C. Bellamy's Digital Telephony, 1982,
+ * John Wiley & Sons, pps 98-111 and 472-476.
+ */
+
+/* #define ULAW_ZEROTRAP */ /* turn on the trap as per the MIL-STD */
+#define ULAW_BIAS 0x84 /* Bias for linear code. */
+
+/*! \brief Encode a linear sample to u-law
+ \param linear The sample to encode.
+ \return The u-law value.
+*/
+static __inline uint8_t linear_to_ulaw(int linear) {
+ uint8_t u_val;
+ int mask;
+ int seg;
+
+ /* Get the sign and the magnitude of the value. */
+ if (linear < 0) {
+ /* WebRtc, tlegrand: -1 added to get bitexact to reference implementation */
+ linear = ULAW_BIAS - linear - 1;
+ mask = 0x7F;
+ } else {
+ linear = ULAW_BIAS + linear;
+ mask = 0xFF;
+ }
+
+ seg = top_bit(linear | 0xFF) - 7;
+
+ /*
+ * Combine the sign, segment, quantization bits,
+ * and complement the code word.
+ */
+ if (seg >= 8)
+ u_val = (uint8_t)(0x7F ^ mask);
+ else
+ u_val = (uint8_t)(((seg << 4) | ((linear >> (seg + 3)) & 0xF)) ^ mask);
+#ifdef ULAW_ZEROTRAP
+ /* Optional ITU trap */
+ if (u_val == 0)
+ u_val = 0x02;
+#endif
+ return u_val;
+}
+
+/*! \brief Decode a u-law sample to a linear value.
+ \param ulaw The u-law sample to decode.
+ \return The linear value.
+*/
+static __inline int16_t ulaw_to_linear(uint8_t ulaw) {
+ int t;
+
+ /* Complement to obtain normal u-law value. */
+ ulaw = ~ulaw;
+ /*
+ * Extract and bias the quantization bits. Then
+ * shift up by the segment number and subtract out the bias.
+ */
+ t = (((ulaw & 0x0F) << 3) + ULAW_BIAS) << (((int)ulaw & 0x70) >> 4);
+ return (int16_t)((ulaw & 0x80) ? (ULAW_BIAS - t) : (t - ULAW_BIAS));
+}
+
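+#if 0 /* Usage sketch (editorial addition, not part of the upstream header):
+       * u-law coding is lossy, so a round trip yields the quantized
+       * representative of the input, not the input itself. */
+static void ExampleUlawRoundTrip(void) {
+  int16_t pcm = 1000;                  /* arbitrary linear sample */
+  uint8_t code = linear_to_ulaw(pcm);  /* 8-bit companded value */
+  int16_t back = ulaw_to_linear(code); /* 988 for this input */
+  (void)back;
+}
+#endif
+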
+/*
+ * A-law is basically as follows:
+ *
+ * Linear Input Code Compressed Code
+ * ----------------- ---------------
+ * 0000000wxyza 000wxyz
+ * 0000001wxyza 001wxyz
+ * 000001wxyzab 010wxyz
+ * 00001wxyzabc 011wxyz
+ * 0001wxyzabcd 100wxyz
+ * 001wxyzabcde 101wxyz
+ * 01wxyzabcdef 110wxyz
+ * 1wxyzabcdefg 111wxyz
+ *
+ * For further information see John C. Bellamy's Digital Telephony, 1982,
+ * John Wiley & Sons, pps 98-111 and 472-476.
+ */
+
+#define ALAW_AMI_MASK 0x55
+
+/*! \brief Encode a linear sample to A-law
+ \param linear The sample to encode.
+ \return The A-law value.
+*/
+static __inline uint8_t linear_to_alaw(int linear) {
+ int mask;
+ int seg;
+
+ if (linear >= 0) {
+ /* Sign (bit 7) bit = 1 */
+ mask = ALAW_AMI_MASK | 0x80;
+ } else {
+ /* Sign (bit 7) bit = 0 */
+ mask = ALAW_AMI_MASK;
+ /* WebRtc, tlegrand: Changed from -8 to -1 to get bitexact to reference
+ * implementation */
+ linear = -linear - 1;
+ }
+
+ /* Convert the scaled magnitude to segment number. */
+ seg = top_bit(linear | 0xFF) - 7;
+ if (seg >= 8) {
+ if (linear >= 0) {
+ /* Out of range. Return maximum value. */
+ return (uint8_t)(0x7F ^ mask);
+ }
+ /* We must be just a tiny step below zero */
+ return (uint8_t)(0x00 ^ mask);
+ }
+ /* Combine the sign, segment, and quantization bits. */
+ return (uint8_t)(((seg << 4) | ((linear >> ((seg) ? (seg + 3) : 4)) & 0x0F)) ^
+ mask);
+}
+
+/*! \brief Decode an A-law sample to a linear value.
+ \param alaw The A-law sample to decode.
+ \return The linear value.
+*/
+static __inline int16_t alaw_to_linear(uint8_t alaw) {
+ int i;
+ int seg;
+
+ alaw ^= ALAW_AMI_MASK;
+ i = ((alaw & 0x0F) << 4);
+ seg = (((int)alaw & 0x70) >> 4);
+ if (seg)
+ i = (i + 0x108) << (seg - 1);
+ else
+ i += 8;
+ return (int16_t)((alaw & 0x80) ? i : -i);
+}
+
+/*! \brief Transcode from A-law to u-law, using the procedure defined in G.711.
+ \param alaw The A-law sample to transcode.
+ \return The best matching u-law value.
+*/
+uint8_t alaw_to_ulaw(uint8_t alaw);
+
+/*! \brief Transcode from u-law to A-law, using the procedure defined in G.711.
+ \param ulaw The u-law sample to transcode.
+ \return The best matching A-law value.
+*/
+uint8_t ulaw_to_alaw(uint8_t ulaw);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* MODULES_THIRD_PARTY_G711_G711_H_ */
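For orientation, a minimal round-trip sketch using the inline helpers above
(standalone usage, not part of the patch; the sample values are arbitrary
illustrations):

    /* Sketch: u-law round trip with the g711.h helpers. */
    #include <stdio.h>
    #include "modules/third_party/g711/g711.h"

    int main(void) {
      int16_t samples[] = { 0, 1000, -1000, 32767, -32768 };
      size_t i;
      for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
        uint8_t u = linear_to_ulaw(samples[i]);
        int16_t back = ulaw_to_linear(u);  /* lossy: nearest quantized value */
        printf("%6d -> 0x%02x -> %6d\n", (int)samples[i], u, (int)back);
      }
      return 0;
    }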
diff --git a/third_party/libwebrtc/modules/third_party/g711/g711_3p_gn/moz.build b/third_party/libwebrtc/modules/third_party/g711/g711_3p_gn/moz.build
new file mode 100644
index 0000000000..240656c0f3
--- /dev/null
+++ b/third_party/libwebrtc/modules/third_party/g711/g711_3p_gn/moz.build
@@ -0,0 +1,197 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/third_party/g711/g711.c"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("g711_3p_gn")
diff --git a/third_party/libwebrtc/modules/third_party/g722/BUILD.gn b/third_party/libwebrtc/modules/third_party/g722/BUILD.gn
new file mode 100644
index 0000000000..316cdc7415
--- /dev/null
+++ b/third_party/libwebrtc/modules/third_party/g722/BUILD.gn
@@ -0,0 +1,18 @@
+# Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the ../../../LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+import("../../../webrtc.gni")
+
+rtc_library("g722_3p") {
+ poisonous = [ "audio_codecs" ]
+ sources = [
+ "g722_decode.c",
+ "g722_enc_dec.h",
+ "g722_encode.c",
+ ]
+}
diff --git a/third_party/libwebrtc/modules/third_party/g722/LICENSE b/third_party/libwebrtc/modules/third_party/g722/LICENSE
new file mode 100644
index 0000000000..ea6308f0db
--- /dev/null
+++ b/third_party/libwebrtc/modules/third_party/g722/LICENSE
@@ -0,0 +1,20 @@
+/*
+ * SpanDSP - a series of DSP components for telephony
+ *
+ * g722_decode.c - The ITU G.722 codec, decode part.
+ *
+ * Written by Steve Underwood <steveu@coppice.org>
+ *
+ * Copyright (C) 2005 Steve Underwood
+ *
+ * Despite my general liking of the GPL, I place my own contributions
+ * to this code in the public domain for the benefit of all mankind -
+ * even the slimy ones who might try to proprietize my work and use it
+ * to my detriment.
+ *
+ * Based in part on a single channel G.722 codec which is:
+ *
+ * Copyright (c) CMU 1993
+ * Computer Science, Speech Group
+ * Chengxiang Lu and Alex Hauptmann
+ */
diff --git a/third_party/libwebrtc/modules/third_party/g722/README.chromium b/third_party/libwebrtc/modules/third_party/g722/README.chromium
new file mode 100644
index 0000000000..c427ed8cf2
--- /dev/null
+++ b/third_party/libwebrtc/modules/third_party/g722/README.chromium
@@ -0,0 +1,11 @@
+Name: The ITU G.722 codec, encode and decode part.
+Short Name: g722
+URL:
+Version: 0
+Date: 2018-06-25
+License: Custom license
+License File: LICENSE
+Security Critical: yes
+
+Description:
+The ITU G.722 codec, encode and decode part.
diff --git a/third_party/libwebrtc/modules/third_party/g722/g722_3p_gn/moz.build b/third_party/libwebrtc/modules/third_party/g722/g722_3p_gn/moz.build
new file mode 100644
index 0000000000..3657b2cbe0
--- /dev/null
+++ b/third_party/libwebrtc/modules/third_party/g722/g722_3p_gn/moz.build
@@ -0,0 +1,201 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+SOURCES += [
+ "/third_party/libwebrtc/modules/third_party/g722/g722_encode.c"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/third_party/g722/g722_decode.c"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("g722_3p_gn")
diff --git a/third_party/libwebrtc/modules/third_party/g722/g722_decode.c b/third_party/libwebrtc/modules/third_party/g722/g722_decode.c
new file mode 100644
index 0000000000..012aeb5028
--- /dev/null
+++ b/third_party/libwebrtc/modules/third_party/g722/g722_decode.c
@@ -0,0 +1,399 @@
+/*
+ * SpanDSP - a series of DSP components for telephony
+ *
+ * g722_decode.c - The ITU G.722 codec, decode part.
+ *
+ * Written by Steve Underwood <steveu@coppice.org>
+ *
+ * Copyright (C) 2005 Steve Underwood
+ *
+ * Despite my general liking of the GPL, I place my own contributions
+ * to this code in the public domain for the benefit of all mankind -
+ * even the slimy ones who might try to proprietize my work and use it
+ * to my detriment.
+ *
+ * Based in part on a single channel G.722 codec which is:
+ *
+ * Copyright (c) CMU 1993
+ * Computer Science, Speech Group
+ * Chengxiang Lu and Alex Hauptmann
+ *
+ * $Id: g722_decode.c,v 1.15 2006/07/07 16:37:49 steveu Exp $
+ *
+ * Modifications for WebRtc, 2011/04/28, by tlegrand:
+ * -Removed usage of inttypes.h and tgmath.h
+ * -Changed to use WebRtc types
+ * -Changed __inline__ to __inline
+ * -Added saturation check on output
+ */
+
+/*! \file */
+
+
+#include <memory.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "modules/third_party/g722/g722_enc_dec.h"
+
+#if !defined(FALSE)
+#define FALSE 0
+#endif
+#if !defined(TRUE)
+#define TRUE (!FALSE)
+#endif
+
+static __inline int16_t saturate(int32_t amp)
+{
+ int16_t amp16;
+
+ /* Hopefully this is optimised for the common case - not clipping */
+ amp16 = (int16_t) amp;
+ if (amp == amp16)
+ return amp16;
+ if (amp > WEBRTC_INT16_MAX)
+ return WEBRTC_INT16_MAX;
+ return WEBRTC_INT16_MIN;
+}
+/*- End of function --------------------------------------------------------*/
+
+static void block4(G722DecoderState *s, int band, int d);
+
+static void block4(G722DecoderState *s, int band, int d)
+{
+ int wd1;
+ int wd2;
+ int wd3;
+ int i;
+
+ /* Block 4, RECONS */
+ s->band[band].d[0] = d;
+ s->band[band].r[0] = saturate(s->band[band].s + d);
+
+ /* Block 4, PARREC */
+ s->band[band].p[0] = saturate(s->band[band].sz + d);
+
+ /* Block 4, UPPOL2 */
+ for (i = 0; i < 3; i++)
+ s->band[band].sg[i] = s->band[band].p[i] >> 15;
+ wd1 = saturate(s->band[band].a[1] * 4);
+
+ wd2 = (s->band[band].sg[0] == s->band[band].sg[1]) ? -wd1 : wd1;
+ if (wd2 > 32767)
+ wd2 = 32767;
+ wd3 = (s->band[band].sg[0] == s->band[band].sg[2]) ? 128 : -128;
+ wd3 += (wd2 >> 7);
+ wd3 += (s->band[band].a[2]*32512) >> 15;
+ if (wd3 > 12288)
+ wd3 = 12288;
+ else if (wd3 < -12288)
+ wd3 = -12288;
+ s->band[band].ap[2] = wd3;
+
+ /* Block 4, UPPOL1 */
+ s->band[band].sg[0] = s->band[band].p[0] >> 15;
+ s->band[band].sg[1] = s->band[band].p[1] >> 15;
+ wd1 = (s->band[band].sg[0] == s->band[band].sg[1]) ? 192 : -192;
+ wd2 = (s->band[band].a[1]*32640) >> 15;
+
+ s->band[band].ap[1] = saturate(wd1 + wd2);
+ wd3 = saturate(15360 - s->band[band].ap[2]);
+ if (s->band[band].ap[1] > wd3)
+ s->band[band].ap[1] = wd3;
+ else if (s->band[band].ap[1] < -wd3)
+ s->band[band].ap[1] = -wd3;
+
+ /* Block 4, UPZERO */
+ wd1 = (d == 0) ? 0 : 128;
+ s->band[band].sg[0] = d >> 15;
+ for (i = 1; i < 7; i++)
+ {
+ s->band[band].sg[i] = s->band[band].d[i] >> 15;
+ wd2 = (s->band[band].sg[i] == s->band[band].sg[0]) ? wd1 : -wd1;
+ wd3 = (s->band[band].b[i]*32640) >> 15;
+ s->band[band].bp[i] = saturate(wd2 + wd3);
+ }
+
+ /* Block 4, DELAYA */
+ for (i = 6; i > 0; i--)
+ {
+ s->band[band].d[i] = s->band[band].d[i - 1];
+ s->band[band].b[i] = s->band[band].bp[i];
+ }
+
+ for (i = 2; i > 0; i--)
+ {
+ s->band[band].r[i] = s->band[band].r[i - 1];
+ s->band[band].p[i] = s->band[band].p[i - 1];
+ s->band[band].a[i] = s->band[band].ap[i];
+ }
+
+ /* Block 4, FILTEP */
+ wd1 = saturate(s->band[band].r[1] + s->band[band].r[1]);
+ wd1 = (s->band[band].a[1]*wd1) >> 15;
+ wd2 = saturate(s->band[band].r[2] + s->band[band].r[2]);
+ wd2 = (s->band[band].a[2]*wd2) >> 15;
+ s->band[band].sp = saturate(wd1 + wd2);
+
+ /* Block 4, FILTEZ */
+ s->band[band].sz = 0;
+ for (i = 6; i > 0; i--)
+ {
+ wd1 = saturate(s->band[band].d[i] + s->band[band].d[i]);
+ s->band[band].sz += (s->band[band].b[i]*wd1) >> 15;
+ }
+ s->band[band].sz = saturate(s->band[band].sz);
+
+ /* Block 4, PREDIC */
+ s->band[band].s = saturate(s->band[band].sp + s->band[band].sz);
+}
+/*- End of function --------------------------------------------------------*/
+
+G722DecoderState* WebRtc_g722_decode_init(G722DecoderState* s,
+ int rate,
+ int options) {
+  s = s ? s : (G722DecoderState*)malloc(sizeof(*s));
+  if (s == NULL)  /* allocation failed */
+    return NULL;
+  memset(s, 0, sizeof(*s));
+ if (rate == 48000)
+ s->bits_per_sample = 6;
+ else if (rate == 56000)
+ s->bits_per_sample = 7;
+ else
+ s->bits_per_sample = 8;
+ if ((options & G722_SAMPLE_RATE_8000))
+ s->eight_k = TRUE;
+ if ((options & G722_PACKED) && s->bits_per_sample != 8)
+ s->packed = TRUE;
+ else
+ s->packed = FALSE;
+ s->band[0].det = 32;
+ s->band[1].det = 8;
+ return s;
+}
+/*- End of function --------------------------------------------------------*/
+
+int WebRtc_g722_decode_release(G722DecoderState *s)
+{
+ free(s);
+ return 0;
+}
+/*- End of function --------------------------------------------------------*/
+
+size_t WebRtc_g722_decode(G722DecoderState *s, int16_t amp[],
+ const uint8_t g722_data[], size_t len)
+{
+ static const int wl[8] = {-60, -30, 58, 172, 334, 538, 1198, 3042 };
+ static const int rl42[16] = {0, 7, 6, 5, 4, 3, 2, 1,
+ 7, 6, 5, 4, 3, 2, 1, 0 };
+ static const int ilb[32] =
+ {
+ 2048, 2093, 2139, 2186, 2233, 2282, 2332,
+ 2383, 2435, 2489, 2543, 2599, 2656, 2714,
+ 2774, 2834, 2896, 2960, 3025, 3091, 3158,
+ 3228, 3298, 3371, 3444, 3520, 3597, 3676,
+ 3756, 3838, 3922, 4008
+ };
+ static const int wh[3] = {0, -214, 798};
+ static const int rh2[4] = {2, 1, 2, 1};
+ static const int qm2[4] = {-7408, -1616, 7408, 1616};
+ static const int qm4[16] =
+ {
+ 0, -20456, -12896, -8968,
+ -6288, -4240, -2584, -1200,
+ 20456, 12896, 8968, 6288,
+ 4240, 2584, 1200, 0
+ };
+ static const int qm5[32] =
+ {
+ -280, -280, -23352, -17560,
+ -14120, -11664, -9752, -8184,
+ -6864, -5712, -4696, -3784,
+ -2960, -2208, -1520, -880,
+ 23352, 17560, 14120, 11664,
+ 9752, 8184, 6864, 5712,
+ 4696, 3784, 2960, 2208,
+ 1520, 880, 280, -280
+ };
+ static const int qm6[64] =
+ {
+ -136, -136, -136, -136,
+ -24808, -21904, -19008, -16704,
+ -14984, -13512, -12280, -11192,
+ -10232, -9360, -8576, -7856,
+ -7192, -6576, -6000, -5456,
+ -4944, -4464, -4008, -3576,
+ -3168, -2776, -2400, -2032,
+ -1688, -1360, -1040, -728,
+ 24808, 21904, 19008, 16704,
+ 14984, 13512, 12280, 11192,
+ 10232, 9360, 8576, 7856,
+ 7192, 6576, 6000, 5456,
+ 4944, 4464, 4008, 3576,
+ 3168, 2776, 2400, 2032,
+ 1688, 1360, 1040, 728,
+ 432, 136, -432, -136
+ };
+ static const int qmf_coeffs[12] =
+ {
+ 3, -11, 12, 32, -210, 951, 3876, -805, 362, -156, 53, -11,
+ };
+
+ int dlowt;
+ int rlow;
+ int ihigh;
+ int dhigh;
+ int rhigh;
+ int xout1;
+ int xout2;
+ int wd1;
+ int wd2;
+ int wd3;
+ int code;
+ size_t outlen;
+ int i;
+ size_t j;
+
+ outlen = 0;
+ rhigh = 0;
+ for (j = 0; j < len; )
+ {
+ if (s->packed)
+ {
+ /* Unpack the code bits */
+ if (s->in_bits < s->bits_per_sample)
+ {
+ s->in_buffer |= (g722_data[j++] << s->in_bits);
+ s->in_bits += 8;
+ }
+ code = s->in_buffer & ((1 << s->bits_per_sample) - 1);
+ s->in_buffer >>= s->bits_per_sample;
+ s->in_bits -= s->bits_per_sample;
+ }
+ else
+ {
+ code = g722_data[j++];
+ }
+
+ switch (s->bits_per_sample)
+ {
+ default:
+ case 8:
+ wd1 = code & 0x3F;
+ ihigh = (code >> 6) & 0x03;
+ wd2 = qm6[wd1];
+ wd1 >>= 2;
+ break;
+ case 7:
+ wd1 = code & 0x1F;
+ ihigh = (code >> 5) & 0x03;
+ wd2 = qm5[wd1];
+ wd1 >>= 1;
+ break;
+ case 6:
+ wd1 = code & 0x0F;
+ ihigh = (code >> 4) & 0x03;
+ wd2 = qm4[wd1];
+ break;
+ }
+ /* Block 5L, LOW BAND INVQBL */
+ wd2 = (s->band[0].det*wd2) >> 15;
+ /* Block 5L, RECONS */
+ rlow = s->band[0].s + wd2;
+ /* Block 6L, LIMIT */
+ if (rlow > 16383)
+ rlow = 16383;
+ else if (rlow < -16384)
+ rlow = -16384;
+
+ /* Block 2L, INVQAL */
+ wd2 = qm4[wd1];
+ dlowt = (s->band[0].det*wd2) >> 15;
+
+ /* Block 3L, LOGSCL */
+ wd2 = rl42[wd1];
+ wd1 = (s->band[0].nb*127) >> 7;
+ wd1 += wl[wd2];
+ if (wd1 < 0)
+ wd1 = 0;
+ else if (wd1 > 18432)
+ wd1 = 18432;
+ s->band[0].nb = wd1;
+
+ /* Block 3L, SCALEL */
+ wd1 = (s->band[0].nb >> 6) & 31;
+ wd2 = 8 - (s->band[0].nb >> 11);
+ wd3 = (wd2 < 0) ? (ilb[wd1] << -wd2) : (ilb[wd1] >> wd2);
+ s->band[0].det = wd3 << 2;
+
+ block4(s, 0, dlowt);
+
+ if (!s->eight_k)
+ {
+ /* Block 2H, INVQAH */
+ wd2 = qm2[ihigh];
+ dhigh = (s->band[1].det*wd2) >> 15;
+ /* Block 5H, RECONS */
+ rhigh = dhigh + s->band[1].s;
+ /* Block 6H, LIMIT */
+ if (rhigh > 16383)
+ rhigh = 16383;
+ else if (rhigh < -16384)
+ rhigh = -16384;
+
+ /* Block 2H, INVQAH */
+ wd2 = rh2[ihigh];
+ wd1 = (s->band[1].nb*127) >> 7;
+ wd1 += wh[wd2];
+ if (wd1 < 0)
+ wd1 = 0;
+ else if (wd1 > 22528)
+ wd1 = 22528;
+ s->band[1].nb = wd1;
+
+ /* Block 3H, SCALEH */
+ wd1 = (s->band[1].nb >> 6) & 31;
+ wd2 = 10 - (s->band[1].nb >> 11);
+ wd3 = (wd2 < 0) ? (ilb[wd1] << -wd2) : (ilb[wd1] >> wd2);
+ s->band[1].det = wd3 << 2;
+
+ block4(s, 1, dhigh);
+ }
+
+ if (s->itu_test_mode)
+ {
+ amp[outlen++] = (int16_t) (rlow << 1);
+ amp[outlen++] = (int16_t) (rhigh << 1);
+ }
+ else
+ {
+ if (s->eight_k)
+ {
+ amp[outlen++] = (int16_t) (rlow << 1);
+ }
+ else
+ {
+ /* Apply the receive QMF */
+ for (i = 0; i < 22; i++)
+ s->x[i] = s->x[i + 2];
+ s->x[22] = rlow + rhigh;
+ s->x[23] = rlow - rhigh;
+
+ xout1 = 0;
+ xout2 = 0;
+ for (i = 0; i < 12; i++)
+ {
+ xout2 += s->x[2*i]*qmf_coeffs[i];
+ xout1 += s->x[2*i + 1]*qmf_coeffs[11 - i];
+ }
+ /* We shift by 12 to allow for the QMF filters (DC gain = 4096), less 1
+ to allow for the 15 bit input to the G.722 algorithm. */
+ /* WebRtc, tlegrand: added saturation */
+ amp[outlen++] = saturate(xout1 >> 11);
+ amp[outlen++] = saturate(xout2 >> 11);
+ }
+ }
+ }
+ return outlen;
+}
+/*- End of function --------------------------------------------------------*/
+/*- End of file ------------------------------------------------------------*/
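A minimal decode-call sketch for the API defined in this file (buffer sizes
are illustrative; in the default 64000bps wideband mode, each input byte
yields two 16 kHz output samples):

    /* Sketch: decoding G.722 at 64000bps. */
    G722DecoderState* st = WebRtc_g722_decode_init(NULL, 64000, 0);
    if (st != NULL) {
      uint8_t g722_bytes[160];  /* e.g. 10 ms of a 64000bps stream */
      int16_t pcm[320];         /* two samples out per byte in */
      size_t n;
      /* ... fill g722_bytes from the payload ... */
      n = WebRtc_g722_decode(st, pcm, g722_bytes, sizeof(g722_bytes));
      /* n is the number of int16_t samples written to pcm (320 here) */
      WebRtc_g722_decode_release(st);
    }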
diff --git a/third_party/libwebrtc/modules/third_party/g722/g722_enc_dec.h b/third_party/libwebrtc/modules/third_party/g722/g722_enc_dec.h
new file mode 100644
index 0000000000..898fa279cc
--- /dev/null
+++ b/third_party/libwebrtc/modules/third_party/g722/g722_enc_dec.h
@@ -0,0 +1,153 @@
+/*
+ * SpanDSP - a series of DSP components for telephony
+ *
+ * g722.h - The ITU G.722 codec.
+ *
+ * Written by Steve Underwood <steveu@coppice.org>
+ *
+ * Copyright (C) 2005 Steve Underwood
+ *
+ * Despite my general liking of the GPL, I place my own contributions
+ * to this code in the public domain for the benefit of all mankind -
+ * even the slimy ones who might try to proprietize my work and use it
+ * to my detriment.
+ *
+ * Based on a single channel G.722 codec which is:
+ *
+ ***** Copyright (c) CMU 1993 *****
+ * Computer Science, Speech Group
+ * Chengxiang Lu and Alex Hauptmann
+ *
+ * $Id: g722.h,v 1.10 2006/06/16 12:45:53 steveu Exp $
+ *
+ * Modifications for WebRtc, 2011/04/28, by tlegrand:
+ * -Changed to use WebRtc types
+ * -Added new defines for minimum and maximum values of short int
+ */
+
+/*! \file */
+
+#ifndef MODULES_THIRD_PARTY_G722_G722_H_
+#define MODULES_THIRD_PARTY_G722_G722_H_
+
+#include <stdint.h>
+
+/*! \page g722_page G.722 encoding and decoding
+\section g722_page_sec_1 What does it do?
+The G.722 module is a bit-exact implementation of the ITU G.722 specification
+for all three specified bit rates - 64000bps, 56000bps and 48000bps. It passes
+the ITU tests.
+
+To allow fast and flexible interworking with narrow band telephony, the encoder
+and decoder support an option for the linear audio to be an 8k samples/second
+stream. In this mode the codec is considerably faster, and still fully
+compatible with wideband terminals using G.722.
+
+\section g722_page_sec_2 How does it work?
+The codec uses a quadrature mirror filter (QMF) pair to split the input into
+a low and a high sub-band, and codes each band with backward-adaptive ADPCM:
+6, 5 or 4 bits per low-band sample, depending on the bit rate, and 2 bits per
+high-band sample.
+*/
+
+#define WEBRTC_INT16_MAX 32767
+#define WEBRTC_INT16_MIN -32768
+
+enum { G722_SAMPLE_RATE_8000 = 0x0001, G722_PACKED = 0x0002 };
+
+typedef struct {
+  /*! TRUE if operating in the special ITU test mode, with the band split
+     filters disabled. */
+  int itu_test_mode;
+  /*! TRUE if the G.722 data is packed */
+  int packed;
+  /*! TRUE if encoding from an 8k samples/second stream */
+  int eight_k;
+  /*! 6 for 48000bps, 7 for 56000bps, or 8 for 64000bps. */
+ int bits_per_sample;
+
+ /*! Signal history for the QMF */
+ int x[24];
+
+ struct {
+ int s;
+ int sp;
+ int sz;
+ int r[3];
+ int a[3];
+ int ap[3];
+ int p[3];
+ int d[7];
+ int b[7];
+ int bp[7];
+ int sg[7];
+ int nb;
+ int det;
+ } band[2];
+
+ unsigned int in_buffer;
+ int in_bits;
+ unsigned int out_buffer;
+ int out_bits;
+} G722EncoderState;
+
+typedef struct {
+  /*! TRUE if operating in the special ITU test mode, with the band split
+     filters disabled. */
+  int itu_test_mode;
+  /*! TRUE if the G.722 data is packed */
+  int packed;
+  /*! TRUE if decoding to an 8k samples/second stream */
+  int eight_k;
+  /*! 6 for 48000bps, 7 for 56000bps, or 8 for 64000bps. */
+ int bits_per_sample;
+
+ /*! Signal history for the QMF */
+ int x[24];
+
+ struct {
+ int s;
+ int sp;
+ int sz;
+ int r[3];
+ int a[3];
+ int ap[3];
+ int p[3];
+ int d[7];
+ int b[7];
+ int bp[7];
+ int sg[7];
+ int nb;
+ int det;
+ } band[2];
+
+ unsigned int in_buffer;
+ int in_bits;
+ unsigned int out_buffer;
+ int out_bits;
+} G722DecoderState;
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+G722EncoderState* WebRtc_g722_encode_init(G722EncoderState* s,
+ int rate,
+ int options);
+int WebRtc_g722_encode_release(G722EncoderState* s);
+size_t WebRtc_g722_encode(G722EncoderState* s,
+ uint8_t g722_data[],
+ const int16_t amp[],
+ size_t len);
+
+G722DecoderState* WebRtc_g722_decode_init(G722DecoderState* s,
+ int rate,
+ int options);
+int WebRtc_g722_decode_release(G722DecoderState* s);
+size_t WebRtc_g722_decode(G722DecoderState* s,
+ int16_t amp[],
+ const uint8_t g722_data[],
+ size_t len);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* MODULES_THIRD_PARTY_G722_G722_H_ */
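The options argument to the init functions is a bit mask of the enum flags
declared above. A sketch of a packed, narrowband 56000bps encoder (the flag
combination is chosen purely for illustration):

    /* Sketch: G722_PACKED only has an effect when bits_per_sample != 8,
       i.e. at 48000 or 56000 bps. */
    G722EncoderState* enc =
        WebRtc_g722_encode_init(NULL, 56000,
                                G722_SAMPLE_RATE_8000 | G722_PACKED);
    if (enc != NULL) {
      /* enc->bits_per_sample == 7, enc->eight_k == TRUE,
         enc->packed == TRUE */
      WebRtc_g722_encode_release(enc);
    }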
diff --git a/third_party/libwebrtc/modules/third_party/g722/g722_encode.c b/third_party/libwebrtc/modules/third_party/g722/g722_encode.c
new file mode 100644
index 0000000000..fedf9f5961
--- /dev/null
+++ b/third_party/libwebrtc/modules/third_party/g722/g722_encode.c
@@ -0,0 +1,429 @@
+/*
+ * SpanDSP - a series of DSP components for telephony
+ *
+ * g722_encode.c - The ITU G.722 codec, encode part.
+ *
+ * Written by Steve Underwood <steveu@coppice.org>
+ *
+ * Copyright (C) 2005 Steve Underwood
+ *
+ * All rights reserved.
+ *
+ * Despite my general liking of the GPL, I place my own contributions
+ * to this code in the public domain for the benefit of all mankind -
+ * even the slimy ones who might try to proprietize my work and use it
+ * to my detriment.
+ *
+ * Based on a single channel 64kbps only G.722 codec which is:
+ *
+ ***** Copyright (c) CMU 1993 *****
+ * Computer Science, Speech Group
+ * Chengxiang Lu and Alex Hauptmann
+ *
+ * $Id: g722_encode.c,v 1.14 2006/07/07 16:37:49 steveu Exp $
+ *
+ * Modifications for WebRtc, 2011/04/28, by tlegrand:
+ * -Removed usage of inttypes.h and tgmath.h
+ * -Changed to use WebRtc types
+ * -Added option to run encoder bitexact with ITU-T reference implementation
+ */
+
+/*! \file */
+
+#include <memory.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "modules/third_party/g722/g722_enc_dec.h"
+
+#if !defined(FALSE)
+#define FALSE 0
+#endif
+#if !defined(TRUE)
+#define TRUE (!FALSE)
+#endif
+
+static __inline int16_t saturate(int32_t amp)
+{
+ int16_t amp16;
+
+ /* Hopefully this is optimised for the common case - not clipping */
+ amp16 = (int16_t) amp;
+ if (amp == amp16)
+ return amp16;
+ if (amp > WEBRTC_INT16_MAX)
+ return WEBRTC_INT16_MAX;
+ return WEBRTC_INT16_MIN;
+}
+/*- End of function --------------------------------------------------------*/
+
+static void block4(G722EncoderState *s, int band, int d)
+{
+ int wd1;
+ int wd2;
+ int wd3;
+ int i;
+
+ /* Block 4, RECONS */
+ s->band[band].d[0] = d;
+ s->band[band].r[0] = saturate(s->band[band].s + d);
+
+ /* Block 4, PARREC */
+ s->band[band].p[0] = saturate(s->band[band].sz + d);
+
+ /* Block 4, UPPOL2 */
+ for (i = 0; i < 3; i++)
+ s->band[band].sg[i] = s->band[band].p[i] >> 15;
+ wd1 = saturate(s->band[band].a[1] * 4);
+
+ wd2 = (s->band[band].sg[0] == s->band[band].sg[1]) ? -wd1 : wd1;
+ if (wd2 > 32767)
+ wd2 = 32767;
+ wd3 = (wd2 >> 7) + ((s->band[band].sg[0] == s->band[band].sg[2]) ? 128 : -128);
+ wd3 += (s->band[band].a[2]*32512) >> 15;
+ if (wd3 > 12288)
+ wd3 = 12288;
+ else if (wd3 < -12288)
+ wd3 = -12288;
+ s->band[band].ap[2] = wd3;
+
+ /* Block 4, UPPOL1 */
+ s->band[band].sg[0] = s->band[band].p[0] >> 15;
+ s->band[band].sg[1] = s->band[band].p[1] >> 15;
+ wd1 = (s->band[band].sg[0] == s->band[band].sg[1]) ? 192 : -192;
+ wd2 = (s->band[band].a[1]*32640) >> 15;
+
+ s->band[band].ap[1] = saturate(wd1 + wd2);
+ wd3 = saturate(15360 - s->band[band].ap[2]);
+ if (s->band[band].ap[1] > wd3)
+ s->band[band].ap[1] = wd3;
+ else if (s->band[band].ap[1] < -wd3)
+ s->band[band].ap[1] = -wd3;
+
+ /* Block 4, UPZERO */
+ wd1 = (d == 0) ? 0 : 128;
+ s->band[band].sg[0] = d >> 15;
+ for (i = 1; i < 7; i++)
+ {
+ s->band[band].sg[i] = s->band[band].d[i] >> 15;
+ wd2 = (s->band[band].sg[i] == s->band[band].sg[0]) ? wd1 : -wd1;
+ wd3 = (s->band[band].b[i]*32640) >> 15;
+ s->band[band].bp[i] = saturate(wd2 + wd3);
+ }
+
+ /* Block 4, DELAYA */
+ for (i = 6; i > 0; i--)
+ {
+ s->band[band].d[i] = s->band[band].d[i - 1];
+ s->band[band].b[i] = s->band[band].bp[i];
+ }
+
+ for (i = 2; i > 0; i--)
+ {
+ s->band[band].r[i] = s->band[band].r[i - 1];
+ s->band[band].p[i] = s->band[band].p[i - 1];
+ s->band[band].a[i] = s->band[band].ap[i];
+ }
+
+ /* Block 4, FILTEP */
+ wd1 = saturate(s->band[band].r[1] + s->band[band].r[1]);
+ wd1 = (s->band[band].a[1]*wd1) >> 15;
+ wd2 = saturate(s->band[band].r[2] + s->band[band].r[2]);
+ wd2 = (s->band[band].a[2]*wd2) >> 15;
+ s->band[band].sp = saturate(wd1 + wd2);
+
+ /* Block 4, FILTEZ */
+ s->band[band].sz = 0;
+ for (i = 6; i > 0; i--)
+ {
+ wd1 = saturate(s->band[band].d[i] + s->band[band].d[i]);
+ s->band[band].sz += (s->band[band].b[i]*wd1) >> 15;
+ }
+ s->band[band].sz = saturate(s->band[band].sz);
+
+ /* Block 4, PREDIC */
+ s->band[band].s = saturate(s->band[band].sp + s->band[band].sz);
+}
+/*- End of function --------------------------------------------------------*/
+
+G722EncoderState* WebRtc_g722_encode_init(G722EncoderState* s,
+ int rate,
+ int options) {
+ if (s == NULL)
+ {
+ if ((s = (G722EncoderState *) malloc(sizeof(*s))) == NULL)
+ return NULL;
+ }
+ memset(s, 0, sizeof(*s));
+ if (rate == 48000)
+ s->bits_per_sample = 6;
+ else if (rate == 56000)
+ s->bits_per_sample = 7;
+ else
+ s->bits_per_sample = 8;
+ if ((options & G722_SAMPLE_RATE_8000))
+ s->eight_k = TRUE;
+ if ((options & G722_PACKED) && s->bits_per_sample != 8)
+ s->packed = TRUE;
+ else
+ s->packed = FALSE;
+ s->band[0].det = 32;
+ s->band[1].det = 8;
+ return s;
+}
+/*- End of function --------------------------------------------------------*/
+
+int WebRtc_g722_encode_release(G722EncoderState *s)
+{
+ free(s);
+ return 0;
+}
+/*- End of function --------------------------------------------------------*/
+
+/* WebRtc, tlegrand:
+ * Only define the following if bit-exactness with reference implementation
+ * is needed. Will only have any effect if input signal is saturated.
+ */
+//#define RUN_LIKE_REFERENCE_G722
+#ifdef RUN_LIKE_REFERENCE_G722
+int16_t limitValues (int16_t rl)
+{
+
+ int16_t yl;
+
+ yl = (rl > 16383) ? 16383 : ((rl < -16384) ? -16384 : rl);
+
+ return (yl);
+}
+#endif
+
+size_t WebRtc_g722_encode(G722EncoderState *s, uint8_t g722_data[],
+ const int16_t amp[], size_t len)
+{
+ static const int q6[32] =
+ {
+ 0, 35, 72, 110, 150, 190, 233, 276,
+ 323, 370, 422, 473, 530, 587, 650, 714,
+ 786, 858, 940, 1023, 1121, 1219, 1339, 1458,
+ 1612, 1765, 1980, 2195, 2557, 2919, 0, 0
+ };
+ static const int iln[32] =
+ {
+ 0, 63, 62, 31, 30, 29, 28, 27,
+ 26, 25, 24, 23, 22, 21, 20, 19,
+ 18, 17, 16, 15, 14, 13, 12, 11,
+ 10, 9, 8, 7, 6, 5, 4, 0
+ };
+ static const int ilp[32] =
+ {
+ 0, 61, 60, 59, 58, 57, 56, 55,
+ 54, 53, 52, 51, 50, 49, 48, 47,
+ 46, 45, 44, 43, 42, 41, 40, 39,
+ 38, 37, 36, 35, 34, 33, 32, 0
+ };
+ static const int wl[8] =
+ {
+ -60, -30, 58, 172, 334, 538, 1198, 3042
+ };
+ static const int rl42[16] =
+ {
+ 0, 7, 6, 5, 4, 3, 2, 1, 7, 6, 5, 4, 3, 2, 1, 0
+ };
+ static const int ilb[32] =
+ {
+ 2048, 2093, 2139, 2186, 2233, 2282, 2332,
+ 2383, 2435, 2489, 2543, 2599, 2656, 2714,
+ 2774, 2834, 2896, 2960, 3025, 3091, 3158,
+ 3228, 3298, 3371, 3444, 3520, 3597, 3676,
+ 3756, 3838, 3922, 4008
+ };
+ static const int qm4[16] =
+ {
+ 0, -20456, -12896, -8968,
+ -6288, -4240, -2584, -1200,
+ 20456, 12896, 8968, 6288,
+ 4240, 2584, 1200, 0
+ };
+ static const int qm2[4] =
+ {
+ -7408, -1616, 7408, 1616
+ };
+ static const int qmf_coeffs[12] =
+ {
+ 3, -11, 12, 32, -210, 951, 3876, -805, 362, -156, 53, -11,
+ };
+ static const int ihn[3] = {0, 1, 0};
+ static const int ihp[3] = {0, 3, 2};
+ static const int wh[3] = {0, -214, 798};
+ static const int rh2[4] = {2, 1, 2, 1};
+
+ int dlow;
+ int dhigh;
+ int el;
+ int wd;
+ int wd1;
+ int ril;
+ int wd2;
+ int il4;
+ int ih2;
+ int wd3;
+ int eh;
+ int mih;
+ int i;
+ size_t j;
+ /* Low and high band PCM from the QMF */
+ int xlow;
+ int xhigh;
+ size_t g722_bytes;
+ /* Even and odd tap accumulators */
+ int sumeven;
+ int sumodd;
+ int ihigh;
+ int ilow;
+ int code;
+
+ g722_bytes = 0;
+ xhigh = 0;
+ for (j = 0; j < len; )
+ {
+ if (s->itu_test_mode)
+ {
+ xlow =
+ xhigh = amp[j++] >> 1;
+ }
+ else
+ {
+ if (s->eight_k)
+ {
+ /* We shift by 1 to allow for the 15 bit input to the G.722 algorithm. */
+ xlow = amp[j++] >> 1;
+ }
+ else
+ {
+ /* Apply the transmit QMF */
+ /* Shuffle the buffer down */
+ for (i = 0; i < 22; i++)
+ s->x[i] = s->x[i + 2];
+ s->x[22] = amp[j++];
+ s->x[23] = amp[j++];
+
+ /* Discard every other QMF output */
+ sumeven = 0;
+ sumodd = 0;
+ for (i = 0; i < 12; i++)
+ {
+ sumodd += s->x[2*i]*qmf_coeffs[i];
+ sumeven += s->x[2*i + 1]*qmf_coeffs[11 - i];
+ }
+ /* We shift by 12 to allow for the QMF filters (DC gain = 4096), plus 1
+ to allow for us summing two filters, plus 1 to allow for the 15 bit
+ input to the G.722 algorithm. */
+ xlow = (sumeven + sumodd) >> 14;
+ xhigh = (sumeven - sumodd) >> 14;
+
+#ifdef RUN_LIKE_REFERENCE_G722
+ /* The following lines are only used to verify bit-exactness
+ * with reference implementation of G.722. Higher precision
+ * is achieved without limiting the values.
+ */
+ xlow = limitValues(xlow);
+ xhigh = limitValues(xhigh);
+#endif
+ }
+ }
+ /* Block 1L, SUBTRA */
+ el = saturate(xlow - s->band[0].s);
+
+ /* Block 1L, QUANTL */
+ wd = (el >= 0) ? el : -(el + 1);
+
+ for (i = 1; i < 30; i++)
+ {
+ wd1 = (q6[i]*s->band[0].det) >> 12;
+ if (wd < wd1)
+ break;
+ }
+ ilow = (el < 0) ? iln[i] : ilp[i];
+
+ /* Block 2L, INVQAL */
+ ril = ilow >> 2;
+ wd2 = qm4[ril];
+ dlow = (s->band[0].det*wd2) >> 15;
+
+ /* Block 3L, LOGSCL */
+ il4 = rl42[ril];
+ wd = (s->band[0].nb*127) >> 7;
+ s->band[0].nb = wd + wl[il4];
+ if (s->band[0].nb < 0)
+ s->band[0].nb = 0;
+ else if (s->band[0].nb > 18432)
+ s->band[0].nb = 18432;
+
+ /* Block 3L, SCALEL */
+ wd1 = (s->band[0].nb >> 6) & 31;
+ wd2 = 8 - (s->band[0].nb >> 11);
+ wd3 = (wd2 < 0) ? (ilb[wd1] << -wd2) : (ilb[wd1] >> wd2);
+ s->band[0].det = wd3 << 2;
+
+ block4(s, 0, dlow);
+
+ if (s->eight_k)
+ {
+ /* Just leave the high bits as zero */
+ code = (0xC0 | ilow) >> (8 - s->bits_per_sample);
+ }
+ else
+ {
+ /* Block 1H, SUBTRA */
+ eh = saturate(xhigh - s->band[1].s);
+
+ /* Block 1H, QUANTH */
+ wd = (eh >= 0) ? eh : -(eh + 1);
+ wd1 = (564*s->band[1].det) >> 12;
+ mih = (wd >= wd1) ? 2 : 1;
+ ihigh = (eh < 0) ? ihn[mih] : ihp[mih];
+
+ /* Block 2H, INVQAH */
+ wd2 = qm2[ihigh];
+ dhigh = (s->band[1].det*wd2) >> 15;
+
+ /* Block 3H, LOGSCH */
+ ih2 = rh2[ihigh];
+ wd = (s->band[1].nb*127) >> 7;
+ s->band[1].nb = wd + wh[ih2];
+ if (s->band[1].nb < 0)
+ s->band[1].nb = 0;
+ else if (s->band[1].nb > 22528)
+ s->band[1].nb = 22528;
+
+ /* Block 3H, SCALEH */
+ wd1 = (s->band[1].nb >> 6) & 31;
+ wd2 = 10 - (s->band[1].nb >> 11);
+ wd3 = (wd2 < 0) ? (ilb[wd1] << -wd2) : (ilb[wd1] >> wd2);
+ s->band[1].det = wd3 << 2;
+
+ block4(s, 1, dhigh);
+ code = ((ihigh << 6) | ilow) >> (8 - s->bits_per_sample);
+ }
+
+ if (s->packed)
+ {
+ /* Pack the code bits */
+ s->out_buffer |= (code << s->out_bits);
+ s->out_bits += s->bits_per_sample;
+ if (s->out_bits >= 8)
+ {
+ g722_data[g722_bytes++] = (uint8_t) (s->out_buffer & 0xFF);
+ s->out_bits -= 8;
+ s->out_buffer >>= 8;
+ }
+ }
+ else
+ {
+ g722_data[g722_bytes++] = (uint8_t) code;
+ }
+ }
+ return g722_bytes;
+}
+/*- End of function --------------------------------------------------------*/
+/*- End of file ------------------------------------------------------------*/
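The matching encode-call sketch (sizes illustrative; at 64000bps the unpacked
encoder emits one byte per pair of 16 kHz input samples):

    /* Sketch: encoding 10 ms of 16 kHz PCM at 64000bps. */
    G722EncoderState* st = WebRtc_g722_encode_init(NULL, 64000, 0);
    if (st != NULL) {
      int16_t pcm[320];   /* 320 samples = 10 ms at 16 kHz */
      uint8_t out[160];   /* one byte per two input samples */
      size_t bytes;
      /* ... fill pcm with audio ... */
      bytes = WebRtc_g722_encode(st, out, pcm, 320);
      /* bytes == 160 in this mode */
      WebRtc_g722_encode_release(st);
    }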
diff --git a/third_party/libwebrtc/modules/third_party/portaudio/BUILD.gn b/third_party/libwebrtc/modules/third_party/portaudio/BUILD.gn
new file mode 100644
index 0000000000..c49c544e9d
--- /dev/null
+++ b/third_party/libwebrtc/modules/third_party/portaudio/BUILD.gn
@@ -0,0 +1,18 @@
+# Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the ../../../LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+import("../../../webrtc.gni")
+
+rtc_library("mac_portaudio") {
+ visibility = [ "../../audio_device:*" ]
+ sources = [
+ "pa_memorybarrier.h",
+ "pa_ringbuffer.c",
+ "pa_ringbuffer.h",
+ ]
+}
diff --git a/third_party/libwebrtc/modules/third_party/portaudio/LICENSE b/third_party/libwebrtc/modules/third_party/portaudio/LICENSE
new file mode 100644
index 0000000000..6ccaca2910
--- /dev/null
+++ b/third_party/libwebrtc/modules/third_party/portaudio/LICENSE
@@ -0,0 +1,91 @@
+/*
+ * $Id: pa_memorybarrier.h 1240 2007-07-17 13:05:07Z bjornroche $
+ * Portable Audio I/O Library
+ * Memory barrier utilities
+ *
+ * Author: Bjorn Roche, XO Audio, LLC
+ *
+ * This program uses the PortAudio Portable Audio Library.
+ * For more information see: http://www.portaudio.com
+ * Copyright (c) 1999-2000 Ross Bencina and Phil Burk
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
+ * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * The text above constitutes the entire PortAudio license; however,
+ * the PortAudio community also makes the following non-binding requests:
+ *
+ * Any person wishing to distribute modifications to the Software is
+ * requested to send the modifications to the original developer so that
+ * they can be incorporated into the canonical version. It is also
+ * requested that these non-binding requests be included along with the
+ * license above.
+ */
+
+/*
+ * $Id: pa_ringbuffer.c 1421 2009-11-18 16:09:05Z bjornroche $
+ * Portable Audio I/O Library
+ * Ring Buffer utility.
+ *
+ * Author: Phil Burk, http://www.softsynth.com
+ * modified for SMP safety on Mac OS X by Bjorn Roche
+ * modified for SMP safety on Linux by Leland Lucius
+ * also, allowed for const where possible
+ * modified for multiple-byte-sized data elements by Sven Fischer
+ *
+ * Note that this is safe only for a single-thread reader and a
+ * single-thread writer.
+ *
+ * This program uses the PortAudio Portable Audio Library.
+ * For more information see: http://www.portaudio.com
+ * Copyright (c) 1999-2000 Ross Bencina and Phil Burk
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
+ * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * The text above constitutes the entire PortAudio license; however,
+ * the PortAudio community also makes the following non-binding requests:
+ *
+ * Any person wishing to distribute modifications to the Software is
+ * requested to send the modifications to the original developer so that
+ * they can be incorporated into the canonical version. It is also
+ * requested that these non-binding requests be included along with the
+ * license above.
+ */
+
diff --git a/third_party/libwebrtc/modules/third_party/portaudio/README.chromium b/third_party/libwebrtc/modules/third_party/portaudio/README.chromium
new file mode 100644
index 0000000000..722dd94345
--- /dev/null
+++ b/third_party/libwebrtc/modules/third_party/portaudio/README.chromium
@@ -0,0 +1,14 @@
+Name: Portaudio library for mac
+Short Name: portaudio
+URL: https://github.com/PortAudio/portaudio/tree/master/src/common
+Version: 9d8563100d841300f1689b186d131347ad43a0f6
+Date: 2022-04-12
+License: Custom license
+License File: LICENSE
+Security Critical: yes
+
+Description:
+Part of portaudio library to operate with memory barriers and ring buffer.
+
+Local changes:
+ - Minor formatting to make 'git cl format' happy.
diff --git a/third_party/libwebrtc/modules/third_party/portaudio/pa_memorybarrier.h b/third_party/libwebrtc/modules/third_party/portaudio/pa_memorybarrier.h
new file mode 100644
index 0000000000..f8c1852f4e
--- /dev/null
+++ b/third_party/libwebrtc/modules/third_party/portaudio/pa_memorybarrier.h
@@ -0,0 +1,144 @@
+/*
+ * $Id: pa_memorybarrier.h 1240 2007-07-17 13:05:07Z bjornroche $
+ * Portable Audio I/O Library
+ * Memory barrier utilities
+ *
+ * Author: Bjorn Roche, XO Audio, LLC
+ *
+ * This program uses the PortAudio Portable Audio Library.
+ * For more information see: http://www.portaudio.com
+ * Copyright (c) 1999-2000 Ross Bencina and Phil Burk
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
+ * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * The text above constitutes the entire PortAudio license; however,
+ * the PortAudio community also makes the following non-binding requests:
+ *
+ * Any person wishing to distribute modifications to the Software is
+ * requested to send the modifications to the original developer so that
+ * they can be incorporated into the canonical version. It is also
+ * requested that these non-binding requests be included along with the
+ * license above.
+ */
+
+/**
+ @file pa_memorybarrier.h
+ @ingroup common_src
+*/
+
+/****************
+ * Some memory barrier primitives based on the system.
+ * Right now only OS X, FreeBSD, and Linux are supported. In addition to
+ * providing memory barriers, these functions should ensure that data cached
+ * in registers is written out to cache where it can be snooped by other
+ * CPUs. (i.e., the volatile keyword should not be required.)
+ *
+ * The primitives that must be defined are:
+ *
+ * PaUtil_FullMemoryBarrier()
+ * PaUtil_ReadMemoryBarrier()
+ * PaUtil_WriteMemoryBarrier()
+ *
+ ****************/
+
+#ifndef MODULES_THIRD_PARTY_PORTAUDIO_PA_MEMORYBARRIER_H_
+#define MODULES_THIRD_PARTY_PORTAUDIO_PA_MEMORYBARRIER_H_
+
+#if defined(__APPLE__)
+/* Support for the atomic library was added in C11.
+ */
+#if (__STDC_VERSION__ < 201112L) || defined(__STDC_NO_ATOMICS__)
+#include <libkern/OSAtomic.h>
+/* Here are the memory barrier functions. Mac OS X only provides
+   full memory barriers, so the three types of barriers are the same;
+   however, these barriers are superior to compiler-based ones.
+   These were deprecated in MacOS 10.12. */
+#define PaUtil_FullMemoryBarrier() OSMemoryBarrier()
+#define PaUtil_ReadMemoryBarrier() OSMemoryBarrier()
+#define PaUtil_WriteMemoryBarrier() OSMemoryBarrier()
+#else
+#include <stdatomic.h>
+#define PaUtil_FullMemoryBarrier() atomic_thread_fence(memory_order_seq_cst)
+#define PaUtil_ReadMemoryBarrier() atomic_thread_fence(memory_order_acquire)
+#define PaUtil_WriteMemoryBarrier() atomic_thread_fence(memory_order_release)
+#endif
+#elif defined(__GNUC__)
+/* GCC >= 4.1 has built-in intrinsics. We'll use those */
+#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1)
+#define PaUtil_FullMemoryBarrier() __sync_synchronize()
+#define PaUtil_ReadMemoryBarrier() __sync_synchronize()
+#define PaUtil_WriteMemoryBarrier() __sync_synchronize()
+/* as a fallback, GCC understands volatile asm and "memory" to mean it
+ * should not reorder memory read/writes */
+/* Note that it is not clear that any compiler actually defines __PPC__,
+ * so it can probably be removed safely. */
+#elif defined(__ppc__) || defined(__powerpc__) || defined(__PPC__)
+#define PaUtil_FullMemoryBarrier() asm volatile("sync" ::: "memory")
+#define PaUtil_ReadMemoryBarrier() asm volatile("sync" ::: "memory")
+#define PaUtil_WriteMemoryBarrier() asm volatile("sync" ::: "memory")
+#elif defined(__i386__) || defined(__i486__) || defined(__i586__) || \
+ defined(__i686__) || defined(__x86_64__)
+#define PaUtil_FullMemoryBarrier() asm volatile("mfence" ::: "memory")
+#define PaUtil_ReadMemoryBarrier() asm volatile("lfence" ::: "memory")
+#define PaUtil_WriteMemoryBarrier() asm volatile("sfence" ::: "memory")
+#else
+#ifdef ALLOW_SMP_DANGERS
+#warning Memory barriers not defined on this system or system unknown
+#warning For SMP safety, you should fix this.
+#define PaUtil_FullMemoryBarrier()
+#define PaUtil_ReadMemoryBarrier()
+#define PaUtil_WriteMemoryBarrier()
+#else
+# error Memory barriers are not defined on this system. You can still compile by defining ALLOW_SMP_DANGERS, but SMP safety will not be guaranteed.
+#endif
+#endif
+#elif (_MSC_VER >= 1400) && !defined(_WIN32_WCE)
+#include <intrin.h>
+#pragma intrinsic(_ReadWriteBarrier)
+#pragma intrinsic(_ReadBarrier)
+#pragma intrinsic(_WriteBarrier)
+/* note that MSVC intrinsics _ReadWriteBarrier(), _ReadBarrier(),
+ * _WriteBarrier() are just compiler barriers *not* memory barriers */
+#define PaUtil_FullMemoryBarrier() _ReadWriteBarrier()
+#define PaUtil_ReadMemoryBarrier() _ReadBarrier()
+#define PaUtil_WriteMemoryBarrier() _WriteBarrier()
+#elif defined(_WIN32_WCE)
+#define PaUtil_FullMemoryBarrier()
+#define PaUtil_ReadMemoryBarrier()
+#define PaUtil_WriteMemoryBarrier()
+#elif defined(_MSC_VER) || defined(__BORLANDC__)
+#define PaUtil_FullMemoryBarrier() _asm { lock add [esp], 0}
+#define PaUtil_ReadMemoryBarrier() _asm { lock add [esp], 0}
+#define PaUtil_WriteMemoryBarrier() _asm { lock add [esp], 0}
+#else
+#ifdef ALLOW_SMP_DANGERS
+#warning Memory barriers not defined on this system or system unknown
+#warning For SMP safety, you should fix this.
+#define PaUtil_FullMemoryBarrier()
+#define PaUtil_ReadMemoryBarrier()
+#define PaUtil_WriteMemoryBarrier()
+#else
+# error Memory barriers are not defined on this system. You can still compile by defining ALLOW_SMP_DANGERS, but SMP safety will not be guaranteed.
+#endif
+#endif
+
+#endif /* MODULES_THIRD_PARTY_PORTAUDIO_PA_MEMORYBARRIER_H_ */
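The single-writer/single-reader pattern these macros are meant to support
looks roughly like the following sketch (ready and payload are hypothetical
shared variables, not part of the patch):

    /* Sketch: publish/consume ordering with the PaUtil barriers. */
    #include "pa_memorybarrier.h"

    static int payload;
    static int ready;

    static void producer(void) {
      payload = 42;                 /* write the data first */
      PaUtil_WriteMemoryBarrier();  /* publish it before raising the flag */
      ready = 1;
    }

    static int consumer(void) {
      if (ready) {
        PaUtil_ReadMemoryBarrier(); /* don't read payload before the flag */
        return payload;             /* observes 42 */
      }
      return -1;                    /* nothing published yet */
    }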
diff --git a/third_party/libwebrtc/modules/third_party/portaudio/pa_ringbuffer.c b/third_party/libwebrtc/modules/third_party/portaudio/pa_ringbuffer.c
new file mode 100644
index 0000000000..b978d54f19
--- /dev/null
+++ b/third_party/libwebrtc/modules/third_party/portaudio/pa_ringbuffer.c
@@ -0,0 +1,237 @@
+/*
+ * $Id$
+ * Portable Audio I/O Library
+ * Ring Buffer utility.
+ *
+ * Author: Phil Burk, http://www.softsynth.com
+ * modified for SMP safety on Mac OS X by Bjorn Roche
+ * modified for SMP safety on Linux by Leland Lucius
+ * also, allowed for const where possible
+ * modified for multiple-byte-sized data elements by Sven Fischer
+ *
+ * Note that this is safe only for a single-thread reader and a
+ * single-thread writer.
+ *
+ * This program uses the PortAudio Portable Audio Library.
+ * For more information see: http://www.portaudio.com
+ * Copyright (c) 1999-2000 Ross Bencina and Phil Burk
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
+ * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * The text above constitutes the entire PortAudio license; however,
+ * the PortAudio community also makes the following non-binding requests:
+ *
+ * Any person wishing to distribute modifications to the Software is
+ * requested to send the modifications to the original developer so that
+ * they can be incorporated into the canonical version. It is also
+ * requested that these non-binding requests be included along with the
+ * license above.
+ */
+
+/**
+ @file
+ @ingroup common_src
+*/
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <math.h>
+#include "pa_ringbuffer.h"
+#include <string.h>
+#include "pa_memorybarrier.h"
+
+/***************************************************************************
+ * Initialize FIFO.
+ * elementCount must be power of 2, returns -1 if not.
+ */
+ring_buffer_size_t PaUtil_InitializeRingBuffer( PaUtilRingBuffer *rbuf, ring_buffer_size_t elementSizeBytes, ring_buffer_size_t elementCount, void *dataPtr )
+{
+ if( ((elementCount-1) & elementCount) != 0) return -1; /* Not Power of two. */
+ rbuf->bufferSize = elementCount;
+ rbuf->buffer = (char *)dataPtr;
+ PaUtil_FlushRingBuffer( rbuf );
+ rbuf->bigMask = (elementCount*2)-1;
+ rbuf->smallMask = (elementCount)-1;
+ rbuf->elementSizeBytes = elementSizeBytes;
+ return 0;
+}
+
+/***************************************************************************
+** Return number of elements available for reading. */
+ring_buffer_size_t PaUtil_GetRingBufferReadAvailable( const PaUtilRingBuffer *rbuf )
+{
+ return ( (rbuf->writeIndex - rbuf->readIndex) & rbuf->bigMask );
+}
+/***************************************************************************
+** Return number of elements available for writing. */
+ring_buffer_size_t PaUtil_GetRingBufferWriteAvailable( const PaUtilRingBuffer *rbuf )
+{
+ return ( rbuf->bufferSize - PaUtil_GetRingBufferReadAvailable(rbuf));
+}
+
+/***************************************************************************
+** Clear buffer. Should only be called when buffer is NOT being read or written. */
+void PaUtil_FlushRingBuffer( PaUtilRingBuffer *rbuf )
+{
+ rbuf->writeIndex = rbuf->readIndex = 0;
+}
+
+/***************************************************************************
+** Get address of region(s) to which we can write data.
+** If the region is contiguous, size2 will be zero.
+** If non-contiguous, size2 will be the size of second region.
+** Returns room available to be written or elementCount, whichever is smaller.
+*/
+ring_buffer_size_t PaUtil_GetRingBufferWriteRegions( PaUtilRingBuffer *rbuf, ring_buffer_size_t elementCount,
+ void **dataPtr1, ring_buffer_size_t *sizePtr1,
+ void **dataPtr2, ring_buffer_size_t *sizePtr2 )
+{
+ ring_buffer_size_t index;
+ ring_buffer_size_t available = PaUtil_GetRingBufferWriteAvailable( rbuf );
+ if( elementCount > available ) elementCount = available;
+ /* Check to see if write is not contiguous. */
+ index = rbuf->writeIndex & rbuf->smallMask;
+ if( (index + elementCount) > rbuf->bufferSize )
+ {
+ /* Write data in two blocks that wrap the buffer. */
+ ring_buffer_size_t firstHalf = rbuf->bufferSize - index;
+ *dataPtr1 = &rbuf->buffer[index*rbuf->elementSizeBytes];
+ *sizePtr1 = firstHalf;
+ *dataPtr2 = &rbuf->buffer[0];
+ *sizePtr2 = elementCount - firstHalf;
+ }
+ else
+ {
+ *dataPtr1 = &rbuf->buffer[index*rbuf->elementSizeBytes];
+ *sizePtr1 = elementCount;
+ *dataPtr2 = NULL;
+ *sizePtr2 = 0;
+ }
+
+ if( available )
+ PaUtil_FullMemoryBarrier(); /* (write-after-read) => full barrier */
+
+ return elementCount;
+}
+
+
+/***************************************************************************
+*/
+ring_buffer_size_t PaUtil_AdvanceRingBufferWriteIndex( PaUtilRingBuffer *rbuf, ring_buffer_size_t elementCount )
+{
+ /* ensure that previous writes are seen before we update the write index
+ (write after write)
+ */
+ PaUtil_WriteMemoryBarrier();
+ return rbuf->writeIndex = (rbuf->writeIndex + elementCount) & rbuf->bigMask;
+}
+
+/***************************************************************************
+** Get address of region(s) from which we can read data.
+** If the region is contiguous, size2 will be zero.
+** If non-contiguous, size2 will be the size of the second region.
+** Returns room available to be read or elementCount, whichever is smaller.
+*/
+ring_buffer_size_t PaUtil_GetRingBufferReadRegions( PaUtilRingBuffer *rbuf, ring_buffer_size_t elementCount,
+ void **dataPtr1, ring_buffer_size_t *sizePtr1,
+ void **dataPtr2, ring_buffer_size_t *sizePtr2 )
+{
+ ring_buffer_size_t index;
+ ring_buffer_size_t available = PaUtil_GetRingBufferReadAvailable( rbuf ); /* doesn't use memory barrier */
+ if( elementCount > available ) elementCount = available;
+ /* Check to see if read is not contiguous. */
+ index = rbuf->readIndex & rbuf->smallMask;
+ if( (index + elementCount) > rbuf->bufferSize )
+ {
+        /* Read data in two blocks that wrap the buffer. */
+ ring_buffer_size_t firstHalf = rbuf->bufferSize - index;
+ *dataPtr1 = &rbuf->buffer[index*rbuf->elementSizeBytes];
+ *sizePtr1 = firstHalf;
+ *dataPtr2 = &rbuf->buffer[0];
+ *sizePtr2 = elementCount - firstHalf;
+ }
+ else
+ {
+ *dataPtr1 = &rbuf->buffer[index*rbuf->elementSizeBytes];
+ *sizePtr1 = elementCount;
+ *dataPtr2 = NULL;
+ *sizePtr2 = 0;
+ }
+
+ if( available )
+ PaUtil_ReadMemoryBarrier(); /* (read-after-read) => read barrier */
+
+ return elementCount;
+}
+/***************************************************************************
+*/
+ring_buffer_size_t PaUtil_AdvanceRingBufferReadIndex( PaUtilRingBuffer *rbuf, ring_buffer_size_t elementCount )
+{
+ /* ensure that previous reads (copies out of the ring buffer) are always completed before updating (writing) the read index.
+ (write-after-read) => full barrier
+ */
+ PaUtil_FullMemoryBarrier();
+ return rbuf->readIndex = (rbuf->readIndex + elementCount) & rbuf->bigMask;
+}
+
+/***************************************************************************
+** Return elements written. */
+ring_buffer_size_t PaUtil_WriteRingBuffer( PaUtilRingBuffer *rbuf, const void *data, ring_buffer_size_t elementCount )
+{
+ ring_buffer_size_t size1, size2, numWritten;
+ void *data1, *data2;
+ numWritten = PaUtil_GetRingBufferWriteRegions( rbuf, elementCount, &data1, &size1, &data2, &size2 );
+ if( size2 > 0 )
+ {
+ memcpy( data1, data, size1*rbuf->elementSizeBytes );
+ data = ((char *)data) + size1*rbuf->elementSizeBytes;
+ memcpy( data2, data, size2*rbuf->elementSizeBytes );
+ }
+ else
+ {
+ memcpy( data1, data, size1*rbuf->elementSizeBytes );
+ }
+ PaUtil_AdvanceRingBufferWriteIndex( rbuf, numWritten );
+ return numWritten;
+}
+
+/***************************************************************************
+** Return elements read. */
+ring_buffer_size_t PaUtil_ReadRingBuffer( PaUtilRingBuffer *rbuf, void *data, ring_buffer_size_t elementCount )
+{
+ ring_buffer_size_t size1, size2, numRead;
+ void *data1, *data2;
+ numRead = PaUtil_GetRingBufferReadRegions( rbuf, elementCount, &data1, &size1, &data2, &size2 );
+ if( size2 > 0 )
+ {
+ memcpy( data, data1, size1*rbuf->elementSizeBytes );
+ data = ((char *)data) + size1*rbuf->elementSizeBytes;
+ memcpy( data, data2, size2*rbuf->elementSizeBytes );
+ }
+ else
+ {
+ memcpy( data, data1, size1*rbuf->elementSizeBytes );
+ }
+ PaUtil_AdvanceRingBufferReadIndex( rbuf, numRead );
+ return numRead;
+}
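
A minimal single-producer/single-consumer sketch of the C API above (buffer
size and sample data are illustrative; it assumes pa_ringbuffer.c is compiled
into the application, as the header below notes):

    #include <cstdio>
    #include <vector>
    #include "pa_ringbuffer.h"

    int main() {
      // Element count must be a power of two; 256 floats here.
      const ring_buffer_size_t kCount = 256;
      std::vector<float> storage(kCount);
      PaUtilRingBuffer rb;
      if (PaUtil_InitializeRingBuffer(&rb, sizeof(float), kCount,
                                      storage.data()) != 0) {
        return 1;  // kCount was not a power of two.
      }

      // Producer side (would normally run in its own thread or callback).
      float in[64];
      for (int i = 0; i < 64; ++i) in[i] = i * 0.5f;
      ring_buffer_size_t written = PaUtil_WriteRingBuffer(&rb, in, 64);

      // Consumer side (the single reader).
      float out[64];
      ring_buffer_size_t read = PaUtil_ReadRingBuffer(&rb, out, 64);
      std::printf("wrote %ld, read %ld\n", (long)written, (long)read);
      return 0;
    }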
diff --git a/third_party/libwebrtc/modules/third_party/portaudio/pa_ringbuffer.h b/third_party/libwebrtc/modules/third_party/portaudio/pa_ringbuffer.h
new file mode 100644
index 0000000000..aa9d0aa772
--- /dev/null
+++ b/third_party/libwebrtc/modules/third_party/portaudio/pa_ringbuffer.h
@@ -0,0 +1,263 @@
+#ifndef MODULES_THIRD_PARTY_PORTAUDIO_PA_RINGBUFFER_H_
+#define MODULES_THIRD_PARTY_PORTAUDIO_PA_RINGBUFFER_H_
+/*
+ * $Id$
+ * Portable Audio I/O Library
+ * Ring Buffer utility.
+ *
+ * Author: Phil Burk, http://www.softsynth.com
+ * modified for SMP safety on OS X by Bjorn Roche.
+ * also allowed for const where possible.
+ * modified for multiple-byte-sized data elements by Sven Fischer
+ *
+ * Note that this is safe only for a single-thread reader
+ * and a single-thread writer.
+ *
+ * This program is distributed with the PortAudio Portable Audio Library.
+ * For more information see: http://www.portaudio.com
+ * Copyright (c) 1999-2000 Ross Bencina and Phil Burk
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
+ * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * The text above constitutes the entire PortAudio license; however,
+ * the PortAudio community also makes the following non-binding requests:
+ *
+ * Any person wishing to distribute modifications to the Software is
+ * requested to send the modifications to the original developer so that
+ * they can be incorporated into the canonical version. It is also
+ * requested that these non-binding requests be included along with the
+ * license above.
+ */
+
+/** @file
+ @ingroup common_src
+ @brief Single-reader single-writer lock-free ring buffer
+
+ PaUtilRingBuffer is a ring buffer used to transport samples between
+ different execution contexts (threads, OS callbacks, interrupt handlers)
+ without requiring the use of any locks. This only works when there is
+   a single reader and a single writer (i.e. one thread or callback writes
+ to the ring buffer, another thread or callback reads from it).
+
+ The PaUtilRingBuffer structure manages a ring buffer containing N
+ elements, where N must be a power of two. An element may be any size
+ (specified in bytes).
+
+ The memory area used to store the buffer elements must be allocated by
+ the client prior to calling PaUtil_InitializeRingBuffer() and must outlive
+ the use of the ring buffer.
+
+ @note The ring buffer functions are not normally exposed in the PortAudio
+ libraries. If you want to call them then you will need to add pa_ringbuffer.c
+ to your application source code.
+*/
+
+#if defined(__APPLE__)
+#include <sys/types.h>
+typedef int32_t ring_buffer_size_t;
+#elif defined(__GNUC__)
+typedef long ring_buffer_size_t;
+#elif (_MSC_VER >= 1400)
+typedef long ring_buffer_size_t;
+#elif defined(_MSC_VER) || defined(__BORLANDC__)
+typedef long ring_buffer_size_t;
+#else
+typedef long ring_buffer_size_t;
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+typedef struct PaUtilRingBuffer {
+  ring_buffer_size_t bufferSize; /**< Number of elements in FIFO. Power of 2.
+                                    Set by PaUtil_InitializeRingBuffer. */
+ volatile ring_buffer_size_t
+ writeIndex; /**< Index of next writable element. Set by
+ PaUtil_AdvanceRingBufferWriteIndex. */
+ volatile ring_buffer_size_t
+ readIndex; /**< Index of next readable element. Set by
+ PaUtil_AdvanceRingBufferReadIndex. */
+ ring_buffer_size_t bigMask; /**< Used for wrapping indices with extra bit to
+ distinguish full/empty. */
+ ring_buffer_size_t smallMask; /**< Used for fitting indices to buffer. */
+ ring_buffer_size_t elementSizeBytes; /**< Number of bytes per element. */
+ char* buffer; /**< Pointer to the buffer containing the actual data. */
+} PaUtilRingBuffer;
+
+/** Initialize Ring Buffer to empty state ready to have elements written to it.
+
+ @param rbuf The ring buffer.
+
+ @param elementSizeBytes The size of a single data element in bytes.
+
+ @param elementCount The number of elements in the buffer (must be a power of
+ 2).
+
+ @param dataPtr A pointer to a previously allocated area where the data
+ will be maintained. It must be elementCount*elementSizeBytes long.
+
+ @return -1 if elementCount is not a power of 2, otherwise 0.
+*/
+ring_buffer_size_t PaUtil_InitializeRingBuffer(
+ PaUtilRingBuffer* rbuf,
+ ring_buffer_size_t elementSizeBytes,
+ ring_buffer_size_t elementCount,
+ void* dataPtr);
+
+/** Reset buffer to empty. Should only be called when buffer is NOT being read
+ or written.
+
+ @param rbuf The ring buffer.
+*/
+void PaUtil_FlushRingBuffer(PaUtilRingBuffer* rbuf);
+
+/** Retrieve the number of elements available in the ring buffer for writing.
+
+ @param rbuf The ring buffer.
+
+ @return The number of elements available for writing.
+*/
+ring_buffer_size_t PaUtil_GetRingBufferWriteAvailable(
+ const PaUtilRingBuffer* rbuf);
+
+/** Retrieve the number of elements available in the ring buffer for reading.
+
+ @param rbuf The ring buffer.
+
+ @return The number of elements available for reading.
+*/
+ring_buffer_size_t PaUtil_GetRingBufferReadAvailable(
+ const PaUtilRingBuffer* rbuf);
+
+/** Write data to the ring buffer.
+
+ @param rbuf The ring buffer.
+
+ @param data The address of new data to write to the buffer.
+
+ @param elementCount The number of elements to be written.
+
+ @return The number of elements written.
+*/
+ring_buffer_size_t PaUtil_WriteRingBuffer(PaUtilRingBuffer* rbuf,
+ const void* data,
+ ring_buffer_size_t elementCount);
+
+/** Read data from the ring buffer.
+
+ @param rbuf The ring buffer.
+
+ @param data The address where the data should be stored.
+
+ @param elementCount The number of elements to be read.
+
+ @return The number of elements read.
+*/
+ring_buffer_size_t PaUtil_ReadRingBuffer(PaUtilRingBuffer* rbuf,
+ void* data,
+ ring_buffer_size_t elementCount);
+
+/** Get address of region(s) to which we can write data.
+
+ @param rbuf The ring buffer.
+
+ @param elementCount The number of elements desired.
+
+ @param dataPtr1 The address where the first (or only) region pointer will be
+ stored.
+
+ @param sizePtr1 The address where the first (or only) region length will be
+ stored.
+
+ @param dataPtr2 The address where the second region pointer will be stored if
+ the first region is too small to satisfy elementCount.
+
+ @param sizePtr2 The address where the second region length will be stored if
+ the first region is too small to satisfy elementCount.
+
+ @return The room available to be written or elementCount, whichever is smaller.
+*/
+ring_buffer_size_t PaUtil_GetRingBufferWriteRegions(
+ PaUtilRingBuffer* rbuf,
+ ring_buffer_size_t elementCount,
+ void** dataPtr1,
+ ring_buffer_size_t* sizePtr1,
+ void** dataPtr2,
+ ring_buffer_size_t* sizePtr2);
+
+/** Advance the write index to the next location to be written.
+
+ @param rbuf The ring buffer.
+
+ @param elementCount The number of elements to advance.
+
+ @return The new position.
+*/
+ring_buffer_size_t PaUtil_AdvanceRingBufferWriteIndex(
+ PaUtilRingBuffer* rbuf,
+ ring_buffer_size_t elementCount);
+
+/** Get address of region(s) from which we can read data.
+
+ @param rbuf The ring buffer.
+
+ @param elementCount The number of elements desired.
+
+ @param dataPtr1 The address where the first (or only) region pointer will be
+ stored.
+
+ @param sizePtr1 The address where the first (or only) region length will be
+ stored.
+
+ @param dataPtr2 The address where the second region pointer will be stored if
+ the first region is too small to satisfy elementCount.
+
+ @param sizePtr2 The address where the second region length will be stored if
+ the first region is too small to satisfy elementCount.
+
+  @return The room available to be read or elementCount, whichever is smaller.
+*/
+ring_buffer_size_t PaUtil_GetRingBufferReadRegions(
+ PaUtilRingBuffer* rbuf,
+ ring_buffer_size_t elementCount,
+ void** dataPtr1,
+ ring_buffer_size_t* sizePtr1,
+ void** dataPtr2,
+ ring_buffer_size_t* sizePtr2);
+
+/** Advance the read index to the next location to be read.
+
+ @param rbuf The ring buffer.
+
+ @param elementCount The number of elements to advance.
+
+ @return The new position.
+*/
+ring_buffer_size_t PaUtil_AdvanceRingBufferReadIndex(
+ PaUtilRingBuffer* rbuf,
+ ring_buffer_size_t elementCount);
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+#endif /* MODULES_THIRD_PARTY_PORTAUDIO_PA_RINGBUFFER_H_ */
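
The bigMask/smallMask scheme documented in the struct above can be checked
with a small worked sketch (kN is an assumed element count): with kN = 8,
indices wrap modulo 16 rather than 8, so equal indices mean empty while a
masked difference of kN means full; the extra index bit is what tells the two
states apart.

    #include <cassert>

    int main() {
      const long kN = 8;                 // Buffer holds 8 elements.
      const long kBigMask = 2 * kN - 1;  // Indices wrap at 16, not 8.
      long readIndex = 0, writeIndex = 0;
      assert(((writeIndex - readIndex) & kBigMask) == 0);   // Empty.
      writeIndex = (writeIndex + kN) & kBigMask;            // Write 8 elements.
      assert(((writeIndex - readIndex) & kBigMask) == kN);  // Full, not empty.
      return 0;
    }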
diff --git a/third_party/libwebrtc/modules/utility/BUILD.gn b/third_party/libwebrtc/modules/utility/BUILD.gn
new file mode 100644
index 0000000000..2b560943a3
--- /dev/null
+++ b/third_party/libwebrtc/modules/utility/BUILD.gn
@@ -0,0 +1,36 @@
+# Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+import("../../webrtc.gni")
+
+if (is_android) {
+ rtc_library("utility") {
+ visibility = [ "*" ]
+
+ sources = [
+ "include/helpers_android.h",
+ "include/jvm_android.h",
+ "source/helpers_android.cc",
+ "source/jvm_android.cc",
+ ]
+
+ deps = [
+ "../../api:sequence_checker",
+ "../../rtc_base:checks",
+ "../../rtc_base:logging",
+ "../../rtc_base:platform_thread",
+ "../../rtc_base/system:arch",
+ ]
+ }
+} else {
+ # Add an empty source set so that dependent targets may include utility
+ # unconditionally.
+ rtc_source_set("utility") {
+ visibility = [ "*" ]
+ }
+}
diff --git a/third_party/libwebrtc/modules/utility/DEPS b/third_party/libwebrtc/modules/utility/DEPS
new file mode 100644
index 0000000000..e9017d6b27
--- /dev/null
+++ b/third_party/libwebrtc/modules/utility/DEPS
@@ -0,0 +1,5 @@
+include_rules = [
+ "+common_audio",
+ "+common_video",
+ "+system_wrappers",
+]
diff --git a/third_party/libwebrtc/modules/utility/OWNERS b/third_party/libwebrtc/modules/utility/OWNERS
new file mode 100644
index 0000000000..5904b95df7
--- /dev/null
+++ b/third_party/libwebrtc/modules/utility/OWNERS
@@ -0,0 +1 @@
+perkj@webrtc.org
diff --git a/third_party/libwebrtc/modules/utility/include/helpers_android.h b/third_party/libwebrtc/modules/utility/include/helpers_android.h
new file mode 100644
index 0000000000..5e8deb9536
--- /dev/null
+++ b/third_party/libwebrtc/modules/utility/include/helpers_android.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_UTILITY_INCLUDE_HELPERS_ANDROID_H_
+#define MODULES_UTILITY_INCLUDE_HELPERS_ANDROID_H_
+
+#include <jni.h>
+
+#include <string>
+
+#include "rtc_base/system/arch.h"
+
+// Abort the process if `jni` has a Java exception pending.
+// TODO(henrika): merge with CHECK_JNI_EXCEPTION() in jni_helpers.h.
+#define CHECK_EXCEPTION(jni) \
+ RTC_CHECK(!jni->ExceptionCheck()) \
+ << (jni->ExceptionDescribe(), jni->ExceptionClear(), "")
+
+#if defined(WEBRTC_ARCH_X86)
+// Dalvik JIT generated code doesn't guarantee 16-byte stack alignment on
+// x86 - use force_align_arg_pointer to realign the stack at the JNI
+// boundary. bugs.webrtc.org/9050
+#define JNI_FUNCTION_ALIGN __attribute__((force_align_arg_pointer))
+#else
+#define JNI_FUNCTION_ALIGN
+#endif
+
+namespace webrtc {
+
+// Return a `JNIEnv*` usable on this thread or NULL if this thread is detached.
+JNIEnv* GetEnv(JavaVM* jvm);
+
+// Return a `jlong` that will correctly convert back to `ptr`. This is needed
+// because the alternative (of silently passing a 32-bit pointer to a vararg
+// function expecting a 64-bit param) picks up garbage in the high 32 bits.
+jlong PointerTojlong(void* ptr);
+
+// JNIEnv helper methods that wrap the API which uses the JNI interface
+// pointer (JNIEnv*). They allow us to RTC_CHECK success and that no Java
+// exception is thrown while calling the method.
+jmethodID GetMethodID(JNIEnv* jni,
+ jclass c,
+ const char* name,
+ const char* signature);
+
+jmethodID GetStaticMethodID(JNIEnv* jni,
+ jclass c,
+ const char* name,
+ const char* signature);
+
+jclass FindClass(JNIEnv* jni, const char* name);
+
+jobject NewGlobalRef(JNIEnv* jni, jobject o);
+
+void DeleteGlobalRef(JNIEnv* jni, jobject o);
+
+// Attach thread to JVM if necessary and detach at scope end if originally
+// attached.
+class AttachThreadScoped {
+ public:
+ explicit AttachThreadScoped(JavaVM* jvm);
+ ~AttachThreadScoped();
+ JNIEnv* env();
+
+ private:
+ bool attached_;
+ JavaVM* jvm_;
+ JNIEnv* env_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_UTILITY_INCLUDE_HELPERS_ANDROID_H_
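
A sketch of how a worker thread might use AttachThreadScoped together with the
checked helpers above; the JavaVM pointer is assumed to have been captured
earlier (e.g. via GetJavaVM in JNI_OnLoad), and the Java call is only
illustrative:

    #include <jni.h>

    #include "modules/utility/include/helpers_android.h"

    void WorkerThreadBody(JavaVM* jvm) {
      // Attaches this native thread if it is not already attached and
      // detaches at scope exit only if this object did the attaching.
      webrtc::AttachThreadScoped ats(jvm);
      JNIEnv* env = ats.env();
      jclass cls = webrtc::FindClass(env, "java/lang/System");
      jmethodID mid =
          webrtc::GetStaticMethodID(env, cls, "currentTimeMillis", "()J");
      jlong now = env->CallStaticLongMethod(cls, mid);
      CHECK_EXCEPTION(env) << "Error during CallStaticLongMethod";
      (void)now;
    }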
diff --git a/third_party/libwebrtc/modules/utility/include/jvm_android.h b/third_party/libwebrtc/modules/utility/include/jvm_android.h
new file mode 100644
index 0000000000..bafb8f2c15
--- /dev/null
+++ b/third_party/libwebrtc/modules/utility/include/jvm_android.h
@@ -0,0 +1,193 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_UTILITY_INCLUDE_JVM_ANDROID_H_
+#define MODULES_UTILITY_INCLUDE_JVM_ANDROID_H_
+
+#include <jni.h>
+
+#include <memory>
+#include <string>
+
+#include "api/sequence_checker.h"
+#include "modules/utility/include/helpers_android.h"
+
+namespace webrtc {
+
+// RAII JavaVM AttachCurrentThread/DetachCurrentThread object.
+//
+// The JNI interface pointer (JNIEnv) is valid only in the current thread.
+// Should another thread need to access the Java VM, it must first call
+// AttachCurrentThread() to attach itself to the VM and obtain a JNI interface
+// pointer. The native thread remains attached to the VM until it calls
+// DetachCurrentThread() to detach.
+class JvmThreadConnector {
+ public:
+ JvmThreadConnector();
+ ~JvmThreadConnector();
+
+ private:
+ SequenceChecker thread_checker_;
+ bool attached_;
+};
+
+// This class is created by the NativeRegistration class and is used to wrap
+// the actual Java object handle (jobject) on which we can call methods from
+// C++ into Java. See the example in JVM for more details.
+// TODO(henrika): extend support for type of function calls.
+class GlobalRef {
+ public:
+ GlobalRef(JNIEnv* jni, jobject object);
+ ~GlobalRef();
+
+ jboolean CallBooleanMethod(jmethodID methodID, ...);
+ jint CallIntMethod(jmethodID methodID, ...);
+ void CallVoidMethod(jmethodID methodID, ...);
+
+ private:
+ JNIEnv* const jni_;
+ const jobject j_object_;
+};
+
+// Wraps the jclass object on which we can call GetMethodId() functions to
+// query method IDs.
+class JavaClass {
+ public:
+ JavaClass(JNIEnv* jni, jclass clazz) : jni_(jni), j_class_(clazz) {}
+ ~JavaClass() {}
+
+ jmethodID GetMethodId(const char* name, const char* signature);
+ jmethodID GetStaticMethodId(const char* name, const char* signature);
+ jobject CallStaticObjectMethod(jmethodID methodID, ...);
+ jint CallStaticIntMethod(jmethodID methodID, ...);
+
+ protected:
+ JNIEnv* const jni_;
+ jclass const j_class_;
+};
+
+// Adds support of the NewObject factory method to the JavaClass class.
+// See example in JVM for more details on how to use it.
+class NativeRegistration : public JavaClass {
+ public:
+ NativeRegistration(JNIEnv* jni, jclass clazz);
+ ~NativeRegistration();
+
+ std::unique_ptr<GlobalRef> NewObject(const char* name,
+ const char* signature,
+ ...);
+
+ private:
+ JNIEnv* const jni_;
+};
+
+// This class is created by the JVM class and is used to expose methods that
+// need the JNI interface pointer, but its main purpose is to create a
+// NativeRegistration object given the name of a Java class and a list of
+// native methods. See the example in JVM for more details.
+class JNIEnvironment {
+ public:
+ explicit JNIEnvironment(JNIEnv* jni);
+ ~JNIEnvironment();
+
+ // Registers native methods with the Java class specified by `name`.
+ // Note that the class name must be one of the names in the static
+ // `loaded_classes` array defined in jvm_android.cc.
+ // This method must be called on the construction thread.
+ std::unique_ptr<NativeRegistration> RegisterNatives(
+ const char* name,
+ const JNINativeMethod* methods,
+ int num_methods);
+
+ // Converts from Java string to std::string.
+ // This method must be called on the construction thread.
+ std::string JavaToStdString(const jstring& j_string);
+
+ private:
+ SequenceChecker thread_checker_;
+ JNIEnv* const jni_;
+};
+
+// Main class for working with Java from C++ using JNI in WebRTC.
+//
+// Example usage:
+//
+// // At initialization (e.g. in JNI_OnLoad), call JVM::Initialize.
+// JNIEnv* jni = ::base::android::AttachCurrentThread();
+// JavaVM* jvm = NULL;
+// jni->GetJavaVM(&jvm);
+// webrtc::JVM::Initialize(jvm);
+//
+// // Header (.h) file of example class called User.
+// std::unique_ptr<JNIEnvironment> env;
+// std::unique_ptr<NativeRegistration> reg;
+// std::unique_ptr<GlobalRef> obj;
+//
+// // Construction (in .cc file) of User class.
+// User::User() {
+// // Calling thread must be attached to the JVM.
+// env = JVM::GetInstance()->environment();
+// reg = env->RegisterNatives("org/webrtc/WebRtcTest", ,);
+// obj = reg->NewObject("<init>", ,);
+// }
+//
+// // Each User method can now use `reg` and `obj` and call Java functions
+// // in WebRtcTest.java, e.g. boolean init() {}.
+// bool User::Foo() {
+// jmethodID id = reg->GetMethodId("init", "()Z");
+// return obj->CallBooleanMethod(id);
+// }
+//
+// // And finally, e.g. in JNI_OnUnLoad, call JVM::Uninitialize.
+// JVM::Uninitialize();
+class JVM {
+ public:
+ // Stores global handles to the Java VM interface.
+ // Should be called once on a thread that is attached to the JVM.
+ static void Initialize(JavaVM* jvm);
+ // Like the method above but also passes the context to the ContextUtils
+ // class. This method should be used by pure-C++ Android users that can't call
+ // ContextUtils.initialize directly.
+ static void Initialize(JavaVM* jvm, jobject context);
+ // Clears handles stored in Initialize(). Must be called on same thread as
+ // Initialize().
+ static void Uninitialize();
+ // Gives access to the global Java VM interface pointer, which then can be
+ // used to create a valid JNIEnvironment object or to get a JavaClass object.
+ static JVM* GetInstance();
+
+ // Creates a JNIEnvironment object.
+ // This method returns a NULL pointer if AttachCurrentThread() has not been
+ // called successfully. Use the AttachCurrentThreadIfNeeded class if needed.
+ std::unique_ptr<JNIEnvironment> environment();
+
+ // Returns a JavaClass object given class `name`.
+ // Note that the class name must be one of the names in the static
+ // `loaded_classes` array defined in jvm_android.cc.
+ // This method must be called on the construction thread.
+ JavaClass GetClass(const char* name);
+
+ // TODO(henrika): can we make these private?
+ JavaVM* jvm() const { return jvm_; }
+
+ protected:
+ JVM(JavaVM* jvm);
+ ~JVM();
+
+ private:
+ JNIEnv* jni() const { return GetEnv(jvm_); }
+
+ SequenceChecker thread_checker_;
+ JavaVM* const jvm_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_UTILITY_INCLUDE_JVM_ANDROID_H_
diff --git a/third_party/libwebrtc/modules/utility/source/helpers_android.cc b/third_party/libwebrtc/modules/utility/source/helpers_android.cc
new file mode 100644
index 0000000000..9cfee8a2af
--- /dev/null
+++ b/third_party/libwebrtc/modules/utility/source/helpers_android.cc
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/utility/include/helpers_android.h"
+
+#include <android/log.h>
+#include <pthread.h>
+#include <stddef.h>
+#include <unistd.h>
+
+#include "rtc_base/checks.h"
+#include "rtc_base/platform_thread.h"
+
+#define TAG "HelpersAndroid"
+#define ALOGD(...) __android_log_print(ANDROID_LOG_DEBUG, TAG, __VA_ARGS__)
+
+namespace webrtc {
+
+JNIEnv* GetEnv(JavaVM* jvm) {
+ void* env = NULL;
+ jint status = jvm->GetEnv(&env, JNI_VERSION_1_6);
+ RTC_CHECK(((env != NULL) && (status == JNI_OK)) ||
+ ((env == NULL) && (status == JNI_EDETACHED)))
+ << "Unexpected GetEnv return: " << status << ":" << env;
+ return reinterpret_cast<JNIEnv*>(env);
+}
+
+// Return a `jlong` that will correctly convert back to `ptr`. This is needed
+// because the alternative (of silently passing a 32-bit pointer to a vararg
+// function expecting a 64-bit param) picks up garbage in the high 32 bits.
+jlong PointerTojlong(void* ptr) {
+ static_assert(sizeof(intptr_t) <= sizeof(jlong),
+ "Time to rethink the use of jlongs");
+  // Going through intptr_t to be explicit about the definedness of the
+  // conversion from pointer to integral type. intptr_t to jlong is a plain
+  // widening conversion, guaranteed lossless by the static_assert above.
+ jlong ret = reinterpret_cast<intptr_t>(ptr);
+ RTC_DCHECK(reinterpret_cast<void*>(ret) == ptr);
+ return ret;
+}
+
+jmethodID GetMethodID(JNIEnv* jni,
+ jclass c,
+ const char* name,
+ const char* signature) {
+ jmethodID m = jni->GetMethodID(c, name, signature);
+ CHECK_EXCEPTION(jni) << "Error during GetMethodID: " << name << ", "
+ << signature;
+ RTC_CHECK(m) << name << ", " << signature;
+ return m;
+}
+
+jmethodID GetStaticMethodID(JNIEnv* jni,
+ jclass c,
+ const char* name,
+ const char* signature) {
+ jmethodID m = jni->GetStaticMethodID(c, name, signature);
+ CHECK_EXCEPTION(jni) << "Error during GetStaticMethodID: " << name << ", "
+ << signature;
+ RTC_CHECK(m) << name << ", " << signature;
+ return m;
+}
+
+jclass FindClass(JNIEnv* jni, const char* name) {
+ jclass c = jni->FindClass(name);
+ CHECK_EXCEPTION(jni) << "Error during FindClass: " << name;
+ RTC_CHECK(c) << name;
+ return c;
+}
+
+jobject NewGlobalRef(JNIEnv* jni, jobject o) {
+ jobject ret = jni->NewGlobalRef(o);
+ CHECK_EXCEPTION(jni) << "Error during NewGlobalRef";
+ RTC_CHECK(ret);
+ return ret;
+}
+
+void DeleteGlobalRef(JNIEnv* jni, jobject o) {
+ jni->DeleteGlobalRef(o);
+ CHECK_EXCEPTION(jni) << "Error during DeleteGlobalRef";
+}
+
+AttachThreadScoped::AttachThreadScoped(JavaVM* jvm)
+ : attached_(false), jvm_(jvm), env_(NULL) {
+ env_ = GetEnv(jvm);
+ if (!env_) {
+ // Adding debug log here so we can track down potential leaks and figure
+ // out why we sometimes see "Native thread exiting without having called
+ // DetachCurrentThread" in logcat outputs.
+ ALOGD("Attaching thread to JVM[tid=%d]", rtc::CurrentThreadId());
+ jint res = jvm->AttachCurrentThread(&env_, NULL);
+ attached_ = (res == JNI_OK);
+ RTC_CHECK(attached_) << "AttachCurrentThread failed: " << res;
+ }
+}
+
+AttachThreadScoped::~AttachThreadScoped() {
+ if (attached_) {
+ ALOGD("Detaching thread from JVM[tid=%d]", rtc::CurrentThreadId());
+ jint res = jvm_->DetachCurrentThread();
+ RTC_CHECK(res == JNI_OK) << "DetachCurrentThread failed: " << res;
+ RTC_CHECK(!GetEnv(jvm_));
+ }
+}
+
+JNIEnv* AttachThreadScoped::env() {
+ return env_;
+}
+
+} // namespace webrtc
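
A sketch of the round-trip that PointerTojlong enables: a native object handed
to Java as an opaque jlong handle and recovered later. NativeState and the
three entry points here are hypothetical:

    #include <jni.h>

    #include "modules/utility/include/helpers_android.h"

    struct NativeState {
      int value = 42;
    };

    // Create the native object and hand Java an opaque 64-bit handle.
    jlong CreateHandle() {
      return webrtc::PointerTojlong(new NativeState());
    }

    // Recover the pointer; the round-trip is exact because jlong is at
    // least as wide as intptr_t (enforced by the static_assert above).
    void UseHandle(jlong handle) {
      reinterpret_cast<NativeState*>(handle)->value++;
    }

    void DestroyHandle(jlong handle) {
      delete reinterpret_cast<NativeState*>(handle);
    }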
diff --git a/third_party/libwebrtc/modules/utility/source/jvm_android.cc b/third_party/libwebrtc/modules/utility/source/jvm_android.cc
new file mode 100644
index 0000000000..39ef12f428
--- /dev/null
+++ b/third_party/libwebrtc/modules/utility/source/jvm_android.cc
@@ -0,0 +1,292 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/utility/include/jvm_android.h"
+
+#include <android/log.h>
+
+#include <memory>
+
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/platform_thread.h"
+
+namespace mozilla {
+namespace jni {
+jclass GetClassRef(JNIEnv* aEnv, const char* aClassName);
+}
+}
+
+#define TAG "JVM"
+#define ALOGD(...) __android_log_print(ANDROID_LOG_DEBUG, TAG, __VA_ARGS__)
+#define ALOGE(...) __android_log_print(ANDROID_LOG_ERROR, TAG, __VA_ARGS__)
+
+namespace webrtc {
+
+JVM* g_jvm;
+
+// TODO(henrika): add more classes here if needed.
+struct {
+ const char* name;
+ jclass clazz;
+} loaded_classes[] = {
+ {"org/webrtc/voiceengine/BuildInfo", nullptr},
+ {"org/webrtc/voiceengine/WebRtcAudioManager", nullptr},
+ {"org/webrtc/voiceengine/WebRtcAudioRecord", nullptr},
+ {"org/webrtc/voiceengine/WebRtcAudioTrack", nullptr},
+};
+
+// Android's FindClass() is trickier than usual because the app-specific
+// ClassLoader is not consulted when there is no app-specific frame on the
+// stack. Consequently, we only look up all classes once in native WebRTC.
+// http://developer.android.com/training/articles/perf-jni.html#faq_FindClass
+void LoadClasses(JNIEnv* jni) {
+ RTC_LOG(LS_INFO) << "LoadClasses:";
+ for (auto& c : loaded_classes) {
+ ALOGD("name: %s", c.name);
+ jclass clsRef = mozilla::jni::GetClassRef(jni, c.name);
+ RTC_CHECK(clsRef) << c.name;
+ c.clazz = static_cast<jclass>(jni->NewGlobalRef(clsRef));
+ jni->DeleteLocalRef(clsRef);
+ }
+}
+
+void FreeClassReferences(JNIEnv* jni) {
+ for (auto& c : loaded_classes) {
+ jni->DeleteGlobalRef(c.clazz);
+ c.clazz = nullptr;
+ }
+}
+
+jclass LookUpClass(const char* name) {
+ for (auto& c : loaded_classes) {
+ if (strcmp(c.name, name) == 0)
+ return c.clazz;
+ }
+ RTC_CHECK(false) << "Unable to find class in lookup table";
+ return 0;
+}
+
+// JvmThreadConnector implementation.
+JvmThreadConnector::JvmThreadConnector() : attached_(false) {
+ RTC_LOG(LS_INFO) << "JvmThreadConnector::ctor";
+ JavaVM* jvm = JVM::GetInstance()->jvm();
+ RTC_CHECK(jvm);
+ JNIEnv* jni = GetEnv(jvm);
+ if (!jni) {
+ RTC_LOG(LS_INFO) << "Attaching thread to JVM";
+ JNIEnv* env = nullptr;
+ jint ret = jvm->AttachCurrentThread(&env, nullptr);
+ attached_ = (ret == JNI_OK);
+ }
+}
+
+JvmThreadConnector::~JvmThreadConnector() {
+ RTC_LOG(LS_INFO) << "JvmThreadConnector::dtor";
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ if (attached_) {
+ RTC_LOG(LS_INFO) << "Detaching thread from JVM";
+ jint res = JVM::GetInstance()->jvm()->DetachCurrentThread();
+ RTC_CHECK(res == JNI_OK) << "DetachCurrentThread failed: " << res;
+ }
+}
+
+// GlobalRef implementation.
+GlobalRef::GlobalRef(JNIEnv* jni, jobject object)
+ : jni_(jni), j_object_(NewGlobalRef(jni, object)) {
+ RTC_LOG(LS_INFO) << "GlobalRef::ctor";
+}
+
+GlobalRef::~GlobalRef() {
+ RTC_LOG(LS_INFO) << "GlobalRef::dtor";
+ DeleteGlobalRef(jni_, j_object_);
+}
+
+jboolean GlobalRef::CallBooleanMethod(jmethodID methodID, ...) {
+ va_list args;
+ va_start(args, methodID);
+ jboolean res = jni_->CallBooleanMethodV(j_object_, methodID, args);
+ CHECK_EXCEPTION(jni_) << "Error during CallBooleanMethod";
+ va_end(args);
+ return res;
+}
+
+jint GlobalRef::CallIntMethod(jmethodID methodID, ...) {
+ va_list args;
+ va_start(args, methodID);
+ jint res = jni_->CallIntMethodV(j_object_, methodID, args);
+ CHECK_EXCEPTION(jni_) << "Error during CallIntMethod";
+ va_end(args);
+ return res;
+}
+
+void GlobalRef::CallVoidMethod(jmethodID methodID, ...) {
+ va_list args;
+ va_start(args, methodID);
+ jni_->CallVoidMethodV(j_object_, methodID, args);
+ CHECK_EXCEPTION(jni_) << "Error during CallVoidMethod";
+ va_end(args);
+}
+
+// NativeRegistration implementation.
+NativeRegistration::NativeRegistration(JNIEnv* jni, jclass clazz)
+ : JavaClass(jni, clazz), jni_(jni) {
+ RTC_LOG(LS_INFO) << "NativeRegistration::ctor";
+}
+
+NativeRegistration::~NativeRegistration() {
+ RTC_LOG(LS_INFO) << "NativeRegistration::dtor";
+ jni_->UnregisterNatives(j_class_);
+ CHECK_EXCEPTION(jni_) << "Error during UnregisterNatives";
+}
+
+std::unique_ptr<GlobalRef> NativeRegistration::NewObject(const char* name,
+ const char* signature,
+ ...) {
+ RTC_LOG(LS_INFO) << "NativeRegistration::NewObject";
+ va_list args;
+ va_start(args, signature);
+ jobject obj = jni_->NewObjectV(
+ j_class_, GetMethodID(jni_, j_class_, name, signature), args);
+ CHECK_EXCEPTION(jni_) << "Error during NewObjectV";
+ va_end(args);
+ return std::unique_ptr<GlobalRef>(new GlobalRef(jni_, obj));
+}
+
+// JavaClass implementation.
+jmethodID JavaClass::GetMethodId(const char* name, const char* signature) {
+ return GetMethodID(jni_, j_class_, name, signature);
+}
+
+jmethodID JavaClass::GetStaticMethodId(const char* name,
+ const char* signature) {
+ return GetStaticMethodID(jni_, j_class_, name, signature);
+}
+
+jobject JavaClass::CallStaticObjectMethod(jmethodID methodID, ...) {
+ va_list args;
+ va_start(args, methodID);
+ jobject res = jni_->CallStaticObjectMethodV(j_class_, methodID, args);
+  CHECK_EXCEPTION(jni_) << "Error during CallStaticObjectMethod";
+  va_end(args);
+  return res;
+}
+
+jint JavaClass::CallStaticIntMethod(jmethodID methodID, ...) {
+ va_list args;
+ va_start(args, methodID);
+ jint res = jni_->CallStaticIntMethodV(j_class_, methodID, args);
+  CHECK_EXCEPTION(jni_) << "Error during CallStaticIntMethod";
+  va_end(args);
+  return res;
+}
+
+// JNIEnvironment implementation.
+JNIEnvironment::JNIEnvironment(JNIEnv* jni) : jni_(jni) {
+ RTC_LOG(LS_INFO) << "JNIEnvironment::ctor";
+}
+
+JNIEnvironment::~JNIEnvironment() {
+ RTC_LOG(LS_INFO) << "JNIEnvironment::dtor";
+ RTC_DCHECK(thread_checker_.IsCurrent());
+}
+
+std::unique_ptr<NativeRegistration> JNIEnvironment::RegisterNatives(
+ const char* name,
+ const JNINativeMethod* methods,
+ int num_methods) {
+ RTC_LOG(LS_INFO) << "JNIEnvironment::RegisterNatives: " << name;
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ jclass clazz = LookUpClass(name);
+ jni_->RegisterNatives(clazz, methods, num_methods);
+ CHECK_EXCEPTION(jni_) << "Error during RegisterNatives";
+ return std::unique_ptr<NativeRegistration>(
+ new NativeRegistration(jni_, clazz));
+}
+
+std::string JNIEnvironment::JavaToStdString(const jstring& j_string) {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ const char* jchars = jni_->GetStringUTFChars(j_string, nullptr);
+ CHECK_EXCEPTION(jni_);
+ const int size = jni_->GetStringUTFLength(j_string);
+ CHECK_EXCEPTION(jni_);
+ std::string ret(jchars, size);
+ jni_->ReleaseStringUTFChars(j_string, jchars);
+ CHECK_EXCEPTION(jni_);
+ return ret;
+}
+
+// static
+void JVM::Initialize(JavaVM* jvm) {
+ if (g_jvm) {
+ return;
+ }
+ g_jvm = new JVM(jvm);
+}
+
+void JVM::Initialize(JavaVM* jvm, jobject context) {
+ Initialize(jvm);
+
+ // Pass in the context to the new ContextUtils class.
+ JNIEnv* jni = g_jvm->jni();
+ jclass context_utils = FindClass(jni, "org/webrtc/ContextUtils");
+ jmethodID initialize_method = jni->GetStaticMethodID(
+ context_utils, "initialize", "(Landroid/content/Context;)V");
+ jni->CallStaticVoidMethod(context_utils, initialize_method, context);
+}
+
+// static
+void JVM::Uninitialize() {
+ RTC_LOG(LS_INFO) << "JVM::Uninitialize";
+ RTC_DCHECK(g_jvm);
+ delete g_jvm;
+ g_jvm = nullptr;
+}
+
+// static
+JVM* JVM::GetInstance() {
+ RTC_DCHECK(g_jvm);
+ return g_jvm;
+}
+
+JVM::JVM(JavaVM* jvm) : jvm_(jvm) {
+ RTC_LOG(LS_INFO) << "JVM::JVM";
+ RTC_CHECK(jni()) << "AttachCurrentThread() must be called on this thread.";
+ LoadClasses(jni());
+}
+
+JVM::~JVM() {
+ RTC_LOG(LS_INFO) << "JVM::~JVM";
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ FreeClassReferences(jni());
+}
+
+std::unique_ptr<JNIEnvironment> JVM::environment() {
+ RTC_LOG(LS_INFO) << "JVM::environment";
+ // The JNIEnv is used for thread-local storage. For this reason, we cannot
+ // share a JNIEnv between threads. If a piece of code has no other way to get
+ // its JNIEnv, we should share the JavaVM, and use GetEnv to discover the
+ // thread's JNIEnv. (Assuming it has one, if not, use AttachCurrentThread).
+  // See http://developer.android.com/training/articles/perf-jni.html.
+ JNIEnv* jni = GetEnv(jvm_);
+ if (!jni) {
+ RTC_LOG(LS_ERROR)
+ << "AttachCurrentThread() has not been called on this thread";
+ return std::unique_ptr<JNIEnvironment>();
+ }
+ return std::unique_ptr<JNIEnvironment>(new JNIEnvironment(jni));
+}
+
+JavaClass JVM::GetClass(const char* name) {
+ RTC_LOG(LS_INFO) << "JVM::GetClass: " << name;
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ return JavaClass(jni(), LookUpClass(name));
+}
+
+} // namespace webrtc
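
A sketch of wiring JVM::Initialize and JVM::Uninitialize into a library's
standard JNI entry points, following the usage comment in jvm_android.h (the
JNI version constant matches the one GetEnv uses):

    #include <jni.h>

    #include "modules/utility/include/jvm_android.h"

    extern "C" jint JNI_OnLoad(JavaVM* vm, void* /*reserved*/) {
      // One-time registration of the VM handle. JNI_OnLoad always runs on
      // an attached thread, which the JVM constructor requires in order to
      // load its class table.
      webrtc::JVM::Initialize(vm);
      return JNI_VERSION_1_6;
    }

    extern "C" void JNI_OnUnload(JavaVM* /*vm*/, void* /*reserved*/) {
      webrtc::JVM::Uninitialize();
    }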
diff --git a/third_party/libwebrtc/modules/utility/utility_gn/moz.build b/third_party/libwebrtc/modules/utility/utility_gn/moz.build
new file mode 100644
index 0000000000..5b578af55e
--- /dev/null
+++ b/third_party/libwebrtc/modules/utility/utility_gn/moz.build
@@ -0,0 +1,202 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+ UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/utility/source/helpers_android.cc",
+ "/third_party/libwebrtc/modules/utility/source/jvm_android.cc"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("utility_gn")
diff --git a/third_party/libwebrtc/modules/video_capture/BUILD.gn b/third_party/libwebrtc/modules/video_capture/BUILD.gn
new file mode 100644
index 0000000000..2ae0474e9e
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_capture/BUILD.gn
@@ -0,0 +1,158 @@
+# Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+import("../../webrtc.gni")
+
+# Note this target is missing an implementation for the video capture.
+# Targets must link with either 'video_capture' or
+# 'video_capture_internal_impl' depending on whether they want to
+# use the internal capturer.
+rtc_library("video_capture_module") {
+ visibility = [ "*" ]
+ sources = [
+ "device_info_impl.cc",
+ "device_info_impl.h",
+ "video_capture.h",
+ "video_capture_config.h",
+ "video_capture_defines.h",
+ "video_capture_factory.cc",
+ "video_capture_factory.h",
+ "video_capture_impl.cc",
+ "video_capture_impl.h",
+ ]
+
+ deps = [
+ "../../api:scoped_refptr",
+ "../../api/video:video_frame",
+ "../../api/video:video_rtp_headers",
+ "../../common_video",
+ "../../media:rtc_media_base",
+ "../../rtc_base:event_tracer",
+ "../../rtc_base:logging",
+ "../../rtc_base:macromagic",
+ "../../rtc_base:refcount",
+ "../../rtc_base:stringutils",
+ "../../rtc_base:timeutils",
+ "../../rtc_base/synchronization:mutex",
+ "../../system_wrappers",
+ "//third_party/libyuv",
+ ]
+ if (build_with_mozilla) {
+ deps -= [ "//third_party/libyuv" ]
+ include_dirs = [
+ "/media/libyuv",
+ "/media/libyuv/libyuv/include",
+ ]
+ }
+ absl_deps = [ "//third_party/abseil-cpp/absl/strings" ]
+}
+
+if (!build_with_chromium) {
+ rtc_source_set("video_capture_internal_impl") {
+ visibility = [ "*" ]
+ deps = [
+ ":video_capture_module",
+ "../../api:scoped_refptr",
+ "../../api:sequence_checker",
+ "../../rtc_base:checks",
+ "../../rtc_base:logging",
+ "../../rtc_base:macromagic",
+ "../../rtc_base:platform_thread",
+ "../../rtc_base:refcount",
+ "../../rtc_base:stringutils",
+ "../../rtc_base/synchronization:mutex",
+ "../../system_wrappers",
+ ]
+
+ if (is_linux || is_bsd || is_chromeos) {
+ sources = [
+ "linux/device_info_linux.cc",
+ "linux/device_info_v4l2.cc",
+ "linux/device_info_v4l2.h",
+ "linux/video_capture_linux.cc",
+ "linux/video_capture_v4l2.cc",
+ "linux/video_capture_v4l2.h",
+ ]
+ deps += [ "../../media:rtc_media_base" ]
+ }
+ if (is_win) {
+ sources = [
+ "windows/device_info_ds.cc",
+ "windows/device_info_ds.h",
+ "windows/help_functions_ds.cc",
+ "windows/help_functions_ds.h",
+ "windows/sink_filter_ds.cc",
+ "windows/sink_filter_ds.h",
+ "windows/video_capture_ds.cc",
+ "windows/video_capture_ds.h",
+ "windows/video_capture_factory_windows.cc",
+ ]
+
+ libs = [
+ "ole32.lib",
+ "oleaut32.lib",
+ "strmiids.lib",
+ "user32.lib",
+ ]
+ }
+ if (is_fuchsia) {
+ sources = [ "video_capture_factory_null.cc" ]
+ }
+
+ if (!build_with_mozilla && is_android) {
+ include_dirs = [
+ "/config/external/nspr",
+ "/nsprpub/lib/ds",
+ "/nsprpub/pr/include",
+ ]
+
+ sources = [
+ "android/device_info_android.cc",
+ "android/video_capture_android.cc",
+ ]
+ }
+ }
+
+ if (!is_android && rtc_include_tests) {
+ rtc_test("video_capture_tests") {
+ sources = [ "test/video_capture_unittest.cc" ]
+ ldflags = []
+ if (is_linux || is_chromeos || is_mac) {
+ ldflags += [
+ "-lpthread",
+ "-lm",
+ ]
+ }
+ if (is_linux || is_chromeos) {
+ ldflags += [
+ "-lrt",
+ "-lXext",
+ "-lX11",
+ ]
+ }
+
+ deps = [
+ ":video_capture_internal_impl",
+ ":video_capture_module",
+ "../../api:scoped_refptr",
+ "../../api/video:video_frame",
+ "../../api/video:video_rtp_headers",
+ "../../common_video",
+ "../../rtc_base:timeutils",
+ "../../rtc_base/synchronization:mutex",
+ "../../system_wrappers",
+ "../../test:frame_utils",
+ "../../test:test_main",
+ "../../test:test_support",
+ "../../test:video_test_common",
+ "//testing/gtest",
+ "//third_party/abseil-cpp/absl/memory",
+ ]
+ }
+ }
+}
diff --git a/third_party/libwebrtc/modules/video_capture/DEPS b/third_party/libwebrtc/modules/video_capture/DEPS
new file mode 100644
index 0000000000..9ad1d576bc
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_capture/DEPS
@@ -0,0 +1,6 @@
+include_rules = [
+ "+common_video",
+ "+media/base",
+ "+system_wrappers",
+ "+third_party/libyuv",
+]
diff --git a/third_party/libwebrtc/modules/video_capture/OWNERS b/third_party/libwebrtc/modules/video_capture/OWNERS
new file mode 100644
index 0000000000..364d66d36f
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_capture/OWNERS
@@ -0,0 +1,4 @@
+ilnik@webrtc.org
+mflodman@webrtc.org
+perkj@webrtc.org
+tkchin@webrtc.org
diff --git a/third_party/libwebrtc/modules/video_capture/device_info_impl.cc b/third_party/libwebrtc/modules/video_capture/device_info_impl.cc
new file mode 100644
index 0000000000..5313fe90be
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_capture/device_info_impl.cc
@@ -0,0 +1,223 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_capture/device_info_impl.h"
+
+#include <stdlib.h>
+
+#include "absl/strings/match.h"
+#include "absl/strings/string_view.h"
+#include "rtc_base/logging.h"
+
+#ifndef abs
+#define abs(a) (a >= 0 ? a : -a)
+#endif
+
+namespace webrtc {
+namespace videocapturemodule {
+
+DeviceInfoImpl::DeviceInfoImpl()
+ : _lastUsedDeviceName(NULL), _lastUsedDeviceNameLength(0) {}
+
+DeviceInfoImpl::~DeviceInfoImpl(void) {
+ MutexLock lock(&_apiLock);
+ free(_lastUsedDeviceName);
+}
+
+int32_t DeviceInfoImpl::NumberOfCapabilities(const char* deviceUniqueIdUTF8) {
+ if (!deviceUniqueIdUTF8)
+ return -1;
+
+ MutexLock lock(&_apiLock);
+
+  // Is it the same device that was asked for again?
+ if (absl::EqualsIgnoreCase(
+ deviceUniqueIdUTF8,
+ absl::string_view(_lastUsedDeviceName, _lastUsedDeviceNameLength))) {
+ return static_cast<int32_t>(_captureCapabilities.size());
+ }
+
+ int32_t ret = CreateCapabilityMap(deviceUniqueIdUTF8);
+ return ret;
+}
+
+int32_t DeviceInfoImpl::GetCapability(const char* deviceUniqueIdUTF8,
+ const uint32_t deviceCapabilityNumber,
+ VideoCaptureCapability& capability) {
+ RTC_DCHECK(deviceUniqueIdUTF8);
+
+ MutexLock lock(&_apiLock);
+
+ if (!absl::EqualsIgnoreCase(
+ deviceUniqueIdUTF8,
+ absl::string_view(_lastUsedDeviceName, _lastUsedDeviceNameLength))) {
+ if (-1 == CreateCapabilityMap(deviceUniqueIdUTF8)) {
+ return -1;
+ }
+ }
+
+ // Make sure the number is valid
+ if (deviceCapabilityNumber >= (unsigned int)_captureCapabilities.size()) {
+    RTC_LOG(LS_ERROR) << deviceUniqueIdUTF8 << " Invalid deviceCapabilityNumber "
+                      << deviceCapabilityNumber << " >= number of capabilities ("
+ << _captureCapabilities.size() << ").";
+ return -1;
+ }
+
+ capability = _captureCapabilities[deviceCapabilityNumber];
+ return 0;
+}
+
+int32_t DeviceInfoImpl::GetBestMatchedCapability(
+ const char* deviceUniqueIdUTF8,
+ const VideoCaptureCapability& requested,
+ VideoCaptureCapability& resulting) {
+ if (!deviceUniqueIdUTF8)
+ return -1;
+
+ MutexLock lock(&_apiLock);
+ if (!absl::EqualsIgnoreCase(
+ deviceUniqueIdUTF8,
+ absl::string_view(_lastUsedDeviceName, _lastUsedDeviceNameLength))) {
+ if (-1 == CreateCapabilityMap(deviceUniqueIdUTF8)) {
+ return -1;
+ }
+ }
+
+ int32_t bestformatIndex = -1;
+ int32_t bestWidth = 0;
+ int32_t bestHeight = 0;
+ int32_t bestFrameRate = 0;
+ VideoType bestVideoType = VideoType::kUnknown;
+
+  const int32_t numberOfCapabilities =
+ static_cast<int32_t>(_captureCapabilities.size());
+
+ bool hasNonRGB24Capability = false;
+  for (int32_t tmp = 0; tmp < numberOfCapabilities;
+ ++tmp) // Loop through all capabilities
+ {
+ VideoCaptureCapability& capability = _captureCapabilities[tmp];
+ if (capability.videoType != VideoType::kRGB24) {
+ hasNonRGB24Capability = true;
+ }
+ }
+
+  for (int32_t tmp = 0; tmp < numberOfCapabilities;
+ ++tmp) // Loop through all capabilities
+ {
+ VideoCaptureCapability& capability = _captureCapabilities[tmp];
+ if (hasNonRGB24Capability && capability.videoType == VideoType::kRGB24) {
+ continue;
+ }
+
+ const int32_t diffWidth = capability.width - requested.width;
+ const int32_t diffHeight = capability.height - requested.height;
+ const int32_t diffFrameRate = capability.maxFPS - requested.maxFPS;
+
+ const int32_t currentbestDiffWith = bestWidth - requested.width;
+ const int32_t currentbestDiffHeight = bestHeight - requested.height;
+ const int32_t currentbestDiffFrameRate = bestFrameRate - requested.maxFPS;
+
+    if ((diffHeight >= 0 &&
+         diffHeight <= abs(currentbestDiffHeight)) // Height better than or
+                                                   // equal to previous.
+        || (currentbestDiffHeight < 0 && diffHeight >= currentbestDiffHeight)) {
+      if (diffHeight ==
+          currentbestDiffHeight) // Found best height. Now check the width.
+ {
+ if ((diffWidth >= 0 &&
+ diffWidth <= abs(currentbestDiffWith)) // Width better or equal
+ || (currentbestDiffWith < 0 && diffWidth >= currentbestDiffWith)) {
+ if (diffWidth == currentbestDiffWith &&
+ diffHeight == currentbestDiffHeight) // Same size as previously
+ {
+          // Also check the best frame rate if the diff is the same as
+          // previous.
+          if (((diffFrameRate >= 0 &&
+                diffFrameRate <=
+                    currentbestDiffFrameRate) // Frame rate too high but a
+                                              // better match than the
+                                              // previous one.
+ || (currentbestDiffFrameRate < 0 &&
+ diffFrameRate >=
+ currentbestDiffFrameRate)) // Current frame rate is
+ // lower than requested.
+ // This is better.
+ ) {
+            if ((currentbestDiffFrameRate ==
+                 diffFrameRate) // Same frame rate as previous, or frame rate
+                                // already good enough.
+ || (currentbestDiffFrameRate >= 0)) {
+ if (bestVideoType != requested.videoType &&
+ requested.videoType != VideoType::kUnknown &&
+ (capability.videoType == requested.videoType ||
+ capability.videoType == VideoType::kI420 ||
+ capability.videoType == VideoType::kYUY2 ||
+ capability.videoType == VideoType::kYV12)) {
+ bestVideoType = capability.videoType;
+ bestformatIndex = tmp;
+ }
+              // If width, height and frame rate are fulfilled, we can use
+              // the camera for encoding if it is supported.
+ if (capability.height == requested.height &&
+ capability.width == requested.width &&
+ capability.maxFPS >= requested.maxFPS) {
+ bestformatIndex = tmp;
+ }
+ } else // Better frame rate
+ {
+ bestWidth = capability.width;
+ bestHeight = capability.height;
+ bestFrameRate = capability.maxFPS;
+ bestVideoType = capability.videoType;
+ bestformatIndex = tmp;
+ }
+ }
+ } else // Better width than previously
+ {
+ bestWidth = capability.width;
+ bestHeight = capability.height;
+ bestFrameRate = capability.maxFPS;
+ bestVideoType = capability.videoType;
+ bestformatIndex = tmp;
+ }
+ } // else width no good
+ } else // Better height
+ {
+ bestWidth = capability.width;
+ bestHeight = capability.height;
+ bestFrameRate = capability.maxFPS;
+ bestVideoType = capability.videoType;
+ bestformatIndex = tmp;
+ }
+ } // else height not good
+ } // end for
+
+ RTC_LOG(LS_VERBOSE) << "Best camera format: " << bestWidth << "x"
+ << bestHeight << "@" << bestFrameRate
+ << "fps, color format: "
+ << static_cast<int>(bestVideoType);
+
+ // Copy the capability
+ if (bestformatIndex < 0)
+ return -1;
+ resulting = _captureCapabilities[bestformatIndex];
+ return bestformatIndex;
+}
+
+// Default implementation. This should be overridden by Mobile implementations.
+int32_t DeviceInfoImpl::GetOrientation(const char* deviceUniqueIdUTF8,
+ VideoRotation& orientation) {
+ orientation = kVideoRotation_0;
+ return -1;
+}
+} // namespace videocapturemodule
+} // namespace webrtc
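
As a readability aid for the nested cascade in GetBestMatchedCapability above
(a simplified sketch only — the shipped logic additionally prefers
capabilities at or above the request, skips RGB24 when alternatives exist, and
breaks ties on pixel format), the core preference order of height, then width,
then frame rate can be written as a lexicographic comparison:

    #include <algorithm>
    #include <cstddef>
    #include <cstdlib>
    #include <vector>

    struct Cap {
      int width, height, maxFPS;
    };

    // Pick the index whose (|height diff|, |width diff|, |fps diff|) tuple
    // is lexicographically smallest; returns -1 if caps is empty.
    int BestMatch(const std::vector<Cap>& caps, const Cap& req) {
      int best = -1;
      long bestKey[3] = {0, 0, 0};
      for (std::size_t i = 0; i < caps.size(); ++i) {
        long key[3] = {std::labs(long(caps[i].height) - req.height),
                       std::labs(long(caps[i].width) - req.width),
                       std::labs(long(caps[i].maxFPS) - req.maxFPS)};
        if (best < 0 || std::lexicographical_compare(key, key + 3, bestKey,
                                                     bestKey + 3)) {
          best = static_cast<int>(i);
          std::copy(key, key + 3, bestKey);
        }
      }
      return best;
    }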
diff --git a/third_party/libwebrtc/modules/video_capture/device_info_impl.h b/third_party/libwebrtc/modules/video_capture/device_info_impl.h
new file mode 100644
index 0000000000..8acbef6d69
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_capture/device_info_impl.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CAPTURE_MAIN_SOURCE_DEVICE_INFO_IMPL_H_
+#define MODULES_VIDEO_CAPTURE_MAIN_SOURCE_DEVICE_INFO_IMPL_H_
+
+#include <stdint.h>
+
+#include <vector>
+
+#include "api/video/video_rotation.h"
+#include "modules/video_capture/video_capture.h"
+#include "modules/video_capture/video_capture_defines.h"
+#include "rtc_base/synchronization/mutex.h"
+#include "rtc_base/thread_annotations.h"
+
+namespace webrtc {
+namespace videocapturemodule {
+class DeviceInfoImpl : public VideoCaptureModule::DeviceInfo {
+ public:
+ DeviceInfoImpl();
+  ~DeviceInfoImpl() override;
+ int32_t NumberOfCapabilities(const char* deviceUniqueIdUTF8) override;
+ int32_t GetCapability(const char* deviceUniqueIdUTF8,
+ uint32_t deviceCapabilityNumber,
+ VideoCaptureCapability& capability) override;
+
+ int32_t GetBestMatchedCapability(const char* deviceUniqueIdUTF8,
+ const VideoCaptureCapability& requested,
+ VideoCaptureCapability& resulting) override;
+ int32_t GetOrientation(const char* deviceUniqueIdUTF8,
+ VideoRotation& orientation) override;
+
+ protected:
+  /* Initialize this object. */
+  virtual int32_t Init() = 0;
+ int32_t Refresh() override { return 0; }
+ /*
+ * Fills the member variable _captureCapabilities with capabilities for the
+ * given device name.
+ */
+ virtual int32_t CreateCapabilityMap(const char* deviceUniqueIdUTF8)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(_apiLock) = 0;
+
+ protected:
+ // Data members
+ typedef std::vector<VideoCaptureCapability> VideoCaptureCapabilities;
+ VideoCaptureCapabilities _captureCapabilities RTC_GUARDED_BY(_apiLock);
+ Mutex _apiLock;
+ char* _lastUsedDeviceName RTC_GUARDED_BY(_apiLock);
+ uint32_t _lastUsedDeviceNameLength RTC_GUARDED_BY(_apiLock);
+};
+} // namespace videocapturemodule
+} // namespace webrtc
+#endif // MODULES_VIDEO_CAPTURE_MAIN_SOURCE_DEVICE_INFO_IMPL_H_
diff --git a/third_party/libwebrtc/modules/video_capture/linux/device_info_linux.cc b/third_party/libwebrtc/modules/video_capture/linux/device_info_linux.cc
new file mode 100644
index 0000000000..4821cbccd5
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_capture/linux/device_info_linux.cc
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <errno.h>
+#include <fcntl.h>
+#include <poll.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/ioctl.h>
+#include <unistd.h>
+// v4l includes
+#if defined(__NetBSD__) || defined(__OpenBSD__) // WEBRTC_BSD
+#include <sys/videoio.h>
+#elif defined(__sun)
+#include <sys/videodev2.h>
+#else
+#include <linux/videodev2.h>
+#endif
+
+#include <vector>
+
+#include "modules/video_capture/linux/device_info_v4l2.h"
+#include "modules/video_capture/video_capture.h"
+#include "modules/video_capture/video_capture_defines.h"
+#include "modules/video_capture/video_capture_impl.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+namespace videocapturemodule {
+VideoCaptureModule::DeviceInfo* VideoCaptureImpl::CreateDeviceInfo() {
+ return new videocapturemodule::DeviceInfoV4l2();
+}
+} // namespace videocapturemodule
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_capture/linux/device_info_v4l2.cc b/third_party/libwebrtc/modules/video_capture/linux/device_info_v4l2.cc
new file mode 100644
index 0000000000..39852016d3
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_capture/linux/device_info_v4l2.cc
@@ -0,0 +1,449 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_capture/linux/device_info_v4l2.h"
+
+#include <assert.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <poll.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/ioctl.h>
+#include <unistd.h>
+// v4l includes
+#if defined(__NetBSD__) || defined(__OpenBSD__) // WEBRTC_BSD
+#include <sys/videoio.h>
+#elif defined(__sun)
+#include <sys/videodev2.h>
+#else
+#include <linux/videodev2.h>
+#endif
+
+#include <vector>
+
+#include "modules/video_capture/video_capture.h"
+#include "modules/video_capture/video_capture_defines.h"
+#include "modules/video_capture/video_capture_impl.h"
+#include "rtc_base/logging.h"
+
+#ifdef WEBRTC_LINUX
+#define EVENT_SIZE (sizeof(struct inotify_event))
+#define BUF_LEN (1024 * (EVENT_SIZE + 16))
+#endif
+
+namespace webrtc {
+namespace videocapturemodule {
+#ifdef WEBRTC_LINUX
+void DeviceInfoV4l2::HandleEvent(inotify_event* event, int fd)
+{
+ if (event->mask & IN_CREATE) {
+ if (fd == _fd_v4l) {
+ DeviceChange();
+ } else if ((event->mask & IN_ISDIR) && (fd == _fd_dev)) {
+ if (_wd_v4l < 0) {
+        // Sometimes inotify_add_watch fails if we call it immediately after
+        // receiving this event. Add a 5 ms delay to let the file system
+        // settle down.
+        usleep(5 * 1000);
+ _wd_v4l = inotify_add_watch(_fd_v4l, "/dev/v4l/by-path/", IN_CREATE | IN_DELETE | IN_DELETE_SELF);
+ if (_wd_v4l >= 0) {
+ DeviceChange();
+ }
+ }
+ }
+ } else if (event->mask & IN_DELETE) {
+ if (fd == _fd_v4l) {
+ DeviceChange();
+ }
+ } else if (event->mask & IN_DELETE_SELF) {
+ if (fd == _fd_v4l) {
+ inotify_rm_watch(_fd_v4l, _wd_v4l);
+ _wd_v4l = -1;
+ } else {
+ assert(false);
+ }
+ }
+}
+
+int DeviceInfoV4l2::EventCheck(int fd)
+{
+ struct pollfd fds = {
+ .fd = fd,
+ .events = POLLIN,
+ .revents = 0
+ };
+
+ return poll(&fds, 1, 100);
+}
+
+int DeviceInfoV4l2::HandleEvents(int fd)
+{
+ char buffer[BUF_LEN];
+
+ ssize_t r = read(fd, buffer, BUF_LEN);
+
+ if (r <= 0) {
+ return r;
+ }
+
+ ssize_t buffer_i = 0;
+ inotify_event* pevent;
+ size_t eventSize;
+ int count = 0;
+
+ while (buffer_i < r)
+ {
+ pevent = (inotify_event *) (&buffer[buffer_i]);
+ eventSize = sizeof(inotify_event) + pevent->len;
+ char event[sizeof(inotify_event) + FILENAME_MAX + 1] // null-terminated
+ __attribute__ ((aligned(__alignof__(struct inotify_event))));
+
+ memcpy(event, pevent, eventSize);
+
+ HandleEvent((inotify_event*)(event), fd);
+
+ buffer_i += eventSize;
+ count++;
+ }
+
+ return count;
+}
+
+int DeviceInfoV4l2::ProcessInotifyEvents()
+{
+ while (!_isShutdown) {
+ if (EventCheck(_fd_dev) > 0) {
+ if (HandleEvents(_fd_dev) < 0) {
+ break;
+ }
+ }
+ if (EventCheck(_fd_v4l) > 0) {
+ if (HandleEvents(_fd_v4l) < 0) {
+ break;
+ }
+ }
+ }
+ return 0;
+}
+
+void DeviceInfoV4l2::InotifyProcess()
+{
+ _fd_v4l = inotify_init();
+ _fd_dev = inotify_init();
+ if (_fd_v4l >= 0 && _fd_dev >= 0) {
+ _wd_v4l = inotify_add_watch(_fd_v4l, "/dev/v4l/by-path/", IN_CREATE | IN_DELETE | IN_DELETE_SELF);
+ _wd_dev = inotify_add_watch(_fd_dev, "/dev/", IN_CREATE);
+ ProcessInotifyEvents();
+
+ if (_wd_v4l >= 0) {
+ inotify_rm_watch(_fd_v4l, _wd_v4l);
+ }
+
+ if (_wd_dev >= 0) {
+ inotify_rm_watch(_fd_dev, _wd_dev);
+ }
+
+ close(_fd_v4l);
+ close(_fd_dev);
+ }
+}
+#endif
+
+DeviceInfoV4l2::DeviceInfoV4l2() : DeviceInfoImpl()
+#ifdef WEBRTC_LINUX
+ , _isShutdown(false)
+#endif
+{
+#ifdef WEBRTC_LINUX
+ _inotifyEventThread = rtc::PlatformThread::SpawnJoinable(
+ [this] {
+ InotifyProcess();
+ }, "InotifyEventThread");
+#endif
+}
+
+int32_t DeviceInfoV4l2::Init() {
+ return 0;
+}
+
+DeviceInfoV4l2::~DeviceInfoV4l2() {
+#ifdef WEBRTC_LINUX
+ _isShutdown = true;
+
+ if (!_inotifyEventThread.empty()) {
+ _inotifyEventThread.Finalize();
+ }
+#endif
+}
+
+uint32_t DeviceInfoV4l2::NumberOfDevices() {
+ uint32_t count = 0;
+ char device[20];
+ int fd = -1;
+ struct v4l2_capability cap;
+
+  /* detect /dev/video[0-63] entries */
+ for (int n = 0; n < 64; n++) {
+ snprintf(device, sizeof(device), "/dev/video%d", n);
+ if ((fd = open(device, O_RDONLY)) != -1) {
+ // query device capabilities and make sure this is a video capture device
+ if (ioctl(fd, VIDIOC_QUERYCAP, &cap) < 0 || !IsVideoCaptureDevice(&cap)) {
+ close(fd);
+ continue;
+ }
+
+ close(fd);
+ count++;
+ }
+ }
+
+ return count;
+}
+
+int32_t DeviceInfoV4l2::GetDeviceName(uint32_t deviceNumber,
+ char* deviceNameUTF8,
+ uint32_t deviceNameLength,
+ char* deviceUniqueIdUTF8,
+ uint32_t deviceUniqueIdUTF8Length,
+ char* /*productUniqueIdUTF8*/,
+ uint32_t /*productUniqueIdUTF8Length*/,
+ pid_t* /*pid*/) {
+  // Iterate through /dev/video[0-63].
+ uint32_t count = 0;
+ char device[20];
+ int fd = -1;
+ bool found = false;
+ struct v4l2_capability cap;
+ int device_index;
+ for (device_index = 0; device_index < 64; device_index++) {
+    snprintf(device, sizeof(device), "/dev/video%d", device_index);
+ if ((fd = open(device, O_RDONLY)) != -1) {
+ // query device capabilities and make sure this is a video capture device
+ if (ioctl(fd, VIDIOC_QUERYCAP, &cap) < 0 || !IsVideoCaptureDevice(&cap)) {
+ close(fd);
+ continue;
+ }
+ if (count == deviceNumber) {
+ // Found the device
+ found = true;
+ break;
+ } else {
+ close(fd);
+ count++;
+ }
+ }
+ }
+
+ if (!found)
+ return -1;
+
+ // query device capabilities
+ if (ioctl(fd, VIDIOC_QUERYCAP, &cap) < 0) {
+ RTC_LOG(LS_INFO) << "error in querying the device capability for device "
+ << device << ". errno = " << errno;
+ close(fd);
+ return -1;
+ }
+
+ close(fd);
+
+ char cameraName[64];
+ memset(deviceNameUTF8, 0, deviceNameLength);
+ memcpy(cameraName, cap.card, sizeof(cap.card));
+
+ if (deviceNameLength > strlen(cameraName)) {
+ memcpy(deviceNameUTF8, cameraName, strlen(cameraName));
+ } else {
+ RTC_LOG(LS_INFO) << "buffer passed is too small";
+ return -1;
+ }
+
+  if (cap.bus_info[0] != 0) {  // may not be available in all drivers
+ // copy device id
+ size_t len = strlen(reinterpret_cast<const char*>(cap.bus_info));
+ if (deviceUniqueIdUTF8Length > len) {
+ memset(deviceUniqueIdUTF8, 0, deviceUniqueIdUTF8Length);
+ memcpy(deviceUniqueIdUTF8, cap.bus_info, len);
+ } else {
+ RTC_LOG(LS_INFO) << "buffer passed is too small";
+ return -1;
+ }
+ } else {
+    // If there's no bus info to use for the unique id, invent one; it has to
+    // be repeatable.
+    if (snprintf(deviceUniqueIdUTF8, deviceUniqueIdUTF8Length, "fake_%u",
+                 device_index) >= (int)deviceUniqueIdUTF8Length) {
+      return -1;
+    }
+ }
+ return 0;
+}
+
+int32_t DeviceInfoV4l2::CreateCapabilityMap(const char* deviceUniqueIdUTF8) {
+ int fd;
+ char device[32];
+ bool found = false;
+
+ const int32_t deviceUniqueIdUTF8Length = strlen(deviceUniqueIdUTF8);
+ if (deviceUniqueIdUTF8Length >= kVideoCaptureUniqueNameLength) {
+ RTC_LOG(LS_INFO) << "Device name too long";
+ return -1;
+ }
+ RTC_LOG(LS_INFO) << "CreateCapabilityMap called for device "
+ << deviceUniqueIdUTF8;
+
+ /* detect /dev/video [0-63] entries */
+ for (int n = 0; n < 64; ++n) {
+ snprintf(device, sizeof(device), "/dev/video%d", n);
+ fd = open(device, O_RDONLY);
+ if (fd == -1)
+ continue;
+
+ // query device capabilities
+ struct v4l2_capability cap;
+ if (ioctl(fd, VIDIOC_QUERYCAP, &cap) == 0) {
+ // skip devices without video capture capability
+ if (!IsVideoCaptureDevice(&cap)) {
+ close(fd);
+ continue;
+ }
+
+ if (cap.bus_info[0] != 0) {
+ if (strncmp(reinterpret_cast<const char*>(cap.bus_info),
+ deviceUniqueIdUTF8,
+ strlen(deviceUniqueIdUTF8)) == 0) { // match with device id
+ found = true;
+ break; // fd matches with device unique id supplied
+ }
+ } else { // match for device name
+ if (IsDeviceNameMatches(reinterpret_cast<const char*>(cap.card),
+ deviceUniqueIdUTF8)) {
+ found = true;
+ break;
+ }
+ }
+ }
+ close(fd); // close since this is not the matching device
+ }
+
+ if (!found) {
+ RTC_LOG(LS_INFO) << "no matching device found";
+ return -1;
+ }
+
+ // now fd will point to the matching device
+ // reset old capability list.
+ _captureCapabilities.clear();
+
+ int size = FillCapabilities(fd);
+ close(fd);
+
+ // Store the new used device name
+ _lastUsedDeviceNameLength = deviceUniqueIdUTF8Length;
+ _lastUsedDeviceName = reinterpret_cast<char*>(
+ realloc(_lastUsedDeviceName, _lastUsedDeviceNameLength + 1));
+ memcpy(_lastUsedDeviceName, deviceUniqueIdUTF8,
+ _lastUsedDeviceNameLength + 1);
+
+ RTC_LOG(LS_INFO) << "CreateCapabilityMap " << _captureCapabilities.size();
+
+ return size;
+}
+
+int32_t DeviceInfoV4l2::DisplayCaptureSettingsDialogBox(
+ const char* /*deviceUniqueIdUTF8*/,
+ const char* /*dialogTitleUTF8*/,
+ void* /*parentWindow*/,
+ uint32_t /*positionX*/,
+ uint32_t /*positionY*/) {
+ return -1;
+}
+
+bool DeviceInfoV4l2::IsDeviceNameMatches(const char* name,
+ const char* deviceUniqueIdUTF8) {
+ if (strncmp(deviceUniqueIdUTF8, name, strlen(name)) == 0)
+ return true;
+ return false;
+}
+
+bool DeviceInfoV4l2::IsVideoCaptureDevice(struct v4l2_capability* cap)
+{
+ if (cap->capabilities & V4L2_CAP_DEVICE_CAPS) {
+ return cap->device_caps & V4L2_CAP_VIDEO_CAPTURE;
+ } else {
+ return cap->capabilities & V4L2_CAP_VIDEO_CAPTURE;
+ }
+}
+
+int32_t DeviceInfoV4l2::FillCapabilities(int fd) {
+ // set image format
+ struct v4l2_format video_fmt;
+ memset(&video_fmt, 0, sizeof(struct v4l2_format));
+
+ video_fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ video_fmt.fmt.pix.sizeimage = 0;
+
+ int totalFmts = 4;
+ unsigned int videoFormats[] = {V4L2_PIX_FMT_MJPEG, V4L2_PIX_FMT_YUV420,
+ V4L2_PIX_FMT_YUYV, V4L2_PIX_FMT_UYVY};
+
+ int sizes = 13;
+ unsigned int size[][2] = {{128, 96}, {160, 120}, {176, 144}, {320, 240},
+ {352, 288}, {640, 480}, {704, 576}, {800, 600},
+ {960, 720}, {1280, 720}, {1024, 768}, {1440, 1080},
+ {1920, 1080}};
+
+ for (int fmts = 0; fmts < totalFmts; fmts++) {
+ for (int i = 0; i < sizes; i++) {
+ video_fmt.fmt.pix.pixelformat = videoFormats[fmts];
+ video_fmt.fmt.pix.width = size[i][0];
+ video_fmt.fmt.pix.height = size[i][1];
+
+ if (ioctl(fd, VIDIOC_TRY_FMT, &video_fmt) >= 0) {
+ if ((video_fmt.fmt.pix.width == size[i][0]) &&
+ (video_fmt.fmt.pix.height == size[i][1])) {
+ VideoCaptureCapability cap;
+ cap.width = video_fmt.fmt.pix.width;
+ cap.height = video_fmt.fmt.pix.height;
+ if (videoFormats[fmts] == V4L2_PIX_FMT_YUYV) {
+ cap.videoType = VideoType::kYUY2;
+ } else if (videoFormats[fmts] == V4L2_PIX_FMT_YUV420) {
+ cap.videoType = VideoType::kI420;
+ } else if (videoFormats[fmts] == V4L2_PIX_FMT_MJPEG) {
+ cap.videoType = VideoType::kMJPEG;
+ } else if (videoFormats[fmts] == V4L2_PIX_FMT_UYVY) {
+ cap.videoType = VideoType::kUYVY;
+ }
+
+          // Get the fps of the current camera mode. V4L2 does not have a
+          // stable method of knowing, so we just guess.
+ if (cap.width >= 800 && cap.videoType != VideoType::kMJPEG) {
+ cap.maxFPS = 15;
+ } else {
+ cap.maxFPS = 30;
+ }
+
+ _captureCapabilities.push_back(cap);
+ RTC_LOG(LS_VERBOSE) << "Camera capability, width:" << cap.width
+ << " height:" << cap.height
+ << " type:" << static_cast<int32_t>(cap.videoType)
+ << " fps:" << cap.maxFPS;
+ }
+ }
+ }
+ }
+
+ RTC_LOG(LS_INFO) << "CreateCapabilityMap " << _captureCapabilities.size();
+ return _captureCapabilities.size();
+}
+
+} // namespace videocapturemodule
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_capture/linux/device_info_v4l2.h b/third_party/libwebrtc/modules/video_capture/linux/device_info_v4l2.h
new file mode 100644
index 0000000000..0bec3eb765
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_capture/linux/device_info_v4l2.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CAPTURE_LINUX_DEVICE_INFO_V4L2_H_
+#define MODULES_VIDEO_CAPTURE_LINUX_DEVICE_INFO_V4L2_H_
+
+#include <stdint.h>
+
+#include "modules/video_capture/device_info_impl.h"
+
+#include "rtc_base/platform_thread.h"
+#ifdef WEBRTC_LINUX
+#include <atomic>
+#include <sys/inotify.h>
+#endif
+
+struct v4l2_capability;
+
+namespace webrtc {
+namespace videocapturemodule {
+class DeviceInfoV4l2 : public DeviceInfoImpl {
+ public:
+ DeviceInfoV4l2();
+ ~DeviceInfoV4l2() override;
+ uint32_t NumberOfDevices() override;
+ int32_t GetDeviceName(uint32_t deviceNumber,
+ char* deviceNameUTF8,
+ uint32_t deviceNameLength,
+ char* deviceUniqueIdUTF8,
+ uint32_t deviceUniqueIdUTF8Length,
+ char* productUniqueIdUTF8 = 0,
+ uint32_t productUniqueIdUTF8Length = 0,
+                        pid_t* pid = 0) override;
+ /*
+   * Fills the member variable _captureCapabilities with capabilities for the
+ * given device name.
+ */
+ int32_t CreateCapabilityMap(const char* deviceUniqueIdUTF8) override
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(_apiLock);
+ int32_t DisplayCaptureSettingsDialogBox(const char* /*deviceUniqueIdUTF8*/,
+ const char* /*dialogTitleUTF8*/,
+ void* /*parentWindow*/,
+ uint32_t /*positionX*/,
+ uint32_t /*positionY*/) override;
+ int32_t FillCapabilities(int fd) RTC_EXCLUSIVE_LOCKS_REQUIRED(_apiLock);
+ int32_t Init() override;
+
+ private:
+ bool IsDeviceNameMatches(const char* name, const char* deviceUniqueIdUTF8);
+ bool IsVideoCaptureDevice(struct v4l2_capability* cap);
+
+#ifdef WEBRTC_LINUX
+ void HandleEvent(inotify_event* event, int fd);
+ int EventCheck(int fd);
+ int HandleEvents(int fd);
+ int ProcessInotifyEvents();
+ rtc::PlatformThread _inotifyEventThread;
+ void InotifyProcess();
+  int _fd_v4l, _fd_dev, _wd_v4l, _wd_dev; /* accessed on the inotify event thread */
+ std::atomic<bool> _isShutdown;
+#endif
+};
+} // namespace videocapturemodule
+} // namespace webrtc
+#endif // MODULES_VIDEO_CAPTURE_LINUX_DEVICE_INFO_V4L2_H_
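The hot-plug detection declared above boils down to an inotify descriptor
polled with a short timeout so the thread can observe the shutdown flag. Here
is a distilled, self-contained sketch of that pattern, assuming the same
/dev/v4l/by-path/ watch and 100 ms poll timeout as the code; the function name
and buffer size are illustrative.

    #include <poll.h>
    #include <stdio.h>
    #include <sys/inotify.h>
    #include <unistd.h>

    #include <atomic>

    void WatchV4lDevices(const std::atomic<bool>& shutdown) {  // illustrative
      int fd = inotify_init();
      if (fd < 0)
        return;
      int wd = inotify_add_watch(fd, "/dev/v4l/by-path/",
                                 IN_CREATE | IN_DELETE | IN_DELETE_SELF);
      char buf[4096]
          __attribute__((aligned(__alignof__(struct inotify_event))));
      struct pollfd pfd = {fd, POLLIN, 0};
      while (!shutdown) {
        int r = poll(&pfd, 1, 100);  // Short timeout, as in EventCheck().
        if (r < 0)
          break;
        if (r == 0)
          continue;  // Timeout: loop again and re-check the shutdown flag.
        ssize_t n = read(fd, buf, sizeof(buf));
        for (ssize_t i = 0; i < n;) {
          const struct inotify_event* e = (const struct inotify_event*)&buf[i];
          if (e->mask & (IN_CREATE | IN_DELETE))
            printf("device list changed\n");  // Where DeviceChange() fires.
          i += sizeof(struct inotify_event) + e->len;
        }
      }
      if (wd >= 0)
        inotify_rm_watch(fd, wd);
      close(fd);
    }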
diff --git a/third_party/libwebrtc/modules/video_capture/linux/video_capture_linux.cc b/third_party/libwebrtc/modules/video_capture/linux/video_capture_linux.cc
new file mode 100644
index 0000000000..f3324a8e68
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_capture/linux/video_capture_linux.cc
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <errno.h>
+#include <fcntl.h>
+#include <poll.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <sys/select.h>
+#include <time.h>
+#include <unistd.h>
+// v4l includes
+#if defined(__NetBSD__) || defined(__OpenBSD__) // WEBRTC_BSD
+#include <sys/videoio.h>
+#elif defined(__sun)
+#include <sys/videodev2.h>
+#else
+#include <linux/videodev2.h>
+#endif
+
+#include <new>
+#include <string>
+
+#include "api/scoped_refptr.h"
+#include "media/base/video_common.h"
+#include "modules/video_capture/linux/video_capture_v4l2.h"
+#include "modules/video_capture/video_capture.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+namespace videocapturemodule {
+rtc::scoped_refptr<VideoCaptureModule> VideoCaptureImpl::Create(
+ const char* deviceUniqueId) {
+ auto implementation = rtc::make_ref_counted<VideoCaptureModuleV4L2>();
+
+ if (implementation->Init(deviceUniqueId) != 0)
+ return nullptr;
+
+ return implementation;
+}
+} // namespace videocapturemodule
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_capture/linux/video_capture_v4l2.cc b/third_party/libwebrtc/modules/video_capture/linux/video_capture_v4l2.cc
new file mode 100644
index 0000000000..d8b9351227
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_capture/linux/video_capture_v4l2.cc
@@ -0,0 +1,433 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_capture/linux/video_capture_v4l2.h"
+
+#include <errno.h>
+#include <fcntl.h>
+#include <poll.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <sys/select.h>
+#include <time.h>
+#include <unistd.h>
+// v4l includes
+#if defined(__NetBSD__) || defined(__OpenBSD__) // WEBRTC_BSD
+#include <sys/videoio.h>
+#elif defined(__sun)
+#include <sys/videodev2.h>
+#else
+#include <linux/videodev2.h>
+#endif
+
+#include <new>
+#include <string>
+
+#include "api/scoped_refptr.h"
+#include "media/base/video_common.h"
+#include "modules/video_capture/video_capture.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+namespace videocapturemodule {
+VideoCaptureModuleV4L2::VideoCaptureModuleV4L2()
+ : VideoCaptureImpl(),
+ _deviceId(-1),
+ _deviceFd(-1),
+ _buffersAllocatedByDevice(-1),
+ _currentWidth(-1),
+ _currentHeight(-1),
+ _currentFrameRate(-1),
+ _captureStarted(false),
+ _captureVideoType(VideoType::kI420),
+ _pool(NULL) {}
+
+int32_t VideoCaptureModuleV4L2::Init(const char* deviceUniqueIdUTF8) {
+ int len = strlen((const char*)deviceUniqueIdUTF8);
+ _deviceUniqueId = new (std::nothrow) char[len + 1];
+ if (_deviceUniqueId) {
+ memcpy(_deviceUniqueId, deviceUniqueIdUTF8, len + 1);
+ }
+
+  int device_index;
+  if (sscanf(deviceUniqueIdUTF8, "fake_%d", &device_index) == 1) {
+    _deviceId = device_index;
+    return 0;
+  }
+
+ int fd;
+ char device[32];
+ bool found = false;
+
+ /* detect /dev/video [0-63] entries */
+ int n;
+ for (n = 0; n < 64; n++) {
+ snprintf(device, sizeof(device), "/dev/video%d", n);
+ if ((fd = open(device, O_RDONLY)) != -1) {
+ // query device capabilities
+ struct v4l2_capability cap;
+ if (ioctl(fd, VIDIOC_QUERYCAP, &cap) == 0) {
+ if (cap.bus_info[0] != 0) {
+ if (strncmp((const char*)cap.bus_info,
+ (const char*)deviceUniqueIdUTF8,
+ strlen((const char*)deviceUniqueIdUTF8)) ==
+ 0) { // match with device id
+ close(fd);
+ found = true;
+ break; // fd matches with device unique id supplied
+ }
+ }
+ }
+ close(fd); // close since this is not the matching device
+ }
+ }
+ if (!found) {
+ RTC_LOG(LS_INFO) << "no matching device found";
+ return -1;
+ }
+ _deviceId = n; // store the device id
+ return 0;
+}
+
+VideoCaptureModuleV4L2::~VideoCaptureModuleV4L2() {
+ StopCapture();
+ if (_deviceFd != -1)
+ close(_deviceFd);
+}
+
+int32_t VideoCaptureModuleV4L2::StartCapture(
+ const VideoCaptureCapability& capability) {
+ if (_captureStarted) {
+ if (capability.width == _currentWidth &&
+ capability.height == _currentHeight &&
+ _captureVideoType == capability.videoType) {
+ return 0;
+ } else {
+ StopCapture();
+ }
+ }
+
+ MutexLock lock(&capture_lock_);
+ // first open /dev/video device
+ char device[20];
+ snprintf(device, sizeof(device), "/dev/video%d", _deviceId);
+
+ if ((_deviceFd = open(device, O_RDWR | O_NONBLOCK, 0)) < 0) {
+ RTC_LOG(LS_INFO) << "error in opening " << device << " errono = " << errno;
+ return -1;
+ }
+
+ // Supported video formats in preferred order.
+ // If the requested resolution is larger than VGA, we prefer MJPEG. Go for
+ // I420 otherwise.
+ const int nFormats = 5;
+ unsigned int fmts[nFormats];
+ if (capability.width > 640 || capability.height > 480) {
+ fmts[0] = V4L2_PIX_FMT_MJPEG;
+ fmts[1] = V4L2_PIX_FMT_YUV420;
+ fmts[2] = V4L2_PIX_FMT_YUYV;
+ fmts[3] = V4L2_PIX_FMT_UYVY;
+ fmts[4] = V4L2_PIX_FMT_JPEG;
+ } else {
+ fmts[0] = V4L2_PIX_FMT_YUV420;
+ fmts[1] = V4L2_PIX_FMT_YUYV;
+ fmts[2] = V4L2_PIX_FMT_UYVY;
+ fmts[3] = V4L2_PIX_FMT_MJPEG;
+ fmts[4] = V4L2_PIX_FMT_JPEG;
+ }
+
+ // Enumerate image formats.
+ struct v4l2_fmtdesc fmt;
+ int fmtsIdx = nFormats;
+ memset(&fmt, 0, sizeof(fmt));
+ fmt.index = 0;
+ fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ RTC_LOG(LS_INFO) << "Video Capture enumerats supported image formats:";
+ while (ioctl(_deviceFd, VIDIOC_ENUM_FMT, &fmt) == 0) {
+ RTC_LOG(LS_INFO) << " { pixelformat = "
+ << cricket::GetFourccName(fmt.pixelformat)
+ << ", description = '" << fmt.description << "' }";
+ // Match the preferred order.
+ for (int i = 0; i < nFormats; i++) {
+ if (fmt.pixelformat == fmts[i] && i < fmtsIdx)
+ fmtsIdx = i;
+ }
+ // Keep enumerating.
+ fmt.index++;
+ }
+
+ if (fmtsIdx == nFormats) {
+ RTC_LOG(LS_INFO) << "no supporting video formats found";
+ return -1;
+ } else {
+ RTC_LOG(LS_INFO) << "We prefer format "
+ << cricket::GetFourccName(fmts[fmtsIdx]);
+ }
+
+ struct v4l2_format video_fmt;
+ memset(&video_fmt, 0, sizeof(struct v4l2_format));
+ video_fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ video_fmt.fmt.pix.sizeimage = 0;
+ video_fmt.fmt.pix.width = capability.width;
+ video_fmt.fmt.pix.height = capability.height;
+ video_fmt.fmt.pix.pixelformat = fmts[fmtsIdx];
+
+ if (video_fmt.fmt.pix.pixelformat == V4L2_PIX_FMT_YUYV)
+ _captureVideoType = VideoType::kYUY2;
+ else if (video_fmt.fmt.pix.pixelformat == V4L2_PIX_FMT_YUV420)
+ _captureVideoType = VideoType::kI420;
+ else if (video_fmt.fmt.pix.pixelformat == V4L2_PIX_FMT_UYVY)
+ _captureVideoType = VideoType::kUYVY;
+ else if (video_fmt.fmt.pix.pixelformat == V4L2_PIX_FMT_MJPEG ||
+ video_fmt.fmt.pix.pixelformat == V4L2_PIX_FMT_JPEG)
+ _captureVideoType = VideoType::kMJPEG;
+
+ // set format and frame size now
+ if (ioctl(_deviceFd, VIDIOC_S_FMT, &video_fmt) < 0) {
+ RTC_LOG(LS_INFO) << "error in VIDIOC_S_FMT, errno = " << errno;
+ return -1;
+ }
+
+ // initialize current width and height
+ _currentWidth = video_fmt.fmt.pix.width;
+ _currentHeight = video_fmt.fmt.pix.height;
+
+  // Before trying to set the frame rate, check the driver capability.
+ bool driver_framerate_support = true;
+ struct v4l2_streamparm streamparms;
+ memset(&streamparms, 0, sizeof(streamparms));
+ streamparms.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ if (ioctl(_deviceFd, VIDIOC_G_PARM, &streamparms) < 0) {
+ RTC_LOG(LS_INFO) << "error in VIDIOC_G_PARM errno = " << errno;
+ driver_framerate_support = false;
+ // continue
+ } else {
+    // Check whether the V4L2_CAP_TIMEPERFRAME capability flag is set.
+    if (streamparms.parm.capture.capability & V4L2_CAP_TIMEPERFRAME) {
+      // The driver supports the feature. Set the requested frame rate.
+ memset(&streamparms, 0, sizeof(streamparms));
+ streamparms.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ streamparms.parm.capture.timeperframe.numerator = 1;
+ streamparms.parm.capture.timeperframe.denominator = capability.maxFPS;
+ if (ioctl(_deviceFd, VIDIOC_S_PARM, &streamparms) < 0) {
+ RTC_LOG(LS_INFO) << "Failed to set the framerate. errno=" << errno;
+ driver_framerate_support = false;
+ } else {
+ _currentFrameRate = capability.maxFPS;
+ }
+ }
+ }
+  // If the driver doesn't support frame rate control, hardcode a value
+  // based on the frame size.
+ if (!driver_framerate_support) {
+ if (_currentWidth >= 800 && _captureVideoType != VideoType::kMJPEG) {
+ _currentFrameRate = 15;
+ } else {
+ _currentFrameRate = 30;
+ }
+ }
+
+ if (!AllocateVideoBuffers()) {
+ RTC_LOG(LS_INFO) << "failed to allocate video capture buffers";
+ return -1;
+ }
+
+  // Start the capture thread.
+ if (_captureThread.empty()) {
+ quit_ = false;
+ _captureThread = rtc::PlatformThread::SpawnJoinable(
+ [self = rtc::scoped_refptr(this)] {
+ while (self->CaptureProcess()) {
+ }
+ },
+ "CaptureThread",
+ rtc::ThreadAttributes().SetPriority(rtc::ThreadPriority::kHigh));
+ }
+
+ // Needed to start UVC camera - from the uvcview application
+ enum v4l2_buf_type type;
+ type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ if (ioctl(_deviceFd, VIDIOC_STREAMON, &type) == -1) {
+ RTC_LOG(LS_INFO) << "Failed to turn on stream";
+ return -1;
+ }
+
+ _captureStarted = true;
+ return 0;
+}
+
+int32_t VideoCaptureModuleV4L2::StopCapture() {
+ if (!_captureThread.empty()) {
+ {
+ MutexLock lock(&capture_lock_);
+ quit_ = true;
+ }
+ // Make sure the capture thread stops using the mutex.
+ _captureThread.Finalize();
+ }
+
+ MutexLock lock(&capture_lock_);
+ if (_captureStarted) {
+ _captureStarted = false;
+
+ DeAllocateVideoBuffers();
+ close(_deviceFd);
+ _deviceFd = -1;
+ }
+
+ return 0;
+}
+
+// critical section protected by the caller
+
+bool VideoCaptureModuleV4L2::AllocateVideoBuffers() {
+ struct v4l2_requestbuffers rbuffer;
+ memset(&rbuffer, 0, sizeof(v4l2_requestbuffers));
+
+ rbuffer.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ rbuffer.memory = V4L2_MEMORY_MMAP;
+  rbuffer.count = kNoOfV4L2Buffers;
+
+ if (ioctl(_deviceFd, VIDIOC_REQBUFS, &rbuffer) < 0) {
+ RTC_LOG(LS_INFO) << "Could not get buffers from device. errno = " << errno;
+ return false;
+ }
+
+  if (rbuffer.count > kNoOfV4L2Buffers)
+    rbuffer.count = kNoOfV4L2Buffers;
+
+ _buffersAllocatedByDevice = rbuffer.count;
+
+ // Map the buffers
+ _pool = new Buffer[rbuffer.count];
+
+ for (unsigned int i = 0; i < rbuffer.count; i++) {
+ struct v4l2_buffer buffer;
+ memset(&buffer, 0, sizeof(v4l2_buffer));
+ buffer.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ buffer.memory = V4L2_MEMORY_MMAP;
+ buffer.index = i;
+
+ if (ioctl(_deviceFd, VIDIOC_QUERYBUF, &buffer) < 0) {
+ return false;
+ }
+
+ _pool[i].start = mmap(NULL, buffer.length, PROT_READ | PROT_WRITE,
+ MAP_SHARED, _deviceFd, buffer.m.offset);
+
+ if (MAP_FAILED == _pool[i].start) {
+ for (unsigned int j = 0; j < i; j++)
+ munmap(_pool[j].start, _pool[j].length);
+ return false;
+ }
+
+ _pool[i].length = buffer.length;
+
+ if (ioctl(_deviceFd, VIDIOC_QBUF, &buffer) < 0) {
+ return false;
+ }
+ }
+ return true;
+}
+
+bool VideoCaptureModuleV4L2::DeAllocateVideoBuffers() {
+ // unmap buffers
+ for (int i = 0; i < _buffersAllocatedByDevice; i++)
+ munmap(_pool[i].start, _pool[i].length);
+
+ delete[] _pool;
+
+ // turn off stream
+ enum v4l2_buf_type type;
+ type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ if (ioctl(_deviceFd, VIDIOC_STREAMOFF, &type) < 0) {
+ RTC_LOG(LS_INFO) << "VIDIOC_STREAMOFF error. errno: " << errno;
+ }
+
+ return true;
+}
+
+bool VideoCaptureModuleV4L2::CaptureStarted() {
+ return _captureStarted;
+}
+
+bool VideoCaptureModuleV4L2::CaptureProcess() {
+ int retVal = 0;
+ struct pollfd rSet;
+
+ rSet.fd = _deviceFd;
+ rSet.events = POLLIN;
+ rSet.revents = 0;
+
+ retVal = poll(&rSet, 1, 1000);
+
+ {
+ MutexLock lock(&capture_lock_);
+
+ if (quit_) {
+ return false;
+ }
+
+ if (retVal < 0 && errno != EINTR) { // continue if interrupted
+ // poll failed
+ return false;
+ } else if (retVal == 0) {
+ // poll timed out
+ return true;
+    } else if (!(rSet.revents & POLLIN)) {
+      // no event on camera handle
+ return true;
+ }
+
+ if (_captureStarted) {
+ struct v4l2_buffer buf;
+ memset(&buf, 0, sizeof(struct v4l2_buffer));
+ buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ buf.memory = V4L2_MEMORY_MMAP;
+ // dequeue a buffer - repeat until dequeued properly!
+ while (ioctl(_deviceFd, VIDIOC_DQBUF, &buf) < 0) {
+ if (errno != EINTR) {
+ RTC_LOG(LS_INFO) << "could not sync on a buffer on device "
+ << strerror(errno);
+ return true;
+ }
+ }
+ VideoCaptureCapability frameInfo;
+ frameInfo.width = _currentWidth;
+ frameInfo.height = _currentHeight;
+ frameInfo.videoType = _captureVideoType;
+
+      // Convert to I420 if needed.
+ IncomingFrame(reinterpret_cast<uint8_t*>(_pool[buf.index].start),
+ buf.bytesused, frameInfo);
+ // enqueue the buffer again
+ if (ioctl(_deviceFd, VIDIOC_QBUF, &buf) == -1) {
+ RTC_LOG(LS_INFO) << "Failed to enqueue capture buffer";
+ }
+ }
+ }
+ usleep(0);
+ return true;
+}
+
+int32_t VideoCaptureModuleV4L2::CaptureSettings(
+ VideoCaptureCapability& settings) {
+ settings.width = _currentWidth;
+ settings.height = _currentHeight;
+ settings.maxFPS = _currentFrameRate;
+ settings.videoType = _captureVideoType;
+
+ return 0;
+}
+} // namespace videocapturemodule
+} // namespace webrtc
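Stripped of the class plumbing, StartCapture and CaptureProcess above implement
the standard V4L2 memory-mapped streaming sequence: VIDIOC_REQBUFS, then
VIDIOC_QUERYBUF + mmap + VIDIOC_QBUF per buffer, VIDIOC_STREAMON, and a
poll/VIDIOC_DQBUF/VIDIOC_QBUF loop. The following is a minimal single-buffer
sketch of that sequence under the assumption that the device's current format
is acceptable; the real code negotiates the format with VIDIOC_S_FMT first, and
the function name is illustrative.

    #include <fcntl.h>
    #include <linux/videodev2.h>
    #include <poll.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <unistd.h>

    bool CaptureOneFrame(const char* dev) {  // illustrative only
      int fd = open(dev, O_RDWR | O_NONBLOCK);
      if (fd < 0)
        return false;

      // Ask the driver for memory-mapped capture buffers.
      struct v4l2_requestbuffers req;
      memset(&req, 0, sizeof(req));
      req.count = 1;
      req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
      req.memory = V4L2_MEMORY_MMAP;
      if (ioctl(fd, VIDIOC_REQBUFS, &req) < 0) {
        close(fd);
        return false;
      }

      // Map buffer 0 into our address space and queue it.
      struct v4l2_buffer buf;
      memset(&buf, 0, sizeof(buf));
      buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
      buf.memory = V4L2_MEMORY_MMAP;
      buf.index = 0;
      if (ioctl(fd, VIDIOC_QUERYBUF, &buf) < 0) {
        close(fd);
        return false;
      }
      void* start = mmap(NULL, buf.length, PROT_READ | PROT_WRITE, MAP_SHARED,
                         fd, buf.m.offset);
      if (start == MAP_FAILED) {
        close(fd);
        return false;
      }
      ioctl(fd, VIDIOC_QBUF, &buf);

      enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
      ioctl(fd, VIDIOC_STREAMON, &type);

      // Wait for one frame, then dequeue it.
      struct pollfd pfd = {fd, POLLIN, 0};
      bool got = poll(&pfd, 1, 1000) > 0 && ioctl(fd, VIDIOC_DQBUF, &buf) == 0;
      // On success, buf.bytesused bytes at `start` hold one captured frame.

      ioctl(fd, VIDIOC_STREAMOFF, &type);
      munmap(start, buf.length);
      close(fd);
      return got;
    }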
diff --git a/third_party/libwebrtc/modules/video_capture/linux/video_capture_v4l2.h b/third_party/libwebrtc/modules/video_capture/linux/video_capture_v4l2.h
new file mode 100644
index 0000000000..65e89e2daa
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_capture/linux/video_capture_v4l2.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CAPTURE_LINUX_VIDEO_CAPTURE_V4L2_H_
+#define MODULES_VIDEO_CAPTURE_LINUX_VIDEO_CAPTURE_V4L2_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <memory>
+
+#include "modules/video_capture/video_capture_defines.h"
+#include "modules/video_capture/video_capture_impl.h"
+#include "rtc_base/platform_thread.h"
+#include "rtc_base/synchronization/mutex.h"
+
+namespace webrtc {
+namespace videocapturemodule {
+class VideoCaptureModuleV4L2 : public VideoCaptureImpl {
+ public:
+ VideoCaptureModuleV4L2();
+ ~VideoCaptureModuleV4L2() override;
+ int32_t Init(const char* deviceUniqueId);
+ int32_t StartCapture(const VideoCaptureCapability& capability) override;
+ int32_t StopCapture() override;
+ bool CaptureStarted() override;
+ int32_t CaptureSettings(VideoCaptureCapability& settings) override;
+
+ private:
+  enum { kNoOfV4L2Buffers = 4 };
+
+ static void CaptureThread(void*);
+ bool CaptureProcess();
+ bool AllocateVideoBuffers();
+ bool DeAllocateVideoBuffers();
+
+ rtc::PlatformThread _captureThread;
+ Mutex capture_lock_;
+ bool quit_ RTC_GUARDED_BY(capture_lock_);
+ int32_t _deviceId;
+ int32_t _deviceFd;
+
+ int32_t _buffersAllocatedByDevice;
+ int32_t _currentWidth;
+ int32_t _currentHeight;
+ int32_t _currentFrameRate;
+ bool _captureStarted;
+ VideoType _captureVideoType;
+ struct Buffer {
+ void* start;
+ size_t length;
+ };
+ Buffer* _pool;
+};
+} // namespace videocapturemodule
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CAPTURE_LINUX_VIDEO_CAPTURE_V4L2_H_
diff --git a/third_party/libwebrtc/modules/video_capture/test/video_capture_unittest.cc b/third_party/libwebrtc/modules/video_capture/test/video_capture_unittest.cc
new file mode 100644
index 0000000000..4cf3d5931c
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_capture/test/video_capture_unittest.cc
@@ -0,0 +1,343 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_capture/video_capture.h"
+
+#include <stdio.h>
+
+#include <map>
+#include <memory>
+#include <sstream>
+
+#include "absl/memory/memory.h"
+#include "api/scoped_refptr.h"
+#include "api/video/i420_buffer.h"
+#include "api/video/video_frame.h"
+#include "common_video/libyuv/include/webrtc_libyuv.h"
+#include "modules/video_capture/video_capture_factory.h"
+#include "rtc_base/synchronization/mutex.h"
+#include "rtc_base/time_utils.h"
+#include "system_wrappers/include/sleep.h"
+#include "test/frame_utils.h"
+#include "test/gtest.h"
+
+using webrtc::SleepMs;
+using webrtc::VideoCaptureCapability;
+using webrtc::VideoCaptureFactory;
+using webrtc::VideoCaptureModule;
+
+#define WAIT_(ex, timeout, res) \
+ do { \
+ res = (ex); \
+ int64_t start = rtc::TimeMillis(); \
+ while (!res && rtc::TimeMillis() < start + timeout) { \
+ SleepMs(5); \
+ res = (ex); \
+ } \
+ } while (0)
+
+#define EXPECT_TRUE_WAIT(ex, timeout) \
+ do { \
+ bool res; \
+ WAIT_(ex, timeout, res); \
+ if (!res) \
+ EXPECT_TRUE(ex); \
+ } while (0)
+
+static const int kTimeOut = 5000;
+#ifdef WEBRTC_MAC
+static const int kTestHeight = 288;
+static const int kTestWidth = 352;
+static const int kTestFramerate = 30;
+#endif
+
+class TestVideoCaptureCallback
+ : public rtc::VideoSinkInterface<webrtc::VideoFrame> {
+ public:
+ TestVideoCaptureCallback()
+ : last_render_time_ms_(0),
+ incoming_frames_(0),
+ timing_warnings_(0),
+ rotate_frame_(webrtc::kVideoRotation_0) {}
+
+ ~TestVideoCaptureCallback() override {
+ if (timing_warnings_ > 0)
+ printf("No of timing warnings %d\n", timing_warnings_);
+ }
+
+ void OnFrame(const webrtc::VideoFrame& videoFrame) override {
+ webrtc::MutexLock lock(&capture_lock_);
+ int height = videoFrame.height();
+ int width = videoFrame.width();
+#if defined(WEBRTC_ANDROID) && WEBRTC_ANDROID
+ // Android camera frames may be rotated depending on test device
+ // orientation.
+ EXPECT_TRUE(height == capability_.height || height == capability_.width);
+ EXPECT_TRUE(width == capability_.width || width == capability_.height);
+#else
+ EXPECT_EQ(height, capability_.height);
+ EXPECT_EQ(width, capability_.width);
+ EXPECT_EQ(rotate_frame_, videoFrame.rotation());
+#endif
+    // RenderTimestamp should be the time now.
+ EXPECT_TRUE(videoFrame.render_time_ms() >= rtc::TimeMillis() - 30 &&
+ videoFrame.render_time_ms() <= rtc::TimeMillis());
+
+ if ((videoFrame.render_time_ms() >
+ last_render_time_ms_ + (1000 * 1.1) / capability_.maxFPS &&
+ last_render_time_ms_ > 0) ||
+ (videoFrame.render_time_ms() <
+ last_render_time_ms_ + (1000 * 0.9) / capability_.maxFPS &&
+ last_render_time_ms_ > 0)) {
+ timing_warnings_++;
+ }
+
+ incoming_frames_++;
+ last_render_time_ms_ = videoFrame.render_time_ms();
+ last_frame_ = videoFrame.video_frame_buffer();
+ }
+
+ void SetExpectedCapability(VideoCaptureCapability capability) {
+ webrtc::MutexLock lock(&capture_lock_);
+ capability_ = capability;
+ incoming_frames_ = 0;
+ last_render_time_ms_ = 0;
+ }
+ int incoming_frames() {
+ webrtc::MutexLock lock(&capture_lock_);
+ return incoming_frames_;
+ }
+
+ int timing_warnings() {
+ webrtc::MutexLock lock(&capture_lock_);
+ return timing_warnings_;
+ }
+ VideoCaptureCapability capability() {
+ webrtc::MutexLock lock(&capture_lock_);
+ return capability_;
+ }
+
+ bool CompareLastFrame(const webrtc::VideoFrame& frame) {
+ webrtc::MutexLock lock(&capture_lock_);
+ return webrtc::test::FrameBufsEqual(last_frame_,
+ frame.video_frame_buffer());
+ }
+
+ void SetExpectedCaptureRotation(webrtc::VideoRotation rotation) {
+ webrtc::MutexLock lock(&capture_lock_);
+ rotate_frame_ = rotation;
+ }
+
+ private:
+ webrtc::Mutex capture_lock_;
+ VideoCaptureCapability capability_;
+ int64_t last_render_time_ms_;
+ int incoming_frames_;
+ int timing_warnings_;
+ rtc::scoped_refptr<webrtc::VideoFrameBuffer> last_frame_;
+ webrtc::VideoRotation rotate_frame_;
+};
+
+class VideoCaptureTest : public ::testing::Test {
+ public:
+ VideoCaptureTest() : number_of_devices_(0) {}
+
+ void SetUp() override {
+ device_info_.reset(VideoCaptureFactory::CreateDeviceInfo());
+ RTC_DCHECK(device_info_.get());
+ number_of_devices_ = device_info_->NumberOfDevices();
+ ASSERT_GT(number_of_devices_, 0u);
+ }
+
+ rtc::scoped_refptr<VideoCaptureModule> OpenVideoCaptureDevice(
+ unsigned int device,
+ rtc::VideoSinkInterface<webrtc::VideoFrame>* callback) {
+ char device_name[256];
+ char unique_name[256];
+
+ EXPECT_EQ(0, device_info_->GetDeviceName(device, device_name, 256,
+ unique_name, 256));
+
+ rtc::scoped_refptr<VideoCaptureModule> module(
+ VideoCaptureFactory::Create(unique_name));
+ if (module.get() == NULL)
+ return nullptr;
+
+ EXPECT_FALSE(module->CaptureStarted());
+
+ module->RegisterCaptureDataCallback(callback);
+ return module;
+ }
+
+ void StartCapture(VideoCaptureModule* capture_module,
+ VideoCaptureCapability capability) {
+ ASSERT_EQ(0, capture_module->StartCapture(capability));
+ EXPECT_TRUE(capture_module->CaptureStarted());
+
+ VideoCaptureCapability resulting_capability;
+ EXPECT_EQ(0, capture_module->CaptureSettings(resulting_capability));
+ EXPECT_EQ(capability.width, resulting_capability.width);
+ EXPECT_EQ(capability.height, resulting_capability.height);
+ }
+
+ std::unique_ptr<VideoCaptureModule::DeviceInfo> device_info_;
+ unsigned int number_of_devices_;
+};
+
+#ifdef WEBRTC_MAC
+// Currently fails on Mac 64-bit, see
+// https://bugs.chromium.org/p/webrtc/issues/detail?id=5406
+#define MAYBE_CreateDelete DISABLED_CreateDelete
+#else
+#define MAYBE_CreateDelete CreateDelete
+#endif
+TEST_F(VideoCaptureTest, MAYBE_CreateDelete) {
+ for (int i = 0; i < 5; ++i) {
+ int64_t start_time = rtc::TimeMillis();
+ TestVideoCaptureCallback capture_observer;
+ rtc::scoped_refptr<VideoCaptureModule> module(
+ OpenVideoCaptureDevice(0, &capture_observer));
+ ASSERT_TRUE(module.get() != NULL);
+
+ VideoCaptureCapability capability;
+#ifndef WEBRTC_MAC
+ device_info_->GetCapability(module->CurrentDeviceName(), 0, capability);
+#else
+ capability.width = kTestWidth;
+ capability.height = kTestHeight;
+ capability.maxFPS = kTestFramerate;
+ capability.videoType = webrtc::VideoType::kUnknown;
+#endif
+ capture_observer.SetExpectedCapability(capability);
+ ASSERT_NO_FATAL_FAILURE(StartCapture(module.get(), capability));
+
+ // Less than 4s to start the camera.
+ EXPECT_LE(rtc::TimeMillis() - start_time, 4000);
+
+ // Make sure 5 frames are captured.
+ EXPECT_TRUE_WAIT(capture_observer.incoming_frames() >= 5, kTimeOut);
+
+ int64_t stop_time = rtc::TimeMillis();
+ EXPECT_EQ(0, module->StopCapture());
+ EXPECT_FALSE(module->CaptureStarted());
+
+ // Less than 3s to stop the camera.
+ EXPECT_LE(rtc::TimeMillis() - stop_time, 3000);
+ }
+}
+
+#ifdef WEBRTC_MAC
+// Currently fails on Mac 64-bit, see
+// https://bugs.chromium.org/p/webrtc/issues/detail?id=5406
+#define MAYBE_Capabilities DISABLED_Capabilities
+#else
+#define MAYBE_Capabilities Capabilities
+#endif
+TEST_F(VideoCaptureTest, MAYBE_Capabilities) {
+ TestVideoCaptureCallback capture_observer;
+
+ rtc::scoped_refptr<VideoCaptureModule> module(
+ OpenVideoCaptureDevice(0, &capture_observer));
+ ASSERT_TRUE(module.get() != NULL);
+
+ int number_of_capabilities =
+ device_info_->NumberOfCapabilities(module->CurrentDeviceName());
+ EXPECT_GT(number_of_capabilities, 0);
+ // Key is <width>x<height>, value is vector of maxFPS values at that
+ // resolution.
+ typedef std::map<std::string, std::vector<int> > FrameRatesByResolution;
+ FrameRatesByResolution frame_rates_by_resolution;
+ for (int i = 0; i < number_of_capabilities; ++i) {
+ VideoCaptureCapability capability;
+ EXPECT_EQ(0, device_info_->GetCapability(module->CurrentDeviceName(), i,
+ capability));
+ std::ostringstream resolutionStream;
+ resolutionStream << capability.width << "x" << capability.height;
+ resolutionStream.flush();
+ std::string resolution = resolutionStream.str();
+ frame_rates_by_resolution[resolution].push_back(capability.maxFPS);
+
+ // Since Android presents so many resolution/FPS combinations and the test
+ // runner imposes a timeout, we only actually start the capture and test
+ // that a frame was captured for 2 frame-rates at each resolution.
+ if (frame_rates_by_resolution[resolution].size() > 2)
+ continue;
+
+ capture_observer.SetExpectedCapability(capability);
+ ASSERT_NO_FATAL_FAILURE(StartCapture(module.get(), capability));
+ // Make sure at least one frame is captured.
+ EXPECT_TRUE_WAIT(capture_observer.incoming_frames() >= 1, kTimeOut);
+
+ EXPECT_EQ(0, module->StopCapture());
+ }
+
+#if defined(WEBRTC_ANDROID) && WEBRTC_ANDROID
+ // There's no reason for this to _necessarily_ be true, but in practice all
+ // Android devices this test runs on in fact do support multiple capture
+ // resolutions and multiple frame-rates per captured resolution, so we assert
+ // this fact here as a regression-test against the time that we only noticed a
+ // single frame-rate per resolution (bug 2974). If this test starts being run
+ // on devices for which this is untrue (e.g. Nexus4) then the following should
+ // probably be wrapped in a base::android::BuildInfo::model()/device() check.
+ EXPECT_GT(frame_rates_by_resolution.size(), 1U);
+ for (FrameRatesByResolution::const_iterator it =
+ frame_rates_by_resolution.begin();
+ it != frame_rates_by_resolution.end(); ++it) {
+ EXPECT_GT(it->second.size(), 1U) << it->first;
+ }
+#endif // WEBRTC_ANDROID
+}
+
+// NOTE: flaky, crashes sometimes.
+// http://code.google.com/p/webrtc/issues/detail?id=777
+TEST_F(VideoCaptureTest, DISABLED_TestTwoCameras) {
+ if (number_of_devices_ < 2) {
+ printf("There are not two cameras available. Aborting test. \n");
+ return;
+ }
+
+ TestVideoCaptureCallback capture_observer1;
+ rtc::scoped_refptr<VideoCaptureModule> module1(
+ OpenVideoCaptureDevice(0, &capture_observer1));
+ ASSERT_TRUE(module1.get() != NULL);
+ VideoCaptureCapability capability1;
+#ifndef WEBRTC_MAC
+ device_info_->GetCapability(module1->CurrentDeviceName(), 0, capability1);
+#else
+ capability1.width = kTestWidth;
+ capability1.height = kTestHeight;
+ capability1.maxFPS = kTestFramerate;
+ capability1.videoType = webrtc::VideoType::kUnknown;
+#endif
+ capture_observer1.SetExpectedCapability(capability1);
+
+ TestVideoCaptureCallback capture_observer2;
+ rtc::scoped_refptr<VideoCaptureModule> module2(
+ OpenVideoCaptureDevice(1, &capture_observer2));
+  ASSERT_TRUE(module2.get() != NULL);
+
+ VideoCaptureCapability capability2;
+#ifndef WEBRTC_MAC
+ device_info_->GetCapability(module2->CurrentDeviceName(), 0, capability2);
+#else
+ capability2.width = kTestWidth;
+ capability2.height = kTestHeight;
+ capability2.maxFPS = kTestFramerate;
+ capability2.videoType = webrtc::VideoType::kUnknown;
+#endif
+ capture_observer2.SetExpectedCapability(capability2);
+
+ ASSERT_NO_FATAL_FAILURE(StartCapture(module1.get(), capability1));
+ ASSERT_NO_FATAL_FAILURE(StartCapture(module2.get(), capability2));
+ EXPECT_TRUE_WAIT(capture_observer1.incoming_frames() >= 5, kTimeOut);
+ EXPECT_TRUE_WAIT(capture_observer2.incoming_frames() >= 5, kTimeOut);
+ EXPECT_EQ(0, module2->StopCapture());
+ EXPECT_EQ(0, module1->StopCapture());
+}
diff --git a/third_party/libwebrtc/modules/video_capture/video_capture.h b/third_party/libwebrtc/modules/video_capture/video_capture.h
new file mode 100644
index 0000000000..f74491252f
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_capture/video_capture.h
@@ -0,0 +1,166 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CAPTURE_VIDEO_CAPTURE_H_
+#define MODULES_VIDEO_CAPTURE_VIDEO_CAPTURE_H_
+
+#include "api/video/video_rotation.h"
+#include "api/video/video_sink_interface.h"
+#include "modules/desktop_capture/desktop_capture_types.h"
+#include "modules/video_capture/video_capture_defines.h"
+#include "rtc_base/synchronization/mutex.h"
+#include "rtc_base/thread_annotations.h"
+#include <set>
+
+#if defined(ANDROID)
+#include <jni.h>
+#endif
+
+namespace webrtc {
+
+class VideoInputFeedBack {
+ public:
+  virtual void OnDeviceChange() = 0;
+
+ protected:
+  virtual ~VideoInputFeedBack() {}
+};
+
+#if defined(ANDROID) && !defined(WEBRTC_CHROMIUM_BUILD)
+ int32_t SetCaptureAndroidVM(JavaVM* javaVM);
+#endif
+
+class VideoCaptureModule : public rtc::RefCountInterface {
+ public:
+ // Interface for receiving information about available camera devices.
+ class DeviceInfo {
+ public:
+ virtual uint32_t NumberOfDevices() = 0;
+ virtual int32_t Refresh() = 0;
+ virtual void DeviceChange() {
+ MutexLock lock(&_inputCallbacksMutex);
+ for (auto inputCallBack : _inputCallBacks) {
+ inputCallBack->OnDeviceChange();
+ }
+ }
+ virtual void RegisterVideoInputFeedBack(VideoInputFeedBack* callBack) {
+ MutexLock lock(&_inputCallbacksMutex);
+ _inputCallBacks.insert(callBack);
+ }
+
+ virtual void DeRegisterVideoInputFeedBack(VideoInputFeedBack* callBack) {
+ MutexLock lock(&_inputCallbacksMutex);
+ auto it = _inputCallBacks.find(callBack);
+ if (it != _inputCallBacks.end()) {
+ _inputCallBacks.erase(it);
+ }
+ }
+
+ // Returns the available capture devices.
+ // deviceNumber - Index of capture device.
+ // deviceNameUTF8 - Friendly name of the capture device.
+    // deviceUniqueIdUTF8 - Unique name of the capture device if it exists.
+    //                      Otherwise same as deviceNameUTF8.
+    // productUniqueIdUTF8 - Unique product id if it exists.
+    //                      Null terminated otherwise.
+ virtual int32_t GetDeviceName(uint32_t deviceNumber,
+ char* deviceNameUTF8,
+ uint32_t deviceNameLength,
+ char* deviceUniqueIdUTF8,
+ uint32_t deviceUniqueIdUTF8Length,
+ char* productUniqueIdUTF8 = 0,
+ uint32_t productUniqueIdUTF8Length = 0,
+ pid_t* pid = 0) = 0;
+
+    // Returns the number of capabilities of this device.
+ virtual int32_t NumberOfCapabilities(const char* deviceUniqueIdUTF8) = 0;
+
+ // Gets the capabilities of the named device.
+ virtual int32_t GetCapability(const char* deviceUniqueIdUTF8,
+ uint32_t deviceCapabilityNumber,
+ VideoCaptureCapability& capability) = 0;
+
+    // Gets the clockwise angle the captured frames should be rotated in
+    // order to be displayed correctly on a normally rotated display.
+ virtual int32_t GetOrientation(const char* deviceUniqueIdUTF8,
+ VideoRotation& orientation) = 0;
+
+ // Gets the capability that best matches the requested width, height and
+ // frame rate.
+ // Returns the deviceCapabilityNumber on success.
+ virtual int32_t GetBestMatchedCapability(
+ const char* deviceUniqueIdUTF8,
+ const VideoCaptureCapability& requested,
+ VideoCaptureCapability& resulting) = 0;
+
+    // Displays an OS- or capture-device-specific settings dialog.
+ virtual int32_t DisplayCaptureSettingsDialogBox(
+ const char* deviceUniqueIdUTF8,
+ const char* dialogTitleUTF8,
+ void* parentWindow,
+ uint32_t positionX,
+ uint32_t positionY) = 0;
+
+    virtual ~DeviceInfo() {}
+
+   private:
+    Mutex _inputCallbacksMutex;
+    std::set<VideoInputFeedBack*> _inputCallBacks
+        RTC_GUARDED_BY(_inputCallbacksMutex);
+ };
+
+ // Register capture data callback
+ virtual void RegisterCaptureDataCallback(
+ rtc::VideoSinkInterface<VideoFrame>* dataCallback) = 0;
+
+ // Remove capture data callback
+ virtual void DeRegisterCaptureDataCallback(
+ rtc::VideoSinkInterface<VideoFrame> *dataCallback) = 0;
+
+ // Start capture device
+ virtual int32_t StartCapture(const VideoCaptureCapability& capability) = 0;
+
+ virtual int32_t StopCaptureIfAllClientsClose() = 0;
+
+ virtual bool FocusOnSelectedSource() { return false; }
+
+ virtual int32_t StopCapture() = 0;
+
+ // Returns the name of the device used by this module.
+ virtual const char* CurrentDeviceName() const = 0;
+
+ // Returns true if the capture device is running
+ virtual bool CaptureStarted() = 0;
+
+ // Gets the current configuration.
+ virtual int32_t CaptureSettings(VideoCaptureCapability& settings) = 0;
+
+ // Set the rotation of the captured frames.
+ // If the rotation is set to the same as returned by
+ // DeviceInfo::GetOrientation the captured frames are
+ // displayed correctly if rendered.
+ virtual int32_t SetCaptureRotation(VideoRotation rotation) = 0;
+
+ // Tells the capture module whether to apply the pending rotation. By default,
+  // the rotation is applied and the generated frame is upright. When set to
+ // false, generated frames will carry the rotation information from
+ // SetCaptureRotation. Return value indicates whether this operation succeeds.
+ virtual bool SetApplyRotation(bool enable) = 0;
+
+ // Return whether the rotation is applied or left pending.
+ virtual bool GetApplyRotation() = 0;
+
+ // Mozilla: TrackingId setter for use in profiler markers.
+ virtual void SetTrackingId(uint32_t aTrackingIdProcId) {}
+
+ protected:
+ ~VideoCaptureModule() override {}
+};
+
+} // namespace webrtc
+#endif // MODULES_VIDEO_CAPTURE_VIDEO_CAPTURE_H_
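Putting the interface together: a consumer implements
rtc::VideoSinkInterface<VideoFrame> and drives the module through
StartCapture/StopCapture. A minimal sketch, assuming a fixed 640x480@30
capability and hypothetical class and function names; a real caller would pick
the capability via DeviceInfo::GetBestMatchedCapability instead.

    #include "api/scoped_refptr.h"
    #include "api/video/video_frame.h"
    #include "api/video/video_sink_interface.h"
    #include "modules/video_capture/video_capture_factory.h"

    class CountingSink : public rtc::VideoSinkInterface<webrtc::VideoFrame> {
     public:
      void OnFrame(const webrtc::VideoFrame& frame) override {
        ++frames_;  // Called on the capture thread for every frame.
      }
      int frames_ = 0;
    };

    void RunCapture(const char* unique_id) {  // illustrative only
      rtc::scoped_refptr<webrtc::VideoCaptureModule> module =
          webrtc::VideoCaptureFactory::Create(unique_id);
      if (!module)
        return;
      CountingSink sink;
      module->RegisterCaptureDataCallback(&sink);
      webrtc::VideoCaptureCapability cap;
      cap.width = 640;
      cap.height = 480;
      cap.maxFPS = 30;
      if (module->StartCapture(cap) == 0) {
        // ... frames arrive asynchronously via CountingSink::OnFrame ...
        module->StopCapture();
      }
      module->DeRegisterCaptureDataCallback(&sink);
    }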
diff --git a/third_party/libwebrtc/modules/video_capture/video_capture_config.h b/third_party/libwebrtc/modules/video_capture/video_capture_config.h
new file mode 100644
index 0000000000..f285b9eeb1
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_capture/video_capture_config.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CAPTURE_MAIN_SOURCE_VIDEO_CAPTURE_CONFIG_H_
+#define MODULES_VIDEO_CAPTURE_MAIN_SOURCE_VIDEO_CAPTURE_CONFIG_H_
+
+namespace webrtc {
+namespace videocapturemodule {
+enum { kDefaultWidth = 640 }; // Start width
+enum { kDefaultHeight = 480 }; // Start heigt
+enum { kDefaultFrameRate = 30 }; // Start frame rate
+
+enum { kMaxFrameRate = 60 }; // Max allowed frame rate of the start image
+
+enum { kDefaultCaptureDelay = 120 };
+enum {
+ kMaxCaptureDelay = 270
+}; // Max capture delay allowed in the precompiled capture delay values.
+
+enum { kFrameRateCallbackInterval = 1000 };
+enum { kFrameRateCountHistorySize = 90 };
+enum { kFrameRateHistoryWindowMs = 2000 };
+} // namespace videocapturemodule
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CAPTURE_MAIN_SOURCE_VIDEO_CAPTURE_CONFIG_H_
diff --git a/third_party/libwebrtc/modules/video_capture/video_capture_defines.h b/third_party/libwebrtc/modules/video_capture/video_capture_defines.h
new file mode 100644
index 0000000000..23e29af74f
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_capture/video_capture_defines.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CAPTURE_VIDEO_CAPTURE_DEFINES_H_
+#define MODULES_VIDEO_CAPTURE_VIDEO_CAPTURE_DEFINES_H_
+
+#include "api/video/video_frame.h"
+#include "common_video/libyuv/include/webrtc_libyuv.h"
+
+namespace webrtc {
+
+enum {
+ kVideoCaptureUniqueNameLength = 1024
+}; // Max unique capture device name lenght
+enum { kVideoCaptureDeviceNameLength = 256 }; // Max capture device name lenght
+enum { kVideoCaptureProductIdLength = 128 }; // Max product id length
+
+struct VideoCaptureCapability {
+ int32_t width;
+ int32_t height;
+ int32_t maxFPS;
+ VideoType videoType;
+ bool interlaced;
+
+ VideoCaptureCapability() {
+ width = 0;
+ height = 0;
+ maxFPS = 0;
+ videoType = VideoType::kUnknown;
+ interlaced = false;
+ }
+ bool operator!=(const VideoCaptureCapability& other) const {
+ if (width != other.width)
+ return true;
+ if (height != other.height)
+ return true;
+ if (maxFPS != other.maxFPS)
+ return true;
+ if (videoType != other.videoType)
+ return true;
+ if (interlaced != other.interlaced)
+ return true;
+ return false;
+ }
+ bool operator==(const VideoCaptureCapability& other) const {
+ return !operator!=(other);
+ }
+};
+
+// Callback class to be implemented by module user
+class VideoCaptureDataCallback {
+ public:
+  virtual void OnIncomingCapturedFrame(const int32_t id,
+                                       const VideoFrame& videoFrame) = 0;
+  virtual void OnCaptureDelayChanged(const int32_t id,
+                                     const int32_t delay) = 0;
+
+ protected:
+  virtual ~VideoCaptureDataCallback() {}
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CAPTURE_VIDEO_CAPTURE_DEFINES_H_
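VideoCaptureCapability's operators compare all five fields, so a change to any
requested parameter makes two capabilities unequal. A small illustration,
assuming only this header; the variable names are arbitrary.

    #include <cassert>

    #include "modules/video_capture/video_capture_defines.h"

    void CompareCapabilities() {  // illustrative only
      webrtc::VideoCaptureCapability a;
      a.width = 1280;
      a.height = 720;
      a.maxFPS = 30;
      webrtc::VideoCaptureCapability b = a;
      b.maxFPS = 15;
      // They differ only in maxFPS, yet a != b: operator!= checks width,
      // height, maxFPS, videoType and interlaced.
      assert(a != b && !(a == b));
    }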
diff --git a/third_party/libwebrtc/modules/video_capture/video_capture_factory.cc b/third_party/libwebrtc/modules/video_capture/video_capture_factory.cc
new file mode 100644
index 0000000000..e4a46902e0
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_capture/video_capture_factory.cc
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_capture/video_capture_factory.h"
+
+#include "modules/video_capture/video_capture_impl.h"
+
+namespace webrtc {
+
+rtc::scoped_refptr<VideoCaptureModule> VideoCaptureFactory::Create(
+ const char* deviceUniqueIdUTF8) {
+ return videocapturemodule::VideoCaptureImpl::Create(deviceUniqueIdUTF8);
+}
+
+VideoCaptureModule::DeviceInfo* VideoCaptureFactory::CreateDeviceInfo() {
+ return videocapturemodule::VideoCaptureImpl::CreateDeviceInfo();
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_capture/video_capture_factory.h b/third_party/libwebrtc/modules/video_capture/video_capture_factory.h
new file mode 100644
index 0000000000..1fe47d9fab
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_capture/video_capture_factory.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+// This file contains interfaces used for creating the VideoCaptureModule
+// and DeviceInfo.
+
+#ifndef MODULES_VIDEO_CAPTURE_VIDEO_CAPTURE_FACTORY_H_
+#define MODULES_VIDEO_CAPTURE_VIDEO_CAPTURE_FACTORY_H_
+
+#include "api/scoped_refptr.h"
+#include "modules/video_capture/video_capture.h"
+#include "modules/video_capture/video_capture_defines.h"
+
+namespace webrtc {
+
+class VideoCaptureFactory {
+ public:
+  // Create a video capture module object.
+  // deviceUniqueIdUTF8 - unique name of the device.
+  //                      Available names can be found by using GetDeviceName.
+ static rtc::scoped_refptr<VideoCaptureModule> Create(
+ const char* deviceUniqueIdUTF8);
+
+ static VideoCaptureModule::DeviceInfo* CreateDeviceInfo();
+
+ private:
+ ~VideoCaptureFactory();
+};
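+// A minimal usage sketch (`device_unique_id` and `capability` are
+// illustrative placeholders; the unique id comes from
+// VideoCaptureModule::DeviceInfo::GetDeviceName()):
+//
+//   rtc::scoped_refptr<VideoCaptureModule> module =
+//       VideoCaptureFactory::Create(device_unique_id);
+//   if (module) {
+//     module->StartCapture(capability);  // capability: a
+//                                        // VideoCaptureCapability of choice
+//   }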
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CAPTURE_VIDEO_CAPTURE_FACTORY_H_
diff --git a/third_party/libwebrtc/modules/video_capture/video_capture_factory_null.cc b/third_party/libwebrtc/modules/video_capture/video_capture_factory_null.cc
new file mode 100644
index 0000000000..7808d19851
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_capture/video_capture_factory_null.cc
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_capture/video_capture_impl.h"
+
+namespace webrtc {
+namespace videocapturemodule {
+
+// static
+VideoCaptureModule::DeviceInfo* VideoCaptureImpl::CreateDeviceInfo() {
+ return nullptr;
+}
+
+rtc::scoped_refptr<VideoCaptureModule> VideoCaptureImpl::Create(
+ const char* device_id) {
+ return nullptr;
+}
+
+} // namespace videocapturemodule
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_capture/video_capture_impl.cc b/third_party/libwebrtc/modules/video_capture/video_capture_impl.cc
new file mode 100644
index 0000000000..6c0b45f660
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_capture/video_capture_impl.cc
@@ -0,0 +1,302 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_capture/video_capture_impl.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+#include <utility>
+
+#include "api/video/i420_buffer.h"
+#include "api/video/video_frame_buffer.h"
+#include "common_video/libyuv/include/webrtc_libyuv.h"
+#include "modules/video_capture/video_capture_config.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/time_utils.h"
+#include "rtc_base/trace_event.h"
+#include "libyuv/include/libyuv.h"
+
+namespace webrtc {
+namespace videocapturemodule {
+
+const char* VideoCaptureImpl::CurrentDeviceName() const {
+ return _deviceUniqueId;
+}
+
+// static
+int32_t VideoCaptureImpl::RotationFromDegrees(int degrees,
+ VideoRotation* rotation) {
+ switch (degrees) {
+ case 0:
+ *rotation = kVideoRotation_0;
+ return 0;
+ case 90:
+ *rotation = kVideoRotation_90;
+ return 0;
+ case 180:
+ *rotation = kVideoRotation_180;
+ return 0;
+ case 270:
+ *rotation = kVideoRotation_270;
+ return 0;
+    default:
+      return -1;
+  }
+}
+
+// static
+int32_t VideoCaptureImpl::RotationInDegrees(VideoRotation rotation,
+ int* degrees) {
+ switch (rotation) {
+ case kVideoRotation_0:
+ *degrees = 0;
+ return 0;
+ case kVideoRotation_90:
+ *degrees = 90;
+ return 0;
+ case kVideoRotation_180:
+ *degrees = 180;
+ return 0;
+ case kVideoRotation_270:
+ *degrees = 270;
+ return 0;
+ }
+ return -1;
+}
+
+VideoCaptureImpl::VideoCaptureImpl()
+ : _deviceUniqueId(NULL),
+ _requestedCapability(),
+ _lastProcessTimeNanos(rtc::TimeNanos()),
+ _lastFrameRateCallbackTimeNanos(rtc::TimeNanos()),
+ _lastProcessFrameTimeNanos(rtc::TimeNanos()),
+ _rotateFrame(kVideoRotation_0),
+ apply_rotation_(false) {
+ _requestedCapability.width = kDefaultWidth;
+ _requestedCapability.height = kDefaultHeight;
+  _requestedCapability.maxFPS = kDefaultFrameRate;
+ _requestedCapability.videoType = VideoType::kI420;
+ memset(_incomingFrameTimesNanos, 0, sizeof(_incomingFrameTimesNanos));
+}
+
+VideoCaptureImpl::~VideoCaptureImpl() {
+ if (_deviceUniqueId)
+ delete[] _deviceUniqueId;
+}
+
+void VideoCaptureImpl::RegisterCaptureDataCallback(
+ rtc::VideoSinkInterface<VideoFrame>* dataCallBack) {
+ MutexLock lock(&api_lock_);
+ _dataCallBacks.insert(dataCallBack);
+}
+
+void VideoCaptureImpl::DeRegisterCaptureDataCallback(
+ rtc::VideoSinkInterface<VideoFrame>* dataCallBack) {
+ MutexLock lock(&api_lock_);
+ auto it = _dataCallBacks.find(dataCallBack);
+ if (it != _dataCallBacks.end()) {
+ _dataCallBacks.erase(it);
+ }
+}
+
+int32_t VideoCaptureImpl::StopCaptureIfAllClientsClose() {
+ if (_dataCallBacks.empty()) {
+ return StopCapture();
+ } else {
+ return 0;
+ }
+}
+
+int32_t VideoCaptureImpl::DeliverCapturedFrame(VideoFrame& captureFrame) {
+ UpdateFrameCount(); // frame count used for local frame rate callback.
+
+ for (auto dataCallBack : _dataCallBacks) {
+ dataCallBack->OnFrame(captureFrame);
+ }
+
+ return 0;
+}
+
+int32_t VideoCaptureImpl::IncomingFrame(uint8_t* videoFrame,
+ size_t videoFrameLength,
+ const VideoCaptureCapability& frameInfo,
+ int64_t captureTime /*=0*/) {
+ MutexLock lock(&api_lock_);
+
+ const int32_t width = frameInfo.width;
+ const int32_t height = frameInfo.height;
+
+ TRACE_EVENT1("webrtc", "VC::IncomingFrame", "capture_time", captureTime);
+
+ // Not encoded, convert to I420.
+ if (frameInfo.videoType != VideoType::kMJPEG &&
+ CalcBufferSize(frameInfo.videoType, width, abs(height)) !=
+ videoFrameLength) {
+ RTC_LOG(LS_ERROR) << "Wrong incoming frame length.";
+ return -1;
+ }
+
+ int target_width = width;
+ int target_height = abs(height);
+
+ // SetApplyRotation doesn't take any lock. Make a local copy here.
+ bool apply_rotation = apply_rotation_;
+
+ if (apply_rotation &&
+ (_rotateFrame == kVideoRotation_90 ||
+ _rotateFrame == kVideoRotation_270)) {
+ target_width = abs(height);
+ target_height = width;
+ }
+
+ int stride_y = target_width;
+ int stride_uv = (target_width + 1) / 2;
+
+  // Setting absolute height (in case it was negative).
+  // On Windows, the image starts bottom left, instead of top left.
+  // Setting a negative source height inverts the image (within LibYuv).
+ rtc::scoped_refptr<I420Buffer> buffer = I420Buffer::Create(
+ target_width, target_height, stride_y, stride_uv, stride_uv);
+
+ libyuv::RotationMode rotation_mode = libyuv::kRotate0;
+ if (apply_rotation) {
+ switch (_rotateFrame) {
+ case kVideoRotation_0:
+ rotation_mode = libyuv::kRotate0;
+ break;
+ case kVideoRotation_90:
+ rotation_mode = libyuv::kRotate90;
+ break;
+ case kVideoRotation_180:
+ rotation_mode = libyuv::kRotate180;
+ break;
+ case kVideoRotation_270:
+ rotation_mode = libyuv::kRotate270;
+ break;
+ }
+ }
+
+ int dst_width = buffer->width();
+ int dst_height = buffer->height();
+
+  // LibYuv expects pre-rotation values for the dst dimensions.
+  // Stride values should correspond to the destination values.
+ if (rotation_mode == libyuv::kRotate90 || rotation_mode == libyuv::kRotate270) {
+ std::swap(dst_width, dst_height);
+ }
+
+ const int conversionResult = libyuv::ConvertToI420(
+ videoFrame, videoFrameLength, buffer.get()->MutableDataY(),
+ buffer.get()->StrideY(), buffer.get()->MutableDataU(),
+ buffer.get()->StrideU(), buffer.get()->MutableDataV(),
+ buffer.get()->StrideV(), 0, 0, // No Cropping
+ width, height, dst_width, dst_height, rotation_mode,
+ ConvertVideoType(frameInfo.videoType));
+ if (conversionResult != 0) {
+ RTC_LOG(LS_ERROR) << "Failed to convert capture frame from type "
+ << static_cast<int>(frameInfo.videoType) << "to I420.";
+ return -1;
+ }
+
+ VideoFrame captureFrame =
+ VideoFrame::Builder()
+ .set_video_frame_buffer(buffer)
+ .set_timestamp_rtp(0)
+ .set_timestamp_ms(rtc::TimeMillis())
+ .set_rotation(!apply_rotation ? _rotateFrame : kVideoRotation_0)
+ .build();
+ captureFrame.set_ntp_time_ms(captureTime);
+
+ // This is one ugly hack to let CamerasParent know what rotation
+ // the frame was captured at. Note that this goes against the intended
+ // meaning of rotation of the frame (how to rotate it before rendering).
+ // We do this so CamerasChild can scale to the proper dimensions
+ // later on in the pipe.
+ captureFrame.set_rotation(_rotateFrame);
+
+ DeliverCapturedFrame(captureFrame);
+
+ return 0;
+}
+
+int32_t VideoCaptureImpl::StartCapture(
+ const VideoCaptureCapability& capability) {
+ _requestedCapability = capability;
+ return -1;
+}
+
+int32_t VideoCaptureImpl::StopCapture() {
+ return -1;
+}
+
+bool VideoCaptureImpl::CaptureStarted() {
+ return false;
+}
+
+int32_t VideoCaptureImpl::CaptureSettings(
+ VideoCaptureCapability& /*settings*/) {
+ return -1;
+}
+
+int32_t VideoCaptureImpl::SetCaptureRotation(VideoRotation rotation) {
+ MutexLock lock(&api_lock_);
+ _rotateFrame = rotation;
+ return 0;
+}
+
+bool VideoCaptureImpl::SetApplyRotation(bool enable) {
+ // We can't take any lock here as it'll cause deadlock with IncomingFrame.
+
+ // The effect of this is the last caller wins.
+ apply_rotation_ = enable;
+ return true;
+}
+
+bool VideoCaptureImpl::GetApplyRotation() {
+ return apply_rotation_;
+}
+
+void VideoCaptureImpl::UpdateFrameCount() {
+  if (_incomingFrameTimesNanos[0] / rtc::kNumNanosecsPerMicrosec == 0) {
+    // First incoming frame; nothing to shift.
+  } else {
+    // Shift the history one step to make room for the new timestamp.
+ for (int i = (kFrameRateCountHistorySize - 2); i >= 0; --i) {
+ _incomingFrameTimesNanos[i + 1] = _incomingFrameTimesNanos[i];
+ }
+ }
+ _incomingFrameTimesNanos[0] = rtc::TimeNanos();
+}
+
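+// Counts the timestamps that fall inside the history window and converts the
+// count to frames per second. A worked example with illustrative numbers:
+// 45 frames counted over the last 1500 ms yields
+// uint32_t((45 * 1000.0f / 1500) + 0.5f) == 30 fps.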
+uint32_t VideoCaptureImpl::CalculateFrameRate(int64_t now_ns) {
+ int32_t num = 0;
+ int32_t nrOfFrames = 0;
+ for (num = 1; num < (kFrameRateCountHistorySize - 1); ++num) {
+ if (_incomingFrameTimesNanos[num] <= 0 ||
+ (now_ns - _incomingFrameTimesNanos[num]) /
+ rtc::kNumNanosecsPerMillisec >
+            kFrameRateHistoryWindowMs) {  // skip data older than the window
+ break;
+ } else {
+ nrOfFrames++;
+ }
+ }
+ if (num > 1) {
+ int64_t diff = (now_ns - _incomingFrameTimesNanos[num - 1]) /
+ rtc::kNumNanosecsPerMillisec;
+ if (diff > 0) {
+ return uint32_t((nrOfFrames * 1000.0f / diff) + 0.5f);
+ }
+ }
+
+ return nrOfFrames;
+}
+} // namespace videocapturemodule
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_capture/video_capture_impl.h b/third_party/libwebrtc/modules/video_capture/video_capture_impl.h
new file mode 100644
index 0000000000..c3c2b0b85f
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_capture/video_capture_impl.h
@@ -0,0 +1,112 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CAPTURE_MAIN_SOURCE_VIDEO_CAPTURE_IMPL_H_
+#define MODULES_VIDEO_CAPTURE_MAIN_SOURCE_VIDEO_CAPTURE_IMPL_H_
+
+/*
+ * video_capture_impl.h
+ */
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <set>
+
+#include "api/scoped_refptr.h"
+#include "api/video/video_frame.h"
+#include "api/video/video_rotation.h"
+#include "api/video/video_sink_interface.h"
+#include "modules/video_capture/video_capture.h"
+#include "modules/video_capture/video_capture_config.h"
+#include "modules/video_capture/video_capture_defines.h"
+#include "rtc_base/synchronization/mutex.h"
+
+namespace webrtc {
+
+namespace videocapturemodule {
+// Class definitions
+class VideoCaptureImpl : public VideoCaptureModule {
+ public:
+  /*
+   *   Create a video capture module object.
+   *
+   *   deviceUniqueIdUTF8 - unique name of the device. Available names can be
+   *                        found by using GetDeviceName.
+   */
+ static rtc::scoped_refptr<VideoCaptureModule> Create(
+ const char* deviceUniqueIdUTF8);
+
+ static DeviceInfo* CreateDeviceInfo();
+
+ // Helpers for converting between (integral) degrees and
+ // VideoRotation values. Return 0 on success.
+ static int32_t RotationFromDegrees(int degrees, VideoRotation* rotation);
+ static int32_t RotationInDegrees(VideoRotation rotation, int* degrees);
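+  // Illustrative example: given `VideoRotation r;`,
+  // RotationFromDegrees(90, &r) returns 0 and sets r to kVideoRotation_90,
+  // while RotationFromDegrees(45, &r) returns -1 and leaves r untouched.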
+
+  // Callbacks
+ void RegisterCaptureDataCallback(
+ rtc::VideoSinkInterface<VideoFrame>* dataCallback) override;
+ void DeRegisterCaptureDataCallback(
+ rtc::VideoSinkInterface<VideoFrame>* dataCallback) override;
+
+ int32_t StopCaptureIfAllClientsClose() override;
+ int32_t SetCaptureRotation(VideoRotation rotation) override;
+ bool SetApplyRotation(bool enable) override;
+ bool GetApplyRotation() override;
+
+ const char* CurrentDeviceName() const override;
+
+ // `capture_time` must be specified in NTP time format in milliseconds.
+ int32_t IncomingFrame(uint8_t* videoFrame,
+ size_t videoFrameLength,
+ const VideoCaptureCapability& frameInfo,
+ int64_t captureTime = 0);
+
+ // Platform dependent
+ int32_t StartCapture(const VideoCaptureCapability& capability) override;
+ int32_t StopCapture() override;
+ bool CaptureStarted() override;
+ int32_t CaptureSettings(VideoCaptureCapability& /*settings*/) override;
+
+ protected:
+ VideoCaptureImpl();
+ ~VideoCaptureImpl() override;
+
+ // moved DeliverCapturedFrame to protected for VideoCaptureAndroid (mjf)
+ int32_t DeliverCapturedFrame(VideoFrame& captureFrame);
+
+  char* _deviceUniqueId;  // Current device unique name.
+ Mutex api_lock_;
+ VideoCaptureCapability _requestedCapability; // Should be set by platform
+ // dependent code in
+ // StartCapture.
+ private:
+ void UpdateFrameCount();
+ uint32_t CalculateFrameRate(int64_t now_ns);
+
+ // last time the module process function was called.
+ int64_t _lastProcessTimeNanos;
+ // last time the frame rate callback function was called.
+ int64_t _lastFrameRateCallbackTimeNanos;
+
+ std::set<rtc::VideoSinkInterface<VideoFrame>*> _dataCallBacks;
+
+ int64_t _lastProcessFrameTimeNanos;
+ // timestamp for local captured frames
+ int64_t _incomingFrameTimesNanos[kFrameRateCountHistorySize];
+ VideoRotation _rotateFrame; // Set if the frame should be rotated by the
+ // capture module.
+
+ // Indicate whether rotation should be applied before delivered externally.
+ bool apply_rotation_;
+};
+} // namespace videocapturemodule
+} // namespace webrtc
+#endif // MODULES_VIDEO_CAPTURE_MAIN_SOURCE_VIDEO_CAPTURE_IMPL_H_
diff --git a/third_party/libwebrtc/modules/video_capture/video_capture_internal_impl_gn/moz.build b/third_party/libwebrtc/modules/video_capture/video_capture_internal_impl_gn/moz.build
new file mode 100644
index 0000000000..d7b98c3a78
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_capture/video_capture_internal_impl_gn/moz.build
@@ -0,0 +1,239 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "GLESv2",
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "dl",
+ "rt"
+ ]
+
+ UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/video_capture/linux/device_info_linux.cc",
+ "/third_party/libwebrtc/modules/video_capture/linux/device_info_v4l2.cc",
+ "/third_party/libwebrtc/modules/video_capture/linux/video_capture_linux.cc",
+ "/third_party/libwebrtc/modules/video_capture/linux/video_capture_v4l2.cc"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/video_capture/linux/device_info_linux.cc",
+ "/third_party/libwebrtc/modules/video_capture/linux/device_info_v4l2.cc",
+ "/third_party/libwebrtc/modules/video_capture/linux/video_capture_linux.cc",
+ "/third_party/libwebrtc/modules/video_capture/linux/video_capture_v4l2.cc"
+ ]
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "ole32",
+ "oleaut32",
+ "secur32",
+ "strmiids",
+ "user32",
+ "winmm"
+ ]
+
+ SOURCES += [
+ "/third_party/libwebrtc/modules/video_capture/windows/device_info_ds.cc",
+ "/third_party/libwebrtc/modules/video_capture/windows/help_functions_ds.cc",
+ "/third_party/libwebrtc/modules/video_capture/windows/sink_filter_ds.cc"
+ ]
+
+ UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/video_capture/windows/video_capture_ds.cc",
+ "/third_party/libwebrtc/modules/video_capture/windows/video_capture_factory_windows.cc"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("video_capture_internal_impl_gn")
diff --git a/third_party/libwebrtc/modules/video_capture/video_capture_module_gn/moz.build b/third_party/libwebrtc/modules/video_capture/video_capture_module_gn/moz.build
new file mode 100644
index 0000000000..4cad010d64
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_capture/video_capture_module_gn/moz.build
@@ -0,0 +1,218 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/media/libyuv/",
+ "/media/libyuv/libyuv/include/",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/video_capture/device_info_impl.cc",
+ "/third_party/libwebrtc/modules/video_capture/video_capture_factory.cc",
+ "/third_party/libwebrtc/modules/video_capture/video_capture_impl.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "GLESv2",
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "dl",
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("video_capture_module_gn")
diff --git a/third_party/libwebrtc/modules/video_capture/windows/device_info_ds.cc b/third_party/libwebrtc/modules/video_capture/windows/device_info_ds.cc
new file mode 100644
index 0000000000..6b4c57d01e
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_capture/windows/device_info_ds.cc
@@ -0,0 +1,705 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_capture/windows/device_info_ds.h"
+
+#include <dvdmedia.h>
+
+#include "modules/video_capture/video_capture_config.h"
+#include "modules/video_capture/windows/help_functions_ds.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/string_utils.h"
+
+namespace webrtc {
+namespace videocapturemodule {
+
+BOOL isVideoDevice(DEV_BROADCAST_HDR* pHdr) {
+  if (pHdr == NULL) {
+    return FALSE;
+  }
+  if (pHdr->dbch_devicetype != DBT_DEVTYP_DEVICEINTERFACE) {
+    return FALSE;
+  }
+  DEV_BROADCAST_DEVICEINTERFACE* pDi = (DEV_BROADCAST_DEVICEINTERFACE*)pHdr;
+  return pDi->dbcc_classguid == KSCATEGORY_VIDEO_CAMERA;
+}
+
+LRESULT CALLBACK WndProc(HWND hWnd, UINT uiMsg, WPARAM wParam, LPARAM lParam) {
+  DeviceInfoDS* pParent;
+  if (uiMsg == WM_CREATE) {
+    pParent = (DeviceInfoDS*)((LPCREATESTRUCT)lParam)->lpCreateParams;
+    SetWindowLongPtr(hWnd, GWLP_USERDATA, (LONG_PTR)pParent);
+  } else if (uiMsg == WM_DESTROY) {
+    SetWindowLongPtr(hWnd, GWLP_USERDATA, NULL);
+  } else if (uiMsg == WM_DEVICECHANGE) {
+    pParent = (DeviceInfoDS*)GetWindowLongPtr(hWnd, GWLP_USERDATA);
+    if (pParent && isVideoDevice((PDEV_BROADCAST_HDR)lParam)) {
+      pParent->DeviceChange();
+    }
+  }
+  return DefWindowProc(hWnd, uiMsg, wParam, lParam);
+}
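+// The hidden window created in the DeviceInfoDS constructor below exists
+// solely to receive these WM_DEVICECHANGE broadcasts for
+// KSCATEGORY_VIDEO_CAMERA interfaces (registered via
+// RegisterDeviceNotification) and forward them to DeviceChange().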
+
+// static
+DeviceInfoDS* DeviceInfoDS::Create() {
+ DeviceInfoDS* dsInfo = new DeviceInfoDS();
+ if (!dsInfo || dsInfo->Init() != 0) {
+ delete dsInfo;
+ dsInfo = NULL;
+ }
+ return dsInfo;
+}
+
+DeviceInfoDS::DeviceInfoDS()
+ : _dsDevEnum(NULL),
+ _dsMonikerDevEnum(NULL),
+ _CoUninitializeIsRequired(true),
+ _hdevnotify(NULL) {
+ // 1) Initialize the COM library (make Windows load the DLLs).
+ //
+ // CoInitializeEx must be called at least once, and is usually called only
+ // once, for each thread that uses the COM library. Multiple calls to
+ // CoInitializeEx by the same thread are allowed as long as they pass the same
+ // concurrency flag, but subsequent valid calls return S_FALSE. To close the
+ // COM library gracefully on a thread, each successful call to CoInitializeEx,
+ // including any call that returns S_FALSE, must be balanced by a
+ // corresponding call to CoUninitialize.
+ //
+
+  /* Apartment-threading, while allowing for multiple threads of execution,
+   serializes all incoming calls by requiring that calls to methods of objects
+   created by this thread always run on the same thread, i.e. the
+   apartment/thread that created them. In addition, calls can arrive only at
+   message-queue boundaries (i.e., only during a PeekMessage, SendMessage,
+   DispatchMessage, etc.). Because of this serialization, it is not typically
+   necessary to write concurrency control into the code for the object, other
+   than to avoid calls to PeekMessage and SendMessage during processing that
+   must not be interrupted by other method invocations or calls to other
+   objects in the same apartment/thread. */
+
+ /// CoInitializeEx(NULL, COINIT_APARTMENTTHREADED ); //|
+ /// COINIT_SPEED_OVER_MEMORY
+ HRESULT hr = CoInitializeEx(
+ NULL, COINIT_MULTITHREADED); // Use COINIT_MULTITHREADED since Voice
+ // Engine uses COINIT_MULTITHREADED
+ if (FAILED(hr)) {
+ // Avoid calling CoUninitialize() since CoInitializeEx() failed.
+ _CoUninitializeIsRequired = FALSE;
+
+ if (hr == RPC_E_CHANGED_MODE) {
+ // Calling thread has already initialized COM to be used in a
+      // single-threaded apartment (STA). We are then prevented from using MTA.
+ // Details: hr = 0x80010106 <=> "Cannot change thread mode after it is
+ // set".
+ //
+ RTC_DLOG(LS_INFO) << __FUNCTION__
+ << ": CoInitializeEx(NULL, COINIT_APARTMENTTHREADED)"
+ " => RPC_E_CHANGED_MODE, error 0x"
+ << rtc::ToHex(hr);
+ }
+ }
+
+ _hInstance = reinterpret_cast<HINSTANCE>(GetModuleHandle(NULL));
+ _wndClass = {0};
+ _wndClass.lpfnWndProc = &WndProc;
+ _wndClass.lpszClassName = TEXT("DeviceInfoDS");
+ _wndClass.hInstance = _hInstance;
+
+ if (RegisterClass(&_wndClass)) {
+ _hwnd = CreateWindow(_wndClass.lpszClassName, NULL, 0, CW_USEDEFAULT,
+ CW_USEDEFAULT, CW_USEDEFAULT, CW_USEDEFAULT, NULL,
+ NULL, _hInstance, this);
+
+ DEV_BROADCAST_DEVICEINTERFACE di = { 0 };
+ di.dbcc_size = sizeof(di);
+ di.dbcc_devicetype = DBT_DEVTYP_DEVICEINTERFACE;
+ di.dbcc_classguid = KSCATEGORY_VIDEO_CAMERA;
+
+ _hdevnotify = RegisterDeviceNotification(_hwnd, &di,
+ DEVICE_NOTIFY_WINDOW_HANDLE);
+ }
+}
+
+DeviceInfoDS::~DeviceInfoDS() {
+ RELEASE_AND_CLEAR(_dsMonikerDevEnum);
+ RELEASE_AND_CLEAR(_dsDevEnum);
+ if (_CoUninitializeIsRequired) {
+ CoUninitialize();
+ }
+  if (_hdevnotify) {
+    UnregisterDeviceNotification(_hdevnotify);
+  }
+ if (_hwnd != NULL) {
+ DestroyWindow(_hwnd);
+ }
+ UnregisterClass(_wndClass.lpszClassName, _hInstance);
+}
+
+int32_t DeviceInfoDS::Init() {
+ HRESULT hr = CoCreateInstance(CLSID_SystemDeviceEnum, NULL, CLSCTX_INPROC,
+ IID_ICreateDevEnum, (void**)&_dsDevEnum);
+ if (hr != NOERROR) {
+ RTC_LOG(LS_INFO) << "Failed to create CLSID_SystemDeviceEnum, error 0x"
+ << rtc::ToHex(hr);
+ return -1;
+ }
+ return 0;
+}
+
+uint32_t DeviceInfoDS::NumberOfDevices() {
+ MutexLock lock(&_apiLock);
+ return GetDeviceInfo(0, 0, 0, 0, 0, 0, 0);
+}
+
+int32_t DeviceInfoDS::GetDeviceName(uint32_t deviceNumber,
+ char* deviceNameUTF8,
+ uint32_t deviceNameLength,
+ char* deviceUniqueIdUTF8,
+ uint32_t deviceUniqueIdUTF8Length,
+ char* productUniqueIdUTF8,
+ uint32_t productUniqueIdUTF8Length,
+ pid_t* pid) {
+ MutexLock lock(&_apiLock);
+ const int32_t result = GetDeviceInfo(
+ deviceNumber, deviceNameUTF8, deviceNameLength, deviceUniqueIdUTF8,
+ deviceUniqueIdUTF8Length, productUniqueIdUTF8, productUniqueIdUTF8Length);
+ return result > (int32_t)deviceNumber ? 0 : -1;
+}
+
+int32_t DeviceInfoDS::GetDeviceInfo(uint32_t deviceNumber,
+ char* deviceNameUTF8,
+ uint32_t deviceNameLength,
+ char* deviceUniqueIdUTF8,
+ uint32_t deviceUniqueIdUTF8Length,
+ char* productUniqueIdUTF8,
+                                    uint32_t productUniqueIdUTF8Length) {
+ // enumerate all video capture devices
+ RELEASE_AND_CLEAR(_dsMonikerDevEnum);
+ HRESULT hr = _dsDevEnum->CreateClassEnumerator(CLSID_VideoInputDeviceCategory,
+ &_dsMonikerDevEnum, 0);
+ if (hr != NOERROR) {
+ RTC_LOG(LS_INFO) << "Failed to enumerate CLSID_SystemDeviceEnum, error 0x"
+ << rtc::ToHex(hr) << ". No webcam exist?";
+ return 0;
+ }
+
+ _dsMonikerDevEnum->Reset();
+ ULONG cFetched;
+ IMoniker* pM;
+ int index = 0;
+ while (S_OK == _dsMonikerDevEnum->Next(1, &pM, &cFetched)) {
+ IPropertyBag* pBag;
+ hr = pM->BindToStorage(0, 0, IID_IPropertyBag, (void**)&pBag);
+ if (S_OK == hr) {
+ // Find the description or friendly name.
+ VARIANT varName;
+ VariantInit(&varName);
+ hr = pBag->Read(L"Description", &varName, 0);
+ if (FAILED(hr)) {
+ hr = pBag->Read(L"FriendlyName", &varName, 0);
+ }
+ if (SUCCEEDED(hr)) {
+ // ignore all VFW drivers
+ if ((wcsstr(varName.bstrVal, (L"(VFW)")) == NULL) &&
+ (_wcsnicmp(varName.bstrVal, (L"Google Camera Adapter"), 21) != 0)) {
+ // Found a valid device.
+ if (index == static_cast<int>(deviceNumber)) {
+ int convResult = 0;
+ if (deviceNameLength > 0) {
+ convResult = WideCharToMultiByte(CP_UTF8, 0, varName.bstrVal, -1,
+ (char*)deviceNameUTF8,
+ deviceNameLength, NULL, NULL);
+ if (convResult == 0) {
+ RTC_LOG(LS_INFO) << "Failed to convert device name to UTF8, "
+ "error = "
+ << GetLastError();
+ return -1;
+ }
+ }
+ if (deviceUniqueIdUTF8Length > 0) {
+ hr = pBag->Read(L"DevicePath", &varName, 0);
+ if (FAILED(hr)) {
+ strncpy_s((char*)deviceUniqueIdUTF8, deviceUniqueIdUTF8Length,
+ (char*)deviceNameUTF8, convResult);
+ RTC_LOG(LS_INFO) << "Failed to get "
+ "deviceUniqueIdUTF8 using "
+ "deviceNameUTF8";
+ } else {
+ convResult = WideCharToMultiByte(
+ CP_UTF8, 0, varName.bstrVal, -1, (char*)deviceUniqueIdUTF8,
+ deviceUniqueIdUTF8Length, NULL, NULL);
+ if (convResult == 0) {
+ RTC_LOG(LS_INFO) << "Failed to convert device "
+ "name to UTF8, error = "
+ << GetLastError();
+ return -1;
+ }
+ if (productUniqueIdUTF8 && productUniqueIdUTF8Length > 0) {
+ GetProductId(deviceUniqueIdUTF8, productUniqueIdUTF8,
+ productUniqueIdUTF8Length);
+ }
+ }
+ }
+ }
+ ++index; // increase the number of valid devices
+ }
+ }
+ VariantClear(&varName);
+ pBag->Release();
+ pM->Release();
+ }
+ }
+ if (deviceNameLength) {
+ RTC_DLOG(LS_INFO) << __FUNCTION__ << " " << deviceNameUTF8;
+ }
+ return index;
+}
+
+IBaseFilter* DeviceInfoDS::GetDeviceFilter(const char* deviceUniqueIdUTF8,
+ char* productUniqueIdUTF8,
+ uint32_t productUniqueIdUTF8Length) {
+ const int32_t deviceUniqueIdUTF8Length = (int32_t)strlen(
+ (char*)deviceUniqueIdUTF8); // UTF8 is also NULL terminated
+ if (deviceUniqueIdUTF8Length >= kVideoCaptureUniqueNameLength) {
+ RTC_LOG(LS_INFO) << "Device name too long";
+ return NULL;
+ }
+
+ // enumerate all video capture devices
+ RELEASE_AND_CLEAR(_dsMonikerDevEnum);
+ HRESULT hr = _dsDevEnum->CreateClassEnumerator(CLSID_VideoInputDeviceCategory,
+ &_dsMonikerDevEnum, 0);
+ if (hr != NOERROR) {
+ RTC_LOG(LS_INFO) << "Failed to enumerate CLSID_SystemDeviceEnum, error 0x"
+                     << rtc::ToHex(hr) << ". No webcam exists?";
+    return NULL;
+ }
+ _dsMonikerDevEnum->Reset();
+ ULONG cFetched;
+ IMoniker* pM;
+
+ IBaseFilter* captureFilter = NULL;
+ bool deviceFound = false;
+ while (S_OK == _dsMonikerDevEnum->Next(1, &pM, &cFetched) && !deviceFound) {
+ IPropertyBag* pBag;
+ hr = pM->BindToStorage(0, 0, IID_IPropertyBag, (void**)&pBag);
+ if (S_OK == hr) {
+ // Find the description or friendly name.
+ VARIANT varName;
+ VariantInit(&varName);
+ if (deviceUniqueIdUTF8Length > 0) {
+ hr = pBag->Read(L"DevicePath", &varName, 0);
+ if (FAILED(hr)) {
+ hr = pBag->Read(L"Description", &varName, 0);
+ if (FAILED(hr)) {
+ hr = pBag->Read(L"FriendlyName", &varName, 0);
+ }
+ }
+ if (SUCCEEDED(hr)) {
+ char tempDevicePathUTF8[256];
+ tempDevicePathUTF8[0] = 0;
+ WideCharToMultiByte(CP_UTF8, 0, varName.bstrVal, -1,
+ tempDevicePathUTF8, sizeof(tempDevicePathUTF8),
+ NULL, NULL);
+ if (strncmp(tempDevicePathUTF8, (const char*)deviceUniqueIdUTF8,
+ deviceUniqueIdUTF8Length) == 0) {
+ // We have found the requested device
+ deviceFound = true;
+ hr =
+ pM->BindToObject(0, 0, IID_IBaseFilter, (void**)&captureFilter);
+          if (FAILED(hr)) {
+            RTC_LOG(LS_ERROR) << "Failed to bind to the selected "
+                                 "capture device "
+                              << hr;
+          }
+
+ if (productUniqueIdUTF8 &&
+ productUniqueIdUTF8Length > 0) // Get the device name
+ {
+ GetProductId(deviceUniqueIdUTF8, productUniqueIdUTF8,
+ productUniqueIdUTF8Length);
+ }
+ }
+ }
+ }
+ VariantClear(&varName);
+ pBag->Release();
+ }
+ pM->Release();
+ }
+ return captureFilter;
+}
+
+int32_t DeviceInfoDS::GetWindowsCapability(
+ const int32_t capabilityIndex,
+ VideoCaptureCapabilityWindows& windowsCapability) {
+ MutexLock lock(&_apiLock);
+
+ if (capabilityIndex < 0 || static_cast<size_t>(capabilityIndex) >=
+ _captureCapabilitiesWindows.size()) {
+ return -1;
+ }
+
+ windowsCapability = _captureCapabilitiesWindows[capabilityIndex];
+ return 0;
+}
+
+int32_t DeviceInfoDS::CreateCapabilityMap(const char* deviceUniqueIdUTF8) {
+ // Reset old capability list
+ _captureCapabilities.clear();
+
+ const int32_t deviceUniqueIdUTF8Length =
+ (int32_t)strlen((char*)deviceUniqueIdUTF8);
+ if (deviceUniqueIdUTF8Length >= kVideoCaptureUniqueNameLength) {
+ RTC_LOG(LS_INFO) << "Device name too long";
+ return -1;
+ }
+ RTC_LOG(LS_INFO) << "CreateCapabilityMap called for device "
+ << deviceUniqueIdUTF8;
+
+ char productId[kVideoCaptureProductIdLength];
+ IBaseFilter* captureDevice = DeviceInfoDS::GetDeviceFilter(
+ deviceUniqueIdUTF8, productId, kVideoCaptureProductIdLength);
+ if (!captureDevice)
+ return -1;
+ IPin* outputCapturePin = GetOutputPin(captureDevice, GUID_NULL);
+ if (!outputCapturePin) {
+ RTC_LOG(LS_INFO) << "Failed to get capture device output pin";
+ RELEASE_AND_CLEAR(captureDevice);
+ return -1;
+ }
+ IAMExtDevice* extDevice = NULL;
+ HRESULT hr =
+ captureDevice->QueryInterface(IID_IAMExtDevice, (void**)&extDevice);
+ if (SUCCEEDED(hr) && extDevice) {
+ RTC_LOG(LS_INFO) << "This is an external device";
+ extDevice->Release();
+ }
+
+ IAMStreamConfig* streamConfig = NULL;
+ hr = outputCapturePin->QueryInterface(IID_IAMStreamConfig,
+ (void**)&streamConfig);
+  if (FAILED(hr)) {
+    RTC_LOG(LS_INFO) << "Failed to get IID_IAMStreamConfig interface "
+                        "from capture device";
+    RELEASE_AND_CLEAR(outputCapturePin);
+    RELEASE_AND_CLEAR(captureDevice);
+    return -1;
+  }
+
+ // this gets the FPS
+ IAMVideoControl* videoControlConfig = NULL;
+ HRESULT hrVC = captureDevice->QueryInterface(IID_IAMVideoControl,
+ (void**)&videoControlConfig);
+ if (FAILED(hrVC)) {
+ RTC_LOG(LS_INFO) << "IID_IAMVideoControl Interface NOT SUPPORTED";
+ }
+
+ AM_MEDIA_TYPE* pmt = NULL;
+ VIDEO_STREAM_CONFIG_CAPS caps;
+ int count, size;
+
+ hr = streamConfig->GetNumberOfCapabilities(&count, &size);
+ if (FAILED(hr)) {
+ RTC_LOG(LS_INFO) << "Failed to GetNumberOfCapabilities";
+ RELEASE_AND_CLEAR(videoControlConfig);
+ RELEASE_AND_CLEAR(streamConfig);
+ RELEASE_AND_CLEAR(outputCapturePin);
+ RELEASE_AND_CLEAR(captureDevice);
+ return -1;
+ }
+
+  // Check if the device supports formattype == FORMAT_VideoInfo2 and
+  // FORMAT_VideoInfo. Prefer FORMAT_VideoInfo, since some cameras (ZureCam)
+  // have been seen having problems with MJPEG and FORMAT_VideoInfo2. The
+  // interlace flag is only supported in FORMAT_VideoInfo2.
+ bool supportFORMAT_VideoInfo2 = false;
+ bool supportFORMAT_VideoInfo = false;
+ bool foundInterlacedFormat = false;
+ GUID preferedVideoFormat = FORMAT_VideoInfo;
+ for (int32_t tmp = 0; tmp < count; ++tmp) {
+ hr = streamConfig->GetStreamCaps(tmp, &pmt, reinterpret_cast<BYTE*>(&caps));
+ if (hr == S_OK) {
+ if (pmt->majortype == MEDIATYPE_Video &&
+ pmt->formattype == FORMAT_VideoInfo2) {
+ RTC_LOG(LS_INFO) << "Device support FORMAT_VideoInfo2";
+ supportFORMAT_VideoInfo2 = true;
+ VIDEOINFOHEADER2* h =
+ reinterpret_cast<VIDEOINFOHEADER2*>(pmt->pbFormat);
+ RTC_DCHECK(h);
+ foundInterlacedFormat |=
+ h->dwInterlaceFlags &
+ (AMINTERLACE_IsInterlaced | AMINTERLACE_DisplayModeBobOnly);
+ }
+ if (pmt->majortype == MEDIATYPE_Video &&
+ pmt->formattype == FORMAT_VideoInfo) {
+ RTC_LOG(LS_INFO) << "Device support FORMAT_VideoInfo2";
+ supportFORMAT_VideoInfo = true;
+ }
+
+ FreeMediaType(pmt);
+ pmt = NULL;
+ }
+ }
+ if (supportFORMAT_VideoInfo2) {
+ if (supportFORMAT_VideoInfo && !foundInterlacedFormat) {
+ preferedVideoFormat = FORMAT_VideoInfo;
+ } else {
+ preferedVideoFormat = FORMAT_VideoInfo2;
+ }
+ }
+
+ for (int32_t tmp = 0; tmp < count; ++tmp) {
+ hr = streamConfig->GetStreamCaps(tmp, &pmt, reinterpret_cast<BYTE*>(&caps));
+ if (hr != S_OK) {
+ RTC_LOG(LS_INFO) << "Failed to GetStreamCaps";
+ RELEASE_AND_CLEAR(videoControlConfig);
+ RELEASE_AND_CLEAR(streamConfig);
+ RELEASE_AND_CLEAR(outputCapturePin);
+ RELEASE_AND_CLEAR(captureDevice);
+ return -1;
+ }
+
+ if (pmt->majortype == MEDIATYPE_Video &&
+ pmt->formattype == preferedVideoFormat) {
+ VideoCaptureCapabilityWindows capability;
+ int64_t avgTimePerFrame = 0;
+
+ if (pmt->formattype == FORMAT_VideoInfo) {
+ VIDEOINFOHEADER* h = reinterpret_cast<VIDEOINFOHEADER*>(pmt->pbFormat);
+ RTC_DCHECK(h);
+ capability.directShowCapabilityIndex = tmp;
+ capability.width = h->bmiHeader.biWidth;
+ capability.height = h->bmiHeader.biHeight;
+ avgTimePerFrame = h->AvgTimePerFrame;
+ }
+ if (pmt->formattype == FORMAT_VideoInfo2) {
+ VIDEOINFOHEADER2* h =
+ reinterpret_cast<VIDEOINFOHEADER2*>(pmt->pbFormat);
+ RTC_DCHECK(h);
+ capability.directShowCapabilityIndex = tmp;
+ capability.width = h->bmiHeader.biWidth;
+ capability.height = h->bmiHeader.biHeight;
+ capability.interlaced =
+ h->dwInterlaceFlags &
+ (AMINTERLACE_IsInterlaced | AMINTERLACE_DisplayModeBobOnly);
+ avgTimePerFrame = h->AvgTimePerFrame;
+ }
+
+ if (hrVC == S_OK) {
+ LONGLONG* frameDurationList;
+ LONGLONG maxFPS;
+ long listSize;
+ SIZE size;
+ size.cx = capability.width;
+ size.cy = capability.height;
+
+        // GetMaxAvailableFrameRate doesn't always return the max frame rate,
+        // e.g. on the Logitech Notebook. This may be due to a bug in that API,
+        // because the GetFrameRateList array is reversed on that camera. So a
+        // util method is used instead; we can't assume the first value will
+        // be the max fps.
+ hrVC = videoControlConfig->GetFrameRateList(
+ outputCapturePin, tmp, size, &listSize, &frameDurationList);
+
+ // On some odd cameras, you may get a 0 for duration.
+ // GetMaxOfFrameArray returns the lowest duration (highest FPS)
+ if (hrVC == S_OK && listSize > 0 &&
+ 0 != (maxFPS = GetMaxOfFrameArray(frameDurationList, listSize))) {
+ capability.maxFPS = static_cast<int>(10000000 / maxFPS);
+ capability.supportFrameRateControl = true;
+ } else // use existing method
+ {
+ RTC_LOG(LS_INFO) << "GetMaxAvailableFrameRate NOT SUPPORTED";
+ if (avgTimePerFrame > 0)
+ capability.maxFPS = static_cast<int>(10000000 / avgTimePerFrame);
+ else
+ capability.maxFPS = 0;
+ }
+ } else // use existing method in case IAMVideoControl is not supported
+ {
+ if (avgTimePerFrame > 0)
+ capability.maxFPS = static_cast<int>(10000000 / avgTimePerFrame);
+ else
+ capability.maxFPS = 0;
+ }
+
+ // can't switch MEDIATYPE :~(
+ if (pmt->subtype == MEDIASUBTYPE_I420) {
+ capability.videoType = VideoType::kI420;
+ } else if (pmt->subtype == MEDIASUBTYPE_IYUV) {
+ capability.videoType = VideoType::kIYUV;
+ } else if (pmt->subtype == MEDIASUBTYPE_RGB24) {
+ capability.videoType = VideoType::kRGB24;
+ } else if (pmt->subtype == MEDIASUBTYPE_YUY2) {
+ capability.videoType = VideoType::kYUY2;
+ } else if (pmt->subtype == MEDIASUBTYPE_RGB565) {
+ capability.videoType = VideoType::kRGB565;
+ } else if (pmt->subtype == MEDIASUBTYPE_MJPG) {
+ capability.videoType = VideoType::kMJPEG;
+ } else if (pmt->subtype == MEDIASUBTYPE_dvsl ||
+ pmt->subtype == MEDIASUBTYPE_dvsd ||
+ pmt->subtype ==
+ MEDIASUBTYPE_dvhd) // If this is an external DV camera
+ {
+ capability.videoType =
+ VideoType::kYUY2; // MS DV filter seems to create this type
+      } else if (pmt->subtype ==
+                 MEDIASUBTYPE_UYVY)  // Seen used by Decklink capture cards
+      {
+        capability.videoType = VideoType::kUYVY;
+      } else if (pmt->subtype ==
+                 MEDIASUBTYPE_HDYC)  // Seen used by Decklink capture cards.
+                                     // Uses BT.709 color. Not entirely correct
+                                     // to use UYVY.
+                                     // http://en.wikipedia.org/wiki/YCbCr
+ {
+ RTC_LOG(LS_INFO) << "Device support HDYC.";
+ capability.videoType = VideoType::kUYVY;
+ } else {
+ WCHAR strGuid[39];
+ StringFromGUID2(pmt->subtype, strGuid, 39);
+ RTC_LOG(LS_WARNING)
+ << "Device support unknown media type " << strGuid << ", width "
+ << capability.width << ", height " << capability.height;
+ continue;
+ }
+
+ _captureCapabilities.push_back(capability);
+ _captureCapabilitiesWindows.push_back(capability);
+ RTC_LOG(LS_INFO) << "Camera capability, width:" << capability.width
+ << " height:" << capability.height
+ << " type:" << static_cast<int>(capability.videoType)
+ << " fps:" << capability.maxFPS;
+ }
+ FreeMediaType(pmt);
+ pmt = NULL;
+ }
+ RELEASE_AND_CLEAR(streamConfig);
+ RELEASE_AND_CLEAR(videoControlConfig);
+ RELEASE_AND_CLEAR(outputCapturePin);
+ RELEASE_AND_CLEAR(captureDevice); // Release the capture device
+
+ // Store the new used device name
+ _lastUsedDeviceNameLength = deviceUniqueIdUTF8Length;
+ _lastUsedDeviceName =
+ (char*)realloc(_lastUsedDeviceName, _lastUsedDeviceNameLength + 1);
+ memcpy(_lastUsedDeviceName, deviceUniqueIdUTF8,
+ _lastUsedDeviceNameLength + 1);
+ RTC_LOG(LS_INFO) << "CreateCapabilityMap " << _captureCapabilities.size();
+
+ return static_cast<int32_t>(_captureCapabilities.size());
+}
+
+// Constructs a product ID from the Windows DevicePath. On a USB device the
+// devicePath contains the product id and vendor id. This seems to work for
+// FireWire as well.
+// Example of device path:
+// "\\?\usb#vid_0408&pid_2010&mi_00#7&258e7aaf&0&0000#{65e8773d-8f56-11d0-a3b9-00a0c9223196}\global"
+// "\\?\avc#sony&dv-vcr&camcorder&dv#65b2d50301460008#{65e8773d-8f56-11d0-a3b9-00a0c9223196}\global"
+void DeviceInfoDS::GetProductId(const char* devicePath,
+ char* productUniqueIdUTF8,
+ uint32_t productUniqueIdUTF8Length) {
+ *productUniqueIdUTF8 = '\0';
+ char* startPos = strstr((char*)devicePath, "\\\\?\\");
+ if (!startPos) {
+ strncpy_s((char*)productUniqueIdUTF8, productUniqueIdUTF8Length, "", 1);
+ RTC_LOG(LS_INFO) << "Failed to get the product Id";
+ return;
+ }
+ startPos += 4;
+
+ char* pos = strchr(startPos, '&');
+ if (!pos || pos >= (char*)devicePath + strlen((char*)devicePath)) {
+ strncpy_s((char*)productUniqueIdUTF8, productUniqueIdUTF8Length, "", 1);
+ RTC_LOG(LS_INFO) << "Failed to get the product Id";
+ return;
+ }
+ // Find the second occurrence.
+ pos = strchr(pos + 1, '&');
+  uint32_t bytesToCopy = pos ? (uint32_t)(pos - startPos) : 0;
+ if (pos && (bytesToCopy < productUniqueIdUTF8Length) &&
+ bytesToCopy <= kVideoCaptureProductIdLength) {
+ strncpy_s((char*)productUniqueIdUTF8, productUniqueIdUTF8Length,
+ (char*)startPos, bytesToCopy);
+ } else {
+ strncpy_s((char*)productUniqueIdUTF8, productUniqueIdUTF8Length, "", 1);
+ RTC_LOG(LS_INFO) << "Failed to get the product Id";
+ }
+}
+
+int32_t DeviceInfoDS::DisplayCaptureSettingsDialogBox(
+ const char* deviceUniqueIdUTF8,
+ const char* dialogTitleUTF8,
+ void* parentWindow,
+ uint32_t positionX,
+ uint32_t positionY) {
+ MutexLock lock(&_apiLock);
+ HWND window = (HWND)parentWindow;
+
+ IBaseFilter* filter = GetDeviceFilter(deviceUniqueIdUTF8, NULL, 0);
+ if (!filter)
+ return -1;
+
+ ISpecifyPropertyPages* pPages = NULL;
+ CAUUID uuid;
+ HRESULT hr = S_OK;
+
+ hr = filter->QueryInterface(IID_ISpecifyPropertyPages, (LPVOID*)&pPages);
+ if (!SUCCEEDED(hr)) {
+ filter->Release();
+ return -1;
+ }
+ hr = pPages->GetPages(&uuid);
+ if (!SUCCEEDED(hr)) {
+ filter->Release();
+ return -1;
+ }
+
+ WCHAR tempDialogTitleWide[256];
+ tempDialogTitleWide[0] = 0;
+ int size = 255;
+
+ // UTF-8 to wide char
+ MultiByteToWideChar(CP_UTF8, 0, (char*)dialogTitleUTF8, -1,
+ tempDialogTitleWide, size);
+
+ // Invoke a dialog box to display.
+
+ hr = OleCreatePropertyFrame(
+ window, // You must create the parent window.
+ positionX, // Horizontal position for the dialog box.
+ positionY, // Vertical position for the dialog box.
+ tempDialogTitleWide, // String used for the dialog box caption.
+ 1, // Number of pointers passed in pPlugin.
+ (LPUNKNOWN*)&filter, // Pointer to the filter.
+ uuid.cElems, // Number of property pages.
+ uuid.pElems, // Array of property page CLSIDs.
+ LOCALE_USER_DEFAULT, // Locale ID for the dialog box.
+ 0, NULL); // Reserved
+ // Release memory.
+ if (uuid.pElems) {
+ CoTaskMemFree(uuid.pElems);
+ }
+ filter->Release();
+ return 0;
+}
+} // namespace videocapturemodule
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_capture/windows/device_info_ds.h b/third_party/libwebrtc/modules/video_capture/windows/device_info_ds.h
new file mode 100644
index 0000000000..e6dfaed366
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_capture/windows/device_info_ds.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CAPTURE_MAIN_SOURCE_WINDOWS_DEVICE_INFO_DS_H_
+#define MODULES_VIDEO_CAPTURE_MAIN_SOURCE_WINDOWS_DEVICE_INFO_DS_H_
+
+#include <dshow.h>
+#include <Ks.h>
+#include <dbt.h>
+
+#include "modules/video_capture/device_info_impl.h"
+#include "modules/video_capture/video_capture.h"
+#include "modules/video_capture/video_capture_impl.h"
+
+namespace webrtc {
+namespace videocapturemodule {
+struct VideoCaptureCapabilityWindows : public VideoCaptureCapability {
+ uint32_t directShowCapabilityIndex;
+ bool supportFrameRateControl;
+ VideoCaptureCapabilityWindows() {
+ directShowCapabilityIndex = 0;
+ supportFrameRateControl = false;
+ }
+};
+
+class DeviceInfoDS : public DeviceInfoImpl {
+ public:
+ // Factory function.
+ static DeviceInfoDS* Create();
+
+ DeviceInfoDS();
+ ~DeviceInfoDS() override;
+
+ int32_t Init() override;
+ uint32_t NumberOfDevices() override;
+
+ /*
+ * Returns the available capture devices.
+ */
+ int32_t GetDeviceName(uint32_t deviceNumber,
+ char* deviceNameUTF8,
+ uint32_t deviceNameLength,
+ char* deviceUniqueIdUTF8,
+ uint32_t deviceUniqueIdUTF8Length,
+ char* productUniqueIdUTF8,
+ uint32_t productUniqueIdUTF8Length,
+ pid_t* pid) override;
+
+ /*
+ * Display OS /capture device specific settings dialog
+ */
+ int32_t DisplayCaptureSettingsDialogBox(const char* deviceUniqueIdUTF8,
+ const char* dialogTitleUTF8,
+ void* parentWindow,
+ uint32_t positionX,
+ uint32_t positionY) override;
+
+ // Windows specific
+
+ /* Gets a capture device filter
+     The user of this API is responsible for releasing the filter when it is
+     no longer needed.
+ */
+ IBaseFilter* GetDeviceFilter(const char* deviceUniqueIdUTF8,
+ char* productUniqueIdUTF8 = NULL,
+ uint32_t productUniqueIdUTF8Length = 0);
+
+ int32_t GetWindowsCapability(
+ int32_t capabilityIndex,
+ VideoCaptureCapabilityWindows& windowsCapability);
+
+ static void GetProductId(const char* devicePath,
+ char* productUniqueIdUTF8,
+ uint32_t productUniqueIdUTF8Length);
+
+ protected:
+ int32_t GetDeviceInfo(uint32_t deviceNumber,
+ char* deviceNameUTF8,
+ uint32_t deviceNameLength,
+ char* deviceUniqueIdUTF8,
+ uint32_t deviceUniqueIdUTF8Length,
+ char* productUniqueIdUTF8,
+ uint32_t productUniqueIdUTF8Length);
+
+ int32_t CreateCapabilityMap(const char* deviceUniqueIdUTF8) override
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(_apiLock);
+
+ private:
+ ICreateDevEnum* _dsDevEnum;
+ IEnumMoniker* _dsMonikerDevEnum;
+ bool _CoUninitializeIsRequired;
+ std::vector<VideoCaptureCapabilityWindows> _captureCapabilitiesWindows;
+ HWND _hwnd;
+ WNDCLASS _wndClass;
+ HINSTANCE _hInstance;
+ HDEVNOTIFY _hdevnotify;
+};
+} // namespace videocapturemodule
+} // namespace webrtc
+#endif // MODULES_VIDEO_CAPTURE_MAIN_SOURCE_WINDOWS_DEVICE_INFO_DS_H_
diff --git a/third_party/libwebrtc/modules/video_capture/windows/help_functions_ds.cc b/third_party/libwebrtc/modules/video_capture/windows/help_functions_ds.cc
new file mode 100644
index 0000000000..b767726107
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_capture/windows/help_functions_ds.cc
@@ -0,0 +1,157 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <initguid.h> // Must come before the help_functions_ds.h include so
+ // that DEFINE_GUID() entries will be defined in this
+ // object file.
+
+#include <cguid.h>
+
+#include "modules/video_capture/windows/help_functions_ds.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+namespace videocapturemodule {
+// Returns the minimum value; the shortest frame duration gives the max FPS.
+LONGLONG GetMaxOfFrameArray(LONGLONG* maxFps, long size) {
+ LONGLONG maxFPS = maxFps[0];
+ for (int i = 0; i < size; i++) {
+ if (maxFPS > maxFps[i])
+ maxFPS = maxFps[i];
+ }
+ return maxFPS;
+}
+
+IPin* GetInputPin(IBaseFilter* filter) {
+ IPin* pin = NULL;
+ IEnumPins* pPinEnum = NULL;
+ filter->EnumPins(&pPinEnum);
+ if (pPinEnum == NULL) {
+ return NULL;
+ }
+
+ // get first unconnected pin
+ pPinEnum->Reset(); // set to first pin
+
+ while (S_OK == pPinEnum->Next(1, &pin, NULL)) {
+ PIN_DIRECTION pPinDir;
+ pin->QueryDirection(&pPinDir);
+ if (PINDIR_INPUT == pPinDir) // This is an input pin
+ {
+      IPin* tempPin = NULL;
+      if (S_OK != pin->ConnectedTo(&tempPin))  // The pin is not connected.
+      {
+        pPinEnum->Release();
+        return pin;
+      }
+      tempPin->Release();  // Already connected; release the peer reference.
+ }
+ pin->Release();
+ }
+ pPinEnum->Release();
+ return NULL;
+}
+
+IPin* GetOutputPin(IBaseFilter* filter, REFGUID Category) {
+ IPin* pin = NULL;
+ IEnumPins* pPinEnum = NULL;
+ filter->EnumPins(&pPinEnum);
+ if (pPinEnum == NULL) {
+ return NULL;
+ }
+ // get first unconnected pin
+ pPinEnum->Reset(); // set to first pin
+ while (S_OK == pPinEnum->Next(1, &pin, NULL)) {
+ PIN_DIRECTION pPinDir;
+ pin->QueryDirection(&pPinDir);
+ if (PINDIR_OUTPUT == pPinDir) // This is an output pin
+ {
+ if (Category == GUID_NULL || PinMatchesCategory(pin, Category)) {
+ pPinEnum->Release();
+ return pin;
+ }
+ }
+ pin->Release();
+ pin = NULL;
+ }
+ pPinEnum->Release();
+ return NULL;
+}
+
+BOOL PinMatchesCategory(IPin* pPin, REFGUID Category) {
+ BOOL bFound = FALSE;
+ IKsPropertySet* pKs = NULL;
+ HRESULT hr = pPin->QueryInterface(IID_PPV_ARGS(&pKs));
+ if (SUCCEEDED(hr)) {
+ GUID PinCategory;
+ DWORD cbReturned;
+ hr = pKs->Get(AMPROPSETID_Pin, AMPROPERTY_PIN_CATEGORY, NULL, 0,
+ &PinCategory, sizeof(GUID), &cbReturned);
+ if (SUCCEEDED(hr) && (cbReturned == sizeof(GUID))) {
+ bFound = (PinCategory == Category);
+ }
+ pKs->Release();
+ }
+ return bFound;
+}
+
+void ResetMediaType(AM_MEDIA_TYPE* media_type) {
+ if (!media_type)
+ return;
+ if (media_type->cbFormat != 0) {
+ CoTaskMemFree(media_type->pbFormat);
+ media_type->cbFormat = 0;
+ media_type->pbFormat = nullptr;
+ }
+ if (media_type->pUnk) {
+ media_type->pUnk->Release();
+ media_type->pUnk = nullptr;
+ }
+}
+
+void FreeMediaType(AM_MEDIA_TYPE* media_type) {
+ if (!media_type)
+ return;
+ ResetMediaType(media_type);
+ CoTaskMemFree(media_type);
+}
+
+HRESULT CopyMediaType(AM_MEDIA_TYPE* target, const AM_MEDIA_TYPE* source) {
+ RTC_DCHECK_NE(source, target);
+ *target = *source;
+ if (source->cbFormat != 0) {
+ RTC_DCHECK(source->pbFormat);
+ target->pbFormat =
+ reinterpret_cast<BYTE*>(CoTaskMemAlloc(source->cbFormat));
+ if (target->pbFormat == nullptr) {
+ target->cbFormat = 0;
+ return E_OUTOFMEMORY;
+ } else {
+ CopyMemory(target->pbFormat, source->pbFormat, target->cbFormat);
+ }
+ }
+
+ if (target->pUnk != nullptr)
+ target->pUnk->AddRef();
+
+ return S_OK;
+}
+
+wchar_t* DuplicateWideString(const wchar_t* str) {
+  size_t len = lstrlenW(str);
+  wchar_t* ret =
+      reinterpret_cast<LPWSTR>(CoTaskMemAlloc((len + 1) * sizeof(wchar_t)));
+  if (ret)
+    lstrcpyW(ret, str);
+  return ret;
+}
+
+} // namespace videocapturemodule
+} // namespace webrtc
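
The three media-type helpers above encode the DirectShow ownership rules: the
format block and the struct itself are CoTaskMem allocations, and pUnk is
reference counted. A hedged sketch of how a caller would deep-copy and later
destroy a media type with them (DuplicateType is illustrative, not a function
in this patch):

    // Illustrative sketch: deep-copy an AM_MEDIA_TYPE using the helpers above.
    #include <dshow.h>

    #include "modules/video_capture/windows/help_functions_ds.h"

    HRESULT DuplicateType(const AM_MEDIA_TYPE& source, AM_MEDIA_TYPE** out) {
      AM_MEDIA_TYPE* copy = reinterpret_cast<AM_MEDIA_TYPE*>(
          CoTaskMemAlloc(sizeof(AM_MEDIA_TYPE)));
      if (!copy)
        return E_OUTOFMEMORY;
      HRESULT hr = webrtc::videocapturemodule::CopyMediaType(copy, &source);
      if (FAILED(hr)) {
        CoTaskMemFree(copy);  // CopyMediaType left no owned sub-allocations.
        return hr;
      }
      *out = copy;  // Release later with FreeMediaType(copy).
      return S_OK;
    }
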
diff --git a/third_party/libwebrtc/modules/video_capture/windows/help_functions_ds.h b/third_party/libwebrtc/modules/video_capture/windows/help_functions_ds.h
new file mode 100644
index 0000000000..29479157a8
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_capture/windows/help_functions_ds.h
@@ -0,0 +1,118 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CAPTURE_MAIN_SOURCE_WINDOWS_HELP_FUNCTIONS_DS_H_
+#define MODULES_VIDEO_CAPTURE_MAIN_SOURCE_WINDOWS_HELP_FUNCTIONS_DS_H_
+
+#include <dshow.h>
+
+#include <type_traits>
+#include <utility>
+
+#include "api/scoped_refptr.h"
+#include "rtc_base/ref_counter.h"
+
+DEFINE_GUID(MEDIASUBTYPE_I420,
+ 0x30323449,
+ 0x0000,
+ 0x0010,
+ 0x80,
+ 0x00,
+ 0x00,
+ 0xAA,
+ 0x00,
+ 0x38,
+ 0x9B,
+ 0x71);
+DEFINE_GUID(MEDIASUBTYPE_HDYC,
+ 0x43594448,
+ 0x0000,
+ 0x0010,
+ 0x80,
+ 0x00,
+ 0x00,
+ 0xAA,
+ 0x00,
+ 0x38,
+ 0x9B,
+ 0x71);
+
+#define RELEASE_AND_CLEAR(p) \
+ if (p) { \
+ (p)->Release(); \
+ (p) = NULL; \
+ }
+
+namespace webrtc {
+namespace videocapturemodule {
+LONGLONG GetMaxOfFrameArray(LONGLONG* maxFps, long size);
+
+IPin* GetInputPin(IBaseFilter* filter);
+IPin* GetOutputPin(IBaseFilter* filter, REFGUID Category);
+BOOL PinMatchesCategory(IPin* pPin, REFGUID Category);
+void ResetMediaType(AM_MEDIA_TYPE* media_type);
+void FreeMediaType(AM_MEDIA_TYPE* media_type);
+HRESULT CopyMediaType(AM_MEDIA_TYPE* target, const AM_MEDIA_TYPE* source);
+
+// Helper function to make using scoped_refptr with COM interface pointers
+// a little less awkward. rtc::scoped_refptr doesn't support the & operator
+// or a way to receive values via an out ptr.
+// The function is intentionally not named QueryInterface. That keeps calls
+// unambiguous for the compiler when the helper is used from inside a class
+// that also implements COM interfaces (where an unqualified QueryInterface
+// would bind to the member function instead).
+template <class T>
+HRESULT GetComInterface(IUnknown* object, rtc::scoped_refptr<T>* ptr) {
+  // This helper function is not meant to magically free ptr. If it did, we
+  // would add code bloat in the many places where that's not needed, and
+  // make the code less readable, since it would not be clear at the call
+  // site that the pointer gets freed even if QI() fails.
+ RTC_DCHECK(!ptr->get());
+ void* new_ptr = nullptr;
+ HRESULT hr = object->QueryInterface(__uuidof(T), &new_ptr);
+ if (SUCCEEDED(hr))
+ ptr->swap(reinterpret_cast<T**>(&new_ptr));
+ return hr;
+}
+
+// Provides a reference count implementation for COM (IUnknown derived) classes.
+// The implementation uses atomics for managing the ref count.
+template <class T>
+class ComRefCount : public T {
+ public:
+ ComRefCount() {}
+
+ template <class P0>
+ explicit ComRefCount(P0&& p0) : T(std::forward<P0>(p0)) {}
+
+ STDMETHOD_(ULONG, AddRef)() override {
+ ref_count_.IncRef();
+ return 1;
+ }
+
+ STDMETHOD_(ULONG, Release)() override {
+ const auto status = ref_count_.DecRef();
+ if (status == rtc::RefCountReleaseStatus::kDroppedLastRef) {
+ delete this;
+ return 0;
+ }
+ return 1;
+ }
+
+ protected:
+ ~ComRefCount() {}
+
+ private:
+ webrtc::webrtc_impl::RefCounter ref_count_{0};
+};
+
+} // namespace videocapturemodule
+} // namespace webrtc
+#endif // MODULES_VIDEO_CAPTURE_MAIN_SOURCE_WINDOWS_HELP_FUNCTIONS_DS_H_
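
GetComInterface() above pairs rtc::scoped_refptr with QueryInterface's
out-parameter convention. A sketch of typical call-site usage follows;
IAMStreamConfig is chosen arbitrarily and CountStreamCaps is illustrative,
not part of the patch.

    // Illustrative sketch: fetch a COM interface into a scoped_refptr.
    #include <dshow.h>

    #include "modules/video_capture/windows/help_functions_ds.h"

    // Returns the number of stream capabilities exposed by `pin`, or -1.
    int CountStreamCaps(IPin* pin) {
      rtc::scoped_refptr<IAMStreamConfig> config;  // Must start out empty.
      if (FAILED(webrtc::videocapturemodule::GetComInterface(pin, &config)))
        return -1;
      int count = 0;
      int size = 0;
      if (FAILED(config->GetNumberOfCapabilities(&count, &size)))
        return -1;
      return count;  // `config` drops its reference on scope exit.
    }
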
diff --git a/third_party/libwebrtc/modules/video_capture/windows/sink_filter_ds.cc b/third_party/libwebrtc/modules/video_capture/windows/sink_filter_ds.cc
new file mode 100644
index 0000000000..0c5acb668d
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_capture/windows/sink_filter_ds.cc
@@ -0,0 +1,961 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_capture/windows/sink_filter_ds.h"
+
+#include <dvdmedia.h> // VIDEOINFOHEADER2
+#include <initguid.h>
+
+#include <algorithm>
+#include <list>
+
+#include "rtc_base/arraysize.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/platform_thread.h"
+#include "rtc_base/string_utils.h"
+
+DEFINE_GUID(CLSID_SINKFILTER,
+ 0x88cdbbdc,
+ 0xa73b,
+ 0x4afa,
+ 0xac,
+ 0xbf,
+ 0x15,
+ 0xd5,
+ 0xe2,
+ 0xce,
+ 0x12,
+ 0xc3);
+
+namespace webrtc {
+namespace videocapturemodule {
+namespace {
+
+// Minimal enumerator implementation that enumerates over a single pin.
+class EnumPins : public IEnumPins {
+ public:
+ EnumPins(IPin* pin) : pin_(pin) {}
+
+ protected:
+ virtual ~EnumPins() {}
+
+ private:
+ STDMETHOD(QueryInterface)(REFIID riid, void** ppv) override {
+ if (riid == IID_IUnknown || riid == IID_IEnumPins) {
+ *ppv = static_cast<IEnumPins*>(this);
+ AddRef();
+ return S_OK;
+ }
+ return E_NOINTERFACE;
+ }
+
+ STDMETHOD(Clone)(IEnumPins** pins) {
+ RTC_DCHECK_NOTREACHED();
+ return E_NOTIMPL;
+ }
+
+ STDMETHOD(Next)(ULONG count, IPin** pins, ULONG* fetched) {
+ RTC_DCHECK(count > 0);
+ RTC_DCHECK(pins);
+ // fetched may be NULL.
+
+ if (pos_ > 0) {
+ if (fetched)
+ *fetched = 0;
+ return S_FALSE;
+ }
+
+ ++pos_;
+ pins[0] = pin_.get();
+ pins[0]->AddRef();
+ if (fetched)
+ *fetched = 1;
+
+ return count == 1 ? S_OK : S_FALSE;
+ }
+
+ STDMETHOD(Skip)(ULONG count) {
+ RTC_DCHECK_NOTREACHED();
+ return E_NOTIMPL;
+ }
+
+ STDMETHOD(Reset)() {
+ pos_ = 0;
+ return S_OK;
+ }
+
+ rtc::scoped_refptr<IPin> pin_;
+ int pos_ = 0;
+};
+
+bool IsMediaTypePartialMatch(const AM_MEDIA_TYPE& a, const AM_MEDIA_TYPE& b) {
+ if (b.majortype != GUID_NULL && a.majortype != b.majortype)
+ return false;
+
+ if (b.subtype != GUID_NULL && a.subtype != b.subtype)
+ return false;
+
+ if (b.formattype != GUID_NULL) {
+ // if the format block is specified then it must match exactly
+ if (a.formattype != b.formattype)
+ return false;
+
+ if (a.cbFormat != b.cbFormat)
+ return false;
+
+ if (a.cbFormat != 0 && memcmp(a.pbFormat, b.pbFormat, a.cbFormat) != 0)
+ return false;
+ }
+
+ return true;
+}
+
+bool IsMediaTypeFullySpecified(const AM_MEDIA_TYPE& type) {
+ return type.majortype != GUID_NULL && type.formattype != GUID_NULL;
+}
+
+BYTE* AllocMediaTypeFormatBuffer(AM_MEDIA_TYPE* media_type, ULONG length) {
+ RTC_DCHECK(length);
+ if (media_type->cbFormat == length)
+ return media_type->pbFormat;
+
+ BYTE* buffer = static_cast<BYTE*>(CoTaskMemAlloc(length));
+ if (!buffer)
+ return nullptr;
+
+ if (media_type->pbFormat) {
+ RTC_DCHECK(media_type->cbFormat);
+ CoTaskMemFree(media_type->pbFormat);
+ media_type->pbFormat = nullptr;
+ }
+
+ media_type->cbFormat = length;
+ media_type->pbFormat = buffer;
+ return buffer;
+}
+
+void GetSampleProperties(IMediaSample* sample, AM_SAMPLE2_PROPERTIES* props) {
+ rtc::scoped_refptr<IMediaSample2> sample2;
+ if (SUCCEEDED(GetComInterface(sample, &sample2))) {
+ sample2->GetProperties(sizeof(*props), reinterpret_cast<BYTE*>(props));
+ return;
+ }
+
+ // Get the properties the hard way.
+ props->cbData = sizeof(*props);
+ props->dwTypeSpecificFlags = 0;
+ props->dwStreamId = AM_STREAM_MEDIA;
+ props->dwSampleFlags = 0;
+
+ if (sample->IsDiscontinuity() == S_OK)
+ props->dwSampleFlags |= AM_SAMPLE_DATADISCONTINUITY;
+
+ if (sample->IsPreroll() == S_OK)
+ props->dwSampleFlags |= AM_SAMPLE_PREROLL;
+
+ if (sample->IsSyncPoint() == S_OK)
+ props->dwSampleFlags |= AM_SAMPLE_SPLICEPOINT;
+
+ if (SUCCEEDED(sample->GetTime(&props->tStart, &props->tStop)))
+ props->dwSampleFlags |= AM_SAMPLE_TIMEVALID | AM_SAMPLE_STOPVALID;
+
+ if (sample->GetMediaType(&props->pMediaType) == S_OK)
+ props->dwSampleFlags |= AM_SAMPLE_TYPECHANGED;
+
+ sample->GetPointer(&props->pbBuffer);
+ props->lActual = sample->GetActualDataLength();
+ props->cbBuffer = sample->GetSize();
+}
+
+// Returns true if the media type is supported, false otherwise.
+// For supported types, the `capability` will be populated accordingly.
+bool TranslateMediaTypeToVideoCaptureCapability(
+ const AM_MEDIA_TYPE* media_type,
+ VideoCaptureCapability* capability) {
+ RTC_DCHECK(capability);
+ if (!media_type || media_type->majortype != MEDIATYPE_Video ||
+ !media_type->pbFormat) {
+ return false;
+ }
+
+ const BITMAPINFOHEADER* bih = nullptr;
+ if (media_type->formattype == FORMAT_VideoInfo) {
+ bih = &reinterpret_cast<VIDEOINFOHEADER*>(media_type->pbFormat)->bmiHeader;
+  } else if (media_type->formattype == FORMAT_VideoInfo2) {
+ bih = &reinterpret_cast<VIDEOINFOHEADER2*>(media_type->pbFormat)->bmiHeader;
+ } else {
+ return false;
+ }
+
+ RTC_LOG(LS_INFO) << "TranslateMediaTypeToVideoCaptureCapability width:"
+ << bih->biWidth << " height:" << bih->biHeight
+ << " Compression:0x" << rtc::ToHex(bih->biCompression);
+
+ const GUID& sub_type = media_type->subtype;
+ if (sub_type == MEDIASUBTYPE_MJPG &&
+ bih->biCompression == MAKEFOURCC('M', 'J', 'P', 'G')) {
+ capability->videoType = VideoType::kMJPEG;
+ } else if (sub_type == MEDIASUBTYPE_I420 &&
+ bih->biCompression == MAKEFOURCC('I', '4', '2', '0')) {
+ capability->videoType = VideoType::kI420;
+ } else if (sub_type == MEDIASUBTYPE_YUY2 &&
+ bih->biCompression == MAKEFOURCC('Y', 'U', 'Y', '2')) {
+ capability->videoType = VideoType::kYUY2;
+ } else if (sub_type == MEDIASUBTYPE_UYVY &&
+ bih->biCompression == MAKEFOURCC('U', 'Y', 'V', 'Y')) {
+ capability->videoType = VideoType::kUYVY;
+ } else if (sub_type == MEDIASUBTYPE_HDYC) {
+ capability->videoType = VideoType::kUYVY;
+ } else if (sub_type == MEDIASUBTYPE_RGB24 && bih->biCompression == BI_RGB) {
+ capability->videoType = VideoType::kRGB24;
+ } else {
+ return false;
+ }
+
+  // Store the incoming width and height.
+  capability->width = bih->biWidth;
+
+  // Store the incoming height. For RGB24, a positive biHeight means a
+  // bottom-up (upside-down) frame, so flip the sign to record that.
+  if (sub_type == MEDIASUBTYPE_RGB24 && bih->biHeight > 0) {
+    capability->height = -(bih->biHeight);
+  } else {
+    capability->height = abs(bih->biHeight);
+  }
+
+ return true;
+}
+
+class MediaTypesEnum : public IEnumMediaTypes {
+ public:
+ MediaTypesEnum(const VideoCaptureCapability& capability)
+ : capability_(capability),
+ format_preference_order_(
+ {// Default preferences, sorted by cost-to-convert-to-i420.
+ VideoType::kI420, VideoType::kYUY2, VideoType::kRGB24,
+ VideoType::kUYVY, VideoType::kMJPEG}) {
+ // Use the preferred video type, if supported.
+ auto it = std::find(format_preference_order_.begin(),
+ format_preference_order_.end(), capability_.videoType);
+ if (it != format_preference_order_.end()) {
+ RTC_LOG(LS_INFO) << "Selected video type: " << *it;
+ // Move it to the front of the list, if it isn't already there.
+ if (it != format_preference_order_.begin()) {
+ format_preference_order_.splice(format_preference_order_.begin(),
+ format_preference_order_, it,
+ std::next(it));
+ }
+    } else {
+      RTC_LOG(LS_WARNING) << "Unsupported video type: "
+                          << static_cast<int>(capability_.videoType)
+                          << ", using default preference list.";
+    }
+ }
+
+ protected:
+ virtual ~MediaTypesEnum() {}
+
+ private:
+ STDMETHOD(QueryInterface)(REFIID riid, void** ppv) override {
+ if (riid == IID_IUnknown || riid == IID_IEnumMediaTypes) {
+ *ppv = static_cast<IEnumMediaTypes*>(this);
+ AddRef();
+ return S_OK;
+ }
+ return E_NOINTERFACE;
+ }
+
+ // IEnumMediaTypes
+ STDMETHOD(Clone)(IEnumMediaTypes** pins) {
+ RTC_DCHECK_NOTREACHED();
+ return E_NOTIMPL;
+ }
+
+ STDMETHOD(Next)(ULONG count, AM_MEDIA_TYPE** types, ULONG* fetched) {
+ RTC_DCHECK(count > 0);
+ RTC_DCHECK(types);
+ // fetched may be NULL.
+ if (fetched)
+ *fetched = 0;
+
+ for (ULONG i = 0;
+ i < count && pos_ < static_cast<int>(format_preference_order_.size());
+ ++i) {
+ AM_MEDIA_TYPE* media_type = reinterpret_cast<AM_MEDIA_TYPE*>(
+ CoTaskMemAlloc(sizeof(AM_MEDIA_TYPE)));
+ ZeroMemory(media_type, sizeof(*media_type));
+ types[i] = media_type;
+ VIDEOINFOHEADER* vih = reinterpret_cast<VIDEOINFOHEADER*>(
+ AllocMediaTypeFormatBuffer(media_type, sizeof(VIDEOINFOHEADER)));
+ ZeroMemory(vih, sizeof(*vih));
+ vih->bmiHeader.biSize = sizeof(BITMAPINFOHEADER);
+ vih->bmiHeader.biPlanes = 1;
+ vih->bmiHeader.biClrImportant = 0;
+ vih->bmiHeader.biClrUsed = 0;
+ if (capability_.maxFPS != 0)
+ vih->AvgTimePerFrame = 10000000 / capability_.maxFPS;
+
+ SetRectEmpty(&vih->rcSource); // we want the whole image area rendered.
+ SetRectEmpty(&vih->rcTarget); // no particular destination rectangle
+
+ media_type->majortype = MEDIATYPE_Video;
+ media_type->formattype = FORMAT_VideoInfo;
+ media_type->bTemporalCompression = FALSE;
+
+ // Set format information.
+ auto format_it = std::next(format_preference_order_.begin(), pos_++);
+ SetMediaInfoFromVideoType(*format_it, &vih->bmiHeader, media_type);
+
+ vih->bmiHeader.biWidth = capability_.width;
+ vih->bmiHeader.biHeight = capability_.height;
+      // = width * height * biBitCount / 8, avoiding 12-bpp truncation.
+      vih->bmiHeader.biSizeImage = ((vih->bmiHeader.biBitCount / 4) *
+                                    capability_.height * capability_.width) /
+                                   2;
+
+ RTC_DCHECK(vih->bmiHeader.biSizeImage);
+ media_type->lSampleSize = vih->bmiHeader.biSizeImage;
+ media_type->bFixedSizeSamples = true;
+ if (fetched)
+ ++(*fetched);
+ }
+ return pos_ == static_cast<int>(format_preference_order_.size()) ? S_FALSE
+ : S_OK;
+ }
+
+ static void SetMediaInfoFromVideoType(VideoType video_type,
+ BITMAPINFOHEADER* bitmap_header,
+ AM_MEDIA_TYPE* media_type) {
+ switch (video_type) {
+ case VideoType::kI420:
+ bitmap_header->biCompression = MAKEFOURCC('I', '4', '2', '0');
+        bitmap_header->biBitCount = 12;  // bits per pixel
+ media_type->subtype = MEDIASUBTYPE_I420;
+ break;
+ case VideoType::kYUY2:
+ bitmap_header->biCompression = MAKEFOURCC('Y', 'U', 'Y', '2');
+        bitmap_header->biBitCount = 16;  // bits per pixel
+ media_type->subtype = MEDIASUBTYPE_YUY2;
+ break;
+ case VideoType::kRGB24:
+ bitmap_header->biCompression = BI_RGB;
+        bitmap_header->biBitCount = 24;  // bits per pixel
+ media_type->subtype = MEDIASUBTYPE_RGB24;
+ break;
+ case VideoType::kUYVY:
+ bitmap_header->biCompression = MAKEFOURCC('U', 'Y', 'V', 'Y');
+        bitmap_header->biBitCount = 16;  // bits per pixel
+ media_type->subtype = MEDIASUBTYPE_UYVY;
+ break;
+ case VideoType::kMJPEG:
+ bitmap_header->biCompression = MAKEFOURCC('M', 'J', 'P', 'G');
+        bitmap_header->biBitCount = 12;  // bits per pixel
+ media_type->subtype = MEDIASUBTYPE_MJPG;
+ break;
+ default:
+ RTC_DCHECK_NOTREACHED();
+ }
+ }
+
+ STDMETHOD(Skip)(ULONG count) {
+ RTC_DCHECK_NOTREACHED();
+ return E_NOTIMPL;
+ }
+
+ STDMETHOD(Reset)() {
+ pos_ = 0;
+ return S_OK;
+ }
+
+ int pos_ = 0;
+ const VideoCaptureCapability capability_;
+ std::list<VideoType> format_preference_order_;
+};
+
+} // namespace
+
+CaptureInputPin::CaptureInputPin(CaptureSinkFilter* filter) {
+ capture_checker_.Detach();
+ // No reference held to avoid circular references.
+ info_.pFilter = filter;
+ info_.dir = PINDIR_INPUT;
+}
+
+CaptureInputPin::~CaptureInputPin() {
+ RTC_DCHECK_RUN_ON(&main_checker_);
+ ResetMediaType(&media_type_);
+}
+
+HRESULT CaptureInputPin::SetRequestedCapability(
+ const VideoCaptureCapability& capability) {
+ RTC_DCHECK_RUN_ON(&main_checker_);
+ RTC_DCHECK(Filter()->IsStopped());
+ requested_capability_ = capability;
+ resulting_capability_ = VideoCaptureCapability();
+ return S_OK;
+}
+
+void CaptureInputPin::OnFilterActivated() {
+ RTC_DCHECK_RUN_ON(&main_checker_);
+ runtime_error_ = false;
+ flushing_ = false;
+ capture_checker_.Detach();
+ capture_thread_id_ = 0;
+}
+
+void CaptureInputPin::OnFilterDeactivated() {
+ RTC_DCHECK_RUN_ON(&main_checker_);
+ // Expedite shutdown by raising the flushing flag so no further processing
+ // on the capture thread occurs. When the graph is stopped and all filters
+ // have been told to stop, the media controller (graph) will wait for the
+ // capture thread to stop.
+ flushing_ = true;
+ if (allocator_)
+ allocator_->Decommit();
+}
+
+CaptureSinkFilter* CaptureInputPin::Filter() const {
+ return static_cast<CaptureSinkFilter*>(info_.pFilter);
+}
+
+HRESULT CaptureInputPin::AttemptConnection(IPin* receive_pin,
+ const AM_MEDIA_TYPE* media_type) {
+ RTC_DCHECK_RUN_ON(&main_checker_);
+ RTC_DCHECK(Filter()->IsStopped());
+
+ // Check that the connection is valid -- need to do this for every
+ // connect attempt since BreakConnect will undo it.
+ HRESULT hr = CheckDirection(receive_pin);
+ if (FAILED(hr))
+ return hr;
+
+ if (!TranslateMediaTypeToVideoCaptureCapability(media_type,
+ &resulting_capability_)) {
+ ClearAllocator(true);
+ return VFW_E_TYPE_NOT_ACCEPTED;
+ }
+
+ // See if the other pin will accept this type.
+ hr = receive_pin->ReceiveConnection(static_cast<IPin*>(this), media_type);
+ if (FAILED(hr)) {
+ receive_pin_ = nullptr; // Should already be null, but just in case.
+ return hr;
+ }
+
+ // Should have been set as part of the connect process.
+ RTC_DCHECK_EQ(receive_pin_, receive_pin);
+
+ ResetMediaType(&media_type_);
+ CopyMediaType(&media_type_, media_type);
+
+ return S_OK;
+}
+
+std::vector<AM_MEDIA_TYPE*> CaptureInputPin::DetermineCandidateFormats(
+ IPin* receive_pin,
+ const AM_MEDIA_TYPE* media_type) {
+ RTC_DCHECK_RUN_ON(&main_checker_);
+ RTC_DCHECK(receive_pin);
+ RTC_DCHECK(media_type);
+
+ std::vector<AM_MEDIA_TYPE*> ret;
+
+ for (int i = 0; i < 2; i++) {
+ IEnumMediaTypes* types = nullptr;
+ if (i == 0) {
+ // First time around, try types from receive_pin.
+ receive_pin->EnumMediaTypes(&types);
+ } else {
+ // Then try ours.
+ EnumMediaTypes(&types);
+ }
+
+ if (types) {
+ while (true) {
+ ULONG fetched = 0;
+ AM_MEDIA_TYPE* this_type = nullptr;
+ if (types->Next(1, &this_type, &fetched) != S_OK)
+ break;
+
+ if (IsMediaTypePartialMatch(*this_type, *media_type)) {
+ ret.push_back(this_type);
+ } else {
+ FreeMediaType(this_type);
+ }
+ }
+ types->Release();
+ }
+ }
+
+ return ret;
+}
+
+void CaptureInputPin::ClearAllocator(bool decommit) {
+ RTC_DCHECK_RUN_ON(&main_checker_);
+ if (!allocator_)
+ return;
+ if (decommit)
+ allocator_->Decommit();
+ allocator_ = nullptr;
+}
+
+HRESULT CaptureInputPin::CheckDirection(IPin* pin) const {
+ RTC_DCHECK_RUN_ON(&main_checker_);
+ PIN_DIRECTION pd;
+ pin->QueryDirection(&pd);
+ // Fairly basic check, make sure we don't pair input with input etc.
+ return pd == info_.dir ? VFW_E_INVALID_DIRECTION : S_OK;
+}
+
+COM_DECLSPEC_NOTHROW STDMETHODIMP CaptureInputPin::QueryInterface(REFIID riid,
+ void** ppv) {
+ (*ppv) = nullptr;
+ if (riid == IID_IUnknown || riid == IID_IMemInputPin) {
+ *ppv = static_cast<IMemInputPin*>(this);
+ } else if (riid == IID_IPin) {
+ *ppv = static_cast<IPin*>(this);
+ }
+
+ if (!(*ppv))
+ return E_NOINTERFACE;
+
+ static_cast<IMemInputPin*>(this)->AddRef();
+ return S_OK;
+}
+
+COM_DECLSPEC_NOTHROW STDMETHODIMP
+CaptureInputPin::Connect(IPin* receive_pin, const AM_MEDIA_TYPE* media_type) {
+ RTC_DCHECK_RUN_ON(&main_checker_);
+ if (!media_type || !receive_pin)
+ return E_POINTER;
+
+ if (!Filter()->IsStopped())
+ return VFW_E_NOT_STOPPED;
+
+ if (receive_pin_) {
+ RTC_DCHECK_NOTREACHED();
+ return VFW_E_ALREADY_CONNECTED;
+ }
+
+ if (IsMediaTypeFullySpecified(*media_type))
+ return AttemptConnection(receive_pin, media_type);
+
+ auto types = DetermineCandidateFormats(receive_pin, media_type);
+ bool connected = false;
+ for (auto* type : types) {
+    if (!connected && AttemptConnection(receive_pin, type) == S_OK)
+ connected = true;
+
+ FreeMediaType(type);
+ }
+
+ return connected ? S_OK : VFW_E_NO_ACCEPTABLE_TYPES;
+}
+
+COM_DECLSPEC_NOTHROW STDMETHODIMP
+CaptureInputPin::ReceiveConnection(IPin* connector,
+ const AM_MEDIA_TYPE* media_type) {
+ RTC_DCHECK_RUN_ON(&main_checker_);
+ RTC_DCHECK(Filter()->IsStopped());
+
+ if (receive_pin_) {
+ RTC_DCHECK_NOTREACHED();
+ return VFW_E_ALREADY_CONNECTED;
+ }
+
+ HRESULT hr = CheckDirection(connector);
+ if (FAILED(hr))
+ return hr;
+
+ if (!TranslateMediaTypeToVideoCaptureCapability(media_type,
+ &resulting_capability_))
+ return VFW_E_TYPE_NOT_ACCEPTED;
+
+ // Complete the connection
+
+ receive_pin_ = connector;
+ ResetMediaType(&media_type_);
+ CopyMediaType(&media_type_, media_type);
+
+ return S_OK;
+}
+
+COM_DECLSPEC_NOTHROW STDMETHODIMP CaptureInputPin::Disconnect() {
+ RTC_DCHECK_RUN_ON(&main_checker_);
+ if (!Filter()->IsStopped())
+ return VFW_E_NOT_STOPPED;
+
+ if (!receive_pin_)
+ return S_FALSE;
+
+ ClearAllocator(true);
+ receive_pin_ = nullptr;
+
+ return S_OK;
+}
+
+COM_DECLSPEC_NOTHROW STDMETHODIMP CaptureInputPin::ConnectedTo(IPin** pin) {
+ RTC_DCHECK_RUN_ON(&main_checker_);
+
+ if (!receive_pin_)
+ return VFW_E_NOT_CONNECTED;
+
+ *pin = receive_pin_.get();
+ receive_pin_->AddRef();
+
+ return S_OK;
+}
+
+COM_DECLSPEC_NOTHROW STDMETHODIMP
+CaptureInputPin::ConnectionMediaType(AM_MEDIA_TYPE* media_type) {
+ RTC_DCHECK_RUN_ON(&main_checker_);
+
+ if (!receive_pin_)
+ return VFW_E_NOT_CONNECTED;
+
+ CopyMediaType(media_type, &media_type_);
+
+ return S_OK;
+}
+
+COM_DECLSPEC_NOTHROW STDMETHODIMP
+CaptureInputPin::QueryPinInfo(PIN_INFO* info) {
+ RTC_DCHECK_RUN_ON(&main_checker_);
+ *info = info_;
+ if (info_.pFilter)
+ info_.pFilter->AddRef();
+ return S_OK;
+}
+
+COM_DECLSPEC_NOTHROW STDMETHODIMP
+CaptureInputPin::QueryDirection(PIN_DIRECTION* pin_dir) {
+ RTC_DCHECK_RUN_ON(&main_checker_);
+ *pin_dir = info_.dir;
+ return S_OK;
+}
+
+COM_DECLSPEC_NOTHROW STDMETHODIMP CaptureInputPin::QueryId(LPWSTR* id) {
+ RTC_DCHECK_RUN_ON(&main_checker_);
+ size_t len = lstrlenW(info_.achName);
+ *id = reinterpret_cast<LPWSTR>(CoTaskMemAlloc((len + 1) * sizeof(wchar_t)));
+ lstrcpyW(*id, info_.achName);
+ return S_OK;
+}
+
+COM_DECLSPEC_NOTHROW STDMETHODIMP
+CaptureInputPin::QueryAccept(const AM_MEDIA_TYPE* media_type) {
+ RTC_DCHECK_RUN_ON(&main_checker_);
+ RTC_DCHECK(Filter()->IsStopped());
+ VideoCaptureCapability capability(resulting_capability_);
+ return TranslateMediaTypeToVideoCaptureCapability(media_type, &capability)
+ ? S_FALSE
+ : S_OK;
+}
+
+COM_DECLSPEC_NOTHROW STDMETHODIMP
+CaptureInputPin::EnumMediaTypes(IEnumMediaTypes** types) {
+ RTC_DCHECK_RUN_ON(&main_checker_);
+ *types = new ComRefCount<MediaTypesEnum>(requested_capability_);
+ (*types)->AddRef();
+ return S_OK;
+}
+
+COM_DECLSPEC_NOTHROW STDMETHODIMP
+CaptureInputPin::QueryInternalConnections(IPin** pins, ULONG* count) {
+ return E_NOTIMPL;
+}
+
+COM_DECLSPEC_NOTHROW STDMETHODIMP CaptureInputPin::EndOfStream() {
+ return S_OK;
+}
+
+COM_DECLSPEC_NOTHROW STDMETHODIMP CaptureInputPin::BeginFlush() {
+ RTC_DCHECK_RUN_ON(&main_checker_);
+ flushing_ = true;
+ return S_OK;
+}
+
+COM_DECLSPEC_NOTHROW STDMETHODIMP CaptureInputPin::EndFlush() {
+ RTC_DCHECK_RUN_ON(&main_checker_);
+ flushing_ = false;
+ runtime_error_ = false;
+ return S_OK;
+}
+
+COM_DECLSPEC_NOTHROW STDMETHODIMP
+CaptureInputPin::NewSegment(REFERENCE_TIME start,
+ REFERENCE_TIME stop,
+ double rate) {
+ RTC_DCHECK_RUN_ON(&main_checker_);
+ return S_OK;
+}
+
+COM_DECLSPEC_NOTHROW STDMETHODIMP
+CaptureInputPin::GetAllocator(IMemAllocator** allocator) {
+ RTC_DCHECK_RUN_ON(&main_checker_);
+ if (allocator_ == nullptr) {
+ HRESULT hr = CoCreateInstance(CLSID_MemoryAllocator, 0,
+ CLSCTX_INPROC_SERVER, IID_IMemAllocator,
+ reinterpret_cast<void**>(allocator));
+ if (FAILED(hr))
+ return hr;
+ allocator_.swap(allocator);
+ }
+ *allocator = allocator_.get();
+ allocator_->AddRef();
+ return S_OK;
+}
+
+COM_DECLSPEC_NOTHROW STDMETHODIMP
+CaptureInputPin::NotifyAllocator(IMemAllocator* allocator, BOOL read_only) {
+ RTC_DCHECK_RUN_ON(&main_checker_);
+ allocator_.swap(&allocator);
+ if (allocator_)
+ allocator_->AddRef();
+ if (allocator)
+ allocator->Release();
+ return S_OK;
+}
+
+COM_DECLSPEC_NOTHROW STDMETHODIMP
+CaptureInputPin::GetAllocatorRequirements(ALLOCATOR_PROPERTIES* props) {
+ return E_NOTIMPL;
+}
+
+COM_DECLSPEC_NOTHROW STDMETHODIMP
+CaptureInputPin::Receive(IMediaSample* media_sample) {
+ RTC_DCHECK_RUN_ON(&capture_checker_);
+
+ CaptureSinkFilter* const filter = static_cast<CaptureSinkFilter*>(Filter());
+
+ if (flushing_.load(std::memory_order_relaxed))
+ return S_FALSE;
+
+ if (runtime_error_.load(std::memory_order_relaxed))
+ return VFW_E_RUNTIME_ERROR;
+
+ if (!capture_thread_id_) {
+ // Make sure we set the thread name only once.
+ capture_thread_id_ = GetCurrentThreadId();
+ rtc::SetCurrentThreadName("webrtc_video_capture");
+ }
+
+ AM_SAMPLE2_PROPERTIES sample_props = {};
+ GetSampleProperties(media_sample, &sample_props);
+ // Has the format changed in this sample?
+ if (sample_props.dwSampleFlags & AM_SAMPLE_TYPECHANGED) {
+ // Check the derived class accepts the new format.
+ // This shouldn't fail as the source must call QueryAccept first.
+
+ // Note: This will modify resulting_capability_.
+ // That should be OK as long as resulting_capability_ is only modified
+ // on this thread while it is running (filter is not stopped), and only
+ // modified on the main thread when the filter is stopped (i.e. this thread
+ // is not running).
+ if (!TranslateMediaTypeToVideoCaptureCapability(sample_props.pMediaType,
+ &resulting_capability_)) {
+ // Raise a runtime error if we fail the media type
+ runtime_error_ = true;
+ EndOfStream();
+ Filter()->NotifyEvent(EC_ERRORABORT, VFW_E_TYPE_NOT_ACCEPTED, 0);
+ return VFW_E_INVALIDMEDIATYPE;
+ }
+ }
+
+ filter->ProcessCapturedFrame(sample_props.pbBuffer, sample_props.lActual,
+ resulting_capability_);
+
+ return S_OK;
+}
+
+COM_DECLSPEC_NOTHROW STDMETHODIMP
+CaptureInputPin::ReceiveMultiple(IMediaSample** samples,
+ long count,
+ long* processed) {
+ HRESULT hr = S_OK;
+ *processed = 0;
+ while (count-- > 0) {
+ hr = Receive(samples[*processed]);
+ if (hr != S_OK)
+ break;
+ ++(*processed);
+ }
+ return hr;
+}
+
+COM_DECLSPEC_NOTHROW STDMETHODIMP CaptureInputPin::ReceiveCanBlock() {
+ return S_FALSE;
+}
+
+// ----------------------------------------------------------------------------
+
+CaptureSinkFilter::CaptureSinkFilter(VideoCaptureImpl* capture_observer)
+ : input_pin_(new ComRefCount<CaptureInputPin>(this)),
+ capture_observer_(capture_observer) {}
+
+CaptureSinkFilter::~CaptureSinkFilter() {
+ RTC_DCHECK_RUN_ON(&main_checker_);
+}
+
+HRESULT CaptureSinkFilter::SetRequestedCapability(
+ const VideoCaptureCapability& capability) {
+ RTC_DCHECK_RUN_ON(&main_checker_);
+ // Called on the same thread as capture is started on.
+ return input_pin_->SetRequestedCapability(capability);
+}
+
+COM_DECLSPEC_NOTHROW STDMETHODIMP
+CaptureSinkFilter::GetState(DWORD msecs, FILTER_STATE* state) {
+ RTC_DCHECK_RUN_ON(&main_checker_);
+ *state = state_;
+ return S_OK;
+}
+
+COM_DECLSPEC_NOTHROW STDMETHODIMP
+CaptureSinkFilter::SetSyncSource(IReferenceClock* clock) {
+ RTC_DCHECK_RUN_ON(&main_checker_);
+ return S_OK;
+}
+
+COM_DECLSPEC_NOTHROW STDMETHODIMP
+CaptureSinkFilter::GetSyncSource(IReferenceClock** clock) {
+ RTC_DCHECK_RUN_ON(&main_checker_);
+ return E_NOTIMPL;
+}
+
+COM_DECLSPEC_NOTHROW STDMETHODIMP CaptureSinkFilter::Pause() {
+ RTC_DCHECK_RUN_ON(&main_checker_);
+ state_ = State_Paused;
+ return S_OK;
+}
+
+COM_DECLSPEC_NOTHROW STDMETHODIMP
+CaptureSinkFilter::Run(REFERENCE_TIME tStart) {
+ RTC_DCHECK_RUN_ON(&main_checker_);
+ if (state_ == State_Stopped)
+ Pause();
+
+ state_ = State_Running;
+ input_pin_->OnFilterActivated();
+
+ return S_OK;
+}
+
+COM_DECLSPEC_NOTHROW STDMETHODIMP CaptureSinkFilter::Stop() {
+ RTC_DCHECK_RUN_ON(&main_checker_);
+ if (state_ == State_Stopped)
+ return S_OK;
+
+ state_ = State_Stopped;
+ input_pin_->OnFilterDeactivated();
+
+ return S_OK;
+}
+
+COM_DECLSPEC_NOTHROW STDMETHODIMP
+CaptureSinkFilter::EnumPins(IEnumPins** pins) {
+ RTC_DCHECK_RUN_ON(&main_checker_);
+ *pins = new ComRefCount<class EnumPins>(input_pin_.get());
+ (*pins)->AddRef();
+ return S_OK;
+}
+
+COM_DECLSPEC_NOTHROW STDMETHODIMP CaptureSinkFilter::FindPin(LPCWSTR id,
+ IPin** pin) {
+ RTC_DCHECK_RUN_ON(&main_checker_);
+ // There's no ID assigned to our input pin, so looking it up based on one
+ // is pointless (and in practice, this method isn't being used).
+ return VFW_E_NOT_FOUND;
+}
+
+COM_DECLSPEC_NOTHROW STDMETHODIMP
+CaptureSinkFilter::QueryFilterInfo(FILTER_INFO* info) {
+ RTC_DCHECK_RUN_ON(&main_checker_);
+ *info = info_;
+ if (info->pGraph)
+ info->pGraph->AddRef();
+ return S_OK;
+}
+
+COM_DECLSPEC_NOTHROW STDMETHODIMP
+CaptureSinkFilter::JoinFilterGraph(IFilterGraph* graph, LPCWSTR name) {
+ RTC_DCHECK_RUN_ON(&main_checker_);
+ RTC_DCHECK(IsStopped());
+
+  // Note: since a reference to the filter is held by the graph manager,
+  // filters must not hold a reference to the graph. If they did, we'd have
+  // a circular reference. Instead, a pointer to the graph can be held
+  // without a reference. See IBaseFilter::JoinFilterGraph documentation.
+ info_.pGraph = graph; // No AddRef().
+ sink_ = nullptr;
+
+ if (info_.pGraph) {
+ // make sure we don't hold on to the reference we may receive.
+ // Note that this assumes the same object identity, but so be it.
+ rtc::scoped_refptr<IMediaEventSink> sink;
+ GetComInterface(info_.pGraph, &sink);
+ sink_ = sink.get();
+ }
+
+ info_.achName[0] = L'\0';
+ if (name)
+ lstrcpynW(info_.achName, name, arraysize(info_.achName));
+
+ return S_OK;
+}
+
+COM_DECLSPEC_NOTHROW STDMETHODIMP
+CaptureSinkFilter::QueryVendorInfo(LPWSTR* vendor_info) {
+ return E_NOTIMPL;
+}
+
+void CaptureSinkFilter::ProcessCapturedFrame(
+ unsigned char* buffer,
+ size_t length,
+ const VideoCaptureCapability& frame_info) {
+ // Called on the capture thread.
+ capture_observer_->IncomingFrame(buffer, length, frame_info);
+}
+
+void CaptureSinkFilter::NotifyEvent(long code,
+ LONG_PTR param1,
+ LONG_PTR param2) {
+ // Called on the capture thread.
+ if (!sink_)
+ return;
+
+ if (EC_COMPLETE == code)
+ param2 = reinterpret_cast<LONG_PTR>(static_cast<IBaseFilter*>(this));
+ sink_->Notify(code, param1, param2);
+}
+
+bool CaptureSinkFilter::IsStopped() const {
+ RTC_DCHECK_RUN_ON(&main_checker_);
+ return state_ == State_Stopped;
+}
+
+COM_DECLSPEC_NOTHROW STDMETHODIMP
+CaptureSinkFilter::QueryInterface(REFIID riid, void** ppv) {
+ if (riid == IID_IUnknown || riid == IID_IPersist || riid == IID_IBaseFilter) {
+ *ppv = static_cast<IBaseFilter*>(this);
+ AddRef();
+ return S_OK;
+ }
+ return E_NOINTERFACE;
+}
+
+COM_DECLSPEC_NOTHROW STDMETHODIMP CaptureSinkFilter::GetClassID(CLSID* clsid) {
+ *clsid = CLSID_SINKFILTER;
+ return S_OK;
+}
+
+} // namespace videocapturemodule
+} // namespace webrtc
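
The filter above is created with ComRefCount (its count starts at zero, so the
first scoped_refptr takes it to one) and is then driven entirely by the graph
manager. A sketch of the wiring a graph owner performs, mirroring what
VideoCaptureDS::Init() does later in this patch; AddSinkToGraph itself is
illustrative only.

    // Illustrative sketch: attach the sink filter to an existing graph.
    #include "modules/video_capture/windows/sink_filter_ds.h"

    using webrtc::VideoCaptureCapability;
    using webrtc::videocapturemodule::CaptureSinkFilter;
    using webrtc::videocapturemodule::ComRefCount;
    using webrtc::videocapturemodule::VideoCaptureImpl;

    HRESULT AddSinkToGraph(IGraphBuilder* graph,
                           VideoCaptureImpl* observer,
                           const VideoCaptureCapability& capability,
                           rtc::scoped_refptr<CaptureSinkFilter>* out) {
      // scoped_refptr AddRefs the freshly constructed filter (count 0 -> 1).
      rtc::scoped_refptr<CaptureSinkFilter> sink(
          new ComRefCount<CaptureSinkFilter>(observer));
      HRESULT hr = graph->AddFilter(sink.get(), L"SinkFilter");
      if (FAILED(hr))
        return hr;
      hr = sink->SetRequestedCapability(capability);  // Filter must be stopped.
      if (FAILED(hr))
        return hr;
      *out = sink;
      return S_OK;
    }
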
diff --git a/third_party/libwebrtc/modules/video_capture/windows/sink_filter_ds.h b/third_party/libwebrtc/modules/video_capture/windows/sink_filter_ds.h
new file mode 100644
index 0000000000..b0fabda3cd
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_capture/windows/sink_filter_ds.h
@@ -0,0 +1,162 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CAPTURE_MAIN_SOURCE_WINDOWS_SINK_FILTER_DS_H_
+#define MODULES_VIDEO_CAPTURE_MAIN_SOURCE_WINDOWS_SINK_FILTER_DS_H_
+
+#include <dshow.h>
+
+#include <atomic>
+#include <memory>
+#include <vector>
+
+#include "api/sequence_checker.h"
+#include "modules/video_capture/video_capture_impl.h"
+#include "modules/video_capture/windows/help_functions_ds.h"
+#include "rtc_base/thread_annotations.h"
+
+namespace webrtc {
+namespace videocapturemodule {
+// forward declarations
+class CaptureSinkFilter;
+
+// Input pin for camera input
+// Implements IMemInputPin, IPin.
+class CaptureInputPin : public IMemInputPin, public IPin {
+ public:
+ CaptureInputPin(CaptureSinkFilter* filter);
+
+ HRESULT SetRequestedCapability(const VideoCaptureCapability& capability);
+
+ // Notifications from the filter.
+ void OnFilterActivated();
+ void OnFilterDeactivated();
+
+ protected:
+ virtual ~CaptureInputPin();
+
+ private:
+ CaptureSinkFilter* Filter() const;
+
+ HRESULT AttemptConnection(IPin* receive_pin, const AM_MEDIA_TYPE* media_type);
+ std::vector<AM_MEDIA_TYPE*> DetermineCandidateFormats(
+ IPin* receive_pin,
+ const AM_MEDIA_TYPE* media_type);
+ void ClearAllocator(bool decommit);
+ HRESULT CheckDirection(IPin* pin) const;
+
+ // IUnknown
+ STDMETHOD(QueryInterface)(REFIID riid, void** ppv) override;
+
+ // clang-format off
+ // clang isn't sure what to do with the longer STDMETHOD() function
+ // declarations.
+
+ // IPin
+ STDMETHOD(Connect)(IPin* receive_pin,
+ const AM_MEDIA_TYPE* media_type) override;
+ STDMETHOD(ReceiveConnection)(IPin* connector,
+ const AM_MEDIA_TYPE* media_type) override;
+ STDMETHOD(Disconnect)() override;
+ STDMETHOD(ConnectedTo)(IPin** pin) override;
+ STDMETHOD(ConnectionMediaType)(AM_MEDIA_TYPE* media_type) override;
+ STDMETHOD(QueryPinInfo)(PIN_INFO* info) override;
+ STDMETHOD(QueryDirection)(PIN_DIRECTION* pin_dir) override;
+ STDMETHOD(QueryId)(LPWSTR* id) override;
+ STDMETHOD(QueryAccept)(const AM_MEDIA_TYPE* media_type) override;
+ STDMETHOD(EnumMediaTypes)(IEnumMediaTypes** types) override;
+ STDMETHOD(QueryInternalConnections)(IPin** pins, ULONG* count) override;
+ STDMETHOD(EndOfStream)() override;
+ STDMETHOD(BeginFlush)() override;
+ STDMETHOD(EndFlush)() override;
+ STDMETHOD(NewSegment)(REFERENCE_TIME start, REFERENCE_TIME stop,
+ double rate) override;
+
+ // IMemInputPin
+ STDMETHOD(GetAllocator)(IMemAllocator** allocator) override;
+ STDMETHOD(NotifyAllocator)(IMemAllocator* allocator, BOOL read_only) override;
+ STDMETHOD(GetAllocatorRequirements)(ALLOCATOR_PROPERTIES* props) override;
+ STDMETHOD(Receive)(IMediaSample* sample) override;
+ STDMETHOD(ReceiveMultiple)(IMediaSample** samples, long count,
+ long* processed) override;
+ STDMETHOD(ReceiveCanBlock)() override;
+ // clang-format on
+
+ SequenceChecker main_checker_;
+ SequenceChecker capture_checker_;
+
+ VideoCaptureCapability requested_capability_ RTC_GUARDED_BY(main_checker_);
+ // Accessed on the main thread when Filter()->IsStopped() (capture thread not
+ // running), otherwise accessed on the capture thread.
+ VideoCaptureCapability resulting_capability_;
+ DWORD capture_thread_id_ = 0;
+ rtc::scoped_refptr<IMemAllocator> allocator_ RTC_GUARDED_BY(main_checker_);
+ rtc::scoped_refptr<IPin> receive_pin_ RTC_GUARDED_BY(main_checker_);
+ std::atomic_bool flushing_{false};
+ std::atomic_bool runtime_error_{false};
+ // Holds a referenceless pointer to the owning filter, the name and
+ // direction of the pin. The filter pointer can be considered const.
+ PIN_INFO info_ = {};
+ AM_MEDIA_TYPE media_type_ RTC_GUARDED_BY(main_checker_) = {};
+};
+
+// Implement IBaseFilter (including IPersist and IMediaFilter).
+class CaptureSinkFilter : public IBaseFilter {
+ public:
+ CaptureSinkFilter(VideoCaptureImpl* capture_observer);
+
+ HRESULT SetRequestedCapability(const VideoCaptureCapability& capability);
+
+ // Called on the capture thread.
+ void ProcessCapturedFrame(unsigned char* buffer,
+ size_t length,
+ const VideoCaptureCapability& frame_info);
+
+ void NotifyEvent(long code, LONG_PTR param1, LONG_PTR param2);
+ bool IsStopped() const;
+
+ // IUnknown
+ STDMETHOD(QueryInterface)(REFIID riid, void** ppv) override;
+
+ // IPersist
+ STDMETHOD(GetClassID)(CLSID* clsid) override;
+
+ // IMediaFilter.
+ STDMETHOD(GetState)(DWORD msecs, FILTER_STATE* state) override;
+ STDMETHOD(SetSyncSource)(IReferenceClock* clock) override;
+ STDMETHOD(GetSyncSource)(IReferenceClock** clock) override;
+ STDMETHOD(Pause)() override;
+ STDMETHOD(Run)(REFERENCE_TIME start) override;
+ STDMETHOD(Stop)() override;
+
+ // IBaseFilter
+ STDMETHOD(EnumPins)(IEnumPins** pins) override;
+ STDMETHOD(FindPin)(LPCWSTR id, IPin** pin) override;
+ STDMETHOD(QueryFilterInfo)(FILTER_INFO* info) override;
+ STDMETHOD(JoinFilterGraph)(IFilterGraph* graph, LPCWSTR name) override;
+ STDMETHOD(QueryVendorInfo)(LPWSTR* vendor_info) override;
+
+ protected:
+ virtual ~CaptureSinkFilter();
+
+ private:
+ SequenceChecker main_checker_;
+ const rtc::scoped_refptr<ComRefCount<CaptureInputPin>> input_pin_;
+ VideoCaptureImpl* const capture_observer_;
+ FILTER_INFO info_ RTC_GUARDED_BY(main_checker_) = {};
+ // Set/cleared in JoinFilterGraph. The filter must be stopped (no capture)
+ // at that time, so no lock is required. While the state is not stopped,
+ // the sink will be used from the capture thread.
+ IMediaEventSink* sink_ = nullptr;
+ FILTER_STATE state_ RTC_GUARDED_BY(main_checker_) = State_Stopped;
+};
+} // namespace videocapturemodule
+} // namespace webrtc
+#endif // MODULES_VIDEO_CAPTURE_MAIN_SOURCE_WINDOWS_SINK_FILTER_DS_H_
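
The header above splits state between main_checker_ and capture_checker_, with
only the two atomics (flushing_, runtime_error_) crossing the thread boundary.
The shutdown pattern in isolation looks like the following sketch;
StreamingSink is illustrative and not a class in this patch.

    // Illustrative sketch of the cross-thread flush pattern used above:
    // the control thread raises an atomic flag; the streaming thread polls
    // it and rejects samples instead of taking a lock on the hot path.
    #include <atomic>

    class StreamingSink {
     public:
      void BeginFlush() { flushing_.store(true, std::memory_order_relaxed); }
      void EndFlush() { flushing_.store(false, std::memory_order_relaxed); }

      // Called on the streaming (capture) thread for every sample.
      bool Receive() {
        if (flushing_.load(std::memory_order_relaxed))
          return false;  // Equivalent to returning S_FALSE: drop, not error.
        // ... deliver the sample ...
        return true;
      }

     private:
      std::atomic_bool flushing_{false};
    };
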
diff --git a/third_party/libwebrtc/modules/video_capture/windows/video_capture_ds.cc b/third_party/libwebrtc/modules/video_capture/windows/video_capture_ds.cc
new file mode 100644
index 0000000000..781fbe9f0a
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_capture/windows/video_capture_ds.cc
@@ -0,0 +1,323 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_capture/windows/video_capture_ds.h"
+
+#include <dvdmedia.h> // VIDEOINFOHEADER2
+
+#include "modules/video_capture/video_capture_config.h"
+#include "modules/video_capture/windows/help_functions_ds.h"
+#include "modules/video_capture/windows/sink_filter_ds.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+namespace videocapturemodule {
+VideoCaptureDS::VideoCaptureDS()
+ : _captureFilter(NULL),
+ _graphBuilder(NULL),
+ _mediaControl(NULL),
+ _inputSendPin(NULL),
+ _outputCapturePin(NULL),
+ _dvFilter(NULL),
+ _inputDvPin(NULL),
+ _outputDvPin(NULL) {}
+
+VideoCaptureDS::~VideoCaptureDS() {
+ if (_mediaControl) {
+ _mediaControl->Stop();
+ }
+ if (_graphBuilder) {
+ if (sink_filter_)
+ _graphBuilder->RemoveFilter(sink_filter_.get());
+ if (_captureFilter)
+ _graphBuilder->RemoveFilter(_captureFilter);
+ if (_dvFilter)
+ _graphBuilder->RemoveFilter(_dvFilter);
+ }
+ RELEASE_AND_CLEAR(_inputSendPin);
+ RELEASE_AND_CLEAR(_outputCapturePin);
+
+ RELEASE_AND_CLEAR(_captureFilter); // release the capture device
+ RELEASE_AND_CLEAR(_dvFilter);
+
+ RELEASE_AND_CLEAR(_mediaControl);
+
+ RELEASE_AND_CLEAR(_inputDvPin);
+ RELEASE_AND_CLEAR(_outputDvPin);
+
+ RELEASE_AND_CLEAR(_graphBuilder);
+}
+
+int32_t VideoCaptureDS::Init(const char* deviceUniqueIdUTF8) {
+  const int32_t nameLength = static_cast<int32_t>(strlen(deviceUniqueIdUTF8));
+ if (nameLength >= kVideoCaptureUniqueNameLength)
+ return -1;
+
+ // Store the device name
+ _deviceUniqueId = new (std::nothrow) char[nameLength + 1];
+ memcpy(_deviceUniqueId, deviceUniqueIdUTF8, nameLength + 1);
+
+ if (_dsInfo.Init() != 0)
+ return -1;
+
+ _captureFilter = _dsInfo.GetDeviceFilter(deviceUniqueIdUTF8);
+ if (!_captureFilter) {
+ RTC_LOG(LS_INFO) << "Failed to create capture filter.";
+ return -1;
+ }
+
+ // Get the interface for DirectShow's GraphBuilder
+ HRESULT hr = CoCreateInstance(CLSID_FilterGraph, NULL, CLSCTX_INPROC_SERVER,
+ IID_IGraphBuilder, (void**)&_graphBuilder);
+ if (FAILED(hr)) {
+ RTC_LOG(LS_INFO) << "Failed to create graph builder.";
+ return -1;
+ }
+
+ hr = _graphBuilder->QueryInterface(IID_IMediaControl, (void**)&_mediaControl);
+ if (FAILED(hr)) {
+ RTC_LOG(LS_INFO) << "Failed to create media control builder.";
+ return -1;
+ }
+ hr = _graphBuilder->AddFilter(_captureFilter, CAPTURE_FILTER_NAME);
+ if (FAILED(hr)) {
+ RTC_LOG(LS_INFO) << "Failed to add the capture device to the graph.";
+ return -1;
+ }
+
+ _outputCapturePin = GetOutputPin(_captureFilter, PIN_CATEGORY_CAPTURE);
+ if (!_outputCapturePin) {
+ RTC_LOG(LS_INFO) << "Failed to get output capture pin";
+ return -1;
+ }
+
+  // Create the sink filter used for receiving captured frames.
+ sink_filter_ = new ComRefCount<CaptureSinkFilter>(this);
+
+ hr = _graphBuilder->AddFilter(sink_filter_.get(), SINK_FILTER_NAME);
+ if (FAILED(hr)) {
+ RTC_LOG(LS_INFO) << "Failed to add the send filter to the graph.";
+ return -1;
+ }
+
+ _inputSendPin = GetInputPin(sink_filter_.get());
+ if (!_inputSendPin) {
+ RTC_LOG(LS_INFO) << "Failed to get input send pin";
+ return -1;
+ }
+
+  // Connect temporarily here. This is done so that no one else can use
+  // the capture device.
+ if (SetCameraOutput(_requestedCapability) != 0) {
+ return -1;
+ }
+ hr = _mediaControl->Pause();
+ if (FAILED(hr)) {
+ RTC_LOG(LS_INFO)
+ << "Failed to Pause the Capture device. Is it already occupied? " << hr;
+ return -1;
+ }
+ RTC_LOG(LS_INFO) << "Capture device '" << deviceUniqueIdUTF8
+ << "' initialized.";
+ return 0;
+}
+
+int32_t VideoCaptureDS::StartCapture(const VideoCaptureCapability& capability) {
+ MutexLock lock(&api_lock_);
+
+ if (capability != _requestedCapability) {
+ DisconnectGraph();
+
+ if (SetCameraOutput(capability) != 0) {
+ return -1;
+ }
+ }
+ HRESULT hr = _mediaControl->Run();
+ if (FAILED(hr)) {
+ RTC_LOG(LS_INFO) << "Failed to start the Capture device.";
+ return -1;
+ }
+ return 0;
+}
+
+int32_t VideoCaptureDS::StopCapture() {
+ MutexLock lock(&api_lock_);
+
+ HRESULT hr = _mediaControl->Pause();
+ if (FAILED(hr)) {
+ RTC_LOG(LS_INFO) << "Failed to stop the capture graph. " << hr;
+ return -1;
+ }
+ return 0;
+}
+
+bool VideoCaptureDS::CaptureStarted() {
+ OAFilterState state = 0;
+ HRESULT hr = _mediaControl->GetState(1000, &state);
+ if (hr != S_OK && hr != VFW_S_CANT_CUE) {
+ RTC_LOG(LS_INFO) << "Failed to get the CaptureStarted status";
+ }
+ RTC_LOG(LS_INFO) << "CaptureStarted " << state;
+ return state == State_Running;
+}
+
+int32_t VideoCaptureDS::CaptureSettings(VideoCaptureCapability& settings) {
+ settings = _requestedCapability;
+ return 0;
+}
+
+int32_t VideoCaptureDS::SetCameraOutput(
+ const VideoCaptureCapability& requestedCapability) {
+ // Get the best matching capability
+ VideoCaptureCapability capability;
+ int32_t capabilityIndex;
+
+ // Store the new requested size
+ _requestedCapability = requestedCapability;
+ // Match the requested capability with the supported.
+ if ((capabilityIndex = _dsInfo.GetBestMatchedCapability(
+ _deviceUniqueId, _requestedCapability, capability)) < 0) {
+ return -1;
+ }
+ // Reduce the frame rate if possible.
+ if (capability.maxFPS > requestedCapability.maxFPS) {
+ capability.maxFPS = requestedCapability.maxFPS;
+ } else if (capability.maxFPS <= 0) {
+ capability.maxFPS = 30;
+ }
+
+  // Convert it to the Windows capability index, since the two are not
+  // necessarily the same.
+ VideoCaptureCapabilityWindows windowsCapability;
+ if (_dsInfo.GetWindowsCapability(capabilityIndex, windowsCapability) != 0) {
+ return -1;
+ }
+
+ IAMStreamConfig* streamConfig = NULL;
+ AM_MEDIA_TYPE* pmt = NULL;
+ VIDEO_STREAM_CONFIG_CAPS caps;
+
+ HRESULT hr = _outputCapturePin->QueryInterface(IID_IAMStreamConfig,
+ (void**)&streamConfig);
+ if (hr) {
+ RTC_LOG(LS_INFO) << "Can't get the Capture format settings.";
+ return -1;
+ }
+
+ // Get the windows capability from the capture device
+ bool isDVCamera = false;
+ hr = streamConfig->GetStreamCaps(windowsCapability.directShowCapabilityIndex,
+ &pmt, reinterpret_cast<BYTE*>(&caps));
+ if (hr == S_OK) {
+ if (pmt->formattype == FORMAT_VideoInfo2) {
+ VIDEOINFOHEADER2* h = reinterpret_cast<VIDEOINFOHEADER2*>(pmt->pbFormat);
+ if (capability.maxFPS > 0 && windowsCapability.supportFrameRateControl) {
+ h->AvgTimePerFrame = REFERENCE_TIME(10000000.0 / capability.maxFPS);
+ }
+ } else {
+ VIDEOINFOHEADER* h = reinterpret_cast<VIDEOINFOHEADER*>(pmt->pbFormat);
+ if (capability.maxFPS > 0 && windowsCapability.supportFrameRateControl) {
+ h->AvgTimePerFrame = REFERENCE_TIME(10000000.0 / capability.maxFPS);
+ }
+ }
+
+ // Set the sink filter to request this capability
+ sink_filter_->SetRequestedCapability(capability);
+ // Order the capture device to use this capability
+ hr += streamConfig->SetFormat(pmt);
+
+ // Check if this is a DV camera and we need to add MS DV Filter
+ if (pmt->subtype == MEDIASUBTYPE_dvsl ||
+ pmt->subtype == MEDIASUBTYPE_dvsd || pmt->subtype == MEDIASUBTYPE_dvhd)
+ isDVCamera = true; // This is a DV camera. Use MS DV filter
+ }
+ RELEASE_AND_CLEAR(streamConfig);
+
+ if (FAILED(hr)) {
+ RTC_LOG(LS_INFO) << "Failed to set capture device output format";
+ return -1;
+ }
+
+ if (isDVCamera) {
+ hr = ConnectDVCamera();
+ } else {
+ hr = _graphBuilder->ConnectDirect(_outputCapturePin, _inputSendPin, NULL);
+ }
+ if (hr != S_OK) {
+ RTC_LOG(LS_INFO) << "Failed to connect the Capture graph " << hr;
+ return -1;
+ }
+ return 0;
+}
+
+int32_t VideoCaptureDS::DisconnectGraph() {
+ HRESULT hr = _mediaControl->Stop();
+ hr += _graphBuilder->Disconnect(_outputCapturePin);
+ hr += _graphBuilder->Disconnect(_inputSendPin);
+
+  // If the DV camera filter exists.
+ if (_dvFilter) {
+ _graphBuilder->Disconnect(_inputDvPin);
+ _graphBuilder->Disconnect(_outputDvPin);
+ }
+ if (hr != S_OK) {
+ RTC_LOG(LS_ERROR)
+ << "Failed to Stop the Capture device for reconfiguration " << hr;
+ return -1;
+ }
+ return 0;
+}
+
+HRESULT VideoCaptureDS::ConnectDVCamera() {
+ HRESULT hr = S_OK;
+
+ if (!_dvFilter) {
+ hr = CoCreateInstance(CLSID_DVVideoCodec, NULL, CLSCTX_INPROC,
+ IID_IBaseFilter, (void**)&_dvFilter);
+ if (hr != S_OK) {
+ RTC_LOG(LS_INFO) << "Failed to create the dv decoder: " << hr;
+ return hr;
+ }
+ hr = _graphBuilder->AddFilter(_dvFilter, L"VideoDecoderDV");
+ if (hr != S_OK) {
+ RTC_LOG(LS_INFO) << "Failed to add the dv decoder to the graph: " << hr;
+ return hr;
+ }
+ _inputDvPin = GetInputPin(_dvFilter);
+ if (_inputDvPin == NULL) {
+ RTC_LOG(LS_INFO) << "Failed to get input pin from DV decoder";
+ return -1;
+ }
+ _outputDvPin = GetOutputPin(_dvFilter, GUID_NULL);
+ if (_outputDvPin == NULL) {
+ RTC_LOG(LS_INFO) << "Failed to get output pin from DV decoder";
+ return -1;
+ }
+ }
+ hr = _graphBuilder->ConnectDirect(_outputCapturePin, _inputDvPin, NULL);
+ if (hr != S_OK) {
+ RTC_LOG(LS_INFO) << "Failed to connect capture device to the dv devoder: "
+ << hr;
+ return hr;
+ }
+
+ hr = _graphBuilder->ConnectDirect(_outputDvPin, _inputSendPin, NULL);
+ if (hr != S_OK) {
+ if (hr == HRESULT_FROM_WIN32(ERROR_TOO_MANY_OPEN_FILES)) {
+ RTC_LOG(LS_INFO) << "Failed to connect the capture device, busy";
+ } else {
+ RTC_LOG(LS_INFO) << "Failed to connect capture device to the send graph: "
+ << hr;
+ }
+ }
+ return hr;
+}
+} // namespace videocapturemodule
+} // namespace webrtc
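
SetCameraOutput() above writes the requested rate into AvgTimePerFrame, which
is a REFERENCE_TIME expressed in 100-nanosecond units; hence the
10000000 / maxFPS conversion. The arithmetic as a standalone sketch (helper
names are illustrative):

    // Illustrative sketch: REFERENCE_TIME frame-duration conversions.
    #include <cstdint>

    // 30 fps -> 333333 ticks (about 33.3 ms per frame, in 100 ns units).
    int64_t FpsToAvgTimePerFrame(int fps) {
      return fps > 0 ? 10000000 / fps : 0;
    }

    int AvgTimePerFrameToFps(int64_t avg_time_per_frame) {
      return avg_time_per_frame > 0
                 ? static_cast<int>(10000000 / avg_time_per_frame)
                 : 0;
    }
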
diff --git a/third_party/libwebrtc/modules/video_capture/windows/video_capture_ds.h b/third_party/libwebrtc/modules/video_capture/windows/video_capture_ds.h
new file mode 100644
index 0000000000..0f01cfaf67
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_capture/windows/video_capture_ds.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CAPTURE_MAIN_SOURCE_WINDOWS_VIDEO_CAPTURE_DS_H_
+#define MODULES_VIDEO_CAPTURE_MAIN_SOURCE_WINDOWS_VIDEO_CAPTURE_DS_H_
+
+#include "api/scoped_refptr.h"
+#include "modules/video_capture/video_capture_impl.h"
+#include "modules/video_capture/windows/device_info_ds.h"
+
+#define CAPTURE_FILTER_NAME L"VideoCaptureFilter"
+#define SINK_FILTER_NAME L"SinkFilter"
+
+namespace webrtc {
+namespace videocapturemodule {
+// Forward declaration.
+class CaptureSinkFilter;
+
+class VideoCaptureDS : public VideoCaptureImpl {
+ public:
+ VideoCaptureDS();
+
+ virtual int32_t Init(const char* deviceUniqueIdUTF8);
+
+ /*************************************************************************
+ *
+ * Start/Stop
+ *
+ *************************************************************************/
+ int32_t StartCapture(const VideoCaptureCapability& capability) override;
+ int32_t StopCapture() override;
+
+ /**************************************************************************
+ *
+ * Properties of the set device
+ *
+ **************************************************************************/
+
+ bool CaptureStarted() override;
+ int32_t CaptureSettings(VideoCaptureCapability& settings) override;
+
+ protected:
+ ~VideoCaptureDS() override;
+
+ // Help functions
+
+ int32_t SetCameraOutput(const VideoCaptureCapability& requestedCapability);
+ int32_t DisconnectGraph();
+ HRESULT ConnectDVCamera();
+
+ DeviceInfoDS _dsInfo;
+
+ IBaseFilter* _captureFilter;
+ IGraphBuilder* _graphBuilder;
+ IMediaControl* _mediaControl;
+ rtc::scoped_refptr<CaptureSinkFilter> sink_filter_;
+ IPin* _inputSendPin;
+ IPin* _outputCapturePin;
+
+ // Microsoft DV interface (external DV cameras)
+ IBaseFilter* _dvFilter;
+ IPin* _inputDvPin;
+ IPin* _outputDvPin;
+};
+} // namespace videocapturemodule
+} // namespace webrtc
+#endif // MODULES_VIDEO_CAPTURE_MAIN_SOURCE_WINDOWS_VIDEO_CAPTURE_DS_H_
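
The class above tracks its DirectShow objects as raw interface pointers and
pairs them with RELEASE_AND_CLEAR in the destructor. For comparison, the same
ownership expressed as a small RAII guard; this is a sketch of an alternative,
not code used by the patch.

    // Illustrative sketch: RAII equivalent of the RELEASE_AND_CLEAR pattern.
    template <typename T>
    class ComPtrGuard {
     public:
      explicit ComPtrGuard(T* ptr = nullptr) : ptr_(ptr) {}
      ~ComPtrGuard() {
        if (ptr_)
          ptr_->Release();
      }
      ComPtrGuard(const ComPtrGuard&) = delete;
      ComPtrGuard& operator=(const ComPtrGuard&) = delete;

      T* get() const { return ptr_; }
      // For CoCreateInstance-style out-parameters; a previously held pointer
      // would leak, so only call this on an empty guard.
      T** receive() { return &ptr_; }

     private:
      T* ptr_;
    };
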
diff --git a/third_party/libwebrtc/modules/video_capture/windows/video_capture_factory_windows.cc b/third_party/libwebrtc/modules/video_capture/windows/video_capture_factory_windows.cc
new file mode 100644
index 0000000000..a45d6a6640
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_capture/windows/video_capture_factory_windows.cc
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "api/scoped_refptr.h"
+#include "modules/video_capture/windows/video_capture_ds.h"
+
+namespace webrtc {
+namespace videocapturemodule {
+
+// static
+VideoCaptureModule::DeviceInfo* VideoCaptureImpl::CreateDeviceInfo() {
+ // TODO(tommi): Use the Media Foundation version on Vista and up.
+ return DeviceInfoDS::Create();
+}
+
+rtc::scoped_refptr<VideoCaptureModule> VideoCaptureImpl::Create(const char* device_id) {
+ if (device_id == nullptr)
+ return nullptr;
+
+ // TODO(tommi): Use Media Foundation implementation for Vista and up.
+ auto capture = rtc::make_ref_counted<VideoCaptureDS>();
+ if (capture->Init(device_id) != 0) {
+ return nullptr;
+ }
+
+ return capture;
+}
+
+} // namespace videocapturemodule
+} // namespace webrtc
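
A sketch of the client-side flow through these factory entry points. The
VideoCaptureFactory include and the 256-byte buffers are assumptions of this
example, device index 0 is picked arbitrarily, and error handling is trimmed.

    // Illustrative sketch: open the first enumerated capture device.
    #include <memory>

    #include "modules/video_capture/video_capture_factory.h"  // Assumed path.

    rtc::scoped_refptr<webrtc::VideoCaptureModule> OpenFirstDevice() {
      std::unique_ptr<webrtc::VideoCaptureModule::DeviceInfo> info(
          webrtc::VideoCaptureFactory::CreateDeviceInfo());
      if (!info || info->NumberOfDevices() == 0)
        return nullptr;
      char name[256] = {0};
      char id[256] = {0};
      // Product ID and pid outputs are not needed here, so pass nulls.
      if (info->GetDeviceName(0, name, sizeof(name), id, sizeof(id), nullptr,
                              0, nullptr) != 0)
        return nullptr;
      return webrtc::VideoCaptureFactory::Create(id);
    }
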
diff --git a/third_party/libwebrtc/modules/video_coding/BUILD.gn b/third_party/libwebrtc/modules/video_coding/BUILD.gn
new file mode 100644
index 0000000000..d4919e99e8
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/BUILD.gn
@@ -0,0 +1,1312 @@
+# Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+import("//third_party/libaom/options.gni")
+import("../../webrtc.gni")
+
+rtc_library("encoded_frame") {
+ visibility = [ "*" ]
+ sources = [
+ "encoded_frame.cc",
+ "encoded_frame.h",
+ ]
+ deps = [
+ ":codec_globals_headers",
+ ":video_codec_interface",
+ "../../api/video:encoded_image",
+ "../../api/video:video_frame",
+ "../../api/video:video_rtp_headers",
+ "../../modules:module_api_public",
+ "../../modules/rtp_rtcp:rtp_video_header",
+ "../../rtc_base:checks",
+ "../../rtc_base/experiments:alr_experiment",
+ "../../rtc_base/experiments:rtt_mult_experiment",
+ "../../rtc_base/system:rtc_export",
+ "../../system_wrappers",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/types:optional",
+ "//third_party/abseil-cpp/absl/types:variant",
+ ]
+}
+
+rtc_library("chain_diff_calculator") {
+ sources = [
+ "chain_diff_calculator.cc",
+ "chain_diff_calculator.h",
+ ]
+
+ deps = [
+ "../../rtc_base:checks",
+ "../../rtc_base:logging",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/container:inlined_vector",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+}
+
+rtc_library("frame_dependencies_calculator") {
+ sources = [
+ "frame_dependencies_calculator.cc",
+ "frame_dependencies_calculator.h",
+ ]
+
+ deps = [
+ "../../api:array_view",
+ "../../common_video/generic_frame_descriptor",
+ "../../rtc_base:checks",
+ "../../rtc_base:logging",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/algorithm:container",
+ "//third_party/abseil-cpp/absl/container:inlined_vector",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+}
+
+rtc_library("nack_requester") {
+ sources = [
+ "histogram.cc",
+ "histogram.h",
+ "nack_requester.cc",
+ "nack_requester.h",
+ ]
+
+ deps = [
+ "..:module_api",
+ "../../api:field_trials_view",
+ "../../api:sequence_checker",
+ "../../api/task_queue",
+ "../../api/task_queue:pending_task_safety_flag",
+ "../../api/units:time_delta",
+ "../../api/units:timestamp",
+ "../../rtc_base:checks",
+ "../../rtc_base:logging",
+ "../../rtc_base:macromagic",
+ "../../rtc_base:rtc_numerics",
+ "../../rtc_base/experiments:field_trial_parser",
+ "../../rtc_base/task_utils:repeating_task",
+ "../../system_wrappers",
+ ]
+}
+
+rtc_library("packet_buffer") {
+ sources = [
+ "packet_buffer.cc",
+ "packet_buffer.h",
+ ]
+ deps = [
+ ":codec_globals_headers",
+ "../../api:array_view",
+ "../../api:rtp_packet_info",
+ "../../api/units:timestamp",
+ "../../api/video:encoded_image",
+ "../../api/video:video_frame_type",
+ "../../common_video",
+ "../../rtc_base:checks",
+ "../../rtc_base:copy_on_write_buffer",
+ "../../rtc_base:logging",
+ "../../rtc_base:macromagic",
+ "../../rtc_base:mod_ops",
+ "../../rtc_base:rtc_numerics",
+ "../rtp_rtcp:rtp_rtcp_format",
+ "../rtp_rtcp:rtp_video_header",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/base:core_headers",
+ "//third_party/abseil-cpp/absl/types:variant",
+ ]
+}
+
+rtc_library("h264_packet_buffer") {
+ sources = [
+ "h264_packet_buffer.cc",
+ "h264_packet_buffer.h",
+ ]
+ deps = [
+ ":codec_globals_headers",
+ ":packet_buffer",
+ "../../api:array_view",
+ "../../api:rtp_packet_info",
+ "../../api/units:timestamp",
+ "../../api/video:encoded_image",
+ "../../api/video:video_frame_type",
+ "../../common_video",
+ "../../rtc_base:checks",
+ "../../rtc_base:copy_on_write_buffer",
+ "../../rtc_base:logging",
+ "../../rtc_base:rtc_numerics",
+ "../rtp_rtcp:rtp_rtcp_format",
+ "../rtp_rtcp:rtp_video_header",
+ ]
+ absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ]
+}
+
+rtc_library("frame_helpers") {
+ sources = [
+ "frame_helpers.cc",
+ "frame_helpers.h",
+ ]
+ deps = [
+ "../../api/video:encoded_frame",
+ "../../rtc_base:logging",
+ ]
+ absl_deps = [ "//third_party/abseil-cpp/absl/container:inlined_vector" ]
+}
+
+rtc_library("frame_buffer2") {
+ sources = [
+ "frame_buffer2.cc",
+ "frame_buffer2.h",
+ ]
+ deps = [
+ ":frame_helpers",
+ ":video_codec_interface",
+ ":video_coding_utility",
+ "../../api:field_trials_view",
+ "../../api:sequence_checker",
+ "../../api/task_queue",
+ "../../api/units:data_size",
+ "../../api/units:time_delta",
+ "../../api/video:encoded_frame",
+ "../../api/video:encoded_image",
+ "../../api/video:video_rtp_headers",
+ "../../rtc_base:checks",
+ "../../rtc_base:event_tracer",
+ "../../rtc_base:logging",
+ "../../rtc_base:macromagic",
+ "../../rtc_base:rtc_event",
+ "../../rtc_base:rtc_numerics",
+ "../../rtc_base/experiments:field_trial_parser",
+ "../../rtc_base/experiments:rtt_mult_experiment",
+ "../../rtc_base/synchronization:mutex",
+ "../../rtc_base/system:no_unique_address",
+ "../../rtc_base/task_utils:repeating_task",
+ "../../system_wrappers",
+ "timing:inter_frame_delay",
+ "timing:jitter_estimator",
+ "timing:timing_module",
+ ]
+ absl_deps = [ "//third_party/abseil-cpp/absl/container:inlined_vector" ]
+}
+
+rtc_library("video_coding") {
+ visibility = [ "*" ]
+ sources = [
+ "decoder_database.cc",
+ "decoder_database.h",
+ "fec_controller_default.cc",
+ "fec_controller_default.h",
+ "fec_rate_table.h",
+ "frame_object.cc",
+ "frame_object.h",
+ "generic_decoder.cc",
+ "generic_decoder.h",
+ "h264_sprop_parameter_sets.cc",
+ "h264_sprop_parameter_sets.h",
+ "h264_sps_pps_tracker.cc",
+ "h264_sps_pps_tracker.h",
+ "include/video_codec_initializer.h",
+ "internal_defines.h",
+ "loss_notification_controller.cc",
+ "loss_notification_controller.h",
+ "media_opt_util.cc",
+ "media_opt_util.h",
+ "rtp_frame_id_only_ref_finder.cc",
+ "rtp_frame_id_only_ref_finder.h",
+ "rtp_frame_reference_finder.cc",
+ "rtp_frame_reference_finder.h",
+ "rtp_generic_ref_finder.cc",
+ "rtp_generic_ref_finder.h",
+ "rtp_seq_num_only_ref_finder.cc",
+ "rtp_seq_num_only_ref_finder.h",
+ "rtp_vp8_ref_finder.cc",
+ "rtp_vp8_ref_finder.h",
+ "rtp_vp9_ref_finder.cc",
+ "rtp_vp9_ref_finder.h",
+ "video_codec_initializer.cc",
+ "video_receiver2.cc",
+ "video_receiver2.h",
+ ]
+ if (build_with_mozilla) {
+ sources += [
+ "event_wrapper.cc",
+ "event_wrapper.h",
+ ]
+ }
+
+ deps = [
+ ":codec_globals_headers",
+ ":encoded_frame",
+ ":frame_helpers",
+ ":video_codec_interface",
+ ":video_coding_utility",
+ ":webrtc_vp8_scalability",
+ ":webrtc_vp9_helpers",
+ "..:module_api",
+ "..:module_api_public",
+ "..:module_fec_api",
+ "../../api:array_view",
+ "../../api:fec_controller_api",
+ "../../api:field_trials_view",
+ "../../api:rtp_headers",
+ "../../api:rtp_packet_info",
+ "../../api:scoped_refptr",
+ "../../api:sequence_checker",
+ "../../api/task_queue",
+ "../../api/units:data_rate",
+ "../../api/units:data_size",
+ "../../api/units:frequency",
+ "../../api/units:time_delta",
+ "../../api/units:timestamp",
+ "../../api/video:builtin_video_bitrate_allocator_factory",
+ "../../api/video:encoded_frame",
+ "../../api/video:encoded_image",
+ "../../api/video:video_adaptation",
+ "../../api/video:video_adaptation",
+ "../../api/video:video_bitrate_allocation",
+ "../../api/video:video_bitrate_allocator",
+ "../../api/video:video_bitrate_allocator_factory",
+ "../../api/video:video_frame",
+ "../../api/video:video_frame_type",
+ "../../api/video:video_rtp_headers",
+ "../../api/video_codecs:video_codecs_api",
+ "../../common_video",
+ "../../rtc_base",
+ "../../rtc_base:checks",
+ "../../rtc_base:copy_on_write_buffer",
+ "../../rtc_base:event_tracer",
+ "../../rtc_base:logging",
+ "../../rtc_base:macromagic",
+ "../../rtc_base:rtc_event",
+ "../../rtc_base:rtc_numerics",
+ "../../rtc_base:safe_conversions",
+ "../../rtc_base:threading",
+ "../../rtc_base:timeutils",
+ "../../rtc_base/experiments:alr_experiment",
+ "../../rtc_base/experiments:field_trial_parser",
+ "../../rtc_base/experiments:min_video_bitrate_experiment",
+ "../../rtc_base/experiments:rate_control_settings",
+ "../../rtc_base/experiments:rtt_mult_experiment",
+ "../../rtc_base/synchronization:mutex",
+ "../../rtc_base/system:no_unique_address",
+ "../../rtc_base/task_utils:repeating_task",
+ "../../rtc_base/third_party/base64",
+ "../../rtc_base/time:timestamp_extrapolator",
+ "../../system_wrappers",
+ "../../system_wrappers:field_trial",
+ "../../system_wrappers:metrics",
+ "../rtp_rtcp",
+ "../rtp_rtcp:rtp_rtcp_format",
+ "../rtp_rtcp:rtp_video_header",
+ "codecs/av1:av1_svc_config",
+ "svc:scalability_mode_util",
+ "timing:inter_frame_delay",
+ "timing:jitter_estimator",
+ "timing:rtt_filter",
+ "timing:timing_module",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/algorithm:container",
+ "//third_party/abseil-cpp/absl/base:core_headers",
+ "//third_party/abseil-cpp/absl/container:inlined_vector",
+ "//third_party/abseil-cpp/absl/types:optional",
+ "//third_party/abseil-cpp/absl/types:variant",
+ ]
+}
+
+rtc_library("video_codec_interface") {
+ visibility = [ "*" ]
+ sources = [
+ "include/video_codec_interface.cc",
+ "include/video_codec_interface.h",
+ "include/video_coding_defines.h",
+ "include/video_error_codes.h",
+ "video_coding_defines.cc",
+ ]
+ deps = [
+ ":codec_globals_headers",
+ "../../api/video:video_frame",
+ "../../api/video:video_rtp_headers",
+ "../../api/video_codecs:video_codecs_api",
+ "../../common_video",
+ "../../common_video/generic_frame_descriptor",
+ "../../rtc_base/system:rtc_export",
+ ]
+ absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ]
+}
+
+rtc_library("video_coding_legacy") {
+ visibility = [ ":video_coding_unittests" ]
+ sources = [
+ "decoding_state.cc",
+ "decoding_state.h",
+ "event_wrapper.cc",
+ "event_wrapper.h",
+ "frame_buffer.cc",
+ "frame_buffer.h",
+ "include/video_coding.h",
+ "jitter_buffer.cc",
+ "jitter_buffer.h",
+ "jitter_buffer_common.h",
+ "packet.cc",
+ "packet.h",
+ "receiver.cc",
+ "receiver.h",
+ "session_info.cc",
+ "session_info.h",
+ "video_coding_impl.cc",
+ "video_coding_impl.h",
+ "video_receiver.cc",
+ ]
+ deps = [
+ ":codec_globals_headers",
+ ":encoded_frame",
+ ":video_codec_interface",
+ ":video_coding",
+ "..:module_api",
+ "..:module_api_public",
+ "../../api:field_trials_view",
+ "../../api:rtp_headers",
+ "../../api:rtp_packet_info",
+ "../../api:sequence_checker",
+ "../../api/transport:field_trial_based_config",
+ "../../api/units:timestamp",
+ "../../api/video:encoded_image",
+ "../../api/video:video_frame",
+ "../../api/video:video_frame_type",
+ "../../api/video:video_rtp_headers",
+ "../../api/video_codecs:video_codecs_api",
+ "../../common_video",
+ "../../modules/rtp_rtcp:rtp_video_header",
+ "../../rtc_base:checks",
+ "../../rtc_base:event_tracer",
+ "../../rtc_base:location",
+ "../../rtc_base:logging",
+ "../../rtc_base:macromagic",
+ "../../rtc_base:one_time_event",
+ "../../rtc_base:rtc_event",
+ "../../rtc_base:safe_conversions",
+ "../../rtc_base/memory:always_valid_pointer",
+ "../../rtc_base/synchronization:mutex",
+ "../../system_wrappers",
+ "../rtp_rtcp:rtp_rtcp_format",
+ "../rtp_rtcp:rtp_video_header",
+ "timing:inter_frame_delay",
+ "timing:jitter_estimator",
+ "timing:timing_module",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/memory",
+ "//third_party/abseil-cpp/absl/types:optional",
+ "//third_party/abseil-cpp/absl/types:variant",
+ ]
+}
+
+rtc_source_set("codec_globals_headers") {
+ visibility = [ "*" ]
+ sources = [
+ "codecs/h264/include/h264_globals.h",
+ "codecs/interface/common_constants.h",
+ "codecs/vp8/include/vp8_globals.h",
+ "codecs/vp9/include/vp9_globals.h",
+ ]
+
+ deps = [ "../../rtc_base:checks" ]
+}
+
+rtc_library("video_coding_utility") {
+ visibility = [ "*" ]
+ sources = [
+ "utility/bandwidth_quality_scaler.cc",
+ "utility/bandwidth_quality_scaler.h",
+ "utility/decoded_frames_history.cc",
+ "utility/decoded_frames_history.h",
+ "utility/frame_dropper.cc",
+ "utility/frame_dropper.h",
+ "utility/framerate_controller_deprecated.cc",
+ "utility/framerate_controller_deprecated.h",
+ "utility/ivf_defines.h",
+ "utility/ivf_file_reader.cc",
+ "utility/ivf_file_reader.h",
+ "utility/ivf_file_writer.cc",
+ "utility/ivf_file_writer.h",
+ "utility/qp_parser.cc",
+ "utility/qp_parser.h",
+ "utility/quality_scaler.cc",
+ "utility/quality_scaler.h",
+ "utility/simulcast_rate_allocator.cc",
+ "utility/simulcast_rate_allocator.h",
+ "utility/simulcast_utility.cc",
+ "utility/simulcast_utility.h",
+ "utility/vp8_header_parser.cc",
+ "utility/vp8_header_parser.h",
+ "utility/vp9_constants.h",
+ "utility/vp9_uncompressed_header_parser.cc",
+ "utility/vp9_uncompressed_header_parser.h",
+ ]
+
+ deps = [
+ ":video_codec_interface",
+ "../../api:array_view",
+ "../../api:field_trials_view",
+ "../../api:scoped_refptr",
+ "../../api:sequence_checker",
+ "../../api/units:time_delta",
+ "../../api/video:encoded_frame",
+ "../../api/video:encoded_image",
+ "../../api/video:video_adaptation",
+ "../../api/video:video_bitrate_allocation",
+ "../../api/video:video_bitrate_allocator",
+ "../../api/video:video_codec_constants",
+ "../../api/video:video_frame",
+ "../../api/video_codecs:video_codecs_api",
+ "../../common_video",
+ "../../modules/rtp_rtcp",
+ "../../rtc_base:bitstream_reader",
+ "../../rtc_base:checks",
+ "../../rtc_base:logging",
+ "../../rtc_base:rate_statistics",
+ "../../rtc_base:refcount",
+ "../../rtc_base:rtc_numerics",
+ "../../rtc_base:stringutils",
+ "../../rtc_base:timeutils",
+ "../../rtc_base:weak_ptr",
+ "../../rtc_base/experiments:bandwidth_quality_scaler_settings",
+ "../../rtc_base/experiments:encoder_info_settings",
+ "../../rtc_base/experiments:quality_scaler_settings",
+ "../../rtc_base/experiments:quality_scaling_experiment",
+ "../../rtc_base/experiments:rate_control_settings",
+ "../../rtc_base/experiments:stable_target_rate_experiment",
+ "../../rtc_base/synchronization:mutex",
+ "../../rtc_base/system:arch",
+ "../../rtc_base/system:file_wrapper",
+ "../../rtc_base/system:no_unique_address",
+ "../../rtc_base/task_utils:repeating_task",
+ "../../system_wrappers:field_trial",
+ "../rtp_rtcp:rtp_rtcp_format",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/numeric:bits",
+ "//third_party/abseil-cpp/absl/strings:strings",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+}
+
+rtc_library("webrtc_h264") {
+ visibility = [ "*" ]
+ sources = [
+ "codecs/h264/h264.cc",
+ "codecs/h264/h264_color_space.cc",
+ "codecs/h264/h264_color_space.h",
+ "codecs/h264/h264_decoder_impl.cc",
+ "codecs/h264/h264_decoder_impl.h",
+ "codecs/h264/h264_encoder_impl.cc",
+ "codecs/h264/h264_encoder_impl.h",
+ "codecs/h264/include/h264.h",
+ ]
+
+ defines = []
+ deps = [
+ ":video_codec_interface",
+ ":video_coding_utility",
+ "../../api/video:video_frame",
+ "../../api/video:video_frame_i010",
+ "../../api/video:video_rtp_headers",
+ "../../api/video_codecs:scalability_mode",
+ "../../api/video_codecs:video_codecs_api",
+ "../../common_video",
+ "../../media:rtc_media_base",
+ "../../rtc_base",
+ "../../rtc_base:checks",
+ "../../rtc_base:event_tracer",
+ "../../rtc_base:logging",
+ "../../rtc_base:timeutils",
+ "../../rtc_base/system:rtc_export",
+ "../../system_wrappers:field_trial",
+ "../../system_wrappers:metrics",
+ "//third_party/libyuv",
+ ]
+ if (build_with_mozilla) {
+ deps -= [ "//third_party/libyuv" ]
+ }
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/container:inlined_vector",
+ "//third_party/abseil-cpp/absl/strings",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+
+ if (rtc_use_h264) {
+ deps += [
+ "//third_party/ffmpeg",
+ "//third_party/openh264:encoder",
+ ]
+ if (!build_with_mozilla) {
+ deps += [ "../../media:rtc_media_base" ]
+ }
+ }
+ if (build_with_mozilla) {
+ include_dirs = [
+ "/media/libyuv",
+ "/media/libyuv/libyuv/include",
+ ]
+ }
+}
+
+rtc_library("webrtc_multiplex") {
+ sources = [
+ "codecs/multiplex/augmented_video_frame_buffer.cc",
+ "codecs/multiplex/include/augmented_video_frame_buffer.h",
+ "codecs/multiplex/include/multiplex_decoder_adapter.h",
+ "codecs/multiplex/include/multiplex_encoder_adapter.h",
+ "codecs/multiplex/multiplex_decoder_adapter.cc",
+ "codecs/multiplex/multiplex_encoded_image_packer.cc",
+ "codecs/multiplex/multiplex_encoded_image_packer.h",
+ "codecs/multiplex/multiplex_encoder_adapter.cc",
+ ]
+
+ deps = [
+ ":video_codec_interface",
+ ":video_coding_utility",
+ "../../api:fec_controller_api",
+ "../../api:scoped_refptr",
+ "../../api/video:encoded_image",
+ "../../api/video:video_frame",
+ "../../api/video:video_rtp_headers",
+ "../../api/video_codecs:video_codecs_api",
+ "../../common_video",
+ "../../media:rtc_media_base",
+ "../../rtc_base",
+ "../../rtc_base:checks",
+ "../../rtc_base:logging",
+ "../../rtc_base/synchronization:mutex",
+ "../rtp_rtcp:rtp_rtcp_format",
+ ]
+}
+
+# This target defines a bare-bones interface towards libvpx, used by the
+# VP8 and VP9 wrappers below.
+rtc_library("webrtc_libvpx_interface") {
+ visibility = [ "*" ]
+ sources = [
+ "codecs/interface/libvpx_interface.cc",
+ "codecs/interface/libvpx_interface.h",
+ ]
+ deps = [ "../../rtc_base:checks" ]
+ if (rtc_build_libvpx) {
+ deps += [ rtc_libvpx_dir ]
+ }
+}
+
+if (!build_with_mozilla) {
+rtc_library("mock_libvpx_interface") {
+ testonly = true
+ sources = [ "codecs/interface/mock_libvpx_interface.h" ]
+ deps = [
+ ":webrtc_libvpx_interface",
+ "../../test:test_support",
+ ]
+}
+}
+
+# This target includes the internal SW codec.
+rtc_library("webrtc_vp8") {
+ visibility = [ "*" ]
+ poisonous = [ "software_video_codecs" ]
+ sources = [
+ "codecs/vp8/include/vp8.h",
+ "codecs/vp8/libvpx_vp8_decoder.cc",
+ "codecs/vp8/libvpx_vp8_decoder.h",
+ "codecs/vp8/libvpx_vp8_encoder.cc",
+ "codecs/vp8/libvpx_vp8_encoder.h",
+ ]
+
+ deps = [
+ ":codec_globals_headers",
+ ":video_codec_interface",
+ ":video_coding_utility",
+ ":webrtc_libvpx_interface",
+ ":webrtc_vp8_scalability",
+ ":webrtc_vp8_temporal_layers",
+ "../../api:fec_controller_api",
+ "../../api:scoped_refptr",
+ "../../api/video:encoded_image",
+ "../../api/video:video_frame",
+ "../../api/video:video_rtp_headers",
+ "../../api/video_codecs:video_codecs_api",
+ "../../api/video_codecs:vp8_temporal_layers_factory",
+ "../../common_video",
+ "../../rtc_base:checks",
+ "../../rtc_base:event_tracer",
+ "../../rtc_base:logging",
+ "../../rtc_base:rtc_numerics",
+ "../../rtc_base:timeutils",
+ "../../rtc_base/experiments:cpu_speed_experiment",
+ "../../rtc_base/experiments:encoder_info_settings",
+ "../../rtc_base/experiments:field_trial_parser",
+ "../../rtc_base/experiments:rate_control_settings",
+ "../../system_wrappers:field_trial",
+ "../../system_wrappers:metrics",
+ "svc:scalability_mode_util",
+ "//third_party/libyuv",
+ ]
+ if (build_with_mozilla) {
+ deps -= [ "//third_party/libyuv" ]
+ }
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/algorithm:container",
+ "//third_party/abseil-cpp/absl/strings:strings",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+ if (rtc_build_libvpx) {
+ deps += [ rtc_libvpx_dir ]
+ }
+ if (build_with_mozilla) {
+ include_dirs = [
+ "/media/libyuv",
+ "/media/libyuv/libyuv/include",
+ ]
+ }
+}
+
+rtc_source_set("webrtc_vp8_scalability") {
+ sources = [
+ "codecs/vp8/vp8_scalability.cc",
+ "codecs/vp8/vp8_scalability.h",
+ ]
+ deps = [ "../../api/video_codecs:scalability_mode" ]
+}
+
+rtc_library("webrtc_vp8_temporal_layers") {
+ visibility = [ "*" ]
+ sources = [
+ "codecs/vp8/default_temporal_layers.cc",
+ "codecs/vp8/default_temporal_layers.h",
+ "codecs/vp8/include/temporal_layers_checker.h",
+ "codecs/vp8/screenshare_layers.cc",
+ "codecs/vp8/screenshare_layers.h",
+ "codecs/vp8/temporal_layers.h",
+ "codecs/vp8/temporal_layers_checker.cc",
+ ]
+
+ deps = [
+ ":codec_globals_headers",
+ ":video_codec_interface",
+ ":video_coding_utility",
+ "../../api:fec_controller_api",
+ "../../api/video_codecs:video_codecs_api",
+ "../../rtc_base:checks",
+ "../../rtc_base:logging",
+ "../../rtc_base:macromagic",
+ "../../rtc_base:rate_statistics",
+ "../../rtc_base:rtc_numerics",
+ "../../rtc_base:timeutils",
+ "../../system_wrappers:field_trial",
+ "../../system_wrappers:metrics",
+ ]
+ absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ]
+}
+
+# This target includes VP9 files that may be used for any VP9 codec, internal SW or external HW.
+rtc_library("webrtc_vp9_helpers") {
+ sources = [
+ "codecs/vp9/svc_config.cc",
+ "codecs/vp9/svc_config.h",
+ ]
+
+ deps = [
+ ":codec_globals_headers",
+ ":video_codec_interface",
+ "../../api/video:video_bitrate_allocation",
+ "../../api/video:video_bitrate_allocator",
+ "../../api/video:video_codec_constants",
+ "../../api/video_codecs:video_codecs_api",
+ "../../common_video",
+ "../../media:rtc_media_base",
+ "../../rtc_base:checks",
+ "../../rtc_base:logging",
+ "../../rtc_base/experiments:stable_target_rate_experiment",
+ "svc:scalability_mode_util",
+ "svc:scalability_structures",
+ "svc:scalable_video_controller",
+ ]
+ absl_deps = [ "//third_party/abseil-cpp/absl/container:inlined_vector" ]
+}
+
+rtc_library("webrtc_vp9") {
+ visibility = [ "*" ]
+ poisonous = [ "software_video_codecs" ]
+ sources = [
+ "codecs/vp9/include/vp9.h",
+ "codecs/vp9/libvpx_vp9_decoder.cc",
+ "codecs/vp9/libvpx_vp9_decoder.h",
+ "codecs/vp9/libvpx_vp9_encoder.cc",
+ "codecs/vp9/libvpx_vp9_encoder.h",
+ "codecs/vp9/vp9.cc",
+ "codecs/vp9/vp9_frame_buffer_pool.cc",
+ "codecs/vp9/vp9_frame_buffer_pool.h",
+ ]
+
+ deps = [
+ ":video_codec_interface",
+ ":video_coding_utility",
+ ":webrtc_libvpx_interface",
+ ":webrtc_vp9_helpers",
+ "../../api:fec_controller_api",
+ "../../api:field_trials_view",
+ "../../api:refcountedbase",
+ "../../api:scoped_refptr",
+ "../../api/transport:field_trial_based_config",
+ "../../api/video:video_frame",
+ "../../api/video:video_frame_i010",
+ "../../api/video:video_rtp_headers",
+ "../../api/video_codecs:scalability_mode",
+ "../../api/video_codecs:video_codecs_api",
+ "../../common_video",
+ "../../media:rtc_media_base",
+ "../../rtc_base",
+ "../../rtc_base:buffer",
+ "../../rtc_base:checks",
+ "../../rtc_base:event_tracer",
+ "../../rtc_base:logging",
+ "../../rtc_base:stringutils",
+ "../../rtc_base:timeutils",
+ "../../rtc_base/containers:flat_map",
+ "../../rtc_base/experiments:encoder_info_settings",
+ "../../rtc_base/experiments:field_trial_parser",
+ "../../rtc_base/experiments:rate_control_settings",
+ "../../rtc_base/synchronization:mutex",
+ "../../system_wrappers:field_trial",
+ "../rtp_rtcp:rtp_rtcp_format",
+ "svc:scalability_mode_util",
+ "svc:scalability_structures",
+ "svc:scalable_video_controller",
+ "svc:svc_rate_allocator",
+ "//third_party/libyuv",
+ ]
+ if (build_with_mozilla) {
+ deps -= [ "//third_party/libyuv" ]
+ include_dirs = [
+ "/media/libyuv",
+ "/media/libyuv/libyuv/include",
+ ]
+ }
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/algorithm:container",
+ "//third_party/abseil-cpp/absl/container:inlined_vector",
+ "//third_party/abseil-cpp/absl/memory",
+ "//third_party/abseil-cpp/absl/strings:strings",
+ ]
+ if (rtc_build_libvpx) {
+ deps += [ rtc_libvpx_dir ]
+ }
+}
+
+if (rtc_include_tests) {
+ if (is_android) {
+ rtc_library("android_codec_factory_helper") {
+ sources = [
+ "codecs/test/android_codec_factory_helper.cc",
+ "codecs/test/android_codec_factory_helper.h",
+ ]
+
+ deps = [
+ "../../api/video_codecs:video_codecs_api",
+ "../../modules/utility:utility",
+ "../../rtc_base:checks",
+ "../../rtc_base:ignore_wundef",
+ "../../sdk/android:internal_jni",
+ "../../sdk/android:native_api_base",
+ "../../sdk/android:native_api_codecs",
+ "../../sdk/android:native_api_jni",
+ ]
+ }
+ }
+
+ if (is_ios || is_mac) {
+ rtc_library("objc_codec_factory_helper") {
+ sources = [
+ "codecs/test/objc_codec_factory_helper.h",
+ "codecs/test/objc_codec_factory_helper.mm",
+ ]
+
+ deps = [
+ "../../api/video:video_frame",
+ "../../api/video:video_rtp_headers",
+ "../../api/video_codecs:video_codecs_api",
+ "../../media:rtc_audio_video",
+ "../../media:rtc_media_base",
+ "../../sdk:native_api",
+ "../../sdk:peerconnectionfactory_base_objc",
+ "../../sdk:videocodec_objc",
+ "../../sdk:videosource_objc",
+ "../../sdk:videotoolbox_objc",
+ ]
+ }
+ }
+
+ rtc_library("encoded_video_frame_producer") {
+ testonly = true
+ sources = [
+ "codecs/test/encoded_video_frame_producer.cc",
+ "codecs/test/encoded_video_frame_producer.h",
+ ]
+ deps = [
+ ":video_codec_interface",
+ "../../api:create_frame_generator",
+ "../../api:frame_generator_api",
+ "../../api/transport/rtp:dependency_descriptor",
+ "../../api/video:encoded_image",
+ "../../api/video:video_frame",
+ "../../api/video:video_frame_type",
+ "../../api/video_codecs:video_codecs_api",
+ "../../rtc_base:checks",
+ ]
+ }
+
+ rtc_library("simulcast_test_fixture_impl") {
+ testonly = true
+ sources = [
+ "utility/simulcast_test_fixture_impl.cc",
+ "utility/simulcast_test_fixture_impl.h",
+ ]
+
+ deps = [
+ ":video_codec_interface",
+ ":video_coding",
+ ":video_coding_utility",
+ "../../api:mock_video_decoder",
+ "../../api:mock_video_encoder",
+ "../../api:simulcast_test_fixture_api",
+ "../../api/video:encoded_image",
+ "../../api/video:video_frame",
+ "../../api/video:video_rtp_headers",
+ "../../api/video_codecs:video_codecs_api",
+ "../../common_video",
+ "../../rtc_base:checks",
+ "../../test:test_support",
+ ]
+ }
+
+ rtc_library("video_codecs_test_framework") {
+ testonly = true
+ sources = [
+ "codecs/test/video_codec_unittest.cc",
+ "codecs/test/video_codec_unittest.h",
+ "codecs/test/videoprocessor.cc",
+ "codecs/test/videoprocessor.h",
+ ]
+
+ deps = [
+ ":codec_globals_headers",
+ ":video_codec_interface",
+ ":video_coding",
+ ":video_coding_utility",
+ ":videocodec_test_stats_impl",
+ ":webrtc_vp9_helpers",
+ "..:module_api",
+ "../../api:create_frame_generator",
+ "../../api:frame_generator_api",
+ "../../api:scoped_refptr",
+ "../../api:sequence_checker",
+ "../../api:videocodec_test_fixture_api",
+ "../../api/task_queue",
+ "../../api/video:builtin_video_bitrate_allocator_factory",
+ "../../api/video:encoded_image",
+ "../../api/video:video_bitrate_allocation",
+ "../../api/video:video_bitrate_allocator",
+ "../../api/video:video_bitrate_allocator_factory",
+ "../../api/video:video_frame",
+ "../../api/video:video_rtp_headers",
+ "../../api/video_codecs:video_codecs_api",
+ "../../common_video",
+ "../../rtc_base:buffer",
+ "../../rtc_base:checks",
+ "../../rtc_base:macromagic",
+ "../../rtc_base:rtc_event",
+ "../../rtc_base:timeutils",
+ "../../rtc_base/synchronization:mutex",
+ "../../rtc_base/system:no_unique_address",
+ "../../test:test_support",
+ "../../test:video_test_common",
+ "../../test:video_test_support",
+ "../rtp_rtcp:rtp_rtcp_format",
+ "//third_party/libyuv",
+ ]
+ if (build_with_mozilla) {
+ deps -= [ "//third_party/libyuv" ]
+ }
+ absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ]
+ if (build_with_mozilla) {
+ include_dirs = [
+ "/media/libyuv",
+ "/media/libyuv/libyuv/include",
+ ]
+ }
+ }
+
+ video_coding_modules_tests_resources = []
+ if (is_android) {
+ video_coding_modules_tests_resources += [
+ "../../resources/foreman_128x96.yuv",
+ "../../resources/foreman_160x120.yuv",
+ "../../resources/foreman_176x144.yuv",
+ "../../resources/foreman_240x136.yuv",
+ "../../resources/foreman_320x240.yuv",
+ "../../resources/foreman_480x272.yuv",
+ ]
+ }
+ if (!is_android) {
+ video_coding_modules_tests_resources += [
+ "../../resources/ConferenceMotion_1280_720_50.yuv",
+ "../../resources/FourPeople_1280x720_30.yuv",
+ ]
+ }
+
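+  # GN lists have no length operation; count the resources with a foreach.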
+ num_video_coding_modules_tests_resources = 0
+ foreach(i, video_coding_modules_tests_resources) {
+ num_video_coding_modules_tests_resources += 1
+ }
+
+ if (num_video_coding_modules_tests_resources > 0) {
+ if (is_ios || is_mac) {
+ bundle_data("video_coding_modules_tests_resources_bundle_data") {
+ testonly = true
+ sources = video_coding_modules_tests_resources
+ outputs = [ "{{bundle_resources_dir}}/{{source_file_part}}" ]
+ }
+ }
+ }
+
+ rtc_library("videocodec_test_impl") {
+ testonly = true
+ sources = [
+ "codecs/test/videocodec_test_fixture_impl.cc",
+ "codecs/test/videocodec_test_fixture_impl.h",
+ ]
+ deps = [
+ ":codec_globals_headers",
+ ":video_codec_interface",
+ ":video_codecs_test_framework",
+ ":video_coding_utility",
+ ":videocodec_test_stats_impl",
+ ":webrtc_vp9_helpers",
+ "../../api:array_view",
+ "../../api:videocodec_test_fixture_api",
+ "../../api/test/video:function_video_factory",
+ "../../api/transport:field_trial_based_config",
+ "../../api/video:video_bitrate_allocation",
+ "../../api/video_codecs:video_codecs_api",
+ "../../api/video_codecs:video_decoder_factory_template",
+ "../../api/video_codecs:video_decoder_factory_template_dav1d_adapter",
+ "../../api/video_codecs:video_decoder_factory_template_libvpx_vp8_adapter",
+ "../../api/video_codecs:video_decoder_factory_template_libvpx_vp9_adapter",
+ "../../api/video_codecs:video_decoder_factory_template_open_h264_adapter",
+ "../../api/video_codecs:video_encoder_factory_template",
+ "../../api/video_codecs:video_encoder_factory_template_libaom_av1_adapter",
+ "../../api/video_codecs:video_encoder_factory_template_libvpx_vp8_adapter",
+ "../../api/video_codecs:video_encoder_factory_template_libvpx_vp9_adapter",
+ "../../api/video_codecs:video_encoder_factory_template_open_h264_adapter",
+ "../../call:video_stream_api",
+ "../../common_video",
+ "../../media:rtc_audio_video",
+ "../../media:rtc_media_base",
+ "../../rtc_base:checks",
+ "../../rtc_base:logging",
+ "../../rtc_base:rtc_base_tests_utils",
+ "../../rtc_base:stringutils",
+ "../../rtc_base:task_queue_for_test",
+ "../../rtc_base:timeutils",
+ "../../system_wrappers",
+ "../../test:fileutils",
+ "../../test:perf_test",
+ "../../test:test_support",
+ "../../test:video_test_common",
+ "../../test:video_test_support",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/strings:strings",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+ }
+
+ rtc_library("videocodec_test_stats_impl") {
+ testonly = true
+ sources = [
+ "codecs/test/videocodec_test_stats_impl.cc",
+ "codecs/test/videocodec_test_stats_impl.h",
+ ]
+ deps = [
+ "../../api:videocodec_test_fixture_api",
+ "../../api/numerics",
+ "../../rtc_base:checks",
+ "../../rtc_base:rtc_numerics",
+ "../../rtc_base:stringutils",
+ "../../test:test_common",
+ "../rtp_rtcp:rtp_rtcp_format",
+ ]
+ }
+
+ rtc_library("video_coding_modules_tests") {
+ testonly = true
+ defines = []
+
+ sources = [
+ "codecs/h264/test/h264_impl_unittest.cc",
+ "codecs/multiplex/test/multiplex_adapter_unittest.cc",
+ "codecs/test/video_encoder_decoder_instantiation_tests.cc",
+ "codecs/test/videocodec_test_av1.cc",
+ "codecs/test/videocodec_test_libvpx.cc",
+ "codecs/vp8/test/vp8_impl_unittest.cc",
+ ]
+
+ if (rtc_libvpx_build_vp9) {
+ sources += [ "codecs/vp9/test/vp9_impl_unittest.cc" ]
+ }
+
+ if (rtc_use_h264) {
+ sources += [ "codecs/test/videocodec_test_openh264.cc" ]
+ }
+
+ deps = [
+ ":encoded_video_frame_producer",
+ ":mock_libvpx_interface",
+ ":video_codec_interface",
+ ":video_codecs_test_framework",
+ ":video_coding_utility",
+ ":videocodec_test_impl",
+ ":webrtc_h264",
+ ":webrtc_libvpx_interface",
+ ":webrtc_multiplex",
+ ":webrtc_vp8",
+ ":webrtc_vp9",
+ ":webrtc_vp9_helpers",
+ "../../api:create_frame_generator",
+ "../../api:create_videocodec_test_fixture_api",
+ "../../api:frame_generator_api",
+ "../../api:mock_video_codec_factory",
+ "../../api:mock_video_decoder",
+ "../../api:mock_video_encoder",
+ "../../api:scoped_refptr",
+ "../../api:videocodec_test_fixture_api",
+ "../../api/test/video:function_video_factory",
+ "../../api/video:encoded_image",
+ "../../api/video:video_frame",
+ "../../api/video:video_rtp_headers",
+ "../../api/video_codecs:rtc_software_fallback_wrappers",
+ "../../api/video_codecs:video_codecs_api",
+ "../../common_video",
+ "../../common_video/test:utilities",
+ "../../media:rtc_internal_video_codecs",
+ "../../media:rtc_media_base",
+ "../../media:rtc_simulcast_encoder_adapter",
+ "../../rtc_base",
+ "../../rtc_base:refcount",
+ "../../rtc_base:stringutils",
+ "../../rtc_base:timeutils",
+ "../../test:explicit_key_value_config",
+ "../../test:field_trial",
+ "../../test:fileutils",
+ "../../test:test_support",
+ "../../test:video_test_common",
+ "../rtp_rtcp:rtp_rtcp_format",
+ "codecs/av1:dav1d_decoder",
+ "codecs/av1:libaom_av1_decoder",
+ "//third_party/libyuv",
+ ]
+ if (build_with_mozilla) {
+ deps -= [ "//third_party/libyuv" ]
+ include_dirs = [
+ "/media/libyuv",
+ "/media/libyuv/libyuv/include",
+ ]
+ }
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/memory",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+
+ data = video_coding_modules_tests_resources
+
+ if (is_android) {
+ sources += [ "codecs/test/videocodec_test_mediacodec.cc" ]
+
+ deps += [ ":android_codec_factory_helper" ]
+ }
+
+ if (is_ios || is_mac) {
+ sources += [ "codecs/test/videocodec_test_videotoolbox.cc" ]
+
+ deps += [ ":objc_codec_factory_helper" ]
+
+ if (num_video_coding_modules_tests_resources > 0) {
+ deps += [ ":video_coding_modules_tests_resources_bundle_data" ]
+ }
+ }
+
+ if (rtc_build_libvpx) {
+ deps += [ rtc_libvpx_dir ]
+ }
+ }
+
+ rtc_library("video_coding_unittests") {
+ testonly = true
+
+ sources = [
+ "chain_diff_calculator_unittest.cc",
+ "codecs/test/videocodec_test_fixture_config_unittest.cc",
+ "codecs/test/videocodec_test_stats_impl_unittest.cc",
+ "codecs/test/videoprocessor_unittest.cc",
+ "codecs/vp8/default_temporal_layers_unittest.cc",
+ "codecs/vp8/libvpx_vp8_simulcast_test.cc",
+ "codecs/vp8/screenshare_layers_unittest.cc",
+ "codecs/vp9/svc_config_unittest.cc",
+ "decoding_state_unittest.cc",
+ "fec_controller_unittest.cc",
+ "frame_buffer2_unittest.cc",
+ "frame_dependencies_calculator_unittest.cc",
+ "generic_decoder_unittest.cc",
+ "h264_packet_buffer_unittest.cc",
+ "h264_sprop_parameter_sets_unittest.cc",
+ "h264_sps_pps_tracker_unittest.cc",
+ "histogram_unittest.cc",
+ "jitter_buffer_unittest.cc",
+ "loss_notification_controller_unittest.cc",
+ "nack_requester_unittest.cc",
+ "packet_buffer_unittest.cc",
+ "receiver_unittest.cc",
+ "rtp_frame_reference_finder_unittest.cc",
+ "rtp_vp8_ref_finder_unittest.cc",
+ "rtp_vp9_ref_finder_unittest.cc",
+ "session_info_unittest.cc",
+ "test/stream_generator.cc",
+ "test/stream_generator.h",
+ "utility/bandwidth_quality_scaler_unittest.cc",
+ "utility/decoded_frames_history_unittest.cc",
+ "utility/frame_dropper_unittest.cc",
+ "utility/framerate_controller_deprecated_unittest.cc",
+ "utility/ivf_file_reader_unittest.cc",
+ "utility/ivf_file_writer_unittest.cc",
+ "utility/qp_parser_unittest.cc",
+ "utility/quality_scaler_unittest.cc",
+ "utility/simulcast_rate_allocator_unittest.cc",
+ "utility/vp9_uncompressed_header_parser_unittest.cc",
+ "video_codec_initializer_unittest.cc",
+ "video_receiver_unittest.cc",
+ ]
+ if (rtc_use_h264) {
+ sources += [
+ "codecs/h264/h264_encoder_impl_unittest.cc",
+ "codecs/h264/h264_simulcast_unittest.cc",
+ ]
+ }
+
+ deps = [
+ ":chain_diff_calculator",
+ ":codec_globals_headers",
+ ":encoded_frame",
+ ":frame_buffer2",
+ ":frame_dependencies_calculator",
+ ":h264_packet_buffer",
+ ":nack_requester",
+ ":packet_buffer",
+ ":simulcast_test_fixture_impl",
+ ":video_codec_interface",
+ ":video_codecs_test_framework",
+ ":video_coding",
+ ":video_coding_legacy",
+ ":video_coding_utility",
+ ":videocodec_test_impl",
+ ":videocodec_test_stats_impl",
+ ":webrtc_h264",
+ ":webrtc_vp8",
+ ":webrtc_vp8_temporal_layers",
+ ":webrtc_vp9",
+ ":webrtc_vp9_helpers",
+ "..:module_fec_api",
+ "../../api:array_view",
+ "../../api:create_simulcast_test_fixture_api",
+ "../../api:fec_controller_api",
+ "../../api:mock_fec_controller_override",
+ "../../api:mock_video_decoder",
+ "../../api:mock_video_encoder",
+ "../../api:rtp_packet_info",
+ "../../api:scoped_refptr",
+ "../../api:simulcast_test_fixture_api",
+ "../../api:videocodec_test_fixture_api",
+ "../../api/task_queue",
+ "../../api/task_queue:default_task_queue_factory",
+ "../../api/test/video:function_video_factory",
+ "../../api/units:data_size",
+ "../../api/units:frequency",
+ "../../api/units:time_delta",
+ "../../api/units:timestamp",
+ "../../api/video:builtin_video_bitrate_allocator_factory",
+ "../../api/video:encoded_frame",
+ "../../api/video:render_resolution",
+ "../../api/video:video_adaptation",
+ "../../api/video:video_bitrate_allocation",
+ "../../api/video:video_bitrate_allocator",
+ "../../api/video:video_bitrate_allocator_factory",
+ "../../api/video:video_frame",
+ "../../api/video:video_frame_type",
+ "../../api/video:video_rtp_headers",
+ "../../api/video_codecs:video_codecs_api",
+ "../../api/video_codecs:vp8_temporal_layers_factory",
+ "../../common_video",
+ "../../common_video/generic_frame_descriptor",
+ "../../common_video/test:utilities",
+ "../../media:rtc_media_base",
+ "../../rtc_base",
+ "../../rtc_base:checks",
+ "../../rtc_base:histogram_percentile_counter",
+ "../../rtc_base:location",
+ "../../rtc_base:platform_thread",
+ "../../rtc_base:random",
+ "../../rtc_base:refcount",
+ "../../rtc_base:rtc_base_tests_utils",
+ "../../rtc_base:rtc_event",
+ "../../rtc_base:rtc_numerics",
+ "../../rtc_base:stringutils",
+ "../../rtc_base:task_queue_for_test",
+ "../../rtc_base:timeutils",
+ "../../rtc_base/experiments:encoder_info_settings",
+ "../../rtc_base/synchronization:mutex",
+ "../../rtc_base/system:unused",
+ "../../system_wrappers",
+ "../../system_wrappers:field_trial",
+ "../../system_wrappers:metrics",
+ "../../test:fake_encoded_frame",
+ "../../test:fake_video_codecs",
+ "../../test:field_trial",
+ "../../test:fileutils",
+ "../../test:run_loop",
+ "../../test:scoped_key_value_config",
+ "../../test:test_support",
+ "../../test:video_test_common",
+ "../../test:video_test_support",
+ "../../test/time_controller:time_controller",
+ "../rtp_rtcp:rtp_rtcp_format",
+ "../rtp_rtcp:rtp_video_header",
+ "codecs/av1:video_coding_codecs_av1_tests",
+ "svc:scalability_structure_tests",
+ "svc:svc_rate_allocator_tests",
+ "timing:jitter_estimator",
+ "timing:timing_module",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/memory",
+ "//third_party/abseil-cpp/absl/types:optional",
+ "//third_party/abseil-cpp/absl/types:variant",
+ ]
+ if (rtc_build_libvpx) {
+ deps += [ rtc_libvpx_dir ]
+ }
+ }
+}
diff --git a/third_party/libwebrtc/modules/video_coding/DEPS b/third_party/libwebrtc/modules/video_coding/DEPS
new file mode 100644
index 0000000000..3a7629e84b
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/DEPS
@@ -0,0 +1,23 @@
+include_rules = [
+ "+vpx",
+ "+call",
+ "+common_video",
+ "+sdk",
+ "+system_wrappers",
+ "+rtc_tools",
+ "+third_party/libyuv",
+ "+rtc_base/system/rtc_export.h",
+]
+
+specific_include_rules = {
+ "android_codec_factory_helper\.cc": [
+ "+base/android",
+ ],
+ "multiplex_encoder_adapter\.cc": [
+ "+media/base",
+ ],
+ ".*test.*\.cc": [
+ "+media/base",
+ "+media/engine",
+ ],
+}
diff --git a/third_party/libwebrtc/modules/video_coding/OWNERS b/third_party/libwebrtc/modules/video_coding/OWNERS
new file mode 100644
index 0000000000..2e4d968c98
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/OWNERS
@@ -0,0 +1,7 @@
+asapersson@webrtc.org
+brandtr@webrtc.org
+ilnik@webrtc.org
+marpan@webrtc.org
+philipel@webrtc.org
+sprang@webrtc.org
+stefan@webrtc.org
diff --git a/third_party/libwebrtc/modules/video_coding/chain_diff_calculator.cc b/third_party/libwebrtc/modules/video_coding/chain_diff_calculator.cc
new file mode 100644
index 0000000000..5f852717b5
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/chain_diff_calculator.cc
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/video_coding/chain_diff_calculator.h"
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <algorithm>
+#include <vector>
+
+#include "absl/container/inlined_vector.h"
+#include "absl/types/optional.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+void ChainDiffCalculator::Reset(const std::vector<bool>& chains) {
+ last_frame_in_chain_.resize(chains.size());
+ for (size_t i = 0; i < chains.size(); ++i) {
+ if (chains[i]) {
+ last_frame_in_chain_[i] = absl::nullopt;
+ }
+ }
+}
+
+absl::InlinedVector<int, 4> ChainDiffCalculator::ChainDiffs(
+ int64_t frame_id) const {
+ absl::InlinedVector<int, 4> result;
+ result.reserve(last_frame_in_chain_.size());
+ for (const auto& frame_id_in_chain : last_frame_in_chain_) {
+ result.push_back(frame_id_in_chain ? (frame_id - *frame_id_in_chain) : 0);
+ }
+ return result;
+}
+
+absl::InlinedVector<int, 4> ChainDiffCalculator::From(
+ int64_t frame_id,
+ const std::vector<bool>& chains) {
+ auto result = ChainDiffs(frame_id);
+ if (chains.size() != last_frame_in_chain_.size()) {
+ RTC_LOG(LS_ERROR) << "Insconsistent chain configuration for frame#"
+ << frame_id << ": expected "
+ << last_frame_in_chain_.size() << " chains, found "
+ << chains.size();
+ }
+ size_t num_chains = std::min(last_frame_in_chain_.size(), chains.size());
+ for (size_t i = 0; i < num_chains; ++i) {
+ if (chains[i]) {
+ last_frame_in_chain_[i] = frame_id;
+ }
+ }
+ return result;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/chain_diff_calculator.h b/third_party/libwebrtc/modules/video_coding/chain_diff_calculator.h
new file mode 100644
index 0000000000..bca7340c6f
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/chain_diff_calculator.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_CHAIN_DIFF_CALCULATOR_H_
+#define MODULES_VIDEO_CODING_CHAIN_DIFF_CALCULATOR_H_
+
+#include <stdint.h>
+
+#include <vector>
+
+#include "absl/container/inlined_vector.h"
+#include "absl/types/optional.h"
+
+namespace webrtc {
+
+// This class is thread compatible.
+class ChainDiffCalculator {
+ public:
+ ChainDiffCalculator() = default;
+ ChainDiffCalculator(const ChainDiffCalculator&) = default;
+ ChainDiffCalculator& operator=(const ChainDiffCalculator&) = default;
+
+  // Restarts chains: for each position where chains[i] == true, the next
+  // chain_diff will be 0. Saves chains.size() as the stream's chain count.
+ void Reset(const std::vector<bool>& chains);
+
+  // Returns chain diffs given flags for whether the frame is in each chain.
+ absl::InlinedVector<int, 4> From(int64_t frame_id,
+ const std::vector<bool>& chains);
+
+ private:
+ absl::InlinedVector<int, 4> ChainDiffs(int64_t frame_id) const;
+
+ absl::InlinedVector<absl::optional<int64_t>, 4> last_frame_in_chain_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_CHAIN_DIFF_CALCULATOR_H_
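
A usage sketch mirroring the unit tests added later in this patch: reset the calculator on a key frame, then pass each frame's chain membership to From(). The function name TwoChainExample is hypothetical; the diff values in the comments match the TwoChainsFullSvc test below.

#include "modules/video_coding/chain_diff_calculator.h"

void TwoChainExample() {
  webrtc::ChainDiffCalculator calculator;
  // Key frame: restart both chains so their first chain_diff is 0.
  calculator.Reset({true, true});
  auto diffs = calculator.From(/*frame_id=*/1, {true, true});  // {0, 0}
  // Frame in chain 1 only: chain 0 keeps measuring distance to frame 1.
  diffs = calculator.From(/*frame_id=*/2, {false, true});  // {1, 1}
  // Frame in neither chain: diffs still point at the last frame per chain.
  diffs = calculator.From(/*frame_id=*/3, {false, false});  // {2, 1}
}
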
diff --git a/third_party/libwebrtc/modules/video_coding/chain_diff_calculator_gn/moz.build b/third_party/libwebrtc/modules/video_coding/chain_diff_calculator_gn/moz.build
new file mode 100644
index 0000000000..09766af698
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/chain_diff_calculator_gn/moz.build
@@ -0,0 +1,205 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/video_coding/chain_diff_calculator.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("chain_diff_calculator_gn")
diff --git a/third_party/libwebrtc/modules/video_coding/chain_diff_calculator_unittest.cc b/third_party/libwebrtc/modules/video_coding/chain_diff_calculator_unittest.cc
new file mode 100644
index 0000000000..efd09bd888
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/chain_diff_calculator_unittest.cc
@@ -0,0 +1,126 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/chain_diff_calculator.h"
+
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+using ::testing::ElementsAre;
+using ::testing::SizeIs;
+
+TEST(ChainDiffCalculatorTest, SingleChain) {
+  // Simulate a stream with 2 temporal layers where the chain
+  // protects temporal layer 0.
+ ChainDiffCalculator calculator;
+ // Key frame.
+ calculator.Reset({true});
+ EXPECT_THAT(calculator.From(1, {true}), ElementsAre(0));
+ // T1 delta frame.
+ EXPECT_THAT(calculator.From(2, {false}), ElementsAre(1));
+ // T0 delta frame.
+ EXPECT_THAT(calculator.From(3, {true}), ElementsAre(2));
+}
+
+TEST(ChainDiffCalculatorTest, TwoChainsFullSvc) {
+  // Simulate a full SVC stream with 2 spatial and 2 temporal layers.
+  // Chains protect temporal layer 0.
+ ChainDiffCalculator calculator;
+ // S0 Key frame.
+ calculator.Reset({true, true});
+ EXPECT_THAT(calculator.From(1, {true, true}), ElementsAre(0, 0));
+ // S1 Key frame.
+ EXPECT_THAT(calculator.From(2, {false, true}), ElementsAre(1, 1));
+ // S0T1 delta frame.
+ EXPECT_THAT(calculator.From(3, {false, false}), ElementsAre(2, 1));
+ // S1T1 delta frame.
+ EXPECT_THAT(calculator.From(4, {false, false}), ElementsAre(3, 2));
+ // S0T0 delta frame.
+ EXPECT_THAT(calculator.From(5, {true, true}), ElementsAre(4, 3));
+ // S1T0 delta frame.
+ EXPECT_THAT(calculator.From(6, {false, true}), ElementsAre(1, 1));
+}
+
+TEST(ChainDiffCalculatorTest, TwoChainsKSvc) {
+  // Simulate a K-SVC stream with 2 spatial and 2 temporal layers.
+  // Chains protect temporal layer 0.
+ ChainDiffCalculator calculator;
+ // S0 Key frame.
+ calculator.Reset({true, true});
+ EXPECT_THAT(calculator.From(1, {true, true}), ElementsAre(0, 0));
+ // S1 Key frame.
+ EXPECT_THAT(calculator.From(2, {false, true}), ElementsAre(1, 1));
+ // S0T1 delta frame.
+ EXPECT_THAT(calculator.From(3, {false, false}), ElementsAre(2, 1));
+ // S1T1 delta frame.
+ EXPECT_THAT(calculator.From(4, {false, false}), ElementsAre(3, 2));
+ // S0T0 delta frame.
+ EXPECT_THAT(calculator.From(5, {true, false}), ElementsAre(4, 3));
+ // S1T0 delta frame.
+ EXPECT_THAT(calculator.From(6, {false, true}), ElementsAre(1, 4));
+}
+
+TEST(ChainDiffCalculatorTest, TwoChainsSimulcast) {
+  // Simulate a simulcast stream with 2 spatial and 2 temporal layers.
+  // Chains protect temporal layer 0.
+ ChainDiffCalculator calculator;
+ // S0 Key frame.
+ calculator.Reset({true, false});
+ EXPECT_THAT(calculator.From(1, {true, false}), ElementsAre(0, 0));
+ // S1 Key frame.
+ calculator.Reset({false, true});
+ EXPECT_THAT(calculator.From(2, {false, true}), ElementsAre(1, 0));
+ // S0T1 delta frame.
+ EXPECT_THAT(calculator.From(3, {false, false}), ElementsAre(2, 1));
+ // S1T1 delta frame.
+ EXPECT_THAT(calculator.From(4, {false, false}), ElementsAre(3, 2));
+ // S0T0 delta frame.
+ EXPECT_THAT(calculator.From(5, {true, false}), ElementsAre(4, 3));
+ // S1T0 delta frame.
+ EXPECT_THAT(calculator.From(6, {false, true}), ElementsAre(1, 4));
+}
+
+TEST(ChainDiffCalculatorTest, ResilientToAbsentChainConfig) {
+ ChainDiffCalculator calculator;
+ // Key frame.
+ calculator.Reset({true, false});
+ EXPECT_THAT(calculator.From(1, {true, false}), ElementsAre(0, 0));
+  // Chains were not set; From() should still return 2 chain_diffs.
+ EXPECT_THAT(calculator.From(2, {}), ElementsAre(1, 0));
+  // Chain diffs for the next frame(s) are undefined, but there should still
+  // be the correct number of them.
+ EXPECT_THAT(calculator.From(3, {true, false}), SizeIs(2));
+ EXPECT_THAT(calculator.From(4, {false, true}), SizeIs(2));
+  // Since the previous two frames updated all the chains, the expected
+  // chain_diffs are known again.
+ EXPECT_THAT(calculator.From(5, {false, false}), ElementsAre(2, 1));
+}
+
+TEST(ChainDiffCalculatorTest, ResilientToTooManyChains) {
+ ChainDiffCalculator calculator;
+ // Key frame.
+ calculator.Reset({true, false});
+ EXPECT_THAT(calculator.From(1, {true, false}), ElementsAre(0, 0));
+  // Set the wrong number of chains; the chain_diff count should not change.
+ EXPECT_THAT(calculator.From(2, {true, true, true}), ElementsAre(1, 0));
+  // Chain diffs for the next frame(s) are undefined, but there should still
+  // be the correct number of them.
+ EXPECT_THAT(calculator.From(3, {true, false}), SizeIs(2));
+ EXPECT_THAT(calculator.From(4, {false, true}), SizeIs(2));
+  // Since the previous two frames updated all the chains, the expected
+  // chain_diffs are known again.
+ EXPECT_THAT(calculator.From(5, {false, false}), ElementsAre(2, 1));
+}
+
+} // namespace
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/codec_globals_headers_gn/moz.build b/third_party/libwebrtc/modules/video_coding/codec_globals_headers_gn/moz.build
new file mode 100644
index 0000000000..d4d71bf7fb
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codec_globals_headers_gn/moz.build
@@ -0,0 +1,193 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("codec_globals_headers_gn")
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/av1/BUILD.gn b/third_party/libwebrtc/modules/video_coding/codecs/av1/BUILD.gn
new file mode 100644
index 0000000000..648778d969
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/av1/BUILD.gn
@@ -0,0 +1,153 @@
+# Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+import("//third_party/libaom/options.gni")
+import("../../../../webrtc.gni")
+
+rtc_library("av1_svc_config") {
+ sources = [
+ "av1_svc_config.cc",
+ "av1_svc_config.h",
+ ]
+ deps = [
+ "../../../../api/video_codecs:video_codecs_api",
+ "../../../../rtc_base:checks",
+ "../../../../rtc_base:logging",
+ "../../../../rtc_base:stringutils",
+ "../../svc:scalability_mode_util",
+ "../../svc:scalability_structures",
+ "../../svc:scalable_video_controller",
+ ]
+
+ absl_deps = [ "//third_party/abseil-cpp/absl/container:inlined_vector" ]
+}
+
+rtc_library("dav1d_decoder") {
+ visibility = [ "*" ]
+ poisonous = [ "software_video_codecs" ]
+ public = [ "dav1d_decoder.h" ]
+ sources = [ "dav1d_decoder.cc" ]
+
+ deps = [
+ "../..:video_codec_interface",
+ "../../../../api:scoped_refptr",
+ "../../../../api/video:encoded_image",
+ "../../../../api/video:video_frame",
+ "../../../../api/video_codecs:video_codecs_api",
+ "../../../../common_video",
+ "../../../../rtc_base:logging",
+ "//third_party/dav1d",
+ "//third_party/libyuv",
+ ]
+ if (build_with_mozilla) {
+ deps -= [
+ "//third_party/dav1d",
+ "//third_party/libyuv",
+ ]
+ include_dirs = [
+ "/media/libdav1d/",
+ "/media/libyuv",
+ "/media/libyuv/libyuv/include",
+ "/third_party/dav1d/include/dav1d",
+ ]
+ }
+ absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ]
+}
+
+rtc_library("libaom_av1_decoder") {
+ visibility = [ "*" ]
+ poisonous = [ "software_video_codecs" ]
+ public = [ "libaom_av1_decoder.h" ]
+ deps = [ "../../../../api/video_codecs:video_codecs_api" ]
+ absl_deps = [ "//third_party/abseil-cpp/absl/base:core_headers" ]
+
+ if (enable_libaom) {
+ sources = [ "libaom_av1_decoder.cc" ]
+ deps += [
+ "../..:video_codec_interface",
+ "../../../../api:scoped_refptr",
+ "../../../../api/video:encoded_image",
+ "../../../../api/video:video_frame",
+ "../../../../common_video",
+ "../../../../rtc_base:logging",
+ "//third_party/libaom",
+ "//third_party/libyuv",
+ ]
+ if (build_with_mozilla) {
+ deps -= [
+ "//third_party/libaom",
+ "//third_party/libyuv",
+ ]
+ }
+ absl_deps += [ "//third_party/abseil-cpp/absl/types:optional" ]
+ } else {
+ sources = [ "libaom_av1_decoder_absent.cc" ]
+ }
+}
+
+rtc_library("libaom_av1_encoder") {
+ visibility = [ "*" ]
+ poisonous = [ "software_video_codecs" ]
+ public = [ "libaom_av1_encoder.h" ]
+ sources = [ "libaom_av1_encoder.cc" ]
+ deps = [
+ "../..:video_codec_interface",
+ "../../../../api:scoped_refptr",
+ "../../../../api/video:encoded_image",
+ "../../../../api/video:video_frame",
+ "../../../../api/video_codecs:video_codecs_api",
+ "../../../../common_video",
+ "../../../../rtc_base:checks",
+ "../../../../rtc_base:logging",
+ "../../svc:scalability_structures",
+ "../../svc:scalable_video_controller",
+ ]
+ if (enable_libaom) {
+ deps += [ "//third_party/libaom" ]
+ }
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/algorithm:container",
+ "//third_party/abseil-cpp/absl/base:core_headers",
+ "//third_party/abseil-cpp/absl/strings:strings",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+}
+
+if (rtc_include_tests) {
+ rtc_library("video_coding_codecs_av1_tests") {
+ testonly = true
+
+ sources = [ "av1_svc_config_unittest.cc" ]
+ deps = [
+ ":av1_svc_config",
+ "../../../../api/video_codecs:video_codecs_api",
+ "../../../../test:test_support",
+ ]
+
+ if (enable_libaom) {
+ sources += [
+ "libaom_av1_encoder_unittest.cc",
+ "libaom_av1_unittest.cc",
+ ]
+ deps += [
+ ":libaom_av1_decoder",
+ ":libaom_av1_encoder",
+ "../..:encoded_video_frame_producer",
+ "../..:video_codec_interface",
+ "../../../../api:mock_video_encoder",
+ "../../../../api/units:data_size",
+ "../../../../api/units:time_delta",
+ "../../../../api/video:video_frame",
+ "../../svc:scalability_mode_util",
+ "../../svc:scalability_structures",
+ "../../svc:scalable_video_controller",
+ ]
+ absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ]
+ }
+ }
+}
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/av1/DEPS b/third_party/libwebrtc/modules/video_coding/codecs/av1/DEPS
new file mode 100644
index 0000000000..bfb1c733d4
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/av1/DEPS
@@ -0,0 +1,4 @@
+include_rules = [
+ "+third_party/libaom",
+ "+third_party/dav1d",
+]
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/av1/av1_svc_config.cc b/third_party/libwebrtc/modules/video_coding/codecs/av1/av1_svc_config.cc
new file mode 100644
index 0000000000..43dcf96ab7
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/av1/av1_svc_config.cc
@@ -0,0 +1,118 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/codecs/av1/av1_svc_config.h"
+
+#include <algorithm>
+#include <cmath>
+#include <memory>
+
+#include "modules/video_coding/svc/create_scalability_structure.h"
+#include "modules/video_coding/svc/scalability_mode_util.h"
+#include "modules/video_coding/svc/scalable_video_controller.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/strings/string_builder.h"
+
+namespace webrtc {
+namespace {
+absl::optional<ScalabilityMode> BuildScalabilityMode(int num_temporal_layers,
+ int num_spatial_layers) {
+ char name[20];
+ rtc::SimpleStringBuilder ss(name);
+ ss << "L" << num_spatial_layers << "T" << num_temporal_layers;
+ if (num_spatial_layers > 1) {
+ ss << "_KEY";
+ }
+
+ return ScalabilityModeFromString(name);
+}
+} // namespace
+
+absl::InlinedVector<ScalabilityMode, kScalabilityModeCount>
+LibaomAv1EncoderSupportedScalabilityModes() {
+ absl::InlinedVector<ScalabilityMode, kScalabilityModeCount> scalability_modes;
+ for (ScalabilityMode scalability_mode : kAllScalabilityModes) {
+ if (ScalabilityStructureConfig(scalability_mode) != absl::nullopt) {
+ scalability_modes.push_back(scalability_mode);
+ }
+ }
+ return scalability_modes;
+}
+
+bool LibaomAv1EncoderSupportsScalabilityMode(ScalabilityMode scalability_mode) {
+ // For libaom AV1, the scalability mode is supported if we can create the
+ // scalability structure.
+ return ScalabilityStructureConfig(scalability_mode) != absl::nullopt;
+}
+
+bool SetAv1SvcConfig(VideoCodec& video_codec,
+ int num_temporal_layers,
+ int num_spatial_layers) {
+ RTC_DCHECK_EQ(video_codec.codecType, kVideoCodecAV1);
+
+ absl::optional<ScalabilityMode> scalability_mode =
+ video_codec.GetScalabilityMode();
+ if (!scalability_mode.has_value()) {
+ scalability_mode =
+ BuildScalabilityMode(num_temporal_layers, num_spatial_layers);
+ if (!scalability_mode) {
+ RTC_LOG(LS_WARNING) << "Scalability mode is not set, using 'L1T1'.";
+ scalability_mode = ScalabilityMode::kL1T1;
+ }
+ }
+
+ std::unique_ptr<ScalableVideoController> structure =
+ CreateScalabilityStructure(*scalability_mode);
+ if (structure == nullptr) {
+ RTC_LOG(LS_WARNING) << "Failed to create structure "
+ << static_cast<int>(*scalability_mode);
+ return false;
+ }
+
+ video_codec.SetScalabilityMode(*scalability_mode);
+
+ ScalableVideoController::StreamLayersConfig info = structure->StreamConfig();
+ for (int sl_idx = 0; sl_idx < info.num_spatial_layers; ++sl_idx) {
+ SpatialLayer& spatial_layer = video_codec.spatialLayers[sl_idx];
+ spatial_layer.width = video_codec.width * info.scaling_factor_num[sl_idx] /
+ info.scaling_factor_den[sl_idx];
+ spatial_layer.height = video_codec.height *
+ info.scaling_factor_num[sl_idx] /
+ info.scaling_factor_den[sl_idx];
+ spatial_layer.maxFramerate = video_codec.maxFramerate;
+ spatial_layer.numberOfTemporalLayers = info.num_temporal_layers;
+ spatial_layer.active = true;
+ }
+
+ if (info.num_spatial_layers == 1) {
+ SpatialLayer& spatial_layer = video_codec.spatialLayers[0];
+ spatial_layer.minBitrate = video_codec.minBitrate;
+ spatial_layer.maxBitrate = video_codec.maxBitrate;
+ spatial_layer.targetBitrate =
+ (video_codec.minBitrate + video_codec.maxBitrate) / 2;
+ return true;
+ }
+
+ for (int sl_idx = 0; sl_idx < info.num_spatial_layers; ++sl_idx) {
+ SpatialLayer& spatial_layer = video_codec.spatialLayers[sl_idx];
+ // minBitrate and maxBitrate formulas are copied from vp9 settings and
+ // are not yet tuned for av1.
+ const int num_pixels = spatial_layer.width * spatial_layer.height;
+ int min_bitrate_kbps = (600.0 * std::sqrt(num_pixels) - 95'000.0) / 1000.0;
+ spatial_layer.minBitrate = std::max(min_bitrate_kbps, 20);
+ spatial_layer.maxBitrate = 50 + static_cast<int>(1.6 * num_pixels / 1000.0);
+ spatial_layer.targetBitrate =
+ (spatial_layer.minBitrate + spatial_layer.maxBitrate) / 2;
+ }
+ return true;
+}
+
+} // namespace webrtc
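
To make the vp9-derived formulas above concrete, here is a standalone re-computation (illustrative only; constants copied from SetAv1SvcConfig) for a single 640x360 spatial layer:

    #include <algorithm>
    #include <cmath>
    #include <cstdio>

    // Re-derives the per-layer bitrates SetAv1SvcConfig would assign to a
    // 640x360 spatial layer. Illustrative sketch, not part of the patch.
    int main() {
      const int num_pixels = 640 * 360;  // 230400, sqrt = 480 exactly
      const int min_kbps = std::max(
          static_cast<int>((600.0 * std::sqrt(num_pixels) - 95'000.0) / 1000.0),
          20);                                                   // 193 kbps
      const int max_kbps = 50 + static_cast<int>(1.6 * num_pixels / 1000.0);
      const int target_kbps = (min_kbps + max_kbps) / 2;  // (193+418)/2 = 305
      std::printf("min=%d max=%d target=%d kbps\n", min_kbps, max_kbps,
                  target_kbps);
      return 0;
    }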
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/av1/av1_svc_config.h b/third_party/libwebrtc/modules/video_coding/codecs/av1/av1_svc_config.h
new file mode 100644
index 0000000000..05b886b9f4
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/av1/av1_svc_config.h
@@ -0,0 +1,32 @@
+/* Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_CODECS_AV1_AV1_SVC_CONFIG_H_
+#define MODULES_VIDEO_CODING_CODECS_AV1_AV1_SVC_CONFIG_H_
+
+#include <vector>
+
+#include "absl/container/inlined_vector.h"
+#include "api/video_codecs/video_codec.h"
+
+namespace webrtc {
+
+absl::InlinedVector<ScalabilityMode, kScalabilityModeCount>
+LibaomAv1EncoderSupportedScalabilityModes();
+
+bool LibaomAv1EncoderSupportsScalabilityMode(ScalabilityMode scalability_mode);
+
+// Fills `video_codec.spatialLayers` using other members.
+bool SetAv1SvcConfig(VideoCodec& video_codec,
+ int num_temporal_layers,
+ int num_spatial_layers);
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_CODECS_AV1_AV1_SVC_CONFIG_H_
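
A minimal caller-side sketch of this header (field names taken from the unit tests below; the 1280x720/L2T3 values are arbitrary examples):

    #include "api/video_codecs/video_codec.h"
    #include "modules/video_coding/codecs/av1/av1_svc_config.h"

    // Sketch: derive a two-spatial-layer, three-temporal-layer AV1 config.
    bool ConfigureAv1Svc(webrtc::VideoCodec& codec) {
      codec.codecType = webrtc::kVideoCodecAV1;
      codec.width = 1280;
      codec.height = 720;
      codec.maxFramerate = 30;
      // On success, codec.spatialLayers[] holds per-layer resolution,
      // framerate and min/target/max bitrates.
      return webrtc::SetAv1SvcConfig(codec, /*num_temporal_layers=*/3,
                                     /*num_spatial_layers=*/2);
    }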
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/av1/av1_svc_config_gn/moz.build b/third_party/libwebrtc/modules/video_coding/codecs/av1/av1_svc_config_gn/moz.build
new file mode 100644
index 0000000000..f3bef360d8
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/av1/av1_svc_config_gn/moz.build
@@ -0,0 +1,205 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/video_coding/codecs/av1/av1_svc_config.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("av1_svc_config_gn")
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/av1/av1_svc_config_unittest.cc b/third_party/libwebrtc/modules/video_coding/codecs/av1/av1_svc_config_unittest.cc
new file mode 100644
index 0000000000..9f1da9865c
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/av1/av1_svc_config_unittest.cc
@@ -0,0 +1,171 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/codecs/av1/av1_svc_config.h"
+
+#include "api/video_codecs/video_codec.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+constexpr int kDontCare = 0;
+
+TEST(Av1SvcConfigTest, TreatsEmptyAsL1T1) {
+ VideoCodec video_codec;
+ video_codec.codecType = kVideoCodecAV1;
+
+ EXPECT_TRUE(SetAv1SvcConfig(video_codec, /*num_temporal_layers=*/kDontCare,
+ /*num_spatial_layers=*/kDontCare));
+
+ EXPECT_TRUE(video_codec.spatialLayers[0].active);
+ EXPECT_EQ(video_codec.spatialLayers[0].numberOfTemporalLayers, 1);
+ EXPECT_FALSE(video_codec.spatialLayers[1].active);
+}
+
+TEST(Av1SvcConfigTest, ScalabilityModeFromNumberOfTemporalLayers) {
+ VideoCodec video_codec;
+ video_codec.codecType = kVideoCodecAV1;
+
+ EXPECT_TRUE(SetAv1SvcConfig(video_codec, /*num_temporal_layers=*/3,
+ /*num_spatial_layers=*/1));
+ EXPECT_EQ(video_codec.spatialLayers[0].numberOfTemporalLayers, 3);
+}
+
+TEST(Av1SvcConfigTest, ScalabilityModeFromNumberOfSpatialLayers) {
+ VideoCodec video_codec;
+ video_codec.codecType = kVideoCodecAV1;
+
+ EXPECT_TRUE(SetAv1SvcConfig(video_codec, /*num_temporal_layers=*/3,
+ /*num_spatial_layers=*/2));
+ EXPECT_EQ(video_codec.spatialLayers[0].numberOfTemporalLayers, 3);
+ EXPECT_TRUE(video_codec.spatialLayers[0].active);
+ EXPECT_TRUE(video_codec.spatialLayers[1].active);
+ EXPECT_FALSE(video_codec.spatialLayers[2].active);
+}
+
+TEST(Av1SvcConfigTest, SetsActiveSpatialLayersFromScalabilityMode) {
+ VideoCodec video_codec;
+ video_codec.codecType = kVideoCodecAV1;
+ video_codec.SetScalabilityMode(ScalabilityMode::kL2T1);
+
+ EXPECT_TRUE(SetAv1SvcConfig(video_codec, /*num_temporal_layers=*/kDontCare,
+ /*num_spatial_layers=*/kDontCare));
+
+ EXPECT_TRUE(video_codec.spatialLayers[0].active);
+ EXPECT_TRUE(video_codec.spatialLayers[1].active);
+ EXPECT_FALSE(video_codec.spatialLayers[2].active);
+}
+
+TEST(Av1SvcConfigTest, ConfiguresDoubleResolutionRatioFromScalabilityMode) {
+ VideoCodec video_codec;
+ video_codec.codecType = kVideoCodecAV1;
+ video_codec.SetScalabilityMode(ScalabilityMode::kL2T1);
+ video_codec.width = 1200;
+ video_codec.height = 800;
+
+ EXPECT_TRUE(SetAv1SvcConfig(video_codec, /*num_temporal_layers=*/kDontCare,
+ /*num_spatial_layers=*/kDontCare));
+
+ EXPECT_EQ(video_codec.spatialLayers[0].width, 600);
+ EXPECT_EQ(video_codec.spatialLayers[0].height, 400);
+ EXPECT_EQ(video_codec.spatialLayers[1].width, 1200);
+ EXPECT_EQ(video_codec.spatialLayers[1].height, 800);
+}
+
+TEST(Av1SvcConfigTest, ConfiguresSmallResolutionRatioFromScalabilityMode) {
+ VideoCodec video_codec;
+ video_codec.codecType = kVideoCodecAV1;
+ // h mode uses 1.5:1 ratio
+ video_codec.SetScalabilityMode(ScalabilityMode::kL2T1h);
+ video_codec.width = 1500;
+ video_codec.height = 900;
+
+ EXPECT_TRUE(SetAv1SvcConfig(video_codec, /*num_temporal_layers=*/kDontCare,
+ /*num_spatial_layers=*/kDontCare));
+
+ EXPECT_EQ(video_codec.spatialLayers[0].width, 1000);
+ EXPECT_EQ(video_codec.spatialLayers[0].height, 600);
+ EXPECT_EQ(video_codec.spatialLayers[1].width, 1500);
+ EXPECT_EQ(video_codec.spatialLayers[1].height, 900);
+}
+
+TEST(Av1SvcConfigTest, CopiesFramerate) {
+ VideoCodec video_codec;
+ video_codec.codecType = kVideoCodecAV1;
+ video_codec.SetScalabilityMode(ScalabilityMode::kL2T1);
+ video_codec.maxFramerate = 27;
+
+ EXPECT_TRUE(SetAv1SvcConfig(video_codec, /*num_temporal_layers=*/kDontCare,
+ /*num_spatial_layers=*/kDontCare));
+
+ EXPECT_EQ(video_codec.spatialLayers[0].maxFramerate, 27);
+ EXPECT_EQ(video_codec.spatialLayers[1].maxFramerate, 27);
+}
+
+TEST(Av1SvcConfigTest, SetsNumberOfTemporalLayers) {
+ VideoCodec video_codec;
+ video_codec.codecType = kVideoCodecAV1;
+ video_codec.SetScalabilityMode(ScalabilityMode::kL1T3);
+
+ EXPECT_TRUE(SetAv1SvcConfig(video_codec, /*num_temporal_layers=*/kDontCare,
+ /*num_spatial_layers=*/kDontCare));
+
+ EXPECT_EQ(video_codec.spatialLayers[0].numberOfTemporalLayers, 3);
+}
+
+TEST(Av1SvcConfigTest, CopiesMinMaxBitrateForSingleSpatialLayer) {
+ VideoCodec video_codec;
+ video_codec.codecType = kVideoCodecAV1;
+ video_codec.SetScalabilityMode(ScalabilityMode::kL1T3);
+ video_codec.minBitrate = 100;
+ video_codec.maxBitrate = 500;
+
+ EXPECT_TRUE(SetAv1SvcConfig(video_codec, /*num_temporal_layers=*/kDontCare,
+ /*num_spatial_layers=*/kDontCare));
+
+ EXPECT_EQ(video_codec.spatialLayers[0].minBitrate, 100u);
+ EXPECT_EQ(video_codec.spatialLayers[0].maxBitrate, 500u);
+ EXPECT_LE(video_codec.spatialLayers[0].minBitrate,
+ video_codec.spatialLayers[0].targetBitrate);
+ EXPECT_LE(video_codec.spatialLayers[0].targetBitrate,
+ video_codec.spatialLayers[0].maxBitrate);
+}
+
+TEST(Av1SvcConfigTest, SetsBitratesForMultipleSpatialLayers) {
+ VideoCodec video_codec;
+ video_codec.codecType = kVideoCodecAV1;
+ video_codec.SetScalabilityMode(ScalabilityMode::kL3T3);
+
+ EXPECT_TRUE(SetAv1SvcConfig(video_codec, /*num_temporal_layers=*/kDontCare,
+ /*num_spatial_layers=*/kDontCare));
+
+ EXPECT_GT(video_codec.spatialLayers[0].minBitrate, 0u);
+ EXPECT_LE(video_codec.spatialLayers[0].minBitrate,
+ video_codec.spatialLayers[0].targetBitrate);
+ EXPECT_LE(video_codec.spatialLayers[0].targetBitrate,
+ video_codec.spatialLayers[0].maxBitrate);
+
+ EXPECT_GT(video_codec.spatialLayers[1].minBitrate, 0u);
+ EXPECT_LE(video_codec.spatialLayers[1].minBitrate,
+ video_codec.spatialLayers[1].targetBitrate);
+ EXPECT_LE(video_codec.spatialLayers[1].targetBitrate,
+ video_codec.spatialLayers[1].maxBitrate);
+
+ EXPECT_GT(video_codec.spatialLayers[2].minBitrate, 0u);
+ EXPECT_LE(video_codec.spatialLayers[2].minBitrate,
+ video_codec.spatialLayers[2].targetBitrate);
+ EXPECT_LE(video_codec.spatialLayers[2].targetBitrate,
+ video_codec.spatialLayers[2].maxBitrate);
+}
+
+} // namespace
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/av1/dav1d_decoder.cc b/third_party/libwebrtc/modules/video_coding/codecs/av1/dav1d_decoder.cc
new file mode 100644
index 0000000000..5551666811
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/av1/dav1d_decoder.cc
@@ -0,0 +1,202 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/codecs/av1/dav1d_decoder.h"
+
+#include <algorithm>
+
+#include "api/scoped_refptr.h"
+#include "api/video/encoded_image.h"
+#include "api/video/i420_buffer.h"
+#include "common_video/include/video_frame_buffer_pool.h"
+#include "modules/video_coding/include/video_error_codes.h"
+#include "rtc_base/logging.h"
+#if defined(WEBRTC_MOZILLA_BUILD)
+#include "dav1d/dav1d.h"
+#include "libyuv/include/libyuv/convert.h"
+#else
+#include "third_party/dav1d/libdav1d/include/dav1d/dav1d.h"
+#include "third_party/libyuv/include/libyuv/convert.h"
+#endif
+
+namespace webrtc {
+namespace {
+
+class Dav1dDecoder : public VideoDecoder {
+ public:
+ Dav1dDecoder();
+ Dav1dDecoder(const Dav1dDecoder&) = delete;
+ Dav1dDecoder& operator=(const Dav1dDecoder&) = delete;
+
+ ~Dav1dDecoder() override;
+
+ bool Configure(const Settings& settings) override;
+ int32_t Decode(const EncodedImage& encoded_image,
+ bool missing_frames,
+ int64_t render_time_ms) override;
+ int32_t RegisterDecodeCompleteCallback(
+ DecodedImageCallback* callback) override;
+ int32_t Release() override;
+ DecoderInfo GetDecoderInfo() const override;
+ const char* ImplementationName() const override;
+
+ private:
+ VideoFrameBufferPool buffer_pool_;
+ Dav1dContext* context_ = nullptr;
+ DecodedImageCallback* decode_complete_callback_ = nullptr;
+};
+
+class ScopedDav1dData {
+ public:
+ ~ScopedDav1dData() { dav1d_data_unref(&data_); }
+
+ Dav1dData& Data() { return data_; }
+
+ private:
+ Dav1dData data_ = {};
+};
+
+class ScopedDav1dPicture {
+ public:
+ ~ScopedDav1dPicture() { dav1d_picture_unref(&picture_); }
+
+ Dav1dPicture& Picture() { return picture_; }
+
+ private:
+ Dav1dPicture picture_ = {};
+};
+
+constexpr char kDav1dName[] = "dav1d";
+
+// Calling `dav1d_data_wrap` requires a `free_callback` to be registered.
+void NullFreeCallback(const uint8_t* buffer, void* opaque) {}
+
+Dav1dDecoder::Dav1dDecoder()
+ : buffer_pool_(/*zero_initialize=*/false, /*max_number_of_buffers=*/150) {}
+
+Dav1dDecoder::~Dav1dDecoder() {
+ Release();
+}
+
+bool Dav1dDecoder::Configure(const Settings& settings) {
+ Dav1dSettings s;
+ dav1d_default_settings(&s);
+
+ s.n_threads = std::max(2, settings.number_of_cores());
+ s.max_frame_delay = 1; // For low latency decoding.
+ s.all_layers = 0; // Don't output a frame for every spatial layer.
+ s.operating_point = 31; // Decode all operating points.
+
+ return dav1d_open(&context_, &s) == 0;
+}
+
+int32_t Dav1dDecoder::RegisterDecodeCompleteCallback(
+ DecodedImageCallback* decode_complete_callback) {
+ decode_complete_callback_ = decode_complete_callback;
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+int32_t Dav1dDecoder::Release() {
+ dav1d_close(&context_);
+ if (context_ != nullptr) {
+ return WEBRTC_VIDEO_CODEC_MEMORY;
+ }
+ buffer_pool_.Release();
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+VideoDecoder::DecoderInfo Dav1dDecoder::GetDecoderInfo() const {
+ DecoderInfo info;
+ info.implementation_name = kDav1dName;
+ info.is_hardware_accelerated = false;
+ return info;
+}
+
+const char* Dav1dDecoder::ImplementationName() const {
+ return kDav1dName;
+}
+
+int32_t Dav1dDecoder::Decode(const EncodedImage& encoded_image,
+ bool /*missing_frames*/,
+ int64_t /*render_time_ms*/) {
+ if (!context_ || decode_complete_callback_ == nullptr) {
+ return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
+ }
+
+ ScopedDav1dData scoped_dav1d_data;
+ Dav1dData& dav1d_data = scoped_dav1d_data.Data();
+ dav1d_data_wrap(&dav1d_data, encoded_image.data(), encoded_image.size(),
+ /*free_callback=*/&NullFreeCallback,
+ /*user_data=*/nullptr);
+
+ if (int decode_res = dav1d_send_data(context_, &dav1d_data)) {
+ RTC_LOG(LS_WARNING)
+ << "Dav1dDecoder::Decode decoding failed with error code "
+ << decode_res;
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+
+ ScopedDav1dPicture scoped_dav1d_picture;
+ Dav1dPicture& dav1d_picture = scoped_dav1d_picture.Picture();
+ if (int get_picture_res = dav1d_get_picture(context_, &dav1d_picture)) {
+ RTC_LOG(LS_WARNING)
+ << "Dav1dDecoder::Decode getting picture failed with error code "
+ << get_picture_res;
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+
+ // Only accept I420 pixel format and 8 bit depth.
+ if (dav1d_picture.p.layout != DAV1D_PIXEL_LAYOUT_I420 ||
+ dav1d_picture.p.bpc != 8) {
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+
+ rtc::scoped_refptr<I420Buffer> buffer =
+ buffer_pool_.CreateI420Buffer(dav1d_picture.p.w, dav1d_picture.p.h);
+ if (!buffer.get()) {
+ RTC_LOG(LS_WARNING)
+ << "Dav1dDecoder::Decode failed to get frame from the buffer pool.";
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+
+ uint8_t* y_data = static_cast<uint8_t*>(dav1d_picture.data[0]);
+ uint8_t* u_data = static_cast<uint8_t*>(dav1d_picture.data[1]);
+ uint8_t* v_data = static_cast<uint8_t*>(dav1d_picture.data[2]);
+ int y_stride = dav1d_picture.stride[0];
+ int uv_stride = dav1d_picture.stride[1];
+ libyuv::I420Copy(y_data, y_stride, //
+ u_data, uv_stride, //
+ v_data, uv_stride, //
+ buffer->MutableDataY(), buffer->StrideY(), //
+ buffer->MutableDataU(), buffer->StrideU(), //
+ buffer->MutableDataV(), buffer->StrideV(), //
+ dav1d_picture.p.w, //
+ dav1d_picture.p.h); //
+
+ VideoFrame decoded_frame = VideoFrame::Builder()
+ .set_video_frame_buffer(buffer)
+ .set_timestamp_rtp(encoded_image.Timestamp())
+ .set_ntp_time_ms(encoded_image.ntp_time_ms_)
+ .set_color_space(encoded_image.ColorSpace())
+ .build();
+
+ decode_complete_callback_->Decoded(decoded_frame, absl::nullopt,
+ absl::nullopt);
+
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+} // namespace
+
+std::unique_ptr<VideoDecoder> CreateDav1dDecoder() {
+ return std::make_unique<Dav1dDecoder>();
+}
+
+} // namespace webrtc
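
Callers are expected to drive the decoder above through the usual VideoDecoder lifecycle; a hedged sketch (the core count is an arbitrary example value):

    #include <memory>

    #include "api/video/encoded_image.h"
    #include "api/video_codecs/video_decoder.h"
    #include "modules/video_coding/codecs/av1/dav1d_decoder.h"
    #include "modules/video_coding/include/video_error_codes.h"

    // Sketch of the expected call order: Configure, register the
    // decode-complete callback, then Decode per encoded frame.
    int32_t DecodeOneFrame(const webrtc::EncodedImage& frame,
                           webrtc::DecodedImageCallback* callback) {
      std::unique_ptr<webrtc::VideoDecoder> decoder =
          webrtc::CreateDav1dDecoder();
      webrtc::VideoDecoder::Settings settings;
      settings.set_number_of_cores(2);  // feeds dav1d's n_threads above
      if (!decoder->Configure(settings)) {
        return WEBRTC_VIDEO_CODEC_ERROR;
      }
      decoder->RegisterDecodeCompleteCallback(callback);
      return decoder->Decode(frame, /*missing_frames=*/false,
                             /*render_time_ms=*/0);
    }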
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/av1/dav1d_decoder.h b/third_party/libwebrtc/modules/video_coding/codecs/av1/dav1d_decoder.h
new file mode 100644
index 0000000000..c9396d1e03
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/av1/dav1d_decoder.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef MODULES_VIDEO_CODING_CODECS_AV1_DAV1D_DECODER_H_
+#define MODULES_VIDEO_CODING_CODECS_AV1_DAV1D_DECODER_H_
+
+#include <memory>
+
+#include "api/video_codecs/video_decoder.h"
+
+namespace webrtc {
+
+std::unique_ptr<VideoDecoder> CreateDav1dDecoder();
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_CODECS_AV1_DAV1D_DECODER_H_
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/av1/dav1d_decoder_gn/moz.build b/third_party/libwebrtc/modules/video_coding/codecs/av1/dav1d_decoder_gn/moz.build
new file mode 100644
index 0000000000..c21b84284b
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/av1/dav1d_decoder_gn/moz.build
@@ -0,0 +1,218 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/media/libdav1d/",
+ "/media/libyuv/",
+ "/media/libyuv/libyuv/include/",
+ "/third_party/dav1d/include/dav1d/",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/video_coding/codecs/av1/dav1d_decoder.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "GLESv2",
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "dl",
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("dav1d_decoder_gn")
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/av1/libaom_av1_decoder.cc b/third_party/libwebrtc/modules/video_coding/codecs/av1/libaom_av1_decoder.cc
new file mode 100644
index 0000000000..b05a1f7539
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/av1/libaom_av1_decoder.cc
@@ -0,0 +1,200 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/video_coding/codecs/av1/libaom_av1_decoder.h"
+
+#include <stdint.h>
+
+#include <memory>
+
+#include "absl/types/optional.h"
+#include "api/scoped_refptr.h"
+#include "api/video/encoded_image.h"
+#include "api/video/i420_buffer.h"
+#include "api/video_codecs/video_decoder.h"
+#include "common_video/include/video_frame_buffer_pool.h"
+#include "modules/video_coding/include/video_error_codes.h"
+#include "rtc_base/logging.h"
+#include "third_party/libaom/source/libaom/aom/aom_decoder.h"
+#include "third_party/libaom/source/libaom/aom/aomdx.h"
+#include "third_party/libyuv/include/libyuv/convert.h"
+
+namespace webrtc {
+namespace {
+
+constexpr int kConfigLowBitDepth = 1; // 8-bits per luma/chroma sample.
+constexpr int kDecFlags = 0; // 0 signals no post processing.
+
+class LibaomAv1Decoder final : public VideoDecoder {
+ public:
+ LibaomAv1Decoder();
+ LibaomAv1Decoder(const LibaomAv1Decoder&) = delete;
+ LibaomAv1Decoder& operator=(const LibaomAv1Decoder&) = delete;
+ ~LibaomAv1Decoder();
+
+ // Implements VideoDecoder.
+ bool Configure(const Settings& settings) override;
+
+ // Decode an encoded video frame.
+ int32_t Decode(const EncodedImage& encoded_image,
+ bool missing_frames,
+ int64_t render_time_ms) override;
+
+ int32_t RegisterDecodeCompleteCallback(
+ DecodedImageCallback* callback) override;
+
+ int32_t Release() override;
+
+ DecoderInfo GetDecoderInfo() const override;
+ const char* ImplementationName() const override;
+
+ private:
+ aom_codec_ctx_t context_;
+ bool inited_;
+ // Pool of memory buffers to store decoded image data for application access.
+ VideoFrameBufferPool buffer_pool_;
+ DecodedImageCallback* decode_complete_callback_;
+};
+
+LibaomAv1Decoder::LibaomAv1Decoder()
+    : context_(),  // Value-initialize rather than default-initialize.
+ inited_(false),
+ buffer_pool_(false, /*max_number_of_buffers=*/150),
+ decode_complete_callback_(nullptr) {}
+
+LibaomAv1Decoder::~LibaomAv1Decoder() {
+ Release();
+}
+
+bool LibaomAv1Decoder::Configure(const Settings& settings) {
+ aom_codec_dec_cfg_t config = {};
+ config.threads = static_cast<unsigned int>(settings.number_of_cores());
+ config.allow_lowbitdepth = kConfigLowBitDepth;
+
+ aom_codec_err_t ret =
+ aom_codec_dec_init(&context_, aom_codec_av1_dx(), &config, kDecFlags);
+ if (ret != AOM_CODEC_OK) {
+ RTC_LOG(LS_WARNING) << "LibaomAv1Decoder::Configure returned " << ret
+ << " on aom_codec_dec_init.";
+ return false;
+ }
+ inited_ = true;
+ return true;
+}
+
+int32_t LibaomAv1Decoder::Decode(const EncodedImage& encoded_image,
+ bool missing_frames,
+ int64_t /*render_time_ms*/) {
+ if (!inited_) {
+ return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
+ }
+ if (decode_complete_callback_ == nullptr) {
+ return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
+ }
+
+ // Decode one video frame.
+ aom_codec_err_t ret =
+ aom_codec_decode(&context_, encoded_image.data(), encoded_image.size(),
+ /*user_priv=*/nullptr);
+ if (ret != AOM_CODEC_OK) {
+ RTC_LOG(LS_WARNING) << "LibaomAv1Decoder::Decode returned " << ret
+ << " on aom_codec_decode.";
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+
+ // Get decoded frame data.
+ int corrupted_frame = 0;
+ aom_codec_iter_t iter = nullptr;
+ while (aom_image_t* decoded_image = aom_codec_get_frame(&context_, &iter)) {
+ if (aom_codec_control(&context_, AOMD_GET_FRAME_CORRUPTED,
+ &corrupted_frame)) {
+      RTC_LOG(LS_WARNING) << "LibaomAv1Decoder::Decode "
+                             "AOMD_GET_FRAME_CORRUPTED failed.";
+ }
+ // Check that decoded image format is I420 and has 8-bit depth.
+ if (decoded_image->fmt != AOM_IMG_FMT_I420) {
+ RTC_LOG(LS_WARNING) << "LibaomAv1Decoder::Decode invalid image format";
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+
+ // Return decoded frame data.
+ int qp;
+ ret = aom_codec_control(&context_, AOMD_GET_LAST_QUANTIZER, &qp);
+ if (ret != AOM_CODEC_OK) {
+ RTC_LOG(LS_WARNING) << "LibaomAv1Decoder::Decode returned " << ret
+ << " on control AOME_GET_LAST_QUANTIZER.";
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+
+ // Allocate memory for decoded frame.
+ rtc::scoped_refptr<I420Buffer> buffer =
+ buffer_pool_.CreateI420Buffer(decoded_image->d_w, decoded_image->d_h);
+ if (!buffer.get()) {
+ // Pool has too many pending frames.
+ RTC_LOG(LS_WARNING) << "LibaomAv1Decoder::Decode returned due to lack of"
+ " space in decoded frame buffer pool.";
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+
+ // Copy decoded_image to decoded_frame.
+ libyuv::I420Copy(
+ decoded_image->planes[AOM_PLANE_Y], decoded_image->stride[AOM_PLANE_Y],
+ decoded_image->planes[AOM_PLANE_U], decoded_image->stride[AOM_PLANE_U],
+ decoded_image->planes[AOM_PLANE_V], decoded_image->stride[AOM_PLANE_V],
+ buffer->MutableDataY(), buffer->StrideY(), buffer->MutableDataU(),
+ buffer->StrideU(), buffer->MutableDataV(), buffer->StrideV(),
+ decoded_image->d_w, decoded_image->d_h);
+ VideoFrame decoded_frame = VideoFrame::Builder()
+ .set_video_frame_buffer(buffer)
+ .set_timestamp_rtp(encoded_image.Timestamp())
+ .set_ntp_time_ms(encoded_image.ntp_time_ms_)
+ .set_color_space(encoded_image.ColorSpace())
+ .build();
+
+ decode_complete_callback_->Decoded(decoded_frame, absl::nullopt,
+ absl::nullopt);
+ }
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+int32_t LibaomAv1Decoder::RegisterDecodeCompleteCallback(
+ DecodedImageCallback* decode_complete_callback) {
+ decode_complete_callback_ = decode_complete_callback;
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+int32_t LibaomAv1Decoder::Release() {
+ if (aom_codec_destroy(&context_) != AOM_CODEC_OK) {
+ return WEBRTC_VIDEO_CODEC_MEMORY;
+ }
+ buffer_pool_.Release();
+ inited_ = false;
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+VideoDecoder::DecoderInfo LibaomAv1Decoder::GetDecoderInfo() const {
+ DecoderInfo info;
+ info.implementation_name = "libaom";
+ info.is_hardware_accelerated = false;
+ return info;
+}
+
+const char* LibaomAv1Decoder::ImplementationName() const {
+ return "libaom";
+}
+
+} // namespace
+
+ABSL_CONST_INIT const bool kIsLibaomAv1DecoderSupported = true;
+
+std::unique_ptr<VideoDecoder> CreateLibaomAv1Decoder() {
+ return std::make_unique<LibaomAv1Decoder>();
+}
+
+} // namespace webrtc
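
One detail worth calling out in Decode above: a single aom_codec_decode call can emit more than one picture (for example, several spatial layers of one temporal unit), which is why results are drained in a loop. The skeleton of that idiom (sketch only, using the standard aom decoder API):

    #include "third_party/libaom/source/libaom/aom/aom_decoder.h"

    // Drain every picture produced by the most recent aom_codec_decode call;
    // aom_codec_get_frame returns null once the iterator is exhausted.
    void DrainDecodedFrames(aom_codec_ctx_t* context) {
      aom_codec_iter_t iter = nullptr;
      while (aom_image_t* img = aom_codec_get_frame(context, &iter)) {
        (void)img;  // convert to I420 and deliver, as Decode does above
      }
    }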
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/av1/libaom_av1_decoder.h b/third_party/libwebrtc/modules/video_coding/codecs/av1/libaom_av1_decoder.h
new file mode 100644
index 0000000000..9b01285c73
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/av1/libaom_av1_decoder.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef MODULES_VIDEO_CODING_CODECS_AV1_LIBAOM_AV1_DECODER_H_
+#define MODULES_VIDEO_CODING_CODECS_AV1_LIBAOM_AV1_DECODER_H_
+
+#include <memory>
+
+#include "absl/base/attributes.h"
+#include "api/video_codecs/video_decoder.h"
+
+namespace webrtc {
+
+ABSL_CONST_INIT extern const bool kIsLibaomAv1DecoderSupported;
+
+std::unique_ptr<VideoDecoder> CreateLibaomAv1Decoder();
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_CODECS_AV1_LIBAOM_AV1_DECODER_H_
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/av1/libaom_av1_decoder_absent.cc b/third_party/libwebrtc/modules/video_coding/codecs/av1/libaom_av1_decoder_absent.cc
new file mode 100644
index 0000000000..1b387d17ed
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/av1/libaom_av1_decoder_absent.cc
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/video_coding/codecs/av1/libaom_av1_decoder.h"
+
+#include <memory>
+
+#include "api/video_codecs/video_decoder.h"
+
+namespace webrtc {
+
+ABSL_CONST_INIT const bool kIsLibaomAv1DecoderSupported = false;
+
+std::unique_ptr<VideoDecoder> CreateLibaomAv1Decoder() {
+ return nullptr;
+}
+
+} // namespace webrtc
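
This stub keeps kIsLibaomAv1DecoderSupported and the factory consistent, so callers can feature-test without knowing whether enable_libaom was set. A minimal sketch:

    #include <memory>

    #include "api/video_codecs/video_decoder.h"
    #include "modules/video_coding/codecs/av1/libaom_av1_decoder.h"

    // Either check works: with the absent stub linked in, the flag is false
    // and the factory returns nullptr.
    std::unique_ptr<webrtc::VideoDecoder> MaybeCreateAv1Decoder() {
      if (!webrtc::kIsLibaomAv1DecoderSupported) {
        return nullptr;
      }
      return webrtc::CreateLibaomAv1Decoder();
    }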
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/av1/libaom_av1_decoder_gn/moz.build b/third_party/libwebrtc/modules/video_coding/codecs/av1/libaom_av1_decoder_gn/moz.build
new file mode 100644
index 0000000000..15ec27b21d
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/av1/libaom_av1_decoder_gn/moz.build
@@ -0,0 +1,205 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/video_coding/codecs/av1/libaom_av1_decoder_absent.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("aom_av1_decoder_gn")
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/av1/libaom_av1_encoder.cc b/third_party/libwebrtc/modules/video_coding/codecs/av1/libaom_av1_encoder.cc
new file mode 100644
index 0000000000..807513bc7b
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/av1/libaom_av1_encoder.cc
@@ -0,0 +1,819 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/video_coding/codecs/av1/libaom_av1_encoder.h"
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <memory>
+#include <utility>
+#include <vector>
+
+#include "absl/algorithm/container.h"
+#include "absl/base/macros.h"
+#include "absl/types/optional.h"
+#include "api/scoped_refptr.h"
+#include "api/video/encoded_image.h"
+#include "api/video/i420_buffer.h"
+#include "api/video/video_frame.h"
+#include "api/video_codecs/video_codec.h"
+#include "api/video_codecs/video_encoder.h"
+#include "modules/video_coding/include/video_codec_interface.h"
+#include "modules/video_coding/include/video_error_codes.h"
+#include "modules/video_coding/svc/create_scalability_structure.h"
+#include "modules/video_coding/svc/scalable_video_controller.h"
+#include "modules/video_coding/svc/scalable_video_controller_no_layering.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "third_party/libaom/source/libaom/aom/aom_codec.h"
+#include "third_party/libaom/source/libaom/aom/aom_encoder.h"
+#include "third_party/libaom/source/libaom/aom/aomcx.h"
+
+#define SET_ENCODER_PARAM_OR_RETURN_ERROR(param_id, param_value) \
+ do { \
+ if (!SetEncoderControlParameters(param_id, param_value)) { \
+ return WEBRTC_VIDEO_CODEC_ERROR; \
+ } \
+ } while (0)
+
+namespace webrtc {
+namespace {
+
+// Encoder configuration parameters
+constexpr int kQpMin = 10;
+constexpr int kUsageProfile = AOM_USAGE_REALTIME;
+constexpr int kMinQindex = 145; // Min qindex threshold for QP scaling.
+constexpr int kMaxQindex = 205; // Max qindex threshold for QP scaling.
+constexpr int kBitDepth = 8;
+constexpr int kLagInFrames = 0; // No look ahead.
+constexpr int kRtpTicksPerSecond = 90000;
+constexpr float kMinimumFrameRate = 1.0;
+
+aom_superblock_size_t GetSuperblockSize(int width, int height, int threads) {
+ int resolution = width * height;
+ if (threads >= 4 && resolution >= 960 * 540 && resolution < 1920 * 1080)
+ return AOM_SUPERBLOCK_SIZE_64X64;
+ else
+ return AOM_SUPERBLOCK_SIZE_DYNAMIC;
+}
+
+class LibaomAv1Encoder final : public VideoEncoder {
+ public:
+ explicit LibaomAv1Encoder(
+ const absl::optional<LibaomAv1EncoderAuxConfig>& aux_config);
+ ~LibaomAv1Encoder();
+
+ int InitEncode(const VideoCodec* codec_settings,
+ const Settings& settings) override;
+
+ int32_t RegisterEncodeCompleteCallback(
+ EncodedImageCallback* encoded_image_callback) override;
+
+ int32_t Release() override;
+
+ int32_t Encode(const VideoFrame& frame,
+ const std::vector<VideoFrameType>* frame_types) override;
+
+ void SetRates(const RateControlParameters& parameters) override;
+
+ EncoderInfo GetEncoderInfo() const override;
+
+ private:
+ template <typename P>
+ bool SetEncoderControlParameters(int param_id, P param_value);
+
+ // Get value to be used for encoder cpu_speed setting
+ int GetCpuSpeed(int width, int height);
+
+ // Determine number of encoder threads to use.
+ int NumberOfThreads(int width, int height, int number_of_cores);
+
+ bool SvcEnabled() const { return svc_params_.has_value(); }
+  // Fills the svc_params_ member value. Returns false on error.
+ bool SetSvcParams(ScalableVideoController::StreamLayersConfig svc_config);
+  // Configures the encoder with the layer id for the next frame.
+ void SetSvcLayerId(
+ const ScalableVideoController::LayerFrameConfig& layer_frame);
+  // Configures which buffers the next frame updates and which it may reference.
+ void SetSvcRefFrameConfig(
+ const ScalableVideoController::LayerFrameConfig& layer_frame);
+ // If pixel format doesn't match, then reallocate.
+ void MaybeRewrapImgWithFormat(const aom_img_fmt_t fmt);
+
+ std::unique_ptr<ScalableVideoController> svc_controller_;
+ bool inited_;
+ bool rates_configured_;
+ absl::optional<aom_svc_params_t> svc_params_;
+ VideoCodec encoder_settings_;
+ absl::optional<LibaomAv1EncoderAuxConfig> aux_config_;
+ aom_image_t* frame_for_encode_;
+ aom_codec_ctx_t ctx_;
+ aom_codec_enc_cfg_t cfg_;
+ EncodedImageCallback* encoded_image_callback_;
+};
+
+int32_t VerifyCodecSettings(const VideoCodec& codec_settings) {
+ if (codec_settings.width < 1) {
+ return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+ }
+ if (codec_settings.height < 1) {
+ return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+ }
+  // maxBitrate == 0 represents an unspecified maxBitrate.
+ if (codec_settings.maxBitrate > 0 &&
+ codec_settings.minBitrate > codec_settings.maxBitrate) {
+ return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+ }
+ if (codec_settings.maxBitrate > 0 &&
+ codec_settings.startBitrate > codec_settings.maxBitrate) {
+ return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+ }
+ if (codec_settings.startBitrate < codec_settings.minBitrate) {
+ return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+ }
+ if (codec_settings.maxFramerate < 1) {
+ return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+ }
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+LibaomAv1Encoder::LibaomAv1Encoder(
+ const absl::optional<LibaomAv1EncoderAuxConfig>& aux_config)
+ : inited_(false),
+ rates_configured_(false),
+ aux_config_(aux_config),
+ frame_for_encode_(nullptr),
+ encoded_image_callback_(nullptr) {}
+
+LibaomAv1Encoder::~LibaomAv1Encoder() {
+ Release();
+}
+
+int LibaomAv1Encoder::InitEncode(const VideoCodec* codec_settings,
+ const Settings& settings) {
+ if (codec_settings == nullptr) {
+ RTC_LOG(LS_WARNING) << "No codec settings provided to "
+ "LibaomAv1Encoder.";
+ return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+ }
+ if (settings.number_of_cores < 1) {
+ return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+ }
+ if (inited_) {
+ RTC_LOG(LS_WARNING) << "Initing LibaomAv1Encoder without first releasing.";
+ Release();
+ }
+ encoder_settings_ = *codec_settings;
+
+ // Sanity checks for encoder configuration.
+ const int32_t result = VerifyCodecSettings(encoder_settings_);
+ if (result < 0) {
+ RTC_LOG(LS_WARNING) << "Incorrect codec settings provided to "
+ "LibaomAv1Encoder.";
+ return result;
+ }
+ if (encoder_settings_.numberOfSimulcastStreams > 1) {
+ RTC_LOG(LS_WARNING) << "Simulcast is not implemented by LibaomAv1Encoder.";
+    return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+ }
+ absl::optional<ScalabilityMode> scalability_mode =
+ encoder_settings_.GetScalabilityMode();
+ if (!scalability_mode.has_value()) {
+ RTC_LOG(LS_WARNING) << "Scalability mode is not set, using 'L1T1'.";
+ scalability_mode = ScalabilityMode::kL1T1;
+ }
+ svc_controller_ = CreateScalabilityStructure(*scalability_mode);
+ if (svc_controller_ == nullptr) {
+ RTC_LOG(LS_WARNING) << "Failed to set scalability mode "
+ << static_cast<int>(*scalability_mode);
+ return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+ }
+
+ if (!SetSvcParams(svc_controller_->StreamConfig())) {
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+
+ // Initialize encoder configuration structure with default values
+ aom_codec_err_t ret =
+ aom_codec_enc_config_default(aom_codec_av1_cx(), &cfg_, kUsageProfile);
+ if (ret != AOM_CODEC_OK) {
+ RTC_LOG(LS_WARNING) << "LibaomAv1Encoder::EncodeInit returned " << ret
+ << " on aom_codec_enc_config_default.";
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+
+ // Overwrite default config with input encoder settings & RTC-relevant values.
+ cfg_.g_w = encoder_settings_.width;
+ cfg_.g_h = encoder_settings_.height;
+ cfg_.g_threads =
+ NumberOfThreads(cfg_.g_w, cfg_.g_h, settings.number_of_cores);
+ cfg_.g_timebase.num = 1;
+ cfg_.g_timebase.den = kRtpTicksPerSecond;
+ cfg_.rc_target_bitrate = encoder_settings_.maxBitrate; // kilobits/sec.
+ cfg_.g_input_bit_depth = kBitDepth;
+ cfg_.kf_mode = AOM_KF_DISABLED;
+ cfg_.rc_min_quantizer = kQpMin;
+ cfg_.rc_max_quantizer = encoder_settings_.qpMax;
+ cfg_.rc_undershoot_pct = 50;
+ cfg_.rc_overshoot_pct = 50;
+ cfg_.rc_buf_initial_sz = 600;
+ cfg_.rc_buf_optimal_sz = 600;
+ cfg_.rc_buf_sz = 1000;
+ cfg_.g_usage = kUsageProfile;
+ cfg_.g_error_resilient = 0;
+ // Low-latency settings.
+ cfg_.rc_end_usage = AOM_CBR; // Constant Bit Rate (CBR) mode
+ cfg_.g_pass = AOM_RC_ONE_PASS; // One-pass rate control
+ cfg_.g_lag_in_frames = kLagInFrames; // No look ahead when lag equals 0.
+
+ if (frame_for_encode_ != nullptr) {
+ aom_img_free(frame_for_encode_);
+ frame_for_encode_ = nullptr;
+ }
+
+ // Flag options: AOM_CODEC_USE_PSNR and AOM_CODEC_USE_HIGHBITDEPTH
+ aom_codec_flags_t flags = 0;
+
+ // Initialize an encoder instance.
+ ret = aom_codec_enc_init(&ctx_, aom_codec_av1_cx(), &cfg_, flags);
+ if (ret != AOM_CODEC_OK) {
+ RTC_LOG(LS_WARNING) << "LibaomAv1Encoder::EncodeInit returned " << ret
+ << " on aom_codec_enc_init.";
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+ inited_ = true;
+
+ // Set control parameters
+ SET_ENCODER_PARAM_OR_RETURN_ERROR(AOME_SET_CPUUSED,
+ GetCpuSpeed(cfg_.g_w, cfg_.g_h));
+ SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_ENABLE_CDEF, 1);
+ SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_ENABLE_TPL_MODEL, 0);
+ SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_DELTAQ_MODE, 0);
+ SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_ENABLE_ORDER_HINT, 0);
+ SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_AQ_MODE, 3);
+ SET_ENCODER_PARAM_OR_RETURN_ERROR(AOME_SET_MAX_INTRA_BITRATE_PCT, 300);
+ SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_COEFF_COST_UPD_FREQ, 3);
+ SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_MODE_COST_UPD_FREQ, 3);
+ SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_MV_COST_UPD_FREQ, 3);
+
+ if (codec_settings->mode == VideoCodecMode::kScreensharing) {
+ SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_TUNE_CONTENT,
+ AOM_CONTENT_SCREEN);
+ SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_ENABLE_PALETTE, 1);
+ } else {
+ SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_ENABLE_PALETTE, 0);
+ }
+
+ if (cfg_.g_threads == 4 && cfg_.g_w == 640 &&
+ (cfg_.g_h == 360 || cfg_.g_h == 480)) {
+ SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_TILE_ROWS,
+ static_cast<int>(log2(cfg_.g_threads)));
+ } else {
+ SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_TILE_COLUMNS,
+ static_cast<int>(log2(cfg_.g_threads)));
+ }
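+  // AV1E_SET_TILE_ROWS/COLUMNS take log2 values, so with e.g. 4 threads the
+  // control value log2(4) = 2 requests a 4-way tile split.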
+
+ SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_ROW_MT, 1);
+ SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_ENABLE_OBMC, 0);
+ SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_NOISE_SENSITIVITY, 0);
+ SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_ENABLE_WARPED_MOTION, 0);
+ SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_ENABLE_GLOBAL_MOTION, 0);
+ SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_ENABLE_REF_FRAME_MVS, 0);
+ SET_ENCODER_PARAM_OR_RETURN_ERROR(
+ AV1E_SET_SUPERBLOCK_SIZE,
+ GetSuperblockSize(cfg_.g_w, cfg_.g_h, cfg_.g_threads));
+ SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_ENABLE_CFL_INTRA, 0);
+ SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_ENABLE_SMOOTH_INTRA, 0);
+ SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_ENABLE_ANGLE_DELTA, 0);
+ SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_ENABLE_FILTER_INTRA, 0);
+ SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_INTRA_DEFAULT_TX_ONLY, 1);
+ SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_DISABLE_TRELLIS_QUANT, 1);
+ SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_ENABLE_DIST_WTD_COMP, 0);
+ SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_ENABLE_DIFF_WTD_COMP, 0);
+ SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_ENABLE_DUAL_FILTER, 0);
+ SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_ENABLE_INTERINTRA_COMP, 0);
+ SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_ENABLE_INTERINTRA_WEDGE, 0);
+ SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_ENABLE_INTRA_EDGE_FILTER, 0);
+ SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_ENABLE_INTRABC, 0);
+ SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_ENABLE_MASKED_COMP, 0);
+ SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_ENABLE_PAETH_INTRA, 0);
+ SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_ENABLE_QM, 0);
+ SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_ENABLE_RECT_PARTITIONS, 0);
+ SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_ENABLE_RESTORATION, 0);
+ SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_ENABLE_SMOOTH_INTERINTRA, 0);
+ SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_ENABLE_TX64, 0);
+ SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_MAX_REFERENCE_FRAMES, 3);
+
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+template <typename P>
+bool LibaomAv1Encoder::SetEncoderControlParameters(int param_id,
+ P param_value) {
+ aom_codec_err_t error_code = aom_codec_control(&ctx_, param_id, param_value);
+ if (error_code != AOM_CODEC_OK) {
+ RTC_LOG(LS_WARNING)
+ << "LibaomAv1Encoder::SetEncoderControlParameters returned "
+ << error_code << " on id: " << param_id << ".";
+ }
+ return error_code == AOM_CODEC_OK;
+}
+
+// Only positive speeds; the range for real-time coding currently is 6 - 8.
+// Lower means slower/better quality, higher means faster/lower quality.
+int LibaomAv1Encoder::GetCpuSpeed(int width, int height) {
+ if (aux_config_) {
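+    // Illustrative lookup: with an aux config of {{320 * 180, 8},
+    // {640 * 360, 9}}, lower_bound maps a 480x270 frame to speed 9, i.e. the
+    // first entry whose max pixel count covers the frame; larger frames fall
+    // through to the default of 10 below.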
+ if (auto it = aux_config_->max_pixel_count_to_cpu_speed.lower_bound(width *
+ height);
+ it != aux_config_->max_pixel_count_to_cpu_speed.end()) {
+ return it->second;
+ }
+
+ return 10;
+ } else {
+ // For smaller resolutions, use lower speed setting (get some coding gain at
+ // the cost of increased encoding complexity).
+ switch (encoder_settings_.GetVideoEncoderComplexity()) {
+ case VideoCodecComplexity::kComplexityHigh:
+ if (width * height <= 320 * 180)
+ return 8;
+ else if (width * height <= 640 * 360)
+ return 9;
+ else
+ return 10;
+ case VideoCodecComplexity::kComplexityHigher:
+ if (width * height <= 320 * 180)
+ return 7;
+ else if (width * height <= 640 * 360)
+ return 8;
+ else if (width * height <= 1280 * 720)
+ return 9;
+ else
+ return 10;
+ case VideoCodecComplexity::kComplexityMax:
+ if (width * height <= 320 * 180)
+ return 6;
+ else if (width * height <= 640 * 360)
+ return 7;
+ else if (width * height <= 1280 * 720)
+ return 8;
+ else
+ return 9;
+ default:
+ return 10;
+ }
+ }
+}
+
+int LibaomAv1Encoder::NumberOfThreads(int width,
+ int height,
+ int number_of_cores) {
+  // Keep the number of encoder threads equal to the possible number of
+  // column/row tiles, which is (1, 2, 4, 8). See the comments on
+  // AV1E_SET_TILE_COLUMNS/ROWS in InitEncode() above.
+ if (width * height >= 640 * 360 && number_of_cores > 4) {
+ return 4;
+ } else if (width * height >= 320 * 180 && number_of_cores > 2) {
+ return 2;
+ } else {
+// Use 2 threads for low res on ARM.
+#if defined(WEBRTC_ARCH_ARM) || defined(WEBRTC_ARCH_ARM64) || \
+ defined(WEBRTC_ANDROID)
+ if (width * height >= 320 * 180 && number_of_cores > 2) {
+ return 2;
+ }
+#endif
+    // Use 1 thread for resolutions smaller than VGA.
+ return 1;
+ }
+}
+
+bool LibaomAv1Encoder::SetSvcParams(
+ ScalableVideoController::StreamLayersConfig svc_config) {
+ bool svc_enabled =
+ svc_config.num_spatial_layers > 1 || svc_config.num_temporal_layers > 1;
+ if (!svc_enabled) {
+ svc_params_ = absl::nullopt;
+ return true;
+ }
+ if (svc_config.num_spatial_layers < 1 || svc_config.num_spatial_layers > 4) {
+ RTC_LOG(LS_WARNING) << "Av1 supports up to 4 spatial layers. "
+ << svc_config.num_spatial_layers << " configured.";
+ return false;
+ }
+ if (svc_config.num_temporal_layers < 1 ||
+ svc_config.num_temporal_layers > 8) {
+ RTC_LOG(LS_WARNING) << "Av1 supports up to 8 temporal layers. "
+ << svc_config.num_temporal_layers << " configured.";
+ return false;
+ }
+ aom_svc_params_t& svc_params = svc_params_.emplace();
+ svc_params.number_spatial_layers = svc_config.num_spatial_layers;
+ svc_params.number_temporal_layers = svc_config.num_temporal_layers;
+
+ int num_layers =
+ svc_config.num_spatial_layers * svc_config.num_temporal_layers;
+ for (int i = 0; i < num_layers; ++i) {
+ svc_params.min_quantizers[i] = kQpMin;
+ svc_params.max_quantizers[i] = encoder_settings_.qpMax;
+ }
+
+ // Assume each temporal layer doubles framerate.
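+  // E.g. with 3 temporal layers the factors become {4, 2, 1}: T0 alone runs
+  // at 1/4 of the full framerate, T0+T1 at 1/2, and all layers at full rate.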
+ for (int tid = 0; tid < svc_config.num_temporal_layers; ++tid) {
+ svc_params.framerate_factor[tid] =
+ 1 << (svc_config.num_temporal_layers - tid - 1);
+ }
+
+ for (int sid = 0; sid < svc_config.num_spatial_layers; ++sid) {
+ svc_params.scaling_factor_num[sid] = svc_config.scaling_factor_num[sid];
+ svc_params.scaling_factor_den[sid] = svc_config.scaling_factor_den[sid];
+ }
+
+ return true;
+}
+
+void LibaomAv1Encoder::SetSvcLayerId(
+ const ScalableVideoController::LayerFrameConfig& layer_frame) {
+ aom_svc_layer_id_t layer_id = {};
+ layer_id.spatial_layer_id = layer_frame.SpatialId();
+ layer_id.temporal_layer_id = layer_frame.TemporalId();
+ SetEncoderControlParameters(AV1E_SET_SVC_LAYER_ID, &layer_id);
+}
+
+void LibaomAv1Encoder::SetSvcRefFrameConfig(
+ const ScalableVideoController::LayerFrameConfig& layer_frame) {
+  // Buffer name to use for each layer_frame.buffers position. In particular,
+  // when 2 buffers are referenced, prefer to name them last and golden,
+  // because the av1 bitstream format has dedicated fields for these names.
+ // See last_frame_idx and golden_frame_idx in the av1 spec
+ // https://aomediacodec.github.io/av1-spec/av1-spec.pdf
+  static constexpr int kPreferredSlotName[] = {0,  // Last
+                                               3,  // Golden
+                                               1, 2, 4, 5, 6};
+ static constexpr int kAv1NumBuffers = 8;
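+  // E.g. a layer frame referencing buffers {5, 2} (in that order, illustrative
+  // ids) maps buffer 5 to the LAST slot and buffer 2 to the GOLDEN slot.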
+
+ aom_svc_ref_frame_config_t ref_frame_config = {};
+  RTC_CHECK_LE(layer_frame.Buffers().size(), ABSL_ARRAYSIZE(kPreferredSlotName));
+ for (size_t i = 0; i < layer_frame.Buffers().size(); ++i) {
+ const CodecBufferUsage& buffer = layer_frame.Buffers()[i];
+    int slot_name = kPreferredSlotName[i];
+ RTC_CHECK_GE(buffer.id, 0);
+ RTC_CHECK_LT(buffer.id, kAv1NumBuffers);
+ ref_frame_config.ref_idx[slot_name] = buffer.id;
+ if (buffer.referenced) {
+ ref_frame_config.reference[slot_name] = 1;
+ }
+ if (buffer.updated) {
+ ref_frame_config.refresh[buffer.id] = 1;
+ }
+ }
+
+ SetEncoderControlParameters(AV1E_SET_SVC_REF_FRAME_CONFIG, &ref_frame_config);
+}
+
+int32_t LibaomAv1Encoder::RegisterEncodeCompleteCallback(
+ EncodedImageCallback* encoded_image_callback) {
+ encoded_image_callback_ = encoded_image_callback;
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+int32_t LibaomAv1Encoder::Release() {
+ if (frame_for_encode_ != nullptr) {
+ aom_img_free(frame_for_encode_);
+ frame_for_encode_ = nullptr;
+ }
+ if (inited_) {
+ if (aom_codec_destroy(&ctx_)) {
+ return WEBRTC_VIDEO_CODEC_MEMORY;
+ }
+ inited_ = false;
+ }
+ rates_configured_ = false;
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+void LibaomAv1Encoder::MaybeRewrapImgWithFormat(const aom_img_fmt_t fmt) {
+ if (!frame_for_encode_) {
+ frame_for_encode_ =
+ aom_img_wrap(nullptr, fmt, cfg_.g_w, cfg_.g_h, 1, nullptr);
+
+ } else if (frame_for_encode_->fmt != fmt) {
+ RTC_LOG(LS_INFO) << "Switching AV1 encoder pixel format to "
+ << (fmt == AOM_IMG_FMT_NV12 ? "NV12" : "I420");
+ aom_img_free(frame_for_encode_);
+ frame_for_encode_ =
+ aom_img_wrap(nullptr, fmt, cfg_.g_w, cfg_.g_h, 1, nullptr);
+ }
+ // else no-op since the image is already in the right format.
+}
+
+int32_t LibaomAv1Encoder::Encode(
+ const VideoFrame& frame,
+ const std::vector<VideoFrameType>* frame_types) {
+ if (!inited_ || encoded_image_callback_ == nullptr || !rates_configured_) {
+ return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
+ }
+
+ bool keyframe_required =
+ frame_types != nullptr &&
+ absl::c_linear_search(*frame_types, VideoFrameType::kVideoFrameKey);
+
+ std::vector<ScalableVideoController::LayerFrameConfig> layer_frames =
+ svc_controller_->NextFrameConfig(keyframe_required);
+
+ if (layer_frames.empty()) {
+ RTC_LOG(LS_ERROR) << "SVCController returned no configuration for a frame.";
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+
+ rtc::scoped_refptr<VideoFrameBuffer> buffer = frame.video_frame_buffer();
+ absl::InlinedVector<VideoFrameBuffer::Type, kMaxPreferredPixelFormats>
+ supported_formats = {VideoFrameBuffer::Type::kI420,
+ VideoFrameBuffer::Type::kNV12};
+ rtc::scoped_refptr<VideoFrameBuffer> mapped_buffer;
+ if (buffer->type() != VideoFrameBuffer::Type::kNative) {
+ // `buffer` is already mapped.
+ mapped_buffer = buffer;
+ } else {
+ // Attempt to map to one of the supported formats.
+ mapped_buffer = buffer->GetMappedFrameBuffer(supported_formats);
+ }
+
+ // Convert input frame to I420, if needed.
+ if (!mapped_buffer ||
+ (absl::c_find(supported_formats, mapped_buffer->type()) ==
+ supported_formats.end() &&
+ mapped_buffer->type() != VideoFrameBuffer::Type::kI420A)) {
+ rtc::scoped_refptr<I420BufferInterface> converted_buffer(buffer->ToI420());
+ if (!converted_buffer) {
+ RTC_LOG(LS_ERROR) << "Failed to convert "
+ << VideoFrameBufferTypeToString(
+ frame.video_frame_buffer()->type())
+ << " image to I420. Can't encode frame.";
+ return WEBRTC_VIDEO_CODEC_ENCODER_FAILURE;
+ }
+ RTC_CHECK(converted_buffer->type() == VideoFrameBuffer::Type::kI420 ||
+ converted_buffer->type() == VideoFrameBuffer::Type::kI420A);
+
+ mapped_buffer = converted_buffer;
+ }
+
+ switch (mapped_buffer->type()) {
+ case VideoFrameBuffer::Type::kI420:
+ case VideoFrameBuffer::Type::kI420A: {
+ // Set frame_for_encode_ data pointers and strides.
+ MaybeRewrapImgWithFormat(AOM_IMG_FMT_I420);
+ auto i420_buffer = mapped_buffer->GetI420();
+ RTC_DCHECK(i420_buffer);
+ frame_for_encode_->planes[AOM_PLANE_Y] =
+ const_cast<unsigned char*>(i420_buffer->DataY());
+ frame_for_encode_->planes[AOM_PLANE_U] =
+ const_cast<unsigned char*>(i420_buffer->DataU());
+ frame_for_encode_->planes[AOM_PLANE_V] =
+ const_cast<unsigned char*>(i420_buffer->DataV());
+ frame_for_encode_->stride[AOM_PLANE_Y] = i420_buffer->StrideY();
+ frame_for_encode_->stride[AOM_PLANE_U] = i420_buffer->StrideU();
+ frame_for_encode_->stride[AOM_PLANE_V] = i420_buffer->StrideV();
+ break;
+ }
+ case VideoFrameBuffer::Type::kNV12: {
+ MaybeRewrapImgWithFormat(AOM_IMG_FMT_NV12);
+ const NV12BufferInterface* nv12_buffer = mapped_buffer->GetNV12();
+ RTC_DCHECK(nv12_buffer);
+ frame_for_encode_->planes[AOM_PLANE_Y] =
+ const_cast<unsigned char*>(nv12_buffer->DataY());
+ frame_for_encode_->planes[AOM_PLANE_U] =
+ const_cast<unsigned char*>(nv12_buffer->DataUV());
+ frame_for_encode_->planes[AOM_PLANE_V] = nullptr;
+ frame_for_encode_->stride[AOM_PLANE_Y] = nv12_buffer->StrideY();
+ frame_for_encode_->stride[AOM_PLANE_U] = nv12_buffer->StrideUV();
+ frame_for_encode_->stride[AOM_PLANE_V] = 0;
+ break;
+ }
+ default:
+ return WEBRTC_VIDEO_CODEC_ENCODER_FAILURE;
+ }
+
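+  // Frame duration in RTP ticks; e.g. at 30 fps this is 90000 / 30 = 3000
+  // ticks per frame.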
+ const uint32_t duration =
+ kRtpTicksPerSecond / static_cast<float>(encoder_settings_.maxFramerate);
+
+ const size_t num_spatial_layers =
+ svc_params_ ? svc_params_->number_spatial_layers : 1;
+ auto next_layer_frame = layer_frames.begin();
+ for (size_t i = 0; i < num_spatial_layers; ++i) {
+ // The libaom AV1 encoder requires that `aom_codec_encode` is called for
+ // every spatial layer, even if the configured bitrate for that layer is
+ // zero. For zero bitrate spatial layers no frames will be produced.
+ absl::optional<ScalableVideoController::LayerFrameConfig>
+ non_encoded_layer_frame;
+ ScalableVideoController::LayerFrameConfig* layer_frame;
+ if (next_layer_frame != layer_frames.end() &&
+ next_layer_frame->SpatialId() == static_cast<int>(i)) {
+ layer_frame = &*next_layer_frame;
+ ++next_layer_frame;
+ } else {
+ // For layers that are not encoded only the spatial id matters.
+ non_encoded_layer_frame.emplace().S(i);
+ layer_frame = &*non_encoded_layer_frame;
+ }
+ const bool end_of_picture = (next_layer_frame == layer_frames.end());
+
+ aom_enc_frame_flags_t flags =
+ layer_frame->IsKeyframe() ? AOM_EFLAG_FORCE_KF : 0;
+
+ if (SvcEnabled()) {
+ SetSvcLayerId(*layer_frame);
+ SetSvcRefFrameConfig(*layer_frame);
+
+ SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_ERROR_RESILIENT_MODE,
+ layer_frame->TemporalId() > 0 ? 1 : 0);
+ }
+
+ // Encode a frame.
+ aom_codec_err_t ret = aom_codec_encode(&ctx_, frame_for_encode_,
+ frame.timestamp(), duration, flags);
+ if (ret != AOM_CODEC_OK) {
+ RTC_LOG(LS_WARNING) << "LibaomAv1Encoder::Encode returned " << ret
+ << " on aom_codec_encode.";
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+
+ if (non_encoded_layer_frame) {
+ continue;
+ }
+
+ // Get encoded image data.
+ EncodedImage encoded_image;
+ aom_codec_iter_t iter = nullptr;
+ int data_pkt_count = 0;
+ while (const aom_codec_cx_pkt_t* pkt =
+ aom_codec_get_cx_data(&ctx_, &iter)) {
+ if (pkt->kind == AOM_CODEC_CX_FRAME_PKT && pkt->data.frame.sz > 0) {
+ if (data_pkt_count > 0) {
+ RTC_LOG(LS_WARNING) << "LibaomAv1Encoder::Encoder returned more than "
+ "one data packet for an input video frame.";
+ Release();
+ }
+ encoded_image.SetEncodedData(EncodedImageBuffer::Create(
+ /*data=*/static_cast<const uint8_t*>(pkt->data.frame.buf),
+ /*size=*/pkt->data.frame.sz));
+
+ if ((pkt->data.frame.flags & AOM_EFLAG_FORCE_KF) != 0) {
+ layer_frame->Keyframe();
+ }
+
+ encoded_image._frameType = layer_frame->IsKeyframe()
+ ? VideoFrameType::kVideoFrameKey
+ : VideoFrameType::kVideoFrameDelta;
+ encoded_image.SetTimestamp(frame.timestamp());
+ encoded_image.capture_time_ms_ = frame.render_time_ms();
+ encoded_image.rotation_ = frame.rotation();
+ encoded_image.content_type_ = VideoContentType::UNSPECIFIED;
+ // If encoded image width/height info are added to aom_codec_cx_pkt_t,
+ // use those values in lieu of the values in frame.
+ if (svc_params_) {
+ int n = svc_params_->scaling_factor_num[layer_frame->SpatialId()];
+ int d = svc_params_->scaling_factor_den[layer_frame->SpatialId()];
+ encoded_image._encodedWidth = cfg_.g_w * n / d;
+ encoded_image._encodedHeight = cfg_.g_h * n / d;
+ encoded_image.SetSpatialIndex(layer_frame->SpatialId());
+ encoded_image.SetTemporalIndex(layer_frame->TemporalId());
+ } else {
+ encoded_image._encodedWidth = cfg_.g_w;
+ encoded_image._encodedHeight = cfg_.g_h;
+ }
+ encoded_image.timing_.flags = VideoSendTiming::kInvalid;
+
+ int qp = -1;
+ SET_ENCODER_PARAM_OR_RETURN_ERROR(AOME_GET_LAST_QUANTIZER, &qp);
+ encoded_image.qp_ = qp;
+
+ encoded_image.SetColorSpace(frame.color_space());
+ ++data_pkt_count;
+ }
+ }
+
+ // Deliver encoded image data.
+ if (encoded_image.size() > 0) {
+ CodecSpecificInfo codec_specific_info;
+ codec_specific_info.codecType = kVideoCodecAV1;
+ codec_specific_info.end_of_picture = end_of_picture;
+ bool is_keyframe = layer_frame->IsKeyframe();
+ codec_specific_info.generic_frame_info =
+ svc_controller_->OnEncodeDone(*layer_frame);
+ if (is_keyframe && codec_specific_info.generic_frame_info) {
+ codec_specific_info.template_structure =
+ svc_controller_->DependencyStructure();
+ auto& resolutions = codec_specific_info.template_structure->resolutions;
+ if (SvcEnabled()) {
+ resolutions.resize(svc_params_->number_spatial_layers);
+ for (int sid = 0; sid < svc_params_->number_spatial_layers; ++sid) {
+ int n = svc_params_->scaling_factor_num[sid];
+ int d = svc_params_->scaling_factor_den[sid];
+ resolutions[sid] =
+ RenderResolution(cfg_.g_w * n / d, cfg_.g_h * n / d);
+ }
+ } else {
+ resolutions = {RenderResolution(cfg_.g_w, cfg_.g_h)};
+ }
+ }
+ encoded_image_callback_->OnEncodedImage(encoded_image,
+ &codec_specific_info);
+ }
+ }
+
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+void LibaomAv1Encoder::SetRates(const RateControlParameters& parameters) {
+ if (!inited_) {
+ RTC_LOG(LS_WARNING) << "SetRates() while encoder is not initialized";
+ return;
+ }
+ if (parameters.framerate_fps < kMinimumFrameRate) {
+ RTC_LOG(LS_WARNING) << "Unsupported framerate (must be >= "
+ << kMinimumFrameRate
+ << " ): " << parameters.framerate_fps;
+ return;
+ }
+ if (parameters.bitrate.get_sum_bps() == 0) {
+ RTC_LOG(LS_WARNING) << "Attempt to set target bit rate to zero";
+ return;
+ }
+
+  // The bitrates calculated internally in libaom when `AV1E_SET_SVC_PARAMS` is
+  // called depend on the currently configured `rc_target_bitrate`. If the
+  // total target bitrate is not updated first, a division by zero could occur.
+ svc_controller_->OnRatesUpdated(parameters.bitrate);
+ cfg_.rc_target_bitrate = parameters.bitrate.get_sum_kbps();
+ aom_codec_err_t error_code = aom_codec_enc_config_set(&ctx_, &cfg_);
+ if (error_code != AOM_CODEC_OK) {
+ RTC_LOG(LS_WARNING) << "Error configuring encoder, error code: "
+ << error_code;
+ }
+
+ if (SvcEnabled()) {
+ for (int sid = 0; sid < svc_params_->number_spatial_layers; ++sid) {
+      // The libaom bitrate for spatial id S and temporal id T means the
+      // bitrate of frames with spatial_id=S and temporal_id<=T,
+      // while `parameters.bitrate` provides the bitrate of frames with
+      // spatial_id=S and temporal_id=T.
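+      // E.g. with T0 allocated 60 kbps and T1 40 kbps (illustrative numbers),
+      // libaom is configured with 60 kbps for T0 and 100 kbps for T0+T1.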
+ int accumulated_bitrate_bps = 0;
+ for (int tid = 0; tid < svc_params_->number_temporal_layers; ++tid) {
+ int layer_index = sid * svc_params_->number_temporal_layers + tid;
+ accumulated_bitrate_bps += parameters.bitrate.GetBitrate(sid, tid);
+ // `svc_params.layer_target_bitrate` expects bitrate in kbps.
+ svc_params_->layer_target_bitrate[layer_index] =
+ accumulated_bitrate_bps / 1000;
+ }
+ }
+ SetEncoderControlParameters(AV1E_SET_SVC_PARAMS, &*svc_params_);
+ }
+
+ rates_configured_ = true;
+
+ // Set frame rate to closest integer value.
+ encoder_settings_.maxFramerate =
+ static_cast<uint32_t>(parameters.framerate_fps + 0.5);
+}
+
+VideoEncoder::EncoderInfo LibaomAv1Encoder::GetEncoderInfo() const {
+ EncoderInfo info;
+ info.supports_native_handle = false;
+ info.implementation_name = "libaom";
+ info.has_trusted_rate_controller = true;
+ info.is_hardware_accelerated = false;
+ info.scaling_settings = VideoEncoder::ScalingSettings(kMinQindex, kMaxQindex);
+ info.preferred_pixel_formats = {VideoFrameBuffer::Type::kI420,
+ VideoFrameBuffer::Type::kNV12};
+ if (SvcEnabled()) {
+ for (int sid = 0; sid < svc_params_->number_spatial_layers; ++sid) {
+ info.fps_allocation[sid].resize(svc_params_->number_temporal_layers);
+ for (int tid = 0; tid < svc_params_->number_temporal_layers; ++tid) {
+ info.fps_allocation[sid][tid] =
+ encoder_settings_.maxFramerate / svc_params_->framerate_factor[tid];
+ }
+ }
+ }
+ return info;
+}
+
+} // namespace
+
+std::unique_ptr<VideoEncoder> CreateLibaomAv1Encoder() {
+ return std::make_unique<LibaomAv1Encoder>(absl::nullopt);
+}
+
+std::unique_ptr<VideoEncoder> CreateLibaomAv1Encoder(
+ const LibaomAv1EncoderAuxConfig& aux_config) {
+ return std::make_unique<LibaomAv1Encoder>(aux_config);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/av1/libaom_av1_encoder.h b/third_party/libwebrtc/modules/video_coding/codecs/av1/libaom_av1_encoder.h
new file mode 100644
index 0000000000..2fd1d5a754
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/av1/libaom_av1_encoder.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef MODULES_VIDEO_CODING_CODECS_AV1_LIBAOM_AV1_ENCODER_H_
+#define MODULES_VIDEO_CODING_CODECS_AV1_LIBAOM_AV1_ENCODER_H_
+
+#include <map>
+#include <memory>
+
+#include "absl/strings/string_view.h"
+#include "api/video_codecs/video_encoder.h"
+
+namespace webrtc {
+struct LibaomAv1EncoderAuxConfig {
+ // A map of max pixel count --> cpu speed.
+ std::map<int, int> max_pixel_count_to_cpu_speed;
+};
+
+std::unique_ptr<VideoEncoder> CreateLibaomAv1Encoder();
+std::unique_ptr<VideoEncoder> CreateLibaomAv1Encoder(
+ const LibaomAv1EncoderAuxConfig& aux_config);
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_CODECS_AV1_LIBAOM_AV1_ENCODER_H_
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/av1/libaom_av1_encoder_unittest.cc b/third_party/libwebrtc/modules/video_coding/codecs/av1/libaom_av1_encoder_unittest.cc
new file mode 100644
index 0000000000..5243edc1e4
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/av1/libaom_av1_encoder_unittest.cc
@@ -0,0 +1,239 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/codecs/av1/libaom_av1_encoder.h"
+
+#include <memory>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/video_codecs/video_codec.h"
+#include "api/video_codecs/video_encoder.h"
+#include "modules/video_coding/codecs/test/encoded_video_frame_producer.h"
+#include "modules/video_coding/include/video_error_codes.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+using ::testing::ElementsAre;
+using ::testing::Eq;
+using ::testing::Field;
+using ::testing::IsEmpty;
+using ::testing::SizeIs;
+
+VideoCodec DefaultCodecSettings() {
+ VideoCodec codec_settings;
+ codec_settings.width = 320;
+ codec_settings.height = 180;
+ codec_settings.maxFramerate = 30;
+ codec_settings.maxBitrate = 1000;
+ codec_settings.qpMax = 63;
+ return codec_settings;
+}
+
+VideoEncoder::Settings DefaultEncoderSettings() {
+ return VideoEncoder::Settings(
+ VideoEncoder::Capabilities(/*loss_notification=*/false),
+ /*number_of_cores=*/1, /*max_payload_size=*/1200);
+}
+
+TEST(LibaomAv1EncoderTest, CanCreate) {
+ std::unique_ptr<VideoEncoder> encoder = CreateLibaomAv1Encoder();
+ EXPECT_TRUE(encoder);
+}
+
+TEST(LibaomAv1EncoderTest, InitAndRelease) {
+ std::unique_ptr<VideoEncoder> encoder = CreateLibaomAv1Encoder();
+ ASSERT_TRUE(encoder);
+ VideoCodec codec_settings = DefaultCodecSettings();
+ EXPECT_EQ(encoder->InitEncode(&codec_settings, DefaultEncoderSettings()),
+ WEBRTC_VIDEO_CODEC_OK);
+ EXPECT_EQ(encoder->Release(), WEBRTC_VIDEO_CODEC_OK);
+}
+
+TEST(LibaomAv1EncoderTest, NoBitrateOnTopLayerReflectedInActiveDecodeTargets) {
+ // Configure encoder with 2 temporal layers.
+ std::unique_ptr<VideoEncoder> encoder = CreateLibaomAv1Encoder();
+ VideoCodec codec_settings = DefaultCodecSettings();
+ codec_settings.SetScalabilityMode(ScalabilityMode::kL1T2);
+ ASSERT_EQ(encoder->InitEncode(&codec_settings, DefaultEncoderSettings()),
+ WEBRTC_VIDEO_CODEC_OK);
+
+ VideoEncoder::RateControlParameters rate_parameters;
+ rate_parameters.framerate_fps = 30;
+ rate_parameters.bitrate.SetBitrate(0, /*temporal_index=*/0, 300'000);
+ rate_parameters.bitrate.SetBitrate(0, /*temporal_index=*/1, 0);
+ encoder->SetRates(rate_parameters);
+
+ std::vector<EncodedVideoFrameProducer::EncodedFrame> encoded_frames =
+ EncodedVideoFrameProducer(*encoder).SetNumInputFrames(1).Encode();
+ ASSERT_THAT(encoded_frames, SizeIs(1));
+ ASSERT_NE(encoded_frames[0].codec_specific_info.generic_frame_info,
+ absl::nullopt);
+  // Assuming the L1T2 structure uses the 1st decode target for T0 frames and
+  // the 2nd decode target for T0+T1 frames, expect only the 1st decode target
+  // to be active.
+ EXPECT_EQ(encoded_frames[0]
+ .codec_specific_info.generic_frame_info->active_decode_targets,
+ 0b01);
+}
+
+TEST(LibaomAv1EncoderTest,
+ SpatialScalabilityInTemporalUnitReportedAsDeltaFrame) {
+ std::unique_ptr<VideoEncoder> encoder = CreateLibaomAv1Encoder();
+ VideoCodec codec_settings = DefaultCodecSettings();
+ codec_settings.SetScalabilityMode(ScalabilityMode::kL2T1);
+ ASSERT_EQ(encoder->InitEncode(&codec_settings, DefaultEncoderSettings()),
+ WEBRTC_VIDEO_CODEC_OK);
+
+ VideoEncoder::RateControlParameters rate_parameters;
+ rate_parameters.framerate_fps = 30;
+ rate_parameters.bitrate.SetBitrate(/*spatial_index=*/0, 0, 300'000);
+ rate_parameters.bitrate.SetBitrate(/*spatial_index=*/1, 0, 300'000);
+ encoder->SetRates(rate_parameters);
+
+ std::vector<EncodedVideoFrameProducer::EncodedFrame> encoded_frames =
+ EncodedVideoFrameProducer(*encoder).SetNumInputFrames(1).Encode();
+ ASSERT_THAT(encoded_frames, SizeIs(2));
+ EXPECT_THAT(encoded_frames[0].encoded_image._frameType,
+ Eq(VideoFrameType::kVideoFrameKey));
+ EXPECT_THAT(encoded_frames[1].encoded_image._frameType,
+ Eq(VideoFrameType::kVideoFrameDelta));
+}
+
+TEST(LibaomAv1EncoderTest, NoBitrateOnTopSpatialLayerProduceDeltaFrames) {
+ std::unique_ptr<VideoEncoder> encoder = CreateLibaomAv1Encoder();
+ VideoCodec codec_settings = DefaultCodecSettings();
+ codec_settings.SetScalabilityMode(ScalabilityMode::kL2T1);
+ ASSERT_EQ(encoder->InitEncode(&codec_settings, DefaultEncoderSettings()),
+ WEBRTC_VIDEO_CODEC_OK);
+
+ VideoEncoder::RateControlParameters rate_parameters;
+ rate_parameters.framerate_fps = 30;
+ rate_parameters.bitrate.SetBitrate(/*spatial_index=*/0, 0, 300'000);
+ rate_parameters.bitrate.SetBitrate(/*spatial_index=*/1, 0, 0);
+ encoder->SetRates(rate_parameters);
+
+ std::vector<EncodedVideoFrameProducer::EncodedFrame> encoded_frames =
+ EncodedVideoFrameProducer(*encoder).SetNumInputFrames(2).Encode();
+ ASSERT_THAT(encoded_frames, SizeIs(2));
+ EXPECT_THAT(encoded_frames[0].encoded_image._frameType,
+ Eq(VideoFrameType::kVideoFrameKey));
+ EXPECT_THAT(encoded_frames[1].encoded_image._frameType,
+ Eq(VideoFrameType::kVideoFrameDelta));
+}
+
+TEST(LibaomAv1EncoderTest, SetsEndOfPictureForLastFrameInTemporalUnit) {
+ VideoBitrateAllocation allocation;
+ allocation.SetBitrate(0, 0, 30000);
+ allocation.SetBitrate(1, 0, 40000);
+ allocation.SetBitrate(2, 0, 30000);
+
+ std::unique_ptr<VideoEncoder> encoder = CreateLibaomAv1Encoder();
+ VideoCodec codec_settings = DefaultCodecSettings();
+ // Configure encoder with 3 spatial layers.
+ codec_settings.SetScalabilityMode(ScalabilityMode::kL3T1);
+ codec_settings.maxBitrate = allocation.get_sum_kbps();
+ ASSERT_EQ(encoder->InitEncode(&codec_settings, DefaultEncoderSettings()),
+ WEBRTC_VIDEO_CODEC_OK);
+
+ encoder->SetRates(VideoEncoder::RateControlParameters(
+ allocation, codec_settings.maxFramerate));
+
+ std::vector<EncodedVideoFrameProducer::EncodedFrame> encoded_frames =
+ EncodedVideoFrameProducer(*encoder).SetNumInputFrames(2).Encode();
+ ASSERT_THAT(encoded_frames, SizeIs(6));
+ EXPECT_FALSE(encoded_frames[0].codec_specific_info.end_of_picture);
+ EXPECT_FALSE(encoded_frames[1].codec_specific_info.end_of_picture);
+ EXPECT_TRUE(encoded_frames[2].codec_specific_info.end_of_picture);
+ EXPECT_FALSE(encoded_frames[3].codec_specific_info.end_of_picture);
+ EXPECT_FALSE(encoded_frames[4].codec_specific_info.end_of_picture);
+ EXPECT_TRUE(encoded_frames[5].codec_specific_info.end_of_picture);
+}
+
+TEST(LibaomAv1EncoderTest, CheckOddDimensionsWithSpatialLayers) {
+ VideoBitrateAllocation allocation;
+ allocation.SetBitrate(0, 0, 30000);
+ allocation.SetBitrate(1, 0, 40000);
+ allocation.SetBitrate(2, 0, 30000);
+ std::unique_ptr<VideoEncoder> encoder = CreateLibaomAv1Encoder();
+ VideoCodec codec_settings = DefaultCodecSettings();
+ // Configure encoder with 3 spatial layers.
+ codec_settings.SetScalabilityMode(ScalabilityMode::kL3T1);
+  // Odd width and height values should not make the encoder crash.
+ codec_settings.width = 623;
+ codec_settings.height = 405;
+ codec_settings.maxBitrate = allocation.get_sum_kbps();
+ ASSERT_EQ(encoder->InitEncode(&codec_settings, DefaultEncoderSettings()),
+ WEBRTC_VIDEO_CODEC_OK);
+ encoder->SetRates(VideoEncoder::RateControlParameters(
+ allocation, codec_settings.maxFramerate));
+ EncodedVideoFrameProducer evfp(*encoder);
+ evfp.SetResolution(RenderResolution{623, 405});
+ std::vector<EncodedVideoFrameProducer::EncodedFrame> encoded_frames =
+ evfp.SetNumInputFrames(2).Encode();
+ ASSERT_THAT(encoded_frames, SizeIs(6));
+}
+
+TEST(LibaomAv1EncoderTest, EncoderInfoProvidesFpsAllocation) {
+ std::unique_ptr<VideoEncoder> encoder = CreateLibaomAv1Encoder();
+ VideoCodec codec_settings = DefaultCodecSettings();
+ codec_settings.SetScalabilityMode(ScalabilityMode::kL3T3);
+ codec_settings.maxFramerate = 60;
+ ASSERT_EQ(encoder->InitEncode(&codec_settings, DefaultEncoderSettings()),
+ WEBRTC_VIDEO_CODEC_OK);
+
+ const auto& encoder_info = encoder->GetEncoderInfo();
+ EXPECT_THAT(encoder_info.fps_allocation[0], ElementsAre(15, 30, 60));
+ EXPECT_THAT(encoder_info.fps_allocation[1], ElementsAre(15, 30, 60));
+ EXPECT_THAT(encoder_info.fps_allocation[2], ElementsAre(15, 30, 60));
+ EXPECT_THAT(encoder_info.fps_allocation[3], IsEmpty());
+}
+
+TEST(LibaomAv1EncoderTest, PopulatesEncodedFrameSize) {
+ VideoBitrateAllocation allocation;
+ allocation.SetBitrate(0, 0, 30000);
+ allocation.SetBitrate(1, 0, 40000);
+ allocation.SetBitrate(2, 0, 30000);
+ std::unique_ptr<VideoEncoder> encoder = CreateLibaomAv1Encoder();
+ VideoCodec codec_settings = DefaultCodecSettings();
+ codec_settings.maxBitrate = allocation.get_sum_kbps();
+ ASSERT_GT(codec_settings.width, 4);
+ // Configure encoder with 3 spatial layers.
+ codec_settings.SetScalabilityMode(ScalabilityMode::kL3T1);
+ ASSERT_EQ(encoder->InitEncode(&codec_settings, DefaultEncoderSettings()),
+ WEBRTC_VIDEO_CODEC_OK);
+ encoder->SetRates(VideoEncoder::RateControlParameters(
+ allocation, codec_settings.maxFramerate));
+ using Frame = EncodedVideoFrameProducer::EncodedFrame;
+ std::vector<Frame> encoded_frames =
+ EncodedVideoFrameProducer(*encoder).SetNumInputFrames(1).Encode();
+ EXPECT_THAT(
+ encoded_frames,
+ ElementsAre(
+ Field(&Frame::encoded_image,
+ AllOf(Field(&EncodedImage::_encodedWidth,
+ codec_settings.width / 4),
+ Field(&EncodedImage::_encodedHeight,
+ codec_settings.height / 4))),
+ Field(&Frame::encoded_image,
+ AllOf(Field(&EncodedImage::_encodedWidth,
+ codec_settings.width / 2),
+ Field(&EncodedImage::_encodedHeight,
+ codec_settings.height / 2))),
+ Field(&Frame::encoded_image,
+ AllOf(Field(&EncodedImage::_encodedWidth, codec_settings.width),
+ Field(&EncodedImage::_encodedHeight,
+ codec_settings.height)))));
+}
+
+} // namespace
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/av1/libaom_av1_unittest.cc b/third_party/libwebrtc/modules/video_coding/codecs/av1/libaom_av1_unittest.cc
new file mode 100644
index 0000000000..5d9c251bc7
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/av1/libaom_av1_unittest.cc
@@ -0,0 +1,365 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <map>
+#include <memory>
+#include <ostream>
+#include <tuple>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/units/data_size.h"
+#include "api/units/time_delta.h"
+#include "api/video_codecs/video_codec.h"
+#include "api/video_codecs/video_encoder.h"
+#include "modules/video_coding/codecs/av1/libaom_av1_decoder.h"
+#include "modules/video_coding/codecs/av1/libaom_av1_encoder.h"
+#include "modules/video_coding/codecs/test/encoded_video_frame_producer.h"
+#include "modules/video_coding/include/video_codec_interface.h"
+#include "modules/video_coding/include/video_error_codes.h"
+#include "modules/video_coding/svc/create_scalability_structure.h"
+#include "modules/video_coding/svc/scalability_mode_util.h"
+#include "modules/video_coding/svc/scalable_video_controller.h"
+#include "modules/video_coding/svc/scalable_video_controller_no_layering.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+using ::testing::ContainerEq;
+using ::testing::Each;
+using ::testing::ElementsAreArray;
+using ::testing::Ge;
+using ::testing::IsEmpty;
+using ::testing::Not;
+using ::testing::NotNull;
+using ::testing::Pointwise;
+using ::testing::SizeIs;
+using ::testing::Truly;
+using ::testing::Values;
+
+// Use small resolution for this test to make it faster.
+constexpr int kWidth = 320;
+constexpr int kHeight = 180;
+constexpr int kFramerate = 30;
+
+VideoCodec DefaultCodecSettings() {
+ VideoCodec codec_settings;
+ codec_settings.SetScalabilityMode(ScalabilityMode::kL1T1);
+ codec_settings.width = kWidth;
+ codec_settings.height = kHeight;
+ codec_settings.maxFramerate = kFramerate;
+ codec_settings.maxBitrate = 1000;
+ codec_settings.qpMax = 63;
+ return codec_settings;
+}
+VideoEncoder::Settings DefaultEncoderSettings() {
+ return VideoEncoder::Settings(
+ VideoEncoder::Capabilities(/*loss_notification=*/false),
+ /*number_of_cores=*/1, /*max_payload_size=*/1200);
+}
+
+class TestAv1Decoder {
+ public:
+ explicit TestAv1Decoder(int decoder_id)
+ : decoder_id_(decoder_id), decoder_(CreateLibaomAv1Decoder()) {
+ if (decoder_ == nullptr) {
+ ADD_FAILURE() << "Failed to create a decoder#" << decoder_id_;
+ return;
+ }
+ EXPECT_TRUE(decoder_->Configure({}));
+ EXPECT_EQ(decoder_->RegisterDecodeCompleteCallback(&callback_),
+ WEBRTC_VIDEO_CODEC_OK);
+ }
+ // This class requires pointer stability and thus not copyable nor movable.
+ TestAv1Decoder(const TestAv1Decoder&) = delete;
+ TestAv1Decoder& operator=(const TestAv1Decoder&) = delete;
+
+ void Decode(int64_t frame_id, const EncodedImage& image) {
+ ASSERT_THAT(decoder_, NotNull());
+ int32_t error = decoder_->Decode(image, /*missing_frames=*/false,
+ /*render_time_ms=*/image.capture_time_ms_);
+ if (error != WEBRTC_VIDEO_CODEC_OK) {
+ ADD_FAILURE() << "Failed to decode frame id " << frame_id
+ << " with error code " << error << " by decoder#"
+ << decoder_id_;
+ return;
+ }
+ decoded_ids_.push_back(frame_id);
+ }
+
+ const std::vector<int64_t>& decoded_frame_ids() const { return decoded_ids_; }
+ size_t num_output_frames() const { return callback_.num_called(); }
+
+ private:
+ // Decoder callback that only counts how many times it was called.
+  // While it is tempting to replace it with a simple mock, a mock requires
+  // setting the expected number of calls in advance, and the tests below do
+  // not know that number until after the calls are done.
+ class DecoderCallback : public DecodedImageCallback {
+ public:
+ size_t num_called() const { return num_called_; }
+
+ private:
+ int32_t Decoded(VideoFrame& /*decoded_image*/) override {
+ ++num_called_;
+ return 0;
+ }
+ void Decoded(VideoFrame& /*decoded_image*/,
+ absl::optional<int32_t> /*decode_time_ms*/,
+ absl::optional<uint8_t> /*qp*/) override {
+ ++num_called_;
+ }
+
+ int num_called_ = 0;
+ };
+
+ const int decoder_id_;
+ std::vector<int64_t> decoded_ids_;
+ DecoderCallback callback_;
+ const std::unique_ptr<VideoDecoder> decoder_;
+};
+
+TEST(LibaomAv1Test, EncodeDecode) {
+ TestAv1Decoder decoder(0);
+ std::unique_ptr<VideoEncoder> encoder = CreateLibaomAv1Encoder();
+ VideoCodec codec_settings = DefaultCodecSettings();
+ ASSERT_EQ(encoder->InitEncode(&codec_settings, DefaultEncoderSettings()),
+ WEBRTC_VIDEO_CODEC_OK);
+
+ VideoBitrateAllocation allocation;
+ allocation.SetBitrate(0, 0, 300000);
+ encoder->SetRates(VideoEncoder::RateControlParameters(
+ allocation, codec_settings.maxFramerate));
+
+ std::vector<EncodedVideoFrameProducer::EncodedFrame> encoded_frames =
+ EncodedVideoFrameProducer(*encoder).SetNumInputFrames(4).Encode();
+ for (size_t frame_id = 0; frame_id < encoded_frames.size(); ++frame_id) {
+ decoder.Decode(static_cast<int64_t>(frame_id),
+ encoded_frames[frame_id].encoded_image);
+ }
+
+ // Check encoder produced some frames for decoder to decode.
+ ASSERT_THAT(encoded_frames, Not(IsEmpty()));
+ // Check decoder found all of them valid.
+ EXPECT_THAT(decoder.decoded_frame_ids(), SizeIs(encoded_frames.size()));
+ // Check each of them produced an output frame.
+ EXPECT_EQ(decoder.num_output_frames(), decoder.decoded_frame_ids().size());
+}
+
+struct LayerId {
+ friend bool operator==(const LayerId& lhs, const LayerId& rhs) {
+ return std::tie(lhs.spatial_id, lhs.temporal_id) ==
+ std::tie(rhs.spatial_id, rhs.temporal_id);
+ }
+ friend bool operator<(const LayerId& lhs, const LayerId& rhs) {
+ return std::tie(lhs.spatial_id, lhs.temporal_id) <
+ std::tie(rhs.spatial_id, rhs.temporal_id);
+ }
+ friend std::ostream& operator<<(std::ostream& s, const LayerId& layer) {
+ return s << "S" << layer.spatial_id << "T" << layer.temporal_id;
+ }
+
+ int spatial_id = 0;
+ int temporal_id = 0;
+};
+
+struct SvcTestParam {
+ ScalabilityMode GetScalabilityMode() const {
+ absl::optional<ScalabilityMode> scalability_mode =
+ ScalabilityModeFromString(name);
+ RTC_CHECK(scalability_mode.has_value());
+ return *scalability_mode;
+ }
+
+ std::string name;
+ int num_frames_to_generate;
+ std::map<LayerId, DataRate> configured_bitrates;
+};
+
+class LibaomAv1SvcTest : public ::testing::TestWithParam<SvcTestParam> {};
+
+TEST_P(LibaomAv1SvcTest, EncodeAndDecodeAllDecodeTargets) {
+ const SvcTestParam param = GetParam();
+ std::unique_ptr<ScalableVideoController> svc_controller =
+ CreateScalabilityStructure(param.GetScalabilityMode());
+ ASSERT_TRUE(svc_controller);
+ VideoBitrateAllocation allocation;
+ if (param.configured_bitrates.empty()) {
+ ScalableVideoController::StreamLayersConfig config =
+ svc_controller->StreamConfig();
+ for (int sid = 0; sid < config.num_spatial_layers; ++sid) {
+ for (int tid = 0; tid < config.num_temporal_layers; ++tid) {
+ allocation.SetBitrate(sid, tid, 100'000);
+ }
+ }
+ } else {
+ for (const auto& kv : param.configured_bitrates) {
+ allocation.SetBitrate(kv.first.spatial_id, kv.first.temporal_id,
+ kv.second.bps());
+ }
+ }
+
+ size_t num_decode_targets =
+ svc_controller->DependencyStructure().num_decode_targets;
+
+ std::unique_ptr<VideoEncoder> encoder = CreateLibaomAv1Encoder();
+ VideoCodec codec_settings = DefaultCodecSettings();
+ codec_settings.SetScalabilityMode(GetParam().GetScalabilityMode());
+ ASSERT_EQ(encoder->InitEncode(&codec_settings, DefaultEncoderSettings()),
+ WEBRTC_VIDEO_CODEC_OK);
+ encoder->SetRates(VideoEncoder::RateControlParameters(
+ allocation, codec_settings.maxFramerate));
+ std::vector<EncodedVideoFrameProducer::EncodedFrame> encoded_frames =
+ EncodedVideoFrameProducer(*encoder)
+ .SetNumInputFrames(GetParam().num_frames_to_generate)
+ .SetResolution({kWidth, kHeight})
+ .Encode();
+
+ ASSERT_THAT(
+ encoded_frames,
+ Each(Truly([&](const EncodedVideoFrameProducer::EncodedFrame& frame) {
+ return frame.codec_specific_info.generic_frame_info &&
+ frame.codec_specific_info.generic_frame_info
+ ->decode_target_indications.size() == num_decode_targets;
+ })));
+
+ for (size_t dt = 0; dt < num_decode_targets; ++dt) {
+ TestAv1Decoder decoder(dt);
+ std::vector<int64_t> requested_ids;
+ for (int64_t frame_id = 0;
+ frame_id < static_cast<int64_t>(encoded_frames.size()); ++frame_id) {
+ const EncodedVideoFrameProducer::EncodedFrame& frame =
+ encoded_frames[frame_id];
+ if (frame.codec_specific_info.generic_frame_info
+ ->decode_target_indications[dt] !=
+ DecodeTargetIndication::kNotPresent) {
+ requested_ids.push_back(frame_id);
+ decoder.Decode(frame_id, frame.encoded_image);
+ }
+ }
+
+ ASSERT_THAT(requested_ids, SizeIs(Ge(2u)));
+ // Check decoder found all of them valid.
+ EXPECT_THAT(decoder.decoded_frame_ids(), ContainerEq(requested_ids))
+ << "Decoder#" << dt;
+ // Check each of them produced an output frame.
+ EXPECT_EQ(decoder.num_output_frames(), decoder.decoded_frame_ids().size())
+ << "Decoder#" << dt;
+ }
+}
+
+MATCHER(SameLayerIdAndBitrateIsNear, "") {
+ // First check if layer id is the same.
+ return std::get<0>(arg).first == std::get<1>(arg).first &&
+ // check measured bitrate is not much lower than requested.
+ std::get<0>(arg).second >= std::get<1>(arg).second * 0.8 &&
+ // check measured bitrate is not much larger than requested.
+ std::get<0>(arg).second <= std::get<1>(arg).second * 1.1;
+}
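+// E.g. a layer requested at 100 kbps matches any measured bitrate within the
+// inclusive 80-110 kbps window.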
+
+TEST_P(LibaomAv1SvcTest, SetRatesMatchMeasuredBitrate) {
+ const SvcTestParam param = GetParam();
+ if (param.configured_bitrates.empty()) {
+    // Rates are not configured for this particular structure; skip the test.
+ return;
+ }
+ constexpr TimeDelta kDuration = TimeDelta::Seconds(5);
+
+ VideoBitrateAllocation allocation;
+ for (const auto& kv : param.configured_bitrates) {
+ allocation.SetBitrate(kv.first.spatial_id, kv.first.temporal_id,
+ kv.second.bps());
+ }
+
+ std::unique_ptr<VideoEncoder> encoder = CreateLibaomAv1Encoder();
+ ASSERT_TRUE(encoder);
+ VideoCodec codec_settings = DefaultCodecSettings();
+ codec_settings.SetScalabilityMode(param.GetScalabilityMode());
+ codec_settings.maxBitrate = allocation.get_sum_kbps();
+ codec_settings.maxFramerate = 30;
+ ASSERT_EQ(encoder->InitEncode(&codec_settings, DefaultEncoderSettings()),
+ WEBRTC_VIDEO_CODEC_OK);
+
+ encoder->SetRates(VideoEncoder::RateControlParameters(
+ allocation, codec_settings.maxFramerate));
+
+ std::vector<EncodedVideoFrameProducer::EncodedFrame> encoded_frames =
+ EncodedVideoFrameProducer(*encoder)
+ .SetNumInputFrames(codec_settings.maxFramerate * kDuration.seconds())
+ .SetResolution({codec_settings.width, codec_settings.height})
+ .SetFramerateFps(codec_settings.maxFramerate)
+ .Encode();
+
+ // Calculate size of each layer.
+ std::map<LayerId, DataSize> layer_size;
+ for (const auto& frame : encoded_frames) {
+ ASSERT_TRUE(frame.codec_specific_info.generic_frame_info);
+ const auto& layer = *frame.codec_specific_info.generic_frame_info;
+ LayerId layer_id = {layer.spatial_id, layer.temporal_id};
+    // This is almost the same as
+ // layer_size[layer_id] += DataSize::Bytes(frame.encoded_image.size());
+ // but avoids calling deleted default constructor for DataSize.
+ layer_size.emplace(layer_id, DataSize::Zero()).first->second +=
+ DataSize::Bytes(frame.encoded_image.size());
+ }
+ // Convert size of the layer into bitrate of that layer.
+ std::vector<std::pair<LayerId, DataRate>> measured_bitrates;
+ for (const auto& kv : layer_size) {
+ measured_bitrates.emplace_back(kv.first, kv.second / kDuration);
+ }
+ EXPECT_THAT(measured_bitrates, Pointwise(SameLayerIdAndBitrateIsNear(),
+ param.configured_bitrates));
+}
+
+INSTANTIATE_TEST_SUITE_P(
+ Svc,
+ LibaomAv1SvcTest,
+ Values(SvcTestParam{"L1T1", /*num_frames_to_generate=*/4},
+ SvcTestParam{"L1T2",
+ /*num_frames_to_generate=*/4,
+ /*configured_bitrates=*/
+ {{{0, 0}, DataRate::KilobitsPerSec(60)},
+ {{0, 1}, DataRate::KilobitsPerSec(40)}}},
+ SvcTestParam{"L1T3", /*num_frames_to_generate=*/8},
+ SvcTestParam{"L2T1",
+ /*num_frames_to_generate=*/3,
+ /*configured_bitrates=*/
+ {{{0, 0}, DataRate::KilobitsPerSec(30)},
+ {{1, 0}, DataRate::KilobitsPerSec(70)}}},
+ SvcTestParam{"L2T1h",
+ /*num_frames_to_generate=*/3,
+ /*configured_bitrates=*/
+ {{{0, 0}, DataRate::KilobitsPerSec(30)},
+ {{1, 0}, DataRate::KilobitsPerSec(70)}}},
+ SvcTestParam{"L2T1_KEY", /*num_frames_to_generate=*/3},
+ SvcTestParam{"L3T1", /*num_frames_to_generate=*/3},
+ SvcTestParam{"L3T3", /*num_frames_to_generate=*/8},
+ SvcTestParam{"S2T1", /*num_frames_to_generate=*/3},
+ SvcTestParam{"S3T3", /*num_frames_to_generate=*/8},
+ SvcTestParam{"L2T2", /*num_frames_to_generate=*/4},
+ SvcTestParam{"L2T2_KEY", /*num_frames_to_generate=*/4},
+ SvcTestParam{"L2T2_KEY_SHIFT",
+ /*num_frames_to_generate=*/4,
+ /*configured_bitrates=*/
+ {{{0, 0}, DataRate::KilobitsPerSec(70)},
+ {{0, 1}, DataRate::KilobitsPerSec(30)},
+ {{1, 0}, DataRate::KilobitsPerSec(110)},
+ {{1, 1}, DataRate::KilobitsPerSec(80)}}}),
+ [](const testing::TestParamInfo<SvcTestParam>& info) {
+ return info.param.name;
+ });
+
+} // namespace
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/h264/DEPS b/third_party/libwebrtc/modules/video_coding/codecs/h264/DEPS
new file mode 100644
index 0000000000..4e110917d8
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/h264/DEPS
@@ -0,0 +1,5 @@
+include_rules = [
+ "+third_party/ffmpeg",
+ "+third_party/openh264",
+ "+media/base",
+]
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/h264/OWNERS b/third_party/libwebrtc/modules/video_coding/codecs/h264/OWNERS
new file mode 100644
index 0000000000..4b06c4e32b
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/h264/OWNERS
@@ -0,0 +1,2 @@
+sprang@webrtc.org
+ssilkin@webrtc.org
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/h264/h264.cc b/third_party/libwebrtc/modules/video_coding/codecs/h264/h264.cc
new file mode 100644
index 0000000000..23580d7a4a
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/h264/h264.cc
@@ -0,0 +1,166 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ *
+ */
+
+#include "modules/video_coding/codecs/h264/include/h264.h"
+
+#include <memory>
+#include <string>
+
+#include "absl/container/inlined_vector.h"
+#include "absl/types/optional.h"
+#include "api/video_codecs/sdp_video_format.h"
+#include "media/base/media_constants.h"
+#include "rtc_base/trace_event.h"
+
+#if defined(WEBRTC_USE_H264)
+#include "modules/video_coding/codecs/h264/h264_decoder_impl.h"
+#include "modules/video_coding/codecs/h264/h264_encoder_impl.h"
+#endif
+
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+namespace {
+
+#if defined(WEBRTC_USE_H264)
+bool g_rtc_use_h264 = true;
+#endif
+
+// Whether the H.264 OpenH264/FFmpeg codec is supported.
+bool IsH264CodecSupported() {
+#if defined(WEBRTC_USE_H264)
+ return g_rtc_use_h264;
+#else
+ return false;
+#endif
+}
+
+constexpr ScalabilityMode kSupportedScalabilityModes[] = {
+ ScalabilityMode::kL1T1, ScalabilityMode::kL1T2, ScalabilityMode::kL1T3};
+
+} // namespace
+
+SdpVideoFormat CreateH264Format(H264Profile profile,
+ H264Level level,
+ const std::string& packetization_mode,
+ bool add_scalability_modes) {
+ const absl::optional<std::string> profile_string =
+ H264ProfileLevelIdToString(H264ProfileLevelId(profile, level));
+ RTC_CHECK(profile_string);
+ absl::InlinedVector<ScalabilityMode, kScalabilityModeCount> scalability_modes;
+ if (add_scalability_modes) {
+ for (const auto scalability_mode : kSupportedScalabilityModes) {
+ scalability_modes.push_back(scalability_mode);
+ }
+ }
+ return SdpVideoFormat(
+ cricket::kH264CodecName,
+ {{cricket::kH264FmtpProfileLevelId, *profile_string},
+ {cricket::kH264FmtpLevelAsymmetryAllowed, "1"},
+ {cricket::kH264FmtpPacketizationMode, packetization_mode}},
+ scalability_modes);
+}
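+// E.g. CreateH264Format(H264Profile::kProfileConstrainedBaseline,
+// H264Level::kLevel3_1, "1", false) yields an SDP format whose fmtp line
+// carries profile-level-id=42e01f (illustrative of CBP level 3.1),
+// level-asymmetry-allowed=1 and packetization-mode=1.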
+
+void DisableRtcUseH264() {
+#if defined(WEBRTC_USE_H264)
+ g_rtc_use_h264 = false;
+#endif
+}
+
+std::vector<SdpVideoFormat> SupportedH264Codecs(bool add_scalability_modes) {
+ TRACE_EVENT0("webrtc", __func__);
+ if (!IsH264CodecSupported())
+ return std::vector<SdpVideoFormat>();
+ // We only support encoding Constrained Baseline Profile (CBP), but the
+ // decoder supports more profiles. We can list all profiles here that are
+ // supported by the decoder and that are also supersets of CBP, i.e. the
+ // decoder for that profile is required to be able to decode CBP. This means
+ // we can encode and send CBP even though we negotiated a potentially
+ // higher profile. See the H264 spec for more information.
+ //
+ // We support both packetization modes 0 (mandatory) and 1 (optional,
+ // preferred).
+ return {CreateH264Format(H264Profile::kProfileBaseline, H264Level::kLevel3_1,
+ "1", add_scalability_modes),
+ CreateH264Format(H264Profile::kProfileBaseline, H264Level::kLevel3_1,
+ "0", add_scalability_modes),
+ CreateH264Format(H264Profile::kProfileConstrainedBaseline,
+ H264Level::kLevel3_1, "1", add_scalability_modes),
+ CreateH264Format(H264Profile::kProfileConstrainedBaseline,
+ H264Level::kLevel3_1, "0", add_scalability_modes),
+ CreateH264Format(H264Profile::kProfileMain, H264Level::kLevel3_1, "1",
+ add_scalability_modes),
+ CreateH264Format(H264Profile::kProfileMain, H264Level::kLevel3_1, "0",
+ add_scalability_modes)};
+}
+
+std::vector<SdpVideoFormat> SupportedH264DecoderCodecs() {
+ TRACE_EVENT0("webrtc", __func__);
+ if (!IsH264CodecSupported())
+ return std::vector<SdpVideoFormat>();
+
+ std::vector<SdpVideoFormat> supportedCodecs = SupportedH264Codecs();
+
+ // OpenH264 doesn't yet support High Predictive 4:4:4 encoding but it does
+ // support decoding.
+ supportedCodecs.push_back(CreateH264Format(
+ H264Profile::kProfilePredictiveHigh444, H264Level::kLevel3_1, "1"));
+ supportedCodecs.push_back(CreateH264Format(
+ H264Profile::kProfilePredictiveHigh444, H264Level::kLevel3_1, "0"));
+
+ return supportedCodecs;
+}
+
+std::unique_ptr<H264Encoder> H264Encoder::Create(
+ const cricket::VideoCodec& codec) {
+ RTC_DCHECK(H264Encoder::IsSupported());
+#if defined(WEBRTC_USE_H264)
+ RTC_CHECK(g_rtc_use_h264);
+ RTC_LOG(LS_INFO) << "Creating H264EncoderImpl.";
+ return std::make_unique<H264EncoderImpl>(codec);
+#else
+ RTC_DCHECK_NOTREACHED();
+ return nullptr;
+#endif
+}
+
+bool H264Encoder::IsSupported() {
+ return IsH264CodecSupported();
+}
+
+bool H264Encoder::SupportsScalabilityMode(ScalabilityMode scalability_mode) {
+ for (const auto& entry : kSupportedScalabilityModes) {
+ if (entry == scalability_mode) {
+ return true;
+ }
+ }
+ return false;
+}
+
+std::unique_ptr<H264Decoder> H264Decoder::Create() {
+ RTC_DCHECK(H264Decoder::IsSupported());
+#if defined(WEBRTC_USE_H264)
+ RTC_CHECK(g_rtc_use_h264);
+ RTC_LOG(LS_INFO) << "Creating H264DecoderImpl.";
+ return std::make_unique<H264DecoderImpl>();
+#else
+ RTC_DCHECK_NOTREACHED();
+ return nullptr;
+#endif
+}
+
+bool H264Decoder::IsSupported() {
+ return IsH264CodecSupported();
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/h264/h264_color_space.cc b/third_party/libwebrtc/modules/video_coding/codecs/h264/h264_color_space.cc
new file mode 100644
index 0000000000..59921263e3
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/h264/h264_color_space.cc
@@ -0,0 +1,178 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+// Everything declared/defined in this file is only required when WebRTC is
+// built with H264 support. Please do not move anything out of the
+// #ifdef unless needed and tested.
+#ifdef WEBRTC_USE_H264
+
+#include "modules/video_coding/codecs/h264/h264_color_space.h"
+
+namespace webrtc {
+
+ColorSpace ExtractH264ColorSpace(AVCodecContext* codec) {
+ ColorSpace::PrimaryID primaries = ColorSpace::PrimaryID::kUnspecified;
+ switch (codec->color_primaries) {
+ case AVCOL_PRI_BT709:
+ primaries = ColorSpace::PrimaryID::kBT709;
+ break;
+ case AVCOL_PRI_BT470M:
+ primaries = ColorSpace::PrimaryID::kBT470M;
+ break;
+ case AVCOL_PRI_BT470BG:
+ primaries = ColorSpace::PrimaryID::kBT470BG;
+ break;
+ case AVCOL_PRI_SMPTE170M:
+ primaries = ColorSpace::PrimaryID::kSMPTE170M;
+ break;
+ case AVCOL_PRI_SMPTE240M:
+ primaries = ColorSpace::PrimaryID::kSMPTE240M;
+ break;
+ case AVCOL_PRI_FILM:
+ primaries = ColorSpace::PrimaryID::kFILM;
+ break;
+ case AVCOL_PRI_BT2020:
+ primaries = ColorSpace::PrimaryID::kBT2020;
+ break;
+ case AVCOL_PRI_SMPTE428:
+ primaries = ColorSpace::PrimaryID::kSMPTEST428;
+ break;
+ case AVCOL_PRI_SMPTE431:
+ primaries = ColorSpace::PrimaryID::kSMPTEST431;
+ break;
+ case AVCOL_PRI_SMPTE432:
+ primaries = ColorSpace::PrimaryID::kSMPTEST432;
+ break;
+ case AVCOL_PRI_JEDEC_P22:
+ primaries = ColorSpace::PrimaryID::kJEDECP22;
+ break;
+ case AVCOL_PRI_RESERVED0:
+ case AVCOL_PRI_UNSPECIFIED:
+ case AVCOL_PRI_RESERVED:
+ default:
+ break;
+ }
+
+ ColorSpace::TransferID transfer = ColorSpace::TransferID::kUnspecified;
+ switch (codec->color_trc) {
+ case AVCOL_TRC_BT709:
+ transfer = ColorSpace::TransferID::kBT709;
+ break;
+ case AVCOL_TRC_GAMMA22:
+ transfer = ColorSpace::TransferID::kGAMMA22;
+ break;
+ case AVCOL_TRC_GAMMA28:
+ transfer = ColorSpace::TransferID::kGAMMA28;
+ break;
+ case AVCOL_TRC_SMPTE170M:
+ transfer = ColorSpace::TransferID::kSMPTE170M;
+ break;
+ case AVCOL_TRC_SMPTE240M:
+ transfer = ColorSpace::TransferID::kSMPTE240M;
+ break;
+ case AVCOL_TRC_LINEAR:
+ transfer = ColorSpace::TransferID::kLINEAR;
+ break;
+ case AVCOL_TRC_LOG:
+ transfer = ColorSpace::TransferID::kLOG;
+ break;
+ case AVCOL_TRC_LOG_SQRT:
+ transfer = ColorSpace::TransferID::kLOG_SQRT;
+ break;
+ case AVCOL_TRC_IEC61966_2_4:
+ transfer = ColorSpace::TransferID::kIEC61966_2_4;
+ break;
+ case AVCOL_TRC_BT1361_ECG:
+ transfer = ColorSpace::TransferID::kBT1361_ECG;
+ break;
+ case AVCOL_TRC_IEC61966_2_1:
+ transfer = ColorSpace::TransferID::kIEC61966_2_1;
+ break;
+ case AVCOL_TRC_BT2020_10:
+ transfer = ColorSpace::TransferID::kBT2020_10;
+ break;
+ case AVCOL_TRC_BT2020_12:
+ transfer = ColorSpace::TransferID::kBT2020_12;
+ break;
+ case AVCOL_TRC_SMPTE2084:
+ transfer = ColorSpace::TransferID::kSMPTEST2084;
+ break;
+ case AVCOL_TRC_SMPTE428:
+ transfer = ColorSpace::TransferID::kSMPTEST428;
+ break;
+ case AVCOL_TRC_ARIB_STD_B67:
+ transfer = ColorSpace::TransferID::kARIB_STD_B67;
+ break;
+ case AVCOL_TRC_RESERVED0:
+ case AVCOL_TRC_UNSPECIFIED:
+ case AVCOL_TRC_RESERVED:
+ default:
+ break;
+ }
+
+ ColorSpace::MatrixID matrix = ColorSpace::MatrixID::kUnspecified;
+ switch (codec->colorspace) {
+ case AVCOL_SPC_RGB:
+ matrix = ColorSpace::MatrixID::kRGB;
+ break;
+ case AVCOL_SPC_BT709:
+ matrix = ColorSpace::MatrixID::kBT709;
+ break;
+ case AVCOL_SPC_FCC:
+ matrix = ColorSpace::MatrixID::kFCC;
+ break;
+ case AVCOL_SPC_BT470BG:
+ matrix = ColorSpace::MatrixID::kBT470BG;
+ break;
+ case AVCOL_SPC_SMPTE170M:
+ matrix = ColorSpace::MatrixID::kSMPTE170M;
+ break;
+ case AVCOL_SPC_SMPTE240M:
+ matrix = ColorSpace::MatrixID::kSMPTE240M;
+ break;
+ case AVCOL_SPC_YCGCO:
+ matrix = ColorSpace::MatrixID::kYCOCG;
+ break;
+ case AVCOL_SPC_BT2020_NCL:
+ matrix = ColorSpace::MatrixID::kBT2020_NCL;
+ break;
+ case AVCOL_SPC_BT2020_CL:
+ matrix = ColorSpace::MatrixID::kBT2020_CL;
+ break;
+ case AVCOL_SPC_SMPTE2085:
+ matrix = ColorSpace::MatrixID::kSMPTE2085;
+ break;
+ case AVCOL_SPC_CHROMA_DERIVED_NCL:
+ case AVCOL_SPC_CHROMA_DERIVED_CL:
+ case AVCOL_SPC_ICTCP:
+ case AVCOL_SPC_UNSPECIFIED:
+ case AVCOL_SPC_RESERVED:
+ default:
+ break;
+ }
+
+ ColorSpace::RangeID range = ColorSpace::RangeID::kInvalid;
+ switch (codec->color_range) {
+ case AVCOL_RANGE_MPEG:
+ range = ColorSpace::RangeID::kLimited;
+ break;
+ case AVCOL_RANGE_JPEG:
+ range = ColorSpace::RangeID::kFull;
+ break;
+ case AVCOL_RANGE_UNSPECIFIED:
+ default:
+ break;
+ }
+ return ColorSpace(primaries, transfer, matrix, range);
+}
+
+} // namespace webrtc
+
+#endif // WEBRTC_USE_H264
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/h264/h264_color_space.h b/third_party/libwebrtc/modules/video_coding/codecs/h264/h264_color_space.h
new file mode 100644
index 0000000000..392ccaf563
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/h264/h264_color_space.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_CODECS_H264_H264_COLOR_SPACE_H_
+#define MODULES_VIDEO_CODING_CODECS_H264_H264_COLOR_SPACE_H_
+
+// Everything declared in this header is only required when WebRTC is
+// built with H264 support. Please do not move anything out of the
+// #ifdef unless needed and tested.
+#ifdef WEBRTC_USE_H264
+
+#if defined(WEBRTC_WIN) && !defined(__clang__)
+#error "See: bugs.webrtc.org/9213#c13."
+#endif
+
+#include "api/video/color_space.h"
+
+extern "C" {
+#include "third_party/ffmpeg/libavcodec/avcodec.h"
+} // extern "C"
+
+namespace webrtc {
+
+// Helper function for extracting color space information from an H264 stream.
+ColorSpace ExtractH264ColorSpace(AVCodecContext* codec);
+
+} // namespace webrtc
+
+#endif // WEBRTC_USE_H264
+
+#endif // MODULES_VIDEO_CODING_CODECS_H264_H264_COLOR_SPACE_H_
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/h264/h264_decoder_impl.cc b/third_party/libwebrtc/modules/video_coding/codecs/h264/h264_decoder_impl.cc
new file mode 100644
index 0000000000..e654e1835b
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/h264/h264_decoder_impl.cc
@@ -0,0 +1,648 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ *
+ */
+
+// Everything declared/defined in this file is only required when WebRTC is
+// built with H264 support. Please do not move anything out of the
+// #ifdef unless needed and tested.
+#ifdef WEBRTC_USE_H264
+
+#include "modules/video_coding/codecs/h264/h264_decoder_impl.h"
+
+#include <algorithm>
+#include <limits>
+#include <memory>
+
+extern "C" {
+#include "third_party/ffmpeg/libavcodec/avcodec.h"
+#include "third_party/ffmpeg/libavformat/avformat.h"
+#include "third_party/ffmpeg/libavutil/imgutils.h"
+} // extern "C"
+
+#include "api/video/color_space.h"
+#include "api/video/i010_buffer.h"
+#include "api/video/i420_buffer.h"
+#include "common_video/include/video_frame_buffer.h"
+#include "modules/video_coding/codecs/h264/h264_color_space.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "system_wrappers/include/field_trial.h"
+#include "system_wrappers/include/metrics.h"
+#include "third_party/libyuv/include/libyuv/convert.h"
+
+namespace webrtc {
+
+namespace {
+
+constexpr std::array<AVPixelFormat, 8> kPixelFormatsSupported = {
+ AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P,
+ AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ444P,
+ AV_PIX_FMT_YUV420P10LE, AV_PIX_FMT_YUV422P10LE};
+const size_t kYPlaneIndex = 0;
+const size_t kUPlaneIndex = 1;
+const size_t kVPlaneIndex = 2;
+
+// Used by histograms. Values of entries should not be changed.
+enum H264DecoderImplEvent {
+ kH264DecoderEventInit = 0,
+ kH264DecoderEventError = 1,
+ kH264DecoderEventMax = 16,
+};
+
+struct ScopedPtrAVFreePacket {
+ void operator()(AVPacket* packet) { av_packet_free(&packet); }
+};
+typedef std::unique_ptr<AVPacket, ScopedPtrAVFreePacket> ScopedAVPacket;
+
+ScopedAVPacket MakeScopedAVPacket() {
+ ScopedAVPacket packet(av_packet_alloc());
+ return packet;
+}
+
+} // namespace
+
+int H264DecoderImpl::AVGetBuffer2(AVCodecContext* context,
+ AVFrame* av_frame,
+ int flags) {
+ // Set in `Configure`.
+ H264DecoderImpl* decoder = static_cast<H264DecoderImpl*>(context->opaque);
+ // DCHECK values set in `Configure`.
+ RTC_DCHECK(decoder);
+ // Necessary capability to be allowed to provide our own buffers.
+  RTC_DCHECK(context->codec->capabilities & AV_CODEC_CAP_DR1);
+
+ auto pixelFormatSupported = std::find_if(
+ kPixelFormatsSupported.begin(), kPixelFormatsSupported.end(),
+ [context](AVPixelFormat format) { return context->pix_fmt == format; });
+
+ RTC_CHECK(pixelFormatSupported != kPixelFormatsSupported.end());
+
+ // `av_frame->width` and `av_frame->height` are set by FFmpeg. These are the
+ // actual image's dimensions and may be different from `context->width` and
+ // `context->coded_width` due to reordering.
+ int width = av_frame->width;
+ int height = av_frame->height;
+  // See `lowres`; if used, the decoder scales the image by 1/2^(lowres). This
+ // has implications on which resolutions are valid, but we don't use it.
+ RTC_CHECK_EQ(context->lowres, 0);
+ // Adjust the `width` and `height` to values acceptable by the decoder.
+ // Without this, FFmpeg may overflow the buffer. If modified, `width` and/or
+ // `height` are larger than the actual image and the image has to be cropped
+ // (top-left corner) after decoding to avoid visible borders to the right and
+ // bottom of the actual image.
+ avcodec_align_dimensions(context, &width, &height);
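+  // For example, a 1920x1080 frame is typically aligned up to 1920x1088 here
+  // and cropped back to 1920x1080 after decoding.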
+
+ RTC_CHECK_GE(width, 0);
+ RTC_CHECK_GE(height, 0);
+ int ret = av_image_check_size(static_cast<unsigned int>(width),
+ static_cast<unsigned int>(height), 0, nullptr);
+ if (ret < 0) {
+ RTC_LOG(LS_ERROR) << "Invalid picture size " << width << "x" << height;
+ decoder->ReportError();
+ return ret;
+ }
+
+ // The video frame is stored in `frame_buffer`. `av_frame` is FFmpeg's version
+ // of a video frame and will be set up to reference `frame_buffer`'s data.
+
+ // FFmpeg expects the initial allocation to be zero-initialized according to
+ // http://crbug.com/390941. Our pool is set up to zero-initialize new buffers.
+  // TODO(https://crbug.com/390941): Delete that feature from the video pool;
+  // instead, add an explicit call to InitializeData here.
+ rtc::scoped_refptr<PlanarYuvBuffer> frame_buffer;
+ rtc::scoped_refptr<I444Buffer> i444_buffer;
+ rtc::scoped_refptr<I420Buffer> i420_buffer;
+ rtc::scoped_refptr<I422Buffer> i422_buffer;
+ rtc::scoped_refptr<I010Buffer> i010_buffer;
+ rtc::scoped_refptr<I210Buffer> i210_buffer;
+ int bytes_per_pixel = 1;
+ switch (context->pix_fmt) {
+ case AV_PIX_FMT_YUV420P:
+ case AV_PIX_FMT_YUVJ420P:
+ i420_buffer =
+ decoder->ffmpeg_buffer_pool_.CreateI420Buffer(width, height);
+ // Set `av_frame` members as required by FFmpeg.
+ av_frame->data[kYPlaneIndex] = i420_buffer->MutableDataY();
+ av_frame->linesize[kYPlaneIndex] = i420_buffer->StrideY();
+ av_frame->data[kUPlaneIndex] = i420_buffer->MutableDataU();
+ av_frame->linesize[kUPlaneIndex] = i420_buffer->StrideU();
+ av_frame->data[kVPlaneIndex] = i420_buffer->MutableDataV();
+ av_frame->linesize[kVPlaneIndex] = i420_buffer->StrideV();
+ RTC_DCHECK_EQ(av_frame->extended_data, av_frame->data);
+ frame_buffer = i420_buffer;
+ break;
+ case AV_PIX_FMT_YUV444P:
+ case AV_PIX_FMT_YUVJ444P:
+ i444_buffer =
+ decoder->ffmpeg_buffer_pool_.CreateI444Buffer(width, height);
+ // Set `av_frame` members as required by FFmpeg.
+ av_frame->data[kYPlaneIndex] = i444_buffer->MutableDataY();
+ av_frame->linesize[kYPlaneIndex] = i444_buffer->StrideY();
+ av_frame->data[kUPlaneIndex] = i444_buffer->MutableDataU();
+ av_frame->linesize[kUPlaneIndex] = i444_buffer->StrideU();
+ av_frame->data[kVPlaneIndex] = i444_buffer->MutableDataV();
+ av_frame->linesize[kVPlaneIndex] = i444_buffer->StrideV();
+ frame_buffer = i444_buffer;
+ break;
+ case AV_PIX_FMT_YUV422P:
+ case AV_PIX_FMT_YUVJ422P:
+ i422_buffer =
+ decoder->ffmpeg_buffer_pool_.CreateI422Buffer(width, height);
+ // Set `av_frame` members as required by FFmpeg.
+ av_frame->data[kYPlaneIndex] = i422_buffer->MutableDataY();
+ av_frame->linesize[kYPlaneIndex] = i422_buffer->StrideY();
+ av_frame->data[kUPlaneIndex] = i422_buffer->MutableDataU();
+ av_frame->linesize[kUPlaneIndex] = i422_buffer->StrideU();
+ av_frame->data[kVPlaneIndex] = i422_buffer->MutableDataV();
+ av_frame->linesize[kVPlaneIndex] = i422_buffer->StrideV();
+ frame_buffer = i422_buffer;
+ break;
+ case AV_PIX_FMT_YUV420P10LE:
+ i010_buffer =
+ decoder->ffmpeg_buffer_pool_.CreateI010Buffer(width, height);
+ // Set `av_frame` members as required by FFmpeg.
+ av_frame->data[kYPlaneIndex] =
+ reinterpret_cast<uint8_t*>(i010_buffer->MutableDataY());
+ av_frame->linesize[kYPlaneIndex] = i010_buffer->StrideY() * 2;
+ av_frame->data[kUPlaneIndex] =
+ reinterpret_cast<uint8_t*>(i010_buffer->MutableDataU());
+ av_frame->linesize[kUPlaneIndex] = i010_buffer->StrideU() * 2;
+ av_frame->data[kVPlaneIndex] =
+ reinterpret_cast<uint8_t*>(i010_buffer->MutableDataV());
+ av_frame->linesize[kVPlaneIndex] = i010_buffer->StrideV() * 2;
+ frame_buffer = i010_buffer;
+ bytes_per_pixel = 2;
+ break;
+ case AV_PIX_FMT_YUV422P10LE:
+ i210_buffer =
+ decoder->ffmpeg_buffer_pool_.CreateI210Buffer(width, height);
+ // Set `av_frame` members as required by FFmpeg.
+ av_frame->data[kYPlaneIndex] =
+ reinterpret_cast<uint8_t*>(i210_buffer->MutableDataY());
+ av_frame->linesize[kYPlaneIndex] = i210_buffer->StrideY() * 2;
+ av_frame->data[kUPlaneIndex] =
+ reinterpret_cast<uint8_t*>(i210_buffer->MutableDataU());
+ av_frame->linesize[kUPlaneIndex] = i210_buffer->StrideU() * 2;
+ av_frame->data[kVPlaneIndex] =
+ reinterpret_cast<uint8_t*>(i210_buffer->MutableDataV());
+ av_frame->linesize[kVPlaneIndex] = i210_buffer->StrideV() * 2;
+ frame_buffer = i210_buffer;
+ bytes_per_pixel = 2;
+ break;
+ default:
+ RTC_LOG(LS_ERROR) << "Unsupported buffer type " << context->pix_fmt
+ << ". Check supported supported pixel formats!";
+ decoder->ReportError();
+ return -1;
+ }
+
+ int y_size = width * height * bytes_per_pixel;
+ int uv_size = frame_buffer->ChromaWidth() * frame_buffer->ChromaHeight() *
+ bytes_per_pixel;
+ // DCHECK that we have a continuous buffer as is required.
+ RTC_DCHECK_EQ(av_frame->data[kUPlaneIndex],
+ av_frame->data[kYPlaneIndex] + y_size);
+ RTC_DCHECK_EQ(av_frame->data[kVPlaneIndex],
+ av_frame->data[kUPlaneIndex] + uv_size);
+ int total_size = y_size + 2 * uv_size;
+
+ av_frame->format = context->pix_fmt;
+ av_frame->reordered_opaque = context->reordered_opaque;
+
+ // Create a VideoFrame object, to keep a reference to the buffer.
+  // TODO(nisse): The VideoFrame's timestamp and rotation info are not used.
+  // Refactor to not use a VideoFrame object at all.
+ av_frame->buf[0] = av_buffer_create(
+ av_frame->data[kYPlaneIndex], total_size, AVFreeBuffer2,
+ static_cast<void*>(
+ std::make_unique<VideoFrame>(VideoFrame::Builder()
+ .set_video_frame_buffer(frame_buffer)
+ .set_rotation(kVideoRotation_0)
+ .set_timestamp_us(0)
+ .build())
+ .release()),
+ 0);
+ RTC_CHECK(av_frame->buf[0]);
+ return 0;
+}
+
+void H264DecoderImpl::AVFreeBuffer2(void* opaque, uint8_t* data) {
+ // The buffer pool recycles the buffer used by `video_frame` when there are no
+ // more references to it. `video_frame` is a thin buffer holder and is not
+ // recycled.
+ VideoFrame* video_frame = static_cast<VideoFrame*>(opaque);
+ delete video_frame;
+}
+
+H264DecoderImpl::H264DecoderImpl()
+ : ffmpeg_buffer_pool_(true),
+ decoded_image_callback_(nullptr),
+ has_reported_init_(false),
+ has_reported_error_(false),
+ preferred_output_format_(field_trial::IsEnabled("WebRTC-NV12Decode")
+ ? VideoFrameBuffer::Type::kNV12
+ : VideoFrameBuffer::Type::kI420) {}
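+
+// The NV12 output preference above is controlled by the "WebRTC-NV12Decode"
+// field trial. A minimal sketch of enabling it at process start (hypothetical
+// call site; note the trial string must outlive its use):
+//
+//   webrtc::field_trial::InitFieldTrialsFromString(
+//       "WebRTC-NV12Decode/Enabled/");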
+
+H264DecoderImpl::~H264DecoderImpl() {
+ Release();
+}
+
+bool H264DecoderImpl::Configure(const Settings& settings) {
+ ReportInit();
+ if (settings.codec_type() != kVideoCodecH264) {
+ ReportError();
+ return false;
+ }
+
+ // Release necessary in case of re-initializing.
+ int32_t ret = Release();
+ if (ret != WEBRTC_VIDEO_CODEC_OK) {
+ ReportError();
+ return false;
+ }
+ RTC_DCHECK(!av_context_);
+
+ // Initialize AVCodecContext.
+ av_context_.reset(avcodec_alloc_context3(nullptr));
+
+ av_context_->codec_type = AVMEDIA_TYPE_VIDEO;
+ av_context_->codec_id = AV_CODEC_ID_H264;
+ const RenderResolution& resolution = settings.max_render_resolution();
+ if (resolution.Valid()) {
+ av_context_->coded_width = resolution.Width();
+ av_context_->coded_height = resolution.Height();
+ }
+ av_context_->extradata = nullptr;
+ av_context_->extradata_size = 0;
+
+ // If this is ever increased, look at `av_context_->thread_safe_callbacks` and
+ // make it possible to disable the thread checker in the frame buffer pool.
+ av_context_->thread_count = 1;
+ av_context_->thread_type = FF_THREAD_SLICE;
+
+ // Function used by FFmpeg to get buffers to store decoded frames in.
+ av_context_->get_buffer2 = AVGetBuffer2;
+  // `get_buffer2` is called with the context; there, `opaque` can be used to
+  // get a pointer to `this`.
+ av_context_->opaque = this;
+
+ const AVCodec* codec = avcodec_find_decoder(av_context_->codec_id);
+ if (!codec) {
+ // This is an indication that FFmpeg has not been initialized or it has not
+ // been compiled/initialized with the correct set of codecs.
+ RTC_LOG(LS_ERROR) << "FFmpeg H.264 decoder not found.";
+ Release();
+ ReportError();
+ return false;
+ }
+ int res = avcodec_open2(av_context_.get(), codec, nullptr);
+ if (res < 0) {
+ RTC_LOG(LS_ERROR) << "avcodec_open2 error: " << res;
+ Release();
+ ReportError();
+ return false;
+ }
+
+ av_frame_.reset(av_frame_alloc());
+
+ if (absl::optional<int> buffer_pool_size = settings.buffer_pool_size()) {
+ if (!ffmpeg_buffer_pool_.Resize(*buffer_pool_size) ||
+ !output_buffer_pool_.Resize(*buffer_pool_size)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+int32_t H264DecoderImpl::Release() {
+ av_context_.reset();
+ av_frame_.reset();
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+int32_t H264DecoderImpl::RegisterDecodeCompleteCallback(
+ DecodedImageCallback* callback) {
+ decoded_image_callback_ = callback;
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+int32_t H264DecoderImpl::Decode(const EncodedImage& input_image,
+ bool /*missing_frames*/,
+ int64_t /*render_time_ms*/) {
+ if (!IsInitialized()) {
+ ReportError();
+ return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
+ }
+ if (!decoded_image_callback_) {
+ RTC_LOG(LS_WARNING)
+ << "Configure() has been called, but a callback function "
+ "has not been set with RegisterDecodeCompleteCallback()";
+ ReportError();
+ return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
+ }
+ if (!input_image.data() || !input_image.size()) {
+ ReportError();
+ return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+ }
+
+ ScopedAVPacket packet = MakeScopedAVPacket();
+ if (!packet) {
+ ReportError();
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+  // packet->data has a non-const type, but isn't modified by
+ // avcodec_send_packet.
+ packet->data = const_cast<uint8_t*>(input_image.data());
+ if (input_image.size() >
+ static_cast<size_t>(std::numeric_limits<int>::max())) {
+ ReportError();
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+ packet->size = static_cast<int>(input_image.size());
+ int64_t frame_timestamp_us = input_image.ntp_time_ms_ * 1000; // ms -> μs
+ av_context_->reordered_opaque = frame_timestamp_us;
+
+ int result = avcodec_send_packet(av_context_.get(), packet.get());
+
+ if (result < 0) {
+ RTC_LOG(LS_ERROR) << "avcodec_send_packet error: " << result;
+ ReportError();
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+
+ result = avcodec_receive_frame(av_context_.get(), av_frame_.get());
+ if (result < 0) {
+ RTC_LOG(LS_ERROR) << "avcodec_receive_frame error: " << result;
+ ReportError();
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+
+ // We don't expect reordering. Decoded frame timestamp should match
+ // the input one.
+ RTC_DCHECK_EQ(av_frame_->reordered_opaque, frame_timestamp_us);
+
+ // TODO(sakal): Maybe it is possible to get QP directly from FFmpeg.
+ h264_bitstream_parser_.ParseBitstream(input_image);
+ absl::optional<int> qp = h264_bitstream_parser_.GetLastSliceQp();
+
+  // Obtain the `VideoFrame` containing the decoded image.
+ VideoFrame* input_frame =
+ static_cast<VideoFrame*>(av_buffer_get_opaque(av_frame_->buf[0]));
+ RTC_DCHECK(input_frame);
+ rtc::scoped_refptr<VideoFrameBuffer> frame_buffer =
+ input_frame->video_frame_buffer();
+
+  // Obtain planar YUV buffer pointers according to the video frame buffer
+  // type.
+ const webrtc::PlanarYuvBuffer* planar_yuv_buffer = nullptr;
+ const webrtc::PlanarYuv8Buffer* planar_yuv8_buffer = nullptr;
+ const webrtc::PlanarYuv16BBuffer* planar_yuv16_buffer = nullptr;
+ VideoFrameBuffer::Type video_frame_buffer_type = frame_buffer->type();
+ switch (video_frame_buffer_type) {
+ case VideoFrameBuffer::Type::kI420:
+ planar_yuv_buffer = frame_buffer->GetI420();
+ planar_yuv8_buffer =
+ reinterpret_cast<const webrtc::PlanarYuv8Buffer*>(planar_yuv_buffer);
+ break;
+ case VideoFrameBuffer::Type::kI444:
+ planar_yuv_buffer = frame_buffer->GetI444();
+ planar_yuv8_buffer =
+ reinterpret_cast<const webrtc::PlanarYuv8Buffer*>(planar_yuv_buffer);
+ break;
+ case VideoFrameBuffer::Type::kI422:
+ planar_yuv_buffer = frame_buffer->GetI422();
+ planar_yuv8_buffer =
+ reinterpret_cast<const webrtc::PlanarYuv8Buffer*>(planar_yuv_buffer);
+ break;
+ case VideoFrameBuffer::Type::kI010:
+ planar_yuv_buffer = frame_buffer->GetI010();
+ planar_yuv16_buffer = reinterpret_cast<const webrtc::PlanarYuv16BBuffer*>(
+ planar_yuv_buffer);
+ break;
+ case VideoFrameBuffer::Type::kI210:
+ planar_yuv_buffer = frame_buffer->GetI210();
+ planar_yuv16_buffer = reinterpret_cast<const webrtc::PlanarYuv16BBuffer*>(
+ planar_yuv_buffer);
+ break;
+ default:
+ // If this code is changed to allow other video frame buffer type,
+ // make sure that the code below which wraps I420/I422/I444 buffer and
+ // code which converts to NV12 is changed
+ // to work with new video frame buffer type
+
+ RTC_LOG(LS_ERROR) << "frame_buffer type: "
+ << static_cast<int32_t>(video_frame_buffer_type)
+ << " is not supported!";
+ ReportError();
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+
+ // When needed, FFmpeg applies cropping by moving plane pointers and adjusting
+ // frame width/height. Ensure that cropped buffers lie within the allocated
+ // memory.
+ RTC_DCHECK_LE(av_frame_->width, planar_yuv_buffer->width());
+ RTC_DCHECK_LE(av_frame_->height, planar_yuv_buffer->height());
+ switch (video_frame_buffer_type) {
+ case VideoFrameBuffer::Type::kI420:
+ case VideoFrameBuffer::Type::kI444:
+ case VideoFrameBuffer::Type::kI422: {
+ RTC_DCHECK_GE(av_frame_->data[kYPlaneIndex], planar_yuv8_buffer->DataY());
+ RTC_DCHECK_LE(
+ av_frame_->data[kYPlaneIndex] +
+ av_frame_->linesize[kYPlaneIndex] * av_frame_->height,
+ planar_yuv8_buffer->DataY() +
+ planar_yuv8_buffer->StrideY() * planar_yuv8_buffer->height());
+ RTC_DCHECK_GE(av_frame_->data[kUPlaneIndex], planar_yuv8_buffer->DataU());
+ RTC_DCHECK_LE(
+ av_frame_->data[kUPlaneIndex] +
+ av_frame_->linesize[kUPlaneIndex] *
+ planar_yuv8_buffer->ChromaHeight(),
+ planar_yuv8_buffer->DataU() + planar_yuv8_buffer->StrideU() *
+ planar_yuv8_buffer->ChromaHeight());
+ RTC_DCHECK_GE(av_frame_->data[kVPlaneIndex], planar_yuv8_buffer->DataV());
+ RTC_DCHECK_LE(
+ av_frame_->data[kVPlaneIndex] +
+ av_frame_->linesize[kVPlaneIndex] *
+ planar_yuv8_buffer->ChromaHeight(),
+ planar_yuv8_buffer->DataV() + planar_yuv8_buffer->StrideV() *
+ planar_yuv8_buffer->ChromaHeight());
+ break;
+ }
+ case VideoFrameBuffer::Type::kI010:
+ case VideoFrameBuffer::Type::kI210: {
+ RTC_DCHECK_GE(
+ av_frame_->data[kYPlaneIndex],
+ reinterpret_cast<const uint8_t*>(planar_yuv16_buffer->DataY()));
+ RTC_DCHECK_LE(
+ av_frame_->data[kYPlaneIndex] +
+ av_frame_->linesize[kYPlaneIndex] * av_frame_->height,
+ reinterpret_cast<const uint8_t*>(planar_yuv16_buffer->DataY()) +
+ planar_yuv16_buffer->StrideY() * 2 *
+ planar_yuv16_buffer->height());
+ RTC_DCHECK_GE(
+ av_frame_->data[kUPlaneIndex],
+ reinterpret_cast<const uint8_t*>(planar_yuv16_buffer->DataU()));
+ RTC_DCHECK_LE(
+ av_frame_->data[kUPlaneIndex] +
+ av_frame_->linesize[kUPlaneIndex] *
+ planar_yuv16_buffer->ChromaHeight(),
+ reinterpret_cast<const uint8_t*>(planar_yuv16_buffer->DataU()) +
+ planar_yuv16_buffer->StrideU() * 2 *
+ planar_yuv16_buffer->ChromaHeight());
+ RTC_DCHECK_GE(
+ av_frame_->data[kVPlaneIndex],
+ reinterpret_cast<const uint8_t*>(planar_yuv16_buffer->DataV()));
+ RTC_DCHECK_LE(
+ av_frame_->data[kVPlaneIndex] +
+ av_frame_->linesize[kVPlaneIndex] *
+ planar_yuv16_buffer->ChromaHeight(),
+ reinterpret_cast<const uint8_t*>(planar_yuv16_buffer->DataV()) +
+ planar_yuv16_buffer->StrideV() * 2 *
+ planar_yuv16_buffer->ChromaHeight());
+ break;
+ }
+ default:
+ RTC_LOG(LS_ERROR) << "frame_buffer type: "
+ << static_cast<int32_t>(video_frame_buffer_type)
+ << " is not supported!";
+ ReportError();
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+
+ rtc::scoped_refptr<webrtc::VideoFrameBuffer> cropped_buffer;
+ switch (video_frame_buffer_type) {
+ case VideoFrameBuffer::Type::kI420:
+ cropped_buffer = WrapI420Buffer(
+ av_frame_->width, av_frame_->height, av_frame_->data[kYPlaneIndex],
+ av_frame_->linesize[kYPlaneIndex], av_frame_->data[kUPlaneIndex],
+ av_frame_->linesize[kUPlaneIndex], av_frame_->data[kVPlaneIndex],
+ av_frame_->linesize[kVPlaneIndex],
+ // To keep reference alive.
+ [frame_buffer] {});
+ break;
+ case VideoFrameBuffer::Type::kI444:
+ cropped_buffer = WrapI444Buffer(
+ av_frame_->width, av_frame_->height, av_frame_->data[kYPlaneIndex],
+ av_frame_->linesize[kYPlaneIndex], av_frame_->data[kUPlaneIndex],
+ av_frame_->linesize[kUPlaneIndex], av_frame_->data[kVPlaneIndex],
+ av_frame_->linesize[kVPlaneIndex],
+ // To keep reference alive.
+ [frame_buffer] {});
+ break;
+ case VideoFrameBuffer::Type::kI422:
+ cropped_buffer = WrapI422Buffer(
+ av_frame_->width, av_frame_->height, av_frame_->data[kYPlaneIndex],
+ av_frame_->linesize[kYPlaneIndex], av_frame_->data[kUPlaneIndex],
+ av_frame_->linesize[kUPlaneIndex], av_frame_->data[kVPlaneIndex],
+ av_frame_->linesize[kVPlaneIndex],
+ // To keep reference alive.
+ [frame_buffer] {});
+ break;
+ case VideoFrameBuffer::Type::kI010:
+ cropped_buffer = WrapI010Buffer(
+ av_frame_->width, av_frame_->height,
+ reinterpret_cast<const uint16_t*>(av_frame_->data[kYPlaneIndex]),
+ av_frame_->linesize[kYPlaneIndex] / 2,
+ reinterpret_cast<const uint16_t*>(av_frame_->data[kUPlaneIndex]),
+ av_frame_->linesize[kUPlaneIndex] / 2,
+ reinterpret_cast<const uint16_t*>(av_frame_->data[kVPlaneIndex]),
+ av_frame_->linesize[kVPlaneIndex] / 2,
+ // To keep reference alive.
+ [frame_buffer] {});
+ break;
+ case VideoFrameBuffer::Type::kI210:
+ cropped_buffer = WrapI210Buffer(
+ av_frame_->width, av_frame_->height,
+ reinterpret_cast<const uint16_t*>(av_frame_->data[kYPlaneIndex]),
+ av_frame_->linesize[kYPlaneIndex] / 2,
+ reinterpret_cast<const uint16_t*>(av_frame_->data[kUPlaneIndex]),
+ av_frame_->linesize[kUPlaneIndex] / 2,
+ reinterpret_cast<const uint16_t*>(av_frame_->data[kVPlaneIndex]),
+ av_frame_->linesize[kVPlaneIndex] / 2,
+ // To keep reference alive.
+ [frame_buffer] {});
+ break;
+ default:
+ RTC_LOG(LS_ERROR) << "frame_buffer type: "
+ << static_cast<int32_t>(video_frame_buffer_type)
+ << " is not supported!";
+ ReportError();
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+
+  // The preference for the NV12 output format is ignored if the actual format
+  // isn't trivially convertible to it.
+ if (preferred_output_format_ == VideoFrameBuffer::Type::kNV12 &&
+ video_frame_buffer_type == VideoFrameBuffer::Type::kI420) {
+ auto nv12_buffer = output_buffer_pool_.CreateNV12Buffer(
+ cropped_buffer->width(), cropped_buffer->height());
+ const PlanarYuv8Buffer* cropped_planar_yuv_buffer =
+ cropped_buffer->GetI420();
+ libyuv::I420ToNV12(cropped_planar_yuv_buffer->DataY(),
+ cropped_planar_yuv_buffer->StrideY(),
+ cropped_planar_yuv_buffer->DataU(),
+ cropped_planar_yuv_buffer->StrideU(),
+ cropped_planar_yuv_buffer->DataV(),
+ cropped_planar_yuv_buffer->StrideV(),
+ nv12_buffer->MutableDataY(), nv12_buffer->StrideY(),
+ nv12_buffer->MutableDataUV(), nv12_buffer->StrideUV(),
+                       cropped_buffer->width(), cropped_buffer->height());
+ cropped_buffer = nv12_buffer;
+ }
+
+ // Pass on color space from input frame if explicitly specified.
+ const ColorSpace& color_space =
+ input_image.ColorSpace() ? *input_image.ColorSpace()
+ : ExtractH264ColorSpace(av_context_.get());
+
+ VideoFrame decoded_frame = VideoFrame::Builder()
+ .set_video_frame_buffer(cropped_buffer)
+ .set_timestamp_rtp(input_image.Timestamp())
+ .set_color_space(color_space)
+ .build();
+
+ // Return decoded frame.
+ // TODO(nisse): Timestamp and rotation are all zero here. Change decoder
+ // interface to pass a VideoFrameBuffer instead of a VideoFrame?
+ decoded_image_callback_->Decoded(decoded_frame, absl::nullopt, qp);
+
+ // Stop referencing it, possibly freeing `input_frame`.
+ av_frame_unref(av_frame_.get());
+ input_frame = nullptr;
+
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+const char* H264DecoderImpl::ImplementationName() const {
+ return "FFmpeg";
+}
+
+bool H264DecoderImpl::IsInitialized() const {
+ return av_context_ != nullptr;
+}
+
+void H264DecoderImpl::ReportInit() {
+ if (has_reported_init_)
+ return;
+ RTC_HISTOGRAM_ENUMERATION("WebRTC.Video.H264DecoderImpl.Event",
+ kH264DecoderEventInit, kH264DecoderEventMax);
+ has_reported_init_ = true;
+}
+
+void H264DecoderImpl::ReportError() {
+ if (has_reported_error_)
+ return;
+ RTC_HISTOGRAM_ENUMERATION("WebRTC.Video.H264DecoderImpl.Event",
+ kH264DecoderEventError, kH264DecoderEventMax);
+ has_reported_error_ = true;
+}
+
+} // namespace webrtc
+
+#endif // WEBRTC_USE_H264
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/h264/h264_decoder_impl.h b/third_party/libwebrtc/modules/video_coding/codecs/h264/h264_decoder_impl.h
new file mode 100644
index 0000000000..e5d9fd3871
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/h264/h264_decoder_impl.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ *
+ */
+
+#ifndef MODULES_VIDEO_CODING_CODECS_H264_H264_DECODER_IMPL_H_
+#define MODULES_VIDEO_CODING_CODECS_H264_H264_DECODER_IMPL_H_
+
+// Everything declared in this header is only required when WebRTC is
+// built with H264 support. Please do not move anything out of the
+// #ifdef unless needed and tested.
+#ifdef WEBRTC_USE_H264
+
+#if defined(WEBRTC_WIN) && !defined(__clang__)
+#error "See: bugs.webrtc.org/9213#c13."
+#endif
+
+#include <memory>
+
+#include "modules/video_coding/codecs/h264/include/h264.h"
+
+// CAVEAT: According to the ffmpeg docs for avcodec_send_packet, ffmpeg
+// requires a few extra padding bytes after the end of input. In addition, the
+// docs for AV_INPUT_BUFFER_PADDING_SIZE say "If the first 23 bits of the
+// additional bytes are not 0, then damaged MPEG bitstreams could cause
+// overread and segfault."
+//
+// WebRTC doesn't ensure any such padding, and instead REQUIRES ffmpeg to be
+// compiled with CONFIG_SAFE_BITSTREAM_READER, which is intended to eliminate
+// out-of-bounds reads. The ffmpeg docs don't say explicitly what effect this
+// flag has on the h.264 decoder or avcodec_send_packet, though, so this in
+// some way depends on undocumented behavior. If any problems turn up, we may
+// have to add an extra copy operation, to enforce padding before buffers are
+// passed to ffmpeg.
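+//
+// A minimal sketch of such a copy, should it become necessary (hypothetical,
+// not part of this implementation; `input_image` as used in Decode()):
+//
+//   rtc::Buffer padded(input_image.size() + AV_INPUT_BUFFER_PADDING_SIZE);
+//   memcpy(padded.data(), input_image.data(), input_image.size());
+//   memset(padded.data() + input_image.size(), 0,
+//          AV_INPUT_BUFFER_PADDING_SIZE);
+//   packet->data = padded.data();
+//   packet->size = static_cast<int>(input_image.size());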
+
+extern "C" {
+#include "third_party/ffmpeg/libavcodec/avcodec.h"
+} // extern "C"
+
+#include "common_video/h264/h264_bitstream_parser.h"
+#include "common_video/include/video_frame_buffer_pool.h"
+
+namespace webrtc {
+
+struct AVCodecContextDeleter {
+ void operator()(AVCodecContext* ptr) const { avcodec_free_context(&ptr); }
+};
+struct AVFrameDeleter {
+ void operator()(AVFrame* ptr) const { av_frame_free(&ptr); }
+};
+
+class H264DecoderImpl : public H264Decoder {
+ public:
+ H264DecoderImpl();
+ ~H264DecoderImpl() override;
+
+ bool Configure(const Settings& settings) override;
+ int32_t Release() override;
+
+ int32_t RegisterDecodeCompleteCallback(
+ DecodedImageCallback* callback) override;
+
+  // `missing_frames` and `render_time_ms` are ignored.
+ int32_t Decode(const EncodedImage& input_image,
+ bool /*missing_frames*/,
+ int64_t render_time_ms = -1) override;
+
+ const char* ImplementationName() const override;
+
+ private:
+ // Called by FFmpeg when it needs a frame buffer to store decoded frames in.
+  // The `VideoFrame`s returned by FFmpeg at `Decode` originate from here.
+  // Their buffers are reference counted and freed by FFmpeg using
+  // `AVFreeBuffer2`.
+ static int AVGetBuffer2(AVCodecContext* context,
+ AVFrame* av_frame,
+ int flags);
+ // Called by FFmpeg when it is done with a video frame, see `AVGetBuffer2`.
+ static void AVFreeBuffer2(void* opaque, uint8_t* data);
+
+ bool IsInitialized() const;
+
+ // Reports statistics with histograms.
+ void ReportInit();
+ void ReportError();
+
+  // Used by FFmpeg via `AVGetBuffer2()` to allocate planar YUV images.
+ VideoFrameBufferPool ffmpeg_buffer_pool_;
+ // Used to allocate NV12 images if NV12 output is preferred.
+ VideoFrameBufferPool output_buffer_pool_;
+ std::unique_ptr<AVCodecContext, AVCodecContextDeleter> av_context_;
+ std::unique_ptr<AVFrame, AVFrameDeleter> av_frame_;
+
+ DecodedImageCallback* decoded_image_callback_;
+
+ bool has_reported_init_;
+ bool has_reported_error_;
+
+ webrtc::H264BitstreamParser h264_bitstream_parser_;
+
+ // Decoder should produce this format if possible.
+ const VideoFrameBuffer::Type preferred_output_format_;
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_USE_H264
+
+#endif // MODULES_VIDEO_CODING_CODECS_H264_H264_DECODER_IMPL_H_
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/h264/h264_encoder_impl.cc b/third_party/libwebrtc/modules/video_coding/codecs/h264/h264_encoder_impl.cc
new file mode 100644
index 0000000000..f6d52c6539
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/h264/h264_encoder_impl.cc
@@ -0,0 +1,653 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ *
+ */
+
+// Everything declared/defined in this file is only required when WebRTC is
+// built with H264 support. Please do not move anything out of the
+// #ifdef unless needed and tested.
+#ifdef WEBRTC_USE_H264
+
+#include "modules/video_coding/codecs/h264/h264_encoder_impl.h"
+
+#include <algorithm>
+#include <limits>
+#include <string>
+
+#include "absl/strings/match.h"
+#include "common_video/libyuv/include/webrtc_libyuv.h"
+#include "modules/video_coding/utility/simulcast_rate_allocator.h"
+#include "modules/video_coding/utility/simulcast_utility.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/time_utils.h"
+#include "system_wrappers/include/metrics.h"
+#include "third_party/libyuv/include/libyuv/convert.h"
+#include "third_party/libyuv/include/libyuv/scale.h"
+#include "third_party/openh264/src/codec/api/svc/codec_api.h"
+#include "third_party/openh264/src/codec/api/svc/codec_app_def.h"
+#include "third_party/openh264/src/codec/api/svc/codec_def.h"
+#include "third_party/openh264/src/codec/api/svc/codec_ver.h"
+
+namespace webrtc {
+
+namespace {
+
+const bool kOpenH264EncoderDetailedLogging = false;
+
+// QP scaling thresholds.
+static const int kLowH264QpThreshold = 24;
+static const int kHighH264QpThreshold = 37;
+
+// Used by histograms. Values of entries should not be changed.
+enum H264EncoderImplEvent {
+ kH264EncoderEventInit = 0,
+ kH264EncoderEventError = 1,
+ kH264EncoderEventMax = 16,
+};
+
+int NumberOfThreads(int width, int height, int number_of_cores) {
+ // TODO(hbos): In Chromium, multiple threads do not work with sandbox on Mac,
+ // see crbug.com/583348. Until further investigated, only use one thread.
+ // if (width * height >= 1920 * 1080 && number_of_cores > 8) {
+ // return 8; // 8 threads for 1080p on high perf machines.
+ // } else if (width * height > 1280 * 960 && number_of_cores >= 6) {
+ // return 3; // 3 threads for 1080p.
+ // } else if (width * height > 640 * 480 && number_of_cores >= 3) {
+ // return 2; // 2 threads for qHD/HD.
+ // } else {
+ // return 1; // 1 thread for VGA or less.
+ // }
+  // TODO(sprang): Also check sSliceArgument.uiSliceNum in GetEncoderParams(),
+ // before enabling multithreading here.
+ return 1;
+}
+
+VideoFrameType ConvertToVideoFrameType(EVideoFrameType type) {
+ switch (type) {
+ case videoFrameTypeIDR:
+ return VideoFrameType::kVideoFrameKey;
+ case videoFrameTypeSkip:
+ case videoFrameTypeI:
+ case videoFrameTypeP:
+ case videoFrameTypeIPMixed:
+ return VideoFrameType::kVideoFrameDelta;
+ case videoFrameTypeInvalid:
+ break;
+ }
+ RTC_DCHECK_NOTREACHED() << "Unexpected/invalid frame type: " << type;
+ return VideoFrameType::kEmptyFrame;
+}
+
+} // namespace
+
+// Helper method used by H264EncoderImpl::Encode.
+// Copies the encoded bytes from `info` to `encoded_image`. The
+// `encoded_image->_buffer` may be deleted and reallocated if a bigger buffer is
+// required.
+//
+// After OpenH264 encoding, the encoded bytes are stored in `info` spread out
+// over a number of layers and "NAL units". Each NAL unit is a fragment starting
+// with the four-byte start code {0,0,0,1}. All of this data (including the
+// start codes) is copied to the `encoded_image->_buffer`.
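+//
+// For example, a layer with two NAL units is laid out in pBsBuf as
+//   {0,0,0,1} <NAL 1 payload> {0,0,0,1} <NAL 2 payload>
+// where each pNalLengthInByte[nal] includes the four start-code bytes.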
+static void RtpFragmentize(EncodedImage* encoded_image, SFrameBSInfo* info) {
+ // Calculate minimum buffer size required to hold encoded data.
+ size_t required_capacity = 0;
+ size_t fragments_count = 0;
+ for (int layer = 0; layer < info->iLayerNum; ++layer) {
+ const SLayerBSInfo& layerInfo = info->sLayerInfo[layer];
+ for (int nal = 0; nal < layerInfo.iNalCount; ++nal, ++fragments_count) {
+ RTC_CHECK_GE(layerInfo.pNalLengthInByte[nal], 0);
+ // Ensure `required_capacity` will not overflow.
+ RTC_CHECK_LE(layerInfo.pNalLengthInByte[nal],
+ std::numeric_limits<size_t>::max() - required_capacity);
+ required_capacity += layerInfo.pNalLengthInByte[nal];
+ }
+ }
+ auto buffer = EncodedImageBuffer::Create(required_capacity);
+ encoded_image->SetEncodedData(buffer);
+
+ // Iterate layers and NAL units, note each NAL unit as a fragment and copy
+ // the data to `encoded_image->_buffer`.
+ const uint8_t start_code[4] = {0, 0, 0, 1};
+ size_t frag = 0;
+ encoded_image->set_size(0);
+ for (int layer = 0; layer < info->iLayerNum; ++layer) {
+ const SLayerBSInfo& layerInfo = info->sLayerInfo[layer];
+ // Iterate NAL units making up this layer, noting fragments.
+ size_t layer_len = 0;
+ for (int nal = 0; nal < layerInfo.iNalCount; ++nal, ++frag) {
+ // Because the sum of all layer lengths, `required_capacity`, fits in a
+ // `size_t`, we know that any indices in-between will not overflow.
+ RTC_DCHECK_GE(layerInfo.pNalLengthInByte[nal], 4);
+ RTC_DCHECK_EQ(layerInfo.pBsBuf[layer_len + 0], start_code[0]);
+ RTC_DCHECK_EQ(layerInfo.pBsBuf[layer_len + 1], start_code[1]);
+ RTC_DCHECK_EQ(layerInfo.pBsBuf[layer_len + 2], start_code[2]);
+ RTC_DCHECK_EQ(layerInfo.pBsBuf[layer_len + 3], start_code[3]);
+ layer_len += layerInfo.pNalLengthInByte[nal];
+ }
+ // Copy the entire layer's data (including start codes).
+ memcpy(buffer->data() + encoded_image->size(), layerInfo.pBsBuf, layer_len);
+ encoded_image->set_size(encoded_image->size() + layer_len);
+ }
+}
+
+H264EncoderImpl::H264EncoderImpl(const cricket::VideoCodec& codec)
+ : packetization_mode_(H264PacketizationMode::SingleNalUnit),
+ max_payload_size_(0),
+ number_of_cores_(0),
+ encoded_image_callback_(nullptr),
+ has_reported_init_(false),
+ has_reported_error_(false) {
+ RTC_CHECK(absl::EqualsIgnoreCase(codec.name, cricket::kH264CodecName));
+ std::string packetization_mode_string;
+ if (codec.GetParam(cricket::kH264FmtpPacketizationMode,
+ &packetization_mode_string) &&
+ packetization_mode_string == "1") {
+ packetization_mode_ = H264PacketizationMode::NonInterleaved;
+ }
+ downscaled_buffers_.reserve(kMaxSimulcastStreams - 1);
+ encoded_images_.reserve(kMaxSimulcastStreams);
+ encoders_.reserve(kMaxSimulcastStreams);
+ configurations_.reserve(kMaxSimulcastStreams);
+ tl0sync_limit_.reserve(kMaxSimulcastStreams);
+}
+
+H264EncoderImpl::~H264EncoderImpl() {
+ Release();
+}
+
+int32_t H264EncoderImpl::InitEncode(const VideoCodec* inst,
+ const VideoEncoder::Settings& settings) {
+ ReportInit();
+ if (!inst || inst->codecType != kVideoCodecH264) {
+ ReportError();
+ return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+ }
+ if (inst->maxFramerate == 0) {
+ ReportError();
+ return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+ }
+ if (inst->width < 1 || inst->height < 1) {
+ ReportError();
+ return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+ }
+
+ int32_t release_ret = Release();
+ if (release_ret != WEBRTC_VIDEO_CODEC_OK) {
+ ReportError();
+ return release_ret;
+ }
+
+ int number_of_streams = SimulcastUtility::NumberOfSimulcastStreams(*inst);
+ bool doing_simulcast = (number_of_streams > 1);
+
+ if (doing_simulcast &&
+ !SimulcastUtility::ValidSimulcastParameters(*inst, number_of_streams)) {
+ return WEBRTC_VIDEO_CODEC_ERR_SIMULCAST_PARAMETERS_NOT_SUPPORTED;
+ }
+ downscaled_buffers_.resize(number_of_streams - 1);
+ encoded_images_.resize(number_of_streams);
+ encoders_.resize(number_of_streams);
+ pictures_.resize(number_of_streams);
+ configurations_.resize(number_of_streams);
+ tl0sync_limit_.resize(number_of_streams);
+
+ number_of_cores_ = settings.number_of_cores;
+ max_payload_size_ = settings.max_payload_size;
+ codec_ = *inst;
+
+  // The code expects simulcastStream resolutions to be correct; make sure they
+  // are filled in even when there are no simulcast layers.
+ if (codec_.numberOfSimulcastStreams == 0) {
+ codec_.simulcastStream[0].width = codec_.width;
+ codec_.simulcastStream[0].height = codec_.height;
+ }
+
+ for (int i = 0, idx = number_of_streams - 1; i < number_of_streams;
+ ++i, --idx) {
+ ISVCEncoder* openh264_encoder;
+ // Create encoder.
+ if (WelsCreateSVCEncoder(&openh264_encoder) != 0) {
+ // Failed to create encoder.
+ RTC_LOG(LS_ERROR) << "Failed to create OpenH264 encoder";
+ RTC_DCHECK(!openh264_encoder);
+ Release();
+ ReportError();
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+ RTC_DCHECK(openh264_encoder);
+ if (kOpenH264EncoderDetailedLogging) {
+ int trace_level = WELS_LOG_DETAIL;
+ openh264_encoder->SetOption(ENCODER_OPTION_TRACE_LEVEL, &trace_level);
+ }
+    // Otherwise WELS_LOG_DEFAULT is used.
+
+ // Store h264 encoder.
+ encoders_[i] = openh264_encoder;
+
+ // Set internal settings from codec_settings
+ configurations_[i].simulcast_idx = idx;
+ configurations_[i].sending = false;
+ configurations_[i].width = codec_.simulcastStream[idx].width;
+ configurations_[i].height = codec_.simulcastStream[idx].height;
+ configurations_[i].max_frame_rate = static_cast<float>(codec_.maxFramerate);
+ configurations_[i].frame_dropping_on = codec_.GetFrameDropEnabled();
+ configurations_[i].key_frame_interval = codec_.H264()->keyFrameInterval;
+ configurations_[i].num_temporal_layers =
+ std::max(codec_.H264()->numberOfTemporalLayers,
+ codec_.simulcastStream[idx].numberOfTemporalLayers);
+
+ // Create downscaled image buffers.
+ if (i > 0) {
+ downscaled_buffers_[i - 1] = I420Buffer::Create(
+ configurations_[i].width, configurations_[i].height,
+ configurations_[i].width, configurations_[i].width / 2,
+ configurations_[i].width / 2);
+ }
+
+    // codec_settings uses kbits/second; the encoder uses bits/second.
+ configurations_[i].max_bps = codec_.maxBitrate * 1000;
+ configurations_[i].target_bps = codec_.startBitrate * 1000;
+
+ // Create encoder parameters based on the layer configuration.
+ SEncParamExt encoder_params = CreateEncoderParams(i);
+
+ // Initialize.
+ if (openh264_encoder->InitializeExt(&encoder_params) != 0) {
+ RTC_LOG(LS_ERROR) << "Failed to initialize OpenH264 encoder";
+ Release();
+ ReportError();
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+ // TODO(pbos): Base init params on these values before submitting.
+ int video_format = EVideoFormatType::videoFormatI420;
+ openh264_encoder->SetOption(ENCODER_OPTION_DATAFORMAT, &video_format);
+
+ // Initialize encoded image. Default buffer size: size of unencoded data.
+
+ const size_t new_capacity =
+ CalcBufferSize(VideoType::kI420, codec_.simulcastStream[idx].width,
+ codec_.simulcastStream[idx].height);
+ encoded_images_[i].SetEncodedData(EncodedImageBuffer::Create(new_capacity));
+ encoded_images_[i]._encodedWidth = codec_.simulcastStream[idx].width;
+ encoded_images_[i]._encodedHeight = codec_.simulcastStream[idx].height;
+ encoded_images_[i].set_size(0);
+
+ tl0sync_limit_[i] = configurations_[i].num_temporal_layers;
+ }
+
+ SimulcastRateAllocator init_allocator(codec_);
+ VideoBitrateAllocation allocation =
+ init_allocator.Allocate(VideoBitrateAllocationParameters(
+ DataRate::KilobitsPerSec(codec_.startBitrate), codec_.maxFramerate));
+ SetRates(RateControlParameters(allocation, codec_.maxFramerate));
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+int32_t H264EncoderImpl::Release() {
+ while (!encoders_.empty()) {
+ ISVCEncoder* openh264_encoder = encoders_.back();
+ if (openh264_encoder) {
+ RTC_CHECK_EQ(0, openh264_encoder->Uninitialize());
+ WelsDestroySVCEncoder(openh264_encoder);
+ }
+ encoders_.pop_back();
+ }
+ downscaled_buffers_.clear();
+ configurations_.clear();
+ encoded_images_.clear();
+ pictures_.clear();
+ tl0sync_limit_.clear();
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+int32_t H264EncoderImpl::RegisterEncodeCompleteCallback(
+ EncodedImageCallback* callback) {
+ encoded_image_callback_ = callback;
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+void H264EncoderImpl::SetRates(const RateControlParameters& parameters) {
+ if (encoders_.empty()) {
+ RTC_LOG(LS_WARNING) << "SetRates() while uninitialized.";
+ return;
+ }
+
+ if (parameters.framerate_fps < 1.0) {
+ RTC_LOG(LS_WARNING) << "Invalid frame rate: " << parameters.framerate_fps;
+ return;
+ }
+
+ if (parameters.bitrate.get_sum_bps() == 0) {
+ // Encoder paused, turn off all encoding.
+ for (size_t i = 0; i < configurations_.size(); ++i) {
+ configurations_[i].SetStreamState(false);
+ }
+ return;
+ }
+
+ codec_.maxFramerate = static_cast<uint32_t>(parameters.framerate_fps);
+
+ size_t stream_idx = encoders_.size() - 1;
+ for (size_t i = 0; i < encoders_.size(); ++i, --stream_idx) {
+ // Update layer config.
+ configurations_[i].target_bps =
+ parameters.bitrate.GetSpatialLayerSum(stream_idx);
+ configurations_[i].max_frame_rate = parameters.framerate_fps;
+
+ if (configurations_[i].target_bps) {
+ configurations_[i].SetStreamState(true);
+
+ // Update h264 encoder.
+ SBitrateInfo target_bitrate;
+ memset(&target_bitrate, 0, sizeof(SBitrateInfo));
+      target_bitrate.iLayer = SPATIAL_LAYER_ALL;
+ target_bitrate.iBitrate = configurations_[i].target_bps;
+ encoders_[i]->SetOption(ENCODER_OPTION_BITRATE, &target_bitrate);
+ encoders_[i]->SetOption(ENCODER_OPTION_FRAME_RATE,
+ &configurations_[i].max_frame_rate);
+ } else {
+ configurations_[i].SetStreamState(false);
+ }
+ }
+}
+
+int32_t H264EncoderImpl::Encode(
+ const VideoFrame& input_frame,
+ const std::vector<VideoFrameType>* frame_types) {
+ if (encoders_.empty()) {
+ ReportError();
+ return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
+ }
+ if (!encoded_image_callback_) {
+ RTC_LOG(LS_WARNING)
+ << "InitEncode() has been called, but a callback function "
+ "has not been set with RegisterEncodeCompleteCallback()";
+ ReportError();
+ return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
+ }
+
+ rtc::scoped_refptr<I420BufferInterface> frame_buffer =
+ input_frame.video_frame_buffer()->ToI420();
+ if (!frame_buffer) {
+ RTC_LOG(LS_ERROR) << "Failed to convert "
+ << VideoFrameBufferTypeToString(
+ input_frame.video_frame_buffer()->type())
+ << " image to I420. Can't encode frame.";
+ return WEBRTC_VIDEO_CODEC_ENCODER_FAILURE;
+ }
+ RTC_CHECK(frame_buffer->type() == VideoFrameBuffer::Type::kI420 ||
+ frame_buffer->type() == VideoFrameBuffer::Type::kI420A);
+
+ bool send_key_frame = false;
+ for (size_t i = 0; i < configurations_.size(); ++i) {
+ if (configurations_[i].key_frame_request && configurations_[i].sending) {
+ send_key_frame = true;
+ break;
+ }
+ }
+
+ if (!send_key_frame && frame_types) {
+ for (size_t i = 0; i < configurations_.size(); ++i) {
+ const size_t simulcast_idx =
+ static_cast<size_t>(configurations_[i].simulcast_idx);
+ if (configurations_[i].sending && simulcast_idx < frame_types->size() &&
+ (*frame_types)[simulcast_idx] == VideoFrameType::kVideoFrameKey) {
+ send_key_frame = true;
+ break;
+ }
+ }
+ }
+
+ RTC_DCHECK_EQ(configurations_[0].width, frame_buffer->width());
+ RTC_DCHECK_EQ(configurations_[0].height, frame_buffer->height());
+
+ // Encode image for each layer.
+ for (size_t i = 0; i < encoders_.size(); ++i) {
+ // EncodeFrame input.
+ pictures_[i] = {0};
+ pictures_[i].iPicWidth = configurations_[i].width;
+ pictures_[i].iPicHeight = configurations_[i].height;
+ pictures_[i].iColorFormat = EVideoFormatType::videoFormatI420;
+ pictures_[i].uiTimeStamp = input_frame.ntp_time_ms();
+    // Downscale the image for the second and subsequent layers.
+ if (i == 0) {
+ pictures_[i].iStride[0] = frame_buffer->StrideY();
+ pictures_[i].iStride[1] = frame_buffer->StrideU();
+ pictures_[i].iStride[2] = frame_buffer->StrideV();
+ pictures_[i].pData[0] = const_cast<uint8_t*>(frame_buffer->DataY());
+ pictures_[i].pData[1] = const_cast<uint8_t*>(frame_buffer->DataU());
+ pictures_[i].pData[2] = const_cast<uint8_t*>(frame_buffer->DataV());
+ } else {
+ pictures_[i].iStride[0] = downscaled_buffers_[i - 1]->StrideY();
+ pictures_[i].iStride[1] = downscaled_buffers_[i - 1]->StrideU();
+ pictures_[i].iStride[2] = downscaled_buffers_[i - 1]->StrideV();
+ pictures_[i].pData[0] =
+ const_cast<uint8_t*>(downscaled_buffers_[i - 1]->DataY());
+ pictures_[i].pData[1] =
+ const_cast<uint8_t*>(downscaled_buffers_[i - 1]->DataU());
+ pictures_[i].pData[2] =
+ const_cast<uint8_t*>(downscaled_buffers_[i - 1]->DataV());
+      // Scale the previous layer's image down by the downsampling factor.
+ libyuv::I420Scale(pictures_[i - 1].pData[0], pictures_[i - 1].iStride[0],
+ pictures_[i - 1].pData[1], pictures_[i - 1].iStride[1],
+ pictures_[i - 1].pData[2], pictures_[i - 1].iStride[2],
+ configurations_[i - 1].width,
+ configurations_[i - 1].height, pictures_[i].pData[0],
+ pictures_[i].iStride[0], pictures_[i].pData[1],
+ pictures_[i].iStride[1], pictures_[i].pData[2],
+ pictures_[i].iStride[2], configurations_[i].width,
+ configurations_[i].height, libyuv::kFilterBox);
+ }
+
+ if (!configurations_[i].sending) {
+ continue;
+ }
+ if (frame_types != nullptr) {
+ // Skip frame?
+ if ((*frame_types)[i] == VideoFrameType::kEmptyFrame) {
+ continue;
+ }
+ }
+ if (send_key_frame) {
+ // API doc says ForceIntraFrame(false) does nothing, but calling this
+ // function forces a key frame regardless of the `bIDR` argument's value.
+ // (If every frame is a key frame we get lag/delays.)
+ encoders_[i]->ForceIntraFrame(true);
+ configurations_[i].key_frame_request = false;
+ }
+ // EncodeFrame output.
+ SFrameBSInfo info;
+ memset(&info, 0, sizeof(SFrameBSInfo));
+
+ // Encode!
+ int enc_ret = encoders_[i]->EncodeFrame(&pictures_[i], &info);
+ if (enc_ret != 0) {
+ RTC_LOG(LS_ERROR)
+ << "OpenH264 frame encoding failed, EncodeFrame returned " << enc_ret
+ << ".";
+ ReportError();
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+
+ encoded_images_[i]._encodedWidth = configurations_[i].width;
+ encoded_images_[i]._encodedHeight = configurations_[i].height;
+ encoded_images_[i].SetTimestamp(input_frame.timestamp());
+ encoded_images_[i].SetColorSpace(input_frame.color_space());
+ encoded_images_[i]._frameType = ConvertToVideoFrameType(info.eFrameType);
+ encoded_images_[i].SetSpatialIndex(configurations_[i].simulcast_idx);
+
+    // Split the encoded image up into fragments. This also updates
+    // `encoded_images_[i]`.
+ RtpFragmentize(&encoded_images_[i], &info);
+
+    // The encoder can skip frames to save bandwidth, in which case
+    // `encoded_images_[i].size()` == 0.
+ if (encoded_images_[i].size() > 0) {
+ // Parse QP.
+ h264_bitstream_parser_.ParseBitstream(encoded_images_[i]);
+ encoded_images_[i].qp_ =
+ h264_bitstream_parser_.GetLastSliceQp().value_or(-1);
+
+ // Deliver encoded image.
+ CodecSpecificInfo codec_specific;
+ codec_specific.codecType = kVideoCodecH264;
+ codec_specific.codecSpecific.H264.packetization_mode =
+ packetization_mode_;
+ codec_specific.codecSpecific.H264.temporal_idx = kNoTemporalIdx;
+ codec_specific.codecSpecific.H264.idr_frame =
+ info.eFrameType == videoFrameTypeIDR;
+ codec_specific.codecSpecific.H264.base_layer_sync = false;
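+        // `base_layer_sync` marks the first frame of each upper temporal
+        // layer after the most recent TL0 frame, i.e. (in OpenH264's
+        // hierarchical-P pattern) frames that predict only from layers that
+        // have themselves synced back to TL0.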
+ if (configurations_[i].num_temporal_layers > 1) {
+ const uint8_t tid = info.sLayerInfo[0].uiTemporalId;
+ codec_specific.codecSpecific.H264.temporal_idx = tid;
+ codec_specific.codecSpecific.H264.base_layer_sync =
+ tid > 0 && tid < tl0sync_limit_[i];
+ if (codec_specific.codecSpecific.H264.base_layer_sync) {
+ tl0sync_limit_[i] = tid;
+ }
+ if (tid == 0) {
+ tl0sync_limit_[i] = configurations_[i].num_temporal_layers;
+ }
+ }
+ encoded_image_callback_->OnEncodedImage(encoded_images_[i],
+ &codec_specific);
+ }
+ }
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+// Initialization parameters.
+// There are two ways to initialize: with SEncParamBase (cleared with
+// memset(&p, 0, sizeof(SEncParamBase))), which is used by Initialize(), and
+// with SEncParamExt, a superset of SEncParamBase (cleared with
+// GetDefaultParams()), which is used by InitializeExt().
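+// A minimal sketch of the two paths, with hypothetical local names (`base`,
+// `ext`, `encoder`); this method uses only the SEncParamExt path:
+//   SEncParamBase base;
+//   memset(&base, 0, sizeof(SEncParamBase));  // for ISVCEncoder::Initialize()
+//   SEncParamExt ext;
+//   encoder->GetDefaultParams(&ext);  // for ISVCEncoder::InitializeExt()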
+SEncParamExt H264EncoderImpl::CreateEncoderParams(size_t i) const {
+ SEncParamExt encoder_params;
+ encoders_[i]->GetDefaultParams(&encoder_params);
+ if (codec_.mode == VideoCodecMode::kRealtimeVideo) {
+ encoder_params.iUsageType = CAMERA_VIDEO_REAL_TIME;
+ } else if (codec_.mode == VideoCodecMode::kScreensharing) {
+ encoder_params.iUsageType = SCREEN_CONTENT_REAL_TIME;
+ } else {
+ RTC_DCHECK_NOTREACHED();
+ }
+ encoder_params.iPicWidth = configurations_[i].width;
+ encoder_params.iPicHeight = configurations_[i].height;
+ encoder_params.iTargetBitrate = configurations_[i].target_bps;
+ // Keep unspecified. WebRTC's max codec bitrate is not the same setting
+ // as OpenH264's iMaxBitrate. More details in https://crbug.com/webrtc/11543
+ encoder_params.iMaxBitrate = UNSPECIFIED_BIT_RATE;
+ // Rate Control mode
+ encoder_params.iRCMode = RC_BITRATE_MODE;
+ encoder_params.fMaxFrameRate = configurations_[i].max_frame_rate;
+
+ // The following parameters are extension parameters (they're in SEncParamExt,
+ // not in SEncParamBase).
+ encoder_params.bEnableFrameSkip = configurations_[i].frame_dropping_on;
+ // `uiIntraPeriod` - multiple of GOP size
+ // `keyFrameInterval` - number of frames
+ encoder_params.uiIntraPeriod = configurations_[i].key_frame_interval;
+  // Reuse the SPS id if possible. This helps to avoid resetting Chromium's HW
+  // decoder on each key frame.
+  // Note that WebRTC resets the encoder on resolution change, which makes all
+  // EParameterSetStrategy modes except INCREASING_ID (the default) essentially
+  // equivalent to CONSTANT_ID.
+ encoder_params.eSpsPpsIdStrategy = SPS_LISTING;
+ encoder_params.uiMaxNalSize = 0;
+ // Threading model: use auto.
+ // 0: auto (dynamic imp. internal encoder)
+ // 1: single thread (default value)
+ // >1: number of threads
+ encoder_params.iMultipleThreadIdc = NumberOfThreads(
+ encoder_params.iPicWidth, encoder_params.iPicHeight, number_of_cores_);
+ // The base spatial layer 0 is the only one we use.
+ encoder_params.sSpatialLayers[0].iVideoWidth = encoder_params.iPicWidth;
+ encoder_params.sSpatialLayers[0].iVideoHeight = encoder_params.iPicHeight;
+ encoder_params.sSpatialLayers[0].fFrameRate = encoder_params.fMaxFrameRate;
+ encoder_params.sSpatialLayers[0].iSpatialBitrate =
+ encoder_params.iTargetBitrate;
+ encoder_params.sSpatialLayers[0].iMaxSpatialBitrate =
+ encoder_params.iMaxBitrate;
+ encoder_params.iTemporalLayerNum = configurations_[i].num_temporal_layers;
+ if (encoder_params.iTemporalLayerNum > 1) {
+    // iNumRefFrame specifies the total number of reference buffers to
+    // allocate. For N temporal layers we need at least (N - 1) buffers to
+    // store the last encoded frames of all reference temporal layers.
+    // Note that there is no API in the OpenH264 encoder to specify the exact
+    // set of references to be used for prediction of a given frame. The
+    // encoder can theoretically use all available reference buffers.
+ encoder_params.iNumRefFrame = encoder_params.iTemporalLayerNum - 1;
+ }
+ RTC_LOG(LS_INFO) << "OpenH264 version is " << OPENH264_MAJOR << "."
+ << OPENH264_MINOR;
+ switch (packetization_mode_) {
+ case H264PacketizationMode::SingleNalUnit:
+ // Limit the size of the packets produced.
+ encoder_params.sSpatialLayers[0].sSliceArgument.uiSliceNum = 1;
+ encoder_params.sSpatialLayers[0].sSliceArgument.uiSliceMode =
+ SM_SIZELIMITED_SLICE;
+ encoder_params.sSpatialLayers[0].sSliceArgument.uiSliceSizeConstraint =
+ static_cast<unsigned int>(max_payload_size_);
+ RTC_LOG(LS_INFO) << "Encoder is configured with NALU constraint: "
+ << max_payload_size_ << " bytes";
+ break;
+ case H264PacketizationMode::NonInterleaved:
+      // When uiSliceMode = SM_FIXEDSLCNUM_SLICE, uiSliceNum = 0 means the
+      // slice count is chosen automatically from the CPU core count.
+      // TODO(sprang): Set to 0 when we understand why the rate controller
+      // misbehaves when uiSliceNum > 1.
+ encoder_params.sSpatialLayers[0].sSliceArgument.uiSliceNum = 1;
+ encoder_params.sSpatialLayers[0].sSliceArgument.uiSliceMode =
+ SM_FIXEDSLCNUM_SLICE;
+ break;
+ }
+ return encoder_params;
+}
+
+void H264EncoderImpl::ReportInit() {
+ if (has_reported_init_)
+ return;
+ RTC_HISTOGRAM_ENUMERATION("WebRTC.Video.H264EncoderImpl.Event",
+ kH264EncoderEventInit, kH264EncoderEventMax);
+ has_reported_init_ = true;
+}
+
+void H264EncoderImpl::ReportError() {
+ if (has_reported_error_)
+ return;
+ RTC_HISTOGRAM_ENUMERATION("WebRTC.Video.H264EncoderImpl.Event",
+ kH264EncoderEventError, kH264EncoderEventMax);
+ has_reported_error_ = true;
+}
+
+VideoEncoder::EncoderInfo H264EncoderImpl::GetEncoderInfo() const {
+ EncoderInfo info;
+ info.supports_native_handle = false;
+ info.implementation_name = "OpenH264";
+ info.scaling_settings =
+ VideoEncoder::ScalingSettings(kLowH264QpThreshold, kHighH264QpThreshold);
+ info.is_hardware_accelerated = false;
+ info.supports_simulcast = true;
+ info.preferred_pixel_formats = {VideoFrameBuffer::Type::kI420};
+ return info;
+}
+
+void H264EncoderImpl::LayerConfig::SetStreamState(bool send_stream) {
+ if (send_stream && !sending) {
+ // Need a key frame if we have not sent this stream before.
+ key_frame_request = true;
+ }
+ sending = send_stream;
+}
+
+} // namespace webrtc
+
+#endif // WEBRTC_USE_H264
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/h264/h264_encoder_impl.h b/third_party/libwebrtc/modules/video_coding/codecs/h264/h264_encoder_impl.h
new file mode 100644
index 0000000000..1163464421
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/h264/h264_encoder_impl.h
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ *
+ */
+
+#ifndef MODULES_VIDEO_CODING_CODECS_H264_H264_ENCODER_IMPL_H_
+#define MODULES_VIDEO_CODING_CODECS_H264_H264_ENCODER_IMPL_H_
+
+// Everything declared in this header is only required when WebRTC is
+// built with H264 support; please do not move anything out of the
+// #ifdef unless needed and tested.
+#ifdef WEBRTC_USE_H264
+
+#if defined(WEBRTC_WIN) && !defined(__clang__)
+#error "See: bugs.webrtc.org/9213#c13."
+#endif
+
+#include <memory>
+#include <vector>
+
+#include "api/video/i420_buffer.h"
+#include "api/video_codecs/video_encoder.h"
+#include "common_video/h264/h264_bitstream_parser.h"
+#include "modules/video_coding/codecs/h264/include/h264.h"
+#include "modules/video_coding/utility/quality_scaler.h"
+#include "third_party/openh264/src/codec/api/svc/codec_app_def.h"
+
+class ISVCEncoder;
+
+namespace webrtc {
+
+class H264EncoderImpl : public H264Encoder {
+ public:
+ struct LayerConfig {
+ int simulcast_idx = 0;
+ int width = -1;
+ int height = -1;
+ bool sending = true;
+ bool key_frame_request = false;
+ float max_frame_rate = 0;
+ uint32_t target_bps = 0;
+ uint32_t max_bps = 0;
+ bool frame_dropping_on = false;
+ int key_frame_interval = 0;
+ int num_temporal_layers = 1;
+
+ void SetStreamState(bool send_stream);
+ };
+
+ explicit H264EncoderImpl(const cricket::VideoCodec& codec);
+ ~H264EncoderImpl() override;
+
+ // `settings.max_payload_size` is ignored.
+ // The following members of `codec_settings` are used. The rest are ignored.
+ // - codecType (must be kVideoCodecH264)
+ // - targetBitrate
+ // - maxFramerate
+ // - width
+ // - height
+ int32_t InitEncode(const VideoCodec* codec_settings,
+ const VideoEncoder::Settings& settings) override;
+ int32_t Release() override;
+
+ int32_t RegisterEncodeCompleteCallback(
+ EncodedImageCallback* callback) override;
+ void SetRates(const RateControlParameters& parameters) override;
+
+  // The result of encoding, an EncodedImage and CodecSpecificInfo, is
+  // passed to the encode complete callback.
+ int32_t Encode(const VideoFrame& frame,
+ const std::vector<VideoFrameType>* frame_types) override;
+
+ EncoderInfo GetEncoderInfo() const override;
+
+ // Exposed for testing.
+ H264PacketizationMode PacketizationModeForTesting() const {
+ return packetization_mode_;
+ }
+
+ private:
+ SEncParamExt CreateEncoderParams(size_t i) const;
+
+ webrtc::H264BitstreamParser h264_bitstream_parser_;
+ // Reports statistics with histograms.
+ void ReportInit();
+ void ReportError();
+
+ std::vector<ISVCEncoder*> encoders_;
+ std::vector<SSourcePicture> pictures_;
+ std::vector<rtc::scoped_refptr<I420Buffer>> downscaled_buffers_;
+ std::vector<LayerConfig> configurations_;
+ std::vector<EncodedImage> encoded_images_;
+
+ VideoCodec codec_;
+ H264PacketizationMode packetization_mode_;
+ size_t max_payload_size_;
+ int32_t number_of_cores_;
+ EncodedImageCallback* encoded_image_callback_;
+
+ bool has_reported_init_;
+ bool has_reported_error_;
+
+ std::vector<uint8_t> tl0sync_limit_;
+};
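+
+// A hypothetical usage sketch (mirrors the unit tests; `settings`, `callback`
+// and `frame` are assumed to be set up by the caller):
+//   H264EncoderImpl encoder(cricket::VideoCodec("H264"));
+//   VideoCodec codec_settings;  // set codecType, width, height, maxFramerate
+//   encoder.InitEncode(&codec_settings, settings);
+//   encoder.RegisterEncodeCompleteCallback(&callback);
+//   encoder.Encode(frame, /*frame_types=*/nullptr);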
+
+} // namespace webrtc
+
+#endif // WEBRTC_USE_H264
+
+#endif // MODULES_VIDEO_CODING_CODECS_H264_H264_ENCODER_IMPL_H_
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/h264/h264_encoder_impl_unittest.cc b/third_party/libwebrtc/modules/video_coding/codecs/h264/h264_encoder_impl_unittest.cc
new file mode 100644
index 0000000000..52d26955ab
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/h264/h264_encoder_impl_unittest.cc
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ *
+ */
+
+#include "modules/video_coding/codecs/h264/h264_encoder_impl.h"
+
+#include "api/video_codecs/video_encoder.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+namespace {
+
+const int kMaxPayloadSize = 1024;
+const int kNumCores = 1;
+
+const VideoEncoder::Capabilities kCapabilities(false);
+const VideoEncoder::Settings kSettings(kCapabilities,
+ kNumCores,
+ kMaxPayloadSize);
+
+void SetDefaultSettings(VideoCodec* codec_settings) {
+ codec_settings->codecType = kVideoCodecH264;
+ codec_settings->maxFramerate = 60;
+ codec_settings->width = 640;
+ codec_settings->height = 480;
+  // If frame dropping is disabled, we get a warning that the bitrate can't be
+  // controlled for RC_QUALITY_MODE, RC_BITRATE_MODE, and RC_TIMESTAMP_MODE.
+ codec_settings->SetFrameDropEnabled(true);
+ codec_settings->startBitrate = 2000;
+ codec_settings->maxBitrate = 4000;
+}
+
+TEST(H264EncoderImplTest, CanInitializeWithDefaultParameters) {
+ H264EncoderImpl encoder(cricket::VideoCodec("H264"));
+ VideoCodec codec_settings;
+ SetDefaultSettings(&codec_settings);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder.InitEncode(&codec_settings, kSettings));
+ EXPECT_EQ(H264PacketizationMode::NonInterleaved,
+ encoder.PacketizationModeForTesting());
+}
+
+TEST(H264EncoderImplTest, CanInitializeWithNonInterleavedModeExplicitly) {
+ cricket::VideoCodec codec("H264");
+ codec.SetParam(cricket::kH264FmtpPacketizationMode, "1");
+ H264EncoderImpl encoder(codec);
+ VideoCodec codec_settings;
+ SetDefaultSettings(&codec_settings);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder.InitEncode(&codec_settings, kSettings));
+ EXPECT_EQ(H264PacketizationMode::NonInterleaved,
+ encoder.PacketizationModeForTesting());
+}
+
+TEST(H264EncoderImplTest, CanInitializeWithSingleNalUnitModeExplicitly) {
+ cricket::VideoCodec codec("H264");
+ codec.SetParam(cricket::kH264FmtpPacketizationMode, "0");
+ H264EncoderImpl encoder(codec);
+ VideoCodec codec_settings;
+ SetDefaultSettings(&codec_settings);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder.InitEncode(&codec_settings, kSettings));
+ EXPECT_EQ(H264PacketizationMode::SingleNalUnit,
+ encoder.PacketizationModeForTesting());
+}
+
+TEST(H264EncoderImplTest, CanInitializeWithRemovedParameter) {
+ cricket::VideoCodec codec("H264");
+ codec.RemoveParam(cricket::kH264FmtpPacketizationMode);
+ H264EncoderImpl encoder(codec);
+ VideoCodec codec_settings;
+ SetDefaultSettings(&codec_settings);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder.InitEncode(&codec_settings, kSettings));
+ EXPECT_EQ(H264PacketizationMode::SingleNalUnit,
+ encoder.PacketizationModeForTesting());
+}
+
+} // anonymous namespace
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/h264/h264_simulcast_unittest.cc b/third_party/libwebrtc/modules/video_coding/codecs/h264/h264_simulcast_unittest.cc
new file mode 100644
index 0000000000..12b5da1404
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/h264/h264_simulcast_unittest.cc
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <memory>
+
+#include "api/test/create_simulcast_test_fixture.h"
+#include "api/test/simulcast_test_fixture.h"
+#include "api/test/video/function_video_decoder_factory.h"
+#include "api/test/video/function_video_encoder_factory.h"
+#include "modules/video_coding/codecs/h264/include/h264.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace test {
+
+namespace {
+std::unique_ptr<SimulcastTestFixture> CreateSpecificSimulcastTestFixture() {
+ std::unique_ptr<VideoEncoderFactory> encoder_factory =
+ std::make_unique<FunctionVideoEncoderFactory>(
+ []() { return H264Encoder::Create(cricket::VideoCodec("H264")); });
+ std::unique_ptr<VideoDecoderFactory> decoder_factory =
+ std::make_unique<FunctionVideoDecoderFactory>(
+ []() { return H264Decoder::Create(); });
+ return CreateSimulcastTestFixture(std::move(encoder_factory),
+ std::move(decoder_factory),
+ SdpVideoFormat("H264"));
+}
+} // namespace
+
+TEST(TestH264Simulcast, TestKeyFrameRequestsOnAllStreams) {
+ auto fixture = CreateSpecificSimulcastTestFixture();
+ fixture->TestKeyFrameRequestsOnAllStreams();
+}
+
+TEST(TestH264Simulcast, TestPaddingAllStreams) {
+ auto fixture = CreateSpecificSimulcastTestFixture();
+ fixture->TestPaddingAllStreams();
+}
+
+TEST(TestH264Simulcast, TestPaddingTwoStreams) {
+ auto fixture = CreateSpecificSimulcastTestFixture();
+ fixture->TestPaddingTwoStreams();
+}
+
+TEST(TestH264Simulcast, TestPaddingTwoStreamsOneMaxedOut) {
+ auto fixture = CreateSpecificSimulcastTestFixture();
+ fixture->TestPaddingTwoStreamsOneMaxedOut();
+}
+
+TEST(TestH264Simulcast, TestPaddingOneStream) {
+ auto fixture = CreateSpecificSimulcastTestFixture();
+ fixture->TestPaddingOneStream();
+}
+
+TEST(TestH264Simulcast, TestPaddingOneStreamTwoMaxedOut) {
+ auto fixture = CreateSpecificSimulcastTestFixture();
+ fixture->TestPaddingOneStreamTwoMaxedOut();
+}
+
+TEST(TestH264Simulcast, TestSendAllStreams) {
+ auto fixture = CreateSpecificSimulcastTestFixture();
+ fixture->TestSendAllStreams();
+}
+
+TEST(TestH264Simulcast, TestDisablingStreams) {
+ auto fixture = CreateSpecificSimulcastTestFixture();
+ fixture->TestDisablingStreams();
+}
+
+TEST(TestH264Simulcast, TestActiveStreams) {
+ auto fixture = CreateSpecificSimulcastTestFixture();
+ fixture->TestActiveStreams();
+}
+
+TEST(TestH264Simulcast, TestSwitchingToOneStream) {
+ auto fixture = CreateSpecificSimulcastTestFixture();
+ fixture->TestSwitchingToOneStream();
+}
+
+TEST(TestH264Simulcast, TestSwitchingToOneOddStream) {
+ auto fixture = CreateSpecificSimulcastTestFixture();
+ fixture->TestSwitchingToOneOddStream();
+}
+
+TEST(TestH264Simulcast, TestStrideEncodeDecode) {
+ auto fixture = CreateSpecificSimulcastTestFixture();
+ fixture->TestStrideEncodeDecode();
+}
+
+TEST(TestH264Simulcast, TestSpatioTemporalLayers333PatternEncoder) {
+ auto fixture = CreateSpecificSimulcastTestFixture();
+ fixture->TestSpatioTemporalLayers333PatternEncoder();
+}
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/h264/include/h264.h b/third_party/libwebrtc/modules/video_coding/codecs/h264/include/h264.h
new file mode 100644
index 0000000000..2635b53842
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/h264/include/h264.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ *
+ */
+
+#ifndef MODULES_VIDEO_CODING_CODECS_H264_INCLUDE_H264_H_
+#define MODULES_VIDEO_CODING_CODECS_H264_INCLUDE_H264_H_
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "api/video_codecs/h264_profile_level_id.h"
+#include "api/video_codecs/scalability_mode.h"
+#include "media/base/codec.h"
+#include "modules/video_coding/include/video_codec_interface.h"
+#include "rtc_base/system/rtc_export.h"
+
+namespace webrtc {
+
+struct SdpVideoFormat;
+
+// Creates an H264 SdpVideoFormat entry with the specified parameters.
+RTC_EXPORT SdpVideoFormat
+CreateH264Format(H264Profile profile,
+ H264Level level,
+ const std::string& packetization_mode,
+ bool add_scalability_modes = false);
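+// For example (profile and level values assumed from
+// api/video_codecs/h264_profile_level_id.h):
+//   CreateH264Format(H264Profile::kProfileConstrainedBaseline,
+//                    H264Level::kLevel3_1, "1");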
+
+// Disables the H.264 encoder/decoder implementations that are provided when
+// the `rtc_use_h264` build flag is true (if the flag is false, this function
+// does nothing). This function should only be called before or during WebRTC
+// initialization and is not thread-safe.
+RTC_EXPORT void DisableRtcUseH264();
+
+// Returns a vector with all supported internal H264 encode profiles that we can
+// negotiate in SDP, in order of preference.
+std::vector<SdpVideoFormat> SupportedH264Codecs(
+ bool add_scalability_modes = false);
+
+// Returns a vector with all supported internal H264 decode profiles that we
+// can negotiate in SDP, in order of preference. These will be available for
+// receive-only connections.
+std::vector<SdpVideoFormat> SupportedH264DecoderCodecs();
+
+class RTC_EXPORT H264Encoder : public VideoEncoder {
+ public:
+ static std::unique_ptr<H264Encoder> Create(const cricket::VideoCodec& codec);
+ // If H.264 is supported (any implementation).
+ static bool IsSupported();
+ static bool SupportsScalabilityMode(ScalabilityMode scalability_mode);
+
+ ~H264Encoder() override {}
+};
+
+class RTC_EXPORT H264Decoder : public VideoDecoder {
+ public:
+ static std::unique_ptr<H264Decoder> Create();
+ static bool IsSupported();
+
+ ~H264Decoder() override {}
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_CODECS_H264_INCLUDE_H264_H_
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/h264/include/h264_globals.h b/third_party/libwebrtc/modules/video_coding/codecs/h264/include/h264_globals.h
new file mode 100644
index 0000000000..b61dc8c507
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/h264/include/h264_globals.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+// This file contains codec-dependent definitions that are needed to
+// compile the WebRTC codebase, even if this codec is not used.
+
+#ifndef MODULES_VIDEO_CODING_CODECS_H264_INCLUDE_H264_GLOBALS_H_
+#define MODULES_VIDEO_CODING_CODECS_H264_INCLUDE_H264_GLOBALS_H_
+
+#include <string>
+
+#include "modules/video_coding/codecs/interface/common_constants.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+// The packetization types that we support: single, aggregated, and fragmented.
+enum H264PacketizationTypes {
+ kH264SingleNalu, // This packet contains a single NAL unit.
+ kH264StapA, // This packet contains STAP-A (single time
+ // aggregation) packets. If this packet has an
+ // associated NAL unit type, it'll be for the
+ // first such aggregated packet.
+ kH264FuA, // This packet contains a FU-A (fragmentation
+ // unit) packet, meaning it is a part of a frame
+ // that was too large to fit into a single packet.
+};
+
+// Packetization modes are defined in RFC 6184 section 6.
+// Because the structure containing this enum is zero-initialized in some
+// places, and mode 1 is the default, mode 1 needs to have the value zero.
+// https://crbug.com/webrtc/6803
+enum class H264PacketizationMode {
+ NonInterleaved = 0, // Mode 1 - STAP-A, FU-A is allowed
+ SingleNalUnit // Mode 0 - only single NALU allowed
+};
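+
+// For example, a zero-initialized RTPVideoHeaderH264 (defined below) defaults
+// to NonInterleaved:
+//   RTPVideoHeaderH264 header{};
+//   // header.packetization_mode == H264PacketizationMode::NonInterleaved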
+
+// This function is declared inline because it is not clear which
+// .cc file it should belong to.
+// TODO(hta): Refactor. https://bugs.webrtc.org/6842
+// TODO(jonasolsson): Use absl::string_view instead when that's available.
+inline std::string ToString(H264PacketizationMode mode) {
+ if (mode == H264PacketizationMode::NonInterleaved) {
+ return "NonInterleaved";
+ } else if (mode == H264PacketizationMode::SingleNalUnit) {
+ return "SingleNalUnit";
+ }
+ RTC_DCHECK_NOTREACHED();
+ return "";
+}
+
+struct NaluInfo {
+ uint8_t type;
+ int sps_id;
+ int pps_id;
+};
+
+const size_t kMaxNalusPerPacket = 10;
+
+struct RTPVideoHeaderH264 {
+ // The NAL unit type. If this is a header for a
+ // fragmented packet, it's the NAL unit type of
+ // the original data. If this is the header for an
+ // aggregated packet, it's the NAL unit type of
+ // the first NAL unit in the packet.
+ uint8_t nalu_type;
+ // The packetization type of this buffer - single, aggregated or fragmented.
+ H264PacketizationTypes packetization_type;
+ NaluInfo nalus[kMaxNalusPerPacket];
+ size_t nalus_length;
+ // The packetization mode of this transport. Packetization mode
+ // determines which packetization types are allowed when packetizing.
+ H264PacketizationMode packetization_mode;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_CODECS_H264_INCLUDE_H264_GLOBALS_H_
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/h264/test/h264_impl_unittest.cc b/third_party/libwebrtc/modules/video_coding/codecs/h264/test/h264_impl_unittest.cc
new file mode 100644
index 0000000000..595e627bcc
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/h264/test/h264_impl_unittest.cc
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stdint.h>
+
+#include <memory>
+
+#include "absl/types/optional.h"
+#include "api/video/color_space.h"
+#include "api/video/encoded_image.h"
+#include "api/video/video_frame.h"
+#include "api/video_codecs/video_codec.h"
+#include "api/video_codecs/video_decoder.h"
+#include "api/video_codecs/video_encoder.h"
+#include "common_video/libyuv/include/webrtc_libyuv.h"
+#include "media/base/codec.h"
+#include "media/base/media_constants.h"
+#include "modules/video_coding/codecs/h264/include/h264.h"
+#include "modules/video_coding/codecs/test/video_codec_unittest.h"
+#include "modules/video_coding/include/video_codec_interface.h"
+#include "modules/video_coding/include/video_error_codes.h"
+#include "test/gtest.h"
+#include "test/video_codec_settings.h"
+
+namespace webrtc {
+
+class TestH264Impl : public VideoCodecUnitTest {
+ protected:
+ std::unique_ptr<VideoEncoder> CreateEncoder() override {
+ return H264Encoder::Create(cricket::VideoCodec(cricket::kH264CodecName));
+ }
+
+ std::unique_ptr<VideoDecoder> CreateDecoder() override {
+ return H264Decoder::Create();
+ }
+
+ void ModifyCodecSettings(VideoCodec* codec_settings) override {
+ webrtc::test::CodecSettings(kVideoCodecH264, codec_settings);
+ }
+};
+
+#ifdef WEBRTC_USE_H264
+#define MAYBE_EncodeDecode EncodeDecode
+#define MAYBE_DecodedQpEqualsEncodedQp DecodedQpEqualsEncodedQp
+#else
+#define MAYBE_EncodeDecode DISABLED_EncodeDecode
+#define MAYBE_DecodedQpEqualsEncodedQp DISABLED_DecodedQpEqualsEncodedQp
+#endif
+
+TEST_F(TestH264Impl, MAYBE_EncodeDecode) {
+ VideoFrame input_frame = NextInputFrame();
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(input_frame, nullptr));
+ EncodedImage encoded_frame;
+ CodecSpecificInfo codec_specific_info;
+ ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
+ // First frame should be a key frame.
+ encoded_frame._frameType = VideoFrameType::kVideoFrameKey;
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Decode(encoded_frame, false, 0));
+ std::unique_ptr<VideoFrame> decoded_frame;
+ absl::optional<uint8_t> decoded_qp;
+ ASSERT_TRUE(WaitForDecodedFrame(&decoded_frame, &decoded_qp));
+ ASSERT_TRUE(decoded_frame);
+ EXPECT_GT(I420PSNR(&input_frame, decoded_frame.get()), 36);
+
+ const ColorSpace color_space = *decoded_frame->color_space();
+ EXPECT_EQ(ColorSpace::PrimaryID::kUnspecified, color_space.primaries());
+ EXPECT_EQ(ColorSpace::TransferID::kUnspecified, color_space.transfer());
+ EXPECT_EQ(ColorSpace::MatrixID::kUnspecified, color_space.matrix());
+ EXPECT_EQ(ColorSpace::RangeID::kInvalid, color_space.range());
+ EXPECT_EQ(ColorSpace::ChromaSiting::kUnspecified,
+ color_space.chroma_siting_horizontal());
+ EXPECT_EQ(ColorSpace::ChromaSiting::kUnspecified,
+ color_space.chroma_siting_vertical());
+}
+
+TEST_F(TestH264Impl, MAYBE_DecodedQpEqualsEncodedQp) {
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(NextInputFrame(), nullptr));
+ EncodedImage encoded_frame;
+ CodecSpecificInfo codec_specific_info;
+ ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
+ // First frame should be a key frame.
+ encoded_frame._frameType = VideoFrameType::kVideoFrameKey;
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Decode(encoded_frame, false, 0));
+ std::unique_ptr<VideoFrame> decoded_frame;
+ absl::optional<uint8_t> decoded_qp;
+ ASSERT_TRUE(WaitForDecodedFrame(&decoded_frame, &decoded_qp));
+ ASSERT_TRUE(decoded_frame);
+ ASSERT_TRUE(decoded_qp);
+ EXPECT_EQ(encoded_frame.qp_, *decoded_qp);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/interface/common_constants.h b/third_party/libwebrtc/modules/video_coding/codecs/interface/common_constants.h
new file mode 100644
index 0000000000..a8fc6290b9
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/interface/common_constants.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+// This file contains constants that are used by multiple global
+// codec definitions (modules/video_coding/codecs/*/include/*_globals.h)
+
+#ifndef MODULES_VIDEO_CODING_CODECS_INTERFACE_COMMON_CONSTANTS_H_
+#define MODULES_VIDEO_CODING_CODECS_INTERFACE_COMMON_CONSTANTS_H_
+
+#include <stdint.h>
+
+namespace webrtc {
+
+const int16_t kNoPictureId = -1;
+const int16_t kNoTl0PicIdx = -1;
+const uint8_t kNoTemporalIdx = 0xFF;
+const int kNoKeyIdx = -1;
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_CODECS_INTERFACE_COMMON_CONSTANTS_H_
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/interface/libvpx_interface.cc b/third_party/libwebrtc/modules/video_coding/codecs/interface/libvpx_interface.cc
new file mode 100644
index 0000000000..4f33bef2ba
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/interface/libvpx_interface.cc
@@ -0,0 +1,373 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/codecs/interface/libvpx_interface.h"
+
+#include <memory>
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace {
+class LibvpxFacade : public LibvpxInterface {
+ public:
+ LibvpxFacade() = default;
+ ~LibvpxFacade() override = default;
+
+ vpx_image_t* img_alloc(vpx_image_t* img,
+ vpx_img_fmt_t fmt,
+ unsigned int d_w,
+ unsigned int d_h,
+ unsigned int align) const override {
+ return ::vpx_img_alloc(img, fmt, d_w, d_h, align);
+ }
+
+ vpx_image_t* img_wrap(vpx_image_t* img,
+ vpx_img_fmt_t fmt,
+ unsigned int d_w,
+ unsigned int d_h,
+ unsigned int stride_align,
+ unsigned char* img_data) const override {
+ return ::vpx_img_wrap(img, fmt, d_w, d_h, stride_align, img_data);
+ }
+
+ void img_free(vpx_image_t* img) const override { ::vpx_img_free(img); }
+
+ vpx_codec_err_t codec_enc_config_set(
+ vpx_codec_ctx_t* ctx,
+ const vpx_codec_enc_cfg_t* cfg) const override {
+ return ::vpx_codec_enc_config_set(ctx, cfg);
+ }
+
+ vpx_codec_err_t codec_enc_config_default(vpx_codec_iface_t* iface,
+ vpx_codec_enc_cfg_t* cfg,
+ unsigned int usage) const override {
+ return ::vpx_codec_enc_config_default(iface, cfg, usage);
+ }
+
+ vpx_codec_err_t codec_enc_init(vpx_codec_ctx_t* ctx,
+ vpx_codec_iface_t* iface,
+ const vpx_codec_enc_cfg_t* cfg,
+ vpx_codec_flags_t flags) const override {
+ return ::vpx_codec_enc_init(ctx, iface, cfg, flags);
+ }
+
+ vpx_codec_err_t codec_enc_init_multi(vpx_codec_ctx_t* ctx,
+ vpx_codec_iface_t* iface,
+ vpx_codec_enc_cfg_t* cfg,
+ int num_enc,
+ vpx_codec_flags_t flags,
+ vpx_rational_t* dsf) const override {
+ return ::vpx_codec_enc_init_multi(ctx, iface, cfg, num_enc, flags, dsf);
+ }
+
+ vpx_codec_err_t codec_destroy(vpx_codec_ctx_t* ctx) const override {
+ return ::vpx_codec_destroy(ctx);
+ }
+
+ // For types related to these parameters, see section
+ // "VP8 encoder control function parameter type" in vpx/vp8cx.h.
+
+ vpx_codec_err_t codec_control(vpx_codec_ctx_t* ctx,
+ vp8e_enc_control_id ctrl_id,
+ uint32_t param) const override {
+ // We need an explicit call for each type since vpx_codec_control is a
+ // macro that gets expanded into another call based on the parameter name.
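+    // For example, vpx_codec_control(ctx, VP8E_SET_CPUUSED, param) token-
+    // pastes into vpx_codec_control_VP8E_SET_CPUUSED(ctx, VP8E_SET_CPUUSED,
+    // param), so the control id must be a compile-time constant.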
+ switch (ctrl_id) {
+ case VP8E_SET_ENABLEAUTOALTREF:
+ return vpx_codec_control(ctx, VP8E_SET_ENABLEAUTOALTREF, param);
+ case VP8E_SET_NOISE_SENSITIVITY:
+ return vpx_codec_control(ctx, VP8E_SET_NOISE_SENSITIVITY, param);
+ case VP8E_SET_SHARPNESS:
+ return vpx_codec_control(ctx, VP8E_SET_SHARPNESS, param);
+ case VP8E_SET_STATIC_THRESHOLD:
+ return vpx_codec_control(ctx, VP8E_SET_STATIC_THRESHOLD, param);
+ case VP8E_SET_ARNR_MAXFRAMES:
+ return vpx_codec_control(ctx, VP8E_SET_ARNR_MAXFRAMES, param);
+ case VP8E_SET_ARNR_STRENGTH:
+ return vpx_codec_control(ctx, VP8E_SET_ARNR_STRENGTH, param);
+ case VP8E_SET_CQ_LEVEL:
+ return vpx_codec_control(ctx, VP8E_SET_CQ_LEVEL, param);
+ case VP8E_SET_MAX_INTRA_BITRATE_PCT:
+ return vpx_codec_control(ctx, VP8E_SET_MAX_INTRA_BITRATE_PCT, param);
+ case VP9E_SET_MAX_INTER_BITRATE_PCT:
+ return vpx_codec_control(ctx, VP9E_SET_MAX_INTER_BITRATE_PCT, param);
+ case VP8E_SET_GF_CBR_BOOST_PCT:
+ return vpx_codec_control(ctx, VP8E_SET_GF_CBR_BOOST_PCT, param);
+ case VP8E_SET_SCREEN_CONTENT_MODE:
+ return vpx_codec_control(ctx, VP8E_SET_SCREEN_CONTENT_MODE, param);
+ case VP9E_SET_GF_CBR_BOOST_PCT:
+ return vpx_codec_control(ctx, VP9E_SET_GF_CBR_BOOST_PCT, param);
+ case VP9E_SET_LOSSLESS:
+ return vpx_codec_control(ctx, VP9E_SET_LOSSLESS, param);
+ case VP9E_SET_FRAME_PARALLEL_DECODING:
+ return vpx_codec_control(ctx, VP9E_SET_FRAME_PARALLEL_DECODING, param);
+ case VP9E_SET_AQ_MODE:
+ return vpx_codec_control(ctx, VP9E_SET_AQ_MODE, param);
+ case VP9E_SET_FRAME_PERIODIC_BOOST:
+ return vpx_codec_control(ctx, VP9E_SET_FRAME_PERIODIC_BOOST, param);
+ case VP9E_SET_NOISE_SENSITIVITY:
+ return vpx_codec_control(ctx, VP9E_SET_NOISE_SENSITIVITY, param);
+ case VP9E_SET_MIN_GF_INTERVAL:
+ return vpx_codec_control(ctx, VP9E_SET_MIN_GF_INTERVAL, param);
+ case VP9E_SET_MAX_GF_INTERVAL:
+ return vpx_codec_control(ctx, VP9E_SET_MAX_GF_INTERVAL, param);
+ case VP9E_SET_TARGET_LEVEL:
+ return vpx_codec_control(ctx, VP9E_SET_TARGET_LEVEL, param);
+ case VP9E_SET_ROW_MT:
+ return vpx_codec_control(ctx, VP9E_SET_ROW_MT, param);
+ case VP9E_ENABLE_MOTION_VECTOR_UNIT_TEST:
+ return vpx_codec_control(ctx, VP9E_ENABLE_MOTION_VECTOR_UNIT_TEST,
+ param);
+ case VP9E_SET_SVC_INTER_LAYER_PRED:
+ return vpx_codec_control(ctx, VP9E_SET_SVC_INTER_LAYER_PRED, param);
+ case VP9E_SET_SVC_GF_TEMPORAL_REF:
+ return vpx_codec_control(ctx, VP9E_SET_SVC_GF_TEMPORAL_REF, param);
+ case VP9E_SET_POSTENCODE_DROP:
+ return vpx_codec_control(ctx, VP9E_SET_POSTENCODE_DROP, param);
+ default:
+ RTC_DCHECK_NOTREACHED() << "Unsupported libvpx ctrl_id: " << ctrl_id;
+ }
+ return VPX_CODEC_ERROR;
+ }
+
+ vpx_codec_err_t codec_control(vpx_codec_ctx_t* ctx,
+ vp8e_enc_control_id ctrl_id,
+ int param) const override {
+ switch (ctrl_id) {
+ case VP8E_SET_FRAME_FLAGS:
+ return vpx_codec_control(ctx, VP8E_SET_FRAME_FLAGS, param);
+ case VP8E_SET_TEMPORAL_LAYER_ID:
+ return vpx_codec_control(ctx, VP8E_SET_TEMPORAL_LAYER_ID, param);
+ case VP9E_SET_SVC:
+ return vpx_codec_control(ctx, VP9E_SET_SVC, param);
+ case VP8E_SET_CPUUSED:
+ return vpx_codec_control(ctx, VP8E_SET_CPUUSED, param);
+ case VP8E_SET_TOKEN_PARTITIONS:
+ return vpx_codec_control(ctx, VP8E_SET_TOKEN_PARTITIONS, param);
+ case VP8E_SET_TUNING:
+ return vpx_codec_control(ctx, VP8E_SET_TUNING, param);
+ case VP9E_SET_TILE_COLUMNS:
+ return vpx_codec_control(ctx, VP9E_SET_TILE_COLUMNS, param);
+ case VP9E_SET_TILE_ROWS:
+ return vpx_codec_control(ctx, VP9E_SET_TILE_ROWS, param);
+ case VP9E_SET_TPL:
+ return vpx_codec_control(ctx, VP9E_SET_TPL, param);
+ case VP9E_SET_ALT_REF_AQ:
+ return vpx_codec_control(ctx, VP9E_SET_ALT_REF_AQ, param);
+ case VP9E_SET_TUNE_CONTENT:
+ return vpx_codec_control(ctx, VP9E_SET_TUNE_CONTENT, param);
+ case VP9E_SET_COLOR_SPACE:
+ return vpx_codec_control(ctx, VP9E_SET_COLOR_SPACE, param);
+ case VP9E_SET_COLOR_RANGE:
+ return vpx_codec_control(ctx, VP9E_SET_COLOR_RANGE, param);
+ case VP9E_SET_DELTA_Q_UV:
+ return vpx_codec_control(ctx, VP9E_SET_DELTA_Q_UV, param);
+ case VP9E_SET_DISABLE_OVERSHOOT_MAXQ_CBR:
+ return vpx_codec_control(ctx, VP9E_SET_DISABLE_OVERSHOOT_MAXQ_CBR,
+ param);
+ case VP9E_SET_DISABLE_LOOPFILTER:
+ return vpx_codec_control(ctx, VP9E_SET_DISABLE_LOOPFILTER, param);
+
+ default:
+ if (param >= 0) {
+ // Might be intended for uint32_t but int literal used, try fallback.
+ return codec_control(ctx, ctrl_id, static_cast<uint32_t>(param));
+ }
+ RTC_DCHECK_NOTREACHED() << "Unsupported libvpx ctrl_id: " << ctrl_id;
+ }
+ return VPX_CODEC_ERROR;
+ }
+
+ vpx_codec_err_t codec_control(vpx_codec_ctx_t* ctx,
+ vp8e_enc_control_id ctrl_id,
+ int* param) const override {
+ switch (ctrl_id) {
+ case VP8E_GET_LAST_QUANTIZER:
+ return vpx_codec_control(ctx, VP8E_GET_LAST_QUANTIZER, param);
+ case VP8E_GET_LAST_QUANTIZER_64:
+ return vpx_codec_control(ctx, VP8E_GET_LAST_QUANTIZER_64, param);
+ case VP9E_SET_RENDER_SIZE:
+ return vpx_codec_control(ctx, VP9E_SET_RENDER_SIZE, param);
+ case VP9E_GET_LEVEL:
+ return vpx_codec_control(ctx, VP9E_GET_LEVEL, param);
+ default:
+ RTC_DCHECK_NOTREACHED() << "Unsupported libvpx ctrl_id: " << ctrl_id;
+ }
+ return VPX_CODEC_ERROR;
+ }
+
+ vpx_codec_err_t codec_control(vpx_codec_ctx_t* ctx,
+ vp8e_enc_control_id ctrl_id,
+ vpx_roi_map* param) const override {
+ switch (ctrl_id) {
+ case VP8E_SET_ROI_MAP:
+ return vpx_codec_control(ctx, VP8E_SET_ROI_MAP, param);
+ case VP9E_SET_ROI_MAP:
+ return vpx_codec_control(ctx, VP9E_SET_ROI_MAP, param);
+ default:
+ RTC_DCHECK_NOTREACHED() << "Unsupported libvpx ctrl_id: " << ctrl_id;
+ }
+ return VPX_CODEC_ERROR;
+ }
+
+ vpx_codec_err_t codec_control(vpx_codec_ctx_t* ctx,
+ vp8e_enc_control_id ctrl_id,
+ vpx_active_map* param) const override {
+ switch (ctrl_id) {
+ case VP8E_SET_ACTIVEMAP:
+ return vpx_codec_control(ctx, VP8E_SET_ACTIVEMAP, param);
+      case VP9E_GET_ACTIVEMAP:
+        return vpx_codec_control(ctx, VP9E_GET_ACTIVEMAP, param);
+ default:
+ RTC_DCHECK_NOTREACHED() << "Unsupported libvpx ctrl_id: " << ctrl_id;
+ }
+ return VPX_CODEC_ERROR;
+ }
+
+ vpx_codec_err_t codec_control(vpx_codec_ctx_t* ctx,
+ vp8e_enc_control_id ctrl_id,
+ vpx_scaling_mode* param) const override {
+ switch (ctrl_id) {
+ case VP8E_SET_SCALEMODE:
+ return vpx_codec_control(ctx, VP8E_SET_SCALEMODE, param);
+ default:
+ RTC_DCHECK_NOTREACHED() << "Unsupported libvpx ctrl_id: " << ctrl_id;
+ }
+ return VPX_CODEC_ERROR;
+ }
+
+ vpx_codec_err_t codec_control(vpx_codec_ctx_t* ctx,
+ vp8e_enc_control_id ctrl_id,
+ vpx_svc_extra_cfg_t* param) const override {
+ switch (ctrl_id) {
+ case VP9E_SET_SVC_PARAMETERS:
+ return vpx_codec_control_(ctx, VP9E_SET_SVC_PARAMETERS, param);
+ default:
+ RTC_DCHECK_NOTREACHED() << "Unsupported libvpx ctrl_id: " << ctrl_id;
+ }
+ return VPX_CODEC_ERROR;
+ }
+
+ vpx_codec_err_t codec_control(vpx_codec_ctx_t* ctx,
+ vp8e_enc_control_id ctrl_id,
+ vpx_svc_frame_drop_t* param) const override {
+ switch (ctrl_id) {
+ case VP9E_SET_SVC_FRAME_DROP_LAYER:
+ return vpx_codec_control_(ctx, VP9E_SET_SVC_FRAME_DROP_LAYER, param);
+ default:
+ RTC_DCHECK_NOTREACHED() << "Unsupported libvpx ctrl_id: " << ctrl_id;
+ }
+ return VPX_CODEC_ERROR;
+ }
+
+ vpx_codec_err_t codec_control(vpx_codec_ctx_t* ctx,
+ vp8e_enc_control_id ctrl_id,
+ void* param) const override {
+ switch (ctrl_id) {
+ case VP9E_SET_SVC_PARAMETERS:
+ return vpx_codec_control_(ctx, VP9E_SET_SVC_PARAMETERS, param);
+ case VP9E_REGISTER_CX_CALLBACK:
+ return vpx_codec_control_(ctx, VP9E_REGISTER_CX_CALLBACK, param);
+ default:
+ RTC_DCHECK_NOTREACHED() << "Unsupported libvpx ctrl_id: " << ctrl_id;
+ }
+ return VPX_CODEC_ERROR;
+ }
+
+ vpx_codec_err_t codec_control(vpx_codec_ctx_t* ctx,
+ vp8e_enc_control_id ctrl_id,
+ vpx_svc_layer_id_t* param) const override {
+ switch (ctrl_id) {
+ case VP9E_SET_SVC_LAYER_ID:
+ return vpx_codec_control_(ctx, VP9E_SET_SVC_LAYER_ID, param);
+ case VP9E_GET_SVC_LAYER_ID:
+ return vpx_codec_control_(ctx, VP9E_GET_SVC_LAYER_ID, param);
+ default:
+ RTC_DCHECK_NOTREACHED() << "Unsupported libvpx ctrl_id: " << ctrl_id;
+ }
+ return VPX_CODEC_ERROR;
+ }
+
+ vpx_codec_err_t codec_control(
+ vpx_codec_ctx_t* ctx,
+ vp8e_enc_control_id ctrl_id,
+ vpx_svc_ref_frame_config_t* param) const override {
+ switch (ctrl_id) {
+ case VP9E_SET_SVC_REF_FRAME_CONFIG:
+ return vpx_codec_control_(ctx, VP9E_SET_SVC_REF_FRAME_CONFIG, param);
+ case VP9E_GET_SVC_REF_FRAME_CONFIG:
+ return vpx_codec_control_(ctx, VP9E_GET_SVC_REF_FRAME_CONFIG, param);
+ default:
+ RTC_DCHECK_NOTREACHED() << "Unsupported libvpx ctrl_id: " << ctrl_id;
+ }
+ return VPX_CODEC_ERROR;
+ }
+
+ vpx_codec_err_t codec_control(
+ vpx_codec_ctx_t* ctx,
+ vp8e_enc_control_id ctrl_id,
+ vpx_svc_spatial_layer_sync_t* param) const override {
+ switch (ctrl_id) {
+ case VP9E_SET_SVC_SPATIAL_LAYER_SYNC:
+ return vpx_codec_control_(ctx, VP9E_SET_SVC_SPATIAL_LAYER_SYNC, param);
+ default:
+ RTC_DCHECK_NOTREACHED() << "Unsupported libvpx ctrl_id: " << ctrl_id;
+ }
+ return VPX_CODEC_ERROR;
+ }
+
+ vpx_codec_err_t codec_control(vpx_codec_ctx_t* ctx,
+ vp8e_enc_control_id ctrl_id,
+ vpx_rc_funcs_t* param) const override {
+ switch (ctrl_id) {
+ case VP9E_SET_EXTERNAL_RATE_CONTROL:
+ return vpx_codec_control_(ctx, VP9E_SET_EXTERNAL_RATE_CONTROL, param);
+ default:
+ RTC_DCHECK_NOTREACHED() << "Unsupported libvpx ctrl_id: " << ctrl_id;
+ }
+ return VPX_CODEC_ERROR;
+ }
+
+ vpx_codec_err_t codec_encode(vpx_codec_ctx_t* ctx,
+ const vpx_image_t* img,
+ vpx_codec_pts_t pts,
+ uint64_t duration,
+ vpx_enc_frame_flags_t flags,
+ uint64_t deadline) const override {
+ return ::vpx_codec_encode(ctx, img, pts, duration, flags, deadline);
+ }
+
+ const vpx_codec_cx_pkt_t* codec_get_cx_data(
+ vpx_codec_ctx_t* ctx,
+ vpx_codec_iter_t* iter) const override {
+ return ::vpx_codec_get_cx_data(ctx, iter);
+ }
+
+ const char* codec_error_detail(vpx_codec_ctx_t* ctx) const override {
+ return ::vpx_codec_error_detail(ctx);
+ }
+
+ const char* codec_error(vpx_codec_ctx_t* ctx) const override {
+ return ::vpx_codec_error(ctx);
+ }
+
+ const char* codec_err_to_string(vpx_codec_err_t err) const override {
+ return ::vpx_codec_err_to_string(err);
+ }
+};
+
+} // namespace
+
+std::unique_ptr<LibvpxInterface> LibvpxInterface::Create() {
+ return std::make_unique<LibvpxFacade>();
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/interface/libvpx_interface.h b/third_party/libwebrtc/modules/video_coding/codecs/interface/libvpx_interface.h
new file mode 100644
index 0000000000..3dea24dd6d
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/interface/libvpx_interface.h
@@ -0,0 +1,128 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_CODECS_INTERFACE_LIBVPX_INTERFACE_H_
+#define MODULES_VIDEO_CODING_CODECS_INTERFACE_LIBVPX_INTERFACE_H_
+
+#include <stdint.h>
+
+#include <memory>
+
+#include "vpx/vp8cx.h"
+#include "vpx/vpx_codec.h"
+#include "vpx/vpx_encoder.h"
+#include "vpx/vpx_image.h"
+
+namespace webrtc {
+
+// This interface is a proxy to the static libvpx functions, so that they
+// can be mocked for testing. Currently supports VP8 encoder functions.
+// TODO(sprang): Extend this to VP8 decoder and VP9 encoder/decoder too.
+class LibvpxInterface {
+ public:
+ LibvpxInterface() = default;
+ virtual ~LibvpxInterface() = default;
+
+ virtual vpx_image_t* img_alloc(vpx_image_t* img,
+ vpx_img_fmt_t fmt,
+ unsigned int d_w,
+ unsigned int d_h,
+ unsigned int align) const = 0;
+ virtual vpx_image_t* img_wrap(vpx_image_t* img,
+ vpx_img_fmt_t fmt,
+ unsigned int d_w,
+ unsigned int d_h,
+ unsigned int stride_align,
+ unsigned char* img_data) const = 0;
+ virtual void img_free(vpx_image_t* img) const = 0;
+
+ virtual vpx_codec_err_t codec_enc_config_set(
+ vpx_codec_ctx_t* ctx,
+ const vpx_codec_enc_cfg_t* cfg) const = 0;
+ virtual vpx_codec_err_t codec_enc_config_default(
+ vpx_codec_iface_t* iface,
+ vpx_codec_enc_cfg_t* cfg,
+ unsigned int usage) const = 0;
+
+ virtual vpx_codec_err_t codec_enc_init(vpx_codec_ctx_t* ctx,
+ vpx_codec_iface_t* iface,
+ const vpx_codec_enc_cfg_t* cfg,
+ vpx_codec_flags_t flags) const = 0;
+ virtual vpx_codec_err_t codec_enc_init_multi(vpx_codec_ctx_t* ctx,
+ vpx_codec_iface_t* iface,
+ vpx_codec_enc_cfg_t* cfg,
+ int num_enc,
+ vpx_codec_flags_t flags,
+ vpx_rational_t* dsf) const = 0;
+ virtual vpx_codec_err_t codec_destroy(vpx_codec_ctx_t* ctx) const = 0;
+
+ virtual vpx_codec_err_t codec_control(vpx_codec_ctx_t* ctx,
+ vp8e_enc_control_id ctrl_id,
+ uint32_t param) const = 0;
+ virtual vpx_codec_err_t codec_control(vpx_codec_ctx_t* ctx,
+ vp8e_enc_control_id ctrl_id,
+ int param) const = 0;
+ virtual vpx_codec_err_t codec_control(vpx_codec_ctx_t* ctx,
+ vp8e_enc_control_id ctrl_id,
+ int* param) const = 0;
+ virtual vpx_codec_err_t codec_control(vpx_codec_ctx_t* ctx,
+ vp8e_enc_control_id ctrl_id,
+ vpx_roi_map* param) const = 0;
+ virtual vpx_codec_err_t codec_control(vpx_codec_ctx_t* ctx,
+ vp8e_enc_control_id ctrl_id,
+ vpx_active_map* param) const = 0;
+ virtual vpx_codec_err_t codec_control(vpx_codec_ctx_t* ctx,
+ vp8e_enc_control_id ctrl_id,
+ vpx_scaling_mode* param) const = 0;
+ virtual vpx_codec_err_t codec_control(vpx_codec_ctx_t* ctx,
+ vp8e_enc_control_id ctrl_id,
+ vpx_svc_extra_cfg_t* param) const = 0;
+ virtual vpx_codec_err_t codec_control(vpx_codec_ctx_t* ctx,
+ vp8e_enc_control_id ctrl_id,
+ vpx_svc_frame_drop_t* param) const = 0;
+ virtual vpx_codec_err_t codec_control(vpx_codec_ctx_t* ctx,
+ vp8e_enc_control_id ctrl_id,
+ void* param) const = 0;
+ virtual vpx_codec_err_t codec_control(vpx_codec_ctx_t* ctx,
+ vp8e_enc_control_id ctrl_id,
+ vpx_svc_layer_id_t* param) const = 0;
+ virtual vpx_codec_err_t codec_control(
+ vpx_codec_ctx_t* ctx,
+ vp8e_enc_control_id ctrl_id,
+ vpx_svc_ref_frame_config_t* param) const = 0;
+ virtual vpx_codec_err_t codec_control(
+ vpx_codec_ctx_t* ctx,
+ vp8e_enc_control_id ctrl_id,
+ vpx_svc_spatial_layer_sync_t* param) const = 0;
+ virtual vpx_codec_err_t codec_control(vpx_codec_ctx_t* ctx,
+ vp8e_enc_control_id ctrl_id,
+ vpx_rc_funcs_t* param) const = 0;
+ virtual vpx_codec_err_t codec_encode(vpx_codec_ctx_t* ctx,
+ const vpx_image_t* img,
+ vpx_codec_pts_t pts,
+ uint64_t duration,
+ vpx_enc_frame_flags_t flags,
+ uint64_t deadline) const = 0;
+
+ virtual const vpx_codec_cx_pkt_t* codec_get_cx_data(
+ vpx_codec_ctx_t* ctx,
+ vpx_codec_iter_t* iter) const = 0;
+
+ virtual const char* codec_error_detail(vpx_codec_ctx_t* ctx) const = 0;
+ virtual const char* codec_error(vpx_codec_ctx_t* ctx) const = 0;
+ virtual const char* codec_err_to_string(vpx_codec_err_t err) const = 0;
+
+ // Returns interface wrapping the actual libvpx functions.
+ static std::unique_ptr<LibvpxInterface> Create();
+};
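+
+// A hypothetical caller-side sketch, assuming vpx_codec_vp8_cx() from
+// vpx/vp8cx.h:
+//   std::unique_ptr<LibvpxInterface> libvpx = LibvpxInterface::Create();
+//   vpx_codec_enc_cfg_t cfg;
+//   libvpx->codec_enc_config_default(vpx_codec_vp8_cx(), &cfg, /*usage=*/0);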
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_CODECS_INTERFACE_LIBVPX_INTERFACE_H_
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/interface/mock_libvpx_interface.h b/third_party/libwebrtc/modules/video_coding/codecs/interface/mock_libvpx_interface.h
new file mode 100644
index 0000000000..6dfe733dd0
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/interface/mock_libvpx_interface.h
@@ -0,0 +1,147 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_CODECS_INTERFACE_MOCK_LIBVPX_INTERFACE_H_
+#define MODULES_VIDEO_CODING_CODECS_INTERFACE_MOCK_LIBVPX_INTERFACE_H_
+
+#include "modules/video_coding/codecs/interface/libvpx_interface.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+class MockLibvpxInterface : public LibvpxInterface {
+ public:
+ MOCK_METHOD(
+ vpx_image_t*,
+ img_alloc,
+ (vpx_image_t*, vpx_img_fmt_t, unsigned int, unsigned int, unsigned int),
+ (const, override));
+ MOCK_METHOD(vpx_image_t*,
+ img_wrap,
+ (vpx_image_t*,
+ vpx_img_fmt_t,
+ unsigned int,
+ unsigned int,
+ unsigned int,
+ unsigned char*),
+ (const, override));
+ MOCK_METHOD(void, img_free, (vpx_image_t * img), (const, override));
+ MOCK_METHOD(vpx_codec_err_t,
+ codec_enc_config_set,
+ (vpx_codec_ctx_t*, const vpx_codec_enc_cfg_t*),
+ (const, override));
+ MOCK_METHOD(vpx_codec_err_t,
+ codec_enc_config_default,
+ (vpx_codec_iface_t*, vpx_codec_enc_cfg_t*, unsigned int),
+ (const, override));
+ MOCK_METHOD(vpx_codec_err_t,
+ codec_enc_init,
+ (vpx_codec_ctx_t*,
+ vpx_codec_iface_t*,
+ const vpx_codec_enc_cfg_t*,
+ vpx_codec_flags_t),
+ (const, override));
+ MOCK_METHOD(vpx_codec_err_t,
+ codec_enc_init_multi,
+ (vpx_codec_ctx_t*,
+ vpx_codec_iface_t*,
+ vpx_codec_enc_cfg_t*,
+ int,
+ vpx_codec_flags_t,
+ vpx_rational_t*),
+ (const, override));
+ MOCK_METHOD(vpx_codec_err_t,
+ codec_destroy,
+ (vpx_codec_ctx_t*),
+ (const, override));
+ MOCK_METHOD(vpx_codec_err_t,
+ codec_control,
+ (vpx_codec_ctx_t*, vp8e_enc_control_id, uint32_t),
+ (const, override));
+ MOCK_METHOD(vpx_codec_err_t,
+ codec_control,
+ (vpx_codec_ctx_t*, vp8e_enc_control_id, int),
+ (const, override));
+ MOCK_METHOD(vpx_codec_err_t,
+ codec_control,
+ (vpx_codec_ctx_t*, vp8e_enc_control_id, int*),
+ (const, override));
+ MOCK_METHOD(vpx_codec_err_t,
+ codec_control,
+ (vpx_codec_ctx_t*, vp8e_enc_control_id, vpx_roi_map*),
+ (const, override));
+ MOCK_METHOD(vpx_codec_err_t,
+ codec_control,
+ (vpx_codec_ctx_t*, vp8e_enc_control_id, vpx_active_map*),
+ (const, override));
+ MOCK_METHOD(vpx_codec_err_t,
+ codec_control,
+ (vpx_codec_ctx_t*, vp8e_enc_control_id, vpx_scaling_mode*),
+ (const, override));
+ MOCK_METHOD(vpx_codec_err_t,
+ codec_control,
+ (vpx_codec_ctx_t*, vp8e_enc_control_id, vpx_svc_extra_cfg_t*),
+ (const, override));
+ MOCK_METHOD(vpx_codec_err_t,
+ codec_control,
+ (vpx_codec_ctx_t*, vp8e_enc_control_id, vpx_svc_frame_drop_t*),
+ (const, override));
+ MOCK_METHOD(vpx_codec_err_t,
+ codec_control,
+ (vpx_codec_ctx_t*, vp8e_enc_control_id, void*),
+ (const, override));
+ MOCK_METHOD(vpx_codec_err_t,
+ codec_control,
+ (vpx_codec_ctx_t*, vp8e_enc_control_id, vpx_svc_layer_id_t*),
+ (const, override));
+ MOCK_METHOD(vpx_codec_err_t,
+ codec_control,
+ (vpx_codec_ctx_t*,
+ vp8e_enc_control_id,
+ vpx_svc_ref_frame_config_t*),
+ (const, override));
+ MOCK_METHOD(vpx_codec_err_t,
+ codec_control,
+ (vpx_codec_ctx_t*,
+ vp8e_enc_control_id,
+ vpx_svc_spatial_layer_sync_t*),
+ (const, override));
+ MOCK_METHOD(vpx_codec_err_t,
+ codec_control,
+ (vpx_codec_ctx_t*, vp8e_enc_control_id, vpx_rc_funcs_t*),
+ (const, override));
+ MOCK_METHOD(vpx_codec_err_t,
+ codec_encode,
+ (vpx_codec_ctx_t*,
+ const vpx_image_t*,
+ vpx_codec_pts_t,
+ uint64_t,
+ vpx_enc_frame_flags_t,
+ uint64_t),
+ (const, override));
+ MOCK_METHOD(const vpx_codec_cx_pkt_t*,
+ codec_get_cx_data,
+ (vpx_codec_ctx_t*, vpx_codec_iter_t*),
+ (const, override));
+ MOCK_METHOD(const char*,
+ codec_error_detail,
+ (vpx_codec_ctx_t*),
+ (const, override));
+ MOCK_METHOD(const char*, codec_error, (vpx_codec_ctx_t*), (const, override));
+ MOCK_METHOD(const char*,
+ codec_err_to_string,
+ (vpx_codec_err_t),
+ (const, override));
+};
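+
+// A hypothetical test sketch, injecting the mock where a LibvpxInterface is
+// expected:
+//   MockLibvpxInterface libvpx;
+//   EXPECT_CALL(libvpx, codec_destroy)
+//       .WillOnce(::testing::Return(VPX_CODEC_OK));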
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_CODECS_INTERFACE_MOCK_LIBVPX_INTERFACE_H_
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/multiplex/augmented_video_frame_buffer.cc b/third_party/libwebrtc/modules/video_coding/codecs/multiplex/augmented_video_frame_buffer.cc
new file mode 100644
index 0000000000..8740884f5b
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/multiplex/augmented_video_frame_buffer.cc
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/codecs/multiplex/include/augmented_video_frame_buffer.h"
+
+#include <stdint.h>
+
+#include <utility>
+
+#include "api/video/video_frame_buffer.h"
+
+namespace webrtc {
+
+AugmentedVideoFrameBuffer::AugmentedVideoFrameBuffer(
+ const rtc::scoped_refptr<VideoFrameBuffer>& video_frame_buffer,
+ std::unique_ptr<uint8_t[]> augmenting_data,
+ uint16_t augmenting_data_size)
+ : augmenting_data_size_(augmenting_data_size),
+ augmenting_data_(std::move(augmenting_data)),
+ video_frame_buffer_(video_frame_buffer) {}
+
+rtc::scoped_refptr<VideoFrameBuffer>
+AugmentedVideoFrameBuffer::GetVideoFrameBuffer() const {
+ return video_frame_buffer_;
+}
+
+uint8_t* AugmentedVideoFrameBuffer::GetAugmentingData() const {
+ return augmenting_data_.get();
+}
+
+uint16_t AugmentedVideoFrameBuffer::GetAugmentingDataSize() const {
+ return augmenting_data_size_;
+}
+
+VideoFrameBuffer::Type AugmentedVideoFrameBuffer::type() const {
+ return video_frame_buffer_->type();
+}
+
+int AugmentedVideoFrameBuffer::width() const {
+ return video_frame_buffer_->width();
+}
+
+int AugmentedVideoFrameBuffer::height() const {
+ return video_frame_buffer_->height();
+}
+
+rtc::scoped_refptr<I420BufferInterface> AugmentedVideoFrameBuffer::ToI420() {
+ return video_frame_buffer_->ToI420();
+}
+
+const I420BufferInterface* AugmentedVideoFrameBuffer::GetI420() const {
+ // TODO(https://crbug.com/webrtc/12021): When AugmentedVideoFrameBuffer is
+ // updated to implement the buffer interfaces of relevant
+ // VideoFrameBuffer::Types, stop overriding GetI420() as a workaround to
+ // AugmentedVideoFrameBuffer not being the type that is returned by type().
+ return video_frame_buffer_->GetI420();
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/multiplex/include/augmented_video_frame_buffer.h b/third_party/libwebrtc/modules/video_coding/codecs/multiplex/include/augmented_video_frame_buffer.h
new file mode 100644
index 0000000000..d711cd07da
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/multiplex/include/augmented_video_frame_buffer.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_CODECS_MULTIPLEX_INCLUDE_AUGMENTED_VIDEO_FRAME_BUFFER_H_
+#define MODULES_VIDEO_CODING_CODECS_MULTIPLEX_INCLUDE_AUGMENTED_VIDEO_FRAME_BUFFER_H_
+
+#include <cstdint>
+#include <memory>
+
+#include "api/scoped_refptr.h"
+#include "api/video/video_frame_buffer.h"
+
+namespace webrtc {
+class AugmentedVideoFrameBuffer : public VideoFrameBuffer {
+ public:
+ AugmentedVideoFrameBuffer(
+ const rtc::scoped_refptr<VideoFrameBuffer>& video_frame_buffer,
+ std::unique_ptr<uint8_t[]> augmenting_data,
+ uint16_t augmenting_data_size);
+
+ // Retrieves the underlying VideoFrameBuffer without the augmented data
+ rtc::scoped_refptr<VideoFrameBuffer> GetVideoFrameBuffer() const;
+
+  // Returns a raw pointer to the augmenting data; ownership remains with
+  // this buffer
+ uint8_t* GetAugmentingData() const;
+
+ // Get the size of the augmenting data
+ uint16_t GetAugmentingDataSize() const;
+
+ // Returns the type of the underlying VideoFrameBuffer
+ Type type() const final;
+
+ // Returns the width of the underlying VideoFrameBuffer
+ int width() const final;
+
+ // Returns the height of the underlying VideoFrameBuffer
+ int height() const final;
+
+  // Get the I420 buffer from the underlying frame buffer
+ rtc::scoped_refptr<I420BufferInterface> ToI420() final;
+ // Returns GetI420() of the underlying VideoFrameBuffer.
+ // TODO(hbos): AugmentedVideoFrameBuffer should not return a type (such as
+ // kI420) without also implementing that type's interface (i.e.
+ // I420BufferInterface). Either implement all possible Type's interfaces or
+ // return kNative.
+ const I420BufferInterface* GetI420() const final;
+
+ private:
+ uint16_t augmenting_data_size_;
+ std::unique_ptr<uint8_t[]> augmenting_data_;
+ rtc::scoped_refptr<webrtc::VideoFrameBuffer> video_frame_buffer_;
+};
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_CODECS_MULTIPLEX_INCLUDE_AUGMENTED_VIDEO_FRAME_BUFFER_H_
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/multiplex/include/multiplex_decoder_adapter.h b/third_party/libwebrtc/modules/video_coding/codecs/multiplex/include/multiplex_decoder_adapter.h
new file mode 100644
index 0000000000..e73f7d0e9f
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/multiplex/include/multiplex_decoder_adapter.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_CODECS_MULTIPLEX_INCLUDE_MULTIPLEX_DECODER_ADAPTER_H_
+#define MODULES_VIDEO_CODING_CODECS_MULTIPLEX_INCLUDE_MULTIPLEX_DECODER_ADAPTER_H_
+
+#include <map>
+#include <memory>
+#include <vector>
+
+#include "api/video_codecs/sdp_video_format.h"
+#include "api/video_codecs/video_decoder.h"
+#include "api/video_codecs/video_decoder_factory.h"
+#include "modules/video_coding/codecs/multiplex/include/multiplex_encoder_adapter.h"
+
+namespace webrtc {
+
+class MultiplexDecoderAdapter : public VideoDecoder {
+ public:
+ // `factory` is not owned and expected to outlive this class.
+ MultiplexDecoderAdapter(VideoDecoderFactory* factory,
+ const SdpVideoFormat& associated_format,
+ bool supports_augmenting_data = false);
+ virtual ~MultiplexDecoderAdapter();
+
+ // Implements VideoDecoder
+ bool Configure(const Settings& settings) override;
+ int32_t Decode(const EncodedImage& input_image,
+ bool missing_frames,
+ int64_t render_time_ms) override;
+ int32_t RegisterDecodeCompleteCallback(
+ DecodedImageCallback* callback) override;
+ int32_t Release() override;
+
+ void Decoded(AlphaCodecStream stream_idx,
+ VideoFrame* decoded_image,
+ absl::optional<int32_t> decode_time_ms,
+ absl::optional<uint8_t> qp);
+
+ private:
+ // Wrapper class that redirects Decoded() calls.
+ class AdapterDecodedImageCallback;
+
+ // Holds the decoded image output of a frame.
+ struct DecodedImageData;
+
+  // Holds the augmenting data of an image.
+ struct AugmentingData;
+
+ void MergeAlphaImages(VideoFrame* decoded_image,
+ const absl::optional<int32_t>& decode_time_ms,
+ const absl::optional<uint8_t>& qp,
+ VideoFrame* multiplex_decoded_image,
+ const absl::optional<int32_t>& multiplex_decode_time_ms,
+ const absl::optional<uint8_t>& multiplex_qp,
+ std::unique_ptr<uint8_t[]> augmenting_data,
+ uint16_t augmenting_data_length);
+
+ VideoDecoderFactory* const factory_;
+ const SdpVideoFormat associated_format_;
+ std::vector<std::unique_ptr<VideoDecoder>> decoders_;
+ std::vector<std::unique_ptr<AdapterDecodedImageCallback>> adapter_callbacks_;
+ DecodedImageCallback* decoded_complete_callback_;
+
+ // Holds YUV or AXX decode output of a frame that is identified by timestamp.
+ std::map<uint32_t /* timestamp */, DecodedImageData> decoded_data_;
+ std::map<uint32_t /* timestamp */, AugmentingData> decoded_augmenting_data_;
+ const bool supports_augmenting_data_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_CODECS_MULTIPLEX_INCLUDE_MULTIPLEX_DECODER_ADAPTER_H_
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/multiplex/include/multiplex_encoder_adapter.h b/third_party/libwebrtc/modules/video_coding/codecs/multiplex/include/multiplex_encoder_adapter.h
new file mode 100644
index 0000000000..2e5aad8a5b
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/multiplex/include/multiplex_encoder_adapter.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_CODECS_MULTIPLEX_INCLUDE_MULTIPLEX_ENCODER_ADAPTER_H_
+#define MODULES_VIDEO_CODING_CODECS_MULTIPLEX_INCLUDE_MULTIPLEX_ENCODER_ADAPTER_H_
+
+#include <map>
+#include <memory>
+#include <vector>
+
+#include "api/fec_controller_override.h"
+#include "api/video_codecs/sdp_video_format.h"
+#include "api/video_codecs/video_encoder.h"
+#include "api/video_codecs/video_encoder_factory.h"
+#include "modules/video_coding/codecs/multiplex/multiplex_encoded_image_packer.h"
+#include "modules/video_coding/include/video_codec_interface.h"
+#include "rtc_base/synchronization/mutex.h"
+
+namespace webrtc {
+
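+// Indices of the two sub-streams: the primary YUV stream and the auxiliary
+// alpha (AXX) stream.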
+enum AlphaCodecStream {
+ kYUVStream = 0,
+ kAXXStream = 1,
+ kAlphaCodecStreams = 2,
+};
+
+class MultiplexEncoderAdapter : public VideoEncoder {
+ public:
+ // `factory` is not owned and expected to outlive this class.
+ MultiplexEncoderAdapter(VideoEncoderFactory* factory,
+ const SdpVideoFormat& associated_format,
+ bool supports_augmenting_data = false);
+ virtual ~MultiplexEncoderAdapter();
+
+ // Implements VideoEncoder
+ void SetFecControllerOverride(
+ FecControllerOverride* fec_controller_override) override;
+ int InitEncode(const VideoCodec* inst,
+ const VideoEncoder::Settings& settings) override;
+ int Encode(const VideoFrame& input_image,
+ const std::vector<VideoFrameType>* frame_types) override;
+ int RegisterEncodeCompleteCallback(EncodedImageCallback* callback) override;
+ void SetRates(const RateControlParameters& parameters) override;
+ void OnPacketLossRateUpdate(float packet_loss_rate) override;
+ void OnRttUpdate(int64_t rtt_ms) override;
+ void OnLossNotification(const LossNotification& loss_notification) override;
+ int Release() override;
+ EncoderInfo GetEncoderInfo() const override;
+
+ EncodedImageCallback::Result OnEncodedImage(
+ AlphaCodecStream stream_idx,
+ const EncodedImage& encodedImage,
+ const CodecSpecificInfo* codecSpecificInfo);
+
+ private:
+ // Wrapper class that redirects OnEncodedImage() calls.
+ class AdapterEncodedImageCallback;
+
+ VideoEncoderFactory* const factory_;
+ const SdpVideoFormat associated_format_;
+ std::vector<std::unique_ptr<VideoEncoder>> encoders_;
+ std::vector<std::unique_ptr<AdapterEncodedImageCallback>> adapter_callbacks_;
+ EncodedImageCallback* encoded_complete_callback_;
+
+ std::map<uint32_t /* timestamp */, MultiplexImage> stashed_images_
+ RTC_GUARDED_BY(mutex_);
+
+ uint16_t picture_index_ = 0;
+ std::vector<uint8_t> multiplex_dummy_planes_;
+
+ int key_frame_interval_;
+ EncodedImage combined_image_;
+
+ Mutex mutex_;
+
+ const bool supports_augmented_data_;
+ int augmenting_data_size_ = 0;
+
+ EncoderInfo encoder_info_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_CODECS_MULTIPLEX_INCLUDE_MULTIPLEX_ENCODER_ADAPTER_H_
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/multiplex/multiplex_decoder_adapter.cc b/third_party/libwebrtc/modules/video_coding/codecs/multiplex/multiplex_decoder_adapter.cc
new file mode 100644
index 0000000000..0ad3d3883a
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/multiplex/multiplex_decoder_adapter.cc
@@ -0,0 +1,266 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/codecs/multiplex/include/multiplex_decoder_adapter.h"
+
+#include "api/video/encoded_image.h"
+#include "api/video/i420_buffer.h"
+#include "api/video/video_frame_buffer.h"
+#include "common_video/include/video_frame_buffer.h"
+#include "common_video/libyuv/include/webrtc_libyuv.h"
+#include "modules/video_coding/codecs/multiplex/include/augmented_video_frame_buffer.h"
+#include "modules/video_coding/codecs/multiplex/multiplex_encoded_image_packer.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
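+// Forwards per-stream Decoded() callbacks back to the owning adapter,
+// tagged with the AlphaCodecStream they originate from.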
+class MultiplexDecoderAdapter::AdapterDecodedImageCallback
+ : public webrtc::DecodedImageCallback {
+ public:
+ AdapterDecodedImageCallback(webrtc::MultiplexDecoderAdapter* adapter,
+ AlphaCodecStream stream_idx)
+ : adapter_(adapter), stream_idx_(stream_idx) {}
+
+ void Decoded(VideoFrame& decoded_image,
+ absl::optional<int32_t> decode_time_ms,
+ absl::optional<uint8_t> qp) override {
+ if (!adapter_)
+ return;
+ adapter_->Decoded(stream_idx_, &decoded_image, decode_time_ms, qp);
+ }
+ int32_t Decoded(VideoFrame& decoded_image) override {
+ RTC_DCHECK_NOTREACHED();
+ return WEBRTC_VIDEO_CODEC_OK;
+ }
+ int32_t Decoded(VideoFrame& decoded_image, int64_t decode_time_ms) override {
+ RTC_DCHECK_NOTREACHED();
+ return WEBRTC_VIDEO_CODEC_OK;
+ }
+
+ private:
+ MultiplexDecoderAdapter* adapter_;
+ const AlphaCodecStream stream_idx_;
+};
+
+struct MultiplexDecoderAdapter::DecodedImageData {
+ explicit DecodedImageData(AlphaCodecStream stream_idx)
+ : stream_idx_(stream_idx),
+ decoded_image_(
+ VideoFrame::Builder()
+ .set_video_frame_buffer(
+ I420Buffer::Create(1 /* width */, 1 /* height */))
+ .set_timestamp_rtp(0)
+ .set_timestamp_us(0)
+ .set_rotation(kVideoRotation_0)
+ .build()) {
+ RTC_DCHECK_EQ(kAXXStream, stream_idx);
+ }
+ DecodedImageData(AlphaCodecStream stream_idx,
+ const VideoFrame& decoded_image,
+ const absl::optional<int32_t>& decode_time_ms,
+ const absl::optional<uint8_t>& qp)
+ : stream_idx_(stream_idx),
+ decoded_image_(decoded_image),
+ decode_time_ms_(decode_time_ms),
+ qp_(qp) {}
+
+ DecodedImageData() = delete;
+ DecodedImageData(const DecodedImageData&) = delete;
+ DecodedImageData& operator=(const DecodedImageData&) = delete;
+
+ const AlphaCodecStream stream_idx_;
+ VideoFrame decoded_image_;
+ const absl::optional<int32_t> decode_time_ms_;
+ const absl::optional<uint8_t> qp_;
+};
+
+struct MultiplexDecoderAdapter::AugmentingData {
+ AugmentingData(std::unique_ptr<uint8_t[]> augmenting_data, uint16_t data_size)
+ : data_(std::move(augmenting_data)), size_(data_size) {}
+ AugmentingData() = delete;
+ AugmentingData(const AugmentingData&) = delete;
+ AugmentingData& operator=(const AugmentingData&) = delete;
+
+ std::unique_ptr<uint8_t[]> data_;
+ const uint16_t size_;
+};
+
+MultiplexDecoderAdapter::MultiplexDecoderAdapter(
+ VideoDecoderFactory* factory,
+ const SdpVideoFormat& associated_format,
+ bool supports_augmenting_data)
+ : factory_(factory),
+ associated_format_(associated_format),
+ supports_augmenting_data_(supports_augmenting_data) {}
+
+MultiplexDecoderAdapter::~MultiplexDecoderAdapter() {
+ Release();
+}
+
+bool MultiplexDecoderAdapter::Configure(const Settings& settings) {
+ RTC_DCHECK_EQ(settings.codec_type(), kVideoCodecMultiplex);
+ Settings associated_settings = settings;
+ associated_settings.set_codec_type(
+ PayloadStringToCodecType(associated_format_.name));
+ for (size_t i = 0; i < kAlphaCodecStreams; ++i) {
+ std::unique_ptr<VideoDecoder> decoder =
+ factory_->CreateVideoDecoder(associated_format_);
+ if (!decoder->Configure(associated_settings)) {
+ return false;
+ }
+ adapter_callbacks_.emplace_back(
+ new MultiplexDecoderAdapter::AdapterDecodedImageCallback(
+ this, static_cast<AlphaCodecStream>(i)));
+ decoder->RegisterDecodeCompleteCallback(adapter_callbacks_.back().get());
+ decoders_.emplace_back(std::move(decoder));
+ }
+ return true;
+}
+
+int32_t MultiplexDecoderAdapter::Decode(const EncodedImage& input_image,
+ bool missing_frames,
+ int64_t render_time_ms) {
+ MultiplexImage image = MultiplexEncodedImagePacker::Unpack(input_image);
+
+ if (supports_augmenting_data_) {
+ RTC_DCHECK(decoded_augmenting_data_.find(input_image.Timestamp()) ==
+ decoded_augmenting_data_.end());
+ decoded_augmenting_data_.emplace(
+ std::piecewise_construct,
+ std::forward_as_tuple(input_image.Timestamp()),
+ std::forward_as_tuple(std::move(image.augmenting_data),
+ image.augmenting_data_size));
+ }
+
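+  // A single-component image carries no AXX frame. Stash a placeholder AXX
+  // entry up front so the YUV Decoded() callback finds a counterpart and
+  // delivers the frame immediately.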
+ if (image.component_count == 1) {
+ RTC_DCHECK(decoded_data_.find(input_image.Timestamp()) ==
+ decoded_data_.end());
+ decoded_data_.emplace(std::piecewise_construct,
+ std::forward_as_tuple(input_image.Timestamp()),
+ std::forward_as_tuple(kAXXStream));
+ }
+ int32_t rv = 0;
+ for (size_t i = 0; i < image.image_components.size(); i++) {
+ rv = decoders_[image.image_components[i].component_index]->Decode(
+ image.image_components[i].encoded_image, missing_frames,
+ render_time_ms);
+ if (rv != WEBRTC_VIDEO_CODEC_OK)
+ return rv;
+ }
+ return rv;
+}
+
+int32_t MultiplexDecoderAdapter::RegisterDecodeCompleteCallback(
+ DecodedImageCallback* callback) {
+ decoded_complete_callback_ = callback;
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+int32_t MultiplexDecoderAdapter::Release() {
+ for (auto& decoder : decoders_) {
+ const int32_t rv = decoder->Release();
+ if (rv)
+ return rv;
+ }
+ decoders_.clear();
+ adapter_callbacks_.clear();
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+void MultiplexDecoderAdapter::Decoded(AlphaCodecStream stream_idx,
+ VideoFrame* decoded_image,
+ absl::optional<int32_t> decode_time_ms,
+ absl::optional<uint8_t> qp) {
+ const auto& other_decoded_data_it =
+ decoded_data_.find(decoded_image->timestamp());
+ const auto& augmenting_data_it =
+ decoded_augmenting_data_.find(decoded_image->timestamp());
+ const bool has_augmenting_data =
+ augmenting_data_it != decoded_augmenting_data_.end();
+ if (other_decoded_data_it != decoded_data_.end()) {
+ uint16_t augmenting_data_size =
+ has_augmenting_data ? augmenting_data_it->second.size_ : 0;
+ std::unique_ptr<uint8_t[]> augmenting_data =
+ has_augmenting_data ? std::move(augmenting_data_it->second.data_)
+ : nullptr;
+ auto& other_image_data = other_decoded_data_it->second;
+ if (stream_idx == kYUVStream) {
+ RTC_DCHECK_EQ(kAXXStream, other_image_data.stream_idx_);
+ MergeAlphaImages(decoded_image, decode_time_ms, qp,
+ &other_image_data.decoded_image_,
+ other_image_data.decode_time_ms_, other_image_data.qp_,
+ std::move(augmenting_data), augmenting_data_size);
+ } else {
+ RTC_DCHECK_EQ(kYUVStream, other_image_data.stream_idx_);
+ RTC_DCHECK_EQ(kAXXStream, stream_idx);
+ MergeAlphaImages(&other_image_data.decoded_image_,
+ other_image_data.decode_time_ms_, other_image_data.qp_,
+ decoded_image, decode_time_ms, qp,
+ std::move(augmenting_data), augmenting_data_size);
+ }
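+    // Half-open erase: drops all entries older than this timestamp while
+    // keeping the matched entry itself; it is removed once a newer frame
+    // completes.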
+ decoded_data_.erase(decoded_data_.begin(), other_decoded_data_it);
+ if (has_augmenting_data) {
+ decoded_augmenting_data_.erase(decoded_augmenting_data_.begin(),
+ augmenting_data_it);
+ }
+ return;
+ }
+ RTC_DCHECK(decoded_data_.find(decoded_image->timestamp()) ==
+ decoded_data_.end());
+ decoded_data_.emplace(
+ std::piecewise_construct,
+ std::forward_as_tuple(decoded_image->timestamp()),
+ std::forward_as_tuple(stream_idx, *decoded_image, decode_time_ms, qp));
+}
+
+void MultiplexDecoderAdapter::MergeAlphaImages(
+ VideoFrame* decoded_image,
+ const absl::optional<int32_t>& decode_time_ms,
+ const absl::optional<uint8_t>& qp,
+ VideoFrame* alpha_decoded_image,
+ const absl::optional<int32_t>& alpha_decode_time_ms,
+ const absl::optional<uint8_t>& alpha_qp,
+ std::unique_ptr<uint8_t[]> augmenting_data,
+ uint16_t augmenting_data_length) {
+ rtc::scoped_refptr<VideoFrameBuffer> merged_buffer;
+ if (!alpha_decoded_image->timestamp()) {
+ merged_buffer = decoded_image->video_frame_buffer();
+ } else {
+ rtc::scoped_refptr<webrtc::I420BufferInterface> yuv_buffer =
+ decoded_image->video_frame_buffer()->ToI420();
+ rtc::scoped_refptr<webrtc::I420BufferInterface> alpha_buffer =
+ alpha_decoded_image->video_frame_buffer()->ToI420();
+ RTC_DCHECK_EQ(yuv_buffer->width(), alpha_buffer->width());
+ RTC_DCHECK_EQ(yuv_buffer->height(), alpha_buffer->height());
+ merged_buffer = WrapI420ABuffer(
+ yuv_buffer->width(), yuv_buffer->height(), yuv_buffer->DataY(),
+ yuv_buffer->StrideY(), yuv_buffer->DataU(), yuv_buffer->StrideU(),
+ yuv_buffer->DataV(), yuv_buffer->StrideV(), alpha_buffer->DataY(),
+ alpha_buffer->StrideY(),
+ // To keep references alive.
+ [yuv_buffer, alpha_buffer] {});
+ }
+ if (supports_augmenting_data_) {
+ merged_buffer = rtc::make_ref_counted<AugmentedVideoFrameBuffer>(
+ merged_buffer, std::move(augmenting_data), augmenting_data_length);
+ }
+
+ VideoFrame merged_image = VideoFrame::Builder()
+ .set_video_frame_buffer(merged_buffer)
+ .set_timestamp_rtp(decoded_image->timestamp())
+ .set_timestamp_us(0)
+ .set_rotation(decoded_image->rotation())
+ .set_id(decoded_image->id())
+ .set_packet_infos(decoded_image->packet_infos())
+ .build();
+ decoded_complete_callback_->Decoded(merged_image, decode_time_ms, qp);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/multiplex/multiplex_encoded_image_packer.cc b/third_party/libwebrtc/modules/video_coding/codecs/multiplex/multiplex_encoded_image_packer.cc
new file mode 100644
index 0000000000..0f05d1a89c
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/multiplex/multiplex_encoded_image_packer.cc
@@ -0,0 +1,277 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/codecs/multiplex/multiplex_encoded_image_packer.h"
+
+#include <cstring>
+#include <utility>
+
+#include "modules/rtp_rtcp/source/byte_io.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+int PackHeader(uint8_t* buffer, MultiplexImageHeader header) {
+ int offset = 0;
+ ByteWriter<uint8_t>::WriteBigEndian(buffer + offset, header.component_count);
+ offset += sizeof(uint8_t);
+
+ ByteWriter<uint16_t>::WriteBigEndian(buffer + offset, header.image_index);
+ offset += sizeof(uint16_t);
+
+ ByteWriter<uint16_t>::WriteBigEndian(buffer + offset,
+ header.augmenting_data_size);
+ offset += sizeof(uint16_t);
+
+ ByteWriter<uint32_t>::WriteBigEndian(buffer + offset,
+ header.augmenting_data_offset);
+ offset += sizeof(uint32_t);
+
+ ByteWriter<uint32_t>::WriteBigEndian(buffer + offset,
+ header.first_component_header_offset);
+ offset += sizeof(uint32_t);
+
+ RTC_DCHECK_EQ(offset, kMultiplexImageHeaderSize);
+ return offset;
+}
+
+MultiplexImageHeader UnpackHeader(const uint8_t* buffer) {
+ MultiplexImageHeader header;
+ int offset = 0;
+ header.component_count = ByteReader<uint8_t>::ReadBigEndian(buffer + offset);
+ offset += sizeof(uint8_t);
+
+ header.image_index = ByteReader<uint16_t>::ReadBigEndian(buffer + offset);
+ offset += sizeof(uint16_t);
+
+ header.augmenting_data_size =
+ ByteReader<uint16_t>::ReadBigEndian(buffer + offset);
+ offset += sizeof(uint16_t);
+
+ header.augmenting_data_offset =
+ ByteReader<uint32_t>::ReadBigEndian(buffer + offset);
+ offset += sizeof(uint32_t);
+
+ header.first_component_header_offset =
+ ByteReader<uint32_t>::ReadBigEndian(buffer + offset);
+ offset += sizeof(uint32_t);
+
+ RTC_DCHECK_EQ(offset, kMultiplexImageHeaderSize);
+ return header;
+}
+
+int PackFrameHeader(uint8_t* buffer,
+ MultiplexImageComponentHeader frame_header) {
+ int offset = 0;
+ ByteWriter<uint32_t>::WriteBigEndian(
+ buffer + offset, frame_header.next_component_header_offset);
+ offset += sizeof(uint32_t);
+
+ ByteWriter<uint8_t>::WriteBigEndian(buffer + offset,
+ frame_header.component_index);
+ offset += sizeof(uint8_t);
+
+ ByteWriter<uint32_t>::WriteBigEndian(buffer + offset,
+ frame_header.bitstream_offset);
+ offset += sizeof(uint32_t);
+
+ ByteWriter<uint32_t>::WriteBigEndian(buffer + offset,
+ frame_header.bitstream_length);
+ offset += sizeof(uint32_t);
+
+ ByteWriter<uint8_t>::WriteBigEndian(buffer + offset, frame_header.codec_type);
+ offset += sizeof(uint8_t);
+
+ ByteWriter<uint8_t>::WriteBigEndian(
+ buffer + offset, static_cast<uint8_t>(frame_header.frame_type));
+ offset += sizeof(uint8_t);
+
+ RTC_DCHECK_EQ(offset, kMultiplexImageComponentHeaderSize);
+ return offset;
+}
+
+MultiplexImageComponentHeader UnpackFrameHeader(const uint8_t* buffer) {
+ MultiplexImageComponentHeader frame_header;
+ int offset = 0;
+
+ frame_header.next_component_header_offset =
+ ByteReader<uint32_t>::ReadBigEndian(buffer + offset);
+ offset += sizeof(uint32_t);
+
+ frame_header.component_index =
+ ByteReader<uint8_t>::ReadBigEndian(buffer + offset);
+ offset += sizeof(uint8_t);
+
+ frame_header.bitstream_offset =
+ ByteReader<uint32_t>::ReadBigEndian(buffer + offset);
+ offset += sizeof(uint32_t);
+
+ frame_header.bitstream_length =
+ ByteReader<uint32_t>::ReadBigEndian(buffer + offset);
+ offset += sizeof(uint32_t);
+
+ // This makes the wire format depend on the numeric values of the
+ // VideoCodecType and VideoFrameType enum constants.
+ frame_header.codec_type = static_cast<VideoCodecType>(
+ ByteReader<uint8_t>::ReadBigEndian(buffer + offset));
+ offset += sizeof(uint8_t);
+
+ frame_header.frame_type = static_cast<VideoFrameType>(
+ ByteReader<uint8_t>::ReadBigEndian(buffer + offset));
+ offset += sizeof(uint8_t);
+
+ RTC_DCHECK_EQ(offset, kMultiplexImageComponentHeaderSize);
+ return frame_header;
+}
+
+void PackBitstream(uint8_t* buffer, MultiplexImageComponent image) {
+ memcpy(buffer, image.encoded_image.data(), image.encoded_image.size());
+}
+
+MultiplexImage::MultiplexImage(uint16_t picture_index,
+ uint8_t frame_count,
+ std::unique_ptr<uint8_t[]> augmenting_data,
+ uint16_t augmenting_data_size)
+ : image_index(picture_index),
+ component_count(frame_count),
+ augmenting_data_size(augmenting_data_size),
+ augmenting_data(std::move(augmenting_data)) {}
+
+EncodedImage MultiplexEncodedImagePacker::PackAndRelease(
+ const MultiplexImage& multiplex_image) {
+ MultiplexImageHeader header;
+ std::vector<MultiplexImageComponentHeader> frame_headers;
+
+ header.component_count = multiplex_image.component_count;
+ header.image_index = multiplex_image.image_index;
+ int header_offset = kMultiplexImageHeaderSize;
+ header.first_component_header_offset = header_offset;
+ header.augmenting_data_offset =
+ header_offset +
+ kMultiplexImageComponentHeaderSize * header.component_count;
+ header.augmenting_data_size = multiplex_image.augmenting_data_size;
+ int bitstream_offset =
+ header.augmenting_data_offset + header.augmenting_data_size;
+
+ const std::vector<MultiplexImageComponent>& images =
+ multiplex_image.image_components;
+ EncodedImage combined_image = images[0].encoded_image;
+ for (size_t i = 0; i < images.size(); i++) {
+ MultiplexImageComponentHeader frame_header;
+ header_offset += kMultiplexImageComponentHeaderSize;
+ frame_header.next_component_header_offset =
+ (i == images.size() - 1) ? 0 : header_offset;
+ frame_header.component_index = images[i].component_index;
+
+ frame_header.bitstream_offset = bitstream_offset;
+ frame_header.bitstream_length =
+ static_cast<uint32_t>(images[i].encoded_image.size());
+ bitstream_offset += frame_header.bitstream_length;
+
+ frame_header.codec_type = images[i].codec_type;
+ frame_header.frame_type = images[i].encoded_image._frameType;
+
+    // If any component is a delta frame, the combined frame must be marked
+    // as a delta frame: decoding the whole image without previous frame data
+    // requires every component to be a key frame. Only when all components
+    // are key frames can the combined frame be marked as a key frame.
+ if (frame_header.frame_type == VideoFrameType::kVideoFrameDelta) {
+ combined_image._frameType = VideoFrameType::kVideoFrameDelta;
+ }
+
+ frame_headers.push_back(frame_header);
+ }
+
+ auto buffer = EncodedImageBuffer::Create(bitstream_offset);
+ combined_image.SetEncodedData(buffer);
+
+ // header
+ header_offset = PackHeader(buffer->data(), header);
+ RTC_DCHECK_EQ(header.first_component_header_offset,
+ kMultiplexImageHeaderSize);
+
+ // Frame Header
+ for (size_t i = 0; i < images.size(); i++) {
+ int relative_offset =
+ PackFrameHeader(buffer->data() + header_offset, frame_headers[i]);
+ RTC_DCHECK_EQ(relative_offset, kMultiplexImageComponentHeaderSize);
+
+ header_offset = frame_headers[i].next_component_header_offset;
+ RTC_DCHECK_EQ(header_offset,
+ (i == images.size() - 1)
+ ? 0
+ : (kMultiplexImageHeaderSize +
+ kMultiplexImageComponentHeaderSize * (i + 1)));
+ }
+
+ // Augmenting Data
+ if (multiplex_image.augmenting_data_size != 0) {
+ memcpy(buffer->data() + header.augmenting_data_offset,
+ multiplex_image.augmenting_data.get(),
+ multiplex_image.augmenting_data_size);
+ }
+
+ // Bitstreams
+ for (size_t i = 0; i < images.size(); i++) {
+ PackBitstream(buffer->data() + frame_headers[i].bitstream_offset,
+ images[i]);
+ }
+
+ return combined_image;
+}
+
+MultiplexImage MultiplexEncodedImagePacker::Unpack(
+ const EncodedImage& combined_image) {
+ const MultiplexImageHeader& header = UnpackHeader(combined_image.data());
+
+ std::vector<MultiplexImageComponentHeader> frame_headers;
+ int header_offset = header.first_component_header_offset;
+
+ while (header_offset > 0) {
+ frame_headers.push_back(
+ UnpackFrameHeader(combined_image.data() + header_offset));
+ header_offset = frame_headers.back().next_component_header_offset;
+ }
+
+ RTC_DCHECK_LE(frame_headers.size(), header.component_count);
+ std::unique_ptr<uint8_t[]> augmenting_data = nullptr;
+ if (header.augmenting_data_size != 0) {
+ augmenting_data =
+ std::unique_ptr<uint8_t[]>(new uint8_t[header.augmenting_data_size]);
+ memcpy(augmenting_data.get(),
+ combined_image.data() + header.augmenting_data_offset,
+ header.augmenting_data_size);
+ }
+
+ MultiplexImage multiplex_image(header.image_index, header.component_count,
+ std::move(augmenting_data),
+ header.augmenting_data_size);
+
+ for (size_t i = 0; i < frame_headers.size(); i++) {
+ MultiplexImageComponent image_component;
+ image_component.component_index = frame_headers[i].component_index;
+ image_component.codec_type = frame_headers[i].codec_type;
+
+ EncodedImage encoded_image = combined_image;
+ encoded_image.SetTimestamp(combined_image.Timestamp());
+ encoded_image._frameType = frame_headers[i].frame_type;
+ encoded_image.SetEncodedData(EncodedImageBuffer::Create(
+ combined_image.data() + frame_headers[i].bitstream_offset,
+ frame_headers[i].bitstream_length));
+
+ image_component.encoded_image = encoded_image;
+
+ multiplex_image.image_components.push_back(image_component);
+ }
+
+ return multiplex_image;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/multiplex/multiplex_encoded_image_packer.h b/third_party/libwebrtc/modules/video_coding/codecs/multiplex/multiplex_encoded_image_packer.h
new file mode 100644
index 0000000000..299a0159d5
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/multiplex/multiplex_encoded_image_packer.h
@@ -0,0 +1,120 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_CODECS_MULTIPLEX_MULTIPLEX_ENCODED_IMAGE_PACKER_H_
+#define MODULES_VIDEO_CODING_CODECS_MULTIPLEX_MULTIPLEX_ENCODED_IMAGE_PACKER_H_
+
+#include <cstdint>
+#include <memory>
+#include <vector>
+
+#include "api/video/encoded_image.h"
+#include "api/video_codecs/video_codec.h"
+
+namespace webrtc {
+
+// Struct describing the whole bundle of multiple frames of an image.
+// This struct is expected to be set at the beginning of a picture's
+// bitstream data.
+struct MultiplexImageHeader {
+  // The number of frame components making up the complete picture data.
+  // For example, `component_count` is 2 for a YUV frame with an Alpha frame.
+ uint8_t component_count;
+
+  // The increasing image ID given by the encoder. All components of a
+  // single picture share the same `image_index`.
+ uint16_t image_index;
+
+  // The location of the first MultiplexImageComponentHeader in the
+  // bitstream, in bytes from the beginning of the bitstream.
+ uint32_t first_component_header_offset;
+
+  // The location of the augmenting data in the bitstream, in bytes from the
+  // beginning of the bitstream.
+ uint32_t augmenting_data_offset;
+
+  // The size of the augmenting data in the bitstream, in bytes.
+ uint16_t augmenting_data_size;
+};
+const int kMultiplexImageHeaderSize =
+ sizeof(uint8_t) + 2 * sizeof(uint16_t) + 2 * sizeof(uint32_t);
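+// Wire layout written by PackHeader() (all fields big-endian):
+//   uint8_t  component_count
+//   uint16_t image_index
+//   uint16_t augmenting_data_size
+//   uint32_t augmenting_data_offset
+//   uint32_t first_component_header_offset
+// for a total of 13 bytes.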
+
+// Struct describing the individual image component's content.
+struct MultiplexImageComponentHeader {
+  // The location of the next MultiplexImageComponentHeader in the bitstream,
+  // in bytes from the beginning of the bitstream.
+ uint32_t next_component_header_offset;
+
+  // Identifies which component this frame represents, e.g. YUV frame vs
+  // Alpha frame.
+ uint8_t component_index;
+
+  // The location of the frame's encoded image data in the bitstream, in
+  // bytes from the beginning of the bitstream.
+ uint32_t bitstream_offset;
+
+ // Indicates the number of bytes of the encoded image data.
+ uint32_t bitstream_length;
+
+  // Indicates the underlying VideoCodecType of the frame, e.g. VP8 or VP9.
+ VideoCodecType codec_type;
+
+  // Indicates whether the underlying frame is a key frame or a delta frame.
+ VideoFrameType frame_type;
+};
+const int kMultiplexImageComponentHeaderSize =
+ sizeof(uint32_t) + sizeof(uint8_t) + sizeof(uint32_t) + sizeof(uint32_t) +
+ sizeof(uint8_t) + sizeof(uint8_t);
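+// Wire layout written by PackFrameHeader() (all fields big-endian):
+//   uint32_t next_component_header_offset
+//   uint8_t  component_index
+//   uint32_t bitstream_offset
+//   uint32_t bitstream_length
+//   uint8_t  codec_type
+//   uint8_t  frame_type
+// for a total of 15 bytes.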
+
+// Struct holding the encoded image for one component.
+struct MultiplexImageComponent {
+  // Indicates the underlying VideoCodecType of the frame, e.g. VP8 or VP9.
+ VideoCodecType codec_type;
+
+  // Identifies which component this frame represents, e.g. YUV frame vs
+  // Alpha frame.
+ uint8_t component_index;
+
+ // Stores the actual frame data of the encoded image.
+ EncodedImage encoded_image;
+};
+
+// Struct holding the whole frame bundle of components of an image.
+struct MultiplexImage {
+ uint16_t image_index;
+ uint8_t component_count;
+ uint16_t augmenting_data_size;
+ std::unique_ptr<uint8_t[]> augmenting_data;
+ std::vector<MultiplexImageComponent> image_components;
+
+ MultiplexImage(uint16_t picture_index,
+ uint8_t component_count,
+ std::unique_ptr<uint8_t[]> augmenting_data,
+ uint16_t augmenting_data_size);
+};
+
+// A utility class providing conversion between two representations of a
+// multiplex image frame:
+// 1. The packed version is a single encoded image with all necessary
+//    metadata packed into the bitstream as headers.
+// 2. The unpacked version is essentially a list of encoded images, one per
+//    component.
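+//
+// Round-trip sketch (`image` is a populated MultiplexImage):
+//   EncodedImage packed = MultiplexEncodedImagePacker::PackAndRelease(image);
+//   MultiplexImage unpacked = MultiplexEncodedImagePacker::Unpack(packed);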
+class MultiplexEncodedImagePacker {
+ public:
+  // Note: It is the caller's responsibility to release the buffer of the
+  // result.
+ static EncodedImage PackAndRelease(const MultiplexImage& image);
+
+  // Note: Unpack() copies each component's bitstream out of `combined_image`.
+ static MultiplexImage Unpack(const EncodedImage& combined_image);
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_CODECS_MULTIPLEX_MULTIPLEX_ENCODED_IMAGE_PACKER_H_
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/multiplex/multiplex_encoder_adapter.cc b/third_party/libwebrtc/modules/video_coding/codecs/multiplex/multiplex_encoder_adapter.cc
new file mode 100644
index 0000000000..80744e2d8c
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/multiplex/multiplex_encoder_adapter.cc
@@ -0,0 +1,353 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/codecs/multiplex/include/multiplex_encoder_adapter.h"
+
+#include <cstring>
+
+#include "api/video/encoded_image.h"
+#include "api/video_codecs/video_encoder.h"
+#include "common_video/include/video_frame_buffer.h"
+#include "common_video/libyuv/include/webrtc_libyuv.h"
+#include "media/base/video_common.h"
+#include "modules/video_coding/codecs/multiplex/include/augmented_video_frame_buffer.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+// Callback wrapper that helps distinguish returned results from `encoders_`
+// instances.
+class MultiplexEncoderAdapter::AdapterEncodedImageCallback
+ : public webrtc::EncodedImageCallback {
+ public:
+ AdapterEncodedImageCallback(webrtc::MultiplexEncoderAdapter* adapter,
+ AlphaCodecStream stream_idx)
+ : adapter_(adapter), stream_idx_(stream_idx) {}
+
+ EncodedImageCallback::Result OnEncodedImage(
+ const EncodedImage& encoded_image,
+ const CodecSpecificInfo* codec_specific_info) override {
+ if (!adapter_)
+ return Result(Result::OK);
+ return adapter_->OnEncodedImage(stream_idx_, encoded_image,
+ codec_specific_info);
+ }
+
+ private:
+ MultiplexEncoderAdapter* adapter_;
+ const AlphaCodecStream stream_idx_;
+};
+
+MultiplexEncoderAdapter::MultiplexEncoderAdapter(
+ VideoEncoderFactory* factory,
+ const SdpVideoFormat& associated_format,
+ bool supports_augmented_data)
+ : factory_(factory),
+ associated_format_(associated_format),
+ encoded_complete_callback_(nullptr),
+ key_frame_interval_(0),
+ supports_augmented_data_(supports_augmented_data) {}
+
+MultiplexEncoderAdapter::~MultiplexEncoderAdapter() {
+ Release();
+}
+
+void MultiplexEncoderAdapter::SetFecControllerOverride(
+ FecControllerOverride* fec_controller_override) {
+ // Ignored.
+}
+
+int MultiplexEncoderAdapter::InitEncode(
+ const VideoCodec* inst,
+ const VideoEncoder::Settings& settings) {
+ const size_t buffer_size =
+ CalcBufferSize(VideoType::kI420, inst->width, inst->height);
+ multiplex_dummy_planes_.resize(buffer_size);
+ // It is more expensive to encode 0x00, so use 0x80 instead.
+ std::fill(multiplex_dummy_planes_.begin(), multiplex_dummy_planes_.end(),
+ 0x80);
+
+ RTC_DCHECK_EQ(kVideoCodecMultiplex, inst->codecType);
+ VideoCodec video_codec = *inst;
+ video_codec.codecType = PayloadStringToCodecType(associated_format_.name);
+
+  // Take over the key frame interval at the adapter level, because the key
+  // frames of the two sub-encoders have to be kept in sync.
+ switch (video_codec.codecType) {
+ case kVideoCodecVP8:
+ key_frame_interval_ = video_codec.VP8()->keyFrameInterval;
+ video_codec.VP8()->keyFrameInterval = 0;
+ break;
+ case kVideoCodecVP9:
+ key_frame_interval_ = video_codec.VP9()->keyFrameInterval;
+ video_codec.VP9()->keyFrameInterval = 0;
+ break;
+ case kVideoCodecH264:
+ key_frame_interval_ = video_codec.H264()->keyFrameInterval;
+ video_codec.H264()->keyFrameInterval = 0;
+ break;
+ default:
+ break;
+ }
+
+ encoder_info_ = EncoderInfo();
+ encoder_info_.implementation_name = "MultiplexEncoderAdapter (";
+ encoder_info_.requested_resolution_alignment = 1;
+ encoder_info_.apply_alignment_to_all_simulcast_layers = false;
+ // This needs to be false so that we can do the split in Encode().
+ encoder_info_.supports_native_handle = false;
+
+ for (size_t i = 0; i < kAlphaCodecStreams; ++i) {
+ std::unique_ptr<VideoEncoder> encoder =
+ factory_->CreateVideoEncoder(associated_format_);
+ const int rv = encoder->InitEncode(&video_codec, settings);
+ if (rv) {
+ RTC_LOG(LS_ERROR) << "Failed to create multiplex codec index " << i;
+ return rv;
+ }
+ adapter_callbacks_.emplace_back(new AdapterEncodedImageCallback(
+ this, static_cast<AlphaCodecStream>(i)));
+ encoder->RegisterEncodeCompleteCallback(adapter_callbacks_.back().get());
+
+ const EncoderInfo& encoder_impl_info = encoder->GetEncoderInfo();
+ encoder_info_.implementation_name += encoder_impl_info.implementation_name;
+ if (i != kAlphaCodecStreams - 1) {
+ encoder_info_.implementation_name += ", ";
+ }
+    // Uses hardware support if any of the encoders uses it. For example, if
+    // we are having issues with down-scaling due to pipelining delay in HW
+    // encoders, we need higher encoder usage thresholds in CPU adaptation.
+ if (i == 0) {
+ encoder_info_.is_hardware_accelerated =
+ encoder_impl_info.is_hardware_accelerated;
+ } else {
+ encoder_info_.is_hardware_accelerated |=
+ encoder_impl_info.is_hardware_accelerated;
+ }
+
+ encoder_info_.requested_resolution_alignment = cricket::LeastCommonMultiple(
+ encoder_info_.requested_resolution_alignment,
+ encoder_impl_info.requested_resolution_alignment);
+
+ if (encoder_impl_info.apply_alignment_to_all_simulcast_layers) {
+ encoder_info_.apply_alignment_to_all_simulcast_layers = true;
+ }
+
+ encoders_.emplace_back(std::move(encoder));
+ }
+ encoder_info_.implementation_name += ")";
+
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+int MultiplexEncoderAdapter::Encode(
+ const VideoFrame& input_image,
+ const std::vector<VideoFrameType>* frame_types) {
+ if (!encoded_complete_callback_) {
+ return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
+ }
+
+  // The input image is forwarded as-is, unless it is a native buffer and
+  // `supports_augmented_data_` is true, in which case we need to map it in
+  // order to access the underlying AugmentedVideoFrameBuffer.
+ VideoFrame forwarded_image = input_image;
+ if (supports_augmented_data_ &&
+ forwarded_image.video_frame_buffer()->type() ==
+ VideoFrameBuffer::Type::kNative) {
+ auto info = GetEncoderInfo();
+ rtc::scoped_refptr<VideoFrameBuffer> mapped_buffer =
+ forwarded_image.video_frame_buffer()->GetMappedFrameBuffer(
+ info.preferred_pixel_formats);
+ if (!mapped_buffer) {
+ // Unable to map the buffer.
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+ forwarded_image.set_video_frame_buffer(std::move(mapped_buffer));
+ }
+
+ std::vector<VideoFrameType> adjusted_frame_types;
+ if (key_frame_interval_ > 0 && picture_index_ % key_frame_interval_ == 0) {
+ adjusted_frame_types.push_back(VideoFrameType::kVideoFrameKey);
+ } else {
+ adjusted_frame_types.push_back(VideoFrameType::kVideoFrameDelta);
+ }
+ const bool has_alpha = forwarded_image.video_frame_buffer()->type() ==
+ VideoFrameBuffer::Type::kI420A;
+ std::unique_ptr<uint8_t[]> augmenting_data = nullptr;
+ uint16_t augmenting_data_length = 0;
+ AugmentedVideoFrameBuffer* augmented_video_frame_buffer = nullptr;
+ if (supports_augmented_data_) {
+ augmented_video_frame_buffer = static_cast<AugmentedVideoFrameBuffer*>(
+ forwarded_image.video_frame_buffer().get());
+ augmenting_data_length =
+ augmented_video_frame_buffer->GetAugmentingDataSize();
+ augmenting_data =
+ std::unique_ptr<uint8_t[]>(new uint8_t[augmenting_data_length]);
+ memcpy(augmenting_data.get(),
+ augmented_video_frame_buffer->GetAugmentingData(),
+ augmenting_data_length);
+ augmenting_data_size_ = augmenting_data_length;
+ }
+
+ {
+ MutexLock lock(&mutex_);
+ stashed_images_.emplace(
+ std::piecewise_construct,
+ std::forward_as_tuple(forwarded_image.timestamp()),
+ std::forward_as_tuple(
+ picture_index_, has_alpha ? kAlphaCodecStreams : 1,
+ std::move(augmenting_data), augmenting_data_length));
+ }
+
+ ++picture_index_;
+
+ // Encode YUV
+ int rv =
+ encoders_[kYUVStream]->Encode(forwarded_image, &adjusted_frame_types);
+
+  // If we do not receive an alpha frame, we send a single frame for this
+  // `picture_index_`. The receiver will see a `component_count` of 1, which
+  // signals this case.
+ if (rv || !has_alpha)
+ return rv;
+
+ // Encode AXX
+ rtc::scoped_refptr<VideoFrameBuffer> frame_buffer =
+ supports_augmented_data_
+ ? augmented_video_frame_buffer->GetVideoFrameBuffer()
+ : forwarded_image.video_frame_buffer();
+ const I420ABufferInterface* yuva_buffer = frame_buffer->GetI420A();
+ rtc::scoped_refptr<I420BufferInterface> alpha_buffer =
+ WrapI420Buffer(forwarded_image.width(), forwarded_image.height(),
+ yuva_buffer->DataA(), yuva_buffer->StrideA(),
+ multiplex_dummy_planes_.data(), yuva_buffer->StrideU(),
+ multiplex_dummy_planes_.data(), yuva_buffer->StrideV(),
+ // To keep reference alive.
+ [frame_buffer] {});
+ VideoFrame alpha_image =
+ VideoFrame::Builder()
+ .set_video_frame_buffer(alpha_buffer)
+ .set_timestamp_rtp(forwarded_image.timestamp())
+ .set_timestamp_ms(forwarded_image.render_time_ms())
+ .set_rotation(forwarded_image.rotation())
+ .set_id(forwarded_image.id())
+ .set_packet_infos(forwarded_image.packet_infos())
+ .build();
+ rv = encoders_[kAXXStream]->Encode(alpha_image, &adjusted_frame_types);
+ return rv;
+}
+
+int MultiplexEncoderAdapter::RegisterEncodeCompleteCallback(
+ EncodedImageCallback* callback) {
+ encoded_complete_callback_ = callback;
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+void MultiplexEncoderAdapter::SetRates(
+ const RateControlParameters& parameters) {
+ VideoBitrateAllocation bitrate_allocation(parameters.bitrate);
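+  // Deduct the augmenting data share from the base (0, 0) allocation so the
+  // sub-encoders leave room for it.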
+ bitrate_allocation.SetBitrate(
+ 0, 0, parameters.bitrate.GetBitrate(0, 0) - augmenting_data_size_);
+ for (auto& encoder : encoders_) {
+ // TODO(emircan): `framerate` is used to calculate duration in encoder
+ // instances. We report the total frame rate to keep real time for now.
+ // Remove this after refactoring duration logic.
+ encoder->SetRates(RateControlParameters(
+ bitrate_allocation,
+ static_cast<uint32_t>(encoders_.size() * parameters.framerate_fps),
+ parameters.bandwidth_allocation -
+ DataRate::BitsPerSec(augmenting_data_size_)));
+ }
+}
+
+void MultiplexEncoderAdapter::OnPacketLossRateUpdate(float packet_loss_rate) {
+ for (auto& encoder : encoders_) {
+ encoder->OnPacketLossRateUpdate(packet_loss_rate);
+ }
+}
+
+void MultiplexEncoderAdapter::OnRttUpdate(int64_t rtt_ms) {
+ for (auto& encoder : encoders_) {
+ encoder->OnRttUpdate(rtt_ms);
+ }
+}
+
+void MultiplexEncoderAdapter::OnLossNotification(
+ const LossNotification& loss_notification) {
+ for (auto& encoder : encoders_) {
+ encoder->OnLossNotification(loss_notification);
+ }
+}
+
+int MultiplexEncoderAdapter::Release() {
+ for (auto& encoder : encoders_) {
+ const int rv = encoder->Release();
+ if (rv)
+ return rv;
+ }
+ encoders_.clear();
+ adapter_callbacks_.clear();
+ MutexLock lock(&mutex_);
+ stashed_images_.clear();
+
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+VideoEncoder::EncoderInfo MultiplexEncoderAdapter::GetEncoderInfo() const {
+ return encoder_info_;
+}
+
+EncodedImageCallback::Result MultiplexEncoderAdapter::OnEncodedImage(
+ AlphaCodecStream stream_idx,
+ const EncodedImage& encodedImage,
+ const CodecSpecificInfo* codecSpecificInfo) {
+ // Save the image
+ MultiplexImageComponent image_component;
+ image_component.component_index = stream_idx;
+ image_component.codec_type =
+ PayloadStringToCodecType(associated_format_.name);
+ image_component.encoded_image = encodedImage;
+
+ MutexLock lock(&mutex_);
+ const auto& stashed_image_itr =
+ stashed_images_.find(encodedImage.Timestamp());
+ const auto& stashed_image_next_itr = std::next(stashed_image_itr, 1);
+ RTC_DCHECK(stashed_image_itr != stashed_images_.end());
+ MultiplexImage& stashed_image = stashed_image_itr->second;
+ const uint8_t frame_count = stashed_image.component_count;
+
+ stashed_image.image_components.push_back(image_component);
+
+ if (stashed_image.image_components.size() == frame_count) {
+ // Complete case
+ for (auto iter = stashed_images_.begin();
+ iter != stashed_images_.end() && iter != stashed_image_next_itr;
+ iter++) {
+ // No image at all, skip.
+ if (iter->second.image_components.size() == 0)
+ continue;
+
+      // We have to send out those stashed frames; otherwise the delta frame
+      // dependency chain is broken.
+ combined_image_ =
+ MultiplexEncodedImagePacker::PackAndRelease(iter->second);
+
+ CodecSpecificInfo codec_info = *codecSpecificInfo;
+ codec_info.codecType = kVideoCodecMultiplex;
+ encoded_complete_callback_->OnEncodedImage(combined_image_, &codec_info);
+ }
+
+ stashed_images_.erase(stashed_images_.begin(), stashed_image_next_itr);
+ }
+ return EncodedImageCallback::Result(EncodedImageCallback::Result::OK);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/multiplex/test/multiplex_adapter_unittest.cc b/third_party/libwebrtc/modules/video_coding/codecs/multiplex/test/multiplex_adapter_unittest.cc
new file mode 100644
index 0000000000..be0f5deb52
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/multiplex/test/multiplex_adapter_unittest.cc
@@ -0,0 +1,319 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stddef.h>
+
+#include <cstdint>
+#include <memory>
+#include <utility>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/scoped_refptr.h"
+#include "api/test/mock_video_decoder_factory.h"
+#include "api/test/mock_video_encoder_factory.h"
+#include "api/video/encoded_image.h"
+#include "api/video/video_frame.h"
+#include "api/video/video_frame_buffer.h"
+#include "api/video/video_rotation.h"
+#include "api/video_codecs/sdp_video_format.h"
+#include "api/video_codecs/video_codec.h"
+#include "api/video_codecs/video_decoder.h"
+#include "api/video_codecs/video_encoder.h"
+#include "common_video/include/video_frame_buffer.h"
+#include "common_video/libyuv/include/webrtc_libyuv.h"
+#include "media/base/media_constants.h"
+#include "modules/video_coding/codecs/multiplex/include/augmented_video_frame_buffer.h"
+#include "modules/video_coding/codecs/multiplex/include/multiplex_decoder_adapter.h"
+#include "modules/video_coding/codecs/multiplex/include/multiplex_encoder_adapter.h"
+#include "modules/video_coding/codecs/multiplex/multiplex_encoded_image_packer.h"
+#include "modules/video_coding/codecs/test/video_codec_unittest.h"
+#include "modules/video_coding/codecs/vp9/include/vp9.h"
+#include "modules/video_coding/include/video_codec_interface.h"
+#include "modules/video_coding/include/video_error_codes.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "test/video_codec_settings.h"
+
+using ::testing::_;
+using ::testing::Return;
+
+namespace webrtc {
+
+constexpr const char* kMultiplexAssociatedCodecName = cricket::kVp9CodecName;
+const VideoCodecType kMultiplexAssociatedCodecType =
+ PayloadStringToCodecType(kMultiplexAssociatedCodecName);
+
+class TestMultiplexAdapter : public VideoCodecUnitTest,
+ public ::testing::WithParamInterface<
+ bool /* supports_augmenting_data */> {
+ public:
+ TestMultiplexAdapter()
+ : decoder_factory_(new webrtc::MockVideoDecoderFactory),
+ encoder_factory_(new webrtc::MockVideoEncoderFactory),
+ supports_augmenting_data_(GetParam()) {}
+
+ protected:
+ std::unique_ptr<VideoDecoder> CreateDecoder() override {
+ return std::make_unique<MultiplexDecoderAdapter>(
+ decoder_factory_.get(), SdpVideoFormat(kMultiplexAssociatedCodecName),
+ supports_augmenting_data_);
+ }
+
+ std::unique_ptr<VideoEncoder> CreateEncoder() override {
+ return std::make_unique<MultiplexEncoderAdapter>(
+ encoder_factory_.get(), SdpVideoFormat(kMultiplexAssociatedCodecName),
+ supports_augmenting_data_);
+ }
+
+ void ModifyCodecSettings(VideoCodec* codec_settings) override {
+ webrtc::test::CodecSettings(kMultiplexAssociatedCodecType, codec_settings);
+ codec_settings->VP9()->numberOfTemporalLayers = 1;
+ codec_settings->VP9()->numberOfSpatialLayers = 1;
+ codec_settings->codecType = webrtc::kVideoCodecMultiplex;
+ }
+
+ std::unique_ptr<VideoFrame> CreateDataAugmentedInputFrame(
+ VideoFrame* video_frame) {
+ rtc::scoped_refptr<VideoFrameBuffer> video_buffer =
+ video_frame->video_frame_buffer();
+ std::unique_ptr<uint8_t[]> data =
+ std::unique_ptr<uint8_t[]>(new uint8_t[16]);
+ for (int i = 0; i < 16; i++) {
+ data[i] = i;
+ }
+ auto augmented_video_frame_buffer =
+ rtc::make_ref_counted<AugmentedVideoFrameBuffer>(video_buffer,
+ std::move(data), 16);
+ return std::make_unique<VideoFrame>(
+ VideoFrame::Builder()
+ .set_video_frame_buffer(augmented_video_frame_buffer)
+ .set_timestamp_rtp(video_frame->timestamp())
+ .set_timestamp_ms(video_frame->render_time_ms())
+ .set_rotation(video_frame->rotation())
+ .set_id(video_frame->id())
+ .build());
+ }
+
+ std::unique_ptr<VideoFrame> CreateI420AInputFrame() {
+ VideoFrame input_frame = NextInputFrame();
+ rtc::scoped_refptr<webrtc::I420BufferInterface> yuv_buffer =
+ input_frame.video_frame_buffer()->ToI420();
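+    // Reuse the luma (Y) plane as the alpha plane to synthesize an I420A
+    // frame.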
+ rtc::scoped_refptr<I420ABufferInterface> yuva_buffer = WrapI420ABuffer(
+ yuv_buffer->width(), yuv_buffer->height(), yuv_buffer->DataY(),
+ yuv_buffer->StrideY(), yuv_buffer->DataU(), yuv_buffer->StrideU(),
+ yuv_buffer->DataV(), yuv_buffer->StrideV(), yuv_buffer->DataY(),
+ yuv_buffer->StrideY(),
+ // To keep reference alive.
+ [yuv_buffer] {});
+ return std::make_unique<VideoFrame>(VideoFrame::Builder()
+ .set_video_frame_buffer(yuva_buffer)
+ .set_timestamp_rtp(123)
+ .set_timestamp_ms(345)
+ .set_rotation(kVideoRotation_0)
+ .build());
+ }
+
+ std::unique_ptr<VideoFrame> CreateInputFrame(bool contains_alpha) {
+ std::unique_ptr<VideoFrame> video_frame;
+ if (contains_alpha) {
+ video_frame = CreateI420AInputFrame();
+ } else {
+ VideoFrame next_frame = NextInputFrame();
+ video_frame = std::make_unique<VideoFrame>(
+ VideoFrame::Builder()
+ .set_video_frame_buffer(next_frame.video_frame_buffer())
+ .set_timestamp_rtp(next_frame.timestamp())
+ .set_timestamp_ms(next_frame.render_time_ms())
+ .set_rotation(next_frame.rotation())
+ .set_id(next_frame.id())
+ .build());
+ }
+ if (supports_augmenting_data_) {
+ video_frame = CreateDataAugmentedInputFrame(video_frame.get());
+ }
+
+ return video_frame;
+ }
+
+ void CheckData(rtc::scoped_refptr<VideoFrameBuffer> video_frame_buffer) {
+ if (!supports_augmenting_data_) {
+ return;
+ }
+ AugmentedVideoFrameBuffer* augmented_buffer =
+ static_cast<AugmentedVideoFrameBuffer*>(video_frame_buffer.get());
+ EXPECT_EQ(augmented_buffer->GetAugmentingDataSize(), 16);
+ uint8_t* data = augmented_buffer->GetAugmentingData();
+ for (int i = 0; i < 16; i++) {
+ EXPECT_EQ(data[i], i);
+ }
+ }
+
+ std::unique_ptr<VideoFrame> ExtractAXXFrame(const VideoFrame& video_frame) {
+ rtc::scoped_refptr<VideoFrameBuffer> video_frame_buffer =
+ video_frame.video_frame_buffer();
+ if (supports_augmenting_data_) {
+ AugmentedVideoFrameBuffer* augmentedBuffer =
+ static_cast<AugmentedVideoFrameBuffer*>(video_frame_buffer.get());
+ video_frame_buffer = augmentedBuffer->GetVideoFrameBuffer();
+ }
+ const I420ABufferInterface* yuva_buffer = video_frame_buffer->GetI420A();
+ rtc::scoped_refptr<I420BufferInterface> axx_buffer = WrapI420Buffer(
+ yuva_buffer->width(), yuva_buffer->height(), yuva_buffer->DataA(),
+ yuva_buffer->StrideA(), yuva_buffer->DataU(), yuva_buffer->StrideU(),
+ yuva_buffer->DataV(), yuva_buffer->StrideV(), [video_frame_buffer] {});
+ return std::make_unique<VideoFrame>(VideoFrame::Builder()
+ .set_video_frame_buffer(axx_buffer)
+ .set_timestamp_rtp(123)
+ .set_timestamp_ms(345)
+ .set_rotation(kVideoRotation_0)
+ .build());
+ }
+
+ private:
+ void SetUp() override {
+ EXPECT_CALL(*decoder_factory_, Die);
+ // The decoders/encoders will be owned by the caller of
+ // CreateVideoDecoder()/CreateVideoEncoder().
+ EXPECT_CALL(*decoder_factory_, CreateVideoDecoder)
+ .Times(2)
+ .WillRepeatedly([] { return VP9Decoder::Create(); });
+
+ EXPECT_CALL(*encoder_factory_, Die);
+ EXPECT_CALL(*encoder_factory_, CreateVideoEncoder)
+ .Times(2)
+ .WillRepeatedly([] { return VP9Encoder::Create(); });
+
+ VideoCodecUnitTest::SetUp();
+ }
+
+ const std::unique_ptr<webrtc::MockVideoDecoderFactory> decoder_factory_;
+ const std::unique_ptr<webrtc::MockVideoEncoderFactory> encoder_factory_;
+ const bool supports_augmenting_data_;
+};
+
+// TODO(emircan): Currently VideoCodecUnitTest tests do a complete setup
+// step that goes beyond constructing `decoder_`. Simplify these tests to do
+// less.
+TEST_P(TestMultiplexAdapter, ConstructAndDestructDecoder) {
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Release());
+}
+
+TEST_P(TestMultiplexAdapter, ConstructAndDestructEncoder) {
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Release());
+}
+
+TEST_P(TestMultiplexAdapter, EncodeDecodeI420Frame) {
+ std::unique_ptr<VideoFrame> input_frame = CreateInputFrame(false);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(*input_frame, nullptr));
+ EncodedImage encoded_frame;
+ CodecSpecificInfo codec_specific_info;
+ ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
+ EXPECT_EQ(kVideoCodecMultiplex, codec_specific_info.codecType);
+
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Decode(encoded_frame, false, -1));
+ std::unique_ptr<VideoFrame> decoded_frame;
+ absl::optional<uint8_t> decoded_qp;
+ ASSERT_TRUE(WaitForDecodedFrame(&decoded_frame, &decoded_qp));
+ ASSERT_TRUE(decoded_frame);
+ EXPECT_GT(I420PSNR(input_frame.get(), decoded_frame.get()), 36);
+ CheckData(decoded_frame->video_frame_buffer());
+}
+
+TEST_P(TestMultiplexAdapter, EncodeDecodeI420AFrame) {
+ std::unique_ptr<VideoFrame> yuva_frame = CreateInputFrame(true);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(*yuva_frame, nullptr));
+ EncodedImage encoded_frame;
+ CodecSpecificInfo codec_specific_info;
+ ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
+ EXPECT_EQ(kVideoCodecMultiplex, codec_specific_info.codecType);
+
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Decode(encoded_frame, false, 0));
+ std::unique_ptr<VideoFrame> decoded_frame;
+ absl::optional<uint8_t> decoded_qp;
+ ASSERT_TRUE(WaitForDecodedFrame(&decoded_frame, &decoded_qp));
+ ASSERT_TRUE(decoded_frame);
+ EXPECT_GT(I420PSNR(yuva_frame.get(), decoded_frame.get()), 36);
+
+ // Find PSNR for AXX bits.
+ std::unique_ptr<VideoFrame> input_axx_frame = ExtractAXXFrame(*yuva_frame);
+ std::unique_ptr<VideoFrame> output_axx_frame =
+ ExtractAXXFrame(*decoded_frame);
+ EXPECT_GT(I420PSNR(input_axx_frame.get(), output_axx_frame.get()), 47);
+
+ CheckData(decoded_frame->video_frame_buffer());
+}
+
+TEST_P(TestMultiplexAdapter, CheckSingleFrameEncodedBitstream) {
+ std::unique_ptr<VideoFrame> input_frame = CreateInputFrame(false);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(*input_frame, nullptr));
+ EncodedImage encoded_frame;
+ CodecSpecificInfo codec_specific_info;
+ ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
+ EXPECT_EQ(kVideoCodecMultiplex, codec_specific_info.codecType);
+ EXPECT_FALSE(encoded_frame.SpatialIndex());
+
+ const MultiplexImage& unpacked_frame =
+ MultiplexEncodedImagePacker::Unpack(encoded_frame);
+ EXPECT_EQ(0, unpacked_frame.image_index);
+ EXPECT_EQ(1, unpacked_frame.component_count);
+ const MultiplexImageComponent& component = unpacked_frame.image_components[0];
+ EXPECT_EQ(0, component.component_index);
+ EXPECT_NE(nullptr, component.encoded_image.data());
+ EXPECT_EQ(VideoFrameType::kVideoFrameKey, component.encoded_image._frameType);
+}
+
+TEST_P(TestMultiplexAdapter, CheckDoubleFramesEncodedBitstream) {
+ std::unique_ptr<VideoFrame> yuva_frame = CreateInputFrame(true);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(*yuva_frame, nullptr));
+ EncodedImage encoded_frame;
+ CodecSpecificInfo codec_specific_info;
+ ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
+ EXPECT_EQ(kVideoCodecMultiplex, codec_specific_info.codecType);
+ EXPECT_FALSE(encoded_frame.SpatialIndex());
+
+ const MultiplexImage& unpacked_frame =
+ MultiplexEncodedImagePacker::Unpack(encoded_frame);
+ EXPECT_EQ(0, unpacked_frame.image_index);
+ EXPECT_EQ(2, unpacked_frame.component_count);
+ EXPECT_EQ(unpacked_frame.image_components.size(),
+ unpacked_frame.component_count);
+ for (int i = 0; i < unpacked_frame.component_count; ++i) {
+ const MultiplexImageComponent& component =
+ unpacked_frame.image_components[i];
+ EXPECT_EQ(i, component.component_index);
+ EXPECT_NE(nullptr, component.encoded_image.data());
+ EXPECT_EQ(VideoFrameType::kVideoFrameKey,
+ component.encoded_image._frameType);
+ }
+}
+
+TEST_P(TestMultiplexAdapter, ImageIndexIncreases) {
+ std::unique_ptr<VideoFrame> yuva_frame = CreateInputFrame(true);
+ const size_t expected_num_encoded_frames = 3;
+ for (size_t i = 0; i < expected_num_encoded_frames; ++i) {
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(*yuva_frame, nullptr));
+ EncodedImage encoded_frame;
+ CodecSpecificInfo codec_specific_info;
+ ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
+ const MultiplexImage& unpacked_frame =
+ MultiplexEncodedImagePacker::Unpack(encoded_frame);
+ EXPECT_EQ(i, unpacked_frame.image_index);
+ EXPECT_EQ(
+ i ? VideoFrameType::kVideoFrameDelta : VideoFrameType::kVideoFrameKey,
+ encoded_frame._frameType);
+ }
+}
+
+INSTANTIATE_TEST_SUITE_P(TestMultiplexAdapter,
+ TestMultiplexAdapter,
+ ::testing::Bool());
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/test/android_codec_factory_helper.cc b/third_party/libwebrtc/modules/video_coding/codecs/test/android_codec_factory_helper.cc
new file mode 100644
index 0000000000..d1be684cbb
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/test/android_codec_factory_helper.cc
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/codecs/test/android_codec_factory_helper.h"
+
+#include <jni.h>
+#include <pthread.h>
+#include <stddef.h>
+
+#include <memory>
+
+#include "modules/utility/include/jvm_android.h"
+#include "rtc_base/checks.h"
+#include "sdk/android/native_api/codecs/wrapper.h"
+#include "sdk/android/native_api/jni/class_loader.h"
+#include "sdk/android/native_api/jni/jvm.h"
+#include "sdk/android/native_api/jni/scoped_java_ref.h"
+#include "sdk/android/src/jni/jvm.h"
+
+namespace webrtc {
+namespace test {
+
+namespace {
+
+static pthread_once_t g_initialize_once = PTHREAD_ONCE_INIT;
+
+void EnsureInitializedOnce() {
+ RTC_CHECK(::webrtc::jni::GetJVM() != nullptr);
+
+ JNIEnv* jni = ::webrtc::jni::AttachCurrentThreadIfNeeded();
+ JavaVM* jvm = NULL;
+ RTC_CHECK_EQ(0, jni->GetJavaVM(&jvm));
+
+ // Initialize the Java environment (currently only used by the audio manager).
+ webrtc::JVM::Initialize(jvm);
+}
+
+} // namespace
+
+void InitializeAndroidObjects() {
+ RTC_CHECK_EQ(0, pthread_once(&g_initialize_once, &EnsureInitializedOnce));
+}
+
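+// InitializeAndroidObjects() must be called before using the factory helpers
+// below; the instantiation tests do so from their fixture constructor.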
+std::unique_ptr<VideoEncoderFactory> CreateAndroidEncoderFactory() {
+ JNIEnv* env = AttachCurrentThreadIfNeeded();
+ ScopedJavaLocalRef<jclass> factory_class =
+ GetClass(env, "org/webrtc/HardwareVideoEncoderFactory");
+ jmethodID factory_constructor = env->GetMethodID(
+ factory_class.obj(), "<init>", "(Lorg/webrtc/EglBase$Context;ZZ)V");
+ ScopedJavaLocalRef<jobject> factory_object(
+ env, env->NewObject(factory_class.obj(), factory_constructor,
+ nullptr /* shared_context */,
+ false /* enable_intel_vp8_encoder */,
+ true /* enable_h264_high_profile */));
+ return JavaToNativeVideoEncoderFactory(env, factory_object.obj());
+}
+
+std::unique_ptr<VideoDecoderFactory> CreateAndroidDecoderFactory() {
+ JNIEnv* env = AttachCurrentThreadIfNeeded();
+ ScopedJavaLocalRef<jclass> factory_class =
+ GetClass(env, "org/webrtc/HardwareVideoDecoderFactory");
+ jmethodID factory_constructor = env->GetMethodID(
+ factory_class.obj(), "<init>", "(Lorg/webrtc/EglBase$Context;)V");
+ ScopedJavaLocalRef<jobject> factory_object(
+ env, env->NewObject(factory_class.obj(), factory_constructor,
+ nullptr /* shared_context */));
+ return JavaToNativeVideoDecoderFactory(env, factory_object.obj());
+}
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/test/android_codec_factory_helper.h b/third_party/libwebrtc/modules/video_coding/codecs/test/android_codec_factory_helper.h
new file mode 100644
index 0000000000..ad9cf35162
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/test/android_codec_factory_helper.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_CODECS_TEST_ANDROID_CODEC_FACTORY_HELPER_H_
+#define MODULES_VIDEO_CODING_CODECS_TEST_ANDROID_CODEC_FACTORY_HELPER_H_
+
+#include <memory>
+
+#include "api/video_codecs/video_decoder_factory.h"
+#include "api/video_codecs/video_encoder_factory.h"
+
+namespace webrtc {
+namespace test {
+
+void InitializeAndroidObjects();
+
+std::unique_ptr<VideoEncoderFactory> CreateAndroidEncoderFactory();
+std::unique_ptr<VideoDecoderFactory> CreateAndroidDecoderFactory();
+
+} // namespace test
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_CODECS_TEST_ANDROID_CODEC_FACTORY_HELPER_H_
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/test/batch/empty-runtime-deps b/third_party/libwebrtc/modules/video_coding/codecs/test/batch/empty-runtime-deps
new file mode 100644
index 0000000000..6702195ca9
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/test/batch/empty-runtime-deps
@@ -0,0 +1 @@
+does-not-exist
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/test/batch/run-instantiation-tests.sh b/third_party/libwebrtc/modules/video_coding/codecs/test/batch/run-instantiation-tests.sh
new file mode 100755
index 0000000000..28083b1808
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/test/batch/run-instantiation-tests.sh
@@ -0,0 +1,56 @@
+#!/bin/bash
+
+# Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+if [ $# -ne 1 ]; then
+ echo "Usage: run-instantiation-tests.sh ADB-DEVICE-ID"
+ exit 1
+fi
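+
+# Example invocation (with a hypothetical device serial):
+#   ./run-instantiation-tests.sh 0123456789ABCDEF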
+
+# Paths: update these based on your git checkout and gn output folder names.
+WEBRTC_DIR=$HOME/src/webrtc/src
+BUILD_DIR=$WEBRTC_DIR/out/Android_Release
+
+# Other settings.
+ADB=$(which adb)
+SERIAL=$1
+TIMEOUT=7200
+
+# Ensure we are using the latest version.
+ninja -C $BUILD_DIR modules_tests
+
+# Transfer the required files by trying to run a test that doesn't exist.
+echo "===> Transferring required resources to device $1."
+$WEBRTC_DIR/build/android/test_runner.py gtest \
+ --output-directory $BUILD_DIR \
+ --suite modules_tests \
+ --gtest_filter "DoesNotExist" \
+ --shard-timeout $TIMEOUT \
+ --runtime-deps-path $BUILD_DIR/gen.runtime/modules/modules_tests__test_runner_script.runtime_deps \
+ --adb-path $ADB \
+ --device $SERIAL \
+ --verbose
+
+# Run all tests as separate test invocations.
+mkdir $SERIAL
+pushd $SERIAL
+$WEBRTC_DIR/build/android/test_runner.py gtest \
+ --output-directory $BUILD_DIR \
+ --suite modules_tests \
+ --gtest_filter "*InstantiationTest*" \
+ --gtest_also_run_disabled_tests \
+ --shard-timeout $TIMEOUT \
+ --runtime-deps-path ../empty-runtime-deps \
+ --test-launcher-retry-limit 0 \
+ --adb-path $ADB \
+ --device $SERIAL \
+ --verbose \
+ --num-retries 0 \
+ 2>&1 | tee -a instantiation-tests.log
+popd
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/test/batch/run-videoprocessor-tests.sh b/third_party/libwebrtc/modules/video_coding/codecs/test/batch/run-videoprocessor-tests.sh
new file mode 100755
index 0000000000..25c971ba61
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/test/batch/run-videoprocessor-tests.sh
@@ -0,0 +1,70 @@
+#!/bin/bash
+
+# Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+if [ $# -ne 1 ]; then
+ echo "Usage: run.sh ADB-DEVICE-ID"
+ exit 1
+fi
+
+# Paths: update these based on your git checkout and gn output folder names.
+WEBRTC_DIR=$HOME/src/webrtc/src
+BUILD_DIR=$WEBRTC_DIR/out/Android_Release
+
+# Clips: update these to encode/decode other content.
+CLIPS=('Foreman')
+RESOLUTIONS=('128x96' '160x120' '176x144' '320x240' '352x288')
+FRAMERATES=(30)
+
+# Other settings.
+ADB=$(which adb)
+SERIAL=$1
+TIMEOUT=7200
+
+# Ensure we are using the latest version.
+ninja -C $BUILD_DIR modules_tests
+
+# Transfer the required files by trying to run a test that doesn't exist.
+echo "===> Transferring required resources to device $1."
+$WEBRTC_DIR/build/android/test_runner.py gtest \
+ --output-directory $BUILD_DIR \
+ --suite modules_tests \
+ --gtest_filter "DoesNotExist" \
+ --shard-timeout $TIMEOUT \
+ --runtime-deps-path $BUILD_DIR/gen.runtime/modules/modules_tests__test_runner_script.runtime_deps \
+ --adb-path $ADB \
+ --device $SERIAL \
+ --verbose
+
+# Run all tests as separate test invocations.
+mkdir $SERIAL
+pushd $SERIAL
+for clip in "${CLIPS[@]}"; do
+ for resolution in "${RESOLUTIONS[@]}"; do
+ for framerate in "${FRAMERATES[@]}"; do
+ test_name="${clip}_${resolution}_${framerate}"
+ log_name="${test_name}.log"
+
+ echo "===> Running ${test_name} on device $1."
+
+ $WEBRTC_DIR/build/android/test_runner.py gtest \
+ --output-directory $BUILD_DIR \
+ --suite modules_tests \
+ --gtest_filter "CodecSettings/*${test_name}*" \
+ --shard-timeout $TIMEOUT \
+ --runtime-deps-path ../empty-runtime-deps \
+ --test-launcher-retry-limit 0 \
+ --adb-path $ADB \
+ --device $SERIAL \
+ --verbose \
+ 2>&1 | tee -a ${log_name}
+ done
+ done
+done
+popd
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/test/encoded_video_frame_producer.cc b/third_party/libwebrtc/modules/video_coding/codecs/test/encoded_video_frame_producer.cc
new file mode 100644
index 0000000000..899826eee4
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/test/encoded_video_frame_producer.cc
@@ -0,0 +1,77 @@
+/*
+ * Copyright 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/codecs/test/encoded_video_frame_producer.h"
+
+#include <memory>
+#include <vector>
+
+#include "api/test/create_frame_generator.h"
+#include "api/test/frame_generator_interface.h"
+#include "api/transport/rtp/dependency_descriptor.h"
+#include "api/video/video_frame.h"
+#include "api/video/video_frame_type.h"
+#include "api/video_codecs/video_encoder.h"
+#include "modules/video_coding/include/video_codec_interface.h"
+#include "modules/video_coding/include/video_error_codes.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace {
+
+class EncoderCallback : public EncodedImageCallback {
+ public:
+ explicit EncoderCallback(
+ std::vector<EncodedVideoFrameProducer::EncodedFrame>& output_frames)
+ : output_frames_(output_frames) {}
+
+ private:
+ Result OnEncodedImage(const EncodedImage& encoded_image,
+ const CodecSpecificInfo* codec_specific_info) override {
+ output_frames_.push_back({encoded_image, *codec_specific_info});
+ return Result(Result::Error::OK);
+ }
+
+ std::vector<EncodedVideoFrameProducer::EncodedFrame>& output_frames_;
+};
+
+} // namespace
+
+std::vector<EncodedVideoFrameProducer::EncodedFrame>
+EncodedVideoFrameProducer::Encode() {
+ std::unique_ptr<test::FrameGeneratorInterface> frame_buffer_generator =
+ test::CreateSquareFrameGenerator(
+ resolution_.Width(), resolution_.Height(),
+ test::FrameGeneratorInterface::OutputType::kI420, absl::nullopt);
+
+ std::vector<EncodedFrame> encoded_frames;
+ EncoderCallback encoder_callback(encoded_frames);
+ RTC_CHECK_EQ(encoder_.RegisterEncodeCompleteCallback(&encoder_callback),
+ WEBRTC_VIDEO_CODEC_OK);
+
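+ // RTP video uses a 90 kHz clock, so consecutive frames are spaced
+ // 90000 / fps ticks apart.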
+ uint32_t rtp_tick = 90000 / framerate_fps_;
+ for (int i = 0; i < num_input_frames_; ++i) {
+ VideoFrame frame =
+ VideoFrame::Builder()
+ .set_video_frame_buffer(frame_buffer_generator->NextFrame().buffer)
+ .set_timestamp_rtp(rtp_timestamp_)
+ .build();
+ rtp_timestamp_ += rtp_tick;
+ RTC_CHECK_EQ(encoder_.Encode(frame, &next_frame_type_),
+ WEBRTC_VIDEO_CODEC_OK);
+ next_frame_type_[0] = VideoFrameType::kVideoFrameDelta;
+ }
+
+ RTC_CHECK_EQ(encoder_.RegisterEncodeCompleteCallback(nullptr),
+ WEBRTC_VIDEO_CODEC_OK);
+ return encoded_frames;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/test/encoded_video_frame_producer.h b/third_party/libwebrtc/modules/video_coding/codecs/test/encoded_video_frame_producer.h
new file mode 100644
index 0000000000..2216287b92
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/test/encoded_video_frame_producer.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_CODECS_TEST_ENCODED_VIDEO_FRAME_PRODUCER_H_
+#define MODULES_VIDEO_CODING_CODECS_TEST_ENCODED_VIDEO_FRAME_PRODUCER_H_
+
+#include <stdint.h>
+
+#include <vector>
+
+#include "api/transport/rtp/dependency_descriptor.h"
+#include "api/video/encoded_image.h"
+#include "api/video_codecs/video_encoder.h"
+#include "modules/video_coding/include/video_codec_interface.h"
+
+namespace webrtc {
+
+// Wrapper around VideoEncoder::Encode for convenient input (generates frames)
+// and output (returns the encoded frames instead of passing them to a
+// callback).
+class EncodedVideoFrameProducer {
+ public:
+ struct EncodedFrame {
+ EncodedImage encoded_image;
+ CodecSpecificInfo codec_specific_info;
+ };
+
+ // `encoder` should be initialized, but shouldn't have an encode complete
+ // callback registered yet; Encode() registers and unregisters its own.
+ explicit EncodedVideoFrameProducer(VideoEncoder& encoder)
+ : encoder_(encoder) {}
+ EncodedVideoFrameProducer(const EncodedVideoFrameProducer&) = delete;
+ EncodedVideoFrameProducer& operator=(const EncodedVideoFrameProducer&) =
+ delete;
+
+ // Number of input frames to pass to the encoder.
+ EncodedVideoFrameProducer& SetNumInputFrames(int value);
+ // Encode next frame as key frame.
+ EncodedVideoFrameProducer& ForceKeyFrame();
+ // Resolution of the input frames.
+ EncodedVideoFrameProducer& SetResolution(RenderResolution value);
+
+ EncodedVideoFrameProducer& SetFramerateFps(int value);
+
+ // Generates input video frames and encodes them with `encoder` provided in
+ // the constructor. Returns the frames passed to `OnEncodedImage` by wrapping
+ // an `EncodedImageCallback` underneath.
+ std::vector<EncodedFrame> Encode();
+
+ private:
+ VideoEncoder& encoder_;
+
+ uint32_t rtp_timestamp_ = 1000;
+ int num_input_frames_ = 1;
+ int framerate_fps_ = 30;
+ RenderResolution resolution_ = {320, 180};
+ std::vector<VideoFrameType> next_frame_type_ = {
+ VideoFrameType::kVideoFrameKey};
+};
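+
+// A minimal usage sketch (assumes `encoder` is an already-initialized
+// VideoEncoder):
+//   EncodedVideoFrameProducer producer(encoder);
+//   std::vector<EncodedVideoFrameProducer::EncodedFrame> frames =
+//       producer.SetNumInputFrames(4).SetResolution({640, 360}).Encode();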
+
+inline EncodedVideoFrameProducer& EncodedVideoFrameProducer::SetNumInputFrames(
+ int value) {
+ RTC_DCHECK_GT(value, 0);
+ num_input_frames_ = value;
+ return *this;
+}
+
+inline EncodedVideoFrameProducer& EncodedVideoFrameProducer::ForceKeyFrame() {
+ next_frame_type_ = {VideoFrameType::kVideoFrameKey};
+ return *this;
+}
+
+inline EncodedVideoFrameProducer& EncodedVideoFrameProducer::SetResolution(
+ RenderResolution value) {
+ resolution_ = value;
+ return *this;
+}
+
+inline EncodedVideoFrameProducer& EncodedVideoFrameProducer::SetFramerateFps(
+ int value) {
+ RTC_DCHECK_GT(value, 0);
+ framerate_fps_ = value;
+ return *this;
+}
+
+} // namespace webrtc
+#endif // MODULES_VIDEO_CODING_CODECS_TEST_ENCODED_VIDEO_FRAME_PRODUCER_H_
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/test/objc_codec_factory_helper.h b/third_party/libwebrtc/modules/video_coding/codecs/test/objc_codec_factory_helper.h
new file mode 100644
index 0000000000..475d0fdd08
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/test/objc_codec_factory_helper.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_CODECS_TEST_OBJC_CODEC_FACTORY_HELPER_H_
+#define MODULES_VIDEO_CODING_CODECS_TEST_OBJC_CODEC_FACTORY_HELPER_H_
+
+#include <memory>
+
+#include "api/video_codecs/video_decoder_factory.h"
+#include "api/video_codecs/video_encoder_factory.h"
+
+namespace webrtc {
+namespace test {
+
+std::unique_ptr<VideoEncoderFactory> CreateObjCEncoderFactory();
+std::unique_ptr<VideoDecoderFactory> CreateObjCDecoderFactory();
+
+} // namespace test
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_CODECS_TEST_OBJC_CODEC_FACTORY_HELPER_H_
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/test/objc_codec_factory_helper.mm b/third_party/libwebrtc/modules/video_coding/codecs/test/objc_codec_factory_helper.mm
new file mode 100644
index 0000000000..ed82376251
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/test/objc_codec_factory_helper.mm
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/codecs/test/objc_codec_factory_helper.h"
+
+#import "sdk/objc/components/video_codec/RTCVideoDecoderFactoryH264.h"
+#import "sdk/objc/components/video_codec/RTCVideoEncoderFactoryH264.h"
+#include "sdk/objc/native/api/video_decoder_factory.h"
+#include "sdk/objc/native/api/video_encoder_factory.h"
+
+namespace webrtc {
+namespace test {
+
+std::unique_ptr<VideoEncoderFactory> CreateObjCEncoderFactory() {
+ return ObjCToNativeVideoEncoderFactory([[RTC_OBJC_TYPE(RTCVideoEncoderFactoryH264) alloc] init]);
+}
+
+std::unique_ptr<VideoDecoderFactory> CreateObjCDecoderFactory() {
+ return ObjCToNativeVideoDecoderFactory([[RTC_OBJC_TYPE(RTCVideoDecoderFactoryH264) alloc] init]);
+}
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/test/plot_webrtc_test_logs.py b/third_party/libwebrtc/modules/video_coding/codecs/test/plot_webrtc_test_logs.py
new file mode 100755
index 0000000000..29e2d6f65a
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/test/plot_webrtc_test_logs.py
@@ -0,0 +1,438 @@
+# Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+"""Plots statistics from WebRTC integration test logs.
+
+Usage: $ python plot_webrtc_test_logs.py filename.txt
+"""
+
+import numpy
+import sys
+import re
+
+import matplotlib.pyplot as plt
+
+# Log events.
+EVENT_START = 'RUN ] CodecSettings/VideoCodecTestParameterized.'
+EVENT_END = 'OK ] CodecSettings/VideoCodecTestParameterized.'
+
+# Metrics to plot, tuple: (name to parse in file, label to use when plotting).
+WIDTH = ('width', 'width')
+HEIGHT = ('height', 'height')
+FILENAME = ('filename', 'clip')
+CODEC_TYPE = ('codec_type', 'Codec')
+ENCODER_IMPLEMENTATION_NAME = ('enc_impl_name', 'enc name')
+DECODER_IMPLEMENTATION_NAME = ('dec_impl_name', 'dec name')
+CODEC_IMPLEMENTATION_NAME = ('codec_impl_name', 'codec name')
+CORES = ('num_cores', 'CPU cores used')
+DENOISING = ('denoising', 'denoising')
+RESILIENCE = ('resilience', 'resilience')
+ERROR_CONCEALMENT = ('error_concealment', 'error concealment')
+CPU_USAGE = ('cpu_usage_percent', 'CPU usage (%)')
+BITRATE = ('target_bitrate_kbps', 'target bitrate (kbps)')
+FRAMERATE = ('input_framerate_fps', 'fps')
+QP = ('avg_qp', 'QP avg')
+PSNR = ('avg_psnr', 'PSNR (dB)')
+SSIM = ('avg_ssim', 'SSIM')
+ENC_BITRATE = ('bitrate_kbps', 'encoded bitrate (kbps)')
+NUM_FRAMES = ('num_input_frames', 'num frames')
+NUM_DROPPED_FRAMES = ('num_dropped_frames', 'num dropped frames')
+TIME_TO_TARGET = ('time_to_reach_target_bitrate_sec',
+ 'time to reach target rate (sec)')
+ENCODE_SPEED_FPS = ('enc_speed_fps', 'encode speed (fps)')
+DECODE_SPEED_FPS = ('dec_speed_fps', 'decode speed (fps)')
+AVG_KEY_FRAME_SIZE = ('avg_key_frame_size_bytes', 'avg key frame size (bytes)')
+AVG_DELTA_FRAME_SIZE = ('avg_delta_frame_size_bytes',
+ 'avg delta frame size (bytes)')
+
+# Settings.
+SETTINGS = [
+ WIDTH,
+ HEIGHT,
+ FILENAME,
+ NUM_FRAMES,
+]
+
+# Settings, options for x-axis.
+X_SETTINGS = [
+ CORES,
+ FRAMERATE,
+ DENOISING,
+ RESILIENCE,
+ ERROR_CONCEALMENT,
+ BITRATE, # TODO(asapersson): Needs to be last.
+]
+
+# Settings, options for subplots.
+SUBPLOT_SETTINGS = [
+ CODEC_TYPE,
+ ENCODER_IMPLEMENTATION_NAME,
+ DECODER_IMPLEMENTATION_NAME,
+ CODEC_IMPLEMENTATION_NAME,
+] + X_SETTINGS
+
+# Results.
+RESULTS = [
+ PSNR,
+ SSIM,
+ ENC_BITRATE,
+ NUM_DROPPED_FRAMES,
+ TIME_TO_TARGET,
+ ENCODE_SPEED_FPS,
+ DECODE_SPEED_FPS,
+ QP,
+ CPU_USAGE,
+ AVG_KEY_FRAME_SIZE,
+ AVG_DELTA_FRAME_SIZE,
+]
+
+METRICS_TO_PARSE = SETTINGS + SUBPLOT_SETTINGS + RESULTS
+
+Y_METRICS = [res[1] for res in RESULTS]
+
+# Parameters for plotting.
+FIG_SIZE_SCALE_FACTOR_X = 1.6
+FIG_SIZE_SCALE_FACTOR_Y = 1.8
+GRID_COLOR = [0.45, 0.45, 0.45]
+
+
+def ParseSetting(filename, setting):
+ """Parses setting from file.
+
+ Args:
+ filename: The name of the file.
+ setting: Name of setting to parse (e.g. width).
+
+ Returns:
+ A list holding parsed settings, e.g. ['width: 128.0', 'width: 160.0'] """
+
+ settings = []
+
+ settings_file = open(filename)
+ while True:
+ line = settings_file.readline()
+ if not line:
+ break
+ if re.search(r'%s' % EVENT_START, line):
+ # Parse event.
+ parsed = {}
+ while True:
+ line = settings_file.readline()
+ if not line:
+ break
+ if re.search(r'%s' % EVENT_END, line):
+ # Add parsed setting to list.
+ if setting in parsed:
+ s = setting + ': ' + str(parsed[setting])
+ if s not in settings:
+ settings.append(s)
+ break
+
+ TryFindMetric(parsed, line)
+
+ settings_file.close()
+ return settings
+
+
+def ParseMetrics(filename, setting1, setting2):
+ """Parses metrics from file.
+
+ Args:
+ filename: The name of the file.
+ setting1: First setting for sorting metrics (e.g. width).
+ setting2: Second setting for sorting metrics (e.g. CPU cores used).
+
+ Returns:
+ A dictionary holding parsed metrics.
+
+ For example:
+ metrics[key1][key2][measurement]
+
+ metrics = {
+ "width: 352": {
+ "CPU cores used: 1.0": {
+ "encode time (us)": [0.718005, 0.806925, 0.909726, 0.931835, 0.953642],
+ "PSNR (dB)": [25.546029, 29.465518, 34.723535, 36.428493, 38.686551],
+ "bitrate (kbps)": [50, 100, 300, 500, 1000]
+ },
+ "CPU cores used: 2.0": {
+ "encode time (us)": [0.718005, 0.806925, 0.909726, 0.931835, 0.953642],
+ "PSNR (dB)": [25.546029, 29.465518, 34.723535, 36.428493, 38.686551],
+ "bitrate (kbps)": [50, 100, 300, 500, 1000]
+ },
+ },
+ "width: 176": {
+ "CPU cores used: 1.0": {
+ "encode time (us)": [0.857897, 0.91608, 0.959173, 0.971116, 0.980961],
+ "PSNR (dB)": [30.243646, 33.375592, 37.574387, 39.42184, 41.437897],
+ "bitrate (kbps)": [50, 100, 300, 500, 1000]
+ },
+ }
+ } """
+
+ metrics = {}
+
+ # Parse events.
+ settings_file = open(filename)
+ while True:
+ line = settings_file.readline()
+ if not line:
+ break
+ if re.search(r'%s' % EVENT_START, line):
+ # Parse event.
+ parsed = {}
+ while True:
+ line = settings_file.readline()
+ if not line:
+ break
+ if re.search(r'%s' % EVENT_END, line):
+ # Add parsed values to metrics.
+ key1 = setting1 + ': ' + str(parsed[setting1])
+ key2 = setting2 + ': ' + str(parsed[setting2])
+ if key1 not in metrics:
+ metrics[key1] = {}
+ if key2 not in metrics[key1]:
+ metrics[key1][key2] = {}
+
+ for label in parsed:
+ if label not in metrics[key1][key2]:
+ metrics[key1][key2][label] = []
+ metrics[key1][key2][label].append(parsed[label])
+
+ break
+
+ TryFindMetric(parsed, line)
+
+ settings_file.close()
+ return metrics
+
+
+def TryFindMetric(parsed, line):
+ for metric in METRICS_TO_PARSE:
+ name = metric[0]
+ label = metric[1]
+ if re.search(r'%s' % name, line):
+ found, value = GetMetric(name, line)
+ if found:
+ parsed[label] = value
+ return
+
+
+def GetMetric(name, string):
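+ """Parses a metric value from `string`.
+
+ Returns (True, value) on success, else (False, -1). For example:
+ GetMetric('avg_psnr', 'avg_psnr: 38.5') returns (True, 38.5).
+ """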
+ # Float (e.g. bitrate = 98.8253).
+ pattern = r'%s\s*[:=]\s*([+-]?\d+\.*\d*)' % name
+ m = re.search(r'%s' % pattern, string)
+ if m is not None:
+ return StringToFloat(m.group(1))
+
+ # Alphanumeric characters (e.g. codec type : VP8).
+ pattern = r'%s\s*[:=]\s*(\w+)' % name
+ m = re.search(r'%s' % pattern, string)
+ if m is not None:
+ return True, m.group(1)
+
+ return False, -1
+
+
+def StringToFloat(value):
+ try:
+ value = float(value)
+ except ValueError:
+ print "Not a float, skipped %s" % value
+ return False, -1
+
+ return True, value
+
+
+def Plot(y_metric, x_metric, metrics):
+ """Plots y_metric vs x_metric per key in metrics.
+
+ For example:
+ y_metric = 'PSNR (dB)'
+ x_metric = 'bitrate (kbps)'
+ metrics = {
+ "CPU cores used: 1.0": {
+ "PSNR (dB)": [25.546029, 29.465518, 34.723535, 36.428493, 38.686551],
+ "bitrate (kbps)": [50, 100, 300, 500, 1000]
+ },
+ "CPU cores used: 2.0": {
+ "PSNR (dB)": [25.546029, 29.465518, 34.723535, 36.428493, 38.686551],
+ "bitrate (kbps)": [50, 100, 300, 500, 1000]
+ },
+ }
+ """
+ for key in sorted(metrics):
+ data = metrics[key]
+ if y_metric not in data:
+ print "Failed to find metric: %s" % y_metric
+ continue
+
+ y = numpy.array(data[y_metric])
+ x = numpy.array(data[x_metric])
+ if len(y) != len(x):
+ print "Length mismatch for %s, %s" % (y, x)
+ continue
+
+ label = y_metric + ' - ' + str(key)
+
+ plt.plot(x,
+ y,
+ label=label,
+ linewidth=1.5,
+ marker='o',
+ markersize=5,
+ markeredgewidth=0.0)
+
+
+def PlotFigure(settings, y_metrics, x_metric, metrics, title):
+ """Plots metrics in y_metrics list. One figure is plotted and each entry
+ in the list is plotted in a subplot (and sorted per settings).
+
+ For example:
+ settings = ['width: 128.0', 'width: 160.0']. Sort subplot per setting.
+ y_metrics = ['PSNR (dB)', 'PSNR (dB)']. Metric to plot per subplot.
+ x_metric = 'bitrate (kbps)'
+
+ """
+
+ plt.figure()
+ plt.suptitle(title, fontsize='large', fontweight='bold')
+ settings.sort()
+ rows = len(settings)
+ cols = 1
+ pos = 1
+ while pos <= rows:
+ plt.rc('grid', color=GRID_COLOR)
+ ax = plt.subplot(rows, cols, pos)
+ plt.grid()
+ plt.setp(ax.get_xticklabels(), visible=(pos == rows), fontsize='large')
+ plt.setp(ax.get_yticklabels(), fontsize='large')
+ setting = settings[pos - 1]
+ Plot(y_metrics[pos - 1], x_metric, metrics[setting])
+ if setting.startswith(WIDTH[1]):
+ plt.title(setting, fontsize='medium')
+ plt.legend(fontsize='large', loc='best')
+ pos += 1
+
+ plt.xlabel(x_metric, fontsize='large')
+ plt.subplots_adjust(left=0.06,
+ right=0.98,
+ bottom=0.05,
+ top=0.94,
+ hspace=0.08)
+
+
+def GetTitle(filename, setting):
+ title = ''
+ if setting != CODEC_IMPLEMENTATION_NAME[1] and setting != CODEC_TYPE[1]:
+ codec_types = ParseSetting(filename, CODEC_TYPE[1])
+ for i in range(0, len(codec_types)):
+ title += codec_types[i] + ', '
+
+ if setting != CORES[1]:
+ cores = ParseSetting(filename, CORES[1])
+ for i in range(0, len(cores)):
+ title += cores[i].split('.')[0] + ', '
+
+ if setting != FRAMERATE[1]:
+ framerate = ParseSetting(filename, FRAMERATE[1])
+ for i in range(0, len(framerate)):
+ title += framerate[i].split('.')[0] + ', '
+
+ if (setting != CODEC_IMPLEMENTATION_NAME[1]
+ and setting != ENCODER_IMPLEMENTATION_NAME[1]):
+ enc_names = ParseSetting(filename, ENCODER_IMPLEMENTATION_NAME[1])
+ for i in range(0, len(enc_names)):
+ title += enc_names[i] + ', '
+
+ if (setting != CODEC_IMPLEMENTATION_NAME[1]
+ and setting != DECODER_IMPLEMENTATION_NAME[1]):
+ dec_names = ParseSetting(filename, DECODER_IMPLEMENTATION_NAME[1])
+ for i in range(0, len(dec_names)):
+ title += dec_names[i] + ', '
+
+ filenames = ParseSetting(filename, FILENAME[1])
+ title += filenames[0].split('_')[0]
+
+ num_frames = ParseSetting(filename, NUM_FRAMES[1])
+ for i in range(0, len(num_frames)):
+ title += ' (' + num_frames[i].split('.')[0] + ')'
+
+ return title
+
+
+def ToString(input_list):
+ return ToStringWithoutMetric(input_list, ('', ''))
+
+
+def ToStringWithoutMetric(input_list, metric):
+ i = 1
+ output_str = ""
+ for m in input_list:
+ if m != metric:
+ output_str = output_str + ("%s. %s\n" % (i, m[1]))
+ i += 1
+ return output_str
+
+
+def GetIdx(text_list):
+ return int(input(text_list)) - 1
+
+
+def main():
+ filename = sys.argv[1]
+
+ # Setup.
+ idx_metric = GetIdx("Choose metric:\n0. All\n%s" % ToString(RESULTS))
+ if idx_metric == -1:
+ # Plot all metrics. One subplot for each metric.
+ # Per subplot: metric vs bitrate (per resolution).
+ cores = ParseSetting(filename, CORES[1])
+ setting1 = CORES[1]
+ setting2 = WIDTH[1]
+ sub_keys = [cores[0]] * len(Y_METRICS)
+ y_metrics = Y_METRICS
+ x_metric = BITRATE[1]
+ else:
+ resolutions = ParseSetting(filename, WIDTH[1])
+ idx = GetIdx("Select metric for x-axis:\n%s" % ToString(X_SETTINGS))
+ if X_SETTINGS[idx] == BITRATE:
+ idx = GetIdx("Plot per:\n%s" %
+ ToStringWithoutMetric(SUBPLOT_SETTINGS, BITRATE))
+ idx_setting = METRICS_TO_PARSE.index(SUBPLOT_SETTINGS[idx])
+ # Plot one metric. One subplot for each resolution.
+ # Per subplot: metric vs bitrate (per setting).
+ setting1 = WIDTH[1]
+ setting2 = METRICS_TO_PARSE[idx_setting][1]
+ sub_keys = resolutions
+ y_metrics = [RESULTS[idx_metric][1]] * len(sub_keys)
+ x_metric = BITRATE[1]
+ else:
+ # Plot one metric. One subplot for each resolution.
+ # Per subplot: metric vs setting (per bitrate).
+ setting1 = WIDTH[1]
+ setting2 = BITRATE[1]
+ sub_keys = resolutions
+ y_metrics = [RESULTS[idx_metric][1]] * len(sub_keys)
+ x_metric = X_SETTINGS[idx][1]
+
+ metrics = ParseMetrics(filename, setting1, setting2)
+
+ # Stretch fig size.
+ figsize = plt.rcParams["figure.figsize"]
+ figsize[0] *= FIG_SIZE_SCALE_FACTOR_X
+ figsize[1] *= FIG_SIZE_SCALE_FACTOR_Y
+ plt.rcParams["figure.figsize"] = figsize
+
+ PlotFigure(sub_keys, y_metrics, x_metric, metrics,
+ GetTitle(filename, setting2))
+
+ plt.show()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/test/video_codec_unittest.cc b/third_party/libwebrtc/modules/video_coding/codecs/test/video_codec_unittest.cc
new file mode 100644
index 0000000000..b81f658dd0
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/test/video_codec_unittest.cc
@@ -0,0 +1,180 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/codecs/test/video_codec_unittest.h"
+
+#include <utility>
+
+#include "api/test/create_frame_generator.h"
+#include "api/video_codecs/video_encoder.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "modules/video_coding/include/video_error_codes.h"
+#include "test/video_codec_settings.h"
+
+static const int kEncodeTimeoutMs = 100;
+static const int kDecodeTimeoutMs = 25;
+// Set bitrate to get higher quality.
+static const int kStartBitrate = 300;
+static const int kMaxBitrate = 4000;
+static const int kWidth = 176; // Width of the input image.
+static const int kHeight = 144; // Height of the input image.
+static const int kMaxFramerate = 30; // Arbitrary value.
+
+namespace webrtc {
+namespace {
+const VideoEncoder::Capabilities kCapabilities(false);
+}
+
+EncodedImageCallback::Result
+VideoCodecUnitTest::FakeEncodeCompleteCallback::OnEncodedImage(
+ const EncodedImage& frame,
+ const CodecSpecificInfo* codec_specific_info) {
+ MutexLock lock(&test_->encoded_frame_section_);
+ test_->encoded_frames_.push_back(frame);
+ RTC_DCHECK(codec_specific_info);
+ test_->codec_specific_infos_.push_back(*codec_specific_info);
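+ // A zero threshold means "signal on every encoded frame".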
+ if (!test_->wait_for_encoded_frames_threshold_) {
+ test_->encoded_frame_event_.Set();
+ return Result(Result::OK);
+ }
+
+ if (test_->encoded_frames_.size() ==
+ test_->wait_for_encoded_frames_threshold_) {
+ test_->wait_for_encoded_frames_threshold_ = 1;
+ test_->encoded_frame_event_.Set();
+ }
+ return Result(Result::OK);
+}
+
+void VideoCodecUnitTest::FakeDecodeCompleteCallback::Decoded(
+ VideoFrame& frame,
+ absl::optional<int32_t> decode_time_ms,
+ absl::optional<uint8_t> qp) {
+ MutexLock lock(&test_->decoded_frame_section_);
+ test_->decoded_frame_.emplace(frame);
+ test_->decoded_qp_ = qp;
+ test_->decoded_frame_event_.Set();
+}
+
+void VideoCodecUnitTest::SetUp() {
+ webrtc::test::CodecSettings(kVideoCodecVP8, &codec_settings_);
+ codec_settings_.startBitrate = kStartBitrate;
+ codec_settings_.maxBitrate = kMaxBitrate;
+ codec_settings_.maxFramerate = kMaxFramerate;
+ codec_settings_.width = kWidth;
+ codec_settings_.height = kHeight;
+
+ ModifyCodecSettings(&codec_settings_);
+
+ input_frame_generator_ = test::CreateSquareFrameGenerator(
+ codec_settings_.width, codec_settings_.height,
+ test::FrameGeneratorInterface::OutputType::kI420, absl::optional<int>());
+
+ encoder_ = CreateEncoder();
+ decoder_ = CreateDecoder();
+ encoder_->RegisterEncodeCompleteCallback(&encode_complete_callback_);
+ decoder_->RegisterDecodeCompleteCallback(&decode_complete_callback_);
+
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->InitEncode(
+ &codec_settings_,
+ VideoEncoder::Settings(kCapabilities, 1 /* number of cores */,
+ 0 /* max payload size (unused) */)));
+
+ VideoDecoder::Settings decoder_settings;
+ decoder_settings.set_codec_type(codec_settings_.codecType);
+ decoder_settings.set_max_render_resolution(
+ {codec_settings_.width, codec_settings_.height});
+ EXPECT_TRUE(decoder_->Configure(decoder_settings));
+}
+
+void VideoCodecUnitTest::ModifyCodecSettings(VideoCodec* codec_settings) {}
+
+VideoFrame VideoCodecUnitTest::NextInputFrame() {
+ test::FrameGeneratorInterface::VideoFrameData frame_data =
+ input_frame_generator_->NextFrame();
+ VideoFrame input_frame = VideoFrame::Builder()
+ .set_video_frame_buffer(frame_data.buffer)
+ .set_update_rect(frame_data.update_rect)
+ .build();
+
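+ // Advance the RTP timestamp by one frame interval on the 90 kHz RTP clock
+ // (kVideoPayloadTypeFrequency); at 30 fps this is 90000 / 30 = 3000 ticks.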
+ const uint32_t timestamp =
+ last_input_frame_timestamp_ +
+ kVideoPayloadTypeFrequency / codec_settings_.maxFramerate;
+ input_frame.set_timestamp(timestamp);
+
+ last_input_frame_timestamp_ = timestamp;
+ return input_frame;
+}
+
+bool VideoCodecUnitTest::WaitForEncodedFrame(
+ EncodedImage* frame,
+ CodecSpecificInfo* codec_specific_info) {
+ std::vector<EncodedImage> frames;
+ std::vector<CodecSpecificInfo> codec_specific_infos;
+ if (!WaitForEncodedFrames(&frames, &codec_specific_infos))
+ return false;
+ EXPECT_EQ(frames.size(), static_cast<size_t>(1));
+ EXPECT_EQ(frames.size(), codec_specific_infos.size());
+ *frame = frames[0];
+ *codec_specific_info = codec_specific_infos[0];
+ return true;
+}
+
+void VideoCodecUnitTest::SetWaitForEncodedFramesThreshold(size_t num_frames) {
+ MutexLock lock(&encoded_frame_section_);
+ wait_for_encoded_frames_threshold_ = num_frames;
+}
+
+bool VideoCodecUnitTest::WaitForEncodedFrames(
+ std::vector<EncodedImage>* frames,
+ std::vector<CodecSpecificInfo>* codec_specific_info) {
+ EXPECT_TRUE(encoded_frame_event_.Wait(kEncodeTimeoutMs))
+ << "Timed out while waiting for encoded frame.";
+ // This becomes unsafe if there are multiple threads waiting for frames.
+ MutexLock lock(&encoded_frame_section_);
+ EXPECT_FALSE(encoded_frames_.empty());
+ EXPECT_FALSE(codec_specific_infos_.empty());
+ EXPECT_EQ(encoded_frames_.size(), codec_specific_infos_.size());
+ if (!encoded_frames_.empty()) {
+ *frames = encoded_frames_;
+ encoded_frames_.clear();
+ RTC_DCHECK(!codec_specific_infos_.empty());
+ *codec_specific_info = codec_specific_infos_;
+ codec_specific_infos_.clear();
+ return true;
+ } else {
+ return false;
+ }
+}
+
+bool VideoCodecUnitTest::WaitForDecodedFrame(std::unique_ptr<VideoFrame>* frame,
+ absl::optional<uint8_t>* qp) {
+ bool ret = decoded_frame_event_.Wait(kDecodeTimeoutMs);
+ EXPECT_TRUE(ret) << "Timed out while waiting for a decoded frame.";
+ // This becomes unsafe if there are multiple threads waiting for frames.
+ MutexLock lock(&decoded_frame_section_);
+ EXPECT_TRUE(decoded_frame_);
+ if (decoded_frame_) {
+ frame->reset(new VideoFrame(std::move(*decoded_frame_)));
+ *qp = decoded_qp_;
+ decoded_frame_.reset();
+ return true;
+ } else {
+ return false;
+ }
+}
+
+size_t VideoCodecUnitTest::GetNumEncodedFrames() {
+ MutexLock lock(&encoded_frame_section_);
+ return encoded_frames_.size();
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/test/video_codec_unittest.h b/third_party/libwebrtc/modules/video_coding/codecs/test/video_codec_unittest.h
new file mode 100644
index 0000000000..7d05882b63
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/test/video_codec_unittest.h
@@ -0,0 +1,128 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_CODECS_TEST_VIDEO_CODEC_UNITTEST_H_
+#define MODULES_VIDEO_CODING_CODECS_TEST_VIDEO_CODEC_UNITTEST_H_
+
+#include <memory>
+#include <vector>
+
+#include "api/test/frame_generator_interface.h"
+#include "api/video_codecs/video_decoder.h"
+#include "api/video_codecs/video_encoder.h"
+#include "modules/video_coding/include/video_codec_interface.h"
+#include "modules/video_coding/utility/vp8_header_parser.h"
+#include "modules/video_coding/utility/vp9_uncompressed_header_parser.h"
+#include "rtc_base/event.h"
+#include "rtc_base/synchronization/mutex.h"
+#include "rtc_base/thread_annotations.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+class VideoCodecUnitTest : public ::testing::Test {
+ public:
+ VideoCodecUnitTest()
+ : encode_complete_callback_(this),
+ decode_complete_callback_(this),
+ wait_for_encoded_frames_threshold_(1),
+ last_input_frame_timestamp_(0) {}
+
+ protected:
+ class FakeEncodeCompleteCallback : public webrtc::EncodedImageCallback {
+ public:
+ explicit FakeEncodeCompleteCallback(VideoCodecUnitTest* test)
+ : test_(test) {}
+
+ Result OnEncodedImage(const EncodedImage& frame,
+ const CodecSpecificInfo* codec_specific_info);
+
+ private:
+ VideoCodecUnitTest* const test_;
+ };
+
+ class FakeDecodeCompleteCallback : public webrtc::DecodedImageCallback {
+ public:
+ explicit FakeDecodeCompleteCallback(VideoCodecUnitTest* test)
+ : test_(test) {}
+
+ int32_t Decoded(VideoFrame& frame) override {
+ RTC_DCHECK_NOTREACHED();
+ return -1;
+ }
+ int32_t Decoded(VideoFrame& frame, int64_t decode_time_ms) override {
+ RTC_DCHECK_NOTREACHED();
+ return -1;
+ }
+ void Decoded(VideoFrame& frame,
+ absl::optional<int32_t> decode_time_ms,
+ absl::optional<uint8_t> qp) override;
+
+ private:
+ VideoCodecUnitTest* const test_;
+ };
+
+ virtual std::unique_ptr<VideoEncoder> CreateEncoder() = 0;
+ virtual std::unique_ptr<VideoDecoder> CreateDecoder() = 0;
+
+ void SetUp() override;
+
+ virtual void ModifyCodecSettings(VideoCodec* codec_settings);
+
+ VideoFrame NextInputFrame();
+
+ // Helper method for waiting for a single encoded frame.
+ bool WaitForEncodedFrame(EncodedImage* frame,
+ CodecSpecificInfo* codec_specific_info);
+
+ // Helper methods for waiting for multiple encoded frames. Callers must
+ // define how many frames they will wait for via `num_frames` before calling
+ // Encode(). They can then retrieve the frames via WaitForEncodedFrames().
+ void SetWaitForEncodedFramesThreshold(size_t num_frames);
+ bool WaitForEncodedFrames(
+ std::vector<EncodedImage>* frames,
+ std::vector<CodecSpecificInfo>* codec_specific_info);
+
+ // Helper method for waiting for a single decoded frame.
+ bool WaitForDecodedFrame(std::unique_ptr<VideoFrame>* frame,
+ absl::optional<uint8_t>* qp);
+
+ size_t GetNumEncodedFrames();
+
+ VideoCodec codec_settings_;
+
+ std::unique_ptr<VideoEncoder> encoder_;
+ std::unique_ptr<VideoDecoder> decoder_;
+ std::unique_ptr<test::FrameGeneratorInterface> input_frame_generator_;
+
+ private:
+ FakeEncodeCompleteCallback encode_complete_callback_;
+ FakeDecodeCompleteCallback decode_complete_callback_;
+
+ rtc::Event encoded_frame_event_;
+ Mutex encoded_frame_section_;
+ size_t wait_for_encoded_frames_threshold_;
+ std::vector<EncodedImage> encoded_frames_
+ RTC_GUARDED_BY(encoded_frame_section_);
+ std::vector<CodecSpecificInfo> codec_specific_infos_
+ RTC_GUARDED_BY(encoded_frame_section_);
+
+ rtc::Event decoded_frame_event_;
+ Mutex decoded_frame_section_;
+ absl::optional<VideoFrame> decoded_frame_
+ RTC_GUARDED_BY(decoded_frame_section_);
+ absl::optional<uint8_t> decoded_qp_ RTC_GUARDED_BY(decoded_frame_section_);
+
+ uint32_t last_input_frame_timestamp_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_CODECS_TEST_VIDEO_CODEC_UNITTEST_H_
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/test/video_encoder_decoder_instantiation_tests.cc b/third_party/libwebrtc/modules/video_coding/codecs/test/video_encoder_decoder_instantiation_tests.cc
new file mode 100644
index 0000000000..41f2304748
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/test/video_encoder_decoder_instantiation_tests.cc
@@ -0,0 +1,155 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <memory>
+#include <vector>
+
+#include "api/video_codecs/sdp_video_format.h"
+#include "api/video_codecs/video_decoder.h"
+#include "api/video_codecs/video_decoder_factory.h"
+#include "api/video_codecs/video_encoder.h"
+#include "api/video_codecs/video_encoder_factory.h"
+#if defined(WEBRTC_ANDROID)
+#include "modules/video_coding/codecs/test/android_codec_factory_helper.h"
+#elif defined(WEBRTC_IOS)
+#include "modules/video_coding/codecs/test/objc_codec_factory_helper.h"
+#endif
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "test/video_codec_settings.h"
+
+namespace webrtc {
+namespace test {
+
+namespace {
+
+using ::testing::NotNull;
+
+const VideoEncoder::Capabilities kCapabilities(false);
+
+int32_t InitEncoder(VideoCodecType codec_type, VideoEncoder* encoder) {
+ VideoCodec codec;
+ CodecSettings(codec_type, &codec);
+ codec.width = 640;
+ codec.height = 480;
+ codec.maxFramerate = 30;
+ RTC_CHECK(encoder);
+ return encoder->InitEncode(
+ &codec, VideoEncoder::Settings(kCapabilities, 1 /* number_of_cores */,
+ 1200 /* max_payload_size */));
+}
+
+VideoDecoder::Settings DecoderSettings(VideoCodecType codec_type) {
+ VideoDecoder::Settings settings;
+ settings.set_max_render_resolution({640, 480});
+ settings.set_codec_type(codec_type);
+ return settings;
+}
+
+} // namespace
+
+class VideoEncoderDecoderInstantiationTest
+ : public ::testing::Test,
+ public ::testing::WithParamInterface<::testing::tuple<int, int>> {
+ protected:
+ VideoEncoderDecoderInstantiationTest()
+ : vp8_format_("VP8"),
+ vp9_format_("VP9"),
+ h264cbp_format_("H264"),
+ num_encoders_(::testing::get<0>(GetParam())),
+ num_decoders_(::testing::get<1>(GetParam())) {
+#if defined(WEBRTC_ANDROID)
+ InitializeAndroidObjects();
+ encoder_factory_ = CreateAndroidEncoderFactory();
+ decoder_factory_ = CreateAndroidDecoderFactory();
+#elif defined(WEBRTC_IOS)
+ encoder_factory_ = CreateObjCEncoderFactory();
+ decoder_factory_ = CreateObjCDecoderFactory();
+#else
+ RTC_DCHECK_NOTREACHED() << "Only support Android and iOS.";
+#endif
+ }
+
+ ~VideoEncoderDecoderInstantiationTest() {
+ for (auto& encoder : encoders_) {
+ encoder->Release();
+ }
+ for (auto& decoder : decoders_) {
+ decoder->Release();
+ }
+ }
+
+ const SdpVideoFormat vp8_format_;
+ const SdpVideoFormat vp9_format_;
+ const SdpVideoFormat h264cbp_format_;
+ std::unique_ptr<VideoEncoderFactory> encoder_factory_;
+ std::unique_ptr<VideoDecoderFactory> decoder_factory_;
+
+ const int num_encoders_;
+ const int num_decoders_;
+ std::vector<std::unique_ptr<VideoEncoder>> encoders_;
+ std::vector<std::unique_ptr<VideoDecoder>> decoders_;
+};
+
+INSTANTIATE_TEST_SUITE_P(MultipleEncoders,
+ VideoEncoderDecoderInstantiationTest,
+ ::testing::Combine(::testing::Range(1, 4),
+ ::testing::Range(1, 2)));
+
+INSTANTIATE_TEST_SUITE_P(MultipleDecoders,
+ VideoEncoderDecoderInstantiationTest,
+ ::testing::Combine(::testing::Range(1, 2),
+ ::testing::Range(1, 9)));
+
+INSTANTIATE_TEST_SUITE_P(MultipleEncodersDecoders,
+ VideoEncoderDecoderInstantiationTest,
+ ::testing::Combine(::testing::Range(1, 4),
+ ::testing::Range(1, 9)));
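+
+// Note that ::testing::Range(a, b) is half-open, so e.g. Range(1, 4)
+// instantiates the tests for 1, 2, and 3 encoders.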
+
+// TODO(brandtr): Check that the factories actually support the codecs before
+// trying to instantiate. Currently, we will just crash with a Java exception
+// if the factory does not support the codec.
+TEST_P(VideoEncoderDecoderInstantiationTest, DISABLED_InstantiateVp8Codecs) {
+ for (int i = 0; i < num_encoders_; ++i) {
+ std::unique_ptr<VideoEncoder> encoder =
+ encoder_factory_->CreateVideoEncoder(vp8_format_);
+ EXPECT_EQ(0, InitEncoder(kVideoCodecVP8, encoder.get()));
+ encoders_.emplace_back(std::move(encoder));
+ }
+
+ for (int i = 0; i < num_decoders_; ++i) {
+ std::unique_ptr<VideoDecoder> decoder =
+ decoder_factory_->CreateVideoDecoder(vp8_format_);
+ ASSERT_THAT(decoder, NotNull());
+ EXPECT_TRUE(decoder->Configure(DecoderSettings(kVideoCodecVP8)));
+ decoders_.emplace_back(std::move(decoder));
+ }
+}
+
+TEST_P(VideoEncoderDecoderInstantiationTest,
+ DISABLED_InstantiateH264CBPCodecs) {
+ for (int i = 0; i < num_encoders_; ++i) {
+ std::unique_ptr<VideoEncoder> encoder =
+ encoder_factory_->CreateVideoEncoder(h264cbp_format_);
+ EXPECT_EQ(0, InitEncoder(kVideoCodecH264, encoder.get()));
+ encoders_.emplace_back(std::move(encoder));
+ }
+
+ for (int i = 0; i < num_decoders_; ++i) {
+ std::unique_ptr<VideoDecoder> decoder =
+ decoder_factory_->CreateVideoDecoder(h264cbp_format_);
+ ASSERT_THAT(decoder, NotNull());
+ EXPECT_TRUE(decoder->Configure(DecoderSettings(kVideoCodecH264)));
+ decoders_.push_back(std::move(decoder));
+ }
+}
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/test/videocodec_test_av1.cc b/third_party/libwebrtc/modules/video_coding/codecs/test/videocodec_test_av1.cc
new file mode 100644
index 0000000000..4fa343e706
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/test/videocodec_test_av1.cc
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <memory>
+#include <vector>
+
+#include "api/test/create_videocodec_test_fixture.h"
+#include "api/test/video/function_video_encoder_factory.h"
+#include "api/video_codecs/sdp_video_format.h"
+#include "media/base/media_constants.h"
+#include "media/engine/internal_decoder_factory.h"
+#include "media/engine/internal_encoder_factory.h"
+#include "media/engine/simulcast_encoder_adapter.h"
+#include "modules/video_coding/codecs/av1/libaom_av1_decoder.h"
+#include "test/gtest.h"
+#include "test/testsupport/file_utils.h"
+
+namespace webrtc {
+namespace test {
+namespace {
+// Test clips settings.
+constexpr int kCifWidth = 352;
+constexpr int kCifHeight = 288;
+constexpr int kNumFramesLong = 300;
+
+VideoCodecTestFixture::Config CreateConfig(std::string filename) {
+ VideoCodecTestFixture::Config config;
+ config.filename = filename;
+ config.filepath = ResourcePath(config.filename, "yuv");
+ config.num_frames = kNumFramesLong;
+ config.use_single_core = true;
+ return config;
+}
+
+TEST(VideoCodecTestAv1, HighBitrate) {
+ auto config = CreateConfig("foreman_cif");
+ config.SetCodecSettings(cricket::kAv1CodecName, 1, 1, 1, false, true, true,
+ kCifWidth, kCifHeight);
+ config.codec_settings.SetScalabilityMode(ScalabilityMode::kL1T1);
+ config.num_frames = kNumFramesLong;
+ auto fixture = CreateVideoCodecTestFixture(config);
+
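+ // RateProfile fields, assuming the fixture's declaration order:
+ // {target_kbps, input_fps, frame_num from which the profile applies}.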
+ std::vector<RateProfile> rate_profiles = {{500, 30, 0}};
+
+ std::vector<RateControlThresholds> rc_thresholds = {
+ {12, 1, 0, 1, 0.3, 0.1, 0, 1}};
+
+ std::vector<QualityThresholds> quality_thresholds = {{37, 34, 0.94, 0.91}};
+
+ fixture->RunTest(rate_profiles, &rc_thresholds, &quality_thresholds, nullptr);
+}
+
+TEST(VideoCodecTestAv1, VeryLowBitrate) {
+ auto config = CreateConfig("foreman_cif");
+ config.SetCodecSettings(cricket::kAv1CodecName, 1, 1, 1, false, true, true,
+ kCifWidth, kCifHeight);
+ config.codec_settings.SetScalabilityMode(ScalabilityMode::kL1T1);
+ auto fixture = CreateVideoCodecTestFixture(config);
+
+ std::vector<RateProfile> rate_profiles = {{50, 30, 0}};
+
+ std::vector<RateControlThresholds> rc_thresholds = {
+ {15, 8, 75, 2, 2, 2, 2, 1}};
+
+ std::vector<QualityThresholds> quality_thresholds = {{28, 24.8, 0.70, 0.55}};
+
+ fixture->RunTest(rate_profiles, &rc_thresholds, &quality_thresholds, nullptr);
+}
+
+#if !defined(WEBRTC_ANDROID)
+constexpr int kHdWidth = 1280;
+constexpr int kHdHeight = 720;
+TEST(VideoCodecTestAv1, Hd) {
+ auto config = CreateConfig("ConferenceMotion_1280_720_50");
+ config.SetCodecSettings(cricket::kAv1CodecName, 1, 1, 1, false, true, true,
+ kHdWidth, kHdHeight);
+ config.codec_settings.SetScalabilityMode(ScalabilityMode::kL1T1);
+ config.num_frames = kNumFramesLong;
+ auto fixture = CreateVideoCodecTestFixture(config);
+
+ std::vector<RateProfile> rate_profiles = {{1000, 50, 0}};
+
+ std::vector<RateControlThresholds> rc_thresholds = {
+ {13, 3, 0, 1, 0.3, 0.1, 0, 1}};
+
+ std::vector<QualityThresholds> quality_thresholds = {
+ {35.9, 31.5, 0.925, 0.865}};
+
+ fixture->RunTest(rate_profiles, &rc_thresholds, &quality_thresholds, nullptr);
+}
+#endif
+
+} // namespace
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/test/videocodec_test_fixture_config_unittest.cc b/third_party/libwebrtc/modules/video_coding/codecs/test/videocodec_test_fixture_config_unittest.cc
new file mode 100644
index 0000000000..126aa93ee8
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/test/videocodec_test_fixture_config_unittest.cc
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stddef.h>
+
+#include "api/test/videocodec_test_fixture.h"
+#include "api/video_codecs/video_codec.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "test/video_codec_settings.h"
+
+using ::testing::ElementsAre;
+
+namespace webrtc {
+namespace test {
+
+using Config = VideoCodecTestFixture::Config;
+
+namespace {
+const size_t kNumTemporalLayers = 2;
+} // namespace
+
+TEST(Config, NumberOfCoresWithUseSingleCore) {
+ Config config;
+ config.use_single_core = true;
+ EXPECT_EQ(1u, config.NumberOfCores());
+}
+
+TEST(Config, NumberOfCoresWithoutUseSingleCore) {
+ Config config;
+ config.use_single_core = false;
+ EXPECT_GE(config.NumberOfCores(), 1u);
+}
+
+TEST(Config, NumberOfTemporalLayersIsOne) {
+ Config config;
+ webrtc::test::CodecSettings(kVideoCodecH264, &config.codec_settings);
+ EXPECT_EQ(1u, config.NumberOfTemporalLayers());
+}
+
+TEST(Config, NumberOfTemporalLayers_Vp8) {
+ Config config;
+ webrtc::test::CodecSettings(kVideoCodecVP8, &config.codec_settings);
+ config.codec_settings.VP8()->numberOfTemporalLayers = kNumTemporalLayers;
+ EXPECT_EQ(kNumTemporalLayers, config.NumberOfTemporalLayers());
+}
+
+TEST(Config, NumberOfTemporalLayers_Vp9) {
+ Config config;
+ webrtc::test::CodecSettings(kVideoCodecVP9, &config.codec_settings);
+ config.codec_settings.VP9()->numberOfTemporalLayers = kNumTemporalLayers;
+ EXPECT_EQ(kNumTemporalLayers, config.NumberOfTemporalLayers());
+}
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/test/videocodec_test_fixture_impl.cc b/third_party/libwebrtc/modules/video_coding/codecs/test/videocodec_test_fixture_impl.cc
new file mode 100644
index 0000000000..cd940c9111
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/test/videocodec_test_fixture_impl.cc
@@ -0,0 +1,831 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/codecs/test/videocodec_test_fixture_impl.h"
+
+#include <stdint.h>
+#include <stdio.h>
+
+#include <algorithm>
+#include <cmath>
+#include <memory>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "absl/strings/str_replace.h"
+#include "absl/types/optional.h"
+#include "api/array_view.h"
+#include "api/transport/field_trial_based_config.h"
+#include "api/video/video_bitrate_allocation.h"
+#include "api/video_codecs/h264_profile_level_id.h"
+#include "api/video_codecs/sdp_video_format.h"
+#include "api/video_codecs/video_codec.h"
+#include "api/video_codecs/video_decoder.h"
+#include "api/video_codecs/video_decoder_factory_template.h"
+#include "api/video_codecs/video_decoder_factory_template_dav1d_adapter.h"
+#include "api/video_codecs/video_decoder_factory_template_libvpx_vp8_adapter.h"
+#include "api/video_codecs/video_decoder_factory_template_libvpx_vp9_adapter.h"
+#include "api/video_codecs/video_decoder_factory_template_open_h264_adapter.h"
+#include "api/video_codecs/video_encoder_config.h"
+#include "api/video_codecs/video_encoder_factory.h"
+#include "api/video_codecs/video_encoder_factory_template.h"
+#include "api/video_codecs/video_encoder_factory_template_libaom_av1_adapter.h"
+#include "api/video_codecs/video_encoder_factory_template_libvpx_vp8_adapter.h"
+#include "api/video_codecs/video_encoder_factory_template_libvpx_vp9_adapter.h"
+#include "api/video_codecs/video_encoder_factory_template_open_h264_adapter.h"
+#include "common_video/h264/h264_common.h"
+#include "media/base/media_constants.h"
+#include "media/engine/simulcast.h"
+#include "modules/video_coding/codecs/h264/include/h264_globals.h"
+#include "modules/video_coding/codecs/vp9/svc_config.h"
+#include "modules/video_coding/utility/ivf_file_writer.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/cpu_time.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/strings/string_builder.h"
+#include "rtc_base/time_utils.h"
+#include "system_wrappers/include/cpu_info.h"
+#include "system_wrappers/include/sleep.h"
+#include "test/gtest.h"
+#include "test/testsupport/file_utils.h"
+#include "test/testsupport/frame_writer.h"
+#include "test/testsupport/perf_test.h"
+#include "test/video_codec_settings.h"
+
+namespace webrtc {
+namespace test {
+
+using VideoStatistics = VideoCodecTestStats::VideoStatistics;
+
+namespace {
+const int kBaseKeyFrameInterval = 3000;
+const double kBitratePriority = 1.0;
+const int kDefaultMaxFramerateFps = 30;
+const int kMaxQp = 56;
+
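+// Derives per-stream simulcast settings from the top-level codec settings.
+// GetSimulcastConfig() reports bitrates in bps while SimulcastStream stores
+// kbps, hence the divisions by 1000 below. As a rough illustration, three
+// streams at 1280x720 input typically come out downscaled by powers of two
+// (e.g. 320x180, 640x360 and 1280x720).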
+void ConfigureSimulcast(VideoCodec* codec_settings) {
+ FieldTrialBasedConfig trials;
+ const std::vector<webrtc::VideoStream> streams = cricket::GetSimulcastConfig(
+ /*min_layer=*/1, codec_settings->numberOfSimulcastStreams,
+ codec_settings->width, codec_settings->height, kBitratePriority, kMaxQp,
+ /* is_screenshare = */ false, true, trials);
+
+ for (size_t i = 0; i < streams.size(); ++i) {
+ SimulcastStream* ss = &codec_settings->simulcastStream[i];
+ ss->width = static_cast<uint16_t>(streams[i].width);
+ ss->height = static_cast<uint16_t>(streams[i].height);
+ ss->numberOfTemporalLayers =
+ static_cast<unsigned char>(*streams[i].num_temporal_layers);
+ ss->maxBitrate = streams[i].max_bitrate_bps / 1000;
+ ss->targetBitrate = streams[i].target_bitrate_bps / 1000;
+ ss->minBitrate = streams[i].min_bitrate_bps / 1000;
+ ss->qpMax = streams[i].max_qp;
+ ss->active = true;
+ }
+}
+
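+// Derives per-spatial-layer settings for VP9 SVC. GetSvcConfig() scales each
+// successive spatial layer down, normally by a factor of two per dimension,
+// so e.g. three layers at 1280x720 roughly correspond to 320x180, 640x360
+// and 1280x720.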
+void ConfigureSvc(VideoCodec* codec_settings) {
+ RTC_CHECK_EQ(kVideoCodecVP9, codec_settings->codecType);
+
+ const std::vector<SpatialLayer> layers = GetSvcConfig(
+ codec_settings->width, codec_settings->height, kDefaultMaxFramerateFps,
+ /*first_active_layer=*/0, codec_settings->VP9()->numberOfSpatialLayers,
+ codec_settings->VP9()->numberOfTemporalLayers,
+ /* is_screen_sharing = */ false);
+ ASSERT_EQ(codec_settings->VP9()->numberOfSpatialLayers, layers.size())
+ << "GetSvcConfig returned fewer spatial layers than configured.";
+
+ for (size_t i = 0; i < layers.size(); ++i) {
+ codec_settings->spatialLayers[i] = layers[i];
+ }
+}
+
+std::string CodecSpecificToString(const VideoCodec& codec) {
+ char buf[1024];
+ rtc::SimpleStringBuilder ss(buf);
+ switch (codec.codecType) {
+ case kVideoCodecVP8:
+ ss << "\nnum_temporal_layers: "
+ << static_cast<int>(codec.VP8().numberOfTemporalLayers);
+ ss << "\ndenoising: " << codec.VP8().denoisingOn;
+ ss << "\nautomatic_resize: " << codec.VP8().automaticResizeOn;
+ ss << "\nkey_frame_interval: " << codec.VP8().keyFrameInterval;
+ break;
+ case kVideoCodecVP9:
+ ss << "\nnum_temporal_layers: "
+ << static_cast<int>(codec.VP9().numberOfTemporalLayers);
+ ss << "\nnum_spatial_layers: "
+ << static_cast<int>(codec.VP9().numberOfSpatialLayers);
+ ss << "\ndenoising: " << codec.VP9().denoisingOn;
+ ss << "\nkey_frame_interval: " << codec.VP9().keyFrameInterval;
+ ss << "\nadaptive_qp_mode: " << codec.VP9().adaptiveQpMode;
+ ss << "\nautomatic_resize: " << codec.VP9().automaticResizeOn;
+ ss << "\nflexible_mode: " << codec.VP9().flexibleMode;
+ break;
+ case kVideoCodecH264:
+ ss << "\nkey_frame_interval: " << codec.H264().keyFrameInterval;
+ ss << "\nnum_temporal_layers: "
+ << static_cast<int>(codec.H264().numberOfTemporalLayers);
+ break;
+ default:
+ break;
+ }
+ return ss.str();
+}
+
+bool RunEncodeInRealTime(const VideoCodecTestFixtureImpl::Config& config) {
+ return config.measure_cpu || config.encode_in_real_time;
+}
+
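+// E.g. a config with filename "foreman_cif", a VP8 codec and a start bitrate
+// of 500 kbps yields "foreman_cif_VP8_500".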
+std::string FilenameWithParams(
+ const VideoCodecTestFixtureImpl::Config& config) {
+ return config.filename + "_" + config.CodecName() + "_" +
+ std::to_string(config.codec_settings.startBitrate);
+}
+
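+// For H.264 this builds the SDP fmtp parameters explicitly; e.g. the default
+// constrained baseline profile at level 3.1 maps to a profile-level-id of
+// "42e01f", with packetization-mode "1" for NonInterleaved.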
+SdpVideoFormat CreateSdpVideoFormat(
+ const VideoCodecTestFixtureImpl::Config& config) {
+ if (config.codec_settings.codecType == kVideoCodecH264) {
+ const char* packetization_mode =
+ config.h264_codec_settings.packetization_mode ==
+ H264PacketizationMode::NonInterleaved
+ ? "1"
+ : "0";
+ SdpVideoFormat::Parameters codec_params = {
+ {cricket::kH264FmtpProfileLevelId,
+ *H264ProfileLevelIdToString(H264ProfileLevelId(
+ config.h264_codec_settings.profile, H264Level::kLevel3_1))},
+ {cricket::kH264FmtpPacketizationMode, packetization_mode},
+ {cricket::kH264FmtpLevelAsymmetryAllowed, "1"}};
+
+ return SdpVideoFormat(config.codec_name, codec_params);
+ } else if (config.codec_settings.codecType == kVideoCodecVP9) {
+ return SdpVideoFormat(config.codec_name, {{"profile-id", "0"}});
+ }
+
+ return SdpVideoFormat(config.codec_name);
+}
+
+} // namespace
+
+VideoCodecTestFixtureImpl::Config::Config() = default;
+
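+// Typical usage, as in the libvpx tests in this patch: a single VP8 stream
+// with one spatial and one temporal layer at CIF resolution:
+//
+// config.SetCodecSettings(cricket::kVp8CodecName,
+// /*num_simulcast_streams=*/1, /*num_spatial_layers=*/1,
+// /*num_temporal_layers=*/1, /*denoising_on=*/true,
+// /*frame_dropper_on=*/true, /*spatial_resize_on=*/false,
+// /*width=*/352, /*height=*/288);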
+void VideoCodecTestFixtureImpl::Config::SetCodecSettings(
+ std::string codec_name,
+ size_t num_simulcast_streams,
+ size_t num_spatial_layers,
+ size_t num_temporal_layers,
+ bool denoising_on,
+ bool frame_dropper_on,
+ bool spatial_resize_on,
+ size_t width,
+ size_t height) {
+ this->codec_name = codec_name;
+ VideoCodecType codec_type = PayloadStringToCodecType(codec_name);
+ webrtc::test::CodecSettings(codec_type, &codec_settings);
+
+ // TODO(brandtr): Move the setting of `width` and `height` to the tests, and
+ // DCHECK that they are set before initializing the codec instead.
+ codec_settings.width = static_cast<uint16_t>(width);
+ codec_settings.height = static_cast<uint16_t>(height);
+
+ RTC_CHECK(num_simulcast_streams >= 1 &&
+ num_simulcast_streams <= kMaxSimulcastStreams);
+ RTC_CHECK(num_spatial_layers >= 1 && num_spatial_layers <= kMaxSpatialLayers);
+ RTC_CHECK(num_temporal_layers >= 1 &&
+ num_temporal_layers <= kMaxTemporalStreams);
+
+ // Simulcast is only available with VP8.
+ RTC_CHECK(num_simulcast_streams < 2 || codec_type == kVideoCodecVP8);
+
+ // Spatial scalability is only available with VP9.
+ RTC_CHECK(num_spatial_layers < 2 || codec_type == kVideoCodecVP9);
+
+ // Some base code requires numberOfSimulcastStreams to be set to zero
+ // when simulcast is not used.
+ codec_settings.numberOfSimulcastStreams =
+ num_simulcast_streams <= 1 ? 0
+ : static_cast<uint8_t>(num_simulcast_streams);
+
+ codec_settings.SetFrameDropEnabled(frame_dropper_on);
+ switch (codec_settings.codecType) {
+ case kVideoCodecVP8:
+ codec_settings.VP8()->numberOfTemporalLayers =
+ static_cast<uint8_t>(num_temporal_layers);
+ codec_settings.VP8()->denoisingOn = denoising_on;
+ codec_settings.VP8()->automaticResizeOn = spatial_resize_on;
+ codec_settings.VP8()->keyFrameInterval = kBaseKeyFrameInterval;
+ break;
+ case kVideoCodecVP9:
+ codec_settings.VP9()->numberOfTemporalLayers =
+ static_cast<uint8_t>(num_temporal_layers);
+ codec_settings.VP9()->denoisingOn = denoising_on;
+ codec_settings.VP9()->keyFrameInterval = kBaseKeyFrameInterval;
+ codec_settings.VP9()->automaticResizeOn = spatial_resize_on;
+ codec_settings.VP9()->numberOfSpatialLayers =
+ static_cast<uint8_t>(num_spatial_layers);
+ break;
+ case kVideoCodecAV1:
+ codec_settings.qpMax = 63;
+ break;
+ case kVideoCodecH264:
+ codec_settings.H264()->keyFrameInterval = kBaseKeyFrameInterval;
+ codec_settings.H264()->numberOfTemporalLayers =
+ static_cast<uint8_t>(num_temporal_layers);
+ break;
+ default:
+ break;
+ }
+
+ if (codec_settings.numberOfSimulcastStreams > 1) {
+ ConfigureSimulcast(&codec_settings);
+ } else if (codec_settings.codecType == kVideoCodecVP9 &&
+ codec_settings.VP9()->numberOfSpatialLayers > 1) {
+ ConfigureSvc(&codec_settings);
+ }
+}
+
+size_t VideoCodecTestFixtureImpl::Config::NumberOfCores() const {
+ return use_single_core ? 1 : CpuInfo::DetectNumberOfCores();
+}
+
+size_t VideoCodecTestFixtureImpl::Config::NumberOfTemporalLayers() const {
+ if (codec_settings.codecType == kVideoCodecVP8) {
+ return codec_settings.VP8().numberOfTemporalLayers;
+ } else if (codec_settings.codecType == kVideoCodecVP9) {
+ return codec_settings.VP9().numberOfTemporalLayers;
+ } else if (codec_settings.codecType == kVideoCodecH264) {
+ return codec_settings.H264().numberOfTemporalLayers;
+ } else {
+ return 1;
+ }
+}
+
+size_t VideoCodecTestFixtureImpl::Config::NumberOfSpatialLayers() const {
+ if (codec_settings.codecType == kVideoCodecVP9) {
+ return codec_settings.VP9().numberOfSpatialLayers;
+ } else {
+ return 1;
+ }
+}
+
+size_t VideoCodecTestFixtureImpl::Config::NumberOfSimulcastStreams() const {
+ return codec_settings.numberOfSimulcastStreams;
+}
+
+std::string VideoCodecTestFixtureImpl::Config::ToString() const {
+ std::string codec_type = CodecTypeToPayloadString(codec_settings.codecType);
+ rtc::StringBuilder ss;
+ ss << "test_name: " << test_name;
+ ss << "\nfilename: " << filename;
+ ss << "\nnum_frames: " << num_frames;
+ ss << "\nmax_payload_size_bytes: " << max_payload_size_bytes;
+ ss << "\ndecode: " << decode;
+ ss << "\nuse_single_core: " << use_single_core;
+ ss << "\nmeasure_cpu: " << measure_cpu;
+ ss << "\nnum_cores: " << NumberOfCores();
+ ss << "\ncodec_type: " << codec_type;
+ ss << "\n\n--> codec_settings";
+ ss << "\nwidth: " << codec_settings.width;
+ ss << "\nheight: " << codec_settings.height;
+ ss << "\nmax_framerate_fps: " << codec_settings.maxFramerate;
+ ss << "\nstart_bitrate_kbps: " << codec_settings.startBitrate;
+ ss << "\nmax_bitrate_kbps: " << codec_settings.maxBitrate;
+ ss << "\nmin_bitrate_kbps: " << codec_settings.minBitrate;
+ ss << "\nmax_qp: " << codec_settings.qpMax;
+ ss << "\nnum_simulcast_streams: "
+ << static_cast<int>(codec_settings.numberOfSimulcastStreams);
+ ss << "\n\n--> codec_settings." << codec_type;
+ ss << "complexity: "
+ << static_cast<int>(codec_settings.GetVideoEncoderComplexity());
+ ss << "\nframe_dropping: " << codec_settings.GetFrameDropEnabled();
+ ss << "\n" << CodecSpecificToString(codec_settings);
+ if (codec_settings.numberOfSimulcastStreams > 1) {
+ for (int i = 0; i < codec_settings.numberOfSimulcastStreams; ++i) {
+ ss << "\n\n--> codec_settings.simulcastStream[" << i << "]";
+ const SimulcastStream& simulcast_stream =
+ codec_settings.simulcastStream[i];
+ ss << "\nwidth: " << simulcast_stream.width;
+ ss << "\nheight: " << simulcast_stream.height;
+ ss << "\nnum_temporal_layers: "
+ << static_cast<int>(simulcast_stream.numberOfTemporalLayers);
+ ss << "\nmin_bitrate_kbps: " << simulcast_stream.minBitrate;
+ ss << "\ntarget_bitrate_kbps: " << simulcast_stream.targetBitrate;
+ ss << "\nmax_bitrate_kbps: " << simulcast_stream.maxBitrate;
+ ss << "\nmax_qp: " << simulcast_stream.qpMax;
+ ss << "\nactive: " << simulcast_stream.active;
+ }
+ }
+ ss << "\n";
+ return ss.Release();
+}
+
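+// E.g. "VP8" for a default-named VP8 config, and "H264-CBP" or "H264-CHP"
+// for H.264 constrained baseline and constrained high profile, respectively.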
+std::string VideoCodecTestFixtureImpl::Config::CodecName() const {
+ std::string name = codec_name;
+ if (name.empty()) {
+ name = CodecTypeToPayloadString(codec_settings.codecType);
+ }
+ if (codec_settings.codecType == kVideoCodecH264) {
+ if (h264_codec_settings.profile == H264Profile::kProfileConstrainedHigh) {
+ return name + "-CHP";
+ } else {
+ RTC_DCHECK_EQ(h264_codec_settings.profile,
+ H264Profile::kProfileConstrainedBaseline);
+ return name + "-CBP";
+ }
+ }
+ return name;
+}
+
+// TODO(kthelgason): Move this out of the test fixture impl and
+// make available as a shared utility class.
+void VideoCodecTestFixtureImpl::H264KeyframeChecker::CheckEncodedFrame(
+ webrtc::VideoCodecType codec,
+ const EncodedImage& encoded_frame) const {
+ EXPECT_EQ(kVideoCodecH264, codec);
+ bool contains_sps = false;
+ bool contains_pps = false;
+ bool contains_idr = false;
+ const std::vector<webrtc::H264::NaluIndex> nalu_indices =
+ webrtc::H264::FindNaluIndices(encoded_frame.data(), encoded_frame.size());
+ for (const webrtc::H264::NaluIndex& index : nalu_indices) {
+ webrtc::H264::NaluType nalu_type = webrtc::H264::ParseNaluType(
+ encoded_frame.data()[index.payload_start_offset]);
+ if (nalu_type == webrtc::H264::NaluType::kSps) {
+ contains_sps = true;
+ } else if (nalu_type == webrtc::H264::NaluType::kPps) {
+ contains_pps = true;
+ } else if (nalu_type == webrtc::H264::NaluType::kIdr) {
+ contains_idr = true;
+ }
+ }
+ if (encoded_frame._frameType == VideoFrameType::kVideoFrameKey) {
+ EXPECT_TRUE(contains_sps) << "Keyframe should contain SPS.";
+ EXPECT_TRUE(contains_pps) << "Keyframe should contain PPS.";
+ EXPECT_TRUE(contains_idr) << "Keyframe should contain IDR.";
+ } else if (encoded_frame._frameType == VideoFrameType::kVideoFrameDelta) {
+ EXPECT_FALSE(contains_sps) << "Delta frame should not contain SPS.";
+ EXPECT_FALSE(contains_pps) << "Delta frame should not contain PPS.";
+ EXPECT_FALSE(contains_idr) << "Delta frame should not contain IDR.";
+ } else {
+ RTC_DCHECK_NOTREACHED();
+ }
+}
+
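+// Tracks process CPU time against wall-clock time across Start()/Stop()
+// pairs. As a worked example: 2 s of process CPU time over a 10 s wall-clock
+// run gives GetUsagePercent() = 20, which Print() divides by the core count,
+// so on four cores it logs "cpu_usage_percent: 5".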
+class VideoCodecTestFixtureImpl::CpuProcessTime final {
+ public:
+ explicit CpuProcessTime(const Config& config) : config_(config) {}
+ ~CpuProcessTime() {}
+
+ void Start() {
+ if (config_.measure_cpu) {
+ cpu_time_ -= rtc::GetProcessCpuTimeNanos();
+ wallclock_time_ -= rtc::SystemTimeNanos();
+ }
+ }
+ void Stop() {
+ if (config_.measure_cpu) {
+ cpu_time_ += rtc::GetProcessCpuTimeNanos();
+ wallclock_time_ += rtc::SystemTimeNanos();
+ }
+ }
+ void Print() const {
+ if (config_.measure_cpu) {
+ RTC_LOG(LS_INFO) << "cpu_usage_percent: "
+ << GetUsagePercent() / config_.NumberOfCores();
+ }
+ }
+
+ private:
+ double GetUsagePercent() const {
+ return static_cast<double>(cpu_time_) / wallclock_time_ * 100.0;
+ }
+
+ const Config config_;
+ int64_t cpu_time_ = 0;
+ int64_t wallclock_time_ = 0;
+};
+
+VideoCodecTestFixtureImpl::VideoCodecTestFixtureImpl(Config config)
+ : encoder_factory_(std::make_unique<webrtc::VideoEncoderFactoryTemplate<
+ webrtc::LibvpxVp8EncoderTemplateAdapter,
+ webrtc::LibvpxVp9EncoderTemplateAdapter,
+ webrtc::OpenH264EncoderTemplateAdapter,
+ webrtc::LibaomAv1EncoderTemplateAdapter>>()),
+ decoder_factory_(std::make_unique<webrtc::VideoDecoderFactoryTemplate<
+ webrtc::LibvpxVp8DecoderTemplateAdapter,
+ webrtc::LibvpxVp9DecoderTemplateAdapter,
+ webrtc::OpenH264DecoderTemplateAdapter,
+ webrtc::Dav1dDecoderTemplateAdapter>>()),
+ config_(config) {}
+
+VideoCodecTestFixtureImpl::VideoCodecTestFixtureImpl(
+ Config config,
+ std::unique_ptr<VideoDecoderFactory> decoder_factory,
+ std::unique_ptr<VideoEncoderFactory> encoder_factory)
+ : encoder_factory_(std::move(encoder_factory)),
+ decoder_factory_(std::move(decoder_factory)),
+ config_(config) {}
+
+VideoCodecTestFixtureImpl::~VideoCodecTestFixtureImpl() = default;
+
+// Processes all frames in the clip and verifies the result.
+void VideoCodecTestFixtureImpl::RunTest(
+ const std::vector<RateProfile>& rate_profiles,
+ const std::vector<RateControlThresholds>* rc_thresholds,
+ const std::vector<QualityThresholds>* quality_thresholds,
+ const BitstreamThresholds* bs_thresholds) {
+ RTC_DCHECK(!rate_profiles.empty());
+
+ // To emulate operation on a production VideoStreamEncoder, we call the
+ // codecs on a task queue.
+ TaskQueueForTest task_queue("VidProc TQ");
+
+ bool is_setup_succeeded = SetUpAndInitObjects(
+ &task_queue, rate_profiles[0].target_kbps, rate_profiles[0].input_fps);
+ EXPECT_TRUE(is_setup_succeeded);
+ if (!is_setup_succeeded) {
+ ReleaseAndCloseObjects(&task_queue);
+ return;
+ }
+
+ PrintSettings(&task_queue);
+ ProcessAllFrames(&task_queue, rate_profiles);
+ ReleaseAndCloseObjects(&task_queue);
+
+ AnalyzeAllFrames(rate_profiles, rc_thresholds, quality_thresholds,
+ bs_thresholds);
+}
+
+void VideoCodecTestFixtureImpl::ProcessAllFrames(
+ TaskQueueForTest* task_queue,
+ const std::vector<RateProfile>& rate_profiles) {
+ // Set initial rates.
+ auto rate_profile = rate_profiles.begin();
+ task_queue->PostTask([this, rate_profile] {
+ processor_->SetRates(rate_profile->target_kbps, rate_profile->input_fps);
+ });
+
+ cpu_process_time_->Start();
+
+ for (size_t frame_num = 0; frame_num < config_.num_frames; ++frame_num) {
+ auto next_rate_profile = std::next(rate_profile);
+ if (next_rate_profile != rate_profiles.end() &&
+ frame_num == next_rate_profile->frame_num) {
+ rate_profile = next_rate_profile;
+ task_queue->PostTask([this, rate_profile] {
+ processor_->SetRates(rate_profile->target_kbps,
+ rate_profile->input_fps);
+ });
+ }
+
+ task_queue->PostTask([this] { processor_->ProcessFrame(); });
+
+ if (RunEncodeInRealTime(config_)) {
+ // Roughly pace the frames.
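+ // E.g. at 30 fps this sleeps ceil(1000 / 30.0) = 34 ms per frame.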
+ const int frame_duration_ms =
+ std::ceil(rtc::kNumMillisecsPerSec / rate_profile->input_fps);
+ SleepMs(frame_duration_ms);
+ }
+ }
+
+ task_queue->PostTask([this] { processor_->Finalize(); });
+
+ // Wait until we know that the last frame has been sent for encode.
+ task_queue->SendTask([] {});
+
+ // Give the VideoProcessor pipeline some time to process the last frame,
+ // and then release the codecs.
+ SleepMs(1 * rtc::kNumMillisecsPerSec);
+ cpu_process_time_->Stop();
+}
+
+void VideoCodecTestFixtureImpl::AnalyzeAllFrames(
+ const std::vector<RateProfile>& rate_profiles,
+ const std::vector<RateControlThresholds>* rc_thresholds,
+ const std::vector<QualityThresholds>* quality_thresholds,
+ const BitstreamThresholds* bs_thresholds) {
+ for (size_t rate_profile_idx = 0; rate_profile_idx < rate_profiles.size();
+ ++rate_profile_idx) {
+ const size_t first_frame_num = rate_profiles[rate_profile_idx].frame_num;
+ const size_t last_frame_num =
+ rate_profile_idx + 1 < rate_profiles.size()
+ ? rate_profiles[rate_profile_idx + 1].frame_num - 1
+ : config_.num_frames - 1;
+ RTC_CHECK_GE(last_frame_num, first_frame_num);
+
+ VideoStatistics send_stat = stats_.SliceAndCalcAggregatedVideoStatistic(
+ first_frame_num, last_frame_num);
+ RTC_LOG(LS_INFO) << "==> Send stats";
+ RTC_LOG(LS_INFO) << send_stat.ToString("send_") << "\n";
+
+ std::vector<VideoStatistics> layer_stats =
+ stats_.SliceAndCalcLayerVideoStatistic(first_frame_num, last_frame_num);
+ RTC_LOG(LS_INFO) << "==> Receive stats";
+ for (const auto& layer_stat : layer_stats) {
+ RTC_LOG(LS_INFO) << layer_stat.ToString("recv_") << "\n";
+
+ // For perf dashboard.
+ char modifier_buf[256];
+ rtc::SimpleStringBuilder modifier(modifier_buf);
+ modifier << "_r" << rate_profile_idx << "_sl" << layer_stat.spatial_idx;
+
+ auto PrintResultHelper = [&modifier, this](const std::string& measurement,
+ double value,
+ const std::string& units) {
+ PrintResult(measurement, modifier.str(), config_.test_name, value,
+ units, /*important=*/false);
+ };
+
+ if (layer_stat.temporal_idx == config_.NumberOfTemporalLayers() - 1) {
+ PrintResultHelper("enc_speed", layer_stat.enc_speed_fps, "fps");
+ PrintResultHelper("avg_key_frame_size",
+ layer_stat.avg_key_frame_size_bytes, "bytes");
+ PrintResultHelper("num_key_frames", layer_stat.num_key_frames,
+ "frames");
+ printf("\n");
+ }
+
+ modifier << "tl" << layer_stat.temporal_idx;
+ PrintResultHelper("dec_speed", layer_stat.dec_speed_fps, "fps");
+ PrintResultHelper("avg_delta_frame_size",
+ layer_stat.avg_delta_frame_size_bytes, "bytes");
+ PrintResultHelper("bitrate", layer_stat.bitrate_kbps, "kbps");
+ PrintResultHelper("framerate", layer_stat.framerate_fps, "fps");
+ PrintResultHelper("avg_psnr_y", layer_stat.avg_psnr_y, "dB");
+ PrintResultHelper("avg_psnr_u", layer_stat.avg_psnr_u, "dB");
+ PrintResultHelper("avg_psnr_v", layer_stat.avg_psnr_v, "dB");
+ PrintResultHelper("min_psnr_yuv", layer_stat.min_psnr, "dB");
+ PrintResultHelper("avg_qp", layer_stat.avg_qp, "");
+ printf("\n");
+ if (layer_stat.temporal_idx == config_.NumberOfTemporalLayers() - 1) {
+ printf("\n");
+ }
+ }
+
+ const RateControlThresholds* rc_threshold =
+ rc_thresholds ? &(*rc_thresholds)[rate_profile_idx] : nullptr;
+ const QualityThresholds* quality_threshold =
+ quality_thresholds ? &(*quality_thresholds)[rate_profile_idx] : nullptr;
+
+ VerifyVideoStatistic(send_stat, rc_threshold, quality_threshold,
+ bs_thresholds,
+ rate_profiles[rate_profile_idx].target_kbps,
+ rate_profiles[rate_profile_idx].input_fps);
+ }
+
+ if (config_.print_frame_level_stats) {
+ RTC_LOG(LS_INFO) << "==> Frame stats";
+ std::vector<VideoCodecTestStats::FrameStatistics> frame_stats =
+ stats_.GetFrameStatistics();
+ for (const auto& frame_stat : frame_stats) {
+ RTC_LOG(LS_INFO) << frame_stat.ToString();
+ }
+ }
+
+ cpu_process_time_->Print();
+}
+
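+// Checks the aggregated statistics against the given thresholds. E.g. with a
+// 500 kbps target and a measured 540 kbps, the bitrate mismatch is
+// 100 * |540 - 500| / 500 = 8 percent, which passes a
+// max_avg_bitrate_mismatch_percent threshold of 10.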
+void VideoCodecTestFixtureImpl::VerifyVideoStatistic(
+ const VideoStatistics& video_stat,
+ const RateControlThresholds* rc_thresholds,
+ const QualityThresholds* quality_thresholds,
+ const BitstreamThresholds* bs_thresholds,
+ size_t target_bitrate_kbps,
+ double input_framerate_fps) {
+ if (rc_thresholds) {
+ const float bitrate_mismatch_percent =
+ 100 * std::fabs(1.0f * video_stat.bitrate_kbps - target_bitrate_kbps) /
+ target_bitrate_kbps;
+ const float framerate_mismatch_percent =
+ 100 * std::fabs(video_stat.framerate_fps - input_framerate_fps) /
+ input_framerate_fps;
+ EXPECT_LE(bitrate_mismatch_percent,
+ rc_thresholds->max_avg_bitrate_mismatch_percent);
+ EXPECT_LE(video_stat.time_to_reach_target_bitrate_sec,
+ rc_thresholds->max_time_to_reach_target_bitrate_sec);
+ EXPECT_LE(framerate_mismatch_percent,
+ rc_thresholds->max_avg_framerate_mismatch_percent);
+ EXPECT_LE(video_stat.avg_delay_sec,
+ rc_thresholds->max_avg_buffer_level_sec);
+ EXPECT_LE(video_stat.max_key_frame_delay_sec,
+ rc_thresholds->max_max_key_frame_delay_sec);
+ EXPECT_LE(video_stat.max_delta_frame_delay_sec,
+ rc_thresholds->max_max_delta_frame_delay_sec);
+ EXPECT_LE(video_stat.num_spatial_resizes,
+ rc_thresholds->max_num_spatial_resizes);
+ EXPECT_LE(video_stat.num_key_frames, rc_thresholds->max_num_key_frames);
+ }
+
+ if (quality_thresholds) {
+ EXPECT_GT(video_stat.avg_psnr, quality_thresholds->min_avg_psnr);
+ EXPECT_GT(video_stat.min_psnr, quality_thresholds->min_min_psnr);
+
+ // SSIM calculation is not optimized and thus it is disabled in real-time
+ // mode.
+ if (!config_.encode_in_real_time) {
+ EXPECT_GT(video_stat.avg_ssim, quality_thresholds->min_avg_ssim);
+ EXPECT_GT(video_stat.min_ssim, quality_thresholds->min_min_ssim);
+ }
+ }
+
+ if (bs_thresholds) {
+ EXPECT_LE(video_stat.max_nalu_size_bytes,
+ bs_thresholds->max_max_nalu_size_bytes);
+ }
+}
+
+bool VideoCodecTestFixtureImpl::CreateEncoderAndDecoder() {
+ SdpVideoFormat encoder_format(CreateSdpVideoFormat(config_));
+ SdpVideoFormat decoder_format = encoder_format;
+
+ // Override encoder and decoder formats with explicitly provided ones.
+ if (config_.encoder_format) {
+ RTC_DCHECK_EQ(config_.encoder_format->name, config_.codec_name);
+ encoder_format = *config_.encoder_format;
+ }
+
+ if (config_.decoder_format) {
+ RTC_DCHECK_EQ(config_.decoder_format->name, config_.codec_name);
+ decoder_format = *config_.decoder_format;
+ }
+
+ encoder_ = encoder_factory_->CreateVideoEncoder(encoder_format);
+ EXPECT_TRUE(encoder_) << "Encoder not successfully created.";
+ if (encoder_ == nullptr) {
+ return false;
+ }
+
+ const size_t num_simulcast_or_spatial_layers = std::max(
+ config_.NumberOfSimulcastStreams(), config_.NumberOfSpatialLayers());
+ for (size_t i = 0; i < num_simulcast_or_spatial_layers; ++i) {
+ std::unique_ptr<VideoDecoder> decoder =
+ decoder_factory_->CreateVideoDecoder(decoder_format);
+ EXPECT_TRUE(decoder) << "Decoder not successfully created.";
+ if (decoder == nullptr) {
+ return false;
+ }
+ decoders_.push_back(std::move(decoder));
+ }
+
+ return true;
+}
+
+void VideoCodecTestFixtureImpl::DestroyEncoderAndDecoder() {
+ decoders_.clear();
+ encoder_.reset();
+}
+
+VideoCodecTestStats& VideoCodecTestFixtureImpl::GetStats() {
+ return stats_;
+}
+
+bool VideoCodecTestFixtureImpl::SetUpAndInitObjects(
+ TaskQueueForTest* task_queue,
+ size_t initial_bitrate_kbps,
+ double initial_framerate_fps) {
+ config_.codec_settings.minBitrate = 0;
+ config_.codec_settings.startBitrate = static_cast<int>(initial_bitrate_kbps);
+ config_.codec_settings.maxFramerate = std::ceil(initial_framerate_fps);
+
+ int clip_width = config_.clip_width.value_or(config_.codec_settings.width);
+ int clip_height = config_.clip_height.value_or(config_.codec_settings.height);
+
+ // Create file objects for quality analysis.
+ source_frame_reader_.reset(new YuvFrameReaderImpl(
+ config_.filepath, clip_width, clip_height,
+ config_.reference_width.value_or(clip_width),
+ config_.reference_height.value_or(clip_height),
+ YuvFrameReaderImpl::RepeatMode::kPingPong, config_.clip_fps,
+ config_.codec_settings.maxFramerate));
+ EXPECT_TRUE(source_frame_reader_->Init());
+
+ RTC_DCHECK(encoded_frame_writers_.empty());
+ RTC_DCHECK(decoded_frame_writers_.empty());
+
+ stats_.Clear();
+
+ cpu_process_time_.reset(new CpuProcessTime(config_));
+
+ bool is_codec_created = false;
+ task_queue->SendTask([this, &is_codec_created]() {
+ is_codec_created = CreateEncoderAndDecoder();
+ });
+
+ if (!is_codec_created) {
+ return false;
+ }
+
+ if (config_.visualization_params.save_encoded_ivf ||
+ config_.visualization_params.save_decoded_y4m) {
+ std::string encoder_name = GetCodecName(task_queue, /*is_encoder=*/true);
+ encoder_name = absl::StrReplaceAll(encoder_name, {{":", ""}, {" ", "-"}});
+
+ const size_t num_simulcast_or_spatial_layers = std::max(
+ config_.NumberOfSimulcastStreams(), config_.NumberOfSpatialLayers());
+ const size_t num_temporal_layers = config_.NumberOfTemporalLayers();
+ for (size_t simulcast_svc_idx = 0;
+ simulcast_svc_idx < num_simulcast_or_spatial_layers;
+ ++simulcast_svc_idx) {
+ const std::string output_filename_base =
+ JoinFilename(config_.output_path,
+ FilenameWithParams(config_) + "_" + encoder_name +
+ "_sl" + std::to_string(simulcast_svc_idx));
+
+ if (config_.visualization_params.save_encoded_ivf) {
+ for (size_t temporal_idx = 0; temporal_idx < num_temporal_layers;
+ ++temporal_idx) {
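+ // The resulting path looks like e.g.
+ // "<output_path>/foreman_cif_VP8_500_<encoder>_sl0tl0.ivf".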
+ const std::string output_file_path = output_filename_base + "tl" +
+ std::to_string(temporal_idx) +
+ ".ivf";
+ FileWrapper ivf_file = FileWrapper::OpenWriteOnly(output_file_path);
+
+ const VideoProcessor::LayerKey layer_key(simulcast_svc_idx,
+ temporal_idx);
+ encoded_frame_writers_[layer_key] =
+ IvfFileWriter::Wrap(std::move(ivf_file), /*byte_limit=*/0);
+ }
+ }
+
+ if (config_.visualization_params.save_decoded_y4m) {
+ FrameWriter* decoded_frame_writer = new Y4mFrameWriterImpl(
+ output_filename_base + ".y4m", config_.codec_settings.width,
+ config_.codec_settings.height, config_.codec_settings.maxFramerate);
+ EXPECT_TRUE(decoded_frame_writer->Init());
+ decoded_frame_writers_.push_back(
+ std::unique_ptr<FrameWriter>(decoded_frame_writer));
+ }
+ }
+ }
+
+ task_queue->SendTask(
+ [this]() {
+ processor_ = std::make_unique<VideoProcessor>(
+ encoder_.get(), &decoders_, source_frame_reader_.get(), config_,
+ &stats_, &encoded_frame_writers_,
+ decoded_frame_writers_.empty() ? nullptr : &decoded_frame_writers_);
+ });
+ return true;
+}
+
+void VideoCodecTestFixtureImpl::ReleaseAndCloseObjects(
+ TaskQueueForTest* task_queue) {
+ task_queue->SendTask([this]() {
+ processor_.reset();
+ // The VideoProcessor must be destroyed before the codecs.
+ DestroyEncoderAndDecoder();
+ });
+
+ source_frame_reader_->Close();
+
+ // Close visualization files.
+ for (auto& encoded_frame_writer : encoded_frame_writers_) {
+ EXPECT_TRUE(encoded_frame_writer.second->Close());
+ }
+ encoded_frame_writers_.clear();
+ for (auto& decoded_frame_writer : decoded_frame_writers_) {
+ decoded_frame_writer->Close();
+ }
+ decoded_frame_writers_.clear();
+}
+
+std::string VideoCodecTestFixtureImpl::GetCodecName(
+ TaskQueueForTest* task_queue,
+ bool is_encoder) const {
+ std::string codec_name;
+ task_queue->SendTask([this, is_encoder, &codec_name] {
+ if (is_encoder) {
+ codec_name = encoder_->GetEncoderInfo().implementation_name;
+ } else {
+ codec_name = decoders_.at(0)->ImplementationName();
+ }
+ });
+ return codec_name;
+}
+
+void VideoCodecTestFixtureImpl::PrintSettings(
+ TaskQueueForTest* task_queue) const {
+ RTC_LOG(LS_INFO) << "==> Config";
+ RTC_LOG(LS_INFO) << config_.ToString();
+
+ RTC_LOG(LS_INFO) << "==> Codec names";
+ RTC_LOG(LS_INFO) << "enc_impl_name: "
+ << GetCodecName(task_queue, /*is_encoder=*/true);
+ RTC_LOG(LS_INFO) << "dec_impl_name: "
+ << GetCodecName(task_queue, /*is_encoder=*/false);
+}
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/test/videocodec_test_fixture_impl.h b/third_party/libwebrtc/modules/video_coding/codecs/test/videocodec_test_fixture_impl.h
new file mode 100644
index 0000000000..005b7c0a8e
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/test/videocodec_test_fixture_impl.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_CODECS_TEST_VIDEOCODEC_TEST_FIXTURE_IMPL_H_
+#define MODULES_VIDEO_CODING_CODECS_TEST_VIDEOCODEC_TEST_FIXTURE_IMPL_H_
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "api/test/videocodec_test_fixture.h"
+#include "api/video_codecs/video_decoder_factory.h"
+#include "api/video_codecs/video_encoder_factory.h"
+#include "common_video/h264/h264_common.h"
+#include "modules/video_coding/codecs/test/videocodec_test_stats_impl.h"
+#include "modules/video_coding/codecs/test/videoprocessor.h"
+#include "modules/video_coding/utility/ivf_file_writer.h"
+#include "rtc_base/task_queue_for_test.h"
+#include "test/testsupport/frame_reader.h"
+#include "test/testsupport/frame_writer.h"
+
+namespace webrtc {
+namespace test {
+
+// Integration test for the video processor. It performs rate control and
+// frame quality analysis using frame statistics collected by the video
+// processor, and logs the results. If thresholds are specified, it checks
+// that the corresponding metrics stay within the desired ranges.
+class VideoCodecTestFixtureImpl : public VideoCodecTestFixture {
+ public:
+ // Verifies that all H.264 keyframes contain SPS/PPS/IDR NALUs.
+ class H264KeyframeChecker : public EncodedFrameChecker {
+ public:
+ void CheckEncodedFrame(webrtc::VideoCodecType codec,
+ const EncodedImage& encoded_frame) const override;
+ };
+
+ explicit VideoCodecTestFixtureImpl(Config config);
+ VideoCodecTestFixtureImpl(
+ Config config,
+ std::unique_ptr<VideoDecoderFactory> decoder_factory,
+ std::unique_ptr<VideoEncoderFactory> encoder_factory);
+ ~VideoCodecTestFixtureImpl() override;
+
+ void RunTest(const std::vector<RateProfile>& rate_profiles,
+ const std::vector<RateControlThresholds>* rc_thresholds,
+ const std::vector<QualityThresholds>* quality_thresholds,
+ const BitstreamThresholds* bs_thresholds) override;
+
+ VideoCodecTestStats& GetStats() override;
+
+ private:
+ class CpuProcessTime;
+
+ bool CreateEncoderAndDecoder();
+ void DestroyEncoderAndDecoder();
+ bool SetUpAndInitObjects(TaskQueueForTest* task_queue,
+ size_t initial_bitrate_kbps,
+ double initial_framerate_fps);
+ void ReleaseAndCloseObjects(TaskQueueForTest* task_queue);
+
+ void ProcessAllFrames(TaskQueueForTest* task_queue,
+ const std::vector<RateProfile>& rate_profiles);
+ void AnalyzeAllFrames(
+ const std::vector<RateProfile>& rate_profiles,
+ const std::vector<RateControlThresholds>* rc_thresholds,
+ const std::vector<QualityThresholds>* quality_thresholds,
+ const BitstreamThresholds* bs_thresholds);
+
+ void VerifyVideoStatistic(
+ const VideoCodecTestStats::VideoStatistics& video_stat,
+ const RateControlThresholds* rc_thresholds,
+ const QualityThresholds* quality_thresholds,
+ const BitstreamThresholds* bs_thresholds,
+ size_t target_bitrate_kbps,
+ double input_framerate_fps);
+
+ std::string GetCodecName(TaskQueueForTest* task_queue, bool is_encoder) const;
+ void PrintSettings(TaskQueueForTest* task_queue) const;
+
+ // Codecs.
+ const std::unique_ptr<VideoEncoderFactory> encoder_factory_;
+ std::unique_ptr<VideoEncoder> encoder_;
+ const std::unique_ptr<VideoDecoderFactory> decoder_factory_;
+ VideoProcessor::VideoDecoderList decoders_;
+
+ // Helper objects.
+ Config config_;
+ VideoCodecTestStatsImpl stats_;
+ std::unique_ptr<FrameReader> source_frame_reader_;
+ VideoProcessor::IvfFileWriterMap encoded_frame_writers_;
+ VideoProcessor::FrameWriterList decoded_frame_writers_;
+ std::unique_ptr<VideoProcessor> processor_;
+ std::unique_ptr<CpuProcessTime> cpu_process_time_;
+};
+
+} // namespace test
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_CODECS_TEST_VIDEOCODEC_TEST_FIXTURE_IMPL_H_
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/test/videocodec_test_libvpx.cc b/third_party/libwebrtc/modules/video_coding/codecs/test/videocodec_test_libvpx.cc
new file mode 100644
index 0000000000..062375bd60
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/test/videocodec_test_libvpx.cc
@@ -0,0 +1,465 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <memory>
+#include <vector>
+
+#include "api/test/create_videocodec_test_fixture.h"
+#include "api/test/video/function_video_encoder_factory.h"
+#include "api/video_codecs/sdp_video_format.h"
+#include "media/base/media_constants.h"
+#include "media/engine/internal_decoder_factory.h"
+#include "media/engine/internal_encoder_factory.h"
+#include "media/engine/simulcast_encoder_adapter.h"
+#include "modules/video_coding/utility/vp8_header_parser.h"
+#include "modules/video_coding/utility/vp9_uncompressed_header_parser.h"
+#include "test/gtest.h"
+#include "test/testsupport/file_utils.h"
+
+namespace webrtc {
+namespace test {
+
+using VideoStatistics = VideoCodecTestStats::VideoStatistics;
+
+namespace {
+// Codec settings.
+const int kCifWidth = 352;
+const int kCifHeight = 288;
+const int kNumFramesShort = 100;
+const int kNumFramesLong = 300;
+const size_t kBitrateRdPerfKbps[] = {100, 200, 300, 400, 500, 600,
+ 700, 800, 1000, 1250, 1400, 1600,
+ 1800, 2000, 2200, 2500};
+const size_t kNumFirstFramesToSkipAtRdPerfAnalysis = 60;
+
+class QpFrameChecker : public VideoCodecTestFixture::EncodedFrameChecker {
+ public:
+ void CheckEncodedFrame(webrtc::VideoCodecType codec,
+ const EncodedImage& encoded_frame) const override {
+ int qp;
+ if (codec == kVideoCodecVP8) {
+ EXPECT_TRUE(vp8::GetQp(encoded_frame.data(), encoded_frame.size(), &qp));
+ } else if (codec == kVideoCodecVP9) {
+ EXPECT_TRUE(vp9::GetQp(encoded_frame.data(), encoded_frame.size(), &qp));
+ } else {
+ RTC_DCHECK_NOTREACHED();
+ }
+ EXPECT_EQ(encoded_frame.qp_, qp) << "Encoder QP != parsed bitstream QP.";
+ }
+};
+
+VideoCodecTestFixture::Config CreateConfig() {
+ VideoCodecTestFixture::Config config;
+ config.filename = "foreman_cif";
+ config.filepath = ResourcePath(config.filename, "yuv");
+ config.num_frames = kNumFramesLong;
+ config.use_single_core = true;
+ return config;
+}
+
+void PrintRdPerf(
+ const std::map<size_t, std::vector<VideoStatistics>>& rd_stats) {
+ printf("--> Summary\n");
+ printf("%11s %5s %6s %11s %12s %11s %13s %13s %5s %7s %7s %7s %13s %13s\n",
+ "uplink_kbps", "width", "height", "spatial_idx", "temporal_idx",
+ "target_kbps", "downlink_kbps", "framerate_fps", "psnr", "psnr_y",
+ "psnr_u", "psnr_v", "enc_speed_fps", "dec_speed_fps");
+ for (const auto& rd_stat : rd_stats) {
+ const size_t bitrate_kbps = rd_stat.first;
+ for (const auto& layer_stat : rd_stat.second) {
+ printf(
+ "%11zu %5zu %6zu %11zu %12zu %11zu %13zu %13.2f %5.2f %7.2f %7.2f "
+ "%7.2f"
+ "%13.2f %13.2f\n",
+ bitrate_kbps, layer_stat.width, layer_stat.height,
+ layer_stat.spatial_idx, layer_stat.temporal_idx,
+ layer_stat.target_bitrate_kbps, layer_stat.bitrate_kbps,
+ layer_stat.framerate_fps, layer_stat.avg_psnr, layer_stat.avg_psnr_y,
+ layer_stat.avg_psnr_u, layer_stat.avg_psnr_v,
+ layer_stat.enc_speed_fps, layer_stat.dec_speed_fps);
+ }
+ }
+}
+} // namespace
+
+#if defined(RTC_ENABLE_VP9)
+TEST(VideoCodecTestLibvpx, HighBitrateVP9) {
+ auto config = CreateConfig();
+ config.SetCodecSettings(cricket::kVp9CodecName, 1, 1, 1, false, true, false,
+ kCifWidth, kCifHeight);
+ config.num_frames = kNumFramesShort;
+ const auto frame_checker = std::make_unique<QpFrameChecker>();
+ config.encoded_frame_checker = frame_checker.get();
+ auto fixture = CreateVideoCodecTestFixture(config);
+
+ std::vector<RateProfile> rate_profiles = {{500, 30, 0}};
+
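+ // The braced initializers below follow the RateControlThresholds field
+ // order checked in VerifyVideoStatistic():
+ // {max_avg_bitrate_mismatch_percent, max_time_to_reach_target_bitrate_sec,
+ // max_avg_framerate_mismatch_percent, max_avg_buffer_level_sec,
+ // max_max_key_frame_delay_sec, max_max_delta_frame_delay_sec,
+ // max_num_spatial_resizes, max_num_key_frames}; QualityThresholds are
+ // {min_avg_psnr, min_min_psnr, min_avg_ssim, min_min_ssim}.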
+ std::vector<RateControlThresholds> rc_thresholds = {
+ {5, 1, 0, 1, 0.3, 0.1, 0, 1}};
+
+ std::vector<QualityThresholds> quality_thresholds = {{37, 36, 0.94, 0.92}};
+
+ fixture->RunTest(rate_profiles, &rc_thresholds, &quality_thresholds, nullptr);
+}
+
+TEST(VideoCodecTestLibvpx, ChangeBitrateVP9) {
+ auto config = CreateConfig();
+ config.SetCodecSettings(cricket::kVp9CodecName, 1, 1, 1, false, true, false,
+ kCifWidth, kCifHeight);
+ const auto frame_checker = std::make_unique<QpFrameChecker>();
+ config.encoded_frame_checker = frame_checker.get();
+ auto fixture = CreateVideoCodecTestFixture(config);
+
+ std::vector<RateProfile> rate_profiles = {
+ {200, 30, 0}, // target_kbps, input_fps, frame_num
+ {700, 30, 100},
+ {500, 30, 200}};
+
+ std::vector<RateControlThresholds> rc_thresholds = {
+ {5, 2, 0, 1, 0.5, 0.1, 0, 1},
+ {15, 3, 0, 1, 0.5, 0.1, 0, 0},
+ {11, 2, 0, 1, 0.5, 0.1, 0, 0}};
+
+ std::vector<QualityThresholds> quality_thresholds = {
+ {34, 33, 0.90, 0.88}, {38, 35, 0.95, 0.91}, {35, 34, 0.93, 0.90}};
+
+ fixture->RunTest(rate_profiles, &rc_thresholds, &quality_thresholds, nullptr);
+}
+
+TEST(VideoCodecTestLibvpx, ChangeFramerateVP9) {
+ auto config = CreateConfig();
+ config.SetCodecSettings(cricket::kVp9CodecName, 1, 1, 1, false, true, false,
+ kCifWidth, kCifHeight);
+ const auto frame_checker = std::make_unique<QpFrameChecker>();
+ config.encoded_frame_checker = frame_checker.get();
+ auto fixture = CreateVideoCodecTestFixture(config);
+
+ std::vector<RateProfile> rate_profiles = {
+ {100, 24, 0}, // target_kbps, input_fps, frame_num
+ {100, 15, 100},
+ {100, 10, 200}};
+
+ // Framerate mismatch should be lower for lower framerate.
+ std::vector<RateControlThresholds> rc_thresholds = {
+ {10, 2, 40, 1, 0.5, 0.2, 0, 1},
+ {8, 2, 5, 1, 0.5, 0.2, 0, 0},
+ {5, 2, 0, 1, 0.5, 0.3, 0, 0}};
+
+ // Quality should be higher for lower framerates for the same content.
+ std::vector<QualityThresholds> quality_thresholds = {
+ {33, 32, 0.88, 0.86}, {33.5, 32, 0.90, 0.86}, {33.5, 31.5, 0.90, 0.85}};
+
+ fixture->RunTest(rate_profiles, &rc_thresholds, &quality_thresholds, nullptr);
+}
+
+TEST(VideoCodecTestLibvpx, DenoiserOnVP9) {
+ auto config = CreateConfig();
+ config.SetCodecSettings(cricket::kVp9CodecName, 1, 1, 1, true, true, false,
+ kCifWidth, kCifHeight);
+ config.num_frames = kNumFramesShort;
+ const auto frame_checker = std::make_unique<QpFrameChecker>();
+ config.encoded_frame_checker = frame_checker.get();
+ auto fixture = CreateVideoCodecTestFixture(config);
+
+ std::vector<RateProfile> rate_profiles = {{500, 30, 0}};
+
+ std::vector<RateControlThresholds> rc_thresholds = {
+ {5, 1, 0, 1, 0.3, 0.1, 0, 1}};
+
+ std::vector<QualityThresholds> quality_thresholds = {{37.5, 36, 0.94, 0.93}};
+
+ fixture->RunTest(rate_profiles, &rc_thresholds, &quality_thresholds, nullptr);
+}
+
+TEST(VideoCodecTestLibvpx, VeryLowBitrateVP9) {
+ auto config = CreateConfig();
+ config.SetCodecSettings(cricket::kVp9CodecName, 1, 1, 1, false, true, true,
+ kCifWidth, kCifHeight);
+ const auto frame_checker = std::make_unique<QpFrameChecker>();
+ config.encoded_frame_checker = frame_checker.get();
+ auto fixture = CreateVideoCodecTestFixture(config);
+
+ std::vector<RateProfile> rate_profiles = {{50, 30, 0}};
+
+ std::vector<RateControlThresholds> rc_thresholds = {
+ {15, 3, 75, 1, 0.5, 0.4, 2, 1}};
+
+ std::vector<QualityThresholds> quality_thresholds = {{28, 25, 0.80, 0.65}};
+
+ fixture->RunTest(rate_profiles, &rc_thresholds, &quality_thresholds, nullptr);
+}
+
+// TODO(marpan): Add a temporal layer test for VP9 once the required changes
+// land in the VP9 wrapper.
+
+#endif // defined(RTC_ENABLE_VP9)
+
+TEST(VideoCodecTestLibvpx, HighBitrateVP8) {
+ auto config = CreateConfig();
+ config.SetCodecSettings(cricket::kVp8CodecName, 1, 1, 1, true, true, false,
+ kCifWidth, kCifHeight);
+ config.num_frames = kNumFramesShort;
+ const auto frame_checker = std::make_unique<QpFrameChecker>();
+ config.encoded_frame_checker = frame_checker.get();
+ auto fixture = CreateVideoCodecTestFixture(config);
+
+ std::vector<RateProfile> rate_profiles = {{500, 30, 0}};
+
+ std::vector<RateControlThresholds> rc_thresholds = {
+ {5, 1, 0, 1, 0.2, 0.1, 0, 1}};
+
+#if defined(WEBRTC_ARCH_ARM) || defined(WEBRTC_ARCH_ARM64)
+ std::vector<QualityThresholds> quality_thresholds = {{35, 33, 0.91, 0.89}};
+#else
+ std::vector<QualityThresholds> quality_thresholds = {{37, 35, 0.93, 0.91}};
+#endif
+ fixture->RunTest(rate_profiles, &rc_thresholds, &quality_thresholds, nullptr);
+}
+
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_ChangeBitrateVP8 DISABLED_ChangeBitrateVP8
+#else
+#define MAYBE_ChangeBitrateVP8 ChangeBitrateVP8
+#endif
+TEST(VideoCodecTestLibvpx, MAYBE_ChangeBitrateVP8) {
+ auto config = CreateConfig();
+ config.SetCodecSettings(cricket::kVp8CodecName, 1, 1, 1, true, true, false,
+ kCifWidth, kCifHeight);
+ const auto frame_checker = std::make_unique<QpFrameChecker>();
+ config.encoded_frame_checker = frame_checker.get();
+ auto fixture = CreateVideoCodecTestFixture(config);
+
+ std::vector<RateProfile> rate_profiles = {
+ {200, 30, 0}, // target_kbps, input_fps, frame_num
+ {800, 30, 100},
+ {500, 30, 200}};
+
+ std::vector<RateControlThresholds> rc_thresholds = {
+ {5, 1, 0, 1, 0.2, 0.1, 0, 1},
+ {15.5, 1, 0, 1, 0.2, 0.1, 0, 0},
+ {15, 1, 0, 1, 0.2, 0.1, 0, 0}};
+
+#if defined(WEBRTC_ARCH_ARM) || defined(WEBRTC_ARCH_ARM64)
+ std::vector<QualityThresholds> quality_thresholds = {
+ {31.8, 31, 0.86, 0.85}, {36, 34.8, 0.92, 0.90}, {33.5, 32, 0.90, 0.88}};
+#else
+ std::vector<QualityThresholds> quality_thresholds = {
+ {33, 32, 0.89, 0.88}, {38, 36, 0.94, 0.93}, {35, 34, 0.92, 0.91}};
+#endif
+ fixture->RunTest(rate_profiles, &rc_thresholds, &quality_thresholds, nullptr);
+}
+
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_ChangeFramerateVP8 DISABLED_ChangeFramerateVP8
+#else
+#define MAYBE_ChangeFramerateVP8 ChangeFramerateVP8
+#endif
+TEST(VideoCodecTestLibvpx, MAYBE_ChangeFramerateVP8) {
+ auto config = CreateConfig();
+ config.SetCodecSettings(cricket::kVp8CodecName, 1, 1, 1, true, true, false,
+ kCifWidth, kCifHeight);
+ const auto frame_checker = std::make_unique<QpFrameChecker>();
+ config.encoded_frame_checker = frame_checker.get();
+ auto fixture = CreateVideoCodecTestFixture(config);
+
+ std::vector<RateProfile> rate_profiles = {
+ {80, 24, 0}, // target_kbps, input_fps, frame_index_rate_update
+ {80, 15, 100},
+ {80, 10, 200}};
+
+#if defined(WEBRTC_ARCH_ARM) || defined(WEBRTC_ARCH_ARM64)
+ std::vector<RateControlThresholds> rc_thresholds = {
+ {10, 2.42, 60, 1, 0.3, 0.3, 0, 1},
+ {10, 2, 30, 1, 0.3, 0.3, 0, 0},
+ {10, 2, 10, 1, 0.3, 0.2, 0, 0}};
+#else
+ std::vector<RateControlThresholds> rc_thresholds = {
+ {10, 2, 20, 1, 0.3, 0.15, 0, 1},
+ {5, 2, 5, 1, 0.3, 0.15, 0, 0},
+ {4, 2, 1, 1, 0.3, 0.2, 0, 0}};
+#endif
+
+#if defined(WEBRTC_ARCH_ARM) || defined(WEBRTC_ARCH_ARM64)
+ std::vector<QualityThresholds> quality_thresholds = {
+ {31, 30, 0.85, 0.84}, {31.4, 30.5, 0.86, 0.84}, {30.5, 29, 0.83, 0.78}};
+#else
+ std::vector<QualityThresholds> quality_thresholds = {
+ {31, 30, 0.87, 0.85}, {32, 31, 0.88, 0.85}, {32, 30, 0.87, 0.82}};
+#endif
+ fixture->RunTest(rate_profiles, &rc_thresholds, &quality_thresholds, nullptr);
+}
+
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_TemporalLayersVP8 DISABLED_TemporalLayersVP8
+#else
+#define MAYBE_TemporalLayersVP8 TemporalLayersVP8
+#endif
+TEST(VideoCodecTestLibvpx, MAYBE_TemporalLayersVP8) {
+ auto config = CreateConfig();
+ config.SetCodecSettings(cricket::kVp8CodecName, 1, 1, 3, true, true, false,
+ kCifWidth, kCifHeight);
+ const auto frame_checker = std::make_unique<QpFrameChecker>();
+ config.encoded_frame_checker = frame_checker.get();
+ auto fixture = CreateVideoCodecTestFixture(config);
+
+ std::vector<RateProfile> rate_profiles = {{200, 30, 0}, {400, 30, 150}};
+
+#if defined(WEBRTC_ARCH_ARM) || defined(WEBRTC_ARCH_ARM64)
+ std::vector<RateControlThresholds> rc_thresholds = {
+ {10, 1, 2.1, 1, 0.2, 0.1, 0, 1}, {12, 2, 3, 1, 0.2, 0.1, 0, 1}};
+#else
+ std::vector<RateControlThresholds> rc_thresholds = {
+ {5, 1, 0, 1, 0.2, 0.1, 0, 1}, {10, 2, 0, 1, 0.2, 0.1, 0, 1}};
+#endif
+// Min SSIM drops because of the high-motion scene with a complex background
+// (trees).
+#if defined(WEBRTC_ARCH_ARM) || defined(WEBRTC_ARCH_ARM64)
+ std::vector<QualityThresholds> quality_thresholds = {{31, 30, 0.85, 0.83},
+ {31, 28, 0.85, 0.75}};
+#else
+ std::vector<QualityThresholds> quality_thresholds = {{32, 30, 0.88, 0.85},
+ {33, 30, 0.89, 0.83}};
+#endif
+ fixture->RunTest(rate_profiles, &rc_thresholds, &quality_thresholds, nullptr);
+}
+
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_MultiresVP8 DISABLED_MultiresVP8
+#else
+#define MAYBE_MultiresVP8 MultiresVP8
+#endif
+TEST(VideoCodecTestLibvpx, MAYBE_MultiresVP8) {
+ auto config = CreateConfig();
+ config.filename = "ConferenceMotion_1280_720_50";
+ config.filepath = ResourcePath(config.filename, "yuv");
+ config.num_frames = 100;
+ config.SetCodecSettings(cricket::kVp8CodecName, 3, 1, 3, true, true, false,
+ 1280, 720);
+ const auto frame_checker = std::make_unique<QpFrameChecker>();
+ config.encoded_frame_checker = frame_checker.get();
+ auto fixture = CreateVideoCodecTestFixture(config);
+
+ std::vector<RateProfile> rate_profiles = {{1500, 30, 0}};
+#if defined(WEBRTC_ARCH_ARM) || defined(WEBRTC_ARCH_ARM64)
+ std::vector<RateControlThresholds> rc_thresholds = {
+ {4.1, 1.04, 7, 0.18, 0.14, 0.08, 0, 1}};
+#else
+ std::vector<RateControlThresholds> rc_thresholds = {
+ {5, 1, 5, 1, 0.3, 0.1, 0, 1}};
+#endif
+ std::vector<QualityThresholds> quality_thresholds = {{34, 32, 0.90, 0.88}};
+
+ fixture->RunTest(rate_profiles, &rc_thresholds, &quality_thresholds, nullptr);
+}
+
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_SimulcastVP8 DISABLED_SimulcastVP8
+#else
+#define MAYBE_SimulcastVP8 SimulcastVP8
+#endif
+TEST(VideoCodecTestLibvpx, MAYBE_SimulcastVP8) {
+ auto config = CreateConfig();
+ config.filename = "ConferenceMotion_1280_720_50";
+ config.filepath = ResourcePath(config.filename, "yuv");
+ config.num_frames = 100;
+ config.SetCodecSettings(cricket::kVp8CodecName, 3, 1, 3, true, true, false,
+ 1280, 720);
+ const auto frame_checker = std::make_unique<QpFrameChecker>();
+ config.encoded_frame_checker = frame_checker.get();
+
+ InternalEncoderFactory internal_encoder_factory;
+ std::unique_ptr<VideoEncoderFactory> adapted_encoder_factory =
+ std::make_unique<FunctionVideoEncoderFactory>([&]() {
+ return std::make_unique<SimulcastEncoderAdapter>(
+ &internal_encoder_factory, SdpVideoFormat(cricket::kVp8CodecName));
+ });
+ std::unique_ptr<InternalDecoderFactory> internal_decoder_factory(
+ new InternalDecoderFactory());
+
+ auto fixture =
+ CreateVideoCodecTestFixture(config, std::move(internal_decoder_factory),
+ std::move(adapted_encoder_factory));
+
+ std::vector<RateProfile> rate_profiles = {{1500, 30, 0}};
+
+ std::vector<RateControlThresholds> rc_thresholds = {
+ {20, 5, 90, 1, 0.5, 0.3, 0, 1}};
+ std::vector<QualityThresholds> quality_thresholds = {{34, 32, 0.90, 0.88}};
+
+ fixture->RunTest(rate_profiles, &rc_thresholds, &quality_thresholds, nullptr);
+}
+
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_SvcVP9 DISABLED_SvcVP9
+#else
+#define MAYBE_SvcVP9 SvcVP9
+#endif
+TEST(VideoCodecTestLibvpx, MAYBE_SvcVP9) {
+ auto config = CreateConfig();
+ config.filename = "ConferenceMotion_1280_720_50";
+ config.filepath = ResourcePath(config.filename, "yuv");
+ config.num_frames = 100;
+ config.SetCodecSettings(cricket::kVp9CodecName, 1, 3, 3, true, true, false,
+ 1280, 720);
+ const auto frame_checker = std::make_unique<QpFrameChecker>();
+ config.encoded_frame_checker = frame_checker.get();
+ auto fixture = CreateVideoCodecTestFixture(config);
+
+ std::vector<RateProfile> rate_profiles = {{1500, 30, 0}};
+
+ std::vector<RateControlThresholds> rc_thresholds = {
+ {5, 1, 5, 1, 0.3, 0.1, 0, 1}};
+ std::vector<QualityThresholds> quality_thresholds = {{36, 34, 0.93, 0.90}};
+
+ fixture->RunTest(rate_profiles, &rc_thresholds, &quality_thresholds, nullptr);
+}
+
+TEST(VideoCodecTestLibvpx, DISABLED_MultiresVP8RdPerf) {
+ auto config = CreateConfig();
+ config.filename = "FourPeople_1280x720_30";
+ config.filepath = ResourcePath(config.filename, "yuv");
+ config.num_frames = 300;
+ config.print_frame_level_stats = true;
+ config.SetCodecSettings(cricket::kVp8CodecName, 3, 1, 3, true, true, false,
+ 1280, 720);
+ const auto frame_checker = std::make_unique<QpFrameChecker>();
+ config.encoded_frame_checker = frame_checker.get();
+ auto fixture = CreateVideoCodecTestFixture(config);
+
+ std::map<size_t, std::vector<VideoStatistics>> rd_stats;
+ for (size_t bitrate_kbps : kBitrateRdPerfKbps) {
+ std::vector<RateProfile> rate_profiles = {{bitrate_kbps, 30, 0}};
+
+ fixture->RunTest(rate_profiles, nullptr, nullptr, nullptr);
+
+ rd_stats[bitrate_kbps] =
+ fixture->GetStats().SliceAndCalcLayerVideoStatistic(
+ kNumFirstFramesToSkipAtRdPerfAnalysis, config.num_frames - 1);
+ }
+
+ PrintRdPerf(rd_stats);
+}
+
+TEST(VideoCodecTestLibvpx, DISABLED_SvcVP9RdPerf) {
+ auto config = CreateConfig();
+ config.filename = "FourPeople_1280x720_30";
+ config.filepath = ResourcePath(config.filename, "yuv");
+ config.num_frames = 300;
+ config.print_frame_level_stats = true;
+ config.SetCodecSettings(cricket::kVp9CodecName, 1, 3, 3, true, true, false,
+ 1280, 720);
+ const auto frame_checker = std::make_unique<QpFrameChecker>();
+ config.encoded_frame_checker = frame_checker.get();
+ auto fixture = CreateVideoCodecTestFixture(config);
+
+ std::map<size_t, std::vector<VideoStatistics>> rd_stats;
+ for (size_t bitrate_kbps : kBitrateRdPerfKbps) {
+ std::vector<RateProfile> rate_profiles = {{bitrate_kbps, 30, 0}};
+
+ fixture->RunTest(rate_profiles, nullptr, nullptr, nullptr);
+
+ rd_stats[bitrate_kbps] =
+ fixture->GetStats().SliceAndCalcLayerVideoStatistic(
+ kNumFirstFramesToSkipAtRdPerfAnalysis, config.num_frames - 1);
+ }
+
+ PrintRdPerf(rd_stats);
+}
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/test/videocodec_test_mediacodec.cc b/third_party/libwebrtc/modules/video_coding/codecs/test/videocodec_test_mediacodec.cc
new file mode 100644
index 0000000000..8a1cf01a66
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/test/videocodec_test_mediacodec.cc
@@ -0,0 +1,241 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <memory>
+#include <string>
+#include <tuple>
+#include <vector>
+
+#include "api/test/create_videocodec_test_fixture.h"
+#include "media/base/media_constants.h"
+#include "modules/video_coding/codecs/test/android_codec_factory_helper.h"
+#include "modules/video_coding/codecs/test/videocodec_test_fixture_impl.h"
+#include "test/gtest.h"
+#include "test/testsupport/file_utils.h"
+
+namespace webrtc {
+namespace test {
+
+namespace {
+const int kForemanNumFrames = 300;
+const int kForemanFramerateFps = 30;
+
+const size_t kConstRateIntervalSec = 10;
+const std::vector<webrtc::test::RateProfile> kBitRateHighLowHigh = {
+ {/*target_kbps=*/3000, /*input_fps=*/30, /*frame_num=*/0},
+ {/*target_kbps=*/1500, /*input_fps=*/30, /*frame_num=*/300},
+ {/*target_kbps=*/750, /*input_fps=*/30, /*frame_num=*/600},
+ {/*target_kbps=*/1500, /*input_fps=*/30, /*frame_num=*/900},
+ {/*target_kbps=*/3000, /*input_fps=*/30, /*frame_num=*/1200}};
+
+const std::vector<webrtc::test::RateProfile> kBitRateLowHighLow = {
+ {/*target_kbps=*/750, /*input_fps=*/30, /*frame_num=*/0},
+ {/*target_kbps=*/1500, /*input_fps=*/30, /*frame_num=*/300},
+ {/*target_kbps=*/3000, /*input_fps=*/30, /*frame_num=*/600},
+ {/*target_kbps=*/1500, /*input_fps=*/30, /*frame_num=*/900},
+ {/*target_kbps=*/720, /*input_fps=*/30, /*frame_num=*/1200}};
+
+const std::vector<webrtc::test::RateProfile> kFrameRateHighLowHigh = {
+ {/*target_kbps=*/2000, /*input_fps=*/30, /*frame_num=*/0},
+ {/*target_kbps=*/2000, /*input_fps=*/15, /*frame_num=*/300},
+ {/*target_kbps=*/2000, /*input_fps=*/7.5, /*frame_num=*/450},
+ {/*target_kbps=*/2000, /*input_fps=*/15, /*frame_num=*/525},
+ {/*target_kbps=*/2000, /*input_fps=*/30, /*frame_num=*/675}};
+
+const std::vector<webrtc::test::RateProfile> kFrameRateLowHighLow = {
+ {/*target_kbps=*/2000, /*input_fps=*/7.5, /*frame_num=*/0},
+ {/*target_kbps=*/2000, /*input_fps=*/15, /*frame_num=*/75},
+ {/*target_kbps=*/2000, /*input_fps=*/30, /*frame_num=*/225},
+ {/*target_kbps=*/2000, /*input_fps=*/15, /*frame_num=*/525},
+ {/*target_kbps=*/2000, /*input_fps=*/7.5, /*frame_num=*/775}};
+
+VideoCodecTestFixture::Config CreateConfig() {
+ VideoCodecTestFixture::Config config;
+ config.filename = "foreman_cif";
+ config.filepath = ResourcePath(config.filename, "yuv");
+ config.num_frames = kForemanNumFrames;
+ // Encode in real time to avoid overwhelming the OpenMAX buffers in the
+ // Android MediaCodec.
+ config.encode_in_real_time = true;
+ return config;
+}
+
+std::unique_ptr<VideoCodecTestFixture> CreateTestFixtureWithConfig(
+ VideoCodecTestFixture::Config config) {
+ InitializeAndroidObjects(); // Idempotent.
+ auto encoder_factory = CreateAndroidEncoderFactory();
+ auto decoder_factory = CreateAndroidDecoderFactory();
+ return CreateVideoCodecTestFixture(config, std::move(decoder_factory),
+ std::move(encoder_factory));
+}
+} // namespace
+
+TEST(VideoCodecTestMediaCodec, ForemanCif500kbpsVp8) {
+ auto config = CreateConfig();
+ config.SetCodecSettings(cricket::kVp8CodecName, 1, 1, 1, false, false, false,
+ 352, 288);
+ auto fixture = CreateTestFixtureWithConfig(config);
+
+ std::vector<RateProfile> rate_profiles = {{500, kForemanFramerateFps, 0}};
+
+ // The thresholds below may have to be tweaked to let even poor MediaCodec
+ // implementations pass. If this test fails on the bots, disable it and
+ // ping brandtr@.
+ std::vector<RateControlThresholds> rc_thresholds = {
+ {10, 1, 1, 0.1, 0.2, 0.1, 0, 1}};
+
+ std::vector<QualityThresholds> quality_thresholds = {{36, 31, 0.92, 0.86}};
+
+ fixture->RunTest(rate_profiles, &rc_thresholds, &quality_thresholds, nullptr);
+}
+
+TEST(VideoCodecTestMediaCodec, ForemanCif500kbpsH264CBP) {
+ auto config = CreateConfig();
+ const auto frame_checker =
+ std::make_unique<VideoCodecTestFixtureImpl::H264KeyframeChecker>();
+ config.encoded_frame_checker = frame_checker.get();
+ config.SetCodecSettings(cricket::kH264CodecName, 1, 1, 1, false, false, false,
+ 352, 288);
+ auto fixture = CreateTestFixtureWithConfig(config);
+
+ std::vector<RateProfile> rate_profiles = {{500, kForemanFramerateFps, 0}};
+
+ // The thresholds below may have to be tweaked to let even poor MediaCodec
+ // implementations pass. If this test fails on the bots, disable it and
+ // ping brandtr@.
+ std::vector<RateControlThresholds> rc_thresholds = {
+ {10, 1, 1, 0.1, 0.2, 0.1, 0, 1}};
+
+ std::vector<QualityThresholds> quality_thresholds = {{36, 31, 0.92, 0.86}};
+
+ fixture->RunTest(rate_profiles, &rc_thresholds, &quality_thresholds, nullptr);
+}
+
+// TODO(brandtr): Enable this test when we have trybots/buildbots with
+// HW encoders that support CHP.
+TEST(VideoCodecTestMediaCodec, DISABLED_ForemanCif500kbpsH264CHP) {
+ auto config = CreateConfig();
+ const auto frame_checker =
+ std::make_unique<VideoCodecTestFixtureImpl::H264KeyframeChecker>();
+
+ config.h264_codec_settings.profile = H264Profile::kProfileConstrainedHigh;
+ config.encoded_frame_checker = frame_checker.get();
+ config.SetCodecSettings(cricket::kH264CodecName, 1, 1, 1, false, false, false,
+ 352, 288);
+ auto fixture = CreateTestFixtureWithConfig(config);
+
+ std::vector<RateProfile> rate_profiles = {{500, kForemanFramerateFps, 0}};
+
+ // The thresholds below may have to be tweaked to let even poor MediaCodec
+ // implementations pass. If this test fails on the bots, disable it and
+ // ping brandtr@.
+ std::vector<RateControlThresholds> rc_thresholds = {
+ {5, 1, 0, 0.1, 0.2, 0.1, 0, 1}};
+
+ std::vector<QualityThresholds> quality_thresholds = {{37, 35, 0.93, 0.91}};
+
+ fixture->RunTest(rate_profiles, &rc_thresholds, &quality_thresholds, nullptr);
+}
+
+TEST(VideoCodecTestMediaCodec, ForemanMixedRes100kbpsVp8H264) {
+ auto config = CreateConfig();
+ const int kNumFrames = 30;
+ const std::vector<std::string> codecs = {cricket::kVp8CodecName,
+ cricket::kH264CodecName};
+ const std::vector<std::tuple<int, int>> resolutions = {
+ {128, 96}, {176, 144}, {320, 240}, {480, 272}};
+ const std::vector<RateProfile> rate_profiles = {
+ {100, kForemanFramerateFps, 0}};
+ const std::vector<QualityThresholds> quality_thresholds = {
+ {29, 26, 0.8, 0.75}};
+
+ for (const auto& codec : codecs) {
+ for (const auto& resolution : resolutions) {
+ const int width = std::get<0>(resolution);
+ const int height = std::get<1>(resolution);
+ config.filename = std::string("foreman_") + std::to_string(width) + "x" +
+ std::to_string(height);
+ config.filepath = ResourcePath(config.filename, "yuv");
+ config.num_frames = kNumFrames;
+ config.SetCodecSettings(codec, 1, 1, 1, false, false, false, width,
+ height);
+
+ auto fixture = CreateTestFixtureWithConfig(config);
+ fixture->RunTest(rate_profiles, nullptr /* rc_thresholds */,
+ &quality_thresholds, nullptr /* bs_thresholds */);
+ }
+ }
+}
+
+class VideoCodecTestMediaCodecRateAdaptation
+ : public ::testing::TestWithParam<
+ std::tuple<std::vector<webrtc::test::RateProfile>, std::string>> {};
+
+TEST_P(VideoCodecTestMediaCodecRateAdaptation, DISABLED_RateAdaptation) {
+ const std::vector<webrtc::test::RateProfile> rate_profile =
+ std::get<0>(GetParam());
+ const std::string codec_name = std::get<1>(GetParam());
+
+ VideoCodecTestFixture::Config config;
+ config.filename = "FourPeople_1280x720_30";
+ config.filepath = ResourcePath(config.filename, "yuv");
+ config.num_frames = rate_profile.back().frame_num +
+ static_cast<size_t>(kConstRateIntervalSec *
+ rate_profile.back().input_fps);
+ config.encode_in_real_time = true;
+ config.SetCodecSettings(codec_name, 1, 1, 1, false, false, false, 1280, 720);
+
+ auto fixture = CreateTestFixtureWithConfig(config);
+ fixture->RunTest(rate_profile, nullptr, nullptr, nullptr);
+
+ for (size_t i = 0; i < rate_profile.size(); ++i) {
+ const size_t num_frames =
+ static_cast<size_t>(rate_profile[i].input_fps * kConstRateIntervalSec);
+
+ auto stats = fixture->GetStats().SliceAndCalcLayerVideoStatistic(
+ rate_profile[i].frame_num, rate_profile[i].frame_num + num_frames - 1);
+ ASSERT_EQ(stats.size(), 1u);
+
+ // Bitrate mismatch is <= 10%.
+ EXPECT_LE(stats[0].avg_bitrate_mismatch_pct, 10);
+ EXPECT_GE(stats[0].avg_bitrate_mismatch_pct, -10);
+
+    // The average frame transmission delay and processing latency should be
+    // <= 100..250 ms, depending on the frame rate.
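+    // For example: at 30 fps, 1 / 30 is ~0.033 s and clamps up to the 0.1 s
+    // floor; at 5 fps, 1 / 5 = 0.2 s lies inside [0.1, 0.25] and is used
+    // as-is.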
+ const double expected_delay_sec =
+ std::min(std::max(1 / rate_profile[i].input_fps, 0.1), 0.25);
+ EXPECT_LE(stats[0].avg_delay_sec, expected_delay_sec);
+ EXPECT_LE(stats[0].avg_encode_latency_sec, expected_delay_sec);
+ EXPECT_LE(stats[0].avg_decode_latency_sec, expected_delay_sec);
+
+ // Frame drops are not expected.
+ EXPECT_EQ(stats[0].num_encoded_frames, num_frames);
+ EXPECT_EQ(stats[0].num_decoded_frames, num_frames);
+
+ // Periodic keyframes are not expected.
+ EXPECT_EQ(stats[0].num_key_frames, i == 0 ? 1u : 0);
+
+    // Ensure the codec delivers reasonable spatial quality.
+ EXPECT_GE(stats[0].avg_psnr_y, 35);
+ }
+}
+
+INSTANTIATE_TEST_SUITE_P(
+ RateAdaptation,
+ VideoCodecTestMediaCodecRateAdaptation,
+ ::testing::Combine(::testing::Values(kBitRateLowHighLow,
+ kBitRateHighLowHigh,
+ kFrameRateLowHighLow,
+ kFrameRateHighLowHigh),
+ ::testing::Values(cricket::kVp8CodecName,
+ cricket::kVp9CodecName,
+ cricket::kH264CodecName)));
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/test/videocodec_test_openh264.cc b/third_party/libwebrtc/modules/video_coding/codecs/test/videocodec_test_openh264.cc
new file mode 100644
index 0000000000..6513074bad
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/test/videocodec_test_openh264.cc
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <memory>
+#include <vector>
+
+#include "api/test/create_videocodec_test_fixture.h"
+#include "media/base/media_constants.h"
+#include "modules/video_coding/codecs/test/videocodec_test_fixture_impl.h"
+#include "test/gtest.h"
+#include "test/testsupport/file_utils.h"
+
+namespace webrtc {
+namespace test {
+
+namespace {
+// Codec settings.
+const int kCifWidth = 352;
+const int kCifHeight = 288;
+const int kNumFrames = 100;
+
+VideoCodecTestFixture::Config CreateConfig() {
+ VideoCodecTestFixture::Config config;
+ config.filename = "foreman_cif";
+ config.filepath = ResourcePath(config.filename, "yuv");
+ config.num_frames = kNumFrames;
+  // Only allow the encoder/decoder to use a single core, for predictability.
+ config.use_single_core = true;
+ return config;
+}
+} // namespace
+
+TEST(VideoCodecTestOpenH264, ConstantHighBitrate) {
+ auto frame_checker =
+ std::make_unique<VideoCodecTestFixtureImpl::H264KeyframeChecker>();
+ auto config = CreateConfig();
+ config.SetCodecSettings(cricket::kH264CodecName, 1, 1, 1, false, true, false,
+ kCifWidth, kCifHeight);
+ config.encoded_frame_checker = frame_checker.get();
+ auto fixture = CreateVideoCodecTestFixture(config);
+
+ std::vector<RateProfile> rate_profiles = {{500, 30, 0}};
+
+ std::vector<RateControlThresholds> rc_thresholds = {
+ {5, 1, 0, 0.1, 0.2, 0.1, 0, 1}};
+
+ std::vector<QualityThresholds> quality_thresholds = {{37, 35, 0.93, 0.91}};
+
+ fixture->RunTest(rate_profiles, &rc_thresholds, &quality_thresholds, nullptr);
+}
+
+// H264: Enable SingleNalUnit packetization mode. The encoder should split
+// large frames into multiple slices and limit the length of NAL units.
+TEST(VideoCodecTestOpenH264, SingleNalUnit) {
+ auto frame_checker =
+ std::make_unique<VideoCodecTestFixtureImpl::H264KeyframeChecker>();
+ auto config = CreateConfig();
+ config.h264_codec_settings.packetization_mode =
+ H264PacketizationMode::SingleNalUnit;
+ config.max_payload_size_bytes = 500;
+ config.SetCodecSettings(cricket::kH264CodecName, 1, 1, 1, false, true, false,
+ kCifWidth, kCifHeight);
+ config.encoded_frame_checker = frame_checker.get();
+ auto fixture = CreateVideoCodecTestFixture(config);
+
+ std::vector<RateProfile> rate_profiles = {{500, 30, 0}};
+
+ std::vector<RateControlThresholds> rc_thresholds = {
+ {5, 1, 0, 0.1, 0.2, 0.1, 0, 1}};
+
+ std::vector<QualityThresholds> quality_thresholds = {{37, 35, 0.93, 0.91}};
+
+ BitstreamThresholds bs_thresholds = {config.max_payload_size_bytes};
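+  // With `max_payload_size_bytes` set to 500 above, this asserts that no
+  // NAL unit produced by the encoder exceeds 500 bytes.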
+
+ fixture->RunTest(rate_profiles, &rc_thresholds, &quality_thresholds,
+ &bs_thresholds);
+}
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/test/videocodec_test_stats_impl.cc b/third_party/libwebrtc/modules/video_coding/codecs/test/videocodec_test_stats_impl.cc
new file mode 100644
index 0000000000..efb7502e5d
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/test/videocodec_test_stats_impl.cc
@@ -0,0 +1,405 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/codecs/test/videocodec_test_stats_impl.h"
+
+#include <algorithm>
+#include <cmath>
+#include <iterator>
+#include <limits>
+#include <numeric>
+
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/numerics/running_statistics.h"
+#include "rtc_base/strings/string_builder.h"
+
+namespace webrtc {
+namespace test {
+
+using FrameStatistics = VideoCodecTestStats::FrameStatistics;
+using VideoStatistics = VideoCodecTestStats::VideoStatistics;
+
+namespace {
+const int kMaxBitrateMismatchPercent = 20;
+}
+
+VideoCodecTestStatsImpl::VideoCodecTestStatsImpl() = default;
+VideoCodecTestStatsImpl::~VideoCodecTestStatsImpl() = default;
+
+void VideoCodecTestStatsImpl::AddFrame(const FrameStatistics& frame_stat) {
+ const size_t timestamp = frame_stat.rtp_timestamp;
+ const size_t layer_idx = frame_stat.spatial_idx;
+ RTC_DCHECK(rtp_timestamp_to_frame_num_[layer_idx].find(timestamp) ==
+ rtp_timestamp_to_frame_num_[layer_idx].end());
+ rtp_timestamp_to_frame_num_[layer_idx][timestamp] = frame_stat.frame_number;
+ layer_stats_[layer_idx].push_back(frame_stat);
+}
+
+FrameStatistics* VideoCodecTestStatsImpl::GetFrame(size_t frame_num,
+ size_t layer_idx) {
+ RTC_CHECK_LT(frame_num, layer_stats_[layer_idx].size());
+ return &layer_stats_[layer_idx][frame_num];
+}
+
+FrameStatistics* VideoCodecTestStatsImpl::GetFrameWithTimestamp(
+ size_t timestamp,
+ size_t layer_idx) {
+ RTC_DCHECK(rtp_timestamp_to_frame_num_[layer_idx].find(timestamp) !=
+ rtp_timestamp_to_frame_num_[layer_idx].end());
+
+ return GetFrame(rtp_timestamp_to_frame_num_[layer_idx][timestamp], layer_idx);
+}
+
+std::vector<FrameStatistics> VideoCodecTestStatsImpl::GetFrameStatistics() {
+ size_t capacity = 0;
+ for (const auto& layer_stat : layer_stats_) {
+ capacity += layer_stat.second.size();
+ }
+
+ std::vector<FrameStatistics> frame_statistics;
+ frame_statistics.reserve(capacity);
+ for (const auto& layer_stat : layer_stats_) {
+ std::copy(layer_stat.second.cbegin(), layer_stat.second.cend(),
+ std::back_inserter(frame_statistics));
+ }
+
+ return frame_statistics;
+}
+
+std::vector<VideoStatistics>
+VideoCodecTestStatsImpl::SliceAndCalcLayerVideoStatistic(
+ size_t first_frame_num,
+ size_t last_frame_num) {
+ std::vector<VideoStatistics> layer_stats;
+
+ size_t num_spatial_layers = 0;
+ size_t num_temporal_layers = 0;
+ GetNumberOfEncodedLayers(first_frame_num, last_frame_num, &num_spatial_layers,
+ &num_temporal_layers);
+ RTC_CHECK_GT(num_spatial_layers, 0);
+ RTC_CHECK_GT(num_temporal_layers, 0);
+
+ for (size_t spatial_idx = 0; spatial_idx < num_spatial_layers;
+ ++spatial_idx) {
+ for (size_t temporal_idx = 0; temporal_idx < num_temporal_layers;
+ ++temporal_idx) {
+ VideoStatistics layer_stat = SliceAndCalcVideoStatistic(
+ first_frame_num, last_frame_num, spatial_idx, temporal_idx, false);
+ layer_stats.push_back(layer_stat);
+ }
+ }
+
+ return layer_stats;
+}
+
+VideoStatistics VideoCodecTestStatsImpl::SliceAndCalcAggregatedVideoStatistic(
+ size_t first_frame_num,
+ size_t last_frame_num) {
+ size_t num_spatial_layers = 0;
+ size_t num_temporal_layers = 0;
+ GetNumberOfEncodedLayers(first_frame_num, last_frame_num, &num_spatial_layers,
+ &num_temporal_layers);
+ RTC_CHECK_GT(num_spatial_layers, 0);
+ RTC_CHECK_GT(num_temporal_layers, 0);
+
+ return SliceAndCalcVideoStatistic(first_frame_num, last_frame_num,
+ num_spatial_layers - 1,
+ num_temporal_layers - 1, true);
+}
+
+size_t VideoCodecTestStatsImpl::Size(size_t spatial_idx) {
+ return layer_stats_[spatial_idx].size();
+}
+
+void VideoCodecTestStatsImpl::Clear() {
+ layer_stats_.clear();
+ rtp_timestamp_to_frame_num_.clear();
+}
+
+FrameStatistics VideoCodecTestStatsImpl::AggregateFrameStatistic(
+ size_t frame_num,
+ size_t spatial_idx,
+ bool aggregate_independent_layers) {
+ FrameStatistics frame_stat = *GetFrame(frame_num, spatial_idx);
+ bool inter_layer_predicted = frame_stat.inter_layer_predicted;
+ while (spatial_idx-- > 0) {
+ if (aggregate_independent_layers || inter_layer_predicted) {
+ FrameStatistics* base_frame_stat = GetFrame(frame_num, spatial_idx);
+ frame_stat.length_bytes += base_frame_stat->length_bytes;
+ frame_stat.target_bitrate_kbps += base_frame_stat->target_bitrate_kbps;
+
+ inter_layer_predicted = base_frame_stat->inter_layer_predicted;
+ }
+ }
+
+ return frame_stat;
+}
+
+size_t VideoCodecTestStatsImpl::CalcLayerTargetBitrateKbps(
+ size_t first_frame_num,
+ size_t last_frame_num,
+ size_t spatial_idx,
+ size_t temporal_idx,
+ bool aggregate_independent_layers) {
+ size_t target_bitrate_kbps = 0;
+
+  // We don't know whether a superframe includes all required spatial layers,
+  // because of possible frame drops. Run through all frames in the specified
+  // range and return the maximum target bitrate. Assume that the target
+  // bitrate in the frame statistics is specified per temporal layer.
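+  // Illustrative example with assumed numbers: if TL0 targets 100 kbps and
+  // TL1 adds 50 kbps, TL0 frames report 100 and TL1 frames report 150, so
+  // the maximum over frames with temporal_idx <= 1 is the cumulative TL1
+  // target of 150 kbps.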
+ for (size_t frame_num = first_frame_num; frame_num <= last_frame_num;
+ ++frame_num) {
+ FrameStatistics superframe = AggregateFrameStatistic(
+ frame_num, spatial_idx, aggregate_independent_layers);
+
+ if (superframe.temporal_idx <= temporal_idx) {
+ target_bitrate_kbps =
+ std::max(target_bitrate_kbps, superframe.target_bitrate_kbps);
+ }
+ }
+
+ RTC_DCHECK_GT(target_bitrate_kbps, 0);
+ return target_bitrate_kbps;
+}
+
+VideoStatistics VideoCodecTestStatsImpl::SliceAndCalcVideoStatistic(
+ size_t first_frame_num,
+ size_t last_frame_num,
+ size_t spatial_idx,
+ size_t temporal_idx,
+ bool aggregate_independent_layers) {
+ VideoStatistics video_stat;
+
+ float buffer_level_bits = 0.0f;
+ webrtc_impl::RunningStatistics<float> buffer_level_sec;
+
+ webrtc_impl::RunningStatistics<size_t> key_frame_size_bytes;
+ webrtc_impl::RunningStatistics<size_t> delta_frame_size_bytes;
+
+ webrtc_impl::RunningStatistics<size_t> frame_encoding_time_us;
+ webrtc_impl::RunningStatistics<size_t> frame_decoding_time_us;
+
+ webrtc_impl::RunningStatistics<float> psnr_y;
+ webrtc_impl::RunningStatistics<float> psnr_u;
+ webrtc_impl::RunningStatistics<float> psnr_v;
+ webrtc_impl::RunningStatistics<float> psnr;
+ webrtc_impl::RunningStatistics<float> ssim;
+ webrtc_impl::RunningStatistics<int> qp;
+
+ size_t rtp_timestamp_first_frame = 0;
+ size_t rtp_timestamp_prev_frame = 0;
+
+ FrameStatistics last_successfully_decoded_frame(0, 0, 0);
+
+ const size_t target_bitrate_kbps =
+ CalcLayerTargetBitrateKbps(first_frame_num, last_frame_num, spatial_idx,
+ temporal_idx, aggregate_independent_layers);
+ const size_t target_bitrate_bps = 1000 * target_bitrate_kbps;
+ RTC_CHECK_GT(target_bitrate_kbps, 0); // We divide by `target_bitrate_kbps`.
+
+ for (size_t frame_num = first_frame_num; frame_num <= last_frame_num;
+ ++frame_num) {
+ FrameStatistics frame_stat = AggregateFrameStatistic(
+ frame_num, spatial_idx, aggregate_independent_layers);
+
+ float time_since_first_frame_sec =
+ 1.0f * (frame_stat.rtp_timestamp - rtp_timestamp_first_frame) /
+ kVideoPayloadTypeFrequency;
+ float time_since_prev_frame_sec =
+ 1.0f * (frame_stat.rtp_timestamp - rtp_timestamp_prev_frame) /
+ kVideoPayloadTypeFrequency;
+
+ if (frame_stat.temporal_idx > temporal_idx) {
+ continue;
+ }
+
+ buffer_level_bits -= time_since_prev_frame_sec * 1000 * target_bitrate_kbps;
+ buffer_level_bits = std::max(0.0f, buffer_level_bits);
+ buffer_level_bits += 8.0 * frame_stat.length_bytes;
+ buffer_level_sec.AddSample(buffer_level_bits /
+ (1000 * target_bitrate_kbps));
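+    // The updates above implement a leaky bucket: it drains at the target
+    // rate, is filled by each encoded frame, and `buffer_level_sec` tracks
+    // how long a receiver capped at the target bitrate needs to drain the
+    // backlog. E.g., with assumed numbers, a 25000-byte frame hitting an
+    // empty 500 kbps bucket adds 200000 bits, i.e. 0.4 s of delay.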
+
+ video_stat.length_bytes += frame_stat.length_bytes;
+
+ if (frame_stat.encoding_successful) {
+ ++video_stat.num_encoded_frames;
+
+ if (frame_stat.frame_type == VideoFrameType::kVideoFrameKey) {
+ key_frame_size_bytes.AddSample(frame_stat.length_bytes);
+ ++video_stat.num_key_frames;
+ } else {
+ delta_frame_size_bytes.AddSample(frame_stat.length_bytes);
+ }
+
+ frame_encoding_time_us.AddSample(frame_stat.encode_time_us);
+ qp.AddSample(frame_stat.qp);
+
+ video_stat.max_nalu_size_bytes = std::max(video_stat.max_nalu_size_bytes,
+ frame_stat.max_nalu_size_bytes);
+ }
+
+ if (frame_stat.decoding_successful) {
+ ++video_stat.num_decoded_frames;
+
+ video_stat.width = std::max(video_stat.width, frame_stat.decoded_width);
+ video_stat.height =
+ std::max(video_stat.height, frame_stat.decoded_height);
+
+ if (video_stat.num_decoded_frames > 1) {
+ if (last_successfully_decoded_frame.decoded_width !=
+ frame_stat.decoded_width ||
+ last_successfully_decoded_frame.decoded_height !=
+ frame_stat.decoded_height) {
+ ++video_stat.num_spatial_resizes;
+ }
+ }
+
+ frame_decoding_time_us.AddSample(frame_stat.decode_time_us);
+ last_successfully_decoded_frame = frame_stat;
+ }
+
+ if (frame_stat.quality_analysis_successful) {
+ psnr_y.AddSample(frame_stat.psnr_y);
+ psnr_u.AddSample(frame_stat.psnr_u);
+ psnr_v.AddSample(frame_stat.psnr_v);
+ psnr.AddSample(frame_stat.psnr);
+ ssim.AddSample(frame_stat.ssim);
+ }
+
+ if (video_stat.num_input_frames > 0) {
+ if (video_stat.time_to_reach_target_bitrate_sec == 0.0f) {
+ RTC_CHECK_GT(time_since_first_frame_sec, 0);
+ const float curr_kbps =
+ 8.0 * video_stat.length_bytes / 1000 / time_since_first_frame_sec;
+ const float bitrate_mismatch_percent =
+ 100 * std::fabs(curr_kbps - target_bitrate_kbps) /
+ target_bitrate_kbps;
+ if (bitrate_mismatch_percent < kMaxBitrateMismatchPercent) {
+ video_stat.time_to_reach_target_bitrate_sec =
+ time_since_first_frame_sec;
+ }
+ }
+ }
+
+ rtp_timestamp_prev_frame = frame_stat.rtp_timestamp;
+ if (video_stat.num_input_frames == 0) {
+ rtp_timestamp_first_frame = frame_stat.rtp_timestamp;
+ }
+
+ ++video_stat.num_input_frames;
+ }
+
+ const size_t num_frames = last_frame_num - first_frame_num + 1;
+ const size_t timestamp_delta =
+ GetFrame(first_frame_num + 1, spatial_idx)->rtp_timestamp -
+ GetFrame(first_frame_num, spatial_idx)->rtp_timestamp;
+ RTC_CHECK_GT(timestamp_delta, 0);
+ const float input_framerate_fps =
+ 1.0 * kVideoPayloadTypeFrequency / timestamp_delta;
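+  // E.g., with the 90 kHz RTP video clock, a timestamp delta of 3000 ticks
+  // gives 90000 / 3000 = 30 fps.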
+ RTC_CHECK_GT(input_framerate_fps, 0);
+ const float duration_sec = num_frames / input_framerate_fps;
+
+ video_stat.target_bitrate_kbps = target_bitrate_kbps;
+ video_stat.input_framerate_fps = input_framerate_fps;
+
+ video_stat.spatial_idx = spatial_idx;
+ video_stat.temporal_idx = temporal_idx;
+
+ RTC_CHECK_GT(duration_sec, 0);
+ const float bitrate_bps = 8 * video_stat.length_bytes / duration_sec;
+ video_stat.bitrate_kbps = static_cast<size_t>((bitrate_bps + 500) / 1000);
+ video_stat.framerate_fps = video_stat.num_encoded_frames / duration_sec;
+
+  // http://bugs.webrtc.org/10400: On Windows, we only get millisecond
+  // granularity in the frame encode/decode timing measurements, so we must
+  // guard against division by zero here.
+ const float mean_encode_time_us =
+ frame_encoding_time_us.GetMean().value_or(0);
+ video_stat.enc_speed_fps = mean_encode_time_us > 0.0f
+ ? 1000000.0f / mean_encode_time_us
+ : std::numeric_limits<float>::max();
+ const float mean_decode_time_us =
+ frame_decoding_time_us.GetMean().value_or(0);
+ video_stat.dec_speed_fps = mean_decode_time_us > 0.0f
+ ? 1000000.0f / mean_decode_time_us
+ : std::numeric_limits<float>::max();
+
+ video_stat.avg_encode_latency_sec =
+ frame_encoding_time_us.GetMean().value_or(0) / 1000000.0f;
+ video_stat.max_encode_latency_sec =
+ frame_encoding_time_us.GetMax().value_or(0) / 1000000.0f;
+
+ video_stat.avg_decode_latency_sec =
+ frame_decoding_time_us.GetMean().value_or(0) / 1000000.0f;
+ video_stat.max_decode_latency_sec =
+ frame_decoding_time_us.GetMax().value_or(0) / 1000000.0f;
+
+ auto MaxDelaySec = [target_bitrate_kbps](
+ const webrtc_impl::RunningStatistics<size_t>& stats) {
+ return 8 * stats.GetMax().value_or(0) / 1000 / target_bitrate_kbps;
+ };
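+  // Unit check with assumed numbers: a 25000-byte key frame against a
+  // 500 kbps target gives 8 * 25000 / 1000 / 500 = 0.4 s of delay.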
+
+ video_stat.avg_delay_sec = buffer_level_sec.GetMean().value_or(0);
+ video_stat.max_key_frame_delay_sec = MaxDelaySec(key_frame_size_bytes);
+ video_stat.max_delta_frame_delay_sec = MaxDelaySec(delta_frame_size_bytes);
+
+ video_stat.avg_bitrate_mismatch_pct =
+ 100 * (bitrate_bps - target_bitrate_bps) / target_bitrate_bps;
+ video_stat.avg_framerate_mismatch_pct =
+ 100 * (video_stat.framerate_fps - input_framerate_fps) /
+ input_framerate_fps;
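+  // E.g., with assumed numbers: 550 kbps delivered against a 500 kbps target
+  // is a +10% bitrate mismatch, and 27 fps delivered against 30 fps input is
+  // a -10% framerate mismatch.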
+
+ video_stat.avg_key_frame_size_bytes =
+ key_frame_size_bytes.GetMean().value_or(0);
+ video_stat.avg_delta_frame_size_bytes =
+ delta_frame_size_bytes.GetMean().value_or(0);
+ video_stat.avg_qp = qp.GetMean().value_or(0);
+
+ video_stat.avg_psnr_y = psnr_y.GetMean().value_or(0);
+ video_stat.avg_psnr_u = psnr_u.GetMean().value_or(0);
+ video_stat.avg_psnr_v = psnr_v.GetMean().value_or(0);
+ video_stat.avg_psnr = psnr.GetMean().value_or(0);
+ video_stat.min_psnr =
+ psnr.GetMin().value_or(std::numeric_limits<float>::max());
+ video_stat.avg_ssim = ssim.GetMean().value_or(0);
+ video_stat.min_ssim =
+ ssim.GetMin().value_or(std::numeric_limits<float>::max());
+
+ return video_stat;
+}
+
+void VideoCodecTestStatsImpl::GetNumberOfEncodedLayers(
+ size_t first_frame_num,
+ size_t last_frame_num,
+ size_t* num_encoded_spatial_layers,
+ size_t* num_encoded_temporal_layers) {
+ *num_encoded_spatial_layers = 0;
+ *num_encoded_temporal_layers = 0;
+
+ const size_t num_spatial_layers = layer_stats_.size();
+
+ for (size_t frame_num = first_frame_num; frame_num <= last_frame_num;
+ ++frame_num) {
+ for (size_t spatial_idx = 0; spatial_idx < num_spatial_layers;
+ ++spatial_idx) {
+ FrameStatistics* frame_stat = GetFrame(frame_num, spatial_idx);
+ if (frame_stat->encoding_successful) {
+ *num_encoded_spatial_layers =
+ std::max(*num_encoded_spatial_layers, frame_stat->spatial_idx + 1);
+ *num_encoded_temporal_layers = std::max(*num_encoded_temporal_layers,
+ frame_stat->temporal_idx + 1);
+ }
+ }
+ }
+}
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/test/videocodec_test_stats_impl.h b/third_party/libwebrtc/modules/video_coding/codecs/test/videocodec_test_stats_impl.h
new file mode 100644
index 0000000000..61850d3622
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/test/videocodec_test_stats_impl.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_CODECS_TEST_VIDEOCODEC_TEST_STATS_IMPL_H_
+#define MODULES_VIDEO_CODING_CODECS_TEST_VIDEOCODEC_TEST_STATS_IMPL_H_
+
+#include <stddef.h>
+
+#include <map>
+#include <string>
+#include <vector>
+
+#include "api/test/videocodec_test_stats.h" // NOLINT(build/include)
+
+namespace webrtc {
+namespace test {
+
+// Statistics for a sequence of processed frames. This class is not
+// thread-safe.
+class VideoCodecTestStatsImpl : public VideoCodecTestStats {
+ public:
+ VideoCodecTestStatsImpl();
+ ~VideoCodecTestStatsImpl() override;
+
+ // Creates a FrameStatistics for the next frame to be processed.
+ void AddFrame(const FrameStatistics& frame_stat);
+
+ // Returns the FrameStatistics corresponding to `frame_number` or `timestamp`.
+ FrameStatistics* GetFrame(size_t frame_number, size_t spatial_idx);
+ FrameStatistics* GetFrameWithTimestamp(size_t timestamp, size_t spatial_idx);
+
+ // Implements VideoCodecTestStats.
+ std::vector<FrameStatistics> GetFrameStatistics() override;
+ std::vector<VideoStatistics> SliceAndCalcLayerVideoStatistic(
+ size_t first_frame_num,
+ size_t last_frame_num) override;
+
+ VideoStatistics SliceAndCalcAggregatedVideoStatistic(size_t first_frame_num,
+ size_t last_frame_num);
+
+ size_t Size(size_t spatial_idx);
+
+ void Clear();
+
+ private:
+ VideoCodecTestStats::FrameStatistics AggregateFrameStatistic(
+ size_t frame_num,
+ size_t spatial_idx,
+ bool aggregate_independent_layers);
+
+ size_t CalcLayerTargetBitrateKbps(size_t first_frame_num,
+ size_t last_frame_num,
+ size_t spatial_idx,
+ size_t temporal_idx,
+ bool aggregate_independent_layers);
+
+ VideoCodecTestStats::VideoStatistics SliceAndCalcVideoStatistic(
+ size_t first_frame_num,
+ size_t last_frame_num,
+ size_t spatial_idx,
+ size_t temporal_idx,
+ bool aggregate_independent_layers);
+
+ void GetNumberOfEncodedLayers(size_t first_frame_num,
+ size_t last_frame_num,
+ size_t* num_encoded_spatial_layers,
+ size_t* num_encoded_temporal_layers);
+
+ // layer_idx -> stats.
+ std::map<size_t, std::vector<FrameStatistics>> layer_stats_;
+ // layer_idx -> rtp_timestamp -> frame_num.
+ std::map<size_t, std::map<size_t, size_t>> rtp_timestamp_to_frame_num_;
+};
+
+} // namespace test
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_CODECS_TEST_VIDEOCODEC_TEST_STATS_IMPL_H_
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/test/videocodec_test_stats_impl_unittest.cc b/third_party/libwebrtc/modules/video_coding/codecs/test/videocodec_test_stats_impl_unittest.cc
new file mode 100644
index 0000000000..6477b6ab8c
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/test/videocodec_test_stats_impl_unittest.cc
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/codecs/test/videocodec_test_stats_impl.h"
+
+#include <vector>
+
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace test {
+
+using FrameStatistics = VideoCodecTestStatsImpl::FrameStatistics;
+
+namespace {
+
+const size_t kTimestamp = 12345;
+
+using ::testing::AllOf;
+using ::testing::Contains;
+using ::testing::Field;
+
+} // namespace
+
+TEST(StatsTest, AddAndGetFrame) {
+ VideoCodecTestStatsImpl stats;
+ stats.AddFrame(FrameStatistics(0, kTimestamp, 0));
+ FrameStatistics* frame_stat = stats.GetFrame(0u, 0);
+ EXPECT_EQ(0u, frame_stat->frame_number);
+ EXPECT_EQ(kTimestamp, frame_stat->rtp_timestamp);
+}
+
+TEST(StatsTest, AddAndGetFrames) {
+ VideoCodecTestStatsImpl stats;
+ const size_t kNumFrames = 1000;
+ for (size_t i = 0; i < kNumFrames; ++i) {
+ stats.AddFrame(FrameStatistics(i, kTimestamp + i, 0));
+ FrameStatistics* frame_stat = stats.GetFrame(i, 0);
+ EXPECT_EQ(i, frame_stat->frame_number);
+ EXPECT_EQ(kTimestamp + i, frame_stat->rtp_timestamp);
+ }
+ EXPECT_EQ(kNumFrames, stats.Size(0));
+  // Look up a frame by its RTP timestamp.
+ size_t i = 22;
+ FrameStatistics* frame_stat = stats.GetFrameWithTimestamp(kTimestamp + i, 0);
+ EXPECT_EQ(i, frame_stat->frame_number);
+ EXPECT_EQ(kTimestamp + i, frame_stat->rtp_timestamp);
+}
+
+TEST(StatsTest, AddFrameLayering) {
+ VideoCodecTestStatsImpl stats;
+ for (size_t i = 0; i < 3; ++i) {
+ stats.AddFrame(FrameStatistics(0, kTimestamp + i, i));
+ FrameStatistics* frame_stat = stats.GetFrame(0u, i);
+ EXPECT_EQ(0u, frame_stat->frame_number);
+ EXPECT_EQ(kTimestamp, frame_stat->rtp_timestamp - i);
+ EXPECT_EQ(1u, stats.Size(i));
+ }
+}
+
+TEST(StatsTest, GetFrameStatistics) {
+ VideoCodecTestStatsImpl stats;
+
+ stats.AddFrame(FrameStatistics(0, kTimestamp, 0));
+ stats.AddFrame(FrameStatistics(0, kTimestamp, 1));
+ stats.AddFrame(FrameStatistics(1, kTimestamp + 3000, 0));
+ stats.AddFrame(FrameStatistics(1, kTimestamp + 3000, 1));
+
+ const std::vector<FrameStatistics> frame_stats = stats.GetFrameStatistics();
+
+ auto field_matcher = [](size_t frame_number, size_t spatial_idx) {
+ return AllOf(Field(&FrameStatistics::frame_number, frame_number),
+ Field(&FrameStatistics::spatial_idx, spatial_idx));
+ };
+ EXPECT_THAT(frame_stats, Contains(field_matcher(0, 0)));
+ EXPECT_THAT(frame_stats, Contains(field_matcher(0, 1)));
+ EXPECT_THAT(frame_stats, Contains(field_matcher(1, 0)));
+ EXPECT_THAT(frame_stats, Contains(field_matcher(1, 1)));
+}
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/test/videocodec_test_videotoolbox.cc b/third_party/libwebrtc/modules/video_coding/codecs/test/videocodec_test_videotoolbox.cc
new file mode 100644
index 0000000000..6df974362f
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/test/videocodec_test_videotoolbox.cc
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <memory>
+#include <vector>
+
+#include "api/test/create_videocodec_test_fixture.h"
+#include "media/base/media_constants.h"
+#include "modules/video_coding/codecs/test/objc_codec_factory_helper.h"
+#include "modules/video_coding/codecs/test/videocodec_test_fixture_impl.h"
+#include "test/gtest.h"
+#include "test/testsupport/file_utils.h"
+
+namespace webrtc {
+namespace test {
+
+namespace {
+const int kForemanNumFrames = 300;
+
+VideoCodecTestFixture::Config CreateConfig() {
+ VideoCodecTestFixture::Config config;
+ config.filename = "foreman_cif";
+ config.filepath = ResourcePath(config.filename, "yuv");
+ config.num_frames = kForemanNumFrames;
+ return config;
+}
+
+std::unique_ptr<VideoCodecTestFixture> CreateTestFixtureWithConfig(
+ VideoCodecTestFixture::Config config) {
+ auto decoder_factory = CreateObjCDecoderFactory();
+ auto encoder_factory = CreateObjCEncoderFactory();
+ return CreateVideoCodecTestFixture(config, std::move(decoder_factory),
+ std::move(encoder_factory));
+}
+} // namespace
+
+// TODO(webrtc:9099): Disabled until the issue is fixed.
+// HW codecs don't work on simulators. Only run these tests on a device.
+// #if TARGET_OS_IPHONE && !TARGET_IPHONE_SIMULATOR
+// #define MAYBE_TEST TEST
+// #else
+#define MAYBE_TEST(s, name) TEST(s, DISABLED_##name)
+// #endif
+
+// TODO(kthelgason): Use RC Thresholds when the internal bitrateAdjuster is no
+// longer in use.
+MAYBE_TEST(VideoCodecTestVideoToolbox, ForemanCif500kbpsH264CBP) {
+ const auto frame_checker =
+ std::make_unique<VideoCodecTestFixtureImpl::H264KeyframeChecker>();
+ auto config = CreateConfig();
+ config.SetCodecSettings(cricket::kH264CodecName, 1, 1, 1, false, false, false,
+ 352, 288);
+ config.encoded_frame_checker = frame_checker.get();
+ auto fixture = CreateTestFixtureWithConfig(config);
+
+ std::vector<RateProfile> rate_profiles = {{500, 30, 0}};
+
+ std::vector<QualityThresholds> quality_thresholds = {{33, 29, 0.9, 0.82}};
+
+ fixture->RunTest(rate_profiles, nullptr, &quality_thresholds, nullptr);
+}
+
+MAYBE_TEST(VideoCodecTestVideoToolbox, ForemanCif500kbpsH264CHP) {
+ const auto frame_checker =
+ std::make_unique<VideoCodecTestFixtureImpl::H264KeyframeChecker>();
+ auto config = CreateConfig();
+ config.h264_codec_settings.profile = H264Profile::kProfileConstrainedHigh;
+ config.SetCodecSettings(cricket::kH264CodecName, 1, 1, 1, false, false, false,
+ 352, 288);
+ config.encoded_frame_checker = frame_checker.get();
+ auto fixture = CreateTestFixtureWithConfig(config);
+
+ std::vector<RateProfile> rate_profiles = {{500, 30, 0}};
+
+ std::vector<QualityThresholds> quality_thresholds = {{33, 30, 0.91, 0.83}};
+
+ fixture->RunTest(rate_profiles, nullptr, &quality_thresholds, nullptr);
+}
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/test/videoprocessor.cc b/third_party/libwebrtc/modules/video_coding/codecs/test/videoprocessor.cc
new file mode 100644
index 0000000000..353a00df79
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/test/videoprocessor.cc
@@ -0,0 +1,700 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/codecs/test/videoprocessor.h"
+
+#include <string.h>
+
+#include <algorithm>
+#include <cstddef>
+#include <limits>
+#include <memory>
+#include <utility>
+
+#include "api/scoped_refptr.h"
+#include "api/video/builtin_video_bitrate_allocator_factory.h"
+#include "api/video/i420_buffer.h"
+#include "api/video/video_bitrate_allocator_factory.h"
+#include "api/video/video_frame_buffer.h"
+#include "api/video/video_rotation.h"
+#include "api/video_codecs/video_codec.h"
+#include "api/video_codecs/video_encoder.h"
+#include "common_video/h264/h264_common.h"
+#include "common_video/libyuv/include/webrtc_libyuv.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "modules/video_coding/codecs/interface/common_constants.h"
+#include "modules/video_coding/include/video_error_codes.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/time_utils.h"
+#include "test/gtest.h"
+#include "third_party/libyuv/include/libyuv/compare.h"
+#include "third_party/libyuv/include/libyuv/scale.h"
+
+namespace webrtc {
+namespace test {
+
+namespace {
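+// kVideoPayloadTypeFrequency is the 90 kHz RTP video clock, so this equals
+// 90 RTP ticks per millisecond.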
+const int kMsToRtpTimestamp = kVideoPayloadTypeFrequency / 1000;
+const int kMaxBufferedInputFrames = 20;
+
+const VideoEncoder::Capabilities kCapabilities(false);
+
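+// For H.264, walks the Annex B NALU boundaries of the encoded frame and
+// returns the size of the largest NAL unit payload; other codecs report 0.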
+size_t GetMaxNaluSizeBytes(const EncodedImage& encoded_frame,
+ const VideoCodecTestFixture::Config& config) {
+ if (config.codec_settings.codecType != kVideoCodecH264)
+ return 0;
+
+ std::vector<webrtc::H264::NaluIndex> nalu_indices =
+ webrtc::H264::FindNaluIndices(encoded_frame.data(), encoded_frame.size());
+
+ RTC_CHECK(!nalu_indices.empty());
+
+ size_t max_size = 0;
+ for (const webrtc::H264::NaluIndex& index : nalu_indices)
+ max_size = std::max(max_size, index.payload_size);
+
+ return max_size;
+}
+
+size_t GetTemporalLayerIndex(const CodecSpecificInfo& codec_specific) {
+ size_t temporal_idx = 0;
+ if (codec_specific.codecType == kVideoCodecVP8) {
+ temporal_idx = codec_specific.codecSpecific.VP8.temporalIdx;
+ } else if (codec_specific.codecType == kVideoCodecVP9) {
+ temporal_idx = codec_specific.codecSpecific.VP9.temporal_idx;
+ }
+ if (temporal_idx == kNoTemporalIdx) {
+ temporal_idx = 0;
+ }
+ return temporal_idx;
+}
+
+int GetElapsedTimeMicroseconds(int64_t start_ns, int64_t stop_ns) {
+ int64_t diff_us = (stop_ns - start_ns) / rtc::kNumNanosecsPerMicrosec;
+ RTC_DCHECK_GE(diff_us, std::numeric_limits<int>::min());
+ RTC_DCHECK_LE(diff_us, std::numeric_limits<int>::max());
+ return static_cast<int>(diff_us);
+}
+
+void CalculateFrameQuality(const I420BufferInterface& ref_buffer,
+ const I420BufferInterface& dec_buffer,
+ VideoCodecTestStats::FrameStatistics* frame_stat,
+ bool calc_ssim) {
+ if (ref_buffer.width() != dec_buffer.width() ||
+ ref_buffer.height() != dec_buffer.height()) {
+ RTC_CHECK_GE(ref_buffer.width(), dec_buffer.width());
+ RTC_CHECK_GE(ref_buffer.height(), dec_buffer.height());
+ // Downscale reference frame.
+ rtc::scoped_refptr<I420Buffer> scaled_buffer =
+ I420Buffer::Create(dec_buffer.width(), dec_buffer.height());
+ I420Scale(ref_buffer.DataY(), ref_buffer.StrideY(), ref_buffer.DataU(),
+ ref_buffer.StrideU(), ref_buffer.DataV(), ref_buffer.StrideV(),
+ ref_buffer.width(), ref_buffer.height(),
+ scaled_buffer->MutableDataY(), scaled_buffer->StrideY(),
+ scaled_buffer->MutableDataU(), scaled_buffer->StrideU(),
+ scaled_buffer->MutableDataV(), scaled_buffer->StrideV(),
+ scaled_buffer->width(), scaled_buffer->height(),
+ libyuv::kFilterBox);
+
+ CalculateFrameQuality(*scaled_buffer, dec_buffer, frame_stat, calc_ssim);
+ } else {
+ const uint64_t sse_y = libyuv::ComputeSumSquareErrorPlane(
+ dec_buffer.DataY(), dec_buffer.StrideY(), ref_buffer.DataY(),
+ ref_buffer.StrideY(), dec_buffer.width(), dec_buffer.height());
+
+ const uint64_t sse_u = libyuv::ComputeSumSquareErrorPlane(
+ dec_buffer.DataU(), dec_buffer.StrideU(), ref_buffer.DataU(),
+ ref_buffer.StrideU(), dec_buffer.width() / 2, dec_buffer.height() / 2);
+
+ const uint64_t sse_v = libyuv::ComputeSumSquareErrorPlane(
+ dec_buffer.DataV(), dec_buffer.StrideV(), ref_buffer.DataV(),
+ ref_buffer.StrideV(), dec_buffer.width() / 2, dec_buffer.height() / 2);
+
+ const size_t num_y_samples = dec_buffer.width() * dec_buffer.height();
+ const size_t num_u_samples =
+ dec_buffer.width() / 2 * dec_buffer.height() / 2;
+
+ frame_stat->psnr_y = libyuv::SumSquareErrorToPsnr(sse_y, num_y_samples);
+ frame_stat->psnr_u = libyuv::SumSquareErrorToPsnr(sse_u, num_u_samples);
+ frame_stat->psnr_v = libyuv::SumSquareErrorToPsnr(sse_v, num_u_samples);
+ frame_stat->psnr = libyuv::SumSquareErrorToPsnr(
+ sse_y + sse_u + sse_v, num_y_samples + 2 * num_u_samples);
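+    // SumSquareErrorToPsnr computes 10 * log10(255^2 * N / SSE), so e.g. a
+    // per-sample MSE of 1.0 maps to 10 * log10(255^2), i.e. ~48.1 dB.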
+
+ if (calc_ssim) {
+ frame_stat->ssim = I420SSIM(ref_buffer, dec_buffer);
+ }
+ }
+}
+
+} // namespace
+
+VideoProcessor::VideoProcessor(webrtc::VideoEncoder* encoder,
+ VideoDecoderList* decoders,
+ FrameReader* input_frame_reader,
+ const VideoCodecTestFixture::Config& config,
+ VideoCodecTestStatsImpl* stats,
+ IvfFileWriterMap* encoded_frame_writers,
+ FrameWriterList* decoded_frame_writers)
+ : config_(config),
+ num_simulcast_or_spatial_layers_(
+ std::max(config_.NumberOfSimulcastStreams(),
+ config_.NumberOfSpatialLayers())),
+ analyze_frame_quality_(!config_.measure_cpu),
+ stats_(stats),
+ encoder_(encoder),
+ decoders_(decoders),
+ bitrate_allocator_(
+ CreateBuiltinVideoBitrateAllocatorFactory()
+ ->CreateVideoBitrateAllocator(config_.codec_settings)),
+ framerate_fps_(0),
+ encode_callback_(this),
+ input_frame_reader_(input_frame_reader),
+ merged_encoded_frames_(num_simulcast_or_spatial_layers_),
+ encoded_frame_writers_(encoded_frame_writers),
+ decoded_frame_writers_(decoded_frame_writers),
+ last_inputed_frame_num_(0),
+ last_inputed_timestamp_(0),
+ first_encoded_frame_(num_simulcast_or_spatial_layers_, true),
+ last_encoded_frame_num_(num_simulcast_or_spatial_layers_),
+ first_decoded_frame_(num_simulcast_or_spatial_layers_, true),
+ last_decoded_frame_num_(num_simulcast_or_spatial_layers_),
+ last_decoded_frame_buffer_(num_simulcast_or_spatial_layers_),
+ post_encode_time_ns_(0),
+ is_finalized_(false) {
+ // Sanity checks.
+ RTC_CHECK(TaskQueueBase::Current())
+ << "VideoProcessor must be run on a task queue.";
+ RTC_CHECK(stats_);
+ RTC_CHECK(encoder_);
+ RTC_CHECK(decoders_);
+ RTC_CHECK_EQ(decoders_->size(), num_simulcast_or_spatial_layers_);
+ RTC_CHECK(input_frame_reader_);
+ RTC_CHECK(encoded_frame_writers_);
+ RTC_CHECK(!decoded_frame_writers ||
+ decoded_frame_writers->size() == num_simulcast_or_spatial_layers_);
+
+  // Set up the required callbacks for the encoder and decoders, and
+  // initialize them.
+ RTC_CHECK_EQ(encoder_->RegisterEncodeCompleteCallback(&encode_callback_),
+ WEBRTC_VIDEO_CODEC_OK);
+
+ // Initialize codecs so that they are ready to receive frames.
+ RTC_CHECK_EQ(encoder_->InitEncode(
+ &config_.codec_settings,
+ VideoEncoder::Settings(
+ kCapabilities, static_cast<int>(config_.NumberOfCores()),
+ config_.max_payload_size_bytes)),
+ WEBRTC_VIDEO_CODEC_OK);
+
+ for (size_t i = 0; i < num_simulcast_or_spatial_layers_; ++i) {
+ decode_callback_.push_back(
+ std::make_unique<VideoProcessorDecodeCompleteCallback>(this, i));
+ VideoDecoder::Settings decoder_settings;
+ decoder_settings.set_max_render_resolution(
+ {config_.codec_settings.width, config_.codec_settings.height});
+ decoder_settings.set_codec_type(config_.codec_settings.codecType);
+ decoder_settings.set_number_of_cores(config_.NumberOfCores());
+ RTC_CHECK(decoders_->at(i)->Configure(decoder_settings));
+ RTC_CHECK_EQ(decoders_->at(i)->RegisterDecodeCompleteCallback(
+ decode_callback_.at(i).get()),
+ WEBRTC_VIDEO_CODEC_OK);
+ }
+}
+
+VideoProcessor::~VideoProcessor() {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+
+ if (!is_finalized_) {
+ Finalize();
+ }
+
+ // Explicitly reset codecs, in case they don't do that themselves when they
+ // go out of scope.
+ RTC_CHECK_EQ(encoder_->Release(), WEBRTC_VIDEO_CODEC_OK);
+ encoder_->RegisterEncodeCompleteCallback(nullptr);
+ for (auto& decoder : *decoders_) {
+ RTC_CHECK_EQ(decoder->Release(), WEBRTC_VIDEO_CODEC_OK);
+ decoder->RegisterDecodeCompleteCallback(nullptr);
+ }
+
+ // Sanity check.
+ RTC_CHECK_LE(input_frames_.size(), kMaxBufferedInputFrames);
+}
+
+void VideoProcessor::ProcessFrame() {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+ RTC_DCHECK(!is_finalized_);
+
+ const size_t frame_number = last_inputed_frame_num_++;
+
+ // Get input frame and store for future quality calculation.
+ rtc::scoped_refptr<I420BufferInterface> buffer =
+ input_frame_reader_->ReadFrame();
+ RTC_CHECK(buffer) << "Tried to read too many frames from the file.";
+ const size_t timestamp =
+ last_inputed_timestamp_ +
+ static_cast<size_t>(kVideoPayloadTypeFrequency / framerate_fps_);
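+  // E.g., at 30 fps this advances the 90 kHz RTP timestamp by
+  // 90000 / 30 = 3000 ticks per frame.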
+ VideoFrame input_frame =
+ VideoFrame::Builder()
+ .set_video_frame_buffer(buffer)
+ .set_timestamp_rtp(static_cast<uint32_t>(timestamp))
+ .set_timestamp_ms(static_cast<int64_t>(timestamp / kMsToRtpTimestamp))
+ .set_rotation(webrtc::kVideoRotation_0)
+ .build();
+ // Store input frame as a reference for quality calculations.
+ if (config_.decode && !config_.measure_cpu) {
+ if (input_frames_.size() == kMaxBufferedInputFrames) {
+ input_frames_.erase(input_frames_.begin());
+ }
+
+ if (config_.reference_width != -1 && config_.reference_height != -1 &&
+ (input_frame.width() != config_.reference_width ||
+ input_frame.height() != config_.reference_height)) {
+ rtc::scoped_refptr<I420Buffer> scaled_buffer = I420Buffer::Create(
+ config_.codec_settings.width, config_.codec_settings.height);
+ scaled_buffer->ScaleFrom(*input_frame.video_frame_buffer()->ToI420());
+
+ VideoFrame scaled_reference_frame = input_frame;
+ scaled_reference_frame.set_video_frame_buffer(scaled_buffer);
+ input_frames_.emplace(frame_number, scaled_reference_frame);
+
+ if (config_.reference_width == config_.codec_settings.width &&
+ config_.reference_height == config_.codec_settings.height) {
+        // Both encoding and comparison use the same downscale factor; reuse
+        // the scaled frame for the encoder below.
+ input_frame = scaled_reference_frame;
+ }
+ } else {
+ input_frames_.emplace(frame_number, input_frame);
+ }
+ }
+ last_inputed_timestamp_ = timestamp;
+
+ post_encode_time_ns_ = 0;
+
+ // Create frame statistics object for all simulcast/spatial layers.
+ for (size_t i = 0; i < num_simulcast_or_spatial_layers_; ++i) {
+ FrameStatistics frame_stat(frame_number, timestamp, i);
+ stats_->AddFrame(frame_stat);
+ }
+
+ // For the highest measurement accuracy of the encode time, the start/stop
+ // time recordings should wrap the Encode call as tightly as possible.
+ const int64_t encode_start_ns = rtc::TimeNanos();
+ for (size_t i = 0; i < num_simulcast_or_spatial_layers_; ++i) {
+ FrameStatistics* frame_stat = stats_->GetFrame(frame_number, i);
+ frame_stat->encode_start_ns = encode_start_ns;
+ }
+
+ if (input_frame.width() != config_.codec_settings.width ||
+ input_frame.height() != config_.codec_settings.height) {
+ rtc::scoped_refptr<I420Buffer> scaled_buffer = I420Buffer::Create(
+ config_.codec_settings.width, config_.codec_settings.height);
+ scaled_buffer->ScaleFrom(*input_frame.video_frame_buffer()->ToI420());
+ input_frame.set_video_frame_buffer(scaled_buffer);
+ }
+
+ // Encode.
+ const std::vector<VideoFrameType> frame_types =
+ (frame_number == 0)
+ ? std::vector<VideoFrameType>{VideoFrameType::kVideoFrameKey}
+ : std::vector<VideoFrameType>{VideoFrameType::kVideoFrameDelta};
+ const int encode_return_code = encoder_->Encode(input_frame, &frame_types);
+ for (size_t i = 0; i < num_simulcast_or_spatial_layers_; ++i) {
+ FrameStatistics* frame_stat = stats_->GetFrame(frame_number, i);
+ frame_stat->encode_return_code = encode_return_code;
+ }
+}
+
+void VideoProcessor::SetRates(size_t bitrate_kbps, double framerate_fps) {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+ RTC_DCHECK(!is_finalized_);
+
+ framerate_fps_ = framerate_fps;
+ bitrate_allocation_ =
+ bitrate_allocator_->Allocate(VideoBitrateAllocationParameters(
+ static_cast<uint32_t>(bitrate_kbps * 1000), framerate_fps_));
+ encoder_->SetRates(
+ VideoEncoder::RateControlParameters(bitrate_allocation_, framerate_fps_));
+}
+
+int32_t VideoProcessor::VideoProcessorDecodeCompleteCallback::Decoded(
+ VideoFrame& image) {
+ // Post the callback to the right task queue, if needed.
+ if (!task_queue_->IsCurrent()) {
+    // There might be a limited number of output buffers; make a copy to
+    // ensure we don't block the decoder.
+ VideoFrame copy = VideoFrame::Builder()
+ .set_video_frame_buffer(I420Buffer::Copy(
+ *image.video_frame_buffer()->ToI420()))
+ .set_rotation(image.rotation())
+ .set_timestamp_us(image.timestamp_us())
+ .set_id(image.id())
+ .build();
+ copy.set_timestamp(image.timestamp());
+
+ task_queue_->PostTask([this, copy]() {
+ video_processor_->FrameDecoded(copy, simulcast_svc_idx_);
+ });
+ return 0;
+ }
+ video_processor_->FrameDecoded(image, simulcast_svc_idx_);
+ return 0;
+}
+
+void VideoProcessor::FrameEncoded(
+ const webrtc::EncodedImage& encoded_image,
+ const webrtc::CodecSpecificInfo& codec_specific) {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+
+ // For the highest measurement accuracy of the encode time, the start/stop
+ // time recordings should wrap the Encode call as tightly as possible.
+ const int64_t encode_stop_ns = rtc::TimeNanos();
+
+ const VideoCodecType codec_type = codec_specific.codecType;
+ if (config_.encoded_frame_checker) {
+ config_.encoded_frame_checker->CheckEncodedFrame(codec_type, encoded_image);
+ }
+
+ // Layer metadata.
+ size_t spatial_idx = encoded_image.SpatialIndex().value_or(0);
+ size_t temporal_idx = GetTemporalLayerIndex(codec_specific);
+
+ FrameStatistics* frame_stat =
+ stats_->GetFrameWithTimestamp(encoded_image.Timestamp(), spatial_idx);
+ const size_t frame_number = frame_stat->frame_number;
+
+ // Ensure that the encode order is monotonically increasing, within this
+ // simulcast/spatial layer.
+ RTC_CHECK(first_encoded_frame_[spatial_idx] ||
+ last_encoded_frame_num_[spatial_idx] < frame_number);
+
+ // Ensure SVC spatial layers are delivered in ascending order.
+ const size_t num_spatial_layers = config_.NumberOfSpatialLayers();
+ if (!first_encoded_frame_[spatial_idx] && num_spatial_layers > 1) {
+ for (size_t i = 0; i < spatial_idx; ++i) {
+ RTC_CHECK_LE(last_encoded_frame_num_[i], frame_number);
+ }
+ for (size_t i = spatial_idx + 1; i < num_simulcast_or_spatial_layers_;
+ ++i) {
+ RTC_CHECK_GT(frame_number, last_encoded_frame_num_[i]);
+ }
+ }
+ first_encoded_frame_[spatial_idx] = false;
+ last_encoded_frame_num_[spatial_idx] = frame_number;
+
+ // Update frame statistics.
+ frame_stat->encoding_successful = true;
+ frame_stat->encode_time_us = GetElapsedTimeMicroseconds(
+ frame_stat->encode_start_ns, encode_stop_ns - post_encode_time_ns_);
+ frame_stat->target_bitrate_kbps =
+ bitrate_allocation_.GetTemporalLayerSum(spatial_idx, temporal_idx) / 1000;
+ frame_stat->target_framerate_fps = framerate_fps_;
+ frame_stat->length_bytes = encoded_image.size();
+ frame_stat->frame_type = encoded_image._frameType;
+ frame_stat->temporal_idx = temporal_idx;
+ frame_stat->max_nalu_size_bytes = GetMaxNaluSizeBytes(encoded_image, config_);
+ frame_stat->qp = encoded_image.qp_;
+
+ if (codec_type == kVideoCodecVP9) {
+ const CodecSpecificInfoVP9& vp9_info = codec_specific.codecSpecific.VP9;
+ frame_stat->inter_layer_predicted = vp9_info.inter_layer_predicted;
+ frame_stat->non_ref_for_inter_layer_pred =
+ vp9_info.non_ref_for_inter_layer_pred;
+ } else {
+ frame_stat->inter_layer_predicted = false;
+ frame_stat->non_ref_for_inter_layer_pred = true;
+ }
+
+ const webrtc::EncodedImage* encoded_image_for_decode = &encoded_image;
+ if (config_.decode || !encoded_frame_writers_->empty()) {
+ if (num_spatial_layers > 1) {
+ encoded_image_for_decode = BuildAndStoreSuperframe(
+ encoded_image, codec_type, frame_number, spatial_idx,
+ frame_stat->inter_layer_predicted);
+ }
+ }
+
+ if (config_.decode) {
+ DecodeFrame(*encoded_image_for_decode, spatial_idx);
+
+ if (codec_specific.end_of_picture && num_spatial_layers > 1) {
+      // If inter-layer prediction is enabled and an upper layer was dropped,
+      // the base layer should be passed to the upper layer's decoder.
+      // Otherwise, that decoder won't be able to decode the next superframe.
+ const EncodedImage* base_image = nullptr;
+ const FrameStatistics* base_stat = nullptr;
+ for (size_t i = 0; i < num_spatial_layers; ++i) {
+ const bool layer_dropped = (first_decoded_frame_[i] ||
+ last_decoded_frame_num_[i] < frame_number);
+
+ // Ensure current layer was decoded.
+ RTC_CHECK(layer_dropped == false || i != spatial_idx);
+
+ if (!layer_dropped) {
+ base_image = &merged_encoded_frames_[i];
+ base_stat =
+ stats_->GetFrameWithTimestamp(encoded_image.Timestamp(), i);
+ } else if (base_image && !base_stat->non_ref_for_inter_layer_pred) {
+ DecodeFrame(*base_image, i);
+ }
+ }
+ }
+ } else {
+ frame_stat->decode_return_code = WEBRTC_VIDEO_CODEC_NO_OUTPUT;
+ }
+
+ // Since frames in higher TLs typically depend on frames in lower TLs,
+ // write out frames in lower TLs to bitstream dumps of higher TLs.
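+  // E.g., a TL0 frame is written to the TL0, TL1 and TL2 dumps, so that each
+  // dump remains independently decodable.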
+ for (size_t write_temporal_idx = temporal_idx;
+ write_temporal_idx < config_.NumberOfTemporalLayers();
+ ++write_temporal_idx) {
+ const VideoProcessor::LayerKey layer_key(spatial_idx, write_temporal_idx);
+ auto it = encoded_frame_writers_->find(layer_key);
+ if (it != encoded_frame_writers_->cend()) {
+ RTC_CHECK(it->second->WriteFrame(*encoded_image_for_decode,
+ config_.codec_settings.codecType));
+ }
+ }
+
+ if (!config_.encode_in_real_time) {
+    // To get the pure encode time of subsequent layers, measure the time
+    // spent in this encode callback and subtract it from their encode time.
+ post_encode_time_ns_ += rtc::TimeNanos() - encode_stop_ns;
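+    // E.g., if this callback spends 2 ms writing files, the encode time
+    // measured for the next layer is reduced by those 2 ms.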
+ }
+}
+
+void VideoProcessor::CalcFrameQuality(const I420BufferInterface& decoded_frame,
+ FrameStatistics* frame_stat) {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+
+ const auto reference_frame = input_frames_.find(frame_stat->frame_number);
+ RTC_CHECK(reference_frame != input_frames_.cend())
+ << "The codecs are either buffering too much, dropping too much, or "
+ "being too slow relative to the input frame rate.";
+
+ // SSIM calculation is not optimized. Skip it in real-time mode.
+ const bool calc_ssim = !config_.encode_in_real_time;
+ CalculateFrameQuality(*reference_frame->second.video_frame_buffer()->ToI420(),
+ decoded_frame, frame_stat, calc_ssim);
+
+ frame_stat->quality_analysis_successful = true;
+}
+
+void VideoProcessor::WriteDecodedFrame(const I420BufferInterface& decoded_frame,
+ FrameWriter& frame_writer) {
+ int input_video_width = config_.codec_settings.width;
+ int input_video_height = config_.codec_settings.height;
+
+ rtc::scoped_refptr<I420Buffer> scaled_buffer;
+ const I420BufferInterface* scaled_frame;
+
+ if (decoded_frame.width() == input_video_width &&
+ decoded_frame.height() == input_video_height) {
+ scaled_frame = &decoded_frame;
+ } else {
+ EXPECT_DOUBLE_EQ(
+ static_cast<double>(input_video_width) / input_video_height,
+ static_cast<double>(decoded_frame.width()) / decoded_frame.height());
+
+ scaled_buffer = I420Buffer::Create(input_video_width, input_video_height);
+ scaled_buffer->ScaleFrom(decoded_frame);
+
+ scaled_frame = scaled_buffer.get();
+ }
+
+ // Ensure there is no padding.
+ RTC_CHECK_EQ(scaled_frame->StrideY(), input_video_width);
+ RTC_CHECK_EQ(scaled_frame->StrideU(), input_video_width / 2);
+ RTC_CHECK_EQ(scaled_frame->StrideV(), input_video_width / 2);
+
+ RTC_CHECK_EQ(3 * input_video_width * input_video_height / 2,
+ frame_writer.FrameLength());
+
+ RTC_CHECK(frame_writer.WriteFrame(scaled_frame->DataY()));
+}
+
+void VideoProcessor::FrameDecoded(const VideoFrame& decoded_frame,
+ size_t spatial_idx) {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+
+ // For the highest measurement accuracy of the decode time, the start/stop
+ // time recordings should wrap the Decode call as tightly as possible.
+ const int64_t decode_stop_ns = rtc::TimeNanos();
+
+ FrameStatistics* frame_stat =
+ stats_->GetFrameWithTimestamp(decoded_frame.timestamp(), spatial_idx);
+ const size_t frame_number = frame_stat->frame_number;
+
+ if (!first_decoded_frame_[spatial_idx]) {
+ for (size_t dropped_frame_number = last_decoded_frame_num_[spatial_idx] + 1;
+ dropped_frame_number < frame_number; ++dropped_frame_number) {
+ FrameStatistics* dropped_frame_stat =
+ stats_->GetFrame(dropped_frame_number, spatial_idx);
+
+ if (analyze_frame_quality_ && config_.analyze_quality_of_dropped_frames) {
+ // Calculate frame quality comparing input frame with last decoded one.
+ CalcFrameQuality(*last_decoded_frame_buffer_[spatial_idx],
+ dropped_frame_stat);
+ }
+
+ if (decoded_frame_writers_ != nullptr) {
+        // Fill dropped frames with the last decoded frame, so that playback
+        // shows a freeze and the decoded layers stay in sync.
+ WriteDecodedFrame(*last_decoded_frame_buffer_[spatial_idx],
+ *decoded_frame_writers_->at(spatial_idx));
+ }
+ }
+ }
+
+ // Ensure that the decode order is monotonically increasing, within this
+ // simulcast/spatial layer.
+ RTC_CHECK(first_decoded_frame_[spatial_idx] ||
+ last_decoded_frame_num_[spatial_idx] < frame_number);
+ first_decoded_frame_[spatial_idx] = false;
+ last_decoded_frame_num_[spatial_idx] = frame_number;
+
+ // Update frame statistics.
+ frame_stat->decoding_successful = true;
+ frame_stat->decode_time_us =
+ GetElapsedTimeMicroseconds(frame_stat->decode_start_ns, decode_stop_ns);
+ frame_stat->decoded_width = decoded_frame.width();
+ frame_stat->decoded_height = decoded_frame.height();
+
+  // Skip the quality metrics calculation when it's not needed, so as not to
+  // affect CPU usage.
+ if (analyze_frame_quality_ || decoded_frame_writers_) {
+ // Save last decoded frame to handle possible future drops.
+ rtc::scoped_refptr<I420BufferInterface> i420buffer =
+ decoded_frame.video_frame_buffer()->ToI420();
+
+ // Copy decoded frame to a buffer without padding/stride such that we can
+ // dump Y, U and V planes into a file in one shot.
+ last_decoded_frame_buffer_[spatial_idx] = I420Buffer::Copy(
+ i420buffer->width(), i420buffer->height(), i420buffer->DataY(),
+ i420buffer->StrideY(), i420buffer->DataU(), i420buffer->StrideU(),
+ i420buffer->DataV(), i420buffer->StrideV());
+ }
+
+ if (analyze_frame_quality_) {
+ CalcFrameQuality(*decoded_frame.video_frame_buffer()->ToI420(), frame_stat);
+ }
+
+ if (decoded_frame_writers_ != nullptr) {
+ WriteDecodedFrame(*last_decoded_frame_buffer_[spatial_idx],
+ *decoded_frame_writers_->at(spatial_idx));
+ }
+
+ // Erase all buffered input frames that we have moved past for all
+ // simulcast/spatial layers. Never buffer more than
+ // `kMaxBufferedInputFrames` frames, to protect against long runs of
+ // consecutive frame drops for a particular layer.
+ const auto min_last_decoded_frame_num = std::min_element(
+ last_decoded_frame_num_.cbegin(), last_decoded_frame_num_.cend());
+ const size_t min_buffered_frame_num =
+ std::max(0, static_cast<int>(frame_number) - kMaxBufferedInputFrames + 1);
+ RTC_CHECK(min_last_decoded_frame_num != last_decoded_frame_num_.cend());
+ const auto input_frames_erase_before = input_frames_.lower_bound(
+ std::max(*min_last_decoded_frame_num, min_buffered_frame_num));
+ input_frames_.erase(input_frames_.cbegin(), input_frames_erase_before);
+}
+
+void VideoProcessor::DecodeFrame(const EncodedImage& encoded_image,
+ size_t spatial_idx) {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+ FrameStatistics* frame_stat =
+ stats_->GetFrameWithTimestamp(encoded_image.Timestamp(), spatial_idx);
+
+ frame_stat->decode_start_ns = rtc::TimeNanos();
+ frame_stat->decode_return_code =
+ decoders_->at(spatial_idx)->Decode(encoded_image, false, 0);
+}
+
+const webrtc::EncodedImage* VideoProcessor::BuildAndStoreSuperframe(
+ const EncodedImage& encoded_image,
+ const VideoCodecType codec,
+ size_t frame_number,
+ size_t spatial_idx,
+ bool inter_layer_predicted) {
+ // Should only be called for SVC.
+ RTC_CHECK_GT(config_.NumberOfSpatialLayers(), 1);
+
+ EncodedImage base_image;
+ RTC_CHECK_EQ(base_image.size(), 0);
+
+  // Each SVC layer is decoded by a dedicated decoder. Find the nearest
+  // non-dropped base frame and merge it with the current frame into a
+  // superframe.
+ if (inter_layer_predicted) {
+ for (int base_idx = static_cast<int>(spatial_idx) - 1; base_idx >= 0;
+ --base_idx) {
+ EncodedImage lower_layer = merged_encoded_frames_.at(base_idx);
+ if (lower_layer.Timestamp() == encoded_image.Timestamp()) {
+ base_image = lower_layer;
+ break;
+ }
+ }
+ }
+ const size_t payload_size_bytes = base_image.size() + encoded_image.size();
+
+ auto buffer = EncodedImageBuffer::Create(payload_size_bytes);
+ if (base_image.size()) {
+ RTC_CHECK(base_image.data());
+ memcpy(buffer->data(), base_image.data(), base_image.size());
+ }
+ memcpy(buffer->data() + base_image.size(), encoded_image.data(),
+ encoded_image.size());
+
+ EncodedImage copied_image = encoded_image;
+ copied_image.SetEncodedData(buffer);
+ if (base_image.size())
+ copied_image._frameType = base_image._frameType;
+
+ // Replace previous EncodedImage for this spatial layer.
+ merged_encoded_frames_.at(spatial_idx) = std::move(copied_image);
+
+ return &merged_encoded_frames_.at(spatial_idx);
+}
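+
+// Superframe layout sketch: with inter-layer prediction and three spatial
+// layers, the merged frame stored for layer 1 is [L0|L1], so the decoder for
+// layer 2 receives the concatenation [L0|L1|L2] for each timestamp. Since the
+// frame type is inherited from the base image, a keyframe base keeps the
+// superframe a keyframe.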
+
+void VideoProcessor::Finalize() {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+ RTC_DCHECK(!is_finalized_);
+ is_finalized_ = true;
+
+ if (!(analyze_frame_quality_ && config_.analyze_quality_of_dropped_frames) &&
+ decoded_frame_writers_ == nullptr) {
+ return;
+ }
+
+ for (size_t spatial_idx = 0; spatial_idx < num_simulcast_or_spatial_layers_;
+ ++spatial_idx) {
+ if (first_decoded_frame_[spatial_idx]) {
+ continue; // No decoded frames on this spatial layer.
+ }
+
+ for (size_t dropped_frame_number = last_decoded_frame_num_[spatial_idx] + 1;
+ dropped_frame_number < last_inputed_frame_num_;
+ ++dropped_frame_number) {
+ FrameStatistics* frame_stat =
+ stats_->GetFrame(dropped_frame_number, spatial_idx);
+
+ RTC_DCHECK(!frame_stat->decoding_successful);
+
+ if (analyze_frame_quality_ && config_.analyze_quality_of_dropped_frames) {
+ CalcFrameQuality(*last_decoded_frame_buffer_[spatial_idx], frame_stat);
+ }
+
+ if (decoded_frame_writers_ != nullptr) {
+ WriteDecodedFrame(*last_decoded_frame_buffer_[spatial_idx],
+ *decoded_frame_writers_->at(spatial_idx));
+ }
+ }
+ }
+}
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/test/videoprocessor.h b/third_party/libwebrtc/modules/video_coding/codecs/test/videoprocessor.h
new file mode 100644
index 0000000000..4c89c790a9
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/test/videoprocessor.h
@@ -0,0 +1,261 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_CODECS_TEST_VIDEOPROCESSOR_H_
+#define MODULES_VIDEO_CODING_CODECS_TEST_VIDEOPROCESSOR_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <map>
+#include <memory>
+#include <utility>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/sequence_checker.h"
+#include "api/task_queue/task_queue_base.h"
+#include "api/test/videocodec_test_fixture.h"
+#include "api/video/encoded_image.h"
+#include "api/video/i420_buffer.h"
+#include "api/video/video_bitrate_allocation.h"
+#include "api/video/video_bitrate_allocator.h"
+#include "api/video/video_frame.h"
+#include "api/video_codecs/video_decoder.h"
+#include "api/video_codecs/video_encoder.h"
+#include "modules/include/module_common_types.h"
+#include "modules/video_coding/codecs/test/videocodec_test_stats_impl.h"
+#include "modules/video_coding/include/video_codec_interface.h"
+#include "modules/video_coding/utility/ivf_file_writer.h"
+#include "rtc_base/buffer.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/system/no_unique_address.h"
+#include "rtc_base/thread_annotations.h"
+#include "test/testsupport/frame_reader.h"
+#include "test/testsupport/frame_writer.h"
+
+namespace webrtc {
+namespace test {
+
+// Handles encoding/decoding of video using the VideoEncoder/VideoDecoder
+// interfaces. This is done in a sequential manner in order to be able to
+// measure times properly.
+// The class processes one frame at a time from the configured input file.
+// It maintains state of how far into the source input file the processing
+// has progressed.
+class VideoProcessor {
+ public:
+ using VideoDecoderList = std::vector<std::unique_ptr<VideoDecoder>>;
+ using LayerKey = std::pair<int /* spatial_idx */, int /* temporal_idx */>;
+ using IvfFileWriterMap = std::map<LayerKey, std::unique_ptr<IvfFileWriter>>;
+ // TODO(brandtr): Consider changing FrameWriterList to be a FrameWriterMap,
+ // to be able to save different TLs separately.
+ using FrameWriterList = std::vector<std::unique_ptr<FrameWriter>>;
+ using FrameStatistics = VideoCodecTestStats::FrameStatistics;
+
+ VideoProcessor(webrtc::VideoEncoder* encoder,
+ VideoDecoderList* decoders,
+ FrameReader* input_frame_reader,
+ const VideoCodecTestFixture::Config& config,
+ VideoCodecTestStatsImpl* stats,
+ IvfFileWriterMap* encoded_frame_writers,
+ FrameWriterList* decoded_frame_writers);
+ ~VideoProcessor();
+
+ VideoProcessor(const VideoProcessor&) = delete;
+ VideoProcessor& operator=(const VideoProcessor&) = delete;
+
+ // Reads a frame and sends it to the encoder. When the encode callback
+  // is received, the encoded frame is buffered. After encoding is finished,
+  // the buffered frame is sent to the decoder. Quality evaluation is done in
+  // the decode callback.
+ void ProcessFrame();
+
+ // Updates the encoder with target rates. Must be called at least once.
+ void SetRates(size_t bitrate_kbps, double framerate_fps);
+
+  // Signals the processor to finalize frame processing and handle possible
+  // tail drops. If not called explicitly, this will be called from the dtor.
+  // ProcessFrame() and SetRates() must not be called after Finalize().
+ void Finalize();
+
+ private:
+ class VideoProcessorEncodeCompleteCallback
+ : public webrtc::EncodedImageCallback {
+ public:
+ explicit VideoProcessorEncodeCompleteCallback(
+ VideoProcessor* video_processor)
+ : video_processor_(video_processor),
+ task_queue_(TaskQueueBase::Current()) {
+ RTC_DCHECK(video_processor_);
+ RTC_DCHECK(task_queue_);
+ }
+
+ Result OnEncodedImage(
+ const webrtc::EncodedImage& encoded_image,
+ const webrtc::CodecSpecificInfo* codec_specific_info) override {
+ RTC_CHECK(codec_specific_info);
+
+ // Post the callback to the right task queue, if needed.
+ if (!task_queue_->IsCurrent()) {
+ VideoProcessor* video_processor = video_processor_;
+ task_queue_->PostTask([video_processor, encoded_image,
+ codec_specific_info = *codec_specific_info] {
+ video_processor->FrameEncoded(encoded_image, codec_specific_info);
+ });
+ return Result(Result::OK, 0);
+ }
+
+ video_processor_->FrameEncoded(encoded_image, *codec_specific_info);
+ return Result(Result::OK, 0);
+ }
+
+ private:
+ VideoProcessor* const video_processor_;
+ TaskQueueBase* const task_queue_;
+ };
+
+ class VideoProcessorDecodeCompleteCallback
+ : public webrtc::DecodedImageCallback {
+ public:
+ explicit VideoProcessorDecodeCompleteCallback(
+ VideoProcessor* video_processor,
+ size_t simulcast_svc_idx)
+ : video_processor_(video_processor),
+ simulcast_svc_idx_(simulcast_svc_idx),
+ task_queue_(TaskQueueBase::Current()) {
+ RTC_DCHECK(video_processor_);
+ RTC_DCHECK(task_queue_);
+ }
+
+ int32_t Decoded(webrtc::VideoFrame& image) override;
+
+ int32_t Decoded(webrtc::VideoFrame& image,
+ int64_t decode_time_ms) override {
+ return Decoded(image);
+ }
+
+ void Decoded(webrtc::VideoFrame& image,
+ absl::optional<int32_t> decode_time_ms,
+ absl::optional<uint8_t> qp) override {
+ Decoded(image);
+ }
+
+ private:
+ VideoProcessor* const video_processor_;
+ const size_t simulcast_svc_idx_;
+ TaskQueueBase* const task_queue_;
+ };
+
+ // Invoked by the callback adapter when a frame has completed encoding.
+ void FrameEncoded(const webrtc::EncodedImage& encoded_image,
+ const webrtc::CodecSpecificInfo& codec_specific);
+
+ // Invoked by the callback adapter when a frame has completed decoding.
+ void FrameDecoded(const webrtc::VideoFrame& image, size_t simulcast_svc_idx);
+
+ void DecodeFrame(const EncodedImage& encoded_image, size_t simulcast_svc_idx);
+
+ // In order to supply the SVC decoders with super frames containing all
+ // lower layer frames, we merge and store the layer frames in this method.
+ const webrtc::EncodedImage* BuildAndStoreSuperframe(
+ const EncodedImage& encoded_image,
+ VideoCodecType codec,
+ size_t frame_number,
+ size_t simulcast_svc_idx,
+ bool inter_layer_predicted) RTC_RUN_ON(sequence_checker_);
+
+ void CalcFrameQuality(const I420BufferInterface& decoded_frame,
+ FrameStatistics* frame_stat);
+
+ void WriteDecodedFrame(const I420BufferInterface& decoded_frame,
+ FrameWriter& frame_writer);
+
+ void HandleTailDrops();
+
+ // Test config.
+ const VideoCodecTestFixture::Config config_;
+ const size_t num_simulcast_or_spatial_layers_;
+ const bool analyze_frame_quality_;
+
+ // Frame statistics.
+ VideoCodecTestStatsImpl* const stats_;
+
+ // Codecs.
+ webrtc::VideoEncoder* const encoder_;
+ VideoDecoderList* const decoders_;
+ const std::unique_ptr<VideoBitrateAllocator> bitrate_allocator_;
+ VideoBitrateAllocation bitrate_allocation_ RTC_GUARDED_BY(sequence_checker_);
+ double framerate_fps_ RTC_GUARDED_BY(sequence_checker_);
+
+ // Adapters for the codec callbacks.
+ VideoProcessorEncodeCompleteCallback encode_callback_;
+  // A separate callback object is assigned to each decoder, which allows us
+  // to identify the decoded layer in the frame decode callback.
+ // simulcast_svc_idx -> decode callback.
+ std::vector<std::unique_ptr<VideoProcessorDecodeCompleteCallback>>
+ decode_callback_;
+
+ // Each call to ProcessFrame() will read one frame from `input_frame_reader_`.
+ FrameReader* const input_frame_reader_;
+
+  // Input frames are used as references for frame quality evaluations.
+  // Async codecs might queue frames. To handle that, we keep each input frame
+  // and release it after the corresponding coded frame has been decoded and
+  // the quality measurement is done.
+ // frame_number -> frame.
+ std::map<size_t, VideoFrame> input_frames_ RTC_GUARDED_BY(sequence_checker_);
+
+  // The encoder delivers coded frames layer by layer. We store the coded
+  // frames and, after all layers are encoded, decode them. Separating frame
+  // processing at the superframe level simplifies encoding/decoding time
+  // measurement.
+ // simulcast_svc_idx -> merged SVC encoded frame.
+ std::vector<EncodedImage> merged_encoded_frames_
+ RTC_GUARDED_BY(sequence_checker_);
+
+ // These (optional) file writers are used to persistently store the encoded
+ // and decoded bitstreams. Each frame writer is enabled by being non-null.
+ IvfFileWriterMap* const encoded_frame_writers_;
+ FrameWriterList* const decoded_frame_writers_;
+
+  // Metadata for input/encoded/decoded frames. Used for frame identification,
+  // frame drop detection, etc. We assume that encoded/decoded frames are
+  // ordered within each simulcast/spatial layer, but we make no assumptions
+  // about frame ordering between layers.
+ size_t last_inputed_frame_num_ RTC_GUARDED_BY(sequence_checker_);
+ size_t last_inputed_timestamp_ RTC_GUARDED_BY(sequence_checker_);
+ // simulcast_svc_idx -> encode status.
+ std::vector<bool> first_encoded_frame_ RTC_GUARDED_BY(sequence_checker_);
+ // simulcast_svc_idx -> frame_number.
+ std::vector<size_t> last_encoded_frame_num_ RTC_GUARDED_BY(sequence_checker_);
+ // simulcast_svc_idx -> decode status.
+ std::vector<bool> first_decoded_frame_ RTC_GUARDED_BY(sequence_checker_);
+ // simulcast_svc_idx -> frame_number.
+ std::vector<size_t> last_decoded_frame_num_ RTC_GUARDED_BY(sequence_checker_);
+ // simulcast_svc_idx -> buffer.
+ std::vector<rtc::scoped_refptr<I420Buffer>> last_decoded_frame_buffer_
+ RTC_GUARDED_BY(sequence_checker_);
+
+  // Time spent in the frame encode callback. It is accumulated across layers
+  // and reset when frame encoding starts. When the next layer is encoded, the
+  // post-encode time is subtracted from the measured encode time, which
+  // yields the pure encode time.
+ int64_t post_encode_time_ns_ RTC_GUARDED_BY(sequence_checker_);
+
+ // Indicates whether Finalize() was called or not.
+ bool is_finalized_ RTC_GUARDED_BY(sequence_checker_);
+
+ // This class must be operated on a TaskQueue.
+ RTC_NO_UNIQUE_ADDRESS SequenceChecker sequence_checker_;
+};
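+
+// A minimal usage sketch (hypothetical setup; the encoder, decoders, reader,
+// config, stats and writers are assumed to be created by the caller), based
+// on the pattern used by the unit tests:
+//
+//   TaskQueueForTest queue("VP");
+//   std::unique_ptr<VideoProcessor> vp;
+//   queue.SendTask([&] {
+//     vp = std::make_unique<VideoProcessor>(&encoder, &decoders, &reader,
+//                                           config, &stats, &encoded_writers,
+//                                           /*decoded_frame_writers=*/nullptr);
+//     vp->SetRates(/*bitrate_kbps=*/500, /*framerate_fps=*/30.0);
+//   });
+//   for (int i = 0; i < num_frames; ++i)
+//     queue.SendTask([&] { vp->ProcessFrame(); });
+//   queue.SendTask([&] {
+//     vp->Finalize();
+//     vp.reset();
+//   });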
+
+} // namespace test
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_CODECS_TEST_VIDEOPROCESSOR_H_
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/test/videoprocessor_unittest.cc b/third_party/libwebrtc/modules/video_coding/codecs/test/videoprocessor_unittest.cc
new file mode 100644
index 0000000000..6af775cece
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/test/videoprocessor_unittest.cc
@@ -0,0 +1,200 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/codecs/test/videoprocessor.h"
+
+#include <memory>
+
+#include "api/scoped_refptr.h"
+#include "api/test/mock_video_decoder.h"
+#include "api/test/mock_video_encoder.h"
+#include "api/test/videocodec_test_fixture.h"
+#include "api/video/i420_buffer.h"
+#include "media/base/media_constants.h"
+#include "modules/video_coding/codecs/test/videocodec_test_stats_impl.h"
+#include "rtc_base/task_queue_for_test.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "test/testsupport/mock/mock_frame_reader.h"
+
+using ::testing::_;
+using ::testing::AllOf;
+using ::testing::Field;
+using ::testing::Property;
+using ::testing::ResultOf;
+using ::testing::Return;
+
+namespace webrtc {
+namespace test {
+
+namespace {
+
+const int kWidth = 352;
+const int kHeight = 288;
+const int kFrameSize = kWidth * kHeight * 3 / 2; // I420.
+
+} // namespace
+
+class VideoProcessorTest : public ::testing::Test {
+ protected:
+ VideoProcessorTest() : q_("VP queue") {
+ config_.SetCodecSettings(cricket::kVp8CodecName, 1, 1, 1, false, false,
+ false, kWidth, kHeight);
+
+ decoder_mock_ = new MockVideoDecoder();
+ decoders_.push_back(std::unique_ptr<VideoDecoder>(decoder_mock_));
+
+ ExpectInit();
+ EXPECT_CALL(frame_reader_mock_, FrameLength())
+ .WillRepeatedly(Return(kFrameSize));
+ q_.SendTask(
+ [this] {
+ video_processor_ = std::make_unique<VideoProcessor>(
+ &encoder_mock_, &decoders_, &frame_reader_mock_, config_, &stats_,
+ &encoded_frame_writers_, /*decoded_frame_writers=*/nullptr);
+ });
+ }
+
+ ~VideoProcessorTest() {
+ q_.SendTask([this] { video_processor_.reset(); });
+ }
+
+ void ExpectInit() {
+ EXPECT_CALL(encoder_mock_, InitEncode(_, _));
+ EXPECT_CALL(encoder_mock_, RegisterEncodeCompleteCallback);
+ EXPECT_CALL(*decoder_mock_, Configure);
+ EXPECT_CALL(*decoder_mock_, RegisterDecodeCompleteCallback);
+ }
+
+ void ExpectRelease() {
+ EXPECT_CALL(encoder_mock_, Release()).Times(1);
+ EXPECT_CALL(encoder_mock_, RegisterEncodeCompleteCallback(_)).Times(1);
+ EXPECT_CALL(*decoder_mock_, Release()).Times(1);
+ EXPECT_CALL(*decoder_mock_, RegisterDecodeCompleteCallback(_)).Times(1);
+ }
+
+ TaskQueueForTest q_;
+
+ VideoCodecTestFixture::Config config_;
+
+ MockVideoEncoder encoder_mock_;
+ MockVideoDecoder* decoder_mock_;
+ std::vector<std::unique_ptr<VideoDecoder>> decoders_;
+ MockFrameReader frame_reader_mock_;
+ VideoCodecTestStatsImpl stats_;
+ VideoProcessor::IvfFileWriterMap encoded_frame_writers_;
+ std::unique_ptr<VideoProcessor> video_processor_;
+};
+
+TEST_F(VideoProcessorTest, InitRelease) {
+ ExpectRelease();
+}
+
+TEST_F(VideoProcessorTest, ProcessFrames_FixedFramerate) {
+ const int kBitrateKbps = 456;
+ const int kFramerateFps = 31;
+ EXPECT_CALL(
+ encoder_mock_,
+ SetRates(Field(&VideoEncoder::RateControlParameters::framerate_fps,
+ static_cast<double>(kFramerateFps))))
+ .Times(1);
+ q_.SendTask([=] { video_processor_->SetRates(kBitrateKbps, kFramerateFps); });
+
+ EXPECT_CALL(frame_reader_mock_, ReadFrame())
+ .WillRepeatedly(Return(I420Buffer::Create(kWidth, kHeight)));
+ EXPECT_CALL(
+ encoder_mock_,
+ Encode(Property(&VideoFrame::timestamp, 1 * 90000 / kFramerateFps), _))
+ .Times(1);
+ q_.SendTask([this] { video_processor_->ProcessFrame(); });
+
+ EXPECT_CALL(
+ encoder_mock_,
+ Encode(Property(&VideoFrame::timestamp, 2 * 90000 / kFramerateFps), _))
+ .Times(1);
+ q_.SendTask([this] { video_processor_->ProcessFrame(); });
+
+ ExpectRelease();
+}
+
+TEST_F(VideoProcessorTest, ProcessFrames_VariableFramerate) {
+ const int kBitrateKbps = 456;
+ const int kStartFramerateFps = 27;
+ const int kStartTimestamp = 90000 / kStartFramerateFps;
+ EXPECT_CALL(
+ encoder_mock_,
+ SetRates(Field(&VideoEncoder::RateControlParameters::framerate_fps,
+ static_cast<double>(kStartFramerateFps))))
+ .Times(1);
+ q_.SendTask(
+ [=] { video_processor_->SetRates(kBitrateKbps, kStartFramerateFps); });
+
+ EXPECT_CALL(frame_reader_mock_, ReadFrame())
+ .WillRepeatedly(Return(I420Buffer::Create(kWidth, kHeight)));
+ EXPECT_CALL(encoder_mock_,
+ Encode(Property(&VideoFrame::timestamp, kStartTimestamp), _))
+ .Times(1);
+ q_.SendTask([this] { video_processor_->ProcessFrame(); });
+
+ const int kNewFramerateFps = 13;
+ EXPECT_CALL(
+ encoder_mock_,
+ SetRates(Field(&VideoEncoder::RateControlParameters::framerate_fps,
+ static_cast<double>(kNewFramerateFps))))
+ .Times(1);
+ q_.SendTask(
+ [=] { video_processor_->SetRates(kBitrateKbps, kNewFramerateFps); });
+
+ EXPECT_CALL(encoder_mock_,
+ Encode(Property(&VideoFrame::timestamp,
+ kStartTimestamp + 90000 / kNewFramerateFps),
+ _))
+ .Times(1);
+ q_.SendTask([this] { video_processor_->ProcessFrame(); });
+
+ ExpectRelease();
+}
+
+TEST_F(VideoProcessorTest, SetRates) {
+ const uint32_t kBitrateKbps = 123;
+ const int kFramerateFps = 17;
+
+ EXPECT_CALL(
+ encoder_mock_,
+ SetRates(AllOf(ResultOf(
+ [](const VideoEncoder::RateControlParameters& params) {
+ return params.bitrate.get_sum_kbps();
+ },
+ kBitrateKbps),
+ Field(&VideoEncoder::RateControlParameters::framerate_fps,
+ static_cast<double>(kFramerateFps)))))
+ .Times(1);
+ q_.SendTask([=] { video_processor_->SetRates(kBitrateKbps, kFramerateFps); });
+
+ const uint32_t kNewBitrateKbps = 456;
+ const int kNewFramerateFps = 34;
+ EXPECT_CALL(
+ encoder_mock_,
+ SetRates(AllOf(ResultOf(
+ [](const VideoEncoder::RateControlParameters& params) {
+ return params.bitrate.get_sum_kbps();
+ },
+ kNewBitrateKbps),
+ Field(&VideoEncoder::RateControlParameters::framerate_fps,
+ static_cast<double>(kNewFramerateFps)))))
+ .Times(1);
+ q_.SendTask(
+ [=] { video_processor_->SetRates(kNewBitrateKbps, kNewFramerateFps); });
+
+ ExpectRelease();
+}
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/vp8/default_temporal_layers.cc b/third_party/libwebrtc/modules/video_coding/codecs/vp8/default_temporal_layers.cc
new file mode 100644
index 0000000000..94860da1b6
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/vp8/default_temporal_layers.cc
@@ -0,0 +1,884 @@
+/* Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/codecs/vp8/default_temporal_layers.h"
+
+#include <stdlib.h>
+
+#include <algorithm>
+#include <array>
+#include <memory>
+#include <set>
+#include <utility>
+#include <vector>
+
+#include "modules/video_coding/include/video_codec_interface.h"
+#include "rtc_base/arraysize.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "system_wrappers/include/field_trial.h"
+
+namespace webrtc {
+DefaultTemporalLayers::PendingFrame::PendingFrame() = default;
+DefaultTemporalLayers::PendingFrame::PendingFrame(
+ uint32_t timestamp,
+ bool expired,
+ uint8_t updated_buffers_mask,
+ const DependencyInfo& dependency_info)
+ : timestamp(timestamp),
+ expired(expired),
+ updated_buffer_mask(updated_buffers_mask),
+ dependency_info(dependency_info) {}
+
+namespace {
+using BufferFlags = Vp8FrameConfig::BufferFlags;
+using FreezeEntropy = Vp8FrameConfig::FreezeEntropy;
+using Vp8BufferReference = Vp8FrameConfig::Vp8BufferReference;
+
+constexpr BufferFlags kNone = BufferFlags::kNone;
+constexpr BufferFlags kReference = BufferFlags::kReference;
+constexpr BufferFlags kUpdate = BufferFlags::kUpdate;
+constexpr BufferFlags kReferenceAndUpdate = BufferFlags::kReferenceAndUpdate;
+constexpr FreezeEntropy kFreezeEntropy = FreezeEntropy::kFreezeEntropy;
+
+static constexpr uint8_t kUninitializedPatternIndex =
+ std::numeric_limits<uint8_t>::max();
+static constexpr std::array<Vp8BufferReference, 3> kAllBuffers = {
+ {Vp8BufferReference::kLast, Vp8BufferReference::kGolden,
+ Vp8BufferReference::kAltref}};
+
+std::vector<unsigned int> GetTemporalIds(size_t num_layers) {
+ switch (num_layers) {
+ case 1:
+ // Temporal layer structure (single layer):
+ // 0 0 0 0 ...
+ return {0};
+ case 2:
+ // Temporal layer structure:
+ // 1 1 ...
+ // 0 0 ...
+ return {0, 1};
+ case 3:
+ // Temporal layer structure:
+ // 2 2 2 2 ...
+ // 1 1 ...
+ // 0 0 ...
+ return {0, 2, 1, 2};
+ case 4:
+ // Temporal layer structure:
+ // 3 3 3 3 3 3 3 3 ...
+ // 2 2 2 2 ...
+ // 1 1 ...
+ // 0 0 ...
+ return {0, 3, 2, 3, 1, 3, 2, 3};
+ default:
+ RTC_DCHECK_NOTREACHED();
+ break;
+ }
+ RTC_DCHECK_NOTREACHED();
+ return {0};
+}
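+
+// For example, with three layers the pattern {0, 2, 1, 2} above is applied
+// cyclically, so consecutive frames get temporal ids:
+//   frame:       0 1 2 3 4 5 6 7 ...
+//   temporal id: 0 2 1 2 0 2 1 2 ...
+// i.e. TL0 carries 1/4 of the frames, TL1 1/4, and TL2 1/2.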
+
+uint8_t GetUpdatedBuffers(const Vp8FrameConfig& config) {
+ uint8_t flags = 0;
+ if (config.last_buffer_flags & BufferFlags::kUpdate) {
+ flags |= static_cast<uint8_t>(Vp8BufferReference::kLast);
+ }
+ if (config.golden_buffer_flags & BufferFlags::kUpdate) {
+ flags |= static_cast<uint8_t>(Vp8BufferReference::kGolden);
+ }
+ if (config.arf_buffer_flags & BufferFlags::kUpdate) {
+ flags |= static_cast<uint8_t>(Vp8BufferReference::kAltref);
+ }
+ return flags;
+}
+
+size_t BufferToIndex(Vp8BufferReference buffer) {
+ switch (buffer) {
+ case Vp8FrameConfig::Vp8BufferReference::kLast:
+ return 0;
+ case Vp8FrameConfig::Vp8BufferReference::kGolden:
+ return 1;
+ case Vp8FrameConfig::Vp8BufferReference::kAltref:
+ return 2;
+ case Vp8FrameConfig::Vp8BufferReference::kNone:
+ RTC_CHECK_NOTREACHED();
+ }
+}
+
+} // namespace
+
+constexpr size_t DefaultTemporalLayers::kNumReferenceBuffers;
+
+std::vector<DefaultTemporalLayers::DependencyInfo>
+DefaultTemporalLayers::GetDependencyInfo(size_t num_layers) {
+ // For indexing in the patterns described below (which temporal layers they
+ // belong to), see the diagram above.
+ // Layer sync is done similarly for all patterns (except single stream) and
+ // happens every 8 frames:
+// TL1 periodically layer-syncs by only referencing TL0 ('last'), while still
+// updating 'golden', so it can be used as a reference by future TL1 frames.
+ // TL2 layer syncs just before TL1 by only depending on TL0 (and not depending
+ // on TL1's buffer before TL1 has layer synced).
+ // TODO(pbos): Consider cyclically updating 'arf' (and 'golden' for 1TL) for
+ // the base layer in 1-3TL instead of 'last' periodically on long intervals,
+ // so that if scene changes occur (user walks between rooms or rotates webcam)
+ // the 'arf' (or 'golden' respectively) is not stuck on a no-longer relevant
+ // keyframe.
+
+ switch (num_layers) {
+ case 1:
+ // Always reference and update the same buffer.
+ return {{"S", {kReferenceAndUpdate, kNone, kNone}}};
+ case 2:
+      // All layers can reference but not update the 'alt' buffer; this means
+      // that the 'alt' buffer reference is effectively the last keyframe.
+ // TL0 also references and updates the 'last' buffer.
+ // TL1 also references 'last' and references and updates 'golden'.
+ if (!field_trial::IsDisabled("WebRTC-UseShortVP8TL2Pattern")) {
+ // Shortened 4-frame pattern:
+ // 1---1 1---1 ...
+ // / / / /
+ // 0---0---0---0 ...
+ return {{"SS", {kReferenceAndUpdate, kNone, kNone}},
+ {"-S", {kReference, kUpdate, kNone}},
+ {"SR", {kReferenceAndUpdate, kNone, kNone}},
+ {"-D", {kReference, kReference, kNone, kFreezeEntropy}}};
+ } else {
+ // "Default" 8-frame pattern:
+ // 1---1---1---1 1---1---1---1 ...
+ // / / / / / / / /
+ // 0---0---0---0---0---0---0---0 ...
+ return {{"SS", {kReferenceAndUpdate, kNone, kNone}},
+ {"-S", {kReference, kUpdate, kNone}},
+ {"SR", {kReferenceAndUpdate, kNone, kNone}},
+ {"-R", {kReference, kReferenceAndUpdate, kNone}},
+ {"SR", {kReferenceAndUpdate, kNone, kNone}},
+ {"-R", {kReference, kReferenceAndUpdate, kNone}},
+ {"SR", {kReferenceAndUpdate, kNone, kNone}},
+ {"-D", {kReference, kReference, kNone, kFreezeEntropy}}};
+ }
+ case 3:
+ if (field_trial::IsEnabled("WebRTC-UseShortVP8TL3Pattern")) {
+ // This field trial is intended to check if it is worth using a shorter
+ // temporal pattern, trading some coding efficiency for less risk of
+ // dropped frames.
+ // The coding efficiency will decrease somewhat since the higher layer
+ // state is more volatile, but it will be offset slightly by updating
+ // the altref buffer with TL2 frames, instead of just referencing lower
+ // layers.
+ // If a frame is dropped in a higher layer, the jitter
+ // buffer on the receive side won't be able to decode any higher layer
+ // frame until the next sync frame. So we expect a noticeable decrease
+ // in frame drops on links with high packet loss.
+
+ // TL0 references and updates the 'last' buffer.
+ // TL1 references 'last' and references and updates 'golden'.
+ // TL2 references both 'last' & 'golden' and references and updates
+ // 'arf'.
+ // 2-------2 2-------2 2
+ // / __/ / __/ /
+ // / __1 / __1 /
+ // /___/ /___/ /
+ // 0---------------0---------------0-----
+ // 0 1 2 3 4 5 6 7 8 9 ...
+ return {{"SSS", {kReferenceAndUpdate, kNone, kNone}},
+ {"--S", {kReference, kNone, kUpdate}},
+ {"-DR", {kReference, kUpdate, kNone}},
+ {"--D", {kReference, kReference, kReference, kFreezeEntropy}}};
+ } else {
+        // All layers can reference but not update the 'alt' buffer; this
+        // means that the 'alt' buffer reference is effectively the last
+        // keyframe.
+ // TL0 also references and updates the 'last' buffer.
+ // TL1 also references 'last' and references and updates 'golden'.
+ // TL2 references both 'last' and 'golden' but updates no buffer.
+ // 2 __2 _____2 __2 2
+ // / /____/ / / /
+ // / 1---------/-----1 /
+ // /_____/ /_____/ /
+ // 0---------------0---------------0-----
+ // 0 1 2 3 4 5 6 7 8 9 ...
+ return {{"SSS", {kReferenceAndUpdate, kNone, kNone}},
+ {"--D", {kReference, kNone, kNone, kFreezeEntropy}},
+ {"-SS", {kReference, kUpdate, kNone}},
+ {"--D", {kReference, kReference, kNone, kFreezeEntropy}},
+ {"SRR", {kReferenceAndUpdate, kNone, kNone}},
+ {"--D", {kReference, kReference, kNone, kFreezeEntropy}},
+ {"-DS", {kReference, kReferenceAndUpdate, kNone}},
+ {"--D", {kReference, kReference, kNone, kFreezeEntropy}}};
+ }
+ case 4:
+ // TL0 references and updates only the 'last' buffer.
+ // TL1 references 'last' and updates and references 'golden'.
+ // TL2 references 'last' and 'golden', and references and updates 'arf'.
+      // TL3 references all buffers but updates none of them.
+ // TODO(philipel): Set decode target information for this structure.
+ return {{"----", {kReferenceAndUpdate, kNone, kNone}},
+ {"----", {kReference, kNone, kNone, kFreezeEntropy}},
+ {"----", {kReference, kNone, kUpdate}},
+ {"----", {kReference, kNone, kReference, kFreezeEntropy}},
+ {"----", {kReference, kUpdate, kNone}},
+ {"----", {kReference, kReference, kReference, kFreezeEntropy}},
+ {"----", {kReference, kReference, kReferenceAndUpdate}},
+ {"----", {kReference, kReference, kReference, kFreezeEntropy}},
+ {"----", {kReferenceAndUpdate, kNone, kNone}},
+ {"----", {kReference, kReference, kReference, kFreezeEntropy}},
+ {"----", {kReference, kReference, kReferenceAndUpdate}},
+ {"----", {kReference, kReference, kReference, kFreezeEntropy}},
+ {"----", {kReference, kReferenceAndUpdate, kNone}},
+ {"----", {kReference, kReference, kReference, kFreezeEntropy}},
+ {"----", {kReference, kReference, kReferenceAndUpdate}},
+ {"----", {kReference, kReference, kReference, kFreezeEntropy}}};
+ default:
+ RTC_DCHECK_NOTREACHED();
+ break;
+ }
+ RTC_DCHECK_NOTREACHED();
+ return {{"", {kNone, kNone, kNone}}};
+}
+
+std::bitset<DefaultTemporalLayers::kNumReferenceBuffers>
+DefaultTemporalLayers::DetermineStaticBuffers(
+ const std::vector<DependencyInfo>& temporal_pattern) {
+ std::bitset<kNumReferenceBuffers> buffers;
+ buffers.set();
+ for (const DependencyInfo& info : temporal_pattern) {
+ uint8_t updated_buffers = GetUpdatedBuffers(info.frame_config);
+
+ for (Vp8BufferReference buffer : kAllBuffers) {
+ if (static_cast<uint8_t>(buffer) & updated_buffers) {
+ buffers.reset(BufferToIndex(buffer));
+ }
+ }
+ }
+ return buffers;
+}
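+
+// For example, neither 2-layer pattern above ever sets kUpdate on the 'arf'
+// buffer, so DetermineStaticBuffers() marks it as static. A static buffer
+// only ever holds the last keyframe and is therefore always safe to
+// reference; IsSyncFrame() and ValidateReferences() below rely on this.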
+
+DefaultTemporalLayers::DefaultTemporalLayers(int number_of_temporal_layers)
+ : num_layers_(std::max(1, number_of_temporal_layers)),
+ temporal_ids_(GetTemporalIds(num_layers_)),
+ temporal_pattern_(GetDependencyInfo(num_layers_)),
+ is_static_buffer_(DetermineStaticBuffers(temporal_pattern_)),
+ pattern_idx_(kUninitializedPatternIndex),
+ new_bitrates_bps_(std::vector<uint32_t>(num_layers_, 0u)) {
+ RTC_CHECK_GE(kMaxTemporalStreams, number_of_temporal_layers);
+ RTC_CHECK_GE(number_of_temporal_layers, 0);
+ RTC_CHECK_LE(number_of_temporal_layers, 4);
+  // pattern_idx_ wraps around temporal_pattern_.size(); this is incorrect if
+  // temporal_ids_ is ever longer. If that ever changes, it needs to wrap at
+  // max(temporal_ids_.size(), temporal_pattern_.size()).
+ RTC_DCHECK_LE(temporal_ids_.size(), temporal_pattern_.size());
+
+ RTC_DCHECK(
+ checker_ = TemporalLayersChecker::CreateTemporalLayersChecker(
+ Vp8TemporalLayersType::kFixedPattern, number_of_temporal_layers));
+
+ // Always need to start with a keyframe, so pre-populate all frame counters.
+ frames_since_buffer_refresh_.fill(0);
+}
+
+DefaultTemporalLayers::~DefaultTemporalLayers() = default;
+
+void DefaultTemporalLayers::SetQpLimits(size_t stream_index,
+ int min_qp,
+ int max_qp) {
+ RTC_DCHECK_LT(stream_index, StreamCount());
+ // Ignore.
+}
+
+size_t DefaultTemporalLayers::StreamCount() const {
+ return 1;
+}
+
+bool DefaultTemporalLayers::SupportsEncoderFrameDropping(
+ size_t stream_index) const {
+ RTC_DCHECK_LT(stream_index, StreamCount());
+  // This class allows the encoder to drop frames as it sees fit.
+ return true;
+}
+
+void DefaultTemporalLayers::OnRatesUpdated(
+ size_t stream_index,
+ const std::vector<uint32_t>& bitrates_bps,
+ int framerate_fps) {
+ RTC_DCHECK_LT(stream_index, StreamCount());
+ RTC_DCHECK_GT(bitrates_bps.size(), 0);
+ RTC_DCHECK_LE(bitrates_bps.size(), num_layers_);
+ // `bitrates_bps` uses individual rate per layer, but Vp8EncoderConfig wants
+ // the accumulated rate, so sum them up.
+ new_bitrates_bps_ = bitrates_bps;
+ new_bitrates_bps_->resize(num_layers_);
+ for (size_t i = 1; i < num_layers_; ++i) {
+ (*new_bitrates_bps_)[i] += (*new_bitrates_bps_)[i - 1];
+ }
+}
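+
+// Accumulation example (hypothetical rates): per-layer input {200000, 100000,
+// 100000} bps becomes cumulative {200000, 300000, 400000} bps, i.e. each
+// entry is the total rate when that layer and all layers below it are sent.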
+
+Vp8EncoderConfig DefaultTemporalLayers::UpdateConfiguration(
+ size_t stream_index) {
+ RTC_DCHECK_LT(stream_index, StreamCount());
+
+ Vp8EncoderConfig config;
+
+ if (!new_bitrates_bps_) {
+ return config;
+ }
+
+ config.temporal_layer_config.emplace();
+ Vp8EncoderConfig::TemporalLayerConfig& ts_config =
+ config.temporal_layer_config.value();
+
+ for (size_t i = 0; i < num_layers_; ++i) {
+ ts_config.ts_target_bitrate[i] = (*new_bitrates_bps_)[i] / 1000;
+ // ..., 4, 2, 1
+ ts_config.ts_rate_decimator[i] = 1 << (num_layers_ - i - 1);
+ }
+
+ ts_config.ts_number_layers = num_layers_;
+ ts_config.ts_periodicity = temporal_ids_.size();
+ std::copy(temporal_ids_.begin(), temporal_ids_.end(),
+ ts_config.ts_layer_id.begin());
+
+ new_bitrates_bps_.reset();
+
+ return config;
+}
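+
+// Decimator example: with num_layers_ = 3 the decimators are {4, 2, 1}, so at
+// a hypothetical 30 fps input, TL0 alone plays back at 30 / 4 = 7.5 fps,
+// TL0+TL1 at 15 fps, and all three layers at the full 30 fps.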
+
+bool DefaultTemporalLayers::IsSyncFrame(const Vp8FrameConfig& config) const {
+ // Since we always assign TL0 to 'last' in these patterns, we can infer layer
+ // sync by checking if temporal id > 0 and we only reference TL0 or buffers
+ // containing the last key-frame.
+ if (config.packetizer_temporal_idx == 0) {
+    // TL0 frames are by definition not sync frames.
+ return false;
+ }
+
+ if ((config.last_buffer_flags & BufferFlags::kReference) == 0) {
+ // Sync frames must reference TL0.
+ return false;
+ }
+
+ if ((config.golden_buffer_flags & BufferFlags::kReference) &&
+ !is_static_buffer_[BufferToIndex(Vp8BufferReference::kGolden)]) {
+ // Referencing a golden frame that contains a non-(base layer|key frame).
+ return false;
+ }
+ if ((config.arf_buffer_flags & BufferFlags::kReference) &&
+ !is_static_buffer_[BufferToIndex(Vp8BufferReference::kAltref)]) {
+ // Referencing an altref frame that contains a non-(base layer|key frame).
+ return false;
+ }
+
+ return true;
+}
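+
+// For example, in the default 2-layer pattern above, the "-S" entry (a TL1
+// frame that references only 'last' and updates 'golden') is a sync frame,
+// while the "-R" entries also reference the dynamically updated 'golden'
+// buffer and are therefore not sync frames.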
+
+Vp8FrameConfig DefaultTemporalLayers::NextFrameConfig(size_t stream_index,
+ uint32_t timestamp) {
+ RTC_DCHECK_LT(stream_index, StreamCount());
+ RTC_DCHECK_GT(num_layers_, 0);
+ RTC_DCHECK_GT(temporal_pattern_.size(), 0);
+
+ RTC_DCHECK_GT(kUninitializedPatternIndex, temporal_pattern_.size());
+ const bool first_frame = (pattern_idx_ == kUninitializedPatternIndex);
+
+ pattern_idx_ = (pattern_idx_ + 1) % temporal_pattern_.size();
+ DependencyInfo dependency_info = temporal_pattern_[pattern_idx_];
+ Vp8FrameConfig& tl_config = dependency_info.frame_config;
+ tl_config.encoder_layer_id = tl_config.packetizer_temporal_idx =
+ temporal_ids_[pattern_idx_ % temporal_ids_.size()];
+
+ if (pattern_idx_ == 0) {
+    // Start of a new pattern iteration; set up clean state by invalidating
+    // any pending frames, so that we don't make an invalid reference to a
+    // buffer containing data from a previous iteration.
+ for (auto& frame : pending_frames_) {
+ frame.expired = true;
+ }
+ }
+
+ if (first_frame) {
+ tl_config = Vp8FrameConfig::GetIntraFrameConfig();
+ } else {
+ // Last is always ok to reference as it contains the base layer. For other
+ // buffers though, we need to check if the buffer has actually been
+ // refreshed this cycle of the temporal pattern. If the encoder dropped
+ // a frame, it might not have.
+ ValidateReferences(&tl_config.golden_buffer_flags,
+ Vp8BufferReference::kGolden);
+ ValidateReferences(&tl_config.arf_buffer_flags,
+ Vp8BufferReference::kAltref);
+    // Update the search order to let the encoder know which buffers contain
+    // the most recent data.
+ UpdateSearchOrder(&tl_config);
+    // Figure out if this is a sync frame (non-base-layer frame with only
+    // base-layer references).
+ tl_config.layer_sync = IsSyncFrame(tl_config);
+
+    // Increment frame age; this needs to stay in sync with `pattern_idx_`,
+    // so it must be updated here. Resetting the age to 0 must be done when
+    // encoding is complete though, so with a pipelining encoder it might lag.
+    // To prevent that data from spilling over into the next iteration,
+    // the `pending_frames_` entries are invalidated when the pattern loops.
+    // If the delay is constant,
+ // the relative age should still be OK for the search order.
+ for (size_t& n : frames_since_buffer_refresh_) {
+ ++n;
+ }
+ }
+
+ // Add frame to set of pending frames, awaiting completion.
+ pending_frames_.emplace_back(timestamp, false, GetUpdatedBuffers(tl_config),
+ dependency_info);
+
+  // The checker does not yet support encoder frame dropping, so validate the
+  // flags here, before they can be dropped.
+ // TODO(sprang): Update checker to support dropping.
+ RTC_DCHECK(checker_->CheckTemporalConfig(first_frame, tl_config));
+
+ return tl_config;
+}
+
+void DefaultTemporalLayers::ValidateReferences(BufferFlags* flags,
+ Vp8BufferReference ref) const {
+  // Check if the buffer specified by `ref` is actually referenced, and if so,
+  // whether it is also a dynamically updated one (buffers containing only
+  // keyframes are always safe to reference).
+ if ((*flags & BufferFlags::kReference) &&
+ !is_static_buffer_[BufferToIndex(ref)]) {
+ if (NumFramesSinceBufferRefresh(ref) >= pattern_idx_) {
+ // No valid buffer state, or buffer contains frame that is older than the
+ // current pattern. This reference is not valid, so remove it.
+ *flags = static_cast<BufferFlags>(*flags & ~BufferFlags::kReference);
+ }
+ }
+}
+
+void DefaultTemporalLayers::UpdateSearchOrder(Vp8FrameConfig* config) {
+  // Figure out which of the buffers we can reference, and order them so that
+  // the most recently refreshed comes first. On ties, prioritize 'last'
+  // first, 'golden' second, and 'altref' third.
+ using BufferRefAge = std::pair<Vp8BufferReference, size_t>;
+ std::vector<BufferRefAge> eligible_buffers;
+ if (config->last_buffer_flags & BufferFlags::kReference) {
+ eligible_buffers.emplace_back(
+ Vp8BufferReference::kLast,
+ NumFramesSinceBufferRefresh(Vp8BufferReference::kLast));
+ }
+ if (config->golden_buffer_flags & BufferFlags::kReference) {
+ eligible_buffers.emplace_back(
+ Vp8BufferReference::kGolden,
+ NumFramesSinceBufferRefresh(Vp8BufferReference::kGolden));
+ }
+ if (config->arf_buffer_flags & BufferFlags::kReference) {
+ eligible_buffers.emplace_back(
+ Vp8BufferReference::kAltref,
+ NumFramesSinceBufferRefresh(Vp8BufferReference::kAltref));
+ }
+
+ std::sort(eligible_buffers.begin(), eligible_buffers.end(),
+ [](const BufferRefAge& lhs, const BufferRefAge& rhs) {
+ if (lhs.second != rhs.second) {
+ // Lower count has highest precedence.
+ return lhs.second < rhs.second;
+ }
+ return lhs.first < rhs.first;
+ });
+
+ // Populate the search order fields where possible.
+ if (!eligible_buffers.empty()) {
+ config->first_reference = eligible_buffers.front().first;
+ if (eligible_buffers.size() > 1)
+ config->second_reference = eligible_buffers[1].first;
+ }
+}
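+
+// For example, if a frame references both 'last' (refreshed one frame ago)
+// and 'golden' (refreshed three frames ago), the search order becomes
+// first_reference = kLast, second_reference = kGolden, steering the encoder
+// toward the buffer holding the most recent, and thus likely most similar,
+// data.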
+
+size_t DefaultTemporalLayers::NumFramesSinceBufferRefresh(
+ Vp8FrameConfig::Vp8BufferReference ref) const {
+ return frames_since_buffer_refresh_[BufferToIndex(ref)];
+}
+
+void DefaultTemporalLayers::ResetNumFramesSinceBufferRefresh(
+ Vp8FrameConfig::Vp8BufferReference ref) {
+ frames_since_buffer_refresh_[BufferToIndex(ref)] = 0;
+}
+
+void DefaultTemporalLayers::CullPendingFramesBefore(uint32_t timestamp) {
+ while (!pending_frames_.empty() &&
+ pending_frames_.front().timestamp != timestamp) {
+ pending_frames_.pop_front();
+ }
+}
+
+void DefaultTemporalLayers::OnEncodeDone(size_t stream_index,
+ uint32_t rtp_timestamp,
+ size_t size_bytes,
+ bool is_keyframe,
+ int qp,
+ CodecSpecificInfo* info) {
+ RTC_DCHECK_LT(stream_index, StreamCount());
+ RTC_DCHECK_GT(num_layers_, 0);
+
+ if (size_bytes == 0) {
+ RTC_LOG(LS_WARNING) << "Empty frame; treating as dropped.";
+ OnFrameDropped(stream_index, rtp_timestamp);
+ return;
+ }
+
+ CullPendingFramesBefore(rtp_timestamp);
+ RTC_CHECK(!pending_frames_.empty());
+ PendingFrame& frame = pending_frames_.front();
+ RTC_DCHECK_EQ(frame.timestamp, rtp_timestamp);
+ const Vp8FrameConfig& frame_config = frame.dependency_info.frame_config;
+ if (is_keyframe) {
+ // Signal key-frame so checker resets state.
+ RTC_DCHECK(checker_->CheckTemporalConfig(true, frame_config));
+ }
+
+ CodecSpecificInfoVP8& vp8_info = info->codecSpecific.VP8;
+ if (num_layers_ == 1) {
+ vp8_info.temporalIdx = kNoTemporalIdx;
+ vp8_info.layerSync = false;
+ } else {
+ if (is_keyframe) {
+ // Restart the temporal pattern on keyframes.
+ pattern_idx_ = 0;
+ vp8_info.temporalIdx = 0;
+ vp8_info.layerSync = true; // Keyframes are always sync frames.
+
+ for (Vp8BufferReference buffer : kAllBuffers) {
+ if (is_static_buffer_[BufferToIndex(buffer)]) {
+ // Update frame count of all kf-only buffers, regardless of state of
+ // `pending_frames_`.
+ ResetNumFramesSinceBufferRefresh(buffer);
+ } else {
+ // Key-frames update all buffers, this should be reflected when
+ // updating state in FrameEncoded().
+ frame.updated_buffer_mask |= static_cast<uint8_t>(buffer);
+ }
+ }
+ } else {
+ // Delta frame, update codec specifics with temporal id and sync flag.
+ vp8_info.temporalIdx = frame_config.packetizer_temporal_idx;
+ vp8_info.layerSync = frame_config.layer_sync;
+ }
+ }
+
+ vp8_info.useExplicitDependencies = true;
+ RTC_DCHECK_EQ(vp8_info.referencedBuffersCount, 0u);
+ RTC_DCHECK_EQ(vp8_info.updatedBuffersCount, 0u);
+
+ GenericFrameInfo& generic_frame_info = info->generic_frame_info.emplace();
+
+ for (int i = 0; i < static_cast<int>(Vp8FrameConfig::Buffer::kCount); ++i) {
+ bool references = false;
+ bool updates = is_keyframe;
+
+ if (!is_keyframe &&
+ frame_config.References(static_cast<Vp8FrameConfig::Buffer>(i))) {
+ RTC_DCHECK_LT(vp8_info.referencedBuffersCount,
+ arraysize(CodecSpecificInfoVP8::referencedBuffers));
+ references = true;
+ vp8_info.referencedBuffers[vp8_info.referencedBuffersCount++] = i;
+ }
+
+ if (is_keyframe ||
+ frame_config.Updates(static_cast<Vp8FrameConfig::Buffer>(i))) {
+ RTC_DCHECK_LT(vp8_info.updatedBuffersCount,
+ arraysize(CodecSpecificInfoVP8::updatedBuffers));
+ updates = true;
+ vp8_info.updatedBuffers[vp8_info.updatedBuffersCount++] = i;
+ }
+
+ if (references || updates) {
+ generic_frame_info.encoder_buffers.emplace_back(i, references, updates);
+ }
+ }
+
+  // The templates are always present on keyframes, and then referred to by
+  // subsequent frames.
+ if (is_keyframe) {
+ info->template_structure = GetTemplateStructure(num_layers_);
+ generic_frame_info.decode_target_indications =
+ temporal_pattern_.front().decode_target_indications;
+ generic_frame_info.temporal_id = 0;
+ } else {
+ generic_frame_info.decode_target_indications =
+ frame.dependency_info.decode_target_indications;
+ generic_frame_info.temporal_id = frame_config.packetizer_temporal_idx;
+ }
+
+ if (!frame.expired) {
+ for (Vp8BufferReference buffer : kAllBuffers) {
+ if (frame.updated_buffer_mask & static_cast<uint8_t>(buffer)) {
+ ResetNumFramesSinceBufferRefresh(buffer);
+ }
+ }
+ }
+
+ pending_frames_.pop_front();
+}
+
+void DefaultTemporalLayers::OnFrameDropped(size_t stream_index,
+ uint32_t rtp_timestamp) {
+ CullPendingFramesBefore(rtp_timestamp);
+ RTC_CHECK(!pending_frames_.empty());
+ RTC_DCHECK_EQ(pending_frames_.front().timestamp, rtp_timestamp);
+ pending_frames_.pop_front();
+}
+
+void DefaultTemporalLayers::OnPacketLossRateUpdate(float packet_loss_rate) {}
+
+void DefaultTemporalLayers::OnRttUpdate(int64_t rtt_ms) {}
+
+void DefaultTemporalLayers::OnLossNotification(
+ const VideoEncoder::LossNotification& loss_notification) {}
+
+FrameDependencyStructure DefaultTemporalLayers::GetTemplateStructure(
+ int num_layers) const {
+ RTC_CHECK_LT(num_layers, 5);
+ RTC_CHECK_GT(num_layers, 0);
+
+ FrameDependencyStructure template_structure;
+ template_structure.num_decode_targets = num_layers;
+
+ switch (num_layers) {
+ case 1: {
+ template_structure.templates.resize(2);
+ template_structure.templates[0].T(0).Dtis("S");
+ template_structure.templates[1].T(0).Dtis("S").FrameDiffs({1});
+ return template_structure;
+ }
+ case 2: {
+ template_structure.templates.resize(5);
+ template_structure.templates[0].T(0).Dtis("SS");
+ template_structure.templates[1].T(0).Dtis("SS").FrameDiffs({2});
+ template_structure.templates[2].T(0).Dtis("SR").FrameDiffs({2});
+ template_structure.templates[3].T(1).Dtis("-S").FrameDiffs({1});
+ template_structure.templates[4].T(1).Dtis("-D").FrameDiffs({2, 1});
+ return template_structure;
+ }
+ case 3: {
+ if (field_trial::IsEnabled("WebRTC-UseShortVP8TL3Pattern")) {
+ template_structure.templates.resize(5);
+ template_structure.templates[0].T(0).Dtis("SSS");
+ template_structure.templates[1].T(0).Dtis("SSS").FrameDiffs({4});
+ template_structure.templates[2].T(1).Dtis("-DR").FrameDiffs({2});
+ template_structure.templates[3].T(2).Dtis("--S").FrameDiffs({1});
+ template_structure.templates[4].T(2).Dtis("--D").FrameDiffs({2, 1});
+ } else {
+ template_structure.templates.resize(7);
+ template_structure.templates[0].T(0).Dtis("SSS");
+ template_structure.templates[1].T(0).Dtis("SSS").FrameDiffs({4});
+ template_structure.templates[2].T(0).Dtis("SRR").FrameDiffs({4});
+ template_structure.templates[3].T(1).Dtis("-SS").FrameDiffs({2});
+ template_structure.templates[4].T(1).Dtis("-DS").FrameDiffs({4, 2});
+ template_structure.templates[5].T(2).Dtis("--D").FrameDiffs({1});
+ template_structure.templates[6].T(2).Dtis("--D").FrameDiffs({3, 1});
+ }
+ return template_structure;
+ }
+ case 4: {
+ template_structure.templates.resize(8);
+ template_structure.templates[0].T(0).Dtis("SSSS");
+ template_structure.templates[1].T(0).Dtis("SSSS").FrameDiffs({8});
+ template_structure.templates[2].T(1).Dtis("-SRR").FrameDiffs({4});
+ template_structure.templates[3].T(1).Dtis("-SRR").FrameDiffs({4, 8});
+ template_structure.templates[4].T(2).Dtis("--SR").FrameDiffs({2});
+ template_structure.templates[5].T(2).Dtis("--SR").FrameDiffs({2, 4});
+ template_structure.templates[6].T(3).Dtis("---D").FrameDiffs({1});
+ template_structure.templates[7].T(3).Dtis("---D").FrameDiffs({1, 3});
+ return template_structure;
+ }
+ default:
+ RTC_DCHECK_NOTREACHED();
+ // To make the compiler happy!
+ return template_structure;
+ }
+}
+
+// Returns the list of temporal dependencies for each frame in the temporal
+// pattern. Values are lists of indices into the pattern.
+std::vector<std::set<uint8_t>> GetTemporalDependencies(
+ int num_temporal_layers) {
+ switch (num_temporal_layers) {
+ case 1:
+ return {{0}};
+ case 2:
+ if (!field_trial::IsDisabled("WebRTC-UseShortVP8TL2Pattern")) {
+ return {{2}, {0}, {0}, {1, 2}};
+ } else {
+ return {{6}, {0}, {0}, {1, 2}, {2}, {3, 4}, {4}, {5, 6}};
+ }
+ case 3:
+ if (field_trial::IsEnabled("WebRTC-UseShortVP8TL3Pattern")) {
+ return {{0}, {0}, {0}, {0, 1, 2}};
+ } else {
+ return {{4}, {0}, {0}, {0, 2}, {0}, {2, 4}, {2, 4}, {4, 6}};
+ }
+ case 4:
+ return {{8}, {0}, {0}, {0, 2},
+ {0}, {0, 2, 4}, {0, 2, 4}, {0, 4, 6},
+ {0}, {4, 6, 8}, {4, 6, 8}, {4, 8, 10},
+ {4, 8}, {8, 10, 12}, {8, 10, 12}, {8, 12, 14}};
+ default:
+ RTC_DCHECK_NOTREACHED();
+ return {};
+ }
+}
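+
+// Reading example: for two layers with the short pattern, the returned value
+// {{2}, {0}, {0}, {1, 2}} means the frame at pattern position 3 may depend on
+// the frames at positions 1 and 2, while position 0 may depend on position 2
+// of the previous pattern cycle.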
+
+DefaultTemporalLayersChecker::DefaultTemporalLayersChecker(
+ int num_temporal_layers)
+ : TemporalLayersChecker(num_temporal_layers),
+ num_layers_(std::max(1, num_temporal_layers)),
+ temporal_ids_(GetTemporalIds(num_layers_)),
+ temporal_dependencies_(GetTemporalDependencies(num_layers_)),
+ pattern_idx_(255) {
+ int i = 0;
+ while (temporal_ids_.size() < temporal_dependencies_.size()) {
+ temporal_ids_.push_back(temporal_ids_[i++]);
+ }
+}
+
+DefaultTemporalLayersChecker::~DefaultTemporalLayersChecker() = default;
+
+bool DefaultTemporalLayersChecker::CheckTemporalConfig(
+ bool frame_is_keyframe,
+ const Vp8FrameConfig& frame_config) {
+ if (!TemporalLayersChecker::CheckTemporalConfig(frame_is_keyframe,
+ frame_config)) {
+ return false;
+ }
+ if (frame_config.drop_frame) {
+ return true;
+ }
+
+ if (frame_is_keyframe) {
+ pattern_idx_ = 0;
+ last_ = BufferState();
+ golden_ = BufferState();
+ arf_ = BufferState();
+ return true;
+ }
+
+ ++pattern_idx_;
+ if (pattern_idx_ == temporal_ids_.size()) {
+ // All non key-frame buffers should be updated each pattern cycle.
+ if (!last_.is_keyframe && !last_.is_updated_this_cycle) {
+ RTC_LOG(LS_ERROR) << "Last buffer was not updated during pattern cycle.";
+ return false;
+ }
+ if (!arf_.is_keyframe && !arf_.is_updated_this_cycle) {
+ RTC_LOG(LS_ERROR) << "Arf buffer was not updated during pattern cycle.";
+ return false;
+ }
+ if (!golden_.is_keyframe && !golden_.is_updated_this_cycle) {
+ RTC_LOG(LS_ERROR)
+ << "Golden buffer was not updated during pattern cycle.";
+ return false;
+ }
+ last_.is_updated_this_cycle = false;
+ arf_.is_updated_this_cycle = false;
+ golden_.is_updated_this_cycle = false;
+ pattern_idx_ = 0;
+ }
+ uint8_t expected_tl_idx = temporal_ids_[pattern_idx_];
+ if (frame_config.packetizer_temporal_idx != expected_tl_idx) {
+ RTC_LOG(LS_ERROR) << "Frame has an incorrect temporal index. Expected: "
+ << static_cast<int>(expected_tl_idx) << " Actual: "
+ << static_cast<int>(frame_config.packetizer_temporal_idx);
+ return false;
+ }
+
+ bool need_sync = temporal_ids_[pattern_idx_] > 0 &&
+ temporal_ids_[pattern_idx_] != kNoTemporalIdx;
+ std::vector<int> dependencies;
+
+ if (frame_config.last_buffer_flags & BufferFlags::kReference) {
+ uint8_t referenced_layer = temporal_ids_[last_.pattern_idx];
+ if (referenced_layer > 0) {
+ need_sync = false;
+ }
+ if (!last_.is_keyframe) {
+ dependencies.push_back(last_.pattern_idx);
+ }
+ } else if (frame_config.first_reference == Vp8BufferReference::kLast ||
+ frame_config.second_reference == Vp8BufferReference::kLast) {
+ RTC_LOG(LS_ERROR)
+ << "Last buffer not referenced, but present in search order.";
+ return false;
+ }
+
+ if (frame_config.arf_buffer_flags & BufferFlags::kReference) {
+ uint8_t referenced_layer = temporal_ids_[arf_.pattern_idx];
+ if (referenced_layer > 0) {
+ need_sync = false;
+ }
+ if (!arf_.is_keyframe) {
+ dependencies.push_back(arf_.pattern_idx);
+ }
+ } else if (frame_config.first_reference == Vp8BufferReference::kAltref ||
+ frame_config.second_reference == Vp8BufferReference::kAltref) {
+    RTC_LOG(LS_ERROR)
+        << "Altref buffer not referenced, but present in search order.";
+ return false;
+ }
+
+ if (frame_config.golden_buffer_flags & BufferFlags::kReference) {
+ uint8_t referenced_layer = temporal_ids_[golden_.pattern_idx];
+ if (referenced_layer > 0) {
+ need_sync = false;
+ }
+ if (!golden_.is_keyframe) {
+ dependencies.push_back(golden_.pattern_idx);
+ }
+ } else if (frame_config.first_reference == Vp8BufferReference::kGolden ||
+ frame_config.second_reference == Vp8BufferReference::kGolden) {
+ RTC_LOG(LS_ERROR)
+ << "Golden buffer not referenced, but present in search order.";
+ return false;
+ }
+
+ if (need_sync != frame_config.layer_sync) {
+ RTC_LOG(LS_ERROR) << "Sync bit is set incorrectly on a frame. Expected: "
+ << need_sync << " Actual: " << frame_config.layer_sync;
+ return false;
+ }
+
+ if (!frame_is_keyframe) {
+ size_t i;
+ for (i = 0; i < dependencies.size(); ++i) {
+ if (temporal_dependencies_[pattern_idx_].find(dependencies[i]) ==
+ temporal_dependencies_[pattern_idx_].end()) {
+ RTC_LOG(LS_ERROR)
+ << "Illegal temporal dependency out of defined pattern "
+ "from position "
+ << static_cast<int>(pattern_idx_) << " to position "
+ << static_cast<int>(dependencies[i]);
+ return false;
+ }
+ }
+ }
+
+ if (frame_config.last_buffer_flags & BufferFlags::kUpdate) {
+ last_.is_updated_this_cycle = true;
+ last_.pattern_idx = pattern_idx_;
+ last_.is_keyframe = false;
+ }
+ if (frame_config.arf_buffer_flags & BufferFlags::kUpdate) {
+ arf_.is_updated_this_cycle = true;
+ arf_.pattern_idx = pattern_idx_;
+ arf_.is_keyframe = false;
+ }
+ if (frame_config.golden_buffer_flags & BufferFlags::kUpdate) {
+ golden_.is_updated_this_cycle = true;
+ golden_.pattern_idx = pattern_idx_;
+ golden_.is_keyframe = false;
+ }
+ return true;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/vp8/default_temporal_layers.h b/third_party/libwebrtc/modules/video_coding/codecs/vp8/default_temporal_layers.h
new file mode 100644
index 0000000000..bc6574c54c
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/vp8/default_temporal_layers.h
@@ -0,0 +1,168 @@
+/* Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+/*
+ * This file defines classes for doing temporal layers with VP8.
+ */
+#ifndef MODULES_VIDEO_CODING_CODECS_VP8_DEFAULT_TEMPORAL_LAYERS_H_
+#define MODULES_VIDEO_CODING_CODECS_VP8_DEFAULT_TEMPORAL_LAYERS_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <bitset>
+#include <deque>
+#include <limits>
+#include <memory>
+#include <set>
+#include <utility>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/video_codecs/vp8_frame_config.h"
+#include "api/video_codecs/vp8_temporal_layers.h"
+#include "modules/video_coding/codecs/vp8/include/temporal_layers_checker.h"
+#include "modules/video_coding/include/video_codec_interface.h"
+
+namespace webrtc {
+
+class DefaultTemporalLayers final : public Vp8FrameBufferController {
+ public:
+ explicit DefaultTemporalLayers(int number_of_temporal_layers);
+ ~DefaultTemporalLayers() override;
+
+ void SetQpLimits(size_t stream_index, int min_qp, int max_qp) override;
+
+ size_t StreamCount() const override;
+
+ bool SupportsEncoderFrameDropping(size_t stream_index) const override;
+
+ // Returns the recommended VP8 encode flags needed. May refresh the decoder
+ // and/or update the reference buffers.
+ Vp8FrameConfig NextFrameConfig(size_t stream_index,
+ uint32_t timestamp) override;
+
+ // New target bitrate, per temporal layer.
+ void OnRatesUpdated(size_t stream_index,
+ const std::vector<uint32_t>& bitrates_bps,
+ int framerate_fps) override;
+
+ Vp8EncoderConfig UpdateConfiguration(size_t stream_index) override;
+
+  // Callback methods on frame completion. OnEncodeDone() or OnFrameDropped()
+ // should be called once for each NextFrameConfig() call (using the RTP
+ // timestamp as ID), and the calls MUST be in the same order.
+ void OnEncodeDone(size_t stream_index,
+ uint32_t rtp_timestamp,
+ size_t size_bytes,
+ bool is_keyframe,
+ int qp,
+ CodecSpecificInfo* info) override;
+ void OnFrameDropped(size_t stream_index, uint32_t rtp_timestamp) override;
+
+ void OnPacketLossRateUpdate(float packet_loss_rate) override;
+
+ void OnRttUpdate(int64_t rtt_ms) override;
+
+ void OnLossNotification(
+ const VideoEncoder::LossNotification& loss_notification) override;
+
+ private:
+ static constexpr size_t kNumReferenceBuffers = 3; // Last, golden, altref.
+ struct DependencyInfo {
+ DependencyInfo() = default;
+ DependencyInfo(absl::string_view indication_symbols,
+ Vp8FrameConfig frame_config)
+ : decode_target_indications(
+ webrtc_impl::StringToDecodeTargetIndications(indication_symbols)),
+ frame_config(frame_config) {}
+
+ absl::InlinedVector<DecodeTargetIndication, 10> decode_target_indications;
+ Vp8FrameConfig frame_config;
+ };
+ struct PendingFrame {
+ PendingFrame();
+ PendingFrame(uint32_t timestamp,
+ bool expired,
+ uint8_t updated_buffers_mask,
+ const DependencyInfo& dependency_info);
+ uint32_t timestamp = 0;
+    // Flag indicating if this frame has expired, i.e. it belongs to a
+    // previous iteration of the temporal pattern.
+ bool expired = false;
+ // Bitmask of Vp8BufferReference flags, indicating which buffers this frame
+ // updates.
+ uint8_t updated_buffer_mask = 0;
+ // The frame config returned by NextFrameConfig() for this frame.
+ DependencyInfo dependency_info;
+ };
+
+ static std::vector<DependencyInfo> GetDependencyInfo(size_t num_layers);
+ static std::bitset<kNumReferenceBuffers> DetermineStaticBuffers(
+ const std::vector<DependencyInfo>& temporal_pattern);
+ bool IsSyncFrame(const Vp8FrameConfig& config) const;
+ void ValidateReferences(Vp8FrameConfig::BufferFlags* flags,
+ Vp8FrameConfig::Vp8BufferReference ref) const;
+ void UpdateSearchOrder(Vp8FrameConfig* config);
+ size_t NumFramesSinceBufferRefresh(
+ Vp8FrameConfig::Vp8BufferReference ref) const;
+ void ResetNumFramesSinceBufferRefresh(Vp8FrameConfig::Vp8BufferReference ref);
+ void CullPendingFramesBefore(uint32_t timestamp);
+
+ const size_t num_layers_;
+ const std::vector<unsigned int> temporal_ids_;
+ const std::vector<DependencyInfo> temporal_pattern_;
+ // Per reference buffer flag indicating if it is static, meaning it is only
+ // updated by key-frames.
+ const std::bitset<kNumReferenceBuffers> is_static_buffer_;
+ FrameDependencyStructure GetTemplateStructure(int num_layers) const;
+
+ uint8_t pattern_idx_;
+ // Updated cumulative bitrates, per temporal layer.
+ absl::optional<std::vector<uint32_t>> new_bitrates_bps_;
+
+  // Status for each pending frame, in encode order (oldest first).
+ std::deque<PendingFrame> pending_frames_;
+
+  // One counter per reference buffer, indicating the number of frames since
+  // the last refresh. For non-base-layer buffers (i.e. golden and altref),
+  // this is reset when the pattern loops.
+ std::array<size_t, kNumReferenceBuffers> frames_since_buffer_refresh_;
+
+ // Optional utility used to verify reference validity.
+ std::unique_ptr<TemporalLayersChecker> checker_;
+};
+
+class DefaultTemporalLayersChecker : public TemporalLayersChecker {
+ public:
+ explicit DefaultTemporalLayersChecker(int number_of_temporal_layers);
+ ~DefaultTemporalLayersChecker() override;
+
+ bool CheckTemporalConfig(bool frame_is_keyframe,
+ const Vp8FrameConfig& frame_config) override;
+
+ private:
+ struct BufferState {
+ BufferState()
+ : is_updated_this_cycle(false), is_keyframe(true), pattern_idx(0) {}
+
+ bool is_updated_this_cycle;
+ bool is_keyframe;
+ uint8_t pattern_idx;
+ };
+ const size_t num_layers_;
+ std::vector<unsigned int> temporal_ids_;
+ const std::vector<std::set<uint8_t>> temporal_dependencies_;
+ BufferState last_;
+ BufferState arf_;
+ BufferState golden_;
+ uint8_t pattern_idx_;
+};
+
+} // namespace webrtc
+#endif // MODULES_VIDEO_CODING_CODECS_VP8_DEFAULT_TEMPORAL_LAYERS_H_
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/vp8/default_temporal_layers_unittest.cc b/third_party/libwebrtc/modules/video_coding/codecs/vp8/default_temporal_layers_unittest.cc
new file mode 100644
index 0000000000..ae027a9d8a
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/vp8/default_temporal_layers_unittest.cc
@@ -0,0 +1,781 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/codecs/vp8/default_temporal_layers.h"
+
+#include <cstdint>
+#include <memory>
+
+#include "api/video/video_bitrate_allocation.h"
+#include "api/video_codecs/video_codec.h"
+#include "api/video_codecs/vp8_frame_config.h"
+#include "modules/video_coding/codecs/vp8/libvpx_vp8_encoder.h"
+#include "modules/video_coding/include/video_codec_interface.h"
+#include "modules/video_coding/utility/simulcast_rate_allocator.h"
+#include "test/field_trial.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "vpx/vp8cx.h"
+
+// TODO(bugs.webrtc.org/10582): Test the behavior of UpdateConfiguration().
+
+namespace webrtc {
+namespace test {
+namespace {
+
+using ::testing::Each;
+
+enum {
+ kTemporalUpdateLast = VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF |
+ VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_REF_ARF,
+ kTemporalUpdateGoldenWithoutDependency =
+ VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_UPD_ARF |
+ VP8_EFLAG_NO_UPD_LAST,
+ kTemporalUpdateGolden =
+ VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_LAST,
+ kTemporalUpdateAltrefWithoutDependency =
+ VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_UPD_GF |
+ VP8_EFLAG_NO_UPD_LAST,
+ kTemporalUpdateAltref = VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_LAST,
+ kTemporalUpdateNone = VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF |
+ VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_ENTROPY,
+ kTemporalUpdateNoneNoRefAltRef =
+ VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF |
+ VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_ENTROPY,
+ kTemporalUpdateNoneNoRefGolden =
+ VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF |
+ VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_ENTROPY,
+ kTemporalUpdateNoneNoRefGoldenAltRef =
+ VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_REF_ARF |
+ VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_ENTROPY,
+ kTemporalUpdateGoldenWithoutDependencyRefAltRef =
+ VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_LAST,
+ kTemporalUpdateGoldenRefAltRef = VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_LAST,
+ kTemporalUpdateLastRefAltRef =
+ VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_REF_GF,
+ kTemporalUpdateLastAndGoldenRefAltRef =
+ VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_REF_GF,
+};
+
+using BufferFlags = Vp8FrameConfig::BufferFlags;
+using Vp8BufferReference = Vp8FrameConfig::Vp8BufferReference;
+
+constexpr uint8_t kNone = static_cast<uint8_t>(Vp8BufferReference::kNone);
+constexpr uint8_t kLast = static_cast<uint8_t>(Vp8BufferReference::kLast);
+constexpr uint8_t kGolden = static_cast<uint8_t>(Vp8BufferReference::kGolden);
+constexpr uint8_t kAltref = static_cast<uint8_t>(Vp8BufferReference::kAltref);
+constexpr uint8_t kAll = kLast | kGolden | kAltref;
+
+constexpr int ToVp8CodecFlags(uint8_t referenced_buffers,
+ uint8_t updated_buffers,
+ bool update_entropy) {
+ return (((referenced_buffers & kLast) == 0) ? VP8_EFLAG_NO_REF_LAST : 0) |
+ (((referenced_buffers & kGolden) == 0) ? VP8_EFLAG_NO_REF_GF : 0) |
+ (((referenced_buffers & kAltref) == 0) ? VP8_EFLAG_NO_REF_ARF : 0) |
+ (((updated_buffers & kLast) == 0) ? VP8_EFLAG_NO_UPD_LAST : 0) |
+ (((updated_buffers & kGolden) == 0) ? VP8_EFLAG_NO_UPD_GF : 0) |
+ (((updated_buffers & kAltref) == 0) ? VP8_EFLAG_NO_UPD_ARF : 0) |
+ (update_entropy ? 0 : VP8_EFLAG_NO_UPD_ENTROPY);
+}
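+
+// Illustrative sanity checks: the constexpr helper above should reproduce the
+// hand-written flag combinations from the enum at the top of this file.
+static_assert(ToVp8CodecFlags(kLast, kLast, true) == kTemporalUpdateLast,
+              "last-only frame flags mismatch");
+static_assert(ToVp8CodecFlags(kAll, kNone, false) == kTemporalUpdateNone,
+              "non-updating frame flags mismatch");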
+
+constexpr int kKeyFrameFlags = ToVp8CodecFlags(kNone, kAll, true);
+
+std::vector<uint32_t> GetTemporalLayerRates(int target_bitrate_kbps,
+ int framerate_fps,
+ int num_temporal_layers) {
+ VideoCodec codec;
+ codec.codecType = VideoCodecType::kVideoCodecVP8;
+ codec.numberOfSimulcastStreams = 1;
+ codec.maxBitrate = target_bitrate_kbps;
+ codec.maxFramerate = framerate_fps;
+ codec.simulcastStream[0].targetBitrate = target_bitrate_kbps;
+ codec.simulcastStream[0].maxBitrate = target_bitrate_kbps;
+ codec.simulcastStream[0].numberOfTemporalLayers = num_temporal_layers;
+ codec.simulcastStream[0].active = true;
+ SimulcastRateAllocator allocator(codec);
+ return allocator
+ .Allocate(
+ VideoBitrateAllocationParameters(target_bitrate_kbps, framerate_fps))
+ .GetTemporalLayerAllocation(0);
+}
+
+constexpr int kDefaultBitrateBps = 500;
+constexpr int kDefaultFramerate = 30;
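+// With integer division the constant below evaluates to
+// (500 / 8) / 30 = 62 / 30 = 2 bytes per frame.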
+constexpr int kDefaultBytesPerFrame =
+ (kDefaultBitrateBps / 8) / kDefaultFramerate;
+constexpr int kDefaultQp = 2;
+} // namespace
+
+class TemporalLayersTest : public ::testing::Test {
+ public:
+ ~TemporalLayersTest() override = default;
+
+ CodecSpecificInfo* IgnoredCodecSpecificInfo() {
+ codec_specific_info_ = std::make_unique<CodecSpecificInfo>();
+ return codec_specific_info_.get();
+ }
+
+ private:
+ std::unique_ptr<CodecSpecificInfo> codec_specific_info_;
+};
+
+TEST_F(TemporalLayersTest, 2Layers) {
+ constexpr int kNumLayers = 2;
+ DefaultTemporalLayers tl(kNumLayers);
+ DefaultTemporalLayersChecker checker(kNumLayers);
+ tl.OnRatesUpdated(0,
+ GetTemporalLayerRates(kDefaultBytesPerFrame,
+ kDefaultFramerate, kNumLayers),
+ kDefaultFramerate);
+ tl.UpdateConfiguration(0);
+
+ constexpr size_t kPatternSize = 4;
+ constexpr size_t kRepetitions = 4;
+
+ const int expected_flags[kPatternSize] = {
+ ToVp8CodecFlags(kLast, kLast, true),
+ ToVp8CodecFlags(kLast, kGolden, true),
+ ToVp8CodecFlags(kLast, kLast, true),
+ ToVp8CodecFlags(kLast | kGolden, kNone, false),
+ };
+ const int expected_temporal_idx[kPatternSize] = {0, 1, 0, 1};
+ const bool expected_layer_sync[kPatternSize] = {false, true, false, false};
+
+ uint32_t timestamp = 0;
+ for (size_t i = 0; i < kPatternSize * kRepetitions; ++i) {
+ const size_t ind = i % kPatternSize;
+ const bool is_keyframe = (i == 0);
+ CodecSpecificInfo info;
+ Vp8FrameConfig tl_config = tl.NextFrameConfig(0, timestamp);
+ EXPECT_EQ(is_keyframe ? kKeyFrameFlags : expected_flags[ind],
+ LibvpxVp8Encoder::EncodeFlags(tl_config))
+ << i;
+ tl.OnEncodeDone(0, timestamp, kDefaultBytesPerFrame, is_keyframe,
+ kDefaultQp, &info);
+ EXPECT_TRUE(checker.CheckTemporalConfig(is_keyframe, tl_config));
+ EXPECT_EQ(expected_temporal_idx[ind], info.codecSpecific.VP8.temporalIdx);
+ EXPECT_EQ(expected_temporal_idx[ind], tl_config.packetizer_temporal_idx);
+ EXPECT_EQ(expected_temporal_idx[ind], tl_config.encoder_layer_id);
+ EXPECT_EQ(is_keyframe || expected_layer_sync[ind],
+ info.codecSpecific.VP8.layerSync);
+ EXPECT_EQ(expected_layer_sync[ind], tl_config.layer_sync);
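+    // Advance one frame interval: 90000 RTP ticks/s / 30 fps = 3000 ticks.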
+ timestamp += 3000;
+ }
+}
+
+TEST_F(TemporalLayersTest, 3Layers) {
+ constexpr int kNumLayers = 3;
+ DefaultTemporalLayers tl(kNumLayers);
+ DefaultTemporalLayersChecker checker(kNumLayers);
+ tl.OnRatesUpdated(0,
+ GetTemporalLayerRates(kDefaultBytesPerFrame,
+ kDefaultFramerate, kNumLayers),
+ kDefaultFramerate);
+ tl.UpdateConfiguration(0);
+
+ int expected_flags[16] = {
+ kTemporalUpdateLast,
+ kTemporalUpdateNoneNoRefGoldenAltRef,
+ kTemporalUpdateGoldenWithoutDependency,
+ kTemporalUpdateNoneNoRefAltRef,
+ kTemporalUpdateLast,
+ kTemporalUpdateNoneNoRefAltRef,
+ kTemporalUpdateGolden,
+ kTemporalUpdateNoneNoRefAltRef,
+ kTemporalUpdateLast,
+ kTemporalUpdateNoneNoRefGoldenAltRef,
+ kTemporalUpdateGoldenWithoutDependency,
+ kTemporalUpdateNoneNoRefAltRef,
+ kTemporalUpdateLast,
+ kTemporalUpdateNoneNoRefAltRef,
+ kTemporalUpdateGolden,
+ kTemporalUpdateNoneNoRefAltRef,
+ };
+ int expected_temporal_idx[16] = {0, 2, 1, 2, 0, 2, 1, 2,
+ 0, 2, 1, 2, 0, 2, 1, 2};
+
+ bool expected_layer_sync[16] = {false, true, true, false, false, false,
+ false, false, false, true, true, false,
+ false, false, false, false};
+
+ unsigned int timestamp = 0;
+ for (int i = 0; i < 16; ++i) {
+ const bool is_keyframe = (i == 0);
+ CodecSpecificInfo info;
+ Vp8FrameConfig tl_config = tl.NextFrameConfig(0, timestamp);
+ EXPECT_EQ(is_keyframe ? kKeyFrameFlags : expected_flags[i],
+ LibvpxVp8Encoder::EncodeFlags(tl_config))
+ << i;
+ tl.OnEncodeDone(0, timestamp, kDefaultBytesPerFrame, is_keyframe,
+ kDefaultQp, &info);
+ EXPECT_TRUE(checker.CheckTemporalConfig(is_keyframe, tl_config));
+ EXPECT_EQ(expected_temporal_idx[i], info.codecSpecific.VP8.temporalIdx);
+ EXPECT_EQ(expected_temporal_idx[i], tl_config.packetizer_temporal_idx);
+ EXPECT_EQ(expected_temporal_idx[i], tl_config.encoder_layer_id);
+ EXPECT_EQ(is_keyframe || expected_layer_sync[i],
+ info.codecSpecific.VP8.layerSync);
+ EXPECT_EQ(expected_layer_sync[i], tl_config.layer_sync);
+ timestamp += 3000;
+ }
+}
+
+TEST_F(TemporalLayersTest, Alternative3Layers) {
+ constexpr int kNumLayers = 3;
+ ScopedFieldTrials field_trial("WebRTC-UseShortVP8TL3Pattern/Enabled/");
+ DefaultTemporalLayers tl(kNumLayers);
+ DefaultTemporalLayersChecker checker(kNumLayers);
+ tl.OnRatesUpdated(0,
+ GetTemporalLayerRates(kDefaultBytesPerFrame,
+ kDefaultFramerate, kNumLayers),
+ kDefaultFramerate);
+ tl.UpdateConfiguration(0);
+
+ int expected_flags[8] = {kTemporalUpdateLast,
+ kTemporalUpdateAltrefWithoutDependency,
+ kTemporalUpdateGoldenWithoutDependency,
+ kTemporalUpdateNone,
+ kTemporalUpdateLast,
+ kTemporalUpdateAltrefWithoutDependency,
+ kTemporalUpdateGoldenWithoutDependency,
+ kTemporalUpdateNone};
+ int expected_temporal_idx[8] = {0, 2, 1, 2, 0, 2, 1, 2};
+
+ bool expected_layer_sync[8] = {false, true, true, false,
+ false, true, true, false};
+
+ unsigned int timestamp = 0;
+ for (int i = 0; i < 8; ++i) {
+ const bool is_keyframe = (i == 0);
+ CodecSpecificInfo info;
+ Vp8FrameConfig tl_config = tl.NextFrameConfig(0, timestamp);
+ EXPECT_EQ(is_keyframe ? kKeyFrameFlags : expected_flags[i],
+ LibvpxVp8Encoder::EncodeFlags(tl_config))
+ << i;
+ tl.OnEncodeDone(0, timestamp, kDefaultBytesPerFrame, is_keyframe,
+ kDefaultQp, &info);
+ EXPECT_TRUE(checker.CheckTemporalConfig(is_keyframe, tl_config));
+ EXPECT_EQ(expected_temporal_idx[i], info.codecSpecific.VP8.temporalIdx);
+ EXPECT_EQ(expected_temporal_idx[i], tl_config.packetizer_temporal_idx);
+ EXPECT_EQ(expected_temporal_idx[i], tl_config.encoder_layer_id);
+ EXPECT_EQ(is_keyframe || expected_layer_sync[i],
+ info.codecSpecific.VP8.layerSync);
+ EXPECT_EQ(expected_layer_sync[i], tl_config.layer_sync);
+ timestamp += 3000;
+ }
+}
+
+TEST_F(TemporalLayersTest, SearchOrder) {
+ constexpr int kNumLayers = 3;
+ ScopedFieldTrials field_trial("WebRTC-UseShortVP8TL3Pattern/Enabled/");
+ DefaultTemporalLayers tl(kNumLayers);
+ DefaultTemporalLayersChecker checker(kNumLayers);
+ tl.OnRatesUpdated(0,
+ GetTemporalLayerRates(kDefaultBytesPerFrame,
+ kDefaultFramerate, kNumLayers),
+ kDefaultFramerate);
+ tl.UpdateConfiguration(0);
+
+ // Use a repeating pattern of tl 0, 2, 1, 2.
+ // Tl 0, 1, 2 update last, golden, altref respectively.
+
+ // Start with a key-frame. tl_config flags can be ignored.
+ uint32_t timestamp = 0;
+ Vp8FrameConfig tl_config = tl.NextFrameConfig(0, timestamp);
+ tl.OnEncodeDone(0, timestamp, kDefaultBytesPerFrame, true, kDefaultQp,
+ IgnoredCodecSpecificInfo());
+
+ // TL2 frame. First one only references TL0. Updates altref.
+ tl_config = tl.NextFrameConfig(0, ++timestamp);
+ tl.OnEncodeDone(0, timestamp, kDefaultBytesPerFrame, false, kDefaultQp,
+ IgnoredCodecSpecificInfo());
+ EXPECT_EQ(tl_config.first_reference, Vp8BufferReference::kLast);
+ EXPECT_EQ(tl_config.second_reference, Vp8BufferReference::kNone);
+
+  // TL1 frame. Can only reference TL0. Updates golden.
+ tl_config = tl.NextFrameConfig(0, ++timestamp);
+ tl.OnEncodeDone(0, timestamp, kDefaultBytesPerFrame, false, kDefaultQp,
+ IgnoredCodecSpecificInfo());
+ EXPECT_EQ(tl_config.first_reference, Vp8BufferReference::kLast);
+ EXPECT_EQ(tl_config.second_reference, Vp8BufferReference::kNone);
+
+  // TL2 frame. Can reference all three buffers. Golden was the most recently
+  // updated buffer; altref was updated before that.
+ tl_config = tl.NextFrameConfig(0, ++timestamp);
+ tl.OnEncodeDone(0, timestamp, kDefaultBytesPerFrame, false, kDefaultQp,
+ IgnoredCodecSpecificInfo());
+ EXPECT_EQ(tl_config.first_reference, Vp8BufferReference::kGolden);
+ EXPECT_EQ(tl_config.second_reference, Vp8BufferReference::kAltref);
+}
+
+TEST_F(TemporalLayersTest, SearchOrderWithDrop) {
+ constexpr int kNumLayers = 3;
+ ScopedFieldTrials field_trial("WebRTC-UseShortVP8TL3Pattern/Enabled/");
+ DefaultTemporalLayers tl(kNumLayers);
+ DefaultTemporalLayersChecker checker(kNumLayers);
+ tl.OnRatesUpdated(0,
+ GetTemporalLayerRates(kDefaultBytesPerFrame,
+ kDefaultFramerate, kNumLayers),
+ kDefaultFramerate);
+ tl.UpdateConfiguration(0);
+
+ // Use a repeating pattern of tl 0, 2, 1, 2.
+ // Tl 0, 1, 2 update last, golden, altref respectively.
+
+ // Start with a key-frame. tl_config flags can be ignored.
+ uint32_t timestamp = 0;
+ Vp8FrameConfig tl_config = tl.NextFrameConfig(0, timestamp);
+ tl.OnEncodeDone(0, timestamp, kDefaultBytesPerFrame, true, kDefaultQp,
+ IgnoredCodecSpecificInfo());
+
+ // TL2 frame. First one only references TL0. Updates altref.
+ tl_config = tl.NextFrameConfig(0, ++timestamp);
+ tl.OnEncodeDone(0, timestamp, kDefaultBytesPerFrame, false, kDefaultQp,
+ IgnoredCodecSpecificInfo());
+ EXPECT_EQ(tl_config.first_reference, Vp8BufferReference::kLast);
+ EXPECT_EQ(tl_config.second_reference, Vp8BufferReference::kNone);
+
+ // Dropped TL1 frame. Can only reference TL0. Should have updated golden.
+ tl_config = tl.NextFrameConfig(0, ++timestamp);
+ tl.OnEncodeDone(0, timestamp, 0, false, 0, nullptr);
+
+  // TL2 frame. Can normally reference all three buffers, but golden has not
+  // been populated this cycle. Altref was the most recently updated buffer;
+  // last was updated before that.
+ tl_config = tl.NextFrameConfig(0, ++timestamp);
+ tl.OnEncodeDone(0, timestamp, kDefaultBytesPerFrame, false, kDefaultQp,
+ IgnoredCodecSpecificInfo());
+ EXPECT_EQ(tl_config.first_reference, Vp8BufferReference::kAltref);
+ EXPECT_EQ(tl_config.second_reference, Vp8BufferReference::kLast);
+}
+
+TEST_F(TemporalLayersTest, DoesNotReferenceDroppedFrames) {
+ constexpr int kNumLayers = 3;
+ // Use a repeating pattern of tl 0, 2, 1, 2.
+ // Tl 0, 1, 2 update last, golden, altref respectively.
+ ScopedFieldTrials field_trial("WebRTC-UseShortVP8TL3Pattern/Enabled/");
+ DefaultTemporalLayers tl(kNumLayers);
+ DefaultTemporalLayersChecker checker(kNumLayers);
+ tl.OnRatesUpdated(0,
+ GetTemporalLayerRates(kDefaultBytesPerFrame,
+ kDefaultFramerate, kNumLayers),
+ kDefaultFramerate);
+ tl.UpdateConfiguration(0);
+
+ // Start with a keyframe.
+ uint32_t timestamp = 0;
+ Vp8FrameConfig tl_config = tl.NextFrameConfig(0, timestamp);
+ tl.OnEncodeDone(0, timestamp, kDefaultBytesPerFrame, true, kDefaultQp,
+ IgnoredCodecSpecificInfo());
+
+ // Dropped TL2 frame.
+ tl_config = tl.NextFrameConfig(0, ++timestamp);
+ tl.OnEncodeDone(0, timestamp, 0, false, 0, nullptr);
+
+ // Dropped TL1 frame.
+ tl_config = tl.NextFrameConfig(0, ++timestamp);
+ tl.OnEncodeDone(0, timestamp, 0, false, 0, nullptr);
+
+  // TL2 frame. Can reference all three buffers; this is valid since golden
+  // and altref both contain the last keyframe.
+ tl_config = tl.NextFrameConfig(0, ++timestamp);
+ tl.OnEncodeDone(0, timestamp, kDefaultBytesPerFrame, false, kDefaultQp,
+ IgnoredCodecSpecificInfo());
+ EXPECT_TRUE(tl_config.last_buffer_flags & BufferFlags::kReference);
+ EXPECT_TRUE(tl_config.golden_buffer_flags & BufferFlags::kReference);
+ EXPECT_TRUE(tl_config.arf_buffer_flags & BufferFlags::kReference);
+
+ // Restart of cycle!
+
+ // TL0 base layer frame, updating and referencing last.
+ tl_config = tl.NextFrameConfig(0, ++timestamp);
+ tl.OnEncodeDone(0, timestamp, kDefaultBytesPerFrame, false, kDefaultQp,
+ IgnoredCodecSpecificInfo());
+
+ // TL2 frame, updating altref.
+ tl_config = tl.NextFrameConfig(0, ++timestamp);
+ tl.OnEncodeDone(0, timestamp, kDefaultBytesPerFrame, false, kDefaultQp,
+ IgnoredCodecSpecificInfo());
+
+ // TL1 frame, updating golden.
+ tl_config = tl.NextFrameConfig(0, ++timestamp);
+ tl.OnEncodeDone(0, timestamp, kDefaultBytesPerFrame, false, kDefaultQp,
+ IgnoredCodecSpecificInfo());
+
+  // TL2 frame. Can still reference all buffers since they have been updated
+  // this cycle.
+ tl_config = tl.NextFrameConfig(0, ++timestamp);
+ tl.OnEncodeDone(0, timestamp, kDefaultBytesPerFrame, false, kDefaultQp,
+ IgnoredCodecSpecificInfo());
+ EXPECT_TRUE(tl_config.last_buffer_flags & BufferFlags::kReference);
+ EXPECT_TRUE(tl_config.golden_buffer_flags & BufferFlags::kReference);
+ EXPECT_TRUE(tl_config.arf_buffer_flags & BufferFlags::kReference);
+
+ // Restart of cycle!
+
+ // TL0 base layer frame, updating and referencing last.
+ tl_config = tl.NextFrameConfig(0, ++timestamp);
+ tl.OnEncodeDone(0, timestamp, kDefaultBytesPerFrame, false, kDefaultQp,
+ IgnoredCodecSpecificInfo());
+
+ // Dropped TL2 frame.
+ tl_config = tl.NextFrameConfig(0, ++timestamp);
+ tl.OnEncodeDone(0, timestamp, 0, false, 0, nullptr);
+
+ // Dropped TL1 frame.
+ tl_config = tl.NextFrameConfig(0, ++timestamp);
+ tl.OnEncodeDone(0, timestamp, 0, false, 0, nullptr);
+
+ // TL2 frame. This time golden and altref contain data from the previous cycle
+ // and cannot be referenced.
+ tl_config = tl.NextFrameConfig(0, ++timestamp);
+ tl.OnEncodeDone(0, timestamp, kDefaultBytesPerFrame, false, kDefaultQp,
+ IgnoredCodecSpecificInfo());
+ EXPECT_TRUE(tl_config.last_buffer_flags & BufferFlags::kReference);
+ EXPECT_FALSE(tl_config.golden_buffer_flags & BufferFlags::kReference);
+ EXPECT_FALSE(tl_config.arf_buffer_flags & BufferFlags::kReference);
+}
+
+TEST_F(TemporalLayersTest, DoesNotReferenceUnlessGuaranteedToExist) {
+ constexpr int kNumLayers = 3;
+ // Use a repeating pattern of tl 0, 2, 1, 2.
+  // Tl 0, 1 update last, golden respectively. Altref always holds the last
+  // keyframe.
+ DefaultTemporalLayers tl(kNumLayers);
+ DefaultTemporalLayersChecker checker(kNumLayers);
+ tl.OnRatesUpdated(0,
+ GetTemporalLayerRates(kDefaultBytesPerFrame,
+ kDefaultFramerate, kNumLayers),
+ kDefaultFramerate);
+ tl.UpdateConfiguration(0);
+
+ // Start with a keyframe.
+ uint32_t timestamp = 0;
+ Vp8FrameConfig tl_config = tl.NextFrameConfig(0, timestamp);
+ tl.OnEncodeDone(0, timestamp, kDefaultBytesPerFrame, true, kDefaultQp,
+ IgnoredCodecSpecificInfo());
+
+ // Do a full cycle of the pattern.
+ for (int i = 0; i < 7; ++i) {
+ tl_config = tl.NextFrameConfig(0, ++timestamp);
+ tl.OnEncodeDone(0, timestamp, kDefaultBytesPerFrame, false, kDefaultQp,
+ IgnoredCodecSpecificInfo());
+ }
+
+ // TL0 base layer frame, starting the cycle over.
+ tl_config = tl.NextFrameConfig(0, ++timestamp);
+ tl.OnEncodeDone(0, timestamp, kDefaultBytesPerFrame, false, kDefaultQp,
+ IgnoredCodecSpecificInfo());
+
+ // TL2 frame.
+ tl_config = tl.NextFrameConfig(0, ++timestamp);
+ tl.OnEncodeDone(0, timestamp, kDefaultBytesPerFrame, false, kDefaultQp,
+ IgnoredCodecSpecificInfo());
+
+ // Encoder has a hiccup and builds a queue, so frame encoding is delayed.
+ // TL1 frame, updating golden.
+ tl_config = tl.NextFrameConfig(0, ++timestamp);
+
+  // TL2 frame that would normally reference golden, but since we can't be
+  // certain the enqueued TL1 frame won't be dropped, that is not allowed.
+ tl_config = tl.NextFrameConfig(0, timestamp + 1);
+ EXPECT_TRUE(tl_config.last_buffer_flags & BufferFlags::kReference);
+ EXPECT_FALSE(tl_config.golden_buffer_flags & BufferFlags::kReference);
+ EXPECT_FALSE(tl_config.arf_buffer_flags & BufferFlags::kReference);
+
+ // TL0 base layer frame.
+ tl_config = tl.NextFrameConfig(0, timestamp + 2);
+
+  // The previous three enqueued frames finally get encoded, and the updated
+  // buffers are now OK to reference.
+ // Enqueued TL1 frame ready.
+ tl.OnEncodeDone(0, timestamp, kDefaultBytesPerFrame, false, kDefaultQp,
+ IgnoredCodecSpecificInfo());
+ // Enqueued TL2 frame.
+ tl.OnEncodeDone(0, ++timestamp, kDefaultBytesPerFrame, false, kDefaultQp,
+ IgnoredCodecSpecificInfo());
+ // Enqueued TL0 frame.
+ tl.OnEncodeDone(0, ++timestamp, kDefaultBytesPerFrame, false, kDefaultQp,
+ IgnoredCodecSpecificInfo());
+
+ // TL2 frame, all buffers are now in a known good state, OK to reference.
+ tl_config = tl.NextFrameConfig(0, ++timestamp + 1);
+ EXPECT_TRUE(tl_config.last_buffer_flags & BufferFlags::kReference);
+ EXPECT_TRUE(tl_config.golden_buffer_flags & BufferFlags::kReference);
+ EXPECT_FALSE(tl_config.arf_buffer_flags & BufferFlags::kReference);
+}
+
+TEST_F(TemporalLayersTest, DoesNotReferenceUnlessGuaranteedToExistLongDelay) {
+ constexpr int kNumLayers = 3;
+ // Use a repeating pattern of tl 0, 2, 1, 2.
+  // Tl 0, 1, 2 update last, golden, altref respectively.
+ ScopedFieldTrials field_trial("WebRTC-UseShortVP8TL3Pattern/Enabled/");
+ DefaultTemporalLayers tl(kNumLayers);
+ DefaultTemporalLayersChecker checker(kNumLayers);
+ tl.OnRatesUpdated(0,
+ GetTemporalLayerRates(kDefaultBytesPerFrame,
+ kDefaultFramerate, kNumLayers),
+ kDefaultFramerate);
+ tl.UpdateConfiguration(0);
+
+ // Start with a keyframe.
+ uint32_t timestamp = 0;
+ Vp8FrameConfig tl_config = tl.NextFrameConfig(0, timestamp);
+ tl.OnEncodeDone(0, timestamp, kDefaultBytesPerFrame, true, kDefaultQp,
+ IgnoredCodecSpecificInfo());
+
+ // Do a full cycle of the pattern.
+ for (int i = 0; i < 3; ++i) {
+ tl_config = tl.NextFrameConfig(0, ++timestamp);
+ tl.OnEncodeDone(0, timestamp, kDefaultBytesPerFrame, false, kDefaultQp,
+ IgnoredCodecSpecificInfo());
+ }
+
+ // TL0 base layer frame, starting the cycle over.
+ tl_config = tl.NextFrameConfig(0, ++timestamp);
+ tl.OnEncodeDone(0, timestamp, kDefaultBytesPerFrame, false, kDefaultQp,
+ IgnoredCodecSpecificInfo());
+
+ // TL2 frame.
+ tl_config = tl.NextFrameConfig(0, ++timestamp);
+ tl.OnEncodeDone(0, timestamp, kDefaultBytesPerFrame, false, kDefaultQp,
+ IgnoredCodecSpecificInfo());
+
+ // Encoder has a hiccup and builds a queue, so frame encoding is delayed.
+ // Encoded, but delayed frames in TL 1, 2.
+ tl_config = tl.NextFrameConfig(0, timestamp + 1);
+ tl_config = tl.NextFrameConfig(0, timestamp + 2);
+
+ // Restart of the pattern!
+
+ // Encoded, but delayed frames in TL 2, 1.
+ tl_config = tl.NextFrameConfig(0, timestamp + 3);
+ tl_config = tl.NextFrameConfig(0, timestamp + 4);
+
+ // TL1 frame from last cycle is ready.
+ tl.OnEncodeDone(0, timestamp + 1, kDefaultBytesPerFrame, false, kDefaultQp,
+ IgnoredCodecSpecificInfo());
+ // TL2 frame from last cycle is ready.
+ tl.OnEncodeDone(0, timestamp + 2, kDefaultBytesPerFrame, false, kDefaultQp,
+ IgnoredCodecSpecificInfo());
+
+  // TL2 frame that would normally reference all buffers, but altref and golden
+  // have not been updated this cycle. (Don't be fooled by the late frames from
+  // the last cycle!)
+ tl_config = tl.NextFrameConfig(0, timestamp + 5);
+ EXPECT_TRUE(tl_config.last_buffer_flags & BufferFlags::kReference);
+ EXPECT_FALSE(tl_config.golden_buffer_flags & BufferFlags::kReference);
+ EXPECT_FALSE(tl_config.arf_buffer_flags & BufferFlags::kReference);
+}
+
+TEST_F(TemporalLayersTest, KeyFrame) {
+ constexpr int kNumLayers = 3;
+ DefaultTemporalLayers tl(kNumLayers);
+ DefaultTemporalLayersChecker checker(kNumLayers);
+ tl.OnRatesUpdated(0,
+ GetTemporalLayerRates(kDefaultBytesPerFrame,
+ kDefaultFramerate, kNumLayers),
+ kDefaultFramerate);
+ tl.UpdateConfiguration(0);
+
+ int expected_flags[8] = {
+ kTemporalUpdateLastRefAltRef,
+ kTemporalUpdateNoneNoRefGoldenAltRef,
+ kTemporalUpdateGoldenWithoutDependency,
+ kTemporalUpdateNoneNoRefAltRef,
+ kTemporalUpdateLast,
+ kTemporalUpdateNoneNoRefAltRef,
+ kTemporalUpdateGolden,
+ kTemporalUpdateNone,
+ };
+ int expected_temporal_idx[8] = {0, 2, 1, 2, 0, 2, 1, 2};
+ bool expected_layer_sync[8] = {true, true, true, false,
+ false, false, false, false};
+
+ uint32_t timestamp = 0;
+ for (int i = 0; i < 7; ++i) {
+    // Temporal pattern starts from 0 after a key frame. Let the first `i`
+    // frames after the previous key frame be delta frames, then encode a new
+    // key frame.
+ for (int j = 1; j <= i; ++j) {
+ // Since last frame was always a keyframe and thus index 0 in the pattern,
+ // this loop starts at index 1.
+ Vp8FrameConfig tl_config = tl.NextFrameConfig(0, timestamp);
+ EXPECT_EQ(expected_flags[j], LibvpxVp8Encoder::EncodeFlags(tl_config))
+ << j;
+ tl.OnEncodeDone(0, timestamp, kDefaultBytesPerFrame, false, kDefaultQp,
+ IgnoredCodecSpecificInfo());
+ EXPECT_TRUE(checker.CheckTemporalConfig(false, tl_config));
+ EXPECT_EQ(expected_temporal_idx[j], tl_config.packetizer_temporal_idx);
+ EXPECT_EQ(expected_temporal_idx[j], tl_config.encoder_layer_id);
+ EXPECT_EQ(expected_layer_sync[j], tl_config.layer_sync);
+ timestamp += 3000;
+ }
+
+ CodecSpecificInfo info;
+ Vp8FrameConfig tl_config = tl.NextFrameConfig(0, timestamp);
+ tl.OnEncodeDone(0, timestamp, kDefaultBytesPerFrame, true, kDefaultQp,
+ &info);
+ EXPECT_TRUE(info.codecSpecific.VP8.layerSync)
+ << "Key frame should be marked layer sync.";
+ EXPECT_EQ(0, info.codecSpecific.VP8.temporalIdx)
+ << "Key frame should always be packetized as layer 0";
+ EXPECT_EQ(0, info.generic_frame_info->temporal_id)
+ << "Key frame should always be packetized as layer 0";
+ EXPECT_THAT(info.generic_frame_info->decode_target_indications,
+ Each(DecodeTargetIndication::kSwitch))
+ << "Key frame is universal switch";
+ EXPECT_TRUE(checker.CheckTemporalConfig(true, tl_config));
+ }
+}
+
+TEST_F(TemporalLayersTest, SetsTlCountOnFirstConfigUpdate) {
+ // Create an instance and fetch config update without setting any rate.
+ constexpr int kNumLayers = 2;
+ DefaultTemporalLayers tl(kNumLayers);
+ Vp8EncoderConfig config = tl.UpdateConfiguration(0);
+
+ // Config should indicate correct number of temporal layers, but zero bitrate.
+ ASSERT_TRUE(config.temporal_layer_config.has_value());
+ EXPECT_EQ(config.temporal_layer_config->ts_number_layers,
+ uint32_t{kNumLayers});
+ std::array<uint32_t, Vp8EncoderConfig::TemporalLayerConfig::kMaxLayers>
+ kZeroRate = {};
+ EXPECT_EQ(config.temporal_layer_config->ts_target_bitrate, kZeroRate);
+
+ // On second call, no new update.
+ config = tl.UpdateConfiguration(0);
+ EXPECT_FALSE(config.temporal_layer_config.has_value());
+}
+
+class TemporalLayersReferenceTest : public TemporalLayersTest,
+ public ::testing::WithParamInterface<int> {
+ public:
+ TemporalLayersReferenceTest()
+ : timestamp_(1),
+ last_sync_timestamp_(timestamp_),
+ tl0_reference_(nullptr) {}
+ virtual ~TemporalLayersReferenceTest() {}
+
+ protected:
+ static const int kMaxPatternLength = 32;
+
+ struct BufferState {
+ BufferState() : BufferState(-1, 0, false) {}
+ BufferState(int temporal_idx, uint32_t timestamp, bool sync)
+ : temporal_idx(temporal_idx), timestamp(timestamp), sync(sync) {}
+ int temporal_idx;
+ uint32_t timestamp;
+ bool sync;
+ };
+
+ bool UpdateSyncRefState(const BufferFlags& flags, BufferState* buffer_state) {
+ if (flags & BufferFlags::kReference) {
+ if (buffer_state->temporal_idx == -1)
+ return true; // References key-frame.
+ if (buffer_state->temporal_idx == 0) {
+ // No more than one reference to TL0 frame.
+ EXPECT_EQ(nullptr, tl0_reference_);
+ tl0_reference_ = buffer_state;
+ return true;
+ }
+ return false; // References higher layer.
+ }
+ return true; // No reference, does not affect sync frame status.
+ }
+
+ void ValidateReference(const BufferFlags& flags,
+ const BufferState& buffer_state,
+ int temporal_layer) {
+ if (flags & BufferFlags::kReference) {
+ if (temporal_layer > 0 && buffer_state.timestamp > 0) {
+ // Check that high layer reference does not go past last sync frame.
+ EXPECT_GE(buffer_state.timestamp, last_sync_timestamp_);
+ }
+ // No reference to buffer in higher layer.
+ EXPECT_LE(buffer_state.temporal_idx, temporal_layer);
+ }
+ }
+
+ uint32_t timestamp_ = 1;
+ uint32_t last_sync_timestamp_ = timestamp_;
+ BufferState* tl0_reference_;
+
+ BufferState last_state;
+ BufferState golden_state;
+ BufferState altref_state;
+};
+
+INSTANTIATE_TEST_SUITE_P(DefaultTemporalLayersTest,
+ TemporalLayersReferenceTest,
+ ::testing::Range(1, kMaxTemporalStreams + 1));
+
+TEST_P(TemporalLayersReferenceTest, ValidFrameConfigs) {
+ const int num_layers = GetParam();
+ DefaultTemporalLayers tl(num_layers);
+ tl.OnRatesUpdated(
+ 0, GetTemporalLayerRates(kDefaultBytesPerFrame, kDefaultFramerate, 1),
+ kDefaultFramerate);
+ tl.UpdateConfiguration(0);
+
+ // Run through the pattern and store the frame dependencies, plus keep track
+  // of the buffer state: which buffer references which temporal layer (if
+  // any). If a given buffer is never updated, it is legal to reference it
+ // even for sync frames. In order to be general, don't assume TL0 always
+ // updates `last`.
+ std::vector<Vp8FrameConfig> tl_configs(kMaxPatternLength);
+ for (int i = 0; i < kMaxPatternLength; ++i) {
+ Vp8FrameConfig tl_config = tl.NextFrameConfig(0, timestamp_);
+ tl.OnEncodeDone(0, timestamp_, kDefaultBytesPerFrame, i == 0, kDefaultQp,
+ IgnoredCodecSpecificInfo());
+ ++timestamp_;
+ EXPECT_FALSE(tl_config.drop_frame);
+ tl_configs.push_back(tl_config);
+ int temporal_idx = tl_config.encoder_layer_id;
+ // For the default layers, always keep encoder and rtp layers in sync.
+ EXPECT_EQ(tl_config.packetizer_temporal_idx, temporal_idx);
+
+    // Determine if this frame is in a higher layer but references only TL0
+    // or untouched buffers; if so, verify it is marked as a layer sync.
+ bool is_sync_frame = true;
+ tl0_reference_ = nullptr;
+ if (temporal_idx <= 0) {
+ is_sync_frame = false; // TL0 by definition not a sync frame.
+ } else if (!UpdateSyncRefState(tl_config.last_buffer_flags, &last_state)) {
+ is_sync_frame = false;
+ } else if (!UpdateSyncRefState(tl_config.golden_buffer_flags,
+ &golden_state)) {
+ is_sync_frame = false;
+ } else if (!UpdateSyncRefState(tl_config.arf_buffer_flags, &altref_state)) {
+ is_sync_frame = false;
+ }
+ if (is_sync_frame) {
+ // Cache timestamp for last found sync frame, so that we can verify no
+ // references back past this frame.
+ ASSERT_TRUE(tl0_reference_);
+ last_sync_timestamp_ = tl0_reference_->timestamp;
+ }
+ EXPECT_EQ(tl_config.layer_sync, is_sync_frame);
+
+    // Validate that no frame references a buffer from a higher temporal
+    // layer, or backwards past the last sync frame.
+ ValidateReference(tl_config.last_buffer_flags, last_state, temporal_idx);
+ ValidateReference(tl_config.golden_buffer_flags, golden_state,
+ temporal_idx);
+ ValidateReference(tl_config.arf_buffer_flags, altref_state, temporal_idx);
+
+ // Update the current layer state.
+ BufferState state = {temporal_idx, timestamp_, is_sync_frame};
+ if (tl_config.last_buffer_flags & BufferFlags::kUpdate)
+ last_state = state;
+ if (tl_config.golden_buffer_flags & BufferFlags::kUpdate)
+ golden_state = state;
+ if (tl_config.arf_buffer_flags & BufferFlags::kUpdate)
+ altref_state = state;
+ }
+}
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/vp8/include/temporal_layers_checker.h b/third_party/libwebrtc/modules/video_coding/codecs/vp8/include/temporal_layers_checker.h
new file mode 100644
index 0000000000..3d1671a676
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/vp8/include/temporal_layers_checker.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_CODECS_VP8_INCLUDE_TEMPORAL_LAYERS_CHECKER_H_
+#define MODULES_VIDEO_CODING_CODECS_VP8_INCLUDE_TEMPORAL_LAYERS_CHECKER_H_
+
+#include <stdint.h>
+
+#include <memory>
+
+#include "api/video_codecs/vp8_frame_config.h"
+#include "api/video_codecs/vp8_temporal_layers.h"
+
+namespace webrtc {
+
+// Interface for a class that verifies correctness of temporal layer
+// configurations (dependencies, sync flag, etc.).
+// Intended to be used in tests as well as with real apps in debug mode.
+class TemporalLayersChecker {
+ public:
+ explicit TemporalLayersChecker(int num_temporal_layers);
+ virtual ~TemporalLayersChecker() {}
+
+ virtual bool CheckTemporalConfig(bool frame_is_keyframe,
+ const Vp8FrameConfig& frame_config);
+
+ static std::unique_ptr<TemporalLayersChecker> CreateTemporalLayersChecker(
+ Vp8TemporalLayersType type,
+ int num_temporal_layers);
+
+ private:
+ struct BufferState {
+ BufferState() : is_keyframe(true), temporal_layer(0), sequence_number(0) {}
+ bool is_keyframe;
+ uint8_t temporal_layer;
+ uint32_t sequence_number;
+ };
+ bool CheckAndUpdateBufferState(BufferState* state,
+ bool* need_sync,
+ bool frame_is_keyframe,
+ uint8_t temporal_layer,
+ Vp8FrameConfig::BufferFlags flags,
+ uint32_t sequence_number,
+ uint32_t* lowest_sequence_referenced);
+ BufferState last_;
+ BufferState arf_;
+ BufferState golden_;
+ int num_temporal_layers_;
+ uint32_t sequence_number_;
+ uint32_t last_sync_sequence_number_;
+ uint32_t last_tl0_sequence_number_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_CODECS_VP8_INCLUDE_TEMPORAL_LAYERS_CHECKER_H_
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/vp8/include/vp8.h b/third_party/libwebrtc/modules/video_coding/codecs/vp8/include/vp8.h
new file mode 100644
index 0000000000..2fc647874f
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/vp8/include/vp8.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_CODECS_VP8_INCLUDE_VP8_H_
+#define MODULES_VIDEO_CODING_CODECS_VP8_INCLUDE_VP8_H_
+
+#include <memory>
+#include <vector>
+
+#include "api/video_codecs/video_encoder.h"
+#include "api/video_codecs/vp8_frame_buffer_controller.h"
+#include "modules/video_coding/include/video_codec_interface.h"
+
+namespace webrtc {
+
+// TODO(brandtr): Move these interfaces to the api/ folder.
+class VP8Encoder {
+ public:
+ struct Settings {
+ // Allows for overriding the Vp8FrameBufferController used by the encoder.
+ // If unset, a default Vp8FrameBufferController will be instantiated
+ // internally.
+ std::unique_ptr<Vp8FrameBufferControllerFactory>
+ frame_buffer_controller_factory = nullptr;
+
+ // Allows for overriding the resolution/bitrate limits exposed through
+ // VideoEncoder::GetEncoderInfo(). No override is done if empty.
+ std::vector<VideoEncoder::ResolutionBitrateLimits>
+ resolution_bitrate_limits = {};
+ };
+
+ static std::unique_ptr<VideoEncoder> Create();
+ static std::unique_ptr<VideoEncoder> Create(Settings settings);
+};
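+
+// A minimal usage sketch (illustrative only; `my_factory` is hypothetical):
+//   std::unique_ptr<VideoEncoder> encoder = VP8Encoder::Create();
+// or, with custom settings:
+//   VP8Encoder::Settings settings;
+//   settings.frame_buffer_controller_factory = std::move(my_factory);
+//   std::unique_ptr<VideoEncoder> encoder =
+//       VP8Encoder::Create(std::move(settings));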
+
+class VP8Decoder {
+ public:
+ static std::unique_ptr<VideoDecoder> Create();
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_CODECS_VP8_INCLUDE_VP8_H_
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/vp8/include/vp8_globals.h b/third_party/libwebrtc/modules/video_coding/codecs/vp8/include/vp8_globals.h
new file mode 100644
index 0000000000..1fab5f45a6
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/vp8/include/vp8_globals.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+// This file contains codec dependent definitions that are needed in
+// order to compile the WebRTC codebase, even if this codec is not used.
+
+#ifndef MODULES_VIDEO_CODING_CODECS_VP8_INCLUDE_VP8_GLOBALS_H_
+#define MODULES_VIDEO_CODING_CODECS_VP8_INCLUDE_VP8_GLOBALS_H_
+
+#include "modules/video_coding/codecs/interface/common_constants.h"
+
+namespace webrtc {
+
+struct RTPVideoHeaderVP8 {
+ void InitRTPVideoHeaderVP8() {
+ nonReference = false;
+ pictureId = kNoPictureId;
+ tl0PicIdx = kNoTl0PicIdx;
+ temporalIdx = kNoTemporalIdx;
+ layerSync = false;
+ keyIdx = kNoKeyIdx;
+ partitionId = 0;
+ beginningOfPartition = false;
+ }
+
+ bool nonReference; // Frame is discardable.
+ int16_t pictureId; // Picture ID index, 15 bits;
+ // kNoPictureId if PictureID does not exist.
+ int16_t tl0PicIdx; // TL0PIC_IDX, 8 bits;
+ // kNoTl0PicIdx means no value provided.
+ uint8_t temporalIdx; // Temporal layer index, or kNoTemporalIdx.
+ bool layerSync; // This frame is a layer sync frame.
+ // Disabled if temporalIdx == kNoTemporalIdx.
+ int keyIdx; // 5 bits; kNoKeyIdx means not used.
+ int partitionId; // VP8 partition ID
+  bool beginningOfPartition;  // True if this packet is the first
+                              // in a VP8 partition. Otherwise false.
+};
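+
+// Illustrative usage sketch: the struct has no constructor, so call
+// InitRTPVideoHeaderVP8() before setting individual fields, e.g.
+//   RTPVideoHeaderVP8 vp8_header;
+//   vp8_header.InitRTPVideoHeaderVP8();
+//   vp8_header.temporalIdx = 1;
+//   vp8_header.layerSync = true;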
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_CODECS_VP8_INCLUDE_VP8_GLOBALS_H_
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/vp8/libvpx_vp8_decoder.cc b/third_party/libwebrtc/modules/video_coding/codecs/vp8/libvpx_vp8_decoder.cc
new file mode 100644
index 0000000000..5a3032af0c
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/vp8/libvpx_vp8_decoder.cc
@@ -0,0 +1,405 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/codecs/vp8/libvpx_vp8_decoder.h"
+
+#include <stdio.h>
+#include <string.h>
+
+#include <algorithm>
+#include <memory>
+#include <string>
+
+#include "absl/types/optional.h"
+#include "api/scoped_refptr.h"
+#include "api/video/i420_buffer.h"
+#include "api/video/video_frame.h"
+#include "api/video/video_frame_buffer.h"
+#include "api/video/video_rotation.h"
+#include "modules/video_coding/codecs/vp8/include/vp8.h"
+#include "modules/video_coding/include/video_error_codes.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/numerics/exp_filter.h"
+#include "rtc_base/time_utils.h"
+#include "system_wrappers/include/field_trial.h"
+#include "system_wrappers/include/metrics.h"
+#include "libyuv/include/libyuv/convert.h"
+#include "vpx/vp8.h"
+#include "vpx/vp8dx.h"
+#include "vpx/vpx_decoder.h"
+
+namespace webrtc {
+namespace {
+constexpr int kVp8ErrorPropagationTh = 30;
+// The vpx_decoder.h documentation indicates that the decode deadline is a
+// time in us, with "Set to zero for unlimited.", but the actual
+// implementation treats it as a mode, where 0 allows delay and 1 does not.
+constexpr long kDecodeDeadlineRealtime = 1; // NOLINT
+
+const char kVp8PostProcArmFieldTrial[] = "WebRTC-VP8-Postproc-Config-Arm";
+const char kVp8PostProcFieldTrial[] = "WebRTC-VP8-Postproc-Config";
+
+#if defined(WEBRTC_ARCH_ARM) || defined(WEBRTC_ARCH_ARM64) || \
+ defined(WEBRTC_ANDROID) || defined(WEBRTC_ARCH_MIPS)
+constexpr bool kIsArm = true;
+#else
+constexpr bool kIsArm = false;
+#endif
+
+absl::optional<LibvpxVp8Decoder::DeblockParams> DefaultDeblockParams() {
+ return LibvpxVp8Decoder::DeblockParams(/*max_level=*/8,
+ /*degrade_qp=*/60,
+ /*min_qp=*/30);
+}
+
+absl::optional<LibvpxVp8Decoder::DeblockParams>
+GetPostProcParamsFromFieldTrialGroup() {
+ std::string group = webrtc::field_trial::FindFullName(
+ kIsArm ? kVp8PostProcArmFieldTrial : kVp8PostProcFieldTrial);
+ if (group.empty()) {
+ return DefaultDeblockParams();
+ }
+
+ LibvpxVp8Decoder::DeblockParams params;
+ if (sscanf(group.c_str(), "Enabled-%d,%d,%d", &params.max_level,
+ &params.min_qp, &params.degrade_qp) != 3) {
+ return DefaultDeblockParams();
+ }
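+  // For example, the trial string "Enabled-8,30,60" yields max_level=8,
+  // min_qp=30 and degrade_qp=60, matching the defaults above.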
+
+ if (params.max_level < 0 || params.max_level > 16) {
+ return DefaultDeblockParams();
+ }
+
+ if (params.min_qp < 0 || params.degrade_qp <= params.min_qp) {
+ return DefaultDeblockParams();
+ }
+
+ return params;
+}
+
+} // namespace
+
+std::unique_ptr<VideoDecoder> VP8Decoder::Create() {
+ return std::make_unique<LibvpxVp8Decoder>();
+}
+
+class LibvpxVp8Decoder::QpSmoother {
+ public:
+ QpSmoother() : last_sample_ms_(rtc::TimeMillis()), smoother_(kAlpha) {}
+
+ int GetAvg() const {
+ float value = smoother_.filtered();
+ return (value == rtc::ExpFilter::kValueUndefined) ? 0
+ : static_cast<int>(value);
+ }
+
+ void Add(float sample) {
+ int64_t now_ms = rtc::TimeMillis();
+ smoother_.Apply(static_cast<float>(now_ms - last_sample_ms_), sample);
+ last_sample_ms_ = now_ms;
+ }
+
+ void Reset() { smoother_.Reset(kAlpha); }
+
+ private:
+ const float kAlpha = 0.95f;
+ int64_t last_sample_ms_;
+ rtc::ExpFilter smoother_;
+};
+
+LibvpxVp8Decoder::LibvpxVp8Decoder()
+ : use_postproc_(
+ kIsArm ? webrtc::field_trial::IsEnabled(kVp8PostProcArmFieldTrial)
+ : true),
+ buffer_pool_(false, 300 /* max_number_of_buffers*/),
+ decode_complete_callback_(NULL),
+ inited_(false),
+ decoder_(NULL),
+ propagation_cnt_(-1),
+ last_frame_width_(0),
+ last_frame_height_(0),
+ key_frame_required_(true),
+ deblock_params_(use_postproc_ ? GetPostProcParamsFromFieldTrialGroup()
+ : absl::nullopt),
+ qp_smoother_(use_postproc_ ? new QpSmoother() : nullptr),
+ preferred_output_format_(field_trial::IsEnabled("WebRTC-NV12Decode")
+ ? VideoFrameBuffer::Type::kNV12
+ : VideoFrameBuffer::Type::kI420) {}
+
+LibvpxVp8Decoder::~LibvpxVp8Decoder() {
+  inited_ = true;  // In order to do the actual release.
+ Release();
+}
+
+bool LibvpxVp8Decoder::Configure(const Settings& settings) {
+ if (Release() < 0) {
+ return false;
+ }
+ if (decoder_ == NULL) {
+ decoder_ = new vpx_codec_ctx_t;
+ memset(decoder_, 0, sizeof(*decoder_));
+ }
+ vpx_codec_dec_cfg_t cfg;
+  // Set the number of threads to a constant value (1).
+  cfg.threads = 1;
+  cfg.h = cfg.w = 0;  // Set after decode.
+
+ vpx_codec_flags_t flags = use_postproc_ ? VPX_CODEC_USE_POSTPROC : 0;
+
+ if (vpx_codec_dec_init(decoder_, vpx_codec_vp8_dx(), &cfg, flags)) {
+ delete decoder_;
+ decoder_ = nullptr;
+ return false;
+ }
+
+ propagation_cnt_ = -1;
+ inited_ = true;
+
+ // Always start with a complete key frame.
+ key_frame_required_ = true;
+ if (absl::optional<int> buffer_pool_size = settings.buffer_pool_size()) {
+ if (!buffer_pool_.Resize(*buffer_pool_size)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+int LibvpxVp8Decoder::Decode(const EncodedImage& input_image,
+ bool missing_frames,
+ int64_t /*render_time_ms*/) {
+ if (!inited_) {
+ return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
+ }
+ if (decode_complete_callback_ == NULL) {
+ return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
+ }
+ if (input_image.data() == NULL && input_image.size() > 0) {
+ // Reset to avoid requesting key frames too often.
+ if (propagation_cnt_ > 0)
+ propagation_cnt_ = 0;
+ return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+ }
+
+  // Post-process configurations.
+ if (use_postproc_) {
+ vp8_postproc_cfg_t ppcfg;
+ // MFQE enabled to reduce key frame popping.
+ ppcfg.post_proc_flag = VP8_MFQE;
+
+ if (kIsArm) {
+ RTC_DCHECK(deblock_params_.has_value());
+ }
+ if (deblock_params_.has_value()) {
+ // For low resolutions, use stronger deblocking filter.
+ int last_width_x_height = last_frame_width_ * last_frame_height_;
+ if (last_width_x_height > 0 && last_width_x_height <= 320 * 240) {
+ // Enable the deblock and demacroblocker based on qp thresholds.
+ RTC_DCHECK(qp_smoother_);
+ int qp = qp_smoother_->GetAvg();
+ if (qp > deblock_params_->min_qp) {
+ int level = deblock_params_->max_level;
+ if (qp < deblock_params_->degrade_qp) {
+ // Use lower level.
+ level = deblock_params_->max_level *
+ (qp - deblock_params_->min_qp) /
+ (deblock_params_->degrade_qp - deblock_params_->min_qp);
+ }
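+          // E.g. with the defaults (max_level=8, min_qp=30, degrade_qp=60),
+          // a smoothed QP of 45 yields level = 8 * 15 / 30 = 4.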
+ // Deblocking level only affects VP8_DEMACROBLOCK.
+ ppcfg.deblocking_level = std::max(level, 1);
+ ppcfg.post_proc_flag |= VP8_DEBLOCK | VP8_DEMACROBLOCK;
+ }
+ }
+ } else {
+ // Non-arm with no explicit deblock params set.
+ ppcfg.post_proc_flag |= VP8_DEBLOCK;
+ // For VGA resolutions and lower, enable the demacroblocker postproc.
+ if (last_frame_width_ * last_frame_height_ <= 640 * 360) {
+ ppcfg.post_proc_flag |= VP8_DEMACROBLOCK;
+ }
+ // Strength of deblocking filter. Valid range:[0,16]
+ ppcfg.deblocking_level = 3;
+ }
+
+ vpx_codec_control(decoder_, VP8_SET_POSTPROC, &ppcfg);
+ }
+
+ // Always start with a complete key frame.
+ if (key_frame_required_) {
+ if (input_image._frameType != VideoFrameType::kVideoFrameKey)
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ key_frame_required_ = false;
+ }
+ // Restrict error propagation using key frame requests.
+ // Reset on a key frame refresh.
+ if (input_image._frameType == VideoFrameType::kVideoFrameKey) {
+ propagation_cnt_ = -1;
+ // Start count on first loss.
+ } else if (missing_frames && propagation_cnt_ == -1) {
+ propagation_cnt_ = 0;
+ }
+ if (propagation_cnt_ >= 0) {
+ propagation_cnt_++;
+ }
+
+ vpx_codec_iter_t iter = NULL;
+ vpx_image_t* img;
+ int ret;
+
+ // Check for missing frames.
+ if (missing_frames) {
+ // Call decoder with zero data length to signal missing frames.
+ if (vpx_codec_decode(decoder_, NULL, 0, 0, kDecodeDeadlineRealtime)) {
+ // Reset to avoid requesting key frames too often.
+ if (propagation_cnt_ > 0)
+ propagation_cnt_ = 0;
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+ img = vpx_codec_get_frame(decoder_, &iter);
+ iter = NULL;
+ }
+
+ const uint8_t* buffer = input_image.data();
+ if (input_image.size() == 0) {
+ buffer = NULL; // Triggers full frame concealment.
+ }
+ if (vpx_codec_decode(decoder_, buffer, input_image.size(), 0,
+ kDecodeDeadlineRealtime)) {
+ // Reset to avoid requesting key frames too often.
+ if (propagation_cnt_ > 0) {
+ propagation_cnt_ = 0;
+ }
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+
+ img = vpx_codec_get_frame(decoder_, &iter);
+ int qp;
+ vpx_codec_err_t vpx_ret =
+ vpx_codec_control(decoder_, VPXD_GET_LAST_QUANTIZER, &qp);
+ RTC_DCHECK_EQ(vpx_ret, VPX_CODEC_OK);
+ ret = ReturnFrame(img, input_image.Timestamp(), qp, input_image.ColorSpace());
+ if (ret != 0) {
+ // Reset to avoid requesting key frames too often.
+ if (ret < 0 && propagation_cnt_ > 0)
+ propagation_cnt_ = 0;
+ return ret;
+ }
+  // Check the propagation count against the threshold.
+ if (propagation_cnt_ > kVp8ErrorPropagationTh) {
+ // Reset to avoid requesting key frames too often.
+ propagation_cnt_ = 0;
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+int LibvpxVp8Decoder::ReturnFrame(
+ const vpx_image_t* img,
+ uint32_t timestamp,
+ int qp,
+ const webrtc::ColorSpace* explicit_color_space) {
+ if (img == NULL) {
+ // Decoder OK and NULL image => No show frame
+ return WEBRTC_VIDEO_CODEC_NO_OUTPUT;
+ }
+ if (qp_smoother_) {
+ if (last_frame_width_ != static_cast<int>(img->d_w) ||
+ last_frame_height_ != static_cast<int>(img->d_h)) {
+ qp_smoother_->Reset();
+ }
+ qp_smoother_->Add(qp);
+ }
+ last_frame_width_ = img->d_w;
+ last_frame_height_ = img->d_h;
+ // Allocate memory for decoded image.
+ rtc::scoped_refptr<VideoFrameBuffer> buffer;
+
+ if (preferred_output_format_ == VideoFrameBuffer::Type::kNV12) {
+ // Convert instead of making a copy.
+    // Note: libvpx doesn't support creating an NV12 image directly.
+    // Due to the bitstream structure, such a change would just hide the
+ // conversion operation inside the decode call.
+ rtc::scoped_refptr<NV12Buffer> nv12_buffer =
+ buffer_pool_.CreateNV12Buffer(img->d_w, img->d_h);
+ buffer = nv12_buffer;
+ if (nv12_buffer.get()) {
+ libyuv::I420ToNV12(img->planes[VPX_PLANE_Y], img->stride[VPX_PLANE_Y],
+ img->planes[VPX_PLANE_U], img->stride[VPX_PLANE_U],
+ img->planes[VPX_PLANE_V], img->stride[VPX_PLANE_V],
+ nv12_buffer->MutableDataY(), nv12_buffer->StrideY(),
+ nv12_buffer->MutableDataUV(), nv12_buffer->StrideUV(),
+ img->d_w, img->d_h);
+ }
+ } else {
+ rtc::scoped_refptr<I420Buffer> i420_buffer =
+ buffer_pool_.CreateI420Buffer(img->d_w, img->d_h);
+ buffer = i420_buffer;
+ if (i420_buffer.get()) {
+ libyuv::I420Copy(img->planes[VPX_PLANE_Y], img->stride[VPX_PLANE_Y],
+ img->planes[VPX_PLANE_U], img->stride[VPX_PLANE_U],
+ img->planes[VPX_PLANE_V], img->stride[VPX_PLANE_V],
+ i420_buffer->MutableDataY(), i420_buffer->StrideY(),
+ i420_buffer->MutableDataU(), i420_buffer->StrideU(),
+ i420_buffer->MutableDataV(), i420_buffer->StrideV(),
+ img->d_w, img->d_h);
+ }
+ }
+
+ if (!buffer.get()) {
+ // Pool has too many pending frames.
+ RTC_HISTOGRAM_BOOLEAN("WebRTC.Video.LibvpxVp8Decoder.TooManyPendingFrames",
+ 1);
+ return WEBRTC_VIDEO_CODEC_NO_OUTPUT;
+ }
+
+ VideoFrame decoded_image = VideoFrame::Builder()
+ .set_video_frame_buffer(buffer)
+ .set_timestamp_rtp(timestamp)
+ .set_color_space(explicit_color_space)
+ .build();
+ decode_complete_callback_->Decoded(decoded_image, absl::nullopt, qp);
+
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+int LibvpxVp8Decoder::RegisterDecodeCompleteCallback(
+ DecodedImageCallback* callback) {
+ decode_complete_callback_ = callback;
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+int LibvpxVp8Decoder::Release() {
+ int ret_val = WEBRTC_VIDEO_CODEC_OK;
+
+ if (decoder_ != NULL) {
+ if (inited_) {
+ if (vpx_codec_destroy(decoder_)) {
+ ret_val = WEBRTC_VIDEO_CODEC_MEMORY;
+ }
+ }
+ delete decoder_;
+ decoder_ = NULL;
+ }
+ buffer_pool_.Release();
+ inited_ = false;
+ return ret_val;
+}
+
+VideoDecoder::DecoderInfo LibvpxVp8Decoder::GetDecoderInfo() const {
+ DecoderInfo info;
+ info.implementation_name = "libvpx";
+ info.is_hardware_accelerated = false;
+ return info;
+}
+
+const char* LibvpxVp8Decoder::ImplementationName() const {
+ return "libvpx";
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/vp8/libvpx_vp8_decoder.h b/third_party/libwebrtc/modules/video_coding/codecs/vp8/libvpx_vp8_decoder.h
new file mode 100644
index 0000000000..4d1e20d246
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/vp8/libvpx_vp8_decoder.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_CODECS_VP8_LIBVPX_VP8_DECODER_H_
+#define MODULES_VIDEO_CODING_CODECS_VP8_LIBVPX_VP8_DECODER_H_
+
+#include <memory>
+
+#include "absl/types/optional.h"
+#include "api/video/encoded_image.h"
+#include "api/video_codecs/video_decoder.h"
+#include "common_video/include/video_frame_buffer_pool.h"
+#include "modules/video_coding/codecs/vp8/include/vp8.h"
+#include "modules/video_coding/include/video_codec_interface.h"
+#include "vpx/vp8dx.h"
+#include "vpx/vpx_decoder.h"
+
+namespace webrtc {
+
+class LibvpxVp8Decoder : public VideoDecoder {
+ public:
+ LibvpxVp8Decoder();
+ ~LibvpxVp8Decoder() override;
+
+ bool Configure(const Settings& settings) override;
+ int Decode(const EncodedImage& input_image,
+ bool missing_frames,
+ int64_t /*render_time_ms*/) override;
+
+ int RegisterDecodeCompleteCallback(DecodedImageCallback* callback) override;
+ int Release() override;
+
+ DecoderInfo GetDecoderInfo() const override;
+ const char* ImplementationName() const override;
+
+ struct DeblockParams {
+ DeblockParams() : max_level(6), degrade_qp(1), min_qp(0) {}
+ DeblockParams(int max_level, int degrade_qp, int min_qp)
+ : max_level(max_level), degrade_qp(degrade_qp), min_qp(min_qp) {}
+ int max_level; // Deblocking strength: [0, 16].
+ int degrade_qp; // If QP value is below, start lowering `max_level`.
+ int min_qp; // If QP value is below, turn off deblocking.
+ };
+
+ private:
+ class QpSmoother;
+ int ReturnFrame(const vpx_image_t* img,
+ uint32_t timeStamp,
+ int qp,
+ const webrtc::ColorSpace* explicit_color_space);
+ const bool use_postproc_;
+
+ VideoFrameBufferPool buffer_pool_;
+ DecodedImageCallback* decode_complete_callback_;
+ bool inited_;
+ vpx_codec_ctx_t* decoder_;
+ int propagation_cnt_;
+ int last_frame_width_;
+ int last_frame_height_;
+ bool key_frame_required_;
+ const absl::optional<DeblockParams> deblock_params_;
+ const std::unique_ptr<QpSmoother> qp_smoother_;
+
+ // Decoder should produce this format if possible.
+ const VideoFrameBuffer::Type preferred_output_format_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_CODECS_VP8_LIBVPX_VP8_DECODER_H_
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/vp8/libvpx_vp8_encoder.cc b/third_party/libwebrtc/modules/video_coding/codecs/vp8/libvpx_vp8_encoder.cc
new file mode 100644
index 0000000000..61732443f4
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/vp8/libvpx_vp8_encoder.cc
@@ -0,0 +1,1428 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/codecs/vp8/libvpx_vp8_encoder.h"
+
+#include <string.h>
+
+#include <algorithm>
+#include <cstdint>
+#include <iterator>
+#include <memory>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "absl/algorithm/container.h"
+#include "api/scoped_refptr.h"
+#include "api/video/video_content_type.h"
+#include "api/video/video_frame_buffer.h"
+#include "api/video/video_timing.h"
+#include "api/video_codecs/vp8_temporal_layers.h"
+#include "api/video_codecs/vp8_temporal_layers_factory.h"
+#include "modules/video_coding/codecs/interface/common_constants.h"
+#include "modules/video_coding/codecs/vp8/include/vp8.h"
+#include "modules/video_coding/codecs/vp8/vp8_scalability.h"
+#include "modules/video_coding/include/video_error_codes.h"
+#include "modules/video_coding/svc/scalability_mode_util.h"
+#include "modules/video_coding/utility/simulcast_rate_allocator.h"
+#include "modules/video_coding/utility/simulcast_utility.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/experiments/field_trial_parser.h"
+#include "rtc_base/experiments/field_trial_units.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/trace_event.h"
+#include "system_wrappers/include/field_trial.h"
+#include "libyuv/include/libyuv/scale.h"
+#include "vpx/vp8cx.h"
+
+namespace webrtc {
+namespace {
+#if defined(WEBRTC_IOS)
+constexpr char kVP8IosMaxNumberOfThreadFieldTrial[] =
+ "WebRTC-VP8IosMaxNumberOfThread";
+constexpr char kVP8IosMaxNumberOfThreadFieldTrialParameter[] = "max_thread";
+#endif
+
+constexpr char kVp8ForcePartitionResilience[] =
+ "WebRTC-VP8-ForcePartitionResilience";
+
+// QP is obtained from the VP8 bitstream for HW decoders, so the QP
+// corresponds to the bitstream range of [0, 127] and not the user-level
+// range of [0, 63].
+constexpr int kLowVp8QpThreshold = 29;
+constexpr int kHighVp8QpThreshold = 95;
+
+constexpr int kTokenPartitions = VP8_ONE_TOKENPARTITION;
+constexpr uint32_t kVp832ByteAlign = 32u;
+
+constexpr int kRtpTicksPerSecond = 90000;
+constexpr int kRtpTicksPerMs = kRtpTicksPerSecond / 1000;
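+// I.e. 90 RTP ticks per millisecond; one frame at 30 fps spans
+// 90000 / 30 = 3000 ticks.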
+
+// VP8 denoiser states.
+enum denoiserState : uint32_t {
+ kDenoiserOff,
+ kDenoiserOnYOnly,
+ kDenoiserOnYUV,
+ kDenoiserOnYUVAggressive,
+ // Adaptive mode defaults to kDenoiserOnYUV on key frame, but may switch
+ // to kDenoiserOnYUVAggressive based on a computed noise metric.
+ kDenoiserOnAdaptive
+};
+
+// Greatest common divisor.
+int GCD(int a, int b) {
+ int c = a % b;
+ while (c != 0) {
+ a = b;
+ b = c;
+ c = a % b;
+ }
+ return b;
+}
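+
+// For example, GCD(12, 18) == 6. Note that `b` must be non-zero, or the
+// initial `a % b` divides by zero.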
+
+static_assert(Vp8EncoderConfig::TemporalLayerConfig::kMaxPeriodicity ==
+ VPX_TS_MAX_PERIODICITY,
+ "Vp8EncoderConfig::kMaxPeriodicity must be kept in sync with the "
+ "constant in libvpx.");
+static_assert(Vp8EncoderConfig::TemporalLayerConfig::kMaxLayers ==
+ VPX_TS_MAX_LAYERS,
+ "Vp8EncoderConfig::kMaxLayers must be kept in sync with the "
+ "constant in libvpx.");
+
+// Allow a newer value to override a current value only if the new value
+// is set.
+template <typename T>
+bool MaybeSetNewValue(const absl::optional<T>& new_value,
+ absl::optional<T>* base_value) {
+ if (new_value.has_value() && new_value != *base_value) {
+ *base_value = new_value;
+ return true;
+ } else {
+ return false;
+ }
+}
+
+// Adds configuration from `new_config` to `base_config`. Both configs consist
+// of optionals, and only optionals which are set in `new_config` can have
+// an effect. (That is, set values in `base_config` cannot be unset.)
+// Returns `true` iff any changes were made to `base_config`.
+bool MaybeExtendVp8EncoderConfig(const Vp8EncoderConfig& new_config,
+ Vp8EncoderConfig* base_config) {
+ bool changes_made = false;
+ changes_made |= MaybeSetNewValue(new_config.temporal_layer_config,
+ &base_config->temporal_layer_config);
+ changes_made |= MaybeSetNewValue(new_config.rc_target_bitrate,
+ &base_config->rc_target_bitrate);
+ changes_made |= MaybeSetNewValue(new_config.rc_max_quantizer,
+ &base_config->rc_max_quantizer);
+ changes_made |= MaybeSetNewValue(new_config.g_error_resilient,
+ &base_config->g_error_resilient);
+ return changes_made;
+}
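+// Illustrative example (assumed values): if `new_config` only sets
+// rc_target_bitrate = 500 while `base_config` already holds
+// rc_max_quantizer = 56, the merge leaves rc_max_quantizer untouched, adds
+// rc_target_bitrate, and returns true.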
+
+void ApplyVp8EncoderConfigToVpxConfig(const Vp8EncoderConfig& encoder_config,
+ vpx_codec_enc_cfg_t* vpx_config) {
+ if (encoder_config.temporal_layer_config.has_value()) {
+ const Vp8EncoderConfig::TemporalLayerConfig& ts_config =
+ encoder_config.temporal_layer_config.value();
+ vpx_config->ts_number_layers = ts_config.ts_number_layers;
+ std::copy(ts_config.ts_target_bitrate.begin(),
+ ts_config.ts_target_bitrate.end(),
+ std::begin(vpx_config->ts_target_bitrate));
+ std::copy(ts_config.ts_rate_decimator.begin(),
+ ts_config.ts_rate_decimator.end(),
+ std::begin(vpx_config->ts_rate_decimator));
+ vpx_config->ts_periodicity = ts_config.ts_periodicity;
+ std::copy(ts_config.ts_layer_id.begin(), ts_config.ts_layer_id.end(),
+ std::begin(vpx_config->ts_layer_id));
+ } else {
+ vpx_config->ts_number_layers = 1;
+ vpx_config->ts_rate_decimator[0] = 1;
+ vpx_config->ts_periodicity = 1;
+ vpx_config->ts_layer_id[0] = 0;
+ }
+
+ if (encoder_config.rc_target_bitrate.has_value()) {
+ vpx_config->rc_target_bitrate = encoder_config.rc_target_bitrate.value();
+ }
+
+ if (encoder_config.rc_max_quantizer.has_value()) {
+ vpx_config->rc_max_quantizer = encoder_config.rc_max_quantizer.value();
+ }
+
+ if (encoder_config.g_error_resilient.has_value()) {
+ vpx_config->g_error_resilient = encoder_config.g_error_resilient.value();
+ }
+}
+
+bool IsCompatibleVideoFrameBufferType(VideoFrameBuffer::Type left,
+ VideoFrameBuffer::Type right) {
+ if (left == VideoFrameBuffer::Type::kI420 ||
+ left == VideoFrameBuffer::Type::kI420A) {
+    // LibvpxVp8Encoder does not care about the alpha channel, so I420A and
+    // I420 are considered compatible.
+ return right == VideoFrameBuffer::Type::kI420 ||
+ right == VideoFrameBuffer::Type::kI420A;
+ }
+ return left == right;
+}
+
+void SetRawImagePlanes(vpx_image_t* raw_image, VideoFrameBuffer* buffer) {
+ switch (buffer->type()) {
+ case VideoFrameBuffer::Type::kI420:
+ case VideoFrameBuffer::Type::kI420A: {
+ const I420BufferInterface* i420_buffer = buffer->GetI420();
+ RTC_DCHECK(i420_buffer);
+ raw_image->planes[VPX_PLANE_Y] =
+ const_cast<uint8_t*>(i420_buffer->DataY());
+ raw_image->planes[VPX_PLANE_U] =
+ const_cast<uint8_t*>(i420_buffer->DataU());
+ raw_image->planes[VPX_PLANE_V] =
+ const_cast<uint8_t*>(i420_buffer->DataV());
+ raw_image->stride[VPX_PLANE_Y] = i420_buffer->StrideY();
+ raw_image->stride[VPX_PLANE_U] = i420_buffer->StrideU();
+ raw_image->stride[VPX_PLANE_V] = i420_buffer->StrideV();
+ break;
+ }
+ case VideoFrameBuffer::Type::kNV12: {
+ const NV12BufferInterface* nv12_buffer = buffer->GetNV12();
+ RTC_DCHECK(nv12_buffer);
+ raw_image->planes[VPX_PLANE_Y] =
+ const_cast<uint8_t*>(nv12_buffer->DataY());
+ raw_image->planes[VPX_PLANE_U] =
+ const_cast<uint8_t*>(nv12_buffer->DataUV());
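+      // NV12 stores U and V interleaved in a single plane; pointing the V
+      // plane one byte past U with the same stride lets libvpx read the
+      // interleaved chroma samples in place.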
+ raw_image->planes[VPX_PLANE_V] = raw_image->planes[VPX_PLANE_U] + 1;
+ raw_image->stride[VPX_PLANE_Y] = nv12_buffer->StrideY();
+ raw_image->stride[VPX_PLANE_U] = nv12_buffer->StrideUV();
+ raw_image->stride[VPX_PLANE_V] = nv12_buffer->StrideUV();
+ break;
+ }
+ default:
+ RTC_DCHECK_NOTREACHED();
+ }
+}
+
+} // namespace
+
+std::unique_ptr<VideoEncoder> VP8Encoder::Create() {
+ return std::make_unique<LibvpxVp8Encoder>(LibvpxInterface::Create(),
+ VP8Encoder::Settings());
+}
+
+std::unique_ptr<VideoEncoder> VP8Encoder::Create(
+ VP8Encoder::Settings settings) {
+ return std::make_unique<LibvpxVp8Encoder>(LibvpxInterface::Create(),
+ std::move(settings));
+}
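+// Usage sketch (illustrative; `codec_settings` and `encoder_settings` are
+// assumed to be provided by the caller):
+//   std::unique_ptr<VideoEncoder> encoder = VP8Encoder::Create();
+//   encoder->InitEncode(&codec_settings, encoder_settings);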
+
+vpx_enc_frame_flags_t LibvpxVp8Encoder::EncodeFlags(
+ const Vp8FrameConfig& references) {
+ RTC_DCHECK(!references.drop_frame);
+
+ vpx_enc_frame_flags_t flags = 0;
+
+ if ((references.last_buffer_flags &
+ Vp8FrameConfig::BufferFlags::kReference) == 0)
+ flags |= VP8_EFLAG_NO_REF_LAST;
+ if ((references.last_buffer_flags & Vp8FrameConfig::BufferFlags::kUpdate) ==
+ 0)
+ flags |= VP8_EFLAG_NO_UPD_LAST;
+ if ((references.golden_buffer_flags &
+ Vp8FrameConfig::BufferFlags::kReference) == 0)
+ flags |= VP8_EFLAG_NO_REF_GF;
+ if ((references.golden_buffer_flags & Vp8FrameConfig::BufferFlags::kUpdate) ==
+ 0)
+ flags |= VP8_EFLAG_NO_UPD_GF;
+ if ((references.arf_buffer_flags & Vp8FrameConfig::BufferFlags::kReference) ==
+ 0)
+ flags |= VP8_EFLAG_NO_REF_ARF;
+ if ((references.arf_buffer_flags & Vp8FrameConfig::BufferFlags::kUpdate) == 0)
+ flags |= VP8_EFLAG_NO_UPD_ARF;
+ if (references.freeze_entropy)
+ flags |= VP8_EFLAG_NO_UPD_ENTROPY;
+
+ return flags;
+}
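+// Illustrative example (assumed frame config): a delta frame that references
+// the last buffer without updating any buffer, i.e. last_buffer_flags ==
+// kReference and golden/arf == kNone, yields
+// VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_UPD_GF |
+// VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_UPD_ARF.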
+
+LibvpxVp8Encoder::LibvpxVp8Encoder(std::unique_ptr<LibvpxInterface> interface,
+ VP8Encoder::Settings settings)
+ : libvpx_(std::move(interface)),
+ rate_control_settings_(RateControlSettings::ParseFromFieldTrials()),
+ frame_buffer_controller_factory_(
+ std::move(settings.frame_buffer_controller_factory)),
+ resolution_bitrate_limits_(std::move(settings.resolution_bitrate_limits)),
+ key_frame_request_(kMaxSimulcastStreams, false),
+ variable_framerate_experiment_(ParseVariableFramerateConfig(
+ "WebRTC-VP8VariableFramerateScreenshare")),
+ framerate_controller_(variable_framerate_experiment_.framerate_limit) {
+ // TODO(eladalon/ilnik): These reservations might be wasting memory.
+ // InitEncode() is resizing to the actual size, which might be smaller.
+ raw_images_.reserve(kMaxSimulcastStreams);
+ encoded_images_.reserve(kMaxSimulcastStreams);
+ send_stream_.reserve(kMaxSimulcastStreams);
+ cpu_speed_.assign(kMaxSimulcastStreams, cpu_speed_default_);
+ encoders_.reserve(kMaxSimulcastStreams);
+ vpx_configs_.reserve(kMaxSimulcastStreams);
+ config_overrides_.reserve(kMaxSimulcastStreams);
+ downsampling_factors_.reserve(kMaxSimulcastStreams);
+}
+
+LibvpxVp8Encoder::~LibvpxVp8Encoder() {
+ Release();
+}
+
+int LibvpxVp8Encoder::Release() {
+ int ret_val = WEBRTC_VIDEO_CODEC_OK;
+
+ encoded_images_.clear();
+
+ if (inited_) {
+ for (auto it = encoders_.rbegin(); it != encoders_.rend(); ++it) {
+ if (libvpx_->codec_destroy(&*it)) {
+ ret_val = WEBRTC_VIDEO_CODEC_MEMORY;
+ }
+ }
+ }
+ encoders_.clear();
+
+ vpx_configs_.clear();
+ config_overrides_.clear();
+ send_stream_.clear();
+ cpu_speed_.clear();
+
+ for (auto it = raw_images_.rbegin(); it != raw_images_.rend(); ++it) {
+ libvpx_->img_free(&*it);
+ }
+ raw_images_.clear();
+
+ frame_buffer_controller_.reset();
+ inited_ = false;
+ return ret_val;
+}
+
+void LibvpxVp8Encoder::SetRates(const RateControlParameters& parameters) {
+ if (!inited_) {
+ RTC_LOG(LS_WARNING) << "SetRates() while not initialize";
+ return;
+ }
+
+ if (encoders_[0].err) {
+ RTC_LOG(LS_WARNING) << "Encoder in error state.";
+ return;
+ }
+
+ if (parameters.framerate_fps < 1.0) {
+ RTC_LOG(LS_WARNING) << "Unsupported framerate (must be >= 1.0): "
+ << parameters.framerate_fps;
+ return;
+ }
+
+ if (parameters.bitrate.get_sum_bps() == 0) {
+ // Encoder paused, turn off all encoding.
+    const int num_streams = static_cast<int>(encoders_.size());
+ for (int i = 0; i < num_streams; ++i)
+ SetStreamState(false, i);
+ return;
+ }
+
+ codec_.maxFramerate = static_cast<uint32_t>(parameters.framerate_fps + 0.5);
+
+ if (encoders_.size() > 1) {
+ // If we have more than 1 stream, reduce the qp_max for the low resolution
+ // stream if frame rate is not too low. The trade-off with lower qp_max is
+ // possibly more dropped frames, so we only do this if the frame rate is
+ // above some threshold (base temporal layer is down to 1/4 for 3 layers).
+ // We may want to condition this on bitrate later.
+ if (rate_control_settings_.Vp8BoostBaseLayerQuality() &&
+ parameters.framerate_fps > 20.0) {
+ vpx_configs_[encoders_.size() - 1].rc_max_quantizer = 45;
+ } else {
+ // Go back to default value set in InitEncode.
+ vpx_configs_[encoders_.size() - 1].rc_max_quantizer = qp_max_;
+ }
+ }
+
+ for (size_t i = 0; i < encoders_.size(); ++i) {
+ const size_t stream_idx = encoders_.size() - 1 - i;
+
+ unsigned int target_bitrate_kbps =
+ parameters.bitrate.GetSpatialLayerSum(stream_idx) / 1000;
+
+ bool send_stream = target_bitrate_kbps > 0;
+ if (send_stream || encoders_.size() > 1)
+ SetStreamState(send_stream, stream_idx);
+
+ vpx_configs_[i].rc_target_bitrate = target_bitrate_kbps;
+ if (send_stream) {
+ frame_buffer_controller_->OnRatesUpdated(
+ stream_idx, parameters.bitrate.GetTemporalLayerAllocation(stream_idx),
+ static_cast<int>(parameters.framerate_fps + 0.5));
+ }
+
+ UpdateVpxConfiguration(stream_idx);
+
+ vpx_codec_err_t err =
+ libvpx_->codec_enc_config_set(&encoders_[i], &vpx_configs_[i]);
+ if (err != VPX_CODEC_OK) {
+ RTC_LOG(LS_WARNING) << "Error configuring codec, error code: " << err
+ << ", details: "
+ << libvpx_->codec_error_detail(&encoders_[i]);
+ }
+ }
+}
+
+void LibvpxVp8Encoder::OnPacketLossRateUpdate(float packet_loss_rate) {
+ // TODO(bugs.webrtc.org/10431): Replace condition by DCHECK.
+ if (frame_buffer_controller_) {
+ frame_buffer_controller_->OnPacketLossRateUpdate(packet_loss_rate);
+ }
+}
+
+void LibvpxVp8Encoder::OnRttUpdate(int64_t rtt_ms) {
+ // TODO(bugs.webrtc.org/10431): Replace condition by DCHECK.
+ if (frame_buffer_controller_) {
+ frame_buffer_controller_->OnRttUpdate(rtt_ms);
+ }
+}
+
+void LibvpxVp8Encoder::OnLossNotification(
+ const LossNotification& loss_notification) {
+ if (frame_buffer_controller_) {
+ frame_buffer_controller_->OnLossNotification(loss_notification);
+ }
+}
+
+void LibvpxVp8Encoder::SetStreamState(bool send_stream, int stream_idx) {
+ if (send_stream && !send_stream_[stream_idx]) {
+ // Need a key frame if we have not sent this stream before.
+ key_frame_request_[stream_idx] = true;
+ }
+ send_stream_[stream_idx] = send_stream;
+}
+
+void LibvpxVp8Encoder::SetFecControllerOverride(
+ FecControllerOverride* fec_controller_override) {
+ // TODO(bugs.webrtc.org/10769): Update downstream and remove ability to
+ // pass nullptr.
+ // RTC_DCHECK(fec_controller_override);
+ RTC_DCHECK(!fec_controller_override_);
+ fec_controller_override_ = fec_controller_override;
+}
+
+// TODO(eladalon): s/inst/codec_settings/g.
+int LibvpxVp8Encoder::InitEncode(const VideoCodec* inst,
+ const VideoEncoder::Settings& settings) {
+ if (inst == NULL) {
+ return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+ }
+ if (inst->maxFramerate < 1) {
+ return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+ }
+  // Allow zero to represent an unspecified maxBitRate.
+ if (inst->maxBitrate > 0 && inst->startBitrate > inst->maxBitrate) {
+ return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+ }
+ if (inst->width < 1 || inst->height < 1) {
+ return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+ }
+ if (settings.number_of_cores < 1) {
+ return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+ }
+
+ if (absl::optional<ScalabilityMode> scalability_mode =
+ inst->GetScalabilityMode();
+ scalability_mode.has_value() &&
+ !VP8SupportsScalabilityMode(*scalability_mode)) {
+ return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+ }
+
+ num_active_streams_ = 0;
+ for (int i = 0; i < inst->numberOfSimulcastStreams; ++i) {
+ if (inst->simulcastStream[i].active) {
+ ++num_active_streams_;
+ }
+ }
+ if (inst->numberOfSimulcastStreams == 0 && inst->active) {
+ num_active_streams_ = 1;
+ }
+
+ if (inst->VP8().automaticResizeOn && num_active_streams_ > 1) {
+ return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+ }
+
+ // Use the previous pixel format to avoid extra image allocations.
+ vpx_img_fmt_t pixel_format =
+ raw_images_.empty() ? VPX_IMG_FMT_I420 : raw_images_[0].fmt;
+
+ int retVal = Release();
+ if (retVal < 0) {
+ return retVal;
+ }
+
+ int number_of_streams = SimulcastUtility::NumberOfSimulcastStreams(*inst);
+ if (number_of_streams > 1 &&
+ !SimulcastUtility::ValidSimulcastParameters(*inst, number_of_streams)) {
+ return WEBRTC_VIDEO_CODEC_ERR_SIMULCAST_PARAMETERS_NOT_SUPPORTED;
+ }
+
+ RTC_DCHECK(!frame_buffer_controller_);
+ if (frame_buffer_controller_factory_) {
+ frame_buffer_controller_ = frame_buffer_controller_factory_->Create(
+ *inst, settings, fec_controller_override_);
+ } else {
+ Vp8TemporalLayersFactory factory;
+ frame_buffer_controller_ =
+ factory.Create(*inst, settings, fec_controller_override_);
+ }
+ RTC_DCHECK(frame_buffer_controller_);
+
+ number_of_cores_ = settings.number_of_cores;
+ timestamp_ = 0;
+ codec_ = *inst;
+
+  // The code expects simulcastStream resolutions to be correct; make sure
+  // they are filled in even when there are no simulcast layers.
+ if (codec_.numberOfSimulcastStreams == 0) {
+ codec_.simulcastStream[0].width = codec_.width;
+ codec_.simulcastStream[0].height = codec_.height;
+ }
+
+ encoded_images_.resize(number_of_streams);
+ encoders_.resize(number_of_streams);
+ vpx_configs_.resize(number_of_streams);
+ config_overrides_.resize(number_of_streams);
+ downsampling_factors_.resize(number_of_streams);
+ raw_images_.resize(number_of_streams);
+ send_stream_.resize(number_of_streams);
+ send_stream_[0] = true; // For non-simulcast case.
+ cpu_speed_.resize(number_of_streams);
+ std::fill(key_frame_request_.begin(), key_frame_request_.end(), false);
+
+ int idx = number_of_streams - 1;
+ for (int i = 0; i < (number_of_streams - 1); ++i, --idx) {
+ int gcd = GCD(inst->simulcastStream[idx].width,
+ inst->simulcastStream[idx - 1].width);
+ downsampling_factors_[i].num = inst->simulcastStream[idx].width / gcd;
+ downsampling_factors_[i].den = inst->simulcastStream[idx - 1].width / gcd;
+ send_stream_[i] = false;
+ }
+ if (number_of_streams > 1) {
+ send_stream_[number_of_streams - 1] = false;
+ downsampling_factors_[number_of_streams - 1].num = 1;
+ downsampling_factors_[number_of_streams - 1].den = 1;
+ }
+
+  // Populate the encoder configuration with default values.
+ if (libvpx_->codec_enc_config_default(vpx_codec_vp8_cx(), &vpx_configs_[0],
+ 0)) {
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+  // Set the time base of the codec.
+ vpx_configs_[0].g_timebase.num = 1;
+ vpx_configs_[0].g_timebase.den = kRtpTicksPerSecond;
+  vpx_configs_[0].g_lag_in_frames = 0;  // 0 = no frame lagging
+
+ // Set the error resilience mode for temporal layers (but not simulcast).
+ vpx_configs_[0].g_error_resilient =
+ (SimulcastUtility::NumberOfTemporalLayers(*inst, 0) > 1)
+ ? VPX_ERROR_RESILIENT_DEFAULT
+ : 0;
+
+ // Override the error resilience mode if this is not simulcast, but we are
+ // using temporal layers.
+ if (field_trial::IsEnabled(kVp8ForcePartitionResilience) &&
+ (number_of_streams == 1) &&
+ (SimulcastUtility::NumberOfTemporalLayers(*inst, 0) > 1)) {
+ RTC_LOG(LS_INFO) << "Overriding g_error_resilient from "
+ << vpx_configs_[0].g_error_resilient << " to "
+ << VPX_ERROR_RESILIENT_PARTITIONS;
+ vpx_configs_[0].g_error_resilient = VPX_ERROR_RESILIENT_PARTITIONS;
+ }
+
+  // Rate control settings.
+ vpx_configs_[0].rc_dropframe_thresh = FrameDropThreshold(0);
+ vpx_configs_[0].rc_end_usage = VPX_CBR;
+ vpx_configs_[0].g_pass = VPX_RC_ONE_PASS;
+ // Handle resizing outside of libvpx.
+ vpx_configs_[0].rc_resize_allowed = 0;
+ vpx_configs_[0].rc_min_quantizer =
+ codec_.mode == VideoCodecMode::kScreensharing ? 12 : 2;
+ if (inst->qpMax >= vpx_configs_[0].rc_min_quantizer) {
+ qp_max_ = inst->qpMax;
+ }
+ if (rate_control_settings_.LibvpxVp8QpMax()) {
+ qp_max_ = std::max(rate_control_settings_.LibvpxVp8QpMax().value(),
+ static_cast<int>(vpx_configs_[0].rc_min_quantizer));
+ }
+ vpx_configs_[0].rc_max_quantizer = qp_max_;
+ vpx_configs_[0].rc_undershoot_pct = 100;
+ vpx_configs_[0].rc_overshoot_pct = 15;
+ vpx_configs_[0].rc_buf_initial_sz = 500;
+ vpx_configs_[0].rc_buf_optimal_sz = 600;
+ vpx_configs_[0].rc_buf_sz = 1000;
+
+ // Set the maximum target size of any key-frame.
+ rc_max_intra_target_ = MaxIntraTarget(vpx_configs_[0].rc_buf_optimal_sz);
+
+ if (inst->VP8().keyFrameInterval > 0) {
+ vpx_configs_[0].kf_mode = VPX_KF_AUTO;
+ vpx_configs_[0].kf_max_dist = inst->VP8().keyFrameInterval;
+ } else {
+ vpx_configs_[0].kf_mode = VPX_KF_DISABLED;
+ }
+
+ // Allow the user to set the complexity for the base stream.
+ switch (inst->GetVideoEncoderComplexity()) {
+ case VideoCodecComplexity::kComplexityHigh:
+ cpu_speed_[0] = -5;
+ break;
+ case VideoCodecComplexity::kComplexityHigher:
+ cpu_speed_[0] = -4;
+ break;
+ case VideoCodecComplexity::kComplexityMax:
+ cpu_speed_[0] = -3;
+ break;
+ default:
+ cpu_speed_[0] = -6;
+ break;
+ }
+ cpu_speed_default_ = cpu_speed_[0];
+ // Set encoding complexity (cpu_speed) based on resolution and/or platform.
+ cpu_speed_[0] = GetCpuSpeed(inst->width, inst->height);
+ for (int i = 1; i < number_of_streams; ++i) {
+ cpu_speed_[i] =
+ GetCpuSpeed(inst->simulcastStream[number_of_streams - 1 - i].width,
+ inst->simulcastStream[number_of_streams - 1 - i].height);
+ }
+ vpx_configs_[0].g_w = inst->width;
+ vpx_configs_[0].g_h = inst->height;
+
+ // Determine number of threads based on the image size and #cores.
+ // TODO(fbarchard): Consider number of Simulcast layers.
+ vpx_configs_[0].g_threads = NumberOfThreads(
+ vpx_configs_[0].g_w, vpx_configs_[0].g_h, settings.number_of_cores);
+
+  // Create a wrapper for the image, setting the image data to NULL. The
+  // actual pointers are set in Encode(). Alignment is set to 1, as it is
+  // meaningless here (no memory is allocated).
+ libvpx_->img_wrap(&raw_images_[0], pixel_format, inst->width, inst->height, 1,
+ NULL);
+
+  // Note that the stream order used here differs from webm: we put the
+  // lowest resolution at position 0, whereas they put the highest
+  // resolution at position 0.
+ const size_t stream_idx_cfg_0 = encoders_.size() - 1;
+ SimulcastRateAllocator init_allocator(codec_);
+ VideoBitrateAllocation allocation =
+ init_allocator.Allocate(VideoBitrateAllocationParameters(
+ inst->startBitrate * 1000, inst->maxFramerate));
+ std::vector<uint32_t> stream_bitrates;
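+  // `numberOfSimulcastStreams` may be 0 when simulcast is not used; run at
+  // least one iteration so the single stream still gets a bitrate entry.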
+ for (int i = 0; i == 0 || i < inst->numberOfSimulcastStreams; ++i) {
+ uint32_t bitrate = allocation.GetSpatialLayerSum(i) / 1000;
+ stream_bitrates.push_back(bitrate);
+ }
+
+ vpx_configs_[0].rc_target_bitrate = stream_bitrates[stream_idx_cfg_0];
+ if (stream_bitrates[stream_idx_cfg_0] > 0) {
+ uint32_t maxFramerate =
+ inst->simulcastStream[stream_idx_cfg_0].maxFramerate;
+ if (!maxFramerate) {
+ maxFramerate = inst->maxFramerate;
+ }
+
+ frame_buffer_controller_->OnRatesUpdated(
+ stream_idx_cfg_0,
+ allocation.GetTemporalLayerAllocation(stream_idx_cfg_0), maxFramerate);
+ }
+ frame_buffer_controller_->SetQpLimits(stream_idx_cfg_0,
+ vpx_configs_[0].rc_min_quantizer,
+ vpx_configs_[0].rc_max_quantizer);
+ UpdateVpxConfiguration(stream_idx_cfg_0);
+ vpx_configs_[0].rc_dropframe_thresh = FrameDropThreshold(stream_idx_cfg_0);
+
+ for (size_t i = 1; i < encoders_.size(); ++i) {
+ const size_t stream_idx = encoders_.size() - 1 - i;
+ memcpy(&vpx_configs_[i], &vpx_configs_[0], sizeof(vpx_configs_[0]));
+
+ vpx_configs_[i].g_w = inst->simulcastStream[stream_idx].width;
+ vpx_configs_[i].g_h = inst->simulcastStream[stream_idx].height;
+
+ // Use 1 thread for lower resolutions.
+ vpx_configs_[i].g_threads = 1;
+
+ vpx_configs_[i].rc_dropframe_thresh = FrameDropThreshold(stream_idx);
+
+    // Set alignment to 32, which guarantees at least 16-byte alignment for
+    // all planes (32 for Y, 16 for U and V): libvpx applies the requested
+    // stride to the Y plane, but only half of it to the U and V planes.
+ libvpx_->img_alloc(
+ &raw_images_[i], pixel_format, inst->simulcastStream[stream_idx].width,
+ inst->simulcastStream[stream_idx].height, kVp832ByteAlign);
+ SetStreamState(stream_bitrates[stream_idx] > 0, stream_idx);
+ vpx_configs_[i].rc_target_bitrate = stream_bitrates[stream_idx];
+ if (stream_bitrates[stream_idx] > 0) {
+ uint32_t maxFramerate = inst->simulcastStream[stream_idx].maxFramerate;
+ if (!maxFramerate) {
+ maxFramerate = inst->maxFramerate;
+ }
+ frame_buffer_controller_->OnRatesUpdated(
+ stream_idx, allocation.GetTemporalLayerAllocation(stream_idx),
+ maxFramerate);
+ }
+ frame_buffer_controller_->SetQpLimits(stream_idx,
+ vpx_configs_[i].rc_min_quantizer,
+ vpx_configs_[i].rc_max_quantizer);
+ UpdateVpxConfiguration(stream_idx);
+ }
+
+ return InitAndSetControlSettings();
+}
+
+int LibvpxVp8Encoder::GetCpuSpeed(int width, int height) {
+#if defined(WEBRTC_ARCH_ARM) || defined(WEBRTC_ARCH_ARM64) || \
+ defined(WEBRTC_ANDROID) || defined(WEBRTC_ARCH_MIPS)
+  // On mobile platforms, use a lower speed setting for lower resolutions on
+  // CPUs with 4 or more cores.
+ RTC_DCHECK_GT(number_of_cores_, 0);
+ if (experimental_cpu_speed_config_arm_
+ .GetValue(width * height, number_of_cores_)
+ .has_value()) {
+ return experimental_cpu_speed_config_arm_
+ .GetValue(width * height, number_of_cores_)
+ .value();
+ }
+
+ if (number_of_cores_ <= 3)
+ return -12;
+
+ if (width * height <= 352 * 288)
+ return -8;
+ else if (width * height <= 640 * 480)
+ return -10;
+ else
+ return -12;
+#else
+ // For non-ARM, increase encoding complexity (i.e., use lower speed setting)
+ // if resolution is below CIF. Otherwise, keep the default/user setting
+ // (`cpu_speed_default_`) set on InitEncode via VP8().complexity.
+ if (width * height < 352 * 288)
+ return (cpu_speed_default_ < -4) ? -4 : cpu_speed_default_;
+ else
+ return cpu_speed_default_;
+#endif
+}
+
+int LibvpxVp8Encoder::NumberOfThreads(int width, int height, int cpus) {
+#if defined(WEBRTC_ANDROID) || defined(WEBRTC_ARCH_MIPS)
+ if (width * height >= 320 * 180) {
+ if (cpus >= 4) {
+      // Use 3 threads for CPUs with 4 or more cores, since most of the time
+      // only 4 cores will be active.
+ return 3;
+ } else if (cpus == 3 || cpus == 2) {
+ return 2;
+ } else {
+ return 1;
+ }
+ }
+ return 1;
+#else
+#if defined(WEBRTC_IOS)
+ std::string trial_string =
+ field_trial::FindFullName(kVP8IosMaxNumberOfThreadFieldTrial);
+ FieldTrialParameter<int> max_thread_number(
+ kVP8IosMaxNumberOfThreadFieldTrialParameter, 0);
+ ParseFieldTrial({&max_thread_number}, trial_string);
+ if (max_thread_number.Get() > 0) {
+ if (width * height < 320 * 180) {
+ return 1; // Use single thread for small screens
+ }
+    // The thread count must be less than or equal to the number of CPUs.
+ return std::min(cpus, max_thread_number.Get());
+ }
+#endif // defined(WEBRTC_IOS)
+ if (width * height >= 1920 * 1080 && cpus > 8) {
+ return 8; // 8 threads for 1080p on high perf machines.
+ } else if (width * height > 1280 * 960 && cpus >= 6) {
+ // 3 threads for 1080p.
+ return 3;
+ } else if (width * height > 640 * 480 && cpus >= 3) {
+ // Default 2 threads for qHD/HD, but allow 3 if core count is high enough,
+ // as this will allow more margin for high-core/low clock machines or if
+ // not built with highest optimization.
+ if (cpus >= 6) {
+ return 3;
+ }
+ return 2;
+ } else {
+ // 1 thread for VGA or less.
+ return 1;
+ }
+#endif
+}
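+// Illustrative mapping for a non-iOS desktop build (assumed inputs): a
+// 1920x1080 stream gets 8 threads only when cpus > 8; with cpus == 8 it
+// falls through to the 1280x960 branch (cpus >= 6) and gets 3 threads. A
+// 640x480 stream gets 1 thread regardless of core count.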
+
+int LibvpxVp8Encoder::InitAndSetControlSettings() {
+ vpx_codec_flags_t flags = 0;
+ flags |= VPX_CODEC_USE_OUTPUT_PARTITION;
+
+ if (encoders_.size() > 1) {
+ int error = libvpx_->codec_enc_init_multi(
+ &encoders_[0], vpx_codec_vp8_cx(), &vpx_configs_[0], encoders_.size(),
+ flags, &downsampling_factors_[0]);
+ if (error) {
+ return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
+ }
+ } else {
+ if (libvpx_->codec_enc_init(&encoders_[0], vpx_codec_vp8_cx(),
+ &vpx_configs_[0], flags)) {
+ return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
+ }
+ }
+ // Enable denoising for the highest resolution stream, and for
+ // the second highest resolution if we are doing more than 2
+ // spatial layers/streams.
+ // TODO(holmer): Investigate possibility of adding a libvpx API
+ // for getting the denoised frame from the encoder and using that
+ // when encoding lower resolution streams. Would it work with the
+ // multi-res encoding feature?
+#if defined(WEBRTC_ARCH_ARM) || defined(WEBRTC_ARCH_ARM64) || \
+    defined(WEBRTC_ANDROID) || defined(WEBRTC_ARCH_MIPS)
+  const denoiserState denoiser_state = kDenoiserOnYOnly;
+#else
+  const denoiserState denoiser_state = kDenoiserOnAdaptive;
+#endif
+ libvpx_->codec_control(
+ &encoders_[0], VP8E_SET_NOISE_SENSITIVITY,
+ codec_.VP8()->denoisingOn ? denoiser_state : kDenoiserOff);
+ if (encoders_.size() > 2) {
+ libvpx_->codec_control(
+ &encoders_[1], VP8E_SET_NOISE_SENSITIVITY,
+ codec_.VP8()->denoisingOn ? denoiser_state : kDenoiserOff);
+ }
+ for (size_t i = 0; i < encoders_.size(); ++i) {
+ // Allow more screen content to be detected as static.
+ libvpx_->codec_control(
+ &(encoders_[i]), VP8E_SET_STATIC_THRESHOLD,
+ codec_.mode == VideoCodecMode::kScreensharing ? 100u : 1u);
+ libvpx_->codec_control(&(encoders_[i]), VP8E_SET_CPUUSED, cpu_speed_[i]);
+ libvpx_->codec_control(
+ &(encoders_[i]), VP8E_SET_TOKEN_PARTITIONS,
+ static_cast<vp8e_token_partitions>(kTokenPartitions));
+ libvpx_->codec_control(&(encoders_[i]), VP8E_SET_MAX_INTRA_BITRATE_PCT,
+ rc_max_intra_target_);
+ // VP8E_SET_SCREEN_CONTENT_MODE 2 = screen content with more aggressive
+ // rate control (drop frames on large target bitrate overshoot)
+ libvpx_->codec_control(
+ &(encoders_[i]), VP8E_SET_SCREEN_CONTENT_MODE,
+ codec_.mode == VideoCodecMode::kScreensharing ? 2u : 0u);
+ }
+ inited_ = true;
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+uint32_t LibvpxVp8Encoder::MaxIntraTarget(uint32_t optimalBuffersize) {
+  // Set the max to the optimal buffer level (normalized by the target
+  // bitrate), scaled by scalePar:
+  //   max target size = scalePar * optimalBufferSize * targetBR[Kbps].
+  // The value is expressed as a percentage of the per-frame bandwidth:
+  //   perFrameBw = targetBR[Kbps] * 1000 / frameRate.
+
+ float scalePar = 0.5;
+ uint32_t targetPct = optimalBuffersize * scalePar * codec_.maxFramerate / 10;
+
+ // Don't go below 3 times the per frame bandwidth.
+ const uint32_t minIntraTh = 300;
+ return (targetPct < minIntraTh) ? minIntraTh : targetPct;
+}
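+// Worked example (assumed values): optimalBuffersize = 600 and
+// codec_.maxFramerate = 30 give targetPct = 600 * 0.5 * 30 / 10 = 900, i.e.
+// a key frame may use up to 900% of the per-frame bandwidth. Results below
+// minIntraTh are clamped to 300.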
+
+uint32_t LibvpxVp8Encoder::FrameDropThreshold(size_t spatial_idx) const {
+ if (!codec_.GetFrameDropEnabled()) {
+ return 0;
+ }
+
+  // If temporal layers are used, they override the frame-dropping setting:
+  // e.g. ScreenshareLayers does not work as intended with frame dropping
+  // enabled, while DefaultTemporalLayers has performance issues with frame
+  // dropping disabled.
+ RTC_DCHECK(frame_buffer_controller_);
+ RTC_DCHECK_LT(spatial_idx, frame_buffer_controller_->StreamCount());
+ return frame_buffer_controller_->SupportsEncoderFrameDropping(spatial_idx)
+ ? 30
+ : 0;
+}
+
+size_t LibvpxVp8Encoder::SteadyStateSize(int sid, int tid) {
+ const int encoder_id = encoders_.size() - 1 - sid;
+ size_t bitrate_bps;
+ float fps;
+ if ((SimulcastUtility::IsConferenceModeScreenshare(codec_) && sid == 0) ||
+ vpx_configs_[encoder_id].ts_number_layers <= 1) {
+ // In conference screenshare there's no defined per temporal layer bitrate
+ // and framerate.
+ bitrate_bps = vpx_configs_[encoder_id].rc_target_bitrate * 1000;
+ fps = codec_.maxFramerate;
+ } else {
+ bitrate_bps = vpx_configs_[encoder_id].ts_target_bitrate[tid] * 1000;
+ fps = codec_.maxFramerate /
+ fmax(vpx_configs_[encoder_id].ts_rate_decimator[tid], 1.0);
+ if (tid > 0) {
+      // Layer bitrate and fps are given as partial (cumulative) sums.
+ bitrate_bps -= vpx_configs_[encoder_id].ts_target_bitrate[tid - 1] * 1000;
+ fps = codec_.maxFramerate /
+ fmax(vpx_configs_[encoder_id].ts_rate_decimator[tid - 1], 1.0);
+ }
+ }
+
+ if (fps < 1e-9)
+ return 0;
+ return static_cast<size_t>(
+ bitrate_bps / (8 * fps) *
+ (100 -
+ variable_framerate_experiment_.steady_state_undershoot_percentage) /
+ 100 +
+ 0.5);
+}
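+// Worked example (assumed values): bitrate_bps = 200000, fps = 5 and the
+// default steady_state_undershoot_percentage = 30 give
+// 200000 / (8 * 5) = 5000 bytes per frame, scaled by (100 - 30) / 100 to a
+// steady-state size threshold of 3500 bytes.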
+
+bool LibvpxVp8Encoder::UpdateVpxConfiguration(size_t stream_index) {
+ RTC_DCHECK(frame_buffer_controller_);
+
+ const size_t config_index = vpx_configs_.size() - 1 - stream_index;
+
+ RTC_DCHECK_LT(config_index, config_overrides_.size());
+ Vp8EncoderConfig* config = &config_overrides_[config_index];
+
+ const Vp8EncoderConfig new_config =
+ frame_buffer_controller_->UpdateConfiguration(stream_index);
+
+ if (new_config.reset_previous_configuration_overrides) {
+ *config = new_config;
+ return true;
+ }
+
+ const bool changes_made = MaybeExtendVp8EncoderConfig(new_config, config);
+
+ // Note that overrides must be applied even if they haven't changed.
+ RTC_DCHECK_LT(config_index, vpx_configs_.size());
+ vpx_codec_enc_cfg_t* vpx_config = &vpx_configs_[config_index];
+ ApplyVp8EncoderConfigToVpxConfig(*config, vpx_config);
+
+ return changes_made;
+}
+
+int LibvpxVp8Encoder::Encode(const VideoFrame& frame,
+ const std::vector<VideoFrameType>* frame_types) {
+ RTC_DCHECK_EQ(frame.width(), codec_.width);
+ RTC_DCHECK_EQ(frame.height(), codec_.height);
+
+ if (!inited_)
+ return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
+ if (encoded_complete_callback_ == NULL)
+ return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
+
+ bool key_frame_requested = false;
+ for (size_t i = 0; i < key_frame_request_.size() && i < send_stream_.size();
+ ++i) {
+ if (key_frame_request_[i] && send_stream_[i]) {
+ key_frame_requested = true;
+ break;
+ }
+ }
+ if (!key_frame_requested && frame_types) {
+ for (size_t i = 0; i < frame_types->size() && i < send_stream_.size();
+ ++i) {
+ if ((*frame_types)[i] == VideoFrameType::kVideoFrameKey &&
+ send_stream_[i]) {
+ key_frame_requested = true;
+ break;
+ }
+ }
+ }
+
+ if (frame.update_rect().IsEmpty() && num_steady_state_frames_ >= 3 &&
+ !key_frame_requested) {
+ if (variable_framerate_experiment_.enabled &&
+ framerate_controller_.DropFrame(frame.timestamp() / kRtpTicksPerMs)) {
+ return WEBRTC_VIDEO_CODEC_OK;
+ }
+ framerate_controller_.AddFrame(frame.timestamp() / kRtpTicksPerMs);
+ }
+
+ bool send_key_frame = key_frame_requested;
+ bool drop_frame = false;
+ bool retransmission_allowed = true;
+ Vp8FrameConfig tl_configs[kMaxSimulcastStreams];
+ for (size_t i = 0; i < encoders_.size(); ++i) {
+ tl_configs[i] =
+ frame_buffer_controller_->NextFrameConfig(i, frame.timestamp());
+ send_key_frame |= tl_configs[i].IntraFrame();
+ drop_frame |= tl_configs[i].drop_frame;
+ RTC_DCHECK(i == 0 ||
+ retransmission_allowed == tl_configs[i].retransmission_allowed);
+ retransmission_allowed = tl_configs[i].retransmission_allowed;
+ }
+
+ if (drop_frame && !send_key_frame) {
+ return WEBRTC_VIDEO_CODEC_OK;
+ }
+
+ vpx_enc_frame_flags_t flags[kMaxSimulcastStreams];
+ for (size_t i = 0; i < encoders_.size(); ++i) {
+ flags[i] = send_key_frame ? VPX_EFLAG_FORCE_KF : EncodeFlags(tl_configs[i]);
+ }
+
+ // Scale and map buffers and set `raw_images_` to hold pointers to the result.
+ // Because `raw_images_` are set to hold pointers to the prepared buffers, we
+ // need to keep these buffers alive through reference counting until after
+ // encoding is complete.
+ std::vector<rtc::scoped_refptr<VideoFrameBuffer>> prepared_buffers =
+ PrepareBuffers(frame.video_frame_buffer());
+ if (prepared_buffers.empty()) {
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+ struct CleanUpOnExit {
+ explicit CleanUpOnExit(
+ vpx_image_t* raw_image,
+ std::vector<rtc::scoped_refptr<VideoFrameBuffer>> prepared_buffers)
+ : raw_image_(raw_image),
+ prepared_buffers_(std::move(prepared_buffers)) {}
+ ~CleanUpOnExit() {
+ raw_image_->planes[VPX_PLANE_Y] = nullptr;
+ raw_image_->planes[VPX_PLANE_U] = nullptr;
+ raw_image_->planes[VPX_PLANE_V] = nullptr;
+ }
+ vpx_image_t* raw_image_;
+ std::vector<rtc::scoped_refptr<VideoFrameBuffer>> prepared_buffers_;
+ } clean_up_on_exit(&raw_images_[0], std::move(prepared_buffers));
+
+ if (send_key_frame) {
+ // Adapt the size of the key frame when in screenshare with 1 temporal
+ // layer.
+ if (encoders_.size() == 1 &&
+ codec_.mode == VideoCodecMode::kScreensharing &&
+ codec_.VP8()->numberOfTemporalLayers <= 1) {
+ const uint32_t forceKeyFrameIntraTh = 100;
+ libvpx_->codec_control(&(encoders_[0]), VP8E_SET_MAX_INTRA_BITRATE_PCT,
+ forceKeyFrameIntraTh);
+ }
+
+ std::fill(key_frame_request_.begin(), key_frame_request_.end(), false);
+ }
+
+ // Set the encoder frame flags and temporal layer_id for each spatial stream.
+  // Note that streams are defined starting from the lowest resolution at
+  // position 0 to the highest resolution at position |encoders_.size() - 1|,
+  // whereas `encoders_` is ordered from highest to lowest resolution.
+ for (size_t i = 0; i < encoders_.size(); ++i) {
+ const size_t stream_idx = encoders_.size() - 1 - i;
+
+ if (UpdateVpxConfiguration(stream_idx)) {
+ if (libvpx_->codec_enc_config_set(&encoders_[i], &vpx_configs_[i]))
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+
+ libvpx_->codec_control(&encoders_[i], VP8E_SET_FRAME_FLAGS,
+ static_cast<int>(flags[stream_idx]));
+ libvpx_->codec_control(&encoders_[i], VP8E_SET_TEMPORAL_LAYER_ID,
+ tl_configs[i].encoder_layer_id);
+ }
+ // TODO(holmer): Ideally the duration should be the timestamp diff of this
+ // frame and the next frame to be encoded, which we don't have. Instead we
+ // would like to use the duration of the previous frame. Unfortunately the
+ // rate control seems to be off with that setup. Using the average input
+ // frame rate to calculate an average duration for now.
+ RTC_DCHECK_GT(codec_.maxFramerate, 0);
+ uint32_t duration = kRtpTicksPerSecond / codec_.maxFramerate;
+
+ int error = WEBRTC_VIDEO_CODEC_OK;
+ int num_tries = 0;
+ // If the first try returns WEBRTC_VIDEO_CODEC_TARGET_BITRATE_OVERSHOOT
+ // the frame must be reencoded with the same parameters again because
+ // target bitrate is exceeded and encoder state has been reset.
+ while (num_tries == 0 ||
+ (num_tries == 1 &&
+ error == WEBRTC_VIDEO_CODEC_TARGET_BITRATE_OVERSHOOT)) {
+ ++num_tries;
+    // Note that we must pass 0 for the `flags` argument in the encode call
+    // below, since the flags are already set per encoder/spatial layer via
+    // `libvpx_->codec_control()` above.
+ error = libvpx_->codec_encode(&encoders_[0], &raw_images_[0], timestamp_,
+ duration, 0, VPX_DL_REALTIME);
+ // Reset specific intra frame thresholds, following the key frame.
+ if (send_key_frame) {
+ libvpx_->codec_control(&(encoders_[0]), VP8E_SET_MAX_INTRA_BITRATE_PCT,
+ rc_max_intra_target_);
+ }
+ if (error)
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ // Examines frame timestamps only.
+ error = GetEncodedPartitions(frame, retransmission_allowed);
+ }
+ // TODO(sprang): Shouldn't we use the frame timestamp instead?
+ timestamp_ += duration;
+ return error;
+}
+
+void LibvpxVp8Encoder::PopulateCodecSpecific(CodecSpecificInfo* codec_specific,
+ const vpx_codec_cx_pkt_t& pkt,
+ int stream_idx,
+ int encoder_idx,
+ uint32_t timestamp) {
+ RTC_DCHECK(codec_specific);
+ codec_specific->codecType = kVideoCodecVP8;
+ codec_specific->codecSpecific.VP8.keyIdx =
+ kNoKeyIdx; // TODO(hlundin) populate this
+ codec_specific->codecSpecific.VP8.nonReference =
+ (pkt.data.frame.flags & VPX_FRAME_IS_DROPPABLE) != 0;
+
+ int qp = 0;
+ vpx_codec_control(&encoders_[encoder_idx], VP8E_GET_LAST_QUANTIZER_64, &qp);
+ bool is_keyframe = (pkt.data.frame.flags & VPX_FRAME_IS_KEY) != 0;
+ frame_buffer_controller_->OnEncodeDone(stream_idx, timestamp,
+ encoded_images_[encoder_idx].size(),
+ is_keyframe, qp, codec_specific);
+ if (is_keyframe && codec_specific->template_structure != absl::nullopt) {
+    // The number of resolutions must match the number of spatial layers; VP8
+    // structures are expected to use a single spatial layer. Templates must
+    // be ordered by spatial_id, so the assumption that there is exactly one
+    // spatial layer is equivalent to assuming that the last template uses
+    // spatial_id = 0.
+    // This check catches the potential scenario where the template_structure
+    // is shared across multiple VP8 streams that are distinguished by
+    // spatial_id. Assigning a single resolution does not support that
+    // scenario, i.e. it assumes VP8 simulcast is sent using multiple SSRCs.
+ RTC_DCHECK(!codec_specific->template_structure->templates.empty());
+ RTC_DCHECK_EQ(
+ codec_specific->template_structure->templates.back().spatial_id, 0);
+ codec_specific->template_structure->resolutions = {
+ RenderResolution(pkt.data.frame.width[0], pkt.data.frame.height[0])};
+ }
+}
+
+int LibvpxVp8Encoder::GetEncodedPartitions(const VideoFrame& input_image,
+ bool retransmission_allowed) {
+ int stream_idx = static_cast<int>(encoders_.size()) - 1;
+ int result = WEBRTC_VIDEO_CODEC_OK;
+ for (size_t encoder_idx = 0; encoder_idx < encoders_.size();
+ ++encoder_idx, --stream_idx) {
+ vpx_codec_iter_t iter = NULL;
+ encoded_images_[encoder_idx].set_size(0);
+ encoded_images_[encoder_idx]._frameType = VideoFrameType::kVideoFrameDelta;
+ CodecSpecificInfo codec_specific;
+ const vpx_codec_cx_pkt_t* pkt = NULL;
+
+ size_t encoded_size = 0;
+ while ((pkt = libvpx_->codec_get_cx_data(&encoders_[encoder_idx], &iter)) !=
+ NULL) {
+ if (pkt->kind == VPX_CODEC_CX_FRAME_PKT) {
+ encoded_size += pkt->data.frame.sz;
+ }
+ }
+
+ auto buffer = EncodedImageBuffer::Create(encoded_size);
+
+ iter = NULL;
+ size_t encoded_pos = 0;
+ while ((pkt = libvpx_->codec_get_cx_data(&encoders_[encoder_idx], &iter)) !=
+ NULL) {
+ switch (pkt->kind) {
+ case VPX_CODEC_CX_FRAME_PKT: {
+ RTC_CHECK_LE(encoded_pos + pkt->data.frame.sz, buffer->size());
+ memcpy(&buffer->data()[encoded_pos], pkt->data.frame.buf,
+ pkt->data.frame.sz);
+ encoded_pos += pkt->data.frame.sz;
+ break;
+ }
+ default:
+ break;
+ }
+ // End of frame
+ if ((pkt->data.frame.flags & VPX_FRAME_IS_FRAGMENT) == 0) {
+        // Check if the encoded frame is a key frame.
+ if (pkt->data.frame.flags & VPX_FRAME_IS_KEY) {
+ encoded_images_[encoder_idx]._frameType =
+ VideoFrameType::kVideoFrameKey;
+ }
+ encoded_images_[encoder_idx].SetEncodedData(buffer);
+ encoded_images_[encoder_idx].set_size(encoded_pos);
+ encoded_images_[encoder_idx].SetSpatialIndex(stream_idx);
+ PopulateCodecSpecific(&codec_specific, *pkt, stream_idx, encoder_idx,
+ input_image.timestamp());
+ if (codec_specific.codecSpecific.VP8.temporalIdx != kNoTemporalIdx) {
+ encoded_images_[encoder_idx].SetTemporalIndex(
+ codec_specific.codecSpecific.VP8.temporalIdx);
+ }
+ break;
+ }
+ }
+ encoded_images_[encoder_idx].SetTimestamp(input_image.timestamp());
+ encoded_images_[encoder_idx].SetColorSpace(input_image.color_space());
+ encoded_images_[encoder_idx].SetRetransmissionAllowed(
+ retransmission_allowed);
+
+ if (send_stream_[stream_idx]) {
+ if (encoded_images_[encoder_idx].size() > 0) {
+ TRACE_COUNTER_ID1("webrtc", "EncodedFrameSize", encoder_idx,
+ encoded_images_[encoder_idx].size());
+ encoded_images_[encoder_idx]._encodedHeight =
+ codec_.simulcastStream[stream_idx].height;
+ encoded_images_[encoder_idx]._encodedWidth =
+ codec_.simulcastStream[stream_idx].width;
+ int qp_128 = -1;
+ libvpx_->codec_control(&encoders_[encoder_idx], VP8E_GET_LAST_QUANTIZER,
+ &qp_128);
+ encoded_images_[encoder_idx].qp_ = qp_128;
+ encoded_images_[encoder_idx].SetAtTargetQuality(
+ qp_128 <= variable_framerate_experiment_.steady_state_qp);
+ encoded_complete_callback_->OnEncodedImage(encoded_images_[encoder_idx],
+ &codec_specific);
+ const size_t steady_state_size = SteadyStateSize(
+ stream_idx, codec_specific.codecSpecific.VP8.temporalIdx);
+ if (qp_128 > variable_framerate_experiment_.steady_state_qp ||
+ encoded_images_[encoder_idx].size() > steady_state_size) {
+ num_steady_state_frames_ = 0;
+ } else {
+ ++num_steady_state_frames_;
+ }
+ } else if (!frame_buffer_controller_->SupportsEncoderFrameDropping(
+ stream_idx)) {
+ result = WEBRTC_VIDEO_CODEC_TARGET_BITRATE_OVERSHOOT;
+ if (encoded_images_[encoder_idx].size() == 0) {
+ // Dropped frame that will be re-encoded.
+ frame_buffer_controller_->OnFrameDropped(stream_idx,
+ input_image.timestamp());
+ }
+ }
+ }
+ }
+ return result;
+}
+
+VideoEncoder::EncoderInfo LibvpxVp8Encoder::GetEncoderInfo() const {
+ EncoderInfo info;
+ info.supports_native_handle = false;
+ info.implementation_name = "libvpx";
+ info.has_trusted_rate_controller =
+ rate_control_settings_.LibvpxVp8TrustedRateController();
+ info.is_hardware_accelerated = false;
+ info.supports_simulcast = true;
+ if (!resolution_bitrate_limits_.empty()) {
+ info.resolution_bitrate_limits = resolution_bitrate_limits_;
+ }
+ if (encoder_info_override_.requested_resolution_alignment()) {
+ info.requested_resolution_alignment =
+ *encoder_info_override_.requested_resolution_alignment();
+ info.apply_alignment_to_all_simulcast_layers =
+ encoder_info_override_.apply_alignment_to_all_simulcast_layers();
+ }
+ if (!encoder_info_override_.resolution_bitrate_limits().empty()) {
+ info.resolution_bitrate_limits =
+ encoder_info_override_.resolution_bitrate_limits();
+ }
+
+ const bool enable_scaling =
+ num_active_streams_ == 1 &&
+ (vpx_configs_.empty() || vpx_configs_[0].rc_dropframe_thresh > 0) &&
+ codec_.VP8().automaticResizeOn;
+
+ info.scaling_settings = enable_scaling
+ ? VideoEncoder::ScalingSettings(
+ kLowVp8QpThreshold, kHighVp8QpThreshold)
+ : VideoEncoder::ScalingSettings::kOff;
+ if (rate_control_settings_.LibvpxVp8MinPixels()) {
+ info.scaling_settings.min_pixels_per_frame =
+ rate_control_settings_.LibvpxVp8MinPixels().value();
+ }
+ info.preferred_pixel_formats = {VideoFrameBuffer::Type::kI420,
+ VideoFrameBuffer::Type::kNV12};
+
+ if (inited_) {
+ // `encoder_idx` is libvpx index where 0 is highest resolution.
+ // `si` is simulcast index, where 0 is lowest resolution.
+ for (size_t si = 0, encoder_idx = encoders_.size() - 1;
+ si < encoders_.size(); ++si, --encoder_idx) {
+ info.fps_allocation[si].clear();
+ if ((codec_.numberOfSimulcastStreams > si &&
+ !codec_.simulcastStream[si].active) ||
+ (si == 0 && SimulcastUtility::IsConferenceModeScreenshare(codec_))) {
+        // There are no defined frame rate fractions if the stream is not
+        // active or if ScreenshareLayers is used; leave the vector empty and
+        // continue.
+ continue;
+ }
+ if (vpx_configs_[encoder_idx].ts_number_layers <= 1) {
+ info.fps_allocation[si].push_back(EncoderInfo::kMaxFramerateFraction);
+ } else {
+ for (size_t ti = 0; ti < vpx_configs_[encoder_idx].ts_number_layers;
+ ++ti) {
+ RTC_DCHECK_GT(vpx_configs_[encoder_idx].ts_rate_decimator[ti], 0);
+ info.fps_allocation[si].push_back(rtc::saturated_cast<uint8_t>(
+ EncoderInfo::kMaxFramerateFraction /
+ vpx_configs_[encoder_idx].ts_rate_decimator[ti] +
+ 0.5));
+ }
+ }
+ }
+ }
+
+ return info;
+}
+
+int LibvpxVp8Encoder::RegisterEncodeCompleteCallback(
+ EncodedImageCallback* callback) {
+ encoded_complete_callback_ = callback;
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+void LibvpxVp8Encoder::MaybeUpdatePixelFormat(vpx_img_fmt fmt) {
+ RTC_DCHECK(!raw_images_.empty());
+ if (raw_images_[0].fmt == fmt) {
+ RTC_DCHECK(std::all_of(
+ std::next(raw_images_.begin()), raw_images_.end(),
+ [fmt](const vpx_image_t& raw_img) { return raw_img.fmt == fmt; }))
+ << "Not all raw images had the right format!";
+ return;
+ }
+ RTC_LOG(LS_INFO) << "Updating vp8 encoder pixel format to "
+ << (fmt == VPX_IMG_FMT_NV12 ? "NV12" : "I420");
+ for (size_t i = 0; i < raw_images_.size(); ++i) {
+ vpx_image_t& img = raw_images_[i];
+ auto d_w = img.d_w;
+ auto d_h = img.d_h;
+ libvpx_->img_free(&img);
+ // First image is wrapping the input frame, the rest are allocated.
+ if (i == 0) {
+ libvpx_->img_wrap(&img, fmt, d_w, d_h, 1, NULL);
+ } else {
+ libvpx_->img_alloc(&img, fmt, d_w, d_h, kVp832ByteAlign);
+ }
+ }
+}
+
+std::vector<rtc::scoped_refptr<VideoFrameBuffer>>
+LibvpxVp8Encoder::PrepareBuffers(rtc::scoped_refptr<VideoFrameBuffer> buffer) {
+ RTC_DCHECK_EQ(buffer->width(), raw_images_[0].d_w);
+ RTC_DCHECK_EQ(buffer->height(), raw_images_[0].d_h);
+ absl::InlinedVector<VideoFrameBuffer::Type, kMaxPreferredPixelFormats>
+ supported_formats = {VideoFrameBuffer::Type::kI420,
+ VideoFrameBuffer::Type::kNV12};
+
+ rtc::scoped_refptr<VideoFrameBuffer> mapped_buffer;
+ if (buffer->type() != VideoFrameBuffer::Type::kNative) {
+ // `buffer` is already mapped.
+ mapped_buffer = buffer;
+ } else {
+ // Attempt to map to one of the supported formats.
+ mapped_buffer = buffer->GetMappedFrameBuffer(supported_formats);
+ }
+ if (!mapped_buffer ||
+ (absl::c_find(supported_formats, mapped_buffer->type()) ==
+ supported_formats.end() &&
+ mapped_buffer->type() != VideoFrameBuffer::Type::kI420A)) {
+ // Unknown pixel format or unable to map, convert to I420 and prepare that
+ // buffer instead to ensure Scale() is safe to use.
+ auto converted_buffer = buffer->ToI420();
+ if (!converted_buffer) {
+ RTC_LOG(LS_ERROR) << "Failed to convert "
+ << VideoFrameBufferTypeToString(buffer->type())
+ << " image to I420. Can't encode frame.";
+ return {};
+ }
+ RTC_CHECK(converted_buffer->type() == VideoFrameBuffer::Type::kI420 ||
+ converted_buffer->type() == VideoFrameBuffer::Type::kI420A);
+
+ // Because `buffer` had to be converted, use `converted_buffer` instead...
+ buffer = mapped_buffer = converted_buffer;
+ }
+
+ // Maybe update pixel format.
+ absl::InlinedVector<VideoFrameBuffer::Type, kMaxPreferredPixelFormats>
+ mapped_type = {mapped_buffer->type()};
+ switch (mapped_buffer->type()) {
+ case VideoFrameBuffer::Type::kI420:
+ case VideoFrameBuffer::Type::kI420A:
+ MaybeUpdatePixelFormat(VPX_IMG_FMT_I420);
+ break;
+ case VideoFrameBuffer::Type::kNV12:
+ MaybeUpdatePixelFormat(VPX_IMG_FMT_NV12);
+ break;
+ default:
+ RTC_DCHECK_NOTREACHED();
+ }
+
+ // Prepare `raw_images_` from `mapped_buffer` and, if simulcast, scaled
+ // versions of `buffer`.
+ std::vector<rtc::scoped_refptr<VideoFrameBuffer>> prepared_buffers;
+ SetRawImagePlanes(&raw_images_[0], mapped_buffer.get());
+ prepared_buffers.push_back(mapped_buffer);
+ for (size_t i = 1; i < encoders_.size(); ++i) {
+    // A native buffer should implement optimized scaling and is the
+    // preferred buffer to scale. But if the buffer isn't native, it should
+    // be cheaper to scale from the previously prepared buffer, which is
+    // smaller than `buffer`.
+ VideoFrameBuffer* buffer_to_scale =
+ buffer->type() == VideoFrameBuffer::Type::kNative
+ ? buffer.get()
+ : prepared_buffers.back().get();
+
+ auto scaled_buffer =
+ buffer_to_scale->Scale(raw_images_[i].d_w, raw_images_[i].d_h);
+ if (scaled_buffer->type() == VideoFrameBuffer::Type::kNative) {
+ auto mapped_scaled_buffer =
+ scaled_buffer->GetMappedFrameBuffer(mapped_type);
+ RTC_DCHECK(mapped_scaled_buffer) << "Unable to map the scaled buffer.";
+ if (!mapped_scaled_buffer) {
+ RTC_LOG(LS_ERROR) << "Failed to map scaled "
+ << VideoFrameBufferTypeToString(scaled_buffer->type())
+ << " image to "
+ << VideoFrameBufferTypeToString(mapped_buffer->type())
+ << ". Can't encode frame.";
+ return {};
+ }
+ scaled_buffer = mapped_scaled_buffer;
+ }
+ if (!IsCompatibleVideoFrameBufferType(scaled_buffer->type(),
+ mapped_buffer->type())) {
+ RTC_LOG(LS_ERROR) << "When scaling "
+ << VideoFrameBufferTypeToString(buffer_to_scale->type())
+ << ", the image was unexpectedly converted to "
+ << VideoFrameBufferTypeToString(scaled_buffer->type())
+ << " instead of "
+ << VideoFrameBufferTypeToString(mapped_buffer->type())
+ << ". Can't encode frame.";
+ RTC_DCHECK_NOTREACHED()
+ << "Scaled buffer type "
+ << VideoFrameBufferTypeToString(scaled_buffer->type())
+ << " is not compatible with mapped buffer type "
+ << VideoFrameBufferTypeToString(mapped_buffer->type());
+ return {};
+ }
+ SetRawImagePlanes(&raw_images_[i], scaled_buffer.get());
+ prepared_buffers.push_back(scaled_buffer);
+ }
+ return prepared_buffers;
+}
+
+// static
+LibvpxVp8Encoder::VariableFramerateExperiment
+LibvpxVp8Encoder::ParseVariableFramerateConfig(std::string group_name) {
+ FieldTrialFlag disabled = FieldTrialFlag("Disabled");
+ FieldTrialParameter<double> framerate_limit("min_fps", 5.0);
+ FieldTrialParameter<int> qp("min_qp", 15);
+ FieldTrialParameter<int> undershoot_percentage("undershoot", 30);
+ ParseFieldTrial({&disabled, &framerate_limit, &qp, &undershoot_percentage},
+ field_trial::FindFullName(group_name));
+ VariableFramerateExperiment config;
+ config.enabled = !disabled.Get();
+ config.framerate_limit = framerate_limit.Get();
+ config.steady_state_qp = qp.Get();
+ config.steady_state_undershoot_percentage = undershoot_percentage.Get();
+
+ return config;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/vp8/libvpx_vp8_encoder.h b/third_party/libwebrtc/modules/video_coding/codecs/vp8/libvpx_vp8_encoder.h
new file mode 100644
index 0000000000..643758753d
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/vp8/libvpx_vp8_encoder.h
@@ -0,0 +1,158 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_CODECS_VP8_LIBVPX_VP8_ENCODER_H_
+#define MODULES_VIDEO_CODING_CODECS_VP8_LIBVPX_VP8_ENCODER_H_
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "api/fec_controller_override.h"
+#include "api/video/encoded_image.h"
+#include "api/video/video_frame.h"
+#include "api/video_codecs/video_encoder.h"
+#include "api/video_codecs/vp8_frame_buffer_controller.h"
+#include "api/video_codecs/vp8_frame_config.h"
+#include "modules/video_coding/codecs/interface/libvpx_interface.h"
+#include "modules/video_coding/codecs/vp8/include/vp8.h"
+#include "modules/video_coding/include/video_codec_interface.h"
+#include "modules/video_coding/utility/framerate_controller_deprecated.h"
+#include "rtc_base/experiments/cpu_speed_experiment.h"
+#include "rtc_base/experiments/encoder_info_settings.h"
+#include "rtc_base/experiments/rate_control_settings.h"
+#include "vpx/vp8cx.h"
+#include "vpx/vpx_encoder.h"
+
+namespace webrtc {
+
+class LibvpxVp8Encoder : public VideoEncoder {
+ public:
+ LibvpxVp8Encoder(std::unique_ptr<LibvpxInterface> interface,
+ VP8Encoder::Settings settings);
+ ~LibvpxVp8Encoder() override;
+
+ int Release() override;
+
+ void SetFecControllerOverride(
+ FecControllerOverride* fec_controller_override) override;
+
+ int InitEncode(const VideoCodec* codec_settings,
+ const VideoEncoder::Settings& settings) override;
+
+ int Encode(const VideoFrame& input_image,
+ const std::vector<VideoFrameType>* frame_types) override;
+
+ int RegisterEncodeCompleteCallback(EncodedImageCallback* callback) override;
+
+ void SetRates(const RateControlParameters& parameters) override;
+
+ void OnPacketLossRateUpdate(float packet_loss_rate) override;
+
+ void OnRttUpdate(int64_t rtt_ms) override;
+
+ void OnLossNotification(const LossNotification& loss_notification) override;
+
+ EncoderInfo GetEncoderInfo() const override;
+
+ static vpx_enc_frame_flags_t EncodeFlags(const Vp8FrameConfig& references);
+
+ private:
+ // Get the cpu_speed setting for encoder based on resolution and/or platform.
+ int GetCpuSpeed(int width, int height);
+
+ // Determine number of encoder threads to use.
+ int NumberOfThreads(int width, int height, int number_of_cores);
+
+ // Call encoder initialize function and set control settings.
+ int InitAndSetControlSettings();
+
+ void PopulateCodecSpecific(CodecSpecificInfo* codec_specific,
+ const vpx_codec_cx_pkt& pkt,
+ int stream_idx,
+ int encoder_idx,
+ uint32_t timestamp);
+
+ int GetEncodedPartitions(const VideoFrame& input_image,
+ bool retransmission_allowed);
+
+ // Set the stream state for stream `stream_idx`.
+ void SetStreamState(bool send_stream, int stream_idx);
+
+ uint32_t MaxIntraTarget(uint32_t optimal_buffer_size);
+
+ uint32_t FrameDropThreshold(size_t spatial_idx) const;
+
+ size_t SteadyStateSize(int sid, int tid);
+
+ bool UpdateVpxConfiguration(size_t stream_index);
+
+ void MaybeUpdatePixelFormat(vpx_img_fmt fmt);
+  // Prepares `raw_images_` to reference the image data of `buffer`, or of
+  // mapped or scaled versions of `buffer`. Returns a list of buffers that
+  // got referenced as a result, allowing the caller to keep references to
+  // them until after
+ // encoding has finished. On failure to convert the buffer, an empty list is
+ // returned.
+ std::vector<rtc::scoped_refptr<VideoFrameBuffer>> PrepareBuffers(
+ rtc::scoped_refptr<VideoFrameBuffer> buffer);
+
+ const std::unique_ptr<LibvpxInterface> libvpx_;
+
+ const CpuSpeedExperiment experimental_cpu_speed_config_arm_;
+ const RateControlSettings rate_control_settings_;
+
+ EncodedImageCallback* encoded_complete_callback_ = nullptr;
+ VideoCodec codec_;
+ bool inited_ = false;
+ int64_t timestamp_ = 0;
+ int qp_max_ = 56;
+ int cpu_speed_default_ = -6;
+ int number_of_cores_ = 0;
+ uint32_t rc_max_intra_target_ = 0;
+ int num_active_streams_ = 0;
+ const std::unique_ptr<Vp8FrameBufferControllerFactory>
+ frame_buffer_controller_factory_;
+ std::unique_ptr<Vp8FrameBufferController> frame_buffer_controller_;
+ const std::vector<VideoEncoder::ResolutionBitrateLimits>
+ resolution_bitrate_limits_;
+ std::vector<bool> key_frame_request_;
+ std::vector<bool> send_stream_;
+ std::vector<int> cpu_speed_;
+ std::vector<vpx_image_t> raw_images_;
+ std::vector<EncodedImage> encoded_images_;
+ std::vector<vpx_codec_ctx_t> encoders_;
+ std::vector<vpx_codec_enc_cfg_t> vpx_configs_;
+ std::vector<Vp8EncoderConfig> config_overrides_;
+ std::vector<vpx_rational_t> downsampling_factors_;
+
+ // Variable frame-rate screencast related fields and methods.
+ const struct VariableFramerateExperiment {
+ bool enabled = false;
+ // Framerate is limited to this value in steady state.
+ float framerate_limit = 5.0;
+ // This qp or below is considered a steady state.
+ int steady_state_qp = 15;
+    // Frames that undershoot the ideal size for the configured bitrate by at
+    // least this percentage are considered to be in a steady state.
+ int steady_state_undershoot_percentage = 30;
+ } variable_framerate_experiment_;
+ static VariableFramerateExperiment ParseVariableFramerateConfig(
+ std::string group_name);
+ FramerateControllerDeprecated framerate_controller_;
+ int num_steady_state_frames_ = 0;
+
+ FecControllerOverride* fec_controller_override_ = nullptr;
+
+ const LibvpxVp8EncoderInfoSettings encoder_info_override_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_CODECS_VP8_LIBVPX_VP8_ENCODER_H_
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/vp8/libvpx_vp8_simulcast_test.cc b/third_party/libwebrtc/modules/video_coding/codecs/vp8/libvpx_vp8_simulcast_test.cc
new file mode 100644
index 0000000000..67c9110b3c
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/vp8/libvpx_vp8_simulcast_test.cc
@@ -0,0 +1,108 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <memory>
+
+#include "api/test/create_simulcast_test_fixture.h"
+#include "api/test/simulcast_test_fixture.h"
+#include "api/test/video/function_video_decoder_factory.h"
+#include "api/test/video/function_video_encoder_factory.h"
+#include "modules/video_coding/codecs/vp8/include/vp8.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace test {
+
+namespace {
+std::unique_ptr<SimulcastTestFixture> CreateSpecificSimulcastTestFixture() {
+ std::unique_ptr<VideoEncoderFactory> encoder_factory =
+ std::make_unique<FunctionVideoEncoderFactory>(
+ []() { return VP8Encoder::Create(); });
+ std::unique_ptr<VideoDecoderFactory> decoder_factory =
+ std::make_unique<FunctionVideoDecoderFactory>(
+ []() { return VP8Decoder::Create(); });
+ return CreateSimulcastTestFixture(std::move(encoder_factory),
+ std::move(decoder_factory),
+ SdpVideoFormat("VP8"));
+}
+} // namespace
+
+TEST(LibvpxVp8SimulcastTest, TestKeyFrameRequestsOnAllStreams) {
+ auto fixture = CreateSpecificSimulcastTestFixture();
+ fixture->TestKeyFrameRequestsOnAllStreams();
+}
+
+TEST(LibvpxVp8SimulcastTest, TestPaddingAllStreams) {
+ auto fixture = CreateSpecificSimulcastTestFixture();
+ fixture->TestPaddingAllStreams();
+}
+
+TEST(LibvpxVp8SimulcastTest, TestPaddingTwoStreams) {
+ auto fixture = CreateSpecificSimulcastTestFixture();
+ fixture->TestPaddingTwoStreams();
+}
+
+TEST(LibvpxVp8SimulcastTest, TestPaddingTwoStreamsOneMaxedOut) {
+ auto fixture = CreateSpecificSimulcastTestFixture();
+ fixture->TestPaddingTwoStreamsOneMaxedOut();
+}
+
+TEST(LibvpxVp8SimulcastTest, TestPaddingOneStream) {
+ auto fixture = CreateSpecificSimulcastTestFixture();
+ fixture->TestPaddingOneStream();
+}
+
+TEST(LibvpxVp8SimulcastTest, TestPaddingOneStreamTwoMaxedOut) {
+ auto fixture = CreateSpecificSimulcastTestFixture();
+ fixture->TestPaddingOneStreamTwoMaxedOut();
+}
+
+TEST(LibvpxVp8SimulcastTest, TestSendAllStreams) {
+ auto fixture = CreateSpecificSimulcastTestFixture();
+ fixture->TestSendAllStreams();
+}
+
+TEST(LibvpxVp8SimulcastTest, TestDisablingStreams) {
+ auto fixture = CreateSpecificSimulcastTestFixture();
+ fixture->TestDisablingStreams();
+}
+
+TEST(LibvpxVp8SimulcastTest, TestActiveStreams) {
+ auto fixture = CreateSpecificSimulcastTestFixture();
+ fixture->TestActiveStreams();
+}
+
+TEST(LibvpxVp8SimulcastTest, TestSwitchingToOneStream) {
+ auto fixture = CreateSpecificSimulcastTestFixture();
+ fixture->TestSwitchingToOneStream();
+}
+
+TEST(LibvpxVp8SimulcastTest, TestSwitchingToOneOddStream) {
+ auto fixture = CreateSpecificSimulcastTestFixture();
+ fixture->TestSwitchingToOneOddStream();
+}
+
+TEST(LibvpxVp8SimulcastTest, TestSwitchingToOneSmallStream) {
+ auto fixture = CreateSpecificSimulcastTestFixture();
+ fixture->TestSwitchingToOneSmallStream();
+}
+
+TEST(LibvpxVp8SimulcastTest, TestSpatioTemporalLayers333PatternEncoder) {
+ auto fixture = CreateSpecificSimulcastTestFixture();
+ fixture->TestSpatioTemporalLayers333PatternEncoder();
+}
+
+TEST(LibvpxVp8SimulcastTest, TestStrideEncodeDecode) {
+ auto fixture = CreateSpecificSimulcastTestFixture();
+ fixture->TestStrideEncodeDecode();
+}
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/vp8/screenshare_layers.cc b/third_party/libwebrtc/modules/video_coding/codecs/vp8/screenshare_layers.cc
new file mode 100644
index 0000000000..71db0b22c2
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/vp8/screenshare_layers.cc
@@ -0,0 +1,624 @@
+/* Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/codecs/vp8/screenshare_layers.h"
+
+#include <stdlib.h>
+
+#include <algorithm>
+#include <memory>
+
+#include "modules/video_coding/include/video_codec_interface.h"
+#include "rtc_base/arraysize.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/time_utils.h"
+#include "system_wrappers/include/metrics.h"
+
+namespace webrtc {
+namespace {
+using BufferFlags = Vp8FrameConfig::BufferFlags;
+
+constexpr BufferFlags kNone = Vp8FrameConfig::BufferFlags::kNone;
+constexpr BufferFlags kReference = Vp8FrameConfig::BufferFlags::kReference;
+constexpr BufferFlags kUpdate = Vp8FrameConfig::BufferFlags::kUpdate;
+constexpr BufferFlags kReferenceAndUpdate =
+ Vp8FrameConfig::BufferFlags::kReferenceAndUpdate;
+
+constexpr int kOneSecond90Khz = 90000;
+constexpr int kMinTimeBetweenSyncs = kOneSecond90Khz * 2;
+constexpr int kMaxTimeBetweenSyncs = kOneSecond90Khz * 4;
+constexpr int kQpDeltaThresholdForSync = 8;
+constexpr int kMinBitrateKbpsForQpBoost = 500;
+constexpr auto kSwitch = DecodeTargetIndication::kSwitch;
+} // namespace
+
+const double ScreenshareLayers::kMaxTL0FpsReduction = 2.5;
+const double ScreenshareLayers::kAcceptableTargetOvershoot = 2.0;
+
+constexpr int ScreenshareLayers::kMaxNumTemporalLayers;
+
+// Always emit a frame at a certain minimum interval, even if bitrate targets
+// have been exceeded. This prevents needless keyframe requests.
+const int ScreenshareLayers::kMaxFrameIntervalMs = 2750;
+
+ScreenshareLayers::ScreenshareLayers(int num_temporal_layers)
+ : number_of_temporal_layers_(
+ std::min(kMaxNumTemporalLayers, num_temporal_layers)),
+ active_layer_(-1),
+ last_timestamp_(-1),
+ last_sync_timestamp_(-1),
+ last_emitted_tl0_timestamp_(-1),
+ last_frame_time_ms_(-1),
+ max_debt_bytes_(0),
+ encode_framerate_(1000.0f, 1000.0f), // 1 second window, second scale.
+ bitrate_updated_(false),
+ checker_(TemporalLayersChecker::CreateTemporalLayersChecker(
+ Vp8TemporalLayersType::kBitrateDynamic,
+ num_temporal_layers)) {
+ RTC_CHECK_GT(number_of_temporal_layers_, 0);
+ RTC_CHECK_LE(number_of_temporal_layers_, kMaxNumTemporalLayers);
+}
+
+ScreenshareLayers::~ScreenshareLayers() {
+ UpdateHistograms();
+}
+
+void ScreenshareLayers::SetQpLimits(size_t stream_index,
+ int min_qp,
+ int max_qp) {
+ RTC_DCHECK_LT(stream_index, StreamCount());
+ // 0 < min_qp <= max_qp
+ RTC_DCHECK_LT(0, min_qp);
+ RTC_DCHECK_LE(min_qp, max_qp);
+
+ RTC_DCHECK_EQ(min_qp_.has_value(), max_qp_.has_value());
+ if (!min_qp_.has_value()) {
+ min_qp_ = min_qp;
+ max_qp_ = max_qp;
+ } else {
+ RTC_DCHECK_EQ(min_qp, min_qp_.value());
+ RTC_DCHECK_EQ(max_qp, max_qp_.value());
+ }
+}
+
+size_t ScreenshareLayers::StreamCount() const {
+ return 1;
+}
+
+bool ScreenshareLayers::SupportsEncoderFrameDropping(
+ size_t stream_index) const {
+ RTC_DCHECK_LT(stream_index, StreamCount());
+ // Frame dropping is handled internally by this class.
+ return false;
+}
+
+Vp8FrameConfig ScreenshareLayers::NextFrameConfig(size_t stream_index,
+ uint32_t timestamp) {
+ RTC_DCHECK_LT(stream_index, StreamCount());
+
+ auto it = pending_frame_configs_.find(timestamp);
+ if (it != pending_frame_configs_.end()) {
+ // Drop and re-encode, reuse the previous config.
+ return it->second.frame_config;
+ }
+
+ if (number_of_temporal_layers_ <= 1) {
+ // No flags needed for 1 layer screenshare.
+ // TODO(pbos): Consider updating only last, and not all buffers.
+ DependencyInfo dependency_info{
+ "S", {kReferenceAndUpdate, kReferenceAndUpdate, kReferenceAndUpdate}};
+ pending_frame_configs_[timestamp] = dependency_info;
+ return dependency_info.frame_config;
+ }
+
+ const int64_t now_ms = rtc::TimeMillis();
+
+ int64_t unwrapped_timestamp = time_wrap_handler_.Unwrap(timestamp);
+ int64_t ts_diff;
+ if (last_timestamp_ == -1) {
+ ts_diff = kOneSecond90Khz / capture_framerate_.value_or(*target_framerate_);
+ } else {
+ ts_diff = unwrapped_timestamp - last_timestamp_;
+ }
+
+ if (target_framerate_) {
+ // Drop the frame if the input frame rate exceeds the target frame rate
+ // over a one second averaging window, or if the frame interval is below
+ // 85% of the desired value.
+ if (encode_framerate_.Rate(now_ms).value_or(0) > *target_framerate_)
+ return Vp8FrameConfig(kNone, kNone, kNone);
+
+ // Primarily check whether the frame interval is too short using frame
+ // timestamps since, when correct, they are unaffected by queuing in webrtc.
+ const int64_t expected_frame_interval_90khz =
+ kOneSecond90Khz / *target_framerate_;
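+ // E.g. at the default 5 fps screenshare target, the expected interval is
+ // 18000 ticks, so frames arriving less than 15300 ticks (85%) apart drop.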
+ if (last_timestamp_ != -1 && ts_diff > 0) {
+ if (ts_diff < 85 * expected_frame_interval_90khz / 100) {
+ return Vp8FrameConfig(kNone, kNone, kNone);
+ }
+ } else {
+ // Timestamps look off; use the realtime clock here instead.
+ const int64_t expected_frame_interval_ms = 1000 / *target_framerate_;
+ if (last_frame_time_ms_ != -1 &&
+ now_ms - last_frame_time_ms_ <
+ (85 * expected_frame_interval_ms) / 100) {
+ return Vp8FrameConfig(kNone, kNone, kNone);
+ }
+ }
+ }
+
+ if (stats_.first_frame_time_ms_ == -1)
+ stats_.first_frame_time_ms_ = now_ms;
+
+ // Make sure both frame droppers leak out bits.
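+ // E.g. at 5 fps, ts_diff is 18000 ticks, i.e. 200 ms of repayment per layer.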
+ layers_[0].UpdateDebt(ts_diff / 90);
+ layers_[1].UpdateDebt(ts_diff / 90);
+ last_timestamp_ = timestamp;
+ last_frame_time_ms_ = now_ms;
+
+ TemporalLayerState layer_state = TemporalLayerState::kDrop;
+
+ if (active_layer_ == -1 ||
+ layers_[active_layer_].state != TemporalLayer::State::kDropped) {
+ if (last_emitted_tl0_timestamp_ != -1 &&
+ (unwrapped_timestamp - last_emitted_tl0_timestamp_) / 90 >
+ kMaxFrameIntervalMs) {
+ // Too much time has passed since the last frame was emitted; cancel
+ // enough debt to allow a single frame.
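+ // Setting the debt one byte below the limit lets the TL0 debt check just
+ // below pass, so one frame is emitted.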
+ layers_[0].debt_bytes_ = max_debt_bytes_ - 1;
+ }
+ if (layers_[0].debt_bytes_ > max_debt_bytes_) {
+ // Must drop TL0, encode TL1 instead.
+ if (layers_[1].debt_bytes_ > max_debt_bytes_) {
+ // Must drop both TL0 and TL1.
+ active_layer_ = -1;
+ } else {
+ active_layer_ = 1;
+ }
+ } else {
+ active_layer_ = 0;
+ }
+ }
+
+ switch (active_layer_) {
+ case 0:
+ layer_state = TemporalLayerState::kTl0;
+ last_emitted_tl0_timestamp_ = unwrapped_timestamp;
+ break;
+ case 1:
+ if (layers_[1].state != TemporalLayer::State::kDropped) {
+ if (TimeToSync(unwrapped_timestamp) ||
+ layers_[1].state == TemporalLayer::State::kKeyFrame) {
+ last_sync_timestamp_ = unwrapped_timestamp;
+ layer_state = TemporalLayerState::kTl1Sync;
+ } else {
+ layer_state = TemporalLayerState::kTl1;
+ }
+ } else {
+ layer_state = last_sync_timestamp_ == unwrapped_timestamp
+ ? TemporalLayerState::kTl1Sync
+ : TemporalLayerState::kTl1;
+ }
+ break;
+ case -1:
+ layer_state = TemporalLayerState::kDrop;
+ ++stats_.num_dropped_frames_;
+ break;
+ default:
+ RTC_DCHECK_NOTREACHED();
+ }
+
+ DependencyInfo dependency_info;
+ // TODO(pbos): Consider referencing but not updating the 'alt' buffer for all
+ // layers.
+ switch (layer_state) {
+ case TemporalLayerState::kDrop:
+ dependency_info = {"", {kNone, kNone, kNone}};
+ break;
+ case TemporalLayerState::kTl0:
+ // TL0 only references and updates 'last'.
+ dependency_info = {"SS", {kReferenceAndUpdate, kNone, kNone}};
+ dependency_info.frame_config.packetizer_temporal_idx = 0;
+ break;
+ case TemporalLayerState::kTl1:
+ // TL1 references both 'last' and 'golden' but only updates 'golden'.
+ dependency_info = {"-R", {kReference, kReferenceAndUpdate, kNone}};
+ dependency_info.frame_config.packetizer_temporal_idx = 1;
+ break;
+ case TemporalLayerState::kTl1Sync:
+ // Predict from only TL0 to allow participants to switch to the high
+ // bitrate stream. Updates 'golden' so that TL1 can continue to refer to
+ // and update 'golden' from this point on.
+ dependency_info = {"-S", {kReference, kUpdate, kNone}};
+ dependency_info.frame_config.packetizer_temporal_idx = 1;
+ dependency_info.frame_config.layer_sync = true;
+ break;
+ }
+
+ pending_frame_configs_[timestamp] = dependency_info;
+ return dependency_info.frame_config;
+}
+
+void ScreenshareLayers::OnRatesUpdated(
+ size_t stream_index,
+ const std::vector<uint32_t>& bitrates_bps,
+ int framerate_fps) {
+ RTC_DCHECK_LT(stream_index, StreamCount());
+ RTC_DCHECK_GT(framerate_fps, 0);
+ RTC_DCHECK_GE(bitrates_bps.size(), 1);
+ RTC_DCHECK_LE(bitrates_bps.size(), 2);
+
+ // `bitrates_bps` uses individual rates per layer, but we want to use the
+ // accumulated rate here.
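+ // E.g. per-layer rates {200, 1800} kbps become tl0 = 200, tl1 = 2000 kbps.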
+ uint32_t tl0_kbps = bitrates_bps[0] / 1000;
+ uint32_t tl1_kbps = tl0_kbps;
+ if (bitrates_bps.size() > 1) {
+ tl1_kbps += bitrates_bps[1] / 1000;
+ }
+
+ if (!target_framerate_) {
+ // The first OnRatesUpdated() is called during construction, with the
+ // configured targets as parameters.
+ target_framerate_ = framerate_fps;
+ capture_framerate_ = target_framerate_;
+ bitrate_updated_ = true;
+ } else {
+ if ((capture_framerate_ &&
+ framerate_fps != static_cast<int>(*capture_framerate_)) ||
+ (tl0_kbps != layers_[0].target_rate_kbps_) ||
+ (tl1_kbps != layers_[1].target_rate_kbps_)) {
+ bitrate_updated_ = true;
+ }
+
+ if (framerate_fps < 0) {
+ capture_framerate_.reset();
+ } else {
+ capture_framerate_ = framerate_fps;
+ }
+ }
+
+ layers_[0].target_rate_kbps_ = tl0_kbps;
+ layers_[1].target_rate_kbps_ = tl1_kbps;
+}
+
+void ScreenshareLayers::OnEncodeDone(size_t stream_index,
+ uint32_t rtp_timestamp,
+ size_t size_bytes,
+ bool is_keyframe,
+ int qp,
+ CodecSpecificInfo* info) {
+ RTC_DCHECK_LT(stream_index, StreamCount());
+
+ if (size_bytes == 0) {
+ RTC_LOG(LS_WARNING) << "Empty frame; treating as dropped.";
+ OnFrameDropped(stream_index, rtp_timestamp);
+ return;
+ }
+
+ absl::optional<DependencyInfo> dependency_info;
+ auto it = pending_frame_configs_.find(rtp_timestamp);
+ if (it != pending_frame_configs_.end()) {
+ dependency_info = it->second;
+ pending_frame_configs_.erase(it);
+
+ if (checker_) {
+ RTC_DCHECK(checker_->CheckTemporalConfig(is_keyframe,
+ dependency_info->frame_config));
+ }
+ }
+
+ CodecSpecificInfoVP8& vp8_info = info->codecSpecific.VP8;
+ GenericFrameInfo& generic_frame_info = info->generic_frame_info.emplace();
+
+ if (number_of_temporal_layers_ == 1) {
+ vp8_info.temporalIdx = kNoTemporalIdx;
+ vp8_info.layerSync = false;
+ generic_frame_info.temporal_id = 0;
+ generic_frame_info.decode_target_indications = {kSwitch};
+ generic_frame_info.encoder_buffers.emplace_back(
+ 0, /*referenced=*/!is_keyframe, /*updated=*/true);
+ } else {
+ int64_t unwrapped_timestamp = time_wrap_handler_.Unwrap(rtp_timestamp);
+ if (dependency_info) {
+ vp8_info.temporalIdx =
+ dependency_info->frame_config.packetizer_temporal_idx;
+ vp8_info.layerSync = dependency_info->frame_config.layer_sync;
+ generic_frame_info.temporal_id = vp8_info.temporalIdx;
+ generic_frame_info.decode_target_indications =
+ dependency_info->decode_target_indications;
+ } else {
+ RTC_DCHECK(is_keyframe);
+ }
+
+ if (is_keyframe) {
+ vp8_info.temporalIdx = 0;
+ last_sync_timestamp_ = unwrapped_timestamp;
+ vp8_info.layerSync = true;
+ layers_[0].state = TemporalLayer::State::kKeyFrame;
+ layers_[1].state = TemporalLayer::State::kKeyFrame;
+ active_layer_ = 1;
+ info->template_structure =
+ GetTemplateStructure(number_of_temporal_layers_);
+ generic_frame_info.temporal_id = vp8_info.temporalIdx;
+ generic_frame_info.decode_target_indications = {kSwitch, kSwitch};
+ } else if (active_layer_ >= 0 && layers_[active_layer_].state ==
+ TemporalLayer::State::kKeyFrame) {
+ layers_[active_layer_].state = TemporalLayer::State::kNormal;
+ }
+
+ vp8_info.useExplicitDependencies = true;
+ RTC_DCHECK_EQ(vp8_info.referencedBuffersCount, 0u);
+ RTC_DCHECK_EQ(vp8_info.updatedBuffersCount, 0u);
+
+ // Note that `frame_config` is only dereferenced when `is_keyframe` is
+ // false, and the optional can only be unset for keyframes, so it is never
+ // dereferenced while unset.
+ for (int i = 0; i < static_cast<int>(Vp8FrameConfig::Buffer::kCount); ++i) {
+ bool references = false;
+ bool updates = is_keyframe;
+ if (!is_keyframe && dependency_info->frame_config.References(
+ static_cast<Vp8FrameConfig::Buffer>(i))) {
+ RTC_DCHECK_LT(vp8_info.referencedBuffersCount,
+ arraysize(CodecSpecificInfoVP8::referencedBuffers));
+ references = true;
+ vp8_info.referencedBuffers[vp8_info.referencedBuffersCount++] = i;
+ }
+
+ if (is_keyframe || dependency_info->frame_config.Updates(
+ static_cast<Vp8FrameConfig::Buffer>(i))) {
+ RTC_DCHECK_LT(vp8_info.updatedBuffersCount,
+ arraysize(CodecSpecificInfoVP8::updatedBuffers));
+ updates = true;
+ vp8_info.updatedBuffers[vp8_info.updatedBuffersCount++] = i;
+ }
+
+ if (references || updates)
+ generic_frame_info.encoder_buffers.emplace_back(i, references, updates);
+ }
+ }
+
+ encode_framerate_.Update(1, rtc::TimeMillis());
+
+ if (number_of_temporal_layers_ == 1)
+ return;
+
+ RTC_DCHECK_NE(-1, active_layer_);
+ if (layers_[active_layer_].state == TemporalLayer::State::kDropped) {
+ layers_[active_layer_].state = TemporalLayer::State::kQualityBoost;
+ }
+
+ if (qp != -1)
+ layers_[active_layer_].last_qp = qp;
+
+ if (active_layer_ == 0) {
+ layers_[0].debt_bytes_ += size_bytes;
+ layers_[1].debt_bytes_ += size_bytes;
+ ++stats_.num_tl0_frames_;
+ stats_.tl0_target_bitrate_sum_ += layers_[0].target_rate_kbps_;
+ stats_.tl0_qp_sum_ += qp;
+ } else if (active_layer_ == 1) {
+ layers_[1].debt_bytes_ += size_bytes;
+ ++stats_.num_tl1_frames_;
+ stats_.tl1_target_bitrate_sum_ += layers_[1].target_rate_kbps_;
+ stats_.tl1_qp_sum_ += qp;
+ }
+}
+
+void ScreenshareLayers::OnFrameDropped(size_t stream_index,
+ uint32_t rtp_timestamp) {
+ layers_[active_layer_].state = TemporalLayer::State::kDropped;
+ ++stats_.num_overshoots_;
+}
+
+void ScreenshareLayers::OnPacketLossRateUpdate(float packet_loss_rate) {}
+
+void ScreenshareLayers::OnRttUpdate(int64_t rtt_ms) {}
+
+void ScreenshareLayers::OnLossNotification(
+ const VideoEncoder::LossNotification& loss_notification) {}
+
+FrameDependencyStructure ScreenshareLayers::GetTemplateStructure(
+ int num_layers) const {
+ RTC_CHECK_LT(num_layers, 3);
+ RTC_CHECK_GT(num_layers, 0);
+
+ FrameDependencyStructure template_structure;
+ template_structure.num_decode_targets = num_layers;
+
+ switch (num_layers) {
+ case 1: {
+ template_structure.templates.resize(2);
+ template_structure.templates[0].T(0).Dtis("S");
+ template_structure.templates[1].T(0).Dtis("S").FrameDiffs({1});
+ return template_structure;
+ }
+ case 2: {
+ template_structure.templates.resize(3);
+ template_structure.templates[0].T(0).Dtis("SS");
+ template_structure.templates[1].T(0).Dtis("SS").FrameDiffs({1});
+ template_structure.templates[2].T(1).Dtis("-S").FrameDiffs({1});
+ return template_structure;
+ }
+ default:
+ RTC_DCHECK_NOTREACHED();
+ // To make the compiler happy!
+ return template_structure;
+ }
+}
+
+bool ScreenshareLayers::TimeToSync(int64_t timestamp) const {
+ RTC_DCHECK_EQ(1, active_layer_);
+ RTC_DCHECK_NE(-1, layers_[0].last_qp);
+ if (layers_[1].last_qp == -1) {
+ // First frame in TL1 should only depend on TL0 since there are no
+ // previous frames in TL1.
+ return true;
+ }
+
+ RTC_DCHECK_NE(-1, last_sync_timestamp_);
+ int64_t timestamp_diff = timestamp - last_sync_timestamp_;
+ if (timestamp_diff > kMaxTimeBetweenSyncs) {
+ // After a certain time, force a sync frame.
+ return true;
+ } else if (timestamp_diff < kMinTimeBetweenSyncs) {
+ // If too soon from previous sync frame, don't issue a new one.
+ return false;
+ }
+ // Issue a sync frame if difference in quality between TL0 and TL1 isn't too
+ // large.
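+ // E.g. with TL0 at qp 54, a TL1 frame at qp 47 (delta 7 < 8) triggers a
+ // sync, while qp 46 or lower (delta >= 8) does not.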
+ if (layers_[0].last_qp - layers_[1].last_qp < kQpDeltaThresholdForSync)
+ return true;
+ return false;
+}
+
+uint32_t ScreenshareLayers::GetCodecTargetBitrateKbps() const {
+ uint32_t target_bitrate_kbps = layers_[0].target_rate_kbps_;
+
+ if (number_of_temporal_layers_ > 1) {
+ // Calculate a codec target bitrate. This may be higher than TL0, gaining
+ // quality at the expense of frame rate at TL0. Constraints:
+ // - TL0 frame rate no less than framerate / kMaxTL0FpsReduction.
+ // - Target rate * kAcceptableTargetOvershoot should not exceed TL1 rate.
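+ // E.g. with TL0 = 100 kbps and TL1 = 1000 kbps this yields
+ // min(100 * 2.5, 1000 / 2.0) = 250 kbps.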
+ target_bitrate_kbps =
+ std::min(layers_[0].target_rate_kbps_ * kMaxTL0FpsReduction,
+ layers_[1].target_rate_kbps_ / kAcceptableTargetOvershoot);
+ }
+
+ return std::max(layers_[0].target_rate_kbps_, target_bitrate_kbps);
+}
+
+Vp8EncoderConfig ScreenshareLayers::UpdateConfiguration(size_t stream_index) {
+ RTC_DCHECK_LT(stream_index, StreamCount());
+ RTC_DCHECK(min_qp_.has_value());
+ RTC_DCHECK(max_qp_.has_value());
+
+ const uint32_t target_bitrate_kbps = GetCodecTargetBitrateKbps();
+
+ // TODO(sprang): We _really_ need to make an overhaul of this class. :(
+ // If we're dropping frames in order to meet a target framerate, adjust the
+ // bitrate assigned to the encoder so the total average bitrate is correct.
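+ // E.g. capturing at 10 fps with a 5 fps target doubles the encoder bitrate,
+ // since only every other frame is actually encoded.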
+ float encoder_config_bitrate_kbps = target_bitrate_kbps;
+ if (target_framerate_ && capture_framerate_ &&
+ *target_framerate_ < *capture_framerate_) {
+ encoder_config_bitrate_kbps *=
+ static_cast<float>(*capture_framerate_) / *target_framerate_;
+ }
+
+ if (bitrate_updated_ ||
+ encoder_config_.rc_target_bitrate !=
+ absl::make_optional(encoder_config_bitrate_kbps)) {
+ encoder_config_.rc_target_bitrate = encoder_config_bitrate_kbps;
+
+ // Don't reconfigure qp limits during quality boost frames.
+ if (active_layer_ == -1 ||
+ layers_[active_layer_].state != TemporalLayer::State::kQualityBoost) {
+ const int min_qp = min_qp_.value();
+ const int max_qp = max_qp_.value();
+
+ // After a dropped frame, a frame with max qp will be encoded and the
+ // quality will then ramp up from there. To boost the speed of recovery,
+ // encode the next frame with lower max qp, if there is sufficient
+ // bandwidth to do so without causing excessive delay.
+ // TL0 is the most important to improve since the errors in this layer
+ // will propagate to TL1.
+ // Currently, reduce max qp by 20% for TL0 and 15% for TL1.
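+ // E.g. with min_qp = 2 and max_qp = 54, TL0 gets max qp 2 + (52 * 80) / 100
+ // = 43 and TL1 gets 2 + (52 * 85) / 100 = 46.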
+ if (layers_[1].target_rate_kbps_ >= kMinBitrateKbpsForQpBoost) {
+ layers_[0].enhanced_max_qp = min_qp + (((max_qp - min_qp) * 80) / 100);
+ layers_[1].enhanced_max_qp = min_qp + (((max_qp - min_qp) * 85) / 100);
+ } else {
+ layers_[0].enhanced_max_qp = -1;
+ layers_[1].enhanced_max_qp = -1;
+ }
+ }
+
+ if (capture_framerate_) {
+ int avg_frame_size =
+ (target_bitrate_kbps * 1000) / (8 * *capture_framerate_);
+ // Allow max debt to be the size of a single optimal frame.
+ // TODO(sprang): Determine if this needs to be adjusted by some factor.
+ // (Lower values may cause more frame drops, higher may lead to queuing
+ // delays.)
+ max_debt_bytes_ = avg_frame_size;
+ }
+
+ bitrate_updated_ = false;
+ }
+
+ // Don't try to update the boost state if no layer is active yet.
+ if (active_layer_ == -1)
+ return encoder_config_;
+
+ if (number_of_temporal_layers_ <= 1)
+ return encoder_config_;
+
+ // If the layer is in the quality boost state (following a dropped frame),
+ // update the configuration with the adjusted (lower) qp and set the state
+ // back to normal.
+ unsigned int adjusted_max_qp = max_qp_.value(); // Set the normal max qp.
+ if (layers_[active_layer_].state == TemporalLayer::State::kQualityBoost) {
+ if (layers_[active_layer_].enhanced_max_qp != -1) {
+ // Bitrate is high enough for quality boost, update max qp.
+ adjusted_max_qp = layers_[active_layer_].enhanced_max_qp;
+ }
+ // Regardless of qp, reset the boost state for the next frame.
+ layers_[active_layer_].state = TemporalLayer::State::kNormal;
+ }
+ encoder_config_.rc_max_quantizer = adjusted_max_qp;
+
+ return encoder_config_;
+}
+
+void ScreenshareLayers::TemporalLayer::UpdateDebt(int64_t delta_ms) {
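+ // Since 1 kbps equals exactly 1 bit per ms, rate * delta_ms / 8 is the
+ // number of bytes paid off during `delta_ms`.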
+ uint32_t debt_reduction_bytes = target_rate_kbps_ * delta_ms / 8;
+ if (debt_reduction_bytes >= debt_bytes_) {
+ debt_bytes_ = 0;
+ } else {
+ debt_bytes_ -= debt_reduction_bytes;
+ }
+}
+
+void ScreenshareLayers::UpdateHistograms() {
+ if (stats_.first_frame_time_ms_ == -1)
+ return;
+ int64_t duration_sec =
+ (rtc::TimeMillis() - stats_.first_frame_time_ms_ + 500) / 1000;
+ if (duration_sec >= metrics::kMinRunTimeInSeconds) {
+ RTC_HISTOGRAM_COUNTS_10000(
+ "WebRTC.Video.Screenshare.Layer0.FrameRate",
+ (stats_.num_tl0_frames_ + (duration_sec / 2)) / duration_sec);
+ RTC_HISTOGRAM_COUNTS_10000(
+ "WebRTC.Video.Screenshare.Layer1.FrameRate",
+ (stats_.num_tl1_frames_ + (duration_sec / 2)) / duration_sec);
+ int total_frames = stats_.num_tl0_frames_ + stats_.num_tl1_frames_;
+ RTC_HISTOGRAM_COUNTS_10000(
+ "WebRTC.Video.Screenshare.FramesPerDrop",
+ (stats_.num_dropped_frames_ == 0
+ ? 0
+ : total_frames / stats_.num_dropped_frames_));
+ RTC_HISTOGRAM_COUNTS_10000(
+ "WebRTC.Video.Screenshare.FramesPerOvershoot",
+ (stats_.num_overshoots_ == 0 ? 0
+ : total_frames / stats_.num_overshoots_));
+ if (stats_.num_tl0_frames_ > 0) {
+ RTC_HISTOGRAM_COUNTS_10000("WebRTC.Video.Screenshare.Layer0.Qp",
+ stats_.tl0_qp_sum_ / stats_.num_tl0_frames_);
+ RTC_HISTOGRAM_COUNTS_10000(
+ "WebRTC.Video.Screenshare.Layer0.TargetBitrate",
+ stats_.tl0_target_bitrate_sum_ / stats_.num_tl0_frames_);
+ }
+ if (stats_.num_tl1_frames_ > 0) {
+ RTC_HISTOGRAM_COUNTS_10000("WebRTC.Video.Screenshare.Layer1.Qp",
+ stats_.tl1_qp_sum_ / stats_.num_tl1_frames_);
+ RTC_HISTOGRAM_COUNTS_10000(
+ "WebRTC.Video.Screenshare.Layer1.TargetBitrate",
+ stats_.tl1_target_bitrate_sum_ / stats_.num_tl1_frames_);
+ }
+ }
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/vp8/screenshare_layers.h b/third_party/libwebrtc/modules/video_coding/codecs/vp8/screenshare_layers.h
new file mode 100644
index 0000000000..39477f12f1
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/vp8/screenshare_layers.h
@@ -0,0 +1,164 @@
+/* Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef MODULES_VIDEO_CODING_CODECS_VP8_SCREENSHARE_LAYERS_H_
+#define MODULES_VIDEO_CODING_CODECS_VP8_SCREENSHARE_LAYERS_H_
+
+#include <map>
+#include <memory>
+#include <utility>
+#include <vector>
+
+#include "api/video_codecs/vp8_frame_config.h"
+#include "api/video_codecs/vp8_temporal_layers.h"
+#include "modules/video_coding/codecs/vp8/include/temporal_layers_checker.h"
+#include "modules/video_coding/include/video_codec_interface.h"
+#include "modules/video_coding/utility/frame_dropper.h"
+#include "rtc_base/rate_statistics.h"
+#include "rtc_base/time_utils.h"
+
+namespace webrtc {
+
+struct CodecSpecificInfoVP8;
+class Clock;
+
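+// Rough per-frame usage, as a sketch of how the unit tests drive this class
+// (variable names are illustrative, not part of the API):
+//   Vp8FrameConfig config = layers.NextFrameConfig(0, rtp_timestamp);
+//   if (!config.drop_frame) {
+//     // ... encode with the flags derived from `config` ...
+//     layers.OnEncodeDone(0, rtp_timestamp, size_bytes, is_keyframe, qp,
+//                         &codec_specific_info);
+//   }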
+class ScreenshareLayers final : public Vp8FrameBufferController {
+ public:
+ static const double kMaxTL0FpsReduction;
+ static const double kAcceptableTargetOvershoot;
+ static const int kMaxFrameIntervalMs;
+
+ explicit ScreenshareLayers(int num_temporal_layers);
+ ~ScreenshareLayers() override;
+
+ void SetQpLimits(size_t stream_index, int min_qp, int max_qp) override;
+
+ size_t StreamCount() const override;
+
+ bool SupportsEncoderFrameDropping(size_t stream_index) const override;
+
+ // Returns the recommended VP8 encode flags. May refresh the decoder and/or
+ // update the reference buffers.
+ Vp8FrameConfig NextFrameConfig(size_t stream_index,
+ uint32_t rtp_timestamp) override;
+
+ // New target bitrate, per temporal layer.
+ void OnRatesUpdated(size_t stream_index,
+ const std::vector<uint32_t>& bitrates_bps,
+ int framerate_fps) override;
+
+ Vp8EncoderConfig UpdateConfiguration(size_t stream_index) override;
+
+ void OnEncodeDone(size_t stream_index,
+ uint32_t rtp_timestamp,
+ size_t size_bytes,
+ bool is_keyframe,
+ int qp,
+ CodecSpecificInfo* info) override;
+
+ void OnFrameDropped(size_t stream_index, uint32_t rtp_timestamp) override;
+
+ void OnPacketLossRateUpdate(float packet_loss_rate) override;
+
+ void OnRttUpdate(int64_t rtt_ms) override;
+
+ void OnLossNotification(
+ const VideoEncoder::LossNotification& loss_notification) override;
+
+ private:
+ enum class TemporalLayerState : int { kDrop, kTl0, kTl1, kTl1Sync };
+
+ struct DependencyInfo {
+ DependencyInfo() = default;
+ DependencyInfo(absl::string_view indication_symbols,
+ Vp8FrameConfig frame_config)
+ : decode_target_indications(
+ webrtc_impl::StringToDecodeTargetIndications(indication_symbols)),
+ frame_config(frame_config) {}
+
+ absl::InlinedVector<DecodeTargetIndication, 10> decode_target_indications;
+ Vp8FrameConfig frame_config;
+ };
+
+ bool TimeToSync(int64_t timestamp) const;
+ uint32_t GetCodecTargetBitrateKbps() const;
+
+ const int number_of_temporal_layers_;
+
+ // TODO(eladalon/sprang): These should be made into const ints set in the ctor.
+ absl::optional<int> min_qp_;
+ absl::optional<int> max_qp_;
+
+ int active_layer_;
+ int64_t last_timestamp_;
+ int64_t last_sync_timestamp_;
+ int64_t last_emitted_tl0_timestamp_;
+ int64_t last_frame_time_ms_;
+ rtc::TimestampWrapAroundHandler time_wrap_handler_;
+ uint32_t max_debt_bytes_;
+
+ std::map<uint32_t, DependencyInfo> pending_frame_configs_;
+
+ // Configured max framerate.
+ absl::optional<uint32_t> target_framerate_;
+ // Incoming framerate from capturer.
+ absl::optional<uint32_t> capture_framerate_;
+
+ // Tracks what framerate we actually encode, and drops frames on overshoot.
+ RateStatistics encode_framerate_;
+ bool bitrate_updated_;
+
+ static constexpr int kMaxNumTemporalLayers = 2;
+ struct TemporalLayer {
+ TemporalLayer()
+ : state(State::kNormal),
+ enhanced_max_qp(-1),
+ last_qp(-1),
+ debt_bytes_(0),
+ target_rate_kbps_(0) {}
+
+ enum class State {
+ kNormal,
+ kDropped,
+ kReencoded,
+ kQualityBoost,
+ kKeyFrame
+ } state;
+
+ int enhanced_max_qp;
+ int last_qp;
+ uint32_t debt_bytes_;
+ uint32_t target_rate_kbps_;
+
+ void UpdateDebt(int64_t delta_ms);
+ } layers_[kMaxNumTemporalLayers];
+
+ void UpdateHistograms();
+ FrameDependencyStructure GetTemplateStructure(int num_layers) const;
+
+ // Data for histogram statistics.
+ struct Stats {
+ int64_t first_frame_time_ms_ = -1;
+ int64_t num_tl0_frames_ = 0;
+ int64_t num_tl1_frames_ = 0;
+ int64_t num_dropped_frames_ = 0;
+ int64_t num_overshoots_ = 0;
+ int64_t tl0_qp_sum_ = 0;
+ int64_t tl1_qp_sum_ = 0;
+ int64_t tl0_target_bitrate_sum_ = 0;
+ int64_t tl1_target_bitrate_sum_ = 0;
+ } stats_;
+
+ Vp8EncoderConfig encoder_config_;
+
+ // Optional utility used to verify reference validity.
+ std::unique_ptr<TemporalLayersChecker> checker_;
+};
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_CODECS_VP8_SCREENSHARE_LAYERS_H_
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/vp8/screenshare_layers_unittest.cc b/third_party/libwebrtc/modules/video_coding/codecs/vp8/screenshare_layers_unittest.cc
new file mode 100644
index 0000000000..e5b3bd4fdf
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/vp8/screenshare_layers_unittest.cc
@@ -0,0 +1,788 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/codecs/vp8/screenshare_layers.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+#include <cstdint>
+#include <memory>
+#include <vector>
+
+#include "api/video_codecs/vp8_frame_config.h"
+#include "modules/video_coding/codecs/interface/common_constants.h"
+#include "modules/video_coding/codecs/vp8/libvpx_vp8_encoder.h"
+#include "modules/video_coding/include/video_codec_interface.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/fake_clock.h"
+#include "system_wrappers/include/metrics.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "vpx/vp8cx.h"
+
+using ::testing::_;
+using ::testing::ElementsAre;
+using ::testing::NiceMock;
+
+namespace webrtc {
+namespace {
+// 5 frames per second at 90 kHz.
+const uint32_t kTimestampDelta5Fps = 90000 / 5;
+const int kDefaultQp = 54;
+const int kDefaultTl0BitrateKbps = 200;
+const int kDefaultTl1BitrateKbps = 2000;
+const int kFrameRate = 5;
+const int kSyncPeriodSeconds = 2;
+const int kMaxSyncPeriodSeconds = 4;
+
+// Expected flags for corresponding temporal layers.
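+// TL0 references and updates only the 'last' buffer; TL1 references 'last'
+// and 'golden' but updates only 'golden'; a TL1 sync frame references only
+// 'last' while updating 'golden'.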
+const int kTl0Flags = VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF |
+ VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_REF_ARF;
+const int kTl1Flags =
+ VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_LAST;
+const int kTl1SyncFlags = VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_REF_GF |
+ VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_LAST;
+const std::vector<uint32_t> kDefault2TlBitratesBps = {
+ kDefaultTl0BitrateKbps * 1000,
+ (kDefaultTl1BitrateKbps - kDefaultTl0BitrateKbps) * 1000};
+
+} // namespace
+
+class ScreenshareLayerTest : public ::testing::Test {
+ protected:
+ ScreenshareLayerTest()
+ : min_qp_(2),
+ max_qp_(kDefaultQp),
+ frame_size_(-1),
+ timestamp_(90),
+ config_updated_(false) {}
+ virtual ~ScreenshareLayerTest() {}
+
+ void SetUp() override {
+ layers_.reset(new ScreenshareLayers(2));
+ cfg_ = ConfigureBitrates();
+ }
+
+ int EncodeFrame(bool base_sync, CodecSpecificInfo* info = nullptr) {
+ CodecSpecificInfo ignored_info;
+ if (!info) {
+ info = &ignored_info;
+ }
+
+ int flags = ConfigureFrame(base_sync);
+ if (flags != -1)
+ layers_->OnEncodeDone(0, timestamp_, frame_size_, base_sync, kDefaultQp,
+ info);
+ return flags;
+ }
+
+ int ConfigureFrame(bool key_frame) {
+ tl_config_ = NextFrameConfig(0, timestamp_);
+ EXPECT_EQ(0, tl_config_.encoder_layer_id)
+ << "ScreenshareLayers always encodes using the bitrate allocator for "
+ "layer 0, but may reference different buffers and packetize "
+ "differently.";
+ if (tl_config_.drop_frame) {
+ return -1;
+ }
+ const uint32_t prev_rc_target_bitrate = cfg_.rc_target_bitrate.value_or(-1);
+ const uint32_t prev_rc_max_quantizer = cfg_.rc_max_quantizer.value_or(-1);
+
+ cfg_ = layers_->UpdateConfiguration(0);
+
+ config_updated_ =
+ cfg_.temporal_layer_config.has_value() ||
+ (cfg_.rc_target_bitrate.has_value() &&
+ cfg_.rc_target_bitrate.value() != prev_rc_target_bitrate) ||
+ (cfg_.rc_max_quantizer.has_value() &&
+ cfg_.rc_max_quantizer.value() != prev_rc_max_quantizer) ||
+ cfg_.g_error_resilient.has_value();
+
+ int flags = LibvpxVp8Encoder::EncodeFlags(tl_config_);
+ EXPECT_NE(-1, frame_size_);
+ return flags;
+ }
+
+ Vp8FrameConfig NextFrameConfig(size_t stream_index, uint32_t timestamp) {
+ int64_t timestamp_ms = timestamp / 90;
+ clock_.AdvanceTime(TimeDelta::Millis(timestamp_ms - rtc::TimeMillis()));
+ return layers_->NextFrameConfig(stream_index, timestamp);
+ }
+
+ int FrameSizeForBitrate(int bitrate_kbps) {
+ return ((bitrate_kbps * 1000) / 8) / kFrameRate;
+ }
+
+ Vp8EncoderConfig ConfigureBitrates() {
+ layers_->SetQpLimits(0, min_qp_, max_qp_);
+ layers_->OnRatesUpdated(0, kDefault2TlBitratesBps, kFrameRate);
+ const Vp8EncoderConfig vp8_cfg = layers_->UpdateConfiguration(0);
+ EXPECT_TRUE(vp8_cfg.rc_target_bitrate.has_value());
+ frame_size_ = FrameSizeForBitrate(vp8_cfg.rc_target_bitrate.value());
+ return vp8_cfg;
+ }
+
+ void WithQpLimits(int min_qp, int max_qp) {
+ min_qp_ = min_qp;
+ max_qp_ = max_qp;
+ }
+
+ // Runs a few initial frames and makes sure we have seen frames on both
+ // temporal layers, including sync and non-sync frames.
+ bool RunGracePeriod() {
+ bool got_tl0 = false;
+ bool got_tl1 = false;
+ bool got_tl1_sync = false;
+ for (int i = 0; i < 10; ++i) {
+ CodecSpecificInfo info;
+ EXPECT_NE(-1, EncodeFrame(false, &info));
+ timestamp_ += kTimestampDelta5Fps;
+ if (info.codecSpecific.VP8.temporalIdx == 0) {
+ got_tl0 = true;
+ } else if (info.codecSpecific.VP8.layerSync) {
+ got_tl1_sync = true;
+ } else {
+ got_tl1 = true;
+ }
+ if (got_tl0 && got_tl1 && got_tl1_sync)
+ return true;
+ }
+ return false;
+ }
+
+ // Adds frames until we get one in the specified temporal layer. The final
+ // OnEncodeDone() call is omitted and must be made by the caller.
+ // Returns the flags for the last frame.
+ int SkipUntilTl(int layer) {
+ return SkipUntilTlAndSync(layer, absl::nullopt);
+ }
+
+ // Same as SkipUntilTl, but also waits until the sync bit condition is met.
+ int SkipUntilTlAndSync(int layer, absl::optional<bool> sync) {
+ int flags = 0;
+ const int kMaxFramesToSkip =
+ 1 + (sync.value_or(false) ? kMaxSyncPeriodSeconds : 1) * kFrameRate;
+ for (int i = 0; i < kMaxFramesToSkip; ++i) {
+ flags = ConfigureFrame(false);
+ if (tl_config_.packetizer_temporal_idx != layer ||
+ (sync && *sync != tl_config_.layer_sync)) {
+ if (flags != -1) {
+ // If flags do not request a frame drop, report some default values
+ // for frame size etc.
+ CodecSpecificInfo info;
+ layers_->OnEncodeDone(0, timestamp_, frame_size_, false, kDefaultQp,
+ &info);
+ }
+ timestamp_ += kTimestampDelta5Fps;
+ } else {
+ // Found frame from sought after layer.
+ return flags;
+ }
+ }
+ ADD_FAILURE() << "Did not get a frame of TL" << layer << " in time.";
+ return -1;
+ }
+
+ int min_qp_;
+ uint32_t max_qp_;
+ int frame_size_;
+ rtc::ScopedFakeClock clock_;
+ std::unique_ptr<ScreenshareLayers> layers_;
+
+ uint32_t timestamp_;
+ Vp8FrameConfig tl_config_;
+ Vp8EncoderConfig cfg_;
+ bool config_updated_;
+
+ CodecSpecificInfo* IgnoredCodecSpecificInfo() {
+ ignored_codec_specific_info_ = std::make_unique<CodecSpecificInfo>();
+ return ignored_codec_specific_info_.get();
+ }
+
+ private:
+ std::unique_ptr<CodecSpecificInfo> ignored_codec_specific_info_;
+};
+
+TEST_F(ScreenshareLayerTest, 1Layer) {
+ layers_.reset(new ScreenshareLayers(1));
+ ConfigureBitrates();
+ // One layer screenshare should not use the frame dropper as all frames will
+ // belong to the base layer.
+ const int kSingleLayerFlags = 0;
+ auto info = std::make_unique<CodecSpecificInfo>();
+ int flags = EncodeFrame(/*base_sync=*/false, info.get());
+ timestamp_ += kTimestampDelta5Fps;
+ EXPECT_EQ(static_cast<uint8_t>(kNoTemporalIdx),
+ info->codecSpecific.VP8.temporalIdx);
+ EXPECT_FALSE(info->codecSpecific.VP8.layerSync);
+ EXPECT_EQ(info->generic_frame_info->temporal_id, 0);
+
+ info = std::make_unique<CodecSpecificInfo>();
+ flags = EncodeFrame(/*base_sync=*/false, info.get());
+ EXPECT_EQ(kSingleLayerFlags, flags);
+ EXPECT_EQ(static_cast<uint8_t>(kNoTemporalIdx),
+ info->codecSpecific.VP8.temporalIdx);
+ EXPECT_FALSE(info->codecSpecific.VP8.layerSync);
+ EXPECT_EQ(info->generic_frame_info->temporal_id, 0);
+}
+
+TEST_F(ScreenshareLayerTest, 2LayersPeriodicSync) {
+ std::vector<int> sync_times;
+ const int kNumFrames = kSyncPeriodSeconds * kFrameRate * 2 - 1;
+ for (int i = 0; i < kNumFrames; ++i) {
+ CodecSpecificInfo info;
+ EncodeFrame(false, &info);
+ timestamp_ += kTimestampDelta5Fps;
+ if (info.codecSpecific.VP8.temporalIdx == 1 &&
+ info.codecSpecific.VP8.layerSync) {
+ sync_times.push_back(timestamp_);
+ }
+ }
+
+ ASSERT_EQ(2u, sync_times.size());
+ EXPECT_GE(sync_times[1] - sync_times[0], 90000 * kSyncPeriodSeconds);
+}
+
+TEST_F(ScreenshareLayerTest, 2LayersSyncAfterTimeout) {
+ std::vector<int> sync_times;
+ const int kNumFrames = kMaxSyncPeriodSeconds * kFrameRate * 2 - 1;
+ for (int i = 0; i < kNumFrames; ++i) {
+ CodecSpecificInfo info;
+
+ tl_config_ = NextFrameConfig(0, timestamp_);
+ cfg_ = layers_->UpdateConfiguration(0);
+
+ // Simulate TL1 being at least 8 qp steps better.
+ if (tl_config_.packetizer_temporal_idx == 0) {
+ layers_->OnEncodeDone(0, timestamp_, frame_size_, false, kDefaultQp,
+ &info);
+ } else {
+ layers_->OnEncodeDone(0, timestamp_, frame_size_, false, kDefaultQp - 8,
+ &info);
+ }
+
+ if (info.codecSpecific.VP8.temporalIdx == 1 &&
+ info.codecSpecific.VP8.layerSync)
+ sync_times.push_back(timestamp_);
+
+ timestamp_ += kTimestampDelta5Fps;
+ }
+
+ ASSERT_EQ(2u, sync_times.size());
+ EXPECT_GE(sync_times[1] - sync_times[0], 90000 * kMaxSyncPeriodSeconds);
+}
+
+TEST_F(ScreenshareLayerTest, 2LayersSyncAfterSimilarQP) {
+ std::vector<int> sync_times;
+
+ const int kNumFrames = (kSyncPeriodSeconds +
+ ((kMaxSyncPeriodSeconds - kSyncPeriodSeconds) / 2)) *
+ kFrameRate;
+ for (int i = 0; i < kNumFrames; ++i) {
+ CodecSpecificInfo info;
+
+ ConfigureFrame(false);
+
+ // Simulate TL1 being at least 8 qp steps better.
+ if (tl_config_.packetizer_temporal_idx == 0) {
+ layers_->OnEncodeDone(0, timestamp_, frame_size_, false, kDefaultQp,
+ &info);
+ } else {
+ layers_->OnEncodeDone(0, timestamp_, frame_size_, false, kDefaultQp - 8,
+ &info);
+ }
+
+ if (info.codecSpecific.VP8.temporalIdx == 1 &&
+ info.codecSpecific.VP8.layerSync)
+ sync_times.push_back(timestamp_);
+
+ timestamp_ += kTimestampDelta5Fps;
+ }
+
+ ASSERT_EQ(1u, sync_times.size());
+
+ bool bumped_tl0_quality = false;
+ for (int i = 0; i < 3; ++i) {
+ CodecSpecificInfo info;
+
+ int flags = ConfigureFrame(false);
+ layers_->OnEncodeDone(0, timestamp_, frame_size_, false, kDefaultQp - 8,
+ &info);
+ if (info.codecSpecific.VP8.temporalIdx == 0) {
+ // Bump TL0 to same quality as TL1.
+ bumped_tl0_quality = true;
+ } else {
+ if (bumped_tl0_quality) {
+ EXPECT_TRUE(info.codecSpecific.VP8.layerSync);
+ EXPECT_EQ(kTl1SyncFlags, flags);
+ return;
+ }
+ }
+ timestamp_ += kTimestampDelta5Fps;
+ }
+ ADD_FAILURE() << "No TL1 frame arrived within time limit.";
+}
+
+TEST_F(ScreenshareLayerTest, 2LayersToggling) {
+ EXPECT_TRUE(RunGracePeriod());
+
+ // Insert 50 frames. 2/5 should be TL0.
+ int tl0_frames = 0;
+ int tl1_frames = 0;
+ for (int i = 0; i < 50; ++i) {
+ CodecSpecificInfo info;
+ EncodeFrame(/*base_sync=*/false, &info);
+ EXPECT_EQ(info.codecSpecific.VP8.temporalIdx,
+ info.generic_frame_info->temporal_id);
+ timestamp_ += kTimestampDelta5Fps;
+ switch (info.codecSpecific.VP8.temporalIdx) {
+ case 0:
+ ++tl0_frames;
+ break;
+ case 1:
+ ++tl1_frames;
+ break;
+ default:
+ abort();
+ }
+ }
+ EXPECT_EQ(20, tl0_frames);
+ EXPECT_EQ(30, tl1_frames);
+}
+
+TEST_F(ScreenshareLayerTest, AllFitsLayer0) {
+ frame_size_ = FrameSizeForBitrate(kDefaultTl0BitrateKbps);
+
+ // Insert 50 frames, small enough that all fits in TL0.
+ for (int i = 0; i < 50; ++i) {
+ CodecSpecificInfo info;
+ int flags = EncodeFrame(false, &info);
+ timestamp_ += kTimestampDelta5Fps;
+ EXPECT_EQ(kTl0Flags, flags);
+ EXPECT_EQ(0, info.codecSpecific.VP8.temporalIdx);
+ }
+}
+
+TEST_F(ScreenshareLayerTest, TooHighBitrate) {
+ frame_size_ = 2 * FrameSizeForBitrate(kDefaultTl1BitrateKbps);
+
+ // Insert 100 frames. Half should be dropped.
+ int tl0_frames = 0;
+ int tl1_frames = 0;
+ int dropped_frames = 0;
+ for (int i = 0; i < 100; ++i) {
+ CodecSpecificInfo info;
+ int flags = EncodeFrame(false, &info);
+ timestamp_ += kTimestampDelta5Fps;
+ if (flags == -1) {
+ ++dropped_frames;
+ } else {
+ switch (info.codecSpecific.VP8.temporalIdx) {
+ case 0:
+ ++tl0_frames;
+ break;
+ case 1:
+ ++tl1_frames;
+ break;
+ default:
+ ADD_FAILURE() << "Unexpected temporal id";
+ }
+ }
+ }
+
+ EXPECT_NEAR(50, tl0_frames + tl1_frames, 1);
+ EXPECT_NEAR(50, dropped_frames, 1);
+}
+
+TEST_F(ScreenshareLayerTest, TargetBitrateCappedByTL0) {
+ const int kTl0_kbps = 100;
+ const int kTl1_kbps = 1000;
+ const std::vector<uint32_t> layer_rates = {kTl0_kbps * 1000,
+ (kTl1_kbps - kTl0_kbps) * 1000};
+ layers_->OnRatesUpdated(0, layer_rates, kFrameRate);
+ cfg_ = layers_->UpdateConfiguration(0);
+
+ EXPECT_EQ(static_cast<unsigned int>(
+ ScreenshareLayers::kMaxTL0FpsReduction * kTl0_kbps + 0.5),
+ cfg_.rc_target_bitrate);
+}
+
+TEST_F(ScreenshareLayerTest, TargetBitrateCappedByTL1) {
+ const int kTl0_kbps = 100;
+ const int kTl1_kbps = 450;
+ const std::vector<uint32_t> layer_rates = {kTl0_kbps * 1000,
+ (kTl1_kbps - kTl0_kbps) * 1000};
+ layers_->OnRatesUpdated(0, layer_rates, kFrameRate);
+ cfg_ = layers_->UpdateConfiguration(0);
+
+ EXPECT_EQ(static_cast<unsigned int>(
+ kTl1_kbps / ScreenshareLayers::kAcceptableTargetOvershoot),
+ cfg_.rc_target_bitrate);
+}
+
+TEST_F(ScreenshareLayerTest, TargetBitrateBelowTL0) {
+ const int kTl0_kbps = 100;
+ const std::vector<uint32_t> layer_rates = {kTl0_kbps * 1000};
+ layers_->OnRatesUpdated(0, layer_rates, kFrameRate);
+ cfg_ = layers_->UpdateConfiguration(0);
+
+ EXPECT_EQ(static_cast<uint32_t>(kTl0_kbps), cfg_.rc_target_bitrate);
+}
+
+TEST_F(ScreenshareLayerTest, EncoderDrop) {
+ EXPECT_TRUE(RunGracePeriod());
+ SkipUntilTl(0);
+
+ // Size 0 indicates dropped frame.
+ layers_->OnEncodeDone(0, timestamp_, 0, false, 0, IgnoredCodecSpecificInfo());
+
+ // Re-encode frame (so don't advance timestamp).
+ int flags = EncodeFrame(false);
+ timestamp_ += kTimestampDelta5Fps;
+ EXPECT_FALSE(config_updated_);
+ EXPECT_EQ(kTl0Flags, flags);
+
+ // Next frame should have boosted quality...
+ SkipUntilTl(0);
+ EXPECT_TRUE(config_updated_);
+ EXPECT_LT(cfg_.rc_max_quantizer, static_cast<unsigned int>(kDefaultQp));
+ layers_->OnEncodeDone(0, timestamp_, frame_size_, false, kDefaultQp,
+ IgnoredCodecSpecificInfo());
+ timestamp_ += kTimestampDelta5Fps;
+
+ // ...then back to standard setup.
+ SkipUntilTl(0);
+ layers_->OnEncodeDone(0, timestamp_, frame_size_, false, kDefaultQp,
+ IgnoredCodecSpecificInfo());
+ timestamp_ += kTimestampDelta5Fps;
+ EXPECT_EQ(cfg_.rc_max_quantizer, static_cast<unsigned int>(kDefaultQp));
+
+ // Next drop in TL1.
+ SkipUntilTl(1);
+ layers_->OnEncodeDone(0, timestamp_, 0, false, 0, IgnoredCodecSpecificInfo());
+
+ // Re-encode frame (so don't advance timestamp).
+ flags = EncodeFrame(false);
+ timestamp_ += kTimestampDelta5Fps;
+ EXPECT_FALSE(config_updated_);
+ EXPECT_EQ(kTl1Flags, flags);
+
+ // Next frame should have boosted QP.
+ SkipUntilTl(1);
+ EXPECT_TRUE(config_updated_);
+ EXPECT_LT(cfg_.rc_max_quantizer, static_cast<unsigned int>(kDefaultQp));
+ layers_->OnEncodeDone(0, timestamp_, frame_size_, false, kDefaultQp,
+ IgnoredCodecSpecificInfo());
+ timestamp_ += kTimestampDelta5Fps;
+
+ // ...and back to normal.
+ SkipUntilTl(1);
+ EXPECT_EQ(cfg_.rc_max_quantizer, static_cast<unsigned int>(kDefaultQp));
+ layers_->OnEncodeDone(0, timestamp_, frame_size_, false, kDefaultQp,
+ IgnoredCodecSpecificInfo());
+ timestamp_ += kTimestampDelta5Fps;
+}
+
+TEST_F(ScreenshareLayerTest, RespectsMaxIntervalBetweenFrames) {
+ const int kLowBitrateKbps = 50;
+ const int kLargeFrameSizeBytes = 100000;
+ const uint32_t kStartTimestamp = 1234;
+
+ const std::vector<uint32_t> layer_rates = {kLowBitrateKbps * 1000};
+ layers_->OnRatesUpdated(0, layer_rates, kFrameRate);
+ cfg_ = layers_->UpdateConfiguration(0);
+
+ EXPECT_EQ(kTl0Flags,
+ LibvpxVp8Encoder::EncodeFlags(NextFrameConfig(0, kStartTimestamp)));
+ layers_->OnEncodeDone(0, kStartTimestamp, kLargeFrameSizeBytes, false,
+ kDefaultQp, IgnoredCodecSpecificInfo());
+
+ const uint32_t kTwoSecondsLater =
+ kStartTimestamp + (ScreenshareLayers::kMaxFrameIntervalMs * 90);
+
+ // Sanity check, repayment time should exceed kMaxFrameIntervalMs.
+ ASSERT_GT(kStartTimestamp + 90 * (kLargeFrameSizeBytes * 8) / kLowBitrateKbps,
+ kStartTimestamp + (ScreenshareLayers::kMaxFrameIntervalMs * 90));
+
+ // Expect a drop one frame interval before the two-second timeout. If we
+ // tried any later, the frame would be dropped anyway by the frame rate
+ // throttling logic.
+ EXPECT_TRUE(
+ NextFrameConfig(0, kTwoSecondsLater - kTimestampDelta5Fps).drop_frame);
+
+ // More than two seconds have passed since the last frame; one should be
+ // emitted even if the bitrate target is then exceeded.
+ EXPECT_EQ(kTl0Flags, LibvpxVp8Encoder::EncodeFlags(
+ NextFrameConfig(0, kTwoSecondsLater + 90)));
+}
+
+TEST_F(ScreenshareLayerTest, UpdatesHistograms) {
+ metrics::Reset();
+ bool trigger_drop = false;
+ bool dropped_frame = false;
+ bool overshoot = false;
+ const int kTl0Qp = 35;
+ const int kTl1Qp = 30;
+ for (int64_t timestamp = 0;
+ timestamp < kTimestampDelta5Fps * 5 * metrics::kMinRunTimeInSeconds;
+ timestamp += kTimestampDelta5Fps) {
+ tl_config_ = NextFrameConfig(0, timestamp);
+ if (tl_config_.drop_frame) {
+ dropped_frame = true;
+ continue;
+ }
+ int flags = LibvpxVp8Encoder::EncodeFlags(tl_config_);
+ if (flags != -1)
+ cfg_ = layers_->UpdateConfiguration(0);
+
+ if (timestamp >= kTimestampDelta5Fps * 5 && !overshoot && flags != -1) {
+ // Simulate one overshoot.
+ layers_->OnEncodeDone(0, timestamp, 0, false, 0, nullptr);
+ overshoot = true;
+ }
+
+ if (flags == kTl0Flags) {
+ if (timestamp >= kTimestampDelta5Fps * 20 && !trigger_drop) {
+ // Simulate a too large frame, to cause frame drop.
+ layers_->OnEncodeDone(0, timestamp, frame_size_ * 10, false, kTl0Qp,
+ IgnoredCodecSpecificInfo());
+ trigger_drop = true;
+ } else {
+ layers_->OnEncodeDone(0, timestamp, frame_size_, false, kTl0Qp,
+ IgnoredCodecSpecificInfo());
+ }
+ } else if (flags == kTl1Flags || flags == kTl1SyncFlags) {
+ layers_->OnEncodeDone(0, timestamp, frame_size_, false, kTl1Qp,
+ IgnoredCodecSpecificInfo());
+ } else if (flags == -1) {
+ dropped_frame = true;
+ } else {
+ RTC_DCHECK_NOTREACHED() << "Unexpected flags";
+ }
+ clock_.AdvanceTime(TimeDelta::Millis(1000 / 5));
+ }
+
+ EXPECT_TRUE(overshoot);
+ EXPECT_TRUE(dropped_frame);
+
+ layers_.reset(); // Histograms are reported on destruction.
+
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.Screenshare.Layer0.FrameRate"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.Screenshare.Layer1.FrameRate"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.Screenshare.FramesPerDrop"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.Screenshare.FramesPerOvershoot"));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumSamples("WebRTC.Video.Screenshare.Layer0.Qp"));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumSamples("WebRTC.Video.Screenshare.Layer1.Qp"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.Screenshare.Layer0.TargetBitrate"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.Screenshare.Layer1.TargetBitrate"));
+
+ EXPECT_METRIC_GT(
+ metrics::MinSample("WebRTC.Video.Screenshare.Layer0.FrameRate"), 1);
+ EXPECT_METRIC_GT(
+ metrics::MinSample("WebRTC.Video.Screenshare.Layer1.FrameRate"), 1);
+ EXPECT_METRIC_GT(metrics::MinSample("WebRTC.Video.Screenshare.FramesPerDrop"),
+ 1);
+ EXPECT_METRIC_GT(
+ metrics::MinSample("WebRTC.Video.Screenshare.FramesPerOvershoot"), 1);
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.Screenshare.Layer0.Qp", kTl0Qp));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.Screenshare.Layer1.Qp", kTl1Qp));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.Screenshare.Layer0.TargetBitrate",
+ kDefaultTl0BitrateKbps));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.Screenshare.Layer1.TargetBitrate",
+ kDefaultTl1BitrateKbps));
+}
+
+TEST_F(ScreenshareLayerTest, RespectsConfiguredFramerate) {
+ int64_t kTestSpanMs = 2000;
+ int64_t kFrameIntervalsMs = 1000 / kFrameRate;
+
+ uint32_t timestamp = 1234;
+ int num_input_frames = 0;
+ int num_discarded_frames = 0;
+
+ // Send at regular rate - no drops expected.
+ for (int64_t i = 0; i < kTestSpanMs; i += kFrameIntervalsMs) {
+ if (NextFrameConfig(0, timestamp).drop_frame) {
+ ++num_discarded_frames;
+ } else {
+ size_t frame_size_bytes = kDefaultTl0BitrateKbps * kFrameIntervalsMs / 8;
+ layers_->OnEncodeDone(0, timestamp, frame_size_bytes, false, kDefaultQp,
+ IgnoredCodecSpecificInfo());
+ }
+ timestamp += kFrameIntervalsMs * 90;
+ clock_.AdvanceTime(TimeDelta::Millis(kFrameIntervalsMs));
+
+ ++num_input_frames;
+ }
+ EXPECT_EQ(0, num_discarded_frames);
+
+ // Send at twice the configured rate - drop every other frame.
+ num_input_frames = 0;
+ num_discarded_frames = 0;
+ for (int64_t i = 0; i < kTestSpanMs; i += kFrameIntervalsMs / 2) {
+ if (NextFrameConfig(0, timestamp).drop_frame) {
+ ++num_discarded_frames;
+ } else {
+ size_t frame_size_bytes = kDefaultTl0BitrateKbps * kFrameIntervalsMs / 8;
+ layers_->OnEncodeDone(0, timestamp, frame_size_bytes, false, kDefaultQp,
+ IgnoredCodecSpecificInfo());
+ }
+ timestamp += kFrameIntervalsMs * 90 / 2;
+ clock_.AdvanceTime(TimeDelta::Millis(kFrameIntervalsMs));
+ ++num_input_frames;
+ }
+
+ // Allow for some rounding errors in the measurements.
+ EXPECT_NEAR(num_discarded_frames, num_input_frames / 2, 2);
+}
+
+TEST_F(ScreenshareLayerTest, 2LayersSyncAtOvershootDrop) {
+ // Run the grace period so we have existing frames in both TL0 and TL1.
+ EXPECT_TRUE(RunGracePeriod());
+
+ // Move ahead until we have a sync frame in TL1.
+ EXPECT_EQ(kTl1SyncFlags, SkipUntilTlAndSync(1, true));
+ ASSERT_TRUE(tl_config_.layer_sync);
+
+ // Simulate overshoot of this frame.
+ layers_->OnEncodeDone(0, timestamp_, 0, false, 0, nullptr);
+
+ cfg_ = layers_->UpdateConfiguration(0);
+ EXPECT_EQ(kTl1SyncFlags, LibvpxVp8Encoder::EncodeFlags(tl_config_));
+
+ CodecSpecificInfo new_info;
+ layers_->OnEncodeDone(0, timestamp_, frame_size_, false, kDefaultQp,
+ &new_info);
+ EXPECT_TRUE(new_info.codecSpecific.VP8.layerSync);
+}
+
+TEST_F(ScreenshareLayerTest, DropOnTooShortFrameInterval) {
+ // Run the grace period so we have existing frames in both TL0 and TL1.
+ EXPECT_TRUE(RunGracePeriod());
+
+ // Add a large gap, so there's plenty of room in the rate tracker.
+ timestamp_ += kTimestampDelta5Fps * 3;
+ EXPECT_FALSE(NextFrameConfig(0, timestamp_).drop_frame);
+ layers_->OnEncodeDone(0, timestamp_, frame_size_, false, kDefaultQp,
+ IgnoredCodecSpecificInfo());
+
+ // A frame interval below 85% of the desired time is not allowed; try
+ // inserting a frame just before this limit.
+ const int64_t kMinFrameInterval = (kTimestampDelta5Fps * 85) / 100;
+ timestamp_ += kMinFrameInterval - 90;
+ EXPECT_TRUE(NextFrameConfig(0, timestamp_).drop_frame);
+
+ // Try again at the limit, now it should pass.
+ timestamp_ += 90;
+ EXPECT_FALSE(NextFrameConfig(0, timestamp_).drop_frame);
+}
+
+TEST_F(ScreenshareLayerTest, AdjustsBitrateWhenDroppingFrames) {
+ const uint32_t kTimestampDelta10Fps = kTimestampDelta5Fps / 2;
+ const int kNumFrames = 30;
+ ASSERT_TRUE(cfg_.rc_target_bitrate.has_value());
+ const uint32_t default_bitrate = cfg_.rc_target_bitrate.value();
+ layers_->OnRatesUpdated(0, kDefault2TlBitratesBps, 10);
+
+ int num_dropped_frames = 0;
+ for (int i = 0; i < kNumFrames; ++i) {
+ if (EncodeFrame(false) == -1)
+ ++num_dropped_frames;
+ timestamp_ += kTimestampDelta10Fps;
+ }
+ cfg_ = layers_->UpdateConfiguration(0);
+
+ EXPECT_EQ(num_dropped_frames, kNumFrames / 2);
+ EXPECT_EQ(cfg_.rc_target_bitrate, default_bitrate * 2);
+}
+
+TEST_F(ScreenshareLayerTest, UpdatesConfigurationAfterRateChange) {
+ // Set initial rate again, no need to update configuration.
+ layers_->OnRatesUpdated(0, kDefault2TlBitratesBps, kFrameRate);
+ cfg_ = layers_->UpdateConfiguration(0);
+
+ // Rate changed, now update config.
+ std::vector<uint32_t> bitrates = kDefault2TlBitratesBps;
+ bitrates[1] -= 100000;
+ layers_->OnRatesUpdated(0, bitrates, 5);
+ cfg_ = layers_->UpdateConfiguration(0);
+
+ // Changed rate, but then set changed rate again before trying to update
+ // configuration, update should still apply.
+ bitrates[1] -= 100000;
+ layers_->OnRatesUpdated(0, bitrates, 5);
+ layers_->OnRatesUpdated(0, bitrates, 5);
+ cfg_ = layers_->UpdateConfiguration(0);
+}
+
+TEST_F(ScreenshareLayerTest, MaxQpRestoredAfterDoubleDrop) {
+ // Run the grace period so we have existing frames in both TL0 and TL1.
+ EXPECT_TRUE(RunGracePeriod());
+
+ // Move ahead until we have a sync frame in TL1.
+ EXPECT_EQ(kTl1SyncFlags, SkipUntilTlAndSync(1, true));
+ ASSERT_TRUE(tl_config_.layer_sync);
+
+ // Simulate overshoot of this frame.
+ layers_->OnEncodeDone(0, timestamp_, 0, false, -1, nullptr);
+
+ // Simulate re-encoded frame.
+ layers_->OnEncodeDone(0, timestamp_, 1, false, max_qp_,
+ IgnoredCodecSpecificInfo());
+
+ // Next frame, expect boosted quality.
+ // Slightly alter bitrate between each frame.
+ std::vector<uint32_t> kDefault2TlBitratesBpsAlt = kDefault2TlBitratesBps;
+ kDefault2TlBitratesBpsAlt[1] += 4000;
+ layers_->OnRatesUpdated(0, kDefault2TlBitratesBpsAlt, kFrameRate);
+ EXPECT_EQ(kTl1Flags, SkipUntilTlAndSync(1, false));
+ EXPECT_TRUE(config_updated_);
+ EXPECT_LT(cfg_.rc_max_quantizer, max_qp_);
+ ASSERT_TRUE(cfg_.rc_max_quantizer.has_value());
+ const uint32_t adjusted_qp = cfg_.rc_max_quantizer.value();
+
+ // Simulate overshoot of this frame.
+ layers_->OnEncodeDone(0, timestamp_, 0, false, -1, nullptr);
+
+ // Simulate re-encoded frame.
+ layers_->OnEncodeDone(0, timestamp_, frame_size_, false, max_qp_,
+ IgnoredCodecSpecificInfo());
+
+ // A third frame, expect boosted quality.
+ layers_->OnRatesUpdated(0, kDefault2TlBitratesBps, kFrameRate);
+ EXPECT_EQ(kTl1Flags, SkipUntilTlAndSync(1, false));
+ EXPECT_TRUE(config_updated_);
+ EXPECT_LT(cfg_.rc_max_quantizer, max_qp_);
+ EXPECT_EQ(adjusted_qp, cfg_.rc_max_quantizer);
+
+ // Frame encoded.
+ layers_->OnEncodeDone(0, timestamp_, frame_size_, false, max_qp_,
+ IgnoredCodecSpecificInfo());
+
+ // A fourth frame, max qp should be restored.
+ layers_->OnRatesUpdated(0, kDefault2TlBitratesBpsAlt, kFrameRate);
+ EXPECT_EQ(kTl1Flags, SkipUntilTlAndSync(1, false));
+ EXPECT_EQ(cfg_.rc_max_quantizer, max_qp_);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/vp8/temporal_layers.h b/third_party/libwebrtc/modules/video_coding/codecs/vp8/temporal_layers.h
new file mode 100644
index 0000000000..9576fb27be
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/vp8/temporal_layers.h
@@ -0,0 +1,17 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_CODECS_VP8_TEMPORAL_LAYERS_H_
+#define MODULES_VIDEO_CODING_CODECS_VP8_TEMPORAL_LAYERS_H_
+
+// TODO(webrtc:9012) Remove this file when downstream projects have updated.
+#include "api/video_codecs/vp8_temporal_layers.h"
+
+#endif // MODULES_VIDEO_CODING_CODECS_VP8_TEMPORAL_LAYERS_H_
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/vp8/temporal_layers_checker.cc b/third_party/libwebrtc/modules/video_coding/codecs/vp8/temporal_layers_checker.cc
new file mode 100644
index 0000000000..5aebd2c526
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/vp8/temporal_layers_checker.cc
@@ -0,0 +1,146 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/codecs/vp8/include/temporal_layers_checker.h"
+
+#include <memory>
+
+#include "modules/video_coding/codecs/interface/common_constants.h"
+#include "modules/video_coding/codecs/vp8/default_temporal_layers.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
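+// Creates a checker matching the layering strategy: a
+// DefaultTemporalLayersChecker for the fixed pattern, and the base
+// TemporalLayersChecker for the bitrate-dynamic (screenshare) mode.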
+std::unique_ptr<TemporalLayersChecker>
+TemporalLayersChecker::CreateTemporalLayersChecker(Vp8TemporalLayersType type,
+ int num_temporal_layers) {
+ switch (type) {
+ case Vp8TemporalLayersType::kFixedPattern:
+ return std::make_unique<DefaultTemporalLayersChecker>(
+ num_temporal_layers);
+ case Vp8TemporalLayersType::kBitrateDynamic:
+      // Conference mode temporal layering for screen content in the base
+      // stream.
+ return std::make_unique<TemporalLayersChecker>(num_temporal_layers);
+ }
+ RTC_CHECK_NOTREACHED();
+}
+
+TemporalLayersChecker::TemporalLayersChecker(int num_temporal_layers)
+ : num_temporal_layers_(num_temporal_layers),
+ sequence_number_(0),
+ last_sync_sequence_number_(0),
+ last_tl0_sequence_number_(0) {}
+
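+// Checks how `flags` uses the buffer tracked by `state` and updates that
+// state. When the buffer is referenced, tracks the lowest referenced sequence
+// number and clears `need_sync` if the referenced frame is a non-key frame
+// above the base temporal layer. Returns false if a delta frame references a
+// buffer holding a frame from a higher temporal layer.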
+bool TemporalLayersChecker::CheckAndUpdateBufferState(
+ BufferState* state,
+ bool* need_sync,
+ bool frame_is_keyframe,
+ uint8_t temporal_layer,
+ Vp8FrameConfig::BufferFlags flags,
+ uint32_t sequence_number,
+ uint32_t* lowest_sequence_referenced) {
+ if (flags & Vp8FrameConfig::BufferFlags::kReference) {
+ if (state->temporal_layer > 0 && !state->is_keyframe) {
+ *need_sync = false;
+ }
+ if (!state->is_keyframe && !frame_is_keyframe &&
+ state->sequence_number < *lowest_sequence_referenced) {
+ *lowest_sequence_referenced = state->sequence_number;
+ }
+ if (!frame_is_keyframe && !state->is_keyframe &&
+ state->temporal_layer > temporal_layer) {
+ RTC_LOG(LS_ERROR) << "Frame is referencing higher temporal layer.";
+ return false;
+ }
+ }
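+  // If the frame updates this buffer, record what it will hold from now on.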
+ if ((flags & Vp8FrameConfig::BufferFlags::kUpdate)) {
+ state->temporal_layer = temporal_layer;
+ state->sequence_number = sequence_number;
+ state->is_keyframe = frame_is_keyframe;
+ }
+ if (frame_is_keyframe)
+ state->is_keyframe = true;
+ return true;
+}
+
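+// Validates the temporal configuration of one frame. Dropped frames and
+// frames without a temporal index are accepted as-is; otherwise the temporal
+// index must be within bounds, no reference may point past the last sync
+// frame, and the layer-sync flag must match what the buffer states imply.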
+bool TemporalLayersChecker::CheckTemporalConfig(
+ bool frame_is_keyframe,
+ const Vp8FrameConfig& frame_config) {
+ if (frame_config.drop_frame ||
+ frame_config.packetizer_temporal_idx == kNoTemporalIdx) {
+ return true;
+ }
+ ++sequence_number_;
+ if (frame_config.packetizer_temporal_idx >= num_temporal_layers_ ||
+ (frame_config.packetizer_temporal_idx == kNoTemporalIdx &&
+ num_temporal_layers_ > 1)) {
+ RTC_LOG(LS_ERROR) << "Incorrect temporal layer set for frame: "
+ << frame_config.packetizer_temporal_idx
+ << " num_temporal_layers: " << num_temporal_layers_;
+ return false;
+ }
+
+ uint32_t lowest_sequence_referenced = sequence_number_;
+ bool need_sync = frame_config.packetizer_temporal_idx > 0 &&
+ frame_config.packetizer_temporal_idx != kNoTemporalIdx;
+
+ if (!CheckAndUpdateBufferState(
+ &last_, &need_sync, frame_is_keyframe,
+ frame_config.packetizer_temporal_idx, frame_config.last_buffer_flags,
+ sequence_number_, &lowest_sequence_referenced)) {
+ RTC_LOG(LS_ERROR) << "Error in the Last buffer";
+ return false;
+ }
+ if (!CheckAndUpdateBufferState(&golden_, &need_sync, frame_is_keyframe,
+ frame_config.packetizer_temporal_idx,
+ frame_config.golden_buffer_flags,
+ sequence_number_,
+ &lowest_sequence_referenced)) {
+ RTC_LOG(LS_ERROR) << "Error in the Golden buffer";
+ return false;
+ }
+ if (!CheckAndUpdateBufferState(
+ &arf_, &need_sync, frame_is_keyframe,
+ frame_config.packetizer_temporal_idx, frame_config.arf_buffer_flags,
+ sequence_number_, &lowest_sequence_referenced)) {
+ RTC_LOG(LS_ERROR) << "Error in the Arf buffer";
+ return false;
+ }
+
+ if (lowest_sequence_referenced < last_sync_sequence_number_ &&
+ !frame_is_keyframe) {
+ RTC_LOG(LS_ERROR) << "Reference past the last sync frame. Referenced "
+ << lowest_sequence_referenced << ", but sync was at "
+ << last_sync_sequence_number_;
+ return false;
+ }
+
+ if (frame_config.packetizer_temporal_idx == 0) {
+ last_tl0_sequence_number_ = sequence_number_;
+ }
+
+ if (frame_is_keyframe) {
+ last_sync_sequence_number_ = sequence_number_;
+ }
+
+ if (need_sync) {
+ last_sync_sequence_number_ = last_tl0_sequence_number_;
+ }
+
+ // Ignore sync flag on key-frames as it really doesn't matter.
+ if (need_sync != frame_config.layer_sync && !frame_is_keyframe) {
+ RTC_LOG(LS_ERROR) << "Sync bit is set incorrectly on a frame. Expected: "
+ << need_sync << " Actual: " << frame_config.layer_sync;
+ return false;
+ }
+ return true;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/vp8/test/vp8_impl_unittest.cc b/third_party/libwebrtc/modules/video_coding/codecs/vp8/test/vp8_impl_unittest.cc
new file mode 100644
index 0000000000..8cf761742e
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/vp8/test/vp8_impl_unittest.cc
@@ -0,0 +1,913 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stdio.h>
+
+#include <memory>
+
+#include "api/test/create_frame_generator.h"
+#include "api/test/frame_generator_interface.h"
+#include "api/test/mock_video_decoder.h"
+#include "api/test/mock_video_encoder.h"
+#include "api/video_codecs/video_encoder.h"
+#include "api/video_codecs/vp8_temporal_layers.h"
+#include "common_video/libyuv/include/webrtc_libyuv.h"
+#include "common_video/test/utilities.h"
+#include "modules/video_coding/codecs/interface/mock_libvpx_interface.h"
+#include "modules/video_coding/codecs/test/video_codec_unittest.h"
+#include "modules/video_coding/codecs/vp8/include/vp8.h"
+#include "modules/video_coding/codecs/vp8/libvpx_vp8_encoder.h"
+#include "modules/video_coding/utility/vp8_header_parser.h"
+#include "rtc_base/time_utils.h"
+#include "test/field_trial.h"
+#include "test/mappable_native_buffer.h"
+#include "test/video_codec_settings.h"
+
+namespace webrtc {
+
+using ::testing::_;
+using ::testing::AllOf;
+using ::testing::ElementsAre;
+using ::testing::ElementsAreArray;
+using ::testing::Field;
+using ::testing::Invoke;
+using ::testing::NiceMock;
+using ::testing::Return;
+using EncoderInfo = webrtc::VideoEncoder::EncoderInfo;
+using FramerateFractions =
+ absl::InlinedVector<uint8_t, webrtc::kMaxTemporalStreams>;
+
+namespace {
+constexpr uint32_t kLegacyScreenshareTl0BitrateKbps = 200;
+constexpr uint32_t kLegacyScreenshareTl1BitrateKbps = 1000;
+constexpr uint32_t kInitialTimestampRtp = 123;
+constexpr int64_t kTestNtpTimeMs = 456;
+constexpr int64_t kInitialTimestampMs = 789;
+constexpr int kNumCores = 1;
+constexpr size_t kMaxPayloadSize = 1440;
+constexpr int kWidth = 172;
+constexpr int kHeight = 144;
+constexpr float kFramerateFps = 30;
+
+const VideoEncoder::Capabilities kCapabilities(false);
+const VideoEncoder::Settings kSettings(kCapabilities,
+ kNumCores,
+ kMaxPayloadSize);
+} // namespace
+
+class TestVp8Impl : public VideoCodecUnitTest {
+ protected:
+ std::unique_ptr<VideoEncoder> CreateEncoder() override {
+ return VP8Encoder::Create();
+ }
+
+ std::unique_ptr<VideoDecoder> CreateDecoder() override {
+ return VP8Decoder::Create();
+ }
+
+ void ModifyCodecSettings(VideoCodec* codec_settings) override {
+ webrtc::test::CodecSettings(kVideoCodecVP8, codec_settings);
+ codec_settings->width = kWidth;
+ codec_settings->height = kHeight;
+ codec_settings->SetVideoEncoderComplexity(
+ VideoCodecComplexity::kComplexityNormal);
+ }
+
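+  // Encodes `input_frame`, waits for the encoded frame, and verifies the
+  // parsed bitstream QP and basic codec-specific metadata.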
+ void EncodeAndWaitForFrame(const VideoFrame& input_frame,
+ EncodedImage* encoded_frame,
+ CodecSpecificInfo* codec_specific_info,
+ bool keyframe = false) {
+ std::vector<VideoFrameType> frame_types;
+ if (keyframe) {
+ frame_types.emplace_back(VideoFrameType::kVideoFrameKey);
+ } else {
+ frame_types.emplace_back(VideoFrameType::kVideoFrameDelta);
+ }
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->Encode(input_frame, &frame_types));
+ ASSERT_TRUE(WaitForEncodedFrame(encoded_frame, codec_specific_info));
+ VerifyQpParser(*encoded_frame);
+ EXPECT_EQ(kVideoCodecVP8, codec_specific_info->codecType);
+ EXPECT_EQ(0, encoded_frame->SpatialIndex());
+ }
+
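+  // Encodes `input_frame` and verifies that the encoded frame lands on
+  // temporal layer `temporal_idx`.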
+ void EncodeAndExpectFrameWith(const VideoFrame& input_frame,
+ uint8_t temporal_idx,
+ bool keyframe = false) {
+ EncodedImage encoded_frame;
+ CodecSpecificInfo codec_specific_info;
+ EncodeAndWaitForFrame(input_frame, &encoded_frame, &codec_specific_info,
+ keyframe);
+ EXPECT_EQ(temporal_idx, codec_specific_info.codecSpecific.VP8.temporalIdx);
+ }
+
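+  // Parses the QP directly from the encoded bitstream and verifies that it
+  // matches the QP reported by the encoder.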
+ void VerifyQpParser(const EncodedImage& encoded_frame) const {
+ int qp;
+ EXPECT_GT(encoded_frame.size(), 0u);
+ ASSERT_TRUE(vp8::GetQp(encoded_frame.data(), encoded_frame.size(), &qp));
+ EXPECT_EQ(encoded_frame.qp_, qp) << "Encoder QP != parsed bitstream QP.";
+ }
+};
+
+TEST_F(TestVp8Impl, ErrorResilienceDisabledForNoTemporalLayers) {
+ codec_settings_.simulcastStream[0].numberOfTemporalLayers = 1;
+
+ auto* const vpx = new NiceMock<MockLibvpxInterface>();
+ LibvpxVp8Encoder encoder((std::unique_ptr<LibvpxInterface>(vpx)),
+ VP8Encoder::Settings());
+ EXPECT_CALL(*vpx,
+ codec_enc_init(
+ _, _, Field(&vpx_codec_enc_cfg_t::g_error_resilient, 0), _));
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder.InitEncode(&codec_settings_, kSettings));
+}
+
+TEST_F(TestVp8Impl, DefaultErrorResilienceEnabledForTemporalLayers) {
+ codec_settings_.simulcastStream[0].numberOfTemporalLayers = 2;
+ codec_settings_.VP8()->numberOfTemporalLayers = 2;
+
+ auto* const vpx = new NiceMock<MockLibvpxInterface>();
+ LibvpxVp8Encoder encoder((std::unique_ptr<LibvpxInterface>(vpx)),
+ VP8Encoder::Settings());
+ EXPECT_CALL(*vpx,
+ codec_enc_init(_, _,
+ Field(&vpx_codec_enc_cfg_t::g_error_resilient,
+ VPX_ERROR_RESILIENT_DEFAULT),
+ _));
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder.InitEncode(&codec_settings_, kSettings));
+}
+
+TEST_F(TestVp8Impl,
+ PartitionErrorResilienceEnabledForTemporalLayersWithFieldTrial) {
+ test::ScopedFieldTrials field_trials(
+ "WebRTC-VP8-ForcePartitionResilience/Enabled/");
+ codec_settings_.simulcastStream[0].numberOfTemporalLayers = 2;
+ codec_settings_.VP8()->numberOfTemporalLayers = 2;
+
+ auto* const vpx = new NiceMock<MockLibvpxInterface>();
+ LibvpxVp8Encoder encoder((std::unique_ptr<LibvpxInterface>(vpx)),
+ VP8Encoder::Settings());
+ EXPECT_CALL(*vpx,
+ codec_enc_init(_, _,
+ Field(&vpx_codec_enc_cfg_t::g_error_resilient,
+ VPX_ERROR_RESILIENT_PARTITIONS),
+ _));
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder.InitEncode(&codec_settings_, kSettings));
+}
+
+TEST_F(TestVp8Impl, SetRates) {
+ codec_settings_.SetFrameDropEnabled(true);
+ auto* const vpx = new NiceMock<MockLibvpxInterface>();
+ LibvpxVp8Encoder encoder((std::unique_ptr<LibvpxInterface>(vpx)),
+ VP8Encoder::Settings());
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder.InitEncode(&codec_settings_,
+ VideoEncoder::Settings(kCapabilities, 1, 1000)));
+
+ const uint32_t kBitrateBps = 300000;
+ VideoBitrateAllocation bitrate_allocation;
+ bitrate_allocation.SetBitrate(0, 0, kBitrateBps);
+ EXPECT_CALL(
+ *vpx,
+ codec_enc_config_set(
+ _, AllOf(Field(&vpx_codec_enc_cfg_t::rc_target_bitrate,
+ kBitrateBps / 1000),
+ Field(&vpx_codec_enc_cfg_t::rc_undershoot_pct, 100u),
+ Field(&vpx_codec_enc_cfg_t::rc_overshoot_pct, 15u),
+ Field(&vpx_codec_enc_cfg_t::rc_buf_sz, 1000u),
+ Field(&vpx_codec_enc_cfg_t::rc_buf_optimal_sz, 600u),
+ Field(&vpx_codec_enc_cfg_t::rc_dropframe_thresh, 30u))))
+ .WillOnce(Return(VPX_CODEC_OK));
+ encoder.SetRates(VideoEncoder::RateControlParameters(
+ bitrate_allocation, static_cast<double>(codec_settings_.maxFramerate)));
+}
+
+TEST_F(TestVp8Impl, EncodeFrameAndRelease) {
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Release());
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->InitEncode(&codec_settings_, kSettings));
+
+ EncodedImage encoded_frame;
+ CodecSpecificInfo codec_specific_info;
+ EncodeAndWaitForFrame(NextInputFrame(), &encoded_frame, &codec_specific_info);
+
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Release());
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_UNINITIALIZED,
+ encoder_->Encode(NextInputFrame(), nullptr));
+}
+
+TEST_F(TestVp8Impl, EncodeNv12FrameSimulcast) {
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Release());
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->InitEncode(&codec_settings_, kSettings));
+
+ EncodedImage encoded_frame;
+ CodecSpecificInfo codec_specific_info;
+ input_frame_generator_ = test::CreateSquareFrameGenerator(
+ kWidth, kHeight, test::FrameGeneratorInterface::OutputType::kNV12,
+ absl::nullopt);
+ EncodeAndWaitForFrame(NextInputFrame(), &encoded_frame, &codec_specific_info);
+
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Release());
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_UNINITIALIZED,
+ encoder_->Encode(NextInputFrame(), nullptr));
+}
+
+TEST_F(TestVp8Impl, EncodeI420FrameAfterNv12Frame) {
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Release());
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->InitEncode(&codec_settings_, kSettings));
+
+ EncodedImage encoded_frame;
+ CodecSpecificInfo codec_specific_info;
+ input_frame_generator_ = test::CreateSquareFrameGenerator(
+ kWidth, kHeight, test::FrameGeneratorInterface::OutputType::kNV12,
+ absl::nullopt);
+ EncodeAndWaitForFrame(NextInputFrame(), &encoded_frame, &codec_specific_info);
+ input_frame_generator_ = test::CreateSquareFrameGenerator(
+ kWidth, kHeight, test::FrameGeneratorInterface::OutputType::kI420,
+ absl::nullopt);
+ EncodeAndWaitForFrame(NextInputFrame(), &encoded_frame, &codec_specific_info);
+
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Release());
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_UNINITIALIZED,
+ encoder_->Encode(NextInputFrame(), nullptr));
+}
+
+TEST_F(TestVp8Impl, Configure) {
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Release());
+ EXPECT_TRUE(decoder_->Configure({}));
+}
+
+TEST_F(TestVp8Impl, OnEncodedImageReportsInfo) {
+ VideoFrame input_frame = NextInputFrame();
+ input_frame.set_timestamp(kInitialTimestampRtp);
+ input_frame.set_timestamp_us(kInitialTimestampMs *
+ rtc::kNumMicrosecsPerMillisec);
+ EncodedImage encoded_frame;
+ CodecSpecificInfo codec_specific_info;
+ EncodeAndWaitForFrame(input_frame, &encoded_frame, &codec_specific_info);
+
+ EXPECT_EQ(kInitialTimestampRtp, encoded_frame.Timestamp());
+ EXPECT_EQ(kWidth, static_cast<int>(encoded_frame._encodedWidth));
+ EXPECT_EQ(kHeight, static_cast<int>(encoded_frame._encodedHeight));
+}
+
+TEST_F(TestVp8Impl,
+ EncoderFillsResolutionInCodecAgnosticSectionOfCodecSpecificInfo) {
+ EncodedImage encoded_frame;
+ CodecSpecificInfo codec_specific_info;
+ EncodeAndWaitForFrame(NextInputFrame(), &encoded_frame, &codec_specific_info);
+
+ ASSERT_TRUE(codec_specific_info.template_structure);
+ EXPECT_THAT(codec_specific_info.template_structure->resolutions,
+ ElementsAre(RenderResolution(kWidth, kHeight)));
+}
+
+TEST_F(TestVp8Impl, DecodedQpEqualsEncodedQp) {
+ VideoFrame input_frame = NextInputFrame();
+ EncodedImage encoded_frame;
+ CodecSpecificInfo codec_specific_info;
+ EncodeAndWaitForFrame(input_frame, &encoded_frame, &codec_specific_info);
+
+ // First frame should be a key frame.
+ encoded_frame._frameType = VideoFrameType::kVideoFrameKey;
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Decode(encoded_frame, false, -1));
+ std::unique_ptr<VideoFrame> decoded_frame;
+ absl::optional<uint8_t> decoded_qp;
+ ASSERT_TRUE(WaitForDecodedFrame(&decoded_frame, &decoded_qp));
+ ASSERT_TRUE(decoded_frame);
+ ASSERT_TRUE(decoded_qp);
+ EXPECT_GT(I420PSNR(&input_frame, decoded_frame.get()), 36);
+ EXPECT_EQ(encoded_frame.qp_, *decoded_qp);
+}
+
+TEST_F(TestVp8Impl, ChecksSimulcastSettings) {
+ codec_settings_.numberOfSimulcastStreams = 2;
+  // Resolutions are not in ascending order and temporal layers do not match.
+ codec_settings_.simulcastStream[0] = {.width = kWidth,
+ .height = kHeight,
+ .maxFramerate = kFramerateFps,
+ .numberOfTemporalLayers = 2,
+ .maxBitrate = 4000,
+ .targetBitrate = 3000,
+ .minBitrate = 2000,
+ .qpMax = 80};
+ codec_settings_.simulcastStream[1] = {.width = kWidth / 2,
+ .height = kHeight / 2,
+ .maxFramerate = 30,
+ .numberOfTemporalLayers = 3,
+ .maxBitrate = 4000,
+ .targetBitrate = 3000,
+ .minBitrate = 2000,
+ .qpMax = 80};
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_ERR_SIMULCAST_PARAMETERS_NOT_SUPPORTED,
+ encoder_->InitEncode(&codec_settings_, kSettings));
+ codec_settings_.numberOfSimulcastStreams = 3;
+ // Resolutions are not in ascending order.
+ codec_settings_.simulcastStream[0] = {.width = kWidth / 2,
+ .height = kHeight / 2,
+ .maxFramerate = kFramerateFps,
+ .numberOfTemporalLayers = 1,
+ .maxBitrate = 4000,
+ .targetBitrate = 3000,
+ .minBitrate = 2000,
+ .qpMax = 80};
+ codec_settings_.simulcastStream[1] = {.width = kWidth / 2 - 1,
+ .height = kHeight / 2 - 1,
+ .maxFramerate = kFramerateFps,
+ .numberOfTemporalLayers = 1,
+ .maxBitrate = 4000,
+ .targetBitrate = 3000,
+ .minBitrate = 2000,
+ .qpMax = 80};
+ codec_settings_.simulcastStream[2] = {.width = kWidth,
+ .height = kHeight,
+ .maxFramerate = 30,
+ .numberOfTemporalLayers = 1,
+ .maxBitrate = 4000,
+ .targetBitrate = 3000,
+ .minBitrate = 2000,
+ .qpMax = 80};
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_ERR_SIMULCAST_PARAMETERS_NOT_SUPPORTED,
+ encoder_->InitEncode(&codec_settings_, kSettings));
+ // Resolutions are not in ascending order.
+ codec_settings_.simulcastStream[0] = {.width = kWidth,
+ .height = kHeight,
+ .maxFramerate = kFramerateFps,
+ .numberOfTemporalLayers = 1,
+ .maxBitrate = 4000,
+ .targetBitrate = 3000,
+ .minBitrate = 2000,
+ .qpMax = 80};
+ codec_settings_.simulcastStream[1] = {.width = kWidth,
+ .height = kHeight,
+ .maxFramerate = kFramerateFps,
+ .numberOfTemporalLayers = 1,
+ .maxBitrate = 4000,
+ .targetBitrate = 3000,
+ .minBitrate = 2000,
+ .qpMax = 80};
+ codec_settings_.simulcastStream[2] = {.width = kWidth - 1,
+ .height = kHeight - 1,
+ .maxFramerate = kFramerateFps,
+ .numberOfTemporalLayers = 1,
+ .maxBitrate = 4000,
+ .targetBitrate = 3000,
+ .minBitrate = 2000,
+ .qpMax = 80};
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_ERR_SIMULCAST_PARAMETERS_NOT_SUPPORTED,
+ encoder_->InitEncode(&codec_settings_, kSettings));
+ // Temporal layers do not match.
+ codec_settings_.simulcastStream[0] = {.width = kWidth / 4,
+ .height = kHeight / 4,
+ .maxFramerate = kFramerateFps,
+ .numberOfTemporalLayers = 1,
+ .maxBitrate = 4000,
+ .targetBitrate = 3000,
+ .minBitrate = 2000,
+ .qpMax = 80};
+ codec_settings_.simulcastStream[1] = {.width = kWidth / 2,
+ .height = kHeight / 2,
+ .maxFramerate = kFramerateFps,
+ .numberOfTemporalLayers = 2,
+ .maxBitrate = 4000,
+ .targetBitrate = 3000,
+ .minBitrate = 2000,
+ .qpMax = 80};
+ codec_settings_.simulcastStream[2] = {.width = kWidth,
+ .height = kHeight,
+ .maxFramerate = kFramerateFps,
+ .numberOfTemporalLayers = 3,
+ .maxBitrate = 4000,
+ .targetBitrate = 3000,
+ .minBitrate = 2000,
+ .qpMax = 80};
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_ERR_SIMULCAST_PARAMETERS_NOT_SUPPORTED,
+ encoder_->InitEncode(&codec_settings_, kSettings));
+ // Resolutions do not match codec config.
+ codec_settings_.simulcastStream[0] = {.width = kWidth / 4 + 1,
+ .height = kHeight / 4 + 1,
+ .maxFramerate = kFramerateFps,
+ .numberOfTemporalLayers = 1,
+ .maxBitrate = 4000,
+ .targetBitrate = 3000,
+ .minBitrate = 2000,
+ .qpMax = 80};
+ codec_settings_.simulcastStream[1] = {.width = kWidth / 2 + 2,
+ .height = kHeight / 2 + 2,
+ .maxFramerate = kFramerateFps,
+ .numberOfTemporalLayers = 1,
+ .maxBitrate = 4000,
+ .targetBitrate = 3000,
+ .minBitrate = 2000,
+ .qpMax = 80};
+ codec_settings_.simulcastStream[2] = {.width = kWidth + 4,
+ .height = kHeight + 4,
+ .maxFramerate = kFramerateFps,
+ .numberOfTemporalLayers = 1,
+ .maxBitrate = 4000,
+ .targetBitrate = 3000,
+ .minBitrate = 2000,
+ .qpMax = 80};
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_ERR_SIMULCAST_PARAMETERS_NOT_SUPPORTED,
+ encoder_->InitEncode(&codec_settings_, kSettings));
+ // Everything fine: scaling by 2, top resolution matches video, temporal
+ // settings are the same for all layers.
+ codec_settings_.simulcastStream[0] = {.width = kWidth / 4,
+ .height = kHeight / 4,
+ .maxFramerate = kFramerateFps,
+ .numberOfTemporalLayers = 1,
+ .maxBitrate = 4000,
+ .targetBitrate = 3000,
+ .minBitrate = 2000,
+ .qpMax = 80};
+ codec_settings_.simulcastStream[1] = {.width = kWidth / 2,
+ .height = kHeight / 2,
+ .maxFramerate = kFramerateFps,
+ .numberOfTemporalLayers = 1,
+ .maxBitrate = 4000,
+ .targetBitrate = 3000,
+ .minBitrate = 2000,
+ .qpMax = 80};
+ codec_settings_.simulcastStream[2] = {.width = kWidth,
+ .height = kHeight,
+ .maxFramerate = kFramerateFps,
+ .numberOfTemporalLayers = 1,
+ .maxBitrate = 4000,
+ .targetBitrate = 3000,
+ .minBitrate = 2000,
+ .qpMax = 80};
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->InitEncode(&codec_settings_, kSettings));
+ // Everything fine: custom scaling, top resolution matches video, temporal
+ // settings are the same for all layers.
+ codec_settings_.simulcastStream[0] = {.width = kWidth / 4,
+ .height = kHeight / 4,
+ .maxFramerate = kFramerateFps,
+ .numberOfTemporalLayers = 1,
+ .maxBitrate = 4000,
+ .targetBitrate = 3000,
+ .minBitrate = 2000,
+ .qpMax = 80};
+ codec_settings_.simulcastStream[1] = {.width = kWidth,
+ .height = kHeight,
+ .maxFramerate = kFramerateFps,
+ .numberOfTemporalLayers = 1,
+ .maxBitrate = 4000,
+ .targetBitrate = 3000,
+ .minBitrate = 2000,
+ .qpMax = 80};
+ codec_settings_.simulcastStream[2] = {.width = kWidth,
+ .height = kHeight,
+ .maxFramerate = kFramerateFps,
+ .numberOfTemporalLayers = 1,
+ .maxBitrate = 4000,
+ .targetBitrate = 3000,
+ .minBitrate = 2000,
+ .qpMax = 80};
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->InitEncode(&codec_settings_, kSettings));
+}
+
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_AlignedStrideEncodeDecode DISABLED_AlignedStrideEncodeDecode
+#else
+#define MAYBE_AlignedStrideEncodeDecode AlignedStrideEncodeDecode
+#endif
+TEST_F(TestVp8Impl, MAYBE_AlignedStrideEncodeDecode) {
+ VideoFrame input_frame = NextInputFrame();
+ input_frame.set_timestamp(kInitialTimestampRtp);
+ input_frame.set_timestamp_us(kInitialTimestampMs *
+ rtc::kNumMicrosecsPerMillisec);
+ EncodedImage encoded_frame;
+ CodecSpecificInfo codec_specific_info;
+ EncodeAndWaitForFrame(input_frame, &encoded_frame, &codec_specific_info);
+
+ // First frame should be a key frame.
+ encoded_frame._frameType = VideoFrameType::kVideoFrameKey;
+ encoded_frame.ntp_time_ms_ = kTestNtpTimeMs;
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Decode(encoded_frame, false, -1));
+
+ std::unique_ptr<VideoFrame> decoded_frame;
+ absl::optional<uint8_t> decoded_qp;
+ ASSERT_TRUE(WaitForDecodedFrame(&decoded_frame, &decoded_qp));
+ ASSERT_TRUE(decoded_frame);
+ // Compute PSNR on all planes (faster than SSIM).
+ EXPECT_GT(I420PSNR(&input_frame, decoded_frame.get()), 36);
+ EXPECT_EQ(kInitialTimestampRtp, decoded_frame->timestamp());
+}
+
+TEST_F(TestVp8Impl, EncoderWith2TemporalLayers) {
+ codec_settings_.VP8()->numberOfTemporalLayers = 2;
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->InitEncode(&codec_settings_, kSettings));
+
+ // Temporal layer 0.
+ EncodedImage encoded_frame;
+ CodecSpecificInfo codec_specific_info;
+ EncodeAndWaitForFrame(NextInputFrame(), &encoded_frame, &codec_specific_info);
+
+ EXPECT_EQ(0, codec_specific_info.codecSpecific.VP8.temporalIdx);
+ // Temporal layer 1.
+ EncodeAndExpectFrameWith(NextInputFrame(), 1);
+ // Temporal layer 0.
+ EncodeAndExpectFrameWith(NextInputFrame(), 0);
+ // Temporal layer 1.
+ EncodeAndExpectFrameWith(NextInputFrame(), 1);
+}
+
+TEST_F(TestVp8Impl, ScalingDisabledIfAutomaticResizeOff) {
+ codec_settings_.VP8()->automaticResizeOn = false;
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->InitEncode(&codec_settings_, kSettings));
+
+ VideoEncoder::ScalingSettings settings =
+ encoder_->GetEncoderInfo().scaling_settings;
+ EXPECT_FALSE(settings.thresholds.has_value());
+}
+
+TEST_F(TestVp8Impl, ScalingEnabledIfAutomaticResizeOn) {
+ codec_settings_.SetFrameDropEnabled(true);
+ codec_settings_.VP8()->automaticResizeOn = true;
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->InitEncode(&codec_settings_, kSettings));
+
+ VideoEncoder::ScalingSettings settings =
+ encoder_->GetEncoderInfo().scaling_settings;
+ EXPECT_TRUE(settings.thresholds.has_value());
+ EXPECT_EQ(kDefaultMinPixelsPerFrame, settings.min_pixels_per_frame);
+}
+
+TEST_F(TestVp8Impl, DontDropKeyframes) {
+ // Set very high resolution to trigger overuse more easily.
+ const int kScreenWidth = 1920;
+ const int kScreenHeight = 1080;
+
+ codec_settings_.width = kScreenWidth;
+ codec_settings_.height = kScreenHeight;
+
+  // Screensharing has the internal frame dropper off; instead it asks
+  // ScreenshareLayers per frame whether the frame should be dropped.
+ codec_settings_.SetFrameDropEnabled(false);
+ codec_settings_.mode = VideoCodecMode::kScreensharing;
+ // ScreenshareLayers triggers on 2 temporal layers and 1000kbps max bitrate.
+ codec_settings_.VP8()->numberOfTemporalLayers = 2;
+ codec_settings_.maxBitrate = 1000;
+
+  // Reset the frame generator with a large number of squares, leading to lots
+  // of detail and a high probability of overshoot.
+ input_frame_generator_ = test::CreateSquareFrameGenerator(
+ codec_settings_.width, codec_settings_.height,
+ test::FrameGeneratorInterface::OutputType::kI420,
+ /* num_squares = */ absl::optional<int>(300));
+
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->InitEncode(&codec_settings_, kSettings));
+
+ VideoBitrateAllocation bitrate_allocation;
+ // Bitrate only enough for TL0.
+ bitrate_allocation.SetBitrate(0, 0, 200000);
+ encoder_->SetRates(
+ VideoEncoder::RateControlParameters(bitrate_allocation, 5.0));
+
+ EncodedImage encoded_frame;
+ CodecSpecificInfo codec_specific_info;
+ EncodeAndWaitForFrame(NextInputFrame(), &encoded_frame, &codec_specific_info,
+ true);
+ EncodeAndExpectFrameWith(NextInputFrame(), 0, true);
+ EncodeAndExpectFrameWith(NextInputFrame(), 0, true);
+ EncodeAndExpectFrameWith(NextInputFrame(), 0, true);
+}
+
+TEST_F(TestVp8Impl, KeepsTimestampOnReencode) {
+ auto* const vpx = new NiceMock<MockLibvpxInterface>();
+ LibvpxVp8Encoder encoder((std::unique_ptr<LibvpxInterface>(vpx)),
+ VP8Encoder::Settings());
+
+ // Settings needed to trigger ScreenshareLayers usage, which is required for
+ // overshoot-drop-reencode logic.
+ codec_settings_.maxBitrate = 1000;
+ codec_settings_.mode = VideoCodecMode::kScreensharing;
+ codec_settings_.VP8()->numberOfTemporalLayers = 2;
+ codec_settings_.legacy_conference_mode = true;
+
+ EXPECT_CALL(*vpx, img_wrap(_, _, _, _, _, _))
+ .WillOnce(Invoke([](vpx_image_t* img, vpx_img_fmt_t fmt, unsigned int d_w,
+ unsigned int d_h, unsigned int stride_align,
+ unsigned char* img_data) {
+ img->fmt = fmt;
+ img->d_w = d_w;
+ img->d_h = d_h;
+ img->img_data = img_data;
+ return img;
+ }));
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder.InitEncode(&codec_settings_,
+ VideoEncoder::Settings(kCapabilities, 1, 1000)));
+ MockEncodedImageCallback callback;
+ encoder.RegisterEncodeCompleteCallback(&callback);
+
+  // Simulate an overshoot drop and re-encode: the encode function will be
+  // called twice with the same parameters. codec_get_cx_data() will by
+  // default return no image data, which is interpreted as a drop.
+ EXPECT_CALL(*vpx, codec_encode(_, _, /* pts = */ 0, _, _, _))
+ .Times(2)
+ .WillRepeatedly(Return(vpx_codec_err_t::VPX_CODEC_OK));
+
+ auto delta_frame =
+ std::vector<VideoFrameType>{VideoFrameType::kVideoFrameDelta};
+ encoder.Encode(NextInputFrame(), &delta_frame);
+}
+
+TEST(LibvpxVp8EncoderTest, GetEncoderInfoReturnsStaticInformation) {
+ auto* const vpx = new NiceMock<MockLibvpxInterface>();
+ LibvpxVp8Encoder encoder((std::unique_ptr<LibvpxInterface>(vpx)),
+ VP8Encoder::Settings());
+
+ const auto info = encoder.GetEncoderInfo();
+
+ EXPECT_FALSE(info.supports_native_handle);
+ EXPECT_FALSE(info.is_hardware_accelerated);
+ EXPECT_TRUE(info.supports_simulcast);
+ EXPECT_EQ(info.implementation_name, "libvpx");
+ EXPECT_EQ(info.requested_resolution_alignment, 1);
+ EXPECT_THAT(info.preferred_pixel_formats,
+ testing::UnorderedElementsAre(VideoFrameBuffer::Type::kNV12,
+ VideoFrameBuffer::Type::kI420));
+}
+
+TEST(LibvpxVp8EncoderTest, RequestedResolutionAlignmentFromFieldTrial) {
+ test::ScopedFieldTrials field_trials(
+ "WebRTC-VP8-GetEncoderInfoOverride/"
+ "requested_resolution_alignment:10/");
+
+ auto* const vpx = new NiceMock<MockLibvpxInterface>();
+ LibvpxVp8Encoder encoder((std::unique_ptr<LibvpxInterface>(vpx)),
+ VP8Encoder::Settings());
+
+ EXPECT_EQ(encoder.GetEncoderInfo().requested_resolution_alignment, 10);
+ EXPECT_FALSE(
+ encoder.GetEncoderInfo().apply_alignment_to_all_simulcast_layers);
+ EXPECT_TRUE(encoder.GetEncoderInfo().resolution_bitrate_limits.empty());
+}
+
+TEST(LibvpxVp8EncoderTest, ResolutionBitrateLimitsFromFieldTrial) {
+ test::ScopedFieldTrials field_trials(
+ "WebRTC-VP8-GetEncoderInfoOverride/"
+ "frame_size_pixels:123|456|789,"
+ "min_start_bitrate_bps:11000|22000|33000,"
+ "min_bitrate_bps:44000|55000|66000,"
+ "max_bitrate_bps:77000|88000|99000/");
+
+ auto* const vpx = new NiceMock<MockLibvpxInterface>();
+ LibvpxVp8Encoder encoder((std::unique_ptr<LibvpxInterface>(vpx)),
+ VP8Encoder::Settings());
+
+ EXPECT_THAT(
+ encoder.GetEncoderInfo().resolution_bitrate_limits,
+ ::testing::ElementsAre(
+ VideoEncoder::ResolutionBitrateLimits{123, 11000, 44000, 77000},
+ VideoEncoder::ResolutionBitrateLimits{456, 22000, 55000, 88000},
+ VideoEncoder::ResolutionBitrateLimits{789, 33000, 66000, 99000}));
+}
+
+TEST(LibvpxVp8EncoderTest,
+ GetEncoderInfoReturnsEmptyResolutionBitrateLimitsByDefault) {
+ auto* const vpx = new NiceMock<MockLibvpxInterface>();
+ LibvpxVp8Encoder encoder((std::unique_ptr<LibvpxInterface>(vpx)),
+ VP8Encoder::Settings());
+
+ const auto info = encoder.GetEncoderInfo();
+
+ EXPECT_TRUE(info.resolution_bitrate_limits.empty());
+}
+
+TEST(LibvpxVp8EncoderTest,
+ GetEncoderInfoReturnsResolutionBitrateLimitsAsConfigured) {
+ std::vector<VideoEncoder::ResolutionBitrateLimits> resolution_bitrate_limits =
+ {VideoEncoder::ResolutionBitrateLimits(/*frame_size_pixels=*/640 * 360,
+ /*min_start_bitrate_bps=*/300,
+ /*min_bitrate_bps=*/100,
+ /*max_bitrate_bps=*/1000),
+ VideoEncoder::ResolutionBitrateLimits(320 * 180, 100, 30, 500)};
+ VP8Encoder::Settings settings;
+ settings.resolution_bitrate_limits = resolution_bitrate_limits;
+
+ auto* const vpx = new NiceMock<MockLibvpxInterface>();
+ LibvpxVp8Encoder encoder((std::unique_ptr<LibvpxInterface>(vpx)),
+ std::move(settings));
+
+ const auto info = encoder.GetEncoderInfo();
+
+ EXPECT_EQ(info.resolution_bitrate_limits, resolution_bitrate_limits);
+}
+
+TEST_F(TestVp8Impl, GetEncoderInfoFpsAllocationNoLayers) {
+ FramerateFractions expected_fps_allocation[kMaxSpatialLayers] = {
+ FramerateFractions(1, EncoderInfo::kMaxFramerateFraction)};
+
+ EXPECT_THAT(encoder_->GetEncoderInfo().fps_allocation,
+ ::testing::ElementsAreArray(expected_fps_allocation));
+}
+
+TEST_F(TestVp8Impl, GetEncoderInfoFpsAllocationTwoTemporalLayers) {
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Release());
+ codec_settings_.numberOfSimulcastStreams = 1;
+ codec_settings_.simulcastStream[0].active = true;
+ codec_settings_.simulcastStream[0].targetBitrate = 100;
+ codec_settings_.simulcastStream[0].maxBitrate = 100;
+ codec_settings_.simulcastStream[0].numberOfTemporalLayers = 2;
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->InitEncode(&codec_settings_, kSettings));
+
+ FramerateFractions expected_fps_allocation[kMaxSpatialLayers];
+ expected_fps_allocation[0].push_back(EncoderInfo::kMaxFramerateFraction / 2);
+ expected_fps_allocation[0].push_back(EncoderInfo::kMaxFramerateFraction);
+
+ EXPECT_THAT(encoder_->GetEncoderInfo().fps_allocation,
+ ::testing::ElementsAreArray(expected_fps_allocation));
+}
+
+TEST_F(TestVp8Impl, GetEncoderInfoFpsAllocationThreeTemporalLayers) {
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Release());
+ codec_settings_.numberOfSimulcastStreams = 1;
+ codec_settings_.simulcastStream[0].active = true;
+ codec_settings_.simulcastStream[0].targetBitrate = 100;
+ codec_settings_.simulcastStream[0].maxBitrate = 100;
+ codec_settings_.simulcastStream[0].numberOfTemporalLayers = 3;
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->InitEncode(&codec_settings_, kSettings));
+
+ FramerateFractions expected_fps_allocation[kMaxSpatialLayers];
+ expected_fps_allocation[0].push_back(EncoderInfo::kMaxFramerateFraction / 4);
+ expected_fps_allocation[0].push_back(EncoderInfo::kMaxFramerateFraction / 2);
+ expected_fps_allocation[0].push_back(EncoderInfo::kMaxFramerateFraction);
+
+ EXPECT_THAT(encoder_->GetEncoderInfo().fps_allocation,
+ ::testing::ElementsAreArray(expected_fps_allocation));
+}
+
+TEST_F(TestVp8Impl, GetEncoderInfoFpsAllocationScreenshareLayers) {
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Release());
+ codec_settings_.numberOfSimulcastStreams = 1;
+ codec_settings_.mode = VideoCodecMode::kScreensharing;
+ codec_settings_.simulcastStream[0].active = true;
+ codec_settings_.simulcastStream[0].minBitrate = 30;
+ codec_settings_.simulcastStream[0].targetBitrate =
+ kLegacyScreenshareTl0BitrateKbps;
+ codec_settings_.simulcastStream[0].maxBitrate =
+ kLegacyScreenshareTl1BitrateKbps;
+ codec_settings_.simulcastStream[0].numberOfTemporalLayers = 2;
+ codec_settings_.legacy_conference_mode = true;
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->InitEncode(&codec_settings_, kSettings));
+
+ // Expect empty vector, since this mode doesn't have a fixed framerate.
+ FramerateFractions expected_fps_allocation[kMaxSpatialLayers];
+ EXPECT_THAT(encoder_->GetEncoderInfo().fps_allocation,
+ ::testing::ElementsAreArray(expected_fps_allocation));
+}
+
+TEST_F(TestVp8Impl, GetEncoderInfoFpsAllocationSimulcastVideo) {
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Release());
+
+ // Set up three simulcast streams with three temporal layers each.
+ codec_settings_.numberOfSimulcastStreams = 3;
+ for (int i = 0; i < codec_settings_.numberOfSimulcastStreams; ++i) {
+ codec_settings_.simulcastStream[i].active = true;
+ codec_settings_.simulcastStream[i].minBitrate = 30;
+ codec_settings_.simulcastStream[i].targetBitrate = 30;
+ codec_settings_.simulcastStream[i].maxBitrate = 30;
+ codec_settings_.simulcastStream[i].numberOfTemporalLayers = 3;
+ codec_settings_.simulcastStream[i].width =
+ codec_settings_.width >>
+ (codec_settings_.numberOfSimulcastStreams - i - 1);
+ codec_settings_.simulcastStream[i].height =
+ codec_settings_.height >>
+ (codec_settings_.numberOfSimulcastStreams - i - 1);
+ }
+
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->InitEncode(&codec_settings_, kSettings));
+
+ FramerateFractions expected_fps_allocation[kMaxSpatialLayers];
+ expected_fps_allocation[0].push_back(EncoderInfo::kMaxFramerateFraction / 4);
+ expected_fps_allocation[0].push_back(EncoderInfo::kMaxFramerateFraction / 2);
+ expected_fps_allocation[0].push_back(EncoderInfo::kMaxFramerateFraction);
+ expected_fps_allocation[1] = expected_fps_allocation[0];
+ expected_fps_allocation[2] = expected_fps_allocation[0];
+ EXPECT_THAT(encoder_->GetEncoderInfo().fps_allocation,
+ ::testing::ElementsAreArray(expected_fps_allocation));
+
+ // Release encoder and re-init without temporal layers.
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Release());
+
+ // Sanity check fps allocation when not inited.
+ FramerateFractions default_fps_fraction[kMaxSpatialLayers];
+ default_fps_fraction[0].push_back(EncoderInfo::kMaxFramerateFraction);
+ EXPECT_THAT(encoder_->GetEncoderInfo().fps_allocation,
+ ::testing::ElementsAreArray(default_fps_fraction));
+
+ for (int i = 0; i < codec_settings_.numberOfSimulcastStreams; ++i) {
+ codec_settings_.simulcastStream[i].numberOfTemporalLayers = 1;
+ }
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->InitEncode(&codec_settings_, kSettings));
+
+ for (size_t i = 0; i < 3; ++i) {
+ expected_fps_allocation[i].clear();
+ expected_fps_allocation[i].push_back(EncoderInfo::kMaxFramerateFraction);
+ }
+ EXPECT_THAT(encoder_->GetEncoderInfo().fps_allocation,
+ ::testing::ElementsAreArray(expected_fps_allocation));
+}
+
+class TestVp8ImplForPixelFormat
+ : public TestVp8Impl,
+ public ::testing::WithParamInterface<VideoFrameBuffer::Type> {
+ public:
+ TestVp8ImplForPixelFormat() : TestVp8Impl(), mappable_type_(GetParam()) {}
+
+ protected:
+ VideoFrameBuffer::Type mappable_type_;
+};
+
+TEST_P(TestVp8ImplForPixelFormat, EncodeNativeFrameSimulcast) {
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Release());
+
+ // Configure simulcast.
+ codec_settings_.numberOfSimulcastStreams = 3;
+ codec_settings_.simulcastStream[0] = {.width = kWidth / 4,
+ .height = kHeight / 4,
+ .maxFramerate = kFramerateFps,
+ .numberOfTemporalLayers = 1,
+ .maxBitrate = 4000,
+ .targetBitrate = 3000,
+ .minBitrate = 2000,
+ .qpMax = 80,
+ .active = true};
+ codec_settings_.simulcastStream[1] = {.width = kWidth / 2,
+ .height = kHeight / 2,
+ .maxFramerate = kFramerateFps,
+ .numberOfTemporalLayers = 1,
+ .maxBitrate = 4000,
+ .targetBitrate = 3000,
+ .minBitrate = 2000,
+ .qpMax = 80,
+ .active = true};
+ codec_settings_.simulcastStream[2] = {.width = kWidth,
+ .height = kHeight,
+ .maxFramerate = kFramerateFps,
+ .numberOfTemporalLayers = 1,
+ .maxBitrate = 4000,
+ .targetBitrate = 3000,
+ .minBitrate = 2000,
+ .qpMax = 80,
+ .active = true};
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->InitEncode(&codec_settings_, kSettings));
+
+  // Create a zero-conversion native frame in the parameterized pixel format
+  // (calling ToI420 on it crashes).
+ VideoFrame input_frame =
+ test::CreateMappableNativeFrame(1, mappable_type_, kWidth, kHeight);
+
+ EncodedImage encoded_frame;
+ CodecSpecificInfo codec_specific_info;
+ EncodeAndWaitForFrame(input_frame, &encoded_frame, &codec_specific_info);
+
+ // After encoding, we expect one mapping per simulcast layer.
+ rtc::scoped_refptr<test::MappableNativeBuffer> mappable_buffer =
+ test::GetMappableNativeBufferFromVideoFrame(input_frame);
+ std::vector<rtc::scoped_refptr<VideoFrameBuffer>> mapped_buffers =
+ mappable_buffer->GetMappedFramedBuffers();
+ ASSERT_EQ(mapped_buffers.size(), 3u);
+ EXPECT_EQ(mapped_buffers[0]->type(), mappable_type_);
+ EXPECT_EQ(mapped_buffers[0]->width(), kWidth);
+ EXPECT_EQ(mapped_buffers[0]->height(), kHeight);
+ EXPECT_EQ(mapped_buffers[1]->type(), mappable_type_);
+ EXPECT_EQ(mapped_buffers[1]->width(), kWidth / 2);
+ EXPECT_EQ(mapped_buffers[1]->height(), kHeight / 2);
+ EXPECT_EQ(mapped_buffers[2]->type(), mappable_type_);
+ EXPECT_EQ(mapped_buffers[2]->width(), kWidth / 4);
+ EXPECT_EQ(mapped_buffers[2]->height(), kHeight / 4);
+ EXPECT_FALSE(mappable_buffer->DidConvertToI420());
+
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Release());
+}
+
+INSTANTIATE_TEST_SUITE_P(All,
+ TestVp8ImplForPixelFormat,
+ ::testing::Values(VideoFrameBuffer::Type::kI420,
+ VideoFrameBuffer::Type::kNV12));
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/vp8/vp8_scalability.cc b/third_party/libwebrtc/modules/video_coding/codecs/vp8/vp8_scalability.cc
new file mode 100644
index 0000000000..9c7495ddf7
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/vp8/vp8_scalability.cc
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/codecs/vp8/vp8_scalability.h"
+
+namespace webrtc {
+
+bool VP8SupportsScalabilityMode(ScalabilityMode scalability_mode) {
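+  // A simple linear scan suffices; the supported-modes list has only three
+  // entries.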
+ for (const auto& entry : kVP8SupportedScalabilityModes) {
+ if (entry == scalability_mode) {
+ return true;
+ }
+ }
+ return false;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/vp8/vp8_scalability.h b/third_party/libwebrtc/modules/video_coding/codecs/vp8/vp8_scalability.h
new file mode 100644
index 0000000000..923f159118
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/vp8/vp8_scalability.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_CODECS_VP8_VP8_SCALABILITY_H_
+#define MODULES_VIDEO_CODING_CODECS_VP8_VP8_SCALABILITY_H_
+
+#include "api/video_codecs/scalability_mode.h"
+
+namespace webrtc {
+
+inline constexpr ScalabilityMode kVP8SupportedScalabilityModes[] = {
+ ScalabilityMode::kL1T1, ScalabilityMode::kL1T2, ScalabilityMode::kL1T3};
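+
+// Returns true if `scalability_mode` is one of the entries in
+// kVP8SupportedScalabilityModes, i.e. a single spatial layer with one, two or
+// three temporal layers. For example, ScalabilityMode::kL1T2 is supported,
+// while any mode with more than one spatial layer is not.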
+bool VP8SupportsScalabilityMode(ScalabilityMode scalability_mode);
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_CODECS_VP8_VP8_SCALABILITY_H_
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/vp9/DEPS b/third_party/libwebrtc/modules/video_coding/codecs/vp9/DEPS
new file mode 100644
index 0000000000..cc5cd70142
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/vp9/DEPS
@@ -0,0 +1,3 @@
+include_rules = [
+ "+media/base",
+]
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/vp9/include/vp9.h b/third_party/libwebrtc/modules/video_coding/codecs/vp9/include/vp9.h
new file mode 100644
index 0000000000..79d403ded3
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/vp9/include/vp9.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ *
+ */
+
+#ifndef MODULES_VIDEO_CODING_CODECS_VP9_INCLUDE_VP9_H_
+#define MODULES_VIDEO_CODING_CODECS_VP9_INCLUDE_VP9_H_
+
+#include <memory>
+#include <vector>
+
+#include "api/video_codecs/scalability_mode.h"
+#include "api/video_codecs/sdp_video_format.h"
+#include "media/base/codec.h"
+#include "modules/video_coding/include/video_codec_interface.h"
+
+namespace webrtc {
+
+// Returns a vector with all supported internal VP9 profiles that we can
+// negotiate in SDP, in order of preference.
+std::vector<SdpVideoFormat> SupportedVP9Codecs(
+ bool add_scalability_modes = false);
+
+// Returns a vector with all supported internal VP9 decode profiles in order of
+// preference. These will be available for receive-only connections.
+std::vector<SdpVideoFormat> SupportedVP9DecoderCodecs();
+
+class VP9Encoder : public VideoEncoder {
+ public:
+ // Deprecated. Returns default implementation using VP9 Profile 0.
+ // TODO(emircan): Remove once this is no longer used.
+ static std::unique_ptr<VP9Encoder> Create();
+ // Parses VP9 Profile from `codec` and returns the appropriate implementation.
+ static std::unique_ptr<VP9Encoder> Create(const cricket::VideoCodec& codec);
+ static bool SupportsScalabilityMode(ScalabilityMode scalability_mode);
+
+ ~VP9Encoder() override {}
+};
+
+class VP9Decoder : public VideoDecoder {
+ public:
+ static std::unique_ptr<VP9Decoder> Create();
+
+ ~VP9Decoder() override {}
+};
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_CODECS_VP9_INCLUDE_VP9_H_
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/vp9/include/vp9_globals.h b/third_party/libwebrtc/modules/video_coding/codecs/vp9/include/vp9_globals.h
new file mode 100644
index 0000000000..e6f644ec11
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/vp9/include/vp9_globals.h
@@ -0,0 +1,226 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+// This file contains codec dependent definitions that are needed in
+// order to compile the WebRTC codebase, even if this codec is not used.
+
+#ifndef MODULES_VIDEO_CODING_CODECS_VP9_INCLUDE_VP9_GLOBALS_H_
+#define MODULES_VIDEO_CODING_CODECS_VP9_INCLUDE_VP9_GLOBALS_H_
+
+#include <stdint.h>
+
+#include "modules/video_coding/codecs/interface/common_constants.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+const int16_t kMaxOneBytePictureId = 0x7F; // 7 bits
+const int16_t kMaxTwoBytePictureId = 0x7FFF; // 15 bits
+const uint8_t kNoSpatialIdx = 0xFF;
+const uint8_t kNoGofIdx = 0xFF;
+const uint8_t kNumVp9Buffers = 8;
+const size_t kMaxVp9RefPics = 3;
+const size_t kMaxVp9FramesInGof = 0xFF; // 8 bits
+const size_t kMaxVp9NumberOfSpatialLayers = 8;
+
+const size_t kMinVp9SpatialLayerLongSideLength = 240;
+const size_t kMinVp9SpatialLayerShortSideLength = 135;
+
+enum TemporalStructureMode {
+ kTemporalStructureMode1, // 1 temporal layer structure - i.e., IPPP...
+ kTemporalStructureMode2, // 2 temporal layers 01...
+ kTemporalStructureMode3, // 3 temporal layers 0212...
+ kTemporalStructureMode4 // 3 temporal layers 02120212...
+};
+
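+// Describes the group of frames (GOF) used in VP9 non-flexible mode: for each
+// frame in the GOF, its temporal layer index, temporal up-switch flag, and
+// picture-id differences to its reference frames. For example,
+// kTemporalStructureMode3 yields a four-frame GOF with temporal indices
+// 0, 2, 1, 2.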
+struct GofInfoVP9 {
+ void SetGofInfoVP9(TemporalStructureMode tm) {
+ switch (tm) {
+ case kTemporalStructureMode1:
+ num_frames_in_gof = 1;
+ temporal_idx[0] = 0;
+ temporal_up_switch[0] = true;
+ num_ref_pics[0] = 1;
+ pid_diff[0][0] = 1;
+ break;
+ case kTemporalStructureMode2:
+ num_frames_in_gof = 2;
+ temporal_idx[0] = 0;
+ temporal_up_switch[0] = true;
+ num_ref_pics[0] = 1;
+ pid_diff[0][0] = 2;
+
+ temporal_idx[1] = 1;
+ temporal_up_switch[1] = true;
+ num_ref_pics[1] = 1;
+ pid_diff[1][0] = 1;
+ break;
+ case kTemporalStructureMode3:
+ num_frames_in_gof = 4;
+ temporal_idx[0] = 0;
+ temporal_up_switch[0] = true;
+ num_ref_pics[0] = 1;
+ pid_diff[0][0] = 4;
+
+ temporal_idx[1] = 2;
+ temporal_up_switch[1] = true;
+ num_ref_pics[1] = 1;
+ pid_diff[1][0] = 1;
+
+ temporal_idx[2] = 1;
+ temporal_up_switch[2] = true;
+ num_ref_pics[2] = 1;
+ pid_diff[2][0] = 2;
+
+ temporal_idx[3] = 2;
+ temporal_up_switch[3] = true;
+ num_ref_pics[3] = 1;
+ pid_diff[3][0] = 1;
+ break;
+ case kTemporalStructureMode4:
+ num_frames_in_gof = 8;
+ temporal_idx[0] = 0;
+ temporal_up_switch[0] = true;
+ num_ref_pics[0] = 1;
+ pid_diff[0][0] = 4;
+
+ temporal_idx[1] = 2;
+ temporal_up_switch[1] = true;
+ num_ref_pics[1] = 1;
+ pid_diff[1][0] = 1;
+
+ temporal_idx[2] = 1;
+ temporal_up_switch[2] = false;
+ num_ref_pics[2] = 1;
+ pid_diff[2][0] = 2;
+
+ temporal_idx[3] = 2;
+ temporal_up_switch[3] = true;
+ num_ref_pics[3] = 2;
+ pid_diff[3][0] = 1;
+ pid_diff[3][1] = 2;
+
+ temporal_idx[4] = 0;
+ temporal_up_switch[4] = false;
+ num_ref_pics[4] = 1;
+ pid_diff[4][0] = 4;
+
+ temporal_idx[5] = 2;
+ temporal_up_switch[5] = true;
+ num_ref_pics[5] = 2;
+ pid_diff[5][0] = 1;
+ pid_diff[5][1] = 2;
+
+ temporal_idx[6] = 1;
+ temporal_up_switch[6] = false;
+ num_ref_pics[6] = 2;
+ pid_diff[6][0] = 2;
+ pid_diff[6][1] = 4;
+
+ temporal_idx[7] = 2;
+ temporal_up_switch[7] = true;
+ num_ref_pics[7] = 2;
+ pid_diff[7][0] = 1;
+ pid_diff[7][1] = 2;
+ break;
+ default:
+ RTC_DCHECK_NOTREACHED();
+ }
+ }
+
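+  // Copies the first `num_frames_in_gof` entries (including their reference
+  // lists) from `src`.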
+ void CopyGofInfoVP9(const GofInfoVP9& src) {
+ num_frames_in_gof = src.num_frames_in_gof;
+ for (size_t i = 0; i < num_frames_in_gof; ++i) {
+ temporal_idx[i] = src.temporal_idx[i];
+ temporal_up_switch[i] = src.temporal_up_switch[i];
+ num_ref_pics[i] = src.num_ref_pics[i];
+ for (uint8_t r = 0; r < num_ref_pics[i]; ++r) {
+ pid_diff[i][r] = src.pid_diff[i][r];
+ }
+ }
+ }
+
+ size_t num_frames_in_gof;
+ uint8_t temporal_idx[kMaxVp9FramesInGof];
+ bool temporal_up_switch[kMaxVp9FramesInGof];
+ uint8_t num_ref_pics[kMaxVp9FramesInGof];
+ uint8_t pid_diff[kMaxVp9FramesInGof][kMaxVp9RefPics];
+ uint16_t pid_start;
+};
+
+struct RTPVideoHeaderVP9 {
+ void InitRTPVideoHeaderVP9() {
+ inter_pic_predicted = false;
+ flexible_mode = false;
+ beginning_of_frame = false;
+ end_of_frame = false;
+ ss_data_available = false;
+ non_ref_for_inter_layer_pred = false;
+ picture_id = kNoPictureId;
+ max_picture_id = kMaxTwoBytePictureId;
+ tl0_pic_idx = kNoTl0PicIdx;
+ temporal_idx = kNoTemporalIdx;
+ spatial_idx = kNoSpatialIdx;
+ temporal_up_switch = false;
+ inter_layer_predicted = false;
+ gof_idx = kNoGofIdx;
+ num_ref_pics = 0;
+ num_spatial_layers = 1;
+ first_active_layer = 0;
+ end_of_picture = true;
+ }
+
+ bool inter_pic_predicted; // This layer frame is dependent on previously
+ // coded frame(s).
+ bool flexible_mode; // This frame is in flexible mode.
+ bool beginning_of_frame; // True if this packet is the first in a VP9 layer
+ // frame.
+ bool end_of_frame; // True if this packet is the last in a VP9 layer frame.
+ bool ss_data_available; // True if SS data is available in this payload
+ // descriptor.
+  bool non_ref_for_inter_layer_pred;  // True for a frame which is not used as
+                                      // a reference for inter-layer prediction.
+ int16_t picture_id; // PictureID index, 15 bits;
+ // kNoPictureId if PictureID does not exist.
+ int16_t max_picture_id; // Maximum picture ID index; either 0x7F or 0x7FFF;
+ int16_t tl0_pic_idx; // TL0PIC_IDX, 8 bits;
+ // kNoTl0PicIdx means no value provided.
+ uint8_t temporal_idx; // Temporal layer index, or kNoTemporalIdx.
+ uint8_t spatial_idx; // Spatial layer index, or kNoSpatialIdx.
+  bool temporal_up_switch;  // True if an up-switch to a higher frame rate is
+                            // possible, meaning that subsequent pictures in a
+                            // higher temporal layer will not depend on any
+                            // picture preceding the current picture (in
+                            // coding order) whose temporal layer ID is
+                            // greater than the `temporal_idx` of this frame.
+ bool inter_layer_predicted; // Frame is dependent on directly lower spatial
+ // layer frame.
+
+ uint8_t gof_idx; // Index to predefined temporal frame info in SS data.
+
+ uint8_t num_ref_pics; // Number of reference pictures used by this layer
+ // frame.
+ uint8_t pid_diff[kMaxVp9RefPics]; // P_DIFF signaled to derive the PictureID
+ // of the reference pictures.
+ int16_t ref_picture_id[kMaxVp9RefPics]; // PictureID of reference pictures.
+
+ // SS data.
+ size_t num_spatial_layers; // Always populated.
+ size_t first_active_layer; // Not sent on wire, used to adjust ss data.
+ bool spatial_layer_resolution_present;
+ uint16_t width[kMaxVp9NumberOfSpatialLayers];
+ uint16_t height[kMaxVp9NumberOfSpatialLayers];
+ GofInfoVP9 gof;
+
+ bool end_of_picture; // This frame is the last frame in picture.
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_CODECS_VP9_INCLUDE_VP9_GLOBALS_H_
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/vp9/libvpx_vp9_decoder.cc b/third_party/libwebrtc/modules/video_coding/codecs/vp9/libvpx_vp9_decoder.cc
new file mode 100644
index 0000000000..0e39cc638a
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/vp9/libvpx_vp9_decoder.cc
@@ -0,0 +1,419 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ *
+ */
+
+#ifdef RTC_ENABLE_VP9
+
+#include "modules/video_coding/codecs/vp9/libvpx_vp9_decoder.h"
+
+#include <algorithm>
+
+#include "absl/strings/match.h"
+#include "api/transport/field_trial_based_config.h"
+#include "api/video/color_space.h"
+#include "api/video/i010_buffer.h"
+#include "common_video/include/video_frame_buffer.h"
+#include "modules/video_coding/utility/vp9_uncompressed_header_parser.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "libyuv/include/libyuv/convert.h"
+#include "vpx/vp8dx.h"
+#include "vpx/vpx_decoder.h"
+
+namespace webrtc {
+namespace {
+
+// Helper function for extracting the VP9 color space.
+ColorSpace ExtractVP9ColorSpace(vpx_color_space_t space_t,
+ vpx_color_range_t range_t,
+ unsigned int bit_depth) {
+ ColorSpace::PrimaryID primaries = ColorSpace::PrimaryID::kUnspecified;
+ ColorSpace::TransferID transfer = ColorSpace::TransferID::kUnspecified;
+ ColorSpace::MatrixID matrix = ColorSpace::MatrixID::kUnspecified;
+ switch (space_t) {
+ case VPX_CS_BT_601:
+ case VPX_CS_SMPTE_170:
+ primaries = ColorSpace::PrimaryID::kSMPTE170M;
+ transfer = ColorSpace::TransferID::kSMPTE170M;
+ matrix = ColorSpace::MatrixID::kSMPTE170M;
+ break;
+ case VPX_CS_SMPTE_240:
+ primaries = ColorSpace::PrimaryID::kSMPTE240M;
+ transfer = ColorSpace::TransferID::kSMPTE240M;
+ matrix = ColorSpace::MatrixID::kSMPTE240M;
+ break;
+ case VPX_CS_BT_709:
+ primaries = ColorSpace::PrimaryID::kBT709;
+ transfer = ColorSpace::TransferID::kBT709;
+ matrix = ColorSpace::MatrixID::kBT709;
+ break;
+ case VPX_CS_BT_2020:
+ primaries = ColorSpace::PrimaryID::kBT2020;
+ switch (bit_depth) {
+ case 8:
+ transfer = ColorSpace::TransferID::kBT709;
+ break;
+ case 10:
+ transfer = ColorSpace::TransferID::kBT2020_10;
+ break;
+ default:
+ RTC_DCHECK_NOTREACHED();
+ break;
+ }
+ matrix = ColorSpace::MatrixID::kBT2020_NCL;
+ break;
+ case VPX_CS_SRGB:
+ primaries = ColorSpace::PrimaryID::kBT709;
+ transfer = ColorSpace::TransferID::kIEC61966_2_1;
+ matrix = ColorSpace::MatrixID::kBT709;
+ break;
+ default:
+ break;
+ }
+
+ ColorSpace::RangeID range = ColorSpace::RangeID::kInvalid;
+ switch (range_t) {
+ case VPX_CR_STUDIO_RANGE:
+ range = ColorSpace::RangeID::kLimited;
+ break;
+ case VPX_CR_FULL_RANGE:
+ range = ColorSpace::RangeID::kFull;
+ break;
+ default:
+ break;
+ }
+ return ColorSpace(primaries, transfer, matrix, range);
+}
+
+} // namespace
+
+LibvpxVp9Decoder::LibvpxVp9Decoder()
+ : LibvpxVp9Decoder(FieldTrialBasedConfig()) {}
+LibvpxVp9Decoder::LibvpxVp9Decoder(const FieldTrialsView& trials)
+ : decode_complete_callback_(nullptr),
+ inited_(false),
+ decoder_(nullptr),
+ key_frame_required_(true),
+ preferred_output_format_(
+ absl::StartsWith(trials.Lookup("WebRTC-NV12Decode"), "Enabled")
+ ? VideoFrameBuffer::Type::kNV12
+ : VideoFrameBuffer::Type::kI420) {}
+
+LibvpxVp9Decoder::~LibvpxVp9Decoder() {
+  inited_ = true;  // So that Release() performs the actual release.
+ Release();
+ int num_buffers_in_use = libvpx_buffer_pool_.GetNumBuffersInUse();
+ if (num_buffers_in_use > 0) {
+    // The frame buffers are reference counted and frames are exposed after
+    // decoding. There are valid use cases where previous frames are still
+    // referenced after ~LibvpxVp9Decoder; that is not a leak.
+ RTC_LOG(LS_INFO) << num_buffers_in_use
+ << " Vp9FrameBuffers are still "
+ "referenced during ~LibvpxVp9Decoder.";
+ }
+}
+
+bool LibvpxVp9Decoder::Configure(const Settings& settings) {
+ if (Release() < 0) {
+ return false;
+ }
+
+ if (decoder_ == nullptr) {
+ decoder_ = new vpx_codec_ctx_t;
+ memset(decoder_, 0, sizeof(*decoder_));
+ }
+ vpx_codec_dec_cfg_t cfg;
+ memset(&cfg, 0, sizeof(cfg));
+
+#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
+  // We focus on fuzzing WebRTC here, not libvpx itself. Use a single thread
+  // for fuzzing, because:
+  // - libvpx's single-threaded VP9 decoder is more fuzzer friendly: it
+  //   detects errors earlier than the multi-threaded version.
+  // - It keeps peak CPU usage under control (independent of the input).
+ cfg.threads = 1;
+#else
+ const RenderResolution& resolution = settings.max_render_resolution();
+ if (!resolution.Valid()) {
+ // Postpone configuring number of threads until resolution is known.
+ cfg.threads = 1;
+ } else {
+    // We want to use multithreading when decoding high-resolution videos, but
+    // not too many threads, to avoid overhead when many streams are decoded
+    // concurrently.
+    // Set 2 threads as the target for a 1280x720 pixel count, and scale up
+    // linearly from there - but cap at the physical core count.
+ // For common resolutions this results in:
+ // 1 for 360p
+ // 2 for 720p
+ // 4 for 1080p
+ // 8 for 1440p
+ // 18 for 4K
+ int num_threads = std::max(
+ 1, 2 * resolution.Width() * resolution.Height() / (1280 * 720));
+ cfg.threads = std::min(settings.number_of_cores(), num_threads);
+ }
+#endif
+
+ current_settings_ = settings;
+
+ vpx_codec_flags_t flags = 0;
+ if (vpx_codec_dec_init(decoder_, vpx_codec_vp9_dx(), &cfg, flags)) {
+ return false;
+ }
+
+ if (!libvpx_buffer_pool_.InitializeVpxUsePool(decoder_)) {
+ return false;
+ }
+
+ inited_ = true;
+ // Always start with a complete key frame.
+ key_frame_required_ = true;
+ if (absl::optional<int> buffer_pool_size = settings.buffer_pool_size()) {
+ if (!libvpx_buffer_pool_.Resize(*buffer_pool_size) ||
+ !output_buffer_pool_.Resize(*buffer_pool_size)) {
+ return false;
+ }
+ }
+
+ vpx_codec_err_t status =
+ vpx_codec_control(decoder_, VP9D_SET_LOOP_FILTER_OPT, 1);
+ if (status != VPX_CODEC_OK) {
+ RTC_LOG(LS_ERROR) << "Failed to enable VP9D_SET_LOOP_FILTER_OPT. "
+ << vpx_codec_error(decoder_);
+ return false;
+ }
+
+ return true;
+}
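+
+// Minimal usage sketch (illustrative only; `sink` is an assumed
+// DecodedImageCallback implementation and error handling is elided):
+//   LibvpxVp9Decoder decoder;
+//   VideoDecoder::Settings settings;
+//   settings.set_number_of_cores(4);
+//   settings.set_max_render_resolution({1280, 720});
+//   decoder.Configure(settings);
+//   decoder.RegisterDecodeCompleteCallback(&sink);
+//   decoder.Decode(encoded_image, /*missing_frames=*/false,
+//                  /*render_time_ms=*/0);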
+
+int LibvpxVp9Decoder::Decode(const EncodedImage& input_image,
+ bool missing_frames,
+ int64_t /*render_time_ms*/) {
+ if (!inited_) {
+ return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
+ }
+ if (decode_complete_callback_ == nullptr) {
+ return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
+ }
+
+ if (input_image._frameType == VideoFrameType::kVideoFrameKey) {
+ absl::optional<Vp9UncompressedHeader> frame_info =
+ ParseUncompressedVp9Header(
+ rtc::MakeArrayView(input_image.data(), input_image.size()));
+ if (frame_info) {
+ RenderResolution frame_resolution(frame_info->frame_width,
+ frame_info->frame_height);
+ if (frame_resolution != current_settings_.max_render_resolution()) {
+ // Resolution has changed, tear down and re-init a new decoder in
+ // order to get correct sizing.
+ Release();
+ current_settings_.set_max_render_resolution(frame_resolution);
+ if (!Configure(current_settings_)) {
+ RTC_LOG(LS_WARNING) << "Failed to re-init decoder.";
+ return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
+ }
+ }
+ } else {
+ RTC_LOG(LS_WARNING) << "Failed to parse VP9 header from key-frame.";
+ }
+ }
+
+ // Always start with a complete key frame.
+ if (key_frame_required_) {
+ if (input_image._frameType != VideoFrameType::kVideoFrameKey)
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ key_frame_required_ = false;
+ }
+ vpx_codec_iter_t iter = nullptr;
+ vpx_image_t* img;
+ const uint8_t* buffer = input_image.data();
+ if (input_image.size() == 0) {
+ buffer = nullptr; // Triggers full frame concealment.
+ }
+ // During decode libvpx may get and release buffers from
+ // `libvpx_buffer_pool_`. In practice libvpx keeps a few (~3-4) buffers alive
+ // at a time.
+ if (vpx_codec_decode(decoder_, buffer,
+ static_cast<unsigned int>(input_image.size()), 0,
+ VPX_DL_REALTIME)) {
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+ // `img->fb_priv` contains the image data, a reference counted Vp9FrameBuffer.
+ // It may be released by libvpx during future vpx_codec_decode or
+ // vpx_codec_destroy calls.
+ img = vpx_codec_get_frame(decoder_, &iter);
+ int qp;
+ vpx_codec_err_t vpx_ret =
+ vpx_codec_control(decoder_, VPXD_GET_LAST_QUANTIZER, &qp);
+ RTC_DCHECK_EQ(vpx_ret, VPX_CODEC_OK);
+ int ret =
+ ReturnFrame(img, input_image.Timestamp(), qp, input_image.ColorSpace());
+ if (ret != 0) {
+ return ret;
+ }
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+int LibvpxVp9Decoder::ReturnFrame(
+ const vpx_image_t* img,
+ uint32_t timestamp,
+ int qp,
+ const webrtc::ColorSpace* explicit_color_space) {
+ if (img == nullptr) {
+ // Decoder OK and nullptr image => No show frame.
+ return WEBRTC_VIDEO_CODEC_NO_OUTPUT;
+ }
+
+ // This buffer contains all of `img`'s image data, a reference counted
+ // Vp9FrameBuffer. (libvpx is done with the buffers after a few
+ // vpx_codec_decode calls or vpx_codec_destroy).
+ rtc::scoped_refptr<Vp9FrameBufferPool::Vp9FrameBuffer> img_buffer(
+ static_cast<Vp9FrameBufferPool::Vp9FrameBuffer*>(img->fb_priv));
+
+ // The buffer can be used directly by the VideoFrame (without copy) by
+ // using a Wrapped*Buffer.
+ rtc::scoped_refptr<VideoFrameBuffer> img_wrapped_buffer;
+ switch (img->fmt) {
+ case VPX_IMG_FMT_I420:
+ if (preferred_output_format_ == VideoFrameBuffer::Type::kNV12) {
+ rtc::scoped_refptr<NV12Buffer> nv12_buffer =
+ output_buffer_pool_.CreateNV12Buffer(img->d_w, img->d_h);
+ if (!nv12_buffer.get()) {
+ // Buffer pool is full.
+ return WEBRTC_VIDEO_CODEC_NO_OUTPUT;
+ }
+ img_wrapped_buffer = nv12_buffer;
+ libyuv::I420ToNV12(img->planes[VPX_PLANE_Y], img->stride[VPX_PLANE_Y],
+ img->planes[VPX_PLANE_U], img->stride[VPX_PLANE_U],
+ img->planes[VPX_PLANE_V], img->stride[VPX_PLANE_V],
+ nv12_buffer->MutableDataY(), nv12_buffer->StrideY(),
+ nv12_buffer->MutableDataUV(),
+ nv12_buffer->StrideUV(), img->d_w, img->d_h);
+        // We don't hold on to `img_buffer`, as it is no longer needed and
+        // can be reused.
+ } else {
+ img_wrapped_buffer = WrapI420Buffer(
+ img->d_w, img->d_h, img->planes[VPX_PLANE_Y],
+ img->stride[VPX_PLANE_Y], img->planes[VPX_PLANE_U],
+ img->stride[VPX_PLANE_U], img->planes[VPX_PLANE_V],
+ img->stride[VPX_PLANE_V],
+ // WrappedI420Buffer's mechanism for allowing the release of its
+ // frame buffer is through a callback function. This is where we
+ // should release `img_buffer`.
+ [img_buffer] {});
+ }
+ break;
+ case VPX_IMG_FMT_I422:
+ img_wrapped_buffer = WrapI422Buffer(
+ img->d_w, img->d_h, img->planes[VPX_PLANE_Y],
+ img->stride[VPX_PLANE_Y], img->planes[VPX_PLANE_U],
+ img->stride[VPX_PLANE_U], img->planes[VPX_PLANE_V],
+ img->stride[VPX_PLANE_V],
+          // WrappedI422Buffer's mechanism for allowing the release of its
+ // frame buffer is through a callback function. This is where we
+ // should release `img_buffer`.
+ [img_buffer] {});
+ break;
+ case VPX_IMG_FMT_I444:
+ img_wrapped_buffer = WrapI444Buffer(
+ img->d_w, img->d_h, img->planes[VPX_PLANE_Y],
+ img->stride[VPX_PLANE_Y], img->planes[VPX_PLANE_U],
+ img->stride[VPX_PLANE_U], img->planes[VPX_PLANE_V],
+ img->stride[VPX_PLANE_V],
+ // WrappedI444Buffer's mechanism for allowing the release of its
+ // frame buffer is through a callback function. This is where we
+ // should release `img_buffer`.
+ [img_buffer] {});
+ break;
+ case VPX_IMG_FMT_I42016:
+ img_wrapped_buffer = WrapI010Buffer(
+ img->d_w, img->d_h,
+ reinterpret_cast<const uint16_t*>(img->planes[VPX_PLANE_Y]),
+ img->stride[VPX_PLANE_Y] / 2,
+ reinterpret_cast<const uint16_t*>(img->planes[VPX_PLANE_U]),
+ img->stride[VPX_PLANE_U] / 2,
+ reinterpret_cast<const uint16_t*>(img->planes[VPX_PLANE_V]),
+ img->stride[VPX_PLANE_V] / 2, [img_buffer] {});
+ break;
+ case VPX_IMG_FMT_I42216:
+ img_wrapped_buffer = WrapI210Buffer(
+ img->d_w, img->d_h,
+ reinterpret_cast<const uint16_t*>(img->planes[VPX_PLANE_Y]),
+ img->stride[VPX_PLANE_Y] / 2,
+ reinterpret_cast<const uint16_t*>(img->planes[VPX_PLANE_U]),
+ img->stride[VPX_PLANE_U] / 2,
+ reinterpret_cast<const uint16_t*>(img->planes[VPX_PLANE_V]),
+ img->stride[VPX_PLANE_V] / 2, [img_buffer] {});
+ break;
+ default:
+ RTC_LOG(LS_ERROR) << "Unsupported pixel format produced by the decoder: "
+ << static_cast<int>(img->fmt);
+ return WEBRTC_VIDEO_CODEC_NO_OUTPUT;
+ }
+
+ auto builder = VideoFrame::Builder()
+ .set_video_frame_buffer(img_wrapped_buffer)
+ .set_timestamp_rtp(timestamp);
+ if (explicit_color_space) {
+ builder.set_color_space(*explicit_color_space);
+ } else {
+ builder.set_color_space(
+ ExtractVP9ColorSpace(img->cs, img->range, img->bit_depth));
+ }
+ VideoFrame decoded_image = builder.build();
+
+ decode_complete_callback_->Decoded(decoded_image, absl::nullopt, qp);
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+int LibvpxVp9Decoder::RegisterDecodeCompleteCallback(
+ DecodedImageCallback* callback) {
+ decode_complete_callback_ = callback;
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+int LibvpxVp9Decoder::Release() {
+ int ret_val = WEBRTC_VIDEO_CODEC_OK;
+
+ if (decoder_ != nullptr) {
+ if (inited_) {
+ // When a codec is destroyed libvpx will release any buffers of
+ // `libvpx_buffer_pool_` it is currently using.
+ if (vpx_codec_destroy(decoder_)) {
+ ret_val = WEBRTC_VIDEO_CODEC_MEMORY;
+ }
+ }
+ delete decoder_;
+ decoder_ = nullptr;
+ }
+  // Releases buffers from the pool. Any buffers not in use are deleted.
+  // Buffers still referenced externally are deleted once fully released,
+  // instead of being returned to the pool.
+ libvpx_buffer_pool_.ClearPool();
+ output_buffer_pool_.Release();
+ inited_ = false;
+ return ret_val;
+}
+
+VideoDecoder::DecoderInfo LibvpxVp9Decoder::GetDecoderInfo() const {
+ DecoderInfo info;
+ info.implementation_name = "libvpx";
+ info.is_hardware_accelerated = false;
+ return info;
+}
+
+const char* LibvpxVp9Decoder::ImplementationName() const {
+ return "libvpx";
+}
+
+} // namespace webrtc
+
+#endif // RTC_ENABLE_VP9
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/vp9/libvpx_vp9_decoder.h b/third_party/libwebrtc/modules/video_coding/codecs/vp9/libvpx_vp9_decoder.h
new file mode 100644
index 0000000000..a680441f73
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/vp9/libvpx_vp9_decoder.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ *
+ */
+
+#ifndef MODULES_VIDEO_CODING_CODECS_VP9_LIBVPX_VP9_DECODER_H_
+#define MODULES_VIDEO_CODING_CODECS_VP9_LIBVPX_VP9_DECODER_H_
+
+#ifdef RTC_ENABLE_VP9
+
+#include "api/field_trials_view.h"
+#include "api/video_codecs/video_decoder.h"
+#include "common_video/include/video_frame_buffer_pool.h"
+#include "modules/video_coding/codecs/vp9/include/vp9.h"
+#include "modules/video_coding/codecs/vp9/vp9_frame_buffer_pool.h"
+#include "vpx/vp8cx.h"
+
+namespace webrtc {
+
+class LibvpxVp9Decoder : public VP9Decoder {
+ public:
+ LibvpxVp9Decoder();
+ explicit LibvpxVp9Decoder(const FieldTrialsView& trials);
+
+ virtual ~LibvpxVp9Decoder();
+
+ bool Configure(const Settings& settings) override;
+
+ int Decode(const EncodedImage& input_image,
+ bool missing_frames,
+ int64_t /*render_time_ms*/) override;
+
+ int RegisterDecodeCompleteCallback(DecodedImageCallback* callback) override;
+
+ int Release() override;
+
+ DecoderInfo GetDecoderInfo() const override;
+ const char* ImplementationName() const override;
+
+ private:
+ int ReturnFrame(const vpx_image_t* img,
+ uint32_t timestamp,
+ int qp,
+ const webrtc::ColorSpace* explicit_color_space);
+
+ // Memory pool used to share buffers between libvpx and webrtc.
+ Vp9FrameBufferPool libvpx_buffer_pool_;
+ // Buffer pool used to allocate additionally needed NV12 buffers.
+ VideoFrameBufferPool output_buffer_pool_;
+ DecodedImageCallback* decode_complete_callback_;
+ bool inited_;
+ vpx_codec_ctx_t* decoder_;
+ bool key_frame_required_;
+ Settings current_settings_;
+
+ // Decoder should produce this format if possible.
+ const VideoFrameBuffer::Type preferred_output_format_;
+};
+} // namespace webrtc
+
+#endif // RTC_ENABLE_VP9
+
+#endif // MODULES_VIDEO_CODING_CODECS_VP9_LIBVPX_VP9_DECODER_H_
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/vp9/libvpx_vp9_encoder.cc b/third_party/libwebrtc/modules/video_coding/codecs/vp9/libvpx_vp9_encoder.cc
new file mode 100644
index 0000000000..fea11b5051
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/vp9/libvpx_vp9_encoder.cc
@@ -0,0 +1,2183 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ *
+ */
+
+#ifdef RTC_ENABLE_VP9
+
+#include "modules/video_coding/codecs/vp9/libvpx_vp9_encoder.h"
+
+#include <algorithm>
+#include <limits>
+#include <utility>
+#include <vector>
+
+#include "absl/algorithm/container.h"
+#include "absl/memory/memory.h"
+#include "absl/strings/match.h"
+#include "api/video/color_space.h"
+#include "api/video/i010_buffer.h"
+#include "common_video/include/video_frame_buffer.h"
+#include "common_video/libyuv/include/webrtc_libyuv.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "modules/video_coding/svc/create_scalability_structure.h"
+#include "modules/video_coding/svc/scalability_mode_util.h"
+#include "modules/video_coding/svc/scalable_video_controller.h"
+#include "modules/video_coding/svc/scalable_video_controller_no_layering.h"
+#include "modules/video_coding/svc/svc_rate_allocator.h"
+#include "modules/video_coding/utility/vp9_uncompressed_header_parser.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/experiments/field_trial_list.h"
+#include "rtc_base/experiments/field_trial_parser.h"
+#include "rtc_base/experiments/rate_control_settings.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/strings/string_builder.h"
+#include "rtc_base/time_utils.h"
+#include "rtc_base/trace_event.h"
+#include "libyuv/include/libyuv/convert.h"
+#include "vpx/vp8cx.h"
+#include "vpx/vpx_encoder.h"
+
+namespace webrtc {
+
+namespace {
+// Maps from gof_idx to encoder internal reference frame buffer index. These
+// maps work for 1, 2 and 3 temporal layers with GOF lengths of 1, 2 and
+// 4 frames.
+uint8_t kRefBufIdx[4] = {0, 0, 0, 1};
+uint8_t kUpdBufIdx[4] = {0, 0, 1, 0};
+
+// Maximum allowed PID difference for the different per-layer frame-rate case.
+const int kMaxAllowedPidDiff = 30;
+
+// TODO(ilnik): Tune these thresholds further.
+// Selected using the ConferenceMotion_1280_720_50.yuv clip.
+// No toggling observed on any link capacity from 100-2000kbps.
+// HD was reached consistently when link capacity was 1500kbps.
+// The resolutions set here are a bit more conservative than what
+// svc_config.cc sets; e.g. at 300kbps the resolution converged to 270p
+// instead of 360p.
+constexpr int kLowVp9QpThreshold = 149;
+constexpr int kHighVp9QpThreshold = 205;
+
+std::pair<size_t, size_t> GetActiveLayers(
+ const VideoBitrateAllocation& allocation) {
+ for (size_t sl_idx = 0; sl_idx < kMaxSpatialLayers; ++sl_idx) {
+ if (allocation.GetSpatialLayerSum(sl_idx) > 0) {
+ size_t last_layer = sl_idx + 1;
+ while (last_layer < kMaxSpatialLayers &&
+ allocation.GetSpatialLayerSum(last_layer) > 0) {
+ ++last_layer;
+ }
+ return std::make_pair(sl_idx, last_layer);
+ }
+ }
+ return {0, 0};
+}
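+
+// For illustration: an allocation with no rate on spatial layer 0 and
+// non-zero rates on layers 1 and 2 yields {1, 3}, i.e. the half-open range
+// [first_active, last_active) of spatial layers.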
+
+std::unique_ptr<ScalableVideoController> CreateVp9ScalabilityStructure(
+ const VideoCodec& codec) {
+ int num_spatial_layers = codec.VP9().numberOfSpatialLayers;
+ int num_temporal_layers =
+ std::max(1, int{codec.VP9().numberOfTemporalLayers});
+ if (num_spatial_layers == 1 && num_temporal_layers == 1) {
+ return std::make_unique<ScalableVideoControllerNoLayering>();
+ }
+
+ char name[20];
+ rtc::SimpleStringBuilder ss(name);
+ if (codec.mode == VideoCodecMode::kScreensharing) {
+ // TODO(bugs.webrtc.org/11999): Compose names of the structures when they
+ // are implemented.
+ return nullptr;
+ } else if (codec.VP9().interLayerPred == InterLayerPredMode::kOn ||
+ num_spatial_layers == 1) {
+ ss << "L" << num_spatial_layers << "T" << num_temporal_layers;
+ } else if (codec.VP9().interLayerPred == InterLayerPredMode::kOnKeyPic) {
+ ss << "L" << num_spatial_layers << "T" << num_temporal_layers << "_KEY";
+ } else {
+ RTC_DCHECK_EQ(codec.VP9().interLayerPred, InterLayerPredMode::kOff);
+ ss << "S" << num_spatial_layers << "T" << num_temporal_layers;
+ }
+
+ // Check spatial ratio.
+ if (num_spatial_layers > 1 && codec.spatialLayers[0].targetBitrate > 0) {
+ if (codec.width != codec.spatialLayers[num_spatial_layers - 1].width ||
+ codec.height != codec.spatialLayers[num_spatial_layers - 1].height) {
+ RTC_LOG(LS_WARNING)
+ << "Top layer resolution expected to match overall resolution";
+ return nullptr;
+ }
+    // Check that the ratio is one of the supported ratios.
+ int numerator;
+ int denominator;
+ if (codec.spatialLayers[1].width == 2 * codec.spatialLayers[0].width) {
+ numerator = 1;
+ denominator = 2;
+ // no suffix for 1:2 ratio.
+ } else if (2 * codec.spatialLayers[1].width ==
+ 3 * codec.spatialLayers[0].width) {
+ numerator = 2;
+ denominator = 3;
+ ss << "h";
+ } else {
+ RTC_LOG(LS_WARNING) << "Unsupported scalability ratio "
+ << codec.spatialLayers[0].width << ":"
+ << codec.spatialLayers[1].width;
+ return nullptr;
+ }
+ // Validate ratio is consistent for all spatial layer transitions.
+ for (int sid = 1; sid < num_spatial_layers; ++sid) {
+ if (codec.spatialLayers[sid].width * numerator !=
+ codec.spatialLayers[sid - 1].width * denominator ||
+ codec.spatialLayers[sid].height * numerator !=
+ codec.spatialLayers[sid - 1].height * denominator) {
+ RTC_LOG(LS_WARNING) << "Inconsistent scalability ratio " << numerator
+ << ":" << denominator;
+ return nullptr;
+ }
+ }
+ }
+
+ absl::optional<ScalabilityMode> scalability_mode =
+ ScalabilityModeFromString(name);
+ if (!scalability_mode.has_value()) {
+ RTC_LOG(LS_WARNING) << "Invalid scalability mode " << name;
+ return nullptr;
+ }
+ auto scalability_structure_controller =
+ CreateScalabilityStructure(*scalability_mode);
+ if (scalability_structure_controller == nullptr) {
+ RTC_LOG(LS_WARNING) << "Unsupported scalability structure " << name;
+ } else {
+ RTC_LOG(LS_INFO) << "Created scalability structure " << name;
+ }
+ return scalability_structure_controller;
+}
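+
+// For illustration: 2 spatial and 3 temporal layers with inter-layer
+// prediction kOn and a 1:2 spatial ratio produce the name "L2T3"; a 2:3
+// ratio appends "h" ("L2T3h"); kOnKeyPic appends "_KEY"; and kOff uses the
+// "S" prefix ("S2T3").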
+
+vpx_svc_ref_frame_config_t Vp9References(
+ rtc::ArrayView<const ScalableVideoController::LayerFrameConfig> layers) {
+ vpx_svc_ref_frame_config_t ref_config = {};
+ for (const ScalableVideoController::LayerFrameConfig& layer_frame : layers) {
+ const auto& buffers = layer_frame.Buffers();
+ RTC_DCHECK_LE(buffers.size(), 3);
+ int sid = layer_frame.SpatialId();
+ if (!buffers.empty()) {
+ ref_config.lst_fb_idx[sid] = buffers[0].id;
+ ref_config.reference_last[sid] = buffers[0].referenced;
+ if (buffers[0].updated) {
+ ref_config.update_buffer_slot[sid] |= (1 << buffers[0].id);
+ }
+ }
+ if (buffers.size() > 1) {
+ ref_config.gld_fb_idx[sid] = buffers[1].id;
+ ref_config.reference_golden[sid] = buffers[1].referenced;
+ if (buffers[1].updated) {
+ ref_config.update_buffer_slot[sid] |= (1 << buffers[1].id);
+ }
+ }
+ if (buffers.size() > 2) {
+ ref_config.alt_fb_idx[sid] = buffers[2].id;
+ ref_config.reference_alt_ref[sid] = buffers[2].referenced;
+ if (buffers[2].updated) {
+ ref_config.update_buffer_slot[sid] |= (1 << buffers[2].id);
+ }
+ }
+ }
+ // TODO(bugs.webrtc.org/11999): Fill ref_config.duration
+ return ref_config;
+}
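+
+// For illustration: a layer frame on spatial layer 0 whose first buffer is
+// {id: 2, referenced: true, updated: true} sets lst_fb_idx[0] = 2,
+// reference_last[0] = 1 and bit 2 of update_buffer_slot[0].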
+
+bool AllowDenoising() {
+ // Do not enable the denoiser on ARM since optimization is pending.
+ // Denoiser is on by default on other platforms.
+#if !defined(WEBRTC_ARCH_ARM) && !defined(WEBRTC_ARCH_ARM64) && \
+ !defined(ANDROID)
+ return true;
+#else
+ return false;
+#endif
+}
+
+} // namespace
+
+void LibvpxVp9Encoder::EncoderOutputCodedPacketCallback(vpx_codec_cx_pkt* pkt,
+ void* user_data) {
+ LibvpxVp9Encoder* enc = static_cast<LibvpxVp9Encoder*>(user_data);
+ enc->GetEncodedLayerFrame(pkt);
+}
+
+LibvpxVp9Encoder::LibvpxVp9Encoder(const cricket::VideoCodec& codec,
+ std::unique_ptr<LibvpxInterface> interface,
+ const FieldTrialsView& trials)
+ : libvpx_(std::move(interface)),
+ encoded_image_(),
+ encoded_complete_callback_(nullptr),
+ profile_(
+ ParseSdpForVP9Profile(codec.params).value_or(VP9Profile::kProfile0)),
+ inited_(false),
+ timestamp_(0),
+ rc_max_intra_target_(0),
+ encoder_(nullptr),
+ config_(nullptr),
+ raw_(nullptr),
+ input_image_(nullptr),
+ force_key_frame_(true),
+ pics_since_key_(0),
+ num_temporal_layers_(0),
+ num_spatial_layers_(0),
+ num_active_spatial_layers_(0),
+ first_active_layer_(0),
+ layer_deactivation_requires_key_frame_(absl::StartsWith(
+ trials.Lookup("WebRTC-Vp9IssueKeyFrameOnLayerDeactivation"),
+ "Enabled")),
+ is_svc_(false),
+ inter_layer_pred_(InterLayerPredMode::kOn),
+ external_ref_control_(false), // Set in InitEncode because of tests.
+ trusted_rate_controller_(
+ RateControlSettings::ParseFromKeyValueConfig(&trials)
+ .LibvpxVp9TrustedRateController()),
+ layer_buffering_(false),
+ full_superframe_drop_(true),
+ first_frame_in_picture_(true),
+ ss_info_needed_(false),
+ force_all_active_layers_(false),
+ num_cores_(0),
+ is_flexible_mode_(false),
+ variable_framerate_experiment_(ParseVariableFramerateConfig(trials)),
+ variable_framerate_controller_(
+ variable_framerate_experiment_.framerate_limit),
+ quality_scaler_experiment_(ParseQualityScalerConfig(trials)),
+ external_ref_ctrl_(
+ !absl::StartsWith(trials.Lookup("WebRTC-Vp9ExternalRefCtrl"),
+ "Disabled")),
+ performance_flags_(ParsePerformanceFlagsFromTrials(trials)),
+ num_steady_state_frames_(0),
+ config_changed_(true) {
+ codec_ = {};
+ memset(&svc_params_, 0, sizeof(vpx_svc_extra_cfg_t));
+}
+
+LibvpxVp9Encoder::~LibvpxVp9Encoder() {
+ Release();
+}
+
+void LibvpxVp9Encoder::SetFecControllerOverride(FecControllerOverride*) {
+ // Ignored.
+}
+
+int LibvpxVp9Encoder::Release() {
+ int ret_val = WEBRTC_VIDEO_CODEC_OK;
+
+ if (encoder_ != nullptr) {
+ if (inited_) {
+ if (libvpx_->codec_destroy(encoder_)) {
+ ret_val = WEBRTC_VIDEO_CODEC_MEMORY;
+ }
+ }
+ delete encoder_;
+ encoder_ = nullptr;
+ }
+ if (config_ != nullptr) {
+ delete config_;
+ config_ = nullptr;
+ }
+ if (raw_ != nullptr) {
+ libvpx_->img_free(raw_);
+ raw_ = nullptr;
+ }
+ inited_ = false;
+ return ret_val;
+}
+
+bool LibvpxVp9Encoder::ExplicitlyConfiguredSpatialLayers() const {
+  // We check the target bitrate of the 0th layer to see if the spatial layers
+  // (i.e. bitrates) were explicitly configured.
+ return codec_.spatialLayers[0].targetBitrate > 0;
+}
+
+bool LibvpxVp9Encoder::SetSvcRates(
+ const VideoBitrateAllocation& bitrate_allocation) {
+ std::pair<size_t, size_t> current_layers =
+ GetActiveLayers(current_bitrate_allocation_);
+ std::pair<size_t, size_t> new_layers = GetActiveLayers(bitrate_allocation);
+
+ const bool layer_activation_requires_key_frame =
+ inter_layer_pred_ == InterLayerPredMode::kOff ||
+ inter_layer_pred_ == InterLayerPredMode::kOnKeyPic;
+ const bool lower_layers_enabled = new_layers.first < current_layers.first;
+ const bool higher_layers_enabled = new_layers.second > current_layers.second;
+ const bool disabled_layers = new_layers.first > current_layers.first ||
+ new_layers.second < current_layers.second;
+
+ if (lower_layers_enabled ||
+ (higher_layers_enabled && layer_activation_requires_key_frame) ||
+ (disabled_layers && layer_deactivation_requires_key_frame_)) {
+ force_key_frame_ = true;
+ }
+
+ if (current_layers != new_layers) {
+ ss_info_needed_ = true;
+ }
+
+ config_->rc_target_bitrate = bitrate_allocation.get_sum_kbps();
+
+ if (ExplicitlyConfiguredSpatialLayers()) {
+ for (size_t sl_idx = 0; sl_idx < num_spatial_layers_; ++sl_idx) {
+ const bool was_layer_active = (config_->ss_target_bitrate[sl_idx] > 0);
+ config_->ss_target_bitrate[sl_idx] =
+ bitrate_allocation.GetSpatialLayerSum(sl_idx) / 1000;
+
+ for (size_t tl_idx = 0; tl_idx < num_temporal_layers_; ++tl_idx) {
+ config_->layer_target_bitrate[sl_idx * num_temporal_layers_ + tl_idx] =
+ bitrate_allocation.GetTemporalLayerSum(sl_idx, tl_idx) / 1000;
+ }
+
+ if (!was_layer_active) {
+ // Reset frame rate controller if layer is resumed after pause.
+ framerate_controller_[sl_idx].Reset();
+ }
+
+ framerate_controller_[sl_idx].SetTargetRate(
+ codec_.spatialLayers[sl_idx].maxFramerate);
+ }
+ } else {
+ float rate_ratio[VPX_MAX_LAYERS] = {0};
+ float total = 0;
+ for (int i = 0; i < num_spatial_layers_; ++i) {
+ if (svc_params_.scaling_factor_num[i] <= 0 ||
+ svc_params_.scaling_factor_den[i] <= 0) {
+ RTC_LOG(LS_ERROR) << "Scaling factors not specified!";
+ return false;
+ }
+ rate_ratio[i] = static_cast<float>(svc_params_.scaling_factor_num[i]) /
+ svc_params_.scaling_factor_den[i];
+ total += rate_ratio[i];
+ }
+
+ for (int i = 0; i < num_spatial_layers_; ++i) {
+ RTC_CHECK_GT(total, 0);
+ config_->ss_target_bitrate[i] = static_cast<unsigned int>(
+ config_->rc_target_bitrate * rate_ratio[i] / total);
+ if (num_temporal_layers_ == 1) {
+ config_->layer_target_bitrate[i] = config_->ss_target_bitrate[i];
+ } else if (num_temporal_layers_ == 2) {
+ config_->layer_target_bitrate[i * num_temporal_layers_] =
+ config_->ss_target_bitrate[i] * 2 / 3;
+ config_->layer_target_bitrate[i * num_temporal_layers_ + 1] =
+ config_->ss_target_bitrate[i];
+ } else if (num_temporal_layers_ == 3) {
+ config_->layer_target_bitrate[i * num_temporal_layers_] =
+ config_->ss_target_bitrate[i] / 2;
+ config_->layer_target_bitrate[i * num_temporal_layers_ + 1] =
+ config_->layer_target_bitrate[i * num_temporal_layers_] +
+ (config_->ss_target_bitrate[i] / 4);
+ config_->layer_target_bitrate[i * num_temporal_layers_ + 2] =
+ config_->ss_target_bitrate[i];
+ } else {
+ RTC_LOG(LS_ERROR) << "Unsupported number of temporal layers: "
+ << num_temporal_layers_;
+ return false;
+ }
+
+ framerate_controller_[i].SetTargetRate(codec_.maxFramerate);
+ }
+ }
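+  // For illustration: with the default split above, a 1200 kbps spatial-layer
+  // target yields cumulative temporal-layer targets of 800/1200 kbps for two
+  // temporal layers and 600/900/1200 kbps for three.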
+
+ num_active_spatial_layers_ = 0;
+ first_active_layer_ = 0;
+ bool seen_active_layer = false;
+ bool expect_no_more_active_layers = false;
+ for (int i = 0; i < num_spatial_layers_; ++i) {
+ if (config_->ss_target_bitrate[i] > 0) {
+      RTC_DCHECK(!expect_no_more_active_layers)
+          << "Deactivating only a middle layer is not supported.";
+ if (!seen_active_layer) {
+ first_active_layer_ = i;
+ }
+ num_active_spatial_layers_ = i + 1;
+ seen_active_layer = true;
+ } else {
+ expect_no_more_active_layers = seen_active_layer;
+ }
+ }
+
+ if (seen_active_layer && performance_flags_.use_per_layer_speed) {
+ bool denoiser_on =
+ AllowDenoising() && codec_.VP9()->denoisingOn &&
+ performance_flags_by_spatial_index_[num_active_spatial_layers_ - 1]
+ .allow_denoising;
+ libvpx_->codec_control(encoder_, VP9E_SET_NOISE_SENSITIVITY,
+ denoiser_on ? 1 : 0);
+ }
+
+ if (higher_layers_enabled && !force_key_frame_) {
+    // Prohibit dropping all layers for the next frame, so that the newly
+    // enabled layer has a valid spatial reference.
+ for (size_t i = 0; i < num_spatial_layers_; ++i) {
+ svc_drop_frame_.framedrop_thresh[i] = 0;
+ }
+ force_all_active_layers_ = true;
+ }
+
+ if (svc_controller_) {
+ for (int sid = 0; sid < num_spatial_layers_; ++sid) {
+ // Bitrates in `layer_target_bitrate` are accumulated for each temporal
+ // layer but in `VideoBitrateAllocation` they should be separated.
+ int previous_bitrate_kbps = 0;
+ for (int tid = 0; tid < num_temporal_layers_; ++tid) {
+ int accumulated_bitrate_kbps =
+ config_->layer_target_bitrate[sid * num_temporal_layers_ + tid];
+ int single_layer_bitrate_kbps =
+ accumulated_bitrate_kbps - previous_bitrate_kbps;
+ RTC_DCHECK_GE(single_layer_bitrate_kbps, 0);
+ current_bitrate_allocation_.SetBitrate(
+ sid, tid, single_layer_bitrate_kbps * 1'000);
+ previous_bitrate_kbps = accumulated_bitrate_kbps;
+ }
+ }
+ svc_controller_->OnRatesUpdated(current_bitrate_allocation_);
+ } else {
+ current_bitrate_allocation_ = bitrate_allocation;
+ }
+ config_changed_ = true;
+ return true;
+}
+
+void LibvpxVp9Encoder::DisableSpatialLayer(int sid) {
+ RTC_DCHECK_LT(sid, num_spatial_layers_);
+ if (config_->ss_target_bitrate[sid] == 0) {
+ return;
+ }
+ config_->ss_target_bitrate[sid] = 0;
+ for (int tid = 0; tid < num_temporal_layers_; ++tid) {
+ config_->layer_target_bitrate[sid * num_temporal_layers_ + tid] = 0;
+ }
+ config_changed_ = true;
+}
+
+void LibvpxVp9Encoder::EnableSpatialLayer(int sid) {
+ RTC_DCHECK_LT(sid, num_spatial_layers_);
+ if (config_->ss_target_bitrate[sid] > 0) {
+ return;
+ }
+ for (int tid = 0; tid < num_temporal_layers_; ++tid) {
+ config_->layer_target_bitrate[sid * num_temporal_layers_ + tid] =
+ current_bitrate_allocation_.GetTemporalLayerSum(sid, tid) / 1000;
+ }
+ config_->ss_target_bitrate[sid] =
+ current_bitrate_allocation_.GetSpatialLayerSum(sid) / 1000;
+ RTC_DCHECK_GT(config_->ss_target_bitrate[sid], 0);
+ config_changed_ = true;
+}
+
+void LibvpxVp9Encoder::SetActiveSpatialLayers() {
+  // The SVC controller may decide to skip a frame on a certain spatial layer
+  // even when its bitrate is non-zero; libvpx, however, uses the configured
+  // bitrates as the signal for which layers should be produced.
+ RTC_DCHECK(svc_controller_);
+ RTC_DCHECK(!layer_frames_.empty());
+ RTC_DCHECK(absl::c_is_sorted(
+ layer_frames_, [](const ScalableVideoController::LayerFrameConfig& lhs,
+ const ScalableVideoController::LayerFrameConfig& rhs) {
+ return lhs.SpatialId() < rhs.SpatialId();
+ }));
+
+ auto frame_it = layer_frames_.begin();
+ for (int sid = 0; sid < num_spatial_layers_; ++sid) {
+ if (frame_it != layer_frames_.end() && frame_it->SpatialId() == sid) {
+ EnableSpatialLayer(sid);
+ ++frame_it;
+ } else {
+ DisableSpatialLayer(sid);
+ }
+ }
+}
+
+void LibvpxVp9Encoder::SetRates(const RateControlParameters& parameters) {
+ if (!inited_) {
+ RTC_LOG(LS_WARNING) << "SetRates() called while uninitialized.";
+ return;
+ }
+ if (encoder_->err) {
+ RTC_LOG(LS_WARNING) << "Encoder in error state: " << encoder_->err;
+ return;
+ }
+ if (parameters.framerate_fps < 1.0) {
+ RTC_LOG(LS_WARNING) << "Unsupported framerate: "
+ << parameters.framerate_fps;
+ return;
+ }
+
+ codec_.maxFramerate = static_cast<uint32_t>(parameters.framerate_fps + 0.5);
+
+ bool res = SetSvcRates(parameters.bitrate);
+ RTC_DCHECK(res) << "Failed to set new bitrate allocation";
+ config_changed_ = true;
+}
+
+// TODO(eladalon): s/inst/codec_settings/g.
+int LibvpxVp9Encoder::InitEncode(const VideoCodec* inst,
+ const Settings& settings) {
+ if (inst == nullptr) {
+ return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+ }
+ if (inst->maxFramerate < 1) {
+ return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+ }
+ // Allow zero to represent an unspecified maxBitRate
+ if (inst->maxBitrate > 0 && inst->startBitrate > inst->maxBitrate) {
+ return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+ }
+ if (inst->width < 1 || inst->height < 1) {
+ return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+ }
+ if (settings.number_of_cores < 1) {
+ return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+ }
+ if (inst->VP9().numberOfTemporalLayers > 3) {
+ return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+ }
+ // libvpx probably does not support more than 3 spatial layers.
+ if (inst->VP9().numberOfSpatialLayers > 3) {
+ return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+ }
+
+ absl::optional<vpx_img_fmt_t> previous_img_fmt =
+ raw_ ? absl::make_optional<vpx_img_fmt_t>(raw_->fmt) : absl::nullopt;
+
+ int ret_val = Release();
+ if (ret_val < 0) {
+ return ret_val;
+ }
+ if (encoder_ == nullptr) {
+ encoder_ = new vpx_codec_ctx_t;
+ memset(encoder_, 0, sizeof(*encoder_));
+ }
+ if (config_ == nullptr) {
+ config_ = new vpx_codec_enc_cfg_t;
+ memset(config_, 0, sizeof(*config_));
+ }
+ timestamp_ = 0;
+ if (&codec_ != inst) {
+ codec_ = *inst;
+ }
+ memset(&svc_params_, 0, sizeof(vpx_svc_extra_cfg_t));
+
+ force_key_frame_ = true;
+ pics_since_key_ = 0;
+ num_cores_ = settings.number_of_cores;
+
+ absl::optional<ScalabilityMode> scalability_mode = inst->GetScalabilityMode();
+ if (scalability_mode.has_value()) {
+ // Use settings from `ScalabilityMode` identifier.
+ RTC_LOG(LS_INFO) << "Create scalability structure "
+ << ScalabilityModeToString(*scalability_mode);
+ svc_controller_ = CreateScalabilityStructure(*scalability_mode);
+ if (!svc_controller_) {
+ RTC_LOG(LS_WARNING) << "Failed to create scalability structure.";
+ return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+ }
+ ScalableVideoController::StreamLayersConfig info =
+ svc_controller_->StreamConfig();
+ num_spatial_layers_ = info.num_spatial_layers;
+ num_temporal_layers_ = info.num_temporal_layers;
+ inter_layer_pred_ = ScalabilityModeToInterLayerPredMode(*scalability_mode);
+ } else {
+ num_spatial_layers_ = inst->VP9().numberOfSpatialLayers;
+ RTC_DCHECK_GT(num_spatial_layers_, 0);
+ num_temporal_layers_ = inst->VP9().numberOfTemporalLayers;
+ if (num_temporal_layers_ == 0) {
+ num_temporal_layers_ = 1;
+ }
+ inter_layer_pred_ = inst->VP9().interLayerPred;
+ svc_controller_ = CreateVp9ScalabilityStructure(*inst);
+ }
+
+ framerate_controller_ = std::vector<FramerateControllerDeprecated>(
+ num_spatial_layers_, FramerateControllerDeprecated(codec_.maxFramerate));
+
+ is_svc_ = (num_spatial_layers_ > 1 || num_temporal_layers_ > 1);
+
+ // Populate encoder configuration with default values.
+ if (libvpx_->codec_enc_config_default(vpx_codec_vp9_cx(), config_, 0)) {
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+
+ vpx_img_fmt img_fmt = VPX_IMG_FMT_NONE;
+ unsigned int bits_for_storage = 8;
+ switch (profile_) {
+ case VP9Profile::kProfile0:
+ img_fmt = previous_img_fmt.value_or(VPX_IMG_FMT_I420);
+ bits_for_storage = 8;
+ config_->g_bit_depth = VPX_BITS_8;
+ config_->g_profile = 0;
+ config_->g_input_bit_depth = 8;
+ break;
+ case VP9Profile::kProfile1:
+ // Encoding of profile 1 is not implemented. It would require extended
+ // support for I444, I422, and I440 buffers.
+ RTC_DCHECK_NOTREACHED();
+ break;
+ case VP9Profile::kProfile2:
+ img_fmt = VPX_IMG_FMT_I42016;
+ bits_for_storage = 16;
+ config_->g_bit_depth = VPX_BITS_10;
+ config_->g_profile = 2;
+ config_->g_input_bit_depth = 10;
+ break;
+ case VP9Profile::kProfile3:
+ // Encoding of profile 3 is not implemented.
+ RTC_DCHECK_NOTREACHED();
+ break;
+ }
+
+  // Create a wrapper for the image, with the image data set to nullptr; the
+  // actual pointer is set in Encode(). Align is set to 1, as it is
+  // meaningless here (no actual memory is allocated).
+ raw_ = libvpx_->img_wrap(nullptr, img_fmt, codec_.width, codec_.height, 1,
+ nullptr);
+ raw_->bit_depth = bits_for_storage;
+
+ config_->g_w = codec_.width;
+ config_->g_h = codec_.height;
+ config_->rc_target_bitrate = inst->startBitrate; // in kbit/s
+ config_->g_error_resilient = is_svc_ ? VPX_ERROR_RESILIENT_DEFAULT : 0;
+ // Setting the time base of the codec.
+ config_->g_timebase.num = 1;
+ config_->g_timebase.den = 90000;
+  config_->g_lag_in_frames = 0;  // 0 = no frame lagging.
+ config_->g_threads = 1;
+ // Rate control settings.
+ config_->rc_dropframe_thresh = inst->GetFrameDropEnabled() ? 30 : 0;
+ config_->rc_end_usage = VPX_CBR;
+ config_->g_pass = VPX_RC_ONE_PASS;
+ config_->rc_min_quantizer =
+ codec_.mode == VideoCodecMode::kScreensharing ? 8 : 2;
+ config_->rc_max_quantizer = 52;
+ config_->rc_undershoot_pct = 50;
+ config_->rc_overshoot_pct = 50;
+ config_->rc_buf_initial_sz = 500;
+ config_->rc_buf_optimal_sz = 600;
+ config_->rc_buf_sz = 1000;
+ // Set the maximum target size of any key-frame.
+ rc_max_intra_target_ = MaxIntraTarget(config_->rc_buf_optimal_sz);
+ // Key-frame interval is enforced manually by this wrapper.
+ config_->kf_mode = VPX_KF_DISABLED;
+ // TODO(webm:1592): work-around for libvpx issue, as it can still
+ // put some key-frames at will even in VPX_KF_DISABLED kf_mode.
+ config_->kf_max_dist = inst->VP9().keyFrameInterval;
+ config_->kf_min_dist = config_->kf_max_dist;
+ if (quality_scaler_experiment_.enabled) {
+    // In this experiment, the WebRTC-wide quality scaler is used instead of
+    // the libvpx-internal scaler.
+ config_->rc_resize_allowed = 0;
+ } else {
+ config_->rc_resize_allowed = inst->VP9().automaticResizeOn ? 1 : 0;
+ }
+ // Determine number of threads based on the image size and #cores.
+ config_->g_threads =
+ NumberOfThreads(config_->g_w, config_->g_h, settings.number_of_cores);
+
+ is_flexible_mode_ = inst->VP9().flexibleMode;
+
+ if (num_spatial_layers_ > 1 &&
+ codec_.mode == VideoCodecMode::kScreensharing && !is_flexible_mode_) {
+ RTC_LOG(LS_ERROR) << "Flexible mode is required for screenshare with "
+ "several spatial layers";
+ return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+ }
+
+  // External reference control is required when spatial layers have different
+  // frame rates, because libvpx generates RTP-incompatible references in that
+  // case.
+ external_ref_control_ = external_ref_ctrl_ ||
+ (num_spatial_layers_ > 1 &&
+ codec_.mode == VideoCodecMode::kScreensharing) ||
+ inter_layer_pred_ == InterLayerPredMode::kOn;
+
+ if (num_temporal_layers_ == 1) {
+ gof_.SetGofInfoVP9(kTemporalStructureMode1);
+ config_->temporal_layering_mode = VP9E_TEMPORAL_LAYERING_MODE_NOLAYERING;
+ config_->ts_number_layers = 1;
+ config_->ts_rate_decimator[0] = 1;
+ config_->ts_periodicity = 1;
+ config_->ts_layer_id[0] = 0;
+ } else if (num_temporal_layers_ == 2) {
+ gof_.SetGofInfoVP9(kTemporalStructureMode2);
+ config_->temporal_layering_mode = VP9E_TEMPORAL_LAYERING_MODE_0101;
+ config_->ts_number_layers = 2;
+ config_->ts_rate_decimator[0] = 2;
+ config_->ts_rate_decimator[1] = 1;
+ config_->ts_periodicity = 2;
+ config_->ts_layer_id[0] = 0;
+ config_->ts_layer_id[1] = 1;
+ } else if (num_temporal_layers_ == 3) {
+ gof_.SetGofInfoVP9(kTemporalStructureMode3);
+ config_->temporal_layering_mode = VP9E_TEMPORAL_LAYERING_MODE_0212;
+ config_->ts_number_layers = 3;
+ config_->ts_rate_decimator[0] = 4;
+ config_->ts_rate_decimator[1] = 2;
+ config_->ts_rate_decimator[2] = 1;
+ config_->ts_periodicity = 4;
+ config_->ts_layer_id[0] = 0;
+ config_->ts_layer_id[1] = 2;
+ config_->ts_layer_id[2] = 1;
+ config_->ts_layer_id[3] = 2;
+ } else {
+ return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+ }
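+  // For illustration: the "0212" mode above encodes frames in the repeating
+  // temporal-layer pattern TL0, TL2, TL1, TL2; with rate decimators 4/2/1,
+  // TL0 alone runs at a quarter of the full frame rate.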
+
+ if (external_ref_control_) {
+ config_->temporal_layering_mode = VP9E_TEMPORAL_LAYERING_MODE_BYPASS;
+ if (num_temporal_layers_ > 1 && num_spatial_layers_ > 1 &&
+ codec_.mode == VideoCodecMode::kScreensharing) {
+ // External reference control for several temporal layers with different
+ // frame rates on spatial layers is not implemented yet.
+ return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+ }
+ }
+ ref_buf_ = {};
+
+ return InitAndSetControlSettings(inst);
+}
+
+int LibvpxVp9Encoder::NumberOfThreads(int width,
+ int height,
+ int number_of_cores) {
+ // Keep the number of encoder threads equal to the possible number of column
+ // tiles, which is (1, 2, 4, 8). See comments below for VP9E_SET_TILE_COLUMNS.
+ if (width * height >= 1280 * 720 && number_of_cores > 4) {
+ return 4;
+ } else if (width * height >= 640 * 360 && number_of_cores > 2) {
+ return 2;
+ } else {
+// Use 2 threads for low res on ARM.
+#if defined(WEBRTC_ARCH_ARM) || defined(WEBRTC_ARCH_ARM64) || \
+ defined(WEBRTC_ANDROID)
+ if (width * height >= 320 * 180 && number_of_cores > 2) {
+ return 2;
+ }
+#endif
+    // 1 thread for resolutions smaller than VGA.
+ return 1;
+ }
+}
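+
+// For illustration: NumberOfThreads(1280, 720, 8) returns 4,
+// NumberOfThreads(640, 360, 4) returns 2, and smaller resolutions fall back
+// to 1 thread (or 2 on ARM/Android for at least 320x180 with more than two
+// cores).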
+
+int LibvpxVp9Encoder::InitAndSetControlSettings(const VideoCodec* inst) {
+ // Set QP-min/max per spatial and temporal layer.
+ int tot_num_layers = num_spatial_layers_ * num_temporal_layers_;
+ for (int i = 0; i < tot_num_layers; ++i) {
+ svc_params_.max_quantizers[i] = config_->rc_max_quantizer;
+ svc_params_.min_quantizers[i] = config_->rc_min_quantizer;
+ }
+ config_->ss_number_layers = num_spatial_layers_;
+ if (svc_controller_) {
+ auto stream_config = svc_controller_->StreamConfig();
+ for (int i = 0; i < stream_config.num_spatial_layers; ++i) {
+ svc_params_.scaling_factor_num[i] = stream_config.scaling_factor_num[i];
+ svc_params_.scaling_factor_den[i] = stream_config.scaling_factor_den[i];
+ }
+ } else if (ExplicitlyConfiguredSpatialLayers()) {
+ for (int i = 0; i < num_spatial_layers_; ++i) {
+ const auto& layer = codec_.spatialLayers[i];
+ RTC_CHECK_GT(layer.width, 0);
+ const int scale_factor = codec_.width / layer.width;
+ RTC_DCHECK_GT(scale_factor, 0);
+
+      // Ensure the scale factor is an integer.
+ if (scale_factor * layer.width != codec_.width) {
+ return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+ }
+
+ // Ensure scale factor is the same in both dimensions.
+ if (scale_factor * layer.height != codec_.height) {
+ return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+ }
+
+      // Ensure the scale factor is a power of two.
+ const bool is_pow_of_two = (scale_factor & (scale_factor - 1)) == 0;
+ if (!is_pow_of_two) {
+ return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+ }
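+      // For illustration: scale factors 1, 2 and 4 pass this check; 3 is
+      // rejected since (3 & 2) != 0.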
+
+ svc_params_.scaling_factor_num[i] = 1;
+ svc_params_.scaling_factor_den[i] = scale_factor;
+
+ RTC_DCHECK_GT(codec_.spatialLayers[i].maxFramerate, 0);
+ RTC_DCHECK_LE(codec_.spatialLayers[i].maxFramerate, codec_.maxFramerate);
+ if (i > 0) {
+        // The frame rate of a higher spatial layer is expected to be equal to
+        // or higher than the frame rate of the layer below it.
+ RTC_DCHECK_GE(codec_.spatialLayers[i].maxFramerate,
+ codec_.spatialLayers[i - 1].maxFramerate);
+ }
+ }
+ } else {
+ int scaling_factor_num = 256;
+ for (int i = num_spatial_layers_ - 1; i >= 0; --i) {
+ // 1:2 scaling in each dimension.
+ svc_params_.scaling_factor_num[i] = scaling_factor_num;
+ svc_params_.scaling_factor_den[i] = 256;
+ }
+ }
+
+ UpdatePerformanceFlags();
+ RTC_DCHECK_EQ(performance_flags_by_spatial_index_.size(),
+ static_cast<size_t>(num_spatial_layers_));
+
+ SvcRateAllocator init_allocator(codec_);
+ current_bitrate_allocation_ =
+ init_allocator.Allocate(VideoBitrateAllocationParameters(
+ inst->startBitrate * 1000, inst->maxFramerate));
+ if (!SetSvcRates(current_bitrate_allocation_)) {
+ return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+ }
+
+ const vpx_codec_err_t rv = libvpx_->codec_enc_init(
+ encoder_, vpx_codec_vp9_cx(), config_,
+ config_->g_bit_depth == VPX_BITS_8 ? 0 : VPX_CODEC_USE_HIGHBITDEPTH);
+ if (rv != VPX_CODEC_OK) {
+ RTC_LOG(LS_ERROR) << "Init error: " << libvpx_->codec_err_to_string(rv);
+ return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
+ }
+
+ if (performance_flags_.use_per_layer_speed) {
+ for (int si = 0; si < num_spatial_layers_; ++si) {
+ svc_params_.speed_per_layer[si] =
+ performance_flags_by_spatial_index_[si].base_layer_speed;
+ svc_params_.loopfilter_ctrl[si] =
+ performance_flags_by_spatial_index_[si].deblock_mode;
+ }
+ bool denoiser_on =
+ AllowDenoising() && inst->VP9().denoisingOn &&
+ performance_flags_by_spatial_index_[num_spatial_layers_ - 1]
+ .allow_denoising;
+ libvpx_->codec_control(encoder_, VP9E_SET_NOISE_SENSITIVITY,
+ denoiser_on ? 1 : 0);
+ }
+
+ libvpx_->codec_control(encoder_, VP8E_SET_MAX_INTRA_BITRATE_PCT,
+ rc_max_intra_target_);
+ libvpx_->codec_control(encoder_, VP9E_SET_AQ_MODE,
+ inst->VP9().adaptiveQpMode ? 3 : 0);
+
+ libvpx_->codec_control(encoder_, VP9E_SET_FRAME_PARALLEL_DECODING, 0);
+ libvpx_->codec_control(encoder_, VP9E_SET_SVC_GF_TEMPORAL_REF, 0);
+
+ if (is_svc_) {
+ libvpx_->codec_control(encoder_, VP9E_SET_SVC, 1);
+ libvpx_->codec_control(encoder_, VP9E_SET_SVC_PARAMETERS, &svc_params_);
+ }
+ if (!is_svc_ || !performance_flags_.use_per_layer_speed) {
+ libvpx_->codec_control(
+ encoder_, VP8E_SET_CPUUSED,
+ performance_flags_by_spatial_index_.rbegin()->base_layer_speed);
+ }
+
+ if (num_spatial_layers_ > 1) {
+ switch (inter_layer_pred_) {
+ case InterLayerPredMode::kOn:
+ libvpx_->codec_control(encoder_, VP9E_SET_SVC_INTER_LAYER_PRED, 0);
+ break;
+ case InterLayerPredMode::kOff:
+ libvpx_->codec_control(encoder_, VP9E_SET_SVC_INTER_LAYER_PRED, 1);
+ break;
+ case InterLayerPredMode::kOnKeyPic:
+ libvpx_->codec_control(encoder_, VP9E_SET_SVC_INTER_LAYER_PRED, 2);
+ break;
+ default:
+ RTC_DCHECK_NOTREACHED();
+ }
+
+ memset(&svc_drop_frame_, 0, sizeof(svc_drop_frame_));
+ const bool reverse_constrained_drop_mode =
+ inter_layer_pred_ == InterLayerPredMode::kOn &&
+ codec_.mode == VideoCodecMode::kScreensharing &&
+ num_spatial_layers_ > 1;
+ if (reverse_constrained_drop_mode) {
+      // Screenshare dropping mode: drop a layer only together with all lower
+      // layers. This ensures that drops on lower layers won't reduce the
+      // frame rate of higher layers and that the reference structure stays
+      // RTP-compatible.
+ svc_drop_frame_.framedrop_mode = CONSTRAINED_FROM_ABOVE_DROP;
+ svc_drop_frame_.max_consec_drop = 5;
+ for (size_t i = 0; i < num_spatial_layers_; ++i) {
+ svc_drop_frame_.framedrop_thresh[i] = config_->rc_dropframe_thresh;
+ }
+ // No buffering is needed because the highest layer is always present in
+ // all frames in CONSTRAINED_FROM_ABOVE drop mode.
+ layer_buffering_ = false;
+ } else {
+      // Configure the encoder to drop the entire superframe whenever it needs
+      // to drop a layer. This mode is preferred over per-layer dropping,
+      // which causes quality flickering and is not compatible with RTP
+      // non-flexible mode.
+ svc_drop_frame_.framedrop_mode =
+ full_superframe_drop_ ? FULL_SUPERFRAME_DROP : CONSTRAINED_LAYER_DROP;
+ // Buffering is needed only for constrained layer drop, as it's not clear
+ // which frame is the last.
+ layer_buffering_ = !full_superframe_drop_;
+ svc_drop_frame_.max_consec_drop = std::numeric_limits<int>::max();
+ for (size_t i = 0; i < num_spatial_layers_; ++i) {
+ svc_drop_frame_.framedrop_thresh[i] = config_->rc_dropframe_thresh;
+ }
+ }
+ libvpx_->codec_control(encoder_, VP9E_SET_SVC_FRAME_DROP_LAYER,
+ &svc_drop_frame_);
+ }
+
+ // Register callback for getting each spatial layer.
+ vpx_codec_priv_output_cx_pkt_cb_pair_t cbp = {
+ LibvpxVp9Encoder::EncoderOutputCodedPacketCallback,
+ reinterpret_cast<void*>(this)};
+ libvpx_->codec_control(encoder_, VP9E_REGISTER_CX_CALLBACK,
+ reinterpret_cast<void*>(&cbp));
+
+ // Control function to set the number of column tiles in encoding a frame, in
+ // log2 unit: e.g., 0 = 1 tile column, 1 = 2 tile columns, 2 = 4 tile columns.
+  // The number of tile columns will be capped by the encoder based on image
+  // size (the minimum tile-column width is 256 pixels, the maximum is 4096).
+ libvpx_->codec_control(encoder_, VP9E_SET_TILE_COLUMNS,
+ static_cast<int>((config_->g_threads >> 1)));
+
+ // Turn on row-based multithreading.
+ libvpx_->codec_control(encoder_, VP9E_SET_ROW_MT, 1);
+
+ if (AllowDenoising() && !performance_flags_.use_per_layer_speed) {
+ libvpx_->codec_control(encoder_, VP9E_SET_NOISE_SENSITIVITY,
+ inst->VP9().denoisingOn ? 1 : 0);
+ }
+
+ if (codec_.mode == VideoCodecMode::kScreensharing) {
+ // Adjust internal parameters to screen content.
+ libvpx_->codec_control(encoder_, VP9E_SET_TUNE_CONTENT, 1);
+ }
+ // Enable encoder skip of static/low content blocks.
+ libvpx_->codec_control(encoder_, VP8E_SET_STATIC_THRESHOLD, 1);
+ inited_ = true;
+ config_changed_ = true;
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+uint32_t LibvpxVp9Encoder::MaxIntraTarget(uint32_t optimal_buffer_size) {
+ // Set max to the optimal buffer level (normalized by target BR),
+ // and scaled by a scale_par.
+ // Max target size = scale_par * optimal_buffer_size * targetBR[Kbps].
+ // This value is presented in percentage of perFrameBw:
+ // perFrameBw = targetBR[Kbps] * 1000 / framerate.
+ // The target in % is as follows:
+ float scale_par = 0.5;
+ uint32_t target_pct =
+ optimal_buffer_size * scale_par * codec_.maxFramerate / 10;
+ // Don't go below 3 times the per frame bandwidth.
+ const uint32_t min_intra_size = 300;
+ return (target_pct < min_intra_size) ? min_intra_size : target_pct;
+}
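+
+// For illustration: with the default rc_buf_optimal_sz of 600 and 30 fps,
+// MaxIntraTarget() returns 0.5 * 600 * 30 / 10 = 900, i.e. a key frame may
+// use up to 9x the average per-frame bandwidth.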
+
+int LibvpxVp9Encoder::Encode(const VideoFrame& input_image,
+ const std::vector<VideoFrameType>* frame_types) {
+ if (!inited_) {
+ return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
+ }
+ if (encoded_complete_callback_ == nullptr) {
+ return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
+ }
+ if (num_active_spatial_layers_ == 0) {
+ // All spatial layers are disabled, return without encoding anything.
+ return WEBRTC_VIDEO_CODEC_OK;
+ }
+
+ // We only support one stream at the moment.
+ if (frame_types && !frame_types->empty()) {
+ if ((*frame_types)[0] == VideoFrameType::kVideoFrameKey) {
+ force_key_frame_ = true;
+ }
+ }
+
+ if (pics_since_key_ + 1 ==
+ static_cast<size_t>(codec_.VP9()->keyFrameInterval)) {
+ force_key_frame_ = true;
+ }
+
+ if (svc_controller_) {
+ layer_frames_ = svc_controller_->NextFrameConfig(force_key_frame_);
+ if (layer_frames_.empty()) {
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+ if (layer_frames_.front().IsKeyframe()) {
+ force_key_frame_ = true;
+ }
+ }
+
+ vpx_svc_layer_id_t layer_id = {0};
+ if (!force_key_frame_) {
+ const size_t gof_idx = (pics_since_key_ + 1) % gof_.num_frames_in_gof;
+ layer_id.temporal_layer_id = gof_.temporal_idx[gof_idx];
+
+ if (codec_.mode == VideoCodecMode::kScreensharing) {
+ const uint32_t frame_timestamp_ms =
+ 1000 * input_image.timestamp() / kVideoPayloadTypeFrequency;
+
+ // To ensure that several rate-limiters with different limits don't
+ // interfere, they must be queried in order of increasing limit.
+
+ bool use_steady_state_limiter =
+ variable_framerate_experiment_.enabled &&
+ input_image.update_rect().IsEmpty() &&
+ num_steady_state_frames_ >=
+ variable_framerate_experiment_.frames_before_steady_state;
+
+      // Need to check all frame limiters, even if lower layers are disabled,
+      // because the variable frame-rate limiter should be checked after the
+      // first layer. It's easier to overwrite the active layers afterwards
+      // than to check all cases up front.
+ for (uint8_t sl_idx = 0; sl_idx < num_active_spatial_layers_; ++sl_idx) {
+ const float layer_fps =
+ framerate_controller_[layer_id.spatial_layer_id].GetTargetRate();
+ // Use steady state rate-limiter at the correct place.
+ if (use_steady_state_limiter &&
+ layer_fps > variable_framerate_experiment_.framerate_limit - 1e-9) {
+ if (variable_framerate_controller_.DropFrame(frame_timestamp_ms)) {
+ layer_id.spatial_layer_id = num_active_spatial_layers_;
+ }
+          // Always break: if the rate limiter triggered a frame drop, there
+          // is no need to continue; otherwise, the rate is lower than the
+          // next limiters' limits.
+ break;
+ }
+ if (framerate_controller_[sl_idx].DropFrame(frame_timestamp_ms)) {
+ ++layer_id.spatial_layer_id;
+ } else {
+ break;
+ }
+ }
+
+ if (use_steady_state_limiter &&
+ layer_id.spatial_layer_id < num_active_spatial_layers_) {
+ variable_framerate_controller_.AddFrame(frame_timestamp_ms);
+ }
+ }
+
+ if (force_all_active_layers_) {
+ layer_id.spatial_layer_id = first_active_layer_;
+ force_all_active_layers_ = false;
+ }
+
+ RTC_DCHECK_LE(layer_id.spatial_layer_id, num_active_spatial_layers_);
+ if (layer_id.spatial_layer_id >= num_active_spatial_layers_) {
+ // Drop entire picture.
+ return WEBRTC_VIDEO_CODEC_OK;
+ }
+ }
+
+ // Need to set temporal layer id on ALL layers, even disabled ones.
+ // Otherwise libvpx might produce frames on a disabled layer:
+ // http://crbug.com/1051476
+ for (int sl_idx = 0; sl_idx < num_spatial_layers_; ++sl_idx) {
+ layer_id.temporal_layer_id_per_spatial[sl_idx] = layer_id.temporal_layer_id;
+ }
+
+ if (layer_id.spatial_layer_id < first_active_layer_) {
+ layer_id.spatial_layer_id = first_active_layer_;
+ }
+
+ if (svc_controller_) {
+ layer_id.spatial_layer_id = layer_frames_.front().SpatialId();
+ layer_id.temporal_layer_id = layer_frames_.front().TemporalId();
+ for (const auto& layer : layer_frames_) {
+ layer_id.temporal_layer_id_per_spatial[layer.SpatialId()] =
+ layer.TemporalId();
+ }
+ SetActiveSpatialLayers();
+ }
+
+ if (is_svc_ && performance_flags_.use_per_layer_speed) {
+ // Update speed settings that might depend on temporal index.
+ bool speed_updated = false;
+ for (int sl_idx = 0; sl_idx < num_spatial_layers_; ++sl_idx) {
+ const int target_speed =
+ layer_id.temporal_layer_id_per_spatial[sl_idx] == 0
+ ? performance_flags_by_spatial_index_[sl_idx].base_layer_speed
+ : performance_flags_by_spatial_index_[sl_idx].high_layer_speed;
+ if (svc_params_.speed_per_layer[sl_idx] != target_speed) {
+ svc_params_.speed_per_layer[sl_idx] = target_speed;
+ speed_updated = true;
+ }
+ }
+ if (speed_updated) {
+ libvpx_->codec_control(encoder_, VP9E_SET_SVC_PARAMETERS, &svc_params_);
+ }
+ }
+
+ libvpx_->codec_control(encoder_, VP9E_SET_SVC_LAYER_ID, &layer_id);
+
+ if (num_spatial_layers_ > 1) {
+ // Update frame dropping settings as they may change on per-frame basis.
+ libvpx_->codec_control(encoder_, VP9E_SET_SVC_FRAME_DROP_LAYER,
+ &svc_drop_frame_);
+ }
+
+ if (config_changed_) {
+ if (libvpx_->codec_enc_config_set(encoder_, config_)) {
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+
+ if (!performance_flags_.use_per_layer_speed) {
+      // We are not setting individual speeds per layer; instead, find the
+      // highest active resolution and base the speed on that.
+ for (int i = num_spatial_layers_ - 1; i >= 0; --i) {
+ if (config_->ss_target_bitrate[i] > 0) {
+ int width = (svc_params_.scaling_factor_num[i] * config_->g_w) /
+ svc_params_.scaling_factor_den[i];
+ int height = (svc_params_.scaling_factor_num[i] * config_->g_h) /
+ svc_params_.scaling_factor_den[i];
+ int speed =
+ std::prev(performance_flags_.settings_by_resolution.lower_bound(
+ width * height))
+ ->second.base_layer_speed;
+ libvpx_->codec_control(encoder_, VP8E_SET_CPUUSED, speed);
+ break;
+ }
+ }
+ }
+ config_changed_ = false;
+ }
+
+ if (input_image.width() != codec_.width ||
+ input_image.height() != codec_.height) {
+ int ret = UpdateCodecFrameSize(input_image);
+ if (ret < 0) {
+ return ret;
+ }
+ }
+
+ RTC_DCHECK_EQ(input_image.width(), raw_->d_w);
+ RTC_DCHECK_EQ(input_image.height(), raw_->d_h);
+
+  // Keep a pointer to the input image for use in the encoded-image callback,
+  // since the callback needs some of its information. Alternatively, only the
+  // necessary fields (such as the timestamp) could be saved here instead.
+ input_image_ = &input_image;
+
+ // In case we need to map the buffer, `mapped_buffer` is used to keep it alive
+ // through reference counting until after encoding has finished.
+ rtc::scoped_refptr<const VideoFrameBuffer> mapped_buffer;
+ const I010BufferInterface* i010_buffer;
+ rtc::scoped_refptr<const I010BufferInterface> i010_copy;
+ switch (profile_) {
+ case VP9Profile::kProfile0: {
+ mapped_buffer =
+ PrepareBufferForProfile0(input_image.video_frame_buffer());
+ if (!mapped_buffer) {
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+ break;
+ }
+ case VP9Profile::kProfile1: {
+ RTC_DCHECK_NOTREACHED();
+ break;
+ }
+ case VP9Profile::kProfile2: {
+ // We can inject kI010 frames directly for encode. All other formats
+ // should be converted to it.
+ switch (input_image.video_frame_buffer()->type()) {
+ case VideoFrameBuffer::Type::kI010: {
+ i010_buffer = input_image.video_frame_buffer()->GetI010();
+ break;
+ }
+ default: {
+ auto i420_buffer = input_image.video_frame_buffer()->ToI420();
+ if (!i420_buffer) {
+ RTC_LOG(LS_ERROR) << "Failed to convert "
+ << VideoFrameBufferTypeToString(
+ input_image.video_frame_buffer()->type())
+ << " image to I420. Can't encode frame.";
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+ i010_copy = I010Buffer::Copy(*i420_buffer);
+ i010_buffer = i010_copy.get();
+ }
+ }
+ raw_->planes[VPX_PLANE_Y] = const_cast<uint8_t*>(
+ reinterpret_cast<const uint8_t*>(i010_buffer->DataY()));
+ raw_->planes[VPX_PLANE_U] = const_cast<uint8_t*>(
+ reinterpret_cast<const uint8_t*>(i010_buffer->DataU()));
+ raw_->planes[VPX_PLANE_V] = const_cast<uint8_t*>(
+ reinterpret_cast<const uint8_t*>(i010_buffer->DataV()));
+ raw_->stride[VPX_PLANE_Y] = i010_buffer->StrideY() * 2;
+ raw_->stride[VPX_PLANE_U] = i010_buffer->StrideU() * 2;
+ raw_->stride[VPX_PLANE_V] = i010_buffer->StrideV() * 2;
+ break;
+ }
+ case VP9Profile::kProfile3: {
+ RTC_DCHECK_NOTREACHED();
+ break;
+ }
+ }
+
+ vpx_enc_frame_flags_t flags = 0;
+ if (force_key_frame_) {
+ flags = VPX_EFLAG_FORCE_KF;
+ }
+
+ if (svc_controller_) {
+ vpx_svc_ref_frame_config_t ref_config = Vp9References(layer_frames_);
+ libvpx_->codec_control(encoder_, VP9E_SET_SVC_REF_FRAME_CONFIG,
+ &ref_config);
+ } else if (external_ref_control_) {
+ vpx_svc_ref_frame_config_t ref_config =
+ SetReferences(force_key_frame_, layer_id.spatial_layer_id);
+
+ if (VideoCodecMode::kScreensharing == codec_.mode) {
+ for (uint8_t sl_idx = 0; sl_idx < num_active_spatial_layers_; ++sl_idx) {
+ ref_config.duration[sl_idx] = static_cast<int64_t>(
+ 90000 / (std::min(static_cast<float>(codec_.maxFramerate),
+ framerate_controller_[sl_idx].GetTargetRate())));
+ }
+ }
+
+ libvpx_->codec_control(encoder_, VP9E_SET_SVC_REF_FRAME_CONFIG,
+ &ref_config);
+ }
+
+ first_frame_in_picture_ = true;
+
+  // TODO(ssilkin): Frame duration should be specified per spatial layer,
+  // since their frame rates can differ. For now, calculate the frame duration
+  // based on the target frame rate of the highest spatial layer, which is
+  // supposed to be equal to or higher than the frame rate of the lower
+  // spatial layers. Also, the timestamp should represent the actual time
+  // passed since the previous frame (not the 'expected' time). Then the rate
+  // controller can drain the buffer more accurately.
+ RTC_DCHECK_GE(framerate_controller_.size(), num_active_spatial_layers_);
+ float target_framerate_fps =
+ (codec_.mode == VideoCodecMode::kScreensharing)
+ ? std::min(static_cast<float>(codec_.maxFramerate),
+ framerate_controller_[num_active_spatial_layers_ - 1]
+ .GetTargetRate())
+ : codec_.maxFramerate;
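+  // For illustration: at 30 fps this yields 90000 / 30 = 3000 ticks of the
+  // 1/90000 s codec timebase (matching the RTP clock rate).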
+ uint32_t duration = static_cast<uint32_t>(90000 / target_framerate_fps);
+ const vpx_codec_err_t rv = libvpx_->codec_encode(
+ encoder_, raw_, timestamp_, duration, flags, VPX_DL_REALTIME);
+ if (rv != VPX_CODEC_OK) {
+ RTC_LOG(LS_ERROR) << "Encoding error: " << libvpx_->codec_err_to_string(rv)
+ << "\n"
+ "Details: "
+ << libvpx_->codec_error(encoder_) << "\n"
+ << libvpx_->codec_error_detail(encoder_);
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+ timestamp_ += duration;
+
+ if (layer_buffering_) {
+ const bool end_of_picture = true;
+ DeliverBufferedFrame(end_of_picture);
+ }
+
+ return WEBRTC_VIDEO_CODEC_OK;
+}
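+
+// Illustrative sketch (not part of the build): how the 90 kHz RTP clock maps
+// a target frame rate to the `duration` passed to libvpx_->codec_encode()
+// above. At 30 fps each frame spans 90000 / 30 = 3000 ticks, and `timestamp_`
+// then advances by that amount.
+//
+//   uint32_t DurationInRtpTicks(float target_framerate_fps) {
+//     RTC_DCHECK_GT(target_framerate_fps, 0.0f);
+//     return static_cast<uint32_t>(90000 / target_framerate_fps);
+//   }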
+
+int LibvpxVp9Encoder::UpdateCodecFrameSize(
+ const VideoFrame& input_image) {
+  RTC_LOG(LS_INFO) << "Reconfiguring VP9 from " <<
+    codec_.width << "x" << codec_.height << " to " <<
+    input_image.width() << "x" << input_image.height();
+  // Preserve the latest bitrate/framerate settings.
+ // TODO: Mozilla - see below, we need to save more state here.
+ //uint32_t old_bitrate_kbit = config_->rc_target_bitrate;
+ //uint32_t old_framerate = codec_.maxFramerate;
+
+ codec_.width = input_image.width();
+ codec_.height = input_image.height();
+
+ vpx_img_free(raw_);
+ raw_ = vpx_img_wrap(NULL, VPX_IMG_FMT_I420, codec_.width, codec_.height,
+ 1, NULL);
+ // Update encoder context for new frame size.
+ config_->g_w = codec_.width;
+ config_->g_h = codec_.height;
+
+ // Determine number of threads based on the image size and #cores.
+ config_->g_threads = NumberOfThreads(codec_.width, codec_.height,
+ num_cores_);
+
+  // NOTE: We would like to do this the same way vp8 does it
+  // (with vpx_codec_enc_config_set()), but that causes asserts
+  // in AQ mode 3 (cyclic); AQ mode 0 works, but resizing to smaller
+  // than 1/2 x 1/2 of the original asserts in convolve(). Given these
+  // bugs in trying to do it the "right" way, we basically redo
+  // the initialization.
+ vpx_codec_destroy(encoder_); // clean up old state
+ int result = InitAndSetControlSettings(&codec_);
+ if (result == WEBRTC_VIDEO_CODEC_OK) {
+    // TODO: Mozilla - rates have become much more complicated; we need to
+    // store more state or find another way of doing this.
+ //return SetRates(old_bitrate_kbit, old_framerate);
+ RTC_CHECK(false);
+ return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
+ }
+ return result;
+}
+
+bool LibvpxVp9Encoder::PopulateCodecSpecific(CodecSpecificInfo* codec_specific,
+ absl::optional<int>* spatial_idx,
+ absl::optional<int>* temporal_idx,
+ const vpx_codec_cx_pkt& pkt) {
+ RTC_CHECK(codec_specific != nullptr);
+ codec_specific->codecType = kVideoCodecVP9;
+ CodecSpecificInfoVP9* vp9_info = &(codec_specific->codecSpecific.VP9);
+
+ vp9_info->first_frame_in_picture = first_frame_in_picture_;
+ vp9_info->flexible_mode = is_flexible_mode_;
+
+ if (pkt.data.frame.flags & VPX_FRAME_IS_KEY) {
+ pics_since_key_ = 0;
+ } else if (first_frame_in_picture_) {
+ ++pics_since_key_;
+ }
+
+ vpx_svc_layer_id_t layer_id = {0};
+ libvpx_->codec_control(encoder_, VP9E_GET_SVC_LAYER_ID, &layer_id);
+
+ // Can't have keyframe with non-zero temporal layer.
+ RTC_DCHECK(pics_since_key_ != 0 || layer_id.temporal_layer_id == 0);
+
+ RTC_CHECK_GT(num_temporal_layers_, 0);
+ RTC_CHECK_GT(num_active_spatial_layers_, 0);
+ if (num_temporal_layers_ == 1) {
+ RTC_CHECK_EQ(layer_id.temporal_layer_id, 0);
+ vp9_info->temporal_idx = kNoTemporalIdx;
+ *temporal_idx = absl::nullopt;
+ } else {
+ vp9_info->temporal_idx = layer_id.temporal_layer_id;
+ *temporal_idx = layer_id.temporal_layer_id;
+ }
+ if (num_active_spatial_layers_ == 1) {
+ RTC_CHECK_EQ(layer_id.spatial_layer_id, 0);
+ *spatial_idx = absl::nullopt;
+ } else {
+ *spatial_idx = layer_id.spatial_layer_id;
+ }
+
+ const bool is_key_pic = (pics_since_key_ == 0);
+ const bool is_inter_layer_pred_allowed =
+ (inter_layer_pred_ == InterLayerPredMode::kOn ||
+ (inter_layer_pred_ == InterLayerPredMode::kOnKeyPic && is_key_pic));
+
+  // Always set inter_layer_predicted to true on a high-layer frame if
+  // inter-layer prediction (ILP) is allowed, even if the encoder didn't
+  // actually use it. Setting inter_layer_predicted to false would allow the
+  // receiver to decode a high-layer frame without decoding the low-layer
+  // frame. If that happened (e.g. if the low-layer frame were lost), the
+  // receiver wouldn't be able to decode the next high-layer frame that
+  // uses ILP.
+ vp9_info->inter_layer_predicted =
+ first_frame_in_picture_ ? false : is_inter_layer_pred_allowed;
+
+  // Mark all low spatial layer frames as references (not just frames of
+  // active low spatial layers) if inter-layer prediction is enabled, since
+  // these frames are indirect references of the high spatial layer, which
+  // can later be enabled without a key frame.
+ vp9_info->non_ref_for_inter_layer_pred =
+ !is_inter_layer_pred_allowed ||
+ layer_id.spatial_layer_id + 1 == num_spatial_layers_;
+
+ // Always populate this, so that the packetizer can properly set the marker
+ // bit.
+ vp9_info->num_spatial_layers = num_active_spatial_layers_;
+ vp9_info->first_active_layer = first_active_layer_;
+
+ vp9_info->num_ref_pics = 0;
+ FillReferenceIndices(pkt, pics_since_key_, vp9_info->inter_layer_predicted,
+ vp9_info);
+ if (vp9_info->flexible_mode) {
+ vp9_info->gof_idx = kNoGofIdx;
+ if (!svc_controller_) {
+ if (num_temporal_layers_ == 1) {
+ vp9_info->temporal_up_switch = true;
+ } else {
+        // In flexible mode with > 1 temporal layer but no SVC controller we
+        // can't technically determine whether a frame is an upswitch point;
+        // use gof-based data as a proxy for now.
+ // TODO(sprang): Remove once SVC controller is the only choice.
+ vp9_info->gof_idx =
+ static_cast<uint8_t>(pics_since_key_ % gof_.num_frames_in_gof);
+ vp9_info->temporal_up_switch =
+ gof_.temporal_up_switch[vp9_info->gof_idx];
+ }
+ }
+ } else {
+ vp9_info->gof_idx =
+ static_cast<uint8_t>(pics_since_key_ % gof_.num_frames_in_gof);
+ vp9_info->temporal_up_switch = gof_.temporal_up_switch[vp9_info->gof_idx];
+ RTC_DCHECK(vp9_info->num_ref_pics == gof_.num_ref_pics[vp9_info->gof_idx] ||
+ vp9_info->num_ref_pics == 0);
+ }
+
+ vp9_info->inter_pic_predicted = (!is_key_pic && vp9_info->num_ref_pics > 0);
+
+  // Write SS on a key frame of independently coded spatial layers and on a
+  // base temporal/spatial layer frame if the number of layers changed
+  // without issuing a key picture (inter-layer prediction is enabled).
+ const bool is_key_frame = is_key_pic && !vp9_info->inter_layer_predicted;
+ if (is_key_frame || (ss_info_needed_ && layer_id.temporal_layer_id == 0 &&
+ layer_id.spatial_layer_id == first_active_layer_)) {
+ vp9_info->ss_data_available = true;
+ vp9_info->spatial_layer_resolution_present = true;
+ // Signal disabled layers.
+ for (size_t i = 0; i < first_active_layer_; ++i) {
+ vp9_info->width[i] = 0;
+ vp9_info->height[i] = 0;
+ }
+ for (size_t i = first_active_layer_; i < num_active_spatial_layers_; ++i) {
+ vp9_info->width[i] = codec_.width * svc_params_.scaling_factor_num[i] /
+ svc_params_.scaling_factor_den[i];
+ vp9_info->height[i] = codec_.height * svc_params_.scaling_factor_num[i] /
+ svc_params_.scaling_factor_den[i];
+ }
+ if (vp9_info->flexible_mode) {
+ vp9_info->gof.num_frames_in_gof = 0;
+ } else {
+ vp9_info->gof.CopyGofInfoVP9(gof_);
+ }
+
+ ss_info_needed_ = false;
+ } else {
+ vp9_info->ss_data_available = false;
+ }
+
+ first_frame_in_picture_ = false;
+
+ // Populate codec-agnostic section in the codec specific structure.
+ if (svc_controller_) {
+ auto it = absl::c_find_if(
+ layer_frames_,
+ [&](const ScalableVideoController::LayerFrameConfig& config) {
+ return config.SpatialId() == layer_id.spatial_layer_id;
+ });
+ if (it == layer_frames_.end()) {
+ RTC_LOG(LS_ERROR) << "Encoder produced a frame for layer S"
+ << layer_id.spatial_layer_id << "T"
+ << layer_id.temporal_layer_id
+ << " that wasn't requested.";
+ return false;
+ }
+ codec_specific->generic_frame_info = svc_controller_->OnEncodeDone(*it);
+ if (is_key_frame) {
+ codec_specific->template_structure =
+ svc_controller_->DependencyStructure();
+ auto& resolutions = codec_specific->template_structure->resolutions;
+ resolutions.resize(num_spatial_layers_);
+ for (int sid = 0; sid < num_spatial_layers_; ++sid) {
+ resolutions[sid] = RenderResolution(
+ /*width=*/codec_.width * svc_params_.scaling_factor_num[sid] /
+ svc_params_.scaling_factor_den[sid],
+ /*height=*/codec_.height * svc_params_.scaling_factor_num[sid] /
+ svc_params_.scaling_factor_den[sid]);
+ }
+ }
+ if (is_flexible_mode_) {
+ // Populate data for legacy temporal-upswitch state.
+ // We can switch up to a higher temporal layer only if all temporal layers
+ // higher than this (within the current spatial layer) are switch points.
+ vp9_info->temporal_up_switch = true;
+ for (int i = layer_id.temporal_layer_id + 1; i < num_temporal_layers_;
+ ++i) {
+ // Assumes decode targets are always ordered first by spatial then by
+ // temporal id.
+ size_t dti_index =
+ (layer_id.spatial_layer_id * num_temporal_layers_) + i;
+ vp9_info->temporal_up_switch &=
+ (codec_specific->generic_frame_info
+ ->decode_target_indications[dti_index] ==
+ DecodeTargetIndication::kSwitch);
+ }
+ }
+ }
+ return true;
+}
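+
+// Illustrative sketch (not part of the build): the decode-target index layout
+// assumed in the upswitch loop above. With decode targets ordered first by
+// spatial and then by temporal id, target (sid, tid) lives at
+// sid * num_temporal_layers + tid; e.g. with 3 temporal layers, S1T2 maps to
+// index 1 * 3 + 2 = 5.
+//
+//   size_t DecodeTargetIndex(int sid, int tid, int num_temporal_layers) {
+//     return static_cast<size_t>(sid) * num_temporal_layers + tid;
+//   }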
+
+void LibvpxVp9Encoder::FillReferenceIndices(const vpx_codec_cx_pkt& pkt,
+ const size_t pic_num,
+ const bool inter_layer_predicted,
+ CodecSpecificInfoVP9* vp9_info) {
+ vpx_svc_layer_id_t layer_id = {0};
+ libvpx_->codec_control(encoder_, VP9E_GET_SVC_LAYER_ID, &layer_id);
+
+ const bool is_key_frame =
+ (pkt.data.frame.flags & VPX_FRAME_IS_KEY) ? true : false;
+
+ std::vector<RefFrameBuffer> ref_buf_list;
+
+ if (is_svc_) {
+ vpx_svc_ref_frame_config_t enc_layer_conf = {{0}};
+ libvpx_->codec_control(encoder_, VP9E_GET_SVC_REF_FRAME_CONFIG,
+ &enc_layer_conf);
+ int ref_buf_flags = 0;
+
+ if (enc_layer_conf.reference_last[layer_id.spatial_layer_id]) {
+ const size_t fb_idx =
+ enc_layer_conf.lst_fb_idx[layer_id.spatial_layer_id];
+ RTC_DCHECK_LT(fb_idx, ref_buf_.size());
+ if (std::find(ref_buf_list.begin(), ref_buf_list.end(),
+ ref_buf_[fb_idx]) == ref_buf_list.end()) {
+ ref_buf_list.push_back(ref_buf_[fb_idx]);
+ ref_buf_flags |= 1 << fb_idx;
+ }
+ }
+
+ if (enc_layer_conf.reference_alt_ref[layer_id.spatial_layer_id]) {
+ const size_t fb_idx =
+ enc_layer_conf.alt_fb_idx[layer_id.spatial_layer_id];
+ RTC_DCHECK_LT(fb_idx, ref_buf_.size());
+ if (std::find(ref_buf_list.begin(), ref_buf_list.end(),
+ ref_buf_[fb_idx]) == ref_buf_list.end()) {
+ ref_buf_list.push_back(ref_buf_[fb_idx]);
+ ref_buf_flags |= 1 << fb_idx;
+ }
+ }
+
+ if (enc_layer_conf.reference_golden[layer_id.spatial_layer_id]) {
+ const size_t fb_idx =
+ enc_layer_conf.gld_fb_idx[layer_id.spatial_layer_id];
+ RTC_DCHECK_LT(fb_idx, ref_buf_.size());
+ if (std::find(ref_buf_list.begin(), ref_buf_list.end(),
+ ref_buf_[fb_idx]) == ref_buf_list.end()) {
+ ref_buf_list.push_back(ref_buf_[fb_idx]);
+ ref_buf_flags |= 1 << fb_idx;
+ }
+ }
+
+ RTC_LOG(LS_VERBOSE) << "Frame " << pic_num << " sl "
+ << layer_id.spatial_layer_id << " tl "
+                        << layer_id.temporal_layer_id << " referred buffers "
+ << (ref_buf_flags & (1 << 0) ? 1 : 0)
+ << (ref_buf_flags & (1 << 1) ? 1 : 0)
+ << (ref_buf_flags & (1 << 2) ? 1 : 0)
+ << (ref_buf_flags & (1 << 3) ? 1 : 0)
+ << (ref_buf_flags & (1 << 4) ? 1 : 0)
+ << (ref_buf_flags & (1 << 5) ? 1 : 0)
+ << (ref_buf_flags & (1 << 6) ? 1 : 0)
+ << (ref_buf_flags & (1 << 7) ? 1 : 0);
+
+ } else if (!is_key_frame) {
+ RTC_DCHECK_EQ(num_spatial_layers_, 1);
+ RTC_DCHECK_EQ(num_temporal_layers_, 1);
+    // In non-SVC mode the encoder doesn't provide a reference list. Assume
+    // each frame refers to the previous one, which is stored in buffer 0.
+ ref_buf_list.push_back(ref_buf_[0]);
+ }
+
+ std::vector<size_t> ref_pid_list;
+
+ vp9_info->num_ref_pics = 0;
+ for (const RefFrameBuffer& ref_buf : ref_buf_list) {
+ RTC_DCHECK_LE(ref_buf.pic_num, pic_num);
+ if (ref_buf.pic_num < pic_num) {
+ if (inter_layer_pred_ != InterLayerPredMode::kOn) {
+ // RTP spec limits temporal prediction to the same spatial layer.
+ // It is safe to ignore this requirement if inter-layer prediction is
+ // enabled for all frames when all base frames are relayed to receiver.
+ RTC_DCHECK_EQ(ref_buf.spatial_layer_id, layer_id.spatial_layer_id);
+ } else {
+ RTC_DCHECK_LE(ref_buf.spatial_layer_id, layer_id.spatial_layer_id);
+ }
+ RTC_DCHECK_LE(ref_buf.temporal_layer_id, layer_id.temporal_layer_id);
+
+      // The encoder may reference several spatial layers of the same previous
+      // frame in case some spatial layers are skipped on the current frame.
+      // We shouldn't emit duplicate references, as that may break some old
+      // clients and isn't RTP-compatible.
+ if (std::find(ref_pid_list.begin(), ref_pid_list.end(),
+ ref_buf.pic_num) != ref_pid_list.end()) {
+ continue;
+ }
+ ref_pid_list.push_back(ref_buf.pic_num);
+
+ const size_t p_diff = pic_num - ref_buf.pic_num;
+ RTC_DCHECK_LE(p_diff, 127UL);
+
+ vp9_info->p_diff[vp9_info->num_ref_pics] = static_cast<uint8_t>(p_diff);
+ ++vp9_info->num_ref_pics;
+ } else {
+ RTC_DCHECK(inter_layer_predicted);
+      // The RTP spec only allows using the previous spatial layer for
+      // inter-layer prediction.
+ RTC_DCHECK_EQ(ref_buf.spatial_layer_id + 1, layer_id.spatial_layer_id);
+ }
+ }
+}
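+
+// Illustrative sketch (not part of the build): the p_diff cap checked above.
+// The VP9 RTP payload descriptor encodes each reference as a 7-bit picture-id
+// delta, so pic_num - ref_buf.pic_num must fit in 1..127 for a temporal
+// reference.
+//
+//   bool IsEncodablePdiff(size_t pic_num, size_t ref_pic_num) {
+//     return ref_pic_num < pic_num && pic_num - ref_pic_num <= 127;
+//   }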
+
+void LibvpxVp9Encoder::UpdateReferenceBuffers(const vpx_codec_cx_pkt& pkt,
+ const size_t pic_num) {
+ vpx_svc_layer_id_t layer_id = {0};
+ libvpx_->codec_control(encoder_, VP9E_GET_SVC_LAYER_ID, &layer_id);
+
+ RefFrameBuffer frame_buf = {.pic_num = pic_num,
+ .spatial_layer_id = layer_id.spatial_layer_id,
+ .temporal_layer_id = layer_id.temporal_layer_id};
+
+ if (is_svc_) {
+ vpx_svc_ref_frame_config_t enc_layer_conf = {{0}};
+ libvpx_->codec_control(encoder_, VP9E_GET_SVC_REF_FRAME_CONFIG,
+ &enc_layer_conf);
+ const int update_buffer_slot =
+ enc_layer_conf.update_buffer_slot[layer_id.spatial_layer_id];
+
+ for (size_t i = 0; i < ref_buf_.size(); ++i) {
+ if (update_buffer_slot & (1 << i)) {
+ ref_buf_[i] = frame_buf;
+ }
+ }
+
+ RTC_LOG(LS_VERBOSE) << "Frame " << pic_num << " sl "
+ << layer_id.spatial_layer_id << " tl "
+ << layer_id.temporal_layer_id << " updated buffers "
+ << (update_buffer_slot & (1 << 0) ? 1 : 0)
+ << (update_buffer_slot & (1 << 1) ? 1 : 0)
+ << (update_buffer_slot & (1 << 2) ? 1 : 0)
+ << (update_buffer_slot & (1 << 3) ? 1 : 0)
+ << (update_buffer_slot & (1 << 4) ? 1 : 0)
+ << (update_buffer_slot & (1 << 5) ? 1 : 0)
+ << (update_buffer_slot & (1 << 6) ? 1 : 0)
+ << (update_buffer_slot & (1 << 7) ? 1 : 0);
+ } else {
+ RTC_DCHECK_EQ(num_spatial_layers_, 1);
+ RTC_DCHECK_EQ(num_temporal_layers_, 1);
+    // In non-SVC mode the encoder doesn't provide a reference list. Assume
+    // each frame is a reference and is stored in buffer 0.
+ ref_buf_[0] = frame_buf;
+ }
+}
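+
+// Illustrative sketch (not part of the build): reading the per-layer
+// `update_buffer_slot` bitmask used above. Bit i set means libvpx stored the
+// just-encoded frame in reference buffer slot i.
+//
+//   std::vector<int> UpdatedSlots(int update_buffer_slot, size_t num_slots) {
+//     std::vector<int> slots;
+//     for (size_t i = 0; i < num_slots; ++i) {
+//       if (update_buffer_slot & (1 << i))
+//         slots.push_back(static_cast<int>(i));
+//     }
+//     return slots;
+//   }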
+
+vpx_svc_ref_frame_config_t LibvpxVp9Encoder::SetReferences(
+ bool is_key_pic,
+ int first_active_spatial_layer_id) {
+ // kRefBufIdx, kUpdBufIdx need to be updated to support longer GOFs.
+ RTC_DCHECK_LE(gof_.num_frames_in_gof, 4);
+
+ vpx_svc_ref_frame_config_t ref_config;
+ memset(&ref_config, 0, sizeof(ref_config));
+
+ const size_t num_temporal_refs = std::max(1, num_temporal_layers_ - 1);
+ const bool is_inter_layer_pred_allowed =
+ inter_layer_pred_ == InterLayerPredMode::kOn ||
+ (inter_layer_pred_ == InterLayerPredMode::kOnKeyPic && is_key_pic);
+ absl::optional<int> last_updated_buf_idx;
+
+  // Put the temporal reference in LAST and the spatial reference in GOLDEN.
+  // Update a frame buffer (i.e. store the encoded frame) if the current frame
+  // is a temporal reference (i.e. it belongs to a low temporal layer) or a
+  // spatial reference. In the latter case, always store the spatial reference
+  // in the last reference frame buffer.
+  // For the case of 3 temporal and 3 spatial layers we need 6 frame buffers
+  // for temporal references plus 1 buffer for the spatial reference: 7
+  // buffers in total.
+
+ for (int sl_idx = first_active_spatial_layer_id;
+ sl_idx < num_active_spatial_layers_; ++sl_idx) {
+ const size_t curr_pic_num = is_key_pic ? 0 : pics_since_key_ + 1;
+ const size_t gof_idx = curr_pic_num % gof_.num_frames_in_gof;
+
+ if (!is_key_pic) {
+ // Set up temporal reference.
+ const int buf_idx = sl_idx * num_temporal_refs + kRefBufIdx[gof_idx];
+
+ // Last reference frame buffer is reserved for spatial reference. It is
+ // not supposed to be used for temporal prediction.
+ RTC_DCHECK_LT(buf_idx, kNumVp9Buffers - 1);
+
+ const int pid_diff = curr_pic_num - ref_buf_[buf_idx].pic_num;
+ // Incorrect spatial layer may be in the buffer due to a key-frame.
+ const bool same_spatial_layer =
+ ref_buf_[buf_idx].spatial_layer_id == sl_idx;
+ bool correct_pid = false;
+ if (is_flexible_mode_) {
+ correct_pid = pid_diff > 0 && pid_diff < kMaxAllowedPidDiff;
+ } else {
+        // The code below assumes a single temporal reference.
+ RTC_DCHECK_EQ(gof_.num_ref_pics[gof_idx], 1);
+ correct_pid = pid_diff == gof_.pid_diff[gof_idx][0];
+ }
+
+ if (same_spatial_layer && correct_pid) {
+ ref_config.lst_fb_idx[sl_idx] = buf_idx;
+ ref_config.reference_last[sl_idx] = 1;
+ } else {
+        // This reference doesn't match the one specified by the GOF. This can
+        // only happen if a spatial layer is enabled dynamically without a key
+        // frame. Spatial prediction is supposed to be enabled in this case.
+ RTC_DCHECK(is_inter_layer_pred_allowed &&
+ sl_idx > first_active_spatial_layer_id);
+ }
+ }
+
+ if (is_inter_layer_pred_allowed && sl_idx > first_active_spatial_layer_id) {
+ // Set up spatial reference.
+ RTC_DCHECK(last_updated_buf_idx);
+ ref_config.gld_fb_idx[sl_idx] = *last_updated_buf_idx;
+ ref_config.reference_golden[sl_idx] = 1;
+ } else {
+ RTC_DCHECK(ref_config.reference_last[sl_idx] != 0 ||
+ sl_idx == first_active_spatial_layer_id ||
+ inter_layer_pred_ == InterLayerPredMode::kOff);
+ }
+
+ last_updated_buf_idx.reset();
+
+ if (gof_.temporal_idx[gof_idx] < num_temporal_layers_ - 1 ||
+ num_temporal_layers_ == 1) {
+ last_updated_buf_idx = sl_idx * num_temporal_refs + kUpdBufIdx[gof_idx];
+
+ // Ensure last frame buffer is not used for temporal prediction (it is
+ // reserved for spatial reference).
+ RTC_DCHECK_LT(*last_updated_buf_idx, kNumVp9Buffers - 1);
+ } else if (is_inter_layer_pred_allowed) {
+ last_updated_buf_idx = kNumVp9Buffers - 1;
+ }
+
+ if (last_updated_buf_idx) {
+ ref_config.update_buffer_slot[sl_idx] = 1 << *last_updated_buf_idx;
+ }
+ }
+
+ return ref_config;
+}
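+
+// Illustrative sketch (not part of the build): the buffer layout implied by
+// the comment in SetReferences(). Each spatial layer gets
+// max(1, num_temporal_layers - 1) slots for temporal references, and the last
+// of the kNumVp9Buffers slots is reserved for the spatial reference. With 3
+// temporal and 3 spatial layers: 3 * 2 = 6 temporal slots + 1 spatial slot
+// = 7 buffers.
+//
+//   int TemporalSlotBase(int sl_idx, int num_temporal_layers) {
+//     const int num_temporal_refs = std::max(1, num_temporal_layers - 1);
+//     return sl_idx * num_temporal_refs;  // Offset for kRefBufIdx/kUpdBufIdx.
+//   }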
+
+void LibvpxVp9Encoder::GetEncodedLayerFrame(const vpx_codec_cx_pkt* pkt) {
+ RTC_DCHECK_EQ(pkt->kind, VPX_CODEC_CX_FRAME_PKT);
+
+ if (pkt->data.frame.sz == 0) {
+ // Ignore dropped frame.
+ return;
+ }
+
+ vpx_svc_layer_id_t layer_id = {0};
+ libvpx_->codec_control(encoder_, VP9E_GET_SVC_LAYER_ID, &layer_id);
+
+ if (layer_buffering_) {
+ // Deliver buffered low spatial layer frame.
+ const bool end_of_picture = false;
+ DeliverBufferedFrame(end_of_picture);
+ }
+
+ encoded_image_.SetEncodedData(EncodedImageBuffer::Create(
+ static_cast<const uint8_t*>(pkt->data.frame.buf), pkt->data.frame.sz));
+
+ codec_specific_ = {};
+ absl::optional<int> spatial_index;
+ absl::optional<int> temporal_index;
+ if (!PopulateCodecSpecific(&codec_specific_, &spatial_index, &temporal_index,
+ *pkt)) {
+ // Drop the frame.
+ encoded_image_.set_size(0);
+ return;
+ }
+ encoded_image_.SetSpatialIndex(spatial_index);
+ encoded_image_.SetTemporalIndex(temporal_index);
+
+ const bool is_key_frame =
+ ((pkt->data.frame.flags & VPX_FRAME_IS_KEY) ? true : false) &&
+ !codec_specific_.codecSpecific.VP9.inter_layer_predicted;
+
+ // Ensure encoder issued key frame on request.
+ RTC_DCHECK(is_key_frame || !force_key_frame_);
+
+ // Check if encoded frame is a key frame.
+ encoded_image_._frameType = VideoFrameType::kVideoFrameDelta;
+ if (is_key_frame) {
+ encoded_image_._frameType = VideoFrameType::kVideoFrameKey;
+ force_key_frame_ = false;
+ }
+
+ UpdateReferenceBuffers(*pkt, pics_since_key_);
+
+ TRACE_COUNTER1("webrtc", "EncodedFrameSize", encoded_image_.size());
+ encoded_image_.SetTimestamp(input_image_->timestamp());
+ encoded_image_.SetColorSpace(input_image_->color_space());
+ encoded_image_._encodedHeight =
+ pkt->data.frame.height[layer_id.spatial_layer_id];
+ encoded_image_._encodedWidth =
+ pkt->data.frame.width[layer_id.spatial_layer_id];
+ int qp = -1;
+ libvpx_->codec_control(encoder_, VP8E_GET_LAST_QUANTIZER, &qp);
+ encoded_image_.qp_ = qp;
+
+ if (!layer_buffering_) {
+ const bool end_of_picture = encoded_image_.SpatialIndex().value_or(0) + 1 ==
+ num_active_spatial_layers_;
+ DeliverBufferedFrame(end_of_picture);
+ }
+}
+
+void LibvpxVp9Encoder::DeliverBufferedFrame(bool end_of_picture) {
+ if (encoded_image_.size() > 0) {
+ if (num_spatial_layers_ > 1) {
+      // Restore frame-dropping settings, as dropping may be temporarily
+      // forbidden due to dynamically enabled layers.
+ for (size_t i = 0; i < num_spatial_layers_; ++i) {
+ svc_drop_frame_.framedrop_thresh[i] = config_->rc_dropframe_thresh;
+ }
+ }
+
+ codec_specific_.end_of_picture = end_of_picture;
+
+ encoded_complete_callback_->OnEncodedImage(encoded_image_,
+ &codec_specific_);
+
+ if (codec_.mode == VideoCodecMode::kScreensharing) {
+ const uint8_t spatial_idx = encoded_image_.SpatialIndex().value_or(0);
+ const uint32_t frame_timestamp_ms =
+ 1000 * encoded_image_.Timestamp() / kVideoPayloadTypeFrequency;
+ framerate_controller_[spatial_idx].AddFrame(frame_timestamp_ms);
+
+ const size_t steady_state_size = SteadyStateSize(
+ spatial_idx, codec_specific_.codecSpecific.VP9.temporal_idx);
+
+      // Only frames on spatial layers that may be limited in a steady state
+      // are considered for steady-state detection.
+ if (framerate_controller_[spatial_idx].GetTargetRate() >
+ variable_framerate_experiment_.framerate_limit + 1e-9) {
+ if (encoded_image_.qp_ <=
+ variable_framerate_experiment_.steady_state_qp &&
+ encoded_image_.size() <= steady_state_size) {
+ ++num_steady_state_frames_;
+ } else {
+ num_steady_state_frames_ = 0;
+ }
+ }
+ }
+ encoded_image_.set_size(0);
+ }
+}
+
+int LibvpxVp9Encoder::RegisterEncodeCompleteCallback(
+ EncodedImageCallback* callback) {
+ encoded_complete_callback_ = callback;
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+VideoEncoder::EncoderInfo LibvpxVp9Encoder::GetEncoderInfo() const {
+ EncoderInfo info;
+ info.supports_native_handle = false;
+ info.implementation_name = "libvpx";
+ if (quality_scaler_experiment_.enabled && inited_ &&
+ codec_.VP9().automaticResizeOn) {
+ info.scaling_settings = VideoEncoder::ScalingSettings(
+ quality_scaler_experiment_.low_qp, quality_scaler_experiment_.high_qp);
+ } else {
+ info.scaling_settings = VideoEncoder::ScalingSettings::kOff;
+ }
+ info.has_trusted_rate_controller = trusted_rate_controller_;
+ info.is_hardware_accelerated = false;
+ if (inited_) {
+ // Find the max configured fps of any active spatial layer.
+ float max_fps = 0.0;
+ for (size_t si = 0; si < num_spatial_layers_; ++si) {
+ if (codec_.spatialLayers[si].active &&
+ codec_.spatialLayers[si].maxFramerate > max_fps) {
+ max_fps = codec_.spatialLayers[si].maxFramerate;
+ }
+ }
+
+ for (size_t si = 0; si < num_spatial_layers_; ++si) {
+ info.fps_allocation[si].clear();
+ if (!codec_.spatialLayers[si].active) {
+ continue;
+ }
+
+ // This spatial layer may already use a fraction of the total frame rate.
+ const float sl_fps_fraction =
+ codec_.spatialLayers[si].maxFramerate / max_fps;
+ for (size_t ti = 0; ti < num_temporal_layers_; ++ti) {
+ const uint32_t decimator =
+ num_temporal_layers_ <= 1 ? 1 : config_->ts_rate_decimator[ti];
+ RTC_DCHECK_GT(decimator, 0);
+ info.fps_allocation[si].push_back(
+ rtc::saturated_cast<uint8_t>(EncoderInfo::kMaxFramerateFraction *
+ (sl_fps_fraction / decimator)));
+ }
+ }
+ if (profile_ == VP9Profile::kProfile0) {
+ info.preferred_pixel_formats = {VideoFrameBuffer::Type::kI420,
+ VideoFrameBuffer::Type::kNV12};
+ }
+ }
+ if (!encoder_info_override_.resolution_bitrate_limits().empty()) {
+ info.resolution_bitrate_limits =
+ encoder_info_override_.resolution_bitrate_limits();
+ }
+ return info;
+}
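+
+// Illustrative sketch (not part of the build): how the fps_allocation entries
+// above are read. Each value is a fraction of
+// EncoderInfo::kMaxFramerateFraction (255); e.g. with ts_rate_decimator
+// {4, 2, 1} a spatial layer running at the full frame rate reports roughly
+// {63, 127, 255} for its three temporal layers.
+//
+//   uint8_t FpsFraction(float sl_fps_fraction, uint32_t decimator) {
+//     return rtc::saturated_cast<uint8_t>(
+//         EncoderInfo::kMaxFramerateFraction * (sl_fps_fraction / decimator));
+//   }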
+
+size_t LibvpxVp9Encoder::SteadyStateSize(int sid, int tid) {
+ const size_t bitrate_bps = current_bitrate_allocation_.GetBitrate(
+ sid, tid == kNoTemporalIdx ? 0 : tid);
+ const float fps = (codec_.mode == VideoCodecMode::kScreensharing)
+ ? std::min(static_cast<float>(codec_.maxFramerate),
+ framerate_controller_[sid].GetTargetRate())
+ : codec_.maxFramerate;
+ return static_cast<size_t>(
+ bitrate_bps / (8 * fps) *
+ (100 -
+ variable_framerate_experiment_.steady_state_undershoot_percentage) /
+ 100 +
+ 0.5);
+}
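+
+// Illustrative sketch (not part of the build): the steady-state size formula
+// above, worked for 500 kbps at 30 fps with the default 30% undershoot:
+// 500000 / (8 * 30) * (100 - 30) / 100 + 0.5 ~= 1458 bytes per frame.
+//
+//   size_t SteadyStateSizeBytes(size_t bitrate_bps, float fps, int undershoot) {
+//     return static_cast<size_t>(
+//         bitrate_bps / (8 * fps) * (100 - undershoot) / 100 + 0.5);
+//   }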
+
+// static
+LibvpxVp9Encoder::VariableFramerateExperiment
+LibvpxVp9Encoder::ParseVariableFramerateConfig(const FieldTrialsView& trials) {
+ FieldTrialFlag enabled = FieldTrialFlag("Enabled");
+ FieldTrialParameter<double> framerate_limit("min_fps", 5.0);
+ FieldTrialParameter<int> qp("min_qp", 32);
+ FieldTrialParameter<int> undershoot_percentage("undershoot", 30);
+ FieldTrialParameter<int> frames_before_steady_state(
+ "frames_before_steady_state", 5);
+ ParseFieldTrial({&enabled, &framerate_limit, &qp, &undershoot_percentage,
+ &frames_before_steady_state},
+ trials.Lookup("WebRTC-VP9VariableFramerateScreenshare"));
+ VariableFramerateExperiment config;
+ config.enabled = enabled.Get();
+ config.framerate_limit = framerate_limit.Get();
+ config.steady_state_qp = qp.Get();
+ config.steady_state_undershoot_percentage = undershoot_percentage.Get();
+ config.frames_before_steady_state = frames_before_steady_state.Get();
+
+ return config;
+}
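+
+// Illustrative sketch (not part of the build): the kind of trial group string
+// the parser above consumes. The exact value comes from the field trial
+// config; the one below is an assumed example only.
+//
+//   "WebRTC-VP9VariableFramerateScreenshare" ->
+//       "Enabled,min_fps:7.5,min_qp:28,undershoot:25,"
+//       "frames_before_steady_state:10"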
+
+// static
+LibvpxVp9Encoder::QualityScalerExperiment
+LibvpxVp9Encoder::ParseQualityScalerConfig(const FieldTrialsView& trials) {
+ FieldTrialFlag disabled = FieldTrialFlag("Disabled");
+ FieldTrialParameter<int> low_qp("low_qp", kLowVp9QpThreshold);
+ FieldTrialParameter<int> high_qp("hihg_qp", kHighVp9QpThreshold);
+ ParseFieldTrial({&disabled, &low_qp, &high_qp},
+ trials.Lookup("WebRTC-VP9QualityScaler"));
+ QualityScalerExperiment config;
+ config.enabled = !disabled.Get();
+  RTC_LOG(LS_INFO) << "WebRTC quality scaler for VP9 is "
+                   << (config.enabled ? "enabled" : "disabled");
+ config.low_qp = low_qp.Get();
+ config.high_qp = high_qp.Get();
+
+ return config;
+}
+
+void LibvpxVp9Encoder::UpdatePerformanceFlags() {
+ flat_map<int, PerformanceFlags::ParameterSet> params_by_resolution;
+ if (codec_.GetVideoEncoderComplexity() ==
+ VideoCodecComplexity::kComplexityLow) {
+ // For low tier devices, always use speed 9. Only disable upper
+ // layer deblocking below QCIF.
+ params_by_resolution[0] = {.base_layer_speed = 9,
+ .high_layer_speed = 9,
+ .deblock_mode = 1,
+ .allow_denoising = true};
+ params_by_resolution[352 * 288] = {.base_layer_speed = 9,
+ .high_layer_speed = 9,
+ .deblock_mode = 0,
+ .allow_denoising = true};
+ } else {
+ params_by_resolution = performance_flags_.settings_by_resolution;
+ }
+
+ const auto find_speed = [&](int min_pixel_count) {
+ RTC_DCHECK(!params_by_resolution.empty());
+ auto it = params_by_resolution.upper_bound(min_pixel_count);
+ return std::prev(it)->second;
+ };
+ performance_flags_by_spatial_index_.clear();
+
+ if (is_svc_) {
+ for (int si = 0; si < num_spatial_layers_; ++si) {
+ performance_flags_by_spatial_index_.push_back(find_speed(
+ codec_.spatialLayers[si].width * codec_.spatialLayers[si].height));
+ }
+ } else {
+ performance_flags_by_spatial_index_.push_back(
+ find_speed(codec_.width * codec_.height));
+ }
+}
+
+// static
+LibvpxVp9Encoder::PerformanceFlags
+LibvpxVp9Encoder::ParsePerformanceFlagsFromTrials(
+ const FieldTrialsView& trials) {
+ struct Params : public PerformanceFlags::ParameterSet {
+ int min_pixel_count = 0;
+ };
+
+ FieldTrialStructList<Params> trials_list(
+ {FieldTrialStructMember("min_pixel_count",
+ [](Params* p) { return &p->min_pixel_count; }),
+ FieldTrialStructMember("high_layer_speed",
+ [](Params* p) { return &p->high_layer_speed; }),
+ FieldTrialStructMember("base_layer_speed",
+ [](Params* p) { return &p->base_layer_speed; }),
+ FieldTrialStructMember("deblock_mode",
+ [](Params* p) { return &p->deblock_mode; }),
+ FieldTrialStructMember("denoiser",
+ [](Params* p) { return &p->allow_denoising; })},
+ {});
+
+ FieldTrialFlag per_layer_speed("use_per_layer_speed");
+
+ ParseFieldTrial({&trials_list, &per_layer_speed},
+ trials.Lookup("WebRTC-VP9-PerformanceFlags"));
+
+ PerformanceFlags flags;
+ flags.use_per_layer_speed = per_layer_speed.Get();
+
+ constexpr int kMinSpeed = 1;
+ constexpr int kMaxSpeed = 9;
+ for (auto& f : trials_list.Get()) {
+ if (f.base_layer_speed < kMinSpeed || f.base_layer_speed > kMaxSpeed ||
+ f.high_layer_speed < kMinSpeed || f.high_layer_speed > kMaxSpeed ||
+ f.deblock_mode < 0 || f.deblock_mode > 2) {
+ RTC_LOG(LS_WARNING) << "Ignoring invalid performance flags: "
+ << "min_pixel_count = " << f.min_pixel_count
+ << ", high_layer_speed = " << f.high_layer_speed
+ << ", base_layer_speed = " << f.base_layer_speed
+ << ", deblock_mode = " << f.deblock_mode;
+ continue;
+ }
+ flags.settings_by_resolution[f.min_pixel_count] = f;
+ }
+
+ if (flags.settings_by_resolution.empty()) {
+ return GetDefaultPerformanceFlags();
+ }
+
+ return flags;
+}
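+
+// Illustrative sketch (not part of the build): FieldTrialStructList parses
+// pipe-separated value lists, one element per struct instance. The trial
+// value below is an assumed example defining two resolution tiers plus the
+// per-layer-speed flag.
+//
+//   "WebRTC-VP9-PerformanceFlags" ->
+//       "min_pixel_count:0|230400,base_layer_speed:5|7,"
+//       "high_layer_speed:8|8,deblock_mode:1|0,use_per_layer_speed"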
+
+// static
+LibvpxVp9Encoder::PerformanceFlags
+LibvpxVp9Encoder::GetDefaultPerformanceFlags() {
+ PerformanceFlags flags;
+ flags.use_per_layer_speed = true;
+#if defined(WEBRTC_ARCH_ARM) || defined(WEBRTC_ARCH_ARM64) || defined(ANDROID)
+ // Speed 8 on all layers for all resolutions.
+ flags.settings_by_resolution[0] = {.base_layer_speed = 8,
+ .high_layer_speed = 8,
+ .deblock_mode = 0,
+ .allow_denoising = true};
+#else
+
+ // For smaller resolutions, use lower speed setting for the temporal base
+ // layer (get some coding gain at the cost of increased encoding complexity).
+ // Set encoder Speed 5 for TL0, encoder Speed 8 for upper temporal layers, and
+ // disable deblocking for upper-most temporal layers.
+ flags.settings_by_resolution[0] = {.base_layer_speed = 5,
+ .high_layer_speed = 8,
+ .deblock_mode = 1,
+ .allow_denoising = true};
+
+ // Use speed 7 for QCIF and above.
+ // Set encoder Speed 7 for TL0, encoder Speed 8 for upper temporal layers, and
+ // enable deblocking for all temporal layers.
+ flags.settings_by_resolution[352 * 288] = {.base_layer_speed = 7,
+ .high_layer_speed = 8,
+ .deblock_mode = 0,
+ .allow_denoising = true};
+
+  // For very high resolutions (1080p and up), turn the speed all the way up
+  // since encoding is very CPU intensive there. Also disable denoising to
+  // save CPU; at these resolutions denoising appears less effective, and the
+  // video source is hopefully less noisy to begin with.
+ flags.settings_by_resolution[1920 * 1080] = {.base_layer_speed = 9,
+ .high_layer_speed = 9,
+ .deblock_mode = 0,
+ .allow_denoising = false};
+
+#endif
+ return flags;
+}
+
+void LibvpxVp9Encoder::MaybeRewrapRawWithFormat(const vpx_img_fmt fmt) {
+ if (!raw_) {
+ raw_ = libvpx_->img_wrap(nullptr, fmt, codec_.width, codec_.height, 1,
+ nullptr);
+ } else if (raw_->fmt != fmt) {
+ RTC_LOG(LS_INFO) << "Switching VP9 encoder pixel format to "
+ << (fmt == VPX_IMG_FMT_NV12 ? "NV12" : "I420");
+ libvpx_->img_free(raw_);
+ raw_ = libvpx_->img_wrap(nullptr, fmt, codec_.width, codec_.height, 1,
+ nullptr);
+ }
+ // else no-op since the image is already in the right format.
+}
+
+rtc::scoped_refptr<VideoFrameBuffer> LibvpxVp9Encoder::PrepareBufferForProfile0(
+ rtc::scoped_refptr<VideoFrameBuffer> buffer) {
+ absl::InlinedVector<VideoFrameBuffer::Type, kMaxPreferredPixelFormats>
+ supported_formats = {VideoFrameBuffer::Type::kI420,
+ VideoFrameBuffer::Type::kNV12};
+
+ rtc::scoped_refptr<VideoFrameBuffer> mapped_buffer;
+ if (buffer->type() != VideoFrameBuffer::Type::kNative) {
+ // `buffer` is already mapped.
+ mapped_buffer = buffer;
+ } else {
+ // Attempt to map to one of the supported formats.
+ mapped_buffer = buffer->GetMappedFrameBuffer(supported_formats);
+ }
+ if (!mapped_buffer ||
+ (absl::c_find(supported_formats, mapped_buffer->type()) ==
+ supported_formats.end() &&
+ mapped_buffer->type() != VideoFrameBuffer::Type::kI420A)) {
+    // Unknown pixel format, or unable to map; convert to I420 and prepare
+    // that buffer instead to ensure Scale() is safe to use.
+ auto converted_buffer = buffer->ToI420();
+ if (!converted_buffer) {
+ RTC_LOG(LS_ERROR) << "Failed to convert "
+ << VideoFrameBufferTypeToString(buffer->type())
+ << " image to I420. Can't encode frame.";
+ return {};
+ }
+ RTC_CHECK(converted_buffer->type() == VideoFrameBuffer::Type::kI420 ||
+ converted_buffer->type() == VideoFrameBuffer::Type::kI420A);
+
+ // Because `buffer` had to be converted, use `converted_buffer` instead.
+ buffer = mapped_buffer = converted_buffer;
+ }
+
+ // Prepare `raw_` from `mapped_buffer`.
+ switch (mapped_buffer->type()) {
+ case VideoFrameBuffer::Type::kI420:
+ case VideoFrameBuffer::Type::kI420A: {
+ MaybeRewrapRawWithFormat(VPX_IMG_FMT_I420);
+ const I420BufferInterface* i420_buffer = mapped_buffer->GetI420();
+ RTC_DCHECK(i420_buffer);
+ raw_->planes[VPX_PLANE_Y] = const_cast<uint8_t*>(i420_buffer->DataY());
+ raw_->planes[VPX_PLANE_U] = const_cast<uint8_t*>(i420_buffer->DataU());
+ raw_->planes[VPX_PLANE_V] = const_cast<uint8_t*>(i420_buffer->DataV());
+ raw_->stride[VPX_PLANE_Y] = i420_buffer->StrideY();
+ raw_->stride[VPX_PLANE_U] = i420_buffer->StrideU();
+ raw_->stride[VPX_PLANE_V] = i420_buffer->StrideV();
+ break;
+ }
+ case VideoFrameBuffer::Type::kNV12: {
+ MaybeRewrapRawWithFormat(VPX_IMG_FMT_NV12);
+ const NV12BufferInterface* nv12_buffer = mapped_buffer->GetNV12();
+ RTC_DCHECK(nv12_buffer);
+ raw_->planes[VPX_PLANE_Y] = const_cast<uint8_t*>(nv12_buffer->DataY());
+ raw_->planes[VPX_PLANE_U] = const_cast<uint8_t*>(nv12_buffer->DataUV());
+ raw_->planes[VPX_PLANE_V] = raw_->planes[VPX_PLANE_U] + 1;
+ raw_->stride[VPX_PLANE_Y] = nv12_buffer->StrideY();
+ raw_->stride[VPX_PLANE_U] = nv12_buffer->StrideUV();
+ raw_->stride[VPX_PLANE_V] = nv12_buffer->StrideUV();
+ break;
+ }
+ default:
+ RTC_DCHECK_NOTREACHED();
+ }
+ return mapped_buffer;
+}
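+
+// Illustrative sketch (not part of the build): the NV12 aliasing used in the
+// kNV12 case above. NV12 stores chroma interleaved as UVUV...; pointing
+// VPX_PLANE_V one byte past VPX_PLANE_U with the shared UV stride lets libvpx
+// read both chroma planes out of the single interleaved plane without a copy:
+//
+//   raw->planes[VPX_PLANE_U] = uv_plane;      // U samples at even offsets.
+//   raw->planes[VPX_PLANE_V] = uv_plane + 1;  // V samples at odd offsets.
+//   raw->stride[VPX_PLANE_U] = raw->stride[VPX_PLANE_V] = uv_stride;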
+
+} // namespace webrtc
+
+#endif // RTC_ENABLE_VP9
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/vp9/libvpx_vp9_encoder.h b/third_party/libwebrtc/modules/video_coding/codecs/vp9/libvpx_vp9_encoder.h
new file mode 100644
index 0000000000..427e721c1b
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/vp9/libvpx_vp9_encoder.h
@@ -0,0 +1,249 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ *
+ */
+
+#ifndef MODULES_VIDEO_CODING_CODECS_VP9_LIBVPX_VP9_ENCODER_H_
+#define MODULES_VIDEO_CODING_CODECS_VP9_LIBVPX_VP9_ENCODER_H_
+
+#ifdef RTC_ENABLE_VP9
+
+#include <array>
+#include <memory>
+#include <vector>
+
+#include "api/fec_controller_override.h"
+#include "api/field_trials_view.h"
+#include "api/video_codecs/video_encoder.h"
+#include "api/video_codecs/vp9_profile.h"
+#include "common_video/include/video_frame_buffer_pool.h"
+#include "modules/video_coding/codecs/interface/libvpx_interface.h"
+#include "modules/video_coding/codecs/vp9/include/vp9.h"
+#include "modules/video_coding/codecs/vp9/vp9_frame_buffer_pool.h"
+#include "modules/video_coding/svc/scalable_video_controller.h"
+#include "modules/video_coding/utility/framerate_controller_deprecated.h"
+#include "rtc_base/containers/flat_map.h"
+#include "rtc_base/experiments/encoder_info_settings.h"
+#include "vpx/vp8cx.h"
+
+namespace webrtc {
+
+class LibvpxVp9Encoder : public VP9Encoder {
+ public:
+ LibvpxVp9Encoder(const cricket::VideoCodec& codec,
+ std::unique_ptr<LibvpxInterface> interface,
+ const FieldTrialsView& trials);
+
+ ~LibvpxVp9Encoder() override;
+
+ void SetFecControllerOverride(
+ FecControllerOverride* fec_controller_override) override;
+
+ int Release() override;
+
+ int InitEncode(const VideoCodec* codec_settings,
+ const Settings& settings) override;
+
+ int Encode(const VideoFrame& input_image,
+ const std::vector<VideoFrameType>* frame_types) override;
+
+ int RegisterEncodeCompleteCallback(EncodedImageCallback* callback) override;
+
+ void SetRates(const RateControlParameters& parameters) override;
+
+ EncoderInfo GetEncoderInfo() const override;
+
+ private:
+ // Determine number of encoder threads to use.
+ int NumberOfThreads(int width, int height, int number_of_cores);
+
+ // Call encoder initialize function and set control settings.
+ int InitAndSetControlSettings(const VideoCodec* inst);
+
+ // Update frame size for codec.
+ int UpdateCodecFrameSize(const VideoFrame& input_image);
+
+ bool PopulateCodecSpecific(CodecSpecificInfo* codec_specific,
+ absl::optional<int>* spatial_idx,
+ absl::optional<int>* temporal_idx,
+ const vpx_codec_cx_pkt& pkt);
+ void FillReferenceIndices(const vpx_codec_cx_pkt& pkt,
+ size_t pic_num,
+ bool inter_layer_predicted,
+ CodecSpecificInfoVP9* vp9_info);
+ void UpdateReferenceBuffers(const vpx_codec_cx_pkt& pkt, size_t pic_num);
+ vpx_svc_ref_frame_config_t SetReferences(bool is_key_pic,
+ int first_active_spatial_layer_id);
+
+ bool ExplicitlyConfiguredSpatialLayers() const;
+ bool SetSvcRates(const VideoBitrateAllocation& bitrate_allocation);
+
+ // Configures which spatial layers libvpx should encode according to
+ // configuration provided by svc_controller_.
+ void EnableSpatialLayer(int sid);
+ void DisableSpatialLayer(int sid);
+ void SetActiveSpatialLayers();
+
+ void GetEncodedLayerFrame(const vpx_codec_cx_pkt* pkt);
+
+ // Callback function for outputting packets per spatial layer.
+ static void EncoderOutputCodedPacketCallback(vpx_codec_cx_pkt* pkt,
+ void* user_data);
+
+ void DeliverBufferedFrame(bool end_of_picture);
+
+ bool DropFrame(uint8_t spatial_idx, uint32_t rtp_timestamp);
+
+ // Determine maximum target for Intra frames
+ //
+ // Input:
+ // - optimal_buffer_size : Optimal buffer size
+ // Return Value : Max target size for Intra frames represented as
+ // percentage of the per frame bandwidth
+ uint32_t MaxIntraTarget(uint32_t optimal_buffer_size);
+
+ size_t SteadyStateSize(int sid, int tid);
+
+ void MaybeRewrapRawWithFormat(vpx_img_fmt fmt);
+ // Prepares `raw_` to reference image data of `buffer`, or of mapped or scaled
+ // versions of `buffer`. Returns the buffer that got referenced as a result,
+ // allowing the caller to keep a reference to it until after encoding has
+ // finished. On failure to convert the buffer, null is returned.
+ rtc::scoped_refptr<VideoFrameBuffer> PrepareBufferForProfile0(
+ rtc::scoped_refptr<VideoFrameBuffer> buffer);
+
+ const std::unique_ptr<LibvpxInterface> libvpx_;
+ EncodedImage encoded_image_;
+ CodecSpecificInfo codec_specific_;
+ EncodedImageCallback* encoded_complete_callback_;
+ VideoCodec codec_;
+ const VP9Profile profile_;
+ bool inited_;
+ int64_t timestamp_;
+ uint32_t rc_max_intra_target_;
+ vpx_codec_ctx_t* encoder_;
+ vpx_codec_enc_cfg_t* config_;
+ vpx_image_t* raw_;
+ vpx_svc_extra_cfg_t svc_params_;
+ const VideoFrame* input_image_;
+ GofInfoVP9 gof_; // Contains each frame's temporal information for
+ // non-flexible mode.
+ bool force_key_frame_;
+ size_t pics_since_key_;
+ uint8_t num_temporal_layers_;
+ uint8_t num_spatial_layers_; // Number of configured SLs
+ uint8_t num_active_spatial_layers_; // Number of actively encoded SLs
+ uint8_t first_active_layer_;
+ bool layer_deactivation_requires_key_frame_;
+ bool is_svc_;
+ InterLayerPredMode inter_layer_pred_;
+ bool external_ref_control_;
+ const bool trusted_rate_controller_;
+ bool layer_buffering_;
+ const bool full_superframe_drop_;
+ vpx_svc_frame_drop_t svc_drop_frame_;
+ bool first_frame_in_picture_;
+ VideoBitrateAllocation current_bitrate_allocation_;
+ bool ss_info_needed_;
+ bool force_all_active_layers_;
+ uint8_t num_cores_;
+
+ std::unique_ptr<ScalableVideoController> svc_controller_;
+ std::vector<FramerateControllerDeprecated> framerate_controller_;
+
+ // Used for flexible mode.
+ bool is_flexible_mode_;
+ struct RefFrameBuffer {
+ bool operator==(const RefFrameBuffer& o) {
+ return pic_num == o.pic_num && spatial_layer_id == o.spatial_layer_id &&
+ temporal_layer_id == o.temporal_layer_id;
+ }
+
+ size_t pic_num = 0;
+ int spatial_layer_id = 0;
+ int temporal_layer_id = 0;
+ };
+ std::array<RefFrameBuffer, kNumVp9Buffers> ref_buf_;
+ std::vector<ScalableVideoController::LayerFrameConfig> layer_frames_;
+
+ // Variable frame-rate related fields and methods.
+ const struct VariableFramerateExperiment {
+ bool enabled;
+ // Framerate is limited to this value in steady state.
+ float framerate_limit;
+ // This qp or below is considered a steady state.
+ int steady_state_qp;
+    // Frames at least this percentage below the ideal size for the configured
+    // bitrate are considered to be in a steady state.
+ int steady_state_undershoot_percentage;
+ // Number of consecutive frames with good QP and size required to detect
+ // the steady state.
+ int frames_before_steady_state;
+ } variable_framerate_experiment_;
+ static VariableFramerateExperiment ParseVariableFramerateConfig(
+ const FieldTrialsView& trials);
+ FramerateControllerDeprecated variable_framerate_controller_;
+
+ const struct QualityScalerExperiment {
+ int low_qp;
+ int high_qp;
+ bool enabled;
+ } quality_scaler_experiment_;
+ static QualityScalerExperiment ParseQualityScalerConfig(
+ const FieldTrialsView& trials);
+ const bool external_ref_ctrl_;
+
+  // Flags that can affect the speed vs quality tradeoff, and are configurable
+  // per resolution range.
+  struct PerformanceFlags {
+    // If false, a lookup will be made in `settings_by_resolution` based on
+    // the highest currently active resolution, and the overall speed then set
+    // to the `base_layer_speed` matching that entry.
+    // If true, each active resolution will have its speed and deblock_mode
+    // set based on its resolution, with the high-layer speed used for
+    // non-base temporal layer frames.
+ bool use_per_layer_speed = false;
+
+ struct ParameterSet {
+ int base_layer_speed = -1; // Speed setting for TL0.
+ int high_layer_speed = -1; // Speed setting for TL1-TL3.
+ // 0 = deblock all temporal layers (TL)
+ // 1 = disable deblock for top-most TL
+ // 2 = disable deblock for all TLs
+ int deblock_mode = 0;
+ bool allow_denoising = true;
+ };
+ // Map from min pixel count to settings for that resolution and above.
+    // E.g. if you want some setting A below WVGA (640x360) and some other
+    // setting B at WVGA and above, you'd use the map {{0, A}, {230400, B}}.
+ flat_map<int, ParameterSet> settings_by_resolution;
+ };
+ // Performance flags, ordered by `min_pixel_count`.
+ const PerformanceFlags performance_flags_;
+  // Caching of `speed_configs_`, where index i maps to the resolution as
+  // specified in `codec_.spatialLayer[i]`.
+ std::vector<PerformanceFlags::ParameterSet>
+ performance_flags_by_spatial_index_;
+ void UpdatePerformanceFlags();
+ static PerformanceFlags ParsePerformanceFlagsFromTrials(
+ const FieldTrialsView& trials);
+ static PerformanceFlags GetDefaultPerformanceFlags();
+
+ int num_steady_state_frames_;
+ // Only set config when this flag is set.
+ bool config_changed_;
+
+ const LibvpxVp9EncoderInfoSettings encoder_info_override_;
+};
+
+} // namespace webrtc
+
+#endif // RTC_ENABLE_VP9
+
+#endif // MODULES_VIDEO_CODING_CODECS_VP9_LIBVPX_VP9_ENCODER_H_
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/vp9/svc_config.cc b/third_party/libwebrtc/modules/video_coding/codecs/vp9/svc_config.cc
new file mode 100644
index 0000000000..77eee3dbf5
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/vp9/svc_config.cc
@@ -0,0 +1,235 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/codecs/vp9/svc_config.h"
+
+#include <algorithm>
+#include <cmath>
+#include <memory>
+#include <vector>
+
+#include "media/base/video_common.h"
+#include "modules/video_coding/codecs/vp9/include/vp9_globals.h"
+#include "modules/video_coding/svc/create_scalability_structure.h"
+#include "modules/video_coding/svc/scalability_mode_util.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+namespace {
+const size_t kMinVp9SvcBitrateKbps = 30;
+
+const size_t kMaxNumLayersForScreenSharing = 3;
+const float kMaxScreenSharingLayerFramerateFps[] = {5.0, 10.0, 30.0};
+const size_t kMinScreenSharingLayerBitrateKbps[] = {30, 200, 500};
+const size_t kTargetScreenSharingLayerBitrateKbps[] = {150, 350, 950};
+const size_t kMaxScreenSharingLayerBitrateKbps[] = {250, 500, 950};
+
+// Gets the maximum number of spatial layers that fit the given resolution.
+size_t GetLimitedNumSpatialLayers(size_t width, size_t height) {
+ const bool is_landscape = width >= height;
+ const size_t min_width = is_landscape ? kMinVp9SpatialLayerLongSideLength
+ : kMinVp9SpatialLayerShortSideLength;
+ const size_t min_height = is_landscape ? kMinVp9SpatialLayerShortSideLength
+ : kMinVp9SpatialLayerLongSideLength;
+ const size_t num_layers_fit_horz = static_cast<size_t>(
+ std::floor(1 + std::max(0.0f, std::log2(1.0f * width / min_width))));
+ const size_t num_layers_fit_vert = static_cast<size_t>(
+ std::floor(1 + std::max(0.0f, std::log2(1.0f * height / min_height))));
+ return std::min(num_layers_fit_horz, num_layers_fit_vert);
+}
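+
+// Illustrative sketch (not part of the build): the formula above, assuming
+// kMinVp9SpatialLayerLongSideLength = 240 and
+// kMinVp9SpatialLayerShortSideLength = 135. For 960x540 landscape input:
+// floor(1 + log2(960 / 240)) = 3 and floor(1 + log2(540 / 135)) = 3, so up
+// to three spatial layers fit.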
+} // namespace
+
+std::vector<SpatialLayer> ConfigureSvcScreenSharing(size_t input_width,
+ size_t input_height,
+ float max_framerate_fps,
+ size_t num_spatial_layers) {
+ num_spatial_layers =
+ std::min(num_spatial_layers, kMaxNumLayersForScreenSharing);
+ std::vector<SpatialLayer> spatial_layers;
+
+ for (size_t sl_idx = 0; sl_idx < num_spatial_layers; ++sl_idx) {
+ SpatialLayer spatial_layer = {0};
+ spatial_layer.width = input_width;
+ spatial_layer.height = input_height;
+ spatial_layer.maxFramerate =
+ std::min(kMaxScreenSharingLayerFramerateFps[sl_idx], max_framerate_fps);
+ spatial_layer.numberOfTemporalLayers = 1;
+ spatial_layer.minBitrate =
+ static_cast<int>(kMinScreenSharingLayerBitrateKbps[sl_idx]);
+ spatial_layer.maxBitrate =
+ static_cast<int>(kMaxScreenSharingLayerBitrateKbps[sl_idx]);
+ spatial_layer.targetBitrate =
+ static_cast<int>(kTargetScreenSharingLayerBitrateKbps[sl_idx]);
+ spatial_layer.active = true;
+ spatial_layers.push_back(spatial_layer);
+ }
+
+ return spatial_layers;
+}
+
+std::vector<SpatialLayer> ConfigureSvcNormalVideo(
+ size_t input_width,
+ size_t input_height,
+ float max_framerate_fps,
+ size_t first_active_layer,
+ size_t num_spatial_layers,
+ size_t num_temporal_layers,
+ absl::optional<ScalableVideoController::StreamLayersConfig> config) {
+ RTC_DCHECK_LT(first_active_layer, num_spatial_layers);
+
+ // Limit number of layers for given resolution.
+ size_t limited_num_spatial_layers =
+ GetLimitedNumSpatialLayers(input_width, input_height);
+ if (limited_num_spatial_layers < num_spatial_layers) {
+ RTC_LOG(LS_WARNING) << "Reducing number of spatial layers from "
+ << num_spatial_layers << " to "
+ << limited_num_spatial_layers
+ << " due to low input resolution.";
+ num_spatial_layers = limited_num_spatial_layers;
+ }
+
+ // First active layer must be configured.
+ num_spatial_layers = std::max(num_spatial_layers, first_active_layer + 1);
+
+  // Ensure the top layer's resolution is divisible as required by the
+  // downscaling factors.
+  int required_divisibility =
+      1 << (num_spatial_layers - first_active_layer - 1);
+  if (config) {
+    required_divisibility = 1;
+    for (size_t sl_idx = 0; sl_idx < num_spatial_layers; ++sl_idx) {
+      required_divisibility = cricket::LeastCommonMultiple(
+          required_divisibility, config->scaling_factor_den[sl_idx]);
+    }
+  }
+  input_width = input_width - input_width % required_divisibility;
+  input_height = input_height - input_height % required_divisibility;
+
+ std::vector<SpatialLayer> spatial_layers;
+ for (size_t sl_idx = first_active_layer; sl_idx < num_spatial_layers;
+ ++sl_idx) {
+ SpatialLayer spatial_layer = {0};
+ spatial_layer.width = input_width >> (num_spatial_layers - sl_idx - 1);
+ spatial_layer.height = input_height >> (num_spatial_layers - sl_idx - 1);
+ spatial_layer.maxFramerate = max_framerate_fps;
+ spatial_layer.numberOfTemporalLayers = num_temporal_layers;
+ spatial_layer.active = true;
+
+ if (config) {
+ spatial_layer.width = input_width * config->scaling_factor_num[sl_idx] /
+ config->scaling_factor_den[sl_idx];
+ spatial_layer.height = input_height * config->scaling_factor_num[sl_idx] /
+ config->scaling_factor_den[sl_idx];
+ }
+
+    // minBitrate and maxBitrate formulas were derived from
+    // subjective-quality data to determine bit rates below which video
+    // quality is unacceptable and above which additional bits do not provide
+    // benefit. The formulas express rate in units of kbps.
+
+ // TODO(ssilkin): Add to the comment PSNR/SSIM we get at encoding certain
+ // video to min/max bitrate specified by those formulas.
+ const size_t num_pixels = spatial_layer.width * spatial_layer.height;
+ int min_bitrate =
+ static_cast<int>((600. * std::sqrt(num_pixels) - 95000.) / 1000.);
+ min_bitrate = std::max(min_bitrate, 0);
+ spatial_layer.minBitrate =
+ std::max(static_cast<size_t>(min_bitrate), kMinVp9SvcBitrateKbps);
+ spatial_layer.maxBitrate =
+ static_cast<int>((1.6 * num_pixels + 50 * 1000) / 1000);
+ spatial_layer.targetBitrate =
+ (spatial_layer.minBitrate + spatial_layer.maxBitrate) / 2;
+ spatial_layers.push_back(spatial_layer);
+ }
+
+  // A workaround for the situation when a single HD layer is left with a
+  // minBitrate of about 500 kbps, which would mean at least 500 kbps is
+  // always allocated to video regardless of how low the actual BWE is.
+  // Also, boost maxBitrate for the first layer to account for the lost
+  // ability to predict from previous layers.
+ if (first_active_layer > 0) {
+ spatial_layers[0].minBitrate = kMinVp9SvcBitrateKbps;
+ // TODO(ilnik): tune this value or come up with a different formula to
+ // ensure that all singlecast configurations look good and not too much
+ // bitrate is added.
+ spatial_layers[0].maxBitrate *= 1.1;
+ }
+
+ return spatial_layers;
+}
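+
+// Illustrative sketch (not part of the build): the bitrate formulas above,
+// worked for a 1280x720 layer (921600 pixels):
+//   minBitrate    = (600 * sqrt(921600) - 95000) / 1000 = 481 kbps
+//   maxBitrate    = (1.6 * 921600 + 50 * 1000) / 1000   = 1524 kbps
+//   targetBitrate = (481 + 1524) / 2                    = 1002 kbps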
+
+// Uses scalability mode to configure spatial layers.
+std::vector<SpatialLayer> GetVp9SvcConfig(VideoCodec& codec) {
+ RTC_DCHECK_EQ(codec.codecType, kVideoCodecVP9);
+
+ absl::optional<ScalabilityMode> scalability_mode = codec.GetScalabilityMode();
+ RTC_DCHECK(scalability_mode.has_value());
+
+ absl::optional<ScalableVideoController::StreamLayersConfig> info =
+ ScalabilityStructureConfig(*scalability_mode);
+ if (!info.has_value()) {
+ RTC_LOG(LS_WARNING) << "Failed to create structure "
+ << ScalabilityModeToString(*scalability_mode);
+ return {};
+ }
+
+ if (static_cast<int>(GetLimitedNumSpatialLayers(codec.width, codec.height)) <
+ info->num_spatial_layers) {
+    // Layers will be reduced; do not use scalability mode for now.
+ // TODO(bugs.webrtc.org/11607): Use a lower scalability mode once all lower
+ // modes are supported.
+ codec.UnsetScalabilityMode();
+ codec.VP9()->interLayerPred =
+ ScalabilityModeToInterLayerPredMode(*scalability_mode);
+ }
+
+ // TODO(bugs.webrtc.org/11607): Add support for screensharing.
+ std::vector<SpatialLayer> spatial_layers =
+ GetSvcConfig(codec.width, codec.height, codec.maxFramerate,
+ /*first_active_layer=*/0, info->num_spatial_layers,
+ info->num_temporal_layers, /*is_screen_sharing=*/false,
+ codec.GetScalabilityMode() ? info : absl::nullopt);
+ RTC_DCHECK(!spatial_layers.empty());
+
+ // Use codec bitrate limits if spatial layering is not requested.
+ if (info->num_spatial_layers == 1) {
+ spatial_layers.back().minBitrate = codec.minBitrate;
+ spatial_layers.back().targetBitrate = codec.maxBitrate;
+ spatial_layers.back().maxBitrate = codec.maxBitrate;
+ }
+
+ return spatial_layers;
+}
+
+std::vector<SpatialLayer> GetSvcConfig(
+ size_t input_width,
+ size_t input_height,
+ float max_framerate_fps,
+ size_t first_active_layer,
+ size_t num_spatial_layers,
+ size_t num_temporal_layers,
+ bool is_screen_sharing,
+ absl::optional<ScalableVideoController::StreamLayersConfig> config) {
+ RTC_DCHECK_GT(input_width, 0);
+ RTC_DCHECK_GT(input_height, 0);
+ RTC_DCHECK_GT(num_spatial_layers, 0);
+ RTC_DCHECK_GT(num_temporal_layers, 0);
+
+ if (is_screen_sharing) {
+ return ConfigureSvcScreenSharing(input_width, input_height,
+ max_framerate_fps, num_spatial_layers);
+ } else {
+ return ConfigureSvcNormalVideo(input_width, input_height, max_framerate_fps,
+ first_active_layer, num_spatial_layers,
+ num_temporal_layers, config);
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/vp9/svc_config.h b/third_party/libwebrtc/modules/video_coding/codecs/vp9/svc_config.h
new file mode 100644
index 0000000000..adeaf0f161
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/vp9/svc_config.h
@@ -0,0 +1,39 @@
+/* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_CODECS_VP9_SVC_CONFIG_H_
+#define MODULES_VIDEO_CODING_CODECS_VP9_SVC_CONFIG_H_
+
+#include <stddef.h>
+
+#include <vector>
+
+#include "api/video_codecs/spatial_layer.h"
+#include "api/video_codecs/video_codec.h"
+#include "modules/video_coding/svc/scalable_video_controller.h"
+
+namespace webrtc {
+
+// Uses scalability mode to configure spatial layers.
+std::vector<SpatialLayer> GetVp9SvcConfig(VideoCodec& video_codec);
+
+std::vector<SpatialLayer> GetSvcConfig(
+ size_t input_width,
+ size_t input_height,
+ float max_framerate_fps,
+ size_t first_active_layer,
+ size_t num_spatial_layers,
+ size_t num_temporal_layers,
+ bool is_screen_sharing,
+ absl::optional<ScalableVideoController::StreamLayersConfig> config =
+ absl::nullopt);
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_CODECS_VP9_SVC_CONFIG_H_
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/vp9/svc_config_unittest.cc b/third_party/libwebrtc/modules/video_coding/codecs/vp9/svc_config_unittest.cc
new file mode 100644
index 0000000000..4de3c5b2a6
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/vp9/svc_config_unittest.cc
@@ -0,0 +1,288 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/codecs/vp9/svc_config.h"
+
+#include <cstddef>
+#include <vector>
+
+#include "modules/video_coding/codecs/vp9/include/vp9_globals.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+using ::testing::ElementsAre;
+using ::testing::Field;
+
+namespace webrtc {
+TEST(SvcConfig, NumSpatialLayers) {
+ const size_t max_num_spatial_layers = 6;
+ const size_t first_active_layer = 0;
+ const size_t num_spatial_layers = 2;
+
+ std::vector<SpatialLayer> spatial_layers = GetSvcConfig(
+ kMinVp9SpatialLayerLongSideLength << (num_spatial_layers - 1),
+ kMinVp9SpatialLayerShortSideLength << (num_spatial_layers - 1), 30,
+ first_active_layer, max_num_spatial_layers, 1, false);
+
+ EXPECT_EQ(spatial_layers.size(), num_spatial_layers);
+}
+
+TEST(SvcConfig, NumSpatialLayersPortrait) {
+ const size_t max_num_spatial_layers = 6;
+ const size_t first_active_layer = 0;
+ const size_t num_spatial_layers = 2;
+
+ std::vector<SpatialLayer> spatial_layers = GetSvcConfig(
+ kMinVp9SpatialLayerShortSideLength << (num_spatial_layers - 1),
+ kMinVp9SpatialLayerLongSideLength << (num_spatial_layers - 1), 30,
+ first_active_layer, max_num_spatial_layers, 1, false);
+
+ EXPECT_EQ(spatial_layers.size(), num_spatial_layers);
+}
+
+TEST(SvcConfig, NumSpatialLayersWithScalabilityMode) {
+ VideoCodec codec;
+ codec.codecType = kVideoCodecVP9;
+ codec.width = 960;
+ codec.height = 540;
+ codec.SetScalabilityMode(ScalabilityMode::kL3T3_KEY);
+
+ std::vector<SpatialLayer> spatial_layers = GetVp9SvcConfig(codec);
+ EXPECT_THAT(spatial_layers, ElementsAre(Field(&SpatialLayer::height, 135),
+ Field(&SpatialLayer::height, 270),
+ Field(&SpatialLayer::height, 540)));
+ EXPECT_THAT(spatial_layers,
+ ElementsAre(Field(&SpatialLayer::numberOfTemporalLayers, 3),
+ Field(&SpatialLayer::numberOfTemporalLayers, 3),
+ Field(&SpatialLayer::numberOfTemporalLayers, 3)));
+ EXPECT_EQ(codec.GetScalabilityMode(), ScalabilityMode::kL3T3_KEY);
+}
+
+TEST(SvcConfig, NumSpatialLayersLimitedWithScalabilityMode) {
+ VideoCodec codec;
+ codec.codecType = kVideoCodecVP9;
+ codec.width = 480;
+ codec.height = 270;
+ codec.SetScalabilityMode(ScalabilityMode::kL3T3_KEY);
+
+  // The scalability mode is reset; the configuration should match L2T3_KEY.
+ std::vector<SpatialLayer> spatial_layers = GetVp9SvcConfig(codec);
+ EXPECT_THAT(spatial_layers, ElementsAre(Field(&SpatialLayer::height, 135),
+ Field(&SpatialLayer::height, 270)));
+ EXPECT_THAT(spatial_layers,
+ ElementsAre(Field(&SpatialLayer::numberOfTemporalLayers, 3),
+ Field(&SpatialLayer::numberOfTemporalLayers, 3)));
+ EXPECT_EQ(codec.VP9()->interLayerPred, InterLayerPredMode::kOnKeyPic);
+ EXPECT_EQ(codec.GetScalabilityMode(), absl::nullopt);
+}
+
+TEST(SvcConfig, NumSpatialLayersLimitedWithScalabilityModePortrait) {
+ VideoCodec codec;
+ codec.codecType = kVideoCodecVP9;
+ codec.width = 270;
+ codec.height = 480;
+ codec.SetScalabilityMode(ScalabilityMode::kL3T1);
+
+  // The scalability mode is reset; the configuration should match L2T1.
+ std::vector<SpatialLayer> spatial_layers = GetVp9SvcConfig(codec);
+ EXPECT_THAT(spatial_layers, ElementsAre(Field(&SpatialLayer::width, 135),
+ Field(&SpatialLayer::width, 270)));
+ EXPECT_THAT(spatial_layers,
+ ElementsAre(Field(&SpatialLayer::numberOfTemporalLayers, 1),
+ Field(&SpatialLayer::numberOfTemporalLayers, 1)));
+ EXPECT_EQ(codec.VP9()->interLayerPred, InterLayerPredMode::kOn);
+ EXPECT_EQ(codec.GetScalabilityMode(), absl::nullopt);
+}
+
+TEST(SvcConfig, NumSpatialLayersWithScalabilityModeResolutionRatio1_5) {
+ VideoCodec codec;
+ codec.codecType = kVideoCodecVP9;
+ codec.width = 270;
+ codec.height = 480;
+ codec.SetScalabilityMode(ScalabilityMode::kL2T1h); // 1.5:1
+
+ std::vector<SpatialLayer> spatial_layers = GetVp9SvcConfig(codec);
+ EXPECT_THAT(spatial_layers, ElementsAre(Field(&SpatialLayer::width, 180),
+ Field(&SpatialLayer::width, 270)));
+ EXPECT_THAT(spatial_layers,
+ ElementsAre(Field(&SpatialLayer::numberOfTemporalLayers, 1),
+ Field(&SpatialLayer::numberOfTemporalLayers, 1)));
+ EXPECT_EQ(codec.GetScalabilityMode(), ScalabilityMode::kL2T1h);
+}
+
+TEST(SvcConfig, NumSpatialLayersLimitedWithScalabilityModeResolutionRatio1_5) {
+ VideoCodec codec;
+ codec.codecType = kVideoCodecVP9;
+ codec.width = 320;
+ codec.height = 180;
+ codec.SetScalabilityMode(ScalabilityMode::kL2T1h); // 1.5:1
+
+  // The scalability mode is reset; the configuration should match L1T1.
+ std::vector<SpatialLayer> spatial_layers = GetVp9SvcConfig(codec);
+ EXPECT_THAT(spatial_layers, ElementsAre(Field(&SpatialLayer::width, 320)));
+ EXPECT_THAT(spatial_layers,
+ ElementsAre(Field(&SpatialLayer::numberOfTemporalLayers, 1)));
+ EXPECT_EQ(codec.VP9()->interLayerPred, InterLayerPredMode::kOn);
+ EXPECT_EQ(codec.GetScalabilityMode(), absl::nullopt);
+}
+
+TEST(SvcConfig, AlwaysSendsAtLeastOneLayer) {
+ const size_t max_num_spatial_layers = 6;
+ const size_t first_active_layer = 5;
+
+ std::vector<SpatialLayer> spatial_layers = GetSvcConfig(
+ kMinVp9SpatialLayerLongSideLength, kMinVp9SpatialLayerShortSideLength, 30,
+ first_active_layer, max_num_spatial_layers, 1, false);
+ EXPECT_EQ(spatial_layers.size(), 1u);
+ EXPECT_EQ(spatial_layers.back().width, kMinVp9SpatialLayerLongSideLength);
+}
+
+TEST(SvcConfig, AlwaysSendsAtLeastOneLayerPortrait) {
+ const size_t max_num_spatial_layers = 6;
+ const size_t first_active_layer = 5;
+
+ std::vector<SpatialLayer> spatial_layers = GetSvcConfig(
+ kMinVp9SpatialLayerShortSideLength, kMinVp9SpatialLayerLongSideLength, 30,
+ first_active_layer, max_num_spatial_layers, 1, false);
+ EXPECT_EQ(spatial_layers.size(), 1u);
+ EXPECT_EQ(spatial_layers.back().width, kMinVp9SpatialLayerShortSideLength);
+}
+
+TEST(SvcConfig, EnforcesMinimalRequiredParity) {
+ const size_t max_num_spatial_layers = 3;
+ const size_t kOddSize = 1023;
+
+ std::vector<SpatialLayer> spatial_layers =
+ GetSvcConfig(kOddSize, kOddSize, 30,
+ /*first_active_layer=*/1, max_num_spatial_layers, 1, false);
+  // Since there are 2 layers total (1, 2), divisibility by 2 is required.
+  EXPECT_EQ(spatial_layers.back().width, kOddSize - 1);
+  EXPECT_EQ(spatial_layers.back().height, kOddSize - 1);
+
+ spatial_layers =
+ GetSvcConfig(kOddSize, kOddSize, 30,
+ /*first_active_layer=*/0, max_num_spatial_layers, 1, false);
+  // Since there are 3 layers total (0, 1, 2), divisibility by 4 is required.
+  EXPECT_EQ(spatial_layers.back().width, kOddSize - 3);
+  EXPECT_EQ(spatial_layers.back().height, kOddSize - 3);
+
+ spatial_layers =
+ GetSvcConfig(kOddSize, kOddSize, 30,
+ /*first_active_layer=*/2, max_num_spatial_layers, 1, false);
+  // Since only 1 layer is active (2), divisibility by 1 is required.
+  EXPECT_EQ(spatial_layers.back().width, kOddSize);
+  EXPECT_EQ(spatial_layers.back().height, kOddSize);
+}
+
+TEST(SvcConfig, EnforcesMinimalRequiredParityWithScalabilityMode) {
+ VideoCodec codec;
+ codec.codecType = kVideoCodecVP9;
+ codec.width = 1023;
+ codec.height = 1023;
+ codec.SetScalabilityMode(ScalabilityMode::kL3T1);
+
+ std::vector<SpatialLayer> spatial_layers = GetVp9SvcConfig(codec);
+  EXPECT_THAT(spatial_layers,  // Divisibility by 4 required.
+ ElementsAre(Field(&SpatialLayer::width, 255),
+ Field(&SpatialLayer::width, 510),
+ Field(&SpatialLayer::width, 1020)));
+
+ codec.SetScalabilityMode(ScalabilityMode::kL2T1);
+ spatial_layers = GetVp9SvcConfig(codec);
+  EXPECT_THAT(spatial_layers,  // Divisibility by 2 required.
+ ElementsAre(Field(&SpatialLayer::width, 511),
+ Field(&SpatialLayer::width, 1022)));
+
+ codec.SetScalabilityMode(ScalabilityMode::kL1T1);
+ spatial_layers = GetVp9SvcConfig(codec);
+  EXPECT_THAT(spatial_layers,  // Divisibility by 1 required.
+ ElementsAre(Field(&SpatialLayer::width, 1023)));
+}
+
+TEST(SvcConfig, EnforcesMinimalRequiredParityWithScalabilityModeResRatio1_5) {
+ VideoCodec codec;
+ codec.codecType = kVideoCodecVP9;
+ codec.width = 1280;
+ codec.height = 1280;
+ codec.SetScalabilityMode(ScalabilityMode::kL2T1h); // 1.5:1
+
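+  // With a 1.5:1 ratio the top-layer width must be divisible by 3 so that the
+  // lower layer (width / 1.5) is an integer: 1280 rounds down to 1278, and
+  // 1278 / 1.5 = 852.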
+ std::vector<SpatialLayer> spatial_layers = GetVp9SvcConfig(codec);
+  EXPECT_THAT(spatial_layers,  // Divisibility by 3 required.
+ ElementsAre(Field(&SpatialLayer::width, 852),
+ Field(&SpatialLayer::width, 1278)));
+}
+
+TEST(SvcConfig, SkipsInactiveLayers) {
+ const size_t num_spatial_layers = 4;
+ const size_t first_active_layer = 2;
+
+ std::vector<SpatialLayer> spatial_layers = GetSvcConfig(
+ kMinVp9SpatialLayerLongSideLength << (num_spatial_layers - 1),
+ kMinVp9SpatialLayerShortSideLength << (num_spatial_layers - 1), 30,
+ first_active_layer, num_spatial_layers, 1, false);
+ EXPECT_EQ(spatial_layers.size(), 2u);
+ EXPECT_EQ(spatial_layers.back().width,
+ kMinVp9SpatialLayerLongSideLength << (num_spatial_layers - 1));
+}
+
+TEST(SvcConfig, BitrateThresholds) {
+ const size_t first_active_layer = 0;
+ const size_t num_spatial_layers = 3;
+ std::vector<SpatialLayer> spatial_layers = GetSvcConfig(
+ kMinVp9SpatialLayerLongSideLength << (num_spatial_layers - 1),
+ kMinVp9SpatialLayerShortSideLength << (num_spatial_layers - 1), 30,
+ first_active_layer, num_spatial_layers, 1, false);
+
+ EXPECT_EQ(spatial_layers.size(), num_spatial_layers);
+
+ for (const SpatialLayer& layer : spatial_layers) {
+ EXPECT_LE(layer.minBitrate, layer.maxBitrate);
+ EXPECT_LE(layer.minBitrate, layer.targetBitrate);
+ EXPECT_LE(layer.targetBitrate, layer.maxBitrate);
+ }
+}
+
+TEST(SvcConfig, BitrateThresholdsWithScalabilityMode) {
+ VideoCodec codec;
+ codec.codecType = kVideoCodecVP9;
+ codec.width = 960;
+ codec.height = 540;
+ codec.SetScalabilityMode(ScalabilityMode::kS3T3);
+
+ std::vector<SpatialLayer> spatial_layers = GetVp9SvcConfig(codec);
+ EXPECT_THAT(spatial_layers, ElementsAre(Field(&SpatialLayer::height, 135),
+ Field(&SpatialLayer::height, 270),
+ Field(&SpatialLayer::height, 540)));
+
+ for (const SpatialLayer& layer : spatial_layers) {
+ EXPECT_LE(layer.minBitrate, layer.maxBitrate);
+ EXPECT_LE(layer.minBitrate, layer.targetBitrate);
+ EXPECT_LE(layer.targetBitrate, layer.maxBitrate);
+ }
+}
+
+TEST(SvcConfig, ScreenSharing) {
+ std::vector<SpatialLayer> spatial_layers =
+ GetSvcConfig(1920, 1080, 30, 1, 3, 3, true);
+
+ EXPECT_EQ(spatial_layers.size(), 3UL);
+
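+  // In screen-sharing mode spatial scalability is traded for framerate
+  // scalability: every layer keeps the full 1920x1080 input and the three
+  // layers run at 5, 10 and 30 fps respectively.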
+ for (size_t i = 0; i < 3; ++i) {
+ const SpatialLayer& layer = spatial_layers[i];
+ EXPECT_EQ(layer.width, 1920);
+ EXPECT_EQ(layer.height, 1080);
+ EXPECT_EQ(layer.maxFramerate, (i < 1) ? 5 : (i < 2 ? 10 : 30));
+ EXPECT_EQ(layer.numberOfTemporalLayers, 1);
+ EXPECT_LE(layer.minBitrate, layer.maxBitrate);
+ EXPECT_LE(layer.minBitrate, layer.targetBitrate);
+ EXPECT_LE(layer.targetBitrate, layer.maxBitrate);
+ }
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/vp9/test/vp9_impl_unittest.cc b/third_party/libwebrtc/modules/video_coding/codecs/vp9/test/vp9_impl_unittest.cc
new file mode 100644
index 0000000000..b6293a342e
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/vp9/test/vp9_impl_unittest.cc
@@ -0,0 +1,2446 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "absl/memory/memory.h"
+#include "api/test/create_frame_generator.h"
+#include "api/test/frame_generator_interface.h"
+#include "api/test/mock_video_encoder.h"
+#include "api/video/color_space.h"
+#include "api/video/i420_buffer.h"
+#include "api/video_codecs/video_encoder.h"
+#include "api/video_codecs/vp9_profile.h"
+#include "common_video/libyuv/include/webrtc_libyuv.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "modules/video_coding/codecs/interface/libvpx_interface.h"
+#include "modules/video_coding/codecs/interface/mock_libvpx_interface.h"
+#include "modules/video_coding/codecs/test/encoded_video_frame_producer.h"
+#include "modules/video_coding/codecs/test/video_codec_unittest.h"
+#include "modules/video_coding/codecs/vp9/include/vp9.h"
+#include "modules/video_coding/codecs/vp9/libvpx_vp9_encoder.h"
+#include "modules/video_coding/codecs/vp9/svc_config.h"
+#include "rtc_base/strings/string_builder.h"
+#include "test/explicit_key_value_config.h"
+#include "test/field_trial.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "test/mappable_native_buffer.h"
+#include "test/video_codec_settings.h"
+
+namespace webrtc {
+namespace {
+
+using ::testing::_;
+using ::testing::A;
+using ::testing::AllOf;
+using ::testing::An;
+using ::testing::AnyNumber;
+using ::testing::ByRef;
+using ::testing::DoAll;
+using ::testing::Each;
+using ::testing::ElementsAre;
+using ::testing::ElementsAreArray;
+using ::testing::Field;
+using ::testing::IsEmpty;
+using ::testing::Mock;
+using ::testing::NiceMock;
+using ::testing::Return;
+using ::testing::SafeMatcherCast;
+using ::testing::SaveArgPointee;
+using ::testing::SetArgPointee;
+using ::testing::SizeIs;
+using ::testing::TypedEq;
+using ::testing::UnorderedElementsAreArray;
+using ::testing::WithArg;
+using EncoderInfo = webrtc::VideoEncoder::EncoderInfo;
+using FramerateFractions =
+ absl::InlinedVector<uint8_t, webrtc::kMaxTemporalStreams>;
+
+constexpr size_t kWidth = 1280;
+constexpr size_t kHeight = 720;
+
+const VideoEncoder::Capabilities kCapabilities(false);
+const VideoEncoder::Settings kSettings(kCapabilities,
+ /*number_of_cores=*/1,
+ /*max_payload_size=*/0);
+
+VideoCodec DefaultCodecSettings() {
+ VideoCodec codec_settings;
+ webrtc::test::CodecSettings(kVideoCodecVP9, &codec_settings);
+ codec_settings.width = kWidth;
+ codec_settings.height = kHeight;
+ codec_settings.VP9()->numberOfTemporalLayers = 1;
+ codec_settings.VP9()->numberOfSpatialLayers = 1;
+ return codec_settings;
+}
+
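+// Fills `codec_settings` with an SVC configuration: sets the requested layer
+// counts, disables frame dropping, and copies the per-layer settings produced
+// by GetSvcConfig() into `codec_settings.spatialLayers`.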
+void ConfigureSvc(VideoCodec& codec_settings,
+ int num_spatial_layers,
+ int num_temporal_layers = 1) {
+ codec_settings.VP9()->numberOfSpatialLayers = num_spatial_layers;
+ codec_settings.VP9()->numberOfTemporalLayers = num_temporal_layers;
+ codec_settings.SetFrameDropEnabled(false);
+
+ std::vector<SpatialLayer> layers = GetSvcConfig(
+ codec_settings.width, codec_settings.height, codec_settings.maxFramerate,
+ /*first_active_layer=*/0, num_spatial_layers, num_temporal_layers, false);
+ for (size_t i = 0; i < layers.size(); ++i) {
+ codec_settings.spatialLayers[i] = layers[i];
+ }
+}
+
+} // namespace
+
+class TestVp9Impl : public VideoCodecUnitTest {
+ protected:
+ std::unique_ptr<VideoEncoder> CreateEncoder() override {
+ return VP9Encoder::Create();
+ }
+
+ std::unique_ptr<VideoDecoder> CreateDecoder() override {
+ return VP9Decoder::Create();
+ }
+
+ void ModifyCodecSettings(VideoCodec* codec_settings) override {
+ webrtc::test::CodecSettings(kVideoCodecVP9, codec_settings);
+ codec_settings->width = kWidth;
+ codec_settings->height = kHeight;
+ codec_settings->VP9()->numberOfTemporalLayers = 1;
+ codec_settings->VP9()->numberOfSpatialLayers = 1;
+ }
+};
+
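+// Parameterized over the frame generator's output pixel format (e.g. I420 or
+// NV12) so that each encode/decode test runs once per input format.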
+class TestVp9ImplForPixelFormat
+ : public TestVp9Impl,
+ public ::testing::WithParamInterface<
+ test::FrameGeneratorInterface::OutputType> {
+ protected:
+ void SetUp() override {
+ input_frame_generator_ = test::CreateSquareFrameGenerator(
+ kWidth, kHeight, GetParam(), absl::optional<int>());
+ TestVp9Impl::SetUp();
+ }
+};
+
+// Disabled on ios as flake, see https://crbug.com/webrtc/7057
+#if defined(WEBRTC_IOS)
+TEST_P(TestVp9ImplForPixelFormat, DISABLED_EncodeDecode) {
+#else
+TEST_P(TestVp9ImplForPixelFormat, EncodeDecode) {
+#endif
+ VideoFrame input_frame = NextInputFrame();
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(input_frame, nullptr));
+ EncodedImage encoded_frame;
+ CodecSpecificInfo codec_specific_info;
+ ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
+  // The first frame is a key frame; mark it as such before decoding.
+  encoded_frame._frameType = VideoFrameType::kVideoFrameKey;
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Decode(encoded_frame, false, 0));
+ std::unique_ptr<VideoFrame> decoded_frame;
+ absl::optional<uint8_t> decoded_qp;
+ ASSERT_TRUE(WaitForDecodedFrame(&decoded_frame, &decoded_qp));
+ ASSERT_TRUE(decoded_frame);
+ EXPECT_GT(I420PSNR(&input_frame, decoded_frame.get()), 36);
+
+ const ColorSpace color_space = *decoded_frame->color_space();
+ EXPECT_EQ(ColorSpace::PrimaryID::kUnspecified, color_space.primaries());
+ EXPECT_EQ(ColorSpace::TransferID::kUnspecified, color_space.transfer());
+ EXPECT_EQ(ColorSpace::MatrixID::kUnspecified, color_space.matrix());
+ EXPECT_EQ(ColorSpace::RangeID::kLimited, color_space.range());
+ EXPECT_EQ(ColorSpace::ChromaSiting::kUnspecified,
+ color_space.chroma_siting_horizontal());
+ EXPECT_EQ(ColorSpace::ChromaSiting::kUnspecified,
+ color_space.chroma_siting_vertical());
+}
+
+TEST_P(TestVp9ImplForPixelFormat, EncodeNativeBuffer) {
+ VideoFrame input_frame = NextInputFrame();
+ // Replace the input frame with a fake native buffer of the same size and
+ // underlying pixel format. Do not allow ToI420() for non-I420 buffers,
+ // ensuring zero-conversion.
+ input_frame = test::CreateMappableNativeFrame(
+ input_frame.ntp_time_ms(), input_frame.video_frame_buffer()->type(),
+ input_frame.width(), input_frame.height());
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(input_frame, nullptr));
+ EncodedImage encoded_frame;
+ CodecSpecificInfo codec_specific_info;
+ ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
+
+ // After encoding, we would expect a single mapping to have happened.
+ rtc::scoped_refptr<test::MappableNativeBuffer> mappable_buffer =
+ test::GetMappableNativeBufferFromVideoFrame(input_frame);
+ std::vector<rtc::scoped_refptr<VideoFrameBuffer>> mapped_buffers =
+ mappable_buffer->GetMappedFramedBuffers();
+ ASSERT_EQ(mapped_buffers.size(), 1u);
+ EXPECT_EQ(mapped_buffers[0]->type(), mappable_buffer->mappable_type());
+ EXPECT_EQ(mapped_buffers[0]->width(), input_frame.width());
+ EXPECT_EQ(mapped_buffers[0]->height(), input_frame.height());
+ EXPECT_FALSE(mappable_buffer->DidConvertToI420());
+}
+
+TEST_P(TestVp9ImplForPixelFormat, DecodedColorSpaceFromBitstream) {
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(NextInputFrame(), nullptr));
+ EncodedImage encoded_frame;
+ CodecSpecificInfo codec_specific_info;
+ ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
+
+ // Encoded frame without explicit color space information.
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Decode(encoded_frame, false, 0));
+ std::unique_ptr<VideoFrame> decoded_frame;
+ absl::optional<uint8_t> decoded_qp;
+ ASSERT_TRUE(WaitForDecodedFrame(&decoded_frame, &decoded_qp));
+ ASSERT_TRUE(decoded_frame);
+ // Color space present from encoded bitstream.
+ ASSERT_TRUE(decoded_frame->color_space());
+ // No HDR metadata present.
+ EXPECT_FALSE(decoded_frame->color_space()->hdr_metadata());
+}
+
+TEST_P(TestVp9ImplForPixelFormat, DecodedQpEqualsEncodedQp) {
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(NextInputFrame(), nullptr));
+ EncodedImage encoded_frame;
+ CodecSpecificInfo codec_specific_info;
+ ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
+  // The first frame is a key frame; mark it as such before decoding.
+  encoded_frame._frameType = VideoFrameType::kVideoFrameKey;
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Decode(encoded_frame, false, 0));
+ std::unique_ptr<VideoFrame> decoded_frame;
+ absl::optional<uint8_t> decoded_qp;
+ ASSERT_TRUE(WaitForDecodedFrame(&decoded_frame, &decoded_qp));
+ ASSERT_TRUE(decoded_frame);
+ ASSERT_TRUE(decoded_qp);
+ EXPECT_EQ(encoded_frame.qp_, *decoded_qp);
+}
+
+TEST_F(TestVp9Impl, SwitchInputPixelFormatsWithoutReconfigure) {
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(NextInputFrame(), nullptr));
+ EncodedImage encoded_frame;
+ CodecSpecificInfo codec_specific_info;
+ ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
+
+ // Change the input frame type from I420 to NV12, encoding should still work.
+ input_frame_generator_ = test::CreateSquareFrameGenerator(
+ kWidth, kHeight, test::FrameGeneratorInterface::OutputType::kNV12,
+ absl::optional<int>());
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(NextInputFrame(), nullptr));
+ ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
+
+ // Flipping back to I420, encoding should still work.
+ input_frame_generator_ = test::CreateSquareFrameGenerator(
+ kWidth, kHeight, test::FrameGeneratorInterface::OutputType::kI420,
+ absl::optional<int>());
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(NextInputFrame(), nullptr));
+ ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
+}
+
+TEST(Vp9ImplTest, ParserQpEqualsEncodedQp) {
+ std::unique_ptr<VideoEncoder> encoder = VP9Encoder::Create();
+ VideoCodec codec_settings = DefaultCodecSettings();
+ encoder->InitEncode(&codec_settings, kSettings);
+
+ std::vector<EncodedVideoFrameProducer::EncodedFrame> frames =
+ EncodedVideoFrameProducer(*encoder)
+ .SetNumInputFrames(1)
+ .SetResolution({kWidth, kHeight})
+ .Encode();
+ ASSERT_THAT(frames, SizeIs(1));
+ const auto& encoded_frame = frames.front().encoded_image;
+ int qp = 0;
+ ASSERT_TRUE(vp9::GetQp(encoded_frame.data(), encoded_frame.size(), &qp));
+ EXPECT_EQ(encoded_frame.qp_, qp);
+}
+
+TEST(Vp9ImplTest, EncodeAttachesTemplateStructureWithSvcController) {
+ std::unique_ptr<VideoEncoder> encoder = VP9Encoder::Create();
+ VideoCodec codec_settings = DefaultCodecSettings();
+ EXPECT_EQ(encoder->InitEncode(&codec_settings, kSettings),
+ WEBRTC_VIDEO_CODEC_OK);
+
+ std::vector<EncodedVideoFrameProducer::EncodedFrame> frames =
+ EncodedVideoFrameProducer(*encoder)
+ .SetNumInputFrames(2)
+ .SetResolution({kWidth, kHeight})
+ .Encode();
+
+ ASSERT_THAT(frames, SizeIs(2));
+ EXPECT_TRUE(frames[0].codec_specific_info.template_structure);
+ EXPECT_TRUE(frames[0].codec_specific_info.generic_frame_info);
+
+ EXPECT_FALSE(frames[1].codec_specific_info.template_structure);
+ EXPECT_TRUE(frames[1].codec_specific_info.generic_frame_info);
+}
+
+TEST(Vp9ImplTest, EncoderWith2TemporalLayers) {
+ std::unique_ptr<VideoEncoder> encoder = VP9Encoder::Create();
+ VideoCodec codec_settings = DefaultCodecSettings();
+ codec_settings.VP9()->numberOfTemporalLayers = 2;
+ // Tl0PidIdx is only used in non-flexible mode.
+ codec_settings.VP9()->flexibleMode = false;
+ EXPECT_EQ(encoder->InitEncode(&codec_settings, kSettings),
+ WEBRTC_VIDEO_CODEC_OK);
+
+ std::vector<EncodedVideoFrameProducer::EncodedFrame> frames =
+ EncodedVideoFrameProducer(*encoder)
+ .SetNumInputFrames(4)
+ .SetResolution({kWidth, kHeight})
+ .Encode();
+
+ ASSERT_THAT(frames, SizeIs(4));
+ EXPECT_EQ(frames[0].codec_specific_info.codecSpecific.VP9.temporal_idx, 0);
+ EXPECT_EQ(frames[1].codec_specific_info.codecSpecific.VP9.temporal_idx, 1);
+ EXPECT_EQ(frames[2].codec_specific_info.codecSpecific.VP9.temporal_idx, 0);
+ EXPECT_EQ(frames[3].codec_specific_info.codecSpecific.VP9.temporal_idx, 1);
+}
+
+TEST(Vp9ImplTest, EncodeTemporalLayersWithSvcController) {
+ std::unique_ptr<VideoEncoder> encoder = VP9Encoder::Create();
+ VideoCodec codec_settings = DefaultCodecSettings();
+ codec_settings.VP9()->numberOfTemporalLayers = 2;
+ EXPECT_EQ(encoder->InitEncode(&codec_settings, kSettings),
+ WEBRTC_VIDEO_CODEC_OK);
+
+ std::vector<EncodedVideoFrameProducer::EncodedFrame> frames =
+ EncodedVideoFrameProducer(*encoder)
+ .SetNumInputFrames(4)
+ .SetResolution({kWidth, kHeight})
+ .Encode();
+
+ ASSERT_THAT(frames, SizeIs(4));
+ EXPECT_EQ(frames[0].codec_specific_info.codecSpecific.VP9.temporal_idx, 0);
+ EXPECT_EQ(frames[1].codec_specific_info.codecSpecific.VP9.temporal_idx, 1);
+ EXPECT_EQ(frames[2].codec_specific_info.codecSpecific.VP9.temporal_idx, 0);
+ EXPECT_EQ(frames[3].codec_specific_info.codecSpecific.VP9.temporal_idx, 1);
+  // Verify the codec-agnostic part.
+ ASSERT_TRUE(frames[0].codec_specific_info.generic_frame_info);
+ ASSERT_TRUE(frames[1].codec_specific_info.generic_frame_info);
+ ASSERT_TRUE(frames[2].codec_specific_info.generic_frame_info);
+ ASSERT_TRUE(frames[3].codec_specific_info.generic_frame_info);
+ EXPECT_EQ(frames[0].codec_specific_info.generic_frame_info->temporal_id, 0);
+ EXPECT_EQ(frames[1].codec_specific_info.generic_frame_info->temporal_id, 1);
+ EXPECT_EQ(frames[2].codec_specific_info.generic_frame_info->temporal_id, 0);
+ EXPECT_EQ(frames[3].codec_specific_info.generic_frame_info->temporal_id, 1);
+}
+
+TEST(Vp9ImplTest, EncoderWith2SpatialLayers) {
+ std::unique_ptr<VideoEncoder> encoder = VP9Encoder::Create();
+ VideoCodec codec_settings = DefaultCodecSettings();
+ codec_settings.VP9()->numberOfSpatialLayers = 2;
+ EXPECT_EQ(encoder->InitEncode(&codec_settings, kSettings),
+ WEBRTC_VIDEO_CODEC_OK);
+
+ std::vector<EncodedVideoFrameProducer::EncodedFrame> frames =
+ EncodedVideoFrameProducer(*encoder)
+ .SetNumInputFrames(1)
+ .SetResolution({kWidth, kHeight})
+ .Encode();
+
+ ASSERT_THAT(frames, SizeIs(2));
+ EXPECT_EQ(frames[0].encoded_image.SpatialIndex(), 0);
+ EXPECT_EQ(frames[1].encoded_image.SpatialIndex(), 1);
+}
+
+TEST(Vp9ImplTest, EncodeSpatialLayersWithSvcController) {
+ std::unique_ptr<VideoEncoder> encoder = VP9Encoder::Create();
+ VideoCodec codec_settings = DefaultCodecSettings();
+ codec_settings.VP9()->numberOfSpatialLayers = 2;
+ EXPECT_EQ(encoder->InitEncode(&codec_settings, kSettings),
+ WEBRTC_VIDEO_CODEC_OK);
+
+ std::vector<EncodedVideoFrameProducer::EncodedFrame> frames =
+ EncodedVideoFrameProducer(*encoder)
+ .SetNumInputFrames(2)
+ .SetResolution({kWidth, kHeight})
+ .Encode();
+
+ ASSERT_THAT(frames, SizeIs(4));
+ EXPECT_EQ(frames[0].encoded_image.SpatialIndex(), 0);
+ EXPECT_EQ(frames[1].encoded_image.SpatialIndex(), 1);
+ EXPECT_EQ(frames[2].encoded_image.SpatialIndex(), 0);
+ EXPECT_EQ(frames[3].encoded_image.SpatialIndex(), 1);
+  // Verify the codec-agnostic part.
+ ASSERT_TRUE(frames[0].codec_specific_info.generic_frame_info);
+ ASSERT_TRUE(frames[1].codec_specific_info.generic_frame_info);
+ ASSERT_TRUE(frames[2].codec_specific_info.generic_frame_info);
+ ASSERT_TRUE(frames[3].codec_specific_info.generic_frame_info);
+ EXPECT_EQ(frames[0].codec_specific_info.generic_frame_info->spatial_id, 0);
+ EXPECT_EQ(frames[1].codec_specific_info.generic_frame_info->spatial_id, 1);
+ EXPECT_EQ(frames[2].codec_specific_info.generic_frame_info->spatial_id, 0);
+ EXPECT_EQ(frames[3].codec_specific_info.generic_frame_info->spatial_id, 1);
+}
+
+TEST_F(TestVp9Impl, EncoderExplicitLayering) {
+ // Override default settings.
+ codec_settings_.VP9()->numberOfTemporalLayers = 1;
+ codec_settings_.VP9()->numberOfSpatialLayers = 2;
+
+ codec_settings_.width = 960;
+ codec_settings_.height = 540;
+ codec_settings_.spatialLayers[0].minBitrate = 200;
+ codec_settings_.spatialLayers[0].maxBitrate = 500;
+ codec_settings_.spatialLayers[0].targetBitrate =
+ (codec_settings_.spatialLayers[0].minBitrate +
+ codec_settings_.spatialLayers[0].maxBitrate) /
+ 2;
+ codec_settings_.spatialLayers[0].active = true;
+
+ codec_settings_.spatialLayers[1].minBitrate = 400;
+ codec_settings_.spatialLayers[1].maxBitrate = 1500;
+ codec_settings_.spatialLayers[1].targetBitrate =
+ (codec_settings_.spatialLayers[1].minBitrate +
+ codec_settings_.spatialLayers[1].maxBitrate) /
+ 2;
+ codec_settings_.spatialLayers[1].active = true;
+
+ codec_settings_.spatialLayers[0].width = codec_settings_.width / 2;
+ codec_settings_.spatialLayers[0].height = codec_settings_.height / 2;
+ codec_settings_.spatialLayers[0].maxFramerate = codec_settings_.maxFramerate;
+ codec_settings_.spatialLayers[1].width = codec_settings_.width;
+ codec_settings_.spatialLayers[1].height = codec_settings_.height;
+ codec_settings_.spatialLayers[1].maxFramerate = codec_settings_.maxFramerate;
+
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->InitEncode(&codec_settings_, kSettings));
+
+  // Ensure it fails if the scaling factors differ between the horizontal and
+  // vertical dimensions.
+ codec_settings_.spatialLayers[0].width = codec_settings_.width;
+ codec_settings_.spatialLayers[0].height = codec_settings_.height / 2;
+ codec_settings_.spatialLayers[1].width = codec_settings_.width;
+ codec_settings_.spatialLayers[1].height = codec_settings_.height;
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_ERR_PARAMETER,
+ encoder_->InitEncode(&codec_settings_, kSettings));
+
+  // Ensure it fails if the scaling factor is not a power of two.
+ codec_settings_.spatialLayers[0].width = codec_settings_.width / 3;
+ codec_settings_.spatialLayers[0].height = codec_settings_.height / 3;
+ codec_settings_.spatialLayers[1].width = codec_settings_.width;
+ codec_settings_.spatialLayers[1].height = codec_settings_.height;
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_ERR_PARAMETER,
+ encoder_->InitEncode(&codec_settings_, kSettings));
+}
+
+TEST_F(TestVp9Impl, EnableDisableSpatialLayers) {
+  // Configure the encoder to produce N spatial layers. Encode frames of
+  // layer 0, then enable layer 1 and encode more frames, and so on up to
+  // layer N-1. Then disable the layers one by one in the same way.
+  // Note: the bit rate allocation is high to avoid frame dropping due to rate
+  // control; the encoder should always produce a frame. A dropped frame
+  // indicates a problem, and the test will fail.
+ const size_t num_spatial_layers = 3;
+ const size_t num_frames_to_encode = 5;
+
+ ConfigureSvc(codec_settings_, num_spatial_layers);
+ codec_settings_.SetFrameDropEnabled(true);
+
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->InitEncode(&codec_settings_, kSettings));
+
+ VideoBitrateAllocation bitrate_allocation;
+ for (size_t sl_idx = 0; sl_idx < num_spatial_layers; ++sl_idx) {
+ // Allocate high bit rate to avoid frame dropping due to rate control.
+ bitrate_allocation.SetBitrate(
+ sl_idx, 0,
+ codec_settings_.spatialLayers[sl_idx].targetBitrate * 1000 * 2);
+ encoder_->SetRates(VideoEncoder::RateControlParameters(
+ bitrate_allocation, codec_settings_.maxFramerate));
+
+ for (size_t frame_num = 0; frame_num < num_frames_to_encode; ++frame_num) {
+ SetWaitForEncodedFramesThreshold(sl_idx + 1);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->Encode(NextInputFrame(), nullptr));
+ std::vector<EncodedImage> encoded_frame;
+ std::vector<CodecSpecificInfo> codec_specific_info;
+ ASSERT_TRUE(WaitForEncodedFrames(&encoded_frame, &codec_specific_info));
+ EXPECT_EQ(codec_specific_info[0].codecSpecific.VP9.ss_data_available,
+ frame_num == 0);
+ }
+ }
+
+ for (size_t i = 0; i < num_spatial_layers - 1; ++i) {
+ const size_t sl_idx = num_spatial_layers - i - 1;
+ bitrate_allocation.SetBitrate(sl_idx, 0, 0);
+ encoder_->SetRates(VideoEncoder::RateControlParameters(
+ bitrate_allocation, codec_settings_.maxFramerate));
+
+ for (size_t frame_num = 0; frame_num < num_frames_to_encode; ++frame_num) {
+ SetWaitForEncodedFramesThreshold(sl_idx);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->Encode(NextInputFrame(), nullptr));
+ std::vector<EncodedImage> encoded_frame;
+ std::vector<CodecSpecificInfo> codec_specific_info;
+ ASSERT_TRUE(WaitForEncodedFrames(&encoded_frame, &codec_specific_info));
+ EXPECT_EQ(codec_specific_info[0].codecSpecific.VP9.ss_data_available,
+ frame_num == 0);
+ }
+ }
+}
+
+TEST(Vp9ImplTest, EnableDisableSpatialLayersWithSvcController) {
+ const int num_spatial_layers = 3;
+  // Configure the encoder to produce 3 spatial layers. Encode frames of
+  // layer 0, then enable layer 1 and encode more frames, and so on.
+  // Then disable the layers one by one in the same way.
+  // Note: the bit rate allocation is high to avoid frame dropping due to rate
+  //       control; the encoder should always produce a frame. A dropped
+  //       frame indicates a problem, and the test will fail.
+ std::unique_ptr<VideoEncoder> encoder = VP9Encoder::Create();
+ VideoCodec codec_settings = DefaultCodecSettings();
+ ConfigureSvc(codec_settings, num_spatial_layers);
+ codec_settings.SetFrameDropEnabled(true);
+ EXPECT_EQ(encoder->InitEncode(&codec_settings, kSettings),
+ WEBRTC_VIDEO_CODEC_OK);
+
+ EncodedVideoFrameProducer producer(*encoder);
+ producer.SetResolution({kWidth, kHeight});
+
+  // Encode a key frame to validate that all other frames are delta frames.
+ std::vector<EncodedVideoFrameProducer::EncodedFrame> frames =
+ producer.SetNumInputFrames(1).Encode();
+ ASSERT_THAT(frames, Not(IsEmpty()));
+ EXPECT_TRUE(frames[0].codec_specific_info.template_structure);
+
+ const size_t num_frames_to_encode = 5;
+
+ VideoBitrateAllocation bitrate_allocation;
+ for (size_t sl_idx = 0; sl_idx < num_spatial_layers; ++sl_idx) {
+ // Allocate high bit rate to avoid frame dropping due to rate control.
+ bitrate_allocation.SetBitrate(
+ sl_idx, 0,
+ codec_settings.spatialLayers[sl_idx].targetBitrate * 1000 * 2);
+ encoder->SetRates(VideoEncoder::RateControlParameters(
+ bitrate_allocation, codec_settings.maxFramerate));
+
+ frames = producer.SetNumInputFrames(num_frames_to_encode).Encode();
+ // With (sl_idx+1) spatial layers expect (sl_idx+1) frames per input frame.
+ ASSERT_THAT(frames, SizeIs(num_frames_to_encode * (sl_idx + 1)));
+ for (size_t i = 0; i < frames.size(); ++i) {
+ EXPECT_TRUE(frames[i].codec_specific_info.generic_frame_info);
+ EXPECT_FALSE(frames[i].codec_specific_info.template_structure);
+ }
+ }
+
+ for (int sl_idx = num_spatial_layers - 1; sl_idx > 0; --sl_idx) {
+ bitrate_allocation.SetBitrate(sl_idx, 0, 0);
+ encoder->SetRates(VideoEncoder::RateControlParameters(
+ bitrate_allocation, codec_settings.maxFramerate));
+
+ frames = producer.SetNumInputFrames(num_frames_to_encode).Encode();
+ // With `sl_idx` spatial layer disabled, there are `sl_idx` spatial layers
+ // left.
+ ASSERT_THAT(frames, SizeIs(num_frames_to_encode * sl_idx));
+ for (size_t i = 0; i < frames.size(); ++i) {
+ EXPECT_TRUE(frames[i].codec_specific_info.generic_frame_info);
+ EXPECT_FALSE(frames[i].codec_specific_info.template_structure);
+ }
+ }
+}
+
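+// gMock matcher that passes when an encoded frame carries generic_frame_info
+// with the given (spatial_id, temporal_id) pair; on mismatch it reports the
+// layer the frame actually came from.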
+MATCHER_P2(GenericLayerIs, spatial_id, temporal_id, "") {
+ if (arg.codec_specific_info.generic_frame_info == absl::nullopt) {
+ *result_listener << " miss generic_frame_info";
+ return false;
+ }
+ const auto& layer = *arg.codec_specific_info.generic_frame_info;
+ if (layer.spatial_id != spatial_id || layer.temporal_id != temporal_id) {
+ *result_listener << " frame from layer (" << layer.spatial_id << ", "
+ << layer.temporal_id << ")";
+ return false;
+ }
+ return true;
+}
+
+TEST(Vp9ImplTest, SpatialUpswitchNotAtGOFBoundary) {
+ std::unique_ptr<VideoEncoder> encoder = VP9Encoder::Create();
+ VideoCodec codec_settings = DefaultCodecSettings();
+ ConfigureSvc(codec_settings, /*num_spatial_layers=*/3,
+ /*num_temporal_layers=*/3);
+ codec_settings.SetFrameDropEnabled(true);
+ EXPECT_EQ(encoder->InitEncode(&codec_settings, kSettings),
+ WEBRTC_VIDEO_CODEC_OK);
+
+ EncodedVideoFrameProducer producer(*encoder);
+ producer.SetResolution({kWidth, kHeight});
+
+  // Disable all but spatial_layer = 0.
+ VideoBitrateAllocation bitrate_allocation;
+ int layer_bitrate_bps = codec_settings.spatialLayers[0].targetBitrate * 1000;
+ bitrate_allocation.SetBitrate(0, 0, layer_bitrate_bps);
+ bitrate_allocation.SetBitrate(0, 1, layer_bitrate_bps);
+ bitrate_allocation.SetBitrate(0, 2, layer_bitrate_bps);
+ encoder->SetRates(VideoEncoder::RateControlParameters(
+ bitrate_allocation, codec_settings.maxFramerate));
+ EXPECT_THAT(producer.SetNumInputFrames(3).Encode(),
+ ElementsAre(GenericLayerIs(0, 0), GenericLayerIs(0, 2),
+ GenericLayerIs(0, 1)));
+
+ // Upswitch to spatial_layer = 1
+ layer_bitrate_bps = codec_settings.spatialLayers[1].targetBitrate * 1000;
+ bitrate_allocation.SetBitrate(1, 0, layer_bitrate_bps);
+ bitrate_allocation.SetBitrate(1, 1, layer_bitrate_bps);
+ bitrate_allocation.SetBitrate(1, 2, layer_bitrate_bps);
+ encoder->SetRates(VideoEncoder::RateControlParameters(
+ bitrate_allocation, codec_settings.maxFramerate));
+  // Expect the upswitch not to happen immediately, since there is no S1 frame
+  // that the S1T2 frame can reference.
+ EXPECT_THAT(producer.SetNumInputFrames(1).Encode(),
+ ElementsAre(GenericLayerIs(0, 2)));
+ // Expect spatial upswitch happens now, at T0 frame.
+ EXPECT_THAT(producer.SetNumInputFrames(1).Encode(),
+ ElementsAre(GenericLayerIs(0, 0), GenericLayerIs(1, 0)));
+}
+
+// TODO(bugs.webrtc.org/13442) Enable once a forward fix has landed in WebRTC.
+TEST_F(TestVp9Impl, DISABLED_DisableEnableBaseLayerTriggersKeyFrame) {
+  // Configure the encoder to produce N spatial layers. Encode frames for all
+  // layers, then disable all but the last layer, then re-enable them all.
+ test::ScopedFieldTrials override_field_trials(
+ "WebRTC-Vp9ExternalRefCtrl/Enabled/");
+ const size_t num_spatial_layers = 3;
+ const size_t num_temporal_layers = 3;
+  // Must not be a multiple of the temporal period to exercise all code paths.
+ const size_t num_frames_to_encode = 5;
+
+ ConfigureSvc(codec_settings_, num_spatial_layers, num_temporal_layers);
+ codec_settings_.SetFrameDropEnabled(false);
+ codec_settings_.VP9()->flexibleMode = false;
+ codec_settings_.VP9()->interLayerPred = InterLayerPredMode::kOnKeyPic;
+ codec_settings_.mode = VideoCodecMode::kRealtimeVideo;
+
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->InitEncode(&codec_settings_, kSettings));
+
+ VideoBitrateAllocation bitrate_allocation;
+ for (size_t sl_idx = 0; sl_idx < num_spatial_layers; ++sl_idx) {
+ for (size_t tl_idx = 0; tl_idx < num_temporal_layers; ++tl_idx) {
+ // Allocate high bit rate to avoid frame dropping due to rate control.
+ bitrate_allocation.SetBitrate(
+ sl_idx, tl_idx,
+ codec_settings_.spatialLayers[sl_idx].targetBitrate * 1000 * 2);
+ }
+ }
+ encoder_->SetRates(VideoEncoder::RateControlParameters(
+ bitrate_allocation, codec_settings_.maxFramerate));
+
+ for (size_t frame_num = 0; frame_num < num_frames_to_encode; ++frame_num) {
+ SetWaitForEncodedFramesThreshold(num_spatial_layers);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->Encode(NextInputFrame(), nullptr));
+ std::vector<EncodedImage> encoded_frame;
+ std::vector<CodecSpecificInfo> codec_specific_info;
+ ASSERT_TRUE(WaitForEncodedFrames(&encoded_frame, &codec_specific_info));
+ EXPECT_EQ(codec_specific_info[0].codecSpecific.VP9.ss_data_available,
+ frame_num == 0);
+ }
+
+ // Disable all but top layer.
+ for (size_t sl_idx = 0; sl_idx < num_spatial_layers - 1; ++sl_idx) {
+ for (size_t tl_idx = 0; tl_idx < num_temporal_layers; ++tl_idx) {
+ bitrate_allocation.SetBitrate(sl_idx, tl_idx, 0);
+ }
+ }
+ encoder_->SetRates(VideoEncoder::RateControlParameters(
+ bitrate_allocation, codec_settings_.maxFramerate));
+
+ bool seen_ss_data = false;
+ for (size_t frame_num = 0; frame_num < num_frames_to_encode; ++frame_num) {
+ SetWaitForEncodedFramesThreshold(1);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->Encode(NextInputFrame(), nullptr));
+ std::vector<EncodedImage> encoded_frame;
+ std::vector<CodecSpecificInfo> codec_specific_info;
+ ASSERT_TRUE(WaitForEncodedFrames(&encoded_frame, &codec_specific_info));
+    // SS available immediately after switching on the base temporal layer.
+ if (seen_ss_data) {
+ EXPECT_EQ(codec_specific_info[0].codecSpecific.VP9.ss_data_available,
+ false);
+ } else {
+ EXPECT_EQ(codec_specific_info[0].codecSpecific.VP9.ss_data_available,
+ codec_specific_info[0].codecSpecific.VP9.temporal_idx == 0);
+ seen_ss_data |=
+ codec_specific_info[0].codecSpecific.VP9.ss_data_available;
+ }
+    // No key frames should be generated when disabling layers.
+ EXPECT_EQ(encoded_frame[0]._frameType, VideoFrameType::kVideoFrameDelta);
+ EXPECT_EQ(encoded_frame[0].SpatialIndex().value_or(-1), 2);
+ }
+ EXPECT_TRUE(seen_ss_data);
+
+ // Force key-frame.
+ std::vector<VideoFrameType> frame_types = {VideoFrameType::kVideoFrameKey};
+ SetWaitForEncodedFramesThreshold(1);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->Encode(NextInputFrame(), &frame_types));
+ std::vector<EncodedImage> encoded_frame;
+ std::vector<CodecSpecificInfo> codec_specific_info;
+ ASSERT_TRUE(WaitForEncodedFrames(&encoded_frame, &codec_specific_info));
+ // Key-frame should be produced.
+ EXPECT_EQ(encoded_frame[0]._frameType, VideoFrameType::kVideoFrameKey);
+ EXPECT_EQ(encoded_frame[0].SpatialIndex().value_or(-1), 2);
+
+ // Encode some more frames.
+ for (size_t frame_num = 0; frame_num < num_frames_to_encode; ++frame_num) {
+ SetWaitForEncodedFramesThreshold(1);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->Encode(NextInputFrame(), nullptr));
+ std::vector<EncodedImage> encoded_frame;
+ std::vector<CodecSpecificInfo> codec_specific_info;
+ ASSERT_TRUE(WaitForEncodedFrames(&encoded_frame, &codec_specific_info));
+ EXPECT_EQ(encoded_frame[0]._frameType, VideoFrameType::kVideoFrameDelta);
+ EXPECT_EQ(encoded_frame[0].SpatialIndex().value_or(-1), 2);
+ }
+
+ // Enable the second layer back.
+ // Allocate high bit rate to avoid frame dropping due to rate control.
+ for (size_t tl_idx = 0; tl_idx < num_temporal_layers; ++tl_idx) {
+ bitrate_allocation.SetBitrate(
+ 1, tl_idx, codec_settings_.spatialLayers[0].targetBitrate * 1000 * 2);
+ }
+ encoder_->SetRates(VideoEncoder::RateControlParameters(
+ bitrate_allocation, codec_settings_.maxFramerate));
+
+ for (size_t frame_num = 0; frame_num < num_frames_to_encode; ++frame_num) {
+ SetWaitForEncodedFramesThreshold(2);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->Encode(NextInputFrame(), nullptr));
+ std::vector<EncodedImage> encoded_frame;
+ std::vector<CodecSpecificInfo> codec_specific_info;
+ ASSERT_TRUE(WaitForEncodedFrames(&encoded_frame, &codec_specific_info));
+ ASSERT_EQ(encoded_frame.size(), 2u);
+    // SS available immediately after switching on.
+ EXPECT_EQ(codec_specific_info[0].codecSpecific.VP9.ss_data_available,
+ frame_num == 0);
+ // Keyframe should be generated when enabling lower layers.
+ const VideoFrameType expected_type = frame_num == 0
+ ? VideoFrameType::kVideoFrameKey
+ : VideoFrameType::kVideoFrameDelta;
+ EXPECT_EQ(encoded_frame[0]._frameType, expected_type);
+ EXPECT_EQ(encoded_frame[0].SpatialIndex().value_or(-1), 1);
+ EXPECT_EQ(encoded_frame[1].SpatialIndex().value_or(-1), 2);
+ }
+
+ // Enable the first layer back.
+ // Allocate high bit rate to avoid frame dropping due to rate control.
+ for (size_t tl_idx = 0; tl_idx < num_temporal_layers; ++tl_idx) {
+ bitrate_allocation.SetBitrate(
+ 0, tl_idx, codec_settings_.spatialLayers[1].targetBitrate * 1000 * 2);
+ }
+ encoder_->SetRates(VideoEncoder::RateControlParameters(
+ bitrate_allocation, codec_settings_.maxFramerate));
+
+ for (size_t frame_num = 0; frame_num < num_frames_to_encode; ++frame_num) {
+ SetWaitForEncodedFramesThreshold(num_spatial_layers);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->Encode(NextInputFrame(), nullptr));
+ std::vector<EncodedImage> encoded_frame;
+ std::vector<CodecSpecificInfo> codec_specific_info;
+ ASSERT_TRUE(WaitForEncodedFrames(&encoded_frame, &codec_specific_info));
+ ASSERT_EQ(encoded_frame.size(), 3u);
+    // SS available immediately after switching on.
+ EXPECT_EQ(codec_specific_info[0].codecSpecific.VP9.ss_data_available,
+ frame_num == 0);
+ // Keyframe should be generated when enabling lower layers.
+ const VideoFrameType expected_type = frame_num == 0
+ ? VideoFrameType::kVideoFrameKey
+ : VideoFrameType::kVideoFrameDelta;
+ EXPECT_EQ(encoded_frame[0]._frameType, expected_type);
+ }
+}
+
+// TODO(bugs.webrtc.org/13442) Enable once a forward fix has landed in WebRTC.
+TEST(Vp9ImplTest,
+ DISABLED_DisableEnableBaseLayerWithSvcControllerTriggersKeyFrame) {
+  // Configure the encoder to produce N spatial layers. Encode frames for all
+  // layers, then disable all but the last layer, then re-enable them all.
+ const size_t num_spatial_layers = 3;
+ const size_t num_temporal_layers = 3;
+  // Must not be a multiple of the temporal period to exercise all code paths.
+ const size_t num_frames_to_encode = 5;
+
+ std::unique_ptr<VideoEncoder> encoder = VP9Encoder::Create();
+ VideoCodec codec_settings = DefaultCodecSettings();
+ ConfigureSvc(codec_settings, num_spatial_layers, num_temporal_layers);
+ codec_settings.SetFrameDropEnabled(false);
+ codec_settings.VP9()->flexibleMode = false;
+ codec_settings.VP9()->interLayerPred = InterLayerPredMode::kOnKeyPic;
+ codec_settings.mode = VideoCodecMode::kRealtimeVideo;
+
+ EXPECT_EQ(encoder->InitEncode(&codec_settings, kSettings),
+ WEBRTC_VIDEO_CODEC_OK);
+
+ VideoBitrateAllocation bitrate_allocation;
+ for (size_t sl_idx = 0; sl_idx < num_spatial_layers; ++sl_idx) {
+ for (size_t tl_idx = 0; tl_idx < num_temporal_layers; ++tl_idx) {
+ // Allocate high bit rate to avoid frame dropping due to rate control.
+ bitrate_allocation.SetBitrate(
+ sl_idx, tl_idx,
+ codec_settings.spatialLayers[sl_idx].targetBitrate * 1000 * 2);
+ }
+ }
+ encoder->SetRates(VideoEncoder::RateControlParameters(
+ bitrate_allocation, codec_settings.maxFramerate));
+
+ EncodedVideoFrameProducer producer(*encoder);
+ producer.SetResolution({kWidth, kHeight});
+
+ std::vector<EncodedVideoFrameProducer::EncodedFrame> frames =
+ producer.SetNumInputFrames(num_frames_to_encode).Encode();
+ ASSERT_THAT(frames, SizeIs(num_frames_to_encode * num_spatial_layers));
+
+ // Disable all but top spatial layer.
+ for (size_t sl_idx = 0; sl_idx < num_spatial_layers - 1; ++sl_idx) {
+ for (size_t tl_idx = 0; tl_idx < num_temporal_layers; ++tl_idx) {
+ bitrate_allocation.SetBitrate(sl_idx, tl_idx, 0);
+ }
+ }
+ encoder->SetRates(VideoEncoder::RateControlParameters(
+ bitrate_allocation, codec_settings.maxFramerate));
+
+ frames = producer.SetNumInputFrames(num_frames_to_encode).Encode();
+ EXPECT_THAT(frames, SizeIs(num_frames_to_encode));
+ for (const auto& frame : frames) {
+ // Expect no key-frames generated.
+ EXPECT_FALSE(frame.codec_specific_info.template_structure);
+ ASSERT_TRUE(frame.codec_specific_info.generic_frame_info);
+ EXPECT_EQ(frame.codec_specific_info.generic_frame_info->spatial_id, 2);
+ }
+
+ frames = producer.ForceKeyFrame().SetNumInputFrames(1).Encode();
+ ASSERT_THAT(frames, SizeIs(1));
+ // Key-frame should be produced.
+  EXPECT_EQ(frames[0].encoded_image._frameType,
+            VideoFrameType::kVideoFrameKey);
+ ASSERT_TRUE(frames[0].codec_specific_info.template_structure);
+ ASSERT_TRUE(frames[0].codec_specific_info.generic_frame_info);
+ EXPECT_EQ(frames[0].codec_specific_info.generic_frame_info->spatial_id, 2);
+
+ frames = producer.SetNumInputFrames(num_frames_to_encode).Encode();
+ ASSERT_THAT(frames, SizeIs(num_frames_to_encode));
+ for (const auto& frame : frames) {
+    EXPECT_EQ(frame.encoded_image._frameType,
+              VideoFrameType::kVideoFrameDelta);
+ EXPECT_FALSE(frame.codec_specific_info.template_structure);
+ ASSERT_TRUE(frame.codec_specific_info.generic_frame_info);
+ EXPECT_EQ(frame.codec_specific_info.generic_frame_info->spatial_id, 2);
+ }
+
+ // Enable the second layer back.
+ // Allocate high bit rate to avoid frame dropping due to rate control.
+ for (size_t tl_idx = 0; tl_idx < num_temporal_layers; ++tl_idx) {
+ bitrate_allocation.SetBitrate(
+ 1, tl_idx, codec_settings.spatialLayers[0].targetBitrate * 1000 * 2);
+ }
+ encoder->SetRates(VideoEncoder::RateControlParameters(
+ bitrate_allocation, codec_settings.maxFramerate));
+
+ frames = producer.SetNumInputFrames(num_frames_to_encode).Encode();
+ ASSERT_THAT(frames, SizeIs(num_frames_to_encode * 2));
+  EXPECT_EQ(frames[0].encoded_image._frameType,
+            VideoFrameType::kVideoFrameKey);
+ EXPECT_TRUE(frames[0].codec_specific_info.template_structure);
+ ASSERT_TRUE(frames[0].codec_specific_info.generic_frame_info);
+ EXPECT_EQ(frames[0].codec_specific_info.generic_frame_info->spatial_id, 1);
+ for (size_t i = 1; i < frames.size(); ++i) {
+ EXPECT_EQ(frames[i].encoded_image._frameType,
+ VideoFrameType::kVideoFrameDelta);
+ EXPECT_FALSE(frames[i].codec_specific_info.template_structure);
+ ASSERT_TRUE(frames[i].codec_specific_info.generic_frame_info);
+ EXPECT_EQ(frames[i].codec_specific_info.generic_frame_info->spatial_id,
+ 1 + static_cast<int>(i % 2));
+ }
+
+ // Enable the first layer back.
+ // Allocate high bit rate to avoid frame dropping due to rate control.
+ for (size_t tl_idx = 0; tl_idx < num_temporal_layers; ++tl_idx) {
+ bitrate_allocation.SetBitrate(
+ 0, tl_idx, codec_settings.spatialLayers[1].targetBitrate * 1000 * 2);
+ }
+ encoder->SetRates(VideoEncoder::RateControlParameters(
+ bitrate_allocation, codec_settings.maxFramerate));
+
+ frames = producer.SetNumInputFrames(num_frames_to_encode).Encode();
+ ASSERT_THAT(frames, SizeIs(num_frames_to_encode * 3));
+ EXPECT_TRUE(frames[0].codec_specific_info.template_structure);
+ ASSERT_TRUE(frames[0].codec_specific_info.generic_frame_info);
+ EXPECT_EQ(frames[0].codec_specific_info.generic_frame_info->spatial_id, 0);
+ for (size_t i = 1; i < frames.size(); ++i) {
+ EXPECT_FALSE(frames[i].codec_specific_info.template_structure);
+ ASSERT_TRUE(frames[i].codec_specific_info.generic_frame_info);
+ EXPECT_EQ(frames[i].codec_specific_info.generic_frame_info->spatial_id,
+ static_cast<int>(i % 3));
+ }
+}
+
+TEST_F(TestVp9Impl, DisableEnableBaseLayerTriggersKeyFrameForScreenshare) {
+  // Configure the encoder to produce N spatial layers. Encode frames for all
+  // layers, then disable all but the last layer, then re-enable them all.
+ const size_t num_spatial_layers = 3;
+ const size_t num_frames_to_encode = 5;
+
+ ConfigureSvc(codec_settings_, num_spatial_layers);
+ codec_settings_.SetFrameDropEnabled(false);
+ codec_settings_.mode = VideoCodecMode::kScreensharing;
+ codec_settings_.VP9()->interLayerPred = InterLayerPredMode::kOn;
+ codec_settings_.VP9()->flexibleMode = true;
+
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->InitEncode(&codec_settings_, kSettings));
+
+ VideoBitrateAllocation bitrate_allocation;
+ for (size_t sl_idx = 0; sl_idx < num_spatial_layers; ++sl_idx) {
+ // Allocate high bit rate to avoid frame dropping due to rate control.
+ bitrate_allocation.SetBitrate(
+ sl_idx, 0,
+ codec_settings_.spatialLayers[sl_idx].targetBitrate * 1000 * 2);
+ }
+ encoder_->SetRates(VideoEncoder::RateControlParameters(
+ bitrate_allocation, codec_settings_.maxFramerate));
+
+ for (size_t frame_num = 0; frame_num < num_frames_to_encode; ++frame_num) {
+ SetWaitForEncodedFramesThreshold(num_spatial_layers);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->Encode(NextInputFrame(), nullptr));
+ std::vector<EncodedImage> encoded_frame;
+ std::vector<CodecSpecificInfo> codec_specific_info;
+ ASSERT_TRUE(WaitForEncodedFrames(&encoded_frame, &codec_specific_info));
+ EXPECT_EQ(codec_specific_info[0].codecSpecific.VP9.ss_data_available,
+ frame_num == 0);
+ }
+
+ // Disable all but top layer.
+ for (size_t sl_idx = 0; sl_idx < num_spatial_layers - 1; ++sl_idx) {
+ bitrate_allocation.SetBitrate(sl_idx, 0, 0);
+ }
+ encoder_->SetRates(VideoEncoder::RateControlParameters(
+ bitrate_allocation, codec_settings_.maxFramerate));
+
+ for (size_t frame_num = 0; frame_num < num_frames_to_encode; ++frame_num) {
+ SetWaitForEncodedFramesThreshold(1);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->Encode(NextInputFrame(), nullptr));
+ std::vector<EncodedImage> encoded_frame;
+ std::vector<CodecSpecificInfo> codec_specific_info;
+ ASSERT_TRUE(WaitForEncodedFrames(&encoded_frame, &codec_specific_info));
+    // SS available immediately after switching off.
+ EXPECT_EQ(codec_specific_info[0].codecSpecific.VP9.ss_data_available,
+ frame_num == 0);
+    // No key frames should be generated when disabling layers.
+ EXPECT_EQ(encoded_frame[0]._frameType, VideoFrameType::kVideoFrameDelta);
+ EXPECT_EQ(encoded_frame[0].SpatialIndex().value_or(-1), 2);
+ }
+
+ // Force key-frame.
+ std::vector<VideoFrameType> frame_types = {VideoFrameType::kVideoFrameKey};
+ SetWaitForEncodedFramesThreshold(1);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->Encode(NextInputFrame(), &frame_types));
+ std::vector<EncodedImage> encoded_frame;
+ std::vector<CodecSpecificInfo> codec_specific_info;
+ ASSERT_TRUE(WaitForEncodedFrames(&encoded_frame, &codec_specific_info));
+ // Key-frame should be produced.
+ EXPECT_EQ(encoded_frame[0]._frameType, VideoFrameType::kVideoFrameKey);
+
+ // Enable the second layer back.
+ // Allocate high bit rate to avoid frame dropping due to rate control.
+ bitrate_allocation.SetBitrate(
+ 1, 0, codec_settings_.spatialLayers[0].targetBitrate * 1000 * 2);
+ encoder_->SetRates(VideoEncoder::RateControlParameters(
+ bitrate_allocation, codec_settings_.maxFramerate));
+
+ for (size_t frame_num = 0; frame_num < num_frames_to_encode; ++frame_num) {
+ SetWaitForEncodedFramesThreshold(2);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->Encode(NextInputFrame(), nullptr));
+ std::vector<EncodedImage> encoded_frame;
+ std::vector<CodecSpecificInfo> codec_specific_info;
+ ASSERT_TRUE(WaitForEncodedFrames(&encoded_frame, &codec_specific_info));
+ ASSERT_EQ(encoded_frame.size(), 2u);
+    // SS available immediately after switching on.
+ EXPECT_EQ(codec_specific_info[0].codecSpecific.VP9.ss_data_available,
+ frame_num == 0);
+ // Keyframe should be generated when enabling lower layers.
+ const VideoFrameType expected_type = frame_num == 0
+ ? VideoFrameType::kVideoFrameKey
+ : VideoFrameType::kVideoFrameDelta;
+ EXPECT_EQ(encoded_frame[0]._frameType, expected_type);
+ EXPECT_EQ(encoded_frame[0].SpatialIndex().value_or(-1), 1);
+ EXPECT_EQ(encoded_frame[1].SpatialIndex().value_or(-1), 2);
+ }
+
+ // Enable the first layer back.
+ // Allocate high bit rate to avoid frame dropping due to rate control.
+ bitrate_allocation.SetBitrate(
+ 0, 0, codec_settings_.spatialLayers[1].targetBitrate * 1000 * 2);
+ encoder_->SetRates(VideoEncoder::RateControlParameters(
+ bitrate_allocation, codec_settings_.maxFramerate));
+
+ for (size_t frame_num = 0; frame_num < num_frames_to_encode; ++frame_num) {
+ SetWaitForEncodedFramesThreshold(num_spatial_layers);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->Encode(NextInputFrame(), nullptr));
+ std::vector<EncodedImage> encoded_frame;
+ std::vector<CodecSpecificInfo> codec_specific_info;
+ ASSERT_TRUE(WaitForEncodedFrames(&encoded_frame, &codec_specific_info));
+ ASSERT_EQ(encoded_frame.size(), 3u);
+    // SS available immediately after switching on.
+ EXPECT_EQ(codec_specific_info[0].codecSpecific.VP9.ss_data_available,
+ frame_num == 0);
+ // Keyframe should be generated when enabling lower layers.
+ const VideoFrameType expected_type = frame_num == 0
+ ? VideoFrameType::kVideoFrameKey
+ : VideoFrameType::kVideoFrameDelta;
+ EXPECT_EQ(encoded_frame[0]._frameType, expected_type);
+ }
+}
+
+TEST_F(TestVp9Impl, EndOfPicture) {
+ const size_t num_spatial_layers = 2;
+ ConfigureSvc(codec_settings_, num_spatial_layers);
+
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->InitEncode(&codec_settings_, kSettings));
+
+ // Encode both base and upper layers. Check that end-of-superframe flag is
+ // set on upper layer frame but not on base layer frame.
+ VideoBitrateAllocation bitrate_allocation;
+ bitrate_allocation.SetBitrate(
+ 0, 0, codec_settings_.spatialLayers[0].targetBitrate * 1000);
+ bitrate_allocation.SetBitrate(
+ 1, 0, codec_settings_.spatialLayers[1].targetBitrate * 1000);
+ encoder_->SetRates(VideoEncoder::RateControlParameters(
+ bitrate_allocation, codec_settings_.maxFramerate));
+ SetWaitForEncodedFramesThreshold(2);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(NextInputFrame(), nullptr));
+
+ std::vector<EncodedImage> frames;
+ std::vector<CodecSpecificInfo> codec_specific;
+ ASSERT_TRUE(WaitForEncodedFrames(&frames, &codec_specific));
+ EXPECT_FALSE(codec_specific[0].end_of_picture);
+ EXPECT_TRUE(codec_specific[1].end_of_picture);
+
+ // Encode only base layer. Check that end-of-superframe flag is
+ // set on base layer frame.
+ bitrate_allocation.SetBitrate(1, 0, 0);
+ encoder_->SetRates(VideoEncoder::RateControlParameters(
+ bitrate_allocation, codec_settings_.maxFramerate));
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->InitEncode(&codec_settings_, kSettings));
+
+ SetWaitForEncodedFramesThreshold(1);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(NextInputFrame(), nullptr));
+
+ ASSERT_TRUE(WaitForEncodedFrames(&frames, &codec_specific));
+ EXPECT_FALSE(frames[0].SpatialIndex());
+ EXPECT_TRUE(codec_specific[0].end_of_picture);
+}
+
+TEST_F(TestVp9Impl, InterLayerPred) {
+ const size_t num_spatial_layers = 2;
+ ConfigureSvc(codec_settings_, num_spatial_layers);
+ codec_settings_.SetFrameDropEnabled(false);
+
+ VideoBitrateAllocation bitrate_allocation;
+ for (size_t i = 0; i < num_spatial_layers; ++i) {
+ bitrate_allocation.SetBitrate(
+ i, 0, codec_settings_.spatialLayers[i].targetBitrate * 1000);
+ }
+
+ const std::vector<InterLayerPredMode> inter_layer_pred_modes = {
+ InterLayerPredMode::kOff, InterLayerPredMode::kOn,
+ InterLayerPredMode::kOnKeyPic};
+
+ for (const InterLayerPredMode inter_layer_pred : inter_layer_pred_modes) {
+ codec_settings_.VP9()->interLayerPred = inter_layer_pred;
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->InitEncode(&codec_settings_, kSettings));
+
+ encoder_->SetRates(VideoEncoder::RateControlParameters(
+ bitrate_allocation, codec_settings_.maxFramerate));
+
+ SetWaitForEncodedFramesThreshold(2);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->Encode(NextInputFrame(), nullptr));
+
+ std::vector<EncodedImage> frames;
+ std::vector<CodecSpecificInfo> codec_specific;
+ ASSERT_TRUE(WaitForEncodedFrames(&frames, &codec_specific));
+
+ // Key frame.
+ ASSERT_EQ(frames[0].SpatialIndex(), 0);
+ ASSERT_FALSE(codec_specific[0].codecSpecific.VP9.inter_pic_predicted);
+ EXPECT_FALSE(codec_specific[0].codecSpecific.VP9.inter_layer_predicted);
+ EXPECT_EQ(codec_specific[0].codecSpecific.VP9.non_ref_for_inter_layer_pred,
+ inter_layer_pred == InterLayerPredMode::kOff);
+ EXPECT_TRUE(codec_specific[0].codecSpecific.VP9.ss_data_available);
+
+ ASSERT_EQ(frames[1].SpatialIndex(), 1);
+ ASSERT_FALSE(codec_specific[1].codecSpecific.VP9.inter_pic_predicted);
+ EXPECT_EQ(codec_specific[1].codecSpecific.VP9.inter_layer_predicted,
+ inter_layer_pred == InterLayerPredMode::kOn ||
+ inter_layer_pred == InterLayerPredMode::kOnKeyPic);
+ EXPECT_EQ(codec_specific[1].codecSpecific.VP9.ss_data_available,
+ inter_layer_pred == InterLayerPredMode::kOff);
+ EXPECT_TRUE(
+ codec_specific[1].codecSpecific.VP9.non_ref_for_inter_layer_pred);
+
+ // Delta frame.
+ SetWaitForEncodedFramesThreshold(2);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->Encode(NextInputFrame(), nullptr));
+ ASSERT_TRUE(WaitForEncodedFrames(&frames, &codec_specific));
+
+ ASSERT_EQ(frames[0].SpatialIndex(), 0);
+ ASSERT_TRUE(codec_specific[0].codecSpecific.VP9.inter_pic_predicted);
+ EXPECT_FALSE(codec_specific[0].codecSpecific.VP9.inter_layer_predicted);
+ EXPECT_EQ(codec_specific[0].codecSpecific.VP9.non_ref_for_inter_layer_pred,
+ inter_layer_pred != InterLayerPredMode::kOn);
+ EXPECT_FALSE(codec_specific[0].codecSpecific.VP9.ss_data_available);
+
+ ASSERT_EQ(frames[1].SpatialIndex(), 1);
+ ASSERT_TRUE(codec_specific[1].codecSpecific.VP9.inter_pic_predicted);
+ EXPECT_EQ(codec_specific[1].codecSpecific.VP9.inter_layer_predicted,
+ inter_layer_pred == InterLayerPredMode::kOn);
+ EXPECT_TRUE(
+ codec_specific[1].codecSpecific.VP9.non_ref_for_inter_layer_pred);
+ EXPECT_FALSE(codec_specific[1].codecSpecific.VP9.ss_data_available);
+ }
+}
+
+TEST_F(TestVp9Impl,
+ EnablingUpperLayerTriggersKeyFrameIfInterLayerPredIsDisabled) {
+ const size_t num_spatial_layers = 3;
+ const size_t num_frames_to_encode = 2;
+
+ ConfigureSvc(codec_settings_, num_spatial_layers);
+ codec_settings_.SetFrameDropEnabled(false);
+
+ const std::vector<InterLayerPredMode> inter_layer_pred_modes = {
+ InterLayerPredMode::kOff, InterLayerPredMode::kOn,
+ InterLayerPredMode::kOnKeyPic};
+
+ for (const InterLayerPredMode inter_layer_pred : inter_layer_pred_modes) {
+ codec_settings_.VP9()->interLayerPred = inter_layer_pred;
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->InitEncode(&codec_settings_, kSettings));
+
+ VideoBitrateAllocation bitrate_allocation;
+ for (size_t sl_idx = 0; sl_idx < num_spatial_layers; ++sl_idx) {
+ bitrate_allocation.SetBitrate(
+ sl_idx, 0,
+ codec_settings_.spatialLayers[sl_idx].targetBitrate * 1000);
+ encoder_->SetRates(VideoEncoder::RateControlParameters(
+ bitrate_allocation, codec_settings_.maxFramerate));
+
+ for (size_t frame_num = 0; frame_num < num_frames_to_encode;
+ ++frame_num) {
+ SetWaitForEncodedFramesThreshold(sl_idx + 1);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->Encode(NextInputFrame(), nullptr));
+ std::vector<EncodedImage> encoded_frame;
+ std::vector<CodecSpecificInfo> codec_specific_info;
+ ASSERT_TRUE(WaitForEncodedFrames(&encoded_frame, &codec_specific_info));
+
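+        // Enabling an upper layer forces a key frame unless the new layer can
+        // be predicted from the lower one (kOn).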
+ const bool is_first_upper_layer_frame = (sl_idx > 0 && frame_num == 0);
+ if (is_first_upper_layer_frame) {
+ if (inter_layer_pred == InterLayerPredMode::kOn) {
+ EXPECT_EQ(encoded_frame[0]._frameType,
+ VideoFrameType::kVideoFrameDelta);
+ } else {
+ EXPECT_EQ(encoded_frame[0]._frameType,
+ VideoFrameType::kVideoFrameKey);
+ }
+ } else if (sl_idx == 0 && frame_num == 0) {
+ EXPECT_EQ(encoded_frame[0]._frameType,
+ VideoFrameType::kVideoFrameKey);
+ } else {
+ for (size_t i = 0; i <= sl_idx; ++i) {
+ EXPECT_EQ(encoded_frame[i]._frameType,
+ VideoFrameType::kVideoFrameDelta);
+ }
+ }
+ }
+ }
+ }
+}
+
+TEST_F(TestVp9Impl,
+ EnablingUpperLayerUnsetsInterPicPredictedInInterlayerPredModeOn) {
+ const size_t num_spatial_layers = 3;
+ const size_t num_frames_to_encode = 2;
+
+ ConfigureSvc(codec_settings_, num_spatial_layers);
+ codec_settings_.SetFrameDropEnabled(false);
+ codec_settings_.VP9()->flexibleMode = false;
+
+ const std::vector<InterLayerPredMode> inter_layer_pred_modes = {
+ InterLayerPredMode::kOff, InterLayerPredMode::kOn,
+ InterLayerPredMode::kOnKeyPic};
+
+ for (const InterLayerPredMode inter_layer_pred : inter_layer_pred_modes) {
+ codec_settings_.VP9()->interLayerPred = inter_layer_pred;
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->InitEncode(&codec_settings_, kSettings));
+
+ VideoBitrateAllocation bitrate_allocation;
+ for (size_t sl_idx = 0; sl_idx < num_spatial_layers; ++sl_idx) {
+ bitrate_allocation.SetBitrate(
+ sl_idx, 0,
+ codec_settings_.spatialLayers[sl_idx].targetBitrate * 1000);
+ encoder_->SetRates(VideoEncoder::RateControlParameters(
+ bitrate_allocation, codec_settings_.maxFramerate));
+
+ for (size_t frame_num = 0; frame_num < num_frames_to_encode;
+ ++frame_num) {
+ SetWaitForEncodedFramesThreshold(sl_idx + 1);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->Encode(NextInputFrame(), nullptr));
+ std::vector<EncodedImage> encoded_frame;
+ std::vector<CodecSpecificInfo> codec_specific_info;
+ ASSERT_TRUE(WaitForEncodedFrames(&encoded_frame, &codec_specific_info));
+
+ ASSERT_EQ(codec_specific_info.size(), sl_idx + 1);
+
+ for (size_t i = 0; i <= sl_idx; ++i) {
+ const bool is_keyframe =
+ encoded_frame[0]._frameType == VideoFrameType::kVideoFrameKey;
+ const bool is_first_upper_layer_frame =
+ (i == sl_idx && frame_num == 0);
+        // Inter-frame references are present unless this is a keyframe or the
+        // first frame of a newly activated upper layer.
+ const bool expect_no_references =
+ is_keyframe || (is_first_upper_layer_frame &&
+ inter_layer_pred == InterLayerPredMode::kOn);
+ EXPECT_EQ(
+ codec_specific_info[i].codecSpecific.VP9.inter_pic_predicted,
+ !expect_no_references);
+ }
+ }
+ }
+ }
+}
+
+TEST_F(TestVp9Impl, EnablingDisablingUpperLayerInTheSameGof) {
+ const size_t num_spatial_layers = 2;
+ const size_t num_temporal_layers = 2;
+
+ ConfigureSvc(codec_settings_, num_spatial_layers, num_temporal_layers);
+ codec_settings_.SetFrameDropEnabled(false);
+ codec_settings_.VP9()->flexibleMode = false;
+
+ codec_settings_.VP9()->interLayerPred = InterLayerPredMode::kOn;
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->InitEncode(&codec_settings_, kSettings));
+
+ VideoBitrateAllocation bitrate_allocation;
+
+ // Enable both spatial and both temporal layers.
+ bitrate_allocation.SetBitrate(
+ 0, 0, codec_settings_.spatialLayers[0].targetBitrate * 1000 / 2);
+ bitrate_allocation.SetBitrate(
+ 0, 1, codec_settings_.spatialLayers[0].targetBitrate * 1000 / 2);
+ bitrate_allocation.SetBitrate(
+ 1, 0, codec_settings_.spatialLayers[1].targetBitrate * 1000 / 2);
+ bitrate_allocation.SetBitrate(
+ 1, 1, codec_settings_.spatialLayers[1].targetBitrate * 1000 / 2);
+ encoder_->SetRates(VideoEncoder::RateControlParameters(
+ bitrate_allocation, codec_settings_.maxFramerate));
+
+ std::vector<EncodedImage> encoded_frame;
+ std::vector<CodecSpecificInfo> codec_specific_info;
+
+ // Encode 3 frames.
+ for (int i = 0; i < 3; ++i) {
+ SetWaitForEncodedFramesThreshold(2);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->Encode(NextInputFrame(), nullptr));
+ ASSERT_TRUE(WaitForEncodedFrames(&encoded_frame, &codec_specific_info));
+ ASSERT_EQ(codec_specific_info.size(), 2u);
+ }
+
+ // Disable SL1 layer.
+ bitrate_allocation.SetBitrate(1, 0, 0);
+ bitrate_allocation.SetBitrate(1, 1, 0);
+ encoder_->SetRates(VideoEncoder::RateControlParameters(
+ bitrate_allocation, codec_settings_.maxFramerate));
+
+ // Encode 1 frame.
+ SetWaitForEncodedFramesThreshold(1);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(NextInputFrame(), nullptr));
+ ASSERT_TRUE(WaitForEncodedFrames(&encoded_frame, &codec_specific_info));
+ ASSERT_EQ(codec_specific_info.size(), 1u);
+ EXPECT_EQ(encoded_frame[0]._frameType, VideoFrameType::kVideoFrameDelta);
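+  // With two temporal layers the pattern is 0-1, so this fourth frame lands
+  // on TL1.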
+ EXPECT_EQ(codec_specific_info[0].codecSpecific.VP9.temporal_idx, 1);
+ EXPECT_EQ(codec_specific_info[0].codecSpecific.VP9.inter_pic_predicted, true);
+
+ // Enable SL1 layer.
+ bitrate_allocation.SetBitrate(
+ 1, 0, codec_settings_.spatialLayers[1].targetBitrate * 1000 / 2);
+ bitrate_allocation.SetBitrate(
+ 1, 1, codec_settings_.spatialLayers[1].targetBitrate * 1000 / 2);
+ encoder_->SetRates(VideoEncoder::RateControlParameters(
+ bitrate_allocation, codec_settings_.maxFramerate));
+
+ // Encode 1 frame.
+ SetWaitForEncodedFramesThreshold(2);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(NextInputFrame(), nullptr));
+ ASSERT_TRUE(WaitForEncodedFrames(&encoded_frame, &codec_specific_info));
+ ASSERT_EQ(codec_specific_info.size(), 2u);
+ EXPECT_EQ(encoded_frame[0]._frameType, VideoFrameType::kVideoFrameDelta);
+ EXPECT_EQ(codec_specific_info[0].codecSpecific.VP9.temporal_idx, 0);
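+  // Within the same GOF both layers can still use temporal prediction.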
+ EXPECT_EQ(codec_specific_info[0].codecSpecific.VP9.inter_pic_predicted, true);
+ EXPECT_EQ(codec_specific_info[1].codecSpecific.VP9.inter_pic_predicted, true);
+}
+
+TEST_F(TestVp9Impl, EnablingDisablingUpperLayerAccrossGof) {
+ const size_t num_spatial_layers = 2;
+ const size_t num_temporal_layers = 2;
+
+ ConfigureSvc(codec_settings_, num_spatial_layers, num_temporal_layers);
+ codec_settings_.SetFrameDropEnabled(false);
+ codec_settings_.VP9()->flexibleMode = false;
+
+ codec_settings_.VP9()->interLayerPred = InterLayerPredMode::kOn;
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->InitEncode(&codec_settings_, kSettings));
+
+ VideoBitrateAllocation bitrate_allocation;
+
+ // Enable both spatial and both temporal layers.
+ bitrate_allocation.SetBitrate(
+ 0, 0, codec_settings_.spatialLayers[0].targetBitrate * 1000 / 2);
+ bitrate_allocation.SetBitrate(
+ 0, 1, codec_settings_.spatialLayers[0].targetBitrate * 1000 / 2);
+ bitrate_allocation.SetBitrate(
+ 1, 0, codec_settings_.spatialLayers[1].targetBitrate * 1000 / 2);
+ bitrate_allocation.SetBitrate(
+ 1, 1, codec_settings_.spatialLayers[1].targetBitrate * 1000 / 2);
+ encoder_->SetRates(VideoEncoder::RateControlParameters(
+ bitrate_allocation, codec_settings_.maxFramerate));
+
+ std::vector<EncodedImage> encoded_frame;
+ std::vector<CodecSpecificInfo> codec_specific_info;
+
+ // Encode 3 frames.
+ for (int i = 0; i < 3; ++i) {
+ SetWaitForEncodedFramesThreshold(2);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->Encode(NextInputFrame(), nullptr));
+ ASSERT_TRUE(WaitForEncodedFrames(&encoded_frame, &codec_specific_info));
+ ASSERT_EQ(codec_specific_info.size(), 2u);
+ }
+
+ // Disable SL1 layer.
+ bitrate_allocation.SetBitrate(1, 0, 0);
+ bitrate_allocation.SetBitrate(1, 1, 0);
+ encoder_->SetRates(VideoEncoder::RateControlParameters(
+ bitrate_allocation, codec_settings_.maxFramerate));
+
+  // Encode 11 frames: more than the GOF length of 2, and an odd count so the
+  // last frame lands on TL1.
+ for (int i = 0; i < 11; ++i) {
+ SetWaitForEncodedFramesThreshold(1);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->Encode(NextInputFrame(), nullptr));
+ ASSERT_TRUE(WaitForEncodedFrames(&encoded_frame, &codec_specific_info));
+ ASSERT_EQ(codec_specific_info.size(), 1u);
+ EXPECT_EQ(encoded_frame[0]._frameType, VideoFrameType::kVideoFrameDelta);
+ EXPECT_EQ(codec_specific_info[0].codecSpecific.VP9.temporal_idx, 1 - i % 2);
+ EXPECT_EQ(codec_specific_info[0].codecSpecific.VP9.inter_pic_predicted,
+ true);
+ }
+
+ // Enable SL1 layer.
+ bitrate_allocation.SetBitrate(
+ 1, 0, codec_settings_.spatialLayers[1].targetBitrate * 1000 / 2);
+ bitrate_allocation.SetBitrate(
+ 1, 1, codec_settings_.spatialLayers[1].targetBitrate * 1000 / 2);
+ encoder_->SetRates(VideoEncoder::RateControlParameters(
+ bitrate_allocation, codec_settings_.maxFramerate));
+
+ // Encode 1 frame.
+ SetWaitForEncodedFramesThreshold(2);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(NextInputFrame(), nullptr));
+ ASSERT_TRUE(WaitForEncodedFrames(&encoded_frame, &codec_specific_info));
+ ASSERT_EQ(codec_specific_info.size(), 2u);
+ EXPECT_EQ(encoded_frame[0]._frameType, VideoFrameType::kVideoFrameDelta);
+ EXPECT_EQ(codec_specific_info[0].codecSpecific.VP9.temporal_idx, 0);
+ EXPECT_EQ(codec_specific_info[0].codecSpecific.VP9.inter_pic_predicted, true);
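+  // Re-enabled across a GOF boundary, SL1 has no valid reference and is
+  // encoded without temporal prediction.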
+ EXPECT_EQ(codec_specific_info[1].codecSpecific.VP9.inter_pic_predicted,
+ false);
+}
+
+TEST_F(TestVp9Impl, EnablingNewLayerInScreenshareForcesAllLayersWithSS) {
+ const size_t num_spatial_layers = 3;
+  // Chosen by hand; with the configured per-layer max framerates the 2nd
+  // frame is dropped.
+ const size_t num_frames_to_encode_before_drop = 1;
+
+ codec_settings_.maxFramerate = 30;
+ ConfigureSvc(codec_settings_, num_spatial_layers);
+ codec_settings_.spatialLayers[0].maxFramerate = 5.0;
+  // Use 30 for SL1 instead of 10 so that, even if the SL0 frame is dropped
+  // due to framerate capping, we still get back at least the middle layer.
+  // This simplifies the test.
+ codec_settings_.spatialLayers[1].maxFramerate = 30.0;
+ codec_settings_.spatialLayers[2].maxFramerate = 30.0;
+ codec_settings_.SetFrameDropEnabled(false);
+ codec_settings_.mode = VideoCodecMode::kScreensharing;
+ codec_settings_.VP9()->interLayerPred = InterLayerPredMode::kOn;
+ codec_settings_.VP9()->flexibleMode = true;
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->InitEncode(&codec_settings_, kSettings));
+
+ // Enable all but the last layer.
+ VideoBitrateAllocation bitrate_allocation;
+ for (size_t sl_idx = 0; sl_idx < num_spatial_layers - 1; ++sl_idx) {
+ bitrate_allocation.SetBitrate(
+ sl_idx, 0, codec_settings_.spatialLayers[sl_idx].targetBitrate * 1000);
+ }
+ encoder_->SetRates(VideoEncoder::RateControlParameters(
+ bitrate_allocation, codec_settings_.maxFramerate));
+
+ // Encode enough frames to force drop due to framerate capping.
+ for (size_t frame_num = 0; frame_num < num_frames_to_encode_before_drop;
+ ++frame_num) {
+ SetWaitForEncodedFramesThreshold(num_spatial_layers - 1);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->Encode(NextInputFrame(), nullptr));
+ std::vector<EncodedImage> encoded_frames;
+ std::vector<CodecSpecificInfo> codec_specific_info;
+ ASSERT_TRUE(WaitForEncodedFrames(&encoded_frames, &codec_specific_info));
+ }
+
+ // Enable the last layer.
+ bitrate_allocation.SetBitrate(
+ num_spatial_layers - 1, 0,
+ codec_settings_.spatialLayers[num_spatial_layers - 1].targetBitrate *
+ 1000);
+ encoder_->SetRates(VideoEncoder::RateControlParameters(
+ bitrate_allocation, codec_settings_.maxFramerate));
+
+  // All layers encode this frame, even though the framerate cap would
+  // otherwise drop the lowest layer.
+ SetWaitForEncodedFramesThreshold(num_spatial_layers);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(NextInputFrame(), nullptr));
+ // Now all 3 layers should be encoded.
+ std::vector<EncodedImage> encoded_frames;
+ std::vector<CodecSpecificInfo> codec_specific_info;
+ ASSERT_TRUE(WaitForEncodedFrames(&encoded_frames, &codec_specific_info));
+ EXPECT_EQ(encoded_frames.size(), 3u);
+ // Scalability structure has to be triggered.
+ EXPECT_TRUE(codec_specific_info[0].codecSpecific.VP9.ss_data_available);
+}
+
+TEST_F(TestVp9Impl, ScreenshareFrameDropping) {
+ const int num_spatial_layers = 3;
+ const int num_frames_to_detect_drops = 2;
+
+ codec_settings_.maxFramerate = 30;
+ ConfigureSvc(codec_settings_, num_spatial_layers);
+  // Use 30 fps for all spatial layers because it simplifies the test.
+ codec_settings_.spatialLayers[0].maxFramerate = 30.0;
+ codec_settings_.spatialLayers[1].maxFramerate = 30.0;
+ codec_settings_.spatialLayers[2].maxFramerate = 30.0;
+ codec_settings_.SetFrameDropEnabled(true);
+ codec_settings_.mode = VideoCodecMode::kScreensharing;
+ codec_settings_.VP9()->interLayerPred = InterLayerPredMode::kOn;
+ codec_settings_.VP9()->flexibleMode = true;
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->InitEncode(&codec_settings_, kSettings));
+
+ // Enable all but the last layer.
+ VideoBitrateAllocation bitrate_allocation;
+ // Very low bitrate for the lowest spatial layer to ensure rate-control drops.
+ bitrate_allocation.SetBitrate(0, 0, 1000);
+ bitrate_allocation.SetBitrate(
+ 1, 0, codec_settings_.spatialLayers[1].targetBitrate * 1000);
+ // Disable highest layer.
+ bitrate_allocation.SetBitrate(2, 0, 0);
+
+ encoder_->SetRates(VideoEncoder::RateControlParameters(
+ bitrate_allocation, codec_settings_.maxFramerate));
+
+ bool frame_dropped = false;
+ // Encode enough frames to force drop due to rate-control.
+ for (size_t frame_num = 0; frame_num < num_frames_to_detect_drops;
+ ++frame_num) {
+ SetWaitForEncodedFramesThreshold(1);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->Encode(NextInputFrame(), nullptr));
+ std::vector<EncodedImage> encoded_frames;
+ std::vector<CodecSpecificInfo> codec_specific_info;
+ ASSERT_TRUE(WaitForEncodedFrames(&encoded_frames, &codec_specific_info));
+ EXPECT_LE(encoded_frames.size(), 2u);
+ EXPECT_GE(encoded_frames.size(), 1u);
+ if (encoded_frames.size() == 1) {
+ frame_dropped = true;
+      // The dropped frame was the SL0 frame; the remaining frame is SL1.
+ EXPECT_EQ(encoded_frames[0].SpatialIndex(), 1);
+ }
+ }
+ EXPECT_TRUE(frame_dropped);
+
+ // Enable the last layer.
+ bitrate_allocation.SetBitrate(
+ 2, 0, codec_settings_.spatialLayers[2].targetBitrate * 1000);
+ encoder_->SetRates(VideoEncoder::RateControlParameters(
+ bitrate_allocation, codec_settings_.maxFramerate));
+ SetWaitForEncodedFramesThreshold(1);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(NextInputFrame(), nullptr));
+ std::vector<EncodedImage> encoded_frames;
+ std::vector<CodecSpecificInfo> codec_specific_info;
+ ASSERT_TRUE(WaitForEncodedFrames(&encoded_frames, &codec_specific_info));
+ // No drop allowed.
+ EXPECT_EQ(encoded_frames.size(), 3u);
+
+  // Verify that frame dropping is re-enabled.
+ frame_dropped = false;
+ // Encode enough frames to force drop due to rate-control.
+ for (size_t frame_num = 0; frame_num < num_frames_to_detect_drops;
+ ++frame_num) {
+ SetWaitForEncodedFramesThreshold(1);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->Encode(NextInputFrame(), nullptr));
+ std::vector<EncodedImage> encoded_frames;
+ std::vector<CodecSpecificInfo> codec_specific_info;
+ ASSERT_TRUE(WaitForEncodedFrames(&encoded_frames, &codec_specific_info));
+ EXPECT_LE(encoded_frames.size(), 3u);
+ EXPECT_GE(encoded_frames.size(), 2u);
+ if (encoded_frames.size() == 2) {
+ frame_dropped = true;
+      // The dropped frame was the SL0 frame; SL1 and SL2 remain.
+ EXPECT_EQ(encoded_frames[0].SpatialIndex(), 1);
+ EXPECT_EQ(encoded_frames[1].SpatialIndex(), 2);
+ }
+ }
+ EXPECT_TRUE(frame_dropped);
+}
+
+TEST_F(TestVp9Impl, RemovingLayerIsNotDelayedInScreenshareAndAddsSsInfo) {
+ const size_t num_spatial_layers = 3;
+  // Chosen by hand; with the configured per-layer max framerates the 2nd
+  // frame is dropped.
+ const size_t num_frames_to_encode_before_drop = 1;
+  // Chosen by hand; exactly 5 frames are dropped for an input fps of 30 and
+  // a max framerate of 5.
+ const size_t num_dropped_frames = 5;
+
+ codec_settings_.maxFramerate = 30;
+ ConfigureSvc(codec_settings_, num_spatial_layers);
+ codec_settings_.spatialLayers[0].maxFramerate = 5.0;
+  // Use 30 for SL1 instead of 5 so that, even if the SL0 frame is dropped
+  // due to framerate capping, we still get back at least the middle layer.
+  // This simplifies the test.
+ codec_settings_.spatialLayers[1].maxFramerate = 30.0;
+ codec_settings_.spatialLayers[2].maxFramerate = 30.0;
+ codec_settings_.SetFrameDropEnabled(false);
+ codec_settings_.mode = VideoCodecMode::kScreensharing;
+ codec_settings_.VP9()->interLayerPred = InterLayerPredMode::kOn;
+ codec_settings_.VP9()->flexibleMode = true;
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->InitEncode(&codec_settings_, kSettings));
+
+ // All layers are enabled from the start.
+ VideoBitrateAllocation bitrate_allocation;
+ for (size_t sl_idx = 0; sl_idx < num_spatial_layers; ++sl_idx) {
+ bitrate_allocation.SetBitrate(
+ sl_idx, 0, codec_settings_.spatialLayers[sl_idx].targetBitrate * 1000);
+ }
+ encoder_->SetRates(VideoEncoder::RateControlParameters(
+ bitrate_allocation, codec_settings_.maxFramerate));
+
+ // Encode enough frames to force drop due to framerate capping.
+ for (size_t frame_num = 0; frame_num < num_frames_to_encode_before_drop;
+ ++frame_num) {
+ SetWaitForEncodedFramesThreshold(num_spatial_layers);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->Encode(NextInputFrame(), nullptr));
+ std::vector<EncodedImage> encoded_frames;
+ std::vector<CodecSpecificInfo> codec_specific_info;
+ ASSERT_TRUE(WaitForEncodedFrames(&encoded_frames, &codec_specific_info));
+ }
+
+  // Now the first layer should produce no frames.
+ for (size_t frame_num = 0; frame_num < num_dropped_frames - 2; ++frame_num) {
+ SetWaitForEncodedFramesThreshold(2);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->Encode(NextInputFrame(), nullptr));
+    // The SL0 frame is dropped due to the frame rate cap; SL1 and SL2 come
+    // back.
+ std::vector<EncodedImage> encoded_frames;
+ std::vector<CodecSpecificInfo> codec_specific_info;
+ ASSERT_TRUE(WaitForEncodedFrames(&encoded_frames, &codec_specific_info));
+ // First layer is skipped.
+ EXPECT_EQ(encoded_frames[0].SpatialIndex().value_or(-1), 1);
+ }
+
+ // Disable the last layer.
+ bitrate_allocation.SetBitrate(num_spatial_layers - 1, 0, 0);
+ encoder_->SetRates(VideoEncoder::RateControlParameters(
+ bitrate_allocation, codec_settings_.maxFramerate));
+
+  // The first layer is still expected to be dropped. The last layer must be
+  // disabled as well.
+ for (size_t frame_num = num_dropped_frames - 2;
+ frame_num < num_dropped_frames; ++frame_num) {
+ // Expect back one frame.
+ SetWaitForEncodedFramesThreshold(1);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->Encode(NextInputFrame(), nullptr));
+    // The SL0 frame is dropped due to the frame rate cap and the last layer
+    // is now disabled, so only the SL1 frame is returned.
+ std::vector<EncodedImage> encoded_frames;
+ std::vector<CodecSpecificInfo> codec_specific_info;
+ ASSERT_TRUE(WaitForEncodedFrames(&encoded_frames, &codec_specific_info));
+ // First layer is skipped.
+ EXPECT_EQ(encoded_frames[0].SpatialIndex().value_or(-1), 1);
+ // No SS data on non-base spatial layer.
+ EXPECT_FALSE(codec_specific_info[0].codecSpecific.VP9.ss_data_available);
+ }
+
+ SetWaitForEncodedFramesThreshold(2);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(NextInputFrame(), nullptr));
+ std::vector<EncodedImage> encoded_frames;
+ std::vector<CodecSpecificInfo> codec_specific_info;
+ ASSERT_TRUE(WaitForEncodedFrames(&encoded_frames, &codec_specific_info));
+ // First layer is not skipped now.
+ EXPECT_EQ(encoded_frames[0].SpatialIndex().value_or(-1), 0);
+ // SS data should be present.
+ EXPECT_TRUE(codec_specific_info[0].codecSpecific.VP9.ss_data_available);
+}
+
+TEST_F(TestVp9Impl, DisableNewLayerInVideoDelaysSsInfoTillTL0) {
+ const size_t num_spatial_layers = 3;
+ const size_t num_temporal_layers = 2;
+ ConfigureSvc(codec_settings_, num_spatial_layers, num_temporal_layers);
+ codec_settings_.SetFrameDropEnabled(false);
+ codec_settings_.mode = VideoCodecMode::kRealtimeVideo;
+ codec_settings_.VP9()->interLayerPred = InterLayerPredMode::kOnKeyPic;
+ codec_settings_.VP9()->flexibleMode = false;
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->InitEncode(&codec_settings_, kSettings));
+
+ // Enable all the layers.
+ VideoBitrateAllocation bitrate_allocation;
+ for (size_t sl_idx = 0; sl_idx < num_spatial_layers; ++sl_idx) {
+ for (size_t tl_idx = 0; tl_idx < num_temporal_layers; ++tl_idx) {
+ bitrate_allocation.SetBitrate(
+ sl_idx, tl_idx,
+ codec_settings_.spatialLayers[sl_idx].targetBitrate * 1000 /
+ num_temporal_layers);
+ }
+ }
+ encoder_->SetRates(VideoEncoder::RateControlParameters(
+ bitrate_allocation, codec_settings_.maxFramerate));
+
+ std::vector<EncodedImage> encoded_frames;
+ std::vector<CodecSpecificInfo> codec_specific_info;
+
+  // Encode one TL0 frame.
+ SetWaitForEncodedFramesThreshold(num_spatial_layers);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(NextInputFrame(), nullptr));
+ ASSERT_TRUE(WaitForEncodedFrames(&encoded_frames, &codec_specific_info));
+ EXPECT_EQ(codec_specific_info[0].codecSpecific.VP9.temporal_idx, 0u);
+
+ // Disable the last layer.
+ for (size_t tl_idx = 0; tl_idx < num_temporal_layers; ++tl_idx) {
+ bitrate_allocation.SetBitrate(num_spatial_layers - 1, tl_idx, 0);
+ }
+ encoder_->SetRates(VideoEncoder::RateControlParameters(
+ bitrate_allocation, codec_settings_.maxFramerate));
+
+  // The next frame is a TL1 frame. The last layer is disabled immediately,
+  // but the SS structure is not provided yet.
+ SetWaitForEncodedFramesThreshold(num_spatial_layers - 1);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(NextInputFrame(), nullptr));
+ ASSERT_TRUE(WaitForEncodedFrames(&encoded_frames, &codec_specific_info));
+ EXPECT_EQ(codec_specific_info[0].codecSpecific.VP9.temporal_idx, 1u);
+ EXPECT_FALSE(codec_specific_info[0].codecSpecific.VP9.ss_data_available);
+
+  // Next is a TL0 frame, which should carry the delayed SS structure.
+ SetWaitForEncodedFramesThreshold(num_spatial_layers - 1);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(NextInputFrame(), nullptr));
+ ASSERT_TRUE(WaitForEncodedFrames(&encoded_frames, &codec_specific_info));
+ EXPECT_EQ(codec_specific_info[0].codecSpecific.VP9.temporal_idx, 0u);
+ EXPECT_TRUE(codec_specific_info[0].codecSpecific.VP9.ss_data_available);
+ EXPECT_TRUE(codec_specific_info[0]
+ .codecSpecific.VP9.spatial_layer_resolution_present);
+ EXPECT_EQ(codec_specific_info[0].codecSpecific.VP9.num_spatial_layers,
+ num_spatial_layers - 1);
+}
+
+TEST_F(TestVp9Impl,
+ LowLayerMarkedAsRefIfHighLayerNotEncodedAndInterLayerPredIsEnabled) {
+ ConfigureSvc(codec_settings_, 3);
+ codec_settings_.SetFrameDropEnabled(false);
+ codec_settings_.VP9()->interLayerPred = InterLayerPredMode::kOn;
+
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->InitEncode(&codec_settings_, kSettings));
+
+ VideoBitrateAllocation bitrate_allocation;
+ bitrate_allocation.SetBitrate(
+ 0, 0, codec_settings_.spatialLayers[0].targetBitrate * 1000);
+ encoder_->SetRates(VideoEncoder::RateControlParameters(
+ bitrate_allocation, codec_settings_.maxFramerate));
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(NextInputFrame(), nullptr));
+ EncodedImage encoded_frame;
+ CodecSpecificInfo codec_info;
+ ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_info));
+ EXPECT_TRUE(codec_info.codecSpecific.VP9.ss_data_available);
+ EXPECT_FALSE(codec_info.codecSpecific.VP9.non_ref_for_inter_layer_pred);
+}
+
+TEST_F(TestVp9Impl, ScalabilityStructureIsAvailableInFlexibleMode) {
+ codec_settings_.VP9()->flexibleMode = true;
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->InitEncode(&codec_settings_, kSettings));
+
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(NextInputFrame(), nullptr));
+ EncodedImage encoded_frame;
+ CodecSpecificInfo codec_specific_info;
+ ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
+ EXPECT_TRUE(codec_specific_info.codecSpecific.VP9.ss_data_available);
+}
+
+TEST_F(TestVp9Impl, Profile0PreferredPixelFormats) {
+ EXPECT_THAT(encoder_->GetEncoderInfo().preferred_pixel_formats,
+ testing::UnorderedElementsAre(VideoFrameBuffer::Type::kNV12,
+ VideoFrameBuffer::Type::kI420));
+}
+
+TEST_F(TestVp9Impl, EncoderInfoWithoutResolutionBitrateLimits) {
+ EXPECT_TRUE(encoder_->GetEncoderInfo().resolution_bitrate_limits.empty());
+}
+
+TEST_F(TestVp9Impl, EncoderInfoWithBitrateLimitsFromFieldTrial) {
+ test::ScopedFieldTrials field_trials(
+ "WebRTC-VP9-GetEncoderInfoOverride/"
+ "frame_size_pixels:123|456|789,"
+ "min_start_bitrate_bps:11000|22000|33000,"
+ "min_bitrate_bps:44000|55000|66000,"
+ "max_bitrate_bps:77000|88000|99000/");
+ SetUp();
+
+ EXPECT_THAT(
+ encoder_->GetEncoderInfo().resolution_bitrate_limits,
+ ::testing::ElementsAre(
+ VideoEncoder::ResolutionBitrateLimits{123, 11000, 44000, 77000},
+ VideoEncoder::ResolutionBitrateLimits{456, 22000, 55000, 88000},
+ VideoEncoder::ResolutionBitrateLimits{789, 33000, 66000, 99000}));
+}
+
+TEST_F(TestVp9Impl, EncoderInfoFpsAllocation) {
+ const uint8_t kNumSpatialLayers = 3;
+ const uint8_t kNumTemporalLayers = 3;
+
+ codec_settings_.maxFramerate = 30;
+ codec_settings_.VP9()->numberOfSpatialLayers = kNumSpatialLayers;
+ codec_settings_.VP9()->numberOfTemporalLayers = kNumTemporalLayers;
+
+ for (uint8_t sl_idx = 0; sl_idx < kNumSpatialLayers; ++sl_idx) {
+ codec_settings_.spatialLayers[sl_idx].width = codec_settings_.width;
+ codec_settings_.spatialLayers[sl_idx].height = codec_settings_.height;
+ codec_settings_.spatialLayers[sl_idx].minBitrate =
+ codec_settings_.startBitrate;
+ codec_settings_.spatialLayers[sl_idx].maxBitrate =
+ codec_settings_.startBitrate;
+ codec_settings_.spatialLayers[sl_idx].targetBitrate =
+ codec_settings_.startBitrate;
+ codec_settings_.spatialLayers[sl_idx].active = true;
+ codec_settings_.spatialLayers[sl_idx].maxFramerate =
+ codec_settings_.maxFramerate;
+ }
+
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->InitEncode(&codec_settings_, kSettings));
+
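+  // With three temporal layers each layer doubles the rate: 1/4, 1/2 and the
+  // full frame rate.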
+ FramerateFractions expected_fps_allocation[kMaxSpatialLayers];
+ expected_fps_allocation[0].push_back(EncoderInfo::kMaxFramerateFraction / 4);
+ expected_fps_allocation[0].push_back(EncoderInfo::kMaxFramerateFraction / 2);
+ expected_fps_allocation[0].push_back(EncoderInfo::kMaxFramerateFraction);
+ expected_fps_allocation[1] = expected_fps_allocation[0];
+ expected_fps_allocation[2] = expected_fps_allocation[0];
+ EXPECT_THAT(encoder_->GetEncoderInfo().fps_allocation,
+ ElementsAreArray(expected_fps_allocation));
+}
+
+TEST_F(TestVp9Impl, EncoderInfoFpsAllocationFlexibleMode) {
+ const uint8_t kNumSpatialLayers = 3;
+
+ codec_settings_.maxFramerate = 30;
+ codec_settings_.VP9()->numberOfSpatialLayers = kNumSpatialLayers;
+ codec_settings_.VP9()->numberOfTemporalLayers = 1;
+ codec_settings_.VP9()->flexibleMode = true;
+
+ VideoEncoder::RateControlParameters rate_params;
+ for (uint8_t sl_idx = 0; sl_idx < kNumSpatialLayers; ++sl_idx) {
+ codec_settings_.spatialLayers[sl_idx].width = codec_settings_.width;
+ codec_settings_.spatialLayers[sl_idx].height = codec_settings_.height;
+ codec_settings_.spatialLayers[sl_idx].minBitrate =
+ codec_settings_.startBitrate;
+ codec_settings_.spatialLayers[sl_idx].maxBitrate =
+ codec_settings_.startBitrate;
+ codec_settings_.spatialLayers[sl_idx].targetBitrate =
+ codec_settings_.startBitrate;
+ codec_settings_.spatialLayers[sl_idx].active = true;
+ // Force different frame rates for different layers, to verify that total
+ // fraction is correct.
+ codec_settings_.spatialLayers[sl_idx].maxFramerate =
+ codec_settings_.maxFramerate / (kNumSpatialLayers - sl_idx);
+ rate_params.bitrate.SetBitrate(sl_idx, 0,
+ codec_settings_.startBitrate * 1000);
+ }
+ rate_params.bandwidth_allocation =
+ DataRate::BitsPerSec(rate_params.bitrate.get_sum_bps());
+ rate_params.framerate_fps = codec_settings_.maxFramerate;
+
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->InitEncode(&codec_settings_, kSettings));
+
+ // No temporal layers allowed when spatial layers have different fps targets.
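+  // Layer max framerates are 10, 15 and 30 fps, i.e. 1/3, 1/2 and all of the
+  // 30 fps input.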
+ FramerateFractions expected_fps_allocation[kMaxSpatialLayers];
+ expected_fps_allocation[0].push_back(EncoderInfo::kMaxFramerateFraction / 3);
+ expected_fps_allocation[1].push_back(EncoderInfo::kMaxFramerateFraction / 2);
+ expected_fps_allocation[2].push_back(EncoderInfo::kMaxFramerateFraction);
+ EXPECT_THAT(encoder_->GetEncoderInfo().fps_allocation,
+ ::testing::ElementsAreArray(expected_fps_allocation));
+
+ // SetRates with current fps does not alter outcome.
+ encoder_->SetRates(rate_params);
+ EXPECT_THAT(encoder_->GetEncoderInfo().fps_allocation,
+ ::testing::ElementsAreArray(expected_fps_allocation));
+
+ // Higher fps than the codec wants, should still not affect outcome.
+ rate_params.framerate_fps *= 2;
+ encoder_->SetRates(rate_params);
+ EXPECT_THAT(encoder_->GetEncoderInfo().fps_allocation,
+ ::testing::ElementsAreArray(expected_fps_allocation));
+}
+
+class Vp9ImplWithLayeringTest
+ : public ::testing::TestWithParam<std::tuple<int, int, bool>> {
+ protected:
+ Vp9ImplWithLayeringTest()
+ : num_spatial_layers_(std::get<0>(GetParam())),
+ num_temporal_layers_(std::get<1>(GetParam())),
+ override_field_trials_(std::get<2>(GetParam())
+ ? "WebRTC-Vp9ExternalRefCtrl/Enabled/"
+ : "") {}
+
+ const uint8_t num_spatial_layers_;
+ const uint8_t num_temporal_layers_;
+ const test::ScopedFieldTrials override_field_trials_;
+};
+
+TEST_P(Vp9ImplWithLayeringTest, FlexibleMode) {
+  // In flexible mode the encoder wrapper obtains the actual list of
+  // references from the encoder and writes it into the RTP payload
+  // descriptor. Check that the reference list in the payload descriptor
+  // matches the predefined one used in non-flexible mode.
+ std::unique_ptr<VideoEncoder> encoder = VP9Encoder::Create();
+ VideoCodec codec_settings = DefaultCodecSettings();
+ codec_settings.VP9()->flexibleMode = true;
+ codec_settings.SetFrameDropEnabled(false);
+ codec_settings.VP9()->numberOfSpatialLayers = num_spatial_layers_;
+ codec_settings.VP9()->numberOfTemporalLayers = num_temporal_layers_;
+ EXPECT_EQ(encoder->InitEncode(&codec_settings, kSettings),
+ WEBRTC_VIDEO_CODEC_OK);
+
+ GofInfoVP9 gof;
+ if (num_temporal_layers_ == 1) {
+ gof.SetGofInfoVP9(kTemporalStructureMode1);
+ } else if (num_temporal_layers_ == 2) {
+ gof.SetGofInfoVP9(kTemporalStructureMode2);
+ } else if (num_temporal_layers_ == 3) {
+ gof.SetGofInfoVP9(kTemporalStructureMode3);
+ }
+
+ // Encode at least (num_frames_in_gof + 1) frames to verify references
+ // of non-key frame with gof_idx = 0.
+ int num_input_frames = gof.num_frames_in_gof + 1;
+ std::vector<EncodedVideoFrameProducer::EncodedFrame> frames =
+ EncodedVideoFrameProducer(*encoder)
+ .SetNumInputFrames(num_input_frames)
+ .SetResolution({kWidth, kHeight})
+ .Encode();
+ ASSERT_THAT(frames, SizeIs(num_input_frames * num_spatial_layers_));
+
+ for (size_t i = 0; i < frames.size(); ++i) {
+ const EncodedVideoFrameProducer::EncodedFrame& frame = frames[i];
+ const size_t picture_idx = i / num_spatial_layers_;
+ const size_t gof_idx = picture_idx % gof.num_frames_in_gof;
+
+ const CodecSpecificInfoVP9& vp9 =
+ frame.codec_specific_info.codecSpecific.VP9;
+ EXPECT_EQ(frame.encoded_image.SpatialIndex(),
+ num_spatial_layers_ == 1
+ ? absl::nullopt
+ : absl::optional<int>(i % num_spatial_layers_))
+ << "Frame " << i;
+ EXPECT_EQ(vp9.temporal_idx, num_temporal_layers_ == 1
+ ? kNoTemporalIdx
+ : gof.temporal_idx[gof_idx])
+ << "Frame " << i;
+ EXPECT_EQ(vp9.temporal_up_switch, gof.temporal_up_switch[gof_idx])
+ << "Frame " << i;
+ if (picture_idx == 0) {
+ EXPECT_EQ(vp9.num_ref_pics, 0) << "Frame " << i;
+ } else {
+ EXPECT_THAT(rtc::MakeArrayView(vp9.p_diff, vp9.num_ref_pics),
+ UnorderedElementsAreArray(gof.pid_diff[gof_idx],
+ gof.num_ref_pics[gof_idx]))
+ << "Frame " << i;
+ }
+ }
+}
+
+INSTANTIATE_TEST_SUITE_P(All,
+ Vp9ImplWithLayeringTest,
+ ::testing::Combine(::testing::Values(1, 2, 3),
+ ::testing::Values(1, 2, 3),
+ ::testing::Bool()));
+
+class TestVp9ImplFrameDropping : public TestVp9Impl {
+ protected:
+ void ModifyCodecSettings(VideoCodec* codec_settings) override {
+ webrtc::test::CodecSettings(kVideoCodecVP9, codec_settings);
+ // We need to encode quite a lot of frames in this test. Use low resolution
+ // to reduce execution time.
+ codec_settings->width = 64;
+ codec_settings->height = 64;
+ codec_settings->mode = VideoCodecMode::kScreensharing;
+ }
+};
+
+TEST_F(TestVp9ImplFrameDropping, PreEncodeFrameDropping) {
+ const size_t num_frames_to_encode = 100;
+ const float input_framerate_fps = 30.0;
+ const float video_duration_secs = num_frames_to_encode / input_framerate_fps;
+ const float expected_framerate_fps = 5.0f;
+ const float max_abs_framerate_error_fps = expected_framerate_fps * 0.1f;
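+  // 100 input frames at 30 fps capped to 5 fps should yield roughly 5 fps of
+  // output, within 10%.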
+
+ codec_settings_.maxFramerate = static_cast<uint32_t>(expected_framerate_fps);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->InitEncode(&codec_settings_, kSettings));
+
+ VideoFrame input_frame = NextInputFrame();
+ for (size_t frame_num = 0; frame_num < num_frames_to_encode; ++frame_num) {
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(input_frame, nullptr));
+ const size_t timestamp = input_frame.timestamp() +
+ kVideoPayloadTypeFrequency / input_framerate_fps;
+ input_frame.set_timestamp(static_cast<uint32_t>(timestamp));
+ }
+
+ const size_t num_encoded_frames = GetNumEncodedFrames();
+ const float encoded_framerate_fps = num_encoded_frames / video_duration_secs;
+ EXPECT_NEAR(encoded_framerate_fps, expected_framerate_fps,
+ max_abs_framerate_error_fps);
+}
+
+TEST_F(TestVp9ImplFrameDropping, DifferentFrameratePerSpatialLayer) {
+  // Assign a different frame rate to each spatial layer and check that the
+  // resulting frame rates are close to the assigned ones.
+ const uint8_t num_spatial_layers = 3;
+ const float input_framerate_fps = 30.0;
+ const size_t video_duration_secs = 3;
+ const size_t num_input_frames = video_duration_secs * input_framerate_fps;
+
+ codec_settings_.VP9()->numberOfSpatialLayers = num_spatial_layers;
+ codec_settings_.SetFrameDropEnabled(false);
+ codec_settings_.VP9()->flexibleMode = true;
+
+ VideoBitrateAllocation bitrate_allocation;
+ for (uint8_t sl_idx = 0; sl_idx < num_spatial_layers; ++sl_idx) {
+ // Frame rate increases from low to high layer.
+ const uint32_t framerate_fps = 10 * (sl_idx + 1);
+
+ codec_settings_.spatialLayers[sl_idx].width = codec_settings_.width;
+ codec_settings_.spatialLayers[sl_idx].height = codec_settings_.height;
+ codec_settings_.spatialLayers[sl_idx].maxFramerate = framerate_fps;
+ codec_settings_.spatialLayers[sl_idx].minBitrate =
+ codec_settings_.startBitrate;
+ codec_settings_.spatialLayers[sl_idx].maxBitrate =
+ codec_settings_.startBitrate;
+ codec_settings_.spatialLayers[sl_idx].targetBitrate =
+ codec_settings_.startBitrate;
+ codec_settings_.spatialLayers[sl_idx].active = true;
+
+ bitrate_allocation.SetBitrate(
+ sl_idx, 0, codec_settings_.spatialLayers[sl_idx].targetBitrate * 1000);
+ }
+
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->InitEncode(&codec_settings_, kSettings));
+
+ encoder_->SetRates(VideoEncoder::RateControlParameters(
+ bitrate_allocation, codec_settings_.maxFramerate));
+
+ VideoFrame input_frame = NextInputFrame();
+ for (size_t frame_num = 0; frame_num < num_input_frames; ++frame_num) {
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(input_frame, nullptr));
+ const size_t timestamp = input_frame.timestamp() +
+ kVideoPayloadTypeFrequency / input_framerate_fps;
+ input_frame.set_timestamp(static_cast<uint32_t>(timestamp));
+ }
+
+ std::vector<EncodedImage> encoded_frames;
+ std::vector<CodecSpecificInfo> codec_infos;
+ ASSERT_TRUE(WaitForEncodedFrames(&encoded_frames, &codec_infos));
+
+ std::vector<size_t> num_encoded_frames(num_spatial_layers, 0);
+ for (EncodedImage& encoded_frame : encoded_frames) {
+ ++num_encoded_frames[encoded_frame.SpatialIndex().value_or(0)];
+ }
+
+ for (uint8_t sl_idx = 0; sl_idx < num_spatial_layers; ++sl_idx) {
+ const float layer_target_framerate_fps =
+ codec_settings_.spatialLayers[sl_idx].maxFramerate;
+ const float layer_output_framerate_fps =
+ static_cast<float>(num_encoded_frames[sl_idx]) / video_duration_secs;
+ const float max_framerate_error_fps = layer_target_framerate_fps * 0.1f;
+ EXPECT_NEAR(layer_output_framerate_fps, layer_target_framerate_fps,
+ max_framerate_error_fps);
+ }
+}
+
+class TestVp9ImplProfile2 : public TestVp9Impl {
+ protected:
+ void SetUp() override {
+ // Profile 2 might not be available on some platforms until
+ // https://bugs.chromium.org/p/webm/issues/detail?id=1544 is solved.
+ bool profile_2_is_supported = false;
+ for (const auto& codec : SupportedVP9Codecs()) {
+ if (ParseSdpForVP9Profile(codec.parameters)
+ .value_or(VP9Profile::kProfile0) == VP9Profile::kProfile2) {
+ profile_2_is_supported = true;
+ }
+ }
+ if (!profile_2_is_supported)
+ return;
+
+ TestVp9Impl::SetUp();
+ input_frame_generator_ = test::CreateSquareFrameGenerator(
+ codec_settings_.width, codec_settings_.height,
+ test::FrameGeneratorInterface::OutputType::kI010,
+ absl::optional<int>());
+ }
+
+ std::unique_ptr<VideoEncoder> CreateEncoder() override {
+ cricket::VideoCodec profile2_codec;
+ profile2_codec.SetParam(kVP9FmtpProfileId,
+ VP9ProfileToString(VP9Profile::kProfile2));
+ return VP9Encoder::Create(profile2_codec);
+ }
+
+ std::unique_ptr<VideoDecoder> CreateDecoder() override {
+ return VP9Decoder::Create();
+ }
+};
+
+TEST_F(TestVp9ImplProfile2, EncodeDecode) {
+ if (!encoder_)
+ return;
+
+ VideoFrame input_frame = NextInputFrame();
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(input_frame, nullptr));
+ EncodedImage encoded_frame;
+ CodecSpecificInfo codec_specific_info;
+ ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
+  // The first frame is a key frame; mark it explicitly before decoding.
+ encoded_frame._frameType = VideoFrameType::kVideoFrameKey;
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Decode(encoded_frame, false, 0));
+ std::unique_ptr<VideoFrame> decoded_frame;
+ absl::optional<uint8_t> decoded_qp;
+ ASSERT_TRUE(WaitForDecodedFrame(&decoded_frame, &decoded_qp));
+ ASSERT_TRUE(decoded_frame);
+
+ // TODO(emircan): Add PSNR for different color depths.
+ EXPECT_GT(I420PSNR(*input_frame.video_frame_buffer()->ToI420(),
+ *decoded_frame->video_frame_buffer()->ToI420()),
+ 31);
+}
+
+TEST_F(TestVp9Impl, EncodeWithDynamicRate) {
+ // Configured dynamic rate field trial and re-create the encoder.
+ test::ScopedFieldTrials field_trials(
+ "WebRTC-VideoRateControl/vp9_dynamic_rate:true/");
+ SetUp();
+
+ // Set 300kbps target with 100% headroom.
+ VideoEncoder::RateControlParameters params;
+ params.bandwidth_allocation = DataRate::BitsPerSec(300000);
+ params.bitrate.SetBitrate(0, 0, params.bandwidth_allocation.bps());
+ params.framerate_fps = 30.0;
+
+ encoder_->SetRates(params);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(NextInputFrame(), nullptr));
+ EncodedImage encoded_frame;
+ CodecSpecificInfo codec_specific_info;
+ ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
+
+ // Set no headroom and encode again.
+ params.bandwidth_allocation = DataRate::Zero();
+ encoder_->SetRates(params);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(NextInputFrame(), nullptr));
+ ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
+}
+
+TEST_F(TestVp9Impl, ReenablingUpperLayerAfterKFWithInterlayerPredIsEnabled) {
+ const size_t num_spatial_layers = 2;
+ const int num_frames_to_encode = 10;
+ codec_settings_.VP9()->flexibleMode = true;
+ codec_settings_.SetFrameDropEnabled(false);
+ codec_settings_.VP9()->numberOfSpatialLayers = num_spatial_layers;
+ codec_settings_.VP9()->numberOfTemporalLayers = 1;
+ codec_settings_.VP9()->interLayerPred = InterLayerPredMode::kOn;
+ // Force low frame-rate, so all layers are present for all frames.
+ codec_settings_.maxFramerate = 5;
+
+ ConfigureSvc(codec_settings_, num_spatial_layers);
+
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->InitEncode(&codec_settings_, kSettings));
+
+ VideoBitrateAllocation bitrate_allocation;
+ for (size_t sl_idx = 0; sl_idx < num_spatial_layers; ++sl_idx) {
+ bitrate_allocation.SetBitrate(
+ sl_idx, 0, codec_settings_.spatialLayers[sl_idx].targetBitrate * 1000);
+ }
+ encoder_->SetRates(VideoEncoder::RateControlParameters(
+ bitrate_allocation, codec_settings_.maxFramerate));
+
+ std::vector<EncodedImage> encoded_frames;
+ std::vector<CodecSpecificInfo> codec_specific;
+
+ for (int i = 0; i < num_frames_to_encode; ++i) {
+ SetWaitForEncodedFramesThreshold(num_spatial_layers);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->Encode(NextInputFrame(), nullptr));
+ ASSERT_TRUE(WaitForEncodedFrames(&encoded_frames, &codec_specific));
+ EXPECT_EQ(encoded_frames.size(), num_spatial_layers);
+ }
+
+ // Disable the last layer.
+ bitrate_allocation.SetBitrate(num_spatial_layers - 1, 0, 0);
+ encoder_->SetRates(VideoEncoder::RateControlParameters(
+ bitrate_allocation, codec_settings_.maxFramerate));
+
+ for (int i = 0; i < num_frames_to_encode; ++i) {
+ SetWaitForEncodedFramesThreshold(num_spatial_layers - 1);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->Encode(NextInputFrame(), nullptr));
+ ASSERT_TRUE(WaitForEncodedFrames(&encoded_frames, &codec_specific));
+ EXPECT_EQ(encoded_frames.size(), num_spatial_layers - 1);
+ }
+
+ std::vector<VideoFrameType> frame_types = {VideoFrameType::kVideoFrameKey};
+
+ // Force a key-frame with the last layer still disabled.
+ SetWaitForEncodedFramesThreshold(num_spatial_layers - 1);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->Encode(NextInputFrame(), &frame_types));
+ ASSERT_TRUE(WaitForEncodedFrames(&encoded_frames, &codec_specific));
+ EXPECT_EQ(encoded_frames.size(), num_spatial_layers - 1);
+ ASSERT_EQ(encoded_frames[0]._frameType, VideoFrameType::kVideoFrameKey);
+
+ // Re-enable the last layer.
+ bitrate_allocation.SetBitrate(
+ num_spatial_layers - 1, 0,
+ codec_settings_.spatialLayers[num_spatial_layers - 1].targetBitrate *
+ 1000);
+ encoder_->SetRates(VideoEncoder::RateControlParameters(
+ bitrate_allocation, codec_settings_.maxFramerate));
+
+ SetWaitForEncodedFramesThreshold(num_spatial_layers);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(NextInputFrame(), nullptr));
+ ASSERT_TRUE(WaitForEncodedFrames(&encoded_frames, &codec_specific));
+ EXPECT_EQ(encoded_frames.size(), num_spatial_layers);
+ EXPECT_EQ(encoded_frames[0]._frameType, VideoFrameType::kVideoFrameDelta);
+}
+
+TEST_F(TestVp9Impl, HandlesEmptyDecoderConfigure) {
+ std::unique_ptr<VideoDecoder> decoder = CreateDecoder();
+  // Check that the default settings are OK for the decoder.
+ EXPECT_TRUE(decoder->Configure({}));
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder->Release());
+}
+
+INSTANTIATE_TEST_SUITE_P(
+ TestVp9ImplForPixelFormat,
+ TestVp9ImplForPixelFormat,
+ ::testing::Values(test::FrameGeneratorInterface::OutputType::kI420,
+ test::FrameGeneratorInterface::OutputType::kNV12),
+ [](const auto& info) {
+ return test::FrameGeneratorInterface::OutputTypeToString(info.param);
+ });
+
+// Helper function to populate a vpx_image_t instance with dimensions and
+// potential image data.
+std::function<vpx_image_t*(vpx_image_t*,
+ vpx_img_fmt_t,
+ unsigned int,
+ unsigned int,
+ unsigned int,
+ unsigned char* img_data)>
+GetWrapImageFunction(vpx_image_t* img) {
+ return [img](vpx_image_t* /*img*/, vpx_img_fmt_t fmt, unsigned int d_w,
+ unsigned int d_h, unsigned int /*stride_align*/,
+ unsigned char* img_data) {
+ img->fmt = fmt;
+ img->d_w = d_w;
+ img->d_h = d_h;
+ img->img_data = img_data;
+ return img;
+ };
+}
+
+TEST(Vp9SpeedSettingsTrialsTest, NoSvcUsesGlobalSpeedFromTl0InLayerConfig) {
+  // TL0 speed 8 at >= 480x270 (129600 pixels), 4 below that.
+ test::ExplicitKeyValueConfig trials(
+ "WebRTC-VP9-PerformanceFlags/"
+ "use_per_layer_speed,"
+ "min_pixel_count:0|129600,"
+ "base_layer_speed:4|8,"
+ "high_layer_speed:5|9,"
+ "deblock_mode:1|0/");
+
+ // Keep a raw pointer for EXPECT calls and the like. Ownership is otherwise
+ // passed on to LibvpxVp9Encoder.
+ auto* const vpx = new NiceMock<MockLibvpxInterface>();
+ LibvpxVp9Encoder encoder(cricket::VideoCodec(),
+ absl::WrapUnique<LibvpxInterface>(vpx), trials);
+
+ VideoCodec settings = DefaultCodecSettings();
+ settings.width = 480;
+ settings.height = 270;
+ vpx_image_t img;
+
+ ON_CALL(*vpx, img_wrap).WillByDefault(GetWrapImageFunction(&img));
+ ON_CALL(*vpx, codec_enc_config_default)
+ .WillByDefault(DoAll(WithArg<1>([](vpx_codec_enc_cfg_t* cfg) {
+ memset(cfg, 0, sizeof(vpx_codec_enc_cfg_t));
+ }),
+ Return(VPX_CODEC_OK)));
+ EXPECT_CALL(*vpx, codec_control(_, _, An<int>())).Times(AnyNumber());
+
+ EXPECT_CALL(*vpx, codec_control(_, VP9E_SET_SVC_PARAMETERS,
+ A<vpx_svc_extra_cfg_t*>()))
+ .Times(0);
+
+ EXPECT_CALL(*vpx, codec_control(_, VP8E_SET_CPUUSED, TypedEq<int>(8)));
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder.InitEncode(&settings, kSettings));
+
+ encoder.Release();
+ settings.width = 352;
+ settings.height = 216;
+
+ EXPECT_CALL(*vpx, codec_control(_, VP8E_SET_CPUUSED, TypedEq<int>(4)));
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder.InitEncode(&settings, kSettings));
+}
+
+TEST(Vp9SpeedSettingsTrialsTest,
+ NoPerLayerFlagUsesGlobalSpeedFromTopLayerInConfig) {
+  // TL0 speed 8 at >= 480x270 (129600 pixels), 4 below that.
+ test::ExplicitKeyValueConfig trials(
+ "WebRTC-VP9-PerformanceFlags/"
+ "min_pixel_count:0|129600,"
+ "base_layer_speed:4|8,"
+ "high_layer_speed:5|9,"
+ "deblock_mode:1|0/");
+
+ // Keep a raw pointer for EXPECT calls and the like. Ownership is otherwise
+ // passed on to LibvpxVp9Encoder.
+ auto* const vpx = new NiceMock<MockLibvpxInterface>();
+ LibvpxVp9Encoder encoder(cricket::VideoCodec(),
+ absl::WrapUnique<LibvpxInterface>(vpx), trials);
+
+ VideoCodec settings = DefaultCodecSettings();
+ settings.width = 480;
+ settings.height = 270;
+ ConfigureSvc(settings, 2, 3);
+ vpx_image_t img;
+
+ ON_CALL(*vpx, img_wrap).WillByDefault(GetWrapImageFunction(&img));
+ ON_CALL(*vpx, codec_enc_config_default)
+ .WillByDefault(DoAll(WithArg<1>([](vpx_codec_enc_cfg_t* cfg) {
+ memset(cfg, 0, sizeof(vpx_codec_enc_cfg_t));
+ }),
+ Return(VPX_CODEC_OK)));
+ EXPECT_CALL(*vpx, codec_control(_, _, An<int>())).Times(AnyNumber());
+
+ // Speed settings not populated when 'use_per_layer_speed' flag is absent.
+ EXPECT_CALL(*vpx,
+ codec_control(
+ _, VP9E_SET_SVC_PARAMETERS,
+ SafeMatcherCast<vpx_svc_extra_cfg_t*>(AllOf(
+ Field(&vpx_svc_extra_cfg_t::speed_per_layer, Each(0)),
+ Field(&vpx_svc_extra_cfg_t::loopfilter_ctrl, Each(0))))))
+ .Times(2);
+
+ EXPECT_CALL(*vpx, codec_control(_, VP8E_SET_CPUUSED, TypedEq<int>(8)));
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder.InitEncode(&settings, kSettings));
+
+ encoder.Release();
+ settings.width = 476;
+ settings.height = 268;
+ settings.spatialLayers[0].width = settings.width / 2;
+ settings.spatialLayers[0].height = settings.height / 2;
+ settings.spatialLayers[1].width = settings.width;
+ settings.spatialLayers[1].height = settings.height;
+
+ EXPECT_CALL(*vpx, codec_control(_, VP8E_SET_CPUUSED, TypedEq<int>(4)));
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder.InitEncode(&settings, kSettings));
+}
+
+TEST(Vp9SpeedSettingsTrialsTest, DefaultPerLayerFlagsWithSvc) {
+ // Per-temporal and spatial layer speed settings:
+ // SL0: TL0 = speed 5, TL1/TL2 = speed 8.
+ // SL1/2: TL0 = speed 7, TL1/TL2 = speed 8.
+ // Deblocking-mode per spatial layer:
+ // SL0: mode 1, SL1/2: mode 0.
+ test::ExplicitKeyValueConfig trials(
+ "WebRTC-VP9-PerformanceFlags/"
+ "use_per_layer_speed,"
+ "min_pixel_count:0|129600,"
+ "base_layer_speed:5|7,"
+ "high_layer_speed:8|8,"
+ "deblock_mode:1|0/");
+
+ // Keep a raw pointer for EXPECT calls and the like. Ownership is otherwise
+ // passed on to LibvpxVp9Encoder.
+ auto* const vpx = new NiceMock<MockLibvpxInterface>();
+ LibvpxVp9Encoder encoder(cricket::VideoCodec(),
+ absl::WrapUnique<LibvpxInterface>(vpx), trials);
+
+ VideoCodec settings = DefaultCodecSettings();
+ constexpr int kNumSpatialLayers = 3;
+ constexpr int kNumTemporalLayers = 3;
+ ConfigureSvc(settings, kNumSpatialLayers, kNumTemporalLayers);
+ VideoBitrateAllocation bitrate_allocation;
+ for (int si = 0; si < kNumSpatialLayers; ++si) {
+ for (int ti = 0; ti < kNumTemporalLayers; ++ti) {
+ uint32_t bitrate_bps =
+ settings.spatialLayers[si].targetBitrate * 1'000 / kNumTemporalLayers;
+ bitrate_allocation.SetBitrate(si, ti, bitrate_bps);
+ }
+ }
+ vpx_image_t img;
+
+ // Speed settings per spatial layer, for TL0.
+ const int kBaseTlSpeed[VPX_MAX_LAYERS] = {5, 7, 7};
+ // Speed settings per spatial layer, for TL1, TL2.
+ const int kHighTlSpeed[VPX_MAX_LAYERS] = {8, 8, 8};
+ // Loopfilter settings are handled within libvpx, so this array is valid for
+ // both TL0 and higher.
+ const int kLoopFilter[VPX_MAX_LAYERS] = {1, 0, 0};
+
+ ON_CALL(*vpx, img_wrap).WillByDefault(GetWrapImageFunction(&img));
+ ON_CALL(*vpx, codec_enc_init)
+ .WillByDefault(WithArg<0>([](vpx_codec_ctx_t* ctx) {
+ memset(ctx, 0, sizeof(*ctx));
+ return VPX_CODEC_OK;
+ }));
+ ON_CALL(*vpx, codec_enc_config_default)
+ .WillByDefault(DoAll(WithArg<1>([](vpx_codec_enc_cfg_t* cfg) {
+ memset(cfg, 0, sizeof(vpx_codec_enc_cfg_t));
+ }),
+ Return(VPX_CODEC_OK)));
+ EXPECT_CALL(
+ *vpx, codec_control(_, VP9E_SET_SVC_PARAMETERS,
+ SafeMatcherCast<vpx_svc_extra_cfg_t*>(
+ AllOf(Field(&vpx_svc_extra_cfg_t::speed_per_layer,
+ ElementsAreArray(kBaseTlSpeed)),
+ Field(&vpx_svc_extra_cfg_t::loopfilter_ctrl,
+ ElementsAreArray(kLoopFilter))))));
+
+ // Capture the callback into the vp9 wrapper.
+ vpx_codec_priv_output_cx_pkt_cb_pair_t callback_pointer = {};
+ EXPECT_CALL(*vpx, codec_control(_, VP9E_REGISTER_CX_CALLBACK, A<void*>()))
+ .WillOnce(WithArg<2>([&](void* cbp) {
+ callback_pointer =
+ *reinterpret_cast<vpx_codec_priv_output_cx_pkt_cb_pair_t*>(cbp);
+ return VPX_CODEC_OK;
+ }));
+
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder.InitEncode(&settings, kSettings));
+
+ encoder.SetRates(VideoEncoder::RateControlParameters(bitrate_allocation,
+ settings.maxFramerate));
+
+ MockEncodedImageCallback callback;
+ encoder.RegisterEncodeCompleteCallback(&callback);
+ auto frame_generator = test::CreateSquareFrameGenerator(
+ kWidth, kHeight, test::FrameGeneratorInterface::OutputType::kI420, 10);
+ Mock::VerifyAndClearExpectations(vpx);
+
+ uint8_t data[1] = {0};
+ vpx_codec_cx_pkt encoded_data = {};
+ encoded_data.data.frame.buf = &data;
+ encoded_data.data.frame.sz = 1;
+
+ const auto kImageOk =
+ EncodedImageCallback::Result(EncodedImageCallback::Result::OK);
+
+ int spatial_id = 0;
+ int temporal_id = 0;
+ EXPECT_CALL(*vpx,
+ codec_control(_, VP9E_SET_SVC_LAYER_ID, A<vpx_svc_layer_id_t*>()))
+ .Times(AnyNumber());
+ EXPECT_CALL(*vpx,
+ codec_control(_, VP9E_GET_SVC_LAYER_ID, A<vpx_svc_layer_id_t*>()))
+ .WillRepeatedly(WithArg<2>([&](vpx_svc_layer_id_t* layer_id) {
+ layer_id->spatial_layer_id = spatial_id;
+ layer_id->temporal_layer_id = temporal_id;
+ return VPX_CODEC_OK;
+ }));
+ vpx_svc_ref_frame_config_t stored_refs = {};
+ ON_CALL(*vpx, codec_control(_, VP9E_SET_SVC_REF_FRAME_CONFIG,
+ A<vpx_svc_ref_frame_config_t*>()))
+ .WillByDefault(
+ DoAll(SaveArgPointee<2>(&stored_refs), Return(VPX_CODEC_OK)));
+ ON_CALL(*vpx, codec_control(_, VP9E_GET_SVC_REF_FRAME_CONFIG,
+ A<vpx_svc_ref_frame_config_t*>()))
+ .WillByDefault(
+ DoAll(SetArgPointee<2>(ByRef(stored_refs)), Return(VPX_CODEC_OK)));
+
+ // First frame is keyframe.
+ encoded_data.data.frame.flags = VPX_FRAME_IS_KEY;
+
+  // The default 3-layer temporal pattern is 0-2-1-2; run one full cycle and
+  // then two more frames.
+ for (int ti : {0, 2, 1, 2, 0, 2}) {
+ EXPECT_CALL(*vpx, codec_encode).WillOnce(Return(VPX_CODEC_OK));
+    // No update is expected if the flags haven't changed; they change when we
+    // move between the base temporal layer and a non-base temporal layer.
+ if ((ti > 0) != (temporal_id > 0)) {
+ EXPECT_CALL(*vpx, codec_control(
+ _, VP9E_SET_SVC_PARAMETERS,
+ SafeMatcherCast<vpx_svc_extra_cfg_t*>(AllOf(
+ Field(&vpx_svc_extra_cfg_t::speed_per_layer,
+ ElementsAreArray(ti == 0 ? kBaseTlSpeed
+ : kHighTlSpeed)),
+ Field(&vpx_svc_extra_cfg_t::loopfilter_ctrl,
+ ElementsAreArray(kLoopFilter))))));
+ } else {
+ EXPECT_CALL(*vpx, codec_control(_, VP9E_SET_SVC_PARAMETERS,
+ A<vpx_svc_extra_cfg_t*>()))
+ .Times(0);
+ }
+
+ VideoFrame frame =
+ VideoFrame::Builder()
+ .set_video_frame_buffer(frame_generator->NextFrame().buffer)
+ .build();
+ encoder.Encode(frame, nullptr);
+
+ temporal_id = ti;
+ for (int si = 0; si < kNumSpatialLayers; ++si) {
+ spatial_id = si;
+
+ EXPECT_CALL(callback, OnEncodedImage).WillOnce(Return(kImageOk));
+ callback_pointer.output_cx_pkt(&encoded_data, callback_pointer.user_priv);
+ }
+
+ encoded_data.data.frame.flags = 0; // Following frames are delta frames.
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/vp9/vp9.cc b/third_party/libwebrtc/modules/video_coding/codecs/vp9/vp9.cc
new file mode 100644
index 0000000000..222e57b6ba
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/vp9/vp9.cc
@@ -0,0 +1,118 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/codecs/vp9/include/vp9.h"
+
+#include <memory>
+
+#include "absl/container/inlined_vector.h"
+#include "api/transport/field_trial_based_config.h"
+#include "api/video_codecs/scalability_mode.h"
+#include "api/video_codecs/sdp_video_format.h"
+#include "api/video_codecs/vp9_profile.h"
+#include "modules/video_coding/codecs/vp9/libvpx_vp9_decoder.h"
+#include "modules/video_coding/codecs/vp9/libvpx_vp9_encoder.h"
+#include "modules/video_coding/svc/create_scalability_structure.h"
+#include "rtc_base/checks.h"
+#include "vpx/vp8cx.h"
+#include "vpx/vp8dx.h"
+#include "vpx/vpx_codec.h"
+
+namespace webrtc {
+
+std::vector<SdpVideoFormat> SupportedVP9Codecs(bool add_scalability_modes) {
+#ifdef RTC_ENABLE_VP9
+ // Profile 2 might not be available on some platforms until
+ // https://bugs.chromium.org/p/webm/issues/detail?id=1544 is solved.
+ static bool vpx_supports_high_bit_depth =
+ (vpx_codec_get_caps(vpx_codec_vp9_cx()) & VPX_CODEC_CAP_HIGHBITDEPTH) !=
+ 0 &&
+ (vpx_codec_get_caps(vpx_codec_vp9_dx()) & VPX_CODEC_CAP_HIGHBITDEPTH) !=
+ 0;
+
+ absl::InlinedVector<ScalabilityMode, kScalabilityModeCount> scalability_modes;
+ if (add_scalability_modes) {
+ for (const auto scalability_mode : kAllScalabilityModes) {
+ if (ScalabilityStructureConfig(scalability_mode).has_value()) {
+ scalability_modes.push_back(scalability_mode);
+ }
+ }
+ }
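+  // Profile 0 is always offered; profile 2 is added below only when libvpx
+  // reports high-bit-depth support.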
+ std::vector<SdpVideoFormat> supported_formats{SdpVideoFormat(
+ cricket::kVp9CodecName,
+ {{kVP9FmtpProfileId, VP9ProfileToString(VP9Profile::kProfile0)}},
+ scalability_modes)};
+ if (vpx_supports_high_bit_depth) {
+ supported_formats.push_back(SdpVideoFormat(
+ cricket::kVp9CodecName,
+ {{kVP9FmtpProfileId, VP9ProfileToString(VP9Profile::kProfile2)}},
+ scalability_modes));
+ }
+
+ return supported_formats;
+#else
+ return std::vector<SdpVideoFormat>();
+#endif
+}
+
+std::vector<SdpVideoFormat> SupportedVP9DecoderCodecs() {
+#ifdef RTC_ENABLE_VP9
+ std::vector<SdpVideoFormat> supported_formats = SupportedVP9Codecs();
+  // The WebRTC internal decoder supports VP9 profiles 1 and 3. However,
+  // there's currently no way of sending VP9 profile 1 or 3 using the internal
+  // encoder. It would require extended support for I444, I422, and I440
+  // buffers.
+ supported_formats.push_back(SdpVideoFormat(
+ cricket::kVp9CodecName,
+ {{kVP9FmtpProfileId, VP9ProfileToString(VP9Profile::kProfile1)}}));
+ supported_formats.push_back(SdpVideoFormat(
+ cricket::kVp9CodecName,
+ {{kVP9FmtpProfileId, VP9ProfileToString(VP9Profile::kProfile3)}}));
+ return supported_formats;
+#else
+ return std::vector<SdpVideoFormat>();
+#endif
+}
+
+std::unique_ptr<VP9Encoder> VP9Encoder::Create() {
+#ifdef RTC_ENABLE_VP9
+ return std::make_unique<LibvpxVp9Encoder>(cricket::VideoCodec(),
+ LibvpxInterface::Create(),
+ FieldTrialBasedConfig());
+#else
+ RTC_DCHECK_NOTREACHED();
+ return nullptr;
+#endif
+}
+
+std::unique_ptr<VP9Encoder> VP9Encoder::Create(
+ const cricket::VideoCodec& codec) {
+#ifdef RTC_ENABLE_VP9
+ return std::make_unique<LibvpxVp9Encoder>(codec, LibvpxInterface::Create(),
+ FieldTrialBasedConfig());
+#else
+ RTC_DCHECK_NOTREACHED();
+ return nullptr;
+#endif
+}
+
+bool VP9Encoder::SupportsScalabilityMode(ScalabilityMode scalability_mode) {
+ return ScalabilityStructureConfig(scalability_mode).has_value();
+}
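+
+// Illustrative only: a caller might gate encoder creation on the scalability
+// mode it intends to use (kL1T3 is just an example value):
+//
+//   if (VP9Encoder::SupportsScalabilityMode(ScalabilityMode::kL1T3)) {
+//     std::unique_ptr<VP9Encoder> encoder = VP9Encoder::Create();
+//   }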
+
+std::unique_ptr<VP9Decoder> VP9Decoder::Create() {
+#ifdef RTC_ENABLE_VP9
+ return std::make_unique<LibvpxVp9Decoder>();
+#else
+ RTC_DCHECK_NOTREACHED();
+ return nullptr;
+#endif
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/vp9/vp9_frame_buffer_pool.cc b/third_party/libwebrtc/modules/video_coding/codecs/vp9/vp9_frame_buffer_pool.cc
new file mode 100644
index 0000000000..181550ce91
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/vp9/vp9_frame_buffer_pool.cc
@@ -0,0 +1,182 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ *
+ */
+
+#ifdef RTC_ENABLE_VP9
+
+#include "modules/video_coding/codecs/vp9/vp9_frame_buffer_pool.h"
+
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "vpx/vpx_codec.h"
+#include "vpx/vpx_decoder.h"
+#include "vpx/vpx_frame_buffer.h"
+
+namespace webrtc {
+
+uint8_t* Vp9FrameBufferPool::Vp9FrameBuffer::GetData() {
+ return data_.data<uint8_t>();
+}
+
+size_t Vp9FrameBufferPool::Vp9FrameBuffer::GetDataSize() const {
+ return data_.size();
+}
+
+void Vp9FrameBufferPool::Vp9FrameBuffer::SetSize(size_t size) {
+ data_.SetSize(size);
+}
+
+bool Vp9FrameBufferPool::InitializeVpxUsePool(
+ vpx_codec_ctx* vpx_codec_context) {
+ RTC_DCHECK(vpx_codec_context);
+ // Tell libvpx to use this pool.
+ if (vpx_codec_set_frame_buffer_functions(
+ // In which context to use these callback functions.
+ vpx_codec_context,
+ // Called by libvpx when it needs another frame buffer.
+ &Vp9FrameBufferPool::VpxGetFrameBuffer,
+ // Called by libvpx when it no longer uses a frame buffer.
+ &Vp9FrameBufferPool::VpxReleaseFrameBuffer,
+ // `this` will be passed as `user_priv` to VpxGetFrameBuffer.
+ this)) {
+ // Failed to configure libvpx to use Vp9FrameBufferPool.
+ return false;
+ }
+ return true;
+}
+
+rtc::scoped_refptr<Vp9FrameBufferPool::Vp9FrameBuffer>
+Vp9FrameBufferPool::GetFrameBuffer(size_t min_size) {
+ RTC_DCHECK_GT(min_size, 0);
+ rtc::scoped_refptr<Vp9FrameBuffer> available_buffer = nullptr;
+ {
+ MutexLock lock(&buffers_lock_);
+ // Do we have a buffer we can recycle?
+ for (const auto& buffer : allocated_buffers_) {
+ if (buffer->HasOneRef()) {
+ available_buffer = buffer;
+ break;
+ }
+ }
+ // Otherwise create one.
+ if (available_buffer == nullptr) {
+ available_buffer = new Vp9FrameBuffer();
+ allocated_buffers_.push_back(available_buffer);
+ if (allocated_buffers_.size() > max_num_buffers_) {
+ RTC_LOG(LS_WARNING)
+ << allocated_buffers_.size()
+ << " Vp9FrameBuffers have been "
+ "allocated by a Vp9FrameBufferPool (exceeding what is "
+ "considered reasonable, "
+ << max_num_buffers_ << ").";
+
+ // TODO(phoglund): this limit is being hit in tests since Oct 5 2016.
+ // See https://bugs.chromium.org/p/webrtc/issues/detail?id=6484.
+ // RTC_DCHECK_NOTREACHED();
+ }
+ }
+ }
+
+ available_buffer->SetSize(min_size);
+ return available_buffer;
+}
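+
+// Reference-count sketch (illustration only): right after GetFrameBuffer the
+// buffer has two refs (the pool's list plus the returned scoped_refptr); once
+// the caller drops its ref, HasOneRef() is true again and the buffer becomes
+// recyclable:
+//
+//   auto buf = pool.GetFrameBuffer(min_size);  // In use, not recyclable.
+//   buf = nullptr;                             // Recyclable on a later call.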
+
+int Vp9FrameBufferPool::GetNumBuffersInUse() const {
+ int num_buffers_in_use = 0;
+ MutexLock lock(&buffers_lock_);
+ for (const auto& buffer : allocated_buffers_) {
+ if (!buffer->HasOneRef())
+ ++num_buffers_in_use;
+ }
+ return num_buffers_in_use;
+}
+
+bool Vp9FrameBufferPool::Resize(size_t max_number_of_buffers) {
+ MutexLock lock(&buffers_lock_);
+ size_t used_buffers_count = 0;
+ for (const auto& buffer : allocated_buffers_) {
+ // If the buffer is in use, the ref count will be >= 2, one from the list we
+ // are looping over and one from the application. If the ref count is 1,
+ // then the list we are looping over holds the only reference and it's safe
+ // to reuse.
+ if (!buffer->HasOneRef()) {
+ used_buffers_count++;
+ }
+ }
+ if (used_buffers_count > max_number_of_buffers) {
+ return false;
+ }
+ max_num_buffers_ = max_number_of_buffers;
+
+ size_t buffers_to_purge = allocated_buffers_.size() - max_num_buffers_;
+ auto iter = allocated_buffers_.begin();
+ while (iter != allocated_buffers_.end() && buffers_to_purge > 0) {
+ if ((*iter)->HasOneRef()) {
+ iter = allocated_buffers_.erase(iter);
+ buffers_to_purge--;
+ } else {
+ ++iter;
+ }
+ }
+ return true;
+}
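+
+// Resize() contract (illustration only): with 3 buffers currently in use,
+// Resize(2) returns false and changes nothing, while Resize(4) succeeds and
+// purges surplus buffers that are not referenced from the outside.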
+
+void Vp9FrameBufferPool::ClearPool() {
+ MutexLock lock(&buffers_lock_);
+ allocated_buffers_.clear();
+}
+
+// static
+int32_t Vp9FrameBufferPool::VpxGetFrameBuffer(void* user_priv,
+ size_t min_size,
+ vpx_codec_frame_buffer* fb) {
+ RTC_DCHECK(user_priv);
+ RTC_DCHECK(fb);
+
+#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
+ // Limit to the size of an 8K (7680x4320) high-bit-depth YUV 4:2:0 frame.
+ size_t size_limit = 7680 * 4320 * 3 / 2 * 2;
+ if (min_size > size_limit)
+ return -1;
+#endif
+
+ Vp9FrameBufferPool* pool = static_cast<Vp9FrameBufferPool*>(user_priv);
+
+ rtc::scoped_refptr<Vp9FrameBuffer> buffer = pool->GetFrameBuffer(min_size);
+ fb->data = buffer->GetData();
+ fb->size = buffer->GetDataSize();
+ // Store Vp9FrameBuffer* in `priv` for use in VpxReleaseFrameBuffer.
+ // This also makes vpx_codec_get_frame return images with their `fb_priv` set
+ // to `buffer` which is important for external reference counting.
+ // Release from refptr so that the buffer's `ref_count_` remains 1 when
+ // `buffer` goes out of scope.
+ fb->priv = static_cast<void*>(buffer.release());
+ return 0;
+}
+
+// static
+int32_t Vp9FrameBufferPool::VpxReleaseFrameBuffer(void* user_priv,
+ vpx_codec_frame_buffer* fb) {
+ RTC_DCHECK(user_priv);
+ RTC_DCHECK(fb);
+ Vp9FrameBuffer* buffer = static_cast<Vp9FrameBuffer*>(fb->priv);
+ if (buffer != nullptr) {
+ buffer->Release();
+ // When libvpx fails to decode and decoding is retried (and fails again),
+ // libvpx can for some reason try to release the same buffer multiple
+ // times. Setting `priv` to null protects against calling Release() more
+ // than once.
+ fb->priv = nullptr;
+ }
+ return 0;
+}
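+
+// Ownership round trip (illustration only): VpxGetFrameBuffer hands libvpx a
+// raw pointer via buffer.release() without dropping a reference, and
+// VpxReleaseFrameBuffer balances that with buffer->Release(). Absent other
+// holders (e.g. a VideoFrame wrapping `fb_priv`), the pool's list then holds
+// the only reference and the buffer becomes recyclable.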
+
+} // namespace webrtc
+
+#endif // RTC_ENABLE_VP9
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/vp9/vp9_frame_buffer_pool.h b/third_party/libwebrtc/modules/video_coding/codecs/vp9/vp9_frame_buffer_pool.h
new file mode 100644
index 0000000000..f46f1b7ea2
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/vp9/vp9_frame_buffer_pool.h
@@ -0,0 +1,134 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ *
+ */
+
+#ifndef MODULES_VIDEO_CODING_CODECS_VP9_VP9_FRAME_BUFFER_POOL_H_
+#define MODULES_VIDEO_CODING_CODECS_VP9_VP9_FRAME_BUFFER_POOL_H_
+
+#ifdef RTC_ENABLE_VP9
+
+#include <vector>
+
+#include "api/ref_counted_base.h"
+#include "api/scoped_refptr.h"
+#include "rtc_base/buffer.h"
+#include "rtc_base/synchronization/mutex.h"
+
+struct vpx_codec_ctx;
+struct vpx_codec_frame_buffer;
+
+namespace webrtc {
+
+// If more buffers than this are allocated we print warnings and crash if in
+// debug mode. VP9 is defined to have 8 reference buffers, of which 3 can be
+// referenced by any frame, see
+// https://tools.ietf.org/html/draft-grange-vp9-bitstream-00#section-2.2.2.
+// Assuming VP9 holds on to at most 8 buffers, any buffers beyond that
+// would have to be held by application code. Decoded frames should not be
+// referenced for longer than necessary. If we allow ~60 additional buffers
+// then the application has ~1 second to e.g. render each frame of a 60 fps
+// video.
+constexpr size_t kDefaultMaxNumBuffers = 68;
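+// (That is, 8 VP9 reference buffers + ~60 in-flight frames = 68.)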
+
+// This memory pool is used to serve buffers to libvpx for decoding purposes in
+// VP9, which is set up in InitializeVpxUsePool. After the initialization any
+// time libvpx wants to decode a frame it will use buffers provided and released
+// through VpxGetFrameBuffer and VpxReleaseFrameBuffer.
+// The benefit of owning the pool that libvpx relies on for decoding is that the
+// decoded frames returned by libvpx (from vpx_codec_get_frame) use parts of our
+// buffers for the decoded image data. By retaining ownership of this buffer
+// using scoped_refptr, the image buffer can be reused by VideoFrames and no
+// frame copy has to occur during decoding and frame delivery.
+//
+// Example usage (pseudo code):
+// Vp9FrameBufferPool pool;
+// pool.InitializeVpxUsePool(decoder_ctx);
+// ...
+//
+// // During decoding, libvpx will get and release buffers from the pool.
+// vpx_codec_decode(decoder_ctx, ...);
+//
+// vpx_image_t* img = vpx_codec_get_frame(decoder_ctx, &iter);
+// // Important to use scoped_refptr to protect it against being recycled by
+// // the pool.
+// scoped_refptr<Vp9FrameBuffer> img_buffer = (Vp9FrameBuffer*)img->fb_priv;
+// ...
+//
+// // Destroying the codec will make libvpx release any buffers it was using.
+// vpx_codec_destroy(decoder_ctx);
+class Vp9FrameBufferPool {
+ public:
+ class Vp9FrameBuffer final
+ : public rtc::RefCountedNonVirtual<Vp9FrameBuffer> {
+ public:
+ uint8_t* GetData();
+ size_t GetDataSize() const;
+ void SetSize(size_t size);
+
+ using rtc::RefCountedNonVirtual<Vp9FrameBuffer>::HasOneRef;
+
+ private:
+ // Data as an easily resizable buffer.
+ rtc::Buffer data_;
+ };
+
+ // Configures libvpx to, in the specified context, use this memory pool for
+ // buffers used to decompress frames. This is only supported for VP9.
+ bool InitializeVpxUsePool(vpx_codec_ctx* vpx_codec_context);
+
+ // Gets a frame buffer of at least `min_size`, recycling an available one or
+ // creating a new one. When no longer referenced from the outside the buffer
+ // becomes recyclable.
+ rtc::scoped_refptr<Vp9FrameBuffer> GetFrameBuffer(size_t min_size);
+ // Gets the number of buffers currently in use (not ready to be recycled).
+ int GetNumBuffersInUse() const;
+ // Changes the max number of buffers in the pool to the new value.
+ // Returns true if the change was successful and false if the number of
+ // buffers currently in use is greater than the new value.
+ bool Resize(size_t max_number_of_buffers);
+ // Releases allocated buffers, deleting available buffers. Buffers in use are
+ // not deleted until they are no longer referenced.
+ void ClearPool();
+
+ // InitializeVpxUsePool configures libvpx to call this function when it needs
+ // a new frame buffer. Parameters:
+ // `user_priv` Private data passed to libvpx, InitializeVpxUsePool sets it up
+ // to be a pointer to the pool.
+ // `min_size` Minimum size needed by libvpx (to decompress a frame).
+ // `fb` Pointer to the libvpx frame buffer object, this is updated to
+ // use the pool's buffer.
+ // Returns 0 on success. Returns < 0 on failure.
+ static int32_t VpxGetFrameBuffer(void* user_priv,
+ size_t min_size,
+ vpx_codec_frame_buffer* fb);
+
+ // InitializeVpxUsePool configures libvpx to call this function when it has
+ // finished using one of the pool's frame buffers. Parameters:
+ // `user_priv` Private data passed to libvpx, InitializeVpxUsePool sets it up
+ // to be a pointer to the pool.
+ // `fb` Pointer to the libvpx frame buffer object, its `priv` will be
+ // a pointer to one of the pool's Vp9FrameBuffer.
+ static int32_t VpxReleaseFrameBuffer(void* user_priv,
+ vpx_codec_frame_buffer* fb);
+
+ private:
+ // Protects `allocated_buffers_`.
+ mutable Mutex buffers_lock_;
+ // All buffers, in use or ready to be recycled.
+ std::vector<rtc::scoped_refptr<Vp9FrameBuffer>> allocated_buffers_
+ RTC_GUARDED_BY(buffers_lock_);
+ size_t max_num_buffers_ = kDefaultMaxNumBuffers;
+};
+
+} // namespace webrtc
+
+#endif // RTC_ENABLE_VP9
+
+#endif // MODULES_VIDEO_CODING_CODECS_VP9_VP9_FRAME_BUFFER_POOL_H_
diff --git a/third_party/libwebrtc/modules/video_coding/decoder_database.cc b/third_party/libwebrtc/modules/video_coding/decoder_database.cc
new file mode 100644
index 0000000000..01120dc669
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/decoder_database.cc
@@ -0,0 +1,135 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/decoder_database.h"
+
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+bool VCMDecoderDataBase::DeregisterExternalDecoder(uint8_t payload_type) {
+ auto it = decoders_.find(payload_type);
+ if (it == decoders_.end()) {
+ // Not found.
+ return false;
+ }
+ // We can't use payload_type to check if the decoder is currently in use,
+ // because payload type may be out of date (e.g. before we decode the first
+ // frame after RegisterReceiveCodec).
+ if (current_decoder_ && current_decoder_->IsSameDecoder(it->second)) {
+ // Release it if it was registered and in use.
+ current_decoder_ = absl::nullopt;
+ }
+ decoders_.erase(it);
+ return true;
+}
+
+// Add the external decoder object to the list of external decoders.
+// Won't be registered as a receive codec until RegisterReceiveCodec is called.
+void VCMDecoderDataBase::RegisterExternalDecoder(
+ uint8_t payload_type,
+ VideoDecoder* external_decoder) {
+ // If payload value already exists, erase old and insert new.
+ DeregisterExternalDecoder(payload_type);
+ decoders_[payload_type] = external_decoder;
+}
+
+bool VCMDecoderDataBase::IsExternalDecoderRegistered(
+ uint8_t payload_type) const {
+ return payload_type == current_payload_type_ ||
+ decoders_.find(payload_type) != decoders_.end();
+}
+
+void VCMDecoderDataBase::RegisterReceiveCodec(
+ uint8_t payload_type,
+ const VideoDecoder::Settings& settings) {
+ // If payload value already exists, erase old and insert new.
+ if (payload_type == current_payload_type_) {
+ current_payload_type_ = absl::nullopt;
+ }
+ decoder_settings_[payload_type] = settings;
+}
+
+bool VCMDecoderDataBase::DeregisterReceiveCodec(uint8_t payload_type) {
+ if (decoder_settings_.erase(payload_type) == 0) {
+ return false;
+ }
+ if (payload_type == current_payload_type_) {
+ // This codec is currently in use.
+ current_payload_type_ = absl::nullopt;
+ }
+ return true;
+}
+
+VCMGenericDecoder* VCMDecoderDataBase::GetDecoder(
+ const VCMEncodedFrame& frame,
+ VCMDecodedFrameCallback* decoded_frame_callback) {
+ RTC_DCHECK(decoded_frame_callback->UserReceiveCallback());
+ uint8_t payload_type = frame.PayloadType();
+ if (payload_type == current_payload_type_ || payload_type == 0) {
+ return current_decoder_.has_value() ? &*current_decoder_ : nullptr;
+ }
+ // If a decoder already exists, reset it.
+ if (current_decoder_.has_value()) {
+ current_decoder_ = absl::nullopt;
+ current_payload_type_ = absl::nullopt;
+ }
+
+ CreateAndInitDecoder(frame);
+ if (current_decoder_ == absl::nullopt) {
+ return nullptr;
+ }
+
+ VCMReceiveCallback* callback = decoded_frame_callback->UserReceiveCallback();
+ callback->OnIncomingPayloadType(payload_type);
+ if (current_decoder_->RegisterDecodeCompleteCallback(decoded_frame_callback) <
+ 0) {
+ current_decoder_ = absl::nullopt;
+ return nullptr;
+ }
+
+ current_payload_type_ = payload_type;
+ return &*current_decoder_;
+}
+
+void VCMDecoderDataBase::CreateAndInitDecoder(const VCMEncodedFrame& frame) {
+ uint8_t payload_type = frame.PayloadType();
+ RTC_LOG(LS_INFO) << "Initializing decoder with payload type '"
+ << int{payload_type} << "'.";
+ auto decoder_item = decoder_settings_.find(payload_type);
+ if (decoder_item == decoder_settings_.end()) {
+ RTC_LOG(LS_ERROR) << "Can't find a decoder associated with payload type: "
+ << int{payload_type};
+ return;
+ }
+ auto external_dec_item = decoders_.find(payload_type);
+ if (external_dec_item == decoders_.end()) {
+ RTC_LOG(LS_ERROR) << "No decoder of this type exists.";
+ return;
+ }
+ current_decoder_.emplace(external_dec_item->second);
+
+ // Copy over input resolutions to prevent codec reinitialization due to
+ // the first frame being of a different resolution than the database values.
+ // This is best effort, since there's no guarantee that width/height have been
+ // parsed yet (and may be zero).
+ RenderResolution frame_resolution(frame.EncodedImage()._encodedWidth,
+ frame.EncodedImage()._encodedHeight);
+ if (frame_resolution.Valid()) {
+ decoder_item->second.set_max_render_resolution(frame_resolution);
+ }
+ if (!current_decoder_->Configure(decoder_item->second)) {
+ current_decoder_ = absl::nullopt;
+ RTC_LOG(LS_ERROR) << "Failed to initialize decoder.";
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/decoder_database.h b/third_party/libwebrtc/modules/video_coding/decoder_database.h
new file mode 100644
index 0000000000..98deb1801f
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/decoder_database.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_DECODER_DATABASE_H_
+#define MODULES_VIDEO_CODING_DECODER_DATABASE_H_
+
+#include <stdint.h>
+
+#include <map>
+
+#include "absl/types/optional.h"
+#include "api/video_codecs/video_decoder.h"
+#include "modules/video_coding/encoded_frame.h"
+#include "modules/video_coding/generic_decoder.h"
+
+namespace webrtc {
+
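+// Typical call sequence (illustration only):
+//   VCMDecoderDataBase db;
+//   db.RegisterExternalDecoder(payload_type, &decoder);
+//   db.RegisterReceiveCodec(payload_type, settings);
+//   // Per frame:
+//   VCMGenericDecoder* generic = db.GetDecoder(frame, &callback);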
+class VCMDecoderDataBase {
+ public:
+ VCMDecoderDataBase() = default;
+ VCMDecoderDataBase(const VCMDecoderDataBase&) = delete;
+ VCMDecoderDataBase& operator=(const VCMDecoderDataBase&) = delete;
+ ~VCMDecoderDataBase() = default;
+
+ bool DeregisterExternalDecoder(uint8_t payload_type);
+ void RegisterExternalDecoder(uint8_t payload_type,
+ VideoDecoder* external_decoder);
+ bool IsExternalDecoderRegistered(uint8_t payload_type) const;
+
+ void RegisterReceiveCodec(uint8_t payload_type,
+ const VideoDecoder::Settings& settings);
+ bool DeregisterReceiveCodec(uint8_t payload_type);
+
+ // Returns a decoder specified by frame.PayloadType(). The decoded frame
+ // callback of the decoder is set to `decoded_frame_callback`. If no such
+ // decoder already exists an instance will be created and initialized.
+ // nullptr is returned if no decoder with the specified payload type was found
+ // and the function failed to create one.
+ VCMGenericDecoder* GetDecoder(
+ const VCMEncodedFrame& frame,
+ VCMDecodedFrameCallback* decoded_frame_callback);
+
+ private:
+ void CreateAndInitDecoder(const VCMEncodedFrame& frame);
+
+ absl::optional<uint8_t> current_payload_type_;
+ absl::optional<VCMGenericDecoder> current_decoder_;
+ // Initialization parameters for decoders keyed by payload type.
+ std::map<uint8_t, VideoDecoder::Settings> decoder_settings_;
+ // Decoders keyed by payload type.
+ std::map<uint8_t, VideoDecoder*> decoders_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_DECODER_DATABASE_H_
diff --git a/third_party/libwebrtc/modules/video_coding/decoding_state.cc b/third_party/libwebrtc/modules/video_coding/decoding_state.cc
new file mode 100644
index 0000000000..5e405cbd05
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/decoding_state.cc
@@ -0,0 +1,368 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/decoding_state.h"
+
+#include "common_video/h264/h264_common.h"
+#include "modules/include/module_common_types_public.h"
+#include "modules/video_coding/frame_buffer.h"
+#include "modules/video_coding/jitter_buffer_common.h"
+#include "modules/video_coding/packet.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+VCMDecodingState::VCMDecodingState()
+ : sequence_num_(0),
+ time_stamp_(0),
+ picture_id_(kNoPictureId),
+ temporal_id_(kNoTemporalIdx),
+ tl0_pic_id_(kNoTl0PicIdx),
+ full_sync_(true),
+ in_initial_state_(true) {
+ memset(frame_decoded_, 0, sizeof(frame_decoded_));
+}
+
+VCMDecodingState::~VCMDecodingState() {}
+
+void VCMDecodingState::Reset() {
+ // TODO(mikhal): Verify - we may not always want to reset the sync.
+ sequence_num_ = 0;
+ time_stamp_ = 0;
+ picture_id_ = kNoPictureId;
+ temporal_id_ = kNoTemporalIdx;
+ tl0_pic_id_ = kNoTl0PicIdx;
+ full_sync_ = true;
+ in_initial_state_ = true;
+ memset(frame_decoded_, 0, sizeof(frame_decoded_));
+ received_sps_.clear();
+ received_pps_.clear();
+}
+
+uint32_t VCMDecodingState::time_stamp() const {
+ return time_stamp_;
+}
+
+uint16_t VCMDecodingState::sequence_num() const {
+ return sequence_num_;
+}
+
+bool VCMDecodingState::IsOldFrame(const VCMFrameBuffer* frame) const {
+ RTC_DCHECK(frame);
+ if (in_initial_state_)
+ return false;
+ return !IsNewerTimestamp(frame->Timestamp(), time_stamp_);
+}
+
+bool VCMDecodingState::IsOldPacket(const VCMPacket* packet) const {
+ RTC_DCHECK(packet);
+ if (in_initial_state_)
+ return false;
+ return !IsNewerTimestamp(packet->timestamp, time_stamp_);
+}
+
+void VCMDecodingState::SetState(const VCMFrameBuffer* frame) {
+ RTC_DCHECK(frame);
+ RTC_CHECK_GE(frame->GetHighSeqNum(), 0);
+ if (!UsingFlexibleMode(frame))
+ UpdateSyncState(frame);
+ sequence_num_ = static_cast<uint16_t>(frame->GetHighSeqNum());
+ time_stamp_ = frame->Timestamp();
+ picture_id_ = frame->PictureId();
+ temporal_id_ = frame->TemporalId();
+ tl0_pic_id_ = frame->Tl0PicId();
+
+ for (const NaluInfo& nalu : frame->GetNaluInfos()) {
+ if (nalu.type == H264::NaluType::kPps) {
+ if (nalu.pps_id < 0) {
+ RTC_LOG(LS_WARNING) << "Received pps without pps id.";
+ } else if (nalu.sps_id < 0) {
+ RTC_LOG(LS_WARNING) << "Received pps without sps id.";
+ } else {
+ received_pps_[nalu.pps_id] = nalu.sps_id;
+ }
+ } else if (nalu.type == H264::NaluType::kSps) {
+ if (nalu.sps_id < 0) {
+ RTC_LOG(LS_WARNING) << "Received sps without sps id.";
+ } else {
+ received_sps_.insert(nalu.sps_id);
+ }
+ }
+ }
+
+ if (UsingFlexibleMode(frame)) {
+ uint16_t frame_index = picture_id_ % kFrameDecodedLength;
+ if (in_initial_state_) {
+ frame_decoded_cleared_to_ = frame_index;
+ } else if (frame->FrameType() == VideoFrameType::kVideoFrameKey) {
+ memset(frame_decoded_, 0, sizeof(frame_decoded_));
+ frame_decoded_cleared_to_ = frame_index;
+ } else {
+ if (AheadOfFramesDecodedClearedTo(frame_index)) {
+ while (frame_decoded_cleared_to_ != frame_index) {
+ frame_decoded_cleared_to_ =
+ (frame_decoded_cleared_to_ + 1) % kFrameDecodedLength;
+ frame_decoded_[frame_decoded_cleared_to_] = false;
+ }
+ }
+ }
+ frame_decoded_[frame_index] = true;
+ }
+
+ in_initial_state_ = false;
+}
+
+void VCMDecodingState::CopyFrom(const VCMDecodingState& state) {
+ sequence_num_ = state.sequence_num_;
+ time_stamp_ = state.time_stamp_;
+ picture_id_ = state.picture_id_;
+ temporal_id_ = state.temporal_id_;
+ tl0_pic_id_ = state.tl0_pic_id_;
+ full_sync_ = state.full_sync_;
+ in_initial_state_ = state.in_initial_state_;
+ frame_decoded_cleared_to_ = state.frame_decoded_cleared_to_;
+ memcpy(frame_decoded_, state.frame_decoded_, sizeof(frame_decoded_));
+ received_sps_ = state.received_sps_;
+ received_pps_ = state.received_pps_;
+}
+
+bool VCMDecodingState::UpdateEmptyFrame(const VCMFrameBuffer* frame) {
+ bool empty_packet = frame->GetHighSeqNum() == frame->GetLowSeqNum();
+ if (in_initial_state_ && empty_packet) {
+ // Drop empty packets as long as we are in the initial state.
+ return true;
+ }
+ if ((empty_packet && ContinuousSeqNum(frame->GetHighSeqNum())) ||
+ ContinuousFrame(frame)) {
+ // Continuous empty packets or continuous frames can be dropped if we
+ // advance the sequence number.
+ sequence_num_ = frame->GetHighSeqNum();
+ time_stamp_ = frame->Timestamp();
+ return true;
+ }
+ return false;
+}
+
+void VCMDecodingState::UpdateOldPacket(const VCMPacket* packet) {
+ RTC_DCHECK(packet);
+ if (packet->timestamp == time_stamp_) {
+ // Late packet belonging to the last decoded frame - make sure we update the
+ // last decoded sequence number.
+ sequence_num_ = LatestSequenceNumber(packet->seqNum, sequence_num_);
+ }
+}
+
+void VCMDecodingState::SetSeqNum(uint16_t new_seq_num) {
+ sequence_num_ = new_seq_num;
+}
+
+bool VCMDecodingState::in_initial_state() const {
+ return in_initial_state_;
+}
+
+bool VCMDecodingState::full_sync() const {
+ return full_sync_;
+}
+
+void VCMDecodingState::UpdateSyncState(const VCMFrameBuffer* frame) {
+ if (in_initial_state_)
+ return;
+ if (frame->TemporalId() == kNoTemporalIdx ||
+ frame->Tl0PicId() == kNoTl0PicIdx) {
+ full_sync_ = true;
+ } else if (frame->FrameType() == VideoFrameType::kVideoFrameKey ||
+ frame->LayerSync()) {
+ full_sync_ = true;
+ } else if (full_sync_) {
+ // Verify that we are still in sync.
+ // Sync will be broken if continuity is true for layers but not for the
+ // other methods (PictureId and SeqNum).
+ if (UsingPictureId(frame)) {
+ // First check for a valid tl0PicId.
+ if (frame->Tl0PicId() - tl0_pic_id_ > 1) {
+ full_sync_ = false;
+ } else {
+ full_sync_ = ContinuousPictureId(frame->PictureId());
+ }
+ } else {
+ full_sync_ =
+ ContinuousSeqNum(static_cast<uint16_t>(frame->GetLowSeqNum()));
+ }
+ }
+}
+
+bool VCMDecodingState::ContinuousFrame(const VCMFrameBuffer* frame) const {
+ // Check continuity based on the following hierarchy:
+ // - Temporal layers (stop here if out of sync).
+ // - Picture Id when available.
+ // - Sequence numbers.
+ // Return true when in initial state.
+ // Note that when a method is not applicable it will return false.
+ RTC_DCHECK(frame);
+ // A key frame is always considered continuous as it doesn't refer to any
+ // frames and therefore won't introduce any errors even if prior frames are
+ // missing.
+ if (frame->FrameType() == VideoFrameType::kVideoFrameKey &&
+ HaveSpsAndPps(frame->GetNaluInfos())) {
+ return true;
+ }
+ // When in the initial state we always require a key frame to start decoding.
+ if (in_initial_state_)
+ return false;
+ if (ContinuousLayer(frame->TemporalId(), frame->Tl0PicId()))
+ return true;
+ // tl0picId is either not used, or should remain unchanged.
+ if (frame->Tl0PicId() != tl0_pic_id_)
+ return false;
+ // Base layers are not continuous or temporal layers are inactive.
+ // In the presence of temporal layers, check for Picture ID/sequence number
+ // continuity if sync can be restored by this frame.
+ if (!full_sync_ && !frame->LayerSync())
+ return false;
+ if (UsingPictureId(frame)) {
+ if (UsingFlexibleMode(frame)) {
+ return ContinuousFrameRefs(frame);
+ } else {
+ return ContinuousPictureId(frame->PictureId());
+ }
+ } else {
+ return ContinuousSeqNum(static_cast<uint16_t>(frame->GetLowSeqNum())) &&
+ HaveSpsAndPps(frame->GetNaluInfos());
+ }
+}
+
+bool VCMDecodingState::ContinuousPictureId(int picture_id) const {
+ int next_picture_id = picture_id_ + 1;
+ if (picture_id < picture_id_) {
+ // Wrap
+ if (picture_id_ >= 0x80) {
+ // 15 bits used for picture id
+ return ((next_picture_id & 0x7FFF) == picture_id);
+ } else {
+ // 7 bits used for picture id
+ return ((next_picture_id & 0x7F) == picture_id);
+ }
+ }
+ // No wrap
+ return (next_picture_id == picture_id);
+}
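+
+// Worked wrap example (illustration only): with a 15-bit picture id and
+// picture_id_ == 0x7FFF, next_picture_id == 0x8000 and
+// (0x8000 & 0x7FFF) == 0, so an incoming picture id of 0 is continuous.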
+
+bool VCMDecodingState::ContinuousSeqNum(uint16_t seq_num) const {
+ return seq_num == static_cast<uint16_t>(sequence_num_ + 1);
+}
+
+bool VCMDecodingState::ContinuousLayer(int temporal_id, int tl0_pic_id) const {
+ // First, check if applicable.
+ if (temporal_id == kNoTemporalIdx || tl0_pic_id == kNoTl0PicIdx)
+ return false;
+ // If this is the first frame to use temporal layers, make sure we start
+ // from base.
+ else if (tl0_pic_id_ == kNoTl0PicIdx && temporal_id_ == kNoTemporalIdx &&
+ temporal_id == 0)
+ return true;
+
+ // Current implementation: Look for base layer continuity.
+ if (temporal_id != 0)
+ return false;
+ return (static_cast<uint8_t>(tl0_pic_id_ + 1) == tl0_pic_id);
+}
+
+bool VCMDecodingState::ContinuousFrameRefs(const VCMFrameBuffer* frame) const {
+ uint8_t num_refs = frame->CodecSpecific()->codecSpecific.VP9.num_ref_pics;
+ for (uint8_t r = 0; r < num_refs; ++r) {
+ uint16_t frame_ref = frame->PictureId() -
+ frame->CodecSpecific()->codecSpecific.VP9.p_diff[r];
+ uint16_t frame_index = frame_ref % kFrameDecodedLength;
+ if (AheadOfFramesDecodedClearedTo(frame_index) ||
+ !frame_decoded_[frame_index]) {
+ return false;
+ }
+ }
+ return true;
+}
+
+bool VCMDecodingState::UsingPictureId(const VCMFrameBuffer* frame) const {
+ return (frame->PictureId() != kNoPictureId && picture_id_ != kNoPictureId);
+}
+
+bool VCMDecodingState::UsingFlexibleMode(const VCMFrameBuffer* frame) const {
+ bool is_flexible_mode =
+ frame->CodecSpecific()->codecType == kVideoCodecVP9 &&
+ frame->CodecSpecific()->codecSpecific.VP9.flexible_mode;
+ if (is_flexible_mode && frame->PictureId() == kNoPictureId) {
+ RTC_LOG(LS_WARNING) << "Frame is marked as using flexible mode but no"
+ "picture id is set.";
+ return false;
+ }
+ return is_flexible_mode;
+}
+
+// TODO(philipel): change how this check works; it practically
+// limits the max p_diff to 64.
+bool VCMDecodingState::AheadOfFramesDecodedClearedTo(uint16_t index) const {
+ // No way of knowing for sure if we are actually ahead of
+ // frame_decoded_cleared_to_. We just make the assumption
+ // that we are not trying to reference back to a very old
+ // index, but instead are referencing a newer index.
+ uint16_t diff =
+ index > frame_decoded_cleared_to_
+ ? kFrameDecodedLength - (index - frame_decoded_cleared_to_)
+ : frame_decoded_cleared_to_ - index;
+ return diff > kFrameDecodedLength / 2;
+}
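+
+// Worked example (illustration only): with kFrameDecodedLength == 128 and
+// frame_decoded_cleared_to_ == 10, index 20 yields diff == 128 - 10 == 118
+// (> 64, treated as ahead), whereas index 5 yields diff == 5 (not ahead).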
+
+bool VCMDecodingState::HaveSpsAndPps(const std::vector<NaluInfo>& nalus) const {
+ std::set<int> new_sps;
+ std::map<int, int> new_pps;
+ for (const NaluInfo& nalu : nalus) {
+ // Check if this nalu actually contains sps/pps information or dependencies.
+ if (nalu.sps_id == -1 && nalu.pps_id == -1)
+ continue;
+ switch (nalu.type) {
+ case H264::NaluType::kPps:
+ if (nalu.pps_id < 0) {
+ RTC_LOG(LS_WARNING) << "Received pps without pps id.";
+ } else if (nalu.sps_id < 0) {
+ RTC_LOG(LS_WARNING) << "Received pps without sps id.";
+ } else {
+ new_pps[nalu.pps_id] = nalu.sps_id;
+ }
+ break;
+ case H264::NaluType::kSps:
+ if (nalu.sps_id < 0) {
+ RTC_LOG(LS_WARNING) << "Received sps without sps id.";
+ } else {
+ new_sps.insert(nalu.sps_id);
+ }
+ break;
+ default: {
+ int needed_sps = -1;
+ auto pps_it = new_pps.find(nalu.pps_id);
+ if (pps_it != new_pps.end()) {
+ needed_sps = pps_it->second;
+ } else {
+ auto pps_it2 = received_pps_.find(nalu.pps_id);
+ if (pps_it2 == received_pps_.end()) {
+ return false;
+ }
+ needed_sps = pps_it2->second;
+ }
+ if (new_sps.find(needed_sps) == new_sps.end() &&
+ received_sps_.find(needed_sps) == received_sps_.end()) {
+ return false;
+ }
+ break;
+ }
+ }
+ }
+ return true;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/decoding_state.h b/third_party/libwebrtc/modules/video_coding/decoding_state.h
new file mode 100644
index 0000000000..ec972949d8
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/decoding_state.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_DECODING_STATE_H_
+#define MODULES_VIDEO_CODING_DECODING_STATE_H_
+
+#include <cstdint>
+#include <map>
+#include <set>
+#include <vector>
+
+namespace webrtc {
+
+// Forward declarations
+struct NaluInfo;
+class VCMFrameBuffer;
+class VCMPacket;
+
+class VCMDecodingState {
+ public:
+ // The max number of bits used to reference back
+ // to a previous frame when using flexible mode.
+ static const uint16_t kNumRefBits = 7;
+ static const uint16_t kFrameDecodedLength = 1 << kNumRefBits;
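+ // With kNumRefBits == 7 this is a 128-entry ring of decoded flags.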
+
+ VCMDecodingState();
+ ~VCMDecodingState();
+ // Check for old frame
+ bool IsOldFrame(const VCMFrameBuffer* frame) const;
+ // Check for old packet
+ bool IsOldPacket(const VCMPacket* packet) const;
+ // Check for frame continuity based on current decoded state. Use best method
+ // possible, i.e. temporal info, picture ID or sequence number.
+ bool ContinuousFrame(const VCMFrameBuffer* frame) const;
+ void SetState(const VCMFrameBuffer* frame);
+ void CopyFrom(const VCMDecodingState& state);
+ bool UpdateEmptyFrame(const VCMFrameBuffer* frame);
+ // Update the sequence number if the timestamp matches current state and the
+ // sequence number is higher than the current one. This accounts for packets
+ // arriving late.
+ void UpdateOldPacket(const VCMPacket* packet);
+ void SetSeqNum(uint16_t new_seq_num);
+ void Reset();
+ uint32_t time_stamp() const;
+ uint16_t sequence_num() const;
+ // Return true if at initial state.
+ bool in_initial_state() const;
+ // Return true when sync is on - decode all layers.
+ bool full_sync() const;
+
+ private:
+ void UpdateSyncState(const VCMFrameBuffer* frame);
+ // Designated continuity functions
+ bool ContinuousPictureId(int picture_id) const;
+ bool ContinuousSeqNum(uint16_t seq_num) const;
+ bool ContinuousLayer(int temporal_id, int tl0_pic_id) const;
+ bool ContinuousFrameRefs(const VCMFrameBuffer* frame) const;
+ bool UsingPictureId(const VCMFrameBuffer* frame) const;
+ bool UsingFlexibleMode(const VCMFrameBuffer* frame) const;
+ bool AheadOfFramesDecodedClearedTo(uint16_t index) const;
+ bool HaveSpsAndPps(const std::vector<NaluInfo>& nalus) const;
+
+ // Keep state of last decoded frame.
+ // TODO(mikhal/stefan): create designated classes to handle these types.
+ uint16_t sequence_num_;
+ uint32_t time_stamp_;
+ int picture_id_;
+ int temporal_id_;
+ int tl0_pic_id_;
+ bool full_sync_; // Sync flag when temporal layers are used.
+ bool in_initial_state_;
+
+ // Used to check references in flexible mode.
+ bool frame_decoded_[kFrameDecodedLength];
+ uint16_t frame_decoded_cleared_to_;
+ std::set<int> received_sps_;
+ std::map<int, int> received_pps_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_DECODING_STATE_H_
diff --git a/third_party/libwebrtc/modules/video_coding/decoding_state_unittest.cc b/third_party/libwebrtc/modules/video_coding/decoding_state_unittest.cc
new file mode 100644
index 0000000000..bef7f81c62
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/decoding_state_unittest.cc
@@ -0,0 +1,713 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/decoding_state.h"
+
+#include "modules/rtp_rtcp/source/rtp_video_header.h"
+#include "modules/video_coding/codecs/interface/common_constants.h"
+#include "modules/video_coding/codecs/vp8/include/vp8_globals.h"
+#include "modules/video_coding/codecs/vp9/include/vp9_globals.h"
+#include "modules/video_coding/frame_buffer.h"
+#include "modules/video_coding/include/video_coding.h"
+#include "modules/video_coding/packet.h"
+#include "modules/video_coding/session_info.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+TEST(TestDecodingState, Sanity) {
+ VCMDecodingState dec_state;
+ dec_state.Reset();
+ EXPECT_TRUE(dec_state.in_initial_state());
+ EXPECT_TRUE(dec_state.full_sync());
+}
+
+TEST(TestDecodingState, FrameContinuity) {
+ VCMDecodingState dec_state;
+ // Check that the continuity decision is based on the correct method.
+ VCMFrameBuffer frame;
+ VCMFrameBuffer frame_key;
+ VCMPacket packet;
+ packet.video_header.is_first_packet_in_frame = true;
+ packet.timestamp = 1;
+ packet.seqNum = 0xffff;
+ packet.video_header.frame_type = VideoFrameType::kVideoFrameDelta;
+ packet.video_header.codec = kVideoCodecVP8;
+ auto& vp8_header =
+ packet.video_header.video_type_header.emplace<RTPVideoHeaderVP8>();
+ vp8_header.pictureId = 0x007F;
+ FrameData frame_data;
+ frame_data.rtt_ms = 0;
+ frame_data.rolling_average_packets_per_frame = -1;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ // Always start with a key frame.
+ dec_state.Reset();
+ EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
+ packet.video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ EXPECT_LE(0, frame_key.InsertPacket(packet, 0, frame_data));
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame_key));
+ dec_state.SetState(&frame);
+ frame.Reset();
+ packet.video_header.frame_type = VideoFrameType::kVideoFrameDelta;
+ // Use pictureId
+ packet.video_header.is_first_packet_in_frame = false;
+ vp8_header.pictureId = 0x0002;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
+ frame.Reset();
+ vp8_header.pictureId = 0;
+ packet.seqNum = 10;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+
+ // Use sequence numbers.
+ vp8_header.pictureId = kNoPictureId;
+ frame.Reset();
+ packet.seqNum = dec_state.sequence_num() - 1u;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
+ frame.Reset();
+ packet.seqNum = dec_state.sequence_num() + 1u;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ // Insert another packet to this frame
+ packet.seqNum++;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ // Verify wrap.
+ EXPECT_LE(dec_state.sequence_num(), 0xffff);
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+ dec_state.SetState(&frame);
+
+ // Insert packet with temporal info.
+ dec_state.Reset();
+ frame.Reset();
+ vp8_header.tl0PicIdx = 0;
+ vp8_header.temporalIdx = 0;
+ vp8_header.pictureId = 0;
+ packet.seqNum = 1;
+ packet.timestamp = 1;
+ EXPECT_TRUE(dec_state.full_sync());
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ dec_state.SetState(&frame);
+ EXPECT_TRUE(dec_state.full_sync());
+ frame.Reset();
+ // 1 layer up - still good.
+ vp8_header.tl0PicIdx = 0;
+ vp8_header.temporalIdx = 1;
+ vp8_header.pictureId = 1;
+ packet.seqNum = 2;
+ packet.timestamp = 2;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+ dec_state.SetState(&frame);
+ EXPECT_TRUE(dec_state.full_sync());
+ frame.Reset();
+ // Lost non-base layer packet => should update sync parameter.
+ vp8_header.tl0PicIdx = 0;
+ vp8_header.temporalIdx = 3;
+ vp8_header.pictureId = 3;
+ packet.seqNum = 4;
+ packet.timestamp = 4;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
+ // Now insert the next non-base layer (belonging to a next tl0PicId).
+ frame.Reset();
+ vp8_header.tl0PicIdx = 1;
+ vp8_header.temporalIdx = 2;
+ vp8_header.pictureId = 4;
+ packet.seqNum = 5;
+ packet.timestamp = 5;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ // Checking continuity and not updating the state - this should not trigger
+ // an update of sync state.
+ EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
+ EXPECT_TRUE(dec_state.full_sync());
+ // Next base layer (dropped interim non-base layers) - should update sync.
+ frame.Reset();
+ vp8_header.tl0PicIdx = 1;
+ vp8_header.temporalIdx = 0;
+ vp8_header.pictureId = 5;
+ packet.seqNum = 6;
+ packet.timestamp = 6;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+ dec_state.SetState(&frame);
+ EXPECT_FALSE(dec_state.full_sync());
+
+ // Check wrap for temporal layers.
+ frame.Reset();
+ vp8_header.tl0PicIdx = 0x00FF;
+ vp8_header.temporalIdx = 0;
+ vp8_header.pictureId = 6;
+ packet.seqNum = 7;
+ packet.timestamp = 7;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ dec_state.SetState(&frame);
+ EXPECT_FALSE(dec_state.full_sync());
+ frame.Reset();
+ vp8_header.tl0PicIdx = 0x0000;
+ vp8_header.temporalIdx = 0;
+ vp8_header.pictureId = 7;
+ packet.seqNum = 8;
+ packet.timestamp = 8;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+ // After SetState, the same frame is no longer continuous.
+ dec_state.SetState(&frame);
+ EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
+}
+
+TEST(TestDecodingState, UpdateOldPacket) {
+ VCMDecodingState dec_state;
+ // Update only if zero size and newer than previous.
+ // Should only update if the timestamps match.
+ VCMFrameBuffer frame;
+ VCMPacket packet;
+ packet.timestamp = 1;
+ packet.seqNum = 1;
+ packet.video_header.frame_type = VideoFrameType::kVideoFrameDelta;
+ FrameData frame_data;
+ frame_data.rtt_ms = 0;
+ frame_data.rolling_average_packets_per_frame = -1;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ dec_state.SetState(&frame);
+ EXPECT_EQ(dec_state.sequence_num(), 1);
+ // Insert an empty packet that does not belong to the same frame.
+ // => Sequence num should be the same.
+ packet.timestamp = 2;
+ dec_state.UpdateOldPacket(&packet);
+ EXPECT_EQ(dec_state.sequence_num(), 1);
+ // Now insert empty packet belonging to the same frame.
+ packet.timestamp = 1;
+ packet.seqNum = 2;
+ packet.video_header.frame_type = VideoFrameType::kEmptyFrame;
+ packet.sizeBytes = 0;
+ dec_state.UpdateOldPacket(&packet);
+ EXPECT_EQ(dec_state.sequence_num(), 2);
+ // Now insert delta packet belonging to the same frame.
+ packet.timestamp = 1;
+ packet.seqNum = 3;
+ packet.video_header.frame_type = VideoFrameType::kVideoFrameDelta;
+ packet.sizeBytes = 1400;
+ dec_state.UpdateOldPacket(&packet);
+ EXPECT_EQ(dec_state.sequence_num(), 3);
+ // Insert a packet belonging to an older timestamp - should not update the
+ // sequence number.
+ packet.timestamp = 0;
+ packet.seqNum = 4;
+ packet.video_header.frame_type = VideoFrameType::kEmptyFrame;
+ packet.sizeBytes = 0;
+ dec_state.UpdateOldPacket(&packet);
+ EXPECT_EQ(dec_state.sequence_num(), 3);
+}
+
+TEST(TestDecodingState, MultiLayerBehavior) {
+ // Identify sync/non-sync when more than one layer.
+ VCMDecodingState dec_state;
+ // Identify packets belonging to old frames/packets.
+ // Set state for current frames.
+ // tl0PicIdx 0, temporal id 0.
+ VCMFrameBuffer frame;
+ VCMPacket packet;
+ packet.video_header.frame_type = VideoFrameType::kVideoFrameDelta;
+ packet.video_header.codec = kVideoCodecVP8;
+ packet.timestamp = 0;
+ packet.seqNum = 0;
+ auto& vp8_header =
+ packet.video_header.video_type_header.emplace<RTPVideoHeaderVP8>();
+ vp8_header.tl0PicIdx = 0;
+ vp8_header.temporalIdx = 0;
+ vp8_header.pictureId = 0;
+ FrameData frame_data;
+ frame_data.rtt_ms = 0;
+ frame_data.rolling_average_packets_per_frame = -1;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ dec_state.SetState(&frame);
+ // tl0PicIdx 0, temporal id 1.
+ frame.Reset();
+ packet.timestamp = 1;
+ packet.seqNum = 1;
+ vp8_header.tl0PicIdx = 0;
+ vp8_header.temporalIdx = 1;
+ vp8_header.pictureId = 1;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+ dec_state.SetState(&frame);
+ EXPECT_TRUE(dec_state.full_sync());
+ // Lost tl0PicIdx 0, temporal id 2.
+ // Insert tl0PicIdx 0, temporal id 3.
+ frame.Reset();
+ packet.timestamp = 3;
+ packet.seqNum = 3;
+ vp8_header.tl0PicIdx = 0;
+ vp8_header.temporalIdx = 3;
+ vp8_header.pictureId = 3;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
+ dec_state.SetState(&frame);
+ EXPECT_FALSE(dec_state.full_sync());
+ // Insert next base layer
+ frame.Reset();
+ packet.timestamp = 4;
+ packet.seqNum = 4;
+ vp8_header.tl0PicIdx = 1;
+ vp8_header.temporalIdx = 0;
+ vp8_header.pictureId = 4;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+ dec_state.SetState(&frame);
+ EXPECT_FALSE(dec_state.full_sync());
+ // Insert key frame - should update sync value.
+ // A key frame is always a base layer.
+ frame.Reset();
+ packet.video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ packet.video_header.is_first_packet_in_frame = true;
+ packet.timestamp = 5;
+ packet.seqNum = 5;
+ vp8_header.tl0PicIdx = 2;
+ vp8_header.temporalIdx = 0;
+ vp8_header.pictureId = 5;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+ dec_state.SetState(&frame);
+ EXPECT_TRUE(dec_state.full_sync());
+ // After sync, a continuous PictureId is required
+ // (a continuous base layer is not enough).
+ frame.Reset();
+ packet.video_header.frame_type = VideoFrameType::kVideoFrameDelta;
+ packet.timestamp = 6;
+ packet.seqNum = 6;
+ vp8_header.tl0PicIdx = 3;
+ vp8_header.temporalIdx = 0;
+ vp8_header.pictureId = 6;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+ EXPECT_TRUE(dec_state.full_sync());
+ frame.Reset();
+ packet.video_header.frame_type = VideoFrameType::kVideoFrameDelta;
+ packet.video_header.is_first_packet_in_frame = true;
+ packet.timestamp = 8;
+ packet.seqNum = 8;
+ vp8_header.tl0PicIdx = 4;
+ vp8_header.temporalIdx = 0;
+ vp8_header.pictureId = 8;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
+ EXPECT_TRUE(dec_state.full_sync());
+ dec_state.SetState(&frame);
+ EXPECT_FALSE(dec_state.full_sync());
+
+ // Insert a non-ref frame - should update sync value.
+ frame.Reset();
+ packet.video_header.frame_type = VideoFrameType::kVideoFrameDelta;
+ packet.video_header.is_first_packet_in_frame = true;
+ packet.timestamp = 9;
+ packet.seqNum = 9;
+ vp8_header.tl0PicIdx = 4;
+ vp8_header.temporalIdx = 2;
+ vp8_header.pictureId = 9;
+ vp8_header.layerSync = true;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ dec_state.SetState(&frame);
+ EXPECT_TRUE(dec_state.full_sync());
+
+ // The following test will verify the sync flag behavior after a loss.
+ // Create the following pattern:
+ // Update base layer, lose packet 1 (sync flag on, layer 2), insert packet 3
+ // (sync flag on, layer 2), then check continuity and sync flag after inserting
+ // packet 2 (sync flag on, layer 1).
+ // Base layer.
+ frame.Reset();
+ dec_state.Reset();
+ packet.video_header.frame_type = VideoFrameType::kVideoFrameDelta;
+ packet.video_header.is_first_packet_in_frame = true;
+ packet.markerBit = 1;
+ packet.timestamp = 0;
+ packet.seqNum = 0;
+ vp8_header.tl0PicIdx = 0;
+ vp8_header.temporalIdx = 0;
+ vp8_header.pictureId = 0;
+ vp8_header.layerSync = false;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ dec_state.SetState(&frame);
+ EXPECT_TRUE(dec_state.full_sync());
+ // Layer 2 - 2 packets (insert one, lose one).
+ frame.Reset();
+ packet.video_header.frame_type = VideoFrameType::kVideoFrameDelta;
+ packet.video_header.is_first_packet_in_frame = true;
+ packet.markerBit = 0;
+ packet.timestamp = 1;
+ packet.seqNum = 1;
+ vp8_header.tl0PicIdx = 0;
+ vp8_header.temporalIdx = 2;
+ vp8_header.pictureId = 1;
+ vp8_header.layerSync = true;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+ // Layer 1
+ frame.Reset();
+ packet.video_header.frame_type = VideoFrameType::kVideoFrameDelta;
+ packet.video_header.is_first_packet_in_frame = true;
+ packet.markerBit = 1;
+ packet.timestamp = 2;
+ packet.seqNum = 3;
+ vp8_header.tl0PicIdx = 0;
+ vp8_header.temporalIdx = 1;
+ vp8_header.pictureId = 2;
+ vp8_header.layerSync = true;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
+ EXPECT_TRUE(dec_state.full_sync());
+}
+
+TEST(TestDecodingState, DiscontinuousPicIdContinuousSeqNum) {
+ VCMDecodingState dec_state;
+ VCMFrameBuffer frame;
+ VCMPacket packet;
+ frame.Reset();
+ packet.video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ packet.video_header.codec = kVideoCodecVP8;
+ packet.timestamp = 0;
+ packet.seqNum = 0;
+ auto& vp8_header =
+ packet.video_header.video_type_header.emplace<RTPVideoHeaderVP8>();
+ vp8_header.tl0PicIdx = 0;
+ vp8_header.temporalIdx = 0;
+ vp8_header.pictureId = 0;
+ FrameData frame_data;
+ frame_data.rtt_ms = 0;
+ frame_data.rolling_average_packets_per_frame = -1;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ dec_state.SetState(&frame);
+ EXPECT_TRUE(dec_state.full_sync());
+
+ // Continuous sequence number but discontinuous picture id. This implies
+ // a loss, so we have to fall back to only decoding the base layer.
+ frame.Reset();
+ packet.video_header.frame_type = VideoFrameType::kVideoFrameDelta;
+ packet.timestamp += 3000;
+ ++packet.seqNum;
+ vp8_header.temporalIdx = 1;
+ vp8_header.pictureId = 2;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
+ dec_state.SetState(&frame);
+ EXPECT_FALSE(dec_state.full_sync());
+}
+
+TEST(TestDecodingState, OldInput) {
+ VCMDecodingState dec_state;
+ // Identify packets belonging to old frames/packets.
+ // Set state for current frames.
+ VCMFrameBuffer frame;
+ VCMPacket packet;
+ packet.timestamp = 10;
+ packet.seqNum = 1;
+ FrameData frame_data;
+ frame_data.rtt_ms = 0;
+ frame_data.rolling_average_packets_per_frame = -1;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ dec_state.SetState(&frame);
+ packet.timestamp = 9;
+ EXPECT_TRUE(dec_state.IsOldPacket(&packet));
+ // Check for old frame
+ frame.Reset();
+ frame.InsertPacket(packet, 0, frame_data);
+ EXPECT_TRUE(dec_state.IsOldFrame(&frame));
+}
+
+TEST(TestDecodingState, PictureIdRepeat) {
+ VCMDecodingState dec_state;
+ VCMFrameBuffer frame;
+ VCMPacket packet;
+ packet.video_header.frame_type = VideoFrameType::kVideoFrameDelta;
+ packet.video_header.codec = kVideoCodecVP8;
+ packet.timestamp = 0;
+ packet.seqNum = 0;
+ auto& vp8_header =
+ packet.video_header.video_type_header.emplace<RTPVideoHeaderVP8>();
+ vp8_header.tl0PicIdx = 0;
+ vp8_header.temporalIdx = 0;
+ vp8_header.pictureId = 0;
+ FrameData frame_data;
+ frame_data.rtt_ms = 0;
+ frame_data.rolling_average_packets_per_frame = -1;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ dec_state.SetState(&frame);
+ // tl0PicIdx 0, temporal id 1.
+ frame.Reset();
+ ++packet.timestamp;
+ ++packet.seqNum;
+ vp8_header.temporalIdx++;
+ vp8_header.pictureId++;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+ frame.Reset();
+ // Test a repeated pictureId with an advanced tl0PicIdx.
+ vp8_header.tl0PicIdx += 3;
+ vp8_header.temporalIdx++;
+ vp8_header.tl0PicIdx = 1;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
+}
+
+TEST(TestDecodingState, FrameContinuityFlexibleModeKeyFrame) {
+ VCMDecodingState dec_state;
+ VCMFrameBuffer frame;
+ VCMPacket packet;
+ packet.video_header.is_first_packet_in_frame = true;
+ packet.timestamp = 1;
+ packet.seqNum = 0xffff;
+ uint8_t data[] = "I need a data pointer for this test!";
+ packet.sizeBytes = sizeof(data);
+ packet.dataPtr = data;
+ packet.video_header.codec = kVideoCodecVP9;
+
+ auto& vp9_hdr =
+ packet.video_header.video_type_header.emplace<RTPVideoHeaderVP9>();
+ vp9_hdr.picture_id = 10;
+ vp9_hdr.flexible_mode = true;
+
+ FrameData frame_data;
+ frame_data.rtt_ms = 0;
+ frame_data.rolling_average_packets_per_frame = -1;
+
+ // Key frame as first frame
+ packet.video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+ dec_state.SetState(&frame);
+
+ // Key frame again
+ vp9_hdr.picture_id = 11;
+ frame.Reset();
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+ dec_state.SetState(&frame);
+
+ // Ref to 11, continuous
+ frame.Reset();
+ packet.video_header.frame_type = VideoFrameType::kVideoFrameDelta;
+ vp9_hdr.picture_id = 12;
+ vp9_hdr.num_ref_pics = 1;
+ vp9_hdr.pid_diff[0] = 1;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+}
+
+TEST(TestDecodingState, FrameContinuityFlexibleModeOutOfOrderFrames) {
+ VCMDecodingState dec_state;
+ VCMFrameBuffer frame;
+ VCMPacket packet;
+ packet.video_header.is_first_packet_in_frame = true;
+ packet.timestamp = 1;
+ packet.seqNum = 0xffff;
+ uint8_t data[] = "I need a data pointer for this test!";
+ packet.sizeBytes = sizeof(data);
+ packet.dataPtr = data;
+ packet.video_header.codec = kVideoCodecVP9;
+
+ auto& vp9_hdr =
+ packet.video_header.video_type_header.emplace<RTPVideoHeaderVP9>();
+ vp9_hdr.picture_id = 10;
+ vp9_hdr.flexible_mode = true;
+
+ FrameData frame_data;
+ frame_data.rtt_ms = 0;
+ frame_data.rolling_average_packets_per_frame = -1;
+
+ // Key frame as first frame
+ packet.video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+ dec_state.SetState(&frame);
+
+ // Ref to 10, continuous
+ frame.Reset();
+ packet.video_header.frame_type = VideoFrameType::kVideoFrameDelta;
+ vp9_hdr.picture_id = 15;
+ vp9_hdr.num_ref_pics = 1;
+ vp9_hdr.pid_diff[0] = 5;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+ dec_state.SetState(&frame);
+
+ // Out of order, last id 15, this id 12, ref to 10, continuous
+ frame.Reset();
+ vp9_hdr.picture_id = 12;
+ vp9_hdr.pid_diff[0] = 2;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+ dec_state.SetState(&frame);
+
+ // Ref 10, 12, 15, continuous
+ frame.Reset();
+ vp9_hdr.picture_id = 20;
+ vp9_hdr.num_ref_pics = 3;
+ vp9_hdr.pid_diff[0] = 10;
+ vp9_hdr.pid_diff[1] = 8;
+ vp9_hdr.pid_diff[2] = 5;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+}
+
+TEST(TestDecodingState, FrameContinuityFlexibleModeGeneral) {
+ VCMDecodingState dec_state;
+ VCMFrameBuffer frame;
+ VCMPacket packet;
+ packet.video_header.is_first_packet_in_frame = true;
+ packet.timestamp = 1;
+ packet.seqNum = 0xffff;
+ uint8_t data[] = "I need a data pointer for this test!";
+ packet.sizeBytes = sizeof(data);
+ packet.dataPtr = data;
+ packet.video_header.codec = kVideoCodecVP9;
+
+ auto& vp9_hdr =
+ packet.video_header.video_type_header.emplace<RTPVideoHeaderVP9>();
+ vp9_hdr.picture_id = 10;
+ vp9_hdr.flexible_mode = true;
+
+ FrameData frame_data;
+ frame_data.rtt_ms = 0;
+ frame_data.rolling_average_packets_per_frame = -1;
+
+ // Key frame as first frame
+ packet.video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+
+ // Delta frame as first frame
+ frame.Reset();
+ packet.video_header.frame_type = VideoFrameType::kVideoFrameDelta;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
+
+ // Key frame then delta frame
+ frame.Reset();
+ packet.video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ dec_state.SetState(&frame);
+ frame.Reset();
+ packet.video_header.frame_type = VideoFrameType::kVideoFrameDelta;
+ vp9_hdr.num_ref_pics = 1;
+ vp9_hdr.picture_id = 15;
+ vp9_hdr.pid_diff[0] = 5;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+ dec_state.SetState(&frame);
+
+ // Ref to 11, not continuous
+ frame.Reset();
+ vp9_hdr.picture_id = 16;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
+
+ // Ref to 15, continuous
+ frame.Reset();
+ vp9_hdr.picture_id = 16;
+ vp9_hdr.pid_diff[0] = 1;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+ dec_state.SetState(&frame);
+
+ // Ref to 11 and 15, not continuous
+ frame.Reset();
+ vp9_hdr.picture_id = 20;
+ vp9_hdr.num_ref_pics = 2;
+ vp9_hdr.pid_diff[0] = 9;
+ vp9_hdr.pid_diff[1] = 5;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
+
+ // Ref to 10, 15 and 16, continuous
+ frame.Reset();
+ vp9_hdr.picture_id = 22;
+ vp9_hdr.num_ref_pics = 3;
+ vp9_hdr.pid_diff[0] = 12;
+ vp9_hdr.pid_diff[1] = 7;
+ vp9_hdr.pid_diff[2] = 6;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+ dec_state.SetState(&frame);
+
+ // Key Frame, continuous
+ frame.Reset();
+ packet.video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ vp9_hdr.picture_id = VCMDecodingState::kFrameDecodedLength - 2;
+ vp9_hdr.num_ref_pics = 0;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+ dec_state.SetState(&frame);
+
+ // Frame at last index, ref to KF, continuous
+ frame.Reset();
+ packet.video_header.frame_type = VideoFrameType::kVideoFrameDelta;
+ vp9_hdr.picture_id = VCMDecodingState::kFrameDecodedLength - 1;
+ vp9_hdr.num_ref_pics = 1;
+ vp9_hdr.pid_diff[0] = 1;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+ dec_state.SetState(&frame);
+
+ // Frame after wrapping buffer length, ref to last index, continuous
+ frame.Reset();
+ vp9_hdr.picture_id = 0;
+ vp9_hdr.num_ref_pics = 1;
+ vp9_hdr.pid_diff[0] = 1;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+ dec_state.SetState(&frame);
+
+ // Frame after wrapping start frame, ref to 0, continuous
+ frame.Reset();
+ vp9_hdr.picture_id = 20;
+ vp9_hdr.num_ref_pics = 1;
+ vp9_hdr.pid_diff[0] = 20;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+ dec_state.SetState(&frame);
+
+ // Frame after wrapping start frame, ref to 10, not continuous
+ frame.Reset();
+ vp9_hdr.picture_id = 23;
+ vp9_hdr.num_ref_pics = 1;
+ vp9_hdr.pid_diff[0] = 13;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
+
+ // Key frame, continuous
+ frame.Reset();
+ packet.video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ vp9_hdr.picture_id = 25;
+ vp9_hdr.num_ref_pics = 0;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+ dec_state.SetState(&frame);
+
+ // Ref to KF, continuous
+ frame.Reset();
+ packet.video_header.frame_type = VideoFrameType::kVideoFrameDelta;
+ vp9_hdr.picture_id = 26;
+ vp9_hdr.num_ref_pics = 1;
+ vp9_hdr.pid_diff[0] = 1;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+ dec_state.SetState(&frame);
+
+ // Ref to frame previous to KF, not continuous
+ frame.Reset();
+ vp9_hdr.picture_id = 30;
+ vp9_hdr.num_ref_pics = 1;
+ vp9_hdr.pid_diff[0] = 30;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/encoded_frame.cc b/third_party/libwebrtc/modules/video_coding/encoded_frame.cc
new file mode 100644
index 0000000000..637a20cfc9
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/encoded_frame.cc
@@ -0,0 +1,151 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/encoded_frame.h"
+
+#include <string.h>
+
+#include "absl/types/variant.h"
+#include "api/video/video_timing.h"
+#include "modules/video_coding/codecs/interface/common_constants.h"
+#include "modules/video_coding/codecs/vp8/include/vp8_globals.h"
+#include "modules/video_coding/codecs/vp9/include/vp9_globals.h"
+
+namespace webrtc {
+
+VCMEncodedFrame::VCMEncodedFrame()
+ : webrtc::EncodedImage(),
+ _renderTimeMs(-1),
+ _payloadType(0),
+ _missingFrame(false),
+ _codec(kVideoCodecGeneric) {
+ _codecSpecificInfo.codecType = kVideoCodecGeneric;
+}
+
+VCMEncodedFrame::VCMEncodedFrame(const VCMEncodedFrame&) = default;
+
+VCMEncodedFrame::~VCMEncodedFrame() {
+ Reset();
+}
+
+void VCMEncodedFrame::Reset() {
+ SetTimestamp(0);
+ SetSpatialIndex(absl::nullopt);
+ _renderTimeMs = -1;
+ _payloadType = 0;
+ _frameType = VideoFrameType::kVideoFrameDelta;
+ _encodedWidth = 0;
+ _encodedHeight = 0;
+ _missingFrame = false;
+ set_size(0);
+ _codecSpecificInfo.codecType = kVideoCodecGeneric;
+ _codec = kVideoCodecGeneric;
+ rotation_ = kVideoRotation_0;
+ content_type_ = VideoContentType::UNSPECIFIED;
+ timing_.flags = VideoSendTiming::kInvalid;
+}
+
+void VCMEncodedFrame::CopyCodecSpecific(const RTPVideoHeader* header) {
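+  // Translate the codec-specific part of the RTP video header into
+  // `_codecSpecificInfo`. After Reset() the codec type is kVideoCodecGeneric,
+  // so the first packet of a frame re-initializes the codec-specific entry.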
+ if (header) {
+ switch (header->codec) {
+ case kVideoCodecVP8: {
+ const auto& vp8_header =
+ absl::get<RTPVideoHeaderVP8>(header->video_type_header);
+ if (_codecSpecificInfo.codecType != kVideoCodecVP8) {
+ // This is the first packet for this frame.
+ _codecSpecificInfo.codecSpecific.VP8.temporalIdx = 0;
+ _codecSpecificInfo.codecSpecific.VP8.layerSync = false;
+ _codecSpecificInfo.codecSpecific.VP8.keyIdx = -1;
+ _codecSpecificInfo.codecType = kVideoCodecVP8;
+ }
+ _codecSpecificInfo.codecSpecific.VP8.nonReference =
+ vp8_header.nonReference;
+ if (vp8_header.temporalIdx != kNoTemporalIdx) {
+ _codecSpecificInfo.codecSpecific.VP8.temporalIdx =
+ vp8_header.temporalIdx;
+ _codecSpecificInfo.codecSpecific.VP8.layerSync = vp8_header.layerSync;
+ }
+ if (vp8_header.keyIdx != kNoKeyIdx) {
+ _codecSpecificInfo.codecSpecific.VP8.keyIdx = vp8_header.keyIdx;
+ }
+ break;
+ }
+ case kVideoCodecVP9: {
+ const auto& vp9_header =
+ absl::get<RTPVideoHeaderVP9>(header->video_type_header);
+ if (_codecSpecificInfo.codecType != kVideoCodecVP9) {
+ // This is the first packet for this frame.
+ _codecSpecificInfo.codecSpecific.VP9.temporal_idx = 0;
+ _codecSpecificInfo.codecSpecific.VP9.gof_idx = 0;
+ _codecSpecificInfo.codecSpecific.VP9.inter_layer_predicted = false;
+ _codecSpecificInfo.codecType = kVideoCodecVP9;
+ }
+ _codecSpecificInfo.codecSpecific.VP9.inter_pic_predicted =
+ vp9_header.inter_pic_predicted;
+ _codecSpecificInfo.codecSpecific.VP9.flexible_mode =
+ vp9_header.flexible_mode;
+ _codecSpecificInfo.codecSpecific.VP9.num_ref_pics =
+ vp9_header.num_ref_pics;
+ for (uint8_t r = 0; r < vp9_header.num_ref_pics; ++r) {
+ _codecSpecificInfo.codecSpecific.VP9.p_diff[r] =
+ vp9_header.pid_diff[r];
+ }
+ _codecSpecificInfo.codecSpecific.VP9.ss_data_available =
+ vp9_header.ss_data_available;
+ if (vp9_header.temporal_idx != kNoTemporalIdx) {
+ _codecSpecificInfo.codecSpecific.VP9.temporal_idx =
+ vp9_header.temporal_idx;
+ _codecSpecificInfo.codecSpecific.VP9.temporal_up_switch =
+ vp9_header.temporal_up_switch;
+ }
+ if (vp9_header.spatial_idx != kNoSpatialIdx) {
+ _codecSpecificInfo.codecSpecific.VP9.inter_layer_predicted =
+ vp9_header.inter_layer_predicted;
+ SetSpatialIndex(vp9_header.spatial_idx);
+ }
+ if (vp9_header.gof_idx != kNoGofIdx) {
+ _codecSpecificInfo.codecSpecific.VP9.gof_idx = vp9_header.gof_idx;
+ }
+ if (vp9_header.ss_data_available) {
+ _codecSpecificInfo.codecSpecific.VP9.num_spatial_layers =
+ vp9_header.num_spatial_layers;
+ _codecSpecificInfo.codecSpecific.VP9
+ .spatial_layer_resolution_present =
+ vp9_header.spatial_layer_resolution_present;
+ if (vp9_header.spatial_layer_resolution_present) {
+ for (size_t i = 0; i < vp9_header.num_spatial_layers; ++i) {
+ _codecSpecificInfo.codecSpecific.VP9.width[i] =
+ vp9_header.width[i];
+ _codecSpecificInfo.codecSpecific.VP9.height[i] =
+ vp9_header.height[i];
+ }
+ }
+ _codecSpecificInfo.codecSpecific.VP9.gof.CopyGofInfoVP9(
+ vp9_header.gof);
+ }
+ break;
+ }
+ case kVideoCodecH264: {
+ _codecSpecificInfo.codecType = kVideoCodecH264;
+ break;
+ }
+ case kVideoCodecAV1: {
+ _codecSpecificInfo.codecType = kVideoCodecAV1;
+ break;
+ }
+ default: {
+ _codecSpecificInfo.codecType = kVideoCodecGeneric;
+ break;
+ }
+ }
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/encoded_frame.h b/third_party/libwebrtc/modules/video_coding/encoded_frame.h
new file mode 100644
index 0000000000..9cc769277d
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/encoded_frame.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_ENCODED_FRAME_H_
+#define MODULES_VIDEO_CODING_ENCODED_FRAME_H_
+
+#include <vector>
+
+#include "api/video/encoded_image.h"
+#include "modules/rtp_rtcp/source/rtp_video_header.h"
+#include "modules/video_coding/include/video_codec_interface.h"
+#include "modules/video_coding/include/video_coding_defines.h"
+#include "rtc_base/system/rtc_export.h"
+
+namespace webrtc {
+
+class RTC_EXPORT VCMEncodedFrame : public EncodedImage {
+ public:
+ VCMEncodedFrame();
+ VCMEncodedFrame(const VCMEncodedFrame&);
+
+ ~VCMEncodedFrame();
+ /**
+ * Set render time in milliseconds
+ */
+ void SetRenderTime(const int64_t renderTimeMs) {
+ _renderTimeMs = renderTimeMs;
+ }
+
+ VideoPlayoutDelay PlayoutDelay() const { return playout_delay_; }
+
+ void SetPlayoutDelay(VideoPlayoutDelay playout_delay) {
+ playout_delay_ = playout_delay;
+ }
+
+ /**
+ * Get the encoded image
+ */
+ const webrtc::EncodedImage& EncodedImage() const {
+ return static_cast<const webrtc::EncodedImage&>(*this);
+ }
+
+ using EncodedImage::ColorSpace;
+ using EncodedImage::data;
+ using EncodedImage::GetEncodedData;
+ using EncodedImage::NtpTimeMs;
+ using EncodedImage::PacketInfos;
+ using EncodedImage::set_size;
+ using EncodedImage::SetColorSpace;
+ using EncodedImage::SetEncodedData;
+ using EncodedImage::SetPacketInfos;
+ using EncodedImage::SetSpatialIndex;
+ using EncodedImage::SetSpatialLayerFrameSize;
+ using EncodedImage::SetTimestamp;
+ using EncodedImage::size;
+ using EncodedImage::SpatialIndex;
+ using EncodedImage::SpatialLayerFrameSize;
+ using EncodedImage::Timestamp;
+
+ /**
+ * Get render time in milliseconds
+ */
+ int64_t RenderTimeMs() const { return _renderTimeMs; }
+ /**
+ * Get frame type
+ */
+ webrtc::VideoFrameType FrameType() const { return _frameType; }
+ /**
+ * Set frame type
+ */
+ void SetFrameType(webrtc::VideoFrameType frame_type) {
+ _frameType = frame_type;
+ }
+ /**
+ * Get frame rotation
+ */
+ VideoRotation rotation() const { return rotation_; }
+ /**
+ * Get video content type
+ */
+ VideoContentType contentType() const { return content_type_; }
+ /**
+ * Get video timing
+ */
+ EncodedImage::Timing video_timing() const { return timing_; }
+ EncodedImage::Timing* video_timing_mutable() { return &timing_; }
+ /**
+ * True if there's a frame missing before this frame
+ */
+ bool MissingFrame() const { return _missingFrame; }
+ /**
+ * Payload type of the encoded payload
+ */
+ uint8_t PayloadType() const { return _payloadType; }
+ /**
+   * Get codec specific info.
+   * The returned pointer is owned by this VCMEncodedFrame and remains
+   * valid only for the frame's lifetime.
+ */
+ const CodecSpecificInfo* CodecSpecific() const { return &_codecSpecificInfo; }
+ void SetCodecSpecific(const CodecSpecificInfo* codec_specific) {
+ _codecSpecificInfo = *codec_specific;
+ }
+
+ protected:
+ void Reset();
+
+ void CopyCodecSpecific(const RTPVideoHeader* header);
+
+ int64_t _renderTimeMs;
+ uint8_t _payloadType;
+ bool _missingFrame;
+ CodecSpecificInfo _codecSpecificInfo;
+ webrtc::VideoCodecType _codec;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_ENCODED_FRAME_H_
diff --git a/third_party/libwebrtc/modules/video_coding/encoded_frame_gn/moz.build b/third_party/libwebrtc/modules/video_coding/encoded_frame_gn/moz.build
new file mode 100644
index 0000000000..2c74aec10f
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/encoded_frame_gn/moz.build
@@ -0,0 +1,214 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/video_coding/encoded_frame.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "GLESv2",
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "dl",
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("encoded_frame_gn")
diff --git a/third_party/libwebrtc/modules/video_coding/event_wrapper.cc b/third_party/libwebrtc/modules/video_coding/event_wrapper.cc
new file mode 100644
index 0000000000..e6a4752401
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/event_wrapper.cc
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/event_wrapper.h"
+
+#include "rtc_base/event.h"
+
+namespace webrtc {
+
+class EventWrapperImpl : public EventWrapper {
+ public:
+ ~EventWrapperImpl() override {}
+
+ bool Set() override {
+ event_.Set();
+ return true;
+ }
+
+ EventTypeWrapper Wait(int max_time_ms) override {
+ return event_.Wait(max_time_ms) ? kEventSignaled : kEventTimeout;
+ }
+
+ private:
+ rtc::Event event_;
+};
+
+// static
+EventWrapper* EventWrapper::Create() {
+ return new EventWrapperImpl();
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/event_wrapper.h b/third_party/libwebrtc/modules/video_coding/event_wrapper.h
new file mode 100644
index 0000000000..c1f160c7f7
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/event_wrapper.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_EVENT_WRAPPER_H_
+#define MODULES_VIDEO_CODING_EVENT_WRAPPER_H_
+
+namespace webrtc {
+enum EventTypeWrapper { kEventSignaled = 1, kEventTimeout = 2 };
+
+class EventWrapper {
+ public:
+  // Factory method (the class is abstract; create instances via Create()).
+ static EventWrapper* Create();
+
+ virtual ~EventWrapper() {}
+
+  // Releases threads that are calling Wait() and have started waiting. Note
+  // that a thread calling Wait() does not start waiting immediately;
+  // assumptions to the contrary are a very common source of issues in
+  // multithreaded programming.
+ // Set is sticky in the sense that it will release at least one thread
+ // either immediately or some time in the future.
+ virtual bool Set() = 0;
+
+  // Puts the calling thread into a wait state. The thread may be released
+  // by a Set() call, depending on whether other threads are waiting and, if
+  // so, on timing. The thread that was released resets the event before
+  // leaving, preventing more threads from being released. If multiple
+  // threads are waiting for the same Set(), only one (random) thread is
+  // guaranteed to be released; it is possible that multiple (random) threads
+  // are released, depending on timing.
+ //
+ // `max_time_ms` is the maximum time to wait in milliseconds.
+ virtual EventTypeWrapper Wait(int max_time_ms) = 0;
+};
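+
+// Usage sketch (illustrative only):
+//   std::unique_ptr<EventWrapper> event(EventWrapper::Create());
+//   // Signaling thread:  event->Set();
+//   // Waiting thread:    if (event->Wait(100) == kEventSignaled) { ... }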
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_EVENT_WRAPPER_H_
diff --git a/third_party/libwebrtc/modules/video_coding/fec_controller_default.cc b/third_party/libwebrtc/modules/video_coding/fec_controller_default.cc
new file mode 100644
index 0000000000..f204b01c7c
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/fec_controller_default.cc
@@ -0,0 +1,211 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/fec_controller_default.h" // NOLINT
+
+#include <stdlib.h>
+
+#include <algorithm>
+#include <string>
+
+#include "modules/include/module_fec_types.h"
+#include "rtc_base/logging.h"
+#include "system_wrappers/include/field_trial.h"
+
+namespace webrtc {
+
+const float kProtectionOverheadRateThreshold = 0.5;
+
+FecControllerDefault::FecControllerDefault(
+ Clock* clock,
+ VCMProtectionCallback* protection_callback)
+ : clock_(clock),
+ protection_callback_(protection_callback),
+ loss_prot_logic_(new media_optimization::VCMLossProtectionLogic(
+ clock_->TimeInMilliseconds())),
+ max_payload_size_(1460),
+ overhead_threshold_(GetProtectionOverheadRateThreshold()) {}
+
+FecControllerDefault::FecControllerDefault(Clock* clock)
+ : clock_(clock),
+ loss_prot_logic_(new media_optimization::VCMLossProtectionLogic(
+ clock_->TimeInMilliseconds())),
+ max_payload_size_(1460),
+ overhead_threshold_(GetProtectionOverheadRateThreshold()) {}
+
+FecControllerDefault::~FecControllerDefault() {
+ loss_prot_logic_->Release();
+}
+
+void FecControllerDefault::SetProtectionCallback(
+ VCMProtectionCallback* protection_callback) {
+ protection_callback_ = protection_callback;
+}
+
+void FecControllerDefault::SetEncodingData(size_t width,
+ size_t height,
+ size_t num_temporal_layers,
+ size_t max_payload_size) {
+ MutexLock lock(&mutex_);
+ loss_prot_logic_->UpdateFrameSize(width, height);
+ loss_prot_logic_->UpdateNumLayers(num_temporal_layers);
+ max_payload_size_ = max_payload_size;
+}
+
+float FecControllerDefault::GetProtectionOverheadRateThreshold() {
+ float overhead_threshold =
+ strtof(webrtc::field_trial::FindFullName(
+ "WebRTC-ProtectionOverheadRateThreshold")
+ .c_str(),
+ nullptr);
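+  // Illustrative: a field-trial config string of
+  // "WebRTC-ProtectionOverheadRateThreshold/0.75/" makes FindFullName()
+  // return "0.75", which parses to a threshold of 0.75.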
+ if (overhead_threshold > 0 && overhead_threshold <= 1) {
+ RTC_LOG(LS_INFO) << "ProtectionOverheadRateThreshold is set to "
+ << overhead_threshold;
+ return overhead_threshold;
+ } else if (overhead_threshold < 0 || overhead_threshold > 1) {
+ RTC_LOG(LS_WARNING)
+ << "ProtectionOverheadRateThreshold field trial is set to "
+ "an invalid value, expecting a value between (0, 1].";
+ }
+  // The WebRTC-ProtectionOverheadRateThreshold field trial is either not set
+  // or set to an invalid value; use the default value.
+ return kProtectionOverheadRateThreshold;
+}
+
+uint32_t FecControllerDefault::UpdateFecRates(
+ uint32_t estimated_bitrate_bps,
+ int actual_framerate_fps,
+ uint8_t fraction_lost,
+ std::vector<bool> loss_mask_vector,
+ int64_t round_trip_time_ms) {
+ float target_bitrate_kbps =
+ static_cast<float>(estimated_bitrate_bps) / 1000.0f;
+ // Sanity check.
+ if (actual_framerate_fps < 1.0) {
+ actual_framerate_fps = 1.0;
+ }
+ FecProtectionParams delta_fec_params;
+ FecProtectionParams key_fec_params;
+ {
+ MutexLock lock(&mutex_);
+ loss_prot_logic_->UpdateBitRate(target_bitrate_kbps);
+ loss_prot_logic_->UpdateRtt(round_trip_time_ms);
+ // Update frame rate for the loss protection logic class: frame rate should
+ // be the actual/sent rate.
+ loss_prot_logic_->UpdateFrameRate(actual_framerate_fps);
+ // Returns the filtered packet loss, used for the protection setting.
+ // The filtered loss may be the received loss (no filter), or some
+ // filtered value (average or max window filter).
+ // Use max window filter for now.
+ media_optimization::FilterPacketLossMode filter_mode =
+ media_optimization::kMaxFilter;
+ uint8_t packet_loss_enc = loss_prot_logic_->FilteredLoss(
+ clock_->TimeInMilliseconds(), filter_mode, fraction_lost);
+ // For now use the filtered loss for computing the robustness settings.
+ loss_prot_logic_->UpdateFilteredLossPr(packet_loss_enc);
+ if (loss_prot_logic_->SelectedType() == media_optimization::kNone) {
+ return estimated_bitrate_bps;
+ }
+    // UpdateMethod() computes the robustness settings for the given
+    // protection method and its overhead cost. The protection method is
+    // set by the user via SetVideoProtection.
+ loss_prot_logic_->UpdateMethod();
+    // Get the bit cost of the protection method, based on the amount of
+    // overhead data actually transmitted (including headers) during the
+    // last second.
+ // Get the FEC code rate for Key frames (set to 0 when NA).
+ key_fec_params.fec_rate =
+ loss_prot_logic_->SelectedMethod()->RequiredProtectionFactorK();
+ // Get the FEC code rate for Delta frames (set to 0 when NA).
+ delta_fec_params.fec_rate =
+ loss_prot_logic_->SelectedMethod()->RequiredProtectionFactorD();
+ // The RTP module currently requires the same `max_fec_frames` for both
+ // key and delta frames.
+ delta_fec_params.max_fec_frames =
+ loss_prot_logic_->SelectedMethod()->MaxFramesFec();
+ key_fec_params.max_fec_frames =
+ loss_prot_logic_->SelectedMethod()->MaxFramesFec();
+ }
+ // Set the FEC packet mask type. `kFecMaskBursty` is more effective for
+ // consecutive losses and little/no packet re-ordering. As we currently
+ // do not have feedback data on the degree of correlated losses and packet
+  // re-ordering, we keep the default setting of `kFecMaskRandom` for now.
+ delta_fec_params.fec_mask_type = kFecMaskRandom;
+ key_fec_params.fec_mask_type = kFecMaskRandom;
+ // Update protection callback with protection settings.
+ uint32_t sent_video_rate_bps = 0;
+ uint32_t sent_nack_rate_bps = 0;
+ uint32_t sent_fec_rate_bps = 0;
+ // Rate cost of the protection methods.
+ float protection_overhead_rate = 0.0f;
+ // TODO(Marco): Pass FEC protection values per layer.
+ protection_callback_->ProtectionRequest(
+ &delta_fec_params, &key_fec_params, &sent_video_rate_bps,
+ &sent_nack_rate_bps, &sent_fec_rate_bps);
+ uint32_t sent_total_rate_bps =
+ sent_video_rate_bps + sent_nack_rate_bps + sent_fec_rate_bps;
+  // Assume the overhead cost relative to the source bitrate stays the same
+  // over the next second.
+ if (sent_total_rate_bps > 0) {
+ protection_overhead_rate =
+ static_cast<float>(sent_nack_rate_bps + sent_fec_rate_bps) /
+ sent_total_rate_bps;
+ }
+ // Cap the overhead estimate to a threshold, default is 50%.
+ protection_overhead_rate =
+ std::min(protection_overhead_rate, overhead_threshold_);
+ // Source coding rate: total rate - protection overhead.
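+  // E.g. 10 kbps of FEC on top of 90 kbps of video gives an overhead rate of
+  // 0.1, so an estimate of 100 kbps yields a 90 kbps source coding rate.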
+ return estimated_bitrate_bps * (1.0 - protection_overhead_rate);
+}
+
+void FecControllerDefault::SetProtectionMethod(bool enable_fec,
+ bool enable_nack) {
+ media_optimization::VCMProtectionMethodEnum method(media_optimization::kNone);
+ if (enable_fec && enable_nack) {
+ method = media_optimization::kNackFec;
+ } else if (enable_nack) {
+ method = media_optimization::kNack;
+ } else if (enable_fec) {
+ method = media_optimization::kFec;
+ }
+ MutexLock lock(&mutex_);
+ loss_prot_logic_->SetMethod(method);
+}
+
+void FecControllerDefault::UpdateWithEncodedData(
+ const size_t encoded_image_length,
+ const VideoFrameType encoded_image_frametype) {
+ const size_t encoded_length = encoded_image_length;
+ MutexLock lock(&mutex_);
+ if (encoded_length > 0) {
+ const bool delta_frame =
+ encoded_image_frametype != VideoFrameType::kVideoFrameKey;
+ if (max_payload_size_ > 0 && encoded_length > 0) {
+ const float min_packets_per_frame =
+ encoded_length / static_cast<float>(max_payload_size_);
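+      // E.g. a 2920-byte frame with the default 1460-byte max payload gives
+      // 2.0 packets per frame.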
+ if (delta_frame) {
+ loss_prot_logic_->UpdatePacketsPerFrame(min_packets_per_frame,
+ clock_->TimeInMilliseconds());
+ } else {
+ loss_prot_logic_->UpdatePacketsPerFrameKey(
+ min_packets_per_frame, clock_->TimeInMilliseconds());
+ }
+ }
+ if (!delta_frame && encoded_length > 0) {
+ loss_prot_logic_->UpdateKeyFrameSize(static_cast<float>(encoded_length));
+ }
+ }
+}
+
+bool FecControllerDefault::UseLossVectorMask() {
+ return false;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/fec_controller_default.h b/third_party/libwebrtc/modules/video_coding/fec_controller_default.h
new file mode 100644
index 0000000000..a97dea011b
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/fec_controller_default.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_FEC_CONTROLLER_DEFAULT_H_
+#define MODULES_VIDEO_CODING_FEC_CONTROLLER_DEFAULT_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <memory>
+#include <vector>
+
+#include "api/fec_controller.h"
+#include "modules/video_coding/media_opt_util.h"
+#include "rtc_base/synchronization/mutex.h"
+#include "rtc_base/thread_annotations.h"
+#include "system_wrappers/include/clock.h"
+
+namespace webrtc {
+
+class FecControllerDefault : public FecController {
+ public:
+ FecControllerDefault(Clock* clock,
+ VCMProtectionCallback* protection_callback);
+ explicit FecControllerDefault(Clock* clock);
+ ~FecControllerDefault() override;
+
+ FecControllerDefault(const FecControllerDefault&) = delete;
+ FecControllerDefault& operator=(const FecControllerDefault&) = delete;
+
+ void SetProtectionCallback(
+ VCMProtectionCallback* protection_callback) override;
+ void SetProtectionMethod(bool enable_fec, bool enable_nack) override;
+ void SetEncodingData(size_t width,
+ size_t height,
+ size_t num_temporal_layers,
+ size_t max_payload_size) override;
+ uint32_t UpdateFecRates(uint32_t estimated_bitrate_bps,
+ int actual_framerate_fps,
+ uint8_t fraction_lost,
+ std::vector<bool> loss_mask_vector,
+ int64_t round_trip_time_ms) override;
+ void UpdateWithEncodedData(size_t encoded_image_length,
+ VideoFrameType encoded_image_frametype) override;
+ bool UseLossVectorMask() override;
+ float GetProtectionOverheadRateThreshold();
+
+ private:
+ enum { kBitrateAverageWinMs = 1000 };
+ Clock* const clock_;
+ VCMProtectionCallback* protection_callback_;
+ Mutex mutex_;
+ std::unique_ptr<media_optimization::VCMLossProtectionLogic> loss_prot_logic_
+ RTC_GUARDED_BY(mutex_);
+ size_t max_payload_size_ RTC_GUARDED_BY(mutex_);
+
+ const float overhead_threshold_;
+};
+
+} // namespace webrtc
+#endif // MODULES_VIDEO_CODING_FEC_CONTROLLER_DEFAULT_H_
diff --git a/third_party/libwebrtc/modules/video_coding/fec_controller_unittest.cc b/third_party/libwebrtc/modules/video_coding/fec_controller_unittest.cc
new file mode 100644
index 0000000000..fda3d309a4
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/fec_controller_unittest.cc
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "api/fec_controller.h"
+
+#include <stdint.h>
+
+#include <vector>
+
+#include "modules/include/module_fec_types.h"
+#include "modules/video_coding/fec_controller_default.h"
+#include "system_wrappers/include/clock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+static const int kCodecBitrateBps = 100000;
+
+class ProtectionBitrateCalculatorTest : public ::testing::Test {
+ protected:
+ enum {
+ kSampleRate = 90000 // RTP timestamps per second.
+ };
+
+ class ProtectionCallback : public VCMProtectionCallback {
+ public:
+ int ProtectionRequest(const FecProtectionParams* delta_params,
+ const FecProtectionParams* key_params,
+ uint32_t* sent_video_rate_bps,
+ uint32_t* sent_nack_rate_bps,
+ uint32_t* sent_fec_rate_bps) override {
+ *sent_video_rate_bps = kCodecBitrateBps;
+ *sent_nack_rate_bps = nack_rate_bps_;
+ *sent_fec_rate_bps = fec_rate_bps_;
+ return 0;
+ }
+
+ uint32_t fec_rate_bps_ = 0;
+ uint32_t nack_rate_bps_ = 0;
+ };
+
+  // Note: simulated clock starts at 1 second, since parts of WebRTC use 0 as
+  // a special case (e.g. frame rate in media optimization).
+ ProtectionBitrateCalculatorTest()
+ : clock_(1000), fec_controller_(&clock_, &protection_callback_) {}
+
+ SimulatedClock clock_;
+ ProtectionCallback protection_callback_;
+ FecControllerDefault fec_controller_;
+};
+
+TEST_F(ProtectionBitrateCalculatorTest, ProtectsUsingFecBitrate) {
+ static const uint32_t kMaxBitrateBps = 130000;
+
+ fec_controller_.SetProtectionMethod(true /*enable_fec*/,
+ false /* enable_nack */);
+ fec_controller_.SetEncodingData(640, 480, 1, 1000);
+
+ // Using 10% of codec bitrate for FEC.
+ protection_callback_.fec_rate_bps_ = kCodecBitrateBps / 10;
+ uint32_t target_bitrate = fec_controller_.UpdateFecRates(
+ kMaxBitrateBps, 30, 0, std::vector<bool>(1, false), 0);
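+  // (UpdateFecRates arguments: estimated bitrate bps, actual framerate fps,
+  // fraction lost (0-255), loss mask vector, round-trip time ms.)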
+
+ EXPECT_GT(target_bitrate, 0u);
+ EXPECT_GT(kMaxBitrateBps, target_bitrate);
+
+  // With the FEC rate equal to the codec bitrate, protection consumes half
+  // of the sent rate, hitting the 50% overhead cap, so the new target rate
+  // should be half of max.
+ protection_callback_.fec_rate_bps_ = kCodecBitrateBps;
+ target_bitrate = fec_controller_.UpdateFecRates(
+ kMaxBitrateBps, 30, 128, std::vector<bool>(1, false), 100);
+ EXPECT_EQ(kMaxBitrateBps / 2, target_bitrate);
+}
+
+TEST_F(ProtectionBitrateCalculatorTest, ProtectsUsingNackBitrate) {
+ static const uint32_t kMaxBitrateBps = 130000;
+
+ fec_controller_.SetProtectionMethod(false /*enable_fec*/,
+ true /* enable_nack */);
+ fec_controller_.SetEncodingData(640, 480, 1, 1000);
+
+ uint32_t target_bitrate = fec_controller_.UpdateFecRates(
+ kMaxBitrateBps, 30, 0, std::vector<bool>(1, false), 0);
+
+ EXPECT_EQ(kMaxBitrateBps, target_bitrate);
+
+  // With the NACK rate at least as large as the codec bitrate, the overhead
+  // exceeds the 50% cap, so the new target rate should be half of max.
+ protection_callback_.nack_rate_bps_ = kMaxBitrateBps;
+ target_bitrate = fec_controller_.UpdateFecRates(
+ kMaxBitrateBps, 30, 128, std::vector<bool>(1, false), 100);
+ EXPECT_EQ(kMaxBitrateBps / 2, target_bitrate);
+}
+
+TEST_F(ProtectionBitrateCalculatorTest, NoProtection) {
+ static const uint32_t kMaxBitrateBps = 130000;
+
+ fec_controller_.SetProtectionMethod(false /*enable_fec*/,
+ false /* enable_nack */);
+ fec_controller_.SetEncodingData(640, 480, 1, 1000);
+
+ uint32_t target_bitrate = fec_controller_.UpdateFecRates(
+ kMaxBitrateBps, 30, 128, std::vector<bool>(1, false), 100);
+ EXPECT_EQ(kMaxBitrateBps, target_bitrate);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/fec_rate_table.h b/third_party/libwebrtc/modules/video_coding/fec_rate_table.h
new file mode 100644
index 0000000000..91ec0ce159
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/fec_rate_table.h
@@ -0,0 +1,461 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_FEC_RATE_TABLE_H_
+#define MODULES_VIDEO_CODING_FEC_RATE_TABLE_H_
+
+// This is a private header for media_opt_util.cc.
+// It should not be included by other files.
+
+namespace webrtc {
+
+// Table for Protection factor (code rate) of delta frames, for the XOR FEC.
+// Input is the packet loss and an effective rate (bits/frame).
+// Output is the array entry kFecRateTable[k], where k = rate_i * 129 + loss_j;
+// loss_j = 0, 1, ..., 128, and rate_i = 0, 1, ..., 49
+// (kFecRateTableSize / 129 = 50 rate entries).
+// TODO(brandtr): Consider replacing this big static table with a closed-form
+// expression instead.
+static const int kFecRateTableSize = 6450;
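+// Illustrative lookup (assuming the caller has already clamped rate_i to
+// [0, 49] and loss_j to [0, 128]):
+//   unsigned char fec_rate = kFecRateTable[rate_i * 129 + loss_j];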
+static const unsigned char kFecRateTable[kFecRateTableSize] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
+ 11, 11, 11, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39,
+ 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39,
+ 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39,
+ 39, 39, 39, 39, 39, 39, 51, 51, 51, 51, 51, 51, 51, 51, 51,
+ 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51,
+ 51, 51, 51, 51, 51, 51, 51, 51, 51, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 8, 8,
+ 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 30, 30, 30,
+ 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 56, 56, 56,
+ 56, 56, 56, 56, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65,
+ 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65,
+ 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65,
+ 87, 87, 87, 87, 87, 87, 87, 87, 87, 87, 87, 87, 87, 87, 87,
+ 87, 87, 87, 87, 87, 87, 87, 87, 87, 78, 78, 78, 78, 78, 78,
+ 78, 78, 78, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 6, 6, 6, 23, 23, 23, 23, 23, 23, 23, 23, 23,
+ 23, 23, 23, 23, 23, 23, 44, 44, 44, 44, 44, 44, 50, 50, 50,
+ 50, 50, 50, 50, 50, 50, 68, 68, 68, 68, 68, 68, 68, 85, 85,
+ 85, 85, 85, 85, 85, 85, 85, 85, 85, 85, 85, 85, 85, 85, 85,
+ 85, 85, 85, 85, 85, 85, 85, 85, 85, 85, 85, 85, 85, 85, 85,
+ 85, 85, 85, 85, 85, 85, 85, 85, 85, 105, 105, 105, 105, 105, 105,
+ 105, 105, 105, 105, 105, 105, 105, 105, 105, 105, 105, 105, 105, 105, 105,
+ 105, 105, 105, 88, 88, 88, 88, 88, 88, 88, 88, 88, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 5, 5, 5, 5, 5, 5, 19, 19, 19,
+ 36, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41,
+ 55, 55, 55, 55, 55, 55, 69, 69, 69, 69, 69, 69, 69, 69, 69,
+ 75, 75, 80, 80, 80, 80, 80, 97, 97, 97, 97, 97, 97, 97, 97,
+ 97, 97, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102,
+ 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102,
+ 102, 102, 102, 116, 116, 116, 116, 116, 116, 116, 116, 116, 116, 116, 116,
+ 116, 116, 116, 116, 116, 116, 116, 116, 116, 116, 116, 116, 100, 100, 100,
+ 100, 100, 100, 100, 100, 100, 0, 0, 0, 0, 0, 0, 0, 0, 4,
+ 16, 16, 16, 16, 16, 16, 30, 35, 35, 47, 58, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, 58, 58, 63, 63, 63, 63, 63, 63,
+ 77, 77, 77, 77, 77, 77, 77, 82, 82, 82, 82, 94, 94, 94, 94,
+ 94, 105, 105, 105, 105, 110, 110, 110, 110, 110, 110, 122, 122, 122, 122,
+ 122, 122, 122, 122, 122, 122, 122, 122, 122, 122, 122, 122, 122, 122, 122,
+ 122, 122, 122, 122, 122, 122, 122, 122, 122, 122, 122, 122, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 115, 115, 115, 115, 115, 115, 115, 115, 115,
+ 0, 0, 0, 0, 0, 0, 0, 4, 14, 27, 27, 27, 27, 27, 31,
+ 41, 52, 52, 56, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69,
+ 69, 69, 69, 69, 69, 69, 69, 69, 69, 79, 79, 79, 79, 83, 83,
+ 83, 94, 94, 94, 94, 106, 106, 106, 106, 106, 115, 115, 115, 115, 125,
+ 125, 125, 125, 125, 125, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 0, 0, 0, 0, 3, 3,
+ 3, 17, 28, 38, 38, 38, 38, 38, 47, 51, 63, 63, 63, 72, 72,
+ 72, 72, 72, 72, 72, 76, 76, 76, 76, 80, 80, 80, 80, 80, 80,
+ 80, 80, 80, 84, 84, 84, 84, 93, 93, 93, 105, 105, 105, 105, 114,
+ 114, 114, 114, 114, 124, 124, 124, 124, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 0, 0, 0, 0, 12, 12, 12, 35, 43, 47, 47, 47,
+ 47, 47, 58, 58, 66, 66, 66, 70, 70, 70, 70, 70, 73, 73, 82,
+ 82, 82, 86, 94, 94, 94, 94, 94, 94, 94, 94, 94, 94, 94, 94,
+ 94, 105, 105, 105, 114, 114, 114, 114, 117, 117, 117, 117, 117, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 0, 0, 0,
+ 0, 24, 24, 24, 49, 53, 53, 53, 53, 53, 53, 61, 61, 64, 64,
+ 64, 64, 70, 70, 70, 70, 78, 78, 88, 88, 88, 96, 106, 106, 106,
+ 106, 106, 106, 106, 106, 106, 106, 112, 112, 112, 120, 120, 120, 124, 124,
+ 124, 124, 124, 124, 124, 124, 124, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 0, 0, 0, 5, 36, 36, 36, 55, 55,
+ 55, 55, 55, 55, 55, 58, 58, 58, 58, 58, 64, 78, 78, 78, 78,
+ 87, 87, 94, 94, 94, 103, 110, 110, 110, 110, 110, 110, 110, 110, 116,
+ 116, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 0, 0, 0, 18, 43, 43, 43, 53, 53, 53, 53, 53, 53, 53, 53,
+ 58, 58, 58, 58, 71, 87, 87, 87, 87, 94, 94, 97, 97, 97, 109,
+ 111, 111, 111, 111, 111, 111, 111, 111, 125, 125, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 0, 0, 0, 31, 46, 46,
+ 46, 48, 48, 48, 48, 48, 48, 48, 48, 66, 66, 66, 66, 80, 93,
+ 93, 93, 93, 95, 95, 95, 95, 100, 115, 115, 115, 115, 115, 115, 115,
+ 115, 115, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 0, 0, 4, 40, 45, 45, 45, 45, 45, 45, 45, 45,
+ 49, 49, 49, 74, 74, 74, 74, 86, 90, 90, 90, 90, 95, 95, 95,
+ 95, 106, 120, 120, 120, 120, 120, 120, 120, 120, 120, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 0, 0, 14,
+ 42, 42, 42, 42, 42, 42, 42, 42, 46, 56, 56, 56, 80, 80, 80,
+ 80, 84, 84, 84, 84, 88, 99, 99, 99, 99, 111, 122, 122, 122, 122,
+ 122, 122, 122, 122, 122, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 0, 0, 26, 40, 40, 40, 40, 40, 40,
+ 40, 40, 54, 66, 66, 66, 80, 80, 80, 80, 80, 80, 80, 84, 94,
+ 106, 106, 106, 106, 116, 120, 120, 120, 120, 120, 120, 120, 120, 124, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 0, 3, 34, 38, 38, 38, 38, 38, 42, 42, 42, 63, 72, 72, 76,
+ 80, 80, 80, 80, 80, 80, 80, 89, 101, 114, 114, 114, 114, 118, 118,
+ 118, 118, 118, 118, 118, 118, 118, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 0, 12, 36, 36, 36, 36,
+ 36, 36, 49, 49, 49, 69, 73, 76, 86, 86, 86, 86, 86, 86, 86,
+ 86, 97, 109, 122, 122, 122, 122, 122, 122, 122, 122, 122, 122, 122, 122,
+ 122, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 0, 22, 34, 34, 34, 34, 38, 38, 57, 57, 57, 69,
+ 73, 82, 92, 92, 92, 92, 92, 92, 96, 96, 104, 117, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 0, 29, 33,
+ 33, 33, 33, 44, 44, 62, 62, 62, 69, 77, 87, 95, 95, 95, 95,
+ 95, 95, 107, 107, 110, 120, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 0, 31, 31, 31, 31, 31, 51, 51, 62,
+ 65, 65, 73, 83, 91, 94, 94, 94, 94, 97, 97, 114, 114, 114, 122,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 0, 29, 29, 29, 29, 29, 56, 56, 59, 70, 70, 79, 86, 89, 89,
+ 89, 89, 89, 100, 100, 116, 116, 116, 122, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 0, 28, 28, 28, 28, 28,
+ 57, 57, 57, 76, 76, 83, 86, 86, 86, 86, 86, 89, 104, 104, 114,
+ 114, 114, 124, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 0, 27, 27, 27, 27, 30, 55, 55, 55, 80, 80, 83,
+ 86, 86, 86, 86, 86, 93, 108, 108, 111, 111, 111, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 0, 26, 26,
+ 26, 26, 36, 53, 53, 53, 80, 80, 80, 90, 90, 90, 90, 90, 98,
+ 107, 107, 107, 107, 107, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 0, 26, 26, 26, 28, 42, 52, 54, 54,
+ 78, 78, 78, 95, 95, 95, 97, 97, 104, 106, 106, 106, 106, 106, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 0, 24, 24, 24, 33, 47, 49, 58, 58, 74, 74, 74, 97, 97, 97,
+ 106, 106, 108, 108, 108, 108, 108, 108, 124, 124, 124, 124, 124, 124, 124,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 0, 24, 24, 24, 39, 48,
+ 50, 63, 63, 72, 74, 74, 96, 96, 96, 109, 111, 111, 111, 111, 111,
+ 111, 111, 119, 119, 122, 122, 122, 122, 122, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 0, 23, 23, 23, 43, 46, 54, 66, 66, 69, 77, 77,
+ 92, 92, 92, 105, 113, 113, 113, 113, 113, 113, 113, 115, 117, 123, 123,
+ 123, 123, 123, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 0, 22, 22,
+ 22, 44, 44, 59, 67, 67, 67, 81, 81, 89, 89, 89, 97, 112, 112,
+ 112, 112, 112, 112, 112, 112, 119, 126, 126, 126, 126, 126, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 0, 21, 21, 24, 43, 45, 63, 65, 65,
+ 67, 85, 85, 87, 87, 87, 91, 109, 109, 109, 111, 111, 111, 111, 111,
+ 123, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 0, 21, 21, 28, 42, 50, 63, 63, 66, 71, 85, 85, 85, 85, 87,
+ 92, 106, 106, 108, 114, 114, 114, 114, 114, 125, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 0, 20, 20, 34, 41, 54,
+ 62, 62, 69, 75, 82, 82, 82, 82, 92, 98, 105, 105, 110, 117, 117,
+ 117, 117, 117, 124, 124, 126, 126, 126, 126, 126, 126, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 0, 20, 20, 38, 40, 58, 60, 60, 73, 78, 80, 80,
+ 80, 80, 100, 105, 107, 107, 113, 118, 118, 118, 118, 118, 120, 120, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 0, 19, 21,
+ 38, 40, 58, 58, 60, 75, 77, 77, 77, 81, 81, 107, 109, 109, 109,
+ 114, 116, 116, 116, 116, 116, 116, 116, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 0, 18, 25, 37, 44, 56, 56, 63, 75,
+ 75, 75, 75, 88, 88, 111, 111, 111, 111, 112, 112, 112, 112, 112, 112,
+ 112, 114, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 0, 18, 30, 36, 48, 55, 55, 67, 73, 73, 73, 73, 97, 97, 110,
+ 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 116, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 0, 18, 34, 36, 52, 55,
+ 55, 70, 72, 73, 73, 73, 102, 104, 108, 108, 108, 108, 109, 109, 109,
+ 109, 109, 109, 109, 119, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 0, 17, 35, 35, 52, 59, 59, 70, 70, 76, 76, 76,
+ 99, 105, 105, 105, 105, 105, 111, 111, 111, 111, 111, 111, 111, 121, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 0, 17, 34,
+ 36, 51, 61, 62, 70, 70, 80, 80, 80, 93, 103, 103, 103, 103, 103,
+ 112, 112, 112, 112, 112, 116, 118, 124, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 0, 16, 33, 39, 50, 59, 65, 72, 72,
+ 82, 82, 82, 91, 100, 100, 100, 100, 100, 109, 109, 109, 109, 109, 121,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 0, 16, 32, 43, 48, 54, 66, 75, 75, 81, 83, 83, 92, 97, 97,
+ 97, 99, 99, 105, 105, 105, 105, 105, 123, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 0, 15, 31, 46, 47, 49,
+ 69, 77, 77, 81, 85, 85, 93, 95, 95, 95, 100, 100, 102, 102, 102,
+ 102, 102, 120, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 0, 15, 30, 46, 48, 48, 70, 75, 79, 82, 87, 87,
+ 92, 94, 94, 94, 103, 103, 103, 103, 103, 104, 104, 115, 120, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 0, 15, 30,
+ 45, 50, 50, 68, 70, 80, 85, 89, 89, 90, 95, 95, 95, 104, 104,
+ 104, 104, 104, 109, 109, 112, 114, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 0, 14, 29, 44, 54, 54, 64, 64, 83,
+ 87, 88, 88, 88, 98, 98, 98, 103, 103, 103, 103, 103, 113, 113, 113,
+ 113, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 0, 14, 29, 43, 56, 56, 61, 61, 84, 85, 88, 88, 88, 100, 100,
+ 100, 102, 102, 102, 102, 102, 113, 116, 116, 116, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 0, 14, 28, 42, 57, 57,
+ 62, 62, 80, 80, 91, 91, 91, 100, 100, 100, 100, 100, 100, 100, 100,
+ 109, 119, 119, 119, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 0, 14, 28, 42, 56, 56, 65, 66, 76, 76, 92, 92,
+ 92, 97, 97, 97, 101, 101, 101, 101, 101, 106, 121, 121, 121, 126, 126,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 0, 13, 27,
+ 41, 55, 55, 67, 72, 74, 74, 90, 90, 90, 91, 91, 91, 105, 105,
+ 105, 105, 105, 107, 122, 122, 122, 123, 123, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 0, 13, 27, 40, 54, 54, 67, 76, 76,
+ 76, 85, 85, 85, 85, 85, 85, 112, 112, 112, 112, 112, 112, 121, 121,
+ 121, 121, 121, 126, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_FEC_RATE_TABLE_H_
diff --git a/third_party/libwebrtc/modules/video_coding/frame_buffer.cc b/third_party/libwebrtc/modules/video_coding/frame_buffer.cc
new file mode 100644
index 0000000000..787da1e5a9
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/frame_buffer.cc
@@ -0,0 +1,265 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/frame_buffer.h"
+
+#include <string.h>
+
+#include "api/video/encoded_image.h"
+#include "api/video/video_timing.h"
+#include "modules/video_coding/include/video_codec_interface.h"
+#include "modules/video_coding/packet.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/trace_event.h"
+
+namespace webrtc {
+
+VCMFrameBuffer::VCMFrameBuffer()
+ : _state(kStateEmpty), _nackCount(0), _latestPacketTimeMs(-1) {}
+
+VCMFrameBuffer::~VCMFrameBuffer() {}
+
+webrtc::VideoFrameType VCMFrameBuffer::FrameType() const {
+ return _sessionInfo.FrameType();
+}
+
+int32_t VCMFrameBuffer::GetLowSeqNum() const {
+ return _sessionInfo.LowSequenceNumber();
+}
+
+int32_t VCMFrameBuffer::GetHighSeqNum() const {
+ return _sessionInfo.HighSequenceNumber();
+}
+
+int VCMFrameBuffer::PictureId() const {
+ return _sessionInfo.PictureId();
+}
+
+int VCMFrameBuffer::TemporalId() const {
+ return _sessionInfo.TemporalId();
+}
+
+bool VCMFrameBuffer::LayerSync() const {
+ return _sessionInfo.LayerSync();
+}
+
+int VCMFrameBuffer::Tl0PicId() const {
+ return _sessionInfo.Tl0PicId();
+}
+
+std::vector<NaluInfo> VCMFrameBuffer::GetNaluInfos() const {
+ return _sessionInfo.GetNaluInfos();
+}
+
+void VCMFrameBuffer::SetGofInfo(const GofInfoVP9& gof_info, size_t idx) {
+ TRACE_EVENT0("webrtc", "VCMFrameBuffer::SetGofInfo");
+ _sessionInfo.SetGofInfo(gof_info, idx);
+ // TODO(asapersson): Consider adding hdr->VP9.ref_picture_id for testing.
+ _codecSpecificInfo.codecSpecific.VP9.temporal_idx =
+ gof_info.temporal_idx[idx];
+ _codecSpecificInfo.codecSpecific.VP9.temporal_up_switch =
+ gof_info.temporal_up_switch[idx];
+}
+
+// Insert packet
+VCMFrameBufferEnum VCMFrameBuffer::InsertPacket(const VCMPacket& packet,
+ int64_t timeInMs,
+ const FrameData& frame_data) {
+ TRACE_EVENT0("webrtc", "VCMFrameBuffer::InsertPacket");
+ RTC_DCHECK(!(NULL == packet.dataPtr && packet.sizeBytes > 0));
+ if (packet.dataPtr != NULL) {
+ _payloadType = packet.payloadType;
+ }
+
+ if (kStateEmpty == _state) {
+ // First packet (empty and/or media) inserted into this frame.
+    // Store some info and set some initial values.
+ SetTimestamp(packet.timestamp);
+ // We only take the ntp timestamp of the first packet of a frame.
+ ntp_time_ms_ = packet.ntp_time_ms_;
+ _codec = packet.codec();
+ if (packet.video_header.frame_type != VideoFrameType::kEmptyFrame) {
+ // first media packet
+ SetState(kStateIncomplete);
+ }
+ }
+
+ size_t oldSize = encoded_image_buffer_ ? encoded_image_buffer_->size() : 0;
+ uint32_t requiredSizeBytes =
+ size() + packet.sizeBytes +
+ (packet.insertStartCode ? kH264StartCodeLengthBytes : 0);
+ if (requiredSizeBytes > oldSize) {
+ const uint8_t* prevBuffer = data();
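+    // Grow the buffer in whole steps of kBufferIncStepSizeBytes, using enough
+    // steps to cover the required size.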
+ const uint32_t increments =
+ requiredSizeBytes / kBufferIncStepSizeBytes +
+ (requiredSizeBytes % kBufferIncStepSizeBytes > 0);
+ const uint32_t newSize = oldSize + increments * kBufferIncStepSizeBytes;
+ if (newSize > kMaxJBFrameSizeBytes) {
+ RTC_LOG(LS_ERROR) << "Failed to insert packet due to frame being too "
+ "big.";
+ return kSizeError;
+ }
+ if (data() == nullptr) {
+ encoded_image_buffer_ = EncodedImageBuffer::Create(newSize);
+ SetEncodedData(encoded_image_buffer_);
+ set_size(0);
+ } else {
+ RTC_CHECK(encoded_image_buffer_ != nullptr);
+ RTC_DCHECK_EQ(encoded_image_buffer_->data(), data());
+ encoded_image_buffer_->Realloc(newSize);
+ }
+ _sessionInfo.UpdateDataPointers(prevBuffer, data());
+ }
+
+ if (packet.width() > 0 && packet.height() > 0) {
+ _encodedWidth = packet.width();
+ _encodedHeight = packet.height();
+ }
+
+  // Don't copy payload-specific data for empty packets (e.g., padding packets).
+ if (packet.sizeBytes > 0)
+ CopyCodecSpecific(&packet.video_header);
+
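+  // VCMSessionInfo::InsertPacket() returns the number of payload bytes
+  // inserted on success and a negative error code otherwise.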
+ int retVal = _sessionInfo.InsertPacket(
+ packet, encoded_image_buffer_ ? encoded_image_buffer_->data() : nullptr,
+ frame_data);
+ if (retVal == -1) {
+ return kSizeError;
+ } else if (retVal == -2) {
+ return kDuplicatePacket;
+ } else if (retVal == -3) {
+ return kOutOfBoundsPacket;
+ }
+ // update size
+ set_size(size() + static_cast<uint32_t>(retVal));
+
+ _latestPacketTimeMs = timeInMs;
+
+ // http://www.etsi.org/deliver/etsi_ts/126100_126199/126114/12.07.00_60/
+ // ts_126114v120700p.pdf Section 7.4.5.
+ // The MTSI client shall add the payload bytes as defined in this clause
+ // onto the last RTP packet in each group of packets which make up a key
+ // frame (I-frame or IDR frame in H.264 (AVC), or an IRAP picture in H.265
+ // (HEVC)).
+ if (packet.markerBit) {
+ rotation_ = packet.video_header.rotation;
+ content_type_ = packet.video_header.content_type;
+ if (packet.video_header.video_timing.flags != VideoSendTiming::kInvalid) {
+ timing_.encode_start_ms =
+ ntp_time_ms_ + packet.video_header.video_timing.encode_start_delta_ms;
+ timing_.encode_finish_ms =
+ ntp_time_ms_ +
+ packet.video_header.video_timing.encode_finish_delta_ms;
+ timing_.packetization_finish_ms =
+ ntp_time_ms_ +
+ packet.video_header.video_timing.packetization_finish_delta_ms;
+ timing_.pacer_exit_ms =
+ ntp_time_ms_ + packet.video_header.video_timing.pacer_exit_delta_ms;
+ timing_.network_timestamp_ms =
+ ntp_time_ms_ +
+ packet.video_header.video_timing.network_timestamp_delta_ms;
+ timing_.network2_timestamp_ms =
+ ntp_time_ms_ +
+ packet.video_header.video_timing.network2_timestamp_delta_ms;
+ }
+ timing_.flags = packet.video_header.video_timing.flags;
+ }
+
+ if (packet.is_first_packet_in_frame()) {
+ playout_delay_ = packet.video_header.playout_delay;
+ }
+
+ if (_sessionInfo.complete()) {
+ SetState(kStateComplete);
+ return kCompleteSession;
+ }
+ return kIncomplete;
+}
+
+int64_t VCMFrameBuffer::LatestPacketTimeMs() const {
+ TRACE_EVENT0("webrtc", "VCMFrameBuffer::LatestPacketTimeMs");
+ return _latestPacketTimeMs;
+}
+
+void VCMFrameBuffer::IncrementNackCount() {
+ TRACE_EVENT0("webrtc", "VCMFrameBuffer::IncrementNackCount");
+ _nackCount++;
+}
+
+int16_t VCMFrameBuffer::GetNackCount() const {
+ TRACE_EVENT0("webrtc", "VCMFrameBuffer::GetNackCount");
+ return _nackCount;
+}
+
+bool VCMFrameBuffer::HaveFirstPacket() const {
+ TRACE_EVENT0("webrtc", "VCMFrameBuffer::HaveFirstPacket");
+ return _sessionInfo.HaveFirstPacket();
+}
+
+int VCMFrameBuffer::NumPackets() const {
+ TRACE_EVENT0("webrtc", "VCMFrameBuffer::NumPackets");
+ return _sessionInfo.NumPackets();
+}
+
+void VCMFrameBuffer::Reset() {
+ TRACE_EVENT0("webrtc", "VCMFrameBuffer::Reset");
+ set_size(0);
+ _sessionInfo.Reset();
+ _payloadType = 0;
+ _nackCount = 0;
+ _latestPacketTimeMs = -1;
+ _state = kStateEmpty;
+ VCMEncodedFrame::Reset();
+}
+
+// Set state of frame
+void VCMFrameBuffer::SetState(VCMFrameBufferStateEnum state) {
+ TRACE_EVENT0("webrtc", "VCMFrameBuffer::SetState");
+ if (_state == state) {
+ return;
+ }
+ switch (state) {
+ case kStateIncomplete:
+      // We can only enter this state from kStateEmpty.
+ RTC_DCHECK_EQ(_state, kStateEmpty);
+
+ // Do nothing, we received a packet
+ break;
+
+ case kStateComplete:
+ RTC_DCHECK(_state == kStateEmpty || _state == kStateIncomplete);
+
+ break;
+
+ case kStateEmpty:
+ // Should only be set to empty through Reset().
+ RTC_DCHECK_NOTREACHED();
+ break;
+ }
+ _state = state;
+}
+
+// Get current state of frame
+VCMFrameBufferStateEnum VCMFrameBuffer::GetState() const {
+ return _state;
+}
+
+void VCMFrameBuffer::PrepareForDecode(bool continuous) {
+ TRACE_EVENT0("webrtc", "VCMFrameBuffer::PrepareForDecode");
+ size_t bytes_removed = _sessionInfo.MakeDecodable();
+ set_size(size() - bytes_removed);
+ // Transfer frame information to EncodedFrame and create any codec
+ // specific information.
+ _frameType = _sessionInfo.FrameType();
+ _missingFrame = !continuous;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/frame_buffer.h b/third_party/libwebrtc/modules/video_coding/frame_buffer.h
new file mode 100644
index 0000000000..76df28e588
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/frame_buffer.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_FRAME_BUFFER_H_
+#define MODULES_VIDEO_CODING_FRAME_BUFFER_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <vector>
+
+#include "modules/video_coding/codecs/h264/include/h264_globals.h"
+#include "modules/video_coding/codecs/vp9/include/vp9_globals.h"
+#include "modules/video_coding/encoded_frame.h"
+#include "modules/video_coding/include/video_coding.h"
+#include "modules/video_coding/jitter_buffer_common.h"
+#include "modules/video_coding/packet.h"
+#include "modules/video_coding/session_info.h"
+
+namespace webrtc {
+
+class VCMFrameBuffer : public VCMEncodedFrame {
+ public:
+ VCMFrameBuffer();
+ virtual ~VCMFrameBuffer();
+
+ virtual void Reset();
+
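+  // Inserts a packet into the frame and updates the frame state. Returns
+  // kCompleteSession once the frame is complete, kIncomplete while packets
+  // are still missing, or an error code such as kSizeError.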
+ VCMFrameBufferEnum InsertPacket(const VCMPacket& packet,
+ int64_t timeInMs,
+ const FrameData& frame_data);
+
+ // State
+ // Get current state of frame
+ VCMFrameBufferStateEnum GetState() const;
+ void PrepareForDecode(bool continuous);
+
+ bool IsSessionComplete() const;
+ bool HaveFirstPacket() const;
+ int NumPackets() const;
+
+ // Sequence numbers
+ // Get lowest packet sequence number in frame
+ int32_t GetLowSeqNum() const;
+ // Get highest packet sequence number in frame
+ int32_t GetHighSeqNum() const;
+
+ int PictureId() const;
+ int TemporalId() const;
+ bool LayerSync() const;
+ int Tl0PicId() const;
+
+ std::vector<NaluInfo> GetNaluInfos() const;
+
+ void SetGofInfo(const GofInfoVP9& gof_info, size_t idx);
+
+ // Increments a counter to keep track of the number of packets of this frame
+ // which were NACKed before they arrived.
+ void IncrementNackCount();
+ // Returns the number of packets of this frame which were NACKed before they
+ // arrived.
+ int16_t GetNackCount() const;
+
+ int64_t LatestPacketTimeMs() const;
+
+ webrtc::VideoFrameType FrameType() const;
+
+ private:
+ void SetState(VCMFrameBufferStateEnum state); // Set state of frame
+
+ VCMFrameBufferStateEnum _state; // Current state of the frame
+ // Set with SetEncodedData, but keep pointer to the concrete class here, to
+ // enable reallocation and mutation.
+ rtc::scoped_refptr<EncodedImageBuffer> encoded_image_buffer_;
+ VCMSessionInfo _sessionInfo;
+ uint16_t _nackCount;
+ int64_t _latestPacketTimeMs;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_FRAME_BUFFER_H_
diff --git a/third_party/libwebrtc/modules/video_coding/frame_buffer2.cc b/third_party/libwebrtc/modules/video_coding/frame_buffer2.cc
new file mode 100644
index 0000000000..813ac69dd6
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/frame_buffer2.cc
@@ -0,0 +1,622 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/frame_buffer2.h"
+
+#include <algorithm>
+#include <cstdlib>
+#include <iterator>
+#include <memory>
+#include <queue>
+#include <utility>
+#include <vector>
+
+#include "absl/container/inlined_vector.h"
+#include "api/units/data_size.h"
+#include "api/units/time_delta.h"
+#include "api/video/encoded_image.h"
+#include "api/video/video_timing.h"
+#include "modules/video_coding/frame_helpers.h"
+#include "modules/video_coding/include/video_coding_defines.h"
+#include "modules/video_coding/timing/jitter_estimator.h"
+#include "modules/video_coding/timing/timing.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/experiments/rtt_mult_experiment.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/numerics/sequence_number_util.h"
+#include "rtc_base/trace_event.h"
+#include "system_wrappers/include/clock.h"
+
+namespace webrtc {
+namespace video_coding {
+
+namespace {
+// Max number of frames the buffer will hold.
+constexpr size_t kMaxFramesBuffered = 800;
+
+// Default value for the maximum decode queue size that is used when the
+// low-latency renderer is used.
+constexpr size_t kZeroPlayoutDelayDefaultMaxDecodeQueueSize = 8;
+
+// Max number of decoded frame infos that will be saved.
+constexpr int kMaxFramesHistory = 1 << 13;
+
+// The maximum time a frame may be late, relative to its predicted render
+// time, and still be rendered.
+constexpr int kMaxAllowedFrameDelayMs = 5;
+
+constexpr int64_t kLogNonDecodedIntervalMs = 5000;
+} // namespace
+
+FrameBuffer::FrameBuffer(Clock* clock,
+ VCMTiming* timing,
+ const FieldTrialsView& field_trials)
+ : decoded_frames_history_(kMaxFramesHistory),
+ clock_(clock),
+ callback_queue_(nullptr),
+ jitter_estimator_(clock, field_trials),
+ timing_(timing),
+ stopped_(false),
+ protection_mode_(kProtectionNack),
+ last_log_non_decoded_ms_(-kLogNonDecodedIntervalMs),
+ rtt_mult_settings_(RttMultExperiment::GetRttMultValue()),
+ zero_playout_delay_max_decode_queue_size_(
+ "max_decode_queue_size",
+ kZeroPlayoutDelayDefaultMaxDecodeQueueSize) {
+ ParseFieldTrial({&zero_playout_delay_max_decode_queue_size_},
+ field_trials.Lookup("WebRTC-ZeroPlayoutDelay"));
+ callback_checker_.Detach();
+}
+
+FrameBuffer::~FrameBuffer() {
+ RTC_DCHECK_RUN_ON(&construction_checker_);
+}
+
+void FrameBuffer::NextFrame(int64_t max_wait_time_ms,
+ bool keyframe_required,
+ TaskQueueBase* callback_queue,
+ NextFrameCallback handler) {
+ RTC_DCHECK_RUN_ON(&callback_checker_);
+ RTC_DCHECK(callback_queue->IsCurrent());
+ TRACE_EVENT0("webrtc", "FrameBuffer::NextFrame");
+ int64_t latest_return_time_ms =
+ clock_->TimeInMilliseconds() + max_wait_time_ms;
+
+ MutexLock lock(&mutex_);
+ if (stopped_) {
+ return;
+ }
+ latest_return_time_ms_ = latest_return_time_ms;
+ keyframe_required_ = keyframe_required;
+ frame_handler_ = handler;
+ callback_queue_ = callback_queue;
+ StartWaitForNextFrameOnQueue();
+}
+
+void FrameBuffer::StartWaitForNextFrameOnQueue() {
+ RTC_DCHECK(callback_queue_);
+ RTC_DCHECK(!callback_task_.Running());
+ int64_t wait_ms = FindNextFrame(clock_->CurrentTime());
+ callback_task_ = RepeatingTaskHandle::DelayedStart(
+ callback_queue_, TimeDelta::Millis(wait_ms),
+ [this] {
+ RTC_DCHECK_RUN_ON(&callback_checker_);
+ // If this task has not been cancelled, we did not get any new frames
+ // while waiting. Continue with frame delivery.
+ std::unique_ptr<EncodedFrame> frame;
+ NextFrameCallback frame_handler;
+ {
+ MutexLock lock(&mutex_);
+ if (!frames_to_decode_.empty()) {
+ // We have frames, deliver!
+ frame = GetNextFrame();
+ timing_->SetLastDecodeScheduledTimestamp(clock_->CurrentTime());
+ } else if (clock_->TimeInMilliseconds() < latest_return_time_ms_) {
+ // If there's no frames to decode and there is still time left, it
+ // means that the frame buffer was cleared between creation and
+ // execution of this task. Continue waiting for the remaining time.
+ int64_t wait_ms = FindNextFrame(clock_->CurrentTime());
+ return TimeDelta::Millis(wait_ms);
+ }
+ frame_handler = std::move(frame_handler_);
+ CancelCallback();
+ }
+ // Deliver frame, if any. Otherwise signal timeout.
+ frame_handler(std::move(frame));
+ return TimeDelta::Zero(); // Ignored.
+ },
+ TaskQueueBase::DelayPrecision::kHigh);
+}
+
+int64_t FrameBuffer::FindNextFrame(Timestamp now) {
+ int64_t wait_ms = latest_return_time_ms_ - now.ms();
+ frames_to_decode_.clear();
+
+ // `last_continuous_frame_` may be empty below, but nullopt is smaller
+  // than everything else, so the loop will immediately terminate as expected.
+ for (auto frame_it = frames_.begin();
+ frame_it != frames_.end() && frame_it->first <= last_continuous_frame_;
+ ++frame_it) {
+ if (!frame_it->second.continuous ||
+ frame_it->second.num_missing_decodable > 0) {
+ continue;
+ }
+
+ EncodedFrame* frame = frame_it->second.frame.get();
+
+ if (keyframe_required_ && !frame->is_keyframe())
+ continue;
+
+ auto last_decoded_frame_timestamp =
+ decoded_frames_history_.GetLastDecodedFrameTimestamp();
+
+ // TODO(https://bugs.webrtc.org/9974): consider removing this check
+ // as it may make a stream undecodable after a very long delay between
+ // frames.
+ if (last_decoded_frame_timestamp &&
+ AheadOf(*last_decoded_frame_timestamp, frame->Timestamp())) {
+ continue;
+ }
+
+ // Gather all remaining frames for the same superframe.
+ std::vector<FrameMap::iterator> current_superframe;
+ current_superframe.push_back(frame_it);
+ bool last_layer_completed = frame_it->second.frame->is_last_spatial_layer;
+ FrameMap::iterator next_frame_it = frame_it;
+ while (!last_layer_completed) {
+ ++next_frame_it;
+
+ if (next_frame_it == frames_.end() || !next_frame_it->second.frame) {
+ break;
+ }
+
+ if (next_frame_it->second.frame->Timestamp() != frame->Timestamp() ||
+ !next_frame_it->second.continuous) {
+ break;
+ }
+
+ if (next_frame_it->second.num_missing_decodable > 0) {
+ bool has_inter_layer_dependency = false;
+ for (size_t i = 0; i < EncodedFrame::kMaxFrameReferences &&
+ i < next_frame_it->second.frame->num_references;
+ ++i) {
+ if (next_frame_it->second.frame->references[i] >= frame_it->first) {
+ has_inter_layer_dependency = true;
+ break;
+ }
+ }
+
+ // If the frame has an undecoded dependency that is not within the same
+ // temporal unit then this frame is not yet ready to be decoded. If it
+ // is within the same temporal unit then the not yet decoded dependency
+ // is just a lower spatial frame, which is ok.
+ if (!has_inter_layer_dependency ||
+ next_frame_it->second.num_missing_decodable > 1) {
+ break;
+ }
+ }
+
+ current_superframe.push_back(next_frame_it);
+ last_layer_completed = next_frame_it->second.frame->is_last_spatial_layer;
+ }
+ // Check if the current superframe is complete.
+ // TODO(bugs.webrtc.org/10064): consider returning all available to
+ // decode frames even if the superframe is not complete yet.
+ if (!last_layer_completed) {
+ continue;
+ }
+
+ frames_to_decode_ = std::move(current_superframe);
+
+ absl::optional<Timestamp> render_time = frame->RenderTimestamp();
+ if (!render_time) {
+ render_time = timing_->RenderTime(frame->Timestamp(), now);
+ frame->SetRenderTime(render_time->ms());
+ }
+    bool too_many_frames_queued =
+        frames_.size() > zero_playout_delay_max_decode_queue_size_;
+ wait_ms =
+ timing_->MaxWaitingTime(*render_time, now, too_many_frames_queued).ms();
+
+    // This makes the frame buffer prefer high framerate over high resolution
+    // when the decoder cannot keep up and the stream has multiple spatial and
+    // temporal layers. With multiple temporal layers it may cause non-base
+    // layer frames to be skipped if they are late.
+ if (wait_ms < -kMaxAllowedFrameDelayMs)
+ continue;
+
+ break;
+ }
+ wait_ms = std::min<int64_t>(wait_ms, latest_return_time_ms_ - now.ms());
+ wait_ms = std::max<int64_t>(wait_ms, 0);
+ return wait_ms;
+}
+
+std::unique_ptr<EncodedFrame> FrameBuffer::GetNextFrame() {
+ RTC_DCHECK_RUN_ON(&callback_checker_);
+ Timestamp now = clock_->CurrentTime();
+ // TODO(ilnik): remove `frames_out` use frames_to_decode_ directly.
+ std::vector<std::unique_ptr<EncodedFrame>> frames_out;
+
+ RTC_DCHECK(!frames_to_decode_.empty());
+ bool superframe_delayed_by_retransmission = false;
+ DataSize superframe_size = DataSize::Zero();
+ const EncodedFrame& first_frame = *frames_to_decode_[0]->second.frame;
+ absl::optional<Timestamp> render_time = first_frame.RenderTimestamp();
+ int64_t receive_time_ms = first_frame.ReceivedTime();
+ // Gracefully handle bad RTP timestamps and render time issues.
+ if (!render_time ||
+ FrameHasBadRenderTiming(*render_time, now, timing_->TargetVideoDelay())) {
+ jitter_estimator_.Reset();
+ timing_->Reset();
+ render_time = timing_->RenderTime(first_frame.Timestamp(), now);
+ }
+
+ for (FrameMap::iterator& frame_it : frames_to_decode_) {
+ RTC_DCHECK(frame_it != frames_.end());
+ std::unique_ptr<EncodedFrame> frame = std::move(frame_it->second.frame);
+
+ frame->SetRenderTime(render_time->ms());
+
+ superframe_delayed_by_retransmission |= frame->delayed_by_retransmission();
+ receive_time_ms = std::max(receive_time_ms, frame->ReceivedTime());
+ superframe_size += DataSize::Bytes(frame->size());
+
+ PropagateDecodability(frame_it->second);
+ decoded_frames_history_.InsertDecoded(frame_it->first, frame->Timestamp());
+
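+    // Everything up to and including this frame is now either decoded or
+    // obsolete, so drop it from the buffer.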
+ frames_.erase(frames_.begin(), ++frame_it);
+
+ frames_out.emplace_back(std::move(frame));
+ }
+
+ if (!superframe_delayed_by_retransmission) {
+ auto frame_delay = inter_frame_delay_.CalculateDelay(
+ first_frame.Timestamp(), Timestamp::Millis(receive_time_ms));
+
+ if (frame_delay) {
+ jitter_estimator_.UpdateEstimate(*frame_delay, superframe_size);
+ }
+
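+    // With NACK+FEC, losses are expected to be recovered by FEC rather than
+    // retransmission, so the RTT is not added to the jitter estimate.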
+ float rtt_mult = protection_mode_ == kProtectionNackFEC ? 0.0 : 1.0;
+ absl::optional<TimeDelta> rtt_mult_add_cap_ms = absl::nullopt;
+ if (rtt_mult_settings_.has_value()) {
+ rtt_mult = rtt_mult_settings_->rtt_mult_setting;
+ rtt_mult_add_cap_ms =
+ TimeDelta::Millis(rtt_mult_settings_->rtt_mult_add_cap_ms);
+ }
+ timing_->SetJitterDelay(
+ jitter_estimator_.GetJitterEstimate(rtt_mult, rtt_mult_add_cap_ms));
+ timing_->UpdateCurrentDelay(*render_time, now);
+ } else {
+ if (RttMultExperiment::RttMultEnabled())
+ jitter_estimator_.FrameNacked();
+ }
+
+ if (frames_out.size() == 1) {
+ return std::move(frames_out[0]);
+ } else {
+ return CombineAndDeleteFrames(std::move(frames_out));
+ }
+}
+
+void FrameBuffer::SetProtectionMode(VCMVideoProtection mode) {
+ TRACE_EVENT0("webrtc", "FrameBuffer::SetProtectionMode");
+ MutexLock lock(&mutex_);
+ protection_mode_ = mode;
+}
+
+void FrameBuffer::Stop() {
+ TRACE_EVENT0("webrtc", "FrameBuffer::Stop");
+ MutexLock lock(&mutex_);
+ if (stopped_)
+ return;
+ stopped_ = true;
+
+ CancelCallback();
+}
+
+void FrameBuffer::Clear() {
+ MutexLock lock(&mutex_);
+ ClearFramesAndHistory();
+}
+
+int FrameBuffer::Size() {
+ MutexLock lock(&mutex_);
+ return frames_.size();
+}
+
+void FrameBuffer::UpdateRtt(int64_t rtt_ms) {
+ MutexLock lock(&mutex_);
+ jitter_estimator_.UpdateRtt(TimeDelta::Millis(rtt_ms));
+}
+
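+// References are valid if they all point strictly backwards and contain no
+// duplicates.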
+bool FrameBuffer::ValidReferences(const EncodedFrame& frame) const {
+ for (size_t i = 0; i < frame.num_references; ++i) {
+ if (frame.references[i] >= frame.Id())
+ return false;
+
+ for (size_t j = i + 1; j < frame.num_references; ++j) {
+ if (frame.references[i] == frame.references[j])
+ return false;
+ }
+ }
+
+ return true;
+}
+
+void FrameBuffer::CancelCallback() {
+ // Called from the callback queue or from within Stop().
+ frame_handler_ = {};
+ callback_task_.Stop();
+ callback_queue_ = nullptr;
+ callback_checker_.Detach();
+}
+
+int64_t FrameBuffer::InsertFrame(std::unique_ptr<EncodedFrame> frame) {
+ TRACE_EVENT0("webrtc", "FrameBuffer::InsertFrame");
+ RTC_DCHECK(frame);
+
+ MutexLock lock(&mutex_);
+
+ const auto& pis = frame->PacketInfos();
+ int64_t last_continuous_frame_id = last_continuous_frame_.value_or(-1);
+
+ if (!ValidReferences(*frame)) {
+ TRACE_EVENT2("webrtc",
+ "FrameBuffer::InsertFrame Frame dropped (Invalid references)",
+ "remote_ssrc", pis.empty() ? 0 : pis[0].ssrc(), "picture_id",
+ frame->Id());
+ RTC_LOG(LS_WARNING) << "Frame " << frame->Id()
+ << " has invalid frame references, dropping frame.";
+ return last_continuous_frame_id;
+ }
+
+ if (frames_.size() >= kMaxFramesBuffered) {
+ if (frame->is_keyframe()) {
+ TRACE_EVENT2("webrtc",
+ "FrameBuffer::InsertFrame Frames dropped (KF + Full buffer)",
+ "remote_ssrc", pis.empty() ? 0 : pis[0].ssrc(), "picture_id",
+ frame->Id());
+ RTC_LOG(LS_WARNING) << "Inserting keyframe " << frame->Id()
+ << " but buffer is full, clearing"
+ " buffer and inserting the frame.";
+ ClearFramesAndHistory();
+ } else {
+ TRACE_EVENT2("webrtc",
+ "FrameBuffer::InsertFrame Frame dropped (Full buffer)",
+ "remote_ssrc", pis.empty() ? 0 : pis[0].ssrc(), "picture_id",
+ frame->Id());
+ RTC_LOG(LS_WARNING) << "Frame " << frame->Id()
+ << " could not be inserted due to the frame "
+ "buffer being full, dropping frame.";
+ return last_continuous_frame_id;
+ }
+ }
+
+ auto last_decoded_frame = decoded_frames_history_.GetLastDecodedFrameId();
+ auto last_decoded_frame_timestamp =
+ decoded_frames_history_.GetLastDecodedFrameTimestamp();
+ if (last_decoded_frame && frame->Id() <= *last_decoded_frame) {
+ if (AheadOf(frame->Timestamp(), *last_decoded_frame_timestamp) &&
+ frame->is_keyframe()) {
+ // If this frame has a newer timestamp but an earlier frame id then we
+ // assume there has been a jump in the frame id due to some encoder
+ // reconfiguration or some other reason. Even though this is not according
+ // to spec we can still continue to decode from this frame if it is a
+ // keyframe.
+ TRACE_EVENT2("webrtc",
+ "FrameBuffer::InsertFrame Frames dropped (OOO + PicId jump)",
+ "remote_ssrc", pis.empty() ? 0 : pis[0].ssrc(), "picture_id",
+ frame->Id());
+ RTC_LOG(LS_WARNING)
+ << "A jump in frame id was detected, clearing buffer.";
+ ClearFramesAndHistory();
+ last_continuous_frame_id = -1;
+ } else {
+ TRACE_EVENT2("webrtc",
+ "FrameBuffer::InsertFrame Frame dropped (Out of order)",
+ "remote_ssrc", pis.empty() ? 0 : pis[0].ssrc(), "picture_id",
+ frame->Id());
+ RTC_LOG(LS_WARNING) << "Frame " << frame->Id() << " inserted after frame "
+ << *last_decoded_frame
+ << " was handed off for decoding, dropping frame.";
+ return last_continuous_frame_id;
+ }
+ }
+
+ // Test if inserting this frame would cause the order of the frames to become
+ // ambiguous (covering more than half the interval of 2^16). This can happen
+  // when frame ids make large jumps mid-stream.
+ if (!frames_.empty() && frame->Id() < frames_.begin()->first &&
+ frames_.rbegin()->first < frame->Id()) {
+ TRACE_EVENT2("webrtc",
+ "FrameBuffer::InsertFrame Frames dropped (PicId big-jump)",
+ "remote_ssrc", pis.empty() ? 0 : pis[0].ssrc(), "picture_id",
+ frame->Id());
+ RTC_LOG(LS_WARNING) << "A jump in frame id was detected, clearing buffer.";
+ ClearFramesAndHistory();
+ last_continuous_frame_id = -1;
+ }
+
+ auto info = frames_.emplace(frame->Id(), FrameInfo()).first;
+
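+  // A FrameInfo that already holds a frame means this frame id was inserted
+  // before; drop the duplicate.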
+ if (info->second.frame) {
+ return last_continuous_frame_id;
+ }
+
+ if (!UpdateFrameInfoWithIncomingFrame(*frame, info))
+ return last_continuous_frame_id;
+
+ // If ReceiveTime is negative then it is not a valid timestamp.
+ if (!frame->delayed_by_retransmission() && frame->ReceivedTime() >= 0)
+ timing_->IncomingTimestamp(frame->Timestamp(),
+ Timestamp::Millis(frame->ReceivedTime()));
+
+ // It can happen that a frame will be reported as fully received even if a
+ // lower spatial layer frame is missing.
+ info->second.frame = std::move(frame);
+
+ if (info->second.num_missing_continuous == 0) {
+ info->second.continuous = true;
+ PropagateContinuity(info);
+ last_continuous_frame_id = *last_continuous_frame_;
+
+ // Since we now have new continuous frames there might be a better frame
+ // to return from NextFrame.
+ if (callback_queue_) {
+ callback_queue_->PostTask([this] {
+ MutexLock lock(&mutex_);
+ if (!callback_task_.Running())
+ return;
+ RTC_CHECK(frame_handler_);
+ callback_task_.Stop();
+ StartWaitForNextFrameOnQueue();
+ });
+ }
+ }
+
+ return last_continuous_frame_id;
+}
+
+void FrameBuffer::PropagateContinuity(FrameMap::iterator start) {
+ TRACE_EVENT0("webrtc", "FrameBuffer::PropagateContinuity");
+ RTC_DCHECK(start->second.continuous);
+
+ std::queue<FrameMap::iterator> continuous_frames;
+ continuous_frames.push(start);
+
+ // A simple BFS to traverse continuous frames.
+ while (!continuous_frames.empty()) {
+ auto frame = continuous_frames.front();
+ continuous_frames.pop();
+
+ if (!last_continuous_frame_ || *last_continuous_frame_ < frame->first) {
+ last_continuous_frame_ = frame->first;
+ }
+
+ // Loop through all dependent frames, and if that frame no longer has
+ // any unfulfilled dependencies then that frame is continuous as well.
+ for (size_t d = 0; d < frame->second.dependent_frames.size(); ++d) {
+ auto frame_ref = frames_.find(frame->second.dependent_frames[d]);
+ RTC_DCHECK(frame_ref != frames_.end());
+
+ // TODO(philipel): Look into why we've seen this happen.
+ if (frame_ref != frames_.end()) {
+ --frame_ref->second.num_missing_continuous;
+ if (frame_ref->second.num_missing_continuous == 0) {
+ frame_ref->second.continuous = true;
+ continuous_frames.push(frame_ref);
+ }
+ }
+ }
+ }
+}
+
+void FrameBuffer::PropagateDecodability(const FrameInfo& info) {
+ TRACE_EVENT0("webrtc", "FrameBuffer::PropagateDecodability");
+ for (size_t d = 0; d < info.dependent_frames.size(); ++d) {
+ auto ref_info = frames_.find(info.dependent_frames[d]);
+ RTC_DCHECK(ref_info != frames_.end());
+ // TODO(philipel): Look into why we've seen this happen.
+ if (ref_info != frames_.end()) {
+ RTC_DCHECK_GT(ref_info->second.num_missing_decodable, 0U);
+ --ref_info->second.num_missing_decodable;
+ }
+ }
+}
+
+bool FrameBuffer::UpdateFrameInfoWithIncomingFrame(const EncodedFrame& frame,
+ FrameMap::iterator info) {
+ TRACE_EVENT0("webrtc", "FrameBuffer::UpdateFrameInfoWithIncomingFrame");
+ auto last_decoded_frame = decoded_frames_history_.GetLastDecodedFrameId();
+ RTC_DCHECK(!last_decoded_frame || *last_decoded_frame < info->first);
+
+ // In this function we determine how many missing dependencies this `frame`
+  // has to become continuous/decodable. If a frame that this `frame` depends
+ // on has already been decoded then we can ignore that dependency since it has
+ // already been fulfilled.
+ //
+ // For all other frames we will register a backwards reference to this `frame`
+ // so that `num_missing_continuous` and `num_missing_decodable` can be
+ // decremented as frames become continuous/are decoded.
+ struct Dependency {
+ int64_t frame_id;
+ bool continuous;
+ };
+ std::vector<Dependency> not_yet_fulfilled_dependencies;
+
+ // Find all dependencies that have not yet been fulfilled.
+ for (size_t i = 0; i < frame.num_references; ++i) {
+ // Does `frame` depend on a frame earlier than the last decoded one?
+ if (last_decoded_frame && frame.references[i] <= *last_decoded_frame) {
+ // Was that frame decoded? If not, this `frame` will never become
+ // decodable.
+ if (!decoded_frames_history_.WasDecoded(frame.references[i])) {
+ int64_t now_ms = clock_->TimeInMilliseconds();
+ if (last_log_non_decoded_ms_ + kLogNonDecodedIntervalMs < now_ms) {
+ RTC_LOG(LS_WARNING)
+ << "Frame " << frame.Id()
+ << " depends on a non-decoded frame more previous than the last "
+ "decoded frame, dropping frame.";
+ last_log_non_decoded_ms_ = now_ms;
+ }
+ return false;
+ }
+ } else {
+ auto ref_info = frames_.find(frame.references[i]);
+ bool ref_continuous =
+ ref_info != frames_.end() && ref_info->second.continuous;
+ not_yet_fulfilled_dependencies.push_back(
+ {frame.references[i], ref_continuous});
+ }
+ }
+
+ info->second.num_missing_continuous = not_yet_fulfilled_dependencies.size();
+ info->second.num_missing_decodable = not_yet_fulfilled_dependencies.size();
+
+ for (const Dependency& dep : not_yet_fulfilled_dependencies) {
+ if (dep.continuous)
+ --info->second.num_missing_continuous;
+
+ frames_[dep.frame_id].dependent_frames.push_back(frame.Id());
+ }
+
+ return true;
+}
+
+void FrameBuffer::ClearFramesAndHistory() {
+ TRACE_EVENT0("webrtc", "FrameBuffer::ClearFramesAndHistory");
+ frames_.clear();
+ last_continuous_frame_.reset();
+ frames_to_decode_.clear();
+ decoded_frames_history_.Clear();
+}
+
+// TODO(philipel): Avoid the concatenation of frames here, by replacing
+// NextFrame and GetNextFrame with methods returning multiple frames.
+std::unique_ptr<EncodedFrame> FrameBuffer::CombineAndDeleteFrames(
+ std::vector<std::unique_ptr<EncodedFrame>> frames) const {
+ RTC_DCHECK(!frames.empty());
+ absl::InlinedVector<std::unique_ptr<EncodedFrame>, 4> inlined;
+ for (auto& frame : frames) {
+ inlined.push_back(std::move(frame));
+ }
+ return webrtc::CombineAndDeleteFrames(std::move(inlined));
+}
+
+FrameBuffer::FrameInfo::FrameInfo() = default;
+FrameBuffer::FrameInfo::FrameInfo(FrameInfo&&) = default;
+FrameBuffer::FrameInfo::~FrameInfo() = default;
+
+} // namespace video_coding
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/frame_buffer2.h b/third_party/libwebrtc/modules/video_coding/frame_buffer2.h
new file mode 100644
index 0000000000..1383c40ae3
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/frame_buffer2.h
@@ -0,0 +1,193 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_FRAME_BUFFER2_H_
+#define MODULES_VIDEO_CODING_FRAME_BUFFER2_H_
+
+#include <array>
+#include <map>
+#include <memory>
+#include <utility>
+#include <vector>
+
+#include "absl/container/inlined_vector.h"
+#include "api/field_trials_view.h"
+#include "api/sequence_checker.h"
+#include "api/task_queue/task_queue_base.h"
+#include "api/video/encoded_frame.h"
+#include "modules/video_coding/include/video_coding_defines.h"
+#include "modules/video_coding/timing/inter_frame_delay.h"
+#include "modules/video_coding/timing/jitter_estimator.h"
+#include "modules/video_coding/utility/decoded_frames_history.h"
+#include "rtc_base/event.h"
+#include "rtc_base/experiments/field_trial_parser.h"
+#include "rtc_base/experiments/rtt_mult_experiment.h"
+#include "rtc_base/numerics/sequence_number_util.h"
+#include "rtc_base/synchronization/mutex.h"
+#include "rtc_base/system/no_unique_address.h"
+#include "rtc_base/task_utils/repeating_task.h"
+#include "rtc_base/thread_annotations.h"
+
+namespace webrtc {
+
+class Clock;
+class VCMReceiveStatisticsCallback;
+class JitterEstimator;
+class VCMTiming;
+
+namespace video_coding {
+
+class FrameBuffer {
+ public:
+ FrameBuffer(Clock* clock,
+ VCMTiming* timing,
+ const FieldTrialsView& field_trials);
+
+ FrameBuffer() = delete;
+ FrameBuffer(const FrameBuffer&) = delete;
+ FrameBuffer& operator=(const FrameBuffer&) = delete;
+
+ virtual ~FrameBuffer();
+
+ // Insert a frame into the frame buffer. Returns the picture id
+ // of the last continuous frame or -1 if there is no continuous frame.
+ int64_t InsertFrame(std::unique_ptr<EncodedFrame> frame);
+
+ using NextFrameCallback = std::function<void(std::unique_ptr<EncodedFrame>)>;
+ // Get the next frame for decoding. `handler` is invoked with the next frame
+ // or with nullptr if no frame is ready for decoding after `max_wait_time_ms`.
+ void NextFrame(int64_t max_wait_time_ms,
+ bool keyframe_required,
+ TaskQueueBase* callback_queue,
+ NextFrameCallback handler);
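+  // Usage sketch (illustrative only; `queue` and `Decode` are assumed to be
+  // supplied by the caller):
+  //   buffer.NextFrame(/*max_wait_time_ms=*/200, /*keyframe_required=*/false,
+  //                    queue, [](std::unique_ptr<EncodedFrame> frame) {
+  //                      if (frame) Decode(std::move(frame));  // null: timeout
+  //                    });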
+
+  // Tells the FrameBuffer which protection mode is in use. Affects
+ // the frame timing.
+  // TODO(philipel): Remove this when new timing calculations have been
+ // implemented.
+ void SetProtectionMode(VCMVideoProtection mode);
+
+ // Stop the frame buffer, causing any sleeping thread in NextFrame to
+ // return immediately.
+ void Stop();
+
+ // Updates the RTT for jitter buffer estimation.
+ void UpdateRtt(int64_t rtt_ms);
+
+ // Clears the FrameBuffer, removing all the buffered frames.
+ void Clear();
+
+ int Size();
+
+ private:
+ struct FrameInfo {
+ FrameInfo();
+ FrameInfo(FrameInfo&&);
+ ~FrameInfo();
+
+    // The other frames that have direct, unfulfilled dependencies
+ // on this frame.
+ absl::InlinedVector<int64_t, 8> dependent_frames;
+
+    // A frame is continuous if it has all its referenced/indirectly
+    // referenced frames.
+    //
+    // How many unfulfilled frames this frame has until it becomes continuous.
+ size_t num_missing_continuous = 0;
+
+ // A frame is decodable if all its referenced frames have been decoded.
+ //
+    // How many unfulfilled frames this frame has until it becomes decodable.
+ size_t num_missing_decodable = 0;
+
+ // If this frame is continuous or not.
+ bool continuous = false;
+
+ // The actual EncodedFrame.
+ std::unique_ptr<EncodedFrame> frame;
+ };
+
+ using FrameMap = std::map<int64_t, FrameInfo>;
+
+ // Check that the references of `frame` are valid.
+ bool ValidReferences(const EncodedFrame& frame) const;
+
+ int64_t FindNextFrame(Timestamp now) RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+ std::unique_ptr<EncodedFrame> GetNextFrame()
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+
+ void StartWaitForNextFrameOnQueue() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+ void CancelCallback() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+
+ // Update all directly dependent and indirectly dependent frames and mark
+ // them as continuous if all their references has been fulfilled.
+ void PropagateContinuity(FrameMap::iterator start)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+
+ // Marks the frame as decoded and updates all directly dependent frames.
+ void PropagateDecodability(const FrameInfo& info)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+
+ // Update the corresponding FrameInfo of `frame` and all FrameInfos that
+ // `frame` references.
+ // Return false if `frame` will never be decodable, true otherwise.
+ bool UpdateFrameInfoWithIncomingFrame(const EncodedFrame& frame,
+ FrameMap::iterator info)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+
+ void ClearFramesAndHistory() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+
+ // The cleaner solution would be to have the NextFrame function return a
+  // vector of frames, but until the decoding pipeline supports decoding
+  // multiple frames at the same time we combine all frames into one frame and
+ // return it. See bugs.webrtc.org/10064
+ std::unique_ptr<EncodedFrame> CombineAndDeleteFrames(
+ std::vector<std::unique_ptr<EncodedFrame>> frames) const;
+
+ RTC_NO_UNIQUE_ADDRESS SequenceChecker construction_checker_;
+ RTC_NO_UNIQUE_ADDRESS SequenceChecker callback_checker_;
+
+ // Stores only undecoded frames.
+ FrameMap frames_ RTC_GUARDED_BY(mutex_);
+ DecodedFramesHistory decoded_frames_history_ RTC_GUARDED_BY(mutex_);
+
+ Mutex mutex_;
+ Clock* const clock_;
+
+ TaskQueueBase* callback_queue_ RTC_GUARDED_BY(mutex_);
+ RepeatingTaskHandle callback_task_ RTC_GUARDED_BY(mutex_);
+ NextFrameCallback frame_handler_ RTC_GUARDED_BY(mutex_);
+ int64_t latest_return_time_ms_ RTC_GUARDED_BY(mutex_);
+ bool keyframe_required_ RTC_GUARDED_BY(mutex_);
+
+ JitterEstimator jitter_estimator_ RTC_GUARDED_BY(mutex_);
+ VCMTiming* const timing_ RTC_GUARDED_BY(mutex_);
+ InterFrameDelay inter_frame_delay_ RTC_GUARDED_BY(mutex_);
+ absl::optional<int64_t> last_continuous_frame_ RTC_GUARDED_BY(mutex_);
+ std::vector<FrameMap::iterator> frames_to_decode_ RTC_GUARDED_BY(mutex_);
+ bool stopped_ RTC_GUARDED_BY(mutex_);
+ VCMVideoProtection protection_mode_ RTC_GUARDED_BY(mutex_);
+ int64_t last_log_non_decoded_ms_ RTC_GUARDED_BY(mutex_);
+
+ // rtt_mult experiment settings.
+ const absl::optional<RttMultExperiment::Settings> rtt_mult_settings_;
+
+ // Maximum number of frames in the decode queue to allow pacing. If the
+ // queue grows beyond the max limit, pacing will be disabled and frames will
+ // be pushed to the decoder as soon as possible. This only has an effect
+ // when the low-latency rendering path is active, which is indicated by
+ // the frame's render time == 0.
+ FieldTrialParameter<unsigned> zero_playout_delay_max_decode_queue_size_;
+};
+
+} // namespace video_coding
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_FRAME_BUFFER2_H_
diff --git a/third_party/libwebrtc/modules/video_coding/frame_buffer2_unittest.cc b/third_party/libwebrtc/modules/video_coding/frame_buffer2_unittest.cc
new file mode 100644
index 0000000000..0fabd9b496
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/frame_buffer2_unittest.cc
@@ -0,0 +1,665 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/frame_buffer2.h"
+
+#include <algorithm>
+#include <cstring>
+#include <limits>
+#include <memory>
+#include <vector>
+
+#include "api/task_queue/task_queue_base.h"
+#include "api/units/time_delta.h"
+#include "api/units/timestamp.h"
+#include "modules/video_coding/frame_object.h"
+#include "modules/video_coding/timing/jitter_estimator.h"
+#include "modules/video_coding/timing/timing.h"
+#include "rtc_base/numerics/sequence_number_util.h"
+#include "rtc_base/platform_thread.h"
+#include "rtc_base/random.h"
+#include "system_wrappers/include/clock.h"
+#include "test/field_trial.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "test/scoped_key_value_config.h"
+#include "test/time_controller/simulated_time_controller.h"
+
+using ::testing::_;
+using ::testing::IsEmpty;
+using ::testing::Return;
+using ::testing::SizeIs;
+
+namespace webrtc {
+namespace video_coding {
+
+class VCMTimingFake : public VCMTiming {
+ public:
+ explicit VCMTimingFake(Clock* clock, const FieldTrialsView& field_trials)
+ : VCMTiming(clock, field_trials) {}
+
+ Timestamp RenderTime(uint32_t frame_timestamp, Timestamp now) const override {
+ if (last_render_time_.IsMinusInfinity()) {
+ last_render_time_ = now + kDelay;
+ last_timestamp_ = frame_timestamp;
+ }
+
+ auto diff = MinDiff(frame_timestamp, last_timestamp_);
+ auto timeDiff = TimeDelta::Millis(diff / 90);
+ if (AheadOf(frame_timestamp, last_timestamp_))
+ last_render_time_ += timeDiff;
+ else
+ last_render_time_ -= timeDiff;
+
+ last_timestamp_ = frame_timestamp;
+ return last_render_time_;
+ }
+
+ TimeDelta MaxWaitingTime(Timestamp render_time,
+ Timestamp now,
+ bool too_many_frames_queued) const override {
+ return render_time - now - kDecodeTime;
+ }
+
+ TimeDelta GetCurrentJitter() {
+ return VCMTiming::GetTimings().jitter_buffer_delay;
+ }
+
+ private:
+ static constexpr TimeDelta kDelay = TimeDelta::Millis(50);
+ const TimeDelta kDecodeTime = kDelay / 2;
+ mutable uint32_t last_timestamp_ = 0;
+ mutable Timestamp last_render_time_ = Timestamp::MinusInfinity();
+};
+
+class FrameObjectFake : public EncodedFrame {
+ public:
+ int64_t ReceivedTime() const override { return 0; }
+
+ int64_t RenderTime() const override { return _renderTimeMs; }
+
+ bool delayed_by_retransmission() const override {
+ return delayed_by_retransmission_;
+ }
+ void set_delayed_by_retransmission(bool delayed) {
+ delayed_by_retransmission_ = delayed;
+ }
+
+ private:
+ bool delayed_by_retransmission_ = false;
+};
+
+class VCMReceiveStatisticsCallbackMock : public VCMReceiveStatisticsCallback {
+ public:
+ MOCK_METHOD(void,
+ OnCompleteFrame,
+ (bool is_keyframe,
+ size_t size_bytes,
+ VideoContentType content_type),
+ (override));
+ MOCK_METHOD(void, OnDroppedFrames, (uint32_t frames_dropped), (override));
+ MOCK_METHOD(void,
+ OnFrameBufferTimingsUpdated,
+ (int max_decode,
+ int current_delay,
+ int target_delay,
+ int jitter_buffer,
+ int min_playout_delay,
+ int render_delay),
+ (override));
+ MOCK_METHOD(void,
+ OnTimingFrameInfoUpdated,
+ (const TimingFrameInfo& info),
+ (override));
+};
+
+class TestFrameBuffer2 : public ::testing::Test {
+ protected:
+ static constexpr int kMaxReferences = 5;
+ static constexpr int kFps1 = 1000;
+ static constexpr int kFps10 = kFps1 / 10;
+ static constexpr int kFps20 = kFps1 / 20;
+ static constexpr size_t kFrameSize = 10;
+
+ TestFrameBuffer2()
+ : time_controller_(Timestamp::Seconds(0)),
+ time_task_queue_(
+ time_controller_.GetTaskQueueFactory()->CreateTaskQueue(
+ "extract queue",
+ TaskQueueFactory::Priority::NORMAL)),
+ timing_(time_controller_.GetClock(), field_trials_),
+ buffer_(new FrameBuffer(time_controller_.GetClock(),
+ &timing_,
+ field_trials_)),
+ rand_(0x34678213) {}
+
+ template <typename... T>
+ std::unique_ptr<FrameObjectFake> CreateFrame(uint16_t picture_id,
+ uint8_t spatial_layer,
+ int64_t ts_ms,
+ bool last_spatial_layer,
+ size_t frame_size_bytes,
+ T... refs) {
+ static_assert(sizeof...(refs) <= kMaxReferences,
+                  "Too many references specified for EncodedFrame.");
+ std::array<uint16_t, sizeof...(refs)> references = {
+ {rtc::checked_cast<uint16_t>(refs)...}};
+
+ auto frame = std::make_unique<FrameObjectFake>();
+ frame->SetId(picture_id);
+ frame->SetSpatialIndex(spatial_layer);
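+    // Convert the millisecond timestamp to the 90 kHz RTP clock.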
+ frame->SetTimestamp(ts_ms * 90);
+ frame->num_references = references.size();
+ frame->is_last_spatial_layer = last_spatial_layer;
+ // Add some data to buffer.
+ frame->SetEncodedData(EncodedImageBuffer::Create(frame_size_bytes));
+ for (size_t r = 0; r < references.size(); ++r)
+ frame->references[r] = references[r];
+ return frame;
+ }
+
+ template <typename... T>
+ int InsertFrame(uint16_t picture_id,
+ uint8_t spatial_layer,
+ int64_t ts_ms,
+ bool last_spatial_layer,
+ size_t frame_size_bytes,
+ T... refs) {
+ return buffer_->InsertFrame(CreateFrame(picture_id, spatial_layer, ts_ms,
+ last_spatial_layer,
+ frame_size_bytes, refs...));
+ }
+
+ int InsertNackedFrame(uint16_t picture_id, int64_t ts_ms) {
+ std::unique_ptr<FrameObjectFake> frame =
+ CreateFrame(picture_id, 0, ts_ms, true, kFrameSize);
+ frame->set_delayed_by_retransmission(true);
+ return buffer_->InsertFrame(std::move(frame));
+ }
+
+ void ExtractFrame(int64_t max_wait_time = 0, bool keyframe_required = false) {
+ time_task_queue_->PostTask([this, max_wait_time, keyframe_required]() {
+ buffer_->NextFrame(max_wait_time, keyframe_required,
+ time_task_queue_.get(),
+ [this](std::unique_ptr<EncodedFrame> frame) {
+ frames_.emplace_back(std::move(frame));
+ });
+ });
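+    // Advancing simulated time by zero lets the time controller run the
+    // extraction task posted above before returning to the test body.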
+ if (max_wait_time == 0) {
+ time_controller_.AdvanceTime(TimeDelta::Zero());
+ }
+ }
+
+ void CheckFrame(size_t index, int picture_id, int spatial_layer) {
+ ASSERT_LT(index, frames_.size());
+ ASSERT_TRUE(frames_[index]);
+ ASSERT_EQ(picture_id, frames_[index]->Id());
+ ASSERT_EQ(spatial_layer, frames_[index]->SpatialIndex().value_or(0));
+ }
+
+ void CheckFrameSize(size_t index, size_t size) {
+ ASSERT_LT(index, frames_.size());
+ ASSERT_TRUE(frames_[index]);
+ ASSERT_EQ(frames_[index]->size(), size);
+ }
+
+ void CheckNoFrame(size_t index) {
+ ASSERT_LT(index, frames_.size());
+ ASSERT_FALSE(frames_[index]);
+ }
+
+ uint32_t Rand() { return rand_.Rand<uint32_t>(); }
+
+ test::ScopedKeyValueConfig field_trials_;
+ webrtc::GlobalSimulatedTimeController time_controller_;
+ std::unique_ptr<TaskQueueBase, TaskQueueDeleter> time_task_queue_;
+ VCMTimingFake timing_;
+ std::unique_ptr<FrameBuffer> buffer_;
+ std::vector<std::unique_ptr<EncodedFrame>> frames_;
+ Random rand_;
+};
+
+// From https://en.cppreference.com/w/cpp/language/static: "If ... a constexpr
+// static data member (since C++11) is odr-used, a definition at namespace scope
+// is still required... This definition is deprecated for constexpr data members
+// since C++17."
+// kFrameSize is odr-used since it is passed by reference to EXPECT_EQ().
+#if __cplusplus < 201703L
+constexpr size_t TestFrameBuffer2::kFrameSize;
+#endif
+
+TEST_F(TestFrameBuffer2, WaitForFrame) {
+ uint16_t pid = Rand();
+ uint32_t ts = Rand();
+
+ ExtractFrame(50);
+ InsertFrame(pid, 0, ts, true, kFrameSize);
+ time_controller_.AdvanceTime(TimeDelta::Millis(50));
+ CheckFrame(0, pid, 0);
+}
+
+TEST_F(TestFrameBuffer2, ClearWhileWaitingForFrame) {
+ const uint16_t pid = Rand();
+
+ // Insert a frame and wait for it for max 100ms.
+ InsertFrame(pid, 0, 25, true, kFrameSize);
+ ExtractFrame(100);
+ // After 10ms, clear the buffer.
+ time_controller_.AdvanceTime(TimeDelta::Millis(10));
+ buffer_->Clear();
+ // Confirm that the frame was not sent for rendering.
+ time_controller_.AdvanceTime(TimeDelta::Millis(15));
+ EXPECT_THAT(frames_, IsEmpty());
+
+ // We are still waiting for a frame, since 100ms has not passed. Insert a new
+ // frame. This new frame should be the one that is returned as the old frame
+ // was cleared.
+ const uint16_t new_pid = pid + 1;
+ InsertFrame(new_pid, 0, 50, true, kFrameSize);
+ time_controller_.AdvanceTime(TimeDelta::Millis(25));
+ ASSERT_THAT(frames_, SizeIs(1));
+ CheckFrame(0, new_pid, 0);
+}
+
+TEST_F(TestFrameBuffer2, OneSuperFrame) {
+ uint16_t pid = Rand();
+ uint32_t ts = Rand();
+
+ InsertFrame(pid, 0, ts, false, kFrameSize);
+ InsertFrame(pid + 1, 1, ts, true, kFrameSize);
+ ExtractFrame();
+
+ CheckFrame(0, pid, 1);
+}
+
+TEST_F(TestFrameBuffer2, ZeroPlayoutDelay) {
+ test::ScopedKeyValueConfig field_trials;
+ VCMTiming timing(time_controller_.GetClock(), field_trials);
+ buffer_ = std::make_unique<FrameBuffer>(time_controller_.GetClock(), &timing,
+ field_trials);
+ const VideoPlayoutDelay kPlayoutDelayMs = {0, 0};
+ std::unique_ptr<FrameObjectFake> test_frame(new FrameObjectFake());
+ test_frame->SetId(0);
+ test_frame->SetPlayoutDelay(kPlayoutDelayMs);
+ buffer_->InsertFrame(std::move(test_frame));
+ ExtractFrame(0, false);
+ CheckFrame(0, 0, 0);
+ EXPECT_EQ(0, frames_[0]->RenderTimeMs());
+}
+
+// Flaky test, see bugs.webrtc.org/7068.
+TEST_F(TestFrameBuffer2, DISABLED_OneUnorderedSuperFrame) {
+ uint16_t pid = Rand();
+ uint32_t ts = Rand();
+
+ ExtractFrame(50);
+ InsertFrame(pid, 1, ts, true, kFrameSize);
+ InsertFrame(pid, 0, ts, false, kFrameSize);
+ time_controller_.AdvanceTime(TimeDelta::Zero());
+
+ CheckFrame(0, pid, 0);
+ CheckFrame(1, pid, 1);
+}
+
+TEST_F(TestFrameBuffer2, DISABLED_OneLayerStreamReordered) {
+ uint16_t pid = Rand();
+ uint32_t ts = Rand();
+
+  InsertFrame(pid, 0, ts, true, kFrameSize);
+ ExtractFrame();
+ CheckFrame(0, pid, 0);
+ for (int i = 1; i < 10; i += 2) {
+ ExtractFrame(50);
+ InsertFrame(pid + i + 1, 0, ts + (i + 1) * kFps10, true, kFrameSize,
+ pid + i);
+ time_controller_.AdvanceTime(TimeDelta::Millis(kFps10));
+ InsertFrame(pid + i, 0, ts + i * kFps10, true, kFrameSize, pid + i - 1);
+ time_controller_.AdvanceTime(TimeDelta::Millis(kFps10));
+ ExtractFrame();
+ CheckFrame(i, pid + i, 0);
+ CheckFrame(i + 1, pid + i + 1, 0);
+ }
+}
+
+TEST_F(TestFrameBuffer2, ExtractFromEmptyBuffer) {
+ ExtractFrame();
+ CheckNoFrame(0);
+}
+
+TEST_F(TestFrameBuffer2, MissingFrame) {
+ uint16_t pid = Rand();
+ uint32_t ts = Rand();
+
+ InsertFrame(pid, 0, ts, true, kFrameSize);
+ InsertFrame(pid + 2, 0, ts, true, kFrameSize, pid);
+ InsertFrame(pid + 3, 0, ts, true, kFrameSize, pid + 1, pid + 2);
+ ExtractFrame();
+ ExtractFrame();
+ ExtractFrame();
+
+ CheckFrame(0, pid, 0);
+ CheckFrame(1, pid + 2, 0);
+ CheckNoFrame(2);
+}
+
+TEST_F(TestFrameBuffer2, OneLayerStream) {
+ uint16_t pid = Rand();
+ uint32_t ts = Rand();
+
+ InsertFrame(pid, 0, ts, true, kFrameSize);
+ ExtractFrame();
+ CheckFrame(0, pid, 0);
+ for (int i = 1; i < 10; ++i) {
+ InsertFrame(pid + i, 0, ts + i * kFps10, true, kFrameSize, pid + i - 1);
+ ExtractFrame();
+ time_controller_.AdvanceTime(TimeDelta::Millis(kFps10));
+ CheckFrame(i, pid + i, 0);
+ }
+}
+
+TEST_F(TestFrameBuffer2, DropTemporalLayerSlowDecoder) {
+ uint16_t pid = Rand();
+ uint32_t ts = Rand();
+
+ InsertFrame(pid, 0, ts, true, kFrameSize);
+ InsertFrame(pid + 1, 0, ts + kFps20, true, kFrameSize, pid);
+ for (int i = 2; i < 10; i += 2) {
+ uint32_t ts_tl0 = ts + i / 2 * kFps10;
+ InsertFrame(pid + i, 0, ts_tl0, true, kFrameSize, pid + i - 2);
+ InsertFrame(pid + i + 1, 0, ts_tl0 + kFps20, true, kFrameSize, pid + i,
+ pid + i - 1);
+ }
+
+ for (int i = 0; i < 10; ++i) {
+ ExtractFrame();
+ time_controller_.AdvanceTime(TimeDelta::Millis(70));
+ }
+
+ CheckFrame(0, pid, 0);
+ CheckFrame(1, pid + 1, 0);
+ CheckFrame(2, pid + 2, 0);
+ CheckFrame(3, pid + 4, 0);
+ CheckFrame(4, pid + 6, 0);
+ CheckFrame(5, pid + 8, 0);
+ CheckNoFrame(6);
+ CheckNoFrame(7);
+ CheckNoFrame(8);
+ CheckNoFrame(9);
+}
+
+TEST_F(TestFrameBuffer2, DropFramesIfSystemIsStalled) {
+ uint16_t pid = Rand();
+ uint32_t ts = Rand();
+
+ InsertFrame(pid, 0, ts, true, kFrameSize);
+ InsertFrame(pid + 1, 0, ts + 1 * kFps10, true, kFrameSize, pid);
+ InsertFrame(pid + 2, 0, ts + 2 * kFps10, true, kFrameSize, pid + 1);
+ InsertFrame(pid + 3, 0, ts + 3 * kFps10, true, kFrameSize);
+
+ ExtractFrame();
+ // Jump forward in time, simulating the system being stalled for some reason.
+ time_controller_.AdvanceTime(TimeDelta::Millis(3) * kFps10);
+ // Extract one more frame, expect second and third frame to be dropped.
+ ExtractFrame();
+
+ CheckFrame(0, pid + 0, 0);
+ CheckFrame(1, pid + 3, 0);
+}
+
+TEST_F(TestFrameBuffer2, DroppedFramesCountedOnClear) {
+ uint16_t pid = Rand();
+ uint32_t ts = Rand();
+
+ InsertFrame(pid, 0, ts, true, kFrameSize);
+ for (int i = 1; i < 5; ++i) {
+ InsertFrame(pid + i, 0, ts + i * kFps10, true, kFrameSize, pid + i - 1);
+ }
+
+ // All frames should be dropped when Clear is called.
+ buffer_->Clear();
+}
+
+TEST_F(TestFrameBuffer2, InsertLateFrame) {
+ uint16_t pid = Rand();
+ uint32_t ts = Rand();
+
+ InsertFrame(pid, 0, ts, true, kFrameSize);
+ ExtractFrame();
+ InsertFrame(pid + 2, 0, ts, true, kFrameSize);
+ ExtractFrame();
+ InsertFrame(pid + 1, 0, ts, true, kFrameSize, pid);
+ ExtractFrame();
+
+ CheckFrame(0, pid, 0);
+ CheckFrame(1, pid + 2, 0);
+ CheckNoFrame(2);
+}
+
+TEST_F(TestFrameBuffer2, ProtectionModeNackFEC) {
+ uint16_t pid = Rand();
+ uint32_t ts = Rand();
+ constexpr int64_t kRttMs = 200;
+ buffer_->UpdateRtt(kRttMs);
+
+ // Jitter estimate unaffected by RTT in this protection mode.
+ buffer_->SetProtectionMode(kProtectionNackFEC);
+ InsertNackedFrame(pid, ts);
+ InsertNackedFrame(pid + 1, ts + 100);
+ InsertNackedFrame(pid + 2, ts + 200);
+ InsertFrame(pid + 3, 0, ts + 300, true, kFrameSize);
+ ExtractFrame();
+ ExtractFrame();
+ ExtractFrame();
+ ExtractFrame();
+ ASSERT_EQ(4u, frames_.size());
+ EXPECT_LT(timing_.GetCurrentJitter().ms(), kRttMs);
+}
+
+TEST_F(TestFrameBuffer2, NoContinuousFrame) {
+ uint16_t pid = Rand();
+ uint32_t ts = Rand();
+
+ EXPECT_EQ(-1, InsertFrame(pid + 1, 0, ts, true, kFrameSize, pid));
+}
+
+TEST_F(TestFrameBuffer2, LastContinuousFrameSingleLayer) {
+ uint16_t pid = Rand();
+ uint32_t ts = Rand();
+
+ EXPECT_EQ(pid, InsertFrame(pid, 0, ts, true, kFrameSize));
+ EXPECT_EQ(pid, InsertFrame(pid + 2, 0, ts, true, kFrameSize, pid + 1));
+ EXPECT_EQ(pid + 2, InsertFrame(pid + 1, 0, ts, true, kFrameSize, pid));
+ EXPECT_EQ(pid + 2, InsertFrame(pid + 4, 0, ts, true, kFrameSize, pid + 3));
+ EXPECT_EQ(pid + 5, InsertFrame(pid + 5, 0, ts, true, kFrameSize));
+}
+
+TEST_F(TestFrameBuffer2, LastContinuousFrameTwoLayers) {
+ uint16_t pid = Rand();
+ uint32_t ts = Rand();
+
+ EXPECT_EQ(pid, InsertFrame(pid, 0, ts, false, kFrameSize));
+ EXPECT_EQ(pid + 1, InsertFrame(pid + 1, 1, ts, true, kFrameSize));
+ EXPECT_EQ(pid + 1,
+ InsertFrame(pid + 3, 1, ts, true, kFrameSize, pid + 1, pid + 2));
+ EXPECT_EQ(pid + 1, InsertFrame(pid + 4, 0, ts, false, kFrameSize, pid + 2));
+ EXPECT_EQ(pid + 1,
+ InsertFrame(pid + 5, 1, ts, true, kFrameSize, pid + 3, pid + 4));
+ EXPECT_EQ(pid + 1, InsertFrame(pid + 6, 0, ts, false, kFrameSize, pid + 4));
+ EXPECT_EQ(pid + 6, InsertFrame(pid + 2, 0, ts, false, kFrameSize, pid));
+ EXPECT_EQ(pid + 7,
+ InsertFrame(pid + 7, 1, ts, true, kFrameSize, pid + 5, pid + 6));
+}
+
+TEST_F(TestFrameBuffer2, PictureIdJumpBack) {
+ uint16_t pid = Rand();
+ uint32_t ts = Rand();
+
+ EXPECT_EQ(pid, InsertFrame(pid, 0, ts, true, kFrameSize));
+ EXPECT_EQ(pid + 1, InsertFrame(pid + 1, 0, ts + 1, true, kFrameSize, pid));
+ ExtractFrame();
+ CheckFrame(0, pid, 0);
+
+ // Jump back in pid but increase ts.
+ EXPECT_EQ(pid - 1, InsertFrame(pid - 1, 0, ts + 2, true, kFrameSize));
+ ExtractFrame();
+ ExtractFrame();
+ CheckFrame(1, pid - 1, 0);
+ CheckNoFrame(2);
+}
+
+TEST_F(TestFrameBuffer2, ForwardJumps) {
+ EXPECT_EQ(5453, InsertFrame(5453, 0, 1, true, kFrameSize));
+ ExtractFrame();
+ EXPECT_EQ(5454, InsertFrame(5454, 0, 1, true, kFrameSize, 5453));
+ ExtractFrame();
+ EXPECT_EQ(15670, InsertFrame(15670, 0, 1, true, kFrameSize));
+ ExtractFrame();
+ EXPECT_EQ(29804, InsertFrame(29804, 0, 1, true, kFrameSize));
+ ExtractFrame();
+ EXPECT_EQ(29805, InsertFrame(29805, 0, 1, true, kFrameSize, 29804));
+ ExtractFrame();
+ EXPECT_EQ(29806, InsertFrame(29806, 0, 1, true, kFrameSize, 29805));
+ ExtractFrame();
+ EXPECT_EQ(33819, InsertFrame(33819, 0, 1, true, kFrameSize));
+ ExtractFrame();
+ EXPECT_EQ(41248, InsertFrame(41248, 0, 1, true, kFrameSize));
+ ExtractFrame();
+}
+
+TEST_F(TestFrameBuffer2, DuplicateFrames) {
+ EXPECT_EQ(22256, InsertFrame(22256, 0, 1, true, kFrameSize));
+ ExtractFrame();
+ EXPECT_EQ(22256, InsertFrame(22256, 0, 1, true, kFrameSize));
+}
+
+// TODO(philipel): implement more unittests related to invalid references.
+TEST_F(TestFrameBuffer2, InvalidReferences) {
+ EXPECT_EQ(-1, InsertFrame(0, 0, 1000, true, kFrameSize, 2));
+ EXPECT_EQ(1, InsertFrame(1, 0, 2000, true, kFrameSize));
+ ExtractFrame();
+ EXPECT_EQ(2, InsertFrame(2, 0, 3000, true, kFrameSize, 1));
+}
+
+TEST_F(TestFrameBuffer2, KeyframeRequired) {
+ EXPECT_EQ(1, InsertFrame(1, 0, 1000, true, kFrameSize));
+ EXPECT_EQ(2, InsertFrame(2, 0, 2000, true, kFrameSize, 1));
+ EXPECT_EQ(3, InsertFrame(3, 0, 3000, true, kFrameSize));
+ ExtractFrame();
+ ExtractFrame(0, true);
+ ExtractFrame();
+
+ CheckFrame(0, 1, 0);
+ CheckFrame(1, 3, 0);
+ CheckNoFrame(2);
+}
+
+TEST_F(TestFrameBuffer2, KeyframeClearsFullBuffer) {
+ const int kMaxBufferSize = 600;
+
+ for (int i = 1; i <= kMaxBufferSize; ++i)
+ EXPECT_EQ(-1, InsertFrame(i, 0, i * 1000, true, kFrameSize, i - 1));
+ ExtractFrame();
+ CheckNoFrame(0);
+
+ EXPECT_EQ(kMaxBufferSize + 1,
+ InsertFrame(kMaxBufferSize + 1, 0, (kMaxBufferSize + 1) * 1000,
+ true, kFrameSize));
+ ExtractFrame();
+ CheckFrame(1, kMaxBufferSize + 1, 0);
+}
+
+TEST_F(TestFrameBuffer2, DontUpdateOnUndecodableFrame) {
+ InsertFrame(1, 0, 0, true, kFrameSize);
+ ExtractFrame(0, true);
+ InsertFrame(3, 0, 0, true, kFrameSize, 2, 0);
+ InsertFrame(3, 0, 0, true, kFrameSize, 0);
+ InsertFrame(2, 0, 0, true, kFrameSize);
+ ExtractFrame(0, true);
+ ExtractFrame(0, true);
+}
+
+TEST_F(TestFrameBuffer2, DontDecodeOlderTimestamp) {
+ InsertFrame(2, 0, 1, true, kFrameSize);
+ InsertFrame(1, 0, 2, true,
+ kFrameSize); // Older picture id but newer timestamp.
+ ExtractFrame(0);
+ ExtractFrame(0);
+ CheckFrame(0, 1, 0);
+ CheckNoFrame(1);
+
+ InsertFrame(3, 0, 4, true, kFrameSize);
+ InsertFrame(4, 0, 3, true,
+ kFrameSize); // Newer picture id but older timestamp.
+ ExtractFrame(0);
+ ExtractFrame(0);
+ CheckFrame(2, 3, 0);
+ CheckNoFrame(3);
+}
+
+TEST_F(TestFrameBuffer2, CombineFramesToSuperframe) {
+ uint16_t pid = Rand();
+ uint32_t ts = Rand();
+
+ InsertFrame(pid, 0, ts, false, kFrameSize);
+ InsertFrame(pid + 1, 1, ts, true, 2 * kFrameSize, pid);
+ ExtractFrame(0);
+ ExtractFrame(0);
+ CheckFrame(0, pid, 1);
+ CheckNoFrame(1);
+ // Two frames should be combined and returned together.
+ CheckFrameSize(0, 3 * kFrameSize);
+
+ EXPECT_EQ(frames_[0]->SpatialIndex(), 1);
+ EXPECT_EQ(frames_[0]->SpatialLayerFrameSize(0), kFrameSize);
+ EXPECT_EQ(frames_[0]->SpatialLayerFrameSize(1), 2 * kFrameSize);
+}
+
+TEST_F(TestFrameBuffer2, HigherSpatialLayerNonDecodable) {
+ uint16_t pid = Rand();
+ uint32_t ts = Rand();
+
+ InsertFrame(pid, 0, ts, false, kFrameSize);
+ InsertFrame(pid + 1, 1, ts, true, kFrameSize, pid);
+
+ ExtractFrame(0);
+ CheckFrame(0, pid, 1);
+
+ InsertFrame(pid + 3, 1, ts + kFps20, true, kFrameSize, pid);
+ InsertFrame(pid + 4, 0, ts + kFps10, false, kFrameSize, pid);
+ InsertFrame(pid + 5, 1, ts + kFps10, true, kFrameSize, pid + 3, pid + 4);
+
+ time_controller_.AdvanceTime(TimeDelta::Millis(1000));
+  // Frame pid+3 is decodable but arrives too late.
+  // In the next superframe, pid+4 is decodable, but frame pid+5 is not.
+  // An incorrect implementation might skip frame pid+3 and output the
+  // undecodable pid+5 instead.
+ ExtractFrame();
+ ExtractFrame();
+ CheckFrame(1, pid + 3, 1);
+ CheckFrame(2, pid + 4, 1);
+}
+
+TEST_F(TestFrameBuffer2, StopWhileWaitingForFrame) {
+ uint16_t pid = Rand();
+ uint32_t ts = Rand();
+
+ InsertFrame(pid, 0, ts, true, kFrameSize);
+ ExtractFrame(10);
+ buffer_->Stop();
+ time_controller_.AdvanceTime(TimeDelta::Millis(10));
+ EXPECT_THAT(frames_, IsEmpty());
+
+ // A new frame request should exit immediately and return no new frame.
+ ExtractFrame(0);
+ EXPECT_THAT(frames_, IsEmpty());
+}
+
+} // namespace video_coding
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/frame_dependencies_calculator.cc b/third_party/libwebrtc/modules/video_coding/frame_dependencies_calculator.cc
new file mode 100644
index 0000000000..7ca59f779a
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/frame_dependencies_calculator.cc
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/video_coding/frame_dependencies_calculator.h"
+
+#include <stdint.h>
+
+#include <iterator>
+#include <set>
+
+#include "absl/algorithm/container.h"
+#include "absl/container/inlined_vector.h"
+#include "api/array_view.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+absl::InlinedVector<int64_t, 5> FrameDependenciesCalculator::FromBuffersUsage(
+ int64_t frame_id,
+ rtc::ArrayView<const CodecBufferUsage> buffers_usage) {
+ absl::InlinedVector<int64_t, 5> dependencies;
+ RTC_DCHECK_GT(buffers_usage.size(), 0);
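+  // Grow the tracked-buffer list so that every buffer id referenced or
+  // updated by this frame has a slot.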
+ for (const CodecBufferUsage& buffer_usage : buffers_usage) {
+ RTC_CHECK_GE(buffer_usage.id, 0);
+ if (buffers_.size() <= static_cast<size_t>(buffer_usage.id)) {
+ buffers_.resize(buffer_usage.id + 1);
+ }
+ }
+  std::set<int64_t> direct_dependencies;
+  std::set<int64_t> indirect_dependencies;
+
+ for (const CodecBufferUsage& buffer_usage : buffers_usage) {
+ if (!buffer_usage.referenced) {
+ continue;
+ }
+ const BufferUsage& buffer = buffers_[buffer_usage.id];
+ if (buffer.frame_id == absl::nullopt) {
+ RTC_LOG(LS_ERROR) << "Odd configuration: frame " << frame_id
+ << " references buffer #" << buffer_usage.id
+ << " that was never updated.";
+ continue;
+ }
+    direct_dependencies.insert(*buffer.frame_id);
+    indirect_dependencies.insert(buffer.dependencies.begin(),
+                                 buffer.dependencies.end());
+ }
+ // Reduce references: if frame #3 depends on frame #2 and #1, and frame #2
+ // depends on frame #1, then frame #3 needs to depend just on frame #2.
+ // Though this set diff removes only 1 level of indirection, it seems
+ // enough for all currently used structures.
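+  // Worked example with hypothetical ids: direct = {1, 2} and indirect = {1}
+  // yields dependencies = {2}.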
+  absl::c_set_difference(direct_dependencies, indirect_dependencies,
+                         std::back_inserter(dependencies));
+
+ // Update buffers.
+ for (const CodecBufferUsage& buffer_usage : buffers_usage) {
+ if (!buffer_usage.updated) {
+ continue;
+ }
+ BufferUsage& buffer = buffers_[buffer_usage.id];
+ buffer.frame_id = frame_id;
+    buffer.dependencies.assign(direct_dependencies.begin(),
+                               direct_dependencies.end());
+ }
+
+ return dependencies;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/frame_dependencies_calculator.h b/third_party/libwebrtc/modules/video_coding/frame_dependencies_calculator.h
new file mode 100644
index 0000000000..2c4a8502e1
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/frame_dependencies_calculator.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_FRAME_DEPENDENCIES_CALCULATOR_H_
+#define MODULES_VIDEO_CODING_FRAME_DEPENDENCIES_CALCULATOR_H_
+
+#include <stdint.h>
+
+#include <vector>
+
+#include "absl/container/inlined_vector.h"
+#include "absl/types/optional.h"
+#include "api/array_view.h"
+#include "common_video/generic_frame_descriptor/generic_frame_info.h"
+
+namespace webrtc {
+
+// This class is thread compatible.
+class FrameDependenciesCalculator {
+ public:
+ FrameDependenciesCalculator() = default;
+ FrameDependenciesCalculator(const FrameDependenciesCalculator&) = default;
+ FrameDependenciesCalculator& operator=(const FrameDependenciesCalculator&) =
+ default;
+
+ // Calculates frame dependencies based on previous encoder buffer usage.
+ absl::InlinedVector<int64_t, 5> FromBuffersUsage(
+ int64_t frame_id,
+ rtc::ArrayView<const CodecBufferUsage> buffers_usage);
+
+ private:
+ struct BufferUsage {
+ absl::optional<int64_t> frame_id;
+ absl::InlinedVector<int64_t, 4> dependencies;
+ };
+
+ absl::InlinedVector<BufferUsage, 4> buffers_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_FRAME_DEPENDENCIES_CALCULATOR_H_
diff --git a/third_party/libwebrtc/modules/video_coding/frame_dependencies_calculator_gn/moz.build b/third_party/libwebrtc/modules/video_coding/frame_dependencies_calculator_gn/moz.build
new file mode 100644
index 0000000000..b39c49bdad
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/frame_dependencies_calculator_gn/moz.build
@@ -0,0 +1,205 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/video_coding/frame_dependencies_calculator.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("frame_dependencies_calculator_gn")
diff --git a/third_party/libwebrtc/modules/video_coding/frame_dependencies_calculator_unittest.cc b/third_party/libwebrtc/modules/video_coding/frame_dependencies_calculator_unittest.cc
new file mode 100644
index 0000000000..a09650401a
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/frame_dependencies_calculator_unittest.cc
@@ -0,0 +1,121 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/frame_dependencies_calculator.h"
+
+#include "common_video/generic_frame_descriptor/generic_frame_info.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+using ::testing::ElementsAre;
+using ::testing::IsEmpty;
+using ::testing::UnorderedElementsAre;
+
+constexpr CodecBufferUsage ReferenceAndUpdate(int id) {
+ return CodecBufferUsage(id, /*referenced=*/true, /*updated=*/true);
+}
+constexpr CodecBufferUsage Reference(int id) {
+ return CodecBufferUsage(id, /*referenced=*/true, /*updated=*/false);
+}
+constexpr CodecBufferUsage Update(int id) {
+ return CodecBufferUsage(id, /*referenced=*/false, /*updated=*/true);
+}
+
+TEST(FrameDependenciesCalculatorTest, SingleLayer) {
+ CodecBufferUsage pattern[] = {ReferenceAndUpdate(0)};
+ FrameDependenciesCalculator calculator;
+
+ EXPECT_THAT(calculator.FromBuffersUsage(/*frame_id=*/1, pattern), IsEmpty());
+ EXPECT_THAT(calculator.FromBuffersUsage(/*frame_id=*/3, pattern),
+ ElementsAre(1));
+ EXPECT_THAT(calculator.FromBuffersUsage(/*frame_id=*/6, pattern),
+ ElementsAre(3));
+}
+
+TEST(FrameDependenciesCalculatorTest, TwoTemporalLayers) {
+ // Shortened 4-frame pattern:
+ // T1: 2---4 6---8 ...
+ // / / / /
+ // T0: 1---3---5---7 ...
+ CodecBufferUsage pattern0[] = {ReferenceAndUpdate(0)};
+ CodecBufferUsage pattern1[] = {Reference(0), Update(1)};
+ CodecBufferUsage pattern2[] = {ReferenceAndUpdate(0)};
+ CodecBufferUsage pattern3[] = {Reference(0), Reference(1)};
+ FrameDependenciesCalculator calculator;
+
+ EXPECT_THAT(calculator.FromBuffersUsage(/*frame_id=*/1, pattern0), IsEmpty());
+ EXPECT_THAT(calculator.FromBuffersUsage(/*frame_id=*/2, pattern1),
+ ElementsAre(1));
+ EXPECT_THAT(calculator.FromBuffersUsage(/*frame_id=*/3, pattern2),
+ ElementsAre(1));
+ EXPECT_THAT(calculator.FromBuffersUsage(/*frame_id=*/4, pattern3),
+ UnorderedElementsAre(2, 3));
+ EXPECT_THAT(calculator.FromBuffersUsage(/*frame_id=*/5, pattern0),
+ ElementsAre(3));
+ EXPECT_THAT(calculator.FromBuffersUsage(/*frame_id=*/6, pattern1),
+ ElementsAre(5));
+ EXPECT_THAT(calculator.FromBuffersUsage(/*frame_id=*/7, pattern2),
+ ElementsAre(5));
+ EXPECT_THAT(calculator.FromBuffersUsage(/*frame_id=*/8, pattern3),
+ UnorderedElementsAre(6, 7));
+}
+
+TEST(FrameDependenciesCalculatorTest, ThreeTemporalLayers4FramePattern) {
+ // T2: 2---4 6---8 ...
+ // / / / /
+ // T1: | 3 | 7 ...
+ // /_/ /_/
+ // T0: 1-------5----- ...
+ CodecBufferUsage pattern0[] = {ReferenceAndUpdate(0)};
+ CodecBufferUsage pattern1[] = {Reference(0), Update(2)};
+ CodecBufferUsage pattern2[] = {Reference(0), Update(1)};
+ CodecBufferUsage pattern3[] = {Reference(0), Reference(1), Reference(2)};
+ FrameDependenciesCalculator calculator;
+
+ EXPECT_THAT(calculator.FromBuffersUsage(/*frame_id=*/1, pattern0), IsEmpty());
+ EXPECT_THAT(calculator.FromBuffersUsage(/*frame_id=*/2, pattern1),
+ ElementsAre(1));
+ EXPECT_THAT(calculator.FromBuffersUsage(/*frame_id=*/3, pattern2),
+ ElementsAre(1));
+ // Note that frame#4 references buffer#0 that is updated by frame#1,
+ // yet there is no direct dependency from frame#4 to frame#1.
+ EXPECT_THAT(calculator.FromBuffersUsage(/*frame_id=*/4, pattern3),
+ UnorderedElementsAre(2, 3));
+ EXPECT_THAT(calculator.FromBuffersUsage(/*frame_id=*/5, pattern0),
+ ElementsAre(1));
+ EXPECT_THAT(calculator.FromBuffersUsage(/*frame_id=*/6, pattern1),
+ ElementsAre(5));
+}
+
+TEST(FrameDependenciesCalculatorTest, SimulcastWith2Layers) {
+ // S1: 2---4---6- ...
+ //
+ // S0: 1---3---5- ...
+ CodecBufferUsage pattern0[] = {ReferenceAndUpdate(0)};
+ CodecBufferUsage pattern1[] = {ReferenceAndUpdate(1)};
+ FrameDependenciesCalculator calculator;
+
+ EXPECT_THAT(calculator.FromBuffersUsage(/*frame_id=*/1, pattern0), IsEmpty());
+ EXPECT_THAT(calculator.FromBuffersUsage(/*frame_id=*/2, pattern1), IsEmpty());
+ EXPECT_THAT(calculator.FromBuffersUsage(/*frame_id=*/3, pattern0),
+ ElementsAre(1));
+ EXPECT_THAT(calculator.FromBuffersUsage(/*frame_id=*/4, pattern1),
+ ElementsAre(2));
+ EXPECT_THAT(calculator.FromBuffersUsage(/*frame_id=*/5, pattern0),
+ ElementsAre(3));
+ EXPECT_THAT(calculator.FromBuffersUsage(/*frame_id=*/6, pattern1),
+ ElementsAre(4));
+}
+
+} // namespace
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/frame_helpers.cc b/third_party/libwebrtc/modules/video_coding/frame_helpers.cc
new file mode 100644
index 0000000000..08b47ef547
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/frame_helpers.cc
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/frame_helpers.h"
+
+#include <utility>
+
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+bool FrameHasBadRenderTiming(Timestamp render_time,
+ Timestamp now,
+ TimeDelta target_video_delay) {
+ // Zero render time means render immediately.
+ if (render_time.IsZero()) {
+ return false;
+ }
+ if (render_time < Timestamp::Zero()) {
+ return true;
+ }
+ constexpr TimeDelta kMaxVideoDelay = TimeDelta::Millis(10000);
+ TimeDelta frame_delay = (render_time - now).Abs();
+ if (frame_delay > kMaxVideoDelay) {
+ RTC_LOG(LS_WARNING)
+ << "A frame about to be decoded is out of the configured "
+ "delay bounds ("
+ << frame_delay.ms() << " > " << kMaxVideoDelay.ms()
+ << "). Resetting the video jitter buffer.";
+ return true;
+ }
+ if (target_video_delay > kMaxVideoDelay) {
+ RTC_LOG(LS_WARNING) << "The video target delay has grown larger than "
+ << kMaxVideoDelay.ms() << " ms.";
+ return true;
+ }
+ return false;
+}
+
+std::unique_ptr<EncodedFrame> CombineAndDeleteFrames(
+ absl::InlinedVector<std::unique_ptr<EncodedFrame>, 4> frames) {
+ RTC_DCHECK(!frames.empty());
+
+ if (frames.size() == 1) {
+ return std::move(frames[0]);
+ }
+
+ size_t total_length = 0;
+ for (const auto& frame : frames) {
+ total_length += frame->size();
+ }
+ const EncodedFrame& last_frame = *frames.back();
+ std::unique_ptr<EncodedFrame> first_frame = std::move(frames[0]);
+ auto encoded_image_buffer = EncodedImageBuffer::Create(total_length);
+ uint8_t* buffer = encoded_image_buffer->data();
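+  // Payloads are concatenated back-to-back into one buffer; per-layer sizes
+  // are recorded via SetSpatialLayerFrameSize() so each layer can be located.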
+ first_frame->SetSpatialLayerFrameSize(first_frame->SpatialIndex().value_or(0),
+ first_frame->size());
+ memcpy(buffer, first_frame->data(), first_frame->size());
+ buffer += first_frame->size();
+
+ // Spatial index of combined frame is set equal to spatial index of its top
+ // spatial layer.
+ first_frame->SetSpatialIndex(last_frame.SpatialIndex().value_or(0));
+
+ first_frame->video_timing_mutable()->network2_timestamp_ms =
+ last_frame.video_timing().network2_timestamp_ms;
+ first_frame->video_timing_mutable()->receive_finish_ms =
+ last_frame.video_timing().receive_finish_ms;
+
+ // Append all remaining frames to the first one.
+ for (size_t i = 1; i < frames.size(); ++i) {
+ // Let |next_frame| fall out of scope so it is deleted after copying.
+ std::unique_ptr<EncodedFrame> next_frame = std::move(frames[i]);
+ first_frame->SetSpatialLayerFrameSize(
+ next_frame->SpatialIndex().value_or(0), next_frame->size());
+ memcpy(buffer, next_frame->data(), next_frame->size());
+ buffer += next_frame->size();
+ }
+ first_frame->SetEncodedData(encoded_image_buffer);
+ return first_frame;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/frame_helpers.h b/third_party/libwebrtc/modules/video_coding/frame_helpers.h
new file mode 100644
index 0000000000..b6d7b0f144
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/frame_helpers.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_FRAME_HELPERS_H_
+#define MODULES_VIDEO_CODING_FRAME_HELPERS_H_
+
+#include <memory>
+
+#include "absl/container/inlined_vector.h"
+#include "api/video/encoded_frame.h"
+
+namespace webrtc {
+
+bool FrameHasBadRenderTiming(Timestamp render_time,
+ Timestamp now,
+ TimeDelta target_video_delay);
+
+std::unique_ptr<EncodedFrame> CombineAndDeleteFrames(
+ absl::InlinedVector<std::unique_ptr<EncodedFrame>, 4> frames);
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_FRAME_HELPERS_H_
diff --git a/third_party/libwebrtc/modules/video_coding/frame_helpers_gn/moz.build b/third_party/libwebrtc/modules/video_coding/frame_helpers_gn/moz.build
new file mode 100644
index 0000000000..34db2964e2
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/frame_helpers_gn/moz.build
@@ -0,0 +1,214 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/video_coding/frame_helpers.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "GLESv2",
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "dl",
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("frame_helpers_gn")
diff --git a/third_party/libwebrtc/modules/video_coding/frame_object.cc b/third_party/libwebrtc/modules/video_coding/frame_object.cc
new file mode 100644
index 0000000000..d226dcd013
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/frame_object.cc
@@ -0,0 +1,131 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/frame_object.h"
+
+#include <string.h>
+
+#include <utility>
+
+#include "api/video/encoded_image.h"
+#include "api/video/video_timing.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+RtpFrameObject::RtpFrameObject(
+ uint16_t first_seq_num,
+ uint16_t last_seq_num,
+ bool markerBit,
+ int times_nacked,
+ int64_t first_packet_received_time,
+ int64_t last_packet_received_time,
+ uint32_t rtp_timestamp,
+ int64_t ntp_time_ms,
+ const VideoSendTiming& timing,
+ uint8_t payload_type,
+ VideoCodecType codec,
+ VideoRotation rotation,
+ VideoContentType content_type,
+ const RTPVideoHeader& video_header,
+ const absl::optional<webrtc::ColorSpace>& color_space,
+ RtpPacketInfos packet_infos,
+ rtc::scoped_refptr<EncodedImageBuffer> image_buffer)
+ : image_buffer_(image_buffer),
+ first_seq_num_(first_seq_num),
+ last_seq_num_(last_seq_num),
+ last_packet_received_time_(last_packet_received_time),
+ times_nacked_(times_nacked) {
+ rtp_video_header_ = video_header;
+
+ // EncodedFrame members
+ codec_type_ = codec;
+
+ // TODO(philipel): Remove when encoded image is replaced by EncodedFrame.
+ // VCMEncodedFrame members
+ CopyCodecSpecific(&rtp_video_header_);
+ _payloadType = payload_type;
+ SetTimestamp(rtp_timestamp);
+ ntp_time_ms_ = ntp_time_ms;
+ _frameType = rtp_video_header_.frame_type;
+
+  // Set the frame's playout delays to the same values as those of the
+  // first packet.
+ SetPlayoutDelay(rtp_video_header_.playout_delay);
+
+ SetEncodedData(image_buffer_);
+ _encodedWidth = rtp_video_header_.width;
+ _encodedHeight = rtp_video_header_.height;
+
+ // EncodedFrame members
+ SetPacketInfos(std::move(packet_infos));
+
+ rotation_ = rotation;
+ SetColorSpace(color_space);
+ SetVideoFrameTrackingId(rtp_video_header_.video_frame_tracking_id);
+ content_type_ = content_type;
+ if (timing.flags != VideoSendTiming::kInvalid) {
+ // ntp_time_ms_ may be -1 if not estimated yet. This is not a problem,
+ // as this will be dealt with at the time of reporting.
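+    // The VideoSendTiming fields are deltas relative to capture time, so
+    // adding the frame's NTP capture timestamp converts them to absolute
+    // wall-clock milliseconds.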
+ timing_.encode_start_ms = ntp_time_ms_ + timing.encode_start_delta_ms;
+ timing_.encode_finish_ms = ntp_time_ms_ + timing.encode_finish_delta_ms;
+ timing_.packetization_finish_ms =
+ ntp_time_ms_ + timing.packetization_finish_delta_ms;
+ timing_.pacer_exit_ms = ntp_time_ms_ + timing.pacer_exit_delta_ms;
+ timing_.network_timestamp_ms =
+ ntp_time_ms_ + timing.network_timestamp_delta_ms;
+ timing_.network2_timestamp_ms =
+ ntp_time_ms_ + timing.network2_timestamp_delta_ms;
+ }
+ timing_.receive_start_ms = first_packet_received_time;
+ timing_.receive_finish_ms = last_packet_received_time;
+ timing_.flags = timing.flags;
+ is_last_spatial_layer = markerBit;
+}
+
+RtpFrameObject::~RtpFrameObject() {
+}
+
+uint16_t RtpFrameObject::first_seq_num() const {
+ return first_seq_num_;
+}
+
+uint16_t RtpFrameObject::last_seq_num() const {
+ return last_seq_num_;
+}
+
+int RtpFrameObject::times_nacked() const {
+ return times_nacked_;
+}
+
+VideoFrameType RtpFrameObject::frame_type() const {
+ return rtp_video_header_.frame_type;
+}
+
+VideoCodecType RtpFrameObject::codec_type() const {
+ return codec_type_;
+}
+
+int64_t RtpFrameObject::ReceivedTime() const {
+ return last_packet_received_time_;
+}
+
+int64_t RtpFrameObject::RenderTime() const {
+ return _renderTimeMs;
+}
+
+bool RtpFrameObject::delayed_by_retransmission() const {
+ return times_nacked() > 0;
+}
+
+const RTPVideoHeader& RtpFrameObject::GetRtpVideoHeader() const {
+ return rtp_video_header_;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/frame_object.h b/third_party/libwebrtc/modules/video_coding/frame_object.h
new file mode 100644
index 0000000000..c6f069f241
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/frame_object.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_FRAME_OBJECT_H_
+#define MODULES_VIDEO_CODING_FRAME_OBJECT_H_
+
+#include "absl/types/optional.h"
+#include "api/video/encoded_frame.h"
+
+namespace webrtc {
+
+class RtpFrameObject : public EncodedFrame {
+ public:
+ RtpFrameObject(uint16_t first_seq_num,
+ uint16_t last_seq_num,
+ bool markerBit,
+ int times_nacked,
+ int64_t first_packet_received_time,
+ int64_t last_packet_received_time,
+ uint32_t rtp_timestamp,
+ int64_t ntp_time_ms,
+ const VideoSendTiming& timing,
+ uint8_t payload_type,
+ VideoCodecType codec,
+ VideoRotation rotation,
+ VideoContentType content_type,
+ const RTPVideoHeader& video_header,
+ const absl::optional<webrtc::ColorSpace>& color_space,
+ RtpPacketInfos packet_infos,
+ rtc::scoped_refptr<EncodedImageBuffer> image_buffer);
+
+ ~RtpFrameObject() override;
+ uint16_t first_seq_num() const;
+ uint16_t last_seq_num() const;
+ int times_nacked() const;
+ VideoFrameType frame_type() const;
+ VideoCodecType codec_type() const;
+ int64_t ReceivedTime() const override;
+ int64_t RenderTime() const override;
+ bool delayed_by_retransmission() const override;
+ const RTPVideoHeader& GetRtpVideoHeader() const;
+
+ uint8_t* mutable_data() { return image_buffer_->data(); }
+
+ private:
+ // Reference for mutable access.
+ rtc::scoped_refptr<EncodedImageBuffer> image_buffer_;
+ RTPVideoHeader rtp_video_header_;
+ VideoCodecType codec_type_;
+ uint16_t first_seq_num_;
+ uint16_t last_seq_num_;
+ int64_t last_packet_received_time_;
+
+ // Equal to times nacked of the packet with the highet times nacked
+ // belonging to this frame.
+ int times_nacked_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_FRAME_OBJECT_H_
diff --git a/third_party/libwebrtc/modules/video_coding/g3doc/index.md b/third_party/libwebrtc/modules/video_coding/g3doc/index.md
new file mode 100644
index 0000000000..2e5695b715
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/g3doc/index.md
@@ -0,0 +1,177 @@
+<?% config.freshness.owner = 'brandtr' %?>
+<?% config.freshness.reviewed = '2021-04-15' %?>
+
+# Video coding in WebRTC
+
+## Introduction to layered video coding
+
+[Video coding][video-coding-wiki] is the process of encoding a stream of
+uncompressed video frames into a compressed bitstream, whose bitrate is lower
+than that of the original stream.
+
+### Block-based hybrid video coding
+
+All video codecs in WebRTC are based on the block-based hybrid video coding
+paradigm, which entails prediction of the original video frame using either
+[information from previously encoded frames][motion-compensation-wiki] or
+information from previously encoded portions of the current frame, subtraction
+of the prediction from the original video, and
+[transform][transform-coding-wiki] and [quantization][quantization-wiki] of the
+resulting difference. The output of the quantization process, quantized
+transform coefficients, is losslessly [entropy coded][entropy-coding-wiki] along
+with other encoder parameters (e.g., those related to the prediction process)
+and then a reconstruction is formed by inverse quantizing and inverse
+transforming the quantized transform coefficients and adding the result to the
+prediction. Finally, in-loop filtering is applied and the resulting
+reconstruction is stored as a reference frame to be used to develop predictions
+for future frames.
+
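+As a rough illustration of this loop (a toy 1-D sketch, not WebRTC encoder
+code; real codecs operate on 2-D blocks with motion compensation and a real
+transform), consider:
+
+```cpp
+#include <cstdio>
+#include <vector>
+
+// Toy "hybrid coder": predict each sample from the previous reconstruction,
+// quantize the residual, and rebuild the reconstruction exactly as a decoder
+// would, so encoder and decoder references stay in sync.
+int main() {
+  const std::vector<int> source = {100, 104, 109, 111, 140, 143};
+  const int step = 4;  // Quantizer step size.
+  int prediction = 0;  // Reference sample; empty at the start, like a key frame.
+  for (int sample : source) {
+    int residual = sample - prediction;  // Prediction error.
+    int quantized = residual >= 0 ? (residual + step / 2) / step
+                                  : (residual - step / 2) / step;  // Quantize.
+    int reconstruction = prediction + quantized * step;  // Decoder-side rebuild.
+    std::printf("in=%d pred=%d q=%d recon=%d\n", sample, prediction,
+                quantized, reconstruction);
+    prediction = reconstruction;  // Reference for the next sample.
+  }
+  return 0;
+}
+```
+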
+### Frame types
+
+When an encoded frame depends on previously encoded frames (i.e., it has one or
+more inter-frame dependencies), the prior frames must be available at the
+receiver before the current frame can be decoded. In order for a receiver to
+start decoding an encoded bitstream, a frame which has no prior dependencies is
+required. Such a frame is called a "key frame". For real-time-communications
+encoding, key frames typically compress less efficiently than "delta frames"
+(i.e., frames whose predictions are derived from previously encoded frames).
+
+### Single-layer coding
+
+In 1:1 calls, the encoded bitstream has a single recipient. Using end-to-end
+bandwidth estimation, the target bitrate can thus be well tailored for the
+intended recipient. The number of key frames can be kept to a minimum and the
+compressibility of the stream can be maximized. One way of achieving this is by
+using "single-layer coding", where each delta frame only depends on the frame
+that was most recently encoded.
+
+### Scalable video coding
+
+In multiway conferences, on the other hand, the encoded bitstream has multiple
+recipients each of whom may have different downlink bandwidths. In order to
+tailor the encoded bitstreams to a heterogeneous network of receivers,
+[scalable video coding][svc-wiki] can be used. The idea is to introduce
+structure into the dependency graph of the encoded bitstream, such that _layers_ of
+the full stream can be decoded using only available lower layers. This structure
+allows a [selective forwarding unit][sfu-webrtc-glossary] to discard the upper
+layers of the bitstream in order to achieve the intended downlink
+bandwidth.
+
+There are multiple types of scalability:
+
+* _Temporal scalability_: layers whose framerate (and bitrate) is lower than that of the upper layer(s)
+* _Spatial scalability_: layers whose resolution (and bitrate) is lower than that of the upper layer(s)
+* _Quality scalability_: layers whose bitrate is lower than that of the upper layer(s)
+
+WebRTC supports temporal scalability for `VP8`, `VP9` and `AV1`, and spatial
+scalability for `VP9` and `AV1`.
+
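+For intuition, here is a sketch of how frames map to temporal layers in a
+dyadic two-layer structure (the kind of pattern the `L1T2` scalability mode
+describes; illustrative code, not a WebRTC API):
+
+```cpp
+#include <cstdio>
+
+// Dyadic two-temporal-layer assignment: even frames form the base layer
+// (T0), odd frames the enhancement layer (T1). A forwarding unit can drop
+// every T1 frame to halve the framerate while T0 remains decodable, since
+// T0 frames only reference earlier T0 frames.
+int TemporalLayerId(int frame_index) { return frame_index % 2; }
+
+int main() {
+  for (int i = 0; i < 8; ++i)
+    std::printf("frame %d -> T%d\n", i, TemporalLayerId(i));
+  return 0;
+}
+```
+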
+### Simulcast
+
+Simulcast is another approach for multiway conferencing, where multiple
+_independent_ bitstreams are produced by the encoder.
+
+In cases where multiple encodings of the same source are required (e.g., uplink
+transmission in a multiway call), spatial scalability with inter-layer
+prediction generally offers superior coding efficiency compared with simulcast.
+When a single encoding is required (e.g., downlink transmission in any call),
+simulcast generally provides better coding efficiency for the upper spatial
+layers. The `K-SVC` concept, where spatial inter-layer dependencies are only
+used to encode key frames, for which inter-layer prediction is typically
+significantly more effective than it is for delta frames, can be seen as a
+compromise between full spatial scalability and simulcast.
+
+## Overview of implementation in `modules/video_coding`
+
+Given the general introduction to video coding above, we now describe some
+specifics of the [`modules/video_coding`][modules-video-coding] folder in WebRTC.
+
+### Built-in software codecs in [`modules/video_coding/codecs`][modules-video-coding-codecs]
+
+This folder contains WebRTC-specific classes that wrap software codec
+implementations for different video coding standards:
+
+* [libaom][libaom-src] for [AV1][av1-spec]
+* [libvpx][libvpx-src] for [VP8][vp8-spec] and [VP9][vp9-spec]
+* [OpenH264][openh264-src] for [H.264 constrained baseline profile][h264-spec]
+
+Users of the library can also inject their own codecs, using the
+[VideoEncoderFactory][video-encoder-factory-interface] and
+[VideoDecoderFactory][video-decoder-factory-interface] interfaces. This is how
+platform-supported codecs, such as hardware backed codecs, are implemented.
+
+### Video codec test framework in [`modules/video_coding/codecs/test`][modules-video-coding-codecs-test]
+
+This folder contains a test framework that can be used to evaluate video quality
+performance of different video codec implementations.
+
+### SVC helper classes in [`modules/video_coding/svc`][modules-video-coding-svc]
+
+* [`ScalabilityStructure*`][scalabilitystructure] - different
+ [standardized scalability structures][scalability-structure-spec]
+* [`ScalableVideoController`][scalablevideocontroller] - provides instructions to the video encoder on
+  how to create a scalable stream (see the usage sketch below)
+* [`SvcRateAllocator`][svcrateallocator] - bitrate allocation to different spatial and temporal
+ layers
+
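+As a rough usage sketch (the names follow the headers linked above, but treat
+the exact signatures as assumptions; they have changed across revisions):
+
+```cpp
+#include <memory>
+
+#include "modules/video_coding/svc/create_scalability_structure.h"
+
+// Hedged sketch: drive an encoder with an L1T3 structure (1 spatial layer,
+// 3 temporal layers). Depending on the revision, CreateScalabilityStructure()
+// takes either a name string ("L1T3") or a ScalabilityMode enum value.
+void EncodeLoopSketch() {
+  std::unique_ptr<webrtc::ScalableVideoController> controller =
+      webrtc::CreateScalabilityStructure(webrtc::ScalabilityMode::kL1T3);
+  // One LayerFrameConfig per frame to encode for the next temporal unit.
+  for (auto& config : controller->NextFrameConfig(/*restart=*/true)) {
+    // ...pass `config` to the encoder, then report the result back so the
+    // controller can emit dependency information for the RTP packetizer.
+    controller->OnEncodeDone(config);
+  }
+}
+```
+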
+### Utility classes in [`modules/video_coding/utility`][modules-video-coding-utility]
+
+* [`FrameDropper`][framedropper] - drops incoming frames when the encoder systematically
+  overshoots its target bitrate
+* [`FramerateController`][frameratecontroller] - drops incoming frames to achieve a target framerate (a simplified sketch of the idea follows this list)
+* [`QpParser`][qpparser] - parses the quantization parameter from a bitstream
+* [`QualityScaler`][qualityscaler] - signals when an encoder generates encoded frames whose
+ quantization parameter is outside the window of acceptable values
+* [`SimulcastRateAllocator`][simulcastrateallocator] - bitrate allocation to simulcast layers
+
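+The sketch below is a simplified illustration of the idea behind
+`FramerateController`; it is not the actual WebRTC class:
+
+```cpp
+#include <cstdint>
+
+// Simplified illustration: keep a frame only if at least 1/max_fps seconds
+// have passed since the last kept frame, otherwise drop it.
+class SimpleFramerateLimiter {
+ public:
+  explicit SimpleFramerateLimiter(double max_fps)
+      : min_interval_us_(static_cast<int64_t>(1e6 / max_fps)) {}
+
+  bool ShouldDropFrame(int64_t timestamp_us) {
+    if (last_kept_us_ >= 0 &&
+        timestamp_us - last_kept_us_ < min_interval_us_) {
+      return true;  // too soon after the previously kept frame
+    }
+    last_kept_us_ = timestamp_us;
+    return false;
+  }
+
+ private:
+  const int64_t min_interval_us_;
+  int64_t last_kept_us_ = -1;
+};
+```
+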
+### General helper classes in [`modules/video_coding`][modules-video-coding]
+
+* [`FecControllerDefault`][feccontrollerdefault] - provides a default implementation for rate
+ allocation to [forward error correction][fec-wiki]
+* [`VideoCodecInitializer`][videocodecinitializer] - converts between different encoder configuration
+ structs
+
+### Receiver buffer classes in [`modules/video_coding`][modules-video-coding]
+
+* [`PacketBuffer`][packetbuffer] - (re-)combines RTP packets into frames
+* [`RtpFrameReferenceFinder`][rtpframereferencefinder] - determines dependencies between frames based on information in the RTP header, payload header and RTP extensions
+* [`FrameBuffer`][framebuffer] - orders frames based on their dependencies before they are fed to the decoder (an illustrative pipeline sketch follows this list)
+
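+Conceptually, these three classes form a pipeline from RTP packets to
+decodable frames. The sketch below is purely illustrative; the stub types are
+hypothetical and the real classes have much richer interfaces:
+
+```cpp
+#include <utility>
+#include <vector>
+
+// Hypothetical stubs standing in for the real receiver buffer classes.
+struct RtpPacket {};
+struct Frame {};
+struct StubPacketBuffer {
+  // (Re-)combines packets into complete frames.
+  std::vector<Frame> Insert(RtpPacket packet) { return {}; }
+};
+struct StubReferenceFinder {
+  // Annotates each frame with the frames it depends on.
+  std::vector<Frame> AttachReferences(Frame frame) { return {}; }
+};
+struct StubFrameBuffer {
+  // Holds frames until their dependencies can be decoded first.
+  void Insert(Frame frame) {}
+};
+
+void OnRtpPacket(StubPacketBuffer& packets,
+                 StubReferenceFinder& refs,
+                 StubFrameBuffer& frames,
+                 RtpPacket packet) {
+  for (Frame& f : packets.Insert(std::move(packet)))      // packets -> frames
+    for (Frame& r : refs.AttachReferences(std::move(f)))  // add dependencies
+      frames.Insert(std::move(r));  // fed to the decoder in dependency order
+}
+```
+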
+[video-coding-wiki]: https://en.wikipedia.org/wiki/Video_coding_format
+[motion-compensation-wiki]: https://en.wikipedia.org/wiki/Motion_compensation
+[transform-coding-wiki]: https://en.wikipedia.org/wiki/Transform_coding
+[motion-vector-wiki]: https://en.wikipedia.org/wiki/Motion_vector
+[mpeg-wiki]: https://en.wikipedia.org/wiki/Moving_Picture_Experts_Group
+[svc-wiki]: https://en.wikipedia.org/wiki/Scalable_Video_Coding
+[sfu-webrtc-glossary]: https://webrtcglossary.com/sfu/
+[libvpx-src]: https://chromium.googlesource.com/webm/libvpx/
+[libaom-src]: https://aomedia.googlesource.com/aom/
+[openh264-src]: https://github.com/cisco/openh264
+[vp8-spec]: https://tools.ietf.org/html/rfc6386
+[vp9-spec]: https://storage.googleapis.com/downloads.webmproject.org/docs/vp9/vp9-bitstream-specification-v0.6-20160331-draft.pdf
+[av1-spec]: https://aomediacodec.github.io/av1-spec/
+[h264-spec]: https://www.itu.int/rec/T-REC-H.264-201906-I/en
+[video-encoder-factory-interface]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/api/video_codecs/video_encoder_factory.h;l=27;drc=afadfb24a5e608da6ae102b20b0add53a083dcf3
+[video-decoder-factory-interface]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/api/video_codecs/video_decoder_factory.h;l=27;drc=49c293f03d8f593aa3aca282577fcb14daa63207
+[scalability-structure-spec]: https://w3c.github.io/webrtc-svc/#scalabilitymodes*
+[fec-wiki]: https://en.wikipedia.org/wiki/Error_correction_code#Forward_error_correction
+[entropy-coding-wiki]: https://en.wikipedia.org/wiki/Entropy_encoding
+[modules-video-coding]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/video_coding/
+[modules-video-coding-codecs]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/video_coding/codecs/
+[modules-video-coding-codecs-test]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/video_coding/codecs/test/
+[modules-video-coding-svc]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/video_coding/svc/
+[modules-video-coding-utility]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/video_coding/utility/
+[scalabilitystructure]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/video_coding/svc/create_scalability_structure.h?q=CreateScalabilityStructure
+[scalablevideocontroller]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/video_coding/svc/scalable_video_controller.h?q=ScalableVideoController
+[svcrateallocator]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/video_coding/svc/svc_rate_allocator.h?q=SvcRateAllocator
+[framedropper]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/video_coding/utility/frame_dropper.h?q=FrameDropper
+[frameratecontroller]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/video_coding/utility/framerate_controller.h?q=FramerateController
+[qpparser]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/video_coding/utility/qp_parser.h?q=QpParser
+[qualityscaler]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/video_coding/utility/quality_scaler.h?q=QualityScaler
+[simulcastrateallocator]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/video_coding/utility/simulcast_rate_allocator.h?q=SimulcastRateAllocator
+[feccontrollerdefault]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/video_coding/fec_controller_default.h?q=FecControllerDefault
+[videocodecinitializer]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/video_coding/include/video_codec_initializer.h?q=VideoCodecInitializer
+[packetbuffer]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/video_coding/packet_buffer.h?q=PacketBuffer
+[rtpframereferencefinder]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/video_coding/rtp_frame_reference_finder.h?q=RtpFrameReferenceFinder
+[framebuffer]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/video_coding/frame_buffer2.h?q=FrameBuffer
+[quantization-wiki]: https://en.wikipedia.org/wiki/Quantization_(signal_processing)
diff --git a/third_party/libwebrtc/modules/video_coding/generic_decoder.cc b/third_party/libwebrtc/modules/video_coding/generic_decoder.cc
new file mode 100644
index 0000000000..0b8fc147a7
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/generic_decoder.cc
@@ -0,0 +1,326 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/generic_decoder.h"
+
+#include <stddef.h>
+
+#include <algorithm>
+#include <cmath>
+#include <iterator>
+#include <utility>
+
+#include "absl/algorithm/container.h"
+#include "absl/types/optional.h"
+#include "api/video/video_timing.h"
+#include "modules/include/module_common_types_public.h"
+#include "modules/video_coding/include/video_error_codes.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/trace_event.h"
+#include "system_wrappers/include/clock.h"
+
+namespace webrtc {
+
+namespace {
+
+// Changed from 10 to 30 in Mozilla Bug 989944: Increase decode
+// timestamp map to handle delayed decode on 8x10. The map is
+// now a deque (as of libwebrtc upstream commit 1c51ec4d74).
+constexpr size_t kDecoderFrameMemoryLength = 30;
+
+}  // namespace
+
+VCMDecodedFrameCallback::VCMDecodedFrameCallback(
+ VCMTiming* timing,
+ Clock* clock,
+ const FieldTrialsView& field_trials)
+ : _clock(clock), _timing(timing) {
+ ntp_offset_ =
+ _clock->CurrentNtpInMilliseconds() - _clock->TimeInMilliseconds();
+}
+
+VCMDecodedFrameCallback::~VCMDecodedFrameCallback() {}
+
+void VCMDecodedFrameCallback::SetUserReceiveCallback(
+ VCMReceiveCallback* receiveCallback) {
+ RTC_DCHECK(construction_thread_.IsCurrent());
+ RTC_DCHECK((!_receiveCallback && receiveCallback) ||
+ (_receiveCallback && !receiveCallback));
+ _receiveCallback = receiveCallback;
+}
+
+VCMReceiveCallback* VCMDecodedFrameCallback::UserReceiveCallback() {
+ // Called on the decode thread via VCMCodecDataBase::GetDecoder.
+ // The callback must always have been set before this happens.
+ RTC_DCHECK(_receiveCallback);
+ return _receiveCallback;
+}
+
+int32_t VCMDecodedFrameCallback::Decoded(VideoFrame& decodedImage) {
+  // This function may be called on the decode TaskQueue, but may also be
+  // called on an OS-provided queue, such as on iOS (see e.g. b/153465112).
+ return Decoded(decodedImage, -1);
+}
+
+int32_t VCMDecodedFrameCallback::Decoded(VideoFrame& decodedImage,
+ int64_t decode_time_ms) {
+ Decoded(decodedImage,
+ decode_time_ms >= 0 ? absl::optional<int32_t>(decode_time_ms)
+ : absl::nullopt,
+ absl::nullopt);
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
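+// Removes and returns the FrameInfo matching `rtp_timestamp`, if any,
+// together with the number of older entries that were discarded (i.e. frames
+// presumed dropped by the decoder). Entries newer than `rtp_timestamp` are
+// kept.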
+std::pair<absl::optional<FrameInfo>, size_t>
+VCMDecodedFrameCallback::FindFrameInfo(uint32_t rtp_timestamp) {
+ absl::optional<FrameInfo> frame_info;
+
+ auto it = absl::c_find_if(frame_infos_, [rtp_timestamp](const auto& entry) {
+ return entry.rtp_timestamp == rtp_timestamp ||
+ IsNewerTimestamp(entry.rtp_timestamp, rtp_timestamp);
+ });
+ size_t dropped_frames = std::distance(frame_infos_.begin(), it);
+
+ if (it != frame_infos_.end() && it->rtp_timestamp == rtp_timestamp) {
+ // Frame was found and should also be removed from the queue.
+ frame_info = std::move(*it);
+ ++it;
+ }
+
+ frame_infos_.erase(frame_infos_.begin(), it);
+ return std::make_pair(std::move(frame_info), dropped_frames);
+}
+
+void VCMDecodedFrameCallback::Decoded(VideoFrame& decodedImage,
+ absl::optional<int32_t> decode_time_ms,
+ absl::optional<uint8_t> qp) {
+ RTC_DCHECK(_receiveCallback) << "Callback must not be null at this point";
+ TRACE_EVENT_INSTANT1("webrtc", "VCMDecodedFrameCallback::Decoded",
+ "timestamp", decodedImage.timestamp());
+ // TODO(holmer): We should improve this so that we can handle multiple
+ // callbacks from one call to Decode().
+ absl::optional<FrameInfo> frame_info;
+ int timestamp_map_size = 0;
+ int dropped_frames = 0;
+ {
+ MutexLock lock(&lock_);
+ std::tie(frame_info, dropped_frames) =
+ FindFrameInfo(decodedImage.timestamp());
+ timestamp_map_size = frame_infos_.size();
+ }
+ if (dropped_frames > 0) {
+ _receiveCallback->OnDroppedFrames(dropped_frames);
+ }
+
+ if (!frame_info) {
+ RTC_LOG(LS_WARNING) << "Too many frames backed up in the decoder, dropping "
+ "frame with timestamp "
+ << decodedImage.timestamp();
+ return;
+ }
+
+ decodedImage.set_ntp_time_ms(frame_info->ntp_time_ms);
+ decodedImage.set_packet_infos(frame_info->packet_infos);
+ decodedImage.set_rotation(frame_info->rotation);
+ VideoFrame::RenderParameters render_parameters = _timing->RenderParameters();
+ if (render_parameters.max_composition_delay_in_frames) {
+ // Subtract frames that are in flight.
+ render_parameters.max_composition_delay_in_frames =
+ std::max(0, *render_parameters.max_composition_delay_in_frames -
+ timestamp_map_size);
+ }
+ decodedImage.set_render_parameters(render_parameters);
+
+ RTC_DCHECK(frame_info->decode_start);
+ const Timestamp now = _clock->CurrentTime();
+ const TimeDelta decode_time = decode_time_ms
+ ? TimeDelta::Millis(*decode_time_ms)
+ : now - *frame_info->decode_start;
+ _timing->StopDecodeTimer(decode_time, now);
+ decodedImage.set_processing_time(
+ {*frame_info->decode_start, *frame_info->decode_start + decode_time});
+
+ // Report timing information.
+ TimingFrameInfo timing_frame_info;
+ if (frame_info->timing.flags != VideoSendTiming::kInvalid) {
+ int64_t capture_time_ms = decodedImage.ntp_time_ms() - ntp_offset_;
+ // Convert remote timestamps to local time from ntp timestamps.
+ frame_info->timing.encode_start_ms -= ntp_offset_;
+ frame_info->timing.encode_finish_ms -= ntp_offset_;
+ frame_info->timing.packetization_finish_ms -= ntp_offset_;
+ frame_info->timing.pacer_exit_ms -= ntp_offset_;
+ frame_info->timing.network_timestamp_ms -= ntp_offset_;
+ frame_info->timing.network2_timestamp_ms -= ntp_offset_;
+
+ int64_t sender_delta_ms = 0;
+ if (decodedImage.ntp_time_ms() < 0) {
+      // The sender clock is not estimated yet. Make sure that all sender times
+      // are negative to indicate that, while keeping them correct relative to
+      // each other.
+ sender_delta_ms =
+ std::max({capture_time_ms, frame_info->timing.encode_start_ms,
+ frame_info->timing.encode_finish_ms,
+ frame_info->timing.packetization_finish_ms,
+ frame_info->timing.pacer_exit_ms,
+ frame_info->timing.network_timestamp_ms,
+ frame_info->timing.network2_timestamp_ms}) +
+ 1;
+ }
+
+ timing_frame_info.capture_time_ms = capture_time_ms - sender_delta_ms;
+ timing_frame_info.encode_start_ms =
+ frame_info->timing.encode_start_ms - sender_delta_ms;
+ timing_frame_info.encode_finish_ms =
+ frame_info->timing.encode_finish_ms - sender_delta_ms;
+ timing_frame_info.packetization_finish_ms =
+ frame_info->timing.packetization_finish_ms - sender_delta_ms;
+ timing_frame_info.pacer_exit_ms =
+ frame_info->timing.pacer_exit_ms - sender_delta_ms;
+ timing_frame_info.network_timestamp_ms =
+ frame_info->timing.network_timestamp_ms - sender_delta_ms;
+ timing_frame_info.network2_timestamp_ms =
+ frame_info->timing.network2_timestamp_ms - sender_delta_ms;
+ }
+
+ timing_frame_info.flags = frame_info->timing.flags;
+ timing_frame_info.decode_start_ms = frame_info->decode_start->ms();
+ timing_frame_info.decode_finish_ms = now.ms();
+ timing_frame_info.render_time_ms =
+ frame_info->render_time ? frame_info->render_time->ms() : -1;
+ timing_frame_info.rtp_timestamp = decodedImage.timestamp();
+ timing_frame_info.receive_start_ms = frame_info->timing.receive_start_ms;
+ timing_frame_info.receive_finish_ms = frame_info->timing.receive_finish_ms;
+ _timing->SetTimingFrameInfo(timing_frame_info);
+
+ decodedImage.set_timestamp_us(
+ frame_info->render_time ? frame_info->render_time->us() : -1);
+ _receiveCallback->FrameToRender(decodedImage, qp, decode_time,
+ frame_info->content_type);
+}
+
+void VCMDecodedFrameCallback::OnDecoderImplementationName(
+ const char* implementation_name) {
+ _receiveCallback->OnDecoderImplementationName(implementation_name);
+}
+
+void VCMDecodedFrameCallback::Map(FrameInfo frameInfo) {
+ int dropped_frames = 0;
+ {
+ MutexLock lock(&lock_);
+ int initial_size = frame_infos_.size();
+ if (initial_size == kDecoderFrameMemoryLength) {
+ frame_infos_.pop_front();
+ dropped_frames = 1;
+ }
+ frame_infos_.push_back(std::move(frameInfo));
+    // If no frame was dropped, the new size is `initial_size` + 1.
+ }
+ if (dropped_frames > 0) {
+ _receiveCallback->OnDroppedFrames(dropped_frames);
+ }
+}
+
+void VCMDecodedFrameCallback::ClearTimestampMap() {
+ int dropped_frames = 0;
+ {
+ MutexLock lock(&lock_);
+ dropped_frames = frame_infos_.size();
+ frame_infos_.clear();
+ }
+ if (dropped_frames > 0) {
+ _receiveCallback->OnDroppedFrames(dropped_frames);
+ }
+}
+
+VCMGenericDecoder::VCMGenericDecoder(VideoDecoder* decoder)
+ : _callback(NULL),
+ decoder_(decoder),
+ _last_keyframe_content_type(VideoContentType::UNSPECIFIED) {
+ RTC_DCHECK(decoder_);
+}
+
+VCMGenericDecoder::~VCMGenericDecoder() {
+ decoder_->Release();
+}
+
+bool VCMGenericDecoder::Configure(const VideoDecoder::Settings& settings) {
+ TRACE_EVENT0("webrtc", "VCMGenericDecoder::Configure");
+
+ bool ok = decoder_->Configure(settings);
+ decoder_info_ = decoder_->GetDecoderInfo();
+ RTC_LOG(LS_INFO) << "Decoder implementation: " << decoder_info_.ToString();
+ if (_callback) {
+ _callback->OnDecoderImplementationName(
+ decoder_info_.implementation_name.c_str());
+ }
+ return ok;
+}
+
+int32_t VCMGenericDecoder::Decode(const VCMEncodedFrame& frame, Timestamp now) {
+ TRACE_EVENT1("webrtc", "VCMGenericDecoder::Decode", "timestamp",
+ frame.Timestamp());
+ FrameInfo frame_info;
+ frame_info.rtp_timestamp = frame.Timestamp();
+ frame_info.decode_start = now;
+ frame_info.render_time =
+ frame.RenderTimeMs() >= 0
+ ? absl::make_optional(Timestamp::Millis(frame.RenderTimeMs()))
+ : absl::nullopt;
+ frame_info.rotation = frame.rotation();
+ frame_info.timing = frame.video_timing();
+ frame_info.ntp_time_ms = frame.EncodedImage().ntp_time_ms_;
+ frame_info.packet_infos = frame.PacketInfos();
+
+  // The content type is set correctly only for key frames, so use the content
+  // type of the latest key frame. If the corresponding key frame was lost,
+  // decoding will fail and the content type will be ignored.
+ if (frame.FrameType() == VideoFrameType::kVideoFrameKey) {
+ frame_info.content_type = frame.contentType();
+ _last_keyframe_content_type = frame.contentType();
+ } else {
+ frame_info.content_type = _last_keyframe_content_type;
+ }
+ _callback->Map(std::move(frame_info));
+
+ int32_t ret = decoder_->Decode(frame.EncodedImage(), frame.MissingFrame(),
+ frame.RenderTimeMs());
+ VideoDecoder::DecoderInfo decoder_info = decoder_->GetDecoderInfo();
+ if (decoder_info != decoder_info_) {
+ RTC_LOG(LS_INFO) << "Changed decoder implementation to: "
+ << decoder_info.ToString();
+ decoder_info_ = decoder_info;
+ _callback->OnDecoderImplementationName(
+ decoder_info.implementation_name.empty()
+ ? "unknown"
+ : decoder_info.implementation_name.c_str());
+ }
+ if (ret < WEBRTC_VIDEO_CODEC_OK) {
+ RTC_LOG(LS_WARNING) << "Failed to decode frame with timestamp "
+ << frame.Timestamp() << ", error code: " << ret;
+ _callback->ClearTimestampMap();
+ } else if (ret == WEBRTC_VIDEO_CODEC_NO_OUTPUT) {
+ // No output.
+ _callback->ClearTimestampMap();
+ }
+ return ret;
+}
+
+int32_t VCMGenericDecoder::RegisterDecodeCompleteCallback(
+ VCMDecodedFrameCallback* callback) {
+ _callback = callback;
+ int32_t ret = decoder_->RegisterDecodeCompleteCallback(callback);
+ if (callback && !decoder_info_.implementation_name.empty()) {
+ callback->OnDecoderImplementationName(
+ decoder_info_.implementation_name.c_str());
+ }
+ return ret;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/generic_decoder.h b/third_party/libwebrtc/modules/video_coding/generic_decoder.h
new file mode 100644
index 0000000000..d7e1850abb
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/generic_decoder.h
@@ -0,0 +1,124 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_GENERIC_DECODER_H_
+#define MODULES_VIDEO_CODING_GENERIC_DECODER_H_
+
+#include <cstdint>
+#include <deque>
+#include <string>
+#include <utility>
+
+#include "api/field_trials_view.h"
+#include "api/sequence_checker.h"
+#include "api/video_codecs/video_decoder.h"
+#include "modules/video_coding/encoded_frame.h"
+#include "modules/video_coding/timing/timing.h"
+#include "rtc_base/synchronization/mutex.h"
+
+namespace webrtc {
+
+class VCMReceiveCallback;
+
+struct FrameInfo {
+ FrameInfo() = default;
+ FrameInfo(const FrameInfo&) = delete;
+ FrameInfo& operator=(const FrameInfo&) = delete;
+ FrameInfo(FrameInfo&&) = default;
+ FrameInfo& operator=(FrameInfo&&) = default;
+
+ uint32_t rtp_timestamp;
+  // This is likely not optional, but some inputs sometimes seem to be negative.
+ // TODO(bugs.webrtc.org/13756): See if this can be replaced with Timestamp
+ // once all inputs to this field use Timestamp instead of an integer.
+ absl::optional<Timestamp> render_time;
+ absl::optional<Timestamp> decode_start;
+ VideoRotation rotation;
+ VideoContentType content_type;
+ EncodedImage::Timing timing;
+ int64_t ntp_time_ms;
+ RtpPacketInfos packet_infos;
+ // ColorSpace is not stored here, as it might be modified by decoders.
+};
+
+class VCMDecodedFrameCallback : public DecodedImageCallback {
+ public:
+ VCMDecodedFrameCallback(VCMTiming* timing,
+ Clock* clock,
+ const FieldTrialsView& field_trials);
+ ~VCMDecodedFrameCallback() override;
+ void SetUserReceiveCallback(VCMReceiveCallback* receiveCallback);
+ VCMReceiveCallback* UserReceiveCallback();
+
+ int32_t Decoded(VideoFrame& decodedImage) override;
+ int32_t Decoded(VideoFrame& decodedImage, int64_t decode_time_ms) override;
+ void Decoded(VideoFrame& decodedImage,
+ absl::optional<int32_t> decode_time_ms,
+ absl::optional<uint8_t> qp) override;
+
+ void OnDecoderImplementationName(const char* implementation_name);
+
+ void Map(FrameInfo frameInfo);
+ void ClearTimestampMap();
+
+ private:
+ std::pair<absl::optional<FrameInfo>, size_t> FindFrameInfo(
+ uint32_t rtp_timestamp) RTC_EXCLUSIVE_LOCKS_REQUIRED(lock_);
+
+ SequenceChecker construction_thread_;
+ Clock* const _clock;
+  // This callback must be set before the decoder thread starts running
+  // and must only be unset when external threads (e.g. the decoder thread)
+  // have been stopped. Because of that, the variable should be regarded as
+  // const while more than one thread is involved, it must be set from the
+  // same thread, and therefore no lock is required to access it.
+ VCMReceiveCallback* _receiveCallback = nullptr;
+ VCMTiming* _timing;
+ Mutex lock_;
+ std::deque<FrameInfo> frame_infos_ RTC_GUARDED_BY(lock_);
+ int64_t ntp_offset_;
+};
+
+class VCMGenericDecoder {
+ public:
+ explicit VCMGenericDecoder(VideoDecoder* decoder);
+ ~VCMGenericDecoder();
+
+  /**
+   * Initializes the decoder with the information from `settings`.
+   */
+ bool Configure(const VideoDecoder::Settings& settings);
+
+  /**
+   * Decodes `inputFrame` to a raw I420 frame.
+   *
+   * `inputFrame` - reference to the encoded video frame
+   */
+ int32_t Decode(const VCMEncodedFrame& inputFrame, Timestamp now);
+
+ /**
+ * Set decode callback. Deregistering while decoding is illegal.
+ */
+ int32_t RegisterDecodeCompleteCallback(VCMDecodedFrameCallback* callback);
+
+ bool IsSameDecoder(VideoDecoder* decoder) const {
+ return decoder_ == decoder;
+ }
+
+ private:
+ VCMDecodedFrameCallback* _callback = nullptr;
+ VideoDecoder* const decoder_;
+ VideoContentType _last_keyframe_content_type;
+ VideoDecoder::DecoderInfo decoder_info_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_GENERIC_DECODER_H_
diff --git a/third_party/libwebrtc/modules/video_coding/generic_decoder_unittest.cc b/third_party/libwebrtc/modules/video_coding/generic_decoder_unittest.cc
new file mode 100644
index 0000000000..68bc307e65
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/generic_decoder_unittest.cc
@@ -0,0 +1,190 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/generic_decoder.h"
+
+#include <cstdint>
+#include <memory>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/array_view.h"
+#include "api/rtp_packet_infos.h"
+#include "api/video_codecs/video_decoder.h"
+#include "common_video/test/utilities.h"
+#include "modules/video_coding/timing/timing.h"
+#include "system_wrappers/include/clock.h"
+#include "test/fake_decoder.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "test/scoped_key_value_config.h"
+#include "test/time_controller/simulated_time_controller.h"
+
+namespace webrtc {
+namespace video_coding {
+
+class ReceiveCallback : public VCMReceiveCallback {
+ public:
+ int32_t FrameToRender(VideoFrame& frame,
+ absl::optional<uint8_t> qp,
+ TimeDelta decode_time,
+ VideoContentType content_type) override {
+ frames_.push_back(frame);
+ return 0;
+ }
+
+ absl::optional<VideoFrame> PopLastFrame() {
+ if (frames_.empty())
+ return absl::nullopt;
+    auto ret = frames_.back();  // the most recently rendered frame
+    frames_.pop_back();
+ return ret;
+ }
+
+ rtc::ArrayView<const VideoFrame> GetAllFrames() const { return frames_; }
+
+  void OnDroppedFrames(uint32_t frames_dropped) override {
+ frames_dropped_ += frames_dropped;
+ }
+
+ uint32_t frames_dropped() const { return frames_dropped_; }
+
+ private:
+ std::vector<VideoFrame> frames_;
+ uint32_t frames_dropped_ = 0;
+};
+
+class GenericDecoderTest : public ::testing::Test {
+ protected:
+ GenericDecoderTest()
+ : time_controller_(Timestamp::Zero()),
+ clock_(time_controller_.GetClock()),
+ timing_(time_controller_.GetClock(), field_trials_),
+ decoder_(time_controller_.GetTaskQueueFactory()),
+ vcm_callback_(&timing_, time_controller_.GetClock(), field_trials_),
+ generic_decoder_(&decoder_) {}
+
+ void SetUp() override {
+ generic_decoder_.RegisterDecodeCompleteCallback(&vcm_callback_);
+ vcm_callback_.SetUserReceiveCallback(&user_callback_);
+ VideoDecoder::Settings settings;
+ settings.set_codec_type(kVideoCodecVP8);
+ settings.set_max_render_resolution({10, 10});
+ settings.set_number_of_cores(4);
+ generic_decoder_.Configure(settings);
+ }
+
+ GlobalSimulatedTimeController time_controller_;
+ Clock* const clock_;
+ test::ScopedKeyValueConfig field_trials_;
+ VCMTiming timing_;
+ webrtc::test::FakeDecoder decoder_;
+ VCMDecodedFrameCallback vcm_callback_;
+ VCMGenericDecoder generic_decoder_;
+ ReceiveCallback user_callback_;
+};
+
+TEST_F(GenericDecoderTest, PassesPacketInfos) {
+ RtpPacketInfos packet_infos = CreatePacketInfos(3);
+ VCMEncodedFrame encoded_frame;
+ encoded_frame.SetPacketInfos(packet_infos);
+ generic_decoder_.Decode(encoded_frame, clock_->CurrentTime());
+ time_controller_.AdvanceTime(TimeDelta::Millis(10));
+ absl::optional<VideoFrame> decoded_frame = user_callback_.PopLastFrame();
+ ASSERT_TRUE(decoded_frame.has_value());
+ EXPECT_EQ(decoded_frame->packet_infos().size(), 3U);
+}
+
+TEST_F(GenericDecoderTest, FrameDroppedIfTooManyFramesInFlight) {
+ constexpr int kMaxFramesInFlight = 10;
+ decoder_.SetDelayedDecoding(10);
+ for (int i = 0; i < kMaxFramesInFlight + 1; ++i) {
+ VCMEncodedFrame encoded_frame;
+ encoded_frame.SetTimestamp(90000 * i);
+ generic_decoder_.Decode(encoded_frame, clock_->CurrentTime());
+ }
+
+ time_controller_.AdvanceTime(TimeDelta::Millis(10));
+
+ auto frames = user_callback_.GetAllFrames();
+ ASSERT_EQ(10U, frames.size());
+  // Expect that the first frame was dropped, since all decodes complete at the
+  // same time and the oldest frame info is the first one to be dropped.
+ EXPECT_EQ(frames[0].timestamp(), 90000u);
+ EXPECT_EQ(1u, user_callback_.frames_dropped());
+}
+
+TEST_F(GenericDecoderTest, PassesPacketInfosForDelayedDecoders) {
+ RtpPacketInfos packet_infos = CreatePacketInfos(3);
+ decoder_.SetDelayedDecoding(100);
+
+ {
+ // Ensure the original frame is destroyed before the decoding is completed.
+ VCMEncodedFrame encoded_frame;
+ encoded_frame.SetPacketInfos(packet_infos);
+ generic_decoder_.Decode(encoded_frame, clock_->CurrentTime());
+ }
+
+ time_controller_.AdvanceTime(TimeDelta::Millis(200));
+ absl::optional<VideoFrame> decoded_frame = user_callback_.PopLastFrame();
+ ASSERT_TRUE(decoded_frame.has_value());
+ EXPECT_EQ(decoded_frame->packet_infos().size(), 3U);
+}
+
+TEST_F(GenericDecoderTest, MaxCompositionDelayNotSetByDefault) {
+ VCMEncodedFrame encoded_frame;
+ generic_decoder_.Decode(encoded_frame, clock_->CurrentTime());
+ time_controller_.AdvanceTime(TimeDelta::Millis(10));
+ absl::optional<VideoFrame> decoded_frame = user_callback_.PopLastFrame();
+ ASSERT_TRUE(decoded_frame.has_value());
+ EXPECT_THAT(
+ decoded_frame->render_parameters().max_composition_delay_in_frames,
+ testing::Eq(absl::nullopt));
+}
+
+TEST_F(GenericDecoderTest, MaxCompositionDelayActivatedByPlayoutDelay) {
+ VCMEncodedFrame encoded_frame;
+ // VideoReceiveStream2 would set MaxCompositionDelayInFrames if playout delay
+ // is specified as X,Y, where X=0, Y>0.
+ constexpr int kMaxCompositionDelayInFrames = 3; // ~50 ms at 60 fps.
+ timing_.SetMaxCompositionDelayInFrames(
+ absl::make_optional(kMaxCompositionDelayInFrames));
+ generic_decoder_.Decode(encoded_frame, clock_->CurrentTime());
+ time_controller_.AdvanceTime(TimeDelta::Millis(10));
+ absl::optional<VideoFrame> decoded_frame = user_callback_.PopLastFrame();
+ ASSERT_TRUE(decoded_frame.has_value());
+ EXPECT_THAT(
+ decoded_frame->render_parameters().max_composition_delay_in_frames,
+ testing::Optional(kMaxCompositionDelayInFrames));
+}
+
+TEST_F(GenericDecoderTest, IsLowLatencyStreamFalseByDefault) {
+ VCMEncodedFrame encoded_frame;
+ generic_decoder_.Decode(encoded_frame, clock_->CurrentTime());
+ time_controller_.AdvanceTime(TimeDelta::Millis(10));
+ absl::optional<VideoFrame> decoded_frame = user_callback_.PopLastFrame();
+ ASSERT_TRUE(decoded_frame.has_value());
+ EXPECT_FALSE(decoded_frame->render_parameters().use_low_latency_rendering);
+}
+
+TEST_F(GenericDecoderTest, IsLowLatencyStreamActivatedByPlayoutDelay) {
+ VCMEncodedFrame encoded_frame;
+ const VideoPlayoutDelay kPlayoutDelay = {0, 50};
+ timing_.set_min_playout_delay(TimeDelta::Millis(kPlayoutDelay.min_ms));
+ timing_.set_max_playout_delay(TimeDelta::Millis(kPlayoutDelay.max_ms));
+ generic_decoder_.Decode(encoded_frame, clock_->CurrentTime());
+ time_controller_.AdvanceTime(TimeDelta::Millis(10));
+ absl::optional<VideoFrame> decoded_frame = user_callback_.PopLastFrame();
+ ASSERT_TRUE(decoded_frame.has_value());
+ EXPECT_TRUE(decoded_frame->render_parameters().use_low_latency_rendering);
+}
+
+} // namespace video_coding
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/h264_packet_buffer.cc b/third_party/libwebrtc/modules/video_coding/h264_packet_buffer.cc
new file mode 100644
index 0000000000..6096665bda
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/h264_packet_buffer.cc
@@ -0,0 +1,287 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/h264_packet_buffer.h"
+
+#include <algorithm>
+#include <cstdint>
+#include <utility>
+#include <vector>
+
+#include "api/array_view.h"
+#include "api/rtp_packet_info.h"
+#include "api/video/video_frame_type.h"
+#include "common_video/h264/h264_common.h"
+#include "modules/rtp_rtcp/source/rtp_header_extensions.h"
+#include "modules/rtp_rtcp/source/rtp_packet_received.h"
+#include "modules/rtp_rtcp/source/rtp_video_header.h"
+#include "modules/video_coding/codecs/h264/include/h264_globals.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/copy_on_write_buffer.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/numerics/sequence_number_util.h"
+
+namespace webrtc {
+namespace {
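+// Maps `n` into [0, div). Unlike the built-in %, the result is never
+// negative, e.g. EuclideanMod(-1, 2048) == 2047. Used to turn unwrapped
+// sequence numbers into `buffer_` indices.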
+int64_t EuclideanMod(int64_t n, int64_t div) {
+ RTC_DCHECK_GT(div, 0);
+ return (n %= div) < 0 ? n + div : n;
+}
+
+rtc::ArrayView<const NaluInfo> GetNaluInfos(
+ const RTPVideoHeaderH264& h264_header) {
+ if (h264_header.nalus_length > kMaxNalusPerPacket) {
+ return {};
+ }
+
+ return rtc::MakeArrayView(h264_header.nalus, h264_header.nalus_length);
+}
+
+bool IsFirstPacketOfFragment(const RTPVideoHeaderH264& h264_header) {
+ return h264_header.nalus_length > 0;
+}
+
+bool BeginningOfIdr(const H264PacketBuffer::Packet& packet) {
+ const auto& h264_header =
+ absl::get<RTPVideoHeaderH264>(packet.video_header.video_type_header);
+ const bool contains_idr_nalu =
+ absl::c_any_of(GetNaluInfos(h264_header), [](const auto& nalu_info) {
+ return nalu_info.type == H264::NaluType::kIdr;
+ });
+ switch (h264_header.packetization_type) {
+ case kH264StapA:
+ case kH264SingleNalu: {
+ return contains_idr_nalu;
+ }
+ case kH264FuA: {
+ return contains_idr_nalu && IsFirstPacketOfFragment(h264_header);
+ }
+ }
+}
+
+bool HasSps(const H264PacketBuffer::Packet& packet) {
+ auto& h264_header =
+ absl::get<RTPVideoHeaderH264>(packet.video_header.video_type_header);
+ return absl::c_any_of(GetNaluInfos(h264_header), [](const auto& nalu_info) {
+ return nalu_info.type == H264::NaluType::kSps;
+ });
+}
+
+// TODO(bugs.webrtc.org/13157): Update the H264 depacketizer so we don't have to
+// fiddle with the payload at this point.
+rtc::CopyOnWriteBuffer FixVideoPayload(rtc::ArrayView<const uint8_t> payload,
+ const RTPVideoHeader& video_header) {
+ constexpr uint8_t kStartCode[] = {0, 0, 0, 1};
+
+ const auto& h264_header =
+ absl::get<RTPVideoHeaderH264>(video_header.video_type_header);
+
+ rtc::CopyOnWriteBuffer result;
+ switch (h264_header.packetization_type) {
+ case kH264StapA: {
+ const uint8_t* payload_end = payload.data() + payload.size();
+ const uint8_t* nalu_ptr = payload.data() + 1;
+ while (nalu_ptr < payload_end - 1) {
+        // The first two bytes describe the length of the segment, where a
+        // segment is the NALU type plus the NALU payload.
+ uint16_t segment_length = nalu_ptr[0] << 8 | nalu_ptr[1];
+ nalu_ptr += 2;
+
+ if (nalu_ptr + segment_length <= payload_end) {
+ result.AppendData(kStartCode);
+ result.AppendData(nalu_ptr, segment_length);
+ }
+ nalu_ptr += segment_length;
+ }
+ return result;
+ }
+
+ case kH264FuA: {
+ if (IsFirstPacketOfFragment(h264_header)) {
+ result.AppendData(kStartCode);
+ }
+ result.AppendData(payload);
+ return result;
+ }
+
+ case kH264SingleNalu: {
+ result.AppendData(kStartCode);
+ result.AppendData(payload);
+ return result;
+ }
+ }
+
+ RTC_DCHECK_NOTREACHED();
+ return result;
+}
+
+} // namespace
+
+H264PacketBuffer::H264PacketBuffer(bool idr_only_keyframes_allowed)
+ : idr_only_keyframes_allowed_(idr_only_keyframes_allowed) {}
+
+H264PacketBuffer::InsertResult H264PacketBuffer::InsertPacket(
+ std::unique_ptr<Packet> packet) {
+ RTC_DCHECK(packet->video_header.codec == kVideoCodecH264);
+
+ InsertResult result;
+ if (!absl::holds_alternative<RTPVideoHeaderH264>(
+ packet->video_header.video_type_header)) {
+ return result;
+ }
+
+ int64_t unwrapped_seq_num = seq_num_unwrapper_.Unwrap(packet->seq_num);
+ auto& packet_slot = GetPacket(unwrapped_seq_num);
+ if (packet_slot != nullptr &&
+ AheadOrAt(packet_slot->timestamp, packet->timestamp)) {
+ // The incoming `packet` is old or a duplicate.
+ return result;
+ } else {
+ packet_slot = std::move(packet);
+ }
+
+ result.packets = FindFrames(unwrapped_seq_num);
+ return result;
+}
+
+std::unique_ptr<H264PacketBuffer::Packet>& H264PacketBuffer::GetPacket(
+ int64_t unwrapped_seq_num) {
+ return buffer_[EuclideanMod(unwrapped_seq_num, kBufferSize)];
+}
+
+bool H264PacketBuffer::BeginningOfStream(
+ const H264PacketBuffer::Packet& packet) const {
+ return HasSps(packet) ||
+ (idr_only_keyframes_allowed_ && BeginningOfIdr(packet));
+}
+
+std::vector<std::unique_ptr<H264PacketBuffer::Packet>>
+H264PacketBuffer::FindFrames(int64_t unwrapped_seq_num) {
+ std::vector<std::unique_ptr<Packet>> found_frames;
+
+ Packet* packet = GetPacket(unwrapped_seq_num).get();
+ RTC_CHECK(packet != nullptr);
+
+ // Check if the packet is continuous or the beginning of a new coded video
+ // sequence.
+ if (unwrapped_seq_num - 1 != last_continuous_unwrapped_seq_num_) {
+ if (unwrapped_seq_num <= last_continuous_unwrapped_seq_num_ ||
+ !BeginningOfStream(*packet)) {
+ return found_frames;
+ }
+
+ last_continuous_unwrapped_seq_num_ = unwrapped_seq_num;
+ }
+
+ for (int64_t seq_num = unwrapped_seq_num;
+ seq_num < unwrapped_seq_num + kBufferSize;) {
+ RTC_DCHECK_GE(seq_num, *last_continuous_unwrapped_seq_num_);
+
+    // Packets that were never assembled into a completed frame will stay in
+    // `buffer_`. Check that the `packet` sequence number matches the expected
+    // unwrapped sequence number.
+ if (static_cast<uint16_t>(seq_num) != packet->seq_num) {
+ return found_frames;
+ }
+
+ last_continuous_unwrapped_seq_num_ = seq_num;
+ // Last packet of the frame, try to assemble the frame.
+ if (packet->marker_bit) {
+ uint32_t rtp_timestamp = packet->timestamp;
+
+ // Iterate backwards to find where the frame starts.
+ for (int64_t seq_num_start = seq_num;
+ seq_num_start > seq_num - kBufferSize; --seq_num_start) {
+ auto& prev_packet = GetPacket(seq_num_start - 1);
+
+ if (prev_packet == nullptr || prev_packet->timestamp != rtp_timestamp) {
+ if (MaybeAssembleFrame(seq_num_start, seq_num, found_frames)) {
+ // Frame was assembled, continue to look for more frames.
+ break;
+ } else {
+ // Frame was not assembled, no subsequent frame will be continuous.
+ return found_frames;
+ }
+ }
+ }
+ }
+
+ seq_num++;
+ packet = GetPacket(seq_num).get();
+ if (packet == nullptr) {
+ return found_frames;
+ }
+ }
+
+ return found_frames;
+}
+
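+// Tries to assemble the packets with unwrapped sequence numbers in the
+// inclusive range [start, end] into a frame. A frame containing an IDR must
+// also contain SPS and PPS unless `idr_only_keyframes_allowed_` is set. On
+// success, the packets are moved into `frames` and true is returned.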
+bool H264PacketBuffer::MaybeAssembleFrame(
+ int64_t start_seq_num_unwrapped,
+ int64_t end_sequence_number_unwrapped,
+ std::vector<std::unique_ptr<Packet>>& frames) {
+ bool has_sps = false;
+ bool has_pps = false;
+ bool has_idr = false;
+
+ int width = -1;
+ int height = -1;
+
+ for (int64_t seq_num = start_seq_num_unwrapped;
+ seq_num <= end_sequence_number_unwrapped; ++seq_num) {
+ const auto& packet = GetPacket(seq_num);
+ const auto& h264_header =
+ absl::get<RTPVideoHeaderH264>(packet->video_header.video_type_header);
+ for (const auto& nalu : GetNaluInfos(h264_header)) {
+ has_idr |= nalu.type == H264::NaluType::kIdr;
+ has_sps |= nalu.type == H264::NaluType::kSps;
+ has_pps |= nalu.type == H264::NaluType::kPps;
+ }
+
+ width = std::max<int>(packet->video_header.width, width);
+ height = std::max<int>(packet->video_header.height, height);
+ }
+
+ if (has_idr) {
+ if (!idr_only_keyframes_allowed_ && (!has_sps || !has_pps)) {
+ return false;
+ }
+ }
+
+ for (int64_t seq_num = start_seq_num_unwrapped;
+ seq_num <= end_sequence_number_unwrapped; ++seq_num) {
+ auto& packet = GetPacket(seq_num);
+
+ packet->video_header.is_first_packet_in_frame =
+ (seq_num == start_seq_num_unwrapped);
+ packet->video_header.is_last_packet_in_frame =
+ (seq_num == end_sequence_number_unwrapped);
+
+ if (packet->video_header.is_first_packet_in_frame) {
+ if (width > 0 && height > 0) {
+ packet->video_header.width = width;
+ packet->video_header.height = height;
+ }
+
+ packet->video_header.frame_type = has_idr
+ ? VideoFrameType::kVideoFrameKey
+ : VideoFrameType::kVideoFrameDelta;
+ }
+
+ packet->video_payload =
+ FixVideoPayload(packet->video_payload, packet->video_header);
+
+ frames.push_back(std::move(packet));
+ }
+
+ return true;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/h264_packet_buffer.h b/third_party/libwebrtc/modules/video_coding/h264_packet_buffer.h
new file mode 100644
index 0000000000..1671fddb23
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/h264_packet_buffer.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_H264_PACKET_BUFFER_H_
+#define MODULES_VIDEO_CODING_H264_PACKET_BUFFER_H_
+
+#include <array>
+#include <memory>
+#include <vector>
+
+#include "absl/base/attributes.h"
+#include "absl/types/optional.h"
+#include "modules/video_coding/packet_buffer.h"
+#include "rtc_base/numerics/sequence_number_util.h"
+
+namespace webrtc {
+
+class H264PacketBuffer {
+ public:
+  // The H264PacketBuffer does the same job as the PacketBuffer, but for H264
+  // only. To make it fit in with the surrounding code, the PacketBuffer
+  // input/output classes are reused.
+ using Packet = video_coding::PacketBuffer::Packet;
+ using InsertResult = video_coding::PacketBuffer::InsertResult;
+
+ explicit H264PacketBuffer(bool idr_only_keyframes_allowed);
+
+ ABSL_MUST_USE_RESULT InsertResult
+ InsertPacket(std::unique_ptr<Packet> packet);
+
+ private:
+ static constexpr int kBufferSize = 2048;
+
+ std::unique_ptr<Packet>& GetPacket(int64_t unwrapped_seq_num);
+ bool BeginningOfStream(const Packet& packet) const;
+ std::vector<std::unique_ptr<Packet>> FindFrames(int64_t unwrapped_seq_num);
+ bool MaybeAssembleFrame(int64_t start_seq_num_unwrapped,
+ int64_t end_sequence_number_unwrapped,
+ std::vector<std::unique_ptr<Packet>>& packets);
+
+ const bool idr_only_keyframes_allowed_;
+ std::array<std::unique_ptr<Packet>, kBufferSize> buffer_;
+ absl::optional<int64_t> last_continuous_unwrapped_seq_num_;
+ SeqNumUnwrapper<uint16_t> seq_num_unwrapper_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_H264_PACKET_BUFFER_H_
diff --git a/third_party/libwebrtc/modules/video_coding/h264_packet_buffer_unittest.cc b/third_party/libwebrtc/modules/video_coding/h264_packet_buffer_unittest.cc
new file mode 100644
index 0000000000..4f2331da28
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/h264_packet_buffer_unittest.cc
@@ -0,0 +1,778 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/video_coding/h264_packet_buffer.h"
+
+#include <cstring>
+#include <limits>
+#include <ostream>
+#include <string>
+#include <utility>
+
+#include "api/array_view.h"
+#include "api/video/render_resolution.h"
+#include "common_video/h264/h264_common.h"
+#include "rtc_base/system/unused.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+using ::testing::ElementsAreArray;
+using ::testing::Eq;
+using ::testing::IsEmpty;
+using ::testing::SizeIs;
+
+using H264::NaluType::kAud;
+using H264::NaluType::kFuA;
+using H264::NaluType::kIdr;
+using H264::NaluType::kPps;
+using H264::NaluType::kSlice;
+using H264::NaluType::kSps;
+using H264::NaluType::kStapA;
+
+constexpr int kBufferSize = 2048;
+
+std::vector<uint8_t> StartCode() {
+ return {0, 0, 0, 1};
+}
+
+NaluInfo MakeNaluInfo(uint8_t type) {
+ NaluInfo res;
+ res.type = type;
+ res.sps_id = -1;
+ res.pps_id = -1;
+ return res;
+}
+
+class Packet {
+ public:
+ explicit Packet(H264PacketizationTypes type);
+
+ Packet& Idr(std::vector<uint8_t> payload = {9, 9, 9});
+ Packet& Slice(std::vector<uint8_t> payload = {9, 9, 9});
+ Packet& Sps(std::vector<uint8_t> payload = {9, 9, 9});
+ Packet& SpsWithResolution(RenderResolution resolution,
+ std::vector<uint8_t> payload = {9, 9, 9});
+ Packet& Pps(std::vector<uint8_t> payload = {9, 9, 9});
+ Packet& Aud();
+ Packet& Marker();
+ Packet& AsFirstFragment();
+ Packet& Time(uint32_t rtp_timestamp);
+ Packet& SeqNum(uint16_t rtp_seq_num);
+
+ std::unique_ptr<H264PacketBuffer::Packet> Build();
+
+ private:
+ rtc::CopyOnWriteBuffer BuildFuaPayload() const;
+ rtc::CopyOnWriteBuffer BuildSingleNaluPayload() const;
+ rtc::CopyOnWriteBuffer BuildStapAPayload() const;
+
+ RTPVideoHeaderH264& H264Header() {
+ return absl::get<RTPVideoHeaderH264>(video_header_.video_type_header);
+ }
+ const RTPVideoHeaderH264& H264Header() const {
+ return absl::get<RTPVideoHeaderH264>(video_header_.video_type_header);
+ }
+
+ H264PacketizationTypes type_;
+ RTPVideoHeader video_header_;
+ bool first_fragment_ = false;
+ bool marker_bit_ = false;
+ uint32_t rtp_timestamp_ = 0;
+ uint16_t rtp_seq_num_ = 0;
+ std::vector<std::vector<uint8_t>> nalu_payloads_;
+};
+
+Packet::Packet(H264PacketizationTypes type) : type_(type) {
+ video_header_.video_type_header.emplace<RTPVideoHeaderH264>();
+}
+
+Packet& Packet::Idr(std::vector<uint8_t> payload) {
+ auto& h264_header = H264Header();
+ h264_header.nalus[h264_header.nalus_length++] = MakeNaluInfo(kIdr);
+ nalu_payloads_.push_back(std::move(payload));
+ return *this;
+}
+
+Packet& Packet::Slice(std::vector<uint8_t> payload) {
+ auto& h264_header = H264Header();
+ h264_header.nalus[h264_header.nalus_length++] = MakeNaluInfo(kSlice);
+ nalu_payloads_.push_back(std::move(payload));
+ return *this;
+}
+
+Packet& Packet::Sps(std::vector<uint8_t> payload) {
+ auto& h264_header = H264Header();
+ h264_header.nalus[h264_header.nalus_length++] = MakeNaluInfo(kSps);
+ nalu_payloads_.push_back(std::move(payload));
+ return *this;
+}
+
+Packet& Packet::SpsWithResolution(RenderResolution resolution,
+ std::vector<uint8_t> payload) {
+ auto& h264_header = H264Header();
+ h264_header.nalus[h264_header.nalus_length++] = MakeNaluInfo(kSps);
+ video_header_.width = resolution.Width();
+ video_header_.height = resolution.Height();
+ nalu_payloads_.push_back(std::move(payload));
+ return *this;
+}
+
+Packet& Packet::Pps(std::vector<uint8_t> payload) {
+ auto& h264_header = H264Header();
+ h264_header.nalus[h264_header.nalus_length++] = MakeNaluInfo(kPps);
+ nalu_payloads_.push_back(std::move(payload));
+ return *this;
+}
+
+Packet& Packet::Aud() {
+ auto& h264_header = H264Header();
+ h264_header.nalus[h264_header.nalus_length++] = MakeNaluInfo(kAud);
+ nalu_payloads_.push_back({});
+ return *this;
+}
+
+Packet& Packet::Marker() {
+ marker_bit_ = true;
+ return *this;
+}
+
+Packet& Packet::AsFirstFragment() {
+ first_fragment_ = true;
+ return *this;
+}
+
+Packet& Packet::Time(uint32_t rtp_timestamp) {
+ rtp_timestamp_ = rtp_timestamp;
+ return *this;
+}
+
+Packet& Packet::SeqNum(uint16_t rtp_seq_num) {
+ rtp_seq_num_ = rtp_seq_num;
+ return *this;
+}
+
+std::unique_ptr<H264PacketBuffer::Packet> Packet::Build() {
+ auto res = std::make_unique<H264PacketBuffer::Packet>();
+
+ auto& h264_header = H264Header();
+ switch (type_) {
+ case kH264FuA: {
+ RTC_CHECK_EQ(h264_header.nalus_length, 1);
+ res->video_payload = BuildFuaPayload();
+ break;
+ }
+ case kH264SingleNalu: {
+ RTC_CHECK_EQ(h264_header.nalus_length, 1);
+ res->video_payload = BuildSingleNaluPayload();
+ break;
+ }
+ case kH264StapA: {
+ RTC_CHECK_GT(h264_header.nalus_length, 1);
+ RTC_CHECK_LE(h264_header.nalus_length, kMaxNalusPerPacket);
+ res->video_payload = BuildStapAPayload();
+ break;
+ }
+ }
+
+ if (type_ == kH264FuA && !first_fragment_) {
+ h264_header.nalus_length = 0;
+ }
+
+ h264_header.packetization_type = type_;
+ res->marker_bit = marker_bit_;
+ res->video_header = video_header_;
+ res->timestamp = rtp_timestamp_;
+ res->seq_num = rtp_seq_num_;
+ res->video_header.codec = kVideoCodecH264;
+
+ return res;
+}
+
+rtc::CopyOnWriteBuffer Packet::BuildFuaPayload() const {
+ return rtc::CopyOnWriteBuffer(nalu_payloads_[0]);
+}
+
+rtc::CopyOnWriteBuffer Packet::BuildSingleNaluPayload() const {
+ rtc::CopyOnWriteBuffer res;
+ auto& h264_header = H264Header();
+ res.AppendData(&h264_header.nalus[0].type, 1);
+ res.AppendData(nalu_payloads_[0]);
+ return res;
+}
+
+rtc::CopyOnWriteBuffer Packet::BuildStapAPayload() const {
+ rtc::CopyOnWriteBuffer res;
+
+ const uint8_t indicator = H264::NaluType::kStapA;
+ res.AppendData(&indicator, 1);
+
+ auto& h264_header = H264Header();
+ for (size_t i = 0; i < h264_header.nalus_length; ++i) {
+    // The first two bytes indicate the NALU segment size.
+ uint8_t length_as_array[2] = {
+ 0, static_cast<uint8_t>(nalu_payloads_[i].size() + 1)};
+ res.AppendData(length_as_array);
+
+ res.AppendData(&h264_header.nalus[i].type, 1);
+ res.AppendData(nalu_payloads_[i]);
+ }
+ return res;
+}
+
+rtc::ArrayView<const uint8_t> PacketPayload(
+ const std::unique_ptr<H264PacketBuffer::Packet>& packet) {
+ return packet->video_payload;
+}
+
+std::vector<uint8_t> FlatVector(
+ const std::vector<std::vector<uint8_t>>& elems) {
+ std::vector<uint8_t> res;
+ for (const auto& elem : elems) {
+ res.insert(res.end(), elem.begin(), elem.end());
+ }
+ return res;
+}
+
+TEST(H264PacketBufferTest, IdrIsKeyframe) {
+ H264PacketBuffer packet_buffer(/*allow_idr_only_keyframes=*/true);
+
+ EXPECT_THAT(
+ packet_buffer.InsertPacket(Packet(kH264SingleNalu).Idr().Marker().Build())
+ .packets,
+ SizeIs(1));
+}
+
+TEST(H264PacketBufferTest, IdrIsNotKeyframe) {
+ H264PacketBuffer packet_buffer(/*allow_idr_only_keyframes=*/false);
+
+ EXPECT_THAT(
+ packet_buffer.InsertPacket(Packet(kH264SingleNalu).Idr().Marker().Build())
+ .packets,
+ IsEmpty());
+}
+
+TEST(H264PacketBufferTest, IdrIsKeyframeFuaRequiresFirstFragment) {
+ H264PacketBuffer packet_buffer(/*allow_idr_only_keyframes=*/true);
+
+ // Not marked as the first fragment
+ EXPECT_THAT(
+ packet_buffer
+ .InsertPacket(Packet(kH264FuA).Idr().SeqNum(0).Time(0).Build())
+ .packets,
+ IsEmpty());
+
+ EXPECT_THAT(packet_buffer
+ .InsertPacket(
+ Packet(kH264FuA).Idr().SeqNum(1).Time(0).Marker().Build())
+ .packets,
+ IsEmpty());
+
+ // Marked as first fragment
+ EXPECT_THAT(packet_buffer
+ .InsertPacket(Packet(kH264FuA)
+ .Idr()
+ .SeqNum(2)
+ .Time(1)
+ .AsFirstFragment()
+ .Build())
+ .packets,
+ IsEmpty());
+
+ EXPECT_THAT(packet_buffer
+ .InsertPacket(
+ Packet(kH264FuA).Idr().SeqNum(3).Time(1).Marker().Build())
+ .packets,
+ SizeIs(2));
+}
+
+TEST(H264PacketBufferTest, SpsPpsIdrIsKeyframeSingleNalus) {
+ H264PacketBuffer packet_buffer(/*allow_idr_only_keyframes=*/false);
+
+ RTC_UNUSED(packet_buffer.InsertPacket(
+ Packet(kH264SingleNalu).Sps().SeqNum(0).Time(0).Build()));
+ RTC_UNUSED(packet_buffer.InsertPacket(
+ Packet(kH264SingleNalu).Pps().SeqNum(1).Time(0).Build()));
+ EXPECT_THAT(
+ packet_buffer
+ .InsertPacket(
+ Packet(kH264SingleNalu).Idr().SeqNum(2).Time(0).Marker().Build())
+ .packets,
+ SizeIs(3));
+}
+
+TEST(H264PacketBufferTest, PpsIdrIsNotKeyframeSingleNalus) {
+ H264PacketBuffer packet_buffer(/*allow_idr_only_keyframes=*/false);
+
+ RTC_UNUSED(packet_buffer.InsertPacket(
+ Packet(kH264SingleNalu).Pps().SeqNum(0).Time(0).Build()));
+ EXPECT_THAT(
+ packet_buffer
+ .InsertPacket(
+ Packet(kH264SingleNalu).Idr().SeqNum(1).Time(0).Marker().Build())
+ .packets,
+ IsEmpty());
+}
+
+TEST(H264PacketBufferTest, SpsIdrIsNotKeyframeSingleNalus) {
+ H264PacketBuffer packet_buffer(/*allow_idr_only_keyframes=*/false);
+
+ RTC_UNUSED(packet_buffer.InsertPacket(
+ Packet(kH264SingleNalu).Sps().SeqNum(0).Time(0).Build()));
+ EXPECT_THAT(
+ packet_buffer
+ .InsertPacket(
+ Packet(kH264SingleNalu).Idr().SeqNum(1).Time(0).Marker().Build())
+ .packets,
+ IsEmpty());
+}
+
+TEST(H264PacketBufferTest, SpsPpsIdrIsKeyframeStapA) {
+ H264PacketBuffer packet_buffer(/*allow_idr_only_keyframes=*/false);
+
+ EXPECT_THAT(packet_buffer
+ .InsertPacket(Packet(kH264StapA)
+ .Sps()
+ .Pps()
+ .Idr()
+ .SeqNum(0)
+ .Time(0)
+ .Marker()
+ .Build())
+ .packets,
+ SizeIs(1));
+}
+
+TEST(H264PacketBufferTest, PpsIdrIsNotKeyframeStapA) {
+ H264PacketBuffer packet_buffer(/*allow_idr_only_keyframes=*/false);
+
+ EXPECT_THAT(
+ packet_buffer
+ .InsertPacket(
+ Packet(kH264StapA).Pps().Idr().SeqNum(0).Time(0).Marker().Build())
+ .packets,
+ IsEmpty());
+}
+
+TEST(H264PacketBufferTest, SpsIdrIsNotKeyframeStapA) {
+ H264PacketBuffer packet_buffer(/*allow_idr_only_keyframes=*/false);
+
+ EXPECT_THAT(
+ packet_buffer
+ .InsertPacket(
+ Packet(kH264StapA).Sps().Idr().SeqNum(2).Time(2).Marker().Build())
+ .packets,
+ IsEmpty());
+
+ EXPECT_THAT(packet_buffer
+ .InsertPacket(Packet(kH264StapA)
+ .Sps()
+ .Pps()
+ .Idr()
+ .SeqNum(3)
+ .Time(3)
+ .Marker()
+ .Build())
+ .packets,
+ SizeIs(1));
+}
+
+TEST(H264PacketBufferTest, InsertingSpsPpsLastCompletesKeyframe) {
+ H264PacketBuffer packet_buffer(/*allow_idr_only_keyframes=*/false);
+
+ RTC_UNUSED(packet_buffer.InsertPacket(
+ Packet(kH264SingleNalu).Idr().SeqNum(2).Time(1).Marker().Build()));
+
+ EXPECT_THAT(packet_buffer
+ .InsertPacket(
+ Packet(kH264StapA).Sps().Pps().SeqNum(1).Time(1).Build())
+ .packets,
+ SizeIs(2));
+}
+
+TEST(H264PacketBufferTest, InsertingMidFuaCompletesFrame) {
+ H264PacketBuffer packet_buffer(/*allow_idr_only_keyframes=*/false);
+
+ EXPECT_THAT(packet_buffer
+ .InsertPacket(Packet(kH264StapA)
+ .Sps()
+ .Pps()
+ .Idr()
+ .SeqNum(0)
+ .Time(0)
+ .Marker()
+ .Build())
+ .packets,
+ SizeIs(1));
+
+ RTC_UNUSED(packet_buffer.InsertPacket(
+ Packet(kH264FuA).Slice().SeqNum(1).Time(1).AsFirstFragment().Build()));
+ RTC_UNUSED(packet_buffer.InsertPacket(
+ Packet(kH264FuA).Slice().SeqNum(3).Time(1).Marker().Build()));
+ EXPECT_THAT(
+ packet_buffer
+ .InsertPacket(Packet(kH264FuA).Slice().SeqNum(2).Time(1).Build())
+ .packets,
+ SizeIs(3));
+}
+
+TEST(H264PacketBufferTest, SeqNumJumpDoesNotCompleteFrame) {
+ H264PacketBuffer packet_buffer(/*allow_idr_only_keyframes=*/false);
+
+ EXPECT_THAT(packet_buffer
+ .InsertPacket(Packet(kH264StapA)
+ .Sps()
+ .Pps()
+ .Idr()
+ .SeqNum(0)
+ .Time(0)
+ .Marker()
+ .Build())
+ .packets,
+ SizeIs(1));
+
+ EXPECT_THAT(
+ packet_buffer
+ .InsertPacket(Packet(kH264FuA).Slice().SeqNum(1).Time(1).Build())
+ .packets,
+ IsEmpty());
+
+  // Add `kBufferSize` to make the index of the sequence number wrap around and
+  // land where the packet with sequence number 2 would have been stored.
+ EXPECT_THAT(packet_buffer
+ .InsertPacket(Packet(kH264FuA)
+ .Slice()
+ .SeqNum(2 + kBufferSize)
+ .Time(3)
+ .Marker()
+ .Build())
+ .packets,
+ IsEmpty());
+}
+
+TEST(H264PacketBufferTest, OldFramesAreNotCompletedAfterBufferWrap) {
+ H264PacketBuffer packet_buffer(/*allow_idr_only_keyframes=*/false);
+
+ EXPECT_THAT(packet_buffer
+ .InsertPacket(Packet(kH264SingleNalu)
+ .Slice()
+ .SeqNum(1)
+ .Time(1)
+ .Marker()
+ .Build())
+ .packets,
+ IsEmpty());
+
+  // New keyframe that precedes the packet with sequence number 1 in the buffer.
+ EXPECT_THAT(packet_buffer
+ .InsertPacket(Packet(kH264StapA)
+ .Sps()
+ .Pps()
+ .Idr()
+ .SeqNum(kBufferSize)
+ .Time(kBufferSize)
+ .Marker()
+ .Build())
+ .packets,
+ SizeIs(1));
+}
+
+TEST(H264PacketBufferTest, OldPacketsDontBlockNewPackets) {
+ H264PacketBuffer packet_buffer(/*allow_idr_only_keyframes=*/false);
+ EXPECT_THAT(packet_buffer
+ .InsertPacket(Packet(kH264StapA)
+ .Sps()
+ .Pps()
+ .Idr()
+ .SeqNum(kBufferSize)
+ .Time(kBufferSize)
+ .Marker()
+ .Build())
+ .packets,
+ SizeIs(1));
+
+ RTC_UNUSED(packet_buffer.InsertPacket(Packet(kH264FuA)
+ .Slice()
+ .SeqNum(kBufferSize + 1)
+ .Time(kBufferSize + 1)
+ .AsFirstFragment()
+ .Build()));
+
+ RTC_UNUSED(packet_buffer.InsertPacket(Packet(kH264FuA)
+ .Slice()
+ .SeqNum(kBufferSize + 3)
+ .Time(kBufferSize + 1)
+ .Marker()
+ .Build()));
+ EXPECT_THAT(
+ packet_buffer
+ .InsertPacket(Packet(kH264FuA).Slice().SeqNum(2).Time(2).Build())
+ .packets,
+ IsEmpty());
+
+ EXPECT_THAT(packet_buffer
+ .InsertPacket(Packet(kH264FuA)
+ .Slice()
+ .SeqNum(kBufferSize + 2)
+ .Time(kBufferSize + 1)
+ .Build())
+ .packets,
+ SizeIs(3));
+}
+
+TEST(H264PacketBufferTest, OldPacketDoesntCompleteFrame) {
+ H264PacketBuffer packet_buffer(/*allow_idr_only_keyframes=*/false);
+
+ EXPECT_THAT(packet_buffer
+ .InsertPacket(Packet(kH264StapA)
+ .Sps()
+ .Pps()
+ .Idr()
+ .SeqNum(kBufferSize)
+ .Time(kBufferSize)
+ .Marker()
+ .Build())
+ .packets,
+ SizeIs(1));
+
+ EXPECT_THAT(packet_buffer
+ .InsertPacket(Packet(kH264FuA)
+ .Slice()
+ .SeqNum(kBufferSize + 3)
+ .Time(kBufferSize + 1)
+ .Marker()
+ .Build())
+ .packets,
+ IsEmpty());
+
+ EXPECT_THAT(
+ packet_buffer
+ .InsertPacket(
+ Packet(kH264FuA).Slice().SeqNum(2).Time(2).Marker().Build())
+ .packets,
+ IsEmpty());
+
+ EXPECT_THAT(packet_buffer
+ .InsertPacket(Packet(kH264FuA)
+ .Slice()
+ .SeqNum(kBufferSize + 1)
+ .Time(kBufferSize + 1)
+ .AsFirstFragment()
+ .Build())
+ .packets,
+ IsEmpty());
+}
+
+TEST(H264PacketBufferTest, FrameBoundariesAreSet) {
+ H264PacketBuffer packet_buffer(/*allow_idr_only_keyframes=*/false);
+
+ auto key = packet_buffer.InsertPacket(
+ Packet(kH264StapA).Sps().Pps().Idr().SeqNum(1).Time(1).Marker().Build());
+
+ ASSERT_THAT(key.packets, SizeIs(1));
+ EXPECT_TRUE(key.packets[0]->video_header.is_first_packet_in_frame);
+ EXPECT_TRUE(key.packets[0]->video_header.is_last_packet_in_frame);
+
+ RTC_UNUSED(packet_buffer.InsertPacket(
+ Packet(kH264FuA).Slice().SeqNum(2).Time(2).Build()));
+ RTC_UNUSED(packet_buffer.InsertPacket(
+ Packet(kH264FuA).Slice().SeqNum(3).Time(2).Build()));
+ auto delta = packet_buffer.InsertPacket(
+ Packet(kH264FuA).Slice().SeqNum(4).Time(2).Marker().Build());
+
+ ASSERT_THAT(delta.packets, SizeIs(3));
+ EXPECT_TRUE(delta.packets[0]->video_header.is_first_packet_in_frame);
+ EXPECT_FALSE(delta.packets[0]->video_header.is_last_packet_in_frame);
+
+ EXPECT_FALSE(delta.packets[1]->video_header.is_first_packet_in_frame);
+ EXPECT_FALSE(delta.packets[1]->video_header.is_last_packet_in_frame);
+
+ EXPECT_FALSE(delta.packets[2]->video_header.is_first_packet_in_frame);
+ EXPECT_TRUE(delta.packets[2]->video_header.is_last_packet_in_frame);
+}
+
+TEST(H264PacketBufferTest, ResolutionSetOnFirstPacket) {
+ H264PacketBuffer packet_buffer(/*allow_idr_only_keyframes=*/false);
+
+ RTC_UNUSED(packet_buffer.InsertPacket(
+ Packet(kH264SingleNalu).Aud().SeqNum(1).Time(1).Build()));
+ auto res = packet_buffer.InsertPacket(Packet(kH264StapA)
+ .SpsWithResolution({320, 240})
+ .Pps()
+ .Idr()
+ .SeqNum(2)
+ .Time(1)
+ .Marker()
+ .Build());
+
+ ASSERT_THAT(res.packets, SizeIs(2));
+ EXPECT_THAT(res.packets[0]->video_header.width, Eq(320));
+ EXPECT_THAT(res.packets[0]->video_header.height, Eq(240));
+}
+
+TEST(H264PacketBufferTest, KeyframeAndDeltaFrameSetOnFirstPacket) {
+ H264PacketBuffer packet_buffer(/*allow_idr_only_keyframes=*/false);
+
+ RTC_UNUSED(packet_buffer.InsertPacket(
+ Packet(kH264SingleNalu).Aud().SeqNum(1).Time(1).Build()));
+ auto key = packet_buffer.InsertPacket(
+ Packet(kH264StapA).Sps().Pps().Idr().SeqNum(2).Time(1).Marker().Build());
+
+ auto delta = packet_buffer.InsertPacket(
+ Packet(kH264SingleNalu).Slice().SeqNum(3).Time(2).Marker().Build());
+
+ ASSERT_THAT(key.packets, SizeIs(2));
+ EXPECT_THAT(key.packets[0]->video_header.frame_type,
+ Eq(VideoFrameType::kVideoFrameKey));
+ ASSERT_THAT(delta.packets, SizeIs(1));
+ EXPECT_THAT(delta.packets[0]->video_header.frame_type,
+ Eq(VideoFrameType::kVideoFrameDelta));
+}
+
+TEST(H264PacketBufferTest, RtpSeqNumWrap) {
+ H264PacketBuffer packet_buffer(/*allow_idr_only_keyframes=*/false);
+
+ RTC_UNUSED(packet_buffer.InsertPacket(
+ Packet(kH264StapA).Sps().Pps().SeqNum(0xffff).Time(0).Build()));
+
+ RTC_UNUSED(packet_buffer.InsertPacket(
+ Packet(kH264FuA).Idr().SeqNum(0).Time(0).Build()));
+ EXPECT_THAT(packet_buffer
+ .InsertPacket(
+ Packet(kH264FuA).Idr().SeqNum(1).Time(0).Marker().Build())
+ .packets,
+ SizeIs(3));
+}
+
+TEST(H264PacketBufferTest, StapAFixedBitstream) {
+ H264PacketBuffer packet_buffer(/*allow_idr_only_keyframes=*/false);
+
+ auto packets = packet_buffer
+ .InsertPacket(Packet(kH264StapA)
+ .Sps({1, 2, 3})
+ .Pps({4, 5, 6})
+ .Idr({7, 8, 9})
+ .SeqNum(0)
+ .Time(0)
+ .Marker()
+ .Build())
+ .packets;
+
+ ASSERT_THAT(packets, SizeIs(1));
+ EXPECT_THAT(PacketPayload(packets[0]),
+ ElementsAreArray(FlatVector({StartCode(),
+ {kSps, 1, 2, 3},
+ StartCode(),
+ {kPps, 4, 5, 6},
+ StartCode(),
+ {kIdr, 7, 8, 9}})));
+}
+
+TEST(H264PacketBufferTest, SingleNaluFixedBitstream) {
+ H264PacketBuffer packet_buffer(/*allow_idr_only_keyframes=*/false);
+
+ RTC_UNUSED(packet_buffer.InsertPacket(
+ Packet(kH264SingleNalu).Sps({1, 2, 3}).SeqNum(0).Time(0).Build()));
+ RTC_UNUSED(packet_buffer.InsertPacket(
+ Packet(kH264SingleNalu).Pps({4, 5, 6}).SeqNum(1).Time(0).Build()));
+ auto packets = packet_buffer
+ .InsertPacket(Packet(kH264SingleNalu)
+ .Idr({7, 8, 9})
+ .SeqNum(2)
+ .Time(0)
+ .Marker()
+ .Build())
+ .packets;
+
+ ASSERT_THAT(packets, SizeIs(3));
+ EXPECT_THAT(PacketPayload(packets[0]),
+ ElementsAreArray(FlatVector({StartCode(), {kSps, 1, 2, 3}})));
+ EXPECT_THAT(PacketPayload(packets[1]),
+ ElementsAreArray(FlatVector({StartCode(), {kPps, 4, 5, 6}})));
+ EXPECT_THAT(PacketPayload(packets[2]),
+ ElementsAreArray(FlatVector({StartCode(), {kIdr, 7, 8, 9}})));
+}
+
+TEST(H264PacketBufferTest, StapaAndFuaFixedBitstream) {
+ H264PacketBuffer packet_buffer(/*allow_idr_only_keyframes=*/false);
+
+ RTC_UNUSED(packet_buffer.InsertPacket(Packet(kH264StapA)
+ .Sps({1, 2, 3})
+ .Pps({4, 5, 6})
+ .SeqNum(0)
+ .Time(0)
+ .Build()));
+ RTC_UNUSED(packet_buffer.InsertPacket(Packet(kH264FuA)
+ .Idr({8, 8, 8})
+ .SeqNum(1)
+ .Time(0)
+ .AsFirstFragment()
+ .Build()));
+ auto packets = packet_buffer
+ .InsertPacket(Packet(kH264FuA)
+ .Idr({9, 9, 9})
+ .SeqNum(2)
+ .Time(0)
+ .Marker()
+ .Build())
+ .packets;
+
+ ASSERT_THAT(packets, SizeIs(3));
+ EXPECT_THAT(
+ PacketPayload(packets[0]),
+ ElementsAreArray(FlatVector(
+ {StartCode(), {kSps, 1, 2, 3}, StartCode(), {kPps, 4, 5, 6}})));
+ EXPECT_THAT(PacketPayload(packets[1]),
+ ElementsAreArray(FlatVector({StartCode(), {8, 8, 8}})));
+ // Third is a continuation of second, so only the payload is expected.
+ EXPECT_THAT(PacketPayload(packets[2]),
+ ElementsAreArray(FlatVector({{9, 9, 9}})));
+}
+
+TEST(H264PacketBufferTest, FullPacketBufferDoesNotBlockKeyframe) {
+ H264PacketBuffer packet_buffer(/*allow_idr_only_keyframes=*/false);
+
+ for (int i = 0; i < kBufferSize; ++i) {
+ EXPECT_THAT(
+ packet_buffer
+ .InsertPacket(
+ Packet(kH264SingleNalu).Slice().SeqNum(i).Time(0).Build())
+ .packets,
+ IsEmpty());
+ }
+
+ EXPECT_THAT(packet_buffer
+ .InsertPacket(Packet(kH264StapA)
+ .Sps()
+ .Pps()
+ .Idr()
+ .SeqNum(kBufferSize)
+ .Time(1)
+ .Marker()
+ .Build())
+ .packets,
+ SizeIs(1));
+}
+
+TEST(H264PacketBufferTest, TooManyNalusInPacket) {
+ H264PacketBuffer packet_buffer(/*allow_idr_only_keyframes=*/false);
+
+ std::unique_ptr<H264PacketBuffer::Packet> packet(
+ Packet(kH264StapA).Sps().Pps().Idr().SeqNum(1).Time(1).Marker().Build());
+ auto& h264_header =
+ absl::get<RTPVideoHeaderH264>(packet->video_header.video_type_header);
+ h264_header.nalus_length = kMaxNalusPerPacket + 1;
+
+ EXPECT_THAT(packet_buffer.InsertPacket(std::move(packet)).packets, IsEmpty());
+}
+
+} // namespace
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/h264_sprop_parameter_sets.cc b/third_party/libwebrtc/modules/video_coding/h264_sprop_parameter_sets.cc
new file mode 100644
index 0000000000..a64f8885da
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/h264_sprop_parameter_sets.cc
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/h264_sprop_parameter_sets.h"
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <string>
+#include <vector>
+
+#include "rtc_base/logging.h"
+#include "rtc_base/third_party/base64/base64.h"
+
+namespace {
+
+bool DecodeAndConvert(const std::string& base64, std::vector<uint8_t>* binary) {
+ return rtc::Base64::DecodeFromArray(base64.data(), base64.size(),
+ rtc::Base64::DO_STRICT, binary, nullptr);
+}
+} // namespace
+
+namespace webrtc {
+
+bool H264SpropParameterSets::DecodeSprop(const std::string& sprop) {
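+  // Expected input (RFC 3984 sprop-parameter-sets): two base64-encoded
+  // NALUs separated by a comma, i.e. "<base64 SPS>,<base64 PPS>".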
+ size_t separator_pos = sprop.find(',');
+ RTC_LOG(LS_INFO) << "Parsing sprop \"" << sprop << "\"";
+ if ((separator_pos <= 0) || (separator_pos >= sprop.length() - 1)) {
+ RTC_LOG(LS_WARNING) << "Invalid seperator position " << separator_pos
+ << " *" << sprop << "*";
+ return false;
+ }
+ std::string sps_str = sprop.substr(0, separator_pos);
+ std::string pps_str = sprop.substr(separator_pos + 1, std::string::npos);
+ if (!DecodeAndConvert(sps_str, &sps_)) {
+ RTC_LOG(LS_WARNING) << "Failed to decode sprop/sps *" << sprop << "*";
+ return false;
+ }
+ if (!DecodeAndConvert(pps_str, &pps_)) {
+ RTC_LOG(LS_WARNING) << "Failed to decode sprop/pps *" << sprop << "*";
+ return false;
+ }
+ return true;
+}
+
+} // namespace webrtc
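
For orientation, a minimal caller-side sketch of the class above, assuming only the API declared in the accompanying header; the sprop string is the RFC 3984 example value that the unit test further down also uses:

  #include "modules/video_coding/h264_sprop_parameter_sets.h"

  void ParseSpropExample() {
    webrtc::H264SpropParameterSets sprop;
    // Comma-separated base64 SPS and PPS, as carried in SDP.
    if (sprop.DecodeSprop("Z0IACpZTBYmI,aMljiA==")) {
      // sprop.sps_nalu() and sprop.pps_nalu() now hold the raw NALU bytes,
      // ready for e.g. H264SpsPpsTracker::InsertSpsPpsNalus().
    }
  }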
diff --git a/third_party/libwebrtc/modules/video_coding/h264_sprop_parameter_sets.h b/third_party/libwebrtc/modules/video_coding/h264_sprop_parameter_sets.h
new file mode 100644
index 0000000000..8a32f31cc0
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/h264_sprop_parameter_sets.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_H264_SPROP_PARAMETER_SETS_H_
+#define MODULES_VIDEO_CODING_H264_SPROP_PARAMETER_SETS_H_
+
+#include <cstdint>
+#include <string>
+#include <vector>
+
+namespace webrtc {
+
+class H264SpropParameterSets {
+ public:
+ H264SpropParameterSets() {}
+
+ H264SpropParameterSets(const H264SpropParameterSets&) = delete;
+ H264SpropParameterSets& operator=(const H264SpropParameterSets&) = delete;
+
+ bool DecodeSprop(const std::string& sprop);
+ const std::vector<uint8_t>& sps_nalu() { return sps_; }
+ const std::vector<uint8_t>& pps_nalu() { return pps_; }
+
+ private:
+ std::vector<uint8_t> sps_;
+ std::vector<uint8_t> pps_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_H264_SPROP_PARAMETER_SETS_H_
diff --git a/third_party/libwebrtc/modules/video_coding/h264_sprop_parameter_sets_unittest.cc b/third_party/libwebrtc/modules/video_coding/h264_sprop_parameter_sets_unittest.cc
new file mode 100644
index 0000000000..ae263131a7
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/h264_sprop_parameter_sets_unittest.cc
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/h264_sprop_parameter_sets.h"
+
+#include <vector>
+
+#include "test/gtest.h"
+
+namespace webrtc {
+
+class H264SpropParameterSetsTest : public ::testing::Test {
+ public:
+ H264SpropParameterSets h264_sprop;
+};
+
+TEST_F(H264SpropParameterSetsTest, Base64DecodeSprop) {
+  // Example sprop string from https://tools.ietf.org/html/rfc3984.
+ EXPECT_TRUE(h264_sprop.DecodeSprop("Z0IACpZTBYmI,aMljiA=="));
+ static const std::vector<uint8_t> raw_sps{0x67, 0x42, 0x00, 0x0A, 0x96,
+ 0x53, 0x05, 0x89, 0x88};
+ static const std::vector<uint8_t> raw_pps{0x68, 0xC9, 0x63, 0x88};
+ EXPECT_EQ(raw_sps, h264_sprop.sps_nalu());
+ EXPECT_EQ(raw_pps, h264_sprop.pps_nalu());
+}
+
+TEST_F(H264SpropParameterSetsTest, InvalidData) {
+ EXPECT_FALSE(h264_sprop.DecodeSprop(","));
+ EXPECT_FALSE(h264_sprop.DecodeSprop(""));
+ EXPECT_FALSE(h264_sprop.DecodeSprop(",iA=="));
+ EXPECT_FALSE(h264_sprop.DecodeSprop("iA==,"));
+ EXPECT_TRUE(h264_sprop.DecodeSprop("iA==,iA=="));
+ EXPECT_FALSE(h264_sprop.DecodeSprop("--,--"));
+ EXPECT_FALSE(h264_sprop.DecodeSprop(",,"));
+ EXPECT_FALSE(h264_sprop.DecodeSprop("iA=="));
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/h264_sps_pps_tracker.cc b/third_party/libwebrtc/modules/video_coding/h264_sps_pps_tracker.cc
new file mode 100644
index 0000000000..0741a261e0
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/h264_sps_pps_tracker.cc
@@ -0,0 +1,271 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/h264_sps_pps_tracker.h"
+
+#include <memory>
+#include <string>
+#include <utility>
+
+#include "absl/types/variant.h"
+#include "common_video/h264/h264_common.h"
+#include "common_video/h264/pps_parser.h"
+#include "common_video/h264/sps_parser.h"
+#include "modules/video_coding/codecs/h264/include/h264_globals.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+namespace video_coding {
+
+namespace {
+const uint8_t start_code_h264[] = {0, 0, 0, 1};
+} // namespace
+
+H264SpsPpsTracker::H264SpsPpsTracker() = default;
+H264SpsPpsTracker::~H264SpsPpsTracker() = default;
+
+H264SpsPpsTracker::PpsInfo::PpsInfo() = default;
+H264SpsPpsTracker::PpsInfo::PpsInfo(PpsInfo&& rhs) = default;
+H264SpsPpsTracker::PpsInfo& H264SpsPpsTracker::PpsInfo::operator=(
+ PpsInfo&& rhs) = default;
+H264SpsPpsTracker::PpsInfo::~PpsInfo() = default;
+
+H264SpsPpsTracker::SpsInfo::SpsInfo() = default;
+H264SpsPpsTracker::SpsInfo::SpsInfo(SpsInfo&& rhs) = default;
+H264SpsPpsTracker::SpsInfo& H264SpsPpsTracker::SpsInfo::operator=(
+ SpsInfo&& rhs) = default;
+H264SpsPpsTracker::SpsInfo::~SpsInfo() = default;
+
+H264SpsPpsTracker::FixedBitstream H264SpsPpsTracker::CopyAndFixBitstream(
+ rtc::ArrayView<const uint8_t> bitstream,
+ RTPVideoHeader* video_header) {
+ RTC_DCHECK(video_header);
+ RTC_DCHECK(video_header->codec == kVideoCodecH264);
+ RTC_DCHECK_GT(bitstream.size(), 0);
+
+ auto& h264_header =
+ absl::get<RTPVideoHeaderH264>(video_header->video_type_header);
+
+ bool append_sps_pps = false;
+ auto sps = sps_data_.end();
+ auto pps = pps_data_.end();
+
+ for (size_t i = 0; i < h264_header.nalus_length; ++i) {
+ const NaluInfo& nalu = h264_header.nalus[i];
+ switch (nalu.type) {
+ case H264::NaluType::kSps: {
+ SpsInfo& sps_info = sps_data_[nalu.sps_id];
+ sps_info.width = video_header->width;
+ sps_info.height = video_header->height;
+ break;
+ }
+ case H264::NaluType::kPps: {
+ pps_data_[nalu.pps_id].sps_id = nalu.sps_id;
+ break;
+ }
+ case H264::NaluType::kIdr: {
+ // If this is the first packet of an IDR, make sure we have the required
+ // SPS/PPS and also calculate how much extra space we need in the buffer
+ // to prepend the SPS/PPS to the bitstream with start codes.
+ if (video_header->is_first_packet_in_frame) {
+ if (nalu.pps_id == -1) {
+ RTC_LOG(LS_WARNING) << "No PPS id in IDR nalu.";
+ return {kRequestKeyframe};
+ }
+
+ pps = pps_data_.find(nalu.pps_id);
+ if (pps == pps_data_.end()) {
+ RTC_LOG(LS_WARNING)
+ << "No PPS with id << " << nalu.pps_id << " received";
+ return {kRequestKeyframe};
+ }
+
+ sps = sps_data_.find(pps->second.sps_id);
+ if (sps == sps_data_.end()) {
+ RTC_LOG(LS_WARNING)
+ << "No SPS with id << " << pps->second.sps_id << " received";
+ return {kRequestKeyframe};
+ }
+
+        // The first packet of every keyframe should have its width and
+        // height set, so set them here in case the SPS/PPS was supplied out
+        // of band.
+ video_header->width = sps->second.width;
+ video_header->height = sps->second.height;
+
+ // If the SPS/PPS was supplied out of band then we will have saved
+ // the actual bitstream in `data`.
+ if (sps->second.data && pps->second.data) {
+ RTC_DCHECK_GT(sps->second.size, 0);
+ RTC_DCHECK_GT(pps->second.size, 0);
+ append_sps_pps = true;
+ }
+ }
+ break;
+ }
+ default:
+ break;
+ }
+ }
+
+ RTC_CHECK(!append_sps_pps ||
+ (sps != sps_data_.end() && pps != pps_data_.end()));
+
+ // Calculate how much space we need for the rest of the bitstream.
+ size_t required_size = 0;
+
+ if (append_sps_pps) {
+ required_size += sps->second.size + sizeof(start_code_h264);
+ required_size += pps->second.size + sizeof(start_code_h264);
+ }
+
+ if (h264_header.packetization_type == kH264StapA) {
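+    // STAP-A payload layout (RFC 6184): one aggregation NAL header byte,
+    // then repeated (2-byte big-endian NALU size, NALU) pairs; hence the
+    // +1 offset below.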
+ const uint8_t* nalu_ptr = bitstream.data() + 1;
+ while (nalu_ptr < bitstream.data() + bitstream.size() - 1) {
+ RTC_DCHECK(video_header->is_first_packet_in_frame);
+ required_size += sizeof(start_code_h264);
+
+ // The first two bytes describe the length of a segment.
+ uint16_t segment_length = nalu_ptr[0] << 8 | nalu_ptr[1];
+ nalu_ptr += 2;
+
+ required_size += segment_length;
+ nalu_ptr += segment_length;
+ }
+ } else {
+ if (h264_header.nalus_length > 0) {
+ required_size += sizeof(start_code_h264);
+ }
+ required_size += bitstream.size();
+ }
+
+ // Then we copy to the new buffer.
+ H264SpsPpsTracker::FixedBitstream fixed;
+ fixed.bitstream.EnsureCapacity(required_size);
+
+ if (append_sps_pps) {
+ // Insert SPS.
+ fixed.bitstream.AppendData(start_code_h264);
+ fixed.bitstream.AppendData(sps->second.data.get(), sps->second.size);
+
+ // Insert PPS.
+ fixed.bitstream.AppendData(start_code_h264);
+ fixed.bitstream.AppendData(pps->second.data.get(), pps->second.size);
+
+ // Update codec header to reflect the newly added SPS and PPS.
+ NaluInfo sps_info;
+ sps_info.type = H264::NaluType::kSps;
+ sps_info.sps_id = sps->first;
+ sps_info.pps_id = -1;
+ NaluInfo pps_info;
+ pps_info.type = H264::NaluType::kPps;
+ pps_info.sps_id = sps->first;
+ pps_info.pps_id = pps->first;
+ if (h264_header.nalus_length + 2 <= kMaxNalusPerPacket) {
+ h264_header.nalus[h264_header.nalus_length++] = sps_info;
+ h264_header.nalus[h264_header.nalus_length++] = pps_info;
+ } else {
+ RTC_LOG(LS_WARNING) << "Not enough space in H.264 codec header to insert "
+ "SPS/PPS provided out-of-band.";
+ }
+ }
+
+ // Copy the rest of the bitstream and insert start codes.
+ if (h264_header.packetization_type == kH264StapA) {
+ const uint8_t* nalu_ptr = bitstream.data() + 1;
+ while (nalu_ptr < bitstream.data() + bitstream.size() - 1) {
+ fixed.bitstream.AppendData(start_code_h264);
+
+ // The first two bytes describe the length of a segment.
+ uint16_t segment_length = nalu_ptr[0] << 8 | nalu_ptr[1];
+ nalu_ptr += 2;
+
+ size_t copy_end = nalu_ptr - bitstream.data() + segment_length;
+ if (copy_end > bitstream.size()) {
+ return {kDrop};
+ }
+
+ fixed.bitstream.AppendData(nalu_ptr, segment_length);
+ nalu_ptr += segment_length;
+ }
+ } else {
+ if (h264_header.nalus_length > 0) {
+ fixed.bitstream.AppendData(start_code_h264);
+ }
+ fixed.bitstream.AppendData(bitstream.data(), bitstream.size());
+ }
+
+ fixed.action = kInsert;
+ return fixed;
+}
+
+void H264SpsPpsTracker::InsertSpsPpsNalus(const std::vector<uint8_t>& sps,
+ const std::vector<uint8_t>& pps) {
+ constexpr size_t kNaluHeaderOffset = 1;
+ if (sps.size() < kNaluHeaderOffset) {
+ RTC_LOG(LS_WARNING) << "SPS size " << sps.size() << " is smaller than "
+ << kNaluHeaderOffset;
+ return;
+ }
+ if ((sps[0] & 0x1f) != H264::NaluType::kSps) {
+ RTC_LOG(LS_WARNING) << "SPS Nalu header missing";
+ return;
+ }
+ if (pps.size() < kNaluHeaderOffset) {
+ RTC_LOG(LS_WARNING) << "PPS size " << pps.size() << " is smaller than "
+ << kNaluHeaderOffset;
+ return;
+ }
+ if ((pps[0] & 0x1f) != H264::NaluType::kPps) {
+ RTC_LOG(LS_WARNING) << "SPS Nalu header missing";
+ return;
+ }
+ absl::optional<SpsParser::SpsState> parsed_sps = SpsParser::ParseSps(
+ sps.data() + kNaluHeaderOffset, sps.size() - kNaluHeaderOffset);
+ absl::optional<PpsParser::PpsState> parsed_pps = PpsParser::ParsePps(
+ pps.data() + kNaluHeaderOffset, pps.size() - kNaluHeaderOffset);
+
+ if (!parsed_sps) {
+ RTC_LOG(LS_WARNING) << "Failed to parse SPS.";
+ }
+
+ if (!parsed_pps) {
+ RTC_LOG(LS_WARNING) << "Failed to parse PPS.";
+ }
+
+ if (!parsed_pps || !parsed_sps) {
+ return;
+ }
+
+ SpsInfo sps_info;
+ sps_info.size = sps.size();
+ sps_info.width = parsed_sps->width;
+ sps_info.height = parsed_sps->height;
+ uint8_t* sps_data = new uint8_t[sps_info.size];
+ memcpy(sps_data, sps.data(), sps_info.size);
+ sps_info.data.reset(sps_data);
+ sps_data_[parsed_sps->id] = std::move(sps_info);
+
+ PpsInfo pps_info;
+ pps_info.size = pps.size();
+ pps_info.sps_id = parsed_pps->sps_id;
+ uint8_t* pps_data = new uint8_t[pps_info.size];
+ memcpy(pps_data, pps.data(), pps_info.size);
+ pps_info.data.reset(pps_data);
+ pps_data_[parsed_pps->id] = std::move(pps_info);
+
+ RTC_LOG(LS_INFO) << "Inserted SPS id " << parsed_sps->id << " and PPS id "
+ << parsed_pps->id << " (referencing SPS "
+ << parsed_pps->sps_id << ")";
+}
+
+} // namespace video_coding
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/h264_sps_pps_tracker.h b/third_party/libwebrtc/modules/video_coding/h264_sps_pps_tracker.h
new file mode 100644
index 0000000000..600e2ee397
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/h264_sps_pps_tracker.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_H264_SPS_PPS_TRACKER_H_
+#define MODULES_VIDEO_CODING_H264_SPS_PPS_TRACKER_H_
+
+#include <cstddef>
+#include <cstdint>
+#include <map>
+#include <memory>
+#include <vector>
+
+#include "api/array_view.h"
+#include "modules/rtp_rtcp/source/rtp_video_header.h"
+#include "rtc_base/copy_on_write_buffer.h"
+
+namespace webrtc {
+namespace video_coding {
+
+class H264SpsPpsTracker {
+ public:
+ enum PacketAction { kInsert, kDrop, kRequestKeyframe };
+ struct FixedBitstream {
+ PacketAction action;
+ rtc::CopyOnWriteBuffer bitstream;
+ };
+
+ H264SpsPpsTracker();
+ ~H264SpsPpsTracker();
+
+ // Returns fixed bitstream and modifies `video_header`.
+ FixedBitstream CopyAndFixBitstream(rtc::ArrayView<const uint8_t> bitstream,
+ RTPVideoHeader* video_header);
+
+ void InsertSpsPpsNalus(const std::vector<uint8_t>& sps,
+ const std::vector<uint8_t>& pps);
+
+ private:
+ struct PpsInfo {
+ PpsInfo();
+ PpsInfo(PpsInfo&& rhs);
+ PpsInfo& operator=(PpsInfo&& rhs);
+ ~PpsInfo();
+
+ int sps_id = -1;
+ size_t size = 0;
+ std::unique_ptr<uint8_t[]> data;
+ };
+
+ struct SpsInfo {
+ SpsInfo();
+ SpsInfo(SpsInfo&& rhs);
+ SpsInfo& operator=(SpsInfo&& rhs);
+ ~SpsInfo();
+
+ size_t size = 0;
+ int width = -1;
+ int height = -1;
+ std::unique_ptr<uint8_t[]> data;
+ };
+
+ std::map<uint32_t, PpsInfo> pps_data_;
+ std::map<uint32_t, SpsInfo> sps_data_;
+};
+
+} // namespace video_coding
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_H264_SPS_PPS_TRACKER_H_
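
A sketch of the intended receive-path flow, pieced together from the declarations above; `payload` and `video_header` stand in for a depacketized H.264 payload and its parsed RTPVideoHeader, and the includes from the header are assumed:

  void HandleH264Payload(webrtc::video_coding::H264SpsPpsTracker& tracker,
                         rtc::ArrayView<const uint8_t> payload,
                         webrtc::RTPVideoHeader* video_header) {
    using Tracker = webrtc::video_coding::H264SpsPpsTracker;
    Tracker::FixedBitstream fixed =
        tracker.CopyAndFixBitstream(payload, video_header);
    switch (fixed.action) {
      case Tracker::kInsert:
        // fixed.bitstream now carries start codes and, for IDR frames, any
        // SPS/PPS that were received out of band.
        break;
      case Tracker::kDrop:
        // Malformed packet (e.g. bad STAP-A segment length); discard it.
        break;
      case Tracker::kRequestKeyframe:
        // Required SPS/PPS not yet seen; ask the sender for a keyframe.
        break;
    }
  }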
diff --git a/third_party/libwebrtc/modules/video_coding/h264_sps_pps_tracker_unittest.cc b/third_party/libwebrtc/modules/video_coding/h264_sps_pps_tracker_unittest.cc
new file mode 100644
index 0000000000..04abb75e4e
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/h264_sps_pps_tracker_unittest.cc
@@ -0,0 +1,368 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/h264_sps_pps_tracker.h"
+
+#include <string.h>
+
+#include <vector>
+
+#include "absl/types/variant.h"
+#include "common_video/h264/h264_common.h"
+#include "modules/rtp_rtcp/source/rtp_video_header.h"
+#include "modules/video_coding/codecs/h264/include/h264_globals.h"
+#include "modules/video_coding/packet.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace video_coding {
+namespace {
+
+using ::testing::ElementsAreArray;
+
+const uint8_t start_code[] = {0, 0, 0, 1};
+
+rtc::ArrayView<const uint8_t> Bitstream(
+ const H264SpsPpsTracker::FixedBitstream& fixed) {
+ return fixed.bitstream;
+}
+
+void ExpectSpsPpsIdr(const RTPVideoHeaderH264& codec_header,
+ uint8_t sps_id,
+ uint8_t pps_id) {
+ bool contains_sps = false;
+ bool contains_pps = false;
+ bool contains_idr = false;
+ for (const auto& nalu : codec_header.nalus) {
+ if (nalu.type == H264::NaluType::kSps) {
+ EXPECT_EQ(sps_id, nalu.sps_id);
+ contains_sps = true;
+ } else if (nalu.type == H264::NaluType::kPps) {
+ EXPECT_EQ(sps_id, nalu.sps_id);
+ EXPECT_EQ(pps_id, nalu.pps_id);
+ contains_pps = true;
+ } else if (nalu.type == H264::NaluType::kIdr) {
+ EXPECT_EQ(pps_id, nalu.pps_id);
+ contains_idr = true;
+ }
+ }
+ EXPECT_TRUE(contains_sps);
+ EXPECT_TRUE(contains_pps);
+ EXPECT_TRUE(contains_idr);
+}
+
+class H264VideoHeader : public RTPVideoHeader {
+ public:
+ H264VideoHeader() {
+ codec = kVideoCodecH264;
+ is_first_packet_in_frame = false;
+ auto& h264_header = video_type_header.emplace<RTPVideoHeaderH264>();
+ h264_header.nalus_length = 0;
+ h264_header.packetization_type = kH264SingleNalu;
+ }
+
+ RTPVideoHeaderH264& h264() {
+ return absl::get<RTPVideoHeaderH264>(video_type_header);
+ }
+};
+
+} // namespace
+
+class TestH264SpsPpsTracker : public ::testing::Test {
+ public:
+ void AddSps(H264VideoHeader* header,
+ uint8_t sps_id,
+ std::vector<uint8_t>* data) {
+ NaluInfo info;
+ info.type = H264::NaluType::kSps;
+ info.sps_id = sps_id;
+ info.pps_id = -1;
+ data->push_back(H264::NaluType::kSps);
+ data->push_back(sps_id); // The sps data, just a single byte.
+
+ header->h264().nalus[header->h264().nalus_length++] = info;
+ }
+
+ void AddPps(H264VideoHeader* header,
+ uint8_t sps_id,
+ uint8_t pps_id,
+ std::vector<uint8_t>* data) {
+ NaluInfo info;
+ info.type = H264::NaluType::kPps;
+ info.sps_id = sps_id;
+ info.pps_id = pps_id;
+ data->push_back(H264::NaluType::kPps);
+ data->push_back(pps_id); // The pps data, just a single byte.
+
+ header->h264().nalus[header->h264().nalus_length++] = info;
+ }
+
+ void AddIdr(H264VideoHeader* header, int pps_id) {
+ NaluInfo info;
+ info.type = H264::NaluType::kIdr;
+ info.sps_id = -1;
+ info.pps_id = pps_id;
+
+ header->h264().nalus[header->h264().nalus_length++] = info;
+ }
+
+ protected:
+ H264SpsPpsTracker tracker_;
+};
+
+TEST_F(TestH264SpsPpsTracker, NoNalus) {
+ uint8_t data[] = {1, 2, 3};
+ H264VideoHeader header;
+ header.h264().packetization_type = kH264FuA;
+
+ H264SpsPpsTracker::FixedBitstream fixed =
+ tracker_.CopyAndFixBitstream(data, &header);
+
+ EXPECT_EQ(fixed.action, H264SpsPpsTracker::kInsert);
+ EXPECT_THAT(Bitstream(fixed), ElementsAreArray(data));
+}
+
+TEST_F(TestH264SpsPpsTracker, FuAFirstPacket) {
+ uint8_t data[] = {1, 2, 3};
+ H264VideoHeader header;
+ header.h264().packetization_type = kH264FuA;
+ header.h264().nalus_length = 1;
+ header.is_first_packet_in_frame = true;
+
+ H264SpsPpsTracker::FixedBitstream fixed =
+ tracker_.CopyAndFixBitstream(data, &header);
+
+ EXPECT_EQ(fixed.action, H264SpsPpsTracker::kInsert);
+ std::vector<uint8_t> expected;
+ expected.insert(expected.end(), start_code, start_code + sizeof(start_code));
+ expected.insert(expected.end(), {1, 2, 3});
+ EXPECT_THAT(Bitstream(fixed), ElementsAreArray(expected));
+}
+
+TEST_F(TestH264SpsPpsTracker, StapAIncorrectSegmentLength) {
+ uint8_t data[] = {0, 0, 2, 0};
+ H264VideoHeader header;
+ header.h264().packetization_type = kH264StapA;
+ header.is_first_packet_in_frame = true;
+
+ EXPECT_EQ(tracker_.CopyAndFixBitstream(data, &header).action,
+ H264SpsPpsTracker::kDrop);
+}
+
+TEST_F(TestH264SpsPpsTracker, SingleNaluInsertStartCode) {
+ uint8_t data[] = {1, 2, 3};
+ H264VideoHeader header;
+ header.h264().nalus_length = 1;
+
+ H264SpsPpsTracker::FixedBitstream fixed =
+ tracker_.CopyAndFixBitstream(data, &header);
+
+ EXPECT_EQ(fixed.action, H264SpsPpsTracker::kInsert);
+ std::vector<uint8_t> expected;
+ expected.insert(expected.end(), start_code, start_code + sizeof(start_code));
+ expected.insert(expected.end(), {1, 2, 3});
+ EXPECT_THAT(Bitstream(fixed), ElementsAreArray(expected));
+}
+
+TEST_F(TestH264SpsPpsTracker, NoStartCodeInsertedForSubsequentFuAPacket) {
+ std::vector<uint8_t> data = {1, 2, 3};
+ H264VideoHeader header;
+ header.h264().packetization_type = kH264FuA;
+  // Since no NALU begins in this packet, nalus_length is zero.
+ header.h264().nalus_length = 0;
+
+ H264SpsPpsTracker::FixedBitstream fixed =
+ tracker_.CopyAndFixBitstream(data, &header);
+
+ EXPECT_EQ(fixed.action, H264SpsPpsTracker::kInsert);
+ EXPECT_THAT(Bitstream(fixed), ElementsAreArray(data));
+}
+
+TEST_F(TestH264SpsPpsTracker, IdrFirstPacketNoSpsPpsInserted) {
+ std::vector<uint8_t> data = {1, 2, 3};
+ H264VideoHeader header;
+ header.is_first_packet_in_frame = true;
+ AddIdr(&header, 0);
+
+ EXPECT_EQ(tracker_.CopyAndFixBitstream(data, &header).action,
+ H264SpsPpsTracker::kRequestKeyframe);
+}
+
+TEST_F(TestH264SpsPpsTracker, IdrFirstPacketNoPpsInserted) {
+ std::vector<uint8_t> data = {1, 2, 3};
+ H264VideoHeader header;
+ header.is_first_packet_in_frame = true;
+ AddSps(&header, 0, &data);
+ AddIdr(&header, 0);
+
+ EXPECT_EQ(tracker_.CopyAndFixBitstream(data, &header).action,
+ H264SpsPpsTracker::kRequestKeyframe);
+}
+
+TEST_F(TestH264SpsPpsTracker, IdrFirstPacketNoSpsInserted) {
+ std::vector<uint8_t> data = {1, 2, 3};
+ H264VideoHeader header;
+ header.is_first_packet_in_frame = true;
+ AddPps(&header, 0, 0, &data);
+ AddIdr(&header, 0);
+
+ EXPECT_EQ(tracker_.CopyAndFixBitstream(data, &header).action,
+ H264SpsPpsTracker::kRequestKeyframe);
+}
+
+TEST_F(TestH264SpsPpsTracker, SpsPpsPacketThenIdrFirstPacket) {
+ std::vector<uint8_t> data;
+ H264VideoHeader sps_pps_header;
+ // Insert SPS/PPS
+ AddSps(&sps_pps_header, 0, &data);
+ AddPps(&sps_pps_header, 0, 1, &data);
+
+ EXPECT_EQ(tracker_.CopyAndFixBitstream(data, &sps_pps_header).action,
+ H264SpsPpsTracker::kInsert);
+
+ // Insert first packet of the IDR
+ H264VideoHeader idr_header;
+ idr_header.is_first_packet_in_frame = true;
+ AddIdr(&idr_header, 1);
+ data = {1, 2, 3};
+
+ H264SpsPpsTracker::FixedBitstream fixed =
+ tracker_.CopyAndFixBitstream(data, &idr_header);
+ EXPECT_EQ(fixed.action, H264SpsPpsTracker::kInsert);
+
+ std::vector<uint8_t> expected;
+ expected.insert(expected.end(), start_code, start_code + sizeof(start_code));
+ expected.insert(expected.end(), {1, 2, 3});
+ EXPECT_THAT(Bitstream(fixed), ElementsAreArray(expected));
+}
+
+TEST_F(TestH264SpsPpsTracker, SpsPpsIdrInStapA) {
+ std::vector<uint8_t> data;
+ H264VideoHeader header;
+ header.h264().packetization_type = kH264StapA;
+ header.is_first_packet_in_frame = true; // Always true for StapA
+
+ data.insert(data.end(), {0}); // First byte is ignored
+ data.insert(data.end(), {0, 2}); // Length of segment
+ AddSps(&header, 13, &data);
+ data.insert(data.end(), {0, 2}); // Length of segment
+ AddPps(&header, 13, 27, &data);
+ data.insert(data.end(), {0, 5}); // Length of segment
+ AddIdr(&header, 27);
+ data.insert(data.end(), {1, 2, 3, 2, 1});
+
+ H264SpsPpsTracker::FixedBitstream fixed =
+ tracker_.CopyAndFixBitstream(data, &header);
+
+ EXPECT_THAT(fixed.action, H264SpsPpsTracker::kInsert);
+
+ std::vector<uint8_t> expected;
+ expected.insert(expected.end(), start_code, start_code + sizeof(start_code));
+ expected.insert(expected.end(), {H264::NaluType::kSps, 13});
+ expected.insert(expected.end(), start_code, start_code + sizeof(start_code));
+ expected.insert(expected.end(), {H264::NaluType::kPps, 27});
+ expected.insert(expected.end(), start_code, start_code + sizeof(start_code));
+ expected.insert(expected.end(), {1, 2, 3, 2, 1});
+ EXPECT_THAT(Bitstream(fixed), ElementsAreArray(expected));
+}
+
+TEST_F(TestH264SpsPpsTracker, SpsPpsOutOfBand) {
+ constexpr uint8_t kData[] = {1, 2, 3};
+
+  // Generated by "ffmpeg -r 30 -f avfoundation -i "default" out.h264" on macOS.
+ // width: 320, height: 240
+ const std::vector<uint8_t> sps(
+ {0x67, 0x7a, 0x00, 0x0d, 0xbc, 0xd9, 0x41, 0x41, 0xfa, 0x10, 0x00, 0x00,
+ 0x03, 0x00, 0x10, 0x00, 0x00, 0x03, 0x03, 0xc0, 0xf1, 0x42, 0x99, 0x60});
+ const std::vector<uint8_t> pps({0x68, 0xeb, 0xe3, 0xcb, 0x22, 0xc0});
+ tracker_.InsertSpsPpsNalus(sps, pps);
+
+ // Insert first packet of the IDR.
+ H264VideoHeader idr_header;
+ idr_header.is_first_packet_in_frame = true;
+ AddIdr(&idr_header, 0);
+ EXPECT_EQ(idr_header.h264().nalus_length, 1u);
+
+ H264SpsPpsTracker::FixedBitstream fixed =
+ tracker_.CopyAndFixBitstream(kData, &idr_header);
+
+ EXPECT_EQ(idr_header.h264().nalus_length, 3u);
+ EXPECT_EQ(idr_header.width, 320u);
+ EXPECT_EQ(idr_header.height, 240u);
+ ExpectSpsPpsIdr(idr_header.h264(), 0, 0);
+}
+
+TEST_F(TestH264SpsPpsTracker, SpsPpsOutOfBandWrongNaluHeader) {
+ constexpr uint8_t kData[] = {1, 2, 3};
+
+  // Generated by "ffmpeg -r 30 -f avfoundation -i "default" out.h264" on macOS.
+  // NALU headers manipulated afterwards.
+ const std::vector<uint8_t> sps(
+ {0xff, 0x7a, 0x00, 0x0d, 0xbc, 0xd9, 0x41, 0x41, 0xfa, 0x10, 0x00, 0x00,
+ 0x03, 0x00, 0x10, 0x00, 0x00, 0x03, 0x03, 0xc0, 0xf1, 0x42, 0x99, 0x60});
+ const std::vector<uint8_t> pps({0xff, 0xeb, 0xe3, 0xcb, 0x22, 0xc0});
+ tracker_.InsertSpsPpsNalus(sps, pps);
+
+ // Insert first packet of the IDR.
+ H264VideoHeader idr_header;
+ idr_header.is_first_packet_in_frame = true;
+ AddIdr(&idr_header, 0);
+
+ EXPECT_EQ(tracker_.CopyAndFixBitstream(kData, &idr_header).action,
+ H264SpsPpsTracker::kRequestKeyframe);
+}
+
+TEST_F(TestH264SpsPpsTracker, SpsPpsOutOfBandIncompleteNalu) {
+ constexpr uint8_t kData[] = {1, 2, 3};
+
+  // Generated by "ffmpeg -r 30 -f avfoundation -i "default" out.h264" on macOS.
+  // NALUs damaged afterwards.
+ const std::vector<uint8_t> sps({0x67, 0x7a, 0x00, 0x0d, 0xbc, 0xd9});
+ const std::vector<uint8_t> pps({0x68, 0xeb, 0xe3, 0xcb, 0x22, 0xc0});
+ tracker_.InsertSpsPpsNalus(sps, pps);
+
+ // Insert first packet of the IDR.
+ H264VideoHeader idr_header;
+ idr_header.is_first_packet_in_frame = true;
+ AddIdr(&idr_header, 0);
+
+ EXPECT_EQ(tracker_.CopyAndFixBitstream(kData, &idr_header).action,
+ H264SpsPpsTracker::kRequestKeyframe);
+}
+
+TEST_F(TestH264SpsPpsTracker, SaveRestoreWidthHeight) {
+ std::vector<uint8_t> data;
+
+ // Insert an SPS/PPS packet with width/height and make sure
+ // that information is set on the first IDR packet.
+ H264VideoHeader sps_pps_header;
+ AddSps(&sps_pps_header, 0, &data);
+ AddPps(&sps_pps_header, 0, 1, &data);
+ sps_pps_header.width = 320;
+ sps_pps_header.height = 240;
+
+ EXPECT_EQ(tracker_.CopyAndFixBitstream(data, &sps_pps_header).action,
+ H264SpsPpsTracker::kInsert);
+
+ H264VideoHeader idr_header;
+ idr_header.is_first_packet_in_frame = true;
+ AddIdr(&idr_header, 1);
+ data.insert(data.end(), {1, 2, 3});
+
+ EXPECT_EQ(tracker_.CopyAndFixBitstream(data, &idr_header).action,
+ H264SpsPpsTracker::kInsert);
+
+ EXPECT_EQ(idr_header.width, 320);
+ EXPECT_EQ(idr_header.height, 240);
+}
+
+} // namespace video_coding
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/histogram.cc b/third_party/libwebrtc/modules/video_coding/histogram.cc
new file mode 100644
index 0000000000..4e90b19eec
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/histogram.cc
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/histogram.h"
+
+#include <algorithm>
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace video_coding {
+Histogram::Histogram(size_t num_buckets, size_t max_num_values) {
+ RTC_DCHECK_GT(num_buckets, 0);
+ RTC_DCHECK_GT(max_num_values, 0);
+ buckets_.resize(num_buckets);
+ values_.reserve(max_num_values);
+ index_ = 0;
+}
+
+void Histogram::Add(size_t value) {
+ value = std::min<size_t>(value, buckets_.size() - 1);
+ if (index_ < values_.size()) {
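+    // The circular buffer is full: remove the value about to be overwritten
+    // from its bucket before recording the replacement.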
+ --buckets_[values_[index_]];
+ RTC_DCHECK_LT(values_[index_], buckets_.size());
+ values_[index_] = value;
+ } else {
+ values_.emplace_back(value);
+ }
+
+ ++buckets_[value];
+ index_ = (index_ + 1) % values_.capacity();
+}
+
+size_t Histogram::InverseCdf(float probability) const {
+ RTC_DCHECK_GE(probability, 0.f);
+ RTC_DCHECK_LE(probability, 1.f);
+ RTC_DCHECK_GT(values_.size(), 0ul);
+
+ size_t bucket = 0;
+ float accumulated_probability = 0;
+ while (accumulated_probability < probability && bucket < buckets_.size()) {
+ accumulated_probability +=
+ static_cast<float>(buckets_[bucket]) / values_.size();
+ ++bucket;
+ }
+ return bucket;
+}
+
+size_t Histogram::NumValues() const {
+ return values_.size();
+}
+
+} // namespace video_coding
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/histogram.h b/third_party/libwebrtc/modules/video_coding/histogram.h
new file mode 100644
index 0000000000..aa8d44d80f
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/histogram.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_HISTOGRAM_H_
+#define MODULES_VIDEO_CODING_HISTOGRAM_H_
+
+#include <cstddef>
+#include <vector>
+
+namespace webrtc {
+namespace video_coding {
+class Histogram {
+ public:
+  // A discrete histogram with buckets covering the range [0, num_buckets).
+  // Values greater than or equal to num_buckets are placed in the last bucket.
+ Histogram(size_t num_buckets, size_t max_num_values);
+
+  // Add a value to the histogram. If the histogram already holds
+  // max_num_values values, the oldest value is replaced with the new one.
+ void Add(size_t value);
+
+ // Calculates how many buckets have to be summed in order to accumulate at
+ // least the given probability.
+ size_t InverseCdf(float probability) const;
+
+  // Returns the number of values that make up this histogram.
+ size_t NumValues() const;
+
+ private:
+ // A circular buffer that holds the values that make up the histogram.
+ std::vector<size_t> values_;
+ std::vector<size_t> buckets_;
+ size_t index_;
+};
+
+} // namespace video_coding
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_HISTOGRAM_H_
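
A small usage sketch of the API above, mirroring the unit test that follows (five buckets, room for ten values):

  webrtc::video_coding::Histogram histogram(/*num_buckets=*/5,
                                            /*max_num_values=*/10);
  for (int i = 0; i < 5; ++i) histogram.Add(0);
  for (int i = 0; i < 5; ++i) histogram.Add(1);
  // Bucket 0 holds half of the probability mass, so one bucket suffices for
  // p = 0.5, while anything above that also needs bucket 1.
  size_t at_half = histogram.InverseCdf(0.5f);           // == 1
  size_t above_half = histogram.InverseCdf(0.5000001f);  // == 2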
diff --git a/third_party/libwebrtc/modules/video_coding/histogram_unittest.cc b/third_party/libwebrtc/modules/video_coding/histogram_unittest.cc
new file mode 100644
index 0000000000..3690a39398
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/histogram_unittest.cc
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/histogram.h"
+
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace video_coding {
+
+class TestHistogram : public ::testing::Test {
+ protected:
+ TestHistogram() : histogram_(5, 10) {}
+ Histogram histogram_;
+};
+
+TEST_F(TestHistogram, NumValues) {
+ EXPECT_EQ(0ul, histogram_.NumValues());
+ histogram_.Add(0);
+ EXPECT_EQ(1ul, histogram_.NumValues());
+}
+
+TEST_F(TestHistogram, InverseCdf) {
+ histogram_.Add(0);
+ histogram_.Add(1);
+ histogram_.Add(2);
+ histogram_.Add(3);
+ histogram_.Add(4);
+ EXPECT_EQ(5ul, histogram_.NumValues());
+ EXPECT_EQ(1ul, histogram_.InverseCdf(0.2f));
+ EXPECT_EQ(2ul, histogram_.InverseCdf(0.2000001f));
+ EXPECT_EQ(4ul, histogram_.InverseCdf(0.8f));
+
+ histogram_.Add(0);
+ EXPECT_EQ(6ul, histogram_.NumValues());
+ EXPECT_EQ(1ul, histogram_.InverseCdf(0.2f));
+ EXPECT_EQ(1ul, histogram_.InverseCdf(0.2000001f));
+}
+
+TEST_F(TestHistogram, ReplaceOldValues) {
+ histogram_.Add(0);
+ histogram_.Add(0);
+ histogram_.Add(0);
+ histogram_.Add(0);
+ histogram_.Add(0);
+ histogram_.Add(1);
+ histogram_.Add(1);
+ histogram_.Add(1);
+ histogram_.Add(1);
+ histogram_.Add(1);
+ EXPECT_EQ(10ul, histogram_.NumValues());
+ EXPECT_EQ(1ul, histogram_.InverseCdf(0.5f));
+ EXPECT_EQ(2ul, histogram_.InverseCdf(0.5000001f));
+
+ histogram_.Add(4);
+ histogram_.Add(4);
+ histogram_.Add(4);
+ histogram_.Add(4);
+ EXPECT_EQ(10ul, histogram_.NumValues());
+ EXPECT_EQ(1ul, histogram_.InverseCdf(0.1f));
+ EXPECT_EQ(2ul, histogram_.InverseCdf(0.5f));
+
+ histogram_.Add(20);
+ EXPECT_EQ(10ul, histogram_.NumValues());
+ EXPECT_EQ(2ul, histogram_.InverseCdf(0.5f));
+ EXPECT_EQ(5ul, histogram_.InverseCdf(0.5000001f));
+}
+
+} // namespace video_coding
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/include/video_codec_initializer.h b/third_party/libwebrtc/modules/video_coding/include/video_codec_initializer.h
new file mode 100644
index 0000000000..e979f9c867
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/include/video_codec_initializer.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_INCLUDE_VIDEO_CODEC_INITIALIZER_H_
+#define MODULES_VIDEO_CODING_INCLUDE_VIDEO_CODEC_INITIALIZER_H_
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "api/video_codecs/video_encoder_config.h"
+
+namespace webrtc {
+
+class VideoBitrateAllocator;
+class VideoCodec;
+
+class VideoCodecInitializer {
+ public:
+ // Takes a VideoEncoderConfig and the VideoStream configuration and
+ // translates them into the old school VideoCodec type.
+ // It also creates a VideoBitrateAllocator instance, suitable for the codec
+  // type used. For instance, VP8 will create an allocator that can handle
+  // simulcast and temporal layering.
+  // GetBitrateAllocator is called implicitly from here; no need to call it
+  // again.
+ static bool SetupCodec(const VideoEncoderConfig& config,
+ const std::vector<VideoStream>& streams,
+ VideoCodec* codec);
+
+ private:
+ static VideoCodec VideoEncoderConfigToVideoCodec(
+ const VideoEncoderConfig& config,
+ const std::vector<VideoStream>& streams);
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_INCLUDE_VIDEO_CODEC_INITIALIZER_H_
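
A hypothetical call site for the API above; `encoder_config` and `streams` are placeholders for an existing VideoEncoderConfig and its VideoStream list:

  webrtc::VideoCodec codec;
  if (!webrtc::VideoCodecInitializer::SetupCodec(encoder_config, streams,
                                                 &codec)) {
    // The configuration could not be translated into a VideoCodec.
  }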
diff --git a/third_party/libwebrtc/modules/video_coding/include/video_codec_interface.cc b/third_party/libwebrtc/modules/video_coding/include/video_codec_interface.cc
new file mode 100644
index 0000000000..bd033b6c57
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/include/video_codec_interface.cc
@@ -0,0 +1,20 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/video_coding/include/video_codec_interface.h"
+
+namespace webrtc {
+CodecSpecificInfo::CodecSpecificInfo() : codecType(kVideoCodecGeneric) {
+ memset(&codecSpecific, 0, sizeof(codecSpecific));
+}
+
+CodecSpecificInfo::CodecSpecificInfo(const CodecSpecificInfo&) = default;
+CodecSpecificInfo::~CodecSpecificInfo() = default;
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/include/video_codec_interface.h b/third_party/libwebrtc/modules/video_coding/include/video_codec_interface.h
new file mode 100644
index 0000000000..261ffb11c1
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/include/video_codec_interface.h
@@ -0,0 +1,119 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_INCLUDE_VIDEO_CODEC_INTERFACE_H_
+#define MODULES_VIDEO_CODING_INCLUDE_VIDEO_CODEC_INTERFACE_H_
+
+#include <vector>
+
+#include "absl/base/attributes.h"
+#include "absl/types/optional.h"
+#include "api/video/video_frame.h"
+#include "api/video_codecs/video_decoder.h"
+#include "api/video_codecs/video_encoder.h"
+#include "common_video/generic_frame_descriptor/generic_frame_info.h"
+#include "modules/video_coding/codecs/h264/include/h264_globals.h"
+#include "modules/video_coding/codecs/vp9/include/vp9_globals.h"
+#include "modules/video_coding/include/video_error_codes.h"
+#include "rtc_base/system/rtc_export.h"
+
+namespace webrtc {
+
+// Note: If any pointers are added to this struct, it must be fitted
+// with a copy-constructor. See below.
+// Hack alert - the code assumes that this struct is memset when constructed.
+struct CodecSpecificInfoVP8 {
+ bool nonReference;
+ uint8_t temporalIdx;
+ bool layerSync;
+ int8_t keyIdx; // Negative value to skip keyIdx.
+
+ // Used to generate the list of dependency frames.
+ // `referencedBuffers` and `updatedBuffers` contain buffer IDs.
+ // Note that the buffer IDs here have a one-to-one mapping with the actual
+ // codec buffers, but the exact mapping (i.e. whether 0 refers to Last,
+ // to Golden or to Arf) is not pre-determined.
+  // More references may be specified than are strictly necessary, but not
+  // fewer.
+ // TODO(bugs.webrtc.org/10242): Remove `useExplicitDependencies` once all
+ // encoder-wrappers are updated.
+ bool useExplicitDependencies;
+ static constexpr size_t kBuffersCount = 3;
+ size_t referencedBuffers[kBuffersCount];
+ size_t referencedBuffersCount;
+ size_t updatedBuffers[kBuffersCount];
+ size_t updatedBuffersCount;
+};
+static_assert(std::is_pod<CodecSpecificInfoVP8>::value, "");
+
+// Hack alert - the code assumes that this struct is memset when constructed.
+struct CodecSpecificInfoVP9 {
+ bool first_frame_in_picture; // First frame, increment picture_id.
+ bool inter_pic_predicted; // This layer frame is dependent on previously
+ // coded frame(s).
+ bool flexible_mode;
+ bool ss_data_available;
+ bool non_ref_for_inter_layer_pred;
+
+ uint8_t temporal_idx;
+ bool temporal_up_switch;
+ bool inter_layer_predicted; // Frame is dependent on directly lower spatial
+ // layer frame.
+ uint8_t gof_idx;
+
+ // SS data.
+ size_t num_spatial_layers; // Always populated.
+ size_t first_active_layer;
+ bool spatial_layer_resolution_present;
+ uint16_t width[kMaxVp9NumberOfSpatialLayers];
+ uint16_t height[kMaxVp9NumberOfSpatialLayers];
+ GofInfoVP9 gof;
+
+ // Frame reference data.
+ uint8_t num_ref_pics;
+ uint8_t p_diff[kMaxVp9RefPics];
+
+ ABSL_DEPRECATED("") bool end_of_picture;
+};
+static_assert(std::is_pod<CodecSpecificInfoVP9>::value, "");
+
+// Hack alert - the code assumes that this struct is memset when constructed.
+struct CodecSpecificInfoH264 {
+ H264PacketizationMode packetization_mode;
+ uint8_t temporal_idx;
+ bool base_layer_sync;
+ bool idr_frame;
+};
+static_assert(std::is_pod<CodecSpecificInfoH264>::value, "");
+
+union CodecSpecificInfoUnion {
+ CodecSpecificInfoVP8 VP8;
+ CodecSpecificInfoVP9 VP9;
+ CodecSpecificInfoH264 H264;
+};
+static_assert(std::is_pod<CodecSpecificInfoUnion>::value, "");
+
+// Note: if any pointers are added to this struct or its sub-structs, it
+// must be fitted with a copy-constructor. This is because it is copied
+// in the copy-constructor of VCMEncodedFrame.
+struct RTC_EXPORT CodecSpecificInfo {
+ CodecSpecificInfo();
+ CodecSpecificInfo(const CodecSpecificInfo&);
+ ~CodecSpecificInfo();
+
+ VideoCodecType codecType;
+ CodecSpecificInfoUnion codecSpecific;
+ bool end_of_picture = true;
+ absl::optional<GenericFrameInfo> generic_frame_info;
+ absl::optional<FrameDependencyStructure> template_structure;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_INCLUDE_VIDEO_CODEC_INTERFACE_H_
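
Illustrative only: how an encoder wrapper might fill the struct above for a VP8 frame. The constructor (see video_codec_interface.cc above) zeroes codecSpecific, so only the relevant union member needs touching:

  webrtc::CodecSpecificInfo info;  // codecType starts as kVideoCodecGeneric.
  info.codecType = webrtc::kVideoCodecVP8;
  info.codecSpecific.VP8.temporalIdx = 1;   // Second temporal layer.
  info.codecSpecific.VP8.layerSync = true;  // Syncs down to the base layer.
  info.codecSpecific.VP8.keyIdx = -1;       // Negative value skips keyIdx.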
diff --git a/third_party/libwebrtc/modules/video_coding/include/video_coding.h b/third_party/libwebrtc/modules/video_coding/include/video_coding.h
new file mode 100644
index 0000000000..ee9326d9fc
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/include/video_coding.h
@@ -0,0 +1,150 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_INCLUDE_VIDEO_CODING_H_
+#define MODULES_VIDEO_CODING_INCLUDE_VIDEO_CODING_H_
+
+#include "api/field_trials_view.h"
+#include "api/video/video_frame.h"
+#include "api/video_codecs/video_decoder.h"
+#include "modules/rtp_rtcp/source/rtp_video_header.h"
+#include "modules/video_coding/include/video_coding_defines.h"
+
+namespace webrtc {
+
+class Clock;
+class EncodedImageCallback;
+class VideoDecoder;
+class VideoEncoder;
+struct CodecSpecificInfo;
+
+class VideoCodingModule {
+ public:
+ // DEPRECATED.
+ static VideoCodingModule* Create(
+ Clock* clock,
+ const FieldTrialsView* field_trials = nullptr);
+
+ virtual ~VideoCodingModule() = default;
+
+ /*
+ * Receiver
+ */
+
+  // Register possible receive codecs; can be called multiple times for
+  // different codecs. The module will automatically switch between registered
+  // codecs depending on the payload type of incoming frames. The actual
+  // decoder will be created when needed.
+ //
+ // Input:
+ // - payload_type : RTP payload type
+ // - settings : Settings for the decoder to be registered.
+ //
+ virtual void RegisterReceiveCodec(uint8_t payload_type,
+ const VideoDecoder::Settings& settings) = 0;
+
+ // Register an external decoder object.
+ //
+ // Input:
+ // - externalDecoder : Decoder object to be used for decoding frames.
+ // - payloadType : The payload type which this decoder is bound to.
+ virtual void RegisterExternalDecoder(VideoDecoder* externalDecoder,
+ uint8_t payloadType) = 0;
+
+  // Register a receive callback. Will be called whenever there is a new frame
+  // ready for rendering.
+  //
+  // Input:
+  //      - receiveCallback : The callback object to be used by the module
+  //                          when a frame is ready for rendering.
+  //                          De-register with a NULL pointer.
+ //
+ // Return value : VCM_OK, on success.
+ // < 0, on error.
+ virtual int32_t RegisterReceiveCallback(
+ VCMReceiveCallback* receiveCallback) = 0;
+
+  // Register a frame type request callback. This callback will be called when
+  // the module needs to request specific frame types from the send side.
+  //
+  // Input:
+  //      - frameTypeCallback : The callback object to be used by the module
+  //                            when requesting a specific type of frame from
+  //                            the send side.
+  //                            De-register with a NULL pointer.
+ //
+ // Return value : VCM_OK, on success.
+ // < 0, on error.
+ virtual int32_t RegisterFrameTypeCallback(
+ VCMFrameTypeCallback* frameTypeCallback) = 0;
+
+ // Registers a callback which is called whenever the receive side of the VCM
+ // encounters holes in the packet sequence and needs packets to be
+ // retransmitted.
+ //
+ // Input:
+ // - callback : The callback to be registered in the VCM.
+ //
+ // Return value : VCM_OK, on success.
+ // <0, on error.
+ virtual int32_t RegisterPacketRequestCallback(
+ VCMPacketRequestCallback* callback) = 0;
+
+ // Waits for the next frame in the jitter buffer to become complete
+ // (waits no longer than maxWaitTimeMs), then passes it to the decoder for
+ // decoding.
+ // Should be called as often as possible to get the most out of the decoder.
+ //
+ // Return value : VCM_OK, on success.
+ // < 0, on error.
+ virtual int32_t Decode(uint16_t maxWaitTimeMs = 200) = 0;
+
+  // Insert a parsed packet into the receiver side of the module. Will be
+  // placed in the jitter buffer waiting for the frame to become complete.
+  // Returns as soon as the packet has been placed in the jitter buffer.
+ //
+ // Input:
+ // - incomingPayload : Payload of the packet.
+ // - payloadLength : Length of the payload.
+ // - rtp_header : The parsed RTP header.
+ // - video_header : The relevant extensions and payload header.
+ //
+ // Return value : VCM_OK, on success.
+ // < 0, on error.
+ virtual int32_t IncomingPacket(const uint8_t* incomingPayload,
+ size_t payloadLength,
+ const RTPHeader& rtp_header,
+ const RTPVideoHeader& video_header) = 0;
+
+ // Sets the maximum number of sequence numbers that we are allowed to NACK
+ // and the oldest sequence number that we will consider to NACK. If a
+ // sequence number older than `max_packet_age_to_nack` is missing
+ // a key frame will be requested. A key frame will also be requested if the
+ // time of incomplete or non-continuous frames in the jitter buffer is above
+ // `max_incomplete_time_ms`.
+ virtual void SetNackSettings(size_t max_nack_list_size,
+ int max_packet_age_to_nack,
+ int max_incomplete_time_ms) = 0;
+
+ // Runs delayed tasks. Expected to be called periodically.
+ virtual void Process() = 0;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_INCLUDE_VIDEO_CODING_H_
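
The receive-side flow implied by the comments above, as a sketch; `clock`, `payload_type`, `decoder_settings`, `decoder`, the callback objects, and the per-packet variables are assumed to exist elsewhere:

  std::unique_ptr<webrtc::VideoCodingModule> vcm(
      webrtc::VideoCodingModule::Create(clock));
  vcm->RegisterReceiveCodec(payload_type, decoder_settings);
  vcm->RegisterExternalDecoder(&decoder, payload_type);
  vcm->RegisterReceiveCallback(&receive_callback);
  vcm->RegisterFrameTypeCallback(&frame_type_callback);

  // Per incoming RTP packet:
  vcm->IncomingPacket(payload, payload_size, rtp_header, video_header);

  // Decode loop, typically on a dedicated thread:
  vcm->Decode(/*maxWaitTimeMs=*/200);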
diff --git a/third_party/libwebrtc/modules/video_coding/include/video_coding_defines.h b/third_party/libwebrtc/modules/video_coding/include/video_coding_defines.h
new file mode 100644
index 0000000000..87076378ea
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/include/video_coding_defines.h
@@ -0,0 +1,119 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_INCLUDE_VIDEO_CODING_DEFINES_H_
+#define MODULES_VIDEO_CODING_INCLUDE_VIDEO_CODING_DEFINES_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "absl/types/optional.h"
+#include "api/video/video_content_type.h"
+#include "api/video/video_frame.h"
+#include "api/video/video_timing.h"
+
+namespace webrtc {
+
+// Error codes
+#define VCM_FRAME_NOT_READY 3
+#define VCM_MISSING_CALLBACK 1
+#define VCM_OK 0
+#define VCM_GENERAL_ERROR -1
+#define VCM_PARAMETER_ERROR -4
+#define VCM_NO_CODEC_REGISTERED -8
+#define VCM_JITTER_BUFFER_ERROR -9
+
+enum {
+  // Timing frames settings. Timing frames are sent every
+  // `kDefaultTimingFramesDelayMs`, or when a frame is at least
+  // `kDefaultOutlierFrameSizePercent` percent of the average frame size.
+  kDefaultTimingFramesDelayMs = 200,
+  kDefaultOutlierFrameSizePercent = 500,
+  // Maximum number of frames for which we store encode start timing
+  // information.
+ kMaxEncodeStartTimeListSize = 150,
+};
+
+enum VCMVideoProtection {
+ kProtectionNack,
+ kProtectionNackFEC,
+};
+
+// Callback class used for passing decoded frames which are ready to be
+// rendered.
+class VCMReceiveCallback {
+ public:
+ virtual int32_t FrameToRender(VideoFrame& videoFrame, // NOLINT
+ absl::optional<uint8_t> qp,
+ TimeDelta decode_time,
+ VideoContentType content_type) = 0;
+
+ virtual void OnDroppedFrames(uint32_t frames_dropped);
+
+ // Called when the current receive codec changes.
+ virtual void OnIncomingPayloadType(int payload_type);
+ virtual void OnDecoderImplementationName(const char* implementation_name);
+
+ protected:
+ virtual ~VCMReceiveCallback() {}
+};
+
+// Callback class used for informing the user of the incoming bit rate and frame
+// rate.
+class VCMReceiveStatisticsCallback {
+ public:
+ virtual void OnCompleteFrame(bool is_keyframe,
+ size_t size_bytes,
+ VideoContentType content_type) = 0;
+
+ virtual void OnDroppedFrames(uint32_t frames_dropped) = 0;
+
+ virtual void OnDiscardedPackets(uint32_t packets_discarded) = 0;
+
+ virtual void OnFrameBufferTimingsUpdated(int max_decode_ms,
+ int current_delay_ms,
+ int target_delay_ms,
+ int jitter_buffer_ms,
+ int min_playout_delay_ms,
+ int render_delay_ms) = 0;
+
+ virtual void OnTimingFrameInfoUpdated(const TimingFrameInfo& info) = 0;
+
+ protected:
+ virtual ~VCMReceiveStatisticsCallback() {}
+};
+
+// Callback class used for telling the user which frame type is needed to
+// continue decoding. Typically a key frame, when the stream has been
+// corrupted in some way.
+class VCMFrameTypeCallback {
+ public:
+ virtual int32_t RequestKeyFrame() = 0;
+
+ protected:
+ virtual ~VCMFrameTypeCallback() {}
+};
+
+// Callback class used for telling the user which packet sequence numbers are
+// currently missing and need to be resent.
+// TODO(philipel): Deprecate VCMPacketRequestCallback
+// and use NackSender instead.
+class VCMPacketRequestCallback {
+ public:
+ virtual int32_t ResendPackets(const uint16_t* sequenceNumbers,
+ uint16_t length) = 0;
+
+ protected:
+ virtual ~VCMPacketRequestCallback() {}
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_INCLUDE_VIDEO_CODING_DEFINES_H_
diff --git a/third_party/libwebrtc/modules/video_coding/include/video_error_codes.h b/third_party/libwebrtc/modules/video_coding/include/video_error_codes.h
new file mode 100644
index 0000000000..4ae0ca127d
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/include/video_error_codes.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_INCLUDE_VIDEO_ERROR_CODES_H_
+#define MODULES_VIDEO_CODING_INCLUDE_VIDEO_ERROR_CODES_H_
+
+// NOTE: Kept in sync with video_coding_module_defines.h.
+
+// Define return values
+
+#define WEBRTC_VIDEO_CODEC_OK_REQUEST_KEYFRAME 4
+#define WEBRTC_VIDEO_CODEC_NO_OUTPUT 1
+#define WEBRTC_VIDEO_CODEC_OK 0
+#define WEBRTC_VIDEO_CODEC_ERROR -1
+#define WEBRTC_VIDEO_CODEC_MEMORY -3
+#define WEBRTC_VIDEO_CODEC_ERR_PARAMETER -4
+#define WEBRTC_VIDEO_CODEC_UNINITIALIZED -7
+#define WEBRTC_VIDEO_CODEC_FALLBACK_SOFTWARE -13
+#define WEBRTC_VIDEO_CODEC_TARGET_BITRATE_OVERSHOOT -14
+#define WEBRTC_VIDEO_CODEC_ERR_SIMULCAST_PARAMETERS_NOT_SUPPORTED -15
+#define WEBRTC_VIDEO_CODEC_ENCODER_FAILURE -16
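+
+// Usage sketch (assumption, for illustration): codec implementations report
+// status through these codes, e.g.
+//   int32_t Decode(const EncodedImage& input) {
+//     if (!initialized_)
+//       return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
+//     if (input.size() == 0)
+//       return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+//     // ... decode ...
+//     return WEBRTC_VIDEO_CODEC_OK;
+//   }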
+
+#endif // MODULES_VIDEO_CODING_INCLUDE_VIDEO_ERROR_CODES_H_
diff --git a/third_party/libwebrtc/modules/video_coding/internal_defines.h b/third_party/libwebrtc/modules/video_coding/internal_defines.h
new file mode 100644
index 0000000000..f753f200e1
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/internal_defines.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_INTERNAL_DEFINES_H_
+#define MODULES_VIDEO_CODING_INTERNAL_DEFINES_H_
+
+namespace webrtc {
+
+#define VCM_MAX(a, b) (((a) > (b)) ? (a) : (b))
+#define VCM_MIN(a, b) (((a) < (b)) ? (a) : (b))
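+
+// Example: VCM_MAX(3, 7) expands to (((3) > (7)) ? (3) : (7)), which yields 7.
+// As with any function-like macro, an argument may be evaluated more than
+// once, so avoid passing expressions with side effects.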
+
+#define VCM_FLUSH_INDICATOR 4
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_INTERNAL_DEFINES_H_
diff --git a/third_party/libwebrtc/modules/video_coding/jitter_buffer.cc b/third_party/libwebrtc/modules/video_coding/jitter_buffer.cc
new file mode 100644
index 0000000000..39553c9f3f
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/jitter_buffer.cc
@@ -0,0 +1,892 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/video_coding/jitter_buffer.h"
+
+#include <algorithm>
+#include <limits>
+#include <utility>
+
+#include "api/units/timestamp.h"
+#include "modules/video_coding/frame_buffer.h"
+#include "modules/video_coding/include/video_coding.h"
+#include "modules/video_coding/internal_defines.h"
+#include "modules/video_coding/jitter_buffer_common.h"
+#include "modules/video_coding/packet.h"
+#include "modules/video_coding/timing/inter_frame_delay.h"
+#include "modules/video_coding/timing/jitter_estimator.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "system_wrappers/include/clock.h"
+
+namespace webrtc {
+// Use this RTT if no value has been reported.
+static const int64_t kDefaultRtt = 200;
+
+typedef std::pair<uint32_t, VCMFrameBuffer*> FrameListPair;
+
+bool IsKeyFrame(FrameListPair pair) {
+ return pair.second->FrameType() == VideoFrameType::kVideoFrameKey;
+}
+
+bool HasNonEmptyState(FrameListPair pair) {
+ return pair.second->GetState() != kStateEmpty;
+}
+
+void FrameList::InsertFrame(VCMFrameBuffer* frame) {
+ insert(rbegin().base(), FrameListPair(frame->Timestamp(), frame));
+}
+
+VCMFrameBuffer* FrameList::PopFrame(uint32_t timestamp) {
+ FrameList::iterator it = find(timestamp);
+ if (it == end())
+ return NULL;
+ VCMFrameBuffer* frame = it->second;
+ erase(it);
+ return frame;
+}
+
+VCMFrameBuffer* FrameList::Front() const {
+ return begin()->second;
+}
+
+VCMFrameBuffer* FrameList::Back() const {
+ return rbegin()->second;
+}
+
+int FrameList::RecycleFramesUntilKeyFrame(FrameList::iterator* key_frame_it,
+ UnorderedFrameList* free_frames) {
+ int drop_count = 0;
+ FrameList::iterator it = begin();
+ while (!empty()) {
+    // Throw away at least one frame.
+ it->second->Reset();
+ free_frames->push_back(it->second);
+ erase(it++);
+ ++drop_count;
+ if (it != end() &&
+ it->second->FrameType() == VideoFrameType::kVideoFrameKey) {
+ *key_frame_it = it;
+ return drop_count;
+ }
+ }
+ *key_frame_it = end();
+ return drop_count;
+}
+
+void FrameList::CleanUpOldOrEmptyFrames(VCMDecodingState* decoding_state,
+ UnorderedFrameList* free_frames) {
+ while (!empty()) {
+ VCMFrameBuffer* oldest_frame = Front();
+ bool remove_frame = false;
+ if (oldest_frame->GetState() == kStateEmpty && size() > 1) {
+ // This frame is empty, try to update the last decoded state and drop it
+ // if successful.
+ remove_frame = decoding_state->UpdateEmptyFrame(oldest_frame);
+ } else {
+ remove_frame = decoding_state->IsOldFrame(oldest_frame);
+ }
+ if (!remove_frame) {
+ break;
+ }
+ free_frames->push_back(oldest_frame);
+ erase(begin());
+ }
+}
+
+void FrameList::Reset(UnorderedFrameList* free_frames) {
+ while (!empty()) {
+ begin()->second->Reset();
+ free_frames->push_back(begin()->second);
+ erase(begin());
+ }
+}
+
+VCMJitterBuffer::VCMJitterBuffer(Clock* clock,
+ std::unique_ptr<EventWrapper> event,
+ const FieldTrialsView& field_trials)
+ : clock_(clock),
+ running_(false),
+ frame_event_(std::move(event)),
+ max_number_of_frames_(kStartNumberOfFrames),
+ free_frames_(),
+ decodable_frames_(),
+ incomplete_frames_(),
+ last_decoded_state_(),
+ first_packet_since_reset_(true),
+ num_consecutive_old_packets_(0),
+ num_packets_(0),
+ num_duplicated_packets_(0),
+ jitter_estimate_(clock, field_trials),
+ missing_sequence_numbers_(SequenceNumberLessThan()),
+ latest_received_sequence_number_(0),
+ max_nack_list_size_(0),
+ max_packet_age_to_nack_(0),
+ max_incomplete_time_ms_(0),
+ average_packets_per_frame_(0.0f),
+ frame_counter_(0) {
+ for (int i = 0; i < kStartNumberOfFrames; i++)
+ free_frames_.push_back(new VCMFrameBuffer());
+}
+
+VCMJitterBuffer::~VCMJitterBuffer() {
+ Stop();
+ for (UnorderedFrameList::iterator it = free_frames_.begin();
+ it != free_frames_.end(); ++it) {
+ delete *it;
+ }
+ for (FrameList::iterator it = incomplete_frames_.begin();
+ it != incomplete_frames_.end(); ++it) {
+ delete it->second;
+ }
+ for (FrameList::iterator it = decodable_frames_.begin();
+ it != decodable_frames_.end(); ++it) {
+ delete it->second;
+ }
+}
+
+void VCMJitterBuffer::Start() {
+ MutexLock lock(&mutex_);
+ running_ = true;
+
+ num_consecutive_old_packets_ = 0;
+ num_packets_ = 0;
+ num_duplicated_packets_ = 0;
+
+ // Start in a non-signaled state.
+ waiting_for_completion_.frame_size = 0;
+ waiting_for_completion_.timestamp = 0;
+ waiting_for_completion_.latest_packet_time = -1;
+ first_packet_since_reset_ = true;
+ last_decoded_state_.Reset();
+
+ decodable_frames_.Reset(&free_frames_);
+ incomplete_frames_.Reset(&free_frames_);
+}
+
+void VCMJitterBuffer::Stop() {
+ MutexLock lock(&mutex_);
+ running_ = false;
+ last_decoded_state_.Reset();
+
+ // Make sure we wake up any threads waiting on these events.
+ frame_event_->Set();
+}
+
+bool VCMJitterBuffer::Running() const {
+ MutexLock lock(&mutex_);
+ return running_;
+}
+
+void VCMJitterBuffer::Flush() {
+ MutexLock lock(&mutex_);
+ decodable_frames_.Reset(&free_frames_);
+ incomplete_frames_.Reset(&free_frames_);
+ last_decoded_state_.Reset(); // TODO(mikhal): sync reset.
+ num_consecutive_old_packets_ = 0;
+  // Also reset the jitter and delay estimates.
+ jitter_estimate_.Reset();
+ inter_frame_delay_.Reset();
+ waiting_for_completion_.frame_size = 0;
+ waiting_for_completion_.timestamp = 0;
+ waiting_for_completion_.latest_packet_time = -1;
+ first_packet_since_reset_ = true;
+ missing_sequence_numbers_.clear();
+}
+
+int VCMJitterBuffer::num_packets() const {
+ MutexLock lock(&mutex_);
+ return num_packets_;
+}
+
+int VCMJitterBuffer::num_duplicated_packets() const {
+ MutexLock lock(&mutex_);
+ return num_duplicated_packets_;
+}
+
+// Returns immediately, or waits up to `max_wait_time_ms` ms for a complete
+// frame to arrive; `max_wait_time_ms` is decided by the caller.
+VCMEncodedFrame* VCMJitterBuffer::NextCompleteFrame(uint32_t max_wait_time_ms) {
+ MutexLock lock(&mutex_);
+ if (!running_) {
+ return nullptr;
+ }
+ CleanUpOldOrEmptyFrames();
+
+ if (decodable_frames_.empty() ||
+ decodable_frames_.Front()->GetState() != kStateComplete) {
+ const int64_t end_wait_time_ms =
+ clock_->TimeInMilliseconds() + max_wait_time_ms;
+ int64_t wait_time_ms = max_wait_time_ms;
+ while (wait_time_ms > 0) {
+ mutex_.Unlock();
+ const EventTypeWrapper ret =
+ frame_event_->Wait(static_cast<uint32_t>(wait_time_ms));
+ mutex_.Lock();
+ if (ret == kEventSignaled) {
+ // Are we shutting down the jitter buffer?
+ if (!running_) {
+ return nullptr;
+ }
+ // Finding oldest frame ready for decoder.
+ CleanUpOldOrEmptyFrames();
+ if (decodable_frames_.empty() ||
+ decodable_frames_.Front()->GetState() != kStateComplete) {
+ wait_time_ms = end_wait_time_ms - clock_->TimeInMilliseconds();
+ } else {
+ break;
+ }
+ } else {
+ break;
+ }
+ }
+ }
+ if (decodable_frames_.empty() ||
+ decodable_frames_.Front()->GetState() != kStateComplete) {
+ return nullptr;
+ }
+ return decodable_frames_.Front();
+}
+
+VCMEncodedFrame* VCMJitterBuffer::ExtractAndSetDecode(uint32_t timestamp) {
+ MutexLock lock(&mutex_);
+ if (!running_) {
+ return NULL;
+ }
+ // Extract the frame with the desired timestamp.
+ VCMFrameBuffer* frame = decodable_frames_.PopFrame(timestamp);
+ bool continuous = true;
+ if (!frame) {
+ frame = incomplete_frames_.PopFrame(timestamp);
+ if (frame)
+ continuous = last_decoded_state_.ContinuousFrame(frame);
+ else
+ return NULL;
+ }
+ // Frame pulled out from jitter buffer, update the jitter estimate.
+ const bool retransmitted = (frame->GetNackCount() > 0);
+ if (retransmitted) {
+ jitter_estimate_.FrameNacked();
+ } else if (frame->size() > 0) {
+ // Ignore retransmitted and empty frames.
+ if (waiting_for_completion_.latest_packet_time >= 0) {
+ UpdateJitterEstimate(waiting_for_completion_, true);
+ }
+ if (frame->GetState() == kStateComplete) {
+ UpdateJitterEstimate(*frame, false);
+ } else {
+ // Wait for this one to get complete.
+ waiting_for_completion_.frame_size = frame->size();
+ waiting_for_completion_.latest_packet_time = frame->LatestPacketTimeMs();
+ waiting_for_completion_.timestamp = frame->Timestamp();
+ }
+ }
+
+ // The state must be changed to decoding before cleaning up zero sized
+ // frames to avoid empty frames being cleaned up and then given to the
+ // decoder. Propagates the missing_frame bit.
+ frame->PrepareForDecode(continuous);
+
+ // We have a frame - update the last decoded state and nack list.
+ last_decoded_state_.SetState(frame);
+ DropPacketsFromNackList(last_decoded_state_.sequence_num());
+
+ UpdateAveragePacketsPerFrame(frame->NumPackets());
+
+ return frame;
+}
+
+// Release frame when done with decoding. Should never be used to release
+// frames from within the jitter buffer.
+void VCMJitterBuffer::ReleaseFrame(VCMEncodedFrame* frame) {
+ RTC_CHECK(frame != nullptr);
+ MutexLock lock(&mutex_);
+ VCMFrameBuffer* frame_buffer = static_cast<VCMFrameBuffer*>(frame);
+ RecycleFrameBuffer(frame_buffer);
+}
+
+// Gets frame to use for this timestamp. If no match, get empty frame.
+VCMFrameBufferEnum VCMJitterBuffer::GetFrame(const VCMPacket& packet,
+ VCMFrameBuffer** frame,
+ FrameList** frame_list) {
+ *frame = incomplete_frames_.PopFrame(packet.timestamp);
+ if (*frame != NULL) {
+ *frame_list = &incomplete_frames_;
+ return kNoError;
+ }
+ *frame = decodable_frames_.PopFrame(packet.timestamp);
+ if (*frame != NULL) {
+ *frame_list = &decodable_frames_;
+ return kNoError;
+ }
+
+ *frame_list = NULL;
+ // No match, return empty frame.
+ *frame = GetEmptyFrame();
+ if (*frame == NULL) {
+ // No free frame! Try to reclaim some...
+ RTC_LOG(LS_WARNING) << "Unable to get empty frame; Recycling.";
+ bool found_key_frame = RecycleFramesUntilKeyFrame();
+ *frame = GetEmptyFrame();
+ RTC_CHECK(*frame);
+ if (!found_key_frame) {
+ RecycleFrameBuffer(*frame);
+ return kFlushIndicator;
+ }
+ }
+ (*frame)->Reset();
+ return kNoError;
+}
+
+int64_t VCMJitterBuffer::LastPacketTime(const VCMEncodedFrame* frame,
+ bool* retransmitted) const {
+ RTC_DCHECK(retransmitted);
+ MutexLock lock(&mutex_);
+ const VCMFrameBuffer* frame_buffer =
+ static_cast<const VCMFrameBuffer*>(frame);
+ *retransmitted = (frame_buffer->GetNackCount() > 0);
+ return frame_buffer->LatestPacketTimeMs();
+}
+
+VCMFrameBufferEnum VCMJitterBuffer::InsertPacket(const VCMPacket& packet,
+ bool* retransmitted) {
+ MutexLock lock(&mutex_);
+
+ ++num_packets_;
+ // Does this packet belong to an old frame?
+ if (last_decoded_state_.IsOldPacket(&packet)) {
+ // Account only for media packets.
+ if (packet.sizeBytes > 0) {
+ num_consecutive_old_packets_++;
+ }
+ // Update last decoded sequence number if the packet arrived late and
+ // belongs to a frame with a timestamp equal to the last decoded
+ // timestamp.
+ last_decoded_state_.UpdateOldPacket(&packet);
+ DropPacketsFromNackList(last_decoded_state_.sequence_num());
+
+ // Also see if this old packet made more incomplete frames continuous.
+ FindAndInsertContinuousFramesWithState(last_decoded_state_);
+
+ if (num_consecutive_old_packets_ > kMaxConsecutiveOldPackets) {
+ RTC_LOG(LS_WARNING)
+ << num_consecutive_old_packets_
+ << " consecutive old packets received. Flushing the jitter buffer.";
+ Flush();
+ return kFlushIndicator;
+ }
+ return kOldPacket;
+ }
+
+ num_consecutive_old_packets_ = 0;
+
+ VCMFrameBuffer* frame;
+ FrameList* frame_list;
+ const VCMFrameBufferEnum error = GetFrame(packet, &frame, &frame_list);
+ if (error != kNoError)
+ return error;
+
+ Timestamp now = clock_->CurrentTime();
+ // We are keeping track of the first and latest seq numbers, and
+ // the number of wraps to be able to calculate how many packets we expect.
+ if (first_packet_since_reset_) {
+    // Now it's time to start estimating jitter;
+    // reset the delay estimate.
+ inter_frame_delay_.Reset();
+ }
+
+ // Empty packets may bias the jitter estimate (lacking size component),
+  // therefore don't let empty packets trigger the following updates:
+ if (packet.video_header.frame_type != VideoFrameType::kEmptyFrame) {
+ if (waiting_for_completion_.timestamp == packet.timestamp) {
+      // This can get bad if we have a lot of duplicate packets, as we will
+      // then count some packets multiple times.
+ waiting_for_completion_.frame_size += packet.sizeBytes;
+ waiting_for_completion_.latest_packet_time = now.ms();
+ } else if (waiting_for_completion_.latest_packet_time >= 0 &&
+ waiting_for_completion_.latest_packet_time + 2000 <= now.ms()) {
+      // A packet should never be more than two seconds late.
+ UpdateJitterEstimate(waiting_for_completion_, true);
+ waiting_for_completion_.latest_packet_time = -1;
+ waiting_for_completion_.frame_size = 0;
+ waiting_for_completion_.timestamp = 0;
+ }
+ }
+
+ VCMFrameBufferStateEnum previous_state = frame->GetState();
+ // Insert packet.
+ FrameData frame_data;
+ frame_data.rtt_ms = kDefaultRtt;
+ frame_data.rolling_average_packets_per_frame = average_packets_per_frame_;
+ VCMFrameBufferEnum buffer_state =
+ frame->InsertPacket(packet, now.ms(), frame_data);
+
+ if (buffer_state > 0) {
+ if (first_packet_since_reset_) {
+ latest_received_sequence_number_ = packet.seqNum;
+ first_packet_since_reset_ = false;
+ } else {
+ if (IsPacketRetransmitted(packet)) {
+ frame->IncrementNackCount();
+ }
+ if (!UpdateNackList(packet.seqNum) &&
+ packet.video_header.frame_type != VideoFrameType::kVideoFrameKey) {
+ buffer_state = kFlushIndicator;
+ }
+
+ latest_received_sequence_number_ =
+ LatestSequenceNumber(latest_received_sequence_number_, packet.seqNum);
+ }
+ }
+
+ // Is the frame already in the decodable list?
+ bool continuous = IsContinuous(*frame);
+ switch (buffer_state) {
+ case kGeneralError:
+ case kTimeStampError:
+ case kSizeError: {
+ RecycleFrameBuffer(frame);
+ break;
+ }
+ case kCompleteSession: {
+ if (previous_state != kStateComplete) {
+ if (continuous) {
+ // Signal that we have a complete session.
+ frame_event_->Set();
+ }
+ }
+
+ *retransmitted = (frame->GetNackCount() > 0);
+ if (continuous) {
+ decodable_frames_.InsertFrame(frame);
+ FindAndInsertContinuousFrames(*frame);
+ } else {
+ incomplete_frames_.InsertFrame(frame);
+ }
+ break;
+ }
+ case kIncomplete: {
+ if (frame->GetState() == kStateEmpty &&
+ last_decoded_state_.UpdateEmptyFrame(frame)) {
+ RecycleFrameBuffer(frame);
+ return kNoError;
+ } else {
+ incomplete_frames_.InsertFrame(frame);
+ }
+ break;
+ }
+ case kNoError:
+ case kOutOfBoundsPacket:
+ case kDuplicatePacket: {
+ // Put back the frame where it came from.
+ if (frame_list != NULL) {
+ frame_list->InsertFrame(frame);
+ } else {
+ RecycleFrameBuffer(frame);
+ }
+ ++num_duplicated_packets_;
+ break;
+ }
+ case kFlushIndicator:
+ RecycleFrameBuffer(frame);
+ return kFlushIndicator;
+ default:
+ RTC_DCHECK_NOTREACHED();
+ }
+ return buffer_state;
+}
+
+bool VCMJitterBuffer::IsContinuousInState(
+ const VCMFrameBuffer& frame,
+ const VCMDecodingState& decoding_state) const {
+ // Is this frame complete and continuous?
+ return (frame.GetState() == kStateComplete) &&
+ decoding_state.ContinuousFrame(&frame);
+}
+
+bool VCMJitterBuffer::IsContinuous(const VCMFrameBuffer& frame) const {
+ if (IsContinuousInState(frame, last_decoded_state_)) {
+ return true;
+ }
+ VCMDecodingState decoding_state;
+ decoding_state.CopyFrom(last_decoded_state_);
+ for (FrameList::const_iterator it = decodable_frames_.begin();
+ it != decodable_frames_.end(); ++it) {
+ VCMFrameBuffer* decodable_frame = it->second;
+ if (IsNewerTimestamp(decodable_frame->Timestamp(), frame.Timestamp())) {
+ break;
+ }
+ decoding_state.SetState(decodable_frame);
+ if (IsContinuousInState(frame, decoding_state)) {
+ return true;
+ }
+ }
+ return false;
+}
+
+void VCMJitterBuffer::FindAndInsertContinuousFrames(
+ const VCMFrameBuffer& new_frame) {
+ VCMDecodingState decoding_state;
+ decoding_state.CopyFrom(last_decoded_state_);
+ decoding_state.SetState(&new_frame);
+ FindAndInsertContinuousFramesWithState(decoding_state);
+}
+
+void VCMJitterBuffer::FindAndInsertContinuousFramesWithState(
+ const VCMDecodingState& original_decoded_state) {
+ // Copy original_decoded_state so we can move the state forward with each
+ // decodable frame we find.
+ VCMDecodingState decoding_state;
+ decoding_state.CopyFrom(original_decoded_state);
+
+ // When temporal layers are available, we search for a complete or decodable
+ // frame until we hit one of the following:
+ // 1. Continuous base or sync layer.
+ // 2. The end of the list was reached.
+ for (FrameList::iterator it = incomplete_frames_.begin();
+ it != incomplete_frames_.end();) {
+ VCMFrameBuffer* frame = it->second;
+ if (IsNewerTimestamp(original_decoded_state.time_stamp(),
+ frame->Timestamp())) {
+ ++it;
+ continue;
+ }
+ if (IsContinuousInState(*frame, decoding_state)) {
+ decodable_frames_.InsertFrame(frame);
+ incomplete_frames_.erase(it++);
+ decoding_state.SetState(frame);
+ } else if (frame->TemporalId() <= 0) {
+ break;
+ } else {
+ ++it;
+ }
+ }
+}
+
+uint32_t VCMJitterBuffer::EstimatedJitterMs() {
+ MutexLock lock(&mutex_);
+  const double rtt_mult = 1.0;
+ return jitter_estimate_.GetJitterEstimate(rtt_mult, absl::nullopt).ms();
+}
+
+void VCMJitterBuffer::SetNackSettings(size_t max_nack_list_size,
+ int max_packet_age_to_nack,
+ int max_incomplete_time_ms) {
+ MutexLock lock(&mutex_);
+ RTC_DCHECK_GE(max_packet_age_to_nack, 0);
+  RTC_DCHECK_GE(max_incomplete_time_ms, 0);
+ max_nack_list_size_ = max_nack_list_size;
+ max_packet_age_to_nack_ = max_packet_age_to_nack;
+ max_incomplete_time_ms_ = max_incomplete_time_ms;
+}
+
+int VCMJitterBuffer::NonContinuousOrIncompleteDuration() {
+ if (incomplete_frames_.empty()) {
+ return 0;
+ }
+ uint32_t start_timestamp = incomplete_frames_.Front()->Timestamp();
+ if (!decodable_frames_.empty()) {
+ start_timestamp = decodable_frames_.Back()->Timestamp();
+ }
+ return incomplete_frames_.Back()->Timestamp() - start_timestamp;
+}
+
+uint16_t VCMJitterBuffer::EstimatedLowSequenceNumber(
+ const VCMFrameBuffer& frame) const {
+ RTC_DCHECK_GE(frame.GetLowSeqNum(), 0);
+ if (frame.HaveFirstPacket())
+ return frame.GetLowSeqNum();
+
+ // This estimate is not accurate if more than one packet with lower sequence
+ // number is lost.
+ return frame.GetLowSeqNum() - 1;
+}
+
+std::vector<uint16_t> VCMJitterBuffer::GetNackList(bool* request_key_frame) {
+ MutexLock lock(&mutex_);
+ *request_key_frame = false;
+ if (last_decoded_state_.in_initial_state()) {
+ VCMFrameBuffer* next_frame = NextFrame();
+ const bool first_frame_is_key =
+ next_frame &&
+ next_frame->FrameType() == VideoFrameType::kVideoFrameKey &&
+ next_frame->HaveFirstPacket();
+ if (!first_frame_is_key) {
+ bool have_non_empty_frame =
+ decodable_frames_.end() != find_if(decodable_frames_.begin(),
+ decodable_frames_.end(),
+ HasNonEmptyState);
+ if (!have_non_empty_frame) {
+ have_non_empty_frame =
+ incomplete_frames_.end() != find_if(incomplete_frames_.begin(),
+ incomplete_frames_.end(),
+ HasNonEmptyState);
+ }
+ bool found_key_frame = RecycleFramesUntilKeyFrame();
+ if (!found_key_frame) {
+ *request_key_frame = have_non_empty_frame;
+ return std::vector<uint16_t>();
+ }
+ }
+ }
+ if (TooLargeNackList()) {
+ *request_key_frame = !HandleTooLargeNackList();
+ }
+ if (max_incomplete_time_ms_ > 0) {
+ int non_continuous_incomplete_duration =
+ NonContinuousOrIncompleteDuration();
+ if (non_continuous_incomplete_duration > 90 * max_incomplete_time_ms_) {
+ RTC_LOG_F(LS_WARNING) << "Too long non-decodable duration: "
+ << non_continuous_incomplete_duration << " > "
+ << 90 * max_incomplete_time_ms_;
+ FrameList::reverse_iterator rit = find_if(
+ incomplete_frames_.rbegin(), incomplete_frames_.rend(), IsKeyFrame);
+ if (rit == incomplete_frames_.rend()) {
+ // Request a key frame if we don't have one already.
+ *request_key_frame = true;
+ return std::vector<uint16_t>();
+ } else {
+ // Skip to the last key frame. If it's incomplete we will start
+ // NACKing it.
+ // Note that the estimated low sequence number is correct for VP8
+ // streams because only the first packet of a key frame is marked.
+ last_decoded_state_.Reset();
+ DropPacketsFromNackList(EstimatedLowSequenceNumber(*rit->second));
+ }
+ }
+ }
+ std::vector<uint16_t> nack_list(missing_sequence_numbers_.begin(),
+ missing_sequence_numbers_.end());
+ return nack_list;
+}
+
+VCMFrameBuffer* VCMJitterBuffer::NextFrame() const {
+ if (!decodable_frames_.empty())
+ return decodable_frames_.Front();
+ if (!incomplete_frames_.empty())
+ return incomplete_frames_.Front();
+ return NULL;
+}
+
+bool VCMJitterBuffer::UpdateNackList(uint16_t sequence_number) {
+ // Make sure we don't add packets which are already too old to be decoded.
+ if (!last_decoded_state_.in_initial_state()) {
+ latest_received_sequence_number_ = LatestSequenceNumber(
+ latest_received_sequence_number_, last_decoded_state_.sequence_num());
+ }
+ if (IsNewerSequenceNumber(sequence_number,
+ latest_received_sequence_number_)) {
+ // Push any missing sequence numbers to the NACK list.
+ for (uint16_t i = latest_received_sequence_number_ + 1;
+ IsNewerSequenceNumber(sequence_number, i); ++i) {
+ missing_sequence_numbers_.insert(missing_sequence_numbers_.end(), i);
+ }
+ if (TooLargeNackList() && !HandleTooLargeNackList()) {
+ RTC_LOG(LS_WARNING) << "Requesting key frame due to too large NACK list.";
+ return false;
+ }
+ if (MissingTooOldPacket(sequence_number) &&
+ !HandleTooOldPackets(sequence_number)) {
+ RTC_LOG(LS_WARNING)
+ << "Requesting key frame due to missing too old packets";
+ return false;
+ }
+ } else {
+ missing_sequence_numbers_.erase(sequence_number);
+ }
+ return true;
+}
+
+bool VCMJitterBuffer::TooLargeNackList() const {
+ return missing_sequence_numbers_.size() > max_nack_list_size_;
+}
+
+bool VCMJitterBuffer::HandleTooLargeNackList() {
+ // Recycle frames until the NACK list is small enough. It is likely cheaper to
+ // request a key frame than to retransmit this many missing packets.
+ RTC_LOG_F(LS_WARNING) << "NACK list has grown too large: "
+ << missing_sequence_numbers_.size() << " > "
+ << max_nack_list_size_;
+ bool key_frame_found = false;
+ while (TooLargeNackList()) {
+ key_frame_found = RecycleFramesUntilKeyFrame();
+ }
+ return key_frame_found;
+}
+
+bool VCMJitterBuffer::MissingTooOldPacket(
+ uint16_t latest_sequence_number) const {
+ if (missing_sequence_numbers_.empty()) {
+ return false;
+ }
+ const uint16_t age_of_oldest_missing_packet =
+ latest_sequence_number - *missing_sequence_numbers_.begin();
+  // Recycle frames if the NACK list contains sequence numbers that are too
+  // old, as the packets may have already been dropped by the sender.
+ return age_of_oldest_missing_packet > max_packet_age_to_nack_;
+}
+
+bool VCMJitterBuffer::HandleTooOldPackets(uint16_t latest_sequence_number) {
+ bool key_frame_found = false;
+ const uint16_t age_of_oldest_missing_packet =
+ latest_sequence_number - *missing_sequence_numbers_.begin();
+ RTC_LOG_F(LS_WARNING) << "NACK list contains too old sequence numbers: "
+ << age_of_oldest_missing_packet << " > "
+ << max_packet_age_to_nack_;
+ while (MissingTooOldPacket(latest_sequence_number)) {
+ key_frame_found = RecycleFramesUntilKeyFrame();
+ }
+ return key_frame_found;
+}
+
+void VCMJitterBuffer::DropPacketsFromNackList(
+ uint16_t last_decoded_sequence_number) {
+ // Erase all sequence numbers from the NACK list which we won't need any
+ // longer.
+ missing_sequence_numbers_.erase(
+ missing_sequence_numbers_.begin(),
+ missing_sequence_numbers_.upper_bound(last_decoded_sequence_number));
+}
+
+VCMFrameBuffer* VCMJitterBuffer::GetEmptyFrame() {
+ if (free_frames_.empty()) {
+ if (!TryToIncreaseJitterBufferSize()) {
+ return NULL;
+ }
+ }
+ VCMFrameBuffer* frame = free_frames_.front();
+ free_frames_.pop_front();
+ return frame;
+}
+
+bool VCMJitterBuffer::TryToIncreaseJitterBufferSize() {
+ if (max_number_of_frames_ >= kMaxNumberOfFrames)
+ return false;
+ free_frames_.push_back(new VCMFrameBuffer());
+ ++max_number_of_frames_;
+ return true;
+}
+
+// Recycle oldest frames up to a key frame, used if jitter buffer is completely
+// full.
+bool VCMJitterBuffer::RecycleFramesUntilKeyFrame() {
+ // First release incomplete frames, and only release decodable frames if there
+ // are no incomplete ones.
+ FrameList::iterator key_frame_it;
+ bool key_frame_found = false;
+ int dropped_frames = 0;
+ dropped_frames += incomplete_frames_.RecycleFramesUntilKeyFrame(
+ &key_frame_it, &free_frames_);
+ key_frame_found = key_frame_it != incomplete_frames_.end();
+ if (dropped_frames == 0) {
+ dropped_frames += decodable_frames_.RecycleFramesUntilKeyFrame(
+ &key_frame_it, &free_frames_);
+ key_frame_found = key_frame_it != decodable_frames_.end();
+ }
+ if (key_frame_found) {
+ RTC_LOG(LS_INFO) << "Found key frame while dropping frames.";
+ // Reset last decoded state to make sure the next frame decoded is a key
+ // frame, and start NACKing from here.
+ last_decoded_state_.Reset();
+ DropPacketsFromNackList(EstimatedLowSequenceNumber(*key_frame_it->second));
+ } else if (decodable_frames_.empty()) {
+ // All frames dropped. Reset the decoding state and clear missing sequence
+ // numbers as we're starting fresh.
+ last_decoded_state_.Reset();
+ missing_sequence_numbers_.clear();
+ }
+ return key_frame_found;
+}
+
+void VCMJitterBuffer::UpdateAveragePacketsPerFrame(int current_number_packets) {
+ if (frame_counter_ > kFastConvergeThreshold) {
+ average_packets_per_frame_ =
+ average_packets_per_frame_ * (1 - kNormalConvergeMultiplier) +
+ current_number_packets * kNormalConvergeMultiplier;
+ } else if (frame_counter_ > 0) {
+ average_packets_per_frame_ =
+ average_packets_per_frame_ * (1 - kFastConvergeMultiplier) +
+ current_number_packets * kFastConvergeMultiplier;
+ frame_counter_++;
+ } else {
+ average_packets_per_frame_ = current_number_packets;
+ frame_counter_++;
+ }
+}
+
+// Must be called under the critical section `mutex_`.
+void VCMJitterBuffer::CleanUpOldOrEmptyFrames() {
+ decodable_frames_.CleanUpOldOrEmptyFrames(&last_decoded_state_,
+ &free_frames_);
+ incomplete_frames_.CleanUpOldOrEmptyFrames(&last_decoded_state_,
+ &free_frames_);
+ if (!last_decoded_state_.in_initial_state()) {
+ DropPacketsFromNackList(last_decoded_state_.sequence_num());
+ }
+}
+
+// Must be called from within `mutex_`.
+bool VCMJitterBuffer::IsPacketRetransmitted(const VCMPacket& packet) const {
+ return missing_sequence_numbers_.find(packet.seqNum) !=
+ missing_sequence_numbers_.end();
+}
+
+// Must be called under the critical section `mutex_`. Should never be
+// called with retransmitted frames, they must be filtered out before this
+// function is called.
+void VCMJitterBuffer::UpdateJitterEstimate(const VCMJitterSample& sample,
+ bool incomplete_frame) {
+ if (sample.latest_packet_time == -1) {
+ return;
+ }
+ UpdateJitterEstimate(sample.latest_packet_time, sample.timestamp,
+ sample.frame_size, incomplete_frame);
+}
+
+// Must be called under the critical section `mutex_`. Should never be
+// called with retransmitted frames, they must be filtered out before this
+// function is called.
+void VCMJitterBuffer::UpdateJitterEstimate(const VCMFrameBuffer& frame,
+ bool incomplete_frame) {
+ if (frame.LatestPacketTimeMs() == -1) {
+ return;
+ }
+ // No retransmitted frames should be a part of the jitter
+ // estimate.
+ UpdateJitterEstimate(frame.LatestPacketTimeMs(), frame.Timestamp(),
+ frame.size(), incomplete_frame);
+}
+
+// Must be called under the critical section `mutex_`. Should never be
+// called with retransmitted frames, they must be filtered out before this
+// function is called.
+void VCMJitterBuffer::UpdateJitterEstimate(int64_t latest_packet_time_ms,
+ uint32_t timestamp,
+ unsigned int frame_size,
+ bool /*incomplete_frame*/) {
+ if (latest_packet_time_ms == -1) {
+ return;
+ }
+ auto frame_delay = inter_frame_delay_.CalculateDelay(
+ timestamp, Timestamp::Millis(latest_packet_time_ms));
+
+ bool not_reordered = frame_delay.has_value();
+  // Filter out frames which have been reordered in time by the network.
+  if (not_reordered) {
+    // Update the jitter estimate with the new samples.
+ jitter_estimate_.UpdateEstimate(*frame_delay, DataSize::Bytes(frame_size));
+ }
+}
+
+void VCMJitterBuffer::RecycleFrameBuffer(VCMFrameBuffer* frame) {
+ frame->Reset();
+ free_frames_.push_back(frame);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/jitter_buffer.h b/third_party/libwebrtc/modules/video_coding/jitter_buffer.h
new file mode 100644
index 0000000000..a72ae38d11
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/jitter_buffer.h
@@ -0,0 +1,276 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_JITTER_BUFFER_H_
+#define MODULES_VIDEO_CODING_JITTER_BUFFER_H_
+
+#include <list>
+#include <map>
+#include <memory>
+#include <set>
+#include <vector>
+
+#include "api/field_trials_view.h"
+#include "modules/include/module_common_types.h"
+#include "modules/include/module_common_types_public.h"
+#include "modules/video_coding/decoding_state.h"
+#include "modules/video_coding/event_wrapper.h"
+#include "modules/video_coding/include/video_coding.h"
+#include "modules/video_coding/include/video_coding_defines.h"
+#include "modules/video_coding/jitter_buffer_common.h"
+#include "modules/video_coding/timing/inter_frame_delay.h"
+#include "modules/video_coding/timing/jitter_estimator.h"
+#include "rtc_base/synchronization/mutex.h"
+#include "rtc_base/thread_annotations.h"
+
+namespace webrtc {
+
+// forward declarations
+class Clock;
+class VCMFrameBuffer;
+class VCMPacket;
+class VCMEncodedFrame;
+
+typedef std::list<VCMFrameBuffer*> UnorderedFrameList;
+
+struct VCMJitterSample {
+ VCMJitterSample() : timestamp(0), frame_size(0), latest_packet_time(-1) {}
+ uint32_t timestamp;
+ uint32_t frame_size;
+ int64_t latest_packet_time;
+};
+
+class TimestampLessThan {
+ public:
+ bool operator()(uint32_t timestamp1, uint32_t timestamp2) const {
+ return IsNewerTimestamp(timestamp2, timestamp1);
+ }
+};
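+
+// For example, with RTP timestamp wraparound, timestamp 0x00000010 is
+// considered newer than 0xFFFFFF00, so it sorts after it in a FrameList.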
+
+class FrameList
+ : public std::map<uint32_t, VCMFrameBuffer*, TimestampLessThan> {
+ public:
+ void InsertFrame(VCMFrameBuffer* frame);
+ VCMFrameBuffer* FindFrame(uint16_t seq_num, uint32_t timestamp);
+ VCMFrameBuffer* PopFrame(uint32_t timestamp);
+ VCMFrameBuffer* Front() const;
+ VCMFrameBuffer* Back() const;
+ int RecycleFramesUntilKeyFrame(FrameList::iterator* key_frame_it,
+ UnorderedFrameList* free_frames);
+ void CleanUpOldOrEmptyFrames(VCMDecodingState* decoding_state,
+ UnorderedFrameList* free_frames);
+ void Reset(UnorderedFrameList* free_frames);
+};
+
+class VCMJitterBuffer {
+ public:
+ VCMJitterBuffer(Clock* clock,
+ std::unique_ptr<EventWrapper> event,
+ const FieldTrialsView& field_trials);
+
+ ~VCMJitterBuffer();
+
+ VCMJitterBuffer(const VCMJitterBuffer&) = delete;
+ VCMJitterBuffer& operator=(const VCMJitterBuffer&) = delete;
+
+ // Initializes and starts jitter buffer.
+ void Start();
+
+ // Signals all internal events and stops the jitter buffer.
+ void Stop();
+
+ // Returns true if the jitter buffer is running.
+ bool Running() const;
+
+ // Empty the jitter buffer of all its data.
+ void Flush();
+
+ // Gets number of packets received.
+ int num_packets() const;
+
+ // Gets number of duplicated packets received.
+ int num_duplicated_packets() const;
+
+  // Waits up to `max_wait_time_ms` ms for a complete frame to arrive.
+ // If found, a pointer to the frame is returned. Returns nullptr otherwise.
+ VCMEncodedFrame* NextCompleteFrame(uint32_t max_wait_time_ms);
+
+ // Extract frame corresponding to input timestamp.
+ // Frame will be set to a decoding state.
+ VCMEncodedFrame* ExtractAndSetDecode(uint32_t timestamp);
+
+ // Releases a frame returned from the jitter buffer, should be called when
+ // done with decoding.
+ void ReleaseFrame(VCMEncodedFrame* frame);
+
+ // Returns the time in ms when the latest packet was inserted into the frame.
+ // Retransmitted is set to true if any of the packets belonging to the frame
+ // has been retransmitted.
+ int64_t LastPacketTime(const VCMEncodedFrame* frame,
+ bool* retransmitted) const;
+
+ // Inserts a packet into a frame returned from GetFrame().
+ // If the return value is <= 0, `frame` is invalidated and the pointer must
+ // be dropped after this function returns.
+ VCMFrameBufferEnum InsertPacket(const VCMPacket& packet, bool* retransmitted);
+
+ // Returns the estimated jitter in milliseconds.
+ uint32_t EstimatedJitterMs();
+
+ void SetNackSettings(size_t max_nack_list_size,
+ int max_packet_age_to_nack,
+ int max_incomplete_time_ms);
+
+ // Returns a list of the sequence numbers currently missing.
+ std::vector<uint16_t> GetNackList(bool* request_key_frame);
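+
+  // Typical call sequence (illustrative sketch; the unit tests in
+  // jitter_buffer_unittest.cc are the authoritative usage):
+  //   VCMJitterBuffer jb(clock, absl::WrapUnique(EventWrapper::Create()),
+  //                      field_trials);
+  //   jb.Start();
+  //   bool retransmitted = false;
+  //   jb.InsertPacket(packet, &retransmitted);
+  //   if (VCMEncodedFrame* found = jb.NextCompleteFrame(/*max_wait_time_ms=*/0)) {
+  //     VCMEncodedFrame* frame = jb.ExtractAndSetDecode(found->Timestamp());
+  //     // ... decode `frame` ...
+  //     jb.ReleaseFrame(frame);
+  //   }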
+
+ private:
+ class SequenceNumberLessThan {
+ public:
+ bool operator()(const uint16_t& sequence_number1,
+ const uint16_t& sequence_number2) const {
+ return IsNewerSequenceNumber(sequence_number2, sequence_number1);
+ }
+ };
+ typedef std::set<uint16_t, SequenceNumberLessThan> SequenceNumberSet;
+
+ // Gets the frame assigned to the timestamp of the packet. May recycle
+  // existing frames if no free frames are available. Returns an error code on
+  // failure, or kNoError on success. `frame_list` contains which list the
+ // packet was in, or NULL if it was not in a FrameList (a new frame).
+ VCMFrameBufferEnum GetFrame(const VCMPacket& packet,
+ VCMFrameBuffer** frame,
+ FrameList** frame_list)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+
+ // Returns true if `frame` is continuous in `decoding_state`, not taking
+ // decodable frames into account.
+ bool IsContinuousInState(const VCMFrameBuffer& frame,
+ const VCMDecodingState& decoding_state) const
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+ // Returns true if `frame` is continuous in the `last_decoded_state_`, taking
+ // all decodable frames into account.
+ bool IsContinuous(const VCMFrameBuffer& frame) const
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+ // Looks for frames in `incomplete_frames_` which are continuous in the
+ // provided `decoded_state`. Starts the search from the timestamp of
+ // `decoded_state`.
+ void FindAndInsertContinuousFramesWithState(
+ const VCMDecodingState& decoded_state)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+ // Looks for frames in `incomplete_frames_` which are continuous in
+ // `last_decoded_state_` taking all decodable frames into account. Starts
+ // the search from `new_frame`.
+ void FindAndInsertContinuousFrames(const VCMFrameBuffer& new_frame)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+ VCMFrameBuffer* NextFrame() const RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+ // Returns true if the NACK list was updated to cover sequence numbers up to
+  // `sequence_number`. If false, a key frame is needed to get into a state
+  // where we can continue decoding.
+ bool UpdateNackList(uint16_t sequence_number)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+ bool TooLargeNackList() const;
+  // Returns true if the NACK list was reduced without problem. If false, a
+  // key frame is needed to get into a state where we can continue decoding.
+ bool HandleTooLargeNackList() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+ bool MissingTooOldPacket(uint16_t latest_sequence_number) const
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+  // Returns true if the too-old packets were successfully removed from the
+  // NACK list. If false, a key frame is needed to get into a state where we
+  // can continue decoding.
+ bool HandleTooOldPackets(uint16_t latest_sequence_number)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+ // Drops all packets in the NACK list up until `last_decoded_sequence_number`.
+ void DropPacketsFromNackList(uint16_t last_decoded_sequence_number);
+
+ // Gets an empty frame, creating a new frame if necessary (i.e. increases
+ // jitter buffer size).
+ VCMFrameBuffer* GetEmptyFrame() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+
+ // Attempts to increase the size of the jitter buffer. Returns true on
+ // success, false otherwise.
+ bool TryToIncreaseJitterBufferSize() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+
+ // Recycles oldest frames until a key frame is found. Used if jitter buffer is
+ // completely full. Returns true if a key frame was found.
+ bool RecycleFramesUntilKeyFrame() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+
+ // Update rolling average of packets per frame.
+  void UpdateAveragePacketsPerFrame(int current_number_packets);
+
+  // Removes old/empty frames from the jitter buffer's frame lists.
+ // Should only be called prior to actual use.
+ void CleanUpOldOrEmptyFrames() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+
+ // Returns true if `packet` is likely to have been retransmitted.
+ bool IsPacketRetransmitted(const VCMPacket& packet) const;
+
+ // The following three functions update the jitter estimate with the
+ // payload size, receive time and RTP timestamp of a frame.
+ void UpdateJitterEstimate(const VCMJitterSample& sample,
+ bool incomplete_frame);
+ void UpdateJitterEstimate(const VCMFrameBuffer& frame, bool incomplete_frame);
+ void UpdateJitterEstimate(int64_t latest_packet_time_ms,
+ uint32_t timestamp,
+ unsigned int frame_size,
+ bool incomplete_frame);
+
+ int NonContinuousOrIncompleteDuration() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+
+ uint16_t EstimatedLowSequenceNumber(const VCMFrameBuffer& frame) const;
+
+ // Reset frame buffer and return it to free_frames_.
+ void RecycleFrameBuffer(VCMFrameBuffer* frame)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+
+ Clock* clock_;
+ // If we are running (have started) or not.
+ bool running_;
+ mutable Mutex mutex_;
+ // Event to signal when we have a frame ready for decoder.
+ std::unique_ptr<EventWrapper> frame_event_;
+ // Number of allocated frames.
+ int max_number_of_frames_;
+ UnorderedFrameList free_frames_ RTC_GUARDED_BY(mutex_);
+ FrameList decodable_frames_ RTC_GUARDED_BY(mutex_);
+ FrameList incomplete_frames_ RTC_GUARDED_BY(mutex_);
+ VCMDecodingState last_decoded_state_ RTC_GUARDED_BY(mutex_);
+ bool first_packet_since_reset_;
+
+ // Number of packets in a row that have been too old.
+ int num_consecutive_old_packets_;
+ // Number of packets received.
+ int num_packets_ RTC_GUARDED_BY(mutex_);
+ // Number of duplicated packets received.
+ int num_duplicated_packets_ RTC_GUARDED_BY(mutex_);
+
+ // Jitter estimation.
+ // Filter for estimating jitter.
+ JitterEstimator jitter_estimate_;
+ // Calculates network delays used for jitter calculations.
+ InterFrameDelay inter_frame_delay_;
+ VCMJitterSample waiting_for_completion_;
+
+ // Holds the internal NACK list (the missing sequence numbers).
+ SequenceNumberSet missing_sequence_numbers_;
+ uint16_t latest_received_sequence_number_;
+ size_t max_nack_list_size_;
+ int max_packet_age_to_nack_; // Measured in sequence numbers.
+ int max_incomplete_time_ms_;
+
+  // Estimated rolling average of packets per frame.
+  float average_packets_per_frame_;
+  // `average_packets_per_frame_` converges fast if we have fewer than this
+  // many frames.
+ int frame_counter_;
+};
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_JITTER_BUFFER_H_
diff --git a/third_party/libwebrtc/modules/video_coding/jitter_buffer_common.h b/third_party/libwebrtc/modules/video_coding/jitter_buffer_common.h
new file mode 100644
index 0000000000..2a23f8e4be
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/jitter_buffer_common.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_JITTER_BUFFER_COMMON_H_
+#define MODULES_VIDEO_CODING_JITTER_BUFFER_COMMON_H_
+
+namespace webrtc {
+
+// Used to estimate rolling average of packets per frame.
+static const float kFastConvergeMultiplier = 0.4f;
+static const float kNormalConvergeMultiplier = 0.2f;
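+// The rolling average is updated as (see
+// VCMJitterBuffer::UpdateAveragePacketsPerFrame):
+//   average = average * (1 - multiplier) + current_packets * multiplier;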
+
+enum { kMaxNumberOfFrames = 300 };
+enum { kStartNumberOfFrames = 6 };
+enum { kMaxVideoDelayMs = 10000 };
+enum { kPacketsPerFrameMultiplier = 5 };
+enum { kFastConvergeThreshold = 5 };
+
+enum VCMJitterBufferEnum {
+ kMaxConsecutiveOldFrames = 60,
+ kMaxConsecutiveOldPackets = 300,
+ // TODO(sprang): Reduce this limit once codecs don't sometimes wildly
+ // overshoot bitrate target.
+ kMaxPacketsInSession = 1400, // Allows ~2MB frames.
+ kBufferIncStepSizeBytes = 30000, // >20 packets.
+  kMaxJBFrameSizeBytes = 4000000,  // Sanity limit: don't go above 4 MB.
+  kBufferSafetyMargin = 100  // Enough for ~50 NALs in a STAP-A.
+};
+
+enum VCMFrameBufferEnum {
+ kOutOfBoundsPacket = -7,
+ kNotInitialized = -6,
+ kOldPacket = -5,
+ kGeneralError = -4,
+ kFlushIndicator = -3, // Indicator that a flush has occurred.
+ kTimeStampError = -2,
+ kSizeError = -1,
+ kNoError = 0,
+ kIncomplete = 1, // Frame incomplete.
+  kCompleteSession = 3,  // At least one layer in the frame is complete.
+ kDuplicatePacket = 5 // We're receiving a duplicate packet.
+};
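+
+// Note: negative VCMFrameBufferEnum values are errors, kNoError (0) is
+// success, and positive values describe the resulting frame state after a
+// successful packet insert.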
+
+enum VCMFrameBufferStateEnum {
+  kStateEmpty,       // Frame popped by the RTP receiver.
+  kStateIncomplete,  // Frame that has one or more packets stored.
+  kStateComplete,    // Frame that has all of its packets.
+};
+
+enum { kH264StartCodeLengthBytes = 4 };
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_JITTER_BUFFER_COMMON_H_
diff --git a/third_party/libwebrtc/modules/video_coding/jitter_buffer_unittest.cc b/third_party/libwebrtc/modules/video_coding/jitter_buffer_unittest.cc
new file mode 100644
index 0000000000..930eca5d91
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/jitter_buffer_unittest.cc
@@ -0,0 +1,1849 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/jitter_buffer.h"
+
+#include <list>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "absl/memory/memory.h"
+#include "common_video/h264/h264_common.h"
+#include "modules/video_coding/frame_buffer.h"
+#include "modules/video_coding/media_opt_util.h"
+#include "modules/video_coding/packet.h"
+#include "modules/video_coding/test/stream_generator.h"
+#include "rtc_base/location.h"
+#include "system_wrappers/include/clock.h"
+#include "system_wrappers/include/metrics.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "test/scoped_key_value_config.h"
+
+namespace webrtc {
+
+class TestBasicJitterBuffer : public ::testing::Test {
+ protected:
+ TestBasicJitterBuffer() {}
+ void SetUp() override {
+ clock_.reset(new SimulatedClock(0));
+ jitter_buffer_.reset(new VCMJitterBuffer(
+ clock_.get(), absl::WrapUnique(EventWrapper::Create()), field_trials_));
+ jitter_buffer_->Start();
+ seq_num_ = 1234;
+ timestamp_ = 0;
+ size_ = 1400;
+ // Data vector - 0, 0, 0x80, 3, 4, 5, 6, 7, 8, 9, 0, 0, 0x80, 3....
+ data_[0] = 0;
+ data_[1] = 0;
+ data_[2] = 0x80;
+ int count = 3;
+ for (unsigned int i = 3; i < sizeof(data_) - 3; ++i) {
+ data_[i] = count;
+ count++;
+ if (count == 10) {
+ data_[i + 1] = 0;
+ data_[i + 2] = 0;
+ data_[i + 3] = 0x80;
+ count = 3;
+ i += 3;
+ }
+ }
+ RTPHeader rtp_header;
+ RTPVideoHeader video_header;
+ rtp_header.sequenceNumber = seq_num_;
+ rtp_header.timestamp = timestamp_;
+ rtp_header.markerBit = true;
+ video_header.codec = kVideoCodecGeneric;
+ video_header.is_first_packet_in_frame = true;
+ video_header.frame_type = VideoFrameType::kVideoFrameDelta;
+ packet_.reset(new VCMPacket(data_, size_, rtp_header, video_header,
+ /*ntp_time_ms=*/0, clock_->CurrentTime()));
+ }
+
+ VCMEncodedFrame* DecodeCompleteFrame() {
+ VCMEncodedFrame* found_frame = jitter_buffer_->NextCompleteFrame(10);
+ if (!found_frame)
+ return nullptr;
+ return jitter_buffer_->ExtractAndSetDecode(found_frame->Timestamp());
+ }
+
+ void CheckOutFrame(VCMEncodedFrame* frame_out,
+ unsigned int size,
+ bool startCode) {
+ ASSERT_TRUE(frame_out);
+
+ const uint8_t* outData = frame_out->data();
+ unsigned int i = 0;
+
+ if (startCode) {
+ EXPECT_EQ(0, outData[0]);
+ EXPECT_EQ(0, outData[1]);
+ EXPECT_EQ(0, outData[2]);
+ EXPECT_EQ(1, outData[3]);
+ i += 4;
+ }
+
+ EXPECT_EQ(size, frame_out->size());
+ int count = 3;
+ for (; i < size; i++) {
+ if (outData[i] == 0 && outData[i + 1] == 0 && outData[i + 2] == 0x80) {
+ i += 2;
+ } else if (startCode && outData[i] == 0 && outData[i + 1] == 0) {
+ EXPECT_EQ(0, outData[0]);
+ EXPECT_EQ(0, outData[1]);
+ EXPECT_EQ(0, outData[2]);
+ EXPECT_EQ(1, outData[3]);
+ i += 3;
+ } else {
+ EXPECT_EQ(count, outData[i]);
+ count++;
+ if (count == 10) {
+ count = 3;
+ }
+ }
+ }
+ }
+
+ uint16_t seq_num_;
+ uint32_t timestamp_;
+ int size_;
+ uint8_t data_[1500];
+ test::ScopedKeyValueConfig field_trials_;
+ std::unique_ptr<VCMPacket> packet_;
+ std::unique_ptr<SimulatedClock> clock_;
+ std::unique_ptr<VCMJitterBuffer> jitter_buffer_;
+};
+
+class TestRunningJitterBuffer : public ::testing::Test {
+ protected:
+ enum { kDataBufferSize = 10 };
+
+ virtual void SetUp() {
+ clock_.reset(new SimulatedClock(0));
+ max_nack_list_size_ = 150;
+ oldest_packet_to_nack_ = 250;
+ jitter_buffer_ = new VCMJitterBuffer(
+ clock_.get(), absl::WrapUnique(EventWrapper::Create()), field_trials_);
+ stream_generator_ = new StreamGenerator(0, clock_->TimeInMilliseconds());
+ jitter_buffer_->Start();
+ jitter_buffer_->SetNackSettings(max_nack_list_size_, oldest_packet_to_nack_,
+ 0);
+ memset(data_buffer_, 0, kDataBufferSize);
+ }
+
+ virtual void TearDown() {
+ jitter_buffer_->Stop();
+ delete stream_generator_;
+ delete jitter_buffer_;
+ }
+
+ VCMFrameBufferEnum InsertPacketAndPop(int index) {
+ VCMPacket packet;
+ packet.dataPtr = data_buffer_;
+ bool packet_available = stream_generator_->PopPacket(&packet, index);
+ EXPECT_TRUE(packet_available);
+ if (!packet_available)
+ return kGeneralError; // Return here to avoid crashes below.
+ bool retransmitted = false;
+ return jitter_buffer_->InsertPacket(packet, &retransmitted);
+ }
+
+ VCMFrameBufferEnum InsertPacket(int index) {
+ VCMPacket packet;
+ packet.dataPtr = data_buffer_;
+ bool packet_available = stream_generator_->GetPacket(&packet, index);
+ EXPECT_TRUE(packet_available);
+ if (!packet_available)
+ return kGeneralError; // Return here to avoid crashes below.
+ bool retransmitted = false;
+ return jitter_buffer_->InsertPacket(packet, &retransmitted);
+ }
+
+ VCMFrameBufferEnum InsertFrame(VideoFrameType frame_type) {
+ stream_generator_->GenerateFrame(
+ frame_type, (frame_type != VideoFrameType::kEmptyFrame) ? 1 : 0,
+ (frame_type == VideoFrameType::kEmptyFrame) ? 1 : 0,
+ clock_->TimeInMilliseconds());
+ VCMFrameBufferEnum ret = InsertPacketAndPop(0);
+ clock_->AdvanceTimeMilliseconds(kDefaultFramePeriodMs);
+ return ret;
+ }
+
+ VCMFrameBufferEnum InsertFrames(int num_frames, VideoFrameType frame_type) {
+ VCMFrameBufferEnum ret_for_all = kNoError;
+ for (int i = 0; i < num_frames; ++i) {
+ VCMFrameBufferEnum ret = InsertFrame(frame_type);
+ if (ret < kNoError) {
+ ret_for_all = ret;
+ } else if (ret_for_all >= kNoError) {
+ ret_for_all = ret;
+ }
+ }
+ return ret_for_all;
+ }
+
+ void DropFrame(int num_packets) {
+ stream_generator_->GenerateFrame(VideoFrameType::kVideoFrameDelta,
+ num_packets, 0,
+ clock_->TimeInMilliseconds());
+ for (int i = 0; i < num_packets; ++i)
+ stream_generator_->DropLastPacket();
+ clock_->AdvanceTimeMilliseconds(kDefaultFramePeriodMs);
+ }
+
+ bool DecodeCompleteFrame() {
+ VCMEncodedFrame* found_frame = jitter_buffer_->NextCompleteFrame(0);
+ if (!found_frame)
+ return false;
+
+ VCMEncodedFrame* frame =
+ jitter_buffer_->ExtractAndSetDecode(found_frame->Timestamp());
+ bool ret = (frame != NULL);
+ jitter_buffer_->ReleaseFrame(frame);
+ return ret;
+ }
+
+ test::ScopedKeyValueConfig field_trials_;
+ VCMJitterBuffer* jitter_buffer_;
+ StreamGenerator* stream_generator_;
+ std::unique_ptr<SimulatedClock> clock_;
+ size_t max_nack_list_size_;
+ int oldest_packet_to_nack_;
+ uint8_t data_buffer_[kDataBufferSize];
+};
+
+class TestJitterBufferNack : public TestRunningJitterBuffer {
+ protected:
+ TestJitterBufferNack() {}
+ virtual void SetUp() { TestRunningJitterBuffer::SetUp(); }
+
+ virtual void TearDown() { TestRunningJitterBuffer::TearDown(); }
+};
+
+TEST_F(TestBasicJitterBuffer, StopRunning) {
+ jitter_buffer_->Stop();
+ EXPECT_TRUE(NULL == DecodeCompleteFrame());
+ jitter_buffer_->Start();
+
+ // No packets inserted.
+ EXPECT_TRUE(NULL == DecodeCompleteFrame());
+}
+
+TEST_F(TestBasicJitterBuffer, SinglePacketFrame) {
+ // Always start with a complete key frame when not allowing errors.
+ packet_->video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ packet_->video_header.is_first_packet_in_frame = true;
+ packet_->markerBit = true;
+ packet_->timestamp += 123 * 90;
+
+ // Insert the packet to the jitter buffer and get a frame.
+ bool retransmitted = false;
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+ VCMEncodedFrame* frame_out = DecodeCompleteFrame();
+ CheckOutFrame(frame_out, size_, false);
+ EXPECT_EQ(VideoFrameType::kVideoFrameKey, frame_out->FrameType());
+ jitter_buffer_->ReleaseFrame(frame_out);
+}
+
+TEST_F(TestBasicJitterBuffer, DualPacketFrame) {
+ packet_->video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ packet_->video_header.is_first_packet_in_frame = true;
+ packet_->markerBit = false;
+
+ bool retransmitted = false;
+ EXPECT_EQ(kIncomplete,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+ VCMEncodedFrame* frame_out = DecodeCompleteFrame();
+ // Should not be complete.
+ EXPECT_TRUE(frame_out == NULL);
+
+ ++seq_num_;
+ packet_->video_header.is_first_packet_in_frame = false;
+ packet_->markerBit = true;
+ packet_->seqNum = seq_num_;
+
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+
+ frame_out = DecodeCompleteFrame();
+ CheckOutFrame(frame_out, 2 * size_, false);
+
+ EXPECT_EQ(VideoFrameType::kVideoFrameKey, frame_out->FrameType());
+ jitter_buffer_->ReleaseFrame(frame_out);
+}
+
+TEST_F(TestBasicJitterBuffer, 100PacketKeyFrame) {
+ packet_->video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ packet_->video_header.is_first_packet_in_frame = true;
+ packet_->markerBit = false;
+
+ bool retransmitted = false;
+ EXPECT_EQ(kIncomplete,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+
+ VCMEncodedFrame* frame_out = DecodeCompleteFrame();
+
+ // Frame should not be complete.
+ EXPECT_TRUE(frame_out == NULL);
+
+  // Insert 98 packets.
+ int loop = 0;
+ do {
+ seq_num_++;
+ packet_->video_header.is_first_packet_in_frame = false;
+ packet_->markerBit = false;
+ packet_->seqNum = seq_num_;
+
+ EXPECT_EQ(kIncomplete,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+ loop++;
+ } while (loop < 98);
+
+ // Insert last packet.
+ ++seq_num_;
+ packet_->video_header.is_first_packet_in_frame = false;
+ packet_->markerBit = true;
+ packet_->seqNum = seq_num_;
+
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+
+ frame_out = DecodeCompleteFrame();
+
+ CheckOutFrame(frame_out, 100 * size_, false);
+ EXPECT_EQ(VideoFrameType::kVideoFrameKey, frame_out->FrameType());
+ jitter_buffer_->ReleaseFrame(frame_out);
+}
+
+TEST_F(TestBasicJitterBuffer, 100PacketDeltaFrame) {
+ // Always start with a complete key frame.
+ packet_->video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ packet_->video_header.is_first_packet_in_frame = true;
+ packet_->markerBit = true;
+
+ bool retransmitted = false;
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+ VCMEncodedFrame* frame_out = DecodeCompleteFrame();
+ EXPECT_FALSE(frame_out == NULL);
+ jitter_buffer_->ReleaseFrame(frame_out);
+
+ ++seq_num_;
+ packet_->seqNum = seq_num_;
+ packet_->markerBit = false;
+ packet_->video_header.frame_type = VideoFrameType::kVideoFrameDelta;
+ packet_->timestamp += 33 * 90;
+
+ EXPECT_EQ(kIncomplete,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+
+ frame_out = DecodeCompleteFrame();
+
+ // Frame should not be complete.
+ EXPECT_TRUE(frame_out == NULL);
+
+ packet_->video_header.is_first_packet_in_frame = false;
+  // Insert 98 packets.
+ int loop = 0;
+ do {
+ ++seq_num_;
+ packet_->seqNum = seq_num_;
+
+ // Insert a packet into a frame.
+ EXPECT_EQ(kIncomplete,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+ loop++;
+ } while (loop < 98);
+
+ // Insert the last packet.
+ ++seq_num_;
+ packet_->video_header.is_first_packet_in_frame = false;
+ packet_->markerBit = true;
+ packet_->seqNum = seq_num_;
+
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+
+ frame_out = DecodeCompleteFrame();
+
+ CheckOutFrame(frame_out, 100 * size_, false);
+ EXPECT_EQ(VideoFrameType::kVideoFrameDelta, frame_out->FrameType());
+ jitter_buffer_->ReleaseFrame(frame_out);
+}
+
+TEST_F(TestBasicJitterBuffer, PacketReorderingReverseOrder) {
+ // Insert the "first" packet last.
+ seq_num_ += 100;
+ packet_->video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ packet_->video_header.is_first_packet_in_frame = false;
+ packet_->markerBit = true;
+ packet_->seqNum = seq_num_;
+ packet_->timestamp = timestamp_;
+
+ bool retransmitted = false;
+ EXPECT_EQ(kIncomplete,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+
+ VCMEncodedFrame* frame_out = DecodeCompleteFrame();
+
+ EXPECT_TRUE(frame_out == NULL);
+
+ // Insert 98 packets.
+ int loop = 0;
+ do {
+ seq_num_--;
+ packet_->video_header.is_first_packet_in_frame = false;
+ packet_->markerBit = false;
+ packet_->seqNum = seq_num_;
+
+ EXPECT_EQ(kIncomplete,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+ loop++;
+ } while (loop < 98);
+
+ // Insert the last packet.
+ seq_num_--;
+ packet_->video_header.is_first_packet_in_frame = true;
+ packet_->markerBit = false;
+ packet_->seqNum = seq_num_;
+
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+
+ frame_out = DecodeCompleteFrame();
+
+ CheckOutFrame(frame_out, 100 * size_, false);
+
+ EXPECT_EQ(VideoFrameType::kVideoFrameKey, frame_out->FrameType());
+ jitter_buffer_->ReleaseFrame(frame_out);
+}
+
+TEST_F(TestBasicJitterBuffer, FrameReordering2Frames2PacketsEach) {
+ packet_->video_header.frame_type = VideoFrameType::kVideoFrameDelta;
+ packet_->video_header.is_first_packet_in_frame = true;
+ packet_->markerBit = false;
+
+ bool retransmitted = false;
+ EXPECT_EQ(kIncomplete,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+
+ VCMEncodedFrame* frame_out = DecodeCompleteFrame();
+
+ EXPECT_TRUE(frame_out == NULL);
+
+ seq_num_++;
+ packet_->video_header.is_first_packet_in_frame = false;
+ packet_->markerBit = true;
+ packet_->seqNum = seq_num_;
+
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+
+ // Check that we fail to get the frame since the sequence numbers are not
+ // continuous.
+ frame_out = DecodeCompleteFrame();
+ EXPECT_TRUE(frame_out == NULL);
+
+ seq_num_ -= 3;
+ timestamp_ -= 33 * 90;
+ packet_->video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ packet_->video_header.is_first_packet_in_frame = true;
+ packet_->markerBit = false;
+ packet_->seqNum = seq_num_;
+ packet_->timestamp = timestamp_;
+
+ EXPECT_EQ(kIncomplete,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+
+ frame_out = DecodeCompleteFrame();
+
+ // It should not be complete.
+ EXPECT_TRUE(frame_out == NULL);
+
+ seq_num_++;
+ packet_->video_header.is_first_packet_in_frame = false;
+ packet_->markerBit = true;
+ packet_->seqNum = seq_num_;
+
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+
+ frame_out = DecodeCompleteFrame();
+ CheckOutFrame(frame_out, 2 * size_, false);
+ EXPECT_EQ(VideoFrameType::kVideoFrameKey, frame_out->FrameType());
+ jitter_buffer_->ReleaseFrame(frame_out);
+
+ frame_out = DecodeCompleteFrame();
+ CheckOutFrame(frame_out, 2 * size_, false);
+ EXPECT_EQ(VideoFrameType::kVideoFrameDelta, frame_out->FrameType());
+ jitter_buffer_->ReleaseFrame(frame_out);
+}
+
+TEST_F(TestBasicJitterBuffer, TestReorderingWithPadding) {
+ jitter_buffer_->SetNackSettings(kMaxNumberOfFrames, kMaxNumberOfFrames, 0);
+ packet_->video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ packet_->video_header.is_first_packet_in_frame = true;
+ packet_->markerBit = true;
+
+ // Send in an initial good packet/frame (Frame A) to start things off.
+ bool retransmitted = false;
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+ VCMEncodedFrame* frame_out = DecodeCompleteFrame();
+ EXPECT_TRUE(frame_out != NULL);
+ jitter_buffer_->ReleaseFrame(frame_out);
+
+ // Now send in a complete delta frame (Frame C), but with a sequence number
+ // gap. No pic index either, so no temporal scalability cheating :)
+ packet_->video_header.frame_type = VideoFrameType::kVideoFrameDelta;
+ // Leave a gap of 2 sequence numbers and two frames.
+ packet_->seqNum = seq_num_ + 3;
+ packet_->timestamp = timestamp_ + (66 * 90);
+ // Still isFirst = marker = true.
+ // Session should be complete (frame is complete), but there's nothing to
+ // decode yet.
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+ frame_out = DecodeCompleteFrame();
+ EXPECT_TRUE(frame_out == NULL);
+
+ // Now send in a complete delta frame (Frame B) that is continuous from A, but
+ // doesn't fill the full gap to C. The rest of the gap is going to be padding.
+ packet_->seqNum = seq_num_ + 1;
+ packet_->timestamp = timestamp_ + (33 * 90);
+ // Still isFirst = marker = true.
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+ frame_out = DecodeCompleteFrame();
+ EXPECT_TRUE(frame_out != NULL);
+ jitter_buffer_->ReleaseFrame(frame_out);
+
+ // But Frame C isn't continuous yet.
+ frame_out = DecodeCompleteFrame();
+ EXPECT_TRUE(frame_out == NULL);
+
+ // Add in the padding. These are empty packets (data length is 0) with no
+ // marker bit and matching the timestamp of Frame B.
+ RTPHeader rtp_header;
+ RTPVideoHeader video_header;
+ rtp_header.sequenceNumber = seq_num_ + 2;
+ rtp_header.timestamp = timestamp_ + (33 * 90);
+ rtp_header.markerBit = false;
+ video_header.codec = kVideoCodecGeneric;
+ video_header.frame_type = VideoFrameType::kEmptyFrame;
+ VCMPacket empty_packet(data_, 0, rtp_header, video_header,
+ /*ntp_time_ms=*/0, clock_->CurrentTime());
+ EXPECT_EQ(kOldPacket,
+ jitter_buffer_->InsertPacket(empty_packet, &retransmitted));
+ empty_packet.seqNum += 1;
+ EXPECT_EQ(kOldPacket,
+ jitter_buffer_->InsertPacket(empty_packet, &retransmitted));
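+ // (The padding packets match already-decoded Frame B's timestamp, so they
+ // are reported as kOldPacket, but they still fill the sequence number gap.)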
+
+ // But now Frame C should be ready!
+ frame_out = DecodeCompleteFrame();
+ EXPECT_TRUE(frame_out != NULL);
+ jitter_buffer_->ReleaseFrame(frame_out);
+}
+
+TEST_F(TestBasicJitterBuffer, DuplicatePackets) {
+ packet_->video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ packet_->video_header.is_first_packet_in_frame = true;
+ packet_->markerBit = false;
+ packet_->seqNum = seq_num_;
+ packet_->timestamp = timestamp_;
+ EXPECT_EQ(0, jitter_buffer_->num_packets());
+ EXPECT_EQ(0, jitter_buffer_->num_duplicated_packets());
+
+ bool retransmitted = false;
+ EXPECT_EQ(kIncomplete,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+
+ VCMEncodedFrame* frame_out = DecodeCompleteFrame();
+
+ EXPECT_TRUE(frame_out == NULL);
+ EXPECT_EQ(1, jitter_buffer_->num_packets());
+ EXPECT_EQ(0, jitter_buffer_->num_duplicated_packets());
+
+ // Insert the same packet again; it should be counted as a duplicate.
+ EXPECT_EQ(kDuplicatePacket,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+ EXPECT_EQ(2, jitter_buffer_->num_packets());
+ EXPECT_EQ(1, jitter_buffer_->num_duplicated_packets());
+
+ seq_num_++;
+ packet_->seqNum = seq_num_;
+ packet_->markerBit = true;
+ packet_->video_header.is_first_packet_in_frame = false;
+
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+
+ frame_out = DecodeCompleteFrame();
+ ASSERT_TRUE(frame_out != NULL);
+ CheckOutFrame(frame_out, 2 * size_, false);
+
+ EXPECT_EQ(VideoFrameType::kVideoFrameKey, frame_out->FrameType());
+ EXPECT_EQ(3, jitter_buffer_->num_packets());
+ EXPECT_EQ(1, jitter_buffer_->num_duplicated_packets());
+ jitter_buffer_->ReleaseFrame(frame_out);
+}
+
+TEST_F(TestBasicJitterBuffer, DuplicatePreviousDeltaFramePacket) {
+ packet_->video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ packet_->video_header.is_first_packet_in_frame = true;
+ packet_->markerBit = true;
+ packet_->seqNum = seq_num_;
+ packet_->timestamp = timestamp_;
+ EXPECT_EQ(0, jitter_buffer_->num_packets());
+ EXPECT_EQ(0, jitter_buffer_->num_duplicated_packets());
+
+ bool retransmitted = false;
+ // Insert first complete frame.
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+
+ VCMEncodedFrame* frame_out = DecodeCompleteFrame();
+ ASSERT_TRUE(frame_out != NULL);
+ CheckOutFrame(frame_out, size_, false);
+ EXPECT_EQ(VideoFrameType::kVideoFrameKey, frame_out->FrameType());
+ jitter_buffer_->ReleaseFrame(frame_out);
+
+ // Insert 3 delta frames.
+ for (uint16_t i = 1; i <= 3; ++i) {
+ packet_->seqNum = seq_num_ + i;
+ packet_->timestamp = timestamp_ + (i * 33) * 90;
+ packet_->video_header.frame_type = VideoFrameType::kVideoFrameDelta;
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+ EXPECT_EQ(i + 1, jitter_buffer_->num_packets());
+ EXPECT_EQ(0, jitter_buffer_->num_duplicated_packets());
+ }
+
+ // Retransmit second delta frame.
+ packet_->seqNum = seq_num_ + 2;
+ packet_->timestamp = timestamp_ + 66 * 90;
+
+ EXPECT_EQ(kDuplicatePacket,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+
+ EXPECT_EQ(5, jitter_buffer_->num_packets());
+ EXPECT_EQ(1, jitter_buffer_->num_duplicated_packets());
+
+ // Should be able to decode 3 delta frames, key frame already decoded.
+ for (size_t i = 0; i < 3; ++i) {
+ frame_out = DecodeCompleteFrame();
+ ASSERT_TRUE(frame_out != NULL);
+ CheckOutFrame(frame_out, size_, false);
+ EXPECT_EQ(VideoFrameType::kVideoFrameDelta, frame_out->FrameType());
+ jitter_buffer_->ReleaseFrame(frame_out);
+ }
+}
+
+TEST_F(TestBasicJitterBuffer, TestSkipForwardVp9) {
+ // Verify that the jitter buffer skips forward to the next base layer frame.
+ // -------------------------------------------------
+ // | 65485 | 65486 | 65487 | 65488 | 65489 | ...
+ // | pid:5 | pid:6 | pid:7 | pid:8 | pid:9 | ...
+ // | tid:0 | tid:2 | tid:1 | tid:2 | tid:0 | ...
+ // | ss | x | x | x | |
+ // -------------------------------------------------
+ // |<----------tl0idx:200--------->|<---tl0idx:201---
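+ //
+ // Only pid:5 (the key frame) and pid:9 (the next tid:0 frame) are inserted
+ // below; pids 6-8 (marked x) are never received. Base-layer (tid:0) frames
+ // do not depend on higher temporal layers, so the buffer can skip straight
+ // from pid:5 to pid:9.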
+
+ jitter_buffer_->SetNackSettings(kMaxNumberOfFrames, kMaxNumberOfFrames, 0);
+ auto& vp9_header =
+ packet_->video_header.video_type_header.emplace<RTPVideoHeaderVP9>();
+
+ bool re = false;
+ packet_->video_header.codec = kVideoCodecVP9;
+ packet_->video_header.is_first_packet_in_frame = true;
+ packet_->markerBit = true;
+ vp9_header.flexible_mode = false;
+ vp9_header.spatial_idx = 0;
+ vp9_header.beginning_of_frame = true;
+ vp9_header.end_of_frame = true;
+ vp9_header.temporal_up_switch = false;
+
+ packet_->seqNum = 65485;
+ packet_->timestamp = 1000;
+ packet_->video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ vp9_header.picture_id = 5;
+ vp9_header.tl0_pic_idx = 200;
+ vp9_header.temporal_idx = 0;
+ vp9_header.ss_data_available = true;
+ vp9_header.gof.SetGofInfoVP9(
+ kTemporalStructureMode3); // kTemporalStructureMode3: 0-2-1-2..
+ EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_, &re));
+
+ // Insert next temporal layer 0.
+ packet_->seqNum = 65489;
+ packet_->timestamp = 13000;
+ packet_->video_header.frame_type = VideoFrameType::kVideoFrameDelta;
+ vp9_header.picture_id = 9;
+ vp9_header.tl0_pic_idx = 201;
+ vp9_header.temporal_idx = 0;
+ vp9_header.ss_data_available = false;
+ EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_, &re));
+
+ VCMEncodedFrame* frame_out = DecodeCompleteFrame();
+ EXPECT_EQ(1000U, frame_out->Timestamp());
+ EXPECT_EQ(VideoFrameType::kVideoFrameKey, frame_out->FrameType());
+ jitter_buffer_->ReleaseFrame(frame_out);
+
+ frame_out = DecodeCompleteFrame();
+ EXPECT_EQ(13000U, frame_out->Timestamp());
+ EXPECT_EQ(VideoFrameType::kVideoFrameDelta, frame_out->FrameType());
+ jitter_buffer_->ReleaseFrame(frame_out);
+}
+
+TEST_F(TestBasicJitterBuffer, ReorderedVp9SsData_3TlLayers) {
+ // Verify that frames are updated with SS data when the SS packet is
+ // reordered.
+ // --------------------------------
+ // | 65486 | 65487 | 65485 |...
+ // | pid:6 | pid:7 | pid:5 |...
+ // | tid:2 | tid:1 | tid:0 |...
+ // | | | ss |
+ // --------------------------------
+ // |<--------tl0idx:200--------->|
+
+ auto& vp9_header =
+ packet_->video_header.video_type_header.emplace<RTPVideoHeaderVP9>();
+
+ bool re = false;
+ packet_->video_header.codec = kVideoCodecVP9;
+ packet_->video_header.is_first_packet_in_frame = true;
+ packet_->markerBit = true;
+ vp9_header.flexible_mode = false;
+ vp9_header.spatial_idx = 0;
+ vp9_header.beginning_of_frame = true;
+ vp9_header.end_of_frame = true;
+ vp9_header.tl0_pic_idx = 200;
+
+ packet_->seqNum = 65486;
+ packet_->timestamp = 6000;
+ packet_->video_header.frame_type = VideoFrameType::kVideoFrameDelta;
+ vp9_header.picture_id = 6;
+ vp9_header.temporal_idx = 2;
+ vp9_header.temporal_up_switch = true;
+ EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_, &re));
+
+ packet_->seqNum = 65487;
+ packet_->timestamp = 9000;
+ packet_->video_header.frame_type = VideoFrameType::kVideoFrameDelta;
+ vp9_header.picture_id = 7;
+ vp9_header.temporal_idx = 1;
+ vp9_header.temporal_up_switch = true;
+ EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_, &re));
+
+ // Insert first frame with SS data.
+ packet_->seqNum = 65485;
+ packet_->timestamp = 3000;
+ packet_->video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ packet_->video_header.width = 352;
+ packet_->video_header.height = 288;
+ vp9_header.picture_id = 5;
+ vp9_header.temporal_idx = 0;
+ vp9_header.temporal_up_switch = false;
+ vp9_header.ss_data_available = true;
+ vp9_header.gof.SetGofInfoVP9(
+ kTemporalStructureMode3); // kTemporalStructureMode3: 0-2-1-2..
+ EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_, &re));
+
+ VCMEncodedFrame* frame_out = DecodeCompleteFrame();
+ EXPECT_EQ(3000U, frame_out->Timestamp());
+ EXPECT_EQ(VideoFrameType::kVideoFrameKey, frame_out->FrameType());
+ EXPECT_EQ(0, frame_out->CodecSpecific()->codecSpecific.VP9.temporal_idx);
+ EXPECT_FALSE(
+ frame_out->CodecSpecific()->codecSpecific.VP9.temporal_up_switch);
+ jitter_buffer_->ReleaseFrame(frame_out);
+
+ frame_out = DecodeCompleteFrame();
+ EXPECT_EQ(6000U, frame_out->Timestamp());
+ EXPECT_EQ(VideoFrameType::kVideoFrameDelta, frame_out->FrameType());
+ EXPECT_EQ(2, frame_out->CodecSpecific()->codecSpecific.VP9.temporal_idx);
+ EXPECT_TRUE(frame_out->CodecSpecific()->codecSpecific.VP9.temporal_up_switch);
+ jitter_buffer_->ReleaseFrame(frame_out);
+
+ frame_out = DecodeCompleteFrame();
+ EXPECT_EQ(9000U, frame_out->Timestamp());
+ EXPECT_EQ(VideoFrameType::kVideoFrameDelta, frame_out->FrameType());
+ EXPECT_EQ(1, frame_out->CodecSpecific()->codecSpecific.VP9.temporal_idx);
+ EXPECT_TRUE(frame_out->CodecSpecific()->codecSpecific.VP9.temporal_up_switch);
+ jitter_buffer_->ReleaseFrame(frame_out);
+}
+
+TEST_F(TestBasicJitterBuffer, ReorderedVp9SsData_2Tl2SLayers) {
+ // Verify that frames are updated with SS data when the SS packet is
+ // reordered.
+ // -----------------------------------------
+ // | 65486 | 65487 | 65485 | 65484 |...
+ // | pid:6 | pid:6 | pid:5 | pid:5 |...
+ // | tid:1 | tid:1 | tid:0 | tid:0 |...
+ // | sid:0 | sid:1 | sid:1 | sid:0 |...
+ // | t:6000 | t:6000 | t:3000 | t:3000 |
+ // | | | | ss |
+ // -----------------------------------------
+ // |<-----------tl0idx:200------------>|
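+ //
+ // Each picture here has two spatial layers (sid:0, sid:1) sharing one
+ // picture id and timestamp; the marker bit is only set on the packet of
+ // the highest spatial layer, which is why the sid:0 packets are inserted
+ // with markerBit = false.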
+
+ auto& vp9_header =
+ packet_->video_header.video_type_header.emplace<RTPVideoHeaderVP9>();
+
+ bool re = false;
+ packet_->video_header.codec = kVideoCodecVP9;
+ vp9_header.flexible_mode = false;
+ vp9_header.beginning_of_frame = true;
+ vp9_header.end_of_frame = true;
+ vp9_header.tl0_pic_idx = 200;
+
+ packet_->video_header.is_first_packet_in_frame = true;
+ packet_->markerBit = false;
+ packet_->seqNum = 65486;
+ packet_->timestamp = 6000;
+ packet_->video_header.frame_type = VideoFrameType::kVideoFrameDelta;
+ vp9_header.spatial_idx = 0;
+ vp9_header.picture_id = 6;
+ vp9_header.temporal_idx = 1;
+ vp9_header.temporal_up_switch = true;
+ EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_, &re));
+
+ packet_->video_header.is_first_packet_in_frame = false;
+ packet_->markerBit = true;
+ packet_->seqNum = 65487;
+ packet_->video_header.frame_type = VideoFrameType::kVideoFrameDelta;
+ vp9_header.spatial_idx = 1;
+ vp9_header.picture_id = 6;
+ vp9_header.temporal_idx = 1;
+ vp9_header.temporal_up_switch = true;
+ EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_, &re));
+
+ packet_->video_header.is_first_packet_in_frame = false;
+ packet_->markerBit = true;
+ packet_->seqNum = 65485;
+ packet_->timestamp = 3000;
+ packet_->video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ vp9_header.spatial_idx = 1;
+ vp9_header.picture_id = 5;
+ vp9_header.temporal_idx = 0;
+ vp9_header.temporal_up_switch = false;
+ EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_, &re));
+
+ // Insert first frame with SS data.
+ packet_->video_header.is_first_packet_in_frame = true;
+ packet_->markerBit = false;
+ packet_->seqNum = 65484;
+ packet_->video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ packet_->video_header.width = 352;
+ packet_->video_header.height = 288;
+ vp9_header.spatial_idx = 0;
+ vp9_header.picture_id = 5;
+ vp9_header.temporal_idx = 0;
+ vp9_header.temporal_up_switch = false;
+ vp9_header.ss_data_available = true;
+ vp9_header.gof.SetGofInfoVP9(
+ kTemporalStructureMode2); // kTemporalStructureMode2: 0-1-0-1..
+ EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_, &re));
+
+ VCMEncodedFrame* frame_out = DecodeCompleteFrame();
+ EXPECT_EQ(3000U, frame_out->Timestamp());
+ EXPECT_EQ(VideoFrameType::kVideoFrameKey, frame_out->FrameType());
+ EXPECT_EQ(0, frame_out->CodecSpecific()->codecSpecific.VP9.temporal_idx);
+ EXPECT_FALSE(
+ frame_out->CodecSpecific()->codecSpecific.VP9.temporal_up_switch);
+ jitter_buffer_->ReleaseFrame(frame_out);
+
+ frame_out = DecodeCompleteFrame();
+ EXPECT_EQ(6000U, frame_out->Timestamp());
+ EXPECT_EQ(VideoFrameType::kVideoFrameDelta, frame_out->FrameType());
+ EXPECT_EQ(1, frame_out->CodecSpecific()->codecSpecific.VP9.temporal_idx);
+ EXPECT_TRUE(frame_out->CodecSpecific()->codecSpecific.VP9.temporal_up_switch);
+ jitter_buffer_->ReleaseFrame(frame_out);
+}
+
+TEST_F(TestBasicJitterBuffer, H264InsertStartCode) {
+ packet_->video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ packet_->video_header.is_first_packet_in_frame = true;
+ packet_->markerBit = false;
+ packet_->seqNum = seq_num_;
+ packet_->timestamp = timestamp_;
+ packet_->insertStartCode = true;
+
+ bool retransmitted = false;
+ EXPECT_EQ(kIncomplete,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+
+ VCMEncodedFrame* frame_out = DecodeCompleteFrame();
+
+ // Frame should not be complete.
+ EXPECT_TRUE(frame_out == NULL);
+
+ seq_num_++;
+ packet_->video_header.is_first_packet_in_frame = false;
+ packet_->markerBit = true;
+ packet_->seqNum = seq_num_;
+
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+
+ frame_out = DecodeCompleteFrame();
+ CheckOutFrame(frame_out, size_ * 2 + 4 * 2, true);
+ EXPECT_EQ(VideoFrameType::kVideoFrameKey, frame_out->FrameType());
+ jitter_buffer_->ReleaseFrame(frame_out);
+}
+
+TEST_F(TestBasicJitterBuffer, SpsAndPpsHandling) {
+ auto& h264_header =
+ packet_->video_header.video_type_header.emplace<RTPVideoHeaderH264>();
+ packet_->timestamp = timestamp_;
+ packet_->video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ packet_->video_header.is_first_packet_in_frame = true;
+ packet_->markerBit = true;
+ packet_->video_header.codec = kVideoCodecH264;
+ h264_header.nalu_type = H264::NaluType::kIdr;
+ h264_header.nalus[0].type = H264::NaluType::kIdr;
+ h264_header.nalus[0].sps_id = -1;
+ h264_header.nalus[0].pps_id = 0;
+ h264_header.nalus_length = 1;
+ bool retransmitted = false;
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+ // Not decodable since sps and pps are missing.
+ EXPECT_EQ(nullptr, DecodeCompleteFrame());
+
+ timestamp_ += 3000;
+ packet_->timestamp = timestamp_;
+ ++seq_num_;
+ packet_->seqNum = seq_num_;
+ packet_->video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ packet_->video_header.is_first_packet_in_frame = true;
+ packet_->markerBit = false;
+ packet_->video_header.codec = kVideoCodecH264;
+ h264_header.nalu_type = H264::NaluType::kStapA;
+ h264_header.nalus[0].type = H264::NaluType::kSps;
+ h264_header.nalus[0].sps_id = 0;
+ h264_header.nalus[0].pps_id = -1;
+ h264_header.nalus[1].type = H264::NaluType::kPps;
+ h264_header.nalus[1].sps_id = 0;
+ h264_header.nalus[1].pps_id = 0;
+ h264_header.nalus_length = 2;
+ // Not complete since the marker bit hasn't been received.
+ EXPECT_EQ(kIncomplete,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+
+ ++seq_num_;
+ packet_->seqNum = seq_num_;
+ packet_->video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ packet_->video_header.is_first_packet_in_frame = false;
+ packet_->markerBit = true;
+ packet_->video_header.codec = kVideoCodecH264;
+ h264_header.nalu_type = H264::NaluType::kIdr;
+ h264_header.nalus[0].type = H264::NaluType::kIdr;
+ h264_header.nalus[0].sps_id = -1;
+ h264_header.nalus[0].pps_id = 0;
+ h264_header.nalus_length = 1;
+ // Complete and decodable since the pps and sps are received in the first
+ // packet of this frame.
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+ VCMEncodedFrame* frame_out = DecodeCompleteFrame();
+ ASSERT_NE(nullptr, frame_out);
+ jitter_buffer_->ReleaseFrame(frame_out);
+
+ timestamp_ += 3000;
+ packet_->timestamp = timestamp_;
+ ++seq_num_;
+ packet_->seqNum = seq_num_;
+ packet_->video_header.frame_type = VideoFrameType::kVideoFrameDelta;
+ packet_->video_header.is_first_packet_in_frame = true;
+ packet_->markerBit = true;
+ packet_->video_header.codec = kVideoCodecH264;
+ h264_header.nalu_type = H264::NaluType::kSlice;
+ h264_header.nalus[0].type = H264::NaluType::kSlice;
+ h264_header.nalus[0].sps_id = -1;
+ h264_header.nalus[0].pps_id = 0;
+ h264_header.nalus_length = 1;
+ // Complete and decodable since SPS, PPS and a key frame have been received.
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+ frame_out = DecodeCompleteFrame();
+ ASSERT_NE(nullptr, frame_out);
+ jitter_buffer_->ReleaseFrame(frame_out);
+}
+
+TEST_F(TestBasicJitterBuffer, DeltaFrame100PacketsWithSeqNumWrap) {
+ seq_num_ = 0xfff0;
+ packet_->video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ packet_->video_header.is_first_packet_in_frame = true;
+ packet_->markerBit = false;
+ packet_->seqNum = seq_num_;
+ packet_->timestamp = timestamp_;
+
+ bool retransmitted = false;
+ EXPECT_EQ(kIncomplete,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+
+ VCMEncodedFrame* frame_out = DecodeCompleteFrame();
+
+ EXPECT_TRUE(frame_out == NULL);
+
+ int loop = 0;
+ do {
+ seq_num_++;
+ packet_->video_header.is_first_packet_in_frame = false;
+ packet_->markerBit = false;
+ packet_->seqNum = seq_num_;
+
+ EXPECT_EQ(kIncomplete,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+
+ frame_out = DecodeCompleteFrame();
+
+ EXPECT_TRUE(frame_out == NULL);
+
+ loop++;
+ } while (loop < 98);
+
+ seq_num_++;
+ packet_->video_header.is_first_packet_in_frame = false;
+ packet_->markerBit = true;
+ packet_->seqNum = seq_num_;
+
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+
+ frame_out = DecodeCompleteFrame();
+
+ CheckOutFrame(frame_out, 100 * size_, false);
+
+ EXPECT_EQ(VideoFrameType::kVideoFrameKey, frame_out->FrameType());
+ jitter_buffer_->ReleaseFrame(frame_out);
+}
+
+TEST_F(TestBasicJitterBuffer, PacketReorderingReverseWithNegSeqNumWrap) {
+ // Insert "first" packet last seqnum.
+ seq_num_ = 10;
+ packet_->video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ packet_->video_header.is_first_packet_in_frame = false;
+ packet_->markerBit = true;
+ packet_->seqNum = seq_num_;
+
+ bool retransmitted = false;
+ EXPECT_EQ(kIncomplete,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+ VCMEncodedFrame* frame_out = DecodeCompleteFrame();
+
+ // Should not be complete.
+ EXPECT_TRUE(frame_out == NULL);
+
+ // Insert 98 packets.
+ int loop = 0;
+ do {
+ seq_num_--;
+ packet_->video_header.is_first_packet_in_frame = false;
+ packet_->markerBit = false;
+ packet_->seqNum = seq_num_;
+
+ EXPECT_EQ(kIncomplete,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+
+ frame_out = DecodeCompleteFrame();
+
+ EXPECT_TRUE(frame_out == NULL);
+
+ loop++;
+ } while (loop < 98);
+
+ // Insert last packet.
+ seq_num_--;
+ packet_->video_header.is_first_packet_in_frame = true;
+ packet_->markerBit = false;
+ packet_->seqNum = seq_num_;
+
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+
+ frame_out = DecodeCompleteFrame();
+ CheckOutFrame(frame_out, 100 * size_, false);
+ EXPECT_EQ(VideoFrameType::kVideoFrameKey, frame_out->FrameType());
+ jitter_buffer_->ReleaseFrame(frame_out);
+}
+
+TEST_F(TestBasicJitterBuffer, TestInsertOldFrame) {
+ // ------- -------
+ // | 2 | | 1 |
+ // ------- -------
+ // t = 3000 t = 2000
+ seq_num_ = 2;
+ timestamp_ = 3000;
+ packet_->video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ packet_->video_header.is_first_packet_in_frame = true;
+ packet_->markerBit = true;
+ packet_->timestamp = timestamp_;
+ packet_->seqNum = seq_num_;
+
+ bool retransmitted = false;
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+
+ VCMEncodedFrame* frame_out = DecodeCompleteFrame();
+ EXPECT_EQ(3000u, frame_out->Timestamp());
+ CheckOutFrame(frame_out, size_, false);
+ EXPECT_EQ(VideoFrameType::kVideoFrameKey, frame_out->FrameType());
+ jitter_buffer_->ReleaseFrame(frame_out);
+
+ seq_num_--;
+ timestamp_ = 2000;
+ packet_->video_header.frame_type = VideoFrameType::kVideoFrameDelta;
+ packet_->video_header.is_first_packet_in_frame = true;
+ packet_->markerBit = true;
+ packet_->seqNum = seq_num_;
+ packet_->timestamp = timestamp_;
+
+ EXPECT_EQ(kOldPacket, jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+}
+
+TEST_F(TestBasicJitterBuffer, TestInsertOldFrameWithSeqNumWrap) {
+ // ------- -------
+ // | 2 | | 1 |
+ // ------- -------
+ // t = 3000 t = 0xffffff00
+
+ seq_num_ = 2;
+ timestamp_ = 3000;
+ packet_->video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ packet_->video_header.is_first_packet_in_frame = true;
+ packet_->markerBit = true;
+ packet_->seqNum = seq_num_;
+ packet_->timestamp = timestamp_;
+
+ bool retransmitted = false;
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+
+ VCMEncodedFrame* frame_out = DecodeCompleteFrame();
+ EXPECT_EQ(timestamp_, frame_out->Timestamp());
+
+ CheckOutFrame(frame_out, size_, false);
+
+ EXPECT_EQ(VideoFrameType::kVideoFrameKey, frame_out->FrameType());
+
+ jitter_buffer_->ReleaseFrame(frame_out);
+
+ seq_num_--;
+ timestamp_ = 0xffffff00;
+ packet_->video_header.frame_type = VideoFrameType::kVideoFrameDelta;
+ packet_->video_header.is_first_packet_in_frame = true;
+ packet_->markerBit = true;
+ packet_->seqNum = seq_num_;
+ packet_->timestamp = timestamp_;
+
+ // This timestamp is old.
+ EXPECT_EQ(kOldPacket, jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+}
+
+TEST_F(TestBasicJitterBuffer, TimestampWrap) {
+ // --------------- ---------------
+ // | 1 | 2 | | 3 | 4 |
+ // --------------- ---------------
+ // t = 0xffffff00 t = 33*90
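+ //
+ // The 32-bit RTP timestamp wraps between the two frames: 0xffffff00 plus
+ // 33 * 90 overflows past 2^32, so the second frame's timestamp is
+ // numerically smaller but still newer.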
+
+ timestamp_ = 0xffffff00;
+ packet_->video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ packet_->video_header.is_first_packet_in_frame = true;
+ packet_->markerBit = false;
+ packet_->seqNum = seq_num_;
+ packet_->timestamp = timestamp_;
+
+ bool retransmitted = false;
+ EXPECT_EQ(kIncomplete,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+
+ VCMEncodedFrame* frame_out = DecodeCompleteFrame();
+ EXPECT_TRUE(frame_out == NULL);
+
+ seq_num_++;
+ packet_->video_header.is_first_packet_in_frame = false;
+ packet_->markerBit = true;
+ packet_->seqNum = seq_num_;
+
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+
+ frame_out = DecodeCompleteFrame();
+ CheckOutFrame(frame_out, 2 * size_, false);
+ jitter_buffer_->ReleaseFrame(frame_out);
+
+ seq_num_++;
+ timestamp_ += 33 * 90;
+ packet_->video_header.frame_type = VideoFrameType::kVideoFrameDelta;
+ packet_->video_header.is_first_packet_in_frame = true;
+ packet_->markerBit = false;
+ packet_->seqNum = seq_num_;
+ packet_->timestamp = timestamp_;
+
+ EXPECT_EQ(kIncomplete,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+
+ frame_out = DecodeCompleteFrame();
+ EXPECT_TRUE(frame_out == NULL);
+
+ seq_num_++;
+ packet_->video_header.is_first_packet_in_frame = false;
+ packet_->markerBit = true;
+ packet_->seqNum = seq_num_;
+
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+
+ frame_out = DecodeCompleteFrame();
+ CheckOutFrame(frame_out, 2 * size_, false);
+ EXPECT_EQ(VideoFrameType::kVideoFrameDelta, frame_out->FrameType());
+ jitter_buffer_->ReleaseFrame(frame_out);
+}
+
+TEST_F(TestBasicJitterBuffer, 2FrameWithTimestampWrap) {
+ // ------- -------
+ // | 1 | | 2 |
+ // ------- -------
+ // t = 0xffffff00 t = 2700
+
+ timestamp_ = 0xffffff00;
+ packet_->video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ packet_->video_header.is_first_packet_in_frame = true;
+ packet_->markerBit = true;
+ packet_->timestamp = timestamp_;
+
+ bool retransmitted = false;
+ // Insert first frame (session will be complete).
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+
+ // Insert next frame.
+ seq_num_++;
+ timestamp_ = 2700;
+ packet_->video_header.frame_type = VideoFrameType::kVideoFrameDelta;
+ packet_->video_header.is_first_packet_in_frame = true;
+ packet_->markerBit = true;
+ packet_->seqNum = seq_num_;
+ packet_->timestamp = timestamp_;
+
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+
+ VCMEncodedFrame* frame_out = DecodeCompleteFrame();
+ EXPECT_EQ(0xffffff00, frame_out->Timestamp());
+ CheckOutFrame(frame_out, size_, false);
+ EXPECT_EQ(VideoFrameType::kVideoFrameKey, frame_out->FrameType());
+ jitter_buffer_->ReleaseFrame(frame_out);
+
+ VCMEncodedFrame* frame_out2 = DecodeCompleteFrame();
+ EXPECT_EQ(2700u, frame_out2->Timestamp());
+ CheckOutFrame(frame_out2, size_, false);
+ EXPECT_EQ(VideoFrameType::kVideoFrameDelta, frame_out2->FrameType());
+ jitter_buffer_->ReleaseFrame(frame_out2);
+}
+
+TEST_F(TestBasicJitterBuffer, Insert2FramesReOrderedWithTimestampWrap) {
+ // ------- -------
+ // | 2 | | 1 |
+ // ------- -------
+ // t = 2700 t = 0xffffff00
+
+ seq_num_ = 2;
+ timestamp_ = 2700;
+ packet_->video_header.frame_type = VideoFrameType::kVideoFrameDelta;
+ packet_->video_header.is_first_packet_in_frame = true;
+ packet_->markerBit = true;
+ packet_->seqNum = seq_num_;
+ packet_->timestamp = timestamp_;
+
+ bool retransmitted = false;
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+
+ // Insert the second frame.
+ seq_num_--;
+ timestamp_ = 0xffffff00;
+ packet_->video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ packet_->video_header.is_first_packet_in_frame = true;
+ packet_->markerBit = true;
+ packet_->seqNum = seq_num_;
+ packet_->timestamp = timestamp_;
+
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+
+ VCMEncodedFrame* frame_out = DecodeCompleteFrame();
+ EXPECT_EQ(0xffffff00, frame_out->Timestamp());
+ CheckOutFrame(frame_out, size_, false);
+ EXPECT_EQ(VideoFrameType::kVideoFrameKey, frame_out->FrameType());
+ jitter_buffer_->ReleaseFrame(frame_out);
+
+ VCMEncodedFrame* frame_out2 = DecodeCompleteFrame();
+ EXPECT_EQ(2700u, frame_out2->Timestamp());
+ CheckOutFrame(frame_out2, size_, false);
+ EXPECT_EQ(VideoFrameType::kVideoFrameDelta, frame_out2->FrameType());
+ jitter_buffer_->ReleaseFrame(frame_out2);
+}
+
+TEST_F(TestBasicJitterBuffer, DeltaFrameWithMoreThanMaxNumberOfPackets) {
+ int loop = 0;
+ bool retransmitted = false;
+ // Insert kMaxPacketsInSession packets into a single frame.
+ do {
+ seq_num_++;
+ packet_->video_header.is_first_packet_in_frame = false;
+ packet_->markerBit = false;
+ packet_->seqNum = seq_num_;
+
+ EXPECT_EQ(kIncomplete,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+
+ loop++;
+ } while (loop < kMaxPacketsInSession);
+
+ // Max number of packets inserted.
+ // Insert one more packet.
+ seq_num_++;
+ packet_->video_header.is_first_packet_in_frame = false;
+ packet_->markerBit = true;
+ packet_->seqNum = seq_num_;
+
+ // Insert the packet -> frame recycled.
+ EXPECT_EQ(kSizeError, jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+ EXPECT_TRUE(NULL == DecodeCompleteFrame());
+}
+
+TEST_F(TestBasicJitterBuffer, ExceedNumOfFrameWithSeqNumWrap) {
+ // Fill the jitter buffer with more than the max number of frames (50 delta
+ // frames + 51 key frames), with a wrap in seq_num_.
+ //
+ // --------------------------------------------------------------
+ // | 65485 | 65486 | 65487 | .... | 65535 | 0 | 1 | 2 | .....| 50 |
+ // --------------------------------------------------------------
+ // |<-----------delta frames------------->|<------key frames----->|
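+ //
+ // Once the buffer is full, inserting one more frame recycles frames from
+ // the oldest onwards until a key frame is found, so decoding resumes at the
+ // first key frame (inserted at loop == 50).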
+
+ // Make sure the jitter buffer doesn't request a key frame after too many
+ // non-decodable frames.
+ jitter_buffer_->SetNackSettings(kMaxNumberOfFrames, kMaxNumberOfFrames, 0);
+
+ int loop = 0;
+ seq_num_ = 65485;
+ uint32_t first_key_frame_timestamp = 0;
+ bool retransmitted = false;
+ // Insert kMaxNumberOfFrames frames.
+ do {
+ timestamp_ += 33 * 90;
+ seq_num_++;
+ packet_->video_header.is_first_packet_in_frame = true;
+ packet_->markerBit = true;
+ packet_->seqNum = seq_num_;
+ packet_->timestamp = timestamp_;
+
+ if (loop == 50) {
+ first_key_frame_timestamp = packet_->timestamp;
+ packet_->video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ }
+
+ // Insert frame.
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+
+ loop++;
+ } while (loop < kMaxNumberOfFrames);
+
+ // Max number of frames inserted.
+
+ // Insert one more frame.
+ timestamp_ += 33 * 90;
+ seq_num_++;
+ packet_->video_header.is_first_packet_in_frame = true;
+ packet_->markerBit = true;
+ packet_->seqNum = seq_num_;
+ packet_->timestamp = timestamp_;
+
+ // Now, no free frame - frames will be recycled until first key frame.
+ EXPECT_EQ(kFlushIndicator,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+
+ VCMEncodedFrame* frame_out = DecodeCompleteFrame();
+ EXPECT_EQ(first_key_frame_timestamp, frame_out->Timestamp());
+ CheckOutFrame(frame_out, size_, false);
+ EXPECT_EQ(VideoFrameType::kVideoFrameKey, frame_out->FrameType());
+ jitter_buffer_->ReleaseFrame(frame_out);
+}
+
+TEST_F(TestBasicJitterBuffer, EmptyLastFrame) {
+ seq_num_ = 3;
+ // Insert one empty packet per frame. The jitter buffer should never return
+ // the last inserted timestamp; empty frames are only returned when
+ // subsequent frames are present.
+ int maxSize = 1000;
+ bool retransmitted = false;
+ for (int i = 0; i < maxSize + 10; i++) {
+ timestamp_ += 33 * 90;
+ seq_num_++;
+ packet_->video_header.is_first_packet_in_frame = false;
+ packet_->markerBit = false;
+ packet_->seqNum = seq_num_;
+ packet_->timestamp = timestamp_;
+ packet_->video_header.frame_type = VideoFrameType::kEmptyFrame;
+
+ EXPECT_EQ(kNoError, jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+ }
+}
+
+TEST_F(TestBasicJitterBuffer, NextFrameWhenIncomplete) {
+ // Test that we cannot get incomplete frames from the jitter buffer if we
+ // haven't received the marker bit, unless we have received a packet from a
+ // later timestamp.
+ // Start with a complete key frame - insert and decode.
+ jitter_buffer_->SetNackSettings(kMaxNumberOfFrames, kMaxNumberOfFrames, 0);
+ packet_->video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ packet_->video_header.is_first_packet_in_frame = true;
+ packet_->markerBit = true;
+ bool retransmitted = false;
+
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+ VCMEncodedFrame* frame_out = DecodeCompleteFrame();
+ EXPECT_TRUE(frame_out != NULL);
+ jitter_buffer_->ReleaseFrame(frame_out);
+
+ packet_->seqNum += 2;
+ packet_->timestamp += 33 * 90;
+ packet_->video_header.frame_type = VideoFrameType::kVideoFrameDelta;
+ packet_->video_header.is_first_packet_in_frame = false;
+ packet_->markerBit = false;
+
+ EXPECT_EQ(kIncomplete,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+
+ packet_->seqNum += 2;
+ packet_->timestamp += 33 * 90;
+ packet_->video_header.is_first_packet_in_frame = true;
+
+ EXPECT_EQ(kIncomplete,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+}
+
+TEST_F(TestRunningJitterBuffer, Full) {
+ // Make sure the jitter buffer doesn't request a key frame after too many
+ // non-decodable frames.
+ jitter_buffer_->SetNackSettings(kMaxNumberOfFrames, kMaxNumberOfFrames, 0);
+ // Insert a key frame and decode it.
+ EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameKey), kNoError);
+ EXPECT_TRUE(DecodeCompleteFrame());
+ DropFrame(1);
+ // Fill the jitter buffer.
+ EXPECT_GE(InsertFrames(kMaxNumberOfFrames, VideoFrameType::kVideoFrameDelta),
+ kNoError);
+ // Make sure we can't decode these frames.
+ EXPECT_FALSE(DecodeCompleteFrame());
+ // This frame will make the jitter buffer recycle frames until a key frame.
+ // Since none is found it will have to wait until the next key frame before
+ // decoding.
+ EXPECT_EQ(kFlushIndicator, InsertFrame(VideoFrameType::kVideoFrameDelta));
+ EXPECT_FALSE(DecodeCompleteFrame());
+}
+
+TEST_F(TestRunningJitterBuffer, EmptyPackets) {
+ // Make sure a frame can become complete even though empty packets are
+ // missing.
+ stream_generator_->GenerateFrame(VideoFrameType::kVideoFrameKey, 3, 3,
+ clock_->TimeInMilliseconds());
+ bool request_key_frame = false;
+ // Insert empty packet.
+ EXPECT_EQ(kNoError, InsertPacketAndPop(4));
+ EXPECT_FALSE(request_key_frame);
+ // Insert 3 media packets.
+ EXPECT_EQ(kIncomplete, InsertPacketAndPop(0));
+ EXPECT_FALSE(request_key_frame);
+ EXPECT_EQ(kIncomplete, InsertPacketAndPop(0));
+ EXPECT_FALSE(request_key_frame);
+ EXPECT_EQ(kCompleteSession, InsertPacketAndPop(0));
+ EXPECT_FALSE(request_key_frame);
+ // Insert empty packet.
+ EXPECT_EQ(kCompleteSession, InsertPacketAndPop(0));
+ EXPECT_FALSE(request_key_frame);
+}
+
+TEST_F(TestRunningJitterBuffer, SkipToKeyFrame) {
+ // Insert delta frames.
+ EXPECT_GE(InsertFrames(5, VideoFrameType::kVideoFrameDelta), kNoError);
+ // Can't decode without a key frame.
+ EXPECT_FALSE(DecodeCompleteFrame());
+ InsertFrame(VideoFrameType::kVideoFrameKey);
+ // Skip to the next key frame.
+ EXPECT_TRUE(DecodeCompleteFrame());
+}
+
+TEST_F(TestRunningJitterBuffer, DontSkipToKeyFrameIfDecodable) {
+ InsertFrame(VideoFrameType::kVideoFrameKey);
+ EXPECT_TRUE(DecodeCompleteFrame());
+ const int kNumDeltaFrames = 5;
+ EXPECT_GE(InsertFrames(kNumDeltaFrames, VideoFrameType::kVideoFrameDelta),
+ kNoError);
+ InsertFrame(VideoFrameType::kVideoFrameKey);
+ for (int i = 0; i < kNumDeltaFrames + 1; ++i) {
+ EXPECT_TRUE(DecodeCompleteFrame());
+ }
+}
+
+TEST_F(TestRunningJitterBuffer, KeyDeltaKeyDelta) {
+ InsertFrame(VideoFrameType::kVideoFrameKey);
+ EXPECT_TRUE(DecodeCompleteFrame());
+ const int kNumDeltaFrames = 5;
+ EXPECT_GE(InsertFrames(kNumDeltaFrames, VideoFrameType::kVideoFrameDelta),
+ kNoError);
+ InsertFrame(VideoFrameType::kVideoFrameKey);
+ EXPECT_GE(InsertFrames(kNumDeltaFrames, VideoFrameType::kVideoFrameDelta),
+ kNoError);
+ InsertFrame(VideoFrameType::kVideoFrameKey);
+ for (int i = 0; i < 2 * (kNumDeltaFrames + 1); ++i) {
+ EXPECT_TRUE(DecodeCompleteFrame());
+ }
+}
+
+TEST_F(TestRunningJitterBuffer, TwoPacketsNonContinuous) {
+ InsertFrame(VideoFrameType::kVideoFrameKey);
+ EXPECT_TRUE(DecodeCompleteFrame());
+ stream_generator_->GenerateFrame(VideoFrameType::kVideoFrameDelta, 1, 0,
+ clock_->TimeInMilliseconds());
+ clock_->AdvanceTimeMilliseconds(kDefaultFramePeriodMs);
+ stream_generator_->GenerateFrame(VideoFrameType::kVideoFrameDelta, 2, 0,
+ clock_->TimeInMilliseconds());
+ EXPECT_EQ(kIncomplete, InsertPacketAndPop(1));
+ EXPECT_EQ(kCompleteSession, InsertPacketAndPop(1));
+ EXPECT_FALSE(DecodeCompleteFrame());
+ EXPECT_EQ(kCompleteSession, InsertPacketAndPop(0));
+ EXPECT_TRUE(DecodeCompleteFrame());
+ EXPECT_TRUE(DecodeCompleteFrame());
+}
+
+TEST_F(TestJitterBufferNack, EmptyPackets) {
+ // Make sure empty packets don't clog the jitter buffer.
+ EXPECT_GE(InsertFrames(kMaxNumberOfFrames, VideoFrameType::kEmptyFrame),
+ kNoError);
+ InsertFrame(VideoFrameType::kVideoFrameKey);
+ EXPECT_TRUE(DecodeCompleteFrame());
+}
+
+TEST_F(TestJitterBufferNack, NackTooOldPackets) {
+ // Insert a key frame and decode it.
+ EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameKey), kNoError);
+ EXPECT_TRUE(DecodeCompleteFrame());
+
+ // Drop one frame and insert `oldest_packet_to_nack_` + 1 frames to trigger
+ // NACKing a too-old packet.
+ DropFrame(1);
+ // Insert a frame which should trigger a recycle until the next key frame.
+ EXPECT_EQ(kFlushIndicator, InsertFrames(oldest_packet_to_nack_ + 1,
+ VideoFrameType::kVideoFrameDelta));
+ EXPECT_FALSE(DecodeCompleteFrame());
+
+ bool request_key_frame = false;
+ std::vector<uint16_t> nack_list =
+ jitter_buffer_->GetNackList(&request_key_frame);
+ // No key frame will be requested since the jitter buffer is empty.
+ EXPECT_FALSE(request_key_frame);
+ EXPECT_EQ(0u, nack_list.size());
+
+ EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameDelta), kNoError);
+ // Waiting for a key frame.
+ EXPECT_FALSE(DecodeCompleteFrame());
+
+ // The next complete continuous frame isn't a key frame, but we're waiting
+ // for one.
+ EXPECT_FALSE(DecodeCompleteFrame());
+ EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameKey), kNoError);
+ // Skipping ahead to the key frame.
+ EXPECT_TRUE(DecodeCompleteFrame());
+}
+
+TEST_F(TestJitterBufferNack, NackLargeJitterBuffer) {
+ // Insert a key frame and decode it.
+ EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameKey), kNoError);
+ EXPECT_TRUE(DecodeCompleteFrame());
+
+ // Insert a large number of delta frames without any losses; this should
+ // neither trigger recycling nor any NACKs.
+ EXPECT_GE(
+ InsertFrames(oldest_packet_to_nack_, VideoFrameType::kVideoFrameDelta),
+ kNoError);
+
+ bool request_key_frame = false;
+ std::vector<uint16_t> nack_list =
+ jitter_buffer_->GetNackList(&request_key_frame);
+ // Verify that the jitter buffer does not request a key frame.
+ EXPECT_FALSE(request_key_frame);
+ // Verify that no packets are NACKed.
+ EXPECT_EQ(0u, nack_list.size());
+ // Verify that we can decode the next frame.
+ EXPECT_TRUE(DecodeCompleteFrame());
+}
+
+TEST_F(TestJitterBufferNack, NackListFull) {
+ // Insert a key frame and decode it.
+ EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameKey), kNoError);
+ EXPECT_TRUE(DecodeCompleteFrame());
+
+ // Drop more frames (`max_nack_list_size_` + 1) than the NACK list can hold.
+ DropFrame(max_nack_list_size_ + 1);
+ // Insert a frame which should trigger a recycle until the next key frame.
+ EXPECT_EQ(kFlushIndicator, InsertFrame(VideoFrameType::kVideoFrameDelta));
+ EXPECT_FALSE(DecodeCompleteFrame());
+
+ bool request_key_frame = false;
+ jitter_buffer_->GetNackList(&request_key_frame);
+ // The jitter buffer is empty, so we won't request key frames until we get a
+ // packet.
+ EXPECT_FALSE(request_key_frame);
+
+ EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameDelta), kNoError);
+ // Now that there is a packet in the jitter buffer again, a key frame will
+ // be requested since the next decodable frame is not a key frame.
+ jitter_buffer_->GetNackList(&request_key_frame);
+ EXPECT_TRUE(request_key_frame);
+ // The next complete continuous frame isn't a key frame, but we're waiting
+ // for one.
+ EXPECT_FALSE(DecodeCompleteFrame());
+ EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameKey), kNoError);
+ // Skipping ahead to the key frame.
+ EXPECT_TRUE(DecodeCompleteFrame());
+}
+
+TEST_F(TestJitterBufferNack, NoNackListReturnedBeforeFirstDecode) {
+ DropFrame(10);
+ // Insert a frame and try to generate a NACK list. Shouldn't get one.
+ EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameDelta), kNoError);
+ bool request_key_frame = false;
+ std::vector<uint16_t> nack_list =
+ jitter_buffer_->GetNackList(&request_key_frame);
+ // No list generated, and a key frame request is signaled.
+ EXPECT_EQ(0u, nack_list.size());
+ EXPECT_TRUE(request_key_frame);
+}
+
+TEST_F(TestJitterBufferNack, NackListBuiltBeforeFirstDecode) {
+ stream_generator_->Init(0, clock_->TimeInMilliseconds());
+ InsertFrame(VideoFrameType::kVideoFrameKey);
+ stream_generator_->GenerateFrame(VideoFrameType::kVideoFrameDelta, 2, 0,
+ clock_->TimeInMilliseconds());
+ stream_generator_->NextPacket(NULL); // Drop packet.
+ EXPECT_EQ(kIncomplete, InsertPacketAndPop(0));
+ EXPECT_TRUE(DecodeCompleteFrame());
+ bool extended = false;
+ std::vector<uint16_t> nack_list = jitter_buffer_->GetNackList(&extended);
+ EXPECT_EQ(1u, nack_list.size());
+}
+
+TEST_F(TestJitterBufferNack, VerifyRetransmittedFlag) {
+ stream_generator_->Init(0, clock_->TimeInMilliseconds());
+ stream_generator_->GenerateFrame(VideoFrameType::kVideoFrameKey, 3, 0,
+ clock_->TimeInMilliseconds());
+ VCMPacket packet;
+ stream_generator_->PopPacket(&packet, 0);
+ bool retransmitted = false;
+ EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(packet, &retransmitted));
+ EXPECT_FALSE(retransmitted);
+ // Drop second packet.
+ stream_generator_->PopPacket(&packet, 1);
+ EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(packet, &retransmitted));
+ EXPECT_FALSE(retransmitted);
+ EXPECT_FALSE(DecodeCompleteFrame());
+ bool extended = false;
+ std::vector<uint16_t> nack_list = jitter_buffer_->GetNackList(&extended);
+ uint16_t seq_num;
+ EXPECT_EQ(1u, nack_list.size());
+ seq_num = nack_list[0];
+ stream_generator_->PopPacket(&packet, 0);
+ EXPECT_EQ(packet.seqNum, seq_num);
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(packet, &retransmitted));
+ EXPECT_TRUE(retransmitted);
+ EXPECT_TRUE(DecodeCompleteFrame());
+}
+
+TEST_F(TestJitterBufferNack, UseNackToRecoverFirstKeyFrame) {
+ stream_generator_->Init(0, clock_->TimeInMilliseconds());
+ stream_generator_->GenerateFrame(VideoFrameType::kVideoFrameKey, 3, 0,
+ clock_->TimeInMilliseconds());
+ EXPECT_EQ(kIncomplete, InsertPacketAndPop(0));
+ // Drop second packet.
+ EXPECT_EQ(kIncomplete, InsertPacketAndPop(1));
+ EXPECT_FALSE(DecodeCompleteFrame());
+ bool extended = false;
+ std::vector<uint16_t> nack_list = jitter_buffer_->GetNackList(&extended);
+ uint16_t seq_num;
+ ASSERT_EQ(1u, nack_list.size());
+ seq_num = nack_list[0];
+ VCMPacket packet;
+ stream_generator_->GetPacket(&packet, 0);
+ EXPECT_EQ(packet.seqNum, seq_num);
+}
+
+TEST_F(TestJitterBufferNack, UseNackToRecoverFirstKeyFrameSecondInQueue) {
+ VCMPacket packet;
+ stream_generator_->Init(0, clock_->TimeInMilliseconds());
+ // First frame is delta.
+ stream_generator_->GenerateFrame(VideoFrameType::kVideoFrameDelta, 3, 0,
+ clock_->TimeInMilliseconds());
+ EXPECT_EQ(kIncomplete, InsertPacketAndPop(0));
+ // Drop second packet in frame.
+ ASSERT_TRUE(stream_generator_->PopPacket(&packet, 0));
+ EXPECT_EQ(kIncomplete, InsertPacketAndPop(0));
+ // Second frame is key.
+ stream_generator_->GenerateFrame(VideoFrameType::kVideoFrameKey, 3, 0,
+ clock_->TimeInMilliseconds() + 10);
+ EXPECT_EQ(kIncomplete, InsertPacketAndPop(0));
+ // Drop second packet in frame.
+ EXPECT_EQ(kIncomplete, InsertPacketAndPop(1));
+ EXPECT_FALSE(DecodeCompleteFrame());
+ bool extended = false;
+ std::vector<uint16_t> nack_list = jitter_buffer_->GetNackList(&extended);
+ uint16_t seq_num;
+ ASSERT_EQ(1u, nack_list.size());
+ seq_num = nack_list[0];
+ stream_generator_->GetPacket(&packet, 0);
+ EXPECT_EQ(packet.seqNum, seq_num);
+}
+
+TEST_F(TestJitterBufferNack, NormalOperation) {
+ EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameKey), kNoError);
+ EXPECT_TRUE(DecodeCompleteFrame());
+
+ // ----------------------------------------------------------------
+ // | 1 | 2 | .. | 8 | 9 | x | 11 | 12 | .. | 19 | x | 21 | .. | 100 |
+ // ----------------------------------------------------------------
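+ // Every tenth packet (sequence numbers 10, 20, ..., 90) is dropped below,
+ // so the NACK list should end up containing exactly those nine sequence
+ // numbers.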
+ stream_generator_->GenerateFrame(VideoFrameType::kVideoFrameKey, 100, 0,
+ clock_->TimeInMilliseconds());
+ clock_->AdvanceTimeMilliseconds(kDefaultFramePeriodMs);
+ EXPECT_EQ(kIncomplete, InsertPacketAndPop(0));
+ // Verify that the frame is incomplete.
+ EXPECT_FALSE(DecodeCompleteFrame());
+ while (stream_generator_->PacketsRemaining() > 1) {
+ if (stream_generator_->NextSequenceNumber() % 10 != 0) {
+ EXPECT_EQ(kIncomplete, InsertPacketAndPop(0));
+ } else {
+ stream_generator_->NextPacket(NULL); // Drop packet
+ }
+ }
+ EXPECT_EQ(kIncomplete, InsertPacketAndPop(0));
+ EXPECT_EQ(0, stream_generator_->PacketsRemaining());
+ EXPECT_FALSE(DecodeCompleteFrame());
+ bool request_key_frame = false;
+
+ // Verify the NACK list.
+ std::vector<uint16_t> nack_list =
+ jitter_buffer_->GetNackList(&request_key_frame);
+ const size_t kExpectedNackSize = 9;
+ ASSERT_EQ(kExpectedNackSize, nack_list.size());
+ for (size_t i = 0; i < nack_list.size(); ++i)
+ EXPECT_EQ((1 + i) * 10, nack_list[i]);
+}
+
+TEST_F(TestJitterBufferNack, NormalOperationWrap) {
+ bool request_key_frame = false;
+ // ------- ------------------------------------------------------------
+ // | 65532 | | 65533 | 65534 | 65535 | x | 1 | .. | 9 | x | 11 |.....| 96 |
+ // ------- ------------------------------------------------------------
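+ // After the wrap, the dropped packets are those with sequence numbers
+ // 0, 10, ..., 90, so the NACK list should contain those ten values.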
+ stream_generator_->Init(65532, clock_->TimeInMilliseconds());
+ InsertFrame(VideoFrameType::kVideoFrameKey);
+ EXPECT_FALSE(request_key_frame);
+ EXPECT_TRUE(DecodeCompleteFrame());
+ stream_generator_->GenerateFrame(VideoFrameType::kVideoFrameDelta, 100, 0,
+ clock_->TimeInMilliseconds());
+ EXPECT_EQ(kIncomplete, InsertPacketAndPop(0));
+ while (stream_generator_->PacketsRemaining() > 1) {
+ if (stream_generator_->NextSequenceNumber() % 10 != 0) {
+ EXPECT_EQ(kIncomplete, InsertPacketAndPop(0));
+ EXPECT_FALSE(request_key_frame);
+ } else {
+ stream_generator_->NextPacket(NULL); // Drop packet
+ }
+ }
+ EXPECT_EQ(kIncomplete, InsertPacketAndPop(0));
+ EXPECT_FALSE(request_key_frame);
+ EXPECT_EQ(0, stream_generator_->PacketsRemaining());
+ EXPECT_FALSE(DecodeCompleteFrame());
+ EXPECT_FALSE(DecodeCompleteFrame());
+ bool extended = false;
+ std::vector<uint16_t> nack_list = jitter_buffer_->GetNackList(&extended);
+ // Verify the NACK list.
+ const size_t kExpectedNackSize = 10;
+ ASSERT_EQ(kExpectedNackSize, nack_list.size());
+ for (size_t i = 0; i < nack_list.size(); ++i)
+ EXPECT_EQ(i * 10, nack_list[i]);
+}
+
+TEST_F(TestJitterBufferNack, NormalOperationWrap2) {
+ bool request_key_frame = false;
+ // -----------------------------------
+ // | 65532 | 65533 | 65534 | x | 0 | 1 |
+ // -----------------------------------
+ stream_generator_->Init(65532, clock_->TimeInMilliseconds());
+ InsertFrame(VideoFrameType::kVideoFrameKey);
+ EXPECT_FALSE(request_key_frame);
+ EXPECT_TRUE(DecodeCompleteFrame());
+ stream_generator_->GenerateFrame(VideoFrameType::kVideoFrameDelta, 1, 0,
+ clock_->TimeInMilliseconds());
+ clock_->AdvanceTimeMilliseconds(kDefaultFramePeriodMs);
+ for (int i = 0; i < 5; ++i) {
+ if (stream_generator_->NextSequenceNumber() != 65535) {
+ EXPECT_EQ(kCompleteSession, InsertPacketAndPop(0));
+ EXPECT_FALSE(request_key_frame);
+ } else {
+ stream_generator_->NextPacket(NULL); // Drop packet
+ }
+ stream_generator_->GenerateFrame(VideoFrameType::kVideoFrameDelta, 1, 0,
+ clock_->TimeInMilliseconds());
+ clock_->AdvanceTimeMilliseconds(kDefaultFramePeriodMs);
+ }
+ EXPECT_EQ(kCompleteSession, InsertPacketAndPop(0));
+ EXPECT_FALSE(request_key_frame);
+ bool extended = false;
+ std::vector<uint16_t> nack_list = jitter_buffer_->GetNackList(&extended);
+ // Verify the NACK list.
+ ASSERT_EQ(1u, nack_list.size());
+ EXPECT_EQ(65535, nack_list[0]);
+}
+
+TEST_F(TestJitterBufferNack, ResetByFutureKeyFrameDoesntError) {
+ stream_generator_->Init(0, clock_->TimeInMilliseconds());
+ InsertFrame(VideoFrameType::kVideoFrameKey);
+ EXPECT_TRUE(DecodeCompleteFrame());
+ bool extended = false;
+ std::vector<uint16_t> nack_list = jitter_buffer_->GetNackList(&extended);
+ EXPECT_EQ(0u, nack_list.size());
+
+ // A far-into-the-future video frame could be caused by resetting the encoder
+ // or otherwise restarting. This should not error when the packet is a key
+ // frame, even if the entire NACK list needs to be flushed.
+ stream_generator_->Init(10000, clock_->TimeInMilliseconds());
+ clock_->AdvanceTimeMilliseconds(kDefaultFramePeriodMs);
+ InsertFrame(VideoFrameType::kVideoFrameKey);
+ EXPECT_TRUE(DecodeCompleteFrame());
+ nack_list = jitter_buffer_->GetNackList(&extended);
+ EXPECT_EQ(0u, nack_list.size());
+
+ // Stream should be decodable from this point.
+ clock_->AdvanceTimeMilliseconds(kDefaultFramePeriodMs);
+ InsertFrame(VideoFrameType::kVideoFrameDelta);
+ EXPECT_TRUE(DecodeCompleteFrame());
+ nack_list = jitter_buffer_->GetNackList(&extended);
+ EXPECT_EQ(0u, nack_list.size());
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/loss_notification_controller.cc b/third_party/libwebrtc/modules/video_coding/loss_notification_controller.cc
new file mode 100644
index 0000000000..3377ab5a76
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/loss_notification_controller.cc
@@ -0,0 +1,173 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/loss_notification_controller.h"
+
+#include <stdint.h>
+
+#include "api/array_view.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/numerics/sequence_number_util.h"
+
+namespace webrtc {
+namespace {
+// Keep a container's size no higher than `max_allowed_size` by paring it down
+// to `target_size` whenever it has more than `max_allowed_size` elements.
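+// For example (illustrative values): with max_allowed_size = 4 and
+// target_size = 2, a std::set holding {1, 2, 3, 4, 5} is pared down to
+// {4, 5}, since the erasure starts from the beginning of the container.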
+template <typename Container>
+void PareDown(Container* container,
+ size_t max_allowed_size,
+ size_t target_size) {
+ if (container->size() > max_allowed_size) {
+ const size_t entries_to_delete = container->size() - target_size;
+ auto erase_to = container->begin();
+ std::advance(erase_to, entries_to_delete);
+ container->erase(container->begin(), erase_to);
+ RTC_DCHECK_EQ(container->size(), target_size);
+ }
+}
+} // namespace
+
+LossNotificationController::LossNotificationController(
+ KeyFrameRequestSender* key_frame_request_sender,
+ LossNotificationSender* loss_notification_sender)
+ : key_frame_request_sender_(key_frame_request_sender),
+ loss_notification_sender_(loss_notification_sender),
+ current_frame_potentially_decodable_(true) {
+ RTC_DCHECK(key_frame_request_sender_);
+ RTC_DCHECK(loss_notification_sender_);
+}
+
+LossNotificationController::~LossNotificationController() = default;
+
+void LossNotificationController::OnReceivedPacket(
+ uint16_t rtp_seq_num,
+ const LossNotificationController::FrameDetails* frame) {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+
+ // Ignore repeated or reordered packets.
+ // TODO(bugs.webrtc.org/10336): Handle packet reordering.
+ if (last_received_seq_num_ &&
+ !AheadOf(rtp_seq_num, *last_received_seq_num_)) {
+ return;
+ }
+
+ DiscardOldInformation(); // Prevent memory overconsumption.
+
+ const bool seq_num_gap =
+ last_received_seq_num_ &&
+ rtp_seq_num != static_cast<uint16_t>(*last_received_seq_num_ + 1u);
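+ // The uint16_t cast keeps the comparison wrap-safe: a previous sequence
+ // number of 0xffff followed by 0 is consecutive, not a gap.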
+
+ last_received_seq_num_ = rtp_seq_num;
+
+ // `frame` is not nullptr iff the packet is the first packet in the frame.
+ if (frame != nullptr) {
+ // Ignore repeated or reordered frames.
+ // TODO(bugs.webrtc.org/10336): Handle frame reordering.
+ if (last_received_frame_id_.has_value() &&
+ frame->frame_id <= last_received_frame_id_.value()) {
+ RTC_LOG(LS_WARNING) << "Repeated or reordered frame ID ("
+ << frame->frame_id << ").";
+ return;
+ }
+
+ last_received_frame_id_ = frame->frame_id;
+
+ if (frame->is_keyframe) {
+ // Subsequent frames may not rely on frames before the key frame.
+ // Note that upon receiving a key frame, we do not issue a loss
+ // notification on RTP sequence number gap, unless that gap spanned
+ // the key frame itself. This is because any loss which occurred before
+ // the key frame is no longer relevant.
+ decodable_frame_ids_.clear();
+ current_frame_potentially_decodable_ = true;
+ } else {
+ const bool all_dependencies_decodable =
+ AllDependenciesDecodable(frame->frame_dependencies);
+ current_frame_potentially_decodable_ = all_dependencies_decodable;
+ if (seq_num_gap || !current_frame_potentially_decodable_) {
+ HandleLoss(rtp_seq_num, current_frame_potentially_decodable_);
+ }
+ }
+ } else if (seq_num_gap || !current_frame_potentially_decodable_) {
+ current_frame_potentially_decodable_ = false;
+ // We allow sending multiple loss notifications for a single frame
+ // even if only one of its packets is lost. We do this because the bigger
+ // the frame, the more likely it is to be non-discardable, and therefore
+ // the more robust we wish to be to loss of the feedback messages.
+ HandleLoss(rtp_seq_num, false);
+ }
+}
+
+void LossNotificationController::OnAssembledFrame(
+ uint16_t first_seq_num,
+ int64_t frame_id,
+ bool discardable,
+ rtc::ArrayView<const int64_t> frame_dependencies) {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+
+ DiscardOldInformation(); // Prevent memory overconsumption.
+
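+  // Discardable frames are never referenced by later frames, and a frame
+  // with undecodable dependencies is itself undecodable; neither may serve
+  // as a loss-notification anchor or mark later frames as decodable.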
+ if (discardable) {
+ return;
+ }
+
+ if (!AllDependenciesDecodable(frame_dependencies)) {
+ return;
+ }
+
+ last_decodable_non_discardable_.emplace(first_seq_num);
+ const auto it = decodable_frame_ids_.insert(frame_id);
+ RTC_DCHECK(it.second);
+}
+
+void LossNotificationController::DiscardOldInformation() {
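+  // Pare `decodable_frame_ids_` back down to one expected key-frame interval
+  // once it reaches twice that size; trimming in bulk keeps the amortized
+  // cost low.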
+ constexpr size_t kExpectedKeyFrameIntervalFrames = 3000;
+ constexpr size_t kMaxSize = 2 * kExpectedKeyFrameIntervalFrames;
+ constexpr size_t kTargetSize = kExpectedKeyFrameIntervalFrames;
+ PareDown(&decodable_frame_ids_, kMaxSize, kTargetSize);
+}
+
+bool LossNotificationController::AllDependenciesDecodable(
+ rtc::ArrayView<const int64_t> frame_dependencies) const {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+
+ // Due to packet reordering, frame buffering and asynchronous decoders, it is
+ // infeasible to make reliable conclusions on the decodability of a frame
+ // immediately when it arrives. We use the following assumptions:
+ // * Intra frames are decodable.
+ // * Inter frames are decodable if all of their references were decodable.
+  // One possibility that is ignored is that the packet may be corrupt.
+ for (int64_t ref_frame_id : frame_dependencies) {
+ const auto ref_frame_it = decodable_frame_ids_.find(ref_frame_id);
+ if (ref_frame_it == decodable_frame_ids_.end()) {
+ // Reference frame not decodable.
+ return false;
+ }
+ }
+
+ return true;
+}
+
+void LossNotificationController::HandleLoss(uint16_t last_received_seq_num,
+ bool decodability_flag) {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+
+ if (last_decodable_non_discardable_) {
+ RTC_DCHECK(AheadOf(last_received_seq_num,
+ last_decodable_non_discardable_->first_seq_num));
+ loss_notification_sender_->SendLossNotification(
+ last_decodable_non_discardable_->first_seq_num, last_received_seq_num,
+ decodability_flag, /*buffering_allowed=*/true);
+ } else {
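+    // No decodable-and-non-discardable frame has been assembled yet, so
+    // there is no reference point for a loss notification; fall back to
+    // requesting a key frame.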
+ key_frame_request_sender_->RequestKeyFrame();
+ }
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/loss_notification_controller.h b/third_party/libwebrtc/modules/video_coding/loss_notification_controller.h
new file mode 100644
index 0000000000..ecba41267b
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/loss_notification_controller.h
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_LOSS_NOTIFICATION_CONTROLLER_H_
+#define MODULES_VIDEO_CODING_LOSS_NOTIFICATION_CONTROLLER_H_
+
+#include <stdint.h>
+
+#include <set>
+
+#include "absl/types/optional.h"
+#include "api/array_view.h"
+#include "api/sequence_checker.h"
+#include "modules/include/module_common_types.h"
+#include "rtc_base/system/no_unique_address.h"
+
+namespace webrtc {
+
+class LossNotificationController {
+ public:
+ struct FrameDetails {
+ bool is_keyframe;
+ int64_t frame_id;
+ rtc::ArrayView<const int64_t> frame_dependencies;
+ };
+
+ LossNotificationController(KeyFrameRequestSender* key_frame_request_sender,
+ LossNotificationSender* loss_notification_sender);
+ ~LossNotificationController();
+
+ // An RTP packet was received from the network.
+ // `frame` is non-null iff the packet is the first packet in the frame.
+ void OnReceivedPacket(uint16_t rtp_seq_num, const FrameDetails* frame);
+
+ // A frame was assembled from packets previously received.
+ // (Should be called even if the frame was composed of a single packet.)
+ void OnAssembledFrame(uint16_t first_seq_num,
+ int64_t frame_id,
+ bool discardable,
+ rtc::ArrayView<const int64_t> frame_dependencies);
+
+ private:
+ void DiscardOldInformation();
+
+ bool AllDependenciesDecodable(
+ rtc::ArrayView<const int64_t> frame_dependencies) const;
+
+ // When the loss of a packet or the non-decodability of a frame is detected,
+ // produces a key frame request or a loss notification.
+ // 1. `last_received_seq_num` is the last received sequence number.
+ // 2. `decodability_flag` refers to the frame associated with the last packet.
+ // It is set to `true` if and only if all of that frame's dependencies are
+ // known to be decodable, and the frame itself is not yet known to be
+ // unassemblable (i.e. no earlier parts of it were lost).
+ // Clarifications:
+ // a. In a multi-packet frame, the first packet reveals the frame's
+ // dependencies, but it is not yet known whether all parts of the
+ // current frame will be received.
+ // b. In a multi-packet frame, if the first packet is missed, the
+ // dependencies are unknown, but it is known that the frame itself
+ // is unassemblable.
+ void HandleLoss(uint16_t last_received_seq_num, bool decodability_flag);
+
+ KeyFrameRequestSender* const key_frame_request_sender_
+ RTC_GUARDED_BY(sequence_checker_);
+
+ LossNotificationSender* const loss_notification_sender_
+ RTC_GUARDED_BY(sequence_checker_);
+
+ // Tracked to avoid processing repeated frames (buggy/malicious remote).
+ absl::optional<int64_t> last_received_frame_id_
+ RTC_GUARDED_BY(sequence_checker_);
+
+ // Tracked to avoid processing repeated packets.
+ absl::optional<uint16_t> last_received_seq_num_
+ RTC_GUARDED_BY(sequence_checker_);
+
+ // Tracked in order to correctly report the potential-decodability of
+ // multi-packet frames.
+ bool current_frame_potentially_decodable_ RTC_GUARDED_BY(sequence_checker_);
+
+  // Loss notifications contain the sequence number of the first packet of
+  // the last decodable-and-non-discardable frame. Since that name is a bit
+  // of a mouthful, `last_decodable_non_discardable_.first_seq_num` is used,
+  // which is hopefully a bit easier for human beings to parse than
+  // `first_seq_num_of_last_decodable_non_discardable_`.
+ struct FrameInfo {
+ explicit FrameInfo(uint16_t first_seq_num) : first_seq_num(first_seq_num) {}
+ uint16_t first_seq_num;
+ };
+ absl::optional<FrameInfo> last_decodable_non_discardable_
+ RTC_GUARDED_BY(sequence_checker_);
+
+ // Track which frames are decodable. Later frames are also decodable if
+ // all of their dependencies can be found in this container.
+ // (Naturally, later frames must also be assemblable to be decodable.)
+ std::set<int64_t> decodable_frame_ids_ RTC_GUARDED_BY(sequence_checker_);
+
+ RTC_NO_UNIQUE_ADDRESS SequenceChecker sequence_checker_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_LOSS_NOTIFICATION_CONTROLLER_H_
diff --git a/third_party/libwebrtc/modules/video_coding/loss_notification_controller_unittest.cc b/third_party/libwebrtc/modules/video_coding/loss_notification_controller_unittest.cc
new file mode 100644
index 0000000000..9c4e715b4f
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/loss_notification_controller_unittest.cc
@@ -0,0 +1,607 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/loss_notification_controller.h"
+
+#include <stdint.h>
+
+#include <limits>
+#include <string>
+#include <tuple>
+#include <utility>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+// The information about an RTP packet that is relevant in these tests.
+struct Packet {
+ uint16_t seq_num;
+ bool first_in_frame;
+ bool is_keyframe;
+ int64_t frame_id;
+ std::vector<int64_t> frame_dependencies;
+};
+
+Packet CreatePacket(
+ bool first_in_frame,
+ bool last_in_frame,
+ uint16_t seq_num,
+ uint16_t frame_id,
+ bool is_key_frame,
+ std::vector<int64_t> ref_frame_ids = std::vector<int64_t>()) {
+ Packet packet;
+ packet.seq_num = seq_num;
+ packet.first_in_frame = first_in_frame;
+ if (first_in_frame) {
+ packet.is_keyframe = is_key_frame;
+ packet.frame_id = frame_id;
+ RTC_DCHECK(!is_key_frame || ref_frame_ids.empty());
+ packet.frame_dependencies = std::move(ref_frame_ids);
+ }
+ return packet;
+}
+
+class PacketStreamCreator final {
+ public:
+ PacketStreamCreator() : seq_num_(0), frame_id_(0), next_is_key_frame_(true) {}
+
+ Packet NextPacket() {
+ std::vector<int64_t> ref_frame_ids;
+ if (!next_is_key_frame_) {
+ ref_frame_ids.push_back(frame_id_ - 1);
+ }
+
+ Packet packet = CreatePacket(true, true, seq_num_++, frame_id_++,
+ next_is_key_frame_, ref_frame_ids);
+
+ next_is_key_frame_ = false;
+
+ return packet;
+ }
+
+ private:
+ uint16_t seq_num_;
+ int64_t frame_id_;
+ bool next_is_key_frame_;
+};
+} // namespace
+
+// Most of the logic for the tests is here. Subclasses allow parameterizing
+// the test, or adding some more specific logic.
+class LossNotificationControllerBaseTest : public ::testing::Test,
+ public KeyFrameRequestSender,
+ public LossNotificationSender {
+ protected:
+ LossNotificationControllerBaseTest()
+ : uut_(this, this), key_frame_requested_(false) {}
+
+ ~LossNotificationControllerBaseTest() override {
+ EXPECT_FALSE(LastKeyFrameRequest());
+ EXPECT_FALSE(LastLossNotification());
+ }
+
+ // KeyFrameRequestSender implementation.
+ void RequestKeyFrame() override {
+ EXPECT_FALSE(LastKeyFrameRequest());
+ EXPECT_FALSE(LastLossNotification());
+ key_frame_requested_ = true;
+ }
+
+ // LossNotificationSender implementation.
+ void SendLossNotification(uint16_t last_decoded_seq_num,
+ uint16_t last_received_seq_num,
+ bool decodability_flag,
+ bool buffering_allowed) override {
+    EXPECT_TRUE(buffering_allowed);  // The controller always allows buffering.
+ EXPECT_FALSE(LastKeyFrameRequest());
+ EXPECT_FALSE(LastLossNotification());
+ last_loss_notification_.emplace(last_decoded_seq_num, last_received_seq_num,
+ decodability_flag);
+ }
+
+ void OnReceivedPacket(const Packet& packet) {
+ EXPECT_FALSE(LastKeyFrameRequest());
+ EXPECT_FALSE(LastLossNotification());
+
+ if (packet.first_in_frame) {
+ previous_first_packet_in_frame_ = packet;
+ LossNotificationController::FrameDetails frame;
+ frame.is_keyframe = packet.is_keyframe;
+ frame.frame_id = packet.frame_id;
+ frame.frame_dependencies = packet.frame_dependencies;
+ uut_.OnReceivedPacket(packet.seq_num, &frame);
+ } else {
+ uut_.OnReceivedPacket(packet.seq_num, nullptr);
+ }
+ }
+
+ void OnAssembledFrame(uint16_t first_seq_num,
+ int64_t frame_id,
+ bool discardable) {
+ EXPECT_FALSE(LastKeyFrameRequest());
+ EXPECT_FALSE(LastLossNotification());
+
+ ASSERT_TRUE(previous_first_packet_in_frame_);
+ uut_.OnAssembledFrame(first_seq_num, frame_id, discardable,
+ previous_first_packet_in_frame_->frame_dependencies);
+ }
+
+ void ExpectKeyFrameRequest() {
+ EXPECT_EQ(LastLossNotification(), absl::nullopt);
+ EXPECT_TRUE(LastKeyFrameRequest());
+ }
+
+ void ExpectLossNotification(uint16_t last_decoded_seq_num,
+ uint16_t last_received_seq_num,
+ bool decodability_flag) {
+ EXPECT_FALSE(LastKeyFrameRequest());
+ const auto last_ln = LastLossNotification();
+ ASSERT_TRUE(last_ln);
+ const LossNotification expected_ln(
+ last_decoded_seq_num, last_received_seq_num, decodability_flag);
+    EXPECT_EQ(expected_ln, *last_ln)
+        << "Expected loss notification (" << expected_ln.ToString()
+        << ") != received loss notification (" << last_ln->ToString() << ")";
+ }
+
+ struct LossNotification {
+ LossNotification(uint16_t last_decoded_seq_num,
+ uint16_t last_received_seq_num,
+ bool decodability_flag)
+ : last_decoded_seq_num(last_decoded_seq_num),
+ last_received_seq_num(last_received_seq_num),
+ decodability_flag(decodability_flag) {}
+
+ LossNotification& operator=(const LossNotification& other) = default;
+
+ bool operator==(const LossNotification& other) const {
+ return last_decoded_seq_num == other.last_decoded_seq_num &&
+ last_received_seq_num == other.last_received_seq_num &&
+ decodability_flag == other.decodability_flag;
+ }
+
+ std::string ToString() const {
+ return std::to_string(last_decoded_seq_num) + ", " +
+ std::to_string(last_received_seq_num) + ", " +
+ std::to_string(decodability_flag);
+ }
+
+ uint16_t last_decoded_seq_num;
+ uint16_t last_received_seq_num;
+ bool decodability_flag;
+ };
+
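+  // Both getters below consume the recorded event, so every key frame
+  // request or loss notification must be expected by exactly one caller.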
+ bool LastKeyFrameRequest() {
+ const bool result = key_frame_requested_;
+ key_frame_requested_ = false;
+ return result;
+ }
+
+ absl::optional<LossNotification> LastLossNotification() {
+ const absl::optional<LossNotification> result = last_loss_notification_;
+ last_loss_notification_ = absl::nullopt;
+ return result;
+ }
+
+ LossNotificationController uut_; // Unit under test.
+
+ bool key_frame_requested_;
+
+ absl::optional<LossNotification> last_loss_notification_;
+
+  // First packet of the last frame. (Note that if a test skips the first
+  // packet of a subsequent frame, OnAssembledFrame is not called, and so
+  // this is not read. It is therefore not a problem if it is not cleared
+  // when the frame changes.)
+ absl::optional<Packet> previous_first_packet_in_frame_;
+};
+
+class LossNotificationControllerTest
+ : public LossNotificationControllerBaseTest,
+ public ::testing::WithParamInterface<std::tuple<bool, bool, bool>> {
+ protected:
+  // Arbitrary parameterized values, to be used by the tests whenever they
+  // wish to check several combinations, or to demonstrate that a particular
+  // arbitrary value is unimportant.
+ template <size_t N>
+ bool Bool() const {
+ return std::get<N>(GetParam());
+ }
+};
+
+INSTANTIATE_TEST_SUITE_P(_,
+ LossNotificationControllerTest,
+ ::testing::Combine(::testing::Bool(),
+ ::testing::Bool(),
+ ::testing::Bool()));
+
+// If the first frame, which is a key frame, is lost, then a new key frame
+// is requested.
+TEST_P(LossNotificationControllerTest,
+ PacketLossBeforeFirstFrameAssembledTriggersKeyFrameRequest) {
+ OnReceivedPacket(CreatePacket(true, false, 100, 0, true));
+ OnReceivedPacket(CreatePacket(Bool<0>(), Bool<1>(), 103, 1, false, {0}));
+ ExpectKeyFrameRequest();
+}
+
+// If packet loss occurs after the first frame has been assembled, a loss
+// notification (rather than a key frame request) is issued.
+TEST_P(LossNotificationControllerTest,
+ PacketLossAfterFirstFrameAssembledTriggersLossNotification) {
+ OnReceivedPacket(CreatePacket(true, true, 100, 0, true));
+ OnAssembledFrame(100, 0, false);
+ const bool first = Bool<0>();
+ const bool last = Bool<1>();
+ OnReceivedPacket(CreatePacket(first, last, 103, 1, false, {0}));
+ const bool expected_decodability_flag = first;
+ ExpectLossNotification(100, 103, expected_decodability_flag);
+}
+
+// No key frame request or loss notification is issued for an innocuous
+// wrap-around of the sequence number.
+TEST_P(LossNotificationControllerTest, SeqNumWrapAround) {
+ uint16_t seq_num = std::numeric_limits<uint16_t>::max();
+ OnReceivedPacket(CreatePacket(true, true, seq_num, 0, true));
+ OnAssembledFrame(seq_num, 0, false);
+ const bool first = Bool<0>();
+ const bool last = Bool<1>();
+ OnReceivedPacket(CreatePacket(first, last, ++seq_num, 1, false, {0}));
+}
+
+TEST_F(LossNotificationControllerTest,
+ KeyFrameAfterPacketLossProducesNoLossNotifications) {
+ OnReceivedPacket(CreatePacket(true, true, 100, 1, true));
+ OnAssembledFrame(100, 1, false);
+ OnReceivedPacket(CreatePacket(true, true, 108, 8, true));
+}
+
+TEST_P(LossNotificationControllerTest, LostReferenceProducesLossNotification) {
+ OnReceivedPacket(CreatePacket(true, true, 100, 0, true));
+ OnAssembledFrame(100, 0, false);
+ uint16_t last_decodable_non_discardable_seq_num = 100;
+
+ // RTP gap produces loss notification - not the focus of this test.
+ const bool first = Bool<0>();
+ const bool last = Bool<1>();
+ const bool discardable = Bool<2>();
+ const bool decodable = first; // Depends on assemblability.
+ OnReceivedPacket(CreatePacket(first, last, 107, 3, false, {0}));
+ ExpectLossNotification(100, 107, decodable);
+ OnAssembledFrame(107, 3, discardable);
+ if (!discardable) {
+ last_decodable_non_discardable_seq_num = 107;
+ }
+
+ // Test focus - a loss notification is produced because of the missing
+ // dependency (frame ID 2), despite the RTP sequence number being the
+ // next expected one.
+ OnReceivedPacket(CreatePacket(true, true, 108, 4, false, {2, 0}));
+ ExpectLossNotification(last_decodable_non_discardable_seq_num, 108, false);
+}
+
+// The difference between this test and the previous one is that here,
+// although the reference frame was received, it was not decodable.
+TEST_P(LossNotificationControllerTest,
+ UndecodableReferenceProducesLossNotification) {
+ OnReceivedPacket(CreatePacket(true, true, 100, 0, true));
+ OnAssembledFrame(100, 0, false);
+ uint16_t last_decodable_non_discardable_seq_num = 100;
+
+ // RTP gap produces loss notification - not the focus of this test.
+ // Also, not decodable; this is important for later in the test.
+ OnReceivedPacket(CreatePacket(true, true, 107, 3, false, {2}));
+ ExpectLossNotification(100, 107, false);
+ const bool discardable = Bool<0>();
+ OnAssembledFrame(107, 3, discardable);
+
+ // Test focus - a loss notification is produced because of the undecodable
+ // dependency (frame ID 3, which depended on the missing frame ID 2).
+ OnReceivedPacket(CreatePacket(true, true, 108, 4, false, {3, 0}));
+ ExpectLossNotification(last_decodable_non_discardable_seq_num, 108, false);
+}
+
+TEST_P(LossNotificationControllerTest, RobustnessAgainstHighInitialRefFrameId) {
+ constexpr uint16_t max_uint16_t = std::numeric_limits<uint16_t>::max();
+ OnReceivedPacket(CreatePacket(true, true, 100, 0, true));
+ OnAssembledFrame(100, 0, false);
+ OnReceivedPacket(CreatePacket(true, true, 101, 1, false, {max_uint16_t}));
+ ExpectLossNotification(100, 101, false);
+ OnAssembledFrame(101, max_uint16_t, Bool<0>());
+}
+
+TEST_P(LossNotificationControllerTest, RepeatedPacketsAreIgnored) {
+ PacketStreamCreator packet_stream;
+
+ const auto key_frame_packet = packet_stream.NextPacket();
+ OnReceivedPacket(key_frame_packet);
+ OnAssembledFrame(key_frame_packet.seq_num, key_frame_packet.frame_id, false);
+
+ const bool gap = Bool<0>();
+
+ if (gap) {
+ // Lose one packet.
+ packet_stream.NextPacket();
+ }
+
+ auto repeated_packet = packet_stream.NextPacket();
+ OnReceivedPacket(repeated_packet);
+ if (gap) {
+ // Loss notification issued because of the gap. This is not the focus of
+ // the test.
+ ExpectLossNotification(key_frame_packet.seq_num, repeated_packet.seq_num,
+ false);
+ }
+ OnReceivedPacket(repeated_packet);
+}
+
+TEST_F(LossNotificationControllerTest,
+ RecognizesDependencyAcrossIntraFrameThatIsNotAKeyframe) {
+ int last_seq_num = 1;
+ auto receive = [&](bool is_key_frame, int64_t frame_id,
+ std::vector<int64_t> ref_frame_ids) {
+ ++last_seq_num;
+ OnReceivedPacket(CreatePacket(
+ /*first_in_frame=*/true, /*last_in_frame=*/true, last_seq_num, frame_id,
+ is_key_frame, std::move(ref_frame_ids)));
+ OnAssembledFrame(last_seq_num, frame_id, /*discardable=*/false);
+ };
+ // 11 -- 13
+ // | |
+ // 10 12
+ receive(/*is_key_frame=*/true, /*frame_id=*/10, /*ref_frame_ids=*/{});
+ receive(/*is_key_frame=*/false, /*frame_id=*/11, /*ref_frame_ids=*/{10});
+ receive(/*is_key_frame=*/false, /*frame_id=*/12, /*ref_frame_ids=*/{});
+ receive(/*is_key_frame=*/false, /*frame_id=*/13, /*ref_frame_ids=*/{11, 12});
+ EXPECT_FALSE(LastLossNotification());
+}
+
+class LossNotificationControllerTestDecodabilityFlag
+ : public LossNotificationControllerBaseTest {
+ protected:
+ LossNotificationControllerTestDecodabilityFlag()
+ : key_frame_seq_num_(100),
+ key_frame_frame_id_(0),
+ never_received_frame_id_(key_frame_frame_id_ + 1),
+ seq_num_(0),
+ frame_id_(0) {}
+
+ void ReceiveKeyFrame() {
+ RTC_DCHECK_NE(key_frame_frame_id_, never_received_frame_id_);
+ OnReceivedPacket(CreatePacket(true, true, key_frame_seq_num_,
+ key_frame_frame_id_, true));
+ OnAssembledFrame(key_frame_seq_num_, key_frame_frame_id_, false);
+ seq_num_ = key_frame_seq_num_;
+ frame_id_ = key_frame_frame_id_;
+ }
+
+ void ReceivePacket(bool first_packet_in_frame,
+ bool last_packet_in_frame,
+ const std::vector<int64_t>& ref_frame_ids) {
+ if (first_packet_in_frame) {
+ frame_id_ += 1;
+ }
+ RTC_DCHECK_NE(frame_id_, never_received_frame_id_);
+ constexpr bool is_key_frame = false;
+ OnReceivedPacket(CreatePacket(first_packet_in_frame, last_packet_in_frame,
+ ++seq_num_, frame_id_, is_key_frame,
+ ref_frame_ids));
+ }
+
+ void CreateGap() {
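+    // Jump both counters so that the next received packet exhibits an RTP
+    // sequence-number gap as well as a frame-ID gap.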
+ seq_num_ += 50;
+ frame_id_ += 10;
+ }
+
+ const uint16_t key_frame_seq_num_;
+ const uint16_t key_frame_frame_id_;
+
+ // The tests intentionally never receive this, and can therefore always
+ // use this as an unsatisfied dependency.
+  const int64_t never_received_frame_id_;
+
+ uint16_t seq_num_;
+ int64_t frame_id_;
+};
+
+TEST_F(LossNotificationControllerTestDecodabilityFlag,
+ SinglePacketFrameWithDecodableDependencies) {
+ ReceiveKeyFrame();
+ CreateGap();
+
+ const std::vector<int64_t> ref_frame_ids = {key_frame_frame_id_};
+ ReceivePacket(true, true, ref_frame_ids);
+
+ const bool expected_decodability_flag = true;
+ ExpectLossNotification(key_frame_seq_num_, seq_num_,
+ expected_decodability_flag);
+}
+
+TEST_F(LossNotificationControllerTestDecodabilityFlag,
+ SinglePacketFrameWithUndecodableDependencies) {
+ ReceiveKeyFrame();
+ CreateGap();
+
+ const std::vector<int64_t> ref_frame_ids = {never_received_frame_id_};
+ ReceivePacket(true, true, ref_frame_ids);
+
+ const bool expected_decodability_flag = false;
+ ExpectLossNotification(key_frame_seq_num_, seq_num_,
+ expected_decodability_flag);
+}
+
+TEST_F(LossNotificationControllerTestDecodabilityFlag,
+ FirstPacketOfMultiPacketFrameWithDecodableDependencies) {
+ ReceiveKeyFrame();
+ CreateGap();
+
+ const std::vector<int64_t> ref_frame_ids = {key_frame_frame_id_};
+ ReceivePacket(true, false, ref_frame_ids);
+
+ const bool expected_decodability_flag = true;
+ ExpectLossNotification(key_frame_seq_num_, seq_num_,
+ expected_decodability_flag);
+}
+
+TEST_F(LossNotificationControllerTestDecodabilityFlag,
+ FirstPacketOfMultiPacketFrameWithUndecodableDependencies) {
+ ReceiveKeyFrame();
+ CreateGap();
+
+ const std::vector<int64_t> ref_frame_ids = {never_received_frame_id_};
+ ReceivePacket(true, false, ref_frame_ids);
+
+ const bool expected_decodability_flag = false;
+ ExpectLossNotification(key_frame_seq_num_, seq_num_,
+ expected_decodability_flag);
+}
+
+TEST_F(LossNotificationControllerTestDecodabilityFlag,
+ MiddlePacketOfMultiPacketFrameWithDecodableDependenciesIfFirstMissed) {
+ ReceiveKeyFrame();
+ CreateGap();
+
+ const std::vector<int64_t> ref_frame_ids = {key_frame_frame_id_};
+ ReceivePacket(false, false, ref_frame_ids);
+
+ const bool expected_decodability_flag = false;
+ ExpectLossNotification(key_frame_seq_num_, seq_num_,
+ expected_decodability_flag);
+}
+
+TEST_F(LossNotificationControllerTestDecodabilityFlag,
+ MiddlePacketOfMultiPacketFrameWithUndecodableDependenciesIfFirstMissed) {
+ ReceiveKeyFrame();
+ CreateGap();
+
+ const std::vector<int64_t> ref_frame_ids = {never_received_frame_id_};
+ ReceivePacket(false, false, ref_frame_ids);
+
+ const bool expected_decodability_flag = false;
+ ExpectLossNotification(key_frame_seq_num_, seq_num_,
+ expected_decodability_flag);
+}
+
+TEST_F(LossNotificationControllerTestDecodabilityFlag,
+ MiddlePacketOfMultiPacketFrameWithDecodableDependenciesIfFirstReceived) {
+ ReceiveKeyFrame();
+ CreateGap();
+
+ // First packet in multi-packet frame. A loss notification is produced
+ // because of the gap in RTP sequence numbers.
+ const std::vector<int64_t> ref_frame_ids = {key_frame_frame_id_};
+ ReceivePacket(true, false, ref_frame_ids);
+ const bool expected_decodability_flag_first = true;
+ ExpectLossNotification(key_frame_seq_num_, seq_num_,
+ expected_decodability_flag_first);
+
+ // Middle packet in multi-packet frame. No additional gap and the frame is
+ // still potentially decodable, so no additional loss indication.
+ ReceivePacket(false, false, ref_frame_ids);
+ EXPECT_FALSE(LastKeyFrameRequest());
+ EXPECT_FALSE(LastLossNotification());
+}
+
+TEST_F(
+ LossNotificationControllerTestDecodabilityFlag,
+ MiddlePacketOfMultiPacketFrameWithUndecodableDependenciesIfFirstReceived) {
+ ReceiveKeyFrame();
+ CreateGap();
+
+ // First packet in multi-packet frame. A loss notification is produced
+ // because of the gap in RTP sequence numbers. The frame is also recognized
+ // as having non-decodable dependencies.
+ const std::vector<int64_t> ref_frame_ids = {never_received_frame_id_};
+ ReceivePacket(true, false, ref_frame_ids);
+ const bool expected_decodability_flag_first = false;
+ ExpectLossNotification(key_frame_seq_num_, seq_num_,
+ expected_decodability_flag_first);
+
+ // Middle packet in multi-packet frame. No additional gap, but the frame is
+ // known to be non-decodable, so we keep issuing loss indications.
+ ReceivePacket(false, false, ref_frame_ids);
+ const bool expected_decodability_flag_middle = false;
+ ExpectLossNotification(key_frame_seq_num_, seq_num_,
+ expected_decodability_flag_middle);
+}
+
+TEST_F(LossNotificationControllerTestDecodabilityFlag,
+ LastPacketOfMultiPacketFrameWithDecodableDependenciesIfAllPrevMissed) {
+ ReceiveKeyFrame();
+ CreateGap();
+
+ const std::vector<int64_t> ref_frame_ids = {key_frame_frame_id_};
+ ReceivePacket(false, true, ref_frame_ids);
+
+ const bool expected_decodability_flag = false;
+ ExpectLossNotification(key_frame_seq_num_, seq_num_,
+ expected_decodability_flag);
+}
+
+TEST_F(LossNotificationControllerTestDecodabilityFlag,
+ LastPacketOfMultiPacketFrameWithUndecodableDependenciesIfAllPrevMissed) {
+ ReceiveKeyFrame();
+ CreateGap();
+
+ const std::vector<int64_t> ref_frame_ids = {never_received_frame_id_};
+ ReceivePacket(false, true, ref_frame_ids);
+
+ const bool expected_decodability_flag = false;
+ ExpectLossNotification(key_frame_seq_num_, seq_num_,
+ expected_decodability_flag);
+}
+
+TEST_F(LossNotificationControllerTestDecodabilityFlag,
+ LastPacketOfMultiPacketFrameWithDecodableDependenciesIfAllPrevReceived) {
+ ReceiveKeyFrame();
+ CreateGap();
+
+ // First packet in multi-packet frame. A loss notification is produced
+ // because of the gap in RTP sequence numbers.
+ const std::vector<int64_t> ref_frame_ids = {key_frame_frame_id_};
+ ReceivePacket(true, false, ref_frame_ids);
+ const bool expected_decodability_flag_first = true;
+ ExpectLossNotification(key_frame_seq_num_, seq_num_,
+ expected_decodability_flag_first);
+
+ // Last packet in multi-packet frame. No additional gap and the frame is
+ // still potentially decodable, so no additional loss indication.
+ ReceivePacket(false, true, ref_frame_ids);
+ EXPECT_FALSE(LastKeyFrameRequest());
+ EXPECT_FALSE(LastLossNotification());
+}
+
+TEST_F(
+ LossNotificationControllerTestDecodabilityFlag,
+ LastPacketOfMultiPacketFrameWithUndecodableDependenciesIfAllPrevReceived) {
+ ReceiveKeyFrame();
+ CreateGap();
+
+ // First packet in multi-packet frame. A loss notification is produced
+ // because of the gap in RTP sequence numbers. The frame is also recognized
+ // as having non-decodable dependencies.
+ const std::vector<int64_t> ref_frame_ids = {never_received_frame_id_};
+ ReceivePacket(true, false, ref_frame_ids);
+ const bool expected_decodability_flag_first = false;
+ ExpectLossNotification(key_frame_seq_num_, seq_num_,
+ expected_decodability_flag_first);
+
+ // Last packet in multi-packet frame. No additional gap, but the frame is
+ // known to be non-decodable, so we keep issuing loss indications.
+ ReceivePacket(false, true, ref_frame_ids);
+ const bool expected_decodability_flag_last = false;
+ ExpectLossNotification(key_frame_seq_num_, seq_num_,
+ expected_decodability_flag_last);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/media_opt_util.cc b/third_party/libwebrtc/modules/video_coding/media_opt_util.cc
new file mode 100644
index 0000000000..7580c95fc7
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/media_opt_util.cc
@@ -0,0 +1,704 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/media_opt_util.h"
+
+#include <math.h>
+
+#include <algorithm>
+
+#include "modules/video_coding/fec_rate_table.h"
+#include "modules/video_coding/internal_defines.h"
+#include "modules/video_coding/utility/simulcast_rate_allocator.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/experiments/rate_control_settings.h"
+#include "rtc_base/numerics/safe_conversions.h"
+
+namespace webrtc {
+// Max value of loss rates in off-line model
+static const int kPacketLossMax = 129;
+
+namespace media_optimization {
+
+VCMProtectionParameters::VCMProtectionParameters()
+ : rtt(0),
+ lossPr(0.0f),
+ bitRate(0.0f),
+ packetsPerFrame(0.0f),
+ packetsPerFrameKey(0.0f),
+ frameRate(0.0f),
+ keyFrameSize(0.0f),
+ fecRateDelta(0),
+ fecRateKey(0),
+ codecWidth(0),
+ codecHeight(0),
+ numLayers(1) {}
+
+VCMProtectionMethod::VCMProtectionMethod()
+ : _effectivePacketLoss(0),
+ _protectionFactorK(0),
+ _protectionFactorD(0),
+ _scaleProtKey(2.0f),
+ _maxPayloadSize(1460),
+ _corrFecCost(1.0),
+ _type(kNone) {}
+
+VCMProtectionMethod::~VCMProtectionMethod() {}
+
+enum VCMProtectionMethodEnum VCMProtectionMethod::Type() const {
+ return _type;
+}
+
+uint8_t VCMProtectionMethod::RequiredPacketLossER() {
+ return _effectivePacketLoss;
+}
+
+uint8_t VCMProtectionMethod::RequiredProtectionFactorK() {
+ return _protectionFactorK;
+}
+
+uint8_t VCMProtectionMethod::RequiredProtectionFactorD() {
+ return _protectionFactorD;
+}
+
+bool VCMProtectionMethod::RequiredUepProtectionK() {
+ return _useUepProtectionK;
+}
+
+bool VCMProtectionMethod::RequiredUepProtectionD() {
+ return _useUepProtectionD;
+}
+
+int VCMProtectionMethod::MaxFramesFec() const {
+ return 1;
+}
+
+VCMNackFecMethod::VCMNackFecMethod(int64_t lowRttNackThresholdMs,
+ int64_t highRttNackThresholdMs)
+ : VCMFecMethod(),
+ _lowRttNackMs(lowRttNackThresholdMs),
+ _highRttNackMs(highRttNackThresholdMs),
+ _maxFramesFec(1) {
+ RTC_DCHECK(lowRttNackThresholdMs >= -1 && highRttNackThresholdMs >= -1);
+ RTC_DCHECK(highRttNackThresholdMs == -1 ||
+ lowRttNackThresholdMs <= highRttNackThresholdMs);
+ RTC_DCHECK(lowRttNackThresholdMs > -1 || highRttNackThresholdMs == -1);
+ _type = kNackFec;
+}
+
+VCMNackFecMethod::~VCMNackFecMethod() = default;
+
+bool VCMNackFecMethod::ProtectionFactor(
+ const VCMProtectionParameters* parameters) {
+  // Hybrid NACK/FEC has three operational modes:
+  // 1. Low RTT (below `_lowRttNackMs`) - NACK only: set the delta-frame FEC
+  //    rate (_protectionFactorD) to zero. A `_lowRttNackMs` of -1 selects
+  //    this mode at any RTT, i.e. delta frames never get FEC.
+  // 2. High RTT (above `_highRttNackMs`) - FEC only: keep the FEC factors.
+  //    A `_highRttNackMs` of -1 means this mode is never entered, i.e. NACK
+  //    is always allowed.
+  // 3. Medium RTT values - Hybrid mode: only the residual loss remaining
+  //    after FEC decoding is NACKed (refer to the jitter-buffer logic), and
+  //    the FEC delta protection factor is adjusted based on the RTT.
+
+ // Compute the protection factors
+ VCMFecMethod::ProtectionFactor(parameters);
+ if (_lowRttNackMs == -1 || parameters->rtt < _lowRttNackMs) {
+ _protectionFactorD = 0;
+ VCMFecMethod::UpdateProtectionFactorD(_protectionFactorD);
+
+ // When in Hybrid mode (RTT range), adjust FEC rates based on the
+ // RTT (NACK effectiveness) - adjustment factor is in the range [0,1].
+ } else if (_highRttNackMs == -1 || parameters->rtt < _highRttNackMs) {
+ // TODO(mikhal): Disabling adjustment temporarily.
+ // uint16_t rttIndex = (uint16_t) parameters->rtt;
+ float adjustRtt = 1.0f; // (float)VCMNackFecTable[rttIndex] / 100.0f;
+
+ // Adjust FEC with NACK on (for delta frame only)
+ // table depends on RTT relative to rttMax (NACK Threshold)
+ _protectionFactorD = rtc::saturated_cast<uint8_t>(
+ adjustRtt * rtc::saturated_cast<float>(_protectionFactorD));
+ // update FEC rates after applying adjustment
+ VCMFecMethod::UpdateProtectionFactorD(_protectionFactorD);
+ }
+
+ return true;
+}
+
+int VCMNackFecMethod::ComputeMaxFramesFec(
+ const VCMProtectionParameters* parameters) {
+ if (parameters->numLayers > 2) {
+ // For more than 2 temporal layers we will only have FEC on the base layer,
+ // and the base layers will be pretty far apart. Therefore we force one
+ // frame FEC.
+ return 1;
+ }
+ // We set the max number of frames to base the FEC on so that on average
+ // we will have complete frames in one RTT. Note that this is an upper
+ // bound, and that the actual number of frames used for FEC is decided by the
+ // RTP module based on the actual number of packets and the protection factor.
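+  // Example: at 30 fps with 2 temporal layers, the base layer runs at
+  // 15 fps; at an RTT of 100 ms this yields 2 * 15 * 0.1 + 0.5 = 3.5,
+  // truncated to 3 frames of FEC.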
+ float base_layer_framerate =
+ parameters->frameRate /
+ rtc::saturated_cast<float>(1 << (parameters->numLayers - 1));
+ int max_frames_fec = std::max(
+ rtc::saturated_cast<int>(
+ 2.0f * base_layer_framerate * parameters->rtt / 1000.0f + 0.5f),
+ 1);
+ // `kUpperLimitFramesFec` is the upper limit on how many frames we
+ // allow any FEC to be based on.
+ if (max_frames_fec > kUpperLimitFramesFec) {
+ max_frames_fec = kUpperLimitFramesFec;
+ }
+ return max_frames_fec;
+}
+
+int VCMNackFecMethod::MaxFramesFec() const {
+ return _maxFramesFec;
+}
+
+bool VCMNackFecMethod::BitRateTooLowForFec(
+ const VCMProtectionParameters* parameters) {
+  // Bitrate below which we turn off FEC, regardless of reported packet loss.
+  // The condition should depend on resolution and content. For now, use a
+  // threshold on bytes per frame, with some adjustment for the frame size.
+  // The condition for turning off FEC is also based on other factors, such
+  // as `_numLayers`, `_maxFramesFec`, and `_rtt`.
+ int estimate_bytes_per_frame = 1000 * BitsPerFrame(parameters) / 8;
+ int max_bytes_per_frame = kMaxBytesPerFrameForFec;
+ int num_pixels = parameters->codecWidth * parameters->codecHeight;
+ if (num_pixels <= 352 * 288) {
+ max_bytes_per_frame = kMaxBytesPerFrameForFecLow;
+ } else if (num_pixels > 640 * 480) {
+ max_bytes_per_frame = kMaxBytesPerFrameForFecHigh;
+ }
+ // TODO(marpan): add condition based on maximum frames used for FEC,
+ // and expand condition based on frame size.
+ // Max round trip time threshold in ms.
+ const int64_t kMaxRttTurnOffFec = 200;
+ if (estimate_bytes_per_frame < max_bytes_per_frame &&
+ parameters->numLayers < 3 && parameters->rtt < kMaxRttTurnOffFec) {
+ return true;
+ }
+ return false;
+}
+
+bool VCMNackFecMethod::EffectivePacketLoss(
+ const VCMProtectionParameters* parameters) {
+ // Set the effective packet loss for encoder (based on FEC code).
+ // Compute the effective packet loss and residual packet loss due to FEC.
+ VCMFecMethod::EffectivePacketLoss(parameters);
+ return true;
+}
+
+bool VCMNackFecMethod::UpdateParameters(
+ const VCMProtectionParameters* parameters) {
+ ProtectionFactor(parameters);
+ EffectivePacketLoss(parameters);
+ _maxFramesFec = ComputeMaxFramesFec(parameters);
+ if (BitRateTooLowForFec(parameters)) {
+ _protectionFactorK = 0;
+ _protectionFactorD = 0;
+ }
+
+  // The protection/FEC rates obtained above are defined relative to the
+  // total number of packets (total rate: source + FEC). The FEC code in the
+  // RTP module assumes the protection factor is defined relative to the
+  // number of source packets, so we convert the factor to reduce the
+  // mismatch between mediaOpt's rate and the actual one.
+ _protectionFactorK = VCMFecMethod::ConvertFECRate(_protectionFactorK);
+ _protectionFactorD = VCMFecMethod::ConvertFECRate(_protectionFactorD);
+
+ return true;
+}
+
+VCMNackMethod::VCMNackMethod() : VCMProtectionMethod() {
+ _type = kNack;
+}
+
+VCMNackMethod::~VCMNackMethod() = default;
+
+bool VCMNackMethod::EffectivePacketLoss(
+ const VCMProtectionParameters* parameter) {
+ // Effective Packet Loss, NA in current version.
+ _effectivePacketLoss = 0;
+ return true;
+}
+
+bool VCMNackMethod::UpdateParameters(
+ const VCMProtectionParameters* parameters) {
+ // Compute the effective packet loss
+ EffectivePacketLoss(parameters);
+
+ // nackCost = (bitRate - nackCost) * (lossPr)
+ return true;
+}
+
+VCMFecMethod::VCMFecMethod()
+ : VCMProtectionMethod(),
+ rate_control_settings_(RateControlSettings::ParseFromFieldTrials()) {
+ _type = kFec;
+}
+
+VCMFecMethod::~VCMFecMethod() = default;
+
+uint8_t VCMFecMethod::BoostCodeRateKey(uint8_t packetFrameDelta,
+ uint8_t packetFrameKey) const {
+ uint8_t boostRateKey = 2;
+ // Default: ratio scales the FEC protection up for I frames
+ uint8_t ratio = 1;
+
+ if (packetFrameDelta > 0) {
+    ratio = static_cast<uint8_t>(packetFrameKey / packetFrameDelta);
+ }
+ ratio = VCM_MAX(boostRateKey, ratio);
+
+ return ratio;
+}
+
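+// A code rate c (out of 255) defined relative to the total packet count means
+// fec / (source + fec) = c / 255, hence fec / source = c / (255 - c).
+// Rescaling to the 0-255 range gives 255 * c / (255 - c), rounded and capped.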
+uint8_t VCMFecMethod::ConvertFECRate(uint8_t codeRateRTP) const {
+ return rtc::saturated_cast<uint8_t>(
+ VCM_MIN(255, (0.5 + 255.0 * codeRateRTP /
+ rtc::saturated_cast<float>(255 - codeRateRTP))));
+}
+
+// Update FEC with protectionFactorD
+void VCMFecMethod::UpdateProtectionFactorD(uint8_t protectionFactorD) {
+ _protectionFactorD = protectionFactorD;
+}
+
+// Update FEC with protectionFactorK
+void VCMFecMethod::UpdateProtectionFactorK(uint8_t protectionFactorK) {
+ _protectionFactorK = protectionFactorK;
+}
+
+bool VCMFecMethod::ProtectionFactor(const VCMProtectionParameters* parameters) {
+ // FEC PROTECTION SETTINGS: varies with packet loss and bitrate
+
+ // No protection if (filtered) packetLoss is 0
+ uint8_t packetLoss = rtc::saturated_cast<uint8_t>(255 * parameters->lossPr);
+ if (packetLoss == 0) {
+ _protectionFactorK = 0;
+ _protectionFactorD = 0;
+ return true;
+ }
+
+  // Parameters for the FEC setting:
+  // first partition size, thresholds, table parameters, and the spatial
+  // resolution factor.
+
+ // First partition protection: ~ 20%
+ uint8_t firstPartitionProt = rtc::saturated_cast<uint8_t>(255 * 0.20);
+
+ // Minimum protection level needed to generate one FEC packet for one
+ // source packet/frame (in RTP sender)
+ uint8_t minProtLevelFec = 85;
+
+  // Thresholds on packetLoss and bitRate/frameRate (= average #packets),
+  // above which we allocate protection to cover at least the first partition.
+ uint8_t lossThr = 0;
+ uint8_t packetNumThr = 1;
+
+ // Parameters for range of rate index of table.
+ const uint8_t ratePar1 = 5;
+ const uint8_t ratePar2 = 49;
+
+ // Spatial resolution size, relative to a reference size.
+ float spatialSizeToRef = rtc::saturated_cast<float>(parameters->codecWidth *
+ parameters->codecHeight) /
+ (rtc::saturated_cast<float>(704 * 576));
+  // resolnFac: this parameter generally increases/decreases the FEC rate
+  // (for fixed bitRate and packetLoss) based on the spatial resolution.
+  // A smaller exponent (< 1) softens the effect of the resolution.
+ const float resolnFac = 1.0 / powf(spatialSizeToRef, 0.3f);
+
+ const int bitRatePerFrame = BitsPerFrame(parameters);
+
+ // Average number of packets per frame (source and fec):
+ const uint8_t avgTotPackets = rtc::saturated_cast<uint8_t>(
+ 1.5f + rtc::saturated_cast<float>(bitRatePerFrame) * 1000.0f /
+ rtc::saturated_cast<float>(8.0 * _maxPayloadSize));
+
+ // FEC rate parameters: for P and I frame
+ uint8_t codeRateDelta = 0;
+ uint8_t codeRateKey = 0;
+
+ // Get index for table: the FEC protection depends on an effective rate.
+ // The range on the rate index corresponds to rates (bps)
+ // from ~200k to ~8000k, for 30fps
+ const uint16_t effRateFecTable =
+ rtc::saturated_cast<uint16_t>(resolnFac * bitRatePerFrame);
+ uint8_t rateIndexTable = rtc::saturated_cast<uint8_t>(
+ VCM_MAX(VCM_MIN((effRateFecTable - ratePar1) / ratePar1, ratePar2), 0));
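+  // Example: effRateFecTable = 100 selects row (100 - 5) / 5 = 19 of the
+  // table's 50 rows (0 through ratePar2 = 49).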
+
+  // Restrict the packet loss range to 50%:
+  // the current tables are defined only up to 50% loss.
+ if (packetLoss >= kPacketLossMax) {
+ packetLoss = kPacketLossMax - 1;
+ }
+ uint16_t indexTable = rateIndexTable * kPacketLossMax + packetLoss;
+
+ // Check on table index
+ RTC_DCHECK_LT(indexTable, kFecRateTableSize);
+
+ // Protection factor for P frame
+ codeRateDelta = kFecRateTable[indexTable];
+
+ if (packetLoss > lossThr && avgTotPackets > packetNumThr) {
+ // Set a minimum based on first partition size.
+ if (codeRateDelta < firstPartitionProt) {
+ codeRateDelta = firstPartitionProt;
+ }
+ }
+
+ // Check limit on amount of protection for P frame; 50% is max.
+ if (codeRateDelta >= kPacketLossMax) {
+ codeRateDelta = kPacketLossMax - 1;
+ }
+
+  // For the key frame:
+  // It is effectively sent at a higher rate, so we scale/boost its rate.
+  // The boost factor may depend on several factors: the ratio of the packet
+  // counts of I and P frames, how much protection is placed on P frames, etc.
+ const uint8_t packetFrameDelta =
+ rtc::saturated_cast<uint8_t>(0.5 + parameters->packetsPerFrame);
+ const uint8_t packetFrameKey =
+ rtc::saturated_cast<uint8_t>(0.5 + parameters->packetsPerFrameKey);
+ const uint8_t boostKey = BoostCodeRateKey(packetFrameDelta, packetFrameKey);
+
+ rateIndexTable = rtc::saturated_cast<uint8_t>(VCM_MAX(
+ VCM_MIN(1 + (boostKey * effRateFecTable - ratePar1) / ratePar1, ratePar2),
+ 0));
+ uint16_t indexTableKey = rateIndexTable * kPacketLossMax + packetLoss;
+
+  indexTableKey = VCM_MIN(indexTableKey, kFecRateTableSize - 1);
+
+ // Check on table index
+ RTC_DCHECK_LT(indexTableKey, kFecRateTableSize);
+
+ // Protection factor for I frame
+ codeRateKey = kFecRateTable[indexTableKey];
+
+ // Boosting for Key frame.
+ int boostKeyProt = _scaleProtKey * codeRateDelta;
+ if (boostKeyProt >= kPacketLossMax) {
+ boostKeyProt = kPacketLossMax - 1;
+ }
+
+  // Make sure the I frame protection is larger than the P frame protection,
+  // and at least as high as the filtered packet loss.
+ codeRateKey = rtc::saturated_cast<uint8_t>(
+ VCM_MAX(packetLoss, VCM_MAX(boostKeyProt, codeRateKey)));
+
+ // Check limit on amount of protection for I frame: 50% is max.
+ if (codeRateKey >= kPacketLossMax) {
+ codeRateKey = kPacketLossMax - 1;
+ }
+
+ _protectionFactorK = codeRateKey;
+ _protectionFactorD = codeRateDelta;
+
+  // Generally there is a rate mismatch between the FEC cost estimated in
+  // mediaOpt and the actual FEC cost sent out by the RTP module. This is
+  // more significant at low rates (a small number of source packets), where
+  // the granularity of the FEC decreases. In this case, non-zero protection
+  // in mediaOpt may generate zero FEC packets in the RTP sender (since the
+  // actual FEC packet count comes from rounding the protection factor off
+  // against the actual source packet count). The correction factor
+  // (_corrFecCost) attempts to correct this, at least for cases of low
+  // rates (few packets) and low protection levels.
+
+ float numPacketsFl =
+ 1.0f + (rtc::saturated_cast<float>(bitRatePerFrame) * 1000.0 /
+ rtc::saturated_cast<float>(8.0 * _maxPayloadSize) +
+ 0.5);
+
+ const float estNumFecGen =
+ 0.5f +
+ rtc::saturated_cast<float>(_protectionFactorD * numPacketsFl / 255.0f);
+
+  // We reduce the cost factor (which reduces the overhead for the FEC and
+  // hybrid methods) rather than the protection factor itself.
+ _corrFecCost = 1.0f;
+ if (estNumFecGen < 1.1f && _protectionFactorD < minProtLevelFec) {
+ _corrFecCost = 0.5f;
+ }
+ if (estNumFecGen < 0.9f && _protectionFactorD < minProtLevelFec) {
+ _corrFecCost = 0.0f;
+ }
+
+ // DONE WITH FEC PROTECTION SETTINGS
+ return true;
+}
+
+int VCMFecMethod::BitsPerFrame(const VCMProtectionParameters* parameters) {
+ // When temporal layers are available FEC will only be applied on the base
+ // layer.
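+  // For example, with 3 temporal layers the base layer runs at frameRate / 4
+  // and receives the layer-0 share of the bitrate reported by the simulcast
+  // rate allocator.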
+ const float bitRateRatio =
+ webrtc::SimulcastRateAllocator::GetTemporalRateAllocation(
+ parameters->numLayers, 0,
+ rate_control_settings_.Vp8BaseHeavyTl3RateAllocation());
+ float frameRateRatio = powf(1 / 2.0, parameters->numLayers - 1);
+ float bitRate = parameters->bitRate * bitRateRatio;
+ float frameRate = parameters->frameRate * frameRateRatio;
+
+ // TODO(mikhal): Update factor following testing.
+ float adjustmentFactor = 1;
+
+ if (frameRate < 1.0f)
+ frameRate = 1.0f;
+ // Average bits per frame (units of kbits)
+ return rtc::saturated_cast<int>(adjustmentFactor * bitRate / frameRate);
+}
+
+bool VCMFecMethod::EffectivePacketLoss(
+ const VCMProtectionParameters* parameters) {
+  // The effective packet loss reported to the encoder is based on the RPL
+  // (residual packet loss). This is a soft setting based on the degree of
+  // FEC protection:
+  //   RPL = received/input packet loss - average FEC recovery.
+  // Note: the received/input packet loss may be filtered via FilteredLoss.
+
+ // Effective Packet Loss, NA in current version.
+ _effectivePacketLoss = 0;
+
+ return true;
+}
+
+bool VCMFecMethod::UpdateParameters(const VCMProtectionParameters* parameters) {
+ // Compute the protection factor
+ ProtectionFactor(parameters);
+
+ // Compute the effective packet loss
+ EffectivePacketLoss(parameters);
+
+  // The protection/FEC rates obtained above are defined relative to the
+  // total number of packets (total rate: source + FEC). The FEC code in the
+  // RTP module assumes the protection factor is defined relative to the
+  // number of source packets, so we convert the factor to reduce the
+  // mismatch between the rate mediaOpt suggests and the actual rate.
+ _protectionFactorK = ConvertFECRate(_protectionFactorK);
+ _protectionFactorD = ConvertFECRate(_protectionFactorD);
+
+ return true;
+}
+
+VCMLossProtectionLogic::VCMLossProtectionLogic(int64_t nowMs)
+ : _currentParameters(),
+ _rtt(0),
+ _lossPr(0.0f),
+ _bitRate(0.0f),
+ _frameRate(0.0f),
+ _keyFrameSize(0.0f),
+ _fecRateKey(0),
+ _fecRateDelta(0),
+ _lastPrUpdateT(0),
+ _lossPr255(0.9999f),
+ _lossPrHistory(),
+ _shortMaxLossPr255(0),
+ _packetsPerFrame(0.9999f),
+ _packetsPerFrameKey(0.9999f),
+ _codecWidth(704),
+ _codecHeight(576),
+ _numLayers(1) {
+ Reset(nowMs);
+}
+
+VCMLossProtectionLogic::~VCMLossProtectionLogic() {
+ Release();
+}
+
+void VCMLossProtectionLogic::SetMethod(
+ enum VCMProtectionMethodEnum newMethodType) {
+ if (_selectedMethod && _selectedMethod->Type() == newMethodType)
+ return;
+
+ switch (newMethodType) {
+ case kNack:
+ _selectedMethod.reset(new VCMNackMethod());
+ break;
+ case kFec:
+ _selectedMethod.reset(new VCMFecMethod());
+ break;
+ case kNackFec:
+ _selectedMethod.reset(new VCMNackFecMethod(kLowRttNackMs, -1));
+ break;
+ case kNone:
+ _selectedMethod.reset();
+ break;
+ }
+ UpdateMethod();
+}
+
+void VCMLossProtectionLogic::UpdateRtt(int64_t rtt) {
+ _rtt = rtt;
+}
+
+void VCMLossProtectionLogic::UpdateMaxLossHistory(uint8_t lossPr255,
+ int64_t now) {
+ if (_lossPrHistory[0].timeMs >= 0 &&
+ now - _lossPrHistory[0].timeMs < kLossPrShortFilterWinMs) {
+ if (lossPr255 > _shortMaxLossPr255) {
+ _shortMaxLossPr255 = lossPr255;
+ }
+ } else {
+ // Only add a new value to the history once a second
+ if (_lossPrHistory[0].timeMs == -1) {
+ // First, no shift
+ _shortMaxLossPr255 = lossPr255;
+ } else {
+ // Shift
+ for (int32_t i = (kLossPrHistorySize - 2); i >= 0; i--) {
+ _lossPrHistory[i + 1].lossPr255 = _lossPrHistory[i].lossPr255;
+ _lossPrHistory[i + 1].timeMs = _lossPrHistory[i].timeMs;
+ }
+ }
+ if (_shortMaxLossPr255 == 0) {
+ _shortMaxLossPr255 = lossPr255;
+ }
+
+ _lossPrHistory[0].lossPr255 = _shortMaxLossPr255;
+ _lossPrHistory[0].timeMs = now;
+ _shortMaxLossPr255 = 0;
+ }
+}
+
+uint8_t VCMLossProtectionLogic::MaxFilteredLossPr(int64_t nowMs) const {
+ uint8_t maxFound = _shortMaxLossPr255;
+ if (_lossPrHistory[0].timeMs == -1) {
+ return maxFound;
+ }
+ for (int32_t i = 0; i < kLossPrHistorySize; i++) {
+ if (_lossPrHistory[i].timeMs == -1) {
+ break;
+ }
+ if (nowMs - _lossPrHistory[i].timeMs >
+ kLossPrHistorySize * kLossPrShortFilterWinMs) {
+ // This sample (and all samples after this) is too old
+ break;
+ }
+ if (_lossPrHistory[i].lossPr255 > maxFound) {
+      // This sample is the largest one seen so far in the history.
+ maxFound = _lossPrHistory[i].lossPr255;
+ }
+ }
+ return maxFound;
+}
+
+uint8_t VCMLossProtectionLogic::FilteredLoss(int64_t nowMs,
+ FilterPacketLossMode filter_mode,
+ uint8_t lossPr255) {
+ // Update the max window filter.
+ UpdateMaxLossHistory(lossPr255, nowMs);
+
+ // Update the recursive average filter.
+ _lossPr255.Apply(rtc::saturated_cast<float>(nowMs - _lastPrUpdateT),
+ rtc::saturated_cast<float>(lossPr255));
+ _lastPrUpdateT = nowMs;
+
+ // Filtered loss: default is received loss (no filtering).
+ uint8_t filtered_loss = lossPr255;
+
+ switch (filter_mode) {
+ case kNoFilter:
+ break;
+ case kAvgFilter:
+ filtered_loss = rtc::saturated_cast<uint8_t>(_lossPr255.filtered() + 0.5);
+ break;
+ case kMaxFilter:
+ filtered_loss = MaxFilteredLossPr(nowMs);
+ break;
+ }
+
+ return filtered_loss;
+}
+
+void VCMLossProtectionLogic::UpdateFilteredLossPr(uint8_t packetLossEnc) {
+ _lossPr = rtc::saturated_cast<float>(packetLossEnc) / 255.0;
+}
+
+void VCMLossProtectionLogic::UpdateBitRate(float bitRate) {
+ _bitRate = bitRate;
+}
+
+void VCMLossProtectionLogic::UpdatePacketsPerFrame(float nPackets,
+ int64_t nowMs) {
+ _packetsPerFrame.Apply(
+ rtc::saturated_cast<float>(nowMs - _lastPacketPerFrameUpdateT), nPackets);
+ _lastPacketPerFrameUpdateT = nowMs;
+}
+
+void VCMLossProtectionLogic::UpdatePacketsPerFrameKey(float nPackets,
+ int64_t nowMs) {
+ _packetsPerFrameKey.Apply(
+ rtc::saturated_cast<float>(nowMs - _lastPacketPerFrameUpdateTKey),
+ nPackets);
+ _lastPacketPerFrameUpdateTKey = nowMs;
+}
+
+void VCMLossProtectionLogic::UpdateKeyFrameSize(float keyFrameSize) {
+ _keyFrameSize = keyFrameSize;
+}
+
+void VCMLossProtectionLogic::UpdateFrameSize(size_t width, size_t height) {
+ _codecWidth = width;
+ _codecHeight = height;
+}
+
+void VCMLossProtectionLogic::UpdateNumLayers(int numLayers) {
+ _numLayers = (numLayers == 0) ? 1 : numLayers;
+}
+
+bool VCMLossProtectionLogic::UpdateMethod() {
+ if (!_selectedMethod)
+ return false;
+ _currentParameters.rtt = _rtt;
+ _currentParameters.lossPr = _lossPr;
+ _currentParameters.bitRate = _bitRate;
+ _currentParameters.frameRate = _frameRate; // rename actual frame rate?
+ _currentParameters.keyFrameSize = _keyFrameSize;
+ _currentParameters.fecRateDelta = _fecRateDelta;
+ _currentParameters.fecRateKey = _fecRateKey;
+ _currentParameters.packetsPerFrame = _packetsPerFrame.filtered();
+ _currentParameters.packetsPerFrameKey = _packetsPerFrameKey.filtered();
+ _currentParameters.codecWidth = _codecWidth;
+ _currentParameters.codecHeight = _codecHeight;
+ _currentParameters.numLayers = _numLayers;
+ return _selectedMethod->UpdateParameters(&_currentParameters);
+}
+
+VCMProtectionMethod* VCMLossProtectionLogic::SelectedMethod() const {
+ return _selectedMethod.get();
+}
+
+VCMProtectionMethodEnum VCMLossProtectionLogic::SelectedType() const {
+ return _selectedMethod ? _selectedMethod->Type() : kNone;
+}
+
+void VCMLossProtectionLogic::Reset(int64_t nowMs) {
+ _lastPrUpdateT = nowMs;
+ _lastPacketPerFrameUpdateT = nowMs;
+ _lastPacketPerFrameUpdateTKey = nowMs;
+ _lossPr255.Reset(0.9999f);
+ _packetsPerFrame.Reset(0.9999f);
+ _fecRateDelta = _fecRateKey = 0;
+ for (int32_t i = 0; i < kLossPrHistorySize; i++) {
+ _lossPrHistory[i].lossPr255 = 0;
+ _lossPrHistory[i].timeMs = -1;
+ }
+ _shortMaxLossPr255 = 0;
+ Release();
+}
+
+void VCMLossProtectionLogic::Release() {
+ _selectedMethod.reset();
+}
+
+} // namespace media_optimization
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/media_opt_util.h b/third_party/libwebrtc/modules/video_coding/media_opt_util.h
new file mode 100644
index 0000000000..a74d1af6cb
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/media_opt_util.h
@@ -0,0 +1,350 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_MEDIA_OPT_UTIL_H_
+#define MODULES_VIDEO_CODING_MEDIA_OPT_UTIL_H_
+
+#include <math.h>
+#include <stdlib.h>
+
+#include <memory>
+
+#include "modules/video_coding/internal_defines.h"
+#include "rtc_base/experiments/rate_control_settings.h"
+#include "rtc_base/numerics/exp_filter.h"
+
+namespace webrtc {
+namespace media_optimization {
+
+// Number of time periods used for (max) window filter for packet loss
+// TODO(marpan): set reasonable window size for filtered packet loss,
+// adjustment should be based on logged/real data of loss stats/correlation.
+constexpr int kLossPrHistorySize = 10;
+
+// 1000 ms, total filter length is (kLossPrHistorySize * 1000) ms
+constexpr int kLossPrShortFilterWinMs = 1000;
+
+// The type of filter used on the received packet loss reports.
+enum FilterPacketLossMode {
+ kNoFilter, // No filtering on received loss.
+ kAvgFilter, // Recursive average filter.
+ kMaxFilter // Max-window filter, over the time interval of:
+ // (kLossPrHistorySize * kLossPrShortFilterWinMs) ms.
+};
+
+// Thresholds for hybrid NACK/FEC
+// common to media optimization and the jitter buffer.
+constexpr int64_t kLowRttNackMs = 20;
+
+// If the RTT is higher than this, an extra RTT won't be added to the jitter
+// buffer delay.
+constexpr int kMaxRttDelayThreshold = 500;
+
+struct VCMProtectionParameters {
+ VCMProtectionParameters();
+
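+  // Units and semantics: `rtt` is in milliseconds, `bitRate` is in kbps,
+  // `lossPr` is a fraction in [0, 1], and the packetsPerFrame* fields are
+  // filtered averages of packets per frame.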
+ int64_t rtt;
+ float lossPr;
+ float bitRate;
+ float packetsPerFrame;
+ float packetsPerFrameKey;
+ float frameRate;
+ float keyFrameSize;
+ uint8_t fecRateDelta;
+ uint8_t fecRateKey;
+ uint16_t codecWidth;
+ uint16_t codecHeight;
+ int numLayers;
+};
+
+/******************************/
+/* VCMProtectionMethod class */
+/******************************/
+
+enum VCMProtectionMethodEnum { kNack, kFec, kNackFec, kNone };
+
+class VCMLossProbabilitySample {
+ public:
+ VCMLossProbabilitySample() : lossPr255(0), timeMs(-1) {}
+
+ uint8_t lossPr255;
+ int64_t timeMs;
+};
+
+class VCMProtectionMethod {
+ public:
+ VCMProtectionMethod();
+ virtual ~VCMProtectionMethod();
+
+ // Updates the efficiency of the method using the parameters provided
+ //
+ // Input:
+ // - parameters : Parameters used to calculate efficiency
+ //
+ // Return value : True if this method is recommended in
+ // the given conditions.
+ virtual bool UpdateParameters(const VCMProtectionParameters* parameters) = 0;
+
+ // Returns the protection type
+ //
+ // Return value : The protection type
+ VCMProtectionMethodEnum Type() const;
+
+ // Returns the effective packet loss for ER, required by this protection
+ // method
+ //
+ // Return value : Required effective packet loss
+ virtual uint8_t RequiredPacketLossER();
+
+ // Extracts the FEC protection factor for Key frame, required by this
+ // protection method
+ //
+ // Return value : Required protectionFactor for Key frame
+ virtual uint8_t RequiredProtectionFactorK();
+
+ // Extracts the FEC protection factor for Delta frame, required by this
+ // protection method
+ //
+ // Return value : Required protectionFactor for delta frame
+ virtual uint8_t RequiredProtectionFactorD();
+
+ // Extracts whether the FEC Unequal protection (UEP) is used for Key frame.
+ //
+ // Return value : Required Unequal protection on/off state.
+ virtual bool RequiredUepProtectionK();
+
+  // Extracts whether the FEC Unequal protection (UEP) is used for Delta
+  // frame.
+ //
+ // Return value : Required Unequal protection on/off state.
+ virtual bool RequiredUepProtectionD();
+
+ virtual int MaxFramesFec() const;
+
+ protected:
+ uint8_t _effectivePacketLoss;
+ uint8_t _protectionFactorK;
+ uint8_t _protectionFactorD;
+ // Estimation of residual loss after the FEC
+ float _scaleProtKey;
+ int32_t _maxPayloadSize;
+
+ bool _useUepProtectionK;
+ bool _useUepProtectionD;
+ float _corrFecCost;
+ VCMProtectionMethodEnum _type;
+};
+
+class VCMNackMethod : public VCMProtectionMethod {
+ public:
+ VCMNackMethod();
+ ~VCMNackMethod() override;
+ bool UpdateParameters(const VCMProtectionParameters* parameters) override;
+ // Get the effective packet loss
+ bool EffectivePacketLoss(const VCMProtectionParameters* parameter);
+};
+
+class VCMFecMethod : public VCMProtectionMethod {
+ public:
+ VCMFecMethod();
+ ~VCMFecMethod() override;
+ bool UpdateParameters(const VCMProtectionParameters* parameters) override;
+ // Get the effective packet loss for ER
+ bool EffectivePacketLoss(const VCMProtectionParameters* parameters);
+ // Get the FEC protection factors
+ bool ProtectionFactor(const VCMProtectionParameters* parameters);
+ // Get the boost for key frame protection
+ uint8_t BoostCodeRateKey(uint8_t packetFrameDelta,
+ uint8_t packetFrameKey) const;
+  // Convert the rates, which are defined relative to either the total number
+  // of packets or the number of source packets.
+ uint8_t ConvertFECRate(uint8_t codeRate) const;
+ // Get the average effective recovery from FEC: for random loss model
+ float AvgRecoveryFEC(const VCMProtectionParameters* parameters) const;
+ // Update FEC with protectionFactorD
+ void UpdateProtectionFactorD(uint8_t protectionFactorD);
+ // Update FEC with protectionFactorK
+ void UpdateProtectionFactorK(uint8_t protectionFactorK);
+ // Compute the bits per frame. Account for temporal layers when applicable.
+ int BitsPerFrame(const VCMProtectionParameters* parameters);
+
+ protected:
+ static constexpr int kUpperLimitFramesFec = 6;
+  // Threshold values for the bytes/frame and round-trip time, below which we
+ // may turn off FEC, depending on `_numLayers` and `_maxFramesFec`.
+ // Max bytes/frame for VGA, corresponds to ~140k at 25fps.
+ static constexpr int kMaxBytesPerFrameForFec = 700;
+ // Max bytes/frame for CIF and lower: corresponds to ~80k at 25fps.
+ static constexpr int kMaxBytesPerFrameForFecLow = 400;
+ // Max bytes/frame for frame size larger than VGA, ~200k at 25fps.
+ static constexpr int kMaxBytesPerFrameForFecHigh = 1000;
+
+ const RateControlSettings rate_control_settings_;
+};
+
+class VCMNackFecMethod : public VCMFecMethod {
+ public:
+ VCMNackFecMethod(int64_t lowRttNackThresholdMs,
+ int64_t highRttNackThresholdMs);
+ ~VCMNackFecMethod() override;
+ bool UpdateParameters(const VCMProtectionParameters* parameters) override;
+ // Get the effective packet loss for ER
+ bool EffectivePacketLoss(const VCMProtectionParameters* parameters);
+ // Get the protection factors
+ bool ProtectionFactor(const VCMProtectionParameters* parameters);
+ // Get the max number of frames the FEC is allowed to be based on.
+ int MaxFramesFec() const override;
+ // Turn off the FEC based on low bitrate and other factors.
+ bool BitRateTooLowForFec(const VCMProtectionParameters* parameters);
+
+ private:
+ int ComputeMaxFramesFec(const VCMProtectionParameters* parameters);
+
+ int64_t _lowRttNackMs;
+ int64_t _highRttNackMs;
+ int _maxFramesFec;
+};
+
+class VCMLossProtectionLogic {
+ public:
+ explicit VCMLossProtectionLogic(int64_t nowMs);
+ ~VCMLossProtectionLogic();
+
+ // Set the protection method to be used
+ //
+ // Input:
+ // - newMethodType : New requested protection method type. If one
+ // is already set, it will be deleted and replaced
+ void SetMethod(VCMProtectionMethodEnum newMethodType);
+
+ // Update the round-trip time
+ //
+ // Input:
+  //          - rtt : Round-trip time in milliseconds.
+ void UpdateRtt(int64_t rtt);
+
+ // Update the filtered packet loss.
+ //
+ // Input:
+ // - packetLossEnc : The reported packet loss filtered
+ // (max window or average)
+ void UpdateFilteredLossPr(uint8_t packetLossEnc);
+
+ // Update the current target bit rate.
+ //
+ // Input:
+ // - bitRate : The current target bit rate in kbits/s
+ void UpdateBitRate(float bitRate);
+
+ // Update the number of packets per frame estimate, for delta frames
+ //
+ // Input:
+ // - nPackets : Number of packets in the latest sent frame.
+ void UpdatePacketsPerFrame(float nPackets, int64_t nowMs);
+
+ // Update the number of packets per frame estimate, for key frames
+ //
+ // Input:
+  //          - nPackets : Number of packets in the latest sent frame.
+ void UpdatePacketsPerFrameKey(float nPackets, int64_t nowMs);
+
+ // Update the keyFrameSize estimate
+ //
+ // Input:
+ // - keyFrameSize : The size of the latest sent key frame.
+ void UpdateKeyFrameSize(float keyFrameSize);
+
+ // Update the frame rate
+ //
+ // Input:
+ // - frameRate : The current target frame rate.
+ void UpdateFrameRate(float frameRate) { _frameRate = frameRate; }
+
+ // Update the frame size
+ //
+ // Input:
+ // - width : The codec frame width.
+ // - height : The codec frame height.
+ void UpdateFrameSize(size_t width, size_t height);
+
+ // Update the number of active layers
+ //
+ // Input:
+ // - numLayers : Number of layers used.
+ void UpdateNumLayers(int numLayers);
+
+  // The amount of packet loss to cover with FEC.
+  //
+  // Input:
+  //          - fecRateKey : Packet loss to cover with FEC when
+  //                         sending key frames.
+  //          - fecRateDelta : Packet loss to cover with FEC when
+  //                           sending delta frames.
+ void UpdateFECRates(uint8_t fecRateKey, uint8_t fecRateDelta) {
+ _fecRateKey = fecRateKey;
+ _fecRateDelta = fecRateDelta;
+ }
+
+ // Update the protection methods with the current VCMProtectionParameters
+ // and set the requested protection settings.
+ // Return value : Returns true on update
+ bool UpdateMethod();
+
+ // Returns the method currently selected.
+ //
+ // Return value : The protection method currently selected.
+ VCMProtectionMethod* SelectedMethod() const;
+
+ // Return the protection type of the currently selected method
+ VCMProtectionMethodEnum SelectedType() const;
+
+  // Updates the filtered loss for the average and max window packet loss,
+  // and returns the filtered loss probability in the interval [0, 255].
+  // The returned filtered loss value depends on the parameter `filter_mode`.
+  // The input parameter `lossPr255` is the received packet loss.
+  //
+  // Return value : The filtered loss probability
+ uint8_t FilteredLoss(int64_t nowMs,
+ FilterPacketLossMode filter_mode,
+ uint8_t lossPr255);
+
+ void Reset(int64_t nowMs);
+
+ void Release();
+
+ private:
+  // Updates the history used for the max window packet loss.
+ void UpdateMaxLossHistory(uint8_t lossPr255, int64_t now);
+ uint8_t MaxFilteredLossPr(int64_t nowMs) const;
+ std::unique_ptr<VCMProtectionMethod> _selectedMethod;
+ VCMProtectionParameters _currentParameters;
+ int64_t _rtt;
+ float _lossPr;
+ float _bitRate;
+ float _frameRate;
+ float _keyFrameSize;
+ uint8_t _fecRateKey;
+ uint8_t _fecRateDelta;
+ int64_t _lastPrUpdateT;
+ int64_t _lastPacketPerFrameUpdateT;
+ int64_t _lastPacketPerFrameUpdateTKey;
+ rtc::ExpFilter _lossPr255;
+ VCMLossProbabilitySample _lossPrHistory[kLossPrHistorySize];
+ uint8_t _shortMaxLossPr255;
+ rtc::ExpFilter _packetsPerFrame;
+ rtc::ExpFilter _packetsPerFrameKey;
+ size_t _codecWidth;
+ size_t _codecHeight;
+ int _numLayers;
+};
+
+} // namespace media_optimization
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_MEDIA_OPT_UTIL_H_
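
For orientation, a minimal sketch of how a sender-side update loop might drive
VCMLossProtectionLogic using only the methods declared above. The call order
and every numeric value are illustrative assumptions, not taken from the real
call sites.

    #include <cstdint>

    #include "modules/video_coding/media_opt_util.h"

    void UpdateProtection(
        webrtc::media_optimization::VCMLossProtectionLogic* logic) {
      using webrtc::media_optimization::kNackFec;
      using webrtc::media_optimization::VCMProtectionMethod;
      logic->SetMethod(kNackFec);        // Hybrid NACK/FEC.
      logic->UpdateRtt(120);             // RTT in milliseconds (assumed).
      logic->UpdateBitRate(1500.0f);     // Target bitrate in kbps (assumed).
      logic->UpdateFrameRate(30.0f);
      logic->UpdateFrameSize(1280, 720);
      logic->UpdateNumLayers(1);
      logic->UpdateFilteredLossPr(26);   // ~10% loss on the [0, 255] scale.
      if (logic->UpdateMethod()) {
        VCMProtectionMethod* method = logic->SelectedMethod();
        uint8_t fec_key = method->RequiredProtectionFactorK();
        uint8_t fec_delta = method->RequiredProtectionFactorD();
        // Feed fec_key / fec_delta into the FEC generator from here.
        (void)fec_key;
        (void)fec_delta;
      }
    }
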
diff --git a/third_party/libwebrtc/modules/video_coding/nack_requester.cc b/third_party/libwebrtc/modules/video_coding/nack_requester.cc
new file mode 100644
index 0000000000..4e74032d01
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/nack_requester.cc
@@ -0,0 +1,340 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/nack_requester.h"
+
+#include <algorithm>
+#include <limits>
+
+#include "api/sequence_checker.h"
+#include "api/task_queue/task_queue_base.h"
+#include "api/units/timestamp.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/experiments/field_trial_parser.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+namespace {
+constexpr int kMaxPacketAge = 10'000;
+constexpr int kMaxNackPackets = 1000;
+constexpr TimeDelta kDefaultRtt = TimeDelta::Millis(100);
+constexpr int kMaxNackRetries = 10;
+constexpr int kMaxReorderedPackets = 128;
+constexpr int kNumReorderingBuckets = 10;
+constexpr TimeDelta kDefaultSendNackDelay = TimeDelta::Zero();
+
+TimeDelta GetSendNackDelay(const FieldTrialsView& field_trials) {
+ int64_t delay_ms = strtol(
+ field_trials.Lookup("WebRTC-SendNackDelayMs").c_str(), nullptr, 10);
+ if (delay_ms > 0 && delay_ms <= 20) {
+ RTC_LOG(LS_INFO) << "SendNackDelay is set to " << delay_ms;
+ return TimeDelta::Millis(delay_ms);
+ }
+ return kDefaultSendNackDelay;
+}
+} // namespace
+
+constexpr TimeDelta NackPeriodicProcessor::kUpdateInterval;
+
+NackPeriodicProcessor::NackPeriodicProcessor(TimeDelta update_interval)
+ : update_interval_(update_interval) {}
+
+NackPeriodicProcessor::~NackPeriodicProcessor() {}
+
+void NackPeriodicProcessor::RegisterNackModule(NackRequesterBase* module) {
+ RTC_DCHECK_RUN_ON(&sequence_);
+ modules_.push_back(module);
+ if (modules_.size() != 1)
+ return;
+ repeating_task_ = RepeatingTaskHandle::DelayedStart(
+ TaskQueueBase::Current(), update_interval_, [this] {
+ RTC_DCHECK_RUN_ON(&sequence_);
+ ProcessNackModules();
+ return update_interval_;
+ });
+}
+
+void NackPeriodicProcessor::UnregisterNackModule(NackRequesterBase* module) {
+ RTC_DCHECK_RUN_ON(&sequence_);
+ auto it = std::find(modules_.begin(), modules_.end(), module);
+ RTC_DCHECK(it != modules_.end());
+ modules_.erase(it);
+ if (modules_.empty())
+ repeating_task_.Stop();
+}
+
+void NackPeriodicProcessor::ProcessNackModules() {
+ RTC_DCHECK_RUN_ON(&sequence_);
+ for (NackRequesterBase* module : modules_)
+ module->ProcessNacks();
+}
+
+ScopedNackPeriodicProcessorRegistration::
+ ScopedNackPeriodicProcessorRegistration(NackRequesterBase* module,
+ NackPeriodicProcessor* processor)
+ : module_(module), processor_(processor) {
+ processor_->RegisterNackModule(module_);
+}
+
+ScopedNackPeriodicProcessorRegistration::
+ ~ScopedNackPeriodicProcessorRegistration() {
+ processor_->UnregisterNackModule(module_);
+}
+
+NackRequester::NackInfo::NackInfo()
+ : seq_num(0),
+ send_at_seq_num(0),
+ created_at_time(Timestamp::MinusInfinity()),
+ sent_at_time(Timestamp::MinusInfinity()),
+ retries(0) {}
+
+NackRequester::NackInfo::NackInfo(uint16_t seq_num,
+ uint16_t send_at_seq_num,
+ Timestamp created_at_time)
+ : seq_num(seq_num),
+ send_at_seq_num(send_at_seq_num),
+ created_at_time(created_at_time),
+ sent_at_time(Timestamp::MinusInfinity()),
+ retries(0) {}
+
+NackRequester::NackRequester(TaskQueueBase* current_queue,
+ NackPeriodicProcessor* periodic_processor,
+ Clock* clock,
+ NackSender* nack_sender,
+ KeyFrameRequestSender* keyframe_request_sender,
+ const FieldTrialsView& field_trials)
+ : worker_thread_(current_queue),
+ clock_(clock),
+ nack_sender_(nack_sender),
+ keyframe_request_sender_(keyframe_request_sender),
+ reordering_histogram_(kNumReorderingBuckets, kMaxReorderedPackets),
+ initialized_(false),
+ rtt_(kDefaultRtt),
+ newest_seq_num_(0),
+ send_nack_delay_(GetSendNackDelay(field_trials)),
+ processor_registration_(this, periodic_processor) {
+ RTC_DCHECK(clock_);
+ RTC_DCHECK(nack_sender_);
+ RTC_DCHECK(keyframe_request_sender_);
+ RTC_DCHECK(worker_thread_);
+ RTC_DCHECK(worker_thread_->IsCurrent());
+}
+
+NackRequester::~NackRequester() {
+ RTC_DCHECK_RUN_ON(worker_thread_);
+}
+
+void NackRequester::ProcessNacks() {
+ RTC_DCHECK_RUN_ON(worker_thread_);
+ std::vector<uint16_t> nack_batch = GetNackBatch(kTimeOnly);
+ if (!nack_batch.empty()) {
+ // This batch of NACKs is triggered externally; there is no external
+ // initiator who can batch them with other feedback messages.
+ nack_sender_->SendNack(nack_batch, /*buffering_allowed=*/false);
+ }
+}
+
+int NackRequester::OnReceivedPacket(uint16_t seq_num, bool is_keyframe) {
+ RTC_DCHECK_RUN_ON(worker_thread_);
+ return OnReceivedPacket(seq_num, is_keyframe, false);
+}
+
+int NackRequester::OnReceivedPacket(uint16_t seq_num,
+ bool is_keyframe,
+ bool is_recovered) {
+ RTC_DCHECK_RUN_ON(worker_thread_);
+ // TODO(philipel): When the packet includes information whether it is
+ // retransmitted or not, use that value instead. For
+ // now set it to true, which will cause the reordering
+ // statistics to never be updated.
+ bool is_retransmitted = true;
+
+ if (!initialized_) {
+ newest_seq_num_ = seq_num;
+ if (is_keyframe)
+ keyframe_list_.insert(seq_num);
+ initialized_ = true;
+ return 0;
+ }
+
+  // Since `newest_seq_num_` is a packet we have actually received, we know
+  // that packet has never been nacked.
+ if (seq_num == newest_seq_num_)
+ return 0;
+
+ if (AheadOf(newest_seq_num_, seq_num)) {
+ // An out of order packet has been received.
+ auto nack_list_it = nack_list_.find(seq_num);
+ int nacks_sent_for_packet = 0;
+ if (nack_list_it != nack_list_.end()) {
+ nacks_sent_for_packet = nack_list_it->second.retries;
+ nack_list_.erase(nack_list_it);
+ }
+ if (!is_retransmitted)
+ UpdateReorderingStatistics(seq_num);
+ return nacks_sent_for_packet;
+ }
+
+ // Keep track of new keyframes.
+ if (is_keyframe)
+ keyframe_list_.insert(seq_num);
+
+ // And remove old ones so we don't accumulate keyframes.
+ auto it = keyframe_list_.lower_bound(seq_num - kMaxPacketAge);
+ if (it != keyframe_list_.begin())
+ keyframe_list_.erase(keyframe_list_.begin(), it);
+
+ if (is_recovered) {
+ recovered_list_.insert(seq_num);
+
+ // Remove old ones so we don't accumulate recovered packets.
+ auto it = recovered_list_.lower_bound(seq_num - kMaxPacketAge);
+ if (it != recovered_list_.begin())
+ recovered_list_.erase(recovered_list_.begin(), it);
+
+ // Do not send nack for packets recovered by FEC or RTX.
+ return 0;
+ }
+
+ AddPacketsToNack(newest_seq_num_ + 1, seq_num);
+ newest_seq_num_ = seq_num;
+
+  // Are there any nacks that are waiting for this seq_num?
+ std::vector<uint16_t> nack_batch = GetNackBatch(kSeqNumOnly);
+ if (!nack_batch.empty()) {
+ // This batch of NACKs is triggered externally; the initiator can
+ // batch them with other feedback messages.
+ nack_sender_->SendNack(nack_batch, /*buffering_allowed=*/true);
+ }
+
+ return 0;
+}
+
+void NackRequester::ClearUpTo(uint16_t seq_num) {
+ // Called via RtpVideoStreamReceiver2::FrameContinuous on the network thread.
+ worker_thread_->PostTask(SafeTask(task_safety_.flag(), [seq_num, this]() {
+ RTC_DCHECK_RUN_ON(worker_thread_);
+ nack_list_.erase(nack_list_.begin(), nack_list_.lower_bound(seq_num));
+ keyframe_list_.erase(keyframe_list_.begin(),
+ keyframe_list_.lower_bound(seq_num));
+ recovered_list_.erase(recovered_list_.begin(),
+ recovered_list_.lower_bound(seq_num));
+ }));
+}
+
+void NackRequester::UpdateRtt(int64_t rtt_ms) {
+ RTC_DCHECK_RUN_ON(worker_thread_);
+ rtt_ = TimeDelta::Millis(rtt_ms);
+}
+
+bool NackRequester::RemovePacketsUntilKeyFrame() {
+ // Called on worker_thread_.
+ while (!keyframe_list_.empty()) {
+ auto it = nack_list_.lower_bound(*keyframe_list_.begin());
+
+ if (it != nack_list_.begin()) {
+ // We have found a keyframe that actually is newer than at least one
+ // packet in the nack list.
+ nack_list_.erase(nack_list_.begin(), it);
+ return true;
+ }
+
+ // If this keyframe is so old it does not remove any packets from the list,
+ // remove it from the list of keyframes and try the next keyframe.
+ keyframe_list_.erase(keyframe_list_.begin());
+ }
+ return false;
+}
+
+void NackRequester::AddPacketsToNack(uint16_t seq_num_start,
+ uint16_t seq_num_end) {
+ // Called on worker_thread_.
+ // Remove old packets.
+ auto it = nack_list_.lower_bound(seq_num_end - kMaxPacketAge);
+ nack_list_.erase(nack_list_.begin(), it);
+
+ // If the nack list is too large, remove packets from the nack list until
+ // the latest first packet of a keyframe. If the list is still too large,
+ // clear it and request a keyframe.
+ uint16_t num_new_nacks = ForwardDiff(seq_num_start, seq_num_end);
+ if (nack_list_.size() + num_new_nacks > kMaxNackPackets) {
+ while (RemovePacketsUntilKeyFrame() &&
+ nack_list_.size() + num_new_nacks > kMaxNackPackets) {
+ }
+
+ if (nack_list_.size() + num_new_nacks > kMaxNackPackets) {
+ nack_list_.clear();
+ RTC_LOG(LS_WARNING) << "NACK list full, clearing NACK"
+ " list and requesting keyframe.";
+ keyframe_request_sender_->RequestKeyFrame();
+ return;
+ }
+ }
+
+ for (uint16_t seq_num = seq_num_start; seq_num != seq_num_end; ++seq_num) {
+ // Do not send nack for packets that are already recovered by FEC or RTX
+ if (recovered_list_.find(seq_num) != recovered_list_.end())
+ continue;
+ NackInfo nack_info(seq_num, seq_num + WaitNumberOfPackets(0.5),
+ clock_->CurrentTime());
+ RTC_DCHECK(nack_list_.find(seq_num) == nack_list_.end());
+ nack_list_[seq_num] = nack_info;
+ }
+}
+
+std::vector<uint16_t> NackRequester::GetNackBatch(NackFilterOptions options) {
+ // Called on worker_thread_.
+
+ bool consider_seq_num = options != kTimeOnly;
+ bool consider_timestamp = options != kSeqNumOnly;
+ Timestamp now = clock_->CurrentTime();
+ std::vector<uint16_t> nack_batch;
+ auto it = nack_list_.begin();
+ while (it != nack_list_.end()) {
+ bool delay_timed_out = now - it->second.created_at_time >= send_nack_delay_;
+ bool nack_on_rtt_passed = now - it->second.sent_at_time >= rtt_;
+ bool nack_on_seq_num_passed =
+ it->second.sent_at_time.IsInfinite() &&
+ AheadOrAt(newest_seq_num_, it->second.send_at_seq_num);
+ if (delay_timed_out && ((consider_seq_num && nack_on_seq_num_passed) ||
+ (consider_timestamp && nack_on_rtt_passed))) {
+ nack_batch.emplace_back(it->second.seq_num);
+ ++it->second.retries;
+ it->second.sent_at_time = now;
+ if (it->second.retries >= kMaxNackRetries) {
+ RTC_LOG(LS_WARNING) << "Sequence number " << it->second.seq_num
+ << " removed from NACK list due to max retries.";
+ it = nack_list_.erase(it);
+ } else {
+ ++it;
+ }
+ continue;
+ }
+ ++it;
+ }
+ return nack_batch;
+}
+
+void NackRequester::UpdateReorderingStatistics(uint16_t seq_num) {
+ // Running on worker_thread_.
+ RTC_DCHECK(AheadOf(newest_seq_num_, seq_num));
+ uint16_t diff = ReverseDiff(newest_seq_num_, seq_num);
+ reordering_histogram_.Add(diff);
+}
+
+int NackRequester::WaitNumberOfPackets(float probability) const {
+  // Called on worker_thread_.
+ if (reordering_histogram_.NumValues() == 0)
+ return 0;
+ return reordering_histogram_.InverseCdf(probability);
+}
+
+} // namespace webrtc
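
For orientation, the batch decision in GetNackBatch() above can be restated as
a standalone predicate. The function and parameter names below are ours, not
part of the WebRTC API; `never_sent` models sent_at_time ==
Timestamp::MinusInfinity().

    #include <cstdint>

    // A NACK is emitted when the optional send delay has elapsed AND either
    // (a) the first-send trigger fired: the packet was never nacked and the
    // newest received sequence number has passed its reordering margin, or
    // (b) the retry trigger fired: at least one RTT has passed since the
    // previous NACK (trivially true when sent_at_time is -infinity).
    bool ShouldNackNow(bool consider_seq_num,  // options != kTimeOnly
                       bool consider_time,     // options != kSeqNumOnly
                       int64_t now_ms,
                       int64_t created_at_ms,
                       int64_t sent_at_ms,
                       bool never_sent,
                       int64_t send_nack_delay_ms,
                       int64_t rtt_ms,
                       bool newest_passed_send_at_seq_num) {
      const bool delay_timed_out =
          now_ms - created_at_ms >= send_nack_delay_ms;
      const bool rtt_passed = never_sent || now_ms - sent_at_ms >= rtt_ms;
      const bool seq_num_passed = never_sent && newest_passed_send_at_seq_num;
      return delay_timed_out && ((consider_seq_num && seq_num_passed) ||
                                 (consider_time && rtt_passed));
    }
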
diff --git a/third_party/libwebrtc/modules/video_coding/nack_requester.h b/third_party/libwebrtc/modules/video_coding/nack_requester.h
new file mode 100644
index 0000000000..c860787dcf
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/nack_requester.h
@@ -0,0 +1,157 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_NACK_REQUESTER_H_
+#define MODULES_VIDEO_CODING_NACK_REQUESTER_H_
+
+#include <stdint.h>
+
+#include <map>
+#include <set>
+#include <vector>
+
+#include "api/field_trials_view.h"
+#include "api/sequence_checker.h"
+#include "api/task_queue/pending_task_safety_flag.h"
+#include "api/task_queue/task_queue_base.h"
+#include "api/units/time_delta.h"
+#include "api/units/timestamp.h"
+#include "modules/include/module_common_types.h"
+#include "modules/video_coding/histogram.h"
+#include "rtc_base/numerics/sequence_number_util.h"
+#include "rtc_base/task_utils/repeating_task.h"
+#include "rtc_base/thread_annotations.h"
+#include "system_wrappers/include/clock.h"
+
+namespace webrtc {
+
+class NackRequesterBase {
+ public:
+ virtual ~NackRequesterBase() = default;
+ virtual void ProcessNacks() = 0;
+};
+
+class NackPeriodicProcessor {
+ public:
+ static constexpr TimeDelta kUpdateInterval = TimeDelta::Millis(20);
+ explicit NackPeriodicProcessor(TimeDelta update_interval = kUpdateInterval);
+ ~NackPeriodicProcessor();
+ void RegisterNackModule(NackRequesterBase* module);
+ void UnregisterNackModule(NackRequesterBase* module);
+
+ private:
+ void ProcessNackModules() RTC_RUN_ON(sequence_);
+
+ const TimeDelta update_interval_;
+ RepeatingTaskHandle repeating_task_ RTC_GUARDED_BY(sequence_);
+ std::vector<NackRequesterBase*> modules_ RTC_GUARDED_BY(sequence_);
+ RTC_NO_UNIQUE_ADDRESS SequenceChecker sequence_;
+};
+
+class ScopedNackPeriodicProcessorRegistration {
+ public:
+ ScopedNackPeriodicProcessorRegistration(NackRequesterBase* module,
+ NackPeriodicProcessor* processor);
+ ~ScopedNackPeriodicProcessorRegistration();
+
+ private:
+ NackRequesterBase* const module_;
+ NackPeriodicProcessor* const processor_;
+};
+
+class NackRequester final : public NackRequesterBase {
+ public:
+ NackRequester(TaskQueueBase* current_queue,
+ NackPeriodicProcessor* periodic_processor,
+ Clock* clock,
+ NackSender* nack_sender,
+ KeyFrameRequestSender* keyframe_request_sender,
+ const FieldTrialsView& field_trials);
+ ~NackRequester();
+
+ void ProcessNacks() override;
+
+ int OnReceivedPacket(uint16_t seq_num, bool is_keyframe);
+ int OnReceivedPacket(uint16_t seq_num, bool is_keyframe, bool is_recovered);
+
+ void ClearUpTo(uint16_t seq_num);
+ void UpdateRtt(int64_t rtt_ms);
+
+ private:
+ // Which fields to consider when deciding which packet to nack in
+ // GetNackBatch.
+ enum NackFilterOptions { kSeqNumOnly, kTimeOnly, kSeqNumAndTime };
+
+  // Holds the sequence number of a packet in the nack list, as well as
+  // metadata about when it should be nacked and how many times we have
+  // tried to nack this packet.
+ struct NackInfo {
+ NackInfo();
+ NackInfo(uint16_t seq_num,
+ uint16_t send_at_seq_num,
+ Timestamp created_at_time);
+
+ uint16_t seq_num;
+ uint16_t send_at_seq_num;
+ Timestamp created_at_time;
+ Timestamp sent_at_time;
+ int retries;
+ };
+
+ void AddPacketsToNack(uint16_t seq_num_start, uint16_t seq_num_end)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(worker_thread_);
+
+ // Removes packets from the nack list until the next keyframe. Returns true
+ // if packets were removed.
+ bool RemovePacketsUntilKeyFrame()
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(worker_thread_);
+ std::vector<uint16_t> GetNackBatch(NackFilterOptions options)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(worker_thread_);
+
+ // Update the reordering distribution.
+ void UpdateReorderingStatistics(uint16_t seq_num)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(worker_thread_);
+
+ // Returns how many packets we have to wait in order to receive the packet
+  // with probability `probability` or higher.
+ int WaitNumberOfPackets(float probability) const
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(worker_thread_);
+
+ TaskQueueBase* const worker_thread_;
+ Clock* const clock_;
+ NackSender* const nack_sender_;
+ KeyFrameRequestSender* const keyframe_request_sender_;
+
+ // TODO(philipel): Some of the variables below are consistently used on a
+ // known thread (e.g. see `initialized_`). Those probably do not need
+ // synchronized access.
+ std::map<uint16_t, NackInfo, DescendingSeqNumComp<uint16_t>> nack_list_
+ RTC_GUARDED_BY(worker_thread_);
+ std::set<uint16_t, DescendingSeqNumComp<uint16_t>> keyframe_list_
+ RTC_GUARDED_BY(worker_thread_);
+ std::set<uint16_t, DescendingSeqNumComp<uint16_t>> recovered_list_
+ RTC_GUARDED_BY(worker_thread_);
+ video_coding::Histogram reordering_histogram_ RTC_GUARDED_BY(worker_thread_);
+ bool initialized_ RTC_GUARDED_BY(worker_thread_);
+ TimeDelta rtt_ RTC_GUARDED_BY(worker_thread_);
+ uint16_t newest_seq_num_ RTC_GUARDED_BY(worker_thread_);
+
+  // Delay added before sending a NACK for a received packet.
+ const TimeDelta send_nack_delay_;
+
+ ScopedNackPeriodicProcessorRegistration processor_registration_;
+
+ // Used to signal destruction to potentially pending tasks.
+ ScopedTaskSafety task_safety_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_NACK_REQUESTER_H_
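
A minimal wiring sketch, assuming a task queue is current and that `sender` is
a hypothetical object implementing both NackSender and KeyFrameRequestSender
(the unit tests in nack_requester_unittest.cc use the same pattern):

    // Sketch only; `sender`, `clock`, and `field_trials` are assumed to
    // exist and to outlive the requester.
    webrtc::NackPeriodicProcessor processor;  // May be shared across streams.
    webrtc::NackRequester requester(webrtc::TaskQueueBase::Current(),
                                    &processor, &clock, &sender, &sender,
                                    field_trials);
    requester.UpdateRtt(/*rtt_ms=*/50);
    requester.OnReceivedPacket(/*seq_num=*/10, /*is_keyframe=*/true);
    requester.OnReceivedPacket(/*seq_num=*/13, /*is_keyframe=*/false);
    // Packets 11 and 12 are now tracked and will be NACKed according to
    // GetNackBatch(), either immediately (sequence-number trigger) or from
    // the periodic processor (time trigger).
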
diff --git a/third_party/libwebrtc/modules/video_coding/nack_requester_gn/moz.build b/third_party/libwebrtc/modules/video_coding/nack_requester_gn/moz.build
new file mode 100644
index 0000000000..82f0c8ee2d
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/nack_requester_gn/moz.build
@@ -0,0 +1,213 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/video_coding/histogram.cc",
+ "/third_party/libwebrtc/modules/video_coding/nack_requester.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("nack_requester_gn")
diff --git a/third_party/libwebrtc/modules/video_coding/nack_requester_unittest.cc b/third_party/libwebrtc/modules/video_coding/nack_requester_unittest.cc
new file mode 100644
index 0000000000..6f11cb6e91
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/nack_requester_unittest.cc
@@ -0,0 +1,402 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/nack_requester.h"
+
+#include <algorithm>
+#include <cstdint>
+#include <cstring>
+#include <memory>
+
+#include "system_wrappers/include/clock.h"
+#include "test/gtest.h"
+#include "test/run_loop.h"
+#include "test/scoped_key_value_config.h"
+
+namespace webrtc {
+// TODO(bugs.webrtc.org/11594): Use the GlobalSimulatedTimeController
+// instead of RunLoop. At the moment we mix use of the Clock and the underlying
+// implementation of RunLoop, which is realtime.
+class TestNackRequester : public ::testing::Test,
+ public NackSender,
+ public KeyFrameRequestSender {
+ protected:
+ TestNackRequester()
+ : clock_(new SimulatedClock(0)), keyframes_requested_(0) {}
+
+ void SetUp() override {}
+
+ void SendNack(const std::vector<uint16_t>& sequence_numbers,
+ bool buffering_allowed) override {
+ sent_nacks_.insert(sent_nacks_.end(), sequence_numbers.begin(),
+ sequence_numbers.end());
+ if (waiting_for_send_nack_) {
+ waiting_for_send_nack_ = false;
+ loop_.Quit();
+ }
+ }
+
+ void RequestKeyFrame() override { ++keyframes_requested_; }
+
+ void Flush() {
+ // nack_module.Process();
+ loop_.Flush();
+ }
+
+ bool WaitForSendNack() {
+ if (timed_out_) {
+ RTC_DCHECK_NOTREACHED();
+ return false;
+ }
+
+ RTC_DCHECK(!waiting_for_send_nack_);
+
+ waiting_for_send_nack_ = true;
+ loop_.task_queue()->PostDelayedTask(
+ [this]() {
+ timed_out_ = true;
+ loop_.Quit();
+ },
+ TimeDelta::Seconds(1));
+
+ loop_.Run();
+
+ if (timed_out_)
+ return false;
+
+ RTC_DCHECK(!waiting_for_send_nack_);
+ return true;
+ }
+
+ NackRequester& CreateNackModule(
+ TimeDelta interval = NackPeriodicProcessor::kUpdateInterval) {
+ RTC_DCHECK(!nack_module_.get());
+ nack_periodic_processor_ =
+ std::make_unique<NackPeriodicProcessor>(interval);
+ test::ScopedKeyValueConfig empty_field_trials_;
+ nack_module_ = std::make_unique<NackRequester>(
+ TaskQueueBase::Current(), nack_periodic_processor_.get(), clock_.get(),
+ this, this, empty_field_trials_);
+ nack_module_->UpdateRtt(kDefaultRttMs);
+ return *nack_module_.get();
+ }
+
+ static constexpr int64_t kDefaultRttMs = 20;
+ rtc::AutoThread main_thread_;
+ test::RunLoop loop_;
+ std::unique_ptr<SimulatedClock> clock_;
+ std::unique_ptr<NackPeriodicProcessor> nack_periodic_processor_;
+ std::unique_ptr<NackRequester> nack_module_;
+ std::vector<uint16_t> sent_nacks_;
+ int keyframes_requested_;
+ bool waiting_for_send_nack_ = false;
+ bool timed_out_ = false;
+};
+
+TEST_F(TestNackRequester, NackOnePacket) {
+ NackRequester& nack_module = CreateNackModule();
+ nack_module.OnReceivedPacket(1, false, false);
+ nack_module.OnReceivedPacket(3, false, false);
+ ASSERT_EQ(1u, sent_nacks_.size());
+ EXPECT_EQ(2, sent_nacks_[0]);
+}
+
+TEST_F(TestNackRequester, WrappingSeqNum) {
+ NackRequester& nack_module = CreateNackModule();
+ nack_module.OnReceivedPacket(0xfffe, false, false);
+ nack_module.OnReceivedPacket(1, false, false);
+ ASSERT_EQ(2u, sent_nacks_.size());
+ EXPECT_EQ(0xffff, sent_nacks_[0]);
+ EXPECT_EQ(0, sent_nacks_[1]);
+}
+
+TEST_F(TestNackRequester, WrappingSeqNumClearToKeyframe) {
+ NackRequester& nack_module = CreateNackModule(TimeDelta::Millis(10));
+ nack_module.OnReceivedPacket(0xfffe, false, false);
+ nack_module.OnReceivedPacket(1, false, false);
+ ASSERT_EQ(2u, sent_nacks_.size());
+ EXPECT_EQ(0xffff, sent_nacks_[0]);
+ EXPECT_EQ(0, sent_nacks_[1]);
+
+ sent_nacks_.clear();
+ nack_module.OnReceivedPacket(2, true, false);
+ ASSERT_EQ(0u, sent_nacks_.size());
+
+ nack_module.OnReceivedPacket(501, true, false);
+ ASSERT_EQ(498u, sent_nacks_.size());
+ for (int seq_num = 3; seq_num < 501; ++seq_num)
+ EXPECT_EQ(seq_num, sent_nacks_[seq_num - 3]);
+
+ sent_nacks_.clear();
+ nack_module.OnReceivedPacket(1001, false, false);
+ EXPECT_EQ(499u, sent_nacks_.size());
+ for (int seq_num = 502; seq_num < 1001; ++seq_num)
+ EXPECT_EQ(seq_num, sent_nacks_[seq_num - 502]);
+
+ sent_nacks_.clear();
+ clock_->AdvanceTimeMilliseconds(100);
+ ASSERT_TRUE(WaitForSendNack());
+ ASSERT_EQ(999u, sent_nacks_.size());
+ EXPECT_EQ(0xffff, sent_nacks_[0]);
+ EXPECT_EQ(0, sent_nacks_[1]);
+ for (int seq_num = 3; seq_num < 501; ++seq_num)
+ EXPECT_EQ(seq_num, sent_nacks_[seq_num - 1]);
+ for (int seq_num = 502; seq_num < 1001; ++seq_num)
+ EXPECT_EQ(seq_num, sent_nacks_[seq_num - 2]);
+
+  // Adding packet 1004 will cause the nack list to reach its max limit.
+ // It will then clear all nacks up to the next keyframe (seq num 2),
+ // thus removing 0xffff and 0 from the nack list.
+ sent_nacks_.clear();
+ nack_module.OnReceivedPacket(1004, false, false);
+ ASSERT_EQ(2u, sent_nacks_.size());
+ EXPECT_EQ(1002, sent_nacks_[0]);
+ EXPECT_EQ(1003, sent_nacks_[1]);
+
+ sent_nacks_.clear();
+ clock_->AdvanceTimeMilliseconds(100);
+ ASSERT_TRUE(WaitForSendNack());
+ ASSERT_EQ(999u, sent_nacks_.size());
+ for (int seq_num = 3; seq_num < 501; ++seq_num)
+ EXPECT_EQ(seq_num, sent_nacks_[seq_num - 3]);
+ for (int seq_num = 502; seq_num < 1001; ++seq_num)
+ EXPECT_EQ(seq_num, sent_nacks_[seq_num - 4]);
+
+ // Adding packet 1007 will cause the nack module to overflow again, thus
+ // clearing everything up to 501 which is the next keyframe.
+ nack_module.OnReceivedPacket(1007, false, false);
+ sent_nacks_.clear();
+ clock_->AdvanceTimeMilliseconds(100);
+ ASSERT_TRUE(WaitForSendNack());
+ ASSERT_EQ(503u, sent_nacks_.size());
+ for (int seq_num = 502; seq_num < 1001; ++seq_num)
+ EXPECT_EQ(seq_num, sent_nacks_[seq_num - 502]);
+ EXPECT_EQ(1005, sent_nacks_[501]);
+ EXPECT_EQ(1006, sent_nacks_[502]);
+}
+
+TEST_F(TestNackRequester, ResendNack) {
+ NackRequester& nack_module = CreateNackModule(TimeDelta::Millis(1));
+ nack_module.OnReceivedPacket(1, false, false);
+ nack_module.OnReceivedPacket(3, false, false);
+ size_t expected_nacks_sent = 1;
+ ASSERT_EQ(expected_nacks_sent, sent_nacks_.size());
+ EXPECT_EQ(2, sent_nacks_[0]);
+
+ nack_module.UpdateRtt(1);
+ clock_->AdvanceTimeMilliseconds(1);
+ WaitForSendNack(); // Fast retransmit allowed.
+ EXPECT_EQ(++expected_nacks_sent, sent_nacks_.size());
+
+  // Each retry has to wait at least one RTT by default.
+ for (int i = 2; i < 10; ++i) {
+ // Change RTT, above the 40ms max for exponential backoff.
+ TimeDelta rtt = TimeDelta::Millis(160); // + (i * 10 - 40)
+ nack_module.UpdateRtt(rtt.ms());
+
+ // RTT gets capped at 160ms in backoff calculations.
+ TimeDelta expected_backoff_delay =
+ (i - 1) * std::min(rtt, TimeDelta::Millis(160));
+
+ // Move to one millisecond before next allowed NACK.
+ clock_->AdvanceTimeMilliseconds(expected_backoff_delay.ms() - 1);
+ Flush();
+ EXPECT_EQ(expected_nacks_sent, sent_nacks_.size());
+
+ // Move to one millisecond after next allowed NACK.
+ // After rather than on to avoid rounding errors.
+ clock_->AdvanceTimeMilliseconds(2);
+ WaitForSendNack(); // Now allowed.
+ EXPECT_EQ(++expected_nacks_sent, sent_nacks_.size());
+ }
+
+ // Giving up after 10 tries.
+ clock_->AdvanceTimeMilliseconds(3000);
+ Flush();
+ EXPECT_EQ(expected_nacks_sent, sent_nacks_.size());
+}
+
+TEST_F(TestNackRequester, ResendPacketMaxRetries) {
+ NackRequester& nack_module = CreateNackModule(TimeDelta::Millis(1));
+ nack_module.OnReceivedPacket(1, false, false);
+ nack_module.OnReceivedPacket(3, false, false);
+ ASSERT_EQ(1u, sent_nacks_.size());
+ EXPECT_EQ(2, sent_nacks_[0]);
+
+ int backoff_factor = 1;
+ for (size_t retries = 1; retries < 10; ++retries) {
+ // Exponential backoff, so that we don't reject NACK because of time.
+ clock_->AdvanceTimeMilliseconds(backoff_factor * kDefaultRttMs);
+ backoff_factor *= 2;
+ WaitForSendNack();
+ EXPECT_EQ(retries + 1, sent_nacks_.size());
+ }
+
+ clock_->AdvanceTimeMilliseconds(backoff_factor * kDefaultRttMs);
+ Flush();
+ EXPECT_EQ(10u, sent_nacks_.size());
+}
+
+TEST_F(TestNackRequester, TooLargeNackList) {
+ NackRequester& nack_module = CreateNackModule();
+ nack_module.OnReceivedPacket(0, false, false);
+ nack_module.OnReceivedPacket(1001, false, false);
+ EXPECT_EQ(1000u, sent_nacks_.size());
+ EXPECT_EQ(0, keyframes_requested_);
+ nack_module.OnReceivedPacket(1003, false, false);
+ EXPECT_EQ(1000u, sent_nacks_.size());
+ EXPECT_EQ(1, keyframes_requested_);
+ nack_module.OnReceivedPacket(1004, false, false);
+ EXPECT_EQ(1000u, sent_nacks_.size());
+ EXPECT_EQ(1, keyframes_requested_);
+}
+
+TEST_F(TestNackRequester, TooLargeNackListWithKeyFrame) {
+ NackRequester& nack_module = CreateNackModule();
+ nack_module.OnReceivedPacket(0, false, false);
+ nack_module.OnReceivedPacket(1, true, false);
+ nack_module.OnReceivedPacket(1001, false, false);
+ EXPECT_EQ(999u, sent_nacks_.size());
+ EXPECT_EQ(0, keyframes_requested_);
+ nack_module.OnReceivedPacket(1003, false, false);
+ EXPECT_EQ(1000u, sent_nacks_.size());
+ EXPECT_EQ(0, keyframes_requested_);
+ nack_module.OnReceivedPacket(1005, false, false);
+ EXPECT_EQ(1000u, sent_nacks_.size());
+ EXPECT_EQ(1, keyframes_requested_);
+}
+
+TEST_F(TestNackRequester, ClearUpTo) {
+ NackRequester& nack_module = CreateNackModule(TimeDelta::Millis(1));
+ nack_module.OnReceivedPacket(0, false, false);
+ nack_module.OnReceivedPacket(100, false, false);
+ EXPECT_EQ(99u, sent_nacks_.size());
+
+ sent_nacks_.clear();
+ clock_->AdvanceTimeMilliseconds(100);
+ nack_module.ClearUpTo(50);
+ WaitForSendNack();
+ ASSERT_EQ(50u, sent_nacks_.size());
+ EXPECT_EQ(50, sent_nacks_[0]);
+}
+
+TEST_F(TestNackRequester, ClearUpToWrap) {
+ NackRequester& nack_module = CreateNackModule();
+ nack_module.OnReceivedPacket(0xfff0, false, false);
+ nack_module.OnReceivedPacket(0xf, false, false);
+ EXPECT_EQ(30u, sent_nacks_.size());
+
+ sent_nacks_.clear();
+ clock_->AdvanceTimeMilliseconds(100);
+ nack_module.ClearUpTo(0);
+ WaitForSendNack();
+ ASSERT_EQ(15u, sent_nacks_.size());
+ EXPECT_EQ(0, sent_nacks_[0]);
+}
+
+TEST_F(TestNackRequester, PacketNackCount) {
+ NackRequester& nack_module = CreateNackModule(TimeDelta::Millis(1));
+ EXPECT_EQ(0, nack_module.OnReceivedPacket(0, false, false));
+ EXPECT_EQ(0, nack_module.OnReceivedPacket(2, false, false));
+ EXPECT_EQ(1, nack_module.OnReceivedPacket(1, false, false));
+
+ sent_nacks_.clear();
+ nack_module.UpdateRtt(100);
+ EXPECT_EQ(0, nack_module.OnReceivedPacket(5, false, false));
+ clock_->AdvanceTimeMilliseconds(100);
+ WaitForSendNack();
+ EXPECT_EQ(4u, sent_nacks_.size());
+
+ clock_->AdvanceTimeMilliseconds(125);
+ WaitForSendNack();
+
+ EXPECT_EQ(6u, sent_nacks_.size());
+
+ EXPECT_EQ(3, nack_module.OnReceivedPacket(3, false, false));
+ EXPECT_EQ(3, nack_module.OnReceivedPacket(4, false, false));
+ EXPECT_EQ(0, nack_module.OnReceivedPacket(4, false, false));
+}
+
+TEST_F(TestNackRequester, NackListFullAndNoOverlapWithKeyframes) {
+ NackRequester& nack_module = CreateNackModule();
+ const int kMaxNackPackets = 1000;
+ const unsigned int kFirstGap = kMaxNackPackets - 20;
+ const unsigned int kSecondGap = 200;
+ uint16_t seq_num = 0;
+ nack_module.OnReceivedPacket(seq_num++, true, false);
+ seq_num += kFirstGap;
+ nack_module.OnReceivedPacket(seq_num++, true, false);
+ EXPECT_EQ(kFirstGap, sent_nacks_.size());
+ sent_nacks_.clear();
+ seq_num += kSecondGap;
+ nack_module.OnReceivedPacket(seq_num, true, false);
+ EXPECT_EQ(kSecondGap, sent_nacks_.size());
+}
+
+TEST_F(TestNackRequester, HandleFecRecoveredPacket) {
+ NackRequester& nack_module = CreateNackModule();
+ nack_module.OnReceivedPacket(1, false, false);
+ nack_module.OnReceivedPacket(4, false, true);
+ EXPECT_EQ(0u, sent_nacks_.size());
+ nack_module.OnReceivedPacket(5, false, false);
+ EXPECT_EQ(2u, sent_nacks_.size());
+}
+
+TEST_F(TestNackRequester, SendNackWithoutDelay) {
+ NackRequester& nack_module = CreateNackModule();
+ nack_module.OnReceivedPacket(0, false, false);
+ nack_module.OnReceivedPacket(100, false, false);
+ EXPECT_EQ(99u, sent_nacks_.size());
+}
+
+class TestNackRequesterWithFieldTrial : public ::testing::Test,
+ public NackSender,
+ public KeyFrameRequestSender {
+ protected:
+ TestNackRequesterWithFieldTrial()
+ : nack_delay_field_trial_("WebRTC-SendNackDelayMs/10/"),
+ clock_(new SimulatedClock(0)),
+ nack_module_(TaskQueueBase::Current(),
+ &nack_periodic_processor_,
+ clock_.get(),
+ this,
+ this,
+ nack_delay_field_trial_),
+ keyframes_requested_(0) {}
+
+ void SendNack(const std::vector<uint16_t>& sequence_numbers,
+ bool buffering_allowed) override {
+ sent_nacks_.insert(sent_nacks_.end(), sequence_numbers.begin(),
+ sequence_numbers.end());
+ }
+
+ void RequestKeyFrame() override { ++keyframes_requested_; }
+
+ test::ScopedKeyValueConfig nack_delay_field_trial_;
+ rtc::AutoThread main_thread_;
+ std::unique_ptr<SimulatedClock> clock_;
+ NackPeriodicProcessor nack_periodic_processor_;
+ NackRequester nack_module_;
+ std::vector<uint16_t> sent_nacks_;
+ int keyframes_requested_;
+};
+
+TEST_F(TestNackRequesterWithFieldTrial, SendNackWithDelay) {
+ nack_module_.OnReceivedPacket(0, false, false);
+ nack_module_.OnReceivedPacket(100, false, false);
+ EXPECT_EQ(0u, sent_nacks_.size());
+ clock_->AdvanceTimeMilliseconds(10);
+ nack_module_.OnReceivedPacket(106, false, false);
+ EXPECT_EQ(99u, sent_nacks_.size());
+ clock_->AdvanceTimeMilliseconds(10);
+ nack_module_.OnReceivedPacket(109, false, false);
+ EXPECT_EQ(104u, sent_nacks_.size());
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/packet.cc b/third_party/libwebrtc/modules/video_coding/packet.cc
new file mode 100644
index 0000000000..f1bac4a305
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/packet.cc
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/packet.h"
+
+#include "api/rtp_headers.h"
+
+namespace webrtc {
+
+VCMPacket::VCMPacket()
+ : payloadType(0),
+ timestamp(0),
+ ntp_time_ms_(0),
+ seqNum(0),
+ dataPtr(NULL),
+ sizeBytes(0),
+ markerBit(false),
+ timesNacked(-1),
+ completeNALU(kNaluUnset),
+ insertStartCode(false),
+ video_header() {
+ video_header.playout_delay = {-1, -1};
+}
+
+VCMPacket::VCMPacket(const uint8_t* ptr,
+ size_t size,
+ const RTPHeader& rtp_header,
+ const RTPVideoHeader& videoHeader,
+ int64_t ntp_time_ms,
+ Timestamp receive_time)
+ : payloadType(rtp_header.payloadType),
+ timestamp(rtp_header.timestamp),
+ ntp_time_ms_(ntp_time_ms),
+ seqNum(rtp_header.sequenceNumber),
+ dataPtr(ptr),
+ sizeBytes(size),
+ markerBit(rtp_header.markerBit),
+ timesNacked(-1),
+ completeNALU(kNaluIncomplete),
+ insertStartCode(videoHeader.codec == kVideoCodecH264 &&
+ videoHeader.is_first_packet_in_frame),
+ video_header(videoHeader),
+ packet_info(rtp_header, receive_time) {
+ if (is_first_packet_in_frame() && markerBit) {
+ completeNALU = kNaluComplete;
+ } else if (is_first_packet_in_frame()) {
+ completeNALU = kNaluStart;
+ } else if (markerBit) {
+ completeNALU = kNaluEnd;
+ } else {
+ completeNALU = kNaluIncomplete;
+ }
+
+  // Playout decisions are based entirely on the first packet in a frame.
+ if (!is_first_packet_in_frame()) {
+ video_header.playout_delay = {-1, -1};
+ }
+}
+
+VCMPacket::~VCMPacket() = default;
+
+} // namespace webrtc
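
The constructor above derives `completeNALU` purely from the first-packet flag
and the RTP marker bit. For clarity, the same mapping restated as a standalone
helper (the helper name is hypothetical, for illustration only):

    #include "modules/video_coding/packet.h"

    // (first_packet, marker) -> completeness:
    //   (true,  true ) -> kNaluComplete    single-packet frame
    //   (true,  false) -> kNaluStart       frame continues in later packets
    //   (false, true ) -> kNaluEnd         closes a multi-packet frame
    //   (false, false) -> kNaluIncomplete  middle of a frame
    webrtc::VCMNaluCompleteness NaluCompleteness(bool first_packet_in_frame,
                                                 bool marker_bit) {
      if (first_packet_in_frame && marker_bit)
        return webrtc::kNaluComplete;
      if (first_packet_in_frame)
        return webrtc::kNaluStart;
      if (marker_bit)
        return webrtc::kNaluEnd;
      return webrtc::kNaluIncomplete;
    }
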
diff --git a/third_party/libwebrtc/modules/video_coding/packet.h b/third_party/libwebrtc/modules/video_coding/packet.h
new file mode 100644
index 0000000000..9aa2d5ce08
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/packet.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_PACKET_H_
+#define MODULES_VIDEO_CODING_PACKET_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "absl/types/optional.h"
+#include "api/rtp_headers.h"
+#include "api/rtp_packet_info.h"
+#include "api/units/timestamp.h"
+#include "api/video/video_frame_type.h"
+#include "modules/rtp_rtcp/source/rtp_generic_frame_descriptor.h"
+#include "modules/rtp_rtcp/source/rtp_video_header.h"
+
+namespace webrtc {
+
+// Used to indicate if a received packet contains a complete NALU (or
+// equivalent).
+enum VCMNaluCompleteness {
+ kNaluUnset = 0, // Packet has not been filled.
+ kNaluComplete = 1, // Packet can be decoded as is.
+  kNaluStart,         // Packet contains the beginning of a NALU.
+  kNaluIncomplete,    // Packet is neither the beginning nor the end of a NALU.
+  kNaluEnd,           // Packet is the end of a NALU.
+};
+
+class VCMPacket {
+ public:
+ VCMPacket();
+
+ VCMPacket(const uint8_t* ptr,
+ size_t size,
+ const RTPHeader& rtp_header,
+ const RTPVideoHeader& video_header,
+ int64_t ntp_time_ms,
+ Timestamp receive_time);
+
+ ~VCMPacket();
+
+ VideoCodecType codec() const { return video_header.codec; }
+ int width() const { return video_header.width; }
+ int height() const { return video_header.height; }
+
+ bool is_first_packet_in_frame() const {
+ return video_header.is_first_packet_in_frame;
+ }
+ bool is_last_packet_in_frame() const {
+ return video_header.is_last_packet_in_frame;
+ }
+
+ uint8_t payloadType;
+ uint32_t timestamp;
+  // NTP time of the capture, in the local timebase, in milliseconds.
+ int64_t ntp_time_ms_;
+ uint16_t seqNum;
+ const uint8_t* dataPtr;
+ size_t sizeBytes;
+ bool markerBit;
+ int timesNacked;
+
+ VCMNaluCompleteness completeNALU; // Default is kNaluIncomplete.
+ bool insertStartCode; // True if a start code should be inserted before this
+ // packet.
+ RTPVideoHeader video_header;
+ absl::optional<RtpGenericFrameDescriptor> generic_descriptor;
+
+ RtpPacketInfo packet_info;
+};
+
+} // namespace webrtc
+#endif // MODULES_VIDEO_CODING_PACKET_H_
diff --git a/third_party/libwebrtc/modules/video_coding/packet_buffer.cc b/third_party/libwebrtc/modules/video_coding/packet_buffer.cc
new file mode 100644
index 0000000000..a0f2631881
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/packet_buffer.cc
@@ -0,0 +1,418 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/packet_buffer.h"
+
+#include <string.h>
+
+#include <algorithm>
+#include <cstdint>
+#include <limits>
+#include <utility>
+#include <vector>
+
+#include "absl/types/variant.h"
+#include "api/array_view.h"
+#include "api/rtp_packet_info.h"
+#include "api/video/video_frame_type.h"
+#include "common_video/h264/h264_common.h"
+#include "modules/rtp_rtcp/source/rtp_header_extensions.h"
+#include "modules/rtp_rtcp/source/rtp_packet_received.h"
+#include "modules/rtp_rtcp/source/rtp_video_header.h"
+#include "modules/video_coding/codecs/h264/include/h264_globals.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/numerics/mod_ops.h"
+
+namespace webrtc {
+namespace video_coding {
+
+PacketBuffer::Packet::Packet(const RtpPacketReceived& rtp_packet,
+ const RTPVideoHeader& video_header)
+ : marker_bit(rtp_packet.Marker()),
+ payload_type(rtp_packet.PayloadType()),
+ seq_num(rtp_packet.SequenceNumber()),
+ timestamp(rtp_packet.Timestamp()),
+ times_nacked(-1),
+ video_header(video_header) {}
+
+PacketBuffer::PacketBuffer(size_t start_buffer_size, size_t max_buffer_size)
+ : max_size_(max_buffer_size),
+ first_seq_num_(0),
+ first_packet_received_(false),
+ is_cleared_to_first_seq_num_(false),
+ buffer_(start_buffer_size),
+ sps_pps_idr_is_h264_keyframe_(false) {
+ RTC_DCHECK_LE(start_buffer_size, max_buffer_size);
+  // Buffer size must always be a power of 2, so that it evenly divides the
+  // 16-bit sequence number space and `seq_num % buffer_.size()` stays
+  // consistent across sequence number wraparound.
+ RTC_DCHECK((start_buffer_size & (start_buffer_size - 1)) == 0);
+ RTC_DCHECK((max_buffer_size & (max_buffer_size - 1)) == 0);
+}
+
+PacketBuffer::~PacketBuffer() {
+ Clear();
+}
+
+PacketBuffer::InsertResult PacketBuffer::InsertPacket(
+ std::unique_ptr<PacketBuffer::Packet> packet) {
+ PacketBuffer::InsertResult result;
+
+ uint16_t seq_num = packet->seq_num;
+ size_t index = seq_num % buffer_.size();
+
+ if (!first_packet_received_) {
+ first_seq_num_ = seq_num;
+ first_packet_received_ = true;
+ } else if (AheadOf(first_seq_num_, seq_num)) {
+ // If we have explicitly cleared past this packet then it's old,
+ // don't insert it, just silently ignore it.
+ if (is_cleared_to_first_seq_num_) {
+ return result;
+ }
+
+ first_seq_num_ = seq_num;
+ }
+
+ if (buffer_[index] != nullptr) {
+    // Duplicate packet; it is dropped when `packet` goes out of scope.
+ if (buffer_[index]->seq_num == packet->seq_num) {
+ return result;
+ }
+
+ // The packet buffer is full, try to expand the buffer.
+ while (ExpandBufferSize() && buffer_[seq_num % buffer_.size()] != nullptr) {
+ }
+ index = seq_num % buffer_.size();
+
+ // Packet buffer is still full since we were unable to expand the buffer.
+ if (buffer_[index] != nullptr) {
+      // Clear the buffer, drop the payload, and set `buffer_cleared` to
+      // signal that a new keyframe is needed.
+ RTC_LOG(LS_WARNING) << "Clear PacketBuffer and request key frame.";
+ ClearInternal();
+ result.buffer_cleared = true;
+ return result;
+ }
+ }
+
+ packet->continuous = false;
+ buffer_[index] = std::move(packet);
+
+ UpdateMissingPackets(seq_num);
+
+ received_padding_.erase(
+ received_padding_.begin(),
+ received_padding_.lower_bound(seq_num - (buffer_.size() / 4)));
+
+ result.packets = FindFrames(seq_num);
+ return result;
+}
+
+uint32_t PacketBuffer::ClearTo(uint16_t seq_num) {
+ // We have already cleared past this sequence number, no need to do anything.
+ if (is_cleared_to_first_seq_num_ &&
+ AheadOf<uint16_t>(first_seq_num_, seq_num)) {
+ return 0;
+ }
+
+  // If the packet buffer was cleared between the time a frame was created and
+  // the time it was returned.
+ if (!first_packet_received_)
+ return 0;
+
+ // Avoid iterating over the buffer more than once by capping the number of
+  // iterations to the size of the buffer.
+ ++seq_num;
+ uint32_t num_cleared_packets = 0;
+ size_t diff = ForwardDiff<uint16_t>(first_seq_num_, seq_num);
+ size_t iterations = std::min(diff, buffer_.size());
+ for (size_t i = 0; i < iterations; ++i) {
+ auto& stored = buffer_[first_seq_num_ % buffer_.size()];
+ if (stored != nullptr && AheadOf<uint16_t>(seq_num, stored->seq_num)) {
+ ++num_cleared_packets;
+ stored = nullptr;
+ }
+ ++first_seq_num_;
+ }
+
+  // If `diff` is larger than `iterations`, the loop above does not advance
+  // `first_seq_num_` all the way to `seq_num`, so we set it here.
+ first_seq_num_ = seq_num;
+
+ is_cleared_to_first_seq_num_ = true;
+ missing_packets_.erase(missing_packets_.begin(),
+ missing_packets_.lower_bound(seq_num));
+
+ received_padding_.erase(received_padding_.begin(),
+ received_padding_.lower_bound(seq_num));
+
+ return num_cleared_packets;
+}
+
+void PacketBuffer::Clear() {
+ ClearInternal();
+}
+
+PacketBuffer::InsertResult PacketBuffer::InsertPadding(uint16_t seq_num) {
+ PacketBuffer::InsertResult result;
+ UpdateMissingPackets(seq_num);
+ received_padding_.insert(seq_num);
+ result.packets = FindFrames(static_cast<uint16_t>(seq_num + 1));
+ return result;
+}
+
+void PacketBuffer::ForceSpsPpsIdrIsH264Keyframe() {
+ sps_pps_idr_is_h264_keyframe_ = true;
+}
+
+void PacketBuffer::ClearInternal() {
+ for (auto& entry : buffer_) {
+ entry = nullptr;
+ }
+
+ first_packet_received_ = false;
+ is_cleared_to_first_seq_num_ = false;
+ newest_inserted_seq_num_.reset();
+ missing_packets_.clear();
+ received_padding_.clear();
+}
+
+bool PacketBuffer::ExpandBufferSize() {
+ if (buffer_.size() == max_size_) {
+ RTC_LOG(LS_WARNING) << "PacketBuffer is already at max size (" << max_size_
+ << "), failed to increase size.";
+ return false;
+ }
+
+ size_t new_size = std::min(max_size_, 2 * buffer_.size());
+ std::vector<std::unique_ptr<Packet>> new_buffer(new_size);
+ for (std::unique_ptr<Packet>& entry : buffer_) {
+ if (entry != nullptr) {
+ new_buffer[entry->seq_num % new_size] = std::move(entry);
+ }
+ }
+ buffer_ = std::move(new_buffer);
+ RTC_LOG(LS_INFO) << "PacketBuffer size expanded to " << new_size;
+ return true;
+}
+
+bool PacketBuffer::PotentialNewFrame(uint16_t seq_num) const {
+ size_t index = seq_num % buffer_.size();
+ int prev_index = index > 0 ? index - 1 : buffer_.size() - 1;
+ const auto& entry = buffer_[index];
+ const auto& prev_entry = buffer_[prev_index];
+
+ if (entry == nullptr)
+ return false;
+ if (entry->seq_num != seq_num)
+ return false;
+ if (entry->is_first_packet_in_frame())
+ return true;
+ if (prev_entry == nullptr)
+ return false;
+ if (prev_entry->seq_num != static_cast<uint16_t>(entry->seq_num - 1))
+ return false;
+ if (prev_entry->timestamp != entry->timestamp)
+ return false;
+ if (prev_entry->continuous)
+ return true;
+
+ return false;
+}
+
+std::vector<std::unique_ptr<PacketBuffer::Packet>> PacketBuffer::FindFrames(
+ uint16_t seq_num) {
+ std::vector<std::unique_ptr<PacketBuffer::Packet>> found_frames;
+ auto start = seq_num;
+
+ for (size_t i = 0; i < buffer_.size(); ++i) {
+ if (received_padding_.find(seq_num) != received_padding_.end()) {
+ seq_num += 1;
+ continue;
+ }
+
+ if (!PotentialNewFrame(seq_num)) {
+ break;
+ }
+
+ size_t index = seq_num % buffer_.size();
+ buffer_[index]->continuous = true;
+
+    // If all packets of the frame are continuous, find its first packet and
+    // add all of its packets to the returned packets.
+ if (buffer_[index]->is_last_packet_in_frame()) {
+ uint16_t start_seq_num = seq_num;
+
+      // Find the start index by searching backwards until we reach the packet
+      // with the `frame_begin` flag set.
+ int start_index = index;
+ size_t tested_packets = 0;
+ int64_t frame_timestamp = buffer_[start_index]->timestamp;
+
+ // Identify H.264 keyframes by means of SPS, PPS, and IDR.
+ bool is_h264 = buffer_[start_index]->codec() == kVideoCodecH264;
+ bool has_h264_sps = false;
+ bool has_h264_pps = false;
+ bool has_h264_idr = false;
+ bool is_h264_keyframe = false;
+ int idr_width = -1;
+ int idr_height = -1;
+ bool full_frame_found = false;
+ while (true) {
+ ++tested_packets;
+
+ if (!is_h264) {
+ if (buffer_[start_index] == nullptr ||
+ buffer_[start_index]->is_first_packet_in_frame()) {
+ full_frame_found = buffer_[start_index] != nullptr;
+ break;
+ }
+ }
+
+ if (is_h264) {
+ const auto* h264_header = absl::get_if<RTPVideoHeaderH264>(
+ &buffer_[start_index]->video_header.video_type_header);
+ if (!h264_header || h264_header->nalus_length >= kMaxNalusPerPacket)
+ return found_frames;
+
+ for (size_t j = 0; j < h264_header->nalus_length; ++j) {
+ if (h264_header->nalus[j].type == H264::NaluType::kSps) {
+ has_h264_sps = true;
+ } else if (h264_header->nalus[j].type == H264::NaluType::kPps) {
+ has_h264_pps = true;
+ } else if (h264_header->nalus[j].type == H264::NaluType::kIdr) {
+ has_h264_idr = true;
+ }
+ }
+ if ((sps_pps_idr_is_h264_keyframe_ && has_h264_idr && has_h264_sps &&
+ has_h264_pps) ||
+ (!sps_pps_idr_is_h264_keyframe_ && has_h264_idr)) {
+ is_h264_keyframe = true;
+            // Store the resolution of the keyframe, taken from the packet
+            // with the smallest index that has a valid resolution; typically
+            // the IDR or SPS packet. Packets preceding it will have the IDR's
+            // resolution applied to them.
+ if (buffer_[start_index]->width() > 0 &&
+ buffer_[start_index]->height() > 0) {
+ idr_width = buffer_[start_index]->width();
+ idr_height = buffer_[start_index]->height();
+ }
+ }
+ }
+
+ if (tested_packets == buffer_.size())
+ break;
+
+ start_index = start_index > 0 ? start_index - 1 : buffer_.size() - 1;
+
+        // In the case of H264 we don't have a frame_begin bit (yes,
+        // `frame_begin` might be set to true but that is a lie). So instead
+        // we traverse backwards as long as we have a previous packet and the
+        // timestamp of that packet is the same as this one. This may cause
+        // the PacketBuffer to hand out incomplete frames.
+ // See: https://bugs.chromium.org/p/webrtc/issues/detail?id=7106
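+        // Illustration only: for H264 packets 20..23 sharing timestamp T, a
+        // last-packet at 23 walks back through 22, 21, and 20, stopping once
+        // the slot before 20 is empty or carries a different timestamp.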
+ if (is_h264 && (buffer_[start_index] == nullptr ||
+ buffer_[start_index]->timestamp != frame_timestamp)) {
+ break;
+ }
+
+ --start_seq_num;
+ }
+
+ if (is_h264) {
+ // Warn if this is an unsafe frame.
+ if (has_h264_idr && (!has_h264_sps || !has_h264_pps)) {
+ RTC_LOG(LS_WARNING)
+ << "Received H.264-IDR frame "
+ "(SPS: "
+ << has_h264_sps << ", PPS: " << has_h264_pps << "). Treating as "
+ << (sps_pps_idr_is_h264_keyframe_ ? "delta" : "key")
+ << " frame since WebRTC-SpsPpsIdrIsH264Keyframe is "
+ << (sps_pps_idr_is_h264_keyframe_ ? "enabled." : "disabled");
+ }
+
+ // Now that we have decided whether to treat this frame as a key frame
+ // or delta frame in the frame buffer, we update the field that
+ // determines if the RtpFrameObject is a key frame or delta frame.
+ const size_t first_packet_index = start_seq_num % buffer_.size();
+ if (is_h264_keyframe) {
+ buffer_[first_packet_index]->video_header.frame_type =
+ VideoFrameType::kVideoFrameKey;
+ if (idr_width > 0 && idr_height > 0) {
+ // IDR frame was finalized and we have the correct resolution for
+ // IDR; update first packet to have same resolution as IDR.
+ buffer_[first_packet_index]->video_header.width = idr_width;
+ buffer_[first_packet_index]->video_header.height = idr_height;
+ }
+ } else {
+ buffer_[first_packet_index]->video_header.frame_type =
+ VideoFrameType::kVideoFrameDelta;
+ }
+
+ // If this is not a keyframe, make sure there are no gaps in the packet
+ // sequence numbers up until this point.
+ if (!is_h264_keyframe && missing_packets_.upper_bound(start_seq_num) !=
+ missing_packets_.begin()) {
+ return found_frames;
+ }
+ }
+
+ if (is_h264 || full_frame_found) {
+ const uint16_t end_seq_num = seq_num + 1;
+          // Use the uint16_t type to handle sequence number wrap-around.
+ uint16_t num_packets = end_seq_num - start_seq_num;
+ found_frames.reserve(found_frames.size() + num_packets);
+ for (uint16_t i = start_seq_num; i != end_seq_num; ++i) {
+ std::unique_ptr<Packet>& packet = buffer_[i % buffer_.size()];
+ RTC_DCHECK(packet);
+ RTC_DCHECK_EQ(i, packet->seq_num);
+ // Ensure frame boundary flags are properly set.
+ packet->video_header.is_first_packet_in_frame = (i == start_seq_num);
+ packet->video_header.is_last_packet_in_frame = (i == seq_num);
+ found_frames.push_back(std::move(packet));
+ }
+
+ missing_packets_.erase(missing_packets_.begin(),
+ missing_packets_.upper_bound(seq_num));
+ received_padding_.erase(received_padding_.lower_bound(start),
+ received_padding_.upper_bound(seq_num));
+ }
+ }
+ ++seq_num;
+ }
+ return found_frames;
+}
+
+void PacketBuffer::UpdateMissingPackets(uint16_t seq_num) {
+ if (!newest_inserted_seq_num_)
+ newest_inserted_seq_num_ = seq_num;
+
+ const int kMaxPaddingAge = 1000;
+ if (AheadOf(seq_num, *newest_inserted_seq_num_)) {
+ uint16_t old_seq_num = seq_num - kMaxPaddingAge;
+ auto erase_to = missing_packets_.lower_bound(old_seq_num);
+ missing_packets_.erase(missing_packets_.begin(), erase_to);
+
+    // Guard against inserting a large number of missing packets if there is a
+    // jump in the sequence number.
+ if (AheadOf(old_seq_num, *newest_inserted_seq_num_))
+ *newest_inserted_seq_num_ = old_seq_num;
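+    // Illustration only: if `newest_inserted_seq_num_` was 100 and `seq_num`
+    // is 5000, `old_seq_num` is 4000, so only 4001..4999 end up in
+    // `missing_packets_` rather than everything from 101.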
+
+ ++*newest_inserted_seq_num_;
+ while (AheadOf(seq_num, *newest_inserted_seq_num_)) {
+ missing_packets_.insert(*newest_inserted_seq_num_);
+ ++*newest_inserted_seq_num_;
+ }
+ } else {
+ missing_packets_.erase(seq_num);
+ }
+}
+
+} // namespace video_coding
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/packet_buffer.h b/third_party/libwebrtc/modules/video_coding/packet_buffer.h
new file mode 100644
index 0000000000..2a64673619
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/packet_buffer.h
@@ -0,0 +1,133 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_PACKET_BUFFER_H_
+#define MODULES_VIDEO_CODING_PACKET_BUFFER_H_
+
+#include <memory>
+#include <queue>
+#include <set>
+#include <vector>
+
+#include "absl/base/attributes.h"
+#include "api/rtp_packet_info.h"
+#include "api/units/timestamp.h"
+#include "api/video/encoded_image.h"
+#include "modules/rtp_rtcp/source/rtp_packet_received.h"
+#include "modules/rtp_rtcp/source/rtp_video_header.h"
+#include "rtc_base/copy_on_write_buffer.h"
+#include "rtc_base/numerics/sequence_number_util.h"
+#include "rtc_base/thread_annotations.h"
+
+namespace webrtc {
+namespace video_coding {
+
+class PacketBuffer {
+ public:
+ struct Packet {
+ Packet() = default;
+ Packet(const RtpPacketReceived& rtp_packet,
+ const RTPVideoHeader& video_header);
+ Packet(const Packet&) = delete;
+ Packet(Packet&&) = delete;
+ Packet& operator=(const Packet&) = delete;
+ Packet& operator=(Packet&&) = delete;
+ ~Packet() = default;
+
+ VideoCodecType codec() const { return video_header.codec; }
+ int width() const { return video_header.width; }
+ int height() const { return video_header.height; }
+
+ bool is_first_packet_in_frame() const {
+ return video_header.is_first_packet_in_frame;
+ }
+ bool is_last_packet_in_frame() const {
+ return video_header.is_last_packet_in_frame;
+ }
+
+    // True when all packets preceding this one in the frame have been
+    // inserted into the packet buffer. Set and used internally by the
+    // PacketBuffer.
+ bool continuous = false;
+ bool marker_bit = false;
+ uint8_t payload_type = 0;
+ uint16_t seq_num = 0;
+ uint32_t timestamp = 0;
+ int times_nacked = -1;
+
+ rtc::CopyOnWriteBuffer video_payload;
+ RTPVideoHeader video_header;
+ };
+ struct InsertResult {
+ std::vector<std::unique_ptr<Packet>> packets;
+ // Indicates if the packet buffer was cleared, which means that a key
+ // frame request should be sent.
+ bool buffer_cleared = false;
+ };
+
+ // Both `start_buffer_size` and `max_buffer_size` must be a power of 2.
+ PacketBuffer(size_t start_buffer_size, size_t max_buffer_size);
+ ~PacketBuffer();
+
+ ABSL_MUST_USE_RESULT InsertResult
+ InsertPacket(std::unique_ptr<Packet> packet);
+ ABSL_MUST_USE_RESULT InsertResult InsertPadding(uint16_t seq_num);
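+
+  // A minimal usage sketch (hypothetical caller, illustration only):
+  //
+  //   PacketBuffer buffer(/*start_buffer_size=*/16, /*max_buffer_size=*/64);
+  //   auto result = buffer.InsertPacket(std::move(packet));
+  //   for (auto& p : result.packets) { /* assemble the frame */ }
+  //   if (result.buffer_cleared) { /* request a keyframe */ }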
+
+  // Clear all packets older than `seq_num`. Returns the number of packets
+  // cleared.
+ uint32_t ClearTo(uint16_t seq_num);
+ void Clear();
+
+ void ForceSpsPpsIdrIsH264Keyframe();
+
+ private:
+ void ClearInternal();
+
+ // Tries to expand the buffer.
+ bool ExpandBufferSize();
+
+  // Test if all previous packets have arrived for the given sequence number.
+ bool PotentialNewFrame(uint16_t seq_num) const;
+
+  // Test if all packets of a frame have arrived, and if so, returns the
+  // packets needed to create frames.
+ std::vector<std::unique_ptr<Packet>> FindFrames(uint16_t seq_num);
+
+ void UpdateMissingPackets(uint16_t seq_num);
+
+ // buffer_.size() and max_size_ must always be a power of two.
+ const size_t max_size_;
+
+  // The first sequence number currently in the buffer.
+ uint16_t first_seq_num_;
+
+ // If the packet buffer has received its first packet.
+ bool first_packet_received_;
+
+ // If the buffer is cleared to `first_seq_num_`.
+ bool is_cleared_to_first_seq_num_;
+
+  // Buffer that holds the inserted packets and information needed to
+  // determine continuity between them.
+ std::vector<std::unique_ptr<Packet>> buffer_;
+
+ absl::optional<uint16_t> newest_inserted_seq_num_;
+ std::set<uint16_t, DescendingSeqNumComp<uint16_t>> missing_packets_;
+
+ std::set<uint16_t, DescendingSeqNumComp<uint16_t>> received_padding_;
+
+ // Indicates if we should require SPS, PPS, and IDR for a particular
+ // RTP timestamp to treat the corresponding frame as a keyframe.
+ bool sps_pps_idr_is_h264_keyframe_;
+};
+
+} // namespace video_coding
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_PACKET_BUFFER_H_
diff --git a/third_party/libwebrtc/modules/video_coding/packet_buffer_gn/moz.build b/third_party/libwebrtc/modules/video_coding/packet_buffer_gn/moz.build
new file mode 100644
index 0000000000..12ec6e5913
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/packet_buffer_gn/moz.build
@@ -0,0 +1,214 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/video_coding/packet_buffer.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "GLESv2",
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "dl",
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("packet_buffer_gn")
diff --git a/third_party/libwebrtc/modules/video_coding/packet_buffer_unittest.cc b/third_party/libwebrtc/modules/video_coding/packet_buffer_unittest.cc
new file mode 100644
index 0000000000..49afa148e9
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/packet_buffer_unittest.cc
@@ -0,0 +1,827 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/video_coding/packet_buffer.h"
+
+#include <cstring>
+#include <limits>
+#include <ostream>
+#include <string>
+#include <utility>
+
+#include "api/array_view.h"
+#include "common_video/h264/h264_common.h"
+#include "modules/video_coding/frame_object.h"
+#include "rtc_base/random.h"
+#include "test/field_trial.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace video_coding {
+namespace {
+
+using ::testing::ElementsAre;
+using ::testing::ElementsAreArray;
+using ::testing::IsEmpty;
+using ::testing::Matches;
+using ::testing::Pointee;
+using ::testing::SizeIs;
+
+constexpr int kStartSize = 16;
+constexpr int kMaxSize = 64;
+
+void IgnoreResult(PacketBuffer::InsertResult /*result*/) {}
+
+// Validates that frame boundaries are valid and returns the first sequence
+// number of each frame.
+std::vector<uint16_t> StartSeqNums(
+ rtc::ArrayView<const std::unique_ptr<PacketBuffer::Packet>> packets) {
+ std::vector<uint16_t> result;
+ bool frame_boundary = true;
+ for (const auto& packet : packets) {
+ EXPECT_EQ(frame_boundary, packet->is_first_packet_in_frame());
+ if (packet->is_first_packet_in_frame()) {
+ result.push_back(packet->seq_num);
+ }
+ frame_boundary = packet->is_last_packet_in_frame();
+ }
+ EXPECT_TRUE(frame_boundary);
+ return result;
+}
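+
+// For example (illustration only): packets with seq_num 10 (first), 11, and
+// 12 (last), followed by packet 13 (first and last), yield {10, 13}.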
+
+MATCHER_P(StartSeqNumsAre, seq_num, "") {
+ return Matches(ElementsAre(seq_num))(StartSeqNums(arg.packets));
+}
+
+MATCHER_P2(StartSeqNumsAre, seq_num1, seq_num2, "") {
+ return Matches(ElementsAre(seq_num1, seq_num2))(StartSeqNums(arg.packets));
+}
+
+MATCHER(KeyFrame, "") {
+ return arg->is_first_packet_in_frame() &&
+ arg->video_header.frame_type == VideoFrameType::kVideoFrameKey;
+}
+
+MATCHER(DeltaFrame, "") {
+ return arg->is_first_packet_in_frame() &&
+ arg->video_header.frame_type == VideoFrameType::kVideoFrameDelta;
+}
+
+struct PacketBufferInsertResult : public PacketBuffer::InsertResult {
+ explicit PacketBufferInsertResult(PacketBuffer::InsertResult result)
+ : InsertResult(std::move(result)) {}
+};
+
+void PrintTo(const PacketBufferInsertResult& result, std::ostream* os) {
+ *os << "frames: { ";
+ for (const auto& packet : result.packets) {
+ if (packet->is_first_packet_in_frame() &&
+ packet->is_last_packet_in_frame()) {
+ *os << "{sn: " << packet->seq_num << " }";
+ } else if (packet->is_first_packet_in_frame()) {
+ *os << "{sn: [" << packet->seq_num << "-";
+ } else if (packet->is_last_packet_in_frame()) {
+ *os << packet->seq_num << "] }, ";
+ }
+ }
+ *os << " }";
+ if (result.buffer_cleared) {
+ *os << ", buffer_cleared";
+ }
+}
+
+class PacketBufferTest : public ::testing::Test {
+ protected:
+ PacketBufferTest() : rand_(0x7732213), packet_buffer_(kStartSize, kMaxSize) {}
+
+ uint16_t Rand() { return rand_.Rand<uint16_t>(); }
+
+ enum IsKeyFrame { kKeyFrame, kDeltaFrame };
+ enum IsFirst { kFirst, kNotFirst };
+ enum IsLast { kLast, kNotLast };
+
+ PacketBufferInsertResult Insert(uint16_t seq_num, // packet sequence number
+ IsKeyFrame keyframe, // is keyframe
+ IsFirst first, // is first packet of frame
+ IsLast last, // is last packet of frame
+ rtc::ArrayView<const uint8_t> data = {},
+ uint32_t timestamp = 123u) { // rtp timestamp
+ auto packet = std::make_unique<PacketBuffer::Packet>();
+ packet->video_header.codec = kVideoCodecGeneric;
+ packet->timestamp = timestamp;
+ packet->seq_num = seq_num;
+ packet->video_header.frame_type = keyframe == kKeyFrame
+ ? VideoFrameType::kVideoFrameKey
+ : VideoFrameType::kVideoFrameDelta;
+ packet->video_header.is_first_packet_in_frame = first == kFirst;
+ packet->video_header.is_last_packet_in_frame = last == kLast;
+ packet->video_payload.SetData(data.data(), data.size());
+
+ return PacketBufferInsertResult(
+ packet_buffer_.InsertPacket(std::move(packet)));
+ }
+
+ Random rand_;
+ PacketBuffer packet_buffer_;
+};
+
+TEST_F(PacketBufferTest, InsertOnePacket) {
+ const uint16_t seq_num = Rand();
+ EXPECT_THAT(Insert(seq_num, kKeyFrame, kFirst, kLast).packets, SizeIs(1));
+}
+
+TEST_F(PacketBufferTest, InsertMultiplePackets) {
+ const uint16_t seq_num = Rand();
+ EXPECT_THAT(Insert(seq_num, kKeyFrame, kFirst, kLast).packets, SizeIs(1));
+ EXPECT_THAT(Insert(seq_num + 1, kKeyFrame, kFirst, kLast).packets, SizeIs(1));
+ EXPECT_THAT(Insert(seq_num + 2, kKeyFrame, kFirst, kLast).packets, SizeIs(1));
+ EXPECT_THAT(Insert(seq_num + 3, kKeyFrame, kFirst, kLast).packets, SizeIs(1));
+}
+
+TEST_F(PacketBufferTest, InsertDuplicatePacket) {
+ const uint16_t seq_num = Rand();
+ EXPECT_THAT(Insert(seq_num, kKeyFrame, kFirst, kNotLast).packets, IsEmpty());
+ EXPECT_THAT(Insert(seq_num, kKeyFrame, kFirst, kNotLast).packets, IsEmpty());
+ EXPECT_THAT(Insert(seq_num + 1, kKeyFrame, kNotFirst, kLast).packets,
+ SizeIs(2));
+}
+
+TEST_F(PacketBufferTest, SeqNumWrapOneFrame) {
+ Insert(0xFFFF, kKeyFrame, kFirst, kNotLast);
+ EXPECT_THAT(Insert(0x0, kKeyFrame, kNotFirst, kLast),
+ StartSeqNumsAre(0xFFFF));
+}
+
+TEST_F(PacketBufferTest, SeqNumWrapTwoFrames) {
+ EXPECT_THAT(Insert(0xFFFF, kKeyFrame, kFirst, kLast),
+ StartSeqNumsAre(0xFFFF));
+ EXPECT_THAT(Insert(0x0, kKeyFrame, kFirst, kLast), StartSeqNumsAre(0x0));
+}
+
+TEST_F(PacketBufferTest, InsertOldPackets) {
+ EXPECT_THAT(Insert(100, kKeyFrame, kFirst, kNotLast).packets, IsEmpty());
+ EXPECT_THAT(Insert(102, kDeltaFrame, kFirst, kLast).packets, SizeIs(1));
+ EXPECT_THAT(Insert(101, kKeyFrame, kNotFirst, kLast).packets, SizeIs(2));
+
+ EXPECT_THAT(Insert(100, kKeyFrame, kFirst, kNotLast).packets, IsEmpty());
+ EXPECT_THAT(Insert(100, kKeyFrame, kFirst, kNotLast).packets, IsEmpty());
+ EXPECT_THAT(Insert(102, kDeltaFrame, kFirst, kLast).packets, SizeIs(1));
+
+ packet_buffer_.ClearTo(102);
+ EXPECT_THAT(Insert(102, kDeltaFrame, kFirst, kLast).packets, IsEmpty());
+ EXPECT_THAT(Insert(103, kDeltaFrame, kFirst, kLast).packets, SizeIs(1));
+}
+
+TEST_F(PacketBufferTest, FrameSize) {
+ const uint16_t seq_num = Rand();
+ uint8_t data1[5] = {};
+ uint8_t data2[5] = {};
+ uint8_t data3[5] = {};
+ uint8_t data4[5] = {};
+
+ Insert(seq_num, kKeyFrame, kFirst, kNotLast, data1);
+ Insert(seq_num + 1, kKeyFrame, kNotFirst, kNotLast, data2);
+ Insert(seq_num + 2, kKeyFrame, kNotFirst, kNotLast, data3);
+ auto packets =
+ Insert(seq_num + 3, kKeyFrame, kNotFirst, kLast, data4).packets;
+ // Expect one frame of 4 packets.
+ EXPECT_THAT(StartSeqNums(packets), ElementsAre(seq_num));
+ EXPECT_THAT(packets, SizeIs(4));
+}
+
+TEST_F(PacketBufferTest, ExpandBuffer) {
+ const uint16_t seq_num = Rand();
+
+ Insert(seq_num, kKeyFrame, kFirst, kNotLast);
+ for (int i = 1; i < kStartSize; ++i)
+ EXPECT_FALSE(
+ Insert(seq_num + i, kKeyFrame, kNotFirst, kNotLast).buffer_cleared);
+
+  // We have already inserted kStartSize packets; inserting the last packet of
+  // the frame should increase the buffer size and also result in an assembled
+  // frame.
+ EXPECT_FALSE(
+ Insert(seq_num + kStartSize, kKeyFrame, kNotFirst, kLast).buffer_cleared);
+}
+
+TEST_F(PacketBufferTest, SingleFrameExpandsBuffer) {
+ const uint16_t seq_num = Rand();
+
+ Insert(seq_num, kKeyFrame, kFirst, kNotLast);
+ for (int i = 1; i < kStartSize; ++i)
+ Insert(seq_num + i, kKeyFrame, kNotFirst, kNotLast);
+ EXPECT_THAT(Insert(seq_num + kStartSize, kKeyFrame, kNotFirst, kLast),
+ StartSeqNumsAre(seq_num));
+}
+
+TEST_F(PacketBufferTest, ExpandBufferOverflow) {
+ const uint16_t seq_num = Rand();
+
+ EXPECT_FALSE(Insert(seq_num, kKeyFrame, kFirst, kNotLast).buffer_cleared);
+ for (int i = 1; i < kMaxSize; ++i)
+ EXPECT_FALSE(
+ Insert(seq_num + i, kKeyFrame, kNotFirst, kNotLast).buffer_cleared);
+
+  // We have already inserted kMaxSize packets; inserting the last packet
+  // should overflow the buffer and result in `buffer_cleared` being set.
+ EXPECT_TRUE(
+ Insert(seq_num + kMaxSize, kKeyFrame, kNotFirst, kLast).buffer_cleared);
+}
+
+TEST_F(PacketBufferTest, OnePacketOneFrame) {
+ const uint16_t seq_num = Rand();
+ EXPECT_THAT(Insert(seq_num, kKeyFrame, kFirst, kLast),
+ StartSeqNumsAre(seq_num));
+}
+
+TEST_F(PacketBufferTest, TwoPacketsTwoFrames) {
+ const uint16_t seq_num = Rand();
+
+ EXPECT_THAT(Insert(seq_num, kKeyFrame, kFirst, kLast),
+ StartSeqNumsAre(seq_num));
+ EXPECT_THAT(Insert(seq_num + 1, kKeyFrame, kFirst, kLast),
+ StartSeqNumsAre(seq_num + 1));
+}
+
+TEST_F(PacketBufferTest, TwoPacketsOneFrames) {
+ const uint16_t seq_num = Rand();
+
+ EXPECT_THAT(Insert(seq_num, kKeyFrame, kFirst, kNotLast).packets, IsEmpty());
+ EXPECT_THAT(Insert(seq_num + 1, kKeyFrame, kNotFirst, kLast),
+ StartSeqNumsAre(seq_num));
+}
+
+TEST_F(PacketBufferTest, ThreePacketReorderingOneFrame) {
+ const uint16_t seq_num = Rand();
+
+ EXPECT_THAT(Insert(seq_num, kKeyFrame, kFirst, kNotLast).packets, IsEmpty());
+ EXPECT_THAT(Insert(seq_num + 2, kKeyFrame, kNotFirst, kLast).packets,
+ IsEmpty());
+ EXPECT_THAT(Insert(seq_num + 1, kKeyFrame, kNotFirst, kNotLast),
+ StartSeqNumsAre(seq_num));
+}
+
+TEST_F(PacketBufferTest, Frames) {
+ const uint16_t seq_num = Rand();
+
+ EXPECT_THAT(Insert(seq_num, kKeyFrame, kFirst, kLast),
+ StartSeqNumsAre(seq_num));
+ EXPECT_THAT(Insert(seq_num + 1, kDeltaFrame, kFirst, kLast),
+ StartSeqNumsAre(seq_num + 1));
+ EXPECT_THAT(Insert(seq_num + 2, kDeltaFrame, kFirst, kLast),
+ StartSeqNumsAre(seq_num + 2));
+ EXPECT_THAT(Insert(seq_num + 3, kDeltaFrame, kFirst, kLast),
+ StartSeqNumsAre(seq_num + 3));
+}
+
+TEST_F(PacketBufferTest, ClearSinglePacket) {
+ const uint16_t seq_num = Rand();
+
+ for (int i = 0; i < kMaxSize; ++i)
+ Insert(seq_num + i, kDeltaFrame, kFirst, kLast);
+
+ packet_buffer_.ClearTo(seq_num);
+ EXPECT_FALSE(
+ Insert(seq_num + kMaxSize, kDeltaFrame, kFirst, kLast).buffer_cleared);
+}
+
+TEST_F(PacketBufferTest, ClearPacketBeforeFullyReceivedFrame) {
+ Insert(0, kKeyFrame, kFirst, kNotLast);
+ Insert(1, kKeyFrame, kNotFirst, kNotLast);
+ packet_buffer_.ClearTo(0);
+ EXPECT_THAT(Insert(2, kKeyFrame, kNotFirst, kLast).packets, IsEmpty());
+}
+
+TEST_F(PacketBufferTest, ClearFullBuffer) {
+ for (int i = 0; i < kMaxSize; ++i)
+ Insert(i, kDeltaFrame, kFirst, kLast);
+
+ packet_buffer_.ClearTo(kMaxSize - 1);
+
+ for (int i = kMaxSize; i < 2 * kMaxSize; ++i)
+ EXPECT_FALSE(Insert(i, kDeltaFrame, kFirst, kLast).buffer_cleared);
+}
+
+TEST_F(PacketBufferTest, DontClearNewerPacket) {
+ EXPECT_THAT(Insert(0, kKeyFrame, kFirst, kLast), StartSeqNumsAre(0));
+ packet_buffer_.ClearTo(0);
+ EXPECT_THAT(Insert(2 * kStartSize, kKeyFrame, kFirst, kLast),
+ StartSeqNumsAre(2 * kStartSize));
+ EXPECT_THAT(Insert(3 * kStartSize + 1, kKeyFrame, kFirst, kNotLast).packets,
+ IsEmpty());
+ packet_buffer_.ClearTo(2 * kStartSize);
+ EXPECT_THAT(Insert(3 * kStartSize + 2, kKeyFrame, kNotFirst, kLast),
+ StartSeqNumsAre(3 * kStartSize + 1));
+}
+
+TEST_F(PacketBufferTest, OneIncompleteFrame) {
+ const uint16_t seq_num = Rand();
+
+ EXPECT_THAT(Insert(seq_num, kDeltaFrame, kFirst, kNotLast).packets,
+ IsEmpty());
+ EXPECT_THAT(Insert(seq_num + 1, kDeltaFrame, kNotFirst, kLast),
+ StartSeqNumsAre(seq_num));
+ EXPECT_THAT(Insert(seq_num - 1, kDeltaFrame, kNotFirst, kLast).packets,
+ IsEmpty());
+}
+
+TEST_F(PacketBufferTest, TwoIncompleteFramesFullBuffer) {
+ const uint16_t seq_num = Rand();
+
+ for (int i = 1; i < kMaxSize - 1; ++i)
+ Insert(seq_num + i, kDeltaFrame, kNotFirst, kNotLast);
+ EXPECT_THAT(Insert(seq_num, kDeltaFrame, kFirst, kNotLast).packets,
+ IsEmpty());
+ EXPECT_THAT(Insert(seq_num - 1, kDeltaFrame, kNotFirst, kLast).packets,
+ IsEmpty());
+}
+
+TEST_F(PacketBufferTest, FramesReordered) {
+ const uint16_t seq_num = Rand();
+
+ EXPECT_THAT(Insert(seq_num + 1, kDeltaFrame, kFirst, kLast),
+ StartSeqNumsAre(seq_num + 1));
+ EXPECT_THAT(Insert(seq_num, kKeyFrame, kFirst, kLast),
+ StartSeqNumsAre(seq_num));
+ EXPECT_THAT(Insert(seq_num + 3, kDeltaFrame, kFirst, kLast),
+ StartSeqNumsAre(seq_num + 3));
+ EXPECT_THAT(Insert(seq_num + 2, kDeltaFrame, kFirst, kLast),
+ StartSeqNumsAre(seq_num + 2));
+}
+
+TEST_F(PacketBufferTest, InsertPacketAfterSequenceNumberWrapAround) {
+ uint16_t kFirstSeqNum = 0;
+ uint32_t kTimestampDelta = 100;
+ uint32_t timestamp = 10000;
+ uint16_t seq_num = kFirstSeqNum;
+
+ // Loop until seq_num wraps around.
+ SeqNumUnwrapper<uint16_t> unwrapper;
+ while (unwrapper.Unwrap(seq_num) < std::numeric_limits<uint16_t>::max()) {
+ Insert(seq_num++, kKeyFrame, kFirst, kNotLast, {}, timestamp);
+ for (int i = 0; i < 5; ++i) {
+ Insert(seq_num++, kKeyFrame, kNotFirst, kNotLast, {}, timestamp);
+ }
+ Insert(seq_num++, kKeyFrame, kNotFirst, kLast, {}, timestamp);
+ timestamp += kTimestampDelta;
+ }
+
+ // Receive frame with overlapping sequence numbers.
+ Insert(seq_num++, kKeyFrame, kFirst, kNotLast, {}, timestamp);
+ for (int i = 0; i < 5; ++i) {
+ Insert(seq_num++, kKeyFrame, kNotFirst, kNotLast, {}, timestamp);
+ }
+ auto packets =
+ Insert(seq_num++, kKeyFrame, kNotFirst, kLast, {}, timestamp).packets;
+ // One frame of 7 packets.
+ EXPECT_THAT(StartSeqNums(packets), SizeIs(1));
+ EXPECT_THAT(packets, SizeIs(7));
+}
+
+// If `sps_pps_idr_is_keyframe` is true, we require keyframes to contain
+// SPS/PPS/IDR, and the keyframes we create as part of the test do contain
+// SPS/PPS/IDR. If `sps_pps_idr_is_keyframe` is false, we require and create
+// keyframes containing only IDR.
+class PacketBufferH264Test : public PacketBufferTest {
+ protected:
+ explicit PacketBufferH264Test(bool sps_pps_idr_is_keyframe)
+ : PacketBufferTest(), sps_pps_idr_is_keyframe_(sps_pps_idr_is_keyframe) {
+ if (sps_pps_idr_is_keyframe) {
+ packet_buffer_.ForceSpsPpsIdrIsH264Keyframe();
+ }
+ }
+
+ PacketBufferInsertResult InsertH264(
+ uint16_t seq_num, // packet sequence number
+ IsKeyFrame keyframe, // is keyframe
+ IsFirst first, // is first packet of frame
+ IsLast last, // is last packet of frame
+ uint32_t timestamp, // rtp timestamp
+ rtc::ArrayView<const uint8_t> data = {},
+ uint32_t width = 0, // width of frame (SPS/IDR)
+ uint32_t height = 0) { // height of frame (SPS/IDR)
+ auto packet = std::make_unique<PacketBuffer::Packet>();
+ packet->video_header.codec = kVideoCodecH264;
+ auto& h264_header =
+ packet->video_header.video_type_header.emplace<RTPVideoHeaderH264>();
+ packet->seq_num = seq_num;
+ packet->timestamp = timestamp;
+ if (keyframe == kKeyFrame) {
+ if (sps_pps_idr_is_keyframe_) {
+ h264_header.nalus[0].type = H264::NaluType::kSps;
+ h264_header.nalus[1].type = H264::NaluType::kPps;
+ h264_header.nalus[2].type = H264::NaluType::kIdr;
+ h264_header.nalus_length = 3;
+ } else {
+ h264_header.nalus[0].type = H264::NaluType::kIdr;
+ h264_header.nalus_length = 1;
+ }
+ }
+ packet->video_header.width = width;
+ packet->video_header.height = height;
+ packet->video_header.is_first_packet_in_frame = first == kFirst;
+ packet->video_header.is_last_packet_in_frame = last == kLast;
+ packet->video_payload.SetData(data.data(), data.size());
+
+ return PacketBufferInsertResult(
+ packet_buffer_.InsertPacket(std::move(packet)));
+ }
+
+ PacketBufferInsertResult InsertH264KeyFrameWithAud(
+ uint16_t seq_num, // packet sequence number
+ IsKeyFrame keyframe, // is keyframe
+ IsFirst first, // is first packet of frame
+ IsLast last, // is last packet of frame
+ uint32_t timestamp, // rtp timestamp
+ rtc::ArrayView<const uint8_t> data = {},
+ uint32_t width = 0, // width of frame (SPS/IDR)
+ uint32_t height = 0) { // height of frame (SPS/IDR)
+ auto packet = std::make_unique<PacketBuffer::Packet>();
+ packet->video_header.codec = kVideoCodecH264;
+ auto& h264_header =
+ packet->video_header.video_type_header.emplace<RTPVideoHeaderH264>();
+ packet->seq_num = seq_num;
+ packet->timestamp = timestamp;
+
+    // This should be the start of the frame.
+ RTC_CHECK(first == kFirst);
+
+    // Insert an AUD NALU / packet without width/height.
+ h264_header.nalus[0].type = H264::NaluType::kAud;
+ h264_header.nalus_length = 1;
+ packet->video_header.is_first_packet_in_frame = true;
+ packet->video_header.is_last_packet_in_frame = false;
+ IgnoreResult(packet_buffer_.InsertPacket(std::move(packet)));
+    // Insert the IDR.
+ return InsertH264(seq_num + 1, keyframe, kNotFirst, last, timestamp, data,
+ width, height);
+ }
+
+ const bool sps_pps_idr_is_keyframe_;
+};
+
+// This fixture is used to test the general behaviour of the packet buffer
+// in both configurations.
+class PacketBufferH264ParameterizedTest
+ : public ::testing::WithParamInterface<bool>,
+ public PacketBufferH264Test {
+ protected:
+ PacketBufferH264ParameterizedTest() : PacketBufferH264Test(GetParam()) {}
+};
+
+INSTANTIATE_TEST_SUITE_P(SpsPpsIdrIsKeyframe,
+ PacketBufferH264ParameterizedTest,
+ ::testing::Bool());
+
+TEST_P(PacketBufferH264ParameterizedTest, DontRemoveMissingPacketOnClearTo) {
+ InsertH264(0, kKeyFrame, kFirst, kLast, 0);
+ InsertH264(2, kDeltaFrame, kFirst, kNotLast, 2);
+ packet_buffer_.ClearTo(0);
+  // Expect no frame because packet #1 is missing.
+ EXPECT_THAT(InsertH264(3, kDeltaFrame, kNotFirst, kLast, 2).packets,
+ IsEmpty());
+}
+
+TEST_P(PacketBufferH264ParameterizedTest, GetBitstreamOneFrameFullBuffer) {
+ uint8_t data_arr[kStartSize][1];
+ uint8_t expected[kStartSize];
+
+ for (uint8_t i = 0; i < kStartSize; ++i) {
+ data_arr[i][0] = i;
+ expected[i] = i;
+ }
+
+ InsertH264(0, kKeyFrame, kFirst, kNotLast, 1, data_arr[0]);
+ for (uint8_t i = 1; i < kStartSize - 1; ++i) {
+ InsertH264(i, kKeyFrame, kNotFirst, kNotLast, 1, data_arr[i]);
+ }
+
+ auto packets = InsertH264(kStartSize - 1, kKeyFrame, kNotFirst, kLast, 1,
+ data_arr[kStartSize - 1])
+ .packets;
+ ASSERT_THAT(StartSeqNums(packets), ElementsAre(0));
+ EXPECT_THAT(packets, SizeIs(kStartSize));
+ for (size_t i = 0; i < packets.size(); ++i) {
+ EXPECT_THAT(packets[i]->video_payload, SizeIs(1)) << "Packet #" << i;
+ }
+}
+
+TEST_P(PacketBufferH264ParameterizedTest, GetBitstreamBufferPadding) {
+ uint16_t seq_num = Rand();
+ rtc::CopyOnWriteBuffer data = "some plain old data";
+
+ auto packet = std::make_unique<PacketBuffer::Packet>();
+ auto& h264_header =
+ packet->video_header.video_type_header.emplace<RTPVideoHeaderH264>();
+ h264_header.nalus_length = 1;
+ h264_header.nalus[0].type = H264::NaluType::kIdr;
+ h264_header.packetization_type = kH264SingleNalu;
+ packet->seq_num = seq_num;
+ packet->video_header.codec = kVideoCodecH264;
+ packet->video_payload = data;
+ packet->video_header.is_first_packet_in_frame = true;
+ packet->video_header.is_last_packet_in_frame = true;
+ auto frames = packet_buffer_.InsertPacket(std::move(packet)).packets;
+
+ ASSERT_THAT(frames, SizeIs(1));
+ EXPECT_EQ(frames[0]->seq_num, seq_num);
+ EXPECT_EQ(frames[0]->video_payload, data);
+}
+
+TEST_P(PacketBufferH264ParameterizedTest, FrameResolution) {
+ uint16_t seq_num = 100;
+ uint8_t data[] = "some plain old data";
+ uint32_t width = 640;
+ uint32_t height = 360;
+ uint32_t timestamp = 1000;
+
+ auto packets = InsertH264(seq_num, kKeyFrame, kFirst, kLast, timestamp, data,
+ width, height)
+ .packets;
+
+ ASSERT_THAT(packets, SizeIs(1));
+ EXPECT_EQ(packets[0]->video_header.width, width);
+ EXPECT_EQ(packets[0]->video_header.height, height);
+}
+
+TEST_P(PacketBufferH264ParameterizedTest, FrameResolutionNaluBeforeSPS) {
+ uint16_t seq_num = 100;
+ uint8_t data[] = "some plain old data";
+ uint32_t width = 640;
+ uint32_t height = 360;
+ uint32_t timestamp = 1000;
+
+ auto packets = InsertH264KeyFrameWithAud(seq_num, kKeyFrame, kFirst, kLast,
+ timestamp, data, width, height)
+ .packets;
+
+ ASSERT_THAT(StartSeqNums(packets), ElementsAre(seq_num));
+ EXPECT_EQ(packets[0]->video_header.width, width);
+ EXPECT_EQ(packets[0]->video_header.height, height);
+}
+
+TEST_F(PacketBufferTest, FreeSlotsOnFrameCreation) {
+ const uint16_t seq_num = Rand();
+
+ Insert(seq_num, kKeyFrame, kFirst, kNotLast);
+ Insert(seq_num + 1, kDeltaFrame, kNotFirst, kNotLast);
+ EXPECT_THAT(Insert(seq_num + 2, kDeltaFrame, kNotFirst, kLast),
+ StartSeqNumsAre(seq_num));
+
+ // Insert frame that fills the whole buffer.
+ Insert(seq_num + 3, kKeyFrame, kFirst, kNotLast);
+ for (int i = 0; i < kMaxSize - 2; ++i)
+ Insert(seq_num + i + 4, kDeltaFrame, kNotFirst, kNotLast);
+ EXPECT_THAT(Insert(seq_num + kMaxSize + 2, kKeyFrame, kNotFirst, kLast),
+ StartSeqNumsAre(seq_num + 3));
+}
+
+TEST_F(PacketBufferTest, Clear) {
+ const uint16_t seq_num = Rand();
+
+ Insert(seq_num, kKeyFrame, kFirst, kNotLast);
+ Insert(seq_num + 1, kDeltaFrame, kNotFirst, kNotLast);
+ EXPECT_THAT(Insert(seq_num + 2, kDeltaFrame, kNotFirst, kLast),
+ StartSeqNumsAre(seq_num));
+
+ packet_buffer_.Clear();
+
+ Insert(seq_num + kStartSize, kKeyFrame, kFirst, kNotLast);
+ Insert(seq_num + kStartSize + 1, kDeltaFrame, kNotFirst, kNotLast);
+ EXPECT_THAT(Insert(seq_num + kStartSize + 2, kDeltaFrame, kNotFirst, kLast),
+ StartSeqNumsAre(seq_num + kStartSize));
+}
+
+TEST_F(PacketBufferTest, FramesAfterClear) {
+ Insert(9025, kDeltaFrame, kFirst, kLast);
+ Insert(9024, kKeyFrame, kFirst, kLast);
+ packet_buffer_.ClearTo(9025);
+ EXPECT_THAT(Insert(9057, kDeltaFrame, kFirst, kLast).packets, SizeIs(1));
+ EXPECT_THAT(Insert(9026, kDeltaFrame, kFirst, kLast).packets, SizeIs(1));
+}
+
+TEST_F(PacketBufferTest, SameFrameDifferentTimestamps) {
+ Insert(0, kKeyFrame, kFirst, kNotLast, {}, 1000);
+ EXPECT_THAT(Insert(1, kKeyFrame, kNotFirst, kLast, {}, 1001).packets,
+ IsEmpty());
+}
+
+TEST_F(PacketBufferTest, ContinuousSeqNumDoubleMarkerBit) {
+ Insert(2, kKeyFrame, kNotFirst, kNotLast);
+ Insert(1, kKeyFrame, kFirst, kLast);
+ EXPECT_THAT(Insert(3, kKeyFrame, kNotFirst, kLast).packets, IsEmpty());
+}
+
+TEST_F(PacketBufferTest, IncomingCodecChange) {
+ auto packet = std::make_unique<PacketBuffer::Packet>();
+ packet->video_header.is_first_packet_in_frame = true;
+ packet->video_header.is_last_packet_in_frame = true;
+ packet->video_header.codec = kVideoCodecVP8;
+ packet->video_header.video_type_header.emplace<RTPVideoHeaderVP8>();
+ packet->timestamp = 1;
+ packet->seq_num = 1;
+ packet->video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ EXPECT_THAT(packet_buffer_.InsertPacket(std::move(packet)).packets,
+ SizeIs(1));
+
+ packet = std::make_unique<PacketBuffer::Packet>();
+ packet->video_header.is_first_packet_in_frame = true;
+ packet->video_header.is_last_packet_in_frame = true;
+ packet->video_header.codec = kVideoCodecH264;
+ auto& h264_header =
+ packet->video_header.video_type_header.emplace<RTPVideoHeaderH264>();
+ h264_header.nalus_length = 1;
+ packet->timestamp = 3;
+ packet->seq_num = 3;
+ packet->video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ EXPECT_THAT(packet_buffer_.InsertPacket(std::move(packet)).packets,
+ IsEmpty());
+
+ packet = std::make_unique<PacketBuffer::Packet>();
+ packet->video_header.is_first_packet_in_frame = true;
+ packet->video_header.is_last_packet_in_frame = true;
+ packet->video_header.codec = kVideoCodecVP8;
+ packet->video_header.video_type_header.emplace<RTPVideoHeaderVP8>();
+ packet->timestamp = 2;
+ packet->seq_num = 2;
+ packet->video_header.frame_type = VideoFrameType::kVideoFrameDelta;
+ EXPECT_THAT(packet_buffer_.InsertPacket(std::move(packet)).packets,
+ SizeIs(2));
+}
+
+TEST_F(PacketBufferTest, TooManyNalusInPacket) {
+ auto packet = std::make_unique<PacketBuffer::Packet>();
+ packet->video_header.codec = kVideoCodecH264;
+ packet->timestamp = 1;
+ packet->seq_num = 1;
+ packet->video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ packet->video_header.is_first_packet_in_frame = true;
+ packet->video_header.is_last_packet_in_frame = true;
+ auto& h264_header =
+ packet->video_header.video_type_header.emplace<RTPVideoHeaderH264>();
+ h264_header.nalus_length = kMaxNalusPerPacket;
+ EXPECT_THAT(packet_buffer_.InsertPacket(std::move(packet)).packets,
+ IsEmpty());
+}
+
+TEST_P(PacketBufferH264ParameterizedTest, OneFrameFillBuffer) {
+ InsertH264(0, kKeyFrame, kFirst, kNotLast, 1000);
+ for (int i = 1; i < kStartSize - 1; ++i)
+ InsertH264(i, kKeyFrame, kNotFirst, kNotLast, 1000);
+ EXPECT_THAT(InsertH264(kStartSize - 1, kKeyFrame, kNotFirst, kLast, 1000),
+ StartSeqNumsAre(0));
+}
+
+TEST_P(PacketBufferH264ParameterizedTest, CreateFramesAfterFilledBuffer) {
+ EXPECT_THAT(InsertH264(kStartSize - 2, kKeyFrame, kFirst, kLast, 0).packets,
+ SizeIs(1));
+
+ InsertH264(kStartSize, kDeltaFrame, kFirst, kNotLast, 2000);
+ for (int i = 1; i < kStartSize; ++i)
+ InsertH264(kStartSize + i, kDeltaFrame, kNotFirst, kNotLast, 2000);
+ EXPECT_THAT(
+ InsertH264(kStartSize + kStartSize, kDeltaFrame, kNotFirst, kLast, 2000)
+ .packets,
+ IsEmpty());
+
+ EXPECT_THAT(InsertH264(kStartSize - 1, kKeyFrame, kFirst, kLast, 1000),
+ StartSeqNumsAre(kStartSize - 1, kStartSize));
+}
+
+TEST_P(PacketBufferH264ParameterizedTest, OneFrameMaxSeqNum) {
+ InsertH264(65534, kKeyFrame, kFirst, kNotLast, 1000);
+ EXPECT_THAT(InsertH264(65535, kKeyFrame, kNotFirst, kLast, 1000),
+ StartSeqNumsAre(65534));
+}
+
+TEST_P(PacketBufferH264ParameterizedTest, ClearMissingPacketsOnKeyframe) {
+ InsertH264(0, kKeyFrame, kFirst, kLast, 1000);
+ InsertH264(2, kKeyFrame, kFirst, kLast, 3000);
+ InsertH264(3, kDeltaFrame, kFirst, kNotLast, 4000);
+ InsertH264(4, kDeltaFrame, kNotFirst, kLast, 4000);
+
+ EXPECT_THAT(InsertH264(kStartSize + 1, kKeyFrame, kFirst, kLast, 18000),
+ StartSeqNumsAre(kStartSize + 1));
+}
+
+TEST_P(PacketBufferH264ParameterizedTest, FindFramesOnPadding) {
+ EXPECT_THAT(InsertH264(0, kKeyFrame, kFirst, kLast, 1000),
+ StartSeqNumsAre(0));
+ EXPECT_THAT(InsertH264(2, kDeltaFrame, kFirst, kLast, 1000).packets,
+ IsEmpty());
+
+ EXPECT_THAT(packet_buffer_.InsertPadding(1), StartSeqNumsAre(2));
+}
+
+TEST_P(PacketBufferH264ParameterizedTest, FindFramesOnReorderedPadding) {
+ EXPECT_THAT(InsertH264(0, kKeyFrame, kFirst, kLast, 1001),
+ StartSeqNumsAre(0));
+ EXPECT_THAT(InsertH264(1, kDeltaFrame, kFirst, kNotLast, 1002).packets,
+ IsEmpty());
+ EXPECT_THAT(packet_buffer_.InsertPadding(3).packets, IsEmpty());
+ EXPECT_THAT(InsertH264(4, kDeltaFrame, kFirst, kLast, 1003).packets,
+ IsEmpty());
+ EXPECT_THAT(InsertH264(2, kDeltaFrame, kNotFirst, kLast, 1002),
+ StartSeqNumsAre(1, 4));
+}
+
+class PacketBufferH264XIsKeyframeTest : public PacketBufferH264Test {
+ protected:
+ const uint16_t kSeqNum = 5;
+
+ explicit PacketBufferH264XIsKeyframeTest(bool sps_pps_idr_is_keyframe)
+ : PacketBufferH264Test(sps_pps_idr_is_keyframe) {}
+
+ std::unique_ptr<PacketBuffer::Packet> CreatePacket() {
+ auto packet = std::make_unique<PacketBuffer::Packet>();
+ packet->video_header.codec = kVideoCodecH264;
+ packet->seq_num = kSeqNum;
+
+ packet->video_header.is_first_packet_in_frame = true;
+ packet->video_header.is_last_packet_in_frame = true;
+ return packet;
+ }
+};
+
+class PacketBufferH264IdrIsKeyframeTest
+ : public PacketBufferH264XIsKeyframeTest {
+ protected:
+ PacketBufferH264IdrIsKeyframeTest()
+ : PacketBufferH264XIsKeyframeTest(false) {}
+};
+
+TEST_F(PacketBufferH264IdrIsKeyframeTest, IdrIsKeyframe) {
+ auto packet = CreatePacket();
+ auto& h264_header =
+ packet->video_header.video_type_header.emplace<RTPVideoHeaderH264>();
+ h264_header.nalus[0].type = H264::NaluType::kIdr;
+ h264_header.nalus_length = 1;
+ EXPECT_THAT(packet_buffer_.InsertPacket(std::move(packet)).packets,
+ ElementsAre(KeyFrame()));
+}
+
+TEST_F(PacketBufferH264IdrIsKeyframeTest, SpsPpsIdrIsKeyframe) {
+ auto packet = CreatePacket();
+ auto& h264_header =
+ packet->video_header.video_type_header.emplace<RTPVideoHeaderH264>();
+ h264_header.nalus[0].type = H264::NaluType::kSps;
+ h264_header.nalus[1].type = H264::NaluType::kPps;
+ h264_header.nalus[2].type = H264::NaluType::kIdr;
+ h264_header.nalus_length = 3;
+
+ EXPECT_THAT(packet_buffer_.InsertPacket(std::move(packet)).packets,
+ ElementsAre(KeyFrame()));
+}
+
+class PacketBufferH264SpsPpsIdrIsKeyframeTest
+ : public PacketBufferH264XIsKeyframeTest {
+ protected:
+ PacketBufferH264SpsPpsIdrIsKeyframeTest()
+ : PacketBufferH264XIsKeyframeTest(true) {}
+};
+
+TEST_F(PacketBufferH264SpsPpsIdrIsKeyframeTest, IdrIsNotKeyframe) {
+ auto packet = CreatePacket();
+ auto& h264_header =
+ packet->video_header.video_type_header.emplace<RTPVideoHeaderH264>();
+ h264_header.nalus[0].type = H264::NaluType::kIdr;
+ h264_header.nalus_length = 1;
+
+ EXPECT_THAT(packet_buffer_.InsertPacket(std::move(packet)).packets,
+ ElementsAre(DeltaFrame()));
+}
+
+TEST_F(PacketBufferH264SpsPpsIdrIsKeyframeTest, SpsPpsIsNotKeyframe) {
+ auto packet = CreatePacket();
+ auto& h264_header =
+ packet->video_header.video_type_header.emplace<RTPVideoHeaderH264>();
+ h264_header.nalus[0].type = H264::NaluType::kSps;
+ h264_header.nalus[1].type = H264::NaluType::kPps;
+ h264_header.nalus_length = 2;
+
+ EXPECT_THAT(packet_buffer_.InsertPacket(std::move(packet)).packets,
+ ElementsAre(DeltaFrame()));
+}
+
+TEST_F(PacketBufferH264SpsPpsIdrIsKeyframeTest, SpsPpsIdrIsKeyframe) {
+ auto packet = CreatePacket();
+ auto& h264_header =
+ packet->video_header.video_type_header.emplace<RTPVideoHeaderH264>();
+ h264_header.nalus[0].type = H264::NaluType::kSps;
+ h264_header.nalus[1].type = H264::NaluType::kPps;
+ h264_header.nalus[2].type = H264::NaluType::kIdr;
+ h264_header.nalus_length = 3;
+
+ EXPECT_THAT(packet_buffer_.InsertPacket(std::move(packet)).packets,
+ ElementsAre(KeyFrame()));
+}
+
+} // namespace
+} // namespace video_coding
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/receiver.cc b/third_party/libwebrtc/modules/video_coding/receiver.cc
new file mode 100644
index 0000000000..3f954ec9bf
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/receiver.cc
@@ -0,0 +1,191 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/receiver.h"
+
+
+#include <cstdint>
+#include <cstdlib>
+#include <utility>
+#include <vector>
+
+#include "absl/memory/memory.h"
+#include "api/video/encoded_image.h"
+#include "modules/video_coding/encoded_frame.h"
+#include "modules/video_coding/internal_defines.h"
+#include "modules/video_coding/jitter_buffer_common.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "rtc_base/trace_event.h"
+#include "system_wrappers/include/clock.h"
+
+namespace webrtc {
+
+enum { kMaxReceiverDelayMs = 10000 };
+
+VCMReceiver::VCMReceiver(VCMTiming* timing,
+ Clock* clock,
+ const FieldTrialsView& field_trials)
+ : VCMReceiver::VCMReceiver(timing,
+ clock,
+ absl::WrapUnique(EventWrapper::Create()),
+ absl::WrapUnique(EventWrapper::Create()),
+ field_trials) {}
+
+VCMReceiver::VCMReceiver(VCMTiming* timing,
+ Clock* clock,
+ std::unique_ptr<EventWrapper> receiver_event,
+ std::unique_ptr<EventWrapper> jitter_buffer_event,
+ const FieldTrialsView& field_trials)
+ : clock_(clock),
+ jitter_buffer_(clock_, std::move(jitter_buffer_event), field_trials),
+ timing_(timing),
+ render_wait_event_(std::move(receiver_event)),
+ max_video_delay_ms_(kMaxVideoDelayMs) {
+ jitter_buffer_.Start();
+}
+
+VCMReceiver::~VCMReceiver() {
+ render_wait_event_->Set();
+}
+
+int32_t VCMReceiver::InsertPacket(const VCMPacket& packet) {
+ // Insert the packet into the jitter buffer. The packet can either be empty or
+ // contain media at this point.
+ bool retransmitted = false;
+ const VCMFrameBufferEnum ret =
+ jitter_buffer_.InsertPacket(packet, &retransmitted);
+ if (ret == kOldPacket) {
+ return VCM_OK;
+ } else if (ret == kFlushIndicator) {
+ return VCM_FLUSH_INDICATOR;
+ } else if (ret < 0) {
+ return VCM_JITTER_BUFFER_ERROR;
+ }
+ if (ret == kCompleteSession && !retransmitted) {
+ // We don't want to include timestamps which have suffered from
+ // retransmission here, since we compensate with extra retransmission
+ // delay within the jitter estimate.
+ timing_->IncomingTimestamp(packet.timestamp, clock_->CurrentTime());
+ }
+ return VCM_OK;
+}
+
+VCMEncodedFrame* VCMReceiver::FrameForDecoding(uint16_t max_wait_time_ms,
+ bool prefer_late_decoding) {
+ const int64_t start_time_ms = clock_->TimeInMilliseconds();
+ uint32_t frame_timestamp = 0;
+ int min_playout_delay_ms = -1;
+ int max_playout_delay_ms = -1;
+ int64_t render_time_ms = 0;
+ // Exhaust wait time to get a complete frame for decoding.
+ VCMEncodedFrame* found_frame =
+ jitter_buffer_.NextCompleteFrame(max_wait_time_ms);
+
+ if (found_frame) {
+ frame_timestamp = found_frame->Timestamp();
+ min_playout_delay_ms = found_frame->EncodedImage().playout_delay_.min_ms;
+ max_playout_delay_ms = found_frame->EncodedImage().playout_delay_.max_ms;
+ } else {
+ return nullptr;
+ }
+
+ if (min_playout_delay_ms >= 0)
+ timing_->set_min_playout_delay(TimeDelta::Millis(min_playout_delay_ms));
+
+ if (max_playout_delay_ms >= 0)
+ timing_->set_max_playout_delay(TimeDelta::Millis(max_playout_delay_ms));
+
+  // We have a frame; set timing and render timestamp.
+ timing_->SetJitterDelay(
+ TimeDelta::Millis(jitter_buffer_.EstimatedJitterMs()));
+ const Timestamp now = clock_->CurrentTime();
+ const int64_t now_ms = now.ms();
+ timing_->UpdateCurrentDelay(frame_timestamp);
+ render_time_ms = timing_->RenderTime(frame_timestamp, now).ms();
+ // Check render timing.
+ bool timing_error = false;
+ // Assume that render timing errors are due to changes in the video stream.
+ if (render_time_ms < 0) {
+ timing_error = true;
+ } else if (std::abs(render_time_ms - now_ms) > max_video_delay_ms_) {
+ int frame_delay = static_cast<int>(std::abs(render_time_ms - now_ms));
+ RTC_LOG(LS_WARNING)
+ << "A frame about to be decoded is out of the configured "
+ "delay bounds ("
+ << frame_delay << " > " << max_video_delay_ms_
+ << "). Resetting the video jitter buffer.";
+ timing_error = true;
+ } else if (static_cast<int>(timing_->TargetVideoDelay().ms()) >
+ max_video_delay_ms_) {
+ RTC_LOG(LS_WARNING) << "The video target delay has grown larger than "
+ << max_video_delay_ms_
+ << " ms. Resetting jitter buffer.";
+ timing_error = true;
+ }
+
+ if (timing_error) {
+ // Timing error => reset timing and flush the jitter buffer.
+ jitter_buffer_.Flush();
+ timing_->Reset();
+ return NULL;
+ }
+
+ if (prefer_late_decoding) {
+ // Decode frame as close as possible to the render timestamp.
+ const int32_t available_wait_time =
+ max_wait_time_ms -
+ static_cast<int32_t>(clock_->TimeInMilliseconds() - start_time_ms);
+ uint16_t new_max_wait_time =
+ static_cast<uint16_t>(VCM_MAX(available_wait_time, 0));
+ uint32_t wait_time_ms = rtc::saturated_cast<uint32_t>(
+ timing_
+ ->MaxWaitingTime(Timestamp::Millis(render_time_ms),
+ clock_->CurrentTime(),
+ /*too_many_frames_queued=*/false)
+ .ms());
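+    // Illustration only: with max_wait_time_ms = 50 and 20 ms already spent
+    // waiting for a complete frame, new_max_wait_time is 30 ms; if
+    // MaxWaitingTime() reports 40 ms left until render time, we wait the
+    // 30 ms we are allowed and return NULL below.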
+ if (new_max_wait_time < wait_time_ms) {
+      // We're not allowed to wait all the way until the frame is due to be
+      // rendered; wait as long as we are allowed (to avoid busy looping) and
+      // then return NULL. The next call to this function might return the
+      // frame.
+ render_wait_event_->Wait(new_max_wait_time);
+ return NULL;
+ }
+ // Wait until it's time to render.
+ render_wait_event_->Wait(wait_time_ms);
+ }
+
+ // Extract the frame from the jitter buffer and set the render time.
+ VCMEncodedFrame* frame = jitter_buffer_.ExtractAndSetDecode(frame_timestamp);
+ if (frame == NULL) {
+ return NULL;
+ }
+ frame->SetRenderTime(render_time_ms);
+ TRACE_EVENT_ASYNC_STEP1("webrtc", "Video", frame->Timestamp(), "SetRenderTS",
+ "render_time", frame->RenderTimeMs());
+ return frame;
+}
+
+void VCMReceiver::ReleaseFrame(VCMEncodedFrame* frame) {
+ jitter_buffer_.ReleaseFrame(frame);
+}
+
+void VCMReceiver::SetNackSettings(size_t max_nack_list_size,
+ int max_packet_age_to_nack,
+ int max_incomplete_time_ms) {
+ jitter_buffer_.SetNackSettings(max_nack_list_size, max_packet_age_to_nack,
+ max_incomplete_time_ms);
+}
+
+std::vector<uint16_t> VCMReceiver::NackList(bool* request_key_frame) {
+ return jitter_buffer_.GetNackList(request_key_frame);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/receiver.h b/third_party/libwebrtc/modules/video_coding/receiver.h
new file mode 100644
index 0000000000..069f8c55c7
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/receiver.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_RECEIVER_H_
+#define MODULES_VIDEO_CODING_RECEIVER_H_
+
+#include <memory>
+#include <vector>
+
+#include "api/field_trials_view.h"
+#include "modules/video_coding/event_wrapper.h"
+#include "modules/video_coding/include/video_coding.h"
+#include "modules/video_coding/include/video_coding_defines.h"
+#include "modules/video_coding/jitter_buffer.h"
+#include "modules/video_coding/packet.h"
+#include "modules/video_coding/timing/timing.h"
+
+namespace webrtc {
+
+class Clock;
+class VCMEncodedFrame;
+
+class VCMReceiver {
+ public:
+ VCMReceiver(VCMTiming* timing,
+ Clock* clock,
+ const FieldTrialsView& field_trials);
+
+  // Using this constructor, you can specify a different event implementation
+  // for the jitter buffer. Useful for unit tests when you want to simulate
+  // incoming packets, in which case the jitter buffer's wait event is
+  // different from that of VCMReceiver itself.
+ VCMReceiver(VCMTiming* timing,
+ Clock* clock,
+ std::unique_ptr<EventWrapper> receiver_event,
+ std::unique_ptr<EventWrapper> jitter_buffer_event,
+ const FieldTrialsView& field_trials);
+
+ ~VCMReceiver();
+
+ int32_t InsertPacket(const VCMPacket& packet);
+ VCMEncodedFrame* FrameForDecoding(uint16_t max_wait_time_ms,
+ bool prefer_late_decoding);
+ void ReleaseFrame(VCMEncodedFrame* frame);
+
+ // NACK.
+ void SetNackSettings(size_t max_nack_list_size,
+ int max_packet_age_to_nack,
+ int max_incomplete_time_ms);
+ std::vector<uint16_t> NackList(bool* request_key_frame);
+
+ private:
+ Clock* const clock_;
+ VCMJitterBuffer jitter_buffer_;
+ VCMTiming* timing_;
+ std::unique_ptr<EventWrapper> render_wait_event_;
+ int max_video_delay_ms_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_RECEIVER_H_
diff --git a/third_party/libwebrtc/modules/video_coding/receiver_unittest.cc b/third_party/libwebrtc/modules/video_coding/receiver_unittest.cc
new file mode 100644
index 0000000000..2beb97e972
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/receiver_unittest.cc
@@ -0,0 +1,493 @@
+/* Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/receiver.h"
+
+#include <string.h>
+
+#include <cstdint>
+#include <memory>
+#include <queue>
+#include <vector>
+
+#include "modules/video_coding/encoded_frame.h"
+#include "modules/video_coding/jitter_buffer_common.h"
+#include "modules/video_coding/packet.h"
+#include "modules/video_coding/test/stream_generator.h"
+#include "modules/video_coding/timing/timing.h"
+#include "rtc_base/checks.h"
+#include "system_wrappers/include/clock.h"
+#include "test/gtest.h"
+#include "test/scoped_key_value_config.h"
+
+namespace webrtc {
+
+class TestVCMReceiver : public ::testing::Test {
+ protected:
+ TestVCMReceiver()
+ : clock_(0),
+ timing_(&clock_, field_trials_),
+ receiver_(&timing_, &clock_, field_trials_),
+ stream_generator_(0, clock_.TimeInMilliseconds()) {}
+
+ int32_t InsertPacket(int index) {
+ VCMPacket packet;
+ bool packet_available = stream_generator_.GetPacket(&packet, index);
+ EXPECT_TRUE(packet_available);
+ if (!packet_available)
+ return kGeneralError; // Return here to avoid crashes below.
+ return receiver_.InsertPacket(packet);
+ }
+
+ int32_t InsertPacketAndPop(int index) {
+ VCMPacket packet;
+ bool packet_available = stream_generator_.PopPacket(&packet, index);
+ EXPECT_TRUE(packet_available);
+ if (!packet_available)
+ return kGeneralError; // Return here to avoid crashes below.
+ return receiver_.InsertPacket(packet);
+ }
+
+ int32_t InsertFrame(VideoFrameType frame_type, bool complete) {
+ int num_of_packets = complete ? 1 : 2;
+ stream_generator_.GenerateFrame(
+ frame_type,
+ (frame_type != VideoFrameType::kEmptyFrame) ? num_of_packets : 0,
+ (frame_type == VideoFrameType::kEmptyFrame) ? 1 : 0,
+ clock_.TimeInMilliseconds());
+ int32_t ret = InsertPacketAndPop(0);
+ if (!complete) {
+ // Drop the second packet.
+ VCMPacket packet;
+ stream_generator_.PopPacket(&packet, 0);
+ }
+ clock_.AdvanceTimeMilliseconds(kDefaultFramePeriodMs);
+ return ret;
+ }
+
+ bool DecodeNextFrame() {
+ VCMEncodedFrame* frame = receiver_.FrameForDecoding(0, false);
+ if (!frame)
+ return false;
+ receiver_.ReleaseFrame(frame);
+ return true;
+ }
+
+ test::ScopedKeyValueConfig field_trials_;
+ SimulatedClock clock_;
+ VCMTiming timing_;
+ VCMReceiver receiver_;
+ StreamGenerator stream_generator_;
+};
+
+TEST_F(TestVCMReceiver, NonDecodableDuration_Empty) {
+ const size_t kMaxNackListSize = 1000;
+ const int kMaxPacketAgeToNack = 1000;
+ const int kMaxNonDecodableDuration = 500;
+ const int kMinDelayMs = 500;
+ receiver_.SetNackSettings(kMaxNackListSize, kMaxPacketAgeToNack,
+ kMaxNonDecodableDuration);
+ EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameKey, true), kNoError);
+ // Advance time until it's time to decode the key frame.
+ clock_.AdvanceTimeMilliseconds(kMinDelayMs);
+ EXPECT_TRUE(DecodeNextFrame());
+ bool request_key_frame = false;
+ std::vector<uint16_t> nack_list = receiver_.NackList(&request_key_frame);
+ EXPECT_FALSE(request_key_frame);
+}
+
+TEST_F(TestVCMReceiver, NonDecodableDuration_NoKeyFrame) {
+ const size_t kMaxNackListSize = 1000;
+ const int kMaxPacketAgeToNack = 1000;
+ const int kMaxNonDecodableDuration = 500;
+ receiver_.SetNackSettings(kMaxNackListSize, kMaxPacketAgeToNack,
+ kMaxNonDecodableDuration);
+ const int kNumFrames = kDefaultFrameRate * kMaxNonDecodableDuration / 1000;
+ for (int i = 0; i < kNumFrames; ++i) {
+ EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameDelta, true), kNoError);
+ }
+ bool request_key_frame = false;
+ std::vector<uint16_t> nack_list = receiver_.NackList(&request_key_frame);
+ EXPECT_TRUE(request_key_frame);
+}
+
+TEST_F(TestVCMReceiver, NonDecodableDuration_OneIncomplete) {
+ const size_t kMaxNackListSize = 1000;
+ const int kMaxPacketAgeToNack = 1000;
+ const int kMaxNonDecodableDuration = 500;
+ const int kMaxNonDecodableDurationFrames =
+ (kDefaultFrameRate * kMaxNonDecodableDuration + 500) / 1000;
+ const int kMinDelayMs = 500;
+ receiver_.SetNackSettings(kMaxNackListSize, kMaxPacketAgeToNack,
+ kMaxNonDecodableDuration);
+ timing_.set_min_playout_delay(TimeDelta::Millis(kMinDelayMs));
+ int64_t key_frame_inserted = clock_.TimeInMilliseconds();
+ EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameKey, true), kNoError);
+ // Insert an incomplete frame.
+ EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameDelta, false), kNoError);
+  // Insert enough frames to span a too-long non-decodable duration.
+ for (int i = 0; i < kMaxNonDecodableDurationFrames; ++i) {
+ EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameDelta, true), kNoError);
+ }
+ // Advance time until it's time to decode the key frame.
+ clock_.AdvanceTimeMilliseconds(kMinDelayMs - clock_.TimeInMilliseconds() -
+ key_frame_inserted);
+ EXPECT_TRUE(DecodeNextFrame());
+ // Make sure we get a key frame request.
+ bool request_key_frame = false;
+ std::vector<uint16_t> nack_list = receiver_.NackList(&request_key_frame);
+ EXPECT_TRUE(request_key_frame);
+}
+
+TEST_F(TestVCMReceiver, NonDecodableDuration_NoTrigger) {
+ const size_t kMaxNackListSize = 1000;
+ const int kMaxPacketAgeToNack = 1000;
+ const int kMaxNonDecodableDuration = 500;
+ const int kMaxNonDecodableDurationFrames =
+ (kDefaultFrameRate * kMaxNonDecodableDuration + 500) / 1000;
+ const int kMinDelayMs = 500;
+ receiver_.SetNackSettings(kMaxNackListSize, kMaxPacketAgeToNack,
+ kMaxNonDecodableDuration);
+ timing_.set_min_playout_delay(TimeDelta::Millis(kMinDelayMs));
+ int64_t key_frame_inserted = clock_.TimeInMilliseconds();
+ EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameKey, true), kNoError);
+ // Insert an incomplete frame.
+ EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameDelta, false), kNoError);
+  // Insert all but one of the frames needed to trigger a key frame request
+  // due to a too-long non-decodable duration.
+ for (int i = 0; i < kMaxNonDecodableDurationFrames - 1; ++i) {
+ EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameDelta, true), kNoError);
+ }
+ // Advance time until it's time to decode the key frame.
+ clock_.AdvanceTimeMilliseconds(kMinDelayMs - clock_.TimeInMilliseconds() -
+ key_frame_inserted);
+ EXPECT_TRUE(DecodeNextFrame());
+  // Make sure we don't get a key frame request, since we haven't inserted
+  // enough frames to exceed the maximum non-decodable duration.
+ bool request_key_frame = false;
+ std::vector<uint16_t> nack_list = receiver_.NackList(&request_key_frame);
+ EXPECT_FALSE(request_key_frame);
+}
+
+TEST_F(TestVCMReceiver, NonDecodableDuration_NoTrigger2) {
+ const size_t kMaxNackListSize = 1000;
+ const int kMaxPacketAgeToNack = 1000;
+ const int kMaxNonDecodableDuration = 500;
+ const int kMaxNonDecodableDurationFrames =
+ (kDefaultFrameRate * kMaxNonDecodableDuration + 500) / 1000;
+ const int kMinDelayMs = 500;
+ receiver_.SetNackSettings(kMaxNackListSize, kMaxPacketAgeToNack,
+ kMaxNonDecodableDuration);
+ timing_.set_min_playout_delay(TimeDelta::Millis(kMinDelayMs));
+ int64_t key_frame_inserted = clock_.TimeInMilliseconds();
+ EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameKey, true), kNoError);
+  // Insert enough frames to span a too-long non-decodable duration, except
+  // that this time there are no losses.
+ for (int i = 0; i < kMaxNonDecodableDurationFrames; ++i) {
+ EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameDelta, true), kNoError);
+ }
+ // Insert an incomplete frame.
+ EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameDelta, false), kNoError);
+ // Advance time until it's time to decode the key frame.
+ clock_.AdvanceTimeMilliseconds(kMinDelayMs - clock_.TimeInMilliseconds() -
+ key_frame_inserted);
+ EXPECT_TRUE(DecodeNextFrame());
+  // Make sure we don't get a key frame request, since the non-decodable
+  // duration is only one frame.
+ bool request_key_frame = false;
+ std::vector<uint16_t> nack_list = receiver_.NackList(&request_key_frame);
+ EXPECT_FALSE(request_key_frame);
+}
+
+TEST_F(TestVCMReceiver, NonDecodableDuration_KeyFrameAfterIncompleteFrames) {
+ const size_t kMaxNackListSize = 1000;
+ const int kMaxPacketAgeToNack = 1000;
+ const int kMaxNonDecodableDuration = 500;
+ const int kMaxNonDecodableDurationFrames =
+ (kDefaultFrameRate * kMaxNonDecodableDuration + 500) / 1000;
+ const int kMinDelayMs = 500;
+ receiver_.SetNackSettings(kMaxNackListSize, kMaxPacketAgeToNack,
+ kMaxNonDecodableDuration);
+ timing_.set_min_playout_delay(TimeDelta::Millis(kMinDelayMs));
+ int64_t key_frame_inserted = clock_.TimeInMilliseconds();
+ EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameKey, true), kNoError);
+ // Insert an incomplete frame.
+ EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameDelta, false), kNoError);
+  // Insert enough frames to span a too-long non-decodable duration.
+ for (int i = 0; i < kMaxNonDecodableDurationFrames; ++i) {
+ EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameDelta, true), kNoError);
+ }
+ EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameKey, true), kNoError);
+ // Advance time until it's time to decode the key frame.
+ clock_.AdvanceTimeMilliseconds(kMinDelayMs - clock_.TimeInMilliseconds() -
+ key_frame_inserted);
+ EXPECT_TRUE(DecodeNextFrame());
+ // Make sure we don't get a key frame request since we have a key frame
+ // in the list.
+ bool request_key_frame = false;
+ std::vector<uint16_t> nack_list = receiver_.NackList(&request_key_frame);
+ EXPECT_FALSE(request_key_frame);
+}
+
+// A simulated clock that, as time elapses, inserts frames into the jitter
+// buffer based on its initial settings.
+class SimulatedClockWithFrames : public SimulatedClock {
+ public:
+ SimulatedClockWithFrames(StreamGenerator* stream_generator,
+ VCMReceiver* receiver)
+ : SimulatedClock(0),
+ stream_generator_(stream_generator),
+ receiver_(receiver) {}
+ virtual ~SimulatedClockWithFrames() {}
+
+  // If `stop_on_frame` is true and the next frame arrives between now and
+  // now + `milliseconds`, the clock will be advanced to the arrival time of
+  // that frame. Otherwise, the clock will be advanced by `milliseconds`.
+  //
+  // In both cases, a frame will be inserted into the jitter buffer at the
+  // instant when the clock time equals timestamps_.front().arrive_time.
+  //
+  // Returns true if some frame arrives between now and now + `milliseconds`.
+ bool AdvanceTimeMilliseconds(int64_t milliseconds, bool stop_on_frame) {
+ return AdvanceTimeMicroseconds(milliseconds * 1000, stop_on_frame);
+ }
+
+ bool AdvanceTimeMicroseconds(int64_t microseconds, bool stop_on_frame) {
+ int64_t start_time = TimeInMicroseconds();
+ int64_t end_time = start_time + microseconds;
+ bool frame_injected = false;
+ while (!timestamps_.empty() &&
+ timestamps_.front().arrive_time <= end_time) {
+ RTC_DCHECK_GE(timestamps_.front().arrive_time, start_time);
+
+ SimulatedClock::AdvanceTimeMicroseconds(timestamps_.front().arrive_time -
+ TimeInMicroseconds());
+ GenerateAndInsertFrame((timestamps_.front().render_time + 500) / 1000);
+ timestamps_.pop();
+ frame_injected = true;
+
+ if (stop_on_frame)
+ return frame_injected;
+ }
+
+ if (TimeInMicroseconds() < end_time) {
+ SimulatedClock::AdvanceTimeMicroseconds(end_time - TimeInMicroseconds());
+ }
+ return frame_injected;
+ }
+
+  // Input timestamps are in milliseconds. `arrive_timestamps` must be
+  // positive and in increasing order; they determine when frames are
+  // inserted into the jitter buffer. `render_timestamps` are the timestamps
+  // carried on the frames themselves.
+ void SetFrames(const int64_t* arrive_timestamps,
+ const int64_t* render_timestamps,
+ size_t size) {
+ int64_t previous_arrive_timestamp = 0;
+ for (size_t i = 0; i < size; i++) {
+ RTC_CHECK_GE(arrive_timestamps[i], previous_arrive_timestamp);
+ timestamps_.push(TimestampPair(arrive_timestamps[i] * 1000,
+ render_timestamps[i] * 1000));
+ previous_arrive_timestamp = arrive_timestamps[i];
+ }
+ }
+
+ private:
+ struct TimestampPair {
+ TimestampPair(int64_t arrive_timestamp, int64_t render_timestamp)
+ : arrive_time(arrive_timestamp), render_time(render_timestamp) {}
+
+ int64_t arrive_time;
+ int64_t render_time;
+ };
+
+ void GenerateAndInsertFrame(int64_t render_timestamp_ms) {
+ VCMPacket packet;
+ stream_generator_->GenerateFrame(VideoFrameType::kVideoFrameKey,
+ 1, // media packets
+ 0, // empty packets
+ render_timestamp_ms);
+
+ bool packet_available = stream_generator_->PopPacket(&packet, 0);
+ EXPECT_TRUE(packet_available);
+ if (!packet_available)
+ return; // Return here to avoid crashes below.
+ receiver_->InsertPacket(packet);
+ }
+
+ std::queue<TimestampPair> timestamps_;
+ StreamGenerator* stream_generator_;
+ VCMReceiver* receiver_;
+};
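+
+// Sketch of driving SimulatedClockWithFrames (timestamps are illustrative):
+//
+//   int64_t arrive_ms[] = {10, 50, 90};
+//   int64_t render_ms[] = {40, 80, 120};
+//   clock.SetFrames(arrive_ms, render_ms, 3);
+//   // Advances the clock to t = 10 ms, injects the first frame into the
+//   // receiver, and returns true.
+//   clock.AdvanceTimeMilliseconds(100, /*stop_on_frame=*/true);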
+
+// Uses a SimulatedClockWithFrames. A Wait call does one of two things:
+// 1. If `stop_on_frame` is true, the clock is advanced to the exact instant
+//    the first frame arrives, and that frame is inserted into the jitter
+//    buffer; if no frame arrives in the window, the clock is advanced to
+//    now + `max_time`.
+// 2. If `stop_on_frame` is false, the clock is advanced to now + `max_time`,
+//    and all frames arriving between now and now + `max_time` are inserted
+//    into the jitter buffer.
+//
+// This is used to simulate the jitter buffer receiving packets from the
+// network as time elapses.
+
+class FrameInjectEvent : public EventWrapper {
+ public:
+ FrameInjectEvent(SimulatedClockWithFrames* clock, bool stop_on_frame)
+ : clock_(clock), stop_on_frame_(stop_on_frame) {}
+
+ bool Set() override { return true; }
+
+ EventTypeWrapper Wait(int max_time_ms) override {
+ if (clock_->AdvanceTimeMilliseconds(max_time_ms, stop_on_frame_) &&
+ stop_on_frame_) {
+ return EventTypeWrapper::kEventSignaled;
+ } else {
+ return EventTypeWrapper::kEventTimeout;
+ }
+ }
+
+ private:
+ SimulatedClockWithFrames* clock_;
+ bool stop_on_frame_;
+};
+
+class VCMReceiverTimingTest : public ::testing::Test {
+ protected:
+ VCMReceiverTimingTest()
+ : clock_(&stream_generator_, &receiver_),
+ stream_generator_(0, clock_.TimeInMilliseconds()),
+ timing_(&clock_, field_trials_),
+ receiver_(
+ &timing_,
+ &clock_,
+ std::unique_ptr<EventWrapper>(new FrameInjectEvent(&clock_, false)),
+ std::unique_ptr<EventWrapper>(new FrameInjectEvent(&clock_, true)),
+ field_trials_) {}
+
+ virtual void SetUp() {}
+
+ test::ScopedKeyValueConfig field_trials_;
+ SimulatedClockWithFrames clock_;
+ StreamGenerator stream_generator_;
+ VCMTiming timing_;
+ VCMReceiver receiver_;
+};
+
+// Test whether VCMReceiver::FrameForDecoding handles the parameter
+// `max_wait_time_ms` correctly:
+// 1. The function should never take more than `max_wait_time_ms` to execute.
+// 2. If the function returns before now + `max_wait_time_ms`, a frame must be
+//    returned.
+TEST_F(VCMReceiverTimingTest, FrameForDecoding) {
+ const size_t kNumFrames = 100;
+ const int kFramePeriod = 40;
+ int64_t arrive_timestamps[kNumFrames];
+ int64_t render_timestamps[kNumFrames];
+
+  // Construct test samples.
+  // render_timestamps are the timestamps stored in the frames;
+  // arrive_timestamps control when the frame packets are received.
+ for (size_t i = 0; i < kNumFrames; i++) {
+    // Preset the frame rate to 25 Hz, but add a reasonable deviation to
+    // arrive_timestamps to mimic Internet fluctuation.
+ arrive_timestamps[i] =
+ (i + 1) * kFramePeriod + (i % 10) * ((i % 2) ? 1 : -1);
+ render_timestamps[i] = (i + 1) * kFramePeriod;
+ }
+
+ clock_.SetFrames(arrive_timestamps, render_timestamps, kNumFrames);
+
+ // Record how many frames we finally get out of the receiver.
+ size_t num_frames_return = 0;
+
+ const int64_t kMaxWaitTime = 30;
+
+  // Ideally, we should get back every frame fed in via SetFrames above. If
+  // FrameForDecoding erroneously drops frames, this loop never terminates,
+  // and we rely on the build bot's timeout to kill the test.
+ while (num_frames_return < kNumFrames) {
+ int64_t start_time = clock_.TimeInMilliseconds();
+ VCMEncodedFrame* frame = receiver_.FrameForDecoding(kMaxWaitTime, false);
+ int64_t end_time = clock_.TimeInMilliseconds();
+
+    // In any case, FrameForDecoding should not wait longer than
+    // max_wait_time. If we did not get a frame, it must have waited for
+    // exactly max_wait_time. (With the test samples constructed above there
+    // is no timing error, so the only way it returns null is by running out
+    // of time.)
+ if (frame) {
+ receiver_.ReleaseFrame(frame);
+ ++num_frames_return;
+ EXPECT_GE(kMaxWaitTime, end_time - start_time);
+ } else {
+ EXPECT_EQ(kMaxWaitTime, end_time - start_time);
+ }
+ }
+}
+
+// Test whether VCMReceiver::FrameForDecoding handles the parameters
+// `prefer_late_decoding` and `max_wait_time_ms` correctly:
+// 1. The function should never take more than `max_wait_time_ms` to execute.
+// 2. If the function returns before now + `max_wait_time_ms`, a frame must be
+//    returned, and the end time must equal the frame's render timestamp minus
+//    the delay for decoding and rendering.
+TEST_F(VCMReceiverTimingTest, FrameForDecodingPreferLateDecoding) {
+ const size_t kNumFrames = 100;
+ const int kFramePeriod = 40;
+
+ int64_t arrive_timestamps[kNumFrames];
+ int64_t render_timestamps[kNumFrames];
+
+ auto timings = timing_.GetTimings();
+ TimeDelta render_delay = timings.render_delay;
+ TimeDelta max_decode = timings.max_decode_duration;
+
+  // Construct test samples.
+  // render_timestamps are the timestamps stored in the frames;
+  // arrive_timestamps control when the frame packets are received.
+ for (size_t i = 0; i < kNumFrames; i++) {
+    // Preset the frame rate to 25 Hz, but add a reasonable deviation to
+    // arrive_timestamps to mimic Internet fluctuation.
+ arrive_timestamps[i] =
+ (i + 1) * kFramePeriod + (i % 10) * ((i % 2) ? 1 : -1);
+ render_timestamps[i] = (i + 1) * kFramePeriod;
+ }
+
+ clock_.SetFrames(arrive_timestamps, render_timestamps, kNumFrames);
+
+ // Record how many frames we finally get out of the receiver.
+ size_t num_frames_return = 0;
+ const int64_t kMaxWaitTime = 30;
+ bool prefer_late_decoding = true;
+ while (num_frames_return < kNumFrames) {
+ int64_t start_time = clock_.TimeInMilliseconds();
+
+ VCMEncodedFrame* frame =
+ receiver_.FrameForDecoding(kMaxWaitTime, prefer_late_decoding);
+ int64_t end_time = clock_.TimeInMilliseconds();
+ if (frame) {
+ EXPECT_EQ(frame->RenderTimeMs() - max_decode.ms() - render_delay.ms(),
+ end_time);
+ receiver_.ReleaseFrame(frame);
+ ++num_frames_return;
+ } else {
+ EXPECT_EQ(kMaxWaitTime, end_time - start_time);
+ }
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/rtp_frame_id_only_ref_finder.cc b/third_party/libwebrtc/modules/video_coding/rtp_frame_id_only_ref_finder.cc
new file mode 100644
index 0000000000..9f3d5bb296
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/rtp_frame_id_only_ref_finder.cc
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/rtp_frame_id_only_ref_finder.h"
+
+#include <utility>
+
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+RtpFrameReferenceFinder::ReturnVector RtpFrameIdOnlyRefFinder::ManageFrame(
+ std::unique_ptr<RtpFrameObject> frame,
+ int frame_id) {
+ frame->SetSpatialIndex(0);
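+  // Picture ids are 15 bits on the wire (kFrameIdLength == 1 << 15), so mask
+  // `frame_id` into that range before unwrapping it into a monotonically
+  // increasing id. Every frame except a keyframe then references the frame
+  // immediately preceding it.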
+ frame->SetId(unwrapper_.Unwrap(frame_id & (kFrameIdLength - 1)));
+ frame->num_references =
+ frame->frame_type() == VideoFrameType::kVideoFrameKey ? 0 : 1;
+ frame->references[0] = frame->Id() - 1;
+
+ RtpFrameReferenceFinder::ReturnVector res;
+ res.push_back(std::move(frame));
+ return res;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/rtp_frame_id_only_ref_finder.h b/third_party/libwebrtc/modules/video_coding/rtp_frame_id_only_ref_finder.h
new file mode 100644
index 0000000000..1df4870c5b
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/rtp_frame_id_only_ref_finder.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_RTP_FRAME_ID_ONLY_REF_FINDER_H_
+#define MODULES_VIDEO_CODING_RTP_FRAME_ID_ONLY_REF_FINDER_H_
+
+#include <memory>
+
+#include "absl/container/inlined_vector.h"
+#include "modules/video_coding/frame_object.h"
+#include "modules/video_coding/rtp_frame_reference_finder.h"
+#include "rtc_base/numerics/sequence_number_util.h"
+
+namespace webrtc {
+
+class RtpFrameIdOnlyRefFinder {
+ public:
+ RtpFrameIdOnlyRefFinder() = default;
+
+ RtpFrameReferenceFinder::ReturnVector ManageFrame(
+ std::unique_ptr<RtpFrameObject> frame,
+ int frame_id);
+
+ private:
+ static constexpr int kFrameIdLength = 1 << 15;
+ SeqNumUnwrapper<uint16_t, kFrameIdLength> unwrapper_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_RTP_FRAME_ID_ONLY_REF_FINDER_H_
diff --git a/third_party/libwebrtc/modules/video_coding/rtp_frame_reference_finder.cc b/third_party/libwebrtc/modules/video_coding/rtp_frame_reference_finder.cc
new file mode 100644
index 0000000000..a44b76bf15
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/rtp_frame_reference_finder.cc
@@ -0,0 +1,189 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/rtp_frame_reference_finder.h"
+
+#include <utility>
+
+#include "absl/types/variant.h"
+#include "modules/video_coding/frame_object.h"
+#include "modules/video_coding/rtp_frame_id_only_ref_finder.h"
+#include "modules/video_coding/rtp_generic_ref_finder.h"
+#include "modules/video_coding/rtp_seq_num_only_ref_finder.h"
+#include "modules/video_coding/rtp_vp8_ref_finder.h"
+#include "modules/video_coding/rtp_vp9_ref_finder.h"
+
+namespace webrtc {
+namespace internal {
+class RtpFrameReferenceFinderImpl {
+ public:
+ RtpFrameReferenceFinderImpl() = default;
+
+ RtpFrameReferenceFinder::ReturnVector ManageFrame(
+ std::unique_ptr<RtpFrameObject> frame);
+ RtpFrameReferenceFinder::ReturnVector PaddingReceived(uint16_t seq_num);
+ void ClearTo(uint16_t seq_num);
+
+ private:
+ using RefFinder = absl::variant<absl::monostate,
+ RtpGenericFrameRefFinder,
+ RtpFrameIdOnlyRefFinder,
+ RtpSeqNumOnlyRefFinder,
+ RtpVp8RefFinder,
+ RtpVp9RefFinder>;
+
+ template <typename T>
+ T& GetRefFinderAs();
+ RefFinder ref_finder_;
+};
+
+RtpFrameReferenceFinder::ReturnVector RtpFrameReferenceFinderImpl::ManageFrame(
+ std::unique_ptr<RtpFrameObject> frame) {
+ const RTPVideoHeader& video_header = frame->GetRtpVideoHeader();
+
+ if (video_header.generic.has_value()) {
+ return GetRefFinderAs<RtpGenericFrameRefFinder>().ManageFrame(
+ std::move(frame), *video_header.generic);
+ }
+
+ switch (frame->codec_type()) {
+ case kVideoCodecVP8: {
+ const RTPVideoHeaderVP8& vp8_header =
+ absl::get<RTPVideoHeaderVP8>(video_header.video_type_header);
+
+ if (vp8_header.temporalIdx == kNoTemporalIdx ||
+ vp8_header.tl0PicIdx == kNoTl0PicIdx) {
+ if (vp8_header.pictureId == kNoPictureId) {
+ return GetRefFinderAs<RtpSeqNumOnlyRefFinder>().ManageFrame(
+ std::move(frame));
+ }
+
+ return GetRefFinderAs<RtpFrameIdOnlyRefFinder>().ManageFrame(
+ std::move(frame), vp8_header.pictureId);
+ }
+
+ return GetRefFinderAs<RtpVp8RefFinder>().ManageFrame(std::move(frame));
+ }
+ case kVideoCodecVP9: {
+ const RTPVideoHeaderVP9& vp9_header =
+ absl::get<RTPVideoHeaderVP9>(video_header.video_type_header);
+
+ if (vp9_header.temporal_idx == kNoTemporalIdx) {
+ if (vp9_header.picture_id == kNoPictureId) {
+ return GetRefFinderAs<RtpSeqNumOnlyRefFinder>().ManageFrame(
+ std::move(frame));
+ }
+
+ return GetRefFinderAs<RtpFrameIdOnlyRefFinder>().ManageFrame(
+ std::move(frame), vp9_header.picture_id);
+ }
+
+ return GetRefFinderAs<RtpVp9RefFinder>().ManageFrame(std::move(frame));
+ }
+ case kVideoCodecGeneric: {
+ if (auto* generic_header = absl::get_if<RTPVideoHeaderLegacyGeneric>(
+ &video_header.video_type_header)) {
+ return GetRefFinderAs<RtpFrameIdOnlyRefFinder>().ManageFrame(
+ std::move(frame), generic_header->picture_id);
+ }
+
+ return GetRefFinderAs<RtpSeqNumOnlyRefFinder>().ManageFrame(
+ std::move(frame));
+ }
+ default: {
+ return GetRefFinderAs<RtpSeqNumOnlyRefFinder>().ManageFrame(
+ std::move(frame));
+ }
+ }
+}
+
+RtpFrameReferenceFinder::ReturnVector
+RtpFrameReferenceFinderImpl::PaddingReceived(uint16_t seq_num) {
+ if (auto* ref_finder = absl::get_if<RtpSeqNumOnlyRefFinder>(&ref_finder_)) {
+ return ref_finder->PaddingReceived(seq_num);
+ }
+ return {};
+}
+
+void RtpFrameReferenceFinderImpl::ClearTo(uint16_t seq_num) {
+ struct ClearToVisitor {
+ void operator()(absl::monostate& ref_finder) {}
+ void operator()(RtpGenericFrameRefFinder& ref_finder) {}
+ void operator()(RtpFrameIdOnlyRefFinder& ref_finder) {}
+ void operator()(RtpSeqNumOnlyRefFinder& ref_finder) {
+ ref_finder.ClearTo(seq_num);
+ }
+ void operator()(RtpVp8RefFinder& ref_finder) {
+ ref_finder.ClearTo(seq_num);
+ }
+ void operator()(RtpVp9RefFinder& ref_finder) {
+ ref_finder.ClearTo(seq_num);
+ }
+ uint16_t seq_num;
+ };
+
+ absl::visit(ClearToVisitor{seq_num}, ref_finder_);
+}
+
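+// Returns the ref finder of type T held by `ref_finder_`, lazily replacing
+// the variant's current alternative with a default-constructed T if needed.
+// Switching finder type mid-stream therefore discards the previous finder's
+// stashed state.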
+template <typename T>
+T& RtpFrameReferenceFinderImpl::GetRefFinderAs() {
+ if (auto* ref_finder = absl::get_if<T>(&ref_finder_)) {
+ return *ref_finder;
+ }
+ return ref_finder_.emplace<T>();
+}
+
+} // namespace internal
+
+RtpFrameReferenceFinder::RtpFrameReferenceFinder()
+ : RtpFrameReferenceFinder(0) {}
+
+RtpFrameReferenceFinder::RtpFrameReferenceFinder(int64_t picture_id_offset)
+ : picture_id_offset_(picture_id_offset),
+ impl_(std::make_unique<internal::RtpFrameReferenceFinderImpl>()) {}
+
+RtpFrameReferenceFinder::~RtpFrameReferenceFinder() = default;
+
+RtpFrameReferenceFinder::ReturnVector RtpFrameReferenceFinder::ManageFrame(
+ std::unique_ptr<RtpFrameObject> frame) {
+ // If we have cleared past this frame, drop it.
+ if (cleared_to_seq_num_ != -1 &&
+ AheadOf<uint16_t>(cleared_to_seq_num_, frame->first_seq_num())) {
+ return {};
+ }
+
+ auto frames = impl_->ManageFrame(std::move(frame));
+ AddPictureIdOffset(frames);
+ return frames;
+}
+
+RtpFrameReferenceFinder::ReturnVector RtpFrameReferenceFinder::PaddingReceived(
+ uint16_t seq_num) {
+ auto frames = impl_->PaddingReceived(seq_num);
+ AddPictureIdOffset(frames);
+ return frames;
+}
+
+void RtpFrameReferenceFinder::ClearTo(uint16_t seq_num) {
+ cleared_to_seq_num_ = seq_num;
+ impl_->ClearTo(seq_num);
+}
+
+void RtpFrameReferenceFinder::AddPictureIdOffset(ReturnVector& frames) {
+ for (auto& frame : frames) {
+ frame->SetId(frame->Id() + picture_id_offset_);
+ for (size_t i = 0; i < frame->num_references; ++i) {
+ frame->references[i] += picture_id_offset_;
+ }
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/rtp_frame_reference_finder.h b/third_party/libwebrtc/modules/video_coding/rtp_frame_reference_finder.h
new file mode 100644
index 0000000000..9ce63cd8a4
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/rtp_frame_reference_finder.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_RTP_FRAME_REFERENCE_FINDER_H_
+#define MODULES_VIDEO_CODING_RTP_FRAME_REFERENCE_FINDER_H_
+
+#include <memory>
+
+#include "modules/video_coding/frame_object.h"
+
+namespace webrtc {
+namespace internal {
+class RtpFrameReferenceFinderImpl;
+} // namespace internal
+
+class RtpFrameReferenceFinder {
+ public:
+ using ReturnVector = absl::InlinedVector<std::unique_ptr<RtpFrameObject>, 3>;
+
+ RtpFrameReferenceFinder();
+ explicit RtpFrameReferenceFinder(int64_t picture_id_offset);
+ ~RtpFrameReferenceFinder();
+
+  // The RtpFrameReferenceFinder will hold onto the frame until:
+  //  - the information required to determine its references has been
+  //    received, in which case it (and possibly other frames) is returned, or
+  //  - there are too many stashed frames (determined by `kMaxStashedFrames`),
+  //    in which case it gets dropped, or
+  //  - it gets cleared by ClearTo, in which case it is dropped, or
+  //  - the frame is old, in which case it also gets dropped.
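+  //
+  // Typical use (sketch; OnCompleteFrame is a hypothetical consumer, not
+  // part of this class):
+  //
+  //   for (std::unique_ptr<RtpFrameObject>& complete :
+  //        finder.ManageFrame(std::move(frame)))
+  //     OnCompleteFrame(std::move(complete));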
+ ReturnVector ManageFrame(std::unique_ptr<RtpFrameObject> frame);
+
+ // Notifies that padding has been received, which the reference finder
+ // might need to calculate the references of a frame.
+ ReturnVector PaddingReceived(uint16_t seq_num);
+
+ // Clear all stashed frames that include packets older than `seq_num`.
+ void ClearTo(uint16_t seq_num);
+
+ private:
+ void AddPictureIdOffset(ReturnVector& frames);
+
+ // How far frames have been cleared out of the buffer by RTP sequence number.
+ // A frame will be cleared if it contains a packet with a sequence number
+ // older than `cleared_to_seq_num_`.
+ int cleared_to_seq_num_ = -1;
+ const int64_t picture_id_offset_;
+ std::unique_ptr<internal::RtpFrameReferenceFinderImpl> impl_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_RTP_FRAME_REFERENCE_FINDER_H_
diff --git a/third_party/libwebrtc/modules/video_coding/rtp_frame_reference_finder_unittest.cc b/third_party/libwebrtc/modules/video_coding/rtp_frame_reference_finder_unittest.cc
new file mode 100644
index 0000000000..c58f1a987d
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/rtp_frame_reference_finder_unittest.cc
@@ -0,0 +1,322 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <cstring>
+#include <limits>
+#include <map>
+#include <set>
+#include <utility>
+#include <vector>
+
+#include "modules/video_coding/frame_object.h"
+#include "modules/video_coding/packet_buffer.h"
+#include "modules/video_coding/rtp_frame_reference_finder.h"
+#include "rtc_base/random.h"
+#include "rtc_base/ref_count.h"
+#include "system_wrappers/include/clock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+namespace {
+std::unique_ptr<RtpFrameObject> CreateFrame(
+ uint16_t seq_num_start,
+ uint16_t seq_num_end,
+ bool keyframe,
+ VideoCodecType codec,
+ const RTPVideoTypeHeader& video_type_header) {
+ RTPVideoHeader video_header;
+ video_header.frame_type = keyframe ? VideoFrameType::kVideoFrameKey
+ : VideoFrameType::kVideoFrameDelta;
+ video_header.video_type_header = video_type_header;
+
+ // clang-format off
+ return std::make_unique<RtpFrameObject>(
+ seq_num_start,
+ seq_num_end,
+ /*markerBit=*/true,
+ /*times_nacked=*/0,
+ /*first_packet_received_time=*/0,
+ /*last_packet_received_time=*/0,
+ /*rtp_timestamp=*/0,
+ /*ntp_time_ms=*/0,
+ VideoSendTiming(),
+ /*payload_type=*/0,
+ codec,
+ kVideoRotation_0,
+ VideoContentType::UNSPECIFIED,
+ video_header,
+ /*color_space=*/absl::nullopt,
+ RtpPacketInfos(),
+ EncodedImageBuffer::Create(/*size=*/0));
+ // clang-format on
+}
+} // namespace
+
+class TestRtpFrameReferenceFinder : public ::testing::Test {
+ protected:
+ TestRtpFrameReferenceFinder()
+ : rand_(0x8739211),
+ reference_finder_(std::make_unique<RtpFrameReferenceFinder>()),
+ frames_from_callback_(FrameComp()) {}
+
+ uint16_t Rand() { return rand_.Rand<uint16_t>(); }
+
+ void OnCompleteFrames(RtpFrameReferenceFinder::ReturnVector frames) {
+ for (auto& frame : frames) {
+ int64_t pid = frame->Id();
+ uint16_t sidx = *frame->SpatialIndex();
+ auto frame_it = frames_from_callback_.find(std::make_pair(pid, sidx));
+ if (frame_it != frames_from_callback_.end()) {
+ ADD_FAILURE() << "Already received frame with (pid:sidx): (" << pid
+ << ":" << sidx << ")";
+ return;
+ }
+
+ frames_from_callback_.insert(
+ std::make_pair(std::make_pair(pid, sidx), std::move(frame)));
+ }
+ }
+
+ void InsertGeneric(uint16_t seq_num_start,
+ uint16_t seq_num_end,
+ bool keyframe) {
+ std::unique_ptr<RtpFrameObject> frame =
+ CreateFrame(seq_num_start, seq_num_end, keyframe, kVideoCodecGeneric,
+ RTPVideoTypeHeader());
+
+ OnCompleteFrames(reference_finder_->ManageFrame(std::move(frame)));
+ }
+
+ void InsertH264(uint16_t seq_num_start, uint16_t seq_num_end, bool keyframe) {
+ std::unique_ptr<RtpFrameObject> frame =
+ CreateFrame(seq_num_start, seq_num_end, keyframe, kVideoCodecH264,
+ RTPVideoTypeHeader());
+ OnCompleteFrames(reference_finder_->ManageFrame(std::move(frame)));
+ }
+
+ void InsertPadding(uint16_t seq_num) {
+ OnCompleteFrames(reference_finder_->PaddingReceived(seq_num));
+ }
+
+ // Check if a frame with picture id `pid` and spatial index `sidx` has been
+ // delivered from the packet buffer, and if so, if it has the references
+ // specified by `refs`.
+ template <typename... T>
+ void CheckReferences(int64_t picture_id_offset,
+ uint16_t sidx,
+ T... refs) const {
+ int64_t pid = picture_id_offset;
+ auto frame_it = frames_from_callback_.find(std::make_pair(pid, sidx));
+ if (frame_it == frames_from_callback_.end()) {
+ ADD_FAILURE() << "Could not find frame with (pid:sidx): (" << pid << ":"
+ << sidx << ")";
+ return;
+ }
+
+ std::set<int64_t> actual_refs;
+ for (uint8_t r = 0; r < frame_it->second->num_references; ++r)
+ actual_refs.insert(frame_it->second->references[r]);
+
+ std::set<int64_t> expected_refs;
+ RefsToSet(&expected_refs, refs...);
+
+ ASSERT_EQ(expected_refs, actual_refs);
+ }
+
+ template <typename... T>
+ void CheckReferencesGeneric(int64_t pid, T... refs) const {
+ CheckReferences(pid, 0, refs...);
+ }
+
+ template <typename... T>
+ void CheckReferencesH264(int64_t pid, T... refs) const {
+ CheckReferences(pid, 0, refs...);
+ }
+
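+  // Unpacks the variadic reference list into a set, one recursion step per
+  // argument; the parameterless overload below terminates the recursion.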
+ template <typename... T>
+ void RefsToSet(std::set<int64_t>* m, int64_t ref, T... refs) const {
+ m->insert(ref);
+ RefsToSet(m, refs...);
+ }
+
+ void RefsToSet(std::set<int64_t>* m) const {}
+
+ Random rand_;
+ std::unique_ptr<RtpFrameReferenceFinder> reference_finder_;
+ struct FrameComp {
+ bool operator()(const std::pair<int64_t, uint8_t> f1,
+ const std::pair<int64_t, uint8_t> f2) const {
+ if (f1.first == f2.first)
+ return f1.second < f2.second;
+ return f1.first < f2.first;
+ }
+ };
+  std::map<std::pair<int64_t, uint8_t>, std::unique_ptr<EncodedFrame>,
+           FrameComp>
+      frames_from_callback_;
+};
+
+TEST_F(TestRtpFrameReferenceFinder, PaddingPackets) {
+ uint16_t sn = Rand();
+
+ InsertGeneric(sn, sn, true);
+ InsertGeneric(sn + 2, sn + 2, false);
+ EXPECT_EQ(1UL, frames_from_callback_.size());
+ InsertPadding(sn + 1);
+ EXPECT_EQ(2UL, frames_from_callback_.size());
+}
+
+TEST_F(TestRtpFrameReferenceFinder, PaddingPacketsReordered) {
+ uint16_t sn = Rand();
+
+ InsertGeneric(sn, sn, true);
+ InsertPadding(sn + 1);
+ InsertPadding(sn + 4);
+ InsertGeneric(sn + 2, sn + 3, false);
+
+ EXPECT_EQ(2UL, frames_from_callback_.size());
+ CheckReferencesGeneric(sn);
+ CheckReferencesGeneric(sn + 3, sn + 0);
+}
+
+TEST_F(TestRtpFrameReferenceFinder, PaddingPacketsReorderedMultipleKeyframes) {
+ uint16_t sn = Rand();
+
+ InsertGeneric(sn, sn, true);
+ InsertPadding(sn + 1);
+ InsertPadding(sn + 4);
+ InsertGeneric(sn + 2, sn + 3, false);
+ InsertGeneric(sn + 5, sn + 5, true);
+ InsertPadding(sn + 6);
+ InsertPadding(sn + 9);
+ InsertGeneric(sn + 7, sn + 8, false);
+
+ EXPECT_EQ(4UL, frames_from_callback_.size());
+}
+
+TEST_F(TestRtpFrameReferenceFinder, AdvanceSavedKeyframe) {
+ uint16_t sn = Rand();
+
+ InsertGeneric(sn, sn, true);
+ InsertGeneric(sn + 1, sn + 1, true);
+ InsertGeneric(sn + 2, sn + 10000, false);
+ InsertGeneric(sn + 10001, sn + 20000, false);
+ InsertGeneric(sn + 20001, sn + 30000, false);
+ InsertGeneric(sn + 30001, sn + 40000, false);
+
+ EXPECT_EQ(6UL, frames_from_callback_.size());
+}
+
+TEST_F(TestRtpFrameReferenceFinder, ClearTo) {
+ uint16_t sn = Rand();
+
+ InsertGeneric(sn, sn + 1, true);
+ InsertGeneric(sn + 4, sn + 5, false); // stashed
+ EXPECT_EQ(1UL, frames_from_callback_.size());
+
+ InsertGeneric(sn + 6, sn + 7, true); // keyframe
+ EXPECT_EQ(2UL, frames_from_callback_.size());
+ reference_finder_->ClearTo(sn + 7);
+
+ InsertGeneric(sn + 8, sn + 9, false); // first frame after keyframe.
+ EXPECT_EQ(3UL, frames_from_callback_.size());
+
+ InsertGeneric(sn + 2, sn + 3, false); // late, cleared past this frame.
+ EXPECT_EQ(3UL, frames_from_callback_.size());
+}
+
+TEST_F(TestRtpFrameReferenceFinder, H264KeyFrameReferences) {
+ uint16_t sn = Rand();
+ InsertH264(sn, sn, true);
+
+ ASSERT_EQ(1UL, frames_from_callback_.size());
+ CheckReferencesH264(sn);
+}
+
+TEST_F(TestRtpFrameReferenceFinder, H264SequenceNumberWrap) {
+ uint16_t sn = 0xFFFF;
+
+ InsertH264(sn - 1, sn - 1, true);
+ InsertH264(sn, sn, false);
+ InsertH264(sn + 1, sn + 1, false);
+ InsertH264(sn + 2, sn + 2, false);
+
+ ASSERT_EQ(4UL, frames_from_callback_.size());
+ CheckReferencesH264(sn - 1);
+ CheckReferencesH264(sn, sn - 1);
+ CheckReferencesH264(sn + 1, sn);
+ CheckReferencesH264(sn + 2, sn + 1);
+}
+
+TEST_F(TestRtpFrameReferenceFinder, H264Frames) {
+ uint16_t sn = Rand();
+
+ InsertH264(sn, sn, true);
+ InsertH264(sn + 1, sn + 1, false);
+ InsertH264(sn + 2, sn + 2, false);
+ InsertH264(sn + 3, sn + 3, false);
+
+ ASSERT_EQ(4UL, frames_from_callback_.size());
+ CheckReferencesH264(sn);
+ CheckReferencesH264(sn + 1, sn);
+ CheckReferencesH264(sn + 2, sn + 1);
+ CheckReferencesH264(sn + 3, sn + 2);
+}
+
+TEST_F(TestRtpFrameReferenceFinder, H264Reordering) {
+ uint16_t sn = Rand();
+
+ InsertH264(sn, sn, true);
+ InsertH264(sn + 1, sn + 1, false);
+ InsertH264(sn + 3, sn + 3, false);
+ InsertH264(sn + 2, sn + 2, false);
+ InsertH264(sn + 5, sn + 5, false);
+ InsertH264(sn + 6, sn + 6, false);
+ InsertH264(sn + 4, sn + 4, false);
+
+ ASSERT_EQ(7UL, frames_from_callback_.size());
+ CheckReferencesH264(sn);
+ CheckReferencesH264(sn + 1, sn);
+ CheckReferencesH264(sn + 2, sn + 1);
+ CheckReferencesH264(sn + 3, sn + 2);
+ CheckReferencesH264(sn + 4, sn + 3);
+ CheckReferencesH264(sn + 5, sn + 4);
+ CheckReferencesH264(sn + 6, sn + 5);
+}
+
+TEST_F(TestRtpFrameReferenceFinder, H264SequenceNumberWrapMulti) {
+ uint16_t sn = 0xFFFF;
+
+ InsertH264(sn - 3, sn - 2, true);
+ InsertH264(sn - 1, sn + 1, false);
+ InsertH264(sn + 2, sn + 3, false);
+ InsertH264(sn + 4, sn + 7, false);
+
+ ASSERT_EQ(4UL, frames_from_callback_.size());
+ CheckReferencesH264(sn - 2);
+ CheckReferencesH264(sn + 1, sn - 2);
+ CheckReferencesH264(sn + 3, sn + 1);
+ CheckReferencesH264(sn + 7, sn + 3);
+}
+
+TEST_F(TestRtpFrameReferenceFinder, Av1FrameNoDependencyDescriptor) {
+ uint16_t sn = 0xFFFF;
+ std::unique_ptr<RtpFrameObject> frame =
+ CreateFrame(/*seq_num_start=*/sn, /*seq_num_end=*/sn, /*keyframe=*/true,
+ kVideoCodecAV1, RTPVideoTypeHeader());
+
+ OnCompleteFrames(reference_finder_->ManageFrame(std::move(frame)));
+
+ ASSERT_EQ(1UL, frames_from_callback_.size());
+ CheckReferencesGeneric(sn);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/rtp_generic_ref_finder.cc b/third_party/libwebrtc/modules/video_coding/rtp_generic_ref_finder.cc
new file mode 100644
index 0000000000..fd5b8afda1
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/rtp_generic_ref_finder.cc
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/rtp_generic_ref_finder.h"
+
+#include <utility>
+
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+RtpFrameReferenceFinder::ReturnVector RtpGenericFrameRefFinder::ManageFrame(
+ std::unique_ptr<RtpFrameObject> frame,
+ const RTPVideoHeader::GenericDescriptorInfo& descriptor) {
+  // Frame IDs are unwrapped in the RtpVideoStreamReceiver, so there is no
+  // need to unwrap them here.
+ frame->SetId(descriptor.frame_id);
+ frame->SetSpatialIndex(descriptor.spatial_index);
+ if (descriptor.temporal_index != kNoTemporalIdx)
+ frame->SetTemporalIndex(descriptor.temporal_index);
+
+ RtpFrameReferenceFinder::ReturnVector res;
+ if (EncodedFrame::kMaxFrameReferences < descriptor.dependencies.size()) {
+ RTC_LOG(LS_WARNING) << "Too many dependencies in generic descriptor.";
+ return res;
+ }
+
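+  // The generic descriptor carries explicit frame dependencies, so the
+  // references can be copied over directly; no unwrapping or GOP tracking is
+  // needed here.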
+ frame->num_references = descriptor.dependencies.size();
+ for (size_t i = 0; i < descriptor.dependencies.size(); ++i) {
+ frame->references[i] = descriptor.dependencies[i];
+ }
+
+ res.push_back(std::move(frame));
+ return res;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/rtp_generic_ref_finder.h b/third_party/libwebrtc/modules/video_coding/rtp_generic_ref_finder.h
new file mode 100644
index 0000000000..87d7b59406
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/rtp_generic_ref_finder.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_RTP_GENERIC_REF_FINDER_H_
+#define MODULES_VIDEO_CODING_RTP_GENERIC_REF_FINDER_H_
+
+#include <memory>
+
+#include "modules/video_coding/frame_object.h"
+#include "modules/video_coding/rtp_frame_reference_finder.h"
+
+namespace webrtc {
+
+class RtpGenericFrameRefFinder {
+ public:
+ RtpGenericFrameRefFinder() = default;
+
+ RtpFrameReferenceFinder::ReturnVector ManageFrame(
+ std::unique_ptr<RtpFrameObject> frame,
+ const RTPVideoHeader::GenericDescriptorInfo& descriptor);
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_RTP_GENERIC_REF_FINDER_H_
diff --git a/third_party/libwebrtc/modules/video_coding/rtp_seq_num_only_ref_finder.cc b/third_party/libwebrtc/modules/video_coding/rtp_seq_num_only_ref_finder.cc
new file mode 100644
index 0000000000..59b027e2ce
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/rtp_seq_num_only_ref_finder.cc
@@ -0,0 +1,186 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/rtp_seq_num_only_ref_finder.h"
+
+#include <utility>
+
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+RtpFrameReferenceFinder::ReturnVector RtpSeqNumOnlyRefFinder::ManageFrame(
+ std::unique_ptr<RtpFrameObject> frame) {
+ FrameDecision decision = ManageFrameInternal(frame.get());
+
+ RtpFrameReferenceFinder::ReturnVector res;
+ switch (decision) {
+ case kStash:
+ if (stashed_frames_.size() > kMaxStashedFrames)
+ stashed_frames_.pop_back();
+ stashed_frames_.push_front(std::move(frame));
+ return res;
+ case kHandOff:
+ res.push_back(std::move(frame));
+ RetryStashedFrames(res);
+ return res;
+ case kDrop:
+ return res;
+ }
+
+ return res;
+}
+
+RtpSeqNumOnlyRefFinder::FrameDecision
+RtpSeqNumOnlyRefFinder::ManageFrameInternal(RtpFrameObject* frame) {
+ if (frame->frame_type() == VideoFrameType::kVideoFrameKey) {
+ last_seq_num_gop_.insert(std::make_pair(
+ frame->last_seq_num(),
+ std::make_pair(frame->last_seq_num(), frame->last_seq_num())));
+ }
+
+  // We have received a frame but not yet a keyframe; stash this frame.
+ if (last_seq_num_gop_.empty())
+ return kStash;
+
+ // Clean up info for old keyframes but make sure to keep info
+ // for the last keyframe.
+ auto clean_to = last_seq_num_gop_.lower_bound(frame->last_seq_num() - 100);
+ for (auto it = last_seq_num_gop_.begin();
+ it != clean_to && last_seq_num_gop_.size() > 1;) {
+ it = last_seq_num_gop_.erase(it);
+ }
+
+ // Find the last sequence number of the last frame for the keyframe
+ // that this frame indirectly references.
+ auto seq_num_it = last_seq_num_gop_.upper_bound(frame->last_seq_num());
+ if (seq_num_it == last_seq_num_gop_.begin()) {
+ RTC_LOG(LS_WARNING) << "Generic frame with packet range ["
+ << frame->first_seq_num() << ", "
+ << frame->last_seq_num()
+ << "] has no GoP, dropping frame.";
+ return kDrop;
+ }
+ seq_num_it--;
+
+  // Make sure the packet sequence numbers are continuous; otherwise stash
+  // this frame.
+ uint16_t last_picture_id_gop = seq_num_it->second.first;
+ uint16_t last_picture_id_with_padding_gop = seq_num_it->second.second;
+ if (frame->frame_type() == VideoFrameType::kVideoFrameDelta) {
+ uint16_t prev_seq_num = frame->first_seq_num() - 1;
+
+ if (prev_seq_num != last_picture_id_with_padding_gop)
+ return kStash;
+ }
+
+ RTC_DCHECK(AheadOrAt(frame->last_seq_num(), seq_num_it->first));
+
+ // Since keyframes can cause reordering we can't simply assign the
+ // picture id according to some incrementing counter.
+ frame->SetId(frame->last_seq_num());
+ frame->num_references =
+ frame->frame_type() == VideoFrameType::kVideoFrameDelta;
+ frame->references[0] = rtp_seq_num_unwrapper_.Unwrap(last_picture_id_gop);
+ if (AheadOf<uint16_t>(frame->Id(), last_picture_id_gop)) {
+ seq_num_it->second.first = frame->Id();
+ seq_num_it->second.second = frame->Id();
+ }
+
+ UpdateLastPictureIdWithPadding(frame->Id());
+ frame->SetSpatialIndex(0);
+ frame->SetId(rtp_seq_num_unwrapper_.Unwrap(frame->Id()));
+ return kHandOff;
+}
+
+void RtpSeqNumOnlyRefFinder::RetryStashedFrames(
+ RtpFrameReferenceFinder::ReturnVector& res) {
+ bool complete_frame = false;
+ do {
+ complete_frame = false;
+ for (auto frame_it = stashed_frames_.begin();
+ frame_it != stashed_frames_.end();) {
+ FrameDecision decision = ManageFrameInternal(frame_it->get());
+
+ switch (decision) {
+ case kStash:
+ ++frame_it;
+ break;
+ case kHandOff:
+ complete_frame = true;
+ res.push_back(std::move(*frame_it));
+ [[fallthrough]];
+ case kDrop:
+ frame_it = stashed_frames_.erase(frame_it);
+ }
+ }
+ } while (complete_frame);
+}
+
+void RtpSeqNumOnlyRefFinder::UpdateLastPictureIdWithPadding(uint16_t seq_num) {
+ auto gop_seq_num_it = last_seq_num_gop_.upper_bound(seq_num);
+
+ // If this padding packet "belongs" to a group of pictures that we don't track
+ // anymore, do nothing.
+ if (gop_seq_num_it == last_seq_num_gop_.begin())
+ return;
+ --gop_seq_num_it;
+
+  // Calculate the next continuous sequence number and search for it in
+  // the padding packets we have stashed.
+ uint16_t next_seq_num_with_padding = gop_seq_num_it->second.second + 1;
+ auto padding_seq_num_it =
+ stashed_padding_.lower_bound(next_seq_num_with_padding);
+
+  // While there are still stashed padding packets and they are continuous,
+  // advance the "last picture id with padding" and remove the consumed
+  // padding packets.
+ while (padding_seq_num_it != stashed_padding_.end() &&
+ *padding_seq_num_it == next_seq_num_with_padding) {
+ gop_seq_num_it->second.second = next_seq_num_with_padding;
+ ++next_seq_num_with_padding;
+ padding_seq_num_it = stashed_padding_.erase(padding_seq_num_it);
+ }
+
+  // If the stream has been continuous without any new keyframes for a while,
+  // there is a risk that new frames will appear to be older than the keyframe
+  // they belong to due to sequence number wraparound. To prevent this, we
+  // advance the picture id of the keyframe every so often.
+ if (ForwardDiff(gop_seq_num_it->first, seq_num) > 10000) {
+ auto save = gop_seq_num_it->second;
+ last_seq_num_gop_.clear();
+ last_seq_num_gop_[seq_num] = save;
+ }
+}
+
+RtpFrameReferenceFinder::ReturnVector RtpSeqNumOnlyRefFinder::PaddingReceived(
+ uint16_t seq_num) {
+ auto clean_padding_to =
+ stashed_padding_.lower_bound(seq_num - kMaxPaddingAge);
+ stashed_padding_.erase(stashed_padding_.begin(), clean_padding_to);
+ stashed_padding_.insert(seq_num);
+ UpdateLastPictureIdWithPadding(seq_num);
+ RtpFrameReferenceFinder::ReturnVector res;
+ RetryStashedFrames(res);
+ return res;
+}
+
+void RtpSeqNumOnlyRefFinder::ClearTo(uint16_t seq_num) {
+ auto it = stashed_frames_.begin();
+ while (it != stashed_frames_.end()) {
+ if (AheadOf<uint16_t>(seq_num, (*it)->first_seq_num())) {
+ it = stashed_frames_.erase(it);
+ } else {
+ ++it;
+ }
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/rtp_seq_num_only_ref_finder.h b/third_party/libwebrtc/modules/video_coding/rtp_seq_num_only_ref_finder.h
new file mode 100644
index 0000000000..ef3c022111
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/rtp_seq_num_only_ref_finder.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_RTP_SEQ_NUM_ONLY_REF_FINDER_H_
+#define MODULES_VIDEO_CODING_RTP_SEQ_NUM_ONLY_REF_FINDER_H_
+
+#include <deque>
+#include <map>
+#include <memory>
+#include <set>
+#include <utility>
+
+#include "absl/container/inlined_vector.h"
+#include "modules/video_coding/frame_object.h"
+#include "modules/video_coding/rtp_frame_reference_finder.h"
+#include "rtc_base/numerics/sequence_number_util.h"
+
+namespace webrtc {
+
+class RtpSeqNumOnlyRefFinder {
+ public:
+ RtpSeqNumOnlyRefFinder() = default;
+
+ RtpFrameReferenceFinder::ReturnVector ManageFrame(
+ std::unique_ptr<RtpFrameObject> frame);
+ RtpFrameReferenceFinder::ReturnVector PaddingReceived(uint16_t seq_num);
+ void ClearTo(uint16_t seq_num);
+
+ private:
+ static constexpr int kMaxStashedFrames = 100;
+ static constexpr int kMaxPaddingAge = 100;
+
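+  // What ManageFrameInternal decided to do with a frame: keep it for later
+  // (kStash), emit it and retry any stashed frames (kHandOff), or discard it
+  // (kDrop).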
+ enum FrameDecision { kStash, kHandOff, kDrop };
+
+ FrameDecision ManageFrameInternal(RtpFrameObject* frame);
+ void RetryStashedFrames(RtpFrameReferenceFinder::ReturnVector& res);
+ void UpdateLastPictureIdWithPadding(uint16_t seq_num);
+
+  // For every group of pictures, hold two sequence numbers: the first is the
+  // sequence number of the last packet of the last completed frame, and the
+  // second is that same sequence number advanced past any continuous padding
+  // packets that follow it.
+ std::map<uint16_t,
+ std::pair<uint16_t, uint16_t>,
+ DescendingSeqNumComp<uint16_t>>
+ last_seq_num_gop_;
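+  // Example: a keyframe whose last packet has seq num 10, followed by padding
+  // packets 11 and 12, is tracked as {10: {10, 12}}; a delta frame starting
+  // at seq num 13 is then continuous with this group of pictures.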
+
+ // Padding packets that have been received but that are not yet continuous
+ // with any group of pictures.
+ std::set<uint16_t, DescendingSeqNumComp<uint16_t>> stashed_padding_;
+
+ // Frames that have been fully received but didn't have all the information
+ // needed to determine their references.
+ std::deque<std::unique_ptr<RtpFrameObject>> stashed_frames_;
+
+ // Unwrapper used to unwrap generic RTP streams. In a generic stream we derive
+ // a picture id from the packet sequence number.
+ SeqNumUnwrapper<uint16_t> rtp_seq_num_unwrapper_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_RTP_SEQ_NUM_ONLY_REF_FINDER_H_
diff --git a/third_party/libwebrtc/modules/video_coding/rtp_vp8_ref_finder.cc b/third_party/libwebrtc/modules/video_coding/rtp_vp8_ref_finder.cc
new file mode 100644
index 0000000000..185756ce51
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/rtp_vp8_ref_finder.cc
@@ -0,0 +1,254 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/rtp_vp8_ref_finder.h"
+
+#include <utility>
+
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+RtpFrameReferenceFinder::ReturnVector RtpVp8RefFinder::ManageFrame(
+ std::unique_ptr<RtpFrameObject> frame) {
+ const RTPVideoHeaderVP8& codec_header = absl::get<RTPVideoHeaderVP8>(
+ frame->GetRtpVideoHeader().video_type_header);
+
+ if (codec_header.temporalIdx != kNoTemporalIdx)
+ frame->SetTemporalIndex(codec_header.temporalIdx);
+
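+  // TL0PICIDX is 8 bits on the wire, so unwrap it into a monotonically
+  // increasing 64-bit index to keep the GOP bookkeeping below correct across
+  // wraparound.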
+ int64_t unwrapped_tl0 = tl0_unwrapper_.Unwrap(codec_header.tl0PicIdx & 0xFF);
+ FrameDecision decision =
+ ManageFrameInternal(frame.get(), codec_header, unwrapped_tl0);
+
+ RtpFrameReferenceFinder::ReturnVector res;
+ switch (decision) {
+ case kStash:
+ if (stashed_frames_.size() > kMaxStashedFrames) {
+ stashed_frames_.pop_back();
+ }
+ stashed_frames_.push_front(
+ {.unwrapped_tl0 = unwrapped_tl0, .frame = std::move(frame)});
+ return res;
+ case kHandOff:
+ res.push_back(std::move(frame));
+ RetryStashedFrames(res);
+ return res;
+ case kDrop:
+ return res;
+ }
+
+ return res;
+}
+
+RtpVp8RefFinder::FrameDecision RtpVp8RefFinder::ManageFrameInternal(
+ RtpFrameObject* frame,
+ const RTPVideoHeaderVP8& codec_header,
+ int64_t unwrapped_tl0) {
+  // Protect against corrupted packets with an arbitrarily large temporal idx.
+ if (codec_header.temporalIdx >= kMaxTemporalLayers)
+ return kDrop;
+
+ frame->SetSpatialIndex(0);
+ frame->SetId(codec_header.pictureId & 0x7FFF);
+
+ if (last_picture_id_ == -1)
+ last_picture_id_ = frame->Id();
+
+ // Clean up info about not yet received frames that are too old.
+ uint16_t old_picture_id =
+ Subtract<kFrameIdLength>(frame->Id(), kMaxNotYetReceivedFrames);
+ auto clean_frames_to = not_yet_received_frames_.lower_bound(old_picture_id);
+ not_yet_received_frames_.erase(not_yet_received_frames_.begin(),
+ clean_frames_to);
+ // Avoid re-adding picture ids that were just erased.
+ if (AheadOf<uint16_t, kFrameIdLength>(old_picture_id, last_picture_id_)) {
+ last_picture_id_ = old_picture_id;
+ }
+  // If there is a gap in the fully received frames, record the picture ids
+  // of the missing frames in `not_yet_received_frames_`.
+ if (AheadOf<uint16_t, kFrameIdLength>(frame->Id(), last_picture_id_)) {
+ do {
+ last_picture_id_ = Add<kFrameIdLength>(last_picture_id_, 1);
+ not_yet_received_frames_.insert(last_picture_id_);
+ } while (last_picture_id_ != frame->Id());
+ }
+
+ // Clean up info for base layers that are too old.
+ int64_t old_tl0_pic_idx = unwrapped_tl0 - kMaxLayerInfo;
+ auto clean_layer_info_to = layer_info_.lower_bound(old_tl0_pic_idx);
+ layer_info_.erase(layer_info_.begin(), clean_layer_info_to);
+
+ if (frame->frame_type() == VideoFrameType::kVideoFrameKey) {
+ if (codec_header.temporalIdx != 0) {
+ return kDrop;
+ }
+ frame->num_references = 0;
+ layer_info_[unwrapped_tl0].fill(-1);
+ UpdateLayerInfoVp8(frame, unwrapped_tl0, codec_header.temporalIdx);
+ return kHandOff;
+ }
+
+ auto layer_info_it = layer_info_.find(
+ codec_header.temporalIdx == 0 ? unwrapped_tl0 - 1 : unwrapped_tl0);
+
+ // If we don't have the base layer frame yet, stash this frame.
+ if (layer_info_it == layer_info_.end())
+ return kStash;
+
+  // A non-keyframe base layer frame has been received; copy the layer info
+  // from the previous base layer frame and set a reference to that previous
+  // base layer frame.
+ if (codec_header.temporalIdx == 0) {
+ layer_info_it =
+ layer_info_.emplace(unwrapped_tl0, layer_info_it->second).first;
+ frame->num_references = 1;
+ int64_t last_pid_on_layer = layer_info_it->second[0];
+
+ // Is this an old frame that has already been used to update the state? If
+ // so, drop it.
+ if (AheadOrAt<uint16_t, kFrameIdLength>(last_pid_on_layer, frame->Id())) {
+ return kDrop;
+ }
+
+ frame->references[0] = last_pid_on_layer;
+ UpdateLayerInfoVp8(frame, unwrapped_tl0, codec_header.temporalIdx);
+ return kHandOff;
+ }
+
+  // Layer sync frame; this frame only references its base layer frame.
+ if (codec_header.layerSync) {
+ frame->num_references = 1;
+ int64_t last_pid_on_layer = layer_info_it->second[codec_header.temporalIdx];
+
+ // Is this an old frame that has already been used to update the state? If
+ // so, drop it.
+ if (last_pid_on_layer != -1 &&
+ AheadOrAt<uint16_t, kFrameIdLength>(last_pid_on_layer, frame->Id())) {
+ return kDrop;
+ }
+
+ frame->references[0] = layer_info_it->second[0];
+ UpdateLayerInfoVp8(frame, unwrapped_tl0, codec_header.temporalIdx);
+ return kHandOff;
+ }
+
+ // Find all references for this frame.
+ frame->num_references = 0;
+ for (uint8_t layer = 0; layer <= codec_header.temporalIdx; ++layer) {
+ // If we have not yet received a previous frame on this temporal layer,
+ // stash this frame.
+ if (layer_info_it->second[layer] == -1)
+ return kStash;
+
+      // If the last frame on this layer is ahead of this frame, a layer sync
+      // frame has been received after this frame for the same base layer
+      // frame, so drop this frame.
+ if (AheadOf<uint16_t, kFrameIdLength>(layer_info_it->second[layer],
+ frame->Id())) {
+ return kDrop;
+ }
+
+ // If we have not yet received a frame between this frame and the referenced
+ // frame then we have to wait for that frame to be completed first.
+ auto not_received_frame_it =
+ not_yet_received_frames_.upper_bound(layer_info_it->second[layer]);
+ if (not_received_frame_it != not_yet_received_frames_.end() &&
+ AheadOf<uint16_t, kFrameIdLength>(frame->Id(),
+ *not_received_frame_it)) {
+ return kStash;
+ }
+
+ if (!(AheadOf<uint16_t, kFrameIdLength>(frame->Id(),
+ layer_info_it->second[layer]))) {
+      RTC_LOG(LS_WARNING) << "Frame with picture id " << frame->Id()
+                          << " and packet range [" << frame->first_seq_num()
+                          << ", " << frame->last_seq_num()
+                          << "] already received, dropping frame.";
+ return kDrop;
+ }
+
+ ++frame->num_references;
+ frame->references[layer] = layer_info_it->second[layer];
+ }
+
+ UpdateLayerInfoVp8(frame, unwrapped_tl0, codec_header.temporalIdx);
+ return kHandOff;
+}
+
+void RtpVp8RefFinder::UpdateLayerInfoVp8(RtpFrameObject* frame,
+ int64_t unwrapped_tl0,
+ uint8_t temporal_idx) {
+ auto layer_info_it = layer_info_.find(unwrapped_tl0);
+
+ // Update this layer info and newer.
+ while (layer_info_it != layer_info_.end()) {
+ if (layer_info_it->second[temporal_idx] != -1 &&
+ AheadOf<uint16_t, kFrameIdLength>(layer_info_it->second[temporal_idx],
+ frame->Id())) {
+      // This frame is not newer, so no subsequent layer info needs to be
+      // updated.
+ break;
+ }
+
+ layer_info_it->second[temporal_idx] = frame->Id();
+ ++unwrapped_tl0;
+ layer_info_it = layer_info_.find(unwrapped_tl0);
+ }
+ not_yet_received_frames_.erase(frame->Id());
+
+ UnwrapPictureIds(frame);
+}
+
+void RtpVp8RefFinder::RetryStashedFrames(
+ RtpFrameReferenceFinder::ReturnVector& res) {
+ bool complete_frame = false;
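+  // A handed-off frame may in turn unblock other stashed frames, so keep
+  // sweeping the stash until a full pass completes no frame.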
+ do {
+ complete_frame = false;
+ for (auto it = stashed_frames_.begin(); it != stashed_frames_.end();) {
+ const RTPVideoHeaderVP8& codec_header = absl::get<RTPVideoHeaderVP8>(
+ it->frame->GetRtpVideoHeader().video_type_header);
+ FrameDecision decision =
+ ManageFrameInternal(it->frame.get(), codec_header, it->unwrapped_tl0);
+
+ switch (decision) {
+ case kStash:
+ ++it;
+ break;
+ case kHandOff:
+ complete_frame = true;
+ res.push_back(std::move(it->frame));
+ [[fallthrough]];
+ case kDrop:
+ it = stashed_frames_.erase(it);
+ }
+ }
+ } while (complete_frame);
+}
+
+void RtpVp8RefFinder::UnwrapPictureIds(RtpFrameObject* frame) {
+ for (size_t i = 0; i < frame->num_references; ++i)
+ frame->references[i] = unwrapper_.Unwrap(frame->references[i]);
+ frame->SetId(unwrapper_.Unwrap(frame->Id()));
+}
+
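+// Drops all stashed frames whose first packet is older than `seq_num`.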
+void RtpVp8RefFinder::ClearTo(uint16_t seq_num) {
+ auto it = stashed_frames_.begin();
+ while (it != stashed_frames_.end()) {
+ if (AheadOf<uint16_t>(seq_num, it->frame->first_seq_num())) {
+ it = stashed_frames_.erase(it);
+ } else {
+ ++it;
+ }
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/rtp_vp8_ref_finder.h b/third_party/libwebrtc/modules/video_coding/rtp_vp8_ref_finder.h
new file mode 100644
index 0000000000..1ae45cdba3
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/rtp_vp8_ref_finder.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_RTP_VP8_REF_FINDER_H_
+#define MODULES_VIDEO_CODING_RTP_VP8_REF_FINDER_H_
+
+#include <deque>
+#include <map>
+#include <memory>
+#include <set>
+
+#include "absl/container/inlined_vector.h"
+#include "modules/video_coding/frame_object.h"
+#include "modules/video_coding/rtp_frame_reference_finder.h"
+#include "rtc_base/numerics/sequence_number_util.h"
+
+namespace webrtc {
+
+class RtpVp8RefFinder {
+ public:
+ RtpVp8RefFinder() = default;
+
+ RtpFrameReferenceFinder::ReturnVector ManageFrame(
+ std::unique_ptr<RtpFrameObject> frame);
+ void ClearTo(uint16_t seq_num);
+
+ private:
+ static constexpr int kFrameIdLength = 1 << 15;
+ static constexpr int kMaxLayerInfo = 50;
+ static constexpr int kMaxNotYetReceivedFrames = 100;
+ static constexpr int kMaxStashedFrames = 100;
+ static constexpr int kMaxTemporalLayers = 5;
+
+ struct UnwrappedTl0Frame {
+ int64_t unwrapped_tl0;
+ std::unique_ptr<RtpFrameObject> frame;
+ };
+
+ enum FrameDecision { kStash, kHandOff, kDrop };
+
+ FrameDecision ManageFrameInternal(RtpFrameObject* frame,
+ const RTPVideoHeaderVP8& codec_header,
+ int64_t unwrapped_tl0);
+ void RetryStashedFrames(RtpFrameReferenceFinder::ReturnVector& res);
+ void UpdateLayerInfoVp8(RtpFrameObject* frame,
+ int64_t unwrapped_tl0,
+ uint8_t temporal_idx);
+ void UnwrapPictureIds(RtpFrameObject* frame);
+
+ // Save the last picture id in order to detect when there is a gap in frames
+ // that have not yet been fully received.
+ int last_picture_id_ = -1;
+
+ // Frames earlier than the last received frame that have not yet been
+ // fully received.
+ std::set<uint16_t, DescendingSeqNumComp<uint16_t, kFrameIdLength>>
+ not_yet_received_frames_;
+
+ // Frames that have been fully received but didn't have all the information
+ // needed to determine their references.
+ std::deque<UnwrappedTl0Frame> stashed_frames_;
+
+  // Holds the information about the last completed frame for each temporal
+  // layer, keyed by the unwrapped TL0 picture index.
+ std::map<int64_t, std::array<int64_t, kMaxTemporalLayers>> layer_info_;
+
+  // Unwrapper used to unwrap the picture ids of the VP8 stream into
+  // continuously increasing frame ids.
+ SeqNumUnwrapper<uint16_t, kFrameIdLength> unwrapper_;
+
+ SeqNumUnwrapper<uint8_t> tl0_unwrapper_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_RTP_VP8_REF_FINDER_H_
diff --git a/third_party/libwebrtc/modules/video_coding/rtp_vp8_ref_finder_unittest.cc b/third_party/libwebrtc/modules/video_coding/rtp_vp8_ref_finder_unittest.cc
new file mode 100644
index 0000000000..7dc6cd5521
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/rtp_vp8_ref_finder_unittest.cc
@@ -0,0 +1,370 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/rtp_vp8_ref_finder.h"
+
+#include <utility>
+#include <vector>
+
+#include "modules/video_coding/frame_object.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+using ::testing::Contains;
+using ::testing::Eq;
+using ::testing::Matcher;
+using ::testing::Matches;
+using ::testing::SizeIs;
+using ::testing::UnorderedElementsAreArray;
+
+namespace webrtc {
+namespace {
+
+MATCHER_P2(HasIdAndRefs, id, refs, "") {
+ return Matches(Eq(id))(arg->Id()) &&
+ Matches(UnorderedElementsAreArray(refs))(
+ rtc::ArrayView<int64_t>(arg->references, arg->num_references));
+}
+
+Matcher<const std::vector<std::unique_ptr<EncodedFrame>>&>
+HasFrameWithIdAndRefs(int64_t frame_id, const std::vector<int64_t>& refs) {
+ return Contains(HasIdAndRefs(frame_id, refs));
+}
+
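+// Builder for RtpFrameObjects that populates only the VP8 header fields the
+// reference finder inspects.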
+class Frame {
+ public:
+ Frame& AsKeyFrame(bool is_keyframe = true) {
+ is_keyframe_ = is_keyframe;
+ return *this;
+ }
+
+ Frame& Pid(int pid) {
+ picture_id_ = pid;
+ return *this;
+ }
+
+ Frame& Tid(int tid) {
+ temporal_id_ = tid;
+ return *this;
+ }
+
+ Frame& Tl0(int tl0) {
+ tl0_idx_ = tl0;
+ return *this;
+ }
+
+ Frame& AsSync(bool is_sync = true) {
+ sync = is_sync;
+ return *this;
+ }
+
+ operator std::unique_ptr<RtpFrameObject>() {
+ RTPVideoHeaderVP8 vp8_header{};
+ vp8_header.pictureId = *picture_id_;
+ vp8_header.temporalIdx = *temporal_id_;
+ vp8_header.tl0PicIdx = *tl0_idx_;
+ vp8_header.layerSync = sync;
+
+ RTPVideoHeader video_header;
+ video_header.frame_type = is_keyframe_ ? VideoFrameType::kVideoFrameKey
+ : VideoFrameType::kVideoFrameDelta;
+ video_header.video_type_header = vp8_header;
+ // clang-format off
+ return std::make_unique<RtpFrameObject>(
+ /*seq_num_start=*/0,
+ /*seq_num_end=*/0,
+ /*markerBit=*/true,
+ /*times_nacked=*/0,
+ /*first_packet_received_time=*/0,
+ /*last_packet_received_time=*/0,
+ /*rtp_timestamp=*/0,
+ /*ntp_time_ms=*/0,
+ VideoSendTiming(),
+ /*payload_type=*/0,
+ kVideoCodecVP8,
+ kVideoRotation_0,
+ VideoContentType::UNSPECIFIED,
+ video_header,
+ /*color_space=*/absl::nullopt,
+ RtpPacketInfos(),
+ EncodedImageBuffer::Create(/*size=*/0));
+ // clang-format on
+ }
+
+ private:
+ bool is_keyframe_ = false;
+ absl::optional<int> picture_id_;
+ absl::optional<int> temporal_id_;
+ absl::optional<int> tl0_idx_;
+ bool sync = false;
+};
+
+} // namespace
+
+class RtpVp8RefFinderTest : public ::testing::Test {
+ protected:
+ RtpVp8RefFinderTest() : ref_finder_(std::make_unique<RtpVp8RefFinder>()) {}
+
+ void Insert(std::unique_ptr<RtpFrameObject> frame) {
+ for (auto& f : ref_finder_->ManageFrame(std::move(frame))) {
+ frames_.push_back(std::move(f));
+ }
+ }
+
+ std::unique_ptr<RtpVp8RefFinder> ref_finder_;
+ std::vector<std::unique_ptr<EncodedFrame>> frames_;
+};
+
+TEST_F(RtpVp8RefFinderTest, Vp8RepeatedFrame_0) {
+ Insert(Frame().Pid(0).Tid(0).Tl0(1).AsKeyFrame());
+ Insert(Frame().Pid(1).Tid(0).Tl0(2));
+ Insert(Frame().Pid(1).Tid(0).Tl0(2));
+
+ EXPECT_THAT(frames_, SizeIs(2));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(0, {}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(1, {0}));
+}
+
+TEST_F(RtpVp8RefFinderTest, Vp8RepeatedFrameLayerSync_01) {
+ Insert(Frame().Pid(0).Tid(0).Tl0(1).AsKeyFrame());
+ Insert(Frame().Pid(1).Tid(1).Tl0(1).AsSync());
+ Insert(Frame().Pid(1).Tid(1).Tl0(1).AsSync());
+
+ EXPECT_THAT(frames_, SizeIs(2));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(0, {}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(1, {0}));
+}
+
+TEST_F(RtpVp8RefFinderTest, Vp8RepeatedFrame_01) {
+ Insert(Frame().Pid(0).Tid(0).Tl0(1).AsKeyFrame());
+ Insert(Frame().Pid(1).Tid(0).Tl0(2).AsSync());
+ Insert(Frame().Pid(2).Tid(0).Tl0(3));
+ Insert(Frame().Pid(3).Tid(0).Tl0(4));
+ Insert(Frame().Pid(3).Tid(0).Tl0(4));
+
+ EXPECT_THAT(frames_, SizeIs(4));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(0, {}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(1, {0}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(2, {1}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(3, {2}));
+}
+
+TEST_F(RtpVp8RefFinderTest, Vp8TemporalLayers_0) {
+ Insert(Frame().Pid(0).Tid(0).Tl0(1).AsKeyFrame());
+ Insert(Frame().Pid(1).Tid(0).Tl0(2));
+
+ EXPECT_THAT(frames_, SizeIs(2));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(0, {}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(1, {0}));
+}
+
+TEST_F(RtpVp8RefFinderTest, Vp8DuplicateTl1Frames) {
+ Insert(Frame().Pid(0).Tid(0).Tl0(0).AsKeyFrame());
+ Insert(Frame().Pid(1).Tid(1).Tl0(0).AsSync());
+ Insert(Frame().Pid(2).Tid(0).Tl0(1));
+ Insert(Frame().Pid(3).Tid(1).Tl0(1));
+ Insert(Frame().Pid(3).Tid(1).Tl0(1));
+ Insert(Frame().Pid(4).Tid(0).Tl0(2));
+ Insert(Frame().Pid(5).Tid(1).Tl0(2));
+
+ EXPECT_THAT(frames_, SizeIs(6));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(0, {}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(1, {0}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(2, {0}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(3, {1, 2}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(4, {2}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(5, {3, 4}));
+}
+
+TEST_F(RtpVp8RefFinderTest, Vp8TemporalLayersReordering_0) {
+ Insert(Frame().Pid(1).Tid(0).Tl0(2));
+ Insert(Frame().Pid(0).Tid(0).Tl0(1).AsKeyFrame());
+ Insert(Frame().Pid(3).Tid(0).Tl0(4));
+ Insert(Frame().Pid(2).Tid(0).Tl0(3));
+ Insert(Frame().Pid(5).Tid(0).Tl0(6));
+ Insert(Frame().Pid(6).Tid(0).Tl0(7));
+ Insert(Frame().Pid(4).Tid(0).Tl0(5));
+
+ EXPECT_THAT(frames_, SizeIs(7));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(0, {}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(1, {0}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(2, {1}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(3, {2}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(4, {3}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(5, {4}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(6, {5}));
+}
+
+TEST_F(RtpVp8RefFinderTest, Vp8TemporalLayers_01) {
+ Insert(Frame().Pid(0).Tid(0).Tl0(255).AsKeyFrame());
+ Insert(Frame().Pid(1).Tid(1).Tl0(255).AsSync());
+ Insert(Frame().Pid(2).Tid(0).Tl0(0));
+ Insert(Frame().Pid(3).Tid(1).Tl0(0));
+
+ EXPECT_THAT(frames_, SizeIs(4));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(0, {}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(1, {0}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(2, {0}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(3, {1, 2}));
+}
+
+TEST_F(RtpVp8RefFinderTest, Vp8TemporalLayersReordering_01) {
+ Insert(Frame().Pid(1).Tid(1).Tl0(255).AsSync());
+ Insert(Frame().Pid(0).Tid(0).Tl0(255).AsKeyFrame());
+ Insert(Frame().Pid(3).Tid(1).Tl0(0));
+ Insert(Frame().Pid(5).Tid(1).Tl0(1));
+ Insert(Frame().Pid(2).Tid(0).Tl0(0));
+ Insert(Frame().Pid(4).Tid(0).Tl0(1));
+ Insert(Frame().Pid(6).Tid(0).Tl0(2));
+ Insert(Frame().Pid(7).Tid(1).Tl0(2));
+
+ EXPECT_THAT(frames_, SizeIs(8));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(0, {}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(1, {0}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(2, {0}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(3, {1, 2}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(4, {2}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(5, {3, 4}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(6, {4}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(7, {5, 6}));
+}
+
+TEST_F(RtpVp8RefFinderTest, Vp8TemporalLayers_0212) {
+ Insert(Frame().Pid(0).Tid(0).Tl0(55).AsKeyFrame());
+ Insert(Frame().Pid(1).Tid(2).Tl0(55).AsSync());
+ Insert(Frame().Pid(2).Tid(1).Tl0(55).AsSync());
+ Insert(Frame().Pid(3).Tid(2).Tl0(55));
+ Insert(Frame().Pid(4).Tid(0).Tl0(56));
+ Insert(Frame().Pid(5).Tid(2).Tl0(56));
+ Insert(Frame().Pid(6).Tid(1).Tl0(56));
+ Insert(Frame().Pid(7).Tid(2).Tl0(56));
+ Insert(Frame().Pid(8).Tid(0).Tl0(57));
+ Insert(Frame().Pid(9).Tid(2).Tl0(57).AsSync());
+ Insert(Frame().Pid(10).Tid(1).Tl0(57).AsSync());
+ Insert(Frame().Pid(11).Tid(2).Tl0(57));
+
+ EXPECT_THAT(frames_, SizeIs(12));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(0, {}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(1, {0}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(2, {0}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(3, {0, 1, 2}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(4, {0}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(5, {2, 3, 4}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(6, {2, 4}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(7, {4, 5, 6}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(8, {4}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(9, {8}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(10, {8}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(11, {8, 9, 10}));
+}
+
+TEST_F(RtpVp8RefFinderTest, Vp8TemporalLayersMissingFrame_0212) {
+ Insert(Frame().Pid(0).Tid(0).Tl0(55).AsKeyFrame());
+ Insert(Frame().Pid(2).Tid(1).Tl0(55).AsSync());
+ Insert(Frame().Pid(3).Tid(2).Tl0(55));
+
+ EXPECT_THAT(frames_, SizeIs(2));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(0, {}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(2, {0}));
+}
+
+// Test with 3 temporal layers in a 0212 pattern.
+TEST_F(RtpVp8RefFinderTest, Vp8TemporalLayersReordering_0212) {
+ Insert(Frame().Pid(127).Tid(2).Tl0(55).AsSync());
+ Insert(Frame().Pid(126).Tid(0).Tl0(55).AsKeyFrame());
+ Insert(Frame().Pid(128).Tid(1).Tl0(55).AsSync());
+ Insert(Frame().Pid(130).Tid(0).Tl0(56));
+ Insert(Frame().Pid(131).Tid(2).Tl0(56));
+ Insert(Frame().Pid(129).Tid(2).Tl0(55));
+ Insert(Frame().Pid(133).Tid(2).Tl0(56));
+ Insert(Frame().Pid(135).Tid(2).Tl0(57).AsSync());
+ Insert(Frame().Pid(132).Tid(1).Tl0(56));
+ Insert(Frame().Pid(134).Tid(0).Tl0(57));
+ Insert(Frame().Pid(137).Tid(2).Tl0(57));
+ Insert(Frame().Pid(136).Tid(1).Tl0(57).AsSync());
+
+ EXPECT_THAT(frames_, SizeIs(12));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(126, {}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(127, {126}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(128, {126}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(129, {126, 127, 128}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(130, {126}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(131, {128, 129, 130}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(132, {128, 130}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(133, {130, 131, 132}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(134, {130}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(135, {134}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(136, {134}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(137, {134, 135, 136}));
+}
+
+TEST_F(RtpVp8RefFinderTest, Vp8LayerSync) {
+ Insert(Frame().Pid(0).Tid(0).Tl0(0).AsKeyFrame());
+ Insert(Frame().Pid(1).Tid(1).Tl0(0).AsSync());
+ Insert(Frame().Pid(2).Tid(0).Tl0(1));
+ Insert(Frame().Pid(4).Tid(0).Tl0(2));
+ Insert(Frame().Pid(5).Tid(1).Tl0(2).AsSync());
+ Insert(Frame().Pid(6).Tid(0).Tl0(3));
+ Insert(Frame().Pid(7).Tid(1).Tl0(3));
+
+ EXPECT_THAT(frames_, SizeIs(7));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(0, {}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(1, {0}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(2, {0}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(4, {2}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(5, {4}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(6, {4}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(7, {5, 6}));
+}
+
+TEST_F(RtpVp8RefFinderTest, Vp8Tl1SyncFrameAfterTl1Frame) {
+ Insert(Frame().Pid(1).Tid(0).Tl0(247).AsKeyFrame().AsSync());
+ Insert(Frame().Pid(3).Tid(0).Tl0(248));
+ Insert(Frame().Pid(4).Tid(1).Tl0(248));
+ Insert(Frame().Pid(5).Tid(1).Tl0(248).AsSync());
+
+ EXPECT_THAT(frames_, SizeIs(3));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(1, {}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(3, {1}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(5, {3}));
+}
+
+TEST_F(RtpVp8RefFinderTest, Vp8DetectMissingFrame_0212) {
+ Insert(Frame().Pid(1).Tid(0).Tl0(1).AsKeyFrame());
+ Insert(Frame().Pid(2).Tid(2).Tl0(1).AsSync());
+ Insert(Frame().Pid(3).Tid(1).Tl0(1).AsSync());
+ Insert(Frame().Pid(4).Tid(2).Tl0(1));
+ Insert(Frame().Pid(6).Tid(2).Tl0(2));
+ Insert(Frame().Pid(7).Tid(1).Tl0(2));
+ Insert(Frame().Pid(8).Tid(2).Tl0(2));
+ Insert(Frame().Pid(5).Tid(0).Tl0(2));
+
+ EXPECT_THAT(frames_, SizeIs(8));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(1, {}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(2, {1}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(3, {1}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(4, {1, 2, 3}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(5, {1}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(6, {3, 4, 5}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(7, {3, 5}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(8, {5, 6, 7}));
+}
+
+TEST_F(RtpVp8RefFinderTest, StashedFramesDoNotWrapTl0Backwards) {
+ Insert(Frame().Pid(0).Tid(0).Tl0(0));
+ EXPECT_THAT(frames_, SizeIs(0));
+
+ Insert(Frame().Pid(128).Tid(0).Tl0(128).AsKeyFrame());
+ EXPECT_THAT(frames_, SizeIs(1));
+ Insert(Frame().Pid(129).Tid(0).Tl0(129));
+ EXPECT_THAT(frames_, SizeIs(2));
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/rtp_vp9_ref_finder.cc b/third_party/libwebrtc/modules/video_coding/rtp_vp9_ref_finder.cc
new file mode 100644
index 0000000000..175ed3464b
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/rtp_vp9_ref_finder.cc
@@ -0,0 +1,367 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/rtp_vp9_ref_finder.h"
+
+#include <algorithm>
+#include <utility>
+
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+RtpFrameReferenceFinder::ReturnVector RtpVp9RefFinder::ManageFrame(
+ std::unique_ptr<RtpFrameObject> frame) {
+ const RTPVideoHeaderVP9& codec_header = absl::get<RTPVideoHeaderVP9>(
+ frame->GetRtpVideoHeader().video_type_header);
+
+ if (codec_header.temporal_idx != kNoTemporalIdx)
+ frame->SetTemporalIndex(codec_header.temporal_idx);
+ frame->SetSpatialIndex(codec_header.spatial_idx);
+ frame->SetId(codec_header.picture_id & (kFrameIdLength - 1));
+
+ FrameDecision decision;
+ if (codec_header.temporal_idx >= kMaxTemporalLayers ||
+ codec_header.spatial_idx >= kMaxSpatialLayers) {
+ decision = kDrop;
+ } else if (codec_header.flexible_mode) {
+ decision = ManageFrameFlexible(frame.get(), codec_header);
+ } else {
+ if (codec_header.tl0_pic_idx == kNoTl0PicIdx) {
+ RTC_LOG(LS_WARNING) << "TL0PICIDX is expected to be present in "
+ "non-flexible mode.";
+ decision = kDrop;
+ } else {
+ int64_t unwrapped_tl0 =
+ tl0_unwrapper_.Unwrap(codec_header.tl0_pic_idx & 0xFF);
+ decision = ManageFrameGof(frame.get(), codec_header, unwrapped_tl0);
+
+ if (decision == kStash) {
+ if (stashed_frames_.size() > kMaxStashedFrames) {
+ stashed_frames_.pop_back();
+ }
+
+ stashed_frames_.push_front(
+ {.unwrapped_tl0 = unwrapped_tl0, .frame = std::move(frame)});
+ }
+ }
+ }
+
+ RtpFrameReferenceFinder::ReturnVector res;
+ switch (decision) {
+ case kStash:
+ return res;
+ case kHandOff:
+ res.push_back(std::move(frame));
+ RetryStashedFrames(res);
+ return res;
+ case kDrop:
+ return res;
+ }
+
+ return res;
+}
+
+RtpVp9RefFinder::FrameDecision RtpVp9RefFinder::ManageFrameFlexible(
+ RtpFrameObject* frame,
+ const RTPVideoHeaderVP9& codec_header) {
+ if (codec_header.num_ref_pics > EncodedFrame::kMaxFrameReferences) {
+ return kDrop;
+ }
+
+ frame->num_references = codec_header.num_ref_pics;
+ for (size_t i = 0; i < frame->num_references; ++i) {
+ frame->references[i] =
+ Subtract<kFrameIdLength>(frame->Id(), codec_header.pid_diff[i]);
+ }
+
+ FlattenFrameIdAndRefs(frame, codec_header.inter_layer_predicted);
+ return kHandOff;
+}
+
+RtpVp9RefFinder::FrameDecision RtpVp9RefFinder::ManageFrameGof(
+ RtpFrameObject* frame,
+ const RTPVideoHeaderVP9& codec_header,
+ int64_t unwrapped_tl0) {
+ GofInfo* info;
+ if (codec_header.ss_data_available) {
+ if (codec_header.temporal_idx != 0) {
+      RTC_LOG(LS_WARNING) << "Received scalability structure on a non-base "
+                             "layer frame. Scalability structure ignored.";
+ } else {
+ if (codec_header.gof.num_frames_in_gof > kMaxVp9FramesInGof) {
+ return kDrop;
+ }
+
+ for (size_t i = 0; i < codec_header.gof.num_frames_in_gof; ++i) {
+ if (codec_header.gof.num_ref_pics[i] > kMaxVp9RefPics) {
+ return kDrop;
+ }
+ }
+
+ GofInfoVP9 gof = codec_header.gof;
+ if (gof.num_frames_in_gof == 0) {
+        RTC_LOG(LS_WARNING) << "Number of frames in GOF is zero. Assuming "
+                               "the stream has only one temporal layer.";
+ gof.SetGofInfoVP9(kTemporalStructureMode1);
+ }
+
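+      // Store the structure in a ring buffer of the last kMaxGofSaved
+      // structures; `gof_info_` entries point into this buffer.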
+ current_ss_idx_ = Add<kMaxGofSaved>(current_ss_idx_, 1);
+ scalability_structures_[current_ss_idx_] = gof;
+ scalability_structures_[current_ss_idx_].pid_start = frame->Id();
+ gof_info_.emplace(
+ unwrapped_tl0,
+ GofInfo(&scalability_structures_[current_ss_idx_], frame->Id()));
+ }
+
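+    // Without GOF info for this TL0 group (e.g. because the scalability
+    // structure above was ignored) the frame cannot be processed yet.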
+ const auto gof_info_it = gof_info_.find(unwrapped_tl0);
+ if (gof_info_it == gof_info_.end())
+ return kStash;
+
+ info = &gof_info_it->second;
+
+ if (frame->frame_type() == VideoFrameType::kVideoFrameKey) {
+ frame->num_references = 0;
+ FrameReceivedVp9(frame->Id(), info);
+ FlattenFrameIdAndRefs(frame, codec_header.inter_layer_predicted);
+ return kHandOff;
+ }
+ } else if (frame->frame_type() == VideoFrameType::kVideoFrameKey) {
+ if (frame->SpatialIndex() == 0) {
+ RTC_LOG(LS_WARNING) << "Received keyframe without scalability structure";
+ return kDrop;
+ }
+ const auto gof_info_it = gof_info_.find(unwrapped_tl0);
+ if (gof_info_it == gof_info_.end())
+ return kStash;
+
+ info = &gof_info_it->second;
+
+ frame->num_references = 0;
+ FrameReceivedVp9(frame->Id(), info);
+ FlattenFrameIdAndRefs(frame, codec_header.inter_layer_predicted);
+ return kHandOff;
+ } else {
+ auto gof_info_it = gof_info_.find(
+ (codec_header.temporal_idx == 0) ? unwrapped_tl0 - 1 : unwrapped_tl0);
+
+ // Gof info for this frame is not available yet, stash this frame.
+ if (gof_info_it == gof_info_.end())
+ return kStash;
+
+ if (codec_header.temporal_idx == 0) {
+ gof_info_it = gof_info_
+ .emplace(unwrapped_tl0,
+ GofInfo(gof_info_it->second.gof, frame->Id()))
+ .first;
+ }
+
+ info = &gof_info_it->second;
+ }
+
+ // Clean up info for base layers that are too old.
+ int64_t old_tl0_pic_idx = unwrapped_tl0 - kMaxGofSaved;
+ auto clean_gof_info_to = gof_info_.lower_bound(old_tl0_pic_idx);
+ gof_info_.erase(gof_info_.begin(), clean_gof_info_to);
+
+ FrameReceivedVp9(frame->Id(), info);
+
+ // Make sure we don't miss any frame that could potentially have the
+ // up switch flag set.
+ if (MissingRequiredFrameVp9(frame->Id(), *info))
+ return kStash;
+
+ if (codec_header.temporal_up_switch)
+ up_switch_.emplace(frame->Id(), codec_header.temporal_idx);
+
+ // Clean out old info about up switch frames.
+ uint16_t old_picture_id = Subtract<kFrameIdLength>(frame->Id(), 50);
+ auto up_switch_erase_to = up_switch_.lower_bound(old_picture_id);
+ up_switch_.erase(up_switch_.begin(), up_switch_erase_to);
+
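+  // The frame's position within the GOF, counted from the first picture id
+  // of the structure, determines its set of references.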
+ size_t diff =
+ ForwardDiff<uint16_t, kFrameIdLength>(info->gof->pid_start, frame->Id());
+ size_t gof_idx = diff % info->gof->num_frames_in_gof;
+
+ if (info->gof->num_ref_pics[gof_idx] > EncodedFrame::kMaxFrameReferences) {
+ return kDrop;
+ }
+ // Populate references according to the scalability structure.
+ frame->num_references = info->gof->num_ref_pics[gof_idx];
+ for (size_t i = 0; i < frame->num_references; ++i) {
+ frame->references[i] =
+ Subtract<kFrameIdLength>(frame->Id(), info->gof->pid_diff[gof_idx][i]);
+
+ // If this is a reference to a frame earlier than the last up switch point,
+ // then ignore this reference.
+ if (UpSwitchInIntervalVp9(frame->Id(), codec_header.temporal_idx,
+ frame->references[i])) {
+ --frame->num_references;
+ }
+ }
+
+ // Override GOF references.
+ if (!codec_header.inter_pic_predicted) {
+ frame->num_references = 0;
+ }
+
+ FlattenFrameIdAndRefs(frame, codec_header.inter_layer_predicted);
+ return kHandOff;
+}
+
+bool RtpVp9RefFinder::MissingRequiredFrameVp9(uint16_t picture_id,
+ const GofInfo& info) {
+ size_t diff =
+ ForwardDiff<uint16_t, kFrameIdLength>(info.gof->pid_start, picture_id);
+ size_t gof_idx = diff % info.gof->num_frames_in_gof;
+ size_t temporal_idx = info.gof->temporal_idx[gof_idx];
+
+ if (temporal_idx >= kMaxTemporalLayers) {
+ RTC_LOG(LS_WARNING) << "At most " << kMaxTemporalLayers
+ << " temporal "
+ "layers are supported.";
+ return true;
+ }
+
+ // For every reference this frame has, check if there is a frame missing in
+ // the interval (`ref_pid`, `picture_id`) in any of the lower temporal
+ // layers. If so, we are missing a required frame.
+ uint8_t num_references = info.gof->num_ref_pics[gof_idx];
+ for (size_t i = 0; i < num_references; ++i) {
+ uint16_t ref_pid =
+ Subtract<kFrameIdLength>(picture_id, info.gof->pid_diff[gof_idx][i]);
+ for (size_t l = 0; l < temporal_idx; ++l) {
+ auto missing_frame_it = missing_frames_for_layer_[l].lower_bound(ref_pid);
+ if (missing_frame_it != missing_frames_for_layer_[l].end() &&
+ AheadOf<uint16_t, kFrameIdLength>(picture_id, *missing_frame_it)) {
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
+void RtpVp9RefFinder::FrameReceivedVp9(uint16_t picture_id, GofInfo* info) {
+ int last_picture_id = info->last_picture_id;
+ size_t gof_size = std::min(info->gof->num_frames_in_gof, kMaxVp9FramesInGof);
+
+ // If there is a gap, find which temporal layer the missing frames
+ // belong to and add the frame as missing for that temporal layer.
+ // Otherwise, remove this frame from the set of missing frames.
+ if (AheadOf<uint16_t, kFrameIdLength>(picture_id, last_picture_id)) {
+ size_t diff = ForwardDiff<uint16_t, kFrameIdLength>(info->gof->pid_start,
+ last_picture_id);
+ size_t gof_idx = diff % gof_size;
+
+ last_picture_id = Add<kFrameIdLength>(last_picture_id, 1);
+ while (last_picture_id != picture_id) {
+ gof_idx = (gof_idx + 1) % gof_size;
+ RTC_CHECK(gof_idx < kMaxVp9FramesInGof);
+
+ size_t temporal_idx = info->gof->temporal_idx[gof_idx];
+ if (temporal_idx >= kMaxTemporalLayers) {
+ RTC_LOG(LS_WARNING) << "At most " << kMaxTemporalLayers
+ << " temporal "
+ "layers are supported.";
+ return;
+ }
+
+ missing_frames_for_layer_[temporal_idx].insert(last_picture_id);
+ last_picture_id = Add<kFrameIdLength>(last_picture_id, 1);
+ }
+
+ info->last_picture_id = last_picture_id;
+ } else {
+ size_t diff =
+ ForwardDiff<uint16_t, kFrameIdLength>(info->gof->pid_start, picture_id);
+ size_t gof_idx = diff % gof_size;
+ RTC_CHECK(gof_idx < kMaxVp9FramesInGof);
+
+ size_t temporal_idx = info->gof->temporal_idx[gof_idx];
+ if (temporal_idx >= kMaxTemporalLayers) {
+ RTC_LOG(LS_WARNING) << "At most " << kMaxTemporalLayers
+ << " temporal "
+ "layers are supported.";
+ return;
+ }
+
+ missing_frames_for_layer_[temporal_idx].erase(picture_id);
+ }
+}
+
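+// Returns true if a frame on a lower temporal layer than `temporal_idx` had
+// the up switch flag set in the open interval (`pid_ref`, `picture_id`).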
+bool RtpVp9RefFinder::UpSwitchInIntervalVp9(uint16_t picture_id,
+ uint8_t temporal_idx,
+ uint16_t pid_ref) {
+ for (auto up_switch_it = up_switch_.upper_bound(pid_ref);
+ up_switch_it != up_switch_.end() &&
+ AheadOf<uint16_t, kFrameIdLength>(picture_id, up_switch_it->first);
+ ++up_switch_it) {
+ if (up_switch_it->second < temporal_idx)
+ return true;
+ }
+
+ return false;
+}
+
+void RtpVp9RefFinder::RetryStashedFrames(
+ RtpFrameReferenceFinder::ReturnVector& res) {
+ bool complete_frame = false;
+ do {
+ complete_frame = false;
+ for (auto it = stashed_frames_.begin(); it != stashed_frames_.end();) {
+ const RTPVideoHeaderVP9& codec_header = absl::get<RTPVideoHeaderVP9>(
+ it->frame->GetRtpVideoHeader().video_type_header);
+ RTC_DCHECK(!codec_header.flexible_mode);
+ FrameDecision decision =
+ ManageFrameGof(it->frame.get(), codec_header, it->unwrapped_tl0);
+
+ switch (decision) {
+ case kStash:
+ ++it;
+ break;
+ case kHandOff:
+ complete_frame = true;
+ res.push_back(std::move(it->frame));
+ [[fallthrough]];
+ case kDrop:
+ it = stashed_frames_.erase(it);
+ }
+ }
+ } while (complete_frame);
+}
+
+void RtpVp9RefFinder::FlattenFrameIdAndRefs(RtpFrameObject* frame,
+ bool inter_layer_predicted) {
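+  // Map (unwrapped picture id, spatial layer) onto a single frame id space:
+  // id * kMaxSpatialLayers + spatial_idx. An inter-layer predicted frame
+  // additionally references the frame directly below it, which under this
+  // mapping is this frame's id minus one.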
+ for (size_t i = 0; i < frame->num_references; ++i) {
+ frame->references[i] =
+ unwrapper_.Unwrap(frame->references[i]) * kMaxSpatialLayers +
+ *frame->SpatialIndex();
+ }
+ frame->SetId(unwrapper_.Unwrap(frame->Id()) * kMaxSpatialLayers +
+ *frame->SpatialIndex());
+
+ if (inter_layer_predicted &&
+ frame->num_references + 1 <= EncodedFrame::kMaxFrameReferences) {
+ frame->references[frame->num_references] = frame->Id() - 1;
+ ++frame->num_references;
+ }
+}
+
+void RtpVp9RefFinder::ClearTo(uint16_t seq_num) {
+ auto it = stashed_frames_.begin();
+ while (it != stashed_frames_.end()) {
+ if (AheadOf<uint16_t>(seq_num, it->frame->first_seq_num())) {
+ it = stashed_frames_.erase(it);
+ } else {
+ ++it;
+ }
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/rtp_vp9_ref_finder.h b/third_party/libwebrtc/modules/video_coding/rtp_vp9_ref_finder.h
new file mode 100644
index 0000000000..2971f686b1
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/rtp_vp9_ref_finder.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_RTP_VP9_REF_FINDER_H_
+#define MODULES_VIDEO_CODING_RTP_VP9_REF_FINDER_H_
+
+#include <deque>
+#include <map>
+#include <memory>
+#include <set>
+
+#include "absl/container/inlined_vector.h"
+#include "modules/video_coding/frame_object.h"
+#include "modules/video_coding/rtp_frame_reference_finder.h"
+#include "rtc_base/numerics/sequence_number_util.h"
+
+namespace webrtc {
+
+class RtpVp9RefFinder {
+ public:
+ RtpVp9RefFinder() = default;
+
+ RtpFrameReferenceFinder::ReturnVector ManageFrame(
+ std::unique_ptr<RtpFrameObject> frame);
+ void ClearTo(uint16_t seq_num);
+
+ private:
+ static constexpr int kFrameIdLength = 1 << 15;
+ static constexpr int kMaxGofSaved = 50;
+ static constexpr int kMaxLayerInfo = 50;
+ static constexpr int kMaxNotYetReceivedFrames = 100;
+ static constexpr int kMaxStashedFrames = 100;
+ static constexpr int kMaxTemporalLayers = 5;
+
+ enum FrameDecision { kStash, kHandOff, kDrop };
+
+ struct GofInfo {
+ GofInfo(GofInfoVP9* gof, uint16_t last_picture_id)
+ : gof(gof), last_picture_id(last_picture_id) {}
+ GofInfoVP9* gof;
+ uint16_t last_picture_id;
+ };
+
+ struct UnwrappedTl0Frame {
+ int64_t unwrapped_tl0;
+ std::unique_ptr<RtpFrameObject> frame;
+ };
+
+ FrameDecision ManageFrameFlexible(RtpFrameObject* frame,
+ const RTPVideoHeaderVP9& vp9_header);
+ FrameDecision ManageFrameGof(RtpFrameObject* frame,
+ const RTPVideoHeaderVP9& vp9_header,
+ int64_t unwrapped_tl0);
+ void RetryStashedFrames(RtpFrameReferenceFinder::ReturnVector& res);
+
+ bool MissingRequiredFrameVp9(uint16_t picture_id, const GofInfo& info);
+
+ void FrameReceivedVp9(uint16_t picture_id, GofInfo* info);
+ bool UpSwitchInIntervalVp9(uint16_t picture_id,
+ uint8_t temporal_idx,
+ uint16_t pid_ref);
+
+ void FlattenFrameIdAndRefs(RtpFrameObject* frame, bool inter_layer_predicted);
+
+ // Frames that have been fully received but didn't have all the information
+ // needed to determine their references.
+ std::deque<UnwrappedTl0Frame> stashed_frames_;
+
+  // Index of the current scalability structure in the
+  // `scalability_structures_` array.
+ uint8_t current_ss_idx_ = 0;
+
+ // Holds received scalability structures.
+ std::array<GofInfoVP9, kMaxGofSaved> scalability_structures_;
+
+  // Holds the GOF information for a given unwrapped TL0 picture index.
+ std::map<int64_t, GofInfo> gof_info_;
+
+  // Keeps track of the picture id and temporal layer of frames that had the
+  // up switch flag set.
+ std::map<uint16_t, uint8_t, DescendingSeqNumComp<uint16_t, kFrameIdLength>>
+ up_switch_;
+
+  // For every temporal layer, keep a set of the frames that are missing.
+ std::array<std::set<uint16_t, DescendingSeqNumComp<uint16_t, kFrameIdLength>>,
+ kMaxTemporalLayers>
+ missing_frames_for_layer_;
+
+  // Unwrapper used to unwrap the picture ids of the VP9 stream into
+  // continuously increasing frame ids.
+ SeqNumUnwrapper<uint16_t, kFrameIdLength> unwrapper_;
+
+ SeqNumUnwrapper<uint8_t> tl0_unwrapper_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_RTP_VP9_REF_FINDER_H_
diff --git a/third_party/libwebrtc/modules/video_coding/rtp_vp9_ref_finder_unittest.cc b/third_party/libwebrtc/modules/video_coding/rtp_vp9_ref_finder_unittest.cc
new file mode 100644
index 0000000000..66b284f020
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/rtp_vp9_ref_finder_unittest.cc
@@ -0,0 +1,719 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <utility>
+#include <vector>
+
+#include "modules/video_coding/frame_object.h"
+#include "modules/video_coding/rtp_vp9_ref_finder.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+using ::testing::Contains;
+using ::testing::Matcher;
+using ::testing::MatcherInterface;
+using ::testing::Matches;
+using ::testing::MatchResultListener;
+using ::testing::Pointee;
+using ::testing::Property;
+using ::testing::SizeIs;
+using ::testing::UnorderedElementsAreArray;
+
+namespace webrtc {
+
+namespace {
+class Frame {
+ public:
+ Frame& SeqNum(uint16_t start, uint16_t end) {
+ seq_num_start = start;
+ seq_num_end = end;
+ return *this;
+ }
+
+ Frame& AsKeyFrame(bool is_keyframe = true) {
+ keyframe = is_keyframe;
+ return *this;
+ }
+
+ Frame& Pid(int pid) {
+ picture_id = pid;
+ return *this;
+ }
+
+ Frame& SidAndTid(int sid, int tid) {
+ spatial_id = sid;
+ temporal_id = tid;
+ return *this;
+ }
+
+ Frame& Tl0(int tl0) {
+ tl0_idx = tl0;
+ return *this;
+ }
+
+ Frame& AsUpswitch(bool is_up = true) {
+ up_switch = is_up;
+ return *this;
+ }
+
+ Frame& AsInterLayer(bool is_inter_layer = true) {
+ inter_layer = is_inter_layer;
+ return *this;
+ }
+
+ Frame& NotAsInterPic(bool is_inter_pic = false) {
+ inter_pic = is_inter_pic;
+ return *this;
+ }
+
+ Frame& Gof(GofInfoVP9* ss) {
+ scalability_structure = ss;
+ return *this;
+ }
+
+ Frame& FlexRefs(const std::vector<uint8_t>& refs) {
+ flex_refs = refs;
+ return *this;
+ }
+
+ operator std::unique_ptr<RtpFrameObject>() {
+ RTPVideoHeaderVP9 vp9_header{};
+ vp9_header.picture_id = *picture_id;
+ vp9_header.temporal_idx = *temporal_id;
+ vp9_header.spatial_idx = *spatial_id;
+ if (tl0_idx.has_value()) {
+ RTC_DCHECK(flex_refs.empty());
+ vp9_header.flexible_mode = false;
+ vp9_header.tl0_pic_idx = *tl0_idx;
+ } else {
+ vp9_header.flexible_mode = true;
+ vp9_header.num_ref_pics = flex_refs.size();
+ for (size_t i = 0; i < flex_refs.size(); ++i) {
+ vp9_header.pid_diff[i] = flex_refs.at(i);
+ }
+ }
+ vp9_header.temporal_up_switch = up_switch;
+ vp9_header.inter_layer_predicted = inter_layer;
+ vp9_header.inter_pic_predicted = inter_pic && !keyframe;
+ if (scalability_structure != nullptr) {
+ vp9_header.ss_data_available = true;
+ vp9_header.gof = *scalability_structure;
+ }
+
+ RTPVideoHeader video_header;
+ video_header.frame_type = keyframe ? VideoFrameType::kVideoFrameKey
+ : VideoFrameType::kVideoFrameDelta;
+ video_header.video_type_header = vp9_header;
+ // clang-format off
+ return std::make_unique<RtpFrameObject>(
+ seq_num_start,
+ seq_num_end,
+ /*markerBit=*/true,
+ /*times_nacked=*/0,
+ /*first_packet_received_time=*/0,
+ /*last_packet_received_time=*/0,
+ /*rtp_timestamp=*/0,
+ /*ntp_time_ms=*/0,
+ VideoSendTiming(),
+ /*payload_type=*/0,
+ kVideoCodecVP9,
+ kVideoRotation_0,
+ VideoContentType::UNSPECIFIED,
+ video_header,
+ /*color_space=*/absl::nullopt,
+ RtpPacketInfos(),
+ EncodedImageBuffer::Create(/*size=*/0));
+ // clang-format on
+ }
+
+ private:
+ uint16_t seq_num_start = 0;
+ uint16_t seq_num_end = 0;
+ bool keyframe = false;
+ absl::optional<int> picture_id;
+ absl::optional<int> spatial_id;
+ absl::optional<int> temporal_id;
+ absl::optional<int> tl0_idx;
+ bool up_switch = false;
+ bool inter_layer = false;
+ bool inter_pic = true;
+ GofInfoVP9* scalability_structure = nullptr;
+ std::vector<uint8_t> flex_refs;
+};
+
+using FrameVector = std::vector<std::unique_ptr<EncodedFrame>>;
+
+// Would have been nice to use the MATCHER_P3 macro instead, but when used it
+// fails to infer the type of the vector if not explicitly given in the
+// EXPECT_THAT() call.
+class HasFrameMatcher : public MatcherInterface<const FrameVector&> {
+ public:
+ explicit HasFrameMatcher(int64_t frame_id,
+ const std::vector<int64_t>& expected_refs)
+ : frame_id_(frame_id),
+ expected_refs_(expected_refs) {}
+
+ bool MatchAndExplain(const FrameVector& frames,
+ MatchResultListener* result_listener) const override {
+ auto it = std::find_if(frames.begin(), frames.end(),
+ [this](const std::unique_ptr<EncodedFrame>& f) {
+ return f->Id() == frame_id_;
+ });
+ if (it == frames.end()) {
+ if (result_listener->IsInterested()) {
+ *result_listener << "No frame with frame_id:" << frame_id_;
+ }
+ return false;
+ }
+
+ rtc::ArrayView<int64_t> actual_refs((*it)->references,
+ (*it)->num_references);
+ if (!Matches(UnorderedElementsAreArray(expected_refs_))(actual_refs)) {
+ if (result_listener->IsInterested()) {
+ *result_listener << "Frame with frame_id:" << frame_id_ << " and "
+ << actual_refs.size() << " references { ";
+ for (auto r : actual_refs) {
+ *result_listener << r << " ";
+ }
+ *result_listener << "}";
+ }
+ return false;
+ }
+
+ return true;
+ }
+
+ void DescribeTo(std::ostream* os) const override {
+ *os << "frame with frame_id:" << frame_id_ << " and "
+ << expected_refs_.size() << " references { ";
+ for (auto r : expected_refs_) {
+ *os << r << " ";
+ }
+ *os << "}";
+ }
+
+ private:
+ const int64_t frame_id_;
+ const std::vector<int64_t> expected_refs_;
+};
+
+} // namespace
+
+class RtpVp9RefFinderTest : public ::testing::Test {
+ protected:
+ RtpVp9RefFinderTest() : ref_finder_(std::make_unique<RtpVp9RefFinder>()) {}
+
+ void Insert(std::unique_ptr<RtpFrameObject> frame) {
+ for (auto& f : ref_finder_->ManageFrame(std::move(frame))) {
+ frames_.push_back(std::move(f));
+ }
+ }
+
+ std::unique_ptr<RtpVp9RefFinder> ref_finder_;
+ FrameVector frames_;
+};
+
+Matcher<const FrameVector&> HasFrameWithIdAndRefs(int64_t frame_id,
+ std::vector<int64_t> refs) {
+ return MakeMatcher(new HasFrameMatcher(frame_id, refs));
+}
+
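+// Note: the frame ids checked below are the flattened ids produced by the
+// ref finder, i.e. picture_id * kMaxSpatialLayers + spatial_idx (with
+// kMaxSpatialLayers = 5), so Pid(1) on spatial layer 0 appears as id 5.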
+TEST_F(RtpVp9RefFinderTest, GofInsertOneFrame) {
+ GofInfoVP9 ss;
+ ss.SetGofInfoVP9(kTemporalStructureMode1);
+
+ Insert(Frame().Pid(1).SidAndTid(0, 0).Tl0(0).AsKeyFrame().Gof(&ss));
+
+ EXPECT_EQ(frames_.size(), 1UL);
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(5, {}));
+}
+
+TEST_F(RtpVp9RefFinderTest, GofTemporalLayers_0) {
+ GofInfoVP9 ss;
+ ss.SetGofInfoVP9(kTemporalStructureMode1); // Only 1 spatial layer.
+
+ Insert(Frame().Pid(1).SidAndTid(0, 0).Tl0(0).AsKeyFrame().Gof(&ss));
+ Insert(Frame().Pid(2).SidAndTid(0, 0).Tl0(1));
+
+ EXPECT_EQ(frames_.size(), 2UL);
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(5, {}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(10, {5}));
+}
+
+TEST_F(RtpVp9RefFinderTest, GofSpatialLayers_2) {
+ GofInfoVP9 ss;
+ ss.SetGofInfoVP9(kTemporalStructureMode1); // Only 1 spatial layer.
+
+ Insert(Frame().Pid(1).SidAndTid(0, 0).Tl0(0).AsKeyFrame().Gof(&ss));
+ Insert(Frame().Pid(2).SidAndTid(0, 0).Tl0(1));
+ Insert(Frame().Pid(2).SidAndTid(1, 0).Tl0(1).NotAsInterPic());
+ Insert(Frame().Pid(3).SidAndTid(0, 0).Tl0(2));
+ Insert(Frame().Pid(3).SidAndTid(1, 0).Tl0(2));
+
+ EXPECT_EQ(frames_.size(), 5UL);
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(5, {}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(10, {5}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(11, {}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(15, {10}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(16, {11}));
+}
+
+TEST_F(RtpVp9RefFinderTest, GofTemporalLayersReordered_0) {
+ GofInfoVP9 ss;
+ ss.SetGofInfoVP9(kTemporalStructureMode1); // Only 1 spatial layer.
+
+ Insert(Frame().Pid(2).SidAndTid(0, 0).Tl0(1));
+ Insert(Frame().Pid(2).SidAndTid(1, 0).Tl0(1).NotAsInterPic());
+ Insert(Frame().Pid(1).SidAndTid(0, 0).Tl0(0).AsKeyFrame().Gof(&ss));
+ Insert(Frame().Pid(3).SidAndTid(0, 0).Tl0(2));
+ Insert(Frame().Pid(3).SidAndTid(1, 0).Tl0(2));
+ Insert(Frame().Pid(4).SidAndTid(0, 0).Tl0(3));
+ Insert(Frame().Pid(5).SidAndTid(1, 0).Tl0(4));
+ Insert(Frame().Pid(4).SidAndTid(1, 0).Tl0(3));
+ Insert(Frame().Pid(5).SidAndTid(0, 0).Tl0(4));
+
+ EXPECT_EQ(frames_.size(), 9UL);
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(5, {}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(10, {5}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(11, {}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(15, {10}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(16, {11}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(20, {15}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(21, {16}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(25, {20}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(26, {21}));
+}
+
+TEST_F(RtpVp9RefFinderTest, GofSkipFramesTemporalLayers_01) {
+ GofInfoVP9 ss;
+ ss.SetGofInfoVP9(kTemporalStructureMode2); // 0101 pattern
+
+ Insert(Frame().Pid(0).SidAndTid(0, 0).Tl0(0).AsKeyFrame().NotAsInterPic().Gof(
+ &ss));
+ Insert(Frame().Pid(1).SidAndTid(0, 1).Tl0(0));
+ // Skip GOF with tl0 1
+ Insert(Frame().Pid(4).SidAndTid(0, 0).Tl0(2).AsKeyFrame().Gof(&ss));
+ Insert(Frame().Pid(5).SidAndTid(0, 1).Tl0(2));
+ // Skip GOF with tl0 3
+ // Skip GOF with tl0 4
+ Insert(Frame().Pid(10).SidAndTid(0, 0).Tl0(5).Gof(&ss));
+ Insert(Frame().Pid(11).SidAndTid(0, 1).Tl0(5));
+
+ ASSERT_EQ(6UL, frames_.size());
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(0, {}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(5, {0}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(20, {}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(25, {20}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(50, {40}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(55, {50}));
+}
+
+TEST_F(RtpVp9RefFinderTest, GofSkipFramesTemporalLayers_0212) {
+ GofInfoVP9 ss;
+ ss.SetGofInfoVP9(kTemporalStructureMode3); // 02120212 pattern
+
+ Insert(Frame().Pid(0).SidAndTid(0, 0).Tl0(0).AsKeyFrame().NotAsInterPic().Gof(
+ &ss));
+ Insert(Frame().Pid(1).SidAndTid(0, 2).Tl0(0));
+ Insert(Frame().Pid(2).SidAndTid(0, 1).Tl0(0));
+ Insert(Frame().Pid(3).SidAndTid(0, 2).Tl0(0));
+
+ ASSERT_EQ(4UL, frames_.size());
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(0, {}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(5, {0}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(10, {0}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(15, {10}));
+
+ // Skip frames with tl0 = 1
+
+ Insert(Frame().Pid(8).SidAndTid(0, 0).Tl0(2).AsKeyFrame().NotAsInterPic().Gof(
+ &ss));
+ Insert(Frame().Pid(9).SidAndTid(0, 2).Tl0(2));
+ Insert(Frame().Pid(10).SidAndTid(0, 1).Tl0(2));
+ Insert(Frame().Pid(11).SidAndTid(0, 2).Tl0(2));
+
+ ASSERT_EQ(8UL, frames_.size());
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(40, {}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(45, {40}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(50, {40}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(55, {50}));
+
+ // Now insert frames with tl0 = 1
+ Insert(Frame().Pid(4).SidAndTid(0, 0).Tl0(1).AsKeyFrame().Gof(&ss));
+ Insert(Frame().Pid(7).SidAndTid(0, 2).Tl0(1));
+
+ ASSERT_EQ(9UL, frames_.size());
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(20, {}));
+
+ Insert(Frame().Pid(5).SidAndTid(0, 2).Tl0(1));
+ Insert(Frame().Pid(6).SidAndTid(0, 1).Tl0(1));
+
+ ASSERT_EQ(12UL, frames_.size());
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(25, {20}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(30, {20}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(35, {30}));
+}
+
+TEST_F(RtpVp9RefFinderTest, GofTemporalLayers_01) {
+ GofInfoVP9 ss;
+ ss.SetGofInfoVP9(kTemporalStructureMode2); // 0101 pattern
+
+ Insert(Frame().Pid(0).SidAndTid(0, 0).Tl0(0).AsKeyFrame().NotAsInterPic().Gof(
+ &ss));
+ Insert(Frame().Pid(1).SidAndTid(0, 1).Tl0(0));
+ Insert(Frame().Pid(2).SidAndTid(0, 0).Tl0(1));
+ Insert(Frame().Pid(3).SidAndTid(0, 1).Tl0(1));
+
+ ASSERT_EQ(4UL, frames_.size());
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(0, {}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(5, {0}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(10, {0}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(15, {10}));
+}
+
+TEST_F(RtpVp9RefFinderTest, GofTemporalLayersReordered_01) {
+ GofInfoVP9 ss;
+ ss.SetGofInfoVP9(kTemporalStructureMode2); // 01 pattern
+
+ Insert(Frame().Pid(1).SidAndTid(0, 1).Tl0(0));
+ Insert(Frame().Pid(0).SidAndTid(0, 0).Tl0(0).AsKeyFrame().NotAsInterPic().Gof(
+ &ss));
+ Insert(Frame().Pid(2).SidAndTid(0, 0).Tl0(1));
+ Insert(Frame().Pid(4).SidAndTid(0, 0).Tl0(2));
+ Insert(Frame().Pid(3).SidAndTid(0, 1).Tl0(1));
+ Insert(Frame().Pid(5).SidAndTid(0, 1).Tl0(2));
+ Insert(Frame().Pid(7).SidAndTid(0, 1).Tl0(3));
+ Insert(Frame().Pid(6).SidAndTid(0, 0).Tl0(3));
+ Insert(Frame().Pid(8).SidAndTid(0, 0).Tl0(4));
+ Insert(Frame().Pid(9).SidAndTid(0, 1).Tl0(4));
+
+ ASSERT_EQ(10UL, frames_.size());
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(0, {}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(5, {0}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(10, {0}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(15, {10}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(20, {10}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(25, {20}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(30, {20}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(35, {30}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(40, {30}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(45, {40}));
+}
+
+TEST_F(RtpVp9RefFinderTest, GofTemporalLayers_0212) {
+ GofInfoVP9 ss;
+ ss.SetGofInfoVP9(kTemporalStructureMode3); // 0212 pattern
+
+ Insert(Frame().Pid(0).SidAndTid(0, 0).Tl0(0).AsKeyFrame().NotAsInterPic().Gof(
+ &ss));
+ Insert(Frame().Pid(1).SidAndTid(0, 2).Tl0(0));
+ Insert(Frame().Pid(2).SidAndTid(0, 1).Tl0(0));
+ Insert(Frame().Pid(3).SidAndTid(0, 2).Tl0(0));
+ Insert(Frame().Pid(4).SidAndTid(0, 0).Tl0(1));
+ Insert(Frame().Pid(5).SidAndTid(0, 2).Tl0(1));
+ Insert(Frame().Pid(6).SidAndTid(0, 1).Tl0(1));
+ Insert(Frame().Pid(7).SidAndTid(0, 2).Tl0(1));
+
+ ASSERT_EQ(8UL, frames_.size());
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(0, {}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(5, {0}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(10, {0}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(15, {10}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(20, {0}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(25, {20}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(30, {20}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(35, {30}));
+}
+
+TEST_F(RtpVp9RefFinderTest, GofTemporalLayersReordered_0212) {
+ GofInfoVP9 ss;
+ ss.SetGofInfoVP9(kTemporalStructureMode3); // 0212 pattern
+
+ Insert(Frame().Pid(2).SidAndTid(0, 1).Tl0(0));
+ Insert(Frame().Pid(1).SidAndTid(0, 2).Tl0(0));
+ Insert(Frame().Pid(0).SidAndTid(0, 0).Tl0(0).AsKeyFrame().NotAsInterPic().Gof(
+ &ss));
+ Insert(Frame().Pid(3).SidAndTid(0, 2).Tl0(0));
+ Insert(Frame().Pid(6).SidAndTid(0, 1).Tl0(1));
+ Insert(Frame().Pid(5).SidAndTid(0, 2).Tl0(1));
+ Insert(Frame().Pid(4).SidAndTid(0, 0).Tl0(1));
+ Insert(Frame().Pid(9).SidAndTid(0, 2).Tl0(2));
+ Insert(Frame().Pid(7).SidAndTid(0, 2).Tl0(1));
+ Insert(Frame().Pid(8).SidAndTid(0, 0).Tl0(2));
+ Insert(Frame().Pid(11).SidAndTid(0, 2).Tl0(2));
+ Insert(Frame().Pid(10).SidAndTid(0, 1).Tl0(2));
+
+ ASSERT_EQ(12UL, frames_.size());
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(0, {}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(5, {0}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(10, {0}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(15, {10}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(20, {0}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(25, {20}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(30, {20}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(35, {30}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(40, {20}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(45, {40}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(50, {40}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(55, {50}));
+}
+
+TEST_F(RtpVp9RefFinderTest, GofTemporalLayersUpSwitch_02120212) {
+ GofInfoVP9 ss;
+ ss.SetGofInfoVP9(kTemporalStructureMode4); // 02120212 pattern
+
+ Insert(Frame().Pid(0).SidAndTid(0, 0).Tl0(0).AsKeyFrame().NotAsInterPic().Gof(
+ &ss));
+ Insert(Frame().Pid(1).SidAndTid(0, 2).Tl0(0));
+ Insert(Frame().Pid(2).SidAndTid(0, 1).Tl0(0));
+ Insert(Frame().Pid(3).SidAndTid(0, 2).Tl0(0));
+ Insert(Frame().Pid(4).SidAndTid(0, 0).Tl0(1));
+ Insert(Frame().Pid(5).SidAndTid(0, 2).Tl0(1));
+ Insert(Frame().Pid(6).SidAndTid(0, 1).Tl0(1).AsUpswitch());
+ Insert(Frame().Pid(7).SidAndTid(0, 2).Tl0(1));
+ Insert(Frame().Pid(8).SidAndTid(0, 0).Tl0(2).AsUpswitch());
+ Insert(Frame().Pid(9).SidAndTid(0, 2).Tl0(2));
+ Insert(Frame().Pid(10).SidAndTid(0, 1).Tl0(2));
+ Insert(Frame().Pid(11).SidAndTid(0, 2).Tl0(2).AsUpswitch());
+ Insert(Frame().Pid(12).SidAndTid(0, 0).Tl0(3));
+ Insert(Frame().Pid(13).SidAndTid(0, 2).Tl0(3));
+ Insert(Frame().Pid(14).SidAndTid(0, 1).Tl0(3));
+ Insert(Frame().Pid(15).SidAndTid(0, 2).Tl0(3));
+
+ ASSERT_EQ(16UL, frames_.size());
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(0, {}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(5, {0}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(10, {0}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(15, {5, 10}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(20, {0}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(25, {15, 20}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(30, {10, 20}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(35, {30}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(40, {20}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(45, {40}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(50, {40}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(55, {45, 50}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(60, {40}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(65, {55, 60}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(70, {50, 60}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(75, {65, 70}));
+}
+
+TEST_F(RtpVp9RefFinderTest, GofTemporalLayersUpSwitchReordered_02120212) {
+ GofInfoVP9 ss;
+ ss.SetGofInfoVP9(kTemporalStructureMode4); // 02120212 pattern
+
+ Insert(Frame().Pid(1).SidAndTid(0, 2).Tl0(0));
+ Insert(Frame().Pid(0).SidAndTid(0, 0).Tl0(0).AsKeyFrame().NotAsInterPic().Gof(
+ &ss));
+ Insert(Frame().Pid(4).SidAndTid(0, 0).Tl0(1));
+ Insert(Frame().Pid(2).SidAndTid(0, 1).Tl0(0));
+ Insert(Frame().Pid(5).SidAndTid(0, 2).Tl0(1));
+ Insert(Frame().Pid(3).SidAndTid(0, 2).Tl0(0));
+ Insert(Frame().Pid(7).SidAndTid(0, 2).Tl0(1));
+ Insert(Frame().Pid(9).SidAndTid(0, 2).Tl0(2));
+ Insert(Frame().Pid(6).SidAndTid(0, 1).Tl0(1).AsUpswitch());
+ Insert(Frame().Pid(12).SidAndTid(0, 0).Tl0(3));
+ Insert(Frame().Pid(10).SidAndTid(0, 1).Tl0(2));
+ Insert(Frame().Pid(8).SidAndTid(0, 0).Tl0(2).AsUpswitch());
+ Insert(Frame().Pid(11).SidAndTid(0, 2).Tl0(2).AsUpswitch());
+ Insert(Frame().Pid(13).SidAndTid(0, 2).Tl0(3));
+ Insert(Frame().Pid(15).SidAndTid(0, 2).Tl0(3));
+ Insert(Frame().Pid(14).SidAndTid(0, 1).Tl0(3));
+
+ ASSERT_EQ(16UL, frames_.size());
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(0, {}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(5, {0}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(10, {0}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(15, {5, 10}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(20, {0}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(25, {15, 20}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(30, {10, 20}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(35, {30}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(40, {20}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(45, {40}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(50, {40}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(55, {45, 50}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(60, {40}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(65, {55, 60}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(70, {50, 60}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(75, {65, 70}));
+}
+
+TEST_F(RtpVp9RefFinderTest, GofTemporalLayersReordered_01_0212) {
+ GofInfoVP9 ss;
+ ss.SetGofInfoVP9(kTemporalStructureMode2); // 01 pattern
+
+ Insert(Frame().Pid(1).SidAndTid(0, 1).Tl0(0));
+ Insert(Frame().Pid(0).SidAndTid(0, 0).Tl0(0).AsKeyFrame().NotAsInterPic().Gof(
+ &ss));
+ Insert(Frame().Pid(3).SidAndTid(0, 1).Tl0(1));
+ Insert(Frame().Pid(6).SidAndTid(0, 1).Tl0(2));
+ ss.SetGofInfoVP9(kTemporalStructureMode3); // 0212 pattern
+ Insert(Frame().Pid(4).SidAndTid(0, 0).Tl0(2).Gof(&ss));
+ Insert(Frame().Pid(2).SidAndTid(0, 0).Tl0(1));
+ Insert(Frame().Pid(5).SidAndTid(0, 2).Tl0(2));
+ Insert(Frame().Pid(8).SidAndTid(0, 0).Tl0(3));
+ Insert(Frame().Pid(10).SidAndTid(0, 1).Tl0(3));
+ Insert(Frame().Pid(7).SidAndTid(0, 2).Tl0(2));
+ Insert(Frame().Pid(11).SidAndTid(0, 2).Tl0(3));
+ Insert(Frame().Pid(9).SidAndTid(0, 2).Tl0(3));
+
+ ASSERT_EQ(12UL, frames_.size());
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(0, {}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(5, {0}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(10, {0}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(15, {10}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(20, {0}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(25, {20}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(30, {20}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(35, {30}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(40, {20}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(45, {40}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(50, {40}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(55, {50}));
+}
+
+TEST_F(RtpVp9RefFinderTest, FlexibleModeOneFrame) {
+ Insert(Frame().Pid(0).SidAndTid(0, 0).AsKeyFrame());
+
+ ASSERT_EQ(1UL, frames_.size());
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(0, {}));
+}
+
+TEST_F(RtpVp9RefFinderTest, FlexibleModeTwoSpatialLayers) {
+ Insert(Frame().Pid(0).SidAndTid(0, 0).AsKeyFrame());
+ Insert(Frame().Pid(0).SidAndTid(1, 0).AsKeyFrame().AsInterLayer());
+ Insert(Frame().Pid(1).SidAndTid(1, 0).FlexRefs({1}));
+ Insert(Frame().Pid(2).SidAndTid(0, 0).FlexRefs({2}));
+ Insert(Frame().Pid(2).SidAndTid(1, 0).FlexRefs({1}));
+ Insert(Frame().Pid(3).SidAndTid(1, 0).FlexRefs({1}));
+ Insert(Frame().Pid(4).SidAndTid(0, 0).FlexRefs({2}));
+ Insert(Frame().Pid(4).SidAndTid(1, 0).FlexRefs({1}));
+
+ ASSERT_EQ(8UL, frames_.size());
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(0, {}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(1, {0}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(6, {1}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(10, {0}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(11, {6}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(16, {11}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(20, {10}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(21, {16}));
+}
+
+TEST_F(RtpVp9RefFinderTest, FlexibleModeTwoSpatialLayersReordered) {
+ Insert(Frame().Pid(0).SidAndTid(1, 0).AsKeyFrame().AsInterLayer());
+ Insert(Frame().Pid(1).SidAndTid(1, 0).FlexRefs({1}));
+ Insert(Frame().Pid(0).SidAndTid(0, 0).AsKeyFrame());
+ Insert(Frame().Pid(2).SidAndTid(1, 0).FlexRefs({1}));
+ Insert(Frame().Pid(3).SidAndTid(1, 0).FlexRefs({1}));
+ Insert(Frame().Pid(2).SidAndTid(0, 0).FlexRefs({2}));
+ Insert(Frame().Pid(4).SidAndTid(1, 0).FlexRefs({1}));
+ Insert(Frame().Pid(4).SidAndTid(0, 0).FlexRefs({2}));
+
+ ASSERT_EQ(8UL, frames_.size());
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(0, {}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(1, {0}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(6, {1}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(10, {0}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(11, {6}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(16, {11}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(20, {10}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(21, {16}));
+}
+
+TEST_F(RtpVp9RefFinderTest, WrappingFlexReference) {
+ Insert(Frame().Pid(0).SidAndTid(0, 0).FlexRefs({1}));
+
+ ASSERT_EQ(1UL, frames_.size());
+ const EncodedFrame& frame = *frames_[0];
+
+ ASSERT_EQ(frame.Id() - frame.references[0], 5);
+}
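The assertion above pins down the frame-id layout these tests assume: a picture-id diff of 1 moves the frame id by 5. A minimal sketch of that arithmetic, with the multiplier inferred from the expectations in this file rather than from a documented API (ExpectedFrameId is a hypothetical helper):

int64_t ExpectedFrameId(int64_t unwrapped_pid, int spatial_id) {
  // Inferred mapping: picture ids are spread by 5 so each spatial layer
  // gets a distinct frame id (see FlexibleModeTwoSpatialLayers above).
  return unwrapped_pid * 5 + spatial_id;
}

With this mapping, a flexible-mode reference with pid diff 1 yields Id() - references[0] == 5, which is exactly what the assertion checks.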
+
+TEST_F(RtpVp9RefFinderTest, GofPidJump) {
+ GofInfoVP9 ss;
+ ss.SetGofInfoVP9(kTemporalStructureMode3);
+
+ Insert(Frame().Pid(0).SidAndTid(0, 0).Tl0(0).AsKeyFrame().NotAsInterPic().Gof(
+ &ss));
+ Insert(Frame().Pid(1000).SidAndTid(0, 0).Tl0(1));
+}
+
+TEST_F(RtpVp9RefFinderTest, GofTl0Jump) {
+ GofInfoVP9 ss;
+ ss.SetGofInfoVP9(kTemporalStructureMode3);
+
+ Insert(Frame()
+ .Pid(0)
+ .SidAndTid(0, 0)
+ .Tl0(125)
+ .AsUpswitch()
+ .AsKeyFrame()
+ .NotAsInterPic()
+ .Gof(&ss));
+ Insert(Frame().Pid(1).SidAndTid(0, 0).Tl0(0).Gof(&ss));
+}
+
+TEST_F(RtpVp9RefFinderTest, GofTidTooHigh) {
+ const int kMaxTemporalLayers = 5;
+ GofInfoVP9 ss;
+ ss.SetGofInfoVP9(kTemporalStructureMode2);
+ ss.temporal_idx[1] = kMaxTemporalLayers;
+
+ Insert(Frame().Pid(0).SidAndTid(0, 0).Tl0(0).AsKeyFrame().NotAsInterPic().Gof(
+ &ss));
+ Insert(Frame().Pid(1).SidAndTid(0, 0).Tl0(1));
+
+ ASSERT_EQ(1UL, frames_.size());
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(0, {}));
+}
+
+TEST_F(RtpVp9RefFinderTest, GofZeroFrames) {
+ GofInfoVP9 ss;
+ ss.num_frames_in_gof = 0;
+
+ Insert(Frame().Pid(0).SidAndTid(0, 0).Tl0(0).AsKeyFrame().NotAsInterPic().Gof(
+ &ss));
+ Insert(Frame().Pid(1).SidAndTid(0, 0).Tl0(1));
+
+ ASSERT_EQ(2UL, frames_.size());
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(0, {}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(5, {0}));
+}
+
+TEST_F(RtpVp9RefFinderTest, SpatialIndex) {
+ Insert(Frame().Pid(0).SidAndTid(0, 0).AsKeyFrame());
+ Insert(Frame().Pid(0).SidAndTid(1, 0).AsKeyFrame());
+ Insert(Frame().Pid(0).SidAndTid(2, 0).AsKeyFrame());
+
+ ASSERT_EQ(3UL, frames_.size());
+ EXPECT_THAT(frames_,
+ Contains(Pointee(Property(&EncodedFrame::SpatialIndex, 0))));
+ EXPECT_THAT(frames_,
+ Contains(Pointee(Property(&EncodedFrame::SpatialIndex, 1))));
+ EXPECT_THAT(frames_,
+ Contains(Pointee(Property(&EncodedFrame::SpatialIndex, 2))));
+}
+
+TEST_F(RtpVp9RefFinderTest, StashedFramesDoNotWrapTl0Backwards) {
+ GofInfoVP9 ss;
+ ss.SetGofInfoVP9(kTemporalStructureMode1);
+
+ Insert(Frame().Pid(0).SidAndTid(0, 0).Tl0(0));
+ EXPECT_THAT(frames_, SizeIs(0));
+
+ Insert(Frame().Pid(128).SidAndTid(0, 0).Tl0(128).AsKeyFrame().Gof(&ss));
+ EXPECT_THAT(frames_, SizeIs(1));
+ Insert(Frame().Pid(129).SidAndTid(0, 0).Tl0(129));
+ EXPECT_THAT(frames_, SizeIs(2));
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/session_info.cc b/third_party/libwebrtc/modules/video_coding/session_info.cc
new file mode 100644
index 0000000000..2aa6111629
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/session_info.cc
@@ -0,0 +1,537 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/session_info.h"
+
+#include <string.h>
+
+#include <vector>
+
+#include "absl/types/variant.h"
+#include "modules/include/module_common_types.h"
+#include "modules/include/module_common_types_public.h"
+#include "modules/video_coding/codecs/interface/common_constants.h"
+#include "modules/video_coding/codecs/vp8/include/vp8_globals.h"
+#include "modules/video_coding/jitter_buffer_common.h"
+#include "modules/video_coding/packet.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+namespace {
+
+uint16_t BufferToUWord16(const uint8_t* dataBuffer) {
+ return (dataBuffer[0] << 8) | dataBuffer[1];
+}
+
+} // namespace
+
+VCMSessionInfo::VCMSessionInfo()
+ : complete_(false),
+ frame_type_(VideoFrameType::kVideoFrameDelta),
+ packets_(),
+ empty_seq_num_low_(-1),
+ empty_seq_num_high_(-1),
+ first_packet_seq_num_(-1),
+ last_packet_seq_num_(-1) {}
+
+VCMSessionInfo::~VCMSessionInfo() {}
+
+void VCMSessionInfo::UpdateDataPointers(const uint8_t* old_base_ptr,
+ const uint8_t* new_base_ptr) {
+ for (PacketIterator it = packets_.begin(); it != packets_.end(); ++it)
+ if ((*it).dataPtr != NULL) {
+ RTC_DCHECK(old_base_ptr != NULL && new_base_ptr != NULL);
+ (*it).dataPtr = new_base_ptr + ((*it).dataPtr - old_base_ptr);
+ }
+}
+
+int VCMSessionInfo::LowSequenceNumber() const {
+ if (packets_.empty())
+ return empty_seq_num_low_;
+ return packets_.front().seqNum;
+}
+
+int VCMSessionInfo::HighSequenceNumber() const {
+ if (packets_.empty())
+ return empty_seq_num_high_;
+ if (empty_seq_num_high_ == -1)
+ return packets_.back().seqNum;
+ return LatestSequenceNumber(packets_.back().seqNum, empty_seq_num_high_);
+}
+
+int VCMSessionInfo::PictureId() const {
+ if (packets_.empty())
+ return kNoPictureId;
+ if (packets_.front().video_header.codec == kVideoCodecVP8) {
+ return absl::get<RTPVideoHeaderVP8>(
+ packets_.front().video_header.video_type_header)
+ .pictureId;
+ } else if (packets_.front().video_header.codec == kVideoCodecVP9) {
+ return absl::get<RTPVideoHeaderVP9>(
+ packets_.front().video_header.video_type_header)
+ .picture_id;
+ } else {
+ return kNoPictureId;
+ }
+}
+
+int VCMSessionInfo::TemporalId() const {
+ if (packets_.empty())
+ return kNoTemporalIdx;
+ if (packets_.front().video_header.codec == kVideoCodecVP8) {
+ return absl::get<RTPVideoHeaderVP8>(
+ packets_.front().video_header.video_type_header)
+ .temporalIdx;
+ } else if (packets_.front().video_header.codec == kVideoCodecVP9) {
+ return absl::get<RTPVideoHeaderVP9>(
+ packets_.front().video_header.video_type_header)
+ .temporal_idx;
+ } else {
+ return kNoTemporalIdx;
+ }
+}
+
+bool VCMSessionInfo::LayerSync() const {
+ if (packets_.empty())
+ return false;
+ if (packets_.front().video_header.codec == kVideoCodecVP8) {
+ return absl::get<RTPVideoHeaderVP8>(
+ packets_.front().video_header.video_type_header)
+ .layerSync;
+ } else if (packets_.front().video_header.codec == kVideoCodecVP9) {
+ return absl::get<RTPVideoHeaderVP9>(
+ packets_.front().video_header.video_type_header)
+ .temporal_up_switch;
+ } else {
+ return false;
+ }
+}
+
+int VCMSessionInfo::Tl0PicId() const {
+ if (packets_.empty())
+ return kNoTl0PicIdx;
+ if (packets_.front().video_header.codec == kVideoCodecVP8) {
+ return absl::get<RTPVideoHeaderVP8>(
+ packets_.front().video_header.video_type_header)
+ .tl0PicIdx;
+ } else if (packets_.front().video_header.codec == kVideoCodecVP9) {
+ return absl::get<RTPVideoHeaderVP9>(
+ packets_.front().video_header.video_type_header)
+ .tl0_pic_idx;
+ } else {
+ return kNoTl0PicIdx;
+ }
+}
+
+std::vector<NaluInfo> VCMSessionInfo::GetNaluInfos() const {
+ if (packets_.empty() ||
+ packets_.front().video_header.codec != kVideoCodecH264)
+ return std::vector<NaluInfo>();
+ std::vector<NaluInfo> nalu_infos;
+ for (const VCMPacket& packet : packets_) {
+ const auto& h264 =
+ absl::get<RTPVideoHeaderH264>(packet.video_header.video_type_header);
+ for (size_t i = 0; i < h264.nalus_length; ++i) {
+ nalu_infos.push_back(h264.nalus[i]);
+ }
+ }
+ return nalu_infos;
+}
+
+void VCMSessionInfo::SetGofInfo(const GofInfoVP9& gof_info, size_t idx) {
+ if (packets_.empty())
+ return;
+
+ auto* vp9_header = absl::get_if<RTPVideoHeaderVP9>(
+ &packets_.front().video_header.video_type_header);
+ if (!vp9_header || vp9_header->flexible_mode)
+ return;
+
+ vp9_header->temporal_idx = gof_info.temporal_idx[idx];
+ vp9_header->temporal_up_switch = gof_info.temporal_up_switch[idx];
+ vp9_header->num_ref_pics = gof_info.num_ref_pics[idx];
+ for (uint8_t i = 0; i < gof_info.num_ref_pics[idx]; ++i) {
+ vp9_header->pid_diff[i] = gof_info.pid_diff[idx][i];
+ }
+}
+
+void VCMSessionInfo::Reset() {
+ complete_ = false;
+ frame_type_ = VideoFrameType::kVideoFrameDelta;
+ packets_.clear();
+ empty_seq_num_low_ = -1;
+ empty_seq_num_high_ = -1;
+ first_packet_seq_num_ = -1;
+ last_packet_seq_num_ = -1;
+}
+
+size_t VCMSessionInfo::SessionLength() const {
+ size_t length = 0;
+ for (PacketIteratorConst it = packets_.begin(); it != packets_.end(); ++it)
+ length += (*it).sizeBytes;
+ return length;
+}
+
+int VCMSessionInfo::NumPackets() const {
+ return packets_.size();
+}
+
+size_t VCMSessionInfo::InsertBuffer(uint8_t* frame_buffer,
+ PacketIterator packet_it) {
+ VCMPacket& packet = *packet_it;
+ PacketIterator it;
+
+ // Calculate the offset into the frame buffer for this packet.
+ size_t offset = 0;
+ for (it = packets_.begin(); it != packet_it; ++it)
+ offset += (*it).sizeBytes;
+
+ // Set the data pointer to point to the start of this packet in the
+ // frame buffer.
+ const uint8_t* packet_buffer = packet.dataPtr;
+ packet.dataPtr = frame_buffer + offset;
+
+ // We handle H.264 STAP-A packets in a special way, as we need to remove
+ // the two-byte length field that precedes each NAL unit, and potentially
+ // add start codes.
+ // TODO(pbos): Remove H264 parsing from this step and use a fragmentation
+ // header supplied by the H264 depacketizer.
+ const size_t kH264NALHeaderLengthInBytes = 1;
+ const size_t kLengthFieldLength = 2;
+ const auto* h264 =
+ absl::get_if<RTPVideoHeaderH264>(&packet.video_header.video_type_header);
+ if (h264 && h264->packetization_type == kH264StapA) {
+ size_t required_length = 0;
+ const uint8_t* nalu_ptr = packet_buffer + kH264NALHeaderLengthInBytes;
+ // Must check that the incoming data length doesn't extend past the end of
+ // the buffer. Start codes (4 bytes) are longer than the 2-byte length
+ // fields they replace, so the required length is recomputed per NAL unit.
+ while (nalu_ptr + kLengthFieldLength <= packet_buffer + packet.sizeBytes) {
+ size_t length = BufferToUWord16(nalu_ptr);
+ if (nalu_ptr + kLengthFieldLength + length <=
+ packet_buffer + packet.sizeBytes) {
+ required_length +=
+ length + (packet.insertStartCode ? kH264StartCodeLengthBytes : 0);
+ nalu_ptr += kLengthFieldLength + length;
+ } else {
+ // Something is very wrong!
+ RTC_LOG(LS_ERROR)
+ << "Failed to insert packet due to corrupt H264 STAP-A";
+ return 0;
+ }
+ }
+ ShiftSubsequentPackets(packet_it, required_length);
+ nalu_ptr = packet_buffer + kH264NALHeaderLengthInBytes;
+ uint8_t* frame_buffer_ptr = frame_buffer + offset;
+ // We already know we won't read past the end of the buffer.
+ while (nalu_ptr + kLengthFieldLength <= packet_buffer + packet.sizeBytes) {
+ size_t length = BufferToUWord16(nalu_ptr);
+ nalu_ptr += kLengthFieldLength;
+ frame_buffer_ptr += Insert(nalu_ptr, length, packet.insertStartCode,
+ const_cast<uint8_t*>(frame_buffer_ptr));
+ nalu_ptr += length;
+ }
+ packet.sizeBytes = required_length;
+ return packet.sizeBytes;
+ }
+ ShiftSubsequentPackets(
+ packet_it, packet.sizeBytes +
+ (packet.insertStartCode ? kH264StartCodeLengthBytes : 0));
+
+ packet.sizeBytes =
+ Insert(packet_buffer, packet.sizeBytes, packet.insertStartCode,
+ const_cast<uint8_t*>(packet.dataPtr));
+ return packet.sizeBytes;
+}
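A standalone sketch of the STAP-A walk performed above, assuming only the layout described in the comment (a 1-byte STAP-A NAL header, then repeated 2-byte big-endian size fields each followed by a NAL unit). SplitStapA is a hypothetical helper, not a VCMSessionInfo method:

#include <cstddef>
#include <cstdint>
#include <utility>
#include <vector>

std::vector<std::pair<const uint8_t*, size_t>> SplitStapA(
    const uint8_t* payload, size_t size) {
  std::vector<std::pair<const uint8_t*, size_t>> nalus;
  const uint8_t* ptr = payload + 1;  // Skip the STAP-A NAL header byte.
  const uint8_t* const end = payload + size;
  while (ptr + 2 <= end) {
    // 2-byte big-endian length field, as in BufferToUWord16() above.
    const size_t length = (static_cast<size_t>(ptr[0]) << 8) | ptr[1];
    ptr += 2;
    if (ptr + length > end)
      return {};  // Corrupt size field; mirrors the error path above.
    nalus.emplace_back(ptr, length);
    ptr += length;
  }
  return nalus;
}

Each returned NAL unit would then be prefixed with a 4-byte start code when copied into the frame buffer, which is why the required length is recomputed before shifting subsequent packets.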
+
+size_t VCMSessionInfo::Insert(const uint8_t* buffer,
+ size_t length,
+ bool insert_start_code,
+ uint8_t* frame_buffer) {
+ if (insert_start_code) {
+ const unsigned char startCode[] = {0, 0, 0, 1};
+ memcpy(frame_buffer, startCode, kH264StartCodeLengthBytes);
+ }
+ memcpy(frame_buffer + (insert_start_code ? kH264StartCodeLengthBytes : 0),
+ buffer, length);
+ length += (insert_start_code ? kH264StartCodeLengthBytes : 0);
+
+ return length;
+}
+
+void VCMSessionInfo::ShiftSubsequentPackets(PacketIterator it,
+ int steps_to_shift) {
+ ++it;
+ if (it == packets_.end())
+ return;
+ uint8_t* first_packet_ptr = const_cast<uint8_t*>((*it).dataPtr);
+ int shift_length = 0;
+ // Calculate the total move length and move the data pointers in advance.
+ for (; it != packets_.end(); ++it) {
+ shift_length += (*it).sizeBytes;
+ if ((*it).dataPtr != NULL)
+ (*it).dataPtr += steps_to_shift;
+ }
+ memmove(first_packet_ptr + steps_to_shift, first_packet_ptr, shift_length);
+}
+
+void VCMSessionInfo::UpdateCompleteSession() {
+ if (HaveFirstPacket() && HaveLastPacket()) {
+ // Do we have all the packets in this session?
+ bool complete_session = true;
+ PacketIterator it = packets_.begin();
+ PacketIterator prev_it = it;
+ ++it;
+ for (; it != packets_.end(); ++it) {
+ if (!InSequence(it, prev_it)) {
+ complete_session = false;
+ break;
+ }
+ prev_it = it;
+ }
+ complete_ = complete_session;
+ }
+}
+
+bool VCMSessionInfo::complete() const {
+ return complete_;
+}
+
+// Find the end of the NAL unit which the packet pointed to by `packet_it`
+// belongs to. Returns an iterator to the last packet of the frame if the end
+// of the NAL unit wasn't found.
+VCMSessionInfo::PacketIterator VCMSessionInfo::FindNaluEnd(
+ PacketIterator packet_it) const {
+ if ((*packet_it).completeNALU == kNaluEnd ||
+ (*packet_it).completeNALU == kNaluComplete) {
+ return packet_it;
+ }
+ // Find the end of the NAL unit.
+ for (; packet_it != packets_.end(); ++packet_it) {
+ if (((*packet_it).completeNALU == kNaluComplete &&
+ (*packet_it).sizeBytes > 0) ||
+ // Found next NALU.
+ (*packet_it).completeNALU == kNaluStart)
+ return --packet_it;
+ if ((*packet_it).completeNALU == kNaluEnd)
+ return packet_it;
+ }
+ // The end wasn't found.
+ return --packet_it;
+}
+
+size_t VCMSessionInfo::DeletePacketData(PacketIterator start,
+ PacketIterator end) {
+ size_t bytes_to_delete = 0; // The number of bytes to delete.
+ PacketIterator packet_after_end = end;
+ ++packet_after_end;
+
+ // Get the number of bytes to delete.
+ // Clear the size of these packets.
+ for (PacketIterator it = start; it != packet_after_end; ++it) {
+ bytes_to_delete += (*it).sizeBytes;
+ (*it).sizeBytes = 0;
+ (*it).dataPtr = NULL;
+ }
+ if (bytes_to_delete > 0)
+ ShiftSubsequentPackets(end, -static_cast<int>(bytes_to_delete));
+ return bytes_to_delete;
+}
+
+VCMSessionInfo::PacketIterator VCMSessionInfo::FindNextPartitionBeginning(
+ PacketIterator it) const {
+ while (it != packets_.end()) {
+ if (absl::get<RTPVideoHeaderVP8>((*it).video_header.video_type_header)
+ .beginningOfPartition) {
+ return it;
+ }
+ ++it;
+ }
+ return it;
+}
+
+VCMSessionInfo::PacketIterator VCMSessionInfo::FindPartitionEnd(
+ PacketIterator it) const {
+ RTC_DCHECK_EQ((*it).codec(), kVideoCodecVP8);
+ PacketIterator prev_it = it;
+ const int partition_id =
+ absl::get<RTPVideoHeaderVP8>((*it).video_header.video_type_header)
+ .partitionId;
+ while (it != packets_.end()) {
+ bool beginning =
+ absl::get<RTPVideoHeaderVP8>((*it).video_header.video_type_header)
+ .beginningOfPartition;
+ int current_partition_id =
+ absl::get<RTPVideoHeaderVP8>((*it).video_header.video_type_header)
+ .partitionId;
+ bool packet_loss_found = (!beginning && !InSequence(it, prev_it));
+ if (packet_loss_found ||
+ (beginning && current_partition_id != partition_id)) {
+ // Missing packet, the previous packet was the last in sequence.
+ return prev_it;
+ }
+ prev_it = it;
+ ++it;
+ }
+ return prev_it;
+}
+
+bool VCMSessionInfo::InSequence(const PacketIterator& packet_it,
+ const PacketIterator& prev_packet_it) {
+ // If the two iterators are pointing to the same packet they are considered
+ // to be in sequence.
+ return (packet_it == prev_packet_it ||
+ (static_cast<uint16_t>((*prev_packet_it).seqNum + 1) ==
+ (*packet_it).seqNum));
+}
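A one-line illustration of why the cast matters (a sketch, not part of the source): truncation to uint16_t makes the successor of 0xFFFF equal 0, so a packet with seqNum 0 is in sequence after one with seqNum 0xFFFF.

static_assert(static_cast<uint16_t>(static_cast<uint16_t>(0xFFFF) + 1) == 0,
              "RTP sequence numbers wrap modulo 2^16");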
+
+size_t VCMSessionInfo::MakeDecodable() {
+ size_t return_length = 0;
+ if (packets_.empty()) {
+ return 0;
+ }
+ PacketIterator it = packets_.begin();
+ // Make sure we remove the first NAL unit if it's not decodable.
+ if ((*it).completeNALU == kNaluIncomplete || (*it).completeNALU == kNaluEnd) {
+ PacketIterator nalu_end = FindNaluEnd(it);
+ return_length += DeletePacketData(it, nalu_end);
+ it = nalu_end;
+ }
+ PacketIterator prev_it = it;
+ // Take care of the rest of the NAL units.
+ for (; it != packets_.end(); ++it) {
+ bool start_of_nalu = ((*it).completeNALU == kNaluStart ||
+ (*it).completeNALU == kNaluComplete);
+ if (!start_of_nalu && !InSequence(it, prev_it)) {
+ // Found a sequence number gap due to packet loss.
+ PacketIterator nalu_end = FindNaluEnd(it);
+ return_length += DeletePacketData(it, nalu_end);
+ it = nalu_end;
+ }
+ prev_it = it;
+ }
+ return return_length;
+}
+
+bool VCMSessionInfo::HaveFirstPacket() const {
+ return !packets_.empty() && (first_packet_seq_num_ != -1);
+}
+
+bool VCMSessionInfo::HaveLastPacket() const {
+ return !packets_.empty() && (last_packet_seq_num_ != -1);
+}
+
+int VCMSessionInfo::InsertPacket(const VCMPacket& packet,
+ uint8_t* frame_buffer,
+ const FrameData& frame_data) {
+ if (packet.video_header.frame_type == VideoFrameType::kEmptyFrame) {
+ // Update sequence number of an empty packet.
+ // Only media packets are inserted into the packet list.
+ InformOfEmptyPacket(packet.seqNum);
+ return 0;
+ }
+
+ if (packets_.size() == kMaxPacketsInSession) {
+ RTC_LOG(LS_ERROR) << "Max number of packets per frame has been reached.";
+ return -1;
+ }
+
+ // Find the position of this packet in the packet list in sequence number
+ // order and insert it. Loop over the list in reverse order.
+ ReversePacketIterator rit = packets_.rbegin();
+ for (; rit != packets_.rend(); ++rit)
+ if (LatestSequenceNumber(packet.seqNum, (*rit).seqNum) == packet.seqNum)
+ break;
+
+ // Check for duplicate packets.
+ if (rit != packets_.rend() && (*rit).seqNum == packet.seqNum &&
+ (*rit).sizeBytes > 0)
+ return -2;
+
+ if (packet.codec() == kVideoCodecH264) {
+ // H.264 can have leading or trailing non-VCL (Video Coding Layer)
+ // NALUs, such as SPS/PPS/SEI and others. Also, the RTP marker bit is
+ // not reliable for the last packet of a frame (RFC 6184 5.1 - "Decoders
+ // [] MUST NOT rely on this property"), so allow out-of-order packets to
+ // update the first and last seq# range. Also mark as a key frame if
+ // any packet is of that type.
+ if (frame_type_ != VideoFrameType::kVideoFrameKey) {
+ frame_type_ = packet.video_header.frame_type;
+ }
+ if (packet.is_first_packet_in_frame() &&
+ (first_packet_seq_num_ == -1 ||
+ IsNewerSequenceNumber(first_packet_seq_num_, packet.seqNum))) {
+ first_packet_seq_num_ = packet.seqNum;
+ }
+ // Note: the code does *not* currently handle the Marker bit being totally
+ // absent from a frame. It does not, however, depend on it being on the last
+ // packet of the 'frame'/'session'.
+ if (packet.markerBit &&
+ (last_packet_seq_num_ == -1 ||
+ IsNewerSequenceNumber(packet.seqNum, last_packet_seq_num_))) {
+ last_packet_seq_num_ = packet.seqNum;
+ }
+ } else {
+ // Only insert media packets between the first and last packets (when
+ // available). The check is placed here so as to properly account for
+ // duplicate packets.
+ // Check whether this is the first packet (only valid for some codecs);
+ // it should only be set once per session.
+ if (packet.is_first_packet_in_frame() && first_packet_seq_num_ == -1) {
+ // The first packet in a frame signals the frame type.
+ frame_type_ = packet.video_header.frame_type;
+ // Store the sequence number for the first packet.
+ first_packet_seq_num_ = static_cast<int>(packet.seqNum);
+ } else if (first_packet_seq_num_ != -1 &&
+ IsNewerSequenceNumber(first_packet_seq_num_, packet.seqNum)) {
+ RTC_LOG(LS_WARNING)
+ << "Received packet with a sequence number which is out "
+ "of frame boundaries";
+ return -3;
+ } else if (frame_type_ == VideoFrameType::kEmptyFrame &&
+ packet.video_header.frame_type != VideoFrameType::kEmptyFrame) {
+ // Update the frame type with the type of the first media packet.
+ // TODO(mikhal): Can this trigger?
+ frame_type_ = packet.video_header.frame_type;
+ }
+
+ // Track the marker bit, should only be set for one packet per session.
+ if (packet.markerBit && last_packet_seq_num_ == -1) {
+ last_packet_seq_num_ = static_cast<int>(packet.seqNum);
+ } else if (last_packet_seq_num_ != -1 &&
+ IsNewerSequenceNumber(packet.seqNum, last_packet_seq_num_)) {
+ RTC_LOG(LS_WARNING)
+ << "Received packet with a sequence number which is out "
+ "of frame boundaries";
+ return -3;
+ }
+ }
+
+ // The insert operation invalidates the iterator `rit`.
+ PacketIterator packet_list_it = packets_.insert(rit.base(), packet);
+
+ size_t returnLength = InsertBuffer(frame_buffer, packet_list_it);
+ UpdateCompleteSession();
+ return static_cast<int>(returnLength);
+}
+
+void VCMSessionInfo::InformOfEmptyPacket(uint16_t seq_num) {
+ // Empty packets may be FEC or filler packets. They are sequential and
+ // follow the data packets; therefore, we only need to keep track of the
+ // high and low sequence numbers and may assume that the packets in
+ // between are empty packets belonging to the same frame (timestamp).
+ if (empty_seq_num_high_ == -1)
+ empty_seq_num_high_ = seq_num;
+ else
+ empty_seq_num_high_ = LatestSequenceNumber(seq_num, empty_seq_num_high_);
+ if (empty_seq_num_low_ == -1 ||
+ IsNewerSequenceNumber(empty_seq_num_low_, seq_num))
+ empty_seq_num_low_ = seq_num;
+}
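A toy version of the bound tracking above, ignoring the wrap-aware helpers (LatestSequenceNumber / IsNewerSequenceNumber) that the real code relies on; TrackEmptyBounds is hypothetical:

#include <algorithm>
#include <cstdint>

void TrackEmptyBounds(uint16_t seq, int& low, int& high) {
  // Widen to int so that -1 can mean "unset", as in VCMSessionInfo.
  high = (high == -1) ? seq : std::max(high, static_cast<int>(seq));
  if (low == -1 || static_cast<int>(seq) < low)
    low = seq;
}

Feeding 7, 9, 8 yields [low, high] == [7, 9]; the gap at 8 is assumed to be another empty packet of the same frame, which is exactly the assumption stated in the comment.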
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/session_info.h b/third_party/libwebrtc/modules/video_coding/session_info.h
new file mode 100644
index 0000000000..6079dbbb72
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/session_info.h
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_SESSION_INFO_H_
+#define MODULES_VIDEO_CODING_SESSION_INFO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <list>
+#include <vector>
+
+#include "modules/video_coding/codecs/h264/include/h264_globals.h"
+#include "modules/video_coding/codecs/vp9/include/vp9_globals.h"
+#include "modules/video_coding/include/video_coding.h"
+#include "modules/video_coding/packet.h"
+
+namespace webrtc {
+// Used to pass data from jitter buffer to session info.
+// This data is then used in determining whether a frame is decodable.
+struct FrameData {
+ int64_t rtt_ms;
+ float rolling_average_packets_per_frame;
+};
+
+class VCMSessionInfo {
+ public:
+ VCMSessionInfo();
+ ~VCMSessionInfo();
+
+ void UpdateDataPointers(const uint8_t* old_base_ptr,
+ const uint8_t* new_base_ptr);
+ void Reset();
+ int InsertPacket(const VCMPacket& packet,
+ uint8_t* frame_buffer,
+ const FrameData& frame_data);
+ bool complete() const;
+
+ // Makes the frame decodable, i.e., it will only contain decodable NALUs.
+ // All non-decodable NALUs are deleted and packets are moved in memory to
+ // remove any empty space.
+ // Returns the number of bytes deleted from the session.
+ size_t MakeDecodable();
+
+ size_t SessionLength() const;
+ int NumPackets() const;
+ bool HaveFirstPacket() const;
+ bool HaveLastPacket() const;
+ webrtc::VideoFrameType FrameType() const { return frame_type_; }
+ int LowSequenceNumber() const;
+
+ // Returns highest sequence number, media or empty.
+ int HighSequenceNumber() const;
+ int PictureId() const;
+ int TemporalId() const;
+ bool LayerSync() const;
+ int Tl0PicId() const;
+
+ std::vector<NaluInfo> GetNaluInfos() const;
+
+ void SetGofInfo(const GofInfoVP9& gof_info, size_t idx);
+
+ private:
+ enum { kMaxVP8Partitions = 9 };
+
+ typedef std::list<VCMPacket> PacketList;
+ typedef PacketList::iterator PacketIterator;
+ typedef PacketList::const_iterator PacketIteratorConst;
+ typedef PacketList::reverse_iterator ReversePacketIterator;
+
+ void InformOfEmptyPacket(uint16_t seq_num);
+
+ // Finds the packet at the beginning of the next VP8 partition. If
+ // none is found the returned iterator points to `packets_.end()`.
+ // `it` is expected to point to the last packet of the previous partition,
+ // or to the first packet of the frame.
+ PacketIterator FindNextPartitionBeginning(PacketIterator it) const;
+
+ // Returns an iterator pointing to the last packet of the partition pointed to
+ // by `it`.
+ PacketIterator FindPartitionEnd(PacketIterator it) const;
+ static bool InSequence(const PacketIterator& it,
+ const PacketIterator& prev_it);
+ size_t InsertBuffer(uint8_t* frame_buffer, PacketIterator packetIterator);
+ size_t Insert(const uint8_t* buffer,
+ size_t length,
+ bool insert_start_code,
+ uint8_t* frame_buffer);
+ void ShiftSubsequentPackets(PacketIterator it, int steps_to_shift);
+ PacketIterator FindNaluEnd(PacketIterator packet_iter) const;
+ // Deletes the data of all packets between `start` and `end`, inclusively.
+ // Note that this function doesn't delete the actual packets.
+ size_t DeletePacketData(PacketIterator start, PacketIterator end);
+ void UpdateCompleteSession();
+
+ bool complete_;
+ webrtc::VideoFrameType frame_type_;
+ // Packets in this frame.
+ PacketList packets_;
+ int empty_seq_num_low_;
+ int empty_seq_num_high_;
+
+ // The following two variables correspond to the first and last media packets
+ // in a session defined by the first packet flag and the marker bit.
+ // They are not necessarily equal to the front and back packets, as packets
+ // may enter out of order.
+ // TODO(mikhal): Refactor the list to use a map.
+ int first_packet_seq_num_;
+ int last_packet_seq_num_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_SESSION_INFO_H_
diff --git a/third_party/libwebrtc/modules/video_coding/session_info_unittest.cc b/third_party/libwebrtc/modules/video_coding/session_info_unittest.cc
new file mode 100644
index 0000000000..867997701d
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/session_info_unittest.cc
@@ -0,0 +1,469 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/session_info.h"
+
+#include <string.h>
+
+#include "modules/video_coding/packet.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+class TestSessionInfo : public ::testing::Test {
+ protected:
+ virtual void SetUp() {
+ memset(packet_buffer_, 0, sizeof(packet_buffer_));
+ memset(frame_buffer_, 0, sizeof(frame_buffer_));
+ session_.Reset();
+ packet_.video_header.frame_type = VideoFrameType::kVideoFrameDelta;
+ packet_.sizeBytes = packet_buffer_size();
+ packet_.dataPtr = packet_buffer_;
+ packet_.seqNum = 0;
+ packet_.timestamp = 0;
+ frame_data.rtt_ms = 0;
+ frame_data.rolling_average_packets_per_frame = -1;
+ }
+
+ void FillPacket(uint8_t start_value) {
+ for (size_t i = 0; i < packet_buffer_size(); ++i)
+ packet_buffer_[i] = start_value + i;
+ }
+
+ void VerifyPacket(uint8_t* start_ptr, uint8_t start_value) {
+ for (size_t j = 0; j < packet_buffer_size(); ++j) {
+ ASSERT_EQ(start_value + j, start_ptr[j]);
+ }
+ }
+
+ size_t packet_buffer_size() const {
+ return sizeof(packet_buffer_) / sizeof(packet_buffer_[0]);
+ }
+ size_t frame_buffer_size() const {
+ return sizeof(frame_buffer_) / sizeof(frame_buffer_[0]);
+ }
+
+ enum { kPacketBufferSize = 10 };
+
+ uint8_t packet_buffer_[kPacketBufferSize];
+ uint8_t frame_buffer_[10 * kPacketBufferSize];
+
+ VCMSessionInfo session_;
+ VCMPacket packet_;
+ FrameData frame_data;
+};
+
+class TestNalUnits : public TestSessionInfo {
+ protected:
+ virtual void SetUp() {
+ TestSessionInfo::SetUp();
+ packet_.video_header.codec = kVideoCodecVP8;
+ }
+
+ bool VerifyNalu(int offset, int packets_expected, int start_value) {
+ EXPECT_GE(session_.SessionLength(),
+ packets_expected * packet_buffer_size());
+ for (int i = 0; i < packets_expected; ++i) {
+ int packet_index = (offset + i) * packet_buffer_size();
+ VerifyPacket(frame_buffer_ + packet_index, start_value + i);
+ }
+ return true;
+ }
+};
+
+class TestNackList : public TestSessionInfo {
+ protected:
+ static const size_t kMaxSeqNumListLength = 30;
+
+ virtual void SetUp() {
+ TestSessionInfo::SetUp();
+ seq_num_list_length_ = 0;
+ memset(seq_num_list_, 0, sizeof(seq_num_list_));
+ }
+
+ void BuildSeqNumList(uint16_t low, uint16_t high) {
+ size_t i = 0;
+ while (low != high + 1) {
+ EXPECT_LT(i, kMaxSeqNumListLength);
+ if (i >= kMaxSeqNumListLength) {
+ seq_num_list_length_ = kMaxSeqNumListLength;
+ return;
+ }
+ seq_num_list_[i] = low;
+ low++;
+ i++;
+ }
+ seq_num_list_length_ = i;
+ }
+
+ void VerifyAll(int value) {
+ for (int i = 0; i < seq_num_list_length_; ++i)
+ EXPECT_EQ(seq_num_list_[i], value);
+ }
+
+ int seq_num_list_[kMaxSeqNumListLength];
+ int seq_num_list_length_;
+};
+
+TEST_F(TestSessionInfo, TestSimpleAPIs) {
+ packet_.video_header.is_first_packet_in_frame = true;
+ packet_.seqNum = 0xFFFE;
+ packet_.sizeBytes = packet_buffer_size();
+ packet_.video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ FillPacket(0);
+ EXPECT_EQ(packet_buffer_size(), static_cast<size_t>(session_.InsertPacket(
+ packet_, frame_buffer_, frame_data)));
+ EXPECT_FALSE(session_.HaveLastPacket());
+ EXPECT_EQ(VideoFrameType::kVideoFrameKey, session_.FrameType());
+
+ packet_.video_header.is_first_packet_in_frame = false;
+ packet_.markerBit = true;
+ packet_.seqNum += 1;
+ EXPECT_EQ(packet_buffer_size(), static_cast<size_t>(session_.InsertPacket(
+ packet_, frame_buffer_, frame_data)));
+ EXPECT_TRUE(session_.HaveLastPacket());
+ EXPECT_EQ(packet_.seqNum, session_.HighSequenceNumber());
+ EXPECT_EQ(0xFFFE, session_.LowSequenceNumber());
+
+ // Insert empty packet which will be the new high sequence number.
+ // To make things more difficult we will make sure to have a wrap here.
+ packet_.video_header.is_first_packet_in_frame = false;
+ packet_.markerBit = true;
+ packet_.seqNum = 2;
+ packet_.sizeBytes = 0;
+ packet_.video_header.frame_type = VideoFrameType::kEmptyFrame;
+ EXPECT_EQ(0, session_.InsertPacket(packet_, frame_buffer_, frame_data));
+ EXPECT_EQ(packet_.seqNum, session_.HighSequenceNumber());
+}
+
+TEST_F(TestSessionInfo, NormalOperation) {
+ packet_.seqNum = 0xFFFF;
+ packet_.video_header.is_first_packet_in_frame = true;
+ packet_.markerBit = false;
+ FillPacket(0);
+ EXPECT_EQ(packet_buffer_size(), static_cast<size_t>(session_.InsertPacket(
+ packet_, frame_buffer_, frame_data)));
+
+ packet_.video_header.is_first_packet_in_frame = false;
+ for (int i = 1; i < 9; ++i) {
+ packet_.seqNum += 1;
+ FillPacket(i);
+ ASSERT_EQ(packet_buffer_size(), static_cast<size_t>(session_.InsertPacket(
+ packet_, frame_buffer_, frame_data)));
+ }
+
+ packet_.seqNum += 1;
+ packet_.markerBit = true;
+ FillPacket(9);
+ EXPECT_EQ(packet_buffer_size(), static_cast<size_t>(session_.InsertPacket(
+ packet_, frame_buffer_, frame_data)));
+
+ EXPECT_EQ(10 * packet_buffer_size(), session_.SessionLength());
+ for (int i = 0; i < 10; ++i) {
+ SCOPED_TRACE("Calling VerifyPacket");
+ VerifyPacket(frame_buffer_ + i * packet_buffer_size(), i);
+ }
+}
+
+TEST_F(TestSessionInfo, OutOfBoundsPackets1PacketFrame) {
+ packet_.seqNum = 0x0001;
+ packet_.video_header.is_first_packet_in_frame = true;
+ packet_.markerBit = true;
+ FillPacket(1);
+ EXPECT_EQ(packet_buffer_size(), static_cast<size_t>(session_.InsertPacket(
+ packet_, frame_buffer_, frame_data)));
+
+ packet_.seqNum = 0x0004;
+ packet_.video_header.is_first_packet_in_frame = true;
+ packet_.markerBit = true;
+ FillPacket(1);
+ EXPECT_EQ(-3, session_.InsertPacket(packet_, frame_buffer_, frame_data));
+ packet_.seqNum = 0x0000;
+ packet_.video_header.is_first_packet_in_frame = false;
+ packet_.markerBit = false;
+ FillPacket(1);
+ EXPECT_EQ(-3, session_.InsertPacket(packet_, frame_buffer_, frame_data));
+}
+
+TEST_F(TestSessionInfo, SetMarkerBitOnce) {
+ packet_.seqNum = 0x0005;
+ packet_.video_header.is_first_packet_in_frame = false;
+ packet_.markerBit = true;
+ FillPacket(1);
+ EXPECT_EQ(packet_buffer_size(), static_cast<size_t>(session_.InsertPacket(
+ packet_, frame_buffer_, frame_data)));
+ ++packet_.seqNum;
+ packet_.video_header.is_first_packet_in_frame = true;
+ packet_.markerBit = true;
+ FillPacket(1);
+ EXPECT_EQ(-3, session_.InsertPacket(packet_, frame_buffer_, frame_data));
+}
+
+TEST_F(TestSessionInfo, OutOfBoundsPacketsBase) {
+ // Allow packets in the range 5-6.
+ packet_.seqNum = 0x0005;
+ packet_.video_header.is_first_packet_in_frame = true;
+ packet_.markerBit = false;
+ FillPacket(1);
+ EXPECT_EQ(packet_buffer_size(), static_cast<size_t>(session_.InsertPacket(
+ packet_, frame_buffer_, frame_data)));
+ // Insert an older packet with a first packet set.
+ packet_.seqNum = 0x0004;
+ packet_.video_header.is_first_packet_in_frame = true;
+ packet_.markerBit = true;
+ FillPacket(1);
+ EXPECT_EQ(-3, session_.InsertPacket(packet_, frame_buffer_, frame_data));
+ packet_.seqNum = 0x0006;
+ packet_.video_header.is_first_packet_in_frame = true;
+ packet_.markerBit = true;
+ FillPacket(1);
+ EXPECT_EQ(packet_buffer_size(), static_cast<size_t>(session_.InsertPacket(
+ packet_, frame_buffer_, frame_data)));
+ packet_.seqNum = 0x0008;
+ packet_.video_header.is_first_packet_in_frame = false;
+ packet_.markerBit = true;
+ FillPacket(1);
+ EXPECT_EQ(-3, session_.InsertPacket(packet_, frame_buffer_, frame_data));
+}
+
+TEST_F(TestSessionInfo, OutOfBoundsPacketsWrap) {
+ packet_.seqNum = 0xFFFE;
+ packet_.video_header.is_first_packet_in_frame = true;
+ packet_.markerBit = false;
+ FillPacket(1);
+ EXPECT_EQ(packet_buffer_size(), static_cast<size_t>(session_.InsertPacket(
+ packet_, frame_buffer_, frame_data)));
+
+ packet_.seqNum = 0x0004;
+ packet_.video_header.is_first_packet_in_frame = false;
+ packet_.markerBit = true;
+ FillPacket(1);
+ EXPECT_EQ(packet_buffer_size(), static_cast<size_t>(session_.InsertPacket(
+ packet_, frame_buffer_, frame_data)));
+ packet_.seqNum = 0x0002;
+ packet_.video_header.is_first_packet_in_frame = false;
+ packet_.markerBit = false;
+ FillPacket(1);
+ ASSERT_EQ(packet_buffer_size(), static_cast<size_t>(session_.InsertPacket(
+ packet_, frame_buffer_, frame_data)));
+ packet_.seqNum = 0xFFF0;
+ packet_.video_header.is_first_packet_in_frame = false;
+ packet_.markerBit = false;
+ FillPacket(1);
+ EXPECT_EQ(-3, session_.InsertPacket(packet_, frame_buffer_, frame_data));
+ packet_.seqNum = 0x0006;
+ packet_.video_header.is_first_packet_in_frame = false;
+ packet_.markerBit = false;
+ FillPacket(1);
+ EXPECT_EQ(-3, session_.InsertPacket(packet_, frame_buffer_, frame_data));
+}
+
+TEST_F(TestSessionInfo, OutOfBoundsOutOfOrder) {
+ // Insert out of bound regular packets, and then the first and last packet.
+ // Verify that correct bounds are maintained.
+ packet_.seqNum = 0x0003;
+ packet_.video_header.is_first_packet_in_frame = false;
+ packet_.markerBit = false;
+ FillPacket(1);
+ EXPECT_EQ(packet_buffer_size(), static_cast<size_t>(session_.InsertPacket(
+ packet_, frame_buffer_, frame_data)));
+ // Insert an older packet with a first packet set.
+ packet_.seqNum = 0x0005;
+ packet_.video_header.is_first_packet_in_frame = true;
+ packet_.markerBit = false;
+ FillPacket(1);
+ EXPECT_EQ(packet_buffer_size(), static_cast<size_t>(session_.InsertPacket(
+ packet_, frame_buffer_, frame_data)));
+ packet_.seqNum = 0x0004;
+ packet_.video_header.is_first_packet_in_frame = false;
+ packet_.markerBit = false;
+ FillPacket(1);
+ EXPECT_EQ(-3, session_.InsertPacket(packet_, frame_buffer_, frame_data));
+ packet_.seqNum = 0x0010;
+ packet_.video_header.is_first_packet_in_frame = false;
+ packet_.markerBit = false;
+ FillPacket(1);
+ EXPECT_EQ(packet_buffer_size(), static_cast<size_t>(session_.InsertPacket(
+ packet_, frame_buffer_, frame_data)));
+ packet_.seqNum = 0x0008;
+ packet_.video_header.is_first_packet_in_frame = false;
+ packet_.markerBit = true;
+ FillPacket(1);
+ EXPECT_EQ(packet_buffer_size(), static_cast<size_t>(session_.InsertPacket(
+ packet_, frame_buffer_, frame_data)));
+
+ packet_.seqNum = 0x0009;
+ packet_.video_header.is_first_packet_in_frame = false;
+ packet_.markerBit = false;
+ FillPacket(1);
+ EXPECT_EQ(-3, session_.InsertPacket(packet_, frame_buffer_, frame_data));
+}
+
+TEST_F(TestNalUnits, OnlyReceivedEmptyPacket) {
+ packet_.video_header.is_first_packet_in_frame = false;
+ packet_.completeNALU = kNaluComplete;
+ packet_.video_header.frame_type = VideoFrameType::kEmptyFrame;
+ packet_.sizeBytes = 0;
+ packet_.seqNum = 0;
+ packet_.markerBit = false;
+ EXPECT_EQ(0, session_.InsertPacket(packet_, frame_buffer_, frame_data));
+
+ EXPECT_EQ(0U, session_.MakeDecodable());
+ EXPECT_EQ(0U, session_.SessionLength());
+}
+
+TEST_F(TestNalUnits, OneIsolatedNaluLoss) {
+ packet_.video_header.is_first_packet_in_frame = true;
+ packet_.completeNALU = kNaluComplete;
+ packet_.seqNum = 0;
+ packet_.markerBit = false;
+ FillPacket(0);
+ EXPECT_EQ(packet_buffer_size(), static_cast<size_t>(session_.InsertPacket(
+ packet_, frame_buffer_, frame_data)));
+
+ packet_.video_header.is_first_packet_in_frame = false;
+ packet_.completeNALU = kNaluComplete;
+ packet_.seqNum += 2;
+ packet_.markerBit = true;
+ FillPacket(2);
+ EXPECT_EQ(packet_buffer_size(), static_cast<size_t>(session_.InsertPacket(
+ packet_, frame_buffer_, frame_data)));
+
+ EXPECT_EQ(0U, session_.MakeDecodable());
+ EXPECT_EQ(2 * packet_buffer_size(), session_.SessionLength());
+ SCOPED_TRACE("Calling VerifyNalu");
+ EXPECT_TRUE(VerifyNalu(0, 1, 0));
+ SCOPED_TRACE("Calling VerifyNalu");
+ EXPECT_TRUE(VerifyNalu(1, 1, 2));
+}
+
+TEST_F(TestNalUnits, LossInMiddleOfNalu) {
+ packet_.video_header.is_first_packet_in_frame = true;
+ packet_.completeNALU = kNaluComplete;
+ packet_.seqNum = 0;
+ packet_.markerBit = false;
+ FillPacket(0);
+ EXPECT_EQ(packet_buffer_size(), static_cast<size_t>(session_.InsertPacket(
+ packet_, frame_buffer_, frame_data)));
+
+ packet_.video_header.is_first_packet_in_frame = false;
+ packet_.completeNALU = kNaluEnd;
+ packet_.seqNum += 2;
+ packet_.markerBit = true;
+ FillPacket(2);
+ EXPECT_EQ(packet_buffer_size(), static_cast<size_t>(session_.InsertPacket(
+ packet_, frame_buffer_, frame_data)));
+
+ EXPECT_EQ(packet_buffer_size(), session_.MakeDecodable());
+ EXPECT_EQ(packet_buffer_size(), session_.SessionLength());
+ SCOPED_TRACE("Calling VerifyNalu");
+ EXPECT_TRUE(VerifyNalu(0, 1, 0));
+}
+
+TEST_F(TestNalUnits, StartAndEndOfLastNalUnitLost) {
+ packet_.video_header.is_first_packet_in_frame = true;
+ packet_.completeNALU = kNaluComplete;
+ packet_.seqNum = 0;
+ packet_.markerBit = false;
+ FillPacket(0);
+ EXPECT_EQ(packet_buffer_size(), static_cast<size_t>(session_.InsertPacket(
+ packet_, frame_buffer_, frame_data)));
+
+ packet_.video_header.is_first_packet_in_frame = false;
+ packet_.completeNALU = kNaluIncomplete;
+ packet_.seqNum += 2;
+ packet_.markerBit = false;
+ FillPacket(1);
+ EXPECT_EQ(packet_buffer_size(), static_cast<size_t>(session_.InsertPacket(
+ packet_, frame_buffer_, frame_data)));
+
+ EXPECT_EQ(packet_buffer_size(), session_.MakeDecodable());
+ EXPECT_EQ(packet_buffer_size(), session_.SessionLength());
+ SCOPED_TRACE("Calling VerifyNalu");
+ EXPECT_TRUE(VerifyNalu(0, 1, 0));
+}
+
+TEST_F(TestNalUnits, ReorderWrapNoLoss) {
+ packet_.seqNum = 0xFFFF;
+ packet_.video_header.is_first_packet_in_frame = false;
+ packet_.completeNALU = kNaluIncomplete;
+ packet_.seqNum += 1;
+ packet_.markerBit = false;
+ FillPacket(1);
+ EXPECT_EQ(packet_buffer_size(), static_cast<size_t>(session_.InsertPacket(
+ packet_, frame_buffer_, frame_data)));
+
+ packet_.video_header.is_first_packet_in_frame = true;
+ packet_.completeNALU = kNaluComplete;
+ packet_.seqNum -= 1;
+ packet_.markerBit = false;
+ FillPacket(0);
+ EXPECT_EQ(packet_buffer_size(), static_cast<size_t>(session_.InsertPacket(
+ packet_, frame_buffer_, frame_data)));
+
+ packet_.video_header.is_first_packet_in_frame = false;
+ packet_.completeNALU = kNaluEnd;
+ packet_.seqNum += 2;
+ packet_.markerBit = true;
+ FillPacket(2);
+ EXPECT_EQ(packet_buffer_size(), static_cast<size_t>(session_.InsertPacket(
+ packet_, frame_buffer_, frame_data)));
+
+ EXPECT_EQ(0U, session_.MakeDecodable());
+ EXPECT_EQ(3 * packet_buffer_size(), session_.SessionLength());
+ SCOPED_TRACE("Calling VerifyNalu");
+ EXPECT_TRUE(VerifyNalu(0, 1, 0));
+}
+
+TEST_F(TestNalUnits, WrapLosses) {
+ packet_.seqNum = 0xFFFF;
+ packet_.video_header.is_first_packet_in_frame = false;
+ packet_.completeNALU = kNaluIncomplete;
+ packet_.markerBit = false;
+ FillPacket(1);
+ EXPECT_EQ(packet_buffer_size(), static_cast<size_t>(session_.InsertPacket(
+ packet_, frame_buffer_, frame_data)));
+
+ packet_.video_header.is_first_packet_in_frame = false;
+ packet_.completeNALU = kNaluEnd;
+ packet_.seqNum += 2;
+ packet_.markerBit = true;
+ FillPacket(2);
+ EXPECT_EQ(packet_buffer_size(), static_cast<size_t>(session_.InsertPacket(
+ packet_, frame_buffer_, frame_data)));
+
+ EXPECT_EQ(2 * packet_buffer_size(), session_.MakeDecodable());
+ EXPECT_EQ(0U, session_.SessionLength());
+}
+
+TEST_F(TestNalUnits, ReorderWrapLosses) {
+ packet_.seqNum = 0xFFFF;
+
+ packet_.video_header.is_first_packet_in_frame = false;
+ packet_.completeNALU = kNaluEnd;
+ packet_.seqNum += 2;
+ packet_.markerBit = true;
+ FillPacket(2);
+ EXPECT_EQ(packet_buffer_size(), static_cast<size_t>(session_.InsertPacket(
+ packet_, frame_buffer_, frame_data)));
+
+ packet_.seqNum -= 2;
+ packet_.video_header.is_first_packet_in_frame = false;
+ packet_.completeNALU = kNaluIncomplete;
+ packet_.markerBit = false;
+ FillPacket(1);
+ EXPECT_EQ(packet_buffer_size(), static_cast<size_t>(session_.InsertPacket(
+ packet_, frame_buffer_, frame_data)));
+
+ EXPECT_EQ(2 * packet_buffer_size(), session_.MakeDecodable());
+ EXPECT_EQ(0U, session_.SessionLength());
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/svc/BUILD.gn b/third_party/libwebrtc/modules/video_coding/svc/BUILD.gn
new file mode 100644
index 0000000000..b8ce91d99a
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/svc/BUILD.gn
@@ -0,0 +1,135 @@
+# Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+import("../../../webrtc.gni")
+
+rtc_source_set("scalability_mode_util") {
+ sources = [
+ "scalability_mode_util.cc",
+ "scalability_mode_util.h",
+ ]
+ deps = [
+ "../../../api/video_codecs:scalability_mode",
+ "../../../api/video_codecs:video_codecs_api",
+ "../../../rtc_base:checks",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/strings",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+}
+
+rtc_source_set("scalable_video_controller") {
+ sources = [
+ "scalable_video_controller.h",
+ "scalable_video_controller_no_layering.cc",
+ "scalable_video_controller_no_layering.h",
+ ]
+ deps = [
+ "../../../api/transport/rtp:dependency_descriptor",
+ "../../../api/video:video_bitrate_allocation",
+ "../../../common_video/generic_frame_descriptor",
+ "../../../rtc_base:checks",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/container:inlined_vector",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+}
+
+rtc_source_set("scalability_structures") {
+ sources = [
+ "create_scalability_structure.cc",
+ "create_scalability_structure.h",
+ "scalability_structure_full_svc.cc",
+ "scalability_structure_full_svc.h",
+ "scalability_structure_key_svc.cc",
+ "scalability_structure_key_svc.h",
+ "scalability_structure_l2t2_key_shift.cc",
+ "scalability_structure_l2t2_key_shift.h",
+ "scalability_structure_simulcast.cc",
+ "scalability_structure_simulcast.h",
+ ]
+ deps = [
+ ":scalable_video_controller",
+ "../../../api/transport/rtp:dependency_descriptor",
+ "../../../api/video:video_bitrate_allocation",
+ "../../../api/video_codecs:scalability_mode",
+ "../../../common_video/generic_frame_descriptor",
+ "../../../rtc_base:checks",
+ "../../../rtc_base:logging",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/base:core_headers",
+ "//third_party/abseil-cpp/absl/strings",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+}
+
+rtc_source_set("svc_rate_allocator") {
+ sources = [
+ "svc_rate_allocator.cc",
+ "svc_rate_allocator.h",
+ ]
+ deps = [
+ ":scalability_structures",
+ "../../../api/video:video_bitrate_allocation",
+ "../../../api/video:video_bitrate_allocator",
+ "../../../api/video:video_codec_constants",
+ "../../../api/video_codecs:video_codecs_api",
+ "../../../rtc_base:checks",
+ "../../../rtc_base/experiments:stable_target_rate_experiment",
+ ]
+ absl_deps = [ "//third_party/abseil-cpp/absl/container:inlined_vector" ]
+}
+
+if (rtc_include_tests) {
+ rtc_source_set("scalability_structure_tests") {
+ testonly = true
+ sources = [
+ "scalability_mode_util_unittest.cc",
+ "scalability_structure_full_svc_unittest.cc",
+ "scalability_structure_key_svc_unittest.cc",
+ "scalability_structure_l2t2_key_shift_unittest.cc",
+ "scalability_structure_test_helpers.cc",
+ "scalability_structure_test_helpers.h",
+ "scalability_structure_unittest.cc",
+ ]
+ deps = [
+ ":scalability_mode_util",
+ ":scalability_structures",
+ ":scalable_video_controller",
+ "..:chain_diff_calculator",
+ "..:frame_dependencies_calculator",
+ "../../../api:array_view",
+ "../../../api/transport/rtp:dependency_descriptor",
+ "../../../api/video:video_bitrate_allocation",
+ "../../../api/video:video_frame_type",
+ "../../../api/video_codecs:scalability_mode",
+ "../../../common_video/generic_frame_descriptor",
+ "../../../rtc_base:stringutils",
+ "../../../test:test_support",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/strings",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+ }
+
+ rtc_source_set("svc_rate_allocator_tests") {
+ testonly = true
+ sources = [ "svc_rate_allocator_unittest.cc" ]
+ deps = [
+ ":svc_rate_allocator",
+ "..:webrtc_vp9_helpers",
+ "../../../rtc_base:checks",
+ "../../../test:field_trial",
+ "../../../test:test_support",
+ ]
+ }
+}
diff --git a/third_party/libwebrtc/modules/video_coding/svc/create_scalability_structure.cc b/third_party/libwebrtc/modules/video_coding/svc/create_scalability_structure.cc
new file mode 100644
index 0000000000..d5a85e0e3c
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/svc/create_scalability_structure.cc
@@ -0,0 +1,171 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/video_coding/svc/create_scalability_structure.h"
+
+#include <memory>
+
+#include "api/video_codecs/scalability_mode.h"
+#include "modules/video_coding/svc/scalability_structure_full_svc.h"
+#include "modules/video_coding/svc/scalability_structure_key_svc.h"
+#include "modules/video_coding/svc/scalability_structure_l2t2_key_shift.h"
+#include "modules/video_coding/svc/scalability_structure_simulcast.h"
+#include "modules/video_coding/svc/scalable_video_controller.h"
+#include "modules/video_coding/svc/scalable_video_controller_no_layering.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace {
+
+struct NamedStructureFactory {
+ ScalabilityMode name;
+ // Use a function pointer to make NamedStructureFactory trivially destructible.
+ std::unique_ptr<ScalableVideoController> (*factory)();
+ ScalableVideoController::StreamLayersConfig config;
+};
+
+// Wraps std::make_unique so the factory has the correct return type.
+template <typename T>
+std::unique_ptr<ScalableVideoController> Create() {
+ return std::make_unique<T>();
+}
+
+template <typename T>
+std::unique_ptr<ScalableVideoController> CreateH() {
+ // 1.5:1 scaling, see https://w3c.github.io/webrtc-svc/#scalabilitymodes*
+ typename T::ScalingFactor factor;
+ factor.num = 2;
+ factor.den = 3;
+ return std::make_unique<T>(factor);
+}
+
+constexpr ScalableVideoController::StreamLayersConfig kConfigL1T1 = {
+ /*num_spatial_layers=*/1, /*num_temporal_layers=*/1,
+ /*uses_reference_scaling=*/false};
+
+constexpr ScalableVideoController::StreamLayersConfig kConfigL1T2 = {
+ /*num_spatial_layers=*/1, /*num_temporal_layers=*/2,
+ /*uses_reference_scaling=*/false};
+
+constexpr ScalableVideoController::StreamLayersConfig kConfigL1T3 = {
+ /*num_spatial_layers=*/1, /*num_temporal_layers=*/3,
+ /*uses_reference_scaling=*/false};
+
+constexpr ScalableVideoController::StreamLayersConfig kConfigL2T1 = {
+ /*num_spatial_layers=*/2,
+ /*num_temporal_layers=*/1,
+ /*uses_reference_scaling=*/true,
+ {1, 1},
+ {2, 1}};
+
+constexpr ScalableVideoController::StreamLayersConfig kConfigL2T1h = {
+ /*num_spatial_layers=*/2,
+ /*num_temporal_layers=*/1,
+ /*uses_reference_scaling=*/true,
+ {2, 1},
+ {3, 1}};
+
+constexpr ScalableVideoController::StreamLayersConfig kConfigL2T2 = {
+ /*num_spatial_layers=*/2,
+ /*num_temporal_layers=*/2,
+ /*uses_reference_scaling=*/true,
+ {1, 1},
+ {2, 1}};
+
+constexpr ScalableVideoController::StreamLayersConfig kConfigL2T3 = {
+ /*num_spatial_layers=*/2,
+ /*num_temporal_layers=*/3,
+ /*uses_reference_scaling=*/true,
+ {1, 1},
+ {2, 1}};
+
+constexpr ScalableVideoController::StreamLayersConfig kConfigL3T1 = {
+ /*num_spatial_layers=*/3,
+ /*num_temporal_layers=*/1,
+ /*uses_reference_scaling=*/true,
+ {1, 1, 1},
+ {4, 2, 1}};
+
+constexpr ScalableVideoController::StreamLayersConfig kConfigL3T3 = {
+ /*num_spatial_layers=*/3,
+ /*num_temporal_layers=*/3,
+ /*uses_reference_scaling=*/true,
+ {1, 1, 1},
+ {4, 2, 1}};
+
+constexpr ScalableVideoController::StreamLayersConfig kConfigS2T1 = {
+ /*num_spatial_layers=*/2,
+ /*num_temporal_layers=*/1,
+ /*uses_reference_scaling=*/false,
+ {1, 1},
+ {2, 1}};
+
+constexpr ScalableVideoController::StreamLayersConfig kConfigS2T3 = {
+ /*num_spatial_layers=*/2,
+ /*num_temporal_layers=*/3,
+ /*uses_reference_scaling=*/false,
+ {1, 1},
+ {2, 1}};
+
+constexpr ScalableVideoController::StreamLayersConfig kConfigS3T3 = {
+ /*num_spatial_layers=*/3,
+ /*num_temporal_layers=*/3,
+ /*uses_reference_scaling=*/false,
+ {1, 1, 1},
+ {4, 2, 1}};
+
+constexpr NamedStructureFactory kFactories[] = {
+ {ScalabilityMode::kL1T1, Create<ScalableVideoControllerNoLayering>,
+ kConfigL1T1},
+ {ScalabilityMode::kL1T2, Create<ScalabilityStructureL1T2>, kConfigL1T2},
+ {ScalabilityMode::kL1T3, Create<ScalabilityStructureL1T3>, kConfigL1T3},
+ {ScalabilityMode::kL2T1, Create<ScalabilityStructureL2T1>, kConfigL2T1},
+ {ScalabilityMode::kL2T1h, CreateH<ScalabilityStructureL2T1>, kConfigL2T1h},
+ {ScalabilityMode::kL2T1_KEY, Create<ScalabilityStructureL2T1Key>,
+ kConfigL2T1},
+ {ScalabilityMode::kL2T2, Create<ScalabilityStructureL2T2>, kConfigL2T2},
+ {ScalabilityMode::kL2T2_KEY, Create<ScalabilityStructureL2T2Key>,
+ kConfigL2T2},
+ {ScalabilityMode::kL2T2_KEY_SHIFT, Create<ScalabilityStructureL2T2KeyShift>,
+ kConfigL2T2},
+ {ScalabilityMode::kL2T3, Create<ScalabilityStructureL2T3>, kConfigL2T3},
+ {ScalabilityMode::kL2T3_KEY, Create<ScalabilityStructureL2T3Key>,
+ kConfigL2T3},
+ {ScalabilityMode::kL3T1, Create<ScalabilityStructureL3T1>, kConfigL3T1},
+ {ScalabilityMode::kL3T3, Create<ScalabilityStructureL3T3>, kConfigL3T3},
+ {ScalabilityMode::kL3T3_KEY, Create<ScalabilityStructureL3T3Key>,
+ kConfigL3T3},
+ {ScalabilityMode::kS2T1, Create<ScalabilityStructureS2T1>, kConfigS2T1},
+ {ScalabilityMode::kS2T3, Create<ScalabilityStructureS2T3>, kConfigS2T3},
+ {ScalabilityMode::kS3T3, Create<ScalabilityStructureS3T3>, kConfigS3T3},
+};
+
+} // namespace
+
+std::unique_ptr<ScalableVideoController> CreateScalabilityStructure(
+ ScalabilityMode name) {
+ for (const auto& entry : kFactories) {
+ if (entry.name == name) {
+ return entry.factory();
+ }
+ }
+ return nullptr;
+}
+
+absl::optional<ScalableVideoController::StreamLayersConfig>
+ScalabilityStructureConfig(ScalabilityMode name) {
+ for (const auto& entry : kFactories) {
+ if (entry.name == name) {
+ return entry.config;
+ }
+ }
+ return absl::nullopt;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/svc/create_scalability_structure.h b/third_party/libwebrtc/modules/video_coding/svc/create_scalability_structure.h
new file mode 100644
index 0000000000..3b67443693
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/svc/create_scalability_structure.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef MODULES_VIDEO_CODING_SVC_CREATE_SCALABILITY_STRUCTURE_H_
+#define MODULES_VIDEO_CODING_SVC_CREATE_SCALABILITY_STRUCTURE_H_
+
+#include <memory>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/video_codecs/scalability_mode.h"
+#include "modules/video_coding/svc/scalable_video_controller.h"
+
+namespace webrtc {
+
+// Creates a structure by name according to
+// https://w3c.github.io/webrtc-svc/#scalabilitymodes*
+// Returns nullptr for unknown name.
+std::unique_ptr<ScalableVideoController> CreateScalabilityStructure(
+ ScalabilityMode name);
+
+// Returns a description of the scalability structure identified by `name`.
+// Returns nullopt for unknown name.
+absl::optional<ScalableVideoController::StreamLayersConfig>
+ScalabilityStructureConfig(ScalabilityMode name);
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_SVC_CREATE_SCALABILITY_STRUCTURE_H_
diff --git a/third_party/libwebrtc/modules/video_coding/svc/scalability_mode_util.cc b/third_party/libwebrtc/modules/video_coding/svc/scalability_mode_util.cc
new file mode 100644
index 0000000000..addcdd1342
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/svc/scalability_mode_util.cc
@@ -0,0 +1,215 @@
+/*
+ * Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/svc/scalability_mode_util.h"
+
+#include "absl/strings/string_view.h"
+#include "absl/types/optional.h"
+#include "api/video_codecs/scalability_mode.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+absl::optional<ScalabilityMode> ScalabilityModeFromString(
+ absl::string_view mode_string) {
+ if (mode_string == "L1T1")
+ return ScalabilityMode::kL1T1;
+ if (mode_string == "L1T2")
+ return ScalabilityMode::kL1T2;
+ if (mode_string == "L1T2h")
+ return ScalabilityMode::kL1T2h;
+ if (mode_string == "L1T3")
+ return ScalabilityMode::kL1T3;
+ if (mode_string == "L1T3h")
+ return ScalabilityMode::kL1T3h;
+
+ if (mode_string == "L2T1")
+ return ScalabilityMode::kL2T1;
+ if (mode_string == "L2T1h")
+ return ScalabilityMode::kL2T1h;
+ if (mode_string == "L2T1_KEY")
+ return ScalabilityMode::kL2T1_KEY;
+
+ if (mode_string == "L2T2")
+ return ScalabilityMode::kL2T2;
+ if (mode_string == "L2T2h")
+ return ScalabilityMode::kL2T2h;
+ if (mode_string == "L2T2_KEY")
+ return ScalabilityMode::kL2T2_KEY;
+ if (mode_string == "L2T2_KEY_SHIFT")
+ return ScalabilityMode::kL2T2_KEY_SHIFT;
+ if (mode_string == "L2T3")
+ return ScalabilityMode::kL2T3;
+ if (mode_string == "L2T3h")
+ return ScalabilityMode::kL2T3h;
+ if (mode_string == "L2T3_KEY")
+ return ScalabilityMode::kL2T3_KEY;
+
+ if (mode_string == "L3T1")
+ return ScalabilityMode::kL3T1;
+ if (mode_string == "L3T1h")
+ return ScalabilityMode::kL3T1h;
+ if (mode_string == "L3T1_KEY")
+ return ScalabilityMode::kL3T1_KEY;
+
+ if (mode_string == "L3T2")
+ return ScalabilityMode::kL3T2;
+ if (mode_string == "L3T2h")
+ return ScalabilityMode::kL3T2h;
+ if (mode_string == "L3T2_KEY")
+ return ScalabilityMode::kL3T2_KEY;
+
+ if (mode_string == "L3T3")
+ return ScalabilityMode::kL3T3;
+ if (mode_string == "L3T3h")
+ return ScalabilityMode::kL3T3h;
+ if (mode_string == "L3T3_KEY")
+ return ScalabilityMode::kL3T3_KEY;
+
+ if (mode_string == "S2T1")
+ return ScalabilityMode::kS2T1;
+ if (mode_string == "S2T3")
+ return ScalabilityMode::kS2T3;
+ if (mode_string == "S3T3")
+ return ScalabilityMode::kS3T3;
+
+ return absl::nullopt;
+}
+
+InterLayerPredMode ScalabilityModeToInterLayerPredMode(
+ ScalabilityMode scalability_mode) {
+ switch (scalability_mode) {
+ case ScalabilityMode::kL1T1:
+ case ScalabilityMode::kL1T2:
+ case ScalabilityMode::kL1T2h:
+ case ScalabilityMode::kL1T3:
+ case ScalabilityMode::kL1T3h:
+ case ScalabilityMode::kL2T1:
+ case ScalabilityMode::kL2T1h:
+ return InterLayerPredMode::kOn;
+ case ScalabilityMode::kL2T1_KEY:
+ return InterLayerPredMode::kOnKeyPic;
+ case ScalabilityMode::kL2T2:
+ case ScalabilityMode::kL2T2h:
+ return InterLayerPredMode::kOn;
+ case ScalabilityMode::kL2T2_KEY:
+ case ScalabilityMode::kL2T2_KEY_SHIFT:
+ return InterLayerPredMode::kOnKeyPic;
+ case ScalabilityMode::kL2T3:
+ case ScalabilityMode::kL2T3h:
+ return InterLayerPredMode::kOn;
+ case ScalabilityMode::kL2T3_KEY:
+ return InterLayerPredMode::kOnKeyPic;
+ case ScalabilityMode::kL3T1:
+ case ScalabilityMode::kL3T1h:
+ return InterLayerPredMode::kOn;
+ case ScalabilityMode::kL3T1_KEY:
+ return InterLayerPredMode::kOnKeyPic;
+ case ScalabilityMode::kL3T2:
+ case ScalabilityMode::kL3T2h:
+ return InterLayerPredMode::kOn;
+ case ScalabilityMode::kL3T2_KEY:
+ return InterLayerPredMode::kOnKeyPic;
+ case ScalabilityMode::kL3T3:
+ case ScalabilityMode::kL3T3h:
+ return InterLayerPredMode::kOn;
+ case ScalabilityMode::kL3T3_KEY:
+ return InterLayerPredMode::kOnKeyPic;
+ case ScalabilityMode::kS2T1:
+ case ScalabilityMode::kS2T3:
+ case ScalabilityMode::kS3T3:
+ return InterLayerPredMode::kOff;
+ }
+ RTC_CHECK_NOTREACHED();
+}
+
+int ScalabilityModeToNumSpatialLayers(ScalabilityMode scalability_mode) {
+ switch (scalability_mode) {
+ case ScalabilityMode::kL1T1:
+ case ScalabilityMode::kL1T2:
+ case ScalabilityMode::kL1T2h:
+ case ScalabilityMode::kL1T3:
+ case ScalabilityMode::kL1T3h:
+ return 1;
+ case ScalabilityMode::kL2T1:
+ case ScalabilityMode::kL2T1h:
+ case ScalabilityMode::kL2T1_KEY:
+ case ScalabilityMode::kL2T2:
+ case ScalabilityMode::kL2T2h:
+ case ScalabilityMode::kL2T2_KEY:
+ case ScalabilityMode::kL2T2_KEY_SHIFT:
+ case ScalabilityMode::kL2T3:
+ case ScalabilityMode::kL2T3h:
+ case ScalabilityMode::kL2T3_KEY:
+ return 2;
+ case ScalabilityMode::kL3T1:
+ case ScalabilityMode::kL3T1h:
+ case ScalabilityMode::kL3T1_KEY:
+ case ScalabilityMode::kL3T2:
+ case ScalabilityMode::kL3T2h:
+ case ScalabilityMode::kL3T2_KEY:
+ case ScalabilityMode::kL3T3:
+ case ScalabilityMode::kL3T3h:
+ case ScalabilityMode::kL3T3_KEY:
+ return 3;
+ case ScalabilityMode::kS2T1:
+ case ScalabilityMode::kS2T3:
+ return 2;
+ case ScalabilityMode::kS3T3:
+ return 3;
+ }
+ RTC_CHECK_NOTREACHED();
+}
+
+int ScalabilityModeToNumTemporalLayers(ScalabilityMode scalability_mode) {
+ switch (scalability_mode) {
+ case ScalabilityMode::kL1T1:
+ return 1;
+ case ScalabilityMode::kL1T2:
+ case ScalabilityMode::kL1T2h:
+ return 2;
+ case ScalabilityMode::kL1T3:
+ case ScalabilityMode::kL1T3h:
+ return 3;
+ case ScalabilityMode::kL2T1:
+ case ScalabilityMode::kL2T1h:
+ case ScalabilityMode::kL2T1_KEY:
+ return 1;
+ case ScalabilityMode::kL2T2:
+ case ScalabilityMode::kL2T2h:
+ case ScalabilityMode::kL2T2_KEY:
+ case ScalabilityMode::kL2T2_KEY_SHIFT:
+ return 2;
+ case ScalabilityMode::kL2T3:
+ case ScalabilityMode::kL2T3h:
+ case ScalabilityMode::kL2T3_KEY:
+ return 3;
+ case ScalabilityMode::kL3T1:
+ case ScalabilityMode::kL3T1h:
+ case ScalabilityMode::kL3T1_KEY:
+ return 1;
+ case ScalabilityMode::kL3T2:
+ case ScalabilityMode::kL3T2h:
+ case ScalabilityMode::kL3T2_KEY:
+ return 2;
+ case ScalabilityMode::kL3T3:
+ case ScalabilityMode::kL3T3h:
+ case ScalabilityMode::kL3T3_KEY:
+ return 3;
+ case ScalabilityMode::kS2T1:
+ return 1;
+ case ScalabilityMode::kS2T3:
+ case ScalabilityMode::kS3T3:
+ return 3;
+ }
+ RTC_CHECK_NOTREACHED();
+}
+
+} // namespace webrtc
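A short sketch composing the three helpers above; the "L2T3_KEY" literal is just an example input, and the expected values follow from the switch statements in this file:

  absl::optional<ScalabilityMode> mode = ScalabilityModeFromString("L2T3_KEY");
  RTC_DCHECK(mode.has_value());
  int spatial = ScalabilityModeToNumSpatialLayers(*mode);    // 2
  int temporal = ScalabilityModeToNumTemporalLayers(*mode);  // 3
  InterLayerPredMode pred = ScalabilityModeToInterLayerPredMode(*mode);
  // pred == InterLayerPredMode::kOnKeyPic: the _KEY modes restrict
  // inter-layer prediction to key pictures.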
diff --git a/third_party/libwebrtc/modules/video_coding/svc/scalability_mode_util.h b/third_party/libwebrtc/modules/video_coding/svc/scalability_mode_util.h
new file mode 100644
index 0000000000..e9308bde33
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/svc/scalability_mode_util.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_SVC_SCALABILITY_MODE_UTIL_H_
+#define MODULES_VIDEO_CODING_SVC_SCALABILITY_MODE_UTIL_H_
+
+#include "absl/strings/string_view.h"
+#include "absl/types/optional.h"
+#include "api/video_codecs/scalability_mode.h"
+#include "api/video_codecs/video_codec.h"
+
+namespace webrtc {
+
+absl::optional<ScalabilityMode> ScalabilityModeFromString(
+ absl::string_view scalability_mode_string);
+
+InterLayerPredMode ScalabilityModeToInterLayerPredMode(
+ ScalabilityMode scalability_mode);
+
+int ScalabilityModeToNumSpatialLayers(ScalabilityMode scalability_mode);
+
+int ScalabilityModeToNumTemporalLayers(ScalabilityMode scalability_mode);
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_SVC_SCALABILITY_MODE_UTIL_H_
diff --git a/third_party/libwebrtc/modules/video_coding/svc/scalability_mode_util_gn/moz.build b/third_party/libwebrtc/modules/video_coding/svc/scalability_mode_util_gn/moz.build
new file mode 100644
index 0000000000..2c545af9de
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/svc/scalability_mode_util_gn/moz.build
@@ -0,0 +1,205 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/video_coding/svc/scalability_mode_util.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("scalability_mode_util_gn")
diff --git a/third_party/libwebrtc/modules/video_coding/svc/scalability_mode_util_unittest.cc b/third_party/libwebrtc/modules/video_coding/svc/scalability_mode_util_unittest.cc
new file mode 100644
index 0000000000..7fb103631f
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/svc/scalability_mode_util_unittest.cc
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/svc/scalability_mode_util.h"
+
+#include "absl/strings/string_view.h"
+#include "absl/types/optional.h"
+#include "api/video_codecs/scalability_mode.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+TEST(ScalabilityModeUtil, ConvertsL1T2) {
+ EXPECT_EQ(ScalabilityModeFromString("L1T2"), ScalabilityMode::kL1T2);
+ EXPECT_EQ(ScalabilityModeToString(ScalabilityMode::kL1T2), "L1T2");
+}
+
+TEST(ScalabilityModeUtil, RejectsUnknownString) {
+ EXPECT_EQ(ScalabilityModeFromString(""), absl::nullopt);
+ EXPECT_EQ(ScalabilityModeFromString("not-a-mode"), absl::nullopt);
+}
+
+// Check roundtrip conversion of all enum values.
+TEST(ScalabilityModeUtil, ConvertsAllToAndFromString) {
+ const ScalabilityMode kLastEnum = ScalabilityMode::kS3T3;
+ for (int numerical_enum = 0; numerical_enum <= static_cast<int>(kLastEnum);
+ numerical_enum++) {
+ ScalabilityMode scalability_mode =
+ static_cast<ScalabilityMode>(numerical_enum);
+ absl::string_view scalability_mode_string =
+ ScalabilityModeToString(scalability_mode);
+ EXPECT_FALSE(scalability_mode_string.empty());
+ EXPECT_EQ(ScalabilityModeFromString(scalability_mode_string),
+ scalability_mode);
+ }
+}
+
+} // namespace
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/svc/scalability_structure_full_svc.cc b/third_party/libwebrtc/modules/video_coding/svc/scalability_structure_full_svc.cc
new file mode 100644
index 0000000000..d73f167231
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/svc/scalability_structure_full_svc.cc
@@ -0,0 +1,421 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/video_coding/svc/scalability_structure_full_svc.h"
+
+#include <utility>
+#include <vector>
+
+#include "absl/strings/string_view.h"
+#include "absl/types/optional.h"
+#include "api/transport/rtp/dependency_descriptor.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+constexpr int ScalabilityStructureFullSvc::kMaxNumSpatialLayers;
+constexpr int ScalabilityStructureFullSvc::kMaxNumTemporalLayers;
+constexpr absl::string_view ScalabilityStructureFullSvc::kFramePatternNames[];
+
+ScalabilityStructureFullSvc::ScalabilityStructureFullSvc(
+ int num_spatial_layers,
+ int num_temporal_layers,
+ ScalingFactor resolution_factor)
+ : num_spatial_layers_(num_spatial_layers),
+ num_temporal_layers_(num_temporal_layers),
+ resolution_factor_(resolution_factor),
+ active_decode_targets_(
+ (uint32_t{1} << (num_spatial_layers * num_temporal_layers)) - 1) {
+ RTC_DCHECK_LE(num_spatial_layers, kMaxNumSpatialLayers);
+ RTC_DCHECK_LE(num_temporal_layers, kMaxNumTemporalLayers);
+}
+
+ScalabilityStructureFullSvc::~ScalabilityStructureFullSvc() = default;
+
+ScalabilityStructureFullSvc::StreamLayersConfig
+ScalabilityStructureFullSvc::StreamConfig() const {
+ StreamLayersConfig result;
+ result.num_spatial_layers = num_spatial_layers_;
+ result.num_temporal_layers = num_temporal_layers_;
+ result.scaling_factor_num[num_spatial_layers_ - 1] = 1;
+ result.scaling_factor_den[num_spatial_layers_ - 1] = 1;
+ for (int sid = num_spatial_layers_ - 1; sid > 0; --sid) {
+ result.scaling_factor_num[sid - 1] =
+ resolution_factor_.num * result.scaling_factor_num[sid];
+ result.scaling_factor_den[sid - 1] =
+ resolution_factor_.den * result.scaling_factor_den[sid];
+ }
+ result.uses_reference_scaling = num_spatial_layers_ > 1;
+ return result;
+}
+
+bool ScalabilityStructureFullSvc::TemporalLayerIsActive(int tid) const {
+ if (tid >= num_temporal_layers_) {
+ return false;
+ }
+ for (int sid = 0; sid < num_spatial_layers_; ++sid) {
+ if (DecodeTargetIsActive(sid, tid)) {
+ return true;
+ }
+ }
+ return false;
+}
+
+DecodeTargetIndication ScalabilityStructureFullSvc::Dti(
+ int sid,
+ int tid,
+ const LayerFrameConfig& config) {
+ if (sid < config.SpatialId() || tid < config.TemporalId()) {
+ return DecodeTargetIndication::kNotPresent;
+ }
+ if (sid == config.SpatialId()) {
+ if (tid == 0) {
+ RTC_DCHECK_EQ(config.TemporalId(), 0);
+ return DecodeTargetIndication::kSwitch;
+ }
+ if (tid == config.TemporalId()) {
+ return DecodeTargetIndication::kDiscardable;
+ }
+ if (tid > config.TemporalId()) {
+ RTC_DCHECK_GT(tid, config.TemporalId());
+ return DecodeTargetIndication::kSwitch;
+ }
+ }
+ RTC_DCHECK_GT(sid, config.SpatialId());
+ RTC_DCHECK_GE(tid, config.TemporalId());
+ if (config.IsKeyframe() || config.Id() == kKey) {
+ return DecodeTargetIndication::kSwitch;
+ }
+ return DecodeTargetIndication::kRequired;
+}
+
+ScalabilityStructureFullSvc::FramePattern
+ScalabilityStructureFullSvc::NextPattern() const {
+ switch (last_pattern_) {
+ case kNone:
+ return kKey;
+ case kDeltaT2B:
+ return kDeltaT0;
+ case kDeltaT2A:
+ if (TemporalLayerIsActive(1)) {
+ return kDeltaT1;
+ }
+ return kDeltaT0;
+ case kDeltaT1:
+ if (TemporalLayerIsActive(2)) {
+ return kDeltaT2B;
+ }
+ return kDeltaT0;
+ case kKey:
+ case kDeltaT0:
+ if (TemporalLayerIsActive(2)) {
+ return kDeltaT2A;
+ }
+ if (TemporalLayerIsActive(1)) {
+ return kDeltaT1;
+ }
+ return kDeltaT0;
+ }
+ RTC_DCHECK_NOTREACHED();
+ return kNone;
+}
+
+std::vector<ScalableVideoController::LayerFrameConfig>
+ScalabilityStructureFullSvc::NextFrameConfig(bool restart) {
+ std::vector<LayerFrameConfig> configs;
+ if (active_decode_targets_.none()) {
+ last_pattern_ = kNone;
+ return configs;
+ }
+ configs.reserve(num_spatial_layers_);
+
+ if (last_pattern_ == kNone || restart) {
+ can_reference_t0_frame_for_spatial_id_.reset();
+ last_pattern_ = kNone;
+ }
+ FramePattern current_pattern = NextPattern();
+
+ absl::optional<int> spatial_dependency_buffer_id;
+ switch (current_pattern) {
+ case kDeltaT0:
+ case kKey:
+ // Disallow temporal references that cross T0 on higher temporal layers.
+ can_reference_t1_frame_for_spatial_id_.reset();
+ for (int sid = 0; sid < num_spatial_layers_; ++sid) {
+ if (!DecodeTargetIsActive(sid, /*tid=*/0)) {
+ // The next frame from spatial layer `sid` shouldn't depend on a
+ // potentially old previous frame from the same spatial layer.
+ can_reference_t0_frame_for_spatial_id_.reset(sid);
+ continue;
+ }
+ configs.emplace_back();
+ ScalableVideoController::LayerFrameConfig& config = configs.back();
+ config.Id(current_pattern).S(sid).T(0);
+
+ if (spatial_dependency_buffer_id) {
+ config.Reference(*spatial_dependency_buffer_id);
+ } else if (current_pattern == kKey) {
+ config.Keyframe();
+ }
+
+ if (can_reference_t0_frame_for_spatial_id_[sid]) {
+ config.ReferenceAndUpdate(BufferIndex(sid, /*tid=*/0));
+ } else {
+ // TODO(bugs.webrtc.org/11999): Propagate chain restart on delta frame
+ // to ChainDiffCalculator
+ config.Update(BufferIndex(sid, /*tid=*/0));
+ }
+
+ spatial_dependency_buffer_id = BufferIndex(sid, /*tid=*/0);
+ }
+ break;
+ case kDeltaT1:
+ for (int sid = 0; sid < num_spatial_layers_; ++sid) {
+ if (!DecodeTargetIsActive(sid, /*tid=*/1) ||
+ !can_reference_t0_frame_for_spatial_id_[sid]) {
+ continue;
+ }
+ configs.emplace_back();
+ ScalableVideoController::LayerFrameConfig& config = configs.back();
+ config.Id(current_pattern).S(sid).T(1);
+ // Temporal reference.
+ config.Reference(BufferIndex(sid, /*tid=*/0));
+ // Spatial reference unless this is the lowest active spatial layer.
+ if (spatial_dependency_buffer_id) {
+ config.Reference(*spatial_dependency_buffer_id);
+ }
+ // No frame references the top-layer frame, so no need to save it into a buffer.
+ if (num_temporal_layers_ > 2 || sid < num_spatial_layers_ - 1) {
+ config.Update(BufferIndex(sid, /*tid=*/1));
+ }
+ spatial_dependency_buffer_id = BufferIndex(sid, /*tid=*/1);
+ }
+ break;
+ case kDeltaT2A:
+ case kDeltaT2B:
+ for (int sid = 0; sid < num_spatial_layers_; ++sid) {
+ if (!DecodeTargetIsActive(sid, /*tid=*/2) ||
+ !can_reference_t0_frame_for_spatial_id_[sid]) {
+ continue;
+ }
+ configs.emplace_back();
+ ScalableVideoController::LayerFrameConfig& config = configs.back();
+ config.Id(current_pattern).S(sid).T(2);
+ // Temporal reference.
+ if (current_pattern == kDeltaT2B &&
+ can_reference_t1_frame_for_spatial_id_[sid]) {
+ config.Reference(BufferIndex(sid, /*tid=*/1));
+ } else {
+ config.Reference(BufferIndex(sid, /*tid=*/0));
+ }
+ // Spatial reference unless this is the lowest active spatial layer.
+ if (spatial_dependency_buffer_id) {
+ config.Reference(*spatial_dependency_buffer_id);
+ }
+ // No frame references the top-layer frame, so no need to save it into a buffer.
+ if (sid < num_spatial_layers_ - 1) {
+ config.Update(BufferIndex(sid, /*tid=*/2));
+ }
+ spatial_dependency_buffer_id = BufferIndex(sid, /*tid=*/2);
+ }
+ break;
+ case kNone:
+ RTC_DCHECK_NOTREACHED();
+ break;
+ }
+
+ if (configs.empty() && !restart) {
+ RTC_LOG(LS_WARNING) << "Failed to generate configuration for L"
+ << num_spatial_layers_ << "T" << num_temporal_layers_
+ << " with active decode targets "
+ << active_decode_targets_.to_string('-').substr(
+ active_decode_targets_.size() -
+ num_spatial_layers_ * num_temporal_layers_)
+ << " and transition from "
+ << kFramePatternNames[last_pattern_] << " to "
+ << kFramePatternNames[current_pattern]
+ << ". Resetting.";
+ return NextFrameConfig(/*restart=*/true);
+ }
+
+ return configs;
+}
+
+GenericFrameInfo ScalabilityStructureFullSvc::OnEncodeDone(
+ const LayerFrameConfig& config) {
+ // When the encoder drops all frames for a temporal unit, it is better to
+ // reuse the old temporal pattern rather than switch to the next one, so the
+ // switch to the next pattern is deferred here from `NextFrameConfig`.
+ // In particular, creating VP9 references relies on this behavior.
+ last_pattern_ = static_cast<FramePattern>(config.Id());
+ if (config.TemporalId() == 0) {
+ can_reference_t0_frame_for_spatial_id_.set(config.SpatialId());
+ }
+ if (config.TemporalId() == 1) {
+ can_reference_t1_frame_for_spatial_id_.set(config.SpatialId());
+ }
+
+ GenericFrameInfo frame_info;
+ frame_info.spatial_id = config.SpatialId();
+ frame_info.temporal_id = config.TemporalId();
+ frame_info.encoder_buffers = config.Buffers();
+ frame_info.decode_target_indications.reserve(num_spatial_layers_ *
+ num_temporal_layers_);
+ for (int sid = 0; sid < num_spatial_layers_; ++sid) {
+ for (int tid = 0; tid < num_temporal_layers_; ++tid) {
+ frame_info.decode_target_indications.push_back(Dti(sid, tid, config));
+ }
+ }
+ if (config.TemporalId() == 0) {
+ frame_info.part_of_chain.resize(num_spatial_layers_);
+ for (int sid = 0; sid < num_spatial_layers_; ++sid) {
+ frame_info.part_of_chain[sid] = config.SpatialId() <= sid;
+ }
+ } else {
+ frame_info.part_of_chain.assign(num_spatial_layers_, false);
+ }
+ frame_info.active_decode_targets = active_decode_targets_;
+ return frame_info;
+}
+
+void ScalabilityStructureFullSvc::OnRatesUpdated(
+ const VideoBitrateAllocation& bitrates) {
+ for (int sid = 0; sid < num_spatial_layers_; ++sid) {
+ // Enable/disable spatial layers independently.
+ bool active = true;
+ for (int tid = 0; tid < num_temporal_layers_; ++tid) {
+ // To enable a temporal layer, require nonzero bitrates for it and all
+ // lower temporal layers.
+ active = active && bitrates.GetBitrate(sid, tid) > 0;
+ SetDecodeTargetIsActive(sid, tid, active);
+ }
+ }
+}
+
+FrameDependencyStructure ScalabilityStructureL1T2::DependencyStructure() const {
+ FrameDependencyStructure structure;
+ structure.num_decode_targets = 2;
+ structure.num_chains = 1;
+ structure.decode_target_protected_by_chain = {0, 0};
+ structure.templates.resize(3);
+ structure.templates[0].T(0).Dtis("SS").ChainDiffs({0});
+ structure.templates[1].T(0).Dtis("SS").ChainDiffs({2}).FrameDiffs({2});
+ structure.templates[2].T(1).Dtis("-D").ChainDiffs({1}).FrameDiffs({1});
+ return structure;
+}
+
+FrameDependencyStructure ScalabilityStructureL1T3::DependencyStructure() const {
+ FrameDependencyStructure structure;
+ structure.num_decode_targets = 3;
+ structure.num_chains = 1;
+ structure.decode_target_protected_by_chain = {0, 0, 0};
+ structure.templates.resize(5);
+ structure.templates[0].T(0).Dtis("SSS").ChainDiffs({0});
+ structure.templates[1].T(0).Dtis("SSS").ChainDiffs({4}).FrameDiffs({4});
+ structure.templates[2].T(1).Dtis("-DS").ChainDiffs({2}).FrameDiffs({2});
+ structure.templates[3].T(2).Dtis("--D").ChainDiffs({1}).FrameDiffs({1});
+ structure.templates[4].T(2).Dtis("--D").ChainDiffs({3}).FrameDiffs({1});
+ return structure;
+}
+
+FrameDependencyStructure ScalabilityStructureL2T1::DependencyStructure() const {
+ FrameDependencyStructure structure;
+ structure.num_decode_targets = 2;
+ structure.num_chains = 2;
+ structure.decode_target_protected_by_chain = {0, 1};
+ structure.templates.resize(4);
+ structure.templates[0].S(0).Dtis("SR").ChainDiffs({2, 1}).FrameDiffs({2});
+ structure.templates[1].S(0).Dtis("SS").ChainDiffs({0, 0});
+ structure.templates[2].S(1).Dtis("-S").ChainDiffs({1, 1}).FrameDiffs({2, 1});
+ structure.templates[3].S(1).Dtis("-S").ChainDiffs({1, 1}).FrameDiffs({1});
+ return structure;
+}
+
+FrameDependencyStructure ScalabilityStructureL2T2::DependencyStructure() const {
+ FrameDependencyStructure structure;
+ structure.num_decode_targets = 4;
+ structure.num_chains = 2;
+ structure.decode_target_protected_by_chain = {0, 0, 1, 1};
+ structure.templates.resize(6);
+ auto& templates = structure.templates;
+ templates[0].S(0).T(0).Dtis("SSSS").ChainDiffs({0, 0});
+ templates[1].S(0).T(0).Dtis("SSRR").ChainDiffs({4, 3}).FrameDiffs({4});
+ templates[2].S(0).T(1).Dtis("-D-R").ChainDiffs({2, 1}).FrameDiffs({2});
+ templates[3].S(1).T(0).Dtis("--SS").ChainDiffs({1, 1}).FrameDiffs({1});
+ templates[4].S(1).T(0).Dtis("--SS").ChainDiffs({1, 1}).FrameDiffs({4, 1});
+ templates[5].S(1).T(1).Dtis("---D").ChainDiffs({3, 2}).FrameDiffs({2, 1});
+ return structure;
+}
+
+FrameDependencyStructure ScalabilityStructureL2T3::DependencyStructure() const {
+ FrameDependencyStructure structure;
+ structure.num_decode_targets = 6;
+ structure.num_chains = 2;
+ structure.decode_target_protected_by_chain = {0, 0, 0, 1, 1, 1};
+ auto& t = structure.templates;
+ t.resize(10);
+ t[1].S(0).T(0).Dtis("SSSSSS").ChainDiffs({0, 0});
+ t[6].S(1).T(0).Dtis("---SSS").ChainDiffs({1, 1}).FrameDiffs({1});
+ t[3].S(0).T(2).Dtis("--D--R").ChainDiffs({2, 1}).FrameDiffs({2});
+ t[8].S(1).T(2).Dtis("-----D").ChainDiffs({3, 2}).FrameDiffs({2, 1});
+ t[2].S(0).T(1).Dtis("-DS-RR").ChainDiffs({4, 3}).FrameDiffs({4});
+ t[7].S(1).T(1).Dtis("----DS").ChainDiffs({5, 4}).FrameDiffs({4, 1});
+ t[4].S(0).T(2).Dtis("--D--R").ChainDiffs({6, 5}).FrameDiffs({2});
+ t[9].S(1).T(2).Dtis("-----D").ChainDiffs({7, 6}).FrameDiffs({2, 1});
+ t[0].S(0).T(0).Dtis("SSSRRR").ChainDiffs({8, 7}).FrameDiffs({8});
+ t[5].S(1).T(0).Dtis("---SSS").ChainDiffs({1, 1}).FrameDiffs({8, 1});
+ return structure;
+}
+
+FrameDependencyStructure ScalabilityStructureL3T1::DependencyStructure() const {
+ FrameDependencyStructure structure;
+ structure.num_decode_targets = 3;
+ structure.num_chains = 3;
+ structure.decode_target_protected_by_chain = {0, 1, 2};
+ auto& templates = structure.templates;
+ templates.resize(6);
+ templates[0].S(0).Dtis("SRR").ChainDiffs({3, 2, 1}).FrameDiffs({3});
+ templates[1].S(0).Dtis("SSS").ChainDiffs({0, 0, 0});
+ templates[2].S(1).Dtis("-SR").ChainDiffs({1, 1, 1}).FrameDiffs({3, 1});
+ templates[3].S(1).Dtis("-SS").ChainDiffs({1, 1, 1}).FrameDiffs({1});
+ templates[4].S(2).Dtis("--S").ChainDiffs({2, 1, 1}).FrameDiffs({3, 1});
+ templates[5].S(2).Dtis("--S").ChainDiffs({2, 1, 1}).FrameDiffs({1});
+ return structure;
+}
+
+FrameDependencyStructure ScalabilityStructureL3T3::DependencyStructure() const {
+ FrameDependencyStructure structure;
+ structure.num_decode_targets = 9;
+ structure.num_chains = 3;
+ structure.decode_target_protected_by_chain = {0, 0, 0, 1, 1, 1, 2, 2, 2};
+ auto& t = structure.templates;
+ t.resize(15);
+ // Templates are listed in the order in which frames that use them appear
+ // in the stream, but in the `structure.templates` array they are sorted by
+ // (`spatial_id`, `temporal_id`), since that is a dependency descriptor
+ // requirement. Indexes are written in hex for nicer alignment.
+ t[0x1].S(0).T(0).Dtis("SSSSSSSSS").ChainDiffs({0, 0, 0});
+ t[0x6].S(1).T(0).Dtis("---SSSSSS").ChainDiffs({1, 1, 1}).FrameDiffs({1});
+ t[0xB].S(2).T(0).Dtis("------SSS").ChainDiffs({2, 1, 1}).FrameDiffs({1});
+ t[0x3].S(0).T(2).Dtis("--D--R--R").ChainDiffs({3, 2, 1}).FrameDiffs({3});
+ t[0x8].S(1).T(2).Dtis("-----D--R").ChainDiffs({4, 3, 2}).FrameDiffs({3, 1});
+ t[0xD].S(2).T(2).Dtis("--------D").ChainDiffs({5, 4, 3}).FrameDiffs({3, 1});
+ t[0x2].S(0).T(1).Dtis("-DS-RR-RR").ChainDiffs({6, 5, 4}).FrameDiffs({6});
+ t[0x7].S(1).T(1).Dtis("----DS-RR").ChainDiffs({7, 6, 5}).FrameDiffs({6, 1});
+ t[0xC].S(2).T(1).Dtis("-------DS").ChainDiffs({8, 7, 6}).FrameDiffs({6, 1});
+ t[0x4].S(0).T(2).Dtis("--D--R--R").ChainDiffs({9, 8, 7}).FrameDiffs({3});
+ t[0x9].S(1).T(2).Dtis("-----D--R").ChainDiffs({10, 9, 8}).FrameDiffs({3, 1});
+ t[0xE].S(2).T(2).Dtis("--------D").ChainDiffs({11, 10, 9}).FrameDiffs({3, 1});
+ t[0x0].S(0).T(0).Dtis("SSSRRRRRR").ChainDiffs({12, 11, 10}).FrameDiffs({12});
+ t[0x5].S(1).T(0).Dtis("---SSSRRR").ChainDiffs({1, 1, 1}).FrameDiffs({12, 1});
+ t[0xA].S(2).T(0).Dtis("------SSS").ChainDiffs({2, 1, 1}).FrameDiffs({12, 1});
+ return structure;
+}
+
+} // namespace webrtc
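A hedged sketch of the encode-loop contract implied by NextFrameConfig()/OnEncodeDone() above; the encoder call is a placeholder, and the loop bound is arbitrary:

  ScalabilityStructureL1T2 structure;
  for (int i = 0; i < 4; ++i) {
    // One temporal unit: one LayerFrameConfig per active spatial layer.
    for (const auto& config : structure.NextFrameConfig(/*restart=*/false)) {
      // A real encoder call would go here. Skipping OnEncodeDone() instead
      // models an encoder drop; the pattern switch is then deferred, as the
      // comment in OnEncodeDone() explains.
      GenericFrameInfo info = structure.OnEncodeDone(config);
      (void)info;  // Carries DTIs and chain data for the dependency descriptor.
    }
  }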
diff --git a/third_party/libwebrtc/modules/video_coding/svc/scalability_structure_full_svc.h b/third_party/libwebrtc/modules/video_coding/svc/scalability_structure_full_svc.h
new file mode 100644
index 0000000000..03141ffb10
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/svc/scalability_structure_full_svc.h
@@ -0,0 +1,180 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef MODULES_VIDEO_CODING_SVC_SCALABILITY_STRUCTURE_FULL_SVC_H_
+#define MODULES_VIDEO_CODING_SVC_SCALABILITY_STRUCTURE_FULL_SVC_H_
+
+#include <bitset>
+#include <vector>
+
+#include "api/transport/rtp/dependency_descriptor.h"
+#include "common_video/generic_frame_descriptor/generic_frame_info.h"
+#include "modules/video_coding/svc/scalable_video_controller.h"
+
+namespace webrtc {
+
+class ScalabilityStructureFullSvc : public ScalableVideoController {
+ public:
+ struct ScalingFactor {
+ int num = 1;
+ int den = 2;
+ };
+ ScalabilityStructureFullSvc(int num_spatial_layers,
+ int num_temporal_layers,
+ ScalingFactor resolution_factor);
+ ~ScalabilityStructureFullSvc() override;
+
+ StreamLayersConfig StreamConfig() const override;
+
+ std::vector<LayerFrameConfig> NextFrameConfig(bool restart) override;
+ GenericFrameInfo OnEncodeDone(const LayerFrameConfig& config) override;
+ void OnRatesUpdated(const VideoBitrateAllocation& bitrates) override;
+
+ private:
+ enum FramePattern {
+ kNone,
+ kKey,
+ kDeltaT2A,
+ kDeltaT1,
+ kDeltaT2B,
+ kDeltaT0,
+ };
+ static constexpr absl::string_view kFramePatternNames[] = {
+ "None", "Key", "DeltaT2A", "DeltaT1", "DeltaT2B", "DeltaT0"};
+ static constexpr int kMaxNumSpatialLayers = 3;
+ static constexpr int kMaxNumTemporalLayers = 3;
+
+ // Index of the buffer that stores the last frame for layer (`sid`, `tid`).
+ int BufferIndex(int sid, int tid) const {
+ return tid * num_spatial_layers_ + sid;
+ }
+ bool DecodeTargetIsActive(int sid, int tid) const {
+ return active_decode_targets_[sid * num_temporal_layers_ + tid];
+ }
+ void SetDecodeTargetIsActive(int sid, int tid, bool value) {
+ active_decode_targets_.set(sid * num_temporal_layers_ + tid, value);
+ }
+ FramePattern NextPattern() const;
+ bool TemporalLayerIsActive(int tid) const;
+ static DecodeTargetIndication Dti(int sid,
+ int tid,
+ const LayerFrameConfig& frame);
+
+ const int num_spatial_layers_;
+ const int num_temporal_layers_;
+ const ScalingFactor resolution_factor_;
+
+ FramePattern last_pattern_ = kNone;
+ std::bitset<kMaxNumSpatialLayers> can_reference_t0_frame_for_spatial_id_ = 0;
+ std::bitset<kMaxNumSpatialLayers> can_reference_t1_frame_for_spatial_id_ = 0;
+ std::bitset<32> active_decode_targets_;
+};
+
+// T1 0 0
+// / / / ...
+// T0 0---0---0--
+// Time-> 0 1 2 3 4
+class ScalabilityStructureL1T2 : public ScalabilityStructureFullSvc {
+ public:
+ explicit ScalabilityStructureL1T2(ScalingFactor resolution_factor = {})
+ : ScalabilityStructureFullSvc(1, 2, resolution_factor) {}
+ ~ScalabilityStructureL1T2() override = default;
+
+ FrameDependencyStructure DependencyStructure() const override;
+};
+
+// T2 0 0 0 0
+// | / | /
+// T1 / 0 / 0 ...
+// |_/ |_/
+// T0 0-------0------
+// Time-> 0 1 2 3 4 5 6 7
+class ScalabilityStructureL1T3 : public ScalabilityStructureFullSvc {
+ public:
+ explicit ScalabilityStructureL1T3(ScalingFactor resolution_factor = {})
+ : ScalabilityStructureFullSvc(1, 3, resolution_factor) {}
+ ~ScalabilityStructureL1T3() override = default;
+
+ FrameDependencyStructure DependencyStructure() const override;
+};
+
+// S1 0--0--0-
+// | | | ...
+// S0 0--0--0-
+class ScalabilityStructureL2T1 : public ScalabilityStructureFullSvc {
+ public:
+ explicit ScalabilityStructureL2T1(ScalingFactor resolution_factor = {})
+ : ScalabilityStructureFullSvc(2, 1, resolution_factor) {}
+ ~ScalabilityStructureL2T1() override = default;
+
+ FrameDependencyStructure DependencyStructure() const override;
+};
+
+// S1T1 0 0
+// /| /| /
+// S1T0 0-+-0-+-0
+// | | | | | ...
+// S0T1 | 0 | 0 |
+// |/ |/ |/
+// S0T0 0---0---0--
+// Time-> 0 1 2 3 4
+class ScalabilityStructureL2T2 : public ScalabilityStructureFullSvc {
+ public:
+ explicit ScalabilityStructureL2T2(ScalingFactor resolution_factor = {})
+ : ScalabilityStructureFullSvc(2, 2, resolution_factor) {}
+ ~ScalabilityStructureL2T2() override = default;
+
+ FrameDependencyStructure DependencyStructure() const override;
+};
+
+// S1T2 4 ,8
+// S1T1 / | 6' |
+// S1T0 2--+-'+--+-...
+// | | | |
+// S0T2 | 3 | ,7
+// S0T1 | / 5'
+// S0T0 1----'-----...
+// Time-> 0 1 2 3
+class ScalabilityStructureL2T3 : public ScalabilityStructureFullSvc {
+ public:
+ explicit ScalabilityStructureL2T3(ScalingFactor resolution_factor = {})
+ : ScalabilityStructureFullSvc(2, 3, resolution_factor) {}
+ ~ScalabilityStructureL2T3() override = default;
+
+ FrameDependencyStructure DependencyStructure() const override;
+};
+
+// S2 0-0-0-
+// | | |
+// S1 0-0-0-...
+// | | |
+// S0 0-0-0-
+// Time-> 0 1 2
+class ScalabilityStructureL3T1 : public ScalabilityStructureFullSvc {
+ public:
+ explicit ScalabilityStructureL3T1(ScalingFactor resolution_factor = {})
+ : ScalabilityStructureFullSvc(3, 1, resolution_factor) {}
+ ~ScalabilityStructureL3T1() override = default;
+
+ FrameDependencyStructure DependencyStructure() const override;
+};
+
+// https://www.w3.org/TR/webrtc-svc/#L3T3*
+class ScalabilityStructureL3T3 : public ScalabilityStructureFullSvc {
+ public:
+ explicit ScalabilityStructureL3T3(ScalingFactor resolution_factor = {})
+ : ScalabilityStructureFullSvc(3, 3, resolution_factor) {}
+ ~ScalabilityStructureL3T3() override = default;
+
+ FrameDependencyStructure DependencyStructure() const override;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_SVC_SCALABILITY_STRUCTURE_FULL_SVC_H_
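A worked example of the two index layouts in the private helpers above, for an L3T3 configuration (num_spatial_layers_ = num_temporal_layers_ = 3); the values follow directly from the formulas:

  // BufferIndex() groups buffers by temporal layer:
  //   (sid=1, tid=2) -> tid * 3 + sid = 7
  // while the active_decode_targets_ bitset is grouped by spatial layer:
  //   (sid=1, tid=2) -> sid * 3 + tid = 5
  // The two layouts are independent; only internal consistency of each
  // mapping matters.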
diff --git a/third_party/libwebrtc/modules/video_coding/svc/scalability_structure_full_svc_unittest.cc b/third_party/libwebrtc/modules/video_coding/svc/scalability_structure_full_svc_unittest.cc
new file mode 100644
index 0000000000..1c0a8be8f1
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/svc/scalability_structure_full_svc_unittest.cc
@@ -0,0 +1,123 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/video_coding/svc/scalability_structure_full_svc.h"
+
+#include <vector>
+
+#include "modules/video_coding/svc/scalability_structure_test_helpers.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+using ::testing::IsEmpty;
+using ::testing::SizeIs;
+
+TEST(ScalabilityStructureL3T3Test, SkipT0FrameByEncoderKeepsReferencesValid) {
+ std::vector<GenericFrameInfo> frames;
+ ScalabilityStructureL3T3 structure;
+ ScalabilityStructureWrapper wrapper(structure);
+
+ // Only S0T0 decode target is enabled.
+ structure.OnRatesUpdated(EnableTemporalLayers(/*s0=*/1, /*s1=*/0));
+ // Encoder generates S0T0 key frame.
+ wrapper.GenerateFrames(/*num_temporal_units=*/1, frames);
+ EXPECT_THAT(frames, SizeIs(1));
+ // Spatial layer 1 is enabled.
+ structure.OnRatesUpdated(EnableTemporalLayers(/*s0=*/1, /*s1=*/1));
+ // Encoder tries to generate S0T0 and S1T0 delta frames but they are dropped.
+ structure.NextFrameConfig(/*restart=*/false);
+ // Encoder successfully generates S0T0 and S1T0 delta frames.
+ wrapper.GenerateFrames(/*num_temporal_units=*/1, frames);
+ EXPECT_THAT(frames, SizeIs(3));
+
+ EXPECT_TRUE(wrapper.FrameReferencesAreValid(frames));
+}
+
+TEST(ScalabilityStructureL3T3Test, SkipS1T1FrameKeepsStructureValid) {
+ ScalabilityStructureL3T3 structure;
+ ScalabilityStructureWrapper wrapper(structure);
+
+ structure.OnRatesUpdated(EnableTemporalLayers(/*s0=*/3, /*s1=*/3));
+ auto frames = wrapper.GenerateFrames(/*num_temporal_units=*/1);
+ EXPECT_THAT(frames, SizeIs(2));
+ EXPECT_EQ(frames[0].temporal_id, 0);
+
+ frames = wrapper.GenerateFrames(/*num_temporal_units=*/1);
+ EXPECT_THAT(frames, SizeIs(2));
+ EXPECT_EQ(frames[0].temporal_id, 2);
+
+ structure.OnRatesUpdated(EnableTemporalLayers(/*s0=*/3, /*s1=*/0));
+ frames = wrapper.GenerateFrames(/*num_temporal_units=*/1);
+ EXPECT_THAT(frames, SizeIs(1));
+ EXPECT_EQ(frames[0].temporal_id, 1);
+
+ structure.OnRatesUpdated(EnableTemporalLayers(/*s0=*/3, /*s1=*/3));
+ // Rely on the checks inside GenerateFrames that frame references are valid.
+ frames = wrapper.GenerateFrames(/*num_temporal_units=*/1);
+ EXPECT_THAT(frames, SizeIs(2));
+ EXPECT_EQ(frames[0].temporal_id, 2);
+}
+
+TEST(ScalabilityStructureL3T3Test, SkipT1FrameByEncoderKeepsReferencesValid) {
+ std::vector<GenericFrameInfo> frames;
+ ScalabilityStructureL3T3 structure;
+ ScalabilityStructureWrapper wrapper(structure);
+
+ // First two temporal units (T0 and T2).
+ wrapper.GenerateFrames(/*num_temporal_units=*/2, frames);
+ // Simulate a T1 frame dropped by the encoder,
+ // i.e. retrieve the config but skip calling OnEncodeDone.
+ structure.NextFrameConfig(/*restart=*/false);
+ // One more temporal unit (T2).
+ wrapper.GenerateFrames(/*num_temporal_units=*/1, frames);
+
+ EXPECT_TRUE(wrapper.FrameReferencesAreValid(frames));
+}
+
+TEST(ScalabilityStructureL3T3Test,
+ SkippingFrameReusePreviousFrameConfiguration) {
+ std::vector<GenericFrameInfo> frames;
+ ScalabilityStructureL3T3 structure;
+ ScalabilityStructureWrapper wrapper(structure);
+
+ // First two temporal units (T0 and T2).
+ wrapper.GenerateFrames(/*num_temporal_units=*/2, frames);
+ ASSERT_THAT(frames, SizeIs(6));
+ ASSERT_EQ(frames[0].temporal_id, 0);
+ ASSERT_EQ(frames[3].temporal_id, 2);
+
+ // Simulate a frame dropped by the encoder,
+ // i.e. retrieve the config but skip calling OnEncodeDone.
+ structure.NextFrameConfig(/*restart=*/false);
+ // Two more temporal units; expect the temporal pattern to continue.
+ wrapper.GenerateFrames(/*num_temporal_units=*/2, frames);
+ ASSERT_THAT(frames, SizeIs(12));
+ // Expect temporal pattern continues as if there were no dropped frames.
+ EXPECT_EQ(frames[6].temporal_id, 1);
+ EXPECT_EQ(frames[9].temporal_id, 2);
+}
+
+TEST(ScalabilityStructureL3T3Test, SwitchSpatialLayerBeforeT1Frame) {
+ ScalabilityStructureL3T3 structure;
+ ScalabilityStructureWrapper wrapper(structure);
+
+ structure.OnRatesUpdated(EnableTemporalLayers(/*s0=*/2, /*s1=*/0));
+ EXPECT_THAT(wrapper.GenerateFrames(1), SizeIs(1));
+ structure.OnRatesUpdated(EnableTemporalLayers(/*s0=*/0, /*s1=*/2));
+ auto frames = wrapper.GenerateFrames(1);
+ ASSERT_THAT(frames, SizeIs(1));
+ EXPECT_THAT(frames[0].frame_diffs, IsEmpty());
+ EXPECT_EQ(frames[0].temporal_id, 0);
+}
+
+} // namespace
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/svc/scalability_structure_key_svc.cc b/third_party/libwebrtc/modules/video_coding/svc/scalability_structure_key_svc.cc
new file mode 100644
index 0000000000..0ef7e8f156
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/svc/scalability_structure_key_svc.cc
@@ -0,0 +1,378 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/video_coding/svc/scalability_structure_key_svc.h"
+
+#include <bitset>
+#include <utility>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/transport/rtp/dependency_descriptor.h"
+#include "api/video/video_bitrate_allocation.h"
+#include "common_video/generic_frame_descriptor/generic_frame_info.h"
+#include "modules/video_coding/svc/scalable_video_controller.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+constexpr int ScalabilityStructureKeySvc::kMaxNumSpatialLayers;
+constexpr int ScalabilityStructureKeySvc::kMaxNumTemporalLayers;
+
+ScalabilityStructureKeySvc::ScalabilityStructureKeySvc(int num_spatial_layers,
+ int num_temporal_layers)
+ : num_spatial_layers_(num_spatial_layers),
+ num_temporal_layers_(num_temporal_layers),
+ active_decode_targets_(
+ (uint32_t{1} << (num_spatial_layers * num_temporal_layers)) - 1) {
+ // There is no point in using this structure without spatial scalability.
+ RTC_DCHECK_GT(num_spatial_layers, 1);
+ RTC_DCHECK_LE(num_spatial_layers, kMaxNumSpatialLayers);
+ RTC_DCHECK_LE(num_temporal_layers, kMaxNumTemporalLayers);
+}
+
+ScalabilityStructureKeySvc::~ScalabilityStructureKeySvc() = default;
+
+ScalableVideoController::StreamLayersConfig
+ScalabilityStructureKeySvc::StreamConfig() const {
+ StreamLayersConfig result;
+ result.num_spatial_layers = num_spatial_layers_;
+ result.num_temporal_layers = num_temporal_layers_;
+ result.scaling_factor_num[num_spatial_layers_ - 1] = 1;
+ result.scaling_factor_den[num_spatial_layers_ - 1] = 1;
+ for (int sid = num_spatial_layers_ - 1; sid > 0; --sid) {
+ result.scaling_factor_num[sid - 1] = 1;
+ result.scaling_factor_den[sid - 1] = 2 * result.scaling_factor_den[sid];
+ }
+ result.uses_reference_scaling = true;
+ return result;
+}
+
+bool ScalabilityStructureKeySvc::TemporalLayerIsActive(int tid) const {
+ if (tid >= num_temporal_layers_) {
+ return false;
+ }
+ for (int sid = 0; sid < num_spatial_layers_; ++sid) {
+ if (DecodeTargetIsActive(sid, tid)) {
+ return true;
+ }
+ }
+ return false;
+}
+
+DecodeTargetIndication ScalabilityStructureKeySvc::Dti(
+ int sid,
+ int tid,
+ const LayerFrameConfig& config) {
+ if (config.IsKeyframe() || config.Id() == kKey) {
+ RTC_DCHECK_EQ(config.TemporalId(), 0);
+ return sid < config.SpatialId() ? DecodeTargetIndication::kNotPresent
+ : DecodeTargetIndication::kSwitch;
+ }
+
+ if (sid != config.SpatialId() || tid < config.TemporalId()) {
+ return DecodeTargetIndication::kNotPresent;
+ }
+ if (tid == config.TemporalId() && tid > 0) {
+ return DecodeTargetIndication::kDiscardable;
+ }
+ return DecodeTargetIndication::kSwitch;
+}
+
+std::vector<ScalableVideoController::LayerFrameConfig>
+ScalabilityStructureKeySvc::KeyframeConfig() {
+ std::vector<LayerFrameConfig> configs;
+ configs.reserve(num_spatial_layers_);
+ absl::optional<int> spatial_dependency_buffer_id;
+ spatial_id_is_enabled_.reset();
+ // Disallow temporal references that cross T0 on higher temporal layers.
+ can_reference_t1_frame_for_spatial_id_.reset();
+ for (int sid = 0; sid < num_spatial_layers_; ++sid) {
+ if (!DecodeTargetIsActive(sid, /*tid=*/0)) {
+ continue;
+ }
+ configs.emplace_back();
+ ScalableVideoController::LayerFrameConfig& config = configs.back();
+ config.Id(kKey).S(sid).T(0);
+
+ if (spatial_dependency_buffer_id) {
+ config.Reference(*spatial_dependency_buffer_id);
+ } else {
+ config.Keyframe();
+ }
+ config.Update(BufferIndex(sid, /*tid=*/0));
+
+ spatial_id_is_enabled_.set(sid);
+ spatial_dependency_buffer_id = BufferIndex(sid, /*tid=*/0);
+ }
+ return configs;
+}
+
+std::vector<ScalableVideoController::LayerFrameConfig>
+ScalabilityStructureKeySvc::T0Config() {
+ std::vector<LayerFrameConfig> configs;
+ configs.reserve(num_spatial_layers_);
+ // Disallow temporal references that cross T0 on higher temporal layers.
+ can_reference_t1_frame_for_spatial_id_.reset();
+ for (int sid = 0; sid < num_spatial_layers_; ++sid) {
+ if (!DecodeTargetIsActive(sid, /*tid=*/0)) {
+ spatial_id_is_enabled_.reset(sid);
+ continue;
+ }
+ configs.emplace_back();
+ configs.back().Id(kDeltaT0).S(sid).T(0).ReferenceAndUpdate(
+ BufferIndex(sid, /*tid=*/0));
+ }
+ return configs;
+}
+
+std::vector<ScalableVideoController::LayerFrameConfig>
+ScalabilityStructureKeySvc::T1Config() {
+ std::vector<LayerFrameConfig> configs;
+ configs.reserve(num_spatial_layers_);
+ for (int sid = 0; sid < num_spatial_layers_; ++sid) {
+ if (!DecodeTargetIsActive(sid, /*tid=*/1)) {
+ continue;
+ }
+ configs.emplace_back();
+ ScalableVideoController::LayerFrameConfig& config = configs.back();
+ config.Id(kDeltaT1).S(sid).T(1).Reference(BufferIndex(sid, /*tid=*/0));
+ if (num_temporal_layers_ > 2) {
+ config.Update(BufferIndex(sid, /*tid=*/1));
+ }
+ }
+ return configs;
+}
+
+std::vector<ScalableVideoController::LayerFrameConfig>
+ScalabilityStructureKeySvc::T2Config(FramePattern pattern) {
+ std::vector<LayerFrameConfig> configs;
+ configs.reserve(num_spatial_layers_);
+ for (int sid = 0; sid < num_spatial_layers_; ++sid) {
+ if (!DecodeTargetIsActive(sid, /*tid=*/2)) {
+ continue;
+ }
+ configs.emplace_back();
+ ScalableVideoController::LayerFrameConfig& config = configs.back();
+ config.Id(pattern).S(sid).T(2);
+ if (can_reference_t1_frame_for_spatial_id_[sid]) {
+ config.Reference(BufferIndex(sid, /*tid=*/1));
+ } else {
+ config.Reference(BufferIndex(sid, /*tid=*/0));
+ }
+ }
+ return configs;
+}
+
+ScalabilityStructureKeySvc::FramePattern
+ScalabilityStructureKeySvc::NextPattern(FramePattern last_pattern) const {
+ switch (last_pattern) {
+ case kNone:
+ return kKey;
+ case kDeltaT2B:
+ return kDeltaT0;
+ case kDeltaT2A:
+ if (TemporalLayerIsActive(1)) {
+ return kDeltaT1;
+ }
+ return kDeltaT0;
+ case kDeltaT1:
+ if (TemporalLayerIsActive(2)) {
+ return kDeltaT2B;
+ }
+ return kDeltaT0;
+ case kDeltaT0:
+ case kKey:
+ if (TemporalLayerIsActive(2)) {
+ return kDeltaT2A;
+ }
+ if (TemporalLayerIsActive(1)) {
+ return kDeltaT1;
+ }
+ return kDeltaT0;
+ }
+ RTC_DCHECK_NOTREACHED();
+ return kNone;
+}
+
+std::vector<ScalableVideoController::LayerFrameConfig>
+ScalabilityStructureKeySvc::NextFrameConfig(bool restart) {
+ if (active_decode_targets_.none()) {
+ last_pattern_ = kNone;
+ return {};
+ }
+
+ if (restart) {
+ last_pattern_ = kNone;
+ }
+
+ FramePattern current_pattern = NextPattern(last_pattern_);
+ switch (current_pattern) {
+ case kKey:
+ return KeyframeConfig();
+ case kDeltaT0:
+ return T0Config();
+ case kDeltaT1:
+ return T1Config();
+ case kDeltaT2A:
+ case kDeltaT2B:
+ return T2Config(current_pattern);
+ case kNone:
+ break;
+ }
+ RTC_DCHECK_NOTREACHED();
+ return {};
+}
+
+GenericFrameInfo ScalabilityStructureKeySvc::OnEncodeDone(
+ const LayerFrameConfig& config) {
+ // When the encoder drops all frames for a temporal unit, it is better to
+ // reuse the old temporal pattern rather than switch to the next one, so the
+ // switch to the next pattern is deferred here from `NextFrameConfig`.
+ // In particular, creating VP9 references relies on this behavior.
+ last_pattern_ = static_cast<FramePattern>(config.Id());
+ if (config.TemporalId() == 1) {
+ can_reference_t1_frame_for_spatial_id_.set(config.SpatialId());
+ }
+
+ GenericFrameInfo frame_info;
+ frame_info.spatial_id = config.SpatialId();
+ frame_info.temporal_id = config.TemporalId();
+ frame_info.encoder_buffers = config.Buffers();
+ frame_info.decode_target_indications.reserve(num_spatial_layers_ *
+ num_temporal_layers_);
+ for (int sid = 0; sid < num_spatial_layers_; ++sid) {
+ for (int tid = 0; tid < num_temporal_layers_; ++tid) {
+ frame_info.decode_target_indications.push_back(Dti(sid, tid, config));
+ }
+ }
+ frame_info.part_of_chain.assign(num_spatial_layers_, false);
+ if (config.IsKeyframe() || config.Id() == kKey) {
+ RTC_DCHECK_EQ(config.TemporalId(), 0);
+ for (int sid = config.SpatialId(); sid < num_spatial_layers_; ++sid) {
+ frame_info.part_of_chain[sid] = true;
+ }
+ } else if (config.TemporalId() == 0) {
+ frame_info.part_of_chain[config.SpatialId()] = true;
+ }
+ frame_info.active_decode_targets = active_decode_targets_;
+ return frame_info;
+}
+
+void ScalabilityStructureKeySvc::OnRatesUpdated(
+ const VideoBitrateAllocation& bitrates) {
+ for (int sid = 0; sid < num_spatial_layers_; ++sid) {
+ // Enable/disable spatial layers independently.
+ bool active = bitrates.GetBitrate(sid, /*tid=*/0) > 0;
+ SetDecodeTargetIsActive(sid, /*tid=*/0, active);
+ if (!spatial_id_is_enabled_[sid] && active) {
+ // A key frame is required to re-enable any spatial layer.
+ last_pattern_ = kNone;
+ }
+
+ for (int tid = 1; tid < num_temporal_layers_; ++tid) {
+ // To enable a temporal layer, require nonzero bitrates for it and all
+ // lower temporal layers.
+ active = active && bitrates.GetBitrate(sid, tid) > 0;
+ SetDecodeTargetIsActive(sid, tid, active);
+ }
+ }
+}
+
+ScalabilityStructureL2T1Key::~ScalabilityStructureL2T1Key() = default;
+
+FrameDependencyStructure ScalabilityStructureL2T1Key::DependencyStructure()
+ const {
+ FrameDependencyStructure structure;
+ structure.num_decode_targets = 2;
+ structure.num_chains = 2;
+ structure.decode_target_protected_by_chain = {0, 1};
+ structure.templates.resize(4);
+ structure.templates[0].S(0).Dtis("S-").ChainDiffs({2, 1}).FrameDiffs({2});
+ structure.templates[1].S(0).Dtis("SS").ChainDiffs({0, 0});
+ structure.templates[2].S(1).Dtis("-S").ChainDiffs({1, 2}).FrameDiffs({2});
+ structure.templates[3].S(1).Dtis("-S").ChainDiffs({1, 1}).FrameDiffs({1});
+ return structure;
+}
+
+ScalabilityStructureL2T2Key::~ScalabilityStructureL2T2Key() = default;
+
+FrameDependencyStructure ScalabilityStructureL2T2Key::DependencyStructure()
+ const {
+ FrameDependencyStructure structure;
+ structure.num_decode_targets = 4;
+ structure.num_chains = 2;
+ structure.decode_target_protected_by_chain = {0, 0, 1, 1};
+ structure.templates.resize(6);
+ auto& templates = structure.templates;
+ templates[0].S(0).T(0).Dtis("SSSS").ChainDiffs({0, 0});
+ templates[1].S(0).T(0).Dtis("SS--").ChainDiffs({4, 3}).FrameDiffs({4});
+ templates[2].S(0).T(1).Dtis("-D--").ChainDiffs({2, 1}).FrameDiffs({2});
+ templates[3].S(1).T(0).Dtis("--SS").ChainDiffs({1, 1}).FrameDiffs({1});
+ templates[4].S(1).T(0).Dtis("--SS").ChainDiffs({1, 4}).FrameDiffs({4});
+ templates[5].S(1).T(1).Dtis("---D").ChainDiffs({3, 2}).FrameDiffs({2});
+ return structure;
+}
+
+ScalabilityStructureL2T3Key::~ScalabilityStructureL2T3Key() = default;
+
+FrameDependencyStructure ScalabilityStructureL2T3Key::DependencyStructure()
+ const {
+ FrameDependencyStructure structure;
+ structure.num_decode_targets = 6;
+ structure.num_chains = 2;
+ structure.decode_target_protected_by_chain = {0, 0, 0, 1, 1, 1};
+ auto& templates = structure.templates;
+ templates.resize(10);
+ templates[0].S(0).T(0).Dtis("SSSSSS").ChainDiffs({0, 0});
+ templates[1].S(0).T(0).Dtis("SSS---").ChainDiffs({8, 7}).FrameDiffs({8});
+ templates[2].S(0).T(1).Dtis("-DS---").ChainDiffs({4, 3}).FrameDiffs({4});
+ templates[3].S(0).T(2).Dtis("--D---").ChainDiffs({2, 1}).FrameDiffs({2});
+ templates[4].S(0).T(2).Dtis("--D---").ChainDiffs({6, 5}).FrameDiffs({2});
+ templates[5].S(1).T(0).Dtis("---SSS").ChainDiffs({1, 1}).FrameDiffs({1});
+ templates[6].S(1).T(0).Dtis("---SSS").ChainDiffs({1, 8}).FrameDiffs({8});
+ templates[7].S(1).T(1).Dtis("----DS").ChainDiffs({5, 4}).FrameDiffs({4});
+ templates[8].S(1).T(2).Dtis("-----D").ChainDiffs({3, 2}).FrameDiffs({2});
+ templates[9].S(1).T(2).Dtis("-----D").ChainDiffs({7, 6}).FrameDiffs({2});
+ return structure;
+}
+
+ScalabilityStructureL3T3Key::~ScalabilityStructureL3T3Key() = default;
+
+FrameDependencyStructure ScalabilityStructureL3T3Key::DependencyStructure()
+ const {
+ FrameDependencyStructure structure;
+ structure.num_decode_targets = 9;
+ structure.num_chains = 3;
+ structure.decode_target_protected_by_chain = {0, 0, 0, 1, 1, 1, 2, 2, 2};
+ auto& t = structure.templates;
+ t.resize(15);
+ // Templates are listed in the order in which frames that use them appear
+ // in the stream, but in the `structure.templates` array they are sorted by
+ // (`spatial_id`, `temporal_id`), since that is a dependency descriptor
+ // requirement. Indexes are written in hex for nicer alignment.
+ t[0x0].S(0).T(0).Dtis("SSSSSSSSS").ChainDiffs({0, 0, 0});
+ t[0x5].S(1).T(0).Dtis("---SSSSSS").ChainDiffs({1, 1, 1}).FrameDiffs({1});
+ t[0xA].S(2).T(0).Dtis("------SSS").ChainDiffs({2, 1, 1}).FrameDiffs({1});
+ t[0x3].S(0).T(2).Dtis("--D------").ChainDiffs({3, 2, 1}).FrameDiffs({3});
+ t[0x8].S(1).T(2).Dtis("-----D---").ChainDiffs({4, 3, 2}).FrameDiffs({3});
+ t[0xD].S(2).T(2).Dtis("--------D").ChainDiffs({5, 4, 3}).FrameDiffs({3});
+ t[0x2].S(0).T(1).Dtis("-DS------").ChainDiffs({6, 5, 4}).FrameDiffs({6});
+ t[0x7].S(1).T(1).Dtis("----DS---").ChainDiffs({7, 6, 5}).FrameDiffs({6});
+ t[0xC].S(2).T(1).Dtis("-------DS").ChainDiffs({8, 7, 6}).FrameDiffs({6});
+ t[0x4].S(0).T(2).Dtis("--D------").ChainDiffs({9, 8, 7}).FrameDiffs({3});
+ t[0x9].S(1).T(2).Dtis("-----D---").ChainDiffs({10, 9, 8}).FrameDiffs({3});
+ t[0xE].S(2).T(2).Dtis("--------D").ChainDiffs({11, 10, 9}).FrameDiffs({3});
+ t[0x1].S(0).T(0).Dtis("SSS------").ChainDiffs({12, 11, 10}).FrameDiffs({12});
+ t[0x6].S(1).T(0).Dtis("---SSS---").ChainDiffs({1, 12, 11}).FrameDiffs({12});
+ t[0xB].S(2).T(0).Dtis("------SSS").ChainDiffs({2, 1, 12}).FrameDiffs({12});
+ return structure;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/svc/scalability_structure_key_svc.h b/third_party/libwebrtc/modules/video_coding/svc/scalability_structure_key_svc.h
new file mode 100644
index 0000000000..b66f6f83e4
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/svc/scalability_structure_key_svc.h
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef MODULES_VIDEO_CODING_SVC_SCALABILITY_STRUCTURE_KEY_SVC_H_
+#define MODULES_VIDEO_CODING_SVC_SCALABILITY_STRUCTURE_KEY_SVC_H_
+
+#include <bitset>
+#include <vector>
+
+#include "api/transport/rtp/dependency_descriptor.h"
+#include "api/video/video_bitrate_allocation.h"
+#include "common_video/generic_frame_descriptor/generic_frame_info.h"
+#include "modules/video_coding/svc/scalable_video_controller.h"
+
+namespace webrtc {
+
+class ScalabilityStructureKeySvc : public ScalableVideoController {
+ public:
+ ScalabilityStructureKeySvc(int num_spatial_layers, int num_temporal_layers);
+ ~ScalabilityStructureKeySvc() override;
+
+ StreamLayersConfig StreamConfig() const override;
+
+ std::vector<LayerFrameConfig> NextFrameConfig(bool restart) override;
+ GenericFrameInfo OnEncodeDone(const LayerFrameConfig& config) override;
+ void OnRatesUpdated(const VideoBitrateAllocation& bitrates) override;
+
+ private:
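+ // One pattern per temporal unit. After the key frame the pattern cycles
+ // kDeltaT0, kDeltaT2A, kDeltaT1, kDeltaT2B (temporal ids 0, 2, 1, 2);
+ // patterns whose temporal layer is inactive are skipped.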
+ enum FramePattern : int {
+ kNone,
+ kKey,
+ kDeltaT0,
+ kDeltaT2A,
+ kDeltaT1,
+ kDeltaT2B,
+ };
+ static constexpr int kMaxNumSpatialLayers = 3;
+ static constexpr int kMaxNumTemporalLayers = 3;
+
+ // Index of the buffer storing the last frame for layer (`sid`, `tid`).
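+ // For example, with 3 spatial layers, layer (sid=1, tid=2) uses buffer
+ // 2 * 3 + 1 = 7.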
+ int BufferIndex(int sid, int tid) const {
+ return tid * num_spatial_layers_ + sid;
+ }
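+ // Decode targets below are indexed as sid * num_temporal_layers_ + tid,
+ // i.e. transposed relative to BufferIndex() above.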
+ bool DecodeTargetIsActive(int sid, int tid) const {
+ return active_decode_targets_[sid * num_temporal_layers_ + tid];
+ }
+ void SetDecodeTargetIsActive(int sid, int tid, bool value) {
+ active_decode_targets_.set(sid * num_temporal_layers_ + tid, value);
+ }
+ bool TemporalLayerIsActive(int tid) const;
+ static DecodeTargetIndication Dti(int sid,
+ int tid,
+ const LayerFrameConfig& config);
+
+ std::vector<LayerFrameConfig> KeyframeConfig();
+ std::vector<LayerFrameConfig> T0Config();
+ std::vector<LayerFrameConfig> T1Config();
+ std::vector<LayerFrameConfig> T2Config(FramePattern pattern);
+
+ FramePattern NextPattern(FramePattern last_pattern) const;
+
+ const int num_spatial_layers_;
+ const int num_temporal_layers_;
+
+ FramePattern last_pattern_ = kNone;
+ std::bitset<kMaxNumSpatialLayers> spatial_id_is_enabled_;
+ std::bitset<kMaxNumSpatialLayers> can_reference_t1_frame_for_spatial_id_;
+ std::bitset<32> active_decode_targets_;
+};
+
+// S1 0--0--0-
+// | ...
+// S0 0--0--0-
+class ScalabilityStructureL2T1Key : public ScalabilityStructureKeySvc {
+ public:
+ ScalabilityStructureL2T1Key() : ScalabilityStructureKeySvc(2, 1) {}
+ ~ScalabilityStructureL2T1Key() override;
+
+ FrameDependencyStructure DependencyStructure() const override;
+};
+
+// S1T1 0 0
+// / / /
+// S1T0 0---0---0
+// | ...
+// S0T1 | 0 0
+// |/ / /
+// S0T0 0---0---0
+// Time-> 0 1 2 3 4
+class ScalabilityStructureL2T2Key : public ScalabilityStructureKeySvc {
+ public:
+ ScalabilityStructureL2T2Key() : ScalabilityStructureKeySvc(2, 2) {}
+ ~ScalabilityStructureL2T2Key() override;
+
+ FrameDependencyStructure DependencyStructure() const override;
+};
+
+class ScalabilityStructureL2T3Key : public ScalabilityStructureKeySvc {
+ public:
+ ScalabilityStructureL2T3Key() : ScalabilityStructureKeySvc(2, 3) {}
+ ~ScalabilityStructureL2T3Key() override;
+
+ FrameDependencyStructure DependencyStructure() const override;
+};
+
+class ScalabilityStructureL3T3Key : public ScalabilityStructureKeySvc {
+ public:
+ ScalabilityStructureL3T3Key() : ScalabilityStructureKeySvc(3, 3) {}
+ ~ScalabilityStructureL3T3Key() override;
+
+ FrameDependencyStructure DependencyStructure() const override;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_SVC_SCALABILITY_STRUCTURE_KEY_SVC_H_
diff --git a/third_party/libwebrtc/modules/video_coding/svc/scalability_structure_key_svc_unittest.cc b/third_party/libwebrtc/modules/video_coding/svc/scalability_structure_key_svc_unittest.cc
new file mode 100644
index 0000000000..5f923bb487
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/svc/scalability_structure_key_svc_unittest.cc
@@ -0,0 +1,245 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/video_coding/svc/scalability_structure_key_svc.h"
+
+#include <vector>
+
+#include "api/array_view.h"
+#include "api/transport/rtp/dependency_descriptor.h"
+#include "common_video/generic_frame_descriptor/generic_frame_info.h"
+#include "modules/video_coding/svc/scalability_structure_test_helpers.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+using ::testing::ElementsAre;
+using ::testing::IsEmpty;
+using ::testing::SizeIs;
+
+TEST(ScalabilityStructureL3T3KeyTest,
+ SkippingT1FrameOnOneSpatialLayerKeepsStructureValid) {
+ ScalabilityStructureL3T3Key structure;
+ ScalabilityStructureWrapper wrapper(structure);
+ std::vector<GenericFrameInfo> frames;
+
+ structure.OnRatesUpdated(EnableTemporalLayers(/*s0=*/3, /*s1=*/3));
+ wrapper.GenerateFrames(/*num_temporal_units=*/2, frames);
+ EXPECT_THAT(frames, SizeIs(4));
+ structure.OnRatesUpdated(EnableTemporalLayers(/*s0=*/3, /*s1=*/1));
+ wrapper.GenerateFrames(/*num_temporal_units=*/1, frames);
+ EXPECT_THAT(frames, SizeIs(5));
+ structure.OnRatesUpdated(EnableTemporalLayers(/*s0=*/3, /*s1=*/3));
+ wrapper.GenerateFrames(/*num_temporal_units=*/1, frames);
+ ASSERT_THAT(frames, SizeIs(7));
+
+ EXPECT_EQ(frames[0].temporal_id, 0);
+ EXPECT_EQ(frames[1].temporal_id, 0);
+ EXPECT_EQ(frames[2].temporal_id, 2);
+ EXPECT_EQ(frames[3].temporal_id, 2);
+ EXPECT_EQ(frames[4].temporal_id, 1);
+ EXPECT_EQ(frames[5].temporal_id, 2);
+ EXPECT_EQ(frames[6].temporal_id, 2);
+ EXPECT_TRUE(wrapper.FrameReferencesAreValid(frames));
+}
+
+TEST(ScalabilityStructureL3T3KeyTest,
+ SkipT1FrameByEncoderKeepsReferencesValid) {
+ std::vector<GenericFrameInfo> frames;
+ ScalabilityStructureL3T3Key structure;
+ ScalabilityStructureWrapper wrapper(structure);
+
+ // First two temporal units (T0 and T2).
+ wrapper.GenerateFrames(/*num_temporal_units=*/2, frames);
+ // Simulate a T1 frame dropped by the encoder,
+ // i.e. retrieve the config, but skip calling OnEncodeDone.
+ structure.NextFrameConfig(/*restart=*/false);
+ // One more temporal unit.
+ wrapper.GenerateFrames(/*num_temporal_units=*/1, frames);
+
+ EXPECT_THAT(frames, SizeIs(9));
+ EXPECT_TRUE(wrapper.FrameReferencesAreValid(frames));
+}
+
+TEST(ScalabilityStructureL3T3KeyTest,
+ SkippingFrameReusesPreviousFrameConfiguration) {
+ std::vector<GenericFrameInfo> frames;
+ ScalabilityStructureL3T3Key structure;
+ ScalabilityStructureWrapper wrapper(structure);
+
+ // First two temporal units (T0 and T2).
+ wrapper.GenerateFrames(/*num_temporal_units=*/2, frames);
+ ASSERT_THAT(frames, SizeIs(6));
+ ASSERT_EQ(frames[0].temporal_id, 0);
+ ASSERT_EQ(frames[3].temporal_id, 2);
+
+ // Simulate a frame dropped by the encoder,
+ // i.e. retrieve the config, but skip calling OnEncodeDone.
+ structure.NextFrameConfig(/*restart=*/false);
+ // Two more temporal units.
+ wrapper.GenerateFrames(/*num_temporal_units=*/2, frames);
+ ASSERT_THAT(frames, SizeIs(12));
+ // Expect the temporal pattern continues as if no frames were dropped.
+ EXPECT_EQ(frames[6].temporal_id, 1);
+ EXPECT_EQ(frames[9].temporal_id, 2);
+}
+
+TEST(ScalabilityStructureL3T3KeyTest, SkippingKeyFrameTriggersNewKeyFrame) {
+ std::vector<GenericFrameInfo> frames;
+ ScalabilityStructureL3T3Key structure;
+ ScalabilityStructureWrapper wrapper(structure);
+
+ // Ask for a key frame config, but do not return any frames.
+ structure.NextFrameConfig(/*restart=*/false);
+
+ // Ask for more frames, expect they start with a key frame.
+ wrapper.GenerateFrames(/*num_temporal_units=*/2, frames);
+ ASSERT_THAT(frames, SizeIs(6));
+ ASSERT_EQ(frames[0].temporal_id, 0);
+ ASSERT_EQ(frames[3].temporal_id, 2);
+ EXPECT_TRUE(wrapper.FrameReferencesAreValid(frames));
+}
+
+TEST(ScalabilityStructureL3T3KeyTest,
+ SkippingT2FrameAndDisablingT2LayerProduceT1AsNextFrame) {
+ std::vector<GenericFrameInfo> frames;
+ ScalabilityStructureL3T3Key structure;
+ ScalabilityStructureWrapper wrapper(structure);
+
+ wrapper.GenerateFrames(/*num_temporal_units=*/1, frames);
+ // Ask for the next (T2) frame config, but do not return any frames.
+ auto config = structure.NextFrameConfig(/*restart=*/false);
+ ASSERT_THAT(config, Not(IsEmpty()));
+ ASSERT_EQ(config.front().TemporalId(), 2);
+
+ // Disable the T2 layer.
+ structure.OnRatesUpdated(EnableTemporalLayers(/*s0=*/2, /*s1=*/2, /*s2=*/2));
+ // Expect that, instead of reusing the unused config, a T1 config is generated.
+ config = structure.NextFrameConfig(/*restart=*/false);
+ ASSERT_THAT(config, Not(IsEmpty()));
+ EXPECT_EQ(config.front().TemporalId(), 1);
+}
+
+TEST(ScalabilityStructureL3T3KeyTest, EnableT2LayerWhileProducingT1Frame) {
+ std::vector<GenericFrameInfo> frames;
+ ScalabilityStructureL3T3Key structure;
+ ScalabilityStructureWrapper wrapper(structure);
+
+ // Disable the T2 layer.
+ structure.OnRatesUpdated(EnableTemporalLayers(/*s0=*/2, /*s1=*/2, /*s2=*/2));
+
+ // Generate the key frame.
+ wrapper.GenerateFrames(/*num_temporal_units=*/1, frames);
+ ASSERT_THAT(frames, SizeIs(3));
+ EXPECT_EQ(frames[0].temporal_id, 0);
+
+ // Ask for the next (T1) frame config, but do not return any frames yet.
+ auto config = structure.NextFrameConfig(/*restart=*/false);
+ ASSERT_THAT(config, Not(IsEmpty()));
+ ASSERT_EQ(config.front().TemporalId(), 1);
+
+ // Reenable T2 layer.
+ structure.OnRatesUpdated(EnableTemporalLayers(/*s0=*/3, /*s1=*/3, /*s2=*/3));
+
+ // Finish encoding previously requested config.
+ for (auto layer_config : config) {
+ GenericFrameInfo info = structure.OnEncodeDone(layer_config);
+ EXPECT_EQ(info.temporal_id, 1);
+ frames.push_back(info);
+ }
+ ASSERT_THAT(frames, SizeIs(6));
+
+ // Generate more frames, expect T2 pattern resumes.
+ wrapper.GenerateFrames(/*num_temporal_units=*/4, frames);
+ ASSERT_THAT(frames, SizeIs(18));
+ EXPECT_EQ(frames[6].temporal_id, 2);
+ EXPECT_EQ(frames[9].temporal_id, 0);
+ EXPECT_EQ(frames[12].temporal_id, 2);
+ EXPECT_EQ(frames[15].temporal_id, 1);
+
+ EXPECT_TRUE(wrapper.FrameReferencesAreValid(frames));
+}
+
+TEST(ScalabilityStructureL3T3KeyTest,
+ ReenablingSpatialLayerBeforeMissedT0FrameDoesntTriggerAKeyFrame) {
+ ScalabilityStructureL3T3Key structure;
+ ScalabilityStructureWrapper wrapper(structure);
+ std::vector<GenericFrameInfo> frames;
+
+ structure.OnRatesUpdated(EnableTemporalLayers(/*s0=*/2, /*s1=*/2));
+ wrapper.GenerateFrames(1, frames);
+ EXPECT_THAT(frames, SizeIs(2));
+ // Drop a spatial layer.
+ structure.OnRatesUpdated(EnableTemporalLayers(/*s0=*/2, /*s1=*/0));
+ wrapper.GenerateFrames(1, frames);
+ EXPECT_THAT(frames, SizeIs(3));
+ // Reenable the spatial layer before a T0 frame is encoded.
+ structure.OnRatesUpdated(EnableTemporalLayers(/*s0=*/2, /*s1=*/2));
+ wrapper.GenerateFrames(1, frames);
+ EXPECT_THAT(frames, SizeIs(5));
+
+ EXPECT_EQ(frames[0].temporal_id, 0);
+ EXPECT_EQ(frames[1].temporal_id, 0);
+ EXPECT_EQ(frames[2].temporal_id, 1);
+ EXPECT_EQ(frames[3].temporal_id, 0);
+ EXPECT_EQ(frames[4].temporal_id, 0);
+ EXPECT_THAT(frames[3].frame_diffs, SizeIs(1));
+ EXPECT_THAT(frames[4].frame_diffs, SizeIs(1));
+ EXPECT_TRUE(wrapper.FrameReferencesAreValid(frames));
+}
+
+TEST(ScalabilityStructureL3T3KeyTest, ReenablingSpatialLayerTriggersKeyFrame) {
+ ScalabilityStructureL3T3Key structure;
+ ScalabilityStructureWrapper wrapper(structure);
+ std::vector<GenericFrameInfo> frames;
+
+ // Start with all spatial layers enabled.
+ structure.OnRatesUpdated(EnableTemporalLayers(/*s0=*/2, /*s1=*/2, /*s2=*/2));
+ wrapper.GenerateFrames(3, frames);
+ EXPECT_THAT(frames, SizeIs(9));
+ // Drop a spatial layer. Two remaining spatial layers should just continue.
+ structure.OnRatesUpdated(EnableTemporalLayers(/*s0=*/2, /*s1=*/0, /*s2=*/2));
+ wrapper.GenerateFrames(2, frames);
+ EXPECT_THAT(frames, SizeIs(13));
+ // Reenable the spatial layer; expect a full restart.
+ structure.OnRatesUpdated(EnableTemporalLayers(/*s0=*/2, /*s1=*/2, /*s2=*/2));
+ wrapper.GenerateFrames(1, frames);
+ ASSERT_THAT(frames, SizeIs(16));
+
+ // First 3 temporal units with all spatial layers enabled.
+ EXPECT_EQ(frames[0].temporal_id, 0);
+ EXPECT_EQ(frames[3].temporal_id, 1);
+ EXPECT_EQ(frames[6].temporal_id, 0);
+ // 2 temporal units with spatial layer 1 disabled.
+ EXPECT_EQ(frames[9].spatial_id, 0);
+ EXPECT_EQ(frames[9].temporal_id, 1);
+ EXPECT_EQ(frames[10].spatial_id, 2);
+ EXPECT_EQ(frames[10].temporal_id, 1);
+ // T0 frames were encoded while spatial layer 1 was disabled.
+ EXPECT_EQ(frames[11].spatial_id, 0);
+ EXPECT_EQ(frames[11].temporal_id, 0);
+ EXPECT_EQ(frames[12].spatial_id, 2);
+ EXPECT_EQ(frames[12].temporal_id, 0);
+ // Key frame to reenable spatial layer 1.
+ EXPECT_THAT(frames[13].frame_diffs, IsEmpty());
+ EXPECT_THAT(frames[14].frame_diffs, ElementsAre(1));
+ EXPECT_THAT(frames[15].frame_diffs, ElementsAre(1));
+ EXPECT_EQ(frames[13].temporal_id, 0);
+ EXPECT_EQ(frames[14].temporal_id, 0);
+ EXPECT_EQ(frames[15].temporal_id, 0);
+ auto all_frames = rtc::MakeArrayView(frames.data(), frames.size());
+ EXPECT_TRUE(wrapper.FrameReferencesAreValid(all_frames.subview(0, 13)));
+ // Frames starting from frame#13 should not reference any earlier frames.
+ EXPECT_TRUE(wrapper.FrameReferencesAreValid(all_frames.subview(13)));
+}
+
+} // namespace
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/svc/scalability_structure_l2t2_key_shift.cc b/third_party/libwebrtc/modules/video_coding/svc/scalability_structure_l2t2_key_shift.cc
new file mode 100644
index 0000000000..4d15942d3e
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/svc/scalability_structure_l2t2_key_shift.cc
@@ -0,0 +1,177 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/video_coding/svc/scalability_structure_l2t2_key_shift.h"
+
+#include <utility>
+#include <vector>
+
+#include "absl/base/macros.h"
+#include "api/transport/rtp/dependency_descriptor.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+namespace {
+
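+// Returns the decode target indication of the frame described by `config` for
+// decode target (`sid`, `tid`).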
+DecodeTargetIndication
+Dti(int sid, int tid, const ScalableVideoController::LayerFrameConfig& config) {
+ if (config.IsKeyframe()) {
+ RTC_DCHECK_EQ(config.TemporalId(), 0);
+ return sid < config.SpatialId() ? DecodeTargetIndication::kNotPresent
+ : DecodeTargetIndication::kSwitch;
+ }
+
+ if (sid != config.SpatialId() || tid < config.TemporalId()) {
+ return DecodeTargetIndication::kNotPresent;
+ }
+ if (tid == config.TemporalId() && tid > 0) {
+ return DecodeTargetIndication::kDiscardable;
+ }
+ return DecodeTargetIndication::kSwitch;
+}
+
+} // namespace
+
+constexpr int ScalabilityStructureL2T2KeyShift::kNumSpatialLayers;
+constexpr int ScalabilityStructureL2T2KeyShift::kNumTemporalLayers;
+
+ScalabilityStructureL2T2KeyShift::~ScalabilityStructureL2T2KeyShift() = default;
+
+ScalableVideoController::StreamLayersConfig
+ScalabilityStructureL2T2KeyShift::StreamConfig() const {
+ StreamLayersConfig result;
+ result.num_spatial_layers = 2;
+ result.num_temporal_layers = 2;
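+ // Spatial layer 0 is encoded at half the resolution of spatial layer 1.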
+ result.scaling_factor_num[0] = 1;
+ result.scaling_factor_den[0] = 2;
+ result.uses_reference_scaling = true;
+ return result;
+}
+
+FrameDependencyStructure ScalabilityStructureL2T2KeyShift::DependencyStructure()
+ const {
+ FrameDependencyStructure structure;
+ structure.num_decode_targets = 4;
+ structure.num_chains = 2;
+ structure.decode_target_protected_by_chain = {0, 0, 1, 1};
+ structure.templates.resize(7);
+ auto& templates = structure.templates;
+ templates[0].S(0).T(0).Dtis("SSSS").ChainDiffs({0, 0});
+ templates[1].S(0).T(0).Dtis("SS--").ChainDiffs({2, 1}).FrameDiffs({2});
+ templates[2].S(0).T(0).Dtis("SS--").ChainDiffs({4, 1}).FrameDiffs({4});
+ templates[3].S(0).T(1).Dtis("-D--").ChainDiffs({2, 3}).FrameDiffs({2});
+ templates[4].S(1).T(0).Dtis("--SS").ChainDiffs({1, 1}).FrameDiffs({1});
+ templates[5].S(1).T(0).Dtis("--SS").ChainDiffs({3, 4}).FrameDiffs({4});
+ templates[6].S(1).T(1).Dtis("---D").ChainDiffs({1, 2}).FrameDiffs({2});
+ return structure;
+}
+
+std::vector<ScalableVideoController::LayerFrameConfig>
+ScalabilityStructureL2T2KeyShift::NextFrameConfig(bool restart) {
+ std::vector<LayerFrameConfig> configs;
+ configs.reserve(2);
+ if (restart) {
+ next_pattern_ = kKey;
+ }
+
+ // Buffer 0 keeps the latest S0T0 frame,
+ // buffer 1 keeps the latest S1T0 frame.
+ switch (next_pattern_) {
+ case kKey:
+ if (DecodeTargetIsActive(/*sid=*/0, /*tid=*/0)) {
+ configs.emplace_back();
+ configs.back().S(0).T(0).Update(0).Keyframe();
+ }
+ if (DecodeTargetIsActive(/*sid=*/1, /*tid=*/0)) {
+ configs.emplace_back();
+ configs.back().S(1).T(0).Update(1);
+ if (DecodeTargetIsActive(/*sid=*/0, /*tid=*/0)) {
+ configs.back().Reference(0);
+ } else {
+ configs.back().Keyframe();
+ }
+ }
+ next_pattern_ = kDelta0;
+ break;
+ case kDelta0:
+ if (DecodeTargetIsActive(/*sid=*/0, /*tid=*/0)) {
+ configs.emplace_back();
+ configs.back().S(0).T(0).ReferenceAndUpdate(0);
+ }
+ if (DecodeTargetIsActive(/*sid=*/1, /*tid=*/1)) {
+ configs.emplace_back();
+ configs.back().S(1).T(1).Reference(1);
+ }
+ if (configs.empty() && DecodeTargetIsActive(/*sid=*/1, /*tid=*/0)) {
+ configs.emplace_back();
+ configs.back().S(1).T(0).ReferenceAndUpdate(1);
+ }
+ next_pattern_ = kDelta1;
+ break;
+ case kDelta1:
+ if (DecodeTargetIsActive(/*sid=*/0, /*tid=*/1)) {
+ configs.emplace_back();
+ configs.back().S(0).T(1).Reference(0);
+ }
+ if (DecodeTargetIsActive(/*sid=*/1, /*tid=*/0)) {
+ configs.emplace_back();
+ configs.back().S(1).T(0).ReferenceAndUpdate(1);
+ }
+ if (configs.empty() && DecodeTargetIsActive(/*sid=*/0, /*tid=*/0)) {
+ configs.emplace_back();
+ configs.back().S(0).T(0).ReferenceAndUpdate(0);
+ }
+ next_pattern_ = kDelta0;
+ break;
+ }
+
+ RTC_DCHECK(!configs.empty() || active_decode_targets_.none());
+ return configs;
+}
+
+GenericFrameInfo ScalabilityStructureL2T2KeyShift::OnEncodeDone(
+ const LayerFrameConfig& config) {
+ GenericFrameInfo frame_info;
+ frame_info.spatial_id = config.SpatialId();
+ frame_info.temporal_id = config.TemporalId();
+ frame_info.encoder_buffers = config.Buffers();
+ for (int sid = 0; sid < kNumSpatialLayers; ++sid) {
+ for (int tid = 0; tid < kNumTemporalLayers; ++tid) {
+ frame_info.decode_target_indications.push_back(Dti(sid, tid, config));
+ }
+ }
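+ // Chain 0 protects the S0 decode targets and chain 1 the S1 decode targets.
+ // A key frame (re)starts both chains; a delta T0 frame continues only the
+ // chain of its own spatial layer.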
+ if (config.IsKeyframe()) {
+ frame_info.part_of_chain = {true, true};
+ } else if (config.TemporalId() == 0) {
+ frame_info.part_of_chain = {config.SpatialId() == 0,
+ config.SpatialId() == 1};
+ } else {
+ frame_info.part_of_chain = {false, false};
+ }
+ return frame_info;
+}
+
+void ScalabilityStructureL2T2KeyShift::OnRatesUpdated(
+ const VideoBitrateAllocation& bitrates) {
+ for (int sid = 0; sid < kNumSpatialLayers; ++sid) {
+ // Enable/disable spatial layers independently.
+ bool active = bitrates.GetBitrate(sid, /*tid=*/0) > 0;
+ if (!DecodeTargetIsActive(sid, /*tid=*/0) && active) {
+ // Key frame is required to reenable any spatial layer.
+ next_pattern_ = kKey;
+ }
+
+ SetDecodeTargetIsActive(sid, /*tid=*/0, active);
+ SetDecodeTargetIsActive(sid, /*tid=*/1,
+ active && bitrates.GetBitrate(sid, /*tid=*/1) > 0);
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/svc/scalability_structure_l2t2_key_shift.h b/third_party/libwebrtc/modules/video_coding/svc/scalability_structure_l2t2_key_shift.h
new file mode 100644
index 0000000000..26d1afcb29
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/svc/scalability_structure_l2t2_key_shift.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef MODULES_VIDEO_CODING_SVC_SCALABILITY_STRUCTURE_L2T2_KEY_SHIFT_H_
+#define MODULES_VIDEO_CODING_SVC_SCALABILITY_STRUCTURE_L2T2_KEY_SHIFT_H_
+
+#include <bitset>
+#include <vector>
+
+#include "api/transport/rtp/dependency_descriptor.h"
+#include "api/video/video_bitrate_allocation.h"
+#include "common_video/generic_frame_descriptor/generic_frame_info.h"
+#include "modules/video_coding/svc/scalable_video_controller.h"
+
+namespace webrtc {
+
+// S1T1 0 0
+// / / /
+// S1T0 0---0---0
+// | ...
+// S0T1 | 0 0
+// | / /
+// S0T0 0-0---0--
+// Time-> 0 1 2 3 4
+class ScalabilityStructureL2T2KeyShift : public ScalableVideoController {
+ public:
+ ~ScalabilityStructureL2T2KeyShift() override;
+
+ StreamLayersConfig StreamConfig() const override;
+ FrameDependencyStructure DependencyStructure() const override;
+
+ std::vector<LayerFrameConfig> NextFrameConfig(bool restart) override;
+ GenericFrameInfo OnEncodeDone(const LayerFrameConfig& config) override;
+ void OnRatesUpdated(const VideoBitrateAllocation& bitrates) override;
+
+ private:
+ enum FramePattern {
+ kKey,
+ kDelta0,
+ kDelta1,
+ };
+
+ static constexpr int kNumSpatialLayers = 2;
+ static constexpr int kNumTemporalLayers = 2;
+
+ bool DecodeTargetIsActive(int sid, int tid) const {
+ return active_decode_targets_[sid * kNumTemporalLayers + tid];
+ }
+ void SetDecodeTargetIsActive(int sid, int tid, bool value) {
+ active_decode_targets_.set(sid * kNumTemporalLayers + tid, value);
+ }
+
+ FramePattern next_pattern_ = kKey;
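+ // All four decode targets (2 spatial x 2 temporal layers) start enabled.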
+ std::bitset<32> active_decode_targets_ = 0b1111;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_SVC_SCALABILITY_STRUCTURE_L2T2_KEY_SHIFT_H_
diff --git a/third_party/libwebrtc/modules/video_coding/svc/scalability_structure_l2t2_key_shift_unittest.cc b/third_party/libwebrtc/modules/video_coding/svc/scalability_structure_l2t2_key_shift_unittest.cc
new file mode 100644
index 0000000000..40fecf1812
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/svc/scalability_structure_l2t2_key_shift_unittest.cc
@@ -0,0 +1,358 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/video_coding/svc/scalability_structure_l2t2_key_shift.h"
+
+#include <vector>
+
+#include "api/array_view.h"
+#include "api/transport/rtp/dependency_descriptor.h"
+#include "common_video/generic_frame_descriptor/generic_frame_info.h"
+#include "modules/video_coding/svc/scalability_structure_test_helpers.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+using ::testing::ElementsAre;
+using ::testing::IsEmpty;
+using ::testing::SizeIs;
+
+// S1T1 3 7
+// / /
+// S1T0 1---5---9
+// |
+// S0T1 | 4 8
+// | / /
+// S0T0 0-2---6
+// Time-> 0 1 2 3 4
+TEST(ScalabilityStructureL2T2KeyShiftTest, DecodeTargetsAreEnabledByDefault) {
+ ScalabilityStructureL2T2KeyShift structure;
+ ScalabilityStructureWrapper wrapper(structure);
+ std::vector<GenericFrameInfo> frames;
+ wrapper.GenerateFrames(/*num_temporal_units=*/5, frames);
+ ASSERT_THAT(frames, SizeIs(10));
+
+ EXPECT_EQ(frames[0].spatial_id, 0);
+ EXPECT_EQ(frames[1].spatial_id, 1);
+ EXPECT_EQ(frames[2].spatial_id, 0);
+ EXPECT_EQ(frames[3].spatial_id, 1);
+ EXPECT_EQ(frames[4].spatial_id, 0);
+ EXPECT_EQ(frames[5].spatial_id, 1);
+ EXPECT_EQ(frames[6].spatial_id, 0);
+ EXPECT_EQ(frames[7].spatial_id, 1);
+ EXPECT_EQ(frames[8].spatial_id, 0);
+ EXPECT_EQ(frames[9].spatial_id, 1);
+
+ // spatial_id = 0 has the temporal shift.
+ EXPECT_EQ(frames[0].temporal_id, 0);
+ EXPECT_EQ(frames[2].temporal_id, 0);
+ EXPECT_EQ(frames[4].temporal_id, 1);
+ EXPECT_EQ(frames[6].temporal_id, 0);
+ EXPECT_EQ(frames[8].temporal_id, 1);
+
+ // spatial_id = 1 has no temporal shift.
+ EXPECT_EQ(frames[1].temporal_id, 0);
+ EXPECT_EQ(frames[3].temporal_id, 1);
+ EXPECT_EQ(frames[5].temporal_id, 0);
+ EXPECT_EQ(frames[7].temporal_id, 1);
+ EXPECT_EQ(frames[9].temporal_id, 0);
+
+ // Key frame diff.
+ EXPECT_THAT(frames[0].frame_diffs, IsEmpty());
+ EXPECT_THAT(frames[1].frame_diffs, ElementsAre(1));
+ // S0T0 frame diffs
+ EXPECT_THAT(frames[2].frame_diffs, ElementsAre(2));
+ EXPECT_THAT(frames[6].frame_diffs, ElementsAre(4));
+ // S1T0 frame diffs
+ EXPECT_THAT(frames[5].frame_diffs, ElementsAre(4));
+ EXPECT_THAT(frames[9].frame_diffs, ElementsAre(4));
+ // T1 frames reference the T0 frame of the same spatial layer, which is 2 frame ids away.
+ EXPECT_THAT(frames[3].frame_diffs, ElementsAre(2));
+ EXPECT_THAT(frames[4].frame_diffs, ElementsAre(2));
+ EXPECT_THAT(frames[7].frame_diffs, ElementsAre(2));
+ EXPECT_THAT(frames[8].frame_diffs, ElementsAre(2));
+}
+
+// S1T0 1---4---7
+// |
+// S0T1 | 3 6
+// | / /
+// S0T0 0-2---5--
+// Time-> 0 1 2 3 4
+TEST(ScalabilityStructureL2T2KeyShiftTest, DisableS1T1Layer) {
+ ScalabilityStructureL2T2KeyShift structure;
+ structure.OnRatesUpdated(EnableTemporalLayers(/*s0=*/2, /*s1=*/1));
+ ScalabilityStructureWrapper wrapper(structure);
+ std::vector<GenericFrameInfo> frames;
+ wrapper.GenerateFrames(/*num_temporal_units=*/5, frames);
+ ASSERT_THAT(frames, SizeIs(8));
+
+ EXPECT_EQ(frames[0].spatial_id, 0);
+ EXPECT_EQ(frames[1].spatial_id, 1);
+ EXPECT_EQ(frames[2].spatial_id, 0);
+ EXPECT_EQ(frames[3].spatial_id, 0);
+ EXPECT_EQ(frames[4].spatial_id, 1);
+ EXPECT_EQ(frames[5].spatial_id, 0);
+ EXPECT_EQ(frames[6].spatial_id, 0);
+ EXPECT_EQ(frames[7].spatial_id, 1);
+
+ // spatial_id = 0 has the temporal shift.
+ EXPECT_EQ(frames[0].temporal_id, 0);
+ EXPECT_EQ(frames[2].temporal_id, 0);
+ EXPECT_EQ(frames[3].temporal_id, 1);
+ EXPECT_EQ(frames[5].temporal_id, 0);
+ EXPECT_EQ(frames[6].temporal_id, 1);
+
+ // spatial_id = 1 has single temporal layer.
+ EXPECT_EQ(frames[1].temporal_id, 0);
+ EXPECT_EQ(frames[4].temporal_id, 0);
+ EXPECT_EQ(frames[7].temporal_id, 0);
+}
+
+// S1T1 3 |
+// / |
+// S1T0 1---5+--7
+// | |
+// S0T1 | 4|
+// | / |
+// S0T0 0-2--+6---8
+// Time-> 0 1 2 3 4 5
+TEST(ScalabilityStructureL2T2KeyShiftTest, DisableT1LayersAfterFewFrames) {
+ ScalabilityStructureL2T2KeyShift structure;
+ ScalabilityStructureWrapper wrapper(structure);
+ std::vector<GenericFrameInfo> frames;
+
+ wrapper.GenerateFrames(/*num_temporal_units=*/3, frames);
+ EXPECT_THAT(frames, SizeIs(6));
+ structure.OnRatesUpdated(EnableTemporalLayers(/*s0=*/1, /*s1=*/1));
+ wrapper.GenerateFrames(/*num_temporal_units=*/3, frames);
+ ASSERT_THAT(frames, SizeIs(9));
+
+ // Skip validation before T1 was disabled as that is covered by the test
+ // where no layers are disabled.
+ EXPECT_EQ(frames[6].spatial_id, 0);
+ EXPECT_EQ(frames[7].spatial_id, 1);
+ EXPECT_EQ(frames[8].spatial_id, 0);
+
+ EXPECT_EQ(frames[6].temporal_id, 0);
+ EXPECT_EQ(frames[7].temporal_id, 0);
+ EXPECT_EQ(frames[8].temporal_id, 0);
+
+ EXPECT_TRUE(wrapper.FrameReferencesAreValid(frames));
+}
+
+// S1T1 1 3
+// / /
+// S1T0 0---2
+// Time-> 0 1 2 3 4 5
+TEST(ScalabilityStructureL2T2KeyShiftTest, DisableS0FromTheStart) {
+ ScalabilityStructureL2T2KeyShift structure;
+ ScalabilityStructureWrapper wrapper(structure);
+ std::vector<GenericFrameInfo> frames;
+
+ structure.OnRatesUpdated(EnableTemporalLayers(/*s0=*/0, /*s1=*/2));
+ wrapper.GenerateFrames(/*num_temporal_units=*/4, frames);
+ EXPECT_THAT(frames, SizeIs(4));
+
+ EXPECT_EQ(frames[0].spatial_id, 1);
+ EXPECT_EQ(frames[1].spatial_id, 1);
+ EXPECT_EQ(frames[2].spatial_id, 1);
+ EXPECT_EQ(frames[3].spatial_id, 1);
+
+ EXPECT_EQ(frames[0].temporal_id, 0);
+ EXPECT_EQ(frames[1].temporal_id, 1);
+ EXPECT_EQ(frames[2].temporal_id, 0);
+ EXPECT_EQ(frames[3].temporal_id, 1);
+
+ EXPECT_TRUE(wrapper.FrameReferencesAreValid(frames));
+}
+
+// S1T1 3 |6 8
+// / / /
+// S1T0 1---5+--7
+// | |
+// S0T1 | 4|
+// | / |
+// S0T0 0-2 |
+// Time-> 0 1 2 3 4 5
+TEST(ScalabilityStructureL2T2KeyShiftTest, DisableS0AfterFewFrames) {
+ ScalabilityStructureL2T2KeyShift structure;
+ ScalabilityStructureWrapper wrapper(structure);
+ std::vector<GenericFrameInfo> frames;
+
+ wrapper.GenerateFrames(/*num_temporal_units=*/3, frames);
+ EXPECT_THAT(frames, SizeIs(6));
+ structure.OnRatesUpdated(EnableTemporalLayers(/*s0=*/0, /*s1=*/2));
+ wrapper.GenerateFrames(/*num_temporal_units=*/3, frames);
+ ASSERT_THAT(frames, SizeIs(9));
+
+ // Expect frame[6] is a delta frame.
+ EXPECT_THAT(frames[6].frame_diffs, ElementsAre(1));
+ // Skip validation before S0 was disabled as that should be covered by the
+ // test where no layers are disabled.
+ EXPECT_EQ(frames[6].spatial_id, 1);
+ EXPECT_EQ(frames[7].spatial_id, 1);
+ EXPECT_EQ(frames[8].spatial_id, 1);
+
+ EXPECT_EQ(frames[6].temporal_id, 1);
+ EXPECT_EQ(frames[7].temporal_id, 0);
+ EXPECT_EQ(frames[8].temporal_id, 1);
+
+ EXPECT_TRUE(wrapper.FrameReferencesAreValid(frames));
+}
+
+// S1T1 3| | 8
+// / | | /
+// S1T0 1 | |6
+// | | ||
+// S0T1 | |4||
+// | / ||
+// S0T0 0-2| |5-7
+// Time-> 0 1 2 3 4 5
+TEST(ScalabilityStructureL2T2KeyShiftTest, ReenableS1TriggersKeyFrame) {
+ ScalabilityStructureL2T2KeyShift structure;
+ ScalabilityStructureWrapper wrapper(structure);
+ std::vector<GenericFrameInfo> frames;
+
+ wrapper.GenerateFrames(/*num_temporal_units=*/2, frames);
+ EXPECT_THAT(frames, SizeIs(4));
+
+ structure.OnRatesUpdated(EnableTemporalLayers(/*s0=*/2, /*s1=*/0));
+ wrapper.GenerateFrames(/*num_temporal_units=*/1, frames);
+ EXPECT_THAT(frames, SizeIs(5));
+
+ structure.OnRatesUpdated(EnableTemporalLayers(/*s0=*/2, /*s1=*/2));
+ wrapper.GenerateFrames(/*num_temporal_units=*/2, frames);
+ ASSERT_THAT(frames, SizeIs(9));
+
+ EXPECT_THAT(frames[4].spatial_id, 0);
+ EXPECT_THAT(frames[4].temporal_id, 1);
+
+ // Expect frame[5] to be a key frame.
+ EXPECT_TRUE(wrapper.FrameReferencesAreValid(
+ rtc::MakeArrayView(frames.data() + 5, 4)));
+
+ EXPECT_THAT(frames[5].spatial_id, 0);
+ EXPECT_THAT(frames[6].spatial_id, 1);
+ EXPECT_THAT(frames[7].spatial_id, 0);
+ EXPECT_THAT(frames[8].spatial_id, 1);
+
+ // S0 should apply the temporal shift after the key frame.
+ EXPECT_THAT(frames[5].temporal_id, 0);
+ EXPECT_THAT(frames[7].temporal_id, 0);
+
+ // No temporal shift for the top spatial layer.
+ EXPECT_THAT(frames[6].temporal_id, 0);
+ EXPECT_THAT(frames[8].temporal_id, 1);
+}
+
+TEST(ScalabilityStructureL2T2KeyShiftTest, EnableOnlyS0T0FromTheStart) {
+ ScalabilityStructureL2T2KeyShift structure;
+ ScalabilityStructureWrapper wrapper(structure);
+ std::vector<GenericFrameInfo> frames;
+
+ structure.OnRatesUpdated(EnableTemporalLayers(/*s0=*/1, /*s1=*/0));
+ wrapper.GenerateFrames(/*num_temporal_units=*/3, frames);
+ ASSERT_THAT(frames, SizeIs(3));
+
+ EXPECT_EQ(frames[0].spatial_id, 0);
+ EXPECT_EQ(frames[1].spatial_id, 0);
+ EXPECT_EQ(frames[2].spatial_id, 0);
+
+ EXPECT_EQ(frames[0].temporal_id, 0);
+ EXPECT_EQ(frames[1].temporal_id, 0);
+ EXPECT_EQ(frames[2].temporal_id, 0);
+
+ EXPECT_TRUE(wrapper.FrameReferencesAreValid(frames));
+}
+
+// S1T1 3|
+// / |
+// S1T0 1 |
+// | |
+// S0T1 | |
+// | |
+// S0T0 0-2+4-5-6
+// Time-> 0 1 2 3 4
+TEST(ScalabilityStructureL2T2KeyShiftTest, EnableOnlyS0T0AfterFewFrames) {
+ ScalabilityStructureL2T2KeyShift structure;
+ ScalabilityStructureWrapper wrapper(structure);
+ std::vector<GenericFrameInfo> frames;
+
+ wrapper.GenerateFrames(/*num_temporal_units=*/2, frames);
+ EXPECT_THAT(frames, SizeIs(4));
+ structure.OnRatesUpdated(EnableTemporalLayers(/*s0=*/1, /*s1=*/0));
+ wrapper.GenerateFrames(/*num_temporal_units=*/3, frames);
+ ASSERT_THAT(frames, SizeIs(7));
+
+ EXPECT_EQ(frames[4].spatial_id, 0);
+ EXPECT_EQ(frames[5].spatial_id, 0);
+ EXPECT_EQ(frames[6].spatial_id, 0);
+
+ EXPECT_EQ(frames[4].temporal_id, 0);
+ EXPECT_EQ(frames[5].temporal_id, 0);
+ EXPECT_EQ(frames[6].temporal_id, 0);
+
+ EXPECT_TRUE(wrapper.FrameReferencesAreValid(frames));
+}
+
+TEST(ScalabilityStructureL2T2KeyShiftTest, EnableOnlyS1T0FromTheStart) {
+ ScalabilityStructureL2T2KeyShift structure;
+ ScalabilityStructureWrapper wrapper(structure);
+ std::vector<GenericFrameInfo> frames;
+
+ structure.OnRatesUpdated(EnableTemporalLayers(/*s0=*/0, /*s1=*/1));
+ wrapper.GenerateFrames(/*num_temporal_units=*/3, frames);
+ ASSERT_THAT(frames, SizeIs(3));
+
+ EXPECT_EQ(frames[0].spatial_id, 1);
+ EXPECT_EQ(frames[1].spatial_id, 1);
+ EXPECT_EQ(frames[2].spatial_id, 1);
+
+ EXPECT_EQ(frames[0].temporal_id, 0);
+ EXPECT_EQ(frames[1].temporal_id, 0);
+ EXPECT_EQ(frames[2].temporal_id, 0);
+
+ EXPECT_TRUE(wrapper.FrameReferencesAreValid(frames));
+}
+
+// S1T1 3|
+// / |
+// S1T0 1--+4-5-6
+// | |
+// S0T1 | |
+// | |
+// S0T0 0-2|
+// Time-> 0 1 2 3 4
+TEST(ScalabilityStructureL2T2KeyShiftTest, EnableOnlyS1T0AfterFewFrames) {
+ ScalabilityStructureL2T2KeyShift structure;
+ ScalabilityStructureWrapper wrapper(structure);
+ std::vector<GenericFrameInfo> frames;
+
+ wrapper.GenerateFrames(/*num_temporal_units=*/2, frames);
+ EXPECT_THAT(frames, SizeIs(4));
+ structure.OnRatesUpdated(EnableTemporalLayers(/*s0=*/0, /*s1=*/1));
+ wrapper.GenerateFrames(/*num_temporal_units=*/3, frames);
+ ASSERT_THAT(frames, SizeIs(7));
+
+ EXPECT_EQ(frames[4].spatial_id, 1);
+ EXPECT_EQ(frames[5].spatial_id, 1);
+ EXPECT_EQ(frames[6].spatial_id, 1);
+
+ EXPECT_EQ(frames[4].temporal_id, 0);
+ EXPECT_EQ(frames[5].temporal_id, 0);
+ EXPECT_EQ(frames[6].temporal_id, 0);
+
+ EXPECT_TRUE(wrapper.FrameReferencesAreValid(frames));
+}
+
+} // namespace
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/svc/scalability_structure_simulcast.cc b/third_party/libwebrtc/modules/video_coding/svc/scalability_structure_simulcast.cc
new file mode 100644
index 0000000000..0a06e9ee88
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/svc/scalability_structure_simulcast.cc
@@ -0,0 +1,294 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/video_coding/svc/scalability_structure_simulcast.h"
+
+#include <utility>
+#include <vector>
+
+#include "absl/base/macros.h"
+#include "api/transport/rtp/dependency_descriptor.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+namespace {
+
+DecodeTargetIndication
+Dti(int sid, int tid, const ScalableVideoController::LayerFrameConfig& config) {
+ if (sid != config.SpatialId() || tid < config.TemporalId()) {
+ return DecodeTargetIndication::kNotPresent;
+ }
+ if (tid == 0) {
+ RTC_DCHECK_EQ(config.TemporalId(), 0);
+ return DecodeTargetIndication::kSwitch;
+ }
+ if (tid == config.TemporalId()) {
+ return DecodeTargetIndication::kDiscardable;
+ }
+ RTC_DCHECK_GT(tid, config.TemporalId());
+ return DecodeTargetIndication::kSwitch;
+}
+
+} // namespace
+
+constexpr int ScalabilityStructureSimulcast::kMaxNumSpatialLayers;
+constexpr int ScalabilityStructureSimulcast::kMaxNumTemporalLayers;
+
+ScalabilityStructureSimulcast::ScalabilityStructureSimulcast(
+ int num_spatial_layers,
+ int num_temporal_layers)
+ : num_spatial_layers_(num_spatial_layers),
+ num_temporal_layers_(num_temporal_layers),
+ active_decode_targets_(
+ (uint32_t{1} << (num_spatial_layers * num_temporal_layers)) - 1) {
+ RTC_DCHECK_LE(num_spatial_layers, kMaxNumSpatialLayers);
+ RTC_DCHECK_LE(num_temporal_layers, kMaxNumTemporalLayers);
+}
+
+ScalabilityStructureSimulcast::~ScalabilityStructureSimulcast() = default;
+
+ScalableVideoController::StreamLayersConfig
+ScalabilityStructureSimulcast::StreamConfig() const {
+ StreamLayersConfig result;
+ result.num_spatial_layers = num_spatial_layers_;
+ result.num_temporal_layers = num_temporal_layers_;
+ result.scaling_factor_num[num_spatial_layers_ - 1] = 1;
+ result.scaling_factor_den[num_spatial_layers_ - 1] = 1;
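+ // Each lower spatial layer is half the resolution of the layer above it.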
+ for (int sid = num_spatial_layers_ - 1; sid > 0; --sid) {
+ result.scaling_factor_num[sid - 1] = 1;
+ result.scaling_factor_den[sid - 1] = 2 * result.scaling_factor_den[sid];
+ }
+ result.uses_reference_scaling = false;
+ return result;
+}
+
+bool ScalabilityStructureSimulcast::TemporalLayerIsActive(int tid) const {
+ if (tid >= num_temporal_layers_) {
+ return false;
+ }
+ for (int sid = 0; sid < num_spatial_layers_; ++sid) {
+ if (DecodeTargetIsActive(sid, tid)) {
+ return true;
+ }
+ }
+ return false;
+}
+
+ScalabilityStructureSimulcast::FramePattern
+ScalabilityStructureSimulcast::NextPattern() const {
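+ // With all temporal layers active the pattern cycles
+ // kDeltaT0 -> kDeltaT2A -> kDeltaT1 -> kDeltaT2B (temporal ids 0, 2, 1, 2);
+ // patterns of inactive temporal layers are skipped.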
+ switch (last_pattern_) {
+ case kNone:
+ case kDeltaT2B:
+ return kDeltaT0;
+ case kDeltaT2A:
+ if (TemporalLayerIsActive(1)) {
+ return kDeltaT1;
+ }
+ return kDeltaT0;
+ case kDeltaT1:
+ if (TemporalLayerIsActive(2)) {
+ return kDeltaT2B;
+ }
+ return kDeltaT0;
+ case kDeltaT0:
+ if (TemporalLayerIsActive(2)) {
+ return kDeltaT2A;
+ }
+ if (TemporalLayerIsActive(1)) {
+ return kDeltaT1;
+ }
+ return kDeltaT0;
+ }
+ RTC_DCHECK_NOTREACHED();
+ return kDeltaT0;
+}
+
+std::vector<ScalableVideoController::LayerFrameConfig>
+ScalabilityStructureSimulcast::NextFrameConfig(bool restart) {
+ std::vector<LayerFrameConfig> configs;
+ if (active_decode_targets_.none()) {
+ last_pattern_ = kNone;
+ return configs;
+ }
+ configs.reserve(num_spatial_layers_);
+
+ if (last_pattern_ == kNone || restart) {
+ can_reference_t0_frame_for_spatial_id_.reset();
+ last_pattern_ = kNone;
+ }
+ FramePattern current_pattern = NextPattern();
+
+ switch (current_pattern) {
+ case kDeltaT0:
+ // Disallow temporal references across T0 frames on higher temporal layers.
+ can_reference_t1_frame_for_spatial_id_.reset();
+ for (int sid = 0; sid < num_spatial_layers_; ++sid) {
+ if (!DecodeTargetIsActive(sid, /*tid=*/0)) {
+ // The next frame from spatial layer `sid` shouldn't depend on a
+ // potentially old previous frame from that same layer.
+ can_reference_t0_frame_for_spatial_id_.reset(sid);
+ continue;
+ }
+ configs.emplace_back();
+ ScalableVideoController::LayerFrameConfig& config = configs.back();
+ config.Id(current_pattern).S(sid).T(0);
+
+ if (can_reference_t0_frame_for_spatial_id_[sid]) {
+ config.ReferenceAndUpdate(BufferIndex(sid, /*tid=*/0));
+ } else {
+ config.Keyframe().Update(BufferIndex(sid, /*tid=*/0));
+ }
+ can_reference_t0_frame_for_spatial_id_.set(sid);
+ }
+ break;
+ case kDeltaT1:
+ for (int sid = 0; sid < num_spatial_layers_; ++sid) {
+ if (!DecodeTargetIsActive(sid, /*tid=*/1) ||
+ !can_reference_t0_frame_for_spatial_id_[sid]) {
+ continue;
+ }
+ configs.emplace_back();
+ ScalableVideoController::LayerFrameConfig& config = configs.back();
+ config.Id(current_pattern)
+ .S(sid)
+ .T(1)
+ .Reference(BufferIndex(sid, /*tid=*/0));
+ // Save frame only if there is a higher temporal layer that may need it.
+ if (num_temporal_layers_ > 2) {
+ config.Update(BufferIndex(sid, /*tid=*/1));
+ }
+ }
+ break;
+ case kDeltaT2A:
+ case kDeltaT2B:
+ for (int sid = 0; sid < num_spatial_layers_; ++sid) {
+ if (!DecodeTargetIsActive(sid, /*tid=*/2) ||
+ !can_reference_t0_frame_for_spatial_id_[sid]) {
+ continue;
+ }
+ configs.emplace_back();
+ ScalableVideoController::LayerFrameConfig& config = configs.back();
+ config.Id(current_pattern).S(sid).T(2);
+ if (can_reference_t1_frame_for_spatial_id_[sid]) {
+ config.Reference(BufferIndex(sid, /*tid=*/1));
+ } else {
+ config.Reference(BufferIndex(sid, /*tid=*/0));
+ }
+ }
+ break;
+ case kNone:
+ RTC_DCHECK_NOTREACHED();
+ break;
+ }
+
+ return configs;
+}
+
+GenericFrameInfo ScalabilityStructureSimulcast::OnEncodeDone(
+ const LayerFrameConfig& config) {
+ last_pattern_ = static_cast<FramePattern>(config.Id());
+ if (config.TemporalId() == 1) {
+ can_reference_t1_frame_for_spatial_id_.set(config.SpatialId());
+ }
+ GenericFrameInfo frame_info;
+ frame_info.spatial_id = config.SpatialId();
+ frame_info.temporal_id = config.TemporalId();
+ frame_info.encoder_buffers = config.Buffers();
+ frame_info.decode_target_indications.reserve(num_spatial_layers_ *
+ num_temporal_layers_);
+ for (int sid = 0; sid < num_spatial_layers_; ++sid) {
+ for (int tid = 0; tid < num_temporal_layers_; ++tid) {
+ frame_info.decode_target_indications.push_back(Dti(sid, tid, config));
+ }
+ }
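+ // Each simulcast stream is protected by its own chain, and only T0 frames
+ // are part of it.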
+ frame_info.part_of_chain.assign(num_spatial_layers_, false);
+ if (config.TemporalId() == 0) {
+ frame_info.part_of_chain[config.SpatialId()] = true;
+ }
+ frame_info.active_decode_targets = active_decode_targets_;
+ return frame_info;
+}
+
+void ScalabilityStructureSimulcast::OnRatesUpdated(
+ const VideoBitrateAllocation& bitrates) {
+ for (int sid = 0; sid < num_spatial_layers_; ++sid) {
+ // Enable/disable spatial layers independently.
+ bool active = true;
+ for (int tid = 0; tid < num_temporal_layers_; ++tid) {
+ // To enable a temporal layer, bitrates for all lower temporal layers are also required.
+ active = active && bitrates.GetBitrate(sid, tid) > 0;
+ SetDecodeTargetIsActive(sid, tid, active);
+ }
+ }
+}
+
+FrameDependencyStructure ScalabilityStructureS2T1::DependencyStructure() const {
+ FrameDependencyStructure structure;
+ structure.num_decode_targets = 2;
+ structure.num_chains = 2;
+ structure.decode_target_protected_by_chain = {0, 1};
+ structure.templates.resize(4);
+ structure.templates[0].S(0).Dtis("S-").ChainDiffs({2, 1}).FrameDiffs({2});
+ structure.templates[1].S(0).Dtis("S-").ChainDiffs({0, 0});
+ structure.templates[2].S(1).Dtis("-S").ChainDiffs({1, 2}).FrameDiffs({2});
+ structure.templates[3].S(1).Dtis("-S").ChainDiffs({1, 0});
+ return structure;
+}
+
+FrameDependencyStructure ScalabilityStructureS2T3::DependencyStructure() const {
+ FrameDependencyStructure structure;
+ structure.num_decode_targets = 6;
+ structure.num_chains = 2;
+ structure.decode_target_protected_by_chain = {0, 0, 0, 1, 1, 1};
+ auto& t = structure.templates;
+ t.resize(10);
+ t[1].S(0).T(0).Dtis("SSS---").ChainDiffs({0, 0});
+ t[6].S(1).T(0).Dtis("---SSS").ChainDiffs({1, 0});
+ t[3].S(0).T(2).Dtis("--D---").ChainDiffs({2, 1}).FrameDiffs({2});
+ t[8].S(1).T(2).Dtis("-----D").ChainDiffs({3, 2}).FrameDiffs({2});
+ t[2].S(0).T(1).Dtis("-DS---").ChainDiffs({4, 3}).FrameDiffs({4});
+ t[7].S(1).T(1).Dtis("----DS").ChainDiffs({5, 4}).FrameDiffs({4});
+ t[4].S(0).T(2).Dtis("--D---").ChainDiffs({6, 5}).FrameDiffs({2});
+ t[9].S(1).T(2).Dtis("-----D").ChainDiffs({7, 6}).FrameDiffs({2});
+ t[0].S(0).T(0).Dtis("SSS---").ChainDiffs({8, 7}).FrameDiffs({8});
+ t[5].S(1).T(0).Dtis("---SSS").ChainDiffs({1, 8}).FrameDiffs({8});
+ return structure;
+}
+
+FrameDependencyStructure ScalabilityStructureS3T3::DependencyStructure() const {
+ FrameDependencyStructure structure;
+ structure.num_decode_targets = 9;
+ structure.num_chains = 3;
+ structure.decode_target_protected_by_chain = {0, 0, 0, 1, 1, 1, 2, 2, 2};
+ auto& t = structure.templates;
+ t.resize(15);
+ // Templates are listed in the order that frames using them appear in the
+ // stream, but within the `structure.templates` array they are sorted by
+ // (`spatial_id`, `temporal_id`), as the dependency descriptor requires.
+ // Indexes are written in hex for nicer alignment.
+ t[0x1].S(0).T(0).Dtis("SSS------").ChainDiffs({0, 0, 0});
+ t[0x6].S(1).T(0).Dtis("---SSS---").ChainDiffs({1, 0, 0});
+ t[0xB].S(2).T(0).Dtis("------SSS").ChainDiffs({2, 1, 0});
+ t[0x3].S(0).T(2).Dtis("--D------").ChainDiffs({3, 2, 1}).FrameDiffs({3});
+ t[0x8].S(1).T(2).Dtis("-----D---").ChainDiffs({4, 3, 2}).FrameDiffs({3});
+ t[0xD].S(2).T(2).Dtis("--------D").ChainDiffs({5, 4, 3}).FrameDiffs({3});
+ t[0x2].S(0).T(1).Dtis("-DS------").ChainDiffs({6, 5, 4}).FrameDiffs({6});
+ t[0x7].S(1).T(1).Dtis("----DS---").ChainDiffs({7, 6, 5}).FrameDiffs({6});
+ t[0xC].S(2).T(1).Dtis("-------DS").ChainDiffs({8, 7, 6}).FrameDiffs({6});
+ t[0x4].S(0).T(2).Dtis("--D------").ChainDiffs({9, 8, 7}).FrameDiffs({3});
+ t[0x9].S(1).T(2).Dtis("-----D---").ChainDiffs({10, 9, 8}).FrameDiffs({3});
+ t[0xE].S(2).T(2).Dtis("--------D").ChainDiffs({11, 10, 9}).FrameDiffs({3});
+ t[0x0].S(0).T(0).Dtis("SSS------").ChainDiffs({12, 11, 10}).FrameDiffs({12});
+ t[0x5].S(1).T(0).Dtis("---SSS---").ChainDiffs({1, 12, 11}).FrameDiffs({12});
+ t[0xA].S(2).T(0).Dtis("------SSS").ChainDiffs({2, 1, 12}).FrameDiffs({12});
+ return structure;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/svc/scalability_structure_simulcast.h b/third_party/libwebrtc/modules/video_coding/svc/scalability_structure_simulcast.h
new file mode 100644
index 0000000000..53f491c2b6
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/svc/scalability_structure_simulcast.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef MODULES_VIDEO_CODING_SVC_SCALABILITY_STRUCTURE_SIMULCAST_H_
+#define MODULES_VIDEO_CODING_SVC_SCALABILITY_STRUCTURE_SIMULCAST_H_
+
+#include <bitset>
+#include <vector>
+
+#include "api/transport/rtp/dependency_descriptor.h"
+#include "api/video/video_bitrate_allocation.h"
+#include "common_video/generic_frame_descriptor/generic_frame_info.h"
+#include "modules/video_coding/svc/scalable_video_controller.h"
+
+namespace webrtc {
+
+// Scalability structure with multiple independent spatial layers each with the
+// same temporal layering.
+class ScalabilityStructureSimulcast : public ScalableVideoController {
+ public:
+ ScalabilityStructureSimulcast(int num_spatial_layers,
+ int num_temporal_layers);
+ ~ScalabilityStructureSimulcast() override;
+
+ StreamLayersConfig StreamConfig() const override;
+ std::vector<LayerFrameConfig> NextFrameConfig(bool restart) override;
+ GenericFrameInfo OnEncodeDone(const LayerFrameConfig& config) override;
+ void OnRatesUpdated(const VideoBitrateAllocation& bitrates) override;
+
+ private:
+ enum FramePattern {
+ kNone,
+ kDeltaT2A,
+ kDeltaT1,
+ kDeltaT2B,
+ kDeltaT0,
+ };
+ static constexpr int kMaxNumSpatialLayers = 3;
+ static constexpr int kMaxNumTemporalLayers = 3;
+
+ // Index of the buffer storing the last frame for layer (`sid`, `tid`).
+ int BufferIndex(int sid, int tid) const {
+ return tid * num_spatial_layers_ + sid;
+ }
+ bool DecodeTargetIsActive(int sid, int tid) const {
+ return active_decode_targets_[sid * num_temporal_layers_ + tid];
+ }
+ void SetDecodeTargetIsActive(int sid, int tid, bool value) {
+ active_decode_targets_.set(sid * num_temporal_layers_ + tid, value);
+ }
+ FramePattern NextPattern() const;
+ bool TemporalLayerIsActive(int tid) const;
+
+ const int num_spatial_layers_;
+ const int num_temporal_layers_;
+
+ FramePattern last_pattern_ = kNone;
+ std::bitset<kMaxNumSpatialLayers> can_reference_t0_frame_for_spatial_id_ = 0;
+ std::bitset<kMaxNumSpatialLayers> can_reference_t1_frame_for_spatial_id_ = 0;
+ std::bitset<32> active_decode_targets_;
+};
+
+// S1 0--0--0-
+// ...
+// S0 0--0--0-
+class ScalabilityStructureS2T1 : public ScalabilityStructureSimulcast {
+ public:
+ ScalabilityStructureS2T1() : ScalabilityStructureSimulcast(2, 1) {}
+ ~ScalabilityStructureS2T1() override = default;
+
+ FrameDependencyStructure DependencyStructure() const override;
+};
+
+// S1T2 3 7
+// | /
+// S1T1 / 5
+// |_/
+// S1T0 1-------9...
+//
+// S0T2 2 6
+// | /
+// S0T1 / 4
+// |_/
+// S0T0 0-------8...
+// Time-> 0 1 2 3 4
+class ScalabilityStructureS2T3 : public ScalabilityStructureSimulcast {
+ public:
+ ScalabilityStructureS2T3() : ScalabilityStructureSimulcast(2, 3) {}
+ ~ScalabilityStructureS2T3() override = default;
+
+ FrameDependencyStructure DependencyStructure() const override;
+};
+
+class ScalabilityStructureS3T3 : public ScalabilityStructureSimulcast {
+ public:
+ ScalabilityStructureS3T3() : ScalabilityStructureSimulcast(3, 3) {}
+ ~ScalabilityStructureS3T3() override = default;
+
+ FrameDependencyStructure DependencyStructure() const override;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_SVC_SCALABILITY_STRUCTURE_SIMULCAST_H_
diff --git a/third_party/libwebrtc/modules/video_coding/svc/scalability_structure_test_helpers.cc b/third_party/libwebrtc/modules/video_coding/svc/scalability_structure_test_helpers.cc
new file mode 100644
index 0000000000..aeb4d88f1a
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/svc/scalability_structure_test_helpers.cc
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/video_coding/svc/scalability_structure_test_helpers.h"
+
+#include <stdint.h>
+
+#include <bitset>
+#include <utility>
+#include <vector>
+
+#include "api/array_view.h"
+#include "api/transport/rtp/dependency_descriptor.h"
+#include "api/video/video_bitrate_allocation.h"
+#include "modules/video_coding/chain_diff_calculator.h"
+#include "modules/video_coding/frame_dependencies_calculator.h"
+#include "modules/video_coding/svc/scalable_video_controller.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+VideoBitrateAllocation EnableTemporalLayers(int s0, int s1, int s2) {
+ VideoBitrateAllocation bitrate;
+ for (int tid = 0; tid < s0; ++tid) {
+ bitrate.SetBitrate(0, tid, 1'000'000);
+ }
+ for (int tid = 0; tid < s1; ++tid) {
+ bitrate.SetBitrate(1, tid, 1'000'000);
+ }
+ for (int tid = 0; tid < s2; ++tid) {
+ bitrate.SetBitrate(2, tid, 1'000'000);
+ }
+ return bitrate;
+}
+
+void ScalabilityStructureWrapper::GenerateFrames(
+ int num_temporal_units,
+ std::vector<GenericFrameInfo>& frames) {
+ for (int i = 0; i < num_temporal_units; ++i) {
+ for (auto& layer_frame :
+ structure_controller_.NextFrameConfig(/*restart=*/false)) {
+ int64_t frame_id = ++frame_id_;
+ bool is_keyframe = layer_frame.IsKeyframe();
+
+ GenericFrameInfo frame_info =
+ structure_controller_.OnEncodeDone(layer_frame);
+ if (is_keyframe) {
+ chain_diff_calculator_.Reset(frame_info.part_of_chain);
+ }
+ frame_info.chain_diffs =
+ chain_diff_calculator_.From(frame_id, frame_info.part_of_chain);
+ for (int64_t base_frame_id : frame_deps_calculator_.FromBuffersUsage(
+ frame_id, frame_info.encoder_buffers)) {
+ frame_info.frame_diffs.push_back(frame_id - base_frame_id);
+ }
+
+ frames.push_back(std::move(frame_info));
+ }
+ }
+}
+
+bool ScalabilityStructureWrapper::FrameReferencesAreValid(
+ rtc::ArrayView<const GenericFrameInfo> frames) const {
+ bool valid = true;
+ // VP9 and AV1 support up to 8 buffers. Expect that no other buffers are used.
+ std::bitset<8> buffer_contains_frame;
+ for (size_t i = 0; i < frames.size(); ++i) {
+ const GenericFrameInfo& frame = frames[i];
+ for (const CodecBufferUsage& buffer_usage : frame.encoder_buffers) {
+ if (buffer_usage.id < 0 || buffer_usage.id >= 8) {
+ ADD_FAILURE() << "Invalid buffer id " << buffer_usage.id
+ << " for frame#" << i
+ << ". Up to 8 buffers are supported.";
+ valid = false;
+ continue;
+ }
+ if (buffer_usage.referenced && !buffer_contains_frame[buffer_usage.id]) {
+ ADD_FAILURE() << "buffer " << buffer_usage.id << " for frame#" << i
+ << " was reference before updated.";
+ valid = false;
+ }
+ if (buffer_usage.updated) {
+ buffer_contains_frame.set(buffer_usage.id);
+ }
+ }
+ for (int fdiff : frame.frame_diffs) {
+ if (fdiff <= 0 || static_cast<size_t>(fdiff) > i) {
+ ADD_FAILURE() << "Invalid frame diff " << fdiff << " for frame#" << i;
+ valid = false;
+ }
+ }
+ }
+ return valid;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/svc/scalability_structure_test_helpers.h b/third_party/libwebrtc/modules/video_coding/svc/scalability_structure_test_helpers.h
new file mode 100644
index 0000000000..d183be4766
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/svc/scalability_structure_test_helpers.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef MODULES_VIDEO_CODING_SVC_SCALABILITY_STRUCTURE_TEST_HELPERS_H_
+#define MODULES_VIDEO_CODING_SVC_SCALABILITY_STRUCTURE_TEST_HELPERS_H_
+
+#include <stdint.h>
+
+#include <vector>
+
+#include "api/array_view.h"
+#include "api/transport/rtp/dependency_descriptor.h"
+#include "api/video/video_bitrate_allocation.h"
+#include "common_video/generic_frame_descriptor/generic_frame_info.h"
+#include "modules/video_coding/chain_diff_calculator.h"
+#include "modules/video_coding/frame_dependencies_calculator.h"
+#include "modules/video_coding/svc/scalable_video_controller.h"
+
+namespace webrtc {
+
+// Creates bitrate allocation with non-zero bitrate for given number of temporal
+// layers for each spatial layer.
+VideoBitrateAllocation EnableTemporalLayers(int s0, int s1 = 0, int s2 = 0);
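+// For example, EnableTemporalLayers(/*s0=*/3, /*s1=*/1) enables all three
+// temporal layers of spatial layer 0 and only the base temporal layer of
+// spatial layer 1.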
+
+class ScalabilityStructureWrapper {
+ public:
+ explicit ScalabilityStructureWrapper(ScalableVideoController& structure)
+ : structure_controller_(structure) {}
+
+ std::vector<GenericFrameInfo> GenerateFrames(int num_temporal_units) {
+ std::vector<GenericFrameInfo> frames;
+ GenerateFrames(num_temporal_units, frames);
+ return frames;
+ }
+ void GenerateFrames(int num_temporal_units,
+ std::vector<GenericFrameInfo>& frames);
+
+ // Returns false and emits ADD_FAILURE()s for frames with invalid references.
+ // In particular, validates that no frame references a frame before frames[0].
+ // In error messages frames are indexed starting from 0.
+ bool FrameReferencesAreValid(
+ rtc::ArrayView<const GenericFrameInfo> frames) const;
+
+ private:
+ ScalableVideoController& structure_controller_;
+ FrameDependenciesCalculator frame_deps_calculator_;
+ ChainDiffCalculator chain_diff_calculator_;
+ int64_t frame_id_ = 0;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_SVC_SCALABILITY_STRUCTURE_TEST_HELPERS_H_
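A minimal usage sketch of the wrapper (a hypothetical test, assuming the factory declared in create_scalability_structure.h and the ScalabilityMode::kL1T2 enumerator):

```cpp
#include <memory>
#include <vector>

#include "modules/video_coding/svc/create_scalability_structure.h"
#include "modules/video_coding/svc/scalability_structure_test_helpers.h"
#include "test/gtest.h"

namespace webrtc {

TEST(ScalabilityStructureWrapperExample, L1T2ReferencesAreValid) {
  std::unique_ptr<ScalableVideoController> controller =
      CreateScalabilityStructure(ScalabilityMode::kL1T2);
  ASSERT_TRUE(controller);
  ScalabilityStructureWrapper wrapper(*controller);
  // Four temporal units of an L1T2 structure alternate layers T0 T1 T0 T1.
  std::vector<GenericFrameInfo> frames = wrapper.GenerateFrames(4);
  EXPECT_TRUE(wrapper.FrameReferencesAreValid(frames));
}

}  // namespace webrtc
```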
diff --git a/third_party/libwebrtc/modules/video_coding/svc/scalability_structure_unittest.cc b/third_party/libwebrtc/modules/video_coding/svc/scalability_structure_unittest.cc
new file mode 100644
index 0000000000..86d7cc0fcf
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/svc/scalability_structure_unittest.cc
@@ -0,0 +1,389 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <memory>
+#include <ostream>
+#include <string>
+
+#include "absl/types/optional.h"
+#include "api/array_view.h"
+#include "api/transport/rtp/dependency_descriptor.h"
+#include "modules/video_coding/svc/create_scalability_structure.h"
+#include "modules/video_coding/svc/scalability_mode_util.h"
+#include "modules/video_coding/svc/scalability_structure_test_helpers.h"
+#include "modules/video_coding/svc/scalable_video_controller.h"
+#include "rtc_base/strings/string_builder.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+using ::testing::AllOf;
+using ::testing::Contains;
+using ::testing::Each;
+using ::testing::ElementsAreArray;
+using ::testing::Field;
+using ::testing::Ge;
+using ::testing::IsEmpty;
+using ::testing::Le;
+using ::testing::Lt;
+using ::testing::Not;
+using ::testing::NotNull;
+using ::testing::SizeIs;
+using ::testing::TestWithParam;
+using ::testing::Values;
+
+std::string FrameDependencyTemplateToString(const FrameDependencyTemplate& t) {
+ rtc::StringBuilder sb;
+ sb << "S" << t.spatial_id << "T" << t.temporal_id;
+ sb << ": dtis = ";
+ for (const auto dtis : t.decode_target_indications) {
+ switch (dtis) {
+ case DecodeTargetIndication::kNotPresent:
+ sb << "-";
+ break;
+ case DecodeTargetIndication::kDiscardable:
+ sb << "D";
+ break;
+ case DecodeTargetIndication::kSwitch:
+ sb << "S";
+ break;
+ case DecodeTargetIndication::kRequired:
+ sb << "R";
+ break;
+ default:
+ sb << "?";
+ break;
+ }
+ }
+ sb << ", frame diffs = { ";
+ for (int d : t.frame_diffs) {
+ sb << d << ", ";
+ }
+ sb << "}, chain diffs = { ";
+ for (int d : t.chain_diffs) {
+ sb << d << ", ";
+ }
+ sb << "}";
+ return sb.Release();
+}
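+// For example, a template with spatial_id = 0, temporal_id = 1,
+// decode_target_indications = {kDiscardable, kNotPresent}, frame_diffs = {1}
+// and chain_diffs = {1} is rendered by the function above as
+// "S0T1: dtis = D-, frame diffs = { 1, }, chain diffs = { 1, }".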
+
+struct SvcTestParam {
+ friend std::ostream& operator<<(std::ostream& os, const SvcTestParam& param) {
+ return os << param.name;
+ }
+
+ ScalabilityMode GetScalabilityMode() const {
+ absl::optional<ScalabilityMode> scalability_mode =
+ ScalabilityModeFromString(name);
+ RTC_CHECK(scalability_mode.has_value());
+ return *scalability_mode;
+ }
+
+ std::string name;
+ int num_temporal_units;
+};
+
+class ScalabilityStructureTest : public TestWithParam<SvcTestParam> {};
+
+TEST_P(ScalabilityStructureTest,
+ StaticConfigMatchesConfigReturnedByController) {
+ std::unique_ptr<ScalableVideoController> controller =
+ CreateScalabilityStructure(GetParam().GetScalabilityMode());
+ absl::optional<ScalableVideoController::StreamLayersConfig> static_config =
+ ScalabilityStructureConfig(GetParam().GetScalabilityMode());
+ ASSERT_THAT(controller, NotNull());
+ ASSERT_NE(static_config, absl::nullopt);
+ ScalableVideoController::StreamLayersConfig config =
+ controller->StreamConfig();
+ EXPECT_EQ(config.num_spatial_layers, static_config->num_spatial_layers);
+ EXPECT_EQ(config.num_temporal_layers, static_config->num_temporal_layers);
+ EXPECT_THAT(
+ rtc::MakeArrayView(config.scaling_factor_num, config.num_spatial_layers),
+ ElementsAreArray(static_config->scaling_factor_num,
+ static_config->num_spatial_layers));
+ EXPECT_THAT(
+ rtc::MakeArrayView(config.scaling_factor_den, config.num_spatial_layers),
+ ElementsAreArray(static_config->scaling_factor_den,
+ static_config->num_spatial_layers));
+}
+
+TEST_P(ScalabilityStructureTest,
+ NumberOfDecodeTargetsAndChainsAreInRangeAndConsistent) {
+ FrameDependencyStructure structure =
+ CreateScalabilityStructure(GetParam().GetScalabilityMode())
+ ->DependencyStructure();
+ EXPECT_GT(structure.num_decode_targets, 0);
+ EXPECT_LE(structure.num_decode_targets,
+ DependencyDescriptor::kMaxDecodeTargets);
+ EXPECT_GE(structure.num_chains, 0);
+ EXPECT_LE(structure.num_chains, structure.num_decode_targets);
+ if (structure.num_chains == 0) {
+ EXPECT_THAT(structure.decode_target_protected_by_chain, IsEmpty());
+ } else {
+ EXPECT_THAT(structure.decode_target_protected_by_chain,
+ AllOf(SizeIs(structure.num_decode_targets), Each(Ge(0)),
+ Each(Lt(structure.num_chains))));
+ }
+ EXPECT_THAT(structure.templates,
+ SizeIs(Lt(size_t{DependencyDescriptor::kMaxTemplates})));
+}
+
+TEST_P(ScalabilityStructureTest, TemplatesAreSortedByLayerId) {
+ FrameDependencyStructure structure =
+ CreateScalabilityStructure(GetParam().GetScalabilityMode())
+ ->DependencyStructure();
+ ASSERT_THAT(structure.templates, Not(IsEmpty()));
+ const auto& first_templates = structure.templates.front();
+ EXPECT_EQ(first_templates.spatial_id, 0);
+ EXPECT_EQ(first_templates.temporal_id, 0);
+ for (size_t i = 1; i < structure.templates.size(); ++i) {
+ const auto& prev_template = structure.templates[i - 1];
+ const auto& next_template = structure.templates[i];
+ if (next_template.spatial_id == prev_template.spatial_id &&
+ next_template.temporal_id == prev_template.temporal_id) {
+ // Same layer, next_layer_idc == 0
+ } else if (next_template.spatial_id == prev_template.spatial_id &&
+ next_template.temporal_id == prev_template.temporal_id + 1) {
+ // Next temporal layer, next_layer_idc == 1
+ } else if (next_template.spatial_id == prev_template.spatial_id + 1 &&
+ next_template.temporal_id == 0) {
+ // Next spatial layer, next_layer_idc == 2
+ } else {
+      // Everything else is invalid.
+ ADD_FAILURE() << "Invalid templates order. Template #" << i
+ << " with layer (" << next_template.spatial_id << ","
+ << next_template.temporal_id
+ << ") follows template with layer ("
+ << prev_template.spatial_id << ","
+ << prev_template.temporal_id << ").";
+ }
+ }
+}
+
+TEST_P(ScalabilityStructureTest, TemplatesMatchNumberOfDecodeTargetsAndChains) {
+ FrameDependencyStructure structure =
+ CreateScalabilityStructure(GetParam().GetScalabilityMode())
+ ->DependencyStructure();
+ EXPECT_THAT(
+ structure.templates,
+ Each(AllOf(Field(&FrameDependencyTemplate::decode_target_indications,
+ SizeIs(structure.num_decode_targets)),
+ Field(&FrameDependencyTemplate::chain_diffs,
+ SizeIs(structure.num_chains)))));
+}
+
+TEST_P(ScalabilityStructureTest, FrameInfoMatchesFrameDependencyStructure) {
+ std::unique_ptr<ScalableVideoController> svc_controller =
+ CreateScalabilityStructure(GetParam().GetScalabilityMode());
+ FrameDependencyStructure structure = svc_controller->DependencyStructure();
+ std::vector<GenericFrameInfo> frame_infos =
+ ScalabilityStructureWrapper(*svc_controller)
+ .GenerateFrames(GetParam().num_temporal_units);
+ for (size_t frame_id = 0; frame_id < frame_infos.size(); ++frame_id) {
+ const auto& frame = frame_infos[frame_id];
+ EXPECT_GE(frame.spatial_id, 0) << " for frame " << frame_id;
+ EXPECT_GE(frame.temporal_id, 0) << " for frame " << frame_id;
+ EXPECT_THAT(frame.decode_target_indications,
+ SizeIs(structure.num_decode_targets))
+ << " for frame " << frame_id;
+ EXPECT_THAT(frame.part_of_chain, SizeIs(structure.num_chains))
+ << " for frame " << frame_id;
+ }
+}
+
+TEST_P(ScalabilityStructureTest, ThereIsAPerfectTemplateForEachFrame) {
+ std::unique_ptr<ScalableVideoController> svc_controller =
+ CreateScalabilityStructure(GetParam().GetScalabilityMode());
+ FrameDependencyStructure structure = svc_controller->DependencyStructure();
+ std::vector<GenericFrameInfo> frame_infos =
+ ScalabilityStructureWrapper(*svc_controller)
+ .GenerateFrames(GetParam().num_temporal_units);
+ for (size_t frame_id = 0; frame_id < frame_infos.size(); ++frame_id) {
+ EXPECT_THAT(structure.templates, Contains(frame_infos[frame_id]))
+ << " for frame " << frame_id << ", Expected "
+ << FrameDependencyTemplateToString(frame_infos[frame_id]);
+ }
+}
+
+TEST_P(ScalabilityStructureTest, FrameDependsOnSameOrLowerLayer) {
+ std::unique_ptr<ScalableVideoController> svc_controller =
+ CreateScalabilityStructure(GetParam().GetScalabilityMode());
+ std::vector<GenericFrameInfo> frame_infos =
+ ScalabilityStructureWrapper(*svc_controller)
+ .GenerateFrames(GetParam().num_temporal_units);
+ int64_t num_frames = frame_infos.size();
+
+ for (int64_t frame_id = 0; frame_id < num_frames; ++frame_id) {
+ const auto& frame = frame_infos[frame_id];
+ for (int frame_diff : frame.frame_diffs) {
+ int64_t base_frame_id = frame_id - frame_diff;
+ const auto& base_frame = frame_infos[base_frame_id];
+ EXPECT_GE(frame.spatial_id, base_frame.spatial_id)
+ << "Frame " << frame_id << " depends on frame " << base_frame_id;
+ EXPECT_GE(frame.temporal_id, base_frame.temporal_id)
+ << "Frame " << frame_id << " depends on frame " << base_frame_id;
+ }
+ }
+}
+
+TEST_P(ScalabilityStructureTest, NoFrameDependsOnDiscardableOrNotPresent) {
+ std::unique_ptr<ScalableVideoController> svc_controller =
+ CreateScalabilityStructure(GetParam().GetScalabilityMode());
+ std::vector<GenericFrameInfo> frame_infos =
+ ScalabilityStructureWrapper(*svc_controller)
+ .GenerateFrames(GetParam().num_temporal_units);
+ int64_t num_frames = frame_infos.size();
+ FrameDependencyStructure structure = svc_controller->DependencyStructure();
+
+ for (int dt = 0; dt < structure.num_decode_targets; ++dt) {
+ for (int64_t frame_id = 0; frame_id < num_frames; ++frame_id) {
+ const auto& frame = frame_infos[frame_id];
+ if (frame.decode_target_indications[dt] ==
+ DecodeTargetIndication::kNotPresent) {
+ continue;
+ }
+ for (int frame_diff : frame.frame_diffs) {
+ int64_t base_frame_id = frame_id - frame_diff;
+ const auto& base_frame = frame_infos[base_frame_id];
+ EXPECT_NE(base_frame.decode_target_indications[dt],
+ DecodeTargetIndication::kNotPresent)
+ << "Frame " << frame_id << " depends on frame " << base_frame_id
+ << " that is not part of decode target#" << dt;
+ EXPECT_NE(base_frame.decode_target_indications[dt],
+ DecodeTargetIndication::kDiscardable)
+ << "Frame " << frame_id << " depends on frame " << base_frame_id
+ << " that is discardable for decode target#" << dt;
+ }
+ }
+ }
+}
+
+TEST_P(ScalabilityStructureTest, NoFrameDependsThroughSwitchIndication) {
+ std::unique_ptr<ScalableVideoController> svc_controller =
+ CreateScalabilityStructure(GetParam().GetScalabilityMode());
+ FrameDependencyStructure structure = svc_controller->DependencyStructure();
+ std::vector<GenericFrameInfo> frame_infos =
+ ScalabilityStructureWrapper(*svc_controller)
+ .GenerateFrames(GetParam().num_temporal_units);
+ int64_t num_frames = frame_infos.size();
+ std::vector<std::set<int64_t>> full_deps(num_frames);
+
+  // For each frame, calculate the set of all frames it depends on, both
+  // directly and indirectly.
+ for (int64_t frame_id = 0; frame_id < num_frames; ++frame_id) {
+ std::set<int64_t> all_base_frames;
+ for (int frame_diff : frame_infos[frame_id].frame_diffs) {
+ int64_t base_frame_id = frame_id - frame_diff;
+ all_base_frames.insert(base_frame_id);
+ const auto& indirect = full_deps[base_frame_id];
+ all_base_frames.insert(indirect.begin(), indirect.end());
+ }
+ full_deps[frame_id] = std::move(all_base_frames);
+ }
+
+  // Now check the switch indication: frames after the switch indication must
+  // not depend on any additional frames before the switch indication.
+ for (int dt = 0; dt < structure.num_decode_targets; ++dt) {
+ for (int64_t switch_frame_id = 0; switch_frame_id < num_frames;
+ ++switch_frame_id) {
+ if (frame_infos[switch_frame_id].decode_target_indications[dt] !=
+ DecodeTargetIndication::kSwitch) {
+ continue;
+ }
+ for (int64_t later_frame_id = switch_frame_id + 1;
+ later_frame_id < num_frames; ++later_frame_id) {
+ if (frame_infos[later_frame_id].decode_target_indications[dt] ==
+ DecodeTargetIndication::kNotPresent) {
+ continue;
+ }
+ for (int frame_diff : frame_infos[later_frame_id].frame_diffs) {
+ int64_t early_frame_id = later_frame_id - frame_diff;
+ if (early_frame_id < switch_frame_id) {
+ EXPECT_THAT(full_deps[switch_frame_id], Contains(early_frame_id))
+ << "For decode target #" << dt << " frame " << later_frame_id
+            << " depends on frame " << early_frame_id
+            << " that switch indication frame " << switch_frame_id
+            << " doesn't directly or indirectly depend on.";
+ }
+ }
+ }
+ }
+ }
+}
+
+TEST_P(ScalabilityStructureTest, ProduceNoFrameForDisabledLayers) {
+ std::unique_ptr<ScalableVideoController> svc_controller =
+ CreateScalabilityStructure(GetParam().GetScalabilityMode());
+ ScalableVideoController::StreamLayersConfig structure =
+ svc_controller->StreamConfig();
+
+ VideoBitrateAllocation all_bitrates;
+ for (int sid = 0; sid < structure.num_spatial_layers; ++sid) {
+ for (int tid = 0; tid < structure.num_temporal_layers; ++tid) {
+ all_bitrates.SetBitrate(sid, tid, 100'000);
+ }
+ }
+
+ svc_controller->OnRatesUpdated(all_bitrates);
+ ScalabilityStructureWrapper wrapper(*svc_controller);
+ std::vector<GenericFrameInfo> frames =
+ wrapper.GenerateFrames(GetParam().num_temporal_units);
+
+ for (int sid = 0; sid < structure.num_spatial_layers; ++sid) {
+ for (int tid = 0; tid < structure.num_temporal_layers; ++tid) {
+      // When all layers are enabled, expect a frame for each layer.
+ EXPECT_THAT(frames,
+ Contains(AllOf(Field(&GenericFrameInfo::spatial_id, sid),
+ Field(&GenericFrameInfo::temporal_id, tid))))
+ << "For layer (" << sid << "," << tid << ")";
+      // Restore bitrates for all layers before disabling a single layer.
+ VideoBitrateAllocation bitrates = all_bitrates;
+ bitrates.SetBitrate(sid, tid, 0);
+ svc_controller->OnRatesUpdated(bitrates);
+ // With layer (sid, tid) disabled, expect no frames are produced for it.
+ EXPECT_THAT(
+ wrapper.GenerateFrames(GetParam().num_temporal_units),
+ Not(Contains(AllOf(Field(&GenericFrameInfo::spatial_id, sid),
+ Field(&GenericFrameInfo::temporal_id, tid)))))
+ << "For layer (" << sid << "," << tid << ")";
+ }
+ }
+}
+
+INSTANTIATE_TEST_SUITE_P(
+ Svc,
+ ScalabilityStructureTest,
+ Values(SvcTestParam{"L1T1", /*num_temporal_units=*/3},
+ SvcTestParam{"L1T2", /*num_temporal_units=*/4},
+ SvcTestParam{"L1T3", /*num_temporal_units=*/8},
+ SvcTestParam{"L2T1", /*num_temporal_units=*/3},
+ SvcTestParam{"L2T1_KEY", /*num_temporal_units=*/3},
+ SvcTestParam{"L3T1", /*num_temporal_units=*/3},
+ SvcTestParam{"L3T3", /*num_temporal_units=*/8},
+ SvcTestParam{"S2T1", /*num_temporal_units=*/3},
+ SvcTestParam{"S2T3", /*num_temporal_units=*/8},
+ SvcTestParam{"S3T3", /*num_temporal_units=*/8},
+ SvcTestParam{"L2T2", /*num_temporal_units=*/4},
+ SvcTestParam{"L2T2_KEY", /*num_temporal_units=*/4},
+ SvcTestParam{"L2T2_KEY_SHIFT", /*num_temporal_units=*/4},
+ SvcTestParam{"L2T3", /*num_temporal_units=*/8},
+ SvcTestParam{"L2T3_KEY", /*num_temporal_units=*/8},
+ SvcTestParam{"L3T3_KEY", /*num_temporal_units=*/8}),
+ [](const testing::TestParamInfo<SvcTestParam>& info) {
+ return info.param.name;
+ });
+
+} // namespace
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/svc/scalability_structures_gn/moz.build b/third_party/libwebrtc/modules/video_coding/svc/scalability_structures_gn/moz.build
new file mode 100644
index 0000000000..dbd579d02d
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/svc/scalability_structures_gn/moz.build
@@ -0,0 +1,212 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+SOURCES += [
+ "/third_party/libwebrtc/modules/video_coding/svc/scalability_structure_key_svc.cc",
+ "/third_party/libwebrtc/modules/video_coding/svc/scalability_structure_simulcast.cc"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/video_coding/svc/create_scalability_structure.cc",
+ "/third_party/libwebrtc/modules/video_coding/svc/scalability_structure_full_svc.cc",
+ "/third_party/libwebrtc/modules/video_coding/svc/scalability_structure_l2t2_key_shift.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("scalability_structures_gn")
diff --git a/third_party/libwebrtc/modules/video_coding/svc/scalable_video_controller.h b/third_party/libwebrtc/modules/video_coding/svc/scalable_video_controller.h
new file mode 100644
index 0000000000..c7362657ec
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/svc/scalable_video_controller.h
@@ -0,0 +1,139 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef MODULES_VIDEO_CODING_SVC_SCALABLE_VIDEO_CONTROLLER_H_
+#define MODULES_VIDEO_CODING_SVC_SCALABLE_VIDEO_CONTROLLER_H_
+
+#include <vector>
+
+#include "absl/container/inlined_vector.h"
+#include "api/transport/rtp/dependency_descriptor.h"
+#include "api/video/video_bitrate_allocation.h"
+#include "common_video/generic_frame_descriptor/generic_frame_info.h"
+
+namespace webrtc {
+
+// Controls how video should be encoded to be scalable. Outputs results as a
+// buffer usage configuration for the encoder and enough details to communicate
+// the scalability structure via the dependency descriptor RTP header extension.
+class ScalableVideoController {
+ public:
+ struct StreamLayersConfig {
+ int num_spatial_layers = 1;
+ int num_temporal_layers = 1;
+ // Indicates if frames can reference frames of a different resolution.
+ bool uses_reference_scaling = true;
+    // Spatial layer scaling. Frames with spatial_id = i are expected to be
+    // encoded with original_resolution * scaling_factor_num[i] /
+    // scaling_factor_den[i].
+ int scaling_factor_num[DependencyDescriptor::kMaxSpatialIds] = {1, 1, 1, 1};
+ int scaling_factor_den[DependencyDescriptor::kMaxSpatialIds] = {1, 1, 1, 1};
+ };
+ class LayerFrameConfig {
+ public:
+ // Builders/setters.
+ LayerFrameConfig& Id(int value);
+ LayerFrameConfig& Keyframe();
+ LayerFrameConfig& S(int value);
+ LayerFrameConfig& T(int value);
+ LayerFrameConfig& Reference(int buffer_id);
+ LayerFrameConfig& Update(int buffer_id);
+ LayerFrameConfig& ReferenceAndUpdate(int buffer_id);
+
+ // Getters.
+ int Id() const { return id_; }
+ bool IsKeyframe() const { return is_keyframe_; }
+ int SpatialId() const { return spatial_id_; }
+ int TemporalId() const { return temporal_id_; }
+ const absl::InlinedVector<CodecBufferUsage, kMaxEncoderBuffers>& Buffers()
+ const {
+ return buffers_;
+ }
+
+ private:
+    // Id to match the configuration returned by NextFrameConfig with the
+    // (possibly modified) configuration passed back via OnEncodeDone.
+ // The meaning of the id is an implementation detail of
+ // the ScalableVideoController.
+ int id_ = 0;
+
+    // Indicates the frame should be encoded as a key frame. In particular,
+    // when `is_keyframe=true` the property `CodecBufferUsage::referenced`
+    // should be ignored and treated as false.
+ bool is_keyframe_ = false;
+
+ int spatial_id_ = 0;
+ int temporal_id_ = 0;
+    // Describes which buffers the encoder is allowed to reference and which
+    // buffers the encoder should update.
+ absl::InlinedVector<CodecBufferUsage, kMaxEncoderBuffers> buffers_;
+ };
+
+ virtual ~ScalableVideoController() = default;
+
+ // Returns video structure description for encoder to configure itself.
+ virtual StreamLayersConfig StreamConfig() const = 0;
+
+  // Returns the video structure description in a format compatible with the
+  // dependency descriptor RTP header extension.
+ virtual FrameDependencyStructure DependencyStructure() const = 0;
+
+  // Notifies the controller of updated bitrates per layer. In particular,
+  // notifies it when certain layers should be disabled.
+  // The controller shouldn't produce LayerFrameConfig for disabled layers.
+ virtual void OnRatesUpdated(const VideoBitrateAllocation& bitrates) = 0;
+
+  // When `restart` is true, the first `LayerFrameConfig` should have
+  // `is_keyframe` set to true.
+  // The returned vector shouldn't be empty.
+ virtual std::vector<LayerFrameConfig> NextFrameConfig(bool restart) = 0;
+
+ // Returns configuration to pass to EncoderCallback.
+ virtual GenericFrameInfo OnEncodeDone(const LayerFrameConfig& config) = 0;
+};
+
+// Below are implementation details.
+inline ScalableVideoController::LayerFrameConfig&
+ScalableVideoController::LayerFrameConfig::Id(int value) {
+ id_ = value;
+ return *this;
+}
+inline ScalableVideoController::LayerFrameConfig&
+ScalableVideoController::LayerFrameConfig::Keyframe() {
+ is_keyframe_ = true;
+ return *this;
+}
+inline ScalableVideoController::LayerFrameConfig&
+ScalableVideoController::LayerFrameConfig::S(int value) {
+ spatial_id_ = value;
+ return *this;
+}
+inline ScalableVideoController::LayerFrameConfig&
+ScalableVideoController::LayerFrameConfig::T(int value) {
+ temporal_id_ = value;
+ return *this;
+}
+inline ScalableVideoController::LayerFrameConfig&
+ScalableVideoController::LayerFrameConfig::Reference(int buffer_id) {
+ buffers_.emplace_back(buffer_id, /*referenced=*/true, /*updated=*/false);
+ return *this;
+}
+inline ScalableVideoController::LayerFrameConfig&
+ScalableVideoController::LayerFrameConfig::Update(int buffer_id) {
+ buffers_.emplace_back(buffer_id, /*referenced=*/false, /*updated=*/true);
+ return *this;
+}
+inline ScalableVideoController::LayerFrameConfig&
+ScalableVideoController::LayerFrameConfig::ReferenceAndUpdate(int buffer_id) {
+ buffers_.emplace_back(buffer_id, /*referenced=*/true, /*updated=*/true);
+ return *this;
+}
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_SVC_SCALABLE_VIDEO_CONTROLLER_H_
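As a quick illustration of the builder-style setters above, a sketch of describing a delta frame on temporal layer 1 that reads buffer 0 and writes buffer 1 (the variable name and id value are illustrative):

```cpp
ScalableVideoController::LayerFrameConfig config;
config.Id(1).S(0).T(1).Reference(0).Update(1);
// config.Buffers() now holds two CodecBufferUsage entries:
//   {id = 0, referenced = true,  updated = false}
//   {id = 1, referenced = false, updated = true}
```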
diff --git a/third_party/libwebrtc/modules/video_coding/svc/scalable_video_controller_gn/moz.build b/third_party/libwebrtc/modules/video_coding/svc/scalable_video_controller_gn/moz.build
new file mode 100644
index 0000000000..8e22ed6814
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/svc/scalable_video_controller_gn/moz.build
@@ -0,0 +1,201 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/video_coding/svc/scalable_video_controller_no_layering.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("scalable_video_controller_gn")
diff --git a/third_party/libwebrtc/modules/video_coding/svc/scalable_video_controller_no_layering.cc b/third_party/libwebrtc/modules/video_coding/svc/scalable_video_controller_no_layering.cc
new file mode 100644
index 0000000000..a9d530dd9d
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/svc/scalable_video_controller_no_layering.cc
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/video_coding/svc/scalable_video_controller_no_layering.h"
+
+#include <utility>
+#include <vector>
+
+#include "api/transport/rtp/dependency_descriptor.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+ScalableVideoControllerNoLayering::~ScalableVideoControllerNoLayering() =
+ default;
+
+ScalableVideoController::StreamLayersConfig
+ScalableVideoControllerNoLayering::StreamConfig() const {
+ StreamLayersConfig result;
+ result.num_spatial_layers = 1;
+ result.num_temporal_layers = 1;
+ result.uses_reference_scaling = false;
+ return result;
+}
+
+FrameDependencyStructure
+ScalableVideoControllerNoLayering::DependencyStructure() const {
+ FrameDependencyStructure structure;
+ structure.num_decode_targets = 1;
+ structure.num_chains = 1;
+ structure.decode_target_protected_by_chain = {0};
+
+ FrameDependencyTemplate key_frame;
+ key_frame.decode_target_indications = {DecodeTargetIndication::kSwitch};
+ key_frame.chain_diffs = {0};
+ structure.templates.push_back(key_frame);
+
+ FrameDependencyTemplate delta_frame;
+ delta_frame.decode_target_indications = {DecodeTargetIndication::kSwitch};
+ delta_frame.chain_diffs = {1};
+ delta_frame.frame_diffs = {1};
+ structure.templates.push_back(delta_frame);
+
+ return structure;
+}
+
+std::vector<ScalableVideoController::LayerFrameConfig>
+ScalableVideoControllerNoLayering::NextFrameConfig(bool restart) {
+ if (!enabled_) {
+ return {};
+ }
+ std::vector<LayerFrameConfig> result(1);
+ if (restart || start_) {
+ result[0].Id(0).Keyframe().Update(0);
+ } else {
+ result[0].Id(0).ReferenceAndUpdate(0);
+ }
+ start_ = false;
+ return result;
+}
+
+GenericFrameInfo ScalableVideoControllerNoLayering::OnEncodeDone(
+ const LayerFrameConfig& config) {
+ RTC_DCHECK_EQ(config.Id(), 0);
+ GenericFrameInfo frame_info;
+ frame_info.encoder_buffers = config.Buffers();
+ if (config.IsKeyframe()) {
+ for (auto& buffer : frame_info.encoder_buffers) {
+ buffer.referenced = false;
+ }
+ }
+ frame_info.decode_target_indications = {DecodeTargetIndication::kSwitch};
+ frame_info.part_of_chain = {true};
+ return frame_info;
+}
+
+void ScalableVideoControllerNoLayering::OnRatesUpdated(
+ const VideoBitrateAllocation& bitrates) {
+ enabled_ = bitrates.GetBitrate(0, 0) > 0;
+}
+
+} // namespace webrtc
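A sketch of the intended call sequence for this controller; the encode step is elided and the loop bound is arbitrary:

```cpp
ScalableVideoControllerNoLayering controller;
for (int i = 0; i < 3; ++i) {
  for (const auto& config : controller.NextFrameConfig(/*restart=*/false)) {
    // i == 0: a key frame config that only updates buffer 0.
    // i > 0: a delta frame config that references and updates buffer 0.
    // ... encode the frame as described by `config` ...
    GenericFrameInfo info = controller.OnEncodeDone(config);
    // `info` carries kSwitch for the single decode target and
    // part_of_chain = {true}.
  }
}
```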
diff --git a/third_party/libwebrtc/modules/video_coding/svc/scalable_video_controller_no_layering.h b/third_party/libwebrtc/modules/video_coding/svc/scalable_video_controller_no_layering.h
new file mode 100644
index 0000000000..6d66b61c8b
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/svc/scalable_video_controller_no_layering.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef MODULES_VIDEO_CODING_SVC_SCALABLE_VIDEO_CONTROLLER_NO_LAYERING_H_
+#define MODULES_VIDEO_CODING_SVC_SCALABLE_VIDEO_CONTROLLER_NO_LAYERING_H_
+
+#include <vector>
+
+#include "api/transport/rtp/dependency_descriptor.h"
+#include "api/video/video_bitrate_allocation.h"
+#include "common_video/generic_frame_descriptor/generic_frame_info.h"
+#include "modules/video_coding/svc/scalable_video_controller.h"
+
+namespace webrtc {
+
+class ScalableVideoControllerNoLayering : public ScalableVideoController {
+ public:
+ ~ScalableVideoControllerNoLayering() override;
+
+ StreamLayersConfig StreamConfig() const override;
+ FrameDependencyStructure DependencyStructure() const override;
+
+ std::vector<LayerFrameConfig> NextFrameConfig(bool restart) override;
+ GenericFrameInfo OnEncodeDone(const LayerFrameConfig& config) override;
+ void OnRatesUpdated(const VideoBitrateAllocation& bitrates) override;
+
+ private:
+ bool start_ = true;
+ bool enabled_ = true;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_SVC_SCALABLE_VIDEO_CONTROLLER_NO_LAYERING_H_
diff --git a/third_party/libwebrtc/modules/video_coding/svc/svc_rate_allocator.cc b/third_party/libwebrtc/modules/video_coding/svc/svc_rate_allocator.cc
new file mode 100644
index 0000000000..b6ae0d7430
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/svc/svc_rate_allocator.cc
@@ -0,0 +1,452 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/svc/svc_rate_allocator.h"
+
+#include <algorithm>
+#include <cmath>
+#include <cstddef>
+#include <numeric>
+#include <vector>
+
+#include "absl/container/inlined_vector.h"
+#include "modules/video_coding/svc/create_scalability_structure.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace {
+
+constexpr float kSpatialLayeringRateScalingFactor = 0.55f;
+constexpr float kTemporalLayeringRateScalingFactor = 0.55f;
+
+struct ActiveSpatialLayers {
+ size_t first = 0;
+ size_t num = 0;
+};
+
+ActiveSpatialLayers GetActiveSpatialLayers(const VideoCodec& codec,
+ size_t num_spatial_layers) {
+ ActiveSpatialLayers active;
+ for (active.first = 0; active.first < num_spatial_layers; ++active.first) {
+ if (codec.spatialLayers[active.first].active) {
+ break;
+ }
+ }
+
+ size_t last_active_layer = active.first;
+ for (; last_active_layer < num_spatial_layers; ++last_active_layer) {
+ if (!codec.spatialLayers[last_active_layer].active) {
+ break;
+ }
+ }
+ active.num = last_active_layer - active.first;
+
+ return active;
+}
+
+std::vector<DataRate> AdjustAndVerify(
+ const VideoCodec& codec,
+ size_t first_active_layer,
+ const std::vector<DataRate>& spatial_layer_rates) {
+ std::vector<DataRate> adjusted_spatial_layer_rates;
+  // Keep track of the rate that couldn't be applied to the previous layer due
+  // to the max bitrate constraint, and try to pass it forward to the next one.
+ DataRate excess_rate = DataRate::Zero();
+ for (size_t sl_idx = 0; sl_idx < spatial_layer_rates.size(); ++sl_idx) {
+ DataRate min_rate = DataRate::KilobitsPerSec(
+ codec.spatialLayers[first_active_layer + sl_idx].minBitrate);
+ DataRate max_rate = DataRate::KilobitsPerSec(
+ codec.spatialLayers[first_active_layer + sl_idx].maxBitrate);
+
+ DataRate layer_rate = spatial_layer_rates[sl_idx] + excess_rate;
+ if (layer_rate < min_rate) {
+ // Not enough rate to reach min bitrate for desired number of layers,
+ // abort allocation.
+ if (spatial_layer_rates.size() == 1) {
+ return spatial_layer_rates;
+ }
+ return adjusted_spatial_layer_rates;
+ }
+
+ if (layer_rate <= max_rate) {
+ excess_rate = DataRate::Zero();
+ adjusted_spatial_layer_rates.push_back(layer_rate);
+ } else {
+ excess_rate = layer_rate - max_rate;
+ adjusted_spatial_layer_rates.push_back(max_rate);
+ }
+ }
+
+ return adjusted_spatial_layer_rates;
+}
+
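+// For example, SplitBitrate(3, DataRate::KilobitsPerSec(1000), 0.55) below
+// uses the denominator 1 + 0.55 + 0.3025 = 1.8525 and returns roughly
+// {163, 297, 540} kbps for layers 0, 1 and 2; the lowest layer gets the
+// smallest share and rounding leftovers go to the last layer.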
+static std::vector<DataRate> SplitBitrate(size_t num_layers,
+ DataRate total_bitrate,
+ float rate_scaling_factor) {
+ std::vector<DataRate> bitrates;
+
+ double denominator = 0.0;
+ for (size_t layer_idx = 0; layer_idx < num_layers; ++layer_idx) {
+ denominator += std::pow(rate_scaling_factor, layer_idx);
+ }
+
+ double numerator = std::pow(rate_scaling_factor, num_layers - 1);
+ for (size_t layer_idx = 0; layer_idx < num_layers; ++layer_idx) {
+ bitrates.push_back(numerator * total_bitrate / denominator);
+ numerator /= rate_scaling_factor;
+ }
+
+ const DataRate sum =
+ std::accumulate(bitrates.begin(), bitrates.end(), DataRate::Zero());
+
+  // Keep the sum of the split bitrates equal to the total bitrate by adding
+  // the bits lost to rounding to, or subtracting them from, the last layer.
+ if (total_bitrate > sum) {
+ bitrates.back() += total_bitrate - sum;
+ } else if (total_bitrate < sum) {
+ bitrates.back() -= sum - total_bitrate;
+ }
+
+ return bitrates;
+}
+
+// Returns the minimum bitrate needed for `num_active_layers` spatial layers to
+// become active using the configuration specified by `codec`.
+DataRate FindLayerTogglingThreshold(const VideoCodec& codec,
+ size_t first_active_layer,
+ size_t num_active_layers) {
+ if (num_active_layers == 1) {
+ return DataRate::KilobitsPerSec(codec.spatialLayers[0].minBitrate);
+ }
+
+ if (codec.mode == VideoCodecMode::kRealtimeVideo) {
+ DataRate lower_bound = DataRate::Zero();
+ DataRate upper_bound = DataRate::Zero();
+ if (num_active_layers > 1) {
+ for (size_t i = 0; i < num_active_layers - 1; ++i) {
+ lower_bound += DataRate::KilobitsPerSec(
+ codec.spatialLayers[first_active_layer + i].minBitrate);
+ upper_bound += DataRate::KilobitsPerSec(
+ codec.spatialLayers[first_active_layer + i].maxBitrate);
+ }
+ }
+ upper_bound += DataRate::KilobitsPerSec(
+ codec.spatialLayers[first_active_layer + num_active_layers - 1]
+ .minBitrate);
+
+    // Do a binary search until the lower and upper bounds are the highest
+    // bitrate for `num_active_layers` - 1 layers and the lowest bitrate for
+    // `num_active_layers` layers, respectively.
+ while (upper_bound - lower_bound > DataRate::BitsPerSec(1)) {
+ DataRate try_rate = (lower_bound + upper_bound) / 2;
+ if (AdjustAndVerify(codec, first_active_layer,
+ SplitBitrate(num_active_layers, try_rate,
+ kSpatialLayeringRateScalingFactor))
+ .size() == num_active_layers) {
+ upper_bound = try_rate;
+ } else {
+ lower_bound = try_rate;
+ }
+ }
+ return upper_bound;
+ } else {
+ DataRate toggling_rate = DataRate::Zero();
+ for (size_t i = 0; i < num_active_layers - 1; ++i) {
+ toggling_rate += DataRate::KilobitsPerSec(
+ codec.spatialLayers[first_active_layer + i].targetBitrate);
+ }
+ toggling_rate += DataRate::KilobitsPerSec(
+ codec.spatialLayers[first_active_layer + num_active_layers - 1]
+ .minBitrate);
+ return toggling_rate;
+ }
+}
+
+} // namespace
+
+SvcRateAllocator::NumLayers SvcRateAllocator::GetNumLayers(
+ const VideoCodec& codec) {
+ NumLayers layers;
+ if (absl::optional<ScalabilityMode> scalability_mode =
+ codec.GetScalabilityMode();
+ scalability_mode.has_value()) {
+ if (auto structure = CreateScalabilityStructure(*scalability_mode)) {
+ ScalableVideoController::StreamLayersConfig config =
+ structure->StreamConfig();
+ layers.spatial = config.num_spatial_layers;
+ layers.temporal = config.num_temporal_layers;
+ return layers;
+ }
+ }
+ if (codec.codecType == kVideoCodecVP9) {
+ layers.spatial = codec.VP9().numberOfSpatialLayers;
+ layers.temporal = codec.VP9().numberOfTemporalLayers;
+ return layers;
+ }
+ layers.spatial = 1;
+ layers.temporal = 1;
+ return layers;
+}
+
+SvcRateAllocator::SvcRateAllocator(const VideoCodec& codec)
+ : codec_(codec),
+ num_layers_(GetNumLayers(codec)),
+ experiment_settings_(StableTargetRateExperiment::ParseFromFieldTrials()),
+ cumulative_layer_start_bitrates_(GetLayerStartBitrates(codec)),
+ last_active_layer_count_(0) {
+ RTC_DCHECK_GT(num_layers_.spatial, 0);
+ RTC_DCHECK_LE(num_layers_.spatial, kMaxSpatialLayers);
+ RTC_DCHECK_GT(num_layers_.temporal, 0);
+ RTC_DCHECK_LE(num_layers_.temporal, 3);
+ for (size_t layer_idx = 0; layer_idx < num_layers_.spatial; ++layer_idx) {
+ // Verify min <= target <= max.
+ if (codec.spatialLayers[layer_idx].active) {
+ RTC_DCHECK_GT(codec.spatialLayers[layer_idx].maxBitrate, 0);
+ RTC_DCHECK_GE(codec.spatialLayers[layer_idx].maxBitrate,
+ codec.spatialLayers[layer_idx].minBitrate);
+ RTC_DCHECK_GE(codec.spatialLayers[layer_idx].targetBitrate,
+ codec.spatialLayers[layer_idx].minBitrate);
+ RTC_DCHECK_GE(codec.spatialLayers[layer_idx].maxBitrate,
+ codec.spatialLayers[layer_idx].targetBitrate);
+ }
+ }
+}
+
+VideoBitrateAllocation SvcRateAllocator::Allocate(
+ VideoBitrateAllocationParameters parameters) {
+ DataRate total_bitrate = parameters.total_bitrate;
+ if (codec_.maxBitrate != 0) {
+ total_bitrate =
+ std::min(total_bitrate, DataRate::KilobitsPerSec(codec_.maxBitrate));
+ }
+
+ if (codec_.spatialLayers[0].targetBitrate == 0) {
+ // Delegate rate distribution to encoder wrapper if bitrate thresholds
+ // are not set.
+ VideoBitrateAllocation bitrate_allocation;
+ bitrate_allocation.SetBitrate(0, 0, total_bitrate.bps());
+ return bitrate_allocation;
+ }
+
+ const ActiveSpatialLayers active_layers =
+ GetActiveSpatialLayers(codec_, num_layers_.spatial);
+ size_t num_spatial_layers = active_layers.num;
+
+ if (num_spatial_layers == 0) {
+ return VideoBitrateAllocation(); // All layers are deactivated.
+ }
+
+ // Figure out how many spatial layers should be active.
+ if (experiment_settings_.IsEnabled() &&
+ parameters.stable_bitrate > DataRate::Zero()) {
+ double hysteresis_factor;
+ if (codec_.mode == VideoCodecMode::kScreensharing) {
+ hysteresis_factor = experiment_settings_.GetScreenshareHysteresisFactor();
+ } else {
+ hysteresis_factor = experiment_settings_.GetVideoHysteresisFactor();
+ }
+
+ DataRate stable_rate =
+ std::min(parameters.total_bitrate, parameters.stable_bitrate);
+ // First check if bitrate has grown large enough to enable new layers.
+ size_t num_enabled_with_hysteresis =
+ FindNumEnabledLayers(stable_rate / hysteresis_factor);
+ if (num_enabled_with_hysteresis >= last_active_layer_count_) {
+ num_spatial_layers = num_enabled_with_hysteresis;
+ } else {
+ // We could not enable new layers, check if any should be disabled.
+ num_spatial_layers =
+ std::min(last_active_layer_count_, FindNumEnabledLayers(stable_rate));
+ }
+ } else {
+ num_spatial_layers = FindNumEnabledLayers(parameters.total_bitrate);
+ }
+ last_active_layer_count_ = num_spatial_layers;
+
+ VideoBitrateAllocation allocation;
+ if (codec_.mode == VideoCodecMode::kRealtimeVideo) {
+ allocation = GetAllocationNormalVideo(total_bitrate, active_layers.first,
+ num_spatial_layers);
+ } else {
+ allocation = GetAllocationScreenSharing(total_bitrate, active_layers.first,
+ num_spatial_layers);
+ }
+ allocation.set_bw_limited(num_spatial_layers < active_layers.num);
+ return allocation;
+}
+
+VideoBitrateAllocation SvcRateAllocator::GetAllocationNormalVideo(
+ DataRate total_bitrate,
+ size_t first_active_layer,
+ size_t num_spatial_layers) const {
+ std::vector<DataRate> spatial_layer_rates;
+ if (num_spatial_layers == 0) {
+ // Not enough rate for even the base layer. Force allocation at the total
+ // bitrate anyway.
+ num_spatial_layers = 1;
+ spatial_layer_rates.push_back(total_bitrate);
+ } else {
+ spatial_layer_rates =
+ AdjustAndVerify(codec_, first_active_layer,
+ SplitBitrate(num_spatial_layers, total_bitrate,
+ kSpatialLayeringRateScalingFactor));
+ RTC_DCHECK_EQ(spatial_layer_rates.size(), num_spatial_layers);
+ }
+
+ VideoBitrateAllocation bitrate_allocation;
+
+ for (size_t sl_idx = 0; sl_idx < num_spatial_layers; ++sl_idx) {
+ std::vector<DataRate> temporal_layer_rates =
+ SplitBitrate(num_layers_.temporal, spatial_layer_rates[sl_idx],
+ kTemporalLayeringRateScalingFactor);
+
+ // Distribute rate across temporal layers. Allocate more bits to lower
+ // layers since they are used for prediction of higher layers and their
+ // references are far apart.
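+    // For example, a 540 kbps spatial layer split with factor 0.55 gives
+    // temporal_layer_rates of roughly {88, 160, 291} kbps; in the three-layer
+    // case below, TL0 gets temporal_layer_rates[2], TL1 gets
+    // temporal_layer_rates[0] and TL2 gets temporal_layer_rates[1].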
+ if (num_layers_.temporal == 1) {
+ bitrate_allocation.SetBitrate(sl_idx + first_active_layer, 0,
+ temporal_layer_rates[0].bps());
+ } else if (num_layers_.temporal == 2) {
+ bitrate_allocation.SetBitrate(sl_idx + first_active_layer, 0,
+ temporal_layer_rates[1].bps());
+ bitrate_allocation.SetBitrate(sl_idx + first_active_layer, 1,
+ temporal_layer_rates[0].bps());
+ } else {
+ RTC_CHECK_EQ(num_layers_.temporal, 3);
+      // With three temporal layers, the high layer has two frames and the
+      // middle layer has one frame within a GOP (in between two consecutive
+      // low layer frames). Thus the high layer requires more bits (comparing
+      // the pure bitrate of the layer, excluding the bitrate of base layers)
+      // to keep quality on par with the lower layers.
+ bitrate_allocation.SetBitrate(sl_idx + first_active_layer, 0,
+ temporal_layer_rates[2].bps());
+ bitrate_allocation.SetBitrate(sl_idx + first_active_layer, 1,
+ temporal_layer_rates[0].bps());
+ bitrate_allocation.SetBitrate(sl_idx + first_active_layer, 2,
+ temporal_layer_rates[1].bps());
+ }
+ }
+
+ return bitrate_allocation;
+}
+
+// Bitrate is allocated such that the highest enabled layer will have between
+// the min and max bitrate, and all other layers will have exactly the target
+// bitrate allocated.
+VideoBitrateAllocation SvcRateAllocator::GetAllocationScreenSharing(
+ DataRate total_bitrate,
+ size_t first_active_layer,
+ size_t num_spatial_layers) const {
+ VideoBitrateAllocation bitrate_allocation;
+
+ if (num_spatial_layers == 0 ||
+ total_bitrate <
+ DataRate::KilobitsPerSec(
+ codec_.spatialLayers[first_active_layer].minBitrate)) {
+ // Always enable at least one layer.
+ bitrate_allocation.SetBitrate(first_active_layer, 0, total_bitrate.bps());
+ return bitrate_allocation;
+ }
+
+ DataRate allocated_rate = DataRate::Zero();
+ DataRate top_layer_rate = DataRate::Zero();
+ size_t sl_idx;
+ for (sl_idx = first_active_layer;
+ sl_idx < first_active_layer + num_spatial_layers; ++sl_idx) {
+ const DataRate min_rate =
+ DataRate::KilobitsPerSec(codec_.spatialLayers[sl_idx].minBitrate);
+ const DataRate target_rate =
+ DataRate::KilobitsPerSec(codec_.spatialLayers[sl_idx].targetBitrate);
+
+ if (allocated_rate + min_rate > total_bitrate) {
+ // Use stable rate to determine if layer should be enabled.
+ break;
+ }
+
+ top_layer_rate = std::min(target_rate, total_bitrate - allocated_rate);
+ bitrate_allocation.SetBitrate(sl_idx, 0, top_layer_rate.bps());
+ allocated_rate += top_layer_rate;
+ }
+
+ if (sl_idx > 0 && total_bitrate - allocated_rate > DataRate::Zero()) {
+ // Add leftover to the last allocated layer.
+ top_layer_rate = std::min(
+ top_layer_rate + (total_bitrate - allocated_rate),
+ DataRate::KilobitsPerSec(codec_.spatialLayers[sl_idx - 1].maxBitrate));
+ bitrate_allocation.SetBitrate(sl_idx - 1, 0, top_layer_rate.bps());
+ }
+
+ return bitrate_allocation;
+}
+
+size_t SvcRateAllocator::FindNumEnabledLayers(DataRate target_rate) const {
+ if (cumulative_layer_start_bitrates_.empty()) {
+ return 0;
+ }
+
+ size_t num_enabled_layers = 0;
+ for (DataRate start_rate : cumulative_layer_start_bitrates_) {
+ // First layer is always enabled.
+ if (num_enabled_layers == 0 || start_rate <= target_rate) {
+ ++num_enabled_layers;
+ } else {
+ break;
+ }
+ }
+
+ return num_enabled_layers;
+}
+
+DataRate SvcRateAllocator::GetMaxBitrate(const VideoCodec& codec) {
+ const NumLayers num_layers = GetNumLayers(codec);
+ const ActiveSpatialLayers active_layers =
+ GetActiveSpatialLayers(codec, num_layers.spatial);
+
+ DataRate max_bitrate = DataRate::Zero();
+ for (size_t sl_idx = 0; sl_idx < active_layers.num; ++sl_idx) {
+ max_bitrate += DataRate::KilobitsPerSec(
+ codec.spatialLayers[active_layers.first + sl_idx].maxBitrate);
+ }
+
+ if (codec.maxBitrate != 0) {
+ max_bitrate =
+ std::min(max_bitrate, DataRate::KilobitsPerSec(codec.maxBitrate));
+ }
+
+ return max_bitrate;
+}
+
+DataRate SvcRateAllocator::GetPaddingBitrate(const VideoCodec& codec) {
+ auto start_bitrate = GetLayerStartBitrates(codec);
+ if (start_bitrate.empty()) {
+ return DataRate::Zero(); // All layers are deactivated.
+ }
+
+ return start_bitrate.back();
+}
+
+absl::InlinedVector<DataRate, kMaxSpatialLayers>
+SvcRateAllocator::GetLayerStartBitrates(const VideoCodec& codec) {
+ absl::InlinedVector<DataRate, kMaxSpatialLayers> start_bitrates;
+ const NumLayers num_layers = GetNumLayers(codec);
+ const ActiveSpatialLayers active_layers =
+ GetActiveSpatialLayers(codec, num_layers.spatial);
+ DataRate last_rate = DataRate::Zero();
+ for (size_t i = 1; i <= active_layers.num; ++i) {
+ DataRate layer_toggling_rate =
+ FindLayerTogglingThreshold(codec, active_layers.first, i);
+ start_bitrates.push_back(layer_toggling_rate);
+ RTC_DCHECK_LE(last_rate, layer_toggling_rate);
+ last_rate = layer_toggling_rate;
+ }
+ return start_bitrates;
+}
+
+} // namespace webrtc
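A hedged usage sketch of the allocator. The VideoCodec and spatialLayers field names are taken from the code above; the exact VideoBitrateAllocationParameters constructor shown is an assumption about its signature:

```cpp
VideoCodec codec;
codec.codecType = kVideoCodecVP9;
codec.mode = VideoCodecMode::kRealtimeVideo;
codec.VP9()->numberOfSpatialLayers = 2;
codec.VP9()->numberOfTemporalLayers = 3;
for (int sl = 0; sl < 2; ++sl) {
  codec.spatialLayers[sl].active = true;
  codec.spatialLayers[sl].minBitrate = 100;     // kbps
  codec.spatialLayers[sl].targetBitrate = 300;  // kbps
  codec.spatialLayers[sl].maxBitrate = 500;     // kbps
}

SvcRateAllocator allocator(codec);
VideoBitrateAllocation allocation = allocator.Allocate(
    VideoBitrateAllocationParameters(DataRate::KilobitsPerSec(800),
                                     /*framerate=*/30.0));
// Layers whose cumulative start threshold exceeds 800 kbps stay at zero;
// the rest share the bitrate as in GetAllocationNormalVideo() above.
```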
diff --git a/third_party/libwebrtc/modules/video_coding/svc/svc_rate_allocator.h b/third_party/libwebrtc/modules/video_coding/svc/svc_rate_allocator.h
new file mode 100644
index 0000000000..bd75fca284
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/svc/svc_rate_allocator.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_SVC_SVC_RATE_ALLOCATOR_H_
+#define MODULES_VIDEO_CODING_SVC_SVC_RATE_ALLOCATOR_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "absl/container/inlined_vector.h"
+#include "api/video/video_bitrate_allocation.h"
+#include "api/video/video_bitrate_allocator.h"
+#include "api/video/video_codec_constants.h"
+#include "api/video_codecs/video_codec.h"
+#include "rtc_base/experiments/stable_target_rate_experiment.h"
+
+namespace webrtc {
+
+class SvcRateAllocator : public VideoBitrateAllocator {
+ public:
+ explicit SvcRateAllocator(const VideoCodec& codec);
+
+ VideoBitrateAllocation Allocate(
+ VideoBitrateAllocationParameters parameters) override;
+
+ static DataRate GetMaxBitrate(const VideoCodec& codec);
+ static DataRate GetPaddingBitrate(const VideoCodec& codec);
+ static absl::InlinedVector<DataRate, kMaxSpatialLayers> GetLayerStartBitrates(
+ const VideoCodec& codec);
+
+ private:
+ struct NumLayers {
+ size_t spatial = 1;
+ size_t temporal = 1;
+ };
+
+ static NumLayers GetNumLayers(const VideoCodec& codec);
+ VideoBitrateAllocation GetAllocationNormalVideo(
+ DataRate total_bitrate,
+ size_t first_active_layer,
+ size_t num_spatial_layers) const;
+
+ VideoBitrateAllocation GetAllocationScreenSharing(
+ DataRate total_bitrate,
+ size_t first_active_layer,
+ size_t num_spatial_layers) const;
+
+ // Returns the number of layers that are active and have enough bitrate to
+ // actually be enabled.
+ size_t FindNumEnabledLayers(DataRate target_rate) const;
+
+ const VideoCodec codec_;
+ const NumLayers num_layers_;
+ const StableTargetRateExperiment experiment_settings_;
+ const absl::InlinedVector<DataRate, kMaxSpatialLayers>
+ cumulative_layer_start_bitrates_;
+ size_t last_active_layer_count_;
+};
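+
+// Illustrative usage (hypothetical values, mirroring the unit tests):
+//   SvcRateAllocator allocator(codec);
+//   VideoBitrateAllocation allocation = allocator.Allocate(
+//       VideoBitrateAllocationParameters(/*total_bitrate_bps=*/1'000'000,
+//                                        /*framerate=*/30));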
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_SVC_SVC_RATE_ALLOCATOR_H_
diff --git a/third_party/libwebrtc/modules/video_coding/svc/svc_rate_allocator_gn/moz.build b/third_party/libwebrtc/modules/video_coding/svc/svc_rate_allocator_gn/moz.build
new file mode 100644
index 0000000000..cb5e4a4bf7
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/svc/svc_rate_allocator_gn/moz.build
@@ -0,0 +1,205 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/video_coding/svc/svc_rate_allocator.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("svc_rate_allocator_gn")
diff --git a/third_party/libwebrtc/modules/video_coding/svc/svc_rate_allocator_unittest.cc b/third_party/libwebrtc/modules/video_coding/svc/svc_rate_allocator_unittest.cc
new file mode 100644
index 0000000000..b3a365d722
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/svc/svc_rate_allocator_unittest.cc
@@ -0,0 +1,584 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/svc/svc_rate_allocator.h"
+
+#include <algorithm>
+#include <vector>
+
+#include "modules/video_coding/codecs/vp9/svc_config.h"
+#include "rtc_base/checks.h"
+#include "test/field_trial.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace test {
+namespace {
+static VideoCodec Configure(size_t width,
+ size_t height,
+ size_t num_spatial_layers,
+ size_t num_temporal_layers,
+ bool is_screen_sharing) {
+ VideoCodec codec;
+ codec.width = width;
+ codec.height = height;
+ codec.codecType = kVideoCodecVP9;
+ codec.mode = is_screen_sharing ? VideoCodecMode::kScreensharing
+ : VideoCodecMode::kRealtimeVideo;
+
+ std::vector<SpatialLayer> spatial_layers =
+ GetSvcConfig(width, height, 30, /*first_active_layer=*/0,
+ num_spatial_layers, num_temporal_layers, is_screen_sharing);
+ RTC_CHECK_LE(spatial_layers.size(), kMaxSpatialLayers);
+
+ codec.VP9()->numberOfSpatialLayers =
+ std::min<unsigned char>(num_spatial_layers, spatial_layers.size());
+ codec.VP9()->numberOfTemporalLayers = std::min<unsigned char>(
+ num_temporal_layers, spatial_layers.back().numberOfTemporalLayers);
+
+ for (size_t sl_idx = 0; sl_idx < spatial_layers.size(); ++sl_idx) {
+ codec.spatialLayers[sl_idx] = spatial_layers[sl_idx];
+ }
+
+ return codec;
+}
+
+} // namespace
+
+TEST(SvcRateAllocatorTest, SingleLayerFor320x180Input) {
+ VideoCodec codec = Configure(320, 180, 3, 3, false);
+ SvcRateAllocator allocator = SvcRateAllocator(codec);
+
+ VideoBitrateAllocation allocation =
+ allocator.Allocate(VideoBitrateAllocationParameters(1000 * 1000, 30));
+
+ EXPECT_GT(allocation.GetSpatialLayerSum(0), 0u);
+ EXPECT_EQ(allocation.GetSpatialLayerSum(1), 0u);
+}
+
+TEST(SvcRateAllocatorTest, TwoLayersFor640x360Input) {
+ VideoCodec codec = Configure(640, 360, 3, 3, false);
+ SvcRateAllocator allocator = SvcRateAllocator(codec);
+
+ VideoBitrateAllocation allocation =
+ allocator.Allocate(VideoBitrateAllocationParameters(1000 * 1000, 30));
+
+ EXPECT_GT(allocation.GetSpatialLayerSum(0), 0u);
+ EXPECT_GT(allocation.GetSpatialLayerSum(1), 0u);
+ EXPECT_EQ(allocation.GetSpatialLayerSum(2), 0u);
+}
+
+TEST(SvcRateAllocatorTest, ThreeLayersFor1280x720Input) {
+ VideoCodec codec = Configure(1280, 720, 3, 3, false);
+ SvcRateAllocator allocator = SvcRateAllocator(codec);
+
+ VideoBitrateAllocation allocation =
+ allocator.Allocate(VideoBitrateAllocationParameters(1000 * 1000, 30));
+
+ EXPECT_GT(allocation.GetSpatialLayerSum(0), 0u);
+ EXPECT_GT(allocation.GetSpatialLayerSum(1), 0u);
+ EXPECT_GT(allocation.GetSpatialLayerSum(2), 0u);
+}
+
+TEST(SvcRateAllocatorTest,
+     BaseLayerNonZeroBitrateEvenIfTotalIsLessThanMinimum) {
+ VideoCodec codec = Configure(1280, 720, 3, 3, false);
+ SvcRateAllocator allocator = SvcRateAllocator(codec);
+
+ const SpatialLayer* layers = codec.spatialLayers;
+
+ VideoBitrateAllocation allocation = allocator.Allocate(
+ VideoBitrateAllocationParameters(layers[0].minBitrate * 1000 / 2, 30));
+
+ EXPECT_GT(allocation.GetSpatialLayerSum(0), 0u);
+ EXPECT_LT(allocation.GetSpatialLayerSum(0), layers[0].minBitrate * 1000);
+ EXPECT_EQ(allocation.GetSpatialLayerSum(1), 0u);
+}
+
+TEST(SvcRateAllocatorTest, Disable640x360Layer) {
+ VideoCodec codec = Configure(1280, 720, 3, 3, false);
+ SvcRateAllocator allocator = SvcRateAllocator(codec);
+
+ const SpatialLayer* layers = codec.spatialLayers;
+
+ size_t min_bitrate_for_640x360_layer_kbps =
+ layers[0].minBitrate + layers[1].minBitrate;
+
+ VideoBitrateAllocation allocation =
+ allocator.Allocate(VideoBitrateAllocationParameters(
+ min_bitrate_for_640x360_layer_kbps * 1000 - 1, 30));
+
+ EXPECT_GT(allocation.GetSpatialLayerSum(0), 0u);
+ EXPECT_EQ(allocation.GetSpatialLayerSum(1), 0u);
+}
+
+TEST(SvcRateAllocatorTest, Disable1280x720Layer) {
+ VideoCodec codec = Configure(1280, 720, 3, 3, false);
+ SvcRateAllocator allocator = SvcRateAllocator(codec);
+
+ const SpatialLayer* layers = codec.spatialLayers;
+
+ size_t min_bitrate_for_1280x720_layer_kbps =
+ layers[0].minBitrate + layers[1].minBitrate + layers[2].minBitrate;
+
+ VideoBitrateAllocation allocation =
+ allocator.Allocate(VideoBitrateAllocationParameters(
+ min_bitrate_for_1280x720_layer_kbps * 1000 - 1, 30));
+
+ EXPECT_GT(allocation.GetSpatialLayerSum(0), 0u);
+ EXPECT_GT(allocation.GetSpatialLayerSum(1), 0u);
+ EXPECT_EQ(allocation.GetSpatialLayerSum(2), 0u);
+}
+
+TEST(SvcRateAllocatorTest, BitrateIsCapped) {
+ VideoCodec codec = Configure(1280, 720, 3, 3, false);
+ SvcRateAllocator allocator = SvcRateAllocator(codec);
+
+ const SpatialLayer* layers = codec.spatialLayers;
+
+ const uint32_t link_mbps = 100;
+ VideoBitrateAllocation allocation = allocator.Allocate(
+ VideoBitrateAllocationParameters(link_mbps * 1000000, 30));
+
+ EXPECT_EQ(allocation.get_sum_kbps(),
+ layers[0].maxBitrate + layers[1].maxBitrate + layers[2].maxBitrate);
+ EXPECT_EQ(allocation.GetSpatialLayerSum(0) / 1000, layers[0].maxBitrate);
+ EXPECT_EQ(allocation.GetSpatialLayerSum(1) / 1000, layers[1].maxBitrate);
+ EXPECT_EQ(allocation.GetSpatialLayerSum(2) / 1000, layers[2].maxBitrate);
+}
+
+TEST(SvcRateAllocatorTest, MinBitrateToGetQualityLayer) {
+ VideoCodec codec = Configure(1280, 720, 3, 1, true);
+ SvcRateAllocator allocator = SvcRateAllocator(codec);
+
+ const SpatialLayer* layers = codec.spatialLayers;
+
+ EXPECT_LE(codec.VP9()->numberOfSpatialLayers, 3U);
+
+ VideoBitrateAllocation allocation = allocator.Allocate(
+ VideoBitrateAllocationParameters(layers[0].minBitrate * 1000, 30));
+ EXPECT_EQ(allocation.GetSpatialLayerSum(0) / 1000, layers[0].minBitrate);
+ EXPECT_EQ(allocation.GetSpatialLayerSum(1), 0UL);
+
+ allocation = allocator.Allocate(VideoBitrateAllocationParameters(
+ (layers[0].targetBitrate + layers[1].minBitrate) * 1000, 30));
+ EXPECT_EQ(allocation.GetSpatialLayerSum(0) / 1000, layers[0].targetBitrate);
+ EXPECT_EQ(allocation.GetSpatialLayerSum(1) / 1000, layers[1].minBitrate);
+}
+
+TEST(SvcRateAllocatorTest, DeactivateHigherLayers) {
+ for (int deactivated_idx = 2; deactivated_idx >= 0; --deactivated_idx) {
+ VideoCodec codec = Configure(1280, 720, 3, 1, false);
+ EXPECT_LE(codec.VP9()->numberOfSpatialLayers, 3U);
+
+ for (int i = deactivated_idx; i < 3; ++i)
+ codec.spatialLayers[i].active = false;
+
+ SvcRateAllocator allocator = SvcRateAllocator(codec);
+
+ VideoBitrateAllocation allocation = allocator.Allocate(
+ VideoBitrateAllocationParameters(10 * 1000 * 1000, 30));
+
+ // Ensure layers spatial_idx < deactivated_idx are activated.
+ for (int spatial_idx = 0; spatial_idx < deactivated_idx; ++spatial_idx) {
+ EXPECT_GT(allocation.GetSpatialLayerSum(spatial_idx), 0UL);
+ }
+
+ // Ensure layers spatial_idx >= deactivated_idx are deactivated.
+ for (int spatial_idx = deactivated_idx; spatial_idx < 3; ++spatial_idx) {
+ EXPECT_EQ(allocation.GetSpatialLayerSum(spatial_idx), 0UL);
+ }
+ }
+}
+
+TEST(SvcRateAllocatorTest, DeactivateLowerLayers) {
+ for (int deactivated_idx = 0; deactivated_idx < 3; ++deactivated_idx) {
+ VideoCodec codec = Configure(1280, 720, 3, 1, false);
+ EXPECT_LE(codec.VP9()->numberOfSpatialLayers, 3U);
+
+ for (int i = deactivated_idx; i >= 0; --i)
+ codec.spatialLayers[i].active = false;
+
+ SvcRateAllocator allocator = SvcRateAllocator(codec);
+
+ VideoBitrateAllocation allocation = allocator.Allocate(
+ VideoBitrateAllocationParameters(10 * 1000 * 1000, 30));
+
+ // Ensure layers spatial_idx <= deactivated_idx are deactivated.
+ for (int spatial_idx = 0; spatial_idx <= deactivated_idx; ++spatial_idx) {
+ EXPECT_EQ(allocation.GetSpatialLayerSum(spatial_idx), 0UL);
+ }
+
+ // Ensure layers spatial_idx > deactivated_idx are activated.
+ for (int spatial_idx = deactivated_idx + 1; spatial_idx < 3;
+ ++spatial_idx) {
+ EXPECT_GT(allocation.GetSpatialLayerSum(spatial_idx), 0UL);
+ }
+ }
+}
+
+TEST(SvcRateAllocatorTest, SignalsBwLimited) {
+ VideoCodec codec = Configure(1280, 720, 3, 1, false);
+ SvcRateAllocator allocator = SvcRateAllocator(codec);
+
+ // Rough estimate calculated by hand.
+ uint32_t min_to_enable_all = 900000;
+
+ EXPECT_TRUE(
+ allocator
+ .Allocate(VideoBitrateAllocationParameters(min_to_enable_all / 2, 30))
+ .is_bw_limited());
+
+ EXPECT_FALSE(
+ allocator
+ .Allocate(VideoBitrateAllocationParameters(min_to_enable_all, 30))
+ .is_bw_limited());
+}
+
+TEST(SvcRateAllocatorTest, NoPaddingIfAllLayersAreDeactivated) {
+ VideoCodec codec = Configure(1280, 720, 3, 1, false);
+ EXPECT_EQ(codec.VP9()->numberOfSpatialLayers, 3U);
+ // Deactivation of base layer deactivates all layers.
+ codec.spatialLayers[0].active = false;
+ codec.spatialLayers[1].active = false;
+ codec.spatialLayers[2].active = false;
+ DataRate padding_rate = SvcRateAllocator::GetPaddingBitrate(codec);
+ EXPECT_EQ(padding_rate, DataRate::Zero());
+}
+
+TEST(SvcRateAllocatorTest, FindLayerTogglingThreshold) {
+ // Let's unit test a utility method of the unit test...
+
+ // Predetermined constants indicating the min bitrate needed for two and three
+ // layers to be enabled respectively, using the config from Configure() with
+ // 1280x720 resolution and three spatial layers.
+ const DataRate kTwoLayerMinRate = DataRate::BitsPerSec(299150);
+ const DataRate kThreeLayerMinRate = DataRate::BitsPerSec(891052);
+
+ VideoCodec codec = Configure(1280, 720, 3, 1, false);
+ absl::InlinedVector<DataRate, kMaxSpatialLayers> layer_start_bitrates =
+ SvcRateAllocator::GetLayerStartBitrates(codec);
+ ASSERT_EQ(layer_start_bitrates.size(), 3u);
+ EXPECT_EQ(layer_start_bitrates[1], kTwoLayerMinRate);
+ EXPECT_EQ(layer_start_bitrates[2], kThreeLayerMinRate);
+}
+
+TEST(SvcRateAllocatorTest, SupportsAv1) {
+ VideoCodec codec;
+ codec.width = 640;
+ codec.height = 360;
+ codec.codecType = kVideoCodecAV1;
+ codec.SetScalabilityMode(ScalabilityMode::kL3T3);
+ codec.spatialLayers[0].active = true;
+ codec.spatialLayers[0].minBitrate = 30;
+ codec.spatialLayers[0].targetBitrate = 51;
+ codec.spatialLayers[0].maxBitrate = 73;
+ codec.spatialLayers[1].active = true;
+ codec.spatialLayers[1].minBitrate = 49;
+ codec.spatialLayers[1].targetBitrate = 64;
+ codec.spatialLayers[1].maxBitrate = 97;
+ codec.spatialLayers[2].active = true;
+ codec.spatialLayers[2].minBitrate = 193;
+ codec.spatialLayers[2].targetBitrate = 305;
+ codec.spatialLayers[2].maxBitrate = 418;
+
+ SvcRateAllocator allocator(codec);
+
+ VideoBitrateAllocation allocation =
+ allocator.Allocate(VideoBitrateAllocationParameters(1'000'000, 30));
+
+ EXPECT_GT(allocation.GetSpatialLayerSum(0), 0u);
+ EXPECT_GT(allocation.GetSpatialLayerSum(1), 0u);
+ EXPECT_GT(allocation.GetSpatialLayerSum(2), 0u);
+}
+
+TEST(SvcRateAllocatorTest, SupportsAv1WithSkippedLayer) {
+ VideoCodec codec;
+ codec.width = 640;
+ codec.height = 360;
+ codec.codecType = kVideoCodecAV1;
+ codec.SetScalabilityMode(ScalabilityMode::kL3T3);
+ codec.spatialLayers[0].active = false;
+ codec.spatialLayers[0].minBitrate = 30;
+ codec.spatialLayers[0].targetBitrate = 51;
+ codec.spatialLayers[0].maxBitrate = 73;
+ codec.spatialLayers[1].active = true;
+ codec.spatialLayers[1].minBitrate = 49;
+ codec.spatialLayers[1].targetBitrate = 64;
+ codec.spatialLayers[1].maxBitrate = 97;
+ codec.spatialLayers[2].active = true;
+ codec.spatialLayers[2].minBitrate = 193;
+ codec.spatialLayers[2].targetBitrate = 305;
+ codec.spatialLayers[2].maxBitrate = 418;
+
+ SvcRateAllocator allocator(codec);
+
+ VideoBitrateAllocation allocation =
+ allocator.Allocate(VideoBitrateAllocationParameters(1'000'000, 30));
+
+ EXPECT_EQ(allocation.GetSpatialLayerSum(0), 0u);
+ EXPECT_GT(allocation.GetSpatialLayerSum(1), 0u);
+ EXPECT_GT(allocation.GetSpatialLayerSum(2), 0u);
+}
+
+TEST(SvcRateAllocatorTest, UsesScalabilityModeToGetNumberOfLayers) {
+ VideoCodec codec;
+ codec.width = 640;
+ codec.height = 360;
+ codec.codecType = kVideoCodecAV1;
+ codec.SetScalabilityMode(ScalabilityMode::kL2T2);
+ codec.spatialLayers[0].active = true;
+ codec.spatialLayers[0].minBitrate = 30;
+ codec.spatialLayers[0].targetBitrate = 51;
+ codec.spatialLayers[0].maxBitrate = 73;
+ codec.spatialLayers[1].active = true;
+ codec.spatialLayers[1].minBitrate = 49;
+ codec.spatialLayers[1].targetBitrate = 64;
+ codec.spatialLayers[1].maxBitrate = 97;
+ codec.spatialLayers[2].active = true;
+ codec.spatialLayers[2].minBitrate = 193;
+ codec.spatialLayers[2].targetBitrate = 305;
+ codec.spatialLayers[2].maxBitrate = 418;
+
+ SvcRateAllocator allocator(codec);
+ VideoBitrateAllocation allocation =
+ allocator.Allocate(VideoBitrateAllocationParameters(1'000'000, 30));
+
+ // Expect bitrates for 2 temporal layers.
+ EXPECT_TRUE(allocation.HasBitrate(1, /*temporal_index=*/0));
+ EXPECT_TRUE(allocation.HasBitrate(1, /*temporal_index=*/1));
+ EXPECT_FALSE(allocation.HasBitrate(1, /*temporal_index=*/2));
+
+  // Expect codec.spatialLayers[2].active to be ignored because the
+  // scalability mode uses just 2 spatial layers.
+ EXPECT_EQ(allocation.GetSpatialLayerSum(2), 0u);
+}
+
+class SvcRateAllocatorTestParametrizedContentType
+ : public ::testing::Test,
+ public ::testing::WithParamInterface<bool> {
+ public:
+ SvcRateAllocatorTestParametrizedContentType()
+ : is_screen_sharing_(GetParam()) {}
+
+ const bool is_screen_sharing_;
+};
+
+TEST_P(SvcRateAllocatorTestParametrizedContentType, MaxBitrate) {
+ VideoCodec codec = Configure(1280, 720, 3, 1, is_screen_sharing_);
+ EXPECT_EQ(SvcRateAllocator::GetMaxBitrate(codec),
+ DataRate::KilobitsPerSec(codec.spatialLayers[0].maxBitrate +
+ codec.spatialLayers[1].maxBitrate +
+ codec.spatialLayers[2].maxBitrate));
+
+ // Deactivate middle layer. This causes deactivation of top layer as well.
+ codec.spatialLayers[1].active = false;
+ EXPECT_EQ(SvcRateAllocator::GetMaxBitrate(codec),
+ DataRate::KilobitsPerSec(codec.spatialLayers[0].maxBitrate));
+}
+
+TEST_P(SvcRateAllocatorTestParametrizedContentType, PaddingBitrate) {
+ VideoCodec codec = Configure(1280, 720, 3, 1, is_screen_sharing_);
+ SvcRateAllocator allocator = SvcRateAllocator(codec);
+
+ DataRate padding_bitrate = SvcRateAllocator::GetPaddingBitrate(codec);
+
+ VideoBitrateAllocation allocation =
+ allocator.Allocate(VideoBitrateAllocationParameters(padding_bitrate, 30));
+ EXPECT_GT(allocation.GetSpatialLayerSum(0), 0UL);
+ EXPECT_GT(allocation.GetSpatialLayerSum(1), 0UL);
+ EXPECT_GT(allocation.GetSpatialLayerSum(2), 0UL);
+
+ // Allocate 90% of padding bitrate. Top layer should be disabled.
+ allocation = allocator.Allocate(
+ VideoBitrateAllocationParameters(9 * padding_bitrate / 10, 30));
+ EXPECT_GT(allocation.GetSpatialLayerSum(0), 0UL);
+ EXPECT_GT(allocation.GetSpatialLayerSum(1), 0UL);
+ EXPECT_EQ(allocation.GetSpatialLayerSum(2), 0UL);
+
+ // Deactivate top layer.
+ codec.spatialLayers[2].active = false;
+
+ padding_bitrate = SvcRateAllocator::GetPaddingBitrate(codec);
+ allocation =
+ allocator.Allocate(VideoBitrateAllocationParameters(padding_bitrate, 30));
+ EXPECT_GT(allocation.GetSpatialLayerSum(0), 0UL);
+ EXPECT_GT(allocation.GetSpatialLayerSum(1), 0UL);
+ EXPECT_EQ(allocation.GetSpatialLayerSum(2), 0UL);
+
+ allocation = allocator.Allocate(
+ VideoBitrateAllocationParameters(9 * padding_bitrate / 10, 30));
+ EXPECT_GT(allocation.GetSpatialLayerSum(0), 0UL);
+ EXPECT_EQ(allocation.GetSpatialLayerSum(1), 0UL);
+ EXPECT_EQ(allocation.GetSpatialLayerSum(2), 0UL);
+
+ // Deactivate all layers.
+ codec.spatialLayers[0].active = false;
+ codec.spatialLayers[1].active = false;
+ codec.spatialLayers[2].active = false;
+
+ padding_bitrate = SvcRateAllocator::GetPaddingBitrate(codec);
+ // No padding expected.
+ EXPECT_EQ(DataRate::Zero(), padding_bitrate);
+}
+
+TEST_P(SvcRateAllocatorTestParametrizedContentType, StableBitrate) {
+ ScopedFieldTrials field_trial(
+ "WebRTC-StableTargetRate/enabled:true,video_hysteresis_factor:1.0,"
+ "screenshare_hysteresis_factor:1.0/");
+
+ const VideoCodec codec = Configure(1280, 720, 3, 1, is_screen_sharing_);
+ const auto start_rates = SvcRateAllocator::GetLayerStartBitrates(codec);
+ const DataRate min_rate_two_layers = start_rates[1];
+ const DataRate min_rate_three_layers = start_rates[2];
+
+ const DataRate max_rate_one_layer =
+ DataRate::KilobitsPerSec(codec.spatialLayers[0].maxBitrate);
+ const DataRate max_rate_two_layers =
+ is_screen_sharing_
+ ? DataRate::KilobitsPerSec(codec.spatialLayers[0].targetBitrate +
+ codec.spatialLayers[1].maxBitrate)
+ : DataRate::KilobitsPerSec(codec.spatialLayers[0].maxBitrate +
+ codec.spatialLayers[1].maxBitrate);
+
+ SvcRateAllocator allocator = SvcRateAllocator(codec);
+
+ // Two layers, stable and target equal.
+ auto allocation = allocator.Allocate(VideoBitrateAllocationParameters(
+ /*total_bitrate=*/min_rate_two_layers,
+ /*stable_bitrate=*/min_rate_two_layers, /*fps=*/30.0));
+ EXPECT_TRUE(allocation.IsSpatialLayerUsed(1));
+ EXPECT_EQ(allocation.get_sum_bps(), min_rate_two_layers.bps());
+
+ // Two layers, stable bitrate too low for two layers.
+ allocation = allocator.Allocate(VideoBitrateAllocationParameters(
+ /*total_bitrate=*/min_rate_two_layers,
+ /*stable_bitrate=*/min_rate_two_layers - DataRate::BitsPerSec(1),
+ /*fps=*/30.0));
+ EXPECT_FALSE(allocation.IsSpatialLayerUsed(1));
+ EXPECT_EQ(DataRate::BitsPerSec(allocation.get_sum_bps()),
+ std::min(min_rate_two_layers - DataRate::BitsPerSec(1),
+ max_rate_one_layer));
+
+ // Three layers, stable and target equal.
+ allocation = allocator.Allocate(VideoBitrateAllocationParameters(
+ /*total_bitrate=*/min_rate_three_layers,
+ /*stable_bitrate=*/min_rate_three_layers, /*fps=*/30.0));
+ EXPECT_TRUE(allocation.IsSpatialLayerUsed(2));
+ EXPECT_EQ(allocation.get_sum_bps(), min_rate_three_layers.bps());
+
+ // Three layers, stable bitrate too low for three layers.
+ allocation = allocator.Allocate(VideoBitrateAllocationParameters(
+ /*total_bitrate=*/min_rate_three_layers,
+ /*stable_bitrate=*/min_rate_three_layers - DataRate::BitsPerSec(1),
+ /*fps=*/30.0));
+ EXPECT_FALSE(allocation.IsSpatialLayerUsed(2));
+ EXPECT_EQ(DataRate::BitsPerSec(allocation.get_sum_bps()),
+ std::min(min_rate_three_layers - DataRate::BitsPerSec(1),
+ max_rate_two_layers));
+}
+
+TEST_P(SvcRateAllocatorTestParametrizedContentType,
+ StableBitrateWithHysteresis) {
+ const VideoCodec codec = Configure(1280, 720, 3, 1, is_screen_sharing_);
+ const auto start_rates = SvcRateAllocator::GetLayerStartBitrates(codec);
+ const DataRate min_rate_single_layer = start_rates[0];
+ const DataRate min_rate_two_layers = start_rates[1];
+ const DataRate min_rate_three_layers = start_rates[2];
+
+ ScopedFieldTrials field_trial(
+ "WebRTC-StableTargetRate/enabled:true,video_hysteresis_factor:1.1,"
+ "screenshare_hysteresis_factor:1.1/");
+ SvcRateAllocator allocator = SvcRateAllocator(codec);
+  // Always use the max bitrate as the target; verify that only the stable
+  // bitrate is used for layer count selection.
+ const DataRate max_bitrate = allocator.GetMaxBitrate(codec);
+
+ // Start with a single layer.
+ auto allocation = allocator.Allocate(VideoBitrateAllocationParameters(
+ /*total_bitrate=*/max_bitrate,
+ /*stable_bitrate=*/min_rate_single_layer, /*fps=*/30.0));
+ EXPECT_TRUE(allocation.IsSpatialLayerUsed(0));
+ EXPECT_FALSE(allocation.IsSpatialLayerUsed(1));
+ EXPECT_FALSE(allocation.IsSpatialLayerUsed(2));
+
+ // Min bitrate not enough to enable second layer due to 10% hysteresis.
+ allocation = allocator.Allocate(VideoBitrateAllocationParameters(
+ /*total_bitrate=*/max_bitrate,
+ /*stable_bitrate=*/min_rate_two_layers, /*fps=*/30.0));
+ EXPECT_TRUE(allocation.IsSpatialLayerUsed(0));
+ EXPECT_FALSE(allocation.IsSpatialLayerUsed(1));
+ EXPECT_FALSE(allocation.IsSpatialLayerUsed(2));
+
+ // Add hysteresis, second layer should turn on.
+ allocation = allocator.Allocate(VideoBitrateAllocationParameters(
+ /*total_bitrate=*/max_bitrate,
+ /*stable_bitrate=*/min_rate_two_layers * 1.1, /*fps=*/30.0));
+ EXPECT_TRUE(allocation.IsSpatialLayerUsed(0));
+ EXPECT_TRUE(allocation.IsSpatialLayerUsed(1));
+ EXPECT_FALSE(allocation.IsSpatialLayerUsed(2));
+
+ // Remove hysteresis, second layer should stay on.
+ allocation = allocator.Allocate(VideoBitrateAllocationParameters(
+ /*total_bitrate=*/max_bitrate,
+ /*stable_bitrate=*/min_rate_two_layers, /*fps=*/30.0));
+ EXPECT_TRUE(allocation.IsSpatialLayerUsed(0));
+ EXPECT_TRUE(allocation.IsSpatialLayerUsed(1));
+ EXPECT_FALSE(allocation.IsSpatialLayerUsed(2));
+
+ // Going below min for two layers, second layer should turn off again.
+ allocation = allocator.Allocate(VideoBitrateAllocationParameters(
+ /*total_bitrate=*/max_bitrate,
+ /*stable_bitrate=*/min_rate_two_layers - DataRate::BitsPerSec(1),
+ /*fps=*/30.0));
+ EXPECT_TRUE(allocation.IsSpatialLayerUsed(0));
+ EXPECT_FALSE(allocation.IsSpatialLayerUsed(1));
+ EXPECT_FALSE(allocation.IsSpatialLayerUsed(2));
+
+ // Min bitrate not enough to enable third layer due to 10% hysteresis.
+ allocation = allocator.Allocate(VideoBitrateAllocationParameters(
+ /*total_bitrate=*/max_bitrate,
+ /*stable_bitrate=*/min_rate_three_layers, /*fps=*/30.0));
+ EXPECT_TRUE(allocation.IsSpatialLayerUsed(0));
+ EXPECT_TRUE(allocation.IsSpatialLayerUsed(1));
+ EXPECT_FALSE(allocation.IsSpatialLayerUsed(2));
+
+ // Add hysteresis, third layer should turn on.
+ allocation = allocator.Allocate(VideoBitrateAllocationParameters(
+ /*total_bitrate=*/max_bitrate,
+ /*stable_bitrate=*/min_rate_three_layers * 1.1, /*fps=*/30.0));
+ EXPECT_TRUE(allocation.IsSpatialLayerUsed(0));
+ EXPECT_TRUE(allocation.IsSpatialLayerUsed(1));
+ EXPECT_TRUE(allocation.IsSpatialLayerUsed(2));
+
+ // Remove hysteresis, third layer should stay on.
+ allocation = allocator.Allocate(VideoBitrateAllocationParameters(
+ /*total_bitrate=*/max_bitrate,
+ /*stable_bitrate=*/min_rate_three_layers, /*fps=*/30.0));
+ EXPECT_TRUE(allocation.IsSpatialLayerUsed(0));
+ EXPECT_TRUE(allocation.IsSpatialLayerUsed(1));
+ EXPECT_TRUE(allocation.IsSpatialLayerUsed(2));
+
+ // Going below min for three layers, third layer should turn off again.
+ allocation = allocator.Allocate(VideoBitrateAllocationParameters(
+ /*total_bitrate=*/max_bitrate,
+ /*stable_bitrate=*/min_rate_three_layers - DataRate::BitsPerSec(1),
+ /*fps=*/30.0));
+ EXPECT_TRUE(allocation.IsSpatialLayerUsed(0));
+ EXPECT_TRUE(allocation.IsSpatialLayerUsed(1));
+ EXPECT_FALSE(allocation.IsSpatialLayerUsed(2));
+}
+
+INSTANTIATE_TEST_SUITE_P(_,
+ SvcRateAllocatorTestParametrizedContentType,
+ ::testing::Bool());
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/test/stream_generator.cc b/third_party/libwebrtc/modules/video_coding/test/stream_generator.cc
new file mode 100644
index 0000000000..98a0cf1cdc
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/test/stream_generator.cc
@@ -0,0 +1,128 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/test/stream_generator.h"
+
+#include <string.h>
+
+#include <list>
+
+#include "modules/video_coding/packet.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+StreamGenerator::StreamGenerator(uint16_t start_seq_num, int64_t current_time)
+ : packets_(), sequence_number_(start_seq_num), start_time_(current_time) {}
+
+void StreamGenerator::Init(uint16_t start_seq_num, int64_t current_time) {
+ packets_.clear();
+ sequence_number_ = start_seq_num;
+ start_time_ = current_time;
+ memset(packet_buffer_, 0, sizeof(packet_buffer_));
+}
+
+void StreamGenerator::GenerateFrame(VideoFrameType type,
+ int num_media_packets,
+ int num_empty_packets,
+ int64_t time_ms) {
+ uint32_t timestamp = 90 * (time_ms - start_time_);
+ for (int i = 0; i < num_media_packets; ++i) {
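+    // Rounded division: spread the frame's bytes as evenly as possible
+    // across the media packets.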
+ const int packet_size =
+ (kFrameSize + num_media_packets / 2) / num_media_packets;
+ bool marker_bit = (i == num_media_packets - 1);
+ packets_.push_back(GeneratePacket(sequence_number_, timestamp, packet_size,
+ (i == 0), marker_bit, type));
+ ++sequence_number_;
+ }
+ for (int i = 0; i < num_empty_packets; ++i) {
+ packets_.push_back(GeneratePacket(sequence_number_, timestamp, 0, false,
+ false, VideoFrameType::kEmptyFrame));
+ ++sequence_number_;
+ }
+}
+
+VCMPacket StreamGenerator::GeneratePacket(uint16_t sequence_number,
+ uint32_t timestamp,
+ unsigned int size,
+ bool first_packet,
+ bool marker_bit,
+ VideoFrameType type) {
+ EXPECT_LT(size, kMaxPacketSize);
+ VCMPacket packet;
+ packet.seqNum = sequence_number;
+ packet.timestamp = timestamp;
+ packet.video_header.frame_type = type;
+ packet.video_header.is_first_packet_in_frame = first_packet;
+ packet.markerBit = marker_bit;
+ packet.sizeBytes = size;
+ packet.dataPtr = packet_buffer_;
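+  // Mark the packet's position within the NALU from the first-packet and
+  // marker flags.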
+ if (packet.is_first_packet_in_frame())
+ packet.completeNALU = kNaluStart;
+ else if (packet.markerBit)
+ packet.completeNALU = kNaluEnd;
+ else
+ packet.completeNALU = kNaluIncomplete;
+ return packet;
+}
+
+bool StreamGenerator::PopPacket(VCMPacket* packet, int index) {
+ std::list<VCMPacket>::iterator it = GetPacketIterator(index);
+ if (it == packets_.end())
+ return false;
+ if (packet)
+ *packet = (*it);
+ packets_.erase(it);
+ return true;
+}
+
+bool StreamGenerator::GetPacket(VCMPacket* packet, int index) {
+ std::list<VCMPacket>::iterator it = GetPacketIterator(index);
+ if (it == packets_.end())
+ return false;
+ if (packet)
+ *packet = (*it);
+ return true;
+}
+
+bool StreamGenerator::NextPacket(VCMPacket* packet) {
+ if (packets_.empty())
+ return false;
+ if (packet != NULL)
+ *packet = packets_.front();
+ packets_.pop_front();
+ return true;
+}
+
+void StreamGenerator::DropLastPacket() {
+ packets_.pop_back();
+}
+
+uint16_t StreamGenerator::NextSequenceNumber() const {
+ if (packets_.empty())
+ return sequence_number_;
+ return packets_.front().seqNum;
+}
+
+int StreamGenerator::PacketsRemaining() const {
+ return packets_.size();
+}
+
+std::list<VCMPacket>::iterator StreamGenerator::GetPacketIterator(int index) {
+ std::list<VCMPacket>::iterator it = packets_.begin();
+ for (int i = 0; i < index; ++i) {
+ ++it;
+ if (it == packets_.end())
+ break;
+ }
+ return it;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/test/stream_generator.h b/third_party/libwebrtc/modules/video_coding/test/stream_generator.h
new file mode 100644
index 0000000000..ddb23ebb76
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/test/stream_generator.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef MODULES_VIDEO_CODING_TEST_STREAM_GENERATOR_H_
+#define MODULES_VIDEO_CODING_TEST_STREAM_GENERATOR_H_
+
+#include <stdint.h>
+
+#include <list>
+
+#include "modules/video_coding/packet.h"
+
+namespace webrtc {
+
+const unsigned int kDefaultBitrateKbps = 1000;
+const unsigned int kDefaultFrameRate = 25;
+const unsigned int kMaxPacketSize = 1500;
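+// kFrameSize rounds kDefaultBitrateKbps / (kDefaultFrameRate * 8) to the
+// nearest integer; the added kDefaultFrameRate * 4 is half the divisor.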
+const unsigned int kFrameSize =
+ (kDefaultBitrateKbps + kDefaultFrameRate * 4) / (kDefaultFrameRate * 8);
+const int kDefaultFramePeriodMs = 1000 / kDefaultFrameRate;
+
+class StreamGenerator {
+ public:
+ StreamGenerator(uint16_t start_seq_num, int64_t current_time);
+
+ StreamGenerator(const StreamGenerator&) = delete;
+ StreamGenerator& operator=(const StreamGenerator&) = delete;
+
+ void Init(uint16_t start_seq_num, int64_t current_time);
+
+  // `time_ms` is the timestamp to put on the frame, in milliseconds.
+  // GenerateFrame translates `time_ms` into a 90 kHz RTP timestamp and puts
+  // it on the frame.
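+  // For example, with start_time_ == 0, time_ms == 100 yields RTP timestamp
+  // 9000 on the 90 kHz clock.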
+ void GenerateFrame(VideoFrameType type,
+ int num_media_packets,
+ int num_empty_packets,
+ int64_t time_ms);
+
+ bool PopPacket(VCMPacket* packet, int index);
+ void DropLastPacket();
+
+ bool GetPacket(VCMPacket* packet, int index);
+
+ bool NextPacket(VCMPacket* packet);
+
+ uint16_t NextSequenceNumber() const;
+
+ int PacketsRemaining() const;
+
+ private:
+ VCMPacket GeneratePacket(uint16_t sequence_number,
+ uint32_t timestamp,
+ unsigned int size,
+ bool first_packet,
+ bool marker_bit,
+ VideoFrameType type);
+
+ std::list<VCMPacket>::iterator GetPacketIterator(int index);
+
+ std::list<VCMPacket> packets_;
+ uint16_t sequence_number_;
+ int64_t start_time_;
+ uint8_t packet_buffer_[kMaxPacketSize];
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_TEST_STREAM_GENERATOR_H_
diff --git a/third_party/libwebrtc/modules/video_coding/timing/BUILD.gn b/third_party/libwebrtc/modules/video_coding/timing/BUILD.gn
new file mode 100644
index 0000000000..b130f92154
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/timing/BUILD.gn
@@ -0,0 +1,132 @@
+# Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+import("../../../webrtc.gni")
+
+rtc_library("codec_timer") {
+ sources = [
+ "codec_timer.cc",
+ "codec_timer.h",
+ ]
+ deps = [ "../../../rtc_base:rtc_numerics" ]
+}
+
+rtc_library("inter_frame_delay") {
+ sources = [
+ "inter_frame_delay.cc",
+ "inter_frame_delay.h",
+ ]
+ deps = [
+ "../..:module_api_public",
+ "../../../api/units:frequency",
+ "../../../api/units:time_delta",
+ "../../../api/units:timestamp",
+ ]
+ absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ]
+}
+
+rtc_library("frame_delay_delta_kalman_filter") {
+ sources = [
+ "frame_delay_delta_kalman_filter.cc",
+ "frame_delay_delta_kalman_filter.h",
+ ]
+ deps = [
+ "../../../api/units:data_size",
+ "../../../api/units:time_delta",
+ ]
+ visibility = [
+ ":jitter_estimator",
+ ":timing_unittests",
+ ]
+}
+
+rtc_library("jitter_estimator") {
+ sources = [
+ "jitter_estimator.cc",
+ "jitter_estimator.h",
+ ]
+ deps = [
+ ":frame_delay_delta_kalman_filter",
+ ":rtt_filter",
+ "../../../api:field_trials_view",
+ "../../../api/units:data_size",
+ "../../../api/units:frequency",
+ "../../../api/units:time_delta",
+ "../../../api/units:timestamp",
+ "../../../rtc_base",
+ "../../../rtc_base:safe_conversions",
+ "../../../system_wrappers",
+ ]
+ absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ]
+}
+
+rtc_library("rtt_filter") {
+ sources = [
+ "rtt_filter.cc",
+ "rtt_filter.h",
+ ]
+ deps = [ "../../../api/units:time_delta" ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/algorithm:container",
+ "//third_party/abseil-cpp/absl/container:inlined_vector",
+ ]
+}
+
+rtc_library("timing_module") {
+ sources = [
+ "timing.cc",
+ "timing.h",
+ ]
+ deps = [
+ ":codec_timer",
+ "../../../api:field_trials_view",
+ "../../../api/units:time_delta",
+ "../../../api/video:video_frame",
+ "../../../api/video:video_rtp_headers",
+ "../../../rtc_base:logging",
+ "../../../rtc_base:macromagic",
+ "../../../rtc_base:rtc_numerics",
+ "../../../rtc_base/experiments:field_trial_parser",
+ "../../../rtc_base/synchronization:mutex",
+ "../../../rtc_base/time:timestamp_extrapolator",
+ "../../../system_wrappers",
+ ]
+ absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ]
+}
+
+if (!build_with_mozilla) {
+rtc_library("timing_unittests") {
+ testonly = true
+ sources = [
+ "frame_delay_delta_kalman_filter_unittest.cc",
+ "inter_frame_delay_unittest.cc",
+ "jitter_estimator_unittest.cc",
+ "rtt_filter_unittest.cc",
+ "timing_unittest.cc",
+ ]
+ deps = [
+ ":frame_delay_delta_kalman_filter",
+ ":inter_frame_delay",
+ ":jitter_estimator",
+ ":rtt_filter",
+ ":timing_module",
+ "../../../api:array_view",
+ "../../../api/units:data_size",
+ "../../../api/units:frequency",
+ "../../../api/units:time_delta",
+ "../../../api/units:timestamp",
+ "../../../rtc_base:histogram_percentile_counter",
+ "../../../rtc_base:stringutils",
+ "../../../rtc_base:timeutils",
+ "../../../system_wrappers:system_wrappers",
+ "../../../test:scoped_key_value_config",
+ "../../../test:test_support",
+ ]
+ absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ]
+}
+}
diff --git a/third_party/libwebrtc/modules/video_coding/timing/codec_timer.cc b/third_party/libwebrtc/modules/video_coding/timing/codec_timer.cc
new file mode 100644
index 0000000000..f57d42d40a
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/timing/codec_timer.cc
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/timing/codec_timer.h"
+
+#include <cstdint>
+
+namespace webrtc {
+
+namespace {
+
+// The first kIgnoredSampleCount samples will be ignored.
+const int kIgnoredSampleCount = 5;
+// The percentile of the observed decode times that RequiredDecodeTimeMs()
+// returns.
+const float kPercentile = 0.95f;
+// The window size in ms.
+const int64_t kTimeLimitMs = 10000;
+
+} // anonymous namespace
+
+CodecTimer::CodecTimer() : ignored_sample_count_(0), filter_(kPercentile) {}
+CodecTimer::~CodecTimer() = default;
+
+void CodecTimer::AddTiming(int64_t decode_time_ms, int64_t now_ms) {
+ // Ignore the first `kIgnoredSampleCount` samples.
+ if (ignored_sample_count_ < kIgnoredSampleCount) {
+ ++ignored_sample_count_;
+ return;
+ }
+
+ // Insert new decode time value.
+ filter_.Insert(decode_time_ms);
+ history_.emplace(decode_time_ms, now_ms);
+
+ // Pop old decode time values.
+ while (!history_.empty() &&
+ now_ms - history_.front().sample_time_ms > kTimeLimitMs) {
+ filter_.Erase(history_.front().decode_time_ms);
+ history_.pop();
+ }
+}
+
+// Get the 95th percentile observed decode time within a time window.
+int64_t CodecTimer::RequiredDecodeTimeMs() const {
+ return filter_.GetPercentileValue();
+}
+
+CodecTimer::Sample::Sample(int64_t decode_time_ms, int64_t sample_time_ms)
+ : decode_time_ms(decode_time_ms), sample_time_ms(sample_time_ms) {}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/timing/codec_timer.h b/third_party/libwebrtc/modules/video_coding/timing/codec_timer.h
new file mode 100644
index 0000000000..9f12d82e98
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/timing/codec_timer.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_TIMING_CODEC_TIMER_H_
+#define MODULES_VIDEO_CODING_TIMING_CODEC_TIMER_H_
+
+#include <queue>
+
+#include "rtc_base/numerics/percentile_filter.h"
+
+namespace webrtc {
+
+class CodecTimer {
+ public:
+ CodecTimer();
+ ~CodecTimer();
+
+ // Add a new decode time to the filter.
+ void AddTiming(int64_t new_decode_time_ms, int64_t now_ms);
+
+ // Get the required decode time in ms. It is the 95th percentile observed
+ // decode time within a time window.
+ int64_t RequiredDecodeTimeMs() const;
+
+ private:
+ struct Sample {
+ Sample(int64_t decode_time_ms, int64_t sample_time_ms);
+ int64_t decode_time_ms;
+ int64_t sample_time_ms;
+ };
+
+ // The number of samples ignored so far.
+ int ignored_sample_count_;
+ // Queue with history of latest decode time values.
+ std::queue<Sample> history_;
+ // `filter_` contains the same values as `history_`, but in a data structure
+ // that allows efficient retrieval of the percentile value.
+ PercentileFilter<int64_t> filter_;
+};
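+
+// Illustrative usage (hypothetical values): feed one sample per decoded frame
+// and read back the filtered estimate:
+//   CodecTimer timer;
+//   timer.AddTiming(/*decode_time_ms=*/12, /*now_ms=*/1000);
+//   int64_t required_ms = timer.RequiredDecodeTimeMs();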
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_TIMING_CODEC_TIMER_H_
diff --git a/third_party/libwebrtc/modules/video_coding/timing/codec_timer_gn/moz.build b/third_party/libwebrtc/modules/video_coding/timing/codec_timer_gn/moz.build
new file mode 100644
index 0000000000..fe230f262d
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/timing/codec_timer_gn/moz.build
@@ -0,0 +1,201 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/video_coding/timing/codec_timer.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("codec_timer_gn")
diff --git a/third_party/libwebrtc/modules/video_coding/timing/frame_delay_delta_kalman_filter.cc b/third_party/libwebrtc/modules/video_coding/timing/frame_delay_delta_kalman_filter.cc
new file mode 100644
index 0000000000..69af4e25e4
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/timing/frame_delay_delta_kalman_filter.cc
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/timing/frame_delay_delta_kalman_filter.h"
+
+#include "api/units/data_size.h"
+#include "api/units/time_delta.h"
+
+namespace webrtc {
+
+namespace {
+// TODO(brandtr): The value below corresponds to 8 Gbps. Is that reasonable?
+constexpr double kMaxBandwidth = 0.000001; // Unit: [1 / bytes per ms].
+}  // namespace
+
+FrameDelayDeltaKalmanFilter::FrameDelayDeltaKalmanFilter() {
+ // TODO(brandtr): Is there a factor 1000 missing here?
+ estimate_[0] = 1 / (512e3 / 8); // Unit: [1 / bytes per ms]
+ estimate_[1] = 0; // Unit: [ms]
+
+ // Initial estimate covariance.
+ estimate_cov_[0][0] = 1e-4; // Unit: [(1 / bytes per ms)^2]
+ estimate_cov_[1][1] = 1e2; // Unit: [ms^2]
+ estimate_cov_[0][1] = estimate_cov_[1][0] = 0;
+
+ // Process noise covariance.
+ process_noise_cov_diag_[0] = 2.5e-10; // Unit: [(1 / bytes per ms)^2]
+ process_noise_cov_diag_[1] = 1e-10; // Unit: [ms^2]
+}
+
+void FrameDelayDeltaKalmanFilter::PredictAndUpdate(
+ TimeDelta frame_delay_variation,
+ double frame_size_variation_bytes,
+ DataSize max_frame_size,
+ double var_noise) {
+ // Sanity checks.
+ if (max_frame_size < DataSize::Bytes(1)) {
+ return;
+ }
+ if (var_noise <= 0.0) {
+ return;
+ }
+
+ // This member function follows the data flow in
+ // https://en.wikipedia.org/wiki/Kalman_filter#Details.
+
+ // 1) Estimate prediction: `x = F*x`.
+ // For this model, there is no need to explicitly predict the estimate, since
+ // the state transition matrix is the identity.
+
+ // 2) Estimate covariance prediction: `P = F*P*F' + Q`.
+ // Again, since the state transition matrix is the identity, this update
+ // is performed by simply adding the process noise covariance.
+ estimate_cov_[0][0] += process_noise_cov_diag_[0];
+ estimate_cov_[1][1] += process_noise_cov_diag_[1];
+
+ // 3) Innovation: `y = z - H*x`.
+ // This is the part of the measurement that cannot be explained by the current
+ // estimate.
+ double innovation =
+ frame_delay_variation.ms() -
+ GetFrameDelayVariationEstimateTotal(frame_size_variation_bytes);
+
+ // 4) Innovation variance: `s = H*P*H' + r`.
+ double estim_cov_times_obs[2];
+ estim_cov_times_obs[0] =
+ estimate_cov_[0][0] * frame_size_variation_bytes + estimate_cov_[0][1];
+ estim_cov_times_obs[1] =
+ estimate_cov_[1][0] * frame_size_variation_bytes + estimate_cov_[1][1];
+ double observation_noise_stddev =
+ (300.0 * exp(-fabs(frame_size_variation_bytes) /
+ (1e0 * max_frame_size.bytes())) +
+ 1) *
+ sqrt(var_noise);
+ if (observation_noise_stddev < 1.0) {
+ observation_noise_stddev = 1.0;
+ }
+ // TODO(brandtr): Shouldn't we add observation_noise_stddev^2 here? Otherwise,
+ // the dimensional analysis fails.
+ double innovation_var = frame_size_variation_bytes * estim_cov_times_obs[0] +
+ estim_cov_times_obs[1] + observation_noise_stddev;
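+  // Reject a (nearly) singular innovation variance to avoid dividing by zero
+  // when computing the Kalman gain below.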
+ if ((innovation_var < 1e-9 && innovation_var >= 0) ||
+ (innovation_var > -1e-9 && innovation_var <= 0)) {
+ RTC_DCHECK_NOTREACHED();
+ return;
+ }
+
+ // 5) Optimal Kalman gain: `K = P*H'/s`.
+ // How much to trust the model vs. how much to trust the measurement.
+ double kalman_gain[2];
+ kalman_gain[0] = estim_cov_times_obs[0] / innovation_var;
+ kalman_gain[1] = estim_cov_times_obs[1] / innovation_var;
+
+ // 6) Estimate update: `x = x + K*y`.
+ // Optimally weight the new information in the innovation and add it to the
+ // old estimate.
+ estimate_[0] += kalman_gain[0] * innovation;
+ estimate_[1] += kalman_gain[1] * innovation;
+
+ // (This clamping is not part of the linear Kalman filter.)
+ if (estimate_[0] < kMaxBandwidth) {
+ estimate_[0] = kMaxBandwidth;
+ }
+
+ // 7) Estimate covariance update: `P = (I - K*H)*P`
+ double t00 = estimate_cov_[0][0];
+ double t01 = estimate_cov_[0][1];
+ estimate_cov_[0][0] =
+ (1 - kalman_gain[0] * frame_size_variation_bytes) * t00 -
+ kalman_gain[0] * estimate_cov_[1][0];
+ estimate_cov_[0][1] =
+ (1 - kalman_gain[0] * frame_size_variation_bytes) * t01 -
+ kalman_gain[0] * estimate_cov_[1][1];
+ estimate_cov_[1][0] = estimate_cov_[1][0] * (1 - kalman_gain[1]) -
+ kalman_gain[1] * frame_size_variation_bytes * t00;
+ estimate_cov_[1][1] = estimate_cov_[1][1] * (1 - kalman_gain[1]) -
+ kalman_gain[1] * frame_size_variation_bytes * t01;
+
+ // Covariance matrix, must be positive semi-definite.
+ RTC_DCHECK(estimate_cov_[0][0] + estimate_cov_[1][1] >= 0 &&
+ estimate_cov_[0][0] * estimate_cov_[1][1] -
+ estimate_cov_[0][1] * estimate_cov_[1][0] >=
+ 0 &&
+ estimate_cov_[0][0] >= 0);
+}
+
+double FrameDelayDeltaKalmanFilter::GetFrameDelayVariationEstimateSizeBased(
+ double frame_size_variation_bytes) const {
+ // Unit: [1 / bytes per millisecond] * [bytes] = [milliseconds].
+ return estimate_[0] * frame_size_variation_bytes;
+}
+
+double FrameDelayDeltaKalmanFilter::GetFrameDelayVariationEstimateTotal(
+ double frame_size_variation_bytes) const {
+ double frame_transmission_delay_ms =
+ GetFrameDelayVariationEstimateSizeBased(frame_size_variation_bytes);
+ double link_queuing_delay_ms = estimate_[1];
+ return frame_transmission_delay_ms + link_queuing_delay_ms;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/timing/frame_delay_delta_kalman_filter.h b/third_party/libwebrtc/modules/video_coding/timing/frame_delay_delta_kalman_filter.h
new file mode 100644
index 0000000000..1612ef3aa2
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/timing/frame_delay_delta_kalman_filter.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_TIMING_FRAME_DELAY_DELTA_KALMAN_FILTER_H_
+#define MODULES_VIDEO_CODING_TIMING_FRAME_DELAY_DELTA_KALMAN_FILTER_H_
+
+#include "api/units/data_size.h"
+#include "api/units/time_delta.h"
+
+namespace webrtc {
+
+// This class uses a linear Kalman filter (see
+// https://en.wikipedia.org/wiki/Kalman_filter) to estimate the frame delay
+// variation (i.e., the difference in transmission time between a frame and the
+// prior frame) for a frame, given its size variation in bytes (i.e., the
+// difference in size between a frame and the prior frame). The idea is that,
+// given a fixed link bandwidth, a larger frame (in bytes) would take
+// proportionally longer to arrive than a correspondingly smaller frame. Using
+// the variations of frame delay and frame size, the underlying bandwidth and
+// queuing delay variation of the network link can be estimated.
+//
+// The filter takes as input the frame delay variation, the difference between
+// the actual inter-frame arrival time and the expected inter-frame arrival time
+// (based on RTP timestamp), and frame size variation, the inter-frame size
+// delta for a single frame. The frame delay variation is seen as the
+// measurement and the frame size variation is used in the observation model.
+// The hidden state of the filter is the link bandwidth and queuing delay
+// buildup. The estimated state can be used to get the expected frame delay
+// variation for a frame, given its frame size variation. This information can
+// then be used to estimate the frame delay variation coming from network
+// jitter.
+//
+// Mathematical details:
+// * The state (`x` in Wikipedia notation) is a 2x1 vector comprising the
+// reciprocal of link bandwidth [1 / bytes per ms] and the
+// link queuing delay buildup [ms].
+// * The state transition matrix (`F`) is the 2x2 identity matrix, meaning that
+// link bandwidth and link queuing delay buildup are modeled as independent.
+// * The measurement (`z`) is the (scalar) frame delay variation [ms].
+// * The observation matrix (`H`) is a 1x2 vector set as
+// `{frame_size_variation [bytes], 1.0}`.
+// * The state estimate covariance (`P`) is a symmetric 2x2 matrix.
+// * The process noise covariance (`Q`) is a constant 2x2 diagonal matrix
+// [(1 / bytes per ms)^2, ms^2].
+// * The observation noise covariance (`r`) is a scalar [ms^2] that is
+// determined externally to this class.
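+//
+// As an illustrative example of the observation model: with state
+// `x = (1/BW, q)` and observation row `H = {frame_size_variation, 1.0}`, the
+// predicted measurement is `H*x = frame_size_variation / BW + q`, i.e. the
+// transmission time of the size delta plus the queuing delay buildup.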
+class FrameDelayDeltaKalmanFilter {
+ public:
+ FrameDelayDeltaKalmanFilter();
+ ~FrameDelayDeltaKalmanFilter() = default;
+
+ // Predicts and updates the filter, given a new pair of frame delay variation
+ // and frame size variation.
+ //
+ // Inputs:
+ // `frame_delay_variation`:
+ // Frame delay variation as calculated by the `InterFrameDelay` estimator.
+ //
+ // `frame_size_variation_bytes`:
+ // Frame size variation, i.e., the current frame size minus the previous
+ // frame size (in bytes). Note that this quantity may be negative.
+ //
+ // `max_frame_size`:
+ // Filtered largest frame size received since the last reset.
+ //
+ // `var_noise`:
+ // Variance of the estimated random jitter.
+ void PredictAndUpdate(TimeDelta frame_delay_variation,
+ double frame_size_variation_bytes,
+ DataSize max_frame_size,
+ double var_noise);
+
+ // Given a frame size variation, returns the estimated frame delay variation
+ // explained by the link bandwidth alone.
+ double GetFrameDelayVariationEstimateSizeBased(
+ double frame_size_variation_bytes) const;
+
+ // Given a frame size variation, returns the estimated frame delay variation
+ // explained by both link bandwidth and link queuing delay buildup.
+ double GetFrameDelayVariationEstimateTotal(
+ double frame_size_variation_bytes) const;
+
+ private:
+  // State estimate (reciprocal of link bandwidth [1 / bytes per ms], queue
+  // buildup [ms]).
+ double estimate_[2];
+ double estimate_cov_[2][2]; // Estimate covariance.
+
+ // Process noise covariance. This is a diagonal matrix, so we only store the
+ // diagonal entries.
+ double process_noise_cov_diag_[2];
+};
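+
+// Minimal usage sketch (values are illustrative and mirror the unit tests):
+//
+//   FrameDelayDeltaKalmanFilter filter;
+//   filter.PredictAndUpdate(/*frame_delay_variation=*/TimeDelta::Millis(3),
+//                           /*frame_size_variation_bytes=*/200.0,
+//                           /*max_frame_size=*/DataSize::Bytes(2000),
+//                           /*var_noise=*/0.1);
+//   double estimate_ms = filter.GetFrameDelayVariationEstimateTotal(200.0);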
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_TIMING_FRAME_DELAY_DELTA_KALMAN_FILTER_H_
diff --git a/third_party/libwebrtc/modules/video_coding/timing/frame_delay_delta_kalman_filter_gn/moz.build b/third_party/libwebrtc/modules/video_coding/timing/frame_delay_delta_kalman_filter_gn/moz.build
new file mode 100644
index 0000000000..35e13f8678
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/timing/frame_delay_delta_kalman_filter_gn/moz.build
@@ -0,0 +1,201 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/video_coding/timing/frame_delay_delta_kalman_filter.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("frame_delay_delta_kalman_filter_gn")
diff --git a/third_party/libwebrtc/modules/video_coding/timing/frame_delay_delta_kalman_filter_unittest.cc b/third_party/libwebrtc/modules/video_coding/timing/frame_delay_delta_kalman_filter_unittest.cc
new file mode 100644
index 0000000000..d4c1fbdba0
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/timing/frame_delay_delta_kalman_filter_unittest.cc
@@ -0,0 +1,118 @@
+/*
+ * Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/timing/frame_delay_delta_kalman_filter.h"
+
+#include "api/units/data_size.h"
+#include "api/units/frequency.h"
+#include "api/units/time_delta.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+// This test verifies that the initial filter state (link bandwidth, link
+// propagation delay) is such that a frame of size zero would take no time to
+// propagate.
+TEST(FrameDelayDeltaKalmanFilterTest,
+ InitializedFilterWithZeroSizeFrameTakesNoTimeToPropagate) {
+ FrameDelayDeltaKalmanFilter filter;
+
+ // A zero-sized frame...
+ double frame_size_variation_bytes = 0.0;
+
+  // ...should take no time to propagate due to its size...
+ EXPECT_EQ(filter.GetFrameDelayVariationEstimateSizeBased(
+ frame_size_variation_bytes),
+ 0.0);
+
+ // ...and no time due to the initial link propagation delay being zero.
+ EXPECT_EQ(
+ filter.GetFrameDelayVariationEstimateTotal(frame_size_variation_bytes),
+ 0.0);
+}
+
+// TODO(brandtr): Look into whether there is a factor 1000 missing here; it
+// seems unreasonable to have an initial link bandwidth of 512 _mega_bits per
+// second.
+TEST(FrameDelayDeltaKalmanFilterTest,
+ InitializedFilterWithSmallSizeFrameTakesFixedTimeToPropagate) {
+ FrameDelayDeltaKalmanFilter filter;
+
+ // A 1000-byte frame...
+ double frame_size_variation_bytes = 1000.0;
+ // ...should take around `1000.0 / (512e3 / 8.0) = 0.015625 ms` to transmit.
+ double expected_frame_delay_variation_estimate_ms = 1000.0 / (512e3 / 8.0);
+
+ EXPECT_EQ(filter.GetFrameDelayVariationEstimateSizeBased(
+ frame_size_variation_bytes),
+ expected_frame_delay_variation_estimate_ms);
+ EXPECT_EQ(
+ filter.GetFrameDelayVariationEstimateTotal(frame_size_variation_bytes),
+ expected_frame_delay_variation_estimate_ms);
+}
+
+TEST(FrameDelayDeltaKalmanFilterTest,
+ NegativeNoiseVarianceDoesNotUpdateFilter) {
+ FrameDelayDeltaKalmanFilter filter;
+
+ // Negative variance...
+ double var_noise = -0.1;
+ filter.PredictAndUpdate(/*frame_delay_variation=*/TimeDelta::Millis(3),
+ /*frame_size_variation_bytes=*/200.0,
+ /*max_frame_size=*/DataSize::Bytes(2000), var_noise);
+
+ // ...does _not_ update the filter.
+ EXPECT_EQ(filter.GetFrameDelayVariationEstimateTotal(
+ /*frame_size_variation_bytes=*/0.0),
+ 0.0);
+
+ // Positive variance...
+ var_noise = 0.1;
+ filter.PredictAndUpdate(/*frame_delay_variation=*/TimeDelta::Millis(3),
+ /*frame_size_variation_bytes=*/200.0,
+ /*max_frame_size=*/DataSize::Bytes(2000), var_noise);
+
+ // ...does update the filter.
+ EXPECT_GT(filter.GetFrameDelayVariationEstimateTotal(
+ /*frame_size_variation_bytes=*/0.0),
+ 0.0);
+}
+
+TEST(FrameDelayDeltaKalmanFilterTest,
+ VerifyConvergenceWithAlternatingDeviations) {
+ FrameDelayDeltaKalmanFilter filter;
+
+ // One frame every 33 ms.
+ int framerate_fps = 30;
+ // Let's assume approximately 10% delay variation.
+ TimeDelta frame_delay_variation = TimeDelta::Millis(3);
+ // With a bitrate of 512 kbps, each frame will be around 2000 bytes.
+ DataSize max_frame_size = DataSize::Bytes(2000);
+ // And again, let's assume 10% size deviation.
+ double frame_size_variation_bytes = 200;
+ double var_noise = 0.1;
+ int test_duration_s = 60;
+
+ for (int i = 0; i < test_duration_s * framerate_fps; ++i) {
+ // For simplicity, assume alternating variations.
+ double sign = (i % 2 == 0) ? 1.0 : -1.0;
+ filter.PredictAndUpdate(sign * frame_delay_variation,
+ sign * frame_size_variation_bytes, max_frame_size,
+ var_noise);
+ }
+
+ // Verify that the filter has converged within a margin of 0.1 ms.
+ EXPECT_NEAR(
+ filter.GetFrameDelayVariationEstimateTotal(frame_size_variation_bytes),
+ frame_delay_variation.ms(), 0.1);
+}
+
+} // namespace
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/timing/inter_frame_delay.cc b/third_party/libwebrtc/modules/video_coding/timing/inter_frame_delay.cc
new file mode 100644
index 0000000000..bed9f875ee
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/timing/inter_frame_delay.cc
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/timing/inter_frame_delay.h"
+
+#include "absl/types/optional.h"
+#include "api/units/frequency.h"
+#include "api/units/time_delta.h"
+#include "modules/include/module_common_types_public.h"
+
+namespace webrtc {
+
+namespace {
+constexpr Frequency k90kHz = Frequency::KiloHertz(90);
+}  // namespace
+
+InterFrameDelay::InterFrameDelay() {
+ Reset();
+}
+
+// Resets the delay estimate.
+void InterFrameDelay::Reset() {
+ prev_wall_clock_ = absl::nullopt;
+ prev_rtp_timestamp_unwrapped_ = 0;
+}
+
+// Calculates the delay of a frame with the given timestamp.
+// This method is called when the frame is complete.
+absl::optional<TimeDelta> InterFrameDelay::CalculateDelay(
+ uint32_t rtp_timestamp,
+ Timestamp now) {
+ int64_t rtp_timestamp_unwrapped = unwrapper_.Unwrap(rtp_timestamp);
+ if (!prev_wall_clock_) {
+ // First set of data, initialization, wait for next frame.
+ prev_wall_clock_ = now;
+ prev_rtp_timestamp_unwrapped_ = rtp_timestamp_unwrapped;
+ return TimeDelta::Zero();
+ }
+
+  // Account for reordering in the jitter variance estimate in the future?
+  // Note that this also captures incomplete frames which are grabbed for
+  // decoding after a later frame has been completed, i.e. real packet losses.
+ uint32_t cropped_last = static_cast<uint32_t>(prev_rtp_timestamp_unwrapped_);
+ if (rtp_timestamp_unwrapped < prev_rtp_timestamp_unwrapped_ ||
+ !IsNewerTimestamp(rtp_timestamp, cropped_last)) {
+ return absl::nullopt;
+ }
+
+ // Compute the compensated timestamp difference.
+ int64_t d_rtp_ticks = rtp_timestamp_unwrapped - prev_rtp_timestamp_unwrapped_;
+ TimeDelta dts = d_rtp_ticks / k90kHz;
+ TimeDelta dt = now - *prev_wall_clock_;
+
+  // The frame delay is the difference of `dt` and `dts`, i.e. the difference
+  // between the wall clock time delta and the RTP timestamp delta of two
+  // consecutive frames.
+ TimeDelta delay = dt - dts;
+
+ prev_rtp_timestamp_unwrapped_ = rtp_timestamp_unwrapped;
+ prev_wall_clock_ = now;
+ return delay;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/timing/inter_frame_delay.h b/third_party/libwebrtc/modules/video_coding/timing/inter_frame_delay.h
new file mode 100644
index 0000000000..579a488cb1
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/timing/inter_frame_delay.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_TIMING_INTER_FRAME_DELAY_H_
+#define MODULES_VIDEO_CODING_TIMING_INTER_FRAME_DELAY_H_
+
+#include <stdint.h>
+
+#include "absl/types/optional.h"
+#include "api/units/time_delta.h"
+#include "api/units/timestamp.h"
+#include "modules/include/module_common_types_public.h"
+
+namespace webrtc {
+
+class InterFrameDelay {
+ public:
+ InterFrameDelay();
+
+  // Resets the estimate to its initial state.
+ void Reset();
+
+ // Calculates the delay of a frame with the given timestamp.
+ // This method is called when the frame is complete.
+ absl::optional<TimeDelta> CalculateDelay(uint32_t rtp_timestamp,
+ Timestamp now);
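+
+  // Illustrative example: at 30 fps the RTP timestamp advances by 3000 ticks
+  // of the 90 kHz clock, i.e. an expected 33.3 ms between frames. If a frame
+  // instead arrives 36 ms after the previous one, CalculateDelay() returns
+  // roughly +2.7 ms.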
+
+ private:
+ // The previous rtp timestamp passed to the delay estimate
+ int64_t prev_rtp_timestamp_unwrapped_;
+ TimestampUnwrapper unwrapper_;
+
+ // The previous wall clock timestamp used by the delay estimate
+ absl::optional<Timestamp> prev_wall_clock_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_TIMING_INTER_FRAME_DELAY_H_
diff --git a/third_party/libwebrtc/modules/video_coding/timing/inter_frame_delay_gn/moz.build b/third_party/libwebrtc/modules/video_coding/timing/inter_frame_delay_gn/moz.build
new file mode 100644
index 0000000000..84a87f2a49
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/timing/inter_frame_delay_gn/moz.build
@@ -0,0 +1,201 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/video_coding/timing/inter_frame_delay.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("inter_frame_delay_gn")
diff --git a/third_party/libwebrtc/modules/video_coding/timing/inter_frame_delay_unittest.cc b/third_party/libwebrtc/modules/video_coding/timing/inter_frame_delay_unittest.cc
new file mode 100644
index 0000000000..183b378ced
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/timing/inter_frame_delay_unittest.cc
@@ -0,0 +1,190 @@
+/*
+ * Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/timing/inter_frame_delay.h"
+
+#include <limits>
+
+#include "absl/types/optional.h"
+#include "api/units/frequency.h"
+#include "api/units/time_delta.h"
+#include "api/units/timestamp.h"
+#include "system_wrappers/include/clock.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+namespace {
+
+// Test is for frames at 30fps. At 30fps, RTP timestamps will increase by
+// 90000 / 30 = 3000 ticks per frame.
+constexpr Frequency k30Fps = Frequency::Hertz(30);
+constexpr TimeDelta kFrameDelay = 1 / k30Fps;
+constexpr uint32_t kRtpTicksPerFrame = Frequency::KiloHertz(90) / k30Fps;
+constexpr Timestamp kStartTime = Timestamp::Millis(1337);
+
+} // namespace
+
+using ::testing::Eq;
+using ::testing::Optional;
+
+TEST(InterFrameDelayTest, OldRtpTimestamp) {
+ InterFrameDelay inter_frame_delay;
+ EXPECT_THAT(inter_frame_delay.CalculateDelay(180000, kStartTime),
+ Optional(TimeDelta::Zero()));
+ EXPECT_THAT(inter_frame_delay.CalculateDelay(90000, kStartTime),
+ Eq(absl::nullopt));
+}
+
+TEST(InterFrameDelayTest, NegativeWrapAroundIsSameAsOldRtpTimestamp) {
+ InterFrameDelay inter_frame_delay;
+ uint32_t rtp = 1500;
+ EXPECT_THAT(inter_frame_delay.CalculateDelay(rtp, kStartTime),
+ Optional(TimeDelta::Zero()));
+ // RTP has wrapped around backwards.
+ rtp -= 3000;
+ EXPECT_THAT(inter_frame_delay.CalculateDelay(rtp, kStartTime),
+ Eq(absl::nullopt));
+}
+
+TEST(InterFrameDelayTest, CorrectDelayForFrames) {
+ InterFrameDelay inter_frame_delay;
+ // Use a fake clock to simplify time keeping.
+ SimulatedClock clock(kStartTime);
+
+ // First frame is always delay 0.
+ uint32_t rtp = 90000;
+ EXPECT_THAT(inter_frame_delay.CalculateDelay(rtp, clock.CurrentTime()),
+ Optional(TimeDelta::Zero()));
+
+ // Perfectly timed frame has 0 delay.
+ clock.AdvanceTime(kFrameDelay);
+ rtp += kRtpTicksPerFrame;
+ EXPECT_THAT(inter_frame_delay.CalculateDelay(rtp, clock.CurrentTime()),
+ Optional(TimeDelta::Zero()));
+
+ // Slightly early frame will have a negative delay.
+ clock.AdvanceTime(kFrameDelay - TimeDelta::Millis(3));
+ rtp += kRtpTicksPerFrame;
+ EXPECT_THAT(inter_frame_delay.CalculateDelay(rtp, clock.CurrentTime()),
+ Optional(-TimeDelta::Millis(3)));
+
+ // Slightly late frame will have positive delay.
+ clock.AdvanceTime(kFrameDelay + TimeDelta::Micros(5125));
+ rtp += kRtpTicksPerFrame;
+ EXPECT_THAT(inter_frame_delay.CalculateDelay(rtp, clock.CurrentTime()),
+ Optional(TimeDelta::Micros(5125)));
+
+  // Simulate a faster RTP timestamp rate at the same wall-clock delay. The
+  // frame arrives late relative to its RTP timestamp, so the delay is
+  // positive.
+ clock.AdvanceTime(kFrameDelay);
+ rtp += kRtpTicksPerFrame / 2;
+ EXPECT_THAT(inter_frame_delay.CalculateDelay(rtp, clock.CurrentTime()),
+ Optional(kFrameDelay / 2.0));
+
+  // Simulate a slower RTP timestamp rate at the same wall-clock delay. The
+  // frame arrives early relative to its RTP timestamp, so the delay is
+  // negative.
+ clock.AdvanceTime(kFrameDelay);
+ rtp += 1.5 * kRtpTicksPerFrame;
+ EXPECT_THAT(inter_frame_delay.CalculateDelay(rtp, clock.CurrentTime()),
+ Optional(-kFrameDelay / 2.0));
+}
+
+TEST(InterFrameDelayTest, PositiveWrapAround) {
+ InterFrameDelay inter_frame_delay;
+ // Use a fake clock to simplify time keeping.
+ SimulatedClock clock(kStartTime);
+
+ // First frame is behind the max RTP by 1500.
+ uint32_t rtp = std::numeric_limits<uint32_t>::max() - 1500;
+ EXPECT_THAT(inter_frame_delay.CalculateDelay(rtp, clock.CurrentTime()),
+ Optional(TimeDelta::Zero()));
+
+ // Rtp wraps around, now 1499.
+ rtp += kRtpTicksPerFrame;
+
+ // Frame delay should be as normal, in this case simulated as 1ms late.
+ clock.AdvanceTime(kFrameDelay + TimeDelta::Millis(1));
+ EXPECT_THAT(inter_frame_delay.CalculateDelay(rtp, clock.CurrentTime()),
+ Optional(TimeDelta::Millis(1)));
+}
+
+TEST(InterFrameDelayTest, MultipleWrapArounds) {
+  // Simulate long pauses which cause multiple wrap-arounds.
+ constexpr Frequency k90Khz = Frequency::KiloHertz(90);
+ constexpr uint32_t kHalfRtp = std::numeric_limits<uint32_t>::max() / 2;
+ constexpr TimeDelta kWrapAroundDelay = kHalfRtp / k90Khz;
+
+ InterFrameDelay inter_frame_delay;
+ // Use a fake clock to simplify time keeping.
+ SimulatedClock clock(kStartTime);
+ uint32_t rtp = 0;
+ EXPECT_THAT(inter_frame_delay.CalculateDelay(rtp, clock.CurrentTime()),
+ Optional(TimeDelta::Zero()));
+
+ rtp += kHalfRtp;
+ clock.AdvanceTime(kWrapAroundDelay);
+ EXPECT_THAT(inter_frame_delay.CalculateDelay(rtp, clock.CurrentTime()),
+ Optional(TimeDelta::Zero()));
+  // 1st wrap-around.
+ rtp += kHalfRtp + 1;
+ clock.AdvanceTime(kWrapAroundDelay + TimeDelta::Millis(1));
+ EXPECT_THAT(inter_frame_delay.CalculateDelay(rtp, clock.CurrentTime()),
+ Optional(TimeDelta::Millis(1) - (1 / k90Khz)));
+
+ rtp += kHalfRtp;
+ clock.AdvanceTime(kWrapAroundDelay);
+ EXPECT_THAT(inter_frame_delay.CalculateDelay(rtp, clock.CurrentTime()),
+ Optional(TimeDelta::Zero()));
+  // 2nd wrap-around.
+ rtp += kHalfRtp + 1;
+ clock.AdvanceTime(kWrapAroundDelay - TimeDelta::Millis(1));
+ EXPECT_THAT(inter_frame_delay.CalculateDelay(rtp, clock.CurrentTime()),
+ Optional(-TimeDelta::Millis(1) - (1 / k90Khz)));
+
+  // Ensure that a short wall-clock delay combined with a large RTP timestamp
+  // advance between wrap-arounds yields the correct (negative) delay.
+ rtp += kHalfRtp;
+ clock.AdvanceTime(TimeDelta::Millis(10));
+ EXPECT_THAT(inter_frame_delay.CalculateDelay(rtp, clock.CurrentTime()),
+ Optional(-(kWrapAroundDelay - TimeDelta::Millis(10))));
+  // 3rd wrap-around, this time with a large RTP delay.
+ rtp += kHalfRtp + 1;
+ clock.AdvanceTime(TimeDelta::Millis(10));
+ EXPECT_THAT(
+ inter_frame_delay.CalculateDelay(rtp, clock.CurrentTime()),
+ Optional(-(kWrapAroundDelay - TimeDelta::Millis(10) + (1 / k90Khz))));
+}
+
+TEST(InterFrameDelayTest, NegativeWrapAroundAfterPositiveWrapAround) {
+ InterFrameDelay inter_frame_delay;
+ // Use a fake clock to simplify time keeping.
+ SimulatedClock clock(kStartTime);
+ uint32_t rtp = std::numeric_limits<uint32_t>::max() - 1500;
+ EXPECT_THAT(inter_frame_delay.CalculateDelay(rtp, clock.CurrentTime()),
+ Optional(TimeDelta::Zero()));
+
+ // Rtp wraps around, now 1499.
+ rtp += kRtpTicksPerFrame;
+  // Frame delay should be as normal, in this case a perfectly timed frame.
+ clock.AdvanceTime(kFrameDelay);
+ EXPECT_THAT(inter_frame_delay.CalculateDelay(rtp, clock.CurrentTime()),
+ Optional(TimeDelta::Zero()));
+
+ // Wrap back.
+ rtp -= kRtpTicksPerFrame;
+  // The backwards wrap-around makes the RTP timestamp old, so no delay is
+  // returned.
+ clock.AdvanceTime(kFrameDelay);
+ EXPECT_THAT(inter_frame_delay.CalculateDelay(rtp, clock.CurrentTime()),
+ Eq(absl::nullopt));
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/timing/jitter_estimator.cc b/third_party/libwebrtc/modules/video_coding/timing/jitter_estimator.cc
new file mode 100644
index 0000000000..7c5c7fdc06
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/timing/jitter_estimator.cc
@@ -0,0 +1,314 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/timing/jitter_estimator.h"
+
+#include <math.h>
+#include <string.h>
+
+#include <algorithm>
+#include <cstdint>
+
+#include "absl/types/optional.h"
+#include "api/field_trials_view.h"
+#include "api/units/data_size.h"
+#include "api/units/frequency.h"
+#include "api/units/time_delta.h"
+#include "api/units/timestamp.h"
+#include "modules/video_coding/timing/rtt_filter.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "system_wrappers/include/clock.h"
+
+namespace webrtc {
+namespace {
+static constexpr uint32_t kStartupDelaySamples = 30;
+static constexpr int64_t kFsAccuStartupSamples = 5;
+static constexpr Frequency kMaxFramerateEstimate = Frequency::Hertz(200);
+static constexpr TimeDelta kNackCountTimeout = TimeDelta::Seconds(60);
+static constexpr double kDefaultMaxTimestampDeviationInSigmas = 3.5;
+
+constexpr double kPhi = 0.97;
+constexpr double kPsi = 0.9999;
+constexpr uint32_t kAlphaCountMax = 400;
+constexpr uint32_t kNackLimit = 3;
+constexpr int32_t kNumStdDevDelayOutlier = 15;
+constexpr int32_t kNumStdDevFrameSizeOutlier = 3;
+// ~Less than 1% chance (look up in normal distribution table)...
+constexpr double kNoiseStdDevs = 2.33;
+// ...of getting 30 ms freezes
+constexpr double kNoiseStdDevOffset = 30.0;
+
+} // namespace
+
+JitterEstimator::JitterEstimator(Clock* clock,
+ const FieldTrialsView& field_trials)
+ : fps_counter_(30), // TODO(sprang): Use an estimator with limit based on
+ // time, rather than number of samples.
+ clock_(clock) {
+ Reset();
+}
+
+JitterEstimator::~JitterEstimator() = default;
+
+// Resets the JitterEstimate.
+void JitterEstimator::Reset() {
+ var_noise_ = 4.0;
+
+ avg_frame_size_ = kDefaultAvgAndMaxFrameSize;
+ max_frame_size_ = kDefaultAvgAndMaxFrameSize;
+ var_frame_size_ = 100;
+ last_update_time_ = absl::nullopt;
+ prev_estimate_ = absl::nullopt;
+ prev_frame_size_ = absl::nullopt;
+ avg_noise_ = 0.0;
+ alpha_count_ = 1;
+ filter_jitter_estimate_ = TimeDelta::Zero();
+ latest_nack_ = Timestamp::Zero();
+ nack_count_ = 0;
+ frame_size_sum_ = DataSize::Zero();
+ frame_size_count_ = 0;
+ startup_count_ = 0;
+ rtt_filter_.Reset();
+ fps_counter_.Reset();
+
+ kalman_filter_ = FrameDelayDeltaKalmanFilter();
+}
+
+// Updates the estimates with the new measurements.
+void JitterEstimator::UpdateEstimate(TimeDelta frame_delay,
+ DataSize frame_size) {
+ if (frame_size.IsZero()) {
+ return;
+ }
+ // Can't use DataSize since this can be negative.
+ double delta_frame_bytes =
+ frame_size.bytes() - prev_frame_size_.value_or(DataSize::Zero()).bytes();
+ if (frame_size_count_ < kFsAccuStartupSamples) {
+ frame_size_sum_ += frame_size;
+ frame_size_count_++;
+ } else if (frame_size_count_ == kFsAccuStartupSamples) {
+    // Seed the frame size filter with the average of the startup samples.
+ avg_frame_size_ = frame_size_sum_ / static_cast<double>(frame_size_count_);
+ frame_size_count_++;
+ }
+
+ DataSize avg_frame_size = kPhi * avg_frame_size_ + (1 - kPhi) * frame_size;
+ DataSize deviation_size = DataSize::Bytes(2 * sqrt(var_frame_size_));
+ if (frame_size < avg_frame_size_ + deviation_size) {
+ // Only update the average frame size if this sample wasn't a key frame.
+ avg_frame_size_ = avg_frame_size;
+ }
+
+ double delta_bytes = frame_size.bytes() - avg_frame_size.bytes();
+ var_frame_size_ = std::max(
+ kPhi * var_frame_size_ + (1 - kPhi) * (delta_bytes * delta_bytes), 1.0);
+
+ // Update max_frame_size_ estimate.
+ max_frame_size_ = std::max(kPsi * max_frame_size_, frame_size);
+
+ if (!prev_frame_size_) {
+ prev_frame_size_ = frame_size;
+ return;
+ }
+ prev_frame_size_ = frame_size;
+
+ // Cap frame_delay based on the current time deviation noise.
+ TimeDelta max_time_deviation = TimeDelta::Millis(
+ kDefaultMaxTimestampDeviationInSigmas * sqrt(var_noise_) + 0.5);
+ frame_delay.Clamp(-max_time_deviation, max_time_deviation);
+
+ // Only update the Kalman filter if the sample is not considered an extreme
+ // outlier. Even if it is an extreme outlier from a delay point of view, if
+ // the frame size also is large the deviation is probably due to an incorrect
+ // line slope.
+ double deviation =
+ frame_delay.ms() -
+ kalman_filter_.GetFrameDelayVariationEstimateTotal(delta_frame_bytes);
+
+ if (fabs(deviation) < kNumStdDevDelayOutlier * sqrt(var_noise_) ||
+ frame_size.bytes() >
+ avg_frame_size_.bytes() +
+ kNumStdDevFrameSizeOutlier * sqrt(var_frame_size_)) {
+ // Update the variance of the deviation from the line given by the Kalman
+ // filter.
+ EstimateRandomJitter(deviation);
+    // Prevent updating with frames which have been congested by a large frame,
+    // and therefore arrive almost at the same time as that frame.
+    // This can occur when we receive a large frame (key frame) which has been
+    // delayed. The next frame is of normal size (delta frame), and thus
+    // `delta_frame_bytes` will be << 0. This discards all frame samples which
+    // arrive right after a key frame.
+ if (delta_frame_bytes > -0.25 * max_frame_size_.bytes()) {
+ // Update the Kalman filter with the new data
+ kalman_filter_.PredictAndUpdate(frame_delay, delta_frame_bytes,
+ max_frame_size_, var_noise_);
+ }
+ } else {
+ int nStdDev =
+ (deviation >= 0) ? kNumStdDevDelayOutlier : -kNumStdDevDelayOutlier;
+ EstimateRandomJitter(nStdDev * sqrt(var_noise_));
+ }
+ // Post process the total estimated jitter
+ if (startup_count_ >= kStartupDelaySamples) {
+ PostProcessEstimate();
+ } else {
+ startup_count_++;
+ }
+}
+
+// Updates the nack/packet ratio.
+void JitterEstimator::FrameNacked() {
+ if (nack_count_ < kNackLimit) {
+ nack_count_++;
+ }
+ latest_nack_ = clock_->CurrentTime();
+}
+
+// Estimates the random jitter by calculating the variance of the sample
+// deviation from the line given by the Kalman filter.
+void JitterEstimator::EstimateRandomJitter(double d_dT) {
+ Timestamp now = clock_->CurrentTime();
+ if (last_update_time_.has_value()) {
+ fps_counter_.AddSample((now - *last_update_time_).us());
+ }
+ last_update_time_ = now;
+
+ if (alpha_count_ == 0) {
+ RTC_DCHECK_NOTREACHED();
+ return;
+ }
+ double alpha =
+ static_cast<double>(alpha_count_ - 1) / static_cast<double>(alpha_count_);
+ alpha_count_++;
+ if (alpha_count_ > kAlphaCountMax)
+ alpha_count_ = kAlphaCountMax;
+
+  // To avoid having a low frame rate stream react more slowly to changes,
+  // scale the alpha weight relative to a 30 fps stream.
+ Frequency fps = GetFrameRate();
+ if (fps > Frequency::Zero()) {
+ constexpr Frequency k30Fps = Frequency::Hertz(30);
+ double rate_scale = k30Fps / fps;
+ // At startup, there can be a lot of noise in the fps estimate.
+ // Interpolate rate_scale linearly, from 1.0 at sample #1, to 30.0 / fps
+ // at sample #kStartupDelaySamples.
+ if (alpha_count_ < kStartupDelaySamples) {
+ rate_scale =
+ (alpha_count_ * rate_scale + (kStartupDelaySamples - alpha_count_)) /
+ kStartupDelaySamples;
+ }
+ alpha = pow(alpha, rate_scale);
+ }
+
+ double avgNoise = alpha * avg_noise_ + (1 - alpha) * d_dT;
+ double varNoise = alpha * var_noise_ +
+ (1 - alpha) * (d_dT - avg_noise_) * (d_dT - avg_noise_);
+ avg_noise_ = avgNoise;
+ var_noise_ = varNoise;
+ if (var_noise_ < 1.0) {
+    // The variance must never reach zero, since then we could get stuck and
+    // consider all subsequent samples outliers.
+ var_noise_ = 1.0;
+ }
+}
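+
+// Illustrative note on the rate scaling above: at 10 fps (after startup),
+// `rate_scale` is 30 / 10 = 3.0 and `alpha` is cubed. Since 0 < alpha < 1,
+// this lowers the weight on history so that the noise estimate adapts roughly
+// as fast per unit time as it would for a 30 fps stream.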
+
+double JitterEstimator::NoiseThreshold() const {
+ double noiseThreshold = kNoiseStdDevs * sqrt(var_noise_) - kNoiseStdDevOffset;
+ if (noiseThreshold < 1.0) {
+ noiseThreshold = 1.0;
+ }
+ return noiseThreshold;
+}
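+
+// For example, with the initial `var_noise_` of 4.0, the raw threshold is
+// 2.33 * sqrt(4.0) - 30.0 = -25.34, which is clamped to the 1.0 ms floor; the
+// threshold only rises above the floor once sqrt(var_noise_) exceeds roughly
+// 13.3.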
+
+// Calculates the current jitter estimate from the filtered estimates.
+TimeDelta JitterEstimator::CalculateEstimate() {
+ double retMs = kalman_filter_.GetFrameDelayVariationEstimateSizeBased(
+ max_frame_size_.bytes() - avg_frame_size_.bytes()) +
+ NoiseThreshold();
+
+ TimeDelta ret = TimeDelta::Millis(retMs);
+
+ constexpr TimeDelta kMinEstimate = TimeDelta::Millis(1);
+ constexpr TimeDelta kMaxEstimate = TimeDelta::Seconds(10);
+  // A very low (or negative) estimate is ignored.
+ if (ret < kMinEstimate) {
+ ret = prev_estimate_.value_or(kMinEstimate);
+ // Sanity check to make sure that no other method has set `prev_estimate_`
+ // to a value lower than `kMinEstimate`.
+ RTC_DCHECK_GE(ret, kMinEstimate);
+ } else if (ret > kMaxEstimate) { // Sanity
+ ret = kMaxEstimate;
+ }
+ prev_estimate_ = ret;
+ return ret;
+}
+
+void JitterEstimator::PostProcessEstimate() {
+ filter_jitter_estimate_ = CalculateEstimate();
+}
+
+void JitterEstimator::UpdateRtt(TimeDelta rtt) {
+ rtt_filter_.Update(rtt);
+}
+
+// Returns the current filtered estimate if available,
+// otherwise tries to calculate an estimate.
+TimeDelta JitterEstimator::GetJitterEstimate(
+ double rtt_multiplier,
+ absl::optional<TimeDelta> rtt_mult_add_cap) {
+ TimeDelta jitter = CalculateEstimate() + OPERATING_SYSTEM_JITTER;
+ Timestamp now = clock_->CurrentTime();
+
+ if (now - latest_nack_ > kNackCountTimeout)
+ nack_count_ = 0;
+
+ if (filter_jitter_estimate_ > jitter)
+ jitter = filter_jitter_estimate_;
+ if (nack_count_ >= kNackLimit) {
+ if (rtt_mult_add_cap.has_value()) {
+ jitter += std::min(rtt_filter_.Rtt() * rtt_multiplier,
+ rtt_mult_add_cap.value());
+ } else {
+ jitter += rtt_filter_.Rtt() * rtt_multiplier;
+ }
+ }
+
+ static const Frequency kJitterScaleLowThreshold = Frequency::Hertz(5);
+ static const Frequency kJitterScaleHighThreshold = Frequency::Hertz(10);
+ Frequency fps = GetFrameRate();
+ // Ignore jitter for very low fps streams.
+ if (fps < kJitterScaleLowThreshold) {
+ if (fps.IsZero()) {
+ return std::max(TimeDelta::Zero(), jitter);
+ }
+ return TimeDelta::Zero();
+ }
+
+ // Semi-low frame rate; scale by factor linearly interpolated from 0.0 at
+ // kJitterScaleLowThreshold to 1.0 at kJitterScaleHighThreshold.
+ if (fps < kJitterScaleHighThreshold) {
+ jitter = (1.0 / (kJitterScaleHighThreshold - kJitterScaleLowThreshold)) *
+ (fps - kJitterScaleLowThreshold) * jitter;
+ }
+
+ return std::max(TimeDelta::Zero(), jitter);
+}
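+
+// Illustrative example of the scaling above: at 7.5 fps the scale factor is
+// (7.5 - 5) / (10 - 5) = 0.5, so the returned jitter is halved; at 10 fps and
+// above the estimate is returned unscaled.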
+
+Frequency JitterEstimator::GetFrameRate() const {
+ TimeDelta mean_frame_period = TimeDelta::Micros(fps_counter_.ComputeMean());
+ if (mean_frame_period <= TimeDelta::Zero())
+ return Frequency::Zero();
+
+ Frequency fps = 1 / mean_frame_period;
+ // Sanity check.
+ RTC_DCHECK_GE(fps, Frequency::Zero());
+ return std::min(fps, kMaxFramerateEstimate);
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/timing/jitter_estimator.h b/third_party/libwebrtc/modules/video_coding/timing/jitter_estimator.h
new file mode 100644
index 0000000000..ec1e696b68
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/timing/jitter_estimator.h
@@ -0,0 +1,129 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_TIMING_JITTER_ESTIMATOR_H_
+#define MODULES_VIDEO_CODING_TIMING_JITTER_ESTIMATOR_H_
+
+#include "absl/types/optional.h"
+#include "api/field_trials_view.h"
+#include "api/units/data_size.h"
+#include "api/units/frequency.h"
+#include "api/units/time_delta.h"
+#include "api/units/timestamp.h"
+#include "modules/video_coding/timing/frame_delay_delta_kalman_filter.h"
+#include "modules/video_coding/timing/rtt_filter.h"
+#include "rtc_base/rolling_accumulator.h"
+
+namespace webrtc {
+
+class Clock;
+
+class JitterEstimator {
+ public:
+ explicit JitterEstimator(Clock* clock, const FieldTrialsView& field_trials);
+ ~JitterEstimator();
+ JitterEstimator(const JitterEstimator&) = delete;
+ JitterEstimator& operator=(const JitterEstimator&) = delete;
+
+ // Resets the estimate to the initial state.
+ void Reset();
+
+ // Updates the jitter estimate with the new data.
+ //
+ // Input:
+  // - frame_delay : Delay-delta calculated by the `InterFrameDelay` estimator.
+ // - frame_size : Frame size of the current frame.
+ void UpdateEstimate(TimeDelta frame_delay, DataSize frame_size);
+
+ // Returns the current jitter estimate and adds an RTT dependent term in cases
+ // of retransmission.
+ // Input:
+ // - rtt_multiplier : RTT param multiplier (when applicable).
+ // - rtt_mult_add_cap : Multiplier cap from the RTTMultExperiment.
+ //
+ // Return value : Jitter estimate.
+ TimeDelta GetJitterEstimate(double rtt_multiplier,
+ absl::optional<TimeDelta> rtt_mult_add_cap);
+
+ // Updates the nack counter.
+ void FrameNacked();
+
+ // Updates the RTT filter.
+ //
+ // Input:
+ // - rtt : Round trip time.
+ void UpdateRtt(TimeDelta rtt);
+
+  // A constant describing the delay on the receiving side which is accounted
+  // for neither by the jitter buffer nor by the decoding delay estimate.
+ static constexpr TimeDelta OPERATING_SYSTEM_JITTER = TimeDelta::Millis(10);
+
+ private:
+ double var_noise_; // Variance of the time-deviation from the line
+
+ // Updates the random jitter estimate, i.e. the variance of the time
+ // deviations from the line given by the Kalman filter.
+ //
+ // Input:
+ // - d_dT : The deviation from the kalman estimate.
+ void EstimateRandomJitter(double d_dT);
+
+ double NoiseThreshold() const;
+
+ // Calculates the current jitter estimate.
+ //
+ // Return value : The current jitter estimate.
+ TimeDelta CalculateEstimate();
+
+ // Post process the calculated estimate.
+ void PostProcessEstimate();
+
+ Frequency GetFrameRate() const;
+
+ // Filters the {frame_delay_delta, frame_size_delta} measurements through
+ // a linear Kalman filter.
+ FrameDelayDeltaKalmanFilter kalman_filter_;
+
+ static constexpr DataSize kDefaultAvgAndMaxFrameSize = DataSize::Bytes(500);
+ DataSize avg_frame_size_ = kDefaultAvgAndMaxFrameSize; // Average frame size
+ double var_frame_size_; // Frame size variance. Unit is bytes^2.
+ // Largest frame size received (descending with a factor kPsi)
+ DataSize max_frame_size_ = kDefaultAvgAndMaxFrameSize;
+ DataSize frame_size_sum_ = DataSize::Zero();
+ uint32_t frame_size_count_;
+
+ absl::optional<Timestamp> last_update_time_;
+ // The previously returned jitter estimate
+ absl::optional<TimeDelta> prev_estimate_;
+ // Frame size of the previous frame
+ absl::optional<DataSize> prev_frame_size_;
+ // Average of the random jitter
+ double avg_noise_;
+ uint32_t alpha_count_;
+ // The filtered sum of jitter estimates
+ TimeDelta filter_jitter_estimate_ = TimeDelta::Zero();
+
+ uint32_t startup_count_;
+ // Time when the latest nack was seen
+ Timestamp latest_nack_ = Timestamp::Zero();
+ // Keeps track of the number of nacks received, but never goes above
+ // kNackLimit.
+ uint32_t nack_count_;
+ RttFilter rtt_filter_;
+
+ // Tracks frame rates in microseconds.
+ rtc::RollingAccumulator<uint64_t> fps_counter_;
+ Clock* clock_;
+};
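+
+// Minimal usage sketch (mirroring the unit tests; values are illustrative):
+//
+//   SimulatedClock clock(0);
+//   test::ScopedKeyValueConfig field_trials;
+//   JitterEstimator estimator(&clock, field_trials);
+//   estimator.UpdateEstimate(TimeDelta::Millis(3), DataSize::Bytes(1000));
+//   TimeDelta jitter =
+//       estimator.GetJitterEstimate(/*rtt_multiplier=*/0.0, absl::nullopt);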
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_TIMING_JITTER_ESTIMATOR_H_
diff --git a/third_party/libwebrtc/modules/video_coding/timing/jitter_estimator_gn/moz.build b/third_party/libwebrtc/modules/video_coding/timing/jitter_estimator_gn/moz.build
new file mode 100644
index 0000000000..f45f6f072f
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/timing/jitter_estimator_gn/moz.build
@@ -0,0 +1,214 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/video_coding/timing/jitter_estimator.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "GLESv2",
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "dl",
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("jitter_estimator_gn")
diff --git a/third_party/libwebrtc/modules/video_coding/timing/jitter_estimator_unittest.cc b/third_party/libwebrtc/modules/video_coding/timing/jitter_estimator_unittest.cc
new file mode 100644
index 0000000000..f442dbb62d
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/timing/jitter_estimator_unittest.cc
@@ -0,0 +1,113 @@
+/* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/timing/jitter_estimator.h"
+
+#include <stdint.h>
+
+#include <memory>
+#include <utility>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/array_view.h"
+#include "api/units/data_size.h"
+#include "api/units/frequency.h"
+#include "api/units/time_delta.h"
+#include "rtc_base/numerics/histogram_percentile_counter.h"
+#include "rtc_base/strings/string_builder.h"
+#include "rtc_base/time_utils.h"
+#include "system_wrappers/include/clock.h"
+#include "test/gtest.h"
+#include "test/scoped_key_value_config.h"
+
+namespace webrtc {
+
+class TestJitterEstimator : public ::testing::Test {
+ protected:
+ TestJitterEstimator() : fake_clock_(0) {}
+
+ virtual void SetUp() {
+ estimator_ = std::make_unique<JitterEstimator>(&fake_clock_, field_trials_);
+ }
+
+ SimulatedClock fake_clock_;
+ test::ScopedKeyValueConfig field_trials_;
+ std::unique_ptr<JitterEstimator> estimator_;
+};
+
+// Generates some simple test data in the form of a sawtooth wave.
+class ValueGenerator {
+ public:
+ explicit ValueGenerator(int32_t amplitude)
+ : amplitude_(amplitude), counter_(0) {}
+
+ virtual ~ValueGenerator() = default;
+
+ TimeDelta Delay() const {
+ return TimeDelta::Millis((counter_ % 11) - 5) * amplitude_;
+ }
+
+ DataSize FrameSize() const {
+ return DataSize::Bytes(1000 + Delay().ms() / 5);
+ }
+
+ void Advance() { ++counter_; }
+
+ private:
+ const int32_t amplitude_;
+ int64_t counter_;
+};
+
+TEST_F(TestJitterEstimator, TestLowRate) {
+ ValueGenerator gen(10);
+ // At 5 fps, we disable jitter delay altogether.
+ TimeDelta time_delta = 1 / Frequency::Hertz(5);
+ for (int i = 0; i < 60; ++i) {
+ estimator_->UpdateEstimate(gen.Delay(), gen.FrameSize());
+ fake_clock_.AdvanceTime(time_delta);
+ if (i > 2)
+ EXPECT_EQ(estimator_->GetJitterEstimate(0, absl::nullopt),
+ TimeDelta::Zero());
+ gen.Advance();
+ }
+}
+
+TEST_F(TestJitterEstimator, RttMultAddCap) {
+ std::vector<std::pair<TimeDelta, rtc::HistogramPercentileCounter>>
+ jitter_by_rtt_mult_cap;
+ jitter_by_rtt_mult_cap.emplace_back(
+ /*rtt_mult_add_cap=*/TimeDelta::Millis(10), /*long_tail_boundary=*/1000);
+ jitter_by_rtt_mult_cap.emplace_back(
+ /*rtt_mult_add_cap=*/TimeDelta::Millis(200), /*long_tail_boundary=*/1000);
+
+ for (auto& [rtt_mult_add_cap, jitter] : jitter_by_rtt_mult_cap) {
+ SetUp();
+
+ ValueGenerator gen(50);
+ TimeDelta time_delta = 1 / Frequency::Hertz(30);
+ constexpr TimeDelta kRtt = TimeDelta::Millis(250);
+ for (int i = 0; i < 100; ++i) {
+ estimator_->UpdateEstimate(gen.Delay(), gen.FrameSize());
+ fake_clock_.AdvanceTime(time_delta);
+ estimator_->FrameNacked();
+ estimator_->UpdateRtt(kRtt);
+ jitter.Add(
+ estimator_->GetJitterEstimate(/*rtt_mult=*/1.0, rtt_mult_add_cap)
+ .ms());
+ gen.Advance();
+ }
+ }
+
+ // 200ms cap should result in at least 25% higher max compared to 10ms.
+ EXPECT_GT(*jitter_by_rtt_mult_cap[1].second.GetPercentile(1.0),
+ *jitter_by_rtt_mult_cap[0].second.GetPercentile(1.0) * 1.25);
+}
+
+} // namespace webrtc
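Not part of the patch: a minimal sketch of the JitterEstimator API exercised by
the fixture above, assuming an in-tree WebRTC build with the same includes as
the test. A constant delay and frame size should keep the estimate near zero;
the sawtooth ValueGenerator is what introduces jitter in the tests.

```cpp
#include "absl/types/optional.h"
#include "api/units/data_size.h"
#include "api/units/frequency.h"
#include "api/units/time_delta.h"
#include "modules/video_coding/timing/jitter_estimator.h"
#include "system_wrappers/include/clock.h"
#include "test/scoped_key_value_config.h"

// Feeds `num_frames` perfectly regular frames and returns the jitter estimate.
webrtc::TimeDelta SteadyStreamJitter(int num_frames) {
  webrtc::SimulatedClock clock(0);
  webrtc::test::ScopedKeyValueConfig field_trials;
  webrtc::JitterEstimator estimator(&clock, field_trials);
  const webrtc::TimeDelta frame_interval = 1 / webrtc::Frequency::Hertz(30);
  for (int i = 0; i < num_frames; ++i) {
    // Constant 5 ms delay and 1200-byte frames: no jitter to estimate.
    estimator.UpdateEstimate(webrtc::TimeDelta::Millis(5),
                             webrtc::DataSize::Bytes(1200));
    clock.AdvanceTime(frame_interval);
  }
  // rtt_mult = 0 removes the RTT term, as in TestLowRate above.
  return estimator.GetJitterEstimate(/*rtt_mult=*/0.0, absl::nullopt);
}
```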
diff --git a/third_party/libwebrtc/modules/video_coding/timing/rtt_filter.cc b/third_party/libwebrtc/modules/video_coding/timing/rtt_filter.cc
new file mode 100644
index 0000000000..6962224d61
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/timing/rtt_filter.cc
@@ -0,0 +1,161 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/timing/rtt_filter.h"
+
+#include <math.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <algorithm>
+
+#include "absl/algorithm/container.h"
+#include "absl/container/inlined_vector.h"
+#include "api/units/time_delta.h"
+
+namespace webrtc {
+
+namespace {
+
+constexpr TimeDelta kMaxRtt = TimeDelta::Seconds(3);
+constexpr uint32_t kFilterFactorMax = 35;
+constexpr double kJumpStddev = 2.5;
+constexpr double kDriftStdDev = 3.5;
+
+} // namespace
+
+RttFilter::RttFilter()
+ : avg_rtt_(TimeDelta::Zero()),
+ var_rtt_(0),
+ max_rtt_(TimeDelta::Zero()),
+ jump_buf_(kMaxDriftJumpCount, TimeDelta::Zero()),
+ drift_buf_(kMaxDriftJumpCount, TimeDelta::Zero()) {
+ Reset();
+}
+
+void RttFilter::Reset() {
+ got_non_zero_update_ = false;
+ avg_rtt_ = TimeDelta::Zero();
+ var_rtt_ = 0;
+ max_rtt_ = TimeDelta::Zero();
+ filt_fact_count_ = 1;
+ absl::c_fill(jump_buf_, TimeDelta::Zero());
+ absl::c_fill(drift_buf_, TimeDelta::Zero());
+}
+
+void RttFilter::Update(TimeDelta rtt) {
+ if (!got_non_zero_update_) {
+ if (rtt.IsZero()) {
+ return;
+ }
+ got_non_zero_update_ = true;
+ }
+
+ // Sanity check
+ if (rtt > kMaxRtt) {
+ rtt = kMaxRtt;
+ }
+
+ double filt_factor = 0;
+ if (filt_fact_count_ > 1) {
+ filt_factor = static_cast<double>(filt_fact_count_ - 1) / filt_fact_count_;
+ }
+ filt_fact_count_++;
+ if (filt_fact_count_ > kFilterFactorMax) {
+    // This prevents filt_factor from going above
+    // (kFilterFactorMax - 1) / kFilterFactorMax,
+    // e.g., kFilterFactorMax = 35 => filt_factor = 34/35 ≈ 0.97.
+ filt_fact_count_ = kFilterFactorMax;
+ }
+ TimeDelta old_avg = avg_rtt_;
+ int64_t old_var = var_rtt_;
+ avg_rtt_ = filt_factor * avg_rtt_ + (1 - filt_factor) * rtt;
+ int64_t delta_ms = (rtt - avg_rtt_).ms();
+ var_rtt_ = filt_factor * var_rtt_ + (1 - filt_factor) * (delta_ms * delta_ms);
+ max_rtt_ = std::max(rtt, max_rtt_);
+ if (!JumpDetection(rtt) || !DriftDetection(rtt)) {
+    // The jump or drift detector rejected the sample; roll back the
+    // statistics to their previous values.
+ avg_rtt_ = old_avg;
+ var_rtt_ = old_var;
+ }
+}
+
+bool RttFilter::JumpDetection(TimeDelta rtt) {
+ TimeDelta diff_from_avg = avg_rtt_ - rtt;
+ // Unit of var_rtt_ is ms^2.
+ TimeDelta jump_threshold = TimeDelta::Millis(kJumpStddev * sqrt(var_rtt_));
+ if (diff_from_avg.Abs() > jump_threshold) {
+ bool positive_diff = diff_from_avg >= TimeDelta::Zero();
+ if (!jump_buf_.empty() && positive_diff != last_jump_positive_) {
+      // Since the signs differ, the samples currently in the buffer
+      // are useless, as they represent a jump in a different
+      // direction.
+ jump_buf_.clear();
+ }
+ if (jump_buf_.size() < kMaxDriftJumpCount) {
+ // Update the buffer used for the short time statistics.
+ // The sign of the diff is used for updating the counter since
+ // we want to use the same buffer for keeping track of when
+ // the RTT jumps down and up.
+ jump_buf_.push_back(rtt);
+ last_jump_positive_ = positive_diff;
+ }
+ if (jump_buf_.size() >= kMaxDriftJumpCount) {
+ // Detected an RTT jump
+ ShortRttFilter(jump_buf_);
+ filt_fact_count_ = kMaxDriftJumpCount + 1;
+ jump_buf_.clear();
+ } else {
+ return false;
+ }
+ } else {
+ jump_buf_.clear();
+ }
+ return true;
+}
+
+bool RttFilter::DriftDetection(TimeDelta rtt) {
+ // Unit of sqrt of var_rtt_ is ms.
+ TimeDelta drift_threshold = TimeDelta::Millis(kDriftStdDev * sqrt(var_rtt_));
+ if (max_rtt_ - avg_rtt_ > drift_threshold) {
+ if (drift_buf_.size() < kMaxDriftJumpCount) {
+ // Update the buffer used for the short time statistics.
+ drift_buf_.push_back(rtt);
+ }
+ if (drift_buf_.size() >= kMaxDriftJumpCount) {
+ // Detected an RTT drift
+ ShortRttFilter(drift_buf_);
+ filt_fact_count_ = kMaxDriftJumpCount + 1;
+ drift_buf_.clear();
+ }
+ } else {
+ drift_buf_.clear();
+ }
+ return true;
+}
+
+void RttFilter::ShortRttFilter(const BufferList& buf) {
+ RTC_DCHECK_EQ(buf.size(), kMaxDriftJumpCount);
+ max_rtt_ = TimeDelta::Zero();
+ avg_rtt_ = TimeDelta::Zero();
+ for (const TimeDelta& rtt : buf) {
+ if (rtt > max_rtt_) {
+ max_rtt_ = rtt;
+ }
+ avg_rtt_ += rtt;
+ }
+ avg_rtt_ = avg_rtt_ / static_cast<double>(buf.size());
+}
+
+TimeDelta RttFilter::Rtt() const {
+ return max_rtt_;
+}
+
+} // namespace webrtc
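Not part of the patch: a standalone sketch of just the long-term averaging in
RttFilter::Update() above. The helper is hypothetical and reproduces only the
filter-factor ramp, leaving out the variance update and the jump/drift
handling.

```cpp
#include <algorithm>
#include <cstdint>

// Exponential average with a warm-up: the weight of a new sample decays from
// 1 (first sample) toward 1/kFilterFactorMax, mirroring rtt_filter.cc.
double FilteredAverageMs(const int64_t* samples_ms, int n) {
  constexpr uint32_t kFilterFactorMax = 35;  // Same cap as in rtt_filter.cc.
  double avg_ms = 0;
  uint32_t count = 1;
  for (int i = 0; i < n; ++i) {
    double filt_factor =
        count > 1 ? static_cast<double>(count - 1) / count : 0.0;
    count = std::min(count + 1, kFilterFactorMax);
    // avg <- f * avg + (1 - f) * sample; f caps at 34/35 ~= 0.97.
    avg_ms = filt_factor * avg_ms + (1 - filt_factor) * samples_ms[i];
  }
  return avg_ms;
}
```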
diff --git a/third_party/libwebrtc/modules/video_coding/timing/rtt_filter.h b/third_party/libwebrtc/modules/video_coding/timing/rtt_filter.h
new file mode 100644
index 0000000000..b8700b23ee
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/timing/rtt_filter.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_TIMING_RTT_FILTER_H_
+#define MODULES_VIDEO_CODING_TIMING_RTT_FILTER_H_
+
+#include <stdint.h>
+
+#include "absl/container/inlined_vector.h"
+#include "api/units/time_delta.h"
+
+namespace webrtc {
+
+class RttFilter {
+ public:
+ RttFilter();
+ RttFilter(const RttFilter&) = delete;
+ RttFilter& operator=(const RttFilter&) = delete;
+
+ // Resets the filter.
+ void Reset();
+ // Updates the filter with a new sample.
+ void Update(TimeDelta rtt);
+ // A getter function for the current RTT level.
+ TimeDelta Rtt() const;
+
+ private:
+ // The size of the drift and jump memory buffers
+ // and thus also the detection threshold for these
+ // detectors in number of samples.
+ static constexpr int kMaxDriftJumpCount = 5;
+ using BufferList = absl::InlinedVector<TimeDelta, kMaxDriftJumpCount>;
+
+ // Detects RTT jumps by comparing the difference between
+ // samples and average to the standard deviation.
+ // Returns true if the long time statistics should be updated
+ // and false otherwise
+ bool JumpDetection(TimeDelta rtt);
+
+ // Detects RTT drifts by comparing the difference between
+ // max and average to the standard deviation.
+ // Returns true if the long time statistics should be updated
+ // and false otherwise
+ bool DriftDetection(TimeDelta rtt);
+
+ // Computes the short time average and maximum of the vector buf.
+ void ShortRttFilter(const BufferList& buf);
+
+ bool got_non_zero_update_;
+ TimeDelta avg_rtt_;
+ // Variance units are TimeDelta^2. Store as ms^2.
+ int64_t var_rtt_;
+ TimeDelta max_rtt_;
+ uint32_t filt_fact_count_;
+ bool last_jump_positive_ = false;
+ BufferList jump_buf_;
+ BufferList drift_buf_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_TIMING_RTT_FILTER_H_
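Not part of the patch: a usage sketch for the interface above (in-tree build
assumed). Note that Rtt() reports the tracked maximum, not the average.

```cpp
#include "api/units/time_delta.h"
#include "modules/video_coding/timing/rtt_filter.h"

webrtc::TimeDelta SmoothedMaxRtt() {
  webrtc::RttFilter filter;
  filter.Update(webrtc::TimeDelta::Millis(120));
  filter.Update(webrtc::TimeDelta::Millis(130));
  // Returns 130 ms here: Rtt() is backed by the max, not the average.
  return filter.Rtt();
}
```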
diff --git a/third_party/libwebrtc/modules/video_coding/timing/rtt_filter_gn/moz.build b/third_party/libwebrtc/modules/video_coding/timing/rtt_filter_gn/moz.build
new file mode 100644
index 0000000000..54c90f4fee
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/timing/rtt_filter_gn/moz.build
@@ -0,0 +1,201 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/video_coding/timing/rtt_filter.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("rtt_filter_gn")
diff --git a/third_party/libwebrtc/modules/video_coding/timing/rtt_filter_unittest.cc b/third_party/libwebrtc/modules/video_coding/timing/rtt_filter_unittest.cc
new file mode 100644
index 0000000000..05502e6f5b
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/timing/rtt_filter_unittest.cc
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/timing/rtt_filter.h"
+
+#include "api/units/time_delta.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+TEST(RttFilterTest, RttIsCapped) {
+ RttFilter rtt_filter;
+ rtt_filter.Update(TimeDelta::Seconds(500));
+
+ EXPECT_EQ(rtt_filter.Rtt(), TimeDelta::Seconds(3));
+}
+
+// If the difference between samples is more than 2.5 stddevs away from the
+// mean, this is considered a jump. After 5 data points at the new level, the
+// RTT is reset to the new level.
+TEST(RttFilterTest, PositiveJumpDetection) {
+ RttFilter rtt_filter;
+
+ rtt_filter.Update(TimeDelta::Millis(200));
+ rtt_filter.Update(TimeDelta::Millis(200));
+ rtt_filter.Update(TimeDelta::Millis(200));
+
+ // Trigger 5 jumps.
+ rtt_filter.Update(TimeDelta::Millis(1400));
+ rtt_filter.Update(TimeDelta::Millis(1500));
+ rtt_filter.Update(TimeDelta::Millis(1600));
+ rtt_filter.Update(TimeDelta::Millis(1600));
+
+ EXPECT_EQ(rtt_filter.Rtt(), TimeDelta::Millis(1600));
+
+ rtt_filter.Update(TimeDelta::Millis(1600));
+ EXPECT_EQ(rtt_filter.Rtt(), TimeDelta::Millis(1600));
+}
+
+TEST(RttFilterTest, NegativeJumpDetection) {
+ RttFilter rtt_filter;
+
+ for (int i = 0; i < 10; ++i)
+ rtt_filter.Update(TimeDelta::Millis(1500));
+
+ // Trigger 5 negative data points that jump rtt down.
+ rtt_filter.Update(TimeDelta::Millis(200));
+ rtt_filter.Update(TimeDelta::Millis(200));
+ rtt_filter.Update(TimeDelta::Millis(200));
+ rtt_filter.Update(TimeDelta::Millis(200));
+ // Before 5 data points at the new level, max RTT is still 1500.
+ EXPECT_EQ(rtt_filter.Rtt(), TimeDelta::Millis(1500));
+
+ rtt_filter.Update(TimeDelta::Millis(300));
+ EXPECT_EQ(rtt_filter.Rtt(), TimeDelta::Millis(300));
+}
+
+TEST(RttFilterTest, JumpsResetByDirectionShift) {
+ RttFilter rtt_filter;
+ for (int i = 0; i < 10; ++i)
+ rtt_filter.Update(TimeDelta::Millis(1500));
+
+ // Trigger 4 negative jumps, then a positive one. This resets the jump
+ // detection.
+ rtt_filter.Update(TimeDelta::Millis(200));
+ rtt_filter.Update(TimeDelta::Millis(200));
+ rtt_filter.Update(TimeDelta::Millis(200));
+ rtt_filter.Update(TimeDelta::Millis(200));
+ rtt_filter.Update(TimeDelta::Millis(2000));
+ EXPECT_EQ(rtt_filter.Rtt(), TimeDelta::Millis(2000));
+
+ rtt_filter.Update(TimeDelta::Millis(300));
+ EXPECT_EQ(rtt_filter.Rtt(), TimeDelta::Millis(2000));
+}
+
+// If the difference between the max and the average is more than 3.5 stddevs
+// away, a drift is detected, and a short filter is applied to find a new max
+// RTT.
+TEST(RttFilterTest, DriftDetection) {
+ RttFilter rtt_filter;
+
+  // Descend the RTT in 30 ms steps and settle at 700 ms. With these targets, a
+  // drift is detected after the 700 ms RTT has been reported around 50 times.
+ constexpr TimeDelta kStartRtt = TimeDelta::Millis(1000);
+ constexpr TimeDelta kDriftTarget = TimeDelta::Millis(700);
+ constexpr TimeDelta kDelta = TimeDelta::Millis(30);
+ for (TimeDelta rtt = kStartRtt; rtt >= kDriftTarget; rtt -= kDelta)
+ rtt_filter.Update(rtt);
+
+ EXPECT_EQ(rtt_filter.Rtt(), kStartRtt);
+
+ for (int i = 0; i < 50; ++i)
+ rtt_filter.Update(kDriftTarget);
+ EXPECT_EQ(rtt_filter.Rtt(), kDriftTarget);
+}
+
+} // namespace webrtc
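Not part of the patch: a sketch of the threshold checks these tests exercise,
using the same constants as rtt_filter.cc (2.5 stddevs for jumps, 3.5 for
drift). The helpers are hypothetical and work on raw milliseconds.

```cpp
#include <cmath>
#include <cstdint>
#include <cstdlib>

// A sample is a jump candidate when it deviates from the running average by
// more than kJumpStddev standard deviations (the variance is in ms^2).
bool IsJumpCandidate(int64_t rtt_ms, int64_t avg_ms, int64_t var_ms2) {
  constexpr double kJumpStddev = 2.5;
  return std::llabs(rtt_ms - avg_ms) > kJumpStddev * std::sqrt(var_ms2);
}

// Drift is suspected when the tracked max pulls away from the average.
bool IsDriftCandidate(int64_t max_ms, int64_t avg_ms, int64_t var_ms2) {
  constexpr double kDriftStdDev = 3.5;
  return (max_ms - avg_ms) > kDriftStdDev * std::sqrt(var_ms2);
}
```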
diff --git a/third_party/libwebrtc/modules/video_coding/timing/timing.cc b/third_party/libwebrtc/modules/video_coding/timing/timing.cc
new file mode 100644
index 0000000000..37dc825bed
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/timing/timing.cc
@@ -0,0 +1,297 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/timing/timing.h"
+
+#include <algorithm>
+
+#include "api/units/time_delta.h"
+#include "rtc_base/experiments/field_trial_parser.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/time/timestamp_extrapolator.h"
+#include "system_wrappers/include/clock.h"
+
+namespace webrtc {
+namespace {
+
+// Default pacing that is used for the low-latency renderer path.
+constexpr TimeDelta kZeroPlayoutDelayDefaultMinPacing = TimeDelta::Millis(8);
+constexpr TimeDelta kLowLatencyStreamMaxPlayoutDelayThreshold =
+ TimeDelta::Millis(500);
+
+void CheckDelaysValid(TimeDelta min_delay, TimeDelta max_delay) {
+ if (min_delay > max_delay) {
+ RTC_LOG(LS_ERROR)
+ << "Playout delays set incorrectly: min playout delay (" << min_delay
+ << ") > max playout delay (" << max_delay
+ << "). This is undefined behaviour. Application writers should "
+ "ensure that the min delay is always less than or equals max "
+ "delay. If trying to use the playout delay header extensions "
+ "described in "
+ "https://webrtc.googlesource.com/src/+/refs/heads/main/docs/"
+ "native-code/rtp-hdrext/playout-delay/, be careful that a playout "
+ "delay hint or A/V sync settings may have caused this conflict.";
+ }
+}
+
+} // namespace
+
+VCMTiming::VCMTiming(Clock* clock, const FieldTrialsView& field_trials)
+ : clock_(clock),
+ ts_extrapolator_(
+ std::make_unique<TimestampExtrapolator>(clock_->CurrentTime())),
+ codec_timer_(std::make_unique<CodecTimer>()),
+ render_delay_(kDefaultRenderDelay),
+ min_playout_delay_(TimeDelta::Zero()),
+ max_playout_delay_(TimeDelta::Seconds(10)),
+ jitter_delay_(TimeDelta::Zero()),
+ current_delay_(TimeDelta::Zero()),
+ prev_frame_timestamp_(0),
+ num_decoded_frames_(0),
+ zero_playout_delay_min_pacing_("min_pacing",
+ kZeroPlayoutDelayDefaultMinPacing),
+ last_decode_scheduled_(Timestamp::Zero()) {
+ ParseFieldTrial({&zero_playout_delay_min_pacing_},
+ field_trials.Lookup("WebRTC-ZeroPlayoutDelay"));
+}
+
+void VCMTiming::Reset() {
+ MutexLock lock(&mutex_);
+ ts_extrapolator_->Reset(clock_->CurrentTime());
+ codec_timer_ = std::make_unique<CodecTimer>();
+ render_delay_ = kDefaultRenderDelay;
+ min_playout_delay_ = TimeDelta::Zero();
+ jitter_delay_ = TimeDelta::Zero();
+ current_delay_ = TimeDelta::Zero();
+ prev_frame_timestamp_ = 0;
+}
+
+void VCMTiming::set_render_delay(TimeDelta render_delay) {
+ MutexLock lock(&mutex_);
+ render_delay_ = render_delay;
+}
+
+TimeDelta VCMTiming::min_playout_delay() const {
+ MutexLock lock(&mutex_);
+ return min_playout_delay_;
+}
+
+void VCMTiming::set_min_playout_delay(TimeDelta min_playout_delay) {
+ MutexLock lock(&mutex_);
+ if (min_playout_delay_ != min_playout_delay) {
+ CheckDelaysValid(min_playout_delay, max_playout_delay_);
+ min_playout_delay_ = min_playout_delay;
+ }
+}
+
+void VCMTiming::set_max_playout_delay(TimeDelta max_playout_delay) {
+ MutexLock lock(&mutex_);
+ if (max_playout_delay_ != max_playout_delay) {
+ CheckDelaysValid(min_playout_delay_, max_playout_delay);
+ max_playout_delay_ = max_playout_delay;
+ }
+}
+
+void VCMTiming::SetJitterDelay(TimeDelta jitter_delay) {
+ MutexLock lock(&mutex_);
+ if (jitter_delay != jitter_delay_) {
+ jitter_delay_ = jitter_delay;
+ // When in initial state, set current delay to minimum delay.
+ if (current_delay_.IsZero()) {
+ current_delay_ = jitter_delay_;
+ }
+ }
+}
+
+void VCMTiming::UpdateCurrentDelay(uint32_t frame_timestamp) {
+ MutexLock lock(&mutex_);
+ TimeDelta target_delay = TargetDelayInternal();
+
+ if (current_delay_.IsZero()) {
+ // Not initialized, set current delay to target.
+ current_delay_ = target_delay;
+ } else if (target_delay != current_delay_) {
+ TimeDelta delay_diff = target_delay - current_delay_;
+    // Never change the delay by more than 100 ms per second. If we change the
+    // delay in too large steps, we get noticeable freezes. By limiting the
+    // change, the delay is instead increased in smaller steps, which is
+    // experienced as the video playing in slow motion. When lowering the
+    // delay, the video is played at a faster pace.
+ TimeDelta max_change = TimeDelta::Zero();
+ if (frame_timestamp < 0x0000ffff && prev_frame_timestamp_ > 0xffff0000) {
+ // wrap
+ max_change =
+ TimeDelta::Millis(kDelayMaxChangeMsPerS *
+ (frame_timestamp + (static_cast<int64_t>(1) << 32) -
+ prev_frame_timestamp_) /
+ 90000);
+ } else {
+ max_change =
+ TimeDelta::Millis(kDelayMaxChangeMsPerS *
+ (frame_timestamp - prev_frame_timestamp_) / 90000);
+ }
+
+ if (max_change <= TimeDelta::Zero()) {
+      // Any change smaller than 1 ms is truncated here and postponed to a
+      // later update. A negative change is due to reordering and is ignored.
+ return;
+ }
+ delay_diff = std::max(delay_diff, -max_change);
+ delay_diff = std::min(delay_diff, max_change);
+
+ current_delay_ = current_delay_ + delay_diff;
+ }
+ prev_frame_timestamp_ = frame_timestamp;
+}
+
+void VCMTiming::UpdateCurrentDelay(Timestamp render_time,
+ Timestamp actual_decode_time) {
+ MutexLock lock(&mutex_);
+ TimeDelta target_delay = TargetDelayInternal();
+ TimeDelta delayed =
+ (actual_decode_time - render_time) + RequiredDecodeTime() + render_delay_;
+
+  // Only consider `delayed` as negative when it is more than a few
+  // microseconds below zero.
+ if (delayed.ms() < 0) {
+ return;
+ }
+ if (current_delay_ + delayed <= target_delay) {
+ current_delay_ += delayed;
+ } else {
+ current_delay_ = target_delay;
+ }
+}
+
+void VCMTiming::StopDecodeTimer(TimeDelta decode_time, Timestamp now) {
+ MutexLock lock(&mutex_);
+ codec_timer_->AddTiming(decode_time.ms(), now.ms());
+ RTC_DCHECK_GE(decode_time, TimeDelta::Zero());
+ ++num_decoded_frames_;
+}
+
+void VCMTiming::IncomingTimestamp(uint32_t rtp_timestamp, Timestamp now) {
+ MutexLock lock(&mutex_);
+ ts_extrapolator_->Update(now, rtp_timestamp);
+}
+
+Timestamp VCMTiming::RenderTime(uint32_t frame_timestamp, Timestamp now) const {
+ MutexLock lock(&mutex_);
+ return RenderTimeInternal(frame_timestamp, now);
+}
+
+void VCMTiming::SetLastDecodeScheduledTimestamp(
+ Timestamp last_decode_scheduled) {
+ MutexLock lock(&mutex_);
+ last_decode_scheduled_ = last_decode_scheduled;
+}
+
+Timestamp VCMTiming::RenderTimeInternal(uint32_t frame_timestamp,
+ Timestamp now) const {
+ if (UseLowLatencyRendering()) {
+ // Render as soon as possible or with low-latency renderer algorithm.
+ return Timestamp::Zero();
+ }
+ // Note that TimestampExtrapolator::ExtrapolateLocalTime is not a const
+ // method; it mutates the object's wraparound state.
+ Timestamp estimated_complete_time =
+ ts_extrapolator_->ExtrapolateLocalTime(frame_timestamp).value_or(now);
+
+ // Make sure the actual delay stays in the range of `min_playout_delay_`
+ // and `max_playout_delay_`.
+ TimeDelta actual_delay =
+ current_delay_.Clamped(min_playout_delay_, max_playout_delay_);
+ return estimated_complete_time + actual_delay;
+}
+
+TimeDelta VCMTiming::RequiredDecodeTime() const {
+ const int decode_time_ms = codec_timer_->RequiredDecodeTimeMs();
+ RTC_DCHECK_GE(decode_time_ms, 0);
+ return TimeDelta::Millis(decode_time_ms);
+}
+
+TimeDelta VCMTiming::MaxWaitingTime(Timestamp render_time,
+ Timestamp now,
+ bool too_many_frames_queued) const {
+ MutexLock lock(&mutex_);
+
+ if (render_time.IsZero() && zero_playout_delay_min_pacing_->us() > 0 &&
+ min_playout_delay_.IsZero() && max_playout_delay_ > TimeDelta::Zero()) {
+ // `render_time` == 0 indicates that the frame should be decoded and
+ // rendered as soon as possible. However, the decoder can be choked if too
+ // many frames are sent at once. Therefore, limit the interframe delay to
+    // `zero_playout_delay_min_pacing_` unless too many frames are queued, in
+    // which case the frames are sent to the decoder at once.
+ if (too_many_frames_queued) {
+ return TimeDelta::Zero();
+ }
+ Timestamp earliest_next_decode_start_time =
+ last_decode_scheduled_ + zero_playout_delay_min_pacing_;
+ TimeDelta max_wait_time = now >= earliest_next_decode_start_time
+ ? TimeDelta::Zero()
+ : earliest_next_decode_start_time - now;
+ return max_wait_time;
+ }
+ return render_time - now - RequiredDecodeTime() - render_delay_;
+}
+
+TimeDelta VCMTiming::TargetVideoDelay() const {
+ MutexLock lock(&mutex_);
+ return TargetDelayInternal();
+}
+
+TimeDelta VCMTiming::TargetDelayInternal() const {
+ return std::max(min_playout_delay_,
+ jitter_delay_ + RequiredDecodeTime() + render_delay_);
+}
+
+VideoFrame::RenderParameters VCMTiming::RenderParameters() const {
+ MutexLock lock(&mutex_);
+ return {.use_low_latency_rendering = UseLowLatencyRendering(),
+ .max_composition_delay_in_frames = max_composition_delay_in_frames_};
+}
+
+bool VCMTiming::UseLowLatencyRendering() const {
+ // min_playout_delay_==0,
+ // max_playout_delay_<=kLowLatencyStreamMaxPlayoutDelayThreshold indicates
+ // that the low-latency path should be used, which means that frames should be
+ // decoded and rendered as soon as possible.
+ return min_playout_delay_.IsZero() &&
+ max_playout_delay_ <= kLowLatencyStreamMaxPlayoutDelayThreshold;
+}
+
+VCMTiming::VideoDelayTimings VCMTiming::GetTimings() const {
+ MutexLock lock(&mutex_);
+ return VideoDelayTimings{.max_decode_duration = RequiredDecodeTime(),
+ .current_delay = current_delay_,
+ .target_delay = TargetDelayInternal(),
+ .jitter_buffer_delay = jitter_delay_,
+ .min_playout_delay = min_playout_delay_,
+ .max_playout_delay = max_playout_delay_,
+ .render_delay = render_delay_,
+ .num_decoded_frames = num_decoded_frames_};
+}
+
+void VCMTiming::SetTimingFrameInfo(const TimingFrameInfo& info) {
+ MutexLock lock(&mutex_);
+ timing_frame_info_.emplace(info);
+}
+
+absl::optional<TimingFrameInfo> VCMTiming::GetTimingFrameInfo() {
+ MutexLock lock(&mutex_);
+ return timing_frame_info_;
+}
+
+void VCMTiming::SetMaxCompositionDelayInFrames(
+ absl::optional<int> max_composition_delay_in_frames) {
+ MutexLock lock(&mutex_);
+ max_composition_delay_in_frames_ = max_composition_delay_in_frames;
+}
+
+} // namespace webrtc
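Not part of the patch: a standalone sketch of the rate limiting and 32-bit RTP
timestamp unwrap in VCMTiming::UpdateCurrentDelay() above. The helper is
hypothetical; the 100 ms/s limit and the 90 kHz clock match the source.

```cpp
#include <cstdint>

int64_t MaxDelayChangeMs(uint32_t frame_ts, uint32_t prev_ts) {
  constexpr int64_t kDelayMaxChangeMsPerS = 100;
  int64_t diff_ticks;
  if (frame_ts < 0x0000ffff && prev_ts > 0xffff0000) {
    // The 32-bit timestamp wrapped; unwrap by adding 2^32.
    diff_ticks = frame_ts + (int64_t{1} << 32) - prev_ts;
  } else {
    diff_ticks = static_cast<int64_t>(frame_ts) - prev_ts;
  }
  // 90 kHz RTP clock: ticks / 90000 = seconds of media time elapsed.
  return kDelayMaxChangeMsPerS * diff_ticks / 90000;
}
```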
diff --git a/third_party/libwebrtc/modules/video_coding/timing/timing.h b/third_party/libwebrtc/modules/video_coding/timing/timing.h
new file mode 100644
index 0000000000..6ee1cf4d6f
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/timing/timing.h
@@ -0,0 +1,161 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_TIMING_TIMING_H_
+#define MODULES_VIDEO_CODING_TIMING_TIMING_H_
+
+#include <memory>
+
+#include "absl/types/optional.h"
+#include "api/field_trials_view.h"
+#include "api/units/time_delta.h"
+#include "api/video/video_frame.h"
+#include "api/video/video_timing.h"
+#include "modules/video_coding/timing/codec_timer.h"
+#include "rtc_base/experiments/field_trial_parser.h"
+#include "rtc_base/synchronization/mutex.h"
+#include "rtc_base/thread_annotations.h"
+#include "rtc_base/time/timestamp_extrapolator.h"
+
+namespace webrtc {
+
+class Clock;
+class TimestampExtrapolator;
+
+class VCMTiming {
+ public:
+ static constexpr auto kDefaultRenderDelay = TimeDelta::Millis(10);
+ static constexpr auto kDelayMaxChangeMsPerS = 100;
+
+ VCMTiming(Clock* clock, const FieldTrialsView& field_trials);
+ virtual ~VCMTiming() = default;
+
+ // Resets the timing to the initial state.
+ void Reset();
+
+ // Set the amount of time needed to render an image. Defaults to 10 ms.
+ void set_render_delay(TimeDelta render_delay);
+
+ // Set the minimum time the video must be delayed on the receiver to
+ // get the desired jitter buffer level.
+ void SetJitterDelay(TimeDelta required_delay);
+
+ // Set/get the minimum playout delay from capture to render.
+ TimeDelta min_playout_delay() const;
+ void set_min_playout_delay(TimeDelta min_playout_delay);
+
+  // Set the maximum playout delay from capture to render.
+ void set_max_playout_delay(TimeDelta max_playout_delay);
+
+ // Increases or decreases the current delay to get closer to the target delay.
+ // Calculates how long it has been since the previous call to this function,
+ // and increases/decreases the delay in proportion to the time difference.
+ void UpdateCurrentDelay(uint32_t frame_timestamp);
+
+ // Increases or decreases the current delay to get closer to the target delay.
+  // Given the render time and the actual decode time for a frame, this
+  // function calculates how late the frame is and increases the delay
+ // accordingly.
+ void UpdateCurrentDelay(Timestamp render_time, Timestamp actual_decode_time);
+
+ // Stops the decoder timer, should be called when the decoder returns a frame
+ // or when the decoded frame callback is called.
+ void StopDecodeTimer(TimeDelta decode_time, Timestamp now);
+
+ // Used to report that a frame is passed to decoding. Updates the timestamp
+ // filter which is used to map between timestamps and receiver system time.
+ void IncomingTimestamp(uint32_t rtp_timestamp, Timestamp last_packet_time);
+
+ // Returns the receiver system time when the frame with timestamp
+ // `frame_timestamp` should be rendered, assuming that the system time
+ // currently is `now`.
+ virtual Timestamp RenderTime(uint32_t frame_timestamp, Timestamp now) const;
+
+  // Returns the maximum time that we can wait for a frame to become
+ // complete before we must pass it to the decoder. render_time==0 indicates
+ // that the frames should be processed as quickly as possible, with possibly
+ // only a small delay added to make sure that the decoder is not overloaded.
+ // In this case, the parameter too_many_frames_queued is used to signal that
+ // the decode queue is full and that the frame should be decoded as soon as
+ // possible.
+ virtual TimeDelta MaxWaitingTime(Timestamp render_time,
+ Timestamp now,
+ bool too_many_frames_queued) const;
+
+ // Returns the current target delay which is required delay + decode time +
+ // render delay.
+ TimeDelta TargetVideoDelay() const;
+
+  // Returns the current timing information.
+ struct VideoDelayTimings {
+ TimeDelta max_decode_duration;
+ TimeDelta current_delay;
+ TimeDelta target_delay;
+ TimeDelta jitter_buffer_delay;
+ TimeDelta min_playout_delay;
+ TimeDelta max_playout_delay;
+ TimeDelta render_delay;
+ size_t num_decoded_frames;
+ };
+ VideoDelayTimings GetTimings() const;
+
+ void SetTimingFrameInfo(const TimingFrameInfo& info);
+ absl::optional<TimingFrameInfo> GetTimingFrameInfo();
+
+ void SetMaxCompositionDelayInFrames(
+ absl::optional<int> max_composition_delay_in_frames);
+
+ VideoFrame::RenderParameters RenderParameters() const;
+
+ // Updates the last time a frame was scheduled for decoding.
+ void SetLastDecodeScheduledTimestamp(Timestamp last_decode_scheduled);
+
+ protected:
+ TimeDelta RequiredDecodeTime() const RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+ Timestamp RenderTimeInternal(uint32_t frame_timestamp, Timestamp now) const
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+ TimeDelta TargetDelayInternal() const RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+ bool UseLowLatencyRendering() const RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+
+ private:
+ mutable Mutex mutex_;
+ Clock* const clock_;
+ const std::unique_ptr<TimestampExtrapolator> ts_extrapolator_
+ RTC_PT_GUARDED_BY(mutex_);
+ std::unique_ptr<CodecTimer> codec_timer_ RTC_GUARDED_BY(mutex_)
+ RTC_PT_GUARDED_BY(mutex_);
+ TimeDelta render_delay_ RTC_GUARDED_BY(mutex_);
+  // Best-effort playout delay range for frames from capture to render.
+  // The receiver tries to keep the delay between `min_playout_delay_` and
+  // `max_playout_delay_`, taking the network jitter into account. A special
+  // case is min_playout_delay_ = max_playout_delay_ = 0, in which case the
+  // receiver tries to play the frames as they arrive.
+ TimeDelta min_playout_delay_ RTC_GUARDED_BY(mutex_);
+ TimeDelta max_playout_delay_ RTC_GUARDED_BY(mutex_);
+ TimeDelta jitter_delay_ RTC_GUARDED_BY(mutex_);
+ TimeDelta current_delay_ RTC_GUARDED_BY(mutex_);
+ uint32_t prev_frame_timestamp_ RTC_GUARDED_BY(mutex_);
+ absl::optional<TimingFrameInfo> timing_frame_info_ RTC_GUARDED_BY(mutex_);
+ size_t num_decoded_frames_ RTC_GUARDED_BY(mutex_);
+ absl::optional<int> max_composition_delay_in_frames_ RTC_GUARDED_BY(mutex_);
+ // Set by the field trial WebRTC-ZeroPlayoutDelay. The parameter min_pacing
+ // determines the minimum delay between frames scheduled for decoding that is
+  // used when min playout delay=0 and max playout delay>0.
+ FieldTrialParameter<TimeDelta> zero_playout_delay_min_pacing_
+ RTC_GUARDED_BY(mutex_);
+ // Timestamp at which the last frame was scheduled to be sent to the decoder.
+ // Used only when the RTP header extension playout delay is set to min=0 ms
+ // which is indicated by a render time set to 0.
+ Timestamp last_decode_scheduled_ RTC_GUARDED_BY(mutex_);
+};
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_TIMING_TIMING_H_
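Not part of the patch: a usage sketch for the interface above, mirroring the
receiver-side call sequence the unit tests below use (in-tree build assumed).

```cpp
#include "modules/video_coding/timing/timing.h"
#include "system_wrappers/include/clock.h"
#include "test/scoped_key_value_config.h"

// For a just-received frame: feed its RTP timestamp, pick up the render time,
// and ask how long the decoder may wait for it.
webrtc::TimeDelta WaitTimeForFrame(uint32_t rtp_timestamp) {
  webrtc::SimulatedClock clock(0);
  webrtc::test::ScopedKeyValueConfig field_trials;
  webrtc::VCMTiming timing(&clock, field_trials);
  timing.IncomingTimestamp(rtp_timestamp, clock.CurrentTime());
  timing.SetJitterDelay(webrtc::TimeDelta::Millis(20));
  timing.UpdateCurrentDelay(rtp_timestamp);
  webrtc::Timestamp render_time =
      timing.RenderTime(rtp_timestamp, clock.CurrentTime());
  return timing.MaxWaitingTime(render_time, clock.CurrentTime(),
                               /*too_many_frames_queued=*/false);
}
```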
diff --git a/third_party/libwebrtc/modules/video_coding/timing/timing_module_gn/moz.build b/third_party/libwebrtc/modules/video_coding/timing/timing_module_gn/moz.build
new file mode 100644
index 0000000000..7f4e361630
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/timing/timing_module_gn/moz.build
@@ -0,0 +1,212 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/video_coding/timing/timing.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("timing_module_gn")
diff --git a/third_party/libwebrtc/modules/video_coding/timing/timing_unittest.cc b/third_party/libwebrtc/modules/video_coding/timing/timing_unittest.cc
new file mode 100644
index 0000000000..8633c0de39
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/timing/timing_unittest.cc
@@ -0,0 +1,339 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/timing/timing.h"
+
+#include "api/units/frequency.h"
+#include "api/units/time_delta.h"
+#include "system_wrappers/include/clock.h"
+#include "test/gtest.h"
+#include "test/scoped_key_value_config.h"
+
+namespace webrtc {
+namespace {
+
+constexpr Frequency k25Fps = Frequency::Hertz(25);
+constexpr Frequency k90kHz = Frequency::KiloHertz(90);
+
+} // namespace
+
+TEST(ReceiverTimingTest, JitterDelay) {
+ test::ScopedKeyValueConfig field_trials;
+ SimulatedClock clock(0);
+ VCMTiming timing(&clock, field_trials);
+ timing.Reset();
+
+ uint32_t timestamp = 0;
+ timing.UpdateCurrentDelay(timestamp);
+
+ timing.Reset();
+
+ timing.IncomingTimestamp(timestamp, clock.CurrentTime());
+ TimeDelta jitter_delay = TimeDelta::Millis(20);
+ timing.SetJitterDelay(jitter_delay);
+ timing.UpdateCurrentDelay(timestamp);
+ timing.set_render_delay(TimeDelta::Zero());
+ auto wait_time = timing.MaxWaitingTime(
+ timing.RenderTime(timestamp, clock.CurrentTime()), clock.CurrentTime(),
+ /*too_many_frames_queued=*/false);
+ // First update initializes the render time. Since we have no decode delay
+ // we get wait_time = renderTime - now - renderDelay = jitter.
+ EXPECT_EQ(jitter_delay, wait_time);
+
+ jitter_delay += TimeDelta::Millis(VCMTiming::kDelayMaxChangeMsPerS + 10);
+ timestamp += 90000;
+ clock.AdvanceTimeMilliseconds(1000);
+ timing.SetJitterDelay(jitter_delay);
+ timing.UpdateCurrentDelay(timestamp);
+ wait_time = timing.MaxWaitingTime(
+ timing.RenderTime(timestamp, clock.CurrentTime()), clock.CurrentTime(),
+ /*too_many_frames_queued=*/false);
+ // Since we gradually increase the delay we only get 100 ms every second.
+ EXPECT_EQ(jitter_delay - TimeDelta::Millis(10), wait_time);
+
+ timestamp += 90000;
+ clock.AdvanceTimeMilliseconds(1000);
+ timing.UpdateCurrentDelay(timestamp);
+ wait_time = timing.MaxWaitingTime(
+ timing.RenderTime(timestamp, clock.CurrentTime()), clock.CurrentTime(),
+ /*too_many_frames_queued=*/false);
+ EXPECT_EQ(jitter_delay, wait_time);
+
+ // Insert frames without jitter, verify that this gives the exact wait time.
+ const int kNumFrames = 300;
+ for (int i = 0; i < kNumFrames; i++) {
+ clock.AdvanceTime(1 / k25Fps);
+ timestamp += k90kHz / k25Fps;
+ timing.IncomingTimestamp(timestamp, clock.CurrentTime());
+ }
+ timing.UpdateCurrentDelay(timestamp);
+ wait_time = timing.MaxWaitingTime(
+ timing.RenderTime(timestamp, clock.CurrentTime()), clock.CurrentTime(),
+ /*too_many_frames_queued=*/false);
+ EXPECT_EQ(jitter_delay, wait_time);
+
+ // Add decode time estimates for 1 second.
+ const TimeDelta kDecodeTime = TimeDelta::Millis(10);
+ for (int i = 0; i < k25Fps.hertz(); i++) {
+ clock.AdvanceTime(kDecodeTime);
+ timing.StopDecodeTimer(kDecodeTime, clock.CurrentTime());
+ timestamp += k90kHz / k25Fps;
+ clock.AdvanceTime(1 / k25Fps - kDecodeTime);
+ timing.IncomingTimestamp(timestamp, clock.CurrentTime());
+ }
+ timing.UpdateCurrentDelay(timestamp);
+ wait_time = timing.MaxWaitingTime(
+ timing.RenderTime(timestamp, clock.CurrentTime()), clock.CurrentTime(),
+ /*too_many_frames_queued=*/false);
+ EXPECT_EQ(jitter_delay, wait_time);
+
+ const TimeDelta kMinTotalDelay = TimeDelta::Millis(200);
+ timing.set_min_playout_delay(kMinTotalDelay);
+ clock.AdvanceTimeMilliseconds(5000);
+ timestamp += 5 * 90000;
+ timing.UpdateCurrentDelay(timestamp);
+ const TimeDelta kRenderDelay = TimeDelta::Millis(10);
+ timing.set_render_delay(kRenderDelay);
+ wait_time = timing.MaxWaitingTime(
+ timing.RenderTime(timestamp, clock.CurrentTime()), clock.CurrentTime(),
+ /*too_many_frames_queued=*/false);
+  // We should wait at least kMinTotalDelay - kDecodeTime (10 ms) -
+  // kRenderDelay (10 ms).
+ EXPECT_EQ(kMinTotalDelay - kDecodeTime - kRenderDelay, wait_time);
+ // The total video delay should be equal to the min total delay.
+ EXPECT_EQ(kMinTotalDelay, timing.TargetVideoDelay());
+
+ // Reset playout delay.
+ timing.set_min_playout_delay(TimeDelta::Zero());
+ clock.AdvanceTimeMilliseconds(5000);
+ timestamp += 5 * 90000;
+ timing.UpdateCurrentDelay(timestamp);
+}
+
+TEST(ReceiverTimingTest, TimestampWrapAround) {
+ constexpr auto kStartTime = Timestamp::Millis(1337);
+ test::ScopedKeyValueConfig field_trials;
+ SimulatedClock clock(kStartTime);
+ VCMTiming timing(&clock, field_trials);
+
+ // Provoke a wrap-around. The fifth frame will have wrapped at 25 fps.
+ constexpr uint32_t kRtpTicksPerFrame = k90kHz / k25Fps;
+ uint32_t timestamp = 0xFFFFFFFFu - 3 * kRtpTicksPerFrame;
+ for (int i = 0; i < 5; ++i) {
+ timing.IncomingTimestamp(timestamp, clock.CurrentTime());
+ clock.AdvanceTime(1 / k25Fps);
+ timestamp += kRtpTicksPerFrame;
+ EXPECT_EQ(kStartTime + 3 / k25Fps,
+ timing.RenderTime(0xFFFFFFFFu, clock.CurrentTime()));
+ // One ms later in 90 kHz.
+ EXPECT_EQ(kStartTime + 3 / k25Fps + TimeDelta::Millis(1),
+ timing.RenderTime(89u, clock.CurrentTime()));
+ }
+}
+
+TEST(ReceiverTimingTest, UseLowLatencyRenderer) {
+ test::ScopedKeyValueConfig field_trials;
+ SimulatedClock clock(0);
+ VCMTiming timing(&clock, field_trials);
+ timing.Reset();
+ // Default is false.
+ EXPECT_FALSE(timing.RenderParameters().use_low_latency_rendering);
+ // False if min playout delay > 0.
+ timing.set_min_playout_delay(TimeDelta::Millis(10));
+ timing.set_max_playout_delay(TimeDelta::Millis(20));
+ EXPECT_FALSE(timing.RenderParameters().use_low_latency_rendering);
+ // True if min==0, max > 0.
+ timing.set_min_playout_delay(TimeDelta::Zero());
+ EXPECT_TRUE(timing.RenderParameters().use_low_latency_rendering);
+ // True if min==max==0.
+ timing.set_max_playout_delay(TimeDelta::Zero());
+ EXPECT_TRUE(timing.RenderParameters().use_low_latency_rendering);
+ // True also for max playout delay==500 ms.
+ timing.set_max_playout_delay(TimeDelta::Millis(500));
+ EXPECT_TRUE(timing.RenderParameters().use_low_latency_rendering);
+ // False if max playout delay > 500 ms.
+ timing.set_max_playout_delay(TimeDelta::Millis(501));
+ EXPECT_FALSE(timing.RenderParameters().use_low_latency_rendering);
+}
+
+TEST(ReceiverTimingTest, MaxWaitingTimeIsZeroForZeroRenderTime) {
+ // This is the default path when the RTP playout delay header extension is set
+ // to min==0 and max==0.
+ constexpr int64_t kStartTimeUs = 3.15e13; // About one year in us.
+ constexpr TimeDelta kTimeDelta = 1 / Frequency::Hertz(60);
+ constexpr Timestamp kZeroRenderTime = Timestamp::Zero();
+ SimulatedClock clock(kStartTimeUs);
+ test::ScopedKeyValueConfig field_trials;
+ VCMTiming timing(&clock, field_trials);
+ timing.Reset();
+ timing.set_max_playout_delay(TimeDelta::Zero());
+ for (int i = 0; i < 10; ++i) {
+ clock.AdvanceTime(kTimeDelta);
+ Timestamp now = clock.CurrentTime();
+ EXPECT_LT(timing.MaxWaitingTime(kZeroRenderTime, now,
+ /*too_many_frames_queued=*/false),
+ TimeDelta::Zero());
+ }
+ // Another frame submitted at the same time also returns a negative max
+ // waiting time.
+ Timestamp now = clock.CurrentTime();
+ EXPECT_LT(timing.MaxWaitingTime(kZeroRenderTime, now,
+ /*too_many_frames_queued=*/false),
+ TimeDelta::Zero());
+ // MaxWaitingTime should be less than zero even if there's a burst of frames.
+ EXPECT_LT(timing.MaxWaitingTime(kZeroRenderTime, now,
+ /*too_many_frames_queued=*/false),
+ TimeDelta::Zero());
+ EXPECT_LT(timing.MaxWaitingTime(kZeroRenderTime, now,
+ /*too_many_frames_queued=*/false),
+ TimeDelta::Zero());
+ EXPECT_LT(timing.MaxWaitingTime(kZeroRenderTime, now,
+ /*too_many_frames_queued=*/false),
+ TimeDelta::Zero());
+}
+
+TEST(ReceiverTimingTest, MaxWaitingTimeZeroDelayPacingExperiment) {
+ // The minimum pacing is enabled by a field trial and active if the RTP
+ // playout delay header extension is set to min==0.
+ constexpr TimeDelta kMinPacing = TimeDelta::Millis(3);
+ test::ScopedKeyValueConfig field_trials(
+ "WebRTC-ZeroPlayoutDelay/min_pacing:3ms/");
+ constexpr int64_t kStartTimeUs = 3.15e13; // About one year in us.
+ constexpr TimeDelta kTimeDelta = 1 / Frequency::Hertz(60);
+ constexpr auto kZeroRenderTime = Timestamp::Zero();
+ SimulatedClock clock(kStartTimeUs);
+ VCMTiming timing(&clock, field_trials);
+ timing.Reset();
+ // MaxWaitingTime() returns zero for evenly spaced video frames.
+ for (int i = 0; i < 10; ++i) {
+ clock.AdvanceTime(kTimeDelta);
+ Timestamp now = clock.CurrentTime();
+ EXPECT_EQ(timing.MaxWaitingTime(kZeroRenderTime, now,
+ /*too_many_frames_queued=*/false),
+ TimeDelta::Zero());
+ timing.SetLastDecodeScheduledTimestamp(now);
+ }
+ // Another frame submitted at the same time is paced according to the field
+ // trial setting.
+ auto now = clock.CurrentTime();
+ EXPECT_EQ(timing.MaxWaitingTime(kZeroRenderTime, now,
+ /*too_many_frames_queued=*/false),
+ kMinPacing);
+ // If there's a burst of frames, the wait time is calculated based on next
+ // decode time.
+ EXPECT_EQ(timing.MaxWaitingTime(kZeroRenderTime, now,
+ /*too_many_frames_queued=*/false),
+ kMinPacing);
+ EXPECT_EQ(timing.MaxWaitingTime(kZeroRenderTime, now,
+ /*too_many_frames_queued=*/false),
+ kMinPacing);
+ // Allow a few ms to pass, this should be subtracted from the MaxWaitingTime.
+ constexpr TimeDelta kTwoMs = TimeDelta::Millis(2);
+ clock.AdvanceTime(kTwoMs);
+ now = clock.CurrentTime();
+ EXPECT_EQ(timing.MaxWaitingTime(kZeroRenderTime, now,
+ /*too_many_frames_queued=*/false),
+ kMinPacing - kTwoMs);
+ // A frame is decoded at the current time, the wait time should be restored to
+ // pacing delay.
+ timing.SetLastDecodeScheduledTimestamp(now);
+ EXPECT_EQ(timing.MaxWaitingTime(kZeroRenderTime, now,
+ /*too_many_frames_queued=*/false),
+ kMinPacing);
+}
+
+TEST(ReceiverTimingTest, DefaultMaxWaitingTimeUnaffectedByPacingExperiment) {
+  // The minimum pacing is enabled by a field trial but should not have any
+  // effect if the render time is greater than 0.
+ test::ScopedKeyValueConfig field_trials(
+ "WebRTC-ZeroPlayoutDelay/min_pacing:3ms/");
+ constexpr int64_t kStartTimeUs = 3.15e13; // About one year in us.
+ const TimeDelta kTimeDelta = TimeDelta::Millis(1000.0 / 60.0);
+ SimulatedClock clock(kStartTimeUs);
+ VCMTiming timing(&clock, field_trials);
+ timing.Reset();
+ clock.AdvanceTime(kTimeDelta);
+ auto now = clock.CurrentTime();
+ Timestamp render_time = now + TimeDelta::Millis(30);
+ // Estimate the internal processing delay from the first frame.
+ TimeDelta estimated_processing_delay =
+ (render_time - now) -
+ timing.MaxWaitingTime(render_time, now,
+ /*too_many_frames_queued=*/false);
+ EXPECT_GT(estimated_processing_delay, TimeDelta::Zero());
+
+ // Any other frame submitted at the same time should be scheduled according to
+ // its render time.
+ for (int i = 0; i < 5; ++i) {
+ render_time += kTimeDelta;
+ EXPECT_EQ(timing.MaxWaitingTime(render_time, now,
+ /*too_many_frames_queued=*/false),
+ render_time - now - estimated_processing_delay);
+ }
+}
+
+TEST(ReceiverTimingTest, MaxWaitingTimeReturnsZeroIfTooManyFramesQueuedIsTrue) {
+ // The minimum pacing is enabled by a field trial and active if the RTP
+ // playout delay header extension is set to min==0.
+ constexpr TimeDelta kMinPacing = TimeDelta::Millis(3);
+ test::ScopedKeyValueConfig field_trials(
+ "WebRTC-ZeroPlayoutDelay/min_pacing:3ms/");
+ constexpr int64_t kStartTimeUs = 3.15e13; // About one year in us.
+ const TimeDelta kTimeDelta = TimeDelta::Millis(1000.0 / 60.0);
+ constexpr auto kZeroRenderTime = Timestamp::Zero();
+ SimulatedClock clock(kStartTimeUs);
+ VCMTiming timing(&clock, field_trials);
+ timing.Reset();
+ // MaxWaitingTime() returns zero for evenly spaced video frames.
+ for (int i = 0; i < 10; ++i) {
+ clock.AdvanceTime(kTimeDelta);
+ auto now = clock.CurrentTime();
+ EXPECT_EQ(timing.MaxWaitingTime(kZeroRenderTime, now,
+ /*too_many_frames_queued=*/false),
+ TimeDelta::Zero());
+ timing.SetLastDecodeScheduledTimestamp(now);
+ }
+ // Another frame submitted at the same time is paced according to the field
+ // trial setting.
+  auto now = clock.CurrentTime();
+  EXPECT_EQ(timing.MaxWaitingTime(kZeroRenderTime, now,
+                                  /*too_many_frames_queued=*/false),
+            kMinPacing);
+  // MaxWaitingTime returns 0 even if there's a burst of frames if
+  // too_many_frames_queued is set to true.
+  EXPECT_EQ(timing.MaxWaitingTime(kZeroRenderTime, now,
+                                  /*too_many_frames_queued=*/true),
+            TimeDelta::Zero());
+  EXPECT_EQ(timing.MaxWaitingTime(kZeroRenderTime, now,
+                                  /*too_many_frames_queued=*/true),
+            TimeDelta::Zero());
+}
+
+TEST(ReceiverTimingTest, UpdateCurrentDelayCapsWhenOffByMicroseconds) {
+ test::ScopedKeyValueConfig field_trials;
+ SimulatedClock clock(0);
+ VCMTiming timing(&clock, field_trials);
+ timing.Reset();
+
+ // Set larger initial current delay.
+ timing.set_min_playout_delay(TimeDelta::Millis(200));
+ timing.UpdateCurrentDelay(Timestamp::Millis(900), Timestamp::Millis(1000));
+
+  // Add a few microseconds to ensure that the delta of the decode time is 0
+  // after rounding; the current delay should then reset to the target delay.
+ timing.set_min_playout_delay(TimeDelta::Millis(50));
+ Timestamp decode_time = Timestamp::Millis(1337);
+ Timestamp render_time =
+ decode_time + TimeDelta::Millis(10) + TimeDelta::Micros(37);
+ timing.UpdateCurrentDelay(render_time, decode_time);
+ EXPECT_EQ(timing.GetTimings().current_delay, timing.TargetVideoDelay());
+}
+
+} // namespace webrtc
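Not part of the patch: a sketch of the pacing branch in
VCMTiming::MaxWaitingTime() that these zero-playout-delay tests exercise. The
helper is hypothetical and works on raw milliseconds.

```cpp
#include <algorithm>
#include <cstdint>

// With render_time == 0 and min playout delay == 0, a frame waits until
// `min_pacing_ms` has elapsed since the last scheduled decode (and not at all
// when the queue is overloaded).
int64_t ZeroDelayWaitMs(int64_t now_ms, int64_t last_scheduled_ms,
                        int64_t min_pacing_ms, bool too_many_frames_queued) {
  if (too_many_frames_queued) return 0;
  int64_t earliest_next_decode_ms = last_scheduled_ms + min_pacing_ms;
  return std::max<int64_t>(0, earliest_next_decode_ms - now_ms);
}
```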
diff --git a/third_party/libwebrtc/modules/video_coding/utility/bandwidth_quality_scaler.cc b/third_party/libwebrtc/modules/video_coding/utility/bandwidth_quality_scaler.cc
new file mode 100644
index 0000000000..13502a142b
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/utility/bandwidth_quality_scaler.cc
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/utility/bandwidth_quality_scaler.h"
+
+#include <algorithm>
+#include <memory>
+#include <utility>
+#include <vector>
+
+#include "api/video/video_adaptation_reason.h"
+#include "api/video_codecs/video_encoder.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/experiments/bandwidth_quality_scaler_settings.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/numerics/exp_filter.h"
+#include "rtc_base/time_utils.h"
+#include "rtc_base/weak_ptr.h"
+
+namespace webrtc {
+
+namespace {
+
+constexpr int kDefaultMaxWindowSizeMs = 5000;
+constexpr float kHigherMaxBitrateTolerationFactor = 0.95;
+constexpr float kLowerMinBitrateTolerationFactor = 0.8;
+constexpr int kDefaultBitrateStateUpdateIntervalSeconds = 5;
+} // namespace
+
+BandwidthQualityScaler::BandwidthQualityScaler(
+ BandwidthQualityScalerUsageHandlerInterface* handler)
+ : kBitrateStateUpdateInterval(TimeDelta::Seconds(
+ BandwidthQualityScalerSettings::ParseFromFieldTrials()
+ .BitrateStateUpdateInterval()
+ .value_or(kDefaultBitrateStateUpdateIntervalSeconds))),
+ handler_(handler),
+ encoded_bitrate_(kDefaultMaxWindowSizeMs, RateStatistics::kBpsScale),
+ weak_ptr_factory_(this) {
+ RTC_DCHECK_RUN_ON(&task_checker_);
+ RTC_DCHECK(handler_ != nullptr);
+
+ StartCheckForBitrate();
+}
+
+BandwidthQualityScaler::~BandwidthQualityScaler() {
+ RTC_DCHECK_RUN_ON(&task_checker_);
+}
+
+void BandwidthQualityScaler::StartCheckForBitrate() {
+ RTC_DCHECK_RUN_ON(&task_checker_);
+ TaskQueueBase::Current()->PostDelayedTask(
+ [this_weak_ptr = weak_ptr_factory_.GetWeakPtr(), this] {
+ if (!this_weak_ptr) {
+ // The caller BandwidthQualityScaler has been deleted.
+ return;
+ }
+ RTC_DCHECK_RUN_ON(&task_checker_);
+ switch (CheckBitrate()) {
+ case BandwidthQualityScaler::CheckBitrateResult::kHighBitRate: {
+ handler_->OnReportUsageBandwidthHigh();
+ last_frame_size_pixels_.reset();
+ break;
+ }
+ case BandwidthQualityScaler::CheckBitrateResult::kLowBitRate: {
+ handler_->OnReportUsageBandwidthLow();
+ last_frame_size_pixels_.reset();
+ break;
+ }
+ case BandwidthQualityScaler::CheckBitrateResult::kNormalBitrate: {
+ break;
+ }
+ case BandwidthQualityScaler::CheckBitrateResult::
+ kInsufficientSamples: {
+ break;
+ }
+ }
+ StartCheckForBitrate();
+ },
+ kBitrateStateUpdateInterval);
+}
+
+void BandwidthQualityScaler::ReportEncodeInfo(int frame_size_bytes,
+ int64_t time_sent_in_ms,
+ uint32_t encoded_width,
+ uint32_t encoded_height) {
+ RTC_DCHECK_RUN_ON(&task_checker_);
+ last_time_sent_in_ms_ = time_sent_in_ms;
+ last_frame_size_pixels_ = encoded_width * encoded_height;
+ encoded_bitrate_.Update(frame_size_bytes, time_sent_in_ms);
+}
+
+void BandwidthQualityScaler::SetResolutionBitrateLimits(
+ const std::vector<VideoEncoder::ResolutionBitrateLimits>&
+ resolution_bitrate_limits) {
+ if (resolution_bitrate_limits.empty()) {
+ resolution_bitrate_limits_ = EncoderInfoSettings::
+ GetDefaultSinglecastBitrateLimitsWhenQpIsUntrusted();
+ } else {
+ resolution_bitrate_limits_ = resolution_bitrate_limits;
+ }
+}
+
+BandwidthQualityScaler::CheckBitrateResult
+BandwidthQualityScaler::CheckBitrate() {
+ RTC_DCHECK_RUN_ON(&task_checker_);
+ if (!last_frame_size_pixels_.has_value() ||
+ !last_time_sent_in_ms_.has_value()) {
+ return BandwidthQualityScaler::CheckBitrateResult::kInsufficientSamples;
+ }
+
+ absl::optional<int64_t> current_bitrate_bps =
+ encoded_bitrate_.Rate(last_time_sent_in_ms_.value());
+ if (!current_bitrate_bps.has_value()) {
+ // We can't get a valid bitrate because there are not enough data points.
+ return BandwidthQualityScaler::CheckBitrateResult::kInsufficientSamples;
+ }
+ absl::optional<VideoEncoder::ResolutionBitrateLimits> suitable_bitrate_limit =
+ EncoderInfoSettings::
+ GetSinglecastBitrateLimitForResolutionWhenQpIsUntrusted(
+ last_frame_size_pixels_, resolution_bitrate_limits_);
+
+ if (!suitable_bitrate_limit.has_value()) {
+ return BandwidthQualityScaler::CheckBitrateResult::kInsufficientSamples;
+ }
+
+ // Multiply by a toleration factor so that a bitrate hovering right around
+ // a limit does not trigger frequent adaptation.
+ if (current_bitrate_bps > suitable_bitrate_limit->max_bitrate_bps *
+ kHigherMaxBitrateTolerationFactor) {
+ return BandwidthQualityScaler::CheckBitrateResult::kLowBitRate;
+ } else if (current_bitrate_bps <
+ suitable_bitrate_limit->min_start_bitrate_bps *
+ kLowerMinBitrateTolerationFactor) {
+ return BandwidthQualityScaler::CheckBitrateResult::kHighBitRate;
+ }
+ return BandwidthQualityScaler::CheckBitrateResult::kNormalBitrate;
+}
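+
+// Worked example for the thresholds above, using the 640x360 default limits
+// cited in the unit tests (min_start_bitrate_bps = 500000, max_bitrate_bps =
+// 800000): CheckBitrate() reports kLowBitRate above 800000 * 0.95 = 760000
+// bps, kHighBitRate below 500000 * 0.8 = 400000 bps, and kNormalBitrate for
+// anything in between.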
+
+BandwidthQualityScalerUsageHandlerInterface::
+ ~BandwidthQualityScalerUsageHandlerInterface() {}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/utility/bandwidth_quality_scaler.h b/third_party/libwebrtc/modules/video_coding/utility/bandwidth_quality_scaler.h
new file mode 100644
index 0000000000..7cd1de0dd2
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/utility/bandwidth_quality_scaler.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_UTILITY_BANDWIDTH_QUALITY_SCALER_H_
+#define MODULES_VIDEO_CODING_UTILITY_BANDWIDTH_QUALITY_SCALER_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <memory>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/scoped_refptr.h"
+#include "api/sequence_checker.h"
+#include "api/video_codecs/video_encoder.h"
+#include "rtc_base/experiments/encoder_info_settings.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/numerics/exp_filter.h"
+#include "rtc_base/rate_statistics.h"
+#include "rtc_base/ref_count.h"
+#include "rtc_base/system/no_unique_address.h"
+#include "rtc_base/weak_ptr.h"
+
+namespace webrtc {
+
+class BandwidthQualityScalerUsageHandlerInterface {
+ public:
+ virtual ~BandwidthQualityScalerUsageHandlerInterface();
+
+ virtual void OnReportUsageBandwidthHigh() = 0;
+ virtual void OnReportUsageBandwidthLow() = 0;
+};
+
+// BandwidthQualityScaler runs asynchronously and monitors bandwidth values of
+// encoded frames. It holds a reference to a
+// BandwidthQualityScalerUsageHandlerInterface implementation to signal an
+// overuse or underuse of bandwidth (which indicate a desire to scale the video
+// stream down or up).
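+//
+// Illustrative usage sketch (the handler class, the surrounding frame loop,
+// and the variable names are assumptions for the example, not part of this
+// API):
+//
+//   class Handler : public BandwidthQualityScalerUsageHandlerInterface {
+//    public:
+//     void OnReportUsageBandwidthHigh() override { /* scale down */ }
+//     void OnReportUsageBandwidthLow() override { /* scale up */ }
+//   };
+//
+//   // Must be created and used on a task queue: the periodic bitrate check
+//   // posts delayed tasks to TaskQueueBase::Current().
+//   auto scaler = std::make_unique<BandwidthQualityScaler>(&handler);
+//   scaler->SetResolutionBitrateLimits({});  // Empty: use the defaults.
+//   // For every encoded frame:
+//   scaler->ReportEncodeInfo(frame_bytes, rtc::TimeMillis(), width, height);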
+class BandwidthQualityScaler {
+ public:
+ explicit BandwidthQualityScaler(
+ BandwidthQualityScalerUsageHandlerInterface* handler);
+ virtual ~BandwidthQualityScaler();
+
+ void ReportEncodeInfo(int frame_size_bytes,
+ int64_t time_sent_in_ms,
+ uint32_t encoded_width,
+ uint32_t encoded_height);
+
+ // We prioritize using the |resolution_bitrate_limits| provided by the
+ // current encoder. If none are provided, we fall back to the defaults from
+ // GetDefaultSinglecastBitrateLimitsWhenQpIsUntrusted().
+ void SetResolutionBitrateLimits(
+ const std::vector<VideoEncoder::ResolutionBitrateLimits>&
+ resolution_bitrate_limits);
+
+ const TimeDelta kBitrateStateUpdateInterval;
+
+ private:
+ enum class CheckBitrateResult {
+ kInsufficientSamples,
+ kNormalBitrate,
+ kHighBitRate,
+ kLowBitRate,
+ };
+
+ // We periodically check the encode bitrate; this function makes resolution
+ // up/down decisions and reports each decision to the handler.
+ void StartCheckForBitrate();
+ CheckBitrateResult CheckBitrate();
+
+ RTC_NO_UNIQUE_ADDRESS SequenceChecker task_checker_;
+ BandwidthQualityScalerUsageHandlerInterface* const handler_
+ RTC_GUARDED_BY(&task_checker_);
+
+ absl::optional<int64_t> last_time_sent_in_ms_ RTC_GUARDED_BY(&task_checker_);
+ RateStatistics encoded_bitrate_ RTC_GUARDED_BY(&task_checker_);
+ absl::optional<int> last_frame_size_pixels_ RTC_GUARDED_BY(&task_checker_);
+ rtc::WeakPtrFactory<BandwidthQualityScaler> weak_ptr_factory_;
+
+ std::vector<VideoEncoder::ResolutionBitrateLimits> resolution_bitrate_limits_;
+};
+
+} // namespace webrtc
+#endif // MODULES_VIDEO_CODING_UTILITY_BANDWIDTH_QUALITY_SCALER_H_
diff --git a/third_party/libwebrtc/modules/video_coding/utility/bandwidth_quality_scaler_unittest.cc b/third_party/libwebrtc/modules/video_coding/utility/bandwidth_quality_scaler_unittest.cc
new file mode 100644
index 0000000000..67ab8777a5
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/utility/bandwidth_quality_scaler_unittest.cc
@@ -0,0 +1,276 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/utility/bandwidth_quality_scaler.h"
+
+#include <memory>
+#include <string>
+
+#include "rtc_base/checks.h"
+#include "rtc_base/event.h"
+#include "rtc_base/experiments/encoder_info_settings.h"
+#include "rtc_base/task_queue_for_test.h"
+#include "rtc_base/time_utils.h"
+#include "test/field_trial.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+namespace {
+constexpr int kFramerateFps = 30;
+constexpr int kDefaultBitrateStateUpdateIntervalSeconds = 5;
+constexpr int kDefaultEncodeDeltaTimeMs = 33;  // 1/30 s => 33 ms.
+
+} // namespace
+
+class FakeBandwidthQualityScalerHandler
+ : public BandwidthQualityScalerUsageHandlerInterface {
+ public:
+ ~FakeBandwidthQualityScalerHandler() override = default;
+ void OnReportUsageBandwidthHigh() override {
+ adapt_down_event_count_++;
+ event_.Set();
+ }
+
+ void OnReportUsageBandwidthLow() override {
+ adapt_up_event_count_++;
+ event_.Set();
+ }
+
+ rtc::Event event_;
+ int adapt_up_event_count_ = 0;
+ int adapt_down_event_count_ = 0;
+};
+
+class BandwidthQualityScalerUnderTest : public BandwidthQualityScaler {
+ public:
+ explicit BandwidthQualityScalerUnderTest(
+ BandwidthQualityScalerUsageHandlerInterface* handler)
+ : BandwidthQualityScaler(handler) {}
+
+ int GetBitrateStateUpdateIntervalMs() {
+ return this->kBitrateStateUpdateInterval.ms() + 200;
+ }
+};
+
+class BandwidthQualityScalerTest
+ : public ::testing::Test,
+ public ::testing::WithParamInterface<std::string> {
+ protected:
+ enum ScaleDirection {
+ kKeepScaleNormalBandwidth,
+ kKeepScaleAboveMaxBandwidth,
+ kKeepScaleUnderMinBandwidth,
+ };
+
+ enum FrameType {
+ kKeyFrame,
+ kNormalFrame,
+ kNormalFrame_Overuse,
+ kNormalFrame_Underuse,
+ };
+ struct FrameConfig {
+ FrameConfig(int frame_num,
+ FrameType frame_type,
+ int actual_width,
+ int actual_height)
+ : frame_num(frame_num),
+ frame_type(frame_type),
+ actual_width(actual_width),
+ actual_height(actual_height) {}
+
+ int frame_num;
+ FrameType frame_type;
+ int actual_width;
+ int actual_height;
+ };
+
+ BandwidthQualityScalerTest()
+ : scoped_field_trial_(GetParam()),
+ task_queue_("BandwidthQualityScalerTestQueue"),
+ handler_(std::make_unique<FakeBandwidthQualityScalerHandler>()) {
+ task_queue_.SendTask(
+ [this] {
+ bandwidth_quality_scaler_ =
+ std::unique_ptr<BandwidthQualityScalerUnderTest>(
+ new BandwidthQualityScalerUnderTest(handler_.get()));
+ bandwidth_quality_scaler_->SetResolutionBitrateLimits(
+ EncoderInfoSettings::
+ GetDefaultSinglecastBitrateLimitsWhenQpIsUntrusted());
+ // Only for testing. Set first_timestamp_ in RateStatistics to 0.
+ bandwidth_quality_scaler_->ReportEncodeInfo(0, 0, 0, 0);
+ });
+ }
+
+ ~BandwidthQualityScalerTest() {
+ task_queue_.SendTask([this] { bandwidth_quality_scaler_ = nullptr; });
+ }
+
+ int GetFrameSizeBytes(
+ const FrameConfig& config,
+ const VideoEncoder::ResolutionBitrateLimits& bitrate_limits) {
+ int scale = 8 * kFramerateFps;
+ switch (config.frame_type) {
+ case FrameType::kKeyFrame: {
+ // 4 is an experimental value. Based on testing, a key frame is about four
+ // times the size of a normal frame.
+ return bitrate_limits.max_bitrate_bps * 4 / scale;
+ }
+ case FrameType::kNormalFrame_Overuse: {
+ return bitrate_limits.max_bitrate_bps * 3 / 2 / scale;
+ }
+ case FrameType::kNormalFrame_Underuse: {
+ return bitrate_limits.min_start_bitrate_bps * 3 / 4 / scale;
+ }
+ case FrameType::kNormalFrame: {
+ return (bitrate_limits.max_bitrate_bps +
+ bitrate_limits.min_start_bitrate_bps) /
+ 2 / scale;
+ }
+ }
+ return -1;
+ }
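+
+ // Example for GetFrameSizeBytes() above: at 640x360 the default limits are
+ // min_start_bitrate_bps = 500000 and max_bitrate_bps = 800000, so a
+ // kNormalFrame is (800000 + 500000) / 2 / (8 * 30) = 2708 bytes -- roughly
+ // 650 kbps at 30 fps, inside the working range cited by the tests below.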
+
+ absl::optional<VideoEncoder::ResolutionBitrateLimits>
+ GetDefaultSuitableBitrateLimit(int frame_size_pixels) {
+ return EncoderInfoSettings::
+ GetSinglecastBitrateLimitForResolutionWhenQpIsUntrusted(
+ frame_size_pixels,
+ EncoderInfoSettings::
+ GetDefaultSinglecastBitrateLimitsWhenQpIsUntrusted());
+ }
+
+ void TriggerBandwidthQualityScalerTest(
+ const std::vector<FrameConfig>& frame_configs) {
+ task_queue_.SendTask(
+ [frame_configs, this] {
+ RTC_CHECK(!frame_configs.empty());
+
+ int total_frame_nums = 0;
+ for (const FrameConfig& frame_config : frame_configs) {
+ total_frame_nums += frame_config.frame_num;
+ }
+
+ EXPECT_EQ(kFramerateFps * kDefaultBitrateStateUpdateIntervalSeconds,
+ total_frame_nums);
+
+ int64_t time_send_to_scaler_ms = rtc::TimeMillis();
+ for (size_t i = 0; i < frame_configs.size(); ++i) {
+ const FrameConfig& config = frame_configs[i];
+ absl::optional<VideoEncoder::ResolutionBitrateLimits>
+ suitable_bitrate = GetDefaultSuitableBitrateLimit(
+ config.actual_width * config.actual_height);
+ EXPECT_TRUE(suitable_bitrate);
+ for (int j = 0; j <= config.frame_num; ++j) {
+ time_send_to_scaler_ms += kDefaultEncodeDeltaTimeMs;
+ int frame_size_bytes =
+ GetFrameSizeBytes(config, suitable_bitrate.value());
+ RTC_CHECK(frame_size_bytes > 0);
+ bandwidth_quality_scaler_->ReportEncodeInfo(
+ frame_size_bytes, time_send_to_scaler_ms,
+ config.actual_width, config.actual_height);
+ }
+ }
+ });
+ }
+
+ test::ScopedFieldTrials scoped_field_trial_;
+ TaskQueueForTest task_queue_;
+ std::unique_ptr<BandwidthQualityScalerUnderTest> bandwidth_quality_scaler_;
+ std::unique_ptr<FakeBandwidthQualityScalerHandler> handler_;
+};
+
+INSTANTIATE_TEST_SUITE_P(
+ FieldTrials,
+ BandwidthQualityScalerTest,
+ ::testing::Values("WebRTC-Video-BandwidthQualityScalerSettings/"
+ "bitrate_state_update_interval_s_:1/",
+ "WebRTC-Video-BandwidthQualityScalerSettings/"
+ "bitrate_state_update_interval_s_:2/"));
+
+TEST_P(BandwidthQualityScalerTest, AllNormalFrame_640x360) {
+ const std::vector<FrameConfig> frame_configs{
+ FrameConfig(150, FrameType::kNormalFrame, 640, 360)};
+ TriggerBandwidthQualityScalerTest(frame_configs);
+
+ // When the resolution is 640*360, the experimental working bitrate range
+ // is [500000, 800000] bps. The encoded bitrate is 654253, which falls in
+ // the range, so no adaptation (up/down) is triggered.
+ EXPECT_FALSE(handler_->event_.Wait(
+ bandwidth_quality_scaler_->GetBitrateStateUpdateIntervalMs()));
+ EXPECT_EQ(0, handler_->adapt_down_event_count_);
+ EXPECT_EQ(0, handler_->adapt_up_event_count_);
+}
+
+TEST_P(BandwidthQualityScalerTest, AllNormalFrame_AboveMaxBandwidth_640x360) {
+ const std::vector<FrameConfig> frame_configs{
+ FrameConfig(150, FrameType::kNormalFrame_Overuse, 640, 360)};
+ TriggerBandwidthQualityScalerTest(frame_configs);
+
+ // When the resolution is 640*360, the experimental working bitrate range
+ // is [500000, 800000] bps. The encoded bitrate is 1208000 > 800000 * 0.95,
+ // so adapt_up_event_count_ is triggered.
+ EXPECT_TRUE(handler_->event_.Wait(
+ bandwidth_quality_scaler_->GetBitrateStateUpdateIntervalMs()));
+ EXPECT_EQ(0, handler_->adapt_down_event_count_);
+ EXPECT_EQ(1, handler_->adapt_up_event_count_);
+}
+
+TEST_P(BandwidthQualityScalerTest, AllNormalFrame_Underuse_640x360) {
+ const std::vector<FrameConfig> frame_configs{
+ FrameConfig(150, FrameType::kNormalFrame_Underuse, 640, 360)};
+ TriggerBandwidthQualityScalerTest(frame_configs);
+
+ // When the resolution is 640*360, the experimental working bitrate range
+ // is [500000, 800000] bps. The encoded bitrate is 377379 < 500000 * 0.8,
+ // so adapt_down_event_count_ is triggered.
+ EXPECT_TRUE(handler_->event_.Wait(
+ bandwidth_quality_scaler_->GetBitrateStateUpdateIntervalMs()));
+ EXPECT_EQ(1, handler_->adapt_down_event_count_);
+ EXPECT_EQ(0, handler_->adapt_up_event_count_);
+}
+
+TEST_P(BandwidthQualityScalerTest, FixedFrameTypeTest1_640x360) {
+ const std::vector<FrameConfig> frame_configs{
+ FrameConfig(5, FrameType::kNormalFrame_Underuse, 640, 360),
+ FrameConfig(110, FrameType::kNormalFrame, 640, 360),
+ FrameConfig(20, FrameType::kNormalFrame_Overuse, 640, 360),
+ FrameConfig(15, FrameType::kKeyFrame, 640, 360),
+ };
+ TriggerBandwidthQualityScalerTest(frame_configs);
+
+ // When the resolution is 640*360, the experimental working bitrate range
+ // is [500000, 800000] bps. The encoded bitrate is 1059462 > 800000 * 0.95,
+ // so adapt_up_event_count_ is triggered.
+ EXPECT_TRUE(handler_->event_.Wait(
+ bandwidth_quality_scaler_->GetBitrateStateUpdateIntervalMs()));
+ EXPECT_EQ(0, handler_->adapt_down_event_count_);
+ EXPECT_EQ(1, handler_->adapt_up_event_count_);
+}
+
+TEST_P(BandwidthQualityScalerTest, FixedFrameTypeTest2_640x360) {
+ const std::vector<FrameConfig> frame_configs{
+ FrameConfig(10, FrameType::kNormalFrame_Underuse, 640, 360),
+ FrameConfig(50, FrameType::kNormalFrame, 640, 360),
+ FrameConfig(5, FrameType::kKeyFrame, 640, 360),
+ FrameConfig(85, FrameType::kNormalFrame_Overuse, 640, 360),
+ };
+ TriggerBandwidthQualityScalerTest(frame_configs);
+
+ // When the resolution is 640*360, the experimental working bitrate range
+ // is [500000, 800000] bps. The encoded bitrate is 1059462 > 800000 * 0.95,
+ // so adapt_up_event_count_ is triggered.
+ EXPECT_TRUE(handler_->event_.Wait(
+ bandwidth_quality_scaler_->GetBitrateStateUpdateIntervalMs()));
+ EXPECT_EQ(0, handler_->adapt_down_event_count_);
+ EXPECT_EQ(1, handler_->adapt_up_event_count_);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/utility/decoded_frames_history.cc b/third_party/libwebrtc/modules/video_coding/utility/decoded_frames_history.cc
new file mode 100644
index 0000000000..1138aa8448
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/utility/decoded_frames_history.cc
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/utility/decoded_frames_history.h"
+
+#include <algorithm>
+
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+namespace video_coding {
+
+DecodedFramesHistory::DecodedFramesHistory(size_t window_size)
+ : buffer_(window_size) {}
+
+DecodedFramesHistory::~DecodedFramesHistory() = default;
+
+void DecodedFramesHistory::InsertDecoded(int64_t frame_id, uint32_t timestamp) {
+ last_decoded_frame_ = frame_id;
+ last_decoded_frame_timestamp_ = timestamp;
+ int new_index = FrameIdToIndex(frame_id);
+
+ RTC_DCHECK(last_frame_id_ < frame_id);
+
+ // Clears expired values from the cyclic buffer_.
+ if (last_frame_id_) {
+ int64_t id_jump = frame_id - *last_frame_id_;
+ int last_index = FrameIdToIndex(*last_frame_id_);
+
+ if (id_jump >= static_cast<int64_t>(buffer_.size())) {
+ std::fill(buffer_.begin(), buffer_.end(), false);
+ } else if (new_index > last_index) {
+ std::fill(buffer_.begin() + last_index + 1, buffer_.begin() + new_index,
+ false);
+ } else {
+ std::fill(buffer_.begin() + last_index + 1, buffer_.end(), false);
+ std::fill(buffer_.begin(), buffer_.begin() + new_index, false);
+ }
+ }
+
+ buffer_[new_index] = true;
+ last_frame_id_ = frame_id;
+}
+
+bool DecodedFramesHistory::WasDecoded(int64_t frame_id) const {
+ if (!last_frame_id_)
+ return false;
+
+ // A reference to a frame id outside the stored window can happen; treat
+ // such a frame as not decoded.
+ if (frame_id <= *last_frame_id_ - static_cast<int64_t>(buffer_.size())) {
+ RTC_LOG(LS_WARNING) << "Referencing a frame out of the window. "
+ "Assuming it was undecoded to avoid artifacts.";
+ return false;
+ }
+
+ if (frame_id > last_frame_id_)
+ return false;
+
+ return buffer_[FrameIdToIndex(frame_id)];
+}
+
+void DecodedFramesHistory::Clear() {
+ last_decoded_frame_timestamp_.reset();
+ last_decoded_frame_.reset();
+ std::fill(buffer_.begin(), buffer_.end(), false);
+ last_frame_id_.reset();
+}
+
+absl::optional<int64_t> DecodedFramesHistory::GetLastDecodedFrameId() const {
+ return last_decoded_frame_;
+}
+
+absl::optional<uint32_t> DecodedFramesHistory::GetLastDecodedFrameTimestamp()
+ const {
+ return last_decoded_frame_timestamp_;
+}
+
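+// Maps a (possibly negative) frame id onto the circular buffer. C++ `%`
+// truncates toward zero, so the remainder of a negative frame id is negative
+// and has to be shifted into [0, buffer_.size()). For example, with a window
+// of 10, frame id 1234 maps to index 4, while frame id -3 gives
+// -3 % 10 == -3 and is fixed up to -3 + 10 = 7.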
+int DecodedFramesHistory::FrameIdToIndex(int64_t frame_id) const {
+ int m = frame_id % buffer_.size();
+ return m >= 0 ? m : m + buffer_.size();
+}
+
+} // namespace video_coding
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/utility/decoded_frames_history.h b/third_party/libwebrtc/modules/video_coding/utility/decoded_frames_history.h
new file mode 100644
index 0000000000..9b8bf65821
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/utility/decoded_frames_history.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_UTILITY_DECODED_FRAMES_HISTORY_H_
+#define MODULES_VIDEO_CODING_UTILITY_DECODED_FRAMES_HISTORY_H_
+
+#include <stdint.h>
+
+#include <bitset>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/video/encoded_frame.h"
+
+namespace webrtc {
+namespace video_coding {
+
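+// Tracks which frame ids inside a sliding window were decoded, so that e.g.
+// a frame buffer can check whether a frame's references were decoded before
+// scheduling it. Minimal sketch (`frame_id` and `rtp_timestamp` are
+// hypothetical inputs, not part of this API):
+//
+//   DecodedFramesHistory history(/*window_size=*/1 << 13);
+//   history.InsertDecoded(frame_id, rtp_timestamp);
+//   bool references_ok = history.WasDecoded(frame_id - 1);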
+class DecodedFramesHistory {
+ public:
+ // window_size - how many frames back into the past are remembered.
+ explicit DecodedFramesHistory(size_t window_size);
+ ~DecodedFramesHistory();
+ // Called for each decoded frame. Assumes frame ids are strictly increasing.
+ void InsertDecoded(int64_t frame_id, uint32_t timestamp);
+ // Query if the given frame_id was inserted before. frame_id should be at
+ // most window_size-1 smaller than the last inserted frame id.
+ bool WasDecoded(int64_t frame_id) const;
+
+ void Clear();
+
+ absl::optional<int64_t> GetLastDecodedFrameId() const;
+ absl::optional<uint32_t> GetLastDecodedFrameTimestamp() const;
+
+ private:
+ int FrameIdToIndex(int64_t frame_id) const;
+
+ std::vector<bool> buffer_;
+ absl::optional<int64_t> last_frame_id_;
+ absl::optional<int64_t> last_decoded_frame_;
+ absl::optional<uint32_t> last_decoded_frame_timestamp_;
+};
+
+} // namespace video_coding
+} // namespace webrtc
+#endif // MODULES_VIDEO_CODING_UTILITY_DECODED_FRAMES_HISTORY_H_
diff --git a/third_party/libwebrtc/modules/video_coding/utility/decoded_frames_history_unittest.cc b/third_party/libwebrtc/modules/video_coding/utility/decoded_frames_history_unittest.cc
new file mode 100644
index 0000000000..ac09a42053
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/utility/decoded_frames_history_unittest.cc
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/utility/decoded_frames_history.h"
+
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace video_coding {
+namespace {
+
+constexpr int kHistorySize = 1 << 13;
+
+TEST(DecodedFramesHistory, RequestOnEmptyHistory) {
+ DecodedFramesHistory history(kHistorySize);
+ EXPECT_EQ(history.WasDecoded(1234), false);
+}
+
+TEST(DecodedFramesHistory, FindsLastDecodedFrame) {
+ DecodedFramesHistory history(kHistorySize);
+ history.InsertDecoded(1234, 0);
+ EXPECT_EQ(history.WasDecoded(1234), true);
+}
+
+TEST(DecodedFramesHistory, FindsPreviousFrame) {
+ DecodedFramesHistory history(kHistorySize);
+ history.InsertDecoded(1234, 0);
+ history.InsertDecoded(1235, 0);
+ EXPECT_EQ(history.WasDecoded(1234), true);
+}
+
+TEST(DecodedFramesHistory, ReportsMissingFrame) {
+ DecodedFramesHistory history(kHistorySize);
+ history.InsertDecoded(1234, 0);
+ history.InsertDecoded(1236, 0);
+ EXPECT_EQ(history.WasDecoded(1235), false);
+}
+
+TEST(DecodedFramesHistory, ClearsHistory) {
+ DecodedFramesHistory history(kHistorySize);
+ history.InsertDecoded(1234, 0);
+ history.Clear();
+ EXPECT_EQ(history.WasDecoded(1234), false);
+ EXPECT_EQ(history.GetLastDecodedFrameId(), absl::nullopt);
+ EXPECT_EQ(history.GetLastDecodedFrameTimestamp(), absl::nullopt);
+}
+
+TEST(DecodedFramesHistory, HandlesBigJumpInPictureId) {
+ DecodedFramesHistory history(kHistorySize);
+ history.InsertDecoded(1234, 0);
+ history.InsertDecoded(1235, 0);
+ history.InsertDecoded(1236, 0);
+ history.InsertDecoded(1236 + kHistorySize / 2, 0);
+ EXPECT_EQ(history.WasDecoded(1234), true);
+ EXPECT_EQ(history.WasDecoded(1237), false);
+}
+
+TEST(DecodedFramesHistory, ForgetsTooOldHistory) {
+ DecodedFramesHistory history(kHistorySize);
+ history.InsertDecoded(1234, 0);
+ history.InsertDecoded(1235, 0);
+ history.InsertDecoded(1236, 0);
+ history.InsertDecoded(1236 + kHistorySize * 2, 0);
+ EXPECT_EQ(history.WasDecoded(1234), false);
+ EXPECT_EQ(history.WasDecoded(1237), false);
+}
+
+TEST(DecodedFramesHistory, ReturnsLastDecodedFrameId) {
+ DecodedFramesHistory history(kHistorySize);
+ EXPECT_EQ(history.GetLastDecodedFrameId(), absl::nullopt);
+ history.InsertDecoded(1234, 0);
+ EXPECT_EQ(history.GetLastDecodedFrameId(), 1234);
+ history.InsertDecoded(1235, 0);
+ EXPECT_EQ(history.GetLastDecodedFrameId(), 1235);
+}
+
+TEST(DecodedFramesHistory, ReturnsLastDecodedFrameTimestamp) {
+ DecodedFramesHistory history(kHistorySize);
+ EXPECT_EQ(history.GetLastDecodedFrameTimestamp(), absl::nullopt);
+ history.InsertDecoded(1234, 12345);
+ EXPECT_EQ(history.GetLastDecodedFrameTimestamp(), 12345u);
+ history.InsertDecoded(1235, 12366);
+ EXPECT_EQ(history.GetLastDecodedFrameTimestamp(), 12366u);
+}
+
+TEST(DecodedFramesHistory, NegativePictureIds) {
+ DecodedFramesHistory history(kHistorySize);
+ history.InsertDecoded(-1234, 12345);
+ history.InsertDecoded(-1233, 12366);
+ EXPECT_EQ(*history.GetLastDecodedFrameId(), -1233);
+
+ history.InsertDecoded(-1, 12377);
+ history.InsertDecoded(0, 12388);
+ EXPECT_EQ(*history.GetLastDecodedFrameId(), 0);
+
+ history.InsertDecoded(1, 12399);
+ EXPECT_EQ(*history.GetLastDecodedFrameId(), 1);
+
+ EXPECT_EQ(history.WasDecoded(-1234), true);
+ EXPECT_EQ(history.WasDecoded(-1), true);
+ EXPECT_EQ(history.WasDecoded(0), true);
+ EXPECT_EQ(history.WasDecoded(1), true);
+}
+
+} // namespace
+} // namespace video_coding
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/utility/frame_dropper.cc b/third_party/libwebrtc/modules/video_coding/utility/frame_dropper.cc
new file mode 100644
index 0000000000..8ea8a8e268
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/utility/frame_dropper.cc
@@ -0,0 +1,268 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/utility/frame_dropper.h"
+
+#include <algorithm>
+
+namespace webrtc {
+
+namespace {
+
+const float kDefaultFrameSizeAlpha = 0.9f;
+const float kDefaultKeyFrameRatioAlpha = 0.99f;
+// 1 key frame every 10 seconds at 30 fps.
+const float kDefaultKeyFrameRatioValue = 1 / 300.0f;
+
+const float kDefaultDropRatioAlpha = 0.9f;
+const float kDefaultDropRatioValue = 0.96f;
+// Maximum duration over which frames are continuously dropped.
+const float kDefaultMaxDropDurationSecs = 4.0f;
+
+// Default target bitrate.
+// TODO(isheriff): Should this be higher to avoid dropping too many packets when
+// the bandwidth is unknown at the start ?
+const float kDefaultTargetBitrateKbps = 300.0f;
+const float kDefaultIncomingFrameRate = 30;
+const float kLeakyBucketSizeSeconds = 0.5f;
+
+// A delta frame that is bigger than `kLargeDeltaFactor` times the average
+// delta frame is a large frame that is spread out for accumulation.
+const int kLargeDeltaFactor = 3;
+
+// Cap on the frame size accumulator to prevent excessive drops.
+const float kAccumulatorCapBufferSizeSecs = 3.0f;
+} // namespace
+
+FrameDropper::FrameDropper()
+ : key_frame_ratio_(kDefaultKeyFrameRatioAlpha),
+ delta_frame_size_avg_kbits_(kDefaultFrameSizeAlpha),
+ drop_ratio_(kDefaultDropRatioAlpha, kDefaultDropRatioValue),
+ enabled_(true),
+ max_drop_duration_secs_(kDefaultMaxDropDurationSecs) {
+ Reset();
+}
+
+FrameDropper::~FrameDropper() = default;
+
+void FrameDropper::Reset() {
+ key_frame_ratio_.Reset(kDefaultKeyFrameRatioAlpha);
+ key_frame_ratio_.Apply(1.0f, kDefaultKeyFrameRatioValue);
+ delta_frame_size_avg_kbits_.Reset(kDefaultFrameSizeAlpha);
+
+ accumulator_ = 0.0f;
+ accumulator_max_ = kDefaultTargetBitrateKbps / 2;
+ target_bitrate_ = kDefaultTargetBitrateKbps;
+ incoming_frame_rate_ = kDefaultIncomingFrameRate;
+
+ large_frame_accumulation_count_ = 0;
+ large_frame_accumulation_chunk_size_ = 0;
+ large_frame_accumulation_spread_ = 0.5 * kDefaultIncomingFrameRate;
+
+ drop_next_ = false;
+ drop_ratio_.Reset(0.9f);
+ drop_ratio_.Apply(0.0f, 0.0f);
+ drop_count_ = 0;
+ was_below_max_ = true;
+}
+
+void FrameDropper::Enable(bool enable) {
+ enabled_ = enable;
+}
+
+void FrameDropper::Fill(size_t framesize_bytes, bool delta_frame) {
+ if (!enabled_) {
+ return;
+ }
+ float framesize_kbits = 8.0f * static_cast<float>(framesize_bytes) / 1000.0f;
+ if (!delta_frame) {
+ key_frame_ratio_.Apply(1.0, 1.0);
+ // Do not spread if we are already doing it (or we risk dropping bits that
+ // need accumulation). Given we compute the key frame ratio and spread
+ // based on that, this should not normally happen.
+ if (large_frame_accumulation_count_ == 0) {
+ if (key_frame_ratio_.filtered() > 1e-5 &&
+ 1 / key_frame_ratio_.filtered() < large_frame_accumulation_spread_) {
+ large_frame_accumulation_count_ =
+ static_cast<int32_t>(1 / key_frame_ratio_.filtered() + 0.5);
+ } else {
+ large_frame_accumulation_count_ =
+ static_cast<int32_t>(large_frame_accumulation_spread_ + 0.5);
+ }
+ large_frame_accumulation_chunk_size_ =
+ framesize_kbits / large_frame_accumulation_count_;
+ framesize_kbits = 0;
+ }
+ } else {
+ // Identify if it is an unusually large delta frame and spread accumulation
+ // if that is the case.
+ if (delta_frame_size_avg_kbits_.filtered() != -1 &&
+ (framesize_kbits >
+ kLargeDeltaFactor * delta_frame_size_avg_kbits_.filtered()) &&
+ large_frame_accumulation_count_ == 0) {
+ large_frame_accumulation_count_ =
+ static_cast<int32_t>(large_frame_accumulation_spread_ + 0.5);
+ large_frame_accumulation_chunk_size_ =
+ framesize_kbits / large_frame_accumulation_count_;
+ framesize_kbits = 0;
+ } else {
+ delta_frame_size_avg_kbits_.Apply(1, framesize_kbits);
+ }
+ key_frame_ratio_.Apply(1.0, 0.0);
+ }
+ // Change the level of the accumulator (bucket)
+ accumulator_ += framesize_kbits;
+ CapAccumulator();
+}
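+
+// Worked example of the spreading in Fill() above: at 30 fps the
+// accumulation spread is 15 frames. A 25000-byte (200 kbit) key frame is
+// turned into 15 chunks of ~13.3 kbits and adds nothing to the accumulator
+// up front. With a 300 kbps target, Leak() expects 10 kbits per frame, so
+// over the next 15 frames the accumulator grows by ~3.3 kbits per frame --
+// the same 50 kbits by which the key frame exceeded its 15-frame budget,
+// absorbed gradually instead of all at once.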
+
+void FrameDropper::Leak(uint32_t input_framerate) {
+ if (!enabled_) {
+ return;
+ }
+ if (input_framerate < 1) {
+ return;
+ }
+ if (target_bitrate_ < 0.0f) {
+ return;
+ }
+ // Add lower bound for large frame accumulation spread.
+ large_frame_accumulation_spread_ = std::max(0.5 * input_framerate, 5.0);
+ // Expected bits per frame based on current input frame rate.
+ float expected_bits_per_frame = target_bitrate_ / input_framerate;
+ if (large_frame_accumulation_count_ > 0) {
+ expected_bits_per_frame -= large_frame_accumulation_chunk_size_;
+ --large_frame_accumulation_count_;
+ }
+ accumulator_ -= expected_bits_per_frame;
+ if (accumulator_ < 0.0f) {
+ accumulator_ = 0.0f;
+ }
+ UpdateRatio();
+}
+
+void FrameDropper::UpdateRatio() {
+ if (accumulator_ > 1.3f * accumulator_max_) {
+ // Too far above accumulator max, react faster.
+ drop_ratio_.UpdateBase(0.8f);
+ } else {
+ // Go back to normal reaction.
+ drop_ratio_.UpdateBase(0.9f);
+ }
+ if (accumulator_ > accumulator_max_) {
+ // We are above accumulator max, and should ideally drop a frame. Increase
+ // the drop_ratio_ and drop the frame later.
+ if (was_below_max_) {
+ drop_next_ = true;
+ }
+ drop_ratio_.Apply(1.0f, 1.0f);
+ drop_ratio_.UpdateBase(0.9f);
+ } else {
+ drop_ratio_.Apply(1.0f, 0.0f);
+ }
+ was_below_max_ = accumulator_ < accumulator_max_;
+}
+
+// This function signals when to drop frames to the caller. It makes use of the
+// drop_ratio_ to smooth out the drops over time.
+bool FrameDropper::DropFrame() {
+ if (!enabled_) {
+ return false;
+ }
+ if (drop_next_) {
+ drop_next_ = false;
+ drop_count_ = 0;
+ }
+
+ if (drop_ratio_.filtered() >= 0.5f) { // Drops per keep
+ // Limit is the number of frames we should drop between each kept frame
+ // to keep our drop ratio. limit is positive in this case.
+ float denom = 1.0f - drop_ratio_.filtered();
+ if (denom < 1e-5) {
+ denom = 1e-5f;
+ }
+ int32_t limit = static_cast<int32_t>(1.0f / denom - 1.0f + 0.5f);
+ // Put a bound on the maximum number of dropped frames between each kept
+ // frame, in terms of frame rate and window size (secs).
+ int max_limit =
+ static_cast<int>(incoming_frame_rate_ * max_drop_duration_secs_);
+ if (limit > max_limit) {
+ limit = max_limit;
+ }
+ if (drop_count_ < 0) {
+ // Reset the drop_count_ since it was negative and should be positive.
+ drop_count_ = -drop_count_;
+ }
+ if (drop_count_ < limit) {
+ // As long we are below the limit we should drop frames.
+ drop_count_++;
+ return true;
+ } else {
+ // Only when we reset drop_count_ a frame should be kept.
+ drop_count_ = 0;
+ return false;
+ }
+ } else if (drop_ratio_.filtered() > 0.0f &&
+ drop_ratio_.filtered() < 0.5f) { // Keeps per drop
+ // Limit is the number of frames we should keep between each drop
+ // in order to keep the drop ratio. limit is negative in this case,
+ // and the drop_count_ is also negative.
+ float denom = drop_ratio_.filtered();
+ if (denom < 1e-5) {
+ denom = 1e-5f;
+ }
+ int32_t limit = -static_cast<int32_t>(1.0f / denom - 1.0f + 0.5f);
+ if (drop_count_ > 0) {
+ // Reset the drop_count_ since we have a positive
+ // drop_count_, and it should be negative.
+ drop_count_ = -drop_count_;
+ }
+ if (drop_count_ > limit) {
+ if (drop_count_ == 0) {
+ // Drop frames when we reset drop_count_.
+ drop_count_--;
+ return true;
+ } else {
+ // Keep frames as long as we haven't reached limit.
+ drop_count_--;
+ return false;
+ }
+ } else {
+ drop_count_ = 0;
+ return false;
+ }
+ }
+ drop_count_ = 0;
+ return false;
+}
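+
+// Worked example: with drop_ratio_ filtered at 0.75, limit =
+// round(1 / (1 - 0.75) - 1) = 3, so three frames are dropped for every frame
+// kept. With drop_ratio_ at 0.2, limit = -round(1 / 0.2 - 1) = -4, so four
+// frames are kept for every frame dropped.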
+
+void FrameDropper::SetRates(float bitrate, float incoming_frame_rate) {
+ // Bit rate of -1 means infinite bandwidth.
+ accumulator_max_ = bitrate * kLeakyBucketSizeSeconds;
+ if (target_bitrate_ > 0.0f && bitrate < target_bitrate_ &&
+ accumulator_ > accumulator_max_) {
+ // Rescale the accumulator level if the accumulator max decreases
+ accumulator_ = bitrate / target_bitrate_ * accumulator_;
+ }
+ target_bitrate_ = bitrate;
+ CapAccumulator();
+ incoming_frame_rate_ = incoming_frame_rate;
+}
+
+// Put a cap on the accumulator, i.e., don't let it grow beyond some level.
+// This is a temporary fix for screencasting where very large frames from
+// encoder will cause very slow response (too many frame drops).
+// TODO(isheriff): Remove this now that large delta frames are also spread out ?
+void FrameDropper::CapAccumulator() {
+ float max_accumulator = target_bitrate_ * kAccumulatorCapBufferSizeSecs;
+ if (accumulator_ > max_accumulator) {
+ accumulator_ = max_accumulator;
+ }
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/utility/frame_dropper.h b/third_party/libwebrtc/modules/video_coding/utility/frame_dropper.h
new file mode 100644
index 0000000000..b45b7fe27f
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/utility/frame_dropper.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_UTILITY_FRAME_DROPPER_H_
+#define MODULES_VIDEO_CODING_UTILITY_FRAME_DROPPER_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "rtc_base/numerics/exp_filter.h"
+
+namespace webrtc {
+
+// The FrameDropper implements a variant of the leaky bucket algorithm for
+// keeping track of when to drop frames to avoid bit rate overuse when the
+// encoder can't keep up with its target bit rate.
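+//
+// Typical per-frame call pattern, mirroring the unit tests (an illustrative
+// sketch; the encode step and variable names are assumptions, not part of
+// this API):
+//
+//   FrameDropper dropper;
+//   dropper.SetRates(/*bitrate=*/300.0f, /*incoming_frame_rate=*/30.0f);
+//   // For each captured frame:
+//   if (!dropper.DropFrame()) {
+//     size_t encoded_bytes = Encode(frame);  // Hypothetical encode step.
+//     dropper.Fill(encoded_bytes, /*delta_frame=*/!is_key_frame);
+//   }
+//   dropper.Leak(/*input_framerate=*/30);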
+class FrameDropper {
+ public:
+ FrameDropper();
+ ~FrameDropper();
+
+ // Resets the FrameDropper to its initial state.
+ void Reset();
+
+ void Enable(bool enable);
+
+ // Answers whether it's time to drop a frame in order to stay within the
+ // target bit rate. Must be called for every frame.
+ //
+ // Return value : True if we should drop the current frame.
+ bool DropFrame();
+
+ // Updates the FrameDropper with the size of the latest encoded frame.
+ // The FrameDropper calculates a new drop ratio (can be seen as the
+ // probability to drop a frame) and updates its internal statistics.
+ //
+ // Input:
+ // - framesize_bytes : The size of the latest frame returned
+ // from the encoder.
+ // - delta_frame : True if the encoder returned a delta frame.
+ void Fill(size_t framesize_bytes, bool delta_frame);
+
+ void Leak(uint32_t input_framerate);
+
+ // Sets the target bit rate and the frame rate produced by the camera.
+ //
+ // Input:
+ // - bitrate : The target bit rate.
+ // - incoming_frame_rate : The frame rate produced by the camera.
+ void SetRates(float bitrate, float incoming_frame_rate);
+
+ private:
+ void UpdateRatio();
+ void CapAccumulator();
+
+ rtc::ExpFilter key_frame_ratio_;
+ rtc::ExpFilter delta_frame_size_avg_kbits_;
+
+ // Key frames and large delta frames are not immediately accumulated in the
+ // bucket since they can immediately overflow the bucket leading to large
+ // drops on the following packets that may be much smaller. Instead these
+ // large frames are accumulated over several frames when the bucket leaks.
+
+ // `large_frame_accumulation_spread_` represents the number of frames over
+ // which a large frame is accumulated.
+ float large_frame_accumulation_spread_;
+ // `large_frame_accumulation_count_` represents the number of frames left
+ // to finish accumulating a large frame.
+ int large_frame_accumulation_count_;
+ // `large_frame_accumulation_chunk_size_` represents the size of a single
+ // chunk for large frame accumulation.
+ float large_frame_accumulation_chunk_size_;
+
+ float accumulator_;
+ float accumulator_max_;
+ float target_bitrate_;
+ bool drop_next_;
+ rtc::ExpFilter drop_ratio_;
+ int drop_count_;
+ float incoming_frame_rate_;
+ bool was_below_max_;
+ bool enabled_;
+ const float max_drop_duration_secs_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_UTILITY_FRAME_DROPPER_H_
diff --git a/third_party/libwebrtc/modules/video_coding/utility/frame_dropper_unittest.cc b/third_party/libwebrtc/modules/video_coding/utility/frame_dropper_unittest.cc
new file mode 100644
index 0000000000..066103a788
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/utility/frame_dropper_unittest.cc
@@ -0,0 +1,160 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/utility/frame_dropper.h"
+
+#include "test/gtest.h"
+
+namespace webrtc {
+
+namespace {
+
+const float kTargetBitRateKbps = 300;
+const float kIncomingFrameRate = 30;
+const size_t kFrameSizeBytes = 1250;
+
+const size_t kLargeFrameSizeBytes = 25000;
+
+const bool kIncludeKeyFrame = true;
+const bool kDoNotIncludeKeyFrame = false;
+
+} // namespace
+
+class FrameDropperTest : public ::testing::Test {
+ protected:
+ void SetUp() override {
+ frame_dropper_.SetRates(kTargetBitRateKbps, kIncomingFrameRate);
+ }
+
+ void OverflowLeakyBucket() {
+ // Overflow bucket in frame dropper.
+ for (int i = 0; i < kIncomingFrameRate; ++i) {
+ frame_dropper_.Fill(kFrameSizeBytes, true);
+ }
+ frame_dropper_.Leak(kIncomingFrameRate);
+ }
+
+ void ValidateNoDropsAtTargetBitrate(int large_frame_size_bytes,
+ int large_frame_rate,
+ bool is_large_frame_delta) {
+ // Smaller frame size is computed to meet `kTargetBitRateKbps`.
+ int small_frame_size_bytes =
+ kFrameSizeBytes -
+ (large_frame_size_bytes * large_frame_rate) / kIncomingFrameRate;
+
+ for (int i = 1; i <= 5 * large_frame_rate; ++i) {
+ // Large frame. First frame is always a key frame.
+ frame_dropper_.Fill(large_frame_size_bytes,
+ (i == 1) ? false : is_large_frame_delta);
+ frame_dropper_.Leak(kIncomingFrameRate);
+ EXPECT_FALSE(frame_dropper_.DropFrame());
+
+ // Smaller frames.
+ for (int j = 1; j < kIncomingFrameRate / large_frame_rate; ++j) {
+ frame_dropper_.Fill(small_frame_size_bytes, true);
+ frame_dropper_.Leak(kIncomingFrameRate);
+ EXPECT_FALSE(frame_dropper_.DropFrame());
+ }
+ }
+ }
+
+ void ValidateThroughputMatchesTargetBitrate(int bitrate_kbps,
+ bool include_keyframe) {
+ int delta_frame_size;
+ int total_bytes = 0;
+
+ if (include_keyframe) {
+ delta_frame_size = ((1000.0 / 8 * bitrate_kbps) - kLargeFrameSizeBytes) /
+ (kIncomingFrameRate - 1);
+ } else {
+ delta_frame_size = bitrate_kbps * 1000.0 / (8 * kIncomingFrameRate);
+ }
+ const int kNumIterations = 1000;
+ for (int i = 1; i <= kNumIterations; ++i) {
+ int j = 0;
+ if (include_keyframe) {
+ if (!frame_dropper_.DropFrame()) {
+ frame_dropper_.Fill(kLargeFrameSizeBytes, false);
+ total_bytes += kLargeFrameSizeBytes;
+ }
+ frame_dropper_.Leak(kIncomingFrameRate);
+ j++;
+ }
+ for (; j < kIncomingFrameRate; ++j) {
+ if (!frame_dropper_.DropFrame()) {
+ frame_dropper_.Fill(delta_frame_size, true);
+ total_bytes += delta_frame_size;
+ }
+ frame_dropper_.Leak(kIncomingFrameRate);
+ }
+ }
+ float throughput_kbps = total_bytes * 8.0 / (1000 * kNumIterations);
+ float deviation_from_target =
+ (throughput_kbps - kTargetBitRateKbps) * 100.0 / kTargetBitRateKbps;
+ if (deviation_from_target < 0) {
+ deviation_from_target = -deviation_from_target;
+ }
+
+ // Variation is < 0.1%
+ EXPECT_LE(deviation_from_target, 0.1);
+ }
+
+ FrameDropper frame_dropper_;
+};
+
+TEST_F(FrameDropperTest, NoDropsWhenDisabled) {
+ frame_dropper_.Enable(false);
+ OverflowLeakyBucket();
+ EXPECT_FALSE(frame_dropper_.DropFrame());
+}
+
+TEST_F(FrameDropperTest, DropsByDefaultWhenBucketOverflows) {
+ OverflowLeakyBucket();
+ EXPECT_TRUE(frame_dropper_.DropFrame());
+}
+
+TEST_F(FrameDropperTest, NoDropsWhenFillRateMatchesLeakRate) {
+ for (int i = 0; i < 5 * kIncomingFrameRate; ++i) {
+ frame_dropper_.Fill(kFrameSizeBytes, true);
+ frame_dropper_.Leak(kIncomingFrameRate);
+ EXPECT_FALSE(frame_dropper_.DropFrame());
+ }
+}
+
+TEST_F(FrameDropperTest, LargeKeyFrames) {
+ ValidateNoDropsAtTargetBitrate(kLargeFrameSizeBytes, 1, false);
+ frame_dropper_.Reset();
+ ValidateNoDropsAtTargetBitrate(kLargeFrameSizeBytes / 2, 2, false);
+ frame_dropper_.Reset();
+ ValidateNoDropsAtTargetBitrate(kLargeFrameSizeBytes / 4, 4, false);
+ frame_dropper_.Reset();
+ ValidateNoDropsAtTargetBitrate(kLargeFrameSizeBytes / 8, 8, false);
+}
+
+TEST_F(FrameDropperTest, LargeDeltaFrames) {
+ ValidateNoDropsAtTargetBitrate(kLargeFrameSizeBytes, 1, true);
+ frame_dropper_.Reset();
+ ValidateNoDropsAtTargetBitrate(kLargeFrameSizeBytes / 2, 2, true);
+ frame_dropper_.Reset();
+ ValidateNoDropsAtTargetBitrate(kLargeFrameSizeBytes / 4, 4, true);
+ frame_dropper_.Reset();
+ ValidateNoDropsAtTargetBitrate(kLargeFrameSizeBytes / 8, 8, true);
+}
+
+TEST_F(FrameDropperTest, TrafficVolumeAboveAvailableBandwidth) {
+ ValidateThroughputMatchesTargetBitrate(700, kIncludeKeyFrame);
+ ValidateThroughputMatchesTargetBitrate(700, kDoNotIncludeKeyFrame);
+ ValidateThroughputMatchesTargetBitrate(600, kIncludeKeyFrame);
+ ValidateThroughputMatchesTargetBitrate(600, kDoNotIncludeKeyFrame);
+ ValidateThroughputMatchesTargetBitrate(500, kIncludeKeyFrame);
+ ValidateThroughputMatchesTargetBitrate(500, kDoNotIncludeKeyFrame);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/utility/framerate_controller_deprecated.cc b/third_party/libwebrtc/modules/video_coding/utility/framerate_controller_deprecated.cc
new file mode 100644
index 0000000000..5978adc3c4
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/utility/framerate_controller_deprecated.cc
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/utility/framerate_controller_deprecated.h"
+
+#include <stddef.h>
+
+#include <cstdint>
+
+namespace webrtc {
+
+FramerateControllerDeprecated::FramerateControllerDeprecated(
+ float target_framerate_fps)
+ : min_frame_interval_ms_(0), framerate_estimator_(1000.0, 1000.0) {
+ SetTargetRate(target_framerate_fps);
+}
+
+void FramerateControllerDeprecated::SetTargetRate(float target_framerate_fps) {
+ if (target_framerate_fps_ != target_framerate_fps) {
+ framerate_estimator_.Reset();
+ if (last_timestamp_ms_) {
+ framerate_estimator_.Update(1, *last_timestamp_ms_);
+ }
+
+ const size_t target_frame_interval_ms = 1000 / target_framerate_fps;
+ target_framerate_fps_ = target_framerate_fps;
+ min_frame_interval_ms_ = 85 * target_frame_interval_ms / 100;
+ }
+}
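+
+// Example: a 30 fps target gives a 33 ms target frame interval and a minimum
+// inter-frame interval of 85% of that, i.e. 28 ms; DropFrame() then reports
+// true for any frame arriving less than 28 ms after the last added frame.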
+
+float FramerateControllerDeprecated::GetTargetRate() {
+ return *target_framerate_fps_;
+}
+
+void FramerateControllerDeprecated::Reset() {
+ framerate_estimator_.Reset();
+ last_timestamp_ms_.reset();
+}
+
+bool FramerateControllerDeprecated::DropFrame(uint32_t timestamp_ms) const {
+ if (timestamp_ms < last_timestamp_ms_) {
+ // Timestamp jumps backward. We can't make an adequate drop decision.
+ // Don't drop this frame. Stats will be reset in AddFrame().
+ return false;
+ }
+
+ if (Rate(timestamp_ms).value_or(*target_framerate_fps_) >
+ target_framerate_fps_) {
+ return true;
+ }
+
+ if (last_timestamp_ms_) {
+ const int64_t diff_ms =
+ static_cast<int64_t>(timestamp_ms) - *last_timestamp_ms_;
+ if (diff_ms < min_frame_interval_ms_) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+void FramerateControllerDeprecated::AddFrame(uint32_t timestamp_ms) {
+ if (timestamp_ms < last_timestamp_ms_) {
+ // Timestamp jumps backward.
+ Reset();
+ }
+
+ framerate_estimator_.Update(1, timestamp_ms);
+ last_timestamp_ms_ = timestamp_ms;
+}
+
+absl::optional<float> FramerateControllerDeprecated::Rate(
+ uint32_t timestamp_ms) const {
+ return framerate_estimator_.Rate(timestamp_ms);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/utility/framerate_controller_deprecated.h b/third_party/libwebrtc/modules/video_coding/utility/framerate_controller_deprecated.h
new file mode 100644
index 0000000000..ca0cbea053
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/utility/framerate_controller_deprecated.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_UTILITY_FRAMERATE_CONTROLLER_DEPRECATED_H_
+#define MODULES_VIDEO_CODING_UTILITY_FRAMERATE_CONTROLLER_DEPRECATED_H_
+
+#include <stdint.h>
+
+#include "absl/types/optional.h"
+#include "rtc_base/rate_statistics.h"
+
+namespace webrtc {
+
+// Please use webrtc::FramerateController instead.
+class FramerateControllerDeprecated {
+ public:
+ explicit FramerateControllerDeprecated(float target_framerate_fps);
+
+ void SetTargetRate(float target_framerate_fps);
+ float GetTargetRate();
+
+ // Advises the user to drop the next frame in order to reach the target
+ // framerate.
+ bool DropFrame(uint32_t timestamp_ms) const;
+
+ void AddFrame(uint32_t timestamp_ms);
+
+ void Reset();
+
+ private:
+ absl::optional<float> Rate(uint32_t timestamp_ms) const;
+
+ absl::optional<float> target_framerate_fps_;
+ absl::optional<uint32_t> last_timestamp_ms_;
+ uint32_t min_frame_interval_ms_;
+ RateStatistics framerate_estimator_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_UTILITY_FRAMERATE_CONTROLLER_DEPRECATED_H_
diff --git a/third_party/libwebrtc/modules/video_coding/utility/framerate_controller_deprecated_unittest.cc b/third_party/libwebrtc/modules/video_coding/utility/framerate_controller_deprecated_unittest.cc
new file mode 100644
index 0000000000..eabf0529db
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/utility/framerate_controller_deprecated_unittest.cc
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/utility/framerate_controller_deprecated.h"
+
+#include <stddef.h>
+
+#include "test/gtest.h"
+
+namespace webrtc {
+
+TEST(FramerateControllerDeprecated, KeepTargetFramerate) {
+ const float input_framerate_fps = 20;
+ const float target_framerate_fps = 5;
+ const float max_abs_framerate_error_fps = target_framerate_fps * 0.1f;
+ const size_t input_duration_secs = 3;
+ const size_t num_input_frames = input_duration_secs * input_framerate_fps;
+
+ FramerateControllerDeprecated framerate_controller(target_framerate_fps);
+ size_t num_dropped_frames = 0;
+ for (size_t frame_num = 0; frame_num < num_input_frames; ++frame_num) {
+ const uint32_t timestamp_ms =
+ static_cast<uint32_t>(1000 * frame_num / input_framerate_fps);
+ if (framerate_controller.DropFrame(timestamp_ms)) {
+ ++num_dropped_frames;
+ } else {
+ framerate_controller.AddFrame(timestamp_ms);
+ }
+ }
+
+ const float output_framerate_fps =
+ static_cast<float>(num_input_frames - num_dropped_frames) /
+ input_duration_secs;
+ EXPECT_NEAR(output_framerate_fps, target_framerate_fps,
+ max_abs_framerate_error_fps);
+}
+
+TEST(FramerateControllerDeprecated, DoNotDropAnyFramesIfTargetEqualsInput) {
+ const float input_framerate_fps = 30;
+ const size_t input_duration_secs = 3;
+ const size_t num_input_frames = input_duration_secs * input_framerate_fps;
+
+ FramerateControllerDeprecated framerate_controller(input_framerate_fps);
+ size_t num_dropped_frames = 0;
+ for (size_t frame_num = 0; frame_num < num_input_frames; ++frame_num) {
+ const uint32_t timestamp_ms =
+ static_cast<uint32_t>(1000 * frame_num / input_framerate_fps);
+ if (framerate_controller.DropFrame(timestamp_ms)) {
+ ++num_dropped_frames;
+ } else {
+ framerate_controller.AddFrame(timestamp_ms);
+ }
+ }
+
+ EXPECT_EQ(num_dropped_frames, 0U);
+}
+
+TEST(FramerateControllerDeprecated, DoNotDropFrameWhenTimestampJumpsBackward) {
+ FramerateControllerDeprecated framerate_controller(30);
+ ASSERT_FALSE(framerate_controller.DropFrame(66));
+ framerate_controller.AddFrame(66);
+ EXPECT_FALSE(framerate_controller.DropFrame(33));
+}
+
+TEST(FramerateControllerDeprecated, DropFrameIfItIsTooCloseToPreviousFrame) {
+ FramerateControllerDeprecated framerate_controller(30);
+ ASSERT_FALSE(framerate_controller.DropFrame(33));
+ framerate_controller.AddFrame(33);
+ EXPECT_TRUE(framerate_controller.DropFrame(34));
+}
+
+TEST(FramerateControllerDeprecated, FrameDroppingStartsFromSecondInputFrame) {
+ const float input_framerate_fps = 23;
+ const float target_framerate_fps = 19;
+ const uint32_t input_frame_duration_ms =
+ static_cast<uint32_t>(1000 / input_framerate_fps);
+ FramerateControllerDeprecated framerate_controller(target_framerate_fps);
+ ASSERT_FALSE(framerate_controller.DropFrame(1 * input_frame_duration_ms));
+ framerate_controller.AddFrame(1 * input_frame_duration_ms);
+ EXPECT_TRUE(framerate_controller.DropFrame(2 * input_frame_duration_ms));
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/utility/ivf_defines.h b/third_party/libwebrtc/modules/video_coding/utility/ivf_defines.h
new file mode 100644
index 0000000000..83d6691b87
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/utility/ivf_defines.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * This file contains definitions that are common to the IvfFileReader and
+ * IvfFileWriter classes.
+ */
+
+#ifndef MODULES_VIDEO_CODING_UTILITY_IVF_DEFINES_H_
+#define MODULES_VIDEO_CODING_UTILITY_IVF_DEFINES_H_
+
+namespace webrtc {
+constexpr size_t kIvfHeaderSize = 32;
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_UTILITY_IVF_DEFINES_H_
diff --git a/third_party/libwebrtc/modules/video_coding/utility/ivf_file_reader.cc b/third_party/libwebrtc/modules/video_coding/utility/ivf_file_reader.cc
new file mode 100644
index 0000000000..85d1fa00d7
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/utility/ivf_file_reader.cc
@@ -0,0 +1,243 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/utility/ivf_file_reader.h"
+
+#include <string>
+#include <vector>
+
+#include "api/video_codecs/video_codec.h"
+#include "modules/rtp_rtcp/source/byte_io.h"
+#include "modules/video_coding/utility/ivf_defines.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+namespace {
+
+constexpr size_t kIvfFrameHeaderSize = 12;
+constexpr int kCodecTypeBytesCount = 4;
+
+constexpr uint8_t kFileHeaderStart[kCodecTypeBytesCount] = {'D', 'K', 'I', 'F'};
+constexpr uint8_t kVp8Header[kCodecTypeBytesCount] = {'V', 'P', '8', '0'};
+constexpr uint8_t kVp9Header[kCodecTypeBytesCount] = {'V', 'P', '9', '0'};
+constexpr uint8_t kAv1Header[kCodecTypeBytesCount] = {'A', 'V', '0', '1'};
+constexpr uint8_t kH264Header[kCodecTypeBytesCount] = {'H', '2', '6', '4'};
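+
+// Layout of the 32-byte IVF file header (only the fields this reader uses):
+//   bytes 0-3   signature "DKIF"
+//   bytes 8-11  codec FourCC (one of the four above)
+//   bytes 12-13 width, bytes 14-15 height (little endian)
+//   bytes 16-19 time scale: 1000 (millisecond timestamps) or 90000 (90 kHz
+//               RTP units)
+//   bytes 24-27 number of frames
+// Each frame payload is preceded by a 12-byte frame header: a 4-byte payload
+// size followed by an 8-byte timestamp.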
+
+} // namespace
+
+std::unique_ptr<IvfFileReader> IvfFileReader::Create(FileWrapper file) {
+ auto reader =
+ std::unique_ptr<IvfFileReader>(new IvfFileReader(std::move(file)));
+ if (!reader->Reset()) {
+ return nullptr;
+ }
+ return reader;
+}
+IvfFileReader::~IvfFileReader() {
+ Close();
+}
+
+bool IvfFileReader::Reset() {
+ // Set the error flag while initializing; it is cleared on success.
+ has_error_ = true;
+ if (!file_.Rewind()) {
+ RTC_LOG(LS_ERROR) << "Failed to rewind IVF file";
+ return false;
+ }
+
+ uint8_t ivf_header[kIvfHeaderSize] = {0};
+ size_t read = file_.Read(&ivf_header, kIvfHeaderSize);
+ if (read != kIvfHeaderSize) {
+ RTC_LOG(LS_ERROR) << "Failed to read IVF header";
+ return false;
+ }
+
+ if (memcmp(&ivf_header[0], kFileHeaderStart, 4) != 0) {
+ RTC_LOG(LS_ERROR) << "File is not in IVF format: DKIF header expected";
+ return false;
+ }
+
+ absl::optional<VideoCodecType> codec_type = ParseCodecType(ivf_header, 8);
+ if (!codec_type) {
+ return false;
+ }
+ codec_type_ = *codec_type;
+
+ width_ = ByteReader<uint16_t>::ReadLittleEndian(&ivf_header[12]);
+ height_ = ByteReader<uint16_t>::ReadLittleEndian(&ivf_header[14]);
+ if (width_ == 0 || height_ == 0) {
+ RTC_LOG(LS_ERROR) << "Invalid IVF header: width or height is 0";
+ return false;
+ }
+
+ uint32_t time_scale = ByteReader<uint32_t>::ReadLittleEndian(&ivf_header[16]);
+ if (time_scale == 1000) {
+ using_capture_timestamps_ = true;
+ } else if (time_scale == 90000) {
+ using_capture_timestamps_ = false;
+ } else {
+ RTC_LOG(LS_ERROR) << "Invalid IVF header: Unknown time scale";
+ return false;
+ }
+
+ num_frames_ = static_cast<size_t>(
+ ByteReader<uint32_t>::ReadLittleEndian(&ivf_header[24]));
+  if (num_frames_ == 0) {
+    RTC_LOG(LS_ERROR) << "Invalid IVF header: number of frames is 0";
+ return false;
+ }
+
+ num_read_frames_ = 0;
+ next_frame_header_ = ReadNextFrameHeader();
+ if (!next_frame_header_) {
+ RTC_LOG(LS_ERROR) << "Failed to read 1st frame header";
+ return false;
+ }
+  // Initialization succeeded: clear the error flag.
+ has_error_ = false;
+
+ const char* codec_name = CodecTypeToPayloadString(codec_type_);
+ RTC_LOG(LS_INFO) << "Opened IVF file with codec data of type " << codec_name
+ << " at resolution " << width_ << " x " << height_
+ << ", using " << (using_capture_timestamps_ ? "1" : "90")
+ << "kHz clock resolution.";
+
+ return true;
+}
+
+absl::optional<EncodedImage> IvfFileReader::NextFrame() {
+ if (has_error_ || !HasMoreFrames()) {
+ return absl::nullopt;
+ }
+
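+  // Spatial layers of a single frame are stored as consecutive IVF frames
+  // sharing one timestamp; collect them all into a single EncodedImage.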
+ rtc::scoped_refptr<EncodedImageBuffer> payload = EncodedImageBuffer::Create();
+ std::vector<size_t> layer_sizes;
+  // next_frame_header_ must be present here, given how it is loaded; if it is
+  // missing, there is a bug in the error handling.
+ RTC_DCHECK(next_frame_header_);
+ int64_t current_timestamp = next_frame_header_->timestamp;
+  // The first frame read from the file is marked as a key frame.
+ bool is_first_frame = num_read_frames_ == 0;
+ while (next_frame_header_ &&
+ current_timestamp == next_frame_header_->timestamp) {
+ // Resize payload to fit next spatial layer.
+ size_t current_layer_size = next_frame_header_->frame_size;
+ size_t current_layer_start_pos = payload->size();
+ payload->Realloc(payload->size() + current_layer_size);
+ layer_sizes.push_back(current_layer_size);
+
+ // Read next layer into payload
+ size_t read = file_.Read(&payload->data()[current_layer_start_pos],
+ current_layer_size);
+ if (read != current_layer_size) {
+ RTC_LOG(LS_ERROR) << "Frame #" << num_read_frames_
+ << ": failed to read frame payload";
+ has_error_ = true;
+ return absl::nullopt;
+ }
+ num_read_frames_++;
+
+ current_timestamp = next_frame_header_->timestamp;
+ next_frame_header_ = ReadNextFrameHeader();
+ }
+ if (!next_frame_header_) {
+    // If EOF was reached, check that all declared frames were actually read.
+ if (!has_error_ && num_read_frames_ != num_frames_) {
+ RTC_LOG(LS_ERROR) << "Unexpected EOF";
+ has_error_ = true;
+ return absl::nullopt;
+ }
+ }
+
+ EncodedImage image;
+ if (using_capture_timestamps_) {
+ image.capture_time_ms_ = current_timestamp;
+ image.SetTimestamp(static_cast<uint32_t>(90 * current_timestamp));
+ } else {
+ image.SetTimestamp(static_cast<uint32_t>(current_timestamp));
+ }
+ image.SetEncodedData(payload);
+ image.SetSpatialIndex(static_cast<int>(layer_sizes.size()) - 1);
+ for (size_t i = 0; i < layer_sizes.size(); ++i) {
+ image.SetSpatialLayerFrameSize(static_cast<int>(i), layer_sizes[i]);
+ }
+ if (is_first_frame) {
+ image._frameType = VideoFrameType::kVideoFrameKey;
+ }
+
+ return image;
+}
+
+bool IvfFileReader::Close() {
+ if (!file_.is_open())
+ return false;
+
+ file_.Close();
+ return true;
+}
+
+absl::optional<VideoCodecType> IvfFileReader::ParseCodecType(uint8_t* buffer,
+ size_t start_pos) {
+ if (memcmp(&buffer[start_pos], kVp8Header, kCodecTypeBytesCount) == 0) {
+ return VideoCodecType::kVideoCodecVP8;
+ }
+ if (memcmp(&buffer[start_pos], kVp9Header, kCodecTypeBytesCount) == 0) {
+ return VideoCodecType::kVideoCodecVP9;
+ }
+ if (memcmp(&buffer[start_pos], kAv1Header, kCodecTypeBytesCount) == 0) {
+ return VideoCodecType::kVideoCodecAV1;
+ }
+ if (memcmp(&buffer[start_pos], kH264Header, kCodecTypeBytesCount) == 0) {
+ return VideoCodecType::kVideoCodecH264;
+ }
+ has_error_ = true;
+ RTC_LOG(LS_ERROR) << "Unknown codec type: "
+ << std::string(
+ reinterpret_cast<char const*>(&buffer[start_pos]),
+ kCodecTypeBytesCount);
+ return absl::nullopt;
+}
+
+absl::optional<IvfFileReader::FrameHeader>
+IvfFileReader::ReadNextFrameHeader() {
+ uint8_t ivf_frame_header[kIvfFrameHeaderSize] = {0};
+ size_t read = file_.Read(&ivf_frame_header, kIvfFrameHeaderSize);
+ if (read != kIvfFrameHeaderSize) {
+ if (read != 0 || !file_.ReadEof()) {
+ has_error_ = true;
+ RTC_LOG(LS_ERROR) << "Frame #" << num_read_frames_
+ << ": failed to read IVF frame header";
+ }
+ return absl::nullopt;
+ }
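+  // A 12-byte IVF frame header: payload size as a little-endian uint32,
+  // followed by a 64-bit presentation timestamp.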
+ FrameHeader header;
+ header.frame_size = static_cast<size_t>(
+ ByteReader<uint32_t>::ReadLittleEndian(&ivf_frame_header[0]));
+ header.timestamp =
+ ByteReader<uint64_t>::ReadLittleEndian(&ivf_frame_header[4]);
+
+ if (header.frame_size == 0) {
+ has_error_ = true;
+ RTC_LOG(LS_ERROR) << "Frame #" << num_read_frames_
+ << ": invalid frame size";
+ return absl::nullopt;
+ }
+
+ if (header.timestamp < 0) {
+ has_error_ = true;
+ RTC_LOG(LS_ERROR) << "Frame #" << num_read_frames_
+ << ": negative timestamp";
+ return absl::nullopt;
+ }
+
+ return header;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/utility/ivf_file_reader.h b/third_party/libwebrtc/modules/video_coding/utility/ivf_file_reader.h
new file mode 100644
index 0000000000..75f2e3ac8c
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/utility/ivf_file_reader.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_UTILITY_IVF_FILE_READER_H_
+#define MODULES_VIDEO_CODING_UTILITY_IVF_FILE_READER_H_
+
+#include <memory>
+#include <utility>
+
+#include "absl/types/optional.h"
+#include "api/video/encoded_image.h"
+#include "api/video_codecs/video_codec.h"
+#include "rtc_base/system/file_wrapper.h"
+
+namespace webrtc {
+
+class IvfFileReader {
+ public:
+  // Creates an IvfFileReader. Returns nullptr if an error occurred.
+ static std::unique_ptr<IvfFileReader> Create(FileWrapper file);
+ ~IvfFileReader();
+
+ IvfFileReader(const IvfFileReader&) = delete;
+ IvfFileReader& operator=(const IvfFileReader&) = delete;
+
+  // Reinitializes the reader. Returns false if an error occurred.
+ bool Reset();
+
+  // Returns the codec type that was used to create this IVF file and that
+  // should be used to decode EncodedImages from this file.
+ VideoCodecType GetVideoCodecType() const { return codec_type_; }
+  // Returns the number of frames stored in this file (each spatial layer
+  // counts as a separate frame).
+ size_t GetFramesCount() const { return num_frames_; }
+
+  // Returns the next frame, or absl::nullopt if an error occurred. Always
+  // returns absl::nullopt after the first error has been encountered.
+ absl::optional<EncodedImage> NextFrame();
+ bool HasMoreFrames() const { return num_read_frames_ < num_frames_; }
+ bool HasError() const { return has_error_; }
+
+ uint16_t GetFrameWidth() const { return width_; }
+ uint16_t GetFrameHeight() const { return height_; }
+
+ bool Close();
+
+ private:
+ struct FrameHeader {
+ size_t frame_size;
+ int64_t timestamp;
+ };
+
+ explicit IvfFileReader(FileWrapper file) : file_(std::move(file)) {}
+
+  // Parses the codec type from the specified position in the buffer. The
+  // codec type occupies kCodecTypeBytesCount bytes; the caller must ensure
+  // that the read does not overflow the buffer.
+ absl::optional<VideoCodecType> ParseCodecType(uint8_t* buffer,
+ size_t start_pos);
+ absl::optional<FrameHeader> ReadNextFrameHeader();
+
+ VideoCodecType codec_type_;
+ size_t num_frames_;
+ size_t num_read_frames_;
+ uint16_t width_;
+ uint16_t height_;
+ bool using_capture_timestamps_;
+ FileWrapper file_;
+
+ absl::optional<FrameHeader> next_frame_header_;
+ bool has_error_;
+};
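+
+// Example usage (a sketch; error handling elided, `file_name` is a
+// placeholder):
+//   auto reader = IvfFileReader::Create(FileWrapper::OpenReadOnly(file_name));
+//   while (reader && reader->HasMoreFrames()) {
+//     absl::optional<EncodedImage> frame = reader->NextFrame();
+//     // Decode or inspect `frame` here.
+//   }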
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_UTILITY_IVF_FILE_READER_H_
diff --git a/third_party/libwebrtc/modules/video_coding/utility/ivf_file_reader_unittest.cc b/third_party/libwebrtc/modules/video_coding/utility/ivf_file_reader_unittest.cc
new file mode 100644
index 0000000000..c9cf14674b
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/utility/ivf_file_reader_unittest.cc
@@ -0,0 +1,188 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/utility/ivf_file_reader.h"
+#include "modules/video_coding/utility/ivf_file_writer.h"
+
+#include <memory>
+#include <string>
+
+#include "test/gtest.h"
+#include "test/testsupport/file_utils.h"
+
+namespace webrtc {
+namespace {
+
+constexpr int kWidth = 320;
+constexpr int kHeight = 240;
+constexpr int kNumFrames = 3;
+constexpr uint8_t kDummyPayload[4] = {'0', '1', '2', '3'};
+
+} // namespace
+
+class IvfFileReaderTest : public ::testing::Test {
+ protected:
+ void SetUp() override {
+ file_name_ =
+ webrtc::test::TempFilename(webrtc::test::OutputPath(), "test_file.ivf");
+ }
+ void TearDown() override { webrtc::test::RemoveFile(file_name_); }
+
+ bool WriteDummyTestFrames(IvfFileWriter* file_writer,
+ VideoCodecType codec_type,
+ int width,
+ int height,
+ int num_frames,
+ bool use_capture_tims_ms,
+ int spatial_layers_count) {
+ EncodedImage frame;
+ frame.SetSpatialIndex(spatial_layers_count);
+ rtc::scoped_refptr<EncodedImageBuffer> payload = EncodedImageBuffer::Create(
+ sizeof(kDummyPayload) * spatial_layers_count);
+ for (int i = 0; i < spatial_layers_count; ++i) {
+ memcpy(&payload->data()[i * sizeof(kDummyPayload)], kDummyPayload,
+ sizeof(kDummyPayload));
+ frame.SetSpatialLayerFrameSize(i, sizeof(kDummyPayload));
+ }
+ frame.SetEncodedData(payload);
+ frame._encodedWidth = width;
+ frame._encodedHeight = height;
+ for (int i = 1; i <= num_frames; ++i) {
+ if (use_capture_tims_ms) {
+ frame.capture_time_ms_ = i;
+ } else {
+ frame.SetTimestamp(i);
+ }
+ if (!file_writer->WriteFrame(frame, codec_type))
+ return false;
+ }
+ return true;
+ }
+
+ void CreateTestFile(VideoCodecType codec_type,
+ bool use_capture_tims_ms,
+ int spatial_layers_count) {
+ std::unique_ptr<IvfFileWriter> file_writer =
+ IvfFileWriter::Wrap(FileWrapper::OpenWriteOnly(file_name_), 0);
+ ASSERT_TRUE(file_writer.get());
+ ASSERT_TRUE(WriteDummyTestFrames(file_writer.get(), codec_type, kWidth,
+ kHeight, kNumFrames, use_capture_tims_ms,
+ spatial_layers_count));
+ ASSERT_TRUE(file_writer->Close());
+ }
+
+ void ValidateFrame(absl::optional<EncodedImage> frame,
+ int frame_index,
+ bool use_capture_tims_ms,
+ int spatial_layers_count) {
+ ASSERT_TRUE(frame);
+ EXPECT_EQ(frame->SpatialIndex(), spatial_layers_count - 1);
+ if (use_capture_tims_ms) {
+ EXPECT_EQ(frame->capture_time_ms_, static_cast<int64_t>(frame_index));
+ EXPECT_EQ(frame->Timestamp(), static_cast<int64_t>(90 * frame_index));
+ } else {
+ EXPECT_EQ(frame->Timestamp(), static_cast<int64_t>(frame_index));
+ }
+ ASSERT_EQ(frame->size(), sizeof(kDummyPayload) * spatial_layers_count);
+ for (int i = 0; i < spatial_layers_count; ++i) {
+ EXPECT_EQ(memcmp(&frame->data()[i * sizeof(kDummyPayload)], kDummyPayload,
+ sizeof(kDummyPayload)),
+ 0)
+ << std::string(reinterpret_cast<char const*>(
+ &frame->data()[i * sizeof(kDummyPayload)]),
+ sizeof(kDummyPayload));
+ }
+ }
+
+ void ValidateContent(VideoCodecType codec_type,
+ bool use_capture_tims_ms,
+ int spatial_layers_count) {
+ std::unique_ptr<IvfFileReader> reader =
+ IvfFileReader::Create(FileWrapper::OpenReadOnly(file_name_));
+ ASSERT_TRUE(reader.get());
+ EXPECT_EQ(reader->GetVideoCodecType(), codec_type);
+ EXPECT_EQ(reader->GetFramesCount(),
+ spatial_layers_count * static_cast<size_t>(kNumFrames));
+ for (int i = 1; i <= kNumFrames; ++i) {
+ ASSERT_TRUE(reader->HasMoreFrames());
+ ValidateFrame(reader->NextFrame(), i, use_capture_tims_ms,
+ spatial_layers_count);
+ EXPECT_FALSE(reader->HasError());
+ }
+ EXPECT_FALSE(reader->HasMoreFrames());
+ EXPECT_FALSE(reader->NextFrame());
+ EXPECT_FALSE(reader->HasError());
+ ASSERT_TRUE(reader->Close());
+ }
+
+ std::string file_name_;
+};
+
+TEST_F(IvfFileReaderTest, BasicVp8FileNtpTimestamp) {
+ CreateTestFile(kVideoCodecVP8, false, 1);
+ ValidateContent(kVideoCodecVP8, false, 1);
+}
+
+TEST_F(IvfFileReaderTest, BasicVP8FileMsTimestamp) {
+ CreateTestFile(kVideoCodecVP8, true, 1);
+ ValidateContent(kVideoCodecVP8, true, 1);
+}
+
+TEST_F(IvfFileReaderTest, BasicVP9FileNtpTimestamp) {
+ CreateTestFile(kVideoCodecVP9, false, 1);
+ ValidateContent(kVideoCodecVP9, false, 1);
+}
+
+TEST_F(IvfFileReaderTest, BasicVP9FileMsTimestamp) {
+ CreateTestFile(kVideoCodecVP9, true, 1);
+ ValidateContent(kVideoCodecVP9, true, 1);
+}
+
+TEST_F(IvfFileReaderTest, BasicAv1FileNtpTimestamp) {
+ CreateTestFile(kVideoCodecAV1, false, 1);
+ ValidateContent(kVideoCodecAV1, false, 1);
+}
+
+TEST_F(IvfFileReaderTest, BasicAv1FileMsTimestamp) {
+ CreateTestFile(kVideoCodecAV1, true, 1);
+ ValidateContent(kVideoCodecAV1, true, 1);
+}
+
+TEST_F(IvfFileReaderTest, BasicH264FileNtpTimestamp) {
+ CreateTestFile(kVideoCodecH264, false, 1);
+ ValidateContent(kVideoCodecH264, false, 1);
+}
+
+TEST_F(IvfFileReaderTest, BasicH264FileMsTimestamp) {
+ CreateTestFile(kVideoCodecH264, true, 1);
+ ValidateContent(kVideoCodecH264, true, 1);
+}
+
+TEST_F(IvfFileReaderTest, MultilayerVp8FileNtpTimestamp) {
+ CreateTestFile(kVideoCodecVP8, false, 3);
+ ValidateContent(kVideoCodecVP8, false, 3);
+}
+
+TEST_F(IvfFileReaderTest, MultilayerVP9FileNtpTimestamp) {
+ CreateTestFile(kVideoCodecVP9, false, 3);
+ ValidateContent(kVideoCodecVP9, false, 3);
+}
+
+TEST_F(IvfFileReaderTest, MultilayerAv1FileNtpTimestamp) {
+ CreateTestFile(kVideoCodecAV1, false, 3);
+ ValidateContent(kVideoCodecAV1, false, 3);
+}
+
+TEST_F(IvfFileReaderTest, MultilayerH264FileNtpTimestamp) {
+ CreateTestFile(kVideoCodecH264, false, 3);
+ ValidateContent(kVideoCodecH264, false, 3);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/utility/ivf_file_writer.cc b/third_party/libwebrtc/modules/video_coding/utility/ivf_file_writer.cc
new file mode 100644
index 0000000000..5b27ef3ef7
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/utility/ivf_file_writer.cc
@@ -0,0 +1,245 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/utility/ivf_file_writer.h"
+
+#include <utility>
+
+#include "api/video_codecs/video_codec.h"
+#include "modules/rtp_rtcp/source/byte_io.h"
+#include "modules/video_coding/utility/ivf_defines.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+// TODO(palmkvist): make logging more informative in the absence of a file name
+// (or get one)
+
+namespace webrtc {
+
+namespace {
+
+constexpr int kDefaultWidth = 1280;
+constexpr int kDefaultHeight = 720;
+} // namespace
+
+IvfFileWriter::IvfFileWriter(FileWrapper file, size_t byte_limit)
+ : codec_type_(kVideoCodecGeneric),
+ bytes_written_(0),
+ byte_limit_(byte_limit),
+ num_frames_(0),
+ width_(0),
+ height_(0),
+ last_timestamp_(-1),
+ using_capture_timestamps_(false),
+ file_(std::move(file)) {
+ RTC_DCHECK(byte_limit == 0 || webrtc::kIvfHeaderSize <= byte_limit)
+ << "The byte_limit is too low, not even the header will fit.";
+}
+
+IvfFileWriter::~IvfFileWriter() {
+ Close();
+}
+
+std::unique_ptr<IvfFileWriter> IvfFileWriter::Wrap(FileWrapper file,
+ size_t byte_limit) {
+ return std::unique_ptr<IvfFileWriter>(
+ new IvfFileWriter(std::move(file), byte_limit));
+}
+
+bool IvfFileWriter::WriteHeader() {
+ if (!file_.Rewind()) {
+ RTC_LOG(LS_WARNING) << "Unable to rewind ivf output file.";
+ return false;
+ }
+
+ uint8_t ivf_header[webrtc::kIvfHeaderSize] = {0};
+ ivf_header[0] = 'D';
+ ivf_header[1] = 'K';
+ ivf_header[2] = 'I';
+ ivf_header[3] = 'F';
+ ByteWriter<uint16_t>::WriteLittleEndian(&ivf_header[4], 0); // Version.
+ ByteWriter<uint16_t>::WriteLittleEndian(&ivf_header[6], 32); // Header size.
+
+ switch (codec_type_) {
+ case kVideoCodecVP8:
+ ivf_header[8] = 'V';
+ ivf_header[9] = 'P';
+ ivf_header[10] = '8';
+ ivf_header[11] = '0';
+ break;
+ case kVideoCodecVP9:
+ ivf_header[8] = 'V';
+ ivf_header[9] = 'P';
+ ivf_header[10] = '9';
+ ivf_header[11] = '0';
+ break;
+ case kVideoCodecAV1:
+ ivf_header[8] = 'A';
+ ivf_header[9] = 'V';
+ ivf_header[10] = '0';
+ ivf_header[11] = '1';
+ break;
+ case kVideoCodecH264:
+ ivf_header[8] = 'H';
+ ivf_header[9] = '2';
+ ivf_header[10] = '6';
+ ivf_header[11] = '4';
+ break;
+ default:
+      // For an unknown codec type, write the '****' code. The actual payload
+      // format can then be specified when playing the video with ffplay:
+      // ffplay -f H263 file.ivf
+ ivf_header[8] = '*';
+ ivf_header[9] = '*';
+ ivf_header[10] = '*';
+ ivf_header[11] = '*';
+ break;
+ }
+
+ ByteWriter<uint16_t>::WriteLittleEndian(&ivf_header[12], width_);
+ ByteWriter<uint16_t>::WriteLittleEndian(&ivf_header[14], height_);
+ // Render timestamps are in ms (1/1000 scale), while RTP timestamps use a
+ // 90kHz clock.
+ ByteWriter<uint32_t>::WriteLittleEndian(
+ &ivf_header[16], using_capture_timestamps_ ? 1000 : 90000);
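+  // Time base numerator; together with the rate written above, timestamps
+  // tick in units of 1/1000 or 1/90000 seconds.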
+ ByteWriter<uint32_t>::WriteLittleEndian(&ivf_header[20], 1);
+ ByteWriter<uint32_t>::WriteLittleEndian(&ivf_header[24],
+ static_cast<uint32_t>(num_frames_));
+ ByteWriter<uint32_t>::WriteLittleEndian(&ivf_header[28], 0); // Reserved.
+
+ if (!file_.Write(ivf_header, webrtc::kIvfHeaderSize)) {
+ RTC_LOG(LS_ERROR) << "Unable to write IVF header for ivf output file.";
+ return false;
+ }
+
+ if (bytes_written_ < webrtc::kIvfHeaderSize) {
+ bytes_written_ = webrtc::kIvfHeaderSize;
+ }
+
+ return true;
+}
+
+bool IvfFileWriter::InitFromFirstFrame(const EncodedImage& encoded_image,
+ VideoCodecType codec_type) {
+ if (encoded_image._encodedWidth == 0 || encoded_image._encodedHeight == 0) {
+ width_ = kDefaultWidth;
+ height_ = kDefaultHeight;
+ } else {
+ width_ = encoded_image._encodedWidth;
+ height_ = encoded_image._encodedHeight;
+ }
+
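+  // If the first frame has no RTP timestamp, fall back to capture times in
+  // milliseconds and advertise a 1000 Hz time base in the file header.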
+ using_capture_timestamps_ = encoded_image.Timestamp() == 0;
+
+ codec_type_ = codec_type;
+
+ if (!WriteHeader())
+ return false;
+
+ const char* codec_name = CodecTypeToPayloadString(codec_type_);
+ RTC_LOG(LS_WARNING) << "Created IVF file for codec data of type "
+ << codec_name << " at resolution " << width_ << " x "
+ << height_ << ", using "
+ << (using_capture_timestamps_ ? "1" : "90")
+ << "kHz clock resolution.";
+ return true;
+}
+
+bool IvfFileWriter::WriteFrame(const EncodedImage& encoded_image,
+ VideoCodecType codec_type) {
+ if (!file_.is_open())
+ return false;
+
+ if (num_frames_ == 0 && !InitFromFirstFrame(encoded_image, codec_type))
+ return false;
+ RTC_DCHECK_EQ(codec_type_, codec_type);
+
+ if ((encoded_image._encodedWidth > 0 || encoded_image._encodedHeight > 0) &&
+ (encoded_image._encodedHeight != height_ ||
+ encoded_image._encodedWidth != width_)) {
+ RTC_LOG(LS_WARNING)
+ << "Incoming frame has resolution different from previous: (" << width_
+ << "x" << height_ << ") -> (" << encoded_image._encodedWidth << "x"
+ << encoded_image._encodedHeight << ")";
+ }
+
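+  // RTP timestamps are 32 bits and wrap around; unwrap them into a
+  // monotonically increasing 64-bit value before writing.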
+ int64_t timestamp = using_capture_timestamps_
+ ? encoded_image.capture_time_ms_
+ : wrap_handler_.Unwrap(encoded_image.Timestamp());
+ if (last_timestamp_ != -1 && timestamp <= last_timestamp_) {
+ RTC_LOG(LS_WARNING) << "Timestamp no increasing: " << last_timestamp_
+ << " -> " << timestamp;
+ }
+ last_timestamp_ = timestamp;
+
+ bool written_frames = false;
+ size_t max_sl_index = encoded_image.SpatialIndex().value_or(0);
+ const uint8_t* data = encoded_image.data();
+ for (size_t sl_idx = 0; sl_idx <= max_sl_index; ++sl_idx) {
+ size_t cur_size = encoded_image.SpatialLayerFrameSize(sl_idx).value_or(0);
+ if (cur_size > 0) {
+ written_frames = true;
+ if (!WriteOneSpatialLayer(timestamp, data, cur_size)) {
+ return false;
+ }
+ data += cur_size;
+ }
+ }
+
+  // If the frame has only one spatial layer, it carries no per-layer sizes,
+  // so that case is handled separately.
+ if (!written_frames) {
+ return WriteOneSpatialLayer(timestamp, data, encoded_image.size());
+ } else {
+ return true;
+ }
+}
+
+bool IvfFileWriter::WriteOneSpatialLayer(int64_t timestamp,
+ const uint8_t* data,
+ size_t size) {
+ const size_t kFrameHeaderSize = 12;
+ if (byte_limit_ != 0 &&
+ bytes_written_ + kFrameHeaderSize + size > byte_limit_) {
+ RTC_LOG(LS_WARNING) << "Closing IVF file due to reaching size limit: "
+ << byte_limit_ << " bytes.";
+ Close();
+ return false;
+ }
+ uint8_t frame_header[kFrameHeaderSize] = {};
+ ByteWriter<uint32_t>::WriteLittleEndian(&frame_header[0],
+ static_cast<uint32_t>(size));
+ ByteWriter<uint64_t>::WriteLittleEndian(&frame_header[4], timestamp);
+ if (!file_.Write(frame_header, kFrameHeaderSize) ||
+ !file_.Write(data, size)) {
+ RTC_LOG(LS_ERROR) << "Unable to write frame to file.";
+ return false;
+ }
+
+ bytes_written_ += kFrameHeaderSize + size;
+
+ ++num_frames_;
+ return true;
+}
+
+bool IvfFileWriter::Close() {
+ if (!file_.is_open())
+ return false;
+
+ if (num_frames_ == 0) {
+ file_.Close();
+ return true;
+ }
+
+ bool ret = WriteHeader();
+ file_.Close();
+ return ret;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/utility/ivf_file_writer.h b/third_party/libwebrtc/modules/video_coding/utility/ivf_file_writer.h
new file mode 100644
index 0000000000..b53459b5de
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/utility/ivf_file_writer.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_UTILITY_IVF_FILE_WRITER_H_
+#define MODULES_VIDEO_CODING_UTILITY_IVF_FILE_WRITER_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <memory>
+
+#include "api/video/encoded_image.h"
+#include "api/video/video_codec_type.h"
+#include "rtc_base/system/file_wrapper.h"
+#include "rtc_base/time_utils.h"
+
+namespace webrtc {
+
+class IvfFileWriter {
+ public:
+ // Takes ownership of the file, which will be closed either through
+  // Close or ~IvfFileWriter. If writing a frame would take the file above
+  // the `byte_limit`, the file is closed and the write (and all future
+  // writes) fails. A `byte_limit` of 0 is equivalent to no limit.
+ static std::unique_ptr<IvfFileWriter> Wrap(FileWrapper file,
+ size_t byte_limit);
+ ~IvfFileWriter();
+
+ IvfFileWriter(const IvfFileWriter&) = delete;
+ IvfFileWriter& operator=(const IvfFileWriter&) = delete;
+
+ bool WriteFrame(const EncodedImage& encoded_image, VideoCodecType codec_type);
+ bool Close();
+
+ private:
+ explicit IvfFileWriter(FileWrapper file, size_t byte_limit);
+
+ bool WriteHeader();
+ bool InitFromFirstFrame(const EncodedImage& encoded_image,
+ VideoCodecType codec_type);
+ bool WriteOneSpatialLayer(int64_t timestamp,
+ const uint8_t* data,
+ size_t size);
+
+ VideoCodecType codec_type_;
+ size_t bytes_written_;
+ size_t byte_limit_;
+ size_t num_frames_;
+ uint16_t width_;
+ uint16_t height_;
+ int64_t last_timestamp_;
+ bool using_capture_timestamps_;
+ rtc::TimestampWrapAroundHandler wrap_handler_;
+ FileWrapper file_;
+};
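+
+// Example usage (a sketch; error handling elided, `file_name` is a
+// placeholder):
+//   auto writer = IvfFileWriter::Wrap(FileWrapper::OpenWriteOnly(file_name),
+//                                     /*byte_limit=*/0);
+//   writer->WriteFrame(encoded_image, kVideoCodecVP8);
+//   writer->Close();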
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_UTILITY_IVF_FILE_WRITER_H_
diff --git a/third_party/libwebrtc/modules/video_coding/utility/ivf_file_writer_unittest.cc b/third_party/libwebrtc/modules/video_coding/utility/ivf_file_writer_unittest.cc
new file mode 100644
index 0000000000..c5d30a1286
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/utility/ivf_file_writer_unittest.cc
@@ -0,0 +1,311 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/utility/ivf_file_writer.h"
+
+#include <string.h>
+
+#include <memory>
+#include <string>
+
+#include "modules/rtp_rtcp/source/byte_io.h"
+#include "test/gtest.h"
+#include "test/testsupport/file_utils.h"
+
+namespace webrtc {
+
+namespace {
+static const int kHeaderSize = 32;
+static const int kFrameHeaderSize = 12;
+static uint8_t dummy_payload[4] = {0, 1, 2, 3};
+// Default values used when the width and height of the EncodedImage are 0;
+// they are copied from ivf_file_writer.cc.
+constexpr int kDefaultWidth = 1280;
+constexpr int kDefaultHeight = 720;
+} // namespace
+
+class IvfFileWriterTest : public ::testing::Test {
+ protected:
+ void SetUp() override {
+ file_name_ =
+ webrtc::test::TempFilename(webrtc::test::OutputPath(), "test_file");
+ }
+ void TearDown() override { webrtc::test::RemoveFile(file_name_); }
+
+ bool WriteDummyTestFrames(VideoCodecType codec_type,
+ int width,
+ int height,
+ int num_frames,
+ bool use_capture_tims_ms) {
+ EncodedImage frame;
+ frame.SetEncodedData(
+ EncodedImageBuffer::Create(dummy_payload, sizeof(dummy_payload)));
+ frame._encodedWidth = width;
+ frame._encodedHeight = height;
+ for (int i = 1; i <= num_frames; ++i) {
+ frame.set_size(i % sizeof(dummy_payload));
+ if (use_capture_tims_ms) {
+ frame.capture_time_ms_ = i;
+ } else {
+ frame.SetTimestamp(i);
+ }
+ if (!file_writer_->WriteFrame(frame, codec_type))
+ return false;
+ }
+ return true;
+ }
+
+ void VerifyIvfHeader(FileWrapper* file,
+ const uint8_t fourcc[4],
+ int width,
+ int height,
+ uint32_t num_frames,
+ bool use_capture_tims_ms) {
+ ASSERT_TRUE(file->is_open());
+ uint8_t data[kHeaderSize];
+ ASSERT_EQ(static_cast<size_t>(kHeaderSize), file->Read(data, kHeaderSize));
+
+ uint8_t dkif[4] = {'D', 'K', 'I', 'F'};
+ EXPECT_EQ(0, memcmp(dkif, data, 4));
+ EXPECT_EQ(0u, ByteReader<uint16_t>::ReadLittleEndian(&data[4]));
+ EXPECT_EQ(32u, ByteReader<uint16_t>::ReadLittleEndian(&data[6]));
+ EXPECT_EQ(0, memcmp(fourcc, &data[8], 4));
+ EXPECT_EQ(width, ByteReader<uint16_t>::ReadLittleEndian(&data[12]));
+ EXPECT_EQ(height, ByteReader<uint16_t>::ReadLittleEndian(&data[14]));
+ EXPECT_EQ(use_capture_tims_ms ? 1000u : 90000u,
+ ByteReader<uint32_t>::ReadLittleEndian(&data[16]));
+ EXPECT_EQ(1u, ByteReader<uint32_t>::ReadLittleEndian(&data[20]));
+ EXPECT_EQ(num_frames, ByteReader<uint32_t>::ReadLittleEndian(&data[24]));
+ EXPECT_EQ(0u, ByteReader<uint32_t>::ReadLittleEndian(&data[28]));
+ }
+
+ void VerifyDummyTestFrames(FileWrapper* file, uint32_t num_frames) {
+ const int kMaxFrameSize = 4;
+ for (uint32_t i = 1; i <= num_frames; ++i) {
+ uint8_t frame_header[kFrameHeaderSize];
+ ASSERT_EQ(static_cast<unsigned int>(kFrameHeaderSize),
+ file->Read(frame_header, kFrameHeaderSize));
+ uint32_t frame_length =
+ ByteReader<uint32_t>::ReadLittleEndian(&frame_header[0]);
+ EXPECT_EQ(i % 4, frame_length);
+ uint64_t timestamp =
+ ByteReader<uint64_t>::ReadLittleEndian(&frame_header[4]);
+ EXPECT_EQ(i, timestamp);
+
+ uint8_t data[kMaxFrameSize] = {};
+ ASSERT_EQ(frame_length,
+ static_cast<uint32_t>(file->Read(data, frame_length)));
+ EXPECT_EQ(0, memcmp(data, dummy_payload, frame_length));
+ }
+ }
+
+ void RunBasicFileStructureTest(VideoCodecType codec_type,
+ const uint8_t fourcc[4],
+ bool use_capture_tims_ms) {
+ file_writer_ =
+ IvfFileWriter::Wrap(FileWrapper::OpenWriteOnly(file_name_), 0);
+ ASSERT_TRUE(file_writer_.get());
+ const int kWidth = 320;
+ const int kHeight = 240;
+ const int kNumFrames = 257;
+ ASSERT_TRUE(WriteDummyTestFrames(codec_type, kWidth, kHeight, kNumFrames,
+ use_capture_tims_ms));
+ EXPECT_TRUE(file_writer_->Close());
+
+ FileWrapper out_file = FileWrapper::OpenReadOnly(file_name_);
+ VerifyIvfHeader(&out_file, fourcc, kWidth, kHeight, kNumFrames,
+ use_capture_tims_ms);
+ VerifyDummyTestFrames(&out_file, kNumFrames);
+
+ out_file.Close();
+ }
+
+ std::string file_name_;
+ std::unique_ptr<IvfFileWriter> file_writer_;
+};
+
+TEST_F(IvfFileWriterTest, WritesBasicVP8FileNtpTimestamp) {
+ const uint8_t fourcc[4] = {'V', 'P', '8', '0'};
+ RunBasicFileStructureTest(kVideoCodecVP8, fourcc, false);
+}
+
+TEST_F(IvfFileWriterTest, WritesBasicVP8FileMsTimestamp) {
+ const uint8_t fourcc[4] = {'V', 'P', '8', '0'};
+ RunBasicFileStructureTest(kVideoCodecVP8, fourcc, true);
+}
+
+TEST_F(IvfFileWriterTest, WritesBasicVP9FileNtpTimestamp) {
+ const uint8_t fourcc[4] = {'V', 'P', '9', '0'};
+ RunBasicFileStructureTest(kVideoCodecVP9, fourcc, false);
+}
+
+TEST_F(IvfFileWriterTest, WritesBasicVP9FileMsTimestamp) {
+ const uint8_t fourcc[4] = {'V', 'P', '9', '0'};
+ RunBasicFileStructureTest(kVideoCodecVP9, fourcc, true);
+}
+
+TEST_F(IvfFileWriterTest, WritesBasicAv1FileNtpTimestamp) {
+ const uint8_t fourcc[4] = {'A', 'V', '0', '1'};
+ RunBasicFileStructureTest(kVideoCodecAV1, fourcc, false);
+}
+
+TEST_F(IvfFileWriterTest, WritesBasicAv1FileMsTimestamp) {
+ const uint8_t fourcc[4] = {'A', 'V', '0', '1'};
+ RunBasicFileStructureTest(kVideoCodecAV1, fourcc, true);
+}
+
+TEST_F(IvfFileWriterTest, WritesBasicH264FileNtpTimestamp) {
+ const uint8_t fourcc[4] = {'H', '2', '6', '4'};
+ RunBasicFileStructureTest(kVideoCodecH264, fourcc, false);
+}
+
+TEST_F(IvfFileWriterTest, WritesBasicH264FileMsTimestamp) {
+ const uint8_t fourcc[4] = {'H', '2', '6', '4'};
+ RunBasicFileStructureTest(kVideoCodecH264, fourcc, true);
+}
+
+TEST_F(IvfFileWriterTest, WritesBasicUnknownCodecFileMsTimestamp) {
+ const uint8_t fourcc[4] = {'*', '*', '*', '*'};
+ RunBasicFileStructureTest(kVideoCodecGeneric, fourcc, true);
+}
+
+TEST_F(IvfFileWriterTest, ClosesWhenReachesLimit) {
+ const uint8_t fourcc[4] = {'V', 'P', '8', '0'};
+ const int kWidth = 320;
+ const int kHeight = 240;
+ const int kNumFramesToWrite = 2;
+ const int kNumFramesToFit = 1;
+
+ file_writer_ = IvfFileWriter::Wrap(
+ FileWrapper::OpenWriteOnly(file_name_),
+ kHeaderSize +
+ kNumFramesToFit * (kFrameHeaderSize + sizeof(dummy_payload)));
+ ASSERT_TRUE(file_writer_.get());
+
+ ASSERT_FALSE(WriteDummyTestFrames(kVideoCodecVP8, kWidth, kHeight,
+ kNumFramesToWrite, true));
+ ASSERT_FALSE(file_writer_->Close());
+
+ FileWrapper out_file = FileWrapper::OpenReadOnly(file_name_);
+ VerifyIvfHeader(&out_file, fourcc, kWidth, kHeight, kNumFramesToFit, true);
+ VerifyDummyTestFrames(&out_file, kNumFramesToFit);
+
+ out_file.Close();
+}
+
+TEST_F(IvfFileWriterTest, UseDefaultValueWhenWidthAndHeightAreZero) {
+ const uint8_t fourcc[4] = {'V', 'P', '8', '0'};
+ const int kWidth = 0;
+ const int kHeight = 0;
+ const int kNumFramesToWrite = 2;
+ const int kNumFramesToFit = 1;
+
+ file_writer_ = IvfFileWriter::Wrap(
+ FileWrapper::OpenWriteOnly(file_name_),
+ kHeaderSize +
+ kNumFramesToFit * (kFrameHeaderSize + sizeof(dummy_payload)));
+ ASSERT_TRUE(file_writer_.get());
+
+ ASSERT_FALSE(WriteDummyTestFrames(kVideoCodecVP8, kWidth, kHeight,
+ kNumFramesToWrite, true));
+ ASSERT_FALSE(file_writer_->Close());
+
+ FileWrapper out_file = FileWrapper::OpenReadOnly(file_name_);
+ // When the width and height are zero, we should expect the width and height
+ // in IvfHeader to be kDefaultWidth and kDefaultHeight instead of kWidth and
+ // kHeight.
+ VerifyIvfHeader(&out_file, fourcc, kDefaultWidth, kDefaultHeight,
+ kNumFramesToFit, true);
+ VerifyDummyTestFrames(&out_file, kNumFramesToFit);
+
+ out_file.Close();
+}
+
+TEST_F(IvfFileWriterTest, UseDefaultValueWhenOnlyWidthIsZero) {
+ const uint8_t fourcc[4] = {'V', 'P', '8', '0'};
+ const int kWidth = 0;
+ const int kHeight = 360;
+ const int kNumFramesToWrite = 2;
+ const int kNumFramesToFit = 1;
+
+ file_writer_ = IvfFileWriter::Wrap(
+ FileWrapper::OpenWriteOnly(file_name_),
+ kHeaderSize +
+ kNumFramesToFit * (kFrameHeaderSize + sizeof(dummy_payload)));
+ ASSERT_TRUE(file_writer_.get());
+
+ ASSERT_FALSE(WriteDummyTestFrames(kVideoCodecVP8, kWidth, kHeight,
+ kNumFramesToWrite, true));
+ ASSERT_FALSE(file_writer_->Close());
+
+ FileWrapper out_file = FileWrapper::OpenReadOnly(file_name_);
+ // When the width and height are zero, we should expect the width and height
+ // in IvfHeader to be kDefaultWidth and kDefaultHeight instead of kWidth and
+ // kHeight.
+ VerifyIvfHeader(&out_file, fourcc, kDefaultWidth, kDefaultHeight,
+ kNumFramesToFit, true);
+ VerifyDummyTestFrames(&out_file, kNumFramesToFit);
+
+ out_file.Close();
+}
+
+TEST_F(IvfFileWriterTest, UseDefaultValueWhenOnlyHeightIsZero) {
+ const uint8_t fourcc[4] = {'V', 'P', '8', '0'};
+ const int kWidth = 240;
+ const int kHeight = 0;
+ const int kNumFramesToWrite = 2;
+ const int kNumFramesToFit = 1;
+
+ file_writer_ = IvfFileWriter::Wrap(
+ FileWrapper::OpenWriteOnly(file_name_),
+ kHeaderSize +
+ kNumFramesToFit * (kFrameHeaderSize + sizeof(dummy_payload)));
+ ASSERT_TRUE(file_writer_.get());
+
+ ASSERT_FALSE(WriteDummyTestFrames(kVideoCodecVP8, kWidth, kHeight,
+ kNumFramesToWrite, true));
+ ASSERT_FALSE(file_writer_->Close());
+
+ FileWrapper out_file = FileWrapper::OpenReadOnly(file_name_);
+ // When the width and height are zero, we should expect the width and height
+ // in IvfHeader to be kDefaultWidth and kDefaultHeight instead of kWidth and
+ // kHeight.
+ VerifyIvfHeader(&out_file, fourcc, kDefaultWidth, kDefaultHeight,
+ kNumFramesToFit, true);
+ VerifyDummyTestFrames(&out_file, kNumFramesToFit);
+
+ out_file.Close();
+}
+
+TEST_F(IvfFileWriterTest, UseDefaultValueWhenHeightAndWidthAreNotZero) {
+ const uint8_t fourcc[4] = {'V', 'P', '8', '0'};
+ const int kWidth = 360;
+ const int kHeight = 240;
+ const int kNumFramesToWrite = 2;
+ const int kNumFramesToFit = 1;
+
+ file_writer_ = IvfFileWriter::Wrap(
+ FileWrapper::OpenWriteOnly(file_name_),
+ kHeaderSize +
+ kNumFramesToFit * (kFrameHeaderSize + sizeof(dummy_payload)));
+ ASSERT_TRUE(file_writer_.get());
+
+ ASSERT_FALSE(WriteDummyTestFrames(kVideoCodecVP8, kWidth, kHeight,
+ kNumFramesToWrite, true));
+ ASSERT_FALSE(file_writer_->Close());
+
+ FileWrapper out_file = FileWrapper::OpenReadOnly(file_name_);
+ VerifyIvfHeader(&out_file, fourcc, kWidth, kHeight, kNumFramesToFit, true);
+ VerifyDummyTestFrames(&out_file, kNumFramesToFit);
+
+ out_file.Close();
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/utility/qp_parser.cc b/third_party/libwebrtc/modules/video_coding/utility/qp_parser.cc
new file mode 100644
index 0000000000..18f225447d
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/utility/qp_parser.cc
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/utility/qp_parser.h"
+
+#include "modules/video_coding/utility/vp8_header_parser.h"
+#include "modules/video_coding/utility/vp9_uncompressed_header_parser.h"
+
+namespace webrtc {
+
+absl::optional<uint32_t> QpParser::Parse(VideoCodecType codec_type,
+ size_t spatial_idx,
+ const uint8_t* frame_data,
+ size_t frame_size) {
+ if (frame_data == nullptr || frame_size == 0 ||
+ spatial_idx >= kMaxSimulcastStreams) {
+ return absl::nullopt;
+ }
+
+ if (codec_type == kVideoCodecVP8) {
+ int qp = -1;
+ if (vp8::GetQp(frame_data, frame_size, &qp)) {
+ return qp;
+ }
+ } else if (codec_type == kVideoCodecVP9) {
+ int qp = -1;
+ if (vp9::GetQp(frame_data, frame_size, &qp)) {
+ return qp;
+ }
+ } else if (codec_type == kVideoCodecH264) {
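+    // H264 QP comes from slice headers and depends on previously parsed NAL
+    // units, so a separate stateful parser is kept per simulcast stream.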
+ return h264_parsers_[spatial_idx].Parse(frame_data, frame_size);
+ }
+
+ return absl::nullopt;
+}
+
+absl::optional<uint32_t> QpParser::H264QpParser::Parse(
+ const uint8_t* frame_data,
+ size_t frame_size) {
+ MutexLock lock(&mutex_);
+ bitstream_parser_.ParseBitstream(
+ rtc::ArrayView<const uint8_t>(frame_data, frame_size));
+ return bitstream_parser_.GetLastSliceQp();
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/utility/qp_parser.h b/third_party/libwebrtc/modules/video_coding/utility/qp_parser.h
new file mode 100644
index 0000000000..f132ff9337
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/utility/qp_parser.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_UTILITY_QP_PARSER_H_
+#define MODULES_VIDEO_CODING_UTILITY_QP_PARSER_H_
+
+#include "absl/types/optional.h"
+#include "api/video/video_codec_constants.h"
+#include "api/video/video_codec_type.h"
+#include "common_video/h264/h264_bitstream_parser.h"
+#include "rtc_base/synchronization/mutex.h"
+
+namespace webrtc {
+class QpParser {
+ public:
+ absl::optional<uint32_t> Parse(VideoCodecType codec_type,
+ size_t spatial_idx,
+ const uint8_t* frame_data,
+ size_t frame_size);
+
+ private:
+  // A thread-safe wrapper around the H264 bitstream parser.
+ class H264QpParser {
+ public:
+ absl::optional<uint32_t> Parse(const uint8_t* frame_data,
+ size_t frame_size);
+
+ private:
+ Mutex mutex_;
+ H264BitstreamParser bitstream_parser_ RTC_GUARDED_BY(mutex_);
+ };
+
+ H264QpParser h264_parsers_[kMaxSimulcastStreams];
+};
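+
+// Example usage (a sketch; `frame_data`/`frame_size` are placeholders):
+//   QpParser parser;
+//   absl::optional<uint32_t> qp =
+//       parser.Parse(kVideoCodecVP8, /*spatial_idx=*/0, frame_data,
+//                    frame_size);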
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_UTILITY_QP_PARSER_H_
diff --git a/third_party/libwebrtc/modules/video_coding/utility/qp_parser_unittest.cc b/third_party/libwebrtc/modules/video_coding/utility/qp_parser_unittest.cc
new file mode 100644
index 0000000000..1131288f26
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/utility/qp_parser_unittest.cc
@@ -0,0 +1,118 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/utility/qp_parser.h"
+
+#include <stddef.h>
+
+#include "test/gtest.h"
+
+namespace webrtc {
+
+namespace {
+// ffmpeg -s 16x16 -f rawvideo -pix_fmt rgb24 -r 30 -i /dev/zero -c:v libvpx
+// -qmin 20 -qmax 20 -crf 20 -frames:v 1 -y out.ivf
+const uint8_t kCodedFrameVp8Qp25[] = {
+ 0x10, 0x02, 0x00, 0x9d, 0x01, 0x2a, 0x10, 0x00, 0x10, 0x00,
+ 0x02, 0x47, 0x08, 0x85, 0x85, 0x88, 0x85, 0x84, 0x88, 0x0c,
+ 0x82, 0x00, 0x0c, 0x0d, 0x60, 0x00, 0xfe, 0xfc, 0x5c, 0xd0};
+
+// ffmpeg -s 16x16 -f rawvideo -pix_fmt rgb24 -r 30 -i /dev/zero -c:v libvpx-vp9
+// -qmin 24 -qmax 24 -crf 24 -frames:v 1 -y out.ivf
+const uint8_t kCodedFrameVp9Qp96[] = {
+ 0xa2, 0x49, 0x83, 0x42, 0xe0, 0x00, 0xf0, 0x00, 0xf6, 0x00,
+ 0x38, 0x24, 0x1c, 0x18, 0xc0, 0x00, 0x00, 0x30, 0x70, 0x00,
+ 0x00, 0x4a, 0xa7, 0xff, 0xfc, 0xb9, 0x01, 0xbf, 0xff, 0xff,
+ 0x97, 0x20, 0xdb, 0xff, 0xff, 0xcb, 0x90, 0x5d, 0x40};
+
+// ffmpeg -s 16x16 -f rawvideo -pix_fmt yuv420p -r 30 -i /dev/zero -c:v libx264
+// -qmin 38 -qmax 38 -crf 38 -profile:v baseline -frames:v 2 -y out.264
+const uint8_t kCodedFrameH264SpsPpsIdrQp38[] = {
+ 0x00, 0x00, 0x00, 0x01, 0x67, 0x42, 0xc0, 0x0a, 0xd9, 0x1e, 0x84,
+ 0x00, 0x00, 0x03, 0x00, 0x04, 0x00, 0x00, 0x03, 0x00, 0xf0, 0x3c,
+ 0x48, 0x99, 0x20, 0x00, 0x00, 0x00, 0x01, 0x68, 0xcb, 0x80, 0xc4,
+ 0xb2, 0x00, 0x00, 0x01, 0x65, 0x88, 0x84, 0xf1, 0x18, 0xa0, 0x00,
+ 0x20, 0x5b, 0x1c, 0x00, 0x04, 0x07, 0xe3, 0x80, 0x00, 0x80, 0xfe};
+
+const uint8_t kCodedFrameH264SpsPpsIdrQp49[] = {
+ 0x00, 0x00, 0x00, 0x01, 0x67, 0x42, 0xc0, 0x0a, 0xd9, 0x1e, 0x84,
+ 0x00, 0x00, 0x03, 0x00, 0x04, 0x00, 0x00, 0x03, 0x00, 0xf0, 0x3c,
+ 0x48, 0x99, 0x20, 0x00, 0x00, 0x00, 0x01, 0x68, 0xcb, 0x80, 0x5d,
+ 0x2c, 0x80, 0x00, 0x00, 0x01, 0x65, 0x88, 0x84, 0xf1, 0x18, 0xa0,
+ 0x00, 0x5e, 0x38, 0x00, 0x08, 0x03, 0xc7, 0x00, 0x01, 0x00, 0x7c};
+
+const uint8_t kCodedFrameH264InterSliceQpDelta0[] = {0x00, 0x00, 0x00, 0x01,
+ 0x41, 0x9a, 0x39, 0xea};
+
+} // namespace
+
+TEST(QpParserTest, ParseQpVp8) {
+ QpParser parser;
+ absl::optional<uint32_t> qp = parser.Parse(
+ kVideoCodecVP8, 0, kCodedFrameVp8Qp25, sizeof(kCodedFrameVp8Qp25));
+ EXPECT_EQ(qp, 25u);
+}
+
+TEST(QpParserTest, ParseQpVp9) {
+ QpParser parser;
+ absl::optional<uint32_t> qp = parser.Parse(
+ kVideoCodecVP9, 0, kCodedFrameVp9Qp96, sizeof(kCodedFrameVp9Qp96));
+ EXPECT_EQ(qp, 96u);
+}
+
+TEST(QpParserTest, ParseQpH264) {
+ QpParser parser;
+ absl::optional<uint32_t> qp = parser.Parse(
+ VideoCodecType::kVideoCodecH264, 0, kCodedFrameH264SpsPpsIdrQp38,
+ sizeof(kCodedFrameH264SpsPpsIdrQp38));
+ EXPECT_EQ(qp, 38u);
+
+ qp = parser.Parse(kVideoCodecH264, 1, kCodedFrameH264SpsPpsIdrQp49,
+ sizeof(kCodedFrameH264SpsPpsIdrQp49));
+ EXPECT_EQ(qp, 49u);
+
+ qp = parser.Parse(kVideoCodecH264, 0, kCodedFrameH264InterSliceQpDelta0,
+ sizeof(kCodedFrameH264InterSliceQpDelta0));
+ EXPECT_EQ(qp, 38u);
+
+ qp = parser.Parse(kVideoCodecH264, 1, kCodedFrameH264InterSliceQpDelta0,
+ sizeof(kCodedFrameH264InterSliceQpDelta0));
+ EXPECT_EQ(qp, 49u);
+}
+
+TEST(QpParserTest, ParseQpUnsupportedCodecType) {
+ QpParser parser;
+ absl::optional<uint32_t> qp = parser.Parse(
+ kVideoCodecGeneric, 0, kCodedFrameVp8Qp25, sizeof(kCodedFrameVp8Qp25));
+ EXPECT_FALSE(qp.has_value());
+}
+
+TEST(QpParserTest, ParseQpNullData) {
+ QpParser parser;
+ absl::optional<uint32_t> qp = parser.Parse(kVideoCodecVP8, 0, nullptr, 100);
+ EXPECT_FALSE(qp.has_value());
+}
+
+TEST(QpParserTest, ParseQpEmptyData) {
+ QpParser parser;
+ absl::optional<uint32_t> qp =
+ parser.Parse(kVideoCodecVP8, 0, kCodedFrameVp8Qp25, 0);
+ EXPECT_FALSE(qp.has_value());
+}
+
+TEST(QpParserTest, ParseQpSpatialIdxExceedsMax) {
+ QpParser parser;
+ absl::optional<uint32_t> qp =
+ parser.Parse(kVideoCodecVP8, kMaxSimulcastStreams, kCodedFrameVp8Qp25,
+ sizeof(kCodedFrameVp8Qp25));
+ EXPECT_FALSE(qp.has_value());
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/utility/quality_scaler.cc b/third_party/libwebrtc/modules/video_coding/utility/quality_scaler.cc
new file mode 100644
index 0000000000..28252b4cfa
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/utility/quality_scaler.cc
@@ -0,0 +1,340 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/utility/quality_scaler.h"
+
+#include <memory>
+#include <utility>
+
+#include "api/units/time_delta.h"
+#include "api/video/video_adaptation_reason.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/experiments/quality_scaler_settings.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/numerics/exp_filter.h"
+#include "rtc_base/weak_ptr.h"
+
+// TODO(kthelgason): Some versions of Android have issues with log2.
+// See https://code.google.com/p/android/issues/detail?id=212634 for details
+#if defined(WEBRTC_ANDROID)
+#define log2(x) (log(x) / log(2))
+#endif
+
+namespace webrtc {
+
+namespace {
+// Threshold constant used until first downscale (to permit fast rampup).
+static const int kMeasureMs = 2000;
+static const float kSamplePeriodScaleFactor = 2.5;
+static const int kFramedropPercentThreshold = 60;
+static const size_t kMinFramesNeededToScale = 2 * 30;
+
+} // namespace
+
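+// Exponential smoothing of reported QP values; the filter weight applied to a
+// new sample depends on the time elapsed since the previous sample.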
+class QualityScaler::QpSmoother {
+ public:
+ explicit QpSmoother(float alpha)
+ : alpha_(alpha),
+ // The initial value of last_sample_ms doesn't matter since the smoother
+ // will ignore the time delta for the first update.
+ last_sample_ms_(0),
+ smoother_(alpha) {}
+
+ absl::optional<int> GetAvg() const {
+ float value = smoother_.filtered();
+ if (value == rtc::ExpFilter::kValueUndefined) {
+ return absl::nullopt;
+ }
+ return static_cast<int>(value);
+ }
+
+ void Add(float sample, int64_t time_sent_us) {
+ int64_t now_ms = time_sent_us / 1000;
+ smoother_.Apply(static_cast<float>(now_ms - last_sample_ms_), sample);
+ last_sample_ms_ = now_ms;
+ }
+
+ void Reset() { smoother_.Reset(alpha_); }
+
+ private:
+ const float alpha_;
+ int64_t last_sample_ms_;
+ rtc::ExpFilter smoother_;
+};
+
+// The QualityScaler checks for QP periodically by queuing CheckQpTasks. The
+// task will either run to completion and trigger a new task being queued, or it
+// will be destroyed because the QualityScaler is destroyed.
+//
+// When high or low QP is reported, the task will be pending until a callback is
+// invoked. This lets the QualityScalerQpUsageHandlerInterface react to QP usage
+// asynchronously and prevents checking for QP until the stream has potentially
+// been reconfigured.
+class QualityScaler::CheckQpTask {
+ public:
+ // The result of one CheckQpTask may influence the delay of the next
+ // CheckQpTask.
+ struct Result {
+ bool observed_enough_frames = false;
+ bool qp_usage_reported = false;
+ };
+
+ CheckQpTask(QualityScaler* quality_scaler, Result previous_task_result)
+ : quality_scaler_(quality_scaler),
+ state_(State::kNotStarted),
+ previous_task_result_(previous_task_result),
+ weak_ptr_factory_(this) {}
+
+ void StartDelayedTask() {
+ RTC_DCHECK_EQ(state_, State::kNotStarted);
+ state_ = State::kCheckingQp;
+ TaskQueueBase::Current()->PostDelayedTask(
+ [this_weak_ptr = weak_ptr_factory_.GetWeakPtr(), this] {
+ if (!this_weak_ptr) {
+ // The task has been cancelled through destruction.
+ return;
+ }
+ RTC_DCHECK_EQ(state_, State::kCheckingQp);
+ RTC_DCHECK_RUN_ON(&quality_scaler_->task_checker_);
+ switch (quality_scaler_->CheckQp()) {
+ case QualityScaler::CheckQpResult::kInsufficientSamples: {
+ result_.observed_enough_frames = false;
+ // After this line, `this` may be deleted.
+ break;
+ }
+ case QualityScaler::CheckQpResult::kNormalQp: {
+ result_.observed_enough_frames = true;
+ break;
+ }
+ case QualityScaler::CheckQpResult::kHighQp: {
+ result_.observed_enough_frames = true;
+ result_.qp_usage_reported = true;
+ quality_scaler_->fast_rampup_ = false;
+ quality_scaler_->handler_->OnReportQpUsageHigh();
+ quality_scaler_->ClearSamples();
+ break;
+ }
+ case QualityScaler::CheckQpResult::kLowQp: {
+ result_.observed_enough_frames = true;
+ result_.qp_usage_reported = true;
+ quality_scaler_->handler_->OnReportQpUsageLow();
+ quality_scaler_->ClearSamples();
+ break;
+ }
+ }
+ state_ = State::kCompleted;
+ // Starting the next task deletes the pending task. After this line,
+ // `this` has been deleted.
+ quality_scaler_->StartNextCheckQpTask();
+ },
+ TimeDelta::Millis(GetCheckingQpDelayMs()));
+ }
+
+ bool HasCompletedTask() const { return state_ == State::kCompleted; }
+
+ Result result() const {
+ RTC_DCHECK(HasCompletedTask());
+ return result_;
+ }
+
+ private:
+ enum class State {
+ kNotStarted,
+ kCheckingQp,
+ kCompleted,
+ };
+
+ // Determines the sampling period of CheckQpTasks.
+ int64_t GetCheckingQpDelayMs() const {
+ RTC_DCHECK_RUN_ON(&quality_scaler_->task_checker_);
+ if (quality_scaler_->fast_rampup_) {
+ return quality_scaler_->sampling_period_ms_;
+ }
+ if (quality_scaler_->experiment_enabled_ &&
+ !previous_task_result_.observed_enough_frames) {
+ // Use half the interval while waiting for enough frames.
+ return quality_scaler_->sampling_period_ms_ / 2;
+ }
+ if (quality_scaler_->scale_factor_ &&
+ !previous_task_result_.qp_usage_reported) {
+ // Last CheckQp did not call AdaptDown/Up, possibly reduce interval.
+ return quality_scaler_->sampling_period_ms_ *
+ quality_scaler_->scale_factor_.value();
+ }
+ return quality_scaler_->sampling_period_ms_ *
+ quality_scaler_->initial_scale_factor_;
+ }
+
+ QualityScaler* const quality_scaler_;
+ State state_;
+ const Result previous_task_result_;
+ Result result_;
+
+ rtc::WeakPtrFactory<CheckQpTask> weak_ptr_factory_;
+};
+
+QualityScaler::QualityScaler(QualityScalerQpUsageHandlerInterface* handler,
+ VideoEncoder::QpThresholds thresholds)
+ : QualityScaler(handler, thresholds, kMeasureMs) {}
+
+// Protected ctor, should not be called directly.
+QualityScaler::QualityScaler(QualityScalerQpUsageHandlerInterface* handler,
+ VideoEncoder::QpThresholds thresholds,
+ int64_t default_sampling_period_ms)
+ : handler_(handler),
+ thresholds_(thresholds),
+ sampling_period_ms_(QualityScalerSettings::ParseFromFieldTrials()
+ .SamplingPeriodMs()
+ .value_or(default_sampling_period_ms)),
+ fast_rampup_(true),
+ // Arbitrarily choose size based on 30 fps for 5 seconds.
+ average_qp_(QualityScalerSettings::ParseFromFieldTrials()
+ .AverageQpWindow()
+ .value_or(5 * 30)),
+ framedrop_percent_media_opt_(5 * 30),
+ framedrop_percent_all_(5 * 30),
+ experiment_enabled_(QualityScalingExperiment::Enabled()),
+ min_frames_needed_(
+ QualityScalerSettings::ParseFromFieldTrials().MinFrames().value_or(
+ kMinFramesNeededToScale)),
+ initial_scale_factor_(QualityScalerSettings::ParseFromFieldTrials()
+ .InitialScaleFactor()
+ .value_or(kSamplePeriodScaleFactor)),
+ scale_factor_(
+ QualityScalerSettings::ParseFromFieldTrials().ScaleFactor()) {
+ RTC_DCHECK_RUN_ON(&task_checker_);
+ if (experiment_enabled_) {
+ config_ = QualityScalingExperiment::GetConfig();
+ qp_smoother_high_.reset(new QpSmoother(config_.alpha_high));
+ qp_smoother_low_.reset(new QpSmoother(config_.alpha_low));
+ }
+ RTC_DCHECK(handler_ != nullptr);
+ StartNextCheckQpTask();
+ RTC_LOG(LS_INFO) << "QP thresholds: low: " << thresholds_.low
+ << ", high: " << thresholds_.high;
+}
+
+QualityScaler::~QualityScaler() {
+ RTC_DCHECK_RUN_ON(&task_checker_);
+}
+
+void QualityScaler::StartNextCheckQpTask() {
+ RTC_DCHECK_RUN_ON(&task_checker_);
+ RTC_DCHECK(!pending_qp_task_ || pending_qp_task_->HasCompletedTask())
+ << "A previous CheckQpTask has not completed yet!";
+ CheckQpTask::Result previous_task_result;
+ if (pending_qp_task_) {
+ previous_task_result = pending_qp_task_->result();
+ }
+ pending_qp_task_ = std::make_unique<CheckQpTask>(this, previous_task_result);
+ pending_qp_task_->StartDelayedTask();
+}
+
+void QualityScaler::SetQpThresholds(VideoEncoder::QpThresholds thresholds) {
+ RTC_DCHECK_RUN_ON(&task_checker_);
+ thresholds_ = thresholds;
+}
+
+void QualityScaler::ReportDroppedFrameByMediaOpt() {
+ RTC_DCHECK_RUN_ON(&task_checker_);
+ framedrop_percent_media_opt_.AddSample(100);
+ framedrop_percent_all_.AddSample(100);
+}
+
+void QualityScaler::ReportDroppedFrameByEncoder() {
+ RTC_DCHECK_RUN_ON(&task_checker_);
+ framedrop_percent_all_.AddSample(100);
+}
+
+void QualityScaler::ReportQp(int qp, int64_t time_sent_us) {
+ RTC_DCHECK_RUN_ON(&task_checker_);
+ framedrop_percent_media_opt_.AddSample(0);
+ framedrop_percent_all_.AddSample(0);
+ average_qp_.AddSample(qp);
+ if (qp_smoother_high_)
+ qp_smoother_high_->Add(qp, time_sent_us);
+ if (qp_smoother_low_)
+ qp_smoother_low_->Add(qp, time_sent_us);
+}
+
+bool QualityScaler::QpFastFilterLow() const {
+ RTC_DCHECK_RUN_ON(&task_checker_);
+ size_t num_frames = config_.use_all_drop_reasons
+ ? framedrop_percent_all_.Size()
+ : framedrop_percent_media_opt_.Size();
+ const size_t kMinNumFrames = 10;
+ if (num_frames < kMinNumFrames) {
+ return false; // Wait for more frames before making a decision.
+ }
+ absl::optional<int> avg_qp_high = qp_smoother_high_
+ ? qp_smoother_high_->GetAvg()
+ : average_qp_.GetAverageRoundedDown();
+ return (avg_qp_high) ? (avg_qp_high.value() <= thresholds_.low) : false;
+}
+
+QualityScaler::CheckQpResult QualityScaler::CheckQp() const {
+ RTC_DCHECK_RUN_ON(&task_checker_);
+  // Thresholds are set through InitEncode, so they should be set by now.
+ RTC_DCHECK_GE(thresholds_.low, 0);
+
+  // If we have not observed at least this many frames, we can't make a good
+  // scaling decision.
+ const size_t frames = config_.use_all_drop_reasons
+ ? framedrop_percent_all_.Size()
+ : framedrop_percent_media_opt_.Size();
+ if (frames < min_frames_needed_) {
+ return CheckQpResult::kInsufficientSamples;
+ }
+
+ // Check if we should scale down due to high frame drop.
+ const absl::optional<int> drop_rate =
+ config_.use_all_drop_reasons
+ ? framedrop_percent_all_.GetAverageRoundedDown()
+ : framedrop_percent_media_opt_.GetAverageRoundedDown();
+ if (drop_rate && *drop_rate >= kFramedropPercentThreshold) {
+ RTC_LOG(LS_INFO) << "Reporting high QP, framedrop percent " << *drop_rate;
+ return CheckQpResult::kHighQp;
+ }
+
+ // Check if we should scale up or down based on QP.
+ const absl::optional<int> avg_qp_high =
+ qp_smoother_high_ ? qp_smoother_high_->GetAvg()
+ : average_qp_.GetAverageRoundedDown();
+ const absl::optional<int> avg_qp_low =
+ qp_smoother_low_ ? qp_smoother_low_->GetAvg()
+ : average_qp_.GetAverageRoundedDown();
+ if (avg_qp_high && avg_qp_low) {
+ RTC_LOG(LS_INFO) << "Checking average QP " << *avg_qp_high << " ("
+ << *avg_qp_low << ").";
+ if (*avg_qp_high > thresholds_.high) {
+ return CheckQpResult::kHighQp;
+ }
+ if (*avg_qp_low <= thresholds_.low) {
+ // QP has been low. We want to try a higher resolution.
+ return CheckQpResult::kLowQp;
+ }
+ }
+ return CheckQpResult::kNormalQp;
+}
+
+void QualityScaler::ClearSamples() {
+ RTC_DCHECK_RUN_ON(&task_checker_);
+ framedrop_percent_media_opt_.Reset();
+ framedrop_percent_all_.Reset();
+ average_qp_.Reset();
+ if (qp_smoother_high_)
+ qp_smoother_high_->Reset();
+ if (qp_smoother_low_)
+ qp_smoother_low_->Reset();
+}
+
+QualityScalerQpUsageHandlerInterface::~QualityScalerQpUsageHandlerInterface() {}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/utility/quality_scaler.h b/third_party/libwebrtc/modules/video_coding/utility/quality_scaler.h
new file mode 100644
index 0000000000..93014e36a7
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/utility/quality_scaler.h
@@ -0,0 +1,120 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_UTILITY_QUALITY_SCALER_H_
+#define MODULES_VIDEO_CODING_UTILITY_QUALITY_SCALER_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <memory>
+
+#include "absl/types/optional.h"
+#include "api/scoped_refptr.h"
+#include "api/sequence_checker.h"
+#include "api/video_codecs/video_encoder.h"
+#include "rtc_base/experiments/quality_scaling_experiment.h"
+#include "rtc_base/numerics/moving_average.h"
+#include "rtc_base/ref_count.h"
+#include "rtc_base/system/no_unique_address.h"
+
+namespace webrtc {
+
+class QualityScalerQpUsageHandlerCallbackInterface;
+class QualityScalerQpUsageHandlerInterface;
+
+// QualityScaler runs asynchronously and monitors QP values of encoded frames.
+// It holds a reference to a QualityScalerQpUsageHandlerInterface implementation
+// to signal an overuse or underuse of QP (which indicate a desire to scale the
+// video stream down or up).
+class QualityScaler {
+ public:
+ // Construct a QualityScaler with given `thresholds` and `handler`.
+ // This starts the quality scaler periodically checking what the average QP
+ // has been recently.
+ QualityScaler(QualityScalerQpUsageHandlerInterface* handler,
+ VideoEncoder::QpThresholds thresholds);
+ virtual ~QualityScaler();
+ // Should be called each time a frame is dropped at encoding.
+ void ReportDroppedFrameByMediaOpt();
+ void ReportDroppedFrameByEncoder();
+ // Inform the QualityScaler of the last seen QP.
+ void ReportQp(int qp, int64_t time_sent_us);
+
+ void SetQpThresholds(VideoEncoder::QpThresholds thresholds);
+ bool QpFastFilterLow() const;
+
+  // The following members are declared protected for testing purposes.
+ protected:
+ QualityScaler(QualityScalerQpUsageHandlerInterface* handler,
+ VideoEncoder::QpThresholds thresholds,
+ int64_t sampling_period_ms);
+
+ private:
+ class QpSmoother;
+ class CheckQpTask;
+ class CheckQpTaskHandlerCallback;
+
+ enum class CheckQpResult {
+ kInsufficientSamples,
+ kNormalQp,
+ kHighQp,
+ kLowQp,
+ };
+
+ // Starts checking for QP in a delayed task. When the resulting CheckQpTask
+ // completes, it will invoke this method again, ensuring that we always
+ // periodically check for QP. See CheckQpTask for more details. We never run
+ // more than one CheckQpTask at a time.
+ void StartNextCheckQpTask();
+
+ CheckQpResult CheckQp() const;
+ void ClearSamples();
+
+ std::unique_ptr<CheckQpTask> pending_qp_task_ RTC_GUARDED_BY(&task_checker_);
+ QualityScalerQpUsageHandlerInterface* const handler_
+ RTC_GUARDED_BY(&task_checker_);
+ RTC_NO_UNIQUE_ADDRESS SequenceChecker task_checker_;
+
+ VideoEncoder::QpThresholds thresholds_ RTC_GUARDED_BY(&task_checker_);
+ const int64_t sampling_period_ms_;
+ bool fast_rampup_ RTC_GUARDED_BY(&task_checker_);
+ rtc::MovingAverage average_qp_ RTC_GUARDED_BY(&task_checker_);
+ rtc::MovingAverage framedrop_percent_media_opt_
+ RTC_GUARDED_BY(&task_checker_);
+ rtc::MovingAverage framedrop_percent_all_ RTC_GUARDED_BY(&task_checker_);
+
+ // Used by QualityScalingExperiment.
+ const bool experiment_enabled_;
+ QualityScalingExperiment::Config config_ RTC_GUARDED_BY(&task_checker_);
+ std::unique_ptr<QpSmoother> qp_smoother_high_ RTC_GUARDED_BY(&task_checker_);
+ std::unique_ptr<QpSmoother> qp_smoother_low_ RTC_GUARDED_BY(&task_checker_);
+
+ const size_t min_frames_needed_;
+ const double initial_scale_factor_;
+ const absl::optional<double> scale_factor_;
+};
+
+// Reacts to QP being too high or too low. For best quality, when QP is high
+// it is desirable to decrease the resolution or frame rate of the stream;
+// when QP is low it is desirable to increase them. Whether to reconfigure the
+// stream is ultimately up to the handler, which is able to respond
+// asynchronously.
+class QualityScalerQpUsageHandlerInterface {
+ public:
+ virtual ~QualityScalerQpUsageHandlerInterface();
+
+ virtual void OnReportQpUsageHigh() = 0;
+ virtual void OnReportQpUsageLow() = 0;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_UTILITY_QUALITY_SCALER_H_
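A minimal usage sketch of this interface follows. The handler class, threshold values, and call sites are assumptions for illustration; in production the thresholds come from the encoder's EncoderInfo::scaling_settings, and every call (including construction) must happen on the same task queue, which the SequenceChecker enforces.

    #include "modules/video_coding/utility/quality_scaler.h"
    #include "rtc_base/time_utils.h"

    namespace {

    // Hypothetical handler that would forward QP signals to an adaptation
    // component.
    class ExampleQpHandler
        : public webrtc::QualityScalerQpUsageHandlerInterface {
     public:
      void OnReportQpUsageHigh() override { /* request a downscale */ }
      void OnReportQpUsageLow() override { /* request an upscale */ }
    };

    // Must run on a TaskQueue; the scaler posts its periodic CheckQpTask
    // there. Threshold values here are illustrative.
    void RunExampleOnTaskQueue() {
      static ExampleQpHandler handler;  // Must outlive the scaler.
      webrtc::QualityScaler scaler(
          &handler,
          webrtc::VideoEncoder::QpThresholds(/*low=*/24, /*high=*/37));
      scaler.ReportQp(/*qp=*/30, /*time_sent_us=*/rtc::TimeMicros());
      scaler.ReportDroppedFrameByEncoder();
    }

    }  // namespace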
diff --git a/third_party/libwebrtc/modules/video_coding/utility/quality_scaler_unittest.cc b/third_party/libwebrtc/modules/video_coding/utility/quality_scaler_unittest.cc
new file mode 100644
index 0000000000..6202947a35
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/utility/quality_scaler_unittest.cc
@@ -0,0 +1,253 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/utility/quality_scaler.h"
+
+#include <memory>
+#include <string>
+
+#include "rtc_base/checks.h"
+#include "rtc_base/event.h"
+#include "rtc_base/task_queue_for_test.h"
+#include "test/field_trial.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+static const int kFramerate = 30;
+static const int kLowQp = 15;
+static const int kHighQp = 40;
+static const int kMinFramesNeededToScale = 60; // From quality_scaler.cc.
+static const size_t kDefaultTimeoutMs = 150;
+} // namespace
+
+class FakeQpUsageHandler : public QualityScalerQpUsageHandlerInterface {
+ public:
+ ~FakeQpUsageHandler() override = default;
+
+ // QualityScalerQpUsageHandlerInterface implementation.
+ void OnReportQpUsageHigh() override {
+ adapt_down_events_++;
+ event.Set();
+ }
+
+ void OnReportQpUsageLow() override {
+ adapt_up_events_++;
+ event.Set();
+ }
+
+ rtc::Event event;
+ int adapt_up_events_ = 0;
+ int adapt_down_events_ = 0;
+};
+
+// Pass a lower sampling period to speed up the tests.
+class QualityScalerUnderTest : public QualityScaler {
+ public:
+ explicit QualityScalerUnderTest(QualityScalerQpUsageHandlerInterface* handler,
+ VideoEncoder::QpThresholds thresholds)
+ : QualityScaler(handler, thresholds, 5) {}
+};
+
+class QualityScalerTest : public ::testing::Test,
+ public ::testing::WithParamInterface<std::string> {
+ protected:
+ enum ScaleDirection {
+ kKeepScaleAboveLowQp,
+ kKeepScaleAtHighQp,
+ kScaleDown,
+ kScaleDownAboveHighQp,
+ kScaleUp
+ };
+
+ QualityScalerTest()
+ : scoped_field_trial_(GetParam()),
+ task_queue_("QualityScalerTestQueue"),
+ handler_(std::make_unique<FakeQpUsageHandler>()) {
+ task_queue_.SendTask(
+ [this] {
+ qs_ = std::unique_ptr<QualityScaler>(new QualityScalerUnderTest(
+ handler_.get(), VideoEncoder::QpThresholds(kLowQp, kHighQp)));
+ });
+ }
+
+ ~QualityScalerTest() override {
+ task_queue_.SendTask([this] { qs_ = nullptr; });
+ }
+
+ void TriggerScale(ScaleDirection scale_direction) {
+ for (int i = 0; i < kFramerate * 5; ++i) {
+ switch (scale_direction) {
+ case kKeepScaleAboveLowQp:
+ qs_->ReportQp(kLowQp + 1, 0);
+ break;
+ case kScaleUp:
+ qs_->ReportQp(kLowQp, 0);
+ break;
+ case kScaleDown:
+ qs_->ReportDroppedFrameByMediaOpt();
+ break;
+ case kKeepScaleAtHighQp:
+ qs_->ReportQp(kHighQp, 0);
+ break;
+ case kScaleDownAboveHighQp:
+ qs_->ReportQp(kHighQp + 1, 0);
+ break;
+ }
+ }
+ }
+
+ test::ScopedFieldTrials scoped_field_trial_;
+ TaskQueueForTest task_queue_;
+ std::unique_ptr<QualityScaler> qs_;
+ std::unique_ptr<FakeQpUsageHandler> handler_;
+};
+
+INSTANTIATE_TEST_SUITE_P(
+ FieldTrials,
+ QualityScalerTest,
+ ::testing::Values(
+ "WebRTC-Video-QualityScaling/Enabled-1,2,3,4,5,6,7,8,0.9,0.99,1/",
+ "WebRTC-Video-QualityScaling/Disabled/"));
+
+TEST_P(QualityScalerTest, DownscalesAfterContinuousFramedrop) {
+ task_queue_.SendTask([this] { TriggerScale(kScaleDown); });
+ EXPECT_TRUE(handler_->event.Wait(kDefaultTimeoutMs));
+ EXPECT_EQ(1, handler_->adapt_down_events_);
+ EXPECT_EQ(0, handler_->adapt_up_events_);
+}
+
+TEST_P(QualityScalerTest, KeepsScaleAtHighQp) {
+ task_queue_.SendTask([this] { TriggerScale(kKeepScaleAtHighQp); });
+ EXPECT_FALSE(handler_->event.Wait(kDefaultTimeoutMs));
+ EXPECT_EQ(0, handler_->adapt_down_events_);
+ EXPECT_EQ(0, handler_->adapt_up_events_);
+}
+
+TEST_P(QualityScalerTest, DownscalesAboveHighQp) {
+ task_queue_.SendTask([this] { TriggerScale(kScaleDownAboveHighQp); });
+ EXPECT_TRUE(handler_->event.Wait(kDefaultTimeoutMs));
+ EXPECT_EQ(1, handler_->adapt_down_events_);
+ EXPECT_EQ(0, handler_->adapt_up_events_);
+}
+
+TEST_P(QualityScalerTest, DownscalesAfterTwoThirdsFramedrop) {
+ task_queue_.SendTask([this] {
+ for (int i = 0; i < kFramerate * 5; ++i) {
+ qs_->ReportDroppedFrameByMediaOpt();
+ qs_->ReportDroppedFrameByMediaOpt();
+ qs_->ReportQp(kHighQp, 0);
+ }
+ });
+ EXPECT_TRUE(handler_->event.Wait(kDefaultTimeoutMs));
+ EXPECT_EQ(1, handler_->adapt_down_events_);
+ EXPECT_EQ(0, handler_->adapt_up_events_);
+}
+
+TEST_P(QualityScalerTest, DoesNotDownscaleAfterHalfFramedrop) {
+ task_queue_.SendTask([this] {
+ for (int i = 0; i < kFramerate * 5; ++i) {
+ qs_->ReportDroppedFrameByMediaOpt();
+ qs_->ReportQp(kHighQp, 0);
+ }
+ });
+ EXPECT_FALSE(handler_->event.Wait(kDefaultTimeoutMs));
+ EXPECT_EQ(0, handler_->adapt_down_events_);
+ EXPECT_EQ(0, handler_->adapt_up_events_);
+}
+
+TEST_P(QualityScalerTest, DownscalesAfterTwoThirdsIfFieldTrialEnabled) {
+ const bool kDownScaleExpected =
+ GetParam().find("Enabled") != std::string::npos;
+ task_queue_.SendTask([this] {
+ for (int i = 0; i < kFramerate * 5; ++i) {
+ qs_->ReportDroppedFrameByMediaOpt();
+ qs_->ReportDroppedFrameByEncoder();
+ qs_->ReportQp(kHighQp, 0);
+ }
+ });
+ EXPECT_EQ(kDownScaleExpected, handler_->event.Wait(kDefaultTimeoutMs));
+ EXPECT_EQ(kDownScaleExpected ? 1 : 0, handler_->adapt_down_events_);
+ EXPECT_EQ(0, handler_->adapt_up_events_);
+}
+
+TEST_P(QualityScalerTest, KeepsScaleOnNormalQp) {
+ task_queue_.SendTask([this] { TriggerScale(kKeepScaleAboveLowQp); });
+ EXPECT_FALSE(handler_->event.Wait(kDefaultTimeoutMs));
+ EXPECT_EQ(0, handler_->adapt_down_events_);
+ EXPECT_EQ(0, handler_->adapt_up_events_);
+}
+
+TEST_P(QualityScalerTest, UpscalesAfterLowQp) {
+ task_queue_.SendTask([this] { TriggerScale(kScaleUp); });
+ EXPECT_TRUE(handler_->event.Wait(kDefaultTimeoutMs));
+ EXPECT_EQ(0, handler_->adapt_down_events_);
+ EXPECT_EQ(1, handler_->adapt_up_events_);
+}
+
+TEST_P(QualityScalerTest, ScalesDownAndBackUp) {
+ task_queue_.SendTask([this] { TriggerScale(kScaleDown); });
+ EXPECT_TRUE(handler_->event.Wait(kDefaultTimeoutMs));
+ EXPECT_EQ(1, handler_->adapt_down_events_);
+ EXPECT_EQ(0, handler_->adapt_up_events_);
+ task_queue_.SendTask([this] { TriggerScale(kScaleUp); });
+ EXPECT_TRUE(handler_->event.Wait(kDefaultTimeoutMs));
+ EXPECT_EQ(1, handler_->adapt_down_events_);
+ EXPECT_EQ(1, handler_->adapt_up_events_);
+}
+
+TEST_P(QualityScalerTest, DoesNotScaleUntilEnoughFramesObserved) {
+ task_queue_.SendTask([this] {
+ // Not enough frames to make a decision.
+ for (int i = 0; i < kMinFramesNeededToScale - 1; ++i) {
+ qs_->ReportQp(kLowQp, 0);
+ }
+ });
+ EXPECT_FALSE(handler_->event.Wait(kDefaultTimeoutMs));
+ task_queue_.SendTask([this] {
+ // Send 1 more. Enough frames observed, should result in an adapt
+ // request.
+ qs_->ReportQp(kLowQp, 0);
+ });
+ EXPECT_TRUE(handler_->event.Wait(kDefaultTimeoutMs));
+ EXPECT_EQ(0, handler_->adapt_down_events_);
+ EXPECT_EQ(1, handler_->adapt_up_events_);
+
+ // Samples should be cleared after an adapt request.
+ task_queue_.SendTask([this] {
+ // Not enough frames to make a decision.
+ qs_->ReportQp(kLowQp, 0);
+ });
+ EXPECT_FALSE(handler_->event.Wait(kDefaultTimeoutMs));
+ EXPECT_EQ(0, handler_->adapt_down_events_);
+ EXPECT_EQ(1, handler_->adapt_up_events_);
+}
+
+TEST_P(QualityScalerTest, ScalesDownAndBackUpWithMinFramesNeeded) {
+ task_queue_.SendTask([this] {
+ for (int i = 0; i < kMinFramesNeededToScale; ++i) {
+ qs_->ReportQp(kHighQp + 1, 0);
+ }
+ });
+ EXPECT_TRUE(handler_->event.Wait(kDefaultTimeoutMs));
+ EXPECT_EQ(1, handler_->adapt_down_events_);
+ EXPECT_EQ(0, handler_->adapt_up_events_);
+ // Samples cleared.
+ task_queue_.SendTask([this] {
+ for (int i = 0; i < kMinFramesNeededToScale; ++i) {
+ qs_->ReportQp(kLowQp, 0);
+ }
+ });
+ EXPECT_TRUE(handler_->event.Wait(kDefaultTimeoutMs));
+ EXPECT_EQ(1, handler_->adapt_down_events_);
+ EXPECT_EQ(1, handler_->adapt_up_events_);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/utility/simulcast_rate_allocator.cc b/third_party/libwebrtc/modules/video_coding/utility/simulcast_rate_allocator.cc
new file mode 100644
index 0000000000..1496934e1c
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/utility/simulcast_rate_allocator.cc
@@ -0,0 +1,343 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/utility/simulcast_rate_allocator.h"
+
+#include <stdio.h>
+
+#include <algorithm>
+#include <cmath>
+#include <cstdint>
+#include <numeric>
+#include <string>
+#include <tuple>
+#include <vector>
+
+#include "rtc_base/checks.h"
+#include "rtc_base/experiments/rate_control_settings.h"
+#include "system_wrappers/include/field_trial.h"
+
+namespace webrtc {
+namespace {
+// Ratio allocation between temporal streams:
+// Values as required for the VP8 codec (accumulating).
+static const float
+ kLayerRateAllocation[kMaxTemporalStreams][kMaxTemporalStreams] = {
+ {1.0f, 1.0f, 1.0f, 1.0f}, // 1 layer
+ {0.6f, 1.0f, 1.0f, 1.0f}, // 2 layers {60%, 40%}
+ {0.4f, 0.6f, 1.0f, 1.0f}, // 3 layers {40%, 20%, 40%}
+ {0.25f, 0.4f, 0.6f, 1.0f} // 4 layers {25%, 15%, 20%, 40%}
+};
+
+static const float kBaseHeavy3TlRateAllocation[kMaxTemporalStreams] = {
+ 0.6f, 0.8f, 1.0f, 1.0f // 3 layers {60%, 20%, 20%}
+};
+
+const uint32_t kLegacyScreenshareTl0BitrateKbps = 200;
+const uint32_t kLegacyScreenshareTl1BitrateKbps = 1000;
+} // namespace
+
+float SimulcastRateAllocator::GetTemporalRateAllocation(
+ int num_layers,
+ int temporal_id,
+ bool base_heavy_tl3_alloc) {
+ RTC_CHECK_GT(num_layers, 0);
+ RTC_CHECK_LE(num_layers, kMaxTemporalStreams);
+ RTC_CHECK_GE(temporal_id, 0);
+ RTC_CHECK_LT(temporal_id, num_layers);
+ if (num_layers == 3 && base_heavy_tl3_alloc) {
+ return kBaseHeavy3TlRateAllocation[temporal_id];
+ }
+ return kLayerRateAllocation[num_layers - 1][temporal_id];
+}
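+// Worked example (illustrative): for num_layers == 3 and
+// base_heavy_tl3_alloc == false, the cumulative table row is {0.4, 0.6, 1.0}:
+//   GetTemporalRateAllocation(3, 0, false) == 0.4f  // TL0
+//   GetTemporalRateAllocation(3, 1, false) == 0.6f  // TL0 + TL1
+//   GetTemporalRateAllocation(3, 2, false) == 1.0f  // TL0 + TL1 + TL2
+// At 1000 kbps this gives cumulative rates of 400/600/1000 kbps, which
+// DefaultTemporalLayerAllocation() below converts to 400/200/400 kbps per
+// individual layer.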
+
+SimulcastRateAllocator::SimulcastRateAllocator(const VideoCodec& codec)
+ : codec_(codec),
+ stable_rate_settings_(StableTargetRateExperiment::ParseFromFieldTrials()),
+ rate_control_settings_(RateControlSettings::ParseFromFieldTrials()),
+ legacy_conference_mode_(false) {}
+
+SimulcastRateAllocator::~SimulcastRateAllocator() = default;
+
+VideoBitrateAllocation SimulcastRateAllocator::Allocate(
+ VideoBitrateAllocationParameters parameters) {
+ VideoBitrateAllocation allocated_bitrates;
+ DataRate stable_rate = parameters.total_bitrate;
+ if (stable_rate_settings_.IsEnabled() &&
+ parameters.stable_bitrate > DataRate::Zero()) {
+ stable_rate = std::min(parameters.stable_bitrate, parameters.total_bitrate);
+ }
+ DistributeAllocationToSimulcastLayers(parameters.total_bitrate, stable_rate,
+ &allocated_bitrates);
+ DistributeAllocationToTemporalLayers(&allocated_bitrates);
+ return allocated_bitrates;
+}
+
+void SimulcastRateAllocator::DistributeAllocationToSimulcastLayers(
+ DataRate total_bitrate,
+ DataRate stable_bitrate,
+ VideoBitrateAllocation* allocated_bitrates) {
+ DataRate left_in_total_allocation = total_bitrate;
+ DataRate left_in_stable_allocation = stable_bitrate;
+
+ if (codec_.maxBitrate) {
+ DataRate max_rate = DataRate::KilobitsPerSec(codec_.maxBitrate);
+ left_in_total_allocation = std::min(left_in_total_allocation, max_rate);
+ left_in_stable_allocation = std::min(left_in_stable_allocation, max_rate);
+ }
+
+ if (codec_.numberOfSimulcastStreams == 0) {
+ // No simulcast, just set the target as this has been capped already.
+ if (codec_.active) {
+ allocated_bitrates->SetBitrate(
+ 0, 0,
+ std::max(DataRate::KilobitsPerSec(codec_.minBitrate),
+ left_in_total_allocation)
+ .bps());
+ }
+ return;
+ }
+
+  // Sort the layers by maxBitrate; they might not always be ordered from
+  // smallest to biggest.
+  std::vector<size_t> layer_index(codec_.numberOfSimulcastStreams);
+  std::iota(layer_index.begin(), layer_index.end(), 0);
+  std::stable_sort(layer_index.begin(), layer_index.end(),
+                   [this](size_t a, size_t b) {
+                     return codec_.simulcastStream[a].maxBitrate <
+                            codec_.simulcastStream[b].maxBitrate;
+                   });
+
+ // Find the first active layer. We don't allocate to inactive layers.
+ size_t active_layer = 0;
+ for (; active_layer < codec_.numberOfSimulcastStreams; ++active_layer) {
+ if (codec_.simulcastStream[layer_index[active_layer]].active) {
+ // Found the first active layer.
+ break;
+ }
+ }
+  // All streams could be inactive, in which case there is nothing more to do.
+ if (active_layer == codec_.numberOfSimulcastStreams) {
+ return;
+ }
+
+ // Always allocate enough bitrate for the minimum bitrate of the first
+ // active layer. Suspending below min bitrate is controlled outside the
+ // codec implementation and is not overridden by this.
+ DataRate min_rate = DataRate::KilobitsPerSec(
+ codec_.simulcastStream[layer_index[active_layer]].minBitrate);
+ left_in_total_allocation = std::max(left_in_total_allocation, min_rate);
+ left_in_stable_allocation = std::max(left_in_stable_allocation, min_rate);
+
+  // Begin by allocating bitrate to simulcast streams, putting all bitrate in
+  // temporal layer 0. We'll then distribute this bitrate across the available
+  // temporal layers once stream allocation is done.
+
+ bool first_allocation = false;
+ if (stream_enabled_.empty()) {
+ // First time allocating, this means we should not include hysteresis in
+ // case this is a reconfiguration of an existing enabled stream.
+ first_allocation = true;
+ stream_enabled_.resize(codec_.numberOfSimulcastStreams, false);
+ }
+
+ size_t top_active_layer = active_layer;
+ // Allocate up to the target bitrate for each active simulcast layer.
+ for (; active_layer < codec_.numberOfSimulcastStreams; ++active_layer) {
+ const SimulcastStream& stream =
+ codec_.simulcastStream[layer_index[active_layer]];
+ if (!stream.active) {
+ stream_enabled_[layer_index[active_layer]] = false;
+ continue;
+ }
+ // If we can't allocate to the current layer we can't allocate to higher
+ // layers because they require a higher minimum bitrate.
+ DataRate min_bitrate = DataRate::KilobitsPerSec(stream.minBitrate);
+ DataRate target_bitrate = DataRate::KilobitsPerSec(stream.targetBitrate);
+ double hysteresis_factor =
+ codec_.mode == VideoCodecMode::kRealtimeVideo
+ ? stable_rate_settings_.GetVideoHysteresisFactor()
+ : stable_rate_settings_.GetScreenshareHysteresisFactor();
+ if (!first_allocation && !stream_enabled_[layer_index[active_layer]]) {
+ min_bitrate = std::min(hysteresis_factor * min_bitrate, target_bitrate);
+ }
+ if (left_in_stable_allocation < min_bitrate) {
+ allocated_bitrates->set_bw_limited(true);
+ break;
+ }
+
+ // We are allocating to this layer so it is the current active allocation.
+ top_active_layer = layer_index[active_layer];
+ stream_enabled_[layer_index[active_layer]] = true;
+ DataRate layer_rate = std::min(left_in_total_allocation, target_bitrate);
+ allocated_bitrates->SetBitrate(layer_index[active_layer], 0,
+ layer_rate.bps());
+ left_in_total_allocation -= layer_rate;
+ left_in_stable_allocation -=
+ std::min(left_in_stable_allocation, target_bitrate);
+ }
+
+ // All layers above this one are not active.
+ for (; active_layer < codec_.numberOfSimulcastStreams; ++active_layer) {
+ stream_enabled_[layer_index[active_layer]] = false;
+ }
+
+  // Next, try to allocate the remaining bitrate, up to the max bitrate, in
+  // the top active stream.
+ // TODO(sprang): Allocate up to max bitrate for all layers once we have a
+ // better idea of possible performance implications.
+ if (left_in_total_allocation > DataRate::Zero()) {
+ const SimulcastStream& stream = codec_.simulcastStream[top_active_layer];
+ DataRate initial_layer_rate = DataRate::BitsPerSec(
+ allocated_bitrates->GetSpatialLayerSum(top_active_layer));
+ DataRate additional_allocation = std::min(
+ left_in_total_allocation,
+ DataRate::KilobitsPerSec(stream.maxBitrate) - initial_layer_rate);
+ allocated_bitrates->SetBitrate(
+ top_active_layer, 0,
+ (initial_layer_rate + additional_allocation).bps());
+ }
+}
+
+void SimulcastRateAllocator::DistributeAllocationToTemporalLayers(
+ VideoBitrateAllocation* allocated_bitrates_bps) const {
+ const int num_spatial_streams =
+ std::max(1, static_cast<int>(codec_.numberOfSimulcastStreams));
+
+ // Finally, distribute the bitrate for the simulcast streams across the
+ // available temporal layers.
+ for (int simulcast_id = 0; simulcast_id < num_spatial_streams;
+ ++simulcast_id) {
+ uint32_t target_bitrate_kbps =
+ allocated_bitrates_bps->GetBitrate(simulcast_id, 0) / 1000;
+ if (target_bitrate_kbps == 0) {
+ continue;
+ }
+
+ const uint32_t expected_allocated_bitrate_kbps = target_bitrate_kbps;
+ RTC_DCHECK_EQ(
+ target_bitrate_kbps,
+ allocated_bitrates_bps->GetSpatialLayerSum(simulcast_id) / 1000);
+ const int num_temporal_streams = NumTemporalStreams(simulcast_id);
+ uint32_t max_bitrate_kbps;
+ // Legacy temporal-layered only screenshare, or simulcast screenshare
+ // with legacy mode for simulcast stream 0.
+ if (codec_.mode == VideoCodecMode::kScreensharing &&
+ legacy_conference_mode_ && simulcast_id == 0) {
+ // TODO(holmer): This is a "temporary" hack for screensharing, where we
+ // interpret the startBitrate as the encoder target bitrate. This is
+ // to allow for a different max bitrate, so if the codec can't meet
+ // the target we still allow it to overshoot up to the max before dropping
+ // frames. This hack should be improved.
+ max_bitrate_kbps =
+ std::min(kLegacyScreenshareTl1BitrateKbps, target_bitrate_kbps);
+ target_bitrate_kbps =
+ std::min(kLegacyScreenshareTl0BitrateKbps, target_bitrate_kbps);
+ } else if (num_spatial_streams == 1) {
+ max_bitrate_kbps = codec_.maxBitrate;
+ } else {
+ max_bitrate_kbps = codec_.simulcastStream[simulcast_id].maxBitrate;
+ }
+
+ std::vector<uint32_t> tl_allocation;
+ if (num_temporal_streams == 1) {
+ tl_allocation.push_back(target_bitrate_kbps);
+ } else {
+ if (codec_.mode == VideoCodecMode::kScreensharing &&
+ legacy_conference_mode_ && simulcast_id == 0) {
+ tl_allocation = ScreenshareTemporalLayerAllocation(
+ target_bitrate_kbps, max_bitrate_kbps, simulcast_id);
+ } else {
+ tl_allocation = DefaultTemporalLayerAllocation(
+ target_bitrate_kbps, max_bitrate_kbps, simulcast_id);
+ }
+ }
+ RTC_DCHECK_GT(tl_allocation.size(), 0);
+ RTC_DCHECK_LE(tl_allocation.size(), num_temporal_streams);
+
+ uint64_t tl_allocation_sum_kbps = 0;
+ for (size_t tl_index = 0; tl_index < tl_allocation.size(); ++tl_index) {
+ uint32_t layer_rate_kbps = tl_allocation[tl_index];
+ if (layer_rate_kbps > 0) {
+ allocated_bitrates_bps->SetBitrate(simulcast_id, tl_index,
+ layer_rate_kbps * 1000);
+ }
+ tl_allocation_sum_kbps += layer_rate_kbps;
+ }
+ RTC_DCHECK_LE(tl_allocation_sum_kbps, expected_allocated_bitrate_kbps);
+ }
+}
+
+std::vector<uint32_t> SimulcastRateAllocator::DefaultTemporalLayerAllocation(
+ int bitrate_kbps,
+ int max_bitrate_kbps,
+ int simulcast_id) const {
+ const size_t num_temporal_layers = NumTemporalStreams(simulcast_id);
+ std::vector<uint32_t> bitrates;
+ for (size_t i = 0; i < num_temporal_layers; ++i) {
+ float layer_bitrate =
+ bitrate_kbps *
+ GetTemporalRateAllocation(
+ num_temporal_layers, i,
+ rate_control_settings_.Vp8BaseHeavyTl3RateAllocation());
+ bitrates.push_back(static_cast<uint32_t>(layer_bitrate + 0.5));
+ }
+
+ // Allocation table is of aggregates, transform to individual rates.
+ uint32_t sum = 0;
+ for (size_t i = 0; i < num_temporal_layers; ++i) {
+ uint32_t layer_bitrate = bitrates[i];
+ RTC_DCHECK_LE(sum, bitrates[i]);
+ bitrates[i] -= sum;
+ sum = layer_bitrate;
+
+ if (sum >= static_cast<uint32_t>(bitrate_kbps)) {
+ // Sum adds up; any subsequent layers will be 0.
+ bitrates.resize(i + 1);
+ break;
+ }
+ }
+
+ return bitrates;
+}
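+// Illustrative trace of the subtraction loop above: with base-heavy 3-layer
+// allocation ({0.6, 0.8, 1.0}) and bitrate_kbps == 100, the rounded
+// cumulative rates are {60, 80, 100} kbps and the individual rates become
+// {60, 20, 20} kbps. If rounding makes the running sum reach bitrate_kbps at
+// an earlier layer, the vector is truncated there and later layers get
+// nothing.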
+
+std::vector<uint32_t>
+SimulcastRateAllocator::ScreenshareTemporalLayerAllocation(
+ int bitrate_kbps,
+ int max_bitrate_kbps,
+ int simulcast_id) const {
+ if (simulcast_id > 0) {
+ return DefaultTemporalLayerAllocation(bitrate_kbps, max_bitrate_kbps,
+ simulcast_id);
+ }
+ std::vector<uint32_t> allocation;
+ allocation.push_back(bitrate_kbps);
+ if (max_bitrate_kbps > bitrate_kbps)
+ allocation.push_back(max_bitrate_kbps - bitrate_kbps);
+ return allocation;
+}
+
+const VideoCodec& SimulcastRateAllocator::GetCodec() const {
+ return codec_;
+}
+
+int SimulcastRateAllocator::NumTemporalStreams(size_t simulcast_id) const {
+ return std::max<uint8_t>(
+ 1,
+ codec_.codecType == kVideoCodecVP8 && codec_.numberOfSimulcastStreams == 0
+ ? codec_.VP8().numberOfTemporalLayers
+ : codec_.simulcastStream[simulcast_id].numberOfTemporalLayers);
+}
+
+void SimulcastRateAllocator::SetLegacyConferenceMode(bool enabled) {
+ legacy_conference_mode_ = enabled;
+}
+
+} // namespace webrtc
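To see both distribution passes end to end, take the configuration used in the unit tests below: stream 0 with min/target/max of 10/100/500 kbps and stream 1 with 50/500/1000 kbps, both active, on a fresh allocator (so hysteresis does not apply). Given an 800 kbps budget, stream 0 receives its 100 kbps target, stream 1 its 500 kbps target, and the remaining 200 kbps tops up the highest active stream. A driver sketch, assuming `codec` is a VideoCodec populated that way:

    webrtc::SimulcastRateAllocator allocator(codec);
    webrtc::VideoBitrateAllocation alloc =
        allocator.Allocate(webrtc::VideoBitrateAllocationParameters(
            webrtc::DataRate::KilobitsPerSec(800), /*framerate=*/30.0));
    // Before temporal-layer distribution the split is:
    //   alloc.GetSpatialLayerSum(0) == 100000  // bps, target rate
    //   alloc.GetSpatialLayerSum(1) == 700000  // bps, target + remainder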
diff --git a/third_party/libwebrtc/modules/video_coding/utility/simulcast_rate_allocator.h b/third_party/libwebrtc/modules/video_coding/utility/simulcast_rate_allocator.h
new file mode 100644
index 0000000000..6f93dbde74
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/utility/simulcast_rate_allocator.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_UTILITY_SIMULCAST_RATE_ALLOCATOR_H_
+#define MODULES_VIDEO_CODING_UTILITY_SIMULCAST_RATE_ALLOCATOR_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <vector>
+
+#include "api/video/video_bitrate_allocation.h"
+#include "api/video/video_bitrate_allocator.h"
+#include "api/video_codecs/video_codec.h"
+#include "rtc_base/experiments/rate_control_settings.h"
+#include "rtc_base/experiments/stable_target_rate_experiment.h"
+
+namespace webrtc {
+
+class SimulcastRateAllocator : public VideoBitrateAllocator {
+ public:
+ explicit SimulcastRateAllocator(const VideoCodec& codec);
+ ~SimulcastRateAllocator() override;
+
+ SimulcastRateAllocator(const SimulcastRateAllocator&) = delete;
+ SimulcastRateAllocator& operator=(const SimulcastRateAllocator&) = delete;
+
+ VideoBitrateAllocation Allocate(
+ VideoBitrateAllocationParameters parameters) override;
+ const VideoCodec& GetCodec() const;
+
+ static float GetTemporalRateAllocation(int num_layers,
+ int temporal_id,
+ bool base_heavy_tl3_alloc);
+
+ void SetLegacyConferenceMode(bool mode) override;
+
+ private:
+ void DistributeAllocationToSimulcastLayers(
+ DataRate total_bitrate,
+ DataRate stable_bitrate,
+ VideoBitrateAllocation* allocated_bitrates);
+ void DistributeAllocationToTemporalLayers(
+ VideoBitrateAllocation* allocated_bitrates) const;
+ std::vector<uint32_t> DefaultTemporalLayerAllocation(int bitrate_kbps,
+ int max_bitrate_kbps,
+ int simulcast_id) const;
+ std::vector<uint32_t> ScreenshareTemporalLayerAllocation(
+ int bitrate_kbps,
+ int max_bitrate_kbps,
+ int simulcast_id) const;
+ int NumTemporalStreams(size_t simulcast_id) const;
+
+ const VideoCodec codec_;
+ const StableTargetRateExperiment stable_rate_settings_;
+ const RateControlSettings rate_control_settings_;
+ std::vector<bool> stream_enabled_;
+ bool legacy_conference_mode_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_UTILITY_SIMULCAST_RATE_ALLOCATOR_H_
diff --git a/third_party/libwebrtc/modules/video_coding/utility/simulcast_rate_allocator_unittest.cc b/third_party/libwebrtc/modules/video_coding/utility/simulcast_rate_allocator_unittest.cc
new file mode 100644
index 0000000000..24d7c58bcd
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/utility/simulcast_rate_allocator_unittest.cc
@@ -0,0 +1,824 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/utility/simulcast_rate_allocator.h"
+
+#include <limits>
+#include <memory>
+#include <utility>
+#include <vector>
+
+#include "api/video_codecs/vp8_frame_buffer_controller.h"
+#include "api/video_codecs/vp8_frame_config.h"
+#include "api/video_codecs/vp8_temporal_layers.h"
+#include "rtc_base/checks.h"
+#include "test/field_trial.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+using ::testing::_;
+
+constexpr uint32_t kFramerateFps = 5;
+constexpr uint32_t kMinBitrateKbps = 50;
+// These correspond to kLegacyScreenshareTl(0|1)BitrateKbps in
+// simulcast_rate_allocator.cc.
+constexpr uint32_t kLegacyScreenshareTargetBitrateKbps = 200;
+constexpr uint32_t kLegacyScreenshareMaxBitrateKbps = 1000;
+// Bitrates for upper simulcast screenshare layer.
+constexpr uint32_t kSimulcastScreenshareMinBitrateKbps = 600;
+constexpr uint32_t kSimulcastScreenshareMaxBitrateKbps = 1250;
+// Default video hysteresis factor: the allocatable bitrate for the next layer
+// must exceed that layer's min setting by 20% in order for the layer to be
+// initially turned on.
+const double kDefaultHysteresis = 1.2;
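+// Example: in SetupCodec3SL3TL() below, stream 2 has minBitrate 2000 kbps, so
+// with streams 0 and 1 at their target rates, stream 2 is first enabled once
+// the leftover rate reaches 2000 * 1.2 = 2400 kbps (see OneToThreeStreams).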
+
+class MockTemporalLayers : public Vp8FrameBufferController {
+ public:
+ MOCK_METHOD(Vp8FrameConfig, NextFrameConfig, (size_t, uint32_t), (override));
+ MOCK_METHOD(void,
+ OnRatesUpdated,
+ (size_t, const std::vector<uint32_t>&, int),
+ (override));
+ MOCK_METHOD(Vp8EncoderConfig, UpdateConfiguration, (size_t), (override));
+ MOCK_METHOD(void,
+ OnEncodeDone,
+ (size_t, uint32_t, size_t, bool, int, CodecSpecificInfo*),
+ (override));
+};
+} // namespace
+
+class SimulcastRateAllocatorTest : public ::testing::TestWithParam<bool> {
+ public:
+ SimulcastRateAllocatorTest() {
+ codec_.codecType = kVideoCodecVP8;
+ codec_.minBitrate = kMinBitrateKbps;
+ codec_.maxBitrate = kLegacyScreenshareMaxBitrateKbps;
+ codec_.active = true;
+ CreateAllocator();
+ }
+ virtual ~SimulcastRateAllocatorTest() {}
+
+ template <size_t S>
+ void ExpectEqual(uint32_t (&expected)[S],
+ const std::vector<uint32_t>& actual) {
+ EXPECT_EQ(S, actual.size());
+ for (size_t i = 0; i < S; ++i)
+ EXPECT_EQ(expected[i], actual[i]) << "Mismatch at index " << i;
+ }
+
+ template <size_t S>
+ void ExpectEqual(uint32_t (&expected)[S],
+ const VideoBitrateAllocation& actual) {
+ // EXPECT_EQ(S, actual.size());
+ uint32_t sum = 0;
+ for (size_t i = 0; i < S; ++i) {
+ uint32_t layer_bitrate = actual.GetSpatialLayerSum(i);
+ if (layer_bitrate == 0) {
+ EXPECT_FALSE(actual.IsSpatialLayerUsed(i));
+ }
+ EXPECT_EQ(expected[i] * 1000U, layer_bitrate)
+ << "Mismatch at index " << i;
+ sum += layer_bitrate;
+ }
+ EXPECT_EQ(sum, actual.get_sum_bps());
+ }
+
+ void CreateAllocator(bool legacy_conference_mode = false) {
+ allocator_.reset(new SimulcastRateAllocator(codec_));
+ allocator_->SetLegacyConferenceMode(legacy_conference_mode);
+ }
+
+ void SetupCodec3SL3TL(const std::vector<bool>& active_streams) {
+ const size_t num_simulcast_layers = 3;
+ RTC_DCHECK_GE(active_streams.size(), num_simulcast_layers);
+ SetupCodec2SL3TL(active_streams);
+ codec_.numberOfSimulcastStreams = num_simulcast_layers;
+ codec_.simulcastStream[2].numberOfTemporalLayers = 3;
+ codec_.simulcastStream[2].maxBitrate = 4000;
+ codec_.simulcastStream[2].targetBitrate = 3000;
+ codec_.simulcastStream[2].minBitrate = 2000;
+ codec_.simulcastStream[2].active = active_streams[2];
+ }
+
+ void SetupCodec2SL3TL(const std::vector<bool>& active_streams) {
+ const size_t num_simulcast_layers = 2;
+ RTC_DCHECK_GE(active_streams.size(), num_simulcast_layers);
+ SetupCodec1SL3TL(active_streams);
+ codec_.numberOfSimulcastStreams = num_simulcast_layers;
+ codec_.simulcastStream[1].numberOfTemporalLayers = 3;
+ codec_.simulcastStream[1].maxBitrate = 1000;
+ codec_.simulcastStream[1].targetBitrate = 500;
+ codec_.simulcastStream[1].minBitrate = 50;
+ codec_.simulcastStream[1].active = active_streams[1];
+ }
+
+ void SetupCodec1SL3TL(const std::vector<bool>& active_streams) {
+    const size_t num_simulcast_layers = 1;
+ RTC_DCHECK_GE(active_streams.size(), num_simulcast_layers);
+ SetupCodec3TL();
+ codec_.numberOfSimulcastStreams = num_simulcast_layers;
+ codec_.simulcastStream[0].numberOfTemporalLayers = 3;
+ codec_.simulcastStream[0].maxBitrate = 500;
+ codec_.simulcastStream[0].targetBitrate = 100;
+ codec_.simulcastStream[0].minBitrate = 10;
+ codec_.simulcastStream[0].active = active_streams[0];
+ }
+
+ void SetupCodec3TL() {
+ codec_.maxBitrate = 0;
+ codec_.VP8()->numberOfTemporalLayers = 3;
+ }
+
+ VideoBitrateAllocation GetAllocation(uint32_t target_bitrate) {
+ return allocator_->Allocate(VideoBitrateAllocationParameters(
+ DataRate::KilobitsPerSec(target_bitrate), kDefaultFrameRate));
+ }
+
+ VideoBitrateAllocation GetAllocation(DataRate target_rate,
+ DataRate stable_rate) {
+ return allocator_->Allocate(VideoBitrateAllocationParameters(
+ target_rate, stable_rate, kDefaultFrameRate));
+ }
+
+ DataRate MinRate(size_t layer_index) const {
+ return DataRate::KilobitsPerSec(
+ codec_.simulcastStream[layer_index].minBitrate);
+ }
+
+ DataRate TargetRate(size_t layer_index) const {
+ return DataRate::KilobitsPerSec(
+ codec_.simulcastStream[layer_index].targetBitrate);
+ }
+
+ DataRate MaxRate(size_t layer_index) const {
+ return DataRate::KilobitsPerSec(
+ codec_.simulcastStream[layer_index].maxBitrate);
+ }
+
+ protected:
+ static const int kDefaultFrameRate = 30;
+ VideoCodec codec_;
+ std::unique_ptr<SimulcastRateAllocator> allocator_;
+};
+
+TEST_F(SimulcastRateAllocatorTest, NoSimulcastBelowMin) {
+ uint32_t expected[] = {codec_.minBitrate};
+ codec_.active = true;
+ ExpectEqual(expected, GetAllocation(codec_.minBitrate - 1));
+ ExpectEqual(expected, GetAllocation(1));
+ ExpectEqual(expected, GetAllocation(0));
+}
+
+TEST_F(SimulcastRateAllocatorTest, NoSimulcastAboveMax) {
+ uint32_t expected[] = {codec_.maxBitrate};
+ codec_.active = true;
+ ExpectEqual(expected, GetAllocation(codec_.maxBitrate + 1));
+ ExpectEqual(expected, GetAllocation(std::numeric_limits<uint32_t>::max()));
+}
+
+TEST_F(SimulcastRateAllocatorTest, NoSimulcastNoMax) {
+ const uint32_t kMax = VideoBitrateAllocation::kMaxBitrateBps / 1000;
+ codec_.active = true;
+ codec_.maxBitrate = 0;
+ CreateAllocator();
+
+ uint32_t expected[] = {kMax};
+ ExpectEqual(expected, GetAllocation(kMax));
+}
+
+TEST_F(SimulcastRateAllocatorTest, NoSimulcastWithinLimits) {
+ codec_.active = true;
+ for (uint32_t bitrate = codec_.minBitrate; bitrate <= codec_.maxBitrate;
+ ++bitrate) {
+ uint32_t expected[] = {bitrate};
+ ExpectEqual(expected, GetAllocation(bitrate));
+ }
+}
+
+// Tests that when we aren't using simulcast and the codec is marked inactive,
+// no bitrate will be allocated.
+TEST_F(SimulcastRateAllocatorTest, NoSimulcastInactive) {
+ codec_.active = false;
+ uint32_t expected[] = {0};
+ CreateAllocator();
+
+ ExpectEqual(expected, GetAllocation(kMinBitrateKbps - 10));
+ ExpectEqual(expected, GetAllocation(kLegacyScreenshareTargetBitrateKbps));
+ ExpectEqual(expected, GetAllocation(kLegacyScreenshareMaxBitrateKbps + 10));
+}
+
+TEST_F(SimulcastRateAllocatorTest, SingleSimulcastBelowMin) {
+  // With simulcast, use the min bitrate from the simulcast stream config
+  // instead of the global one.
+ codec_.numberOfSimulcastStreams = 1;
+ const uint32_t kMin = codec_.minBitrate - 10;
+ codec_.simulcastStream[0].minBitrate = kMin;
+ codec_.simulcastStream[0].targetBitrate = kLegacyScreenshareTargetBitrateKbps;
+ codec_.simulcastStream[0].active = true;
+ CreateAllocator();
+
+ uint32_t expected[] = {kMin};
+ ExpectEqual(expected, GetAllocation(kMin - 1));
+ ExpectEqual(expected, GetAllocation(1));
+ ExpectEqual(expected, GetAllocation(0));
+}
+
+TEST_F(SimulcastRateAllocatorTest, SignalsBwLimited) {
+ // Enough to enable all layers.
+ const int kVeryBigBitrate = 100000;
+
+  // With simulcast, use the min bitrate from the simulcast stream config
+  // instead of the global one.
+ SetupCodec3SL3TL({true, true, true});
+ CreateAllocator();
+
+ EXPECT_TRUE(
+ GetAllocation(codec_.simulcastStream[0].minBitrate - 10).is_bw_limited());
+ EXPECT_TRUE(
+ GetAllocation(codec_.simulcastStream[0].targetBitrate).is_bw_limited());
+ EXPECT_TRUE(GetAllocation(codec_.simulcastStream[0].targetBitrate +
+ codec_.simulcastStream[1].minBitrate)
+ .is_bw_limited());
+ EXPECT_FALSE(
+ GetAllocation(
+ codec_.simulcastStream[0].targetBitrate +
+ codec_.simulcastStream[1].targetBitrate +
+ static_cast<uint32_t>(
+ codec_.simulcastStream[2].minBitrate * kDefaultHysteresis + 0.5))
+ .is_bw_limited());
+ EXPECT_FALSE(GetAllocation(kVeryBigBitrate).is_bw_limited());
+}
+
+TEST_F(SimulcastRateAllocatorTest, SingleSimulcastAboveMax) {
+ codec_.numberOfSimulcastStreams = 1;
+ codec_.simulcastStream[0].minBitrate = kMinBitrateKbps;
+ const uint32_t kMax = codec_.simulcastStream[0].maxBitrate + 1000;
+ codec_.simulcastStream[0].maxBitrate = kMax;
+ codec_.simulcastStream[0].active = true;
+ CreateAllocator();
+
+ uint32_t expected[] = {kMax};
+ ExpectEqual(expected, GetAllocation(kMax));
+ ExpectEqual(expected, GetAllocation(kMax + 1));
+ ExpectEqual(expected, GetAllocation(std::numeric_limits<uint32_t>::max()));
+}
+
+TEST_F(SimulcastRateAllocatorTest, SingleSimulcastWithinLimits) {
+ codec_.numberOfSimulcastStreams = 1;
+ codec_.simulcastStream[0].minBitrate = kMinBitrateKbps;
+ codec_.simulcastStream[0].targetBitrate = kLegacyScreenshareTargetBitrateKbps;
+ codec_.simulcastStream[0].maxBitrate = kLegacyScreenshareMaxBitrateKbps;
+ codec_.simulcastStream[0].active = true;
+ CreateAllocator();
+
+ for (uint32_t bitrate = kMinBitrateKbps;
+ bitrate <= kLegacyScreenshareMaxBitrateKbps; ++bitrate) {
+ uint32_t expected[] = {bitrate};
+ ExpectEqual(expected, GetAllocation(bitrate));
+ }
+}
+
+TEST_F(SimulcastRateAllocatorTest, Regular3TLTemporalRateAllocation) {
+ SetupCodec3SL3TL({true, true, true});
+ CreateAllocator();
+
+ const VideoBitrateAllocation alloc = GetAllocation(kMinBitrateKbps);
+ // 40/20/40.
+ EXPECT_EQ(static_cast<uint32_t>(0.4 * kMinBitrateKbps),
+ alloc.GetBitrate(0, 0) / 1000);
+ EXPECT_EQ(static_cast<uint32_t>(0.2 * kMinBitrateKbps),
+ alloc.GetBitrate(0, 1) / 1000);
+ EXPECT_EQ(static_cast<uint32_t>(0.4 * kMinBitrateKbps),
+ alloc.GetBitrate(0, 2) / 1000);
+}
+
+TEST_F(SimulcastRateAllocatorTest, BaseHeavy3TLTemporalRateAllocation) {
+ test::ScopedFieldTrials field_trials(
+ "WebRTC-UseBaseHeavyVP8TL3RateAllocation/Enabled/");
+
+ SetupCodec3SL3TL({true, true, true});
+ CreateAllocator();
+
+ const VideoBitrateAllocation alloc = GetAllocation(kMinBitrateKbps);
+ // 60/20/20.
+ EXPECT_EQ(static_cast<uint32_t>(0.6 * kMinBitrateKbps),
+ alloc.GetBitrate(0, 0) / 1000);
+ EXPECT_EQ(static_cast<uint32_t>(0.2 * kMinBitrateKbps),
+ alloc.GetBitrate(0, 1) / 1000);
+ EXPECT_EQ(static_cast<uint32_t>(0.2 * kMinBitrateKbps),
+ alloc.GetBitrate(0, 2) / 1000);
+}
+
+TEST_F(SimulcastRateAllocatorTest, SingleSimulcastInactive) {
+ codec_.numberOfSimulcastStreams = 1;
+ codec_.simulcastStream[0].minBitrate = kMinBitrateKbps;
+ codec_.simulcastStream[0].targetBitrate = kLegacyScreenshareTargetBitrateKbps;
+ codec_.simulcastStream[0].maxBitrate = kLegacyScreenshareMaxBitrateKbps;
+ codec_.simulcastStream[0].active = false;
+ CreateAllocator();
+
+ uint32_t expected[] = {0};
+ ExpectEqual(expected, GetAllocation(kMinBitrateKbps - 10));
+ ExpectEqual(expected, GetAllocation(kLegacyScreenshareTargetBitrateKbps));
+ ExpectEqual(expected, GetAllocation(kLegacyScreenshareMaxBitrateKbps + 10));
+}
+
+TEST_F(SimulcastRateAllocatorTest, OneToThreeStreams) {
+ SetupCodec3SL3TL({true, true, true});
+ CreateAllocator();
+
+ {
+ // Single stream, min bitrate.
+ const uint32_t bitrate = codec_.simulcastStream[0].minBitrate;
+ uint32_t expected[] = {bitrate, 0, 0};
+ ExpectEqual(expected, GetAllocation(bitrate));
+ }
+
+ {
+ // Single stream at target bitrate.
+ const uint32_t bitrate = codec_.simulcastStream[0].targetBitrate;
+ uint32_t expected[] = {bitrate, 0, 0};
+ ExpectEqual(expected, GetAllocation(bitrate));
+ }
+
+ uint32_t kMinInitialRateTwoLayers =
+ codec_.simulcastStream[0].targetBitrate +
+ static_cast<uint32_t>(codec_.simulcastStream[1].minBitrate *
+ kDefaultHysteresis);
+ {
+ // Bitrate above target for first stream, but below min for the next one.
+ const uint32_t bitrate = kMinInitialRateTwoLayers - 1;
+ uint32_t expected[] = {bitrate, 0, 0};
+ ExpectEqual(expected, GetAllocation(bitrate));
+ }
+
+ {
+ // Just enough for two streams.
+ const uint32_t bitrate = kMinInitialRateTwoLayers;
+ uint32_t expected[] = {
+ codec_.simulcastStream[0].targetBitrate,
+ kMinInitialRateTwoLayers - codec_.simulcastStream[0].targetBitrate, 0};
+ ExpectEqual(expected, GetAllocation(bitrate));
+ }
+
+ {
+ // Second stream maxed out, but not enough for third.
+ const uint32_t bitrate = codec_.simulcastStream[0].targetBitrate +
+ codec_.simulcastStream[1].maxBitrate;
+ uint32_t expected[] = {codec_.simulcastStream[0].targetBitrate,
+ codec_.simulcastStream[1].maxBitrate, 0};
+ ExpectEqual(expected, GetAllocation(bitrate));
+ }
+
+ uint32_t kMinInitialRateThreeLayers =
+ codec_.simulcastStream[0].targetBitrate +
+ codec_.simulcastStream[1].targetBitrate +
+ static_cast<uint32_t>(codec_.simulcastStream[2].minBitrate *
+ kDefaultHysteresis);
+ {
+ // First two streams maxed out, but not enough for third. Nowhere to put
+ // remaining bits.
+ const uint32_t bitrate = kMinInitialRateThreeLayers - 1;
+ uint32_t expected[] = {codec_.simulcastStream[0].targetBitrate,
+ codec_.simulcastStream[1].maxBitrate, 0};
+ ExpectEqual(expected, GetAllocation(bitrate));
+ }
+
+ {
+ // Just enough for all three streams.
+ const uint32_t bitrate = kMinInitialRateThreeLayers;
+ uint32_t expected[] = {
+ codec_.simulcastStream[0].targetBitrate,
+ codec_.simulcastStream[1].targetBitrate,
+ static_cast<uint32_t>(codec_.simulcastStream[2].minBitrate *
+ kDefaultHysteresis)};
+ ExpectEqual(expected, GetAllocation(bitrate));
+ }
+
+ {
+ // Third maxed out.
+ const uint32_t bitrate = codec_.simulcastStream[0].targetBitrate +
+ codec_.simulcastStream[1].targetBitrate +
+ codec_.simulcastStream[2].maxBitrate;
+ uint32_t expected[] = {codec_.simulcastStream[0].targetBitrate,
+ codec_.simulcastStream[1].targetBitrate,
+ codec_.simulcastStream[2].maxBitrate};
+ ExpectEqual(expected, GetAllocation(bitrate));
+ }
+
+ {
+ // Enough to max out all streams which will allocate the target amount to
+ // the lower streams.
+ const uint32_t bitrate = codec_.simulcastStream[0].maxBitrate +
+ codec_.simulcastStream[1].maxBitrate +
+ codec_.simulcastStream[2].maxBitrate;
+ uint32_t expected[] = {codec_.simulcastStream[0].targetBitrate,
+ codec_.simulcastStream[1].targetBitrate,
+ codec_.simulcastStream[2].maxBitrate};
+ ExpectEqual(expected, GetAllocation(bitrate));
+ }
+}
+
+// If all three simulcast streams are inactive, none of them should be
+// allocated bitrate.
+TEST_F(SimulcastRateAllocatorTest, ThreeStreamsInactive) {
+ SetupCodec3SL3TL({false, false, false});
+ CreateAllocator();
+
+ // Just enough to allocate the min.
+ const uint32_t min_bitrate = codec_.simulcastStream[0].minBitrate +
+ codec_.simulcastStream[1].minBitrate +
+ codec_.simulcastStream[2].minBitrate;
+ // Enough bitrate to allocate target to all streams.
+ const uint32_t target_bitrate = codec_.simulcastStream[0].targetBitrate +
+ codec_.simulcastStream[1].targetBitrate +
+ codec_.simulcastStream[2].targetBitrate;
+ // Enough bitrate to allocate max to all streams.
+ const uint32_t max_bitrate = codec_.simulcastStream[0].maxBitrate +
+ codec_.simulcastStream[1].maxBitrate +
+ codec_.simulcastStream[2].maxBitrate;
+ uint32_t expected[] = {0, 0, 0};
+ ExpectEqual(expected, GetAllocation(0));
+ ExpectEqual(expected, GetAllocation(min_bitrate));
+ ExpectEqual(expected, GetAllocation(target_bitrate));
+ ExpectEqual(expected, GetAllocation(max_bitrate));
+}
+
+// If there are two simulcast streams, we expect the high active stream to be
+// allocated as if it is a single active stream.
+TEST_F(SimulcastRateAllocatorTest, TwoStreamsLowInactive) {
+ SetupCodec2SL3TL({false, true});
+ CreateAllocator();
+
+ const uint32_t kActiveStreamMinBitrate = codec_.simulcastStream[1].minBitrate;
+ const uint32_t kActiveStreamTargetBitrate =
+ codec_.simulcastStream[1].targetBitrate;
+ const uint32_t kActiveStreamMaxBitrate = codec_.simulcastStream[1].maxBitrate;
+ {
+ // Expect that the stream is always allocated its min bitrate.
+ uint32_t expected[] = {0, kActiveStreamMinBitrate};
+ ExpectEqual(expected, GetAllocation(0));
+ ExpectEqual(expected, GetAllocation(kActiveStreamMinBitrate - 10));
+ ExpectEqual(expected, GetAllocation(kActiveStreamMinBitrate));
+ }
+
+ {
+ // The stream should be allocated its target bitrate.
+ uint32_t expected[] = {0, kActiveStreamTargetBitrate};
+ ExpectEqual(expected, GetAllocation(kActiveStreamTargetBitrate));
+ }
+
+ {
+ // The stream should be allocated its max if the target input is sufficient.
+ uint32_t expected[] = {0, kActiveStreamMaxBitrate};
+ ExpectEqual(expected, GetAllocation(kActiveStreamMaxBitrate));
+ ExpectEqual(expected, GetAllocation(std::numeric_limits<uint32_t>::max()));
+ }
+}
+
+// If there are two simulcast streams, we expect the low active stream to be
+// allocated as if it is a single active stream.
+TEST_F(SimulcastRateAllocatorTest, TwoStreamsHighInactive) {
+ SetupCodec2SL3TL({true, false});
+ CreateAllocator();
+
+ const uint32_t kActiveStreamMinBitrate = codec_.simulcastStream[0].minBitrate;
+ const uint32_t kActiveStreamTargetBitrate =
+ codec_.simulcastStream[0].targetBitrate;
+ const uint32_t kActiveStreamMaxBitrate = codec_.simulcastStream[0].maxBitrate;
+ {
+ // Expect that the stream is always allocated its min bitrate.
+ uint32_t expected[] = {kActiveStreamMinBitrate, 0};
+ ExpectEqual(expected, GetAllocation(0));
+ ExpectEqual(expected, GetAllocation(kActiveStreamMinBitrate - 10));
+ ExpectEqual(expected, GetAllocation(kActiveStreamMinBitrate));
+ }
+
+ {
+ // The stream should be allocated its target bitrate.
+ uint32_t expected[] = {kActiveStreamTargetBitrate, 0};
+ ExpectEqual(expected, GetAllocation(kActiveStreamTargetBitrate));
+ }
+
+ {
+    // The stream should be allocated its max if the target input is sufficient.
+ uint32_t expected[] = {kActiveStreamMaxBitrate, 0};
+ ExpectEqual(expected, GetAllocation(kActiveStreamMaxBitrate));
+ ExpectEqual(expected, GetAllocation(std::numeric_limits<uint32_t>::max()));
+ }
+}
+
+// If there are three simulcast streams and the middle stream is inactive, the
+// other two streams should be allocated bitrate the same as if they are two
+// active simulcast streams.
+TEST_F(SimulcastRateAllocatorTest, ThreeStreamsMiddleInactive) {
+ SetupCodec3SL3TL({true, false, true});
+ CreateAllocator();
+
+ {
+ const uint32_t kLowStreamMinBitrate = codec_.simulcastStream[0].minBitrate;
+ // The lowest stream should always be allocated its minimum bitrate.
+ uint32_t expected[] = {kLowStreamMinBitrate, 0, 0};
+ ExpectEqual(expected, GetAllocation(0));
+ ExpectEqual(expected, GetAllocation(kLowStreamMinBitrate - 10));
+ ExpectEqual(expected, GetAllocation(kLowStreamMinBitrate));
+ }
+
+ {
+ // The lowest stream gets its target bitrate.
+ uint32_t expected[] = {codec_.simulcastStream[0].targetBitrate, 0, 0};
+ ExpectEqual(expected,
+ GetAllocation(codec_.simulcastStream[0].targetBitrate));
+ }
+
+ {
+ // The lowest stream gets its max bitrate, but not enough for the high
+ // stream.
+ const uint32_t bitrate = codec_.simulcastStream[0].targetBitrate +
+ codec_.simulcastStream[2].minBitrate - 1;
+ uint32_t expected[] = {codec_.simulcastStream[0].maxBitrate, 0, 0};
+ ExpectEqual(expected, GetAllocation(bitrate));
+ }
+
+ {
+ // Both active streams get allocated target bitrate.
+ const uint32_t bitrate = codec_.simulcastStream[0].targetBitrate +
+ codec_.simulcastStream[2].targetBitrate;
+ uint32_t expected[] = {codec_.simulcastStream[0].targetBitrate, 0,
+ codec_.simulcastStream[2].targetBitrate};
+ ExpectEqual(expected, GetAllocation(bitrate));
+ }
+
+ {
+ // Lowest stream gets its target bitrate, high stream gets its max bitrate.
+ uint32_t bitrate = codec_.simulcastStream[0].targetBitrate +
+ codec_.simulcastStream[2].maxBitrate;
+ uint32_t expected[] = {codec_.simulcastStream[0].targetBitrate, 0,
+ codec_.simulcastStream[2].maxBitrate};
+ ExpectEqual(expected, GetAllocation(bitrate));
+ ExpectEqual(expected, GetAllocation(bitrate + 10));
+ ExpectEqual(expected, GetAllocation(std::numeric_limits<uint32_t>::max()));
+ }
+}
+
+TEST_F(SimulcastRateAllocatorTest, NonConferenceModeScreenshare) {
+ codec_.mode = VideoCodecMode::kScreensharing;
+ SetupCodec3SL3TL({true, true, true});
+ CreateAllocator();
+
+  // Make sure we have enough bitrate for all 3 simulcast layers.
+ const uint32_t bitrate = codec_.simulcastStream[0].maxBitrate +
+ codec_.simulcastStream[1].maxBitrate +
+ codec_.simulcastStream[2].maxBitrate;
+ const VideoBitrateAllocation alloc = GetAllocation(bitrate);
+
+ EXPECT_EQ(alloc.GetTemporalLayerAllocation(0).size(), 3u);
+ EXPECT_EQ(alloc.GetTemporalLayerAllocation(1).size(), 3u);
+ EXPECT_EQ(alloc.GetTemporalLayerAllocation(2).size(), 3u);
+}
+
+TEST_F(SimulcastRateAllocatorTest, StableRate) {
+ webrtc::test::ScopedFieldTrials field_trials(
+ "WebRTC-StableTargetRate/"
+ "enabled:true,"
+ "video_hysteresis_factor:1.1/");
+
+ SetupCodec3SL3TL({true, true, true});
+ CreateAllocator();
+
+  // Let the volatile rate always be enough for all streams; in this test we
+  // are only interested in how the stable rate affects layer enablement.
+ const DataRate volatile_rate =
+ (TargetRate(0) + TargetRate(1) + MinRate(2)) * 1.1;
+
+ {
+ // On the first call to a new SimulcastRateAllocator instance, hysteresis
+ // is disabled, but stable rate still caps layers.
+ uint32_t expected[] = {TargetRate(0).kbps<uint32_t>(),
+ MaxRate(1).kbps<uint32_t>()};
+ ExpectEqual(expected,
+ GetAllocation(volatile_rate, TargetRate(0) + MinRate(1)));
+ }
+
+ {
+ // Let stable rate go to a bitrate below what is needed for two streams.
+ uint32_t expected[] = {MaxRate(0).kbps<uint32_t>(), 0};
+ ExpectEqual(expected,
+ GetAllocation(volatile_rate, TargetRate(0) + MinRate(1) -
+ DataRate::BitsPerSec(1)));
+ }
+
+ {
+    // Don't enable the stream; we need to get above the hysteresis threshold.
+ uint32_t expected[] = {MaxRate(0).kbps<uint32_t>(), 0};
+ ExpectEqual(expected,
+ GetAllocation(volatile_rate, TargetRate(0) + MinRate(1)));
+ }
+
+ {
+ // Above threshold with hysteresis, enable second stream.
+ uint32_t expected[] = {TargetRate(0).kbps<uint32_t>(),
+ MaxRate(1).kbps<uint32_t>()};
+ ExpectEqual(expected, GetAllocation(volatile_rate,
+ (TargetRate(0) + MinRate(1)) * 1.1));
+ }
+
+ {
+    // Enough to enable all three layers.
+ uint32_t expected[] = {
+ TargetRate(0).kbps<uint32_t>(), TargetRate(1).kbps<uint32_t>(),
+ (volatile_rate - TargetRate(0) - TargetRate(1)).kbps<uint32_t>()};
+ ExpectEqual(expected, GetAllocation(volatile_rate, volatile_rate));
+ }
+
+ {
+ // Drop hysteresis, all three still on.
+ uint32_t expected[] = {
+ TargetRate(0).kbps<uint32_t>(), TargetRate(1).kbps<uint32_t>(),
+ (volatile_rate - TargetRate(0) - TargetRate(1)).kbps<uint32_t>()};
+ ExpectEqual(expected,
+ GetAllocation(volatile_rate,
+ TargetRate(0) + TargetRate(1) + MinRate(2)));
+ }
+}
+
+class ScreenshareRateAllocationTest : public SimulcastRateAllocatorTest {
+ public:
+ void SetupConferenceScreenshare(bool use_simulcast, bool active = true) {
+ codec_.mode = VideoCodecMode::kScreensharing;
+ codec_.minBitrate = kMinBitrateKbps;
+ codec_.maxBitrate =
+ kLegacyScreenshareMaxBitrateKbps + kSimulcastScreenshareMaxBitrateKbps;
+ if (use_simulcast) {
+ codec_.numberOfSimulcastStreams = 2;
+ codec_.simulcastStream[0].minBitrate = kMinBitrateKbps;
+ codec_.simulcastStream[0].targetBitrate =
+ kLegacyScreenshareTargetBitrateKbps;
+ codec_.simulcastStream[0].maxBitrate = kLegacyScreenshareMaxBitrateKbps;
+ codec_.simulcastStream[0].numberOfTemporalLayers = 2;
+ codec_.simulcastStream[0].active = active;
+
+ codec_.simulcastStream[1].minBitrate =
+ kSimulcastScreenshareMinBitrateKbps;
+ codec_.simulcastStream[1].targetBitrate =
+ kSimulcastScreenshareMaxBitrateKbps;
+ codec_.simulcastStream[1].maxBitrate =
+ kSimulcastScreenshareMaxBitrateKbps;
+ codec_.simulcastStream[1].numberOfTemporalLayers = 2;
+ codec_.simulcastStream[1].active = active;
+ } else {
+ codec_.numberOfSimulcastStreams = 0;
+ codec_.VP8()->numberOfTemporalLayers = 2;
+ codec_.active = active;
+ }
+ }
+};
+
+INSTANTIATE_TEST_SUITE_P(ScreenshareTest,
+ ScreenshareRateAllocationTest,
+ ::testing::Bool());
+
+TEST_P(ScreenshareRateAllocationTest, ConferenceBitrateBelowTl0) {
+ SetupConferenceScreenshare(GetParam());
+ CreateAllocator(true);
+
+ VideoBitrateAllocation allocation =
+ allocator_->Allocate(VideoBitrateAllocationParameters(
+ kLegacyScreenshareTargetBitrateKbps * 1000, kFramerateFps));
+
+ // All allocation should go in TL0.
+ EXPECT_EQ(kLegacyScreenshareTargetBitrateKbps, allocation.get_sum_kbps());
+ EXPECT_EQ(kLegacyScreenshareTargetBitrateKbps,
+ allocation.GetBitrate(0, 0) / 1000);
+ EXPECT_EQ(allocation.is_bw_limited(), GetParam());
+}
+
+TEST_P(ScreenshareRateAllocationTest, ConferenceBitrateAboveTl0) {
+ SetupConferenceScreenshare(GetParam());
+ CreateAllocator(true);
+
+ uint32_t target_bitrate_kbps =
+ (kLegacyScreenshareTargetBitrateKbps + kLegacyScreenshareMaxBitrateKbps) /
+ 2;
+ VideoBitrateAllocation allocation =
+ allocator_->Allocate(VideoBitrateAllocationParameters(
+ target_bitrate_kbps * 1000, kFramerateFps));
+
+ // Fill TL0, then put the rest in TL1.
+ EXPECT_EQ(target_bitrate_kbps, allocation.get_sum_kbps());
+ EXPECT_EQ(kLegacyScreenshareTargetBitrateKbps,
+ allocation.GetBitrate(0, 0) / 1000);
+ EXPECT_EQ(target_bitrate_kbps - kLegacyScreenshareTargetBitrateKbps,
+ allocation.GetBitrate(0, 1) / 1000);
+ EXPECT_EQ(allocation.is_bw_limited(), GetParam());
+}
+
+TEST_F(ScreenshareRateAllocationTest, ConferenceBitrateAboveTl1) {
+ // This test is only for the non-simulcast case.
+ SetupConferenceScreenshare(false);
+ CreateAllocator(true);
+
+ VideoBitrateAllocation allocation =
+ allocator_->Allocate(VideoBitrateAllocationParameters(
+ kLegacyScreenshareMaxBitrateKbps * 2000, kFramerateFps));
+
+ // Fill both TL0 and TL1, but no more.
+ EXPECT_EQ(kLegacyScreenshareMaxBitrateKbps, allocation.get_sum_kbps());
+ EXPECT_EQ(kLegacyScreenshareTargetBitrateKbps,
+ allocation.GetBitrate(0, 0) / 1000);
+ EXPECT_EQ(
+ kLegacyScreenshareMaxBitrateKbps - kLegacyScreenshareTargetBitrateKbps,
+ allocation.GetBitrate(0, 1) / 1000);
+ EXPECT_FALSE(allocation.is_bw_limited());
+}
+
+// Tests that when the screenshare stream is inactive, it is allocated zero
+// bitrate for all layers.
+TEST_P(ScreenshareRateAllocationTest, InactiveScreenshare) {
+ SetupConferenceScreenshare(GetParam(), false);
+ CreateAllocator();
+
+ // Enough bitrate for TL0 and TL1.
+ uint32_t target_bitrate_kbps =
+ (kLegacyScreenshareTargetBitrateKbps + kLegacyScreenshareMaxBitrateKbps) /
+ 2;
+ VideoBitrateAllocation allocation =
+ allocator_->Allocate(VideoBitrateAllocationParameters(
+ target_bitrate_kbps * 1000, kFramerateFps));
+
+ EXPECT_EQ(0U, allocation.get_sum_kbps());
+}
+
+TEST_F(ScreenshareRateAllocationTest, Hysteresis) {
+ // This test is only for the simulcast case.
+ SetupConferenceScreenshare(true);
+ CreateAllocator();
+
+  // The bitrate at which we would normally enable the upper simulcast stream.
+  // Note: all rates in this test are in kbps, matching simulcastStream units.
+  const uint32_t default_enable_rate_kbps =
+      codec_.simulcastStream[0].targetBitrate +
+      codec_.simulcastStream[1].minBitrate;
+  const uint32_t enable_rate_with_hysteresis_kbps =
+      (default_enable_rate_kbps * 135) / 100;
+
+  {
+    // On the first call to a new SimulcastRateAllocator instance, hysteresis
+    // is disabled.
+    const uint32_t bitrate = default_enable_rate_kbps;
+    uint32_t expected[] = {codec_.simulcastStream[0].targetBitrate,
+                           codec_.simulcastStream[1].minBitrate};
+    ExpectEqual(expected, GetAllocation(bitrate));
+  }
+
+  {
+    // Go down to a bitrate below what is needed for two streams.
+    const uint32_t bitrate = default_enable_rate_kbps - 1;
+    uint32_t expected[] = {bitrate, 0};
+    ExpectEqual(expected, GetAllocation(bitrate));
+  }
+
+  {
+    // Don't enable the stream; we need to get above the hysteresis threshold.
+    const uint32_t bitrate = default_enable_rate_kbps;
+    uint32_t expected[] = {bitrate, 0};
+    ExpectEqual(expected, GetAllocation(bitrate));
+  }
+
+  {
+    // Above threshold, enable second stream.
+    const uint32_t bitrate = enable_rate_with_hysteresis_kbps;
+    uint32_t expected[] = {codec_.simulcastStream[0].targetBitrate,
+                           enable_rate_with_hysteresis_kbps -
+                               codec_.simulcastStream[0].targetBitrate};
+    ExpectEqual(expected, GetAllocation(bitrate));
+  }
+
+  {
+    // Go down again, still keep the second stream alive.
+    const uint32_t bitrate = default_enable_rate_kbps;
+    uint32_t expected[] = {codec_.simulcastStream[0].targetBitrate,
+                           codec_.simulcastStream[1].minBitrate};
+    ExpectEqual(expected, GetAllocation(bitrate));
+  }
+
+  {
+    // Go down below the default enable rate; the second stream is shut down
+    // again.
+    const uint32_t bitrate = default_enable_rate_kbps - 1;
+    uint32_t expected[] = {bitrate, 0};
+    ExpectEqual(expected, GetAllocation(bitrate));
+  }
+
+  {
+    // Go up again; hysteresis is blocking us this time.
+    const uint32_t bitrate = default_enable_rate_kbps;
+    uint32_t expected[] = {bitrate, 0};
+    ExpectEqual(expected, GetAllocation(bitrate));
+  }
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/utility/simulcast_test_fixture_impl.cc b/third_party/libwebrtc/modules/video_coding/utility/simulcast_test_fixture_impl.cc
new file mode 100644
index 0000000000..84cd2e1589
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/utility/simulcast_test_fixture_impl.cc
@@ -0,0 +1,918 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/utility/simulcast_test_fixture_impl.h"
+
+#include <algorithm>
+#include <map>
+#include <memory>
+#include <vector>
+
+#include "api/video/encoded_image.h"
+#include "api/video_codecs/sdp_video_format.h"
+#include "api/video_codecs/video_encoder.h"
+#include "common_video/libyuv/include/webrtc_libyuv.h"
+#include "modules/video_coding/include/video_codec_interface.h"
+#include "modules/video_coding/include/video_coding_defines.h"
+#include "rtc_base/checks.h"
+#include "test/gtest.h"
+
+using ::testing::_;
+using ::testing::AllOf;
+using ::testing::Field;
+using ::testing::Return;
+
+namespace webrtc {
+namespace test {
+
+namespace {
+
+const int kDefaultWidth = 1280;
+const int kDefaultHeight = 720;
+const int kNumberOfSimulcastStreams = 3;
+const int kColorY = 66;
+const int kColorU = 22;
+const int kColorV = 33;
+const int kMaxBitrates[kNumberOfSimulcastStreams] = {150, 600, 1200};
+const int kMinBitrates[kNumberOfSimulcastStreams] = {50, 150, 600};
+const int kTargetBitrates[kNumberOfSimulcastStreams] = {100, 450, 1000};
+const float kMaxFramerates[kNumberOfSimulcastStreams] = {30, 30, 30};
+const int kDefaultTemporalLayerProfile[3] = {3, 3, 3};
+const int kNoTemporalLayerProfile[3] = {0, 0, 0};
+
+const VideoEncoder::Capabilities kCapabilities(false);
+const VideoEncoder::Settings kSettings(kCapabilities, 1, 1200);
+
+template <typename T>
+void SetExpectedValues3(T value0, T value1, T value2, T* expected_values) {
+ expected_values[0] = value0;
+ expected_values[1] = value1;
+ expected_values[2] = value2;
+}
+
+enum PlaneType {
+ kYPlane = 0,
+ kUPlane = 1,
+ kVPlane = 2,
+ kNumOfPlanes = 3,
+};
+
+} // namespace
+
+class SimulcastTestFixtureImpl::TestEncodedImageCallback
+ : public EncodedImageCallback {
+ public:
+ TestEncodedImageCallback() {
+ memset(temporal_layer_, -1, sizeof(temporal_layer_));
+ memset(layer_sync_, false, sizeof(layer_sync_));
+ }
+
+ Result OnEncodedImage(const EncodedImage& encoded_image,
+ const CodecSpecificInfo* codec_specific_info) override {
+ bool is_vp8 = (codec_specific_info->codecType == kVideoCodecVP8);
+ bool is_h264 = (codec_specific_info->codecType == kVideoCodecH264);
+ // Only store the base layer.
+ if (encoded_image.SpatialIndex().value_or(0) == 0) {
+ if (encoded_image._frameType == VideoFrameType::kVideoFrameKey) {
+ encoded_key_frame_.SetEncodedData(EncodedImageBuffer::Create(
+ encoded_image.data(), encoded_image.size()));
+ encoded_key_frame_._frameType = VideoFrameType::kVideoFrameKey;
+ } else {
+ encoded_frame_.SetEncodedData(EncodedImageBuffer::Create(
+ encoded_image.data(), encoded_image.size()));
+ }
+ }
+ if (is_vp8) {
+ layer_sync_[encoded_image.SpatialIndex().value_or(0)] =
+ codec_specific_info->codecSpecific.VP8.layerSync;
+ temporal_layer_[encoded_image.SpatialIndex().value_or(0)] =
+ codec_specific_info->codecSpecific.VP8.temporalIdx;
+ } else if (is_h264) {
+ layer_sync_[encoded_image.SpatialIndex().value_or(0)] =
+ codec_specific_info->codecSpecific.H264.base_layer_sync;
+ temporal_layer_[encoded_image.SpatialIndex().value_or(0)] =
+ codec_specific_info->codecSpecific.H264.temporal_idx;
+ }
+ return Result(Result::OK, encoded_image.Timestamp());
+ }
+ // This method only makes sense for VP8.
+ void GetLastEncodedFrameInfo(int* temporal_layer,
+ bool* layer_sync,
+ int stream) {
+ *temporal_layer = temporal_layer_[stream];
+ *layer_sync = layer_sync_[stream];
+ }
+ void GetLastEncodedKeyFrame(EncodedImage* encoded_key_frame) {
+ *encoded_key_frame = encoded_key_frame_;
+ }
+ void GetLastEncodedFrame(EncodedImage* encoded_frame) {
+ *encoded_frame = encoded_frame_;
+ }
+
+ private:
+ EncodedImage encoded_key_frame_;
+ EncodedImage encoded_frame_;
+ int temporal_layer_[kNumberOfSimulcastStreams];
+ bool layer_sync_[kNumberOfSimulcastStreams];
+};
+
+class SimulcastTestFixtureImpl::TestDecodedImageCallback
+ : public DecodedImageCallback {
+ public:
+ TestDecodedImageCallback() : decoded_frames_(0) {}
+ int32_t Decoded(VideoFrame& decoded_image) override {
+ rtc::scoped_refptr<I420BufferInterface> i420_buffer =
+ decoded_image.video_frame_buffer()->ToI420();
+ for (int i = 0; i < decoded_image.width(); ++i) {
+ EXPECT_NEAR(kColorY, i420_buffer->DataY()[i], 1);
+ }
+
+ // TODO(mikhal): Verify the difference between U,V and the original.
+ for (int i = 0; i < i420_buffer->ChromaWidth(); ++i) {
+ EXPECT_NEAR(kColorU, i420_buffer->DataU()[i], 4);
+ EXPECT_NEAR(kColorV, i420_buffer->DataV()[i], 4);
+ }
+ decoded_frames_++;
+ return 0;
+ }
+ int32_t Decoded(VideoFrame& decoded_image, int64_t decode_time_ms) override {
+ RTC_DCHECK_NOTREACHED();
+ return -1;
+ }
+ void Decoded(VideoFrame& decoded_image,
+ absl::optional<int32_t> decode_time_ms,
+ absl::optional<uint8_t> qp) override {
+ Decoded(decoded_image);
+ }
+ int DecodedFrames() { return decoded_frames_; }
+
+ private:
+ int decoded_frames_;
+};
+
+namespace {
+
+void SetPlane(uint8_t* data, uint8_t value, int width, int height, int stride) {
+ for (int i = 0; i < height; i++, data += stride) {
+    // Fill only the visible width with `value` and zero the remainder of the
+    // allocated stride; this makes it easier to distinguish between image
+    // size and frame size (accounting for stride).
+ memset(data, value, width);
+ memset(data + width, 0, stride - width);
+ }
+}
+
+// Fills in an I420Buffer from `plane_colors`.
+void CreateImage(const rtc::scoped_refptr<I420Buffer>& buffer,
+ int plane_colors[kNumOfPlanes]) {
+ SetPlane(buffer->MutableDataY(), plane_colors[0], buffer->width(),
+ buffer->height(), buffer->StrideY());
+
+ SetPlane(buffer->MutableDataU(), plane_colors[1], buffer->ChromaWidth(),
+ buffer->ChromaHeight(), buffer->StrideU());
+
+ SetPlane(buffer->MutableDataV(), plane_colors[2], buffer->ChromaWidth(),
+ buffer->ChromaHeight(), buffer->StrideV());
+}
+
+void ConfigureStream(int width,
+ int height,
+ int max_bitrate,
+ int min_bitrate,
+ int target_bitrate,
+ float max_framerate,
+ SimulcastStream* stream,
+ int num_temporal_layers) {
+ RTC_DCHECK(stream);
+ stream->width = width;
+ stream->height = height;
+ stream->maxBitrate = max_bitrate;
+ stream->minBitrate = min_bitrate;
+ stream->targetBitrate = target_bitrate;
+ stream->maxFramerate = max_framerate;
+ if (num_temporal_layers >= 0) {
+ stream->numberOfTemporalLayers = num_temporal_layers;
+ }
+ stream->qpMax = 45;
+ stream->active = true;
+}
+
+} // namespace
+
+void SimulcastTestFixtureImpl::DefaultSettings(
+ VideoCodec* settings,
+ const int* temporal_layer_profile,
+ VideoCodecType codec_type,
+ bool reverse_layer_order) {
+ RTC_CHECK(settings);
+ *settings = {};
+ settings->codecType = codec_type;
+ settings->startBitrate = 300;
+ settings->minBitrate = 30;
+ settings->maxBitrate = 0;
+ settings->maxFramerate = 30;
+ settings->width = kDefaultWidth;
+ settings->height = kDefaultHeight;
+ settings->numberOfSimulcastStreams = kNumberOfSimulcastStreams;
+ settings->active = true;
+ ASSERT_EQ(3, kNumberOfSimulcastStreams);
+ int layer_order[3] = {0, 1, 2};
+ if (reverse_layer_order) {
+ layer_order[0] = 2;
+ layer_order[2] = 0;
+ }
+ settings->timing_frame_thresholds = {kDefaultTimingFramesDelayMs,
+ kDefaultOutlierFrameSizePercent};
+ ConfigureStream(kDefaultWidth / 4, kDefaultHeight / 4, kMaxBitrates[0],
+ kMinBitrates[0], kTargetBitrates[0], kMaxFramerates[0],
+ &settings->simulcastStream[layer_order[0]],
+ temporal_layer_profile[0]);
+ ConfigureStream(kDefaultWidth / 2, kDefaultHeight / 2, kMaxBitrates[1],
+ kMinBitrates[1], kTargetBitrates[1], kMaxFramerates[1],
+ &settings->simulcastStream[layer_order[1]],
+ temporal_layer_profile[1]);
+ ConfigureStream(kDefaultWidth, kDefaultHeight, kMaxBitrates[2],
+ kMinBitrates[2], kTargetBitrates[2], kMaxFramerates[2],
+ &settings->simulcastStream[layer_order[2]],
+ temporal_layer_profile[2]);
+ settings->SetFrameDropEnabled(true);
+ if (codec_type == kVideoCodecVP8) {
+ settings->VP8()->denoisingOn = true;
+ settings->VP8()->automaticResizeOn = false;
+ settings->VP8()->keyFrameInterval = 3000;
+ } else {
+ settings->H264()->keyFrameInterval = 3000;
+ }
+}
+
+SimulcastTestFixtureImpl::SimulcastTestFixtureImpl(
+ std::unique_ptr<VideoEncoderFactory> encoder_factory,
+ std::unique_ptr<VideoDecoderFactory> decoder_factory,
+ SdpVideoFormat video_format)
+ : codec_type_(PayloadStringToCodecType(video_format.name)) {
+ encoder_ = encoder_factory->CreateVideoEncoder(video_format);
+ decoder_ = decoder_factory->CreateVideoDecoder(video_format);
+ SetUpCodec((codec_type_ == kVideoCodecVP8 || codec_type_ == kVideoCodecH264)
+ ? kDefaultTemporalLayerProfile
+ : kNoTemporalLayerProfile);
+}
+
+SimulcastTestFixtureImpl::~SimulcastTestFixtureImpl() {
+ encoder_->Release();
+ decoder_->Release();
+}
+
+void SimulcastTestFixtureImpl::SetUpCodec(const int* temporal_layer_profile) {
+ encoder_->RegisterEncodeCompleteCallback(&encoder_callback_);
+ decoder_->RegisterDecodeCompleteCallback(&decoder_callback_);
+ DefaultSettings(&settings_, temporal_layer_profile, codec_type_);
+ SetUpRateAllocator();
+ EXPECT_EQ(0, encoder_->InitEncode(&settings_, kSettings));
+ VideoDecoder::Settings decoder_settings;
+ decoder_settings.set_max_render_resolution({kDefaultWidth, kDefaultHeight});
+ decoder_settings.set_codec_type(codec_type_);
+ EXPECT_TRUE(decoder_->Configure(decoder_settings));
+ input_buffer_ = I420Buffer::Create(kDefaultWidth, kDefaultHeight);
+ input_buffer_->InitializeData();
+ input_frame_ = std::make_unique<webrtc::VideoFrame>(
+ webrtc::VideoFrame::Builder()
+ .set_video_frame_buffer(input_buffer_)
+ .set_rotation(webrtc::kVideoRotation_0)
+ .set_timestamp_us(0)
+ .build());
+}
+
+void SimulcastTestFixtureImpl::SetUpRateAllocator() {
+ rate_allocator_.reset(new SimulcastRateAllocator(settings_));
+}
+
+void SimulcastTestFixtureImpl::SetRates(uint32_t bitrate_kbps, uint32_t fps) {
+ encoder_->SetRates(VideoEncoder::RateControlParameters(
+ rate_allocator_->Allocate(
+ VideoBitrateAllocationParameters(bitrate_kbps * 1000, fps)),
+ static_cast<double>(fps)));
+}
+
+void SimulcastTestFixtureImpl::RunActiveStreamsTest(
+ const std::vector<bool> active_streams) {
+ std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
+ VideoFrameType::kVideoFrameDelta);
+ UpdateActiveStreams(active_streams);
+  // Set sufficient bitrate for all streams, so that stream activity, not
+  // available bitrate, is what is being tested.
+ SetRates(kMaxBitrates[0] + kMaxBitrates[1] + kMaxBitrates[2], 30);
+
+ ExpectStreams(VideoFrameType::kVideoFrameKey, active_streams);
+ input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
+
+ ExpectStreams(VideoFrameType::kVideoFrameDelta, active_streams);
+ input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
+}
+
+void SimulcastTestFixtureImpl::UpdateActiveStreams(
+ const std::vector<bool> active_streams) {
+ ASSERT_EQ(static_cast<int>(active_streams.size()), kNumberOfSimulcastStreams);
+ for (size_t i = 0; i < active_streams.size(); ++i) {
+ settings_.simulcastStream[i].active = active_streams[i];
+ }
+  // Reinitialize the allocator and encoder with the new settings.
+ // TODO(bugs.webrtc.org/8807): Currently, we do a full "hard"
+ // reconfiguration of the allocator and encoder. When the video bitrate
+ // allocator has support for updating active streams without a
+ // reinitialization, we can just call that here instead.
+ SetUpRateAllocator();
+ EXPECT_EQ(0, encoder_->InitEncode(&settings_, kSettings));
+}
+
+void SimulcastTestFixtureImpl::ExpectStreams(
+ VideoFrameType frame_type,
+ const std::vector<bool> expected_streams_active) {
+ ASSERT_EQ(static_cast<int>(expected_streams_active.size()),
+ kNumberOfSimulcastStreams);
+ if (expected_streams_active[0]) {
+ EXPECT_CALL(
+ encoder_callback_,
+ OnEncodedImage(
+ AllOf(Field(&EncodedImage::_frameType, frame_type),
+ Field(&EncodedImage::_encodedWidth, kDefaultWidth / 4),
+ Field(&EncodedImage::_encodedHeight, kDefaultHeight / 4)),
+ _))
+ .Times(1)
+ .WillRepeatedly(Return(
+ EncodedImageCallback::Result(EncodedImageCallback::Result::OK, 0)));
+ }
+ if (expected_streams_active[1]) {
+ EXPECT_CALL(
+ encoder_callback_,
+ OnEncodedImage(
+ AllOf(Field(&EncodedImage::_frameType, frame_type),
+ Field(&EncodedImage::_encodedWidth, kDefaultWidth / 2),
+ Field(&EncodedImage::_encodedHeight, kDefaultHeight / 2)),
+ _))
+ .Times(1)
+ .WillRepeatedly(Return(
+ EncodedImageCallback::Result(EncodedImageCallback::Result::OK, 0)));
+ }
+ if (expected_streams_active[2]) {
+ EXPECT_CALL(encoder_callback_,
+ OnEncodedImage(
+ AllOf(Field(&EncodedImage::_frameType, frame_type),
+ Field(&EncodedImage::_encodedWidth, kDefaultWidth),
+ Field(&EncodedImage::_encodedHeight, kDefaultHeight)),
+ _))
+ .Times(1)
+ .WillRepeatedly(Return(
+ EncodedImageCallback::Result(EncodedImageCallback::Result::OK, 0)));
+ }
+}
+
+void SimulcastTestFixtureImpl::ExpectStreams(VideoFrameType frame_type,
+ int expected_video_streams) {
+ ASSERT_GE(expected_video_streams, 0);
+ ASSERT_LE(expected_video_streams, kNumberOfSimulcastStreams);
+ std::vector<bool> expected_streams_active(kNumberOfSimulcastStreams, false);
+ for (int i = 0; i < expected_video_streams; ++i) {
+ expected_streams_active[i] = true;
+ }
+ ExpectStreams(frame_type, expected_streams_active);
+}
+
+void SimulcastTestFixtureImpl::VerifyTemporalIdxAndSyncForAllSpatialLayers(
+ TestEncodedImageCallback* encoder_callback,
+ const int* expected_temporal_idx,
+ const bool* expected_layer_sync,
+ int num_spatial_layers) {
+ int temporal_layer = -1;
+ bool layer_sync = false;
+ for (int i = 0; i < num_spatial_layers; i++) {
+ encoder_callback->GetLastEncodedFrameInfo(&temporal_layer, &layer_sync, i);
+ EXPECT_EQ(expected_temporal_idx[i], temporal_layer);
+ EXPECT_EQ(expected_layer_sync[i], layer_sync);
+ }
+}
+
+// We currently expect all active streams to generate a key frame even though
+// a key frame was only requested for some of them.
+void SimulcastTestFixtureImpl::TestKeyFrameRequestsOnAllStreams() {
+ SetRates(kMaxBitrates[2], 30); // To get all three streams.
+ std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
+ VideoFrameType::kVideoFrameDelta);
+ ExpectStreams(VideoFrameType::kVideoFrameKey, kNumberOfSimulcastStreams);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
+
+ ExpectStreams(VideoFrameType::kVideoFrameDelta, kNumberOfSimulcastStreams);
+ input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
+
+ frame_types[0] = VideoFrameType::kVideoFrameKey;
+ ExpectStreams(VideoFrameType::kVideoFrameKey, kNumberOfSimulcastStreams);
+ input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
+
+ std::fill(frame_types.begin(), frame_types.end(),
+ VideoFrameType::kVideoFrameDelta);
+ frame_types[1] = VideoFrameType::kVideoFrameKey;
+ ExpectStreams(VideoFrameType::kVideoFrameKey, kNumberOfSimulcastStreams);
+ input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
+
+ std::fill(frame_types.begin(), frame_types.end(),
+ VideoFrameType::kVideoFrameDelta);
+ frame_types[2] = VideoFrameType::kVideoFrameKey;
+ ExpectStreams(VideoFrameType::kVideoFrameKey, kNumberOfSimulcastStreams);
+ input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
+
+ std::fill(frame_types.begin(), frame_types.end(),
+ VideoFrameType::kVideoFrameDelta);
+ ExpectStreams(VideoFrameType::kVideoFrameDelta, kNumberOfSimulcastStreams);
+ input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
+}
+
+void SimulcastTestFixtureImpl::TestPaddingAllStreams() {
+ // We should always encode the base layer.
+ SetRates(kMinBitrates[0] - 1, 30);
+ std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
+ VideoFrameType::kVideoFrameDelta);
+ ExpectStreams(VideoFrameType::kVideoFrameKey, 1);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
+
+ ExpectStreams(VideoFrameType::kVideoFrameDelta, 1);
+ input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
+}
+
+void SimulcastTestFixtureImpl::TestPaddingTwoStreams() {
+  // We have just enough bitrate for only the first stream, plus padding for
+  // the other two.
+ SetRates(kMinBitrates[0], 30);
+ std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
+ VideoFrameType::kVideoFrameDelta);
+ ExpectStreams(VideoFrameType::kVideoFrameKey, 1);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
+
+ ExpectStreams(VideoFrameType::kVideoFrameDelta, 1);
+ input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
+}
+
+void SimulcastTestFixtureImpl::TestPaddingTwoStreamsOneMaxedOut() {
+  // We are just below the limit for sending the second stream, so we should
+  // get the first stream maxed out (at `maxBitrate`), and padding for two.
+ SetRates(kTargetBitrates[0] + kMinBitrates[1] - 1, 30);
+ std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
+ VideoFrameType::kVideoFrameDelta);
+ ExpectStreams(VideoFrameType::kVideoFrameKey, 1);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
+
+ ExpectStreams(VideoFrameType::kVideoFrameDelta, 1);
+ input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
+}
+
+void SimulcastTestFixtureImpl::TestPaddingOneStream() {
+  // We have just enough bitrate to send two streams, plus padding for one.
+ SetRates(kTargetBitrates[0] + kMinBitrates[1], 30);
+ std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
+ VideoFrameType::kVideoFrameDelta);
+ ExpectStreams(VideoFrameType::kVideoFrameKey, 2);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
+
+ ExpectStreams(VideoFrameType::kVideoFrameDelta, 2);
+ input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
+}
+
+void SimulcastTestFixtureImpl::TestPaddingOneStreamTwoMaxedOut() {
+  // We are just below the limit for sending the third stream, so we should
+  // get the first stream's rate maxed out at `targetBitrate` and the second
+  // at `maxBitrate`.
+ SetRates(kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2] - 1, 30);
+ std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
+ VideoFrameType::kVideoFrameDelta);
+ ExpectStreams(VideoFrameType::kVideoFrameKey, 2);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
+
+ ExpectStreams(VideoFrameType::kVideoFrameDelta, 2);
+ input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
+}
+
+void SimulcastTestFixtureImpl::TestSendAllStreams() {
+ // We have just enough to send all streams.
+ SetRates(kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2], 30);
+ std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
+ VideoFrameType::kVideoFrameDelta);
+ ExpectStreams(VideoFrameType::kVideoFrameKey, 3);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
+
+ ExpectStreams(VideoFrameType::kVideoFrameDelta, 3);
+ input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
+}
+
+void SimulcastTestFixtureImpl::TestDisablingStreams() {
+ // We should get three media streams.
+ SetRates(kMaxBitrates[0] + kMaxBitrates[1] + kMaxBitrates[2], 30);
+ std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
+ VideoFrameType::kVideoFrameDelta);
+ ExpectStreams(VideoFrameType::kVideoFrameKey, 3);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
+
+ ExpectStreams(VideoFrameType::kVideoFrameDelta, 3);
+ input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
+
+ // We should only get two streams and padding for one.
+ SetRates(kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2] / 2, 30);
+ ExpectStreams(VideoFrameType::kVideoFrameDelta, 2);
+ input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
+
+ // We should only get the first stream and padding for two.
+ SetRates(kTargetBitrates[0] + kMinBitrates[1] / 2, 30);
+ ExpectStreams(VideoFrameType::kVideoFrameDelta, 1);
+ input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
+
+  // We don't have enough bitrate for the thumbnail stream, but we should get
+  // it anyway with the current configuration.
+ SetRates(kTargetBitrates[0] - 1, 30);
+ ExpectStreams(VideoFrameType::kVideoFrameDelta, 1);
+ input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
+
+ // We should only get two streams and padding for one.
+ SetRates(kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2] / 2, 30);
+ // We get a key frame because a new stream is being enabled.
+ ExpectStreams(VideoFrameType::kVideoFrameKey, 2);
+ input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
+
+ // We should get all three streams.
+ SetRates(kTargetBitrates[0] + kTargetBitrates[1] + kTargetBitrates[2], 30);
+ // We get a key frame because a new stream is being enabled.
+ ExpectStreams(VideoFrameType::kVideoFrameKey, 3);
+ input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
+}
+
+void SimulcastTestFixtureImpl::TestActiveStreams() {
+ // All streams on.
+ RunActiveStreamsTest({true, true, true});
+ // All streams off.
+ RunActiveStreamsTest({false, false, false});
+ // Low stream off.
+ RunActiveStreamsTest({false, true, true});
+ // Middle stream off.
+ RunActiveStreamsTest({true, false, true});
+ // High stream off.
+ RunActiveStreamsTest({true, true, false});
+ // Only low stream turned on.
+ RunActiveStreamsTest({true, false, false});
+ // Only middle stream turned on.
+ RunActiveStreamsTest({false, true, false});
+ // Only high stream turned on.
+ RunActiveStreamsTest({false, false, true});
+}
+
+void SimulcastTestFixtureImpl::SwitchingToOneStream(int width, int height) {
+ const int* temporal_layer_profile = nullptr;
+ // Disable all streams except the last and set the bitrate of the last to
+ // 100 kbps. This verifies the way GTP switches to screenshare mode.
+ if (codec_type_ == kVideoCodecVP8) {
+ settings_.VP8()->numberOfTemporalLayers = 1;
+ temporal_layer_profile = kDefaultTemporalLayerProfile;
+ } else {
+ settings_.H264()->numberOfTemporalLayers = 1;
+ temporal_layer_profile = kNoTemporalLayerProfile;
+ }
+ settings_.maxBitrate = 100;
+ settings_.startBitrate = 100;
+ settings_.width = width;
+ settings_.height = height;
+ for (int i = 0; i < settings_.numberOfSimulcastStreams - 1; ++i) {
+ settings_.simulcastStream[i].maxBitrate = 0;
+ settings_.simulcastStream[i].width = settings_.width;
+ settings_.simulcastStream[i].height = settings_.height;
+ settings_.simulcastStream[i].numberOfTemporalLayers = 1;
+ }
+  // Set the input image to the new resolution.
+ input_buffer_ = I420Buffer::Create(settings_.width, settings_.height);
+ input_buffer_->InitializeData();
+
+ input_frame_ = std::make_unique<webrtc::VideoFrame>(
+ webrtc::VideoFrame::Builder()
+ .set_video_frame_buffer(input_buffer_)
+ .set_rotation(webrtc::kVideoRotation_0)
+ .set_timestamp_us(0)
+ .build());
+
+ // The for loop above did not set the bitrate of the highest layer.
+ settings_.simulcastStream[settings_.numberOfSimulcastStreams - 1].maxBitrate =
+ 0;
+ // The highest layer has to correspond to the non-simulcast resolution.
+ settings_.simulcastStream[settings_.numberOfSimulcastStreams - 1].width =
+ settings_.width;
+ settings_.simulcastStream[settings_.numberOfSimulcastStreams - 1].height =
+ settings_.height;
+ SetUpRateAllocator();
+ EXPECT_EQ(0, encoder_->InitEncode(&settings_, kSettings));
+
+ // Encode one frame and verify.
+ SetRates(kMaxBitrates[0] + kMaxBitrates[1], 30);
+ std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
+ VideoFrameType::kVideoFrameDelta);
+ EXPECT_CALL(
+ encoder_callback_,
+ OnEncodedImage(AllOf(Field(&EncodedImage::_frameType,
+ VideoFrameType::kVideoFrameKey),
+ Field(&EncodedImage::_encodedWidth, width),
+ Field(&EncodedImage::_encodedHeight, height)),
+ _))
+ .Times(1)
+ .WillRepeatedly(Return(
+ EncodedImageCallback::Result(EncodedImageCallback::Result::OK, 0)));
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
+
+ // Switch back.
+ DefaultSettings(&settings_, temporal_layer_profile, codec_type_);
+ // Start at the lowest bitrate for enabling base stream.
+ settings_.startBitrate = kMinBitrates[0];
+ SetUpRateAllocator();
+ EXPECT_EQ(0, encoder_->InitEncode(&settings_, kSettings));
+ SetRates(settings_.startBitrate, 30);
+ ExpectStreams(VideoFrameType::kVideoFrameKey, 1);
+ // Resize `input_frame_` to the new resolution.
+ input_buffer_ = I420Buffer::Create(settings_.width, settings_.height);
+ input_buffer_->InitializeData();
+ input_frame_ = std::make_unique<webrtc::VideoFrame>(
+ webrtc::VideoFrame::Builder()
+ .set_video_frame_buffer(input_buffer_)
+ .set_rotation(webrtc::kVideoRotation_0)
+ .set_timestamp_us(0)
+ .build());
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
+}
+
+void SimulcastTestFixtureImpl::TestSwitchingToOneStream() {
+ SwitchingToOneStream(1024, 768);
+}
+
+void SimulcastTestFixtureImpl::TestSwitchingToOneOddStream() {
+ SwitchingToOneStream(1023, 769);
+}
+
+void SimulcastTestFixtureImpl::TestSwitchingToOneSmallStream() {
+ SwitchingToOneStream(4, 4);
+}
+
+// Test the layer pattern and sync flag for various spatial-temporal patterns.
+// 3-3-3 pattern: 3 temporal layers for all spatial streams, so the same
+// temporal_layer id and layer_sync flag are expected for all streams.
+void SimulcastTestFixtureImpl::TestSpatioTemporalLayers333PatternEncoder() {
+ bool is_h264 = codec_type_ == kVideoCodecH264;
+ TestEncodedImageCallback encoder_callback;
+ encoder_->RegisterEncodeCompleteCallback(&encoder_callback);
+ SetRates(kMaxBitrates[2], 30); // To get all three streams.
+
+ int expected_temporal_idx[3] = {-1, -1, -1};
+ bool expected_layer_sync[3] = {false, false, false};
+
+ // First frame: #0.
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL));
+ SetExpectedValues3<int>(0, 0, 0, expected_temporal_idx);
+ SetExpectedValues3<bool>(!is_h264, !is_h264, !is_h264, expected_layer_sync);
+ VerifyTemporalIdxAndSyncForAllSpatialLayers(
+ &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
+
+ // Next frame: #1.
+ input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL));
+ SetExpectedValues3<int>(2, 2, 2, expected_temporal_idx);
+ SetExpectedValues3<bool>(true, true, true, expected_layer_sync);
+ VerifyTemporalIdxAndSyncForAllSpatialLayers(
+ &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
+
+ // Next frame: #2.
+ input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL));
+ SetExpectedValues3<int>(1, 1, 1, expected_temporal_idx);
+ SetExpectedValues3<bool>(true, true, true, expected_layer_sync);
+ VerifyTemporalIdxAndSyncForAllSpatialLayers(
+ &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
+
+ // Next frame: #3.
+ input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL));
+ SetExpectedValues3<int>(2, 2, 2, expected_temporal_idx);
+ SetExpectedValues3<bool>(false, false, false, expected_layer_sync);
+ VerifyTemporalIdxAndSyncForAllSpatialLayers(
+ &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
+
+ // Next frame: #4.
+ input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL));
+ SetExpectedValues3<int>(0, 0, 0, expected_temporal_idx);
+ SetExpectedValues3<bool>(false, false, false, expected_layer_sync);
+ VerifyTemporalIdxAndSyncForAllSpatialLayers(
+ &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
+
+ // Next frame: #5.
+ input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL));
+ SetExpectedValues3<int>(2, 2, 2, expected_temporal_idx);
+ SetExpectedValues3<bool>(is_h264, is_h264, is_h264, expected_layer_sync);
+ VerifyTemporalIdxAndSyncForAllSpatialLayers(
+ &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
+}
+
+// Test the layer pattern and sync flag for various spatial-temporal patterns.
+// 3-2-1 pattern: 3 temporal layers for lowest resolution, 2 for middle, and
+// 1 temporal layer for highest resolution.
+// For this profile, we expect the temporal index pattern to be:
+// 1st stream: 0, 2, 1, 2, ....
+// 2nd stream: 0, 1, 0, 1, ...
+// 3rd stream: -1, -1, -1, -1, ....
+// Regarding the 3rd stream, note that a stream/encoder with 1 temporal layer
+// should always have temporal layer idx set to kNoTemporalIdx = -1.
+// Since CodecSpecificInfoVP8.temporalIdx is uint8_t, this will wrap to 255.
+// TODO(marpan): Although this seems safe for now, we should fix this.
+void SimulcastTestFixtureImpl::TestSpatioTemporalLayers321PatternEncoder() {
+ EXPECT_EQ(codec_type_, kVideoCodecVP8);
+ int temporal_layer_profile[3] = {3, 2, 1};
+ SetUpCodec(temporal_layer_profile);
+ TestEncodedImageCallback encoder_callback;
+ encoder_->RegisterEncodeCompleteCallback(&encoder_callback);
+ SetRates(kMaxBitrates[2], 30); // To get all three streams.
+
+ int expected_temporal_idx[3] = {-1, -1, -1};
+ bool expected_layer_sync[3] = {false, false, false};
+
+ // First frame: #0.
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL));
+ SetExpectedValues3<int>(0, 0, 255, expected_temporal_idx);
+ SetExpectedValues3<bool>(true, true, false, expected_layer_sync);
+ VerifyTemporalIdxAndSyncForAllSpatialLayers(
+ &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
+
+ // Next frame: #1.
+ input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL));
+ SetExpectedValues3<int>(2, 1, 255, expected_temporal_idx);
+ SetExpectedValues3<bool>(true, true, false, expected_layer_sync);
+ VerifyTemporalIdxAndSyncForAllSpatialLayers(
+ &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
+
+ // Next frame: #2.
+ input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL));
+ SetExpectedValues3<int>(1, 0, 255, expected_temporal_idx);
+ SetExpectedValues3<bool>(true, false, false, expected_layer_sync);
+ VerifyTemporalIdxAndSyncForAllSpatialLayers(
+ &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
+
+ // Next frame: #3.
+ input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL));
+ SetExpectedValues3<int>(2, 1, 255, expected_temporal_idx);
+ SetExpectedValues3<bool>(false, false, false, expected_layer_sync);
+ VerifyTemporalIdxAndSyncForAllSpatialLayers(
+ &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
+
+ // Next frame: #4.
+ input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL));
+ SetExpectedValues3<int>(0, 0, 255, expected_temporal_idx);
+ SetExpectedValues3<bool>(false, false, false, expected_layer_sync);
+ VerifyTemporalIdxAndSyncForAllSpatialLayers(
+ &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
+
+ // Next frame: #5.
+ input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL));
+ SetExpectedValues3<int>(2, 1, 255, expected_temporal_idx);
+ SetExpectedValues3<bool>(false, true, false, expected_layer_sync);
+ VerifyTemporalIdxAndSyncForAllSpatialLayers(
+ &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
+}
+
+void SimulcastTestFixtureImpl::TestStrideEncodeDecode() {
+ TestEncodedImageCallback encoder_callback;
+ TestDecodedImageCallback decoder_callback;
+ encoder_->RegisterEncodeCompleteCallback(&encoder_callback);
+ decoder_->RegisterDecodeCompleteCallback(&decoder_callback);
+
+ SetRates(kMaxBitrates[2], 30); // To get all three streams.
+  // Set up two (possibly) problematic stride use cases:
+  // 1. stride > width, 2. stride_uv != stride_y / 2.
+ int stride_y = kDefaultWidth + 20;
+ int stride_uv = ((kDefaultWidth + 1) / 2) + 5;
+ input_buffer_ = I420Buffer::Create(kDefaultWidth, kDefaultHeight, stride_y,
+ stride_uv, stride_uv);
+ input_frame_ = std::make_unique<webrtc::VideoFrame>(
+ webrtc::VideoFrame::Builder()
+ .set_video_frame_buffer(input_buffer_)
+ .set_rotation(webrtc::kVideoRotation_0)
+ .set_timestamp_us(0)
+ .build());
+
+ // Set color.
+ int plane_offset[kNumOfPlanes];
+ plane_offset[kYPlane] = kColorY;
+ plane_offset[kUPlane] = kColorU;
+ plane_offset[kVPlane] = kColorV;
+ CreateImage(input_buffer_, plane_offset);
+
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL));
+
+ // Change color.
+ plane_offset[kYPlane] += 1;
+ plane_offset[kUPlane] += 1;
+ plane_offset[kVPlane] += 1;
+ CreateImage(input_buffer_, plane_offset);
+ input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL));
+
+ EncodedImage encoded_frame;
+  // The first of the two encoded frames was a key frame; decode it first.
+ encoder_callback.GetLastEncodedKeyFrame(&encoded_frame);
+ EXPECT_EQ(0, decoder_->Decode(encoded_frame, false, 0));
+ encoder_callback.GetLastEncodedFrame(&encoded_frame);
+ decoder_->Decode(encoded_frame, false, 0);
+ EXPECT_EQ(2, decoder_callback.DecodedFrames());
+}
+
+void SimulcastTestFixtureImpl::TestDecodeWidthHeightSet() {
+ MockEncodedImageCallback encoder_callback;
+ MockDecodedImageCallback decoder_callback;
+
+ EncodedImage encoded_frame[3];
+ SetRates(kMaxBitrates[2], 30); // To get all three streams.
+ encoder_->RegisterEncodeCompleteCallback(&encoder_callback);
+ decoder_->RegisterDecodeCompleteCallback(&decoder_callback);
+
+ EXPECT_CALL(encoder_callback, OnEncodedImage(_, _))
+ .Times(3)
+ .WillRepeatedly(
+ ::testing::Invoke([&](const EncodedImage& encoded_image,
+ const CodecSpecificInfo* codec_specific_info) {
+ EXPECT_EQ(encoded_image._frameType, VideoFrameType::kVideoFrameKey);
+
+ size_t index = encoded_image.SpatialIndex().value_or(0);
+ encoded_frame[index].SetEncodedData(EncodedImageBuffer::Create(
+ encoded_image.data(), encoded_image.size()));
+ encoded_frame[index]._frameType = encoded_image._frameType;
+ return EncodedImageCallback::Result(
+ EncodedImageCallback::Result::OK, 0);
+ }));
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL));
+
+ EXPECT_CALL(decoder_callback, Decoded(_, _, _))
+ .WillOnce(::testing::Invoke([](VideoFrame& decodedImage,
+ absl::optional<int32_t> decode_time_ms,
+ absl::optional<uint8_t> qp) {
+ EXPECT_EQ(decodedImage.width(), kDefaultWidth / 4);
+ EXPECT_EQ(decodedImage.height(), kDefaultHeight / 4);
+ }));
+ EXPECT_EQ(0, decoder_->Decode(encoded_frame[0], false, 0));
+
+ EXPECT_CALL(decoder_callback, Decoded(_, _, _))
+ .WillOnce(::testing::Invoke([](VideoFrame& decodedImage,
+ absl::optional<int32_t> decode_time_ms,
+ absl::optional<uint8_t> qp) {
+ EXPECT_EQ(decodedImage.width(), kDefaultWidth / 2);
+ EXPECT_EQ(decodedImage.height(), kDefaultHeight / 2);
+ }));
+ EXPECT_EQ(0, decoder_->Decode(encoded_frame[1], false, 0));
+
+ EXPECT_CALL(decoder_callback, Decoded(_, _, _))
+ .WillOnce(::testing::Invoke([](VideoFrame& decodedImage,
+ absl::optional<int32_t> decode_time_ms,
+ absl::optional<uint8_t> qp) {
+ EXPECT_EQ(decodedImage.width(), kDefaultWidth);
+ EXPECT_EQ(decodedImage.height(), kDefaultHeight);
+ }));
+ EXPECT_EQ(0, decoder_->Decode(encoded_frame[2], false, 0));
+}
+
+void SimulcastTestFixtureImpl::
+ TestEncoderInfoForDefaultTemporalLayerProfileHasFpsAllocation() {
+ VideoEncoder::EncoderInfo encoder_info = encoder_->GetEncoderInfo();
+ EXPECT_EQ(encoder_info.fps_allocation[0].size(),
+ static_cast<size_t>(kDefaultTemporalLayerProfile[0]));
+ EXPECT_EQ(encoder_info.fps_allocation[1].size(),
+ static_cast<size_t>(kDefaultTemporalLayerProfile[1]));
+ EXPECT_EQ(encoder_info.fps_allocation[2].size(),
+ static_cast<size_t>(kDefaultTemporalLayerProfile[2]));
+}
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/utility/simulcast_test_fixture_impl.h b/third_party/libwebrtc/modules/video_coding/utility/simulcast_test_fixture_impl.h
new file mode 100644
index 0000000000..cdfdc609d5
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/utility/simulcast_test_fixture_impl.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_UTILITY_SIMULCAST_TEST_FIXTURE_IMPL_H_
+#define MODULES_VIDEO_CODING_UTILITY_SIMULCAST_TEST_FIXTURE_IMPL_H_
+
+#include <memory>
+#include <vector>
+
+#include "api/test/mock_video_decoder.h"
+#include "api/test/mock_video_encoder.h"
+#include "api/test/simulcast_test_fixture.h"
+#include "api/video/i420_buffer.h"
+#include "api/video/video_frame.h"
+#include "api/video_codecs/video_decoder_factory.h"
+#include "api/video_codecs/video_encoder_factory.h"
+#include "modules/video_coding/utility/simulcast_rate_allocator.h"
+
+namespace webrtc {
+namespace test {
+
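+// Usage sketch (the factory helpers named here are hypothetical): construct
+// the fixture with encoder/decoder factories for the codec under test, then
+// run the scenario methods, e.g.
+//   SimulcastTestFixtureImpl fixture(MakeVp8EncoderFactoryForTest(),
+//                                    MakeVp8DecoderFactoryForTest(),
+//                                    SdpVideoFormat("VP8"));
+//   fixture.TestSendAllStreams();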
+class SimulcastTestFixtureImpl final : public SimulcastTestFixture {
+ public:
+ SimulcastTestFixtureImpl(std::unique_ptr<VideoEncoderFactory> encoder_factory,
+ std::unique_ptr<VideoDecoderFactory> decoder_factory,
+ SdpVideoFormat video_format);
+ ~SimulcastTestFixtureImpl() final;
+
+ // Implements SimulcastTestFixture.
+ void TestKeyFrameRequestsOnAllStreams() override;
+ void TestPaddingAllStreams() override;
+ void TestPaddingTwoStreams() override;
+ void TestPaddingTwoStreamsOneMaxedOut() override;
+ void TestPaddingOneStream() override;
+ void TestPaddingOneStreamTwoMaxedOut() override;
+ void TestSendAllStreams() override;
+ void TestDisablingStreams() override;
+ void TestActiveStreams() override;
+ void TestSwitchingToOneStream() override;
+ void TestSwitchingToOneOddStream() override;
+ void TestSwitchingToOneSmallStream() override;
+ void TestSpatioTemporalLayers333PatternEncoder() override;
+ void TestSpatioTemporalLayers321PatternEncoder() override;
+ void TestStrideEncodeDecode() override;
+ void TestDecodeWidthHeightSet() override;
+ void TestEncoderInfoForDefaultTemporalLayerProfileHasFpsAllocation() override;
+
+ static void DefaultSettings(VideoCodec* settings,
+ const int* temporal_layer_profile,
+ VideoCodecType codec_type,
+ bool reverse_layer_order = false);
+
+ private:
+ class TestEncodedImageCallback;
+ class TestDecodedImageCallback;
+
+ void SetUpCodec(const int* temporal_layer_profile);
+ void SetUpRateAllocator();
+ void SetRates(uint32_t bitrate_kbps, uint32_t fps);
+ void RunActiveStreamsTest(std::vector<bool> active_streams);
+ void UpdateActiveStreams(std::vector<bool> active_streams);
+ void ExpectStreams(VideoFrameType frame_type,
+ std::vector<bool> expected_streams_active);
+ void ExpectStreams(VideoFrameType frame_type, int expected_video_streams);
+ void VerifyTemporalIdxAndSyncForAllSpatialLayers(
+ TestEncodedImageCallback* encoder_callback,
+ const int* expected_temporal_idx,
+ const bool* expected_layer_sync,
+ int num_spatial_layers);
+ void SwitchingToOneStream(int width, int height);
+
+ std::unique_ptr<VideoEncoder> encoder_;
+ MockEncodedImageCallback encoder_callback_;
+ std::unique_ptr<VideoDecoder> decoder_;
+ MockDecodedImageCallback decoder_callback_;
+ VideoCodec settings_;
+ rtc::scoped_refptr<I420Buffer> input_buffer_;
+ std::unique_ptr<VideoFrame> input_frame_;
+ std::unique_ptr<SimulcastRateAllocator> rate_allocator_;
+ VideoCodecType codec_type_;
+};
+
+} // namespace test
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_UTILITY_SIMULCAST_TEST_FIXTURE_IMPL_H_
diff --git a/third_party/libwebrtc/modules/video_coding/utility/simulcast_utility.cc b/third_party/libwebrtc/modules/video_coding/utility/simulcast_utility.cc
new file mode 100644
index 0000000000..a407483edd
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/utility/simulcast_utility.cc
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/utility/simulcast_utility.h"
+
+#include <algorithm>
+#include <cmath>
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+uint32_t SimulcastUtility::SumStreamMaxBitrate(int streams,
+ const VideoCodec& codec) {
+ uint32_t bitrate_sum = 0;
+ for (int i = 0; i < streams; ++i) {
+ bitrate_sum += codec.simulcastStream[i].maxBitrate;
+ }
+ return bitrate_sum;
+}
+
+int SimulcastUtility::NumberOfSimulcastStreams(const VideoCodec& codec) {
+ int streams =
+ codec.numberOfSimulcastStreams < 1 ? 1 : codec.numberOfSimulcastStreams;
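+  // A stream count above one is only meaningful if at least one stream has a
+  // nonzero max bitrate; otherwise fall back to a single stream.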
+ uint32_t simulcast_max_bitrate = SumStreamMaxBitrate(streams, codec);
+ if (simulcast_max_bitrate == 0) {
+ streams = 1;
+ }
+ return streams;
+}
+
+bool SimulcastUtility::ValidSimulcastParameters(const VideoCodec& codec,
+ int num_streams) {
+ // Check resolution.
+ if (codec.width != codec.simulcastStream[num_streams - 1].width ||
+ codec.height != codec.simulcastStream[num_streams - 1].height) {
+ return false;
+ }
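+  // Every stream must have the same aspect ratio as the top-level codec;
+  // comparing cross products avoids division and rounding.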
+ for (int i = 0; i < num_streams; ++i) {
+ if (codec.width * codec.simulcastStream[i].height !=
+ codec.height * codec.simulcastStream[i].width) {
+ return false;
+ }
+ }
+ if (codec.codecType == webrtc::kVideoCodecVP8) {
+ for (int i = 1; i < num_streams; ++i) {
+ if (codec.simulcastStream[i].width < codec.simulcastStream[i - 1].width) {
+ return false;
+ }
+ }
+ } else {
+ // TODO(mirtad): H264 encoder implementation still assumes the default
+ // resolution downscaling is used.
+ for (int i = 1; i < num_streams; ++i) {
+ if (codec.simulcastStream[i].width !=
+ codec.simulcastStream[i - 1].width * 2) {
+ return false;
+ }
+ }
+ }
+
+ // Check frame-rate.
+ for (int i = 1; i < num_streams; ++i) {
+ if (fabs(codec.simulcastStream[i].maxFramerate -
+ codec.simulcastStream[i - 1].maxFramerate) > 1e-9) {
+ return false;
+ }
+ }
+
+ // Check temporal layers.
+ for (int i = 0; i < num_streams - 1; ++i) {
+ if (codec.simulcastStream[i].numberOfTemporalLayers !=
+ codec.simulcastStream[i + 1].numberOfTemporalLayers)
+ return false;
+ }
+ return true;
+}
+
+bool SimulcastUtility::IsConferenceModeScreenshare(const VideoCodec& codec) {
+ return codec.mode == VideoCodecMode::kScreensharing &&
+ codec.legacy_conference_mode;
+}
+
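+// Returns the effective temporal layer count for `spatial_id`: the larger of
+// the codec-wide VP8 setting (floored at one layer) and, when simulcast is
+// configured, the per-stream setting.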
+int SimulcastUtility::NumberOfTemporalLayers(const VideoCodec& codec,
+ int spatial_id) {
+ uint8_t num_temporal_layers =
+ std::max<uint8_t>(1, codec.VP8().numberOfTemporalLayers);
+ if (codec.numberOfSimulcastStreams > 0) {
+ RTC_DCHECK_LT(spatial_id, codec.numberOfSimulcastStreams);
+ num_temporal_layers =
+ std::max(num_temporal_layers,
+ codec.simulcastStream[spatial_id].numberOfTemporalLayers);
+ }
+ return num_temporal_layers;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/utility/simulcast_utility.h b/third_party/libwebrtc/modules/video_coding/utility/simulcast_utility.h
new file mode 100644
index 0000000000..e25a594360
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/utility/simulcast_utility.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_UTILITY_SIMULCAST_UTILITY_H_
+#define MODULES_VIDEO_CODING_UTILITY_SIMULCAST_UTILITY_H_
+
+#include <stdint.h>
+
+#include "api/video_codecs/video_codec.h"
+
+namespace webrtc {
+
+class SimulcastUtility {
+ public:
+ static uint32_t SumStreamMaxBitrate(int streams, const VideoCodec& codec);
+ static int NumberOfSimulcastStreams(const VideoCodec& codec);
+ static bool ValidSimulcastParameters(const VideoCodec& codec,
+ int num_streams);
+ static int NumberOfTemporalLayers(const VideoCodec& codec, int spatial_id);
+ // TODO(sprang): Remove this hack when ScreenshareLayers is gone.
+ static bool IsConferenceModeScreenshare(const VideoCodec& codec);
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_UTILITY_SIMULCAST_UTILITY_H_
diff --git a/third_party/libwebrtc/modules/video_coding/utility/vp8_header_parser.cc b/third_party/libwebrtc/modules/video_coding/utility/vp8_header_parser.cc
new file mode 100644
index 0000000000..80026f9a0f
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/utility/vp8_header_parser.cc
@@ -0,0 +1,200 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/video_coding/utility/vp8_header_parser.h"
+
+#include "rtc_base/logging.h"
+#include "rtc_base/system/arch.h"
+
+namespace webrtc {
+
+namespace vp8 {
+namespace {
+const size_t kCommonPayloadHeaderLength = 3;
+const size_t kKeyPayloadHeaderLength = 10;
+const int kMbFeatureTreeProbs = 3;
+const int kNumMbSegments = 4;
+const int kNumRefLfDeltas = 4;
+const int kNumModeLfDeltas = 4;
+
+} // namespace
+
+// Bitstream parser according to
+// https://tools.ietf.org/html/rfc6386#section-7.3
+void VP8InitBitReader(VP8BitReader* const br,
+ const uint8_t* start,
+ const uint8_t* end) {
+ br->range_ = 255;
+ br->buf_ = start;
+ br->buf_end_ = end;
+ br->value_ = 0;
+ br->bits_ = 0;
+
+ // Read 2 bytes.
+ int i = 0;
+ while (++i <= 2) {
+ if (br->buf_ != br->buf_end_) {
+ br->value_ = br->value_ << 8 | *br->buf_++;
+ } else {
+ br->value_ = br->value_ << 8;
+ }
+ }
+}
+
+// Bit decoder according to https://tools.ietf.org/html/rfc6386#section-7.3
+// Reads one bit from the bitstream, given that it has probability prob/256 to
+// be 1.
+int Vp8BitReaderGetBool(VP8BitReader* br, int prob) {
+ uint32_t split = 1 + (((br->range_ - 1) * prob) >> 8);
+ uint32_t split_hi = split << 8;
+ int retval = 0;
+ if (br->value_ >= split_hi) {
+ retval = 1;
+ br->range_ -= split;
+ br->value_ -= split_hi;
+ } else {
+ retval = 0;
+ br->range_ = split;
+ }
+
+ while (br->range_ < 128) {
+ br->value_ <<= 1;
+ br->range_ <<= 1;
+ if (++br->bits_ == 8) {
+ br->bits_ = 0;
+ if (br->buf_ != br->buf_end_) {
+ br->value_ |= *br->buf_++;
+ }
+ }
+ }
+ return retval;
+}
+
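+// Reads a `num_bits`-wide unsigned literal, most significant bit first; each
+// bit is an equiprobable (128/256) bool decode. For example, VP8GetValue(br,
+// 2) performs two Vp8BitReaderGetBool(br, 128) calls, b0 then b1, and
+// returns (b0 << 1) | b1.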
+uint32_t VP8GetValue(VP8BitReader* br, int num_bits) {
+ uint32_t v = 0;
+ while (num_bits--) {
+ // According to https://tools.ietf.org/html/rfc6386
+ // Probability 128/256 is used to encode header fields.
+ v = (v << 1) | Vp8BitReaderGetBool(br, 128);
+ }
+ return v;
+}
+
+// Not a read_signed_literal() from RFC 6386!
+// This one is used to read e.g. quantizer_update, which is written as:
+// L(num_bits), sign-bit.
+int32_t VP8GetSignedValue(VP8BitReader* br, int num_bits) {
+ int v = VP8GetValue(br, num_bits);
+ int sign = VP8GetValue(br, 1);
+ return sign ? -v : v;
+}
+
+static void ParseSegmentHeader(VP8BitReader* br) {
+ int use_segment = VP8GetValue(br, 1);
+ if (use_segment) {
+ int update_map = VP8GetValue(br, 1);
+ if (VP8GetValue(br, 1)) { // update_segment_feature_data.
+ VP8GetValue(br, 1); // segment_feature_mode.
+ int s;
+ for (s = 0; s < kNumMbSegments; ++s) {
+ bool quantizer_update = VP8GetValue(br, 1);
+ if (quantizer_update) {
+ VP8GetSignedValue(br, 7);
+ }
+ }
+ for (s = 0; s < kNumMbSegments; ++s) {
+ bool loop_filter_update = VP8GetValue(br, 1);
+ if (loop_filter_update) {
+ VP8GetSignedValue(br, 6);
+ }
+ }
+ }
+ if (update_map) {
+ int s;
+ for (s = 0; s < kMbFeatureTreeProbs; ++s) {
+ bool segment_prob_update = VP8GetValue(br, 1);
+ if (segment_prob_update) {
+ VP8GetValue(br, 8);
+ }
+ }
+ }
+ }
+}
+
+static void ParseFilterHeader(VP8BitReader* br) {
+ VP8GetValue(br, 1); // filter_type.
+ VP8GetValue(br, 6); // loop_filter_level.
+ VP8GetValue(br, 3); // sharpness_level.
+
+ // mb_lf_adjustments.
+ int loop_filter_adj_enable = VP8GetValue(br, 1);
+ if (loop_filter_adj_enable) {
+ int mode_ref_lf_delta_update = VP8GetValue(br, 1);
+ if (mode_ref_lf_delta_update) {
+ int i;
+ for (i = 0; i < kNumRefLfDeltas; ++i) {
+ int ref_frame_delta_update_flag = VP8GetValue(br, 1);
+ if (ref_frame_delta_update_flag) {
+ VP8GetSignedValue(br, 6); // delta_magnitude.
+ }
+ }
+ for (i = 0; i < kNumModeLfDeltas; ++i) {
+ int mb_mode_delta_update_flag = VP8GetValue(br, 1);
+ if (mb_mode_delta_update_flag) {
+ VP8GetSignedValue(br, 6); // delta_magnitude.
+ }
+ }
+ }
+ }
+}
+
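+// Usage sketch (caller-side names are illustrative): given a complete
+// encoded VP8 payload,
+//   int qp = -1;
+//   if (vp8::GetQp(payload.data(), payload.size(), &qp)) {
+//     RTC_LOG(LS_VERBOSE) << "Base QP index: " << qp;
+//   }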
+bool GetQp(const uint8_t* buf, size_t length, int* qp) {
+ if (length < kCommonPayloadHeaderLength) {
+ RTC_LOG(LS_WARNING) << "Failed to get QP, invalid length.";
+ return false;
+ }
+ VP8BitReader br;
+ const uint32_t bits = buf[0] | (buf[1] << 8) | (buf[2] << 16);
+ int key_frame = !(bits & 1);
+ // Size of first partition in bytes.
+ uint32_t partition_length = (bits >> 5);
+ size_t header_length = kCommonPayloadHeaderLength;
+ if (key_frame) {
+ header_length = kKeyPayloadHeaderLength;
+ }
+ if (header_length + partition_length > length) {
+ RTC_LOG(LS_WARNING) << "Failed to get QP, invalid length: " << length;
+ return false;
+ }
+ buf += header_length;
+
+ VP8InitBitReader(&br, buf, buf + partition_length);
+ if (key_frame) {
+ // Color space and pixel type.
+ VP8GetValue(&br, 1);
+ VP8GetValue(&br, 1);
+ }
+ ParseSegmentHeader(&br);
+ ParseFilterHeader(&br);
+ // Parse log2_nbr_of_dct_partitions value.
+ VP8GetValue(&br, 2);
+ // Base QP.
+ const int base_q0 = VP8GetValue(&br, 7);
+ if (br.buf_ == br.buf_end_) {
+ RTC_LOG(LS_WARNING) << "Failed to get QP, bitstream is truncated or"
+ " corrupted.";
+ return false;
+ }
+ *qp = base_q0;
+ return true;
+}
+
+} // namespace vp8
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/utility/vp8_header_parser.h b/third_party/libwebrtc/modules/video_coding/utility/vp8_header_parser.h
new file mode 100644
index 0000000000..dbad999dc8
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/utility/vp8_header_parser.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_UTILITY_VP8_HEADER_PARSER_H_
+#define MODULES_VIDEO_CODING_UTILITY_VP8_HEADER_PARSER_H_
+
+#include <stdint.h>
+#include <stdio.h>
+
+namespace webrtc {
+
+namespace vp8 {
+
+typedef struct VP8BitReader VP8BitReader;
+struct VP8BitReader {
+ // Boolean decoder.
+ uint32_t value_; // Current value (2 bytes).
+ uint32_t range_; // Current range (always in [128..255] interval).
+ int bits_; // Number of bits shifted out of value, at most 7.
+ // Read buffer.
+ const uint8_t* buf_; // Next byte to be read.
+ const uint8_t* buf_end_; // End of read buffer.
+};
+
+// Gets the QP, QP range: [0, 127].
+// Returns true on success, false otherwise.
+bool GetQp(const uint8_t* buf, size_t length, int* qp);
+
+} // namespace vp8
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_UTILITY_VP8_HEADER_PARSER_H_
diff --git a/third_party/libwebrtc/modules/video_coding/utility/vp9_constants.h b/third_party/libwebrtc/modules/video_coding/utility/vp9_constants.h
new file mode 100644
index 0000000000..af2c701b82
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/utility/vp9_constants.h
@@ -0,0 +1,198 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_UTILITY_VP9_CONSTANTS_H_
+#define MODULES_VIDEO_CODING_UTILITY_VP9_CONSTANTS_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <string>
+
+namespace webrtc {
+
+// Number of frames that can be stored for future reference.
+constexpr size_t kVp9NumRefFrames = 8;
+// Number of frame contexts that can be stored for future reference.
+constexpr size_t kVp9NumFrameContexts = 4;
+// Each inter frame can use up to 3 frames for reference.
+constexpr size_t kVp9RefsPerFrame = 3;
+// Number of values that can be decoded for mv_fr.
+constexpr size_t kVp9MvFrSize = 4;
+// Number of positions to search in motion vector prediction.
+constexpr size_t kVp9MvrefNeighbours = 8;
+// Number of contexts when decoding intra_mode.
+constexpr size_t kVp9BlockSizeGroups = 4;
+// Number of different block sizes used.
+constexpr size_t kVp9BlockSizes = 13;
+// Sentinel value to mark partition choices that are illegal.
+constexpr size_t kVp9BlockInvalid = 14;
+// Number of contexts when decoding partition.
+constexpr size_t kVp9PartitionContexts = 16;
+// Smallest size of a mode info block.
+constexpr size_t kVp9MiSize = 8;
+// Minimum width of a tile in units of superblocks (although tiles on
+// the right-hand edge can be narrower).
+constexpr size_t kVp9MinTileWidth_B64 = 4;
+// Maximum width of a tile in units of superblocks.
+constexpr size_t kVp9MaxTileWidth_B64 = 64;
+// Number of motion vectors returned by find_mv_refs process.
+constexpr size_t kVp9MaxMvRefCandidates = 2;
+// Number of values that can be derived for ref_frame.
+constexpr size_t kVp9MaxRefFrames = 4;
+// Number of contexts for is_inter.
+constexpr size_t kVp9IsInterContexts = 4;
+// Number of contexts for comp_mode.
+constexpr size_t kVp9CompModeContexts = 5;
+// Number of contexts for single_ref and comp_ref.
+constexpr size_t kVp9RefContexts = 5;
+// Number of segments allowed in segmentation map.
+constexpr size_t kVp9MaxSegments = 8;
+// Index for quantizer segment feature.
+constexpr size_t kVp9SegLvlAlt_Q = 0;
+// Index for loop filter segment feature.
+constexpr size_t kVp9SegLvlAlt_L = 1;
+// Index for reference frame segment feature.
+constexpr size_t kVp9SegLvlRefFrame = 2;
+// Index for skip segment feature.
+constexpr size_t kVp9SegLvlSkip = 3;
+// Number of segment features.
+constexpr size_t kVp9SegLvlMax = 4;
+// Number of different plane types (Y or UV).
+constexpr size_t kVp9BlockTypes = 2;
+// Number of different prediction types (intra or inter).
+constexpr size_t kVp9RefTypes = 2;
+// Number of coefficient bands.
+constexpr size_t kVp9CoefBands = 6;
+// Number of contexts for decoding coefficients.
+constexpr size_t kVp9PrevCoefContexts = 6;
+// Number of coefficient probabilities that are directly transmitted.
+constexpr size_t kVp9UnconstrainedNodes = 3;
+// Number of contexts for transform size.
+constexpr size_t kVp9TxSizeContexts = 2;
+// Number of values for interp_filter.
+constexpr size_t kVp9SwitchableFilters = 3;
+// Number of contexts for interp_filter.
+constexpr size_t kVp9InterpFilterContexts = 4;
+// Number of contexts for decoding skip.
+constexpr size_t kVp9SkipContexts = 3;
+// Number of values for partition.
+constexpr size_t kVp9PartitionTypes = 4;
+// Number of values for tx_size.
+constexpr size_t kVp9TxSizes = 4;
+// Number of values for tx_mode.
+constexpr size_t kVp9TxModes = 5;
+// Inverse transform rows with DCT and columns with DCT.
+constexpr size_t kVp9DctDct = 0;
+// Inverse transform rows with DCT and columns with ADST.
+constexpr size_t kVp9AdstDct = 1;
+// Inverse transform rows with ADST and columns with DCT.
+constexpr size_t kVp9DctAdst = 2;
+// Inverse transform rows with ADST and columns with ADST.
+constexpr size_t kVp9AdstAdst = 3;
+// Number of values for y_mode.
+constexpr size_t kVp9MbModeCount = 14;
+// Number of values for intra_mode.
+constexpr size_t kVp9IntraModes = 10;
+// Number of values for inter_mode.
+constexpr size_t kVp9InterModes = 4;
+// Number of contexts for inter_mode.
+constexpr size_t kVp9InterModeContexts = 7;
+// Number of values for mv_joint.
+constexpr size_t kVp9MvJoints = 4;
+// Number of values for mv_class.
+constexpr size_t kVp9MvClasses = 11;
+// Number of values for mv_class0_bit.
+constexpr size_t kVp9Class0Size = 2;
+// Maximum number of bits for decoding motion vectors.
+constexpr size_t kVp9MvOffsetBits = 10;
+// Number of values allowed for a probability adjustment.
+constexpr size_t kVp9MaxProb = 255;
+// Number of different mode types for loop filtering.
+constexpr size_t kVp9MaxModeLfDeltas = 2;
+// Threshold at which motion vectors are considered large.
+constexpr size_t kVp9CompandedMvrefThresh = 8;
+// Maximum value used for loop filtering.
+constexpr size_t kVp9MaxLoopFilter = 63;
+// Number of bits of precision when scaling reference frames.
+constexpr size_t kVp9RefScaleShift = 14;
+// Number of bits of precision when performing inter prediction.
+constexpr size_t kVp9SubpelBits = 4;
+// 1 << kVp9SubpelBits.
+constexpr size_t kVp9SubpelShifts = 16;
+// kVp9SubpelShifts - 1.
+constexpr size_t kVp9SubpelMask = 15;
+// Value used when clipping motion vectors.
+constexpr size_t kVp9MvBorder = 128;
+// Value used when clipping motion vectors.
+constexpr size_t kVp9InterpExtend = 4;
+// Value used when clipping motion vectors.
+constexpr size_t kVp9Borderinpixels = 160;
+// Value used in adapting probabilities.
+constexpr size_t kVp9MaxUpdateFactor = 128;
+// Value used in adapting probabilities.
+constexpr size_t kVp9CountSat = 20;
+// Both candidates use ZEROMV.
+constexpr size_t kVp9BothZero = 0;
+// One candidate uses ZEROMV, one uses NEARMV or NEARESTMV.
+constexpr size_t kVp9ZeroPlusPredicted = 1;
+// Both candidates use NEARMV or NEARESTMV.
+constexpr size_t kVp9BothPredicted = 2;
+// One candidate uses NEWMV, one uses ZEROMV.
+constexpr size_t kVp9NewPlusNonIntra = 3;
+// Both candidates use NEWMV.
+constexpr size_t kVp9BothNew = 4;
+// One candidate uses intra prediction, one uses inter prediction.
+constexpr size_t kVp9IntraPlusNonIntra = 5;
+// Both candidates use intra prediction.
+constexpr size_t kVp9BothIntra = 6;
+// Sentinel value marking a case that can never occur.
+constexpr size_t kVp9InvalidCase = 9;
+
+enum class Vp9TxMode : uint8_t {
+ kOnly4X4 = 0,
+ kAllow8X8 = 1,
+ kAllow16x16 = 2,
+ kAllow32x32 = 3,
+ kTxModeSelect = 4
+};
+
+enum Vp9BlockSize : uint8_t {
+ kBlock4X4 = 0,
+ kBlock4X8 = 1,
+ kBlock8X4 = 2,
+ kBlock8X8 = 3,
+ kBlock8X16 = 4,
+ kBlock16X8 = 5,
+ kBlock16X16 = 6,
+ kBlock16X32 = 7,
+ kBlock32X16 = 8,
+ kBlock32X32 = 9,
+ kBlock32X64 = 10,
+ kBlock64X32 = 11,
+ kBlock64X64 = 12
+};
+
+enum Vp9Partition : uint8_t {
+ kPartitionNone = 0,
+ kPartitionHorizontal = 1,
+ kPartitionVertical = 2,
+ kPartitionSplit = 3
+};
+
+enum class Vp9ReferenceMode : uint8_t {
+ kSingleReference = 0,
+ kCompoundReference = 1,
+ kReferenceModeSelect = 2,
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_UTILITY_VP9_CONSTANTS_H_
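A minimal sketch (illustrative only, not part of this patch) of how the block-size enum above can be used: Vp9BlockSize is ordered as <width>X<height>, so it can index pixel-dimension lookup tables; the kBlockWidthsPx/kBlockHeightsPx names are hypothetical helpers.

#include <cstdint>
#include "modules/video_coding/utility/vp9_constants.h"

namespace {
// Pixel dimensions per Vp9BlockSize entry, kBlock4X4 through kBlock64X64.
constexpr uint8_t kBlockWidthsPx[webrtc::kVp9BlockSizes] = {
    4, 4, 8, 8, 8, 16, 16, 16, 32, 32, 32, 64, 64};
constexpr uint8_t kBlockHeightsPx[webrtc::kVp9BlockSizes] = {
    4, 8, 4, 8, 16, 8, 16, 32, 16, 32, 64, 32, 64};
static_assert(kBlockWidthsPx[webrtc::kBlock32X64] == 32 &&
                  kBlockHeightsPx[webrtc::kBlock32X64] == 64,
              "enum order is <width>X<height>");
}  // namespace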
diff --git a/third_party/libwebrtc/modules/video_coding/utility/vp9_uncompressed_header_parser.cc b/third_party/libwebrtc/modules/video_coding/utility/vp9_uncompressed_header_parser.cc
new file mode 100644
index 0000000000..bf9d51f692
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/utility/vp9_uncompressed_header_parser.cc
@@ -0,0 +1,533 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/video_coding/utility/vp9_uncompressed_header_parser.h"
+
+#include "absl/numeric/bits.h"
+#include "absl/strings/string_view.h"
+#include "rtc_base/bitstream_reader.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/strings/string_builder.h"
+
+namespace webrtc {
+namespace {
+const size_t kVp9NumRefsPerFrame = 3;
+const size_t kVp9MaxRefLFDeltas = 4;
+const size_t kVp9MaxModeLFDeltas = 2;
+const size_t kVp9MinTileWidthB64 = 4;
+const size_t kVp9MaxTileWidthB64 = 64;
+
+void Vp9ReadColorConfig(BitstreamReader& br,
+ Vp9UncompressedHeader* frame_info) {
+ if (frame_info->profile == 2 || frame_info->profile == 3) {
+ frame_info->bit_detph =
+ br.Read<bool>() ? Vp9BitDept::k12Bit : Vp9BitDept::k10Bit;
+ } else {
+ frame_info->bit_detph = Vp9BitDept::k8Bit;
+ }
+
+ frame_info->color_space = static_cast<Vp9ColorSpace>(br.ReadBits(3));
+
+ if (frame_info->color_space != Vp9ColorSpace::CS_RGB) {
+ frame_info->color_range =
+ br.Read<bool>() ? Vp9ColorRange::kFull : Vp9ColorRange::kStudio;
+
+ if (frame_info->profile == 1 || frame_info->profile == 3) {
+ static constexpr Vp9YuvSubsampling kSubSamplings[] = {
+ Vp9YuvSubsampling::k444, Vp9YuvSubsampling::k440,
+ Vp9YuvSubsampling::k422, Vp9YuvSubsampling::k420};
+ frame_info->sub_sampling = kSubSamplings[br.ReadBits(2)];
+
+ if (br.Read<bool>()) {
+ RTC_LOG(LS_WARNING) << "Failed to parse header. Reserved bit set.";
+ br.Invalidate();
+ return;
+ }
+ } else {
+ // Profile 0 or 2.
+ frame_info->sub_sampling = Vp9YuvSubsampling::k420;
+ }
+ } else {
+ // SRGB
+ frame_info->color_range = Vp9ColorRange::kFull;
+ if (frame_info->profile == 1 || frame_info->profile == 3) {
+ frame_info->sub_sampling = Vp9YuvSubsampling::k444;
+ if (br.Read<bool>()) {
+ RTC_LOG(LS_WARNING) << "Failed to parse header. Reserved bit set.";
+ br.Invalidate();
+ }
+ } else {
+ RTC_LOG(LS_WARNING) << "Failed to parse header. 4:4:4 color not supported"
+ " in profile 0 or 2.";
+ br.Invalidate();
+ }
+ }
+}
+
+void ReadRefreshFrameFlags(BitstreamReader& br,
+ Vp9UncompressedHeader* frame_info) {
+ // Refresh frame flags.
+ uint8_t flags = br.Read<uint8_t>();
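+  // The MSB of `flags` maps to buffer 0 and the LSB to buffer 7.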
+ for (int i = 0; i < 8; ++i) {
+ frame_info->updated_buffers.set(i, (flags & (0x01 << (7 - i))) != 0);
+ }
+}
+
+void Vp9ReadFrameSize(BitstreamReader& br, Vp9UncompressedHeader* frame_info) {
+ // 16 bits: frame (width|height) - 1.
+ frame_info->frame_width = br.Read<uint16_t>() + 1;
+ frame_info->frame_height = br.Read<uint16_t>() + 1;
+}
+
+void Vp9ReadRenderSize(size_t total_buffer_size_bits,
+ BitstreamReader& br,
+ Vp9UncompressedHeader* frame_info) {
+ // render_and_frame_size_different
+ if (br.Read<bool>()) {
+ frame_info->render_size_offset_bits =
+ total_buffer_size_bits - br.RemainingBitCount();
+ // 16 bits: render (width|height) - 1.
+ frame_info->render_width = br.Read<uint16_t>() + 1;
+ frame_info->render_height = br.Read<uint16_t>() + 1;
+ } else {
+ frame_info->render_height = frame_info->frame_height;
+ frame_info->render_width = frame_info->frame_width;
+ }
+}
+
+void Vp9ReadFrameSizeFromRefs(BitstreamReader& br,
+ Vp9UncompressedHeader* frame_info) {
+ for (size_t i = 0; i < kVp9NumRefsPerFrame; i++) {
+ // Size in refs.
+ if (br.Read<bool>()) {
+ frame_info->infer_size_from_reference = frame_info->reference_buffers[i];
+ return;
+ }
+ }
+
+ Vp9ReadFrameSize(br, frame_info);
+}
+
+void Vp9ReadLoopfilter(BitstreamReader& br) {
+ // 6 bits: filter level.
+ // 3 bits: sharpness level.
+ br.ConsumeBits(9);
+
+ if (!br.Read<bool>()) { // mode_ref_delta_enabled
+ return;
+ }
+ if (!br.Read<bool>()) { // mode_ref_delta_update
+ return;
+ }
+
+ for (size_t i = 0; i < kVp9MaxRefLFDeltas; i++) {
+ if (br.Read<bool>()) { // update_ref_delta
+ br.ConsumeBits(7);
+ }
+ }
+ for (size_t i = 0; i < kVp9MaxModeLFDeltas; i++) {
+ if (br.Read<bool>()) { // update_mode_delta
+ br.ConsumeBits(7);
+ }
+ }
+}
+
+void Vp9ReadQp(BitstreamReader& br, Vp9UncompressedHeader* frame_info) {
+ frame_info->base_qp = br.Read<uint8_t>();
+
+ // yuv offsets
+ frame_info->is_lossless = frame_info->base_qp == 0;
+ for (int i = 0; i < 3; ++i) {
+ if (br.Read<bool>()) { // if delta_coded
+      // delta_q is a signed integer with the leading 4 bits containing the
+      // absolute value and the last bit containing the sign. There are two
+      // ways to represent zero with such an encoding.
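+      // For example, bit patterns 0b0000'0 and 0b0000'1 both decode to zero.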
+ if ((br.ReadBits(5) & 0b1111'0) != 0) { // delta_q
+ frame_info->is_lossless = false;
+ }
+ }
+ }
+}
+
+void Vp9ReadSegmentationParams(BitstreamReader& br,
+ Vp9UncompressedHeader* frame_info) {
+ constexpr int kSegmentationFeatureBits[kVp9SegLvlMax] = {8, 6, 2, 0};
+ constexpr bool kSegmentationFeatureSigned[kVp9SegLvlMax] = {true, true, false,
+ false};
+
+ frame_info->segmentation_enabled = br.Read<bool>();
+ if (!frame_info->segmentation_enabled) {
+ return;
+ }
+
+ if (br.Read<bool>()) { // update_map
+ frame_info->segmentation_tree_probs.emplace();
+ for (int i = 0; i < 7; ++i) {
+ if (br.Read<bool>()) {
+ (*frame_info->segmentation_tree_probs)[i] = br.Read<uint8_t>();
+ } else {
+ (*frame_info->segmentation_tree_probs)[i] = 255;
+ }
+ }
+
+ // temporal_update
+ frame_info->segmentation_pred_prob.emplace();
+ if (br.Read<bool>()) {
+ for (int i = 0; i < 3; ++i) {
+ if (br.Read<bool>()) {
+ (*frame_info->segmentation_pred_prob)[i] = br.Read<uint8_t>();
+ } else {
+ (*frame_info->segmentation_pred_prob)[i] = 255;
+ }
+ }
+ } else {
+ frame_info->segmentation_pred_prob->fill(255);
+ }
+ }
+
+ if (br.Read<bool>()) { // segmentation_update_data
+ frame_info->segmentation_is_delta = br.Read<bool>();
+ for (size_t i = 0; i < kVp9MaxSegments; ++i) {
+ for (size_t j = 0; j < kVp9SegLvlMax; ++j) {
+ if (!br.Read<bool>()) { // feature_enabled
+ continue;
+ }
+ if (kSegmentationFeatureBits[j] == 0) {
+          // No feature bits used and no sign, just mark it and continue.
+ frame_info->segmentation_features[i][j] = 1;
+ continue;
+ }
+ frame_info->segmentation_features[i][j] =
+ br.ReadBits(kSegmentationFeatureBits[j]);
+ if (kSegmentationFeatureSigned[j] && br.Read<bool>()) {
+ (*frame_info->segmentation_features[i][j]) *= -1;
+ }
+ }
+ }
+ }
+}
+
+void Vp9ReadTileInfo(BitstreamReader& br, Vp9UncompressedHeader* frame_info) {
+ size_t mi_cols = (frame_info->frame_width + 7) >> 3;
+ size_t sb64_cols = (mi_cols + 7) >> 3;
+
+ size_t min_log2 = 0;
+ while ((kVp9MaxTileWidthB64 << min_log2) < sb64_cols) {
+ ++min_log2;
+ }
+
+ size_t max_log2 = 1;
+ while ((sb64_cols >> max_log2) >= kVp9MinTileWidthB64) {
+ ++max_log2;
+ }
+ --max_log2;
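+  // Example: a 640-pixel-wide frame gives mi_cols = 80 and sb64_cols = 10,
+  // so min_log2 = 0 and max_log2 = 1.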
+
+ frame_info->tile_cols_log2 = min_log2;
+ while (frame_info->tile_cols_log2 < max_log2) {
+ if (br.Read<bool>()) {
+ ++frame_info->tile_cols_log2;
+ } else {
+ break;
+ }
+ }
+ frame_info->tile_rows_log2 = 0;
+ if (br.Read<bool>()) {
+ ++frame_info->tile_rows_log2;
+ if (br.Read<bool>()) {
+ ++frame_info->tile_rows_log2;
+ }
+ }
+}
+
+const Vp9InterpolationFilter kLiteralToType[4] = {
+ Vp9InterpolationFilter::kEightTapSmooth, Vp9InterpolationFilter::kEightTap,
+ Vp9InterpolationFilter::kEightTapSharp, Vp9InterpolationFilter::kBilinear};
+} // namespace
+
+std::string Vp9UncompressedHeader::ToString() const {
+ char buf[1024];
+ rtc::SimpleStringBuilder oss(buf);
+
+ oss << "Vp9UncompressedHeader { "
+ << "profile = " << profile;
+
+ if (show_existing_frame) {
+ oss << ", show_existing_frame = " << *show_existing_frame << " }";
+ return oss.str();
+ }
+
+ oss << ", frame type = " << (is_keyframe ? "key" : "delta")
+ << ", show_frame = " << (show_frame ? "true" : "false")
+ << ", error_resilient = " << (error_resilient ? "true" : "false");
+
+ oss << ", bit_depth = ";
+ switch (bit_detph) {
+ case Vp9BitDept::k8Bit:
+ oss << "8bit";
+ break;
+ case Vp9BitDept::k10Bit:
+ oss << "10bit";
+ break;
+ case Vp9BitDept::k12Bit:
+ oss << "12bit";
+ break;
+ }
+
+ if (color_space) {
+ oss << ", color_space = ";
+ switch (*color_space) {
+ case Vp9ColorSpace::CS_UNKNOWN:
+ oss << "unknown";
+ break;
+ case Vp9ColorSpace::CS_BT_601:
+ oss << "CS_BT_601 Rec. ITU-R BT.601-7";
+ break;
+ case Vp9ColorSpace::CS_BT_709:
+ oss << "Rec. ITU-R BT.709-6";
+ break;
+ case Vp9ColorSpace::CS_SMPTE_170:
+ oss << "SMPTE-170";
+ break;
+ case Vp9ColorSpace::CS_SMPTE_240:
+ oss << "SMPTE-240";
+ break;
+ case Vp9ColorSpace::CS_BT_2020:
+ oss << "Rec. ITU-R BT.2020-2";
+ break;
+ case Vp9ColorSpace::CS_RESERVED:
+ oss << "Reserved";
+ break;
+ case Vp9ColorSpace::CS_RGB:
+ oss << "sRGB (IEC 61966-2-1)";
+ break;
+ }
+ }
+
+ if (color_range) {
+ oss << ", color_range = ";
+ switch (*color_range) {
+ case Vp9ColorRange::kFull:
+ oss << "full";
+ break;
+ case Vp9ColorRange::kStudio:
+ oss << "studio";
+ break;
+ }
+ }
+
+ if (sub_sampling) {
+ oss << ", sub_sampling = ";
+ switch (*sub_sampling) {
+ case Vp9YuvSubsampling::k444:
+ oss << "444";
+ break;
+ case Vp9YuvSubsampling::k440:
+ oss << "440";
+ break;
+ case Vp9YuvSubsampling::k422:
+ oss << "422";
+ break;
+ case Vp9YuvSubsampling::k420:
+ oss << "420";
+ break;
+ }
+ }
+
+ if (infer_size_from_reference) {
+ oss << ", infer_frame_resolution_from = " << *infer_size_from_reference;
+ } else {
+ oss << ", frame_width = " << frame_width
+ << ", frame_height = " << frame_height;
+ }
+ if (render_width != 0 && render_height != 0) {
+ oss << ", render_width = " << render_width
+ << ", render_height = " << render_height;
+ }
+
+ oss << ", base qp = " << base_qp;
+ if (reference_buffers[0] != -1) {
+ oss << ", last_buffer = " << reference_buffers[0];
+ }
+ if (reference_buffers[1] != -1) {
+ oss << ", golden_buffer = " << reference_buffers[1];
+ }
+ if (reference_buffers[2] != -1) {
+ oss << ", altref_buffer = " << reference_buffers[2];
+ }
+
+ oss << ", updated buffers = { ";
+ bool first = true;
+ for (int i = 0; i < 8; ++i) {
+ if (updated_buffers.test(i)) {
+ if (first) {
+ first = false;
+ } else {
+ oss << ", ";
+ }
+ oss << i;
+ }
+ }
+ oss << " }";
+
+ oss << ", compressed_header_size_bytes = " << compressed_header_size;
+
+ oss << " }";
+ return oss.str();
+}
+
+void Parse(BitstreamReader& br,
+ Vp9UncompressedHeader* frame_info,
+ bool qp_only) {
+ const size_t total_buffer_size_bits = br.RemainingBitCount();
+
+ // Frame marker.
+ if (br.ReadBits(2) != 0b10) {
+ RTC_LOG(LS_WARNING) << "Failed to parse header. Frame marker should be 2.";
+ br.Invalidate();
+ return;
+ }
+
+ // Profile has low bit first.
+ frame_info->profile = br.ReadBit();
+ frame_info->profile |= br.ReadBit() << 1;
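+  // Profile 3 is followed by a reserved bit that must be zero.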
+ if (frame_info->profile > 2 && br.Read<bool>()) {
+ RTC_LOG(LS_WARNING)
+ << "Failed to parse header. Unsupported bitstream profile.";
+ br.Invalidate();
+ return;
+ }
+
+ // Show existing frame.
+ if (br.Read<bool>()) {
+ frame_info->show_existing_frame = br.ReadBits(3);
+ return;
+ }
+
+ // Frame type: KEY_FRAME(0), INTER_FRAME(1).
+ frame_info->is_keyframe = !br.Read<bool>();
+ frame_info->show_frame = br.Read<bool>();
+ frame_info->error_resilient = br.Read<bool>();
+
+ if (frame_info->is_keyframe) {
+ if (br.ReadBits(24) != 0x498342) {
+ RTC_LOG(LS_WARNING) << "Failed to parse header. Invalid sync code.";
+ br.Invalidate();
+ return;
+ }
+
+ Vp9ReadColorConfig(br, frame_info);
+ Vp9ReadFrameSize(br, frame_info);
+ Vp9ReadRenderSize(total_buffer_size_bits, br, frame_info);
+
+ // Key-frames implicitly update all buffers.
+ frame_info->updated_buffers.set();
+ } else {
+ // Non-keyframe.
+ bool is_intra_only = false;
+ if (!frame_info->show_frame) {
+ is_intra_only = br.Read<bool>();
+ }
+ if (!frame_info->error_resilient) {
+ br.ConsumeBits(2); // Reset frame context.
+ }
+
+ if (is_intra_only) {
+ if (br.ReadBits(24) != 0x498342) {
+ RTC_LOG(LS_WARNING) << "Failed to parse header. Invalid sync code.";
+ br.Invalidate();
+ return;
+ }
+
+ if (frame_info->profile > 0) {
+ Vp9ReadColorConfig(br, frame_info);
+ } else {
+ frame_info->color_space = Vp9ColorSpace::CS_BT_601;
+ frame_info->sub_sampling = Vp9YuvSubsampling::k420;
+ frame_info->bit_detph = Vp9BitDept::k8Bit;
+ }
+ frame_info->reference_buffers.fill(-1);
+ ReadRefreshFrameFlags(br, frame_info);
+ Vp9ReadFrameSize(br, frame_info);
+ Vp9ReadRenderSize(total_buffer_size_bits, br, frame_info);
+ } else {
+ ReadRefreshFrameFlags(br, frame_info);
+
+ frame_info->reference_buffers_sign_bias[0] = false;
+ for (size_t i = 0; i < kVp9NumRefsPerFrame; i++) {
+ frame_info->reference_buffers[i] = br.ReadBits(3);
+ frame_info->reference_buffers_sign_bias[Vp9ReferenceFrame::kLast + i] =
+ br.Read<bool>();
+ }
+
+ Vp9ReadFrameSizeFromRefs(br, frame_info);
+ Vp9ReadRenderSize(total_buffer_size_bits, br, frame_info);
+
+ frame_info->allow_high_precision_mv = br.Read<bool>();
+
+ // Interpolation filter.
+ if (br.Read<bool>()) {
+ frame_info->interpolation_filter = Vp9InterpolationFilter::kSwitchable;
+ } else {
+ frame_info->interpolation_filter = kLiteralToType[br.ReadBits(2)];
+ }
+ }
+ }
+
+ if (!frame_info->error_resilient) {
+ // 1 bit: Refresh frame context.
+ // 1 bit: Frame parallel decoding mode.
+ br.ConsumeBits(2);
+ }
+
+ // Frame context index.
+ frame_info->frame_context_idx = br.ReadBits(2);
+
+ Vp9ReadLoopfilter(br);
+
+ // Read base QP.
+ Vp9ReadQp(br, frame_info);
+
+ if (qp_only) {
+ // Not interested in the rest of the header, return early.
+ return;
+ }
+
+ Vp9ReadSegmentationParams(br, frame_info);
+ Vp9ReadTileInfo(br, frame_info);
+ frame_info->compressed_header_size = br.Read<uint16_t>();
+ frame_info->uncompressed_header_size =
+ (total_buffer_size_bits / 8) - (br.RemainingBitCount() / 8);
+}
+
+absl::optional<Vp9UncompressedHeader> ParseUncompressedVp9Header(
+ rtc::ArrayView<const uint8_t> buf) {
+ BitstreamReader reader(buf);
+ Vp9UncompressedHeader frame_info;
+ Parse(reader, &frame_info, /*qp_only=*/false);
+ if (reader.Ok() && frame_info.frame_width > 0) {
+ return frame_info;
+ }
+ return absl::nullopt;
+}
+
+namespace vp9 {
+
+bool GetQp(const uint8_t* buf, size_t length, int* qp) {
+ BitstreamReader reader(rtc::MakeArrayView(buf, length));
+ Vp9UncompressedHeader frame_info;
+ Parse(reader, &frame_info, /*qp_only=*/true);
+ if (!reader.Ok()) {
+ return false;
+ }
+ *qp = frame_info.base_qp;
+ return true;
+}
+
+} // namespace vp9
+} // namespace webrtc
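A hedged usage sketch of the QP helper defined above (not part of this patch; `payload` is a hypothetical rtc::ArrayView<const uint8_t> over an encoded VP9 frame):

int qp = 0;
if (webrtc::vp9::GetQp(payload.data(), payload.size(), &qp)) {
  // qp is the frame's base QP in [0, 255]; per-segment deltas parsed by
  // Vp9ReadSegmentationParams() may still adjust it per block.
}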
diff --git a/third_party/libwebrtc/modules/video_coding/utility/vp9_uncompressed_header_parser.h b/third_party/libwebrtc/modules/video_coding/utility/vp9_uncompressed_header_parser.h
new file mode 100644
index 0000000000..8d1b88c3d3
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/utility/vp9_uncompressed_header_parser.h
@@ -0,0 +1,155 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_UTILITY_VP9_UNCOMPRESSED_HEADER_PARSER_H_
+#define MODULES_VIDEO_CODING_UTILITY_VP9_UNCOMPRESSED_HEADER_PARSER_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <array>
+#include <bitset>
+#include <string>
+
+#include "absl/types/optional.h"
+#include "api/array_view.h"
+#include "modules/video_coding/utility/vp9_constants.h"
+
+namespace webrtc {
+
+namespace vp9 {
+
+// Gets the QP, QP range: [0, 255].
+// Returns true on success, false otherwise.
+bool GetQp(const uint8_t* buf, size_t length, int* qp);
+
+} // namespace vp9
+
+// Bit depth per channel. Support varies by profile.
+enum class Vp9BitDept : uint8_t {
+ k8Bit = 8,
+ k10Bit = 10,
+ k12Bit = 12,
+};
+
+enum class Vp9ColorSpace : uint8_t {
+ CS_UNKNOWN = 0, // Unknown (in this case the color space must be signaled
+ // outside the VP9 bitstream).
+ CS_BT_601 = 1, // CS_BT_601 Rec. ITU-R BT.601-7
+ CS_BT_709 = 2, // Rec. ITU-R BT.709-6
+ CS_SMPTE_170 = 3, // SMPTE-170
+ CS_SMPTE_240 = 4, // SMPTE-240
+ CS_BT_2020 = 5, // Rec. ITU-R BT.2020-2
+ CS_RESERVED = 6, // Reserved
+ CS_RGB = 7, // sRGB (IEC 61966-2-1)
+};
+
+enum class Vp9ColorRange {
+ kStudio, // Studio swing:
+            // For BitDepth equal to 8:
+            //   Y is between 16 and 235 inclusive.
+            //   U and V are between 16 and 240 inclusive.
+            // For BitDepth equal to 10:
+            //   Y is between 64 and 940 inclusive.
+            //   U and V are between 64 and 960 inclusive.
+            // For BitDepth equal to 12:
+            //   Y is between 256 and 3760 inclusive.
+            //   U and V are between 256 and 3840 inclusive.
+ kFull // Full swing; no restriction on Y, U, V values.
+};
+
+enum class Vp9YuvSubsampling {
+ k444,
+ k440,
+ k422,
+ k420,
+};
+
+enum Vp9ReferenceFrame : int {
+ kNone = -1,
+ kIntra = 0,
+ kLast = 1,
+ kGolden = 2,
+ kAltref = 3,
+};
+
+enum class Vp9InterpolationFilter : uint8_t {
+ kEightTap = 0,
+ kEightTapSmooth = 1,
+ kEightTapSharp = 2,
+ kBilinear = 3,
+ kSwitchable = 4
+};
+
+struct Vp9UncompressedHeader {
+ int profile = 0; // Profiles 0-3 are valid.
+ absl::optional<uint8_t> show_existing_frame;
+ bool is_keyframe = false;
+ bool show_frame = false;
+ bool error_resilient = false;
+ Vp9BitDept bit_detph = Vp9BitDept::k8Bit;
+ absl::optional<Vp9ColorSpace> color_space;
+ absl::optional<Vp9ColorRange> color_range;
+ absl::optional<Vp9YuvSubsampling> sub_sampling;
+ int frame_width = 0;
+ int frame_height = 0;
+ int render_width = 0;
+ int render_height = 0;
+  // Log2 of the number of tile columns/rows used.
+ size_t tile_cols_log2 = 0; // tile_cols = 1 << tile_cols_log2
+ size_t tile_rows_log2 = 0; // tile_rows = 1 << tile_rows_log2
+ absl::optional<size_t> render_size_offset_bits;
+ Vp9InterpolationFilter interpolation_filter =
+ Vp9InterpolationFilter::kEightTap;
+ bool allow_high_precision_mv = false;
+ int base_qp = 0;
+ bool is_lossless = false;
+ uint8_t frame_context_idx = 0;
+
+ bool segmentation_enabled = false;
+ absl::optional<std::array<uint8_t, 7>> segmentation_tree_probs;
+ absl::optional<std::array<uint8_t, 3>> segmentation_pred_prob;
+ bool segmentation_is_delta = false;
+ std::array<std::array<absl::optional<int>, kVp9SegLvlMax>, kVp9MaxSegments>
+ segmentation_features;
+
+ // Which of the 8 reference buffers may be used as references for this frame.
+ // -1 indicates not used (e.g. {-1, -1, -1} for intra-only frames).
+ std::array<int, kVp9RefsPerFrame> reference_buffers = {-1, -1, -1};
+  // Sign bias corresponding to reference buffers, where the index is a
+  // Vp9ReferenceFrame.
+  // false/0 indicates a backwards reference, true/1 a forwards reference.
+ std::bitset<kVp9MaxRefFrames> reference_buffers_sign_bias = 0;
+
+ // Indicates which reference buffer [0,7] to infer the frame size from.
+ absl::optional<int> infer_size_from_reference;
+ // Which of the 8 reference buffers are updated by this frame.
+ std::bitset<kVp9NumRefFrames> updated_buffers = 0;
+
+ // Header sizes, in bytes.
+ uint32_t uncompressed_header_size = 0;
+ uint32_t compressed_header_size = 0;
+
+ bool is_intra_only() const {
+ return reference_buffers[0] == -1 && reference_buffers[1] == -1 &&
+ reference_buffers[2] == -1;
+ }
+
+ std::string ToString() const;
+};
+
+// Parses the uncompressed header and populates (most) values in a
+// Vp9UncompressedHeader struct. Returns nullopt on failure.
+absl::optional<Vp9UncompressedHeader> ParseUncompressedVp9Header(
+ rtc::ArrayView<const uint8_t> buf);
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_UTILITY_VP9_UNCOMPRESSED_HEADER_PARSER_H_
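An illustrative way to drive the full parser declared above (a sketch, assuming `frame` is an rtc::ArrayView<const uint8_t> over an encoded VP9 frame; not part of this patch):

absl::optional<webrtc::Vp9UncompressedHeader> hdr =
    webrtc::ParseUncompressedVp9Header(frame);
if (hdr.has_value()) {
  RTC_LOG(LS_INFO) << hdr->ToString();
  if (!hdr->is_keyframe && !hdr->is_intra_only()) {
    // reference_buffers[0..2] hold the last/golden/altref buffer indices.
  }
}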
diff --git a/third_party/libwebrtc/modules/video_coding/utility/vp9_uncompressed_header_parser_unittest.cc b/third_party/libwebrtc/modules/video_coding/utility/vp9_uncompressed_header_parser_unittest.cc
new file mode 100644
index 0000000000..d8cc738e07
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/utility/vp9_uncompressed_header_parser_unittest.cc
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/utility/vp9_uncompressed_header_parser.h"
+
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace vp9 {
+using ::testing::AllOf;
+using ::testing::ElementsAre;
+using ::testing::Eq;
+using ::testing::Field;
+using ::testing::Optional;
+
+TEST(Vp9UncompressedHeaderParserTest, FrameWithSegmentation) {
+ // Uncompressed header from a frame generated with libvpx.
+ // Encoded QVGA frame (SL0 of a VGA frame) that includes a segmentation.
+ const uint8_t kHeader[] = {
+ 0x87, 0x01, 0x00, 0x00, 0x02, 0x7e, 0x01, 0xdf, 0x02, 0x7f, 0x01, 0xdf,
+ 0xc6, 0x87, 0x04, 0x83, 0x83, 0x2e, 0x46, 0x60, 0x20, 0x38, 0x0c, 0x06,
+ 0x03, 0xcd, 0x80, 0xc0, 0x60, 0x9f, 0xc5, 0x46, 0x00, 0x00, 0x00, 0x00,
+ 0x2e, 0x73, 0xb7, 0xee, 0x22, 0x06, 0x81, 0x82, 0xd4, 0xef, 0xc3, 0x58,
+ 0x1f, 0x12, 0xd2, 0x7b, 0x28, 0x1f, 0x80, 0xfc, 0x07, 0xe0, 0x00, 0x00};
+
+ absl::optional<Vp9UncompressedHeader> frame_info =
+ ParseUncompressedVp9Header(kHeader);
+ ASSERT_TRUE(frame_info.has_value());
+
+ EXPECT_FALSE(frame_info->is_keyframe);
+ EXPECT_TRUE(frame_info->error_resilient);
+ EXPECT_TRUE(frame_info->show_frame);
+ EXPECT_FALSE(frame_info->show_existing_frame);
+ EXPECT_EQ(frame_info->base_qp, 185);
+ EXPECT_EQ(frame_info->frame_width, 320);
+ EXPECT_EQ(frame_info->frame_height, 240);
+ EXPECT_EQ(frame_info->render_width, 640);
+ EXPECT_EQ(frame_info->render_height, 480);
+ EXPECT_TRUE(frame_info->allow_high_precision_mv);
+ EXPECT_EQ(frame_info->frame_context_idx, 0u);
+ EXPECT_EQ(frame_info->interpolation_filter,
+ Vp9InterpolationFilter::kSwitchable);
+ EXPECT_EQ(frame_info->is_lossless, false);
+ EXPECT_EQ(frame_info->profile, 0);
+ EXPECT_THAT(frame_info->reference_buffers, ElementsAre(0, 0, 0));
+ EXPECT_THAT(frame_info->reference_buffers_sign_bias, 0b0000);
+ EXPECT_EQ(frame_info->updated_buffers, 0b10000000);
+ EXPECT_EQ(frame_info->tile_cols_log2, 0u);
+ EXPECT_EQ(frame_info->tile_rows_log2, 0u);
+ EXPECT_EQ(frame_info->render_size_offset_bits, 64u);
+ EXPECT_EQ(frame_info->compressed_header_size, 23u);
+ EXPECT_EQ(frame_info->uncompressed_header_size, 37u);
+
+ EXPECT_TRUE(frame_info->segmentation_enabled);
+ EXPECT_FALSE(frame_info->segmentation_is_delta);
+ EXPECT_THAT(frame_info->segmentation_pred_prob,
+ Optional(ElementsAre(205, 1, 1)));
+ EXPECT_THAT(frame_info->segmentation_tree_probs,
+ Optional(ElementsAre(255, 255, 128, 1, 128, 128, 128)));
+ EXPECT_THAT(frame_info->segmentation_features[1][kVp9SegLvlAlt_Q], Eq(-63));
+ EXPECT_THAT(frame_info->segmentation_features[2][kVp9SegLvlAlt_Q], Eq(-81));
+}
+
+TEST(Vp9UncompressedHeaderParserTest, SegmentationWithDefaultPredProbs) {
+  const uint8_t kHeader[] = {0x90, 0x49, 0x83, 0x42, 0x80, 0x2e,
+                             0x30, 0x00, 0xb0, 0x00, 0x37, 0xff,
+                             0x06, 0x80, 0x00, 0x00, 0x00, 0x00};
+ absl::optional<Vp9UncompressedHeader> frame_info =
+ ParseUncompressedVp9Header(kHeader);
+ ASSERT_TRUE(frame_info.has_value());
+ EXPECT_THAT(frame_info->segmentation_pred_prob,
+ Optional(ElementsAre(255, 255, 255)));
+}
+
+TEST(Vp9UncompressedHeaderParserTest, SegmentationWithSkipLevel) {
+ const uint8_t kHeader[] = {0x90, 0x49, 0x83, 0x42, 0x80, 0x2e, 0x30, 0x00,
+ 0xb0, 0x00, 0x37, 0xff, 0x06, 0x80, 0x01, 0x08,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+ absl::optional<Vp9UncompressedHeader> frame_info =
+ ParseUncompressedVp9Header(kHeader);
+ ASSERT_TRUE(frame_info.has_value());
+ EXPECT_THAT(frame_info->segmentation_features[0][kVp9SegLvlSkip], Eq(1));
+}
+
+} // namespace vp9
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/video_codec_initializer.cc b/third_party/libwebrtc/modules/video_coding/video_codec_initializer.cc
new file mode 100644
index 0000000000..e1885d74c8
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/video_codec_initializer.cc
@@ -0,0 +1,352 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/include/video_codec_initializer.h"
+
+#include <stdint.h>
+#include <string.h>
+
+#include <algorithm>
+
+#include "absl/types/optional.h"
+#include "api/scoped_refptr.h"
+#include "api/units/data_rate.h"
+#include "api/video/video_bitrate_allocation.h"
+#include "api/video_codecs/video_encoder.h"
+#include "modules/video_coding/codecs/av1/av1_svc_config.h"
+#include "modules/video_coding/codecs/vp8/vp8_scalability.h"
+#include "modules/video_coding/codecs/vp9/svc_config.h"
+#include "modules/video_coding/include/video_coding_defines.h"
+#include "modules/video_coding/svc/scalability_mode_util.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/experiments/min_video_bitrate_experiment.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/numerics/safe_conversions.h"
+
+namespace webrtc {
+
+bool VideoCodecInitializer::SetupCodec(const VideoEncoderConfig& config,
+ const std::vector<VideoStream>& streams,
+ VideoCodec* codec) {
+ if (config.codec_type == kVideoCodecMultiplex) {
+ VideoEncoderConfig associated_config = config.Copy();
+ associated_config.codec_type = kVideoCodecVP9;
+ if (!SetupCodec(associated_config, streams, codec)) {
+ RTC_LOG(LS_ERROR) << "Failed to create stereo encoder configuration.";
+ return false;
+ }
+ codec->codecType = kVideoCodecMultiplex;
+ return true;
+ }
+
+ *codec = VideoEncoderConfigToVideoCodec(config, streams);
+ return true;
+}
+
+// TODO(sprang): Split this up and separate the codec specific parts.
+VideoCodec VideoCodecInitializer::VideoEncoderConfigToVideoCodec(
+ const VideoEncoderConfig& config,
+ const std::vector<VideoStream>& streams) {
+ static const int kEncoderMinBitrateKbps = 30;
+ RTC_DCHECK(!streams.empty());
+ RTC_DCHECK_GE(config.min_transmit_bitrate_bps, 0);
+
+ VideoCodec video_codec;
+ video_codec.codecType = config.codec_type;
+
+ switch (config.content_type) {
+ case VideoEncoderConfig::ContentType::kRealtimeVideo:
+ video_codec.mode = VideoCodecMode::kRealtimeVideo;
+ break;
+ case VideoEncoderConfig::ContentType::kScreen:
+ video_codec.mode = VideoCodecMode::kScreensharing;
+ break;
+ }
+
+ video_codec.legacy_conference_mode =
+ config.content_type == VideoEncoderConfig::ContentType::kScreen &&
+ config.legacy_conference_mode;
+
+ video_codec.SetFrameDropEnabled(config.frame_drop_enabled);
+ video_codec.numberOfSimulcastStreams =
+ static_cast<unsigned char>(streams.size());
+ video_codec.minBitrate = streams[0].min_bitrate_bps / 1000;
+ bool codec_active = false;
+ // Active configuration might not be fully copied to `streams` for SVC yet.
+ // Therefore the `config` is checked here.
+ for (const VideoStream& stream : config.simulcast_layers) {
+ if (stream.active) {
+ codec_active = true;
+ break;
+ }
+ }
+  // Set active for the entire video codec for the non-simulcast case.
+ video_codec.active = codec_active;
+ if (video_codec.minBitrate < kEncoderMinBitrateKbps)
+ video_codec.minBitrate = kEncoderMinBitrateKbps;
+ video_codec.timing_frame_thresholds = {kDefaultTimingFramesDelayMs,
+ kDefaultOutlierFrameSizePercent};
+ RTC_DCHECK_LE(streams.size(), kMaxSimulcastStreams);
+
+ int max_framerate = 0;
+
+ absl::optional<ScalabilityMode> scalability_mode =
+ streams[0].scalability_mode;
+ for (size_t i = 0; i < streams.size(); ++i) {
+ SimulcastStream* sim_stream = &video_codec.simulcastStream[i];
+ RTC_DCHECK_GT(streams[i].width, 0);
+ RTC_DCHECK_GT(streams[i].height, 0);
+ RTC_DCHECK_GT(streams[i].max_framerate, 0);
+ RTC_DCHECK_GE(streams[i].min_bitrate_bps, 0);
+ RTC_DCHECK_GE(streams[i].target_bitrate_bps, streams[i].min_bitrate_bps);
+ RTC_DCHECK_GE(streams[i].max_bitrate_bps, streams[i].target_bitrate_bps);
+ RTC_DCHECK_GE(streams[i].max_qp, 0);
+
+ sim_stream->width = static_cast<uint16_t>(streams[i].width);
+ sim_stream->height = static_cast<uint16_t>(streams[i].height);
+ sim_stream->maxFramerate = streams[i].max_framerate;
+ sim_stream->minBitrate = streams[i].min_bitrate_bps / 1000;
+ sim_stream->targetBitrate = streams[i].target_bitrate_bps / 1000;
+ sim_stream->maxBitrate = streams[i].max_bitrate_bps / 1000;
+ sim_stream->qpMax = streams[i].max_qp;
+
+ int num_temporal_layers =
+ streams[i].scalability_mode.has_value()
+ ? ScalabilityModeToNumTemporalLayers(*streams[i].scalability_mode)
+ : streams[i].num_temporal_layers.value_or(1);
+
+ sim_stream->numberOfTemporalLayers =
+ static_cast<unsigned char>(num_temporal_layers);
+ sim_stream->active = streams[i].active;
+
+ video_codec.width =
+ std::max(video_codec.width, static_cast<uint16_t>(streams[i].width));
+ video_codec.height =
+ std::max(video_codec.height, static_cast<uint16_t>(streams[i].height));
+ video_codec.minBitrate =
+ std::min(static_cast<uint16_t>(video_codec.minBitrate),
+ static_cast<uint16_t>(streams[i].min_bitrate_bps / 1000));
+ video_codec.maxBitrate += streams[i].max_bitrate_bps / 1000;
+ video_codec.qpMax = std::max(video_codec.qpMax,
+ static_cast<unsigned int>(streams[i].max_qp));
+ max_framerate = std::max(max_framerate, streams[i].max_framerate);
+
+ // TODO(bugs.webrtc.org/11607): Since scalability mode is a top-level
+ // setting on VideoCodec, setting it makes sense only if it is the same for
+ // all simulcast streams.
+ if (streams[0].scalability_mode != streams[i].scalability_mode) {
+ scalability_mode.reset();
+ // For VP8, top-level scalability mode doesn't matter, since configuration
+ // is based on the per-simulcast stream configuration of temporal layers.
+ if (video_codec.codecType != kVideoCodecVP8) {
+ RTC_LOG(LS_WARNING) << "Inconsistent scalability modes configured.";
+ }
+ }
+ }
+
+ if (scalability_mode.has_value()) {
+ video_codec.SetScalabilityMode(*scalability_mode);
+ }
+
+ if (video_codec.maxBitrate == 0) {
+    // Unset max bitrate -> cap to one bit per pixel (maxBitrate is in kbps).
+ video_codec.maxBitrate =
+ (video_codec.width * video_codec.height * video_codec.maxFramerate) /
+ 1000;
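+    // E.g. a lone 1280x720@30 stream is capped at
+    // 1280 * 720 * 30 / 1000 = 27648 kbps.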
+ }
+ if (video_codec.maxBitrate < kEncoderMinBitrateKbps)
+ video_codec.maxBitrate = kEncoderMinBitrateKbps;
+
+ video_codec.maxFramerate = max_framerate;
+ video_codec.spatialLayers[0] = {0};
+ video_codec.spatialLayers[0].width = video_codec.width;
+ video_codec.spatialLayers[0].height = video_codec.height;
+ video_codec.spatialLayers[0].maxFramerate = max_framerate;
+ video_codec.spatialLayers[0].numberOfTemporalLayers =
+ streams[0].scalability_mode.has_value()
+ ? ScalabilityModeToNumTemporalLayers(*streams[0].scalability_mode)
+ : streams[0].num_temporal_layers.value_or(1);
+
+  // Set codec-specific options.
+ if (config.encoder_specific_settings)
+ config.encoder_specific_settings->FillEncoderSpecificSettings(&video_codec);
+
+ switch (video_codec.codecType) {
+ case kVideoCodecVP8: {
+ if (!config.encoder_specific_settings) {
+ *video_codec.VP8() = VideoEncoder::GetDefaultVp8Settings();
+ }
+
+ // Validate specified scalability modes. If some layer has an unsupported
+ // mode, store it as the top-level scalability mode, which will make
+ // InitEncode fail with an appropriate error.
+ for (const auto& stream : streams) {
+ if (stream.scalability_mode.has_value() &&
+ !VP8SupportsScalabilityMode(*stream.scalability_mode)) {
+ RTC_LOG(LS_WARNING)
+ << "Invalid scalability mode for VP8: "
+ << ScalabilityModeToString(*stream.scalability_mode);
+ video_codec.SetScalabilityMode(*stream.scalability_mode);
+ break;
+ }
+ }
+ video_codec.VP8()->numberOfTemporalLayers =
+ streams.back().scalability_mode.has_value()
+ ? ScalabilityModeToNumTemporalLayers(
+ *streams.back().scalability_mode)
+ : streams.back().num_temporal_layers.value_or(
+ video_codec.VP8()->numberOfTemporalLayers);
+
+ RTC_DCHECK_GE(video_codec.VP8()->numberOfTemporalLayers, 1);
+ RTC_DCHECK_LE(video_codec.VP8()->numberOfTemporalLayers,
+ kMaxTemporalStreams);
+
+ break;
+ }
+ case kVideoCodecVP9: {
+ // Force the first stream to always be active.
+ video_codec.simulcastStream[0].active = codec_active;
+
+ if (!config.encoder_specific_settings) {
+ *video_codec.VP9() = VideoEncoder::GetDefaultVp9Settings();
+ }
+
+ video_codec.VP9()->numberOfTemporalLayers = static_cast<unsigned char>(
+ streams.back().num_temporal_layers.value_or(
+ video_codec.VP9()->numberOfTemporalLayers));
+ RTC_DCHECK_GE(video_codec.VP9()->numberOfTemporalLayers, 1);
+ RTC_DCHECK_LE(video_codec.VP9()->numberOfTemporalLayers,
+ kMaxTemporalStreams);
+
+ RTC_DCHECK(config.spatial_layers.empty() ||
+ config.spatial_layers.size() ==
+ video_codec.VP9()->numberOfSpatialLayers);
+
+ std::vector<SpatialLayer> spatial_layers;
+ if (!config.spatial_layers.empty()) {
+ // Layering is set explicitly.
+ spatial_layers = config.spatial_layers;
+ } else if (scalability_mode.has_value()) {
+ // Layering is set via scalability mode.
+ spatial_layers = GetVp9SvcConfig(video_codec);
+ if (spatial_layers.empty())
+ break;
+ } else {
+ size_t first_active_layer = 0;
+ for (size_t spatial_idx = 0;
+ spatial_idx < config.simulcast_layers.size(); ++spatial_idx) {
+ if (config.simulcast_layers[spatial_idx].active) {
+ first_active_layer = spatial_idx;
+ break;
+ }
+ }
+
+ spatial_layers = GetSvcConfig(
+ video_codec.width, video_codec.height, video_codec.maxFramerate,
+ first_active_layer, video_codec.VP9()->numberOfSpatialLayers,
+ video_codec.VP9()->numberOfTemporalLayers,
+ video_codec.mode == VideoCodecMode::kScreensharing);
+
+ // If there was no request for spatial layering, don't limit bitrate
+ // of single spatial layer.
+ const bool no_spatial_layering =
+ video_codec.VP9()->numberOfSpatialLayers <= 1;
+ if (no_spatial_layering) {
+ // Use codec's bitrate limits.
+ spatial_layers.back().minBitrate = video_codec.minBitrate;
+ spatial_layers.back().targetBitrate = video_codec.maxBitrate;
+ spatial_layers.back().maxBitrate = video_codec.maxBitrate;
+ }
+
+ for (size_t spatial_idx = first_active_layer;
+ spatial_idx < config.simulcast_layers.size() &&
+ spatial_idx < spatial_layers.size() + first_active_layer;
+ ++spatial_idx) {
+ spatial_layers[spatial_idx - first_active_layer].active =
+ config.simulcast_layers[spatial_idx].active;
+ }
+ }
+
+ RTC_DCHECK(!spatial_layers.empty());
+ for (size_t i = 0; i < spatial_layers.size(); ++i) {
+ video_codec.spatialLayers[i] = spatial_layers[i];
+ }
+
+      // The top spatial layer dimensions may not equal the input resolution
+      // because of rounding or explicit configuration. This difference must
+      // be propagated to the stream configuration.
+ video_codec.width = spatial_layers.back().width;
+ video_codec.height = spatial_layers.back().height;
+ video_codec.simulcastStream[0].width = spatial_layers.back().width;
+ video_codec.simulcastStream[0].height = spatial_layers.back().height;
+
+ // Update layering settings.
+ video_codec.VP9()->numberOfSpatialLayers =
+ static_cast<unsigned char>(spatial_layers.size());
+ RTC_DCHECK_GE(video_codec.VP9()->numberOfSpatialLayers, 1);
+ RTC_DCHECK_LE(video_codec.VP9()->numberOfSpatialLayers,
+ kMaxSpatialLayers);
+
+ video_codec.VP9()->numberOfTemporalLayers = static_cast<unsigned char>(
+ spatial_layers.back().numberOfTemporalLayers);
+ RTC_DCHECK_GE(video_codec.VP9()->numberOfTemporalLayers, 1);
+ RTC_DCHECK_LE(video_codec.VP9()->numberOfTemporalLayers,
+ kMaxTemporalStreams);
+
+ break;
+ }
+ case kVideoCodecAV1:
+ if (SetAv1SvcConfig(video_codec,
+ /*num_temporal_layers=*/
+ streams.back().num_temporal_layers.value_or(1),
+ /*num_spatial_layers=*/
+ std::max<int>(config.spatial_layers.size(), 1))) {
+ for (size_t i = 0; i < config.spatial_layers.size(); ++i) {
+ video_codec.spatialLayers[i].active = config.spatial_layers[i].active;
+ }
+ } else {
+ RTC_LOG(LS_WARNING) << "Failed to configure svc bitrates for av1.";
+ }
+ break;
+ case kVideoCodecH264: {
+ RTC_CHECK(!config.encoder_specific_settings);
+
+ *video_codec.H264() = VideoEncoder::GetDefaultH264Settings();
+ video_codec.H264()->numberOfTemporalLayers = static_cast<unsigned char>(
+ streams.back().num_temporal_layers.value_or(
+ video_codec.H264()->numberOfTemporalLayers));
+ RTC_DCHECK_GE(video_codec.H264()->numberOfTemporalLayers, 1);
+ RTC_DCHECK_LE(video_codec.H264()->numberOfTemporalLayers,
+ kMaxTemporalStreams);
+ break;
+ }
+ default:
+ // TODO(pbos): Support encoder_settings codec-agnostically.
+ RTC_DCHECK(!config.encoder_specific_settings)
+ << "Encoder-specific settings for codec type not wired up.";
+ break;
+ }
+
+ const absl::optional<DataRate> experimental_min_bitrate =
+ GetExperimentalMinVideoBitrate(video_codec.codecType);
+ if (experimental_min_bitrate) {
+ const int experimental_min_bitrate_kbps =
+ rtc::saturated_cast<int>(experimental_min_bitrate->kbps());
+ video_codec.minBitrate = experimental_min_bitrate_kbps;
+ video_codec.simulcastStream[0].minBitrate = experimental_min_bitrate_kbps;
+ if (video_codec.codecType == kVideoCodecVP9) {
+ video_codec.spatialLayers[0].minBitrate = experimental_min_bitrate_kbps;
+ }
+ }
+
+ return video_codec;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/video_codec_initializer_unittest.cc b/third_party/libwebrtc/modules/video_coding/video_codec_initializer_unittest.cc
new file mode 100644
index 0000000000..0e6f2dfca2
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/video_codec_initializer_unittest.cc
@@ -0,0 +1,493 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/include/video_codec_initializer.h"
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <memory>
+
+#include "absl/types/optional.h"
+#include "api/scoped_refptr.h"
+#include "api/test/mock_fec_controller_override.h"
+#include "api/video/builtin_video_bitrate_allocator_factory.h"
+#include "api/video/video_bitrate_allocation.h"
+#include "api/video/video_bitrate_allocator.h"
+#include "api/video/video_bitrate_allocator_factory.h"
+#include "api/video_codecs/video_encoder.h"
+#include "api/video_codecs/vp8_temporal_layers.h"
+#include "api/video_codecs/vp8_temporal_layers_factory.h"
+#include "modules/video_coding/codecs/vp9/include/vp9_globals.h"
+#include "rtc_base/checks.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+namespace {
+static const int kDefaultWidth = 1280;
+static const int kDefaultHeight = 720;
+static const int kDefaultFrameRate = 30;
+static const uint32_t kDefaultMinBitrateBps = 60000;
+static const uint32_t kDefaultTargetBitrateBps = 2000000;
+static const uint32_t kDefaultMaxBitrateBps = 2000000;
+static const uint32_t kDefaultMinTransmitBitrateBps = 400000;
+static const int kDefaultMaxQp = 48;
+static const uint32_t kScreenshareTl0BitrateBps = 120000;
+static const uint32_t kScreenshareConferenceTl0BitrateBps = 200000;
+static const uint32_t kScreenshareCodecTargetBitrateBps = 200000;
+static const uint32_t kScreenshareDefaultFramerate = 5;
+// Bitrates for the temporal layers of the higher screenshare simulcast stream.
+static const uint32_t kHighScreenshareTl0Bps = 800000;
+static const uint32_t kHighScreenshareTl1Bps = 1200000;
+} // namespace
+
+// TODO(sprang): Extend coverage to handle the rest of the codec initializer.
+class VideoCodecInitializerTest : public ::testing::Test {
+ public:
+ VideoCodecInitializerTest() {}
+ virtual ~VideoCodecInitializerTest() {}
+
+ protected:
+ void SetUpFor(VideoCodecType type,
+ int num_spatial_streams,
+ int num_temporal_streams,
+ bool screenshare) {
+ config_ = VideoEncoderConfig();
+ config_.codec_type = type;
+
+ if (screenshare) {
+ config_.min_transmit_bitrate_bps = kDefaultMinTransmitBitrateBps;
+ config_.content_type = VideoEncoderConfig::ContentType::kScreen;
+ }
+
+ if (type == VideoCodecType::kVideoCodecVP8) {
+ config_.number_of_streams = num_spatial_streams;
+ VideoCodecVP8 vp8_settings = VideoEncoder::GetDefaultVp8Settings();
+ vp8_settings.numberOfTemporalLayers = num_temporal_streams;
+ config_.encoder_specific_settings = rtc::make_ref_counted<
+ webrtc::VideoEncoderConfig::Vp8EncoderSpecificSettings>(vp8_settings);
+ } else if (type == VideoCodecType::kVideoCodecVP9) {
+ VideoCodecVP9 vp9_settings = VideoEncoder::GetDefaultVp9Settings();
+ vp9_settings.numberOfSpatialLayers = num_spatial_streams;
+ vp9_settings.numberOfTemporalLayers = num_temporal_streams;
+ config_.encoder_specific_settings = rtc::make_ref_counted<
+ webrtc::VideoEncoderConfig::Vp9EncoderSpecificSettings>(vp9_settings);
+ } else if (type != VideoCodecType::kVideoCodecMultiplex) {
+ ADD_FAILURE() << "Unexpected codec type: " << type;
+ }
+ }
+
+ bool InitializeCodec() {
+ codec_out_ = VideoCodec();
+ frame_buffer_controller_.reset();
+ if (!VideoCodecInitializer::SetupCodec(config_, streams_, &codec_out_)) {
+ return false;
+ }
+ bitrate_allocator_ = CreateBuiltinVideoBitrateAllocatorFactory()
+ ->CreateVideoBitrateAllocator(codec_out_);
+ RTC_CHECK(bitrate_allocator_);
+ if (codec_out_.codecType == VideoCodecType::kVideoCodecMultiplex)
+ return true;
+
+ // Make sure temporal layers instances have been created.
+ if (codec_out_.codecType == VideoCodecType::kVideoCodecVP8) {
+ Vp8TemporalLayersFactory factory;
+ const VideoEncoder::Settings settings(VideoEncoder::Capabilities(false),
+ 1, 1000);
+ frame_buffer_controller_ =
+ factory.Create(codec_out_, settings, &fec_controller_override_);
+ }
+ return true;
+ }
+
+ VideoStream DefaultStream() {
+ VideoStream stream;
+ stream.width = kDefaultWidth;
+ stream.height = kDefaultHeight;
+ stream.max_framerate = kDefaultFrameRate;
+ stream.min_bitrate_bps = kDefaultMinBitrateBps;
+ stream.target_bitrate_bps = kDefaultTargetBitrateBps;
+ stream.max_bitrate_bps = kDefaultMaxBitrateBps;
+ stream.max_qp = kDefaultMaxQp;
+ stream.num_temporal_layers = 1;
+ stream.active = true;
+ return stream;
+ }
+
+ VideoStream DefaultScreenshareStream() {
+ VideoStream stream = DefaultStream();
+ stream.min_bitrate_bps = 30000;
+ stream.target_bitrate_bps = kScreenshareCodecTargetBitrateBps;
+ stream.max_bitrate_bps = 1000000;
+ stream.max_framerate = kScreenshareDefaultFramerate;
+ stream.num_temporal_layers = 2;
+ stream.active = true;
+ return stream;
+ }
+
+ MockFecControllerOverride fec_controller_override_;
+
+ // Input settings.
+ VideoEncoderConfig config_;
+ std::vector<VideoStream> streams_;
+
+ // Output.
+ VideoCodec codec_out_;
+ std::unique_ptr<VideoBitrateAllocator> bitrate_allocator_;
+ std::unique_ptr<Vp8FrameBufferController> frame_buffer_controller_;
+};
+
+TEST_F(VideoCodecInitializerTest, SingleStreamVp8Screenshare) {
+ SetUpFor(VideoCodecType::kVideoCodecVP8, 1, 1, true);
+ streams_.push_back(DefaultStream());
+ EXPECT_TRUE(InitializeCodec());
+
+ VideoBitrateAllocation bitrate_allocation =
+ bitrate_allocator_->Allocate(VideoBitrateAllocationParameters(
+ kDefaultTargetBitrateBps, kDefaultFrameRate));
+ EXPECT_EQ(1u, codec_out_.numberOfSimulcastStreams);
+ EXPECT_EQ(1u, codec_out_.VP8()->numberOfTemporalLayers);
+ EXPECT_EQ(kDefaultTargetBitrateBps, bitrate_allocation.get_sum_bps());
+}
+
+TEST_F(VideoCodecInitializerTest, SingleStreamVp8ScreenshareInactive) {
+ SetUpFor(VideoCodecType::kVideoCodecVP8, 1, 1, true);
+ VideoStream inactive_stream = DefaultStream();
+ inactive_stream.active = false;
+ streams_.push_back(inactive_stream);
+ EXPECT_TRUE(InitializeCodec());
+
+ VideoBitrateAllocation bitrate_allocation =
+ bitrate_allocator_->Allocate(VideoBitrateAllocationParameters(
+ kDefaultTargetBitrateBps, kDefaultFrameRate));
+ EXPECT_EQ(1u, codec_out_.numberOfSimulcastStreams);
+ EXPECT_EQ(1u, codec_out_.VP8()->numberOfTemporalLayers);
+ EXPECT_EQ(0U, bitrate_allocation.get_sum_bps());
+}
+
+TEST_F(VideoCodecInitializerTest, TemporalLayeredVp8ScreenshareConference) {
+ SetUpFor(VideoCodecType::kVideoCodecVP8, 1, 2, true);
+ streams_.push_back(DefaultScreenshareStream());
+ EXPECT_TRUE(InitializeCodec());
+ bitrate_allocator_->SetLegacyConferenceMode(true);
+
+ EXPECT_EQ(1u, codec_out_.numberOfSimulcastStreams);
+ EXPECT_EQ(2u, codec_out_.VP8()->numberOfTemporalLayers);
+ VideoBitrateAllocation bitrate_allocation =
+ bitrate_allocator_->Allocate(VideoBitrateAllocationParameters(
+ kScreenshareCodecTargetBitrateBps, kScreenshareDefaultFramerate));
+ EXPECT_EQ(kScreenshareCodecTargetBitrateBps,
+ bitrate_allocation.get_sum_bps());
+ EXPECT_EQ(kScreenshareConferenceTl0BitrateBps,
+ bitrate_allocation.GetBitrate(0, 0));
+}
+
+TEST_F(VideoCodecInitializerTest, TemporalLayeredVp8Screenshare) {
+ SetUpFor(VideoCodecType::kVideoCodecVP8, 1, 2, true);
+ streams_.push_back(DefaultScreenshareStream());
+ EXPECT_TRUE(InitializeCodec());
+
+ EXPECT_EQ(1u, codec_out_.numberOfSimulcastStreams);
+ EXPECT_EQ(2u, codec_out_.VP8()->numberOfTemporalLayers);
+ VideoBitrateAllocation bitrate_allocation =
+ bitrate_allocator_->Allocate(VideoBitrateAllocationParameters(
+ kScreenshareCodecTargetBitrateBps, kScreenshareDefaultFramerate));
+ EXPECT_EQ(kScreenshareCodecTargetBitrateBps,
+ bitrate_allocation.get_sum_bps());
+ EXPECT_EQ(kScreenshareTl0BitrateBps, bitrate_allocation.GetBitrate(0, 0));
+}
+
+TEST_F(VideoCodecInitializerTest, SimulcastVp8Screenshare) {
+ SetUpFor(VideoCodecType::kVideoCodecVP8, 2, 1, true);
+ streams_.push_back(DefaultScreenshareStream());
+ VideoStream video_stream = DefaultStream();
+ video_stream.max_framerate = kScreenshareDefaultFramerate;
+ streams_.push_back(video_stream);
+ EXPECT_TRUE(InitializeCodec());
+
+ EXPECT_EQ(2u, codec_out_.numberOfSimulcastStreams);
+ EXPECT_EQ(1u, codec_out_.VP8()->numberOfTemporalLayers);
+ const uint32_t max_bitrate_bps =
+ streams_[0].target_bitrate_bps + streams_[1].max_bitrate_bps;
+ VideoBitrateAllocation bitrate_allocation =
+ bitrate_allocator_->Allocate(VideoBitrateAllocationParameters(
+ max_bitrate_bps, kScreenshareDefaultFramerate));
+ EXPECT_EQ(max_bitrate_bps, bitrate_allocation.get_sum_bps());
+ EXPECT_EQ(static_cast<uint32_t>(streams_[0].target_bitrate_bps),
+ bitrate_allocation.GetSpatialLayerSum(0));
+ EXPECT_EQ(static_cast<uint32_t>(streams_[1].max_bitrate_bps),
+ bitrate_allocation.GetSpatialLayerSum(1));
+}
+
+// Tests that when a video stream is inactive, then the bitrate allocation will
+// be 0 for that stream.
+TEST_F(VideoCodecInitializerTest, SimulcastVp8ScreenshareInactive) {
+ SetUpFor(VideoCodecType::kVideoCodecVP8, 2, 1, true);
+ streams_.push_back(DefaultScreenshareStream());
+ VideoStream inactive_video_stream = DefaultStream();
+ inactive_video_stream.active = false;
+ inactive_video_stream.max_framerate = kScreenshareDefaultFramerate;
+ streams_.push_back(inactive_video_stream);
+ EXPECT_TRUE(InitializeCodec());
+
+ EXPECT_EQ(2u, codec_out_.numberOfSimulcastStreams);
+ EXPECT_EQ(1u, codec_out_.VP8()->numberOfTemporalLayers);
+ const uint32_t target_bitrate =
+ streams_[0].target_bitrate_bps + streams_[1].target_bitrate_bps;
+ VideoBitrateAllocation bitrate_allocation =
+ bitrate_allocator_->Allocate(VideoBitrateAllocationParameters(
+ target_bitrate, kScreenshareDefaultFramerate));
+ EXPECT_EQ(static_cast<uint32_t>(streams_[0].max_bitrate_bps),
+ bitrate_allocation.get_sum_bps());
+ EXPECT_EQ(static_cast<uint32_t>(streams_[0].max_bitrate_bps),
+ bitrate_allocation.GetSpatialLayerSum(0));
+ EXPECT_EQ(0U, bitrate_allocation.GetSpatialLayerSum(1));
+}
+
+TEST_F(VideoCodecInitializerTest, HighFpsSimulcastVp8Screenshare) {
+ // Two simulcast streams, the lower one using legacy settings (two temporal
+ // streams, 5fps), the higher one using 3 temporal streams and 30fps.
+ SetUpFor(VideoCodecType::kVideoCodecVP8, 2, 3, true);
+ streams_.push_back(DefaultScreenshareStream());
+ VideoStream video_stream = DefaultStream();
+ video_stream.num_temporal_layers = 3;
+ streams_.push_back(video_stream);
+ EXPECT_TRUE(InitializeCodec());
+
+ EXPECT_EQ(2u, codec_out_.numberOfSimulcastStreams);
+ EXPECT_EQ(3u, codec_out_.VP8()->numberOfTemporalLayers);
+ const uint32_t max_bitrate_bps =
+ streams_[0].target_bitrate_bps + streams_[1].max_bitrate_bps;
+ VideoBitrateAllocation bitrate_allocation = bitrate_allocator_->Allocate(
+ VideoBitrateAllocationParameters(max_bitrate_bps, kDefaultFrameRate));
+ EXPECT_EQ(max_bitrate_bps, bitrate_allocation.get_sum_bps());
+ EXPECT_EQ(static_cast<uint32_t>(streams_[0].target_bitrate_bps),
+ bitrate_allocation.GetSpatialLayerSum(0));
+ EXPECT_EQ(static_cast<uint32_t>(streams_[1].max_bitrate_bps),
+ bitrate_allocation.GetSpatialLayerSum(1));
+ EXPECT_EQ(kHighScreenshareTl0Bps, bitrate_allocation.GetBitrate(1, 0));
+ EXPECT_EQ(kHighScreenshareTl1Bps - kHighScreenshareTl0Bps,
+ bitrate_allocation.GetBitrate(1, 1));
+}
+
+TEST_F(VideoCodecInitializerTest, SingleStreamMultiplexCodec) {
+ SetUpFor(VideoCodecType::kVideoCodecMultiplex, 1, 1, true);
+ streams_.push_back(DefaultStream());
+ EXPECT_TRUE(InitializeCodec());
+}
+
+TEST_F(VideoCodecInitializerTest, Vp9SvcDefaultLayering) {
+ SetUpFor(VideoCodecType::kVideoCodecVP9, 3, 3, false);
+ VideoStream stream = DefaultStream();
+ stream.num_temporal_layers = 3;
+ streams_.push_back(stream);
+
+ EXPECT_TRUE(InitializeCodec());
+ EXPECT_EQ(codec_out_.VP9()->numberOfSpatialLayers, 3u);
+ EXPECT_EQ(codec_out_.VP9()->numberOfTemporalLayers, 3u);
+}
+
+TEST_F(VideoCodecInitializerTest, Vp9SvcAdjustedLayering) {
+ SetUpFor(VideoCodecType::kVideoCodecVP9, 3, 3, false);
+ VideoStream stream = DefaultStream();
+ stream.num_temporal_layers = 3;
+ // Set resolution which is only enough to produce 2 spatial layers.
+ stream.width = kMinVp9SpatialLayerLongSideLength * 2;
+ stream.height = kMinVp9SpatialLayerShortSideLength * 2;
+
+ streams_.push_back(stream);
+
+ EXPECT_TRUE(InitializeCodec());
+ EXPECT_EQ(codec_out_.VP9()->numberOfSpatialLayers, 2u);
+}
+
+TEST_F(VideoCodecInitializerTest,
+ Vp9SingleSpatialLayerMaxBitrateIsEqualToCodecMaxBitrate) {
+ SetUpFor(VideoCodecType::kVideoCodecVP9, 1, 3, false);
+ VideoStream stream = DefaultStream();
+ stream.num_temporal_layers = 3;
+ streams_.push_back(stream);
+
+ EXPECT_TRUE(InitializeCodec());
+ EXPECT_EQ(codec_out_.spatialLayers[0].maxBitrate,
+ kDefaultMaxBitrateBps / 1000);
+}
+
+TEST_F(VideoCodecInitializerTest,
+ Vp9SingleSpatialLayerTargetBitrateIsEqualToCodecMaxBitrate) {
+ SetUpFor(VideoCodecType::kVideoCodecVP9, 1, 1, true);
+ VideoStream stream = DefaultStream();
+ stream.num_temporal_layers = 1;
+ streams_.push_back(stream);
+
+ EXPECT_TRUE(InitializeCodec());
+ EXPECT_EQ(codec_out_.spatialLayers[0].targetBitrate,
+ kDefaultMaxBitrateBps / 1000);
+}
+
+TEST_F(VideoCodecInitializerTest,
+ Vp9KeepBitrateLimitsIfNumberOfSpatialLayersIsReducedToOne) {
+  // Request 3 spatial layers for 320x180 input. The actual number of layers
+  // will be reduced to 1 due to the low input resolution, but SVC bitrate
+  // limits should still be applied.
+ SetUpFor(VideoCodecType::kVideoCodecVP9, 3, 3, false);
+ VideoStream stream = DefaultStream();
+ stream.width = 320;
+ stream.height = 180;
+ stream.num_temporal_layers = 3;
+ streams_.push_back(stream);
+
+ EXPECT_TRUE(InitializeCodec());
+ EXPECT_LT(codec_out_.spatialLayers[0].maxBitrate,
+ kDefaultMaxBitrateBps / 1000);
+}
+
+TEST_F(VideoCodecInitializerTest, Vp9DeactivateLayers) {
+ SetUpFor(VideoCodecType::kVideoCodecVP9, 3, 1, false);
+ VideoStream stream = DefaultStream();
+ streams_.push_back(stream);
+
+ config_.simulcast_layers.resize(3);
+
+ // Activate all layers.
+ config_.simulcast_layers[0].active = true;
+ config_.simulcast_layers[1].active = true;
+ config_.simulcast_layers[2].active = true;
+ EXPECT_TRUE(InitializeCodec());
+ EXPECT_EQ(codec_out_.VP9()->numberOfSpatialLayers, 3);
+ EXPECT_TRUE(codec_out_.spatialLayers[0].active);
+ EXPECT_TRUE(codec_out_.spatialLayers[1].active);
+ EXPECT_TRUE(codec_out_.spatialLayers[2].active);
+
+ // Deactivate top layer.
+ config_.simulcast_layers[0].active = true;
+ config_.simulcast_layers[1].active = true;
+ config_.simulcast_layers[2].active = false;
+ EXPECT_TRUE(InitializeCodec());
+ EXPECT_EQ(codec_out_.VP9()->numberOfSpatialLayers, 3);
+ EXPECT_TRUE(codec_out_.spatialLayers[0].active);
+ EXPECT_TRUE(codec_out_.spatialLayers[1].active);
+ EXPECT_FALSE(codec_out_.spatialLayers[2].active);
+
+ // Deactivate middle layer.
+ config_.simulcast_layers[0].active = true;
+ config_.simulcast_layers[1].active = false;
+ config_.simulcast_layers[2].active = true;
+ EXPECT_TRUE(InitializeCodec());
+ EXPECT_EQ(codec_out_.VP9()->numberOfSpatialLayers, 3);
+ EXPECT_TRUE(codec_out_.spatialLayers[0].active);
+ EXPECT_FALSE(codec_out_.spatialLayers[1].active);
+ EXPECT_TRUE(codec_out_.spatialLayers[2].active);
+
+ // Deactivate first layer.
+ config_.simulcast_layers[0].active = false;
+ config_.simulcast_layers[1].active = true;
+ config_.simulcast_layers[2].active = true;
+ EXPECT_TRUE(InitializeCodec());
+ EXPECT_EQ(codec_out_.VP9()->numberOfSpatialLayers, 2);
+ EXPECT_TRUE(codec_out_.spatialLayers[0].active);
+ EXPECT_TRUE(codec_out_.spatialLayers[1].active);
+
+ // HD singlecast.
+ config_.simulcast_layers[0].active = false;
+ config_.simulcast_layers[1].active = false;
+ config_.simulcast_layers[2].active = true;
+ EXPECT_TRUE(InitializeCodec());
+ EXPECT_EQ(codec_out_.VP9()->numberOfSpatialLayers, 1);
+ EXPECT_TRUE(codec_out_.spatialLayers[0].active);
+
+ // VGA singlecast.
+ config_.simulcast_layers[0].active = false;
+ config_.simulcast_layers[1].active = true;
+ config_.simulcast_layers[2].active = false;
+ EXPECT_TRUE(InitializeCodec());
+ EXPECT_EQ(codec_out_.VP9()->numberOfSpatialLayers, 2);
+ EXPECT_TRUE(codec_out_.spatialLayers[0].active);
+ EXPECT_FALSE(codec_out_.spatialLayers[1].active);
+
+ // QVGA singlecast.
+ config_.simulcast_layers[0].active = true;
+ config_.simulcast_layers[1].active = false;
+ config_.simulcast_layers[2].active = false;
+ EXPECT_TRUE(InitializeCodec());
+ EXPECT_EQ(codec_out_.VP9()->numberOfSpatialLayers, 3);
+ EXPECT_TRUE(codec_out_.spatialLayers[0].active);
+ EXPECT_FALSE(codec_out_.spatialLayers[1].active);
+ EXPECT_FALSE(codec_out_.spatialLayers[2].active);
+}
+
+TEST_F(VideoCodecInitializerTest, Av1SingleSpatialLayerBitratesAreConsistent) {
+ VideoEncoderConfig config;
+ config.codec_type = VideoCodecType::kVideoCodecAV1;
+ std::vector<VideoStream> streams = {DefaultStream()};
+ streams[0].scalability_mode = ScalabilityMode::kL1T2;
+
+ VideoCodec codec;
+ EXPECT_TRUE(VideoCodecInitializer::SetupCodec(config, streams, &codec));
+
+ EXPECT_GE(codec.spatialLayers[0].targetBitrate,
+ codec.spatialLayers[0].minBitrate);
+ EXPECT_LE(codec.spatialLayers[0].targetBitrate,
+ codec.spatialLayers[0].maxBitrate);
+}
+
+TEST_F(VideoCodecInitializerTest, Av1TwoSpatialLayersBitratesAreConsistent) {
+ VideoEncoderConfig config;
+ config.codec_type = VideoCodecType::kVideoCodecAV1;
+ std::vector<VideoStream> streams = {DefaultStream()};
+ streams[0].scalability_mode = ScalabilityMode::kL2T2;
+
+ VideoCodec codec;
+ EXPECT_TRUE(VideoCodecInitializer::SetupCodec(config, streams, &codec));
+
+ EXPECT_GE(codec.spatialLayers[0].targetBitrate,
+ codec.spatialLayers[0].minBitrate);
+ EXPECT_LE(codec.spatialLayers[0].targetBitrate,
+ codec.spatialLayers[0].maxBitrate);
+
+ EXPECT_GE(codec.spatialLayers[1].targetBitrate,
+ codec.spatialLayers[1].minBitrate);
+ EXPECT_LE(codec.spatialLayers[1].targetBitrate,
+ codec.spatialLayers[1].maxBitrate);
+}
+
+TEST_F(VideoCodecInitializerTest, Av1TwoSpatialLayersActiveByDefault) {
+ VideoEncoderConfig config;
+ config.codec_type = VideoCodecType::kVideoCodecAV1;
+ std::vector<VideoStream> streams = {DefaultStream()};
+ streams[0].scalability_mode = ScalabilityMode::kL2T2;
+ config.spatial_layers = {};
+
+ VideoCodec codec;
+ EXPECT_TRUE(VideoCodecInitializer::SetupCodec(config, streams, &codec));
+
+ EXPECT_TRUE(codec.spatialLayers[0].active);
+ EXPECT_TRUE(codec.spatialLayers[1].active);
+}
+
+TEST_F(VideoCodecInitializerTest, Av1TwoSpatialLayersOneDeactivated) {
+ VideoEncoderConfig config;
+ config.codec_type = VideoCodecType::kVideoCodecAV1;
+ std::vector<VideoStream> streams = {DefaultStream()};
+ streams[0].scalability_mode = ScalabilityMode::kL2T2;
+ config.spatial_layers.resize(2);
+ config.spatial_layers[0].active = true;
+ config.spatial_layers[1].active = false;
+
+ VideoCodec codec;
+ EXPECT_TRUE(VideoCodecInitializer::SetupCodec(config, streams, &codec));
+
+ EXPECT_TRUE(codec.spatialLayers[0].active);
+ EXPECT_FALSE(codec.spatialLayers[1].active);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/video_codec_interface_gn/moz.build b/third_party/libwebrtc/modules/video_coding/video_codec_interface_gn/moz.build
new file mode 100644
index 0000000000..da29622b17
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/video_codec_interface_gn/moz.build
@@ -0,0 +1,215 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/video_coding/include/video_codec_interface.cc",
+ "/third_party/libwebrtc/modules/video_coding/video_coding_defines.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "GLESv2",
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "dl",
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("video_codec_interface_gn")
diff --git a/third_party/libwebrtc/modules/video_coding/video_coding_defines.cc b/third_party/libwebrtc/modules/video_coding/video_coding_defines.cc
new file mode 100644
index 0000000000..424b23f971
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/video_coding_defines.cc
@@ -0,0 +1,20 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/include/video_coding_defines.h"
+
+namespace webrtc {
+
+void VCMReceiveCallback::OnDroppedFrames(uint32_t frames_dropped) {}
+void VCMReceiveCallback::OnIncomingPayloadType(int payload_type) {}
+void VCMReceiveCallback::OnDecoderImplementationName(
+ const char* implementation_name) {}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/video_coding_gn/moz.build b/third_party/libwebrtc/modules/video_coding/video_coding_gn/moz.build
new file mode 100644
index 0000000000..f04ffa380a
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/video_coding_gn/moz.build
@@ -0,0 +1,230 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/video_coding/decoder_database.cc",
+ "/third_party/libwebrtc/modules/video_coding/event_wrapper.cc",
+ "/third_party/libwebrtc/modules/video_coding/fec_controller_default.cc",
+ "/third_party/libwebrtc/modules/video_coding/frame_object.cc",
+ "/third_party/libwebrtc/modules/video_coding/generic_decoder.cc",
+ "/third_party/libwebrtc/modules/video_coding/h264_sprop_parameter_sets.cc",
+ "/third_party/libwebrtc/modules/video_coding/h264_sps_pps_tracker.cc",
+ "/third_party/libwebrtc/modules/video_coding/loss_notification_controller.cc",
+ "/third_party/libwebrtc/modules/video_coding/media_opt_util.cc",
+ "/third_party/libwebrtc/modules/video_coding/rtp_frame_id_only_ref_finder.cc",
+ "/third_party/libwebrtc/modules/video_coding/rtp_frame_reference_finder.cc",
+ "/third_party/libwebrtc/modules/video_coding/rtp_generic_ref_finder.cc",
+ "/third_party/libwebrtc/modules/video_coding/rtp_seq_num_only_ref_finder.cc",
+ "/third_party/libwebrtc/modules/video_coding/rtp_vp8_ref_finder.cc",
+ "/third_party/libwebrtc/modules/video_coding/rtp_vp9_ref_finder.cc",
+ "/third_party/libwebrtc/modules/video_coding/video_codec_initializer.cc",
+ "/third_party/libwebrtc/modules/video_coding/video_receiver2.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "GLESv2",
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "dl",
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("video_coding_gn")
diff --git a/third_party/libwebrtc/modules/video_coding/video_coding_impl.cc b/third_party/libwebrtc/modules/video_coding/video_coding_impl.cc
new file mode 100644
index 0000000000..e0ad033d7c
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/video_coding_impl.cc
@@ -0,0 +1,123 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/video_coding_impl.h"
+
+#include <algorithm>
+#include <memory>
+
+#include "api/field_trials_view.h"
+#include "api/sequence_checker.h"
+#include "api/transport/field_trial_based_config.h"
+#include "api/video/encoded_image.h"
+#include "modules/video_coding/include/video_codec_interface.h"
+#include "modules/video_coding/timing/timing.h"
+#include "rtc_base/memory/always_valid_pointer.h"
+#include "system_wrappers/include/clock.h"
+
+namespace webrtc {
+namespace vcm {
+
+int64_t VCMProcessTimer::Period() const {
+ return _periodMs;
+}
+
+int64_t VCMProcessTimer::TimeUntilProcess() const {
+ const int64_t time_since_process = _clock->TimeInMilliseconds() - _latestMs;
+ const int64_t time_until_process = _periodMs - time_since_process;
+ return std::max<int64_t>(time_until_process, 0);
+}
+
+void VCMProcessTimer::Processed() {
+ _latestMs = _clock->TimeInMilliseconds();
+}
+} // namespace vcm
+
+namespace {
+
+class VideoCodingModuleImpl : public VideoCodingModule {
+ public:
+ explicit VideoCodingModuleImpl(Clock* clock,
+ const FieldTrialsView* field_trials)
+ : VideoCodingModule(),
+ field_trials_(field_trials),
+ timing_(new VCMTiming(clock, *field_trials_)),
+ receiver_(clock, timing_.get(), *field_trials_) {}
+
+ ~VideoCodingModuleImpl() override = default;
+
+ void Process() override { receiver_.Process(); }
+
+ void RegisterReceiveCodec(
+ uint8_t payload_type,
+ const VideoDecoder::Settings& decoder_settings) override {
+ receiver_.RegisterReceiveCodec(payload_type, decoder_settings);
+ }
+
+ void RegisterExternalDecoder(VideoDecoder* externalDecoder,
+ uint8_t payloadType) override {
+ receiver_.RegisterExternalDecoder(externalDecoder, payloadType);
+ }
+
+ int32_t RegisterReceiveCallback(
+ VCMReceiveCallback* receiveCallback) override {
+ RTC_DCHECK(construction_thread_.IsCurrent());
+ return receiver_.RegisterReceiveCallback(receiveCallback);
+ }
+
+ int32_t RegisterFrameTypeCallback(
+ VCMFrameTypeCallback* frameTypeCallback) override {
+ return receiver_.RegisterFrameTypeCallback(frameTypeCallback);
+ }
+
+ int32_t RegisterPacketRequestCallback(
+ VCMPacketRequestCallback* callback) override {
+ RTC_DCHECK(construction_thread_.IsCurrent());
+ return receiver_.RegisterPacketRequestCallback(callback);
+ }
+
+ int32_t Decode(uint16_t maxWaitTimeMs) override {
+ return receiver_.Decode(maxWaitTimeMs);
+ }
+
+ int32_t IncomingPacket(const uint8_t* incomingPayload,
+ size_t payloadLength,
+ const RTPHeader& rtp_header,
+ const RTPVideoHeader& video_header) override {
+ return receiver_.IncomingPacket(incomingPayload, payloadLength, rtp_header,
+ video_header);
+ }
+
+ void SetNackSettings(size_t max_nack_list_size,
+ int max_packet_age_to_nack,
+ int max_incomplete_time_ms) override {
+ return receiver_.SetNackSettings(max_nack_list_size, max_packet_age_to_nack,
+ max_incomplete_time_ms);
+ }
+
+ private:
+ AlwaysValidPointer<const FieldTrialsView, FieldTrialBasedConfig>
+ field_trials_;
+ SequenceChecker construction_thread_;
+ const std::unique_ptr<VCMTiming> timing_;
+ vcm::VideoReceiver receiver_;
+};
+} // namespace
+
+// DEPRECATED. Create method for the current interface; will be removed when
+// the new jitter buffer is in place.
+VideoCodingModule* VideoCodingModule::Create(
+ Clock* clock,
+ const FieldTrialsView* field_trials) {
+ RTC_DCHECK(clock);
+ return new VideoCodingModuleImpl(clock, field_trials);
+}
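+
+// For illustration only (hypothetical caller, not part of this file): the
+// deprecated module would typically be created with a real-time clock and
+// default field trials, then driven via Process() on a module thread.
+//
+//   VideoCodingModule* vcm =
+//       VideoCodingModule::Create(Clock::GetRealTimeClock(), nullptr);
+//   vcm->Process();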
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/video_coding_impl.h b/third_party/libwebrtc/modules/video_coding/video_coding_impl.h
new file mode 100644
index 0000000000..22237ca78e
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/video_coding_impl.h
@@ -0,0 +1,135 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_VIDEO_CODING_IMPL_H_
+#define MODULES_VIDEO_CODING_VIDEO_CODING_IMPL_H_
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/field_trials_view.h"
+#include "api/sequence_checker.h"
+#include "modules/video_coding/decoder_database.h"
+#include "modules/video_coding/frame_buffer.h"
+#include "modules/video_coding/generic_decoder.h"
+#include "modules/video_coding/include/video_coding.h"
+#include "modules/video_coding/jitter_buffer.h"
+#include "modules/video_coding/receiver.h"
+#include "modules/video_coding/timing/timing.h"
+#include "rtc_base/one_time_event.h"
+#include "rtc_base/synchronization/mutex.h"
+#include "rtc_base/thread_annotations.h"
+#include "system_wrappers/include/clock.h"
+
+namespace webrtc {
+
+class VideoBitrateAllocator;
+class VideoBitrateAllocationObserver;
+
+namespace vcm {
+
+class VCMProcessTimer {
+ public:
+ static const int64_t kDefaultProcessIntervalMs = 1000;
+
+ VCMProcessTimer(int64_t periodMs, Clock* clock)
+ : _clock(clock),
+ _periodMs(periodMs),
+ _latestMs(_clock->TimeInMilliseconds()) {}
+ int64_t Period() const;
+ int64_t TimeUntilProcess() const;
+ void Processed();
+
+ private:
+ Clock* _clock;
+ int64_t _periodMs;
+ int64_t _latestMs;
+};
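+
+// A minimal usage sketch (illustrative only; `running` and DoPeriodicWork()
+// are hypothetical): a module thread polls the timer and runs its periodic
+// work once the period has elapsed, then marks the timer as processed.
+//
+//   VCMProcessTimer timer(/*periodMs=*/1000, clock);
+//   while (running) {
+//     if (timer.TimeUntilProcess() == 0) {
+//       DoPeriodicWork();
+//       timer.Processed();
+//     }
+//   }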
+
+class VideoReceiver {
+ public:
+ VideoReceiver(Clock* clock,
+ VCMTiming* timing,
+ const FieldTrialsView& field_trials);
+ ~VideoReceiver();
+
+ void RegisterReceiveCodec(uint8_t payload_type,
+ const VideoDecoder::Settings& settings);
+
+ void RegisterExternalDecoder(VideoDecoder* externalDecoder,
+ uint8_t payloadType);
+ int32_t RegisterReceiveCallback(VCMReceiveCallback* receiveCallback);
+ int32_t RegisterFrameTypeCallback(VCMFrameTypeCallback* frameTypeCallback);
+ int32_t RegisterPacketRequestCallback(VCMPacketRequestCallback* callback);
+
+ int32_t Decode(uint16_t maxWaitTimeMs);
+
+ int32_t IncomingPacket(const uint8_t* incomingPayload,
+ size_t payloadLength,
+ const RTPHeader& rtp_header,
+ const RTPVideoHeader& video_header);
+
+ void SetNackSettings(size_t max_nack_list_size,
+ int max_packet_age_to_nack,
+ int max_incomplete_time_ms);
+
+ void Process();
+
+ protected:
+ int32_t Decode(const webrtc::VCMEncodedFrame& frame);
+ int32_t RequestKeyFrame();
+
+ private:
+ // Used for DCHECKing thread correctness.
+  // In builds where DCHECKs are enabled, this will return false before
+  // DecoderThreadStarting is called, then true until DecoderThreadStopped
+  // is called.
+ // In builds where DCHECKs aren't enabled, it will return true.
+ bool IsDecoderThreadRunning();
+
+ SequenceChecker construction_thread_checker_;
+ SequenceChecker decoder_thread_checker_;
+ SequenceChecker module_thread_checker_;
+ Clock* const clock_;
+ Mutex process_mutex_;
+ VCMTiming* _timing;
+ VCMReceiver _receiver;
+ VCMDecodedFrameCallback _decodedFrameCallback;
+
+  // These callbacks are set on the construction thread, before the module is
+  // attached to the module thread and before decoding starts, so a lock is
+  // not required.
+ VCMFrameTypeCallback* _frameTypeCallback;
+ VCMPacketRequestCallback* _packetRequestCallback;
+
+ // Used on both the module and decoder thread.
+ bool _scheduleKeyRequest RTC_GUARDED_BY(process_mutex_);
+ bool drop_frames_until_keyframe_ RTC_GUARDED_BY(process_mutex_);
+
+ // Modified on the construction thread while not attached to the process
+  // thread. Once attached to the process thread, its value is only read,
+  // so a lock is not required.
+ size_t max_nack_list_size_;
+
+ // Callbacks are set before the decoder thread starts.
+ // Once the decoder thread has been started, usage of `_codecDataBase` moves
+ // over to the decoder thread.
+ VCMDecoderDataBase _codecDataBase;
+
+ VCMProcessTimer _retransmissionTimer RTC_GUARDED_BY(module_thread_checker_);
+ VCMProcessTimer _keyRequestTimer RTC_GUARDED_BY(module_thread_checker_);
+ ThreadUnsafeOneTimeEvent first_frame_received_
+ RTC_GUARDED_BY(decoder_thread_checker_);
+};
+
+} // namespace vcm
+} // namespace webrtc
+#endif // MODULES_VIDEO_CODING_VIDEO_CODING_IMPL_H_
diff --git a/third_party/libwebrtc/modules/video_coding/video_coding_utility_gn/moz.build b/third_party/libwebrtc/modules/video_coding/video_coding_utility_gn/moz.build
new file mode 100644
index 0000000000..1fb1c34559
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/video_coding_utility_gn/moz.build
@@ -0,0 +1,225 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/video_coding/utility/bandwidth_quality_scaler.cc",
+ "/third_party/libwebrtc/modules/video_coding/utility/decoded_frames_history.cc",
+ "/third_party/libwebrtc/modules/video_coding/utility/frame_dropper.cc",
+ "/third_party/libwebrtc/modules/video_coding/utility/framerate_controller_deprecated.cc",
+ "/third_party/libwebrtc/modules/video_coding/utility/ivf_file_reader.cc",
+ "/third_party/libwebrtc/modules/video_coding/utility/ivf_file_writer.cc",
+ "/third_party/libwebrtc/modules/video_coding/utility/qp_parser.cc",
+ "/third_party/libwebrtc/modules/video_coding/utility/quality_scaler.cc",
+ "/third_party/libwebrtc/modules/video_coding/utility/simulcast_rate_allocator.cc",
+ "/third_party/libwebrtc/modules/video_coding/utility/simulcast_utility.cc",
+ "/third_party/libwebrtc/modules/video_coding/utility/vp8_header_parser.cc",
+ "/third_party/libwebrtc/modules/video_coding/utility/vp9_uncompressed_header_parser.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "GLESv2",
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "dl",
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("video_coding_utility_gn")
diff --git a/third_party/libwebrtc/modules/video_coding/video_receiver.cc b/third_party/libwebrtc/modules/video_coding/video_receiver.cc
new file mode 100644
index 0000000000..51d74c9981
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/video_receiver.cc
@@ -0,0 +1,279 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stddef.h>
+
+#include <cstdint>
+#include <vector>
+
+#include "api/rtp_headers.h"
+#include "api/sequence_checker.h"
+#include "api/video_codecs/video_codec.h"
+#include "api/video_codecs/video_decoder.h"
+#include "modules/video_coding/decoder_database.h"
+#include "modules/video_coding/encoded_frame.h"
+#include "modules/video_coding/generic_decoder.h"
+#include "modules/video_coding/include/video_coding.h"
+#include "modules/video_coding/include/video_coding_defines.h"
+#include "modules/video_coding/internal_defines.h"
+#include "modules/video_coding/jitter_buffer.h"
+#include "modules/video_coding/media_opt_util.h"
+#include "modules/video_coding/packet.h"
+#include "modules/video_coding/receiver.h"
+#include "modules/video_coding/timing/timing.h"
+#include "modules/video_coding/video_coding_impl.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/location.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/one_time_event.h"
+#include "rtc_base/trace_event.h"
+#include "system_wrappers/include/clock.h"
+
+namespace webrtc {
+namespace vcm {
+
+VideoReceiver::VideoReceiver(Clock* clock,
+ VCMTiming* timing,
+ const FieldTrialsView& field_trials)
+ : clock_(clock),
+ _timing(timing),
+ _receiver(_timing, clock_, field_trials),
+ _decodedFrameCallback(_timing, clock_, field_trials),
+ _frameTypeCallback(nullptr),
+ _packetRequestCallback(nullptr),
+ _scheduleKeyRequest(false),
+ drop_frames_until_keyframe_(false),
+ max_nack_list_size_(0),
+ _codecDataBase(),
+ _retransmissionTimer(10, clock_),
+ _keyRequestTimer(500, clock_) {
+ decoder_thread_checker_.Detach();
+ module_thread_checker_.Detach();
+}
+
+VideoReceiver::~VideoReceiver() {
+ RTC_DCHECK_RUN_ON(&construction_thread_checker_);
+}
+
+void VideoReceiver::Process() {
+ RTC_DCHECK_RUN_ON(&module_thread_checker_);
+
+ // Key frame requests
+ if (_keyRequestTimer.TimeUntilProcess() == 0) {
+ _keyRequestTimer.Processed();
+ bool request_key_frame = _frameTypeCallback != nullptr;
+ if (request_key_frame) {
+ MutexLock lock(&process_mutex_);
+ request_key_frame = _scheduleKeyRequest;
+ }
+ if (request_key_frame)
+ RequestKeyFrame();
+ }
+
+ // Packet retransmission requests
+ // TODO(holmer): Add API for changing Process interval and make sure it's
+ // disabled when NACK is off.
+ if (_retransmissionTimer.TimeUntilProcess() == 0) {
+ _retransmissionTimer.Processed();
+ bool callback_registered = _packetRequestCallback != nullptr;
+ uint16_t length = max_nack_list_size_;
+ if (callback_registered && length > 0) {
+ // Collect sequence numbers from the default receiver.
+ bool request_key_frame = false;
+ std::vector<uint16_t> nackList = _receiver.NackList(&request_key_frame);
+ int32_t ret = VCM_OK;
+ if (request_key_frame) {
+ ret = RequestKeyFrame();
+ }
+ if (ret == VCM_OK && !nackList.empty()) {
+ MutexLock lock(&process_mutex_);
+ if (_packetRequestCallback != nullptr) {
+ _packetRequestCallback->ResendPackets(&nackList[0], nackList.size());
+ }
+ }
+ }
+ }
+}
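+
+// Illustrative driving loop (hypothetical owner code, not part of this file):
+// Process() is expected to be called repeatedly on the module thread so that
+// the key frame request and retransmission timers above fire on schedule.
+//
+//   while (receiving) {
+//     receiver->Process();
+//     SleepMs(poll_interval_ms);  // hypothetical pacing helper
+//   }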
+
+// Register a receive callback. Will be called whenever there is a new frame
+// ready for rendering.
+int32_t VideoReceiver::RegisterReceiveCallback(
+ VCMReceiveCallback* receiveCallback) {
+ RTC_DCHECK_RUN_ON(&construction_thread_checker_);
+ // This value is set before the decoder thread starts and unset after
+ // the decoder thread has been stopped.
+ _decodedFrameCallback.SetUserReceiveCallback(receiveCallback);
+ return VCM_OK;
+}
+
+// Register an externally defined decoder object.
+void VideoReceiver::RegisterExternalDecoder(VideoDecoder* externalDecoder,
+ uint8_t payloadType) {
+ RTC_DCHECK_RUN_ON(&construction_thread_checker_);
+ if (externalDecoder == nullptr) {
+ RTC_CHECK(_codecDataBase.DeregisterExternalDecoder(payloadType));
+ return;
+ }
+ _codecDataBase.RegisterExternalDecoder(payloadType, externalDecoder);
+}
+
+// Register a frame type request callback.
+int32_t VideoReceiver::RegisterFrameTypeCallback(
+ VCMFrameTypeCallback* frameTypeCallback) {
+ RTC_DCHECK_RUN_ON(&construction_thread_checker_);
+ // This callback is used on the module thread, but since we don't get
+ // callbacks on the module thread while the decoder thread isn't running
+ // (and this function must not be called when the decoder is running),
+ // we don't need a lock here.
+ _frameTypeCallback = frameTypeCallback;
+ return VCM_OK;
+}
+
+int32_t VideoReceiver::RegisterPacketRequestCallback(
+ VCMPacketRequestCallback* callback) {
+ RTC_DCHECK_RUN_ON(&construction_thread_checker_);
+ // This callback is used on the module thread, but since we don't get
+ // callbacks on the module thread while the decoder thread isn't running
+ // (and this function must not be called when the decoder is running),
+ // we don't need a lock here.
+ _packetRequestCallback = callback;
+ return VCM_OK;
+}
+
+// Decode next frame, blocking.
+// Should be called as often as possible to get the most out of the decoder.
+int32_t VideoReceiver::Decode(uint16_t maxWaitTimeMs) {
+ RTC_DCHECK_RUN_ON(&decoder_thread_checker_);
+ VCMEncodedFrame* frame = _receiver.FrameForDecoding(maxWaitTimeMs, true);
+
+ if (!frame)
+ return VCM_FRAME_NOT_READY;
+
+ bool drop_frame = false;
+ {
+ MutexLock lock(&process_mutex_);
+ if (drop_frames_until_keyframe_) {
+      // Still getting delta frames; schedule another keyframe request as if
+      // the decode had failed.
+ if (frame->FrameType() != VideoFrameType::kVideoFrameKey) {
+ drop_frame = true;
+ _scheduleKeyRequest = true;
+ } else {
+ drop_frames_until_keyframe_ = false;
+ }
+ }
+ }
+
+ if (drop_frame) {
+ _receiver.ReleaseFrame(frame);
+ return VCM_FRAME_NOT_READY;
+ }
+
+ // If this frame was too late, we should adjust the delay accordingly
+ if (frame->RenderTimeMs() > 0)
+ _timing->UpdateCurrentDelay(Timestamp::Millis(frame->RenderTimeMs()),
+ clock_->CurrentTime());
+
+ if (first_frame_received_()) {
+ RTC_LOG(LS_INFO) << "Received first complete decodable video frame";
+ }
+
+ const int32_t ret = Decode(*frame);
+ _receiver.ReleaseFrame(frame);
+ return ret;
+}
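+
+// Illustrative decoder-thread loop (hypothetical; `decoding` is a flag owned
+// by the caller): Decode() blocks for up to maxWaitTimeMs waiting for a
+// complete frame, so a tight loop paces itself.
+//
+//   while (decoding) {
+//     receiver->Decode(/*maxWaitTimeMs=*/50);
+//   }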
+
+int32_t VideoReceiver::RequestKeyFrame() {
+ RTC_DCHECK_RUN_ON(&module_thread_checker_);
+
+ TRACE_EVENT0("webrtc", "RequestKeyFrame");
+ if (_frameTypeCallback != nullptr) {
+ const int32_t ret = _frameTypeCallback->RequestKeyFrame();
+ if (ret < 0) {
+ return ret;
+ }
+ MutexLock lock(&process_mutex_);
+ _scheduleKeyRequest = false;
+ } else {
+ return VCM_MISSING_CALLBACK;
+ }
+ return VCM_OK;
+}
+
+// Must be called from inside the receive-side critical section.
+int32_t VideoReceiver::Decode(const VCMEncodedFrame& frame) {
+ RTC_DCHECK_RUN_ON(&decoder_thread_checker_);
+ TRACE_EVENT0("webrtc", "VideoReceiver::Decode");
+ // Change decoder if payload type has changed
+ VCMGenericDecoder* decoder =
+ _codecDataBase.GetDecoder(frame, &_decodedFrameCallback);
+ if (decoder == nullptr) {
+ return VCM_NO_CODEC_REGISTERED;
+ }
+ return decoder->Decode(frame, clock_->CurrentTime());
+}
+
+// Register possible receive codecs; can be called multiple times.
+void VideoReceiver::RegisterReceiveCodec(
+ uint8_t payload_type,
+ const VideoDecoder::Settings& settings) {
+ RTC_DCHECK_RUN_ON(&construction_thread_checker_);
+ _codecDataBase.RegisterReceiveCodec(payload_type, settings);
+}
+
+// Incoming packet from the network, parsed and ready for decoding;
+// non-blocking.
+int32_t VideoReceiver::IncomingPacket(const uint8_t* incomingPayload,
+ size_t payloadLength,
+ const RTPHeader& rtp_header,
+ const RTPVideoHeader& video_header) {
+ RTC_DCHECK_RUN_ON(&module_thread_checker_);
+ if (video_header.frame_type == VideoFrameType::kVideoFrameKey) {
+ TRACE_EVENT1("webrtc", "VCM::PacketKeyFrame", "seqnum",
+ rtp_header.sequenceNumber);
+ }
+ if (incomingPayload == nullptr) {
+ // The jitter buffer doesn't handle non-zero payload lengths for packets
+ // without payload.
+ // TODO(holmer): We should fix this in the jitter buffer.
+ payloadLength = 0;
+ }
+ // Callers don't provide any ntp time.
+ const VCMPacket packet(incomingPayload, payloadLength, rtp_header,
+ video_header, /*ntp_time_ms=*/0,
+ clock_->CurrentTime());
+ int32_t ret = _receiver.InsertPacket(packet);
+
+ // TODO(holmer): Investigate if this somehow should use the key frame
+ // request scheduling to throttle the requests.
+ if (ret == VCM_FLUSH_INDICATOR) {
+ {
+ MutexLock lock(&process_mutex_);
+ drop_frames_until_keyframe_ = true;
+ }
+ RequestKeyFrame();
+ } else if (ret < 0) {
+ return ret;
+ }
+ return VCM_OK;
+}
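+
+// Illustrative call (hypothetical values): a padding-only packet may be
+// inserted with a null payload; the code above then normalizes the payload
+// length to 0 before handing the packet to the jitter buffer.
+//
+//   receiver->IncomingPacket(nullptr, /*payloadLength=*/220, rtp_header,
+//                            video_header);  // treated as length 0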
+
+void VideoReceiver::SetNackSettings(size_t max_nack_list_size,
+ int max_packet_age_to_nack,
+ int max_incomplete_time_ms) {
+ RTC_DCHECK_RUN_ON(&construction_thread_checker_);
+ if (max_nack_list_size != 0) {
+ max_nack_list_size_ = max_nack_list_size;
+ }
+ _receiver.SetNackSettings(max_nack_list_size, max_packet_age_to_nack,
+ max_incomplete_time_ms);
+}
+
+} // namespace vcm
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/video_receiver2.cc b/third_party/libwebrtc/modules/video_coding/video_receiver2.cc
new file mode 100644
index 0000000000..2e100209e8
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/video_receiver2.cc
@@ -0,0 +1,131 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/video_receiver2.h"
+
+#include <stddef.h>
+
+#include <cstdint>
+#include <vector>
+
+#include "api/video_codecs/video_codec.h"
+#include "api/video_codecs/video_decoder.h"
+#include "modules/video_coding/decoder_database.h"
+#include "modules/video_coding/encoded_frame.h"
+#include "modules/video_coding/generic_decoder.h"
+#include "modules/video_coding/include/video_coding_defines.h"
+#include "modules/video_coding/timing/timing.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/trace_event.h"
+#include "system_wrappers/include/clock.h"
+
+namespace webrtc {
+
+VideoReceiver2::VideoReceiver2(Clock* clock,
+ VCMTiming* timing,
+ const FieldTrialsView& field_trials)
+ : clock_(clock),
+ timing_(timing),
+ decodedFrameCallback_(timing_, clock_, field_trials),
+ codecDataBase_() {
+ decoder_sequence_checker_.Detach();
+}
+
+VideoReceiver2::~VideoReceiver2() {
+ RTC_DCHECK_RUN_ON(&construction_sequence_checker_);
+}
+
+// Register a receive callback. Will be called whenever there is a new frame
+// ready for rendering.
+int32_t VideoReceiver2::RegisterReceiveCallback(
+ VCMReceiveCallback* receiveCallback) {
+ RTC_DCHECK_RUN_ON(&construction_sequence_checker_);
+ RTC_DCHECK(!IsDecoderThreadRunning());
+ // This value is set before the decoder thread starts and unset after
+ // the decoder thread has been stopped.
+ decodedFrameCallback_.SetUserReceiveCallback(receiveCallback);
+ return VCM_OK;
+}
+
+// Register an externally defined decoder object. This may be called on either
+// the construction sequence or the decoder sequence to allow for lazy creation
+// of video decoders. If called on the decoder sequence, `externalDecoder`
+// cannot be a nullptr. It's the responsibility of the caller to make sure that
+// access from the two sequences is mutually exclusive.
+void VideoReceiver2::RegisterExternalDecoder(VideoDecoder* externalDecoder,
+ uint8_t payloadType) {
+ if (IsDecoderThreadRunning()) {
+ RTC_DCHECK_RUN_ON(&decoder_sequence_checker_);
+ // Don't allow deregistering decoders on the decoder thread.
+ RTC_DCHECK(externalDecoder != nullptr);
+ } else {
+ RTC_DCHECK_RUN_ON(&construction_sequence_checker_);
+ }
+
+ if (externalDecoder == nullptr) {
+ codecDataBase_.DeregisterExternalDecoder(payloadType);
+ return;
+ }
+ codecDataBase_.RegisterExternalDecoder(payloadType, externalDecoder);
+}
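+
+// For illustration (hypothetical decoder object and payload type): an external
+// decoder is registered for a payload type and later deregistered by passing
+// nullptr, which is only allowed off the decoder sequence.
+//
+//   receiver.RegisterExternalDecoder(&vp8_decoder, /*payloadType=*/96);
+//   ...
+//   receiver.RegisterExternalDecoder(nullptr, /*payloadType=*/96);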
+
+bool VideoReceiver2::IsExternalDecoderRegistered(uint8_t payloadType) const {
+ RTC_DCHECK_RUN_ON(&decoder_sequence_checker_);
+ return codecDataBase_.IsExternalDecoderRegistered(payloadType);
+}
+
+void VideoReceiver2::DecoderThreadStarting() {
+ RTC_DCHECK_RUN_ON(&construction_sequence_checker_);
+ RTC_DCHECK(!IsDecoderThreadRunning());
+#if RTC_DCHECK_IS_ON
+ decoder_thread_is_running_ = true;
+#endif
+}
+
+void VideoReceiver2::DecoderThreadStopped() {
+ RTC_DCHECK_RUN_ON(&construction_sequence_checker_);
+ RTC_DCHECK(IsDecoderThreadRunning());
+#if RTC_DCHECK_IS_ON
+ decoder_thread_is_running_ = false;
+ decoder_sequence_checker_.Detach();
+#endif
+}
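+
+// Expected call pattern (illustrative; `decoder_thread` is hypothetical): the
+// owning receive stream brackets decoder-thread activity with these
+// notifications so that the sequence checkers above can validate threading.
+//
+//   receiver.DecoderThreadStarting();
+//   decoder_thread.Start();  // the thread calls receiver.Decode(frame)
+//   ...
+//   decoder_thread.Stop();
+//   receiver.DecoderThreadStopped();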
+
+// Must be called from inside the receive-side critical section.
+int32_t VideoReceiver2::Decode(const VCMEncodedFrame* frame) {
+ RTC_DCHECK_RUN_ON(&decoder_sequence_checker_);
+ TRACE_EVENT0("webrtc", "VideoReceiver2::Decode");
+ // Change decoder if payload type has changed
+ VCMGenericDecoder* decoder =
+ codecDataBase_.GetDecoder(*frame, &decodedFrameCallback_);
+ if (decoder == nullptr) {
+ return VCM_NO_CODEC_REGISTERED;
+ }
+ return decoder->Decode(*frame, clock_->CurrentTime());
+}
+
+// Register possible receive codecs; can be called multiple times.
+void VideoReceiver2::RegisterReceiveCodec(
+ uint8_t payload_type,
+ const VideoDecoder::Settings& settings) {
+ RTC_DCHECK_RUN_ON(&construction_sequence_checker_);
+ RTC_DCHECK(!IsDecoderThreadRunning());
+ codecDataBase_.RegisterReceiveCodec(payload_type, settings);
+}
+
+bool VideoReceiver2::IsDecoderThreadRunning() {
+#if RTC_DCHECK_IS_ON
+ return decoder_thread_is_running_;
+#else
+ return true;
+#endif
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/video_receiver2.h b/third_party/libwebrtc/modules/video_coding/video_receiver2.h
new file mode 100644
index 0000000000..c7db2fe4e0
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/video_receiver2.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_VIDEO_RECEIVER2_H_
+#define MODULES_VIDEO_CODING_VIDEO_RECEIVER2_H_
+
+#include "api/field_trials_view.h"
+#include "api/sequence_checker.h"
+#include "api/video_codecs/video_decoder.h"
+#include "modules/video_coding/decoder_database.h"
+#include "modules/video_coding/encoded_frame.h"
+#include "modules/video_coding/generic_decoder.h"
+#include "modules/video_coding/timing/timing.h"
+#include "system_wrappers/include/clock.h"
+
+namespace webrtc {
+
+// This class is a copy of vcm::VideoReceiver, trimmed down to what's used by
+// VideoReceiveStream, with the aim of incrementally trimming it down further
+// and ultimately deleting it. It's difficult to do this incrementally with the
+// original VideoReceiver class, since it is used by the legacy
+// VideoCodingModule API.
+class VideoReceiver2 {
+ public:
+ VideoReceiver2(Clock* clock,
+ VCMTiming* timing,
+ const FieldTrialsView& field_trials);
+ ~VideoReceiver2();
+
+ void RegisterReceiveCodec(uint8_t payload_type,
+ const VideoDecoder::Settings& decoder_settings);
+
+ void RegisterExternalDecoder(VideoDecoder* externalDecoder,
+ uint8_t payloadType);
+ bool IsExternalDecoderRegistered(uint8_t payloadType) const;
+ int32_t RegisterReceiveCallback(VCMReceiveCallback* receiveCallback);
+
+ int32_t Decode(const webrtc::VCMEncodedFrame* frame);
+
+ // Notification methods that are used to check our internal state and validate
+ // threading assumptions. These are called by VideoReceiveStreamInterface.
+ // See `IsDecoderThreadRunning()` for more details.
+ void DecoderThreadStarting();
+ void DecoderThreadStopped();
+
+ private:
+ // Used for DCHECKing thread correctness.
+  // In builds where DCHECKs are enabled, this will return false before
+  // DecoderThreadStarting is called, then true until DecoderThreadStopped
+  // is called.
+ // In builds where DCHECKs aren't enabled, it will return true.
+ bool IsDecoderThreadRunning();
+
+ SequenceChecker construction_sequence_checker_;
+ SequenceChecker decoder_sequence_checker_;
+ Clock* const clock_;
+ VCMTiming* timing_;
+ VCMDecodedFrameCallback decodedFrameCallback_;
+
+ // Callbacks are set before the decoder thread starts.
+  // Once the decoder thread has been started, usage of `codecDataBase_` moves
+  // over to the decoder thread.
+ VCMDecoderDataBase codecDataBase_;
+
+#if RTC_DCHECK_IS_ON
+ bool decoder_thread_is_running_ = false;
+#endif
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_VIDEO_RECEIVER2_H_
diff --git a/third_party/libwebrtc/modules/video_coding/video_receiver_unittest.cc b/third_party/libwebrtc/modules/video_coding/video_receiver_unittest.cc
new file mode 100644
index 0000000000..f2ebce8ec2
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/video_receiver_unittest.cc
@@ -0,0 +1,234 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "api/test/mock_video_decoder.h"
+#include "api/video_codecs/video_decoder.h"
+#include "modules/video_coding/include/video_coding.h"
+#include "modules/video_coding/timing/timing.h"
+#include "modules/video_coding/video_coding_impl.h"
+#include "system_wrappers/include/clock.h"
+#include "test/gtest.h"
+#include "test/scoped_key_value_config.h"
+
+using ::testing::_;
+using ::testing::AnyNumber;
+using ::testing::NiceMock;
+
+namespace webrtc {
+namespace vcm {
+namespace {
+
+class MockPacketRequestCallback : public VCMPacketRequestCallback {
+ public:
+ MOCK_METHOD(int32_t,
+ ResendPackets,
+ (const uint16_t* sequenceNumbers, uint16_t length),
+ (override));
+};
+
+class MockVCMReceiveCallback : public VCMReceiveCallback {
+ public:
+ MockVCMReceiveCallback() {}
+ virtual ~MockVCMReceiveCallback() {}
+
+ MOCK_METHOD(
+ int32_t,
+ FrameToRender,
+ (VideoFrame&, absl::optional<uint8_t>, TimeDelta, VideoContentType),
+ (override));
+ MOCK_METHOD(void, OnIncomingPayloadType, (int), (override));
+ MOCK_METHOD(void, OnDecoderImplementationName, (const char*), (override));
+};
+
+class TestVideoReceiver : public ::testing::Test {
+ protected:
+ static const int kUnusedPayloadType = 10;
+ static const uint16_t kMaxWaitTimeMs = 100;
+
+ TestVideoReceiver()
+ : clock_(0),
+ timing_(&clock_, field_trials_),
+ receiver_(&clock_, &timing_, field_trials_) {}
+
+ virtual void SetUp() {
+ // Register decoder.
+ receiver_.RegisterExternalDecoder(&decoder_, kUnusedPayloadType);
+ VideoDecoder::Settings settings;
+ settings.set_codec_type(kVideoCodecVP8);
+ receiver_.RegisterReceiveCodec(kUnusedPayloadType, settings);
+
+ // Set protection mode.
+ const size_t kMaxNackListSize = 250;
+ const int kMaxPacketAgeToNack = 450;
+ receiver_.SetNackSettings(kMaxNackListSize, kMaxPacketAgeToNack, 0);
+ EXPECT_EQ(
+ 0, receiver_.RegisterPacketRequestCallback(&packet_request_callback_));
+
+ // Since we call Decode, we need to provide a valid receive callback.
+ // However, for the purposes of these tests, we ignore the callbacks.
+ EXPECT_CALL(receive_callback_, OnIncomingPayloadType(_)).Times(AnyNumber());
+ EXPECT_CALL(receive_callback_, OnDecoderImplementationName(_))
+ .Times(AnyNumber());
+ receiver_.RegisterReceiveCallback(&receive_callback_);
+ }
+
+ RTPHeader GetDefaultRTPHeader() const {
+ RTPHeader header;
+ header.markerBit = false;
+ header.payloadType = kUnusedPayloadType;
+ header.ssrc = 1;
+ header.headerLength = 12;
+ return header;
+ }
+
+ RTPVideoHeader GetDefaultVp8Header() const {
+ RTPVideoHeader video_header = {};
+ video_header.frame_type = VideoFrameType::kEmptyFrame;
+ video_header.codec = kVideoCodecVP8;
+ return video_header;
+ }
+
+ void InsertAndVerifyPaddingFrame(const uint8_t* payload,
+ RTPHeader* header,
+ const RTPVideoHeader& video_header) {
+ for (int j = 0; j < 5; ++j) {
+      // Padding-only packets are passed to the VCM with payload size 0.
+ EXPECT_EQ(0, receiver_.IncomingPacket(payload, 0, *header, video_header));
+ ++header->sequenceNumber;
+ }
+ receiver_.Process();
+ EXPECT_CALL(decoder_, Decode(_, _, _)).Times(0);
+ EXPECT_EQ(VCM_FRAME_NOT_READY, receiver_.Decode(kMaxWaitTimeMs));
+ }
+
+ void InsertAndVerifyDecodableFrame(const uint8_t* payload,
+ size_t length,
+ RTPHeader* header,
+ const RTPVideoHeader& video_header) {
+ EXPECT_EQ(0,
+ receiver_.IncomingPacket(payload, length, *header, video_header));
+ ++header->sequenceNumber;
+ EXPECT_CALL(packet_request_callback_, ResendPackets(_, _)).Times(0);
+
+ receiver_.Process();
+ EXPECT_CALL(decoder_, Decode(_, _, _)).Times(1);
+ EXPECT_EQ(0, receiver_.Decode(kMaxWaitTimeMs));
+ }
+
+ test::ScopedKeyValueConfig field_trials_;
+ SimulatedClock clock_;
+ NiceMock<MockVideoDecoder> decoder_;
+ NiceMock<MockPacketRequestCallback> packet_request_callback_;
+ VCMTiming timing_;
+ MockVCMReceiveCallback receive_callback_;
+ VideoReceiver receiver_;
+};
+
+TEST_F(TestVideoReceiver, PaddingOnlyFrames) {
+ const size_t kPaddingSize = 220;
+ const uint8_t kPayload[kPaddingSize] = {0};
+ RTPHeader header = GetDefaultRTPHeader();
+ RTPVideoHeader video_header = GetDefaultVp8Header();
+ header.paddingLength = kPaddingSize;
+ for (int i = 0; i < 10; ++i) {
+ EXPECT_CALL(packet_request_callback_, ResendPackets(_, _)).Times(0);
+ InsertAndVerifyPaddingFrame(kPayload, &header, video_header);
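+ // Advance one 30 fps frame interval: 33 ms of wall-clock time, i.e. 3000
+ // ticks of the 90 kHz RTP clock.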
+ clock_.AdvanceTimeMilliseconds(33);
+ header.timestamp += 3000;
+ }
+}
+
+TEST_F(TestVideoReceiver, PaddingOnlyFramesWithLosses) {
+ const size_t kFrameSize = 1200;
+ const size_t kPaddingSize = 220;
+ const uint8_t kPayload[kFrameSize] = {0};
+ RTPHeader header = GetDefaultRTPHeader();
+ RTPVideoHeader video_header = GetDefaultVp8Header();
+ header.paddingLength = kPaddingSize;
+ video_header.video_type_header.emplace<RTPVideoHeaderVP8>();
+
+ // Insert one video frame to get one frame decoded.
+ video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ video_header.is_first_packet_in_frame = true;
+ header.markerBit = true;
+ InsertAndVerifyDecodableFrame(kPayload, kFrameSize, &header, video_header);
+
+ clock_.AdvanceTimeMilliseconds(33);
+ header.timestamp += 3000;
+ video_header.frame_type = VideoFrameType::kEmptyFrame;
+ video_header.is_first_packet_in_frame = false;
+ header.markerBit = false;
+ // Insert padding frames.
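+ // Loss pattern: the whole 4th frame (i == 3, 5 packets) is skipped, so each
+ // later padding frame renews a NACK for those 5 packets; after the extra
+ // single-packet loss at i == 5, 6 packets are re-requested.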
+ for (int i = 0; i < 10; ++i) {
+ // Lose one packet from the 6th frame.
+ if (i == 5) {
+ ++header.sequenceNumber;
+ }
+ // Lose the 4th frame.
+ if (i == 3) {
+ header.sequenceNumber += 5;
+ } else {
+ if (i > 3 && i < 5) {
+ EXPECT_CALL(packet_request_callback_, ResendPackets(_, 5)).Times(1);
+ } else if (i >= 5) {
+ EXPECT_CALL(packet_request_callback_, ResendPackets(_, 6)).Times(1);
+ } else {
+ EXPECT_CALL(packet_request_callback_, ResendPackets(_, _)).Times(0);
+ }
+ InsertAndVerifyPaddingFrame(kPayload, &header, video_header);
+ }
+ clock_.AdvanceTimeMilliseconds(33);
+ header.timestamp += 3000;
+ }
+}
+
+TEST_F(TestVideoReceiver, PaddingOnlyAndVideo) {
+ const size_t kFrameSize = 1200;
+ const size_t kPaddingSize = 220;
+ const uint8_t kPayload[kFrameSize] = {0};
+ RTPHeader header = GetDefaultRTPHeader();
+ RTPVideoHeader video_header = GetDefaultVp8Header();
+ video_header.is_first_packet_in_frame = false;
+ header.paddingLength = kPaddingSize;
+ auto& vp8_header =
+ video_header.video_type_header.emplace<RTPVideoHeaderVP8>();
+ vp8_header.pictureId = -1;
+ vp8_header.tl0PicIdx = -1;
+
+ for (int i = 0; i < 3; ++i) {
+ // Insert 2 video frames.
+ for (int j = 0; j < 2; ++j) {
+ if (i == 0 && j == 0) // First frame should be a key frame.
+ video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ else
+ video_header.frame_type = VideoFrameType::kVideoFrameDelta;
+ video_header.is_first_packet_in_frame = true;
+ header.markerBit = true;
+ InsertAndVerifyDecodableFrame(kPayload, kFrameSize, &header,
+ video_header);
+ clock_.AdvanceTimeMilliseconds(33);
+ header.timestamp += 3000;
+ }
+
+ // Insert 2 padding-only frames.
+ video_header.frame_type = VideoFrameType::kEmptyFrame;
+ video_header.is_first_packet_in_frame = false;
+ header.markerBit = false;
+ for (int j = 0; j < 2; ++j) {
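+ // The padding insertion itself is disabled below; the loop still advances
+ // the clock and RTP timestamp between frames.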
+ // InsertAndVerifyPaddingFrame(kPayload, &header, video_header);
+ clock_.AdvanceTimeMilliseconds(33);
+ header.timestamp += 3000;
+ }
+ }
+}
+
+} // namespace
+} // namespace vcm
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/webrtc_h264_gn/moz.build b/third_party/libwebrtc/modules/video_coding/webrtc_h264_gn/moz.build
new file mode 100644
index 0000000000..68f59decaa
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/webrtc_h264_gn/moz.build
@@ -0,0 +1,219 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/media/libyuv/",
+ "/media/libyuv/libyuv/include/",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/video_coding/codecs/h264/h264.cc",
+ "/third_party/libwebrtc/modules/video_coding/codecs/h264/h264_color_space.cc",
+ "/third_party/libwebrtc/modules/video_coding/codecs/h264/h264_decoder_impl.cc",
+ "/third_party/libwebrtc/modules/video_coding/codecs/h264/h264_encoder_impl.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "GLESv2",
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "dl",
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("webrtc_h264_gn")
diff --git a/third_party/libwebrtc/modules/video_coding/webrtc_libvpx_interface_gn/moz.build b/third_party/libwebrtc/modules/video_coding/webrtc_libvpx_interface_gn/moz.build
new file mode 100644
index 0000000000..6f360d915d
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/webrtc_libvpx_interface_gn/moz.build
@@ -0,0 +1,201 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/video_coding/codecs/interface/libvpx_interface.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("webrtc_libvpx_interface_gn")
diff --git a/third_party/libwebrtc/modules/video_coding/webrtc_multiplex_gn/moz.build b/third_party/libwebrtc/modules/video_coding/webrtc_multiplex_gn/moz.build
new file mode 100644
index 0000000000..2b1679f941
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/webrtc_multiplex_gn/moz.build
@@ -0,0 +1,217 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/video_coding/codecs/multiplex/augmented_video_frame_buffer.cc",
+ "/third_party/libwebrtc/modules/video_coding/codecs/multiplex/multiplex_decoder_adapter.cc",
+ "/third_party/libwebrtc/modules/video_coding/codecs/multiplex/multiplex_encoded_image_packer.cc",
+ "/third_party/libwebrtc/modules/video_coding/codecs/multiplex/multiplex_encoder_adapter.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "GLESv2",
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "dl",
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("webrtc_multiplex_gn")
diff --git a/third_party/libwebrtc/modules/video_coding/webrtc_vp8_gn/moz.build b/third_party/libwebrtc/modules/video_coding/webrtc_vp8_gn/moz.build
new file mode 100644
index 0000000000..d4a85cbfc7
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/webrtc_vp8_gn/moz.build
@@ -0,0 +1,217 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/media/libyuv/",
+ "/media/libyuv/libyuv/include/",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/video_coding/codecs/vp8/libvpx_vp8_decoder.cc",
+ "/third_party/libwebrtc/modules/video_coding/codecs/vp8/libvpx_vp8_encoder.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "GLESv2",
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "dl",
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("webrtc_vp8_gn")
diff --git a/third_party/libwebrtc/modules/video_coding/webrtc_vp8_scalability_gn/moz.build b/third_party/libwebrtc/modules/video_coding/webrtc_vp8_scalability_gn/moz.build
new file mode 100644
index 0000000000..1e8317f46e
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/webrtc_vp8_scalability_gn/moz.build
@@ -0,0 +1,201 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/video_coding/codecs/vp8/vp8_scalability.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("webrtc_vp8_scalability_gn")
diff --git a/third_party/libwebrtc/modules/video_coding/webrtc_vp8_temporal_layers_gn/moz.build b/third_party/libwebrtc/modules/video_coding/webrtc_vp8_temporal_layers_gn/moz.build
new file mode 100644
index 0000000000..2e8bb42e84
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/webrtc_vp8_temporal_layers_gn/moz.build
@@ -0,0 +1,219 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+SOURCES += [
+ "/third_party/libwebrtc/modules/video_coding/codecs/vp8/screenshare_layers.cc"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/video_coding/codecs/vp8/default_temporal_layers.cc",
+ "/third_party/libwebrtc/modules/video_coding/codecs/vp8/temporal_layers_checker.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "GLESv2",
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "dl",
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("webrtc_vp8_temporal_layers_gn")
diff --git a/third_party/libwebrtc/modules/video_coding/webrtc_vp9_gn/moz.build b/third_party/libwebrtc/modules/video_coding/webrtc_vp9_gn/moz.build
new file mode 100644
index 0000000000..e3f0d1774e
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/webrtc_vp9_gn/moz.build
@@ -0,0 +1,219 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/media/libyuv/",
+ "/media/libyuv/libyuv/include/",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/video_coding/codecs/vp9/libvpx_vp9_decoder.cc",
+ "/third_party/libwebrtc/modules/video_coding/codecs/vp9/libvpx_vp9_encoder.cc",
+ "/third_party/libwebrtc/modules/video_coding/codecs/vp9/vp9.cc",
+ "/third_party/libwebrtc/modules/video_coding/codecs/vp9/vp9_frame_buffer_pool.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "GLESv2",
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "dl",
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("webrtc_vp9_gn")
diff --git a/third_party/libwebrtc/modules/video_coding/webrtc_vp9_helpers_gn/moz.build b/third_party/libwebrtc/modules/video_coding/webrtc_vp9_helpers_gn/moz.build
new file mode 100644
index 0000000000..c73282c2ee
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/webrtc_vp9_helpers_gn/moz.build
@@ -0,0 +1,214 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/video_coding/codecs/vp9/svc_config.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "GLESv2",
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "dl",
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("webrtc_vp9_helpers_gn")
diff --git a/third_party/libwebrtc/modules/video_processing/BUILD.gn b/third_party/libwebrtc/modules/video_processing/BUILD.gn
new file mode 100644
index 0000000000..4b25b365f1
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_processing/BUILD.gn
@@ -0,0 +1,112 @@
+# Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+import("../../webrtc.gni")
+
+build_video_processing_sse2 = target_cpu == "x86" || target_cpu == "x64"
+
+rtc_library("video_processing") {
+ visibility = [ "*" ]
+ sources = [
+ "util/denoiser_filter.cc",
+ "util/denoiser_filter_c.cc",
+ "util/denoiser_filter_c.h",
+ "util/noise_estimation.cc",
+ "util/noise_estimation.h",
+ "util/skin_detection.cc",
+ "util/skin_detection.h",
+ "video_denoiser.cc",
+ "video_denoiser.h",
+ ]
+
+ deps = [
+ ":denoiser_filter",
+ "../../api:scoped_refptr",
+ "../../api/video:video_frame",
+ "../../api/video:video_rtp_headers",
+ "../../common_audio",
+ "../../common_video",
+ "../../rtc_base:checks",
+ "../../rtc_base/system:arch",
+ "../../system_wrappers",
+ "//third_party/libyuv",
+ ]
+ if (build_with_mozilla) {
+ deps -= [ "//third_party/libyuv" ]
+ include_dirs = [
+ "/media/libyuv",
+ "/media/libyuv/libyuv/include",
+ ]
+ }
+ if (build_video_processing_sse2) {
+ deps += [ ":video_processing_sse2" ]
+ }
+ if (rtc_build_with_neon) {
+ deps += [ ":video_processing_neon" ]
+ }
+}
+
+rtc_source_set("denoiser_filter") {
+ # Target that only exists to avoid cyclic dependency errors for the SSE2 and
+ # Neon implementations below.
+ sources = [ "util/denoiser_filter.h" ]
+}
+
+if (build_video_processing_sse2) {
+ rtc_library("video_processing_sse2") {
+ sources = [
+ "util/denoiser_filter_sse2.cc",
+ "util/denoiser_filter_sse2.h",
+ ]
+
+ deps = [
+ ":denoiser_filter",
+ "../../system_wrappers",
+ ]
+
+ if (is_posix || is_fuchsia) {
+ cflags = [ "-msse2" ]
+ }
+ }
+}
+
+if (rtc_build_with_neon) {
+ rtc_library("video_processing_neon") {
+ sources = [
+ "util/denoiser_filter_neon.cc",
+ "util/denoiser_filter_neon.h",
+ ]
+
+ deps = [ ":denoiser_filter" ]
+
+ if (target_cpu != "arm64") {
+ suppressed_configs += [ "//build/config/compiler:compiler_arm_fpu" ]
+ cflags = [ "-mfpu=neon" ]
+ }
+ }
+}
+
+if (rtc_include_tests) {
+ rtc_library("video_processing_unittests") {
+ testonly = true
+
+ sources = [ "test/denoiser_test.cc" ]
+ deps = [
+ ":denoiser_filter",
+ ":video_processing",
+ "../../api:scoped_refptr",
+ "../../api/video:video_frame",
+ "../../api/video:video_rtp_headers",
+ "../../common_video",
+ "../../test:fileutils",
+ "../../test:frame_utils",
+ "../../test:test_support",
+ "../../test:video_test_common",
+ ]
+ }
+}
diff --git a/third_party/libwebrtc/modules/video_processing/DEPS b/third_party/libwebrtc/modules/video_processing/DEPS
new file mode 100644
index 0000000000..f034c67ff8
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_processing/DEPS
@@ -0,0 +1,6 @@
+include_rules = [
+ "+common_audio",
+ "+common_video",
+ "+system_wrappers",
+ "+third_party/libyuv",
+]
diff --git a/third_party/libwebrtc/modules/video_processing/OWNERS b/third_party/libwebrtc/modules/video_processing/OWNERS
new file mode 100644
index 0000000000..07c2987707
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_processing/OWNERS
@@ -0,0 +1,2 @@
+stefan@webrtc.org
+marpan@webrtc.org
diff --git a/third_party/libwebrtc/modules/video_processing/denoiser_filter_gn/moz.build b/third_party/libwebrtc/modules/video_processing/denoiser_filter_gn/moz.build
new file mode 100644
index 0000000000..0aaecfdf51
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_processing/denoiser_filter_gn/moz.build
@@ -0,0 +1,189 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("denoiser_filter_gn")
diff --git a/third_party/libwebrtc/modules/video_processing/test/denoiser_test.cc b/third_party/libwebrtc/modules/video_processing/test/denoiser_test.cc
new file mode 100644
index 0000000000..569b0f66a3
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_processing/test/denoiser_test.cc
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+
+#include <memory>
+#include <string>
+
+#include "api/scoped_refptr.h"
+#include "api/video/i420_buffer.h"
+#include "api/video/video_frame_buffer.h"
+#include "modules/video_processing/util/denoiser_filter.h"
+#include "modules/video_processing/util/skin_detection.h"
+#include "modules/video_processing/video_denoiser.h"
+#include "test/frame_utils.h"
+#include "test/gtest.h"
+#include "test/testsupport/file_utils.h"
+
+namespace webrtc {
+
+TEST(VideoDenoiserTest, Variance) {
+ std::unique_ptr<DenoiserFilter> df_c(DenoiserFilter::Create(false, nullptr));
+ std::unique_ptr<DenoiserFilter> df_sse_neon(
+ DenoiserFilter::Create(true, nullptr));
+ uint8_t src[16 * 16], dst[16 * 16];
+ uint32_t sum = 0, sse = 0, var;
+ for (int i = 0; i < 16; ++i) {
+ for (int j = 0; j < 16; ++j) {
+ src[i * 16 + j] = i * 16 + j;
+ }
+ }
+ // Compute the 16x8 variance of the 16x16 block.
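+  // Variance16x8 doubles the stride, so it reads rows 0, 2, ..., 14 of the
+  // block; those sampled pixels have values i * 32 + j.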
+ for (int i = 0; i < 8; ++i) {
+ for (int j = 0; j < 16; ++j) {
+ sum += (i * 32 + j);
+ sse += (i * 32 + j) * (i * 32 + j);
+ }
+ }
+ var = sse - ((sum * sum) >> 7);
+ memset(dst, 0, 16 * 16);
+ EXPECT_EQ(var, df_c->Variance16x8(src, 16, dst, 16, &sse));
+ EXPECT_EQ(var, df_sse_neon->Variance16x8(src, 16, dst, 16, &sse));
+}
+
+TEST(VideoDenoiserTest, MbDenoise) {
+ std::unique_ptr<DenoiserFilter> df_c(DenoiserFilter::Create(false, nullptr));
+ std::unique_ptr<DenoiserFilter> df_sse_neon(
+ DenoiserFilter::Create(true, nullptr));
+ uint8_t running_src[16 * 16], src[16 * 16];
+ uint8_t dst[16 * 16], dst_sse_neon[16 * 16];
+
+ // Test case: `diff` <= |3 + shift_inc1|
+ for (int i = 0; i < 16; ++i) {
+ for (int j = 0; j < 16; ++j) {
+ running_src[i * 16 + j] = i * 11 + j;
+ src[i * 16 + j] = i * 11 + j + 2;
+ }
+ }
+ memset(dst, 0, 16 * 16);
+ df_c->MbDenoise(running_src, 16, dst, 16, src, 16, 0, 1);
+ memset(dst_sse_neon, 0, 16 * 16);
+ df_sse_neon->MbDenoise(running_src, 16, dst_sse_neon, 16, src, 16, 0, 1);
+ EXPECT_EQ(0, memcmp(dst, dst_sse_neon, 16 * 16));
+
+ // Test case: `diff` >= |4 + shift_inc1|
+ for (int i = 0; i < 16; ++i) {
+ for (int j = 0; j < 16; ++j) {
+ running_src[i * 16 + j] = i * 11 + j;
+ src[i * 16 + j] = i * 11 + j + 5;
+ }
+ }
+ memset(dst, 0, 16 * 16);
+ df_c->MbDenoise(running_src, 16, dst, 16, src, 16, 0, 1);
+ memset(dst_sse_neon, 0, 16 * 16);
+ df_sse_neon->MbDenoise(running_src, 16, dst_sse_neon, 16, src, 16, 0, 1);
+ EXPECT_EQ(0, memcmp(dst, dst_sse_neon, 16 * 16));
+
+ // Test case: `diff` >= 8
+ for (int i = 0; i < 16; ++i) {
+ for (int j = 0; j < 16; ++j) {
+ running_src[i * 16 + j] = i * 11 + j;
+ src[i * 16 + j] = i * 11 + j + 8;
+ }
+ }
+ memset(dst, 0, 16 * 16);
+ df_c->MbDenoise(running_src, 16, dst, 16, src, 16, 0, 1);
+ memset(dst_sse_neon, 0, 16 * 16);
+ df_sse_neon->MbDenoise(running_src, 16, dst_sse_neon, 16, src, 16, 0, 1);
+ EXPECT_EQ(0, memcmp(dst, dst_sse_neon, 16 * 16));
+
+ // Test case: `diff` > 15
+ for (int i = 0; i < 16; ++i) {
+ for (int j = 0; j < 16; ++j) {
+ running_src[i * 16 + j] = i * 11 + j;
+ src[i * 16 + j] = i * 11 + j + 16;
+ }
+ }
+ memset(dst, 0, 16 * 16);
+ DenoiserDecision decision =
+ df_c->MbDenoise(running_src, 16, dst, 16, src, 16, 0, 1);
+ EXPECT_EQ(COPY_BLOCK, decision);
+ decision = df_sse_neon->MbDenoise(running_src, 16, dst, 16, src, 16, 0, 1);
+ EXPECT_EQ(COPY_BLOCK, decision);
+}
+
+TEST(VideoDenoiserTest, Denoiser) {
+ const int kWidth = 352;
+ const int kHeight = 288;
+
+ const std::string video_file =
+ webrtc::test::ResourcePath("foreman_cif", "yuv");
+ FILE* source_file = fopen(video_file.c_str(), "rb");
+ ASSERT_TRUE(source_file != nullptr)
+ << "Cannot open source file: " << video_file;
+
+ // Create pure C denoiser.
+ VideoDenoiser denoiser_c(false);
+ // Create SSE or NEON denoiser.
+ VideoDenoiser denoiser_sse_neon(true);
+
+ for (;;) {
+ rtc::scoped_refptr<I420BufferInterface> video_frame_buffer(
+ test::ReadI420Buffer(kWidth, kHeight, source_file));
+ if (!video_frame_buffer)
+ break;
+
+ rtc::scoped_refptr<I420BufferInterface> denoised_frame_c(
+ denoiser_c.DenoiseFrame(video_frame_buffer, false));
+ rtc::scoped_refptr<I420BufferInterface> denoised_frame_sse_neon(
+ denoiser_sse_neon.DenoiseFrame(video_frame_buffer, false));
+
+    // Denoising results should be the same for the C and SSE/NEON denoisers.
+ ASSERT_TRUE(
+ test::FrameBufsEqual(denoised_frame_c, denoised_frame_sse_neon));
+ }
+ ASSERT_NE(0, feof(source_file)) << "Error reading source file";
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_processing/util/denoiser_filter.cc b/third_party/libwebrtc/modules/video_processing/util/denoiser_filter.cc
new file mode 100644
index 0000000000..0e1570114a
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_processing/util/denoiser_filter.cc
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_processing/util/denoiser_filter.h"
+
+#include "modules/video_processing/util/denoiser_filter_c.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/system/arch.h"
+#include "system_wrappers/include/cpu_features_wrapper.h"
+
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+#include "modules/video_processing/util/denoiser_filter_sse2.h"
+#elif defined(WEBRTC_HAS_NEON)
+#include "modules/video_processing/util/denoiser_filter_neon.h"
+#endif
+
+namespace webrtc {
+
+const int kMotionMagnitudeThreshold = 8 * 3;
+const int kSumDiffThreshold = 96;
+const int kSumDiffThresholdHigh = 448;
+
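+// Returns the filter implementation for the host CPU: SSE2 on x86 (selected
+// at compile time or via runtime CPU detection), NEON when built with NEON
+// support, and the plain C filter otherwise or when `runtime_cpu_detection`
+// is false.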
+std::unique_ptr<DenoiserFilter> DenoiserFilter::Create(
+ bool runtime_cpu_detection,
+ CpuType* cpu_type) {
+ std::unique_ptr<DenoiserFilter> filter;
+
+ if (cpu_type != nullptr)
+ *cpu_type = CPU_NOT_NEON;
+ if (runtime_cpu_detection) {
+// If we know the minimum architecture at compile time, avoid CPU detection.
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+#if defined(__SSE2__)
+ filter.reset(new DenoiserFilterSSE2());
+#else
+ // x86 CPU detection required.
+ if (GetCPUInfo(kSSE2)) {
+ filter.reset(new DenoiserFilterSSE2());
+ } else {
+ filter.reset(new DenoiserFilterC());
+ }
+#endif
+#elif defined(WEBRTC_HAS_NEON)
+ filter.reset(new DenoiserFilterNEON());
+ if (cpu_type != nullptr)
+ *cpu_type = CPU_NEON;
+#else
+ filter.reset(new DenoiserFilterC());
+#endif
+ } else {
+ filter.reset(new DenoiserFilterC());
+ }
+
+ RTC_DCHECK(filter.get() != nullptr);
+ return filter;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_processing/util/denoiser_filter.h b/third_party/libwebrtc/modules/video_processing/util/denoiser_filter.h
new file mode 100644
index 0000000000..1d574f4a4f
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_processing/util/denoiser_filter.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_PROCESSING_UTIL_DENOISER_FILTER_H_
+#define MODULES_VIDEO_PROCESSING_UTIL_DENOISER_FILTER_H_
+
+#include <stdint.h>
+
+#include <memory>
+
+namespace webrtc {
+
+extern const int kMotionMagnitudeThreshold;
+extern const int kSumDiffThreshold;
+extern const int kSumDiffThresholdHigh;
+
+enum DenoiserDecision { COPY_BLOCK, FILTER_BLOCK };
+enum CpuType { CPU_NEON, CPU_NOT_NEON };
+
+class DenoiserFilter {
+ public:
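+  // A minimal usage sketch (illustration only):
+  //   CpuType cpu_type;
+  //   std::unique_ptr<DenoiserFilter> filter =
+  //       DenoiserFilter::Create(/*runtime_cpu_detection=*/true, &cpu_type);
+  // `cpu_type` is set to CPU_NEON only when the NEON implementation is
+  // selected.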
+ static std::unique_ptr<DenoiserFilter> Create(bool runtime_cpu_detection,
+ CpuType* cpu_type);
+
+ virtual ~DenoiserFilter() {}
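+  // Computes the variance of a 16x8 block sampled from every other row of a
+  // 16x16 region; `sse` receives the sum of squared differences.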
+ virtual uint32_t Variance16x8(const uint8_t* a,
+ int a_stride,
+ const uint8_t* b,
+ int b_stride,
+ unsigned int* sse) = 0;
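+  // Denoises one 16x16 macroblock: adjusts `sig` toward the motion-compensated
+  // running average and returns FILTER_BLOCK, or COPY_BLOCK when the total
+  // adjustment is too large to filter safely.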
+ virtual DenoiserDecision MbDenoise(const uint8_t* mc_running_avg_y,
+ int mc_avg_y_stride,
+ uint8_t* running_avg_y,
+ int avg_y_stride,
+ const uint8_t* sig,
+ int sig_stride,
+ uint8_t motion_magnitude,
+ int increase_denoising) = 0;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_PROCESSING_UTIL_DENOISER_FILTER_H_
diff --git a/third_party/libwebrtc/modules/video_processing/util/denoiser_filter_c.cc b/third_party/libwebrtc/modules/video_processing/util/denoiser_filter_c.cc
new file mode 100644
index 0000000000..55c0ea7b35
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_processing/util/denoiser_filter_c.cc
@@ -0,0 +1,126 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_processing/util/denoiser_filter_c.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+namespace webrtc {
+
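+// Plain C reference implementation: accumulates the signed sum and the sum of
+// squared differences over 8 alternating rows of 16 pixels, then returns
+// variance = sse - sum * sum / 128.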
+uint32_t DenoiserFilterC::Variance16x8(const uint8_t* a,
+ int a_stride,
+ const uint8_t* b,
+ int b_stride,
+ uint32_t* sse) {
+ int sum = 0;
+ *sse = 0;
+ a_stride <<= 1;
+ b_stride <<= 1;
+
+ for (int i = 0; i < 8; i++) {
+ for (int j = 0; j < 16; j++) {
+ const int diff = a[j] - b[j];
+ sum += diff;
+ *sse += diff * diff;
+ }
+
+ a += a_stride;
+ b += b_stride;
+ }
+ return *sse - ((static_cast<int64_t>(sum) * sum) >> 7);
+}
+
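+// For each pixel, applies an adjustment that grows with the absolute
+// difference between the running average and the signal (level boundaries at
+// |4 + shift_inc1|, 8 and 16), accumulating per-column sums (clipped at 127)
+// to decide whether the total change is small enough to keep the filtering.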
+DenoiserDecision DenoiserFilterC::MbDenoise(const uint8_t* mc_running_avg_y,
+ int mc_avg_y_stride,
+ uint8_t* running_avg_y,
+ int avg_y_stride,
+ const uint8_t* sig,
+ int sig_stride,
+ uint8_t motion_magnitude,
+ int increase_denoising) {
+ int sum_diff_thresh = 0;
+ int sum_diff = 0;
+ int adj_val[3] = {3, 4, 6};
+ int shift_inc1 = 0;
+ int shift_inc2 = 1;
+ int col_sum[16] = {0};
+ if (motion_magnitude <= kMotionMagnitudeThreshold) {
+ if (increase_denoising) {
+ shift_inc1 = 1;
+ shift_inc2 = 2;
+ }
+ adj_val[0] += shift_inc2;
+ adj_val[1] += shift_inc2;
+ adj_val[2] += shift_inc2;
+ }
+
+ for (int r = 0; r < 16; ++r) {
+ for (int c = 0; c < 16; ++c) {
+ int diff = 0;
+ int adjustment = 0;
+ int absdiff = 0;
+
+ diff = mc_running_avg_y[c] - sig[c];
+ absdiff = abs(diff);
+
+      // When `diff` <= |3 + shift_inc1|, use the pixel value from the
+      // last denoised frame.
+ if (absdiff <= 3 + shift_inc1) {
+ running_avg_y[c] = mc_running_avg_y[c];
+ col_sum[c] += diff;
+ } else {
+ if (absdiff >= 4 + shift_inc1 && absdiff <= 7)
+ adjustment = adj_val[0];
+ else if (absdiff >= 8 && absdiff <= 15)
+ adjustment = adj_val[1];
+ else
+ adjustment = adj_val[2];
+
+ if (diff > 0) {
+ if ((sig[c] + adjustment) > 255)
+ running_avg_y[c] = 255;
+ else
+ running_avg_y[c] = sig[c] + adjustment;
+
+ col_sum[c] += adjustment;
+ } else {
+ if ((sig[c] - adjustment) < 0)
+ running_avg_y[c] = 0;
+ else
+ running_avg_y[c] = sig[c] - adjustment;
+
+ col_sum[c] -= adjustment;
+ }
+ }
+ }
+
+ // Update pointers for next iteration.
+ sig += sig_stride;
+ mc_running_avg_y += mc_avg_y_stride;
+ running_avg_y += avg_y_stride;
+ }
+
+ for (int c = 0; c < 16; ++c) {
+ if (col_sum[c] >= 128) {
+ col_sum[c] = 127;
+ }
+ sum_diff += col_sum[c];
+ }
+
+ sum_diff_thresh =
+ increase_denoising ? kSumDiffThresholdHigh : kSumDiffThreshold;
+ if (abs(sum_diff) > sum_diff_thresh)
+ return COPY_BLOCK;
+
+ return FILTER_BLOCK;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_processing/util/denoiser_filter_c.h b/third_party/libwebrtc/modules/video_processing/util/denoiser_filter_c.h
new file mode 100644
index 0000000000..5633c171f0
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_processing/util/denoiser_filter_c.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_PROCESSING_UTIL_DENOISER_FILTER_C_H_
+#define MODULES_VIDEO_PROCESSING_UTIL_DENOISER_FILTER_C_H_
+
+#include <stdint.h>
+
+#include "modules/video_processing/util/denoiser_filter.h"
+
+namespace webrtc {
+
+class DenoiserFilterC : public DenoiserFilter {
+ public:
+ DenoiserFilterC() {}
+ uint32_t Variance16x8(const uint8_t* a,
+ int a_stride,
+ const uint8_t* b,
+ int b_stride,
+ unsigned int* sse) override;
+ DenoiserDecision MbDenoise(const uint8_t* mc_running_avg_y,
+ int mc_avg_y_stride,
+ uint8_t* running_avg_y,
+ int avg_y_stride,
+ const uint8_t* sig,
+ int sig_stride,
+ uint8_t motion_magnitude,
+ int increase_denoising) override;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_PROCESSING_UTIL_DENOISER_FILTER_C_H_
diff --git a/third_party/libwebrtc/modules/video_processing/util/denoiser_filter_neon.cc b/third_party/libwebrtc/modules/video_processing/util/denoiser_filter_neon.cc
new file mode 100644
index 0000000000..e1e6ed4f18
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_processing/util/denoiser_filter_neon.cc
@@ -0,0 +1,182 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_processing/util/denoiser_filter_neon.h"
+
+#include <arm_neon.h>
+
+namespace webrtc {
+
+const int kSumDiffThresholdHighNeon = 600;
+
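+// Reduces all eight signed 16-bit lanes to one scalar sum via pairwise
+// widening adds.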
+static int HorizontalAddS16x8(const int16x8_t v_16x8) {
+ const int32x4_t a = vpaddlq_s16(v_16x8);
+ const int64x2_t b = vpaddlq_s32(a);
+ const int32x2_t c = vadd_s32(vreinterpret_s32_s64(vget_low_s64(b)),
+ vreinterpret_s32_s64(vget_high_s64(b)));
+ return vget_lane_s32(c, 0);
+}
+
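+// Reduces all four signed 32-bit lanes to one scalar sum.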
+static int HorizontalAddS32x4(const int32x4_t v_32x4) {
+ const int64x2_t b = vpaddlq_s32(v_32x4);
+ const int32x2_t c = vadd_s32(vreinterpret_s32_s64(vget_low_s64(b)),
+ vreinterpret_s32_s64(vget_high_s64(b)));
+ return vget_lane_s32(c, 0);
+}
+
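+// Accumulates the signed sum and the sum of squared differences between two
+// w x h pixel blocks, eight pixels at a time.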
+static void VarianceNeonW8(const uint8_t* a,
+ int a_stride,
+ const uint8_t* b,
+ int b_stride,
+ int w,
+ int h,
+ uint32_t* sse,
+ int64_t* sum) {
+ int16x8_t v_sum = vdupq_n_s16(0);
+ int32x4_t v_sse_lo = vdupq_n_s32(0);
+ int32x4_t v_sse_hi = vdupq_n_s32(0);
+
+ for (int i = 0; i < h; ++i) {
+ for (int j = 0; j < w; j += 8) {
+ const uint8x8_t v_a = vld1_u8(&a[j]);
+ const uint8x8_t v_b = vld1_u8(&b[j]);
+ const uint16x8_t v_diff = vsubl_u8(v_a, v_b);
+ const int16x8_t sv_diff = vreinterpretq_s16_u16(v_diff);
+ v_sum = vaddq_s16(v_sum, sv_diff);
+ v_sse_lo =
+ vmlal_s16(v_sse_lo, vget_low_s16(sv_diff), vget_low_s16(sv_diff));
+ v_sse_hi =
+ vmlal_s16(v_sse_hi, vget_high_s16(sv_diff), vget_high_s16(sv_diff));
+ }
+ a += a_stride;
+ b += b_stride;
+ }
+
+ *sum = HorizontalAddS16x8(v_sum);
+ *sse =
+ static_cast<uint32_t>(HorizontalAddS32x4(vaddq_s32(v_sse_lo, v_sse_hi)));
+}
+
+uint32_t DenoiserFilterNEON::Variance16x8(const uint8_t* a,
+ int a_stride,
+ const uint8_t* b,
+ int b_stride,
+ uint32_t* sse) {
+ int64_t sum = 0;
+ VarianceNeonW8(a, a_stride << 1, b, b_stride << 1, 16, 8, sse, &sum);
+ return *sse - ((sum * sum) >> 7);
+}
+
+DenoiserDecision DenoiserFilterNEON::MbDenoise(const uint8_t* mc_running_avg_y,
+ int mc_running_avg_y_stride,
+ uint8_t* running_avg_y,
+ int running_avg_y_stride,
+ const uint8_t* sig,
+ int sig_stride,
+ uint8_t motion_magnitude,
+ int increase_denoising) {
+  // If motion_magnitude is small, make the denoiser more aggressive by
+  // increasing the adjustment for each level: the level 1 adjustment is
+  // increased while the deltas between levels stay the same.
+ int shift_inc =
+ (increase_denoising && motion_magnitude <= kMotionMagnitudeThreshold) ? 1
+ : 0;
+ int sum_diff_thresh = 0;
+ const uint8x16_t v_level1_adjustment = vmovq_n_u8(
+ (motion_magnitude <= kMotionMagnitudeThreshold) ? 4 + shift_inc : 3);
+ const uint8x16_t v_delta_level_1_and_2 = vdupq_n_u8(1);
+ const uint8x16_t v_delta_level_2_and_3 = vdupq_n_u8(2);
+ const uint8x16_t v_level1_threshold = vmovq_n_u8(4 + shift_inc);
+ const uint8x16_t v_level2_threshold = vdupq_n_u8(8);
+ const uint8x16_t v_level3_threshold = vdupq_n_u8(16);
+ int64x2_t v_sum_diff_total = vdupq_n_s64(0);
+
+ // Go over lines.
+ for (int r = 0; r < 16; ++r) {
+ // Load inputs.
+ const uint8x16_t v_sig = vld1q_u8(sig);
+ const uint8x16_t v_mc_running_avg_y = vld1q_u8(mc_running_avg_y);
+
+ // Calculate absolute difference and sign masks.
+ const uint8x16_t v_abs_diff = vabdq_u8(v_sig, v_mc_running_avg_y);
+ const uint8x16_t v_diff_pos_mask = vcltq_u8(v_sig, v_mc_running_avg_y);
+ const uint8x16_t v_diff_neg_mask = vcgtq_u8(v_sig, v_mc_running_avg_y);
+
+    // Figure out which level we are in.
+ const uint8x16_t v_level1_mask = vcleq_u8(v_level1_threshold, v_abs_diff);
+ const uint8x16_t v_level2_mask = vcleq_u8(v_level2_threshold, v_abs_diff);
+ const uint8x16_t v_level3_mask = vcleq_u8(v_level3_threshold, v_abs_diff);
+
+ // Calculate absolute adjustments for level 1, 2 and 3.
+ const uint8x16_t v_level2_adjustment =
+ vandq_u8(v_level2_mask, v_delta_level_1_and_2);
+ const uint8x16_t v_level3_adjustment =
+ vandq_u8(v_level3_mask, v_delta_level_2_and_3);
+ const uint8x16_t v_level1and2_adjustment =
+ vaddq_u8(v_level1_adjustment, v_level2_adjustment);
+ const uint8x16_t v_level1and2and3_adjustment =
+ vaddq_u8(v_level1and2_adjustment, v_level3_adjustment);
+
+    // Compute the adjustment's absolute value by selecting the absolute
+    // difference when in level 0, or the value for levels 1, 2 and 3.
+ const uint8x16_t v_abs_adjustment =
+ vbslq_u8(v_level1_mask, v_level1and2and3_adjustment, v_abs_diff);
+
+ // Calculate positive and negative adjustments. Apply them to the signal
+ // and accumulate them. Adjustments are less than eight and the maximum
+ // sum of them (7 * 16) can fit in a signed char.
+ const uint8x16_t v_pos_adjustment =
+ vandq_u8(v_diff_pos_mask, v_abs_adjustment);
+ const uint8x16_t v_neg_adjustment =
+ vandq_u8(v_diff_neg_mask, v_abs_adjustment);
+
+ uint8x16_t v_running_avg_y = vqaddq_u8(v_sig, v_pos_adjustment);
+ v_running_avg_y = vqsubq_u8(v_running_avg_y, v_neg_adjustment);
+
+ // Store results.
+ vst1q_u8(running_avg_y, v_running_avg_y);
+
+    // Sum all the accumulators to get the sum of all pixel differences
+    // for this macroblock.
+ {
+ const int8x16_t v_sum_diff =
+ vqsubq_s8(vreinterpretq_s8_u8(v_pos_adjustment),
+ vreinterpretq_s8_u8(v_neg_adjustment));
+ const int16x8_t fe_dc_ba_98_76_54_32_10 = vpaddlq_s8(v_sum_diff);
+ const int32x4_t fedc_ba98_7654_3210 =
+ vpaddlq_s16(fe_dc_ba_98_76_54_32_10);
+ const int64x2_t fedcba98_76543210 = vpaddlq_s32(fedc_ba98_7654_3210);
+
+ v_sum_diff_total = vqaddq_s64(v_sum_diff_total, fedcba98_76543210);
+ }
+
+ // Update pointers for next iteration.
+ sig += sig_stride;
+ mc_running_avg_y += mc_running_avg_y_stride;
+ running_avg_y += running_avg_y_stride;
+ }
+
+  // Too many adjustments => copy block.
+ int64x1_t x = vqadd_s64(vget_high_s64(v_sum_diff_total),
+ vget_low_s64(v_sum_diff_total));
+ int sum_diff = vget_lane_s32(vabs_s32(vreinterpret_s32_s64(x)), 0);
+ sum_diff_thresh =
+ increase_denoising ? kSumDiffThresholdHighNeon : kSumDiffThreshold;
+ if (sum_diff > sum_diff_thresh)
+ return COPY_BLOCK;
+
+ // Tell above level that block was filtered.
+ running_avg_y -= running_avg_y_stride * 16;
+ sig -= sig_stride * 16;
+
+ return FILTER_BLOCK;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_processing/util/denoiser_filter_neon.h b/third_party/libwebrtc/modules/video_processing/util/denoiser_filter_neon.h
new file mode 100644
index 0000000000..4d9f271e5a
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_processing/util/denoiser_filter_neon.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_PROCESSING_UTIL_DENOISER_FILTER_NEON_H_
+#define MODULES_VIDEO_PROCESSING_UTIL_DENOISER_FILTER_NEON_H_
+
+#include "modules/video_processing/util/denoiser_filter.h"
+
+namespace webrtc {
+
+class DenoiserFilterNEON : public DenoiserFilter {
+ public:
+ DenoiserFilterNEON() {}
+ uint32_t Variance16x8(const uint8_t* a,
+ int a_stride,
+ const uint8_t* b,
+ int b_stride,
+ unsigned int* sse) override;
+ DenoiserDecision MbDenoise(const uint8_t* mc_running_avg_y,
+ int mc_avg_y_stride,
+ uint8_t* running_avg_y,
+ int avg_y_stride,
+ const uint8_t* sig,
+ int sig_stride,
+ uint8_t motion_magnitude,
+ int increase_denoising) override;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_PROCESSING_UTIL_DENOISER_FILTER_NEON_H_
diff --git a/third_party/libwebrtc/modules/video_processing/util/denoiser_filter_sse2.cc b/third_party/libwebrtc/modules/video_processing/util/denoiser_filter_sse2.cc
new file mode 100644
index 0000000000..5ca5f0cf34
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_processing/util/denoiser_filter_sse2.cc
@@ -0,0 +1,200 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_processing/util/denoiser_filter_sse2.h"
+
+#include <emmintrin.h>
+#include <stdlib.h>
+#include <string.h>
+
+namespace webrtc {
+
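+// Computes the sum and the sum of squared differences for one 8x8 block, two
+// rows per iteration, using 16-bit lanes for diffs and 32-bit lanes for the
+// squares.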
+static void Get8x8varSse2(const uint8_t* src,
+ int src_stride,
+ const uint8_t* ref,
+ int ref_stride,
+ unsigned int* sse,
+ int* sum) {
+ const __m128i zero = _mm_setzero_si128();
+ __m128i vsum = _mm_setzero_si128();
+ __m128i vsse = _mm_setzero_si128();
+
+ for (int i = 0; i < 8; i += 2) {
+ const __m128i src0 = _mm_unpacklo_epi8(
+ _mm_loadl_epi64((const __m128i*)(src + i * src_stride)), zero);
+ const __m128i ref0 = _mm_unpacklo_epi8(
+ _mm_loadl_epi64((const __m128i*)(ref + i * ref_stride)), zero);
+ const __m128i diff0 = _mm_sub_epi16(src0, ref0);
+
+ const __m128i src1 = _mm_unpacklo_epi8(
+ _mm_loadl_epi64((const __m128i*)(src + (i + 1) * src_stride)), zero);
+ const __m128i ref1 = _mm_unpacklo_epi8(
+ _mm_loadl_epi64((const __m128i*)(ref + (i + 1) * ref_stride)), zero);
+ const __m128i diff1 = _mm_sub_epi16(src1, ref1);
+
+ vsum = _mm_add_epi16(vsum, diff0);
+ vsum = _mm_add_epi16(vsum, diff1);
+ vsse = _mm_add_epi32(vsse, _mm_madd_epi16(diff0, diff0));
+ vsse = _mm_add_epi32(vsse, _mm_madd_epi16(diff1, diff1));
+ }
+
+ // sum
+ vsum = _mm_add_epi16(vsum, _mm_srli_si128(vsum, 8));
+ vsum = _mm_add_epi16(vsum, _mm_srli_si128(vsum, 4));
+ vsum = _mm_add_epi16(vsum, _mm_srli_si128(vsum, 2));
+ *sum = static_cast<int16_t>(_mm_extract_epi16(vsum, 0));
+
+ // sse
+ vsse = _mm_add_epi32(vsse, _mm_srli_si128(vsse, 8));
+ vsse = _mm_add_epi32(vsse, _mm_srli_si128(vsse, 4));
+ *sse = _mm_cvtsi128_si32(vsse);
+}
+
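+// Tiles the w x h region into block_size x block_size pieces and accumulates
+// the per-block results from Get8x8varSse2.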
+static void VarianceSSE2(const unsigned char* src,
+ int src_stride,
+ const unsigned char* ref,
+ int ref_stride,
+ int w,
+ int h,
+ uint32_t* sse,
+ int64_t* sum,
+ int block_size) {
+ *sse = 0;
+ *sum = 0;
+
+ for (int i = 0; i < h; i += block_size) {
+ for (int j = 0; j < w; j += block_size) {
+ uint32_t sse0 = 0;
+ int32_t sum0 = 0;
+
+ Get8x8varSse2(src + src_stride * i + j, src_stride,
+ ref + ref_stride * i + j, ref_stride, &sse0, &sum0);
+ *sse += sse0;
+ *sum += sum0;
+ }
+ }
+}
+
+// Compute the sum of all pixel differences of this MB.
+static uint32_t AbsSumDiff16x1(__m128i acc_diff) {
+ const __m128i k_1 = _mm_set1_epi16(1);
+ const __m128i acc_diff_lo =
+ _mm_srai_epi16(_mm_unpacklo_epi8(acc_diff, acc_diff), 8);
+ const __m128i acc_diff_hi =
+ _mm_srai_epi16(_mm_unpackhi_epi8(acc_diff, acc_diff), 8);
+ const __m128i acc_diff_16 = _mm_add_epi16(acc_diff_lo, acc_diff_hi);
+ const __m128i hg_fe_dc_ba = _mm_madd_epi16(acc_diff_16, k_1);
+ const __m128i hgfe_dcba =
+ _mm_add_epi32(hg_fe_dc_ba, _mm_srli_si128(hg_fe_dc_ba, 8));
+ const __m128i hgfedcba =
+ _mm_add_epi32(hgfe_dcba, _mm_srli_si128(hgfe_dcba, 4));
+ unsigned int sum_diff = abs(_mm_cvtsi128_si32(hgfedcba));
+
+ return sum_diff;
+}
+
+uint32_t DenoiserFilterSSE2::Variance16x8(const uint8_t* src,
+ int src_stride,
+ const uint8_t* ref,
+ int ref_stride,
+ uint32_t* sse) {
+ int64_t sum = 0;
+ VarianceSSE2(src, src_stride << 1, ref, ref_stride << 1, 16, 8, sse, &sum, 8);
+ return *sse - ((sum * sum) >> 7);
+}
+
+DenoiserDecision DenoiserFilterSSE2::MbDenoise(const uint8_t* mc_running_avg_y,
+ int mc_avg_y_stride,
+ uint8_t* running_avg_y,
+ int avg_y_stride,
+ const uint8_t* sig,
+ int sig_stride,
+ uint8_t motion_magnitude,
+ int increase_denoising) {
+ DenoiserDecision decision = FILTER_BLOCK;
+ unsigned int sum_diff_thresh = 0;
+ int shift_inc =
+ (increase_denoising && motion_magnitude <= kMotionMagnitudeThreshold) ? 1
+ : 0;
+ __m128i acc_diff = _mm_setzero_si128();
+ const __m128i k_0 = _mm_setzero_si128();
+ const __m128i k_4 = _mm_set1_epi8(4 + shift_inc);
+ const __m128i k_8 = _mm_set1_epi8(8);
+ const __m128i k_16 = _mm_set1_epi8(16);
+ // Modify each level's adjustment according to motion_magnitude.
+ const __m128i l3 = _mm_set1_epi8(
+ (motion_magnitude <= kMotionMagnitudeThreshold) ? 7 + shift_inc : 6);
+ // Difference between level 3 and level 2 is 2.
+ const __m128i l32 = _mm_set1_epi8(2);
+ // Difference between level 2 and level 1 is 1.
+ const __m128i l21 = _mm_set1_epi8(1);
+
+ for (int r = 0; r < 16; ++r) {
+ // Calculate differences.
+ const __m128i v_sig =
+ _mm_loadu_si128(reinterpret_cast<const __m128i*>(&sig[0]));
+ const __m128i v_mc_running_avg_y =
+ _mm_loadu_si128(reinterpret_cast<const __m128i*>(&mc_running_avg_y[0]));
+ __m128i v_running_avg_y;
+ const __m128i pdiff = _mm_subs_epu8(v_mc_running_avg_y, v_sig);
+ const __m128i ndiff = _mm_subs_epu8(v_sig, v_mc_running_avg_y);
+ // Obtain the sign. FF if diff is negative.
+ const __m128i diff_sign = _mm_cmpeq_epi8(pdiff, k_0);
+ // Clamp absolute difference to 16 to be used to get mask. Doing this
+ // allows us to use _mm_cmpgt_epi8, which operates on signed byte.
+ const __m128i clamped_absdiff =
+ _mm_min_epu8(_mm_or_si128(pdiff, ndiff), k_16);
+ // Get masks for l2 l1 and l0 adjustments.
+ const __m128i mask2 = _mm_cmpgt_epi8(k_16, clamped_absdiff);
+ const __m128i mask1 = _mm_cmpgt_epi8(k_8, clamped_absdiff);
+ const __m128i mask0 = _mm_cmpgt_epi8(k_4, clamped_absdiff);
+ // Get adjustments for l2, l1, and l0.
+ __m128i adj2 = _mm_and_si128(mask2, l32);
+ const __m128i adj1 = _mm_and_si128(mask1, l21);
+ const __m128i adj0 = _mm_and_si128(mask0, clamped_absdiff);
+ __m128i adj, padj, nadj;
+
+ // Combine the adjustments and get absolute adjustments.
+ adj2 = _mm_add_epi8(adj2, adj1);
+ adj = _mm_sub_epi8(l3, adj2);
+ adj = _mm_andnot_si128(mask0, adj);
+ adj = _mm_or_si128(adj, adj0);
+
+ // Restore the sign and get positive and negative adjustments.
+ padj = _mm_andnot_si128(diff_sign, adj);
+ nadj = _mm_and_si128(diff_sign, adj);
+
+ // Calculate filtered value.
+ v_running_avg_y = _mm_adds_epu8(v_sig, padj);
+ v_running_avg_y = _mm_subs_epu8(v_running_avg_y, nadj);
+ _mm_storeu_si128(reinterpret_cast<__m128i*>(running_avg_y),
+ v_running_avg_y);
+
+    // Adjustments are <= 7, so each element in acc_diff fits in a signed
+    // char.
+ acc_diff = _mm_adds_epi8(acc_diff, padj);
+ acc_diff = _mm_subs_epi8(acc_diff, nadj);
+
+ // Update pointers for next iteration.
+ sig += sig_stride;
+ mc_running_avg_y += mc_avg_y_stride;
+ running_avg_y += avg_y_stride;
+ }
+
+ // Compute the sum of all pixel differences of this MB.
+ unsigned int abs_sum_diff = AbsSumDiff16x1(acc_diff);
+ sum_diff_thresh =
+ increase_denoising ? kSumDiffThresholdHigh : kSumDiffThreshold;
+ if (abs_sum_diff > sum_diff_thresh)
+ decision = COPY_BLOCK;
+ return decision;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_processing/util/denoiser_filter_sse2.h b/third_party/libwebrtc/modules/video_processing/util/denoiser_filter_sse2.h
new file mode 100644
index 0000000000..8fe4b905ae
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_processing/util/denoiser_filter_sse2.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_PROCESSING_UTIL_DENOISER_FILTER_SSE2_H_
+#define MODULES_VIDEO_PROCESSING_UTIL_DENOISER_FILTER_SSE2_H_
+
+#include <stdint.h>
+
+#include "modules/video_processing/util/denoiser_filter.h"
+
+namespace webrtc {
+
+class DenoiserFilterSSE2 : public DenoiserFilter {
+ public:
+ DenoiserFilterSSE2() {}
+ uint32_t Variance16x8(const uint8_t* a,
+ int a_stride,
+ const uint8_t* b,
+ int b_stride,
+ unsigned int* sse) override;
+ DenoiserDecision MbDenoise(const uint8_t* mc_running_avg_y,
+ int mc_avg_y_stride,
+ uint8_t* running_avg_y,
+ int avg_y_stride,
+ const uint8_t* sig,
+ int sig_stride,
+ uint8_t motion_magnitude,
+ int increase_denoising) override;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_PROCESSING_UTIL_DENOISER_FILTER_SSE2_H_
diff --git a/third_party/libwebrtc/modules/video_processing/util/noise_estimation.cc b/third_party/libwebrtc/modules/video_processing/util/noise_estimation.cc
new file mode 100644
index 0000000000..c72f764901
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_processing/util/noise_estimation.cc
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_processing/util/noise_estimation.h"
+#if DISPLAYNEON
+#include <android/log.h>
+#endif
+
+namespace webrtc {
+
+void NoiseEstimation::Init(int width, int height, CpuType cpu_type) {
+ int mb_cols = width >> 4;
+ int mb_rows = height >> 4;
+ consec_low_var_.reset(new uint32_t[mb_cols * mb_rows]());
+ width_ = width;
+ height_ = height;
+ mb_cols_ = width_ >> 4;
+ mb_rows_ = height_ >> 4;
+ cpu_type_ = cpu_type;
+}
+
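+// Accumulates luma-normalized variance from blocks whose variance has stayed
+// low for kConsecLowVarFrame frames and whose average luma is mid-range.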
+void NoiseEstimation::GetNoise(int mb_index, uint32_t var, uint32_t luma) {
+ consec_low_var_[mb_index]++;
+ num_static_block_++;
+ if (consec_low_var_[mb_index] >= kConsecLowVarFrame &&
+ (luma >> 6) < kAverageLumaMax && (luma >> 6) > kAverageLumaMin) {
+    // Normalize var by the average luma value; this gives more weight to
+    // darker blocks.
+ int nor_var = var / (luma >> 10);
+ noise_var_ +=
+ nor_var > kBlockSelectionVarMax ? kBlockSelectionVarMax : nor_var;
+ num_noisy_block_++;
+ }
+}
+
+void NoiseEstimation::ResetConsecLowVar(int mb_index) {
+ consec_low_var_[mb_index] = 0;
+}
+
+void NoiseEstimation::UpdateNoiseLevel() {
+  // TODO(jackychen): Tune a threshold for num_noisy_block > T to make the
+  // condition more reasonable.
+  // Not enough samples implies camera motion or too many moving objects in
+  // the frame.
+ if (num_static_block_ <
+ (0.65 * mb_cols_ * mb_rows_ / NOISE_SUBSAMPLE_INTERVAL) ||
+ !num_noisy_block_) {
+#if DISPLAY
+ printf("Not enough samples. %d \n", num_static_block_);
+#elif DISPLAYNEON
+ __android_log_print(ANDROID_LOG_DEBUG, "DISPLAY",
+ "Not enough samples. %d \n", num_static_block_);
+#endif
+ noise_var_ = 0;
+ noise_var_accum_ = 0;
+ num_noisy_block_ = 0;
+ num_static_block_ = 0;
+ return;
+ } else {
+#if DISPLAY
+ printf("%d %d fraction = %.3f\n", num_static_block_,
+ mb_cols_ * mb_rows_ / NOISE_SUBSAMPLE_INTERVAL,
+ percent_static_block_);
+#elif DISPLAYNEON
+ __android_log_print(ANDROID_LOG_DEBUG, "DISPLAY", "%d %d fraction = %.3f\n",
+ num_static_block_,
+ mb_cols_ * mb_rows_ / NOISE_SUBSAMPLE_INTERVAL,
+ percent_static_block_);
+#endif
+ // Normalized by the number of noisy blocks.
+ noise_var_ /= num_noisy_block_;
+ // Get the percentage of static blocks.
+ percent_static_block_ = static_cast<double>(num_static_block_) /
+ (mb_cols_ * mb_rows_ / NOISE_SUBSAMPLE_INTERVAL);
+ num_noisy_block_ = 0;
+ num_static_block_ = 0;
+ }
+ // For the first frame just update the value with current noise_var_,
+ // otherwise, use the averaging window.
+ if (noise_var_accum_ == 0) {
+ noise_var_accum_ = noise_var_;
+ } else {
+ noise_var_accum_ = (noise_var_accum_ * 15 + noise_var_) / 16;
+ }
+#if DISPLAY
+ printf("noise_var_accum_ = %.1f, noise_var_ = %d.\n", noise_var_accum_,
+ noise_var_);
+#elif DISPLAYNEON
+ __android_log_print(ANDROID_LOG_DEBUG, "DISPLAY",
+ "noise_var_accum_ = %.1f, noise_var_ = %d.\n",
+ noise_var_accum_, noise_var_);
+#endif
+ // Reset noise_var_ for the next frame.
+ noise_var_ = 0;
+}
+
+uint8_t NoiseEstimation::GetNoiseLevel() {
+ int noise_thr = cpu_type_ ? kNoiseThreshold : kNoiseThresholdNeon;
+ UpdateNoiseLevel();
+ if (noise_var_accum_ > noise_thr) {
+ return 1;
+ }
+ return 0;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_processing/util/noise_estimation.h b/third_party/libwebrtc/modules/video_processing/util/noise_estimation.h
new file mode 100644
index 0000000000..4c5f10f1d8
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_processing/util/noise_estimation.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_PROCESSING_UTIL_NOISE_ESTIMATION_H_
+#define MODULES_VIDEO_PROCESSING_UTIL_NOISE_ESTIMATION_H_
+
+#include <cstdint>
+#include <memory>
+
+#include "modules/video_processing/util/denoiser_filter.h"
+
+namespace webrtc {
+
+#define DISPLAY 0 // Rectangle diagnostics
+#define DISPLAYNEON 0 // Rectangle diagnostics on NEON
+
+const int kNoiseThreshold = 150;
+const int kNoiseThresholdNeon = 70;
+const int kConsecLowVarFrame = 6;
+const int kAverageLumaMin = 20;
+const int kAverageLumaMax = 220;
+const int kBlockSelectionVarMax = kNoiseThreshold << 1;
+
+// TODO(jackychen): Test different sampling strategies.
+// Collect noise data every NOISE_SUBSAMPLE_INTERVAL blocks.
+#define NOISE_SUBSAMPLE_INTERVAL 41
+
+class NoiseEstimation {
+ public:
+ void Init(int width, int height, CpuType cpu_type);
+ // Collect noise data from one qualified block.
+ void GetNoise(int mb_index, uint32_t var, uint32_t luma);
+ // Reset the counter for consecutive low-var blocks.
+ void ResetConsecLowVar(int mb_index);
+ // Update noise level for current frame.
+ void UpdateNoiseLevel();
+ // 0: low noise, 1: high noise
+ uint8_t GetNoiseLevel();
+
+ private:
+ int width_;
+ int height_;
+ int mb_rows_;
+ int mb_cols_;
+ int num_noisy_block_;
+ int num_static_block_;
+ CpuType cpu_type_;
+ uint32_t noise_var_;
+ double noise_var_accum_;
+ double percent_static_block_;
+ std::unique_ptr<uint32_t[]> consec_low_var_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_PROCESSING_UTIL_NOISE_ESTIMATION_H_
diff --git a/third_party/libwebrtc/modules/video_processing/util/skin_detection.cc b/third_party/libwebrtc/modules/video_processing/util/skin_detection.cc
new file mode 100644
index 0000000000..76399d6e73
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_processing/util/skin_detection.cc
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_processing/util/skin_detection.h"
+
+namespace webrtc {
+
+// Fixed-point skin color model parameters.
+static const int skin_mean[5][2] = {{7463, 9614},
+ {6400, 10240},
+ {7040, 10240},
+ {8320, 9280},
+ {6800, 9614}};
+static const int skin_inv_cov[4] = {4107, 1663, 1663, 2157}; // q16
+static const int skin_threshold[6] = {1570636, 1400000, 800000,
+ 800000, 800000, 800000}; // q18
+
+// Thresholds on luminance.
+static const int y_low = 40;
+static const int y_high = 220;
+
+// Evaluates the Mahalanobis distance measure for the input CbCr values.
+static int EvaluateSkinColorDifference(int cb, int cr, int idx) {
+ const int cb_q6 = cb << 6;
+ const int cr_q6 = cr << 6;
+ const int cb_diff_q12 =
+ (cb_q6 - skin_mean[idx][0]) * (cb_q6 - skin_mean[idx][0]);
+ const int cbcr_diff_q12 =
+ (cb_q6 - skin_mean[idx][0]) * (cr_q6 - skin_mean[idx][1]);
+ const int cr_diff_q12 =
+ (cr_q6 - skin_mean[idx][1]) * (cr_q6 - skin_mean[idx][1]);
+ const int cb_diff_q2 = (cb_diff_q12 + (1 << 9)) >> 10;
+ const int cbcr_diff_q2 = (cbcr_diff_q12 + (1 << 9)) >> 10;
+ const int cr_diff_q2 = (cr_diff_q12 + (1 << 9)) >> 10;
+ const int skin_diff =
+ skin_inv_cov[0] * cb_diff_q2 + skin_inv_cov[1] * cbcr_diff_q2 +
+ skin_inv_cov[2] * cbcr_diff_q2 + skin_inv_cov[3] * cr_diff_q2;
+ return skin_diff;
+}
+
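+// Returns 1 if the (y, cb, cr) pixel is classified as skin. MODEL_MODE 0 uses
+// a single Mahalanobis test; otherwise several skin models are tried, with
+// early exits on grey, strong chroma and dark pixels.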
+static int SkinPixel(const uint8_t y, const uint8_t cb, const uint8_t cr) {
+ if (y < y_low || y > y_high) {
+ return 0;
+ } else {
+ if (MODEL_MODE == 0) {
+ return (EvaluateSkinColorDifference(cb, cr, 0) < skin_threshold[0]);
+ } else {
+ // Exit on grey.
+ if (cb == 128 && cr == 128)
+ return 0;
+ // Exit on very strong cb.
+ if (cb > 150 && cr < 110)
+ return 0;
+ // Exit on (another) low luminance threshold if either color is high.
+ if (y < 50 && (cb > 140 || cr > 140))
+ return 0;
+ for (int i = 0; i < 5; i++) {
+ int diff = EvaluateSkinColorDifference(cb, cr, i);
+ if (diff < skin_threshold[i + 1]) {
+ return 1;
+ } else if (diff > (skin_threshold[i + 1] << 3)) {
+        // Exit if the difference is much larger than the threshold.
+ return 0;
+ }
+ }
+ return 0;
+ }
+ }
+}
+
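+// Classifies a macroblock by testing the 2x2 average sampled at the center of
+// its luma and chroma planes.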
+bool MbHasSkinColor(const uint8_t* y_src,
+ const uint8_t* u_src,
+ const uint8_t* v_src,
+ const int stride_y,
+ const int stride_u,
+ const int stride_v,
+ const int mb_row,
+ const int mb_col) {
+ const uint8_t* y = y_src + ((mb_row << 4) + 8) * stride_y + (mb_col << 4) + 8;
+ const uint8_t* u = u_src + ((mb_row << 3) + 4) * stride_u + (mb_col << 3) + 4;
+ const uint8_t* v = v_src + ((mb_row << 3) + 4) * stride_v + (mb_col << 3) + 4;
+  // Use the 2x2 average around the center pixel to compute the skin area.
+ uint8_t y_avg = (*y + *(y + 1) + *(y + stride_y) + *(y + stride_y + 1)) >> 2;
+ uint8_t u_avg = (*u + *(u + 1) + *(u + stride_u) + *(u + stride_u + 1)) >> 2;
+ uint8_t v_avg = (*v + *(v + 1) + *(v + stride_v) + *(v + stride_v + 1)) >> 2;
+ return SkinPixel(y_avg, u_avg, v_avg) == 1;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_processing/util/skin_detection.h b/third_party/libwebrtc/modules/video_processing/util/skin_detection.h
new file mode 100644
index 0000000000..7f2e17aa87
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_processing/util/skin_detection.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_PROCESSING_UTIL_SKIN_DETECTION_H_
+#define MODULES_VIDEO_PROCESSING_UTIL_SKIN_DETECTION_H_
+
+namespace webrtc {
+
+#define MODEL_MODE 0
+
+typedef unsigned char uint8_t;
+bool MbHasSkinColor(const uint8_t* y_src,
+ const uint8_t* u_src,
+ const uint8_t* v_src,
+ int stride_y,
+ int stride_u,
+ int stride_v,
+ int mb_row,
+ int mb_col);
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_PROCESSING_UTIL_SKIN_DETECTION_H_
diff --git a/third_party/libwebrtc/modules/video_processing/video_denoiser.cc b/third_party/libwebrtc/modules/video_processing/video_denoiser.cc
new file mode 100644
index 0000000000..7d5368b934
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_processing/video_denoiser.cc
@@ -0,0 +1,339 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_processing/video_denoiser.h"
+
+#include <stdint.h>
+#include <string.h>
+
+#include "api/video/i420_buffer.h"
+#include "libyuv/include/libyuv/planar_functions.h"
+
+namespace webrtc {
+
+#if DISPLAY || DISPLAYNEON
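+// Debug overlay: paints moving-edge blocks red and remaining moving-object
+// blocks blue in the chroma planes for visual inspection.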
+static void ShowRect(const std::unique_ptr<DenoiserFilter>& filter,
+ const std::unique_ptr<uint8_t[]>& d_status,
+ const std::unique_ptr<uint8_t[]>& moving_edge_red,
+ const std::unique_ptr<uint8_t[]>& x_density,
+ const std::unique_ptr<uint8_t[]>& y_density,
+ const uint8_t* u_src,
+ int stride_u_src,
+ const uint8_t* v_src,
+ int stride_v_src,
+ uint8_t* u_dst,
+ int stride_u_dst,
+ uint8_t* v_dst,
+ int stride_v_dst,
+ int mb_rows_,
+ int mb_cols_) {
+ for (int mb_row = 0; mb_row < mb_rows_; ++mb_row) {
+ for (int mb_col = 0; mb_col < mb_cols_; ++mb_col) {
+ int mb_index = mb_row * mb_cols_ + mb_col;
+ const uint8_t* mb_src_u =
+ u_src + (mb_row << 3) * stride_u_src + (mb_col << 3);
+ const uint8_t* mb_src_v =
+ v_src + (mb_row << 3) * stride_v_src + (mb_col << 3);
+ uint8_t* mb_dst_u = u_dst + (mb_row << 3) * stride_u_dst + (mb_col << 3);
+ uint8_t* mb_dst_v = v_dst + (mb_row << 3) * stride_v_dst + (mb_col << 3);
+ uint8_t uv_tmp[8 * 8];
+ memset(uv_tmp, 200, 8 * 8);
+ if (d_status[mb_index] == 1) {
+ // Paint to red.
+ libyuv::CopyPlane(mb_src_u, stride_u_src, mb_dst_u, stride_u_dst, 8, 8);
+ libyuv::CopyPlane(uv_tmp, 8, mb_dst_v, stride_v_dst, 8, 8);
+ } else if (moving_edge_red[mb_row * mb_cols_ + mb_col] &&
+ x_density[mb_col] * y_density[mb_row]) {
+ // Paint to blue.
+ libyuv::CopyPlane(uv_tmp, 8, mb_dst_u, stride_u_dst, 8, 8);
+ libyuv::CopyPlane(mb_src_v, stride_v_src, mb_dst_v, stride_v_dst, 8, 8);
+ } else {
+ libyuv::CopyPlane(mb_src_u, stride_u_src, mb_dst_u, stride_u_dst, 8, 8);
+ libyuv::CopyPlane(mb_src_v, stride_v_src, mb_dst_v, stride_v_dst, 8, 8);
+ }
+ }
+ }
+}
+#endif
+
+VideoDenoiser::VideoDenoiser(bool runtime_cpu_detection)
+ : width_(0),
+ height_(0),
+ filter_(DenoiserFilter::Create(runtime_cpu_detection, &cpu_type_)),
+ ne_(new NoiseEstimation()) {}
+
+void VideoDenoiser::DenoiserReset(
+ rtc::scoped_refptr<I420BufferInterface> frame) {
+ width_ = frame->width();
+ height_ = frame->height();
+ mb_cols_ = width_ >> 4;
+ mb_rows_ = height_ >> 4;
+
+ // Init noise estimator and allocate buffers.
+ ne_->Init(width_, height_, cpu_type_);
+ moving_edge_.reset(new uint8_t[mb_cols_ * mb_rows_]);
+ mb_filter_decision_.reset(new DenoiserDecision[mb_cols_ * mb_rows_]);
+ x_density_.reset(new uint8_t[mb_cols_]);
+ y_density_.reset(new uint8_t[mb_rows_]);
+ moving_object_.reset(new uint8_t[mb_cols_ * mb_rows_]);
+}
+
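+// Returns a multiplier for the moving-edge variance threshold; with nonzero
+// noise_level, blocks near the top, left or right border get a larger
+// threshold (3x or 2x) than interior blocks.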
+int VideoDenoiser::PositionCheck(int mb_row, int mb_col, int noise_level) {
+ if (noise_level == 0)
+ return 1;
+ if ((mb_row <= (mb_rows_ >> 4)) || (mb_col <= (mb_cols_ >> 4)) ||
+ (mb_col >= (15 * mb_cols_ >> 4)))
+ return 3;
+ else if ((mb_row <= (mb_rows_ >> 3)) || (mb_col <= (mb_cols_ >> 3)) ||
+ (mb_col >= (7 * mb_cols_ >> 3)))
+ return 2;
+ else
+ return 1;
+}
+
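+// Clears moving-object flags outside the detected moving-edge region by
+// sweeping inward from each of the four frame corners.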
+void VideoDenoiser::ReduceFalseDetection(
+ const std::unique_ptr<uint8_t[]>& d_status,
+ std::unique_ptr<uint8_t[]>* moving_edge_red,
+ int noise_level) {
+ // From up left corner.
+ int mb_col_stop = mb_cols_ - 1;
+ for (int mb_row = 0; mb_row <= mb_rows_ - 1; ++mb_row) {
+ for (int mb_col = 0; mb_col <= mb_col_stop; ++mb_col) {
+ if (d_status[mb_row * mb_cols_ + mb_col]) {
+ mb_col_stop = mb_col - 1;
+ break;
+ }
+ (*moving_edge_red)[mb_row * mb_cols_ + mb_col] = 0;
+ }
+ }
+ // From bottom left corner.
+ mb_col_stop = mb_cols_ - 1;
+ for (int mb_row = mb_rows_ - 1; mb_row >= 0; --mb_row) {
+ for (int mb_col = 0; mb_col <= mb_col_stop; ++mb_col) {
+ if (d_status[mb_row * mb_cols_ + mb_col]) {
+ mb_col_stop = mb_col - 1;
+ break;
+ }
+ (*moving_edge_red)[mb_row * mb_cols_ + mb_col] = 0;
+ }
+ }
+ // From up right corner.
+ mb_col_stop = 0;
+ for (int mb_row = 0; mb_row <= mb_rows_ - 1; ++mb_row) {
+ for (int mb_col = mb_cols_ - 1; mb_col >= mb_col_stop; --mb_col) {
+ if (d_status[mb_row * mb_cols_ + mb_col]) {
+ mb_col_stop = mb_col + 1;
+ break;
+ }
+ (*moving_edge_red)[mb_row * mb_cols_ + mb_col] = 0;
+ }
+ }
+ // From bottom right corner.
+ mb_col_stop = 0;
+ for (int mb_row = mb_rows_ - 1; mb_row >= 0; --mb_row) {
+ for (int mb_col = mb_cols_ - 1; mb_col >= mb_col_stop; --mb_col) {
+ if (d_status[mb_row * mb_cols_ + mb_col]) {
+ mb_col_stop = mb_col + 1;
+ break;
+ }
+ (*moving_edge_red)[mb_row * mb_cols_ + mb_col] = 0;
+ }
+ }
+}
+
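+// A non-border block is a trailing block if any of its four neighbors is a
+// moving-edge block; such blocks are copied to avoid trailing artifacts.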
+bool VideoDenoiser::IsTrailingBlock(const std::unique_ptr<uint8_t[]>& d_status,
+ int mb_row,
+ int mb_col) {
+ bool ret = false;
+ int mb_index = mb_row * mb_cols_ + mb_col;
+ if (!mb_row || !mb_col || mb_row == mb_rows_ - 1 || mb_col == mb_cols_ - 1)
+ ret = false;
+ else
+ ret = d_status[mb_index + 1] || d_status[mb_index - 1] ||
+ d_status[mb_index + mb_cols_] || d_status[mb_index - mb_cols_];
+ return ret;
+}
+
+void VideoDenoiser::CopySrcOnMOB(const uint8_t* y_src,
+ int stride_src,
+ uint8_t* y_dst,
+ int stride_dst) {
+  // Loop over all blocks; copy the src block if it is marked as a moving
+  // object block or if it may cause trailing artifacts.
+ for (int mb_row = 0; mb_row < mb_rows_; ++mb_row) {
+ const int mb_index_base = mb_row * mb_cols_;
+ const uint8_t* mb_src_base = y_src + (mb_row << 4) * stride_src;
+ uint8_t* mb_dst_base = y_dst + (mb_row << 4) * stride_dst;
+ for (int mb_col = 0; mb_col < mb_cols_; ++mb_col) {
+ const int mb_index = mb_index_base + mb_col;
+ const uint32_t offset_col = mb_col << 4;
+ const uint8_t* mb_src = mb_src_base + offset_col;
+ uint8_t* mb_dst = mb_dst_base + offset_col;
+      // Check if the block is a moving object block or may cause trailing
+      // artifacts.
+ if (mb_filter_decision_[mb_index] != FILTER_BLOCK ||
+ IsTrailingBlock(moving_edge_, mb_row, mb_col) ||
+ (x_density_[mb_col] * y_density_[mb_row] &&
+ moving_object_[mb_row * mb_cols_ + mb_col])) {
+ // Copy y source.
+ libyuv::CopyPlane(mb_src, stride_src, mb_dst, stride_dst, 16, 16);
+ }
+ }
+ }
+}
+
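+// Copies the bottom and right luma margins (the part of the frame not covered
+// by whole 16x16 macroblocks) straight from the source.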
+void VideoDenoiser::CopyLumaOnMargin(const uint8_t* y_src,
+ int stride_src,
+ uint8_t* y_dst,
+ int stride_dst) {
+ int height_margin = height_ - (mb_rows_ << 4);
+ if (height_margin > 0) {
+ const uint8_t* margin_y_src = y_src + (mb_rows_ << 4) * stride_src;
+ uint8_t* margin_y_dst = y_dst + (mb_rows_ << 4) * stride_dst;
+ libyuv::CopyPlane(margin_y_src, stride_src, margin_y_dst, stride_dst,
+ width_, height_margin);
+ }
+ int width_margin = width_ - (mb_cols_ << 4);
+ if (width_margin > 0) {
+ const uint8_t* margin_y_src = y_src + (mb_cols_ << 4);
+ uint8_t* margin_y_dst = y_dst + (mb_cols_ << 4);
+ libyuv::CopyPlane(margin_y_src, stride_src, margin_y_dst, stride_dst,
+ width_ - (mb_cols_ << 4), mb_rows_ << 4);
+ }
+}
+
+rtc::scoped_refptr<I420BufferInterface> VideoDenoiser::DenoiseFrame(
+ rtc::scoped_refptr<I420BufferInterface> frame,
+ bool noise_estimation_enabled) {
+  // If the previous width and height are different from the current frame's,
+  // reallocate the buffers and skip denoising for the current frame.
+ if (!prev_buffer_ || width_ != frame->width() || height_ != frame->height()) {
+ DenoiserReset(frame);
+ prev_buffer_ = frame;
+ return frame;
+ }
+
+ // Set buffer pointers.
+ const uint8_t* y_src = frame->DataY();
+ int stride_y_src = frame->StrideY();
+ rtc::scoped_refptr<I420Buffer> dst =
+ buffer_pool_.CreateI420Buffer(width_, height_);
+
+ uint8_t* y_dst = dst->MutableDataY();
+ int stride_y_dst = dst->StrideY();
+
+ const uint8_t* y_dst_prev = prev_buffer_->DataY();
+ int stride_prev = prev_buffer_->StrideY();
+
+ memset(x_density_.get(), 0, mb_cols_);
+ memset(y_density_.get(), 0, mb_rows_);
+ memset(moving_object_.get(), 1, mb_cols_ * mb_rows_);
+
+ uint8_t noise_level = noise_estimation_enabled ? ne_->GetNoiseLevel() : 0;
+ int thr_var_base = 16 * 16 * 2;
+ // Loop over blocks to accumulate/extract noise level and update x/y_density
+ // factors for moving object detection.
+ for (int mb_row = 0; mb_row < mb_rows_; ++mb_row) {
+ const int mb_index_base = mb_row * mb_cols_;
+ const uint8_t* mb_src_base = y_src + (mb_row << 4) * stride_y_src;
+ uint8_t* mb_dst_base = y_dst + (mb_row << 4) * stride_y_dst;
+ const uint8_t* mb_dst_prev_base = y_dst_prev + (mb_row << 4) * stride_prev;
+ for (int mb_col = 0; mb_col < mb_cols_; ++mb_col) {
+ const int mb_index = mb_index_base + mb_col;
+ const bool ne_enable = (mb_index % NOISE_SUBSAMPLE_INTERVAL == 0);
+ const int pos_factor = PositionCheck(mb_row, mb_col, noise_level);
+ const uint32_t thr_var_adp = thr_var_base * pos_factor;
+ const uint32_t offset_col = mb_col << 4;
+ const uint8_t* mb_src = mb_src_base + offset_col;
+ uint8_t* mb_dst = mb_dst_base + offset_col;
+ const uint8_t* mb_dst_prev = mb_dst_prev_base + offset_col;
+
+ // TODO(jackychen): Need SSE2/NEON opt.
+ int luma = 0;
+ if (ne_enable) {
+ for (int i = 4; i < 12; ++i) {
+ for (int j = 4; j < 12; ++j) {
+ luma += mb_src[i * stride_y_src + j];
+ }
+ }
+ }
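+      // When sampled, luma holds the pixel sum over the central 8x8 of the
+      // 16x16 block; it is passed to the noise estimator below.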
+
+ // Get the filtered block and filter_decision.
+ mb_filter_decision_[mb_index] =
+ filter_->MbDenoise(mb_dst_prev, stride_prev, mb_dst, stride_y_dst,
+ mb_src, stride_y_src, 0, noise_level);
+
+      // If the filter decision is FILTER_BLOCK, there is no need to check for
+      // a moving edge; it is unlikely for a moving edge block to be filtered
+      // in the current setting.
+ if (mb_filter_decision_[mb_index] == FILTER_BLOCK) {
+ uint32_t sse_t = 0;
+ if (ne_enable) {
+          // The variance used in noise estimation is based on the src block
+          // in time t (mb_src) and the filtered block in time t-1
+          // (mb_dst_prev).
+          uint32_t noise_var = filter_->Variance16x8(
+              mb_dst_prev, stride_prev, mb_src, stride_y_src, &sse_t);
+ ne_->GetNoise(mb_index, noise_var, luma);
+ }
+ moving_edge_[mb_index] = 0; // Not a moving edge block.
+ } else {
+ uint32_t sse_t = 0;
+        // The variance used in MOD is based on the filtered blocks in time
+        // t (mb_dst) and t-1 (mb_dst_prev).
+ uint32_t noise_var = filter_->Variance16x8(
+ mb_dst_prev, stride_prev, mb_dst, stride_y_dst, &sse_t);
+ if (noise_var > thr_var_adp) { // Moving edge checking.
+ if (ne_enable) {
+ ne_->ResetConsecLowVar(mb_index);
+ }
+ moving_edge_[mb_index] = 1; // Mark as moving edge block.
+ x_density_[mb_col] += (pos_factor < 3);
+ y_density_[mb_row] += (pos_factor < 3);
+ } else {
+ moving_edge_[mb_index] = 0;
+ if (ne_enable) {
+            // The variance used in noise estimation is based on the src block
+            // in time t (mb_src) and the filtered block in time t-1
+            // (mb_dst_prev).
+ uint32_t noise_var = filter_->Variance16x8(
+ mb_dst_prev, stride_prev, mb_src, stride_y_src, &sse_t);
+ ne_->GetNoise(mb_index, noise_var, luma);
+ }
+ }
+ }
+    } // End of mb_col loop.
+  } // End of mb_row loop.
+
+ ReduceFalseDetection(moving_edge_, &moving_object_, noise_level);
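+  // moving_edge_ holds the raw per-block flags; moving_object_ now holds the
+  // refined map consumed by CopySrcOnMOB() below.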
+
+ CopySrcOnMOB(y_src, stride_y_src, y_dst, stride_y_dst);
+
+  // When the frame width/height is not divisible by 16, copy the margin to
+  // the denoised frame.
+ if ((mb_rows_ << 4) != height_ || (mb_cols_ << 4) != width_)
+ CopyLumaOnMargin(y_src, stride_y_src, y_dst, stride_y_dst);
+
+ // Copy u/v planes.
+ libyuv::CopyPlane(frame->DataU(), frame->StrideU(), dst->MutableDataU(),
+ dst->StrideU(), (width_ + 1) >> 1, (height_ + 1) >> 1);
+ libyuv::CopyPlane(frame->DataV(), frame->StrideV(), dst->MutableDataV(),
+ dst->StrideV(), (width_ + 1) >> 1, (height_ + 1) >> 1);
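+  // The chroma planes are passed through unfiltered; (width_ + 1) >> 1 and
+  // (height_ + 1) >> 1 round up to the I420 4:2:0 chroma dimensions.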
+
+#if DISPLAY || DISPLAYNEON
+  // Show the rectangular region.
+ ShowRect(filter_, moving_edge_, moving_object_, x_density_, y_density_,
+ frame->DataU(), frame->StrideU(), frame->DataV(), frame->StrideV(),
+ dst->MutableDataU(), dst->StrideU(), dst->MutableDataV(),
+ dst->StrideV(), mb_rows_, mb_cols_);
+#endif
+ prev_buffer_ = dst;
+ return dst;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_processing/video_denoiser.h b/third_party/libwebrtc/modules/video_processing/video_denoiser.h
new file mode 100644
index 0000000000..eb98c5bc53
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_processing/video_denoiser.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_PROCESSING_VIDEO_DENOISER_H_
+#define MODULES_VIDEO_PROCESSING_VIDEO_DENOISER_H_
+
+#include <memory>
+
+#include "api/scoped_refptr.h"
+#include "api/video/video_frame_buffer.h"
+#include "common_video/include/video_frame_buffer_pool.h"
+#include "modules/video_processing/util/denoiser_filter.h"
+#include "modules/video_processing/util/noise_estimation.h"
+#include "modules/video_processing/util/skin_detection.h"
+
+namespace webrtc {
+
+class VideoDenoiser {
+ public:
+ explicit VideoDenoiser(bool runtime_cpu_detection);
+
+ rtc::scoped_refptr<I420BufferInterface> DenoiseFrame(
+ rtc::scoped_refptr<I420BufferInterface> frame,
+ bool noise_estimation_enabled);
+
+ private:
+ void DenoiserReset(rtc::scoped_refptr<I420BufferInterface> frame);
+
+  // Check the mb position. Returns 1 if the block is close to the frame
+  // center (between 1/8 and 7/8 of the width/height), 3 if it is close to
+  // the border (outside 1/16 and 15/16 of the width/height), and 2 otherwise.
+ int PositionCheck(int mb_row, int mb_col, int noise_level);
+
+  // Reduces false detections in moving object detection (MOD).
+ void ReduceFalseDetection(const std::unique_ptr<uint8_t[]>& d_status,
+ std::unique_ptr<uint8_t[]>* d_status_red,
+ int noise_level);
+
+  // Returns whether a block might cause trailing artifacts by checking
+  // whether one of its neighboring blocks is a moving edge block.
+ bool IsTrailingBlock(const std::unique_ptr<uint8_t[]>& d_status,
+ int mb_row,
+ int mb_col);
+
+  // Copies input blocks to the dst buffer for moving object blocks (MOB).
+ void CopySrcOnMOB(const uint8_t* y_src,
+ int stride_src,
+ uint8_t* y_dst,
+ int stride_dst);
+
+  // Copies the luma margin when the frame width/height is not divisible by
+  // 16.
+ void CopyLumaOnMargin(const uint8_t* y_src,
+ int stride_src,
+ uint8_t* y_dst,
+ int stride_dst);
+
+ int width_;
+ int height_;
+ int mb_rows_;
+ int mb_cols_;
+ CpuType cpu_type_;
+ std::unique_ptr<DenoiserFilter> filter_;
+ std::unique_ptr<NoiseEstimation> ne_;
+ // 1 for moving edge block, 0 for static block.
+ std::unique_ptr<uint8_t[]> moving_edge_;
+ // 1 for moving object block, 0 for static block.
+ std::unique_ptr<uint8_t[]> moving_object_;
+  // x_density_ and y_density_ are used in the MOD process.
+ std::unique_ptr<uint8_t[]> x_density_;
+ std::unique_ptr<uint8_t[]> y_density_;
+  // Saves the decision returned by MbDenoise for each block.
+ std::unique_ptr<DenoiserDecision[]> mb_filter_decision_;
+ VideoFrameBufferPool buffer_pool_;
+ rtc::scoped_refptr<I420BufferInterface> prev_buffer_;
+};
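+
+// Usage sketch (illustrative only; the caller-side buffer names are
+// assumptions):
+//   VideoDenoiser denoiser(/*runtime_cpu_detection=*/true);
+//   rtc::scoped_refptr<I420BufferInterface> denoised =
+//       denoiser.DenoiseFrame(raw_frame, /*noise_estimation_enabled=*/true);
+// The first frame after construction or a resolution change is returned
+// unfiltered while the internal buffers are (re)allocated.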
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_PROCESSING_VIDEO_DENOISER_H_
diff --git a/third_party/libwebrtc/modules/video_processing/video_processing_gn/moz.build b/third_party/libwebrtc/modules/video_processing/video_processing_gn/moz.build
new file mode 100644
index 0000000000..aa332adfa9
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_processing/video_processing_gn/moz.build
@@ -0,0 +1,220 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/media/libyuv/",
+ "/media/libyuv/libyuv/include/",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/video_processing/util/denoiser_filter.cc",
+ "/third_party/libwebrtc/modules/video_processing/util/denoiser_filter_c.cc",
+ "/third_party/libwebrtc/modules/video_processing/util/noise_estimation.cc",
+ "/third_party/libwebrtc/modules/video_processing/util/skin_detection.cc",
+ "/third_party/libwebrtc/modules/video_processing/video_denoiser.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "GLESv2",
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "dl",
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("video_processing_gn")
diff --git a/third_party/libwebrtc/modules/video_processing/video_processing_neon_gn/moz.build b/third_party/libwebrtc/modules/video_processing/video_processing_neon_gn/moz.build
new file mode 100644
index 0000000000..3588b0ac32
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_processing/video_processing_neon_gn/moz.build
@@ -0,0 +1,175 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_AVX2"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_HAS_NEON"] = True
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/video_processing/util/denoiser_filter_neon.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+
+Library("video_processing_neon_gn")
diff --git a/third_party/libwebrtc/modules/video_processing/video_processing_sse2_gn/moz.build b/third_party/libwebrtc/modules/video_processing/video_processing_sse2_gn/moz.build
new file mode 100644
index 0000000000..9d41f521fd
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_processing/video_processing_sse2_gn/moz.build
@@ -0,0 +1,184 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_AVX2"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/video_processing/util/denoiser_filter_sse2.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+Library("video_processing_sse2_gn")